summaryrefslogtreecommitdiff
path: root/drivers/scsi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/.gitignore1
-rw-r--r--drivers/scsi/3w-9xxx.c2308
-rw-r--r--drivers/scsi/3w-9xxx.h681
-rw-r--r--drivers/scsi/3w-sas.c1888
-rw-r--r--drivers/scsi/3w-sas.h392
-rw-r--r--drivers/scsi/3w-xxxx.c2419
-rw-r--r--drivers/scsi/3w-xxxx.h430
-rw-r--r--drivers/scsi/53c700.c2130
-rw-r--r--drivers/scsi/53c700.h524
-rw-r--r--drivers/scsi/53c700.scr411
-rw-r--r--drivers/scsi/53c700_d.h_shipped1329
-rw-r--r--drivers/scsi/BusLogic.c3906
-rw-r--r--drivers/scsi/BusLogic.h1302
-rw-r--r--drivers/scsi/FlashPoint.c7588
-rw-r--r--drivers/scsi/Kconfig1761
-rw-r--r--drivers/scsi/Makefile200
-rw-r--r--drivers/scsi/NCR5380.c2749
-rw-r--r--drivers/scsi/NCR5380.h447
-rw-r--r--drivers/scsi/NCR53c406a.c1091
-rw-r--r--drivers/scsi/NCR_D700.c405
-rw-r--r--drivers/scsi/NCR_D700.h29
-rw-r--r--drivers/scsi/NCR_Q720.c377
-rw-r--r--drivers/scsi/NCR_Q720.h28
-rw-r--r--drivers/scsi/a100u2w.c1241
-rw-r--r--drivers/scsi/a100u2w.h371
-rw-r--r--drivers/scsi/a2091.c281
-rw-r--r--drivers/scsi/a2091.h69
-rw-r--r--drivers/scsi/a3000.c286
-rw-r--r--drivers/scsi/a3000.h72
-rw-r--r--drivers/scsi/a4000t.c124
-rw-r--r--drivers/scsi/aacraid/Makefile8
-rw-r--r--drivers/scsi/aacraid/TODO3
-rw-r--r--drivers/scsi/aacraid/aachba.c3501
-rw-r--r--drivers/scsi/aacraid/aacraid.h2159
-rw-r--r--drivers/scsi/aacraid/commctrl.c895
-rw-r--r--drivers/scsi/aacraid/comminit.c586
-rw-r--r--drivers/scsi/aacraid/commsup.c1980
-rw-r--r--drivers/scsi/aacraid/dpcsup.c425
-rw-r--r--drivers/scsi/aacraid/linit.c1390
-rw-r--r--drivers/scsi/aacraid/nark.c84
-rw-r--r--drivers/scsi/aacraid/rkt.c107
-rw-r--r--drivers/scsi/aacraid/rx.c676
-rw-r--r--drivers/scsi/aacraid/sa.c416
-rw-r--r--drivers/scsi/aacraid/src.c1063
-rw-r--r--drivers/scsi/advansys.c12319
-rw-r--r--drivers/scsi/aha152x.c3398
-rw-r--r--drivers/scsi/aha152x.h337
-rw-r--r--drivers/scsi/aha1542.c1072
-rw-r--r--drivers/scsi/aha1542.h102
-rw-r--r--drivers/scsi/aha1740.c678
-rw-r--r--drivers/scsi/aha1740.h154
-rw-r--r--drivers/scsi/aic7xxx/.gitignore6
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic79xx85
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx90
-rw-r--r--drivers/scsi/aic7xxx/Makefile85
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c391
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c155
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h1478
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.reg4281
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.seq2290
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c10822
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_inline.h172
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c2886
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h695
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c397
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c1014
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.h72
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c315
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped2685
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped745
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_seq.h_shipped1190
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h1284
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.reg1761
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.seq2399
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c324
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.h102
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c7971
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_inline.h97
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c2598
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h705
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c470
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c2469
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.h125
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c342
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped912
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped413
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped1308
-rw-r--r--drivers/scsi/aic7xxx/aicasm/Makefile80
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c844
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.h95
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y2004
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h218
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y165
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l157
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l622
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c693
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h209
-rw-r--r--drivers/scsi/aic7xxx/aiclib.c34
-rw-r--r--drivers/scsi/aic7xxx/aiclib.h195
-rw-r--r--drivers/scsi/aic7xxx/cam.h111
-rw-r--r--drivers/scsi/aic7xxx/queue.h501
-rw-r--r--drivers/scsi/aic7xxx/scsi_iu.h39
-rw-r--r--drivers/scsi/aic7xxx/scsi_message.h70
-rw-r--r--drivers/scsi/aic94xx/Kconfig42
-rw-r--r--drivers/scsi/aic94xx/Makefile37
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h101
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c363
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.c967
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.h43
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c1390
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.h398
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c1080
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg.c331
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg.h302
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg_def.h2399
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sas.h787
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c937
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c1475
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.h121
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.c1415
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.h68
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c643
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c714
-rw-r--r--drivers/scsi/am53c974.c582
-rw-r--r--drivers/scsi/arcmsr/Makefile6
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h814
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c404
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4027
-rw-r--r--drivers/scsi/arm/Kconfig84
-rw-r--r--drivers/scsi/arm/Makefile13
-rw-r--r--drivers/scsi/arm/acornscsi-io.S138
-rw-r--r--drivers/scsi/arm/acornscsi.c3014
-rw-r--r--drivers/scsi/arm/acornscsi.h353
-rw-r--r--drivers/scsi/arm/arxescsi.c358
-rw-r--r--drivers/scsi/arm/cumana_1.c324
-rw-r--r--drivers/scsi/arm/cumana_2.c521
-rw-r--r--drivers/scsi/arm/eesox.c645
-rw-r--r--drivers/scsi/arm/fas216.c3032
-rw-r--r--drivers/scsi/arm/fas216.h393
-rw-r--r--drivers/scsi/arm/msgqueue.c171
-rw-r--r--drivers/scsi/arm/msgqueue.h82
-rw-r--r--drivers/scsi/arm/oak.c207
-rw-r--r--drivers/scsi/arm/powertec.c451
-rw-r--r--drivers/scsi/arm/queue.c318
-rw-r--r--drivers/scsi/arm/queue.h107
-rw-r--r--drivers/scsi/arm/scsi.h128
-rw-r--r--drivers/scsi/atari_NCR5380.c2927
-rw-r--r--drivers/scsi/atari_scsi.c1023
-rw-r--r--drivers/scsi/atp870u.c3897
-rw-r--r--drivers/scsi/atp870u.h67
-rw-r--r--drivers/scsi/be2iscsi/Kconfig9
-rw-r--r--drivers/scsi/be2iscsi/Makefile8
-rw-r--r--drivers/scsi/be2iscsi/be.h211
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c1443
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h1359
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c1472
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h91
-rw-r--r--drivers/scsi/be2iscsi/be_main.c5828
-rw-r--r--drivers/scsi/be2iscsi/be_main.h1080
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c1709
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h341
-rw-r--r--drivers/scsi/bfa/Makefile6
-rw-r--r--drivers/scsi/bfa/bfa.h448
-rw-r--r--drivers/scsi/bfa/bfa_core.c2000
-rw-r--r--drivers/scsi/bfa/bfa_cs.h366
-rw-r--r--drivers/scsi/bfa/bfa_defs.h1287
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h478
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h1462
-rw-r--r--drivers/scsi/bfa/bfa_fc.h1628
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c1463
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h328
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3936
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h429
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c1712
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h883
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c839
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c6988
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c3464
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c191
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c178
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c7065
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h1048
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c408
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c997
-rw-r--r--drivers/scsi/bfa/bfa_modules.h139
-rw-r--r--drivers/scsi/bfa/bfa_plog.h155
-rw-r--r--drivers/scsi/bfa/bfa_port.c880
-rw-r--r--drivers/scsi/bfa/bfa_port.h126
-rw-r--r--drivers/scsi/bfa/bfa_svc.c7062
-rw-r--r--drivers/scsi/bfa/bfa_svc.h763
-rw-r--r--drivers/scsi/bfa/bfad.c1820
-rw-r--r--drivers/scsi/bfa/bfad_attr.c999
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c3593
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h836
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c532
-rw-r--r--drivers/scsi/bfa/bfad_drv.h360
-rw-r--r--drivers/scsi/bfa/bfad_im.c1320
-rw-r--r--drivers/scsi/bfa/bfad_im.h197
-rw-r--r--drivers/scsi/bfa/bfi.h1324
-rw-r--r--drivers/scsi/bfa/bfi_ms.h878
-rw-r--r--drivers/scsi/bfa/bfi_reg.h459
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h1003
-rw-r--r--drivers/scsi/bnx2fc/Kconfig13
-rw-r--r--drivers/scsi/bnx2fc/Makefile4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h594
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h287
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.c83
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h46
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c918
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2826
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2195
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2081
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c910
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h161
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h1526
-rw-r--r--drivers/scsi/bnx2i/Kconfig13
-rw-r--r--drivers/scsi/bnx2i/Makefile3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h882
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2771
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c604
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2305
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c145
-rw-r--r--drivers/scsi/bvme6000_scsi.c138
-rw-r--r--drivers/scsi/ch.c1031
-rw-r--r--drivers/scsi/constants.c1274
-rw-r--r--drivers/scsi/csiostor/Kconfig19
-rw-r--r--drivers/scsi/csiostor/Makefile12
-rw-r--r--drivers/scsi/csiostor/csio_attr.c796
-rw-r--r--drivers/scsi/csiostor/csio_defs.h121
-rw-r--r--drivers/scsi/csiostor/csio_hw.c3970
-rw-r--r--drivers/scsi/csiostor/csio_hw.h603
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h121
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c398
-rw-r--r--drivers/scsi/csiostor/csio_init.c1255
-rw-r--r--drivers/scsi/csiostor/csio_init.h137
-rw-r--r--drivers/scsi/csiostor/csio_isr.c618
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2135
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h255
-rw-r--r--drivers/scsi/csiostor/csio_mb.c1676
-rw-r--r--drivers/scsi/csiostor/csio_mb.h267
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c921
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h141
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2529
-rw-r--r--drivers/scsi/csiostor/csio_scsi.h342
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1645
-rw-r--r--drivers/scsi/csiostor/csio_wr.h512
-rw-r--r--drivers/scsi/csiostor/t4fw_api_stor.h539
-rw-r--r--drivers/scsi/cxgbi/Kconfig2
-rw-r--r--drivers/scsi/cxgbi/Makefile2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kbuild3
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/Kconfig10
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1414
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.h62
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kbuild3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig10
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1894
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.h43
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2931
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h758
-rw-r--r--drivers/scsi/dc395x.c4900
-rw-r--r--drivers/scsi/dc395x.h648
-rw-r--r--drivers/scsi/device_handler/Kconfig40
-rw-r--r--drivers/scsi/device_handler/Makefile8
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c621
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c905
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c725
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c400
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c937
-rw-r--r--drivers/scsi/dmx3191d.c162
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h446
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h139
-rw-r--r--drivers/scsi/dpt/dptsig.h336
-rw-r--r--drivers/scsi/dpt/osd_defs.h79
-rw-r--r--drivers/scsi/dpt/osd_util.h358
-rw-r--r--drivers/scsi/dpt/sys_info.h417
-rw-r--r--drivers/scsi/dpt_i2o.c3612
-rw-r--r--drivers/scsi/dpti.h336
-rw-r--r--drivers/scsi/dtc.c459
-rw-r--r--drivers/scsi/dtc.h76
-rw-r--r--drivers/scsi/eata.c2578
-rw-r--r--drivers/scsi/eata_generic.h400
-rw-r--r--drivers/scsi/eata_pio.c965
-rw-r--r--drivers/scsi/eata_pio.h53
-rw-r--r--drivers/scsi/esas2r/Kconfig5
-rw-r--r--drivers/scsi/esas2r/Makefile5
-rw-r--r--drivers/scsi/esas2r/atioctl.h1254
-rw-r--r--drivers/scsi/esas2r/atvda.h1319
-rw-r--r--drivers/scsi/esas2r/esas2r.h1426
-rw-r--r--drivers/scsi/esas2r/esas2r_disc.c1184
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c1521
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c1772
-rw-r--r--drivers/scsi/esas2r/esas2r_int.c942
-rw-r--r--drivers/scsi/esas2r/esas2r_io.c877
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2114
-rw-r--r--drivers/scsi/esas2r/esas2r_log.c250
-rw-r--r--drivers/scsi/esas2r/esas2r_log.h118
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c1975
-rw-r--r--drivers/scsi/esas2r/esas2r_targdb.c306
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c524
-rw-r--r--drivers/scsi/esp_scsi.c2800
-rw-r--r--drivers/scsi/esp_scsi.h583
-rw-r--r--drivers/scsi/fcoe/Makefile4
-rw-r--r--drivers/scsi/fcoe/fcoe.c2965
-rw-r--r--drivers/scsi/fcoe/fcoe.h104
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c2965
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c954
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c1040
-rw-r--r--drivers/scsi/fcoe/libfcoe.h35
-rw-r--r--drivers/scsi/fdomain.c1784
-rw-r--r--drivers/scsi/fdomain.h24
-rw-r--r--drivers/scsi/fnic/Makefile17
-rw-r--r--drivers/scsi/fnic/cq_desc.h78
-rw-r--r--drivers/scsi/fnic/cq_enet_desc.h167
-rw-r--r--drivers/scsi/fnic/cq_exch_desc.h182
-rw-r--r--drivers/scsi/fnic/fcpio.h780
-rw-r--r--drivers/scsi/fnic/fnic.h379
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c56
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c836
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c1347
-rw-r--r--drivers/scsi/fnic/fnic_fip.h68
-rw-r--r--drivers/scsi/fnic/fnic_io.h69
-rw-r--r--drivers/scsi/fnic/fnic_isr.c350
-rw-r--r--drivers/scsi/fnic/fnic_main.c1168
-rw-r--r--drivers/scsi/fnic/fnic_res.c443
-rw-r--r--drivers/scsi/fnic/fnic_res.h249
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2710
-rw-r--r--drivers/scsi/fnic/fnic_stats.h116
-rw-r--r--drivers/scsi/fnic/fnic_trace.c779
-rw-r--r--drivers/scsi/fnic/fnic_trace.h129
-rw-r--r--drivers/scsi/fnic/rq_enet_desc.h58
-rw-r--r--drivers/scsi/fnic/vnic_cq.c85
-rw-r--r--drivers/scsi/fnic/vnic_cq.h121
-rw-r--r--drivers/scsi/fnic/vnic_cq_copy.h62
-rw-r--r--drivers/scsi/fnic/vnic_dev.c701
-rw-r--r--drivers/scsi/fnic/vnic_dev.h163
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h348
-rw-r--r--drivers/scsi/fnic/vnic_intr.c60
-rw-r--r--drivers/scsi/fnic/vnic_intr.h118
-rw-r--r--drivers/scsi/fnic/vnic_nic.h69
-rw-r--r--drivers/scsi/fnic/vnic_resource.h61
-rw-r--r--drivers/scsi/fnic/vnic_rq.c197
-rw-r--r--drivers/scsi/fnic/vnic_rq.h235
-rw-r--r--drivers/scsi/fnic/vnic_scsi.h100
-rw-r--r--drivers/scsi/fnic/vnic_stats.h68
-rw-r--r--drivers/scsi/fnic/vnic_wq.c183
-rw-r--r--drivers/scsi/fnic/vnic_wq.h175
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.c117
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.h128
-rw-r--r--drivers/scsi/fnic/wq_enet_desc.h96
-rw-r--r--drivers/scsi/g_NCR5380.c741
-rw-r--r--drivers/scsi/g_NCR5380.h108
-rw-r--r--drivers/scsi/g_NCR5380_mmio.c10
-rw-r--r--drivers/scsi/gdth.c5235
-rw-r--r--drivers/scsi/gdth.h1013
-rw-r--r--drivers/scsi/gdth_ioctl.h339
-rw-r--r--drivers/scsi/gdth_proc.c645
-rw-r--r--drivers/scsi/gdth_proc.h20
-rw-r--r--drivers/scsi/gvp11.c433
-rw-r--r--drivers/scsi/gvp11.h52
-rw-r--r--drivers/scsi/hosts.c640
-rw-r--r--drivers/scsi/hpsa.c7627
-rw-r--r--drivers/scsi/hpsa.h569
-rw-r--r--drivers/scsi/hpsa_cmd.h794
-rw-r--r--drivers/scsi/hptiop.c1685
-rw-r--r--drivers/scsi/hptiop.h382
-rw-r--r--drivers/scsi/ibmvscsi/Makefile2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c5016
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h775
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2438
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h110
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h217
-rw-r--r--drivers/scsi/imm.c1268
-rw-r--r--drivers/scsi/imm.h143
-rw-r--r--drivers/scsi/in2000.c2302
-rw-r--r--drivers/scsi/in2000.h412
-rw-r--r--drivers/scsi/initio.c3013
-rw-r--r--drivers/scsi/initio.h667
-rw-r--r--drivers/scsi/ipr.c10536
-rw-r--r--drivers/scsi/ipr.h1973
-rw-r--r--drivers/scsi/ips.c7175
-rw-r--r--drivers/scsi/ips.h1251
-rw-r--r--drivers/scsi/isci/Makefile8
-rw-r--r--drivers/scsi/isci/host.c2807
-rw-r--r--drivers/scsi/isci/host.h517
-rw-r--r--drivers/scsi/isci/init.c811
-rw-r--r--drivers/scsi/isci/isci.h539
-rw-r--r--drivers/scsi/isci/phy.c1487
-rw-r--r--drivers/scsi/isci/phy.h460
-rw-r--r--drivers/scsi/isci/port.c1770
-rw-r--r--drivers/scsi/isci/port.h283
-rw-r--r--drivers/scsi/isci/port_config.c760
-rw-r--r--drivers/scsi/isci/probe_roms.c230
-rw-r--r--drivers/scsi/isci/probe_roms.h330
-rw-r--r--drivers/scsi/isci/registers.h1863
-rw-r--r--drivers/scsi/isci/remote_device.c1726
-rw-r--r--drivers/scsi/isci/remote_device.h387
-rw-r--r--drivers/scsi/isci/remote_node_context.c809
-rw-r--r--drivers/scsi/isci/remote_node_context.h236
-rw-r--r--drivers/scsi/isci/remote_node_table.c598
-rw-r--r--drivers/scsi/isci/remote_node_table.h188
-rw-r--r--drivers/scsi/isci/request.c3528
-rw-r--r--drivers/scsi/isci/request.h310
-rw-r--r--drivers/scsi/isci/sas.h217
-rw-r--r--drivers/scsi/isci/scu_completion_codes.h285
-rw-r--r--drivers/scsi/isci/scu_event_codes.h336
-rw-r--r--drivers/scsi/isci/scu_remote_node_context.h229
-rw-r--r--drivers/scsi/isci/scu_task_context.h965
-rw-r--r--drivers/scsi/isci/task.c805
-rw-r--r--drivers/scsi/isci/task.h189
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.c211
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.h282
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c495
-rw-r--r--drivers/scsi/iscsi_tcp.c1030
-rw-r--r--drivers/scsi/iscsi_tcp.h68
-rw-r--r--drivers/scsi/jazz_esp.c248
-rw-r--r--drivers/scsi/lasi700.c187
-rw-r--r--drivers/scsi/libfc/Makefile14
-rw-r--r--drivers/scsi/libfc/fc_disc.c753
-rw-r--r--drivers/scsi/libfc/fc_elsct.c152
-rw-r--r--drivers/scsi/libfc/fc_exch.c2631
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2245
-rw-r--r--drivers/scsi/libfc/fc_frame.c91
-rw-r--r--drivers/scsi/libfc/fc_libfc.c331
-rw-r--r--drivers/scsi/libfc/fc_libfc.h139
-rw-r--r--drivers/scsi/libfc/fc_lport.c2144
-rw-r--r--drivers/scsi/libfc/fc_npiv.c159
-rw-r--r--drivers/scsi/libfc/fc_rport.c2069
-rw-r--r--drivers/scsi/libiscsi.c3649
-rw-r--r--drivers/scsi/libiscsi_tcp.c1214
-rw-r--r--drivers/scsi/libsas/Kconfig48
-rw-r--r--drivers/scsi/libsas/Makefile35
-rw-r--r--drivers/scsi/libsas/sas_ata.c865
-rw-r--r--drivers/scsi/libsas/sas_discover.c592
-rw-r--r--drivers/scsi/libsas/sas_dump.c73
-rw-r--r--drivers/scsi/libsas/sas_dump.h30
-rw-r--r--drivers/scsi/libsas/sas_event.c165
-rw-r--r--drivers/scsi/libsas/sas_expander.c2186
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c383
-rw-r--r--drivers/scsi/libsas/sas_init.c601
-rw-r--r--drivers/scsi/libsas/sas_internal.h200
-rw-r--r--drivers/scsi/libsas/sas_phy.c181
-rw-r--r--drivers/scsi/libsas/sas_port.c355
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c1013
-rw-r--r--drivers/scsi/libsas/sas_task.c37
-rw-r--r--drivers/scsi/lpfc/Makefile33
-rw-r--r--drivers/scsi/lpfc/lpfc.h1063
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c5885
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c5410
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h289
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h96
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h500
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2278
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4698
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h671
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h272
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c8286
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c6423
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h3834
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3694
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c11477
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h59
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2368
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c588
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h179
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2577
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c5936
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h186
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c17060
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h331
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h739
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c903
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h120
-rw-r--r--drivers/scsi/mac53c94.c572
-rw-r--r--drivers/scsi/mac53c94.h214
-rw-r--r--drivers/scsi/mac_esp.c640
-rw-r--r--drivers/scsi/mac_scsi.c492
-rw-r--r--drivers/scsi/megaraid.c4738
-rw-r--r--drivers/scsi/megaraid.h1010
-rw-r--r--drivers/scsi/megaraid/Kconfig.megaraid85
-rw-r--r--drivers/scsi/megaraid/Makefile5
-rw-r--r--drivers/scsi/megaraid/mbox_defs.h790
-rw-r--r--drivers/scsi/megaraid/mega_common.h290
-rw-r--r--drivers/scsi/megaraid/megaraid_ioctl.h300
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c4145
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h238
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c1263
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.h101
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h1990
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c6892
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c1360
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2993
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h850
-rw-r--r--drivers/scsi/mesh.c2074
-rw-r--r--drivers/scsi/mesh.h127
-rw-r--r--drivers/scsi/mpt2sas/Kconfig67
-rw-r--r--drivers/scsi/mpt2sas/Makefile7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h1170
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h3068
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h461
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h1708
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h366
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h288
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h481
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_type.h61
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c4891
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h1198
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c1527
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c3077
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h419
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h182
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c8592
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c2169
-rw-r--r--drivers/scsi/mpt3sas/Kconfig67
-rw-r--r--drivers/scsi/mpt3sas/Makefile8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h1172
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h3344
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h562
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h1727
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h354
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h297
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h480
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h56
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5033
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h1180
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c1686
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3283
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h419
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debug.h220
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8209
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c2130
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c434
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h194
-rw-r--r--drivers/scsi/mvme147.c166
-rw-r--r--drivers/scsi/mvme147.h24
-rw-r--r--drivers/scsi/mvme16x_scsi.c159
-rw-r--r--drivers/scsi/mvsas/Kconfig50
-rw-r--r--drivers/scsi/mvsas/Makefile31
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c828
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h152
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c1061
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h278
-rw-r--r--drivers/scsi/mvsas/mv_chips.h270
-rw-r--r--drivers/scsi/mvsas/mv_defs.h510
-rw-r--r--drivers/scsi/mvsas/mv_init.c855
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2105
-rw-r--r--drivers/scsi/mvsas/mv_sas.h480
-rw-r--r--drivers/scsi/mvumi.c2751
-rw-r--r--drivers/scsi/mvumi.h573
-rw-r--r--drivers/scsi/ncr53c8xx.c8626
-rw-r--r--drivers/scsi/ncr53c8xx.h1325
-rw-r--r--drivers/scsi/nsp32.c3431
-rw-r--r--drivers/scsi/nsp32.h617
-rw-r--r--drivers/scsi/nsp32_debug.c263
-rw-r--r--drivers/scsi/nsp32_io.h259
-rw-r--r--drivers/scsi/osd/Kbuild20
-rw-r--r--drivers/scsi/osd/Kconfig49
-rw-r--r--drivers/scsi/osd/osd_debug.h30
-rw-r--r--drivers/scsi/osd/osd_initiator.c2071
-rw-r--r--drivers/scsi/osd/osd_uld.c594
-rw-r--r--drivers/scsi/osst.c6096
-rw-r--r--drivers/scsi/osst.h650
-rw-r--r--drivers/scsi/osst_detect.h6
-rw-r--r--drivers/scsi/osst_options.h106
-rw-r--r--drivers/scsi/pas16.c594
-rw-r--r--drivers/scsi/pas16.h154
-rw-r--r--drivers/scsi/pcmcia/Kconfig83
-rw-r--r--drivers/scsi/pcmcia/Makefile13
-rw-r--r--drivers/scsi/pcmcia/aha152x_core.c3
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c235
-rw-r--r--drivers/scsi/pcmcia/fdomain_core.c2
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c209
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c1761
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h392
-rw-r--r--drivers/scsi/pcmcia/nsp_debug.c215
-rw-r--r--drivers/scsi/pcmcia/nsp_io.h274
-rw-r--r--drivers/scsi/pcmcia/nsp_message.c78
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c318
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c898
-rw-r--r--drivers/scsi/pm8001/Makefile13
-rw-r--r--drivers/scsi/pm8001/pm8001_chips.h89
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c753
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h63
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h131
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c5125
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h1038
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c1226
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c1258
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h723
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c4551
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h1532
-rw-r--r--drivers/scsi/pmcraid.c6061
-rw-r--r--drivers/scsi/pmcraid.h1095
-rw-r--r--drivers/scsi/ppa.c1132
-rw-r--r--drivers/scsi/ppa.h150
-rw-r--r--drivers/scsi/ps3rom.c457
-rw-r--r--drivers/scsi/qla1280.c4492
-rw-r--r--drivers/scsi/qla1280.h1081
-rw-r--r--drivers/scsi/qla2xxx/Kconfig17
-rw-r--r--drivers/scsi/qla2xxx/Makefile6
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2323
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2279
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h235
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2696
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h358
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h3713
-rw-r--r--drivers/scsi/qla2xxx/qla_devtbl.h99
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c182
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h1954
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h769
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2694
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c6473
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h289
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c2857
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c3210
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c5471
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c859
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c3461
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h527
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c4518
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h1202
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c4079
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h599
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c5831
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3219
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c5965
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1114
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c959
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h224
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h15
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2168
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h84
-rw-r--r--drivers/scsi/qla4xxx/Kconfig8
-rw-r--r--drivers/scsi/qla4xxx/Makefile5
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c1594
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.h371
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c351
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c873
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.h32
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c162
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.h62
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h1090
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h1443
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h293
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c1267
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h96
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c542
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c1627
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c2464
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c256
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h254
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c4275
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h1032
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c9919
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h8
-rw-r--r--drivers/scsi/qlogicfas.c226
-rw-r--r--drivers/scsi/qlogicfas408.c617
-rw-r--r--drivers/scsi/qlogicfas408.h118
-rw-r--r--drivers/scsi/qlogicpti.c1481
-rw-r--r--drivers/scsi/qlogicpti.h507
-rw-r--r--drivers/scsi/raid_class.c317
-rw-r--r--drivers/scsi/script_asm.pl984
-rw-r--r--drivers/scsi/scsi.c1272
-rw-r--r--drivers/scsi/scsi.h48
-rw-r--r--drivers/scsi/scsi_debug.c5462
-rw-r--r--drivers/scsi/scsi_devinfo.c902
-rw-r--r--drivers/scsi/scsi_error.c2619
-rw-r--r--drivers/scsi/scsi_ioctl.c289
-rw-r--r--drivers/scsi/scsi_lib.c3147
-rw-r--r--drivers/scsi/scsi_lib_dma.c51
-rw-r--r--drivers/scsi/scsi_logging.c485
-rw-r--r--drivers/scsi/scsi_logging.h84
-rw-r--r--drivers/scsi/scsi_module.c73
-rw-r--r--drivers/scsi/scsi_netlink.c158
-rw-r--r--drivers/scsi/scsi_pm.c344
-rw-r--r--drivers/scsi/scsi_priv.h183
-rw-r--r--drivers/scsi/scsi_proc.c481
-rw-r--r--drivers/scsi/scsi_sas_internal.h42
-rw-r--r--drivers/scsi/scsi_scan.c1985
-rw-r--r--drivers/scsi/scsi_sysctl.c51
-rw-r--r--drivers/scsi/scsi_sysfs.c1275
-rw-r--r--drivers/scsi/scsi_trace.c288
-rw-r--r--drivers/scsi/scsi_transport_api.h6
-rw-r--r--drivers/scsi/scsi_transport_fc.c4152
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4603
-rw-r--r--drivers/scsi/scsi_transport_sas.c1971
-rw-r--r--drivers/scsi/scsi_transport_spi.c1632
-rw-r--r--drivers/scsi/scsi_transport_srp.c937
-rw-r--r--drivers/scsi/scsi_typedefs.h2
-rw-r--r--drivers/scsi/scsicam.c259
-rw-r--r--drivers/scsi/sd.c3302
-rw-r--r--drivers/scsi/sd.h261
-rw-r--r--drivers/scsi/sd_dif.c205
-rw-r--r--drivers/scsi/ses.c840
-rw-r--r--drivers/scsi/sg.c2699
-rw-r--r--drivers/scsi/sgiwd93.c337
-rw-r--r--drivers/scsi/sim710.c256
-rw-r--r--drivers/scsi/sni_53c710.c154
-rw-r--r--drivers/scsi/sr.c1018
-rw-r--r--drivers/scsi/sr.h81
-rw-r--r--drivers/scsi/sr_ioctl.c593
-rw-r--r--drivers/scsi/sr_vendor.c329
-rw-r--r--drivers/scsi/st.c4610
-rw-r--r--drivers/scsi/st.h227
-rw-r--r--drivers/scsi/st_options.h104
-rw-r--r--drivers/scsi/stex.c1812
-rw-r--r--drivers/scsi/storvsc_drv.c1935
-rw-r--r--drivers/scsi/sun3_scsi.c685
-rw-r--r--drivers/scsi/sun3_scsi.h102
-rw-r--r--drivers/scsi/sun3_scsi_vme.c3
-rw-r--r--drivers/scsi/sun3x_esp.c318
-rw-r--r--drivers/scsi/sun_esp.c658
-rw-r--r--drivers/scsi/sym53c416.c845
-rw-r--r--drivers/scsi/sym53c416.h33
-rw-r--r--drivers/scsi/sym53c8xx_2/Makefile4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym53c8xx.h215
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_defs.h792
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw.c554
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw.h205
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw1.h1790
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw2.h1875
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2080
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h270
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c5842
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h1226
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_malloc.c378
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_misc.h190
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.c779
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.h214
-rw-r--r--drivers/scsi/t128.c412
-rw-r--r--drivers/scsi/t128.h125
-rw-r--r--drivers/scsi/u14-34f.c1971
-rw-r--r--drivers/scsi/ufs/Kconfig85
-rw-r--r--drivers/scsi/ufs/Makefile5
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c1016
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h196
-rw-r--r--drivers/scsi/ufs/ufs.h491
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c192
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c404
-rw-r--r--drivers/scsi/ufs/ufshcd.c5583
-rw-r--r--drivers/scsi/ufs/ufshcd.h605
-rw-r--r--drivers/scsi/ufs/ufshci.h393
-rw-r--r--drivers/scsi/ufs/unipro.h207
-rw-r--r--drivers/scsi/ultrastor.c1210
-rw-r--r--drivers/scsi/ultrastor.h80
-rw-r--r--drivers/scsi/virtio_scsi.c1157
-rw-r--r--drivers/scsi/vmw_pvscsi.c1597
-rw-r--r--drivers/scsi/vmw_pvscsi.h468
-rw-r--r--drivers/scsi/wd33c93.c2207
-rw-r--r--drivers/scsi/wd33c93.h352
-rw-r--r--drivers/scsi/wd7000.c1657
-rw-r--r--drivers/scsi/wd719x.c996
-rw-r--r--drivers/scsi/wd719x.h249
-rw-r--r--drivers/scsi/xen-scsifront.c1171
-rw-r--r--drivers/scsi/zalon.c205
-rw-r--r--drivers/scsi/zorro7xx.c184
757 files changed, 895230 insertions, 0 deletions
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
new file mode 100644
index 000000000..c89ae9a04
--- /dev/null
+++ b/drivers/scsi/.gitignore
@@ -0,0 +1 @@
+53c700_d.h
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
new file mode 100644
index 000000000..add419d6f
--- /dev/null
+++ b/drivers/scsi/3w-9xxx.c
@@ -0,0 +1,2308 @@
+/*
+ 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
+
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
+
+ Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
+ linuxraid@lsi.com
+
+ For more information, goto:
+ http://www.lsi.com
+
+ Note: This version of the driver does not contain a bundled firmware
+ image.
+
+ History
+ -------
+ 2.26.02.000 - Driver cleanup for kernel submission.
+ 2.26.02.001 - Replace schedule_timeout() calls with msleep().
+ 2.26.02.002 - Add support for PAE mode.
+ Add lun support.
+ Fix twa_remove() to free irq handler/unregister_chrdev()
+ before shutting down card.
+ Change to new 'change_queue_depth' api.
+ Fix 'handled=1' ISR usage, remove bogus IRQ check.
+ Remove un-needed eh_abort handler.
+ Add support for embedded firmware error strings.
+ 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
+ 2.26.02.004 - Add support for 9550SX controllers.
+ 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
+ 2.26.02.006 - Fix 9550SX pchip reset timeout.
+ Add big endian support.
+ 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
+ 2.26.02.008 - Free irq handler in __twa_shutdown().
+ Serialize reset code.
+ Add support for 9650SE controllers.
+ 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
+ 2.26.02.010 - Add support for 9690SA controllers.
+ 2.26.02.011 - Increase max AENs drained to 256.
+ Add MSI support and "use_msi" module parameter.
+ Fix bug in twa_get_param() on 4GB+.
+ Use pci_resource_len() for ioremap().
+ 2.26.02.012 - Add power management support.
+ 2.26.02.013 - Fix bug in twa_load_sgl().
+ 2.26.02.014 - Force 60 second timeout default.
+*/
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include "3w-9xxx.h"
+
+/* Globals */
+#define TW_DRIVER_VERSION "2.26.02.014"
+static DEFINE_MUTEX(twa_chrdev_mutex);
+static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
+static unsigned int twa_device_extension_count;
+static int twa_major = -1;
+extern struct timezone sys_tz;
+
+/* Module parameters */
+MODULE_AUTHOR ("LSI");
+MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(TW_DRIVER_VERSION);
+
+static int use_msi = 0;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
+/* Function prototypes */
+static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
+static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
+static char *twa_aen_severity_lookup(unsigned char severity_code);
+static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
+static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+static int twa_chrdev_open(struct inode *inode, struct file *file);
+static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
+static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
+static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
+static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
+ u32 *init_connect_result);
+static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
+static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
+static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
+static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
+static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
+static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
+static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
+static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
+
+/* Functions */
+
+/* Show some statistics about the card */
+static ssize_t twa_show_stats(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+ unsigned long flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Current pending commands: %4d\n"
+ "Max pending commands: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->pending_request_count,
+ tw_dev->max_pending_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ return len;
+} /* End twa_show_stats() */
+
+/* Create sysfs 'stats' entry */
+static struct device_attribute twa_host_stats_attr = {
+ .attr = {
+ .name = "stats",
+ .mode = S_IRUGO,
+ },
+ .show = twa_show_stats
+};
+
+/* Host attributes initializer */
+static struct device_attribute *twa_host_attrs[] = {
+ &twa_host_stats_attr,
+ NULL,
+};
+
+/* File operations struct for character device */
+static const struct file_operations twa_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = twa_chrdev_ioctl,
+ .open = twa_chrdev_open,
+ .release = NULL,
+ .llseek = noop_llseek,
+};
+
+/* This function will complete an aen request from the isr */
+static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Command *command_packet;
+ TW_Command_Apache_Header *header;
+ unsigned short aen;
+ int retval = 1;
+
+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+ tw_dev->posted_request_count--;
+ aen = le16_to_cpu(header->status_block.error);
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ command_packet = &full_command_packet->command.oldcommand;
+
+ /* First check for internal completion of set param for time sync */
+ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
+ /* Keep reading the queue in case there are more aen's */
+ if (twa_aen_read_queue(tw_dev, request_id))
+ goto out2;
+ else {
+ retval = 0;
+ goto out;
+ }
+ }
+
+ switch (aen) {
+ case TW_AEN_QUEUE_EMPTY:
+ /* Quit reading the queue if this is the last one */
+ break;
+ case TW_AEN_SYNC_TIME_WITH_HOST:
+ twa_aen_sync_time(tw_dev, request_id);
+ retval = 0;
+ goto out;
+ default:
+ twa_aen_queue_event(tw_dev, header);
+
+ /* If there are more aen's, keep reading the queue */
+ if (twa_aen_read_queue(tw_dev, request_id))
+ goto out2;
+ else {
+ retval = 0;
+ goto out;
+ }
+ }
+ retval = 0;
+out2:
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+out:
+ return retval;
+} /* End twa_aen_complete() */
+
+/* This function will drain aen queue */
+static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
+{
+ int request_id = 0;
+ char cdb[TW_MAX_CDB_LEN];
+ TW_SG_Entry sglist[1];
+ int finished = 0, count = 0;
+ TW_Command_Full *full_command_packet;
+ TW_Command_Apache_Header *header;
+ unsigned short aen;
+ int first_reset = 0, queue = 0, retval = 1;
+
+ if (no_check_reset)
+ first_reset = 0;
+ else
+ first_reset = 1;
+
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+ /* Initialize cdb */
+ memset(&cdb, 0, TW_MAX_CDB_LEN);
+ cdb[0] = REQUEST_SENSE; /* opcode */
+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+ /* Initialize sglist */
+ memset(&sglist, 0, sizeof(TW_SG_Entry));
+ sglist[0].length = TW_SECTOR_SIZE;
+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+ if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
+ goto out;
+ }
+
+ /* Mark internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ do {
+ /* Send command to the board */
+ if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
+ goto out;
+ }
+
+ /* Now poll for completion */
+ if (twa_poll_response(tw_dev, request_id, 30)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
+ tw_dev->posted_request_count--;
+ goto out;
+ }
+
+ tw_dev->posted_request_count--;
+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+ aen = le16_to_cpu(header->status_block.error);
+ queue = 0;
+ count++;
+
+ switch (aen) {
+ case TW_AEN_QUEUE_EMPTY:
+ if (first_reset != 1)
+ goto out;
+ else
+ finished = 1;
+ break;
+ case TW_AEN_SOFT_RESET:
+ if (first_reset == 0)
+ first_reset = 1;
+ else
+ queue = 1;
+ break;
+ case TW_AEN_SYNC_TIME_WITH_HOST:
+ break;
+ default:
+ queue = 1;
+ }
+
+ /* Now queue an event info */
+ if (queue)
+ twa_aen_queue_event(tw_dev, header);
+ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
+
+ if (count == TW_MAX_AEN_DRAIN)
+ goto out;
+
+ retval = 0;
+out:
+ tw_dev->state[request_id] = TW_S_INITIAL;
+ return retval;
+} /* End twa_aen_drain_queue() */
+
+/* This function will queue an event */
+static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
+{
+ u32 local_time;
+ struct timeval time;
+ TW_Event *event;
+ unsigned short aen;
+ char host[16];
+ char *error_str;
+
+ tw_dev->aen_count++;
+
+ /* Fill out event info */
+ event = tw_dev->event_queue[tw_dev->error_index];
+
+ /* Check for clobber */
+ host[0] = '\0';
+ if (tw_dev->host) {
+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
+ if (event->retrieved == TW_AEN_NOT_RETRIEVED)
+ tw_dev->aen_clobber = 1;
+ }
+
+ aen = le16_to_cpu(header->status_block.error);
+ memset(event, 0, sizeof(TW_Event));
+
+ event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
+ do_gettimeofday(&time);
+ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ event->time_stamp_sec = local_time;
+ event->aen_code = aen;
+ event->retrieved = TW_AEN_NOT_RETRIEVED;
+ event->sequence_id = tw_dev->error_sequence_id;
+ tw_dev->error_sequence_id++;
+
+ /* Check for embedded error string */
+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
+
+ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
+ event->parameter_len = strlen(header->err_specific_desc);
+ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
+ if (event->severity != TW_AEN_SEVERITY_DEBUG)
+ printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
+ host,
+ twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
+ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
+ error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
+ header->err_specific_desc);
+ else
+ tw_dev->aen_count--;
+
+ if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
+ tw_dev->event_queue_wrapped = 1;
+ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
+} /* End twa_aen_queue_event() */
+
+/* This function will read the aen queue from the isr */
+static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+{
+ char cdb[TW_MAX_CDB_LEN];
+ TW_SG_Entry sglist[1];
+ TW_Command_Full *full_command_packet;
+ int retval = 1;
+
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+ /* Initialize cdb */
+ memset(&cdb, 0, TW_MAX_CDB_LEN);
+ cdb[0] = REQUEST_SENSE; /* opcode */
+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+ /* Initialize sglist */
+ memset(&sglist, 0, sizeof(TW_SG_Entry));
+ sglist[0].length = TW_SECTOR_SIZE;
+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+ /* Mark internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Now post the command packet */
+ if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
+ goto out;
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_aen_read_queue() */
+
+/* This function will look up an AEN severity string */
+static char *twa_aen_severity_lookup(unsigned char severity_code)
+{
+ char *retval = NULL;
+
+ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
+ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
+ goto out;
+
+ retval = twa_aen_severity_table[severity_code];
+out:
+ return retval;
+} /* End twa_aen_severity_lookup() */
+
+/* This function will sync firmware time with the host time */
+static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
+{
+ u32 schedulertime;
+ struct timeval utc;
+ TW_Command_Full *full_command_packet;
+ TW_Command *command_packet;
+ TW_Param_Apache *param;
+ u32 local_time;
+
+ /* Fill out the command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+ command_packet = &full_command_packet->command.oldcommand;
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
+ command_packet->request_id = request_id;
+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
+
+ /* Setup the param */
+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+ memset(param, 0, TW_SECTOR_SIZE);
+ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
+ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
+ param->parameter_size_bytes = cpu_to_le16(4);
+
+ /* Convert system time in UTC to local time seconds since last
+ Sunday 12:00AM */
+ do_gettimeofday(&utc);
+ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
+ schedulertime = local_time - (3 * 86400);
+ schedulertime = cpu_to_le32(schedulertime % 604800);
+
+ memcpy(param->data, &schedulertime, sizeof(u32));
+
+ /* Mark internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Now post the command */
+ twa_post_command_packet(tw_dev, request_id, 1);
+} /* End twa_aen_sync_time() */
+
+/* This function will allocate memory and check if it is correctly aligned */
+static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
+{
+ int i;
+ dma_addr_t dma_handle;
+ unsigned long *cpu_addr;
+ int retval = 1;
+
+ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ if (!cpu_addr) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
+ goto out;
+ }
+
+ if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
+ pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ goto out;
+ }
+
+ memset(cpu_addr, 0, size*TW_Q_LENGTH);
+
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ switch(which) {
+ case 0:
+ tw_dev->command_packet_phys[i] = dma_handle+(i*size);
+ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ case 1:
+ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
+ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ }
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_allocate_memory() */
+
+/* This function will check the status register for unexpected bits */
+static int twa_check_bits(u32 status_reg_value)
+{
+ int retval = 1;
+
+ if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
+ goto out;
+ if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
+ goto out;
+
+ retval = 0;
+out:
+ return retval;
+} /* End twa_check_bits() */
+
+/* This function will check the srl and decide if we are compatible */
+static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
+{
+ int retval = 1;
+ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
+ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
+ u32 init_connect_result = 0;
+
+ if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
+ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
+ TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
+ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
+ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
+ &fw_on_ctlr_build, &init_connect_result)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
+ goto out;
+ }
+
+ tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
+ tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
+ tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
+
+ /* Try base mode compatibility */
+ if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
+ if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
+ TW_EXTENDED_INIT_CONNECT,
+ TW_BASE_FW_SRL, TW_9000_ARCH_ID,
+ TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
+ &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
+ &fw_on_ctlr_branch, &fw_on_ctlr_build,
+ &init_connect_result)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
+ goto out;
+ }
+ if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
+ if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
+ } else {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
+ }
+ goto out;
+ }
+ tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
+ tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
+ tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
+ }
+
+ /* Load rest of compatibility struct */
+ strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
+ sizeof(tw_dev->tw_compat_info.driver_version));
+ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
+ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
+ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
+ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
+ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
+ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
+ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
+ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
+ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
+
+ retval = 0;
+out:
+ return retval;
+} /* End twa_check_srl() */
+
+/* This function handles ioctl for the character device */
+static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ long timeout;
+ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
+ dma_addr_t dma_handle;
+ int request_id = 0;
+ unsigned int sequence_id = 0;
+ unsigned char event_index, start_index;
+ TW_Ioctl_Driver_Command driver_command;
+ TW_Ioctl_Buf_Apache *tw_ioctl;
+ TW_Lock *tw_lock;
+ TW_Command_Full *full_command_packet;
+ TW_Compatibility_Info *tw_compat_info;
+ TW_Event *event;
+ struct timeval current_time;
+ u32 current_time_ms;
+ TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
+ int retval = TW_IOCTL_ERROR_OS_EFAULT;
+ void __user *argp = (void __user *)arg;
+
+ mutex_lock(&twa_chrdev_mutex);
+
+ /* Only let one of these through at a time */
+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
+ retval = TW_IOCTL_ERROR_OS_EINTR;
+ goto out;
+ }
+
+ /* First copy down the driver command */
+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
+ goto out2;
+
+ /* Check data buffer size */
+ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
+ retval = TW_IOCTL_ERROR_OS_EINVAL;
+ goto out2;
+ }
+
+ /* Hardware can only do multiple of 512 byte transfers */
+ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
+
+ /* Now allocate ioctl buf memory */
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ if (!cpu_addr) {
+ retval = TW_IOCTL_ERROR_OS_ENOMEM;
+ goto out2;
+ }
+
+ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
+
+ /* Now copy down the entire ioctl */
+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ goto out3;
+
+ /* See which ioctl we are doing */
+ switch (cmd) {
+ case TW_IOCTL_FIRMWARE_PASS_THROUGH:
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ twa_get_request_id(tw_dev, &request_id);
+
+ /* Flag internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Flag chrdev ioctl */
+ tw_dev->chrdev_request_id = request_id;
+
+ full_command_packet = &tw_ioctl->firmware_command;
+
+ /* Load request id and sglist for both command types */
+ twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
+
+ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
+
+ /* Now post the command packet to the controller */
+ twa_post_command_packet(tw_dev, request_id, 1);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
+
+ /* Now wait for command to complete */
+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
+
+ /* We timed out, and didn't get an interrupt */
+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
+ /* Now we need to reset the board */
+ printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
+ tw_dev->host->host_no, TW_DRIVER, 0x37,
+ cmd);
+ retval = TW_IOCTL_ERROR_OS_EIO;
+ twa_reset_device_extension(tw_dev);
+ goto out3;
+ }
+
+ /* Now copy in the command packet response */
+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
+
+ /* Now complete the io */
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ break;
+ case TW_IOCTL_GET_COMPATIBILITY_INFO:
+ tw_ioctl->driver_command.status = 0;
+ /* Copy compatibility struct into ioctl data buffer */
+ tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
+ memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
+ break;
+ case TW_IOCTL_GET_LAST_EVENT:
+ if (tw_dev->event_queue_wrapped) {
+ if (tw_dev->aen_clobber) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
+ tw_dev->aen_clobber = 0;
+ } else
+ tw_ioctl->driver_command.status = 0;
+ } else {
+ if (!tw_dev->error_index) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
+ break;
+ }
+ tw_ioctl->driver_command.status = 0;
+ }
+ event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
+ memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
+ tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
+ break;
+ case TW_IOCTL_GET_FIRST_EVENT:
+ if (tw_dev->event_queue_wrapped) {
+ if (tw_dev->aen_clobber) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
+ tw_dev->aen_clobber = 0;
+ } else
+ tw_ioctl->driver_command.status = 0;
+ event_index = tw_dev->error_index;
+ } else {
+ if (!tw_dev->error_index) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
+ break;
+ }
+ tw_ioctl->driver_command.status = 0;
+ event_index = 0;
+ }
+ memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
+ tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
+ break;
+ case TW_IOCTL_GET_NEXT_EVENT:
+ event = (TW_Event *)tw_ioctl->data_buffer;
+ sequence_id = event->sequence_id;
+ tw_ioctl->driver_command.status = 0;
+
+ if (tw_dev->event_queue_wrapped) {
+ if (tw_dev->aen_clobber) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
+ tw_dev->aen_clobber = 0;
+ }
+ start_index = tw_dev->error_index;
+ } else {
+ if (!tw_dev->error_index) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
+ break;
+ }
+ start_index = 0;
+ }
+ event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
+
+ if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
+ if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
+ tw_dev->aen_clobber = 1;
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
+ break;
+ }
+ memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
+ tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
+ break;
+ case TW_IOCTL_GET_PREVIOUS_EVENT:
+ event = (TW_Event *)tw_ioctl->data_buffer;
+ sequence_id = event->sequence_id;
+ tw_ioctl->driver_command.status = 0;
+
+ if (tw_dev->event_queue_wrapped) {
+ if (tw_dev->aen_clobber) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
+ tw_dev->aen_clobber = 0;
+ }
+ start_index = tw_dev->error_index;
+ } else {
+ if (!tw_dev->error_index) {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
+ break;
+ }
+ start_index = 0;
+ }
+ event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
+
+ if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
+ if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
+ tw_dev->aen_clobber = 1;
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
+ break;
+ }
+ memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
+ tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
+ break;
+ case TW_IOCTL_GET_LOCK:
+ tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
+ do_gettimeofday(&current_time);
+ current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+
+ if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+ tw_dev->ioctl_sem_lock = 1;
+ tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+ tw_ioctl->driver_command.status = 0;
+ tw_lock->time_remaining_msec = tw_lock->timeout_msec;
+ } else {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
+ tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+ }
+ break;
+ case TW_IOCTL_RELEASE_LOCK:
+ if (tw_dev->ioctl_sem_lock == 1) {
+ tw_dev->ioctl_sem_lock = 0;
+ tw_ioctl->driver_command.status = 0;
+ } else {
+ tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
+ }
+ break;
+ default:
+ retval = TW_IOCTL_ERROR_OS_ENOTTY;
+ goto out3;
+ }
+
+ /* Now copy the entire response to userspace */
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ retval = 0;
+out3:
+ /* Now free ioctl buf memory */
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+out2:
+ mutex_unlock(&tw_dev->ioctl_lock);
+out:
+ mutex_unlock(&twa_chrdev_mutex);
+ return retval;
+} /* End twa_chrdev_ioctl() */
+
+/* This function handles open for the character device */
+/* NOTE that this function will race with remove. */
+static int twa_chrdev_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor_number;
+ int retval = TW_IOCTL_ERROR_OS_ENODEV;
+
+ minor_number = iminor(inode);
+ if (minor_number >= twa_device_extension_count)
+ goto out;
+ retval = 0;
+out:
+ return retval;
+} /* End twa_chrdev_open() */
+
+/* This function will print readable messages from status register errors */
+static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
+{
+ int retval = 1;
+
+ /* Check for various error conditions and handle them appropriately */
+ if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
+ writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
+ }
+
+ if (status_reg_value & TW_STATUS_PCI_ABORT) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
+ writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
+ pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
+ }
+
+ if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
+ if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
+ (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
+ (!test_bit(TW_IN_RESET, &tw_dev->flags)))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
+ writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
+ }
+
+ if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
+ if (tw_dev->reset_print == 0) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
+ tw_dev->reset_print = 1;
+ }
+ goto out;
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_decode_bits() */
+
+/* This function will empty the response queue */
+static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
+{
+ u32 status_reg_value, response_que_value;
+ int count = 0, retval = 1;
+
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+ while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
+ response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ count++;
+ }
+ if (count == TW_MAX_RESPONSE_DRAIN)
+ goto out;
+
+ retval = 0;
+out:
+ return retval;
+} /* End twa_empty_response_queue() */
+
+/* This function will clear the pchip/response queue on 9550SX */
+static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
+{
+ u32 response_que_value = 0;
+ unsigned long before;
+ int retval = 1;
+
+ if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
+ before = jiffies;
+ while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
+ response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
+ msleep(1);
+ if (time_after(jiffies, before + HZ * 30))
+ goto out;
+ }
+ /* P-chip settle time */
+ msleep(500);
+ retval = 0;
+ } else
+ retval = 0;
+out:
+ return retval;
+} /* End twa_empty_response_queue_large() */
+
+/* This function passes sense keys from firmware to scsi layer */
+static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
+{
+ TW_Command_Full *full_command_packet;
+ unsigned short error;
+ int retval = 1;
+ char *error_str;
+
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+
+ /* Check for embedded error string */
+ error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
+
+ /* Don't print error for Logical unit not supported during rollcall */
+ error = le16_to_cpu(full_command_packet->header.status_block.error);
+ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
+ if (print_host)
+ printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
+ tw_dev->host->host_no,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
+ full_command_packet->header.status_block.error,
+ error_str[0] == '\0' ?
+ twa_string_lookup(twa_error_table,
+ full_command_packet->header.status_block.error) : error_str,
+ full_command_packet->header.err_specific_desc);
+ else
+ printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
+ full_command_packet->header.status_block.error,
+ error_str[0] == '\0' ?
+ twa_string_lookup(twa_error_table,
+ full_command_packet->header.status_block.error) : error_str,
+ full_command_packet->header.err_specific_desc);
+ }
+
+ if (copy_sense) {
+ memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
+ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
+ retval = TW_ISR_DONT_RESULT;
+ goto out;
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_fill_sense() */
+
+/* This function will free up device extension resources */
+static void twa_free_device_extension(TW_Device_Extension *tw_dev)
+{
+ if (tw_dev->command_packet_virt[0])
+ pci_free_consistent(tw_dev->tw_pci_dev,
+ sizeof(TW_Command_Full)*TW_Q_LENGTH,
+ tw_dev->command_packet_virt[0],
+ tw_dev->command_packet_phys[0]);
+
+ if (tw_dev->generic_buffer_virt[0])
+ pci_free_consistent(tw_dev->tw_pci_dev,
+ TW_SECTOR_SIZE*TW_Q_LENGTH,
+ tw_dev->generic_buffer_virt[0],
+ tw_dev->generic_buffer_phys[0]);
+
+ kfree(tw_dev->event_queue[0]);
+} /* End twa_free_device_extension() */
+
+/* This function will free a request id */
+static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
+{
+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
+ tw_dev->state[request_id] = TW_S_FINISHED;
+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
+} /* End twa_free_request_id() */
+
+/* This function will get parameter table entries from the firmware */
+static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Command *command_packet;
+ TW_Param_Apache *param;
+ void *retval = NULL;
+
+ /* Setup the command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+ command_packet = &full_command_packet->command.oldcommand;
+
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->request_id = request_id;
+ command_packet->byte6_offset.block_count = cpu_to_le16(1);
+
+ /* Now setup the param */
+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+ memset(param, 0, TW_SECTOR_SIZE);
+ param->table_id = cpu_to_le16(table_id | 0x8000);
+ param->parameter_id = cpu_to_le16(parameter_id);
+ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
+
+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+
+ /* Post the command packet to the board */
+ twa_post_command_packet(tw_dev, request_id, 1);
+
+ /* Poll for completion */
+ if (twa_poll_response(tw_dev, request_id, 30))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
+ else
+ retval = (void *)&(param->data[0]);
+
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_INITIAL;
+
+ return retval;
+} /* End twa_get_param() */
+
+/* This function will assign an available request id */
+static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
+{
+ *request_id = tw_dev->free_queue[tw_dev->free_head];
+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
+ tw_dev->state[*request_id] = TW_S_STARTED;
+} /* End twa_get_request_id() */
+
+/* This function will send an initconnection command to controller */
+static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
+ u32 *init_connect_result)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Initconnect *tw_initconnect;
+ int request_id = 0, retval = 1;
+
+ /* Initialize InitConnection command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+ full_command_packet->header.header_desc.size_header = 128;
+
+ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
+ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
+ tw_initconnect->request_id = request_id;
+ tw_initconnect->message_credits = cpu_to_le16(message_credits);
+ tw_initconnect->features = set_features;
+
+ /* Turn on 64-bit sgl support if we need to */
+ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+
+ tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+
+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
+ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
+ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
+ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
+ tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
+ } else
+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
+
+ /* Send command packet to the board */
+ twa_post_command_packet(tw_dev, request_id, 1);
+
+ /* Poll for completion */
+ if (twa_poll_response(tw_dev, request_id, 30)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
+ } else {
+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
+ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
+ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
+ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
+ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
+ *init_connect_result = le32_to_cpu(tw_initconnect->result);
+ }
+ retval = 0;
+ }
+
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_INITIAL;
+
+ return retval;
+} /* End twa_initconnection() */
+
+/* This function will initialize the fields of a device extension */
+static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
+{
+ int i, retval = 1;
+
+ /* Initialize command packet buffers */
+ if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
+ goto out;
+ }
+
+ /* Initialize generic buffer */
+ if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
+ goto out;
+ }
+
+ /* Allocate event info space */
+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
+ if (!tw_dev->event_queue[0]) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
+ goto out;
+ }
+
+
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
+ tw_dev->free_queue[i] = i;
+ tw_dev->state[i] = TW_S_INITIAL;
+ }
+
+ tw_dev->pending_head = TW_Q_START;
+ tw_dev->pending_tail = TW_Q_START;
+ tw_dev->free_head = TW_Q_START;
+ tw_dev->free_tail = TW_Q_START;
+ tw_dev->error_sequence_id = 1;
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+ mutex_init(&tw_dev->ioctl_lock);
+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
+
+ retval = 0;
+out:
+ return retval;
+} /* End twa_initialize_device_extension() */
+
+/* This function is the interrupt service routine */
+static irqreturn_t twa_interrupt(int irq, void *dev_instance)
+{
+ int request_id, error = 0;
+ u32 status_reg_value;
+ TW_Response_Queue response_que;
+ TW_Command_Full *full_command_packet;
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
+ int handled = 0;
+
+ /* Get the per adapter lock */
+ spin_lock(tw_dev->host->host_lock);
+
+ /* Read the registers */
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+ /* Check if this is our interrupt, otherwise bail */
+ if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
+ goto twa_interrupt_bail;
+
+ handled = 1;
+
+ /* If we are resetting, bail */
+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
+ goto twa_interrupt_bail;
+
+ /* Check controller for errors */
+ if (twa_check_bits(status_reg_value)) {
+ if (twa_decode_bits(tw_dev, status_reg_value)) {
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+ goto twa_interrupt_bail;
+ }
+ }
+
+ /* Handle host interrupt */
+ if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
+ TW_CLEAR_HOST_INTERRUPT(tw_dev);
+
+ /* Handle attention interrupt */
+ if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
+ TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
+ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
+ twa_get_request_id(tw_dev, &request_id);
+
+ error = twa_aen_read_queue(tw_dev, request_id);
+ if (error) {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+ }
+ }
+ }
+
+ /* Handle command interrupt */
+ if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
+ TW_MASK_COMMAND_INTERRUPT(tw_dev);
+ /* Drain as many pending commands as we can */
+ while (tw_dev->pending_request_count > 0) {
+ request_id = tw_dev->pending_queue[tw_dev->pending_head];
+ if (tw_dev->state[request_id] != TW_S_PENDING) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+ goto twa_interrupt_bail;
+ }
+ if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
+ tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
+ tw_dev->pending_request_count--;
+ } else {
+ /* If we get here, we will continue re-posting on the next command interrupt */
+ break;
+ }
+ }
+ }
+
+ /* Handle response interrupt */
+ if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
+
+ /* Drain the response queue from the board */
+ while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
+ /* Complete the response */
+ response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ request_id = TW_RESID_OUT(response_que.response_id);
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ error = 0;
+ /* Check for command packet errors */
+ if (full_command_packet->command.newcommand.status != 0) {
+ if (tw_dev->srb[request_id] != NULL) {
+ error = twa_fill_sense(tw_dev, request_id, 1, 1);
+ } else {
+ /* Skip ioctl error prints */
+ if (request_id != tw_dev->chrdev_request_id) {
+ error = twa_fill_sense(tw_dev, request_id, 0, 1);
+ }
+ }
+ }
+
+ /* Check for correct state */
+ if (tw_dev->state[request_id] != TW_S_POSTED) {
+ if (tw_dev->srb[request_id] != NULL) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+ goto twa_interrupt_bail;
+ }
+ }
+
+ /* Check for internal command completion */
+ if (tw_dev->srb[request_id] == NULL) {
+ if (request_id != tw_dev->chrdev_request_id) {
+ if (twa_aen_complete(tw_dev, request_id))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
+ } else {
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+ wake_up(&tw_dev->ioctl_wqueue);
+ }
+ } else {
+ struct scsi_cmnd *cmd;
+
+ cmd = tw_dev->srb[request_id];
+
+ twa_scsiop_execute_scsi_complete(tw_dev, request_id);
+ /* If no error command was a success */
+ if (error == 0) {
+ cmd->result = (DID_OK << 16);
+ }
+
+ /* If error, command failed */
+ if (error == 1) {
+ /* Ask for a host reset */
+ cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ }
+
+ /* Report residual bytes for single sgl */
+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+ }
+
+ /* Now complete the io */
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+ tw_dev->posted_request_count--;
+ }
+
+ /* Check for valid status after each drain */
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ if (twa_check_bits(status_reg_value)) {
+ if (twa_decode_bits(tw_dev, status_reg_value)) {
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+ goto twa_interrupt_bail;
+ }
+ }
+ }
+ }
+
+twa_interrupt_bail:
+ spin_unlock(tw_dev->host->host_lock);
+ return IRQ_RETVAL(handled);
+} /* End twa_interrupt() */
+
+/* This function will load the request id and various sgls for ioctls */
+static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
+{
+ TW_Command *oldcommand;
+ TW_Command_Apache *newcommand;
+ TW_SG_Entry *sgl;
+ unsigned int pae = 0;
+
+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
+ pae = 1;
+
+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
+ newcommand = &full_command_packet->command.newcommand;
+ newcommand->request_id__lunl =
+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+ if (length) {
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].length = cpu_to_le32(length);
+ }
+ newcommand->sgl_entries__lunh =
+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+ } else {
+ oldcommand = &full_command_packet->command.oldcommand;
+ oldcommand->request_id = request_id;
+
+ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
+ /* Load the sg list */
+ if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
+ sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
+ else
+ sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->length = cpu_to_le32(length);
+
+ oldcommand->size += pae;
+ }
+ }
+} /* End twa_load_sgl() */
+
+/* This function will poll for a response interrupt of a request */
+static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
+{
+ int retval = 1, found = 0, response_request_id;
+ TW_Response_Queue response_queue;
+ TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
+
+ if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
+ response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ response_request_id = TW_RESID_OUT(response_queue.response_id);
+ if (request_id != response_request_id) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
+ goto out;
+ }
+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
+ if (full_command_packet->command.newcommand.status != 0) {
+ /* bad response */
+ twa_fill_sense(tw_dev, request_id, 0, 0);
+ goto out;
+ }
+ found = 1;
+ } else {
+ if (full_command_packet->command.oldcommand.status != 0) {
+ /* bad response */
+ twa_fill_sense(tw_dev, request_id, 0, 0);
+ goto out;
+ }
+ found = 1;
+ }
+ }
+
+ if (found)
+ retval = 0;
+out:
+ return retval;
+} /* End twa_poll_response() */
+
+/* This function will poll the status register for a flag */
+static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
+{
+ u32 status_reg_value;
+ unsigned long before;
+ int retval = 1;
+
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ before = jiffies;
+
+ if (twa_check_bits(status_reg_value))
+ twa_decode_bits(tw_dev, status_reg_value);
+
+ while ((status_reg_value & flag) != flag) {
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+ if (twa_check_bits(status_reg_value))
+ twa_decode_bits(tw_dev, status_reg_value);
+
+ if (time_after(jiffies, before + HZ * seconds))
+ goto out;
+
+ msleep(50);
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_poll_status() */
+
+/* This function will poll the status register for disappearance of a flag */
+static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
+{
+ u32 status_reg_value;
+ unsigned long before;
+ int retval = 1;
+
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ before = jiffies;
+
+ if (twa_check_bits(status_reg_value))
+ twa_decode_bits(tw_dev, status_reg_value);
+
+ while ((status_reg_value & flag) != 0) {
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ if (twa_check_bits(status_reg_value))
+ twa_decode_bits(tw_dev, status_reg_value);
+
+ if (time_after(jiffies, before + HZ * seconds))
+ goto out;
+
+ msleep(50);
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_poll_status_gone() */
+
+/* This function will attempt to post a command packet to the board */
+static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
+{
+ u32 status_reg_value;
+ dma_addr_t command_que_value;
+ int retval = 1;
+
+ command_que_value = tw_dev->command_packet_phys[request_id];
+
+ /* For 9650SE write low 4 bytes first */
+ if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+ (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
+ command_que_value += TW_COMMAND_OFFSET;
+ writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
+ }
+
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+ if (twa_check_bits(status_reg_value))
+ twa_decode_bits(tw_dev, status_reg_value);
+
+ if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
+
+ /* Only pend internal driver commands */
+ if (!internal) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /* Couldn't post the command packet, so we do it later */
+ if (tw_dev->state[request_id] != TW_S_PENDING) {
+ tw_dev->state[request_id] = TW_S_PENDING;
+ tw_dev->pending_request_count++;
+ if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
+ tw_dev->max_pending_request_count = tw_dev->pending_request_count;
+ }
+ tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
+ tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
+ }
+ TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
+ goto out;
+ } else {
+ if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+ (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
+ /* Now write upper 4 bytes */
+ writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
+ } else {
+ if (sizeof(dma_addr_t) > 4) {
+ command_que_value += TW_COMMAND_OFFSET;
+ writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+ writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
+ } else {
+ writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+ }
+ }
+ tw_dev->state[request_id] = TW_S_POSTED;
+ tw_dev->posted_request_count++;
+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
+ }
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twa_post_command_packet() */
+
+/* This function will reset a device extension */
+static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
+{
+ int i = 0;
+ int retval = 1;
+ unsigned long flags = 0;
+
+ set_bit(TW_IN_RESET, &tw_dev->flags);
+ TW_DISABLE_INTERRUPTS(tw_dev);
+ TW_MASK_COMMAND_INTERRUPT(tw_dev);
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+
+ /* Abort all requests that are in progress */
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ (tw_dev->state[i] != TW_S_INITIAL) &&
+ (tw_dev->state[i] != TW_S_COMPLETED)) {
+ if (tw_dev->srb[i]) {
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ cmd->result = (DID_RESET << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+ }
+ }
+ }
+
+ /* Reset queues and counts */
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ tw_dev->free_queue[i] = i;
+ tw_dev->state[i] = TW_S_INITIAL;
+ }
+ tw_dev->free_head = TW_Q_START;
+ tw_dev->free_tail = TW_Q_START;
+ tw_dev->posted_request_count = 0;
+ tw_dev->pending_request_count = 0;
+ tw_dev->pending_head = TW_Q_START;
+ tw_dev->pending_tail = TW_Q_START;
+ tw_dev->reset_print = 0;
+
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ if (twa_reset_sequence(tw_dev, 1))
+ goto out;
+
+ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
+ clear_bit(TW_IN_RESET, &tw_dev->flags);
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+ retval = 0;
+out:
+ return retval;
+} /* End twa_reset_device_extension() */
+
+/* This function will reset a controller */
+static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
+{
+ int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
+
+ while (tries < TW_MAX_RESET_TRIES) {
+ if (do_soft_reset) {
+ TW_SOFT_RESET(tw_dev);
+ /* Clear pchip/response queue on 9550SX */
+ if (twa_empty_response_queue_large(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+ }
+
+ /* Make sure controller is in a good state */
+ if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+
+ /* Empty response queue */
+ if (twa_empty_response_queue(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+
+ flashed = 0;
+
+ /* Check for compatibility/flash */
+ if (twa_check_srl(tw_dev, &flashed)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ } else {
+ if (flashed) {
+ tries++;
+ continue;
+ }
+ }
+
+ /* Drain the AEN queue */
+ if (twa_aen_drain_queue(tw_dev, soft_reset)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+
+ /* If we got here, controller is in a good state */
+ retval = 0;
+ goto out;
+ }
+out:
+ return retval;
+} /* End twa_reset_sequence() */
+
+/* This funciton returns unit geometry in cylinders/heads/sectors */
+static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+{
+ int heads, sectors, cylinders;
+ TW_Device_Extension *tw_dev;
+
+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ cylinders = sector_div(capacity, heads * sectors);
+ } else {
+ heads = 64;
+ sectors = 32;
+ cylinders = sector_div(capacity, heads * sectors);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+} /* End twa_scsi_biosparam() */
+
+/* This is the new scsi eh reset function */
+static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
+{
+ TW_Device_Extension *tw_dev = NULL;
+ int retval = FAILED;
+
+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+ tw_dev->num_resets++;
+
+ sdev_printk(KERN_WARNING, SCpnt->device,
+ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
+ TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
+
+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
+ mutex_lock(&tw_dev->ioctl_lock);
+
+ /* Now reset the card and some of the device extension data */
+ if (twa_reset_device_extension(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ mutex_unlock(&tw_dev->ioctl_lock);
+ return retval;
+} /* End twa_scsi_eh_reset() */
+
+/* This is the main scsi queue function to handle scsi opcodes */
+static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ int request_id, retval;
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+ /* If we are resetting due to timed out ioctl, report as busy */
+ if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /* Check if this FW supports luns */
+ if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
+ SCpnt->result = (DID_BAD_TARGET << 16);
+ done(SCpnt);
+ retval = 0;
+ goto out;
+ }
+
+ /* Save done function into scsi_cmnd struct */
+ SCpnt->scsi_done = done;
+
+ /* Get a free request id */
+ twa_get_request_id(tw_dev, &request_id);
+
+ /* Save the scsi command for use by the ISR */
+ tw_dev->srb[request_id] = SCpnt;
+
+ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ switch (retval) {
+ case SCSI_MLQUEUE_HOST_BUSY:
+ scsi_dma_unmap(SCpnt);
+ twa_free_request_id(tw_dev, request_id);
+ break;
+ case 1:
+ SCpnt->result = (DID_ERROR << 16);
+ scsi_dma_unmap(SCpnt);
+ done(SCpnt);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+ retval = 0;
+ }
+out:
+ return retval;
+} /* End twa_scsi_queue() */
+
+static DEF_SCSI_QCMD(twa_scsi_queue)
+
+/* This function hands scsi cdb's to the firmware */
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Command_Apache *command_packet;
+ u32 num_sectors = 0x0;
+ int i, sg_count;
+ struct scsi_cmnd *srb = NULL;
+ struct scatterlist *sglist = NULL, *sg;
+ int retval = 1;
+
+ if (tw_dev->srb[request_id]) {
+ srb = tw_dev->srb[request_id];
+ if (scsi_sglist(srb))
+ sglist = scsi_sglist(srb);
+ }
+
+ /* Initialize command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ full_command_packet->header.header_desc.size_header = 128;
+ full_command_packet->header.status_block.error = 0;
+ full_command_packet->header.status_block.severity__reserved = 0;
+
+ command_packet = &full_command_packet->command.newcommand;
+ command_packet->status = 0;
+ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
+
+ /* We forced 16 byte cdb use earlier */
+ if (!cdb)
+ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
+ else
+ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
+
+ if (srb) {
+ command_packet->unit = srb->device->id;
+ command_packet->request_id__lunl =
+ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+ } else {
+ command_packet->request_id__lunl =
+ cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+ command_packet->unit = 0;
+ }
+
+ command_packet->sgl_offset = 16;
+
+ if (!sglistarg) {
+ /* Map sglist from scsi layer to cmd packet */
+
+ if (scsi_sg_count(srb)) {
+ if ((scsi_sg_count(srb) == 1) &&
+ (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+ if (srb->sc_data_direction == DMA_TO_DEVICE ||
+ srb->sc_data_direction == DMA_BIDIRECTIONAL)
+ scsi_sg_copy_to_buffer(srb,
+ tw_dev->generic_buffer_virt[request_id],
+ TW_SECTOR_SIZE);
+ command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
+ } else {
+ sg_count = scsi_dma_map(srb);
+ if (sg_count < 0)
+ goto out;
+
+ scsi_for_each_sg(srb, sg, sg_count, i) {
+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+ command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
+ if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
+ goto out;
+ }
+ }
+ }
+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+ }
+ } else {
+ /* Internal cdb post */
+ for (i = 0; i < use_sg; i++) {
+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
+ command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
+ if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
+ goto out;
+ }
+ }
+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+ }
+
+ if (srb) {
+ if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
+ num_sectors = (u32)srb->cmnd[4];
+
+ if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
+ num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
+ }
+
+ /* Update sector statistic */
+ tw_dev->sector_count = num_sectors;
+ if (tw_dev->sector_count > tw_dev->max_sector_count)
+ tw_dev->max_sector_count = tw_dev->sector_count;
+
+ /* Update SG statistics */
+ if (srb) {
+ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
+ }
+
+ /* Now post the command to the board */
+ if (srb) {
+ retval = twa_post_command_packet(tw_dev, request_id, 0);
+ } else {
+ twa_post_command_packet(tw_dev, request_id, 1);
+ retval = 0;
+ }
+out:
+ return retval;
+} /* End twa_scsiop_execute_scsi() */
+
+/* This function completes an execute scsi operation */
+static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+ if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+ (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+ if (scsi_sg_count(cmd) == 1) {
+ void *buf = tw_dev->generic_buffer_virt[request_id];
+
+ scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
+ }
+ }
+} /* End twa_scsiop_execute_scsi_complete() */
+
+/* This function tells the controller to shut down */
+static void __twa_shutdown(TW_Device_Extension *tw_dev)
+{
+ /* Disable interrupts */
+ TW_DISABLE_INTERRUPTS(tw_dev);
+
+ /* Free up the IRQ */
+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+ printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
+
+ /* Tell the card we are shutting down */
+ if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
+ } else {
+ printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
+ }
+
+ /* Clear all interrupts just before exit */
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+} /* End __twa_shutdown() */
+
+/* Wrapper for __twa_shutdown */
+static void twa_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ __twa_shutdown(tw_dev);
+} /* End twa_shutdown() */
+
+/* This function will look up a string */
+static char *twa_string_lookup(twa_message_type *table, unsigned int code)
+{
+ int index;
+
+ for (index = 0; ((code != table[index].code) &&
+ (table[index].text != (char *)0)); index++);
+ return(table[index].text);
+} /* End twa_string_lookup() */
+
+/* This function gets called when a disk is coming on-line */
+static int twa_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End twa_slave_configure() */
+
+/* scsi_host_template initializer */
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "3ware 9000 Storage Controller",
+ .queuecommand = twa_scsi_queue,
+ .eh_host_reset_handler = twa_scsi_eh_reset,
+ .bios_param = twa_scsi_biosparam,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = TW_Q_LENGTH-2,
+ .slave_configure = twa_slave_configure,
+ .this_id = -1,
+ .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
+ .max_sectors = TW_MAX_SECTORS,
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = twa_host_attrs,
+ .emulated = 1,
+ .no_write_same = 1,
+};
+
+/* This function will probe and initialize a card */
+static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+{
+ struct Scsi_Host *host = NULL;
+ TW_Device_Extension *tw_dev;
+ unsigned long mem_addr, mem_len;
+ int retval = -ENODEV;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
+ if (!host) {
+ TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
+ retval = -ENOMEM;
+ goto out_disable_device;
+ }
+ tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ /* Save values to device extension */
+ tw_dev->host = host;
+ tw_dev->tw_pci_dev = pdev;
+
+ if (twa_initialize_device_extension(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+ goto out_free_device_extension;
+ }
+
+ /* Request IO regions */
+ retval = pci_request_regions(pdev, "3w-9xxx");
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
+ goto out_free_device_extension;
+ }
+
+ if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
+ mem_addr = pci_resource_start(pdev, 1);
+ mem_len = pci_resource_len(pdev, 1);
+ } else {
+ mem_addr = pci_resource_start(pdev, 2);
+ mem_len = pci_resource_len(pdev, 2);
+ }
+
+ /* Save base address */
+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
+ if (!tw_dev->base_addr) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+ goto out_release_mem_region;
+ }
+
+ /* Disable interrupts on the card */
+ TW_DISABLE_INTERRUPTS(tw_dev);
+
+ /* Initialize the card */
+ if (twa_reset_sequence(tw_dev, 0))
+ goto out_iounmap;
+
+ /* Set host specific parameters */
+ if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+ (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
+ host->max_id = TW_MAX_UNITS_9650SE;
+ else
+ host->max_id = TW_MAX_UNITS;
+
+ host->max_cmd_len = TW_MAX_CDB_LEN;
+
+ /* Channels aren't supported by adapter */
+ host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
+ host->max_channel = 0;
+
+ /* Register the card with the kernel SCSI layer */
+ retval = scsi_add_host(host, &pdev->dev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
+ goto out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, host);
+
+ printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
+ host->host_no, mem_addr, pdev->irq);
+ printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
+ host->host_no,
+ (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
+ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
+ (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
+ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
+ le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
+ TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
+
+ /* Try to enable MSI */
+ if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
+ !pci_enable_msi(pdev))
+ set_bit(TW_USING_MSI, &tw_dev->flags);
+
+ /* Now setup the interrupt handler */
+ retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
+ goto out_remove_host;
+ }
+
+ twa_device_extension_list[twa_device_extension_count] = tw_dev;
+ twa_device_extension_count++;
+
+ /* Re-enable interrupts on the card */
+ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
+
+ /* Finally, scan the host */
+ scsi_scan_host(host);
+
+ if (twa_major == -1) {
+ if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
+ TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
+ }
+ return 0;
+
+out_remove_host:
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+ scsi_remove_host(host);
+out_iounmap:
+ iounmap(tw_dev->base_addr);
+out_release_mem_region:
+ pci_release_regions(pdev);
+out_free_device_extension:
+ twa_free_device_extension(tw_dev);
+ scsi_host_put(host);
+out_disable_device:
+ pci_disable_device(pdev);
+
+ return retval;
+} /* End twa_probe() */
+
+/* This function is called to remove a device */
+static void twa_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ scsi_remove_host(tw_dev->host);
+
+ /* Unregister character device */
+ if (twa_major >= 0) {
+ unregister_chrdev(twa_major, "twa");
+ twa_major = -1;
+ }
+
+ /* Shutdown the card */
+ __twa_shutdown(tw_dev);
+
+ /* Disable MSI if enabled */
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+
+ /* Free IO remapping */
+ iounmap(tw_dev->base_addr);
+
+ /* Free up the mem region */
+ pci_release_regions(pdev);
+
+ /* Free up device extension resources */
+ twa_free_device_extension(tw_dev);
+
+ scsi_host_put(tw_dev->host);
+ pci_disable_device(pdev);
+ twa_device_extension_count--;
+} /* End twa_remove() */
+
+#ifdef CONFIG_PM
+/* This function is called on PCI suspend */
+static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
+
+ TW_DISABLE_INTERRUPTS(tw_dev);
+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+
+ /* Tell the card we are shutting down */
+ if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
+ } else {
+ printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
+ }
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+} /* End twa_suspend() */
+
+/* This function is called on PCI resume */
+static int twa_resume(struct pci_dev *pdev)
+{
+ int retval = 0;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
+ return retval;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ /* Initialize the card */
+ if (twa_reset_sequence(tw_dev, 0)) {
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ /* Now setup the interrupt handler */
+ retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ /* Now enable MSI if enabled */
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_enable_msi(pdev);
+
+ /* Re-enable interrupts on the card */
+ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
+
+ printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
+ return 0;
+
+out_disable_device:
+ scsi_remove_host(host);
+ pci_disable_device(pdev);
+
+ return retval;
+} /* End twa_resume() */
+#endif
+
+/* PCI Devices supported by this driver */
+static struct pci_device_id twa_pci_tbl[] = {
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
+
+/* pci_driver initializer */
+static struct pci_driver twa_driver = {
+ .name = "3w-9xxx",
+ .id_table = twa_pci_tbl,
+ .probe = twa_probe,
+ .remove = twa_remove,
+#ifdef CONFIG_PM
+ .suspend = twa_suspend,
+ .resume = twa_resume,
+#endif
+ .shutdown = twa_shutdown
+};
+
+/* This function is called on driver initialization */
+static int __init twa_init(void)
+{
+ printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
+
+ return pci_register_driver(&twa_driver);
+} /* End twa_init() */
+
+/* This function is called on driver exit */
+static void __exit twa_exit(void)
+{
+ pci_unregister_driver(&twa_driver);
+} /* End twa_exit() */
+
+module_init(twa_init);
+module_exit(twa_exit);
+
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
new file mode 100644
index 000000000..0fdc83cfa
--- /dev/null
+++ b/drivers/scsi/3w-9xxx.h
@@ -0,0 +1,681 @@
+/*
+ 3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
+
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
+
+ Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
+ linuxraid@lsi.com
+
+ For more information, goto:
+ http://www.lsi.com
+*/
+
+#ifndef _3W_9XXX_H
+#define _3W_9XXX_H
+
+/* AEN string type */
+typedef struct TAG_twa_message_type {
+ unsigned int code;
+ char* text;
+} twa_message_type;
+
+/* AEN strings */
+static twa_message_type twa_aen_table[] = {
+ {0x0000, "AEN queue empty"},
+ {0x0001, "Controller reset occurred"},
+ {0x0002, "Degraded unit detected"},
+ {0x0003, "Controller error occurred"},
+ {0x0004, "Background rebuild failed"},
+ {0x0005, "Background rebuild done"},
+ {0x0006, "Incomplete unit detected"},
+ {0x0007, "Background initialize done"},
+ {0x0008, "Unclean shutdown detected"},
+ {0x0009, "Drive timeout detected"},
+ {0x000A, "Drive error detected"},
+ {0x000B, "Rebuild started"},
+ {0x000C, "Background initialize started"},
+ {0x000D, "Entire logical unit was deleted"},
+ {0x000E, "Background initialize failed"},
+ {0x000F, "SMART attribute exceeded threshold"},
+ {0x0010, "Power supply reported AC under range"},
+ {0x0011, "Power supply reported DC out of range"},
+ {0x0012, "Power supply reported a malfunction"},
+ {0x0013, "Power supply predicted malfunction"},
+ {0x0014, "Battery charge is below threshold"},
+ {0x0015, "Fan speed is below threshold"},
+ {0x0016, "Temperature sensor is above threshold"},
+ {0x0017, "Power supply was removed"},
+ {0x0018, "Power supply was inserted"},
+ {0x0019, "Drive was removed from a bay"},
+ {0x001A, "Drive was inserted into a bay"},
+ {0x001B, "Drive bay cover door was opened"},
+ {0x001C, "Drive bay cover door was closed"},
+ {0x001D, "Product case was opened"},
+ {0x0020, "Prepare for shutdown (power-off)"},
+ {0x0021, "Downgrade UDMA mode to lower speed"},
+ {0x0022, "Upgrade UDMA mode to higher speed"},
+ {0x0023, "Sector repair completed"},
+ {0x0024, "Sbuf memory test failed"},
+ {0x0025, "Error flushing cached write data to array"},
+ {0x0026, "Drive reported data ECC error"},
+ {0x0027, "DCB has checksum error"},
+ {0x0028, "DCB version is unsupported"},
+ {0x0029, "Background verify started"},
+ {0x002A, "Background verify failed"},
+ {0x002B, "Background verify done"},
+ {0x002C, "Bad sector overwritten during rebuild"},
+ {0x002D, "Background rebuild error on source drive"},
+ {0x002E, "Replace failed because replacement drive too small"},
+ {0x002F, "Verify failed because array was never initialized"},
+ {0x0030, "Unsupported ATA drive"},
+ {0x0031, "Synchronize host/controller time"},
+ {0x0032, "Spare capacity is inadequate for some units"},
+ {0x0033, "Background migration started"},
+ {0x0034, "Background migration failed"},
+ {0x0035, "Background migration done"},
+ {0x0036, "Verify detected and fixed data/parity mismatch"},
+ {0x0037, "SO-DIMM incompatible"},
+ {0x0038, "SO-DIMM not detected"},
+ {0x0039, "Corrected Sbuf ECC error"},
+ {0x003A, "Drive power on reset detected"},
+ {0x003B, "Background rebuild paused"},
+ {0x003C, "Background initialize paused"},
+ {0x003D, "Background verify paused"},
+ {0x003E, "Background migration paused"},
+ {0x003F, "Corrupt flash file system detected"},
+ {0x0040, "Flash file system repaired"},
+ {0x0041, "Unit number assignments were lost"},
+ {0x0042, "Error during read of primary DCB"},
+ {0x0043, "Latent error found in backup DCB"},
+ {0x00FC, "Recovered/finished array membership update"},
+ {0x00FD, "Handler lockup"},
+ {0x00FE, "Retrying PCI transfer"},
+ {0x00FF, "AEN queue is full"},
+ {0xFFFFFFFF, (char*) 0}
+};
+
+/* AEN severity table */
+static char *twa_aen_severity_table[] =
+{
+ "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0
+};
+
+/* Error strings */
+static twa_message_type twa_error_table[] = {
+ {0x0100, "SGL entry contains zero data"},
+ {0x0101, "Invalid command opcode"},
+ {0x0102, "SGL entry has unaligned address"},
+ {0x0103, "SGL size does not match command"},
+ {0x0104, "SGL entry has illegal length"},
+ {0x0105, "Command packet is not aligned"},
+ {0x0106, "Invalid request ID"},
+ {0x0107, "Duplicate request ID"},
+ {0x0108, "ID not locked"},
+ {0x0109, "LBA out of range"},
+ {0x010A, "Logical unit not supported"},
+ {0x010B, "Parameter table does not exist"},
+ {0x010C, "Parameter index does not exist"},
+ {0x010D, "Invalid field in CDB"},
+ {0x010E, "Specified port has invalid drive"},
+ {0x010F, "Parameter item size mismatch"},
+ {0x0110, "Failed memory allocation"},
+ {0x0111, "Memory request too large"},
+ {0x0112, "Out of memory segments"},
+ {0x0113, "Invalid address to deallocate"},
+ {0x0114, "Out of memory"},
+ {0x0115, "Out of heap"},
+ {0x0120, "Double degrade"},
+ {0x0121, "Drive not degraded"},
+ {0x0122, "Reconstruct error"},
+ {0x0123, "Replace not accepted"},
+ {0x0124, "Replace drive capacity too small"},
+ {0x0125, "Sector count not allowed"},
+ {0x0126, "No spares left"},
+ {0x0127, "Reconstruct error"},
+ {0x0128, "Unit is offline"},
+ {0x0129, "Cannot update status to DCB"},
+ {0x0130, "Invalid stripe handle"},
+ {0x0131, "Handle that was not locked"},
+ {0x0132, "Handle that was not empty"},
+ {0x0133, "Handle has different owner"},
+ {0x0140, "IPR has parent"},
+ {0x0150, "Illegal Pbuf address alignment"},
+ {0x0151, "Illegal Pbuf transfer length"},
+ {0x0152, "Illegal Sbuf address alignment"},
+ {0x0153, "Illegal Sbuf transfer length"},
+ {0x0160, "Command packet too large"},
+ {0x0161, "SGL exceeds maximum length"},
+ {0x0162, "SGL has too many entries"},
+ {0x0170, "Insufficient resources for rebuilder"},
+ {0x0171, "Verify error (data != parity)"},
+ {0x0180, "Requested segment not in directory of this DCB"},
+ {0x0181, "DCB segment has unsupported version"},
+ {0x0182, "DCB segment has checksum error"},
+ {0x0183, "DCB support (settings) segment invalid"},
+ {0x0184, "DCB UDB (unit descriptor block) segment invalid"},
+ {0x0185, "DCB GUID (globally unique identifier) segment invalid"},
+ {0x01A0, "Could not clear Sbuf"},
+ {0x01C0, "Flash identify failed"},
+ {0x01C1, "Flash out of bounds"},
+ {0x01C2, "Flash verify error"},
+ {0x01C3, "Flash file object not found"},
+ {0x01C4, "Flash file already present"},
+ {0x01C5, "Flash file system full"},
+ {0x01C6, "Flash file not present"},
+ {0x01C7, "Flash file size error"},
+ {0x01C8, "Bad flash file checksum"},
+ {0x01CA, "Corrupt flash file system detected"},
+ {0x01D0, "Invalid field in parameter list"},
+ {0x01D1, "Parameter list length error"},
+ {0x01D2, "Parameter item is not changeable"},
+ {0x01D3, "Parameter item is not saveable"},
+ {0x0200, "UDMA CRC error"},
+ {0x0201, "Internal CRC error"},
+ {0x0202, "Data ECC error"},
+ {0x0203, "ADP level 1 error"},
+ {0x0204, "Port timeout"},
+ {0x0205, "Drive power on reset"},
+ {0x0206, "ADP level 2 error"},
+ {0x0207, "Soft reset failed"},
+ {0x0208, "Drive not ready"},
+ {0x0209, "Unclassified port error"},
+ {0x020A, "Drive aborted command"},
+ {0x0210, "Internal CRC error"},
+ {0x0211, "PCI abort error"},
+ {0x0212, "PCI parity error"},
+ {0x0213, "Port handler error"},
+ {0x0214, "Token interrupt count error"},
+ {0x0215, "Timeout waiting for PCI transfer"},
+ {0x0216, "Corrected buffer ECC"},
+ {0x0217, "Uncorrected buffer ECC"},
+ {0x0230, "Unsupported command during flash recovery"},
+ {0x0231, "Next image buffer expected"},
+ {0x0232, "Binary image architecture incompatible"},
+ {0x0233, "Binary image has no signature"},
+ {0x0234, "Binary image has bad checksum"},
+ {0x0235, "Image downloaded overflowed buffer"},
+ {0x0240, "I2C device not found"},
+ {0x0241, "I2C transaction aborted"},
+ {0x0242, "SO-DIMM parameter(s) incompatible using defaults"},
+ {0x0243, "SO-DIMM unsupported"},
+ {0x0248, "SPI transfer status error"},
+ {0x0249, "SPI transfer timeout error"},
+ {0x0250, "Invalid unit descriptor size in CreateUnit"},
+ {0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"},
+ {0x0252, "Invalid value in CreateUnit descriptor"},
+ {0x0253, "Inadequate disk space to support descriptor in CreateUnit"},
+ {0x0254, "Unable to create data channel for this unit descriptor"},
+ {0x0255, "CreateUnit descriptor specifies a drive already in use"},
+ {0x0256, "Unable to write configuration to all disks during CreateUnit"},
+ {0x0257, "CreateUnit does not support this descriptor version"},
+ {0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"},
+ {0x0259, "Too many descriptors in CreateUnit"},
+ {0x025A, "Invalid configuration specified in CreateUnit descriptor"},
+ {0x025B, "Invalid LBA offset specified in CreateUnit descriptor"},
+ {0x025C, "Invalid stripelet size specified in CreateUnit descriptor"},
+ {0x0260, "SMART attribute exceeded threshold"},
+ {0xFFFFFFFF, (char*) 0}
+};
+
+/* Control register bit definitions */
+#define TW_CONTROL_CLEAR_HOST_INTERRUPT 0x00080000
+#define TW_CONTROL_CLEAR_ATTENTION_INTERRUPT 0x00040000
+#define TW_CONTROL_MASK_COMMAND_INTERRUPT 0x00020000
+#define TW_CONTROL_MASK_RESPONSE_INTERRUPT 0x00010000
+#define TW_CONTROL_UNMASK_COMMAND_INTERRUPT 0x00008000
+#define TW_CONTROL_UNMASK_RESPONSE_INTERRUPT 0x00004000
+#define TW_CONTROL_CLEAR_ERROR_STATUS 0x00000200
+#define TW_CONTROL_ISSUE_SOFT_RESET 0x00000100
+#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080
+#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040
+#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020
+#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
+#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
+#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
+
+/* Status register bit definitions */
+#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
+#define TW_STATUS_MINOR_VERSION_MASK 0x0F000000
+#define TW_STATUS_PCI_PARITY_ERROR 0x00800000
+#define TW_STATUS_QUEUE_ERROR 0x00400000
+#define TW_STATUS_MICROCONTROLLER_ERROR 0x00200000
+#define TW_STATUS_PCI_ABORT 0x00100000
+#define TW_STATUS_HOST_INTERRUPT 0x00080000
+#define TW_STATUS_ATTENTION_INTERRUPT 0x00040000
+#define TW_STATUS_COMMAND_INTERRUPT 0x00020000
+#define TW_STATUS_RESPONSE_INTERRUPT 0x00010000
+#define TW_STATUS_COMMAND_QUEUE_FULL 0x00008000
+#define TW_STATUS_RESPONSE_QUEUE_EMPTY 0x00004000
+#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
+#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
+#define TW_STATUS_EXPECTED_BITS 0x00002000
+#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
+
+/* PCI related defines */
+#define TW_PCI_CLEAR_PARITY_ERRORS 0xc100
+#define TW_PCI_CLEAR_PCI_ABORT 0x2000
+
+/* Command packet opcodes used by the driver */
+#define TW_OP_INIT_CONNECTION 0x1
+#define TW_OP_GET_PARAM 0x12
+#define TW_OP_SET_PARAM 0x13
+#define TW_OP_EXECUTE_SCSI 0x10
+#define TW_OP_DOWNLOAD_FIRMWARE 0x16
+#define TW_OP_RESET 0x1C
+
+/* Asynchronous Event Notification (AEN) codes used by the driver */
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
+#define TW_AEN_SEVERITY_ERROR 0x1
+#define TW_AEN_SEVERITY_DEBUG 0x4
+#define TW_AEN_NOT_RETRIEVED 0x1
+#define TW_AEN_RETRIEVED 0x2
+
+/* Command state defines */
+#define TW_S_INITIAL 0x1 /* Initial state */
+#define TW_S_STARTED 0x2 /* Id in use */
+#define TW_S_POSTED 0x4 /* Posted to the controller */
+#define TW_S_PENDING 0x8 /* Waiting to be posted in isr */
+#define TW_S_COMPLETED 0x10 /* Completed by isr */
+#define TW_S_FINISHED 0x20 /* I/O completely done */
+
+/* Compatibility defines */
+#define TW_9000_ARCH_ID 0x5
+#define TW_CURRENT_DRIVER_SRL 35
+#define TW_CURRENT_DRIVER_BUILD 0
+#define TW_CURRENT_DRIVER_BRANCH 0
+
+/* Misc defines */
+#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
+#define TW_SECTOR_SIZE 512
+#define TW_ALIGNMENT_9000 4 /* 4 bytes */
+#define TW_ALIGNMENT_9000_SGL 0x3
+#define TW_MAX_UNITS 16
+#define TW_MAX_UNITS_9650SE 32
+#define TW_INIT_MESSAGE_CREDITS 0x100
+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
+#define TW_EXTENDED_INIT_CONNECT 0x2
+#define TW_BUNDLED_FW_SAFE_TO_FLASH 0x4
+#define TW_CTLR_FW_RECOMMENDS_FLASH 0x8
+#define TW_CTLR_FW_COMPATIBLE 0x2
+#define TW_BASE_FW_SRL 24
+#define TW_BASE_FW_BRANCH 0
+#define TW_BASE_FW_BUILD 1
+#define TW_FW_SRL_LUNS_SUPPORTED 28
+#define TW_Q_LENGTH 256
+#define TW_Q_START 0
+#define TW_MAX_SLOT 32
+#define TW_MAX_RESET_TRIES 2
+#define TW_MAX_CMDS_PER_LUN 254
+#define TW_MAX_RESPONSE_DRAIN 256
+#define TW_MAX_AEN_DRAIN 255
+#define TW_IN_RESET 2
+#define TW_USING_MSI 3
+#define TW_IN_ATTENTION_LOOP 4
+#define TW_MAX_SECTORS 256
+#define TW_AEN_WAIT_TIME 1000
+#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
+#define TW_MAX_CDB_LEN 16
+#define TW_ISR_DONT_COMPLETE 2
+#define TW_ISR_DONT_RESULT 3
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
+#define TW_VERSION_TABLE 0x0402
+#define TW_TIMEKEEP_TABLE 0x040A
+#define TW_INFORMATION_TABLE 0x0403
+#define TW_PARAM_FWVER 3
+#define TW_PARAM_FWVER_LENGTH 16
+#define TW_PARAM_BIOSVER 4
+#define TW_PARAM_BIOSVER_LENGTH 16
+#define TW_PARAM_PORTCOUNT 3
+#define TW_PARAM_PORTCOUNT_LENGTH 1
+#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */
+#define TW_MAX_SENSE_LENGTH 256
+#define TW_EVENT_SOURCE_AEN 0x1000
+#define TW_EVENT_SOURCE_COMMAND 0x1001
+#define TW_EVENT_SOURCE_PCHIP 0x1002
+#define TW_EVENT_SOURCE_DRIVER 0x1003
+#define TW_IOCTL_GET_COMPATIBILITY_INFO 0x101
+#define TW_IOCTL_GET_LAST_EVENT 0x102
+#define TW_IOCTL_GET_FIRST_EVENT 0x103
+#define TW_IOCTL_GET_NEXT_EVENT 0x104
+#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105
+#define TW_IOCTL_GET_LOCK 0x106
+#define TW_IOCTL_RELEASE_LOCK 0x107
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108
+#define TW_IOCTL_ERROR_STATUS_NOT_LOCKED 0x1001 // Not locked
+#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked
+#define TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS 0x1003 // No more events
+#define TW_IOCTL_ERROR_STATUS_AEN_CLOBBER 0x1004 // AEN clobber occurred
+#define TW_IOCTL_ERROR_OS_EFAULT -EFAULT // Bad address
+#define TW_IOCTL_ERROR_OS_EINTR -EINTR // Interrupted system call
+#define TW_IOCTL_ERROR_OS_EINVAL -EINVAL // Invalid argument
+#define TW_IOCTL_ERROR_OS_ENOMEM -ENOMEM // Out of memory
+#define TW_IOCTL_ERROR_OS_ERESTARTSYS -ERESTARTSYS // Restart system call
+#define TW_IOCTL_ERROR_OS_EIO -EIO // I/O error
+#define TW_IOCTL_ERROR_OS_ENOTTY -ENOTTY // Not a typewriter
+#define TW_IOCTL_ERROR_OS_ENODEV -ENODEV // No such device
+#define TW_ALLOCATION_LENGTH 128
+#define TW_SENSE_DATA_LENGTH 18
+#define TW_STATUS_CHECK_CONDITION 2
+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
+#define TW_ERROR_UNIT_OFFLINE 0x128
+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
+#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6
+#define TW_DRIVER TW_MESSAGE_SOURCE_LINUX_DRIVER
+#define TW_MESSAGE_SOURCE_LINUX_OS 9
+#define TW_OS TW_MESSAGE_SOURCE_LINUX_OS
+#ifndef PCI_DEVICE_ID_3WARE_9000
+#define PCI_DEVICE_ID_3WARE_9000 0x1002
+#endif
+#ifndef PCI_DEVICE_ID_3WARE_9550SX
+#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
+#endif
+#ifndef PCI_DEVICE_ID_3WARE_9650SE
+#define PCI_DEVICE_ID_3WARE_9650SE 0x1004
+#endif
+#ifndef PCI_DEVICE_ID_3WARE_9690SA
+#define PCI_DEVICE_ID_3WARE_9690SA 0x1005
+#endif
+
+/* Bitmask macros to eliminate bitfields */
+
+/* opcode: 5, reserved: 3 */
+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_OP_OUT(x) (x & 0x1f)
+
+/* opcode: 5, sgloffset: 3 */
+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
+
+/* severity: 3, reserved: 5 */
+#define TW_SEV_OUT(x) (x & 0x7)
+
+/* reserved_1: 4, response_id: 8, reserved_2: 20 */
+#define TW_RESID_OUT(x) ((x >> 4) & 0xff)
+
+/* request_id: 12, lun: 4 */
+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
+
+/* Macros */
+#define TW_CONTROL_REG_ADDR(x) (x->base_addr)
+#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
+#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
+#define TW_COMMAND_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x20)
+#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
+#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
+#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_DISABLE_INTERRUPTS(x) (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_MASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_UNMASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \
+ TW_CONTROL_CLEAR_HOST_INTERRUPT | \
+ TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_MASK_COMMAND_INTERRUPT | \
+ TW_CONTROL_MASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_CLEAR_ERROR_STATUS | \
+ TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_PRINTK(h,a,b,c) { \
+if (h) \
+printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
+else \
+printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
+}
+#define TW_MAX_LUNS(srl) (srl < TW_FW_SRL_LUNS_SUPPORTED ? 1 : 16)
+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 5 : 4)
+#define TW_APACHE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 72 : 109)
+#define TW_ESCALADE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 41 : 62)
+#define TW_PADDING_LENGTH (sizeof(dma_addr_t) > 4 ? 8 : 0)
+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
+
+#pragma pack(1)
+
+/* Scatter Gather List Entry */
+typedef struct TAG_TW_SG_Entry {
+ dma_addr_t address;
+ u32 length;
+} TW_SG_Entry;
+
+/* Command Packet */
+typedef struct TW_Command {
+ unsigned char opcode__sgloffset;
+ unsigned char size;
+ unsigned char request_id;
+ unsigned char unit__hostid;
+ /* Second DWORD */
+ unsigned char status;
+ unsigned char flags;
+ union {
+ unsigned short block_count;
+ unsigned short parameter_count;
+ } byte6_offset;
+ union {
+ struct {
+ u32 lba;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ dma_addr_t padding;
+ } io;
+ struct {
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ u32 padding;
+ dma_addr_t padding2;
+ } param;
+ } byte8_offset;
+} TW_Command;
+
+/* Command Packet for 9000+ controllers */
+typedef struct TAG_TW_Command_Apache {
+ unsigned char opcode__reserved;
+ unsigned char unit;
+ unsigned short request_id__lunl;
+ unsigned char status;
+ unsigned char sgl_offset;
+ unsigned short sgl_entries__lunh;
+ unsigned char cdb[16];
+ TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
+ unsigned char padding[TW_PADDING_LENGTH];
+} TW_Command_Apache;
+
+/* New command packet header */
+typedef struct TAG_TW_Command_Apache_Header {
+ unsigned char sense_data[TW_SENSE_DATA_LENGTH];
+ struct {
+ char reserved[4];
+ unsigned short error;
+ unsigned char padding;
+ unsigned char severity__reserved;
+ } status_block;
+ unsigned char err_specific_desc[98];
+ struct {
+ unsigned char size_header;
+ unsigned short reserved;
+ unsigned char size_sense;
+ } header_desc;
+} TW_Command_Apache_Header;
+
+/* This struct is a union of the 2 command packets */
+typedef struct TAG_TW_Command_Full {
+ TW_Command_Apache_Header header;
+ union {
+ TW_Command oldcommand;
+ TW_Command_Apache newcommand;
+ } command;
+} TW_Command_Full;
+
+/* Initconnection structure */
+typedef struct TAG_TW_Initconnect {
+ unsigned char opcode__reserved;
+ unsigned char size;
+ unsigned char request_id;
+ unsigned char res2;
+ unsigned char status;
+ unsigned char flags;
+ unsigned short message_credits;
+ u32 features;
+ unsigned short fw_srl;
+ unsigned short fw_arch_id;
+ unsigned short fw_branch;
+ unsigned short fw_build;
+ u32 result;
+} TW_Initconnect;
+
+/* Event info structure */
+typedef struct TAG_TW_Event
+{
+ unsigned int sequence_id;
+ unsigned int time_stamp_sec;
+ unsigned short aen_code;
+ unsigned char severity;
+ unsigned char retrieved;
+ unsigned char repeat_count;
+ unsigned char parameter_len;
+ unsigned char parameter_data[98];
+} TW_Event;
+
+typedef struct TAG_TW_Ioctl_Driver_Command {
+ unsigned int control_code;
+ unsigned int status;
+ unsigned int unique_id;
+ unsigned int sequence_id;
+ unsigned int os_specific;
+ unsigned int buffer_length;
+} TW_Ioctl_Driver_Command;
+
+typedef struct TAG_TW_Ioctl_Apache {
+ TW_Ioctl_Driver_Command driver_command;
+ char padding[488];
+ TW_Command_Full firmware_command;
+ char data_buffer[1];
+} TW_Ioctl_Buf_Apache;
+
+/* Lock structure for ioctl get/release lock */
+typedef struct TAG_TW_Lock {
+ unsigned long timeout_msec;
+ unsigned long time_remaining_msec;
+ unsigned long force_flag;
+} TW_Lock;
+
+/* GetParam descriptor */
+typedef struct {
+ unsigned short table_id;
+ unsigned short parameter_id;
+ unsigned short parameter_size_bytes;
+ unsigned short actual_parameter_size_bytes;
+ unsigned char data[1];
+} TW_Param_Apache, *PTW_Param_Apache;
+
+/* Response queue */
+typedef union TAG_TW_Response_Queue {
+ u32 response_id;
+ u32 value;
+} TW_Response_Queue;
+
+/* Compatibility information structure */
+typedef struct TAG_TW_Compatibility_Info
+{
+ char driver_version[32];
+ unsigned short working_srl;
+ unsigned short working_branch;
+ unsigned short working_build;
+ unsigned short driver_srl_high;
+ unsigned short driver_branch_high;
+ unsigned short driver_build_high;
+ unsigned short driver_srl_low;
+ unsigned short driver_branch_low;
+ unsigned short driver_build_low;
+ unsigned short fw_on_ctlr_srl;
+ unsigned short fw_on_ctlr_branch;
+ unsigned short fw_on_ctlr_build;
+} TW_Compatibility_Info;
+
+#pragma pack()
+
+typedef struct TAG_TW_Device_Extension {
+ u32 __iomem *base_addr;
+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
+ dma_addr_t command_packet_phys[TW_Q_LENGTH];
+ struct pci_dev *tw_pci_dev;
+ struct scsi_cmnd *srb[TW_Q_LENGTH];
+ unsigned char free_queue[TW_Q_LENGTH];
+ unsigned char free_head;
+ unsigned char free_tail;
+ unsigned char pending_queue[TW_Q_LENGTH];
+ unsigned char pending_head;
+ unsigned char pending_tail;
+ int state[TW_Q_LENGTH];
+ unsigned int posted_request_count;
+ unsigned int max_posted_request_count;
+ unsigned int pending_request_count;
+ unsigned int max_pending_request_count;
+ unsigned int max_sgl_entries;
+ unsigned int sgl_entries;
+ unsigned int num_resets;
+ unsigned int sector_count;
+ unsigned int max_sector_count;
+ unsigned int aen_count;
+ struct Scsi_Host *host;
+ long flags;
+ int reset_print;
+ TW_Event *event_queue[TW_Q_LENGTH];
+ unsigned char error_index;
+ unsigned char event_queue_wrapped;
+ unsigned int error_sequence_id;
+ int ioctl_sem_lock;
+ u32 ioctl_msec;
+ int chrdev_request_id;
+ wait_queue_head_t ioctl_wqueue;
+ struct mutex ioctl_lock;
+ char aen_clobber;
+ TW_Compatibility_Info tw_compat_info;
+} TW_Device_Extension;
+
+#endif /* _3W_9XXX_H */
+
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
new file mode 100644
index 000000000..f8374850f
--- /dev/null
+++ b/drivers/scsi/3w-sas.c
@@ -0,0 +1,1888 @@
+/*
+ 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
+
+ Written By: Adam Radford <linuxraid@lsi.com>
+
+ Copyright (C) 2009 LSI Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Controllers supported by this driver:
+
+ LSI 3ware 9750 6Gb/s SAS/SATA-RAID
+
+ Bugs/Comments/Suggestions should be mailed to:
+ linuxraid@lsi.com
+
+ For more information, goto:
+ http://www.lsi.com
+
+ History
+ -------
+ 3.26.02.000 - Initial driver release.
+*/
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include "3w-sas.h"
+
+/* Globals */
+#define TW_DRIVER_VERSION "3.26.02.000"
+static DEFINE_MUTEX(twl_chrdev_mutex);
+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
+static unsigned int twl_device_extension_count;
+static int twl_major = -1;
+extern struct timezone sys_tz;
+
+/* Module parameters */
+MODULE_AUTHOR ("LSI");
+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(TW_DRIVER_VERSION);
+
+static int use_msi;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
+/* Function prototypes */
+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
+
+/* Functions */
+
+/* This function returns AENs through sysfs */
+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *outbuf, loff_t offset, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
+ unsigned long flags = 0;
+ ssize_t ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ return ret;
+} /* End twl_sysfs_aen_read() */
+
+/* aen_read sysfs attribute initializer */
+static struct bin_attribute twl_sysfs_aen_read_attr = {
+ .attr = {
+ .name = "3ware_aen_read",
+ .mode = S_IRUSR,
+ },
+ .size = 0,
+ .read = twl_sysfs_aen_read
+};
+
+/* This function returns driver compatibility info through sysfs */
+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *outbuf, loff_t offset, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
+ unsigned long flags = 0;
+ ssize_t ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ return ret;
+} /* End twl_sysfs_compat_info() */
+
+/* compat_info sysfs attribute initializer */
+static struct bin_attribute twl_sysfs_compat_info_attr = {
+ .attr = {
+ .name = "3ware_compat_info",
+ .mode = S_IRUSR,
+ },
+ .size = 0,
+ .read = twl_sysfs_compat_info
+};
+
+/* Show some statistics about the card */
+static ssize_t twl_show_stats(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+ unsigned long flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ return len;
+} /* End twl_show_stats() */
+
+/* stats sysfs attribute initializer */
+static struct device_attribute twl_host_stats_attr = {
+ .attr = {
+ .name = "3ware_stats",
+ .mode = S_IRUGO,
+ },
+ .show = twl_show_stats
+};
+
+/* Host attributes initializer */
+static struct device_attribute *twl_host_attrs[] = {
+ &twl_host_stats_attr,
+ NULL,
+};
+
+/* This function will look up an AEN severity string */
+static char *twl_aen_severity_lookup(unsigned char severity_code)
+{
+ char *retval = NULL;
+
+ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
+ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
+ goto out;
+
+ retval = twl_aen_severity_table[severity_code];
+out:
+ return retval;
+} /* End twl_aen_severity_lookup() */
+
+/* This function will queue an event */
+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
+{
+ u32 local_time;
+ struct timeval time;
+ TW_Event *event;
+ unsigned short aen;
+ char host[16];
+ char *error_str;
+
+ tw_dev->aen_count++;
+
+ /* Fill out event info */
+ event = tw_dev->event_queue[tw_dev->error_index];
+
+ host[0] = '\0';
+ if (tw_dev->host)
+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
+
+ aen = le16_to_cpu(header->status_block.error);
+ memset(event, 0, sizeof(TW_Event));
+
+ event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
+ do_gettimeofday(&time);
+ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ event->time_stamp_sec = local_time;
+ event->aen_code = aen;
+ event->retrieved = TW_AEN_NOT_RETRIEVED;
+ event->sequence_id = tw_dev->error_sequence_id;
+ tw_dev->error_sequence_id++;
+
+ /* Check for embedded error string */
+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
+
+ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
+ event->parameter_len = strlen(header->err_specific_desc);
+ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
+ if (event->severity != TW_AEN_SEVERITY_DEBUG)
+ printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
+ host,
+ twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
+ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
+ header->err_specific_desc);
+ else
+ tw_dev->aen_count--;
+
+ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
+} /* End twl_aen_queue_event() */
+
+/* This function will attempt to post a command packet to the board */
+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
+{
+ dma_addr_t command_que_value;
+
+ command_que_value = tw_dev->command_packet_phys[request_id];
+ command_que_value += TW_COMMAND_OFFSET;
+
+ /* First write upper 4 bytes */
+ writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
+ /* Then the lower 4 bytes */
+ writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
+
+ tw_dev->state[request_id] = TW_S_POSTED;
+ tw_dev->posted_request_count++;
+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
+
+ return 0;
+} /* End twl_post_command_packet() */
+
+/* This function hands scsi cdb's to the firmware */
+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Command_Apache *command_packet;
+ int i, sg_count;
+ struct scsi_cmnd *srb = NULL;
+ struct scatterlist *sglist = NULL, *sg;
+ int retval = 1;
+
+ if (tw_dev->srb[request_id]) {
+ srb = tw_dev->srb[request_id];
+ if (scsi_sglist(srb))
+ sglist = scsi_sglist(srb);
+ }
+
+ /* Initialize command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ full_command_packet->header.header_desc.size_header = 128;
+ full_command_packet->header.status_block.error = 0;
+ full_command_packet->header.status_block.severity__reserved = 0;
+
+ command_packet = &full_command_packet->command.newcommand;
+ command_packet->status = 0;
+ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
+
+ /* We forced 16 byte cdb use earlier */
+ if (!cdb)
+ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
+ else
+ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
+
+ if (srb) {
+ command_packet->unit = srb->device->id;
+ command_packet->request_id__lunl =
+ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+ } else {
+ command_packet->request_id__lunl =
+ cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+ command_packet->unit = 0;
+ }
+
+ command_packet->sgl_offset = 16;
+
+ if (!sglistarg) {
+ /* Map sglist from scsi layer to cmd packet */
+ if (scsi_sg_count(srb)) {
+ sg_count = scsi_dma_map(srb);
+ if (sg_count <= 0)
+ goto out;
+
+ scsi_for_each_sg(srb, sg, sg_count, i) {
+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
+ }
+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+ }
+ } else {
+ /* Internal cdb post */
+ for (i = 0; i < use_sg; i++) {
+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
+ }
+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+ }
+
+ /* Update some stats */
+ if (srb) {
+ tw_dev->sector_count = scsi_bufflen(srb) / 512;
+ if (tw_dev->sector_count > tw_dev->max_sector_count)
+ tw_dev->max_sector_count = tw_dev->sector_count;
+ tw_dev->sgl_entries = scsi_sg_count(srb);
+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
+ }
+
+ /* Now post the command to the board */
+ retval = twl_post_command_packet(tw_dev, request_id);
+
+out:
+ return retval;
+} /* End twl_scsiop_execute_scsi() */
+
+/* This function will read the aen queue from the isr */
+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+{
+ char cdb[TW_MAX_CDB_LEN];
+ TW_SG_Entry_ISO sglist[1];
+ TW_Command_Full *full_command_packet;
+ int retval = 1;
+
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+ /* Initialize cdb */
+ memset(&cdb, 0, TW_MAX_CDB_LEN);
+ cdb[0] = REQUEST_SENSE; /* opcode */
+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+ /* Initialize sglist */
+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
+ sglist[0].length = TW_SECTOR_SIZE;
+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+ /* Mark internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Now post the command packet */
+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
+ goto out;
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twl_aen_read_queue() */
+
+/* This function will sync firmware time with the host time */
+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
+{
+ u32 schedulertime;
+ struct timeval utc;
+ TW_Command_Full *full_command_packet;
+ TW_Command *command_packet;
+ TW_Param_Apache *param;
+ u32 local_time;
+
+ /* Fill out the command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+ command_packet = &full_command_packet->command.oldcommand;
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
+ command_packet->request_id = request_id;
+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
+
+ /* Setup the param */
+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+ memset(param, 0, TW_SECTOR_SIZE);
+ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
+ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
+ param->parameter_size_bytes = cpu_to_le16(4);
+
+ /* Convert system time in UTC to local time seconds since last
+ Sunday 12:00AM */
+ do_gettimeofday(&utc);
+ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
+ schedulertime = local_time - (3 * 86400);
+ schedulertime = cpu_to_le32(schedulertime % 604800);
+
+ memcpy(param->data, &schedulertime, sizeof(u32));
+
+ /* Mark internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Now post the command */
+ twl_post_command_packet(tw_dev, request_id);
+} /* End twl_aen_sync_time() */
+
+/* This function will assign an available request id */
+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
+{
+ *request_id = tw_dev->free_queue[tw_dev->free_head];
+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
+ tw_dev->state[*request_id] = TW_S_STARTED;
+} /* End twl_get_request_id() */
+
+/* This function will free a request id */
+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
+{
+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
+ tw_dev->state[request_id] = TW_S_FINISHED;
+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
+} /* End twl_free_request_id() */
+
+/* This function will complete an aen request from the isr */
+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Command *command_packet;
+ TW_Command_Apache_Header *header;
+ unsigned short aen;
+ int retval = 1;
+
+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+ tw_dev->posted_request_count--;
+ aen = le16_to_cpu(header->status_block.error);
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ command_packet = &full_command_packet->command.oldcommand;
+
+ /* First check for internal completion of set param for time sync */
+ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
+ /* Keep reading the queue in case there are more aen's */
+ if (twl_aen_read_queue(tw_dev, request_id))
+ goto out2;
+ else {
+ retval = 0;
+ goto out;
+ }
+ }
+
+ switch (aen) {
+ case TW_AEN_QUEUE_EMPTY:
+ /* Quit reading the queue if this is the last one */
+ break;
+ case TW_AEN_SYNC_TIME_WITH_HOST:
+ twl_aen_sync_time(tw_dev, request_id);
+ retval = 0;
+ goto out;
+ default:
+ twl_aen_queue_event(tw_dev, header);
+
+ /* If there are more aen's, keep reading the queue */
+ if (twl_aen_read_queue(tw_dev, request_id))
+ goto out2;
+ else {
+ retval = 0;
+ goto out;
+ }
+ }
+ retval = 0;
+out2:
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twl_free_request_id(tw_dev, request_id);
+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+out:
+ return retval;
+} /* End twl_aen_complete() */
+
+/* This function will poll for a response */
+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
+{
+ unsigned long before;
+ dma_addr_t mfa;
+ u32 regh, regl;
+ u32 response;
+ int retval = 1;
+ int found = 0;
+
+ before = jiffies;
+
+ while (!found) {
+ if (sizeof(dma_addr_t) > 4) {
+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+ mfa = ((u64)regh << 32) | regl;
+ } else
+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+
+ response = (u32)mfa;
+
+ if (TW_RESID_OUT(response) == request_id)
+ found = 1;
+
+ if (time_after(jiffies, before + HZ * seconds))
+ goto out;
+
+ msleep(50);
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twl_poll_response() */
+
+/* This function will drain the aen queue */
+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
+{
+ int request_id = 0;
+ char cdb[TW_MAX_CDB_LEN];
+ TW_SG_Entry_ISO sglist[1];
+ int finished = 0, count = 0;
+ TW_Command_Full *full_command_packet;
+ TW_Command_Apache_Header *header;
+ unsigned short aen;
+ int first_reset = 0, queue = 0, retval = 1;
+
+ if (no_check_reset)
+ first_reset = 0;
+ else
+ first_reset = 1;
+
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+
+ /* Initialize cdb */
+ memset(&cdb, 0, TW_MAX_CDB_LEN);
+ cdb[0] = REQUEST_SENSE; /* opcode */
+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
+
+ /* Initialize sglist */
+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
+ sglist[0].length = TW_SECTOR_SIZE;
+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+
+ /* Mark internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ do {
+ /* Send command to the board */
+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
+ goto out;
+ }
+
+ /* Now poll for completion */
+ if (twl_poll_response(tw_dev, request_id, 30)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
+ tw_dev->posted_request_count--;
+ goto out;
+ }
+
+ tw_dev->posted_request_count--;
+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
+ aen = le16_to_cpu(header->status_block.error);
+ queue = 0;
+ count++;
+
+ switch (aen) {
+ case TW_AEN_QUEUE_EMPTY:
+ if (first_reset != 1)
+ goto out;
+ else
+ finished = 1;
+ break;
+ case TW_AEN_SOFT_RESET:
+ if (first_reset == 0)
+ first_reset = 1;
+ else
+ queue = 1;
+ break;
+ case TW_AEN_SYNC_TIME_WITH_HOST:
+ break;
+ default:
+ queue = 1;
+ }
+
+ /* Now queue an event info */
+ if (queue)
+ twl_aen_queue_event(tw_dev, header);
+ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
+
+ if (count == TW_MAX_AEN_DRAIN)
+ goto out;
+
+ retval = 0;
+out:
+ tw_dev->state[request_id] = TW_S_INITIAL;
+ return retval;
+} /* End twl_aen_drain_queue() */
+
+/* This function will allocate memory and check if it is correctly aligned */
+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
+{
+ int i;
+ dma_addr_t dma_handle;
+ unsigned long *cpu_addr;
+ int retval = 1;
+
+ cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
+ &dma_handle);
+ if (!cpu_addr) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
+ goto out;
+ }
+
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ switch(which) {
+ case 0:
+ tw_dev->command_packet_phys[i] = dma_handle+(i*size);
+ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ case 1:
+ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
+ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ case 2:
+ tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
+ tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ }
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twl_allocate_memory() */
+
+/* This function will load the request id and various sgls for ioctls */
+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
+{
+ TW_Command *oldcommand;
+ TW_Command_Apache *newcommand;
+ TW_SG_Entry_ISO *sgl;
+ unsigned int pae = 0;
+
+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
+ pae = 1;
+
+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
+ newcommand = &full_command_packet->command.newcommand;
+ newcommand->request_id__lunl =
+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+ if (length) {
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
+ }
+ newcommand->sgl_entries__lunh =
+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+ } else {
+ oldcommand = &full_command_packet->command.oldcommand;
+ oldcommand->request_id = request_id;
+
+ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
+ /* Load the sg list */
+ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->length = TW_CPU_TO_SGL(length);
+ oldcommand->size += pae;
+ oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
+ }
+ }
+} /* End twl_load_sgl() */
+
+/* This function handles ioctl for the character device
+ This interface is used by smartmontools open source software */
+static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long timeout;
+ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
+ dma_addr_t dma_handle;
+ int request_id = 0;
+ TW_Ioctl_Driver_Command driver_command;
+ struct inode *inode = file_inode(file);
+ TW_Ioctl_Buf_Apache *tw_ioctl;
+ TW_Command_Full *full_command_packet;
+ TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
+ int retval = -EFAULT;
+ void __user *argp = (void __user *)arg;
+
+ mutex_lock(&twl_chrdev_mutex);
+
+ /* Only let one of these through at a time */
+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
+ retval = -EINTR;
+ goto out;
+ }
+
+ /* First copy down the driver command */
+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
+ goto out2;
+
+ /* Check data buffer size */
+ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
+ retval = -EINVAL;
+ goto out2;
+ }
+
+ /* Hardware can only do multiple of 512 byte transfers */
+ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
+
+ /* Now allocate ioctl buf memory */
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ if (!cpu_addr) {
+ retval = -ENOMEM;
+ goto out2;
+ }
+
+ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
+
+ /* Now copy down the entire ioctl */
+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ goto out3;
+
+ /* See which ioctl we are doing */
+ switch (cmd) {
+ case TW_IOCTL_FIRMWARE_PASS_THROUGH:
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ twl_get_request_id(tw_dev, &request_id);
+
+ /* Flag internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Flag chrdev ioctl */
+ tw_dev->chrdev_request_id = request_id;
+
+ full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
+
+ /* Load request id and sglist for both command types */
+ twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
+
+ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
+
+ /* Now post the command packet to the controller */
+ twl_post_command_packet(tw_dev, request_id);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
+
+ /* Now wait for command to complete */
+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
+
+ /* We timed out, and didn't get an interrupt */
+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
+ /* Now we need to reset the board */
+ printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
+ tw_dev->host->host_no, TW_DRIVER, 0x6,
+ cmd);
+ retval = -EIO;
+ twl_reset_device_extension(tw_dev, 1);
+ goto out3;
+ }
+
+ /* Now copy in the command packet response */
+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
+
+ /* Now complete the io */
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twl_free_request_id(tw_dev, request_id);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ break;
+ default:
+ retval = -ENOTTY;
+ goto out3;
+ }
+
+ /* Now copy the entire response to userspace */
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ retval = 0;
+out3:
+ /* Now free ioctl buf memory */
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+out2:
+ mutex_unlock(&tw_dev->ioctl_lock);
+out:
+ mutex_unlock(&twl_chrdev_mutex);
+ return retval;
+} /* End twl_chrdev_ioctl() */
+
+/* This function handles open for the character device */
+static int twl_chrdev_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor_number;
+ int retval = -ENODEV;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ retval = -EACCES;
+ goto out;
+ }
+
+ minor_number = iminor(inode);
+ if (minor_number >= twl_device_extension_count)
+ goto out;
+ retval = 0;
+out:
+ return retval;
+} /* End twl_chrdev_open() */
+
+/* File operations struct for character device */
+static const struct file_operations twl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = twl_chrdev_ioctl,
+ .open = twl_chrdev_open,
+ .release = NULL,
+ .llseek = noop_llseek,
+};
+
+/* This function passes sense data from firmware to scsi layer */
+static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
+{
+ TW_Command_Apache_Header *header;
+ TW_Command_Full *full_command_packet;
+ unsigned short error;
+ char *error_str;
+ int retval = 1;
+
+ header = tw_dev->sense_buffer_virt[i];
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+
+ /* Get embedded firmware error string */
+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
+
+ /* Don't print error for Logical unit not supported during rollcall */
+ error = le16_to_cpu(header->status_block.error);
+ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
+ if (print_host)
+ printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
+ tw_dev->host->host_no,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
+ header->status_block.error,
+ error_str,
+ header->err_specific_desc);
+ else
+ printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
+ header->status_block.error,
+ error_str,
+ header->err_specific_desc);
+ }
+
+ if (copy_sense) {
+ memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
+ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
+ goto out;
+ }
+out:
+ return retval;
+} /* End twl_fill_sense() */
+
+/* This function will free up device extension resources */
+static void twl_free_device_extension(TW_Device_Extension *tw_dev)
+{
+ if (tw_dev->command_packet_virt[0])
+ pci_free_consistent(tw_dev->tw_pci_dev,
+ sizeof(TW_Command_Full)*TW_Q_LENGTH,
+ tw_dev->command_packet_virt[0],
+ tw_dev->command_packet_phys[0]);
+
+ if (tw_dev->generic_buffer_virt[0])
+ pci_free_consistent(tw_dev->tw_pci_dev,
+ TW_SECTOR_SIZE*TW_Q_LENGTH,
+ tw_dev->generic_buffer_virt[0],
+ tw_dev->generic_buffer_phys[0]);
+
+ if (tw_dev->sense_buffer_virt[0])
+ pci_free_consistent(tw_dev->tw_pci_dev,
+ sizeof(TW_Command_Apache_Header)*
+ TW_Q_LENGTH,
+ tw_dev->sense_buffer_virt[0],
+ tw_dev->sense_buffer_phys[0]);
+
+ kfree(tw_dev->event_queue[0]);
+} /* End twl_free_device_extension() */
+
+/* This function will get parameter table entries from the firmware */
+static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Command *command_packet;
+ TW_Param_Apache *param;
+ void *retval = NULL;
+
+ /* Setup the command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+ command_packet = &full_command_packet->command.oldcommand;
+
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->request_id = request_id;
+ command_packet->byte6_offset.block_count = cpu_to_le16(1);
+
+ /* Now setup the param */
+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
+ memset(param, 0, TW_SECTOR_SIZE);
+ param->table_id = cpu_to_le16(table_id | 0x8000);
+ param->parameter_id = cpu_to_le16(parameter_id);
+ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
+
+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
+
+ /* Post the command packet to the board */
+ twl_post_command_packet(tw_dev, request_id);
+
+ /* Poll for completion */
+ if (twl_poll_response(tw_dev, request_id, 30))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
+ else
+ retval = (void *)&(param->data[0]);
+
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_INITIAL;
+
+ return retval;
+} /* End twl_get_param() */
+
+/* This function will send an initconnection command to controller */
+static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
+ u32 *init_connect_result)
+{
+ TW_Command_Full *full_command_packet;
+ TW_Initconnect *tw_initconnect;
+ int request_id = 0, retval = 1;
+
+ /* Initialize InitConnection command packet */
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
+ full_command_packet->header.header_desc.size_header = 128;
+
+ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
+ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
+ tw_initconnect->request_id = request_id;
+ tw_initconnect->message_credits = cpu_to_le16(message_credits);
+ tw_initconnect->features = set_features;
+
+ /* Turn on 64-bit sgl support if we need to */
+ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+
+ tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+
+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
+ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
+ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
+ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
+ tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
+ } else
+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
+
+ /* Send command packet to the board */
+ twl_post_command_packet(tw_dev, request_id);
+
+ /* Poll for completion */
+ if (twl_poll_response(tw_dev, request_id, 30)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
+ } else {
+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
+ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
+ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
+ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
+ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
+ *init_connect_result = le32_to_cpu(tw_initconnect->result);
+ }
+ retval = 0;
+ }
+
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_INITIAL;
+
+ return retval;
+} /* End twl_initconnection() */
+
+/* This function will initialize the fields of a device extension */
+static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
+{
+ int i, retval = 1;
+
+ /* Initialize command packet buffers */
+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
+ goto out;
+ }
+
+ /* Initialize generic buffer */
+ if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
+ goto out;
+ }
+
+ /* Allocate sense buffers */
+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
+ goto out;
+ }
+
+ /* Allocate event info space */
+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
+ if (!tw_dev->event_queue[0]) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
+ goto out;
+ }
+
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
+ tw_dev->free_queue[i] = i;
+ tw_dev->state[i] = TW_S_INITIAL;
+ }
+
+ tw_dev->free_head = TW_Q_START;
+ tw_dev->free_tail = TW_Q_START;
+ tw_dev->error_sequence_id = 1;
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+ mutex_init(&tw_dev->ioctl_lock);
+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
+
+ retval = 0;
+out:
+ return retval;
+} /* End twl_initialize_device_extension() */
+
+/* This function will handle attention interrupts */
+static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
+{
+ int retval = 1;
+ u32 request_id, doorbell;
+
+ /* Read doorbell status */
+ doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
+
+ /* Check for controller errors */
+ if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
+ goto out;
+ }
+
+ /* Check if we need to perform an AEN drain */
+ if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
+ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
+ twl_get_request_id(tw_dev, &request_id);
+ if (twl_aen_read_queue(tw_dev, request_id)) {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twl_free_request_id(tw_dev, request_id);
+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
+ }
+ }
+ }
+
+ retval = 0;
+out:
+ /* Clear doorbell interrupt */
+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
+
+ /* Make sure the clear was flushed by reading it back */
+ readl(TWL_HOBDBC_REG_ADDR(tw_dev));
+
+ return retval;
+} /* End twl_handle_attention_interrupt() */
+
+/* Interrupt service routine */
+static irqreturn_t twl_interrupt(int irq, void *dev_instance)
+{
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
+ int i, handled = 0, error = 0;
+ dma_addr_t mfa = 0;
+ u32 reg, regl, regh, response, request_id = 0;
+ struct scsi_cmnd *cmd;
+ TW_Command_Full *full_command_packet;
+
+ spin_lock(tw_dev->host->host_lock);
+
+ /* Read host interrupt status */
+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
+
+ /* Check if this is our interrupt, otherwise bail */
+ if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
+ goto twl_interrupt_bail;
+
+ handled = 1;
+
+ /* If we are resetting, bail */
+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
+ goto twl_interrupt_bail;
+
+ /* Attention interrupt */
+ if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
+ if (twl_handle_attention_interrupt(tw_dev)) {
+ TWL_MASK_INTERRUPTS(tw_dev);
+ goto twl_interrupt_bail;
+ }
+ }
+
+ /* Response interrupt */
+ while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
+ if (sizeof(dma_addr_t) > 4) {
+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+ mfa = ((u64)regh << 32) | regl;
+ } else
+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
+
+ error = 0;
+ response = (u32)mfa;
+
+ /* Check for command packet error */
+ if (!TW_NOTMFA_OUT(response)) {
+ for (i=0;i<TW_Q_LENGTH;i++) {
+ if (tw_dev->sense_buffer_phys[i] == mfa) {
+ request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
+ if (tw_dev->srb[request_id] != NULL)
+ error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
+ else {
+ /* Skip ioctl error prints */
+ if (request_id != tw_dev->chrdev_request_id)
+ error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
+ else
+ memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
+ }
+
+ /* Now re-post the sense buffer */
+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
+ break;
+ }
+ }
+ } else
+ request_id = TW_RESID_OUT(response);
+
+ full_command_packet = tw_dev->command_packet_virt[request_id];
+
+ /* Check for correct state */
+ if (tw_dev->state[request_id] != TW_S_POSTED) {
+ if (tw_dev->srb[request_id] != NULL) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
+ TWL_MASK_INTERRUPTS(tw_dev);
+ goto twl_interrupt_bail;
+ }
+ }
+
+ /* Check for internal command completion */
+ if (tw_dev->srb[request_id] == NULL) {
+ if (request_id != tw_dev->chrdev_request_id) {
+ if (twl_aen_complete(tw_dev, request_id))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
+ } else {
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+ wake_up(&tw_dev->ioctl_wqueue);
+ }
+ } else {
+ cmd = tw_dev->srb[request_id];
+
+ if (!error)
+ cmd->result = (DID_OK << 16);
+
+ /* Report residual bytes for single sgl */
+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+ }
+
+ /* Now complete the io */
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twl_free_request_id(tw_dev, request_id);
+ tw_dev->posted_request_count--;
+ }
+
+ /* Check for another response interrupt */
+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
+ }
+
+twl_interrupt_bail:
+ spin_unlock(tw_dev->host->host_lock);
+ return IRQ_RETVAL(handled);
+} /* End twl_interrupt() */
+
+/* This function will poll for a register change */
+static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
+{
+ unsigned long before;
+ int retval = 1;
+ u32 reg_value;
+
+ reg_value = readl(reg);
+ before = jiffies;
+
+ while ((reg_value & value) != result) {
+ reg_value = readl(reg);
+ if (time_after(jiffies, before + HZ * seconds))
+ goto out;
+ msleep(50);
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End twl_poll_register() */
+
+/* This function will reset a controller */
+static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
+{
+ int retval = 1;
+ int i = 0;
+ u32 status = 0;
+ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
+ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
+ u32 init_connect_result = 0;
+ int tries = 0;
+ int do_soft_reset = soft_reset;
+
+ while (tries < TW_MAX_RESET_TRIES) {
+ /* Do a soft reset if one is needed */
+ if (do_soft_reset) {
+ TWL_SOFT_RESET(tw_dev);
+
+ /* Make sure controller is in a good state */
+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
+ tries++;
+ continue;
+ }
+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
+ tries++;
+ continue;
+ }
+ }
+
+ /* Initconnect */
+ if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
+ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
+ TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
+ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
+ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
+ &fw_on_ctlr_build, &init_connect_result)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+
+ /* Load sense buffers */
+ while (i < TW_Q_LENGTH) {
+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
+
+ /* Check status for over-run after each write */
+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
+ if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
+ i++;
+ }
+
+ /* Now check status */
+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
+ if (status) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+
+ /* Drain the AEN queue */
+ if (twl_aen_drain_queue(tw_dev, soft_reset)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+
+ /* Load rest of compatibility struct */
+ strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
+ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
+ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
+ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
+ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
+ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
+ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
+ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
+ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
+ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
+
+ /* If we got here, controller is in a good state */
+ retval = 0;
+ goto out;
+ }
+out:
+ return retval;
+} /* End twl_reset_sequence() */
+
+/* This function will reset a device extension */
+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
+{
+ int i = 0, retval = 1;
+ unsigned long flags = 0;
+
+ /* Block SCSI requests while we are resetting */
+ if (ioctl_reset)
+ scsi_block_requests(tw_dev->host);
+
+ set_bit(TW_IN_RESET, &tw_dev->flags);
+ TWL_MASK_INTERRUPTS(tw_dev);
+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+
+ /* Abort all requests that are in progress */
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ (tw_dev->state[i] != TW_S_INITIAL) &&
+ (tw_dev->state[i] != TW_S_COMPLETED)) {
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ if (cmd) {
+ cmd->result = (DID_RESET << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+ }
+ }
+ }
+
+ /* Reset queues and counts */
+ for (i = 0; i < TW_Q_LENGTH; i++) {
+ tw_dev->free_queue[i] = i;
+ tw_dev->state[i] = TW_S_INITIAL;
+ }
+ tw_dev->free_head = TW_Q_START;
+ tw_dev->free_tail = TW_Q_START;
+ tw_dev->posted_request_count = 0;
+
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ if (twl_reset_sequence(tw_dev, 1))
+ goto out;
+
+ TWL_UNMASK_INTERRUPTS(tw_dev);
+
+ clear_bit(TW_IN_RESET, &tw_dev->flags);
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+ retval = 0;
+out:
+ if (ioctl_reset)
+ scsi_unblock_requests(tw_dev->host);
+ return retval;
+} /* End twl_reset_device_extension() */
+
+/* This funciton returns unit geometry in cylinders/heads/sectors */
+static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+{
+ int heads, sectors;
+ TW_Device_Extension *tw_dev;
+
+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ } else {
+ heads = 64;
+ sectors = 32;
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = sector_div(capacity, heads * sectors); /* cylinders */
+
+ return 0;
+} /* End twl_scsi_biosparam() */
+
+/* This is the new scsi eh reset function */
+static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
+{
+ TW_Device_Extension *tw_dev = NULL;
+ int retval = FAILED;
+
+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+ tw_dev->num_resets++;
+
+ sdev_printk(KERN_WARNING, SCpnt->device,
+ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
+ TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
+
+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
+ mutex_lock(&tw_dev->ioctl_lock);
+
+ /* Now reset the card and some of the device extension data */
+ if (twl_reset_device_extension(tw_dev, 0)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ mutex_unlock(&tw_dev->ioctl_lock);
+ return retval;
+} /* End twl_scsi_eh_reset() */
+
+/* This is the main scsi queue function to handle scsi opcodes */
+static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ int request_id, retval;
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+ /* If we are resetting due to timed out ioctl, report as busy */
+ if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /* Save done function into scsi_cmnd struct */
+ SCpnt->scsi_done = done;
+
+ /* Get a free request id */
+ twl_get_request_id(tw_dev, &request_id);
+
+ /* Save the scsi command for use by the ISR */
+ tw_dev->srb[request_id] = SCpnt;
+
+ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ if (retval) {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twl_free_request_id(tw_dev, request_id);
+ SCpnt->result = (DID_ERROR << 16);
+ done(SCpnt);
+ retval = 0;
+ }
+out:
+ return retval;
+} /* End twl_scsi_queue() */
+
+static DEF_SCSI_QCMD(twl_scsi_queue)
+
+/* This function tells the controller to shut down */
+static void __twl_shutdown(TW_Device_Extension *tw_dev)
+{
+ /* Disable interrupts */
+ TWL_MASK_INTERRUPTS(tw_dev);
+
+ /* Free up the IRQ */
+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+ printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
+
+ /* Tell the card we are shutting down */
+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
+ } else {
+ printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
+ }
+
+ /* Clear doorbell interrupt just before exit */
+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
+} /* End __twl_shutdown() */
+
+/* Wrapper for __twl_shutdown */
+static void twl_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev;
+
+ if (!host)
+ return;
+
+ tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ if (tw_dev->online)
+ __twl_shutdown(tw_dev);
+} /* End twl_shutdown() */
+
+/* This function configures unit settings when a unit is coming on-line */
+static int twl_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End twl_slave_configure() */
+
+/* scsi_host_template initializer */
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "3w-sas",
+ .queuecommand = twl_scsi_queue,
+ .eh_host_reset_handler = twl_scsi_eh_reset,
+ .bios_param = twl_scsi_biosparam,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = TW_Q_LENGTH-2,
+ .slave_configure = twl_slave_configure,
+ .this_id = -1,
+ .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
+ .max_sectors = TW_MAX_SECTORS,
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = twl_host_attrs,
+ .emulated = 1,
+ .no_write_same = 1,
+};
+
+/* This function will probe and initialize a card */
+static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+{
+ struct Scsi_Host *host = NULL;
+ TW_Device_Extension *tw_dev;
+ int retval = -ENODEV;
+ int *ptr_phycount, phycount=0;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
+ if (!host) {
+ TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
+ retval = -ENOMEM;
+ goto out_disable_device;
+ }
+ tw_dev = shost_priv(host);
+
+ /* Save values to device extension */
+ tw_dev->host = host;
+ tw_dev->tw_pci_dev = pdev;
+
+ if (twl_initialize_device_extension(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+ goto out_free_device_extension;
+ }
+
+ /* Request IO regions */
+ retval = pci_request_regions(pdev, "3w-sas");
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
+ goto out_free_device_extension;
+ }
+
+ /* Save base address, use region 1 */
+ tw_dev->base_addr = pci_iomap(pdev, 1, 0);
+ if (!tw_dev->base_addr) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+ goto out_release_mem_region;
+ }
+
+ /* Disable interrupts on the card */
+ TWL_MASK_INTERRUPTS(tw_dev);
+
+ /* Initialize the card */
+ if (twl_reset_sequence(tw_dev, 0)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+ goto out_iounmap;
+ }
+
+ /* Set host specific parameters */
+ host->max_id = TW_MAX_UNITS;
+ host->max_cmd_len = TW_MAX_CDB_LEN;
+ host->max_lun = TW_MAX_LUNS;
+ host->max_channel = 0;
+
+ /* Register the card with the kernel SCSI layer */
+ retval = scsi_add_host(host, &pdev->dev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
+ goto out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, host);
+
+ printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
+ host->host_no,
+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
+ TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
+ (u64)pci_resource_start(pdev, 1), pdev->irq);
+
+ ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
+ TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
+ if (ptr_phycount)
+ phycount = le32_to_cpu(*(int *)ptr_phycount);
+
+ printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
+ host->host_no,
+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
+ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
+ (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
+ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
+ phycount);
+
+ /* Try to enable MSI */
+ if (use_msi && !pci_enable_msi(pdev))
+ set_bit(TW_USING_MSI, &tw_dev->flags);
+
+ /* Now setup the interrupt handler */
+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
+ goto out_remove_host;
+ }
+
+ twl_device_extension_list[twl_device_extension_count] = tw_dev;
+ twl_device_extension_count++;
+
+ /* Re-enable interrupts on the card */
+ TWL_UNMASK_INTERRUPTS(tw_dev);
+
+ /* Finally, scan the host */
+ scsi_scan_host(host);
+
+ /* Add sysfs binary files */
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
+
+ if (twl_major == -1) {
+ if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
+ TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
+ }
+ tw_dev->online = 1;
+ return 0;
+
+out_remove_host:
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+ scsi_remove_host(host);
+out_iounmap:
+ iounmap(tw_dev->base_addr);
+out_release_mem_region:
+ pci_release_regions(pdev);
+out_free_device_extension:
+ twl_free_device_extension(tw_dev);
+ scsi_host_put(host);
+out_disable_device:
+ pci_disable_device(pdev);
+
+ return retval;
+} /* End twl_probe() */
+
+/* This function is called to remove a device */
+static void twl_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev;
+
+ if (!host)
+ return;
+
+ tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ if (!tw_dev->online)
+ return;
+
+ /* Remove sysfs binary files */
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
+
+ scsi_remove_host(tw_dev->host);
+
+ /* Unregister character device */
+ if (twl_major >= 0) {
+ unregister_chrdev(twl_major, "twl");
+ twl_major = -1;
+ }
+
+ /* Shutdown the card */
+ __twl_shutdown(tw_dev);
+
+ /* Disable MSI if enabled */
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_disable_msi(pdev);
+
+ /* Free IO remapping */
+ iounmap(tw_dev->base_addr);
+
+ /* Free up the mem region */
+ pci_release_regions(pdev);
+
+ /* Free up device extension resources */
+ twl_free_device_extension(tw_dev);
+
+ scsi_host_put(tw_dev->host);
+ pci_disable_device(pdev);
+ twl_device_extension_count--;
+} /* End twl_remove() */
+
+#ifdef CONFIG_PM
+/* This function is called on PCI suspend */
+static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
+ /* Disable interrupts */
+ TWL_MASK_INTERRUPTS(tw_dev);
+
+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+ /* Tell the card we are shutting down */
+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
+ } else {
+ printk(KERN_WARNING "3w-sas: Suspend complete.\n");
+ }
+
+ /* Clear doorbell interrupt */
+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+} /* End twl_suspend() */
+
+/* This function is called on PCI resume */
+static int twl_resume(struct pci_dev *pdev)
+{
+ int retval = 0;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
+ return retval;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ /* Initialize the card */
+ if (twl_reset_sequence(tw_dev, 0)) {
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ /* Now setup the interrupt handler */
+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
+ if (retval) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
+
+ /* Now enable MSI if enabled */
+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
+ pci_enable_msi(pdev);
+
+ /* Re-enable interrupts on the card */
+ TWL_UNMASK_INTERRUPTS(tw_dev);
+
+ printk(KERN_WARNING "3w-sas: Resume complete.\n");
+ return 0;
+
+out_disable_device:
+ scsi_remove_host(host);
+ pci_disable_device(pdev);
+
+ return retval;
+} /* End twl_resume() */
+#endif
+
+/* PCI Devices supported by this driver */
+static struct pci_device_id twl_pci_tbl[] = {
+ { PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
+
+/* pci_driver initializer */
+static struct pci_driver twl_driver = {
+ .name = "3w-sas",
+ .id_table = twl_pci_tbl,
+ .probe = twl_probe,
+ .remove = twl_remove,
+#ifdef CONFIG_PM
+ .suspend = twl_suspend,
+ .resume = twl_resume,
+#endif
+ .shutdown = twl_shutdown
+};
+
+/* This function is called on driver initialization */
+static int __init twl_init(void)
+{
+ printk(KERN_INFO "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
+
+ return pci_register_driver(&twl_driver);
+} /* End twl_init() */
+
+/* This function is called on driver exit */
+static void __exit twl_exit(void)
+{
+ pci_unregister_driver(&twl_driver);
+} /* End twl_exit() */
+
+module_init(twl_init);
+module_exit(twl_exit);
+
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
new file mode 100644
index 000000000..fec6449c7
--- /dev/null
+++ b/drivers/scsi/3w-sas.h
@@ -0,0 +1,392 @@
+/*
+ 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
+
+ Written By: Adam Radford <linuxraid@lsi.com>
+
+ Copyright (C) 2009 LSI Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
+ linuxraid@lsi.com
+
+ For more information, goto:
+ http://www.lsi.com
+*/
+
+#ifndef _3W_SAS_H
+#define _3W_SAS_H
+
+/* AEN severity table */
+static char *twl_aen_severity_table[] =
+{
+ "None", "ERROR", "WARNING", "INFO", "DEBUG", NULL
+};
+
+/* Liberator register offsets */
+#define TWL_STATUS 0x0 /* Status */
+#define TWL_HIBDB 0x20 /* Inbound doorbell */
+#define TWL_HISTAT 0x30 /* Host interrupt status */
+#define TWL_HIMASK 0x34 /* Host interrupt mask */
+#define TWL_HOBDB 0x9C /* Outbound doorbell */
+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
+#define TWL_SCRPD3 0xBC /* Scratchpad */
+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
+#define TWL_HISTATUS_VALID_INTERRUPT 0xC
+#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
+#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
+#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
+#define TWL_ISSUE_SOFT_RESET 0x100
+#define TWL_CONTROLLER_READY 0x2000
+#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
+#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
+#define TWL_PULL_MODE 0x1
+
+/* Command packet opcodes used by the driver */
+#define TW_OP_INIT_CONNECTION 0x1
+#define TW_OP_GET_PARAM 0x12
+#define TW_OP_SET_PARAM 0x13
+#define TW_OP_EXECUTE_SCSI 0x10
+
+/* Asynchronous Event Notification (AEN) codes used by the driver */
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
+#define TW_AEN_SEVERITY_ERROR 0x1
+#define TW_AEN_SEVERITY_DEBUG 0x4
+#define TW_AEN_NOT_RETRIEVED 0x1
+
+/* Command state defines */
+#define TW_S_INITIAL 0x1 /* Initial state */
+#define TW_S_STARTED 0x2 /* Id in use */
+#define TW_S_POSTED 0x4 /* Posted to the controller */
+#define TW_S_COMPLETED 0x8 /* Completed by isr */
+#define TW_S_FINISHED 0x10 /* I/O completely done */
+
+/* Compatibility defines */
+#define TW_9750_ARCH_ID 10
+#define TW_CURRENT_DRIVER_SRL 40
+#define TW_CURRENT_DRIVER_BUILD 0
+#define TW_CURRENT_DRIVER_BRANCH 0
+
+/* Misc defines */
+#define TW_SECTOR_SIZE 512
+#define TW_MAX_UNITS 32
+#define TW_INIT_MESSAGE_CREDITS 0x100
+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
+#define TW_EXTENDED_INIT_CONNECT 0x2
+#define TW_BASE_FW_SRL 24
+#define TW_BASE_FW_BRANCH 0
+#define TW_BASE_FW_BUILD 1
+#define TW_Q_LENGTH 256
+#define TW_Q_START 0
+#define TW_MAX_SLOT 32
+#define TW_MAX_RESET_TRIES 2
+#define TW_MAX_CMDS_PER_LUN 254
+#define TW_MAX_AEN_DRAIN 255
+#define TW_IN_RESET 2
+#define TW_USING_MSI 3
+#define TW_IN_ATTENTION_LOOP 4
+#define TW_MAX_SECTORS 256
+#define TW_MAX_CDB_LEN 16
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
+#define TW_VERSION_TABLE 0x0402
+#define TW_TIMEKEEP_TABLE 0x040A
+#define TW_INFORMATION_TABLE 0x0403
+#define TW_PARAM_FWVER 3
+#define TW_PARAM_FWVER_LENGTH 16
+#define TW_PARAM_BIOSVER 4
+#define TW_PARAM_BIOSVER_LENGTH 16
+#define TW_PARAM_MODEL 8
+#define TW_PARAM_MODEL_LENGTH 16
+#define TW_PARAM_PHY_SUMMARY_TABLE 1
+#define TW_PARAM_PHYCOUNT 2
+#define TW_PARAM_PHYCOUNT_LENGTH 1
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
+#define TW_ALLOCATION_LENGTH 128
+#define TW_SENSE_DATA_LENGTH 18
+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
+#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
+#define TW_ERROR_UNIT_OFFLINE 0x128
+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
+#define TW_DRIVER 6
+#ifndef PCI_DEVICE_ID_3WARE_9750
+#define PCI_DEVICE_ID_3WARE_9750 0x1010
+#endif
+
+/* Bitmask macros to eliminate bitfields */
+
+/* opcode: 5, reserved: 3 */
+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_OP_OUT(x) (x & 0x1f)
+
+/* opcode: 5, sgloffset: 3 */
+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
+
+/* severity: 3, reserved: 5 */
+#define TW_SEV_OUT(x) (x & 0x7)
+
+/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
+#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
+#define TW_NOTMFA_OUT(x) (x & 0x1)
+
+/* request_id: 12, lun: 4 */
+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
+
+/* Register access macros */
+#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
+#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
+#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
+#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
+#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
+#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
+#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
+#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
+#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
+#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
+#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
+#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
+#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
+
+/* Macros */
+#define TW_PRINTK(h,a,b,c) { \
+if (h) \
+printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
+else \
+printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
+}
+#define TW_MAX_LUNS 16
+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
+#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
+#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
+#define TW_PADDING_LENGTH_LIBERATOR 136
+#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
+
+#pragma pack(1)
+
+/* SGL entry */
+typedef struct TAG_TW_SG_Entry_ISO {
+ dma_addr_t address;
+ dma_addr_t length;
+} TW_SG_Entry_ISO;
+
+/* Old Command Packet with ISO SGL */
+typedef struct TW_Command {
+ unsigned char opcode__sgloffset;
+ unsigned char size;
+ unsigned char request_id;
+ unsigned char unit__hostid;
+ /* Second DWORD */
+ unsigned char status;
+ unsigned char flags;
+ union {
+ unsigned short block_count;
+ unsigned short parameter_count;
+ } byte6_offset;
+ union {
+ struct {
+ u32 lba;
+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
+ } io;
+ struct {
+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
+ u32 padding;
+ unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
+ } param;
+ } byte8_offset;
+} TW_Command;
+
+/* New Command Packet with ISO SGL */
+typedef struct TAG_TW_Command_Apache {
+ unsigned char opcode__reserved;
+ unsigned char unit;
+ unsigned short request_id__lunl;
+ unsigned char status;
+ unsigned char sgl_offset;
+ unsigned short sgl_entries__lunh;
+ unsigned char cdb[16];
+ TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
+} TW_Command_Apache;
+
+/* New command packet header */
+typedef struct TAG_TW_Command_Apache_Header {
+ unsigned char sense_data[TW_SENSE_DATA_LENGTH];
+ struct {
+ char reserved[4];
+ unsigned short error;
+ unsigned char padding;
+ unsigned char severity__reserved;
+ } status_block;
+ unsigned char err_specific_desc[98];
+ struct {
+ unsigned char size_header;
+ unsigned short request_id;
+ unsigned char size_sense;
+ } header_desc;
+} TW_Command_Apache_Header;
+
+/* This struct is a union of the 2 command packets */
+typedef struct TAG_TW_Command_Full {
+ TW_Command_Apache_Header header;
+ union {
+ TW_Command oldcommand;
+ TW_Command_Apache newcommand;
+ } command;
+} TW_Command_Full;
+
+/* Initconnection structure */
+typedef struct TAG_TW_Initconnect {
+ unsigned char opcode__reserved;
+ unsigned char size;
+ unsigned char request_id;
+ unsigned char res2;
+ unsigned char status;
+ unsigned char flags;
+ unsigned short message_credits;
+ u32 features;
+ unsigned short fw_srl;
+ unsigned short fw_arch_id;
+ unsigned short fw_branch;
+ unsigned short fw_build;
+ u32 result;
+} TW_Initconnect;
+
+/* Event info structure */
+typedef struct TAG_TW_Event
+{
+ unsigned int sequence_id;
+ unsigned int time_stamp_sec;
+ unsigned short aen_code;
+ unsigned char severity;
+ unsigned char retrieved;
+ unsigned char repeat_count;
+ unsigned char parameter_len;
+ unsigned char parameter_data[98];
+} TW_Event;
+
+typedef struct TAG_TW_Ioctl_Driver_Command {
+ unsigned int control_code;
+ unsigned int status;
+ unsigned int unique_id;
+ unsigned int sequence_id;
+ unsigned int os_specific;
+ unsigned int buffer_length;
+} TW_Ioctl_Driver_Command;
+
+typedef struct TAG_TW_Ioctl_Apache {
+ TW_Ioctl_Driver_Command driver_command;
+ char padding[488];
+ TW_Command_Full firmware_command;
+ char data_buffer[1];
+} TW_Ioctl_Buf_Apache;
+
+/* GetParam descriptor */
+typedef struct {
+ unsigned short table_id;
+ unsigned short parameter_id;
+ unsigned short parameter_size_bytes;
+ unsigned short actual_parameter_size_bytes;
+ unsigned char data[1];
+} TW_Param_Apache;
+
+/* Compatibility information structure */
+typedef struct TAG_TW_Compatibility_Info
+{
+ char driver_version[32];
+ unsigned short working_srl;
+ unsigned short working_branch;
+ unsigned short working_build;
+ unsigned short driver_srl_high;
+ unsigned short driver_branch_high;
+ unsigned short driver_build_high;
+ unsigned short driver_srl_low;
+ unsigned short driver_branch_low;
+ unsigned short driver_build_low;
+ unsigned short fw_on_ctlr_srl;
+ unsigned short fw_on_ctlr_branch;
+ unsigned short fw_on_ctlr_build;
+} TW_Compatibility_Info;
+
+#pragma pack()
+
+typedef struct TAG_TW_Device_Extension {
+ void __iomem *base_addr;
+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
+ dma_addr_t command_packet_phys[TW_Q_LENGTH];
+ TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
+ struct pci_dev *tw_pci_dev;
+ struct scsi_cmnd *srb[TW_Q_LENGTH];
+ unsigned char free_queue[TW_Q_LENGTH];
+ unsigned char free_head;
+ unsigned char free_tail;
+ int state[TW_Q_LENGTH];
+ unsigned int posted_request_count;
+ unsigned int max_posted_request_count;
+ unsigned int max_sgl_entries;
+ unsigned int sgl_entries;
+ unsigned int num_resets;
+ unsigned int sector_count;
+ unsigned int max_sector_count;
+ unsigned int aen_count;
+ struct Scsi_Host *host;
+ long flags;
+ TW_Event *event_queue[TW_Q_LENGTH];
+ unsigned char error_index;
+ unsigned int error_sequence_id;
+ int chrdev_request_id;
+ wait_queue_head_t ioctl_wqueue;
+ struct mutex ioctl_lock;
+ TW_Compatibility_Info tw_compat_info;
+ char online;
+} TW_Device_Extension;
+
+#endif /* _3W_SAS_H */
+
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
new file mode 100644
index 000000000..2940bd769
--- /dev/null
+++ b/drivers/scsi/3w-xxxx.c
@@ -0,0 +1,2419 @@
+/*
+ 3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
+
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Joel Jacobson <linux@3ware.com>
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ Brad Strand <linux@3ware.com>
+
+ Copyright (C) 1999-2010 3ware Inc.
+
+ Kernel compatibility By: Andre Hedrick <andre@suse.com>
+ Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
+
+ Further tiny build fixes and trivial hoovering Alan Cox
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
+ linuxraid@lsi.com
+
+ For more information, goto:
+ http://www.lsi.com
+
+ History
+ -------
+ 0.1.000 - Initial release.
+ 0.4.000 - Added support for Asynchronous Event Notification through
+ ioctls for 3DM.
+ 1.0.000 - Added DPO & FUA bit support for WRITE_10 & WRITE_6 cdb
+ to disable drive write-cache before writes.
+ 1.1.000 - Fixed performance bug with DPO & FUA not existing for WRITE_6.
+ 1.2.000 - Added support for clean shutdown notification/feature table.
+ 1.02.00.001 - Added support for full command packet posts through ioctls
+ for 3DM.
+ Bug fix so hot spare drives don't show up.
+ 1.02.00.002 - Fix bug with tw_setfeature() call that caused oops on some
+ systems.
+ 08/21/00 - release previously allocated resources on failure at
+ tw_allocate_memory (acme)
+ 1.02.00.003 - Fix tw_interrupt() to report error to scsi layer when
+ controller status is non-zero.
+ Added handling of request_sense opcode.
+ Fix possible null pointer dereference in
+ tw_reset_device_extension()
+ 1.02.00.004 - Add support for device id of 3ware 7000 series controllers.
+ Make tw_setfeature() call with interrupts disabled.
+ Register interrupt handler before enabling interrupts.
+ Clear attention interrupt before draining aen queue.
+ 1.02.00.005 - Allocate bounce buffers and custom queue depth for raid5 for
+ 6000 and 5000 series controllers.
+ Reduce polling mdelays causing problems on some systems.
+ Fix use_sg = 1 calculation bug.
+ Check for scsi_register returning NULL.
+ Add aen count to /proc/scsi/3w-xxxx.
+ Remove aen code unit masking in tw_aen_complete().
+ 1.02.00.006 - Remove unit from printk in tw_scsi_eh_abort(), causing
+ possible oops.
+ Fix possible null pointer dereference in tw_scsi_queue()
+ if done function pointer was invalid.
+ 1.02.00.007 - Fix possible null pointer dereferences in tw_ioctl().
+ Remove check for invalid done function pointer from
+ tw_scsi_queue().
+ 1.02.00.008 - Set max sectors per io to TW_MAX_SECTORS in tw_findcards().
+ Add tw_decode_error() for printing readable error messages.
+ Print some useful information on certain aen codes.
+ Add tw_decode_bits() for interpreting status register output.
+ Make scsi_set_pci_device() for kernels >= 2.4.4
+ Fix bug where aen's could be lost before a reset.
+ Re-add spinlocks in tw_scsi_detect().
+ Fix possible null pointer dereference in tw_aen_drain_queue()
+ during initialization.
+ Clear pci parity errors during initialization and during io.
+ 1.02.00.009 - Remove redundant increment in tw_state_request_start().
+ Add ioctl support for direct ATA command passthru.
+ Add entire aen code string list.
+ 1.02.00.010 - Cleanup queueing code, fix jbod thoughput.
+ Fix get_param for specific units.
+ 1.02.00.011 - Fix bug in tw_aen_complete() where aen's could be lost.
+ Fix tw_aen_drain_queue() to display useful info at init.
+ Set tw_host->max_id for 12 port cards.
+ Add ioctl support for raw command packet post from userspace
+ with sglist fragments (parameter and io).
+ 1.02.00.012 - Fix read capacity to under report by 1 sector to fix get
+ last sector ioctl.
+ 1.02.00.013 - Fix bug where more AEN codes weren't coming out during
+ driver initialization.
+ Improved handling of PCI aborts.
+ 1.02.00.014 - Fix bug in tw_findcards() where AEN code could be lost.
+ Increase timeout in tw_aen_drain_queue() to 30 seconds.
+ 1.02.00.015 - Re-write raw command post with data ioctl method.
+ Remove raid5 bounce buffers for raid5 for 6XXX for kernel 2.5
+ Add tw_map/unmap_scsi_sg/single_data() for kernel 2.5
+ Replace io_request_lock with host_lock for kernel 2.5
+ Set max_cmd_len to 16 for 3dm for kernel 2.5
+ 1.02.00.016 - Set host->max_sectors back up to 256.
+ 1.02.00.017 - Modified pci parity error handling/clearing from config space
+ during initialization.
+ 1.02.00.018 - Better handling of request sense opcode and sense information
+ for failed commands. Add tw_decode_sense().
+ Replace all mdelay()'s with scsi_sleep().
+ 1.02.00.019 - Revert mdelay's and scsi_sleep's, this caused problems on
+ some SMP systems.
+ 1.02.00.020 - Add pci_set_dma_mask(), rewrite kmalloc()/virt_to_bus() to
+ pci_alloc/free_consistent().
+ Better alignment checking in tw_allocate_memory().
+ Cleanup tw_initialize_device_extension().
+ 1.02.00.021 - Bump cmd_per_lun in SHT to 255 for better jbod performance.
+ Improve handling of errors in tw_interrupt().
+ Add handling/clearing of controller queue error.
+ Empty stale responses before draining aen queue.
+ Fix tw_scsi_eh_abort() to not reset on every io abort.
+ Set can_queue in SHT to 255 to prevent hang from AEN.
+ 1.02.00.022 - Fix possible null pointer dereference in tw_scsi_release().
+ 1.02.00.023 - Fix bug in tw_aen_drain_queue() where unit # was always zero.
+ 1.02.00.024 - Add severity levels to AEN strings.
+ 1.02.00.025 - Fix command interrupt spurious error messages.
+ Fix bug in raw command post with data ioctl method.
+ Fix bug where rollcall sometimes failed with cable errors.
+ Print unit # on all command timeouts.
+ 1.02.00.026 - Fix possible infinite retry bug with power glitch induced
+ drive timeouts.
+ Cleanup some AEN severity levels.
+ 1.02.00.027 - Add drive not supported AEN code for SATA controllers.
+ Remove spurious unknown ioctl error message.
+ 1.02.00.028 - Fix bug where multiple controllers with no units were the
+ same card number.
+ Fix bug where cards were being shut down more than once.
+ 1.02.00.029 - Add missing pci_free_consistent() in tw_allocate_memory().
+ Replace pci_map_single() with pci_map_page() for highmem.
+ Check for tw_setfeature() failure.
+ 1.02.00.030 - Make driver 64-bit clean.
+ 1.02.00.031 - Cleanup polling timeouts/routines in several places.
+ Add support for mode sense opcode.
+ Add support for cache mode page.
+ Add support for synchronize cache opcode.
+ 1.02.00.032 - Fix small multicard rollcall bug.
+ Make driver stay loaded with no units for hot add/swap.
+ Add support for "twe" character device for ioctls.
+ Clean up request_id queueing code.
+ Fix tw_scsi_queue() spinlocks.
+ 1.02.00.033 - Fix tw_aen_complete() to not queue 'queue empty' AEN's.
+ Initialize queues correctly when loading with no valid units.
+ 1.02.00.034 - Fix tw_decode_bits() to handle multiple errors.
+ Add support for user configurable cmd_per_lun.
+ Add support for sht->slave_configure().
+ 1.02.00.035 - Improve tw_allocate_memory() memory allocation.
+ Fix tw_chrdev_ioctl() to sleep correctly.
+ 1.02.00.036 - Increase character ioctl timeout to 60 seconds.
+ 1.02.00.037 - Fix tw_ioctl() to handle all non-data ATA passthru cmds
+ for 'smartmontools' support.
+ 1.26.00.038 - Roll driver minor version to 26 to denote kernel 2.6.
+ Add support for cmds_per_lun module parameter.
+ 1.26.00.039 - Fix bug in tw_chrdev_ioctl() polling code.
+ Fix data_buffer_length usage in tw_chrdev_ioctl().
+ Update contact information.
+ 1.26.02.000 - Convert driver to pci_driver format.
+ 1.26.02.001 - Increase max ioctl buffer size to 512 sectors.
+ Make tw_scsi_queue() return 0 for 'Unknown scsi opcode'.
+ Fix tw_remove() to free irq handler/unregister_chrdev()
+ before shutting down card.
+ Change to new 'change_queue_depth' api.
+ Fix 'handled=1' ISR usage, remove bogus IRQ check.
+ 1.26.02.002 - Free irq handler in __tw_shutdown().
+ Turn on RCD bit for caching mode page.
+ Serialize reset code.
+ 1.26.02.003 - Force 60 second timeout default.
+*/
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include "3w-xxxx.h"
+
+/* Globals */
+#define TW_DRIVER_VERSION "1.26.02.003"
+static DEFINE_MUTEX(tw_mutex);
+static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT];
+static int tw_device_extension_count = 0;
+static int twe_major = -1;
+
+/* Module parameters */
+MODULE_AUTHOR("LSI");
+MODULE_DESCRIPTION("3ware Storage Controller Linux Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(TW_DRIVER_VERSION);
+
+/* Function prototypes */
+static int tw_reset_device_extension(TW_Device_Extension *tw_dev);
+
+/* Functions */
+
+/* This function will check the status register for unexpected bits */
+static int tw_check_bits(u32 status_reg_value)
+{
+ if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_check_bits(): No expected bits (0x%x).\n", status_reg_value);
+ return 1;
+ }
+ if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_check_bits(): Found unexpected bits (0x%x).\n", status_reg_value);
+ return 1;
+ }
+
+ return 0;
+} /* End tw_check_bits() */
+
+/* This function will print readable messages from status register errors */
+static int tw_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value, int print_host)
+{
+ char host[16];
+
+ dprintk(KERN_WARNING "3w-xxxx: tw_decode_bits()\n");
+
+ if (print_host)
+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
+ else
+ host[0] = '\0';
+
+ if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
+ printk(KERN_WARNING "3w-xxxx:%s PCI Parity Error: clearing.\n", host);
+ outl(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
+ }
+
+ if (status_reg_value & TW_STATUS_PCI_ABORT) {
+ printk(KERN_WARNING "3w-xxxx:%s PCI Abort: clearing.\n", host);
+ outl(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
+ pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
+ }
+
+ if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
+ printk(KERN_WARNING "3w-xxxx:%s Controller Queue Error: clearing.\n", host);
+ outl(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
+ }
+
+ if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
+ printk(KERN_WARNING "3w-xxxx:%s SBUF Write Error: clearing.\n", host);
+ outl(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
+ }
+
+ if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
+ if (tw_dev->reset_print == 0) {
+ printk(KERN_WARNING "3w-xxxx:%s Microcontroller Error: clearing.\n", host);
+ tw_dev->reset_print = 1;
+ }
+ return 1;
+ }
+
+ return 0;
+} /* End tw_decode_bits() */
+
+/* This function will poll the status register for a flag */
+static int tw_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
+{
+ u32 status_reg_value;
+ unsigned long before;
+ int retval = 1;
+
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+ before = jiffies;
+
+ if (tw_check_bits(status_reg_value))
+ tw_decode_bits(tw_dev, status_reg_value, 0);
+
+ while ((status_reg_value & flag) != flag) {
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+
+ if (tw_check_bits(status_reg_value))
+ tw_decode_bits(tw_dev, status_reg_value, 0);
+
+ if (time_after(jiffies, before + HZ * seconds))
+ goto out;
+
+ msleep(50);
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End tw_poll_status() */
+
+/* This function will poll the status register for disappearance of a flag */
+static int tw_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
+{
+ u32 status_reg_value;
+ unsigned long before;
+ int retval = 1;
+
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+ before = jiffies;
+
+ if (tw_check_bits(status_reg_value))
+ tw_decode_bits(tw_dev, status_reg_value, 0);
+
+ while ((status_reg_value & flag) != 0) {
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+
+ if (tw_check_bits(status_reg_value))
+ tw_decode_bits(tw_dev, status_reg_value, 0);
+
+ if (time_after(jiffies, before + HZ * seconds))
+ goto out;
+
+ msleep(50);
+ }
+ retval = 0;
+out:
+ return retval;
+} /* End tw_poll_status_gone() */
+
+/* This function will attempt to post a command packet to the board */
+static int tw_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
+{
+ u32 status_reg_value;
+ unsigned long command_que_value;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_post_command_packet()\n");
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+
+ if (tw_check_bits(status_reg_value)) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_post_command_packet(): Unexpected bits.\n");
+ tw_decode_bits(tw_dev, status_reg_value, 1);
+ }
+
+ if ((status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL) == 0) {
+ /* We successfully posted the command packet */
+ outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+ tw_dev->state[request_id] = TW_S_POSTED;
+ tw_dev->posted_request_count++;
+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
+ }
+ } else {
+ /* Couldn't post the command packet, so we do it in the isr */
+ if (tw_dev->state[request_id] != TW_S_PENDING) {
+ tw_dev->state[request_id] = TW_S_PENDING;
+ tw_dev->pending_request_count++;
+ if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
+ tw_dev->max_pending_request_count = tw_dev->pending_request_count;
+ }
+ tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
+ if (tw_dev->pending_tail == TW_Q_LENGTH-1) {
+ tw_dev->pending_tail = TW_Q_START;
+ } else {
+ tw_dev->pending_tail = tw_dev->pending_tail + 1;
+ }
+ }
+ TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
+ return 1;
+ }
+ return 0;
+} /* End tw_post_command_packet() */
+
+/* This function will return valid sense buffer information for failed cmds */
+static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill_sense)
+{
+ int i;
+ TW_Command *command;
+
+ dprintk(KERN_WARNING "3w-xxxx: tw_decode_sense()\n");
+ command = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Command failed: status = 0x%x, flags = 0x%x, unit #%d.\n", tw_dev->host->host_no, command->status, command->flags, TW_UNIT_OUT(command->unit__hostid));
+
+ /* Attempt to return intelligent sense information */
+ if (fill_sense) {
+ if ((command->status == 0xc7) || (command->status == 0xcb)) {
+ for (i = 0; i < ARRAY_SIZE(tw_sense_table); i++) {
+ if (command->flags == tw_sense_table[i][0]) {
+
+ /* Valid bit and 'current errors' */
+ tw_dev->srb[request_id]->sense_buffer[0] = (0x1 << 7 | 0x70);
+
+ /* Sense key */
+ tw_dev->srb[request_id]->sense_buffer[2] = tw_sense_table[i][1];
+
+ /* Additional sense length */
+ tw_dev->srb[request_id]->sense_buffer[7] = 0xa; /* 10 bytes */
+
+ /* Additional sense code */
+ tw_dev->srb[request_id]->sense_buffer[12] = tw_sense_table[i][2];
+
+ /* Additional sense code qualifier */
+ tw_dev->srb[request_id]->sense_buffer[13] = tw_sense_table[i][3];
+
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ return TW_ISR_DONT_RESULT; /* Special case for isr to not over-write result */
+ }
+ }
+ }
+
+ /* If no table match, error so we get a reset */
+ return 1;
+ }
+
+ return 0;
+} /* End tw_decode_sense() */
+
+/* This function will report controller error status */
+static int tw_check_errors(TW_Device_Extension *tw_dev)
+{
+ u32 status_reg_value;
+
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+
+ if (TW_STATUS_ERRORS(status_reg_value) || tw_check_bits(status_reg_value)) {
+ tw_decode_bits(tw_dev, status_reg_value, 0);
+ return 1;
+ }
+
+ return 0;
+} /* End tw_check_errors() */
+
+/* This function will empty the response que */
+static void tw_empty_response_que(TW_Device_Extension *tw_dev)
+{
+ u32 status_reg_value, response_que_value;
+
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+
+ while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
+ response_que_value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+ }
+} /* End tw_empty_response_que() */
+
+/* This function will free a request_id */
+static void tw_state_request_finish(TW_Device_Extension *tw_dev, int request_id)
+{
+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
+ tw_dev->state[request_id] = TW_S_FINISHED;
+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
+} /* End tw_state_request_finish() */
+
+/* This function will assign an available request_id */
+static void tw_state_request_start(TW_Device_Extension *tw_dev, int *request_id)
+{
+ *request_id = tw_dev->free_queue[tw_dev->free_head];
+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
+ tw_dev->state[*request_id] = TW_S_STARTED;
+} /* End tw_state_request_start() */
+
+/* Show some statistics about the card */
+static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+ unsigned long flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "3w-xxxx Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Current pending commands: %4d\n"
+ "Max pending commands: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->pending_request_count,
+ tw_dev->max_pending_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ return len;
+} /* End tw_show_stats() */
+
+/* Create sysfs 'stats' entry */
+static struct device_attribute tw_host_stats_attr = {
+ .attr = {
+ .name = "stats",
+ .mode = S_IRUGO,
+ },
+ .show = tw_show_stats
+};
+
+/* Host attributes initializer */
+static struct device_attribute *tw_host_attrs[] = {
+ &tw_host_stats_attr,
+ NULL,
+};
+
+/* This function will read the aen queue from the isr */
+static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Command *command_packet;
+ TW_Param *param;
+ unsigned long command_que_value;
+ u32 status_reg_value;
+ unsigned long param_value = 0;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_aen_read_queue()\n");
+
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+ if (tw_check_bits(status_reg_value)) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Unexpected bits.\n");
+ tw_decode_bits(tw_dev, status_reg_value, 1);
+ return 1;
+ }
+ if (tw_dev->command_packet_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad command packet virtual address.\n");
+ return 1;
+ }
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.parameter_count = 1;
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad command packet physical address.\n");
+ return 1;
+ }
+ /* Now setup the param */
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ memset(param, 0, sizeof(TW_Sector));
+ param->table_id = 0x401; /* AEN table */
+ param->parameter_id = 2; /* Unit code */
+ param->parameter_size_bytes = 2;
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Bad alignment physical address.\n");
+ return 1;
+ }
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+
+ /* Now post the command packet */
+ if ((status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL) == 0) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Post succeeded.\n");
+ tw_dev->srb[request_id] = NULL; /* Flag internal command */
+ tw_dev->state[request_id] = TW_S_POSTED;
+ outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+ } else {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_read_queue(): Post failed, will retry.\n");
+ return 1;
+ }
+
+ return 0;
+} /* End tw_aen_read_queue() */
+
+/* This function will complete an aen request from the isr */
+static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Param *param;
+ unsigned short aen;
+ int error = 0, table_max = 0;
+
+ dprintk(KERN_WARNING "3w-xxxx: tw_aen_complete()\n");
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_complete(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ aen = *(unsigned short *)(param->data);
+ dprintk(KERN_NOTICE "3w-xxxx: tw_aen_complete(): Queue'd code 0x%x\n", aen);
+
+ /* Print some useful info when certain aen codes come out */
+ if (aen == 0x0ff) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: INFO: AEN queue overflow.\n", tw_dev->host->host_no);
+ } else {
+ table_max = ARRAY_SIZE(tw_aen_string);
+ if ((aen & 0x0ff) < table_max) {
+ if ((tw_aen_string[aen & 0xff][strlen(tw_aen_string[aen & 0xff])-1]) == '#') {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s%d.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff], aen >> 8);
+ } else {
+ if (aen != 0x0)
+ printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff]);
+ }
+ } else {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Received AEN %d.\n", tw_dev->host->host_no, aen);
+ }
+ }
+ if (aen != TW_AEN_QUEUE_EMPTY) {
+ tw_dev->aen_count++;
+
+ /* Now queue the code */
+ tw_dev->aen_queue[tw_dev->aen_tail] = aen;
+ if (tw_dev->aen_tail == TW_Q_LENGTH - 1) {
+ tw_dev->aen_tail = TW_Q_START;
+ } else {
+ tw_dev->aen_tail = tw_dev->aen_tail + 1;
+ }
+ if (tw_dev->aen_head == tw_dev->aen_tail) {
+ if (tw_dev->aen_head == TW_Q_LENGTH - 1) {
+ tw_dev->aen_head = TW_Q_START;
+ } else {
+ tw_dev->aen_head = tw_dev->aen_head + 1;
+ }
+ }
+
+ error = tw_aen_read_queue(tw_dev, request_id);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Error completing AEN.\n", tw_dev->host->host_no);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ }
+ } else {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ }
+
+ return 0;
+} /* End tw_aen_complete() */
+
+/* This function will drain the aen queue after a soft reset */
+static int tw_aen_drain_queue(TW_Device_Extension *tw_dev)
+{
+ TW_Command *command_packet;
+ TW_Param *param;
+ int request_id = 0;
+ unsigned long command_que_value;
+ unsigned long param_value;
+ TW_Response_Queue response_queue;
+ unsigned short aen;
+ unsigned short aen_code;
+ int finished = 0;
+ int first_reset = 0;
+ int queue = 0;
+ int found = 0, table_max = 0;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_aen_drain_queue()\n");
+
+ if (tw_poll_status(tw_dev, TW_STATUS_ATTENTION_INTERRUPT | TW_STATUS_MICROCONTROLLER_READY, 30)) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): No attention interrupt for card %d.\n", tw_device_extension_count);
+ return 1;
+ }
+ TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
+
+ /* Empty response queue */
+ tw_empty_response_que(tw_dev);
+
+ /* Initialize command packet */
+ if (tw_dev->command_packet_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad command packet virtual address.\n");
+ return 1;
+ }
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.parameter_count = 1;
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now setup the param */
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ memset(param, 0, sizeof(TW_Sector));
+ param->table_id = 0x401; /* AEN table */
+ param->parameter_id = 2; /* Unit code */
+ param->parameter_size_bytes = 2;
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Bad alignment physical address.\n");
+ return 1;
+ }
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+
+ /* Now drain the controller's aen queue */
+ do {
+ /* Post command packet */
+ outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+
+ /* Now poll for completion */
+ if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) {
+ response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ request_id = TW_RESID_OUT(response_queue.response_id);
+
+ if (request_id != 0) {
+ /* Unexpected request id */
+ printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Unexpected request id.\n");
+ return 1;
+ }
+
+ if (command_packet->status != 0) {
+ if (command_packet->flags != TW_AEN_TABLE_UNDEFINED) {
+ /* Bad response */
+ tw_decode_sense(tw_dev, request_id, 0);
+ return 1;
+ } else {
+ /* We know this is a 3w-1x00, and doesn't support aen's */
+ return 0;
+ }
+ }
+
+ /* Now check the aen */
+ aen = *(unsigned short *)(param->data);
+ aen_code = (aen & 0x0ff);
+ queue = 0;
+ switch (aen_code) {
+ case TW_AEN_QUEUE_EMPTY:
+ dprintk(KERN_WARNING "3w-xxxx: AEN: %s.\n", tw_aen_string[aen & 0xff]);
+ if (first_reset != 1) {
+ return 1;
+ } else {
+ finished = 1;
+ }
+ break;
+ case TW_AEN_SOFT_RESET:
+ if (first_reset == 0) {
+ first_reset = 1;
+ } else {
+ printk(KERN_WARNING "3w-xxxx: AEN: %s.\n", tw_aen_string[aen & 0xff]);
+ tw_dev->aen_count++;
+ queue = 1;
+ }
+ break;
+ default:
+ if (aen == 0x0ff) {
+ printk(KERN_WARNING "3w-xxxx: AEN: INFO: AEN queue overflow.\n");
+ } else {
+ table_max = ARRAY_SIZE(tw_aen_string);
+ if ((aen & 0x0ff) < table_max) {
+ if ((tw_aen_string[aen & 0xff][strlen(tw_aen_string[aen & 0xff])-1]) == '#') {
+ printk(KERN_WARNING "3w-xxxx: AEN: %s%d.\n", tw_aen_string[aen & 0xff], aen >> 8);
+ } else {
+ printk(KERN_WARNING "3w-xxxx: AEN: %s.\n", tw_aen_string[aen & 0xff]);
+ }
+ } else
+ printk(KERN_WARNING "3w-xxxx: Received AEN %d.\n", aen);
+ }
+ tw_dev->aen_count++;
+ queue = 1;
+ }
+
+ /* Now put the aen on the aen_queue */
+ if (queue == 1) {
+ tw_dev->aen_queue[tw_dev->aen_tail] = aen;
+ if (tw_dev->aen_tail == TW_Q_LENGTH - 1) {
+ tw_dev->aen_tail = TW_Q_START;
+ } else {
+ tw_dev->aen_tail = tw_dev->aen_tail + 1;
+ }
+ if (tw_dev->aen_head == tw_dev->aen_tail) {
+ if (tw_dev->aen_head == TW_Q_LENGTH - 1) {
+ tw_dev->aen_head = TW_Q_START;
+ } else {
+ tw_dev->aen_head = tw_dev->aen_head + 1;
+ }
+ }
+ }
+ found = 1;
+ }
+ if (found == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Response never received.\n");
+ return 1;
+ }
+ } while (finished == 0);
+
+ return 0;
+} /* End tw_aen_drain_queue() */
+
+/* This function will allocate memory */
+static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
+{
+ int i;
+ dma_addr_t dma_handle;
+ unsigned long *cpu_addr = NULL;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
+
+ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ if (cpu_addr == NULL) {
+ printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+ return 1;
+ }
+
+ if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
+ printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
+ pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ return 1;
+ }
+
+ memset(cpu_addr, 0, size*TW_Q_LENGTH);
+
+ for (i=0;i<TW_Q_LENGTH;i++) {
+ switch(which) {
+ case 0:
+ tw_dev->command_packet_physical_address[i] = dma_handle+(i*size);
+ tw_dev->command_packet_virtual_address[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ case 1:
+ tw_dev->alignment_physical_address[i] = dma_handle+(i*size);
+ tw_dev->alignment_virtual_address[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
+ break;
+ default:
+ printk(KERN_WARNING "3w-xxxx: tw_allocate_memory(): case slip in tw_allocate_memory()\n");
+ return 1;
+ }
+ }
+
+ return 0;
+} /* End tw_allocate_memory() */
+
+/* This function handles ioctl for the character device */
+static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int request_id;
+ dma_addr_t dma_handle;
+ unsigned short tw_aen_code;
+ unsigned long flags;
+ unsigned int data_buffer_length = 0;
+ unsigned long data_buffer_length_adjusted = 0;
+ struct inode *inode = file_inode(file);
+ unsigned long *cpu_addr;
+ long timeout;
+ TW_New_Ioctl *tw_ioctl;
+ TW_Passthru *passthru;
+ TW_Device_Extension *tw_dev = tw_device_extension_list[iminor(inode)];
+ int retval = -EFAULT;
+ void __user *argp = (void __user *)arg;
+
+ dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n");
+
+ mutex_lock(&tw_mutex);
+ /* Only let one of these through at a time */
+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
+ mutex_unlock(&tw_mutex);
+ return -EINTR;
+ }
+
+ /* First copy down the buffer length */
+ if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int)))
+ goto out;
+
+ /* Check size */
+ if (data_buffer_length > TW_MAX_IOCTL_SECTORS * 512) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /* Hardware can only do multiple of 512 byte transfers */
+ data_buffer_length_adjusted = (data_buffer_length + 511) & ~511;
+
+ /* Now allocate ioctl buf memory */
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, &dma_handle, GFP_KERNEL);
+ if (cpu_addr == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ tw_ioctl = (TW_New_Ioctl *)cpu_addr;
+
+ /* Now copy down the entire ioctl */
+ if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl) - 1))
+ goto out2;
+
+ passthru = (TW_Passthru *)&tw_ioctl->firmware_command;
+
+ /* See which ioctl we are doing */
+ switch (cmd) {
+ case TW_OP_NOP:
+ dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_OP_NOP.\n");
+ break;
+ case TW_OP_AEN_LISTEN:
+ dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_AEN_LISTEN.\n");
+ memset(tw_ioctl->data_buffer, 0, data_buffer_length);
+
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ if (tw_dev->aen_head == tw_dev->aen_tail) {
+ tw_aen_code = TW_AEN_QUEUE_EMPTY;
+ } else {
+ tw_aen_code = tw_dev->aen_queue[tw_dev->aen_head];
+ if (tw_dev->aen_head == TW_Q_LENGTH - 1) {
+ tw_dev->aen_head = TW_Q_START;
+ } else {
+ tw_dev->aen_head = tw_dev->aen_head + 1;
+ }
+ }
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ memcpy(tw_ioctl->data_buffer, &tw_aen_code, sizeof(tw_aen_code));
+ break;
+ case TW_CMD_PACKET_WITH_DATA:
+ dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_CMD_PACKET_WITH_DATA.\n");
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+
+ tw_state_request_start(tw_dev, &request_id);
+
+ /* Flag internal command */
+ tw_dev->srb[request_id] = NULL;
+
+ /* Flag chrdev ioctl */
+ tw_dev->chrdev_request_id = request_id;
+
+ tw_ioctl->firmware_command.request_id = request_id;
+
+ /* Load the sg list */
+ switch (TW_SGL_OUT(tw_ioctl->firmware_command.opcode__sgloffset)) {
+ case 2:
+ tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.param.sgl[0].length = data_buffer_length_adjusted;
+ break;
+ case 3:
+ tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.io.sgl[0].length = data_buffer_length_adjusted;
+ break;
+ case 5:
+ passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ passthru->sg_list[0].length = data_buffer_length_adjusted;
+ break;
+ }
+
+ memcpy(tw_dev->command_packet_virtual_address[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command));
+
+ /* Now post the command packet to the controller */
+ tw_post_command_packet(tw_dev, request_id);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
+
+ /* Now wait for the command to complete */
+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
+
+ /* We timed out, and didn't get an interrupt */
+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
+ /* Now we need to reset the board */
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Character ioctl (0x%x) timed out, resetting card.\n", tw_dev->host->host_no, cmd);
+ retval = -EIO;
+ if (tw_reset_device_extension(tw_dev)) {
+ printk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): Reset failed for card %d.\n", tw_dev->host->host_no);
+ }
+ goto out2;
+ }
+
+ /* Now copy in the command packet response */
+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virtual_address[request_id], sizeof(TW_Command));
+
+ /* Now complete the io */
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+ tw_dev->posted_request_count--;
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+ break;
+ default:
+ retval = -ENOTTY;
+ goto out2;
+ }
+
+ /* Now copy the response to userspace */
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length - 1))
+ goto out2;
+ retval = 0;
+out2:
+ /* Now free ioctl buf memory */
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
+out:
+ mutex_unlock(&tw_dev->ioctl_lock);
+ mutex_unlock(&tw_mutex);
+ return retval;
+} /* End tw_chrdev_ioctl() */
+
+/* This function handles open for the character device */
+/* NOTE that this function races with remove. */
+static int tw_chrdev_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor_number;
+
+ dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
+
+ minor_number = iminor(inode);
+ if (minor_number >= tw_device_extension_count)
+ return -ENODEV;
+
+ return 0;
+} /* End tw_chrdev_open() */
+
+/* File operations struct for character device */
+static const struct file_operations tw_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tw_chrdev_ioctl,
+ .open = tw_chrdev_open,
+ .release = NULL,
+ .llseek = noop_llseek,
+};
+
+/* This function will free up device extension resources */
+static void tw_free_device_extension(TW_Device_Extension *tw_dev)
+{
+ dprintk(KERN_NOTICE "3w-xxxx: tw_free_device_extension()\n");
+
+ /* Free command packet and generic buffer memory */
+ if (tw_dev->command_packet_virtual_address[0])
+ pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+
+ if (tw_dev->alignment_virtual_address[0])
+ pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+} /* End tw_free_device_extension() */
+
+/* This function will send an initconnection command to controller */
+static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
+{
+ unsigned long command_que_value;
+ TW_Command *command_packet;
+ TW_Response_Queue response_queue;
+ int request_id = 0;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_initconnection()\n");
+
+ /* Initialize InitConnection command packet */
+ if (tw_dev->command_packet_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Bad command packet virtual address.\n");
+ return 1;
+ }
+
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(0, TW_OP_INIT_CONNECTION);
+ command_packet->size = TW_INIT_COMMAND_PACKET_SIZE;
+ command_packet->request_id = request_id;
+ command_packet->status = 0x0;
+ command_packet->flags = 0x0;
+ command_packet->byte6.message_credits = message_credits;
+ command_packet->byte8.init_connection.response_queue_pointer = 0x0;
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Send command packet to the board */
+ outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+
+ /* Poll for completion */
+ if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) {
+ response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ request_id = TW_RESID_OUT(response_queue.response_id);
+
+ if (request_id != 0) {
+ /* unexpected request id */
+ printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Unexpected request id.\n");
+ return 1;
+ }
+ if (command_packet->status != 0) {
+ /* bad response */
+ tw_decode_sense(tw_dev, request_id, 0);
+ return 1;
+ }
+ }
+ return 0;
+} /* End tw_initconnection() */
+
+/* Set a value in the features table */
+static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
+ unsigned char *val)
+{
+ TW_Param *param;
+ TW_Command *command_packet;
+ TW_Response_Queue response_queue;
+ int request_id = 0;
+ unsigned long command_que_value;
+ unsigned long param_value;
+
+ /* Initialize SetParam command packet */
+ if (tw_dev->command_packet_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet virtual address.\n");
+ return 1;
+ }
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ memset(command_packet, 0, sizeof(TW_Sector));
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
+ param->table_id = 0x404; /* Features table */
+ param->parameter_id = parm;
+ param->parameter_size_bytes = param_size;
+ memcpy(param->data, val, param_size);
+
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad alignment physical address.\n");
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ tw_dev->srb[request_id]->result = (DID_OK << 16);
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ }
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->byte6.parameter_count = 1;
+
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Send command packet to the board */
+ outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
+
+ /* Poll for completion */
+ if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) {
+ response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ request_id = TW_RESID_OUT(response_queue.response_id);
+
+ if (request_id != 0) {
+ /* unexpected request id */
+ printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Unexpected request id.\n");
+ return 1;
+ }
+ if (command_packet->status != 0) {
+ /* bad response */
+ tw_decode_sense(tw_dev, request_id, 0);
+ return 1;
+ }
+ }
+
+ return 0;
+} /* End tw_setfeature() */
+
+/* This function will reset a controller */
+static int tw_reset_sequence(TW_Device_Extension *tw_dev)
+{
+ int error = 0;
+ int tries = 0;
+ unsigned char c = 1;
+
+ /* Reset the board */
+ while (tries < TW_MAX_RESET_TRIES) {
+ TW_SOFT_RESET(tw_dev);
+
+ error = tw_aen_drain_queue(tw_dev);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: AEN drain failed, retrying.\n", tw_dev->host->host_no);
+ tries++;
+ continue;
+ }
+
+ /* Check for controller errors */
+ if (tw_check_errors(tw_dev)) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Controller errors found, retrying.\n", tw_dev->host->host_no);
+ tries++;
+ continue;
+ }
+
+ /* Now the controller is in a good state */
+ break;
+ }
+
+ if (tries >= TW_MAX_RESET_TRIES) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Controller errors, card not responding, check all cabling.\n", tw_dev->host->host_no);
+ return 1;
+ }
+
+ error = tw_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Connection initialization failed.\n", tw_dev->host->host_no);
+ return 1;
+ }
+
+ error = tw_setfeature(tw_dev, 2, 1, &c);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: Unable to set features for card, probable old firmware or card.\n");
+ }
+
+ return 0;
+} /* End tw_reset_sequence() */
+
+/* This function will initialize the fields of a device extension */
+static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
+{
+ int i, error=0;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_initialize_device_extension()\n");
+
+ /* Initialize command packet buffers */
+ error = tw_allocate_memory(tw_dev, sizeof(TW_Command), 0);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: Command packet memory allocation failed.\n");
+ return 1;
+ }
+
+ /* Initialize generic buffer */
+ error = tw_allocate_memory(tw_dev, sizeof(TW_Sector), 1);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: Generic memory allocation failed.\n");
+ return 1;
+ }
+
+ for (i=0;i<TW_Q_LENGTH;i++) {
+ tw_dev->free_queue[i] = i;
+ tw_dev->state[i] = TW_S_INITIAL;
+ }
+
+ tw_dev->pending_head = TW_Q_START;
+ tw_dev->pending_tail = TW_Q_START;
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+ mutex_init(&tw_dev->ioctl_lock);
+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
+
+ return 0;
+} /* End tw_initialize_device_extension() */
+
+/* This function will reset a device extension */
+static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
+{
+ int i = 0;
+ struct scsi_cmnd *srb;
+ unsigned long flags = 0;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_reset_device_extension()\n");
+
+ set_bit(TW_IN_RESET, &tw_dev->flags);
+ TW_DISABLE_INTERRUPTS(tw_dev);
+ TW_MASK_COMMAND_INTERRUPT(tw_dev);
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
+
+ /* Abort all requests that are in progress */
+ for (i=0;i<TW_Q_LENGTH;i++) {
+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ (tw_dev->state[i] != TW_S_INITIAL) &&
+ (tw_dev->state[i] != TW_S_COMPLETED)) {
+ srb = tw_dev->srb[i];
+ if (srb != NULL) {
+ srb->result = (DID_RESET << 16);
+ scsi_dma_unmap(srb);
+ srb->scsi_done(srb);
+ }
+ }
+ }
+
+ /* Reset queues and counts */
+ for (i=0;i<TW_Q_LENGTH;i++) {
+ tw_dev->free_queue[i] = i;
+ tw_dev->state[i] = TW_S_INITIAL;
+ }
+ tw_dev->free_head = TW_Q_START;
+ tw_dev->free_tail = TW_Q_START;
+ tw_dev->posted_request_count = 0;
+ tw_dev->pending_request_count = 0;
+ tw_dev->pending_head = TW_Q_START;
+ tw_dev->pending_tail = TW_Q_START;
+ tw_dev->reset_print = 0;
+
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
+
+ if (tw_reset_sequence(tw_dev)) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Reset sequence failed.\n", tw_dev->host->host_no);
+ return 1;
+ }
+
+ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
+ clear_bit(TW_IN_RESET, &tw_dev->flags);
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+
+ return 0;
+} /* End tw_reset_device_extension() */
+
+/* This funciton returns unit geometry in cylinders/heads/sectors */
+static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads, sectors, cylinders;
+ TW_Device_Extension *tw_dev;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n");
+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
+
+ heads = 64;
+ sectors = 32;
+ cylinders = sector_div(capacity, heads * sectors);
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ cylinders = sector_div(capacity, heads * sectors);
+ }
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam(): heads = %d, sectors = %d, cylinders = %d\n", heads, sectors, cylinders);
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+} /* End tw_scsi_biosparam() */
+
+/* This is the new scsi eh reset function */
+static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt)
+{
+ TW_Device_Extension *tw_dev=NULL;
+ int retval = FAILED;
+
+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+ tw_dev->num_resets++;
+
+ sdev_printk(KERN_WARNING, SCpnt->device,
+ "WARNING: Command (0x%x) timed out, resetting card.\n",
+ SCpnt->cmnd[0]);
+
+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
+ mutex_lock(&tw_dev->ioctl_lock);
+
+ /* Now reset the card and some of the device extension data */
+ if (tw_reset_device_extension(tw_dev)) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Reset failed.\n", tw_dev->host->host_no);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ mutex_unlock(&tw_dev->ioctl_lock);
+ return retval;
+} /* End tw_scsi_eh_reset() */
+
+/* This function handles scsi inquiry commands */
+static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Param *param;
+ TW_Command *command_packet;
+ unsigned long command_que_value;
+ unsigned long param_value;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry()\n");
+
+ /* Initialize command packet */
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ if (command_packet == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad command packet virtual address.\n");
+ return 1;
+ }
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.parameter_count = 1;
+
+ /* Now setup the param */
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ memset(param, 0, sizeof(TW_Sector));
+ param->table_id = 3; /* unit summary table */
+ param->parameter_id = 3; /* unitsstatus parameter */
+ param->parameter_size_bytes = TW_MAX_UNITS;
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad alignment physical address.\n");
+ return 1;
+ }
+
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now try to post the command packet */
+ tw_post_command_packet(tw_dev, request_id);
+
+ return 0;
+} /* End tw_scsiop_inquiry() */
+
+static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
+ void *data, unsigned int len)
+{
+ scsi_sg_copy_from_buffer(tw_dev->srb[request_id], data, len);
+}
+
+/* This function is called by the isr to complete an inquiry command */
+static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ unsigned char *is_unit_present;
+ unsigned char request_buffer[36];
+ TW_Param *param;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry_complete()\n");
+
+ memset(request_buffer, 0, sizeof(request_buffer));
+ request_buffer[0] = TYPE_DISK; /* Peripheral device type */
+ request_buffer[1] = 0; /* Device type modifier */
+ request_buffer[2] = 0; /* No ansi/iso compliance */
+ request_buffer[4] = 31; /* Additional length */
+ memcpy(&request_buffer[8], "3ware ", 8); /* Vendor ID */
+ sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id);
+ memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3);
+ tw_transfer_internal(tw_dev, request_id, request_buffer,
+ sizeof(request_buffer));
+
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ if (param == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry_complete(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ is_unit_present = &(param->data[0]);
+
+ if (is_unit_present[tw_dev->srb[request_id]->device->id] & TW_UNIT_ONLINE) {
+ tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 1;
+ } else {
+ tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 0;
+ tw_dev->srb[request_id]->result = (DID_BAD_TARGET << 16);
+ return TW_ISR_DONT_RESULT;
+ }
+
+ return 0;
+} /* End tw_scsiop_inquiry_complete() */
+
+/* This function handles scsi mode_sense commands */
+static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Param *param;
+ TW_Command *command_packet;
+ unsigned long command_que_value;
+ unsigned long param_value;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense()\n");
+
+ /* Only page control = 0, page code = 0x8 (cache page) supported */
+ if (tw_dev->srb[request_id]->cmnd[2] != 0x8) {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ tw_dev->srb[request_id]->result = (DID_OK << 16);
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ return 0;
+ }
+
+ /* Now read firmware cache setting for this unit */
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ if (command_packet == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad command packet virtual address.\n");
+ return 1;
+ }
+
+ /* Setup the command packet */
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.parameter_count = 1;
+
+ /* Setup the param */
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad alignment virtual address.\n");
+ return 1;
+ }
+
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ memset(param, 0, sizeof(TW_Sector));
+ param->table_id = TW_UNIT_INFORMATION_TABLE_BASE + tw_dev->srb[request_id]->device->id;
+ param->parameter_id = 7; /* unit flags */
+ param->parameter_size_bytes = 1;
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad alignment physical address.\n");
+ return 1;
+ }
+
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now try to post the command packet */
+ tw_post_command_packet(tw_dev, request_id);
+
+ return 0;
+} /* End tw_scsiop_mode_sense() */
+
+/* This function is called by the isr to complete a mode sense command */
+static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Param *param;
+ unsigned char *flags;
+ unsigned char request_buffer[8];
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n");
+
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ if (param == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_mode_sense_complete(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ flags = (char *)&(param->data[0]);
+ memset(request_buffer, 0, sizeof(request_buffer));
+
+ request_buffer[0] = 0xf; /* mode data length */
+ request_buffer[1] = 0; /* default medium type */
+ request_buffer[2] = 0x10; /* dpo/fua support on */
+ request_buffer[3] = 0; /* no block descriptors */
+ request_buffer[4] = 0x8; /* caching page */
+ request_buffer[5] = 0xa; /* page length */
+ if (*flags & 0x1)
+ request_buffer[6] = 0x5; /* WCE on, RCD on */
+ else
+ request_buffer[6] = 0x1; /* WCE off, RCD on */
+ tw_transfer_internal(tw_dev, request_id, request_buffer,
+ sizeof(request_buffer));
+
+ return 0;
+} /* End tw_scsiop_mode_sense_complete() */
+
+/* This function handles scsi read_capacity commands */
+static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Param *param;
+ TW_Command *command_packet;
+ unsigned long command_que_value;
+ unsigned long param_value;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity()\n");
+
+ /* Initialize command packet */
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+
+ if (command_packet == NULL) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad command packet virtual address.\n");
+ return 1;
+ }
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->unit__hostid = TW_UNITHOST_IN(0, tw_dev->srb[request_id]->device->id);
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.block_count = 1;
+
+ /* Now setup the param */
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ memset(param, 0, sizeof(TW_Sector));
+ param->table_id = TW_UNIT_INFORMATION_TABLE_BASE +
+ tw_dev->srb[request_id]->device->id;
+ param->parameter_id = 4; /* unitcapacity parameter */
+ param->parameter_size_bytes = 4;
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad alignment physical address.\n");
+ return 1;
+ }
+
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now try to post the command to the board */
+ tw_post_command_packet(tw_dev, request_id);
+
+ return 0;
+} /* End tw_scsiop_read_capacity() */
+
+/* This function is called by the isr to complete a readcapacity command */
+static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ unsigned char *param_data;
+ u32 capacity;
+ char buff[8];
+ TW_Param *param;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n");
+
+ memset(buff, 0, sizeof(buff));
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ if (param == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param_data = &(param->data[0]);
+
+ capacity = (param_data[3] << 24) | (param_data[2] << 16) |
+ (param_data[1] << 8) | param_data[0];
+
+ /* Subtract one sector to fix get last sector ioctl */
+ capacity -= 1;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete(): Capacity = 0x%x.\n", capacity);
+
+ /* Number of LBA's */
+ buff[0] = (capacity >> 24);
+ buff[1] = (capacity >> 16) & 0xff;
+ buff[2] = (capacity >> 8) & 0xff;
+ buff[3] = capacity & 0xff;
+
+ /* Block size in bytes (512) */
+ buff[4] = (TW_BLOCK_SIZE >> 24);
+ buff[5] = (TW_BLOCK_SIZE >> 16) & 0xff;
+ buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff;
+ buff[7] = TW_BLOCK_SIZE & 0xff;
+
+ tw_transfer_internal(tw_dev, request_id, buff, sizeof(buff));
+
+ return 0;
+} /* End tw_scsiop_read_capacity_complete() */
+
+/* This function handles scsi read or write commands */
+static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Command *command_packet;
+ unsigned long command_que_value;
+ u32 lba = 0x0, num_sectors = 0x0;
+ int i, use_sg;
+ struct scsi_cmnd *srb;
+ struct scatterlist *sglist, *sg;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n");
+
+ srb = tw_dev->srb[request_id];
+
+ sglist = scsi_sglist(srb);
+ if (!sglist) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n");
+ return 1;
+ }
+
+ /* Initialize command packet */
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ if (command_packet == NULL) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): Bad command packet virtual address.\n");
+ return 1;
+ }
+
+ if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == READ_10) {
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(3, TW_OP_READ);
+ } else {
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(3, TW_OP_WRITE);
+ }
+
+ command_packet->size = 3;
+ command_packet->request_id = request_id;
+ command_packet->unit__hostid = TW_UNITHOST_IN(0, srb->device->id);
+ command_packet->status = 0;
+ command_packet->flags = 0;
+
+ if (srb->cmnd[0] == WRITE_10) {
+ if ((srb->cmnd[1] & 0x8) || (srb->cmnd[1] & 0x10))
+ command_packet->flags = 1;
+ }
+
+ if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) {
+ lba = ((u32)srb->cmnd[1] << 16) | ((u32)srb->cmnd[2] << 8) | (u32)srb->cmnd[3];
+ num_sectors = (u32)srb->cmnd[4];
+ } else {
+ lba = ((u32)srb->cmnd[2] << 24) | ((u32)srb->cmnd[3] << 16) | ((u32)srb->cmnd[4] << 8) | (u32)srb->cmnd[5];
+ num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
+ }
+
+ /* Update sector statistic */
+ tw_dev->sector_count = num_sectors;
+ if (tw_dev->sector_count > tw_dev->max_sector_count)
+ tw_dev->max_sector_count = tw_dev->sector_count;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): lba = 0x%x num_sectors = 0x%x\n", lba, num_sectors);
+ command_packet->byte8.io.lba = lba;
+ command_packet->byte6.block_count = num_sectors;
+
+ use_sg = scsi_dma_map(srb);
+ if (use_sg <= 0)
+ return 1;
+
+ scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
+ command_packet->byte8.io.sgl[i].address = sg_dma_address(sg);
+ command_packet->byte8.io.sgl[i].length = sg_dma_len(sg);
+ command_packet->size+=2;
+ }
+
+ /* Update SG statistics */
+ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
+
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now try to post the command to the board */
+ tw_post_command_packet(tw_dev, request_id);
+
+ return 0;
+} /* End tw_scsiop_read_write() */
+
+/* This function will handle the request sense scsi command */
+static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id)
+{
+ char request_buffer[18];
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_request_sense()\n");
+
+ memset(request_buffer, 0, sizeof(request_buffer));
+ request_buffer[0] = 0x70; /* Immediate fixed format */
+ request_buffer[7] = 10; /* minimum size per SPC: 18 bytes */
+ /* leave all other fields zero, giving effectively NO_SENSE return */
+ tw_transfer_internal(tw_dev, request_id, request_buffer,
+ sizeof(request_buffer));
+
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+
+ /* If we got a request_sense, we probably want a reset, return error */
+ tw_dev->srb[request_id]->result = (DID_ERROR << 16);
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+
+ return 0;
+} /* End tw_scsiop_request_sense() */
+
+/* This function will handle synchronize cache scsi command */
+static int tw_scsiop_synchronize_cache(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Command *command_packet;
+ unsigned long command_que_value;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_synchronize_cache()\n");
+
+ /* Send firmware flush command for this unit */
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ if (command_packet == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_synchronize_cache(): Bad command packet virtual address.\n");
+ return 1;
+ }
+
+ /* Setup the command packet */
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(0, TW_OP_FLUSH_CACHE);
+ command_packet->size = 2;
+ command_packet->request_id = request_id;
+ command_packet->unit__hostid = TW_UNITHOST_IN(0, tw_dev->srb[request_id]->device->id);
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.parameter_count = 1;
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_synchronize_cache(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now try to post the command packet */
+ tw_post_command_packet(tw_dev, request_id);
+
+ return 0;
+} /* End tw_scsiop_synchronize_cache() */
+
+/* This function will handle test unit ready scsi command */
+static int tw_scsiop_test_unit_ready(TW_Device_Extension *tw_dev, int request_id)
+{
+ TW_Param *param;
+ TW_Command *command_packet;
+ unsigned long command_que_value;
+ unsigned long param_value;
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_test_unit_ready()\n");
+
+ /* Initialize command packet */
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ if (command_packet == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad command packet virtual address.\n");
+ return 1;
+ }
+ memset(command_packet, 0, sizeof(TW_Sector));
+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
+ command_packet->size = 4;
+ command_packet->request_id = request_id;
+ command_packet->status = 0;
+ command_packet->flags = 0;
+ command_packet->byte6.parameter_count = 1;
+
+ /* Now setup the param */
+ if (tw_dev->alignment_virtual_address[request_id] == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ memset(param, 0, sizeof(TW_Sector));
+ param->table_id = 3; /* unit summary table */
+ param->parameter_id = 3; /* unitsstatus parameter */
+ param->parameter_size_bytes = TW_MAX_UNITS;
+ param_value = tw_dev->alignment_physical_address[request_id];
+ if (param_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad alignment physical address.\n");
+ return 1;
+ }
+
+ command_packet->byte8.param.sgl[0].address = param_value;
+ command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
+ if (command_que_value == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready(): Bad command packet physical address.\n");
+ return 1;
+ }
+
+ /* Now try to post the command packet */
+ tw_post_command_packet(tw_dev, request_id);
+
+ return 0;
+} /* End tw_scsiop_test_unit_ready() */
+
+/* This function is called by the isr to complete a testunitready command */
+static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int request_id)
+{
+ unsigned char *is_unit_present;
+ TW_Param *param;
+
+ dprintk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready_complete()\n");
+
+ param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
+ if (param == NULL) {
+ printk(KERN_WARNING "3w-xxxx: tw_scsiop_test_unit_ready_complete(): Bad alignment virtual address.\n");
+ return 1;
+ }
+ is_unit_present = &(param->data[0]);
+
+ if (is_unit_present[tw_dev->srb[request_id]->device->id] & TW_UNIT_ONLINE) {
+ tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 1;
+ } else {
+ tw_dev->is_unit_present[tw_dev->srb[request_id]->device->id] = 0;
+ tw_dev->srb[request_id]->result = (DID_BAD_TARGET << 16);
+ return TW_ISR_DONT_RESULT;
+ }
+
+ return 0;
+} /* End tw_scsiop_test_unit_ready_complete() */
+
+/* This is the main scsi queue function to handle scsi opcodes */
+static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ unsigned char *command = SCpnt->cmnd;
+ int request_id = 0;
+ int retval = 1;
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
+
+ /* If we are resetting due to timed out ioctl, report as busy */
+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* Save done function into Scsi_Cmnd struct */
+ SCpnt->scsi_done = done;
+
+ /* Queue the command and get a request id */
+ tw_state_request_start(tw_dev, &request_id);
+
+ /* Save the scsi command for use by the ISR */
+ tw_dev->srb[request_id] = SCpnt;
+
+ switch (*command) {
+ case READ_10:
+ case READ_6:
+ case WRITE_10:
+ case WRITE_6:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ/WRITE.\n");
+ retval = tw_scsiop_read_write(tw_dev, request_id);
+ break;
+ case TEST_UNIT_READY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught TEST_UNIT_READY.\n");
+ retval = tw_scsiop_test_unit_ready(tw_dev, request_id);
+ break;
+ case INQUIRY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught INQUIRY.\n");
+ retval = tw_scsiop_inquiry(tw_dev, request_id);
+ break;
+ case READ_CAPACITY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ_CAPACITY.\n");
+ retval = tw_scsiop_read_capacity(tw_dev, request_id);
+ break;
+ case REQUEST_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught REQUEST_SENSE.\n");
+ retval = tw_scsiop_request_sense(tw_dev, request_id);
+ break;
+ case MODE_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught MODE_SENSE.\n");
+ retval = tw_scsiop_mode_sense(tw_dev, request_id);
+ break;
+ case SYNCHRONIZE_CACHE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught SYNCHRONIZE_CACHE.\n");
+ retval = tw_scsiop_synchronize_cache(tw_dev, request_id);
+ break;
+ case TW_IOCTL:
+ printk(KERN_WARNING "3w-xxxx: SCSI_IOCTL_SEND_COMMAND deprecated, please update your 3ware tools.\n");
+ break;
+ default:
+ printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ SCpnt->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
+ done(SCpnt);
+ retval = 0;
+ }
+ if (retval) {
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ SCpnt->result = (DID_ERROR << 16);
+ done(SCpnt);
+ retval = 0;
+ }
+ return retval;
+} /* End tw_scsi_queue() */
+
+static DEF_SCSI_QCMD(tw_scsi_queue)
+
+/* This function is the interrupt service routine */
+static irqreturn_t tw_interrupt(int irq, void *dev_instance)
+{
+ int request_id;
+ u32 status_reg_value;
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
+ TW_Response_Queue response_que;
+ int error = 0, retval = 0;
+ TW_Command *command_packet;
+ int handled = 0;
+
+ /* Get the host lock for io completions */
+ spin_lock(tw_dev->host->host_lock);
+
+ /* Read the registers */
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+
+ /* Check if this is our interrupt, otherwise bail */
+ if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
+ goto tw_interrupt_bail;
+
+ handled = 1;
+
+ /* If we are resetting, bail */
+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
+ goto tw_interrupt_bail;
+
+ /* Check controller for errors */
+ if (tw_check_bits(status_reg_value)) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_interrupt(): Unexpected bits.\n");
+ if (tw_decode_bits(tw_dev, status_reg_value, 1)) {
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+ goto tw_interrupt_bail;
+ }
+ }
+
+ /* Handle host interrupt */
+ if (status_reg_value & TW_STATUS_HOST_INTERRUPT) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): Received host interrupt.\n");
+ TW_CLEAR_HOST_INTERRUPT(tw_dev);
+ }
+
+ /* Handle attention interrupt */
+ if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): Received attention interrupt.\n");
+ TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
+ tw_state_request_start(tw_dev, &request_id);
+ error = tw_aen_read_queue(tw_dev, request_id);
+ if (error) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Error reading aen queue.\n", tw_dev->host->host_no);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ }
+ }
+
+ /* Handle command interrupt */
+ if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
+ /* Drain as many pending commands as we can */
+ while (tw_dev->pending_request_count > 0) {
+ request_id = tw_dev->pending_queue[tw_dev->pending_head];
+ if (tw_dev->state[request_id] != TW_S_PENDING) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Found request id that wasn't pending.\n", tw_dev->host->host_no);
+ break;
+ }
+ if (tw_post_command_packet(tw_dev, request_id)==0) {
+ if (tw_dev->pending_head == TW_Q_LENGTH-1) {
+ tw_dev->pending_head = TW_Q_START;
+ } else {
+ tw_dev->pending_head = tw_dev->pending_head + 1;
+ }
+ tw_dev->pending_request_count--;
+ } else {
+ /* If we get here, we will continue re-posting on the next command interrupt */
+ break;
+ }
+ }
+ /* If there are no more pending requests, we mask command interrupt */
+ if (tw_dev->pending_request_count == 0)
+ TW_MASK_COMMAND_INTERRUPT(tw_dev);
+ }
+
+ /* Handle response interrupt */
+ if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
+ /* Drain the response queue from the board */
+ while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
+ /* Read response queue register */
+ response_que.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ request_id = TW_RESID_OUT(response_que.response_id);
+ command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
+ error = 0;
+
+ /* Check for bad response */
+ if (command_packet->status != 0) {
+ /* If internal command, don't error, don't fill sense */
+ if (tw_dev->srb[request_id] == NULL) {
+ tw_decode_sense(tw_dev, request_id, 0);
+ } else {
+ error = tw_decode_sense(tw_dev, request_id, 1);
+ }
+ }
+
+ /* Check for correct state */
+ if (tw_dev->state[request_id] != TW_S_POSTED) {
+ if (tw_dev->srb[request_id] != NULL) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Received a request id that wasn't posted.\n", tw_dev->host->host_no);
+ error = 1;
+ }
+ }
+
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): Response queue request id: %d.\n", request_id);
+
+ /* Check for internal command completion */
+ if (tw_dev->srb[request_id] == NULL) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_interrupt(): Found internally posted command.\n");
+ /* Check for chrdev ioctl completion */
+ if (request_id != tw_dev->chrdev_request_id) {
+ retval = tw_aen_complete(tw_dev, request_id);
+ if (retval) {
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Error completing aen.\n", tw_dev->host->host_no);
+ }
+ } else {
+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
+ wake_up(&tw_dev->ioctl_wqueue);
+ }
+ } else {
+ switch (tw_dev->srb[request_id]->cmnd[0]) {
+ case READ_10:
+ case READ_6:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught READ_10/READ_6\n");
+ break;
+ case WRITE_10:
+ case WRITE_6:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught WRITE_10/WRITE_6\n");
+ break;
+ case TEST_UNIT_READY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught TEST_UNIT_READY\n");
+ error = tw_scsiop_test_unit_ready_complete(tw_dev, request_id);
+ break;
+ case INQUIRY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught INQUIRY\n");
+ error = tw_scsiop_inquiry_complete(tw_dev, request_id);
+ break;
+ case READ_CAPACITY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught READ_CAPACITY\n");
+ error = tw_scsiop_read_capacity_complete(tw_dev, request_id);
+ break;
+ case MODE_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught MODE_SENSE\n");
+ error = tw_scsiop_mode_sense_complete(tw_dev, request_id);
+ break;
+ case SYNCHRONIZE_CACHE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_interrupt(): caught SYNCHRONIZE_CACHE\n");
+ break;
+ default:
+ printk(KERN_WARNING "3w-xxxx: case slip in tw_interrupt()\n");
+ error = 1;
+ }
+
+ /* If no error command was a success */
+ if (error == 0) {
+ tw_dev->srb[request_id]->result = (DID_OK << 16);
+ }
+
+ /* If error, command failed */
+ if (error == 1) {
+ /* Ask for a host reset */
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ }
+
+ /* Now complete the io */
+ if ((error != TW_ISR_DONT_COMPLETE)) {
+ scsi_dma_unmap(tw_dev->srb[request_id]);
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ tw_dev->posted_request_count--;
+ }
+ }
+
+ /* Check for valid status after each drain */
+ status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
+ if (tw_check_bits(status_reg_value)) {
+ dprintk(KERN_WARNING "3w-xxxx: tw_interrupt(): Unexpected bits.\n");
+ if (tw_decode_bits(tw_dev, status_reg_value, 1)) {
+ TW_CLEAR_ALL_INTERRUPTS(tw_dev);
+ goto tw_interrupt_bail;
+ }
+ }
+ }
+ }
+
+tw_interrupt_bail:
+ spin_unlock(tw_dev->host->host_lock);
+ return IRQ_RETVAL(handled);
+} /* End tw_interrupt() */
+
+/* This function tells the controller to shut down */
+static void __tw_shutdown(TW_Device_Extension *tw_dev)
+{
+ /* Disable interrupts */
+ TW_DISABLE_INTERRUPTS(tw_dev);
+
+ /* Free up the IRQ */
+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
+
+ printk(KERN_WARNING "3w-xxxx: Shutting down host %d.\n", tw_dev->host->host_no);
+
+ /* Tell the card we are shutting down */
+ if (tw_initconnection(tw_dev, 1)) {
+ printk(KERN_WARNING "3w-xxxx: Connection shutdown failed.\n");
+ } else {
+ printk(KERN_WARNING "3w-xxxx: Shutdown complete.\n");
+ }
+
+ /* Clear all interrupts just before exit */
+ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
+} /* End __tw_shutdown() */
+
+/* Wrapper for __tw_shutdown */
+static void tw_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ __tw_shutdown(tw_dev);
+} /* End tw_shutdown() */
+
+/* This function gets called when a disk is coming online */
+static int tw_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End tw_slave_configure() */
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "3ware Storage Controller",
+ .queuecommand = tw_scsi_queue,
+ .eh_host_reset_handler = tw_scsi_eh_reset,
+ .bios_param = tw_scsi_biosparam,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = TW_Q_LENGTH-2,
+ .slave_configure = tw_slave_configure,
+ .this_id = -1,
+ .sg_tablesize = TW_MAX_SGL_LENGTH,
+ .max_sectors = TW_MAX_SECTORS,
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = tw_host_attrs,
+ .emulated = 1,
+ .no_write_same = 1,
+};
+
+/* This function will probe and initialize a card */
+static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+{
+ struct Scsi_Host *host = NULL;
+ TW_Device_Extension *tw_dev;
+ int retval = -ENODEV;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ printk(KERN_WARNING "3w-xxxx: Failed to enable pci device.");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+ if (retval) {
+ printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
+ goto out_disable_device;
+ }
+
+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
+ if (!host) {
+ printk(KERN_WARNING "3w-xxxx: Failed to allocate memory for device extension.");
+ retval = -ENOMEM;
+ goto out_disable_device;
+ }
+ tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ /* Save values to device extension */
+ tw_dev->host = host;
+ tw_dev->tw_pci_dev = pdev;
+
+ if (tw_initialize_device_extension(tw_dev)) {
+ printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+ goto out_free_device_extension;
+ }
+
+ /* Request IO regions */
+ retval = pci_request_regions(pdev, "3w-xxxx");
+ if (retval) {
+ printk(KERN_WARNING "3w-xxxx: Failed to get mem region.");
+ goto out_free_device_extension;
+ }
+
+ /* Save base address */
+ tw_dev->base_addr = pci_resource_start(pdev, 0);
+ if (!tw_dev->base_addr) {
+ printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+ goto out_release_mem_region;
+ }
+
+ /* Disable interrupts on the card */
+ TW_DISABLE_INTERRUPTS(tw_dev);
+
+ /* Initialize the card */
+ if (tw_reset_sequence(tw_dev))
+ goto out_release_mem_region;
+
+ /* Set host specific parameters */
+ host->max_id = TW_MAX_UNITS;
+ host->max_cmd_len = TW_MAX_CDB_LEN;
+
+ /* Luns and channels aren't supported by adapter */
+ host->max_lun = 0;
+ host->max_channel = 0;
+
+ /* Register the card with the kernel SCSI layer */
+ retval = scsi_add_host(host, &pdev->dev);
+ if (retval) {
+ printk(KERN_WARNING "3w-xxxx: scsi add host failed");
+ goto out_release_mem_region;
+ }
+
+ pci_set_drvdata(pdev, host);
+
+ printk(KERN_WARNING "3w-xxxx: scsi%d: Found a 3ware Storage Controller at 0x%x, IRQ: %d.\n", host->host_no, tw_dev->base_addr, pdev->irq);
+
+ /* Now setup the interrupt handler */
+ retval = request_irq(pdev->irq, tw_interrupt, IRQF_SHARED, "3w-xxxx", tw_dev);
+ if (retval) {
+ printk(KERN_WARNING "3w-xxxx: Error requesting IRQ.");
+ goto out_remove_host;
+ }
+
+ tw_device_extension_list[tw_device_extension_count] = tw_dev;
+ tw_device_extension_count++;
+
+ /* Re-enable interrupts on the card */
+ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
+
+ /* Finally, scan the host */
+ scsi_scan_host(host);
+
+ if (twe_major == -1) {
+ if ((twe_major = register_chrdev (0, "twe", &tw_fops)) < 0)
+ printk(KERN_WARNING "3w-xxxx: Failed to register character device.");
+ }
+ return 0;
+
+out_remove_host:
+ scsi_remove_host(host);
+out_release_mem_region:
+ pci_release_regions(pdev);
+out_free_device_extension:
+ tw_free_device_extension(tw_dev);
+ scsi_host_put(host);
+out_disable_device:
+ pci_disable_device(pdev);
+
+ return retval;
+} /* End tw_probe() */
+
+/* This function is called to remove a device */
+static void tw_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
+
+ scsi_remove_host(tw_dev->host);
+
+ /* Unregister character device */
+ if (twe_major >= 0) {
+ unregister_chrdev(twe_major, "twe");
+ twe_major = -1;
+ }
+
+ /* Shutdown the card */
+ __tw_shutdown(tw_dev);
+
+ /* Free up the mem region */
+ pci_release_regions(pdev);
+
+ /* Free up device extension resources */
+ tw_free_device_extension(tw_dev);
+
+ scsi_host_put(tw_dev->host);
+ pci_disable_device(pdev);
+ tw_device_extension_count--;
+} /* End tw_remove() */
+
+/* PCI Devices supported by this driver */
+static struct pci_device_id tw_pci_tbl[] = {
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, tw_pci_tbl);
+
+/* pci_driver initializer */
+static struct pci_driver tw_driver = {
+ .name = "3w-xxxx",
+ .id_table = tw_pci_tbl,
+ .probe = tw_probe,
+ .remove = tw_remove,
+ .shutdown = tw_shutdown,
+};
+
+/* This function is called on driver initialization */
+static int __init tw_init(void)
+{
+ printk(KERN_WARNING "3ware Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
+
+ return pci_register_driver(&tw_driver);
+} /* End tw_init() */
+
+/* This function is called on driver exit */
+static void __exit tw_exit(void)
+{
+ pci_unregister_driver(&tw_driver);
+} /* End tw_exit() */
+
+module_init(tw_init);
+module_exit(tw_exit);
+
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
new file mode 100644
index 000000000..6f65e663d
--- /dev/null
+++ b/drivers/scsi/3w-xxxx.h
@@ -0,0 +1,430 @@
+/*
+ 3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
+
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Joel Jacobson <linux@3ware.com>
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ Brad Strand <linux@3ware.com>
+
+ Copyright (C) 1999-2010 3ware Inc.
+
+ Kernel compatibility By: Andre Hedrick <andre@suse.com>
+ Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
+ linuxraid@lsi.com
+
+ For more information, goto:
+ http://www.lsi.com
+*/
+
+#ifndef _3W_XXXX_H
+#define _3W_XXXX_H
+
+#include <linux/types.h>
+
+/* AEN strings */
+static char *tw_aen_string[] = {
+ [0x000] = "INFO: AEN queue empty",
+ [0x001] = "INFO: Soft reset occurred",
+ [0x002] = "ERROR: Unit degraded: Unit #",
+ [0x003] = "ERROR: Controller error",
+ [0x004] = "ERROR: Rebuild failed: Unit #",
+ [0x005] = "INFO: Rebuild complete: Unit #",
+ [0x006] = "ERROR: Incomplete unit detected: Unit #",
+ [0x007] = "INFO: Initialization complete: Unit #",
+ [0x008] = "WARNING: Unclean shutdown detected: Unit #",
+ [0x009] = "WARNING: ATA port timeout: Port #",
+ [0x00A] = "ERROR: Drive error: Port #",
+ [0x00B] = "INFO: Rebuild started: Unit #",
+ [0x00C] = "INFO: Initialization started: Unit #",
+ [0x00D] = "ERROR: Logical unit deleted: Unit #",
+ [0x00F] = "WARNING: SMART threshold exceeded: Port #",
+ [0x021] = "WARNING: ATA UDMA downgrade: Port #",
+ [0x022] = "WARNING: ATA UDMA upgrade: Port #",
+ [0x023] = "WARNING: Sector repair occurred: Port #",
+ [0x024] = "ERROR: SBUF integrity check failure",
+ [0x025] = "ERROR: Lost cached write: Port #",
+ [0x026] = "ERROR: Drive ECC error detected: Port #",
+ [0x027] = "ERROR: DCB checksum error: Port #",
+ [0x028] = "ERROR: DCB unsupported version: Port #",
+ [0x029] = "INFO: Verify started: Unit #",
+ [0x02A] = "ERROR: Verify failed: Port #",
+ [0x02B] = "INFO: Verify complete: Unit #",
+ [0x02C] = "WARNING: Overwrote bad sector during rebuild: Port #",
+ [0x02D] = "ERROR: Encountered bad sector during rebuild: Port #",
+ [0x02E] = "ERROR: Replacement drive is too small: Port #",
+ [0x02F] = "WARNING: Verify error: Unit not previously initialized: Unit #",
+ [0x030] = "ERROR: Drive not supported: Port #"
+};
+
+/*
+ Sense key lookup table
+ Format: ESDC/flags,SenseKey,AdditionalSenseCode,AdditionalSenseCodeQualifier
+*/
+static unsigned char tw_sense_table[][4] =
+{
+ /* Codes for newer firmware */
+ // ATA Error SCSI Error
+ {0x01, 0x03, 0x13, 0x00}, // Address mark not found Address mark not found for data field
+ {0x04, 0x0b, 0x00, 0x00}, // Aborted command Aborted command
+ {0x10, 0x0b, 0x14, 0x00}, // ID not found Recorded entity not found
+ {0x40, 0x03, 0x11, 0x00}, // Uncorrectable ECC error Unrecovered read error
+ {0x61, 0x04, 0x00, 0x00}, // Device fault Hardware error
+ {0x84, 0x0b, 0x47, 0x00}, // Data CRC error SCSI parity error
+ {0xd0, 0x0b, 0x00, 0x00}, // Device busy Aborted command
+ {0xd1, 0x0b, 0x00, 0x00}, // Device busy Aborted command
+ {0x37, 0x02, 0x04, 0x00}, // Unit offline Not ready
+ {0x09, 0x02, 0x04, 0x00}, // Unrecovered disk error Not ready
+
+ /* Codes for older firmware */
+ // 3ware Error SCSI Error
+ {0x51, 0x0b, 0x00, 0x00} // Unspecified Aborted command
+};
+
+/* Control register bit definitions */
+#define TW_CONTROL_CLEAR_HOST_INTERRUPT 0x00080000
+#define TW_CONTROL_CLEAR_ATTENTION_INTERRUPT 0x00040000
+#define TW_CONTROL_MASK_COMMAND_INTERRUPT 0x00020000
+#define TW_CONTROL_MASK_RESPONSE_INTERRUPT 0x00010000
+#define TW_CONTROL_UNMASK_COMMAND_INTERRUPT 0x00008000
+#define TW_CONTROL_UNMASK_RESPONSE_INTERRUPT 0x00004000
+#define TW_CONTROL_CLEAR_ERROR_STATUS 0x00000200
+#define TW_CONTROL_ISSUE_SOFT_RESET 0x00000100
+#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080
+#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040
+#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020
+#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
+#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
+#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
+#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
+
+/* Status register bit definitions */
+#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
+#define TW_STATUS_MINOR_VERSION_MASK 0x0F000000
+#define TW_STATUS_PCI_PARITY_ERROR 0x00800000
+#define TW_STATUS_QUEUE_ERROR 0x00400000
+#define TW_STATUS_MICROCONTROLLER_ERROR 0x00200000
+#define TW_STATUS_PCI_ABORT 0x00100000
+#define TW_STATUS_HOST_INTERRUPT 0x00080000
+#define TW_STATUS_ATTENTION_INTERRUPT 0x00040000
+#define TW_STATUS_COMMAND_INTERRUPT 0x00020000
+#define TW_STATUS_RESPONSE_INTERRUPT 0x00010000
+#define TW_STATUS_COMMAND_QUEUE_FULL 0x00008000
+#define TW_STATUS_RESPONSE_QUEUE_EMPTY 0x00004000
+#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
+#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
+#define TW_STATUS_ALL_INTERRUPTS 0x000F0000
+#define TW_STATUS_CLEARABLE_BITS 0x00D00000
+#define TW_STATUS_EXPECTED_BITS 0x00002000
+#define TW_STATUS_UNEXPECTED_BITS 0x00F00008
+#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
+
+/* RESPONSE QUEUE BIT DEFINITIONS */
+#define TW_RESPONSE_ID_MASK 0x00000FF0
+
+/* PCI related defines */
+#define TW_IO_ADDRESS_RANGE 0x10
+#define TW_DEVICE_NAME "3ware Storage Controller"
+#define TW_VENDOR_ID (0x13C1) /* 3ware */
+#define TW_DEVICE_ID (0x1000) /* Storage Controller */
+#define TW_DEVICE_ID2 (0x1001) /* 7000 series controller */
+#define TW_NUMDEVICES 2
+#define TW_PCI_CLEAR_PARITY_ERRORS 0xc100
+#define TW_PCI_CLEAR_PCI_ABORT 0x2000
+
+/* Command packet opcodes */
+#define TW_OP_NOP 0x0
+#define TW_OP_INIT_CONNECTION 0x1
+#define TW_OP_READ 0x2
+#define TW_OP_WRITE 0x3
+#define TW_OP_VERIFY 0x4
+#define TW_OP_GET_PARAM 0x12
+#define TW_OP_SET_PARAM 0x13
+#define TW_OP_SECTOR_INFO 0x1a
+#define TW_OP_AEN_LISTEN 0x1c
+#define TW_OP_FLUSH_CACHE 0x0e
+#define TW_CMD_PACKET 0x1d
+#define TW_CMD_PACKET_WITH_DATA 0x1f
+
+/* Asynchronous Event Notification (AEN) Codes */
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_DEGRADED_MIRROR 0x0002
+#define TW_AEN_CONTROLLER_ERROR 0x0003
+#define TW_AEN_REBUILD_FAIL 0x0004
+#define TW_AEN_REBUILD_DONE 0x0005
+#define TW_AEN_QUEUE_FULL 0x00ff
+#define TW_AEN_TABLE_UNDEFINED 0x15
+#define TW_AEN_APORT_TIMEOUT 0x0009
+#define TW_AEN_DRIVE_ERROR 0x000A
+#define TW_AEN_SMART_FAIL 0x000F
+#define TW_AEN_SBUF_FAIL 0x0024
+
+/* Misc defines */
+#define TW_ALIGNMENT_6000 64 /* 64 bytes */
+#define TW_ALIGNMENT_7000 4 /* 4 bytes */
+#define TW_MAX_UNITS 16
+#define TW_COMMAND_ALIGNMENT_MASK 0x1ff
+#define TW_INIT_MESSAGE_CREDITS 0x100
+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
+#define TW_POLL_MAX_RETRIES 20000
+#define TW_MAX_SGL_LENGTH 62
+#define TW_ATA_PASS_SGL_MAX 60
+#define TW_Q_LENGTH 256
+#define TW_Q_START 0
+#define TW_MAX_SLOT 32
+#define TW_MAX_PCI_BUSES 255
+#define TW_MAX_RESET_TRIES 3
+#define TW_UNIT_INFORMATION_TABLE_BASE 0x300
+#define TW_MAX_CMDS_PER_LUN 254 /* 254 for io, 1 for
+ chrdev ioctl, one for
+ internal aen post */
+#define TW_BLOCK_SIZE 0x200 /* 512-byte blocks */
+#define TW_IOCTL 0x80
+#define TW_UNIT_ONLINE 1
+#define TW_IN_INTR 1
+#define TW_IN_RESET 2
+#define TW_IN_CHRDEV_IOCTL 3
+#define TW_MAX_SECTORS 256
+#define TW_MAX_IOCTL_SECTORS 512
+#define TW_AEN_WAIT_TIME 1000
+#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
+#define TW_ISR_DONT_COMPLETE 2
+#define TW_ISR_DONT_RESULT 3
+#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_DMA_MASK DMA_BIT_MASK(32)
+#define TW_MAX_CDB_LEN 16
+
+/* Bitmask macros to eliminate bitfields */
+
+/* opcode: 5, sgloffset: 3 */
+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
+
+/* reserved_1: 4, response_id: 8, reserved_2: 20 */
+#define TW_RESID_OUT(x) ((x >> 4) & 0xff)
+
+/* unit: 4, host_id: 4 */
+#define TW_UNITHOST_IN(x,y) ((x << 4) | ( y & 0xf))
+#define TW_UNIT_OUT(x) (x & 0xf)
+
+/* Macros */
+#define TW_CONTROL_REG_ADDR(x) (x->base_addr)
+#define TW_STATUS_REG_ADDR(x) (x->base_addr + 0x4)
+#define TW_COMMAND_QUEUE_REG_ADDR(x) (x->base_addr + 0x8)
+#define TW_RESPONSE_QUEUE_REG_ADDR(x) (x->base_addr + 0xC)
+#define TW_CLEAR_ALL_INTERRUPTS(x) (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_ATTENTION_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_HOST_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_DISABLE_INTERRUPTS(x) (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_MASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_UNMASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \
+ TW_CONTROL_CLEAR_HOST_INTERRUPT | \
+ TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_MASK_COMMAND_INTERRUPT | \
+ TW_CONTROL_MASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_CLEAR_ERROR_STATUS | \
+ TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_STATUS_ERRORS(x) \
+ (((x & TW_STATUS_PCI_ABORT) || \
+ (x & TW_STATUS_PCI_PARITY_ERROR) || \
+ (x & TW_STATUS_QUEUE_ERROR) || \
+ (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \
+ (x & TW_STATUS_MICROCONTROLLER_READY))
+
+#ifdef TW_DEBUG
+#define dprintk(msg...) printk(msg)
+#else
+#define dprintk(msg...) do { } while(0)
+#endif
+
+#pragma pack(1)
+
+/* Scatter Gather List Entry */
+typedef struct TAG_TW_SG_Entry {
+ u32 address;
+ u32 length;
+} TW_SG_Entry;
+
+typedef unsigned char TW_Sector[512];
+
+/* Command Packet */
+typedef struct TW_Command {
+ unsigned char opcode__sgloffset;
+ unsigned char size;
+ unsigned char request_id;
+ unsigned char unit__hostid;
+ /* Second DWORD */
+ unsigned char status;
+ unsigned char flags;
+ union {
+ unsigned short block_count;
+ unsigned short parameter_count;
+ unsigned short message_credits;
+ } byte6;
+ union {
+ struct {
+ u32 lba;
+ TW_SG_Entry sgl[TW_MAX_SGL_LENGTH];
+ u32 padding; /* pad to 512 bytes */
+ } io;
+ struct {
+ TW_SG_Entry sgl[TW_MAX_SGL_LENGTH];
+ u32 padding[2];
+ } param;
+ struct {
+ u32 response_queue_pointer;
+ u32 padding[125];
+ } init_connection;
+ struct {
+ char version[504];
+ } ioctl_miniport_version;
+ } byte8;
+} TW_Command;
+
+#pragma pack()
+
+typedef struct TAG_TW_Ioctl {
+ unsigned char opcode;
+ unsigned short table_id;
+ unsigned char parameter_id;
+ unsigned char parameter_size_bytes;
+ unsigned char unit_index;
+ unsigned char data[1];
+} TW_Ioctl;
+
+#pragma pack(1)
+
+/* Structure for new chardev ioctls */
+typedef struct TAG_TW_New_Ioctl {
+ unsigned int data_buffer_length;
+ unsigned char padding [508];
+ TW_Command firmware_command;
+ char data_buffer[1];
+} TW_New_Ioctl;
+
+/* GetParam descriptor */
+typedef struct {
+ unsigned short table_id;
+ unsigned char parameter_id;
+ unsigned char parameter_size_bytes;
+ unsigned char data[1];
+} TW_Param, *PTW_Param;
+
+/* Response queue */
+typedef union TAG_TW_Response_Queue {
+ u32 response_id;
+ u32 value;
+} TW_Response_Queue;
+
+typedef int TW_Cmd_State;
+
+#define TW_S_INITIAL 0x1 /* Initial state */
+#define TW_S_STARTED 0x2 /* Id in use */
+#define TW_S_POSTED 0x4 /* Posted to the controller */
+#define TW_S_PENDING 0x8 /* Waiting to be posted in isr */
+#define TW_S_COMPLETED 0x10 /* Completed by isr */
+#define TW_S_FINISHED 0x20 /* I/O completely done */
+#define TW_START_MASK (TW_S_STARTED | TW_S_POSTED | TW_S_PENDING | TW_S_COMPLETED)
+
+/* Command header for ATA pass-thru */
+typedef struct TAG_TW_Passthru
+{
+ unsigned char opcode__sgloffset;
+ unsigned char size;
+ unsigned char request_id;
+ unsigned char aport__hostid;
+ unsigned char status;
+ unsigned char flags;
+ unsigned short param;
+ unsigned short features;
+ unsigned short sector_count;
+ unsigned short sector_num;
+ unsigned short cylinder_lo;
+ unsigned short cylinder_hi;
+ unsigned char drive_head;
+ unsigned char command;
+ TW_SG_Entry sg_list[TW_ATA_PASS_SGL_MAX];
+ unsigned char padding[12];
+} TW_Passthru;
+
+#pragma pack()
+
+typedef struct TAG_TW_Device_Extension {
+ u32 base_addr;
+ unsigned long *alignment_virtual_address[TW_Q_LENGTH];
+ unsigned long alignment_physical_address[TW_Q_LENGTH];
+ int is_unit_present[TW_MAX_UNITS];
+ unsigned long *command_packet_virtual_address[TW_Q_LENGTH];
+ unsigned long command_packet_physical_address[TW_Q_LENGTH];
+ struct pci_dev *tw_pci_dev;
+ struct scsi_cmnd *srb[TW_Q_LENGTH];
+ unsigned char free_queue[TW_Q_LENGTH];
+ unsigned char free_head;
+ unsigned char free_tail;
+ unsigned char pending_queue[TW_Q_LENGTH];
+ unsigned char pending_head;
+ unsigned char pending_tail;
+ TW_Cmd_State state[TW_Q_LENGTH];
+ u32 posted_request_count;
+ u32 max_posted_request_count;
+ u32 request_count_marked_pending;
+ u32 pending_request_count;
+ u32 max_pending_request_count;
+ u32 max_sgl_entries;
+ u32 sgl_entries;
+ u32 num_resets;
+ u32 sector_count;
+ u32 max_sector_count;
+ u32 aen_count;
+ struct Scsi_Host *host;
+ struct mutex ioctl_lock;
+ unsigned short aen_queue[TW_Q_LENGTH];
+ unsigned char aen_head;
+ unsigned char aen_tail;
+ volatile long flags; /* long req'd for set_bit --RR */
+ int reset_print;
+ volatile int chrdev_request_id;
+ wait_queue_head_t ioctl_wqueue;
+} TW_Device_Extension;
+
+#endif /* _3W_XXXX_H */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
new file mode 100644
index 000000000..82abfce1c
--- /dev/null
+++ b/drivers/scsi/53c700.c
@@ -0,0 +1,2130 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* NCR (or Symbios) 53c700 and 53c700-66 Driver
+ *
+ * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+ */
+
+/* Notes:
+ *
+ * This driver is designed exclusively for these chips (virtually the
+ * earliest of the scripts engine chips). They need their own drivers
+ * because they are missing so many of the scripts and snazzy register
+ * features of their elder brothers (the 710, 720 and 770).
+ *
+ * The 700 is the lowliest of the line, it can only do async SCSI.
+ * The 700-66 can at least do synchronous SCSI up to 10MHz.
+ *
+ * The 700 chip has no host bus interface logic of its own. However,
+ * it is usually mapped to a location with well defined register
+ * offsets. Therefore, if you can determine the base address and the
+ * irq your board incorporating this chip uses, you can probably use
+ * this driver to run it (although you'll probably have to write a
+ * minimal wrapper for the purpose---see the NCR_D700 driver for
+ * details about how to do this).
+ *
+ *
+ * TODO List:
+ *
+ * 1. Better statistics in the proc fs
+ *
+ * 2. Implement message queue (queues SCSI messages like commands) and make
+ * the abort and device reset functions use them.
+ * */
+
+/* CHANGELOG
+ *
+ * Version 2.8
+ *
+ * Fixed bad bug affecting tag starvation processing (previously the
+ * driver would hang the system if too many tags starved. Also fixed
+ * bad bug having to do with 10 byte command processing and REQUEST
+ * SENSE (the command would loop forever getting a transfer length
+ * mismatch in the CMD phase).
+ *
+ * Version 2.7
+ *
+ * Fixed scripts problem which caused certain devices (notably CDRWs)
+ * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
+ * __raw_readl/writel for parisc compatibility (Thomas
+ * Bogendoerfer). Added missing SCp->request_bufflen initialisation
+ * for sense requests (Ryan Bradetich).
+ *
+ * Version 2.6
+ *
+ * Following test of the 64 bit parisc kernel by Richard Hirst,
+ * several problems have now been corrected. Also adds support for
+ * consistent memory allocation.
+ *
+ * Version 2.5
+ *
+ * More Compatibility changes for 710 (now actually works). Enhanced
+ * support for odd clock speeds which constrain SDTR negotiations.
+ * correct cacheline separation for scsi messages and status for
+ * incoherent architectures. Use of the pci mapping functions on
+ * buffers to begin support for 64 bit drivers.
+ *
+ * Version 2.4
+ *
+ * Added support for the 53c710 chip (in 53c700 emulation mode only---no
+ * special 53c710 instructions or registers are used).
+ *
+ * Version 2.3
+ *
+ * More endianness/cache coherency changes.
+ *
+ * Better bad device handling (handles devices lying about tag
+ * queueing support and devices which fail to provide sense data on
+ * contingent allegiance conditions)
+ *
+ * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
+ * debugging this driver on the parisc architecture and suggesting
+ * many improvements and bug fixes.
+ *
+ * Thanks also go to Linuxcare Inc. for providing several PARISC
+ * machines for me to debug the driver on.
+ *
+ * Version 2.2
+ *
+ * Made the driver mem or io mapped; added endian invariance; added
+ * dma cache flushing operations for architectures which need it;
+ * added support for more varied clocking speeds.
+ *
+ * Version 2.1
+ *
+ * Initial modularisation from the D700. See NCR_D700.c for the rest of
+ * the changelog.
+ * */
+#define NCR_700_VERSION "2.8"
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/byteorder.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+/* NOTE: For 64 bit drivers there are points in the code where we use
+ * a non dereferenceable pointer to point to a structure in dma-able
+ * memory (which is 32 bits) so that we can use all of the structure
+ * operations but take the address at the end. This macro allows us
+ * to truncate the 64 bit pointer down to 32 bits without the compiler
+ * complaining */
+#define to32bit(x) ((__u32)((unsigned long)(x)))
+
+#ifdef NCR_700_DEBUG
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
+MODULE_LICENSE("GPL");
+
+/* This is the script */
+#include "53c700_d.h"
+
+
+STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
+STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
+STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
+STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
+STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
+STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
+STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
+STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
+STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
+static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
+
+STATIC struct device_attribute *NCR_700_dev_attrs[];
+
+STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
+
+static char *NCR_700_phase[] = {
+ "",
+ "after selection",
+ "before command phase",
+ "after command phase",
+ "after status phase",
+ "after data in phase",
+ "after data out phase",
+ "during data phase",
+};
+
+static char *NCR_700_condition[] = {
+ "",
+ "NOT MSG_OUT",
+ "UNEXPECTED PHASE",
+ "NOT MSG_IN",
+ "UNEXPECTED MSG",
+ "MSG_IN",
+ "SDTR_MSG RECEIVED",
+ "REJECT_MSG RECEIVED",
+ "DISCONNECT_MSG RECEIVED",
+ "MSG_OUT",
+ "DATA_IN",
+
+};
+
+static char *NCR_700_fatal_messages[] = {
+ "unexpected message after reselection",
+ "still MSG_OUT after message injection",
+ "not MSG_IN after selection",
+ "Illegal message length received",
+};
+
+static char *NCR_700_SBCL_bits[] = {
+ "IO ",
+ "CD ",
+ "MSG ",
+ "ATN ",
+ "SEL ",
+ "BSY ",
+ "ACK ",
+ "REQ ",
+};
+
+static char *NCR_700_SBCL_to_phase[] = {
+ "DATA_OUT",
+ "DATA_IN",
+ "CMD_OUT",
+ "STATE",
+ "ILLEGAL PHASE",
+ "ILLEGAL PHASE",
+ "MSG OUT",
+ "MSG IN",
+};
+
+/* This translates the SDTR message offset and period to a value
+ * which can be loaded into the SXFER_REG.
+ *
+ * NOTE: According to SCSI-2, the true transfer period (in ns) is
+ * actually four times this period value */
+static inline __u8
+NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
+ __u8 offset, __u8 period)
+{
+ int XFERP;
+
+ __u8 min_xferp = (hostdata->chip710
+ ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
+ __u8 max_offset = (hostdata->chip710
+ ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
+
+ if(offset == 0)
+ return 0;
+
+ if(period < hostdata->min_period) {
+ printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
+ period = hostdata->min_period;
+ }
+ XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
+ if(offset > max_offset) {
+ printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
+ offset, max_offset);
+ offset = max_offset;
+ }
+ if(XFERP < min_xferp) {
+ XFERP = min_xferp;
+ }
+ return (offset & 0x0f) | (XFERP & 0x07)<<4;
+}
+
+static inline __u8
+NCR_700_get_SXFER(struct scsi_device *SDp)
+{
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
+
+ return NCR_700_offset_period_to_sxfer(hostdata,
+ spi_offset(SDp->sdev_target),
+ spi_period(SDp->sdev_target));
+}
+
+struct Scsi_Host *
+NCR_700_detect(struct scsi_host_template *tpnt,
+ struct NCR_700_Host_Parameters *hostdata, struct device *dev)
+{
+ dma_addr_t pScript, pSlots;
+ __u8 *memory;
+ __u32 *script;
+ struct Scsi_Host *host;
+ static int banner = 0;
+ int j;
+
+ if(tpnt->sdev_attrs == NULL)
+ tpnt->sdev_attrs = NCR_700_dev_attrs;
+
+ memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
+ &pScript, GFP_KERNEL);
+ if(memory == NULL) {
+ printk(KERN_ERR "53c700: Failed to allocate memory for driver, detatching\n");
+ return NULL;
+ }
+
+ script = (__u32 *)memory;
+ hostdata->msgin = memory + MSGIN_OFFSET;
+ hostdata->msgout = memory + MSGOUT_OFFSET;
+ hostdata->status = memory + STATUS_OFFSET;
+ hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
+ hostdata->dev = dev;
+
+ pSlots = pScript + SLOTS_OFFSET;
+
+ /* Fill in the missing routines from the host template */
+ tpnt->queuecommand = NCR_700_queuecommand;
+ tpnt->eh_abort_handler = NCR_700_abort;
+ tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
+ tpnt->eh_host_reset_handler = NCR_700_host_reset;
+ tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
+ tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
+ tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
+ tpnt->use_clustering = ENABLE_CLUSTERING;
+ tpnt->slave_configure = NCR_700_slave_configure;
+ tpnt->slave_destroy = NCR_700_slave_destroy;
+ tpnt->slave_alloc = NCR_700_slave_alloc;
+ tpnt->change_queue_depth = NCR_700_change_queue_depth;
+ tpnt->use_blk_tags = 1;
+
+ if(tpnt->name == NULL)
+ tpnt->name = "53c700";
+ if(tpnt->proc_name == NULL)
+ tpnt->proc_name = "53c700";
+
+ host = scsi_host_alloc(tpnt, 4);
+ if (!host)
+ return NULL;
+ memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
+ * NCR_700_COMMAND_SLOTS_PER_HOST);
+ for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
+ dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
+ - (unsigned long)&hostdata->slots[0].SG[0]);
+ hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
+ if(j == 0)
+ hostdata->free_list = &hostdata->slots[j];
+ else
+ hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
+ hostdata->slots[j].state = NCR_700_SLOT_FREE;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
+ script[j] = bS_to_host(SCRIPT[j]);
+
+ /* adjust all labels to be bus physical */
+ for (j = 0; j < PATCHES; j++)
+ script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
+ /* now patch up fixed addresses. */
+ script_patch_32(hostdata->dev, script, MessageLocation,
+ pScript + MSGOUT_OFFSET);
+ script_patch_32(hostdata->dev, script, StatusAddress,
+ pScript + STATUS_OFFSET);
+ script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
+ pScript + MSGIN_OFFSET);
+
+ hostdata->script = script;
+ hostdata->pScript = pScript;
+ dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
+ hostdata->state = NCR_700_HOST_FREE;
+ hostdata->cmd = NULL;
+ host->max_id = 8;
+ host->max_lun = NCR_700_MAX_LUNS;
+ BUG_ON(NCR_700_transport_template == NULL);
+ host->transportt = NCR_700_transport_template;
+ host->unique_id = (unsigned long)hostdata->base;
+ hostdata->eh_complete = NULL;
+ host->hostdata[0] = (unsigned long)hostdata;
+ /* kick the chip */
+ NCR_700_writeb(0xff, host, CTEST9_REG);
+ if (hostdata->chip710)
+ hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
+ else
+ hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
+ hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
+ if (banner == 0) {
+ printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
+ banner = 1;
+ }
+ printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
+ hostdata->chip710 ? "53c710" :
+ (hostdata->fast ? "53c700-66" : "53c700"),
+ hostdata->rev, hostdata->differential ?
+ "(Differential)" : "");
+ /* reset the chip */
+ NCR_700_chip_reset(host);
+
+ if (scsi_add_host(host, dev)) {
+ dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
+ scsi_host_put(host);
+ return NULL;
+ }
+
+ spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
+ SPI_SIGNAL_SE;
+
+ return host;
+}
+
+int
+NCR_700_release(struct Scsi_Host *host)
+{
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+ dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
+ hostdata->script, hostdata->pScript);
+ return 1;
+}
+
+static inline __u8
+NCR_700_identify(int can_disconnect, __u8 lun)
+{
+ return IDENTIFY_BASE |
+ ((can_disconnect) ? 0x40 : 0) |
+ (lun & NCR_700_LUN_MASK);
+}
+
+/*
+ * Function : static int data_residual (Scsi_Host *host)
+ *
+ * Purpose : return residual data count of what's in the chip. If you
+ * really want to know what this function is doing, it's almost a
+ * direct transcription of the algorithm described in the 53c710
+ * guide, except that the DBC and DFIFO registers are only 6 bits
+ * wide on a 53c700.
+ *
+ * Inputs : host - SCSI host */
+static inline int
+NCR_700_data_residual (struct Scsi_Host *host) {
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ int count, synchronous = 0;
+ unsigned int ddir;
+
+ if(hostdata->chip710) {
+ count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
+ (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
+ } else {
+ count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
+ (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
+ }
+
+ if(hostdata->fast)
+ synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
+
+ /* get the data direction */
+ ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
+
+ if (ddir) {
+ /* Receive */
+ if (synchronous)
+ count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
+ else
+ if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
+ ++count;
+ } else {
+ /* Send */
+ __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
+ if (sstat & SODL_REG_FULL)
+ ++count;
+ if (synchronous && (sstat & SODR_REG_FULL))
+ ++count;
+ }
+#ifdef NCR_700_DEBUG
+ if(count)
+ printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
+#endif
+ return count;
+}
+
+/* print out the SCSI wires and corresponding phase from the SBCL register
+ * in the chip */
+static inline char *
+sbcl_to_string(__u8 sbcl)
+{
+ int i;
+ static char ret[256];
+
+ ret[0]='\0';
+ for(i=0; i<8; i++) {
+ if((1<<i) & sbcl)
+ strcat(ret, NCR_700_SBCL_bits[i]);
+ }
+ strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
+ return ret;
+}
+
+static inline __u8
+bitmap_to_number(__u8 bitmap)
+{
+ __u8 i;
+
+ for(i=0; i<8 && !(bitmap &(1<<i)); i++)
+ ;
+ return i;
+}
+
+/* Pull a slot off the free list */
+STATIC struct NCR_700_command_slot *
+find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
+{
+ struct NCR_700_command_slot *slot = hostdata->free_list;
+
+ if(slot == NULL) {
+ /* sanity check */
+ if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
+ printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
+ return NULL;
+ }
+
+ if(slot->state != NCR_700_SLOT_FREE)
+ /* should panic! */
+ printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
+
+
+ hostdata->free_list = slot->ITL_forw;
+ slot->ITL_forw = NULL;
+
+
+ /* NOTE: set the state to busy here, not queued, since this
+ * indicates the slot is in use and cannot be run by the IRQ
+ * finish routine. If we cannot queue the command when it
+ * is properly build, we then change to NCR_700_SLOT_QUEUED */
+ slot->state = NCR_700_SLOT_BUSY;
+ slot->flags = 0;
+ hostdata->command_slot_count++;
+
+ return slot;
+}
+
+STATIC void
+free_slot(struct NCR_700_command_slot *slot,
+ struct NCR_700_Host_Parameters *hostdata)
+{
+ if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
+ printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
+ }
+ if(slot->state == NCR_700_SLOT_FREE) {
+ printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
+ }
+
+ slot->resume_offset = 0;
+ slot->cmnd = NULL;
+ slot->state = NCR_700_SLOT_FREE;
+ slot->ITL_forw = hostdata->free_list;
+ hostdata->free_list = slot;
+ hostdata->command_slot_count--;
+}
+
+
+/* This routine really does very little. The command is indexed on
+ the ITL and (if tagged) the ITLQ lists in _queuecommand */
+STATIC void
+save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
+ struct scsi_cmnd *SCp, __u32 dsp)
+{
+ /* Its just possible that this gets executed twice */
+ if(SCp != NULL) {
+ struct NCR_700_command_slot *slot =
+ (struct NCR_700_command_slot *)SCp->host_scribble;
+
+ slot->resume_offset = dsp;
+ }
+ hostdata->state = NCR_700_HOST_FREE;
+ hostdata->cmd = NULL;
+}
+
+STATIC inline void
+NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
+ struct NCR_700_command_slot *slot)
+{
+ if(SCp->sc_data_direction != DMA_NONE &&
+ SCp->sc_data_direction != DMA_BIDIRECTIONAL)
+ scsi_dma_unmap(SCp);
+}
+
+STATIC inline void
+NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
+ struct scsi_cmnd *SCp, int result)
+{
+ hostdata->state = NCR_700_HOST_FREE;
+ hostdata->cmd = NULL;
+
+ if(SCp != NULL) {
+ struct NCR_700_command_slot *slot =
+ (struct NCR_700_command_slot *)SCp->host_scribble;
+
+ dma_unmap_single(hostdata->dev, slot->pCmd,
+ MAX_COMMAND_SIZE, DMA_TO_DEVICE);
+ if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
+ char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
+
+ dma_unmap_single(hostdata->dev, slot->dma_handle,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ /* restore the old result if the request sense was
+ * successful */
+ if (result == 0)
+ result = cmnd[7];
+ /* restore the original length */
+ SCp->cmd_len = cmnd[8];
+ } else
+ NCR_700_unmap(hostdata, SCp, slot);
+
+ free_slot(slot, hostdata);
+#ifdef NCR_700_DEBUG
+ if(NCR_700_get_depth(SCp->device) == 0 ||
+ NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
+ printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
+ NCR_700_get_depth(SCp->device));
+#endif /* NCR_700_DEBUG */
+ NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
+
+ SCp->host_scribble = NULL;
+ SCp->result = result;
+ SCp->scsi_done(SCp);
+ } else {
+ printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
+ }
+}
+
+
+STATIC void
+NCR_700_internal_bus_reset(struct Scsi_Host *host)
+{
+ /* Bus reset */
+ NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
+ udelay(50);
+ NCR_700_writeb(0, host, SCNTL1_REG);
+
+}
+
+STATIC void
+NCR_700_chip_setup(struct Scsi_Host *host)
+{
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ __u8 min_period;
+ __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
+
+ if(hostdata->chip710) {
+ __u8 burst_disable = 0;
+ __u8 burst_length = 0;
+
+ switch (hostdata->burst_length) {
+ case 1:
+ burst_length = BURST_LENGTH_1;
+ break;
+ case 2:
+ burst_length = BURST_LENGTH_2;
+ break;
+ case 4:
+ burst_length = BURST_LENGTH_4;
+ break;
+ case 8:
+ burst_length = BURST_LENGTH_8;
+ break;
+ default:
+ burst_disable = BURST_DISABLE;
+ break;
+ }
+ hostdata->dcntl_extra |= COMPAT_700_MODE;
+
+ NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(burst_length | hostdata->dmode_extra,
+ host, DMODE_710_REG);
+ NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
+ (hostdata->differential ? DIFF : 0),
+ host, CTEST7_REG);
+ NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
+ NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
+ | AUTO_ATN, host, SCNTL0_REG);
+ } else {
+ NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
+ host, DMODE_700_REG);
+ NCR_700_writeb(hostdata->differential ?
+ DIFF : 0, host, CTEST7_REG);
+ if(hostdata->fast) {
+ /* this is for 700-66, does nothing on 700 */
+ NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
+ | GENERATE_RECEIVE_PARITY, host,
+ CTEST8_REG);
+ } else {
+ NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
+ | PARITY | AUTO_ATN, host, SCNTL0_REG);
+ }
+ }
+
+ NCR_700_writeb(1 << host->this_id, host, SCID_REG);
+ NCR_700_writeb(0, host, SBCL_REG);
+ NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
+
+ NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
+ | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
+
+ NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
+ NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
+ if(hostdata->clock > 75) {
+ printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock);
+ /* do the best we can, but the async clock will be out
+ * of spec: sync divider 2, async divider 3 */
+ DEBUG(("53c700: sync 2 async 3\n"));
+ NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
+ NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
+ hostdata->sync_clock = hostdata->clock/2;
+ } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
+ /* sync divider 1.5, async divider 3 */
+ DEBUG(("53c700: sync 1.5 async 3\n"));
+ NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
+ NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
+ hostdata->sync_clock = hostdata->clock*2;
+ hostdata->sync_clock /= 3;
+
+ } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
+ /* sync divider 1, async divider 2 */
+ DEBUG(("53c700: sync 1 async 2\n"));
+ NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
+ NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
+ hostdata->sync_clock = hostdata->clock;
+ } else if(hostdata->clock > 25 && hostdata->clock <=37) {
+ /* sync divider 1, async divider 1.5 */
+ DEBUG(("53c700: sync 1 async 1.5\n"));
+ NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
+ NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
+ hostdata->sync_clock = hostdata->clock;
+ } else {
+ DEBUG(("53c700: sync 1 async 1\n"));
+ NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
+ NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
+ /* sync divider 1, async divider 1 */
+ hostdata->sync_clock = hostdata->clock;
+ }
+ /* Calculate the actual minimum period that can be supported
+ * by our synchronous clock speed. See the 710 manual for
+ * exact details of this calculation which is based on a
+ * setting of the SXFER register */
+ min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
+ hostdata->min_period = NCR_700_MIN_PERIOD;
+ if(min_period > NCR_700_MIN_PERIOD)
+ hostdata->min_period = min_period;
+}
+
+STATIC void
+NCR_700_chip_reset(struct Scsi_Host *host)
+{
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ if(hostdata->chip710) {
+ NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
+ udelay(100);
+
+ NCR_700_writeb(0, host, ISTAT_REG);
+ } else {
+ NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
+ udelay(100);
+
+ NCR_700_writeb(0, host, DCNTL_REG);
+ }
+
+ mdelay(1000);
+
+ NCR_700_chip_setup(host);
+}
+
+/* The heart of the message processing engine is that the instruction
+ * immediately after the INT is the normal case (and so must be CLEAR
+ * ACK). If we want to do something else, we call that routine in
+ * scripts and set temp to be the normal case + 8 (skipping the CLEAR
+ * ACK) so that the routine returns correctly to resume its activity
+ * */
+STATIC __u32
+process_extended_message(struct Scsi_Host *host,
+ struct NCR_700_Host_Parameters *hostdata,
+ struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
+{
+ __u32 resume_offset = dsp, temp = dsp + 8;
+ __u8 pun = 0xff, lun = 0xff;
+
+ if(SCp != NULL) {
+ pun = SCp->device->id;
+ lun = SCp->device->lun;
+ }
+
+ switch(hostdata->msgin[2]) {
+ case A_SDTR_MSG:
+ if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
+ struct scsi_target *starget = SCp->device->sdev_target;
+ __u8 period = hostdata->msgin[3];
+ __u8 offset = hostdata->msgin[4];
+
+ if(offset == 0 || period == 0) {
+ offset = 0;
+ period = 0;
+ }
+
+ spi_offset(starget) = offset;
+ spi_period(starget) = period;
+
+ if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
+ spi_display_xfer_agreement(starget);
+ NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
+ }
+
+ NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
+ NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
+
+ NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
+ host, SXFER_REG);
+
+ } else {
+ /* SDTR message out of the blue, reject it */
+ shost_printk(KERN_WARNING, host,
+ "Unexpected SDTR msg\n");
+ hostdata->msgout[0] = A_REJECT_MSG;
+ dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+ script_patch_16(hostdata->dev, hostdata->script,
+ MessageCount, 1);
+ /* SendMsgOut returns, so set up the return
+ * address */
+ resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
+ }
+ break;
+
+ case A_WDTR_MSG:
+ printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
+ host->host_no, pun, lun);
+ hostdata->msgout[0] = A_REJECT_MSG;
+ dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+ script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+ 1);
+ resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
+
+ break;
+
+ default:
+ printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
+ host->host_no, pun, lun,
+ NCR_700_phase[(dsps & 0xf00) >> 8]);
+ spi_print_msg(hostdata->msgin);
+ printk("\n");
+ /* just reject it */
+ hostdata->msgout[0] = A_REJECT_MSG;
+ dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+ script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+ 1);
+ /* SendMsgOut returns, so set up the return
+ * address */
+ resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
+ }
+ NCR_700_writel(temp, host, TEMP_REG);
+ return resume_offset;
+}
+
+STATIC __u32
+process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
+ struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
+{
+ /* work out where to return to */
+ __u32 temp = dsp + 8, resume_offset = dsp;
+ __u8 pun = 0xff, lun = 0xff;
+
+ if(SCp != NULL) {
+ pun = SCp->device->id;
+ lun = SCp->device->lun;
+ }
+
+#ifdef NCR_700_DEBUG
+ printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
+ NCR_700_phase[(dsps & 0xf00) >> 8]);
+ spi_print_msg(hostdata->msgin);
+ printk("\n");
+#endif
+
+ switch(hostdata->msgin[0]) {
+
+ case A_EXTENDED_MSG:
+ resume_offset = process_extended_message(host, hostdata, SCp,
+ dsp, dsps);
+ break;
+
+ case A_REJECT_MSG:
+ if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
+ /* Rejected our sync negotiation attempt */
+ spi_period(SCp->device->sdev_target) =
+ spi_offset(SCp->device->sdev_target) = 0;
+ NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
+ NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
+ } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
+ /* rejected our first simple tag message */
+ scmd_printk(KERN_WARNING, SCp,
+ "Rejected first tag queue attempt, turning off tag queueing\n");
+ /* we're done negotiating */
+ NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
+ hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
+
+ SCp->device->tagged_supported = 0;
+ SCp->device->simple_tags = 0;
+ scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
+ } else {
+ shost_printk(KERN_WARNING, host,
+ "(%d:%d) Unexpected REJECT Message %s\n",
+ pun, lun,
+ NCR_700_phase[(dsps & 0xf00) >> 8]);
+ /* however, just ignore it */
+ }
+ break;
+
+ case A_PARITY_ERROR_MSG:
+ printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
+ pun, lun);
+ NCR_700_internal_bus_reset(host);
+ break;
+ case A_SIMPLE_TAG_MSG:
+ printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
+ pun, lun, hostdata->msgin[1],
+ NCR_700_phase[(dsps & 0xf00) >> 8]);
+ /* just ignore it */
+ break;
+ default:
+ printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
+ host->host_no, pun, lun,
+ NCR_700_phase[(dsps & 0xf00) >> 8]);
+
+ spi_print_msg(hostdata->msgin);
+ printk("\n");
+ /* just reject it */
+ hostdata->msgout[0] = A_REJECT_MSG;
+ dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+ script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+ 1);
+ /* SendMsgOut returns, so set up the return
+ * address */
+ resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
+
+ break;
+ }
+ NCR_700_writel(temp, host, TEMP_REG);
+ /* set us up to receive another message */
+ dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+ return resume_offset;
+}
+
+STATIC __u32
+process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
+ struct Scsi_Host *host,
+ struct NCR_700_Host_Parameters *hostdata)
+{
+ __u32 resume_offset = 0;
+ __u8 pun = 0xff, lun=0xff;
+
+ if(SCp != NULL) {
+ pun = SCp->device->id;
+ lun = SCp->device->lun;
+ }
+
+ if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
+ DEBUG((" COMMAND COMPLETE, status=%02x\n",
+ hostdata->status[0]));
+ /* OK, if TCQ still under negotiation, we now know it works */
+ if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
+ NCR_700_set_tag_neg_state(SCp->device,
+ NCR_700_FINISHED_TAG_NEGOTIATION);
+
+ /* check for contingent allegiance contitions */
+ if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
+ status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
+ struct NCR_700_command_slot *slot =
+ (struct NCR_700_command_slot *)SCp->host_scribble;
+ if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
+ /* OOPS: bad device, returning another
+ * contingent allegiance condition */
+ scmd_printk(KERN_ERR, SCp,
+ "broken device is looping in contingent allegiance: ignoring\n");
+ NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
+ } else {
+ char *cmnd =
+ NCR_700_get_sense_cmnd(SCp->device);
+#ifdef NCR_DEBUG
+ scsi_print_command(SCp);
+ printk(" cmd %p has status %d, requesting sense\n",
+ SCp, hostdata->status[0]);
+#endif
+ /* we can destroy the command here
+ * because the contingent allegiance
+ * condition will cause a retry which
+ * will re-copy the command from the
+ * saved data_cmnd. We also unmap any
+ * data associated with the command
+ * here */
+ NCR_700_unmap(hostdata, SCp, slot);
+ dma_unmap_single(hostdata->dev, slot->pCmd,
+ MAX_COMMAND_SIZE,
+ DMA_TO_DEVICE);
+
+ cmnd[0] = REQUEST_SENSE;
+ cmnd[1] = (lun & 0x7) << 5;
+ cmnd[2] = 0;
+ cmnd[3] = 0;
+ cmnd[4] = SCSI_SENSE_BUFFERSIZE;
+ cmnd[5] = 0;
+ /* Here's a quiet hack: the
+ * REQUEST_SENSE command is six bytes,
+ * so store a flag indicating that
+ * this was an internal sense request
+ * and the original status at the end
+ * of the command */
+ cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
+ cmnd[7] = hostdata->status[0];
+ cmnd[8] = SCp->cmd_len;
+ SCp->cmd_len = 6; /* command length for
+ * REQUEST_SENSE */
+ slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
+ slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
+ slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
+ slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
+ slot->SG[1].pAddr = 0;
+ slot->resume_offset = hostdata->pScript;
+ dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+ dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+
+ /* queue the command for reissue */
+ slot->state = NCR_700_SLOT_QUEUED;
+ slot->flags = NCR_700_FLAG_AUTOSENSE;
+ hostdata->state = NCR_700_HOST_FREE;
+ hostdata->cmd = NULL;
+ }
+ } else {
+ // Currently rely on the mid layer evaluation
+ // of the tag queuing capability
+ //
+ //if(status_byte(hostdata->status[0]) == GOOD &&
+ // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
+ // /* Piggy back the tag queueing support
+ // * on this command */
+ // dma_sync_single_for_cpu(hostdata->dev,
+ // slot->dma_handle,
+ // SCp->request_bufflen,
+ // DMA_FROM_DEVICE);
+ // if(((char *)SCp->request_buffer)[7] & 0x02) {
+ // scmd_printk(KERN_INFO, SCp,
+ // "Enabling Tag Command Queuing\n");
+ // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
+ // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
+ // } else {
+ // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
+ // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
+ // }
+ //}
+ NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
+ }
+ } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
+ __u8 i = (dsps & 0xf00) >> 8;
+
+ scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
+ NCR_700_phase[i],
+ sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
+ scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
+ SCp->cmd_len);
+ scsi_print_command(SCp);
+
+ NCR_700_internal_bus_reset(host);
+ } else if((dsps & 0xfffff000) == A_FATAL) {
+ int i = (dsps & 0xfff);
+
+ printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
+ host->host_no, pun, lun, NCR_700_fatal_messages[i]);
+ if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
+ printk(KERN_ERR " msg begins %02x %02x\n",
+ hostdata->msgin[0], hostdata->msgin[1]);
+ }
+ NCR_700_internal_bus_reset(host);
+ } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
+#ifdef NCR_700_DEBUG
+ __u8 i = (dsps & 0xf00) >> 8;
+
+ printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
+ host->host_no, pun, lun,
+ i, NCR_700_phase[i]);
+#endif
+ save_for_reselection(hostdata, SCp, dsp);
+
+ } else if(dsps == A_RESELECTION_IDENTIFIED) {
+ __u8 lun;
+ struct NCR_700_command_slot *slot;
+ __u8 reselection_id = hostdata->reselection_id;
+ struct scsi_device *SDp;
+
+ lun = hostdata->msgin[0] & 0x1f;
+
+ hostdata->reselection_id = 0xff;
+ DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
+ host->host_no, reselection_id, lun));
+ /* clear the reselection indicator */
+ SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
+ if(unlikely(SDp == NULL)) {
+ printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
+ host->host_no, reselection_id, lun);
+ BUG();
+ }
+ if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
+ struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
+ if(unlikely(SCp == NULL)) {
+ printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
+ host->host_no, reselection_id, lun, hostdata->msgin[2]);
+ BUG();
+ }
+
+ slot = (struct NCR_700_command_slot *)SCp->host_scribble;
+ DDEBUG(KERN_DEBUG, SDp,
+ "reselection is tag %d, slot %p(%d)\n",
+ hostdata->msgin[2], slot, slot->tag);
+ } else {
+ struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
+ if(unlikely(SCp == NULL)) {
+ sdev_printk(KERN_ERR, SDp,
+ "no saved request for untagged cmd\n");
+ BUG();
+ }
+ slot = (struct NCR_700_command_slot *)SCp->host_scribble;
+ }
+
+ if(slot == NULL) {
+ printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
+ host->host_no, reselection_id, lun,
+ hostdata->msgin[0], hostdata->msgin[1],
+ hostdata->msgin[2]);
+ } else {
+ if(hostdata->state != NCR_700_HOST_BUSY)
+ printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
+ host->host_no);
+ resume_offset = slot->resume_offset;
+ hostdata->cmd = slot->cmnd;
+
+ /* re-patch for this command */
+ script_patch_32_abs(hostdata->dev, hostdata->script,
+ CommandAddress, slot->pCmd);
+ script_patch_16(hostdata->dev, hostdata->script,
+ CommandCount, slot->cmnd->cmd_len);
+ script_patch_32_abs(hostdata->dev, hostdata->script,
+ SGScriptStartAddress,
+ to32bit(&slot->pSG[0].ins));
+
+ /* Note: setting SXFER only works if we're
+ * still in the MESSAGE phase, so it is vital
+ * that ACK is still asserted when we process
+ * the reselection message. The resume offset
+ * should therefore always clear ACK */
+ NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
+ host, SXFER_REG);
+ dma_cache_sync(hostdata->dev, hostdata->msgin,
+ MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+ dma_cache_sync(hostdata->dev, hostdata->msgout,
+ MSG_ARRAY_SIZE, DMA_TO_DEVICE);
+ /* I'm just being paranoid here, the command should
+ * already have been flushed from the cache */
+ dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
+ slot->cmnd->cmd_len, DMA_TO_DEVICE);
+
+
+
+ }
+ } else if(dsps == A_RESELECTED_DURING_SELECTION) {
+
+ /* This section is full of debugging code because I've
+ * never managed to reach it. I think what happens is
+ * that, because the 700 runs with selection
+ * interrupts enabled the whole time that we take a
+ * selection interrupt before we manage to get to the
+ * reselected script interrupt */
+
+ __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
+ struct NCR_700_command_slot *slot;
+
+ /* Take out our own ID */
+ reselection_id &= ~(1<<host->this_id);
+
+ /* I've never seen this happen, so keep this as a printk rather
+ * than a debug */
+ printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
+ host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
+
+ {
+ /* FIXME: DEBUGGING CODE */
+ __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
+ int i;
+
+ for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
+ if(SG >= to32bit(&hostdata->slots[i].pSG[0])
+ && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
+ break;
+ }
+ printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
+ SCp = hostdata->slots[i].cmnd;
+ }
+
+ if(SCp != NULL) {
+ slot = (struct NCR_700_command_slot *)SCp->host_scribble;
+ /* change slot from busy to queued to redo command */
+ slot->state = NCR_700_SLOT_QUEUED;
+ }
+ hostdata->cmd = NULL;
+
+ if(reselection_id == 0) {
+ if(hostdata->reselection_id == 0xff) {
+ printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
+ return 0;
+ } else {
+ printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
+ host->host_no);
+ reselection_id = hostdata->reselection_id;
+ }
+ } else {
+
+ /* convert to real ID */
+ reselection_id = bitmap_to_number(reselection_id);
+ }
+ hostdata->reselection_id = reselection_id;
+ /* just in case we have a stale simple tag message, clear it */
+ hostdata->msgin[1] = 0;
+ dma_cache_sync(hostdata->dev, hostdata->msgin,
+ MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
+ if(hostdata->tag_negotiated & (1<<reselection_id)) {
+ resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
+ } else {
+ resume_offset = hostdata->pScript + Ent_GetReselectionData;
+ }
+ } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
+ /* we've just disconnected from the bus, do nothing since
+ * a return here will re-run the queued command slot
+ * that may have been interrupted by the initial selection */
+ DEBUG((" SELECTION COMPLETED\n"));
+ } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
+ resume_offset = process_message(host, hostdata, SCp,
+ dsp, dsps);
+ } else if((dsps & 0xfffff000) == 0) {
+ __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
+ printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
+ host->host_no, pun, lun, NCR_700_condition[i],
+ NCR_700_phase[j], dsp - hostdata->pScript);
+ if(SCp != NULL) {
+ struct scatterlist *sg;
+
+ scsi_print_command(SCp);
+ scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
+ printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
+ }
+ }
+ NCR_700_internal_bus_reset(host);
+ } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
+ printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
+ host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
+ resume_offset = dsp;
+ } else {
+ printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
+ host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
+ NCR_700_internal_bus_reset(host);
+ }
+ return resume_offset;
+}
+
+/* We run the 53c700 with selection interrupts always enabled. This
+ * means that the chip may be selected as soon as the bus frees. On a
+ * busy bus, this can be before the scripts engine finishes its
+ * processing. Therefore, part of the selection processing has to be
+ * to find out what the scripts engine is doing and complete the
+ * function if necessary (i.e. process the pending disconnect or save
+ * the interrupted initial selection */
+STATIC inline __u32
+process_selection(struct Scsi_Host *host, __u32 dsp)
+{
+ __u8 id = 0; /* Squash compiler warning */
+ int count = 0;
+ __u32 resume_offset = 0;
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ struct scsi_cmnd *SCp = hostdata->cmd;
+ __u8 sbcl;
+
+ for(count = 0; count < 5; count++) {
+ id = NCR_700_readb(host, hostdata->chip710 ?
+ CTEST9_REG : SFBR_REG);
+
+ /* Take out our own ID */
+ id &= ~(1<<host->this_id);
+ if(id != 0)
+ break;
+ udelay(5);
+ }
+ sbcl = NCR_700_readb(host, SBCL_REG);
+ if((sbcl & SBCL_IO) == 0) {
+ /* mark as having been selected rather than reselected */
+ id = 0xff;
+ } else {
+ /* convert to real ID */
+ hostdata->reselection_id = id = bitmap_to_number(id);
+ DEBUG(("scsi%d: Reselected by %d\n",
+ host->host_no, id));
+ }
+ if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
+ struct NCR_700_command_slot *slot =
+ (struct NCR_700_command_slot *)SCp->host_scribble;
+ DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
+
+ switch(dsp - hostdata->pScript) {
+ case Ent_Disconnect1:
+ case Ent_Disconnect2:
+ save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
+ break;
+ case Ent_Disconnect3:
+ case Ent_Disconnect4:
+ save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
+ break;
+ case Ent_Disconnect5:
+ case Ent_Disconnect6:
+ save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
+ break;
+ case Ent_Disconnect7:
+ case Ent_Disconnect8:
+ save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
+ break;
+ case Ent_Finish1:
+ case Ent_Finish2:
+ process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
+ break;
+
+ default:
+ slot->state = NCR_700_SLOT_QUEUED;
+ break;
+ }
+ }
+ hostdata->state = NCR_700_HOST_BUSY;
+ hostdata->cmd = NULL;
+ /* clear any stale simple tag message */
+ hostdata->msgin[1] = 0;
+ dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if(id == 0xff) {
+ /* Selected as target, Ignore */
+ resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
+ } else if(hostdata->tag_negotiated & (1<<id)) {
+ resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
+ } else {
+ resume_offset = hostdata->pScript + Ent_GetReselectionData;
+ }
+ return resume_offset;
+}
+
+static inline void
+NCR_700_clear_fifo(struct Scsi_Host *host) {
+ const struct NCR_700_Host_Parameters *hostdata
+ = (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ if(hostdata->chip710) {
+ NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
+ } else {
+ NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
+ }
+}
+
+static inline void
+NCR_700_flush_fifo(struct Scsi_Host *host) {
+ const struct NCR_700_Host_Parameters *hostdata
+ = (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ if(hostdata->chip710) {
+ NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
+ udelay(10);
+ NCR_700_writeb(0, host, CTEST8_REG);
+ } else {
+ NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
+ udelay(10);
+ NCR_700_writeb(0, host, DFIFO_REG);
+ }
+}
+
+
+/* The queue lock with interrupts disabled must be held on entry to
+ * this function */
+STATIC int
+NCR_700_start_command(struct scsi_cmnd *SCp)
+{
+ struct NCR_700_command_slot *slot =
+ (struct NCR_700_command_slot *)SCp->host_scribble;
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
+ __u16 count = 1; /* for IDENTIFY message */
+ u8 lun = SCp->device->lun;
+
+ if(hostdata->state != NCR_700_HOST_FREE) {
+ /* keep this inside the lock to close the race window where
+ * the running command finishes on another CPU while we don't
+ * change the state to queued on this one */
+ slot->state = NCR_700_SLOT_QUEUED;
+
+ DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
+ SCp->device->host->host_no, slot->cmnd, slot));
+ return 0;
+ }
+ hostdata->state = NCR_700_HOST_BUSY;
+ hostdata->cmd = SCp;
+ slot->state = NCR_700_SLOT_BUSY;
+ /* keep interrupts disabled until we have the command correctly
+ * set up so we cannot take a selection interrupt */
+
+ hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
+ slot->flags != NCR_700_FLAG_AUTOSENSE),
+ lun);
+ /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
+ * if the negotiated transfer parameters still hold, so
+ * always renegotiate them */
+ if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
+ slot->flags == NCR_700_FLAG_AUTOSENSE) {
+ NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
+ }
+
+ /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
+ * If a contingent allegiance condition exists, the device
+ * will refuse all tags, so send the request sense as untagged
+ * */
+ if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
+ && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
+ slot->flags != NCR_700_FLAG_AUTOSENSE)) {
+ count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
+ }
+
+ if(hostdata->fast &&
+ NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
+ count += spi_populate_sync_msg(&hostdata->msgout[count],
+ spi_period(SCp->device->sdev_target),
+ spi_offset(SCp->device->sdev_target));
+ NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
+ }
+
+ script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
+
+
+ script_patch_ID(hostdata->dev, hostdata->script,
+ Device_ID, 1<<scmd_id(SCp));
+
+ script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
+ slot->pCmd);
+ script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+ SCp->cmd_len);
+ /* finally plumb the beginning of the SG list into the script
+ * */
+ script_patch_32_abs(hostdata->dev, hostdata->script,
+ SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
+ NCR_700_clear_fifo(SCp->device->host);
+
+ if(slot->resume_offset == 0)
+ slot->resume_offset = hostdata->pScript;
+ /* now perform all the writebacks and invalidates */
+ dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+ dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
+ DMA_FROM_DEVICE);
+ dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+ dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
+
+ /* set the synchronous period/offset */
+ NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
+ SCp->device->host, SXFER_REG);
+ NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
+ NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
+
+ return 1;
+}
+
+irqreturn_t
+NCR_700_intr(int irq, void *dev_id)
+{
+ struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ __u8 istat;
+ __u32 resume_offset = 0;
+ __u8 pun = 0xff, lun = 0xff;
+ unsigned long flags;
+ int handled = 0;
+
+ /* Use the host lock to serialise access to the 53c700
+ * hardware. Note: In future, we may need to take the queue
+ * lock to enter the done routines. When that happens, we
+ * need to ensure that for this driver, the host lock and the
+ * queue lock point to the same thing. */
+ spin_lock_irqsave(host->host_lock, flags);
+ if((istat = NCR_700_readb(host, ISTAT_REG))
+ & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
+ __u32 dsps;
+ __u8 sstat0 = 0, dstat = 0;
+ __u32 dsp;
+ struct scsi_cmnd *SCp = hostdata->cmd;
+ enum NCR_700_Host_State state;
+
+ handled = 1;
+ state = hostdata->state;
+ SCp = hostdata->cmd;
+
+ if(istat & SCSI_INT_PENDING) {
+ udelay(10);
+
+ sstat0 = NCR_700_readb(host, SSTAT0_REG);
+ }
+
+ if(istat & DMA_INT_PENDING) {
+ udelay(10);
+
+ dstat = NCR_700_readb(host, DSTAT_REG);
+ }
+
+ dsps = NCR_700_readl(host, DSPS_REG);
+ dsp = NCR_700_readl(host, DSP_REG);
+
+ DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
+ host->host_no, istat, sstat0, dstat,
+ (dsp - (__u32)(hostdata->pScript))/4,
+ dsp, dsps));
+
+ if(SCp != NULL) {
+ pun = SCp->device->id;
+ lun = SCp->device->lun;
+ }
+
+ if(sstat0 & SCSI_RESET_DETECTED) {
+ struct scsi_device *SDp;
+ int i;
+
+ hostdata->state = NCR_700_HOST_BUSY;
+
+ printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
+ host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
+
+ scsi_report_bus_reset(host, 0);
+
+ /* clear all the negotiated parameters */
+ __shost_for_each_device(SDp, host)
+ NCR_700_clear_flag(SDp, ~0);
+
+ /* clear all the slots and their pending commands */
+ for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
+ struct scsi_cmnd *SCp;
+ struct NCR_700_command_slot *slot =
+ &hostdata->slots[i];
+
+ if(slot->state == NCR_700_SLOT_FREE)
+ continue;
+
+ SCp = slot->cmnd;
+ printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
+ slot, SCp);
+ free_slot(slot, hostdata);
+ SCp->host_scribble = NULL;
+ NCR_700_set_depth(SCp->device, 0);
+ /* NOTE: deadlock potential here: we
+ * rely on mid-layer guarantees that
+ * scsi_done won't try to issue the
+ * command again otherwise we'll
+ * deadlock on the
+ * hostdata->state_lock */
+ SCp->result = DID_RESET << 16;
+ SCp->scsi_done(SCp);
+ }
+ mdelay(25);
+ NCR_700_chip_setup(host);
+
+ hostdata->state = NCR_700_HOST_FREE;
+ hostdata->cmd = NULL;
+ /* signal back if this was an eh induced reset */
+ if(hostdata->eh_complete != NULL)
+ complete(hostdata->eh_complete);
+ goto out_unlock;
+ } else if(sstat0 & SELECTION_TIMEOUT) {
+ DEBUG(("scsi%d: (%d:%d) selection timeout\n",
+ host->host_no, pun, lun));
+ NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
+ } else if(sstat0 & PHASE_MISMATCH) {
+ struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
+ (struct NCR_700_command_slot *)SCp->host_scribble;
+
+ if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
+ /* It wants to reply to some part of
+ * our message */
+#ifdef NCR_700_DEBUG
+ __u32 temp = NCR_700_readl(host, TEMP_REG);
+ int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
+ printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
+#endif
+ resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
+ } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
+ dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
+ int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
+ int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
+ int residual = NCR_700_data_residual(host);
+ int i;
+#ifdef NCR_700_DEBUG
+ __u32 naddr = NCR_700_readl(host, DNAD_REG);
+
+ printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
+ host->host_no, pun, lun,
+ SGcount, data_transfer);
+ scsi_print_command(SCp);
+ if(residual) {
+ printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
+ host->host_no, pun, lun,
+ SGcount, data_transfer, residual);
+ }
+#endif
+ data_transfer += residual;
+
+ if(data_transfer != 0) {
+ int count;
+ __u32 pAddr;
+
+ SGcount--;
+
+ count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
+ DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
+ slot->SG[SGcount].ins &= bS_to_host(0xff000000);
+ slot->SG[SGcount].ins |= bS_to_host(data_transfer);
+ pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
+ pAddr += (count - data_transfer);
+#ifdef NCR_700_DEBUG
+ if(pAddr != naddr) {
+ printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
+ }
+#endif
+ slot->SG[SGcount].pAddr = bS_to_host(pAddr);
+ }
+ /* set the executed moves to nops */
+ for(i=0; i<SGcount; i++) {
+ slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
+ slot->SG[i].pAddr = 0;
+ }
+ dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+ /* and pretend we disconnected after
+ * the command phase */
+ resume_offset = hostdata->pScript + Ent_MsgInDuringData;
+ /* make sure all the data is flushed */
+ NCR_700_flush_fifo(host);
+ } else {
+ __u8 sbcl = NCR_700_readb(host, SBCL_REG);
+ printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
+ host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
+ NCR_700_internal_bus_reset(host);
+ }
+
+ } else if(sstat0 & SCSI_GROSS_ERROR) {
+ printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
+ host->host_no, pun, lun);
+ NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
+ } else if(sstat0 & PARITY_ERROR) {
+ printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
+ host->host_no, pun, lun);
+ NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
+ } else if(dstat & SCRIPT_INT_RECEIVED) {
+ DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
+ host->host_no, pun, lun));
+ resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
+ } else if(dstat & (ILGL_INST_DETECTED)) {
+ printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
+ " Please email James.Bottomley@HansenPartnership.com with the details\n",
+ host->host_no, pun, lun,
+ dsp, dsp - hostdata->pScript);
+ NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
+ } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
+ printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
+ host->host_no, pun, lun, dstat);
+ NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
+ }
+
+
+ /* NOTE: selection interrupt processing MUST occur
+ * after script interrupt processing to correctly cope
+ * with the case where we process a disconnect and
+ * then get reselected before we process the
+ * disconnection */
+ if(sstat0 & SELECTED) {
+ /* FIXME: It currently takes at least FOUR
+ * interrupts to complete a command that
+ * disconnects: one for the disconnect, one
+ * for the reselection, one to get the
+ * reselection data and one to complete the
+ * command. If we guess the reselected
+ * command here and prepare it, we only need
+ * to get a reselection data interrupt if we
+ * guessed wrongly. Since the interrupt
+ * overhead is much greater than the command
+ * setup, this would be an efficient
+ * optimisation particularly as we probably
+ * only have one outstanding command on a
+ * target most of the time */
+
+ resume_offset = process_selection(host, dsp);
+
+ }
+
+ }
+
+ if(resume_offset) {
+ if(hostdata->state != NCR_700_HOST_BUSY) {
+ printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
+ host->host_no, resume_offset, resume_offset - hostdata->pScript);
+ hostdata->state = NCR_700_HOST_BUSY;
+ }
+
+ DEBUG(("Attempting to resume at %x\n", resume_offset));
+ NCR_700_clear_fifo(host);
+ NCR_700_writel(resume_offset, host, DSP_REG);
+ }
+ /* There is probably a technical no-no about this: If we're a
+ * shared interrupt and we got this interrupt because the
+ * other device needs servicing not us, we're still going to
+ * check our queued commands here---of course, there shouldn't
+ * be any outstanding.... */
+ if(hostdata->state == NCR_700_HOST_FREE) {
+ int i;
+
+ for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
+ /* fairness: always run the queue from the last
+ * position we left off */
+ int j = (i + hostdata->saved_slot_position)
+ % NCR_700_COMMAND_SLOTS_PER_HOST;
+
+ if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
+ continue;
+ if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
+ DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
+ host->host_no, &hostdata->slots[j],
+ hostdata->slots[j].cmnd));
+ hostdata->saved_slot_position = j + 1;
+ }
+
+ break;
+ }
+ }
+ out_unlock:
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+static int
+NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
+{
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
+ __u32 move_ins;
+ enum dma_data_direction direction;
+ struct NCR_700_command_slot *slot;
+
+ if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
+ /* We're over our allocation, this should never happen
+ * since we report the max allocation to the mid layer */
+ printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
+ return 1;
+ }
+ /* check for untagged commands. We cannot have any outstanding
+ * commands if we accept them. Commands could be untagged because:
+ *
+ * - The tag negotiated bitmap is clear
+ * - The blk layer sent and untagged command
+ */
+ if(NCR_700_get_depth(SCp->device) != 0
+ && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
+ || !(SCp->flags & SCMD_TAGGED))) {
+ CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
+ NCR_700_get_depth(SCp->device));
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
+ CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
+ NCR_700_get_depth(SCp->device));
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
+
+ /* begin the command here */
+ /* no need to check for NULL, test for command_slot_count above
+ * ensures a slot is free */
+ slot = find_empty_slot(hostdata);
+
+ slot->cmnd = SCp;
+
+ SCp->scsi_done = done;
+ SCp->host_scribble = (unsigned char *)slot;
+ SCp->SCp.ptr = NULL;
+ SCp->SCp.buffer = NULL;
+
+#ifdef NCR_700_DEBUG
+ printk("53c700: scsi%d, command ", SCp->device->host->host_no);
+ scsi_print_command(SCp);
+#endif
+ if ((SCp->flags & SCMD_TAGGED)
+ && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
+ && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
+ scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
+ hostdata->tag_negotiated |= (1<<scmd_id(SCp));
+ NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
+ }
+
+ /* here we may have to process an untagged command. The gate
+ * above ensures that this will be the only one outstanding,
+ * so clear the tag negotiated bit.
+ *
+ * FIXME: This will royally screw up on multiple LUN devices
+ * */
+ if (!(SCp->flags & SCMD_TAGGED)
+ && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
+ scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
+ hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
+ }
+
+ if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
+ SCp->device->simple_tags) {
+ slot->tag = SCp->request->tag;
+ CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
+ slot->tag, slot);
+ } else {
+ slot->tag = SCSI_NO_TAG;
+ /* must populate current_cmnd for scsi_find_tag to work */
+ SCp->device->current_cmnd = SCp;
+ }
+ /* sanity check: some of the commands generated by the mid-layer
+ * have an eccentric idea of their sc_data_direction */
+ if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
+ SCp->sc_data_direction != DMA_NONE) {
+#ifdef NCR_700_DEBUG
+ printk("53c700: Command");
+ scsi_print_command(SCp);
+ printk("Has wrong data direction %d\n", SCp->sc_data_direction);
+#endif
+ SCp->sc_data_direction = DMA_NONE;
+ }
+
+ switch (SCp->cmnd[0]) {
+ case REQUEST_SENSE:
+ /* clear the internal sense magic */
+ SCp->cmnd[6] = 0;
+ /* fall through */
+ default:
+ /* OK, get it from the command */
+ switch(SCp->sc_data_direction) {
+ case DMA_BIDIRECTIONAL:
+ default:
+ printk(KERN_ERR "53c700: Unknown command for data direction ");
+ scsi_print_command(SCp);
+
+ move_ins = 0;
+ break;
+ case DMA_NONE:
+ move_ins = 0;
+ break;
+ case DMA_FROM_DEVICE:
+ move_ins = SCRIPT_MOVE_DATA_IN;
+ break;
+ case DMA_TO_DEVICE:
+ move_ins = SCRIPT_MOVE_DATA_OUT;
+ break;
+ }
+ }
+
+ /* now build the scatter gather list */
+ direction = SCp->sc_data_direction;
+ if(move_ins != 0) {
+ int i;
+ int sg_count;
+ dma_addr_t vPtr = 0;
+ struct scatterlist *sg;
+ __u32 count = 0;
+
+ sg_count = scsi_dma_map(SCp);
+ BUG_ON(sg_count < 0);
+
+ scsi_for_each_sg(SCp, sg, sg_count, i) {
+ vPtr = sg_dma_address(sg);
+ count = sg_dma_len(sg);
+
+ slot->SG[i].ins = bS_to_host(move_ins | count);
+ DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
+ i, count, slot->SG[i].ins, (unsigned long)vPtr));
+ slot->SG[i].pAddr = bS_to_host(vPtr);
+ }
+ slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
+ slot->SG[i].pAddr = 0;
+ dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+ DEBUG((" SETTING %08lx to %x\n",
+ (&slot->pSG[i].ins),
+ slot->SG[i].ins));
+ }
+ slot->resume_offset = 0;
+ slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
+ MAX_COMMAND_SIZE, DMA_TO_DEVICE);
+ NCR_700_start_command(SCp);
+ return 0;
+}
+
+STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
+
+STATIC int
+NCR_700_abort(struct scsi_cmnd * SCp)
+{
+ struct NCR_700_command_slot *slot;
+
+ scmd_printk(KERN_INFO, SCp, "abort command\n");
+
+ slot = (struct NCR_700_command_slot *)SCp->host_scribble;
+
+ if(slot == NULL)
+ /* no outstanding command to abort */
+ return SUCCESS;
+ if(SCp->cmnd[0] == TEST_UNIT_READY) {
+ /* FIXME: This is because of a problem in the new
+ * error handler. When it is in error recovery, it
+ * will send a TUR to a device it thinks may still be
+ * showing a problem. If the TUR isn't responded to,
+ * it will abort it and mark the device off line.
+ * Unfortunately, it does no other error recovery, so
+ * this would leave us with an outstanding command
+ * occupying a slot. Rather than allow this to
+ * happen, we issue a bus reset to force all
+ * outstanding commands to terminate here. */
+ NCR_700_internal_bus_reset(SCp->device->host);
+ /* still drop through and return failed */
+ }
+ return FAILED;
+
+}
+
+STATIC int
+NCR_700_bus_reset(struct scsi_cmnd * SCp)
+{
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
+
+ scmd_printk(KERN_INFO, SCp,
+ "New error handler wants BUS reset, cmd %p\n\t", SCp);
+ scsi_print_command(SCp);
+
+ /* In theory, eh_complete should always be null because the
+ * eh is single threaded, but just in case we're handling a
+ * reset via sg or something */
+ spin_lock_irq(SCp->device->host->host_lock);
+ while (hostdata->eh_complete != NULL) {
+ spin_unlock_irq(SCp->device->host->host_lock);
+ msleep_interruptible(100);
+ spin_lock_irq(SCp->device->host->host_lock);
+ }
+
+ hostdata->eh_complete = &complete;
+ NCR_700_internal_bus_reset(SCp->device->host);
+
+ spin_unlock_irq(SCp->device->host->host_lock);
+ wait_for_completion(&complete);
+ spin_lock_irq(SCp->device->host->host_lock);
+
+ hostdata->eh_complete = NULL;
+ /* Revalidate the transport parameters of the failing device */
+ if(hostdata->fast)
+ spi_schedule_dv_device(SCp->device);
+
+ spin_unlock_irq(SCp->device->host->host_lock);
+ return SUCCESS;
+}
+
+STATIC int
+NCR_700_host_reset(struct scsi_cmnd * SCp)
+{
+ scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
+ scsi_print_command(SCp);
+
+ spin_lock_irq(SCp->device->host->host_lock);
+
+ NCR_700_internal_bus_reset(SCp->device->host);
+ NCR_700_chip_reset(SCp->device->host);
+
+ spin_unlock_irq(SCp->device->host->host_lock);
+
+ return SUCCESS;
+}
+
+STATIC void
+NCR_700_set_period(struct scsi_target *STp, int period)
+{
+ struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
+
+ if(!hostdata->fast)
+ return;
+
+ if(period < hostdata->min_period)
+ period = hostdata->min_period;
+
+ spi_period(STp) = period;
+ spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
+ NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
+ spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
+}
+
+STATIC void
+NCR_700_set_offset(struct scsi_target *STp, int offset)
+{
+ struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
+ int max_offset = hostdata->chip710
+ ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
+
+ if(!hostdata->fast)
+ return;
+
+ if(offset > max_offset)
+ offset = max_offset;
+
+ /* if we're currently async, make sure the period is reasonable */
+ if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
+ spi_period(STp) > 0xff))
+ spi_period(STp) = hostdata->min_period;
+
+ spi_offset(STp) = offset;
+ spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
+ NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
+ spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
+}
+
+STATIC int
+NCR_700_slave_alloc(struct scsi_device *SDp)
+{
+ SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
+ GFP_KERNEL);
+
+ if (!SDp->hostdata)
+ return -ENOMEM;
+
+ return 0;
+}
+
+STATIC int
+NCR_700_slave_configure(struct scsi_device *SDp)
+{
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
+
+ /* to do here: allocate memory; build a queue_full list */
+ if(SDp->tagged_supported) {
+ scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
+ NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
+ }
+
+ if(hostdata->fast) {
+ /* Find the correct offset and period via domain validation */
+ if (!spi_initial_dv(SDp->sdev_target))
+ spi_dv_device(SDp);
+ } else {
+ spi_offset(SDp->sdev_target) = 0;
+ spi_period(SDp->sdev_target) = 0;
+ }
+ return 0;
+}
+
+STATIC void
+NCR_700_slave_destroy(struct scsi_device *SDp)
+{
+ kfree(SDp->hostdata);
+ SDp->hostdata = NULL;
+}
+
+static int
+NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
+{
+ if (depth > NCR_700_MAX_TAGS)
+ depth = NCR_700_MAX_TAGS;
+ return scsi_change_queue_depth(SDp, depth);
+}
+
+static ssize_t
+NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *SDp = to_scsi_device(dev);
+
+ return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
+}
+
+static struct device_attribute NCR_700_active_tags_attr = {
+ .attr = {
+ .name = "active_tags",
+ .mode = S_IRUGO,
+ },
+ .show = NCR_700_show_active_tags,
+};
+
+STATIC struct device_attribute *NCR_700_dev_attrs[] = {
+ &NCR_700_active_tags_attr,
+ NULL,
+};
+
+EXPORT_SYMBOL(NCR_700_detect);
+EXPORT_SYMBOL(NCR_700_release);
+EXPORT_SYMBOL(NCR_700_intr);
+
+static struct spi_function_template NCR_700_transport_functions = {
+ .set_period = NCR_700_set_period,
+ .show_period = 1,
+ .set_offset = NCR_700_set_offset,
+ .show_offset = 1,
+};
+
+static int __init NCR_700_init(void)
+{
+ NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
+ if(!NCR_700_transport_template)
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit NCR_700_exit(void)
+{
+ spi_release_transport(NCR_700_transport_template);
+}
+
+module_init(NCR_700_init);
+module_exit(NCR_700_exit);
+
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
new file mode 100644
index 000000000..e06bdfeab
--- /dev/null
+++ b/drivers/scsi/53c700.h
@@ -0,0 +1,524 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* Driver for 53c700 and 53c700-66 chips from NCR and Symbios
+ *
+ * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
+ */
+
+#ifndef _53C700_H
+#define _53C700_H
+
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+/* Turn on for general debugging---too verbose for normal use */
+#undef NCR_700_DEBUG
+/* Debug the tag queues, checking hash queue allocation and deallocation
+ * and search for duplicate tags */
+#undef NCR_700_TAG_DEBUG
+
+#ifdef NCR_700_DEBUG
+#define DEBUG(x) printk x
+#define DDEBUG(prefix, sdev, fmt, a...) \
+ sdev_printk(prefix, sdev, fmt, ##a)
+#define CDEBUG(prefix, scmd, fmt, a...) \
+ scmd_printk(prefix, scmd, fmt, ##a)
+#else
+#define DEBUG(x) do {} while (0)
+#define DDEBUG(prefix, scmd, fmt, a...) do {} while (0)
+#define CDEBUG(prefix, scmd, fmt, a...) do {} while (0)
+#endif
+
+/* The number of available command slots */
+#define NCR_700_COMMAND_SLOTS_PER_HOST 64
+/* The maximum number of Scatter Gathers we allow */
+#define NCR_700_SG_SEGMENTS 32
+/* The maximum number of luns (make this of the form 2^n) */
+#define NCR_700_MAX_LUNS 32
+#define NCR_700_LUN_MASK (NCR_700_MAX_LUNS - 1)
+/* Maximum number of tags the driver ever allows per device */
+#define NCR_700_MAX_TAGS 16
+/* Tag depth the driver starts out with (can be altered in sysfs) */
+#define NCR_700_DEFAULT_TAGS 4
+/* This is the default number of commands per LUN in the untagged case.
+ * two is a good value because it means we can have one command active and
+ * one command fully prepared and waiting
+ */
+#define NCR_700_CMD_PER_LUN 2
+/* magic byte identifying an internally generated REQUEST_SENSE command */
+#define NCR_700_INTERNAL_SENSE_MAGIC 0x42
+
+struct NCR_700_Host_Parameters;
+
+/* These are the externally used routines */
+struct Scsi_Host *NCR_700_detect(struct scsi_host_template *,
+ struct NCR_700_Host_Parameters *, struct device *);
+int NCR_700_release(struct Scsi_Host *host);
+irqreturn_t NCR_700_intr(int, void *);
+
+
+enum NCR_700_Host_State {
+ NCR_700_HOST_BUSY,
+ NCR_700_HOST_FREE,
+};
+
+struct NCR_700_SG_List {
+ /* The following is a script fragment to move the buffer onto the
+ * bus and then link the next fragment or return */
+ #define SCRIPT_MOVE_DATA_IN 0x09000000
+ #define SCRIPT_MOVE_DATA_OUT 0x08000000
+ __u32 ins;
+ __u32 pAddr;
+ #define SCRIPT_NOP 0x80000000
+ #define SCRIPT_RETURN 0x90080000
+};
+
+struct NCR_700_Device_Parameters {
+ /* space for creating a request sense command. Really, except
+ * for the annoying SCSI-2 requirement for LUN information in
+ * cmnd[1], this could be in static storage */
+ unsigned char cmnd[MAX_COMMAND_SIZE];
+ __u8 depth;
+};
+
+
+/* The SYNC negotiation sequence looks like:
+ *
+ * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * If we get an SDTR reply, work out the SXFER parameters, squirrel
+ * them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
+ * DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel
+ *
+ *
+ * 0:7 SXFER_REG negotiated value for this device
+ * 8:15 Current queue depth
+ * 16 negotiated SYNC flag
+ * 17 begin SYNC negotiation flag
+ * 18 device supports tag queueing */
+#define NCR_700_DEV_NEGOTIATED_SYNC (1<<16)
+#define NCR_700_DEV_BEGIN_SYNC_NEGOTIATION (1<<17)
+#define NCR_700_DEV_PRINT_SYNC_NEGOTIATION (1<<19)
+
+static inline char *NCR_700_get_sense_cmnd(struct scsi_device *SDp)
+{
+ struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+ return hostdata->cmnd;
+}
+
+static inline void
+NCR_700_set_depth(struct scsi_device *SDp, __u8 depth)
+{
+ struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+ hostdata->depth = depth;
+}
+static inline __u8
+NCR_700_get_depth(struct scsi_device *SDp)
+{
+ struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+ return hostdata->depth;
+}
+static inline int
+NCR_700_is_flag_set(struct scsi_device *SDp, __u32 flag)
+{
+ return (spi_flags(SDp->sdev_target) & flag) == flag;
+}
+static inline int
+NCR_700_is_flag_clear(struct scsi_device *SDp, __u32 flag)
+{
+ return (spi_flags(SDp->sdev_target) & flag) == 0;
+}
+static inline void
+NCR_700_set_flag(struct scsi_device *SDp, __u32 flag)
+{
+ spi_flags(SDp->sdev_target) |= flag;
+}
+static inline void
+NCR_700_clear_flag(struct scsi_device *SDp, __u32 flag)
+{
+ spi_flags(SDp->sdev_target) &= ~flag;
+}
+
+enum NCR_700_tag_neg_state {
+ NCR_700_START_TAG_NEGOTIATION = 0,
+ NCR_700_DURING_TAG_NEGOTIATION = 1,
+ NCR_700_FINISHED_TAG_NEGOTIATION = 2,
+};
+
+static inline enum NCR_700_tag_neg_state
+NCR_700_get_tag_neg_state(struct scsi_device *SDp)
+{
+ return (enum NCR_700_tag_neg_state)((spi_flags(SDp->sdev_target)>>20) & 0x3);
+}
+
+static inline void
+NCR_700_set_tag_neg_state(struct scsi_device *SDp,
+ enum NCR_700_tag_neg_state state)
+{
+ /* clear the slot */
+ spi_flags(SDp->sdev_target) &= ~(0x3 << 20);
+ spi_flags(SDp->sdev_target) |= ((__u32)state) << 20;
+}
+
+struct NCR_700_command_slot {
+ struct NCR_700_SG_List SG[NCR_700_SG_SEGMENTS+1];
+ struct NCR_700_SG_List *pSG;
+ #define NCR_700_SLOT_MASK 0xFC
+ #define NCR_700_SLOT_MAGIC 0xb8
+ #define NCR_700_SLOT_FREE (0|NCR_700_SLOT_MAGIC) /* slot may be used */
+ #define NCR_700_SLOT_BUSY (1|NCR_700_SLOT_MAGIC) /* slot has command active on HA */
+ #define NCR_700_SLOT_QUEUED (2|NCR_700_SLOT_MAGIC) /* slot has command to be made active on HA */
+ __u8 state;
+ #define NCR_700_FLAG_AUTOSENSE 0x01
+ __u8 flags;
+ __u8 pad1[2]; /* Needed for m68k where min alignment is 2 bytes */
+ int tag;
+ __u32 resume_offset;
+ struct scsi_cmnd *cmnd;
+ /* The pci_mapped address of the actual command in cmnd */
+ dma_addr_t pCmd;
+ __u32 temp;
+ /* if this command is a pci_single mapping, holds the dma address
+ * for later unmapping in the done routine */
+ dma_addr_t dma_handle;
+ /* historical remnant, now used to link free commands */
+ struct NCR_700_command_slot *ITL_forw;
+};
+
+struct NCR_700_Host_Parameters {
+ /* These must be filled in by the calling driver */
+ int clock; /* board clock speed in MHz */
+ void __iomem *base; /* the base for the port (copied to host) */
+ struct device *dev;
+ __u32 dmode_extra; /* adjustable bus settings */
+ __u32 dcntl_extra; /* adjustable bus settings */
+ __u32 ctest7_extra; /* adjustable bus settings */
+ __u32 differential:1; /* if we are differential */
+#ifdef CONFIG_53C700_LE_ON_BE
+ /* This option is for HP only. Set it if your chip is wired for
+ * little endian on this platform (which is big endian) */
+ __u32 force_le_on_be:1;
+#endif
+ __u32 chip710:1; /* set if really a 710 not 700 */
+ __u32 burst_length:4; /* set to 0 to disable 710 bursting */
+
+ /* NOTHING BELOW HERE NEEDS ALTERING */
+ __u32 fast:1; /* if we can alter the SCSI bus clock
+ speed (so can negiotiate sync) */
+ int sync_clock; /* The speed of the SYNC core */
+
+ __u32 *script; /* pointer to script location */
+ __u32 pScript; /* physical mem addr of script */
+
+ enum NCR_700_Host_State state; /* protected by state lock */
+ struct scsi_cmnd *cmd;
+ /* Note: pScript contains the single consistent block of
+ * memory. All the msgin, msgout and status are allocated in
+ * this memory too (at separate cache lines). TOTAL_MEM_SIZE
+ * represents the total size of this area */
+#define MSG_ARRAY_SIZE 8
+#define MSGOUT_OFFSET (L1_CACHE_ALIGN(sizeof(SCRIPT)))
+ __u8 *msgout;
+#define MSGIN_OFFSET (MSGOUT_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE))
+ __u8 *msgin;
+#define STATUS_OFFSET (MSGIN_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE))
+ __u8 *status;
+#define SLOTS_OFFSET (STATUS_OFFSET + L1_CACHE_ALIGN(MSG_ARRAY_SIZE))
+ struct NCR_700_command_slot *slots;
+#define TOTAL_MEM_SIZE (SLOTS_OFFSET + L1_CACHE_ALIGN(sizeof(struct NCR_700_command_slot) * NCR_700_COMMAND_SLOTS_PER_HOST))
+ int saved_slot_position;
+ int command_slot_count; /* protected by state lock */
+ __u8 tag_negotiated;
+ __u8 rev;
+ __u8 reselection_id;
+ __u8 min_period;
+
+ /* Free list, singly linked by ITL_forw elements */
+ struct NCR_700_command_slot *free_list;
+ /* Completion for waited for ops, like reset, abort or
+ * device reset.
+ *
+ * NOTE: relies on single threading in the error handler to
+ * have only one outstanding at once */
+ struct completion *eh_complete;
+};
+
+/*
+ * 53C700 Register Interface - the offset from the Selected base
+ * I/O address */
+#ifdef CONFIG_53C700_LE_ON_BE
+#define bE (hostdata->force_le_on_be ? 0 : 3)
+#define bSWAP (hostdata->force_le_on_be)
+#define bEBus (!hostdata->force_le_on_be)
+#elif defined(__BIG_ENDIAN)
+#define bE 3
+#define bSWAP 0
+#elif defined(__LITTLE_ENDIAN)
+#define bE 0
+#define bSWAP 0
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined, did you include byteorder.h?"
+#endif
+#ifndef bEBus
+#ifdef CONFIG_53C700_BE_BUS
+#define bEBus 1
+#else
+#define bEBus 0
+#endif
+#endif
+#define bS_to_cpu(x) (bSWAP ? le32_to_cpu(x) : (x))
+#define bS_to_host(x) (bSWAP ? cpu_to_le32(x) : (x))
+
+/* NOTE: These registers are in the LE register space only, the required byte
+ * swapping is done by the NCR_700_{read|write}[b] functions */
+#define SCNTL0_REG 0x00
+#define FULL_ARBITRATION 0xc0
+#define PARITY 0x08
+#define ENABLE_PARITY 0x04
+#define AUTO_ATN 0x02
+#define SCNTL1_REG 0x01
+#define SLOW_BUS 0x80
+#define ENABLE_SELECT 0x20
+#define ASSERT_RST 0x08
+#define ASSERT_EVEN_PARITY 0x04
+#define SDID_REG 0x02
+#define SIEN_REG 0x03
+#define PHASE_MM_INT 0x80
+#define FUNC_COMP_INT 0x40
+#define SEL_TIMEOUT_INT 0x20
+#define SELECT_INT 0x10
+#define GROSS_ERR_INT 0x08
+#define UX_DISC_INT 0x04
+#define RST_INT 0x02
+#define PAR_ERR_INT 0x01
+#define SCID_REG 0x04
+#define SXFER_REG 0x05
+#define ASYNC_OPERATION 0x00
+#define SODL_REG 0x06
+#define SOCL_REG 0x07
+#define SFBR_REG 0x08
+#define SIDL_REG 0x09
+#define SBDL_REG 0x0A
+#define SBCL_REG 0x0B
+/* read bits */
+#define SBCL_IO 0x01
+/*write bits */
+#define SYNC_DIV_AS_ASYNC 0x00
+#define SYNC_DIV_1_0 0x01
+#define SYNC_DIV_1_5 0x02
+#define SYNC_DIV_2_0 0x03
+#define DSTAT_REG 0x0C
+#define ILGL_INST_DETECTED 0x01
+#define WATCH_DOG_INTERRUPT 0x02
+#define SCRIPT_INT_RECEIVED 0x04
+#define ABORTED 0x10
+#define SSTAT0_REG 0x0D
+#define PARITY_ERROR 0x01
+#define SCSI_RESET_DETECTED 0x02
+#define UNEXPECTED_DISCONNECT 0x04
+#define SCSI_GROSS_ERROR 0x08
+#define SELECTED 0x10
+#define SELECTION_TIMEOUT 0x20
+#define FUNCTION_COMPLETE 0x40
+#define PHASE_MISMATCH 0x80
+#define SSTAT1_REG 0x0E
+#define SIDL_REG_FULL 0x80
+#define SODR_REG_FULL 0x40
+#define SODL_REG_FULL 0x20
+#define SSTAT2_REG 0x0F
+#define CTEST0_REG 0x14
+#define BTB_TIMER_DISABLE 0x40
+#define CTEST1_REG 0x15
+#define CTEST2_REG 0x16
+#define CTEST3_REG 0x17
+#define CTEST4_REG 0x18
+#define DISABLE_FIFO 0x00
+#define SLBE 0x10
+#define SFWR 0x08
+#define BYTE_LANE0 0x04
+#define BYTE_LANE1 0x05
+#define BYTE_LANE2 0x06
+#define BYTE_LANE3 0x07
+#define SCSI_ZMODE 0x20
+#define ZMODE 0x40
+#define CTEST5_REG 0x19
+#define MASTER_CONTROL 0x10
+#define DMA_DIRECTION 0x08
+#define CTEST7_REG 0x1B
+#define BURST_DISABLE 0x80 /* 710 only */
+#define SEL_TIMEOUT_DISABLE 0x10 /* 710 only */
+#define DFP 0x08
+#define EVP 0x04
+#define CTEST7_TT1 0x02
+#define DIFF 0x01
+#define CTEST6_REG 0x1A
+#define TEMP_REG 0x1C
+#define DFIFO_REG 0x20
+#define FLUSH_DMA_FIFO 0x80
+#define CLR_FIFO 0x40
+#define ISTAT_REG 0x21
+#define ABORT_OPERATION 0x80
+#define SOFTWARE_RESET_710 0x40
+#define DMA_INT_PENDING 0x01
+#define SCSI_INT_PENDING 0x02
+#define CONNECTED 0x08
+#define CTEST8_REG 0x22
+#define LAST_DIS_ENBL 0x01
+#define SHORTEN_FILTERING 0x04
+#define ENABLE_ACTIVE_NEGATION 0x10
+#define GENERATE_RECEIVE_PARITY 0x20
+#define CLR_FIFO_710 0x04
+#define FLUSH_DMA_FIFO_710 0x08
+#define CTEST9_REG 0x23
+#define DBC_REG 0x24
+#define DCMD_REG 0x27
+#define DNAD_REG 0x28
+#define DIEN_REG 0x39
+#define BUS_FAULT 0x20
+#define ABORT_INT 0x10
+#define INT_INST_INT 0x04
+#define WD_INT 0x02
+#define ILGL_INST_INT 0x01
+#define DCNTL_REG 0x3B
+#define SOFTWARE_RESET 0x01
+#define COMPAT_700_MODE 0x01
+#define SCRPTS_16BITS 0x20
+#define EA_710 0x20
+#define ASYNC_DIV_2_0 0x00
+#define ASYNC_DIV_1_5 0x40
+#define ASYNC_DIV_1_0 0x80
+#define ASYNC_DIV_3_0 0xc0
+#define DMODE_710_REG 0x38
+#define DMODE_700_REG 0x34
+#define BURST_LENGTH_1 0x00
+#define BURST_LENGTH_2 0x40
+#define BURST_LENGTH_4 0x80
+#define BURST_LENGTH_8 0xC0
+#define DMODE_FC1 0x10
+#define DMODE_FC2 0x20
+#define BW16 32
+#define MODE_286 16
+#define IO_XFER 8
+#define FIXED_ADDR 4
+
+#define DSP_REG 0x2C
+#define DSPS_REG 0x30
+
+/* Parameters to begin SDTR negotiations. Empirically, I find that
+ * the 53c700-66 cannot handle an offset >8, so don't change this */
+#define NCR_700_MAX_OFFSET 8
+/* Was hoping the max offset would be greater for the 710, but
+ * empirically it seems to be 8 also */
+#define NCR_710_MAX_OFFSET 8
+#define NCR_700_MIN_XFERP 1
+#define NCR_710_MIN_XFERP 0
+#define NCR_700_MIN_PERIOD 25 /* for SDTR message, 100ns */
+
+#define script_patch_32(dev, script, symbol, value) \
+{ \
+ int i; \
+ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
+ __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
+ (script)[A_##symbol##_used[i]] = bS_to_host(val); \
+ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ DEBUG((" script, patching %s at %d to 0x%lx\n", \
+ #symbol, A_##symbol##_used[i], (value))); \
+ } \
+}
+
+#define script_patch_32_abs(dev, script, symbol, value) \
+{ \
+ int i; \
+ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
+ (script)[A_##symbol##_used[i]] = bS_to_host(value); \
+ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ DEBUG((" script, patching %s at %d to 0x%lx\n", \
+ #symbol, A_##symbol##_used[i], (value))); \
+ } \
+}
+
+/* Used for patching the SCSI ID in the SELECT instruction */
+#define script_patch_ID(dev, script, symbol, value) \
+{ \
+ int i; \
+ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
+ __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]); \
+ val &= 0xff00ffff; \
+ val |= ((value) & 0xff) << 16; \
+ (script)[A_##symbol##_used[i]] = bS_to_host(val); \
+ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
+ #symbol, A_##symbol##_used[i], val)); \
+ } \
+}
+
+#define script_patch_16(dev, script, symbol, value) \
+{ \
+ int i; \
+ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
+ __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]); \
+ val &= 0xffff0000; \
+ val |= ((value) & 0xffff); \
+ (script)[A_##symbol##_used[i]] = bS_to_host(val); \
+ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ DEBUG((" script, patching short field %s at %d to 0x%x\n", \
+ #symbol, A_##symbol##_used[i], val)); \
+ } \
+}
+
+
+static inline __u8
+NCR_700_readb(struct Scsi_Host *host, __u32 reg)
+{
+ const struct NCR_700_Host_Parameters *hostdata
+ = (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+ return ioread8(hostdata->base + (reg^bE));
+}
+
+static inline __u32
+NCR_700_readl(struct Scsi_Host *host, __u32 reg)
+{
+ const struct NCR_700_Host_Parameters *hostdata
+ = (struct NCR_700_Host_Parameters *)host->hostdata[0];
+ __u32 value = bEBus ? ioread32be(hostdata->base + reg) :
+ ioread32(hostdata->base + reg);
+#if 1
+ /* sanity check the register */
+ BUG_ON((reg & 0x3) != 0);
+#endif
+
+ return value;
+}
+
+static inline void
+NCR_700_writeb(__u8 value, struct Scsi_Host *host, __u32 reg)
+{
+ const struct NCR_700_Host_Parameters *hostdata
+ = (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+ iowrite8(value, hostdata->base + (reg^bE));
+}
+
+static inline void
+NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
+{
+ const struct NCR_700_Host_Parameters *hostdata
+ = (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+#if 1
+ /* sanity check the register */
+ BUG_ON((reg & 0x3) != 0);
+#endif
+
+ bEBus ? iowrite32be(value, hostdata->base + reg):
+ iowrite32(value, hostdata->base + reg);
+}
+
+#endif
diff --git a/drivers/scsi/53c700.scr b/drivers/scsi/53c700.scr
new file mode 100644
index 000000000..ec822e3b7
--- /dev/null
+++ b/drivers/scsi/53c700.scr
@@ -0,0 +1,411 @@
+; Script for the NCR (or symbios) 53c700 and 53c700-66 chip
+;
+; Copyright (C) 2001 James.Bottomley@HansenPartnership.com
+;;-----------------------------------------------------------------------------
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+;;
+;;-----------------------------------------------------------------------------
+;
+; This script is designed to be modified for the particular command in
+; operation. The particular variables pertaining to the commands are:
+;
+ABSOLUTE Device_ID = 0 ; ID of target for command
+ABSOLUTE MessageCount = 0 ; Number of bytes in message
+ABSOLUTE MessageLocation = 0 ; Addr of message
+ABSOLUTE CommandCount = 0 ; Number of bytes in command
+ABSOLUTE CommandAddress = 0 ; Addr of Command
+ABSOLUTE StatusAddress = 0 ; Addr to receive status return
+ABSOLUTE ReceiveMsgAddress = 0 ; Addr to receive msg
+;
+; This is the magic component for handling scatter-gather. Each of the
+; SG components is preceded by a script fragment which moves the
+; necessary amount of data and jumps to the next SG segment. The final
+; SG segment jumps back to . However, this address is the first SG script
+; segment.
+;
+ABSOLUTE SGScriptStartAddress = 0
+
+; The following represent status interrupts we use 3 hex digits for
+; this: 0xPRS where
+
+; P:
+ABSOLUTE AFTER_SELECTION = 0x100
+ABSOLUTE BEFORE_CMD = 0x200
+ABSOLUTE AFTER_CMD = 0x300
+ABSOLUTE AFTER_STATUS = 0x400
+ABSOLUTE AFTER_DATA_IN = 0x500
+ABSOLUTE AFTER_DATA_OUT = 0x600
+ABSOLUTE DURING_DATA_IN = 0x700
+
+; R:
+ABSOLUTE NOT_MSG_OUT = 0x10
+ABSOLUTE UNEXPECTED_PHASE = 0x20
+ABSOLUTE NOT_MSG_IN = 0x30
+ABSOLUTE UNEXPECTED_MSG = 0x40
+ABSOLUTE MSG_IN = 0x50
+ABSOLUTE SDTR_MSG_R = 0x60
+ABSOLUTE REJECT_MSG_R = 0x70
+ABSOLUTE DISCONNECT = 0x80
+ABSOLUTE MSG_OUT = 0x90
+ABSOLUTE WDTR_MSG_R = 0xA0
+
+; S:
+ABSOLUTE GOOD_STATUS = 0x1
+
+; Combinations, since the script assembler can't process |
+ABSOLUTE NOT_MSG_OUT_AFTER_SELECTION = 0x110
+ABSOLUTE UNEXPECTED_PHASE_BEFORE_CMD = 0x220
+ABSOLUTE UNEXPECTED_PHASE_AFTER_CMD = 0x320
+ABSOLUTE NOT_MSG_IN_AFTER_STATUS = 0x430
+ABSOLUTE GOOD_STATUS_AFTER_STATUS = 0x401
+ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_IN = 0x520
+ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_OUT = 0x620
+ABSOLUTE UNEXPECTED_MSG_BEFORE_CMD = 0x240
+ABSOLUTE MSG_IN_BEFORE_CMD = 0x250
+ABSOLUTE MSG_IN_AFTER_CMD = 0x350
+ABSOLUTE SDTR_MSG_BEFORE_CMD = 0x260
+ABSOLUTE REJECT_MSG_BEFORE_CMD = 0x270
+ABSOLUTE DISCONNECT_AFTER_CMD = 0x380
+ABSOLUTE SDTR_MSG_AFTER_CMD = 0x360
+ABSOLUTE WDTR_MSG_AFTER_CMD = 0x3A0
+ABSOLUTE MSG_IN_AFTER_STATUS = 0x440
+ABSOLUTE DISCONNECT_AFTER_DATA = 0x580
+ABSOLUTE MSG_IN_AFTER_DATA_IN = 0x550
+ABSOLUTE MSG_IN_AFTER_DATA_OUT = 0x650
+ABSOLUTE MSG_OUT_AFTER_DATA_IN = 0x590
+ABSOLUTE DATA_IN_AFTER_DATA_IN = 0x5a0
+ABSOLUTE MSG_IN_DURING_DATA_IN = 0x750
+ABSOLUTE DISCONNECT_DURING_DATA = 0x780
+
+;
+; Other interrupt conditions
+;
+ABSOLUTE RESELECTED_DURING_SELECTION = 0x1000
+ABSOLUTE COMPLETED_SELECTION_AS_TARGET = 0x1001
+ABSOLUTE RESELECTION_IDENTIFIED = 0x1003
+;
+; Fatal interrupt conditions. If you add to this, also add to the
+; array of corresponding messages
+;
+ABSOLUTE FATAL = 0x2000
+ABSOLUTE FATAL_UNEXPECTED_RESELECTION_MSG = 0x2000
+ABSOLUTE FATAL_SEND_MSG = 0x2001
+ABSOLUTE FATAL_NOT_MSG_IN_AFTER_SELECTION = 0x2002
+ABSOLUTE FATAL_ILLEGAL_MSG_LENGTH = 0x2003
+
+ABSOLUTE DEBUG_INTERRUPT = 0x3000
+ABSOLUTE DEBUG_INTERRUPT1 = 0x3001
+ABSOLUTE DEBUG_INTERRUPT2 = 0x3002
+ABSOLUTE DEBUG_INTERRUPT3 = 0x3003
+ABSOLUTE DEBUG_INTERRUPT4 = 0x3004
+ABSOLUTE DEBUG_INTERRUPT5 = 0x3005
+ABSOLUTE DEBUG_INTERRUPT6 = 0x3006
+
+
+;
+; SCSI Messages we interpret in the script
+;
+ABSOLUTE COMMAND_COMPLETE_MSG = 0x00
+ABSOLUTE EXTENDED_MSG = 0x01
+ABSOLUTE SDTR_MSG = 0x01
+ABSOLUTE SAVE_DATA_PTRS_MSG = 0x02
+ABSOLUTE RESTORE_DATA_PTRS_MSG = 0x03
+ABSOLUTE WDTR_MSG = 0x03
+ABSOLUTE DISCONNECT_MSG = 0x04
+ABSOLUTE REJECT_MSG = 0x07
+ABSOLUTE PARITY_ERROR_MSG = 0x09
+ABSOLUTE SIMPLE_TAG_MSG = 0x20
+ABSOLUTE IDENTIFY_MSG = 0x80
+ABSOLUTE IDENTIFY_MSG_MASK = 0x7F
+ABSOLUTE TWO_BYTE_MSG = 0x20
+ABSOLUTE TWO_BYTE_MSG_MASK = 0x0F
+
+; This is where the script begins
+
+ENTRY StartUp
+
+StartUp:
+ SELECT ATN Device_ID, Reselect
+ JUMP Finish, WHEN STATUS
+ JUMP SendIdentifyMsg, IF MSG_OUT
+ INT NOT_MSG_OUT_AFTER_SELECTION
+
+Reselect:
+ WAIT RESELECT SelectedAsTarget
+ INT RESELECTED_DURING_SELECTION, WHEN MSG_IN
+ INT FATAL_NOT_MSG_IN_AFTER_SELECTION
+
+ ENTRY GetReselectionData
+GetReselectionData:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ INT RESELECTION_IDENTIFIED
+
+ ENTRY GetReselectionWithTag
+GetReselectionWithTag:
+ MOVE 3, ReceiveMsgAddress, WHEN MSG_IN
+ INT RESELECTION_IDENTIFIED
+
+ ENTRY SelectedAsTarget
+SelectedAsTarget:
+; Basically tell the selecting device that there's nothing here
+ SET TARGET
+ DISCONNECT
+ CLEAR TARGET
+ INT COMPLETED_SELECTION_AS_TARGET
+;
+; These are the messaging entries
+;
+; Send a message. Message count should be correctly patched
+ ENTRY SendMessage
+SendMessage:
+ MOVE MessageCount, MessageLocation, WHEN MSG_OUT
+ResumeSendMessage:
+ RETURN, WHEN NOT MSG_OUT
+ INT FATAL_SEND_MSG
+
+ ENTRY SendMessagePhaseMismatch
+SendMessagePhaseMismatch:
+ CLEAR ACK
+ JUMP ResumeSendMessage
+;
+; Receive a message. Need to identify the message to
+; receive it correctly
+ ENTRY ReceiveMessage
+ReceiveMessage:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+;
+; Use this entry if we've just tried to look at the first byte
+; of the message and want to process it further
+ProcessReceiveMessage:
+ JUMP ReceiveExtendedMessage, IF EXTENDED_MSG
+ RETURN, IF NOT TWO_BYTE_MSG, AND MASK TWO_BYTE_MSG_MASK
+ CLEAR ACK
+ MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN
+ RETURN
+ReceiveExtendedMessage:
+ CLEAR ACK
+ MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN
+ JUMP Receive1Byte, IF 0x01
+ JUMP Receive2Byte, IF 0x02
+ JUMP Receive3Byte, IF 0x03
+ JUMP Receive4Byte, IF 0x04
+ JUMP Receive5Byte, IF 0x05
+ INT FATAL_ILLEGAL_MSG_LENGTH
+Receive1Byte:
+ CLEAR ACK
+ MOVE 1, ReceiveMsgAddress + 2, WHEN MSG_IN
+ RETURN
+Receive2Byte:
+ CLEAR ACK
+ MOVE 2, ReceiveMsgAddress + 2, WHEN MSG_IN
+ RETURN
+Receive3Byte:
+ CLEAR ACK
+ MOVE 3, ReceiveMsgAddress + 2, WHEN MSG_IN
+ RETURN
+Receive4Byte:
+ CLEAR ACK
+ MOVE 4, ReceiveMsgAddress + 2, WHEN MSG_IN
+ RETURN
+Receive5Byte:
+ CLEAR ACK
+ MOVE 5, ReceiveMsgAddress + 2, WHEN MSG_IN
+ RETURN
+;
+; Come here from the message processor to ignore the message
+;
+ ENTRY IgnoreMessage
+IgnoreMessage:
+ CLEAR ACK
+ RETURN
+;
+; Come here to send a reply to a message
+;
+ ENTRY SendMessageWithATN
+SendMessageWithATN:
+ SET ATN
+ CLEAR ACK
+ JUMP SendMessage
+
+SendIdentifyMsg:
+ CALL SendMessage
+ CLEAR ATN
+
+IgnoreMsgBeforeCommand:
+ CLEAR ACK
+ ENTRY SendCommand
+SendCommand:
+ JUMP Finish, WHEN STATUS
+ JUMP MsgInBeforeCommand, IF MSG_IN
+ INT UNEXPECTED_PHASE_BEFORE_CMD, IF NOT CMD
+ MOVE CommandCount, CommandAddress, WHEN CMD
+ResumeSendCommand:
+ JUMP Finish, WHEN STATUS
+ JUMP MsgInAfterCmd, IF MSG_IN
+ JUMP DataIn, IF DATA_IN
+ JUMP DataOut, IF DATA_OUT
+ INT UNEXPECTED_PHASE_AFTER_CMD
+
+IgnoreMsgDuringData:
+ CLEAR ACK
+ ; fall through to MsgInDuringData
+
+Entry MsgInDuringData
+MsgInDuringData:
+;
+; Could be we have nothing more to transfer
+;
+ JUMP Finish, WHEN STATUS
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ JUMP DisconnectDuringDataIn, IF DISCONNECT_MSG
+ JUMP IgnoreMsgDuringData, IF SAVE_DATA_PTRS_MSG
+ JUMP IgnoreMsgDuringData, IF RESTORE_DATA_PTRS_MSG
+ INT MSG_IN_DURING_DATA_IN
+
+MsgInAfterCmd:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ JUMP DisconnectAfterCmd, IF DISCONNECT_MSG
+ JUMP IgnoreMsgInAfterCmd, IF SAVE_DATA_PTRS_MSG
+ JUMP IgnoreMsgInAfterCmd, IF RESTORE_DATA_PTRS_MSG
+ CALL ProcessReceiveMessage
+ INT MSG_IN_AFTER_CMD
+ CLEAR ACK
+ JUMP ResumeSendCommand
+
+IgnoreMsgInAfterCmd:
+ CLEAR ACK
+ JUMP ResumeSendCommand
+
+DisconnectAfterCmd:
+ CLEAR ACK
+ WAIT DISCONNECT
+ ENTRY Disconnect1
+Disconnect1:
+ INT DISCONNECT_AFTER_CMD
+ ENTRY Disconnect2
+Disconnect2:
+; We return here after a reselection
+ CLEAR ACK
+ JUMP ResumeSendCommand
+
+MsgInBeforeCommand:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ JUMP IgnoreMsgBeforeCommand, IF SAVE_DATA_PTRS_MSG
+ JUMP IgnoreMsgBeforeCommand, IF RESTORE_DATA_PTRS_MSG
+ CALL ProcessReceiveMessage
+ INT MSG_IN_BEFORE_CMD
+ CLEAR ACK
+ JUMP SendCommand
+
+DataIn:
+ CALL SGScriptStartAddress
+ResumeDataIn:
+ JUMP Finish, WHEN STATUS
+ JUMP MsgInAfterDataIn, IF MSG_IN
+ JUMP DataInAfterDataIn, if DATA_IN
+ INT MSG_OUT_AFTER_DATA_IN, if MSG_OUT
+ INT UNEXPECTED_PHASE_AFTER_DATA_IN
+
+DataInAfterDataIn:
+ INT DATA_IN_AFTER_DATA_IN
+ JUMP ResumeDataIn
+
+DataOut:
+ CALL SGScriptStartAddress
+ResumeDataOut:
+ JUMP Finish, WHEN STATUS
+ JUMP MsgInAfterDataOut, IF MSG_IN
+ INT UNEXPECTED_PHASE_AFTER_DATA_OUT
+
+MsgInAfterDataIn:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ JUMP DisconnectAfterDataIn, IF DISCONNECT_MSG
+ JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG
+ JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG
+ CALL ProcessReceiveMessage
+ INT MSG_IN_AFTER_DATA_IN
+ CLEAR ACK
+ JUMP ResumeDataIn
+
+DisconnectDuringDataIn:
+ CLEAR ACK
+ WAIT DISCONNECT
+ ENTRY Disconnect3
+Disconnect3:
+ INT DISCONNECT_DURING_DATA
+ ENTRY Disconnect4
+Disconnect4:
+; we return here after a reselection
+ CLEAR ACK
+ JUMP ResumeSendCommand
+
+
+DisconnectAfterDataIn:
+ CLEAR ACK
+ WAIT DISCONNECT
+ ENTRY Disconnect5
+Disconnect5:
+ INT DISCONNECT_AFTER_DATA
+ ENTRY Disconnect6
+Disconnect6:
+; we return here after a reselection
+ CLEAR ACK
+ JUMP ResumeDataIn
+
+MsgInAfterDataOut:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ JUMP DisconnectAfterDataOut, if DISCONNECT_MSG
+ JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG
+ JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG
+ CALL ProcessReceiveMessage
+ INT MSG_IN_AFTER_DATA_OUT
+ CLEAR ACK
+ JUMP ResumeDataOut
+
+IgnoreMsgAfterData:
+ CLEAR ACK
+; Data in and out do the same thing on resume, so pick one
+ JUMP ResumeDataIn
+
+DisconnectAfterDataOut:
+ CLEAR ACK
+ WAIT DISCONNECT
+ ENTRY Disconnect7
+Disconnect7:
+ INT DISCONNECT_AFTER_DATA
+ ENTRY Disconnect8
+Disconnect8:
+; we return here after a reselection
+ CLEAR ACK
+ JUMP ResumeDataOut
+
+Finish:
+ MOVE 1, StatusAddress, WHEN STATUS
+ INT NOT_MSG_IN_AFTER_STATUS, WHEN NOT MSG_IN
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+ JUMP FinishCommandComplete, IF COMMAND_COMPLETE_MSG
+ CALL ProcessReceiveMessage
+ INT MSG_IN_AFTER_STATUS
+ ENTRY FinishCommandComplete
+FinishCommandComplete:
+ CLEAR ACK
+ WAIT DISCONNECT
+ ENTRY Finish1
+Finish1:
+ INT GOOD_STATUS_AFTER_STATUS
+ ENTRY Finish2
+Finish2:
+
diff --git a/drivers/scsi/53c700_d.h_shipped b/drivers/scsi/53c700_d.h_shipped
new file mode 100644
index 000000000..aa623da33
--- /dev/null
+++ b/drivers/scsi/53c700_d.h_shipped
@@ -0,0 +1,1329 @@
+/* DO NOT EDIT - Generated automatically by script_asm.pl */
+static u32 SCRIPT[] = {
+/*
+; Script for the NCR (or symbios) 53c700 and 53c700-66 chip
+;
+; Copyright (C) 2001 James.Bottomley@HansenPartnership.com
+;;-----------------------------------------------------------------------------
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+;;
+;;-----------------------------------------------------------------------------
+;
+; This script is designed to be modified for the particular command in
+; operation. The particular variables pertaining to the commands are:
+;
+ABSOLUTE Device_ID = 0 ; ID of target for command
+ABSOLUTE MessageCount = 0 ; Number of bytes in message
+ABSOLUTE MessageLocation = 0 ; Addr of message
+ABSOLUTE CommandCount = 0 ; Number of bytes in command
+ABSOLUTE CommandAddress = 0 ; Addr of Command
+ABSOLUTE StatusAddress = 0 ; Addr to receive status return
+ABSOLUTE ReceiveMsgAddress = 0 ; Addr to receive msg
+;
+; This is the magic component for handling scatter-gather. Each of the
+; SG components is preceded by a script fragment which moves the
+; necessary amount of data and jumps to the next SG segment. The final
+; SG segment jumps back to . However, this address is the first SG script
+; segment.
+;
+ABSOLUTE SGScriptStartAddress = 0
+
+; The following represent status interrupts we use 3 hex digits for
+; this: 0xPRS where
+
+; P:
+ABSOLUTE AFTER_SELECTION = 0x100
+ABSOLUTE BEFORE_CMD = 0x200
+ABSOLUTE AFTER_CMD = 0x300
+ABSOLUTE AFTER_STATUS = 0x400
+ABSOLUTE AFTER_DATA_IN = 0x500
+ABSOLUTE AFTER_DATA_OUT = 0x600
+ABSOLUTE DURING_DATA_IN = 0x700
+
+; R:
+ABSOLUTE NOT_MSG_OUT = 0x10
+ABSOLUTE UNEXPECTED_PHASE = 0x20
+ABSOLUTE NOT_MSG_IN = 0x30
+ABSOLUTE UNEXPECTED_MSG = 0x40
+ABSOLUTE MSG_IN = 0x50
+ABSOLUTE SDTR_MSG_R = 0x60
+ABSOLUTE REJECT_MSG_R = 0x70
+ABSOLUTE DISCONNECT = 0x80
+ABSOLUTE MSG_OUT = 0x90
+ABSOLUTE WDTR_MSG_R = 0xA0
+
+; S:
+ABSOLUTE GOOD_STATUS = 0x1
+
+; Combinations, since the script assembler can't process |
+ABSOLUTE NOT_MSG_OUT_AFTER_SELECTION = 0x110
+ABSOLUTE UNEXPECTED_PHASE_BEFORE_CMD = 0x220
+ABSOLUTE UNEXPECTED_PHASE_AFTER_CMD = 0x320
+ABSOLUTE NOT_MSG_IN_AFTER_STATUS = 0x430
+ABSOLUTE GOOD_STATUS_AFTER_STATUS = 0x401
+ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_IN = 0x520
+ABSOLUTE UNEXPECTED_PHASE_AFTER_DATA_OUT = 0x620
+ABSOLUTE UNEXPECTED_MSG_BEFORE_CMD = 0x240
+ABSOLUTE MSG_IN_BEFORE_CMD = 0x250
+ABSOLUTE MSG_IN_AFTER_CMD = 0x350
+ABSOLUTE SDTR_MSG_BEFORE_CMD = 0x260
+ABSOLUTE REJECT_MSG_BEFORE_CMD = 0x270
+ABSOLUTE DISCONNECT_AFTER_CMD = 0x380
+ABSOLUTE SDTR_MSG_AFTER_CMD = 0x360
+ABSOLUTE WDTR_MSG_AFTER_CMD = 0x3A0
+ABSOLUTE MSG_IN_AFTER_STATUS = 0x440
+ABSOLUTE DISCONNECT_AFTER_DATA = 0x580
+ABSOLUTE MSG_IN_AFTER_DATA_IN = 0x550
+ABSOLUTE MSG_IN_AFTER_DATA_OUT = 0x650
+ABSOLUTE MSG_OUT_AFTER_DATA_IN = 0x590
+ABSOLUTE DATA_IN_AFTER_DATA_IN = 0x5a0
+ABSOLUTE MSG_IN_DURING_DATA_IN = 0x750
+ABSOLUTE DISCONNECT_DURING_DATA = 0x780
+
+;
+; Other interrupt conditions
+;
+ABSOLUTE RESELECTED_DURING_SELECTION = 0x1000
+ABSOLUTE COMPLETED_SELECTION_AS_TARGET = 0x1001
+ABSOLUTE RESELECTION_IDENTIFIED = 0x1003
+;
+; Fatal interrupt conditions. If you add to this, also add to the
+; array of corresponding messages
+;
+ABSOLUTE FATAL = 0x2000
+ABSOLUTE FATAL_UNEXPECTED_RESELECTION_MSG = 0x2000
+ABSOLUTE FATAL_SEND_MSG = 0x2001
+ABSOLUTE FATAL_NOT_MSG_IN_AFTER_SELECTION = 0x2002
+ABSOLUTE FATAL_ILLEGAL_MSG_LENGTH = 0x2003
+
+ABSOLUTE DEBUG_INTERRUPT = 0x3000
+ABSOLUTE DEBUG_INTERRUPT1 = 0x3001
+ABSOLUTE DEBUG_INTERRUPT2 = 0x3002
+ABSOLUTE DEBUG_INTERRUPT3 = 0x3003
+ABSOLUTE DEBUG_INTERRUPT4 = 0x3004
+ABSOLUTE DEBUG_INTERRUPT5 = 0x3005
+ABSOLUTE DEBUG_INTERRUPT6 = 0x3006
+
+
+;
+; SCSI Messages we interpret in the script
+;
+ABSOLUTE COMMAND_COMPLETE_MSG = 0x00
+ABSOLUTE EXTENDED_MSG = 0x01
+ABSOLUTE SDTR_MSG = 0x01
+ABSOLUTE SAVE_DATA_PTRS_MSG = 0x02
+ABSOLUTE RESTORE_DATA_PTRS_MSG = 0x03
+ABSOLUTE WDTR_MSG = 0x03
+ABSOLUTE DISCONNECT_MSG = 0x04
+ABSOLUTE REJECT_MSG = 0x07
+ABSOLUTE PARITY_ERROR_MSG = 0x09
+ABSOLUTE SIMPLE_TAG_MSG = 0x20
+ABSOLUTE IDENTIFY_MSG = 0x80
+ABSOLUTE IDENTIFY_MSG_MASK = 0x7F
+ABSOLUTE TWO_BYTE_MSG = 0x20
+ABSOLUTE TWO_BYTE_MSG_MASK = 0x0F
+
+; This is where the script begins
+
+ENTRY StartUp
+
+StartUp:
+ SELECT ATN Device_ID, Reselect
+
+at 0x00000000 : */ 0x41000000,0x00000020,
+/*
+ JUMP Finish, WHEN STATUS
+
+at 0x00000002 : */ 0x830b0000,0x00000460,
+/*
+ JUMP SendIdentifyMsg, IF MSG_OUT
+
+at 0x00000004 : */ 0x860a0000,0x000001b0,
+/*
+ INT NOT_MSG_OUT_AFTER_SELECTION
+
+at 0x00000006 : */ 0x98080000,0x00000110,
+/*
+
+Reselect:
+ WAIT RESELECT SelectedAsTarget
+
+at 0x00000008 : */ 0x50000000,0x00000058,
+/*
+ INT RESELECTED_DURING_SELECTION, WHEN MSG_IN
+
+at 0x0000000a : */ 0x9f0b0000,0x00001000,
+/*
+ INT FATAL_NOT_MSG_IN_AFTER_SELECTION
+
+at 0x0000000c : */ 0x98080000,0x00002002,
+/*
+
+ ENTRY GetReselectionData
+GetReselectionData:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x0000000e : */ 0x0f000001,0x00000000,
+/*
+ INT RESELECTION_IDENTIFIED
+
+at 0x00000010 : */ 0x98080000,0x00001003,
+/*
+
+ ENTRY GetReselectionWithTag
+GetReselectionWithTag:
+ MOVE 3, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x00000012 : */ 0x0f000003,0x00000000,
+/*
+ INT RESELECTION_IDENTIFIED
+
+at 0x00000014 : */ 0x98080000,0x00001003,
+/*
+
+ ENTRY SelectedAsTarget
+SelectedAsTarget:
+; Basically tell the selecting device that there's nothing here
+ SET TARGET
+
+at 0x00000016 : */ 0x58000200,0x00000000,
+/*
+ DISCONNECT
+
+at 0x00000018 : */ 0x48000000,0x00000000,
+/*
+ CLEAR TARGET
+
+at 0x0000001a : */ 0x60000200,0x00000000,
+/*
+ INT COMPLETED_SELECTION_AS_TARGET
+
+at 0x0000001c : */ 0x98080000,0x00001001,
+/*
+;
+; These are the messaging entries
+;
+; Send a message. Message count should be correctly patched
+ ENTRY SendMessage
+SendMessage:
+ MOVE MessageCount, MessageLocation, WHEN MSG_OUT
+
+at 0x0000001e : */ 0x0e000000,0x00000000,
+/*
+ResumeSendMessage:
+ RETURN, WHEN NOT MSG_OUT
+
+at 0x00000020 : */ 0x96030000,0x00000000,
+/*
+ INT FATAL_SEND_MSG
+
+at 0x00000022 : */ 0x98080000,0x00002001,
+/*
+
+ ENTRY SendMessagePhaseMismatch
+SendMessagePhaseMismatch:
+ CLEAR ACK
+
+at 0x00000024 : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeSendMessage
+
+at 0x00000026 : */ 0x80080000,0x00000080,
+/*
+;
+; Receive a message. Need to identify the message to
+; receive it correctly
+ ENTRY ReceiveMessage
+ReceiveMessage:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x00000028 : */ 0x0f000001,0x00000000,
+/*
+;
+; Use this entry if we've just tried to look at the first byte
+; of the message and want to process it further
+ProcessReceiveMessage:
+ JUMP ReceiveExtendedMessage, IF EXTENDED_MSG
+
+at 0x0000002a : */ 0x800c0001,0x000000d0,
+/*
+ RETURN, IF NOT TWO_BYTE_MSG, AND MASK TWO_BYTE_MSG_MASK
+
+at 0x0000002c : */ 0x90040f20,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000002e : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN
+
+at 0x00000030 : */ 0x0f000001,0x00000001,
+/*
+ RETURN
+
+at 0x00000032 : */ 0x90080000,0x00000000,
+/*
+ReceiveExtendedMessage:
+ CLEAR ACK
+
+at 0x00000034 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, ReceiveMsgAddress + 1, WHEN MSG_IN
+
+at 0x00000036 : */ 0x0f000001,0x00000001,
+/*
+ JUMP Receive1Byte, IF 0x01
+
+at 0x00000038 : */ 0x800c0001,0x00000110,
+/*
+ JUMP Receive2Byte, IF 0x02
+
+at 0x0000003a : */ 0x800c0002,0x00000128,
+/*
+ JUMP Receive3Byte, IF 0x03
+
+at 0x0000003c : */ 0x800c0003,0x00000140,
+/*
+ JUMP Receive4Byte, IF 0x04
+
+at 0x0000003e : */ 0x800c0004,0x00000158,
+/*
+ JUMP Receive5Byte, IF 0x05
+
+at 0x00000040 : */ 0x800c0005,0x00000170,
+/*
+ INT FATAL_ILLEGAL_MSG_LENGTH
+
+at 0x00000042 : */ 0x98080000,0x00002003,
+/*
+Receive1Byte:
+ CLEAR ACK
+
+at 0x00000044 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, ReceiveMsgAddress + 2, WHEN MSG_IN
+
+at 0x00000046 : */ 0x0f000001,0x00000002,
+/*
+ RETURN
+
+at 0x00000048 : */ 0x90080000,0x00000000,
+/*
+Receive2Byte:
+ CLEAR ACK
+
+at 0x0000004a : */ 0x60000040,0x00000000,
+/*
+ MOVE 2, ReceiveMsgAddress + 2, WHEN MSG_IN
+
+at 0x0000004c : */ 0x0f000002,0x00000002,
+/*
+ RETURN
+
+at 0x0000004e : */ 0x90080000,0x00000000,
+/*
+Receive3Byte:
+ CLEAR ACK
+
+at 0x00000050 : */ 0x60000040,0x00000000,
+/*
+ MOVE 3, ReceiveMsgAddress + 2, WHEN MSG_IN
+
+at 0x00000052 : */ 0x0f000003,0x00000002,
+/*
+ RETURN
+
+at 0x00000054 : */ 0x90080000,0x00000000,
+/*
+Receive4Byte:
+ CLEAR ACK
+
+at 0x00000056 : */ 0x60000040,0x00000000,
+/*
+ MOVE 4, ReceiveMsgAddress + 2, WHEN MSG_IN
+
+at 0x00000058 : */ 0x0f000004,0x00000002,
+/*
+ RETURN
+
+at 0x0000005a : */ 0x90080000,0x00000000,
+/*
+Receive5Byte:
+ CLEAR ACK
+
+at 0x0000005c : */ 0x60000040,0x00000000,
+/*
+ MOVE 5, ReceiveMsgAddress + 2, WHEN MSG_IN
+
+at 0x0000005e : */ 0x0f000005,0x00000002,
+/*
+ RETURN
+
+at 0x00000060 : */ 0x90080000,0x00000000,
+/*
+;
+; Come here from the message processor to ignore the message
+;
+ ENTRY IgnoreMessage
+IgnoreMessage:
+ CLEAR ACK
+
+at 0x00000062 : */ 0x60000040,0x00000000,
+/*
+ RETURN
+
+at 0x00000064 : */ 0x90080000,0x00000000,
+/*
+;
+; Come here to send a reply to a message
+;
+ ENTRY SendMessageWithATN
+SendMessageWithATN:
+ SET ATN
+
+at 0x00000066 : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000068 : */ 0x60000040,0x00000000,
+/*
+ JUMP SendMessage
+
+at 0x0000006a : */ 0x80080000,0x00000078,
+/*
+
+SendIdentifyMsg:
+ CALL SendMessage
+
+at 0x0000006c : */ 0x88080000,0x00000078,
+/*
+ CLEAR ATN
+
+at 0x0000006e : */ 0x60000008,0x00000000,
+/*
+
+IgnoreMsgBeforeCommand:
+ CLEAR ACK
+
+at 0x00000070 : */ 0x60000040,0x00000000,
+/*
+ ENTRY SendCommand
+SendCommand:
+ JUMP Finish, WHEN STATUS
+
+at 0x00000072 : */ 0x830b0000,0x00000460,
+/*
+ JUMP MsgInBeforeCommand, IF MSG_IN
+
+at 0x00000074 : */ 0x870a0000,0x000002c0,
+/*
+ INT UNEXPECTED_PHASE_BEFORE_CMD, IF NOT CMD
+
+at 0x00000076 : */ 0x9a020000,0x00000220,
+/*
+ MOVE CommandCount, CommandAddress, WHEN CMD
+
+at 0x00000078 : */ 0x0a000000,0x00000000,
+/*
+ResumeSendCommand:
+ JUMP Finish, WHEN STATUS
+
+at 0x0000007a : */ 0x830b0000,0x00000460,
+/*
+ JUMP MsgInAfterCmd, IF MSG_IN
+
+at 0x0000007c : */ 0x870a0000,0x00000248,
+/*
+ JUMP DataIn, IF DATA_IN
+
+at 0x0000007e : */ 0x810a0000,0x000002f8,
+/*
+ JUMP DataOut, IF DATA_OUT
+
+at 0x00000080 : */ 0x800a0000,0x00000338,
+/*
+ INT UNEXPECTED_PHASE_AFTER_CMD
+
+at 0x00000082 : */ 0x98080000,0x00000320,
+/*
+
+IgnoreMsgDuringData:
+ CLEAR ACK
+
+at 0x00000084 : */ 0x60000040,0x00000000,
+/*
+ ; fall through to MsgInDuringData
+
+Entry MsgInDuringData
+MsgInDuringData:
+;
+; Could be we have nothing more to transfer
+;
+ JUMP Finish, WHEN STATUS
+
+at 0x00000086 : */ 0x830b0000,0x00000460,
+/*
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x00000088 : */ 0x0f000001,0x00000000,
+/*
+ JUMP DisconnectDuringDataIn, IF DISCONNECT_MSG
+
+at 0x0000008a : */ 0x800c0004,0x00000398,
+/*
+ JUMP IgnoreMsgDuringData, IF SAVE_DATA_PTRS_MSG
+
+at 0x0000008c : */ 0x800c0002,0x00000210,
+/*
+ JUMP IgnoreMsgDuringData, IF RESTORE_DATA_PTRS_MSG
+
+at 0x0000008e : */ 0x800c0003,0x00000210,
+/*
+ INT MSG_IN_DURING_DATA_IN
+
+at 0x00000090 : */ 0x98080000,0x00000750,
+/*
+
+MsgInAfterCmd:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x00000092 : */ 0x0f000001,0x00000000,
+/*
+ JUMP DisconnectAfterCmd, IF DISCONNECT_MSG
+
+at 0x00000094 : */ 0x800c0004,0x00000298,
+/*
+ JUMP IgnoreMsgInAfterCmd, IF SAVE_DATA_PTRS_MSG
+
+at 0x00000096 : */ 0x800c0002,0x00000288,
+/*
+ JUMP IgnoreMsgInAfterCmd, IF RESTORE_DATA_PTRS_MSG
+
+at 0x00000098 : */ 0x800c0003,0x00000288,
+/*
+ CALL ProcessReceiveMessage
+
+at 0x0000009a : */ 0x88080000,0x000000a8,
+/*
+ INT MSG_IN_AFTER_CMD
+
+at 0x0000009c : */ 0x98080000,0x00000350,
+/*
+ CLEAR ACK
+
+at 0x0000009e : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeSendCommand
+
+at 0x000000a0 : */ 0x80080000,0x000001e8,
+/*
+
+IgnoreMsgInAfterCmd:
+ CLEAR ACK
+
+at 0x000000a2 : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeSendCommand
+
+at 0x000000a4 : */ 0x80080000,0x000001e8,
+/*
+
+DisconnectAfterCmd:
+ CLEAR ACK
+
+at 0x000000a6 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x000000a8 : */ 0x48000000,0x00000000,
+/*
+ ENTRY Disconnect1
+Disconnect1:
+ INT DISCONNECT_AFTER_CMD
+
+at 0x000000aa : */ 0x98080000,0x00000380,
+/*
+ ENTRY Disconnect2
+Disconnect2:
+; We return here after a reselection
+ CLEAR ACK
+
+at 0x000000ac : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeSendCommand
+
+at 0x000000ae : */ 0x80080000,0x000001e8,
+/*
+
+MsgInBeforeCommand:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x000000b0 : */ 0x0f000001,0x00000000,
+/*
+ JUMP IgnoreMsgBeforeCommand, IF SAVE_DATA_PTRS_MSG
+
+at 0x000000b2 : */ 0x800c0002,0x000001c0,
+/*
+ JUMP IgnoreMsgBeforeCommand, IF RESTORE_DATA_PTRS_MSG
+
+at 0x000000b4 : */ 0x800c0003,0x000001c0,
+/*
+ CALL ProcessReceiveMessage
+
+at 0x000000b6 : */ 0x88080000,0x000000a8,
+/*
+ INT MSG_IN_BEFORE_CMD
+
+at 0x000000b8 : */ 0x98080000,0x00000250,
+/*
+ CLEAR ACK
+
+at 0x000000ba : */ 0x60000040,0x00000000,
+/*
+ JUMP SendCommand
+
+at 0x000000bc : */ 0x80080000,0x000001c8,
+/*
+
+DataIn:
+ CALL SGScriptStartAddress
+
+at 0x000000be : */ 0x88080000,0x00000000,
+/*
+ResumeDataIn:
+ JUMP Finish, WHEN STATUS
+
+at 0x000000c0 : */ 0x830b0000,0x00000460,
+/*
+ JUMP MsgInAfterDataIn, IF MSG_IN
+
+at 0x000000c2 : */ 0x870a0000,0x00000358,
+/*
+ JUMP DataInAfterDataIn, if DATA_IN
+
+at 0x000000c4 : */ 0x810a0000,0x00000328,
+/*
+ INT MSG_OUT_AFTER_DATA_IN, if MSG_OUT
+
+at 0x000000c6 : */ 0x9e0a0000,0x00000590,
+/*
+ INT UNEXPECTED_PHASE_AFTER_DATA_IN
+
+at 0x000000c8 : */ 0x98080000,0x00000520,
+/*
+
+DataInAfterDataIn:
+ INT DATA_IN_AFTER_DATA_IN
+
+at 0x000000ca : */ 0x98080000,0x000005a0,
+/*
+ JUMP ResumeDataIn
+
+at 0x000000cc : */ 0x80080000,0x00000300,
+/*
+
+DataOut:
+ CALL SGScriptStartAddress
+
+at 0x000000ce : */ 0x88080000,0x00000000,
+/*
+ResumeDataOut:
+ JUMP Finish, WHEN STATUS
+
+at 0x000000d0 : */ 0x830b0000,0x00000460,
+/*
+ JUMP MsgInAfterDataOut, IF MSG_IN
+
+at 0x000000d2 : */ 0x870a0000,0x000003e8,
+/*
+ INT UNEXPECTED_PHASE_AFTER_DATA_OUT
+
+at 0x000000d4 : */ 0x98080000,0x00000620,
+/*
+
+MsgInAfterDataIn:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x000000d6 : */ 0x0f000001,0x00000000,
+/*
+ JUMP DisconnectAfterDataIn, IF DISCONNECT_MSG
+
+at 0x000000d8 : */ 0x800c0004,0x000003c0,
+/*
+ JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG
+
+at 0x000000da : */ 0x800c0002,0x00000428,
+/*
+ JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG
+
+at 0x000000dc : */ 0x800c0003,0x00000428,
+/*
+ CALL ProcessReceiveMessage
+
+at 0x000000de : */ 0x88080000,0x000000a8,
+/*
+ INT MSG_IN_AFTER_DATA_IN
+
+at 0x000000e0 : */ 0x98080000,0x00000550,
+/*
+ CLEAR ACK
+
+at 0x000000e2 : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeDataIn
+
+at 0x000000e4 : */ 0x80080000,0x00000300,
+/*
+
+DisconnectDuringDataIn:
+ CLEAR ACK
+
+at 0x000000e6 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x000000e8 : */ 0x48000000,0x00000000,
+/*
+ ENTRY Disconnect3
+Disconnect3:
+ INT DISCONNECT_DURING_DATA
+
+at 0x000000ea : */ 0x98080000,0x00000780,
+/*
+ ENTRY Disconnect4
+Disconnect4:
+; we return here after a reselection
+ CLEAR ACK
+
+at 0x000000ec : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeSendCommand
+
+at 0x000000ee : */ 0x80080000,0x000001e8,
+/*
+
+
+DisconnectAfterDataIn:
+ CLEAR ACK
+
+at 0x000000f0 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x000000f2 : */ 0x48000000,0x00000000,
+/*
+ ENTRY Disconnect5
+Disconnect5:
+ INT DISCONNECT_AFTER_DATA
+
+at 0x000000f4 : */ 0x98080000,0x00000580,
+/*
+ ENTRY Disconnect6
+Disconnect6:
+; we return here after a reselection
+ CLEAR ACK
+
+at 0x000000f6 : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeDataIn
+
+at 0x000000f8 : */ 0x80080000,0x00000300,
+/*
+
+MsgInAfterDataOut:
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x000000fa : */ 0x0f000001,0x00000000,
+/*
+ JUMP DisconnectAfterDataOut, if DISCONNECT_MSG
+
+at 0x000000fc : */ 0x800c0004,0x00000438,
+/*
+ JUMP IgnoreMsgAfterData, IF SAVE_DATA_PTRS_MSG
+
+at 0x000000fe : */ 0x800c0002,0x00000428,
+/*
+ JUMP IgnoreMsgAfterData, IF RESTORE_DATA_PTRS_MSG
+
+at 0x00000100 : */ 0x800c0003,0x00000428,
+/*
+ CALL ProcessReceiveMessage
+
+at 0x00000102 : */ 0x88080000,0x000000a8,
+/*
+ INT MSG_IN_AFTER_DATA_OUT
+
+at 0x00000104 : */ 0x98080000,0x00000650,
+/*
+ CLEAR ACK
+
+at 0x00000106 : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeDataOut
+
+at 0x00000108 : */ 0x80080000,0x00000340,
+/*
+
+IgnoreMsgAfterData:
+ CLEAR ACK
+
+at 0x0000010a : */ 0x60000040,0x00000000,
+/*
+; Data in and out do the same thing on resume, so pick one
+ JUMP ResumeDataIn
+
+at 0x0000010c : */ 0x80080000,0x00000300,
+/*
+
+DisconnectAfterDataOut:
+ CLEAR ACK
+
+at 0x0000010e : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x00000110 : */ 0x48000000,0x00000000,
+/*
+ ENTRY Disconnect7
+Disconnect7:
+ INT DISCONNECT_AFTER_DATA
+
+at 0x00000112 : */ 0x98080000,0x00000580,
+/*
+ ENTRY Disconnect8
+Disconnect8:
+; we return here after a reselection
+ CLEAR ACK
+
+at 0x00000114 : */ 0x60000040,0x00000000,
+/*
+ JUMP ResumeDataOut
+
+at 0x00000116 : */ 0x80080000,0x00000340,
+/*
+
+Finish:
+ MOVE 1, StatusAddress, WHEN STATUS
+
+at 0x00000118 : */ 0x0b000001,0x00000000,
+/*
+ INT NOT_MSG_IN_AFTER_STATUS, WHEN NOT MSG_IN
+
+at 0x0000011a : */ 0x9f030000,0x00000430,
+/*
+ MOVE 1, ReceiveMsgAddress, WHEN MSG_IN
+
+at 0x0000011c : */ 0x0f000001,0x00000000,
+/*
+ JUMP FinishCommandComplete, IF COMMAND_COMPLETE_MSG
+
+at 0x0000011e : */ 0x800c0000,0x00000490,
+/*
+ CALL ProcessReceiveMessage
+
+at 0x00000120 : */ 0x88080000,0x000000a8,
+/*
+ INT MSG_IN_AFTER_STATUS
+
+at 0x00000122 : */ 0x98080000,0x00000440,
+/*
+ ENTRY FinishCommandComplete
+FinishCommandComplete:
+ CLEAR ACK
+
+at 0x00000124 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x00000126 : */ 0x48000000,0x00000000,
+/*
+ ENTRY Finish1
+Finish1:
+ INT GOOD_STATUS_AFTER_STATUS
+
+at 0x00000128 : */ 0x98080000,0x00000401,
+};
+
+#define A_AFTER_CMD 0x00000300
+static u32 A_AFTER_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_AFTER_DATA_IN 0x00000500
+static u32 A_AFTER_DATA_IN_used[] __attribute((unused)) = {
+};
+
+#define A_AFTER_DATA_OUT 0x00000600
+static u32 A_AFTER_DATA_OUT_used[] __attribute((unused)) = {
+};
+
+#define A_AFTER_SELECTION 0x00000100
+static u32 A_AFTER_SELECTION_used[] __attribute((unused)) = {
+};
+
+#define A_AFTER_STATUS 0x00000400
+static u32 A_AFTER_STATUS_used[] __attribute((unused)) = {
+};
+
+#define A_BEFORE_CMD 0x00000200
+static u32 A_BEFORE_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_COMMAND_COMPLETE_MSG 0x00000000
+static u32 A_COMMAND_COMPLETE_MSG_used[] __attribute((unused)) = {
+ 0x0000011e,
+};
+
+#define A_COMPLETED_SELECTION_AS_TARGET 0x00001001
+static u32 A_COMPLETED_SELECTION_AS_TARGET_used[] __attribute((unused)) = {
+ 0x0000001d,
+};
+
+#define A_CommandAddress 0x00000000
+static u32 A_CommandAddress_used[] __attribute((unused)) = {
+ 0x00000079,
+};
+
+#define A_CommandCount 0x00000000
+static u32 A_CommandCount_used[] __attribute((unused)) = {
+ 0x00000078,
+};
+
+#define A_DATA_IN_AFTER_DATA_IN 0x000005a0
+static u32 A_DATA_IN_AFTER_DATA_IN_used[] __attribute((unused)) = {
+ 0x000000cb,
+};
+
+#define A_DEBUG_INTERRUPT 0x00003000
+static u32 A_DEBUG_INTERRUPT_used[] __attribute((unused)) = {
+};
+
+#define A_DEBUG_INTERRUPT1 0x00003001
+static u32 A_DEBUG_INTERRUPT1_used[] __attribute((unused)) = {
+};
+
+#define A_DEBUG_INTERRUPT2 0x00003002
+static u32 A_DEBUG_INTERRUPT2_used[] __attribute((unused)) = {
+};
+
+#define A_DEBUG_INTERRUPT3 0x00003003
+static u32 A_DEBUG_INTERRUPT3_used[] __attribute((unused)) = {
+};
+
+#define A_DEBUG_INTERRUPT4 0x00003004
+static u32 A_DEBUG_INTERRUPT4_used[] __attribute((unused)) = {
+};
+
+#define A_DEBUG_INTERRUPT5 0x00003005
+static u32 A_DEBUG_INTERRUPT5_used[] __attribute((unused)) = {
+};
+
+#define A_DEBUG_INTERRUPT6 0x00003006
+static u32 A_DEBUG_INTERRUPT6_used[] __attribute((unused)) = {
+};
+
+#define A_DISCONNECT 0x00000080
+static u32 A_DISCONNECT_used[] __attribute((unused)) = {
+};
+
+#define A_DISCONNECT_AFTER_CMD 0x00000380
+static u32 A_DISCONNECT_AFTER_CMD_used[] __attribute((unused)) = {
+ 0x000000ab,
+};
+
+#define A_DISCONNECT_AFTER_DATA 0x00000580
+static u32 A_DISCONNECT_AFTER_DATA_used[] __attribute((unused)) = {
+ 0x000000f5,
+ 0x00000113,
+};
+
+#define A_DISCONNECT_DURING_DATA 0x00000780
+static u32 A_DISCONNECT_DURING_DATA_used[] __attribute((unused)) = {
+ 0x000000eb,
+};
+
+#define A_DISCONNECT_MSG 0x00000004
+static u32 A_DISCONNECT_MSG_used[] __attribute((unused)) = {
+ 0x0000008a,
+ 0x00000094,
+ 0x000000d8,
+ 0x000000fc,
+};
+
+#define A_DURING_DATA_IN 0x00000700
+static u32 A_DURING_DATA_IN_used[] __attribute((unused)) = {
+};
+
+#define A_Device_ID 0x00000000
+static u32 A_Device_ID_used[] __attribute((unused)) = {
+ 0x00000000,
+};
+
+#define A_EXTENDED_MSG 0x00000001
+static u32 A_EXTENDED_MSG_used[] __attribute((unused)) = {
+ 0x0000002a,
+};
+
+#define A_FATAL 0x00002000
+static u32 A_FATAL_used[] __attribute((unused)) = {
+};
+
+#define A_FATAL_ILLEGAL_MSG_LENGTH 0x00002003
+static u32 A_FATAL_ILLEGAL_MSG_LENGTH_used[] __attribute((unused)) = {
+ 0x00000043,
+};
+
+#define A_FATAL_NOT_MSG_IN_AFTER_SELECTION 0x00002002
+static u32 A_FATAL_NOT_MSG_IN_AFTER_SELECTION_used[] __attribute((unused)) = {
+ 0x0000000d,
+};
+
+#define A_FATAL_SEND_MSG 0x00002001
+static u32 A_FATAL_SEND_MSG_used[] __attribute((unused)) = {
+ 0x00000023,
+};
+
+#define A_FATAL_UNEXPECTED_RESELECTION_MSG 0x00002000
+static u32 A_FATAL_UNEXPECTED_RESELECTION_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_GOOD_STATUS 0x00000001
+static u32 A_GOOD_STATUS_used[] __attribute((unused)) = {
+};
+
+#define A_GOOD_STATUS_AFTER_STATUS 0x00000401
+static u32 A_GOOD_STATUS_AFTER_STATUS_used[] __attribute((unused)) = {
+ 0x00000129,
+};
+
+#define A_IDENTIFY_MSG 0x00000080
+static u32 A_IDENTIFY_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_IDENTIFY_MSG_MASK 0x0000007f
+static u32 A_IDENTIFY_MSG_MASK_used[] __attribute((unused)) = {
+};
+
+#define A_MSG_IN 0x00000050
+static u32 A_MSG_IN_used[] __attribute((unused)) = {
+};
+
+#define A_MSG_IN_AFTER_CMD 0x00000350
+static u32 A_MSG_IN_AFTER_CMD_used[] __attribute((unused)) = {
+ 0x0000009d,
+};
+
+#define A_MSG_IN_AFTER_DATA_IN 0x00000550
+static u32 A_MSG_IN_AFTER_DATA_IN_used[] __attribute((unused)) = {
+ 0x000000e1,
+};
+
+#define A_MSG_IN_AFTER_DATA_OUT 0x00000650
+static u32 A_MSG_IN_AFTER_DATA_OUT_used[] __attribute((unused)) = {
+ 0x00000105,
+};
+
+#define A_MSG_IN_AFTER_STATUS 0x00000440
+static u32 A_MSG_IN_AFTER_STATUS_used[] __attribute((unused)) = {
+ 0x00000123,
+};
+
+#define A_MSG_IN_BEFORE_CMD 0x00000250
+static u32 A_MSG_IN_BEFORE_CMD_used[] __attribute((unused)) = {
+ 0x000000b9,
+};
+
+#define A_MSG_IN_DURING_DATA_IN 0x00000750
+static u32 A_MSG_IN_DURING_DATA_IN_used[] __attribute((unused)) = {
+ 0x00000091,
+};
+
+#define A_MSG_OUT 0x00000090
+static u32 A_MSG_OUT_used[] __attribute((unused)) = {
+};
+
+#define A_MSG_OUT_AFTER_DATA_IN 0x00000590
+static u32 A_MSG_OUT_AFTER_DATA_IN_used[] __attribute((unused)) = {
+ 0x000000c7,
+};
+
+#define A_MessageCount 0x00000000
+static u32 A_MessageCount_used[] __attribute((unused)) = {
+ 0x0000001e,
+};
+
+#define A_MessageLocation 0x00000000
+static u32 A_MessageLocation_used[] __attribute((unused)) = {
+ 0x0000001f,
+};
+
+#define A_NOT_MSG_IN 0x00000030
+static u32 A_NOT_MSG_IN_used[] __attribute((unused)) = {
+};
+
+#define A_NOT_MSG_IN_AFTER_STATUS 0x00000430
+static u32 A_NOT_MSG_IN_AFTER_STATUS_used[] __attribute((unused)) = {
+ 0x0000011b,
+};
+
+#define A_NOT_MSG_OUT 0x00000010
+static u32 A_NOT_MSG_OUT_used[] __attribute((unused)) = {
+};
+
+#define A_NOT_MSG_OUT_AFTER_SELECTION 0x00000110
+static u32 A_NOT_MSG_OUT_AFTER_SELECTION_used[] __attribute((unused)) = {
+ 0x00000007,
+};
+
+#define A_PARITY_ERROR_MSG 0x00000009
+static u32 A_PARITY_ERROR_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_REJECT_MSG 0x00000007
+static u32 A_REJECT_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_REJECT_MSG_BEFORE_CMD 0x00000270
+static u32 A_REJECT_MSG_BEFORE_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_REJECT_MSG_R 0x00000070
+static u32 A_REJECT_MSG_R_used[] __attribute((unused)) = {
+};
+
+#define A_RESELECTED_DURING_SELECTION 0x00001000
+static u32 A_RESELECTED_DURING_SELECTION_used[] __attribute((unused)) = {
+ 0x0000000b,
+};
+
+#define A_RESELECTION_IDENTIFIED 0x00001003
+static u32 A_RESELECTION_IDENTIFIED_used[] __attribute((unused)) = {
+ 0x00000011,
+ 0x00000015,
+};
+
+#define A_RESTORE_DATA_PTRS_MSG 0x00000003
+static u32 A_RESTORE_DATA_PTRS_MSG_used[] __attribute((unused)) = {
+ 0x0000008e,
+ 0x00000098,
+ 0x000000b4,
+ 0x000000dc,
+ 0x00000100,
+};
+
+#define A_ReceiveMsgAddress 0x00000000
+static u32 A_ReceiveMsgAddress_used[] __attribute((unused)) = {
+ 0x0000000f,
+ 0x00000013,
+ 0x00000029,
+ 0x00000031,
+ 0x00000037,
+ 0x00000047,
+ 0x0000004d,
+ 0x00000053,
+ 0x00000059,
+ 0x0000005f,
+ 0x00000089,
+ 0x00000093,
+ 0x000000b1,
+ 0x000000d7,
+ 0x000000fb,
+ 0x0000011d,
+};
+
+#define A_SAVE_DATA_PTRS_MSG 0x00000002
+static u32 A_SAVE_DATA_PTRS_MSG_used[] __attribute((unused)) = {
+ 0x0000008c,
+ 0x00000096,
+ 0x000000b2,
+ 0x000000da,
+ 0x000000fe,
+};
+
+#define A_SDTR_MSG 0x00000001
+static u32 A_SDTR_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_SDTR_MSG_AFTER_CMD 0x00000360
+static u32 A_SDTR_MSG_AFTER_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_SDTR_MSG_BEFORE_CMD 0x00000260
+static u32 A_SDTR_MSG_BEFORE_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_SDTR_MSG_R 0x00000060
+static u32 A_SDTR_MSG_R_used[] __attribute((unused)) = {
+};
+
+#define A_SGScriptStartAddress 0x00000000
+static u32 A_SGScriptStartAddress_used[] __attribute((unused)) = {
+ 0x000000bf,
+ 0x000000cf,
+};
+
+#define A_SIMPLE_TAG_MSG 0x00000020
+static u32 A_SIMPLE_TAG_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_StatusAddress 0x00000000
+static u32 A_StatusAddress_used[] __attribute((unused)) = {
+ 0x00000119,
+};
+
+#define A_TWO_BYTE_MSG 0x00000020
+static u32 A_TWO_BYTE_MSG_used[] __attribute((unused)) = {
+ 0x0000002c,
+};
+
+#define A_TWO_BYTE_MSG_MASK 0x0000000f
+static u32 A_TWO_BYTE_MSG_MASK_used[] __attribute((unused)) = {
+ 0x0000002c,
+};
+
+#define A_UNEXPECTED_MSG 0x00000040
+static u32 A_UNEXPECTED_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_UNEXPECTED_MSG_BEFORE_CMD 0x00000240
+static u32 A_UNEXPECTED_MSG_BEFORE_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_UNEXPECTED_PHASE 0x00000020
+static u32 A_UNEXPECTED_PHASE_used[] __attribute((unused)) = {
+};
+
+#define A_UNEXPECTED_PHASE_AFTER_CMD 0x00000320
+static u32 A_UNEXPECTED_PHASE_AFTER_CMD_used[] __attribute((unused)) = {
+ 0x00000083,
+};
+
+#define A_UNEXPECTED_PHASE_AFTER_DATA_IN 0x00000520
+static u32 A_UNEXPECTED_PHASE_AFTER_DATA_IN_used[] __attribute((unused)) = {
+ 0x000000c9,
+};
+
+#define A_UNEXPECTED_PHASE_AFTER_DATA_OUT 0x00000620
+static u32 A_UNEXPECTED_PHASE_AFTER_DATA_OUT_used[] __attribute((unused)) = {
+ 0x000000d5,
+};
+
+#define A_UNEXPECTED_PHASE_BEFORE_CMD 0x00000220
+static u32 A_UNEXPECTED_PHASE_BEFORE_CMD_used[] __attribute((unused)) = {
+ 0x00000077,
+};
+
+#define A_WDTR_MSG 0x00000003
+static u32 A_WDTR_MSG_used[] __attribute((unused)) = {
+};
+
+#define A_WDTR_MSG_AFTER_CMD 0x000003a0
+static u32 A_WDTR_MSG_AFTER_CMD_used[] __attribute((unused)) = {
+};
+
+#define A_WDTR_MSG_R 0x000000a0
+static u32 A_WDTR_MSG_R_used[] __attribute((unused)) = {
+};
+
+#define Ent_Disconnect1 0x000002a8
+#define Ent_Disconnect2 0x000002b0
+#define Ent_Disconnect3 0x000003a8
+#define Ent_Disconnect4 0x000003b0
+#define Ent_Disconnect5 0x000003d0
+#define Ent_Disconnect6 0x000003d8
+#define Ent_Disconnect7 0x00000448
+#define Ent_Disconnect8 0x00000450
+#define Ent_Finish1 0x000004a0
+#define Ent_Finish2 0x000004a8
+#define Ent_FinishCommandComplete 0x00000490
+#define Ent_GetReselectionData 0x00000038
+#define Ent_GetReselectionWithTag 0x00000048
+#define Ent_IgnoreMessage 0x00000188
+#define Ent_MsgInDuringData 0x00000218
+#define Ent_ReceiveMessage 0x000000a0
+#define Ent_SelectedAsTarget 0x00000058
+#define Ent_SendCommand 0x000001c8
+#define Ent_SendMessage 0x00000078
+#define Ent_SendMessagePhaseMismatch 0x00000090
+#define Ent_SendMessageWithATN 0x00000198
+#define Ent_StartUp 0x00000000
+static u32 LABELPATCHES[] __attribute((unused)) = {
+ 0x00000001,
+ 0x00000003,
+ 0x00000005,
+ 0x00000009,
+ 0x00000027,
+ 0x0000002b,
+ 0x00000039,
+ 0x0000003b,
+ 0x0000003d,
+ 0x0000003f,
+ 0x00000041,
+ 0x0000006b,
+ 0x0000006d,
+ 0x00000073,
+ 0x00000075,
+ 0x0000007b,
+ 0x0000007d,
+ 0x0000007f,
+ 0x00000081,
+ 0x00000087,
+ 0x0000008b,
+ 0x0000008d,
+ 0x0000008f,
+ 0x00000095,
+ 0x00000097,
+ 0x00000099,
+ 0x0000009b,
+ 0x000000a1,
+ 0x000000a5,
+ 0x000000af,
+ 0x000000b3,
+ 0x000000b5,
+ 0x000000b7,
+ 0x000000bd,
+ 0x000000c1,
+ 0x000000c3,
+ 0x000000c5,
+ 0x000000cd,
+ 0x000000d1,
+ 0x000000d3,
+ 0x000000d9,
+ 0x000000db,
+ 0x000000dd,
+ 0x000000df,
+ 0x000000e5,
+ 0x000000ef,
+ 0x000000f9,
+ 0x000000fd,
+ 0x000000ff,
+ 0x00000101,
+ 0x00000103,
+ 0x00000109,
+ 0x0000010d,
+ 0x00000117,
+ 0x0000011f,
+ 0x00000121,
+};
+
+static struct {
+ u32 offset;
+ void *address;
+} EXTERNAL_PATCHES[] __attribute((unused)) = {
+};
+
+static u32 INSTRUCTIONS __attribute((unused)) = 149;
+static u32 PATCHES __attribute((unused)) = 56;
+static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
new file mode 100644
index 000000000..c7be7bb37
--- /dev/null
+++ b/drivers/scsi/BusLogic.c
@@ -0,0 +1,3906 @@
+
+/*
+
+ Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
+
+ Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that any modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose
+ advice has been invaluable, to David Gentzel, for writing the original Linux
+ BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site.
+
+ Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB
+ Manager available as freely redistributable source code.
+
+*/
+
+#define blogic_drvr_version "2.1.17"
+#define blogic_drvr_date "12 September 2013"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <scsi/scsicam.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include "BusLogic.h"
+#include "FlashPoint.c"
+
+#ifndef FAILURE
+#define FAILURE (-1)
+#endif
+
+static struct scsi_host_template blogic_template;
+
+/*
+ blogic_drvr_options_count is a count of the number of BusLogic Driver
+ Options specifications provided via the Linux Kernel Command Line or via
+ the Loadable Kernel Module Installation Facility.
+*/
+
+static int blogic_drvr_options_count;
+
+
+/*
+ blogic_drvr_options is an array of Driver Options structures representing
+ BusLogic Driver Options specifications provided via the Linux Kernel Command
+ Line or via the Loadable Kernel Module Installation Facility.
+*/
+
+static struct blogic_drvr_options blogic_drvr_options[BLOGIC_MAX_ADAPTERS];
+
+
+/*
+ BusLogic can be assigned a string by insmod.
+*/
+
+MODULE_LICENSE("GPL");
+#ifdef MODULE
+static char *BusLogic;
+module_param(BusLogic, charp, 0);
+#endif
+
+
+/*
+ blogic_probe_options is a set of Probe Options to be applied across
+ all BusLogic Host Adapters.
+*/
+
+static struct blogic_probe_options blogic_probe_options;
+
+
+/*
+ blogic_global_options is a set of Global Options to be applied across
+ all BusLogic Host Adapters.
+*/
+
+static struct blogic_global_options blogic_global_options;
+
+static LIST_HEAD(blogic_host_list);
+
+/*
+ blogic_probeinfo_count is the number of entries in blogic_probeinfo_list.
+*/
+
+static int blogic_probeinfo_count;
+
+
+/*
+ blogic_probeinfo_list is the list of I/O Addresses and Bus Probe Information
+ to be checked for potential BusLogic Host Adapters. It is initialized by
+ interrogating the PCI Configuration Space on PCI machines as well as from the
+ list of standard BusLogic I/O Addresses.
+*/
+
+static struct blogic_probeinfo *blogic_probeinfo_list;
+
+
+/*
+ blogic_cmd_failure_reason holds a string identifying the reason why a
+ call to blogic_cmd failed. It is only non-NULL when blogic_cmd
+ returns a failure code.
+*/
+
+static char *blogic_cmd_failure_reason;
+
+/*
+ blogic_announce_drvr announces the Driver Version and Date, Author's
+ Name, Copyright Notice, and Electronic Mail Address.
+*/
+
+static void blogic_announce_drvr(struct blogic_adapter *adapter)
+{
+ blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter);
+ blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter);
+}
+
+
+/*
+ blogic_drvr_info returns the Host Adapter Name to identify this SCSI
+ Driver and Host Adapter.
+*/
+
+static const char *blogic_drvr_info(struct Scsi_Host *host)
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) host->hostdata;
+ return adapter->full_model;
+}
+
+/*
+ blogic_init_ccbs initializes a group of Command Control Blocks (CCBs)
+ for Host Adapter from the blk_size bytes located at blk_pointer. The newly
+ created CCBs are added to Host Adapter's free list.
+*/
+
+static void blogic_init_ccbs(struct blogic_adapter *adapter, void *blk_pointer,
+ int blk_size, dma_addr_t blkp)
+{
+ struct blogic_ccb *ccb = (struct blogic_ccb *) blk_pointer;
+ unsigned int offset = 0;
+ memset(blk_pointer, 0, blk_size);
+ ccb->allocgrp_head = blkp;
+ ccb->allocgrp_size = blk_size;
+ while ((blk_size -= sizeof(struct blogic_ccb)) >= 0) {
+ ccb->status = BLOGIC_CCB_FREE;
+ ccb->adapter = adapter;
+ ccb->dma_handle = (u32) blkp + offset;
+ if (blogic_flashpoint_type(adapter)) {
+ ccb->callback = blogic_qcompleted_ccb;
+ ccb->base_addr = adapter->fpinfo.base_addr;
+ }
+ ccb->next = adapter->free_ccbs;
+ ccb->next_all = adapter->all_ccbs;
+ adapter->free_ccbs = ccb;
+ adapter->all_ccbs = ccb;
+ adapter->alloc_ccbs++;
+ ccb++;
+ offset += sizeof(struct blogic_ccb);
+ }
+}
+
+
+/*
+ blogic_create_initccbs allocates the initial CCBs for Host Adapter.
+*/
+
+static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
+{
+ int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb);
+ void *blk_pointer;
+ dma_addr_t blkp;
+
+ while (adapter->alloc_ccbs < adapter->initccbs) {
+ blk_pointer = pci_alloc_consistent(adapter->pci_device,
+ blk_size, &blkp);
+ if (blk_pointer == NULL) {
+ blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
+ adapter);
+ return false;
+ }
+ blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
+ }
+ return true;
+}
+
+
+/*
+ blogic_destroy_ccbs deallocates the CCBs for Host Adapter.
+*/
+
+static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
+{
+ struct blogic_ccb *next_ccb = adapter->all_ccbs, *ccb, *lastccb = NULL;
+ adapter->all_ccbs = NULL;
+ adapter->free_ccbs = NULL;
+ while ((ccb = next_ccb) != NULL) {
+ next_ccb = ccb->next_all;
+ if (ccb->allocgrp_head) {
+ if (lastccb)
+ pci_free_consistent(adapter->pci_device,
+ lastccb->allocgrp_size, lastccb,
+ lastccb->allocgrp_head);
+ lastccb = ccb;
+ }
+ }
+ if (lastccb)
+ pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
+ lastccb, lastccb->allocgrp_head);
+}
+
+
+/*
+ blogic_create_addlccbs allocates Additional CCBs for Host Adapter. If
+ allocation fails and there are no remaining CCBs available, the Driver Queue
+ Depth is decreased to a known safe value to avoid potential deadlocks when
+ multiple host adapters share the same IRQ Channel.
+*/
+
+static void blogic_create_addlccbs(struct blogic_adapter *adapter,
+ int addl_ccbs, bool print_success)
+{
+ int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb);
+ int prev_alloc = adapter->alloc_ccbs;
+ void *blk_pointer;
+ dma_addr_t blkp;
+ if (addl_ccbs <= 0)
+ return;
+ while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
+ blk_pointer = pci_alloc_consistent(adapter->pci_device,
+ blk_size, &blkp);
+ if (blk_pointer == NULL)
+ break;
+ blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
+ }
+ if (adapter->alloc_ccbs > prev_alloc) {
+ if (print_success)
+ blogic_notice("Allocated %d additional CCBs (total now %d)\n", adapter, adapter->alloc_ccbs - prev_alloc, adapter->alloc_ccbs);
+ return;
+ }
+ blogic_notice("Failed to allocate additional CCBs\n", adapter);
+ if (adapter->drvr_qdepth > adapter->alloc_ccbs - adapter->tgt_count) {
+ adapter->drvr_qdepth = adapter->alloc_ccbs - adapter->tgt_count;
+ adapter->scsi_host->can_queue = adapter->drvr_qdepth;
+ }
+}
+
+/*
+ blogic_alloc_ccb allocates a CCB from Host Adapter's free list,
+ allocating more memory from the Kernel if necessary. The Host Adapter's
+ Lock should already have been acquired by the caller.
+*/
+
+static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
+{
+ static unsigned long serial;
+ struct blogic_ccb *ccb;
+ ccb = adapter->free_ccbs;
+ if (ccb != NULL) {
+ ccb->serial = ++serial;
+ adapter->free_ccbs = ccb->next;
+ ccb->next = NULL;
+ if (adapter->free_ccbs == NULL)
+ blogic_create_addlccbs(adapter, adapter->inc_ccbs,
+ true);
+ return ccb;
+ }
+ blogic_create_addlccbs(adapter, adapter->inc_ccbs, true);
+ ccb = adapter->free_ccbs;
+ if (ccb == NULL)
+ return NULL;
+ ccb->serial = ++serial;
+ adapter->free_ccbs = ccb->next;
+ ccb->next = NULL;
+ return ccb;
+}
+
+
+/*
+ blogic_dealloc_ccb deallocates a CCB, returning it to the Host Adapter's
+ free list. The Host Adapter's Lock should already have been acquired by the
+ caller.
+*/
+
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
+{
+ struct blogic_adapter *adapter = ccb->adapter;
+
+ if (ccb->command != NULL)
+ scsi_dma_unmap(ccb->command);
+ if (dma_unmap)
+ pci_unmap_single(adapter->pci_device, ccb->sensedata,
+ ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+
+ ccb->command = NULL;
+ ccb->status = BLOGIC_CCB_FREE;
+ ccb->next = adapter->free_ccbs;
+ adapter->free_ccbs = ccb;
+}
+
+
+/*
+ blogic_cmd sends the command opcode to adapter, optionally
+ providing paramlen bytes of param and receiving at most
+ replylen bytes of reply; any excess reply data is received but
+ discarded.
+
+ On success, this function returns the number of reply bytes read from
+ the Host Adapter (including any discarded data); on failure, it returns
+ -1 if the command was invalid, or -2 if a timeout occurred.
+
+ blogic_cmd is called exclusively during host adapter detection and
+ initialization, so performance and latency are not critical, and exclusive
+ access to the Host Adapter hardware is assumed. Once the host adapter and
+ driver are initialized, the only Host Adapter command that is issued is the
+ single byte Execute Mailbox Command operation code, which does not require
+ waiting for the Host Adapter Ready bit to be set in the Status Register.
+*/
+
+static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
+ void *param, int paramlen, void *reply, int replylen)
+{
+ unsigned char *param_p = (unsigned char *) param;
+ unsigned char *reply_p = (unsigned char *) reply;
+ union blogic_stat_reg statusreg;
+ union blogic_int_reg intreg;
+ unsigned long processor_flag = 0;
+ int reply_b = 0, result;
+ long timeout;
+ /*
+ Clear out the Reply Data if provided.
+ */
+ if (replylen > 0)
+ memset(reply, 0, replylen);
+ /*
+ If the IRQ Channel has not yet been acquired, then interrupts
+ must be disabled while issuing host adapter commands since a
+ Command Complete interrupt could occur if the IRQ Channel was
+ previously enabled by another BusLogic Host Adapter or another
+ driver sharing the same IRQ Channel.
+ */
+ if (!adapter->irq_acquired)
+ local_irq_save(processor_flag);
+ /*
+ Wait for the Host Adapter Ready bit to be set and the
+ Command/Parameter Register Busy bit to be reset in the Status
+ Register.
+ */
+ timeout = 10000;
+ while (--timeout >= 0) {
+ statusreg.all = blogic_rdstatus(adapter);
+ if (statusreg.sr.adapter_ready && !statusreg.sr.cmd_param_busy)
+ break;
+ udelay(100);
+ }
+ if (timeout < 0) {
+ blogic_cmd_failure_reason =
+ "Timeout waiting for Host Adapter Ready";
+ result = -2;
+ goto done;
+ }
+ /*
+ Write the opcode to the Command/Parameter Register.
+ */
+ adapter->adapter_cmd_complete = false;
+ blogic_setcmdparam(adapter, opcode);
+ /*
+ Write any additional Parameter Bytes.
+ */
+ timeout = 10000;
+ while (paramlen > 0 && --timeout >= 0) {
+ /*
+ Wait 100 microseconds to give the Host Adapter enough
+ time to determine whether the last value written to the
+ Command/Parameter Register was valid or not. If the
+ Command Complete bit is set in the Interrupt Register,
+ then the Command Invalid bit in the Status Register will
+ be reset if the Operation Code or Parameter was valid
+ and the command has completed, or set if the Operation
+ Code or Parameter was invalid. If the Data In Register
+ Ready bit is set in the Status Register, then the
+ Operation Code was valid, and data is waiting to be read
+ back from the Host Adapter. Otherwise, wait for the
+ Command/Parameter Register Busy bit in the Status
+ Register to be reset.
+ */
+ udelay(100);
+ intreg.all = blogic_rdint(adapter);
+ statusreg.all = blogic_rdstatus(adapter);
+ if (intreg.ir.cmd_complete)
+ break;
+ if (adapter->adapter_cmd_complete)
+ break;
+ if (statusreg.sr.datain_ready)
+ break;
+ if (statusreg.sr.cmd_param_busy)
+ continue;
+ blogic_setcmdparam(adapter, *param_p++);
+ paramlen--;
+ }
+ if (timeout < 0) {
+ blogic_cmd_failure_reason =
+ "Timeout waiting for Parameter Acceptance";
+ result = -2;
+ goto done;
+ }
+ /*
+ The Modify I/O Address command does not cause a Command Complete
+ Interrupt.
+ */
+ if (opcode == BLOGIC_MOD_IOADDR) {
+ statusreg.all = blogic_rdstatus(adapter);
+ if (statusreg.sr.cmd_invalid) {
+ blogic_cmd_failure_reason =
+ "Modify I/O Address Invalid";
+ result = -1;
+ goto done;
+ }
+ if (blogic_global_options.trace_config)
+ blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all);
+ result = 0;
+ goto done;
+ }
+ /*
+ Select an appropriate timeout value for awaiting command completion.
+ */
+ switch (opcode) {
+ case BLOGIC_INQ_DEV0TO7:
+ case BLOGIC_INQ_DEV8TO15:
+ case BLOGIC_INQ_DEV:
+ /* Approximately 60 seconds. */
+ timeout = 60 * 10000;
+ break;
+ default:
+ /* Approximately 1 second. */
+ timeout = 10000;
+ break;
+ }
+ /*
+ Receive any Reply Bytes, waiting for either the Command
+ Complete bit to be set in the Interrupt Register, or for the
+ Interrupt Handler to set the Host Adapter Command Completed
+ bit in the Host Adapter structure.
+ */
+ while (--timeout >= 0) {
+ intreg.all = blogic_rdint(adapter);
+ statusreg.all = blogic_rdstatus(adapter);
+ if (intreg.ir.cmd_complete)
+ break;
+ if (adapter->adapter_cmd_complete)
+ break;
+ if (statusreg.sr.datain_ready) {
+ if (++reply_b <= replylen)
+ *reply_p++ = blogic_rddatain(adapter);
+ else
+ blogic_rddatain(adapter);
+ }
+ if (opcode == BLOGIC_FETCH_LOCALRAM &&
+ statusreg.sr.adapter_ready)
+ break;
+ udelay(100);
+ }
+ if (timeout < 0) {
+ blogic_cmd_failure_reason =
+ "Timeout waiting for Command Complete";
+ result = -2;
+ goto done;
+ }
+ /*
+ Clear any pending Command Complete Interrupt.
+ */
+ blogic_intreset(adapter);
+ /*
+ Provide tracing information if requested.
+ */
+ if (blogic_global_options.trace_config) {
+ int i;
+ blogic_notice("blogic_cmd(%02X) Status = %02X: %2d ==> %2d:",
+ adapter, opcode, statusreg.all, replylen,
+ reply_b);
+ if (replylen > reply_b)
+ replylen = reply_b;
+ for (i = 0; i < replylen; i++)
+ blogic_notice(" %02X", adapter,
+ ((unsigned char *) reply)[i]);
+ blogic_notice("\n", adapter);
+ }
+ /*
+ Process Command Invalid conditions.
+ */
+ if (statusreg.sr.cmd_invalid) {
+ /*
+ Some early BusLogic Host Adapters may not recover
+ properly from a Command Invalid condition, so if this
+ appears to be the case, a Soft Reset is issued to the
+ Host Adapter. Potentially invalid commands are never
+ attempted after Mailbox Initialization is performed,
+ so there should be no Host Adapter state lost by a
+ Soft Reset in response to a Command Invalid condition.
+ */
+ udelay(1000);
+ statusreg.all = blogic_rdstatus(adapter);
+ if (statusreg.sr.cmd_invalid || statusreg.sr.rsvd ||
+ statusreg.sr.datain_ready ||
+ statusreg.sr.cmd_param_busy ||
+ !statusreg.sr.adapter_ready ||
+ !statusreg.sr.init_reqd ||
+ statusreg.sr.diag_active ||
+ statusreg.sr.diag_failed) {
+ blogic_softreset(adapter);
+ udelay(1000);
+ }
+ blogic_cmd_failure_reason = "Command Invalid";
+ result = -1;
+ goto done;
+ }
+ /*
+ Handle Excess Parameters Supplied conditions.
+ */
+ if (paramlen > 0) {
+ blogic_cmd_failure_reason = "Excess Parameters Supplied";
+ result = -1;
+ goto done;
+ }
+ /*
+ Indicate the command completed successfully.
+ */
+ blogic_cmd_failure_reason = NULL;
+ result = reply_b;
+ /*
+ Restore the interrupt status if necessary and return.
+ */
+done:
+ if (!adapter->irq_acquired)
+ local_irq_restore(processor_flag);
+ return result;
+}
+
+
+/*
+ blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
+ of I/O Address and Bus Probe Information to be checked for potential BusLogic
+ Host Adapters.
+*/
+
+static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
+{
+ struct blogic_probeinfo *probeinfo;
+ if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
+ return;
+ probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
+ probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+ probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
+ probeinfo->io_addr = io_addr;
+ probeinfo->pci_device = NULL;
+}
+
+
+/*
+ blogic_init_probeinfo_isa initializes the list of I/O Address and
+ Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
+ only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
+*/
+
+static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
+{
+ /*
+ If BusLogic Driver Options specifications requested that ISA
+ Bus Probes be inhibited, do not proceed further.
+ */
+ if (blogic_probe_options.noprobe_isa)
+ return;
+ /*
+ Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
+ */
+ if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
+ blogic_add_probeaddr_isa(0x330);
+ if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
+ blogic_add_probeaddr_isa(0x334);
+ if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
+ blogic_add_probeaddr_isa(0x230);
+ if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
+ blogic_add_probeaddr_isa(0x234);
+ if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
+ blogic_add_probeaddr_isa(0x130);
+ if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
+ blogic_add_probeaddr_isa(0x134);
+}
+
+
+#ifdef CONFIG_PCI
+
+
+/*
+ blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
+ of increasing PCI Bus and Device Number.
+*/
+
+static void __init blogic_sort_probeinfo(struct blogic_probeinfo
+ *probeinfo_list, int probeinfo_cnt)
+{
+ int last_exchange = probeinfo_cnt - 1, bound, j;
+
+ while (last_exchange > 0) {
+ bound = last_exchange;
+ last_exchange = 0;
+ for (j = 0; j < bound; j++) {
+ struct blogic_probeinfo *probeinfo1 =
+ &probeinfo_list[j];
+ struct blogic_probeinfo *probeinfo2 =
+ &probeinfo_list[j + 1];
+ if (probeinfo1->bus > probeinfo2->bus ||
+ (probeinfo1->bus == probeinfo2->bus &&
+ (probeinfo1->dev > probeinfo2->dev))) {
+ struct blogic_probeinfo tmp_probeinfo;
+
+ memcpy(&tmp_probeinfo, probeinfo1,
+ sizeof(struct blogic_probeinfo));
+ memcpy(probeinfo1, probeinfo2,
+ sizeof(struct blogic_probeinfo));
+ memcpy(probeinfo2, &tmp_probeinfo,
+ sizeof(struct blogic_probeinfo));
+ last_exchange = j;
+ }
+ }
+ }
+}
+
+
+/*
+ blogic_init_mm_probeinfo initializes the list of I/O Address
+ and Bus Probe Information to be checked for potential BusLogic MultiMaster
+ SCSI Host Adapters by interrogating the PCI Configuration Space on PCI
+ machines as well as from the list of standard BusLogic MultiMaster ISA
+ I/O Addresses. It returns the number of PCI MultiMaster Host Adapters found.
+*/
+
+static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
+{
+ struct blogic_probeinfo *pr_probeinfo =
+ &blogic_probeinfo_list[blogic_probeinfo_count];
+ int nonpr_mmindex = blogic_probeinfo_count + 1;
+ int nonpr_mmcount = 0, mmcount = 0;
+ bool force_scan_order = false;
+ bool force_scan_order_checked = false;
+ bool addr_seen[6];
+ struct pci_dev *pci_device = NULL;
+ int i;
+ if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
+ return 0;
+ blogic_probeinfo_count++;
+ for (i = 0; i < 6; i++)
+ addr_seen[i] = false;
+ /*
+ Iterate over the MultiMaster PCI Host Adapters. For each
+ enumerated host adapter, determine whether its ISA Compatible
+ I/O Port is enabled and if so, whether it is assigned the
+ Primary I/O Address. A host adapter that is assigned the
+ Primary I/O Address will always be the preferred boot device.
+ The MultiMaster BIOS will first recognize a host adapter at
+ the Primary I/O Address, then any other PCI host adapters,
+ and finally any host adapters located at the remaining
+ standard ISA I/O Addresses. When a PCI host adapter is found
+ with its ISA Compatible I/O Port enabled, a command is issued
+ to disable the ISA Compatible I/O Port, and it is noted that the
+ particular standard ISA I/O Address need not be probed.
+ */
+ pr_probeinfo->io_addr = 0;
+ while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
+ pci_device)) != NULL) {
+ struct blogic_adapter *host_adapter = adapter;
+ struct blogic_adapter_info adapter_info;
+ enum blogic_isa_ioport mod_ioaddr_req;
+ unsigned char bus;
+ unsigned char device;
+ unsigned int irq_ch;
+ unsigned long base_addr0;
+ unsigned long base_addr1;
+ unsigned long io_addr;
+ unsigned long pci_addr;
+
+ if (pci_enable_device(pci_device))
+ continue;
+
+ if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ continue;
+
+ bus = pci_device->bus->number;
+ device = pci_device->devfn >> 3;
+ irq_ch = pci_device->irq;
+ io_addr = base_addr0 = pci_resource_start(pci_device, 0);
+ pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
+
+ if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
+ blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ continue;
+ }
+ if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
+ blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1);
+ blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+ continue;
+ }
+ if (irq_ch == 0) {
+ blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ continue;
+ }
+ if (blogic_global_options.trace_probe) {
+ blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL);
+ blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+ }
+ /*
+ Issue the Inquire PCI Host Adapter Information command to determine
+ the ISA Compatible I/O Port. If the ISA Compatible I/O Port is
+ known and enabled, note that the particular Standard ISA I/O
+ Address should not be probed.
+ */
+ host_adapter->io_addr = io_addr;
+ blogic_intreset(host_adapter);
+ if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+ &adapter_info, sizeof(adapter_info)) ==
+ sizeof(adapter_info)) {
+ if (adapter_info.isa_port < 6)
+ addr_seen[adapter_info.isa_port] = true;
+ } else
+ adapter_info.isa_port = BLOGIC_IO_DISABLE;
+ /*
+ Issue the Modify I/O Address command to disable the
+ ISA Compatible I/O Port. On PCI Host Adapters, the
+ Modify I/O Address command allows modification of the
+ ISA compatible I/O Address that the Host Adapter
+ responds to; it does not affect the PCI compliant
+ I/O Address assigned at system initialization.
+ */
+ mod_ioaddr_req = BLOGIC_IO_DISABLE;
+ blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+ sizeof(mod_ioaddr_req), NULL, 0);
+ /*
+ For the first MultiMaster Host Adapter enumerated,
+ issue the Fetch Host Adapter Local RAM command to read
+ byte 45 of the AutoSCSI area, for the setting of the
+ "Use Bus And Device # For PCI Scanning Seq." option.
+ Issue the Inquire Board ID command since this option is
+ only valid for the BT-948/958/958D.
+ */
+ if (!force_scan_order_checked) {
+ struct blogic_fetch_localram fetch_localram;
+ struct blogic_autoscsi_byte45 autoscsi_byte45;
+ struct blogic_board_id id;
+
+ fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
+ fetch_localram.count = sizeof(autoscsi_byte45);
+ blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
+ &fetch_localram, sizeof(fetch_localram),
+ &autoscsi_byte45,
+ sizeof(autoscsi_byte45));
+ blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+ &id, sizeof(id));
+ if (id.fw_ver_digit1 == '5')
+ force_scan_order =
+ autoscsi_byte45.force_scan_order;
+ force_scan_order_checked = true;
+ }
+ /*
+ Determine whether this MultiMaster Host Adapter has its
+ ISA Compatible I/O Port enabled and is assigned the
+ Primary I/O Address. If it does, then it is the Primary
+ MultiMaster Host Adapter and must be recognized first.
+ If it does not, then it is added to the list for probing
+ after any Primary MultiMaster Host Adapter is probed.
+ */
+ if (adapter_info.isa_port == BLOGIC_IO_330) {
+ pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+ pr_probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+ pr_probeinfo->io_addr = io_addr;
+ pr_probeinfo->pci_addr = pci_addr;
+ pr_probeinfo->bus = bus;
+ pr_probeinfo->dev = device;
+ pr_probeinfo->irq_ch = irq_ch;
+ pr_probeinfo->pci_device = pci_dev_get(pci_device);
+ mmcount++;
+ } else if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
+ struct blogic_probeinfo *probeinfo =
+ &blogic_probeinfo_list[blogic_probeinfo_count++];
+ probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+ probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+ probeinfo->io_addr = io_addr;
+ probeinfo->pci_addr = pci_addr;
+ probeinfo->bus = bus;
+ probeinfo->dev = device;
+ probeinfo->irq_ch = irq_ch;
+ probeinfo->pci_device = pci_dev_get(pci_device);
+ nonpr_mmcount++;
+ mmcount++;
+ } else
+ blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+ }
+ /*
+ If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq."
+ option is ON for the first enumerated MultiMaster Host Adapter,
+ and if that host adapter is a BT-948/958/958D, then the
+ MultiMaster BIOS will recognize MultiMaster Host Adapters in
+ the order of increasing PCI Bus and Device Number. In that case,
+ sort the probe information into the same order the BIOS uses.
+ If this option is OFF, then the MultiMaster BIOS will recognize
+ MultiMaster Host Adapters in the order they are enumerated by
+ the PCI BIOS, and hence no sorting is necessary.
+ */
+ if (force_scan_order)
+ blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
+ nonpr_mmcount);
+ /*
+ If no PCI MultiMaster Host Adapter is assigned the Primary
+ I/O Address, then the Primary I/O Address must be probed
+ explicitly before any PCI host adapters are probed.
+ */
+ if (!blogic_probe_options.noprobe_isa)
+ if (pr_probeinfo->io_addr == 0 &&
+ (!blogic_probe_options.limited_isa ||
+ blogic_probe_options.probe330)) {
+ pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+ pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
+ pr_probeinfo->io_addr = 0x330;
+ }
+ /*
+ Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
+ omitting the Primary I/O Address which has already been handled.
+ */
+ if (!blogic_probe_options.noprobe_isa) {
+ if (!addr_seen[1] &&
+ (!blogic_probe_options.limited_isa ||
+ blogic_probe_options.probe334))
+ blogic_add_probeaddr_isa(0x334);
+ if (!addr_seen[2] &&
+ (!blogic_probe_options.limited_isa ||
+ blogic_probe_options.probe230))
+ blogic_add_probeaddr_isa(0x230);
+ if (!addr_seen[3] &&
+ (!blogic_probe_options.limited_isa ||
+ blogic_probe_options.probe234))
+ blogic_add_probeaddr_isa(0x234);
+ if (!addr_seen[4] &&
+ (!blogic_probe_options.limited_isa ||
+ blogic_probe_options.probe130))
+ blogic_add_probeaddr_isa(0x130);
+ if (!addr_seen[5] &&
+ (!blogic_probe_options.limited_isa ||
+ blogic_probe_options.probe134))
+ blogic_add_probeaddr_isa(0x134);
+ }
+ /*
+ Iterate over the older non-compliant MultiMaster PCI Host Adapters,
+ noting the PCI bus location and assigned IRQ Channel.
+ */
+ pci_device = NULL;
+ while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
+ pci_device)) != NULL) {
+ unsigned char bus;
+ unsigned char device;
+ unsigned int irq_ch;
+ unsigned long io_addr;
+
+ if (pci_enable_device(pci_device))
+ continue;
+
+ if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ continue;
+
+ bus = pci_device->bus->number;
+ device = pci_device->devfn >> 3;
+ irq_ch = pci_device->irq;
+ io_addr = pci_resource_start(pci_device, 0);
+
+ if (io_addr == 0 || irq_ch == 0)
+ continue;
+ for (i = 0; i < blogic_probeinfo_count; i++) {
+ struct blogic_probeinfo *probeinfo =
+ &blogic_probeinfo_list[i];
+ if (probeinfo->io_addr == io_addr &&
+ probeinfo->adapter_type == BLOGIC_MULTIMASTER) {
+ probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+ probeinfo->pci_addr = 0;
+ probeinfo->bus = bus;
+ probeinfo->dev = device;
+ probeinfo->irq_ch = irq_ch;
+ probeinfo->pci_device = pci_dev_get(pci_device);
+ break;
+ }
+ }
+ }
+ return mmcount;
+}
+
+
+/*
+ blogic_init_fp_probeinfo initializes the list of I/O Address
+ and Bus Probe Information to be checked for potential BusLogic FlashPoint
+ Host Adapters by interrogating the PCI Configuration Space. It returns the
+ number of FlashPoint Host Adapters found.
+*/
+
+static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
+{
+ int fpindex = blogic_probeinfo_count, fpcount = 0;
+ struct pci_dev *pci_device = NULL;
+ /*
+ Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
+ */
+ while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT,
+ pci_device)) != NULL) {
+ unsigned char bus;
+ unsigned char device;
+ unsigned int irq_ch;
+ unsigned long base_addr0;
+ unsigned long base_addr1;
+ unsigned long io_addr;
+ unsigned long pci_addr;
+
+ if (pci_enable_device(pci_device))
+ continue;
+
+ if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ continue;
+
+ bus = pci_device->bus->number;
+ device = pci_device->devfn >> 3;
+ irq_ch = pci_device->irq;
+ io_addr = base_addr0 = pci_resource_start(pci_device, 0);
+ pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
+#ifdef CONFIG_SCSI_FLASHPOINT
+ if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
+ blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ continue;
+ }
+ if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
+ blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1);
+ blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+ continue;
+ }
+ if (irq_ch == 0) {
+ blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ continue;
+ }
+ if (blogic_global_options.trace_probe) {
+ blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
+ blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+ }
+ if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
+ struct blogic_probeinfo *probeinfo =
+ &blogic_probeinfo_list[blogic_probeinfo_count++];
+ probeinfo->adapter_type = BLOGIC_FLASHPOINT;
+ probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+ probeinfo->io_addr = io_addr;
+ probeinfo->pci_addr = pci_addr;
+ probeinfo->bus = bus;
+ probeinfo->dev = device;
+ probeinfo->irq_ch = irq_ch;
+ probeinfo->pci_device = pci_dev_get(pci_device);
+ fpcount++;
+ } else
+ blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+#else
+ blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device);
+ blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
+ blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL);
+#endif
+ }
+ /*
+ The FlashPoint BIOS will scan for FlashPoint Host Adapters in the order of
+ increasing PCI Bus and Device Number, so sort the probe information into
+ the same order the BIOS uses.
+ */
+ blogic_sort_probeinfo(&blogic_probeinfo_list[fpindex], fpcount);
+ return fpcount;
+}
+
+
+/*
+ blogic_init_probeinfo_list initializes the list of I/O Address and Bus
+ Probe Information to be checked for potential BusLogic SCSI Host Adapters by
+ interrogating the PCI Configuration Space on PCI machines as well as from the
+ list of standard BusLogic MultiMaster ISA I/O Addresses. By default, if both
+ FlashPoint and PCI MultiMaster Host Adapters are present, this driver will
+ probe for FlashPoint Host Adapters first unless the BIOS primary disk is
+ controlled by the first PCI MultiMaster Host Adapter, in which case
+ MultiMaster Host Adapters will be probed first. The BusLogic Driver Options
+ specifications "MultiMasterFirst" and "FlashPointFirst" can be used to force
+ a particular probe order.
+*/
+
+static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter)
+{
+ /*
+ If a PCI BIOS is present, interrogate it for MultiMaster and
+ FlashPoint Host Adapters; otherwise, default to the standard
+ ISA MultiMaster probe.
+ */
+ if (!blogic_probe_options.noprobe_pci) {
+ if (blogic_probe_options.multimaster_first) {
+ blogic_init_mm_probeinfo(adapter);
+ blogic_init_fp_probeinfo(adapter);
+ } else if (blogic_probe_options.flashpoint_first) {
+ blogic_init_fp_probeinfo(adapter);
+ blogic_init_mm_probeinfo(adapter);
+ } else {
+ int fpcount = blogic_init_fp_probeinfo(adapter);
+ int mmcount = blogic_init_mm_probeinfo(adapter);
+ if (fpcount > 0 && mmcount > 0) {
+ struct blogic_probeinfo *probeinfo =
+ &blogic_probeinfo_list[fpcount];
+ struct blogic_adapter *myadapter = adapter;
+ struct blogic_fetch_localram fetch_localram;
+ struct blogic_bios_drvmap d0_mapbyte;
+
+ while (probeinfo->adapter_bus_type !=
+ BLOGIC_PCI_BUS)
+ probeinfo++;
+ myadapter->io_addr = probeinfo->io_addr;
+ fetch_localram.offset =
+ BLOGIC_BIOS_BASE + BLOGIC_BIOS_DRVMAP;
+ fetch_localram.count = sizeof(d0_mapbyte);
+ blogic_cmd(myadapter, BLOGIC_FETCH_LOCALRAM,
+ &fetch_localram,
+ sizeof(fetch_localram),
+ &d0_mapbyte,
+ sizeof(d0_mapbyte));
+ /*
+ If the Map Byte for BIOS Drive 0 indicates
+ that BIOS Drive 0 is controlled by this
+ PCI MultiMaster Host Adapter, then reverse
+ the probe order so that MultiMaster Host
+ Adapters are probed before FlashPoint Host
+ Adapters.
+ */
+ if (d0_mapbyte.diskgeom != BLOGIC_BIOS_NODISK) {
+ struct blogic_probeinfo saved_probeinfo[BLOGIC_MAX_ADAPTERS];
+ int mmcount = blogic_probeinfo_count - fpcount;
+
+ memcpy(saved_probeinfo,
+ blogic_probeinfo_list,
+ blogic_probeinfo_count * sizeof(struct blogic_probeinfo));
+ memcpy(&blogic_probeinfo_list[0],
+ &saved_probeinfo[fpcount],
+ mmcount * sizeof(struct blogic_probeinfo));
+ memcpy(&blogic_probeinfo_list[mmcount],
+ &saved_probeinfo[0],
+ fpcount * sizeof(struct blogic_probeinfo));
+ }
+ }
+ }
+ } else {
+ blogic_init_probeinfo_isa(adapter);
+ }
+}
+
+
+#else
+#define blogic_init_probeinfo_list(adapter) \
+ blogic_init_probeinfo_isa(adapter)
+#endif /* CONFIG_PCI */
+
+
+/*
+ blogic_failure prints a standardized error message, and then returns false.
+*/
+
+static bool blogic_failure(struct blogic_adapter *adapter, char *msg)
+{
+ blogic_announce_drvr(adapter);
+ if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) {
+ blogic_err("While configuring BusLogic PCI Host Adapter at\n",
+ adapter);
+ blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
+ } else
+ blogic_err("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr);
+ blogic_err("%s FAILED - DETACHING\n", adapter, msg);
+ if (blogic_cmd_failure_reason != NULL)
+ blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter,
+ blogic_cmd_failure_reason);
+ return false;
+}
+
+
+/*
+ blogic_probe probes for a BusLogic Host Adapter.
+*/
+
+static bool __init blogic_probe(struct blogic_adapter *adapter)
+{
+ union blogic_stat_reg statusreg;
+ union blogic_int_reg intreg;
+ union blogic_geo_reg georeg;
+ /*
+ FlashPoint Host Adapters are Probed by the FlashPoint SCCB Manager.
+ */
+ if (blogic_flashpoint_type(adapter)) {
+ struct fpoint_info *fpinfo = &adapter->fpinfo;
+ fpinfo->base_addr = (u32) adapter->io_addr;
+ fpinfo->irq_ch = adapter->irq_ch;
+ fpinfo->present = false;
+ if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 &&
+ fpinfo->present)) {
+ blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
+ blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
+ blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter);
+ return false;
+ }
+ if (blogic_global_options.trace_probe)
+ blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr);
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+ }
+ /*
+ Read the Status, Interrupt, and Geometry Registers to test if there are I/O
+ ports that respond, and to check the values to determine if they are from a
+ BusLogic Host Adapter. A nonexistent I/O port will return 0xFF, in which
+ case there is definitely no BusLogic Host Adapter at this base I/O Address.
+ The test here is a subset of that used by the BusLogic Host Adapter BIOS.
+ */
+ statusreg.all = blogic_rdstatus(adapter);
+ intreg.all = blogic_rdint(adapter);
+ georeg.all = blogic_rdgeom(adapter);
+ if (blogic_global_options.trace_probe)
+ blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
+ if (statusreg.all == 0 || statusreg.sr.diag_active ||
+ statusreg.sr.cmd_param_busy || statusreg.sr.rsvd ||
+ statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0)
+ return false;
+ /*
+ Check the undocumented Geometry Register to test if there is
+ an I/O port that responded. Adaptec Host Adapters do not
+ implement the Geometry Register, so this test helps serve to
+ avoid incorrectly recognizing an Adaptec 1542A or 1542B as a
+ BusLogic. Unfortunately, the Adaptec 1542C series does respond
+ to the Geometry Register I/O port, but it will be rejected
+ later when the Inquire Extended Setup Information command is
+ issued in blogic_checkadapter. The AMI FastDisk Host Adapter
+ is a BusLogic clone that implements the same interface as
+ earlier BusLogic Host Adapters, including the undocumented
+ commands, and is therefore supported by this driver. However,
+ the AMI FastDisk always returns 0x00 upon reading the Geometry
+ Register, so the extended translation option should always be
+ left disabled on the AMI FastDisk.
+ */
+ if (georeg.all == 0xFF)
+ return false;
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ blogic_hwreset issues a Hardware Reset to the Host Adapter
+ and waits for Host Adapter Diagnostics to complete. If hard_reset is true, a
+ Hard Reset is performed which also initiates a SCSI Bus Reset. Otherwise, a
+ Soft Reset is performed which only resets the Host Adapter without forcing a
+ SCSI Bus Reset.
+*/
+
+static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
+{
+ union blogic_stat_reg statusreg;
+ int timeout;
+ /*
+ FlashPoint Host Adapters are Hard Reset by the FlashPoint
+ SCCB Manager.
+ */
+ if (blogic_flashpoint_type(adapter)) {
+ struct fpoint_info *fpinfo = &adapter->fpinfo;
+ fpinfo->softreset = !hard_reset;
+ fpinfo->report_underrun = true;
+ adapter->cardhandle =
+ FlashPoint_HardwareResetHostAdapter(fpinfo);
+ if (adapter->cardhandle == (void *)FPOINT_BADCARD_HANDLE)
+ return false;
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+ }
+ /*
+ Issue a Hard Reset or Soft Reset Command to the Host Adapter.
+ The Host Adapter should respond by setting Diagnostic Active in
+ the Status Register.
+ */
+ if (hard_reset)
+ blogic_hardreset(adapter);
+ else
+ blogic_softreset(adapter);
+ /*
+ Wait until Diagnostic Active is set in the Status Register.
+ */
+ timeout = 5 * 10000;
+ while (--timeout >= 0) {
+ statusreg.all = blogic_rdstatus(adapter);
+ if (statusreg.sr.diag_active)
+ break;
+ udelay(100);
+ }
+ if (blogic_global_options.trace_hw_reset)
+ blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ if (timeout < 0)
+ return false;
+ /*
+ Wait 100 microseconds to allow completion of any initial diagnostic
+ activity which might leave the contents of the Status Register
+ unpredictable.
+ */
+ udelay(100);
+ /*
+ Wait until Diagnostic Active is reset in the Status Register.
+ */
+ timeout = 10 * 10000;
+ while (--timeout >= 0) {
+ statusreg.all = blogic_rdstatus(adapter);
+ if (!statusreg.sr.diag_active)
+ break;
+ udelay(100);
+ }
+ if (blogic_global_options.trace_hw_reset)
+ blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ if (timeout < 0)
+ return false;
+ /*
+ Wait until at least one of the Diagnostic Failure, Host Adapter
+ Ready, or Data In Register Ready bits is set in the Status Register.
+ */
+ timeout = 10000;
+ while (--timeout >= 0) {
+ statusreg.all = blogic_rdstatus(adapter);
+ if (statusreg.sr.diag_failed || statusreg.sr.adapter_ready ||
+ statusreg.sr.datain_ready)
+ break;
+ udelay(100);
+ }
+ if (blogic_global_options.trace_hw_reset)
+ blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ if (timeout < 0)
+ return false;
+ /*
+ If Diagnostic Failure is set or Host Adapter Ready is reset,
+ then an error occurred during the Host Adapter diagnostics.
+ If Data In Register Ready is set, then there is an Error Code
+ available.
+ */
+ if (statusreg.sr.diag_failed || !statusreg.sr.adapter_ready) {
+ blogic_cmd_failure_reason = NULL;
+ blogic_failure(adapter, "HARD RESET DIAGNOSTICS");
+ blogic_err("HOST ADAPTER STATUS REGISTER = %02X\n", adapter,
+ statusreg.all);
+ if (statusreg.sr.datain_ready)
+ blogic_err("HOST ADAPTER ERROR CODE = %d\n", adapter,
+ blogic_rddatain(adapter));
+ return false;
+ }
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ blogic_checkadapter checks to be sure this really is a BusLogic
+ Host Adapter.
+*/
+
+static bool __init blogic_checkadapter(struct blogic_adapter *adapter)
+{
+ struct blogic_ext_setup ext_setupinfo;
+ unsigned char req_replylen;
+ bool result = true;
+ /*
+ FlashPoint Host Adapters do not require this protection.
+ */
+ if (blogic_flashpoint_type(adapter))
+ return true;
+ /*
+ Issue the Inquire Extended Setup Information command. Only genuine
+ BusLogic Host Adapters and true clones support this command.
+ Adaptec 1542C series Host Adapters that respond to the Geometry
+ Register I/O port will fail this command.
+ */
+ req_replylen = sizeof(ext_setupinfo);
+ if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen,
+ sizeof(req_replylen), &ext_setupinfo,
+ sizeof(ext_setupinfo)) != sizeof(ext_setupinfo))
+ result = false;
+ /*
+ Provide tracing information if requested and return.
+ */
+ if (blogic_global_options.trace_probe)
+ blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter,
+ adapter->io_addr,
+ (result ? "Found" : "Not Found"));
+ return result;
+}
+
+
+/*
+ blogic_rdconfig reads the Configuration Information
+ from Host Adapter and initializes the Host Adapter structure.
+*/
+
+static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
+{
+ struct blogic_board_id id;
+ struct blogic_config config;
+ struct blogic_setup_info setupinfo;
+ struct blogic_ext_setup ext_setupinfo;
+ unsigned char model[5];
+ unsigned char fw_ver_digit3;
+ unsigned char fw_ver_letter;
+ struct blogic_adapter_info adapter_info;
+ struct blogic_fetch_localram fetch_localram;
+ struct blogic_autoscsi autoscsi;
+ union blogic_geo_reg georeg;
+ unsigned char req_replylen;
+ unsigned char *tgt, ch;
+ int tgt_id, i;
+ /*
+ Configuration Information for FlashPoint Host Adapters is
+ provided in the fpoint_info structure by the FlashPoint
+ SCCB Manager's Probe Function. Initialize fields in the
+ Host Adapter structure from the fpoint_info structure.
+ */
+ if (blogic_flashpoint_type(adapter)) {
+ struct fpoint_info *fpinfo = &adapter->fpinfo;
+ tgt = adapter->model;
+ *tgt++ = 'B';
+ *tgt++ = 'T';
+ *tgt++ = '-';
+ for (i = 0; i < sizeof(fpinfo->model); i++)
+ *tgt++ = fpinfo->model[i];
+ *tgt++ = '\0';
+ strcpy(adapter->fw_ver, FLASHPOINT_FW_VER);
+ adapter->scsi_id = fpinfo->scsi_id;
+ adapter->ext_trans_enable = fpinfo->ext_trans_enable;
+ adapter->parity = fpinfo->parity;
+ adapter->reset_enabled = !fpinfo->softreset;
+ adapter->level_int = true;
+ adapter->wide = fpinfo->wide;
+ adapter->differential = false;
+ adapter->scam = true;
+ adapter->ultra = true;
+ adapter->ext_lun = true;
+ adapter->terminfo_valid = true;
+ adapter->low_term = fpinfo->low_term;
+ adapter->high_term = fpinfo->high_term;
+ adapter->scam_enabled = fpinfo->scam_enabled;
+ adapter->scam_lev2 = fpinfo->scam_lev2;
+ adapter->drvr_sglimit = BLOGIC_SG_LIMIT;
+ adapter->maxdev = (adapter->wide ? 16 : 8);
+ adapter->maxlun = 32;
+ adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE;
+ adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE;
+ adapter->drvr_qdepth = 255;
+ adapter->adapter_qdepth = adapter->drvr_qdepth;
+ adapter->sync_ok = fpinfo->sync_ok;
+ adapter->fast_ok = fpinfo->fast_ok;
+ adapter->ultra_ok = fpinfo->ultra_ok;
+ adapter->wide_ok = fpinfo->wide_ok;
+ adapter->discon_ok = fpinfo->discon_ok;
+ adapter->tagq_ok = 0xFFFF;
+ goto common;
+ }
+ /*
+ Issue the Inquire Board ID command.
+ */
+ if (blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
+ sizeof(id)) != sizeof(id))
+ return blogic_failure(adapter, "INQUIRE BOARD ID");
+ /*
+ Issue the Inquire Configuration command.
+ */
+ if (blogic_cmd(adapter, BLOGIC_INQ_CONFIG, NULL, 0, &config,
+ sizeof(config))
+ != sizeof(config))
+ return blogic_failure(adapter, "INQUIRE CONFIGURATION");
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ req_replylen = sizeof(setupinfo);
+ if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen,
+ sizeof(req_replylen), &setupinfo,
+ sizeof(setupinfo)) != sizeof(setupinfo))
+ return blogic_failure(adapter, "INQUIRE SETUP INFORMATION");
+ /*
+ Issue the Inquire Extended Setup Information command.
+ */
+ req_replylen = sizeof(ext_setupinfo);
+ if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen,
+ sizeof(req_replylen), &ext_setupinfo,
+ sizeof(ext_setupinfo)) != sizeof(ext_setupinfo))
+ return blogic_failure(adapter,
+ "INQUIRE EXTENDED SETUP INFORMATION");
+ /*
+ Issue the Inquire Firmware Version 3rd Digit command.
+ */
+ fw_ver_digit3 = '\0';
+ if (id.fw_ver_digit1 > '0')
+ if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_D3, NULL, 0,
+ &fw_ver_digit3,
+ sizeof(fw_ver_digit3)) != sizeof(fw_ver_digit3))
+ return blogic_failure(adapter,
+ "INQUIRE FIRMWARE 3RD DIGIT");
+ /*
+ Issue the Inquire Host Adapter Model Number command.
+ */
+ if (ext_setupinfo.bus_type == 'A' && id.fw_ver_digit1 == '2')
+ /* BusLogic BT-542B ISA 2.xx */
+ strcpy(model, "542B");
+ else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '2' &&
+ (id.fw_ver_digit2 <= '1' || (id.fw_ver_digit2 == '2' &&
+ fw_ver_digit3 == '0')))
+ /* BusLogic BT-742A EISA 2.1x or 2.20 */
+ strcpy(model, "742A");
+ else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '0')
+ /* AMI FastDisk EISA Series 441 0.x */
+ strcpy(model, "747A");
+ else {
+ req_replylen = sizeof(model);
+ if (blogic_cmd(adapter, BLOGIC_INQ_MODELNO, &req_replylen,
+ sizeof(req_replylen), &model,
+ sizeof(model)) != sizeof(model))
+ return blogic_failure(adapter,
+ "INQUIRE HOST ADAPTER MODEL NUMBER");
+ }
+ /*
+ BusLogic MultiMaster Host Adapters can be identified by their
+ model number and the major version number of their firmware
+ as follows:
+
+ 5.xx BusLogic "W" Series Host Adapters:
+ BT-948/958/958D
+ 4.xx BusLogic "C" Series Host Adapters:
+ BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
+ 3.xx BusLogic "S" Series Host Adapters:
+ BT-747S/747D/757S/757D/445S/545S/542D
+ BT-542B/742A (revision H)
+ 2.xx BusLogic "A" Series Host Adapters:
+ BT-542B/742A (revision G and below)
+ 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
+ */
+ /*
+ Save the Model Name and Host Adapter Name in the Host Adapter
+ structure.
+ */
+ tgt = adapter->model;
+ *tgt++ = 'B';
+ *tgt++ = 'T';
+ *tgt++ = '-';
+ for (i = 0; i < sizeof(model); i++) {
+ ch = model[i];
+ if (ch == ' ' || ch == '\0')
+ break;
+ *tgt++ = ch;
+ }
+ *tgt++ = '\0';
+ /*
+ Save the Firmware Version in the Host Adapter structure.
+ */
+ tgt = adapter->fw_ver;
+ *tgt++ = id.fw_ver_digit1;
+ *tgt++ = '.';
+ *tgt++ = id.fw_ver_digit2;
+ if (fw_ver_digit3 != ' ' && fw_ver_digit3 != '\0')
+ *tgt++ = fw_ver_digit3;
+ *tgt = '\0';
+ /*
+ Issue the Inquire Firmware Version Letter command.
+ */
+ if (strcmp(adapter->fw_ver, "3.3") >= 0) {
+ if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_LETTER, NULL, 0,
+ &fw_ver_letter,
+ sizeof(fw_ver_letter)) != sizeof(fw_ver_letter))
+ return blogic_failure(adapter,
+ "INQUIRE FIRMWARE VERSION LETTER");
+ if (fw_ver_letter != ' ' && fw_ver_letter != '\0')
+ *tgt++ = fw_ver_letter;
+ *tgt = '\0';
+ }
+ /*
+ Save the Host Adapter SCSI ID in the Host Adapter structure.
+ */
+ adapter->scsi_id = config.id;
+ /*
+ Determine the Bus Type and save it in the Host Adapter structure,
+ determine and save the IRQ Channel if necessary, and determine
+ and save the DMA Channel for ISA Host Adapters.
+ */
+ adapter->adapter_bus_type =
+ blogic_adater_bus_types[adapter->model[3] - '4'];
+ if (adapter->irq_ch == 0) {
+ if (config.irq_ch9)
+ adapter->irq_ch = 9;
+ else if (config.irq_ch10)
+ adapter->irq_ch = 10;
+ else if (config.irq_ch11)
+ adapter->irq_ch = 11;
+ else if (config.irq_ch12)
+ adapter->irq_ch = 12;
+ else if (config.irq_ch14)
+ adapter->irq_ch = 14;
+ else if (config.irq_ch15)
+ adapter->irq_ch = 15;
+ }
+ if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) {
+ if (config.dma_ch5)
+ adapter->dma_ch = 5;
+ else if (config.dma_ch6)
+ adapter->dma_ch = 6;
+ else if (config.dma_ch7)
+ adapter->dma_ch = 7;
+ }
+ /*
+ Determine whether Extended Translation is enabled and save it in
+ the Host Adapter structure.
+ */
+ georeg.all = blogic_rdgeom(adapter);
+ adapter->ext_trans_enable = georeg.gr.ext_trans_enable;
+ /*
+ Save the Scatter Gather Limits, Level Sensitive Interrupt flag, Wide
+ SCSI flag, Differential SCSI flag, SCAM Supported flag, and
+ Ultra SCSI flag in the Host Adapter structure.
+ */
+ adapter->adapter_sglimit = ext_setupinfo.sg_limit;
+ adapter->drvr_sglimit = adapter->adapter_sglimit;
+ if (adapter->adapter_sglimit > BLOGIC_SG_LIMIT)
+ adapter->drvr_sglimit = BLOGIC_SG_LIMIT;
+ if (ext_setupinfo.misc.level_int)
+ adapter->level_int = true;
+ adapter->wide = ext_setupinfo.wide;
+ adapter->differential = ext_setupinfo.differential;
+ adapter->scam = ext_setupinfo.scam;
+ adapter->ultra = ext_setupinfo.ultra;
+ /*
+ Determine whether Extended LUN Format CCBs are supported and save the
+ information in the Host Adapter structure.
+ */
+ if (adapter->fw_ver[0] == '5' || (adapter->fw_ver[0] == '4' &&
+ adapter->wide))
+ adapter->ext_lun = true;
+ /*
+ Issue the Inquire PCI Host Adapter Information command to read the
+ Termination Information from "W" series MultiMaster Host Adapters.
+ */
+ if (adapter->fw_ver[0] == '5') {
+ if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+ &adapter_info,
+ sizeof(adapter_info)) != sizeof(adapter_info))
+ return blogic_failure(adapter,
+ "INQUIRE PCI HOST ADAPTER INFORMATION");
+ /*
+ Save the Termination Information in the Host Adapter
+ structure.
+ */
+ if (adapter_info.genericinfo_valid) {
+ adapter->terminfo_valid = true;
+ adapter->low_term = adapter_info.low_term;
+ adapter->high_term = adapter_info.high_term;
+ }
+ }
+ /*
+ Issue the Fetch Host Adapter Local RAM command to read the
+ AutoSCSI data from "W" and "C" series MultiMaster Host Adapters.
+ */
+ if (adapter->fw_ver[0] >= '4') {
+ fetch_localram.offset = BLOGIC_AUTOSCSI_BASE;
+ fetch_localram.count = sizeof(autoscsi);
+ if (blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram,
+ sizeof(fetch_localram), &autoscsi,
+ sizeof(autoscsi)) != sizeof(autoscsi))
+ return blogic_failure(adapter,
+ "FETCH HOST ADAPTER LOCAL RAM");
+ /*
+ Save the Parity Checking Enabled, Bus Reset Enabled,
+ and Termination Information in the Host Adapter structure.
+ */
+ adapter->parity = autoscsi.parity;
+ adapter->reset_enabled = autoscsi.reset_enabled;
+ if (adapter->fw_ver[0] == '4') {
+ adapter->terminfo_valid = true;
+ adapter->low_term = autoscsi.low_term;
+ adapter->high_term = autoscsi.high_term;
+ }
+ /*
+ Save the Wide Permitted, Fast Permitted, Synchronous
+ Permitted, Disconnect Permitted, Ultra Permitted, and
+ SCAM Information in the Host Adapter structure.
+ */
+ adapter->wide_ok = autoscsi.wide_ok;
+ adapter->fast_ok = autoscsi.fast_ok;
+ adapter->sync_ok = autoscsi.sync_ok;
+ adapter->discon_ok = autoscsi.discon_ok;
+ if (adapter->ultra)
+ adapter->ultra_ok = autoscsi.ultra_ok;
+ if (adapter->scam) {
+ adapter->scam_enabled = autoscsi.scam_enabled;
+ adapter->scam_lev2 = autoscsi.scam_lev2;
+ }
+ }
+ /*
+ Initialize fields in the Host Adapter structure for "S" and "A"
+ series MultiMaster Host Adapters.
+ */
+ if (adapter->fw_ver[0] < '4') {
+ if (setupinfo.sync) {
+ adapter->sync_ok = 0xFF;
+ if (adapter->adapter_bus_type == BLOGIC_EISA_BUS) {
+ if (ext_setupinfo.misc.fast_on_eisa)
+ adapter->fast_ok = 0xFF;
+ if (strcmp(adapter->model, "BT-757") == 0)
+ adapter->wide_ok = 0xFF;
+ }
+ }
+ adapter->discon_ok = 0xFF;
+ adapter->parity = setupinfo.parity;
+ adapter->reset_enabled = true;
+ }
+ /*
+ Determine the maximum number of Target IDs and Logical Units
+ supported by this driver for Wide and Narrow Host Adapters.
+ */
+ adapter->maxdev = (adapter->wide ? 16 : 8);
+ adapter->maxlun = (adapter->ext_lun ? 32 : 8);
+ /*
+ Select appropriate values for the Mailbox Count, Driver Queue Depth,
+ Initial CCBs, and Incremental CCBs variables based on whether
+ or not Strict Round Robin Mode is supported. If Strict Round
+ Robin Mode is supported, then there is no performance degradation
+ in using the maximum possible number of Outgoing and Incoming
+ Mailboxes and allowing the Tagged and Untagged Queue Depths to
+ determine the actual utilization. If Strict Round Robin Mode is
+ not supported, then the Host Adapter must scan all the Outgoing
+ Mailboxes whenever an Outgoing Mailbox entry is made, which can
+ cause a substantial performance penalty. The host adapters
+ actually have room to store the following number of CCBs
+ internally; that is, they can internally queue and manage this
+ many active commands on the SCSI bus simultaneously. Performance
+ measurements demonstrate that the Driver Queue Depth should be
+ set to the Mailbox Count, rather than the Host Adapter Queue
+ Depth (internal CCB capacity), as it is more efficient to have the
+ queued commands waiting in Outgoing Mailboxes if necessary than
+ to block the process in the higher levels of the SCSI Subsystem.
+
+ 192 BT-948/958/958D
+ 100 BT-946C/956C/956CD/747C/757C/757CD/445C
+ 50 BT-545C/540CF
+ 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
+ */
+ if (adapter->fw_ver[0] == '5')
+ adapter->adapter_qdepth = 192;
+ else if (adapter->fw_ver[0] == '4')
+ adapter->adapter_qdepth = (adapter->adapter_bus_type !=
+ BLOGIC_ISA_BUS ? 100 : 50);
+ else
+ adapter->adapter_qdepth = 30;
+ if (strcmp(adapter->fw_ver, "3.31") >= 0) {
+ adapter->strict_rr = true;
+ adapter->mbox_count = BLOGIC_MAX_MAILBOX;
+ } else {
+ adapter->strict_rr = false;
+ adapter->mbox_count = 32;
+ }
+ adapter->drvr_qdepth = adapter->mbox_count;
+ adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE;
+ adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE;
+ /*
+ Tagged Queuing support is available and operates properly on
+ all "W" series MultiMaster Host Adapters, on "C" series
+ MultiMaster Host Adapters with firmware version 4.22 and above,
+ and on "S" series MultiMaster Host Adapters with firmware version
+ 3.35 and above.
+ */
+ adapter->tagq_ok = 0;
+ switch (adapter->fw_ver[0]) {
+ case '5':
+ adapter->tagq_ok = 0xFFFF;
+ break;
+ case '4':
+ if (strcmp(adapter->fw_ver, "4.22") >= 0)
+ adapter->tagq_ok = 0xFFFF;
+ break;
+ case '3':
+ if (strcmp(adapter->fw_ver, "3.35") >= 0)
+ adapter->tagq_ok = 0xFFFF;
+ break;
+ }
+ /*
+ Determine the Host Adapter BIOS Address if the BIOS is enabled and
+ save it in the Host Adapter structure. The BIOS is disabled if the
+ bios_addr is 0.
+ */
+ adapter->bios_addr = ext_setupinfo.bios_addr << 12;
+ /*
+ ISA Host Adapters require Bounce Buffers if there is more than
+ 16MB memory.
+ */
+ if (adapter->adapter_bus_type == BLOGIC_ISA_BUS &&
+ (void *) high_memory > (void *) MAX_DMA_ADDRESS)
+ adapter->need_bouncebuf = true;
+ /*
+ BusLogic BT-445S Host Adapters prior to board revision E have a
+ hardware bug whereby when the BIOS is enabled, transfers to/from
+ the same address range the BIOS occupies modulo 16MB are handled
+ incorrectly. Only properly functioning BT-445S Host Adapters
+ have firmware version 3.37, so require that ISA Bounce Buffers
+ be used for the buggy BT-445S models if there is more than 16MB
+ memory.
+ */
+ if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 &&
+ strcmp(adapter->fw_ver, "3.37") < 0 &&
+ (void *) high_memory > (void *) MAX_DMA_ADDRESS)
+ adapter->need_bouncebuf = true;
+ /*
+ Initialize parameters common to MultiMaster and FlashPoint
+ Host Adapters.
+ */
+common:
+ /*
+ Initialize the Host Adapter Full Model Name from the Model Name.
+ */
+ strcpy(adapter->full_model, "BusLogic ");
+ strcat(adapter->full_model, adapter->model);
+ /*
+ Select an appropriate value for the Tagged Queue Depth either from a
+ BusLogic Driver Options specification, or based on whether this Host
+ Adapter requires that ISA Bounce Buffers be used. The Tagged Queue
+ Depth is left at 0 for automatic determination in
+ BusLogic_SelectQueueDepths. Initialize the Untagged Queue Depth.
+ */
+ for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) {
+ unsigned char qdepth = 0;
+ if (adapter->drvr_opts != NULL &&
+ adapter->drvr_opts->qdepth[tgt_id] > 0)
+ qdepth = adapter->drvr_opts->qdepth[tgt_id];
+ else if (adapter->need_bouncebuf)
+ qdepth = BLOGIC_TAG_DEPTH_BB;
+ adapter->qdepth[tgt_id] = qdepth;
+ }
+ if (adapter->need_bouncebuf)
+ adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB;
+ else
+ adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
+ if (adapter->drvr_opts != NULL)
+ adapter->common_qdepth = adapter->drvr_opts->common_qdepth;
+ if (adapter->common_qdepth > 0 &&
+ adapter->common_qdepth < adapter->untag_qdepth)
+ adapter->untag_qdepth = adapter->common_qdepth;
+ /*
+ Tagged Queuing is only allowed if Disconnect/Reconnect is permitted.
+ Therefore, mask the Tagged Queuing Permitted Default bits with the
+ Disconnect/Reconnect Permitted bits.
+ */
+ adapter->tagq_ok &= adapter->discon_ok;
+ /*
+ Combine the default Tagged Queuing Permitted bits with any
+ BusLogic Driver Options Tagged Queuing specification.
+ */
+ if (adapter->drvr_opts != NULL)
+ adapter->tagq_ok = (adapter->drvr_opts->tagq_ok &
+ adapter->drvr_opts->tagq_ok_mask) |
+ (adapter->tagq_ok & ~adapter->drvr_opts->tagq_ok_mask);
+
+ /*
+ Select an appropriate value for Bus Settle Time either from a
+ BusLogic Driver Options specification, or from
+ BLOGIC_BUS_SETTLE_TIME.
+ */
+ if (adapter->drvr_opts != NULL &&
+ adapter->drvr_opts->bus_settle_time > 0)
+ adapter->bus_settle_time = adapter->drvr_opts->bus_settle_time;
+ else
+ adapter->bus_settle_time = BLOGIC_BUS_SETTLE_TIME;
+ /*
+ Indicate reading the Host Adapter Configuration completed
+ successfully.
+ */
+ return true;
+}
+
+
+/*
+ blogic_reportconfig reports the configuration of Host Adapter.
+*/
+
+static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
+{
+ unsigned short alltgt_mask = (1 << adapter->maxdev) - 1;
+ unsigned short sync_ok, fast_ok;
+ unsigned short ultra_ok, wide_ok;
+ unsigned short discon_ok, tagq_ok;
+ bool common_syncneg, common_tagq_depth;
+ char syncstr[BLOGIC_MAXDEV + 1];
+ char widestr[BLOGIC_MAXDEV + 1];
+ char discon_str[BLOGIC_MAXDEV + 1];
+ char tagq_str[BLOGIC_MAXDEV + 1];
+ char *syncmsg = syncstr;
+ char *widemsg = widestr;
+ char *discon_msg = discon_str;
+ char *tagq_msg = tagq_str;
+ int tgt_id;
+
+ blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
+ blogic_info(" Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
+ if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
+ blogic_info(" DMA Channel: ", adapter);
+ if (adapter->dma_ch > 0)
+ blogic_info("%d, ", adapter, adapter->dma_ch);
+ else
+ blogic_info("None, ", adapter);
+ if (adapter->bios_addr > 0)
+ blogic_info("BIOS Address: 0x%X, ", adapter,
+ adapter->bios_addr);
+ else
+ blogic_info("BIOS Address: None, ", adapter);
+ } else {
+ blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter,
+ adapter->bus, adapter->dev);
+ if (adapter->pci_addr > 0)
+ blogic_info("0x%X, ", adapter, adapter->pci_addr);
+ else
+ blogic_info("Unassigned, ", adapter);
+ }
+ blogic_info("Host Adapter SCSI ID: %d\n", adapter, adapter->scsi_id);
+ blogic_info(" Parity Checking: %s, Extended Translation: %s\n",
+ adapter, (adapter->parity ? "Enabled" : "Disabled"),
+ (adapter->ext_trans_enable ? "Enabled" : "Disabled"));
+ alltgt_mask &= ~(1 << adapter->scsi_id);
+ sync_ok = adapter->sync_ok & alltgt_mask;
+ fast_ok = adapter->fast_ok & alltgt_mask;
+ ultra_ok = adapter->ultra_ok & alltgt_mask;
+ if ((blogic_multimaster_type(adapter) &&
+ (adapter->fw_ver[0] >= '4' ||
+ adapter->adapter_bus_type == BLOGIC_EISA_BUS)) ||
+ blogic_flashpoint_type(adapter)) {
+ common_syncneg = false;
+ if (sync_ok == 0) {
+ syncmsg = "Disabled";
+ common_syncneg = true;
+ } else if (sync_ok == alltgt_mask) {
+ if (fast_ok == 0) {
+ syncmsg = "Slow";
+ common_syncneg = true;
+ } else if (fast_ok == alltgt_mask) {
+ if (ultra_ok == 0) {
+ syncmsg = "Fast";
+ common_syncneg = true;
+ } else if (ultra_ok == alltgt_mask) {
+ syncmsg = "Ultra";
+ common_syncneg = true;
+ }
+ }
+ }
+ if (!common_syncneg) {
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ syncstr[tgt_id] = ((!(sync_ok & (1 << tgt_id))) ? 'N' : (!(fast_ok & (1 << tgt_id)) ? 'S' : (!(ultra_ok & (1 << tgt_id)) ? 'F' : 'U')));
+ syncstr[adapter->scsi_id] = '#';
+ syncstr[adapter->maxdev] = '\0';
+ }
+ } else
+ syncmsg = (sync_ok == 0 ? "Disabled" : "Enabled");
+ wide_ok = adapter->wide_ok & alltgt_mask;
+ if (wide_ok == 0)
+ widemsg = "Disabled";
+ else if (wide_ok == alltgt_mask)
+ widemsg = "Enabled";
+ else {
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ widestr[tgt_id] = ((wide_ok & (1 << tgt_id)) ? 'Y' : 'N');
+ widestr[adapter->scsi_id] = '#';
+ widestr[adapter->maxdev] = '\0';
+ }
+ discon_ok = adapter->discon_ok & alltgt_mask;
+ if (discon_ok == 0)
+ discon_msg = "Disabled";
+ else if (discon_ok == alltgt_mask)
+ discon_msg = "Enabled";
+ else {
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ discon_str[tgt_id] = ((discon_ok & (1 << tgt_id)) ? 'Y' : 'N');
+ discon_str[adapter->scsi_id] = '#';
+ discon_str[adapter->maxdev] = '\0';
+ }
+ tagq_ok = adapter->tagq_ok & alltgt_mask;
+ if (tagq_ok == 0)
+ tagq_msg = "Disabled";
+ else if (tagq_ok == alltgt_mask)
+ tagq_msg = "Enabled";
+ else {
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ tagq_str[tgt_id] = ((tagq_ok & (1 << tgt_id)) ? 'Y' : 'N');
+ tagq_str[adapter->scsi_id] = '#';
+ tagq_str[adapter->maxdev] = '\0';
+ }
+ blogic_info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n",
+ adapter, syncmsg, widemsg);
+ blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter,
+ discon_msg, tagq_msg);
+ if (blogic_multimaster_type(adapter)) {
+ blogic_info(" Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
+ blogic_info(" Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
+ } else
+ blogic_info(" Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
+ blogic_info(" Tagged Queue Depth: ", adapter);
+ common_tagq_depth = true;
+ for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++)
+ if (adapter->qdepth[tgt_id] != adapter->qdepth[0]) {
+ common_tagq_depth = false;
+ break;
+ }
+ if (common_tagq_depth) {
+ if (adapter->qdepth[0] > 0)
+ blogic_info("%d", adapter, adapter->qdepth[0]);
+ else
+ blogic_info("Automatic", adapter);
+ } else
+ blogic_info("Individual", adapter);
+ blogic_info(", Untagged Queue Depth: %d\n", adapter,
+ adapter->untag_qdepth);
+ if (adapter->terminfo_valid) {
+ if (adapter->wide)
+ blogic_info(" SCSI Bus Termination: %s", adapter,
+ (adapter->low_term ? (adapter->high_term ? "Both Enabled" : "Low Enabled") : (adapter->high_term ? "High Enabled" : "Both Disabled")));
+ else
+ blogic_info(" SCSI Bus Termination: %s", adapter,
+ (adapter->low_term ? "Enabled" : "Disabled"));
+ if (adapter->scam)
+ blogic_info(", SCAM: %s", adapter,
+ (adapter->scam_enabled ? (adapter->scam_lev2 ? "Enabled, Level 2" : "Enabled, Level 1") : "Disabled"));
+ blogic_info("\n", adapter);
+ }
+ /*
+ Indicate reporting the Host Adapter configuration completed
+ successfully.
+ */
+ return true;
+}
+
+
+/*
+ blogic_getres acquires the system resources necessary to use
+ Host Adapter.
+*/
+
+static bool __init blogic_getres(struct blogic_adapter *adapter)
+{
+ if (adapter->irq_ch == 0) {
+ blogic_err("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n",
+ adapter);
+ return false;
+ }
+ /*
+ Acquire shared access to the IRQ Channel.
+ */
+ if (request_irq(adapter->irq_ch, blogic_inthandler, IRQF_SHARED,
+ adapter->full_model, adapter) < 0) {
+ blogic_err("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n",
+ adapter, adapter->irq_ch);
+ return false;
+ }
+ adapter->irq_acquired = true;
+ /*
+ Acquire exclusive access to the DMA Channel.
+ */
+ if (adapter->dma_ch > 0) {
+ if (request_dma(adapter->dma_ch, adapter->full_model) < 0) {
+ blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch);
+ return false;
+ }
+ set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE);
+ enable_dma(adapter->dma_ch);
+ adapter->dma_chan_acquired = true;
+ }
+ /*
+ Indicate the System Resource Acquisition completed successfully,
+ */
+ return true;
+}
+
+
+/*
+ blogic_relres releases any system resources previously acquired
+ by blogic_getres.
+*/
+
+static void blogic_relres(struct blogic_adapter *adapter)
+{
+ /*
+ Release shared access to the IRQ Channel.
+ */
+ if (adapter->irq_acquired)
+ free_irq(adapter->irq_ch, adapter);
+ /*
+ Release exclusive access to the DMA Channel.
+ */
+ if (adapter->dma_chan_acquired)
+ free_dma(adapter->dma_ch);
+ /*
+ Release any allocated memory structs not released elsewhere
+ */
+ if (adapter->mbox_space)
+ pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+ adapter->mbox_space, adapter->mbox_space_handle);
+ pci_dev_put(adapter->pci_device);
+ adapter->mbox_space = NULL;
+ adapter->mbox_space_handle = 0;
+ adapter->mbox_sz = 0;
+}
+
+
+/*
+ blogic_initadapter initializes Host Adapter. This is the only
+ function called during SCSI Host Adapter detection which modifies the state
+ of the Host Adapter from its initial power on or hard reset state.
+*/
+
+static bool blogic_initadapter(struct blogic_adapter *adapter)
+{
+ struct blogic_extmbox_req extmbox_req;
+ enum blogic_rr_req rr_req;
+ enum blogic_setccb_fmt setccb_fmt;
+ int tgt_id;
+
+ /*
+ Initialize the pointers to the first and last CCBs that are
+ queued for completion processing.
+ */
+ adapter->firstccb = NULL;
+ adapter->lastccb = NULL;
+
+ /*
+ Initialize the Bus Device Reset Pending CCB, Tagged Queuing Active,
+ Command Successful Flag, Active Commands, and Commands Since Reset
+ for each Target Device.
+ */
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) {
+ adapter->bdr_pend[tgt_id] = NULL;
+ adapter->tgt_flags[tgt_id].tagq_active = false;
+ adapter->tgt_flags[tgt_id].cmd_good = false;
+ adapter->active_cmds[tgt_id] = 0;
+ adapter->cmds_since_rst[tgt_id] = 0;
+ }
+
+ /*
+ FlashPoint Host Adapters do not use Outgoing and Incoming Mailboxes.
+ */
+ if (blogic_flashpoint_type(adapter))
+ goto done;
+
+ /*
+ Initialize the Outgoing and Incoming Mailbox pointers.
+ */
+ adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
+ adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
+ adapter->mbox_sz, &adapter->mbox_space_handle);
+ if (adapter->mbox_space == NULL)
+ return blogic_failure(adapter, "MAILBOX ALLOCATION");
+ adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
+ adapter->last_outbox = adapter->first_outbox + adapter->mbox_count - 1;
+ adapter->next_outbox = adapter->first_outbox;
+ adapter->first_inbox = (struct blogic_inbox *) (adapter->last_outbox + 1);
+ adapter->last_inbox = adapter->first_inbox + adapter->mbox_count - 1;
+ adapter->next_inbox = adapter->first_inbox;
+
+ /*
+ Initialize the Outgoing and Incoming Mailbox structures.
+ */
+ memset(adapter->first_outbox, 0,
+ adapter->mbox_count * sizeof(struct blogic_outbox));
+ memset(adapter->first_inbox, 0,
+ adapter->mbox_count * sizeof(struct blogic_inbox));
+
+ /*
+ Initialize the Host Adapter's Pointer to the Outgoing/Incoming
+ Mailboxes.
+ */
+ extmbox_req.mbox_count = adapter->mbox_count;
+ extmbox_req.base_mbox_addr = (u32) adapter->mbox_space_handle;
+ if (blogic_cmd(adapter, BLOGIC_INIT_EXT_MBOX, &extmbox_req,
+ sizeof(extmbox_req), NULL, 0) < 0)
+ return blogic_failure(adapter, "MAILBOX INITIALIZATION");
+ /*
+ Enable Strict Round Robin Mode if supported by the Host Adapter. In
+ Strict Round Robin Mode, the Host Adapter only looks at the next
+ Outgoing Mailbox for each new command, rather than scanning
+ through all the Outgoing Mailboxes to find any that have new
+ commands in them. Strict Round Robin Mode is significantly more
+ efficient.
+ */
+ if (adapter->strict_rr) {
+ rr_req = BLOGIC_STRICT_RR_MODE;
+ if (blogic_cmd(adapter, BLOGIC_STRICT_RR, &rr_req,
+ sizeof(rr_req), NULL, 0) < 0)
+ return blogic_failure(adapter,
+ "ENABLE STRICT ROUND ROBIN MODE");
+ }
+
+ /*
+ For Host Adapters that support Extended LUN Format CCBs, issue the
+ Set CCB Format command to allow 32 Logical Units per Target Device.
+ */
+ if (adapter->ext_lun) {
+ setccb_fmt = BLOGIC_EXT_LUN_CCB;
+ if (blogic_cmd(adapter, BLOGIC_SETCCB_FMT, &setccb_fmt,
+ sizeof(setccb_fmt), NULL, 0) < 0)
+ return blogic_failure(adapter, "SET CCB FORMAT");
+ }
+
+ /*
+ Announce Successful Initialization.
+ */
+done:
+ if (!adapter->adapter_initd) {
+ blogic_info("*** %s Initialized Successfully ***\n", adapter,
+ adapter->full_model);
+ blogic_info("\n", adapter);
+ } else
+ blogic_warn("*** %s Initialized Successfully ***\n", adapter,
+ adapter->full_model);
+ adapter->adapter_initd = true;
+
+ /*
+ Indicate the Host Adapter Initialization completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ blogic_inquiry inquires about the Target Devices accessible
+ through Host Adapter.
+*/
+
+static bool __init blogic_inquiry(struct blogic_adapter *adapter)
+{
+ u16 installed_devs;
+ u8 installed_devs0to7[8];
+ struct blogic_setup_info setupinfo;
+ u8 sync_period[BLOGIC_MAXDEV];
+ unsigned char req_replylen;
+ int tgt_id;
+
+ /*
+ Wait a few seconds between the Host Adapter Hard Reset which
+ initiates a SCSI Bus Reset and issuing any SCSI Commands. Some
+ SCSI devices get confused if they receive SCSI Commands too soon
+ after a SCSI Bus Reset.
+ */
+ blogic_delay(adapter->bus_settle_time);
+ /*
+ FlashPoint Host Adapters do not provide for Target Device Inquiry.
+ */
+ if (blogic_flashpoint_type(adapter))
+ return true;
+ /*
+ Inhibit the Target Device Inquiry if requested.
+ */
+ if (adapter->drvr_opts != NULL && adapter->drvr_opts->stop_tgt_inquiry)
+ return true;
+ /*
+ Issue the Inquire Target Devices command for host adapters with
+ firmware version 4.25 or later, or the Inquire Installed Devices
+ ID 0 to 7 command for older host adapters. This is necessary to
+ force Synchronous Transfer Negotiation so that the Inquire Setup
+ Information and Inquire Synchronous Period commands will return
+ valid data. The Inquire Target Devices command is preferable to
+ Inquire Installed Devices ID 0 to 7 since it only probes Logical
+ Unit 0 of each Target Device.
+ */
+ if (strcmp(adapter->fw_ver, "4.25") >= 0) {
+
+ /*
+ Issue a Inquire Target Devices command. Inquire Target
+ Devices only tests Logical Unit 0 of each Target Device
+ unlike the Inquire Installed Devices commands which test
+ Logical Units 0 - 7. Two bytes are returned, where byte
+ 0 bit 0 set indicates that Target Device 0 exists, and so on.
+ */
+
+ if (blogic_cmd(adapter, BLOGIC_INQ_DEV, NULL, 0,
+ &installed_devs, sizeof(installed_devs))
+ != sizeof(installed_devs))
+ return blogic_failure(adapter, "INQUIRE TARGET DEVICES");
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ adapter->tgt_flags[tgt_id].tgt_exists =
+ (installed_devs & (1 << tgt_id) ? true : false);
+ } else {
+
+ /*
+ Issue an Inquire Installed Devices command. For each
+ Target Device, a byte is returned where bit 0 set
+ indicates that Logical Unit 0 * exists, bit 1 set
+ indicates that Logical Unit 1 exists, and so on.
+ */
+
+ if (blogic_cmd(adapter, BLOGIC_INQ_DEV0TO7, NULL, 0,
+ &installed_devs0to7, sizeof(installed_devs0to7))
+ != sizeof(installed_devs0to7))
+ return blogic_failure(adapter,
+ "INQUIRE INSTALLED DEVICES ID 0 TO 7");
+ for (tgt_id = 0; tgt_id < 8; tgt_id++)
+ adapter->tgt_flags[tgt_id].tgt_exists =
+ (installed_devs0to7[tgt_id] != 0 ? true : false);
+ }
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ req_replylen = sizeof(setupinfo);
+ if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen,
+ sizeof(req_replylen), &setupinfo, sizeof(setupinfo))
+ != sizeof(setupinfo))
+ return blogic_failure(adapter, "INQUIRE SETUP INFORMATION");
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ adapter->sync_offset[tgt_id] = (tgt_id < 8 ? setupinfo.sync0to7[tgt_id].offset : setupinfo.sync8to15[tgt_id - 8].offset);
+ if (strcmp(adapter->fw_ver, "5.06L") >= 0)
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ adapter->tgt_flags[tgt_id].wide_active = (tgt_id < 8 ? (setupinfo.wide_tx_active0to7 & (1 << tgt_id) ? true : false) : (setupinfo.wide_tx_active8to15 & (1 << (tgt_id - 8)) ? true : false));
+ /*
+ Issue the Inquire Synchronous Period command.
+ */
+ if (adapter->fw_ver[0] >= '3') {
+
+ /* Issue a Inquire Synchronous Period command. For each
+ Target Device, a byte is returned which represents the
+ Synchronous Transfer Period in units of 10 nanoseconds.
+ */
+
+ req_replylen = sizeof(sync_period);
+ if (blogic_cmd(adapter, BLOGIC_INQ_SYNC_PERIOD, &req_replylen,
+ sizeof(req_replylen), &sync_period,
+ sizeof(sync_period)) != sizeof(sync_period))
+ return blogic_failure(adapter,
+ "INQUIRE SYNCHRONOUS PERIOD");
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ adapter->sync_period[tgt_id] = sync_period[tgt_id];
+ } else
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ if (setupinfo.sync0to7[tgt_id].offset > 0)
+ adapter->sync_period[tgt_id] = 20 + 5 * setupinfo.sync0to7[tgt_id].tx_period;
+ /*
+ Indicate the Target Device Inquiry completed successfully.
+ */
+ return true;
+}
+
+/*
+ blogic_inithoststruct initializes the fields in the SCSI Host
+ structure. The base, io_port, n_io_ports, irq, and dma_channel fields in the
+ SCSI Host structure are intentionally left uninitialized, as this driver
+ handles acquisition and release of these resources explicitly, as well as
+ ensuring exclusive access to the Host Adapter hardware and data structures
+ through explicit acquisition and release of the Host Adapter's Lock.
+*/
+
+static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
+ struct Scsi_Host *host)
+{
+ host->max_id = adapter->maxdev;
+ host->max_lun = adapter->maxlun;
+ host->max_channel = 0;
+ host->unique_id = adapter->io_addr;
+ host->this_id = adapter->scsi_id;
+ host->can_queue = adapter->drvr_qdepth;
+ host->sg_tablesize = adapter->drvr_sglimit;
+ host->unchecked_isa_dma = adapter->need_bouncebuf;
+ host->cmd_per_lun = adapter->untag_qdepth;
+}
+
+/*
+ blogic_slaveconfig will actually set the queue depth on individual
+ scsi devices as they are permanently added to the device chain. We
+ shamelessly rip off the SelectQueueDepths code to make this work mostly
+ like it used to. Since we don't get called once at the end of the scan
+ but instead get called for each device, we have to do things a bit
+ differently.
+*/
+static int blogic_slaveconfig(struct scsi_device *dev)
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) dev->host->hostdata;
+ int tgt_id = dev->id;
+ int qdepth = adapter->qdepth[tgt_id];
+
+ if (adapter->tgt_flags[tgt_id].tagq_ok &&
+ (adapter->tagq_ok & (1 << tgt_id))) {
+ if (qdepth == 0)
+ qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH;
+ adapter->qdepth[tgt_id] = qdepth;
+ scsi_change_queue_depth(dev, qdepth);
+ } else {
+ adapter->tagq_ok &= ~(1 << tgt_id);
+ qdepth = adapter->untag_qdepth;
+ adapter->qdepth[tgt_id] = qdepth;
+ scsi_change_queue_depth(dev, qdepth);
+ }
+ qdepth = 0;
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+ if (adapter->tgt_flags[tgt_id].tgt_exists)
+ qdepth += adapter->qdepth[tgt_id];
+ if (qdepth > adapter->alloc_ccbs)
+ blogic_create_addlccbs(adapter, qdepth - adapter->alloc_ccbs,
+ false);
+ return 0;
+}
+
+/*
+ blogic_init probes for BusLogic Host Adapters at the standard
+ I/O Addresses where they may be located, initializing, registering, and
+ reporting the configuration of each BusLogic Host Adapter it finds. It
+ returns the number of BusLogic Host Adapters successfully initialized and
+ registered.
+*/
+
+static int __init blogic_init(void)
+{
+ int adapter_count = 0, drvr_optindex = 0, probeindex;
+ struct blogic_adapter *adapter;
+ int ret = 0;
+
+#ifdef MODULE
+ if (BusLogic)
+ blogic_setup(BusLogic);
+#endif
+
+ if (blogic_probe_options.noprobe)
+ return -ENODEV;
+ blogic_probeinfo_list =
+ kzalloc(BLOGIC_MAX_ADAPTERS * sizeof(struct blogic_probeinfo),
+ GFP_KERNEL);
+ if (blogic_probeinfo_list == NULL) {
+ blogic_err("BusLogic: Unable to allocate Probe Info List\n",
+ NULL);
+ return -ENOMEM;
+ }
+
+ adapter = kzalloc(sizeof(struct blogic_adapter), GFP_KERNEL);
+ if (adapter == NULL) {
+ kfree(blogic_probeinfo_list);
+ blogic_err("BusLogic: Unable to allocate Prototype Host Adapter\n", NULL);
+ return -ENOMEM;
+ }
+
+#ifdef MODULE
+ if (BusLogic != NULL)
+ blogic_setup(BusLogic);
+#endif
+ blogic_init_probeinfo_list(adapter);
+ for (probeindex = 0; probeindex < blogic_probeinfo_count; probeindex++) {
+ struct blogic_probeinfo *probeinfo =
+ &blogic_probeinfo_list[probeindex];
+ struct blogic_adapter *myadapter = adapter;
+ struct Scsi_Host *host;
+
+ if (probeinfo->io_addr == 0)
+ continue;
+ memset(myadapter, 0, sizeof(struct blogic_adapter));
+ myadapter->adapter_type = probeinfo->adapter_type;
+ myadapter->adapter_bus_type = probeinfo->adapter_bus_type;
+ myadapter->io_addr = probeinfo->io_addr;
+ myadapter->pci_addr = probeinfo->pci_addr;
+ myadapter->bus = probeinfo->bus;
+ myadapter->dev = probeinfo->dev;
+ myadapter->pci_device = probeinfo->pci_device;
+ myadapter->irq_ch = probeinfo->irq_ch;
+ myadapter->addr_count =
+ blogic_adapter_addr_count[myadapter->adapter_type];
+
+ /*
+ Make sure region is free prior to probing.
+ */
+ if (!request_region(myadapter->io_addr, myadapter->addr_count,
+ "BusLogic"))
+ continue;
+ /*
+ Probe the Host Adapter. If unsuccessful, abort further
+ initialization.
+ */
+ if (!blogic_probe(myadapter)) {
+ release_region(myadapter->io_addr,
+ myadapter->addr_count);
+ continue;
+ }
+ /*
+ Hard Reset the Host Adapter. If unsuccessful, abort further
+ initialization.
+ */
+ if (!blogic_hwreset(myadapter, true)) {
+ release_region(myadapter->io_addr,
+ myadapter->addr_count);
+ continue;
+ }
+ /*
+ Check the Host Adapter. If unsuccessful, abort further
+ initialization.
+ */
+ if (!blogic_checkadapter(myadapter)) {
+ release_region(myadapter->io_addr,
+ myadapter->addr_count);
+ continue;
+ }
+ /*
+ Initialize the Driver Options field if provided.
+ */
+ if (drvr_optindex < blogic_drvr_options_count)
+ myadapter->drvr_opts =
+ &blogic_drvr_options[drvr_optindex++];
+ /*
+ Announce the Driver Version and Date, Author's Name,
+ Copyright Notice, and Electronic Mail Address.
+ */
+ blogic_announce_drvr(myadapter);
+ /*
+ Register the SCSI Host structure.
+ */
+
+ host = scsi_host_alloc(&blogic_template,
+ sizeof(struct blogic_adapter));
+ if (host == NULL) {
+ release_region(myadapter->io_addr,
+ myadapter->addr_count);
+ continue;
+ }
+ myadapter = (struct blogic_adapter *) host->hostdata;
+ memcpy(myadapter, adapter, sizeof(struct blogic_adapter));
+ myadapter->scsi_host = host;
+ myadapter->host_no = host->host_no;
+ /*
+ Add Host Adapter to the end of the list of registered
+ BusLogic Host Adapters.
+ */
+ list_add_tail(&myadapter->host_list, &blogic_host_list);
+
+ /*
+ Read the Host Adapter Configuration, Configure the Host
+ Adapter, Acquire the System Resources necessary to use
+ the Host Adapter, then Create the Initial CCBs, Initialize
+ the Host Adapter, and finally perform Target Device
+ Inquiry. From this point onward, any failure will be
+ assumed to be due to a problem with the Host Adapter,
+ rather than due to having mistakenly identified this port
+ as belonging to a BusLogic Host Adapter. The I/O Address
+ range will not be released, thereby preventing it from
+ being incorrectly identified as any other type of Host
+ Adapter.
+ */
+ if (blogic_rdconfig(myadapter) &&
+ blogic_reportconfig(myadapter) &&
+ blogic_getres(myadapter) &&
+ blogic_create_initccbs(myadapter) &&
+ blogic_initadapter(myadapter) &&
+ blogic_inquiry(myadapter)) {
+ /*
+ Initialization has been completed successfully.
+ Release and re-register usage of the I/O Address
+ range so that the Model Name of the Host Adapter
+ will appear, and initialize the SCSI Host structure.
+ */
+ release_region(myadapter->io_addr,
+ myadapter->addr_count);
+ if (!request_region(myadapter->io_addr,
+ myadapter->addr_count,
+ myadapter->full_model)) {
+ printk(KERN_WARNING
+ "BusLogic: Release and re-register of "
+ "port 0x%04lx failed \n",
+ (unsigned long)myadapter->io_addr);
+ blogic_destroy_ccbs(myadapter);
+ blogic_relres(myadapter);
+ list_del(&myadapter->host_list);
+ scsi_host_put(host);
+ ret = -ENOMEM;
+ } else {
+ blogic_inithoststruct(myadapter,
+ host);
+ if (scsi_add_host(host, myadapter->pci_device
+ ? &myadapter->pci_device->dev
+ : NULL)) {
+ printk(KERN_WARNING
+ "BusLogic: scsi_add_host()"
+ "failed!\n");
+ blogic_destroy_ccbs(myadapter);
+ blogic_relres(myadapter);
+ list_del(&myadapter->host_list);
+ scsi_host_put(host);
+ ret = -ENODEV;
+ } else {
+ scsi_scan_host(host);
+ adapter_count++;
+ }
+ }
+ } else {
+ /*
+ An error occurred during Host Adapter Configuration
+ Querying, Host Adapter Configuration, Resource
+ Acquisition, CCB Creation, Host Adapter
+ Initialization, or Target Device Inquiry, so
+ remove Host Adapter from the list of registered
+ BusLogic Host Adapters, destroy the CCBs, Release
+ the System Resources, and Unregister the SCSI
+ Host.
+ */
+ blogic_destroy_ccbs(myadapter);
+ blogic_relres(myadapter);
+ list_del(&myadapter->host_list);
+ scsi_host_put(host);
+ ret = -ENODEV;
+ }
+ }
+ kfree(adapter);
+ kfree(blogic_probeinfo_list);
+ blogic_probeinfo_list = NULL;
+ return ret;
+}
+
+
+/*
+ blogic_deladapter releases all resources previously acquired to
+ support a specific Host Adapter, including the I/O Address range, and
+ unregisters the BusLogic Host Adapter.
+*/
+
+static int __exit blogic_deladapter(struct blogic_adapter *adapter)
+{
+ struct Scsi_Host *host = adapter->scsi_host;
+
+ scsi_remove_host(host);
+
+ /*
+ FlashPoint Host Adapters must first be released by the FlashPoint
+ SCCB Manager.
+ */
+ if (blogic_flashpoint_type(adapter))
+ FlashPoint_ReleaseHostAdapter(adapter->cardhandle);
+ /*
+ Destroy the CCBs and release any system resources acquired to
+ support Host Adapter.
+ */
+ blogic_destroy_ccbs(adapter);
+ blogic_relres(adapter);
+ /*
+ Release usage of the I/O Address range.
+ */
+ release_region(adapter->io_addr, adapter->addr_count);
+ /*
+ Remove Host Adapter from the list of registered BusLogic
+ Host Adapters.
+ */
+ list_del(&adapter->host_list);
+
+ scsi_host_put(host);
+ return 0;
+}
+
+
+/*
+ blogic_qcompleted_ccb queues CCB for completion processing.
+*/
+
+static void blogic_qcompleted_ccb(struct blogic_ccb *ccb)
+{
+ struct blogic_adapter *adapter = ccb->adapter;
+
+ ccb->status = BLOGIC_CCB_COMPLETE;
+ ccb->next = NULL;
+ if (adapter->firstccb == NULL) {
+ adapter->firstccb = ccb;
+ adapter->lastccb = ccb;
+ } else {
+ adapter->lastccb->next = ccb;
+ adapter->lastccb = ccb;
+ }
+ adapter->active_cmds[ccb->tgt_id]--;
+}
+
+
+/*
+ blogic_resultcode computes a SCSI Subsystem Result Code from
+ the Host Adapter Status and Target Device Status.
+*/
+
+static int blogic_resultcode(struct blogic_adapter *adapter,
+ enum blogic_adapter_status adapter_status,
+ enum blogic_tgt_status tgt_status)
+{
+ int hoststatus;
+
+ switch (adapter_status) {
+ case BLOGIC_CMD_CMPLT_NORMAL:
+ case BLOGIC_LINK_CMD_CMPLT:
+ case BLOGIC_LINK_CMD_CMPLT_FLAG:
+ hoststatus = DID_OK;
+ break;
+ case BLOGIC_SELECT_TIMEOUT:
+ hoststatus = DID_TIME_OUT;
+ break;
+ case BLOGIC_INVALID_OUTBOX_CODE:
+ case BLOGIC_INVALID_CMD_CODE:
+ case BLOGIC_BAD_CMD_PARAM:
+ blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
+ adapter, adapter_status);
+ case BLOGIC_DATA_UNDERRUN:
+ case BLOGIC_DATA_OVERRUN:
+ case BLOGIC_NOEXPECT_BUSFREE:
+ case BLOGIC_LINKCCB_BADLUN:
+ case BLOGIC_AUTOREQSENSE_FAIL:
+ case BLOGIC_TAGQUEUE_REJECT:
+ case BLOGIC_BAD_MSG_RCVD:
+ case BLOGIC_HW_FAIL:
+ case BLOGIC_BAD_RECONNECT:
+ case BLOGIC_ABRT_QUEUE:
+ case BLOGIC_ADAPTER_SW_ERROR:
+ case BLOGIC_HW_TIMEOUT:
+ case BLOGIC_PARITY_ERR:
+ hoststatus = DID_ERROR;
+ break;
+ case BLOGIC_INVALID_BUSPHASE:
+ case BLOGIC_NORESPONSE_TO_ATN:
+ case BLOGIC_HW_RESET:
+ case BLOGIC_RST_FROM_OTHERDEV:
+ case BLOGIC_HW_BDR:
+ hoststatus = DID_RESET;
+ break;
+ default:
+ blogic_warn("Unknown Host Adapter Status 0x%02X\n", adapter,
+ adapter_status);
+ hoststatus = DID_ERROR;
+ break;
+ }
+ return (hoststatus << 16) | tgt_status;
+}
+
+
+/*
+ blogic_scan_inbox scans the Incoming Mailboxes saving any
+ Incoming Mailbox entries for completion processing.
+*/
+
+static void blogic_scan_inbox(struct blogic_adapter *adapter)
+{
+ /*
+ Scan through the Incoming Mailboxes in Strict Round Robin
+ fashion, saving any completed CCBs for further processing. It
+ is essential that for each CCB and SCSI Command issued, command
+ completion processing is performed exactly once. Therefore,
+ only Incoming Mailboxes with completion code Command Completed
+ Without Error, Command Completed With Error, or Command Aborted
+ At Host Request are saved for completion processing. When an
+ Incoming Mailbox has a completion code of Aborted Command Not
+ Found, the CCB had already completed or been aborted before the
+ current Abort request was processed, and so completion processing
+ has already occurred and no further action should be taken.
+ */
+ struct blogic_inbox *next_inbox = adapter->next_inbox;
+ enum blogic_cmplt_code comp_code;
+
+ while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
+ /*
+ We are only allowed to do this because we limit our
+ architectures we run on to machines where bus_to_virt(
+ actually works. There *needs* to be a dma_addr_to_virt()
+ in the new PCI DMA mapping interface to replace
+ bus_to_virt() or else this code is going to become very
+ innefficient.
+ */
+ struct blogic_ccb *ccb =
+ (struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
+ if (comp_code != BLOGIC_CMD_NOTFOUND) {
+ if (ccb->status == BLOGIC_CCB_ACTIVE ||
+ ccb->status == BLOGIC_CCB_RESET) {
+ /*
+ Save the Completion Code for this CCB and
+ queue the CCB for completion processing.
+ */
+ ccb->comp_code = comp_code;
+ blogic_qcompleted_ccb(ccb);
+ } else {
+ /*
+ If a CCB ever appears in an Incoming Mailbox
+ and is not marked as status Active or Reset,
+ then there is most likely a bug in
+ the Host Adapter firmware.
+ */
+ blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
+ }
+ }
+ next_inbox->comp_code = BLOGIC_INBOX_FREE;
+ if (++next_inbox > adapter->last_inbox)
+ next_inbox = adapter->first_inbox;
+ }
+ adapter->next_inbox = next_inbox;
+}
+
+
+/*
+ blogic_process_ccbs iterates over the completed CCBs for Host
+ Adapter setting the SCSI Command Result Codes, deallocating the CCBs, and
+ calling the SCSI Subsystem Completion Routines. The Host Adapter's Lock
+ should already have been acquired by the caller.
+*/
+
+static void blogic_process_ccbs(struct blogic_adapter *adapter)
+{
+ if (adapter->processing_ccbs)
+ return;
+ adapter->processing_ccbs = true;
+ while (adapter->firstccb != NULL) {
+ struct blogic_ccb *ccb = adapter->firstccb;
+ struct scsi_cmnd *command = ccb->command;
+ adapter->firstccb = ccb->next;
+ if (adapter->firstccb == NULL)
+ adapter->lastccb = NULL;
+ /*
+ Process the Completed CCB.
+ */
+ if (ccb->opcode == BLOGIC_BDR) {
+ int tgt_id = ccb->tgt_id;
+
+ blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id);
+ blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done);
+ adapter->tgt_flags[tgt_id].tagq_active = false;
+ adapter->cmds_since_rst[tgt_id] = 0;
+ adapter->last_resetdone[tgt_id] = jiffies;
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ blogic_dealloc_ccb(ccb, 1);
+#if 0 /* this needs to be redone different for new EH */
+ /*
+ Bus Device Reset CCBs have the command field
+ non-NULL only when a Bus Device Reset was requested
+ for a command that did not have a currently active
+ CCB in the Host Adapter (i.e., a Synchronous Bus
+ Device Reset), and hence would not have its
+ Completion Routine called otherwise.
+ */
+ while (command != NULL) {
+ struct scsi_cmnd *nxt_cmd =
+ command->reset_chain;
+ command->reset_chain = NULL;
+ command->result = DID_RESET << 16;
+ command->scsi_done(command);
+ command = nxt_cmd;
+ }
+#endif
+ /*
+ Iterate over the CCBs for this Host Adapter
+ performing completion processing for any CCBs
+ marked as Reset for this Target.
+ */
+ for (ccb = adapter->all_ccbs; ccb != NULL;
+ ccb = ccb->next_all)
+ if (ccb->status == BLOGIC_CCB_RESET &&
+ ccb->tgt_id == tgt_id) {
+ command = ccb->command;
+ blogic_dealloc_ccb(ccb, 1);
+ adapter->active_cmds[tgt_id]--;
+ command->result = DID_RESET << 16;
+ command->scsi_done(command);
+ }
+ adapter->bdr_pend[tgt_id] = NULL;
+ } else {
+ /*
+ Translate the Completion Code, Host Adapter Status,
+ and Target Device Status into a SCSI Subsystem
+ Result Code.
+ */
+ switch (ccb->comp_code) {
+ case BLOGIC_INBOX_FREE:
+ case BLOGIC_CMD_NOTFOUND:
+ case BLOGIC_INVALID_CCB:
+ blogic_warn("CCB #%ld to Target %d Impossible State\n", adapter, ccb->serial, ccb->tgt_id);
+ break;
+ case BLOGIC_CMD_COMPLETE_GOOD:
+ adapter->tgt_stats[ccb->tgt_id]
+ .cmds_complete++;
+ adapter->tgt_flags[ccb->tgt_id]
+ .cmd_good = true;
+ command->result = DID_OK << 16;
+ break;
+ case BLOGIC_CMD_ABORT_BY_HOST:
+ blogic_warn("CCB #%ld to Target %d Aborted\n",
+ adapter, ccb->serial, ccb->tgt_id);
+ blogic_inc_count(&adapter->tgt_stats[ccb->tgt_id].aborts_done);
+ command->result = DID_ABORT << 16;
+ break;
+ case BLOGIC_CMD_COMPLETE_ERROR:
+ command->result = blogic_resultcode(adapter,
+ ccb->adapter_status, ccb->tgt_status);
+ if (ccb->adapter_status != BLOGIC_SELECT_TIMEOUT) {
+ adapter->tgt_stats[ccb->tgt_id]
+ .cmds_complete++;
+ if (blogic_global_options.trace_err) {
+ int i;
+ blogic_notice("CCB #%ld Target %d: Result %X Host "
+ "Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
+ blogic_notice("CDB ", adapter);
+ for (i = 0; i < ccb->cdblen; i++)
+ blogic_notice(" %02X", adapter, ccb->cdb[i]);
+ blogic_notice("\n", adapter);
+ blogic_notice("Sense ", adapter);
+ for (i = 0; i < ccb->sense_datalen; i++)
+ blogic_notice(" %02X", adapter, command->sense_buffer[i]);
+ blogic_notice("\n", adapter);
+ }
+ }
+ break;
+ }
+ /*
+ When an INQUIRY command completes normally, save the
+ CmdQue (Tagged Queuing Supported) and WBus16 (16 Bit
+ Wide Data Transfers Supported) bits.
+ */
+ if (ccb->cdb[0] == INQUIRY && ccb->cdb[1] == 0 &&
+ ccb->adapter_status == BLOGIC_CMD_CMPLT_NORMAL) {
+ struct blogic_tgt_flags *tgt_flags =
+ &adapter->tgt_flags[ccb->tgt_id];
+ struct scsi_inquiry *inquiry =
+ (struct scsi_inquiry *) scsi_sglist(command);
+ tgt_flags->tgt_exists = true;
+ tgt_flags->tagq_ok = inquiry->CmdQue;
+ tgt_flags->wide_ok = inquiry->WBus16;
+ }
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ blogic_dealloc_ccb(ccb, 1);
+ /*
+ Call the SCSI Command Completion Routine.
+ */
+ command->scsi_done(command);
+ }
+ }
+ adapter->processing_ccbs = false;
+}
+
+
+/*
+ blogic_inthandler handles hardware interrupts from BusLogic Host
+ Adapters.
+*/
+
+static irqreturn_t blogic_inthandler(int irq_ch, void *devid)
+{
+ struct blogic_adapter *adapter = (struct blogic_adapter *) devid;
+ unsigned long processor_flag;
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ spin_lock_irqsave(adapter->scsi_host->host_lock, processor_flag);
+ /*
+ Handle Interrupts appropriately for each Host Adapter type.
+ */
+ if (blogic_multimaster_type(adapter)) {
+ union blogic_int_reg intreg;
+ /*
+ Read the Host Adapter Interrupt Register.
+ */
+ intreg.all = blogic_rdint(adapter);
+ if (intreg.ir.int_valid) {
+ /*
+ Acknowledge the interrupt and reset the Host Adapter
+ Interrupt Register.
+ */
+ blogic_intreset(adapter);
+ /*
+ Process valid External SCSI Bus Reset and Incoming
+ Mailbox Loaded Interrupts. Command Complete
+ Interrupts are noted, and Outgoing Mailbox Available
+ Interrupts are ignored, as they are never enabled.
+ */
+ if (intreg.ir.ext_busreset)
+ adapter->adapter_extreset = true;
+ else if (intreg.ir.mailin_loaded)
+ blogic_scan_inbox(adapter);
+ else if (intreg.ir.cmd_complete)
+ adapter->adapter_cmd_complete = true;
+ }
+ } else {
+ /*
+ Check if there is a pending interrupt for this Host Adapter.
+ */
+ if (FlashPoint_InterruptPending(adapter->cardhandle))
+ switch (FlashPoint_HandleInterrupt(adapter->cardhandle)) {
+ case FPOINT_NORMAL_INT:
+ break;
+ case FPOINT_EXT_RESET:
+ adapter->adapter_extreset = true;
+ break;
+ case FPOINT_INTERN_ERR:
+ blogic_warn("Internal FlashPoint Error detected - Resetting Host Adapter\n", adapter);
+ adapter->adapter_intern_err = true;
+ break;
+ }
+ }
+ /*
+ Process any completed CCBs.
+ */
+ if (adapter->firstccb != NULL)
+ blogic_process_ccbs(adapter);
+ /*
+ Reset the Host Adapter if requested.
+ */
+ if (adapter->adapter_extreset) {
+ blogic_warn("Resetting %s due to External SCSI Bus Reset\n", adapter, adapter->full_model);
+ blogic_inc_count(&adapter->ext_resets);
+ blogic_resetadapter(adapter, false);
+ adapter->adapter_extreset = false;
+ } else if (adapter->adapter_intern_err) {
+ blogic_warn("Resetting %s due to Host Adapter Internal Error\n", adapter, adapter->full_model);
+ blogic_inc_count(&adapter->adapter_intern_errors);
+ blogic_resetadapter(adapter, true);
+ adapter->adapter_intern_err = false;
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, processor_flag);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ blogic_write_outbox places CCB and Action Code into an Outgoing
+ Mailbox for execution by Host Adapter. The Host Adapter's Lock should
+ already have been acquired by the caller.
+*/
+
+static bool blogic_write_outbox(struct blogic_adapter *adapter,
+ enum blogic_action action, struct blogic_ccb *ccb)
+{
+ struct blogic_outbox *next_outbox;
+
+ next_outbox = adapter->next_outbox;
+ if (next_outbox->action == BLOGIC_OUTBOX_FREE) {
+ ccb->status = BLOGIC_CCB_ACTIVE;
+ /*
+ The CCB field must be written before the Action Code field
+ since the Host Adapter is operating asynchronously and the
+ locking code does not protect against simultaneous access
+ by the Host Adapter.
+ */
+ next_outbox->ccb = ccb->dma_handle;
+ next_outbox->action = action;
+ blogic_execmbox(adapter);
+ if (++next_outbox > adapter->last_outbox)
+ next_outbox = adapter->first_outbox;
+ adapter->next_outbox = next_outbox;
+ if (action == BLOGIC_MBOX_START) {
+ adapter->active_cmds[ccb->tgt_id]++;
+ if (ccb->opcode != BLOGIC_BDR)
+ adapter->tgt_stats[ccb->tgt_id].cmds_tried++;
+ }
+ return true;
+ }
+ return false;
+}
+
+/* Error Handling (EH) support */
+
+static int blogic_hostreset(struct scsi_cmnd *SCpnt)
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) SCpnt->device->host->hostdata;
+
+ unsigned int id = SCpnt->device->id;
+ struct blogic_tgt_stats *stats = &adapter->tgt_stats[id];
+ int rc;
+
+ spin_lock_irq(SCpnt->device->host->host_lock);
+
+ blogic_inc_count(&stats->adatper_reset_req);
+
+ rc = blogic_resetadapter(adapter, false);
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+ return rc;
+}
+
+/*
+ blogic_qcmd creates a CCB for Command and places it into an
+ Outgoing Mailbox for execution by the associated Host Adapter.
+*/
+
+static int blogic_qcmd_lck(struct scsi_cmnd *command,
+ void (*comp_cb) (struct scsi_cmnd *))
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) command->device->host->hostdata;
+ struct blogic_tgt_flags *tgt_flags =
+ &adapter->tgt_flags[command->device->id];
+ struct blogic_tgt_stats *tgt_stats = adapter->tgt_stats;
+ unsigned char *cdb = command->cmnd;
+ int cdblen = command->cmd_len;
+ int tgt_id = command->device->id;
+ int lun = command->device->lun;
+ int buflen = scsi_bufflen(command);
+ int count;
+ struct blogic_ccb *ccb;
+ dma_addr_t sense_buf;
+
+ /*
+ SCSI REQUEST_SENSE commands will be executed automatically by the
+ Host Adapter for any errors, so they should not be executed
+ explicitly unless the Sense Data is zero indicating that no error
+ occurred.
+ */
+ if (cdb[0] == REQUEST_SENSE && command->sense_buffer[0] != 0) {
+ command->result = DID_OK << 16;
+ comp_cb(command);
+ return 0;
+ }
+ /*
+ Allocate a CCB from the Host Adapter's free list. In the unlikely
+ event that there are none available and memory allocation fails,
+ wait 1 second and try again. If that fails, the Host Adapter is
+ probably hung so signal an error as a Host Adapter Hard Reset
+ should be initiated soon.
+ */
+ ccb = blogic_alloc_ccb(adapter);
+ if (ccb == NULL) {
+ spin_unlock_irq(adapter->scsi_host->host_lock);
+ blogic_delay(1);
+ spin_lock_irq(adapter->scsi_host->host_lock);
+ ccb = blogic_alloc_ccb(adapter);
+ if (ccb == NULL) {
+ command->result = DID_ERROR << 16;
+ comp_cb(command);
+ return 0;
+ }
+ }
+
+ /*
+ Initialize the fields in the BusLogic Command Control Block (CCB).
+ */
+ count = scsi_dma_map(command);
+ BUG_ON(count < 0);
+ if (count) {
+ struct scatterlist *sg;
+ int i;
+
+ ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
+ ccb->datalen = count * sizeof(struct blogic_sg_seg);
+ if (blogic_multimaster_type(adapter))
+ ccb->data = (void *)((unsigned int) ccb->dma_handle +
+ ((unsigned long) &ccb->sglist -
+ (unsigned long) ccb));
+ else
+ ccb->data = ccb->sglist;
+
+ scsi_for_each_sg(command, sg, count, i) {
+ ccb->sglist[i].segbytes = sg_dma_len(sg);
+ ccb->sglist[i].segdata = sg_dma_address(sg);
+ }
+ } else if (!count) {
+ ccb->opcode = BLOGIC_INITIATOR_CCB;
+ ccb->datalen = buflen;
+ ccb->data = 0;
+ }
+
+ switch (cdb[0]) {
+ case READ_6:
+ case READ_10:
+ ccb->datadir = BLOGIC_DATAIN_CHECKED;
+ tgt_stats[tgt_id].read_cmds++;
+ blogic_addcount(&tgt_stats[tgt_id].bytesread, buflen);
+ blogic_incszbucket(tgt_stats[tgt_id].read_sz_buckets, buflen);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ ccb->datadir = BLOGIC_DATAOUT_CHECKED;
+ tgt_stats[tgt_id].write_cmds++;
+ blogic_addcount(&tgt_stats[tgt_id].byteswritten, buflen);
+ blogic_incszbucket(tgt_stats[tgt_id].write_sz_buckets, buflen);
+ break;
+ default:
+ ccb->datadir = BLOGIC_UNCHECKED_TX;
+ break;
+ }
+ ccb->cdblen = cdblen;
+ ccb->adapter_status = 0;
+ ccb->tgt_status = 0;
+ ccb->tgt_id = tgt_id;
+ ccb->lun = lun;
+ ccb->tag_enable = false;
+ ccb->legacytag_enable = false;
+ /*
+ BusLogic recommends that after a Reset the first couple of
+ commands that are sent to a Target Device be sent in a non
+ Tagged Queue fashion so that the Host Adapter and Target Device
+ can establish Synchronous and Wide Transfer before Queue Tag
+ messages can interfere with the Synchronous and Wide Negotiation
+ messages. By waiting to enable Tagged Queuing until after the
+ first BLOGIC_MAX_TAG_DEPTH commands have been queued, it is
+ assured that after a Reset any pending commands are requeued
+ before Tagged Queuing is enabled and that the Tagged Queuing
+ message will not occur while the partition table is being printed.
+ In addition, some devices do not properly handle the transition
+ from non-tagged to tagged commands, so it is necessary to wait
+ until there are no pending commands for a target device
+ before queuing tagged commands.
+ */
+ if (adapter->cmds_since_rst[tgt_id]++ >= BLOGIC_MAX_TAG_DEPTH &&
+ !tgt_flags->tagq_active &&
+ adapter->active_cmds[tgt_id] == 0
+ && tgt_flags->tagq_ok &&
+ (adapter->tagq_ok & (1 << tgt_id))) {
+ tgt_flags->tagq_active = true;
+ blogic_notice("Tagged Queuing now active for Target %d\n",
+ adapter, tgt_id);
+ }
+ if (tgt_flags->tagq_active) {
+ enum blogic_queuetag queuetag = BLOGIC_SIMPLETAG;
+ /*
+ When using Tagged Queuing with Simple Queue Tags, it
+ appears that disk drive controllers do not guarantee that
+ a queued command will not remain in a disconnected state
+ indefinitely if commands that read or write nearer the
+ head position continue to arrive without interruption.
+ Therefore, for each Target Device this driver keeps track
+ of the last time either the queue was empty or an Ordered
+ Queue Tag was issued. If more than 4 seconds (one fifth
+ of the 20 second disk timeout) have elapsed since this
+ last sequence point, this command will be issued with an
+ Ordered Queue Tag rather than a Simple Queue Tag, which
+ forces the Target Device to complete all previously
+ queued commands before this command may be executed.
+ */
+ if (adapter->active_cmds[tgt_id] == 0)
+ adapter->last_seqpoint[tgt_id] = jiffies;
+ else if (time_after(jiffies,
+ adapter->last_seqpoint[tgt_id] + 4 * HZ)) {
+ adapter->last_seqpoint[tgt_id] = jiffies;
+ queuetag = BLOGIC_ORDEREDTAG;
+ }
+ if (adapter->ext_lun) {
+ ccb->tag_enable = true;
+ ccb->queuetag = queuetag;
+ } else {
+ ccb->legacytag_enable = true;
+ ccb->legacy_tag = queuetag;
+ }
+ }
+ memcpy(ccb->cdb, cdb, cdblen);
+ ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
+ ccb->command = command;
+ sense_buf = pci_map_single(adapter->pci_device,
+ command->sense_buffer, ccb->sense_datalen,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
+ blogic_err("DMA mapping for sense data buffer failed\n",
+ adapter);
+ blogic_dealloc_ccb(ccb, 0);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ ccb->sensedata = sense_buf;
+ command->scsi_done = comp_cb;
+ if (blogic_multimaster_type(adapter)) {
+ /*
+ Place the CCB in an Outgoing Mailbox. The higher levels
+ of the SCSI Subsystem should not attempt to queue more
+ commands than can be placed in Outgoing Mailboxes, so
+ there should always be one free. In the unlikely event
+ that there are none available, wait 1 second and try
+ again. If that fails, the Host Adapter is probably hung
+ so signal an error as a Host Adapter Hard Reset should
+ be initiated soon.
+ */
+ if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
+ spin_unlock_irq(adapter->scsi_host->host_lock);
+ blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter);
+ blogic_delay(1);
+ spin_lock_irq(adapter->scsi_host->host_lock);
+ if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
+ ccb)) {
+ blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
+ blogic_dealloc_ccb(ccb, 1);
+ command->result = DID_ERROR << 16;
+ command->scsi_done(command);
+ }
+ }
+ } else {
+ /*
+ Call the FlashPoint SCCB Manager to start execution of
+ the CCB.
+ */
+ ccb->status = BLOGIC_CCB_ACTIVE;
+ adapter->active_cmds[tgt_id]++;
+ tgt_stats[tgt_id].cmds_tried++;
+ FlashPoint_StartCCB(adapter->cardhandle, ccb);
+ /*
+ The Command may have already completed and
+ blogic_qcompleted_ccb been called, or it may still be
+ pending.
+ */
+ if (ccb->status == BLOGIC_CCB_COMPLETE)
+ blogic_process_ccbs(adapter);
+ }
+ return 0;
+}
+
+static DEF_SCSI_QCMD(blogic_qcmd)
+
+#if 0
+/*
+ blogic_abort aborts Command if possible.
+*/
+
+static int blogic_abort(struct scsi_cmnd *command)
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) command->device->host->hostdata;
+
+ int tgt_id = command->device->id;
+ struct blogic_ccb *ccb;
+ blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_request);
+
+ /*
+ Attempt to find an Active CCB for this Command. If no Active
+ CCB for this Command is found, then no Abort is necessary.
+ */
+ for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
+ if (ccb->command == command)
+ break;
+ if (ccb == NULL) {
+ blogic_warn("Unable to Abort Command to Target %d - No CCB Found\n", adapter, tgt_id);
+ return SUCCESS;
+ } else if (ccb->status == BLOGIC_CCB_COMPLETE) {
+ blogic_warn("Unable to Abort Command to Target %d - CCB Completed\n", adapter, tgt_id);
+ return SUCCESS;
+ } else if (ccb->status == BLOGIC_CCB_RESET) {
+ blogic_warn("Unable to Abort Command to Target %d - CCB Reset\n", adapter, tgt_id);
+ return SUCCESS;
+ }
+ if (blogic_multimaster_type(adapter)) {
+ /*
+ Attempt to Abort this CCB. MultiMaster Firmware versions
+ prior to 5.xx do not generate Abort Tag messages, but only
+ generate the non-tagged Abort message. Since non-tagged
+ commands are not sent by the Host Adapter until the queue
+ of outstanding tagged commands has completed, and the
+ Abort message is treated as a non-tagged command, it is
+ effectively impossible to abort commands when Tagged
+ Queuing is active. Firmware version 5.xx does generate
+ Abort Tag messages, so it is possible to abort commands
+ when Tagged Queuing is active.
+ */
+ if (adapter->tgt_flags[tgt_id].tagq_active &&
+ adapter->fw_ver[0] < '5') {
+ blogic_warn("Unable to Abort CCB #%ld to Target %d - Abort Tag Not Supported\n", adapter, ccb->serial, tgt_id);
+ return FAILURE;
+ } else if (blogic_write_outbox(adapter, BLOGIC_MBOX_ABORT,
+ ccb)) {
+ blogic_warn("Aborting CCB #%ld to Target %d\n",
+ adapter, ccb->serial, tgt_id);
+ blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried);
+ return SUCCESS;
+ } else {
+ blogic_warn("Unable to Abort CCB #%ld to Target %d - No Outgoing Mailboxes\n", adapter, ccb->serial, tgt_id);
+ return FAILURE;
+ }
+ } else {
+ /*
+ Call the FlashPoint SCCB Manager to abort execution of
+ the CCB.
+ */
+ blogic_warn("Aborting CCB #%ld to Target %d\n", adapter,
+ ccb->serial, tgt_id);
+ blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried);
+ FlashPoint_AbortCCB(adapter->cardhandle, ccb);
+ /*
+ The Abort may have already been completed and
+ blogic_qcompleted_ccb been called, or it
+ may still be pending.
+ */
+ if (ccb->status == BLOGIC_CCB_COMPLETE)
+ blogic_process_ccbs(adapter);
+ return SUCCESS;
+ }
+ return SUCCESS;
+}
+
+#endif
+/*
+ blogic_resetadapter resets Host Adapter if possible, marking all
+ currently executing SCSI Commands as having been Reset.
+*/
+
+static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
+{
+ struct blogic_ccb *ccb;
+ int tgt_id;
+
+ /*
+ * Attempt to Reset and Reinitialize the Host Adapter.
+ */
+
+ if (!(blogic_hwreset(adapter, hard_reset) &&
+ blogic_initadapter(adapter))) {
+ blogic_err("Resetting %s Failed\n", adapter,
+ adapter->full_model);
+ return FAILURE;
+ }
+
+ /*
+ * Deallocate all currently executing CCBs.
+ */
+
+ for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
+ if (ccb->status == BLOGIC_CCB_ACTIVE)
+ blogic_dealloc_ccb(ccb, 1);
+ /*
+ * Wait a few seconds between the Host Adapter Hard Reset which
+ * initiates a SCSI Bus Reset and issuing any SCSI Commands. Some
+ * SCSI devices get confused if they receive SCSI Commands too soon
+ * after a SCSI Bus Reset.
+ */
+
+ if (hard_reset) {
+ spin_unlock_irq(adapter->scsi_host->host_lock);
+ blogic_delay(adapter->bus_settle_time);
+ spin_lock_irq(adapter->scsi_host->host_lock);
+ }
+
+ for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) {
+ adapter->last_resettried[tgt_id] = jiffies;
+ adapter->last_resetdone[tgt_id] = jiffies;
+ }
+ return SUCCESS;
+}
+
+/*
+ blogic_diskparam returns the Heads/Sectors/Cylinders BIOS Disk
+ Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and
+ the appropriate number of cylinders so as not to exceed drive capacity. In
+ order for disks equal to or larger than 1 GB to be addressable by the BIOS
+ without exceeding the BIOS limitation of 1024 cylinders, Extended Translation
+ may be enabled in AutoSCSI on FlashPoint Host Adapters and on "W" and "C"
+ series MultiMaster Host Adapters, or by a dip switch setting on "S" and "A"
+ series MultiMaster Host Adapters. With Extended Translation enabled, drives
+ between 1 GB inclusive and 2 GB exclusive are given a disk geometry of 128
+ heads and 32 sectors, and drives above 2 GB inclusive are given a disk
+ geometry of 255 heads and 63 sectors. However, if the BIOS detects that the
+ Extended Translation setting does not match the geometry in the partition
+ table, then the translation inferred from the partition table will be used by
+ the BIOS, and a warning may be displayed.
+*/
+
+static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int *params)
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) sdev->host->hostdata;
+ struct bios_diskparam *diskparam = (struct bios_diskparam *) params;
+ unsigned char *buf;
+
+ if (adapter->ext_trans_enable && capacity >= 2 * 1024 * 1024 /* 1 GB in 512 byte sectors */) {
+ if (capacity >= 4 * 1024 * 1024 /* 2 GB in 512 byte sectors */) {
+ diskparam->heads = 255;
+ diskparam->sectors = 63;
+ } else {
+ diskparam->heads = 128;
+ diskparam->sectors = 32;
+ }
+ } else {
+ diskparam->heads = 64;
+ diskparam->sectors = 32;
+ }
+ diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors);
+ buf = scsi_bios_ptable(dev);
+ if (buf == NULL)
+ return 0;
+ /*
+ If the boot sector partition table flag is valid, search for
+ a partition table entry whose end_head matches one of the
+ standard BusLogic geometry translations (64/32, 128/32, or 255/63).
+ */
+ if (*(unsigned short *) (buf + 64) == 0xAA55) {
+ struct partition *part1_entry = (struct partition *) buf;
+ struct partition *part_entry = part1_entry;
+ int saved_cyl = diskparam->cylinders, part_no;
+ unsigned char part_end_head = 0, part_end_sector = 0;
+
+ for (part_no = 0; part_no < 4; part_no++) {
+ part_end_head = part_entry->end_head;
+ part_end_sector = part_entry->end_sector & 0x3F;
+ if (part_end_head == 64 - 1) {
+ diskparam->heads = 64;
+ diskparam->sectors = 32;
+ break;
+ } else if (part_end_head == 128 - 1) {
+ diskparam->heads = 128;
+ diskparam->sectors = 32;
+ break;
+ } else if (part_end_head == 255 - 1) {
+ diskparam->heads = 255;
+ diskparam->sectors = 63;
+ break;
+ }
+ part_entry++;
+ }
+ if (part_no == 4) {
+ part_end_head = part1_entry->end_head;
+ part_end_sector = part1_entry->end_sector & 0x3F;
+ }
+ diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors);
+ if (part_no < 4 && part_end_sector == diskparam->sectors) {
+ if (diskparam->cylinders != saved_cyl)
+ blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors);
+ } else if (part_end_head > 0 || part_end_sector > 0) {
+ blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
+ blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
+ }
+ }
+ kfree(buf);
+ return 0;
+}
+
+
+/*
+ BugLogic_ProcDirectoryInfo implements /proc/scsi/BusLogic/<N>.
+*/
+
+static int blogic_write_info(struct Scsi_Host *shost, char *procbuf,
+ int bytes_avail)
+{
+ struct blogic_adapter *adapter =
+ (struct blogic_adapter *) shost->hostdata;
+ struct blogic_tgt_stats *tgt_stats;
+
+ tgt_stats = adapter->tgt_stats;
+ adapter->ext_resets = 0;
+ adapter->adapter_intern_errors = 0;
+ memset(tgt_stats, 0, BLOGIC_MAXDEV * sizeof(struct blogic_tgt_stats));
+ return 0;
+}
+
+static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct blogic_adapter *adapter = (struct blogic_adapter *) shost->hostdata;
+ struct blogic_tgt_stats *tgt_stats;
+ int tgt;
+
+ tgt_stats = adapter->tgt_stats;
+ seq_write(m, adapter->msgbuf, adapter->msgbuflen);
+ seq_printf(m, "\n\
+Current Driver Queue Depth: %d\n\
+Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs);
+ seq_puts(m, "\n\n\
+ DATA TRANSFER STATISTICS\n\
+\n\
+Target Tagged Queuing Queue Depth Active Attempted Completed\n\
+====== ============== =========== ====== ========= =========\n");
+ for (tgt = 0; tgt < adapter->maxdev; tgt++) {
+ struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
+ if (!tgt_flags->tgt_exists)
+ continue;
+ seq_printf(m, " %2d %s", tgt, (tgt_flags->tagq_ok ? (tgt_flags->tagq_active ? " Active" : (adapter->tagq_ok & (1 << tgt)
+ ? " Permitted" : " Disabled"))
+ : "Not Supported"));
+ seq_printf(m,
+ " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete);
+ }
+ seq_puts(m, "\n\
+Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\
+====== ============= ============== =================== ===================\n");
+ for (tgt = 0; tgt < adapter->maxdev; tgt++) {
+ struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
+ if (!tgt_flags->tgt_exists)
+ continue;
+ seq_printf(m, " %2d %9u %9u", tgt, tgt_stats[tgt].read_cmds, tgt_stats[tgt].write_cmds);
+ if (tgt_stats[tgt].bytesread.billions > 0)
+ seq_printf(m, " %9u%09u", tgt_stats[tgt].bytesread.billions, tgt_stats[tgt].bytesread.units);
+ else
+ seq_printf(m, " %9u", tgt_stats[tgt].bytesread.units);
+ if (tgt_stats[tgt].byteswritten.billions > 0)
+ seq_printf(m, " %9u%09u\n", tgt_stats[tgt].byteswritten.billions, tgt_stats[tgt].byteswritten.units);
+ else
+ seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units);
+ }
+ seq_puts(m, "\n\
+Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
+====== ======= ========= ========= ========= ========= =========\n");
+ for (tgt = 0; tgt < adapter->maxdev; tgt++) {
+ struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
+ if (!tgt_flags->tgt_exists)
+ continue;
+ seq_printf(m,
+ " %2d Read %9u %9u %9u %9u %9u\n", tgt,
+ tgt_stats[tgt].read_sz_buckets[0],
+ tgt_stats[tgt].read_sz_buckets[1], tgt_stats[tgt].read_sz_buckets[2], tgt_stats[tgt].read_sz_buckets[3], tgt_stats[tgt].read_sz_buckets[4]);
+ seq_printf(m,
+ " %2d Write %9u %9u %9u %9u %9u\n", tgt,
+ tgt_stats[tgt].write_sz_buckets[0],
+ tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]);
+ }
+ seq_puts(m, "\n\
+Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
+====== ======= ========= ========= ========= ========= =========\n");
+ for (tgt = 0; tgt < adapter->maxdev; tgt++) {
+ struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
+ if (!tgt_flags->tgt_exists)
+ continue;
+ seq_printf(m,
+ " %2d Read %9u %9u %9u %9u %9u\n", tgt,
+ tgt_stats[tgt].read_sz_buckets[5],
+ tgt_stats[tgt].read_sz_buckets[6], tgt_stats[tgt].read_sz_buckets[7], tgt_stats[tgt].read_sz_buckets[8], tgt_stats[tgt].read_sz_buckets[9]);
+ seq_printf(m,
+ " %2d Write %9u %9u %9u %9u %9u\n", tgt,
+ tgt_stats[tgt].write_sz_buckets[5],
+ tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]);
+ }
+ seq_puts(m, "\n\n\
+ ERROR RECOVERY STATISTICS\n\
+\n\
+ Command Aborts Bus Device Resets Host Adapter Resets\n\
+Target Requested Completed Requested Completed Requested Completed\n\
+ ID \\\\\\\\ Attempted //// \\\\\\\\ Attempted //// \\\\\\\\ Attempted ////\n\
+====== ===== ===== ===== ===== ===== ===== ===== ===== =====\n");
+ for (tgt = 0; tgt < adapter->maxdev; tgt++) {
+ struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
+ if (!tgt_flags->tgt_exists)
+ continue;
+ seq_printf(m, "\
+ %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", tgt, tgt_stats[tgt].aborts_request, tgt_stats[tgt].aborts_tried, tgt_stats[tgt].aborts_done, tgt_stats[tgt].bdr_request, tgt_stats[tgt].bdr_tried, tgt_stats[tgt].bdr_done, tgt_stats[tgt].adatper_reset_req, tgt_stats[tgt].adapter_reset_attempt, tgt_stats[tgt].adapter_reset_done);
+ }
+ seq_printf(m, "\nExternal Host Adapter Resets: %d\n", adapter->ext_resets);
+ seq_printf(m, "Host Adapter Internal Errors: %d\n", adapter->adapter_intern_errors);
+ return 0;
+}
+
+
+/*
+ blogic_msg prints Driver Messages.
+*/
+
+static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
+ struct blogic_adapter *adapter, ...)
+{
+ static char buf[BLOGIC_LINEBUF_SIZE];
+ static bool begin = true;
+ va_list args;
+ int len = 0;
+
+ va_start(args, adapter);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+ if (msglevel == BLOGIC_ANNOUNCE_LEVEL) {
+ static int msglines = 0;
+ strcpy(&adapter->msgbuf[adapter->msgbuflen], buf);
+ adapter->msgbuflen += len;
+ if (++msglines <= 2)
+ printk("%sscsi: %s", blogic_msglevelmap[msglevel], buf);
+ } else if (msglevel == BLOGIC_INFO_LEVEL) {
+ strcpy(&adapter->msgbuf[adapter->msgbuflen], buf);
+ adapter->msgbuflen += len;
+ if (begin) {
+ if (buf[0] != '\n' || len > 1)
+ printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
+ } else
+ printk("%s", buf);
+ } else {
+ if (begin) {
+ if (adapter != NULL && adapter->adapter_initd)
+ printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
+ else
+ printk("%s%s", blogic_msglevelmap[msglevel], buf);
+ } else
+ printk("%s", buf);
+ }
+ begin = (buf[len - 1] == '\n');
+}
+
+
+/*
+ blogic_parse parses an individual option keyword. It returns true
+ and updates the pointer if the keyword is recognized and false otherwise.
+*/
+
+static bool __init blogic_parse(char **str, char *keyword)
+{
+ char *pointer = *str;
+ while (*keyword != '\0') {
+ char strch = *pointer++;
+ char keywordch = *keyword++;
+ if (strch >= 'A' && strch <= 'Z')
+ strch += 'a' - 'Z';
+ if (keywordch >= 'A' && keywordch <= 'Z')
+ keywordch += 'a' - 'Z';
+ if (strch != keywordch)
+ return false;
+ }
+ *str = pointer;
+ return true;
+}
+
+
+/*
+ blogic_parseopts handles processing of BusLogic Driver Options
+ specifications.
+
+ BusLogic Driver Options may be specified either via the Linux Kernel Command
+ Line or via the Loadable Kernel Module Installation Facility. Driver Options
+ for multiple host adapters may be specified either by separating the option
+ strings by a semicolon, or by specifying multiple "BusLogic=" strings on the
+ command line. Individual option specifications for a single host adapter are
+ separated by commas. The Probing and Debugging Options apply to all host
+ adapters whereas the remaining options apply individually only to the
+ selected host adapter.
+
+ The BusLogic Driver Probing Options are described in
+ <file:Documentation/scsi/BusLogic.txt>.
+*/
+
+static int __init blogic_parseopts(char *options)
+{
+ while (true) {
+ struct blogic_drvr_options *drvr_opts =
+ &blogic_drvr_options[blogic_drvr_options_count++];
+ int tgt_id;
+
+ memset(drvr_opts, 0, sizeof(struct blogic_drvr_options));
+ while (*options != '\0' && *options != ';') {
+ /* Probing Options. */
+ if (blogic_parse(&options, "IO:")) {
+ unsigned long io_addr = simple_strtoul(options,
+ &options, 0);
+ blogic_probe_options.limited_isa = true;
+ switch (io_addr) {
+ case 0x330:
+ blogic_probe_options.probe330 = true;
+ break;
+ case 0x334:
+ blogic_probe_options.probe334 = true;
+ break;
+ case 0x230:
+ blogic_probe_options.probe230 = true;
+ break;
+ case 0x234:
+ blogic_probe_options.probe234 = true;
+ break;
+ case 0x130:
+ blogic_probe_options.probe130 = true;
+ break;
+ case 0x134:
+ blogic_probe_options.probe134 = true;
+ break;
+ default:
+ blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr);
+ return 0;
+ }
+ } else if (blogic_parse(&options, "NoProbeISA"))
+ blogic_probe_options.noprobe_isa = true;
+ else if (blogic_parse(&options, "NoProbePCI"))
+ blogic_probe_options.noprobe_pci = true;
+ else if (blogic_parse(&options, "NoProbe"))
+ blogic_probe_options.noprobe = true;
+ else if (blogic_parse(&options, "NoSortPCI"))
+ blogic_probe_options.nosort_pci = true;
+ else if (blogic_parse(&options, "MultiMasterFirst"))
+ blogic_probe_options.multimaster_first = true;
+ else if (blogic_parse(&options, "FlashPointFirst"))
+ blogic_probe_options.flashpoint_first = true;
+ /* Tagged Queuing Options. */
+ else if (blogic_parse(&options, "QueueDepth:[") ||
+ blogic_parse(&options, "QD:[")) {
+ for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) {
+ unsigned short qdepth = simple_strtoul(options, &options, 0);
+ if (qdepth > BLOGIC_MAX_TAG_DEPTH) {
+ blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+ return 0;
+ }
+ drvr_opts->qdepth[tgt_id] = qdepth;
+ if (*options == ',')
+ options++;
+ else if (*options == ']')
+ break;
+ else {
+ blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options);
+ return 0;
+ }
+ }
+ if (*options != ']') {
+ blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options);
+ return 0;
+ } else
+ options++;
+ } else if (blogic_parse(&options, "QueueDepth:") || blogic_parse(&options, "QD:")) {
+ unsigned short qdepth = simple_strtoul(options, &options, 0);
+ if (qdepth == 0 ||
+ qdepth > BLOGIC_MAX_TAG_DEPTH) {
+ blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+ return 0;
+ }
+ drvr_opts->common_qdepth = qdepth;
+ for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++)
+ drvr_opts->qdepth[tgt_id] = qdepth;
+ } else if (blogic_parse(&options, "TaggedQueuing:") ||
+ blogic_parse(&options, "TQ:")) {
+ if (blogic_parse(&options, "Default")) {
+ drvr_opts->tagq_ok = 0x0000;
+ drvr_opts->tagq_ok_mask = 0x0000;
+ } else if (blogic_parse(&options, "Enable")) {
+ drvr_opts->tagq_ok = 0xFFFF;
+ drvr_opts->tagq_ok_mask = 0xFFFF;
+ } else if (blogic_parse(&options, "Disable")) {
+ drvr_opts->tagq_ok = 0x0000;
+ drvr_opts->tagq_ok_mask = 0xFFFF;
+ } else {
+ unsigned short tgt_bit;
+ for (tgt_id = 0, tgt_bit = 1;
+ tgt_id < BLOGIC_MAXDEV;
+ tgt_id++, tgt_bit <<= 1)
+ switch (*options++) {
+ case 'Y':
+ drvr_opts->tagq_ok |= tgt_bit;
+ drvr_opts->tagq_ok_mask |= tgt_bit;
+ break;
+ case 'N':
+ drvr_opts->tagq_ok &= ~tgt_bit;
+ drvr_opts->tagq_ok_mask |= tgt_bit;
+ break;
+ case 'X':
+ break;
+ default:
+ options--;
+ tgt_id = BLOGIC_MAXDEV;
+ break;
+ }
+ }
+ }
+ /* Miscellaneous Options. */
+ else if (blogic_parse(&options, "BusSettleTime:") ||
+ blogic_parse(&options, "BST:")) {
+ unsigned short bus_settle_time =
+ simple_strtoul(options, &options, 0);
+ if (bus_settle_time > 5 * 60) {
+ blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
+ return 0;
+ }
+ drvr_opts->bus_settle_time = bus_settle_time;
+ } else if (blogic_parse(&options,
+ "InhibitTargetInquiry"))
+ drvr_opts->stop_tgt_inquiry = true;
+ /* Debugging Options. */
+ else if (blogic_parse(&options, "TraceProbe"))
+ blogic_global_options.trace_probe = true;
+ else if (blogic_parse(&options, "TraceHardwareReset"))
+ blogic_global_options.trace_hw_reset = true;
+ else if (blogic_parse(&options, "TraceConfiguration"))
+ blogic_global_options.trace_config = true;
+ else if (blogic_parse(&options, "TraceErrors"))
+ blogic_global_options.trace_err = true;
+ else if (blogic_parse(&options, "Debug")) {
+ blogic_global_options.trace_probe = true;
+ blogic_global_options.trace_hw_reset = true;
+ blogic_global_options.trace_config = true;
+ blogic_global_options.trace_err = true;
+ }
+ if (*options == ',')
+ options++;
+ else if (*options != ';' && *options != '\0') {
+ blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options);
+ *options = '\0';
+ }
+ }
+ if (!(blogic_drvr_options_count == 0 ||
+ blogic_probeinfo_count == 0 ||
+ blogic_drvr_options_count == blogic_probeinfo_count)) {
+ blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL);
+ return 0;
+ }
+ /*
+ Tagged Queuing is disabled when the Queue Depth is 1 since queuing
+ multiple commands is not possible.
+ */
+ for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++)
+ if (drvr_opts->qdepth[tgt_id] == 1) {
+ unsigned short tgt_bit = 1 << tgt_id;
+ drvr_opts->tagq_ok &= ~tgt_bit;
+ drvr_opts->tagq_ok_mask |= tgt_bit;
+ }
+ if (*options == ';')
+ options++;
+ if (*options == '\0')
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ Get it all started
+*/
+
+static struct scsi_host_template blogic_template = {
+ .module = THIS_MODULE,
+ .proc_name = "BusLogic",
+ .write_info = blogic_write_info,
+ .show_info = blogic_show_info,
+ .name = "BusLogic",
+ .info = blogic_drvr_info,
+ .queuecommand = blogic_qcmd,
+ .slave_configure = blogic_slaveconfig,
+ .bios_param = blogic_diskparam,
+ .eh_host_reset_handler = blogic_hostreset,
+#if 0
+ .eh_abort_handler = blogic_abort,
+#endif
+ .unchecked_isa_dma = 1,
+ .max_sectors = 128,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+/*
+ blogic_setup handles processing of Kernel Command Line Arguments.
+*/
+
+static int __init blogic_setup(char *str)
+{
+ int ints[3];
+
+ (void) get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] != 0) {
+ blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL);
+ return 0;
+ }
+ if (str == NULL || *str == '\0')
+ return 0;
+ return blogic_parseopts(str);
+}
+
+/*
+ * Exit function. Deletes all hosts associated with this driver.
+ */
+
+static void __exit blogic_exit(void)
+{
+ struct blogic_adapter *ha, *next;
+
+ list_for_each_entry_safe(ha, next, &blogic_host_list, host_list)
+ blogic_deladapter(ha);
+}
+
+__setup("BusLogic=", blogic_setup);
+
+#ifdef MODULE
+/*static struct pci_device_id blogic_pci_tbl[] = {
+ { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { }
+};*/
+static const struct pci_device_id blogic_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)},
+ {0, },
+};
+#endif
+MODULE_DEVICE_TABLE(pci, blogic_pci_tbl);
+
+module_init(blogic_init);
+module_exit(blogic_exit);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
new file mode 100644
index 000000000..b53ec2f1e
--- /dev/null
+++ b/drivers/scsi/BusLogic.h
@@ -0,0 +1,1302 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
+
+ Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that any modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose
+ advice has been invaluable, to David Gentzel, for writing the original Linux
+ BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site.
+
+ Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB
+ Manager available as freely redistributable source code.
+
+*/
+
+#ifndef _BUSLOGIC_H
+#define _BUSLOGIC_H
+
+
+#ifndef PACKED
+#define PACKED __attribute__((packed))
+#endif
+
+/*
+ Define the maximum number of BusLogic Host Adapters supported by this driver.
+*/
+
+#define BLOGIC_MAX_ADAPTERS 16
+
+
+/*
+ Define the maximum number of Target Devices supported by this driver.
+*/
+
+#define BLOGIC_MAXDEV 16
+
+
+/*
+ Define the maximum number of Scatter/Gather Segments used by this driver.
+ For optimal performance, it is important that this limit be at least as
+ large as the largest single request generated by the I/O Subsystem.
+*/
+
+#define BLOGIC_SG_LIMIT 128
+
+
+/*
+ Define the maximum, maximum automatic, minimum automatic, and default Queue
+ Depth to allow for Target Devices depending on whether or not they support
+ Tagged Queuing and whether or not ISA Bounce Buffers are required.
+*/
+
+#define BLOGIC_MAX_TAG_DEPTH 64
+#define BLOGIC_MAX_AUTO_TAG_DEPTH 28
+#define BLOGIC_MIN_AUTO_TAG_DEPTH 7
+#define BLOGIC_TAG_DEPTH_BB 3
+#define BLOGIC_UNTAG_DEPTH 3
+#define BLOGIC_UNTAG_DEPTH_BB 2
+
+
+/*
+ Define the default amount of time in seconds to wait between a Host Adapter
+ Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI commands.
+ Some SCSI devices get confused if they receive SCSI commands too soon after
+ a SCSI Bus Reset.
+*/
+
+#define BLOGIC_BUS_SETTLE_TIME 2
+
+
+/*
+ Define the maximum number of Mailboxes that should be used for MultiMaster
+ Host Adapters. This number is chosen to be larger than the maximum Host
+ Adapter Queue Depth and small enough so that the Host Adapter structure
+ does not cross an allocation block size boundary.
+*/
+
+#define BLOGIC_MAX_MAILBOX 211
+
+
+/*
+ Define the number of CCBs that should be allocated as a group to optimize
+ Kernel memory allocation.
+*/
+
+#define BLOGIC_CCB_GRP_ALLOCSIZE 7
+
+
+/*
+ Define the Host Adapter Line and Message Buffer Sizes.
+*/
+
+#define BLOGIC_LINEBUF_SIZE 100
+#define BLOGIC_MSGBUF_SIZE 9700
+
+
+/*
+ Define the Driver Message Levels.
+*/
+
+enum blogic_msglevel {
+ BLOGIC_ANNOUNCE_LEVEL = 0,
+ BLOGIC_INFO_LEVEL = 1,
+ BLOGIC_NOTICE_LEVEL = 2,
+ BLOGIC_WARN_LEVEL = 3,
+ BLOGIC_ERR_LEVEL = 4
+};
+
+static char *blogic_msglevelmap[] = { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, KERN_ERR };
+
+
+/*
+ Define Driver Message macros.
+*/
+
+#define blogic_announce(format, args...) \
+ blogic_msg(BLOGIC_ANNOUNCE_LEVEL, format, ##args)
+
+#define blogic_info(format, args...) \
+ blogic_msg(BLOGIC_INFO_LEVEL, format, ##args)
+
+#define blogic_notice(format, args...) \
+ blogic_msg(BLOGIC_NOTICE_LEVEL, format, ##args)
+
+#define blogic_warn(format, args...) \
+ blogic_msg(BLOGIC_WARN_LEVEL, format, ##args)
+
+#define blogic_err(format, args...) \
+ blogic_msg(BLOGIC_ERR_LEVEL, format, ##args)
+
+
+/*
+ Define the types of BusLogic Host Adapters that are supported and the number
+ of I/O Addresses required by each type.
+*/
+
+enum blogic_adapter_type {
+ BLOGIC_MULTIMASTER = 1,
+ BLOGIC_FLASHPOINT = 2
+} PACKED;
+
+#define BLOGIC_MULTIMASTER_ADDR_COUNT 4
+#define BLOGIC_FLASHPOINT_ADDR_COUNT 256
+
+static int blogic_adapter_addr_count[3] = { 0, BLOGIC_MULTIMASTER_ADDR_COUNT, BLOGIC_FLASHPOINT_ADDR_COUNT };
+
+
+/*
+ Define macros for testing the Host Adapter Type.
+*/
+
+#ifdef CONFIG_SCSI_FLASHPOINT
+
+#define blogic_multimaster_type(adapter) \
+ (adapter->adapter_type == BLOGIC_MULTIMASTER)
+
+#define blogic_flashpoint_type(adapter) \
+ (adapter->adapter_type == BLOGIC_FLASHPOINT)
+
+#else
+
+#define blogic_multimaster_type(adapter) (true)
+#define blogic_flashpoint_type(adapter) (false)
+
+#endif
+
+
+/*
+ Define the possible Host Adapter Bus Types.
+*/
+
+enum blogic_adapter_bus_type {
+ BLOGIC_UNKNOWN_BUS = 0,
+ BLOGIC_ISA_BUS = 1,
+ BLOGIC_EISA_BUS = 2,
+ BLOGIC_PCI_BUS = 3,
+ BLOGIC_VESA_BUS = 4,
+ BLOGIC_MCA_BUS = 5
+} PACKED;
+
+static char *blogic_adapter_busnames[] = { "Unknown", "ISA", "EISA", "PCI", "VESA", "MCA" };
+
+static enum blogic_adapter_bus_type blogic_adater_bus_types[] = {
+ BLOGIC_VESA_BUS, /* BT-4xx */
+ BLOGIC_ISA_BUS, /* BT-5xx */
+ BLOGIC_MCA_BUS, /* BT-6xx */
+ BLOGIC_EISA_BUS, /* BT-7xx */
+ BLOGIC_UNKNOWN_BUS, /* BT-8xx */
+ BLOGIC_PCI_BUS /* BT-9xx */
+};
+
+/*
+ Define the possible Host Adapter BIOS Disk Geometry Translations.
+*/
+
+enum blogic_bios_diskgeometry {
+ BLOGIC_BIOS_NODISK = 0,
+ BLOGIC_BIOS_DISK64x32 = 1,
+ BLOGIC_BIOS_DISK128x32 = 2,
+ BLOGIC_BIOS_DISK255x63 = 3
+} PACKED;
+
+
+/*
+ Define a 10^18 Statistics Byte Counter data type.
+*/
+
+struct blogic_byte_count {
+ unsigned int units;
+ unsigned int billions;
+};
+
+
+/*
+ Define the structure for I/O Address and Bus Probing Information.
+*/
+
+struct blogic_probeinfo {
+ enum blogic_adapter_type adapter_type;
+ enum blogic_adapter_bus_type adapter_bus_type;
+ unsigned long io_addr;
+ unsigned long pci_addr;
+ struct pci_dev *pci_device;
+ unsigned char bus;
+ unsigned char dev;
+ unsigned char irq_ch;
+};
+
+/*
+ Define the Probe Options.
+*/
+
+struct blogic_probe_options {
+ bool noprobe:1; /* Bit 0 */
+ bool noprobe_isa:1; /* Bit 1 */
+ bool noprobe_pci:1; /* Bit 2 */
+ bool nosort_pci:1; /* Bit 3 */
+ bool multimaster_first:1; /* Bit 4 */
+ bool flashpoint_first:1; /* Bit 5 */
+ bool limited_isa:1; /* Bit 6 */
+ bool probe330:1; /* Bit 7 */
+ bool probe334:1; /* Bit 8 */
+ bool probe230:1; /* Bit 9 */
+ bool probe234:1; /* Bit 10 */
+ bool probe130:1; /* Bit 11 */
+ bool probe134:1; /* Bit 12 */
+};
+
+/*
+ Define the Global Options.
+*/
+
+struct blogic_global_options {
+ bool trace_probe:1; /* Bit 0 */
+ bool trace_hw_reset:1; /* Bit 1 */
+ bool trace_config:1; /* Bit 2 */
+ bool trace_err:1; /* Bit 3 */
+};
+
+/*
+ Define the BusLogic SCSI Host Adapter I/O Register Offsets.
+*/
+
+#define BLOGIC_CNTRL_REG 0 /* WO register */
+#define BLOGIC_STATUS_REG 0 /* RO register */
+#define BLOGIC_CMD_PARM_REG 1 /* WO register */
+#define BLOGIC_DATAIN_REG 1 /* RO register */
+#define BLOGIC_INT_REG 2 /* RO register */
+#define BLOGIC_GEOMETRY_REG 3 /* RO register */
+
+/*
+ Define the structure of the write-only Control Register.
+*/
+
+union blogic_cntrl_reg {
+ unsigned char all;
+ struct {
+ unsigned char:4; /* Bits 0-3 */
+ bool bus_reset:1; /* Bit 4 */
+ bool int_reset:1; /* Bit 5 */
+ bool soft_reset:1; /* Bit 6 */
+ bool hard_reset:1; /* Bit 7 */
+ } cr;
+};
+
+/*
+ Define the structure of the read-only Status Register.
+*/
+
+union blogic_stat_reg {
+ unsigned char all;
+ struct {
+ bool cmd_invalid:1; /* Bit 0 */
+ bool rsvd:1; /* Bit 1 */
+ bool datain_ready:1; /* Bit 2 */
+ bool cmd_param_busy:1; /* Bit 3 */
+ bool adapter_ready:1; /* Bit 4 */
+ bool init_reqd:1; /* Bit 5 */
+ bool diag_failed:1; /* Bit 6 */
+ bool diag_active:1; /* Bit 7 */
+ } sr;
+};
+
+/*
+ Define the structure of the read-only Interrupt Register.
+*/
+
+union blogic_int_reg {
+ unsigned char all;
+ struct {
+ bool mailin_loaded:1; /* Bit 0 */
+ bool mailout_avail:1; /* Bit 1 */
+ bool cmd_complete:1; /* Bit 2 */
+ bool ext_busreset:1; /* Bit 3 */
+ unsigned char rsvd:3; /* Bits 4-6 */
+ bool int_valid:1; /* Bit 7 */
+ } ir;
+};
+
+/*
+ Define the structure of the read-only Geometry Register.
+*/
+
+union blogic_geo_reg {
+ unsigned char all;
+ struct {
+ enum blogic_bios_diskgeometry d0_geo:2; /* Bits 0-1 */
+ enum blogic_bios_diskgeometry d1_geo:2; /* Bits 2-3 */
+ unsigned char:3; /* Bits 4-6 */
+ bool ext_trans_enable:1; /* Bit 7 */
+ } gr;
+};
+
+/*
+ Define the BusLogic SCSI Host Adapter Command Register Operation Codes.
+*/
+
+enum blogic_opcode {
+ BLOGIC_TEST_CMP_COMPLETE = 0x00,
+ BLOGIC_INIT_MBOX = 0x01,
+ BLOGIC_EXEC_MBOX_CMD = 0x02,
+ BLOGIC_EXEC_BIOS_CMD = 0x03,
+ BLOGIC_GET_BOARD_ID = 0x04,
+ BLOGIC_ENABLE_OUTBOX_AVAIL_INT = 0x05,
+ BLOGIC_SET_SELECT_TIMEOUT = 0x06,
+ BLOGIC_SET_PREEMPT_TIME = 0x07,
+ BLOGIC_SET_TIMEOFF_BUS = 0x08,
+ BLOGIC_SET_TXRATE = 0x09,
+ BLOGIC_INQ_DEV0TO7 = 0x0A,
+ BLOGIC_INQ_CONFIG = 0x0B,
+ BLOGIC_TGT_MODE = 0x0C,
+ BLOGIC_INQ_SETUPINFO = 0x0D,
+ BLOGIC_WRITE_LOCALRAM = 0x1A,
+ BLOGIC_READ_LOCALRAM = 0x1B,
+ BLOGIC_WRITE_BUSMASTER_FIFO = 0x1C,
+ BLOGIC_READ_BUSMASTER_FIFO = 0x1D,
+ BLOGIC_ECHO_CMDDATA = 0x1F,
+ BLOGIC_ADAPTER_DIAG = 0x20,
+ BLOGIC_SET_OPTIONS = 0x21,
+ BLOGIC_INQ_DEV8TO15 = 0x23,
+ BLOGIC_INQ_DEV = 0x24,
+ BLOGIC_DISABLE_INT = 0x25,
+ BLOGIC_INIT_EXT_MBOX = 0x81,
+ BLOGIC_EXEC_SCS_CMD = 0x83,
+ BLOGIC_INQ_FWVER_D3 = 0x84,
+ BLOGIC_INQ_FWVER_LETTER = 0x85,
+ BLOGIC_INQ_PCI_INFO = 0x86,
+ BLOGIC_INQ_MODELNO = 0x8B,
+ BLOGIC_INQ_SYNC_PERIOD = 0x8C,
+ BLOGIC_INQ_EXTSETUP = 0x8D,
+ BLOGIC_STRICT_RR = 0x8F,
+ BLOGIC_STORE_LOCALRAM = 0x90,
+ BLOGIC_FETCH_LOCALRAM = 0x91,
+ BLOGIC_STORE_TO_EEPROM = 0x92,
+ BLOGIC_LOAD_AUTOSCSICODE = 0x94,
+ BLOGIC_MOD_IOADDR = 0x95,
+ BLOGIC_SETCCB_FMT = 0x96,
+ BLOGIC_WRITE_INQBUF = 0x9A,
+ BLOGIC_READ_INQBUF = 0x9B,
+ BLOGIC_FLASH_LOAD = 0xA7,
+ BLOGIC_READ_SCAMDATA = 0xA8,
+ BLOGIC_WRITE_SCAMDATA = 0xA9
+};
+
+/*
+ Define the Inquire Board ID reply structure.
+*/
+
+struct blogic_board_id {
+ unsigned char type; /* Byte 0 */
+ unsigned char custom_features; /* Byte 1 */
+ unsigned char fw_ver_digit1; /* Byte 2 */
+ unsigned char fw_ver_digit2; /* Byte 3 */
+};
+
+/*
+ Define the Inquire Configuration reply structure.
+*/
+
+struct blogic_config {
+ unsigned char:5; /* Byte 0 Bits 0-4 */
+ bool dma_ch5:1; /* Byte 0 Bit 5 */
+ bool dma_ch6:1; /* Byte 0 Bit 6 */
+ bool dma_ch7:1; /* Byte 0 Bit 7 */
+ bool irq_ch9:1; /* Byte 1 Bit 0 */
+ bool irq_ch10:1; /* Byte 1 Bit 1 */
+ bool irq_ch11:1; /* Byte 1 Bit 2 */
+ bool irq_ch12:1; /* Byte 1 Bit 3 */
+ unsigned char:1; /* Byte 1 Bit 4 */
+ bool irq_ch14:1; /* Byte 1 Bit 5 */
+ bool irq_ch15:1; /* Byte 1 Bit 6 */
+ unsigned char:1; /* Byte 1 Bit 7 */
+ unsigned char id:4; /* Byte 2 Bits 0-3 */
+ unsigned char:4; /* Byte 2 Bits 4-7 */
+};
+
+/*
+ Define the Inquire Setup Information reply structure.
+*/
+
+struct blogic_syncval {
+ unsigned char offset:4; /* Bits 0-3 */
+ unsigned char tx_period:3; /* Bits 4-6 */
+ bool sync:1; /* Bit 7 */
+};
+
+struct blogic_setup_info {
+ bool sync:1; /* Byte 0 Bit 0 */
+ bool parity:1; /* Byte 0 Bit 1 */
+ unsigned char:6; /* Byte 0 Bits 2-7 */
+ unsigned char tx_rate; /* Byte 1 */
+ unsigned char preempt_time; /* Byte 2 */
+ unsigned char timeoff_bus; /* Byte 3 */
+ unsigned char mbox_count; /* Byte 4 */
+ unsigned char mbox_addr[3]; /* Bytes 5-7 */
+ struct blogic_syncval sync0to7[8]; /* Bytes 8-15 */
+ unsigned char disconnect_ok0to7; /* Byte 16 */
+ unsigned char sig; /* Byte 17 */
+ unsigned char char_d; /* Byte 18 */
+ unsigned char bus_type; /* Byte 19 */
+ unsigned char wide_tx_ok0to7; /* Byte 20 */
+ unsigned char wide_tx_active0to7; /* Byte 21 */
+ struct blogic_syncval sync8to15[8]; /* Bytes 22-29 */
+ unsigned char disconnect_ok8to15; /* Byte 30 */
+ unsigned char:8; /* Byte 31 */
+ unsigned char wide_tx_ok8to15; /* Byte 32 */
+ unsigned char wide_tx_active8to15; /* Byte 33 */
+};
+
+/*
+ Define the Initialize Extended Mailbox request structure.
+*/
+
+struct blogic_extmbox_req {
+ unsigned char mbox_count; /* Byte 0 */
+ u32 base_mbox_addr; /* Bytes 1-4 */
+} PACKED;
+
+
+/*
+ Define the Inquire PCI Host Adapter Information reply type. The ISA
+ Compatible I/O Port values are defined here and are also used with
+ the Modify I/O Address command.
+*/
+
+enum blogic_isa_ioport {
+ BLOGIC_IO_330 = 0,
+ BLOGIC_IO_334 = 1,
+ BLOGIC_IO_230 = 2,
+ BLOGIC_IO_234 = 3,
+ BLOGIC_IO_130 = 4,
+ BLOGIC_IO_134 = 5,
+ BLOGIC_IO_DISABLE = 6,
+ BLOGIC_IO_DISABLE2 = 7
+} PACKED;
+
+struct blogic_adapter_info {
+ enum blogic_isa_ioport isa_port; /* Byte 0 */
+ unsigned char irq_ch; /* Byte 1 */
+ bool low_term:1; /* Byte 2 Bit 0 */
+ bool high_term:1; /* Byte 2 Bit 1 */
+ unsigned char:2; /* Byte 2 Bits 2-3 */
+ bool JP1:1; /* Byte 2 Bit 4 */
+ bool JP2:1; /* Byte 2 Bit 5 */
+ bool JP3:1; /* Byte 2 Bit 6 */
+ bool genericinfo_valid:1; /* Byte 2 Bit 7 */
+ unsigned char:8; /* Byte 3 */
+};
+
+/*
+ Define the Inquire Extended Setup Information reply structure.
+*/
+
+struct blogic_ext_setup {
+ unsigned char bus_type; /* Byte 0 */
+ unsigned char bios_addr; /* Byte 1 */
+ unsigned short sg_limit; /* Bytes 2-3 */
+ unsigned char mbox_count; /* Byte 4 */
+ u32 base_mbox_addr; /* Bytes 5-8 */
+ struct {
+ unsigned char:2; /* Byte 9 Bits 0-1 */
+ bool fast_on_eisa:1; /* Byte 9 Bit 2 */
+ unsigned char:3; /* Byte 9 Bits 3-5 */
+ bool level_int:1; /* Byte 9 Bit 6 */
+ unsigned char:1; /* Byte 9 Bit 7 */
+ } misc;
+ unsigned char fw_rev[3]; /* Bytes 10-12 */
+ bool wide:1; /* Byte 13 Bit 0 */
+ bool differential:1; /* Byte 13 Bit 1 */
+ bool scam:1; /* Byte 13 Bit 2 */
+ bool ultra:1; /* Byte 13 Bit 3 */
+ bool smart_term:1; /* Byte 13 Bit 4 */
+ unsigned char:3; /* Byte 13 Bits 5-7 */
+} PACKED;
+
+/*
+ Define the Enable Strict Round Robin Mode request type.
+*/
+
+enum blogic_rr_req {
+ BLOGIC_AGGRESSIVE_RR = 0,
+ BLOGIC_STRICT_RR_MODE = 1
+} PACKED;
+
+
+/*
+ Define the Fetch Host Adapter Local RAM request type.
+*/
+
+#define BLOGIC_BIOS_BASE 0
+#define BLOGIC_AUTOSCSI_BASE 64
+
+struct blogic_fetch_localram {
+ unsigned char offset; /* Byte 0 */
+ unsigned char count; /* Byte 1 */
+};
+
+/*
+ Define the Host Adapter Local RAM AutoSCSI structure.
+*/
+
+struct blogic_autoscsi {
+ unsigned char factory_sig[2]; /* Bytes 0-1 */
+ unsigned char info_bytes; /* Byte 2 */
+ unsigned char adapter_type[6]; /* Bytes 3-8 */
+ unsigned char:8; /* Byte 9 */
+ bool floppy:1; /* Byte 10 Bit 0 */
+ bool floppy_sec:1; /* Byte 10 Bit 1 */
+ bool level_int:1; /* Byte 10 Bit 2 */
+ unsigned char:2; /* Byte 10 Bits 3-4 */
+ unsigned char systemram_bios:3; /* Byte 10 Bits 5-7 */
+ unsigned char dma_ch:7; /* Byte 11 Bits 0-6 */
+ bool dma_autoconf:1; /* Byte 11 Bit 7 */
+ unsigned char irq_ch:7; /* Byte 12 Bits 0-6 */
+ bool irq_autoconf:1; /* Byte 12 Bit 7 */
+ unsigned char dma_tx_rate; /* Byte 13 */
+ unsigned char scsi_id; /* Byte 14 */
+ bool low_term:1; /* Byte 15 Bit 0 */
+ bool parity:1; /* Byte 15 Bit 1 */
+ bool high_term:1; /* Byte 15 Bit 2 */
+ bool noisy_cable:1; /* Byte 15 Bit 3 */
+ bool fast_sync_neg:1; /* Byte 15 Bit 4 */
+ bool reset_enabled:1; /* Byte 15 Bit 5 */
+ bool:1; /* Byte 15 Bit 6 */
+ bool active_negation:1; /* Byte 15 Bit 7 */
+ unsigned char bus_on_delay; /* Byte 16 */
+ unsigned char bus_off_delay; /* Byte 17 */
+ bool bios_enabled:1; /* Byte 18 Bit 0 */
+ bool int19_redir_enabled:1; /* Byte 18 Bit 1 */
+ bool ext_trans_enable:1; /* Byte 18 Bit 2 */
+ bool removable_as_fixed:1; /* Byte 18 Bit 3 */
+ bool:1; /* Byte 18 Bit 4 */
+ bool morethan2_drives:1; /* Byte 18 Bit 5 */
+ bool bios_int:1; /* Byte 18 Bit 6 */
+ bool floptical:1; /* Byte 19 Bit 7 */
+ unsigned short dev_enabled; /* Bytes 19-20 */
+ unsigned short wide_ok; /* Bytes 21-22 */
+ unsigned short fast_ok; /* Bytes 23-24 */
+ unsigned short sync_ok; /* Bytes 25-26 */
+ unsigned short discon_ok; /* Bytes 27-28 */
+ unsigned short send_start_unit; /* Bytes 29-30 */
+ unsigned short ignore_bios_scan; /* Bytes 31-32 */
+ unsigned char pci_int_pin:2; /* Byte 33 Bits 0-1 */
+ unsigned char adapter_ioport:2; /* Byte 33 Bits 2-3 */
+ bool strict_rr_enabled:1; /* Byte 33 Bit 4 */
+ bool vesabus_33mhzplus:1; /* Byte 33 Bit 5 */
+ bool vesa_burst_write:1; /* Byte 33 Bit 6 */
+ bool vesa_burst_read:1; /* Byte 33 Bit 7 */
+ unsigned short ultra_ok; /* Bytes 34-35 */
+ unsigned int:32; /* Bytes 36-39 */
+ unsigned char:8; /* Byte 40 */
+ unsigned char autoscsi_maxlun; /* Byte 41 */
+ bool:1; /* Byte 42 Bit 0 */
+ bool scam_dominant:1; /* Byte 42 Bit 1 */
+ bool scam_enabled:1; /* Byte 42 Bit 2 */
+ bool scam_lev2:1; /* Byte 42 Bit 3 */
+ unsigned char:4; /* Byte 42 Bits 4-7 */
+ bool int13_exten:1; /* Byte 43 Bit 0 */
+ bool:1; /* Byte 43 Bit 1 */
+ bool cd_boot:1; /* Byte 43 Bit 2 */
+ unsigned char:5; /* Byte 43 Bits 3-7 */
+ unsigned char boot_id:4; /* Byte 44 Bits 0-3 */
+ unsigned char boot_ch:4; /* Byte 44 Bits 4-7 */
+ unsigned char force_scan_order:1; /* Byte 45 Bit 0 */
+ unsigned char:7; /* Byte 45 Bits 1-7 */
+ unsigned short nontagged_to_alt_ok; /* Bytes 46-47 */
+ unsigned short reneg_sync_on_check; /* Bytes 48-49 */
+ unsigned char rsvd[10]; /* Bytes 50-59 */
+ unsigned char manuf_diag[2]; /* Bytes 60-61 */
+ unsigned short cksum; /* Bytes 62-63 */
+} PACKED;
+
+/*
+ Define the Host Adapter Local RAM Auto SCSI Byte 45 structure.
+*/
+
+struct blogic_autoscsi_byte45 {
+ unsigned char force_scan_order:1; /* Bit 0 */
+ unsigned char:7; /* Bits 1-7 */
+};
+
+/*
+ Define the Host Adapter Local RAM BIOS Drive Map Byte structure.
+*/
+
+#define BLOGIC_BIOS_DRVMAP 17
+
+struct blogic_bios_drvmap {
+ unsigned char tgt_idbit3:1; /* Bit 0 */
+ unsigned char:2; /* Bits 1-2 */
+ enum blogic_bios_diskgeometry diskgeom:2; /* Bits 3-4 */
+ unsigned char tgt_id:3; /* Bits 5-7 */
+};
+
+/*
+ Define the Set CCB Format request type. Extended LUN Format CCBs are
+ necessary to support more than 8 Logical Units per Target Device.
+*/
+
+enum blogic_setccb_fmt {
+ BLOGIC_LEGACY_LUN_CCB = 0,
+ BLOGIC_EXT_LUN_CCB = 1
+} PACKED;
+
+/*
+ Define the Outgoing Mailbox Action Codes.
+*/
+
+enum blogic_action {
+ BLOGIC_OUTBOX_FREE = 0x00,
+ BLOGIC_MBOX_START = 0x01,
+ BLOGIC_MBOX_ABORT = 0x02
+} PACKED;
+
+
+/*
+ Define the Incoming Mailbox Completion Codes. The MultiMaster Firmware
+ only uses codes 0 - 4. The FlashPoint SCCB Manager has no mailboxes, so
+ completion codes are stored in the CCB; it only uses codes 1, 2, 4, and 5.
+*/
+
+enum blogic_cmplt_code {
+ BLOGIC_INBOX_FREE = 0x00,
+ BLOGIC_CMD_COMPLETE_GOOD = 0x01,
+ BLOGIC_CMD_ABORT_BY_HOST = 0x02,
+ BLOGIC_CMD_NOTFOUND = 0x03,
+ BLOGIC_CMD_COMPLETE_ERROR = 0x04,
+ BLOGIC_INVALID_CCB = 0x05
+} PACKED;
+
+/*
+ Define the Command Control Block (CCB) Opcodes.
+*/
+
+enum blogic_ccb_opcode {
+ BLOGIC_INITIATOR_CCB = 0x00,
+ BLOGIC_TGT_CCB = 0x01,
+ BLOGIC_INITIATOR_CCB_SG = 0x02,
+ BLOGIC_INITIATOR_CCBB_RESIDUAL = 0x03,
+ BLOGIC_INITIATOR_CCB_SG_RESIDUAL = 0x04,
+ BLOGIC_BDR = 0x81
+} PACKED;
+
+
+/*
+ Define the CCB Data Direction Codes.
+*/
+
+enum blogic_datadir {
+ BLOGIC_UNCHECKED_TX = 0,
+ BLOGIC_DATAIN_CHECKED = 1,
+ BLOGIC_DATAOUT_CHECKED = 2,
+ BLOGIC_NOTX = 3
+};
+
+
+/*
+ Define the Host Adapter Status Codes. The MultiMaster Firmware does not
+ return status code 0x0C; it uses 0x12 for both overruns and underruns.
+*/
+
+enum blogic_adapter_status {
+ BLOGIC_CMD_CMPLT_NORMAL = 0x00,
+ BLOGIC_LINK_CMD_CMPLT = 0x0A,
+ BLOGIC_LINK_CMD_CMPLT_FLAG = 0x0B,
+ BLOGIC_DATA_UNDERRUN = 0x0C,
+ BLOGIC_SELECT_TIMEOUT = 0x11,
+ BLOGIC_DATA_OVERRUN = 0x12,
+ BLOGIC_NOEXPECT_BUSFREE = 0x13,
+ BLOGIC_INVALID_BUSPHASE = 0x14,
+ BLOGIC_INVALID_OUTBOX_CODE = 0x15,
+ BLOGIC_INVALID_CMD_CODE = 0x16,
+ BLOGIC_LINKCCB_BADLUN = 0x17,
+ BLOGIC_BAD_CMD_PARAM = 0x1A,
+ BLOGIC_AUTOREQSENSE_FAIL = 0x1B,
+ BLOGIC_TAGQUEUE_REJECT = 0x1C,
+ BLOGIC_BAD_MSG_RCVD = 0x1D,
+ BLOGIC_HW_FAIL = 0x20,
+ BLOGIC_NORESPONSE_TO_ATN = 0x21,
+ BLOGIC_HW_RESET = 0x22,
+ BLOGIC_RST_FROM_OTHERDEV = 0x23,
+ BLOGIC_BAD_RECONNECT = 0x24,
+ BLOGIC_HW_BDR = 0x25,
+ BLOGIC_ABRT_QUEUE = 0x26,
+ BLOGIC_ADAPTER_SW_ERROR = 0x27,
+ BLOGIC_HW_TIMEOUT = 0x30,
+ BLOGIC_PARITY_ERR = 0x34
+} PACKED;
+
+
+/*
+ Define the SCSI Target Device Status Codes.
+*/
+
+enum blogic_tgt_status {
+ BLOGIC_OP_GOOD = 0x00,
+ BLOGIC_CHECKCONDITION = 0x02,
+ BLOGIC_DEVBUSY = 0x08
+} PACKED;
+
+/*
+ Define the Queue Tag Codes.
+*/
+
+enum blogic_queuetag {
+ BLOGIC_SIMPLETAG = 0,
+ BLOGIC_HEADTAG = 1,
+ BLOGIC_ORDEREDTAG = 2,
+ BLOGIC_RSVDTAG = 3
+};
+
+/*
+ Define the SCSI Command Descriptor Block (CDB).
+*/
+
+#define BLOGIC_CDB_MAXLEN 12
+
+
+/*
+ Define the Scatter/Gather Segment structure required by the MultiMaster
+ Firmware Interface and the FlashPoint SCCB Manager.
+*/
+
+struct blogic_sg_seg {
+ u32 segbytes; /* Bytes 0-3 */
+ u32 segdata; /* Bytes 4-7 */
+};
+
+/*
+ Define the Driver CCB Status Codes.
+*/
+
+enum blogic_ccb_status {
+ BLOGIC_CCB_FREE = 0,
+ BLOGIC_CCB_ACTIVE = 1,
+ BLOGIC_CCB_COMPLETE = 2,
+ BLOGIC_CCB_RESET = 3
+} PACKED;
+
+
+/*
+ Define the 32 Bit Mode Command Control Block (CCB) structure. The first 40
+ bytes are defined by and common to both the MultiMaster Firmware and the
+ FlashPoint SCCB Manager. The next 60 bytes are defined by the FlashPoint
+ SCCB Manager. The remaining components are defined by the Linux BusLogic
+ Driver. Extended LUN Format CCBs differ from Legacy LUN Format 32 Bit Mode
+ CCBs only in having the TagEnable and QueueTag fields moved from byte 17 to
+ byte 1, and the Logical Unit field in byte 17 expanded to 6 bits. In theory,
+ Extended LUN Format CCBs can support up to 64 Logical Units, but in practice
+ many devices will respond improperly to Logical Units between 32 and 63, and
+ the SCSI-2 specification defines Bit 5 as LUNTAR. Extended LUN Format CCBs
+ are used by recent versions of the MultiMaster Firmware, as well as by the
+ FlashPoint SCCB Manager; the FlashPoint SCCB Manager only supports 32 Logical
+ Units. Since 64 Logical Units are unlikely to be needed in practice, and
+ since they are problematic for the above reasons, and since limiting them to
+ 5 bits simplifies the CCB structure definition, this driver only supports
+ 32 Logical Units per Target Device.
+*/
+
+struct blogic_ccb {
+ /*
+ MultiMaster Firmware and FlashPoint SCCB Manager Common Portion.
+ */
+ enum blogic_ccb_opcode opcode; /* Byte 0 */
+ unsigned char:3; /* Byte 1 Bits 0-2 */
+ enum blogic_datadir datadir:2; /* Byte 1 Bits 3-4 */
+ bool tag_enable:1; /* Byte 1 Bit 5 */
+ enum blogic_queuetag queuetag:2; /* Byte 1 Bits 6-7 */
+ unsigned char cdblen; /* Byte 2 */
+ unsigned char sense_datalen; /* Byte 3 */
+ u32 datalen; /* Bytes 4-7 */
+ void *data; /* Bytes 8-11 */
+ unsigned char:8; /* Byte 12 */
+ unsigned char:8; /* Byte 13 */
+ enum blogic_adapter_status adapter_status; /* Byte 14 */
+ enum blogic_tgt_status tgt_status; /* Byte 15 */
+ unsigned char tgt_id; /* Byte 16 */
+ unsigned char lun:5; /* Byte 17 Bits 0-4 */
+ bool legacytag_enable:1; /* Byte 17 Bit 5 */
+ enum blogic_queuetag legacy_tag:2; /* Byte 17 Bits 6-7 */
+ unsigned char cdb[BLOGIC_CDB_MAXLEN]; /* Bytes 18-29 */
+ unsigned char:8; /* Byte 30 */
+ unsigned char:8; /* Byte 31 */
+ u32 rsvd_int; /* Bytes 32-35 */
+ u32 sensedata; /* Bytes 36-39 */
+ /*
+ FlashPoint SCCB Manager Defined Portion.
+ */
+ void (*callback) (struct blogic_ccb *); /* Bytes 40-43 */
+ u32 base_addr; /* Bytes 44-47 */
+ enum blogic_cmplt_code comp_code; /* Byte 48 */
+#ifdef CONFIG_SCSI_FLASHPOINT
+ unsigned char:8; /* Byte 49 */
+ u16 os_flags; /* Bytes 50-51 */
+ unsigned char private[24]; /* Bytes 52-99 */
+ void *rsvd1;
+ void *rsvd2;
+ unsigned char private2[16];
+#endif
+ /*
+ BusLogic Linux Driver Defined Portion.
+ */
+ dma_addr_t allocgrp_head;
+ unsigned int allocgrp_size;
+ u32 dma_handle;
+ enum blogic_ccb_status status;
+ unsigned long serial;
+ struct scsi_cmnd *command;
+ struct blogic_adapter *adapter;
+ struct blogic_ccb *next;
+ struct blogic_ccb *next_all;
+ struct blogic_sg_seg sglist[BLOGIC_SG_LIMIT];
+};
+
+/*
+ Define the 32 Bit Mode Outgoing Mailbox structure.
+*/
+
+struct blogic_outbox {
+ u32 ccb; /* Bytes 0-3 */
+ u32:24; /* Bytes 4-6 */
+ enum blogic_action action; /* Byte 7 */
+};
+
+/*
+ Define the 32 Bit Mode Incoming Mailbox structure.
+*/
+
+struct blogic_inbox {
+ u32 ccb; /* Bytes 0-3 */
+ enum blogic_adapter_status adapter_status; /* Byte 4 */
+ enum blogic_tgt_status tgt_status; /* Byte 5 */
+ unsigned char:8; /* Byte 6 */
+ enum blogic_cmplt_code comp_code; /* Byte 7 */
+};
+
+
+/*
+ Define the BusLogic Driver Options structure.
+*/
+
+struct blogic_drvr_options {
+ unsigned short tagq_ok;
+ unsigned short tagq_ok_mask;
+ unsigned short bus_settle_time;
+ unsigned short stop_tgt_inquiry;
+ unsigned char common_qdepth;
+ unsigned char qdepth[BLOGIC_MAXDEV];
+};
+
+/*
+ Define the Host Adapter Target Flags structure.
+*/
+
+struct blogic_tgt_flags {
+ bool tgt_exists:1;
+ bool tagq_ok:1;
+ bool wide_ok:1;
+ bool tagq_active:1;
+ bool wide_active:1;
+ bool cmd_good:1;
+ bool tgt_info_in:1;
+};
+
+/*
+ Define the Host Adapter Target Statistics structure.
+*/
+
+#define BLOGIC_SZ_BUCKETS 10
+
+struct blogic_tgt_stats {
+ unsigned int cmds_tried;
+ unsigned int cmds_complete;
+ unsigned int read_cmds;
+ unsigned int write_cmds;
+ struct blogic_byte_count bytesread;
+ struct blogic_byte_count byteswritten;
+ unsigned int read_sz_buckets[BLOGIC_SZ_BUCKETS];
+ unsigned int write_sz_buckets[BLOGIC_SZ_BUCKETS];
+ unsigned short aborts_request;
+ unsigned short aborts_tried;
+ unsigned short aborts_done;
+ unsigned short bdr_request;
+ unsigned short bdr_tried;
+ unsigned short bdr_done;
+ unsigned short adatper_reset_req;
+ unsigned short adapter_reset_attempt;
+ unsigned short adapter_reset_done;
+};
+
+/*
+ Define the FlashPoint Card Handle data type.
+*/
+
+#define FPOINT_BADCARD_HANDLE 0xFFFFFFFFL
+
+
+/*
+ Define the FlashPoint Information structure. This structure is defined
+ by the FlashPoint SCCB Manager.
+*/
+
+struct fpoint_info {
+ u32 base_addr; /* Bytes 0-3 */
+ bool present; /* Byte 4 */
+ unsigned char irq_ch; /* Byte 5 */
+ unsigned char scsi_id; /* Byte 6 */
+ unsigned char scsi_lun; /* Byte 7 */
+ u16 fw_rev; /* Bytes 8-9 */
+ u16 sync_ok; /* Bytes 10-11 */
+ u16 fast_ok; /* Bytes 12-13 */
+ u16 ultra_ok; /* Bytes 14-15 */
+ u16 discon_ok; /* Bytes 16-17 */
+ u16 wide_ok; /* Bytes 18-19 */
+ bool parity:1; /* Byte 20 Bit 0 */
+ bool wide:1; /* Byte 20 Bit 1 */
+ bool softreset:1; /* Byte 20 Bit 2 */
+ bool ext_trans_enable:1; /* Byte 20 Bit 3 */
+ bool low_term:1; /* Byte 20 Bit 4 */
+ bool high_term:1; /* Byte 20 Bit 5 */
+ bool report_underrun:1; /* Byte 20 Bit 6 */
+ bool scam_enabled:1; /* Byte 20 Bit 7 */
+ bool scam_lev2:1; /* Byte 21 Bit 0 */
+ unsigned char:7; /* Byte 21 Bits 1-7 */
+ unsigned char family; /* Byte 22 */
+ unsigned char bus_type; /* Byte 23 */
+ unsigned char model[3]; /* Bytes 24-26 */
+ unsigned char relative_cardnum; /* Byte 27 */
+ unsigned char rsvd[4]; /* Bytes 28-31 */
+ u32 os_rsvd; /* Bytes 32-35 */
+ unsigned char translation_info[4]; /* Bytes 36-39 */
+ u32 rsvd2[5]; /* Bytes 40-59 */
+ u32 sec_range; /* Bytes 60-63 */
+};
+
+/*
+ Define the BusLogic Driver Host Adapter structure.
+*/
+
+struct blogic_adapter {
+ struct Scsi_Host *scsi_host;
+ struct pci_dev *pci_device;
+ enum blogic_adapter_type adapter_type;
+ enum blogic_adapter_bus_type adapter_bus_type;
+ unsigned long io_addr;
+ unsigned long pci_addr;
+ unsigned short addr_count;
+ unsigned char host_no;
+ unsigned char model[9];
+ unsigned char fw_ver[6];
+ unsigned char full_model[18];
+ unsigned char bus;
+ unsigned char dev;
+ unsigned char irq_ch;
+ unsigned char dma_ch;
+ unsigned char scsi_id;
+ bool irq_acquired:1;
+ bool dma_chan_acquired:1;
+ bool ext_trans_enable:1;
+ bool parity:1;
+ bool reset_enabled:1;
+ bool level_int:1;
+ bool wide:1;
+ bool differential:1;
+ bool scam:1;
+ bool ultra:1;
+ bool ext_lun:1;
+ bool terminfo_valid:1;
+ bool low_term:1;
+ bool high_term:1;
+ bool need_bouncebuf:1;
+ bool strict_rr:1;
+ bool scam_enabled:1;
+ bool scam_lev2:1;
+ bool adapter_initd:1;
+ bool adapter_extreset:1;
+ bool adapter_intern_err:1;
+ bool processing_ccbs;
+ volatile bool adapter_cmd_complete;
+ unsigned short adapter_sglimit;
+ unsigned short drvr_sglimit;
+ unsigned short maxdev;
+ unsigned short maxlun;
+ unsigned short mbox_count;
+ unsigned short initccbs;
+ unsigned short inc_ccbs;
+ unsigned short alloc_ccbs;
+ unsigned short drvr_qdepth;
+ unsigned short adapter_qdepth;
+ unsigned short untag_qdepth;
+ unsigned short common_qdepth;
+ unsigned short bus_settle_time;
+ unsigned short sync_ok;
+ unsigned short fast_ok;
+ unsigned short ultra_ok;
+ unsigned short wide_ok;
+ unsigned short discon_ok;
+ unsigned short tagq_ok;
+ unsigned short ext_resets;
+ unsigned short adapter_intern_errors;
+ unsigned short tgt_count;
+ unsigned short msgbuflen;
+ u32 bios_addr;
+ struct blogic_drvr_options *drvr_opts;
+ struct fpoint_info fpinfo;
+ void *cardhandle;
+ struct list_head host_list;
+ struct blogic_ccb *all_ccbs;
+ struct blogic_ccb *free_ccbs;
+ struct blogic_ccb *firstccb;
+ struct blogic_ccb *lastccb;
+ struct blogic_ccb *bdr_pend[BLOGIC_MAXDEV];
+ struct blogic_tgt_flags tgt_flags[BLOGIC_MAXDEV];
+ unsigned char qdepth[BLOGIC_MAXDEV];
+ unsigned char sync_period[BLOGIC_MAXDEV];
+ unsigned char sync_offset[BLOGIC_MAXDEV];
+ unsigned char active_cmds[BLOGIC_MAXDEV];
+ unsigned int cmds_since_rst[BLOGIC_MAXDEV];
+ unsigned long last_seqpoint[BLOGIC_MAXDEV];
+ unsigned long last_resettried[BLOGIC_MAXDEV];
+ unsigned long last_resetdone[BLOGIC_MAXDEV];
+ struct blogic_outbox *first_outbox;
+ struct blogic_outbox *last_outbox;
+ struct blogic_outbox *next_outbox;
+ struct blogic_inbox *first_inbox;
+ struct blogic_inbox *last_inbox;
+ struct blogic_inbox *next_inbox;
+ struct blogic_tgt_stats tgt_stats[BLOGIC_MAXDEV];
+ unsigned char *mbox_space;
+ dma_addr_t mbox_space_handle;
+ unsigned int mbox_sz;
+ unsigned long ccb_offset;
+ char msgbuf[BLOGIC_MSGBUF_SIZE];
+};
+
+/*
+ Define a structure for the BIOS Disk Parameters.
+*/
+
+struct bios_diskparam {
+ int heads;
+ int sectors;
+ int cylinders;
+};
+
+/*
+ Define a structure for the SCSI Inquiry command results.
+*/
+
+struct scsi_inquiry {
+ unsigned char devtype:5; /* Byte 0 Bits 0-4 */
+ unsigned char dev_qual:3; /* Byte 0 Bits 5-7 */
+ unsigned char dev_modifier:7; /* Byte 1 Bits 0-6 */
+ bool rmb:1; /* Byte 1 Bit 7 */
+ unsigned char ansi_ver:3; /* Byte 2 Bits 0-2 */
+ unsigned char ecma_ver:3; /* Byte 2 Bits 3-5 */
+ unsigned char iso_ver:2; /* Byte 2 Bits 6-7 */
+ unsigned char resp_fmt:4; /* Byte 3 Bits 0-3 */
+ unsigned char:2; /* Byte 3 Bits 4-5 */
+ bool TrmIOP:1; /* Byte 3 Bit 6 */
+ bool AENC:1; /* Byte 3 Bit 7 */
+ unsigned char addl_len; /* Byte 4 */
+ unsigned char:8; /* Byte 5 */
+ unsigned char:8; /* Byte 6 */
+ bool SftRe:1; /* Byte 7 Bit 0 */
+ bool CmdQue:1; /* Byte 7 Bit 1 */
+ bool:1; /* Byte 7 Bit 2 */
+ bool linked:1; /* Byte 7 Bit 3 */
+ bool sync:1; /* Byte 7 Bit 4 */
+ bool WBus16:1; /* Byte 7 Bit 5 */
+ bool WBus32:1; /* Byte 7 Bit 6 */
+ bool RelAdr:1; /* Byte 7 Bit 7 */
+ unsigned char vendor[8]; /* Bytes 8-15 */
+ unsigned char product[16]; /* Bytes 16-31 */
+ unsigned char product_rev[4]; /* Bytes 32-35 */
+};
+
+
+/*
+ Define functions to provide an abstraction for reading and writing the
+ Host Adapter I/O Registers.
+*/
+
+static inline void blogic_busreset(struct blogic_adapter *adapter)
+{
+ union blogic_cntrl_reg cr;
+ cr.all = 0;
+ cr.cr.bus_reset = true;
+ outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG);
+}
+
+static inline void blogic_intreset(struct blogic_adapter *adapter)
+{
+ union blogic_cntrl_reg cr;
+ cr.all = 0;
+ cr.cr.int_reset = true;
+ outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG);
+}
+
+static inline void blogic_softreset(struct blogic_adapter *adapter)
+{
+ union blogic_cntrl_reg cr;
+ cr.all = 0;
+ cr.cr.soft_reset = true;
+ outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG);
+}
+
+static inline void blogic_hardreset(struct blogic_adapter *adapter)
+{
+ union blogic_cntrl_reg cr;
+ cr.all = 0;
+ cr.cr.hard_reset = true;
+ outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG);
+}
+
+static inline unsigned char blogic_rdstatus(struct blogic_adapter *adapter)
+{
+ return inb(adapter->io_addr + BLOGIC_STATUS_REG);
+}
+
+static inline void blogic_setcmdparam(struct blogic_adapter *adapter,
+ unsigned char value)
+{
+ outb(value, adapter->io_addr + BLOGIC_CMD_PARM_REG);
+}
+
+static inline unsigned char blogic_rddatain(struct blogic_adapter *adapter)
+{
+ return inb(adapter->io_addr + BLOGIC_DATAIN_REG);
+}
+
+static inline unsigned char blogic_rdint(struct blogic_adapter *adapter)
+{
+ return inb(adapter->io_addr + BLOGIC_INT_REG);
+}
+
+static inline unsigned char blogic_rdgeom(struct blogic_adapter *adapter)
+{
+ return inb(adapter->io_addr + BLOGIC_GEOMETRY_REG);
+}
+
+/*
+ blogic_execmbox issues an Execute Mailbox Command, which
+ notifies the Host Adapter that an entry has been made in an Outgoing
+ Mailbox.
+*/
+
+static inline void blogic_execmbox(struct blogic_adapter *adapter)
+{
+ blogic_setcmdparam(adapter, BLOGIC_EXEC_MBOX_CMD);
+}
+
+/*
+ blogic_delay waits for Seconds to elapse.
+*/
+
+static inline void blogic_delay(int seconds)
+{
+ mdelay(1000 * seconds);
+}
+
+/*
+ virt_to_32bit_virt maps between Kernel Virtual Addresses and
+ 32 bit Kernel Virtual Addresses. This avoids compilation warnings
+ on 64 bit architectures.
+*/
+
+static inline u32 virt_to_32bit_virt(void *virt_addr)
+{
+ return (u32) (unsigned long) virt_addr;
+}
+
+/*
+ blogic_inc_count increments counter by 1, stopping at
+ 65535 rather than wrapping around to 0.
+*/
+
+static inline void blogic_inc_count(unsigned short *count)
+{
+ if (*count < 65535)
+ (*count)++;
+}
+
+/*
+ blogic_addcount increments Byte Counter by Amount.
+*/
+
+static inline void blogic_addcount(struct blogic_byte_count *bytecount,
+ unsigned int amount)
+{
+ bytecount->units += amount;
+ if (bytecount->units > 999999999) {
+ bytecount->units -= 1000000000;
+ bytecount->billions++;
+ }
+}
+
+/*
+ blogic_incszbucket increments the Bucket for Amount.
+*/
+
+static inline void blogic_incszbucket(unsigned int *cmdsz_buckets,
+ unsigned int amount)
+{
+ int index = 0;
+ if (amount < 8 * 1024) {
+ if (amount < 2 * 1024)
+ index = (amount < 1 * 1024 ? 0 : 1);
+ else
+ index = (amount < 4 * 1024 ? 2 : 3);
+ } else if (amount < 128 * 1024) {
+ if (amount < 32 * 1024)
+ index = (amount < 16 * 1024 ? 4 : 5);
+ else
+ index = (amount < 64 * 1024 ? 6 : 7);
+ } else
+ index = (amount < 256 * 1024 ? 8 : 9);
+ cmdsz_buckets[index]++;
+}
+
+/*
+ Define the version number of the FlashPoint Firmware (SCCB Manager).
+*/
+
+#define FLASHPOINT_FW_VER "5.02"
+
+/*
+ Define the possible return values from FlashPoint_HandleInterrupt.
+*/
+
+#define FPOINT_NORMAL_INT 0x00
+#define FPOINT_INTERN_ERR 0xFE
+#define FPOINT_EXT_RESET 0xFF
+
+/*
+ Define prototypes for the forward referenced BusLogic Driver
+ Internal Functions.
+*/
+
+static const char *blogic_drvr_info(struct Scsi_Host *);
+static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
+static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *);
+static int blogic_slaveconfig(struct scsi_device *);
+static void blogic_qcompleted_ccb(struct blogic_ccb *);
+static irqreturn_t blogic_inthandler(int, void *);
+static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset);
+static void blogic_msg(enum blogic_msglevel, char *, struct blogic_adapter *, ...);
+static int __init blogic_setup(char *);
+
+#endif /* _BUSLOGIC_H */
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
new file mode 100644
index 000000000..5c74e4c52
--- /dev/null
+++ b/drivers/scsi/FlashPoint.c
@@ -0,0 +1,7588 @@
+/*
+
+ FlashPoint.c -- FlashPoint SCCB Manager for Linux
+
+ This file contains the FlashPoint SCCB Manager from BusLogic's FlashPoint
+ Driver Developer's Kit, with minor modifications by Leonard N. Zubkoff for
+ Linux compatibility. It was provided by BusLogic in the form of 16 separate
+ source files, which would have unnecessarily cluttered the scsi directory, so
+ the individual files have been combined into this single file.
+
+ Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+
+ This file is available under both the GNU General Public License
+ and a BSD-style copyright; see LICENSE.FlashPoint for details.
+
+*/
+
+
+#ifdef CONFIG_SCSI_FLASHPOINT
+
+#define MAX_CARDS 8
+#undef BUSTYPE_PCI
+
+#define CRCMASK 0xA001
+
+#define FAILURE 0xFFFFFFFFL
+
+struct sccb;
+typedef void (*CALL_BK_FN) (struct sccb *);
+
+struct sccb_mgr_info {
+ u32 si_baseaddr;
+ unsigned char si_present;
+ unsigned char si_intvect;
+ unsigned char si_id;
+ unsigned char si_lun;
+ u16 si_fw_revision;
+ u16 si_per_targ_init_sync;
+ u16 si_per_targ_fast_nego;
+ u16 si_per_targ_ultra_nego;
+ u16 si_per_targ_no_disc;
+ u16 si_per_targ_wide_nego;
+ u16 si_flags;
+ unsigned char si_card_family;
+ unsigned char si_bustype;
+ unsigned char si_card_model[3];
+ unsigned char si_relative_cardnum;
+ unsigned char si_reserved[4];
+ u32 si_OS_reserved;
+ unsigned char si_XlatInfo[4];
+ u32 si_reserved2[5];
+ u32 si_secondary_range;
+};
+
+#define SCSI_PARITY_ENA 0x0001
+#define LOW_BYTE_TERM 0x0010
+#define HIGH_BYTE_TERM 0x0020
+#define BUSTYPE_PCI 0x3
+
+#define SUPPORT_16TAR_32LUN 0x0002
+#define SOFT_RESET 0x0004
+#define EXTENDED_TRANSLATION 0x0008
+#define POST_ALL_UNDERRRUNS 0x0040
+#define FLAG_SCAM_ENABLED 0x0080
+#define FLAG_SCAM_LEVEL2 0x0100
+
+#define HARPOON_FAMILY 0x02
+
+/* SCCB struct used for both SCCB and UCB manager compiles!
+ * The UCB Manager treats the SCCB as it's 'native hardware structure'
+ */
+
+/*#pragma pack(1)*/
+struct sccb {
+ unsigned char OperationCode;
+ unsigned char ControlByte;
+ unsigned char CdbLength;
+ unsigned char RequestSenseLength;
+ u32 DataLength;
+ void *DataPointer;
+ unsigned char CcbRes[2];
+ unsigned char HostStatus;
+ unsigned char TargetStatus;
+ unsigned char TargID;
+ unsigned char Lun;
+ unsigned char Cdb[12];
+ unsigned char CcbRes1;
+ unsigned char Reserved1;
+ u32 Reserved2;
+ u32 SensePointer;
+
+ CALL_BK_FN SccbCallback; /* VOID (*SccbCallback)(); */
+ u32 SccbIOPort; /* Identifies board base port */
+ unsigned char SccbStatus;
+ unsigned char SCCBRes2;
+ u16 SccbOSFlags;
+
+ u32 Sccb_XferCnt; /* actual transfer count */
+ u32 Sccb_ATC;
+ u32 SccbVirtDataPtr; /* virtual addr for OS/2 */
+ u32 Sccb_res1;
+ u16 Sccb_MGRFlags;
+ u16 Sccb_sgseg;
+ unsigned char Sccb_scsimsg; /* identify msg for selection */
+ unsigned char Sccb_tag;
+ unsigned char Sccb_scsistat;
+ unsigned char Sccb_idmsg; /* image of last msg in */
+ struct sccb *Sccb_forwardlink;
+ struct sccb *Sccb_backlink;
+ u32 Sccb_savedATC;
+ unsigned char Save_Cdb[6];
+ unsigned char Save_CdbLen;
+ unsigned char Sccb_XferState;
+ u32 Sccb_SGoffset;
+};
+
+#pragma pack()
+
+#define SCATTER_GATHER_COMMAND 0x02
+#define RESIDUAL_COMMAND 0x03
+#define RESIDUAL_SG_COMMAND 0x04
+#define RESET_COMMAND 0x81
+
+#define F_USE_CMD_Q 0x20 /*Inidcates TAGGED command. */
+#define TAG_TYPE_MASK 0xC0 /*Type of tag msg to send. */
+#define SCCB_DATA_XFER_OUT 0x10 /* Write */
+#define SCCB_DATA_XFER_IN 0x08 /* Read */
+
+#define NO_AUTO_REQUEST_SENSE 0x01 /* No Request Sense Buffer */
+
+#define BUS_FREE_ST 0
+#define SELECT_ST 1
+#define SELECT_BDR_ST 2 /* Select w\ Bus Device Reset */
+#define SELECT_SN_ST 3 /* Select w\ Sync Nego */
+#define SELECT_WN_ST 4 /* Select w\ Wide Data Nego */
+#define SELECT_Q_ST 5 /* Select w\ Tagged Q'ing */
+#define COMMAND_ST 6
+#define DATA_OUT_ST 7
+#define DATA_IN_ST 8
+#define DISCONNECT_ST 9
+#define ABORT_ST 11
+
+#define F_HOST_XFER_DIR 0x01
+#define F_ALL_XFERRED 0x02
+#define F_SG_XFER 0x04
+#define F_AUTO_SENSE 0x08
+#define F_ODD_BALL_CNT 0x10
+#define F_NO_DATA_YET 0x80
+
+#define F_STATUSLOADED 0x01
+#define F_DEV_SELECTED 0x04
+
+#define SCCB_COMPLETE 0x00 /* SCCB completed without error */
+#define SCCB_DATA_UNDER_RUN 0x0C
+#define SCCB_SELECTION_TIMEOUT 0x11 /* Set SCSI selection timed out */
+#define SCCB_DATA_OVER_RUN 0x12
+#define SCCB_PHASE_SEQUENCE_FAIL 0x14 /* Target bus phase sequence failure */
+
+#define SCCB_GROSS_FW_ERR 0x27 /* Major problem! */
+#define SCCB_BM_ERR 0x30 /* BusMaster error. */
+#define SCCB_PARITY_ERR 0x34 /* SCSI parity error */
+
+#define SCCB_IN_PROCESS 0x00
+#define SCCB_SUCCESS 0x01
+#define SCCB_ABORT 0x02
+#define SCCB_ERROR 0x04
+
+#define ORION_FW_REV 3110
+
+#define QUEUE_DEPTH 254+1 /*1 for Normal disconnect 32 for Q'ing. */
+
+#define MAX_MB_CARDS 4 /* Max. no of cards suppoerted on Mother Board */
+
+#define MAX_SCSI_TAR 16
+#define MAX_LUN 32
+#define LUN_MASK 0x1f
+
+#define SG_BUF_CNT 16 /*Number of prefetched elements. */
+
+#define SG_ELEMENT_SIZE 8 /*Eight byte per element. */
+
+#define RD_HARPOON(ioport) inb((u32)ioport)
+#define RDW_HARPOON(ioport) inw((u32)ioport)
+#define RD_HARP32(ioport,offset,data) (data = inl((u32)(ioport + offset)))
+#define WR_HARPOON(ioport,val) outb((u8) val, (u32)ioport)
+#define WRW_HARPOON(ioport,val) outw((u16)val, (u32)ioport)
+#define WR_HARP32(ioport,offset,data) outl(data, (u32)(ioport + offset))
+
+#define TAR_SYNC_MASK (BIT(7)+BIT(6))
+#define SYNC_TRYING BIT(6)
+#define SYNC_SUPPORTED (BIT(7)+BIT(6))
+
+#define TAR_WIDE_MASK (BIT(5)+BIT(4))
+#define WIDE_ENABLED BIT(4)
+#define WIDE_NEGOCIATED BIT(5)
+
+#define TAR_TAG_Q_MASK (BIT(3)+BIT(2))
+#define TAG_Q_TRYING BIT(2)
+#define TAG_Q_REJECT BIT(3)
+
+#define TAR_ALLOW_DISC BIT(0)
+
+#define EE_SYNC_MASK (BIT(0)+BIT(1))
+#define EE_SYNC_5MB BIT(0)
+#define EE_SYNC_10MB BIT(1)
+#define EE_SYNC_20MB (BIT(0)+BIT(1))
+
+#define EE_WIDE_SCSI BIT(7)
+
+struct sccb_mgr_tar_info {
+
+ struct sccb *TarSelQ_Head;
+ struct sccb *TarSelQ_Tail;
+ unsigned char TarLUN_CA; /*Contingent Allgiance */
+ unsigned char TarTagQ_Cnt;
+ unsigned char TarSelQ_Cnt;
+ unsigned char TarStatus;
+ unsigned char TarEEValue;
+ unsigned char TarSyncCtrl;
+ unsigned char TarReserved[2]; /* for alignment */
+ unsigned char LunDiscQ_Idx[MAX_LUN];
+ unsigned char TarLUNBusy[MAX_LUN];
+};
+
+struct nvram_info {
+ unsigned char niModel; /* Model No. of card */
+ unsigned char niCardNo; /* Card no. */
+ u32 niBaseAddr; /* Port Address of card */
+ unsigned char niSysConf; /* Adapter Configuration byte -
+ Byte 16 of eeprom map */
+ unsigned char niScsiConf; /* SCSI Configuration byte -
+ Byte 17 of eeprom map */
+ unsigned char niScamConf; /* SCAM Configuration byte -
+ Byte 20 of eeprom map */
+ unsigned char niAdapId; /* Host Adapter ID -
+ Byte 24 of eerpom map */
+ unsigned char niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte
+ of targets */
+ unsigned char niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name
+ string of Targets */
+};
+
+#define MODEL_LT 1
+#define MODEL_DL 2
+#define MODEL_LW 3
+#define MODEL_DW 4
+
+struct sccb_card {
+ struct sccb *currentSCCB;
+ struct sccb_mgr_info *cardInfo;
+
+ u32 ioPort;
+
+ unsigned short cmdCounter;
+ unsigned char discQCount;
+ unsigned char tagQ_Lst;
+ unsigned char cardIndex;
+ unsigned char scanIndex;
+ unsigned char globalFlags;
+ unsigned char ourId;
+ struct nvram_info *pNvRamInfo;
+ struct sccb *discQ_Tbl[QUEUE_DEPTH];
+
+};
+
+#define F_TAG_STARTED 0x01
+#define F_CONLUN_IO 0x02
+#define F_DO_RENEGO 0x04
+#define F_NO_FILTER 0x08
+#define F_GREEN_PC 0x10
+#define F_HOST_XFER_ACT 0x20
+#define F_NEW_SCCB_CMD 0x40
+#define F_UPDATE_EEPROM 0x80
+
+#define ID_STRING_LENGTH 32
+#define TYPE_CODE0 0x63 /*Level2 Mstr (bits 7-6), */
+
+#define SLV_TYPE_CODE0 0xA3 /*Priority Bit set (bits 7-6), */
+
+#define ASSIGN_ID 0x00
+#define SET_P_FLAG 0x01
+#define CFG_CMPLT 0x03
+#define DOM_MSTR 0x0F
+#define SYNC_PTRN 0x1F
+
+#define ID_0_7 0x18
+#define ID_8_F 0x11
+#define MISC_CODE 0x14
+#define CLR_P_FLAG 0x18
+
+#define INIT_SELTD 0x01
+#define LEVEL2_TAR 0x02
+
+enum scam_id_st { ID0, ID1, ID2, ID3, ID4, ID5, ID6, ID7, ID8, ID9, ID10, ID11,
+ ID12,
+ ID13, ID14, ID15, ID_UNUSED, ID_UNASSIGNED, ID_ASSIGNED, LEGACY,
+ CLR_PRIORITY, NO_ID_AVAIL
+};
+
+typedef struct SCCBscam_info {
+
+ unsigned char id_string[ID_STRING_LENGTH];
+ enum scam_id_st state;
+
+} SCCBSCAM_INFO;
+
+#define SCSI_REQUEST_SENSE 0x03
+#define SCSI_READ 0x08
+#define SCSI_WRITE 0x0A
+#define SCSI_START_STOP_UNIT 0x1B
+#define SCSI_READ_EXTENDED 0x28
+#define SCSI_WRITE_EXTENDED 0x2A
+#define SCSI_WRITE_AND_VERIFY 0x2E
+
+#define SSGOOD 0x00
+#define SSCHECK 0x02
+#define SSQ_FULL 0x28
+
+#define SMCMD_COMP 0x00
+#define SMEXT 0x01
+#define SMSAVE_DATA_PTR 0x02
+#define SMREST_DATA_PTR 0x03
+#define SMDISC 0x04
+#define SMABORT 0x06
+#define SMREJECT 0x07
+#define SMNO_OP 0x08
+#define SMPARITY 0x09
+#define SMDEV_RESET 0x0C
+#define SMABORT_TAG 0x0D
+#define SMINIT_RECOVERY 0x0F
+#define SMREL_RECOVERY 0x10
+
+#define SMIDENT 0x80
+#define DISC_PRIV 0x40
+
+#define SMSYNC 0x01
+#define SMWDTR 0x03
+#define SM8BIT 0x00
+#define SM16BIT 0x01
+#define SMIGNORWR 0x23 /* Ignore Wide Residue */
+
+#define SIX_BYTE_CMD 0x06
+#define TWELVE_BYTE_CMD 0x0C
+
+#define ASYNC 0x00
+#define MAX_OFFSET 0x0F /* Maxbyteoffset for Sync Xfers */
+
+#define EEPROM_WD_CNT 256
+
+#define EEPROM_CHECK_SUM 0
+#define FW_SIGNATURE 2
+#define MODEL_NUMB_0 4
+#define MODEL_NUMB_2 6
+#define MODEL_NUMB_4 8
+#define SYSTEM_CONFIG 16
+#define SCSI_CONFIG 17
+#define BIOS_CONFIG 18
+#define SCAM_CONFIG 20
+#define ADAPTER_SCSI_ID 24
+
+#define IGNORE_B_SCAN 32
+#define SEND_START_ENA 34
+#define DEVICE_ENABLE 36
+
+#define SYNC_RATE_TBL 38
+#define SYNC_RATE_TBL01 38
+#define SYNC_RATE_TBL23 40
+#define SYNC_RATE_TBL45 42
+#define SYNC_RATE_TBL67 44
+#define SYNC_RATE_TBL89 46
+#define SYNC_RATE_TBLab 48
+#define SYNC_RATE_TBLcd 50
+#define SYNC_RATE_TBLef 52
+
+#define EE_SCAMBASE 256
+
+#define SCAM_ENABLED BIT(2)
+#define SCAM_LEVEL2 BIT(3)
+
+#define RENEGO_ENA BIT(10)
+#define CONNIO_ENA BIT(11)
+#define GREEN_PC_ENA BIT(12)
+
+#define AUTO_RATE_00 00
+#define AUTO_RATE_05 01
+#define AUTO_RATE_10 02
+#define AUTO_RATE_20 03
+
+#define WIDE_NEGO_BIT BIT(7)
+#define DISC_ENABLE_BIT BIT(6)
+
+#define hp_vendor_id_0 0x00 /* LSB */
+#define ORION_VEND_0 0x4B
+
+#define hp_vendor_id_1 0x01 /* MSB */
+#define ORION_VEND_1 0x10
+
+#define hp_device_id_0 0x02 /* LSB */
+#define ORION_DEV_0 0x30
+
+#define hp_device_id_1 0x03 /* MSB */
+#define ORION_DEV_1 0x81
+
+ /* Sub Vendor ID and Sub Device ID only available in
+ Harpoon Version 2 and higher */
+
+#define hp_sub_device_id_0 0x06 /* LSB */
+
+#define hp_semaphore 0x0C
+#define SCCB_MGR_ACTIVE BIT(0)
+#define TICKLE_ME BIT(1)
+#define SCCB_MGR_PRESENT BIT(3)
+#define BIOS_IN_USE BIT(4)
+
+#define hp_sys_ctrl 0x0F
+
+#define STOP_CLK BIT(0) /*Turn off BusMaster Clock */
+#define DRVR_RST BIT(1) /*Firmware Reset to 80C15 chip */
+#define HALT_MACH BIT(3) /*Halt State Machine */
+#define HARD_ABORT BIT(4) /*Hard Abort */
+
+#define hp_host_blk_cnt 0x13
+
+#define XFER_BLK64 0x06 /* 1 1 0 64 byte per block */
+
+#define BM_THRESHOLD 0x40 /* PCI mode can only xfer 16 bytes */
+
+#define hp_int_mask 0x17
+
+#define INT_CMD_COMPL BIT(0) /* DMA command complete */
+#define INT_EXT_STATUS BIT(1) /* Extended Status Set */
+
+#define hp_xfer_cnt_lo 0x18
+#define hp_xfer_cnt_hi 0x1A
+#define hp_xfer_cmd 0x1B
+
+#define XFER_HOST_DMA 0x00 /* 0 0 0 Transfer Host -> DMA */
+#define XFER_DMA_HOST 0x01 /* 0 0 1 Transfer DMA -> Host */
+
+#define XFER_HOST_AUTO 0x00 /* 0 0 Auto Transfer Size */
+
+#define XFER_DMA_8BIT 0x20 /* 0 1 8 BIT Transfer Size */
+
+#define DISABLE_INT BIT(7) /*Do not interrupt at end of cmd. */
+
+#define HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_8BIT))
+#define HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_8BIT))
+
+#define hp_host_addr_lo 0x1C
+#define hp_host_addr_hmi 0x1E
+
+#define hp_ee_ctrl 0x22
+
+#define EXT_ARB_ACK BIT(7)
+#define SCSI_TERM_ENA_H BIT(6) /* SCSI high byte terminator */
+#define SEE_MS BIT(5)
+#define SEE_CS BIT(3)
+#define SEE_CLK BIT(2)
+#define SEE_DO BIT(1)
+#define SEE_DI BIT(0)
+
+#define EE_READ 0x06
+#define EE_WRITE 0x05
+#define EWEN 0x04
+#define EWEN_ADDR 0x03C0
+#define EWDS 0x04
+#define EWDS_ADDR 0x0000
+
+#define hp_bm_ctrl 0x26
+
+#define SCSI_TERM_ENA_L BIT(0) /*Enable/Disable external terminators */
+#define FLUSH_XFER_CNTR BIT(1) /*Flush transfer counter */
+#define FORCE1_XFER BIT(5) /*Always xfer one byte in byte mode */
+#define FAST_SINGLE BIT(6) /*?? */
+
+#define BMCTRL_DEFAULT (FORCE1_XFER|FAST_SINGLE|SCSI_TERM_ENA_L)
+
+#define hp_sg_addr 0x28
+#define hp_page_ctrl 0x29
+
+#define SCATTER_EN BIT(0)
+#define SGRAM_ARAM BIT(1)
+#define G_INT_DISABLE BIT(3) /* Enable/Disable all Interrupts */
+#define NARROW_SCSI_CARD BIT(4) /* NARROW/WIDE SCSI config pin */
+
+#define hp_pci_stat_cfg 0x2D
+
+#define REC_MASTER_ABORT BIT(5) /*received Master abort */
+
+#define hp_rev_num 0x33
+
+#define hp_stack_data 0x34
+#define hp_stack_addr 0x35
+
+#define hp_ext_status 0x36
+
+#define BM_FORCE_OFF BIT(0) /*Bus Master is forced to get off */
+#define PCI_TGT_ABORT BIT(0) /*PCI bus master transaction aborted */
+#define PCI_DEV_TMOUT BIT(1) /*PCI Device Time out */
+#define CMD_ABORTED BIT(4) /*Command aborted */
+#define BM_PARITY_ERR BIT(5) /*parity error on data received */
+#define PIO_OVERRUN BIT(6) /*Slave data overrun */
+#define BM_CMD_BUSY BIT(7) /*Bus master transfer command busy */
+#define BAD_EXT_STATUS (BM_FORCE_OFF | PCI_DEV_TMOUT | CMD_ABORTED | \
+ BM_PARITY_ERR | PIO_OVERRUN)
+
+#define hp_int_status 0x37
+
+#define EXT_STATUS_ON BIT(1) /*Extended status is valid */
+#define SCSI_INTERRUPT BIT(2) /*Global indication of a SCSI int. */
+#define INT_ASSERTED BIT(5) /* */
+
+#define hp_fifo_cnt 0x38
+
+#define hp_intena 0x40
+
+#define RESET BIT(7)
+#define PROG_HLT BIT(6)
+#define PARITY BIT(5)
+#define FIFO BIT(4)
+#define SEL BIT(3)
+#define SCAM_SEL BIT(2)
+#define RSEL BIT(1)
+#define TIMEOUT BIT(0)
+#define BUS_FREE BIT(15)
+#define XFER_CNT_0 BIT(14)
+#define PHASE BIT(13)
+#define IUNKWN BIT(12)
+#define ICMD_COMP BIT(11)
+#define ITICKLE BIT(10)
+#define IDO_STRT BIT(9)
+#define ITAR_DISC BIT(8)
+#define AUTO_INT (BIT(12)+BIT(11)+BIT(10)+BIT(9)+BIT(8))
+#define CLR_ALL_INT 0xFFFF
+#define CLR_ALL_INT_1 0xFF00
+
+#define hp_intstat 0x42
+
+#define hp_scsisig 0x44
+
+#define SCSI_SEL BIT(7)
+#define SCSI_BSY BIT(6)
+#define SCSI_REQ BIT(5)
+#define SCSI_ACK BIT(4)
+#define SCSI_ATN BIT(3)
+#define SCSI_CD BIT(2)
+#define SCSI_MSG BIT(1)
+#define SCSI_IOBIT BIT(0)
+
+#define S_SCSI_PHZ (BIT(2)+BIT(1)+BIT(0))
+#define S_MSGO_PH (BIT(2)+BIT(1) )
+#define S_MSGI_PH (BIT(2)+BIT(1)+BIT(0))
+#define S_DATAI_PH ( BIT(0))
+#define S_DATAO_PH 0x00
+#define S_ILL_PH ( BIT(1) )
+
+#define hp_scsictrl_0 0x45
+
+#define SEL_TAR BIT(6)
+#define ENA_ATN BIT(4)
+#define ENA_RESEL BIT(2)
+#define SCSI_RST BIT(1)
+#define ENA_SCAM_SEL BIT(0)
+
+#define hp_portctrl_0 0x46
+
+#define SCSI_PORT BIT(7)
+#define SCSI_INBIT BIT(6)
+#define DMA_PORT BIT(5)
+#define DMA_RD BIT(4)
+#define HOST_PORT BIT(3)
+#define HOST_WRT BIT(2)
+#define SCSI_BUS_EN BIT(1)
+#define START_TO BIT(0)
+
+#define hp_scsireset 0x47
+
+#define SCSI_INI BIT(6)
+#define SCAM_EN BIT(5)
+#define DMA_RESET BIT(3)
+#define HPSCSI_RESET BIT(2)
+#define PROG_RESET BIT(1)
+#define FIFO_CLR BIT(0)
+
+#define hp_xfercnt_0 0x48
+#define hp_xfercnt_2 0x4A
+
+#define hp_fifodata_0 0x4C
+#define hp_addstat 0x4E
+
+#define SCAM_TIMER BIT(7)
+#define SCSI_MODE8 BIT(3)
+#define SCSI_PAR_ERR BIT(0)
+
+#define hp_prgmcnt_0 0x4F
+
+#define hp_selfid_0 0x50
+#define hp_selfid_1 0x51
+#define hp_arb_id 0x52
+
+#define hp_select_id 0x53
+
+#define hp_synctarg_base 0x54
+#define hp_synctarg_12 0x54
+#define hp_synctarg_13 0x55
+#define hp_synctarg_14 0x56
+#define hp_synctarg_15 0x57
+
+#define hp_synctarg_8 0x58
+#define hp_synctarg_9 0x59
+#define hp_synctarg_10 0x5A
+#define hp_synctarg_11 0x5B
+
+#define hp_synctarg_4 0x5C
+#define hp_synctarg_5 0x5D
+#define hp_synctarg_6 0x5E
+#define hp_synctarg_7 0x5F
+
+#define hp_synctarg_0 0x60
+#define hp_synctarg_1 0x61
+#define hp_synctarg_2 0x62
+#define hp_synctarg_3 0x63
+
+#define NARROW_SCSI BIT(4)
+#define DEFAULT_OFFSET 0x0F
+
+#define hp_autostart_0 0x64
+#define hp_autostart_1 0x65
+#define hp_autostart_3 0x67
+
+#define AUTO_IMMED BIT(5)
+#define SELECT BIT(6)
+#define END_DATA (BIT(7)+BIT(6))
+
+#define hp_gp_reg_0 0x68
+#define hp_gp_reg_1 0x69
+#define hp_gp_reg_3 0x6B
+
+#define hp_seltimeout 0x6C
+
+#define TO_4ms 0x67 /* 3.9959ms */
+
+#define TO_5ms 0x03 /* 4.9152ms */
+#define TO_10ms 0x07 /* 11.xxxms */
+#define TO_250ms 0x99 /* 250.68ms */
+#define TO_290ms 0xB1 /* 289.99ms */
+
+#define hp_clkctrl_0 0x6D
+
+#define PWR_DWN BIT(6)
+#define ACTdeassert BIT(4)
+#define CLK_40MHZ (BIT(1) + BIT(0))
+
+#define CLKCTRL_DEFAULT (ACTdeassert | CLK_40MHZ)
+
+#define hp_fiforead 0x6E
+#define hp_fifowrite 0x6F
+
+#define hp_offsetctr 0x70
+#define hp_xferstat 0x71
+
+#define FIFO_EMPTY BIT(6)
+
+#define hp_portctrl_1 0x72
+
+#define CHK_SCSI_P BIT(3)
+#define HOST_MODE8 BIT(0)
+
+#define hp_xfer_pad 0x73
+
+#define ID_UNLOCK BIT(3)
+
+#define hp_scsidata_0 0x74
+#define hp_scsidata_1 0x75
+
+#define hp_aramBase 0x80
+#define BIOS_DATA_OFFSET 0x60
+#define BIOS_RELATIVE_CARD 0x64
+
+#define AR3 (BIT(9) + BIT(8))
+#define SDATA BIT(10)
+
+#define CRD_OP BIT(11) /* Cmp Reg. w/ Data */
+
+#define CRR_OP BIT(12) /* Cmp Reg. w. Reg. */
+
+#define CPE_OP (BIT(14)+BIT(11)) /* Cmp SCSI phs & Branch EQ */
+
+#define CPN_OP (BIT(14)+BIT(12)) /* Cmp SCSI phs & Branch NOT EQ */
+
+#define ADATA_OUT 0x00
+#define ADATA_IN BIT(8)
+#define ACOMMAND BIT(10)
+#define ASTATUS (BIT(10)+BIT(8))
+#define AMSG_OUT (BIT(10)+BIT(9))
+#define AMSG_IN (BIT(10)+BIT(9)+BIT(8))
+
+#define BRH_OP BIT(13) /* Branch */
+
+#define ALWAYS 0x00
+#define EQUAL BIT(8)
+#define NOT_EQ BIT(9)
+
+#define TCB_OP (BIT(13)+BIT(11)) /* Test condition & branch */
+
+#define FIFO_0 BIT(10)
+
+#define MPM_OP BIT(15) /* Match phase and move data */
+
+#define MRR_OP BIT(14) /* Move DReg. to Reg. */
+
+#define S_IDREG (BIT(2)+BIT(1)+BIT(0))
+
+#define D_AR0 0x00
+#define D_AR1 BIT(0)
+#define D_BUCKET (BIT(2) + BIT(1) + BIT(0))
+
+#define RAT_OP (BIT(14)+BIT(13)+BIT(11))
+
+#define SSI_OP (BIT(15)+BIT(11))
+
+#define SSI_ITAR_DISC (ITAR_DISC >> 8)
+#define SSI_IDO_STRT (IDO_STRT >> 8)
+
+#define SSI_ICMD_COMP (ICMD_COMP >> 8)
+#define SSI_ITICKLE (ITICKLE >> 8)
+
+#define SSI_IUNKWN (IUNKWN >> 8)
+#define SSI_INO_CC (IUNKWN >> 8)
+#define SSI_IRFAIL (IUNKWN >> 8)
+
+#define NP 0x10 /*Next Phase */
+#define NTCMD 0x02 /*Non- Tagged Command start */
+#define CMDPZ 0x04 /*Command phase */
+#define DINT 0x12 /*Data Out/In interrupt */
+#define DI 0x13 /*Data Out */
+#define DC 0x19 /*Disconnect Message */
+#define ST 0x1D /*Status Phase */
+#define UNKNWN 0x24 /*Unknown bus action */
+#define CC 0x25 /*Command Completion failure */
+#define TICK 0x26 /*New target reselected us. */
+#define SELCHK 0x28 /*Select & Check SCSI ID latch reg */
+
+#define ID_MSG_STRT hp_aramBase + 0x00
+#define NON_TAG_ID_MSG hp_aramBase + 0x06
+#define CMD_STRT hp_aramBase + 0x08
+#define SYNC_MSGS hp_aramBase + 0x08
+
+#define TAG_STRT 0x00
+#define DISCONNECT_START 0x10/2
+#define END_DATA_START 0x14/2
+#define CMD_ONLY_STRT CMDPZ/2
+#define SELCHK_STRT SELCHK/2
+
+#define GET_XFER_CNT(port, xfercnt) {RD_HARP32(port,hp_xfercnt_0,xfercnt); xfercnt &= 0xFFFFFF;}
+/* #define GET_XFER_CNT(port, xfercnt) (xfercnt = RD_HARPOON(port+hp_xfercnt_2), \
+ xfercnt <<= 16,\
+ xfercnt |= RDW_HARPOON((unsigned short)(port+hp_xfercnt_0)))
+ */
+#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((port+hp_host_addr_lo), (unsigned short)(addr & 0x0000FFFFL)),\
+ addr >>= 16,\
+ WRW_HARPOON((port+hp_host_addr_hmi), (unsigned short)(addr & 0x0000FFFFL)),\
+ WR_HARP32(port,hp_xfercnt_0,count),\
+ WRW_HARPOON((port+hp_xfer_cnt_lo), (unsigned short)(count & 0x0000FFFFL)),\
+ count >>= 16,\
+ WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF)))
+
+#define ACCEPT_MSG(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, S_ILL_PH);}
+
+#define ACCEPT_MSG_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));}
+
+#define DISABLE_AUTO(port) (WR_HARPOON(port+hp_scsireset, PROG_RESET),\
+ WR_HARPOON(port+hp_scsireset, 0x00))
+
+#define ARAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) | SGRAM_ARAM)))
+
+#define SGRAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~SGRAM_ARAM)))
+
+#define MDISABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE)))
+
+#define MENABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE)))
+
+static unsigned char FPT_sisyncn(u32 port, unsigned char p_card,
+ unsigned char syncFlag);
+static void FPT_ssel(u32 port, unsigned char p_card);
+static void FPT_sres(u32 port, unsigned char p_card,
+ struct sccb_card *pCurrCard);
+static void FPT_shandem(u32 port, unsigned char p_card,
+ struct sccb *pCurrSCCB);
+static void FPT_stsyncn(u32 port, unsigned char p_card);
+static void FPT_sisyncr(u32 port, unsigned char sync_pulse,
+ unsigned char offset);
+static void FPT_sssyncv(u32 p_port, unsigned char p_id,
+ unsigned char p_sync_value,
+ struct sccb_mgr_tar_info *currTar_Info);
+static void FPT_sresb(u32 port, unsigned char p_card);
+static void FPT_sxfrp(u32 p_port, unsigned char p_card);
+static void FPT_schkdd(u32 port, unsigned char p_card);
+static unsigned char FPT_RdStack(u32 port, unsigned char index);
+static void FPT_WrStack(u32 portBase, unsigned char index,
+ unsigned char data);
+static unsigned char FPT_ChkIfChipInitialized(u32 ioPort);
+
+static void FPT_SendMsg(u32 port, unsigned char message);
+static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg,
+ unsigned char error_code);
+
+static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card);
+static void FPT_RNVRamData(struct nvram_info *pNvRamInfo);
+
+static unsigned char FPT_siwidn(u32 port, unsigned char p_card);
+static void FPT_stwidn(u32 port, unsigned char p_card);
+static void FPT_siwidr(u32 port, unsigned char width);
+
+static void FPT_queueSelectFail(struct sccb_card *pCurrCard,
+ unsigned char p_card);
+static void FPT_queueDisconnect(struct sccb *p_SCCB, unsigned char p_card);
+static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
+ struct sccb *p_SCCB, unsigned char p_card);
+static void FPT_queueSearchSelect(struct sccb_card *pCurrCard,
+ unsigned char p_card);
+static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code);
+static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char card);
+static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB,
+ unsigned char p_card);
+static void FPT_utilUpdateResidual(struct sccb *p_SCCB);
+static unsigned short FPT_CalcCrc16(unsigned char buffer[]);
+static unsigned char FPT_CalcLrc(unsigned char buffer[]);
+
+static void FPT_Wait1Second(u32 p_port);
+static void FPT_Wait(u32 p_port, unsigned char p_delay);
+static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode);
+static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data,
+ unsigned short ee_addr);
+static unsigned short FPT_utilEERead(u32 p_port,
+ unsigned short ee_addr);
+static unsigned short FPT_utilEEReadOrg(u32 p_port,
+ unsigned short ee_addr);
+static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd,
+ unsigned short ee_addr);
+
+static void FPT_phaseDataOut(u32 port, unsigned char p_card);
+static void FPT_phaseDataIn(u32 port, unsigned char p_card);
+static void FPT_phaseCommand(u32 port, unsigned char p_card);
+static void FPT_phaseStatus(u32 port, unsigned char p_card);
+static void FPT_phaseMsgOut(u32 port, unsigned char p_card);
+static void FPT_phaseMsgIn(u32 port, unsigned char p_card);
+static void FPT_phaseIllegal(u32 port, unsigned char p_card);
+
+static void FPT_phaseDecode(u32 port, unsigned char p_card);
+static void FPT_phaseChkFifo(u32 port, unsigned char p_card);
+static void FPT_phaseBusFree(u32 p_port, unsigned char p_card);
+
+static void FPT_XbowInit(u32 port, unsigned char scamFlg);
+static void FPT_BusMasterInit(u32 p_port);
+static void FPT_DiagEEPROM(u32 p_port);
+
+static void FPT_dataXferProcessor(u32 port,
+ struct sccb_card *pCurrCard);
+static void FPT_busMstrSGDataXferStart(u32 port,
+ struct sccb *pCurrSCCB);
+static void FPT_busMstrDataXferStart(u32 port,
+ struct sccb *pCurrSCCB);
+static void FPT_hostDataXferAbort(u32 port, unsigned char p_card,
+ struct sccb *pCurrSCCB);
+static void FPT_hostDataXferRestart(struct sccb *currSCCB);
+
+static unsigned char FPT_SccbMgr_bad_isr(u32 p_port,
+ unsigned char p_card,
+ struct sccb_card *pCurrCard,
+ unsigned short p_int);
+
+static void FPT_SccbMgrTableInitAll(void);
+static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard,
+ unsigned char p_card);
+static void FPT_SccbMgrTableInitTarget(unsigned char p_card,
+ unsigned char target);
+
+static void FPT_scini(unsigned char p_card, unsigned char p_our_id,
+ unsigned char p_power_up);
+
+static int FPT_scarb(u32 p_port, unsigned char p_sel_type);
+static void FPT_scbusf(u32 p_port);
+static void FPT_scsel(u32 p_port);
+static void FPT_scasid(unsigned char p_card, u32 p_port);
+static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data);
+static unsigned char FPT_scsendi(u32 p_port,
+ unsigned char p_id_string[]);
+static unsigned char FPT_sciso(u32 p_port,
+ unsigned char p_id_string[]);
+static void FPT_scwirod(u32 p_port, unsigned char p_data_bit);
+static void FPT_scwiros(u32 p_port, unsigned char p_data_bit);
+static unsigned char FPT_scvalq(unsigned char p_quintet);
+static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id);
+static void FPT_scwtsel(u32 p_port);
+static void FPT_inisci(unsigned char p_card, u32 p_port,
+ unsigned char p_our_id);
+static void FPT_scsavdi(unsigned char p_card, u32 p_port);
+static unsigned char FPT_scmachid(unsigned char p_card,
+ unsigned char p_id_string[]);
+
+static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card);
+static void FPT_autoLoadDefaultMap(u32 p_port);
+
+static struct sccb_mgr_tar_info FPT_sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] =
+ { {{0}} };
+static struct sccb_card FPT_BL_Card[MAX_CARDS] = { {0} };
+static SCCBSCAM_INFO FPT_scamInfo[MAX_SCSI_TAR] = { {{0}} };
+static struct nvram_info FPT_nvRamInfo[MAX_MB_CARDS] = { {0} };
+
+static unsigned char FPT_mbCards = 0;
+static unsigned char FPT_scamHAString[] =
+ { 0x63, 0x07, 'B', 'U', 'S', 'L', 'O', 'G', 'I', 'C',
+ ' ', 'B', 'T', '-', '9', '3', '0',
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static unsigned short FPT_default_intena = 0;
+
+static void (*FPT_s_PhaseTbl[8]) (u32, unsigned char) = {
+0};
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FlashPoint_ProbeHostAdapter
+ *
+ * Description: Setup and/or Search for cards and return info to caller.
+ *
+ *---------------------------------------------------------------------*/
+
+static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
+{
+ static unsigned char first_time = 1;
+
+ unsigned char i, j, id, ScamFlg;
+ unsigned short temp, temp2, temp3, temp4, temp5, temp6;
+ u32 ioport;
+ struct nvram_info *pCurrNvRam;
+
+ ioport = pCardInfo->si_baseaddr;
+
+ if (RD_HARPOON(ioport + hp_vendor_id_0) != ORION_VEND_0)
+ return (int)FAILURE;
+
+ if ((RD_HARPOON(ioport + hp_vendor_id_1) != ORION_VEND_1))
+ return (int)FAILURE;
+
+ if ((RD_HARPOON(ioport + hp_device_id_0) != ORION_DEV_0))
+ return (int)FAILURE;
+
+ if ((RD_HARPOON(ioport + hp_device_id_1) != ORION_DEV_1))
+ return (int)FAILURE;
+
+ if (RD_HARPOON(ioport + hp_rev_num) != 0x0f) {
+
+/* For new Harpoon then check for sub_device ID LSB
+ the bits(0-3) must be all ZERO for compatible with
+ current version of SCCBMgr, else skip this Harpoon
+ device. */
+
+ if (RD_HARPOON(ioport + hp_sub_device_id_0) & 0x0f)
+ return (int)FAILURE;
+ }
+
+ if (first_time) {
+ FPT_SccbMgrTableInitAll();
+ first_time = 0;
+ FPT_mbCards = 0;
+ }
+
+ if (FPT_RdStack(ioport, 0) != 0x00) {
+ if (FPT_ChkIfChipInitialized(ioport) == 0) {
+ pCurrNvRam = NULL;
+ WR_HARPOON(ioport + hp_semaphore, 0x00);
+ FPT_XbowInit(ioport, 0); /*Must Init the SCSI before attempting */
+ FPT_DiagEEPROM(ioport);
+ } else {
+ if (FPT_mbCards < MAX_MB_CARDS) {
+ pCurrNvRam = &FPT_nvRamInfo[FPT_mbCards];
+ FPT_mbCards++;
+ pCurrNvRam->niBaseAddr = ioport;
+ FPT_RNVRamData(pCurrNvRam);
+ } else
+ return (int)FAILURE;
+ }
+ } else
+ pCurrNvRam = NULL;
+
+ WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(ioport + hp_sys_ctrl, 0x00);
+
+ if (pCurrNvRam)
+ pCardInfo->si_id = pCurrNvRam->niAdapId;
+ else
+ pCardInfo->si_id =
+ (unsigned
+ char)(FPT_utilEERead(ioport,
+ (ADAPTER_SCSI_ID /
+ 2)) & (unsigned char)0x0FF);
+
+ pCardInfo->si_lun = 0x00;
+ pCardInfo->si_fw_revision = ORION_FW_REV;
+ temp2 = 0x0000;
+ temp3 = 0x0000;
+ temp4 = 0x0000;
+ temp5 = 0x0000;
+ temp6 = 0x0000;
+
+ for (id = 0; id < (16 / 2); id++) {
+
+ if (pCurrNvRam) {
+ temp = (unsigned short)pCurrNvRam->niSyncTbl[id];
+ temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+ (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+ } else
+ temp =
+ FPT_utilEERead(ioport,
+ (unsigned short)((SYNC_RATE_TBL / 2)
+ + id));
+
+ for (i = 0; i < 2; temp >>= 8, i++) {
+
+ temp2 >>= 1;
+ temp3 >>= 1;
+ temp4 >>= 1;
+ temp5 >>= 1;
+ temp6 >>= 1;
+ switch (temp & 0x3) {
+ case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */
+ temp6 |= 0x8000; /* Fall through */
+ case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */
+ temp5 |= 0x8000; /* Fall through */
+ case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */
+ temp2 |= 0x8000; /* Fall through */
+ case AUTO_RATE_00: /* Asynchronous */
+ break;
+ }
+
+ if (temp & DISC_ENABLE_BIT)
+ temp3 |= 0x8000;
+
+ if (temp & WIDE_NEGO_BIT)
+ temp4 |= 0x8000;
+
+ }
+ }
+
+ pCardInfo->si_per_targ_init_sync = temp2;
+ pCardInfo->si_per_targ_no_disc = temp3;
+ pCardInfo->si_per_targ_wide_nego = temp4;
+ pCardInfo->si_per_targ_fast_nego = temp5;
+ pCardInfo->si_per_targ_ultra_nego = temp6;
+
+ if (pCurrNvRam)
+ i = pCurrNvRam->niSysConf;
+ else
+ i = (unsigned
+ char)(FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2)));
+
+ if (pCurrNvRam)
+ ScamFlg = pCurrNvRam->niScamConf;
+ else
+ ScamFlg =
+ (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
+
+ pCardInfo->si_flags = 0x0000;
+
+ if (i & 0x01)
+ pCardInfo->si_flags |= SCSI_PARITY_ENA;
+
+ if (!(i & 0x02))
+ pCardInfo->si_flags |= SOFT_RESET;
+
+ if (i & 0x10)
+ pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+
+ if (ScamFlg & SCAM_ENABLED)
+ pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+
+ if (ScamFlg & SCAM_LEVEL2)
+ pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+
+ j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ if (i & 0x04) {
+ j |= SCSI_TERM_ENA_L;
+ }
+ WR_HARPOON(ioport + hp_bm_ctrl, j);
+
+ j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+ if (i & 0x08) {
+ j |= SCSI_TERM_ENA_H;
+ }
+ WR_HARPOON(ioport + hp_ee_ctrl, j);
+
+ if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
+
+ pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+
+ pCardInfo->si_card_family = HARPOON_FAMILY;
+ pCardInfo->si_bustype = BUSTYPE_PCI;
+
+ if (pCurrNvRam) {
+ pCardInfo->si_card_model[0] = '9';
+ switch (pCurrNvRam->niModel & 0x0f) {
+ case MODEL_LT:
+ pCardInfo->si_card_model[1] = '3';
+ pCardInfo->si_card_model[2] = '0';
+ break;
+ case MODEL_LW:
+ pCardInfo->si_card_model[1] = '5';
+ pCardInfo->si_card_model[2] = '0';
+ break;
+ case MODEL_DL:
+ pCardInfo->si_card_model[1] = '3';
+ pCardInfo->si_card_model[2] = '2';
+ break;
+ case MODEL_DW:
+ pCardInfo->si_card_model[1] = '5';
+ pCardInfo->si_card_model[2] = '2';
+ break;
+ }
+ } else {
+ temp = FPT_utilEERead(ioport, (MODEL_NUMB_0 / 2));
+ pCardInfo->si_card_model[0] = (unsigned char)(temp >> 8);
+ temp = FPT_utilEERead(ioport, (MODEL_NUMB_2 / 2));
+
+ pCardInfo->si_card_model[1] = (unsigned char)(temp & 0x00FF);
+ pCardInfo->si_card_model[2] = (unsigned char)(temp >> 8);
+ }
+
+ if (pCardInfo->si_card_model[1] == '3') {
+ if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
+ pCardInfo->si_flags |= LOW_BYTE_TERM;
+ } else if (pCardInfo->si_card_model[2] == '0') {
+ temp = RD_HARPOON(ioport + hp_xfer_pad);
+ WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
+ if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
+ pCardInfo->si_flags |= LOW_BYTE_TERM;
+ WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
+ if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
+ pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ WR_HARPOON(ioport + hp_xfer_pad, temp);
+ } else {
+ temp = RD_HARPOON(ioport + hp_ee_ctrl);
+ temp2 = RD_HARPOON(ioport + hp_xfer_pad);
+ WR_HARPOON(ioport + hp_ee_ctrl, (temp | SEE_CS));
+ WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4)));
+ temp3 = 0;
+ for (i = 0; i < 8; i++) {
+ temp3 <<= 1;
+ if (!(RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)))
+ temp3 |= 1;
+ WR_HARPOON(ioport + hp_xfer_pad, (temp2 & ~BIT(4)));
+ WR_HARPOON(ioport + hp_xfer_pad, (temp2 | BIT(4)));
+ }
+ WR_HARPOON(ioport + hp_ee_ctrl, temp);
+ WR_HARPOON(ioport + hp_xfer_pad, temp2);
+ if (!(temp3 & BIT(7)))
+ pCardInfo->si_flags |= LOW_BYTE_TERM;
+ if (!(temp3 & BIT(6)))
+ pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ }
+
+ ARAM_ACCESS(ioport);
+
+ for (i = 0; i < 4; i++) {
+
+ pCardInfo->si_XlatInfo[i] =
+ RD_HARPOON(ioport + hp_aramBase + BIOS_DATA_OFFSET + i);
+ }
+
+ /* return with -1 if no sort, else return with
+ logical card number sorted by BIOS (zero-based) */
+
+ pCardInfo->si_relative_cardnum =
+ (unsigned
+ char)(RD_HARPOON(ioport + hp_aramBase + BIOS_RELATIVE_CARD) - 1);
+
+ SGRAM_ACCESS(ioport);
+
+ FPT_s_PhaseTbl[0] = FPT_phaseDataOut;
+ FPT_s_PhaseTbl[1] = FPT_phaseDataIn;
+ FPT_s_PhaseTbl[2] = FPT_phaseIllegal;
+ FPT_s_PhaseTbl[3] = FPT_phaseIllegal;
+ FPT_s_PhaseTbl[4] = FPT_phaseCommand;
+ FPT_s_PhaseTbl[5] = FPT_phaseStatus;
+ FPT_s_PhaseTbl[6] = FPT_phaseMsgOut;
+ FPT_s_PhaseTbl[7] = FPT_phaseMsgIn;
+
+ pCardInfo->si_present = 0x01;
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FlashPoint_HardwareResetHostAdapter
+ *
+ * Description: Setup adapter for normal operation (hard reset).
+ *
+ *---------------------------------------------------------------------*/
+
+static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
+ *pCardInfo)
+{
+ struct sccb_card *CurrCard = NULL;
+ struct nvram_info *pCurrNvRam;
+ unsigned char i, j, thisCard, ScamFlg;
+ unsigned short temp, sync_bit_map, id;
+ u32 ioport;
+
+ ioport = pCardInfo->si_baseaddr;
+
+ for (thisCard = 0; thisCard <= MAX_CARDS; thisCard++) {
+
+ if (thisCard == MAX_CARDS)
+ return (void *)FAILURE;
+
+ if (FPT_BL_Card[thisCard].ioPort == ioport) {
+
+ CurrCard = &FPT_BL_Card[thisCard];
+ FPT_SccbMgrTableInitCard(CurrCard, thisCard);
+ break;
+ }
+
+ else if (FPT_BL_Card[thisCard].ioPort == 0x00) {
+
+ FPT_BL_Card[thisCard].ioPort = ioport;
+ CurrCard = &FPT_BL_Card[thisCard];
+
+ if (FPT_mbCards)
+ for (i = 0; i < FPT_mbCards; i++) {
+ if (CurrCard->ioPort ==
+ FPT_nvRamInfo[i].niBaseAddr)
+ CurrCard->pNvRamInfo =
+ &FPT_nvRamInfo[i];
+ }
+ FPT_SccbMgrTableInitCard(CurrCard, thisCard);
+ CurrCard->cardIndex = thisCard;
+ CurrCard->cardInfo = pCardInfo;
+
+ break;
+ }
+ }
+
+ pCurrNvRam = CurrCard->pNvRamInfo;
+
+ if (pCurrNvRam) {
+ ScamFlg = pCurrNvRam->niScamConf;
+ } else {
+ ScamFlg =
+ (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
+ }
+
+ FPT_BusMasterInit(ioport);
+ FPT_XbowInit(ioport, ScamFlg);
+
+ FPT_autoLoadDefaultMap(ioport);
+
+ for (i = 0, id = 0x01; i != pCardInfo->si_id; i++, id <<= 1) {
+ }
+
+ WR_HARPOON(ioport + hp_selfid_0, id);
+ WR_HARPOON(ioport + hp_selfid_1, 0x00);
+ WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
+ CurrCard->ourId = pCardInfo->si_id;
+
+ i = (unsigned char)pCardInfo->si_flags;
+ if (i & SCSI_PARITY_ENA)
+ WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
+
+ j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ if (i & LOW_BYTE_TERM)
+ j |= SCSI_TERM_ENA_L;
+ WR_HARPOON(ioport + hp_bm_ctrl, j);
+
+ j = (RD_HARPOON(ioport + hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+ if (i & HIGH_BYTE_TERM)
+ j |= SCSI_TERM_ENA_H;
+ WR_HARPOON(ioport + hp_ee_ctrl, j);
+
+ if (!(pCardInfo->si_flags & SOFT_RESET)) {
+
+ FPT_sresb(ioport, thisCard);
+
+ FPT_scini(thisCard, pCardInfo->si_id, 0);
+ }
+
+ if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+ CurrCard->globalFlags |= F_NO_FILTER;
+
+ if (pCurrNvRam) {
+ if (pCurrNvRam->niSysConf & 0x10)
+ CurrCard->globalFlags |= F_GREEN_PC;
+ } else {
+ if (FPT_utilEERead(ioport, (SYSTEM_CONFIG / 2)) & GREEN_PC_ENA)
+ CurrCard->globalFlags |= F_GREEN_PC;
+ }
+
+ /* Set global flag to indicate Re-Negotiation to be done on all
+ ckeck condition */
+ if (pCurrNvRam) {
+ if (pCurrNvRam->niScsiConf & 0x04)
+ CurrCard->globalFlags |= F_DO_RENEGO;
+ } else {
+ if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & RENEGO_ENA)
+ CurrCard->globalFlags |= F_DO_RENEGO;
+ }
+
+ if (pCurrNvRam) {
+ if (pCurrNvRam->niScsiConf & 0x08)
+ CurrCard->globalFlags |= F_CONLUN_IO;
+ } else {
+ if (FPT_utilEERead(ioport, (SCSI_CONFIG / 2)) & CONNIO_ENA)
+ CurrCard->globalFlags |= F_CONLUN_IO;
+ }
+
+ temp = pCardInfo->si_per_targ_no_disc;
+
+ for (i = 0, id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) {
+
+ if (temp & id)
+ FPT_sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC;
+ }
+
+ sync_bit_map = 0x0001;
+
+ for (id = 0; id < (MAX_SCSI_TAR / 2); id++) {
+
+ if (pCurrNvRam) {
+ temp = (unsigned short)pCurrNvRam->niSyncTbl[id];
+ temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+ (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+ } else
+ temp =
+ FPT_utilEERead(ioport,
+ (unsigned short)((SYNC_RATE_TBL / 2)
+ + id));
+
+ for (i = 0; i < 2; temp >>= 8, i++) {
+
+ if (pCardInfo->si_per_targ_init_sync & sync_bit_map) {
+
+ FPT_sccbMgrTbl[thisCard][id * 2 +
+ i].TarEEValue =
+ (unsigned char)temp;
+ }
+
+ else {
+ FPT_sccbMgrTbl[thisCard][id * 2 +
+ i].TarStatus |=
+ SYNC_SUPPORTED;
+ FPT_sccbMgrTbl[thisCard][id * 2 +
+ i].TarEEValue =
+ (unsigned char)(temp & ~EE_SYNC_MASK);
+ }
+
+/* if ((pCardInfo->si_per_targ_wide_nego & sync_bit_map) ||
+ (id*2+i >= 8)){
+*/
+ if (pCardInfo->si_per_targ_wide_nego & sync_bit_map) {
+
+ FPT_sccbMgrTbl[thisCard][id * 2 +
+ i].TarEEValue |=
+ EE_WIDE_SCSI;
+
+ }
+
+ else { /* NARROW SCSI */
+ FPT_sccbMgrTbl[thisCard][id * 2 +
+ i].TarStatus |=
+ WIDE_NEGOCIATED;
+ }
+
+ sync_bit_map <<= 1;
+
+ }
+ }
+
+ WR_HARPOON((ioport + hp_semaphore),
+ (unsigned char)(RD_HARPOON((ioport + hp_semaphore)) |
+ SCCB_MGR_PRESENT));
+
+ return (void *)CurrCard;
+}
+
+static void FlashPoint_ReleaseHostAdapter(void *pCurrCard)
+{
+ unsigned char i;
+ u32 portBase;
+ u32 regOffset;
+ u32 scamData;
+ u32 *pScamTbl;
+ struct nvram_info *pCurrNvRam;
+
+ pCurrNvRam = ((struct sccb_card *)pCurrCard)->pNvRamInfo;
+
+ if (pCurrNvRam) {
+ FPT_WrStack(pCurrNvRam->niBaseAddr, 0, pCurrNvRam->niModel);
+ FPT_WrStack(pCurrNvRam->niBaseAddr, 1, pCurrNvRam->niSysConf);
+ FPT_WrStack(pCurrNvRam->niBaseAddr, 2, pCurrNvRam->niScsiConf);
+ FPT_WrStack(pCurrNvRam->niBaseAddr, 3, pCurrNvRam->niScamConf);
+ FPT_WrStack(pCurrNvRam->niBaseAddr, 4, pCurrNvRam->niAdapId);
+
+ for (i = 0; i < MAX_SCSI_TAR / 2; i++)
+ FPT_WrStack(pCurrNvRam->niBaseAddr,
+ (unsigned char)(i + 5),
+ pCurrNvRam->niSyncTbl[i]);
+
+ portBase = pCurrNvRam->niBaseAddr;
+
+ for (i = 0; i < MAX_SCSI_TAR; i++) {
+ regOffset = hp_aramBase + 64 + i * 4;
+ pScamTbl = (u32 *)&pCurrNvRam->niScamTbl[i];
+ scamData = *pScamTbl;
+ WR_HARP32(portBase, regOffset, scamData);
+ }
+
+ } else {
+ FPT_WrStack(((struct sccb_card *)pCurrCard)->ioPort, 0, 0);
+ }
+}
+
+static void FPT_RNVRamData(struct nvram_info *pNvRamInfo)
+{
+ unsigned char i;
+ u32 portBase;
+ u32 regOffset;
+ u32 scamData;
+ u32 *pScamTbl;
+
+ pNvRamInfo->niModel = FPT_RdStack(pNvRamInfo->niBaseAddr, 0);
+ pNvRamInfo->niSysConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 1);
+ pNvRamInfo->niScsiConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 2);
+ pNvRamInfo->niScamConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 3);
+ pNvRamInfo->niAdapId = FPT_RdStack(pNvRamInfo->niBaseAddr, 4);
+
+ for (i = 0; i < MAX_SCSI_TAR / 2; i++)
+ pNvRamInfo->niSyncTbl[i] =
+ FPT_RdStack(pNvRamInfo->niBaseAddr, (unsigned char)(i + 5));
+
+ portBase = pNvRamInfo->niBaseAddr;
+
+ for (i = 0; i < MAX_SCSI_TAR; i++) {
+ regOffset = hp_aramBase + 64 + i * 4;
+ RD_HARP32(portBase, regOffset, scamData);
+ pScamTbl = (u32 *)&pNvRamInfo->niScamTbl[i];
+ *pScamTbl = scamData;
+ }
+
+}
+
+static unsigned char FPT_RdStack(u32 portBase, unsigned char index)
+{
+ WR_HARPOON(portBase + hp_stack_addr, index);
+ return RD_HARPOON(portBase + hp_stack_data);
+}
+
+static void FPT_WrStack(u32 portBase, unsigned char index, unsigned char data)
+{
+ WR_HARPOON(portBase + hp_stack_addr, index);
+ WR_HARPOON(portBase + hp_stack_data, data);
+}
+
+static unsigned char FPT_ChkIfChipInitialized(u32 ioPort)
+{
+ if ((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != FPT_RdStack(ioPort, 4))
+ return 0;
+ if ((RD_HARPOON(ioPort + hp_clkctrl_0) & CLKCTRL_DEFAULT)
+ != CLKCTRL_DEFAULT)
+ return 0;
+ if ((RD_HARPOON(ioPort + hp_seltimeout) == TO_250ms) ||
+ (RD_HARPOON(ioPort + hp_seltimeout) == TO_290ms))
+ return 1;
+ return 0;
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FlashPoint_StartCCB
+ *
+ * Description: Start a command pointed to by p_Sccb. When the
+ * command is completed it will be returned via the
+ * callback function.
+ *
+ *---------------------------------------------------------------------*/
+static void FlashPoint_StartCCB(void *curr_card, struct sccb *p_Sccb)
+{
+ u32 ioport;
+ unsigned char thisCard, lun;
+ struct sccb *pSaveSccb;
+ CALL_BK_FN callback;
+ struct sccb_card *pCurrCard = curr_card;
+
+ thisCard = pCurrCard->cardIndex;
+ ioport = pCurrCard->ioPort;
+
+ if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
+
+ p_Sccb->HostStatus = SCCB_COMPLETE;
+ p_Sccb->SccbStatus = SCCB_ERROR;
+ callback = (CALL_BK_FN) p_Sccb->SccbCallback;
+ if (callback)
+ callback(p_Sccb);
+
+ return;
+ }
+
+ FPT_sinits(p_Sccb, thisCard);
+
+ if (!pCurrCard->cmdCounter) {
+ WR_HARPOON(ioport + hp_semaphore,
+ (RD_HARPOON(ioport + hp_semaphore)
+ | SCCB_MGR_ACTIVE));
+
+ if (pCurrCard->globalFlags & F_GREEN_PC) {
+ WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(ioport + hp_sys_ctrl, 0x00);
+ }
+ }
+
+ pCurrCard->cmdCounter++;
+
+ if (RD_HARPOON(ioport + hp_semaphore) & BIOS_IN_USE) {
+
+ WR_HARPOON(ioport + hp_semaphore,
+ (RD_HARPOON(ioport + hp_semaphore)
+ | TICKLE_ME));
+ if (p_Sccb->OperationCode == RESET_COMMAND) {
+ pSaveSccb =
+ pCurrCard->currentSCCB;
+ pCurrCard->currentSCCB = p_Sccb;
+ FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard);
+ pCurrCard->currentSCCB =
+ pSaveSccb;
+ } else {
+ FPT_queueAddSccb(p_Sccb, thisCard);
+ }
+ }
+
+ else if ((RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) {
+
+ if (p_Sccb->OperationCode == RESET_COMMAND) {
+ pSaveSccb =
+ pCurrCard->currentSCCB;
+ pCurrCard->currentSCCB = p_Sccb;
+ FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard);
+ pCurrCard->currentSCCB =
+ pSaveSccb;
+ } else {
+ FPT_queueAddSccb(p_Sccb, thisCard);
+ }
+ }
+
+ else {
+
+ MDISABLE_INT(ioport);
+
+ if ((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ lun = p_Sccb->Lun;
+ else
+ lun = 0;
+ if ((pCurrCard->currentSCCB == NULL) &&
+ (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0)
+ && (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun]
+ == 0)) {
+
+ pCurrCard->currentSCCB = p_Sccb;
+ FPT_ssel(p_Sccb->SccbIOPort, thisCard);
+ }
+
+ else {
+
+ if (p_Sccb->OperationCode == RESET_COMMAND) {
+ pSaveSccb = pCurrCard->currentSCCB;
+ pCurrCard->currentSCCB = p_Sccb;
+ FPT_queueSelectFail(&FPT_BL_Card[thisCard],
+ thisCard);
+ pCurrCard->currentSCCB = pSaveSccb;
+ } else {
+ FPT_queueAddSccb(p_Sccb, thisCard);
+ }
+ }
+
+ MENABLE_INT(ioport);
+ }
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FlashPoint_AbortCCB
+ *
+ * Description: Abort the command pointed to by p_Sccb. When the
+ * command is completed it will be returned via the
+ * callback function.
+ *
+ *---------------------------------------------------------------------*/
+static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
+{
+ u32 ioport;
+
+ unsigned char thisCard;
+ CALL_BK_FN callback;
+ unsigned char TID;
+ struct sccb *pSaveSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ ioport = ((struct sccb_card *)pCurrCard)->ioPort;
+
+ thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
+
+ if (!(RD_HARPOON(ioport + hp_page_ctrl) & G_INT_DISABLE)) {
+
+ if (FPT_queueFindSccb(p_Sccb, thisCard)) {
+
+ ((struct sccb_card *)pCurrCard)->cmdCounter--;
+
+ if (!((struct sccb_card *)pCurrCard)->cmdCounter)
+ WR_HARPOON(ioport + hp_semaphore,
+ (RD_HARPOON(ioport + hp_semaphore)
+ & (unsigned
+ char)(~(SCCB_MGR_ACTIVE |
+ TICKLE_ME))));
+
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ callback = p_Sccb->SccbCallback;
+ callback(p_Sccb);
+
+ return 0;
+ }
+
+ else {
+ if (((struct sccb_card *)pCurrCard)->currentSCCB ==
+ p_Sccb) {
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ return 0;
+
+ }
+
+ else {
+
+ TID = p_Sccb->TargID;
+
+ if (p_Sccb->Sccb_tag) {
+ MDISABLE_INT(ioport);
+ if (((struct sccb_card *)pCurrCard)->
+ discQ_Tbl[p_Sccb->Sccb_tag] ==
+ p_Sccb) {
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ p_Sccb->Sccb_scsistat =
+ ABORT_ST;
+ p_Sccb->Sccb_scsimsg =
+ SMABORT_TAG;
+
+ if (((struct sccb_card *)
+ pCurrCard)->currentSCCB ==
+ NULL) {
+ ((struct sccb_card *)
+ pCurrCard)->
+ currentSCCB = p_Sccb;
+ FPT_ssel(ioport,
+ thisCard);
+ } else {
+ pSaveSCCB =
+ ((struct sccb_card
+ *)pCurrCard)->
+ currentSCCB;
+ ((struct sccb_card *)
+ pCurrCard)->
+ currentSCCB = p_Sccb;
+ FPT_queueSelectFail((struct sccb_card *)pCurrCard, thisCard);
+ ((struct sccb_card *)
+ pCurrCard)->
+ currentSCCB = pSaveSCCB;
+ }
+ }
+ MENABLE_INT(ioport);
+ return 0;
+ } else {
+ currTar_Info =
+ &FPT_sccbMgrTbl[thisCard][p_Sccb->
+ TargID];
+
+ if (FPT_BL_Card[thisCard].
+ discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[p_Sccb->Lun]]
+ == p_Sccb) {
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ return 0;
+ }
+ }
+ }
+ }
+ }
+ return -1;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FlashPoint_InterruptPending
+ *
+ * Description: Do a quick check to determine if there is a pending
+ * interrupt for this card and disable the IRQ Pin if so.
+ *
+ *---------------------------------------------------------------------*/
+static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
+{
+ u32 ioport;
+
+ ioport = ((struct sccb_card *)pCurrCard)->ioPort;
+
+ if (RD_HARPOON(ioport + hp_int_status) & INT_ASSERTED) {
+ return 1;
+ }
+
+ else
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FlashPoint_HandleInterrupt
+ *
+ * Description: This is our entry point when an interrupt is generated
+ * by the card and the upper level driver passes it on to
+ * us.
+ *
+ *---------------------------------------------------------------------*/
+static int FlashPoint_HandleInterrupt(void *pcard)
+{
+ struct sccb *currSCCB;
+ unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned short hp_int;
+ unsigned char i, target;
+ struct sccb_card *pCurrCard = pcard;
+ u32 ioport;
+
+ thisCard = pCurrCard->cardIndex;
+ ioport = pCurrCard->ioPort;
+
+ MDISABLE_INT(ioport);
+
+ if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ bm_status = RD_HARPOON(ioport + hp_ext_status) &
+ (unsigned char)BAD_EXT_STATUS;
+ else
+ bm_status = 0;
+
+ WR_HARPOON(ioport + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+
+ while ((hp_int = RDW_HARPOON((ioport + hp_intstat)) &
+ FPT_default_intena) | bm_status) {
+
+ currSCCB = pCurrCard->currentSCCB;
+
+ if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) {
+ result =
+ FPT_SccbMgr_bad_isr(ioport, thisCard, pCurrCard,
+ hp_int);
+ WRW_HARPOON((ioport + hp_intstat),
+ (FIFO | TIMEOUT | RESET | SCAM_SEL));
+ bm_status = 0;
+
+ if (result) {
+
+ MENABLE_INT(ioport);
+ return result;
+ }
+ }
+
+ else if (hp_int & ICMD_COMP) {
+
+ if (!(hp_int & BUS_FREE)) {
+ /* Wait for the BusFree before starting a new command. We
+ must also check for being reselected since the BusFree
+ may not show up if another device reselects us in 1.5us or
+ less. SRR Wednesday, 3/8/1995.
+ */
+ while (!
+ (RDW_HARPOON((ioport + hp_intstat)) &
+ (BUS_FREE | RSEL))) ;
+ }
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+
+ FPT_phaseChkFifo(ioport, thisCard);
+
+/* WRW_HARPOON((ioport+hp_intstat),
+ (BUS_FREE | ICMD_COMP | ITAR_DISC | XFER_CNT_0));
+ */
+
+ WRW_HARPOON((ioport + hp_intstat), CLR_ALL_INT_1);
+
+ FPT_autoCmdCmplt(ioport, thisCard);
+
+ }
+
+ else if (hp_int & ITAR_DISC) {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+ FPT_phaseChkFifo(ioport, thisCard);
+
+ if (RD_HARPOON(ioport + hp_gp_reg_1) ==
+ SMSAVE_DATA_PTR) {
+
+ WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
+ currSCCB->Sccb_XferState |= F_NO_DATA_YET;
+
+ currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC;
+ }
+
+ currSCCB->Sccb_scsistat = DISCONNECT_ST;
+ FPT_queueDisconnect(currSCCB, thisCard);
+
+ /* Wait for the BusFree before starting a new command. We
+ must also check for being reselected since the BusFree
+ may not show up if another device reselects us in 1.5us or
+ less. SRR Wednesday, 3/8/1995.
+ */
+ while (!
+ (RDW_HARPOON((ioport + hp_intstat)) &
+ (BUS_FREE | RSEL))
+ && !((RDW_HARPOON((ioport + hp_intstat)) & PHASE)
+ && RD_HARPOON((ioport + hp_scsisig)) ==
+ (SCSI_BSY | SCSI_REQ | SCSI_CD | SCSI_MSG |
+ SCSI_IOBIT))) ;
+
+ /*
+ The additional loop exit condition above detects a timing problem
+ with the revision D/E harpoon chips. The caller should reset the
+ host adapter to recover when 0xFE is returned.
+ */
+ if (!
+ (RDW_HARPOON((ioport + hp_intstat)) &
+ (BUS_FREE | RSEL))) {
+ MENABLE_INT(ioport);
+ return 0xFE;
+ }
+
+ WRW_HARPOON((ioport + hp_intstat),
+ (BUS_FREE | ITAR_DISC));
+
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+
+ }
+
+ else if (hp_int & RSEL) {
+
+ WRW_HARPOON((ioport + hp_intstat),
+ (PROG_HLT | RSEL | PHASE | BUS_FREE));
+
+ if (RDW_HARPOON((ioport + hp_intstat)) & ITAR_DISC) {
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+ FPT_phaseChkFifo(ioport, thisCard);
+
+ if (RD_HARPOON(ioport + hp_gp_reg_1) ==
+ SMSAVE_DATA_PTR) {
+ WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
+ currSCCB->Sccb_XferState |=
+ F_NO_DATA_YET;
+ currSCCB->Sccb_savedATC =
+ currSCCB->Sccb_ATC;
+ }
+
+ WRW_HARPOON((ioport + hp_intstat),
+ (BUS_FREE | ITAR_DISC));
+ currSCCB->Sccb_scsistat = DISCONNECT_ST;
+ FPT_queueDisconnect(currSCCB, thisCard);
+ }
+
+ FPT_sres(ioport, thisCard, pCurrCard);
+ FPT_phaseDecode(ioport, thisCard);
+
+ }
+
+ else if ((hp_int & IDO_STRT) && (!(hp_int & BUS_FREE))) {
+
+ WRW_HARPOON((ioport + hp_intstat),
+ (IDO_STRT | XFER_CNT_0));
+ FPT_phaseDecode(ioport, thisCard);
+
+ }
+
+ else if ((hp_int & IUNKWN) || (hp_int & PROG_HLT)) {
+ WRW_HARPOON((ioport + hp_intstat),
+ (PHASE | IUNKWN | PROG_HLT));
+ if ((RD_HARPOON(ioport + hp_prgmcnt_0) & (unsigned char)
+ 0x3f) < (unsigned char)SELCHK) {
+ FPT_phaseDecode(ioport, thisCard);
+ } else {
+ /* Harpoon problem some SCSI target device respond to selection
+ with short BUSY pulse (<400ns) this will make the Harpoon is not able
+ to latch the correct Target ID into reg. x53.
+ The work around require to correct this reg. But when write to this
+ reg. (0x53) also increment the FIFO write addr reg (0x6f), thus we
+ need to read this reg first then restore it later. After update to 0x53 */
+
+ i = (unsigned
+ char)(RD_HARPOON(ioport + hp_fifowrite));
+ target =
+ (unsigned
+ char)(RD_HARPOON(ioport + hp_gp_reg_3));
+ WR_HARPOON(ioport + hp_xfer_pad,
+ (unsigned char)ID_UNLOCK);
+ WR_HARPOON(ioport + hp_select_id,
+ (unsigned char)(target | target <<
+ 4));
+ WR_HARPOON(ioport + hp_xfer_pad,
+ (unsigned char)0x00);
+ WR_HARPOON(ioport + hp_fifowrite, i);
+ WR_HARPOON(ioport + hp_autostart_3,
+ (AUTO_IMMED + TAG_STRT));
+ }
+ }
+
+ else if (hp_int & XFER_CNT_0) {
+
+ WRW_HARPOON((ioport + hp_intstat), XFER_CNT_0);
+
+ FPT_schkdd(ioport, thisCard);
+
+ }
+
+ else if (hp_int & BUS_FREE) {
+
+ WRW_HARPOON((ioport + hp_intstat), BUS_FREE);
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT) {
+
+ FPT_hostDataXferAbort(ioport, thisCard,
+ currSCCB);
+ }
+
+ FPT_phaseBusFree(ioport, thisCard);
+ }
+
+ else if (hp_int & ITICKLE) {
+
+ WRW_HARPOON((ioport + hp_intstat), ITICKLE);
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ }
+
+ if (((struct sccb_card *)pCurrCard)->
+ globalFlags & F_NEW_SCCB_CMD) {
+
+ pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD;
+
+ if (pCurrCard->currentSCCB == NULL)
+ FPT_queueSearchSelect(pCurrCard, thisCard);
+
+ if (pCurrCard->currentSCCB != NULL) {
+ pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD;
+ FPT_ssel(ioport, thisCard);
+ }
+
+ break;
+
+ }
+
+ } /*end while */
+
+ MENABLE_INT(ioport);
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Sccb_bad_isr
+ *
+ * Description: Some type of interrupt has occurred which is slightly
+ * out of the ordinary. We will now decode it fully, in
+ * this routine. This is broken up in an attempt to save
+ * processing time.
+ *
+ *---------------------------------------------------------------------*/
+static unsigned char FPT_SccbMgr_bad_isr(u32 p_port, unsigned char p_card,
+ struct sccb_card *pCurrCard,
+ unsigned short p_int)
+{
+ unsigned char temp, ScamFlg;
+ struct sccb_mgr_tar_info *currTar_Info;
+ struct nvram_info *pCurrNvRam;
+
+ if (RD_HARPOON(p_port + hp_ext_status) &
+ (BM_FORCE_OFF | PCI_DEV_TMOUT | BM_PARITY_ERR | PIO_OVERRUN)) {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT) {
+
+ FPT_hostDataXferAbort(p_port, p_card,
+ pCurrCard->currentSCCB);
+ }
+
+ if (RD_HARPOON(p_port + hp_pci_stat_cfg) & REC_MASTER_ABORT)
+ {
+ WR_HARPOON(p_port + hp_pci_stat_cfg,
+ (RD_HARPOON(p_port + hp_pci_stat_cfg) &
+ ~REC_MASTER_ABORT));
+
+ WR_HARPOON(p_port + hp_host_blk_cnt, 0x00);
+
+ }
+
+ if (pCurrCard->currentSCCB != NULL) {
+
+ if (!pCurrCard->currentSCCB->HostStatus)
+ pCurrCard->currentSCCB->HostStatus =
+ SCCB_BM_ERR;
+
+ FPT_sxfrp(p_port, p_card);
+
+ temp = (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) &
+ (EXT_ARB_ACK | SCSI_TERM_ENA_H));
+ WR_HARPOON(p_port + hp_ee_ctrl,
+ ((unsigned char)temp | SEE_MS | SEE_CS));
+ WR_HARPOON(p_port + hp_ee_ctrl, temp);
+
+ if (!
+ (RDW_HARPOON((p_port + hp_intstat)) &
+ (BUS_FREE | RESET))) {
+ FPT_phaseDecode(p_port, p_card);
+ }
+ }
+ }
+
+ else if (p_int & RESET) {
+
+ WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(p_port + hp_sys_ctrl, 0x00);
+ if (pCurrCard->currentSCCB != NULL) {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+
+ FPT_hostDataXferAbort(p_port, p_card,
+ pCurrCard->currentSCCB);
+ }
+
+ DISABLE_AUTO(p_port);
+
+ FPT_sresb(p_port, p_card);
+
+ while (RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST) {
+ }
+
+ pCurrNvRam = pCurrCard->pNvRamInfo;
+ if (pCurrNvRam) {
+ ScamFlg = pCurrNvRam->niScamConf;
+ } else {
+ ScamFlg =
+ (unsigned char)FPT_utilEERead(p_port,
+ SCAM_CONFIG / 2);
+ }
+
+ FPT_XbowInit(p_port, ScamFlg);
+
+ FPT_scini(p_card, pCurrCard->ourId, 0);
+
+ return 0xFF;
+ }
+
+ else if (p_int & FIFO) {
+
+ WRW_HARPOON((p_port + hp_intstat), FIFO);
+
+ if (pCurrCard->currentSCCB != NULL)
+ FPT_sxfrp(p_port, p_card);
+ }
+
+ else if (p_int & TIMEOUT) {
+
+ DISABLE_AUTO(p_port);
+
+ WRW_HARPOON((p_port + hp_intstat),
+ (PROG_HLT | TIMEOUT | SEL | BUS_FREE | PHASE |
+ IUNKWN));
+
+ pCurrCard->currentSCCB->HostStatus = SCCB_SELECTION_TIMEOUT;
+
+ currTar_Info =
+ &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
+ if ((pCurrCard->globalFlags & F_CONLUN_IO)
+ && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING))
+ currTar_Info->TarLUNBusy[pCurrCard->currentSCCB->Lun] =
+ 0;
+ else
+ currTar_Info->TarLUNBusy[0] = 0;
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK) {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI) {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ FPT_sssyncv(p_port, pCurrCard->currentSCCB->TargID, NARROW_SCSI,
+ currTar_Info);
+
+ FPT_queueCmdComplete(pCurrCard, pCurrCard->currentSCCB, p_card);
+
+ }
+
+ else if (p_int & SCAM_SEL) {
+
+ FPT_scarb(p_port, LEVEL2_TAR);
+ FPT_scsel(p_port);
+ FPT_scasid(p_card, p_port);
+
+ FPT_scbusf(p_port);
+
+ WRW_HARPOON((p_port + hp_intstat), SCAM_SEL);
+ }
+
+ return 0x00;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInit
+ *
+ * Description: Initialize all Sccb manager data structures.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_SccbMgrTableInitAll()
+{
+ unsigned char thisCard;
+
+ for (thisCard = 0; thisCard < MAX_CARDS; thisCard++) {
+ FPT_SccbMgrTableInitCard(&FPT_BL_Card[thisCard], thisCard);
+
+ FPT_BL_Card[thisCard].ioPort = 0x00;
+ FPT_BL_Card[thisCard].cardInfo = NULL;
+ FPT_BL_Card[thisCard].cardIndex = 0xFF;
+ FPT_BL_Card[thisCard].ourId = 0x00;
+ FPT_BL_Card[thisCard].pNvRamInfo = NULL;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInit
+ *
+ * Description: Initialize all Sccb manager data structures.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_SccbMgrTableInitCard(struct sccb_card *pCurrCard,
+ unsigned char p_card)
+{
+ unsigned char scsiID, qtag;
+
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
+ FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ }
+
+ for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) {
+ FPT_sccbMgrTbl[p_card][scsiID].TarStatus = 0;
+ FPT_sccbMgrTbl[p_card][scsiID].TarEEValue = 0;
+ FPT_SccbMgrTableInitTarget(p_card, scsiID);
+ }
+
+ pCurrCard->scanIndex = 0x00;
+ pCurrCard->currentSCCB = NULL;
+ pCurrCard->globalFlags = 0x00;
+ pCurrCard->cmdCounter = 0x00;
+ pCurrCard->tagQ_Lst = 0x01;
+ pCurrCard->discQCount = 0;
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInit
+ *
+ * Description: Initialize all Sccb manager data structures.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_SccbMgrTableInitTarget(unsigned char p_card,
+ unsigned char target)
+{
+
+ unsigned char lun, qtag;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][target];
+
+ currTar_Info->TarSelQ_Cnt = 0;
+ currTar_Info->TarSyncCtrl = 0;
+
+ currTar_Info->TarSelQ_Head = NULL;
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarTagQ_Cnt = 0;
+ currTar_Info->TarLUN_CA = 0;
+
+ for (lun = 0; lun < MAX_LUN; lun++) {
+ currTar_Info->TarLUNBusy[lun] = 0;
+ currTar_Info->LunDiscQ_Idx[lun] = 0;
+ }
+
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
+ if (FPT_BL_Card[p_card].discQ_Tbl[qtag] != NULL) {
+ if (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID ==
+ target) {
+ FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ FPT_BL_Card[p_card].discQCount--;
+ }
+ }
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sfetm
+ *
+ * Description: Read in a message byte from the SCSI bus, and check
+ * for a parity error.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB)
+{
+ unsigned char message;
+ unsigned short TimeOutLoop;
+
+ TimeOutLoop = 0;
+ while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
+ (TimeOutLoop++ < 20000)) {
+ }
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+
+ message = RD_HARPOON(port + hp_scsidata_0);
+
+ WR_HARPOON(port + hp_scsisig, SCSI_ACK + S_MSGI_PH);
+
+ if (TimeOutLoop > 20000)
+ message = 0x00; /* force message byte = 0 if Time Out on Req */
+
+ if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
+ (RD_HARPOON(port + hp_addstat) & SCSI_PAR_ERR)) {
+ WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ WR_HARPOON(port + hp_xferstat, 0);
+ WR_HARPOON(port + hp_fiforead, 0);
+ WR_HARPOON(port + hp_fifowrite, 0);
+ if (pCurrSCCB != NULL) {
+ pCurrSCCB->Sccb_scsimsg = SMPARITY;
+ }
+ message = 0x00;
+ do {
+ ACCEPT_MSG_ATN(port);
+ TimeOutLoop = 0;
+ while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
+ (TimeOutLoop++ < 20000)) {
+ }
+ if (TimeOutLoop > 20000) {
+ WRW_HARPOON((port + hp_intstat), PARITY);
+ return message;
+ }
+ if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) !=
+ S_MSGI_PH) {
+ WRW_HARPOON((port + hp_intstat), PARITY);
+ return message;
+ }
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+
+ RD_HARPOON(port + hp_scsidata_0);
+
+ WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ } while (1);
+
+ }
+ WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ WR_HARPOON(port + hp_xferstat, 0);
+ WR_HARPOON(port + hp_fiforead, 0);
+ WR_HARPOON(port + hp_fifowrite, 0);
+ return message;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_ssel
+ *
+ * Description: Load up automation and select target device.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_ssel(u32 port, unsigned char p_card)
+{
+
+ unsigned char auto_loaded, i, target, *theCCB;
+
+ u32 cdb_reg;
+ struct sccb_card *CurrCard;
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+ unsigned char lastTag, lun;
+
+ CurrCard = &FPT_BL_Card[p_card];
+ currSCCB = CurrCard->currentSCCB;
+ target = currSCCB->TargID;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][target];
+ lastTag = CurrCard->tagQ_Lst;
+
+ ARAM_ACCESS(port);
+
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+
+ if (((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+
+ lun = currSCCB->Lun;
+ else
+ lun = 0;
+
+ if (CurrCard->globalFlags & F_TAG_STARTED) {
+ if (!(currSCCB->ControlByte & F_USE_CMD_Q)) {
+ if ((currTar_Info->TarLUN_CA == 0)
+ && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
+ == TAG_Q_TRYING)) {
+
+ if (currTar_Info->TarTagQ_Cnt != 0) {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ FPT_queueSelectFail(CurrCard, p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ }
+
+ }
+ /*End non-tagged */
+ else {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ }
+
+ }
+ /*!Use cmd Q Tagged */
+ else {
+ if (currTar_Info->TarLUN_CA == 1) {
+ FPT_queueSelectFail(CurrCard, p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ currTar_Info->TarLUNBusy[lun] = 1;
+
+ } /*else use cmd Q tagged */
+
+ }
+ /*if glob tagged started */
+ else {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ }
+
+ if ((((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ || (!(currSCCB->ControlByte & F_USE_CMD_Q)))) {
+ if (CurrCard->discQCount >= QUEUE_DEPTH) {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ FPT_queueSelectFail(CurrCard, p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+ for (i = 1; i < QUEUE_DEPTH; i++) {
+ if (++lastTag >= QUEUE_DEPTH)
+ lastTag = 1;
+ if (CurrCard->discQ_Tbl[lastTag] == NULL) {
+ CurrCard->tagQ_Lst = lastTag;
+ currTar_Info->LunDiscQ_Idx[lun] = lastTag;
+ CurrCard->discQ_Tbl[lastTag] = currSCCB;
+ CurrCard->discQCount++;
+ break;
+ }
+ }
+ if (i == QUEUE_DEPTH) {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ FPT_queueSelectFail(CurrCard, p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+ }
+
+ auto_loaded = 0;
+
+ WR_HARPOON(port + hp_select_id, target);
+ WR_HARPOON(port + hp_gp_reg_3, target); /* Use by new automation logic */
+
+ if (currSCCB->OperationCode == RESET_COMMAND) {
+ WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT +
+ (currSCCB->
+ Sccb_idmsg & ~DISC_PRIV)));
+
+ WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP);
+
+ currSCCB->Sccb_scsimsg = SMDEV_RESET;
+
+ WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
+ auto_loaded = 1;
+ currSCCB->Sccb_scsistat = SELECT_BDR_ST;
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK) {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI) {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ FPT_sssyncv(port, target, NARROW_SCSI, currTar_Info);
+ FPT_SccbMgrTableInitTarget(p_card, target);
+
+ }
+
+ else if (currSCCB->Sccb_scsistat == ABORT_ST) {
+ WRW_HARPOON((port + ID_MSG_STRT), (MPM_OP + AMSG_OUT +
+ (currSCCB->
+ Sccb_idmsg & ~DISC_PRIV)));
+
+ WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
+
+ WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT +
+ (((unsigned
+ char)(currSCCB->
+ ControlByte &
+ TAG_TYPE_MASK)
+ >> 6) | (unsigned char)
+ 0x20)));
+ WRW_HARPOON((port + SYNC_MSGS + 2),
+ (MPM_OP + AMSG_OUT + currSCCB->Sccb_tag));
+ WRW_HARPOON((port + SYNC_MSGS + 4), (BRH_OP + ALWAYS + NP));
+
+ WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
+ auto_loaded = 1;
+
+ }
+
+ else if (!(currTar_Info->TarStatus & WIDE_NEGOCIATED)) {
+ auto_loaded = FPT_siwidn(port, p_card);
+ currSCCB->Sccb_scsistat = SELECT_WN_ST;
+ }
+
+ else if (!((currTar_Info->TarStatus & TAR_SYNC_MASK)
+ == SYNC_SUPPORTED)) {
+ auto_loaded = FPT_sisyncn(port, p_card, 0);
+ currSCCB->Sccb_scsistat = SELECT_SN_ST;
+ }
+
+ if (!auto_loaded) {
+
+ if (currSCCB->ControlByte & F_USE_CMD_Q) {
+
+ CurrCard->globalFlags |= F_TAG_STARTED;
+
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
+ == TAG_Q_REJECT) {
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+
+ /* Fix up the start instruction with a jump to
+ Non-Tag-CMD handling */
+ WRW_HARPOON((port + ID_MSG_STRT),
+ BRH_OP + ALWAYS + NTCMD);
+
+ WRW_HARPOON((port + NON_TAG_ID_MSG),
+ (MPM_OP + AMSG_OUT +
+ currSCCB->Sccb_idmsg));
+
+ WR_HARPOON(port + hp_autostart_3,
+ (SELECT + SELCHK_STRT));
+
+ /* Setup our STATE so we know what happened when
+ the wheels fall off. */
+ currSCCB->Sccb_scsistat = SELECT_ST;
+
+ currTar_Info->TarLUNBusy[lun] = 1;
+ }
+
+ else {
+ WRW_HARPOON((port + ID_MSG_STRT),
+ (MPM_OP + AMSG_OUT +
+ currSCCB->Sccb_idmsg));
+
+ WRW_HARPOON((port + ID_MSG_STRT + 2),
+ (MPM_OP + AMSG_OUT +
+ (((unsigned char)(currSCCB->
+ ControlByte &
+ TAG_TYPE_MASK)
+ >> 6) | (unsigned char)0x20)));
+
+ for (i = 1; i < QUEUE_DEPTH; i++) {
+ if (++lastTag >= QUEUE_DEPTH)
+ lastTag = 1;
+ if (CurrCard->discQ_Tbl[lastTag] ==
+ NULL) {
+ WRW_HARPOON((port +
+ ID_MSG_STRT + 6),
+ (MPM_OP + AMSG_OUT +
+ lastTag));
+ CurrCard->tagQ_Lst = lastTag;
+ currSCCB->Sccb_tag = lastTag;
+ CurrCard->discQ_Tbl[lastTag] =
+ currSCCB;
+ CurrCard->discQCount++;
+ break;
+ }
+ }
+
+ if (i == QUEUE_DEPTH) {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ FPT_queueSelectFail(CurrCard, p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ currSCCB->Sccb_scsistat = SELECT_Q_ST;
+
+ WR_HARPOON(port + hp_autostart_3,
+ (SELECT + SELCHK_STRT));
+ }
+ }
+
+ else {
+
+ WRW_HARPOON((port + ID_MSG_STRT),
+ BRH_OP + ALWAYS + NTCMD);
+
+ WRW_HARPOON((port + NON_TAG_ID_MSG),
+ (MPM_OP + AMSG_OUT + currSCCB->Sccb_idmsg));
+
+ currSCCB->Sccb_scsistat = SELECT_ST;
+
+ WR_HARPOON(port + hp_autostart_3,
+ (SELECT + SELCHK_STRT));
+ }
+
+ theCCB = (unsigned char *)&currSCCB->Cdb[0];
+
+ cdb_reg = port + CMD_STRT;
+
+ for (i = 0; i < currSCCB->CdbLength; i++) {
+ WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + *theCCB));
+ cdb_reg += 2;
+ theCCB++;
+ }
+
+ if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
+ WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP));
+
+ }
+ /* auto_loaded */
+ WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);
+ WR_HARPOON(port + hp_xferstat, 0x00);
+
+ WRW_HARPOON((port + hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE));
+
+ WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT));
+
+ if (!(currSCCB->Sccb_MGRFlags & F_DEV_SELECTED)) {
+ WR_HARPOON(port + hp_scsictrl_0,
+ (SEL_TAR | ENA_ATN | ENA_RESEL | ENA_SCAM_SEL));
+ } else {
+
+/* auto_loaded = (RD_HARPOON(port+hp_autostart_3) & (unsigned char)0x1F);
+ auto_loaded |= AUTO_IMMED; */
+ auto_loaded = AUTO_IMMED;
+
+ DISABLE_AUTO(port);
+
+ WR_HARPOON(port + hp_autostart_3, auto_loaded);
+ }
+
+ SGRAM_ACCESS(port);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sres
+ *
+ * Description: Hookup the correct CCB and handle the incoming messages.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_sres(u32 port, unsigned char p_card,
+ struct sccb_card *pCurrCard)
+{
+
+ unsigned char our_target, message, lun = 0, tag, msgRetryCount;
+
+ struct sccb_mgr_tar_info *currTar_Info;
+ struct sccb *currSCCB;
+
+ if (pCurrCard->currentSCCB != NULL) {
+ currTar_Info =
+ &FPT_sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
+ DISABLE_AUTO(port);
+
+ WR_HARPOON((port + hp_scsictrl_0), (ENA_RESEL | ENA_SCAM_SEL));
+
+ currSCCB = pCurrCard->currentSCCB;
+ if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+ }
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+ }
+ if (((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING))) {
+ currTar_Info->TarLUNBusy[currSCCB->Lun] = 0;
+ if (currSCCB->Sccb_scsistat != ABORT_ST) {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[currSCCB->
+ Lun]]
+ = NULL;
+ }
+ } else {
+ currTar_Info->TarLUNBusy[0] = 0;
+ if (currSCCB->Sccb_tag) {
+ if (currSCCB->Sccb_scsistat != ABORT_ST) {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currSCCB->
+ Sccb_tag] = NULL;
+ }
+ } else {
+ if (currSCCB->Sccb_scsistat != ABORT_ST) {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[0]] =
+ NULL;
+ }
+ }
+ }
+
+ FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card);
+ }
+
+ WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);
+
+ our_target = (unsigned char)(RD_HARPOON(port + hp_select_id) >> 4);
+ currTar_Info = &FPT_sccbMgrTbl[p_card][our_target];
+
+ msgRetryCount = 0;
+ do {
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][our_target];
+ tag = 0;
+
+ while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) {
+ if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) {
+
+ WRW_HARPOON((port + hp_intstat), PHASE);
+ return;
+ }
+ }
+
+ WRW_HARPOON((port + hp_intstat), PHASE);
+ if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH) {
+
+ message = FPT_sfm(port, pCurrCard->currentSCCB);
+ if (message) {
+
+ if (message <= (0x80 | LUN_MASK)) {
+ lun = message & (unsigned char)LUN_MASK;
+
+ if ((currTar_Info->
+ TarStatus & TAR_TAG_Q_MASK) ==
+ TAG_Q_TRYING) {
+ if (currTar_Info->TarTagQ_Cnt !=
+ 0) {
+
+ if (!
+ (currTar_Info->
+ TarLUN_CA)) {
+ ACCEPT_MSG(port); /*Release the ACK for ID msg. */
+
+ message =
+ FPT_sfm
+ (port,
+ pCurrCard->
+ currentSCCB);
+ if (message) {
+ ACCEPT_MSG
+ (port);
+ }
+
+ else
+ message
+ = 0;
+
+ if (message !=
+ 0) {
+ tag =
+ FPT_sfm
+ (port,
+ pCurrCard->
+ currentSCCB);
+
+ if (!
+ (tag))
+ message
+ =
+ 0;
+ }
+
+ }
+ /*C.A. exists! */
+ }
+ /*End Q cnt != 0 */
+ }
+ /*End Tag cmds supported! */
+ }
+ /*End valid ID message. */
+ else {
+
+ ACCEPT_MSG_ATN(port);
+ }
+
+ }
+ /* End good id message. */
+ else {
+
+ message = 0;
+ }
+ } else {
+ ACCEPT_MSG_ATN(port);
+
+ while (!
+ (RDW_HARPOON((port + hp_intstat)) &
+ (PHASE | RESET))
+ && !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)
+ && (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ;
+
+ return;
+ }
+
+ if (message == 0) {
+ msgRetryCount++;
+ if (msgRetryCount == 1) {
+ FPT_SendMsg(port, SMPARITY);
+ } else {
+ FPT_SendMsg(port, SMDEV_RESET);
+
+ FPT_sssyncv(port, our_target, NARROW_SCSI,
+ currTar_Info);
+
+ if (FPT_sccbMgrTbl[p_card][our_target].
+ TarEEValue & EE_SYNC_MASK) {
+
+ FPT_sccbMgrTbl[p_card][our_target].
+ TarStatus &= ~TAR_SYNC_MASK;
+
+ }
+
+ if (FPT_sccbMgrTbl[p_card][our_target].
+ TarEEValue & EE_WIDE_SCSI) {
+
+ FPT_sccbMgrTbl[p_card][our_target].
+ TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ FPT_queueFlushTargSccb(p_card, our_target,
+ SCCB_COMPLETE);
+ FPT_SccbMgrTableInitTarget(p_card, our_target);
+ return;
+ }
+ }
+ } while (message == 0);
+
+ if (((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
+ currTar_Info->TarLUNBusy[lun] = 1;
+ pCurrCard->currentSCCB =
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[lun]];
+ if (pCurrCard->currentSCCB != NULL) {
+ ACCEPT_MSG(port);
+ } else {
+ ACCEPT_MSG_ATN(port);
+ }
+ } else {
+ currTar_Info->TarLUNBusy[0] = 1;
+
+ if (tag) {
+ if (pCurrCard->discQ_Tbl[tag] != NULL) {
+ pCurrCard->currentSCCB =
+ pCurrCard->discQ_Tbl[tag];
+ currTar_Info->TarTagQ_Cnt--;
+ ACCEPT_MSG(port);
+ } else {
+ ACCEPT_MSG_ATN(port);
+ }
+ } else {
+ pCurrCard->currentSCCB =
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]];
+ if (pCurrCard->currentSCCB != NULL) {
+ ACCEPT_MSG(port);
+ } else {
+ ACCEPT_MSG_ATN(port);
+ }
+ }
+ }
+
+ if (pCurrCard->currentSCCB != NULL) {
+ if (pCurrCard->currentSCCB->Sccb_scsistat == ABORT_ST) {
+ /* During Abort Tag command, the target could have got re-selected
+ and completed the command. Check the select Q and remove the CCB
+ if it is in the Select Q */
+ FPT_queueFindSccb(pCurrCard->currentSCCB, p_card);
+ }
+ }
+
+ while (!(RDW_HARPOON((port + hp_intstat)) & (PHASE | RESET)) &&
+ !(RD_HARPOON(port + hp_scsisig) & SCSI_REQ) &&
+ (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ;
+}
+
+static void FPT_SendMsg(u32 port, unsigned char message)
+{
+ while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) {
+ if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) {
+
+ WRW_HARPOON((port + hp_intstat), PHASE);
+ return;
+ }
+ }
+
+ WRW_HARPOON((port + hp_intstat), PHASE);
+ if ((RD_HARPOON(port + hp_scsisig) & S_SCSI_PHZ) == S_MSGO_PH) {
+ WRW_HARPOON((port + hp_intstat),
+ (BUS_FREE | PHASE | XFER_CNT_0));
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN);
+
+ WR_HARPOON(port + hp_scsidata_0, message);
+
+ WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ ACCEPT_MSG(port);
+
+ WR_HARPOON(port + hp_portctrl_0, 0x00);
+
+ if ((message == SMABORT) || (message == SMDEV_RESET) ||
+ (message == SMABORT_TAG)) {
+ while (!
+ (RDW_HARPOON((port + hp_intstat)) &
+ (BUS_FREE | PHASE))) {
+ }
+
+ if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
+ WRW_HARPOON((port + hp_intstat), BUS_FREE);
+ }
+ }
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sdecm
+ *
+ * Description: Determine the proper response to the message from the
+ * target device.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
+{
+ struct sccb *currSCCB;
+ struct sccb_card *CurrCard;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ CurrCard = &FPT_BL_Card[p_card];
+ currSCCB = CurrCard->currentSCCB;
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
+
+ if (message == SMREST_DATA_PTR) {
+ if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) {
+ currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
+
+ FPT_hostDataXferRestart(currSCCB);
+ }
+
+ ACCEPT_MSG(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+
+ else if (message == SMCMD_COMP) {
+
+ if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
+ currTar_Info->TarStatus &=
+ ~(unsigned char)TAR_TAG_Q_MASK;
+ currTar_Info->TarStatus |= (unsigned char)TAG_Q_REJECT;
+ }
+
+ ACCEPT_MSG(port);
+
+ }
+
+ else if ((message == SMNO_OP) || (message >= SMIDENT)
+ || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY)) {
+
+ ACCEPT_MSG(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+
+ else if (message == SMREJECT) {
+
+ if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
+ (currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
+ ((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)
+ || ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) ==
+ TAG_Q_TRYING))
+ {
+ WRW_HARPOON((port + hp_intstat), BUS_FREE);
+
+ ACCEPT_MSG(port);
+
+ while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
+ (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)))
+ {
+ }
+
+ if (currSCCB->Lun == 0x00) {
+ if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+
+ currTar_Info->TarStatus |=
+ (unsigned char)SYNC_SUPPORTED;
+
+ currTar_Info->TarEEValue &=
+ ~EE_SYNC_MASK;
+ }
+
+ else if ((currSCCB->Sccb_scsistat ==
+ SELECT_WN_ST)) {
+
+ currTar_Info->TarStatus =
+ (currTar_Info->
+ TarStatus & ~WIDE_ENABLED) |
+ WIDE_NEGOCIATED;
+
+ currTar_Info->TarEEValue &=
+ ~EE_WIDE_SCSI;
+
+ }
+
+ else if ((currTar_Info->
+ TarStatus & TAR_TAG_Q_MASK) ==
+ TAG_Q_TRYING) {
+ currTar_Info->TarStatus =
+ (currTar_Info->
+ TarStatus & ~(unsigned char)
+ TAR_TAG_Q_MASK) | TAG_Q_REJECT;
+
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+ CurrCard->discQCount--;
+ CurrCard->discQ_Tbl[currSCCB->
+ Sccb_tag] = NULL;
+ currSCCB->Sccb_tag = 0x00;
+
+ }
+ }
+
+ if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
+
+ if (currSCCB->Lun == 0x00) {
+ WRW_HARPOON((port + hp_intstat),
+ BUS_FREE);
+ CurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ }
+ }
+
+ else {
+
+ if ((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->
+ TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING))
+ currTar_Info->TarLUNBusy[currSCCB->
+ Lun] = 1;
+ else
+ currTar_Info->TarLUNBusy[0] = 1;
+
+ currSCCB->ControlByte &=
+ ~(unsigned char)F_USE_CMD_Q;
+
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+
+ }
+ }
+
+ else {
+ ACCEPT_MSG(port);
+
+ while ((!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) &&
+ (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)))
+ {
+ }
+
+ if (!(RDW_HARPOON((port + hp_intstat)) & BUS_FREE)) {
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+ }
+ }
+
+ else if (message == SMEXT) {
+
+ ACCEPT_MSG(port);
+ FPT_shandem(port, p_card, currSCCB);
+ }
+
+ else if (message == SMIGNORWR) {
+
+ ACCEPT_MSG(port); /* ACK the RESIDUE MSG */
+
+ message = FPT_sfm(port, currSCCB);
+
+ if (currSCCB->Sccb_scsimsg != SMPARITY)
+ ACCEPT_MSG(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+
+ else {
+
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ currSCCB->Sccb_scsimsg = SMREJECT;
+
+ ACCEPT_MSG_ATN(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_shandem
+ *
+ * Description: Decide what to do with the extended message.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
+{
+ unsigned char length, message;
+
+ length = FPT_sfm(port, pCurrSCCB);
+ if (length) {
+
+ ACCEPT_MSG(port);
+ message = FPT_sfm(port, pCurrSCCB);
+ if (message) {
+
+ if (message == SMSYNC) {
+
+ if (length == 0x03) {
+
+ ACCEPT_MSG(port);
+ FPT_stsyncn(port, p_card);
+ } else {
+
+ pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ ACCEPT_MSG_ATN(port);
+ }
+ } else if (message == SMWDTR) {
+
+ if (length == 0x02) {
+
+ ACCEPT_MSG(port);
+ FPT_stwidn(port, p_card);
+ } else {
+
+ pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ ACCEPT_MSG_ATN(port);
+
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED +
+ DISCONNECT_START));
+ }
+ } else {
+
+ pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ ACCEPT_MSG_ATN(port);
+
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+ } else {
+ if (pCurrSCCB->Sccb_scsimsg != SMPARITY)
+ ACCEPT_MSG(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+ } else {
+ if (pCurrSCCB->Sccb_scsimsg == SMPARITY)
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sisyncn
+ *
+ * Description: Read in a message byte from the SCSI bus, and check
+ * for a parity error.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_sisyncn(u32 port, unsigned char p_card,
+ unsigned char syncFlag)
+{
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
+
+ if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)) {
+
+ WRW_HARPOON((port + ID_MSG_STRT),
+ (MPM_OP + AMSG_OUT +
+ (currSCCB->
+ Sccb_idmsg & ~(unsigned char)DISC_PRIV)));
+
+ WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
+
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + SMSYNC));
+
+ if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
+
+ WRW_HARPOON((port + SYNC_MSGS + 6),
+ (MPM_OP + AMSG_OUT + 12));
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) ==
+ EE_SYNC_10MB)
+
+ WRW_HARPOON((port + SYNC_MSGS + 6),
+ (MPM_OP + AMSG_OUT + 25));
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) ==
+ EE_SYNC_5MB)
+
+ WRW_HARPOON((port + SYNC_MSGS + 6),
+ (MPM_OP + AMSG_OUT + 50));
+
+ else
+ WRW_HARPOON((port + SYNC_MSGS + 6),
+ (MPM_OP + AMSG_OUT + 00));
+
+ WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
+ WRW_HARPOON((port + SYNC_MSGS + 10),
+ (MPM_OP + AMSG_OUT + DEFAULT_OFFSET));
+ WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP));
+
+ if (syncFlag == 0) {
+ WR_HARPOON(port + hp_autostart_3,
+ (SELECT + SELCHK_STRT));
+ currTar_Info->TarStatus =
+ ((currTar_Info->
+ TarStatus & ~(unsigned char)TAR_SYNC_MASK) |
+ (unsigned char)SYNC_TRYING);
+ } else {
+ WR_HARPOON(port + hp_autostart_3,
+ (AUTO_IMMED + CMD_ONLY_STRT));
+ }
+
+ return 1;
+ }
+
+ else {
+
+ currTar_Info->TarStatus |= (unsigned char)SYNC_SUPPORTED;
+ currTar_Info->TarEEValue &= ~EE_SYNC_MASK;
+ return 0;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_stsyncn
+ *
+ * Description: The has sent us a Sync Nego message so handle it as
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_stsyncn(u32 port, unsigned char p_card)
+{
+ unsigned char sync_msg, offset, sync_reg, our_sync_msg;
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
+
+ sync_msg = FPT_sfm(port, currSCCB);
+
+ if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ return;
+ }
+
+ ACCEPT_MSG(port);
+
+ offset = FPT_sfm(port, currSCCB);
+
+ if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ return;
+ }
+
+ if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
+
+ our_sync_msg = 12; /* Setup our Message to 20mb/s */
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB)
+
+ our_sync_msg = 25; /* Setup our Message to 10mb/s */
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB)
+
+ our_sync_msg = 50; /* Setup our Message to 5mb/s */
+ else
+
+ our_sync_msg = 0; /* Message = Async */
+
+ if (sync_msg < our_sync_msg) {
+ sync_msg = our_sync_msg; /*if faster, then set to max. */
+ }
+
+ if (offset == ASYNC)
+ sync_msg = ASYNC;
+
+ if (offset > MAX_OFFSET)
+ offset = MAX_OFFSET;
+
+ sync_reg = 0x00;
+
+ if (sync_msg > 12)
+
+ sync_reg = 0x20; /* Use 10MB/s */
+
+ if (sync_msg > 25)
+
+ sync_reg = 0x40; /* Use 6.6MB/s */
+
+ if (sync_msg > 38)
+
+ sync_reg = 0x60; /* Use 5MB/s */
+
+ if (sync_msg > 50)
+
+ sync_reg = 0x80; /* Use 4MB/s */
+
+ if (sync_msg > 62)
+
+ sync_reg = 0xA0; /* Use 3.33MB/s */
+
+ if (sync_msg > 75)
+
+ sync_reg = 0xC0; /* Use 2.85MB/s */
+
+ if (sync_msg > 87)
+
+ sync_reg = 0xE0; /* Use 2.5MB/s */
+
+ if (sync_msg > 100) {
+
+ sync_reg = 0x00; /* Use ASYNC */
+ offset = 0x00;
+ }
+
+ if (currTar_Info->TarStatus & WIDE_ENABLED)
+
+ sync_reg |= offset;
+
+ else
+
+ sync_reg |= (offset | NARROW_SCSI);
+
+ FPT_sssyncv(port, currSCCB->TargID, sync_reg, currTar_Info);
+
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
+
+ ACCEPT_MSG(port);
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(unsigned char)TAR_SYNC_MASK) |
+ (unsigned char)SYNC_SUPPORTED);
+
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+
+ else {
+
+ ACCEPT_MSG_ATN(port);
+
+ FPT_sisyncr(port, sync_msg, offset);
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(unsigned char)TAR_SYNC_MASK) |
+ (unsigned char)SYNC_SUPPORTED);
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sisyncr
+ *
+ * Description: Answer the targets sync message.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_sisyncr(u32 port, unsigned char sync_pulse,
+ unsigned char offset)
+{
+ ARAM_ACCESS(port);
+ WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
+ WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMSYNC));
+ WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse));
+ WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
+ WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset));
+ WRW_HARPOON((port + SYNC_MSGS + 12), (BRH_OP + ALWAYS + NP));
+ SGRAM_ACCESS(port);
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+ WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT));
+
+ while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) {
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_siwidn
+ *
+ * Description: Read in a message byte from the SCSI bus, and check
+ * for a parity error.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_siwidn(u32 port, unsigned char p_card)
+{
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
+
+ if (!((currTar_Info->TarStatus & TAR_WIDE_MASK) == WIDE_NEGOCIATED)) {
+
+ WRW_HARPOON((port + ID_MSG_STRT),
+ (MPM_OP + AMSG_OUT +
+ (currSCCB->
+ Sccb_idmsg & ~(unsigned char)DISC_PRIV)));
+
+ WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
+
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + SMWDTR));
+ WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
+ WRW_HARPOON((port + SYNC_MSGS + 8),
+ (MPM_OP + AMSG_OUT + SM16BIT));
+ WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
+
+ WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(unsigned char)TAR_WIDE_MASK) |
+ (unsigned char)WIDE_ENABLED);
+
+ return 1;
+ }
+
+ else {
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(unsigned char)TAR_WIDE_MASK) |
+ WIDE_NEGOCIATED);
+
+ currTar_Info->TarEEValue &= ~EE_WIDE_SCSI;
+ return 0;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_stwidn
+ *
+ * Description: The has sent us a Wide Nego message so handle it as
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_stwidn(u32 port, unsigned char p_card)
+{
+ unsigned char width;
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
+
+ width = FPT_sfm(port, currSCCB);
+
+ if ((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ return;
+ }
+
+ if (!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
+ width = 0;
+
+ if (width) {
+ currTar_Info->TarStatus |= WIDE_ENABLED;
+ width = 0;
+ } else {
+ width = NARROW_SCSI;
+ currTar_Info->TarStatus &= ~WIDE_ENABLED;
+ }
+
+ FPT_sssyncv(port, currSCCB->TargID, width, currTar_Info);
+
+ if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
+
+ currTar_Info->TarStatus |= WIDE_NEGOCIATED;
+
+ if (!
+ ((currTar_Info->TarStatus & TAR_SYNC_MASK) ==
+ SYNC_SUPPORTED)) {
+ ACCEPT_MSG_ATN(port);
+ ARAM_ACCESS(port);
+ FPT_sisyncn(port, p_card, 1);
+ currSCCB->Sccb_scsistat = SELECT_SN_ST;
+ SGRAM_ACCESS(port);
+ } else {
+ ACCEPT_MSG(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+ }
+
+ else {
+
+ ACCEPT_MSG_ATN(port);
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ width = SM16BIT;
+ else
+ width = SM8BIT;
+
+ FPT_siwidr(port, width);
+
+ currTar_Info->TarStatus |= (WIDE_NEGOCIATED | WIDE_ENABLED);
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_siwidr
+ *
+ * Description: Answer the targets Wide nego message.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_siwidr(u32 port, unsigned char width)
+{
+ ARAM_ACCESS(port);
+ WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
+ WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMWDTR));
+ WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
+ WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width));
+ WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
+ SGRAM_ACCESS(port);
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+ WRW_HARPOON((port + hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(port + hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT));
+
+ while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | AUTO_INT))) {
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sssyncv
+ *
+ * Description: Write the desired value to the Sync Register for the
+ * ID specified.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_sssyncv(u32 p_port, unsigned char p_id,
+ unsigned char p_sync_value,
+ struct sccb_mgr_tar_info *currTar_Info)
+{
+ unsigned char index;
+
+ index = p_id;
+
+ switch (index) {
+
+ case 0:
+ index = 12; /* hp_synctarg_0 */
+ break;
+ case 1:
+ index = 13; /* hp_synctarg_1 */
+ break;
+ case 2:
+ index = 14; /* hp_synctarg_2 */
+ break;
+ case 3:
+ index = 15; /* hp_synctarg_3 */
+ break;
+ case 4:
+ index = 8; /* hp_synctarg_4 */
+ break;
+ case 5:
+ index = 9; /* hp_synctarg_5 */
+ break;
+ case 6:
+ index = 10; /* hp_synctarg_6 */
+ break;
+ case 7:
+ index = 11; /* hp_synctarg_7 */
+ break;
+ case 8:
+ index = 4; /* hp_synctarg_8 */
+ break;
+ case 9:
+ index = 5; /* hp_synctarg_9 */
+ break;
+ case 10:
+ index = 6; /* hp_synctarg_10 */
+ break;
+ case 11:
+ index = 7; /* hp_synctarg_11 */
+ break;
+ case 12:
+ index = 0; /* hp_synctarg_12 */
+ break;
+ case 13:
+ index = 1; /* hp_synctarg_13 */
+ break;
+ case 14:
+ index = 2; /* hp_synctarg_14 */
+ break;
+ case 15:
+ index = 3; /* hp_synctarg_15 */
+
+ }
+
+ WR_HARPOON(p_port + hp_synctarg_base + index, p_sync_value);
+
+ currTar_Info->TarSyncCtrl = p_sync_value;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sresb
+ *
+ * Description: Reset the desired card's SCSI bus.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_sresb(u32 port, unsigned char p_card)
+{
+ unsigned char scsiID, i;
+
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ WR_HARPOON(port + hp_page_ctrl,
+ (RD_HARPOON(port + hp_page_ctrl) | G_INT_DISABLE));
+ WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);
+
+ WR_HARPOON(port + hp_scsictrl_0, SCSI_RST);
+
+ scsiID = RD_HARPOON(port + hp_seltimeout);
+ WR_HARPOON(port + hp_seltimeout, TO_5ms);
+ WRW_HARPOON((port + hp_intstat), TIMEOUT);
+
+ WR_HARPOON(port + hp_portctrl_0, (SCSI_PORT | START_TO));
+
+ while (!(RDW_HARPOON((port + hp_intstat)) & TIMEOUT)) {
+ }
+
+ WR_HARPOON(port + hp_seltimeout, scsiID);
+
+ WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL);
+
+ FPT_Wait(port, TO_5ms);
+
+ WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);
+
+ WR_HARPOON(port + hp_int_mask, (RD_HARPOON(port + hp_int_mask) | 0x00));
+
+ for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++) {
+ currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK) {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI) {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info);
+
+ FPT_SccbMgrTableInitTarget(p_card, scsiID);
+ }
+
+ FPT_BL_Card[p_card].scanIndex = 0x00;
+ FPT_BL_Card[p_card].currentSCCB = NULL;
+ FPT_BL_Card[p_card].globalFlags &= ~(F_TAG_STARTED | F_HOST_XFER_ACT
+ | F_NEW_SCCB_CMD);
+ FPT_BL_Card[p_card].cmdCounter = 0x00;
+ FPT_BL_Card[p_card].discQCount = 0x00;
+ FPT_BL_Card[p_card].tagQ_Lst = 0x01;
+
+ for (i = 0; i < QUEUE_DEPTH; i++)
+ FPT_BL_Card[p_card].discQ_Tbl[i] = NULL;
+
+ WR_HARPOON(port + hp_page_ctrl,
+ (RD_HARPOON(port + hp_page_ctrl) & ~G_INT_DISABLE));
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_ssenss
+ *
+ * Description: Setup for the Auto Sense command.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_ssenss(struct sccb_card *pCurrCard)
+{
+ unsigned char i;
+ struct sccb *currSCCB;
+
+ currSCCB = pCurrCard->currentSCCB;
+
+ currSCCB->Save_CdbLen = currSCCB->CdbLength;
+
+ for (i = 0; i < 6; i++) {
+
+ currSCCB->Save_Cdb[i] = currSCCB->Cdb[i];
+ }
+
+ currSCCB->CdbLength = SIX_BYTE_CMD;
+ currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
+ currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */
+ currSCCB->Cdb[2] = 0x00;
+ currSCCB->Cdb[3] = 0x00;
+ currSCCB->Cdb[4] = currSCCB->RequestSenseLength;
+ currSCCB->Cdb[5] = 0x00;
+
+ currSCCB->Sccb_XferCnt = (u32)currSCCB->RequestSenseLength;
+
+ currSCCB->Sccb_ATC = 0x00;
+
+ currSCCB->Sccb_XferState |= F_AUTO_SENSE;
+
+ currSCCB->Sccb_XferState &= ~F_SG_XFER;
+
+ currSCCB->Sccb_idmsg = currSCCB->Sccb_idmsg & ~(unsigned char)DISC_PRIV;
+
+ currSCCB->ControlByte = 0x00;
+
+ currSCCB->Sccb_MGRFlags &= F_STATUSLOADED;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sxfrp
+ *
+ * Description: Transfer data into the bit bucket until the device
+ * decides to switch phase.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_sxfrp(u32 p_port, unsigned char p_card)
+{
+ unsigned char curr_phz;
+
+ DISABLE_AUTO(p_port);
+
+ if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) {
+
+ FPT_hostDataXferAbort(p_port, p_card,
+ FPT_BL_Card[p_card].currentSCCB);
+
+ }
+
+ /* If the Automation handled the end of the transfer then do not
+ match the phase or we will get out of sync with the ISR. */
+
+ if (RDW_HARPOON((p_port + hp_intstat)) &
+ (BUS_FREE | XFER_CNT_0 | AUTO_INT))
+ return;
+
+ WR_HARPOON(p_port + hp_xfercnt_0, 0x00);
+
+ curr_phz = RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ;
+
+ WRW_HARPOON((p_port + hp_intstat), XFER_CNT_0);
+
+ WR_HARPOON(p_port + hp_scsisig, curr_phz);
+
+ while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET)) &&
+ (curr_phz ==
+ (RD_HARPOON(p_port + hp_scsisig) & (unsigned char)S_SCSI_PHZ)))
+ {
+ if (curr_phz & (unsigned char)SCSI_IOBIT) {
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (SCSI_PORT | HOST_PORT | SCSI_INBIT));
+
+ if (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) {
+ RD_HARPOON(p_port + hp_fifodata_0);
+ }
+ } else {
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (SCSI_PORT | HOST_PORT | HOST_WRT));
+ if (RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY) {
+ WR_HARPOON(p_port + hp_fifodata_0, 0xFA);
+ }
+ }
+ } /* End of While loop for padding data I/O phase */
+
+ while (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) {
+ if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ)
+ break;
+ }
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (SCSI_PORT | HOST_PORT | SCSI_INBIT));
+ while (!(RD_HARPOON(p_port + hp_xferstat) & FIFO_EMPTY)) {
+ RD_HARPOON(p_port + hp_fifodata_0);
+ }
+
+ if (!(RDW_HARPOON((p_port + hp_intstat)) & (BUS_FREE | RESET))) {
+ WR_HARPOON(p_port + hp_autostart_0,
+ (AUTO_IMMED + DISCONNECT_START));
+ while (!(RDW_HARPOON((p_port + hp_intstat)) & AUTO_INT)) {
+ }
+
+ if (RDW_HARPOON((p_port + hp_intstat)) &
+ (ICMD_COMP | ITAR_DISC))
+ while (!
+ (RDW_HARPOON((p_port + hp_intstat)) &
+ (BUS_FREE | RSEL))) ;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_schkdd
+ *
+ * Description: Make sure data has been flushed from both FIFOs and abort
+ * the operations if necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_schkdd(u32 port, unsigned char p_card)
+{
+ unsigned short TimeOutLoop;
+ unsigned char sPhase;
+
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if ((currSCCB->Sccb_scsistat != DATA_OUT_ST) &&
+ (currSCCB->Sccb_scsistat != DATA_IN_ST)) {
+ return;
+ }
+
+ if (currSCCB->Sccb_XferState & F_ODD_BALL_CNT) {
+
+ currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - 1);
+
+ currSCCB->Sccb_XferCnt = 1;
+
+ currSCCB->Sccb_XferState &= ~F_ODD_BALL_CNT;
+ WRW_HARPOON((port + hp_fiforead), (unsigned short)0x00);
+ WR_HARPOON(port + hp_xferstat, 0x00);
+ }
+
+ else {
+
+ currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
+
+ currSCCB->Sccb_XferCnt = 0;
+ }
+
+ if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE)) {
+
+ currSCCB->HostStatus = SCCB_PARITY_ERR;
+ WRW_HARPOON((port + hp_intstat), PARITY);
+ }
+
+ FPT_hostDataXferAbort(port, p_card, currSCCB);
+
+ while (RD_HARPOON(port + hp_scsisig) & SCSI_ACK) {
+ }
+
+ TimeOutLoop = 0;
+
+ while (RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY) {
+ if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
+ return;
+ }
+ if (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) {
+ break;
+ }
+ if (RDW_HARPOON((port + hp_intstat)) & RESET) {
+ return;
+ }
+ if ((RD_HARPOON(port + hp_scsisig) & SCSI_REQ)
+ || (TimeOutLoop++ > 0x3000))
+ break;
+ }
+
+ sPhase = RD_HARPOON(port + hp_scsisig) & (SCSI_BSY | S_SCSI_PHZ);
+ if ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) ||
+ (RD_HARPOON(port + hp_offsetctr) & (unsigned char)0x1F) ||
+ (sPhase == (SCSI_BSY | S_DATAO_PH)) ||
+ (sPhase == (SCSI_BSY | S_DATAI_PH))) {
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+
+ if (!(currSCCB->Sccb_XferState & F_ALL_XFERRED)) {
+ if (currSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+ FPT_phaseDataIn(port, p_card);
+ }
+
+ else {
+ FPT_phaseDataOut(port, p_card);
+ }
+ } else {
+ FPT_sxfrp(port, p_card);
+ if (!(RDW_HARPOON((port + hp_intstat)) &
+ (BUS_FREE | ICMD_COMP | ITAR_DISC | RESET))) {
+ WRW_HARPOON((port + hp_intstat), AUTO_INT);
+ FPT_phaseDecode(port, p_card);
+ }
+ }
+
+ }
+
+ else {
+ WR_HARPOON(port + hp_portctrl_0, 0x00);
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sinits
+ *
+ * Description: Setup SCCB manager fields in this SCCB.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
+{
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
+ return;
+ }
+ currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
+
+ p_sccb->Sccb_XferState = 0x00;
+ p_sccb->Sccb_XferCnt = p_sccb->DataLength;
+
+ if ((p_sccb->OperationCode == SCATTER_GATHER_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_SG_COMMAND)) {
+
+ p_sccb->Sccb_SGoffset = 0;
+ p_sccb->Sccb_XferState = F_SG_XFER;
+ p_sccb->Sccb_XferCnt = 0x00;
+ }
+
+ if (p_sccb->DataLength == 0x00)
+
+ p_sccb->Sccb_XferState |= F_ALL_XFERRED;
+
+ if (p_sccb->ControlByte & F_USE_CMD_Q) {
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
+ p_sccb->ControlByte &= ~F_USE_CMD_Q;
+
+ else
+ currTar_Info->TarStatus |= TAG_Q_TRYING;
+ }
+
+/* For !single SCSI device in system & device allow Disconnect
+ or command is tag_q type then send Cmd with Disconnect Enable
+ else send Cmd with Disconnect Disable */
+
+/*
+ if (((!(FPT_BL_Card[p_card].globalFlags & F_SINGLE_DEVICE)) &&
+ (currTar_Info->TarStatus & TAR_ALLOW_DISC)) ||
+ (currTar_Info->TarStatus & TAG_Q_TRYING)) {
+*/
+ if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
+ (currTar_Info->TarStatus & TAG_Q_TRYING)) {
+ p_sccb->Sccb_idmsg =
+ (unsigned char)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
+ }
+
+ else {
+
+ p_sccb->Sccb_idmsg = (unsigned char)SMIDENT | p_sccb->Lun;
+ }
+
+ p_sccb->HostStatus = 0x00;
+ p_sccb->TargetStatus = 0x00;
+ p_sccb->Sccb_tag = 0x00;
+ p_sccb->Sccb_MGRFlags = 0x00;
+ p_sccb->Sccb_sgseg = 0x00;
+ p_sccb->Sccb_ATC = 0x00;
+ p_sccb->Sccb_savedATC = 0x00;
+/*
+ p_sccb->SccbVirtDataPtr = 0x00;
+ p_sccb->Sccb_forwardlink = NULL;
+ p_sccb->Sccb_backlink = NULL;
+ */
+ p_sccb->Sccb_scsistat = BUS_FREE_ST;
+ p_sccb->SccbStatus = SCCB_IN_PROCESS;
+ p_sccb->Sccb_scsimsg = SMNO_OP;
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Decode
+ *
+ * Description: Determine the phase and call the appropriate function.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseDecode(u32 p_port, unsigned char p_card)
+{
+ unsigned char phase_ref;
+ void (*phase) (u32, unsigned char);
+
+ DISABLE_AUTO(p_port);
+
+ phase_ref =
+ (unsigned char)(RD_HARPOON(p_port + hp_scsisig) & S_SCSI_PHZ);
+
+ phase = FPT_s_PhaseTbl[phase_ref];
+
+ (*phase) (p_port, p_card); /* Call the correct phase func */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data Out Phase
+ *
+ * Description: Start up both the BusMaster and Xbow.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseDataOut(u32 port, unsigned char p_card)
+{
+
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+ if (currSCCB == NULL) {
+ return; /* Exit if No SCCB record */
+ }
+
+ currSCCB->Sccb_scsistat = DATA_OUT_ST;
+ currSCCB->Sccb_XferState &= ~(F_HOST_XFER_DIR | F_NO_DATA_YET);
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+
+ WRW_HARPOON((port + hp_intstat), XFER_CNT_0);
+
+ WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START));
+
+ FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]);
+
+ if (currSCCB->Sccb_XferCnt == 0) {
+
+ if ((currSCCB->ControlByte & SCCB_DATA_XFER_OUT) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE))
+ currSCCB->HostStatus = SCCB_DATA_OVER_RUN;
+
+ FPT_sxfrp(port, p_card);
+ if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET)))
+ FPT_phaseDecode(port, p_card);
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data In Phase
+ *
+ * Description: Startup the BusMaster and the XBOW.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseDataIn(u32 port, unsigned char p_card)
+{
+
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if (currSCCB == NULL) {
+ return; /* Exit if No SCCB record */
+ }
+
+ currSCCB->Sccb_scsistat = DATA_IN_ST;
+ currSCCB->Sccb_XferState |= F_HOST_XFER_DIR;
+ currSCCB->Sccb_XferState &= ~F_NO_DATA_YET;
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_PORT);
+
+ WRW_HARPOON((port + hp_intstat), XFER_CNT_0);
+
+ WR_HARPOON(port + hp_autostart_0, (END_DATA + END_DATA_START));
+
+ FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]);
+
+ if (currSCCB->Sccb_XferCnt == 0) {
+
+ if ((currSCCB->ControlByte & SCCB_DATA_XFER_IN) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE))
+ currSCCB->HostStatus = SCCB_DATA_OVER_RUN;
+
+ FPT_sxfrp(port, p_card);
+ if (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | RESET)))
+ FPT_phaseDecode(port, p_card);
+
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Command Phase
+ *
+ * Description: Load the CDB into the automation and start it up.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseCommand(u32 p_port, unsigned char p_card)
+{
+ struct sccb *currSCCB;
+ u32 cdb_reg;
+ unsigned char i;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if (currSCCB->OperationCode == RESET_COMMAND) {
+
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ currSCCB->CdbLength = SIX_BYTE_CMD;
+ }
+
+ WR_HARPOON(p_port + hp_scsisig, 0x00);
+
+ ARAM_ACCESS(p_port);
+
+ cdb_reg = p_port + CMD_STRT;
+
+ for (i = 0; i < currSCCB->CdbLength; i++) {
+
+ if (currSCCB->OperationCode == RESET_COMMAND)
+
+ WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + 0x00));
+
+ else
+ WRW_HARPOON(cdb_reg,
+ (MPM_OP + ACOMMAND + currSCCB->Cdb[i]));
+ cdb_reg += 2;
+ }
+
+ if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
+ WRW_HARPOON(cdb_reg, (BRH_OP + ALWAYS + NP));
+
+ WR_HARPOON(p_port + hp_portctrl_0, (SCSI_PORT));
+
+ currSCCB->Sccb_scsistat = COMMAND_ST;
+
+ WR_HARPOON(p_port + hp_autostart_3, (AUTO_IMMED | CMD_ONLY_STRT));
+ SGRAM_ACCESS(p_port);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Status phase
+ *
+ * Description: Bring in the status and command complete message bytes
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseStatus(u32 port, unsigned char p_card)
+{
+ /* Start-up the automation to finish off this command and let the
+ isr handle the interrupt for command complete when it comes in.
+ We could wait here for the interrupt to be generated?
+ */
+
+ WR_HARPOON(port + hp_scsisig, 0x00);
+
+ WR_HARPOON(port + hp_autostart_0, (AUTO_IMMED + END_DATA_START));
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Message Out
+ *
+ * Description: Send out our message (if we have one) and handle whatever
+ * else is involed.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
+{
+ unsigned char message, scsiID;
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if (currSCCB != NULL) {
+
+ message = currSCCB->Sccb_scsimsg;
+ scsiID = currSCCB->TargID;
+
+ if (message == SMDEV_RESET) {
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
+ currTar_Info->TarSyncCtrl = 0;
+ FPT_sssyncv(port, scsiID, NARROW_SCSI, currTar_Info);
+
+ if (FPT_sccbMgrTbl[p_card][scsiID].
+ TarEEValue & EE_SYNC_MASK) {
+
+ FPT_sccbMgrTbl[p_card][scsiID].TarStatus &=
+ ~TAR_SYNC_MASK;
+
+ }
+
+ if (FPT_sccbMgrTbl[p_card][scsiID].
+ TarEEValue & EE_WIDE_SCSI) {
+
+ FPT_sccbMgrTbl[p_card][scsiID].TarStatus &=
+ ~TAR_WIDE_MASK;
+ }
+
+ FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
+ FPT_SccbMgrTableInitTarget(p_card, scsiID);
+ } else if (currSCCB->Sccb_scsistat == ABORT_ST) {
+ currSCCB->HostStatus = SCCB_COMPLETE;
+ if (FPT_BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] !=
+ NULL) {
+ FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
+ Sccb_tag] = NULL;
+ FPT_sccbMgrTbl[p_card][scsiID].TarTagQ_Cnt--;
+ }
+
+ }
+
+ else if (currSCCB->Sccb_scsistat < COMMAND_ST) {
+
+ if (message == SMNO_OP) {
+ currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
+
+ FPT_ssel(port, p_card);
+ return;
+ }
+ } else {
+
+ if (message == SMABORT)
+
+ FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
+ }
+
+ } else {
+ message = SMABORT;
+ }
+
+ WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
+
+ WR_HARPOON(port + hp_portctrl_0, SCSI_BUS_EN);
+
+ WR_HARPOON(port + hp_scsidata_0, message);
+
+ WR_HARPOON(port + hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ ACCEPT_MSG(port);
+
+ WR_HARPOON(port + hp_portctrl_0, 0x00);
+
+ if ((message == SMABORT) || (message == SMDEV_RESET) ||
+ (message == SMABORT_TAG)) {
+
+ while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
+ }
+
+ if (RDW_HARPOON((port + hp_intstat)) & BUS_FREE) {
+ WRW_HARPOON((port + hp_intstat), BUS_FREE);
+
+ if (currSCCB != NULL) {
+
+ if ((FPT_BL_Card[p_card].
+ globalFlags & F_CONLUN_IO)
+ &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING))
+ FPT_sccbMgrTbl[p_card][currSCCB->
+ TargID].
+ TarLUNBusy[currSCCB->Lun] = 0;
+ else
+ FPT_sccbMgrTbl[p_card][currSCCB->
+ TargID].
+ TarLUNBusy[0] = 0;
+
+ FPT_queueCmdComplete(&FPT_BL_Card[p_card],
+ currSCCB, p_card);
+ }
+
+ else {
+ FPT_BL_Card[p_card].globalFlags |=
+ F_NEW_SCCB_CMD;
+ }
+ }
+
+ else {
+
+ FPT_sxfrp(port, p_card);
+ }
+ }
+
+ else {
+
+ if (message == SMPARITY) {
+ currSCCB->Sccb_scsimsg = SMNO_OP;
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ } else {
+ FPT_sxfrp(port, p_card);
+ }
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Message In phase
+ *
+ * Description: Bring in the message and determine what to do with it.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
+{
+ unsigned char message;
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if (FPT_BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) {
+
+ FPT_phaseChkFifo(port, p_card);
+ }
+
+ message = RD_HARPOON(port + hp_scsidata_0);
+ if ((message == SMDISC) || (message == SMSAVE_DATA_PTR)) {
+
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + END_DATA_START));
+
+ }
+
+ else {
+
+ message = FPT_sfm(port, currSCCB);
+ if (message) {
+
+ FPT_sdecm(message, port, p_card);
+
+ } else {
+ if (currSCCB->Sccb_scsimsg != SMPARITY)
+ ACCEPT_MSG(port);
+ WR_HARPOON(port + hp_autostart_1,
+ (AUTO_IMMED + DISCONNECT_START));
+ }
+ }
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Illegal phase
+ *
+ * Description: Target switched to some illegal phase, so all we can do
+ * is report an error back to the host (if that is possible)
+ * and send an ABORT message to the misbehaving target.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseIllegal(u32 port, unsigned char p_card)
+{
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ WR_HARPOON(port + hp_scsisig, RD_HARPOON(port + hp_scsisig));
+ if (currSCCB != NULL) {
+
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ currSCCB->Sccb_scsistat = ABORT_ST;
+ currSCCB->Sccb_scsimsg = SMABORT;
+ }
+
+ ACCEPT_MSG_ATN(port);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Check FIFO
+ *
+ * Description: Make sure data has been flushed from both FIFOs and abort
+ * the operations if necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_phaseChkFifo(u32 port, unsigned char p_card)
+{
+ u32 xfercnt;
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if (currSCCB->Sccb_scsistat == DATA_IN_ST) {
+
+ while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) &&
+ (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)) {
+ }
+
+ if (!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY)) {
+ currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
+
+ currSCCB->Sccb_XferCnt = 0;
+
+ if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE)) {
+ currSCCB->HostStatus = SCCB_PARITY_ERR;
+ WRW_HARPOON((port + hp_intstat), PARITY);
+ }
+
+ FPT_hostDataXferAbort(port, p_card, currSCCB);
+
+ FPT_dataXferProcessor(port, &FPT_BL_Card[p_card]);
+
+ while ((!(RD_HARPOON(port + hp_xferstat) & FIFO_EMPTY))
+ && (RD_HARPOON(port + hp_ext_status) &
+ BM_CMD_BUSY)) {
+ }
+
+ }
+ }
+
+ /*End Data In specific code. */
+ GET_XFER_CNT(port, xfercnt);
+
+ WR_HARPOON(port + hp_xfercnt_0, 0x00);
+
+ WR_HARPOON(port + hp_portctrl_0, 0x00);
+
+ currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - xfercnt);
+
+ currSCCB->Sccb_XferCnt = xfercnt;
+
+ if ((RDW_HARPOON((port + hp_intstat)) & PARITY) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE)) {
+
+ currSCCB->HostStatus = SCCB_PARITY_ERR;
+ WRW_HARPOON((port + hp_intstat), PARITY);
+ }
+
+ FPT_hostDataXferAbort(port, p_card, currSCCB);
+
+ WR_HARPOON(port + hp_fifowrite, 0x00);
+ WR_HARPOON(port + hp_fiforead, 0x00);
+ WR_HARPOON(port + hp_xferstat, 0x00);
+
+ WRW_HARPOON((port + hp_intstat), XFER_CNT_0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Bus Free
+ *
+ * Description: We just went bus free so figure out if it was
+ * because of command complete or from a disconnect.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_phaseBusFree(u32 port, unsigned char p_card)
+{
+ struct sccb *currSCCB;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ if (currSCCB != NULL) {
+
+ DISABLE_AUTO(port);
+
+ if (currSCCB->OperationCode == RESET_COMMAND) {
+
+ if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[currSCCB->Lun] = 0;
+ else
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[0] = 0;
+
+ FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB,
+ p_card);
+
+ FPT_queueSearchSelect(&FPT_BL_Card[p_card], p_card);
+
+ }
+
+ else if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
+ (unsigned char)SYNC_SUPPORTED;
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
+ ~EE_SYNC_MASK;
+ }
+
+ else if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
+ (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
+ ~EE_WIDE_SCSI;
+ }
+
+ else if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
+ /* Make sure this is not a phony BUS_FREE. If we were
+ reselected or if BUSY is NOT on then this is a
+ valid BUS FREE. SRR Wednesday, 5/10/1995. */
+
+ if ((!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ||
+ (RDW_HARPOON((port + hp_intstat)) & RSEL)) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus &= ~TAR_TAG_Q_MASK;
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus |= TAG_Q_REJECT;
+ }
+
+ else {
+ return;
+ }
+ }
+
+ else {
+
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+
+ if (!currSCCB->HostStatus) {
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ }
+
+ if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[currSCCB->Lun] = 0;
+ else
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[0] = 0;
+
+ FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB,
+ p_card);
+ return;
+ }
+
+ FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ } /*end if !=null */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Auto Load Default Map
+ *
+ * Description: Load the Automation RAM with the defualt map values.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_autoLoadDefaultMap(u32 p_port)
+{
+ u32 map_addr;
+
+ ARAM_ACCESS(p_port);
+ map_addr = p_port + hp_aramBase;
+
+ WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0xC0)); /*ID MESSAGE */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x20)); /*SIMPLE TAG QUEUEING MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, RAT_OP); /*RESET ATTENTION */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + AMSG_OUT + 0x00)); /*TAG ID MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 0 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 1 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 2 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 3 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 4 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 5 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 6 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 7 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 8 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 9 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 10 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MPM_OP + ACOMMAND + 0x00)); /*CDB BYTE 11 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CPE_OP + ADATA_OUT + DINT)); /*JUMP IF DATA OUT */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (TCB_OP + FIFO_0 + DI)); /*JUMP IF NO DATA IN FIFO */
+ map_addr += 2; /*This means AYNC DATA IN */
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_IDO_STRT)); /*STOP AND INTERRUPT */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CPE_OP + ADATA_IN + DINT)); /*JUMP IF NOT DATA IN PHZ */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK 4 DATA IN */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x02)); /*SAVE DATA PTR MSG? */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + DC)); /*GO CHECK FOR DISCONNECT MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR1)); /*SAVE DATA PTRS MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + ST)); /*IF NOT MSG IN CHECK DATA IN */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x04)); /*DISCONNECT MSG? */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + UNKNWN)); /*UKNKNOWN MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*XFER DISCONNECT MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_ITAR_DISC)); /*STOP AND INTERRUPT */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CPN_OP + ASTATUS + UNKNWN)); /*JUMP IF NOT STATUS PHZ. */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_AR0)); /*GET STATUS BYTE */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CPN_OP + AMSG_IN + CC)); /*ERROR IF NOT MSG IN PHZ */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (CRD_OP + SDATA + 0x00)); /*CHECK FOR CMD COMPLETE MSG. */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (BRH_OP + NOT_EQ + CC)); /*ERROR IF NOT CMD COMPLETE MSG. */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (MRR_OP + SDATA + D_BUCKET)); /*GET CMD COMPLETE MSG */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_ICMD_COMP)); /*END OF COMMAND */
+ map_addr += 2;
+
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_IUNKWN)); /*RECEIVED UNKNOWN MSG BYTE */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_ITICKLE)); /*BIOS Tickled the Mgr */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_IRFAIL)); /*EXPECTED ID/TAG MESSAGES AND */
+ map_addr += 2; /* DIDN'T GET ONE */
+ WRW_HARPOON(map_addr, (CRR_OP + AR3 + S_IDREG)); /* comp SCSI SEL ID & AR3 */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (BRH_OP + EQUAL + 0x00)); /*SEL ID OK then Conti. */
+ map_addr += 2;
+ WRW_HARPOON(map_addr, (SSI_OP + SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */
+
+ SGRAM_ACCESS(p_port);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Auto Command Complete
+ *
+ * Description: Post command back to host and find another command
+ * to execute.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
+{
+ struct sccb *currSCCB;
+ unsigned char status_byte;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+
+ status_byte = RD_HARPOON(p_port + hp_gp_reg_0);
+
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0;
+
+ if (status_byte != SSGOOD) {
+
+ if (status_byte == SSQ_FULL) {
+
+ if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[currSCCB->Lun] = 1;
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ LunDiscQ_Idx[currSCCB->Lun]] =
+ NULL;
+ } else {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[0] = 1;
+ if (currSCCB->Sccb_tag) {
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
+ Sccb_tag]
+ = NULL;
+ } else {
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ currSCCB->Sccb_MGRFlags |= F_STATUSLOADED;
+
+ FPT_queueSelectFail(&FPT_BL_Card[p_card], p_card);
+
+ return;
+ }
+
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
+ (unsigned char)SYNC_SUPPORTED;
+
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
+ ~EE_SYNC_MASK;
+ FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[currSCCB->Lun] = 1;
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ LunDiscQ_Idx[currSCCB->Lun]] =
+ NULL;
+ } else {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[0] = 1;
+ if (currSCCB->Sccb_tag) {
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
+ Sccb_tag]
+ = NULL;
+ } else {
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ return;
+
+ }
+
+ if (currSCCB->Sccb_scsistat == SELECT_WN_ST) {
+
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
+ (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &=
+ ~EE_WIDE_SCSI;
+ FPT_BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[currSCCB->Lun] = 1;
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ LunDiscQ_Idx[currSCCB->Lun]] =
+ NULL;
+ } else {
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUNBusy[0] = 1;
+ if (currSCCB->Sccb_tag) {
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].discQ_Tbl[currSCCB->
+ Sccb_tag]
+ = NULL;
+ } else {
+ if (FPT_BL_Card[p_card].discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ return;
+
+ }
+
+ if (status_byte == SSCHECK) {
+ if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) {
+ if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarEEValue & EE_SYNC_MASK) {
+ FPT_sccbMgrTbl[p_card][currSCCB->
+ TargID].
+ TarStatus &= ~TAR_SYNC_MASK;
+ }
+ if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarEEValue & EE_WIDE_SCSI) {
+ FPT_sccbMgrTbl[p_card][currSCCB->
+ TargID].
+ TarStatus &= ~TAR_WIDE_MASK;
+ }
+ }
+ }
+
+ if (!(currSCCB->Sccb_XferState & F_AUTO_SENSE)) {
+
+ currSCCB->SccbStatus = SCCB_ERROR;
+ currSCCB->TargetStatus = status_byte;
+
+ if (status_byte == SSCHECK) {
+
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarLUN_CA = 1;
+
+ if (currSCCB->RequestSenseLength !=
+ NO_AUTO_REQUEST_SENSE) {
+
+ if (currSCCB->RequestSenseLength == 0)
+ currSCCB->RequestSenseLength =
+ 14;
+
+ FPT_ssenss(&FPT_BL_Card[p_card]);
+ FPT_BL_Card[p_card].globalFlags |=
+ F_NEW_SCCB_CMD;
+
+ if (((FPT_BL_Card[p_card].
+ globalFlags & F_CONLUN_IO)
+ &&
+ ((FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING))) {
+ FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ TarLUNBusy[currSCCB->Lun] =
+ 1;
+ if (FPT_BL_Card[p_card].
+ discQCount != 0)
+ FPT_BL_Card[p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[FPT_sccbMgrTbl
+ [p_card]
+ [currSCCB->
+ TargID].
+ LunDiscQ_Idx
+ [currSCCB->Lun]] =
+ NULL;
+ } else {
+ FPT_sccbMgrTbl[p_card]
+ [currSCCB->TargID].
+ TarLUNBusy[0] = 1;
+ if (currSCCB->Sccb_tag) {
+ if (FPT_BL_Card[p_card].
+ discQCount != 0)
+ FPT_BL_Card
+ [p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl[currSCCB->
+ Sccb_tag]
+ = NULL;
+ } else {
+ if (FPT_BL_Card[p_card].
+ discQCount != 0)
+ FPT_BL_Card
+ [p_card].
+ discQCount--;
+ FPT_BL_Card[p_card].
+ discQ_Tbl
+ [FPT_sccbMgrTbl
+ [p_card][currSCCB->
+ TargID].
+ LunDiscQ_Idx[0]] =
+ NULL;
+ }
+ }
+ return;
+ }
+ }
+ }
+ }
+
+ if ((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->
+ Lun] = 0;
+ else
+ FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = 0;
+
+ FPT_queueCmdComplete(&FPT_BL_Card[p_card], currSCCB, p_card);
+}
+
+#define SHORT_WAIT 0x0000000F
+#define LONG_WAIT 0x0000FFFFL
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data Transfer Processor
+ *
+ * Description: This routine performs two tasks.
+ * (1) Start data transfer by calling HOST_DATA_XFER_START
+ * function. Once data transfer is started, (2) Depends
+ * on the type of data transfer mode Scatter/Gather mode
+ * or NON Scatter/Gather mode. In NON Scatter/Gather mode,
+ * this routine checks Sccb_MGRFlag (F_HOST_XFER_ACT bit) for
+ * data transfer done. In Scatter/Gather mode, this routine
+ * checks bus master command complete and dual rank busy
+ * bit to keep chaining SC transfer command. Similarly,
+ * in Scatter/Gather mode, it checks Sccb_MGRFlag
+ * (F_HOST_XFER_ACT bit) for data transfer done.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_dataXferProcessor(u32 port, struct sccb_card *pCurrCard)
+{
+ struct sccb *currSCCB;
+
+ currSCCB = pCurrCard->currentSCCB;
+
+ if (currSCCB->Sccb_XferState & F_SG_XFER) {
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+ {
+ currSCCB->Sccb_sgseg += (unsigned char)SG_BUF_CNT;
+ currSCCB->Sccb_SGoffset = 0x00;
+ }
+ pCurrCard->globalFlags |= F_HOST_XFER_ACT;
+
+ FPT_busMstrSGDataXferStart(port, currSCCB);
+ }
+
+ else {
+ if (!(pCurrCard->globalFlags & F_HOST_XFER_ACT)) {
+ pCurrCard->globalFlags |= F_HOST_XFER_ACT;
+
+ FPT_busMstrDataXferStart(port, currSCCB);
+ }
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Scatter Gather Data Transfer Start
+ *
+ * Description:
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_busMstrSGDataXferStart(u32 p_port, struct sccb *pcurrSCCB)
+{
+ u32 count, addr, tmpSGCnt;
+ unsigned int sg_index;
+ unsigned char sg_count, i;
+ u32 reg_offset;
+ struct blogic_sg_seg *segp;
+
+ if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)
+ count = ((u32)HOST_RD_CMD) << 24;
+ else
+ count = ((u32)HOST_WRT_CMD) << 24;
+
+ sg_count = 0;
+ tmpSGCnt = 0;
+ sg_index = pcurrSCCB->Sccb_sgseg;
+ reg_offset = hp_aramBase;
+
+ i = (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) &
+ ~(SGRAM_ARAM | SCATTER_EN));
+
+ WR_HARPOON(p_port + hp_page_ctrl, i);
+
+ while ((sg_count < (unsigned char)SG_BUF_CNT) &&
+ ((sg_index * (unsigned int)SG_ELEMENT_SIZE) <
+ pcurrSCCB->DataLength)) {
+
+ segp = (struct blogic_sg_seg *)(pcurrSCCB->DataPointer) +
+ sg_index;
+ tmpSGCnt += segp->segbytes;
+ count |= segp->segbytes;
+ addr = segp->segdata;
+
+ if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) {
+ addr +=
+ ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset);
+ count =
+ (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset;
+ tmpSGCnt = count & 0x00FFFFFFL;
+ }
+
+ WR_HARP32(p_port, reg_offset, addr);
+ reg_offset += 4;
+
+ WR_HARP32(p_port, reg_offset, count);
+ reg_offset += 4;
+
+ count &= 0xFF000000L;
+ sg_index++;
+ sg_count++;
+
+ } /*End While */
+
+ pcurrSCCB->Sccb_XferCnt = tmpSGCnt;
+
+ WR_HARPOON(p_port + hp_sg_addr, (sg_count << 4));
+
+ if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+ WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt);
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (DMA_PORT | SCSI_PORT | SCSI_INBIT));
+ WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH);
+ }
+
+ else {
+
+ if ((!(RD_HARPOON(p_port + hp_synctarg_0) & NARROW_SCSI)) &&
+ (tmpSGCnt & 0x000000001)) {
+
+ pcurrSCCB->Sccb_XferState |= F_ODD_BALL_CNT;
+ tmpSGCnt--;
+ }
+
+ WR_HARP32(p_port, hp_xfercnt_0, tmpSGCnt);
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (SCSI_PORT | DMA_PORT | DMA_RD));
+ WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH);
+ }
+
+ WR_HARPOON(p_port + hp_page_ctrl, (unsigned char)(i | SCATTER_EN));
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Data Transfer Start
+ *
+ * Description:
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_busMstrDataXferStart(u32 p_port, struct sccb *pcurrSCCB)
+{
+ u32 addr, count;
+
+ if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) {
+
+ count = pcurrSCCB->Sccb_XferCnt;
+
+ addr = (u32)(unsigned long)pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC;
+ }
+
+ else {
+ addr = pcurrSCCB->SensePointer;
+ count = pcurrSCCB->RequestSenseLength;
+
+ }
+
+ HP_SETUP_ADDR_CNT(p_port, addr, count);
+
+ if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (DMA_PORT | SCSI_PORT | SCSI_INBIT));
+ WR_HARPOON(p_port + hp_scsisig, S_DATAI_PH);
+
+ WR_HARPOON(p_port + hp_xfer_cmd,
+ (XFER_DMA_HOST | XFER_HOST_AUTO | XFER_DMA_8BIT));
+ }
+
+ else {
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (SCSI_PORT | DMA_PORT | DMA_RD));
+ WR_HARPOON(p_port + hp_scsisig, S_DATAO_PH);
+
+ WR_HARPOON(p_port + hp_xfer_cmd,
+ (XFER_HOST_DMA | XFER_HOST_AUTO | XFER_DMA_8BIT));
+
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Timeout Handler
+ *
+ * Description: This function is called after a bus master command busy time
+ * out is detected. This routines issue halt state machine
+ * with a software time out for command busy. If command busy
+ * is still asserted at the end of the time out, it issues
+ * hard abort with another software time out. It hard abort
+ * command busy is also time out, it'll just give up.
+ *
+ *---------------------------------------------------------------------*/
+static unsigned char FPT_busMstrTimeOut(u32 p_port)
+{
+ unsigned long timeout;
+
+ timeout = LONG_WAIT;
+
+ WR_HARPOON(p_port + hp_sys_ctrl, HALT_MACH);
+
+ while ((!(RD_HARPOON(p_port + hp_ext_status) & CMD_ABORTED))
+ && timeout--) {
+ }
+
+ if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) {
+ WR_HARPOON(p_port + hp_sys_ctrl, HARD_ABORT);
+
+ timeout = LONG_WAIT;
+ while ((RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY)
+ && timeout--) {
+ }
+ }
+
+ RD_HARPOON(p_port + hp_int_status); /*Clear command complete */
+
+ if (RD_HARPOON(p_port + hp_ext_status) & BM_CMD_BUSY) {
+ return 1;
+ }
+
+ else {
+ return 0;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Host Data Transfer Abort
+ *
+ * Description: Abort any in progress transfer.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_hostDataXferAbort(u32 port, unsigned char p_card,
+ struct sccb *pCurrSCCB)
+{
+
+ unsigned long timeout;
+ unsigned long remain_cnt;
+ u32 sg_ptr;
+ struct blogic_sg_seg *segp;
+
+ FPT_BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT;
+
+ if (pCurrSCCB->Sccb_XferState & F_AUTO_SENSE) {
+
+ if (!(RD_HARPOON(port + hp_int_status) & INT_CMD_COMPL)) {
+
+ WR_HARPOON(port + hp_bm_ctrl,
+ (RD_HARPOON(port + hp_bm_ctrl) |
+ FLUSH_XFER_CNTR));
+ timeout = LONG_WAIT;
+
+ while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)
+ && timeout--) {
+ }
+
+ WR_HARPOON(port + hp_bm_ctrl,
+ (RD_HARPOON(port + hp_bm_ctrl) &
+ ~FLUSH_XFER_CNTR));
+
+ if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {
+
+ if (FPT_busMstrTimeOut(port)) {
+
+ if (pCurrSCCB->HostStatus == 0x00)
+
+ pCurrSCCB->HostStatus =
+ SCCB_BM_ERR;
+
+ }
+
+ if (RD_HARPOON(port + hp_int_status) &
+ INT_EXT_STATUS)
+
+ if (RD_HARPOON(port + hp_ext_status) &
+ BAD_EXT_STATUS)
+
+ if (pCurrSCCB->HostStatus ==
+ 0x00)
+ {
+ pCurrSCCB->HostStatus =
+ SCCB_BM_ERR;
+ }
+ }
+ }
+ }
+
+ else if (pCurrSCCB->Sccb_XferCnt) {
+
+ if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
+
+ WR_HARPOON(port + hp_page_ctrl,
+ (RD_HARPOON(port + hp_page_ctrl) &
+ ~SCATTER_EN));
+
+ WR_HARPOON(port + hp_sg_addr, 0x00);
+
+ sg_ptr = pCurrSCCB->Sccb_sgseg + SG_BUF_CNT;
+
+ if (sg_ptr >
+ (unsigned int)(pCurrSCCB->DataLength /
+ SG_ELEMENT_SIZE)) {
+
+ sg_ptr = (u32)(pCurrSCCB->DataLength /
+ SG_ELEMENT_SIZE);
+ }
+
+ remain_cnt = pCurrSCCB->Sccb_XferCnt;
+
+ while (remain_cnt < 0x01000000L) {
+
+ sg_ptr--;
+ segp = (struct blogic_sg_seg *)(pCurrSCCB->
+ DataPointer) + (sg_ptr * 2);
+ if (remain_cnt > (unsigned long)segp->segbytes)
+ remain_cnt -=
+ (unsigned long)segp->segbytes;
+ else
+ break;
+ }
+
+ if (remain_cnt < 0x01000000L) {
+
+ pCurrSCCB->Sccb_SGoffset = remain_cnt;
+
+ pCurrSCCB->Sccb_sgseg = (unsigned short)sg_ptr;
+
+ if ((unsigned long)(sg_ptr * SG_ELEMENT_SIZE) ==
+ pCurrSCCB->DataLength && (remain_cnt == 0))
+
+ pCurrSCCB->Sccb_XferState |=
+ F_ALL_XFERRED;
+ }
+
+ else {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus =
+ SCCB_GROSS_FW_ERR;
+ }
+ }
+ }
+
+ if (!(pCurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)) {
+
+ if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {
+
+ FPT_busMstrTimeOut(port);
+ }
+
+ else {
+
+ if (RD_HARPOON(port + hp_int_status) &
+ INT_EXT_STATUS) {
+
+ if (RD_HARPOON(port + hp_ext_status) &
+ BAD_EXT_STATUS) {
+
+ if (pCurrSCCB->HostStatus ==
+ 0x00) {
+
+ pCurrSCCB->HostStatus =
+ SCCB_BM_ERR;
+ }
+ }
+ }
+
+ }
+ }
+
+ else {
+
+ if ((RD_HARPOON(port + hp_fifo_cnt)) >= BM_THRESHOLD) {
+
+ timeout = SHORT_WAIT;
+
+ while ((RD_HARPOON(port + hp_ext_status) &
+ BM_CMD_BUSY)
+ && ((RD_HARPOON(port + hp_fifo_cnt)) >=
+ BM_THRESHOLD) && timeout--) {
+ }
+ }
+
+ if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {
+
+ WR_HARPOON(port + hp_bm_ctrl,
+ (RD_HARPOON(port + hp_bm_ctrl) |
+ FLUSH_XFER_CNTR));
+
+ timeout = LONG_WAIT;
+
+ while ((RD_HARPOON(port + hp_ext_status) &
+ BM_CMD_BUSY) && timeout--) {
+ }
+
+ WR_HARPOON(port + hp_bm_ctrl,
+ (RD_HARPOON(port + hp_bm_ctrl) &
+ ~FLUSH_XFER_CNTR));
+
+ if (RD_HARPOON(port + hp_ext_status) &
+ BM_CMD_BUSY) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus =
+ SCCB_BM_ERR;
+ }
+
+ FPT_busMstrTimeOut(port);
+ }
+ }
+
+ if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) {
+
+ if (RD_HARPOON(port + hp_ext_status) &
+ BAD_EXT_STATUS) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus =
+ SCCB_BM_ERR;
+ }
+ }
+ }
+ }
+
+ }
+
+ else {
+
+ if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {
+
+ timeout = LONG_WAIT;
+
+ while ((RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY)
+ && timeout--) {
+ }
+
+ if (RD_HARPOON(port + hp_ext_status) & BM_CMD_BUSY) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+ }
+
+ FPT_busMstrTimeOut(port);
+ }
+ }
+
+ if (RD_HARPOON(port + hp_int_status) & INT_EXT_STATUS) {
+
+ if (RD_HARPOON(port + hp_ext_status) & BAD_EXT_STATUS) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+ }
+ }
+
+ }
+
+ if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
+
+ WR_HARPOON(port + hp_page_ctrl,
+ (RD_HARPOON(port + hp_page_ctrl) &
+ ~SCATTER_EN));
+
+ WR_HARPOON(port + hp_sg_addr, 0x00);
+
+ pCurrSCCB->Sccb_sgseg += SG_BUF_CNT;
+
+ pCurrSCCB->Sccb_SGoffset = 0x00;
+
+ if ((u32)(pCurrSCCB->Sccb_sgseg * SG_ELEMENT_SIZE) >=
+ pCurrSCCB->DataLength) {
+
+ pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+ pCurrSCCB->Sccb_sgseg =
+ (unsigned short)(pCurrSCCB->DataLength /
+ SG_ELEMENT_SIZE);
+ }
+ }
+
+ else {
+ if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE))
+ pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+ }
+ }
+
+ WR_HARPOON(port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Host Data Transfer Restart
+ *
+ * Description: Reset the available count due to a restore data
+ * pointers message.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_hostDataXferRestart(struct sccb *currSCCB)
+{
+ unsigned long data_count;
+ unsigned int sg_index;
+ struct blogic_sg_seg *segp;
+
+ if (currSCCB->Sccb_XferState & F_SG_XFER) {
+
+ currSCCB->Sccb_XferCnt = 0;
+
+ sg_index = 0xffff; /*Index by long words into sg list. */
+ data_count = 0; /*Running count of SG xfer counts. */
+
+
+ while (data_count < currSCCB->Sccb_ATC) {
+
+ sg_index++;
+ segp = (struct blogic_sg_seg *)(currSCCB->DataPointer) +
+ (sg_index * 2);
+ data_count += segp->segbytes;
+ }
+
+ if (data_count == currSCCB->Sccb_ATC) {
+
+ currSCCB->Sccb_SGoffset = 0;
+ sg_index++;
+ }
+
+ else {
+ currSCCB->Sccb_SGoffset =
+ data_count - currSCCB->Sccb_ATC;
+ }
+
+ currSCCB->Sccb_sgseg = (unsigned short)sg_index;
+ }
+
+ else {
+ currSCCB->Sccb_XferCnt =
+ currSCCB->DataLength - currSCCB->Sccb_ATC;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scini
+ *
+ * Description: Setup all data structures necessary for SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scini(unsigned char p_card, unsigned char p_our_id,
+ unsigned char p_power_up)
+{
+
+ unsigned char loser, assigned_id;
+ u32 p_port;
+
+ unsigned char i, k, ScamFlg;
+ struct sccb_card *currCard;
+ struct nvram_info *pCurrNvRam;
+
+ currCard = &FPT_BL_Card[p_card];
+ p_port = currCard->ioPort;
+ pCurrNvRam = currCard->pNvRamInfo;
+
+ if (pCurrNvRam) {
+ ScamFlg = pCurrNvRam->niScamConf;
+ i = pCurrNvRam->niSysConf;
+ } else {
+ ScamFlg =
+ (unsigned char)FPT_utilEERead(p_port, SCAM_CONFIG / 2);
+ i = (unsigned
+ char)(FPT_utilEERead(p_port, (SYSTEM_CONFIG / 2)));
+ }
+ if (!(i & 0x02)) /* check if reset bus in AutoSCSI parameter set */
+ return;
+
+ FPT_inisci(p_card, p_port, p_our_id);
+
+ /* Force to wait 1 sec after SCSI bus reset. Some SCAM device FW
+ too slow to return to SCAM selection */
+
+ /* if (p_power_up)
+ FPT_Wait1Second(p_port);
+ else
+ FPT_Wait(p_port, TO_250ms); */
+
+ FPT_Wait1Second(p_port);
+
+ if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2)) {
+ while (!(FPT_scarb(p_port, INIT_SELTD))) {
+ }
+
+ FPT_scsel(p_port);
+
+ do {
+ FPT_scxferc(p_port, SYNC_PTRN);
+ FPT_scxferc(p_port, DOM_MSTR);
+ loser =
+ FPT_scsendi(p_port,
+ &FPT_scamInfo[p_our_id].id_string[0]);
+ } while (loser == 0xFF);
+
+ FPT_scbusf(p_port);
+
+ if ((p_power_up) && (!loser)) {
+ FPT_sresb(p_port, p_card);
+ FPT_Wait(p_port, TO_250ms);
+
+ while (!(FPT_scarb(p_port, INIT_SELTD))) {
+ }
+
+ FPT_scsel(p_port);
+
+ do {
+ FPT_scxferc(p_port, SYNC_PTRN);
+ FPT_scxferc(p_port, DOM_MSTR);
+ loser =
+ FPT_scsendi(p_port,
+ &FPT_scamInfo[p_our_id].
+ id_string[0]);
+ } while (loser == 0xFF);
+
+ FPT_scbusf(p_port);
+ }
+ }
+
+ else {
+ loser = 0;
+ }
+
+ if (!loser) {
+
+ FPT_scamInfo[p_our_id].state = ID_ASSIGNED;
+
+ if (ScamFlg & SCAM_ENABLED) {
+
+ for (i = 0; i < MAX_SCSI_TAR; i++) {
+ if ((FPT_scamInfo[i].state == ID_UNASSIGNED) ||
+ (FPT_scamInfo[i].state == ID_UNUSED)) {
+ if (FPT_scsell(p_port, i)) {
+ FPT_scamInfo[i].state = LEGACY;
+ if ((FPT_scamInfo[i].
+ id_string[0] != 0xFF)
+ || (FPT_scamInfo[i].
+ id_string[1] != 0xFA)) {
+
+ FPT_scamInfo[i].
+ id_string[0] = 0xFF;
+ FPT_scamInfo[i].
+ id_string[1] = 0xFA;
+ if (pCurrNvRam == NULL)
+ currCard->
+ globalFlags
+ |=
+ F_UPDATE_EEPROM;
+ }
+ }
+ }
+ }
+
+ FPT_sresb(p_port, p_card);
+ FPT_Wait1Second(p_port);
+ while (!(FPT_scarb(p_port, INIT_SELTD))) {
+ }
+ FPT_scsel(p_port);
+ FPT_scasid(p_card, p_port);
+ }
+
+ }
+
+ else if ((loser) && (ScamFlg & SCAM_ENABLED)) {
+ FPT_scamInfo[p_our_id].id_string[0] = SLV_TYPE_CODE0;
+ assigned_id = 0;
+ FPT_scwtsel(p_port);
+
+ do {
+ while (FPT_scxferc(p_port, 0x00) != SYNC_PTRN) {
+ }
+
+ i = FPT_scxferc(p_port, 0x00);
+ if (i == ASSIGN_ID) {
+ if (!
+ (FPT_scsendi
+ (p_port,
+ &FPT_scamInfo[p_our_id].id_string[0]))) {
+ i = FPT_scxferc(p_port, 0x00);
+ if (FPT_scvalq(i)) {
+ k = FPT_scxferc(p_port, 0x00);
+
+ if (FPT_scvalq(k)) {
+ currCard->ourId =
+ ((unsigned char)(i
+ <<
+ 3)
+ +
+ (k &
+ (unsigned char)7))
+ & (unsigned char)
+ 0x3F;
+ FPT_inisci(p_card,
+ p_port,
+ p_our_id);
+ FPT_scamInfo[currCard->
+ ourId].
+ state = ID_ASSIGNED;
+ FPT_scamInfo[currCard->
+ ourId].
+ id_string[0]
+ = SLV_TYPE_CODE0;
+ assigned_id = 1;
+ }
+ }
+ }
+ }
+
+ else if (i == SET_P_FLAG) {
+ if (!(FPT_scsendi(p_port,
+ &FPT_scamInfo[p_our_id].
+ id_string[0])))
+ FPT_scamInfo[p_our_id].id_string[0] |=
+ 0x80;
+ }
+ } while (!assigned_id);
+
+ while (FPT_scxferc(p_port, 0x00) != CFG_CMPLT) {
+ }
+ }
+
+ if (ScamFlg & SCAM_ENABLED) {
+ FPT_scbusf(p_port);
+ if (currCard->globalFlags & F_UPDATE_EEPROM) {
+ FPT_scsavdi(p_card, p_port);
+ currCard->globalFlags &= ~F_UPDATE_EEPROM;
+ }
+ }
+
+/*
+ for (i=0,k=0; i < MAX_SCSI_TAR; i++)
+ {
+ if ((FPT_scamInfo[i].state == ID_ASSIGNED) ||
+ (FPT_scamInfo[i].state == LEGACY))
+ k++;
+ }
+
+ if (k==2)
+ currCard->globalFlags |= F_SINGLE_DEVICE;
+ else
+ currCard->globalFlags &= ~F_SINGLE_DEVICE;
+*/
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scarb
+ *
+ * Description: Gain control of the bus and wait SCAM select time (250ms)
+ *
+ *---------------------------------------------------------------------*/
+
+static int FPT_scarb(u32 p_port, unsigned char p_sel_type)
+{
+ if (p_sel_type == INIT_SELTD) {
+
+ while (RD_HARPOON(p_port + hp_scsisig) & (SCSI_SEL | SCSI_BSY)) {
+ }
+
+ if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL)
+ return 0;
+
+ if (RD_HARPOON(p_port + hp_scsidata_0) != 00)
+ return 0;
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (RD_HARPOON(p_port + hp_scsisig) | SCSI_BSY));
+
+ if (RD_HARPOON(p_port + hp_scsisig) & SCSI_SEL) {
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (RD_HARPOON(p_port + hp_scsisig) &
+ ~SCSI_BSY));
+ return 0;
+ }
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (RD_HARPOON(p_port + hp_scsisig) | SCSI_SEL));
+
+ if (RD_HARPOON(p_port + hp_scsidata_0) != 00) {
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (RD_HARPOON(p_port + hp_scsisig) &
+ ~(SCSI_BSY | SCSI_SEL)));
+ return 0;
+ }
+ }
+
+ WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0)
+ & ~ACTdeassert));
+ WR_HARPOON(p_port + hp_scsireset, SCAM_EN);
+ WR_HARPOON(p_port + hp_scsidata_0, 0x00);
+ WR_HARPOON(p_port + hp_scsidata_1, 0x00);
+ WR_HARPOON(p_port + hp_portctrl_0, SCSI_BUS_EN);
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (RD_HARPOON(p_port + hp_scsisig) | SCSI_MSG));
+
+ WR_HARPOON(p_port + hp_scsisig, (RD_HARPOON(p_port + hp_scsisig)
+ & ~SCSI_BSY));
+
+ FPT_Wait(p_port, TO_250ms);
+
+ return 1;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scbusf
+ *
+ * Description: Release the SCSI bus and disable SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scbusf(u32 p_port)
+{
+ WR_HARPOON(p_port + hp_page_ctrl,
+ (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE));
+
+ WR_HARPOON(p_port + hp_scsidata_0, 0x00);
+
+ WR_HARPOON(p_port + hp_portctrl_0, (RD_HARPOON(p_port + hp_portctrl_0)
+ & ~SCSI_BUS_EN));
+
+ WR_HARPOON(p_port + hp_scsisig, 0x00);
+
+ WR_HARPOON(p_port + hp_scsireset, (RD_HARPOON(p_port + hp_scsireset)
+ & ~SCAM_EN));
+
+ WR_HARPOON(p_port + hp_clkctrl_0, (RD_HARPOON(p_port + hp_clkctrl_0)
+ | ACTdeassert));
+
+ WRW_HARPOON((p_port + hp_intstat), (BUS_FREE | AUTO_INT | SCAM_SEL));
+
+ WR_HARPOON(p_port + hp_page_ctrl,
+ (RD_HARPOON(p_port + hp_page_ctrl) & ~G_INT_DISABLE));
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scasid
+ *
+ * Description: Assign an ID to all the SCAM devices.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scasid(unsigned char p_card, u32 p_port)
+{
+ unsigned char temp_id_string[ID_STRING_LENGTH];
+
+ unsigned char i, k, scam_id;
+ unsigned char crcBytes[3];
+ struct nvram_info *pCurrNvRam;
+ unsigned short *pCrcBytes;
+
+ pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo;
+
+ i = 0;
+
+ while (!i) {
+
+ for (k = 0; k < ID_STRING_LENGTH; k++) {
+ temp_id_string[k] = (unsigned char)0x00;
+ }
+
+ FPT_scxferc(p_port, SYNC_PTRN);
+ FPT_scxferc(p_port, ASSIGN_ID);
+
+ if (!(FPT_sciso(p_port, &temp_id_string[0]))) {
+ if (pCurrNvRam) {
+ pCrcBytes = (unsigned short *)&crcBytes[0];
+ *pCrcBytes = FPT_CalcCrc16(&temp_id_string[0]);
+ crcBytes[2] = FPT_CalcLrc(&temp_id_string[0]);
+ temp_id_string[1] = crcBytes[2];
+ temp_id_string[2] = crcBytes[0];
+ temp_id_string[3] = crcBytes[1];
+ for (k = 4; k < ID_STRING_LENGTH; k++)
+ temp_id_string[k] = (unsigned char)0x00;
+ }
+ i = FPT_scmachid(p_card, temp_id_string);
+
+ if (i == CLR_PRIORITY) {
+ FPT_scxferc(p_port, MISC_CODE);
+ FPT_scxferc(p_port, CLR_P_FLAG);
+ i = 0; /*Not the last ID yet. */
+ }
+
+ else if (i != NO_ID_AVAIL) {
+ if (i < 8)
+ FPT_scxferc(p_port, ID_0_7);
+ else
+ FPT_scxferc(p_port, ID_8_F);
+
+ scam_id = (i & (unsigned char)0x07);
+
+ for (k = 1; k < 0x08; k <<= 1)
+ if (!(k & i))
+ scam_id += 0x08; /*Count number of zeros in DB0-3. */
+
+ FPT_scxferc(p_port, scam_id);
+
+ i = 0; /*Not the last ID yet. */
+ }
+ }
+
+ else {
+ i = 1;
+ }
+
+ } /*End while */
+
+ FPT_scxferc(p_port, SYNC_PTRN);
+ FPT_scxferc(p_port, CFG_CMPLT);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scsel
+ *
+ * Description: Select all the SCAM devices.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scsel(u32 p_port)
+{
+
+ WR_HARPOON(p_port + hp_scsisig, SCSI_SEL);
+ FPT_scwiros(p_port, SCSI_MSG);
+
+ WR_HARPOON(p_port + hp_scsisig, (SCSI_SEL | SCSI_BSY));
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+ WR_HARPOON(p_port + hp_scsidata_0,
+ (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) |
+ (unsigned char)(BIT(7) + BIT(6))));
+
+ WR_HARPOON(p_port + hp_scsisig, (SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+ FPT_scwiros(p_port, SCSI_SEL);
+
+ WR_HARPOON(p_port + hp_scsidata_0,
+ (unsigned char)(RD_HARPOON(p_port + hp_scsidata_0) &
+ ~(unsigned char)BIT(6)));
+ FPT_scwirod(p_port, BIT(6));
+
+ WR_HARPOON(p_port + hp_scsisig,
+ (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scxferc
+ *
+ * Description: Handshake the p_data (DB4-0) across the bus.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data)
+{
+ unsigned char curr_data, ret_data;
+
+ curr_data = p_data | BIT(7) | BIT(5); /*Start with DB7 & DB5 asserted. */
+
+ WR_HARPOON(p_port + hp_scsidata_0, curr_data);
+
+ curr_data &= ~BIT(7);
+
+ WR_HARPOON(p_port + hp_scsidata_0, curr_data);
+
+ FPT_scwirod(p_port, BIT(7)); /*Wait for DB7 to be released. */
+ while (!(RD_HARPOON(p_port + hp_scsidata_0) & BIT(5))) ;
+
+ ret_data = (RD_HARPOON(p_port + hp_scsidata_0) & (unsigned char)0x1F);
+
+ curr_data |= BIT(6);
+
+ WR_HARPOON(p_port + hp_scsidata_0, curr_data);
+
+ curr_data &= ~BIT(5);
+
+ WR_HARPOON(p_port + hp_scsidata_0, curr_data);
+
+ FPT_scwirod(p_port, BIT(5)); /*Wait for DB5 to be released. */
+
+ curr_data &= ~(BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); /*Release data bits */
+ curr_data |= BIT(7);
+
+ WR_HARPOON(p_port + hp_scsidata_0, curr_data);
+
+ curr_data &= ~BIT(6);
+
+ WR_HARPOON(p_port + hp_scsidata_0, curr_data);
+
+ FPT_scwirod(p_port, BIT(6)); /*Wait for DB6 to be released. */
+
+ return ret_data;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scsendi
+ *
+ * Description: Transfer our Identification string to determine if we
+ * will be the dominant master.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_scsendi(u32 p_port, unsigned char p_id_string[])
+{
+ unsigned char ret_data, byte_cnt, bit_cnt, defer;
+
+ defer = 0;
+
+ for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
+
+ for (bit_cnt = 0x80; bit_cnt != 0; bit_cnt >>= 1) {
+
+ if (defer)
+ ret_data = FPT_scxferc(p_port, 00);
+
+ else if (p_id_string[byte_cnt] & bit_cnt)
+
+ ret_data = FPT_scxferc(p_port, 02);
+
+ else {
+
+ ret_data = FPT_scxferc(p_port, 01);
+ if (ret_data & 02)
+ defer = 1;
+ }
+
+ if ((ret_data & 0x1C) == 0x10)
+ return 0x00; /*End of isolation stage, we won! */
+
+ if (ret_data & 0x1C)
+ return 0xFF;
+
+ if ((defer) && (!(ret_data & 0x1F)))
+ return 0x01; /*End of isolation stage, we lost. */
+
+ } /*bit loop */
+
+ } /*byte loop */
+
+ if (defer)
+ return 0x01; /*We lost */
+ else
+ return 0; /*We WON! Yeeessss! */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_sciso
+ *
+ * Description: Transfer the Identification string.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_sciso(u32 p_port, unsigned char p_id_string[])
+{
+ unsigned char ret_data, the_data, byte_cnt, bit_cnt;
+
+ the_data = 0;
+
+ for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
+
+ for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) {
+
+ ret_data = FPT_scxferc(p_port, 0);
+
+ if (ret_data & 0xFC)
+ return 0xFF;
+
+ else {
+
+ the_data <<= 1;
+ if (ret_data & BIT(1)) {
+ the_data |= 1;
+ }
+ }
+
+ if ((ret_data & 0x1F) == 0) {
+/*
+ if(bit_cnt != 0 || bit_cnt != 8)
+ {
+ byte_cnt = 0;
+ bit_cnt = 0;
+ FPT_scxferc(p_port, SYNC_PTRN);
+ FPT_scxferc(p_port, ASSIGN_ID);
+ continue;
+ }
+*/
+ if (byte_cnt)
+ return 0x00;
+ else
+ return 0xFF;
+ }
+
+ } /*bit loop */
+
+ p_id_string[byte_cnt] = the_data;
+
+ } /*byte loop */
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scwirod
+ *
+ * Description: Sample the SCSI data bus making sure the signal has been
+ * deasserted for the correct number of consecutive samples.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scwirod(u32 p_port, unsigned char p_data_bit)
+{
+ unsigned char i;
+
+ i = 0;
+ while (i < MAX_SCSI_TAR) {
+
+ if (RD_HARPOON(p_port + hp_scsidata_0) & p_data_bit)
+
+ i = 0;
+
+ else
+
+ i++;
+
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scwiros
+ *
+ * Description: Sample the SCSI Signal lines making sure the signal has been
+ * deasserted for the correct number of consecutive samples.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scwiros(u32 p_port, unsigned char p_data_bit)
+{
+ unsigned char i;
+
+ i = 0;
+ while (i < MAX_SCSI_TAR) {
+
+ if (RD_HARPOON(p_port + hp_scsisig) & p_data_bit)
+
+ i = 0;
+
+ else
+
+ i++;
+
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scvalq
+ *
+ * Description: Make sure we received a valid data byte.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_scvalq(unsigned char p_quintet)
+{
+ unsigned char count;
+
+ for (count = 1; count < 0x08; count <<= 1) {
+ if (!(p_quintet & count))
+ p_quintet -= 0x80;
+ }
+
+ if (p_quintet & 0x18)
+ return 0;
+
+ else
+ return 1;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scsell
+ *
+ * Description: Select the specified device ID using a selection timeout
+ * less than 4ms. If somebody responds then it is a legacy
+ * drive and this ID must be marked as such.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id)
+{
+ unsigned long i;
+
+ WR_HARPOON(p_port + hp_page_ctrl,
+ (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE));
+
+ ARAM_ACCESS(p_port);
+
+ WR_HARPOON(p_port + hp_addstat,
+ (RD_HARPOON(p_port + hp_addstat) | SCAM_TIMER));
+ WR_HARPOON(p_port + hp_seltimeout, TO_4ms);
+
+ for (i = p_port + CMD_STRT; i < p_port + CMD_STRT + 12; i += 2) {
+ WRW_HARPOON(i, (MPM_OP + ACOMMAND));
+ }
+ WRW_HARPOON(i, (BRH_OP + ALWAYS + NP));
+
+ WRW_HARPOON((p_port + hp_intstat),
+ (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT));
+
+ WR_HARPOON(p_port + hp_select_id, targ_id);
+
+ WR_HARPOON(p_port + hp_portctrl_0, SCSI_PORT);
+ WR_HARPOON(p_port + hp_autostart_3, (SELECT | CMD_ONLY_STRT));
+ WR_HARPOON(p_port + hp_scsictrl_0, (SEL_TAR | ENA_RESEL));
+
+ while (!(RDW_HARPOON((p_port + hp_intstat)) &
+ (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) {
+ }
+
+ if (RDW_HARPOON((p_port + hp_intstat)) & RESET)
+ FPT_Wait(p_port, TO_250ms);
+
+ DISABLE_AUTO(p_port);
+
+ WR_HARPOON(p_port + hp_addstat,
+ (RD_HARPOON(p_port + hp_addstat) & ~SCAM_TIMER));
+ WR_HARPOON(p_port + hp_seltimeout, TO_290ms);
+
+ SGRAM_ACCESS(p_port);
+
+ if (RDW_HARPOON((p_port + hp_intstat)) & (RESET | TIMEOUT)) {
+
+ WRW_HARPOON((p_port + hp_intstat),
+ (RESET | TIMEOUT | SEL | BUS_FREE | PHASE));
+
+ WR_HARPOON(p_port + hp_page_ctrl,
+ (RD_HARPOON(p_port + hp_page_ctrl) &
+ ~G_INT_DISABLE));
+
+ return 0; /*No legacy device */
+ }
+
+ else {
+
+ while (!(RDW_HARPOON((p_port + hp_intstat)) & BUS_FREE)) {
+ if (RD_HARPOON(p_port + hp_scsisig) & SCSI_REQ) {
+ WR_HARPOON(p_port + hp_scsisig,
+ (SCSI_ACK + S_ILL_PH));
+ ACCEPT_MSG(p_port);
+ }
+ }
+
+ WRW_HARPOON((p_port + hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(p_port + hp_page_ctrl,
+ (RD_HARPOON(p_port + hp_page_ctrl) &
+ ~G_INT_DISABLE));
+
+ return 1; /*Found one of them oldies! */
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scwtsel
+ *
+ * Description: Wait to be selected by another SCAM initiator.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scwtsel(u32 p_port)
+{
+ while (!(RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) {
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_inisci
+ *
+ * Description: Setup the data Structure with the info from the EEPROM.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_inisci(unsigned char p_card, u32 p_port, unsigned char p_our_id)
+{
+ unsigned char i, k, max_id;
+ unsigned short ee_data;
+ struct nvram_info *pCurrNvRam;
+
+ pCurrNvRam = FPT_BL_Card[p_card].pNvRamInfo;
+
+ if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_id = 0x08;
+
+ else
+ max_id = 0x10;
+
+ if (pCurrNvRam) {
+ for (i = 0; i < max_id; i++) {
+
+ for (k = 0; k < 4; k++)
+ FPT_scamInfo[i].id_string[k] =
+ pCurrNvRam->niScamTbl[i][k];
+ for (k = 4; k < ID_STRING_LENGTH; k++)
+ FPT_scamInfo[i].id_string[k] =
+ (unsigned char)0x00;
+
+ if (FPT_scamInfo[i].id_string[0] == 0x00)
+ FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */
+ else
+ FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */
+
+ }
+ } else {
+ for (i = 0; i < max_id; i++) {
+ for (k = 0; k < ID_STRING_LENGTH; k += 2) {
+ ee_data =
+ FPT_utilEERead(p_port,
+ (unsigned
+ short)((EE_SCAMBASE / 2) +
+ (unsigned short)(i *
+ ((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2)));
+ FPT_scamInfo[i].id_string[k] =
+ (unsigned char)ee_data;
+ ee_data >>= 8;
+ FPT_scamInfo[i].id_string[k + 1] =
+ (unsigned char)ee_data;
+ }
+
+ if ((FPT_scamInfo[i].id_string[0] == 0x00) ||
+ (FPT_scamInfo[i].id_string[0] == 0xFF))
+
+ FPT_scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */
+
+ else
+ FPT_scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */
+
+ }
+ }
+ for (k = 0; k < ID_STRING_LENGTH; k++)
+ FPT_scamInfo[p_our_id].id_string[k] = FPT_scamHAString[k];
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scmachid
+ *
+ * Description: Match the Device ID string with our values stored in
+ * the EEPROM.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_scmachid(unsigned char p_card,
+ unsigned char p_id_string[])
+{
+
+ unsigned char i, k, match;
+
+ for (i = 0; i < MAX_SCSI_TAR; i++) {
+
+ match = 1;
+
+ for (k = 0; k < ID_STRING_LENGTH; k++) {
+ if (p_id_string[k] != FPT_scamInfo[i].id_string[k])
+ match = 0;
+ }
+
+ if (match) {
+ FPT_scamInfo[i].state = ID_ASSIGNED;
+ return i;
+ }
+
+ }
+
+ if (p_id_string[0] & BIT(5))
+ i = 8;
+ else
+ i = MAX_SCSI_TAR;
+
+ if (((p_id_string[0] & 0x06) == 0x02)
+ || ((p_id_string[0] & 0x06) == 0x04))
+ match = p_id_string[1] & (unsigned char)0x1F;
+ else
+ match = 7;
+
+ while (i > 0) {
+ i--;
+
+ if (FPT_scamInfo[match].state == ID_UNUSED) {
+ for (k = 0; k < ID_STRING_LENGTH; k++) {
+ FPT_scamInfo[match].id_string[k] =
+ p_id_string[k];
+ }
+
+ FPT_scamInfo[match].state = ID_ASSIGNED;
+
+ if (FPT_BL_Card[p_card].pNvRamInfo == NULL)
+ FPT_BL_Card[p_card].globalFlags |=
+ F_UPDATE_EEPROM;
+ return match;
+
+ }
+
+ match--;
+
+ if (match == 0xFF) {
+ if (p_id_string[0] & BIT(5))
+ match = 7;
+ else
+ match = MAX_SCSI_TAR - 1;
+ }
+ }
+
+ if (p_id_string[0] & BIT(7)) {
+ return CLR_PRIORITY;
+ }
+
+ if (p_id_string[0] & BIT(5))
+ i = 8;
+ else
+ i = MAX_SCSI_TAR;
+
+ if (((p_id_string[0] & 0x06) == 0x02)
+ || ((p_id_string[0] & 0x06) == 0x04))
+ match = p_id_string[1] & (unsigned char)0x1F;
+ else
+ match = 7;
+
+ while (i > 0) {
+
+ i--;
+
+ if (FPT_scamInfo[match].state == ID_UNASSIGNED) {
+ for (k = 0; k < ID_STRING_LENGTH; k++) {
+ FPT_scamInfo[match].id_string[k] =
+ p_id_string[k];
+ }
+
+ FPT_scamInfo[match].id_string[0] |= BIT(7);
+ FPT_scamInfo[match].state = ID_ASSIGNED;
+ if (FPT_BL_Card[p_card].pNvRamInfo == NULL)
+ FPT_BL_Card[p_card].globalFlags |=
+ F_UPDATE_EEPROM;
+ return match;
+
+ }
+
+ match--;
+
+ if (match == 0xFF) {
+ if (p_id_string[0] & BIT(5))
+ match = 7;
+ else
+ match = MAX_SCSI_TAR - 1;
+ }
+ }
+
+ return NO_ID_AVAIL;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_scsavdi
+ *
+ * Description: Save off the device SCAM ID strings.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_scsavdi(unsigned char p_card, u32 p_port)
+{
+ unsigned char i, k, max_id;
+ unsigned short ee_data, sum_data;
+
+ sum_data = 0x0000;
+
+ for (i = 1; i < EE_SCAMBASE / 2; i++) {
+ sum_data += FPT_utilEERead(p_port, i);
+ }
+
+ FPT_utilEEWriteOnOff(p_port, 1); /* Enable write access to the EEPROM */
+
+ if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_id = 0x08;
+
+ else
+ max_id = 0x10;
+
+ for (i = 0; i < max_id; i++) {
+
+ for (k = 0; k < ID_STRING_LENGTH; k += 2) {
+ ee_data = FPT_scamInfo[i].id_string[k + 1];
+ ee_data <<= 8;
+ ee_data |= FPT_scamInfo[i].id_string[k];
+ sum_data += ee_data;
+ FPT_utilEEWrite(p_port, ee_data,
+ (unsigned short)((EE_SCAMBASE / 2) +
+ (unsigned short)(i *
+ ((unsigned short)ID_STRING_LENGTH / 2)) + (unsigned short)(k / 2)));
+ }
+ }
+
+ FPT_utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM / 2);
+ FPT_utilEEWriteOnOff(p_port, 0); /* Turn off write access */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_XbowInit
+ *
+ * Description: Setup the Xbow for normal operation.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_XbowInit(u32 port, unsigned char ScamFlg)
+{
+ unsigned char i;
+
+ i = RD_HARPOON(port + hp_page_ctrl);
+ WR_HARPOON(port + hp_page_ctrl, (unsigned char)(i | G_INT_DISABLE));
+
+ WR_HARPOON(port + hp_scsireset, 0x00);
+ WR_HARPOON(port + hp_portctrl_1, HOST_MODE8);
+
+ WR_HARPOON(port + hp_scsireset, (DMA_RESET | HPSCSI_RESET | PROG_RESET |
+ FIFO_CLR));
+
+ WR_HARPOON(port + hp_scsireset, SCSI_INI);
+
+ WR_HARPOON(port + hp_clkctrl_0, CLKCTRL_DEFAULT);
+
+ WR_HARPOON(port + hp_scsisig, 0x00); /* Clear any signals we might */
+ WR_HARPOON(port + hp_scsictrl_0, ENA_SCAM_SEL);
+
+ WRW_HARPOON((port + hp_intstat), CLR_ALL_INT);
+
+ FPT_default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
+ BUS_FREE | XFER_CNT_0 | AUTO_INT;
+
+ if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
+ FPT_default_intena |= SCAM_SEL;
+
+ WRW_HARPOON((port + hp_intena), FPT_default_intena);
+
+ WR_HARPOON(port + hp_seltimeout, TO_290ms);
+
+ /* Turn on SCSI_MODE8 for narrow cards to fix the
+ strapping issue with the DUAL CHANNEL card */
+ if (RD_HARPOON(port + hp_page_ctrl) & NARROW_SCSI_CARD)
+ WR_HARPOON(port + hp_addstat, SCSI_MODE8);
+
+ WR_HARPOON(port + hp_page_ctrl, i);
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_BusMasterInit
+ *
+ * Description: Initialize the BusMaster for normal operations.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_BusMasterInit(u32 p_port)
+{
+
+ WR_HARPOON(p_port + hp_sys_ctrl, DRVR_RST);
+ WR_HARPOON(p_port + hp_sys_ctrl, 0x00);
+
+ WR_HARPOON(p_port + hp_host_blk_cnt, XFER_BLK64);
+
+ WR_HARPOON(p_port + hp_bm_ctrl, (BMCTRL_DEFAULT));
+
+ WR_HARPOON(p_port + hp_ee_ctrl, (SCSI_TERM_ENA_H));
+
+ RD_HARPOON(p_port + hp_int_status); /*Clear interrupts. */
+ WR_HARPOON(p_port + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+ WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) &
+ ~SCATTER_EN));
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_DiagEEPROM
+ *
+ * Description: Verfiy checksum and 'Key' and initialize the EEPROM if
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_DiagEEPROM(u32 p_port)
+{
+ unsigned short index, temp, max_wd_cnt;
+
+ if (RD_HARPOON(p_port + hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_wd_cnt = EEPROM_WD_CNT;
+ else
+ max_wd_cnt = EEPROM_WD_CNT * 2;
+
+ temp = FPT_utilEERead(p_port, FW_SIGNATURE / 2);
+
+ if (temp == 0x4641) {
+
+ for (index = 2; index < max_wd_cnt; index++) {
+
+ temp += FPT_utilEERead(p_port, index);
+
+ }
+
+ if (temp == FPT_utilEERead(p_port, EEPROM_CHECK_SUM / 2)) {
+
+ return; /*EEPROM is Okay so return now! */
+ }
+ }
+
+ FPT_utilEEWriteOnOff(p_port, (unsigned char)1);
+
+ for (index = 0; index < max_wd_cnt; index++) {
+
+ FPT_utilEEWrite(p_port, 0x0000, index);
+ }
+
+ temp = 0;
+
+ FPT_utilEEWrite(p_port, 0x4641, FW_SIGNATURE / 2);
+ temp += 0x4641;
+ FPT_utilEEWrite(p_port, 0x3920, MODEL_NUMB_0 / 2);
+ temp += 0x3920;
+ FPT_utilEEWrite(p_port, 0x3033, MODEL_NUMB_2 / 2);
+ temp += 0x3033;
+ FPT_utilEEWrite(p_port, 0x2020, MODEL_NUMB_4 / 2);
+ temp += 0x2020;
+ FPT_utilEEWrite(p_port, 0x70D3, SYSTEM_CONFIG / 2);
+ temp += 0x70D3;
+ FPT_utilEEWrite(p_port, 0x0010, BIOS_CONFIG / 2);
+ temp += 0x0010;
+ FPT_utilEEWrite(p_port, 0x0003, SCAM_CONFIG / 2);
+ temp += 0x0003;
+ FPT_utilEEWrite(p_port, 0x0007, ADAPTER_SCSI_ID / 2);
+ temp += 0x0007;
+
+ FPT_utilEEWrite(p_port, 0x0000, IGNORE_B_SCAN / 2);
+ temp += 0x0000;
+ FPT_utilEEWrite(p_port, 0x0000, SEND_START_ENA / 2);
+ temp += 0x0000;
+ FPT_utilEEWrite(p_port, 0x0000, DEVICE_ENABLE / 2);
+ temp += 0x0000;
+
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL01 / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL23 / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL45 / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL67 / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL89 / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLab / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLcd / 2);
+ temp += 0x4242;
+ FPT_utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLef / 2);
+ temp += 0x4242;
+
+ FPT_utilEEWrite(p_port, 0x6C46, 64 / 2); /*PRODUCT ID */
+ temp += 0x6C46;
+ FPT_utilEEWrite(p_port, 0x7361, 66 / 2); /* FlashPoint LT */
+ temp += 0x7361;
+ FPT_utilEEWrite(p_port, 0x5068, 68 / 2);
+ temp += 0x5068;
+ FPT_utilEEWrite(p_port, 0x696F, 70 / 2);
+ temp += 0x696F;
+ FPT_utilEEWrite(p_port, 0x746E, 72 / 2);
+ temp += 0x746E;
+ FPT_utilEEWrite(p_port, 0x4C20, 74 / 2);
+ temp += 0x4C20;
+ FPT_utilEEWrite(p_port, 0x2054, 76 / 2);
+ temp += 0x2054;
+ FPT_utilEEWrite(p_port, 0x2020, 78 / 2);
+ temp += 0x2020;
+
+ index = ((EE_SCAMBASE / 2) + (7 * 16));
+ FPT_utilEEWrite(p_port, (0x0700 + TYPE_CODE0), index);
+ temp += (0x0700 + TYPE_CODE0);
+ index++;
+ FPT_utilEEWrite(p_port, 0x5542, index); /*Vendor ID code */
+ temp += 0x5542; /* BUSLOGIC */
+ index++;
+ FPT_utilEEWrite(p_port, 0x4C53, index);
+ temp += 0x4C53;
+ index++;
+ FPT_utilEEWrite(p_port, 0x474F, index);
+ temp += 0x474F;
+ index++;
+ FPT_utilEEWrite(p_port, 0x4349, index);
+ temp += 0x4349;
+ index++;
+ FPT_utilEEWrite(p_port, 0x5442, index); /*Vendor unique code */
+ temp += 0x5442; /* BT- 930 */
+ index++;
+ FPT_utilEEWrite(p_port, 0x202D, index);
+ temp += 0x202D;
+ index++;
+ FPT_utilEEWrite(p_port, 0x3339, index);
+ temp += 0x3339;
+ index++; /*Serial # */
+ FPT_utilEEWrite(p_port, 0x2030, index); /* 01234567 */
+ temp += 0x2030;
+ index++;
+ FPT_utilEEWrite(p_port, 0x5453, index);
+ temp += 0x5453;
+ index++;
+ FPT_utilEEWrite(p_port, 0x5645, index);
+ temp += 0x5645;
+ index++;
+ FPT_utilEEWrite(p_port, 0x2045, index);
+ temp += 0x2045;
+ index++;
+ FPT_utilEEWrite(p_port, 0x202F, index);
+ temp += 0x202F;
+ index++;
+ FPT_utilEEWrite(p_port, 0x4F4A, index);
+ temp += 0x4F4A;
+ index++;
+ FPT_utilEEWrite(p_port, 0x204E, index);
+ temp += 0x204E;
+ index++;
+ FPT_utilEEWrite(p_port, 0x3539, index);
+ temp += 0x3539;
+
+ FPT_utilEEWrite(p_port, temp, EEPROM_CHECK_SUM / 2);
+
+ FPT_utilEEWriteOnOff(p_port, (unsigned char)0);
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Search Select
+ *
+ * Description: Try to find a new command to execute.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_queueSearchSelect(struct sccb_card *pCurrCard,
+ unsigned char p_card)
+{
+ unsigned char scan_ptr, lun;
+ struct sccb_mgr_tar_info *currTar_Info;
+ struct sccb *pOldSccb;
+
+ scan_ptr = pCurrCard->scanIndex;
+ do {
+ currTar_Info = &FPT_sccbMgrTbl[p_card][scan_ptr];
+ if ((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING)) {
+ if (currTar_Info->TarSelQ_Cnt != 0) {
+
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ scan_ptr = 0;
+
+ for (lun = 0; lun < MAX_LUN; lun++) {
+ if (currTar_Info->TarLUNBusy[lun] == 0) {
+
+ pCurrCard->currentSCCB =
+ currTar_Info->TarSelQ_Head;
+ pOldSccb = NULL;
+
+ while ((pCurrCard->
+ currentSCCB != NULL)
+ && (lun !=
+ pCurrCard->
+ currentSCCB->Lun)) {
+ pOldSccb =
+ pCurrCard->
+ currentSCCB;
+ pCurrCard->currentSCCB =
+ (struct sccb
+ *)(pCurrCard->
+ currentSCCB)->
+ Sccb_forwardlink;
+ }
+ if (pCurrCard->currentSCCB ==
+ NULL)
+ continue;
+ if (pOldSccb != NULL) {
+ pOldSccb->
+ Sccb_forwardlink =
+ (struct sccb
+ *)(pCurrCard->
+ currentSCCB)->
+ Sccb_forwardlink;
+ pOldSccb->
+ Sccb_backlink =
+ (struct sccb
+ *)(pCurrCard->
+ currentSCCB)->
+ Sccb_backlink;
+ currTar_Info->
+ TarSelQ_Cnt--;
+ } else {
+ currTar_Info->
+ TarSelQ_Head =
+ (struct sccb
+ *)(pCurrCard->
+ currentSCCB)->
+ Sccb_forwardlink;
+
+ if (currTar_Info->
+ TarSelQ_Head ==
+ NULL) {
+ currTar_Info->
+ TarSelQ_Tail
+ = NULL;
+ currTar_Info->
+ TarSelQ_Cnt
+ = 0;
+ } else {
+ currTar_Info->
+ TarSelQ_Cnt--;
+ currTar_Info->
+ TarSelQ_Head->
+ Sccb_backlink
+ =
+ (struct sccb
+ *)NULL;
+ }
+ }
+ pCurrCard->scanIndex = scan_ptr;
+
+ pCurrCard->globalFlags |=
+ F_NEW_SCCB_CMD;
+
+ break;
+ }
+ }
+ }
+
+ else {
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR) {
+ scan_ptr = 0;
+ }
+ }
+
+ } else {
+ if ((currTar_Info->TarSelQ_Cnt != 0) &&
+ (currTar_Info->TarLUNBusy[0] == 0)) {
+
+ pCurrCard->currentSCCB =
+ currTar_Info->TarSelQ_Head;
+
+ currTar_Info->TarSelQ_Head =
+ (struct sccb *)(pCurrCard->currentSCCB)->
+ Sccb_forwardlink;
+
+ if (currTar_Info->TarSelQ_Head == NULL) {
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarSelQ_Cnt = 0;
+ } else {
+ currTar_Info->TarSelQ_Cnt--;
+ currTar_Info->TarSelQ_Head->
+ Sccb_backlink = (struct sccb *)NULL;
+ }
+
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ scan_ptr = 0;
+
+ pCurrCard->scanIndex = scan_ptr;
+
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+
+ break;
+ }
+
+ else {
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR) {
+ scan_ptr = 0;
+ }
+ }
+ }
+ } while (scan_ptr != pCurrCard->scanIndex);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Select Fail
+ *
+ * Description: Add the current SCCB to the head of the Queue.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_queueSelectFail(struct sccb_card *pCurrCard,
+ unsigned char p_card)
+{
+ unsigned char thisTarg;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ if (pCurrCard->currentSCCB != NULL) {
+ thisTarg =
+ (unsigned char)(((struct sccb *)(pCurrCard->currentSCCB))->
+ TargID);
+ currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg];
+
+ pCurrCard->currentSCCB->Sccb_backlink = (struct sccb *)NULL;
+
+ pCurrCard->currentSCCB->Sccb_forwardlink =
+ currTar_Info->TarSelQ_Head;
+
+ if (currTar_Info->TarSelQ_Cnt == 0) {
+ currTar_Info->TarSelQ_Tail = pCurrCard->currentSCCB;
+ }
+
+ else {
+ currTar_Info->TarSelQ_Head->Sccb_backlink =
+ pCurrCard->currentSCCB;
+ }
+
+ currTar_Info->TarSelQ_Head = pCurrCard->currentSCCB;
+
+ pCurrCard->currentSCCB = NULL;
+ currTar_Info->TarSelQ_Cnt++;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Command Complete
+ *
+ * Description: Call the callback function with the current SCCB.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
+ struct sccb *p_sccb, unsigned char p_card)
+{
+
+ unsigned char i, SCSIcmd;
+ CALL_BK_FN callback;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ SCSIcmd = p_sccb->Cdb[0];
+
+ if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED)) {
+
+ if ((p_sccb->
+ ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN))
+ && (p_sccb->HostStatus == SCCB_COMPLETE)
+ && (p_sccb->TargetStatus != SSCHECK))
+
+ if ((SCSIcmd == SCSI_READ) ||
+ (SCSIcmd == SCSI_WRITE) ||
+ (SCSIcmd == SCSI_READ_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
+ (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ (pCurrCard->globalFlags & F_NO_FILTER)
+ )
+ p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
+ }
+
+ if (p_sccb->SccbStatus == SCCB_IN_PROCESS) {
+ if (p_sccb->HostStatus || p_sccb->TargetStatus)
+ p_sccb->SccbStatus = SCCB_ERROR;
+ else
+ p_sccb->SccbStatus = SCCB_SUCCESS;
+ }
+
+ if (p_sccb->Sccb_XferState & F_AUTO_SENSE) {
+
+ p_sccb->CdbLength = p_sccb->Save_CdbLen;
+ for (i = 0; i < 6; i++) {
+ p_sccb->Cdb[i] = p_sccb->Save_Cdb[i];
+ }
+ }
+
+ if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_COMMAND)) {
+
+ FPT_utilUpdateResidual(p_sccb);
+ }
+
+ pCurrCard->cmdCounter--;
+ if (!pCurrCard->cmdCounter) {
+
+ if (pCurrCard->globalFlags & F_GREEN_PC) {
+ WR_HARPOON(pCurrCard->ioPort + hp_clkctrl_0,
+ (PWR_DWN | CLKCTRL_DEFAULT));
+ WR_HARPOON(pCurrCard->ioPort + hp_sys_ctrl, STOP_CLK);
+ }
+
+ WR_HARPOON(pCurrCard->ioPort + hp_semaphore,
+ (RD_HARPOON(pCurrCard->ioPort + hp_semaphore) &
+ ~SCCB_MGR_ACTIVE));
+
+ }
+
+ if (pCurrCard->discQCount != 0) {
+ currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
+ if (((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) !=
+ TAG_Q_TRYING))) {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[p_sccb->Lun]] = NULL;
+ } else {
+ if (p_sccb->Sccb_tag) {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL;
+ } else {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ }
+
+ callback = (CALL_BK_FN) p_sccb->SccbCallback;
+ callback(p_sccb);
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ pCurrCard->currentSCCB = NULL;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Disconnect
+ *
+ * Description: Add SCCB to our disconnect array.
+ *
+ *---------------------------------------------------------------------*/
+static void FPT_queueDisconnect(struct sccb *p_sccb, unsigned char p_card)
+{
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
+
+ if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))) {
+ FPT_BL_Card[p_card].discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[p_sccb->Lun]] =
+ p_sccb;
+ } else {
+ if (p_sccb->Sccb_tag) {
+ FPT_BL_Card[p_card].discQ_Tbl[p_sccb->Sccb_tag] =
+ p_sccb;
+ FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarLUNBusy[0] =
+ 0;
+ FPT_sccbMgrTbl[p_card][p_sccb->TargID].TarTagQ_Cnt++;
+ } else {
+ FPT_BL_Card[p_card].discQ_Tbl[currTar_Info->
+ LunDiscQ_Idx[0]] = p_sccb;
+ }
+ }
+ FPT_BL_Card[p_card].currentSCCB = NULL;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Flush SCCB
+ *
+ * Description: Flush all SCCB's back to the host driver for this target.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_queueFlushSccb(unsigned char p_card, unsigned char error_code)
+{
+ unsigned char qtag, thisTarg;
+ struct sccb *currSCCB;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currSCCB = FPT_BL_Card[p_card].currentSCCB;
+ if (currSCCB != NULL) {
+ thisTarg = (unsigned char)currSCCB->TargID;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg];
+
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
+
+ if (FPT_BL_Card[p_card].discQ_Tbl[qtag] &&
+ (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID ==
+ thisTarg)) {
+
+ FPT_BL_Card[p_card].discQ_Tbl[qtag]->
+ HostStatus = (unsigned char)error_code;
+
+ FPT_queueCmdComplete(&FPT_BL_Card[p_card],
+ FPT_BL_Card[p_card].
+ discQ_Tbl[qtag], p_card);
+
+ FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ currTar_Info->TarTagQ_Cnt--;
+
+ }
+ }
+ }
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Flush Target SCCB
+ *
+ * Description: Flush all SCCB's back to the host driver for this target.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg,
+ unsigned char error_code)
+{
+ unsigned char qtag;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][thisTarg];
+
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++) {
+
+ if (FPT_BL_Card[p_card].discQ_Tbl[qtag] &&
+ (FPT_BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg)) {
+
+ FPT_BL_Card[p_card].discQ_Tbl[qtag]->HostStatus =
+ (unsigned char)error_code;
+
+ FPT_queueCmdComplete(&FPT_BL_Card[p_card],
+ FPT_BL_Card[p_card].
+ discQ_Tbl[qtag], p_card);
+
+ FPT_BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ currTar_Info->TarTagQ_Cnt--;
+
+ }
+ }
+
+}
+
+static void FPT_queueAddSccb(struct sccb *p_SCCB, unsigned char p_card)
+{
+ struct sccb_mgr_tar_info *currTar_Info;
+ currTar_Info = &FPT_sccbMgrTbl[p_card][p_SCCB->TargID];
+
+ p_SCCB->Sccb_forwardlink = NULL;
+
+ p_SCCB->Sccb_backlink = currTar_Info->TarSelQ_Tail;
+
+ if (currTar_Info->TarSelQ_Cnt == 0) {
+
+ currTar_Info->TarSelQ_Head = p_SCCB;
+ }
+
+ else {
+
+ currTar_Info->TarSelQ_Tail->Sccb_forwardlink = p_SCCB;
+ }
+
+ currTar_Info->TarSelQ_Tail = p_SCCB;
+ currTar_Info->TarSelQ_Cnt++;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Find SCCB
+ *
+ * Description: Search the target select Queue for this SCCB, and
+ * remove it if found.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned char FPT_queueFindSccb(struct sccb *p_SCCB,
+ unsigned char p_card)
+{
+ struct sccb *q_ptr;
+ struct sccb_mgr_tar_info *currTar_Info;
+
+ currTar_Info = &FPT_sccbMgrTbl[p_card][p_SCCB->TargID];
+
+ q_ptr = currTar_Info->TarSelQ_Head;
+
+ while (q_ptr != NULL) {
+
+ if (q_ptr == p_SCCB) {
+
+ if (currTar_Info->TarSelQ_Head == q_ptr) {
+
+ currTar_Info->TarSelQ_Head =
+ q_ptr->Sccb_forwardlink;
+ }
+
+ if (currTar_Info->TarSelQ_Tail == q_ptr) {
+
+ currTar_Info->TarSelQ_Tail =
+ q_ptr->Sccb_backlink;
+ }
+
+ if (q_ptr->Sccb_forwardlink != NULL) {
+ q_ptr->Sccb_forwardlink->Sccb_backlink =
+ q_ptr->Sccb_backlink;
+ }
+
+ if (q_ptr->Sccb_backlink != NULL) {
+ q_ptr->Sccb_backlink->Sccb_forwardlink =
+ q_ptr->Sccb_forwardlink;
+ }
+
+ currTar_Info->TarSelQ_Cnt--;
+
+ return 1;
+ }
+
+ else {
+ q_ptr = q_ptr->Sccb_forwardlink;
+ }
+ }
+
+ return 0;
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Utility Update Residual Count
+ *
+ * Description: Update the XferCnt to the remaining byte count.
+ * If we transferred all the data then just write zero.
+ * If Non-SG transfer then report Total Cnt - Actual Transfer
+ * Cnt. For SG transfers add the count fields of all
+ * remaining SG elements, as well as any partial remaining
+ * element.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_utilUpdateResidual(struct sccb *p_SCCB)
+{
+ unsigned long partial_cnt;
+ unsigned int sg_index;
+ struct blogic_sg_seg *segp;
+
+ if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) {
+
+ p_SCCB->DataLength = 0x0000;
+ }
+
+ else if (p_SCCB->Sccb_XferState & F_SG_XFER) {
+
+ partial_cnt = 0x0000;
+
+ sg_index = p_SCCB->Sccb_sgseg;
+
+
+ if (p_SCCB->Sccb_SGoffset) {
+
+ partial_cnt = p_SCCB->Sccb_SGoffset;
+ sg_index++;
+ }
+
+ while (((unsigned long)sg_index *
+ (unsigned long)SG_ELEMENT_SIZE) < p_SCCB->DataLength) {
+ segp = (struct blogic_sg_seg *)(p_SCCB->DataPointer) +
+ (sg_index * 2);
+ partial_cnt += segp->segbytes;
+ sg_index++;
+ }
+
+ p_SCCB->DataLength = partial_cnt;
+ }
+
+ else {
+
+ p_SCCB->DataLength -= p_SCCB->Sccb_ATC;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Wait 1 Second
+ *
+ * Description: Wait for 1 second.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_Wait1Second(u32 p_port)
+{
+ unsigned char i;
+
+ for (i = 0; i < 4; i++) {
+
+ FPT_Wait(p_port, TO_250ms);
+
+ if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST))
+ break;
+
+ if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL))
+ break;
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: FPT_Wait
+ *
+ * Description: Wait the desired delay.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_Wait(u32 p_port, unsigned char p_delay)
+{
+ unsigned char old_timer;
+ unsigned char green_flag;
+
+ old_timer = RD_HARPOON(p_port + hp_seltimeout);
+
+ green_flag = RD_HARPOON(p_port + hp_clkctrl_0);
+ WR_HARPOON(p_port + hp_clkctrl_0, CLKCTRL_DEFAULT);
+
+ WR_HARPOON(p_port + hp_seltimeout, p_delay);
+ WRW_HARPOON((p_port + hp_intstat), TIMEOUT);
+ WRW_HARPOON((p_port + hp_intena), (FPT_default_intena & ~TIMEOUT));
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (RD_HARPOON(p_port + hp_portctrl_0) | START_TO));
+
+ while (!(RDW_HARPOON((p_port + hp_intstat)) & TIMEOUT)) {
+
+ if ((RD_HARPOON(p_port + hp_scsictrl_0) & SCSI_RST))
+ break;
+
+ if ((RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL))
+ break;
+ }
+
+ WR_HARPOON(p_port + hp_portctrl_0,
+ (RD_HARPOON(p_port + hp_portctrl_0) & ~START_TO));
+
+ WRW_HARPOON((p_port + hp_intstat), TIMEOUT);
+ WRW_HARPOON((p_port + hp_intena), FPT_default_intena);
+
+ WR_HARPOON(p_port + hp_clkctrl_0, green_flag);
+
+ WR_HARPOON(p_port + hp_seltimeout, old_timer);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Enable/Disable Write to EEPROM
+ *
+ * Description: The EEPROM must first be enabled for writes
+ * A total of 9 clocks are needed.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode)
+{
+ unsigned char ee_value;
+
+ ee_value =
+ (unsigned char)(RD_HARPOON(p_port + hp_ee_ctrl) &
+ (EXT_ARB_ACK | SCSI_TERM_ENA_H));
+
+ if (p_mode)
+
+ FPT_utilEESendCmdAddr(p_port, EWEN, EWEN_ADDR);
+
+ else
+
+ FPT_utilEESendCmdAddr(p_port, EWDS, EWDS_ADDR);
+
+ WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Write EEPROM
+ *
+ * Description: Write a word to the EEPROM at the specified
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data,
+ unsigned short ee_addr)
+{
+
+ unsigned char ee_value;
+ unsigned short i;
+
+ ee_value =
+ (unsigned
+ char)((RD_HARPOON(p_port + hp_ee_ctrl) &
+ (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS));
+
+ FPT_utilEESendCmdAddr(p_port, EE_WRITE, ee_addr);
+
+ ee_value |= (SEE_MS + SEE_CS);
+
+ for (i = 0x8000; i != 0; i >>= 1) {
+
+ if (i & ee_data)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ }
+ ee_value &= (EXT_ARB_ACK | SCSI_TERM_ENA_H);
+ WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS));
+
+ FPT_Wait(p_port, TO_10ms);
+
+ WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS | SEE_CS)); /* Set CS to EEPROM */
+ WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /* Turn off Master Select */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Read EEPROM
+ *
+ * Description: Read a word from the EEPROM at the desired
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned short FPT_utilEERead(u32 p_port,
+ unsigned short ee_addr)
+{
+ unsigned short i, ee_data1, ee_data2;
+
+ i = 0;
+ ee_data1 = FPT_utilEEReadOrg(p_port, ee_addr);
+ do {
+ ee_data2 = FPT_utilEEReadOrg(p_port, ee_addr);
+
+ if (ee_data1 == ee_data2)
+ return ee_data1;
+
+ ee_data1 = ee_data2;
+ i++;
+
+ } while (i < 4);
+
+ return ee_data1;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Read EEPROM Original
+ *
+ * Description: Read a word from the EEPROM at the desired
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+static unsigned short FPT_utilEEReadOrg(u32 p_port, unsigned short ee_addr)
+{
+
+ unsigned char ee_value;
+ unsigned short i, ee_data;
+
+ ee_value =
+ (unsigned
+ char)((RD_HARPOON(p_port + hp_ee_ctrl) &
+ (EXT_ARB_ACK | SCSI_TERM_ENA_H)) | (SEE_MS | SEE_CS));
+
+ FPT_utilEESendCmdAddr(p_port, EE_READ, ee_addr);
+
+ ee_value |= (SEE_MS + SEE_CS);
+ ee_data = 0;
+
+ for (i = 1; i <= 16; i++) {
+
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+
+ ee_data <<= 1;
+
+ if (RD_HARPOON(p_port + hp_ee_ctrl) & SEE_DI)
+ ee_data |= 1;
+ }
+
+ ee_value &= ~(SEE_MS + SEE_CS);
+ WR_HARPOON(p_port + hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value); /*Turn off Master Select */
+
+ return ee_data;
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Send EE command and Address to the EEPROM
+ *
+ * Description: Transfers the correct command and sends the address
+ * to the eeprom.
+ *
+ *---------------------------------------------------------------------*/
+
+static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd,
+ unsigned short ee_addr)
+{
+ unsigned char ee_value;
+ unsigned char narrow_flg;
+
+ unsigned short i;
+
+ narrow_flg =
+ (unsigned char)(RD_HARPOON(p_port + hp_page_ctrl) &
+ NARROW_SCSI_CARD);
+
+ ee_value = SEE_MS;
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+
+ ee_value |= SEE_CS; /* Set CS to EEPROM */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+
+ for (i = 0x04; i != 0; i >>= 1) {
+
+ if (i & ee_cmd)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ }
+
+ if (narrow_flg)
+ i = 0x0080;
+
+ else
+ i = 0x0200;
+
+ while (i != 0) {
+
+ if (i & ee_addr)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port + hp_ee_ctrl, ee_value);
+
+ i >>= 1;
+ }
+}
+
+static unsigned short FPT_CalcCrc16(unsigned char buffer[])
+{
+ unsigned short crc = 0;
+ int i, j;
+ unsigned short ch;
+ for (i = 0; i < ID_STRING_LENGTH; i++) {
+ ch = (unsigned short)buffer[i];
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ ch) & 1)
+ crc = (crc >> 1) ^ CRCMASK;
+ else
+ crc >>= 1;
+ ch >>= 1;
+ }
+ }
+ return crc;
+}
+
+static unsigned char FPT_CalcLrc(unsigned char buffer[])
+{
+ int i;
+ unsigned char lrc;
+ lrc = 0;
+ for (i = 0; i < ID_STRING_LENGTH; i++)
+ lrc ^= buffer[i];
+ return lrc;
+}
+
+/*
+ The following inline definitions avoid type conflicts.
+*/
+
+static inline unsigned char
+FlashPoint__ProbeHostAdapter(struct fpoint_info *FlashPointInfo)
+{
+ return FlashPoint_ProbeHostAdapter((struct sccb_mgr_info *)
+ FlashPointInfo);
+}
+
+static inline void *
+FlashPoint__HardwareResetHostAdapter(struct fpoint_info *FlashPointInfo)
+{
+ return FlashPoint_HardwareResetHostAdapter((struct sccb_mgr_info *)
+ FlashPointInfo);
+}
+
+static inline void
+FlashPoint__ReleaseHostAdapter(void *CardHandle)
+{
+ FlashPoint_ReleaseHostAdapter(CardHandle);
+}
+
+static inline void
+FlashPoint__StartCCB(void *CardHandle, struct blogic_ccb *CCB)
+{
+ FlashPoint_StartCCB(CardHandle, (struct sccb *)CCB);
+}
+
+static inline void
+FlashPoint__AbortCCB(void *CardHandle, struct blogic_ccb *CCB)
+{
+ FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB);
+}
+
+static inline bool
+FlashPoint__InterruptPending(void *CardHandle)
+{
+ return FlashPoint_InterruptPending(CardHandle);
+}
+
+static inline int
+FlashPoint__HandleInterrupt(void *CardHandle)
+{
+ return FlashPoint_HandleInterrupt(CardHandle);
+}
+
+#define FlashPoint_ProbeHostAdapter FlashPoint__ProbeHostAdapter
+#define FlashPoint_HardwareResetHostAdapter FlashPoint__HardwareResetHostAdapter
+#define FlashPoint_ReleaseHostAdapter FlashPoint__ReleaseHostAdapter
+#define FlashPoint_StartCCB FlashPoint__StartCCB
+#define FlashPoint_AbortCCB FlashPoint__AbortCCB
+#define FlashPoint_InterruptPending FlashPoint__InterruptPending
+#define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt
+
+#else /* !CONFIG_SCSI_FLASHPOINT */
+
+/*
+ Define prototypes for the FlashPoint SCCB Manager Functions.
+*/
+
+extern unsigned char FlashPoint_ProbeHostAdapter(struct fpoint_info *);
+extern void *FlashPoint_HardwareResetHostAdapter(struct fpoint_info *);
+extern void FlashPoint_StartCCB(void *, struct blogic_ccb *);
+extern int FlashPoint_AbortCCB(void *, struct blogic_ccb *);
+extern bool FlashPoint_InterruptPending(void *);
+extern int FlashPoint_HandleInterrupt(void *);
+extern void FlashPoint_ReleaseHostAdapter(void *);
+
+#endif /* CONFIG_SCSI_FLASHPOINT */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
new file mode 100644
index 000000000..b021bcb88
--- /dev/null
+++ b/drivers/scsi/Kconfig
@@ -0,0 +1,1761 @@
+menu "SCSI device support"
+
+config SCSI_MOD
+ tristate
+ default y if SCSI=n || SCSI=y
+ default m if SCSI=m
+
+config RAID_ATTRS
+ tristate "RAID Transport Class"
+ default n
+ depends on BLOCK
+ depends on SCSI_MOD
+ ---help---
+ Provides RAID
+
+config SCSI
+ tristate "SCSI device support"
+ depends on BLOCK
+ select SCSI_DMA if HAS_DMA
+ ---help---
+ If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
+ any other SCSI device under Linux, say Y and make sure that you know
+ the name of your SCSI host adapter (the card inside your computer
+ that "speaks" the SCSI protocol, also called SCSI controller),
+ because you will be asked for it.
+
+ You also need to say Y here if you have a device which speaks
+ the SCSI protocol. Examples of this include the parallel port
+ version of the IOMEGA ZIP drive, USB storage devices, Fibre
+ Channel, and FireWire storage.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>.
+ The module will be called scsi_mod.
+
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a SCSI device.
+
+config SCSI_DMA
+ bool
+ default n
+
+config SCSI_NETLINK
+ bool
+ default n
+ depends on NET
+
+config SCSI_MQ_DEFAULT
+ bool "SCSI: use blk-mq I/O path by default"
+ depends on SCSI
+ ---help---
+ This option enables the new blk-mq based I/O path for SCSI
+ devices by default. With the option the scsi_mod.use_blk_mq
+ module/boot option defaults to Y, without it to N, but it can
+ still be overriden either way.
+
+ If unsure say N.
+
+config SCSI_PROC_FS
+ bool "legacy /proc/scsi/ support"
+ depends on SCSI && PROC_FS
+ default y
+ ---help---
+ This option enables support for the various files in
+ /proc/scsi. In Linux 2.6 this has been superseded by
+ files in sysfs but many legacy applications rely on this.
+
+ If unsure say Y.
+
+comment "SCSI support type (disk, tape, CD-ROM)"
+ depends on SCSI
+
+config BLK_DEV_SD
+ tristate "SCSI disk support"
+ depends on SCSI
+ ---help---
+ If you want to use SCSI hard disks, Fibre Channel disks,
+ Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
+ USB storage or the SCSI or parallel port version of
+ the IOMEGA ZIP drive, say Y and read the SCSI-HOWTO,
+ the Disk-HOWTO and the Multi-Disk-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. This is NOT for SCSI
+ CD-ROMs.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>.
+ The module will be called sd_mod.
+
+ Do not compile this driver as a module if your root file system
+ (the one containing the directory /) is located on a SCSI disk.
+ In this case, do not compile the driver for your SCSI host adapter
+ (below) as a module either.
+
+config CHR_DEV_ST
+ tristate "SCSI tape support"
+ depends on SCSI
+ ---help---
+ If you want to use a SCSI tape drive under Linux, say Y and read the
+ SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, and
+ <file:Documentation/scsi/st.txt> in the kernel source. This is NOT
+ for SCSI CD-ROMs.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>. The module will be called st.
+
+config CHR_DEV_OSST
+ tristate "SCSI OnStream SC-x0 tape support"
+ depends on SCSI
+ ---help---
+ The OnStream SC-x0 SCSI tape drives cannot be driven by the
+ standard st driver, but instead need this special osst driver and
+ use the /dev/osstX char device nodes (major 206). Via usb-storage,
+ you may be able to drive the USB-x0 and DI-x0 drives as well.
+ Note that there is also a second generation of OnStream
+ tape drives (ADR-x0) that supports the standard SCSI-2 commands for
+ tapes (QIC-157) and can be driven by the standard driver st.
+ For more information, you may have a look at the SCSI-HOWTO
+ <http://www.tldp.org/docs.html#howto> and
+ <file:Documentation/scsi/osst.txt> in the kernel source.
+ More info on the OnStream driver may be found on
+ <http://sourceforge.net/projects/osst/>
+ Please also have a look at the standard st docu, as most of it
+ applies to osst as well.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>. The module will be called osst.
+
+config BLK_DEV_SR
+ tristate "SCSI CDROM support"
+ depends on SCSI
+ ---help---
+ If you want to use a CD or DVD drive attached to your computer
+ by SCSI, FireWire, USB or ATAPI, say Y and read the SCSI-HOWTO
+ and the CDROM-HOWTO at <http://www.tldp.org/docs.html#howto>.
+
+ Make sure to say Y or M to "ISO 9660 CD-ROM file system support".
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>.
+ The module will be called sr_mod.
+
+config BLK_DEV_SR_VENDOR
+ bool "Enable vendor-specific extensions (for SCSI CDROM)"
+ depends on BLK_DEV_SR
+ help
+ This enables the usage of vendor specific SCSI commands. This is
+ required to support multisession CDs with old NEC/TOSHIBA cdrom
+ drives (and HP Writers). If you have such a drive and get the first
+ session only, try saying Y here; everybody else says N.
+
+config CHR_DEV_SG
+ tristate "SCSI generic support"
+ depends on SCSI
+ ---help---
+ If you want to use SCSI scanners, synthesizers or CD-writers or just
+ about anything having "SCSI" in its name other than hard disks,
+ CD-ROMs or tapes, say Y here. These won't be supported by the kernel
+ directly, so you need some additional software which knows how to
+ talk to these devices using the SCSI protocol:
+
+ For scanners, look at SANE (<http://www.sane-project.org/>). For CD
+ writer software look at Cdrtools
+ (<http://cdrecord.berlios.de/private/cdrecord.html>)
+ and for burning a "disk at once": CDRDAO
+ (<http://cdrdao.sourceforge.net/>). Cdparanoia is a high
+ quality digital reader of audio CDs (<http://www.xiph.org/paranoia/>).
+ For other devices, it's possible that you'll have to write the
+ driver software yourself. Please read the file
+ <file:Documentation/scsi/scsi-generic.txt> for more information.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>. The module will be called sg.
+
+ If unsure, say N.
+
+config CHR_DEV_SCH
+ tristate "SCSI media changer support"
+ depends on SCSI
+ ---help---
+ This is a driver for SCSI media changers. Most common devices are
+ tape libraries and MOD/CDROM jukeboxes. *Real* jukeboxes, you
+ don't need this for those tiny 6-slot cdrom changers. Media
+ changers are listed as "Type: Medium Changer" in /proc/scsi/scsi.
+ If you have such hardware and want to use it with linux, say Y
+ here. Check <file:Documentation/scsi/scsi-changer.txt> for details.
+
+ If you want to compile this as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt> and
+ <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
+ If unsure, say N.
+
+config SCSI_ENCLOSURE
+ tristate "SCSI Enclosure Support"
+ depends on SCSI && ENCLOSURE_SERVICES
+ help
+ Enclosures are devices sitting on or in SCSI backplanes that
+ manage devices. If you have a disk cage, the chances are that
+ it has an enclosure device. Selecting this option will just allow
+ certain enclosure conditions to be reported and is not required.
+
+config SCSI_CONSTANTS
+ bool "Verbose SCSI error reporting (kernel size +=75K)"
+ depends on SCSI
+ help
+ The error messages regarding your SCSI hardware will be easier to
+ understand if you say Y here; it will enlarge your kernel by about
+ 75 KB. If in doubt, say Y.
+
+config SCSI_LOGGING
+ bool "SCSI logging facility"
+ depends on SCSI
+ ---help---
+ This turns on a logging facility that can be used to debug a number
+ of SCSI related problems.
+
+ If you say Y here, no logging output will appear by default, but you
+ can enable logging by saying Y to "/proc file system support" and
+ "Sysctl support" below and executing the command
+
+ echo <bitmask> > /proc/sys/dev/scsi/logging_level
+
+ where <bitmask> is a four byte value representing the logging type
+ and logging level for each type of logging selected.
+
+ There are a number of logging types and you can find them in the
+ source at <file:drivers/scsi/scsi_logging.h>. The logging levels
+ are also described in that file and they determine the verbosity of
+ the logging for each logging type.
+
+ If you say N here, it may be harder to track down some types of SCSI
+ problems. If you say Y here your kernel will be somewhat larger, but
+ there should be no noticeable performance impact as long as you have
+ logging turned off.
+
+config SCSI_SCAN_ASYNC
+ bool "Asynchronous SCSI scanning"
+ depends on SCSI
+ help
+ The SCSI subsystem can probe for devices while the rest of the
+ system continues booting, and even probe devices on different
+ busses in parallel, leading to a significant speed-up.
+
+ If you have built SCSI as modules, enabling this option can
+ be a problem as the devices may not have been found by the
+ time your system expects them to have been. You can load the
+ scsi_wait_scan module to ensure that all scans have completed.
+ If you build your SCSI drivers into the kernel, then everything
+ will work fine if you say Y here.
+
+ You can override this choice by specifying "scsi_mod.scan=sync"
+ or async on the kernel's command line.
+
+ Note that this setting also affects whether resuming from
+ system suspend will be performed asynchronously.
+
+menu "SCSI Transports"
+ depends on SCSI
+
+config SCSI_SPI_ATTRS
+ tristate "Parallel SCSI (SPI) Transport Attributes"
+ depends on SCSI
+ help
+ If you wish to export transport-specific information about
+ each attached SCSI device to sysfs, say Y. Otherwise, say N.
+
+config SCSI_FC_ATTRS
+ tristate "FiberChannel Transport Attributes"
+ depends on SCSI && NET
+ select SCSI_NETLINK
+ help
+ If you wish to export transport-specific information about
+ each attached FiberChannel device to sysfs, say Y.
+ Otherwise, say N.
+
+config SCSI_ISCSI_ATTRS
+ tristate "iSCSI Transport Attributes"
+ depends on SCSI && NET
+ select BLK_DEV_BSGLIB
+ help
+ If you wish to export transport-specific information about
+ each attached iSCSI device to sysfs, say Y.
+ Otherwise, say N.
+
+config SCSI_SAS_ATTRS
+ tristate "SAS Transport Attributes"
+ depends on SCSI
+ select BLK_DEV_BSG
+ help
+ If you wish to export transport-specific information about
+ each attached SAS device to sysfs, say Y.
+
+source "drivers/scsi/libsas/Kconfig"
+
+config SCSI_SRP_ATTRS
+ tristate "SRP Transport Attributes"
+ depends on SCSI
+ help
+ If you wish to export transport-specific information about
+ each attached SRP device to sysfs, say Y.
+
+endmenu
+
+menuconfig SCSI_LOWLEVEL
+ bool "SCSI low-level drivers"
+ depends on SCSI!=n
+ default y
+
+if SCSI_LOWLEVEL && SCSI
+
+config ISCSI_TCP
+ tristate "iSCSI Initiator over TCP/IP"
+ depends on SCSI && INET
+ select CRYPTO
+ select CRYPTO_MD5
+ select CRYPTO_CRC32C
+ select SCSI_ISCSI_ATTRS
+ help
+ The iSCSI Driver provides a host with the ability to access storage
+ through an IP network. The driver uses the iSCSI protocol to transport
+ SCSI requests and responses over a TCP/IP network between the host
+ (the "initiator") and "targets". Architecturally, the iSCSI driver
+ combines with the host's TCP/IP stack, network drivers, and Network
+ Interface Card (NIC) to provide the same functions as a SCSI or a
+ Fibre Channel (FC) adapter driver with a Host Bus Adapter (HBA).
+
+ To compile this driver as a module, choose M here: the
+ module will be called iscsi_tcp.
+
+ The userspace component needed to initialize the driver, documentation,
+ and sample configuration files can be found here:
+
+ http://open-iscsi.org
+
+config ISCSI_BOOT_SYSFS
+ tristate "iSCSI Boot Sysfs Interface"
+ default n
+ help
+ This option enables support for exposing iSCSI boot information
+ via sysfs to userspace. If you wish to export this information,
+ say Y. Otherwise, say N.
+
+source "drivers/scsi/cxgbi/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/bnx2fc/Kconfig"
+source "drivers/scsi/be2iscsi/Kconfig"
+
+config SGIWD93_SCSI
+ tristate "SGI WD93C93 SCSI Driver"
+ depends on SGI_HAS_WD93 && SCSI
+ help
+ If you have a Western Digital WD93 SCSI controller on
+ an SGI MIPS system, say Y. Otherwise, say N.
+
+config BLK_DEV_3W_XXXX_RAID
+ tristate "3ware 5/6/7/8xxx ATA-RAID support"
+ depends on PCI && SCSI
+ help
+ 3ware is the only hardware ATA-Raid product in Linux to date.
+ This card is 2,4, or 8 channel master mode support only.
+ SCSI support required!!!
+
+ <http://www.3ware.com/>
+
+ Please read the comments at the top of
+ <file:drivers/scsi/3w-xxxx.c>.
+
+config SCSI_HPSA
+ tristate "HP Smart Array SCSI driver"
+ depends on PCI && SCSI
+ select CHECK_SIGNATURE
+ help
+ This driver supports HP Smart Array Controllers (circa 2009).
+ It is a SCSI alternative to the cciss driver, which is a block
+ driver. Anyone wishing to use HP Smart Array controllers who
+ would prefer the devices be presented to linux as SCSI devices,
+ rather than as generic block devices should say Y here.
+
+config SCSI_3W_9XXX
+ tristate "3ware 9xxx SATA-RAID support"
+ depends on PCI && SCSI
+ help
+ This driver supports the 9000 series 3ware SATA-RAID cards.
+
+ <http://www.amcc.com>
+
+ Please read the comments at the top of
+ <file:drivers/scsi/3w-9xxx.c>.
+
+config SCSI_3W_SAS
+ tristate "3ware 97xx SAS/SATA-RAID support"
+ depends on PCI && SCSI
+ help
+ This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
+
+ <http://www.lsi.com>
+
+ Please read the comments at the top of
+ <file:drivers/scsi/3w-sas.c>.
+
+config SCSI_7000FASST
+ tristate "7000FASST SCSI support"
+ depends on ISA && SCSI && ISA_DMA_API
+ select CHECK_SIGNATURE
+ help
+ This driver supports the Western Digital 7000 SCSI host adapter
+ family. Some information is in the source:
+ <file:drivers/scsi/wd7000.c>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wd7000.
+
+config SCSI_ACARD
+ tristate "ACARD SCSI support"
+ depends on PCI && SCSI
+ help
+ This driver supports the ACARD SCSI host adapter.
+ Support Chip <ATP870 ATP876 ATP880 ATP885>
+ To compile this driver as a module, choose M here: the
+ module will be called atp870u.
+
+config SCSI_AHA152X
+ tristate "Adaptec AHA152X/2825 support"
+ depends on ISA && SCSI
+ select SCSI_SPI_ATTRS
+ select CHECK_SIGNATURE
+ ---help---
+ This is a driver for the AHA-1510, AHA-1520, AHA-1522, and AHA-2825
+ SCSI host adapters. It also works for the AVA-1505, but the IRQ etc.
+ must be manually specified in this case.
+
+ It is explained in section 3.3 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. You might also want to
+ read the file <file:Documentation/scsi/aha152x.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aha152x.
+
+config SCSI_AHA1542
+ tristate "Adaptec AHA1542 support"
+ depends on ISA && SCSI && ISA_DMA_API
+ ---help---
+ This is support for a SCSI host adapter. It is explained in section
+ 3.4 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that Trantor was
+ purchased by Adaptec, and some former Trantor products are being
+ sold under the Adaptec name. If it doesn't work out of the box, you
+ may have to change some settings in <file:drivers/scsi/aha1542.h>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aha1542.
+
+config SCSI_AHA1740
+ tristate "Adaptec AHA1740 support"
+ depends on EISA && SCSI
+ ---help---
+ This is support for a SCSI host adapter. It is explained in section
+ 3.5 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. If it doesn't work out
+ of the box, you may have to change some settings in
+ <file:drivers/scsi/aha1740.h>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aha1740.
+
+config SCSI_AACRAID
+ tristate "Adaptec AACRAID support"
+ depends on SCSI && PCI
+ help
+ This driver supports a variety of Dell, HP, Adaptec, IBM and
+ ICP storage products. For a list of supported products, refer
+ to <file:Documentation/scsi/aacraid.txt>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called aacraid.
+
+
+source "drivers/scsi/aic7xxx/Kconfig.aic7xxx"
+source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
+source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
+
+config SCSI_MVUMI
+ tristate "Marvell UMI driver"
+ depends on SCSI && PCI
+ help
+ Module for Marvell Universal Message Interface(UMI) driver
+
+ To compile this driver as a module, choose M here: the
+ module will be called mvumi.
+
+config SCSI_DPT_I2O
+ tristate "Adaptec I2O RAID support "
+ depends on SCSI && PCI && VIRT_TO_BUS
+ help
+ This driver supports all of Adaptec's I2O based RAID controllers as
+ well as the DPT SmartRaid V cards. This is an Adaptec maintained
+ driver by Deanna Bonds. See <file:Documentation/scsi/dpti.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dpt_i2o.
+
+config SCSI_ADVANSYS
+ tristate "AdvanSys SCSI support"
+ depends on SCSI && VIRT_TO_BUS && !ARM
+ depends on ISA || EISA || PCI
+ help
+ This is a driver for all SCSI host adapters manufactured by
+ AdvanSys. It is documented in the kernel source in
+ <file:drivers/scsi/advansys.c>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called advansys.
+
+config SCSI_IN2000
+ tristate "Always IN2000 SCSI support"
+ depends on ISA && SCSI
+ help
+ This is support for an ISA bus SCSI host adapter. You'll find more
+ information in <file:Documentation/scsi/in2000.txt>. If it doesn't work
+ out of the box, you may have to change the jumpers for IRQ or
+ address selection.
+
+ To compile this driver as a module, choose M here: the
+ module will be called in2000.
+
+config SCSI_ARCMSR
+ tristate "ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Adapter"
+ depends on PCI && SCSI
+ help
+ This driver supports all of ARECA's SATA/SAS RAID controller cards.
+ This is an ARECA-maintained driver by Erich Chen.
+ If you have any problems, please mail to: <erich@areca.com.tw>.
+ Areca supports Linux RAID config tools.
+ Please link <http://www.areca.com.tw>
+
+ To compile this driver as a module, choose M here: the
+ module will be called arcmsr (modprobe arcmsr).
+
+source "drivers/scsi/esas2r/Kconfig"
+source "drivers/scsi/megaraid/Kconfig.megaraid"
+source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/ufs/Kconfig"
+
+config SCSI_HPTIOP
+ tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
+ depends on SCSI && PCI
+ help
+ This option enables support for HighPoint RocketRAID 3xxx/4xxx
+ controllers.
+
+ To compile this driver as a module, choose M here; the module
+ will be called hptiop. If unsure, say N.
+
+config SCSI_BUSLOGIC
+ tristate "BusLogic SCSI support"
+ depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
+ ---help---
+ This is support for BusLogic MultiMaster and FlashPoint SCSI Host
+ Adapters. Consult the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, and the files
+ <file:Documentation/scsi/BusLogic.txt> and
+ <file:Documentation/scsi/FlashPoint.txt> for more information.
+ Note that support for FlashPoint is only available for 32-bit
+ x86 configurations.
+
+ To compile this driver as a module, choose M here: the
+ module will be called BusLogic.
+
+config SCSI_FLASHPOINT
+ bool "FlashPoint support"
+ depends on SCSI_BUSLOGIC && PCI
+ help
+ This option allows you to add FlashPoint support to the
+ BusLogic SCSI driver. The FlashPoint SCCB Manager code is
+ substantial, so users of MultiMaster Host Adapters may not
+ wish to include it.
+
+config VMWARE_PVSCSI
+ tristate "VMware PVSCSI driver support"
+ depends on PCI && SCSI && X86
+ help
+ This driver supports VMware's para virtualized SCSI HBA.
+ To compile this driver as a module, choose M here: the
+ module will be called vmw_pvscsi.
+
+config XEN_SCSI_FRONTEND
+ tristate "XEN SCSI frontend driver"
+ depends on SCSI && XEN
+ select XEN_XENBUS_FRONTEND
+ help
+ The XEN SCSI frontend driver allows the kernel to access SCSI Devices
+ within another guest OS (usually Dom0).
+ Only needed if the kernel is running in a XEN guest and generic
+ SCSI access to a device is needed.
+
+config HYPERV_STORAGE
+ tristate "Microsoft Hyper-V virtual storage driver"
+ depends on SCSI && HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V virtual storage driver.
+
+config LIBFC
+ tristate "LibFC module"
+ depends on SCSI_FC_ATTRS
+ select CRC32
+ ---help---
+ Fibre Channel library module
+
+config LIBFCOE
+ tristate "LibFCoE module"
+ depends on LIBFC
+ ---help---
+ Library for Fibre Channel over Ethernet module
+
+config FCOE
+ tristate "FCoE module"
+ depends on PCI
+ depends on LIBFCOE
+ ---help---
+ Fibre Channel over Ethernet module
+
+config FCOE_FNIC
+ tristate "Cisco FNIC Driver"
+ depends on PCI && X86
+ depends on LIBFCOE
+ help
+ This is support for the Cisco PCI-Express FCoE HBA.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>.
+ The module will be called fnic.
+
+config SCSI_DMX3191D
+ tristate "DMX3191D SCSI support"
+ depends on PCI && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is support for Domex DMX3191D SCSI Host Adapters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dmx3191d.
+
+config SCSI_DTC3280
+ tristate "DTC3180/3280 SCSI support"
+ depends on ISA && SCSI
+ select SCSI_SPI_ATTRS
+ select CHECK_SIGNATURE
+ help
+ This is support for DTC 3180/3280 SCSI Host Adapters. Please read
+ the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, and the file
+ <file:Documentation/scsi/dtc3x80.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dtc.
+
+config SCSI_EATA
+ tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
+ depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
+ ---help---
+ This driver supports all EATA/DMA-compliant SCSI host adapters. DPT
+ ISA and all EISA I/O addresses are probed looking for the "EATA"
+ signature. The addresses of all the PCI SCSI controllers reported
+ by the PCI subsystem are probed as well.
+
+ You want to read the start of <file:drivers/scsi/eata.c> and the
+ SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called eata.
+
+config SCSI_EATA_TAGGED_QUEUE
+ bool "enable tagged command queueing"
+ depends on SCSI_EATA
+ help
+ This is a feature of SCSI-2 which improves performance: the host
+ adapter can send several SCSI commands to a device's queue even if
+ previous commands haven't finished yet.
+ This is equivalent to the "eata=tc:y" boot option.
+
+config SCSI_EATA_LINKED_COMMANDS
+ bool "enable elevator sorting"
+ depends on SCSI_EATA
+ help
+ This option enables elevator sorting for all probed SCSI disks and
+ CD-ROMs. It definitely reduces the average seek distance when doing
+ random seeks, but this does not necessarily result in a noticeable
+ performance improvement: your mileage may vary...
+ This is equivalent to the "eata=lc:y" boot option.
+
+config SCSI_EATA_MAX_TAGS
+ int "maximum number of queued commands"
+ depends on SCSI_EATA
+ default "16"
+ help
+ This specifies how many SCSI commands can be maximally queued for
+ each probed SCSI device. You should reduce the default value of 16
+ only if you have disks with buggy or limited tagged command support.
+ Minimum is 2 and maximum is 62. This value is also the window size
+ used by the elevator sorting option above. The effective value used
+ by the driver for each probed SCSI device is reported at boot time.
+ This is equivalent to the "eata=mq:8" boot option.
+
+config SCSI_EATA_PIO
+ tristate "EATA-PIO (old DPT PM2001, PM2012A) support"
+ depends on (ISA || EISA || PCI) && SCSI && BROKEN
+ ---help---
+ This driver supports all EATA-PIO protocol compliant SCSI Host
+ Adapters like the DPT PM2001 and the PM2012A. EATA-DMA compliant
+ host adapters could also use this driver but are discouraged from
+ doing so, since this driver only supports hard disks and lacks
+ numerous features. You might want to have a look at the SCSI-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called eata_pio.
+
+config SCSI_FUTURE_DOMAIN
+ tristate "Future Domain 16xx SCSI/AHA-2920A support"
+ depends on (ISA || PCI) && SCSI
+ select CHECK_SIGNATURE
+ ---help---
+ This is support for Future Domain's 16-bit SCSI host adapters
+ (TMC-1660/1680, TMC-1650/1670, TMC-3260, TMC-1610M/MER/MEX) and
+ other adapters based on the Future Domain chipsets (Quantum
+ ISA-200S, ISA-250MG; Adaptec AHA-2920A; and at least one IBM board).
+ It is explained in section 3.7 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ NOTE: Newer Adaptec AHA-2920C boards use the Adaptec AIC-7850 chip
+ and should use the aic7xxx driver ("Adaptec AIC7xxx chipset SCSI
+ controller support"). This Future Domain driver works with the older
+ Adaptec AHA-2920A boards with a Future Domain chip on them.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fdomain.
+
+config SCSI_GDTH
+ tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
+ depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
+ ---help---
+ Formerly called GDT SCSI Disk Array Controller Support.
+
+ This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI)
+ manufactured by Intel Corporation/ICP vortex GmbH. It is documented
+ in the kernel source in <file:drivers/scsi/gdth.c> and
+ <file:drivers/scsi/gdth.h>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gdth.
+
+config SCSI_ISCI
+ tristate "Intel(R) C600 Series Chipset SAS Controller"
+ depends on PCI && SCSI
+ depends on X86
+ select SCSI_SAS_LIBSAS
+ ---help---
+ This driver supports the 6Gb/s SAS capabilities of the storage
+ control unit found in the Intel(R) C600 series chipset.
+
+config SCSI_GENERIC_NCR5380
+ tristate "Generic NCR5380/53c400 SCSI PIO support"
+ depends on ISA && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This is a driver for the old NCR 53c80 series of SCSI controllers
+ on boards using PIO. Most boards such as the Trantor T130 fit this
+ category, along with a large number of ISA 8bit controllers shipped
+ for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
+ you should select the specific driver for that card rather than
+ generic 5380 support.
+
+ It is explained in section 3.8 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. If it doesn't work out
+ of the box, you may have to change some settings in
+ <file:drivers/scsi/g_NCR5380.h>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called g_NCR5380.
+
+config SCSI_GENERIC_NCR5380_MMIO
+ tristate "Generic NCR5380/53c400 SCSI MMIO support"
+ depends on ISA && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This is a driver for the old NCR 53c80 series of SCSI controllers
+ on boards using memory mapped I/O.
+ It is explained in section 3.8 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. If it doesn't work out
+ of the box, you may have to change some settings in
+ <file:drivers/scsi/g_NCR5380.h>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called g_NCR5380_mmio.
+
+config SCSI_GENERIC_NCR53C400
+ bool "Enable NCR53c400 extensions"
+ depends on SCSI_GENERIC_NCR5380
+ help
+ This enables certain optimizations for the NCR53c400 SCSI cards.
+ You might as well try it out. Note that this driver will only probe
+ for the Trantor T130B in its default configuration; you might have
+ to pass a command line option to the kernel at boot time if it does
+ not detect your card. See the file
+ <file:Documentation/scsi/g_NCR5380.txt> for details.
+
+config SCSI_IPS
+ tristate "IBM ServeRAID support"
+ depends on PCI && SCSI
+ ---help---
+ This is support for the IBM ServeRAID hardware RAID controllers.
+ See <http://www.developer.ibm.com/welcome/netfinity/serveraid.html>
+ and <http://www-947.ibm.com/support/entry/portal/docdisplay?brand=5000008&lndocid=SERV-RAID>
+ for more information. If this driver does not work correctly
+ without modification please contact the author by email at
+ <ipslinux@adaptec.com>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ips.
+
+config SCSI_IBMVSCSI
+ tristate "IBM Virtual SCSI support"
+ depends on PPC_PSERIES
+ select SCSI_SRP_ATTRS
+ help
+ This is the IBM POWER Virtual SCSI Client
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmvscsi.
+
+config SCSI_IBMVFC
+ tristate "IBM Virtual FC support"
+ depends on PPC_PSERIES && SCSI
+ depends on SCSI_FC_ATTRS
+ help
+ This is the IBM POWER Virtual FC Client
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmvfc.
+
+config SCSI_IBMVFC_TRACE
+ bool "enable driver internal trace"
+ depends on SCSI_IBMVFC
+ default y
+ help
+ If you say Y here, the driver will trace all commands issued
+ to the adapter. Performance impact is minimal. Trace can be
+ dumped using /sys/class/scsi_host/hostXX/trace.
+
+config SCSI_INITIO
+ tristate "Initio 9100U(W) support"
+ depends on PCI && SCSI
+ help
+ This is support for the Initio 91XXU(W) SCSI host adapter. Please
+ read the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called initio.
+
+config SCSI_INIA100
+ tristate "Initio INI-A100U2W support"
+ depends on PCI && SCSI
+ help
+ This is support for the Initio INI-A100U2W SCSI host adapter.
+ Please read the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called a100u2w.
+
+config SCSI_PPA
+ tristate "IOMEGA parallel port (ppa - older drives)"
+ depends on SCSI && PARPORT_PC
+ ---help---
+ This driver supports older versions of IOMEGA's parallel port ZIP
+ drive (a 100 MB removable media device).
+
+ Note that you can say N here if you have the SCSI version of the ZIP
+ drive: it will be supported automatically if you said Y to the
+ generic "SCSI disk support", above.
+
+ If you have the ZIP Plus drive or a more recent parallel port ZIP
+ drive (if the supplied cable with the drive is labeled "AutoDetect")
+ then you should say N here and Y to "IOMEGA parallel port (imm -
+ newer drives)", below.
+
+ For more information about this driver and how to use it you should
+ read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ the SCSI-HOWTO, which is available from
+ <http://www.tldp.org/docs.html#howto>. If you use this driver,
+ you will still be able to use the parallel port for other tasks,
+ such as a printer; it is safe to compile both drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ppa.
+
+config SCSI_IMM
+ tristate "IOMEGA parallel port (imm - newer drives)"
+ depends on SCSI && PARPORT_PC
+ ---help---
+ This driver supports newer versions of IOMEGA's parallel port ZIP
+ drive (a 100 MB removable media device).
+
+ Note that you can say N here if you have the SCSI version of the ZIP
+ drive: it will be supported automatically if you said Y to the
+ generic "SCSI disk support", above.
+
+ If you have the ZIP Plus drive or a more recent parallel port ZIP
+ drive (if the supplied cable with the drive is labeled "AutoDetect")
+ then you should say Y here; if you have an older ZIP drive, say N
+ here and Y to "IOMEGA Parallel Port (ppa - older drives)", above.
+
+ For more information about this driver and how to use it you should
+ read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ the SCSI-HOWTO, which is available from
+ <http://www.tldp.org/docs.html#howto>. If you use this driver,
+ you will still be able to use the parallel port for other tasks,
+ such as a printer; it is safe to compile both drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imm.
+
+config SCSI_IZIP_EPP16
+ bool "ppa/imm option - Use slow (but safe) EPP-16"
+ depends on SCSI_PPA || SCSI_IMM
+ ---help---
+ EPP (Enhanced Parallel Port) is a standard for parallel ports which
+ allows them to act as expansion buses that can handle up to 64
+ peripheral devices.
+
+ Some parallel port chipsets are slower than their motherboard, and
+ so we have to control the state of the chipset's FIFO queue every
+ now and then to avoid data loss. This will be done if you say Y
+ here.
+
+ Generally, saying Y is the safe option and slows things down a bit.
+
+config SCSI_IZIP_SLOW_CTR
+ bool "ppa/imm option - Assume slow parport control register"
+ depends on SCSI_PPA || SCSI_IMM
+ help
+ Some parallel ports are known to have excessive delays between
+ changing the parallel port control register and good data being
+ available on the parallel port data/status register. This option
+ forces a small delay (1.0 usec to be exact) after changing the
+ control register to let things settle out. Enabling this option may
+ result in a big drop in performance but some very old parallel ports
+ (found in 386 vintage machines) will not work properly.
+
+ Generally, saying N is fine.
+
+config SCSI_NCR53C406A
+ tristate "NCR53c406a SCSI support"
+ depends on ISA && SCSI
+ help
+ This is support for the NCR53c406a SCSI host adapter. For user
+ configurable parameters, check out <file:drivers/scsi/NCR53c406a.c>
+ in the kernel source. Also read the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called NCR53c406.
+
+config SCSI_NCR_D700
+ tristate "NCR Dual 700 MCA SCSI support"
+ depends on MCA && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is a driver for the MicroChannel Dual 700 card produced by
+ NCR and commonly used in 345x/35xx/4100 class machines. It always
+ tries to negotiate sync and uses tag command queueing.
+
+ Unless you have an NCR manufactured machine, the chances are that
+ you do not have this SCSI card, so say N.
+
+config SCSI_LASI700
+ tristate "HP Lasi SCSI support for 53c700/710"
+ depends on GSC && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is a driver for the SCSI controller in the Lasi chip found in
+ many PA-RISC workstations & servers. If you do not know whether you
+ have a Lasi chip, it is safe to say "Y" here.
+
+config SCSI_SNI_53C710
+ tristate "SNI RM SCSI support for 53c710"
+ depends on SNI_RM && SCSI
+ select SCSI_SPI_ATTRS
+ select 53C700_LE_ON_BE
+ help
+ This is a driver for the onboard SCSI controller found in older
+ SNI RM workstations & servers.
+
+config 53C700_LE_ON_BE
+ bool
+ depends on SCSI_LASI700
+ default y
+
+config SCSI_STEX
+ tristate "Promise SuperTrak EX Series support"
+ depends on PCI && SCSI
+ ---help---
+ This driver supports Promise SuperTrak EX series storage controllers.
+
+ Promise provides Linux RAID configuration utility for these
+ controllers. Please visit <http://www.promise.com> to download.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stex.
+
+config 53C700_BE_BUS
+ bool
+ depends on SCSI_A4000T || SCSI_ZORRO7XX || MVME16x_SCSI || BVME6000_SCSI
+ default y
+
+config SCSI_SYM53C8XX_2
+ tristate "SYM53C8XX Version 2 SCSI support"
+ depends on PCI && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This driver supports the whole NCR53C8XX/SYM53C8XX family of
+ PCI-SCSI controllers. It also supports the subset of LSI53C10XX
+ Ultra-160 controllers that are based on the SYM53C8XX SCRIPTS
+ language. It does not support LSI53C10XX Ultra-320 PCI-X SCSI
+ controllers; you need to use the Fusion MPT driver for that.
+
+ Please read <file:Documentation/scsi/sym53c8xx_2.txt> for more
+ information.
+
+config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
+ int "DMA addressing mode"
+ depends on SCSI_SYM53C8XX_2
+ default "1"
+ ---help---
+ This option only applies to PCI-SCSI chips that are PCI DAC
+ capable (875A, 895A, 896, 1010-33, 1010-66, 1000).
+
+ When set to 0, the driver will program the chip to only perform
+ 32-bit DMA. When set to 1, the chip will be able to perform DMA
+ to addresses up to 1TB. When set to 2, the driver supports the
+ full 64-bit DMA address range, but can only address 16 segments
+ of 4 GB each. This limits the total addressable range to 64 GB.
+
+ Most machines with less than 4GB of memory should use a setting
+ of 0 for best performance. If your machine has 4GB of memory
+ or more, you should set this option to 1 (the default).
+
+ The still experimental value 2 (64 bit DMA addressing with 16
+ x 4GB segments limitation) can be used on systems that require
+ PCI address bits past bit 39 to be set for the addressing of
+ memory using PCI DAC cycles.
+
+config SCSI_SYM53C8XX_DEFAULT_TAGS
+ int "Default tagged command queue depth"
+ depends on SCSI_SYM53C8XX_2
+ default "16"
+ help
+ This is the default value of the command queue depth the
+ driver will announce to the generic SCSI layer for devices
+ that support tagged command queueing. This value can be changed
+ from the boot command line. This is a soft limit that cannot
+ exceed CONFIG_SCSI_SYM53C8XX_MAX_TAGS.
+
+config SCSI_SYM53C8XX_MAX_TAGS
+ int "Maximum number of queued commands"
+ depends on SCSI_SYM53C8XX_2
+ default "64"
+ help
+ This option allows you to specify the maximum number of commands
+ that can be queued to any device, when tagged command queuing is
+ possible. The driver supports up to 256 queued commands per device.
+ This value is used as a compiled-in hard limit.
+
+config SCSI_SYM53C8XX_MMIO
+ bool "Use memory mapped IO"
+ depends on SCSI_SYM53C8XX_2
+ default y
+ help
+ Memory mapped IO is faster than Port IO. Most people should
+ answer Y here, but some machines may have problems. If you have
+ to answer N here, please report the problem to the maintainer.
+
+config SCSI_IPR
+ tristate "IBM Power Linux RAID adapter support"
+ depends on PCI && SCSI && ATA
+ select FW_LOADER
+ ---help---
+ This driver supports the IBM Power Linux family RAID adapters.
+ This includes IBM pSeries 5712, 5703, 5709, and 570A, as well
+ as IBM iSeries 5702, 5703, 5709, and 570A.
+
+config SCSI_IPR_TRACE
+ bool "enable driver internal trace"
+ depends on SCSI_IPR
+ default y
+ help
+ If you say Y here, the driver will trace all commands issued
+ to the adapter. Performance impact is minimal. Trace can be
+ dumped using /sys/bus/class/scsi_host/hostXX/trace.
+
+config SCSI_IPR_DUMP
+ bool "enable adapter dump support"
+ depends on SCSI_IPR
+ default y
+ help
+ If you say Y here, the driver will support adapter crash dump.
+ If you enable this support, the iprdump daemon can be used
+ to capture adapter failure analysis information.
+
+config SCSI_ZALON
+ tristate "Zalon SCSI support"
+ depends on GSC && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ The Zalon is a GSC/HSC bus interface chip that sits between the
+ PA-RISC processor and the NCR 53c720 SCSI controller on C100,
+ C110, J200, J210 and some D, K & R-class machines. It's also
+ used on the add-in Bluefish, Barracuda & Shrike SCSI cards.
+ Say Y here if you have one of these machines or cards.
+
+config SCSI_NCR_Q720
+ tristate "NCR Quad 720 MCA SCSI support"
+ depends on MCA && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is a driver for the MicroChannel Quad 720 card produced by
+ NCR and commonly used in 345x/35xx/4100 class machines. It always
+ tries to negotiate sync and uses tag command queueing.
+
+ Unless you have an NCR manufactured machine, the chances are that
+ you do not have this SCSI card, so say N.
+
+config SCSI_NCR53C8XX_DEFAULT_TAGS
+ int "default tagged command queue depth"
+ depends on SCSI_ZALON || SCSI_NCR_Q720
+ default "8"
+ ---help---
+ "Tagged command queuing" is a feature of SCSI-2 which improves
+ performance: the host adapter can send several SCSI commands to a
+ device's queue even if previous commands haven't finished yet.
+ Because the device is intelligent, it can optimize its operations
+ (like head positioning) based on its own request queue. Some SCSI
+ devices don't implement this properly; if you want to disable this
+ feature, enter 0 or 1 here (it doesn't matter which).
+
+ The default value is 8 and should be supported by most hard disks.
+ This value can be overridden from the boot command line using the
+ 'tags' option as follows (example):
+ 'ncr53c8xx=tags:4/t2t3q16/t0u2q10' will set default queue depth to
+ 4, set queue depth to 16 for target 2 and target 3 on controller 0
+ and set queue depth to 10 for target 0 / lun 2 on controller 1.
+
+ The normal answer therefore is to go with the default 8 and to use
+ a boot command line option for devices that need to use a different
+ command queue depth.
+
+ There is no safe option other than using good SCSI devices.
+
+config SCSI_NCR53C8XX_MAX_TAGS
+ int "maximum number of queued commands"
+ depends on SCSI_ZALON || SCSI_NCR_Q720
+ default "32"
+ ---help---
+ This option allows you to specify the maximum number of commands
+ that can be queued to any device, when tagged command queuing is
+ possible. The default value is 32. Minimum is 2, maximum is 64.
+ Modern hard disks are able to support 64 tags and even more, but
+ do not seem to be faster when more than 32 tags are being used.
+
+ So, the normal answer here is to go with the default value 32 unless
+ you are using very large hard disks with large cache (>= 1 MB) that
+ are able to take advantage of more than 32 tagged commands.
+
+ There is no safe option and the default answer is recommended.
+
+config SCSI_NCR53C8XX_SYNC
+ int "synchronous transfers frequency in MHz"
+ depends on SCSI_ZALON || SCSI_NCR_Q720
+ default "20"
+ ---help---
+ The SCSI Parallel Interface-2 Standard defines 5 classes of transfer
+ rates: FAST-5, FAST-10, FAST-20, FAST-40 and FAST-80. The numbers
+ are respectively the maximum data transfer rates in mega-transfers
+ per second for each class. For example, a FAST-20 Wide 16 device is
+ able to transfer data at 20 million 16 bit packets per second for a
+ total rate of 40 MB/s.
+
+ You may specify 0 if you want to only use asynchronous data
+ transfers. This is the safest and slowest option. Otherwise, specify
+ a value between 5 and 80, depending on the capability of your SCSI
+ controller. The higher the number, the faster the data transfer.
+ Note that 80 should normally be ok since the driver decreases the
+ value automatically according to the controller's capabilities.
+
+ Your answer to this question is ignored for controllers with NVRAM,
+ since the driver will get this information from the user set-up. It
+ also can be overridden using a boot setup option, as follows
+ (example): 'ncr53c8xx=sync:12' will allow the driver to negotiate
+ for FAST-20 synchronous data transfer (20 mega-transfers per
+ second).
+
+ The normal answer therefore is not to go with the default but to
+ select the maximum value 80 allowing the driver to use the maximum
+ value supported by each controller. If this causes problems with
+ your SCSI devices, you should come back and decrease the value.
+
+ There is no safe option other than using good cabling, right
+ terminations and SCSI conformant devices.
+
+config SCSI_NCR53C8XX_NO_DISCONNECT
+ bool "not allow targets to disconnect"
+ depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
+ help
+ This option is only provided for safety if you suspect some SCSI
+ device of yours to not support properly the target-disconnect
+ feature. In that case, you would say Y here. In general however, to
+ not allow targets to disconnect is not reasonable if there is more
+ than 1 device on a SCSI bus. The normal answer therefore is N.
+
+config SCSI_PAS16
+ tristate "PAS16 SCSI support"
+ depends on ISA && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This is support for a SCSI host adapter. It is explained in section
+ 3.10 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. If it doesn't work out
+ of the box, you may have to change some settings in
+ <file:drivers/scsi/pas16.h>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pas16.
+
+config SCSI_QLOGIC_FAS
+ tristate "Qlogic FAS SCSI support"
+ depends on ISA && SCSI
+ ---help---
+ This is a driver for the ISA, VLB, and PCMCIA versions of the Qlogic
+ FastSCSI! cards as well as any other card based on the FASXX chip
+ (including the Control Concepts SCSI/IDE/SIO/PIO/FDC cards).
+
+ This driver does NOT support the PCI versions of these cards. The
+ PCI versions are supported by the Qlogic ISP driver ("Qlogic ISP
+ SCSI support"), below.
+
+ Information about this driver is contained in
+ <file:Documentation/scsi/qlogicfas.txt>. You should also read the
+ SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qlogicfas.
+
+config SCSI_QLOGIC_1280
+ tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
+ depends on PCI && SCSI
+ help
+ Say Y if you have a QLogic ISP1240/1x80/1x160 SCSI host adapter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qla1280.
+
+config SCSI_QLOGICPTI
+ tristate "PTI Qlogic, ISP Driver"
+ depends on SBUS && SCSI
+ help
+ This driver supports SBUS SCSI controllers from PTI or QLogic. These
+ controllers are known under Solaris as qpti and in the openprom as
+ PTI,ptisp or QLGC,isp. Note that PCI QLogic SCSI controllers are
+ driven by a different driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qlogicpti.
+
+source "drivers/scsi/qla2xxx/Kconfig"
+source "drivers/scsi/qla4xxx/Kconfig"
+
+config SCSI_LPFC
+ tristate "Emulex LightPulse Fibre Channel Support"
+ depends on PCI && SCSI
+ depends on SCSI_FC_ATTRS
+ select CRC_T10DIF
+ help
+ This lpfc driver supports the Emulex LightPulse
+ Family of Fibre Channel PCI host adapters.
+
+config SCSI_LPFC_DEBUG_FS
+ bool "Emulex LightPulse Fibre Channel debugfs Support"
+ depends on SCSI_LPFC && DEBUG_FS
+ help
+ This makes debugging information from the lpfc driver
+ available via the debugfs filesystem.
+
+config SCSI_SIM710
+ tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
+ depends on (EISA || MCA) && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This driver is for NCR53c710 based SCSI host adapters.
+
+ It currently supports Compaq EISA cards and NCR MCA cards
+
+config SCSI_SYM53C416
+ tristate "Symbios 53c416 SCSI support"
+ depends on ISA && SCSI
+ ---help---
+ This is support for the sym53c416 SCSI host adapter, the SCSI
+ adapter that comes with some HP scanners. This driver requires that
+ the sym53c416 is configured first using some sort of PnP
+ configuration program (e.g. isapnp) or by a PnP aware BIOS. If you
+ are using isapnp then you need to compile this driver as a module
+ and then load it using insmod after isapnp has run. The parameters
+ of the configured card(s) should be passed to the driver. The format
+ is:
+
+ insmod sym53c416 sym53c416=<base>,<irq> [sym53c416_1=<base>,<irq>]
+
+ To compile this driver as a module, choose M here: the
+ module will be called sym53c416.
+
+config SCSI_DC395x
+ tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
+ depends on PCI && SCSI
+ ---help---
+ This driver supports PCI SCSI host adapters based on the ASIC
+ TRM-S1040 chip, e.g Tekram DC395(U/UW/F) and DC315(U) variants.
+
+ This driver works, but is still in experimental status. So better
+ have a bootable disk and a backup in case of emergency.
+
+ Documentation can be found in <file:Documentation/scsi/dc395x.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dc395x.
+
+config SCSI_AM53C974
+ tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)"
+ depends on PCI && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This driver supports PCI SCSI host adapters based on the Am53C974A
+ chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard
+ PCscsi/PCnet (Am53/79C974) solutions.
+ This is a new implementation base on the generic esp_scsi driver.
+
+ Documentation can be found in <file:Documentation/scsi/tmscsim.txt>.
+
+ Note that this driver does NOT support Tekram DC390W/U/F, which are
+ based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those.
+
+ To compile this driver as a module, choose M here: the
+ module will be called am53c974.
+
+config SCSI_T128
+ tristate "Trantor T128/T128F/T228 SCSI support"
+ depends on ISA && SCSI
+ select SCSI_SPI_ATTRS
+ select CHECK_SIGNATURE
+ ---help---
+ This is support for a SCSI host adapter. It is explained in section
+ 3.11 of the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. If it doesn't work out
+ of the box, you may have to change some settings in
+ <file:drivers/scsi/t128.h>. Note that Trantor was purchased by
+ Adaptec, and some former Trantor products are being sold under the
+ Adaptec name.
+
+ To compile this driver as a module, choose M here: the
+ module will be called t128.
+
+config SCSI_U14_34F
+ tristate "UltraStor 14F/34F support"
+ depends on ISA && SCSI && ISA_DMA_API
+ ---help---
+ This is support for the UltraStor 14F and 34F SCSI-2 host adapters.
+ The source at <file:drivers/scsi/u14-34f.c> contains some
+ information about this hardware. If the driver doesn't work out of
+ the box, you may have to change some settings in
+ <file: drivers/scsi/u14-34f.c>. Read the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that there is also
+ another driver for the same hardware: "UltraStor SCSI support",
+ below. You should say Y to both only if you want 24F support as
+ well.
+
+ To compile this driver as a module, choose M here: the
+ module will be called u14-34f.
+
+config SCSI_U14_34F_TAGGED_QUEUE
+ bool "enable tagged command queueing"
+ depends on SCSI_U14_34F
+ help
+ This is a feature of SCSI-2 which improves performance: the host
+ adapter can send several SCSI commands to a device's queue even if
+ previous commands haven't finished yet.
+ This is equivalent to the "u14-34f=tc:y" boot option.
+
+config SCSI_U14_34F_LINKED_COMMANDS
+ bool "enable elevator sorting"
+ depends on SCSI_U14_34F
+ help
+ This option enables elevator sorting for all probed SCSI disks and
+ CD-ROMs. It definitely reduces the average seek distance when doing
+ random seeks, but this does not necessarily result in a noticeable
+ performance improvement: your mileage may vary...
+ This is equivalent to the "u14-34f=lc:y" boot option.
+
+config SCSI_U14_34F_MAX_TAGS
+ int "maximum number of queued commands"
+ depends on SCSI_U14_34F
+ default "8"
+ help
+ This specifies how many SCSI commands can be maximally queued for
+ each probed SCSI device. You should reduce the default value of 8
+ only if you have disks with buggy or limited tagged command support.
+ Minimum is 2 and maximum is 14. This value is also the window size
+ used by the elevator sorting option above. The effective value used
+ by the driver for each probed SCSI device is reported at boot time.
+ This is equivalent to the "u14-34f=mq:8" boot option.
+
+config SCSI_ULTRASTOR
+ tristate "UltraStor SCSI support"
+ depends on X86 && ISA && SCSI
+ ---help---
+ This is support for the UltraStor 14F, 24F and 34F SCSI-2 host
+ adapter family. This driver is explained in section 3.12 of the
+ SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. If it doesn't work out
+ of the box, you may have to change some settings in
+ <file:drivers/scsi/ultrastor.h>.
+
+ Note that there is also another driver for the same hardware:
+ "UltraStor 14F/34F support", above.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ultrastor.
+
+config SCSI_NSP32
+ tristate "Workbit NinjaSCSI-32Bi/UDE support"
+ depends on PCI && SCSI && !64BIT
+ help
+ This is support for the Workbit NinjaSCSI-32Bi/UDE PCI/Cardbus
+ SCSI host adapter. Please read the SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nsp32.
+
+config SCSI_WD719X
+ tristate "Western Digital WD7193/7197/7296 support"
+ depends on PCI && SCSI
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for Western Digital WD7193, WD7197 and WD7296 PCI
+ SCSI controllers (based on WD33C296A chip).
+
+config SCSI_DEBUG
+ tristate "SCSI debugging host and device simulator"
+ depends on SCSI
+ select CRC_T10DIF
+ help
+ This pseudo driver simulates one or more hosts (SCSI initiators),
+ each with one or more targets, each with one or more logical units.
+ Defaults to one of each, creating a small RAM disk device. Many
+ parameters found in the /sys/bus/pseudo/drivers/scsi_debug
+ directory can be tweaked at run time.
+ See <http://sg.danny.cz/sg/sdebug26.html> for more information.
+ Mainly used for testing and best as a module. If unsure, say N.
+
+config SCSI_MESH
+ tristate "MESH (Power Mac internal SCSI) support"
+ depends on PPC32 && PPC_PMAC && SCSI
+ help
+ Many Power Macintoshes and clones have a MESH (Macintosh Enhanced
+ SCSI Hardware) SCSI bus adaptor (the 7200 doesn't, but all of the
+ other Power Macintoshes do). Say Y to include support for this SCSI
+ adaptor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mesh.
+
+config SCSI_MESH_SYNC_RATE
+ int "maximum synchronous transfer rate (MB/s) (0 = async)"
+ depends on SCSI_MESH
+ default "5"
+ help
+ On Power Macintoshes (and clones) where the MESH SCSI bus adaptor
+ drives a bus which is entirely internal to the machine (such as the
+ 7500, 7600, 8500, etc.), the MESH is capable of synchronous
+ operation at up to 10 MB/s. On machines where the SCSI bus
+ controlled by the MESH can have external devices connected, it is
+ usually rated at 5 MB/s. 5 is a safe value here unless you know the
+ MESH SCSI bus is internal only; in that case you can say 10. Say 0
+ to disable synchronous operation.
+
+config SCSI_MESH_RESET_DELAY_MS
+ int "initial bus reset delay (ms) (0 = no reset)"
+ depends on SCSI_MESH
+ default "4000"
+
+config SCSI_MAC53C94
+ tristate "53C94 (Power Mac external SCSI) support"
+ depends on PPC32 && PPC_PMAC && SCSI
+ help
+ On Power Macintoshes (and clones) with two SCSI buses, the external
+ SCSI bus is usually controlled by a 53C94 SCSI bus adaptor. Older
+ machines which only have one SCSI bus, such as the 7200, also use
+ the 53C94. Say Y to include support for the 53C94.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mac53c94.
+
+source "drivers/scsi/arm/Kconfig"
+
+config JAZZ_ESP
+ bool "MIPS JAZZ FAS216 SCSI support"
+ depends on MACH_JAZZ && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is the driver for the onboard SCSI host adapter of MIPS Magnum
+ 4000, Acer PICA, Olivetti M700-10 and a few other identical OEM
+ systems.
+
+config A3000_SCSI
+ tristate "A3000 WD33C93A support"
+ depends on AMIGA && SCSI
+ help
+ If you have an Amiga 3000 and have SCSI devices connected to the
+ built-in SCSI controller, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called a3000.
+
+config A2091_SCSI
+ tristate "A2091/A590 WD33C93A support"
+ depends on ZORRO && SCSI
+ help
+ If you have a Commodore A2091 SCSI controller, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called a2091.
+
+config GVP11_SCSI
+ tristate "GVP Series II WD33C93A support"
+ depends on ZORRO && SCSI
+ ---help---
+ If you have a Great Valley Products Series II SCSI controller,
+ answer Y. Also say Y if you have a later model of GVP SCSI
+ controller (such as the GVP A4008 or a Combo board). Otherwise,
+ answer N. This driver does NOT work for the T-Rex series of
+ accelerators from TekMagic and GVP-M.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gvp11.
+
+config SCSI_A4000T
+ tristate "A4000T NCR53c710 SCSI support"
+ depends on AMIGA && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ If you have an Amiga 4000T and have SCSI devices connected to the
+ built-in SCSI controller, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called a4000t.
+
+config SCSI_ZORRO7XX
+ tristate "Zorro NCR53c710 SCSI support"
+ depends on ZORRO && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ Support for various NCR53c710-based SCSI controllers on Zorro
+ expansion boards for the Amiga.
+ This includes:
+ - the Amiga 4091 Zorro III SCSI-2 controller,
+ - the MacroSystem Development's WarpEngine Amiga SCSI-2 controller
+ (info at
+ <http://www.lysator.liu.se/amiga/ar/guide/ar310.guide?FEATURE5>),
+ - the SCSI controller on the Phase5 Blizzard PowerUP 603e+
+ accelerator card for the Amiga 1200,
+ - the SCSI controller on the GVP Turbo 040/060 accelerator.
+
+config ATARI_SCSI
+ tristate "Atari native SCSI support"
+ depends on ATARI && SCSI
+ select SCSI_SPI_ATTRS
+ select NVRAM
+ ---help---
+ If you have an Atari with built-in NCR5380 SCSI controller (TT,
+ Falcon, ...) say Y to get it supported. Of course also, if you have
+ a compatible SCSI controller (e.g. for Medusa).
+
+ To compile this driver as a module, choose M here: the
+ module will be called atari_scsi.
+
+ This driver supports both styles of NCR integration into the
+ system: the TT style (separate DMA), and the Falcon style (via
+ ST-DMA, replacing ACSI). It does NOT support other schemes, like
+ in the Hades (without DMA).
+
+config ATARI_SCSI_TOSHIBA_DELAY
+ bool "Long delays for Toshiba CD-ROMs"
+ depends on ATARI_SCSI
+ help
+ This option increases the delay after a SCSI arbitration to
+ accommodate some flaky Toshiba CD-ROM drives. Say Y if you intend to
+ use a Toshiba CD-ROM drive; otherwise, the option is not needed and
+ would impact performance a bit, so say N.
+
+config ATARI_SCSI_RESET_BOOT
+ bool "Reset SCSI-devices at boottime"
+ depends on ATARI_SCSI
+ help
+ Reset the devices on your Atari whenever it boots. This makes the
+ boot process fractionally longer but may assist recovery from errors
+ that leave the devices with SCSI operations partway completed.
+
+config MAC_SCSI
+ tristate "Macintosh NCR5380 SCSI"
+ depends on MAC && SCSI=y
+ select SCSI_SPI_ATTRS
+ help
+ This is the NCR 5380 SCSI controller included on most of the 68030
+ based Macintoshes. If you have one of these say Y and read the
+ SCSI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config SCSI_MAC_ESP
+ tristate "Macintosh NCR53c9[46] SCSI"
+ depends on MAC && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is the NCR 53c9x SCSI controller found on most of the 68040
+ based Macintoshes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mac_esp.
+
+config MVME147_SCSI
+ bool "WD33C93 SCSI driver for MVME147"
+ depends on MVME147 && SCSI=y
+ select SCSI_SPI_ATTRS
+ help
+ Support for the on-board SCSI controller on the Motorola MVME147
+ single-board computer.
+
+config MVME16x_SCSI
+ tristate "NCR53C710 SCSI driver for MVME16x"
+ depends on MVME16x && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710
+ SCSI controller chip. Almost everyone using one of these boards
+ will want to say Y to this question.
+
+config BVME6000_SCSI
+ tristate "NCR53C710 SCSI driver for BVME6000"
+ depends on BVME6000 && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710
+ SCSI controller chip. Almost everyone using one of these boards
+ will want to say Y to this question.
+
+config SUN3_SCSI
+ tristate "Sun3 NCR5380 SCSI"
+ depends on SUN3 && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This option will enable support for the OBIO (onboard io) NCR5380
+ SCSI controller found in the Sun 3/50 and 3/60, as well as for
+ "Sun3" type VME scsi controllers also based on the NCR5380.
+ General Linux information on the Sun 3 series (now discontinued)
+ is at <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+config SUN3X_ESP
+ bool "Sun3x ESP SCSI"
+ depends on SUN3X && SCSI=y
+ select SCSI_SPI_ATTRS
+ help
+ The ESP was an on-board SCSI controller used on Sun 3/80
+ machines. Say Y here to compile in support for it.
+
+config SCSI_SUNESP
+ tristate "Sparc ESP Scsi Driver"
+ depends on SBUS && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This is the driver for the Sun ESP SCSI host adapter. The ESP
+ chipset is present in most SPARC SBUS-based computers and
+ supports the Emulex family of ESP SCSI chips (esp100, esp100A,
+ esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun_esp.
+
+config ZFCP
+ tristate "FCP host bus adapter driver for IBM eServer zSeries"
+ depends on S390 && QDIO && SCSI
+ depends on SCSI_FC_ATTRS
+ help
+ If you want to access SCSI devices attached to your IBM eServer
+ zSeries by means of Fibre Channel interfaces say Y.
+ For details please refer to the documentation provided by IBM at
+ <http://oss.software.ibm.com/developerworks/opensource/linux390>
+
+ This driver is also available as a module. This module will be
+ called zfcp. If you want to compile it as a module, say M here
+ and read <file:Documentation/kbuild/modules.txt>.
+
+config SCSI_PMCRAID
+ tristate "PMC SIERRA Linux MaxRAID adapter support"
+ depends on PCI && SCSI && NET
+ ---help---
+ This driver supports the PMC SIERRA MaxRAID adapters.
+
+config SCSI_PM8001
+ tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver"
+ depends on PCI && SCSI
+ select SCSI_SAS_LIBSAS
+ help
+ This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip
+ based host adapters.
+
+config SCSI_BFA_FC
+ tristate "Brocade BFA Fibre Channel Support"
+ depends on PCI && SCSI
+ depends on SCSI_FC_ATTRS
+ help
+ This bfa driver supports all Brocade PCIe FC/FCOE host adapters.
+
+ To compile this driver as a module, choose M here. The module will
+ be called bfa.
+
+config SCSI_VIRTIO
+ tristate "virtio-scsi support"
+ depends on VIRTIO
+ select BLK_DEV_INTEGRITY
+ help
+ This is the virtual HBA driver for virtio. If the kernel will
+ be used in a virtual machine, say Y or M.
+
+source "drivers/scsi/csiostor/Kconfig"
+
+endif # SCSI_LOWLEVEL
+
+source "drivers/scsi/pcmcia/Kconfig"
+
+source "drivers/scsi/device_handler/Kconfig"
+
+source "drivers/scsi/osd/Kconfig"
+
+endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
new file mode 100644
index 000000000..dee160a4f
--- /dev/null
+++ b/drivers/scsi/Makefile
@@ -0,0 +1,200 @@
+#
+# Makefile for linux/drivers/scsi
+#
+# 30 May 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+# 20 Sep 2000, Torben Mathiasen <tmm@image.dk>
+# Changed link order to reflect new scsi initialization.
+#
+# *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!
+# The link order must be, SCSI Core, SCSI HBA drivers, and
+# lastly SCSI peripheral drivers (disk/tape/cdrom/etc.) to
+# satisfy certain initialization assumptions in the SCSI layer.
+# *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!
+
+
+CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
+CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
+
+obj-$(CONFIG_PCMCIA) += pcmcia/
+
+obj-$(CONFIG_SCSI) += scsi_mod.o
+
+obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+
+# --- NOTE ORDERING HERE ---
+# For kernel non-modular link, transport attributes need to
+# be initialised before drivers
+# --------------------------
+obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o
+obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o
+obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
+obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
+obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
+obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
+obj-$(CONFIG_SCSI_DH) += device_handler/
+
+obj-$(CONFIG_LIBFC) += libfc/
+obj-$(CONFIG_LIBFCOE) += fcoe/
+obj-$(CONFIG_FCOE) += fcoe/
+obj-$(CONFIG_FCOE_FNIC) += fnic/
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
+obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
+obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
+obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
+obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
+obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
+obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
+obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
+obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
+obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
+obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
+obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
+obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
+obj-$(CONFIG_SCSI_MAC_ESP) += esp_scsi.o mac_esp.o
+obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
+obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
+obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
+obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
+obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
+obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
+obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
+obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
+obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
+obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o
+obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
+obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
+obj-$(CONFIG_SCSI_AHA1740) += aha1740.o
+obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/
+obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/
+obj-$(CONFIG_SCSI_AACRAID) += aacraid/
+obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
+obj-$(CONFIG_SCSI_PM8001) += pm8001/
+obj-$(CONFIG_SCSI_ISCI) += isci/
+obj-$(CONFIG_SCSI_IPS) += ips.o
+obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
+obj-$(CONFIG_SCSI_IN2000) += in2000.o
+obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
+obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
+obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
+obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
+obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
+obj-$(CONFIG_SCSI_SYM53C416) += sym53c416.o
+obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
+obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
+obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
+obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
+obj-$(CONFIG_SCSI_LPFC) += lpfc/
+obj-$(CONFIG_SCSI_BFA_FC) += bfa/
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
+obj-$(CONFIG_SCSI_PAS16) += pas16.o
+obj-$(CONFIG_SCSI_T128) += t128.o
+obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
+obj-$(CONFIG_SCSI_HPSA) += hpsa.o
+obj-$(CONFIG_SCSI_DTC3280) += dtc.o
+obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
+obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
+obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
+obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
+obj-$(CONFIG_SCSI_EATA) += eata.o
+obj-$(CONFIG_SCSI_DC395x) += dc395x.o
+obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
+obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
+obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
+obj-$(CONFIG_MEGARAID_SAS) += megaraid/
+obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
+obj-$(CONFIG_SCSI_UFSHCD) += ufs/
+obj-$(CONFIG_SCSI_ACARD) += atp870u.o
+obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
+obj-$(CONFIG_SCSI_GDTH) += gdth.o
+obj-$(CONFIG_SCSI_INITIO) += initio.o
+obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
+obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
+obj-$(CONFIG_SCSI_MESH) += mesh.o
+obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
+obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
+obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
+obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
+obj-$(CONFIG_SCSI_PPA) += ppa.o
+obj-$(CONFIG_SCSI_IMM) += imm.o
+obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
+obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o
+obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
+obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
+obj-$(CONFIG_SCSI_NSP32) += nsp32.o
+obj-$(CONFIG_SCSI_IPR) += ipr.o
+obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
+obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
+obj-$(CONFIG_SCSI_STEX) += stex.o
+obj-$(CONFIG_SCSI_MVSAS) += mvsas/
+obj-$(CONFIG_SCSI_MVUMI) += mvumi.o
+obj-$(CONFIG_PS3_ROM) += ps3rom.o
+obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
+obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
+obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
+obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
+obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
+obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
+obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o
+obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
+obj-$(CONFIG_SCSI_WD719X) += wd719x.o
+
+obj-$(CONFIG_ARM) += arm/
+
+obj-$(CONFIG_CHR_DEV_ST) += st.o
+obj-$(CONFIG_CHR_DEV_OSST) += osst.o
+obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
+obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
+obj-$(CONFIG_CHR_DEV_SG) += sg.o
+obj-$(CONFIG_CHR_DEV_SCH) += ch.o
+obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
+
+obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
+
+# This goes last, so that "real" scsi devices probe earlier
+obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
+scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \
+ scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
+scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
+scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
+scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
+scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
+scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
+scsi_mod-y += scsi_trace.o scsi_logging.o
+scsi_mod-$(CONFIG_PM) += scsi_pm.o
+
+hv_storvsc-y := storvsc_drv.o
+
+sd_mod-objs := sd.o
+sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+
+sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
+ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
+ := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
+ -DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
+CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
+zalon7xx-objs := zalon.o ncr53c8xx.o
+NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
+oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
+
+# Files generated that shall be removed upon make clean
+clean-files := 53c700_d.h 53c700_u.h
+
+$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
+
+# If you want to play with the firmware, uncomment
+# GENERATE_FIRMWARE := 1
+
+ifdef GENERATE_FIRMWARE
+
+$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl
+ $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $<
+
+endif
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
new file mode 100644
index 000000000..a777e5c41
--- /dev/null
+++ b/drivers/scsi/NCR5380.c
@@ -0,0 +1,2749 @@
+/*
+ * NCR 5380 generic driver routines. These should make it *trivial*
+ * to implement 5380 SCSI drivers under Linux with a non-trantor
+ * architecture.
+ *
+ * Note that these routines also work with NR53c400 family chips.
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Revision 1.10 1998/9/2 Alan Cox
+ * (alan@lxorguk.ukuu.org.uk)
+ * Fixed up the timer lockups reported so far. Things still suck. Looking
+ * forward to 2.3 and per device request queues. Then it'll be possible to
+ * SMP thread this beast and improve life no end.
+
+ * Revision 1.9 1997/7/27 Ronald van Cuijlenborg
+ * (ronald.van.cuijlenborg@tip.nl or nutty@dds.nl)
+ * (hopefully) fixed and enhanced USLEEP
+ * added support for DTC3181E card (for Mustek scanner)
+ *
+
+ * Revision 1.8 Ingmar Baumgart
+ * (ingmar@gonzo.schwaben.de)
+ * added support for NCR53C400a card
+ *
+
+ * Revision 1.7 1996/3/2 Ray Van Tassle (rayvt@comm.mot.com)
+ * added proc_info
+ * added support needed for DTC 3180/3280
+ * fixed a couple of bugs
+ *
+
+ * Revision 1.5 1994/01/19 09:14:57 drew
+ * Fixed udelay() hack that was being used on DATAOUT phases
+ * instead of a proper wait for the final handshake.
+ *
+ * Revision 1.4 1994/01/19 06:44:25 drew
+ * *** empty log message ***
+ *
+ * Revision 1.3 1994/01/19 05:24:40 drew
+ * Added support for TCR LAST_BYTE_SENT bit.
+ *
+ * Revision 1.2 1994/01/15 06:14:11 drew
+ * REAL DMA support, bug fixes.
+ *
+ * Revision 1.1 1994/01/15 06:00:54 drew
+ * Initial revision
+ *
+ */
+
+/*
+ * Further development / testing that should be done :
+ * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
+ * code so that everything does the same thing that's done at the
+ * end of a pseudo-DMA read operation.
+ *
+ * 2. Fix REAL_DMA (interrupt driven, polled works fine) -
+ * basically, transfer size needs to be reduced by one
+ * and the last byte read as is done with PSEUDO_DMA.
+ *
+ * 4. Test SCSI-II tagged queueing (I have no devices which support
+ * tagged queueing)
+ *
+ * 5. Test linked command handling code after Eric is ready with
+ * the high level code.
+ */
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_transport_spi.h>
+
+#if (NDEBUG & NDEBUG_LISTS)
+#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+#else
+#define LIST(x,y)
+#define REMOVE(w,x,y,z)
+#endif
+
+#ifndef notyet
+#undef LINKED
+#undef REAL_DMA
+#endif
+
+#ifdef REAL_DMA_POLL
+#undef READ_OVERRUNS
+#define READ_OVERRUNS
+#endif
+
+#ifdef BOARD_REQUIRES_NO_DELAY
+#define io_recovery_delay(x)
+#else
+#define io_recovery_delay(x) udelay(x)
+#endif
+
+/*
+ * Design
+ *
+ * This is a generic 5380 driver. To use it on a different platform,
+ * one simply writes appropriate system specific macros (ie, data
+ * transfer - some PC's will use the I/O bus, 68K's must use
+ * memory mapped) and drops this file in their 'C' wrapper.
+ *
+ * (Note from hch: unfortunately it was not enough for the different
+ * m68k folks and instead of improving this driver they copied it
+ * and hacked it up for their needs. As a consequence they lost
+ * most updates to this driver. Maybe someone will fix all these
+ * drivers to use a common core one day..)
+ *
+ * As far as command queueing, two queues are maintained for
+ * each 5380 in the system - commands that haven't been issued yet,
+ * and commands that are currently executing. This means that an
+ * unlimited number of commands may be queued, letting
+ * more commands propagate from the higher driver levels giving higher
+ * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
+ * allowing multiple commands to propagate all the way to a SCSI-II device
+ * while a command is already executing.
+ *
+ *
+ * Issues specific to the NCR5380 :
+ *
+ * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
+ * piece of hardware that requires you to sit in a loop polling for
+ * the REQ signal as long as you are connected. Some devices are
+ * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
+ * while doing long seek operations.
+ *
+ * The workaround for this is to keep track of devices that have
+ * disconnected. If the device hasn't disconnected, for commands that
+ * should disconnect, we do something like
+ *
+ * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
+ *
+ * Some tweaking of N and M needs to be done. An algorithm based
+ * on "time to data" would give the best results as long as short time
+ * to datas (ie, on the same track) were considered, however these
+ * broken devices are the exception rather than the rule and I'd rather
+ * spend my time optimizing for the normal case.
+ *
+ * Architecture :
+ *
+ * At the heart of the design is a coroutine, NCR5380_main,
+ * which is started from a workqueue for each NCR5380 host in the
+ * system. It attempts to establish I_T_L or I_T_L_Q nexuses by
+ * removing the commands from the issue queue and calling
+ * NCR5380_select() if a nexus is not established.
+ *
+ * Once a nexus is established, the NCR5380_information_transfer()
+ * phase goes through the various phases as instructed by the target.
+ * if the target goes into MSG IN and sends a DISCONNECT message,
+ * the command structure is placed into the per instance disconnected
+ * queue, and NCR5380_main tries to find more work. If the target is
+ * idle for too long, the system will try to sleep.
+ *
+ * If a command has disconnected, eventually an interrupt will trigger,
+ * calling NCR5380_intr() which will in turn call NCR5380_reselect
+ * to reestablish a nexus. This will run main if necessary.
+ *
+ * On command termination, the done function will be called as
+ * appropriate.
+ *
+ * SCSI pointers are maintained in the SCp field of SCSI command
+ * structures, being initialized after the command is connected
+ * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
+ * Note that in violation of the standard, an implicit SAVE POINTERS operation
+ * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
+ */
+
+/*
+ * Using this file :
+ * This file a skeleton Linux SCSI driver for the NCR 5380 series
+ * of chips. To use it, you write an architecture specific functions
+ * and macros and include this file in your driver.
+ *
+ * These macros control options :
+ * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
+ * defined.
+ *
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
+ * transceivers.
+ *
+ * DONT_USE_INTR - if defined, never use interrupts, even if we probe or
+ * override-configure an IRQ.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * LINKED - if defined, linked commands are supported.
+ *
+ * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
+ *
+ * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
+ *
+ * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
+ * rely on phase mismatch and EOP interrupts to determine end
+ * of phase.
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize
+ * in the high level code.
+ *
+ * Defaults for these will be provided although the user may want to adjust
+ * these to allocate CPU resources to the SCSI driver or "real" code.
+ *
+ * USLEEP_SLEEP - amount of time, in jiffies, to sleep
+ *
+ * USLEEP_POLL - amount of time, in jiffies, to poll
+ *
+ * These macros MUST be defined :
+ * NCR5380_local_declare() - declare any local variables needed for your
+ * transfer routines.
+ *
+ * NCR5380_setup(instance) - initialize any local variables needed from a given
+ * instance of the host adapter for NCR5380_{read,write,pread,pwrite}
+ *
+ * NCR5380_read(register) - read from the specified register
+ *
+ * NCR5380_write(register, value) - write to the specific register
+ *
+ * NCR5380_implementation_fields - additional fields needed for this
+ * specific implementation of the NCR5380
+ *
+ * Either real DMA *or* pseudo DMA may be implemented
+ * REAL functions :
+ * NCR5380_REAL_DMA should be defined if real DMA is to be used.
+ * Note that the DMA setup functions should return the number of bytes
+ * that they were able to program the controller for.
+ *
+ * Also note that generic i386/PC versions of these macros are
+ * available as NCR5380_i386_dma_write_setup,
+ * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
+ *
+ * NCR5380_dma_write_setup(instance, src, count) - initialize
+ * NCR5380_dma_read_setup(instance, dst, count) - initialize
+ * NCR5380_dma_residual(instance); - residual count
+ *
+ * PSEUDO functions :
+ * NCR5380_pwrite(instance, src, count)
+ * NCR5380_pread(instance, dst, count);
+ *
+ * The generic driver is initialized by calling NCR5380_init(instance),
+ * after setting the appropriate host specific fields and ID. If the
+ * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
+ * possible) function may be used.
+ */
+
+static int do_abort(struct Scsi_Host *host);
+static void do_reset(struct Scsi_Host *host);
+
+/*
+ * initialize_SCp - init the scsi pointer field
+ * @cmd: command block to set up
+ *
+ * Set up the internal fields in the SCSI command.
+ */
+
+static inline void initialize_SCp(struct scsi_cmnd *cmd)
+{
+ /*
+ * Initialize the Scsi Pointer field so that all of the commands in the
+ * various queues are valid.
+ */
+
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
+ }
+}
+
+/**
+ * NCR5380_poll_politely - wait for NCR5380 status bits
+ * @instance: controller to poll
+ * @reg: 5380 register to poll
+ * @bit: Bitmask to check
+ * @val: Value required to exit
+ *
+ * Polls the NCR5380 in a reasonably efficient manner waiting for
+ * an event to occur, after a short quick poll we begin giving the
+ * CPU back in non IRQ contexts
+ *
+ * Returns the value of the register or a negative error code.
+ */
+
+static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, int val, int t)
+{
+ NCR5380_local_declare();
+ int n = 500; /* At about 8uS a cycle for the cpu access */
+ unsigned long end = jiffies + t;
+ int r;
+
+ NCR5380_setup(instance);
+
+ while( n-- > 0)
+ {
+ r = NCR5380_read(reg);
+ if((r & bit) == val)
+ return 0;
+ cpu_relax();
+ }
+
+ /* t time yet ? */
+ while(time_before(jiffies, end))
+ {
+ r = NCR5380_read(reg);
+ if((r & bit) == val)
+ return 0;
+ if(!in_interrupt())
+ cond_resched();
+ else
+ cpu_relax();
+ }
+ return -ETIMEDOUT;
+}
+
+static struct {
+ unsigned char value;
+ const char *name;
+} phases[] __maybe_unused = {
+ {PHASE_DATAOUT, "DATAOUT"},
+ {PHASE_DATAIN, "DATAIN"},
+ {PHASE_CMDOUT, "CMDOUT"},
+ {PHASE_STATIN, "STATIN"},
+ {PHASE_MSGOUT, "MSGOUT"},
+ {PHASE_MSGIN, "MSGIN"},
+ {PHASE_UNKNOWN, "UNKNOWN"}
+};
+
+#if NDEBUG
+static struct {
+ unsigned char mask;
+ const char *name;
+} signals[] = {
+ {SR_DBP, "PARITY"},
+ {SR_RST, "RST"},
+ {SR_BSY, "BSY"},
+ {SR_REQ, "REQ"},
+ {SR_MSG, "MSG"},
+ {SR_CD, "CD"},
+ {SR_IO, "IO"},
+ {SR_SEL, "SEL"},
+ {0, NULL}
+},
+basrs[] = {
+ {BASR_ATN, "ATN"},
+ {BASR_ACK, "ACK"},
+ {0, NULL}
+},
+icrs[] = {
+ {ICR_ASSERT_RST, "ASSERT RST"},
+ {ICR_ASSERT_ACK, "ASSERT ACK"},
+ {ICR_ASSERT_BSY, "ASSERT BSY"},
+ {ICR_ASSERT_SEL, "ASSERT SEL"},
+ {ICR_ASSERT_ATN, "ASSERT ATN"},
+ {ICR_ASSERT_DATA, "ASSERT DATA"},
+ {0, NULL}
+},
+mrs[] = {
+ {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"},
+ {MR_TARGET, "MODE TARGET"},
+ {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
+ {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
+ {MR_MONITOR_BSY, "MODE MONITOR BSY"},
+ {MR_DMA_MODE, "MODE DMA"},
+ {MR_ARBITRATE, "MODE ARBITRATION"},
+ {0, NULL}
+};
+
+/**
+ * NCR5380_print - print scsi bus signals
+ * @instance: adapter state to dump
+ *
+ * Print the SCSI bus signals for debugging purposes
+ *
+ * Locks: caller holds hostdata lock (not essential)
+ */
+
+static void NCR5380_print(struct Scsi_Host *instance)
+{
+ NCR5380_local_declare();
+ unsigned char status, data, basr, mr, icr, i;
+ NCR5380_setup(instance);
+
+ data = NCR5380_read(CURRENT_SCSI_DATA_REG);
+ status = NCR5380_read(STATUS_REG);
+ mr = NCR5380_read(MODE_REG);
+ icr = NCR5380_read(INITIATOR_COMMAND_REG);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+
+ printk("STATUS_REG: %02x ", status);
+ for (i = 0; signals[i].mask; ++i)
+ if (status & signals[i].mask)
+ printk(",%s", signals[i].name);
+ printk("\nBASR: %02x ", basr);
+ for (i = 0; basrs[i].mask; ++i)
+ if (basr & basrs[i].mask)
+ printk(",%s", basrs[i].name);
+ printk("\nICR: %02x ", icr);
+ for (i = 0; icrs[i].mask; ++i)
+ if (icr & icrs[i].mask)
+ printk(",%s", icrs[i].name);
+ printk("\nMODE: %02x ", mr);
+ for (i = 0; mrs[i].mask; ++i)
+ if (mr & mrs[i].mask)
+ printk(",%s", mrs[i].name);
+ printk("\n");
+}
+
+
+/*
+ * NCR5380_print_phase - show SCSI phase
+ * @instance: adapter to dump
+ *
+ * Print the current SCSI phase for debugging purposes
+ *
+ * Locks: none
+ */
+
+static void NCR5380_print_phase(struct Scsi_Host *instance)
+{
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ NCR5380_setup(instance);
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ printk("scsi%d : REQ not asserted, phase unknown.\n", instance->host_no);
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i);
+ printk("scsi%d : phase %s\n", instance->host_no, phases[i].name);
+ }
+}
+#endif
+
+/*
+ * These need tweaking, and would probably work best as per-device
+ * flags initialized differently for disk, tape, cd, etc devices.
+ * People with broken devices are free to experiment as to what gives
+ * the best results for them.
+ *
+ * USLEEP_SLEEP should be a minimum seek time.
+ *
+ * USLEEP_POLL should be a maximum rotational latency.
+ */
+#ifndef USLEEP_SLEEP
+/* 20 ms (reasonable hard disk speed) */
+#define USLEEP_SLEEP msecs_to_jiffies(20)
+#endif
+/* 300 RPM (floppy speed) */
+#ifndef USLEEP_POLL
+#define USLEEP_POLL msecs_to_jiffies(200)
+#endif
+#ifndef USLEEP_WAITLONG
+/* RvC: (reasonable time to wait on select error) */
+#define USLEEP_WAITLONG USLEEP_SLEEP
+#endif
+
+/*
+ * Function : int should_disconnect (unsigned char cmd)
+ *
+ * Purpose : decide whether a command would normally disconnect or
+ * not, since if it won't disconnect we should go to sleep.
+ *
+ * Input : cmd - opcode of SCSI command
+ *
+ * Returns : DISCONNECT_LONG if we should disconnect for a really long
+ * time (ie always, sleep, look for REQ active, sleep),
+ * DISCONNECT_TIME_TO_DATA if we would only disconnect for a normal
+ * time-to-data delay, DISCONNECT_NONE if this command would return
+ * immediately.
+ *
+ * Future sleep algorithms based on time to data can exploit
+ * something like this so they can differentiate between "normal"
+ * (ie, read, write, seek) and unusual commands (ie, * format).
+ *
+ * Note : We don't deal with commands that handle an immediate disconnect,
+ *
+ */
+
+static int should_disconnect(unsigned char cmd)
+{
+ switch (cmd) {
+ case READ_6:
+ case WRITE_6:
+ case SEEK_6:
+ case READ_10:
+ case WRITE_10:
+ case SEEK_10:
+ return DISCONNECT_TIME_TO_DATA;
+ case FORMAT_UNIT:
+ case SEARCH_HIGH:
+ case SEARCH_LOW:
+ case SEARCH_EQUAL:
+ return DISCONNECT_LONG;
+ default:
+ return DISCONNECT_NONE;
+ }
+}
+
+static void NCR5380_set_timer(struct NCR5380_hostdata *hostdata, unsigned long timeout)
+{
+ hostdata->time_expires = jiffies + timeout;
+ schedule_delayed_work(&hostdata->coroutine, timeout);
+}
+
+
+static int probe_irq __initdata = 0;
+
+/**
+ * probe_intr - helper for IRQ autoprobe
+ * @irq: interrupt number
+ * @dev_id: unused
+ * @regs: unused
+ *
+ * Set a flag to indicate the IRQ in question was received. This is
+ * used by the IRQ probe code.
+ */
+
+static irqreturn_t __init probe_intr(int irq, void *dev_id)
+{
+ probe_irq = irq;
+ return IRQ_HANDLED;
+}
+
+/**
+ * NCR5380_probe_irq - find the IRQ of an NCR5380
+ * @instance: NCR5380 controller
+ * @possible: bitmask of ISA IRQ lines
+ *
+ * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ *
+ * Locks: none, irqs must be enabled on entry
+ */
+
+static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+ int possible)
+{
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ unsigned long timeout;
+ int trying_irqs, i, mask;
+ NCR5380_setup(instance);
+
+ for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
+ if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
+ trying_irqs |= mask;
+
+ timeout = jiffies + msecs_to_jiffies(250);
+ probe_irq = NO_IRQ;
+
+ /*
+ * A interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
+ * to zero.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+ while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
+ schedule_timeout_uninterruptible(1);
+
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
+ if (trying_irqs & mask)
+ free_irq(i, NULL);
+
+ return probe_irq;
+}
+
+/**
+ * NCR58380_info - report driver and host information
+ * @instance: relevant scsi host instance
+ *
+ * For use as the host template info() handler.
+ *
+ * Locks: none
+ */
+
+static const char *NCR5380_info(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ return hostdata->info;
+}
+
+static void prepare_info(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ snprintf(hostdata->info, sizeof(hostdata->info),
+ "%s, io_port 0x%lx, n_io_port %d, "
+ "base 0x%lx, irq %d, "
+ "can_queue %d, cmd_per_lun %d, "
+ "sg_tablesize %d, this_id %d, "
+ "flags { %s%s%s}, "
+#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
+ "USLEEP_POLL %lu, USLEEP_WAITLONG %lu, "
+#endif
+ "options { %s} ",
+ instance->hostt->name, instance->io_port, instance->n_io_port,
+ instance->base, instance->irq,
+ instance->can_queue, instance->cmd_per_lun,
+ instance->sg_tablesize, instance->this_id,
+ hostdata->flags & FLAG_NCR53C400 ? "NCR53C400 " : "",
+ hostdata->flags & FLAG_DTC3181E ? "DTC3181E " : "",
+ hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
+#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
+ USLEEP_POLL, USLEEP_WAITLONG,
+#endif
+#ifdef AUTOPROBE_IRQ
+ "AUTOPROBE_IRQ "
+#endif
+#ifdef DIFFERENTIAL
+ "DIFFERENTIAL "
+#endif
+#ifdef REAL_DMA
+ "REAL_DMA "
+#endif
+#ifdef REAL_DMA_POLL
+ "REAL_DMA_POLL "
+#endif
+#ifdef PARITY
+ "PARITY "
+#endif
+#ifdef PSEUDO_DMA
+ "PSEUDO_DMA "
+#endif
+#ifdef UNSAFE
+ "UNSAFE "
+#endif
+#ifdef NCR53C400
+ "NCR53C400 "
+#endif
+ "");
+}
+
+/**
+ * NCR5380_print_status - dump controller info
+ * @instance: controller to dump
+ *
+ * Print commands in the various queues, called from NCR5380_abort
+ * and NCR5380_debug to aid debugging.
+ *
+ * Locks: called functions disable irqs
+ */
+
+static void NCR5380_print_status(struct Scsi_Host *instance)
+{
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
+}
+
+#ifdef PSEUDO_DMA
+/******************************************/
+/*
+ * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
+ *
+ * *buffer: I/O buffer
+ * **start: if inout == FALSE pointer into buffer where user read should start
+ * offset: current offset
+ * length: length of buffer
+ * hostno: Scsi_Host host_no
+ * inout: TRUE - user is writing; FALSE - user is reading
+ *
+ * Return the number of bytes read from or written
+ */
+
+static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
+ char *buffer, int length)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ hostdata->spin_max_r = 0;
+ hostdata->spin_max_w = 0;
+ return 0;
+}
+#endif
+
+static
+void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m);
+static
+void lprint_command(unsigned char *cmd, struct seq_file *m);
+static
+void lprint_opcode(int opcode, struct seq_file *m);
+
+static int __maybe_unused NCR5380_show_info(struct seq_file *m,
+ struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata;
+ struct scsi_cmnd *ptr;
+
+ hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+
+#ifdef PSEUDO_DMA
+ seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n",
+ hostdata->spin_max_w, hostdata->spin_max_r);
+#endif
+ spin_lock_irq(instance->host_lock);
+ if (!hostdata->connected)
+ seq_printf(m, "scsi%d: no currently connected command\n", instance->host_no);
+ else
+ lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
+ seq_printf(m, "scsi%d: issue_queue\n", instance->host_no);
+ for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
+ lprint_Scsi_Cmnd(ptr, m);
+
+ seq_printf(m, "scsi%d: disconnected_queue\n", instance->host_no);
+ for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
+ lprint_Scsi_Cmnd(ptr, m);
+ spin_unlock_irq(instance->host_lock);
+ return 0;
+}
+
+static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
+{
+ seq_printf(m, "scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
+ seq_puts(m, " command = ");
+ lprint_command(cmd->cmnd, m);
+}
+
+static void lprint_command(unsigned char *command, struct seq_file *m)
+{
+ int i, s;
+ lprint_opcode(command[0], m);
+ for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ seq_printf(m, "%02x ", command[i]);
+ seq_putc(m, '\n');
+}
+
+static void lprint_opcode(int opcode, struct seq_file *m)
+{
+ seq_printf(m, "%2d (0x%02x)", opcode, opcode);
+}
+
+
+/**
+ * NCR5380_init - initialise an NCR5380
+ * @instance: adapter to configure
+ * @flags: control flags
+ *
+ * Initializes *instance and corresponding 5380 chip,
+ * with flags OR'd into the initial flags value.
+ *
+ * Notes : I assume that the host, hostno, and id bits have been
+ * set correctly. I don't care about the irq and other fields.
+ *
+ * Returns 0 for success
+ *
+ * Locks: interrupts must be enabled when we are called
+ */
+
+static int NCR5380_init(struct Scsi_Host *instance, int flags)
+{
+ NCR5380_local_declare();
+ int i, pass;
+ unsigned long timeout;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+
+ if(in_interrupt())
+ printk(KERN_ERR "NCR5380_init called with interrupts off!\n");
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+
+#ifdef NCR53C400
+ if (flags & FLAG_NCR53C400)
+ instance->NCR5380_instance_name += NCR53C400_address_adjust;
+#endif
+
+ NCR5380_setup(instance);
+
+ hostdata->aborted = 0;
+ hostdata->id_mask = 1 << instance->this_id;
+ for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
+ if (i > hostdata->id_mask)
+ hostdata->id_higher_mask |= i;
+ for (i = 0; i < 8; ++i)
+ hostdata->busy[i] = 0;
+#ifdef REAL_DMA
+ hostdata->dmalen = 0;
+#endif
+ hostdata->targets_present = 0;
+ hostdata->connected = NULL;
+ hostdata->issue_queue = NULL;
+ hostdata->disconnected_queue = NULL;
+
+ INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
+
+ /* The CHECK code seems to break the 53C400. Will check it later maybe */
+ if (flags & FLAG_NCR53C400)
+ hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
+ else
+ hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT | flags;
+
+ hostdata->host = instance;
+ hostdata->time_expires = 0;
+
+ prepare_info(instance);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+#ifdef NCR53C400
+ if (hostdata->flags & FLAG_NCR53C400) {
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ }
+#endif
+
+ /*
+ * Detect and correct bus wedge problems.
+ *
+ * If the system crashed, it may have crashed in a state
+ * where a SCSI command was still executing, and the
+ * SCSI bus is not in a BUS FREE STATE.
+ *
+ * If this is the case, we'll try to abort the currently
+ * established nexus which we know nothing about, and that
+ * failing, do a hard reset of the SCSI bus
+ */
+
+ for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) {
+ switch (pass) {
+ case 1:
+ case 3:
+ case 5:
+ printk(KERN_INFO "scsi%d: SCSI bus busy, waiting up to five seconds\n", instance->host_no);
+ timeout = jiffies + 5 * HZ;
+ NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, 0, 5*HZ);
+ break;
+ case 2:
+ printk(KERN_WARNING "scsi%d: bus busy, attempting abort\n", instance->host_no);
+ do_abort(instance);
+ break;
+ case 4:
+ printk(KERN_WARNING "scsi%d: bus busy, attempting reset\n", instance->host_no);
+ do_reset(instance);
+ break;
+ case 6:
+ printk(KERN_ERR "scsi%d: bus locked solid or invalid override\n", instance->host_no);
+ return -ENXIO;
+ }
+ }
+ return 0;
+}
+
+/**
+ * NCR5380_exit - remove an NCR5380
+ * @instance: adapter to remove
+ */
+
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+
+ cancel_delayed_work_sync(&hostdata->coroutine);
+}
+
+/**
+ * NCR5380_queue_command - queue a command
+ * @cmd: SCSI command
+ * @done: completion handler
+ *
+ * cmd is added to the per instance issue_queue, with minor
+ * twiddling done to the host specific fields of cmd. If the
+ * main coroutine is not running, it is restarted.
+ *
+ * Locks: host lock taken by caller
+ */
+
+static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *))
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ struct scsi_cmnd *tmp;
+
+#if (NDEBUG & NDEBUG_NO_WRITE)
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n", instance->host_no);
+ cmd->result = (DID_ERROR << 16);
+ done(cmd);
+ return 0;
+ }
+#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
+
+ /*
+ * We use the host_scribble field as a pointer to the next command
+ * in a queue
+ */
+
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+ /*
+ * Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists.
+ */
+
+ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = cmd;
+ } else {
+ for (tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble);
+ LIST(cmd, tmp);
+ tmp->host_scribble = (unsigned char *) cmd;
+ }
+ dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
+
+ /* Run the coroutine if it isn't already running. */
+ /* Kick off command processing */
+ schedule_delayed_work(&hostdata->coroutine, 0);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(NCR5380_queue_command)
+
+/**
+ * NCR5380_main - NCR state machines
+ *
+ * NCR5380_main is a coroutine that runs as long as more work can
+ * be done on the NCR5380 host adapters in a system. Both
+ * NCR5380_queue_command() and NCR5380_intr() will try to start it
+ * in case it is not running.
+ *
+ * Locks: called as its own thread with no locks held. Takes the
+ * host lock and called routines may take the isa dma lock.
+ */
+
+static void NCR5380_main(struct work_struct *work)
+{
+ struct NCR5380_hostdata *hostdata =
+ container_of(work, struct NCR5380_hostdata, coroutine.work);
+ struct Scsi_Host *instance = hostdata->host;
+ struct scsi_cmnd *tmp, *prev;
+ int done;
+
+ spin_lock_irq(instance->host_lock);
+ do {
+ /* Lock held here */
+ done = 1;
+ if (!hostdata->connected && !hostdata->selecting) {
+ dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no);
+ /*
+ * Search through the issue_queue for a command destined
+ * for a target that's not busy.
+ */
+ for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble)
+ {
+ if (prev != tmp)
+ dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
+ /* When we find one, remove it from the issue queue. */
+ if (!(hostdata->busy[tmp->device->id] &
+ (1 << (u8)(tmp->device->lun & 0xff)))) {
+ if (prev) {
+ REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
+ hostdata->issue_queue = (struct scsi_cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+
+ /*
+ * Attempt to establish an I_T_L nexus here.
+ * On success, instance->hostdata->connected is set.
+ * On failure, we must add the command back to the
+ * issue queue so we can keep trying.
+ */
+ dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %llu removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
+
+ /*
+ * A successful selection is defined as one that
+ * leaves us with the command connected and
+ * in hostdata->connected, OR has terminated the
+ * command.
+ *
+ * With successful commands, we fall through
+ * and see if we can do an information transfer,
+ * with failures we will restart.
+ */
+ hostdata->selecting = NULL;
+ /* RvC: have to preset this to indicate a new command is being performed */
+
+ /*
+ * REQUEST SENSE commands are issued without tagged
+ * queueing, even on SCSI-II devices because the
+ * contingent allegiance condition exists for the
+ * entire unit.
+ */
+
+ if (!NCR5380_select(instance, tmp)) {
+ break;
+ } else {
+ LIST(tmp, hostdata->issue_queue);
+ tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = tmp;
+ done = 0;
+ dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no);
+ }
+ /* lock held here still */
+ } /* if target/lun is not busy */
+ } /* for */
+ /* exited locked */
+ } /* if (!hostdata->connected) */
+ if (hostdata->selecting) {
+ tmp = (struct scsi_cmnd *) hostdata->selecting;
+ /* Selection will drop and retake the lock */
+ if (!NCR5380_select(instance, tmp)) {
+ /* Ok ?? */
+ } else {
+ /* RvC: device failed, so we wait a long time
+ this is needed for Mustek scanners, that
+ do not respond to commands immediately
+ after a scan */
+ printk(KERN_DEBUG "scsi%d: device %d did not respond in time\n", instance->host_no, tmp->device->id);
+ LIST(tmp, hostdata->issue_queue);
+ tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = tmp;
+ NCR5380_set_timer(hostdata, USLEEP_WAITLONG);
+ }
+ } /* if hostdata->selecting */
+ if (hostdata->connected
+#ifdef REAL_DMA
+ && !hostdata->dmalen
+#endif
+ && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))
+ ) {
+ dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no);
+ NCR5380_information_transfer(instance);
+ dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no);
+ done = 0;
+ } else
+ break;
+ } while (!done);
+
+ spin_unlock_irq(instance->host_lock);
+}
+
+#ifndef DONT_USE_INTR
+
+/**
+ * NCR5380_intr - generic NCR5380 irq handler
+ * @irq: interrupt number
+ * @dev_id: device info
+ *
+ * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
+ * from the disconnected queue, and restarting NCR5380_main()
+ * as required.
+ *
+ * Locks: takes the needed instance locks
+ */
+
+static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
+{
+ NCR5380_local_declare();
+ struct Scsi_Host *instance = dev_id;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ int done;
+ unsigned char basr;
+ unsigned long flags;
+
+ dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n",
+ instance->irq);
+
+ do {
+ done = 1;
+ spin_lock_irqsave(instance->host_lock, flags);
+ /* Look for pending interrupts */
+ NCR5380_setup(instance);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ /* XXX dispatch to appropriate routine if found and done=0 */
+ if (basr & BASR_IRQ) {
+ NCR5380_dprint(NDEBUG_INTR, instance);
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
+ done = 0;
+ dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no);
+ NCR5380_reselect(instance);
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if (basr & BASR_PARITY_ERROR) {
+ dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no);
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
+ dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no);
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+#if defined(REAL_DMA)
+ /*
+ * We should only get PHASE MISMATCH and EOP interrupts
+ * if we have DMA enabled, so do a sanity check based on
+ * the current setting of the MODE register.
+ */
+
+ if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) {
+ int transferred;
+
+ if (!hostdata->connected)
+ panic("scsi%d : received end of DMA interrupt with no connected cmd\n", instance->hostno);
+
+ transferred = (hostdata->dmalen - NCR5380_dma_residual(instance));
+ hostdata->connected->SCp.this_residual -= transferred;
+ hostdata->connected->SCp.ptr += transferred;
+ hostdata->dmalen = 0;
+
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ /* FIXME: we need to poll briefly then defer a workqueue task ! */
+ NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2*HZ);
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+#else
+ dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#endif
+ }
+ } /* if BASR_IRQ */
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ if(!done)
+ schedule_delayed_work(&hostdata->coroutine, 0);
+ } while (!done);
+ return IRQ_HANDLED;
+}
+
+#endif
+
+/*
+ * Function : int NCR5380_select(struct Scsi_Host *instance,
+ * struct scsi_cmnd *cmd)
+ *
+ * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
+ * including ARBITRATION, SELECTION, and initial message out for
+ * IDENTIFY and queue messages.
+ *
+ * Inputs : instance - instantiation of the 5380 driver on which this
+ * target lives, cmd - SCSI command to execute.
+ *
+ * Returns : -1 if selection could not execute for some reason,
+ * 0 if selection succeeded or failed because the target
+ * did not respond.
+ *
+ * Side effects :
+ * If bus busy, arbitration failed, etc, NCR5380_select() will exit
+ * with registers as they should have been on entry - ie
+ * SELECT_ENABLE will be set appropriately, the NCR5380
+ * will cease to drive any SCSI bus signals.
+ *
+ * If successful : I_T_L or I_T_L_Q nexus will be established,
+ * instance->connected will be set to cmd.
+ * SELECT interrupt will be disabled.
+ *
+ * If failed (no target) : cmd->scsi_done() will be called, and the
+ * cmd->result host byte set to DID_BAD_TARGET.
+ *
+ * Locks: caller holds hostdata lock in IRQ mode
+ */
+
+static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+{
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ unsigned char tmp[3], phase;
+ unsigned char *data;
+ int len;
+ unsigned long timeout;
+ unsigned char value;
+ int err;
+ NCR5380_setup(instance);
+
+ if (hostdata->selecting)
+ goto part2;
+
+ hostdata->restart_select = 0;
+
+ NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id);
+
+ /*
+ * Set the phase bits to 0, otherwise the NCR5380 won't drive the
+ * data bus during SELECTION.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /*
+ * Start arbitration.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(MODE_REG, MR_ARBITRATE);
+
+
+ /* We can be relaxed here, interrupts are on, we are
+ in workqueue context, the birds are singing in the trees */
+ spin_unlock_irq(instance->host_lock);
+ err = NCR5380_poll_politely(instance, INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, ICR_ARBITRATION_PROGRESS, 5*HZ);
+ spin_lock_irq(instance->host_lock);
+ if (err < 0) {
+ printk(KERN_DEBUG "scsi: arbitration timeout at %d\n", __LINE__);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ goto failed;
+ }
+
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no);
+
+ /*
+ * The arbitration delay is 2.2us, but this is a minimum and there is
+ * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
+ * the integral nature of udelay().
+ *
+ */
+
+ udelay(3);
+
+ /* Check for lost arbitration */
+ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no);
+ goto failed;
+ }
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
+
+ if (!(hostdata->flags & FLAG_DTC3181E) &&
+ /* RvC: DTC3181E has some trouble with this
+ * so we simply removed it. Seems to work with
+ * only Mustek scanner attached
+ */
+ (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no);
+ goto failed;
+ }
+ /*
+ * Again, bus clear + bus settle time is 1.2us, however, this is
+ * a minimum so we'll udelay ceil(1.2)
+ */
+
+ udelay(2);
+
+ dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no);
+
+ /*
+ * Now that we have won arbitration, start Selection process, asserting
+ * the host and target ID's on the SCSI bus.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << scmd_id(cmd))));
+
+ /*
+ * Raise ATN while SEL is true before BSY goes false from arbitration,
+ * since this is the only way to guarantee that we'll get a MESSAGE OUT
+ * phase immediately after selection.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL));
+ NCR5380_write(MODE_REG, MR_BASE);
+
+ /*
+ * Reselect interrupts must be turned off prior to the dropping of BSY,
+ * otherwise we will trigger an interrupt.
+ */
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+ /*
+ * The initiator shall then wait at least two deskew delays and release
+ * the BSY signal.
+ */
+ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
+
+ /* Reset BSY */
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL));
+
+ /*
+ * Something weird happens when we cease to drive BSY - looks
+ * like the board/chip is letting us do another read before the
+ * appropriate propagation delay has expired, and we're confusing
+ * a BSY signal from ourselves as the target's response to SELECTION.
+ *
+ * A small delay (the 'C++' frontend breaks the pipeline with an
+ * unnecessary jump, making it work on my 386-33/Trantor T128, the
+ * tighter 'C' code breaks and requires this) solves the problem -
+ * the 1 us delay is arbitrary, and only used because this delay will
+ * be the same on other platforms and since it works here, it should
+ * work there.
+ *
+ * wingel suggests that this could be due to failing to wait
+ * one deskew delay.
+ */
+
+ udelay(1);
+
+ dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd));
+
+ /*
+ * The SCSI specification calls for a 250 ms timeout for the actual
+ * selection.
+ */
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ /*
+ * XXX very interesting - we're seeing a bounce where the BSY we
+ * asserted is being reflected / still asserted (propagation delay?)
+ * and it's detecting as true. Sigh.
+ */
+
+ hostdata->select_time = 0; /* we count the clock ticks at which we polled */
+ hostdata->selecting = cmd;
+
+part2:
+ /* RvC: here we enter after a sleeping period, or immediately after
+ execution of part 1
+ we poll only once ech clock tick */
+ value = NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO);
+
+ if (!value && (hostdata->select_time < HZ/4)) {
+ /* RvC: we still must wait for a device response */
+ hostdata->select_time++; /* after 25 ticks the device has failed */
+ NCR5380_set_timer(hostdata, 1);
+ return 0; /* RvC: we return here with hostdata->selecting set,
+ to go to sleep */
+ }
+
+ hostdata->selecting = NULL;/* clear this pointer, because we passed the
+ waiting period */
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_reselect(instance);
+ printk("scsi%d : reselection after won arbitration?\n", instance->host_no);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ /*
+ * No less than two deskew delays after the initiator detects the
+ * BSY signal is true, it shall release the SEL signal and may
+ * change the DATA BUS. -wingel
+ */
+
+ udelay(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ if (hostdata->targets_present & (1 << scmd_id(cmd))) {
+ printk(KERN_DEBUG "scsi%d : weirdness\n", instance->host_no);
+ if (hostdata->restart_select)
+ printk(KERN_DEBUG "\trestart select\n");
+ NCR5380_dprint(NDEBUG_SELECTION, instance);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ cmd->result = DID_BAD_TARGET << 16;
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return 0;
+ }
+ hostdata->targets_present |= (1 << scmd_id(cmd));
+
+ /*
+ * Since we followed the SCSI spec, and raised ATN while SEL
+ * was true but before BSY was false during selection, the information
+ * transfer phase should be a MESSAGE OUT phase so that we can send the
+ * IDENTIFY message.
+ *
+ * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
+ * message (2 bytes) with a tag ID that we increment with every command
+ * until it wraps back to 0.
+ *
+ * XXX - it turns out that there are some broken SCSI-II devices,
+ * which claim to support tagged queuing but fail when more than
+ * some number of commands are issued at once.
+ */
+
+ /* Wait for start of REQ/ACK handshake */
+
+ spin_unlock_irq(instance->host_lock);
+ err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ spin_lock_irq(instance->host_lock);
+
+ if(err) {
+ printk(KERN_ERR "scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ goto failed;
+ }
+
+ dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
+ tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+
+ len = 1;
+ cmd->tag = 0;
+
+ /* Send message(s) */
+ data = tmp;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);
+ /* XXX need to handle errors here */
+ hostdata->connected = cmd;
+ hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF));
+
+ initialize_SCp(cmd);
+
+ return 0;
+
+ /* Selection failed */
+failed:
+ return -1;
+
+}
+
+/*
+ * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using polled I/O
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * maximum number of bytes, 0 if all bytes or transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ * XXX Note : handling for bus free may be useful.
+ */
+
+/*
+ * Note : this code is not as quick as it could be, however it
+ * IS 100% reliable, and for the actual data transfer where speed
+ * counts, we will always do a pseudo DMA or DMA transfer.
+ */
+
+static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ unsigned char p = *phase, tmp;
+ int c = *count;
+ unsigned char *d = *data;
+ /*
+ * RvC: some administrative data to process polling time
+ */
+ int break_allowed = 0;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ NCR5380_setup(instance);
+
+ if (!(p & SR_IO))
+ dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c);
+ else
+ dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c);
+
+ /*
+ * The NCR5380 chip will only drive the SCSI bus when the
+ * phase specified in the appropriate bits of the TARGET COMMAND
+ * REGISTER match the STATUS REGISTER
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+ /* RvC: don't know if this is necessary, but other SCSI I/O is short
+ * so breaks are not necessary there
+ */
+ if ((p == PHASE_DATAIN) || (p == PHASE_DATAOUT)) {
+ break_allowed = 1;
+ }
+ do {
+ /*
+ * Wait for assertion of REQ, after which the phase bits will be
+ * valid
+ */
+
+ /* RvC: we simply poll once, after that we stop temporarily
+ * and let the device buffer fill up
+ * if breaking is not allowed, we keep polling as long as needed
+ */
+
+ /* FIXME */
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ) && !break_allowed);
+ if (!(tmp & SR_REQ)) {
+ /* timeout condition */
+ NCR5380_set_timer(hostdata, USLEEP_SLEEP);
+ break;
+ }
+
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no);
+
+ /* Check for phase mismatch */
+ if ((tmp & PHASE_MASK) != p) {
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no);
+ NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);
+ break;
+ }
+ /* Do actual transfer from SCSI bus to / from memory */
+ if (!(p & SR_IO))
+ NCR5380_write(OUTPUT_DATA_REG, *d);
+ else
+ *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
+
+ ++d;
+
+ /*
+ * The SCSI standard suggests that in MSGOUT phase, the initiator
+ * should drop ATN on the last byte of the message phase
+ * after REQ has been asserted for the handshake but before
+ * the initiator raises ACK.
+ */
+
+ if (!(p & SR_IO)) {
+ if (!((p & SR_MSG) && c > 1)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+ NCR5380_dprint(NDEBUG_PIO, instance);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK);
+ } else {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN);
+ NCR5380_dprint(NDEBUG_PIO, instance);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ }
+ } else {
+ NCR5380_dprint(NDEBUG_PIO, instance);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+ }
+
+ /* FIXME - if this fails bus reset ?? */
+ NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ);
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no);
+
+/*
+ * We have several special cases to consider during REQ/ACK handshaking :
+ * 1. We were in MSGOUT phase, and we are on the last byte of the
+ * message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in a MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+ * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
+ if (!(p == PHASE_MSGIN && c == 1)) {
+ if (p == PHASE_MSGOUT && c > 1)
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ else
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+ } while (--c);
+
+ dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c);
+
+ *count = c;
+ *data = d;
+ tmp = NCR5380_read(STATUS_REG);
+ if (tmp & SR_REQ)
+ *phase = tmp & PHASE_MASK;
+ else
+ *phase = PHASE_UNKNOWN;
+
+ if (!c || (*phase == p))
+ return 0;
+ else
+ return -1;
+}
+
+/**
+ * do_reset - issue a reset command
+ * @host: adapter to reset
+ *
+ * Issue a reset sequence to the NCR5380 and try and get the bus
+ * back into sane shape.
+ *
+ * Locks: caller holds queue lock
+ */
+
+static void do_reset(struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ NCR5380_setup(host);
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
+ udelay(25);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+}
+
+/*
+ * Function : do_abort (Scsi_Host *host)
+ *
+ * Purpose : abort the currently established nexus. Should only be
+ * called from a routine which can drop into a
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ * Locks: queue lock held by caller
+ * FIXME: sort this out and get new_eh running
+ */
+
+static int do_abort(struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ unsigned char *msgptr, phase, tmp;
+ int len;
+ int rc;
+ NCR5380_setup(host);
+
+
+ /* Request message out phase */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ /*
+ * Wait for the target to indicate a valid phase by asserting
+ * REQ. Once this happens, we'll have either a MSGOUT phase
+ * and can immediately send the ABORT message, or we'll have some
+ * other phase and will have to source/sink data.
+ *
+ * We really don't care what value was on the bus or what value
+ * the target sees, so we just handshake.
+ */
+
+ rc = NCR5380_poll_politely(host, STATUS_REG, SR_REQ, SR_REQ, 60 * HZ);
+
+ if(rc < 0)
+ return -1;
+
+ tmp = (unsigned char)rc;
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ rc = NCR5380_poll_politely(host, STATUS_REG, SR_REQ, 0, 3*HZ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ if(rc == -1)
+ return -1;
+ }
+ tmp = ABORT;
+ msgptr = &tmp;
+ len = 1;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio(host, &phase, &len, &msgptr);
+
+ /*
+ * If we got here, and the command completed successfully,
+ * we're about to go into bus free state.
+ */
+
+ return len ? -1 : 0;
+}
+
+#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
+/*
+ * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using either real
+ * or pseudo DMA.
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * maximum number of bytes, 0 if all bytes or transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ * Locks: io_request lock held by caller
+ */
+
+
+static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register int c = *count;
+ register unsigned char p = *phase;
+ register unsigned char *d = *data;
+ unsigned char tmp;
+ int foo;
+#if defined(REAL_DMA_POLL)
+ int cnt, toPIO;
+ unsigned char saved_data = 0, overrun = 0, residue;
+#endif
+
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+
+ NCR5380_setup(instance);
+
+ if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
+ *phase = tmp;
+ return -1;
+ }
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+#ifdef READ_OVERRUNS
+ if (p & SR_IO) {
+ c -= 2;
+ }
+#endif
+ dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
+ hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
+#endif
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+#ifdef REAL_DMA
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
+#elif defined(REAL_DMA_POLL)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#else
+ /*
+ * Note : on my sample board, watch-dog timeouts occurred when interrupts
+ * were not disabled for the duration of a single DMA transfer, from
+ * before the setting of DMA mode to after transfer of the last byte.
+ */
+
+#if defined(PSEUDO_DMA) && defined(UNSAFE)
+ spin_unlock_irq(instance->host_lock);
+#endif
+ /* KLL May need eop and parity in 53c400 */
+ if (hostdata->flags & FLAG_NCR53C400)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE |
+ MR_ENABLE_PAR_CHECK | MR_ENABLE_PAR_INTR |
+ MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
+ else
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#endif /* def REAL_DMA */
+
+ dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
+
+ /*
+ * On the PAS16 at least I/O recovery delays are not needed here.
+ * Everyone else seems to want them.
+ */
+
+ if (p & SR_IO) {
+ io_recovery_delay(1);
+ NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
+ } else {
+ io_recovery_delay(1);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+ io_recovery_delay(1);
+ NCR5380_write(START_DMA_SEND_REG, 0);
+ io_recovery_delay(1);
+ }
+
+#if defined(REAL_DMA_POLL)
+ do {
+ tmp = NCR5380_read(BUS_AND_STATUS_REG);
+ } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER)));
+
+/*
+ At this point, either we've completed DMA, or we have a phase mismatch,
+ or we've unexpectedly lost BUSY (which is a real error).
+
+ For write DMAs, we want to wait until the last byte has been
+ transferred out over the bus before we turn off DMA mode. Alas, there
+ seems to be no terribly good way of doing this on a 5380 under all
+ conditions. For non-scatter-gather operations, we can wait until REQ
+ and ACK both go false, or until a phase mismatch occurs. Gather-writes
+ are nastier, since the device will be expecting more data than we
+ are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
+ could test LAST BIT SENT to assure transfer (I imagine this is precisely
+ why this signal was added to the newer chips) but on the older 538[01]
+ this signal does not exist. The workaround for this lack is a watchdog;
+ we bail out of the wait-loop after a modest amount of wait-time if
+ the usual exit conditions are not met. Not a terribly clean or
+ correct solution :-%
+
+ Reads are equally tricky due to a nasty characteristic of the NCR5380.
+ If the chip is in DMA mode for an READ, it will respond to a target's
+ REQ by latching the SCSI data into the INPUT DATA register and asserting
+ ACK, even if it has _already_ been notified by the DMA controller that
+ the current DMA transfer has completed! If the NCR5380 is then taken
+ out of DMA mode, this already-acknowledged byte is lost.
+
+ This is not a problem for "one DMA transfer per command" reads, because
+ the situation will never arise... either all of the data is DMA'ed
+ properly, or the target switches to MESSAGE IN phase to signal a
+ disconnection (either operation bringing the DMA to a clean halt).
+ However, in order to handle scatter-reads, we must work around the
+ problem. The chosen fix is to DMA N-2 bytes, then check for the
+ condition before taking the NCR5380 out of DMA mode. One or two extra
+ bytes are transferred via PIO as necessary to fill out the original
+ request.
+ */
+
+ if (p & SR_IO) {
+#ifdef READ_OVERRUNS
+ udelay(10);
+ if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == (BASR_PHASE_MATCH | BASR_ACK))) {
+ saved_data = NCR5380_read(INPUT_DATA_REGISTER);
+ overrun = 1;
+ }
+#endif
+ } else {
+ int limit = 100;
+ while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) {
+ if (!(tmp & BASR_PHASE_MATCH))
+ break;
+ if (--limit < 0)
+ break;
+ }
+ }
+
+ dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG));
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ residue = NCR5380_dma_residual(instance);
+ c -= residue;
+ *count -= c;
+ *data += c;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+
+#ifdef READ_OVERRUNS
+ if (*phase == p && (p & SR_IO) && residue == 0) {
+ if (overrun) {
+ dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
+ **data = saved_data;
+ *data += 1;
+ *count -= 1;
+ cnt = toPIO = 1;
+ } else {
+ printk("No overrun??\n");
+ cnt = toPIO = 2;
+ }
+ dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
+ NCR5380_transfer_pio(instance, phase, &cnt, data);
+ *count -= toPIO - cnt;
+ }
+#endif
+
+ dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
+ return 0;
+
+#elif defined(REAL_DMA)
+ return 0;
+#else /* defined(REAL_DMA_POLL) */
+ if (p & SR_IO) {
+#ifdef DMA_WORKS_RIGHT
+ foo = NCR5380_pread(instance, d, c);
+#else
+ int diff = 1;
+ if (hostdata->flags & FLAG_NCR53C400) {
+ diff = 0;
+ }
+ if (!(foo = NCR5380_pread(instance, d, c - diff))) {
+ /*
+ * We can't disable DMA mode after successfully transferring
+ * what we plan to be the last byte, since that would open up
+ * a race condition where if the target asserted REQ before
+ * we got the DMA mode reset, the NCR5380 would have latched
+ * an additional byte into the INPUT DATA register and we'd
+ * have dropped it.
+ *
+ * The workaround was to transfer one fewer bytes than we
+ * intended to with the pseudo-DMA read function, wait for
+ * the chip to latch the last byte, read it, and then disable
+ * pseudo-DMA mode.
+ *
+ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+ * REQ is deasserted when ACK is asserted, and not reasserted
+ * until ACK goes false. Since the NCR5380 won't lower ACK
+ * until DACK is asserted, which won't happen unless we twiddle
+ * the DMA port or we take the NCR5380 out of DMA mode, we
+ * can guarantee that we won't handshake another extra
+ * byte.
+ */
+
+ if (!(hostdata->flags & FLAG_NCR53C400)) {
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ));
+ /* Wait for clean handshake */
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ d[c - 1] = NCR5380_read(INPUT_DATA_REG);
+ }
+ }
+#endif
+ } else {
+#ifdef DMA_WORKS_RIGHT
+ foo = NCR5380_pwrite(instance, d, c);
+#else
+ int timeout;
+ dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c);
+ if (!(foo = NCR5380_pwrite(instance, d, c))) {
+ /*
+ * Wait for the last byte to be sent. If REQ is being asserted for
+ * the byte we're interested, we'll ACK it and it will go false.
+ */
+ if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) {
+ timeout = 20000;
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));
+
+ if (!timeout)
+ dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no);
+
+ if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
+ hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
+ if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
+ hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
+ dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no);
+ }
+ }
+ } else {
+ dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n");
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
+ dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n");
+ }
+ }
+#endif
+ }
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
+ dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n");
+ if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
+ dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n");
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+ printk("53C400w: IRQ NOT THERE!\n");
+ }
+ }
+ *data = d + c;
+ *count = 0;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+#if defined(PSEUDO_DMA) && defined(UNSAFE)
+ spin_lock_irq(instance->host_lock);
+#endif /* defined(REAL_DMA_POLL) */
+ return foo;
+#endif /* def REAL_DMA */
+}
+#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */
+
+/*
+ * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
+ *
+ * Purpose : run through the various SCSI phases and do as the target
+ * directs us to. Operates on the currently connected command,
+ * instance->connected.
+ *
+ * Inputs : instance, instance for which we are doing commands
+ *
+ * Side effects : SCSI things happen, the disconnected queue will be
+ * modified if a command disconnects, *instance->connected will
+ * change.
+ *
+ * XXX Note : we need to watch for bus free or a reset condition here
+ * to recover from an unexpected bus free condition.
+ *
+ * Locks: io_request_lock held by caller in IRQ mode
+ */
+
+static void NCR5380_information_transfer(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+ unsigned char msgout = NOP;
+ int sink = 0;
+ int len;
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ int transfersize;
+#endif
+ unsigned char *data;
+ unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
+ struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
+ /* RvC: we need to set the end of the polling time */
+ unsigned long poll_time = jiffies + USLEEP_POLL;
+
+ NCR5380_setup(instance);
+
+ while (1) {
+ tmp = NCR5380_read(STATUS_REG);
+ /* We only have a valid SCSI phase when REQ is asserted */
+ if (tmp & SR_REQ) {
+ phase = (tmp & PHASE_MASK);
+ if (phase != old_phase) {
+ old_phase = phase;
+ NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
+ }
+ if (sink && (phase != PHASE_MSGOUT)) {
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ sink = 0;
+ continue;
+ }
+ switch (phase) {
+ case PHASE_DATAIN:
+ case PHASE_DATAOUT:
+#if (NDEBUG & NDEBUG_NO_DATAOUT)
+ printk("scsi%d : NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n", instance->host_no);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ return;
+#endif
+ /*
+ * If there is no room left in the current buffer in the
+ * scatter-gather list, move onto the next one.
+ */
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual);
+ }
+ /*
+ * The preferred transfer method is going to be
+ * PSEUDO-DMA for systems that are strictly PIO,
+ * since we can let the hardware do the handshaking.
+ *
+ * For this to work, we need to know the transfersize
+ * ahead of time, since the pseudo-DMA code will sit
+ * in an unconditional loop.
+ */
+
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ /* KLL
+ * PSEUDO_DMA is defined here. If this is the g_NCR5380
+ * driver then it will always be defined, so the
+ * FLAG_NO_PSEUDO_DMA is used to inhibit PDMA in the base
+ * NCR5380 case. I think this is a fairly clean solution.
+ * We supplement these 2 if's with the flag.
+ */
+#ifdef NCR5380_dma_xfer_len
+ if (!cmd->device->borken && !(hostdata->flags & FLAG_NO_PSEUDO_DMA) && (transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) {
+#else
+ transfersize = cmd->transfersize;
+
+#ifdef LIMIT_TRANSFERSIZE /* If we have problems with interrupt service */
+ if (transfersize > 512)
+ transfersize = 512;
+#endif /* LIMIT_TRANSFERSIZE */
+
+ if (!cmd->device->borken && transfersize && !(hostdata->flags & FLAG_NO_PSEUDO_DMA) && cmd->SCp.this_residual && !(cmd->SCp.this_residual % transfersize)) {
+ /* Limit transfers to 32K, for xx400 & xx406
+ * pseudoDMA that transfers in 128 bytes blocks. */
+ if (transfersize > 32 * 1024)
+ transfersize = 32 * 1024;
+#endif
+ len = transfersize;
+ if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **) &cmd->SCp.ptr)) {
+ /*
+ * If the watchdog timer fires, all future accesses to this
+ * device will use the polled-IO.
+ */
+ scmd_printk(KERN_INFO, cmd,
+ "switching to slow handshake\n");
+ cmd->device->borken = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ /* XXX - need to source or sink data here, as appropriate */
+ } else
+ cmd->SCp.this_residual -= transfersize - len;
+ } else
+#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
+ NCR5380_transfer_pio(instance, &phase, (int *) &cmd->SCp.this_residual, (unsigned char **)
+ &cmd->SCp.ptr);
+ break;
+ case PHASE_MSGIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Message = tmp;
+
+ switch (tmp) {
+ /*
+ * Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ *
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands.
+ */
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ /*
+ * Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands
+ * available.
+ */
+ if (!cmd->next_link) {
+ printk("scsi%d : target %d lun %llu linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun);
+ sink = 1;
+ do_abort(instance);
+ return;
+ }
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+#endif /* def LINKED */
+ case ABORT:
+ case COMMAND_COMPLETE:
+ /* Accept message by clearing ACK */
+ sink = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ hostdata->connected = NULL;
+ dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %llu completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF));
+
+ /*
+ * I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ *
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed
+ */
+
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (status_byte(cmd->SCp.Status) != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+
+ if ((cmd->cmnd[0] == REQUEST_SENSE) &&
+ hostdata->ses.cmd_len) {
+ scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ hostdata->ses.cmd_len = 0 ;
+ }
+
+ if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
+ scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
+
+ dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no);
+
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->issue_queue;
+ hostdata->issue_queue = (struct scsi_cmnd *) cmd;
+ dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
+ } else {
+ cmd->scsi_done(cmd);
+ }
+
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+ return;
+ case MESSAGE_REJECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ switch (hostdata->last_message) {
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ cmd->device->simple_tags = 0;
+ hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF));
+ break;
+ default:
+ break;
+ }
+ case DISCONNECT:{
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ cmd->device->disconnect = 1;
+ LIST(cmd, hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->disconnected_queue;
+ hostdata->connected = NULL;
+ hostdata->disconnected_queue = cmd;
+ dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %llu was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /* Wait for bus free to avoid nasty timeouts - FIXME timeout !*/
+ /* NCR538_poll_politely(instance, STATUS_REG, SR_BSY, 0, 30 * HZ); */
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+ return;
+ }
+ /*
+ * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible.
+ */
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ break;
+ case EXTENDED_MESSAGE:
+/*
+ * Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't
+ * include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ *
+ * Start the extended message buffer with the EXTENDED_MESSAGE
+ * byte, since spi_print_msg() wants the whole thing.
+ */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ /* Accept first byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no);
+
+ len = 2;
+ data = extended_msg + 1;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+ dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]);
+
+ if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
+ /* Accept third byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = extended_msg[1] - 1;
+ data = extended_msg + 3;
+ phase = PHASE_MSGIN;
+
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len);
+
+ switch (extended_msg[2]) {
+ case EXTENDED_SDTR:
+ case EXTENDED_WDTR:
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ tmp = 0;
+ }
+ } else if (len) {
+ printk("scsi%d: error receiving extended message\n", instance->host_no);
+ tmp = 0;
+ } else {
+ printk("scsi%d: extended message code %02x length %d is too long\n", instance->host_no, extended_msg[2], extended_msg[1]);
+ tmp = 0;
+ }
+ /* Fall through to reject message */
+
+ /*
+ * If we get something weird that we aren't expecting,
+ * reject it.
+ */
+ default:
+ if (!tmp) {
+ printk("scsi%d: rejecting message ", instance->host_no);
+ spi_print_msg(extended_msg);
+ printk("\n");
+ } else if (tmp != EXTENDED_MESSAGE)
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown message %02x\n",tmp);
+ else
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown extended message code %02x, length %d\n", extended_msg[1], extended_msg[0]);
+
+ msgout = MESSAGE_REJECT;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ break;
+ } /* switch (tmp) */
+ break;
+ case PHASE_MSGOUT:
+ len = 1;
+ data = &msgout;
+ hostdata->last_message = msgout;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (msgout == ABORT) {
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF));
+ hostdata->connected = NULL;
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return;
+ }
+ msgout = NOP;
+ break;
+ case PHASE_CMDOUT:
+ len = cmd->cmd_len;
+ data = cmd->cmnd;
+ /*
+ * XXX for performance reasons, on machines with a
+ * PSEUDO-DMA architecture we should probably
+ * use the dma transfer function.
+ */
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {
+ NCR5380_set_timer(hostdata, USLEEP_SLEEP);
+ dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
+ return;
+ }
+ break;
+ case PHASE_STATIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Status = tmp;
+ break;
+ default:
+ printk("scsi%d : unknown phase\n", instance->host_no);
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ } /* switch(phase) */
+ } /* if (tmp * SR_REQ) */
+ else {
+ /* RvC: go to sleep if polling time expired
+ */
+ if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {
+ NCR5380_set_timer(hostdata, USLEEP_SLEEP);
+ dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
+ return;
+ }
+ }
+ } /* while (1) */
+}
+
+/*
+ * Function : void NCR5380_reselect (struct Scsi_Host *instance)
+ *
+ * Purpose : does reselection, initializing the instance->connected
+ * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
+ * nexus has been reestablished,
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ * Locks: io_request_lock held by caller if IRQ driven
+ */
+
+static void NCR5380_reselect(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char target_mask;
+ unsigned char lun, phase;
+ int len;
+ unsigned char msg[3];
+ unsigned char *data;
+ struct scsi_cmnd *tmp = NULL, *prev;
+ int abort = 0;
+ NCR5380_setup(instance);
+
+ /*
+ * Disable arbitration, etc. since the host adapter obviously
+ * lost, and tell an interrupted NCR5380_select() to restart.
+ */
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ hostdata->restart_select = 1;
+
+ target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
+ dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no);
+
+ /*
+ * At this point, we have detected that our SCSI ID is on the bus,
+ * SEL is true and BSY was false for at least one bus settle delay
+ * (400 ns).
+ *
+ * We must assert BSY ourselves, until the target drops the SEL
+ * signal.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
+
+ /* FIXME: timeout too long, must fail to workqueue */
+ if(NCR5380_poll_politely(instance, STATUS_REG, SR_SEL, 0, 2*HZ)<0)
+ abort = 1;
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * Wait for target to go into MSGIN.
+ * FIXME: timeout needed and fail to work queeu
+ */
+
+ if(NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 2*HZ))
+ abort = 1;
+
+ len = 1;
+ data = msg;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+ if (!(msg[0] & 0x80)) {
+ printk(KERN_ERR "scsi%d : expecting IDENTIFY message, got ", instance->host_no);
+ spi_print_msg(msg);
+ abort = 1;
+ } else {
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ lun = (msg[0] & 0x07);
+
+ /*
+ * We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can chose to do additional data transfer.
+ */
+
+ /*
+ * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue.
+ */
+
+
+ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble)
+ if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun)
+ ) {
+ if (prev) {
+ REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
+ hostdata->disconnected_queue = (struct scsi_cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+ break;
+ }
+ if (!tmp) {
+ printk(KERN_ERR "scsi%d : warning : target bitmask %02x lun %d not in disconnect_queue.\n", instance->host_no, target_mask, lun);
+ /*
+ * Since we have an established nexus that we can't do anything with,
+ * we must abort it.
+ */
+ abort = 1;
+ }
+ }
+
+ if (abort) {
+ do_abort(instance);
+ } else {
+ hostdata->connected = tmp;
+ dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %llu, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
+ }
+}
+
+/*
+ * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
+ *
+ * Purpose : called by interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would finish the DMA transfer).
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ * Returns : pointer to the scsi_cmnd structure for which the I_T_L
+ * nexus has been reestablished, on failure NULL is returned.
+ */
+
+#ifdef REAL_DMA
+static void NCR5380_dma_complete(NCR5380_instance * instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ int transferred;
+ NCR5380_setup(instance);
+
+ /*
+ * XXX this might not be right.
+ *
+ * Wait for final byte to transfer, ie wait for ACK to go false.
+ *
+ * We should use the Last Byte Sent bit, unfortunately this is
+ * not available on the 5380/5381 (only the various CMOS chips)
+ *
+ * FIXME: timeout, and need to handle long timeout/irq case
+ */
+
+ NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ);
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * The only places we should see a phase mismatch and have to send
+ * data from the same set of pointers will be the data transfer
+ * phases. So, residual, requested length are only important here.
+ */
+
+ if (!(hostdata->connected->SCp.phase & SR_CD)) {
+ transferred = instance->dmalen - NCR5380_dma_residual();
+ hostdata->connected->SCp.this_residual -= transferred;
+ hostdata->connected->SCp.ptr += transferred;
+ }
+}
+#endif /* def REAL_DMA */
+
+/*
+ * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
+ *
+ * Purpose : abort a command
+ *
+ * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
+ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+ * Returns : SUCCESS - success, FAILED on failure.
+ *
+ * XXX - there is no way to abort the command that is currently
+ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+ * called where the loop started in NCR5380_main().
+ *
+ * Locks: host lock taken by caller
+ */
+
+static int NCR5380_abort(struct scsi_cmnd *cmd)
+{
+ NCR5380_local_declare();
+ struct Scsi_Host *instance = cmd->device->host;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ struct scsi_cmnd *tmp, **prev;
+
+ scmd_printk(KERN_WARNING, cmd, "aborting command\n");
+
+ NCR5380_print_status(instance);
+
+ NCR5380_setup(instance);
+
+ dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no);
+ dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
+
+#if 0
+/*
+ * Case 1 : If the command is the currently executing command,
+ * we'll set the aborted flag and return control so that
+ * information transfer routine can exit cleanly.
+ */
+
+ if (hostdata->connected == cmd) {
+ dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no);
+ hostdata->aborted = 1;
+/*
+ * We should perform BSY checking, and make sure we haven't slipped
+ * into BUS FREE.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN);
+/*
+ * Since we can't change phases until we've completed the current
+ * handshake, we have to source or sink a byte of data if the current
+ * phase is not MSGOUT.
+ */
+
+/*
+ * Return control to the executing NCR drive so we can clear the
+ * aborted flag and get back into our main loop.
+ */
+
+ return SUCCESS;
+ }
+#endif
+
+/*
+ * Case 2 : If the command hasn't been issued yet, we simply remove it
+ * from the issue queue.
+ */
+
+ dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
+ for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ REMOVE(5, *prev, tmp, tmp->host_scribble);
+ (*prev) = (struct scsi_cmnd *) tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
+ tmp->scsi_done(tmp);
+ return SUCCESS;
+ }
+#if (NDEBUG & NDEBUG_ABORT)
+ /* KLL */
+ else if (prev == tmp)
+ printk(KERN_ERR "scsi%d : LOOP\n", instance->host_no);
+#endif
+
+/*
+ * Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected) {
+ dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no);
+ return FAILED;
+ }
+/*
+ * Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message.
+ *
+ * This case is especially ugly. In order to reestablish the nexus, we
+ * need to call NCR5380_select(). The easiest way to implement this
+ * function was to abort if the bus was busy, and let the interrupt
+ * handler triggered on the SEL for reselect take care of lost arbitrations
+ * where necessary, meaning interrupts need to be enabled.
+ *
+ * When interrupts are enabled, the queues may change - so we
+ * can't remove it from the disconnected queue before selecting it
+ * because that could cause a failure in hashing the nexus if that
+ * device reselected.
+ *
+ * Since the queues may change, we can't use the pointers from when we
+ * first locate it.
+ *
+ * So, we must first locate the command, and if NCR5380_select()
+ * succeeds, then issue the abort, relocate the command and remove
+ * it from the disconnected queue.
+ */
+
+ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
+
+ if (NCR5380_select(instance, cmd))
+ return FAILED;
+ dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
+
+ do_abort(instance);
+
+ for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ REMOVE(5, *prev, tmp, tmp->host_scribble);
+ *prev = (struct scsi_cmnd *) tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ tmp->scsi_done(tmp);
+ return SUCCESS;
+ }
+ }
+/*
+ * Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+ printk(KERN_WARNING "scsi%d : warning : SCSI command probably completed successfully\n"
+ " before abortion\n", instance->host_no);
+ return FAILED;
+}
+
+
+/*
+ * Function : int NCR5380_bus_reset (struct scsi_cmnd *cmd)
+ *
+ * Purpose : reset the SCSI bus.
+ *
+ * Returns : SUCCESS
+ *
+ * Locks: host lock taken by caller
+ */
+
+static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+ NCR5380_print_status(instance);
+
+ spin_lock_irq(instance->host_lock);
+ do_reset(instance);
+ spin_unlock_irq(instance->host_lock);
+
+ return SUCCESS;
+}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
new file mode 100644
index 000000000..162112dd1
--- /dev/null
+++ b/drivers/scsi/NCR5380.h
@@ -0,0 +1,447 @@
+/*
+ * NCR 5380 defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef NCR5380_H
+#define NCR5380_H
+
+#include <linux/interrupt.h>
+#include <scsi/scsi_eh.h>
+
+#define NDEBUG_ARBITRATION 0x1
+#define NDEBUG_AUTOSENSE 0x2
+#define NDEBUG_DMA 0x4
+#define NDEBUG_HANDSHAKE 0x8
+#define NDEBUG_INFORMATION 0x10
+#define NDEBUG_INIT 0x20
+#define NDEBUG_INTR 0x40
+#define NDEBUG_LINKED 0x80
+#define NDEBUG_MAIN 0x100
+#define NDEBUG_NO_DATAOUT 0x200
+#define NDEBUG_NO_WRITE 0x400
+#define NDEBUG_PIO 0x800
+#define NDEBUG_PSEUDO_DMA 0x1000
+#define NDEBUG_QUEUES 0x2000
+#define NDEBUG_RESELECTION 0x4000
+#define NDEBUG_SELECTION 0x8000
+#define NDEBUG_USLEEP 0x10000
+#define NDEBUG_LAST_BYTE_SENT 0x20000
+#define NDEBUG_RESTART_SELECT 0x40000
+#define NDEBUG_EXTENDED 0x80000
+#define NDEBUG_C400_PREAD 0x100000
+#define NDEBUG_C400_PWRITE 0x200000
+#define NDEBUG_LISTS 0x400000
+#define NDEBUG_ABORT 0x800000
+#define NDEBUG_TAGS 0x1000000
+#define NDEBUG_MERGING 0x2000000
+
+#define NDEBUG_ANY 0xFFFFFFFFUL
+
+/*
+ * The contents of the OUTPUT DATA register are asserted on the bus when
+ * either arbitration is occurring or the phase-indicating signals (
+ * IO, CD, MSG) in the TARGET COMMAND register and the ASSERT DATA
+ * bit in the INITIATOR COMMAND register is set.
+ */
+
+#define OUTPUT_DATA_REG 0 /* wo DATA lines on SCSI bus */
+#define CURRENT_SCSI_DATA_REG 0 /* ro same */
+
+#define INITIATOR_COMMAND_REG 1 /* rw */
+#define ICR_ASSERT_RST 0x80 /* rw Set to assert RST */
+#define ICR_ARBITRATION_PROGRESS 0x40 /* ro Indicates arbitration complete */
+#define ICR_TRI_STATE 0x40 /* wo Set to tri-state drivers */
+#define ICR_ARBITRATION_LOST 0x20 /* ro Indicates arbitration lost */
+#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */
+#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */
+#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */
+#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
+#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
+#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
+
+#ifdef DIFFERENTIAL
+#define ICR_BASE ICR_DIFF_ENABLE
+#else
+#define ICR_BASE 0
+#endif
+
+#define MODE_REG 2
+/*
+ * Note : BLOCK_DMA code will keep DRQ asserted for the duration of the
+ * transfer, causing the chip to hog the bus. You probably don't want
+ * this.
+ */
+#define MR_BLOCK_DMA_MODE 0x80 /* rw block mode DMA */
+#define MR_TARGET 0x40 /* rw target mode */
+#define MR_ENABLE_PAR_CHECK 0x20 /* rw enable parity checking */
+#define MR_ENABLE_PAR_INTR 0x10 /* rw enable bad parity interrupt */
+#define MR_ENABLE_EOP_INTR 0x08 /* rw enable eop interrupt */
+#define MR_MONITOR_BSY 0x04 /* rw enable int on unexpected bsy fail */
+#define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */
+#define MR_ARBITRATE 0x01 /* rw start arbitration */
+
+#ifdef PARITY
+#define MR_BASE MR_ENABLE_PAR_CHECK
+#else
+#define MR_BASE 0
+#endif
+
+#define TARGET_COMMAND_REG 3
+#define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */
+#define TCR_ASSERT_REQ 0x08 /* tgt rw assert REQ */
+#define TCR_ASSERT_MSG 0x04 /* tgt rw assert MSG */
+#define TCR_ASSERT_CD 0x02 /* tgt rw assert CD */
+#define TCR_ASSERT_IO 0x01 /* tgt rw assert IO */
+
+#define STATUS_REG 4 /* ro */
+/*
+ * Note : a set bit indicates an active signal, driven by us or another
+ * device.
+ */
+#define SR_RST 0x80
+#define SR_BSY 0x40
+#define SR_REQ 0x20
+#define SR_MSG 0x10
+#define SR_CD 0x08
+#define SR_IO 0x04
+#define SR_SEL 0x02
+#define SR_DBP 0x01
+
+/*
+ * Setting a bit in this register will cause an interrupt to be generated when
+ * BSY is false and SEL true and this bit is asserted on the bus.
+ */
+#define SELECT_ENABLE_REG 4 /* wo */
+
+#define BUS_AND_STATUS_REG 5 /* ro */
+#define BASR_END_DMA_TRANSFER 0x80 /* ro set on end of transfer */
+#define BASR_DRQ 0x40 /* ro mirror of DRQ pin */
+#define BASR_PARITY_ERROR 0x20 /* ro parity error detected */
+#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */
+#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */
+#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */
+#define BASR_ATN 0x02 /* ro BUS status */
+#define BASR_ACK 0x01 /* ro BUS status */
+
+/* Write any value to this register to start a DMA send */
+#define START_DMA_SEND_REG 5 /* wo */
+
+/*
+ * Used in DMA transfer mode, data is latched from the SCSI bus on
+ * the falling edge of REQ (ini) or ACK (tgt)
+ */
+#define INPUT_DATA_REG 6 /* ro */
+
+/* Write any value to this register to start a DMA receive */
+#define START_DMA_TARGET_RECEIVE_REG 6 /* wo */
+
+/* Read this register to clear interrupt conditions */
+#define RESET_PARITY_INTERRUPT_REG 7 /* ro */
+
+/* Write any value to this register to start an ini mode DMA receive */
+#define START_DMA_INITIATOR_RECEIVE_REG 7 /* wo */
+
+#define C400_CONTROL_STATUS_REG NCR53C400_register_offset-8 /* rw */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_53C80_REG 0x80 /* ro 5380 registers busy */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_53C80_INTR 0x10 /* rw Enable 53c80 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Is Host buffer ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer read */
+#define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */
+
+#if 0
+#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
+#else
+#define CSR_BASE CSR_53C80_INTR
+#endif
+
+/* Number of 128-byte blocks to be transferred */
+#define C400_BLOCK_COUNTER_REG NCR53C400_register_offset-7 /* rw */
+
+/* Resume transfer after disconnect */
+#define C400_RESUME_TRANSFER_REG NCR53C400_register_offset-6 /* wo */
+
+/* Access to host buffer stack */
+#define C400_HOST_BUFFER NCR53C400_register_offset-4 /* rw */
+
+
+/* Note : PHASE_* macros are based on the values of the STATUS register */
+#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
+
+#define PHASE_DATAOUT 0
+#define PHASE_DATAIN SR_IO
+#define PHASE_CMDOUT SR_CD
+#define PHASE_STATIN (SR_CD | SR_IO)
+#define PHASE_MSGOUT (SR_MSG | SR_CD)
+#define PHASE_MSGIN (SR_MSG | SR_CD | SR_IO)
+#define PHASE_UNKNOWN 0xff
+
+/*
+ * Convert status register phase to something we can use to set phase in
+ * the target register so we can get phase mismatch interrupts on DMA
+ * transfers.
+ */
+
+#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
+
+/*
+ * The internal should_disconnect() function returns these based on the
+ * expected length of a disconnect if a device supports disconnect/
+ * reconnect.
+ */
+
+#define DISCONNECT_NONE 0
+#define DISCONNECT_TIME_TO_DATA 1
+#define DISCONNECT_LONG 2
+
+/*
+ * "Special" value for the (unsigned char) command tag, to indicate
+ * I_T_L nexus instead of I_T_L_Q.
+ */
+
+#define TAG_NONE 0xff
+
+/*
+ * These are "special" values for the irq and dma_channel fields of the
+ * Scsi_Host structure
+ */
+
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
+
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
+#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
+#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
+#define FLAG_NCR53C400 4 /* NCR53c400 */
+#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
+#define FLAG_DTC3181E 16 /* DTC3181E */
+#define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */
+#define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */
+
+#ifndef ASM
+
+#ifdef SUPPORT_TAGS
+struct tag_alloc {
+ DECLARE_BITMAP(allocated, MAX_TAGS);
+ int nr_allocated;
+ int queue_size;
+};
+#endif
+
+struct NCR5380_hostdata {
+ NCR5380_implementation_fields; /* implementation specific */
+ struct Scsi_Host *host; /* Host backpointer */
+ unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
+ unsigned char targets_present; /* targets we have connected
+ to, so we can call a select
+ failure a retryable condition */
+ volatile unsigned char busy[8]; /* index = target, bit = lun */
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+ volatile int dma_len; /* requested length of DMA */
+#endif
+ volatile unsigned char last_message; /* last message OUT */
+ volatile struct scsi_cmnd *connected; /* currently connected command */
+ volatile struct scsi_cmnd *issue_queue; /* waiting to be issued */
+ volatile struct scsi_cmnd *disconnected_queue; /* waiting for reconnect */
+ volatile int restart_select; /* we have disconnected,
+ used to restart
+ NCR5380_select() */
+ volatile unsigned aborted:1; /* flag, says aborted */
+ int flags;
+ unsigned long time_expires; /* in jiffies, set prior to sleeping */
+ int select_time; /* timer in select for target response */
+ volatile struct scsi_cmnd *selecting;
+ struct delayed_work coroutine; /* our co-routine */
+ struct scsi_eh_save ses;
+ char info[256];
+ int read_overruns; /* number of bytes to cut from a
+ * transfer to handle chip overruns */
+ int retain_dma_intr;
+ struct work_struct main_task;
+ volatile int main_running;
+#ifdef SUPPORT_TAGS
+ struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */
+#endif
+#ifdef PSEUDO_DMA
+ unsigned spin_max_r;
+ unsigned spin_max_w;
+#endif
+};
+
+#ifdef __KERNEL__
+
+#ifndef NDEBUG
+#define NDEBUG (0)
+#endif
+
+#define dprintk(flg, fmt, ...) \
+ do { if ((NDEBUG) & (flg)) \
+ printk(KERN_DEBUG fmt, ## __VA_ARGS__); } while (0)
+
+#if NDEBUG
+#define NCR5380_dprint(flg, arg) \
+ do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)
+#define NCR5380_dprint_phase(flg, arg) \
+ do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0)
+static void NCR5380_print_phase(struct Scsi_Host *instance);
+static void NCR5380_print(struct Scsi_Host *instance);
+#else
+#define NCR5380_dprint(flg, arg) do {} while (0)
+#define NCR5380_dprint_phase(flg, arg) do {} while (0)
+#endif
+
+#if defined(AUTOPROBE_IRQ)
+static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
+#endif
+static int NCR5380_init(struct Scsi_Host *instance, int flags);
+static void NCR5380_exit(struct Scsi_Host *instance);
+static void NCR5380_information_transfer(struct Scsi_Host *instance);
+#ifndef DONT_USE_INTR
+static irqreturn_t NCR5380_intr(int irq, void *dev_id);
+#endif
+static void NCR5380_main(struct work_struct *work);
+static const char *NCR5380_info(struct Scsi_Host *instance);
+static void NCR5380_reselect(struct Scsi_Host *instance);
+static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd);
+#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
+static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
+#endif
+static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
+
+#if (defined(REAL_DMA) || defined(REAL_DMA_POLL))
+
+#if defined(i386) || defined(__alpha__)
+
+/**
+ * NCR5380_pc_dma_setup - setup ISA DMA
+ * @instance: adapter to set up
+ * @ptr: block to transfer (virtual address)
+ * @count: number of bytes to transfer
+ * @mode: DMA controller mode to use
+ *
+ * Program the DMA controller ready to perform an ISA DMA transfer
+ * on this chip.
+ *
+ * Locks: takes and releases the ISA DMA lock.
+ */
+
+static __inline__ int NCR5380_pc_dma_setup(struct Scsi_Host *instance, unsigned char *ptr, unsigned int count, unsigned char mode)
+{
+ unsigned limit;
+ unsigned long bus_addr = virt_to_bus(ptr);
+ unsigned long flags;
+
+ if (instance->dma_channel <= 3) {
+ if (count > 65536)
+ count = 65536;
+ limit = 65536 - (bus_addr & 0xFFFF);
+ } else {
+ if (count > 65536 * 2)
+ count = 65536 * 2;
+ limit = 65536 * 2 - (bus_addr & 0x1FFFF);
+ }
+
+ if (count > limit)
+ count = limit;
+
+ if ((count & 1) || (bus_addr & 1))
+ panic("scsi%d : attempted unaligned DMA transfer\n", instance->host_no);
+
+ flags=claim_dma_lock();
+ disable_dma(instance->dma_channel);
+ clear_dma_ff(instance->dma_channel);
+ set_dma_addr(instance->dma_channel, bus_addr);
+ set_dma_count(instance->dma_channel, count);
+ set_dma_mode(instance->dma_channel, mode);
+ enable_dma(instance->dma_channel);
+ release_dma_lock(flags);
+
+ return count;
+}
+
+/**
+ * NCR5380_pc_dma_write_setup - setup ISA DMA write
+ * @instance: adapter to set up
+ * @ptr: block to transfer (virtual address)
+ * @count: number of bytes to transfer
+ *
+ * Program the DMA controller ready to perform an ISA DMA write to the
+ * SCSI controller.
+ *
+ * Locks: called routines take and release the ISA DMA lock.
+ */
+
+static __inline__ int NCR5380_pc_dma_write_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count)
+{
+ return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_WRITE);
+}
+
+/**
+ * NCR5380_pc_dma_read_setup - setup ISA DMA read
+ * @instance: adapter to set up
+ * @ptr: block to transfer (virtual address)
+ * @count: number of bytes to transfer
+ *
+ * Program the DMA controller ready to perform an ISA DMA read from the
+ * SCSI controller.
+ *
+ * Locks: called routines take and release the ISA DMA lock.
+ */
+
+static __inline__ int NCR5380_pc_dma_read_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count)
+{
+ return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_READ);
+}
+
+/**
+ * NCR5380_pc_dma_residual - return bytes left
+ * @instance: adapter
+ *
+ * Reports the number of bytes left over after the DMA was terminated.
+ *
+ * Locks: takes and releases the ISA DMA lock.
+ */
+
+static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance)
+{
+ unsigned long flags;
+ int tmp;
+
+ flags = claim_dma_lock();
+ clear_dma_ff(instance->dma_channel);
+ tmp = get_dma_residue(instance->dma_channel);
+ release_dma_lock(flags);
+
+ return tmp;
+}
+#endif /* defined(i386) || defined(__alpha__) */
+#endif /* defined(REAL_DMA) */
+#endif /* __KERNEL__ */
+#endif /* ndef ASM */
+#endif /* NCR5380_H */
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
new file mode 100644
index 000000000..42c716147
--- /dev/null
+++ b/drivers/scsi/NCR53c406a.c
@@ -0,0 +1,1091 @@
+/*
+ * NCR53c406.c
+ * Low-level SCSI driver for NCR53c406a chip.
+ * Copyright (C) 1994, 1995, 1996 Normunds Saumanis (normunds@fi.ibm.com)
+ *
+ * LILO command line usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]
+ * Specify IRQ = 0 for non-interrupt driven mode.
+ * FASTPIO = 1 for fast pio mode, 0 for slow mode.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#define NCR53C406A_DEBUG 0
+#define VERBOSE_NCR53C406A_DEBUG 0
+
+/* Set this to 1 for PIO mode (recommended) or to 0 for DMA mode */
+#define USE_PIO 1
+
+#define USE_BIOS 0
+ /* #define BIOS_ADDR 0xD8000 *//* define this if autoprobe fails */
+ /* #define PORT_BASE 0x330 *//* define this if autoprobe fails */
+ /* #define IRQ_LEV 0 *//* define this if autoprobe fails */
+#define DMA_CHAN 5 /* this is ignored if DMA is disabled */
+
+/* Set this to 0 if you encounter kernel lockups while transferring
+ * data in PIO mode */
+#define USE_FAST_PIO 1
+
+/* ============= End of user configurable parameters ============= */
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+/* ============================================================= */
+
+#define WATCHDOG 5000000
+
+#define SYNC_MODE 0 /* Synchronous transfer mode */
+
+#ifdef DEBUG
+#undef NCR53C406A_DEBUG
+#define NCR53C406A_DEBUG 1
+#endif
+
+#if USE_PIO
+#define USE_DMA 0
+#else
+#define USE_DMA 1
+#endif
+
+/* Default configuration */
+#define C1_IMG 0x07 /* ID=7 */
+#define C2_IMG 0x48 /* FE SCSI2 */
+#if USE_DMA
+#define C3_IMG 0x21 /* CDB TE */
+#else
+#define C3_IMG 0x20 /* CDB */
+#endif
+#define C4_IMG 0x04 /* ANE */
+#define C5_IMG 0xb6 /* AA PI SIE POL */
+
+#define REG0 (outb(C4_IMG, CONFIG4))
+#define REG1 (outb(C5_IMG, CONFIG5))
+
+#if NCR53C406A_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_NCR53C406A_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(count) \
+ outb(count & 0xff, TC_LSB); \
+ outb((count >> 8) & 0xff, TC_MSB); \
+ outb((count >> 16) & 0xff, TC_HIGH);
+
+/* Chip commands */
+#define DMA_OP 0x80
+
+#define SCSI_NOP 0x00
+#define FLUSH_FIFO 0x01
+#define CHIP_RESET 0x02
+#define SCSI_RESET 0x03
+#define RESELECT 0x40
+#define SELECT_NO_ATN 0x41
+#define SELECT_ATN 0x42
+#define SELECT_ATN_STOP 0x43
+#define ENABLE_SEL 0x44
+#define DISABLE_SEL 0x45
+#define SELECT_ATN3 0x46
+#define RESELECT3 0x47
+#define TRANSFER_INFO 0x10
+#define INIT_CMD_COMPLETE 0x11
+#define MSG_ACCEPT 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1a
+#define RESET_ATN 0x1b
+#define SEND_MSG 0x20
+#define SEND_STATUS 0x21
+#define SEND_DATA 0x22
+#define DISCONN_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARG_CMD_COMPLETE 0x25
+#define DISCONN 0x27
+#define RECV_MSG 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2a
+#define RECV_CMD_SEQ 0x2b
+#define TARGET_ABORT_DMA 0x04
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+
+#if NCR53C406A_DEBUG
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+/*----------------------------------------------------------------*/
+
+enum Phase {
+ idle,
+ data_out,
+ data_in,
+ command_ph,
+ status_ph,
+ message_out,
+ message_in
+};
+
+/* Static function prototypes */
+static void NCR53c406a_intr(void *);
+static irqreturn_t do_NCR53c406a_intr(int, void *);
+static void chip_init(void);
+static void calc_port_addr(void);
+#ifndef IRQ_LEV
+static int irq_probe(void);
+#endif
+
+/* ================================================================= */
+
+#if USE_BIOS
+static void *bios_base;
+#endif
+
+#ifdef PORT_BASE
+static int port_base = PORT_BASE;
+#else
+static int port_base;
+#endif
+
+#ifdef IRQ_LEV
+static int irq_level = IRQ_LEV;
+#else
+static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized' */
+#endif
+
+#if USE_DMA
+static int dma_chan;
+#endif
+
+#if USE_PIO
+static int fast_pio = USE_FAST_PIO;
+#endif
+
+static Scsi_Cmnd *current_SC;
+static char info_msg[256];
+
+/* ================================================================= */
+
+/* possible BIOS locations */
+#if USE_BIOS
+static void *addresses[] = {
+ (void *) 0xd8000,
+ (void *) 0xc8000
+};
+#define ADDRESS_COUNT ARRAY_SIZE(addresses)
+#endif /* USE_BIOS */
+
+/* possible i/o port addresses */
+static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
+#define PORT_COUNT ARRAY_SIZE(ports)
+
+#ifndef MODULE
+/* possible interrupt channels */
+static unsigned short intrs[] = { 10, 11, 12, 15 };
+#define INTR_COUNT ARRAY_SIZE(intrs)
+#endif /* !MODULE */
+
+/* signatures for NCR 53c406a based controllers */
+#if USE_BIOS
+struct signature {
+ char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] __initdata = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ {
+"Copyright (C) Acculogic, Inc.\r\n2.8M Diskette Extension Bios ver 4.04.03 03/01/1993", 61, 82},};
+
+#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
+#endif /* USE_BIOS */
+
+/* ============================================================ */
+
+/* Control Register Set 0 */
+static int TC_LSB; /* transfer counter lsb */
+static int TC_MSB; /* transfer counter msb */
+static int SCSI_FIFO; /* scsi fifo register */
+static int CMD_REG; /* command register */
+static int STAT_REG; /* status register */
+static int DEST_ID; /* selection/reselection bus id */
+static int INT_REG; /* interrupt status register */
+static int SRTIMOUT; /* select/reselect timeout reg */
+static int SEQ_REG; /* sequence step register */
+static int SYNCPRD; /* synchronous transfer period */
+static int FIFO_FLAGS; /* indicates # of bytes in fifo */
+static int SYNCOFF; /* synchronous offset register */
+static int CONFIG1; /* configuration register */
+static int CLKCONV; /* clock conversion reg */
+ /*static int TESTREG;*//* test mode register */
+static int CONFIG2; /* Configuration 2 Register */
+static int CONFIG3; /* Configuration 3 Register */
+static int CONFIG4; /* Configuration 4 Register */
+static int TC_HIGH; /* Transfer Counter High */
+ /*static int FIFO_BOTTOM;*//* Reserve FIFO byte register */
+
+/* Control Register Set 1 */
+ /*static int JUMPER_SENSE;*//* Jumper sense port reg (r/w) */
+ /*static int SRAM_PTR;*//* SRAM address pointer reg (r/w) */
+ /*static int SRAM_DATA;*//* SRAM data register (r/w) */
+static int PIO_FIFO; /* PIO FIFO registers (r/w) */
+ /*static int PIO_FIFO1;*//* */
+ /*static int PIO_FIFO2;*//* */
+ /*static int PIO_FIFO3;*//* */
+static int PIO_STATUS; /* PIO status (r/w) */
+ /*static int ATA_CMD;*//* ATA command/status reg (r/w) */
+ /*static int ATA_ERR;*//* ATA features/error register (r/w) */
+static int PIO_FLAG; /* PIO flag interrupt enable (r/w) */
+static int CONFIG5; /* Configuration 5 register (r/w) */
+ /*static int SIGNATURE;*//* Signature Register (r) */
+ /*static int CONFIG6;*//* Configuration 6 register (r) */
+
+/* ============================================================== */
+
+#if USE_DMA
+static __inline__ int NCR53c406a_dma_setup(unsigned char *ptr, unsigned int count, unsigned char mode)
+{
+ unsigned limit;
+ unsigned long flags = 0;
+
+ VDEB(printk("dma: before count=%d ", count));
+ if (dma_chan <= 3) {
+ if (count > 65536)
+ count = 65536;
+ limit = 65536 - (((unsigned) ptr) & 0xFFFF);
+ } else {
+ if (count > (65536 << 1))
+ count = (65536 << 1);
+ limit = (65536 << 1) - (((unsigned) ptr) & 0x1FFFF);
+ }
+
+ if (count > limit)
+ count = limit;
+
+ VDEB(printk("after count=%d\n", count));
+ if ((count & 1) || (((unsigned) ptr) & 1))
+ panic("NCR53c406a: attempted unaligned DMA transfer\n");
+
+ flags = claim_dma_lock();
+ disable_dma(dma_chan);
+ clear_dma_ff(dma_chan);
+ set_dma_addr(dma_chan, (long) ptr);
+ set_dma_count(dma_chan, count);
+ set_dma_mode(dma_chan, mode);
+ enable_dma(dma_chan);
+ release_dma_lock(flags);
+
+ return count;
+}
+
+static __inline__ int NCR53c406a_dma_write(unsigned char *src, unsigned int count)
+{
+ return NCR53c406a_dma_setup(src, count, DMA_MODE_WRITE);
+}
+
+static __inline__ int NCR53c406a_dma_read(unsigned char *src, unsigned int count)
+{
+ return NCR53c406a_dma_setup(src, count, DMA_MODE_READ);
+}
+
+static __inline__ int NCR53c406a_dma_residual(void)
+{
+ register int tmp;
+ unsigned long flags;
+
+ flags = claim_dma_lock();
+ clear_dma_ff(dma_chan);
+ tmp = get_dma_residue(dma_chan);
+ release_dma_lock(flags);
+
+ return tmp;
+}
+#endif /* USE_DMA */
+
+#if USE_PIO
+static __inline__ int NCR53c406a_pio_read(unsigned char *request, unsigned int reqlen)
+{
+ int i;
+ int len; /* current scsi fifo size */
+
+ REG1;
+ while (reqlen) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80)
+ return 0;
+
+ switch (i & 0x1e) {
+ default:
+ case 0x10:
+ len = 0;
+ break;
+ case 0x0:
+ len = 1;
+ break;
+ case 0x8:
+ len = 42;
+ break;
+ case 0xc:
+ len = 84;
+ break;
+ case 0xe:
+ len = 128;
+ break;
+ }
+
+ if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+ return 0;
+ }
+
+ if (len) {
+ if (len > reqlen)
+ len = reqlen;
+
+ if (fast_pio && len > 3) {
+ insl(PIO_FIFO, request, len >> 2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ } else {
+ while (len--) {
+ *request++ = inb(PIO_FIFO);
+ reqlen--;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static __inline__ int NCR53c406a_pio_write(unsigned char *request, unsigned int reqlen)
+{
+ int i = 0;
+ int len; /* current scsi fifo size */
+
+ REG1;
+ while (reqlen && !(i & 0x40)) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80) /* error */
+ return 0;
+
+ switch (i & 0x1e) {
+ case 0x10:
+ len = 128;
+ break;
+ case 0x0:
+ len = 84;
+ break;
+ case 0x8:
+ len = 42;
+ break;
+ case 0xc:
+ len = 1;
+ break;
+ default:
+ case 0xe:
+ len = 0;
+ break;
+ }
+
+ if (len) {
+ if (len > reqlen)
+ len = reqlen;
+
+ if (fast_pio && len > 3) {
+ outsl(PIO_FIFO, request, len >> 2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ } else {
+ while (len--) {
+ outb(*request++, PIO_FIFO);
+ reqlen--;
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif /* USE_PIO */
+
+static int __init NCR53c406a_detect(struct scsi_host_template * tpnt)
+{
+ int present = 0;
+ struct Scsi_Host *shpnt = NULL;
+#ifndef PORT_BASE
+ int i;
+#endif
+
+#if USE_BIOS
+ int ii, jj;
+ bios_base = 0;
+ /* look for a valid signature */
+ for (ii = 0; ii < ADDRESS_COUNT && !bios_base; ii++)
+ for (jj = 0; (jj < SIGNATURE_COUNT) && !bios_base; jj++)
+ if (!memcmp((void *) addresses[ii] + signatures[jj].sig_offset, (void *) signatures[jj].signature, (int) signatures[jj].sig_length))
+ bios_base = addresses[ii];
+
+ if (!bios_base) {
+ printk("NCR53c406a: BIOS signature not found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a BIOS found at 0x%x\n", (unsigned int) bios_base);
+ );
+#endif /* USE_BIOS */
+
+#ifdef PORT_BASE
+ if (!request_region(port_base, 0x10, "NCR53c406a")) /* ports already snatched */
+ port_base = 0;
+
+#else /* autodetect */
+ if (port_base) { /* LILO override */
+ if (!request_region(port_base, 0x10, "NCR53c406a"))
+ port_base = 0;
+ } else {
+ for (i = 0; i < PORT_COUNT && !port_base; i++) {
+ if (!request_region(ports[i], 0x10, "NCR53c406a")) {
+ DEB(printk("NCR53c406a: port 0x%x in use\n", ports[i]));
+ } else {
+ VDEB(printk("NCR53c406a: port 0x%x available\n", ports[i]));
+ outb(C5_IMG, ports[i] + 0x0d); /* reg set 1 */
+ if ((inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7 && (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7 && (inb(ports[i] + 0x0e) & 0xf8) == 0x58) {
+ port_base = ports[i];
+ VDEB(printk("NCR53c406a: Sig register valid\n"));
+ VDEB(printk("port_base=0x%x\n", port_base));
+ break;
+ }
+ release_region(ports[i], 0x10);
+ }
+ }
+ }
+#endif /* PORT_BASE */
+
+ if (!port_base) { /* no ports found */
+ printk("NCR53c406a: no available ports found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a detected\n"));
+
+ calc_port_addr();
+ chip_init();
+
+#ifndef IRQ_LEV
+ if (irq_level < 0) { /* LILO override if >= 0 */
+ irq_level = irq_probe();
+ if (irq_level < 0) { /* Trouble */
+ printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level);
+ goto err_release;
+ }
+ }
+#endif
+
+ DEB(printk("NCR53c406a: using port_base 0x%x\n", port_base));
+
+ present = 1;
+ tpnt->proc_name = "NCR53c406a";
+
+ shpnt = scsi_register(tpnt, 0);
+ if (!shpnt) {
+ printk("NCR53c406a: Unable to register host, giving up.\n");
+ goto err_release;
+ }
+
+ if (irq_level > 0) {
+ if (request_irq(irq_level, do_NCR53c406a_intr, 0, "NCR53c406a", shpnt)) {
+ printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
+ goto err_free_scsi;
+ }
+ tpnt->can_queue = 1;
+ DEB(printk("NCR53c406a: allocated IRQ %d\n", irq_level));
+ } else if (irq_level == 0) {
+ tpnt->can_queue = 0;
+ DEB(printk("NCR53c406a: No interrupts detected\n"));
+ printk("NCR53c406a driver no longer supports polling interface\n");
+ printk("Please email linux-scsi@vger.kernel.org\n");
+
+#if USE_DMA
+ printk("NCR53c406a: No interrupts found and DMA mode defined. Giving up.\n");
+#endif /* USE_DMA */
+ goto err_free_scsi;
+ } else {
+ DEB(printk("NCR53c406a: Shouldn't get here!\n"));
+ goto err_free_scsi;
+ }
+
+#if USE_DMA
+ dma_chan = DMA_CHAN;
+ if (request_dma(dma_chan, "NCR53c406a") != 0) {
+ printk("NCR53c406a: unable to allocate DMA channel %d\n", dma_chan);
+ goto err_free_irq;
+ }
+
+ DEB(printk("Allocated DMA channel %d\n", dma_chan));
+#endif /* USE_DMA */
+
+ shpnt->irq = irq_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+#if USE_DMA
+ shpnt->dma = dma_chan;
+#endif
+
+#if USE_DMA
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, DMA channel %d.", port_base, irq_level, dma_chan);
+#else
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, %s PIO mode.", port_base, irq_level, fast_pio ? "fast" : "slow");
+#endif
+
+ return (present);
+
+#if USE_DMA
+ err_free_irq:
+ if (irq_level)
+ free_irq(irq_level, shpnt);
+#endif
+ err_free_scsi:
+ scsi_unregister(shpnt);
+ err_release:
+ release_region(port_base, 0x10);
+ return 0;
+}
+
+static int NCR53c406a_release(struct Scsi_Host *shost)
+{
+ if (shost->irq)
+ free_irq(shost->irq, NULL);
+#if USE_DMA
+ if (shost->dma_channel != 0xff)
+ free_dma(shost->dma_channel);
+#endif
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+
+ scsi_unregister(shost);
+ return 0;
+}
+
+#ifndef MODULE
+/* called from init/main.c */
+static int __init NCR53c406a_setup(char *str)
+{
+ static size_t setup_idx = 0;
+ size_t i;
+ int ints[4];
+
+ DEB(printk("NCR53c406a: Setup called\n");
+ );
+
+ if (setup_idx >= PORT_COUNT - 1) {
+ printk("NCR53c406a: Setup called too many times. Bad LILO params?\n");
+ return 0;
+ }
+ get_options(str, 4, ints);
+ if (ints[0] < 1 || ints[0] > 3) {
+ printk("NCR53c406a: Malformed command line\n");
+ printk("NCR53c406a: Usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]\n");
+ return 0;
+ }
+ for (i = 0; i < PORT_COUNT && !port_base; i++)
+ if (ports[i] == ints[1]) {
+ port_base = ints[1];
+ DEB(printk("NCR53c406a: Specified port_base 0x%x\n", port_base);
+ )
+ }
+ if (!port_base) {
+ printk("NCR53c406a: Invalid PORTBASE 0x%x specified\n", ints[1]);
+ return 0;
+ }
+
+ if (ints[0] > 1) {
+ if (ints[2] == 0) {
+ irq_level = 0;
+ DEB(printk("NCR53c406a: Specified irq %d\n", irq_level);
+ )
+ } else
+ for (i = 0; i < INTR_COUNT && irq_level < 0; i++)
+ if (intrs[i] == ints[2]) {
+ irq_level = ints[2];
+ DEB(printk("NCR53c406a: Specified irq %d\n", port_base);
+ )
+ }
+ if (irq_level < 0)
+ printk("NCR53c406a: Invalid IRQ %d specified\n", ints[2]);
+ }
+
+ if (ints[0] > 2)
+ fast_pio = ints[3];
+
+ DEB(printk("NCR53c406a: port_base=0x%x, irq=%d, fast_pio=%d\n", port_base, irq_level, fast_pio);)
+ return 1;
+}
+
+__setup("ncr53c406a=", NCR53c406a_setup);
+
+#endif /* !MODULE */
+
+static const char *NCR53c406a_info(struct Scsi_Host *SChost)
+{
+ DEB(printk("NCR53c406a_info called\n"));
+ return (info_msg);
+}
+
+#if 0
+static void wait_intr(void)
+{
+ unsigned long i = jiffies + WATCHDOG;
+
+ while (time_after(i, jiffies) && !(inb(STAT_REG) & 0xe0)) { /* wait for a pseudo-interrupt */
+ cpu_relax();
+ barrier();
+ }
+
+ if (time_before_eq(i, jiffies)) { /* Timed out */
+ rtrc(0);
+ current_SC->result = DID_TIME_OUT << 16;
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ NCR53c406a_intr(NULL);
+}
+#endif
+
+static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+{
+ int i;
+
+ VDEB(printk("NCR53c406a_queue called\n"));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->target, (u8)SCpnt->device->lun, scsi_bufflen(SCpnt)));
+
+#if 0
+ VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
+ printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
+ VDEB(printk("\n"));
+#endif
+
+ current_SC = SCpnt;
+ current_SC->scsi_done = done;
+ current_SC->SCp.phase = command_ph;
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+
+ /* We are locked here already by the mid layer */
+ REG0;
+ outb(scmd_id(SCpnt), DEST_ID); /* set destination */
+ outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */
+
+ for (i = 0; i < SCpnt->cmd_len; i++) {
+ outb(SCpnt->cmnd[i], SCSI_FIFO);
+ }
+ outb(SELECT_NO_ATN, CMD_REG);
+
+ rtrc(1);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(NCR53c406a_queue)
+
+static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("NCR53c406a_reset called\n"));
+
+ spin_lock_irq(SCpnt->device->host->host_lock);
+
+ outb(C4_IMG, CONFIG4); /* Select reg set 0 */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG); /* required after reset */
+ outb(SCSI_RESET, CMD_REG);
+ chip_init();
+
+ rtrc(2);
+
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+
+ return SUCCESS;
+}
+
+static int NCR53c406a_biosparm(struct scsi_device *disk,
+ struct block_device *dev,
+ sector_t capacity, int *info_array)
+{
+ int size;
+
+ DEB(printk("NCR53c406a_biosparm called\n"));
+
+ size = capacity;
+ info_array[0] = 64; /* heads */
+ info_array[1] = 32; /* sectors */
+ info_array[2] = size >> 11; /* cylinders */
+ if (info_array[2] > 1024) { /* big disk */
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = size / (255 * 63);
+ }
+ return 0;
+}
+
+static irqreturn_t do_NCR53c406a_intr(int unused, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+ NCR53c406a_intr(dev_id);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void NCR53c406a_intr(void *dev_id)
+{
+ DEB(unsigned char fifo_size;
+ )
+ DEB(unsigned char seq_reg;
+ )
+ unsigned char status, int_reg;
+#if USE_PIO
+ unsigned char pio_status;
+ struct scatterlist *sg;
+ int i;
+#endif
+
+ VDEB(printk("NCR53c406a_intr called\n"));
+
+#if USE_PIO
+ REG1;
+ pio_status = inb(PIO_STATUS);
+#endif
+ REG0;
+ status = inb(STAT_REG);
+ DEB(seq_reg = inb(SEQ_REG));
+ int_reg = inb(INT_REG);
+ DEB(fifo_size = inb(FIFO_FLAGS) & 0x1f);
+
+#if NCR53C406A_DEBUG
+ printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", status, seq_reg, int_reg, fifo_size);
+#if (USE_DMA)
+ printk("\n");
+#else
+ printk(", pio=%02x\n", pio_status);
+#endif /* USE_DMA */
+#endif /* NCR53C406A_DEBUG */
+
+ if (int_reg & 0x80) { /* SCSI reset intr */
+ rtrc(3);
+ DEB(printk("NCR53c406a: reset intr received\n"));
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_RESET << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+#if USE_PIO
+ if (pio_status & 0x80) {
+ printk("NCR53C406A: Warning: PIO error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+#endif /* USE_PIO */
+
+ if (status & 0x20) { /* Parity error */
+ printk("NCR53c406a: Warning: parity error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_PARITY << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if (status & 0x40) { /* Gross error */
+ printk("NCR53c406a: Warning: gross error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if (int_reg & 0x20) { /* Disconnect */
+ DEB(printk("NCR53c406a: disconnect intr received\n"));
+ if (current_SC->SCp.phase != message_in) { /* Unexpected disconnect */
+ current_SC->result = DID_NO_CONNECT << 16;
+ } else { /* Command complete, return status and message */
+ current_SC->result = (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ }
+
+ rtrc(0);
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ switch (status & 0x07) { /* scsi phase */
+ case 0x00: /* DATA-OUT */
+ if (int_reg & 0x10) { /* Target requesting info transfer */
+ rtrc(5);
+ current_SC->SCp.phase = data_out;
+ VDEB(printk("NCR53c406a: Data-Out phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_write(scsi_sglist(current_SC),
+ scsdi_bufflen(current_SC));
+
+#endif /* USE_DMA */
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
+ NCR53c406a_pio_write(sg_virt(sg), sg->length);
+ }
+ REG0;
+#endif /* USE_PIO */
+ }
+ break;
+
+ case 0x01: /* DATA-IN */
+ if (int_reg & 0x10) { /* Target requesting info transfer */
+ rtrc(6);
+ current_SC->SCp.phase = data_in;
+ VDEB(printk("NCR53c406a: Data-In phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_read(scsi_sglist(current_SC),
+ scsdi_bufflen(current_SC));
+#endif /* USE_DMA */
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
+ NCR53c406a_pio_read(sg_virt(sg), sg->length);
+ }
+ REG0;
+#endif /* USE_PIO */
+ }
+ break;
+
+ case 0x02: /* COMMAND */
+ current_SC->SCp.phase = command_ph;
+ printk("NCR53c406a: Warning: Unknown interrupt occurred in command phase!\n");
+ break;
+
+ case 0x03: /* STATUS */
+ rtrc(7);
+ current_SC->SCp.phase = status_ph;
+ VDEB(printk("NCR53c406a: Status phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ outb(INIT_CMD_COMPLETE, CMD_REG);
+ break;
+
+ case 0x04: /* Reserved */
+ case 0x05: /* Reserved */
+ printk("NCR53c406a: WARNING: Reserved phase!!!\n");
+ break;
+
+ case 0x06: /* MESSAGE-OUT */
+ DEB(printk("NCR53c406a: Message-Out phase\n"));
+ current_SC->SCp.phase = message_out;
+ outb(SET_ATN, CMD_REG); /* Reject the message */
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+
+ case 0x07: /* MESSAGE-IN */
+ rtrc(4);
+ VDEB(printk("NCR53c406a: Message-In phase\n"));
+ current_SC->SCp.phase = message_in;
+
+ current_SC->SCp.Status = inb(SCSI_FIFO);
+ current_SC->SCp.Message = inb(SCSI_FIFO);
+
+ VDEB(printk("SCSI FIFO size=%d\n", inb(FIFO_FLAGS) & 0x1f));
+ DEB(printk("Status = %02x Message = %02x\n", current_SC->SCp.Status, current_SC->SCp.Message));
+
+ if (current_SC->SCp.Message == SAVE_POINTERS || current_SC->SCp.Message == DISCONNECT) {
+ outb(SET_ATN, CMD_REG); /* Reject message */
+ DEB(printk("Discarding SAVE_POINTERS message\n"));
+ }
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+ }
+}
+
+#ifndef IRQ_LEV
+static int irq_probe(void)
+{
+ int irqs, irq;
+ unsigned long i;
+
+ inb(INT_REG); /* clear the interrupt register */
+ irqs = probe_irq_on();
+
+ /* Invalid command will cause an interrupt */
+ REG0;
+ outb(0xff, CMD_REG);
+
+ /* Wait for the interrupt to occur */
+ i = jiffies + WATCHDOG;
+ while (time_after(i, jiffies) && !(inb(STAT_REG) & 0x80))
+ barrier();
+ if (time_before_eq(i, jiffies)) { /* Timed out, must be hardware trouble */
+ probe_irq_off(irqs);
+ return -1;
+ }
+
+ irq = probe_irq_off(irqs);
+
+ /* Kick the chip */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG);
+ chip_init();
+
+ return irq;
+}
+#endif /* IRQ_LEV */
+
+static void chip_init(void)
+{
+ REG1;
+#if USE_DMA
+ outb(0x00, PIO_STATUS);
+#else /* USE_PIO */
+ outb(0x01, PIO_STATUS);
+#endif
+ outb(0x00, PIO_FLAG);
+
+ outb(C4_IMG, CONFIG4); /* REG0; */
+ outb(C3_IMG, CONFIG3);
+ outb(C2_IMG, CONFIG2);
+ outb(C1_IMG, CONFIG1);
+
+ outb(0x05, CLKCONV); /* clock conversion factor */
+ outb(0x9C, SRTIMOUT); /* Selection timeout */
+ outb(0x05, SYNCPRD); /* Synchronous transfer period */
+ outb(SYNC_MODE, SYNCOFF); /* synchronous mode */
+}
+
+static void __init calc_port_addr(void)
+{
+ /* Control Register Set 0 */
+ TC_LSB = (port_base + 0x00);
+ TC_MSB = (port_base + 0x01);
+ SCSI_FIFO = (port_base + 0x02);
+ CMD_REG = (port_base + 0x03);
+ STAT_REG = (port_base + 0x04);
+ DEST_ID = (port_base + 0x04);
+ INT_REG = (port_base + 0x05);
+ SRTIMOUT = (port_base + 0x05);
+ SEQ_REG = (port_base + 0x06);
+ SYNCPRD = (port_base + 0x06);
+ FIFO_FLAGS = (port_base + 0x07);
+ SYNCOFF = (port_base + 0x07);
+ CONFIG1 = (port_base + 0x08);
+ CLKCONV = (port_base + 0x09);
+ /* TESTREG = (port_base+0x0A); */
+ CONFIG2 = (port_base + 0x0B);
+ CONFIG3 = (port_base + 0x0C);
+ CONFIG4 = (port_base + 0x0D);
+ TC_HIGH = (port_base + 0x0E);
+ /* FIFO_BOTTOM = (port_base+0x0F); */
+
+ /* Control Register Set 1 */
+ /* JUMPER_SENSE = (port_base+0x00); */
+ /* SRAM_PTR = (port_base+0x01); */
+ /* SRAM_DATA = (port_base+0x02); */
+ PIO_FIFO = (port_base + 0x04);
+ /* PIO_FIFO1 = (port_base+0x05); */
+ /* PIO_FIFO2 = (port_base+0x06); */
+ /* PIO_FIFO3 = (port_base+0x07); */
+ PIO_STATUS = (port_base + 0x08);
+ /* ATA_CMD = (port_base+0x09); */
+ /* ATA_ERR = (port_base+0x0A); */
+ PIO_FLAG = (port_base + 0x0B);
+ CONFIG5 = (port_base + 0x0D);
+ /* SIGNATURE = (port_base+0x0E); */
+ /* CONFIG6 = (port_base+0x0F); */
+}
+
+MODULE_LICENSE("GPL");
+
+/* NOTE: scatter-gather support only works in PIO mode.
+ * Use SG_NONE if DMA mode is enabled!
+ */
+
+static struct scsi_host_template driver_template =
+{
+ .proc_name = "NCR53c406a" /* proc_name */,
+ .name = "NCR53c406a" /* name */,
+ .detect = NCR53c406a_detect /* detect */,
+ .release = NCR53c406a_release,
+ .info = NCR53c406a_info /* info */,
+ .queuecommand = NCR53c406a_queue /* queuecommand */,
+ .eh_host_reset_handler = NCR53c406a_host_reset /* reset */,
+ .bios_param = NCR53c406a_biosparm /* biosparm */,
+ .can_queue = 1 /* can_queue */,
+ .this_id = 7 /* SCSI ID of the chip */,
+ .sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/,
+ .cmd_per_lun = 1 /* commands per lun */,
+ .unchecked_isa_dma = 1 /* unchecked_isa_dma */,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+#include "scsi_module.c"
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
new file mode 100644
index 000000000..b39a2409a
--- /dev/null
+++ b/drivers/scsi/NCR_D700.c
@@ -0,0 +1,405 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* NCR Dual 700 MCA SCSI Driver
+ *
+ * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+ */
+
+/* Notes:
+ *
+ * Most of the work is done in the chip specific module, 53c700.o
+ *
+ * TODO List:
+ *
+ * 1. Extract the SCSI ID from the voyager CMOS table (necessary to
+ * support multi-host environments.
+ *
+ * */
+
+
+/* CHANGELOG
+ *
+ * Version 2.2
+ *
+ * Added mca_set_adapter_name().
+ *
+ * Version 2.1
+ *
+ * Modularise the driver into a Board piece (this file) and a chip
+ * piece 53c700.[ch] and 53c700.scr, added module options. You can
+ * now specify the scsi id by the parameters
+ *
+ * NCR_D700=slot:<n> [siop:<n>] id:<n> ....
+ *
+ * They need to be comma separated if compiled into the kernel
+ *
+ * Version 2.0
+ *
+ * Initial implementation of TCQ (Tag Command Queueing). TCQ is full
+ * featured and uses the clock algorithm to keep track of outstanding
+ * tags and guard against individual tag starvation. Also fixed a bug
+ * in all of the 1.x versions where the D700_data_residue() function
+ * was returning results off by 32 bytes (and thus causing the same 32
+ * bytes to be written twice corrupting the data block). It turns out
+ * the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones
+ * like the 53c710 (The 710 is the only data manual still available,
+ * which I'd been using to program the 700).
+ *
+ * Version 1.2
+ *
+ * Much improved message handling engine
+ *
+ * Version 1.1
+ *
+ * Add code to handle selection reasonably correctly. By the time we
+ * get the selection interrupt, we've already responded, but drop off the
+ * bus and hope the selector will go away.
+ *
+ * Version 1.0:
+ *
+ * Initial release. Fully functional except for procfs and tag
+ * command queueing. Has only been tested on cards with 53c700-66
+ * chips and only single ended. Features are
+ *
+ * 1. Synchronous data transfers to offset 8 (limit of 700-66) and
+ * 100ns (10MHz) limit of SCSI-2
+ *
+ * 2. Disconnection and reselection
+ *
+ * Testing:
+ *
+ * I've only really tested this with the 700-66 chip, but have done
+ * soak tests in multi-device environments to verify that
+ * disconnections and reselections are being processed correctly.
+ * */
+
+#define NCR_D700_VERSION "2.2"
+
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mca.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+#include "NCR_D700.h"
+
+static char *NCR_D700; /* command line from insmod */
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
+MODULE_LICENSE("GPL");
+module_param(NCR_D700, charp, 0);
+
+static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
+ { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+static int __init
+param_setup(char *string)
+{
+ char *pos = string, *next;
+ int slot = -1, siop = -1;
+
+ while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
+ int val = (int)simple_strtoul(++next, NULL, 0);
+
+ if(!strncmp(pos, "slot:", 5))
+ slot = val;
+ else if(!strncmp(pos, "siop:", 5))
+ siop = val;
+ else if(!strncmp(pos, "id:", 3)) {
+ if(slot == -1) {
+ printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n");
+ } else if(slot > MCA_MAX_SLOT_NR) {
+ printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val);
+ } else {
+ if(siop != 0 && siop != 1) {
+ id_array[slot*2] = val;
+ id_array[slot*2 + 1] =val;
+ } else {
+ id_array[slot*2 + siop] = val;
+ }
+ }
+ }
+ if((pos = strchr(pos, ARG_SEP)) != NULL)
+ pos++;
+ }
+ return 1;
+}
+
+/* Host template. The 53c700 routine NCR_700_detect will
+ * fill in all of the missing routines */
+static struct scsi_host_template NCR_D700_driver_template = {
+ .module = THIS_MODULE,
+ .name = "NCR Dual 700 MCA",
+ .proc_name = "NCR_D700",
+ .this_id = 7,
+};
+
+/* We needs this helper because we have two hosts per struct device */
+struct NCR_D700_private {
+ struct device *dev;
+ struct Scsi_Host *hosts[2];
+ char name[30];
+ char pad;
+};
+
+static int
+NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
+ int slot, u32 region, int differential)
+{
+ struct NCR_700_Host_Parameters *hostdata;
+ struct Scsi_Host *host;
+ int ret;
+
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
+ "data, detatching\n", siop);
+ return -ENOMEM;
+ }
+
+ if (!request_region(region, 64, "NCR_D700")) {
+ printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
+ region);
+ ret = -ENODEV;
+ goto region_failed;
+ }
+
+ /* Fill in the three required pieces of hostdata */
+ hostdata->base = ioport_map(region, 64);
+ hostdata->differential = (((1<<siop) & differential) != 0);
+ hostdata->clock = NCR_D700_CLOCK_MHZ;
+ hostdata->burst_length = 8;
+
+ /* and register the siop */
+ host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
+ if (!host) {
+ ret = -ENOMEM;
+ goto detect_failed;
+ }
+
+ p->hosts[siop] = host;
+ /* FIXME: read this from SUS */
+ host->this_id = id_array[slot * 2 + siop];
+ host->irq = irq;
+ host->base = region;
+ scsi_scan_host(host);
+
+ return 0;
+
+ detect_failed:
+ release_region(region, 64);
+ region_failed:
+ kfree(hostdata);
+
+ return ret;
+}
+
+static irqreturn_t
+NCR_D700_intr(int irq, void *data)
+{
+ struct NCR_D700_private *p = (struct NCR_D700_private *)data;
+ int i, found = 0;
+
+ for (i = 0; i < 2; i++)
+ if (p->hosts[i] &&
+ NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED)
+ found++;
+
+ return found ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* Detect a D700 card. Note, because of the setup --- the chips are
+ * essentially connectecd to the MCA bus independently, it is easier
+ * to set them up as two separate host adapters, rather than one
+ * adapter with two channels */
+static int
+NCR_D700_probe(struct device *dev)
+{
+ struct NCR_D700_private *p;
+ int differential;
+ static int banner = 1;
+ struct mca_device *mca_dev = to_mca_device(dev);
+ int slot = mca_dev->slot;
+ int found = 0;
+ int irq, i;
+ int pos3j, pos3k, pos3a, pos3b, pos4;
+ __u32 base_addr, offset_addr;
+
+ /* enable board interrupt */
+ pos4 = mca_device_read_pos(mca_dev, 4);
+ pos4 |= 0x4;
+ mca_device_write_pos(mca_dev, 4, pos4);
+
+ mca_device_write_pos(mca_dev, 6, 9);
+ pos3j = mca_device_read_pos(mca_dev, 3);
+ mca_device_write_pos(mca_dev, 6, 10);
+ pos3k = mca_device_read_pos(mca_dev, 3);
+ mca_device_write_pos(mca_dev, 6, 0);
+ pos3a = mca_device_read_pos(mca_dev, 3);
+ mca_device_write_pos(mca_dev, 6, 1);
+ pos3b = mca_device_read_pos(mca_dev, 3);
+
+ base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0;
+ offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70;
+
+ irq = (pos4 & 0x3) + 11;
+ if(irq >= 13)
+ irq++;
+ if(banner) {
+ printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n"
+ "NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n"
+ "NCR D700:\n");
+ banner = 0;
+ }
+ /* now do the bus related transforms */
+ irq = mca_device_transform_irq(mca_dev, irq);
+ base_addr = mca_device_transform_ioport(mca_dev, base_addr);
+ offset_addr = mca_device_transform_ioport(mca_dev, offset_addr);
+
+ printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr);
+
+ /*outb(BOARD_RESET, base_addr);*/
+
+ /* clear any pending interrupts */
+ (void)inb(base_addr + 0x08);
+ /* get modctl, used later for setting diff bits */
+ switch(differential = (inb(base_addr + 0x08) >> 6)) {
+ case 0x00:
+ /* only SIOP1 differential */
+ differential = 0x02;
+ break;
+ case 0x01:
+ /* Both SIOPs differential */
+ differential = 0x03;
+ break;
+ case 0x03:
+ /* No SIOPs differential */
+ differential = 0x00;
+ break;
+ default:
+ printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n",
+ differential);
+ differential = 0x00;
+ break;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->dev = dev;
+ snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
+ if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
+ printk(KERN_ERR "D700: request_irq failed\n");
+ kfree(p);
+ return -EBUSY;
+ }
+ /* plumb in both 700 chips */
+ for (i = 0; i < 2; i++) {
+ int err;
+
+ if ((err = NCR_D700_probe_one(p, i, irq, slot,
+ offset_addr + (0x80 * i),
+ differential)) != 0)
+ printk("D700: SIOP%d: probe failed, error = %d\n",
+ i, err);
+ else
+ found++;
+ }
+
+ if (!found) {
+ kfree(p);
+ return -ENODEV;
+ }
+
+ mca_device_set_claim(mca_dev, 1);
+ mca_device_set_name(mca_dev, "NCR_D700");
+ dev_set_drvdata(dev, p);
+ return 0;
+}
+
+static void
+NCR_D700_remove_one(struct Scsi_Host *host)
+{
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]);
+ free_irq(host->irq, host);
+ release_region(host->base, 64);
+}
+
+static int
+NCR_D700_remove(struct device *dev)
+{
+ struct NCR_D700_private *p = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < 2; i++)
+ NCR_D700_remove_one(p->hosts[i]);
+
+ kfree(p);
+ return 0;
+}
+
+static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 };
+
+static struct mca_driver NCR_D700_driver = {
+ .id_table = NCR_D700_id_table,
+ .driver = {
+ .name = "NCR_D700",
+ .bus = &mca_bus_type,
+ .probe = NCR_D700_probe,
+ .remove = NCR_D700_remove,
+ },
+};
+
+static int __init NCR_D700_init(void)
+{
+#ifdef MODULE
+ if (NCR_D700)
+ param_setup(NCR_D700);
+#endif
+
+ return mca_register_driver(&NCR_D700_driver);
+}
+
+static void __exit NCR_D700_exit(void)
+{
+ mca_unregister_driver(&NCR_D700_driver);
+}
+
+module_init(NCR_D700_init);
+module_exit(NCR_D700_exit);
+
+__setup("NCR_D700=", param_setup);
diff --git a/drivers/scsi/NCR_D700.h b/drivers/scsi/NCR_D700.h
new file mode 100644
index 000000000..f167af6bd
--- /dev/null
+++ b/drivers/scsi/NCR_D700.h
@@ -0,0 +1,29 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* NCR Dual 700 MCA SCSI Driver
+ *
+ * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
+ */
+
+#ifndef _NCR_D700_H
+#define _NCR_D700_H
+
+/* Don't turn on debugging messages */
+#undef NCR_D700_DEBUG
+
+/* The MCA identifier */
+#define NCR_D700_MCA_ID 0x0092
+
+/* Defines for the Board registers */
+#define BOARD_RESET 0x80 /* board level reset */
+#define ADD_PARENB 0x04 /* Address Parity Enabled */
+#define DAT_PARENB 0x01 /* Data Parity Enabled */
+#define SFBK_ENB 0x10 /* SFDBK Interrupt Enabled */
+#define LED0GREEN 0x20 /* Led 0 (red 0; green 1) */
+#define LED1GREEN 0x40 /* Led 1 (red 0; green 1) */
+#define LED0RED 0xDF /* Led 0 (red 0; green 1) */
+#define LED1RED 0xBF /* Led 1 (red 0; green 1) */
+
+#define NCR_D700_CLOCK_MHZ 50
+
+#endif
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
new file mode 100644
index 000000000..05835bf1b
--- /dev/null
+++ b/drivers/scsi/NCR_Q720.c
@@ -0,0 +1,377 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* NCR Quad 720 MCA SCSI Driver
+ *
+ * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
+ */
+
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mca.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+#include "ncr53c8xx.h"
+
+#include "NCR_Q720.h"
+
+static struct ncr_chip q720_chip __initdata = {
+ .revision_id = 0x0f,
+ .burst_max = 3,
+ .offset_max = 8,
+ .nr_divisor = 4,
+ .features = FE_WIDE | FE_DIFF | FE_VARCLK,
+};
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
+MODULE_LICENSE("GPL");
+
+#define NCR_Q720_VERSION "0.9"
+
+/* We needs this helper because we have up to four hosts per struct device */
+struct NCR_Q720_private {
+ struct device *dev;
+ void __iomem * mem_base;
+ __u32 phys_mem_base;
+ __u32 mem_size;
+ __u8 irq;
+ __u8 siops;
+ __u8 irq_enable;
+ struct Scsi_Host *hosts[4];
+};
+
+static struct scsi_host_template NCR_Q720_tpnt = {
+ .module = THIS_MODULE,
+ .proc_name = "NCR_Q720",
+};
+
+static irqreturn_t
+NCR_Q720_intr(int irq, void *data)
+{
+ struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
+ __u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
+ __u8 siop;
+
+ sir |= ~p->irq_enable;
+
+ if(sir == 0xff)
+ return IRQ_NONE;
+
+
+ while((siop = ffz(sir)) < p->siops) {
+ sir |= 1<<siop;
+ ncr53c8xx_intr(irq, p->hosts[siop]);
+ }
+ return IRQ_HANDLED;
+}
+
+static int __init
+NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
+ int irq, int slot, __u32 paddr, void __iomem *vaddr)
+{
+ struct ncr_device device;
+ __u8 scsi_id;
+ static int unit = 0;
+ __u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
+ __u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
+ __u8 version;
+ int error;
+
+ scsi_id = scsr1 >> 4;
+ /* enable burst length 16 (FIXME: should allow this) */
+ scsr1 |= 0x02;
+ /* force a siop reset */
+ scsr1 |= 0x04;
+ writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
+ udelay(10);
+ version = readb(vaddr + 0x18) >> 4;
+
+ memset(&device, 0, sizeof(struct ncr_device));
+ /* Initialise ncr_device structure with items required by ncr_attach. */
+ device.chip = q720_chip;
+ device.chip.revision_id = version;
+ device.host_id = scsi_id;
+ device.dev = p->dev;
+ device.slot.base = paddr;
+ device.slot.base_c = paddr;
+ device.slot.base_v = vaddr;
+ device.slot.irq = irq;
+ device.differential = differential ? 2 : 0;
+ printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
+ (unsigned long)paddr, differential, version);
+
+ p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);
+
+ if (!p->hosts[siop])
+ goto fail;
+
+ p->irq_enable |= (1<<siop);
+ scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
+ /* clear the disable interrupt bit */
+ scsr1 &= ~0x01;
+ writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
+
+ error = scsi_add_host(p->hosts[siop], p->dev);
+ if (error)
+ ncr53c8xx_release(p->hosts[siop]);
+ else
+ scsi_scan_host(p->hosts[siop]);
+ return error;
+
+ fail:
+ return -ENODEV;
+}
+
+/* Detect a Q720 card. Note, because of the setup --- the chips are
+ * essentially connectecd to the MCA bus independently, it is easier
+ * to set them up as two separate host adapters, rather than one
+ * adapter with two channels */
+static int __init
+NCR_Q720_probe(struct device *dev)
+{
+ struct NCR_Q720_private *p;
+ static int banner = 1;
+ struct mca_device *mca_dev = to_mca_device(dev);
+ int slot = mca_dev->slot;
+ int found = 0;
+ int irq, i, siops;
+ __u8 pos2, pos4, asr2, asr9, asr10;
+ __u16 io_base;
+ __u32 base_addr, mem_size;
+ void __iomem *mem_base;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pos2 = mca_device_read_pos(mca_dev, 2);
+ /* enable device */
+ pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
+ mca_device_write_pos(mca_dev, 2, pos2);
+
+ io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;
+
+
+ if(banner) {
+ printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
+ "NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
+ "NCR Q720:\n");
+ banner = 0;
+ }
+ io_base = mca_device_transform_ioport(mca_dev, io_base);
+
+ /* OK, this is phase one of the bootstrap, we now know the
+ * I/O space base address. All the configuration registers
+ * are mapped here (including pos) */
+
+ /* sanity check I/O mapping */
+ i = inb(io_base) | (inb(io_base+1)<<8);
+ if(i != NCR_Q720_MCA_ID) {
+ printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
+ kfree(p);
+ return -ENODEV;
+ }
+
+ /* Phase II, find the ram base and memory map the board register */
+ pos4 = inb(io_base + 4);
+ /* enable streaming data */
+ pos4 |= 0x01;
+ outb(pos4, io_base + 4);
+ base_addr = (pos4 & 0x7e) << 20;
+ base_addr += (pos4 & 0x80) << 23;
+ asr10 = inb(io_base + 0x12);
+ base_addr += (asr10 & 0x80) << 24;
+ base_addr += (asr10 & 0x70) << 23;
+
+ /* OK, got the base addr, now we need to find the ram size,
+ * enable and map it */
+ asr9 = inb(io_base + 0x11);
+ i = (asr9 & 0xc0) >> 6;
+ if(i == 0)
+ mem_size = 1024;
+ else
+ mem_size = 1 << (19 + i);
+
+ /* enable the sram mapping */
+ asr9 |= 0x20;
+
+ /* disable the rom mapping */
+ asr9 &= ~0x10;
+
+ outb(asr9, io_base + 0x11);
+
+ if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
+ printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx",
+ (unsigned long)base_addr,
+ (unsigned long)(base_addr + mem_size));
+ goto out_free;
+ }
+
+ if (dma_declare_coherent_memory(dev, base_addr, base_addr,
+ mem_size, DMA_MEMORY_MAP)
+ != DMA_MEMORY_MAP) {
+ printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
+ goto out_release_region;
+ }
+
+ /* The first 1k of the memory buffer is a memory map of the registers
+ */
+ mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
+ 1024);
+ if (IS_ERR(mem_base)) {
+ printk("NCR_Q720 failed to reserve memory mapped region\n");
+ goto out_release;
+ }
+
+ /* now also enable accesses in asr 2 */
+ asr2 = inb(io_base + 0x0a);
+
+ asr2 |= 0x01;
+
+ outb(asr2, io_base + 0x0a);
+
+ /* get the number of SIOPs (this should be 2 or 4) */
+ siops = ((asr2 & 0xe0) >> 5) + 1;
+
+ /* sanity check mapping (again) */
+ i = readw(mem_base);
+ if(i != NCR_Q720_MCA_ID) {
+ printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
+ goto out_release;
+ }
+
+ irq = readb(mem_base + 5) & 0x0f;
+
+
+ /* now do the bus related transforms */
+ irq = mca_device_transform_irq(mca_dev, irq);
+
+ printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
+ printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024);
+
+ p->dev = dev;
+ p->mem_base = mem_base;
+ p->phys_mem_base = base_addr;
+ p->mem_size = mem_size;
+ p->irq = irq;
+ p->siops = siops;
+
+ if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
+ printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
+ goto out_release;
+ }
+ /* disable all the siop interrupts */
+ for(i = 0; i < siops; i++) {
+ void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
+ + i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
+ __u8 scsr1 = readb(reg_scsr1);
+ scsr1 |= 0x01;
+ writeb(scsr1, reg_scsr1);
+ }
+
+ /* plumb in all 720 chips */
+ for (i = 0; i < siops; i++) {
+ void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
+ + i*NCR_Q720_SIOP_SHIFT;
+ __u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
+ + i*NCR_Q720_SIOP_SHIFT;
+ __u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
+ + i*NCR_Q720_SIOP_SHIFT;
+ int err;
+
+ outb(0xff, port + 0x40);
+ outb(0x07, port + 0x41);
+ if ((err = NCR_Q720_probe_one(p, i, irq, slot,
+ siop_p_base, siop_v_base)) != 0)
+ printk("Q720: SIOP%d: probe failed, error = %d\n",
+ i, err);
+ else
+ found++;
+ }
+
+ if (!found) {
+ kfree(p);
+ return -ENODEV;
+ }
+
+ mca_device_set_claim(mca_dev, 1);
+ mca_device_set_name(mca_dev, "NCR_Q720");
+ dev_set_drvdata(dev, p);
+
+ return 0;
+
+ out_release:
+ dma_release_declared_memory(dev);
+ out_release_region:
+ release_mem_region(base_addr, mem_size);
+ out_free:
+ kfree(p);
+
+ return -ENODEV;
+}
+
+static void __exit
+NCR_Q720_remove_one(struct Scsi_Host *host)
+{
+ scsi_remove_host(host);
+ ncr53c8xx_release(host);
+}
+
+static int __exit
+NCR_Q720_remove(struct device *dev)
+{
+ struct NCR_Q720_private *p = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < p->siops; i++)
+ if(p->hosts[i])
+ NCR_Q720_remove_one(p->hosts[i]);
+
+ dma_release_declared_memory(dev);
+ release_mem_region(p->phys_mem_base, p->mem_size);
+ free_irq(p->irq, p);
+ kfree(p);
+ return 0;
+}
+
+static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
+
+static struct mca_driver NCR_Q720_driver = {
+ .id_table = NCR_Q720_id_table,
+ .driver = {
+ .name = "NCR_Q720",
+ .bus = &mca_bus_type,
+ .probe = NCR_Q720_probe,
+ .remove = NCR_Q720_remove,
+ },
+};
+
+static int __init
+NCR_Q720_init(void)
+{
+ int ret = ncr53c8xx_init();
+ if (!ret)
+ ret = mca_register_driver(&NCR_Q720_driver);
+ if (ret)
+ ncr53c8xx_exit();
+ return ret;
+}
+
+static void __exit
+NCR_Q720_exit(void)
+{
+ mca_unregister_driver(&NCR_Q720_driver);
+ ncr53c8xx_exit();
+}
+
+module_init(NCR_Q720_init);
+module_exit(NCR_Q720_exit);
diff --git a/drivers/scsi/NCR_Q720.h b/drivers/scsi/NCR_Q720.h
new file mode 100644
index 000000000..7b9209008
--- /dev/null
+++ b/drivers/scsi/NCR_Q720.h
@@ -0,0 +1,28 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* NCR Quad 720 MCA SCSI Driver
+ *
+ * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
+ */
+
+#ifndef _NCR_Q720_H
+#define _NCR_Q720_H
+
+/* The MCA identifier */
+#define NCR_Q720_MCA_ID 0x0720
+
+#define NCR_Q720_CLOCK_MHZ 30
+
+#define NCR_Q720_POS2_BOARD_ENABLE 0x01
+#define NCR_Q720_POS2_INTERRUPT_ENABLE 0x02
+#define NCR_Q720_POS2_PARITY_DISABLE 0x04
+#define NCR_Q720_POS2_IO_MASK 0xf8
+#define NCR_Q720_POS2_IO_SHIFT 8
+
+#define NCR_Q720_CHIP_REGISTER_OFFSET 0x200
+#define NCR_Q720_SCSR_OFFSET 0x070
+#define NCR_Q720_SIOP_SHIFT 0x080
+
+#endif
+
+
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
new file mode 100644
index 000000000..7e33a61c1
--- /dev/null
+++ b/drivers/scsi/a100u2w.c
@@ -0,0 +1,1241 @@
+/*
+ * Initio A100 device driver for Linux.
+ *
+ * Copyright (c) 1994-1998 Initio Corporation
+ * Copyright (c) 2003-2004 Christoph Hellwig
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Revision History:
+ * 07/02/98 hl - v.91n Initial drivers.
+ * 09/14/98 hl - v1.01 Support new Kernel.
+ * 09/22/98 hl - v1.01a Support reset.
+ * 09/24/98 hl - v1.01b Fixed reset.
+ * 10/05/98 hl - v1.02 split the source code and release.
+ * 12/19/98 bv - v1.02a Use spinlocks for 2.1.95 and up
+ * 01/31/99 bv - v1.02b Use mdelay instead of waitForPause
+ * 08/08/99 bv - v1.02c Use waitForPause again.
+ * 06/25/02 Doug Ledford <dledford@redhat.com> - v1.02d
+ * - Remove limit on number of controllers
+ * - Port to DMA mapping API
+ * - Clean up interrupt handler registration
+ * - Fix memory leaks
+ * - Fix allocation of scsi host structs and private data
+ * 11/18/03 Christoph Hellwig <hch@lst.de>
+ * - Port to new probing API
+ * - Fix some more leaks in init failure cases
+ * 9/28/04 Christoph Hellwig <hch@lst.de>
+ * - merge the two source files
+ * - remove internal queueing code
+ * 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * - Grand cleanup and Linuxisation
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "a100u2w.h"
+
+
+static struct orc_scb *__orc_alloc_scb(struct orc_host * host);
+static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb);
+
+static struct orc_nvram nvram, *nvramp = &nvram;
+
+static u8 default_nvram[64] =
+{
+/*----------header -------------*/
+ 0x01, /* 0x00: Sub System Vendor ID 0 */
+ 0x11, /* 0x01: Sub System Vendor ID 1 */
+ 0x60, /* 0x02: Sub System ID 0 */
+ 0x10, /* 0x03: Sub System ID 1 */
+ 0x00, /* 0x04: SubClass */
+ 0x01, /* 0x05: Vendor ID 0 */
+ 0x11, /* 0x06: Vendor ID 1 */
+ 0x60, /* 0x07: Device ID 0 */
+ 0x10, /* 0x08: Device ID 1 */
+ 0x00, /* 0x09: Reserved */
+ 0x00, /* 0x0A: Reserved */
+ 0x01, /* 0x0B: Revision of Data Structure */
+ /* -- Host Adapter Structure --- */
+ 0x01, /* 0x0C: Number Of SCSI Channel */
+ 0x01, /* 0x0D: BIOS Configuration 1 */
+ 0x00, /* 0x0E: BIOS Configuration 2 */
+ 0x00, /* 0x0F: BIOS Configuration 3 */
+ /* --- SCSI Channel 0 Configuration --- */
+ 0x07, /* 0x10: H/A ID */
+ 0x83, /* 0x11: Channel Configuration */
+ 0x20, /* 0x12: MAX TAG per target */
+ 0x0A, /* 0x13: SCSI Reset Recovering time */
+ 0x00, /* 0x14: Channel Configuration4 */
+ 0x00, /* 0x15: Channel Configuration5 */
+ /* SCSI Channel 0 Target Configuration */
+ /* 0x16-0x25 */
+ 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+ 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+ /* --- SCSI Channel 1 Configuration --- */
+ 0x07, /* 0x26: H/A ID */
+ 0x83, /* 0x27: Channel Configuration */
+ 0x20, /* 0x28: MAX TAG per target */
+ 0x0A, /* 0x29: SCSI Reset Recovering time */
+ 0x00, /* 0x2A: Channel Configuration4 */
+ 0x00, /* 0x2B: Channel Configuration5 */
+ /* SCSI Channel 1 Target Configuration */
+ /* 0x2C-0x3B */
+ 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+ 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+ 0x00, /* 0x3C: Reserved */
+ 0x00, /* 0x3D: Reserved */
+ 0x00, /* 0x3E: Reserved */
+ 0x00 /* 0x3F: Checksum */
+};
+
+
+static u8 wait_chip_ready(struct orc_host * host)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
+ if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
+ return 1;
+ mdelay(100);
+ }
+ return 0;
+}
+
+static u8 wait_firmware_ready(struct orc_host * host)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
+ if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
+ return 1;
+ mdelay(100); /* wait 100ms before try again */
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static u8 wait_scsi_reset_done(struct orc_host * host)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
+ if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
+ return 1;
+ mdelay(100); /* wait 100ms before try again */
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static u8 wait_HDO_off(struct orc_host * host)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
+ if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */
+ return 1;
+ mdelay(100); /* wait 100ms before try again */
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static u8 wait_hdi_set(struct orc_host * host, u8 * data)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
+ if ((*data = inb(host->base + ORC_HSTUS)) & HDI)
+ return 1; /* Wait HDI set */
+ mdelay(100); /* wait 100ms before try again */
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static unsigned short orc_read_fwrev(struct orc_host * host)
+{
+ u16 version;
+ u8 data;
+
+ outb(ORC_CMD_VERSION, host->base + ORC_HDATA);
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
+ return 0;
+ version = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
+
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
+ return 0;
+ version |= inb(host->base + ORC_HDATA) << 8;
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
+
+ return version;
+}
+
+/***************************************************************************/
+static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value)
+{
+ outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ outb(address, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ outb(value, host->base + ORC_HDATA); /* Write value */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ return 1;
+}
+
+/***************************************************************************/
+static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
+{
+ unsigned char data;
+
+ outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ outb(address, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
+ return 0;
+ *ptr = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
+
+ return 1;
+
+}
+
+/**
+ * orc_exec_sb - Queue an SCB with the HA
+ * @host: host adapter the SCB belongs to
+ * @scb: SCB to queue for execution
+ */
+
+static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb)
+{
+ scb->status = ORCSCB_POST;
+ outb(scb->scbidx, host->base + ORC_PQUEUE);
+}
+
+
+/**
+ * se2_rd_all - read SCSI parameters from EEPROM
+ * @host: Host whose EEPROM is being loaded
+ *
+ * Read SCSI H/A configuration parameters from serial EEPROM
+ */
+
+static int se2_rd_all(struct orc_host * host)
+{
+ int i;
+ u8 *np, chksum = 0;
+
+ np = (u8 *) nvramp;
+ for (i = 0; i < 64; i++, np++) { /* <01> */
+ if (orc_nv_read(host, (u8) i, np) == 0)
+ return -1;
+ }
+
+ /*------ Is ckecksum ok ? ------*/
+ np = (u8 *) nvramp;
+ for (i = 0; i < 63; i++)
+ chksum += *np++;
+
+ if (nvramp->CheckSum != (u8) chksum)
+ return -1;
+ return 1;
+}
+
+/**
+ * se2_update_all - update the EEPROM
+ * @host: Host whose EEPROM is being updated
+ *
+ * Update changed bytes in the EEPROM image.
+ */
+
+static void se2_update_all(struct orc_host * host)
+{ /* setup default pattern */
+ int i;
+ u8 *np, *np1, chksum = 0;
+
+ /* Calculate checksum first */
+ np = (u8 *) default_nvram;
+ for (i = 0; i < 63; i++)
+ chksum += *np++;
+ *np = chksum;
+
+ np = (u8 *) default_nvram;
+ np1 = (u8 *) nvramp;
+ for (i = 0; i < 64; i++, np++, np1++) {
+ if (*np != *np1)
+ orc_nv_write(host, (u8) i, *np);
+ }
+}
+
+/**
+ * read_eeprom - load EEPROM
+ * @host: Host EEPROM to read
+ *
+ * Read the EEPROM for a given host. If it is invalid or fails
+ * the restore the defaults and use them.
+ */
+
+static void read_eeprom(struct orc_host * host)
+{
+ if (se2_rd_all(host) != 1) {
+ se2_update_all(host); /* setup default pattern */
+ se2_rd_all(host); /* load again */
+ }
+}
+
+
+/**
+ * orc_load_firmware - initialise firmware
+ * @host: Host to set up
+ *
+ * Load the firmware from the EEPROM into controller SRAM. This
+ * is basically a 4K block copy and then a 4K block read to check
+ * correctness. The rest is convulted by the indirect interfaces
+ * in the hardware
+ */
+
+static u8 orc_load_firmware(struct orc_host * host)
+{
+ u32 data32;
+ u16 bios_addr;
+ u16 i;
+ u8 *data32_ptr, data;
+
+
+ /* Set up the EEPROM for access */
+
+ data = inb(host->base + ORC_GCFG);
+ outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */
+ outb(0x00, host->base + ORC_EBIOSADR2);
+ outw(0x0000, host->base + ORC_EBIOSADR0);
+ if (inb(host->base + ORC_EBIOSDATA) != 0x55) {
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
+ return 0;
+ }
+ outw(0x0001, host->base + ORC_EBIOSADR0);
+ if (inb(host->base + ORC_EBIOSDATA) != 0xAA) {
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
+ return 0;
+ }
+
+ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
+ data32_ptr = (u8 *) & data32;
+ data32 = cpu_to_le32(0); /* Initial FW address to 0 */
+ outw(0x0010, host->base + ORC_EBIOSADR0);
+ *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(0x0011, host->base + ORC_EBIOSADR0);
+ *(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(0x0012, host->base + ORC_EBIOSADR0);
+ *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
+ outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* Write FW address */
+
+ /* Copy the code from the BIOS to the SRAM */
+
+ udelay(500); /* Required on Sun Ultra 5 ... 350 -> failures */
+ bios_addr = (u16) le32_to_cpu(data32); /* FW code locate at BIOS address + ? */
+ for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
+ i < 0x1000; /* Firmware code size = 4K */
+ i++, bios_addr++) {
+ outw(bios_addr, host->base + ORC_EBIOSADR0);
+ *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ if ((i % 4) == 3) {
+ outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */
+ data32_ptr = (u8 *) & data32;
+ }
+ }
+
+ /* Go back and check they match */
+
+ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */
+ bios_addr -= 0x1000; /* Reset the BIOS address */
+ for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */
+ i < 0x1000; /* Firmware code size = 4K */
+ i++, bios_addr++) {
+ outw(bios_addr, host->base + ORC_EBIOSADR0);
+ *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ if ((i % 4) == 3) {
+ if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) {
+ outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
+ outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
+ return 0;
+ }
+ data32_ptr = (u8 *) & data32;
+ }
+ }
+
+ /* Success */
+ outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
+ return 1;
+}
+
+/***************************************************************************/
+static void setup_SCBs(struct orc_host * host)
+{
+ struct orc_scb *scb;
+ int i;
+ struct orc_extended_scb *escb;
+ dma_addr_t escb_phys;
+
+ /* Setup SCB base and SCB Size registers */
+ outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */
+ /* SCB base address 0 */
+ outl(host->scb_phys, host->base + ORC_SCBBASE0);
+ /* SCB base address 1 */
+ outl(host->scb_phys, host->base + ORC_SCBBASE1);
+
+ /* setup scatter list address with one buffer */
+ scb = host->scb_virt;
+ escb = host->escb_virt;
+
+ for (i = 0; i < ORC_MAXQUEUE; i++) {
+ escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
+ scb->sg_addr = cpu_to_le32((u32) escb_phys);
+ scb->sense_addr = cpu_to_le32((u32) escb_phys);
+ scb->escb = escb;
+ scb->scbidx = i;
+ scb++;
+ escb++;
+ }
+}
+
+/**
+ * init_alloc_map - initialise allocation map
+ * @host: host map to configure
+ *
+ * Initialise the allocation maps for this device. If the device
+ * is not quiescent the caller must hold the allocation lock
+ */
+
+static void init_alloc_map(struct orc_host * host)
+{
+ u8 i, j;
+
+ for (i = 0; i < MAX_CHANNELS; i++) {
+ for (j = 0; j < 8; j++) {
+ host->allocation_map[i][j] = 0xffffffff;
+ }
+ }
+}
+
+/**
+ * init_orchid - initialise the host adapter
+ * @host:host adapter to initialise
+ *
+ * Initialise the controller and if necessary load the firmware.
+ *
+ * Returns -1 if the initialisation fails.
+ */
+
+static int init_orchid(struct orc_host * host)
+{
+ u8 *ptr;
+ u16 revision;
+ u8 i;
+
+ init_alloc_map(host);
+ outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */
+
+ if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */
+ revision = orc_read_fwrev(host);
+ if (revision == 0xFFFF) {
+ outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
+ if (wait_chip_ready(host) == 0)
+ return -1;
+ orc_load_firmware(host); /* Download FW */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */
+ if (wait_firmware_ready(host) == 0)
+ return -1;
+ /* Wait for firmware ready */
+ } else {
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ }
+ } else { /* Orchid is not Ready */
+ outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
+ if (wait_chip_ready(host) == 0)
+ return -1;
+ orc_load_firmware(host); /* Download FW */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */
+
+ /* clear HOSTSTOP */
+ if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */
+ return -1;
+ }
+
+ /* Load an EEProm copy into RAM */
+ /* Assumes single threaded at this point */
+ read_eeprom(host);
+
+ if (nvramp->revision != 1)
+ return -1;
+
+ host->scsi_id = nvramp->scsi_id;
+ host->BIOScfg = nvramp->BIOSConfig1;
+ host->max_targets = MAX_TARGETS;
+ ptr = (u8 *) & (nvramp->Target00Config);
+ for (i = 0; i < 16; ptr++, i++) {
+ host->target_flag[i] = *ptr;
+ host->max_tags[i] = ORC_MAXTAGS;
+ }
+
+ if (nvramp->SCSI0Config & NCC_BUSRESET)
+ host->flags |= HCF_SCSI_RESET;
+ outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */
+ return 0;
+}
+
+/**
+ * orc_reset_scsi_bus - perform bus reset
+ * @host: host being reset
+ *
+ * Perform a full bus reset on the adapter.
+ */
+
+static int orc_reset_scsi_bus(struct orc_host * host)
+{ /* I need Host Control Block Information */
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->allocation_lock, flags);
+
+ init_alloc_map(host);
+ /* reset scsi bus */
+ outb(SCSIRST, host->base + ORC_HCTRL);
+ /* FIXME: We can spend up to a second with the lock held and
+ interrupts off here */
+ if (wait_scsi_reset_done(host) == 0) {
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return FAILED;
+ } else {
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return SUCCESS;
+ }
+}
+
+/**
+ * orc_device_reset - device reset handler
+ * @host: host to reset
+ * @cmd: command causing the reset
+ * @target; target device
+ *
+ * Reset registers, reset a hanging bus and kill active and disconnected
+ * commands for target w/o soft reset
+ */
+
+static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target)
+{ /* I need Host Control Block Information */
+ struct orc_scb *scb;
+ struct orc_extended_scb *escb;
+ struct orc_scb *host_scb;
+ u8 i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+ scb = (struct orc_scb *) NULL;
+ escb = (struct orc_extended_scb *) NULL;
+
+ /* setup scatter list address with one buffer */
+ host_scb = host->scb_virt;
+
+ /* FIXME: is this safe if we then fail to issue the reset or race
+ a completion ? */
+ init_alloc_map(host);
+
+ /* Find the scb corresponding to the command */
+ for (i = 0; i < ORC_MAXQUEUE; i++) {
+ escb = host_scb->escb;
+ if (host_scb->status && escb->srb == cmd)
+ break;
+ host_scb++;
+ }
+
+ if (i == ORC_MAXQUEUE) {
+ printk(KERN_ERR "Unable to Reset - No SCB Found\n");
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
+ return FAILED;
+ }
+
+ /* Allocate a new SCB for the reset command to the firmware */
+ if ((scb = __orc_alloc_scb(host)) == NULL) {
+ /* Can't happen.. */
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
+ return FAILED;
+ }
+
+ /* Reset device is handled by the firmware, we fill in an SCB and
+ fire it at the controller, it does the rest */
+ scb->opcode = ORC_BUSDEVRST;
+ scb->target = target;
+ scb->hastat = 0;
+ scb->tastat = 0;
+ scb->status = 0x0;
+ scb->link = 0xFF;
+ scb->reserved0 = 0;
+ scb->reserved1 = 0;
+ scb->xferlen = cpu_to_le32(0);
+ scb->sg_len = cpu_to_le32(0);
+
+ escb->srb = NULL;
+ escb->srb = cmd;
+ orc_exec_scb(host, scb); /* Start execute SCB */
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return SUCCESS;
+}
+
+/**
+ * __orc_alloc_scb - allocate an SCB
+ * @host: host to allocate from
+ *
+ * Allocate an SCB and return a pointer to the SCB object. NULL
+ * is returned if no SCB is free. The caller must already hold
+ * the allocator lock at this point.
+ */
+
+
+static struct orc_scb *__orc_alloc_scb(struct orc_host * host)
+{
+ u8 channel;
+ unsigned long idx;
+ u8 index;
+ u8 i;
+
+ channel = host->index;
+ for (i = 0; i < 8; i++) {
+ for (index = 0; index < 32; index++) {
+ if ((host->allocation_map[channel][i] >> index) & 0x01) {
+ host->allocation_map[channel][i] &= ~(1 << index);
+ idx = index + 32 * i;
+ /*
+ * Translate the index to a structure instance
+ */
+ return host->scb_virt + idx;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * orc_alloc_scb - allocate an SCB
+ * @host: host to allocate from
+ *
+ * Allocate an SCB and return a pointer to the SCB object. NULL
+ * is returned if no SCB is free.
+ */
+
+static struct orc_scb *orc_alloc_scb(struct orc_host * host)
+{
+ struct orc_scb *scb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->allocation_lock, flags);
+ scb = __orc_alloc_scb(host);
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return scb;
+}
+
+/**
+ * orc_release_scb - release an SCB
+ * @host: host owning the SCB
+ * @scb: SCB that is now free
+ *
+ * Called to return a completed SCB to the allocation pool. Before
+ * calling the SCB must be out of use on both the host and the HA.
+ */
+
+static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
+{
+ unsigned long flags;
+ u8 index, i, channel;
+
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+ channel = host->index; /* Channel */
+ index = scb->scbidx;
+ i = index / 32;
+ index %= 32;
+ host->allocation_map[channel][i] |= (1 << index);
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
+}
+
+/**
+ * orchid_abort_scb - abort a command
+ *
+ * Abort a queued command that has been passed to the firmware layer
+ * if possible. This is all handled by the firmware. We aks the firmware
+ * and it either aborts the command or fails
+ */
+
+static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb)
+{
+ unsigned char data, status;
+
+ outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
+ return 0;
+
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
+ return 0;
+ status = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
+
+ if (status == 1) /* 0 - Successfully */
+ return 0; /* 1 - Fail */
+ return 1;
+}
+
+static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd)
+{
+ struct orc_extended_scb *escb;
+ struct orc_scb *scb;
+ u8 i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+
+ scb = host->scb_virt;
+
+ /* Walk the queue until we find the SCB that belongs to the command
+ block. This isn't a performance critical path so a walk in the park
+ here does no harm */
+
+ for (i = 0; i < ORC_MAXQUEUE; i++, scb++) {
+ escb = scb->escb;
+ if (scb->status && escb->srb == cmd) {
+ if (scb->tag_msg == 0) {
+ goto out;
+ } else {
+ /* Issue an ABORT to the firmware */
+ if (orchid_abort_scb(host, scb)) {
+ escb->srb = NULL;
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return SUCCESS;
+ } else
+ goto out;
+ }
+ }
+ }
+out:
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return FAILED;
+}
+
+/**
+ * orc_interrupt - IRQ processing
+ * @host: Host causing the interrupt
+ *
+ * This function is called from the IRQ handler and protected
+ * by the host lock. While the controller reports that there are
+ * scb's for processing we pull them off the controller, turn the
+ * index into a host address pointer to the scb and call the scb
+ * handler.
+ *
+ * Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise
+ */
+
+static irqreturn_t orc_interrupt(struct orc_host * host)
+{
+ u8 scb_index;
+ struct orc_scb *scb;
+
+ /* Check if we have an SCB queued for servicing */
+ if (inb(host->base + ORC_RQUEUECNT) == 0)
+ return IRQ_NONE;
+
+ do {
+ /* Get the SCB index of the SCB to service */
+ scb_index = inb(host->base + ORC_RQUEUE);
+
+ /* Translate it back to a host pointer */
+ scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index));
+ scb->status = 0x0;
+ /* Process the SCB */
+ inia100_scb_handler(host, scb);
+ } while (inb(host->base + ORC_RQUEUECNT));
+ return IRQ_HANDLED;
+} /* End of I1060Interrupt() */
+
+/**
+ * inia100_build_scb - build SCB
+ * @host: host owing the control block
+ * @scb: control block to use
+ * @cmd: Mid layer command
+ *
+ * Build a host adapter control block from the SCSI mid layer command
+ */
+
+static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
+{ /* Create corresponding SCB */
+ struct scatterlist *sg;
+ struct orc_sgent *sgent; /* Pointer to SG list */
+ int i, count_sg;
+ struct orc_extended_scb *escb;
+
+ /* Links between the escb, scb and Linux scsi midlayer cmd */
+ escb = scb->escb;
+ escb->srb = cmd;
+ sgent = NULL;
+
+ /* Set up the SCB to do a SCSI command block */
+ scb->opcode = ORC_EXECSCSI;
+ scb->flags = SCF_NO_DCHK; /* Clear done bit */
+ scb->target = cmd->device->id;
+ scb->lun = cmd->device->lun;
+ scb->reserved0 = 0;
+ scb->reserved1 = 0;
+ scb->sg_len = cpu_to_le32(0);
+
+ scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd));
+ sgent = (struct orc_sgent *) & escb->sglist[0];
+
+ count_sg = scsi_dma_map(cmd);
+ if (count_sg < 0)
+ return count_sg;
+ BUG_ON(count_sg > TOTAL_SG_ENTRY);
+
+ /* Build the scatter gather lists */
+ if (count_sg) {
+ scb->sg_len = cpu_to_le32((u32) (count_sg * 8));
+ scsi_for_each_sg(cmd, sg, count_sg, i) {
+ sgent->base = cpu_to_le32((u32) sg_dma_address(sg));
+ sgent->length = cpu_to_le32((u32) sg_dma_len(sg));
+ sgent++;
+ }
+ } else {
+ scb->sg_len = cpu_to_le32(0);
+ sgent->base = cpu_to_le32(0);
+ sgent->length = cpu_to_le32(0);
+ }
+ scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */
+ scb->hastat = 0;
+ scb->tastat = 0;
+ scb->link = 0xFF;
+ scb->sense_len = SENSE_SIZE;
+ scb->cdb_len = cmd->cmd_len;
+ if (scb->cdb_len >= IMAX_CDB) {
+ printk("max cdb length= %x\b", cmd->cmd_len);
+ scb->cdb_len = IMAX_CDB;
+ }
+ scb->ident = (u8)(cmd->device->lun & 0xff) | DISC_ALLOW;
+ if (cmd->device->tagged_supported) { /* Tag Support */
+ scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
+ } else {
+ scb->tag_msg = 0; /* No tag support */
+ }
+ memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
+ return 0;
+}
+
+/**
+ * inia100_queue - queue command with host
+ * @cmd: Command block
+ * @done: Completion function
+ *
+ * Called by the mid layer to queue a command. Process the command
+ * block, build the host specific scb structures and if there is room
+ * queue the command down to the controller
+ */
+
+static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+{
+ struct orc_scb *scb;
+ struct orc_host *host; /* Point to Host adapter control block */
+
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ cmd->scsi_done = done;
+ /* Get free SCSI control block */
+ if ((scb = orc_alloc_scb(host)) == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (inia100_build_scb(host, scb, cmd)) {
+ orc_release_scb(host, scb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ orc_exec_scb(host, scb); /* Start execute SCB */
+ return 0;
+}
+
+static DEF_SCSI_QCMD(inia100_queue)
+
+/*****************************************************************************
+ Function name : inia100_abort
+ Description : Abort a queued command.
+ (commands that are on the bus can't be aborted easily)
+ Input : host - Pointer to host adapter structure
+ Output : None.
+ Return : pSRB - Pointer to SCSI request block.
+*****************************************************************************/
+static int inia100_abort(struct scsi_cmnd * cmd)
+{
+ struct orc_host *host;
+
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return inia100_abort_cmd(host, cmd);
+}
+
+/*****************************************************************************
+ Function name : inia100_reset
+ Description : Reset registers, reset a hanging bus and
+ kill active and disconnected commands for target w/o soft reset
+ Input : host - Pointer to host adapter structure
+ Output : None.
+ Return : pSRB - Pointer to SCSI request block.
+*****************************************************************************/
+static int inia100_bus_reset(struct scsi_cmnd * cmd)
+{ /* I need Host Control Block Information */
+ struct orc_host *host;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return orc_reset_scsi_bus(host);
+}
+
+/*****************************************************************************
+ Function name : inia100_device_reset
+ Description : Reset the device
+ Input : host - Pointer to host adapter structure
+ Output : None.
+ Return : pSRB - Pointer to SCSI request block.
+*****************************************************************************/
+static int inia100_device_reset(struct scsi_cmnd * cmd)
+{ /* I need Host Control Block Information */
+ struct orc_host *host;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return orc_device_reset(host, cmd, scmd_id(cmd));
+
+}
+
+/**
+ * inia100_scb_handler - interrupt callback
+ * @host: Host causing the interrupt
+ * @scb: SCB the controller returned as needing processing
+ *
+ * Perform completion processing on a control block. Do the conversions
+ * from host to SCSI midlayer error coding, save any sense data and
+ * the complete with the midlayer and recycle the scb.
+ */
+
+static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
+{
+ struct scsi_cmnd *cmd; /* Pointer to SCSI request block */
+ struct orc_extended_scb *escb;
+
+ escb = scb->escb;
+ if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) {
+ printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n");
+ orc_release_scb(host, scb); /* Release SCB for current channel */
+ return;
+ }
+ escb->srb = NULL;
+
+ switch (scb->hastat) {
+ case 0x0:
+ case 0xa: /* Linked command complete without error and linked normally */
+ case 0xb: /* Linked command complete without error interrupt generated */
+ scb->hastat = 0;
+ break;
+
+ case 0x11: /* Selection time out-The initiator selection or target
+ reselection was not complete within the SCSI Time out period */
+ scb->hastat = DID_TIME_OUT;
+ break;
+
+ case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
+ phase sequence was requested by the target. The host adapter
+ will generate a SCSI Reset Condition, notifying the host with
+ a SCRD interrupt */
+ scb->hastat = DID_RESET;
+ break;
+
+ case 0x1a: /* SCB Aborted. 07/21/98 */
+ scb->hastat = DID_ABORT;
+ break;
+
+ case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
+ than was allocated by the Data Length field or the sum of the
+ Scatter / Gather Data Length fields. */
+ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
+ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */
+
+ default:
+ printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat);
+ scb->hastat = DID_ERROR; /* Couldn't find any better */
+ break;
+ }
+
+ if (scb->tastat == 2) { /* Check condition */
+ memcpy((unsigned char *) &cmd->sense_buffer[0],
+ (unsigned char *) &escb->sglist[0], SENSE_SIZE);
+ }
+ cmd->result = scb->tastat | (scb->hastat << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd); /* Notify system DONE */
+ orc_release_scb(host, scb); /* Release SCB for current channel */
+}
+
+/**
+ * inia100_intr - interrupt handler
+ * @irqno: Interrupt value
+ * @devid: Host adapter
+ *
+ * Entry point for IRQ handling. All the real work is performed
+ * by orc_interrupt.
+ */
+static irqreturn_t inia100_intr(int irqno, void *devid)
+{
+ struct Scsi_Host *shost = (struct Scsi_Host *)devid;
+ struct orc_host *host = (struct orc_host *)shost->hostdata;
+ unsigned long flags;
+ irqreturn_t res;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ res = orc_interrupt(host);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return res;
+}
+
+static struct scsi_host_template inia100_template = {
+ .proc_name = "inia100",
+ .name = inia100_REVID,
+ .queuecommand = inia100_queue,
+ .eh_abort_handler = inia100_abort,
+ .eh_bus_reset_handler = inia100_bus_reset,
+ .eh_device_reset_handler = inia100_device_reset,
+ .can_queue = 1,
+ .this_id = 1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int inia100_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ struct orc_host *host;
+ unsigned long port, bios;
+ int error = -ENODEV;
+ u32 sz;
+ unsigned long biosaddr;
+ char *bios_phys;
+
+ if (pci_enable_device(pdev))
+ goto out;
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "Unable to set 32bit DMA "
+ "on inia100 adapter, ignoring.\n");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ port = pci_resource_start(pdev, 0);
+ if (!request_region(port, 256, "inia100")) {
+ printk(KERN_WARNING "inia100: io port 0x%lx, is busy.\n", port);
+ goto out_disable_device;
+ }
+
+ /* <02> read from base address + 0x50 offset to get the bios value. */
+ bios = inw(port + 0x50);
+
+
+ shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host));
+ if (!shost)
+ goto out_release_region;
+
+ host = (struct orc_host *)shost->hostdata;
+ host->pdev = pdev;
+ host->base = port;
+ host->BIOScfg = bios;
+ spin_lock_init(&host->allocation_lock);
+
+ /* Get total memory needed for SCB */
+ sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
+ host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+ if (!host->scb_virt) {
+ printk("inia100: SCB memory allocation error\n");
+ goto out_host_put;
+ }
+
+ /* Get total memory needed for ESCB */
+ sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
+ host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+ if (!host->escb_virt) {
+ printk("inia100: ESCB memory allocation error\n");
+ goto out_free_scb_array;
+ }
+
+ biosaddr = host->BIOScfg;
+ biosaddr = (biosaddr << 4);
+ bios_phys = phys_to_virt(biosaddr);
+ if (init_orchid(host)) { /* Initialize orchid chip */
+ printk("inia100: initial orchid fail!!\n");
+ goto out_free_escb_array;
+ }
+
+ shost->io_port = host->base;
+ shost->n_io_port = 0xff;
+ shost->can_queue = ORC_MAXQUEUE;
+ shost->unique_id = shost->io_port;
+ shost->max_id = host->max_targets;
+ shost->max_lun = 16;
+ shost->irq = pdev->irq;
+ shost->this_id = host->scsi_id; /* Assign HCS index */
+ shost->sg_tablesize = TOTAL_SG_ENTRY;
+
+ /* Initial orc chip */
+ error = request_irq(pdev->irq, inia100_intr, IRQF_SHARED,
+ "inia100", shost);
+ if (error < 0) {
+ printk(KERN_WARNING "inia100: unable to get irq %d\n",
+ pdev->irq);
+ goto out_free_escb_array;
+ }
+
+ pci_set_drvdata(pdev, shost);
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+ goto out_free_irq;
+
+ scsi_scan_host(shost);
+ return 0;
+
+out_free_irq:
+ free_irq(shost->irq, shost);
+out_free_escb_array:
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ host->escb_virt, host->escb_phys);
+out_free_scb_array:
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ host->scb_virt, host->scb_phys);
+out_host_put:
+ scsi_host_put(shost);
+out_release_region:
+ release_region(port, 256);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return error;
+}
+
+static void inia100_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct orc_host *host = (struct orc_host *)shost->hostdata;
+
+ scsi_remove_host(shost);
+
+ free_irq(shost->irq, shost);
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ host->escb_virt, host->escb_phys);
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ host->scb_virt, host->scb_phys);
+ release_region(shost->io_port, 256);
+
+ scsi_host_put(shost);
+}
+
+static struct pci_device_id inia100_pci_tbl[] = {
+ {PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, inia100_pci_tbl);
+
+static struct pci_driver inia100_pci_driver = {
+ .name = "inia100",
+ .id_table = inia100_pci_tbl,
+ .probe = inia100_probe_one,
+ .remove = inia100_remove_one,
+};
+
+static int __init inia100_init(void)
+{
+ return pci_register_driver(&inia100_pci_driver);
+}
+
+static void __exit inia100_exit(void)
+{
+ pci_unregister_driver(&inia100_pci_driver);
+}
+
+MODULE_DESCRIPTION("Initio A100U2W SCSI driver");
+MODULE_AUTHOR("Initio Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+
+module_init(inia100_init);
+module_exit(inia100_exit);
diff --git a/drivers/scsi/a100u2w.h b/drivers/scsi/a100u2w.h
new file mode 100644
index 000000000..d40e0c528
--- /dev/null
+++ b/drivers/scsi/a100u2w.h
@@ -0,0 +1,371 @@
+/*
+ * Initio A100 device driver for Linux.
+ *
+ * Copyright (c) 1994-1998 Initio Corporation
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Revision History:
+ * 06/18/98 HL, Initial production Version 1.02
+ * 12/19/98 bv, Use spinlocks for 2.1.95 and up
+ * 06/25/02 Doug Ledford <dledford@redhat.com>
+ * - This and the i60uscsi.h file are almost identical,
+ * merged them into a single header used by both .c files.
+ * 14/06/07 Alan Cox <alan@redhat.com>
+ * - Grand cleanup and Linuxisation
+ */
+
+#define inia100_REVID "Initio INI-A100U2W SCSI device driver; Revision: 1.02d"
+
+#if 1
+#define ORC_MAXQUEUE 245
+#define ORC_MAXTAGS 64
+#else
+#define ORC_MAXQUEUE 25
+#define ORC_MAXTAGS 8
+#endif
+
+#define TOTAL_SG_ENTRY 32
+#define MAX_TARGETS 16
+#define IMAX_CDB 15
+#define SENSE_SIZE 14
+
+/************************************************************************/
+/* Scatter-Gather Element Structure */
+/************************************************************************/
+struct orc_sgent {
+ u32 base; /* Data Pointer */
+ u32 length; /* Data Length */
+};
+
+/* SCSI related definition */
+#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */
+#define DISC_ALLOW 0xC0 /* Disconnect is allowed */
+
+
+#define ORC_OFFSET_SCB 16
+#define ORC_MAX_SCBS 250
+#define MAX_CHANNELS 2
+#define MAX_ESCB_ELE 64
+#define TCF_DRV_255_63 0x0400
+
+/********************************************************/
+/* Orchid Host Command Set */
+/********************************************************/
+#define ORC_CMD_NOP 0x00 /* Host command - NOP */
+#define ORC_CMD_VERSION 0x01 /* Host command - Get F/W version */
+#define ORC_CMD_ECHO 0x02 /* Host command - ECHO */
+#define ORC_CMD_SET_NVM 0x03 /* Host command - Set NVRAM */
+#define ORC_CMD_GET_NVM 0x04 /* Host command - Get NVRAM */
+#define ORC_CMD_GET_BUS_STATUS 0x05 /* Host command - Get SCSI bus status */
+#define ORC_CMD_ABORT_SCB 0x06 /* Host command - Abort SCB */
+#define ORC_CMD_ISSUE_SCB 0x07 /* Host command - Issue SCB */
+
+/********************************************************/
+/* Orchid Register Set */
+/********************************************************/
+#define ORC_GINTS 0xA0 /* Global Interrupt Status */
+#define QINT 0x04 /* Reply Queue Interrupt */
+#define ORC_GIMSK 0xA1 /* Global Interrupt MASK */
+#define MQINT 0x04 /* Mask Reply Queue Interrupt */
+#define ORC_GCFG 0xA2 /* Global Configure */
+#define EEPRG 0x01 /* Enable EEPROM programming */
+#define ORC_GSTAT 0xA3 /* Global status */
+#define WIDEBUS 0x10 /* Wide SCSI Devices connected */
+#define ORC_HDATA 0xA4 /* Host Data */
+#define ORC_HCTRL 0xA5 /* Host Control */
+#define SCSIRST 0x80 /* SCSI bus reset */
+#define HDO 0x40 /* Host data out */
+#define HOSTSTOP 0x02 /* Host stop RISC engine */
+#define DEVRST 0x01 /* Device reset */
+#define ORC_HSTUS 0xA6 /* Host Status */
+#define HDI 0x02 /* Host data in */
+#define RREADY 0x01 /* RISC engine is ready to receive */
+#define ORC_NVRAM 0xA7 /* Nvram port address */
+#define SE2CS 0x008
+#define SE2CLK 0x004
+#define SE2DO 0x002
+#define SE2DI 0x001
+#define ORC_PQUEUE 0xA8 /* Posting queue FIFO */
+#define ORC_PQCNT 0xA9 /* Posting queue FIFO Cnt */
+#define ORC_RQUEUE 0xAA /* Reply queue FIFO */
+#define ORC_RQUEUECNT 0xAB /* Reply queue FIFO Cnt */
+#define ORC_FWBASEADR 0xAC /* Firmware base address */
+
+#define ORC_EBIOSADR0 0xB0 /* External Bios address */
+#define ORC_EBIOSADR1 0xB1 /* External Bios address */
+#define ORC_EBIOSADR2 0xB2 /* External Bios address */
+#define ORC_EBIOSDATA 0xB3 /* External Bios address */
+
+#define ORC_SCBSIZE 0xB7 /* SCB size register */
+#define ORC_SCBBASE0 0xB8 /* SCB base address 0 */
+#define ORC_SCBBASE1 0xBC /* SCB base address 1 */
+
+#define ORC_RISCCTL 0xE0 /* RISC Control */
+#define PRGMRST 0x002
+#define DOWNLOAD 0x001
+#define ORC_PRGMCTR0 0xE2 /* RISC program counter */
+#define ORC_PRGMCTR1 0xE3 /* RISC program counter */
+#define ORC_RISCRAM 0xEC /* RISC RAM data port 4 bytes */
+
+struct orc_extended_scb { /* Extended SCB */
+ struct orc_sgent sglist[TOTAL_SG_ENTRY]; /*0 Start of SG list */
+ struct scsi_cmnd *srb; /*50 SRB Pointer */
+};
+
+/***********************************************************************
+ SCSI Control Block
+
+ 0x40 bytes long, the last 8 are user bytes
+************************************************************************/
+struct orc_scb { /* Scsi_Ctrl_Blk */
+ u8 opcode; /*00 SCB command code&residual */
+ u8 flags; /*01 SCB Flags */
+ u8 target; /*02 Target Id */
+ u8 lun; /*03 Lun */
+ u32 reserved0; /*04 Reserved for ORCHID must 0 */
+ u32 xferlen; /*08 Data Transfer Length */
+ u32 reserved1; /*0C Reserved for ORCHID must 0 */
+ u32 sg_len; /*10 SG list # * 8 */
+ u32 sg_addr; /*14 SG List Buf physical Addr */
+ u32 sg_addrhigh; /*18 SG Buffer high physical Addr */
+ u8 hastat; /*1C Host Status */
+ u8 tastat; /*1D Target Status */
+ u8 status; /*1E SCB status */
+ u8 link; /*1F Link pointer, default 0xFF */
+ u8 sense_len; /*20 Sense Allocation Length */
+ u8 cdb_len; /*21 CDB Length */
+ u8 ident; /*22 Identify */
+ u8 tag_msg; /*23 Tag Message */
+ u8 cdb[IMAX_CDB]; /*24 SCSI CDBs */
+ u8 scbidx; /*3C Index for this ORCSCB */
+ u32 sense_addr; /*34 Sense Buffer physical Addr */
+
+ struct orc_extended_scb *escb; /*38 Extended SCB Pointer */
+ /* 64bit pointer or 32bit pointer + reserved ? */
+#ifndef CONFIG_64BIT
+ u8 reserved2[4]; /*3E Reserved for Driver use */
+#endif
+};
+
+/* Opcodes of ORCSCB_Opcode */
+#define ORC_EXECSCSI 0x00 /* SCSI initiator command with residual */
+#define ORC_BUSDEVRST 0x01 /* SCSI Bus Device Reset */
+
+/* Status of ORCSCB_Status */
+#define ORCSCB_COMPLETE 0x00 /* SCB request completed */
+#define ORCSCB_POST 0x01 /* SCB is posted by the HOST */
+
+/* Bit Definition for ORCSCB_Flags */
+#define SCF_DISINT 0x01 /* Disable HOST interrupt */
+#define SCF_DIR 0x18 /* Direction bits */
+#define SCF_NO_DCHK 0x00 /* Direction determined by SCSI */
+#define SCF_DIN 0x08 /* From Target to Initiator */
+#define SCF_DOUT 0x10 /* From Initiator to Target */
+#define SCF_NO_XF 0x18 /* No data transfer */
+#define SCF_POLL 0x40
+
+/* Error Codes for ORCSCB_HaStat */
+#define HOST_SEL_TOUT 0x11
+#define HOST_DO_DU 0x12
+#define HOST_BUS_FREE 0x13
+#define HOST_BAD_PHAS 0x14
+#define HOST_INV_CMD 0x16
+#define HOST_SCSI_RST 0x1B
+#define HOST_DEV_RST 0x1C
+
+
+/* Error Codes for ORCSCB_TaStat */
+#define TARGET_CHK_COND 0x02
+#define TARGET_BUSY 0x08
+#define TARGET_TAG_FULL 0x28
+
+
+/***********************************************************************
+ Target Device Control Structure
+**********************************************************************/
+
+struct orc_target {
+ u8 TCS_DrvDASD; /* 6 */
+ u8 TCS_DrvSCSI; /* 7 */
+ u8 TCS_DrvHead; /* 8 */
+ u16 TCS_DrvFlags; /* 4 */
+ u8 TCS_DrvSector; /* 7 */
+};
+
+/* Bit Definition for TCF_DrvFlags */
+#define TCS_DF_NODASD_SUPT 0x20 /* Suppress OS/2 DASD Mgr support */
+#define TCS_DF_NOSCSI_SUPT 0x40 /* Suppress OS/2 SCSI Mgr support */
+
+
+/***********************************************************************
+ Host Adapter Control Structure
+************************************************************************/
+struct orc_host {
+ unsigned long base; /* Base address */
+ u8 index; /* Index (Channel)*/
+ u8 scsi_id; /* H/A SCSI ID */
+ u8 BIOScfg; /*BIOS configuration */
+ u8 flags;
+ u8 max_targets; /* SCSI0MAXTags */
+ struct orc_scb *scb_virt; /* Virtual Pointer to SCB array */
+ dma_addr_t scb_phys; /* Scb Physical address */
+ struct orc_extended_scb *escb_virt; /* Virtual pointer to ESCB Scatter list */
+ dma_addr_t escb_phys; /* scatter list Physical address */
+ u8 target_flag[16]; /* target configuration, TCF_EN_TAG */
+ u8 max_tags[16]; /* ORC_MAX_SCBS */
+ u32 allocation_map[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
+ spinlock_t allocation_lock;
+ struct pci_dev *pdev;
+};
+
+/* Bit Definition for HCS_Flags */
+
+#define HCF_SCSI_RESET 0x01 /* SCSI BUS RESET */
+#define HCF_PARITY 0x02 /* parity card */
+#define HCF_LVDS 0x10 /* parity card */
+
+/* Bit Definition for TargetFlag */
+
+#define TCF_EN_255 0x08
+#define TCF_EN_TAG 0x10
+#define TCF_BUSY 0x20
+#define TCF_DISCONNECT 0x40
+#define TCF_SPIN_UP 0x80
+
+/* Bit Definition for HCS_AFlags */
+#define HCS_AF_IGNORE 0x01 /* Adapter ignore */
+#define HCS_AF_DISABLE_RESET 0x10 /* Adapter disable reset */
+#define HCS_AF_DISABLE_ADPT 0x80 /* Adapter disable */
+
+struct orc_nvram {
+/*----------header ---------------*/
+ u8 SubVendorID0; /* 00 - Sub Vendor ID */
+ u8 SubVendorID1; /* 00 - Sub Vendor ID */
+ u8 SubSysID0; /* 02 - Sub System ID */
+ u8 SubSysID1; /* 02 - Sub System ID */
+ u8 SubClass; /* 04 - Sub Class */
+ u8 VendorID0; /* 05 - Vendor ID */
+ u8 VendorID1; /* 05 - Vendor ID */
+ u8 DeviceID0; /* 07 - Device ID */
+ u8 DeviceID1; /* 07 - Device ID */
+ u8 Reserved0[2]; /* 09 - Reserved */
+ u8 revision; /* 0B - revision of data structure */
+ /* ----Host Adapter Structure ---- */
+ u8 NumOfCh; /* 0C - Number of SCSI channel */
+ u8 BIOSConfig1; /* 0D - BIOS configuration 1 */
+ u8 BIOSConfig2; /* 0E - BIOS boot channel&target ID */
+ u8 BIOSConfig3; /* 0F - BIOS configuration 3 */
+ /* ----SCSI channel Structure ---- */
+ /* from "CTRL-I SCSI Host Adapter SetUp menu " */
+ u8 scsi_id; /* 10 - Channel 0 SCSI ID */
+ u8 SCSI0Config; /* 11 - Channel 0 SCSI configuration */
+ u8 SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */
+ u8 SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */
+ u8 ReservedforChannel0[2]; /* 14 - Reserved */
+
+ /* ----SCSI target Structure ---- */
+ /* from "CTRL-I SCSI device SetUp menu " */
+ u8 Target00Config; /* 16 - Channel 0 Target 0 config */
+ u8 Target01Config; /* 17 - Channel 0 Target 1 config */
+ u8 Target02Config; /* 18 - Channel 0 Target 2 config */
+ u8 Target03Config; /* 19 - Channel 0 Target 3 config */
+ u8 Target04Config; /* 1A - Channel 0 Target 4 config */
+ u8 Target05Config; /* 1B - Channel 0 Target 5 config */
+ u8 Target06Config; /* 1C - Channel 0 Target 6 config */
+ u8 Target07Config; /* 1D - Channel 0 Target 7 config */
+ u8 Target08Config; /* 1E - Channel 0 Target 8 config */
+ u8 Target09Config; /* 1F - Channel 0 Target 9 config */
+ u8 Target0AConfig; /* 20 - Channel 0 Target A config */
+ u8 Target0BConfig; /* 21 - Channel 0 Target B config */
+ u8 Target0CConfig; /* 22 - Channel 0 Target C config */
+ u8 Target0DConfig; /* 23 - Channel 0 Target D config */
+ u8 Target0EConfig; /* 24 - Channel 0 Target E config */
+ u8 Target0FConfig; /* 25 - Channel 0 Target F config */
+
+ u8 SCSI1Id; /* 26 - Channel 1 SCSI ID */
+ u8 SCSI1Config; /* 27 - Channel 1 SCSI configuration */
+ u8 SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */
+ u8 SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */
+ u8 ReservedforChannel1[2]; /* 2A - Reserved */
+
+ /* ----SCSI target Structure ---- */
+ /* from "CTRL-I SCSI device SetUp menu " */
+ u8 Target10Config; /* 2C - Channel 1 Target 0 config */
+ u8 Target11Config; /* 2D - Channel 1 Target 1 config */
+ u8 Target12Config; /* 2E - Channel 1 Target 2 config */
+ u8 Target13Config; /* 2F - Channel 1 Target 3 config */
+ u8 Target14Config; /* 30 - Channel 1 Target 4 config */
+ u8 Target15Config; /* 31 - Channel 1 Target 5 config */
+ u8 Target16Config; /* 32 - Channel 1 Target 6 config */
+ u8 Target17Config; /* 33 - Channel 1 Target 7 config */
+ u8 Target18Config; /* 34 - Channel 1 Target 8 config */
+ u8 Target19Config; /* 35 - Channel 1 Target 9 config */
+ u8 Target1AConfig; /* 36 - Channel 1 Target A config */
+ u8 Target1BConfig; /* 37 - Channel 1 Target B config */
+ u8 Target1CConfig; /* 38 - Channel 1 Target C config */
+ u8 Target1DConfig; /* 39 - Channel 1 Target D config */
+ u8 Target1EConfig; /* 3A - Channel 1 Target E config */
+ u8 Target1FConfig; /* 3B - Channel 1 Target F config */
+ u8 reserved[3]; /* 3C - Reserved */
+ /* ---------- CheckSum ---------- */
+ u8 CheckSum; /* 3F - Checksum of NVRam */
+};
+
+/* Bios Configuration for nvram->BIOSConfig1 */
+#define NBC_BIOSENABLE 0x01 /* BIOS enable */
+#define NBC_CDROM 0x02 /* Support bootable CDROM */
+#define NBC_REMOVABLE 0x04 /* Support removable drive */
+
+/* Bios Configuration for nvram->BIOSConfig2 */
+#define NBB_TARGET_MASK 0x0F /* Boot SCSI target ID number */
+#define NBB_CHANL_MASK 0xF0 /* Boot SCSI channel number */
+
+/* Bit definition for nvram->SCSIConfig */
+#define NCC_BUSRESET 0x01 /* Reset SCSI bus at power up */
+#define NCC_PARITYCHK 0x02 /* SCSI parity enable */
+#define NCC_LVDS 0x10 /* Enable LVDS */
+#define NCC_ACTTERM1 0x20 /* Enable active terminator 1 */
+#define NCC_ACTTERM2 0x40 /* Enable active terminator 2 */
+#define NCC_AUTOTERM 0x80 /* Enable auto termination */
+
+/* Bit definition for nvram->TargetxConfig */
+#define NTC_PERIOD 0x07 /* Maximum Sync. Speed */
+#define NTC_1GIGA 0x08 /* 255 head / 63 sectors (64/32) */
+#define NTC_NO_SYNC 0x10 /* NO SYNC. NEGO */
+#define NTC_NO_WIDESYNC 0x20 /* NO WIDE SYNC. NEGO */
+#define NTC_DISC_ENABLE 0x40 /* Enable SCSI disconnect */
+#define NTC_SPINUP 0x80 /* Start disk drive */
+
+/* Default NVRam values */
+#define NBC_DEFAULT (NBC_ENABLE)
+#define NCC_DEFAULT (NCC_BUSRESET | NCC_AUTOTERM | NCC_PARITYCHK)
+#define NCC_MAX_TAGS 0x20 /* Maximum tags per target */
+#define NCC_RESET_TIME 0x0A /* SCSI RESET recovering time */
+#define NTC_DEFAULT (NTC_1GIGA | NTC_NO_WIDESYNC | NTC_DISC_ENABLE)
+
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
new file mode 100644
index 000000000..9176bfbd5
--- /dev/null
+++ b/drivers/scsi/a2091.c
@@ -0,0 +1,281 @@
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/zorro.h>
+#include <linux/module.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "scsi.h"
+#include "wd33c93.h"
+#include "a2091.h"
+
+
+struct a2091_hostdata {
+ struct WD33C93_hostdata wh;
+ struct a2091_scsiregs *regs;
+};
+
+static irqreturn_t a2091_intr(int irq, void *data)
+{
+ struct Scsi_Host *instance = data;
+ struct a2091_hostdata *hdata = shost_priv(instance);
+ unsigned int status = hdata->regs->ISTR;
+ unsigned long flags;
+
+ if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct a2091_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a2091_scsiregs *regs = hdata->regs;
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /* don't allow DMA if the physical address is bad */
+ if (addr & A2091_XFER_MASK) {
+ wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ /* get the physical address of the bounce buffer */
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+
+ /* the bounce buffer may not be in the first 16M of physmem */
+ if (addr & A2091_XFER_MASK) {
+ /* we could use chipmem... maybe later */
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
+ }
+
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
+
+ /* remember direction */
+ wh->dma_dir = dir_in;
+
+ regs->CNTR = cntr;
+
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+ /* start DMA */
+ regs->ST_DMA = 1;
+
+ /* return success */
+ return 0;
+}
+
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ int status)
+{
+ struct a2091_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a2091_scsiregs *regs = hdata->regs;
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!wh->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ /* disable SCSI interrupts */
+ regs->CNTR = cntr;
+
+ /* flush if we were reading */
+ if (wh->dma_dir) {
+ regs->FLUSH = 1;
+ while (!(regs->ISTR & ISTR_FE_FLG))
+ ;
+ }
+
+ /* clear a possible interrupt */
+ regs->CINT = 1;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && wh->dma_bounce_buffer) {
+ if (wh->dma_dir)
+ memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ }
+}
+
+static int a2091_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+
+ /* FIXME perform bus-specific reset */
+
+ /* FIXME 2: kill this function, and let midlayer fall back
+ to the same action, calling wd33c93_host_reset() */
+
+ spin_lock_irq(instance->host_lock);
+ wd33c93_host_reset(cmd);
+ spin_unlock_irq(instance->host_lock);
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template a2091_scsi_template = {
+ .module = THIS_MODULE,
+ .name = "Commodore A2091/A590 SCSI",
+ .show_info = wd33c93_show_info,
+ .write_info = wd33c93_write_info,
+ .proc_name = "A2901",
+ .queuecommand = wd33c93_queuecommand,
+ .eh_abort_handler = wd33c93_abort,
+ .eh_bus_reset_handler = a2091_bus_reset,
+ .eh_host_reset_handler = wd33c93_host_reset,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING
+};
+
+static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
+{
+ struct Scsi_Host *instance;
+ int error;
+ struct a2091_scsiregs *regs;
+ wd33c93_regs wdregs;
+ struct a2091_hostdata *hdata;
+
+ if (!request_mem_region(z->resource.start, 256, "wd33c93"))
+ return -EBUSY;
+
+ instance = scsi_host_alloc(&a2091_scsi_template,
+ sizeof(struct a2091_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+
+ regs = ZTWO_VADDR(z->resource.start);
+ regs->DAWR = DAWR_A2091;
+
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+
+ hdata = shost_priv(instance);
+ hdata->wh.no_sync = 0xff;
+ hdata->wh.fast = 0;
+ hdata->wh.dma_mode = CTRL_DMA;
+ hdata->regs = regs;
+
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
+ error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
+ "A2091 SCSI", instance);
+ if (error)
+ goto fail_irq;
+
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ zorro_set_drvdata(z, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ free_irq(IRQ_AMIGA_PORTS, instance);
+fail_irq:
+ scsi_host_put(instance);
+fail_alloc:
+ release_mem_region(z->resource.start, 256);
+ return error;
+}
+
+static void a2091_remove(struct zorro_dev *z)
+{
+ struct Scsi_Host *instance = zorro_get_drvdata(z);
+ struct a2091_hostdata *hdata = shost_priv(instance);
+
+ hdata->regs->CNTR = 0;
+ scsi_remove_host(instance);
+ free_irq(IRQ_AMIGA_PORTS, instance);
+ scsi_host_put(instance);
+ release_mem_region(z->resource.start, 256);
+}
+
+static struct zorro_device_id a2091_zorro_tbl[] = {
+ { ZORRO_PROD_CBM_A590_A2091_1 },
+ { ZORRO_PROD_CBM_A590_A2091_2 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl);
+
+static struct zorro_driver a2091_driver = {
+ .name = "a2091",
+ .id_table = a2091_zorro_tbl,
+ .probe = a2091_probe,
+ .remove = a2091_remove,
+};
+
+static int __init a2091_init(void)
+{
+ return zorro_register_driver(&a2091_driver);
+}
+module_init(a2091_init);
+
+static void __exit a2091_exit(void)
+{
+ zorro_unregister_driver(&a2091_driver);
+}
+module_exit(a2091_exit);
+
+MODULE_DESCRIPTION("Commodore A2091/A590 SCSI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
new file mode 100644
index 000000000..794b8e65c
--- /dev/null
+++ b/drivers/scsi/a2091.h
@@ -0,0 +1,69 @@
+#ifndef A2091_H
+#define A2091_H
+
+/* $Id: a2091.h,v 1.4 1997/01/19 23:07:09 davem Exp $
+ *
+ * Header file for the Commodore A2091 Zorro II SCSI controller for Linux
+ *
+ * Written and (C) 1993, Hamish Macdonald, see a2091.c for more info
+ *
+ */
+
+#include <linux/types.h>
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+/*
+ * if the transfer address ANDed with this results in a non-zero
+ * result, then we can't use DMA.
+ */
+#define A2091_XFER_MASK (0xff000001)
+
+struct a2091_scsiregs {
+ unsigned char pad1[64];
+ volatile unsigned short ISTR;
+ volatile unsigned short CNTR;
+ unsigned char pad2[60];
+ volatile unsigned int WTC;
+ volatile unsigned long ACR;
+ unsigned char pad3[6];
+ volatile unsigned short DAWR;
+ unsigned char pad4;
+ volatile unsigned char SASR;
+ unsigned char pad5;
+ volatile unsigned char SCMD;
+ unsigned char pad6[76];
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short CINT;
+ unsigned char pad7[2];
+ volatile unsigned short FLUSH;
+};
+
+#define DAWR_A2091 (3)
+
+/* CNTR bits. */
+#define CNTR_TCEN (1<<7)
+#define CNTR_PREST (1<<6)
+#define CNTR_PDMD (1<<5)
+#define CNTR_INTEN (1<<4)
+#define CNTR_DDIR (1<<3)
+
+/* ISTR bits. */
+#define ISTR_INTX (1<<8)
+#define ISTR_INT_F (1<<7)
+#define ISTR_INTS (1<<6)
+#define ISTR_E_INT (1<<5)
+#define ISTR_INT_P (1<<4)
+#define ISTR_UE_INT (1<<3)
+#define ISTR_OE_INT (1<<2)
+#define ISTR_FF_FLG (1<<1)
+#define ISTR_FE_FLG (1<<0)
+
+#endif /* A2091_H */
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
new file mode 100644
index 000000000..e6375b4de
--- /dev/null
+++ b/drivers/scsi/a3000.c
@@ -0,0 +1,286 @@
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "scsi.h"
+#include "wd33c93.h"
+#include "a3000.h"
+
+
+struct a3000_hostdata {
+ struct WD33C93_hostdata wh;
+ struct a3000_scsiregs *regs;
+};
+
+static irqreturn_t a3000_intr(int irq, void *data)
+{
+ struct Scsi_Host *instance = data;
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ unsigned int status = hdata->regs->ISTR;
+ unsigned long flags;
+
+ if (!(status & ISTR_INT_P))
+ return IRQ_NONE;
+ if (status & ISTR_INTS) {
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+ }
+ pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
+ return IRQ_NONE;
+}
+
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a3000_scsiregs *regs = hdata->regs;
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /*
+ * if the physical address has the wrong alignment, or if
+ * physical address is bad, or if it is a write and at the
+ * end of a physical memory chunk, then allocate a bounce
+ * buffer
+ */
+ if (addr & A3000_XFER_MASK) {
+ wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
+
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+ }
+
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
+
+ /* remember direction */
+ wh->dma_dir = dir_in;
+
+ regs->CNTR = cntr;
+
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+
+ /* start DMA */
+ mb(); /* make sure setup is completed */
+ regs->ST_DMA = 1;
+ mb(); /* make sure DMA has started before next IO */
+
+ /* return success */
+ return 0;
+}
+
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ int status)
+{
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a3000_scsiregs *regs = hdata->regs;
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!wh->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ regs->CNTR = cntr;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* flush if we were reading */
+ if (wh->dma_dir) {
+ regs->FLUSH = 1;
+ mb(); /* don't allow prefetch */
+ while (!(regs->ISTR & ISTR_FE_FLG))
+ barrier();
+ mb(); /* no IO until FLUSH is done */
+ }
+
+ /* clear a possible interrupt */
+ /* I think that this CINT is only necessary if you are
+ * using the terminal count features. HM 7 Mar 1994
+ */
+ regs->CINT = 1;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+ mb(); /* make sure DMA is stopped before next IO */
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && wh->dma_bounce_buffer) {
+ if (SCpnt) {
+ if (wh->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ } else {
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ }
+ }
+}
+
+static int a3000_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+
+ /* FIXME perform bus-specific reset */
+
+ /* FIXME 2: kill this entire function, which should
+ cause mid-layer to call wd33c93_host_reset anyway? */
+
+ spin_lock_irq(instance->host_lock);
+ wd33c93_host_reset(cmd);
+ spin_unlock_irq(instance->host_lock);
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template amiga_a3000_scsi_template = {
+ .module = THIS_MODULE,
+ .name = "Amiga 3000 built-in SCSI",
+ .show_info = wd33c93_show_info,
+ .write_info = wd33c93_write_info,
+ .proc_name = "A3000",
+ .queuecommand = wd33c93_queuecommand,
+ .eh_abort_handler = wd33c93_abort,
+ .eh_bus_reset_handler = a3000_bus_reset,
+ .eh_host_reset_handler = wd33c93_host_reset,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING
+};
+
+static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct Scsi_Host *instance;
+ int error;
+ struct a3000_scsiregs *regs;
+ wd33c93_regs wdregs;
+ struct a3000_hostdata *hdata;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
+ return -EBUSY;
+
+ instance = scsi_host_alloc(&amiga_a3000_scsi_template,
+ sizeof(struct a3000_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ instance->irq = IRQ_AMIGA_PORTS;
+
+ regs = ZTWO_VADDR(res->start);
+ regs->DAWR = DAWR_A3000;
+
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+
+ hdata = shost_priv(instance);
+ hdata->wh.no_sync = 0xff;
+ hdata->wh.fast = 0;
+ hdata->wh.dma_mode = CTRL_DMA;
+ hdata->regs = regs;
+
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
+ error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
+ "A3000 SCSI", instance);
+ if (error)
+ goto fail_irq;
+
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ platform_set_drvdata(pdev, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ free_irq(IRQ_AMIGA_PORTS, instance);
+fail_irq:
+ scsi_host_put(instance);
+fail_alloc:
+ release_mem_region(res->start, resource_size(res));
+ return error;
+}
+
+static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance = platform_get_drvdata(pdev);
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ hdata->regs->CNTR = 0;
+ scsi_remove_host(instance);
+ free_irq(IRQ_AMIGA_PORTS, instance);
+ scsi_host_put(instance);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
+
+static struct platform_driver amiga_a3000_scsi_driver = {
+ .remove = __exit_p(amiga_a3000_scsi_remove),
+ .driver = {
+ .name = "amiga-a3000-scsi",
+ },
+};
+
+module_platform_driver_probe(amiga_a3000_scsi_driver, amiga_a3000_scsi_probe);
+
+MODULE_DESCRIPTION("Amiga 3000 built-in SCSI");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-a3000-scsi");
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
new file mode 100644
index 000000000..49db4a335
--- /dev/null
+++ b/drivers/scsi/a3000.h
@@ -0,0 +1,72 @@
+#ifndef A3000_H
+#define A3000_H
+
+/* $Id: a3000.h,v 1.4 1997/01/19 23:07:10 davem Exp $
+ *
+ * Header file for the Amiga 3000 built-in SCSI controller for Linux
+ *
+ * Written and (C) 1993, Hamish Macdonald, see a3000.c for more info
+ *
+ */
+
+#include <linux/types.h>
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+/*
+ * if the transfer address ANDed with this results in a non-zero
+ * result, then we can't use DMA.
+ */
+#define A3000_XFER_MASK (0x00000003)
+
+struct a3000_scsiregs {
+ unsigned char pad1[2];
+ volatile unsigned short DAWR;
+ volatile unsigned int WTC;
+ unsigned char pad2[2];
+ volatile unsigned short CNTR;
+ volatile unsigned long ACR;
+ unsigned char pad3[2];
+ volatile unsigned short ST_DMA;
+ unsigned char pad4[2];
+ volatile unsigned short FLUSH;
+ unsigned char pad5[2];
+ volatile unsigned short CINT;
+ unsigned char pad6[2];
+ volatile unsigned short ISTR;
+ unsigned char pad7[30];
+ volatile unsigned short SP_DMA;
+ unsigned char pad8;
+ volatile unsigned char SASR;
+ unsigned char pad9;
+ volatile unsigned char SCMD;
+};
+
+#define DAWR_A3000 (3)
+
+/* CNTR bits. */
+#define CNTR_TCEN (1<<5)
+#define CNTR_PREST (1<<4)
+#define CNTR_PDMD (1<<3)
+#define CNTR_INTEN (1<<2)
+#define CNTR_DDIR (1<<1)
+#define CNTR_IO_DX (1<<0)
+
+/* ISTR bits. */
+#define ISTR_INTX (1<<8)
+#define ISTR_INT_F (1<<7)
+#define ISTR_INTS (1<<6)
+#define ISTR_E_INT (1<<5)
+#define ISTR_INT_P (1<<4)
+#define ISTR_UE_INT (1<<3)
+#define ISTR_OE_INT (1<<2)
+#define ISTR_FF_FLG (1<<1)
+#define ISTR_FE_FLG (1<<0)
+
+#endif /* A3000_H */
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
new file mode 100644
index 000000000..66c573093
--- /dev/null
+++ b/drivers/scsi/a4000t.c
@@ -0,0 +1,124 @@
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga Technologies A4000T SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+
+static struct scsi_host_template a4000t_scsi_driver_template = {
+ .name = "A4000T builtin SCSI",
+ .proc_name = "A4000t",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+
+#define A4000T_SCSI_OFFSET 0x40
+
+static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ phys_addr_t scsi_addr;
+ struct NCR_700_Host_Parameters *hostdata;
+ struct Scsi_Host *host;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "A4000T builtin SCSI"))
+ return -EBUSY;
+
+ hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters),
+ GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(&pdev->dev, "Failed to allocate host data\n");
+ goto out_release;
+ }
+
+ scsi_addr = res->start + A4000T_SCSI_OFFSET;
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = ZTWO_VADDR(scsi_addr);
+ hostdata->clock = 50;
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+
+ /* and register the chip */
+ host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata,
+ &pdev->dev);
+ if (!host) {
+ dev_err(&pdev->dev,
+ "No host detected; board configuration problem?\n");
+ goto out_free;
+ }
+
+ host->this_id = 7;
+ host->base = scsi_addr;
+ host->irq = IRQ_AMIGA_PORTS;
+
+ if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
+ host)) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto out_put_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+ scsi_scan_host(host);
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out_release:
+ release_mem_region(res->start, resource_size(res));
+ return -ENODEV;
+}
+
+static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *host = platform_get_drvdata(pdev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
+
+static struct platform_driver amiga_a4000t_scsi_driver = {
+ .remove = __exit_p(amiga_a4000t_scsi_remove),
+ .driver = {
+ .name = "amiga-a4000t-scsi",
+ },
+};
+
+module_platform_driver_probe(amiga_a4000t_scsi_driver, amiga_a4000t_scsi_probe);
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / "
+ "Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-a4000t-scsi");
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
new file mode 100644
index 000000000..1bd9fd18f
--- /dev/null
+++ b/drivers/scsi/aacraid/Makefile
@@ -0,0 +1,8 @@
+# Adaptec aacraid
+
+obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
+
+aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
+ dpcsup.o rx.o sa.o rkt.o nark.o src.o
+
+ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
new file mode 100644
index 000000000..78dc863ef
--- /dev/null
+++ b/drivers/scsi/aacraid/TODO
@@ -0,0 +1,3 @@
+o Testing
+o More testing
+o I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
new file mode 100644
index 000000000..9b3dd6ef6
--- /dev/null
+++ b/drivers/scsi/aacraid/aachba.c
@@ -0,0 +1,3501 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <asm/uaccess.h>
+#include <linux/highmem.h> /* For flush_kernel_dcache_page */
+#include <linux/module.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/* values for inqd_pdt: Peripheral device type in plain English */
+#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
+#define INQD_PDT_PROC 0x03 /* Processor device */
+#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
+#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
+#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
+#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
+
+#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
+#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */
+
+/*
+ * Sense codes
+ */
+
+#define SENCODE_NO_SENSE 0x00
+#define SENCODE_END_OF_DATA 0x00
+#define SENCODE_BECOMING_READY 0x04
+#define SENCODE_INIT_CMD_REQUIRED 0x04
+#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
+#define SENCODE_INVALID_COMMAND 0x20
+#define SENCODE_LBA_OUT_OF_RANGE 0x21
+#define SENCODE_INVALID_CDB_FIELD 0x24
+#define SENCODE_LUN_NOT_SUPPORTED 0x25
+#define SENCODE_INVALID_PARAM_FIELD 0x26
+#define SENCODE_PARAM_NOT_SUPPORTED 0x26
+#define SENCODE_PARAM_VALUE_INVALID 0x26
+#define SENCODE_RESET_OCCURRED 0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
+#define SENCODE_DIAGNOSTIC_FAILURE 0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
+#define SENCODE_INVALID_MESSAGE_ERROR 0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
+#define SENCODE_OVERLAPPED_COMMAND 0x4E
+
+/*
+ * Additional sense codes
+ */
+
+#define ASENCODE_NO_SENSE 0x00
+#define ASENCODE_END_OF_DATA 0x05
+#define ASENCODE_BECOMING_READY 0x01
+#define ASENCODE_INIT_CMD_REQUIRED 0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
+#define ASENCODE_INVALID_COMMAND 0x00
+#define ASENCODE_LBA_OUT_OF_RANGE 0x00
+#define ASENCODE_INVALID_CDB_FIELD 0x00
+#define ASENCODE_LUN_NOT_SUPPORTED 0x00
+#define ASENCODE_INVALID_PARAM_FIELD 0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
+#define ASENCODE_PARAM_VALUE_INVALID 0x02
+#define ASENCODE_RESET_OCCURRED 0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
+#define ASENCODE_OVERLAPPED_COMMAND 0x00
+
+#define BYTE0(x) (unsigned char)(x)
+#define BYTE1(x) (unsigned char)((x) >> 8)
+#define BYTE2(x) (unsigned char)((x) >> 16)
+#define BYTE3(x) (unsigned char)((x) >> 24)
+
+/* MODE_SENSE data format */
+typedef struct {
+ struct {
+ u8 data_length;
+ u8 med_type;
+ u8 dev_par;
+ u8 bd_length;
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+ u8 mpc_buf[3];
+} __attribute__((packed)) aac_modep_data;
+
+/* MODE_SENSE_10 data format */
+typedef struct {
+ struct {
+ u8 data_length[2];
+ u8 med_type;
+ u8 dev_par;
+ u8 rsrvd[2];
+ u8 bd_length[2];
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+ u8 mpc_buf[3];
+} __attribute__((packed)) aac_modep10_data;
+
+/*------------------------------------------------------------------------------
+ * S T R U C T S / T Y P E D E F S
+ *----------------------------------------------------------------------------*/
+/* SCSI inquiry data */
+struct inquiry_data {
+ u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
+ u8 inqd_dtq; /* RMB | Device Type Qualifier */
+ u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
+ u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
+ u8 inqd_len; /* Additional length (n-4) */
+ u8 inqd_pad1[2];/* Reserved - must be zero */
+ u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
+ u8 inqd_vid[8]; /* Vendor ID */
+ u8 inqd_pid[16];/* Product ID */
+ u8 inqd_prl[4]; /* Product Revision Level */
+};
+
+/* Added for VPD 0x83 */
+typedef struct {
+ u8 CodeSet:4; /* VPD_CODE_SET */
+ u8 Reserved:4;
+ u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
+ u8 Reserved2:4;
+ u8 Reserved3;
+ u8 IdentifierLength;
+ u8 VendId[8];
+ u8 ProductId[16];
+ u8 SerialNumber[8]; /* SN in ASCII */
+
+} TVPD_ID_Descriptor_Type_1;
+
+typedef struct {
+ u8 CodeSet:4; /* VPD_CODE_SET */
+ u8 Reserved:4;
+ u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */
+ u8 Reserved2:4;
+ u8 Reserved3;
+ u8 IdentifierLength;
+ struct TEU64Id {
+ u32 Serial;
+ /* The serial number supposed to be 40 bits,
+ * bit we only support 32, so make the last byte zero. */
+ u8 Reserved;
+ u8 VendId[3];
+ } EU64Id;
+
+} TVPD_ID_Descriptor_Type_2;
+
+typedef struct {
+ u8 DeviceType:5;
+ u8 DeviceTypeQualifier:3;
+ u8 PageCode;
+ u8 Reserved;
+ u8 PageLength;
+ TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
+ TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
+
+} TVPD_Page83;
+
+/*
+ * M O D U L E G L O B A L S
+ */
+
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
+ int pages, int nseg, int nseg_new);
+static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
+#ifdef AAC_DETAILED_STATUS_INFO
+static char *aac_get_status_string(u32 status);
+#endif
+
+/*
+ * Non dasd selection is handled entirely in aachba now
+ */
+
+static int nondasd = -1;
+static int aac_cache = 2; /* WCE=0 to avoid performance problems */
+static int dacmode = -1;
+int aac_msi;
+int aac_commit = -1;
+int startup_timeout = 180;
+int aif_timeout = 120;
+int aac_sync_mode; /* Only Sync. transfer - disabled */
+int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
+
+module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
+ " 0=off, 1=on");
+module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
+ " 0=off, 1=on");
+module_param(nondasd, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
+ " 0=off, 1=on");
+module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
+ "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
+ "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
+ "\tbit 2 - Disable only if Battery is protecting Cache");
+module_param(dacmode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
+ " 0=off, 1=on");
+module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
+ " adapter for foreign arrays.\n"
+ "This is typically needed in systems that do not have a BIOS."
+ " 0=off, 1=on");
+module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(msi, "IRQ handling."
+ " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
+module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
+ " adapter to have it's kernel up and\n"
+ "running. This is typically adjusted for large systems that do not"
+ " have a BIOS.");
+module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
+ " applications to pick up AIFs before\n"
+ "deregistering them. This is typically adjusted for heavily burdened"
+ " systems.");
+
+int numacb = -1;
+module_param(numacb, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
+ " blocks (FIB) allocated. Valid values are 512 and down. Default is"
+ " to use suggestion from Firmware.");
+
+int acbsize = -1;
+module_param(acbsize, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
+ " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
+ " suggestion from Firmware.");
+
+int update_interval = 30 * 60;
+module_param(update_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
+ " updates issued to adapter.");
+
+int check_interval = 24 * 60 * 60;
+module_param(check_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
+ " checks.");
+
+int aac_check_reset = 1;
+module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
+ " adapter. a value of -1 forces the reset to adapters programmed to"
+ " ignore it.");
+
+int expose_physicals = -1;
+module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
+ " -1=protect 0=off, 1=on");
+
+int aac_reset_devices;
+module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
+
+int aac_wwn = 1;
+module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
+ "\t0 - Disable\n"
+ "\t1 - Array Meta Data Signature (default)\n"
+ "\t2 - Adapter Serial Number");
+
+
+static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
+ struct fib *fibptr) {
+ struct scsi_device *device;
+
+ if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
+ dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return 0;
+ }
+ scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+ device = scsicmd->device;
+ if (unlikely(!device || !scsi_device_online(device))) {
+ dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * aac_get_config_status - check the adapter configuration
+ * @common: adapter to query
+ *
+ * Query config status, and commit the configuration if needed.
+ */
+int aac_get_config_status(struct aac_dev *dev, int commit_flag)
+{
+ int status = 0;
+ struct fib * fibptr;
+
+ if (!(fibptr = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(fibptr);
+ {
+ struct aac_get_config_status *dinfo;
+ dinfo = (struct aac_get_config_status *) fib_data(fibptr);
+
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
+ dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
+ }
+
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof (struct aac_get_config_status),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL);
+ if (status < 0) {
+ printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
+ } else {
+ struct aac_get_config_status_resp *reply
+ = (struct aac_get_config_status_resp *) fib_data(fibptr);
+ dprintk((KERN_WARNING
+ "aac_get_config_status: response=%d status=%d action=%d\n",
+ le32_to_cpu(reply->response),
+ le32_to_cpu(reply->status),
+ le32_to_cpu(reply->data.action)));
+ if ((le32_to_cpu(reply->response) != ST_OK) ||
+ (le32_to_cpu(reply->status) != CT_OK) ||
+ (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
+ printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
+ status = -EINVAL;
+ }
+ }
+ /* Do not set XferState to zero unless receives a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+
+ /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
+ if (status >= 0) {
+ if ((aac_commit == 1) || commit_flag) {
+ struct aac_commit_config * dinfo;
+ aac_fib_init(fibptr);
+ dinfo = (struct aac_commit_config *) fib_data(fibptr);
+
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
+
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof (struct aac_commit_config),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL);
+ /* Do not set XferState to zero unless
+ * receives a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+ } else if (aac_commit == 0) {
+ printk(KERN_WARNING
+ "aac_get_config_status: Foreign device configurations are being ignored\n");
+ }
+ }
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
+ return status;
+}
+
+static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
+{
+ char inq_data;
+ scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
+ inq_data &= 0xdf;
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ }
+}
+
+/**
+ * aac_get_containers - list containers
+ * @common: adapter to probe
+ *
+ * Make a list of all containers on this controller
+ */
+int aac_get_containers(struct aac_dev *dev)
+{
+ struct fsa_dev_info *fsa_dev_ptr;
+ u32 index;
+ int status = 0;
+ struct fib * fibptr;
+ struct aac_get_container_count *dinfo;
+ struct aac_get_container_count_resp *dresp;
+ int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
+
+ if (!(fibptr = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(fibptr);
+ dinfo = (struct aac_get_container_count *) fib_data(fibptr);
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
+
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof (struct aac_get_container_count),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL);
+ if (status >= 0) {
+ dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
+ maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_SUPPORTED_240_VOLUMES) {
+ maximum_num_containers =
+ le32_to_cpu(dresp->MaxSimpleVolumes);
+ }
+ aac_fib_complete(fibptr);
+ }
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
+
+ if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
+ maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
+ fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
+ GFP_KERNEL);
+ if (!fsa_dev_ptr)
+ return -ENOMEM;
+
+ dev->fsa_dev = fsa_dev_ptr;
+ dev->maximum_num_containers = maximum_num_containers;
+
+ for (index = 0; index < dev->maximum_num_containers; ) {
+ fsa_dev_ptr[index].devname[0] = '\0';
+
+ status = aac_probe_container(dev, index);
+
+ if (status < 0) {
+ printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
+ break;
+ }
+
+ /*
+ * If there are no more containers, then stop asking.
+ */
+ if (++index >= status)
+ break;
+ }
+ return status;
+}
+
+static void get_container_name_callback(void *context, struct fib * fibptr)
+{
+ struct aac_get_name_resp * get_name_reply;
+ struct scsi_cmnd * scsicmd;
+
+ scsicmd = (struct scsi_cmnd *) context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
+ BUG_ON(fibptr == NULL);
+
+ get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
+ /* Failure is irrelevant, using default value instead */
+ if ((le32_to_cpu(get_name_reply->status) == CT_OK)
+ && (get_name_reply->data[0] != '\0')) {
+ char *sp = get_name_reply->data;
+ sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
+ while (*sp == ' ')
+ ++sp;
+ if (*sp) {
+ struct inquiry_data inq;
+ char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
+ int count = sizeof(d);
+ char *dp = d;
+ do {
+ *dp++ = (*sp) ? *sp++ : ' ';
+ } while (--count > 0);
+
+ scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
+ memcpy(inq.inqd_pid, d, sizeof(d));
+ scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
+ }
+ }
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ * aac_get_container_name - get container name, none blocking.
+ */
+static int aac_get_container_name(struct scsi_cmnd * scsicmd)
+{
+ int status;
+ struct aac_get_name *dinfo;
+ struct fib * cmd_fibcontext;
+ struct aac_dev * dev;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+ if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(cmd_fibcontext);
+ dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_READ_NAME);
+ dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+ dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
+
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof (struct aac_get_name),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)get_container_name_callback,
+ (void *) scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return -1;
+}
+
+static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
+{
+ struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+
+ if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
+ return aac_scsi_cmd(scsicmd);
+
+ scsicmd->result = DID_NO_CONNECT << 16;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+}
+
+static void _aac_probe_container2(void * context, struct fib * fibptr)
+{
+ struct fsa_dev_info *fsa_dev_ptr;
+ int (*callback)(struct scsi_cmnd *);
+ struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ scsicmd->SCp.Status = 0;
+ fsa_dev_ptr = fibptr->dev->fsa_dev;
+ if (fsa_dev_ptr) {
+ struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
+ fsa_dev_ptr += scmd_id(scsicmd);
+
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+ (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+ if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
+ dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
+ fsa_dev_ptr->block_size = 0x200;
+ } else {
+ fsa_dev_ptr->block_size =
+ le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
+ }
+ fsa_dev_ptr->valid = 1;
+ /* sense_key holds the current state of the spin-up */
+ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
+ fsa_dev_ptr->sense_data.sense_key = NOT_READY;
+ else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
+ fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
+ fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
+ fsa_dev_ptr->size
+ = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
+ fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
+ }
+ if ((fsa_dev_ptr->valid & 1) == 0)
+ fsa_dev_ptr->valid = 0;
+ scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+ }
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
+ scsicmd->SCp.ptr = NULL;
+ (*callback)(scsicmd);
+ return;
+}
+
+static void _aac_probe_container1(void * context, struct fib * fibptr)
+{
+ struct scsi_cmnd * scsicmd;
+ struct aac_mount * dresp;
+ struct aac_query_mount *dinfo;
+ int status;
+
+ dresp = (struct aac_mount *) fib_data(fibptr);
+ if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE))
+ dresp->mnt[0].capacityhigh = 0;
+ if ((le32_to_cpu(dresp->status) != ST_OK) ||
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+ _aac_probe_container2(context, fibptr);
+ return;
+ }
+ scsicmd = (struct scsi_cmnd *) context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ aac_fib_init(fibptr);
+
+ dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)
+ dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+ else
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+
+ dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 0, 1,
+ _aac_probe_container2,
+ (void *) scsicmd);
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS)
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ else if (status < 0) {
+ /* Inherit results from VM_NameServe, if any */
+ dresp->status = cpu_to_le32(ST_OK);
+ _aac_probe_container2(context, fibptr);
+ }
+}
+
+static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
+{
+ struct fib * fibptr;
+ int status = -ENOMEM;
+
+ if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
+ struct aac_query_mount *dinfo;
+
+ aac_fib_init(fibptr);
+
+ dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+ if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_VARIABLE_BLOCK_SIZE)
+ dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+ else
+ dinfo->command = cpu_to_le32(VM_NameServe);
+
+ dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+ scsicmd->SCp.ptr = (char *)callback;
+
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 0, 1,
+ _aac_probe_container1,
+ (void *) scsicmd);
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+ if (status < 0) {
+ scsicmd->SCp.ptr = NULL;
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
+ }
+ if (status < 0) {
+ struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+ if (fsa_dev_ptr) {
+ fsa_dev_ptr += scmd_id(scsicmd);
+ if ((fsa_dev_ptr->valid & 1) == 0) {
+ fsa_dev_ptr->valid = 0;
+ return (*callback)(scsicmd);
+ }
+ }
+ }
+ return status;
+}
+
+/**
+ * aac_probe_container - query a logical volume
+ * @dev: device to query
+ * @cid: container identifier
+ *
+ * Queries the controller about the given volume. The volume information
+ * is updated in the struct fsa_dev_info structure rather than returned.
+ */
+static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
+{
+ scsicmd->device = NULL;
+ return 0;
+}
+
+int aac_probe_container(struct aac_dev *dev, int cid)
+{
+ struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
+ struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
+ int status;
+
+ if (!scsicmd || !scsidev) {
+ kfree(scsicmd);
+ kfree(scsidev);
+ return -ENOMEM;
+ }
+ scsicmd->list.next = NULL;
+ scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
+
+ scsicmd->device = scsidev;
+ scsidev->sdev_state = 0;
+ scsidev->id = cid;
+ scsidev->host = dev->scsi_host_ptr;
+
+ if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
+ while (scsicmd->device == scsidev)
+ schedule();
+ kfree(scsidev);
+ status = scsicmd->SCp.Status;
+ kfree(scsicmd);
+ return status;
+}
+
+/* Local Structure to set SCSI inquiry data strings */
+struct scsi_inq {
+ char vid[8]; /* Vendor ID */
+ char pid[16]; /* Product ID */
+ char prl[4]; /* Product Revision Level */
+};
+
+/**
+ * InqStrCopy - string merge
+ * @a: string to copy from
+ * @b: string to copy to
+ *
+ * Copy a String from one location to another
+ * without copying \0
+ */
+
+static void inqstrcpy(char *a, char *b)
+{
+
+ while (*a != (char)0)
+ *b++ = *a++;
+}
+
+static char *container_types[] = {
+ "None",
+ "Volume",
+ "Mirror",
+ "Stripe",
+ "RAID5",
+ "SSRW",
+ "SSRO",
+ "Morph",
+ "Legacy",
+ "RAID4",
+ "RAID10",
+ "RAID00",
+ "V-MIRRORS",
+ "PSEUDO R4",
+ "RAID50",
+ "RAID5D",
+ "RAID5D0",
+ "RAID1E",
+ "RAID6",
+ "RAID60",
+ "Unknown"
+};
+
+char * get_container_type(unsigned tindex)
+{
+ if (tindex >= ARRAY_SIZE(container_types))
+ tindex = ARRAY_SIZE(container_types) - 1;
+ return container_types[tindex];
+}
+
+/* Function: setinqstr
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI inquiry data strings for vendor, product
+ * and revision level. Allows strings to be set in platform dependent
+ * files instead of in OS dependent driver source.
+ */
+
+static void setinqstr(struct aac_dev *dev, void *data, int tindex)
+{
+ struct scsi_inq *str;
+
+ str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
+ memset(str, ' ', sizeof(*str));
+
+ if (dev->supplement_adapter_info.AdapterTypeText[0]) {
+ char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ int c;
+ if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
+ inqstrcpy("SMC", str->vid);
+ else {
+ c = sizeof(str->vid);
+ while (*cp && *cp != ' ' && --c)
+ ++cp;
+ c = *cp;
+ *cp = '\0';
+ inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
+ str->vid);
+ *cp = c;
+ while (*cp && *cp != ' ')
+ ++cp;
+ }
+ while (*cp == ' ')
+ ++cp;
+ /* last six chars reserved for vol type */
+ c = 0;
+ if (strlen(cp) > sizeof(str->pid)) {
+ c = cp[sizeof(str->pid)];
+ cp[sizeof(str->pid)] = '\0';
+ }
+ inqstrcpy (cp, str->pid);
+ if (c)
+ cp[sizeof(str->pid)] = c;
+ } else {
+ struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
+
+ inqstrcpy (mp->vname, str->vid);
+ /* last six chars reserved for vol type */
+ inqstrcpy (mp->model, str->pid);
+ }
+
+ if (tindex < ARRAY_SIZE(container_types)){
+ char *findit = str->pid;
+
+ for ( ; *findit != ' '; findit++); /* walk till we find a space */
+ /* RAID is superfluous in the context of a RAID device */
+ if (memcmp(findit-4, "RAID", 4) == 0)
+ *(findit -= 4) = ' ';
+ if (((findit - str->pid) + strlen(container_types[tindex]))
+ < (sizeof(str->pid) + sizeof(str->prl)))
+ inqstrcpy (container_types[tindex], findit + 1);
+ }
+ inqstrcpy ("V1.0", str->prl);
+}
+
+static void get_container_serial_callback(void *context, struct fib * fibptr)
+{
+ struct aac_get_serial_resp * get_serial_reply;
+ struct scsi_cmnd * scsicmd;
+
+ BUG_ON(fibptr == NULL);
+
+ scsicmd = (struct scsi_cmnd *) context;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
+ /* Failure is irrelevant, using default value instead */
+ if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
+ /*Check to see if it's for VPD 0x83 or 0x80 */
+ if (scsicmd->cmnd[2] == 0x83) {
+ /* vpd page 0x83 - Device Identification Page */
+ int i;
+ TVPD_Page83 VPDPage83Data;
+
+ memset(((u8 *)&VPDPage83Data), 0,
+ sizeof(VPDPage83Data));
+
+ /* DIRECT_ACCESS_DEVIC */
+ VPDPage83Data.DeviceType = 0;
+ /* DEVICE_CONNECTED */
+ VPDPage83Data.DeviceTypeQualifier = 0;
+ /* VPD_DEVICE_IDENTIFIERS */
+ VPDPage83Data.PageCode = 0x83;
+ VPDPage83Data.Reserved = 0;
+ VPDPage83Data.PageLength =
+ sizeof(VPDPage83Data.IdDescriptorType1) +
+ sizeof(VPDPage83Data.IdDescriptorType2);
+
+ /* T10 Vendor Identifier Field Format */
+ /* VpdCodeSetAscii */
+ VPDPage83Data.IdDescriptorType1.CodeSet = 2;
+ /* VpdIdentifierTypeVendorId */
+ VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
+ VPDPage83Data.IdDescriptorType1.IdentifierLength =
+ sizeof(VPDPage83Data.IdDescriptorType1) - 4;
+
+ /* "ADAPTEC " for adaptec */
+ memcpy(VPDPage83Data.IdDescriptorType1.VendId,
+ "ADAPTEC ",
+ sizeof(VPDPage83Data.IdDescriptorType1.VendId));
+ memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
+ "ARRAY ",
+ sizeof(
+ VPDPage83Data.IdDescriptorType1.ProductId));
+
+ /* Convert to ascii based serial number.
+ * The LSB is the the end.
+ */
+ for (i = 0; i < 8; i++) {
+ u8 temp =
+ (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
+ if (temp > 0x9) {
+ VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ 'A' + (temp - 0xA);
+ } else {
+ VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+ '0' + temp;
+ }
+ }
+
+ /* VpdCodeSetBinary */
+ VPDPage83Data.IdDescriptorType2.CodeSet = 1;
+ /* VpdIdentifierTypeEUI64 */
+ VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
+ VPDPage83Data.IdDescriptorType2.IdentifierLength =
+ sizeof(VPDPage83Data.IdDescriptorType2) - 4;
+
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
+ VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
+
+ VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
+ get_serial_reply->uid;
+ VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
+
+ /* Move the inquiry data to the response buffer. */
+ scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
+ sizeof(VPDPage83Data));
+ } else {
+ /* It must be for VPD 0x80 */
+ char sp[13];
+ /* EVPD bit set */
+ sp[0] = INQD_PDT_DA;
+ sp[1] = scsicmd->cmnd[2];
+ sp[2] = 0;
+ sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ le32_to_cpu(get_serial_reply->uid));
+ scsi_sg_copy_from_buffer(scsicmd, sp,
+ sizeof(sp));
+ }
+ }
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ * aac_get_container_serial - get container serial, none blocking.
+ */
+static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
+{
+ int status;
+ struct aac_get_serial *dinfo;
+ struct fib * cmd_fibcontext;
+ struct aac_dev * dev;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+ if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(cmd_fibcontext);
+ dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
+
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
+ dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof (struct aac_get_serial),
+ FsaNormal,
+ 0, 1,
+ (fib_callback) get_container_serial_callback,
+ (void *) scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return -1;
+}
+
+/* Function: setinqserial
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI Unit Serial number.
+ * This is a fake. We should read a proper
+ * serial number from the container. <SuSE>But
+ * without docs it's quite hard to do it :-)
+ * So this will have to do in the meantime.</SuSE>
+ */
+
+static int setinqserial(struct aac_dev *dev, void *data, int cid)
+{
+ /*
+ * This breaks array migration.
+ */
+ return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
+ le32_to_cpu(dev->adapter_info.serial[0]), cid);
+}
+
+static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
+ u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
+{
+ u8 *sense_buf = (u8 *)sense_data;
+ /* Sense data valid, err code 70h */
+ sense_buf[0] = 0x70; /* No info field */
+ sense_buf[1] = 0; /* Segment number, always zero */
+
+ sense_buf[2] = sense_key; /* Sense key */
+
+ sense_buf[12] = sense_code; /* Additional sense code */
+ sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
+
+ if (sense_key == ILLEGAL_REQUEST) {
+ sense_buf[7] = 10; /* Additional sense length */
+
+ sense_buf[15] = bit_pointer;
+ /* Illegal parameter is in the parameter block */
+ if (sense_code == SENCODE_INVALID_CDB_FIELD)
+ sense_buf[15] |= 0xc0;/* Std sense key specific field */
+ /* Illegal parameter is in the CDB block */
+ sense_buf[16] = field_pointer >> 8; /* MSB */
+ sense_buf[17] = field_pointer; /* LSB */
+ } else
+ sense_buf[7] = 6; /* Additional sense length */
+}
+
+static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
+{
+ if (lba & 0xffffffff00000000LL) {
+ int cid = scmd_id(cmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ cmd->scsi_done(cmd);
+ return 1;
+ }
+ return 0;
+}
+
+static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
+{
+ return 0;
+}
+
+static void io_callback(void *context, struct fib * fibptr);
+
+static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+ struct aac_dev *dev = fib->dev;
+ u16 fibsize, command;
+ long ret;
+
+ aac_fib_init(fib);
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ struct aac_raw_io2 *readcmd2;
+ readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
+ memset(readcmd2, 0, sizeof(struct aac_raw_io2));
+ readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ readcmd2->byteCount = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
+ readcmd2->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
+ ret = aac_build_sgraw2(cmd, readcmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo2;
+ fibsize = sizeof(struct aac_raw_io2) +
+ ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ } else {
+ struct aac_raw_io *readcmd;
+ readcmd = (struct aac_raw_io *) fib_data(fib);
+ readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ readcmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
+ readcmd->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
+ readcmd->bpTotal = 0;
+ readcmd->bpComplete = 0;
+ ret = aac_build_sgraw(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo;
+ fibsize = sizeof(struct aac_raw_io) +
+ ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+ }
+
+ BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(command,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+ u16 fibsize;
+ struct aac_read64 *readcmd;
+ long ret;
+
+ aac_fib_init(fib);
+ readcmd = (struct aac_read64 *) fib_data(fib);
+ readcmd->command = cpu_to_le32(VM_CtHostRead64);
+ readcmd->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd->sector_count = cpu_to_le16(count);
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->pad = 0;
+ readcmd->flags = 0;
+
+ ret = aac_build_sg64(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_read64) +
+ ((le32_to_cpu(readcmd->sg.count) - 1) *
+ sizeof (struct sgentry64));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand64,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+ u16 fibsize;
+ struct aac_read *readcmd;
+ struct aac_dev *dev = fib->dev;
+ long ret;
+
+ aac_fib_init(fib);
+ readcmd = (struct aac_read *) fib_data(fib);
+ readcmd->command = cpu_to_le32(VM_CtBlockRead);
+ readcmd->cid = cpu_to_le32(scmd_id(cmd));
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
+
+ ret = aac_build_sg(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_read) +
+ ((le32_to_cpu(readcmd->sg.count) - 1) *
+ sizeof (struct sgentry));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+ struct aac_dev *dev = fib->dev;
+ u16 fibsize, command;
+ long ret;
+
+ aac_fib_init(fib);
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ struct aac_raw_io2 *writecmd2;
+ writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
+ memset(writecmd2, 0, sizeof(struct aac_raw_io2));
+ writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ writecmd2->byteCount = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
+ writecmd2->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+ cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
+ cpu_to_le16(RIO2_IO_TYPE_WRITE);
+ ret = aac_build_sgraw2(cmd, writecmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo2;
+ fibsize = sizeof(struct aac_raw_io2) +
+ ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ } else {
+ struct aac_raw_io *writecmd;
+ writecmd = (struct aac_raw_io *) fib_data(fib);
+ writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ writecmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
+ writecmd->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+ cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
+ cpu_to_le16(RIO_TYPE_WRITE);
+ writecmd->bpTotal = 0;
+ writecmd->bpComplete = 0;
+ ret = aac_build_sgraw(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo;
+ fibsize = sizeof(struct aac_raw_io) +
+ ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+ }
+
+ BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(command,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+ u16 fibsize;
+ struct aac_write64 *writecmd;
+ long ret;
+
+ aac_fib_init(fib);
+ writecmd = (struct aac_write64 *) fib_data(fib);
+ writecmd->command = cpu_to_le32(VM_CtHostWrite64);
+ writecmd->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd->sector_count = cpu_to_le16(count);
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->pad = 0;
+ writecmd->flags = 0;
+
+ ret = aac_build_sg64(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_write64) +
+ ((le32_to_cpu(writecmd->sg.count) - 1) *
+ sizeof (struct sgentry64));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand64,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+ u16 fibsize;
+ struct aac_write *writecmd;
+ struct aac_dev *dev = fib->dev;
+ long ret;
+
+ aac_fib_init(fib);
+ writecmd = (struct aac_write *) fib_data(fib);
+ writecmd->command = cpu_to_le32(VM_CtBlockWrite);
+ writecmd->cid = cpu_to_le32(scmd_id(cmd));
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->count = cpu_to_le32(count *
+ dev->fsa_dev[scmd_id(cmd)].block_size);
+ writecmd->sg.count = cpu_to_le32(1);
+ /* ->stable is not used - it did mean which type of write */
+
+ ret = aac_build_sg(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_write) +
+ ((le32_to_cpu(writecmd->sg.count) - 1) *
+ sizeof (struct sgentry));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ struct aac_srb * srbcmd;
+ u32 flag;
+ u32 timeout;
+
+ aac_fib_init(fib);
+ switch(cmd->sc_data_direction){
+ case DMA_TO_DEVICE:
+ flag = SRB_DataOut;
+ break;
+ case DMA_BIDIRECTIONAL:
+ flag = SRB_DataIn | SRB_DataOut;
+ break;
+ case DMA_FROM_DEVICE:
+ flag = SRB_DataIn;
+ break;
+ case DMA_NONE:
+ default: /* shuts up some versions of gcc */
+ flag = SRB_NoDataXfer;
+ break;
+ }
+
+ srbcmd = (struct aac_srb*) fib_data(fib);
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
+ srbcmd->id = cpu_to_le32(scmd_id(cmd));
+ srbcmd->lun = cpu_to_le32(cmd->device->lun);
+ srbcmd->flags = cpu_to_le32(flag);
+ timeout = cmd->request->timeout/HZ;
+ if (timeout == 0)
+ timeout = 1;
+ srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
+ srbcmd->retry_limit = 0; /* Obsolete parameter */
+ srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
+ return srbcmd;
+}
+
+static void aac_srb_callback(void *context, struct fib * fibptr);
+
+static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ u16 fibsize;
+ struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
+
+ ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
+ if (ret < 0)
+ return ret;
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
+ /*
+ * Build Scatter/Gather list
+ */
+ fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+ ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
+ sizeof (struct sgentry64));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ScsiPortCommand64, fib,
+ fibsize, FsaNormal, 0, 1,
+ (fib_callback) aac_srb_callback,
+ (void *) cmd);
+}
+
+static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ u16 fibsize;
+ struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
+
+ ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
+ if (ret < 0)
+ return ret;
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
+ /*
+ * Build Scatter/Gather list
+ */
+ fibsize = sizeof (struct aac_srb) +
+ (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
+ sizeof (struct sgentry));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
+ (fib_callback) aac_srb_callback, (void *) cmd);
+}
+
+static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
+ (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
+ return FAILED;
+ return aac_scsi_32(fib, cmd);
+}
+
+int aac_get_adapter_info(struct aac_dev* dev)
+{
+ struct fib* fibptr;
+ int rcode;
+ u32 tmp;
+ struct aac_adapter_info *info;
+ struct aac_bus_info *command;
+ struct aac_bus_info_response *bus_info;
+
+ if (!(fibptr = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(fibptr);
+ info = (struct aac_adapter_info *) fib_data(fibptr);
+ memset(info,0,sizeof(*info));
+
+ rcode = aac_fib_send(RequestAdapterInfo,
+ fibptr,
+ sizeof(*info),
+ FsaNormal,
+ -1, 1, /* First `interrupt' command uses special wait */
+ NULL,
+ NULL);
+
+ if (rcode < 0) {
+ /* FIB should be freed only after
+ * getting the response from the F/W */
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
+ return rcode;
+ }
+ memcpy(&dev->adapter_info, info, sizeof(*info));
+
+ if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
+ struct aac_supplement_adapter_info * sinfo;
+
+ aac_fib_init(fibptr);
+
+ sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
+
+ memset(sinfo,0,sizeof(*sinfo));
+
+ rcode = aac_fib_send(RequestSupplementAdapterInfo,
+ fibptr,
+ sizeof(*sinfo),
+ FsaNormal,
+ 1, 1,
+ NULL,
+ NULL);
+
+ if (rcode >= 0)
+ memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
+ if (rcode == -ERESTARTSYS) {
+ fibptr = aac_fib_alloc(dev);
+ if (!fibptr)
+ return -ENOMEM;
+ }
+
+ }
+
+
+ /*
+ * GetBusInfo
+ */
+
+ aac_fib_init(fibptr);
+
+ bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
+
+ memset(bus_info, 0, sizeof(*bus_info));
+
+ command = (struct aac_bus_info *)bus_info;
+
+ command->Command = cpu_to_le32(VM_Ioctl);
+ command->ObjType = cpu_to_le32(FT_DRIVE);
+ command->MethodId = cpu_to_le32(1);
+ command->CtlCmd = cpu_to_le32(GetBusInfo);
+
+ rcode = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof (*bus_info),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL);
+
+ /* reasoned default */
+ dev->maximum_num_physicals = 16;
+ if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
+ dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
+ dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
+ }
+
+ if (!dev->in_reset) {
+ char buffer[16];
+ tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+ printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
+ dev->name,
+ dev->id,
+ tmp>>24,
+ (tmp>>16)&0xff,
+ tmp&0xff,
+ le32_to_cpu(dev->adapter_info.kernelbuild),
+ (int)sizeof(dev->supplement_adapter_info.BuildDate),
+ dev->supplement_adapter_info.BuildDate);
+ tmp = le32_to_cpu(dev->adapter_info.monitorrev);
+ printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
+ dev->name, dev->id,
+ tmp>>24,(tmp>>16)&0xff,tmp&0xff,
+ le32_to_cpu(dev->adapter_info.monitorbuild));
+ tmp = le32_to_cpu(dev->adapter_info.biosrev);
+ printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
+ dev->name, dev->id,
+ tmp>>24,(tmp>>16)&0xff,tmp&0xff,
+ le32_to_cpu(dev->adapter_info.biosbuild));
+ buffer[0] = '\0';
+ if (aac_get_serial_number(
+ shost_to_class(dev->scsi_host_ptr), buffer))
+ printk(KERN_INFO "%s%d: serial %s",
+ dev->name, dev->id, buffer);
+ if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
+ printk(KERN_INFO "%s%d: TSID %.*s\n",
+ dev->name, dev->id,
+ (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
+ dev->supplement_adapter_info.VpdInfo.Tsid);
+ }
+ if (!aac_check_reset || ((aac_check_reset == 1) &&
+ (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET))) {
+ printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
+ dev->name, dev->id);
+ }
+ }
+
+ dev->cache_protected = 0;
+ dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
+ AAC_FEATURE_JBOD) != 0);
+ dev->nondasd_support = 0;
+ dev->raid_scsi_mode = 0;
+ if(dev->adapter_info.options & AAC_OPT_NONDASD)
+ dev->nondasd_support = 1;
+
+ /*
+ * If the firmware supports ROMB RAID/SCSI mode and we are currently
+ * in RAID/SCSI mode, set the flag. For now if in this mode we will
+ * force nondasd support on. If we decide to allow the non-dasd flag
+ * additional changes changes will have to be made to support
+ * RAID/SCSI. the function aac_scsi_cmd in this module will have to be
+ * changed to support the new dev->raid_scsi_mode flag instead of
+ * leaching off of the dev->nondasd_support flag. Also in linit.c the
+ * function aac_detect will have to be modified where it sets up the
+ * max number of channels based on the aac->nondasd_support flag only.
+ */
+ if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
+ (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
+ dev->nondasd_support = 1;
+ dev->raid_scsi_mode = 1;
+ }
+ if (dev->raid_scsi_mode != 0)
+ printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
+ dev->name, dev->id);
+
+ if (nondasd != -1)
+ dev->nondasd_support = (nondasd!=0);
+ if (dev->nondasd_support && !dev->in_reset)
+ printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
+
+ if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
+ dev->needs_dac = 1;
+ dev->dac_support = 0;
+ if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
+ (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
+ if (!dev->in_reset)
+ printk(KERN_INFO "%s%d: 64bit support enabled.\n",
+ dev->name, dev->id);
+ dev->dac_support = 1;
+ }
+
+ if(dacmode != -1) {
+ dev->dac_support = (dacmode!=0);
+ }
+
+ /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
+ if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
+ & AAC_QUIRK_SCSI_32)) {
+ dev->nondasd_support = 0;
+ dev->jbod = 0;
+ expose_physicals = 0;
+ }
+
+ if(dev->dac_support != 0) {
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (!dev->in_reset)
+ printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
+ dev->name, dev->id);
+ } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
+ dev->name, dev->id);
+ dev->dac_support = 0;
+ } else {
+ printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
+ dev->name, dev->id);
+ rcode = -ENOMEM;
+ }
+ }
+ /*
+ * Deal with configuring for the individualized limits of each packet
+ * interface.
+ */
+ dev->a_ops.adapter_scsi = (dev->dac_support)
+ ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
+ ? aac_scsi_32_64
+ : aac_scsi_64)
+ : aac_scsi_32;
+ if (dev->raw_io_interface) {
+ dev->a_ops.adapter_bounds = (dev->raw_io_64)
+ ? aac_bounds_64
+ : aac_bounds_32;
+ dev->a_ops.adapter_read = aac_read_raw_io;
+ dev->a_ops.adapter_write = aac_write_raw_io;
+ } else {
+ dev->a_ops.adapter_bounds = aac_bounds_32;
+ dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
+ sizeof(struct aac_fibhdr) -
+ sizeof(struct aac_write) + sizeof(struct sgentry)) /
+ sizeof(struct sgentry);
+ if (dev->dac_support) {
+ dev->a_ops.adapter_read = aac_read_block64;
+ dev->a_ops.adapter_write = aac_write_block64;
+ /*
+ * 38 scatter gather elements
+ */
+ dev->scsi_host_ptr->sg_tablesize =
+ (dev->max_fib_size -
+ sizeof(struct aac_fibhdr) -
+ sizeof(struct aac_write64) +
+ sizeof(struct sgentry64)) /
+ sizeof(struct sgentry64);
+ } else {
+ dev->a_ops.adapter_read = aac_read_block;
+ dev->a_ops.adapter_write = aac_write_block;
+ }
+ dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
+ if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+ /*
+ * Worst case size that could cause sg overflow when
+ * we break up SG elements that are larger than 64KB.
+ * Would be nice if we could tell the SCSI layer what
+ * the maximum SG element size can be. Worst case is
+ * (sg_tablesize-1) 4KB elements with one 64KB
+ * element.
+ * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
+ */
+ dev->scsi_host_ptr->max_sectors =
+ (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
+ }
+ }
+ /* FIB should be freed only after getting the response from the F/W */
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
+
+ return rcode;
+}
+
+
+static void io_callback(void *context, struct fib * fibptr)
+{
+ struct aac_dev *dev;
+ struct aac_read_reply *readreply;
+ struct scsi_cmnd *scsicmd;
+ u32 cid;
+
+ scsicmd = (struct scsi_cmnd *) context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ dev = fibptr->dev;
+ cid = scmd_id(scsicmd);
+
+ if (nblank(dprintk(x))) {
+ u64 lba;
+ switch (scsicmd->cmnd[0]) {
+ case WRITE_6:
+ case READ_6:
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ break;
+ case WRITE_16:
+ case READ_16:
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ case WRITE_12:
+ case READ_12:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ default:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ }
+ printk(KERN_DEBUG
+ "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
+ smp_processor_id(), (unsigned long long)lba, jiffies);
+ }
+
+ BUG_ON(fibptr == NULL);
+
+ scsi_dma_unmap(scsicmd);
+
+ readreply = (struct aac_read_reply *)fib_data(fibptr);
+ switch (le32_to_cpu(readreply->status)) {
+ case ST_OK:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
+ break;
+ case ST_NOT_READY:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
+ SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ break;
+ default:
+#ifdef AAC_DETAILED_STATUS_INFO
+ printk(KERN_WARNING "io_callback: io failed, status = %d\n",
+ le32_to_cpu(readreply->status));
+#endif
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ break;
+ }
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+
+ scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_read(struct scsi_cmnd * scsicmd)
+{
+ u64 lba;
+ u32 count;
+ int status;
+ struct aac_dev *dev;
+ struct fib * cmd_fibcontext;
+ int cid;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ /*
+ * Get block address and transfer length
+ */
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ count = scsicmd->cmnd[4];
+
+ if (count == 0)
+ count = 256;
+ break;
+ case READ_16:
+ dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) |
+ (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ break;
+ case READ_12:
+ dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ default:
+ dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ break;
+ }
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
+ dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
+ smp_processor_id(), (unsigned long long)lba, jiffies));
+ if (aac_adapter_bounds(dev,scsicmd,lba))
+ return 0;
+ /*
+ * Alocate and initialize a Fib
+ */
+ if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ printk(KERN_WARNING "aac_read: fib allocation failed\n");
+ return -1;
+ }
+
+ status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
+ /*
+ * For some reason, the Fib didn't queue, return QUEUE_FULL
+ */
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+ scsicmd->scsi_done(scsicmd);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return 0;
+}
+
+static int aac_write(struct scsi_cmnd * scsicmd)
+{
+ u64 lba;
+ u32 count;
+ int fua;
+ int status;
+ struct aac_dev *dev;
+ struct fib * cmd_fibcontext;
+ int cid;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ /*
+ * Get block address and transfer length
+ */
+ if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */
+ {
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ count = scsicmd->cmnd[4];
+ if (count == 0)
+ count = 256;
+ fua = 0;
+ } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ fua = scsicmd->cmnd[1] & 0x8;
+ } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+ | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
+ | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ fua = scsicmd->cmnd[1] & 0x8;
+ } else {
+ dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ fua = scsicmd->cmnd[1] & 0x8;
+ }
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
+ dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
+ smp_processor_id(), (unsigned long long)lba, jiffies));
+ if (aac_adapter_bounds(dev,scsicmd,lba))
+ return 0;
+ /*
+ * Allocate and initialize a Fib then setup a BlockWrite command
+ */
+ if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ /* FIB temporarily unavailable,not catastrophic failure */
+
+ /* scsicmd->result = DID_ERROR << 16;
+ * scsicmd->scsi_done(scsicmd);
+ * return 0;
+ */
+ printk(KERN_WARNING "aac_write: fib allocation failed\n");
+ return -1;
+ }
+
+ status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
+ /*
+ * For some reason, the Fib didn't queue, return QUEUE_FULL
+ */
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+ scsicmd->scsi_done(scsicmd);
+
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return 0;
+}
+
+static void synchronize_callback(void *context, struct fib *fibptr)
+{
+ struct aac_synchronize_reply *synchronizereply;
+ struct scsi_cmnd *cmd;
+
+ cmd = context;
+
+ if (!aac_valid_context(cmd, fibptr))
+ return;
+
+ dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
+ smp_processor_id(), jiffies));
+ BUG_ON(fibptr == NULL);
+
+
+ synchronizereply = fib_data(fibptr);
+ if (le32_to_cpu(synchronizereply->status) == CT_OK)
+ cmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ else {
+ struct scsi_device *sdev = cmd->device;
+ struct aac_dev *dev = fibptr->dev;
+ u32 cid = sdev_id(sdev);
+ printk(KERN_WARNING
+ "synchronize_callback: synchronize failed, status = %d\n",
+ le32_to_cpu(synchronizereply->status));
+ cmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ }
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ cmd->scsi_done(cmd);
+}
+
+static int aac_synchronize(struct scsi_cmnd *scsicmd)
+{
+ int status;
+ struct fib *cmd_fibcontext;
+ struct aac_synchronize *synchronizecmd;
+ struct scsi_cmnd *cmd;
+ struct scsi_device *sdev = scsicmd->device;
+ int active = 0;
+ struct aac_dev *aac;
+ u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ unsigned long flags;
+
+ /*
+ * Wait for all outstanding queued commands to complete to this
+ * specific target (block).
+ */
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_for_each_entry(cmd, &sdev->cmd_list, list)
+ if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
+ u64 cmnd_lba;
+ u32 cmnd_count;
+
+ if (cmd->cmnd[0] == WRITE_6) {
+ cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
+ (cmd->cmnd[2] << 8) |
+ cmd->cmnd[3];
+ cmnd_count = cmd->cmnd[4];
+ if (cmnd_count == 0)
+ cmnd_count = 256;
+ } else if (cmd->cmnd[0] == WRITE_16) {
+ cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
+ ((u64)cmd->cmnd[3] << 48) |
+ ((u64)cmd->cmnd[4] << 40) |
+ ((u64)cmd->cmnd[5] << 32) |
+ ((u64)cmd->cmnd[6] << 24) |
+ (cmd->cmnd[7] << 16) |
+ (cmd->cmnd[8] << 8) |
+ cmd->cmnd[9];
+ cmnd_count = (cmd->cmnd[10] << 24) |
+ (cmd->cmnd[11] << 16) |
+ (cmd->cmnd[12] << 8) |
+ cmd->cmnd[13];
+ } else if (cmd->cmnd[0] == WRITE_12) {
+ cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
+ (cmd->cmnd[3] << 16) |
+ (cmd->cmnd[4] << 8) |
+ cmd->cmnd[5];
+ cmnd_count = (cmd->cmnd[6] << 24) |
+ (cmd->cmnd[7] << 16) |
+ (cmd->cmnd[8] << 8) |
+ cmd->cmnd[9];
+ } else if (cmd->cmnd[0] == WRITE_10) {
+ cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
+ (cmd->cmnd[3] << 16) |
+ (cmd->cmnd[4] << 8) |
+ cmd->cmnd[5];
+ cmnd_count = (cmd->cmnd[7] << 8) |
+ cmd->cmnd[8];
+ } else
+ continue;
+ if (((cmnd_lba + cmnd_count) < lba) ||
+ (count && ((lba + count) < cmnd_lba)))
+ continue;
+ ++active;
+ break;
+ }
+
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+
+ /*
+ * Yield the processor (requeue for later)
+ */
+ if (active)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ aac = (struct aac_dev *)sdev->host->hostdata;
+ if (aac->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Allocate and initialize a Fib
+ */
+ if (!(cmd_fibcontext = aac_fib_alloc(aac)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ aac_fib_init(cmd_fibcontext);
+
+ synchronizecmd = fib_data(cmd_fibcontext);
+ synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
+ synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
+ synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
+ synchronizecmd->count =
+ cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof(struct aac_synchronize),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)synchronize_callback,
+ (void *)scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING
+ "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static void aac_start_stop_callback(void *context, struct fib *fibptr)
+{
+ struct scsi_cmnd *scsicmd = context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ BUG_ON(fibptr == NULL);
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_start_stop(struct scsi_cmnd *scsicmd)
+{
+ int status;
+ struct fib *cmd_fibcontext;
+ struct aac_power_management *pmcmd;
+ struct scsi_device *sdev = scsicmd->device;
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+
+ if (!(aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_POWER_MANAGEMENT)) {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+ if (aac->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Allocate and initialize a Fib
+ */
+ cmd_fibcontext = aac_fib_alloc(aac);
+ if (!cmd_fibcontext)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ aac_fib_init(cmd_fibcontext);
+
+ pmcmd = fib_data(cmd_fibcontext);
+ pmcmd->command = cpu_to_le32(VM_ContainerConfig);
+ pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
+ /* Eject bit ignored, not relevant */
+ pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
+ cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
+ pmcmd->cid = cpu_to_le32(sdev_id(sdev));
+ pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
+ cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof(struct aac_power_management),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_start_stop_callback,
+ (void *)scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/**
+ * aac_scsi_cmd() - Process SCSI command
+ * @scsicmd: SCSI command block
+ *
+ * Emulate a SCSI command and queue the required request for the
+ * aacraid firmware.
+ */
+
+int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
+{
+ u32 cid;
+ struct Scsi_Host *host = scsicmd->device->host;
+ struct aac_dev *dev = (struct aac_dev *)host->hostdata;
+ struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
+
+ if (fsa_dev_ptr == NULL)
+ return -1;
+ /*
+ * If the bus, id or lun is out of range, return fail
+ * Test does not apply to ID 16, the pseudo id for the controller
+ * itself.
+ */
+ cid = scmd_id(scsicmd);
+ if (cid != host->this_id) {
+ if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
+ if((cid >= dev->maximum_num_containers) ||
+ (scsicmd->device->lun != 0)) {
+ scsicmd->result = DID_NO_CONNECT << 16;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+ /*
+ * If the target container doesn't exist, it may have
+ * been newly created
+ */
+ if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
+ (fsa_dev_ptr[cid].sense_data.sense_key ==
+ NOT_READY)) {
+ switch (scsicmd->cmnd[0]) {
+ case SERVICE_ACTION_IN_16:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ case INQUIRY:
+ case READ_CAPACITY:
+ case TEST_UNIT_READY:
+ if (dev->in_reset)
+ return -1;
+ return _aac_probe_container(scsicmd,
+ aac_probe_container_callback2);
+ default:
+ break;
+ }
+ }
+ } else { /* check for physical non-dasd devices */
+ if (dev->nondasd_support || expose_physicals ||
+ dev->jbod) {
+ if (dev->in_reset)
+ return -1;
+ return aac_send_srb_fib(scsicmd);
+ } else {
+ scsicmd->result = DID_NO_CONNECT << 16;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ }
+ }
+ /*
+ * else Command for the controller itself
+ */
+ else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
+ (scsicmd->cmnd[0] != TEST_UNIT_READY))
+ {
+ dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+ ASENCODE_INVALID_COMMAND, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+
+ /* Handle commands here that don't really require going out to the adapter */
+ switch (scsicmd->cmnd[0]) {
+ case INQUIRY:
+ {
+ struct inquiry_data inq_data;
+
+ dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
+ memset(&inq_data, 0, sizeof (struct inquiry_data));
+
+ if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
+ char *arr = (char *)&inq_data;
+
+ /* EVPD bit set */
+ arr[0] = (scmd_id(scsicmd) == host->this_id) ?
+ INQD_PDT_PROC : INQD_PDT_DA;
+ if (scsicmd->cmnd[2] == 0) {
+ /* supported vital product data pages */
+ arr[3] = 3;
+ arr[4] = 0x0;
+ arr[5] = 0x80;
+ arr[6] = 0x83;
+ arr[1] = scsicmd->cmnd[2];
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+ sizeof(inq_data));
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else if (scsicmd->cmnd[2] == 0x80) {
+ /* unit serial number page */
+ arr[3] = setinqserial(dev, &arr[4],
+ scmd_id(scsicmd));
+ arr[1] = scsicmd->cmnd[2];
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+ sizeof(inq_data));
+ if (aac_wwn != 2)
+ return aac_get_container_serial(
+ scsicmd);
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else if (scsicmd->cmnd[2] == 0x83) {
+ /* vpd page 0x83 - Device Identification Page */
+ char *sno = (char *)&inq_data;
+ sno[3] = setinqserial(dev, &sno[4],
+ scmd_id(scsicmd));
+ if (aac_wwn != 2)
+ return aac_get_container_serial(
+ scsicmd);
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else {
+ /* vpd page not implemented */
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
+ ASENCODE_NO_SENSE, 7, 2);
+ memcpy(scsicmd->sense_buffer,
+ &dev->fsa_dev[cid].sense_data,
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ }
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
+ inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+ inq_data.inqd_len = 31;
+ /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
+ inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
+ /*
+ * Set the Vendor, Product, and Revision Level
+ * see: <vendor>.c i.e. aac.c
+ */
+ if (cid == host->this_id) {
+ setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
+ inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+ sizeof(inq_data));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ if (dev->in_reset)
+ return -1;
+ setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
+ inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ return aac_get_container_name(scsicmd);
+ }
+ case SERVICE_ACTION_IN_16:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ {
+ u64 capacity;
+ char cp[13];
+ unsigned int alloc_len;
+
+ dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
+ capacity = fsa_dev_ptr[cid].size - 1;
+ cp[0] = (capacity >> 56) & 0xff;
+ cp[1] = (capacity >> 48) & 0xff;
+ cp[2] = (capacity >> 40) & 0xff;
+ cp[3] = (capacity >> 32) & 0xff;
+ cp[4] = (capacity >> 24) & 0xff;
+ cp[5] = (capacity >> 16) & 0xff;
+ cp[6] = (capacity >> 8) & 0xff;
+ cp[7] = (capacity >> 0) & 0xff;
+ cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+ cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
+ cp[12] = 0;
+
+ alloc_len = ((scsicmd->cmnd[10] << 24)
+ + (scsicmd->cmnd[11] << 16)
+ + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
+
+ alloc_len = min_t(size_t, alloc_len, sizeof(cp));
+ scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
+ if (alloc_len < scsi_bufflen(scsicmd))
+ scsi_set_resid(scsicmd,
+ scsi_bufflen(scsicmd) - alloc_len);
+
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+
+ case READ_CAPACITY:
+ {
+ u32 capacity;
+ char cp[8];
+
+ dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
+
+ cp[0] = (capacity >> 24) & 0xff;
+ cp[1] = (capacity >> 16) & 0xff;
+ cp[2] = (capacity >> 8) & 0xff;
+ cp[3] = (capacity >> 0) & 0xff;
+ cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+ cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
+ scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+
+ case MODE_SENSE:
+ {
+ int mode_buf_length = 4;
+ u32 capacity;
+ aac_modep_data mpd;
+
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
+
+ dprintk((KERN_DEBUG "MODE SENSE command.\n"));
+ memset((char *)&mpd, 0, sizeof(aac_modep_data));
+
+ /* Mode data length */
+ mpd.hd.data_length = sizeof(mpd.hd) - 1;
+ /* Medium type - default */
+ mpd.hd.med_type = 0;
+ /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ mpd.hd.dev_par = 0;
+
+ if (dev->raw_io_interface && ((aac_cache & 5) != 1))
+ mpd.hd.dev_par = 0x10;
+ if (scsicmd->cmnd[1] & 0x8)
+ mpd.hd.bd_length = 0; /* Block descriptor length */
+ else {
+ mpd.hd.bd_length = sizeof(mpd.bd);
+ mpd.hd.data_length += mpd.hd.bd_length;
+ mpd.bd.block_length[0] =
+ (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ mpd.bd.block_length[1] =
+ (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ mpd.bd.block_length[2] =
+ fsa_dev_ptr[cid].block_size & 0xff;
+
+ mpd.mpc_buf[0] = scsicmd->cmnd[2];
+ if (scsicmd->cmnd[2] == 0x1C) {
+ /* page length */
+ mpd.mpc_buf[1] = 0xa;
+ /* Mode data length */
+ mpd.hd.data_length = 23;
+ } else {
+ /* Mode data length */
+ mpd.hd.data_length = 15;
+ }
+
+ if (capacity > 0xffffff) {
+ mpd.bd.block_count[0] = 0xff;
+ mpd.bd.block_count[1] = 0xff;
+ mpd.bd.block_count[2] = 0xff;
+ } else {
+ mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
+ mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
+ mpd.bd.block_count[2] = capacity & 0xff;
+ }
+ }
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mpd.hd.data_length += 3;
+ mpd.mpc_buf[0] = 8;
+ mpd.mpc_buf[1] = 1;
+ mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
+ ? 0 : 0x04; /* WCE */
+ mode_buf_length = sizeof(mpd);
+ }
+
+ if (mode_buf_length > scsicmd->cmnd[4])
+ mode_buf_length = scsicmd->cmnd[4];
+ else
+ mode_buf_length = sizeof(mpd);
+ scsi_sg_copy_from_buffer(scsicmd,
+ (char *)&mpd,
+ mode_buf_length);
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+ case MODE_SENSE_10:
+ {
+ u32 capacity;
+ int mode_buf_length = 8;
+ aac_modep10_data mpd10;
+
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+ capacity = fsa_dev_ptr[cid].size - 1;
+ else
+ capacity = (u32)-1;
+
+ dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
+ memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
+ /* Mode data length (MSB) */
+ mpd10.hd.data_length[0] = 0;
+ /* Mode data length (LSB) */
+ mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
+ /* Medium type - default */
+ mpd10.hd.med_type = 0;
+ /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ mpd10.hd.dev_par = 0;
+
+ if (dev->raw_io_interface && ((aac_cache & 5) != 1))
+ mpd10.hd.dev_par = 0x10;
+ mpd10.hd.rsrvd[0] = 0; /* reserved */
+ mpd10.hd.rsrvd[1] = 0; /* reserved */
+ if (scsicmd->cmnd[1] & 0x8) {
+ /* Block descriptor length (MSB) */
+ mpd10.hd.bd_length[0] = 0;
+ /* Block descriptor length (LSB) */
+ mpd10.hd.bd_length[1] = 0;
+ } else {
+ mpd10.hd.bd_length[0] = 0;
+ mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
+
+ mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
+
+ mpd10.bd.block_length[0] =
+ (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+ mpd10.bd.block_length[1] =
+ (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+ mpd10.bd.block_length[2] =
+ fsa_dev_ptr[cid].block_size & 0xff;
+
+ if (capacity > 0xffffff) {
+ mpd10.bd.block_count[0] = 0xff;
+ mpd10.bd.block_count[1] = 0xff;
+ mpd10.bd.block_count[2] = 0xff;
+ } else {
+ mpd10.bd.block_count[0] =
+ (capacity >> 16) & 0xff;
+ mpd10.bd.block_count[1] =
+ (capacity >> 8) & 0xff;
+ mpd10.bd.block_count[2] =
+ capacity & 0xff;
+ }
+ }
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mpd10.hd.data_length[1] += 3;
+ mpd10.mpc_buf[0] = 8;
+ mpd10.mpc_buf[1] = 1;
+ mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
+ ? 0 : 0x04; /* WCE */
+ mode_buf_length = sizeof(mpd10);
+ if (mode_buf_length > scsicmd->cmnd[8])
+ mode_buf_length = scsicmd->cmnd[8];
+ }
+ scsi_sg_copy_from_buffer(scsicmd,
+ (char *)&mpd10,
+ mode_buf_length);
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+ case REQUEST_SENSE:
+ dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
+ memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ dprintk((KERN_DEBUG "LOCK command.\n"));
+ if (scsicmd->cmnd[4])
+ fsa_dev_ptr[cid].locked = 1;
+ else
+ fsa_dev_ptr[cid].locked = 0;
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ /*
+ * These commands are all No-Ops
+ */
+ case TEST_UNIT_READY:
+ if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ NOT_READY, SENCODE_BECOMING_READY,
+ ASENCODE_BECOMING_READY, 0, 0);
+ memcpy(scsicmd->sense_buffer,
+ &dev->fsa_dev[cid].sense_data,
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ /* FALLTHRU */
+ case RESERVE:
+ case RELEASE:
+ case REZERO_UNIT:
+ case REASSIGN_BLOCKS:
+ case SEEK_10:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+
+ case START_STOP:
+ return aac_start_stop(scsicmd);
+ }
+
+ switch (scsicmd->cmnd[0])
+ {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ if (dev->in_reset)
+ return -1;
+ /*
+ * Hack to keep track of ordinal number of the device that
+ * corresponds to a container. Needed to convert
+ * containers to /dev/sd device names
+ */
+
+ if (scsicmd->request->rq_disk)
+ strlcpy(fsa_dev_ptr[cid].devname,
+ scsicmd->request->rq_disk->disk_name,
+ min(sizeof(fsa_dev_ptr[cid].devname),
+ sizeof(scsicmd->request->rq_disk->disk_name) + 1));
+
+ return aac_read(scsicmd);
+
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ if (dev->in_reset)
+ return -1;
+ return aac_write(scsicmd);
+
+ case SYNCHRONIZE_CACHE:
+ if (((aac_cache & 6) == 6) && dev->cache_protected) {
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ /* Issue FIB to tell Firmware to flush it's cache */
+ if ((aac_cache & 6) != 2)
+ return aac_synchronize(scsicmd);
+ /* FALLTHRU */
+ default:
+ /*
+ * Unhandled commands
+ */
+ dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+ ASENCODE_INVALID_COMMAND, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+}
+
+static int query_disk(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_query_disk qd;
+ struct fsa_dev_info *fsa_dev_ptr;
+
+ fsa_dev_ptr = dev->fsa_dev;
+ if (!fsa_dev_ptr)
+ return -EBUSY;
+ if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
+ return -EFAULT;
+ if (qd.cnum == -1)
+ qd.cnum = qd.id;
+ else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
+ {
+ if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
+ return -EINVAL;
+ qd.instance = dev->scsi_host_ptr->host_no;
+ qd.bus = 0;
+ qd.id = CONTAINER_TO_ID(qd.cnum);
+ qd.lun = CONTAINER_TO_LUN(qd.cnum);
+ }
+ else return -EINVAL;
+
+ qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
+ qd.locked = fsa_dev_ptr[qd.cnum].locked;
+ qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
+
+ if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
+ qd.unmapped = 1;
+ else
+ qd.unmapped = 0;
+
+ strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
+ min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
+
+ if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
+ return -EFAULT;
+ return 0;
+}
+
+static int force_delete_disk(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_delete_disk dd;
+ struct fsa_dev_info *fsa_dev_ptr;
+
+ fsa_dev_ptr = dev->fsa_dev;
+ if (!fsa_dev_ptr)
+ return -EBUSY;
+
+ if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+ return -EFAULT;
+
+ if (dd.cnum >= dev->maximum_num_containers)
+ return -EINVAL;
+ /*
+ * Mark this container as being deleted.
+ */
+ fsa_dev_ptr[dd.cnum].deleted = 1;
+ /*
+ * Mark the container as no longer valid
+ */
+ fsa_dev_ptr[dd.cnum].valid = 0;
+ return 0;
+}
+
+static int delete_disk(struct aac_dev *dev, void __user *arg)
+{
+ struct aac_delete_disk dd;
+ struct fsa_dev_info *fsa_dev_ptr;
+
+ fsa_dev_ptr = dev->fsa_dev;
+ if (!fsa_dev_ptr)
+ return -EBUSY;
+
+ if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+ return -EFAULT;
+
+ if (dd.cnum >= dev->maximum_num_containers)
+ return -EINVAL;
+ /*
+ * If the container is locked, it can not be deleted by the API.
+ */
+ if (fsa_dev_ptr[dd.cnum].locked)
+ return -EBUSY;
+ else {
+ /*
+ * Mark the container as no longer being valid.
+ */
+ fsa_dev_ptr[dd.cnum].valid = 0;
+ fsa_dev_ptr[dd.cnum].devname[0] = '\0';
+ return 0;
+ }
+}
+
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
+{
+ switch (cmd) {
+ case FSACTL_QUERY_DISK:
+ return query_disk(dev, arg);
+ case FSACTL_DELETE_DISK:
+ return delete_disk(dev, arg);
+ case FSACTL_FORCE_DELETE_DISK:
+ return force_delete_disk(dev, arg);
+ case FSACTL_GET_CONTAINERS:
+ return aac_get_containers(dev);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/**
+ *
+ * aac_srb_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the completion of a scsi command to a non dasd device
+ *
+ */
+
+static void aac_srb_callback(void *context, struct fib * fibptr)
+{
+ struct aac_dev *dev;
+ struct aac_srb_reply *srbreply;
+ struct scsi_cmnd *scsicmd;
+
+ scsicmd = (struct scsi_cmnd *) context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ BUG_ON(fibptr == NULL);
+
+ dev = fibptr->dev;
+
+ srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
+ scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
+
+ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+ /* fast response */
+ srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
+ srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
+ } else {
+ /*
+ * Calculate resid for sg
+ */
+ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+ - le32_to_cpu(srbreply->data_xfer_length));
+ }
+
+ scsi_dma_unmap(scsicmd);
+
+ /* expose physical device if expose_physicald flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
+
+ /*
+ * First check the fib status
+ */
+
+ if (le32_to_cpu(srbreply->status) != ST_OK){
+ int len;
+ printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+ }
+
+ /*
+ * Next check the srb status
+ */
+ switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
+ case SRB_STATUS_ERROR_RECOVERY:
+ case SRB_STATUS_PENDING:
+ case SRB_STATUS_SUCCESS:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_DATA_OVERRUN:
+ switch(scsicmd->cmnd[0]){
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_16:
+ case WRITE_16:
+ if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
+ printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
+ } else {
+ printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
+ }
+ scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case INQUIRY: {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+ default:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+ break;
+ case SRB_STATUS_ABORTED:
+ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_ABORT_FAILED:
+ // Not sure about this one - but assuming the hba was trying to abort for some reason
+ scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_PARITY_ERROR:
+ scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
+ break;
+ case SRB_STATUS_NO_DEVICE:
+ case SRB_STATUS_INVALID_PATH_ID:
+ case SRB_STATUS_INVALID_TARGET_ID:
+ case SRB_STATUS_INVALID_LUN:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_COMMAND_TIMEOUT:
+ case SRB_STATUS_TIMEOUT:
+ scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_BUSY:
+ scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_BUS_RESET:
+ scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_MESSAGE_REJECTED:
+ scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+ break;
+ case SRB_STATUS_REQUEST_FLUSHED:
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_INVALID_REQUEST:
+ case SRB_STATUS_REQUEST_SENSE_FAILED:
+ case SRB_STATUS_NO_HBA:
+ case SRB_STATUS_UNEXPECTED_BUS_FREE:
+ case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+ case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+ case SRB_STATUS_DELAYED_RETRY:
+ case SRB_STATUS_BAD_FUNCTION:
+ case SRB_STATUS_NOT_STARTED:
+ case SRB_STATUS_NOT_IN_USE:
+ case SRB_STATUS_FORCE_ABORT:
+ case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+ default:
+#ifdef AAC_DETAILED_STATUS_INFO
+ printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
+ le32_to_cpu(srbreply->srb_status) & 0x3F,
+ aac_get_status_string(
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->scsi_status));
+#endif
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
+ }
+ if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
+ int len;
+ scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+#ifdef AAC_DETAILED_STATUS_INFO
+ printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
+#endif
+ memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+ }
+ /*
+ * OR in the scsi status (already shifted up a bit)
+ */
+ scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ *
+ * aac_send_scb_fib
+ * @scsicmd: the scsi command block
+ *
+ * This routine will form a FIB and fill in the aac_srb from the
+ * scsicmd passed in.
+ */
+
+static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
+{
+ struct fib* cmd_fibcontext;
+ struct aac_dev* dev;
+ int status;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
+ scsicmd->device->lun > 7) {
+ scsicmd->result = DID_NO_CONNECT << 16;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+ /*
+ * Allocate and initialize a Fib then setup a BlockWrite command
+ */
+ if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ return -1;
+ }
+ status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+
+ return -1;
+}
+
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
+{
+ struct aac_dev *dev;
+ unsigned long byte_count = 0;
+ int nseg;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ // Get rid of old data
+ psg->count = 0;
+ psg->sg[0].addr = 0;
+ psg->sg[0].count = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
+ struct scatterlist *sg;
+ int i;
+
+ psg->count = cpu_to_le32(nseg);
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+ psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+ byte_count += sg_dma_len(sg);
+ }
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
+ psg->sg[i-1].count = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+ /* Check for command underflow */
+ if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+ printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+ }
+ return byte_count;
+}
+
+
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
+{
+ struct aac_dev *dev;
+ unsigned long byte_count = 0;
+ u64 addr;
+ int nseg;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ // Get rid of old data
+ psg->count = 0;
+ psg->sg[0].addr[0] = 0;
+ psg->sg[0].addr[1] = 0;
+ psg->sg[0].count = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
+ struct scatterlist *sg;
+ int i;
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+ psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+ psg->sg[i].count = cpu_to_le32(count);
+ byte_count += count;
+ }
+ psg->count = cpu_to_le32(nseg);
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
+ psg->sg[i-1].count = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+ /* Check for command underflow */
+ if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+ printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+ }
+ return byte_count;
+}
+
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
+{
+ unsigned long byte_count = 0;
+ int nseg;
+
+ // Get rid of old data
+ psg->count = 0;
+ psg->sg[0].next = 0;
+ psg->sg[0].prev = 0;
+ psg->sg[0].addr[0] = 0;
+ psg->sg[0].addr[1] = 0;
+ psg->sg[0].count = 0;
+ psg->sg[0].flags = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
+ struct scatterlist *sg;
+ int i;
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ u64 addr = sg_dma_address(sg);
+ psg->sg[i].next = 0;
+ psg->sg[i].prev = 0;
+ psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
+ psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+ psg->sg[i].count = cpu_to_le32(count);
+ psg->sg[i].flags = 0;
+ byte_count += count;
+ }
+ psg->count = cpu_to_le32(nseg);
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
+ psg->sg[i-1].count = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+ /* Check for command underflow */
+ if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+ printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+ }
+ return byte_count;
+}
+
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max)
+{
+ unsigned long byte_count = 0;
+ int nseg;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
+ struct scatterlist *sg;
+ int i, conformable = 0;
+ u32 min_size = PAGE_SIZE, cur_size;
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ u64 addr = sg_dma_address(sg);
+
+ BUG_ON(i >= sg_max);
+ rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+ rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+ cur_size = cpu_to_le32(count);
+ rio2->sge[i].length = cur_size;
+ rio2->sge[i].flags = 0;
+ if (i == 0) {
+ conformable = 1;
+ rio2->sgeFirstSize = cur_size;
+ } else if (i == 1) {
+ rio2->sgeNominalSize = cur_size;
+ min_size = cur_size;
+ } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+ conformable = 0;
+ if (cur_size < min_size)
+ min_size = cur_size;
+ }
+ byte_count += count;
+ }
+
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+ (byte_count - scsi_bufflen(scsicmd));
+ rio2->sge[i-1].length = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+
+ rio2->sgeCnt = cpu_to_le32(nseg);
+ rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+ /* not conformable: evaluate required sg elements */
+ if (!conformable) {
+ int j, nseg_new = nseg, err_found;
+ for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+ err_found = 0;
+ nseg_new = 2;
+ for (j = 1; j < nseg - 1; ++j) {
+ if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+ err_found = 1;
+ break;
+ }
+ nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
+ }
+ if (!err_found)
+ break;
+ }
+ if (i > 0 && nseg_new <= sg_max)
+ aac_convert_sgraw2(rio2, i, nseg, nseg_new);
+ } else
+ rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+ /* Check for command underflow */
+ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+ printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+ }
+
+ return byte_count;
+}
+
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
+{
+ struct sge_ieee1212 *sge;
+ int i, j, pos;
+ u32 addr_low;
+
+ if (aac_convert_sgl == 0)
+ return 0;
+
+ sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ if (sge == NULL)
+ return -1;
+
+ for (i = 1, pos = 1; i < nseg-1; ++i) {
+ for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
+ addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
+ sge[pos].addrLow = addr_low;
+ sge[pos].addrHigh = rio2->sge[i].addrHigh;
+ if (addr_low < rio2->sge[i].addrLow)
+ sge[pos].addrHigh++;
+ sge[pos].length = pages * PAGE_SIZE;
+ sge[pos].flags = 0;
+ pos++;
+ }
+ }
+ sge[pos] = rio2->sge[nseg-1];
+ memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
+
+ kfree(sge);
+ rio2->sgeCnt = cpu_to_le32(nseg_new);
+ rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+ rio2->sgeNominalSize = pages * PAGE_SIZE;
+ return 0;
+}
+
+#ifdef AAC_DETAILED_STATUS_INFO
+
+struct aac_srb_status_info {
+ u32 status;
+ char *str;
+};
+
+
+static struct aac_srb_status_info srb_status_info[] = {
+ { SRB_STATUS_PENDING, "Pending Status"},
+ { SRB_STATUS_SUCCESS, "Success"},
+ { SRB_STATUS_ABORTED, "Aborted Command"},
+ { SRB_STATUS_ABORT_FAILED, "Abort Failed"},
+ { SRB_STATUS_ERROR, "Error Event"},
+ { SRB_STATUS_BUSY, "Device Busy"},
+ { SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
+ { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
+ { SRB_STATUS_NO_DEVICE, "No Device"},
+ { SRB_STATUS_TIMEOUT, "Timeout"},
+ { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
+ { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
+ { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
+ { SRB_STATUS_BUS_RESET, "Bus Reset"},
+ { SRB_STATUS_PARITY_ERROR, "Parity Error"},
+ { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
+ { SRB_STATUS_NO_HBA, "No HBA"},
+ { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
+ { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
+ { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
+ { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
+ { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
+ { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
+ { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
+ { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
+ { SRB_STATUS_BAD_FUNCTION, "Bad Function"},
+ { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
+ { SRB_STATUS_NOT_STARTED, "Not Started"},
+ { SRB_STATUS_NOT_IN_USE, "Not In Use"},
+ { SRB_STATUS_FORCE_ABORT, "Force Abort"},
+ { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
+ { 0xff, "Unknown Error"}
+};
+
+char *aac_get_status_string(u32 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
+ if (srb_status_info[i].status == status)
+ return srb_status_info[i].str;
+
+ return "Bad Status Code";
+}
+
+#endif
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
new file mode 100644
index 000000000..40fe65c91
--- /dev/null
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -0,0 +1,2159 @@
+#ifndef dprintk
+# define dprintk(x)
+#endif
+/* eg: if (nblank(dprintk(x))) */
+#define _nblank(x) #x
+#define nblank(x) _nblank(x)[0]
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+/*------------------------------------------------------------------------------
+ * D E F I N E S
+ *----------------------------------------------------------------------------*/
+
+#define AAC_MAX_MSIX 8 /* vectors */
+#define AAC_PCI_MSI_ENABLE 0x8000
+
+enum {
+ AAC_ENABLE_INTERRUPT = 0x0,
+ AAC_DISABLE_INTERRUPT,
+ AAC_ENABLE_MSIX,
+ AAC_DISABLE_MSIX,
+ AAC_CLEAR_AIF_BIT,
+ AAC_CLEAR_SYNC_BIT,
+ AAC_ENABLE_INTX
+};
+
+#define AAC_INT_MODE_INTX (1<<0)
+#define AAC_INT_MODE_MSI (1<<1)
+#define AAC_INT_MODE_AIF (1<<2)
+#define AAC_INT_MODE_SYNC (1<<3)
+
+#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb
+#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa
+#define AAC_INT_DISABLE_ALL 0xffffffff
+
+/* Bit definitions in IOA->Host Interrupt Register */
+#define PMC_TRANSITION_TO_OPERATIONAL (1<<31)
+#define PMC_IOARCB_TRANSFER_FAILED (1<<28)
+#define PMC_IOA_UNIT_CHECK (1<<27)
+#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26)
+#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25)
+#define PMC_IOARRIN_LOST (1<<4)
+#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3)
+#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2)
+#define PMC_HOST_RRQ_VALID (1<<1)
+#define PMC_OPERATIONAL_STATUS (1<<31)
+#define PMC_ALLOW_MSIX_VECTOR0 (1<<0)
+
+#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \
+ PMC_IOA_UNIT_CHECK | \
+ PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \
+ PMC_IOARRIN_LOST | \
+ PMC_SYSTEM_BUS_MMIO_ERROR | \
+ PMC_IOA_PROCESSOR_IN_ERROR_STATE)
+
+#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \
+ PMC_HOST_RRQ_VALID | \
+ PMC_TRANSITION_TO_OPERATIONAL | \
+ PMC_ALLOW_MSIX_VECTOR0)
+#define PMC_GLOBAL_INT_BIT2 0x00000004
+#define PMC_GLOBAL_INT_BIT0 0x00000001
+
+#ifndef AAC_DRIVER_BUILD
+# define AAC_DRIVER_BUILD 40709
+# define AAC_DRIVER_BRANCH "-ms"
+#endif
+#define MAXIMUM_NUM_CONTAINERS 32
+
+#define AAC_NUM_MGT_FIB 8
+#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
+
+#define AAC_MAX_LUN (8)
+
+#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
+#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
+
+#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
+
+/*
+ * These macros convert from physical channels to virtual channels
+ */
+#define CONTAINER_CHANNEL (0)
+#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
+#define CONTAINER_TO_ID(cont) (cont)
+#define CONTAINER_TO_LUN(cont) (0)
+
+#define PMC_DEVICE_S6 0x28b
+#define PMC_DEVICE_S7 0x28c
+#define PMC_DEVICE_S8 0x28d
+#define PMC_DEVICE_S9 0x28f
+
+#define aac_phys_to_logical(x) ((x)+1)
+#define aac_logical_to_phys(x) ((x)?(x)-1:0)
+
+/* #define AAC_DETAILED_STATUS_INFO */
+
+struct diskparm
+{
+ int heads;
+ int sectors;
+ int cylinders;
+};
+
+
+/*
+ * Firmware constants
+ */
+
+#define CT_NONE 0
+#define CT_OK 218
+#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
+#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
+
+/*
+ * Host side memory scatter gather list
+ * Used by the adapter for read, write, and readdirplus operations
+ * We have separate 32 and 64 bit version because even
+ * on 64 bit systems not all cards support the 64 bit version
+ */
+struct sgentry {
+ __le32 addr; /* 32-bit address. */
+ __le32 count; /* Length. */
+};
+
+struct user_sgentry {
+ u32 addr; /* 32-bit address. */
+ u32 count; /* Length. */
+};
+
+struct sgentry64 {
+ __le32 addr[2]; /* 64-bit addr. 2 pieces for data alignment */
+ __le32 count; /* Length. */
+};
+
+struct user_sgentry64 {
+ u32 addr[2]; /* 64-bit addr. 2 pieces for data alignment */
+ u32 count; /* Length. */
+};
+
+struct sgentryraw {
+ __le32 next; /* reserved for F/W use */
+ __le32 prev; /* reserved for F/W use */
+ __le32 addr[2];
+ __le32 count;
+ __le32 flags; /* reserved for F/W use */
+};
+
+struct user_sgentryraw {
+ u32 next; /* reserved for F/W use */
+ u32 prev; /* reserved for F/W use */
+ u32 addr[2];
+ u32 count;
+ u32 flags; /* reserved for F/W use */
+};
+
+struct sge_ieee1212 {
+ u32 addrLow;
+ u32 addrHigh;
+ u32 length;
+ u32 flags;
+};
+
+/*
+ * SGMAP
+ *
+ * This is the SGMAP structure for all commands that use
+ * 32-bit addressing.
+ */
+
+struct sgmap {
+ __le32 count;
+ struct sgentry sg[1];
+};
+
+struct user_sgmap {
+ u32 count;
+ struct user_sgentry sg[1];
+};
+
+struct sgmap64 {
+ __le32 count;
+ struct sgentry64 sg[1];
+};
+
+struct user_sgmap64 {
+ u32 count;
+ struct user_sgentry64 sg[1];
+};
+
+struct sgmapraw {
+ __le32 count;
+ struct sgentryraw sg[1];
+};
+
+struct user_sgmapraw {
+ u32 count;
+ struct user_sgentryraw sg[1];
+};
+
+struct creation_info
+{
+ u8 buildnum; /* e.g., 588 */
+ u8 usec; /* e.g., 588 */
+ u8 via; /* e.g., 1 = FSU,
+ * 2 = API
+ */
+ u8 year; /* e.g., 1997 = 97 */
+ __le32 date; /*
+ * unsigned Month :4; // 1 - 12
+ * unsigned Day :6; // 1 - 32
+ * unsigned Hour :6; // 0 - 23
+ * unsigned Minute :6; // 0 - 60
+ * unsigned Second :6; // 0 - 60
+ */
+ __le32 serial[2]; /* e.g., 0x1DEADB0BFAFAF001 */
+};
+
+
+/*
+ * Define all the constants needed for the communication interface
+ */
+
+/*
+ * Define how many queue entries each queue will have and the total
+ * number of entries for the entire communication interface. Also define
+ * how many queues we support.
+ *
+ * This has to match the controller
+ */
+
+#define NUMBER_OF_COMM_QUEUES 8 // 4 command; 4 response
+#define HOST_HIGH_CMD_ENTRIES 4
+#define HOST_NORM_CMD_ENTRIES 8
+#define ADAP_HIGH_CMD_ENTRIES 4
+#define ADAP_NORM_CMD_ENTRIES 512
+#define HOST_HIGH_RESP_ENTRIES 4
+#define HOST_NORM_RESP_ENTRIES 512
+#define ADAP_HIGH_RESP_ENTRIES 4
+#define ADAP_NORM_RESP_ENTRIES 8
+
+#define TOTAL_QUEUE_ENTRIES \
+ (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
+ HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
+
+
+/*
+ * Set the queues on a 16 byte alignment
+ */
+
+#define QUEUE_ALIGNMENT 16
+
+/*
+ * The queue headers define the Communication Region queues. These
+ * are physically contiguous and accessible by both the adapter and the
+ * host. Even though all queue headers are in the same contiguous block
+ * they will be represented as individual units in the data structures.
+ */
+
+struct aac_entry {
+ __le32 size; /* Size in bytes of Fib which this QE points to */
+ __le32 addr; /* Receiver address of the FIB */
+};
+
+/*
+ * The adapter assumes the ProducerIndex and ConsumerIndex are grouped
+ * adjacently and in that order.
+ */
+
+struct aac_qhdr {
+ __le64 header_addr;/* Address to hand the adapter to access
+ to this queue head */
+ __le32 *producer; /* The producer index for this queue (host address) */
+ __le32 *consumer; /* The consumer index for this queue (host address) */
+};
+
+/*
+ * Define all the events which the adapter would like to notify
+ * the host of.
+ */
+
+#define HostNormCmdQue 1 /* Change in host normal priority command queue */
+#define HostHighCmdQue 2 /* Change in host high priority command queue */
+#define HostNormRespQue 3 /* Change in host normal priority response queue */
+#define HostHighRespQue 4 /* Change in host high priority response queue */
+#define AdapNormRespNotFull 5
+#define AdapHighRespNotFull 6
+#define AdapNormCmdNotFull 7
+#define AdapHighCmdNotFull 8
+#define SynchCommandComplete 9
+#define AdapInternalError 0xfe /* The adapter detected an internal error shutting down */
+
+/*
+ * Define all the events the host wishes to notify the
+ * adapter of. The first four values much match the Qid the
+ * corresponding queue.
+ */
+
+#define AdapNormCmdQue 2
+#define AdapHighCmdQue 3
+#define AdapNormRespQue 6
+#define AdapHighRespQue 7
+#define HostShutdown 8
+#define HostPowerFail 9
+#define FatalCommError 10
+#define HostNormRespNotFull 11
+#define HostHighRespNotFull 12
+#define HostNormCmdNotFull 13
+#define HostHighCmdNotFull 14
+#define FastIo 15
+#define AdapPrintfDone 16
+
+/*
+ * Define all the queues that the adapter and host use to communicate
+ * Number them to match the physical queue layout.
+ */
+
+enum aac_queue_types {
+ HostNormCmdQueue = 0, /* Adapter to host normal priority command traffic */
+ HostHighCmdQueue, /* Adapter to host high priority command traffic */
+ AdapNormCmdQueue, /* Host to adapter normal priority command traffic */
+ AdapHighCmdQueue, /* Host to adapter high priority command traffic */
+ HostNormRespQueue, /* Adapter to host normal priority response traffic */
+ HostHighRespQueue, /* Adapter to host high priority response traffic */
+ AdapNormRespQueue, /* Host to adapter normal priority response traffic */
+ AdapHighRespQueue /* Host to adapter high priority response traffic */
+};
+
+/*
+ * Assign type values to the FSA communication data structures
+ */
+
+#define FIB_MAGIC 0x0001
+#define FIB_MAGIC2 0x0004
+#define FIB_MAGIC2_64 0x0005
+
+/*
+ * Define the priority levels the FSA communication routines support.
+ */
+
+#define FsaNormal 1
+
+/* transport FIB header (PMC) */
+struct aac_fib_xporthdr {
+ u64 HostAddress; /* FIB host address w/o xport header */
+ u32 Size; /* FIB size excluding xport header */
+ u32 Handle; /* driver handle to reference the FIB */
+ u64 Reserved[2];
+};
+
+#define ALIGN32 32
+
+/*
+ * Define the FIB. The FIB is the where all the requested data and
+ * command information are put to the application on the FSA adapter.
+ */
+
+struct aac_fibhdr {
+ __le32 XferState; /* Current transfer state for this CCB */
+ __le16 Command; /* Routing information for the destination */
+ u8 StructType; /* Type FIB */
+ u8 Unused; /* Unused */
+ __le16 Size; /* Size of this FIB in bytes */
+ __le16 SenderSize; /* Size of the FIB in the sender
+ (for response sizing) */
+ __le32 SenderFibAddress; /* Host defined data in the FIB */
+ union {
+ __le32 ReceiverFibAddress;/* Logical address of this FIB for
+ the adapter (old) */
+ __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
+ __le32 TimeStamp; /* otherwise timestamp for FW internal use */
+ } u;
+ u32 Handle; /* FIB handle used for MSGU commnunication */
+ u32 Previous; /* FW internal use */
+ u32 Next; /* FW internal use */
+};
+
+struct hw_fib {
+ struct aac_fibhdr header;
+ u8 data[512-sizeof(struct aac_fibhdr)]; // Command specific data
+};
+
+/*
+ * FIB commands
+ */
+
+#define TestCommandResponse 1
+#define TestAdapterCommand 2
+/*
+ * Lowlevel and comm commands
+ */
+#define LastTestCommand 100
+#define ReinitHostNormCommandQueue 101
+#define ReinitHostHighCommandQueue 102
+#define ReinitHostHighRespQueue 103
+#define ReinitHostNormRespQueue 104
+#define ReinitAdapNormCommandQueue 105
+#define ReinitAdapHighCommandQueue 107
+#define ReinitAdapHighRespQueue 108
+#define ReinitAdapNormRespQueue 109
+#define InterfaceShutdown 110
+#define DmaCommandFib 120
+#define StartProfile 121
+#define TermProfile 122
+#define SpeedTest 123
+#define TakeABreakPt 124
+#define RequestPerfData 125
+#define SetInterruptDefTimer 126
+#define SetInterruptDefCount 127
+#define GetInterruptDefStatus 128
+#define LastCommCommand 129
+/*
+ * Filesystem commands
+ */
+#define NuFileSystem 300
+#define UFS 301
+#define HostFileSystem 302
+#define LastFileSystemCommand 303
+/*
+ * Container Commands
+ */
+#define ContainerCommand 500
+#define ContainerCommand64 501
+#define ContainerRawIo 502
+#define ContainerRawIo2 503
+/*
+ * Scsi Port commands (scsi passthrough)
+ */
+#define ScsiPortCommand 600
+#define ScsiPortCommand64 601
+/*
+ * Misc house keeping and generic adapter initiated commands
+ */
+#define AifRequest 700
+#define CheckRevision 701
+#define FsaHostShutdown 702
+#define RequestAdapterInfo 703
+#define IsAdapterPaused 704
+#define SendHostTime 705
+#define RequestSupplementAdapterInfo 706
+#define LastMiscCommand 707
+
+/*
+ * Commands that will target the failover level on the FSA adapter
+ */
+
+enum fib_xfer_state {
+ HostOwned = (1<<0),
+ AdapterOwned = (1<<1),
+ FibInitialized = (1<<2),
+ FibEmpty = (1<<3),
+ AllocatedFromPool = (1<<4),
+ SentFromHost = (1<<5),
+ SentFromAdapter = (1<<6),
+ ResponseExpected = (1<<7),
+ NoResponseExpected = (1<<8),
+ AdapterProcessed = (1<<9),
+ HostProcessed = (1<<10),
+ HighPriority = (1<<11),
+ NormalPriority = (1<<12),
+ Async = (1<<13),
+ AsyncIo = (1<<13), // rpbfix: remove with new regime
+ PageFileIo = (1<<14), // rpbfix: remove with new regime
+ ShutdownRequest = (1<<15),
+ LazyWrite = (1<<16), // rpbfix: remove with new regime
+ AdapterMicroFib = (1<<17),
+ BIOSFibPath = (1<<18),
+ FastResponseCapable = (1<<19),
+ ApiFib = (1<<20), /* Its an API Fib */
+ /* PMC NEW COMM: There is no more AIF data pending */
+ NoMoreAifDataAvailable = (1<<21)
+};
+
+/*
+ * The following defines needs to be updated any time there is an
+ * incompatible change made to the aac_init structure.
+ */
+
+#define ADAPTER_INIT_STRUCT_REVISION 3
+#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science
+#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */
+#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */
+
+struct aac_init
+{
+ __le32 InitStructRevision;
+ __le32 Sa_MSIXVectors;
+ __le32 fsrev;
+ __le32 CommHeaderAddress;
+ __le32 FastIoCommAreaAddress;
+ __le32 AdapterFibsPhysicalAddress;
+ __le32 AdapterFibsVirtualAddress;
+ __le32 AdapterFibsSize;
+ __le32 AdapterFibAlign;
+ __le32 printfbuf;
+ __le32 printfbufsiz;
+ __le32 HostPhysMemPages; /* number of 4k pages of host
+ physical memory */
+ __le32 HostElapsedSeconds; /* number of seconds since 1970. */
+ /*
+ * ADAPTER_INIT_STRUCT_REVISION_4 begins here
+ */
+ __le32 InitFlags; /* flags for supported features */
+#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
+#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
+#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040
+#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080
+#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100
+ __le32 MaxIoCommands; /* max outstanding commands */
+ __le32 MaxIoSize; /* largest I/O command */
+ __le32 MaxFibSize; /* largest FIB to adapter */
+ /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+ __le32 MaxNumAif; /* max number of aif */
+ /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+ __le32 HostRRQ_AddrLow;
+ __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */
+};
+
+enum aac_log_level {
+ LOG_AAC_INIT = 10,
+ LOG_AAC_INFORMATIONAL = 20,
+ LOG_AAC_WARNING = 30,
+ LOG_AAC_LOW_ERROR = 40,
+ LOG_AAC_MEDIUM_ERROR = 50,
+ LOG_AAC_HIGH_ERROR = 60,
+ LOG_AAC_PANIC = 70,
+ LOG_AAC_DEBUG = 80,
+ LOG_AAC_WINDBG_PRINT = 90
+};
+
+#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT 0x030b
+#define FSAFS_NTC_FIB_CONTEXT 0x030c
+
+struct aac_dev;
+struct fib;
+struct scsi_cmnd;
+
+struct adapter_ops
+{
+ /* Low level operations */
+ void (*adapter_interrupt)(struct aac_dev *dev);
+ void (*adapter_notify)(struct aac_dev *dev, u32 event);
+ void (*adapter_disable_int)(struct aac_dev *dev);
+ void (*adapter_enable_int)(struct aac_dev *dev);
+ int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
+ int (*adapter_check_health)(struct aac_dev *dev);
+ int (*adapter_restart)(struct aac_dev *dev, int bled);
+ /* Transport operations */
+ int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
+ irq_handler_t adapter_intr;
+ /* Packet operations */
+ int (*adapter_deliver)(struct fib * fib);
+ int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
+ int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+ int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
+ int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
+ /* Administrative operations */
+ int (*adapter_comm)(struct aac_dev * dev, int comm);
+};
+
+/*
+ * Define which interrupt handler needs to be installed
+ */
+
+struct aac_driver_ident
+{
+ int (*init)(struct aac_dev *dev);
+ char * name;
+ char * vname;
+ char * model;
+ u16 channels;
+ int quirks;
+};
+/*
+ * Some adapter firmware needs communication memory
+ * below 2gig. This tells the init function to set the
+ * dma mask such that fib memory will be allocated where the
+ * adapter firmware can get to it.
+ */
+#define AAC_QUIRK_31BIT 0x0001
+
+/*
+ * Some adapter firmware, when the raid card's cache is turned off, can not
+ * split up scatter gathers in order to deal with the limits of the
+ * underlying CHIM. This limit is 34 scatter gather elements.
+ */
+#define AAC_QUIRK_34SG 0x0002
+
+/*
+ * This adapter is a slave (no Firmware)
+ */
+#define AAC_QUIRK_SLAVE 0x0004
+
+/*
+ * This adapter is a master.
+ */
+#define AAC_QUIRK_MASTER 0x0008
+
+/*
+ * Some adapter firmware perform poorly when it must split up scatter gathers
+ * in order to deal with the limits of the underlying CHIM. This limit in this
+ * class of adapters is 17 scatter gather elements.
+ */
+#define AAC_QUIRK_17SG 0x0010
+
+/*
+ * Some adapter firmware does not support 64 bit scsi passthrough
+ * commands.
+ */
+#define AAC_QUIRK_SCSI_32 0x0020
+
+/*
+ * The adapter interface specs all queues to be located in the same
+ * physically contiguous block. The host structure that defines the
+ * commuication queues will assume they are each a separate physically
+ * contiguous memory region that will support them all being one big
+ * contiguous block.
+ * There is a command and response queue for each level and direction of
+ * commuication. These regions are accessed by both the host and adapter.
+ */
+
+struct aac_queue {
+ u64 logical; /*address we give the adapter */
+ struct aac_entry *base; /*system virtual address */
+ struct aac_qhdr headers; /*producer,consumer q headers*/
+ u32 entries; /*Number of queue entries */
+ wait_queue_head_t qfull; /*Event to wait on if q full */
+ wait_queue_head_t cmdready; /*Cmd ready from the adapter */
+ /* This is only valid for adapter to host command queues. */
+ spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
+ spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
+ struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
+ /* only valid for command queues which receive entries from the adapter. */
+ /* Number of entries on outstanding queue. */
+ atomic_t numpending;
+ struct aac_dev * dev; /* Back pointer to adapter structure */
+};
+
+/*
+ * Message queues. The order here is important, see also the
+ * queue type ordering
+ */
+
+struct aac_queue_block
+{
+ struct aac_queue queue[8];
+};
+
+/*
+ * SaP1 Message Unit Registers
+ */
+
+struct sa_drawbridge_CSR {
+ /* Offset | Name */
+ __le32 reserved[10]; /* 00h-27h | Reserved */
+ u8 LUT_Offset; /* 28h | Lookup Table Offset */
+ u8 reserved1[3]; /* 29h-2bh | Reserved */
+ __le32 LUT_Data; /* 2ch | Looup Table Data */
+ __le32 reserved2[26]; /* 30h-97h | Reserved */
+ __le16 PRICLEARIRQ; /* 98h | Primary Clear Irq */
+ __le16 SECCLEARIRQ; /* 9ah | Secondary Clear Irq */
+ __le16 PRISETIRQ; /* 9ch | Primary Set Irq */
+ __le16 SECSETIRQ; /* 9eh | Secondary Set Irq */
+ __le16 PRICLEARIRQMASK;/* a0h | Primary Clear Irq Mask */
+ __le16 SECCLEARIRQMASK;/* a2h | Secondary Clear Irq Mask */
+ __le16 PRISETIRQMASK; /* a4h | Primary Set Irq Mask */
+ __le16 SECSETIRQMASK; /* a6h | Secondary Set Irq Mask */
+ __le32 MAILBOX0; /* a8h | Scratchpad 0 */
+ __le32 MAILBOX1; /* ach | Scratchpad 1 */
+ __le32 MAILBOX2; /* b0h | Scratchpad 2 */
+ __le32 MAILBOX3; /* b4h | Scratchpad 3 */
+ __le32 MAILBOX4; /* b8h | Scratchpad 4 */
+ __le32 MAILBOX5; /* bch | Scratchpad 5 */
+ __le32 MAILBOX6; /* c0h | Scratchpad 6 */
+ __le32 MAILBOX7; /* c4h | Scratchpad 7 */
+ __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */
+ __le32 ROM_Control_Addr;/* cch | Rom Control and Address */
+ __le32 reserved3[12]; /* d0h-ffh | reserved */
+ __le32 LUT[64]; /* 100h-1ffh | Lookup Table Entries */
+};
+
+#define Mailbox0 SaDbCSR.MAILBOX0
+#define Mailbox1 SaDbCSR.MAILBOX1
+#define Mailbox2 SaDbCSR.MAILBOX2
+#define Mailbox3 SaDbCSR.MAILBOX3
+#define Mailbox4 SaDbCSR.MAILBOX4
+#define Mailbox5 SaDbCSR.MAILBOX5
+#define Mailbox6 SaDbCSR.MAILBOX6
+#define Mailbox7 SaDbCSR.MAILBOX7
+
+#define DoorbellReg_p SaDbCSR.PRISETIRQ
+#define DoorbellReg_s SaDbCSR.SECSETIRQ
+#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
+
+
+#define DOORBELL_0 0x0001
+#define DOORBELL_1 0x0002
+#define DOORBELL_2 0x0004
+#define DOORBELL_3 0x0008
+#define DOORBELL_4 0x0010
+#define DOORBELL_5 0x0020
+#define DOORBELL_6 0x0040
+
+
+#define PrintfReady DOORBELL_5
+#define PrintfDone DOORBELL_5
+
+struct sa_registers {
+ struct sa_drawbridge_CSR SaDbCSR; /* 98h - c4h */
+};
+
+
+#define Sa_MINIPORT_REVISION 1
+
+#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
+#define sa_writew(AEP, CSR, value) writew(value, &((AEP)->regs.sa->CSR))
+#define sa_writel(AEP, CSR, value) writel(value, &((AEP)->regs.sa->CSR))
+
+/*
+ * Rx Message Unit Registers
+ */
+
+struct rx_mu_registers {
+ /* Local | PCI*| Name */
+ __le32 ARSR; /* 1300h | 00h | APIC Register Select Register */
+ __le32 reserved0; /* 1304h | 04h | Reserved */
+ __le32 AWR; /* 1308h | 08h | APIC Window Register */
+ __le32 reserved1; /* 130Ch | 0Ch | Reserved */
+ __le32 IMRx[2]; /* 1310h | 10h | Inbound Message Registers */
+ __le32 OMRx[2]; /* 1318h | 18h | Outbound Message Registers */
+ __le32 IDR; /* 1320h | 20h | Inbound Doorbell Register */
+ __le32 IISR; /* 1324h | 24h | Inbound Interrupt
+ Status Register */
+ __le32 IIMR; /* 1328h | 28h | Inbound Interrupt
+ Mask Register */
+ __le32 ODR; /* 132Ch | 2Ch | Outbound Doorbell Register */
+ __le32 OISR; /* 1330h | 30h | Outbound Interrupt
+ Status Register */
+ __le32 OIMR; /* 1334h | 34h | Outbound Interrupt
+ Mask Register */
+ __le32 reserved2; /* 1338h | 38h | Reserved */
+ __le32 reserved3; /* 133Ch | 3Ch | Reserved */
+ __le32 InboundQueue;/* 1340h | 40h | Inbound Queue Port relative to firmware */
+ __le32 OutboundQueue;/*1344h | 44h | Outbound Queue Port relative to firmware */
+ /* * Must access through ATU Inbound
+ Translation Window */
+};
+
+struct rx_inbound {
+ __le32 Mailbox[8];
+};
+
+#define INBOUNDDOORBELL_0 0x00000001
+#define INBOUNDDOORBELL_1 0x00000002
+#define INBOUNDDOORBELL_2 0x00000004
+#define INBOUNDDOORBELL_3 0x00000008
+#define INBOUNDDOORBELL_4 0x00000010
+#define INBOUNDDOORBELL_5 0x00000020
+#define INBOUNDDOORBELL_6 0x00000040
+
+#define OUTBOUNDDOORBELL_0 0x00000001
+#define OUTBOUNDDOORBELL_1 0x00000002
+#define OUTBOUNDDOORBELL_2 0x00000004
+#define OUTBOUNDDOORBELL_3 0x00000008
+#define OUTBOUNDDOORBELL_4 0x00000010
+
+#define InboundDoorbellReg MUnit.IDR
+#define OutboundDoorbellReg MUnit.ODR
+
+struct rx_registers {
+ struct rx_mu_registers MUnit; /* 1300h - 1347h */
+ __le32 reserved1[2]; /* 1348h - 134ch */
+ struct rx_inbound IndexRegs;
+};
+
+#define rx_readb(AEP, CSR) readb(&((AEP)->regs.rx->CSR))
+#define rx_readl(AEP, CSR) readl(&((AEP)->regs.rx->CSR))
+#define rx_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rx->CSR))
+#define rx_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rx->CSR))
+
+/*
+ * Rkt Message Unit Registers (same as Rx, except a larger reserve region)
+ */
+
+#define rkt_mu_registers rx_mu_registers
+#define rkt_inbound rx_inbound
+
+struct rkt_registers {
+ struct rkt_mu_registers MUnit; /* 1300h - 1347h */
+ __le32 reserved1[1006]; /* 1348h - 22fch */
+ struct rkt_inbound IndexRegs; /* 2300h - */
+};
+
+#define rkt_readb(AEP, CSR) readb(&((AEP)->regs.rkt->CSR))
+#define rkt_readl(AEP, CSR) readl(&((AEP)->regs.rkt->CSR))
+#define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR))
+#define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR))
+
+/*
+ * PMC SRC message unit registers
+ */
+
+#define src_inbound rx_inbound
+
+struct src_mu_registers {
+ /* PCI*| Name */
+ __le32 reserved0[6]; /* 00h | Reserved */
+ __le32 IOAR[2]; /* 18h | IOA->host interrupt register */
+ __le32 IDR; /* 20h | Inbound Doorbell Register */
+ __le32 IISR; /* 24h | Inbound Int. Status Register */
+ __le32 reserved1[3]; /* 28h | Reserved */
+ __le32 OIMR; /* 34h | Outbound Int. Mask Register */
+ __le32 reserved2[25]; /* 38h | Reserved */
+ __le32 ODR_R; /* 9ch | Outbound Doorbell Read */
+ __le32 ODR_C; /* a0h | Outbound Doorbell Clear */
+ __le32 reserved3[6]; /* a4h | Reserved */
+ __le32 OMR; /* bch | Outbound Message Register */
+ __le32 IQ_L; /* c0h | Inbound Queue (Low address) */
+ __le32 IQ_H; /* c4h | Inbound Queue (High address) */
+ __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */
+};
+
+struct src_registers {
+ struct src_mu_registers MUnit; /* 00h - cbh */
+ union {
+ struct {
+ __le32 reserved1[130789]; /* cch - 7fc5fh */
+ struct src_inbound IndexRegs; /* 7fc60h */
+ } tupelo;
+ struct {
+ __le32 reserved1[973]; /* cch - fffh */
+ struct src_inbound IndexRegs; /* 1000h */
+ } denali;
+ } u;
+};
+
+#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR))
+#define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR))
+#define src_writeb(AEP, CSR, value) writeb(value, \
+ &((AEP)->regs.src.bar0->CSR))
+#define src_writel(AEP, CSR, value) writel(value, \
+ &((AEP)->regs.src.bar0->CSR))
+
+#define SRC_ODR_SHIFT 12
+#define SRC_IDR_SHIFT 9
+
+typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
+
+struct aac_fib_context {
+ s16 type; // used for verification of structure
+ s16 size;
+ u32 unique; // unique value representing this context
+ ulong jiffies; // used for cleanup - dmb changed to ulong
+ struct list_head next; // used to link context's into a linked list
+ struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
+ int wait; // Set to true when thread is in WaitForSingleObject
+ unsigned long count; // total number of FIBs on FibList
+ struct list_head fib_list; // this holds fibs and their attachd hw_fibs
+};
+
+struct sense_data {
+ u8 error_code; /* 70h (current errors), 71h(deferred errors) */
+ u8 valid:1; /* A valid bit of one indicates that the information */
+ /* field contains valid information as defined in the
+ * SCSI-2 Standard.
+ */
+ u8 segment_number; /* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
+ u8 sense_key:4; /* Sense Key */
+ u8 reserved:1;
+ u8 ILI:1; /* Incorrect Length Indicator */
+ u8 EOM:1; /* End Of Medium - reserved for random access devices */
+ u8 filemark:1; /* Filemark - reserved for random access devices */
+
+ u8 information[4]; /* for direct-access devices, contains the unsigned
+ * logical block address or residue associated with
+ * the sense key
+ */
+ u8 add_sense_len; /* number of additional sense bytes to follow this field */
+ u8 cmnd_info[4]; /* not used */
+ u8 ASC; /* Additional Sense Code */
+ u8 ASCQ; /* Additional Sense Code Qualifier */
+ u8 FRUC; /* Field Replaceable Unit Code - not used */
+ u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
+ * was in error
+ */
+ u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
+ * the bit_ptr field has valid value
+ */
+ u8 reserved2:2;
+ u8 CD:1; /* command data bit: 1- illegal parameter in CDB.
+ * 0- illegal parameter in data.
+ */
+ u8 SKSV:1;
+ u8 field_ptr[2]; /* byte of the CDB or parameter data in error */
+};
+
+struct fsa_dev_info {
+ u64 last;
+ u64 size;
+ u32 type;
+ u32 config_waiting_on;
+ unsigned long config_waiting_stamp;
+ u16 queue_depth;
+ u8 config_needed;
+ u8 valid;
+ u8 ro;
+ u8 locked;
+ u8 deleted;
+ char devname[8];
+ struct sense_data sense_data;
+ u32 block_size;
+};
+
+struct fib {
+ void *next; /* this is used by the allocator */
+ s16 type;
+ s16 size;
+ /*
+ * The Adapter that this I/O is destined for.
+ */
+ struct aac_dev *dev;
+ /*
+ * This is the event the sendfib routine will wait on if the
+ * caller did not pass one and this is synch io.
+ */
+ struct semaphore event_wait;
+ spinlock_t event_lock;
+
+ u32 done; /* gets set to 1 when fib is complete */
+ fib_callback callback;
+ void *callback_data;
+ u32 flags; // u32 dmb was ulong
+ /*
+ * And for the internal issue/reply queues (we may be able
+ * to merge these two)
+ */
+ struct list_head fiblink;
+ void *data;
+ struct hw_fib *hw_fib_va; /* Actual shared object */
+ dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
+};
+
+/*
+ * Adapter Information Block
+ *
+ * This is returned by the RequestAdapterInfo block
+ */
+
+struct aac_adapter_info
+{
+ __le32 platform;
+ __le32 cpu;
+ __le32 subcpu;
+ __le32 clock;
+ __le32 execmem;
+ __le32 buffermem;
+ __le32 totalmem;
+ __le32 kernelrev;
+ __le32 kernelbuild;
+ __le32 monitorrev;
+ __le32 monitorbuild;
+ __le32 hwrev;
+ __le32 hwbuild;
+ __le32 biosrev;
+ __le32 biosbuild;
+ __le32 cluster;
+ __le32 clusterchannelmask;
+ __le32 serial[2];
+ __le32 battery;
+ __le32 options;
+ __le32 OEM;
+};
+
+struct aac_supplement_adapter_info
+{
+ u8 AdapterTypeText[17+1];
+ u8 Pad[2];
+ __le32 FlashMemoryByteSize;
+ __le32 FlashImageId;
+ __le32 MaxNumberPorts;
+ __le32 Version;
+ __le32 FeatureBits;
+ u8 SlotNumber;
+ u8 ReservedPad0[3];
+ u8 BuildDate[12];
+ __le32 CurrentNumberPorts;
+ struct {
+ u8 AssemblyPn[8];
+ u8 FruPn[8];
+ u8 BatteryFruPn[8];
+ u8 EcVersionString[8];
+ u8 Tsid[12];
+ } VpdInfo;
+ __le32 FlashFirmwareRevision;
+ __le32 FlashFirmwareBuild;
+ __le32 RaidTypeMorphOptions;
+ __le32 FlashFirmwareBootRevision;
+ __le32 FlashFirmwareBootBuild;
+ u8 MfgPcbaSerialNo[12];
+ u8 MfgWWNName[8];
+ __le32 SupportedOptions2;
+ __le32 StructExpansion;
+ /* StructExpansion == 1 */
+ __le32 FeatureBits3;
+ __le32 SupportedPerformanceModes;
+ __le32 ReservedForFutureGrowth[80];
+};
+#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
+#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
+/* SupportedOptions2 */
+#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
+#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
+#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000)
+/* 4KB sector size */
+#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000)
+/* 240 simple volume support */
+#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
+#define AAC_SIS_VERSION_V3 3
+#define AAC_SIS_SLOT_UNKNOWN 0xFF
+
+#define GetBusInfo 0x00000009
+struct aac_bus_info {
+ __le32 Command; /* VM_Ioctl */
+ __le32 ObjType; /* FT_DRIVE */
+ __le32 MethodId; /* 1 = SCSI Layer */
+ __le32 ObjectId; /* Handle */
+ __le32 CtlCmd; /* GetBusInfo */
+};
+
+struct aac_bus_info_response {
+ __le32 Status; /* ST_OK */
+ __le32 ObjType;
+ __le32 MethodId; /* unused */
+ __le32 ObjectId; /* unused */
+ __le32 CtlCmd; /* unused */
+ __le32 ProbeComplete;
+ __le32 BusCount;
+ __le32 TargetsPerBus;
+ u8 InitiatorBusId[10];
+ u8 BusValid[10];
+};
+
+/*
+ * Battery platforms
+ */
+#define AAC_BAT_REQ_PRESENT (1)
+#define AAC_BAT_REQ_NOTPRESENT (2)
+#define AAC_BAT_OPT_PRESENT (3)
+#define AAC_BAT_OPT_NOTPRESENT (4)
+#define AAC_BAT_NOT_SUPPORTED (5)
+/*
+ * cpu types
+ */
+#define AAC_CPU_SIMULATOR (1)
+#define AAC_CPU_I960 (2)
+#define AAC_CPU_STRONGARM (3)
+
+/*
+ * Supported Options
+ */
+#define AAC_OPT_SNAPSHOT cpu_to_le32(1)
+#define AAC_OPT_CLUSTERS cpu_to_le32(1<<1)
+#define AAC_OPT_WRITE_CACHE cpu_to_le32(1<<2)
+#define AAC_OPT_64BIT_DATA cpu_to_le32(1<<3)
+#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
+#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
+#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
+#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
+#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
+#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
+#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
+#define AAC_OPT_ALARM cpu_to_le32(1<<11)
+#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
+#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13)
+#define AAC_OPT_RAID_SCSI_MODE cpu_to_le32(1<<14)
+#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
+#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
+#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
+#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
+#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29)
+#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
+#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
+
+/* MSIX context */
+struct aac_msix_ctx {
+ int vector_no;
+ struct aac_dev *dev;
+};
+
+struct aac_dev
+{
+ struct list_head entry;
+ const char *name;
+ int id;
+
+ /*
+ * negotiated FIB settings
+ */
+ unsigned max_fib_size;
+ unsigned sg_tablesize;
+ unsigned max_num_aif;
+
+ /*
+ * Map for 128 fib objects (64k)
+ */
+ dma_addr_t hw_fib_pa;
+ struct hw_fib *hw_fib_va;
+ struct hw_fib *aif_base_va;
+ /*
+ * Fib Headers
+ */
+ struct fib *fibs;
+
+ struct fib *free_fib;
+ spinlock_t fib_lock;
+
+ struct aac_queue_block *queues;
+ /*
+ * The user API will use an IOCTL to register itself to receive
+ * FIBs from the adapter. The following list is used to keep
+ * track of all the threads that have requested these FIBs. The
+ * mutex is used to synchronize access to all data associated
+ * with the adapter fibs.
+ */
+ struct list_head fib_list;
+
+ struct adapter_ops a_ops;
+ unsigned long fsrev; /* Main driver's revision number */
+
+ resource_size_t base_start; /* main IO base */
+ resource_size_t dbg_base; /* address of UART
+ * debug buffer */
+
+ resource_size_t base_size, dbg_size; /* Size of
+ * mapped in region */
+
+ struct aac_init *init; /* Holds initialization info to communicate with adapter */
+ dma_addr_t init_pa; /* Holds physical address of the init struct */
+
+ u32 *host_rrq; /* response queue
+ * if AAC_COMM_MESSAGE_TYPE1 */
+
+ dma_addr_t host_rrq_pa; /* phys. address */
+ /* index into rrq buffer */
+ u32 host_rrq_idx[AAC_MAX_MSIX];
+ atomic_t rrq_outstanding[AAC_MAX_MSIX];
+ u32 fibs_pushed_no;
+ struct pci_dev *pdev; /* Our PCI interface */
+ void * printfbuf; /* pointer to buffer used for printf's from the adapter */
+ void * comm_addr; /* Base address of Comm area */
+ dma_addr_t comm_phys; /* Physical Address of Comm area */
+ size_t comm_size;
+
+ struct Scsi_Host *scsi_host_ptr;
+ int maximum_num_containers;
+ int maximum_num_physicals;
+ int maximum_num_channels;
+ struct fsa_dev_info *fsa_dev;
+ struct task_struct *thread;
+ int cardtype;
+
+ /*
+ * The following is the device specific extension.
+ */
+#ifndef AAC_MIN_FOOTPRINT_SIZE
+# define AAC_MIN_FOOTPRINT_SIZE 8192
+# define AAC_MIN_SRC_BAR0_SIZE 0x400000
+# define AAC_MIN_SRC_BAR1_SIZE 0x800
+# define AAC_MIN_SRCV_BAR0_SIZE 0x100000
+# define AAC_MIN_SRCV_BAR1_SIZE 0x400
+#endif
+ union
+ {
+ struct sa_registers __iomem *sa;
+ struct rx_registers __iomem *rx;
+ struct rkt_registers __iomem *rkt;
+ struct {
+ struct src_registers __iomem *bar0;
+ char __iomem *bar1;
+ } src;
+ } regs;
+ volatile void __iomem *base, *dbg_base_mapped;
+ volatile struct rx_inbound __iomem *IndexRegs;
+ u32 OIMR; /* Mask Register Cache */
+ /*
+ * AIF thread states
+ */
+ u32 aif_thread;
+ struct aac_adapter_info adapter_info;
+ struct aac_supplement_adapter_info supplement_adapter_info;
+ /* These are in adapter info but they are in the io flow so
+ * lets break them out so we don't have to do an AND to check them
+ */
+ u8 nondasd_support;
+ u8 jbod;
+ u8 cache_protected;
+ u8 dac_support;
+ u8 needs_dac;
+ u8 raid_scsi_mode;
+ u8 comm_interface;
+# define AAC_COMM_PRODUCER 0
+# define AAC_COMM_MESSAGE 1
+# define AAC_COMM_MESSAGE_TYPE1 3
+# define AAC_COMM_MESSAGE_TYPE2 4
+ u8 raw_io_interface;
+ u8 raw_io_64;
+ u8 printf_enabled;
+ u8 in_reset;
+ u8 msi;
+ int management_fib_count;
+ spinlock_t manage_lock;
+ spinlock_t sync_lock;
+ int sync_mode;
+ struct fib *sync_fib;
+ struct list_head sync_fib_list;
+ u32 doorbell_mask;
+ u32 max_msix; /* max. MSI-X vectors */
+ u32 vector_cap; /* MSI-X vector capab.*/
+ int msi_enabled; /* MSI/MSI-X enabled */
+ struct msix_entry msixentry[AAC_MAX_MSIX];
+ struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
+ u8 adapter_shutdown;
+};
+
+#define aac_adapter_interrupt(dev) \
+ (dev)->a_ops.adapter_interrupt(dev)
+
+#define aac_adapter_notify(dev, event) \
+ (dev)->a_ops.adapter_notify(dev, event)
+
+#define aac_adapter_disable_int(dev) \
+ (dev)->a_ops.adapter_disable_int(dev)
+
+#define aac_adapter_enable_int(dev) \
+ (dev)->a_ops.adapter_enable_int(dev)
+
+#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
+ (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
+
+#define aac_adapter_check_health(dev) \
+ (dev)->a_ops.adapter_check_health(dev)
+
+#define aac_adapter_restart(dev,bled) \
+ (dev)->a_ops.adapter_restart(dev,bled)
+
+#define aac_adapter_ioremap(dev, size) \
+ (dev)->a_ops.adapter_ioremap(dev, size)
+
+#define aac_adapter_deliver(fib) \
+ ((fib)->dev)->a_ops.adapter_deliver(fib)
+
+#define aac_adapter_bounds(dev,cmd,lba) \
+ dev->a_ops.adapter_bounds(dev,cmd,lba)
+
+#define aac_adapter_read(fib,cmd,lba,count) \
+ ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
+
+#define aac_adapter_write(fib,cmd,lba,count,fua) \
+ ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
+
+#define aac_adapter_scsi(fib,cmd) \
+ ((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
+
+#define aac_adapter_comm(dev,comm) \
+ (dev)->a_ops.adapter_comm(dev, comm)
+
+#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
+#define FIB_CONTEXT_FLAG (0x00000002)
+#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
+#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008)
+
+/*
+ * Define the command values
+ */
+
+#define Null 0
+#define GetAttributes 1
+#define SetAttributes 2
+#define Lookup 3
+#define ReadLink 4
+#define Read 5
+#define Write 6
+#define Create 7
+#define MakeDirectory 8
+#define SymbolicLink 9
+#define MakeNode 10
+#define Removex 11
+#define RemoveDirectoryx 12
+#define Rename 13
+#define Link 14
+#define ReadDirectory 15
+#define ReadDirectoryPlus 16
+#define FileSystemStatus 17
+#define FileSystemInfo 18
+#define PathConfigure 19
+#define Commit 20
+#define Mount 21
+#define UnMount 22
+#define Newfs 23
+#define FsCheck 24
+#define FsSync 25
+#define SimReadWrite 26
+#define SetFileSystemStatus 27
+#define BlockRead 28
+#define BlockWrite 29
+#define NvramIoctl 30
+#define FsSyncWait 31
+#define ClearArchiveBit 32
+#define SetAcl 33
+#define GetAcl 34
+#define AssignAcl 35
+#define FaultInsertion 36 /* Fault Insertion Command */
+#define CrazyCache 37 /* Crazycache */
+
+#define MAX_FSACOMMAND_NUM 38
+
+
+/*
+ * Define the status returns. These are very unixlike although
+ * most are not in fact used
+ */
+
+#define ST_OK 0
+#define ST_PERM 1
+#define ST_NOENT 2
+#define ST_IO 5
+#define ST_NXIO 6
+#define ST_E2BIG 7
+#define ST_ACCES 13
+#define ST_EXIST 17
+#define ST_XDEV 18
+#define ST_NODEV 19
+#define ST_NOTDIR 20
+#define ST_ISDIR 21
+#define ST_INVAL 22
+#define ST_FBIG 27
+#define ST_NOSPC 28
+#define ST_ROFS 30
+#define ST_MLINK 31
+#define ST_WOULDBLOCK 35
+#define ST_NAMETOOLONG 63
+#define ST_NOTEMPTY 66
+#define ST_DQUOT 69
+#define ST_STALE 70
+#define ST_REMOTE 71
+#define ST_NOT_READY 72
+#define ST_BADHANDLE 10001
+#define ST_NOT_SYNC 10002
+#define ST_BAD_COOKIE 10003
+#define ST_NOTSUPP 10004
+#define ST_TOOSMALL 10005
+#define ST_SERVERFAULT 10006
+#define ST_BADTYPE 10007
+#define ST_JUKEBOX 10008
+#define ST_NOTMOUNTED 10009
+#define ST_MAINTMODE 10010
+#define ST_STALEACL 10011
+
+/*
+ * On writes how does the client want the data written.
+ */
+
+#define CACHE_CSTABLE 1
+#define CACHE_UNSTABLE 2
+
+/*
+ * Lets the client know at which level the data was committed on
+ * a write request
+ */
+
+#define CMFILE_SYNCH_NVRAM 1
+#define CMDATA_SYNCH_NVRAM 2
+#define CMFILE_SYNCH 3
+#define CMDATA_SYNCH 4
+#define CMUNSTABLE 5
+
+#define RIO_TYPE_WRITE 0x0000
+#define RIO_TYPE_READ 0x0001
+#define RIO_SUREWRITE 0x0008
+
+#define RIO2_IO_TYPE 0x0003
+#define RIO2_IO_TYPE_WRITE 0x0000
+#define RIO2_IO_TYPE_READ 0x0001
+#define RIO2_IO_TYPE_VERIFY 0x0002
+#define RIO2_IO_ERROR 0x0004
+#define RIO2_IO_SUREWRITE 0x0008
+#define RIO2_SGL_CONFORMANT 0x0010
+#define RIO2_SG_FORMAT 0xF000
+#define RIO2_SG_FORMAT_ARC 0x0000
+#define RIO2_SG_FORMAT_SRL 0x1000
+#define RIO2_SG_FORMAT_IEEE1212 0x2000
+
+struct aac_read
+{
+ __le32 command;
+ __le32 cid;
+ __le32 block;
+ __le32 count;
+ struct sgmap sg; // Must be last in struct because it is variable
+};
+
+struct aac_read64
+{
+ __le32 command;
+ __le16 cid;
+ __le16 sector_count;
+ __le32 block;
+ __le16 pad;
+ __le16 flags;
+ struct sgmap64 sg; // Must be last in struct because it is variable
+};
+
+struct aac_read_reply
+{
+ __le32 status;
+ __le32 count;
+};
+
+struct aac_write
+{
+ __le32 command;
+ __le32 cid;
+ __le32 block;
+ __le32 count;
+ __le32 stable; // Not used
+ struct sgmap sg; // Must be last in struct because it is variable
+};
+
+struct aac_write64
+{
+ __le32 command;
+ __le16 cid;
+ __le16 sector_count;
+ __le32 block;
+ __le16 pad;
+ __le16 flags;
+ struct sgmap64 sg; // Must be last in struct because it is variable
+};
+struct aac_write_reply
+{
+ __le32 status;
+ __le32 count;
+ __le32 committed;
+};
+
+struct aac_raw_io
+{
+ __le32 block[2];
+ __le32 count;
+ __le16 cid;
+ __le16 flags; /* 00 W, 01 R */
+ __le16 bpTotal; /* reserved for F/W use */
+ __le16 bpComplete; /* reserved for F/W use */
+ struct sgmapraw sg;
+};
+
+struct aac_raw_io2 {
+ __le32 blockLow;
+ __le32 blockHigh;
+ __le32 byteCount;
+ __le16 cid;
+ __le16 flags; /* RIO2 flags */
+ __le32 sgeFirstSize; /* size of first sge el. */
+ __le32 sgeNominalSize; /* size of 2nd sge el. (if conformant) */
+ u8 sgeCnt; /* only 8 bits required */
+ u8 bpTotal; /* reserved for F/W use */
+ u8 bpComplete; /* reserved for F/W use */
+ u8 sgeFirstIndex; /* reserved for F/W use */
+ u8 unused[4];
+ struct sge_ieee1212 sge[1];
+};
+
+#define CT_FLUSH_CACHE 129
+struct aac_synchronize {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_FLUSH_CACHE */
+ __le32 cid;
+ __le32 parm1;
+ __le32 parm2;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 count; /* sizeof(((struct aac_synchronize_reply *)NULL)->data) */
+};
+
+struct aac_synchronize_reply {
+ __le32 dummy0;
+ __le32 dummy1;
+ __le32 status; /* CT_OK */
+ __le32 parm1;
+ __le32 parm2;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 parm5;
+ u8 data[16];
+};
+
+#define CT_POWER_MANAGEMENT 245
+#define CT_PM_START_UNIT 2
+#define CT_PM_STOP_UNIT 3
+#define CT_PM_UNIT_IMMEDIATE 1
+struct aac_power_management {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_POWER_MANAGEMENT */
+ __le32 sub; /* CT_PM_* */
+ __le32 cid;
+ __le32 parm; /* CT_PM_sub_* */
+};
+
+#define CT_PAUSE_IO 65
+#define CT_RELEASE_IO 66
+struct aac_pause {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_PAUSE_IO */
+ __le32 timeout; /* 10ms ticks */
+ __le32 min;
+ __le32 noRescan;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 count; /* sizeof(((struct aac_pause_reply *)NULL)->data) */
+};
+
+struct aac_srb
+{
+ __le32 function;
+ __le32 channel;
+ __le32 id;
+ __le32 lun;
+ __le32 timeout;
+ __le32 flags;
+ __le32 count; // Data xfer size
+ __le32 retry_limit;
+ __le32 cdb_size;
+ u8 cdb[16];
+ struct sgmap sg;
+};
+
+/*
+ * This and associated data structs are used by the
+ * ioctl caller and are in cpu order.
+ */
+struct user_aac_srb
+{
+ u32 function;
+ u32 channel;
+ u32 id;
+ u32 lun;
+ u32 timeout;
+ u32 flags;
+ u32 count; // Data xfer size
+ u32 retry_limit;
+ u32 cdb_size;
+ u8 cdb[16];
+ struct user_sgmap sg;
+};
+
+#define AAC_SENSE_BUFFERSIZE 30
+
+struct aac_srb_reply
+{
+ __le32 status;
+ __le32 srb_status;
+ __le32 scsi_status;
+ __le32 data_xfer_length;
+ __le32 sense_data_size;
+ u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
+};
+/*
+ * SRB Flags
+ */
+#define SRB_NoDataXfer 0x0000
+#define SRB_DisableDisconnect 0x0004
+#define SRB_DisableSynchTransfer 0x0008
+#define SRB_BypassFrozenQueue 0x0010
+#define SRB_DisableAutosense 0x0020
+#define SRB_DataIn 0x0040
+#define SRB_DataOut 0x0080
+
+/*
+ * SRB Functions - set in aac_srb->function
+ */
+#define SRBF_ExecuteScsi 0x0000
+#define SRBF_ClaimDevice 0x0001
+#define SRBF_IO_Control 0x0002
+#define SRBF_ReceiveEvent 0x0003
+#define SRBF_ReleaseQueue 0x0004
+#define SRBF_AttachDevice 0x0005
+#define SRBF_ReleaseDevice 0x0006
+#define SRBF_Shutdown 0x0007
+#define SRBF_Flush 0x0008
+#define SRBF_AbortCommand 0x0010
+#define SRBF_ReleaseRecovery 0x0011
+#define SRBF_ResetBus 0x0012
+#define SRBF_ResetDevice 0x0013
+#define SRBF_TerminateIO 0x0014
+#define SRBF_FlushQueue 0x0015
+#define SRBF_RemoveDevice 0x0016
+#define SRBF_DomainValidation 0x0017
+
+/*
+ * SRB SCSI Status - set in aac_srb->scsi_status
+ */
+#define SRB_STATUS_PENDING 0x00
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
+#define SRB_STATUS_ABORT_FAILED 0x03
+#define SRB_STATUS_ERROR 0x04
+#define SRB_STATUS_BUSY 0x05
+#define SRB_STATUS_INVALID_REQUEST 0x06
+#define SRB_STATUS_INVALID_PATH_ID 0x07
+#define SRB_STATUS_NO_DEVICE 0x08
+#define SRB_STATUS_TIMEOUT 0x09
+#define SRB_STATUS_SELECTION_TIMEOUT 0x0A
+#define SRB_STATUS_COMMAND_TIMEOUT 0x0B
+#define SRB_STATUS_MESSAGE_REJECTED 0x0D
+#define SRB_STATUS_BUS_RESET 0x0E
+#define SRB_STATUS_PARITY_ERROR 0x0F
+#define SRB_STATUS_REQUEST_SENSE_FAILED 0x10
+#define SRB_STATUS_NO_HBA 0x11
+#define SRB_STATUS_DATA_OVERRUN 0x12
+#define SRB_STATUS_UNEXPECTED_BUS_FREE 0x13
+#define SRB_STATUS_PHASE_SEQUENCE_FAILURE 0x14
+#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH 0x15
+#define SRB_STATUS_REQUEST_FLUSHED 0x16
+#define SRB_STATUS_DELAYED_RETRY 0x17
+#define SRB_STATUS_INVALID_LUN 0x20
+#define SRB_STATUS_INVALID_TARGET_ID 0x21
+#define SRB_STATUS_BAD_FUNCTION 0x22
+#define SRB_STATUS_ERROR_RECOVERY 0x23
+#define SRB_STATUS_NOT_STARTED 0x24
+#define SRB_STATUS_NOT_IN_USE 0x30
+#define SRB_STATUS_FORCE_ABORT 0x31
+#define SRB_STATUS_DOMAIN_VALIDATION_FAIL 0x32
+
+/*
+ * Object-Server / Volume-Manager Dispatch Classes
+ */
+
+#define VM_Null 0
+#define VM_NameServe 1
+#define VM_ContainerConfig 2
+#define VM_Ioctl 3
+#define VM_FilesystemIoctl 4
+#define VM_CloseAll 5
+#define VM_CtBlockRead 6
+#define VM_CtBlockWrite 7
+#define VM_SliceBlockRead 8 /* raw access to configured "storage objects" */
+#define VM_SliceBlockWrite 9
+#define VM_DriveBlockRead 10 /* raw access to physical devices */
+#define VM_DriveBlockWrite 11
+#define VM_EnclosureMgt 12 /* enclosure management */
+#define VM_Unused 13 /* used to be diskset management */
+#define VM_CtBlockVerify 14
+#define VM_CtPerf 15 /* performance test */
+#define VM_CtBlockRead64 16
+#define VM_CtBlockWrite64 17
+#define VM_CtBlockVerify64 18
+#define VM_CtHostRead64 19
+#define VM_CtHostWrite64 20
+#define VM_DrvErrTblLog 21
+#define VM_NameServe64 22
+#define VM_NameServeAllBlk 30
+
+#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
+
+/*
+ * Descriptive information (eg, vital stats)
+ * that a content manager might report. The
+ * FileArray filesystem component is one example
+ * of a content manager. Raw mode might be
+ * another.
+ */
+
+struct aac_fsinfo {
+ __le32 fsTotalSize; /* Consumed by fs, incl. metadata */
+ __le32 fsBlockSize;
+ __le32 fsFragSize;
+ __le32 fsMaxExtendSize;
+ __le32 fsSpaceUnits;
+ __le32 fsMaxNumFiles;
+ __le32 fsNumFreeFiles;
+ __le32 fsInodeDensity;
+}; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+
+struct aac_blockdevinfo {
+ __le32 block_size;
+};
+
+union aac_contentinfo {
+ struct aac_fsinfo filesys;
+ struct aac_blockdevinfo bdevinfo;
+};
+
+/*
+ * Query for Container Configuration Status
+ */
+
+#define CT_GET_CONFIG_STATUS 147
+struct aac_get_config_status {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_GET_CONFIG_STATUS */
+ __le32 parm1;
+ __le32 parm2;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 parm5;
+ __le32 count; /* sizeof(((struct aac_get_config_status_resp *)NULL)->data) */
+};
+
+#define CFACT_CONTINUE 0
+#define CFACT_PAUSE 1
+#define CFACT_ABORT 2
+struct aac_get_config_status_resp {
+ __le32 response; /* ST_OK */
+ __le32 dummy0;
+ __le32 status; /* CT_OK */
+ __le32 parm1;
+ __le32 parm2;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 parm5;
+ struct {
+ __le32 action; /* CFACT_CONTINUE, CFACT_PAUSE or CFACT_ABORT */
+ __le16 flags;
+ __le16 count;
+ } data;
+};
+
+/*
+ * Accept the configuration as-is
+ */
+
+#define CT_COMMIT_CONFIG 152
+
+struct aac_commit_config {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_COMMIT_CONFIG */
+};
+
+/*
+ * Query for Container Configuration Status
+ */
+
+#define CT_GET_CONTAINER_COUNT 4
+struct aac_get_container_count {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_GET_CONTAINER_COUNT */
+};
+
+struct aac_get_container_count_resp {
+ __le32 response; /* ST_OK */
+ __le32 dummy0;
+ __le32 MaxContainers;
+ __le32 ContainerSwitchEntries;
+ __le32 MaxPartitions;
+ __le32 MaxSimpleVolumes;
+};
+
+
+/*
+ * Query for "mountable" objects, ie, objects that are typically
+ * associated with a drive letter on the client (host) side.
+ */
+
+struct aac_mntent {
+ __le32 oid;
+ u8 name[16]; /* if applicable */
+ struct creation_info create_info; /* if applicable */
+ __le32 capacity;
+ __le32 vol; /* substrate structure */
+ __le32 obj; /* FT_FILESYS, etc. */
+ __le32 state; /* unready for mounting,
+ readonly, etc. */
+ union aac_contentinfo fileinfo; /* Info specific to content
+ manager (eg, filesystem) */
+ __le32 altoid; /* != oid <==> snapshot or
+ broken mirror exists */
+ __le32 capacityhigh;
+};
+
+#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */
+#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
+#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */
+#define FSCS_NOT_READY 0x0008 /* Array spinning up to fulfil request */
+
+struct aac_query_mount {
+ __le32 command;
+ __le32 type;
+ __le32 count;
+};
+
+struct aac_mount {
+ __le32 status;
+ __le32 type; /* should be same as that requested */
+ __le32 count;
+ struct aac_mntent mnt[1];
+};
+
+#define CT_READ_NAME 130
+struct aac_get_name {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_READ_NAME */
+ __le32 cid;
+ __le32 parm1;
+ __le32 parm2;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */
+};
+
+struct aac_get_name_resp {
+ __le32 dummy0;
+ __le32 dummy1;
+ __le32 status; /* CT_OK */
+ __le32 parm1;
+ __le32 parm2;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 parm5;
+ u8 data[16];
+};
+
+#define CT_CID_TO_32BITS_UID 165
+struct aac_get_serial {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_CID_TO_32BITS_UID */
+ __le32 cid;
+};
+
+struct aac_get_serial_resp {
+ __le32 dummy0;
+ __le32 dummy1;
+ __le32 status; /* CT_OK */
+ __le32 uid;
+};
+
+/*
+ * The following command is sent to shut down each container.
+ */
+
+struct aac_close {
+ __le32 command;
+ __le32 cid;
+};
+
+struct aac_query_disk
+{
+ s32 cnum;
+ s32 bus;
+ s32 id;
+ s32 lun;
+ u32 valid;
+ u32 locked;
+ u32 deleted;
+ s32 instance;
+ s8 name[10];
+ u32 unmapped;
+};
+
+struct aac_delete_disk {
+ u32 disknum;
+ u32 cnum;
+};
+
+struct fib_ioctl
+{
+ u32 fibctx;
+ s32 wait;
+ char __user *fib;
+};
+
+struct revision
+{
+ u32 compat;
+ __le32 version;
+ __le32 build;
+};
+
+
+/*
+ * Ugly - non Linux like ioctl coding for back compat.
+ */
+
+#define CTL_CODE(function, method) ( \
+ (4<< 16) | ((function) << 2) | (method) \
+)
+
+/*
+ * Define the method codes for how buffers are passed for I/O and FS
+ * controls
+ */
+
+#define METHOD_BUFFERED 0
+#define METHOD_NEITHER 3
+
+/*
+ * Filesystem ioctls
+ */
+
+#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
+#define FSACTL_DELETE_DISK 0x163
+#define FSACTL_QUERY_DISK 0x173
+#define FSACTL_OPEN_GET_ADAPTER_FIB CTL_CODE(2100, METHOD_BUFFERED)
+#define FSACTL_GET_NEXT_ADAPTER_FIB CTL_CODE(2101, METHOD_BUFFERED)
+#define FSACTL_CLOSE_GET_ADAPTER_FIB CTL_CODE(2102, METHOD_BUFFERED)
+#define FSACTL_MINIPORT_REV_CHECK CTL_CODE(2107, METHOD_BUFFERED)
+#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
+#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
+#define FSACTL_GET_CONTAINERS 2131
+#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED)
+
+
+struct aac_common
+{
+ /*
+ * If this value is set to 1 then interrupt moderation will occur
+ * in the base commuication support.
+ */
+ u32 irq_mod;
+ u32 peak_fibs;
+ u32 zero_fibs;
+ u32 fib_timeouts;
+ /*
+ * Statistical counters in debug mode
+ */
+#ifdef DBG
+ u32 FibsSent;
+ u32 FibRecved;
+ u32 NoResponseSent;
+ u32 NoResponseRecved;
+ u32 AsyncSent;
+ u32 AsyncRecved;
+ u32 NormalSent;
+ u32 NormalRecved;
+#endif
+};
+
+extern struct aac_common aac_config;
+
+
+/*
+ * The following macro is used when sending and receiving FIBs. It is
+ * only used for debugging.
+ */
+
+#ifdef DBG
+#define FIB_COUNTER_INCREMENT(counter) (counter)++
+#else
+#define FIB_COUNTER_INCREMENT(counter)
+#endif
+
+/*
+ * Adapter direct commands
+ * Monitor/Kernel API
+ */
+
+#define BREAKPOINT_REQUEST 0x00000004
+#define INIT_STRUCT_BASE_ADDRESS 0x00000005
+#define READ_PERMANENT_PARAMETERS 0x0000000a
+#define WRITE_PERMANENT_PARAMETERS 0x0000000b
+#define HOST_CRASHING 0x0000000d
+#define SEND_SYNCHRONOUS_FIB 0x0000000c
+#define COMMAND_POST_RESULTS 0x00000014
+#define GET_ADAPTER_PROPERTIES 0x00000019
+#define GET_DRIVER_BUFFER_PROPERTIES 0x00000023
+#define RCV_TEMP_READINGS 0x00000025
+#define GET_COMM_PREFERRED_SETTINGS 0x00000026
+#define IOP_RESET 0x00001000
+#define IOP_RESET_ALWAYS 0x00001001
+#define RE_INIT_ADAPTER 0x000000ee
+
+/*
+ * Adapter Status Register
+ *
+ * Phase Staus mailbox is 32bits:
+ * <31:16> = Phase Status
+ * <15:0> = Phase
+ *
+ * The adapter reports is present state through the phase. Only
+ * a single phase should be ever be set. Each phase can have multiple
+ * phase status bits to provide more detailed information about the
+ * state of the board. Care should be taken to ensure that any phase
+ * status bits that are set when changing the phase are also valid
+ * for the new phase or be cleared out. Adapter software (monitor,
+ * iflash, kernel) is responsible for properly maintining the phase
+ * status mailbox when it is running.
+ *
+ * MONKER_API Phases
+ *
+ * Phases are bit oriented. It is NOT valid to have multiple bits set
+ */
+
+#define SELF_TEST_FAILED 0x00000004
+#define MONITOR_PANIC 0x00000020
+#define KERNEL_UP_AND_RUNNING 0x00000080
+#define KERNEL_PANIC 0x00000100
+#define FLASH_UPD_PENDING 0x00002000
+#define FLASH_UPD_SUCCESS 0x00004000
+#define FLASH_UPD_FAILED 0x00008000
+#define FWUPD_TIMEOUT (5 * 60)
+
+/*
+ * Doorbell bit defines
+ */
+
+#define DoorBellSyncCmdAvailable (1<<0) /* Host -> Adapter */
+#define DoorBellPrintfDone (1<<5) /* Host -> Adapter */
+#define DoorBellAdapterNormCmdReady (1<<1) /* Adapter -> Host */
+#define DoorBellAdapterNormRespReady (1<<2) /* Adapter -> Host */
+#define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */
+#define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */
+#define DoorBellPrintfReady (1<<5) /* Adapter -> Host */
+#define DoorBellAifPending (1<<6) /* Adapter -> Host */
+
+/* PMC specific outbound doorbell bits */
+#define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */
+
+/*
+ * For FIB communication, we need all of the following things
+ * to send back to the user.
+ */
+
+#define AifCmdEventNotify 1 /* Notify of event */
+#define AifEnConfigChange 3 /* Adapter configuration change */
+#define AifEnContainerChange 4 /* Container configuration change */
+#define AifEnDeviceFailure 5 /* SCSI device failed */
+#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
+#define EM_DRIVE_INSERTION 31
+#define EM_DRIVE_REMOVAL 32
+#define EM_SES_DRIVE_INSERTION 33
+#define EM_SES_DRIVE_REMOVAL 26
+#define AifEnBatteryEvent 14 /* Change in Battery State */
+#define AifEnAddContainer 15 /* A new array was created */
+#define AifEnDeleteContainer 16 /* A container was deleted */
+#define AifEnExpEvent 23 /* Firmware Event Log */
+#define AifExeFirmwarePanic 3 /* Firmware Event Panic */
+#define AifHighPriority 3 /* Highest Priority Event */
+#define AifEnAddJBOD 30 /* JBOD created */
+#define AifEnDeleteJBOD 31 /* JBOD deleted */
+
+#define AifCmdJobProgress 2 /* Progress report */
+#define AifJobCtrZero 101 /* Array Zero progress */
+#define AifJobStsSuccess 1 /* Job completes */
+#define AifJobStsRunning 102 /* Job running */
+#define AifCmdAPIReport 3 /* Report from other user of API */
+#define AifCmdDriverNotify 4 /* Notify host driver of event */
+#define AifDenMorphComplete 200 /* A morph operation completed */
+#define AifDenVolumeExtendComplete 201 /* A volume extend completed */
+#define AifReqJobList 100 /* Gets back complete job list */
+#define AifReqJobsForCtr 101 /* Gets back jobs for specific container */
+#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
+#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
+#define AifReqTerminateJob 104 /* Terminates job */
+#define AifReqSuspendJob 105 /* Suspends a job */
+#define AifReqResumeJob 106 /* Resumes a job */
+#define AifReqSendAPIReport 107 /* API generic report requests */
+#define AifReqAPIJobStart 108 /* Start a job from the API */
+#define AifReqAPIJobUpdate 109 /* Update a job report from the API */
+#define AifReqAPIJobFinish 110 /* Finish a job from the API */
+
+/* PMC NEW COMM: Request the event data */
+#define AifReqEvent 200
+
+/* RAW device deleted */
+#define AifRawDeviceRemove 203
+
+/*
+ * Adapter Initiated FIB command structures. Start with the adapter
+ * initiated FIBs that really come from the adapter, and get responded
+ * to by the host.
+ */
+
+struct aac_aifcmd {
+ __le32 command; /* Tell host what type of notify this is */
+ __le32 seqnum; /* To allow ordering of reports (if necessary) */
+ u8 data[1]; /* Undefined length (from kernel viewpoint) */
+};
+
+/**
+ * Convert capacity to cylinders
+ * accounting for the fact capacity could be a 64 bit value
+ *
+ */
+static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
+{
+ sector_div(capacity, divisor);
+ return capacity;
+}
+
+/* SCp.phase values */
+#define AAC_OWNER_MIDLEVEL 0x101
+#define AAC_OWNER_LOWLEVEL 0x102
+#define AAC_OWNER_ERROR_HANDLER 0x103
+#define AAC_OWNER_FIRMWARE 0x106
+
+const char *aac_driverinfo(struct Scsi_Host *);
+struct fib *aac_fib_alloc(struct aac_dev *dev);
+int aac_fib_setup(struct aac_dev *dev);
+void aac_fib_map_free(struct aac_dev *dev);
+void aac_fib_free(struct fib * context);
+void aac_fib_init(struct fib * context);
+void aac_printf(struct aac_dev *dev, u32 val);
+int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
+int aac_fib_complete(struct fib * context);
+#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
+struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+void aac_src_access_devreg(struct aac_dev *dev, int mode);
+int aac_get_config_status(struct aac_dev *dev, int commit_flag);
+int aac_get_containers(struct aac_dev *dev);
+int aac_scsi_cmd(struct scsi_cmnd *cmd);
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+#ifndef shost_to_class
+#define shost_to_class(shost) &shost->shost_dev
+#endif
+ssize_t aac_get_serial_number(struct device *dev, char *buf);
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
+int aac_rx_init(struct aac_dev *dev);
+int aac_rkt_init(struct aac_dev *dev);
+int aac_nark_init(struct aac_dev *dev);
+int aac_sa_init(struct aac_dev *dev);
+int aac_src_init(struct aac_dev *dev);
+int aac_srcv_init(struct aac_dev *dev);
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
+unsigned int aac_response_normal(struct aac_queue * q);
+unsigned int aac_command_normal(struct aac_queue * q);
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
+ int isAif, int isFastResponse,
+ struct hw_fib *aif_fib);
+int aac_reset_adapter(struct aac_dev * dev, int forced);
+int aac_check_health(struct aac_dev * dev);
+int aac_command_thread(void *data);
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
+int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
+struct aac_driver_ident* aac_get_driver_ident(int devtype);
+int aac_get_adapter_info(struct aac_dev* dev);
+int aac_send_shutdown(struct aac_dev *dev);
+int aac_probe_container(struct aac_dev *dev, int cid);
+int _aac_rx_init(struct aac_dev *dev);
+int aac_rx_select_comm(struct aac_dev *dev, int comm);
+int aac_rx_deliver_producer(struct fib * fib);
+char * get_container_type(unsigned type);
+extern int numacb;
+extern int acbsize;
+extern char aac_driver_version[];
+extern int startup_timeout;
+extern int aif_timeout;
+extern int expose_physicals;
+extern int aac_reset_devices;
+extern int aac_msi;
+extern int aac_commit;
+extern int update_interval;
+extern int check_interval;
+extern int aac_check_reset;
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
new file mode 100644
index 000000000..54195a117
--- /dev/null
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -0,0 +1,895 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * commctrl.c
+ *
+ * Abstract: Contains all routines for control of the AFA comm layer
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h> /* ssleep prototype */
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/**
+ * ioctl_send_fib - send a FIB from userspace
+ * @dev: adapter is being processed
+ * @arg: arguments to the ioctl call
+ *
+ * This routine sends a fib to the adapter on behalf of a user level
+ * program.
+ */
+# define AAC_DEBUG_PREAMBLE KERN_INFO
+# define AAC_DEBUG_POSTAMBLE
+
+static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+{
+ struct hw_fib * kfib;
+ struct fib *fibptr;
+ struct hw_fib * hw_fib = (struct hw_fib *)0;
+ dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
+ unsigned size;
+ int retval;
+
+ if (dev->in_reset) {
+ return -EBUSY;
+ }
+ fibptr = aac_fib_alloc(dev);
+ if(fibptr == NULL) {
+ return -ENOMEM;
+ }
+
+ kfib = fibptr->hw_fib_va;
+ /*
+ * First copy in the header so that we can check the size field.
+ */
+ if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
+ aac_fib_free(fibptr);
+ return -EFAULT;
+ }
+ /*
+ * Since we copy based on the fib header size, make sure that we
+ * will not overrun the buffer when we copy the memory. Return
+ * an error if we would.
+ */
+ size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+ if (size < le16_to_cpu(kfib->header.SenderSize))
+ size = le16_to_cpu(kfib->header.SenderSize);
+ if (size > dev->max_fib_size) {
+ dma_addr_t daddr;
+
+ if (size > 2048) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
+ kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+ if (!kfib) {
+ retval = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Highjack the hw_fib */
+ hw_fib = fibptr->hw_fib_va;
+ hw_fib_pa = fibptr->hw_fib_pa;
+ fibptr->hw_fib_va = kfib;
+ fibptr->hw_fib_pa = daddr;
+ memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
+ memcpy(kfib, hw_fib, dev->max_fib_size);
+ }
+
+ if (copy_from_user(kfib, arg, size)) {
+ retval = -EFAULT;
+ goto cleanup;
+ }
+
+ if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
+ aac_adapter_interrupt(dev);
+ /*
+ * Since we didn't really send a fib, zero out the state to allow
+ * cleanup code not to assert.
+ */
+ kfib->header.XferState = 0;
+ } else {
+ retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
+ le16_to_cpu(kfib->header.Size) , FsaNormal,
+ 1, 1, NULL, NULL);
+ if (retval) {
+ goto cleanup;
+ }
+ if (aac_fib_complete(fibptr) != 0) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+ }
+ /*
+ * Make sure that the size returned by the adapter (which includes
+ * the header) is less than or equal to the size of a fib, so we
+ * don't corrupt application data. Then copy that size to the user
+ * buffer. (Don't try to add the header information again, since it
+ * was already included by the adapter.)
+ */
+
+ retval = 0;
+ if (copy_to_user(arg, (void *)kfib, size))
+ retval = -EFAULT;
+cleanup:
+ if (hw_fib) {
+ pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
+ fibptr->hw_fib_pa = hw_fib_pa;
+ fibptr->hw_fib_va = hw_fib;
+ }
+ if (retval != -ERESTARTSYS)
+ aac_fib_free(fibptr);
+ return retval;
+}
+
+/**
+ * open_getadapter_fib - Get the next fib
+ *
+ * This routine will get the next Fib, if available, from the AdapterFibContext
+ * passed in from the user.
+ */
+
+static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+ struct aac_fib_context * fibctx;
+ int status;
+
+ fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
+ if (fibctx == NULL) {
+ status = -ENOMEM;
+ } else {
+ unsigned long flags;
+ struct list_head * entry;
+ struct aac_fib_context * context;
+
+ fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
+ fibctx->size = sizeof(struct aac_fib_context);
+ /*
+ * Yes yes, I know this could be an index, but we have a
+ * better guarantee of uniqueness for the locked loop below.
+ * Without the aid of a persistent history, this also helps
+ * reduce the chance that the opaque context would be reused.
+ */
+ fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
+ /*
+ * Initialize the mutex used to wait for the next AIF.
+ */
+ sema_init(&fibctx->wait_sem, 0);
+ fibctx->wait = 0;
+ /*
+ * Initialize the fibs and set the count of fibs on
+ * the list to 0.
+ */
+ fibctx->count = 0;
+ INIT_LIST_HEAD(&fibctx->fib_list);
+ fibctx->jiffies = jiffies/HZ;
+ /*
+ * Now add this context onto the adapter's
+ * AdapterFibContext list.
+ */
+ spin_lock_irqsave(&dev->fib_lock, flags);
+ /* Ensure that we have a unique identifier */
+ entry = dev->fib_list.next;
+ while (entry != &dev->fib_list) {
+ context = list_entry(entry, struct aac_fib_context, next);
+ if (context->unique == fibctx->unique) {
+ /* Not unique (32 bits) */
+ fibctx->unique++;
+ entry = dev->fib_list.next;
+ } else {
+ entry = entry->next;
+ }
+ }
+ list_add_tail(&fibctx->next, &dev->fib_list);
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ if (copy_to_user(arg, &fibctx->unique,
+ sizeof(fibctx->unique))) {
+ status = -EFAULT;
+ } else {
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * next_getadapter_fib - get the next fib
+ * @dev: adapter to use
+ * @arg: ioctl argument
+ *
+ * This routine will get the next Fib, if available, from the AdapterFibContext
+ * passed in from the user.
+ */
+
+static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+ struct fib_ioctl f;
+ struct fib *fib;
+ struct aac_fib_context *fibctx;
+ int status;
+ struct list_head * entry;
+ unsigned long flags;
+
+ if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
+ return -EFAULT;
+ /*
+ * Verify that the HANDLE passed in was a valid AdapterFibContext
+ *
+ * Search the list of AdapterFibContext addresses on the adapter
+ * to be sure this is a valid address
+ */
+ spin_lock_irqsave(&dev->fib_lock, flags);
+ entry = dev->fib_list.next;
+ fibctx = NULL;
+
+ while (entry != &dev->fib_list) {
+ fibctx = list_entry(entry, struct aac_fib_context, next);
+ /*
+ * Extract the AdapterFibContext from the Input parameters.
+ */
+ if (fibctx->unique == f.fibctx) { /* We found a winner */
+ break;
+ }
+ entry = entry->next;
+ fibctx = NULL;
+ }
+ if (!fibctx) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ dprintk ((KERN_INFO "Fib Context not found\n"));
+ return -EINVAL;
+ }
+
+ if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+ (fibctx->size != sizeof(struct aac_fib_context))) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ dprintk ((KERN_INFO "Fib Context corrupt?\n"));
+ return -EINVAL;
+ }
+ status = 0;
+ /*
+ * If there are no fibs to send back, then either wait or return
+ * -EAGAIN
+ */
+return_fib:
+ if (!list_empty(&fibctx->fib_list)) {
+ /*
+ * Pull the next fib from the fibs
+ */
+ entry = fibctx->fib_list.next;
+ list_del(entry);
+
+ fib = list_entry(entry, struct fib, fiblink);
+ fibctx->count--;
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
+ kfree(fib->hw_fib_va);
+ kfree(fib);
+ return -EFAULT;
+ }
+ /*
+ * Free the space occupied by this copy of the fib.
+ */
+ kfree(fib->hw_fib_va);
+ kfree(fib);
+ status = 0;
+ } else {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ /* If someone killed the AIF aacraid thread, restart it */
+ status = !dev->aif_thread;
+ if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
+ /* Be paranoid, be very paranoid! */
+ kthread_stop(dev->thread);
+ ssleep(1);
+ dev->aif_thread = 0;
+ dev->thread = kthread_run(aac_command_thread, dev,
+ "%s", dev->name);
+ ssleep(1);
+ }
+ if (f.wait) {
+ if(down_interruptible(&fibctx->wait_sem) < 0) {
+ status = -ERESTARTSYS;
+ } else {
+ /* Lock again and retry */
+ spin_lock_irqsave(&dev->fib_lock, flags);
+ goto return_fib;
+ }
+ } else {
+ status = -EAGAIN;
+ }
+ }
+ fibctx->jiffies = jiffies/HZ;
+ return status;
+}
+
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
+{
+ struct fib *fib;
+
+ /*
+ * First free any FIBs that have not been consumed.
+ */
+ while (!list_empty(&fibctx->fib_list)) {
+ struct list_head * entry;
+ /*
+ * Pull the next fib from the fibs
+ */
+ entry = fibctx->fib_list.next;
+ list_del(entry);
+ fib = list_entry(entry, struct fib, fiblink);
+ fibctx->count--;
+ /*
+ * Free the space occupied by this copy of the fib.
+ */
+ kfree(fib->hw_fib_va);
+ kfree(fib);
+ }
+ /*
+ * Remove the Context from the AdapterFibContext List
+ */
+ list_del(&fibctx->next);
+ /*
+ * Invalidate context
+ */
+ fibctx->type = 0;
+ /*
+ * Free the space occupied by the Context
+ */
+ kfree(fibctx);
+ return 0;
+}
+
+/**
+ * close_getadapter_fib - close down user fib context
+ * @dev: adapter
+ * @arg: ioctl arguments
+ *
+ * This routine will close down the fibctx passed in from the user.
+ */
+
+static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+ struct aac_fib_context *fibctx;
+ int status;
+ unsigned long flags;
+ struct list_head * entry;
+
+ /*
+ * Verify that the HANDLE passed in was a valid AdapterFibContext
+ *
+ * Search the list of AdapterFibContext addresses on the adapter
+ * to be sure this is a valid address
+ */
+
+ entry = dev->fib_list.next;
+ fibctx = NULL;
+
+ while(entry != &dev->fib_list) {
+ fibctx = list_entry(entry, struct aac_fib_context, next);
+ /*
+ * Extract the fibctx from the input parameters
+ */
+ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
+ break;
+ entry = entry->next;
+ fibctx = NULL;
+ }
+
+ if (!fibctx)
+ return 0; /* Already gone */
+
+ if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+ (fibctx->size != sizeof(struct aac_fib_context)))
+ return -EINVAL;
+ spin_lock_irqsave(&dev->fib_lock, flags);
+ status = aac_close_fib_context(dev, fibctx);
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ return status;
+}
+
+/**
+ * check_revision - close down user fib context
+ * @dev: adapter
+ * @arg: ioctl arguments
+ *
+ * This routine returns the driver version.
+ * Under Linux, there have been no version incompatibilities, so this is
+ * simple!
+ */
+
+static int check_revision(struct aac_dev *dev, void __user *arg)
+{
+ struct revision response;
+ char *driver_version = aac_driver_version;
+ u32 version;
+
+ response.compat = 1;
+ version = (simple_strtol(driver_version,
+ &driver_version, 10) << 24) | 0x00000400;
+ version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
+ version += simple_strtol(driver_version + 1, NULL, 10);
+ response.version = cpu_to_le32(version);
+# ifdef AAC_DRIVER_BUILD
+ response.build = cpu_to_le32(AAC_DRIVER_BUILD);
+# else
+ response.build = cpu_to_le32(9999);
+# endif
+
+ if (copy_to_user(arg, &response, sizeof(response)))
+ return -EFAULT;
+ return 0;
+}
+
+
+/**
+ *
+ * aac_send_raw_scb
+ *
+ */
+
+static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+{
+ struct fib* srbfib;
+ int status;
+ struct aac_srb *srbcmd = NULL;
+ struct user_aac_srb *user_srbcmd = NULL;
+ struct user_aac_srb __user *user_srb = arg;
+ struct aac_srb_reply __user *user_reply;
+ struct aac_srb_reply* reply;
+ u32 fibsize = 0;
+ u32 flags = 0;
+ s32 rcode = 0;
+ u32 data_dir;
+ void __user *sg_user[32];
+ void *sg_list[32];
+ u32 sg_indx = 0;
+ u32 byte_count = 0;
+ u32 actual_fibsize64, actual_fibsize = 0;
+ int i;
+
+
+ if (dev->in_reset) {
+ dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
+ return -EBUSY;
+ }
+ if (!capable(CAP_SYS_ADMIN)){
+ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
+ return -EPERM;
+ }
+ /*
+ * Allocate and initialize a Fib then setup a SRB command
+ */
+ if (!(srbfib = aac_fib_alloc(dev))) {
+ return -ENOMEM;
+ }
+ aac_fib_init(srbfib);
+ /* raw_srb FIB is not FastResponseCapable */
+ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
+
+ srbcmd = (struct aac_srb*) fib_data(srbfib);
+
+ memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
+ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
+ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+
+ if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+ (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+
+ user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
+ if (!user_srbcmd) {
+ dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ if(copy_from_user(user_srbcmd, user_srb,fibsize)){
+ dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+
+ user_reply = arg+fibsize;
+
+ flags = user_srbcmd->flags; /* from user in cpu order */
+ // Fix up srb for endian and force some values
+
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
+ srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
+ srbcmd->id = cpu_to_le32(user_srbcmd->id);
+ srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
+ srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
+ srbcmd->flags = cpu_to_le32(flags);
+ srbcmd->retry_limit = 0; // Obsolete parameter
+ srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
+ memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
+
+ switch (flags & (SRB_DataIn | SRB_DataOut)) {
+ case SRB_DataOut:
+ data_dir = DMA_TO_DEVICE;
+ break;
+ case (SRB_DataIn | SRB_DataOut):
+ data_dir = DMA_BIDIRECTIONAL;
+ break;
+ case SRB_DataIn:
+ data_dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ data_dir = DMA_NONE;
+ }
+ if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
+ dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
+ le32_to_cpu(srbcmd->sg.count)));
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
+ actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
+ (sizeof(struct sgentry64) - sizeof(struct sgentry));
+ /* User made a mistake - should not continue */
+ if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
+ dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
+ "Raw SRB command calculated fibsize=%lu;%lu "
+ "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
+ "issued fibsize=%d\n",
+ actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
+ sizeof(struct aac_srb), sizeof(struct sgentry),
+ sizeof(struct sgentry64), fibsize));
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
+ dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ byte_count = 0;
+ if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
+ struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
+ struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
+
+ /*
+ * This should also catch if user used the 32 bit sgmap
+ */
+ if (actual_fibsize64 == fibsize) {
+ actual_fibsize = actual_fibsize64;
+ for (i = 0; i < upsg->count; i++) {
+ u64 addr;
+ void* p;
+ if (upsg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+ dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ upsg->sg[i].count,i,upsg->count));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ addr = (u64)upsg->sg[i].addr[0];
+ addr += ((u64)upsg->sg[i].addr[1]) << 32;
+ sg_user[i] = (void __user *)(uintptr_t)addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
+
+ psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+ psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+ byte_count += upsg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+ }
+ } else {
+ struct user_sgmap* usg;
+ usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
+ + sizeof(struct sgmap), GFP_KERNEL);
+ if (!usg) {
+ dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
+ + sizeof(struct sgmap));
+ actual_fibsize = actual_fibsize64;
+
+ for (i = 0; i < usg->count; i++) {
+ u64 addr;
+ void* p;
+ if (usg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ kfree(usg);
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+ dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ usg->sg[i].count,i,usg->count));
+ kfree(usg);
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+ kfree (usg);
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+
+ psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+ psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+ byte_count += usg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+ }
+ kfree (usg);
+ }
+ srbcmd->count = cpu_to_le32(byte_count);
+ if (user_srbcmd->sg.count)
+ psg->count = cpu_to_le32(sg_indx+1);
+ else
+ psg->count = 0;
+ status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
+ } else {
+ struct user_sgmap* upsg = &user_srbcmd->sg;
+ struct sgmap* psg = &srbcmd->sg;
+
+ if (actual_fibsize64 == fibsize) {
+ struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
+ for (i = 0; i < upsg->count; i++) {
+ uintptr_t addr;
+ void* p;
+ if (usg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+ dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ usg->sg[i].count,i,usg->count));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ addr = (u64)usg->sg[i].addr[0];
+ addr += ((u64)usg->sg[i].addr[1]) << 32;
+ sg_user[i] = (void __user *)addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+
+ psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
+ byte_count += usg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+ }
+ } else {
+ for (i = 0; i < upsg->count; i++) {
+ dma_addr_t addr;
+ void* p;
+ if (upsg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
+ if (!p) {
+ dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ upsg->sg[i].count, i, upsg->count));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p, sg_user[i],
+ upsg->sg[i].count)) {
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p,
+ upsg->sg[i].count, data_dir);
+
+ psg->sg[i].addr = cpu_to_le32(addr);
+ byte_count += upsg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+ }
+ }
+ srbcmd->count = cpu_to_le32(byte_count);
+ if (user_srbcmd->sg.count)
+ psg->count = cpu_to_le32(sg_indx+1);
+ else
+ psg->count = 0;
+ status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
+ }
+ if (status == -ERESTARTSYS) {
+ rcode = -ERESTARTSYS;
+ goto cleanup;
+ }
+
+ if (status != 0){
+ dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
+ rcode = -ENXIO;
+ goto cleanup;
+ }
+
+ if (flags & SRB_DataIn) {
+ for(i = 0 ; i <= sg_indx; i++){
+ byte_count = le32_to_cpu(
+ (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
+ ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
+ : srbcmd->sg.sg[i].count);
+ if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+
+ }
+ }
+ }
+
+ reply = (struct aac_srb_reply *) fib_data(srbfib);
+ if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
+ dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+
+cleanup:
+ kfree(user_srbcmd);
+ for(i=0; i <= sg_indx; i++){
+ kfree(sg_list[i]);
+ }
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(srbfib);
+ aac_fib_free(srbfib);
+ }
+
+ return rcode;
+}
+
+struct aac_pci_info {
+ u32 bus;
+ u32 slot;
+};
+
+
+static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
+{
+ struct aac_pci_info pci_info;
+
+ pci_info.bus = dev->pdev->bus->number;
+ pci_info.slot = PCI_SLOT(dev->pdev->devfn);
+
+ if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+ dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
+ return -EFAULT;
+ }
+ return 0;
+}
+
+
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
+{
+ int status;
+
+ /*
+ * HBA gets first crack
+ */
+
+ status = aac_dev_ioctl(dev, cmd, arg);
+ if (status != -ENOTTY)
+ return status;
+
+ switch (cmd) {
+ case FSACTL_MINIPORT_REV_CHECK:
+ status = check_revision(dev, arg);
+ break;
+ case FSACTL_SEND_LARGE_FIB:
+ case FSACTL_SENDFIB:
+ status = ioctl_send_fib(dev, arg);
+ break;
+ case FSACTL_OPEN_GET_ADAPTER_FIB:
+ status = open_getadapter_fib(dev, arg);
+ break;
+ case FSACTL_GET_NEXT_ADAPTER_FIB:
+ status = next_getadapter_fib(dev, arg);
+ break;
+ case FSACTL_CLOSE_GET_ADAPTER_FIB:
+ status = close_getadapter_fib(dev, arg);
+ break;
+ case FSACTL_SEND_RAW_SRB:
+ status = aac_send_raw_srb(dev,arg);
+ break;
+ case FSACTL_GET_PCI_INFO:
+ status = aac_get_pci_info(dev,arg);
+ break;
+ default:
+ status = -ENOTTY;
+ break;
+ }
+ return status;
+}
+
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
new file mode 100644
index 000000000..45db84ad3
--- /dev/null
+++ b/drivers/scsi/aacraid/comminit.c
@@ -0,0 +1,586 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * comminit.c
+ *
+ * Abstract: This supports the initialization of the host adapter commuication interface.
+ * This is a platform dependent module for the pci cyclone board.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/mm.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static void aac_define_int_mode(struct aac_dev *dev);
+
+struct aac_common aac_config = {
+ .irq_mod = 1
+};
+
+static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
+{
+ unsigned char *base;
+ unsigned long size, align;
+ const unsigned long fibsize = dev->max_fib_size;
+ const unsigned long printfbufsiz = 256;
+ unsigned long host_rrq_size = 0;
+ struct aac_init *init;
+ dma_addr_t phys;
+ unsigned long aac_max_hostphysmempages;
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
+ host_rrq_size = (dev->scsi_host_ptr->can_queue
+ + AAC_NUM_MGT_FIB) * sizeof(u32);
+ size = fibsize + sizeof(struct aac_init) + commsize +
+ commalign + printfbufsiz + host_rrq_size;
+
+ base = pci_alloc_consistent(dev->pdev, size, &phys);
+
+ if(base == NULL)
+ {
+ printk(KERN_ERR "aacraid: unable to create mapping.\n");
+ return 0;
+ }
+ dev->comm_addr = (void *)base;
+ dev->comm_phys = phys;
+ dev->comm_size = size;
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ dev->host_rrq = (u32 *)(base + fibsize);
+ dev->host_rrq_pa = phys + fibsize;
+ memset(dev->host_rrq, 0, host_rrq_size);
+ }
+
+ dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+ dev->init_pa = phys + fibsize + host_rrq_size;
+
+ init = dev->init;
+
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
+ if (dev->max_fib_size != sizeof(struct hw_fib))
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
+ init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
+ init->fsrev = cpu_to_le32(dev->fsrev);
+
+ /*
+ * Adapter Fibs are the first thing allocated so that they
+ * start page aligned
+ */
+ dev->aif_base_va = (struct hw_fib *)base;
+
+ init->AdapterFibsVirtualAddress = 0;
+ init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
+ init->AdapterFibsSize = cpu_to_le32(fibsize);
+ init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
+ /*
+ * number of 4k pages of host physical memory. The aacraid fw needs
+ * this number to be less than 4gb worth of pages. New firmware doesn't
+ * have any issues with the mapping system, but older Firmware did, and
+ * had *troubles* dealing with the math overloading past 32 bits, thus
+ * we must limit this field.
+ */
+ aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
+ if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
+ init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
+ else
+ init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
+
+ init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+ INITFLAGS_DRIVER_SUPPORTS_PM);
+ init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+ init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
+ init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE) {
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+ dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+ init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+ dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n"));
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+ init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+ /* number of MSI-X */
+ init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
+ }
+
+ /*
+ * Increment the base address by the amount already used
+ */
+ base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+ phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
+ sizeof(struct aac_init));
+
+ /*
+ * Align the beginning of Headers to commalign
+ */
+ align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
+ base = base + align;
+ phys = phys + align;
+ /*
+ * Fill in addresses of the Comm Area Headers and Queues
+ */
+ *commaddr = base;
+ init->CommHeaderAddress = cpu_to_le32((u32)phys);
+ /*
+ * Increment the base address by the size of the CommArea
+ */
+ base = base + commsize;
+ phys = phys + commsize;
+ /*
+ * Place the Printf buffer area after the Fast I/O comm area.
+ */
+ dev->printfbuf = (void *)base;
+ init->printfbuf = cpu_to_le32(phys);
+ init->printfbufsiz = cpu_to_le32(printfbufsiz);
+ memset(base, 0, printfbufsiz);
+ return 1;
+}
+
+static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
+{
+ atomic_set(&q->numpending, 0);
+ q->dev = dev;
+ init_waitqueue_head(&q->cmdready);
+ INIT_LIST_HEAD(&q->cmdq);
+ init_waitqueue_head(&q->qfull);
+ spin_lock_init(&q->lockdata);
+ q->lock = &q->lockdata;
+ q->headers.producer = (__le32 *)mem;
+ q->headers.consumer = (__le32 *)(mem+1);
+ *(q->headers.producer) = cpu_to_le32(qsize);
+ *(q->headers.consumer) = cpu_to_le32(qsize);
+ q->entries = qsize;
+}
+
+/**
+ * aac_send_shutdown - shutdown an adapter
+ * @dev: Adapter to shutdown
+ *
+ * This routine will send a VM_CloseAll (shutdown) request to the adapter.
+ */
+
+int aac_send_shutdown(struct aac_dev * dev)
+{
+ struct fib * fibctx;
+ struct aac_close *cmd;
+ int status;
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return -ENOMEM;
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_close *) fib_data(fibctx);
+
+ cmd->command = cpu_to_le32(VM_CloseAll);
+ cmd->cid = cpu_to_le32(0xfffffffe);
+
+ status = aac_fib_send(ContainerCommand,
+ fibctx,
+ sizeof(struct aac_close),
+ FsaNormal,
+ -2 /* Timeout silently */, 1,
+ NULL, NULL);
+
+ if (status >= 0)
+ aac_fib_complete(fibctx);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibctx);
+ dev->adapter_shutdown = 1;
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9) &&
+ dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+ return status;
+}
+
+/**
+ * aac_comm_init - Initialise FSA data structures
+ * @dev: Adapter to initialise
+ *
+ * Initializes the data structures that are required for the FSA commuication
+ * interface to operate.
+ * Returns
+ * 1 - if we were able to init the commuication interface.
+ * 0 - If there were errors initing. This is a fatal error.
+ */
+
+static int aac_comm_init(struct aac_dev * dev)
+{
+ unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
+ unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
+ u32 *headers;
+ struct aac_entry * queues;
+ unsigned long size;
+ struct aac_queue_block * comm = dev->queues;
+ /*
+ * Now allocate and initialize the zone structures used as our
+ * pool of FIB context records. The size of the zone is based
+ * on the system memory size. We also initialize the mutex used
+ * to protect the zone.
+ */
+ spin_lock_init(&dev->fib_lock);
+
+ /*
+ * Allocate the physically contiguous space for the commuication
+ * queue headers.
+ */
+
+ size = hdrsize + queuesize;
+
+ if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
+ return -ENOMEM;
+
+ queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
+
+ /* Adapter to Host normal priority Command queue */
+ comm->queue[HostNormCmdQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
+ queues += HOST_NORM_CMD_ENTRIES;
+ headers += 2;
+
+ /* Adapter to Host high priority command queue */
+ comm->queue[HostHighCmdQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
+
+ queues += HOST_HIGH_CMD_ENTRIES;
+ headers +=2;
+
+ /* Host to adapter normal priority command queue */
+ comm->queue[AdapNormCmdQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
+
+ queues += ADAP_NORM_CMD_ENTRIES;
+ headers += 2;
+
+ /* host to adapter high priority command queue */
+ comm->queue[AdapHighCmdQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
+
+ queues += ADAP_HIGH_CMD_ENTRIES;
+ headers += 2;
+
+ /* adapter to host normal priority response queue */
+ comm->queue[HostNormRespQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
+ queues += HOST_NORM_RESP_ENTRIES;
+ headers += 2;
+
+ /* adapter to host high priority response queue */
+ comm->queue[HostHighRespQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
+
+ queues += HOST_HIGH_RESP_ENTRIES;
+ headers += 2;
+
+ /* host to adapter normal priority response queue */
+ comm->queue[AdapNormRespQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
+
+ queues += ADAP_NORM_RESP_ENTRIES;
+ headers += 2;
+
+ /* host to adapter high priority response queue */
+ comm->queue[AdapHighRespQueue].base = queues;
+ aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
+
+ comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
+ comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
+ comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
+ comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
+
+ return 0;
+}
+
+struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+{
+ u32 status[5];
+ struct Scsi_Host * host = dev->scsi_host_ptr;
+ extern int aac_sync_mode;
+
+ /*
+ * Check the preferred comm settings, defaults from template.
+ */
+ dev->management_fib_count = 0;
+ spin_lock_init(&dev->manage_lock);
+ spin_lock_init(&dev->sync_lock);
+ dev->max_fib_size = sizeof(struct hw_fib);
+ dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
+ - sizeof(struct aac_fibhdr)
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
+ dev->comm_interface = AAC_COMM_PRODUCER;
+ dev->raw_io_interface = dev->raw_io_64 = 0;
+
+ if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+ 0, 0, 0, 0, 0, 0,
+ status+0, status+1, status+2, status+3, NULL)) &&
+ (status[0] == 0x00000001)) {
+ dev->doorbell_mask = status[3];
+ if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
+ dev->raw_io_64 = 1;
+ dev->sync_mode = aac_sync_mode;
+ if (dev->a_ops.adapter_comm &&
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
+ dev->comm_interface = AAC_COMM_MESSAGE;
+ dev->raw_io_interface = 1;
+ if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+ /* driver supports TYPE1 (Tupelo) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+ } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+ /* driver supports TYPE2 (Denali) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+ } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
+ /* driver doesn't TYPE3 and TYPE4 */
+ /* switch to sync. mode */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+ dev->sync_mode = 1;
+ }
+ }
+ if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
+ (status[2] > dev->base_size)) {
+ aac_adapter_ioremap(dev, 0);
+ dev->base_size = status[2];
+ if (aac_adapter_ioremap(dev, status[2])) {
+ /* remap failed, go back ... */
+ dev->comm_interface = AAC_COMM_PRODUCER;
+ if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
+ printk(KERN_WARNING
+ "aacraid: unable to map adapter.\n");
+ return NULL;
+ }
+ }
+ }
+ }
+ dev->max_msix = 0;
+ dev->msi_enabled = 0;
+ dev->adapter_shutdown = 0;
+ if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
+ 0, 0, 0, 0, 0, 0,
+ status+0, status+1, status+2, status+3, status+4))
+ && (status[0] == 0x00000001)) {
+ /*
+ * status[1] >> 16 maximum command size in KB
+ * status[1] & 0xFFFF maximum FIB size
+ * status[2] >> 16 maximum SG elements to driver
+ * status[2] & 0xFFFF maximum SG elements from driver
+ * status[3] & 0xFFFF maximum number FIBs outstanding
+ */
+ host->max_sectors = (status[1] >> 16) << 1;
+ /* Multiple of 32 for PMC */
+ dev->max_fib_size = status[1] & 0xFFE0;
+ host->sg_tablesize = status[2] >> 16;
+ dev->sg_tablesize = status[2] & 0xFFFF;
+ if (dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+ (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+ else
+ host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ dev->max_num_aif = status[4] & 0xFFFF;
+ /*
+ * NOTE:
+ * All these overrides are based on a fixed internal
+ * knowledge and understanding of existing adapters,
+ * acbsize should be set with caution.
+ */
+ if (acbsize == 512) {
+ host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
+ dev->max_fib_size = 512;
+ dev->sg_tablesize = host->sg_tablesize
+ = (512 - sizeof(struct aac_fibhdr)
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
+ host->can_queue = AAC_NUM_IO_FIB;
+ } else if (acbsize == 2048) {
+ host->max_sectors = 512;
+ dev->max_fib_size = 2048;
+ host->sg_tablesize = 65;
+ dev->sg_tablesize = 81;
+ host->can_queue = 512 - AAC_NUM_MGT_FIB;
+ } else if (acbsize == 4096) {
+ host->max_sectors = 1024;
+ dev->max_fib_size = 4096;
+ host->sg_tablesize = 129;
+ dev->sg_tablesize = 166;
+ host->can_queue = 256 - AAC_NUM_MGT_FIB;
+ } else if (acbsize == 8192) {
+ host->max_sectors = 2048;
+ dev->max_fib_size = 8192;
+ host->sg_tablesize = 257;
+ dev->sg_tablesize = 337;
+ host->can_queue = 128 - AAC_NUM_MGT_FIB;
+ } else if (acbsize > 0) {
+ printk("Illegal acbsize=%d ignored\n", acbsize);
+ }
+ }
+ {
+
+ if (numacb > 0) {
+ if (numacb < host->can_queue)
+ host->can_queue = numacb;
+ else
+ printk("numacb=%d ignored\n", numacb);
+ }
+ }
+
+ if (host->can_queue > AAC_NUM_IO_FIB)
+ host->can_queue = AAC_NUM_IO_FIB;
+
+ if (dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ aac_define_int_mode(dev);
+ /*
+ * Ok now init the communication subsystem
+ */
+
+ dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+ if (dev->queues == NULL) {
+ printk(KERN_ERR "Error could not allocate comm region.\n");
+ return NULL;
+ }
+
+ if (aac_comm_init(dev)<0){
+ kfree(dev->queues);
+ return NULL;
+ }
+ /*
+ * Initialize the list of fibs
+ */
+ if (aac_fib_setup(dev) < 0) {
+ kfree(dev->queues);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&dev->fib_list);
+ INIT_LIST_HEAD(&dev->sync_fib_list);
+
+ return dev;
+}
+
+static void aac_define_int_mode(struct aac_dev *dev)
+{
+
+ int i, msi_count;
+
+ msi_count = i = 0;
+ /* max. vectors from GET_COMM_PREFERRED_SETTINGS */
+ if (dev->max_msix == 0 ||
+ dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->sync_mode) {
+ dev->max_msix = 1;
+ dev->vector_cap =
+ dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB;
+ return;
+ }
+
+ msi_count = min(dev->max_msix,
+ (unsigned int)num_online_cpus());
+
+ dev->max_msix = msi_count;
+
+ if (msi_count > AAC_MAX_MSIX)
+ msi_count = AAC_MAX_MSIX;
+
+ for (i = 0; i < msi_count; i++)
+ dev->msixentry[i].entry = i;
+
+ if (msi_count > 1 &&
+ pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
+ i = pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count);
+ /* Check how many MSIX vectors are allocated */
+ if (i >= 0) {
+ dev->msi_enabled = 1;
+ if (i) {
+ msi_count = i;
+ if (pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count)) {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+ } else {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled) {
+ msi_count = 1;
+ i = pci_enable_msi(dev->pdev);
+
+ if (!i) {
+ dev->msi_enabled = 1;
+ dev->msi = 1;
+ } else {
+ printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled)
+ dev->max_msix = msi_count = 1;
+ else {
+ if (dev->max_msix > msi_count)
+ dev->max_msix = msi_count;
+ }
+ dev->vector_cap =
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
+ msi_count;
+}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
new file mode 100644
index 000000000..4da574925
--- /dev/null
+++ b/drivers/scsi/aacraid/commsup.c
@@ -0,0 +1,1980 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * commsup.c
+ *
+ * Abstract: Contain all routines that are required for FSA host/adapter
+ * communication.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include "aacraid.h"
+
+/**
+ * fib_map_alloc - allocate the fib objects
+ * @dev: Adapter to allocate for
+ *
+ * Allocate and map the shared PCI space for the FIB blocks used to
+ * talk to the Adaptec firmware.
+ */
+
+static int fib_map_alloc(struct aac_dev *dev)
+{
+ dprintk((KERN_INFO
+ "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
+ dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
+ AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
+ dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+ (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+ * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
+ &dev->hw_fib_pa);
+ if (dev->hw_fib_va == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * aac_fib_map_free - free the fib objects
+ * @dev: Adapter to free
+ *
+ * Free the PCI mappings and the memory allocated for FIB blocks
+ * on this adapter.
+ */
+
+void aac_fib_map_free(struct aac_dev *dev)
+{
+ pci_free_consistent(dev->pdev,
+ dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ dev->hw_fib_va = NULL;
+ dev->hw_fib_pa = 0;
+}
+
+/**
+ * aac_fib_setup - setup the fibs
+ * @dev: Adapter to set up
+ *
+ * Allocate the PCI space for the fibs, map it and then initialise the
+ * fib area, the unmapped fib data and also the free list
+ */
+
+int aac_fib_setup(struct aac_dev * dev)
+{
+ struct fib *fibptr;
+ struct hw_fib *hw_fib;
+ dma_addr_t hw_fib_pa;
+ int i;
+
+ while (((i = fib_map_alloc(dev)) == -ENOMEM)
+ && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
+ dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
+ dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
+ }
+ if (i<0)
+ return -ENOMEM;
+
+ /* 32 byte alignment for PMC */
+ hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+ dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ (hw_fib_pa - dev->hw_fib_pa));
+ dev->hw_fib_pa = hw_fib_pa;
+ memset(dev->hw_fib_va, 0,
+ (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+
+ /* add Xport header */
+ dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ sizeof(struct aac_fib_xporthdr));
+ dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+
+ hw_fib = dev->hw_fib_va;
+ hw_fib_pa = dev->hw_fib_pa;
+ /*
+ * Initialise the fibs
+ */
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++)
+ {
+ fibptr->flags = 0;
+ fibptr->dev = dev;
+ fibptr->hw_fib_va = hw_fib;
+ fibptr->data = (void *) fibptr->hw_fib_va->data;
+ fibptr->next = fibptr+1; /* Forward chain the fibs */
+ sema_init(&fibptr->event_wait, 0);
+ spin_lock_init(&fibptr->event_lock);
+ hw_fib->header.XferState = cpu_to_le32(0xffffffff);
+ hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
+ fibptr->hw_fib_pa = hw_fib_pa;
+ hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+ dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+ hw_fib_pa = hw_fib_pa +
+ dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ }
+ /*
+ * Add the fib chain to the free list
+ */
+ dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
+ /*
+ * Enable this to debug out of queue space
+ */
+ dev->free_fib = &dev->fibs[0];
+ return 0;
+}
+
+/**
+ * aac_fib_alloc - allocate a fib
+ * @dev: Adapter to allocate the fib for
+ *
+ * Allocate a fib from the adapter fib pool. If the pool is empty we
+ * return NULL.
+ */
+
+struct fib *aac_fib_alloc(struct aac_dev *dev)
+{
+ struct fib * fibptr;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->fib_lock, flags);
+ fibptr = dev->free_fib;
+ if(!fibptr){
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ return fibptr;
+ }
+ dev->free_fib = fibptr->next;
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
+ /*
+ * Set the proper node type code and node byte size
+ */
+ fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+ fibptr->size = sizeof(struct fib);
+ /*
+ * Null out fields that depend on being zero at the start of
+ * each I/O
+ */
+ fibptr->hw_fib_va->header.XferState = 0;
+ fibptr->flags = 0;
+ fibptr->callback = NULL;
+ fibptr->callback_data = NULL;
+
+ return fibptr;
+}
+
+/**
+ * aac_fib_free - free a fib
+ * @fibptr: fib to free up
+ *
+ * Frees up a fib and places it on the appropriate queue
+ */
+
+void aac_fib_free(struct fib *fibptr)
+{
+ unsigned long flags;
+
+ if (fibptr->done == 2)
+ return;
+
+ spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
+ if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+ aac_config.fib_timeouts++;
+ if (fibptr->hw_fib_va->header.XferState != 0) {
+ printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
+ (void*)fibptr,
+ le32_to_cpu(fibptr->hw_fib_va->header.XferState));
+ }
+ fibptr->next = fibptr->dev->free_fib;
+ fibptr->dev->free_fib = fibptr;
+ spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
+}
+
+/**
+ * aac_fib_init - initialise a fib
+ * @fibptr: The fib to initialize
+ *
+ * Set up the generic fib fields ready for use
+ */
+
+void aac_fib_init(struct fib *fibptr)
+{
+ struct hw_fib *hw_fib = fibptr->hw_fib_va;
+
+ memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
+ hw_fib->header.StructType = FIB_MAGIC;
+ hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
+ hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
+ hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+ hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
+}
+
+/**
+ * fib_deallocate - deallocate a fib
+ * @fibptr: fib to deallocate
+ *
+ * Will deallocate and return to the free pool the FIB pointed to by the
+ * caller.
+ */
+
+static void fib_dealloc(struct fib * fibptr)
+{
+ struct hw_fib *hw_fib = fibptr->hw_fib_va;
+ hw_fib->header.XferState = 0;
+}
+
+/*
+ * Commuication primitives define and support the queuing method we use to
+ * support host to adapter commuication. All queue accesses happen through
+ * these routines and are the only routines which have a knowledge of the
+ * how these queues are implemented.
+ */
+
+/**
+ * aac_get_entry - get a queue entry
+ * @dev: Adapter
+ * @qid: Queue Number
+ * @entry: Entry return
+ * @index: Index return
+ * @nonotify: notification control
+ *
+ * With a priority the routine returns a queue entry if the queue has free entries. If the queue
+ * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
+ * returned.
+ */
+
+static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
+{
+ struct aac_queue * q;
+ unsigned long idx;
+
+ /*
+ * All of the queues wrap when they reach the end, so we check
+ * to see if they have reached the end and if they have we just
+ * set the index back to zero. This is a wrap. You could or off
+ * the high bits in all updates but this is a bit faster I think.
+ */
+
+ q = &dev->queues->queue[qid];
+
+ idx = *index = le32_to_cpu(*(q->headers.producer));
+ /* Interrupt Moderation, only interrupt for first two entries */
+ if (idx != le32_to_cpu(*(q->headers.consumer))) {
+ if (--idx == 0) {
+ if (qid == AdapNormCmdQueue)
+ idx = ADAP_NORM_CMD_ENTRIES;
+ else
+ idx = ADAP_NORM_RESP_ENTRIES;
+ }
+ if (idx != le32_to_cpu(*(q->headers.consumer)))
+ *nonotify = 1;
+ }
+
+ if (qid == AdapNormCmdQueue) {
+ if (*index >= ADAP_NORM_CMD_ENTRIES)
+ *index = 0; /* Wrap to front of the Producer Queue. */
+ } else {
+ if (*index >= ADAP_NORM_RESP_ENTRIES)
+ *index = 0; /* Wrap to front of the Producer Queue. */
+ }
+
+ /* Queue is full */
+ if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
+ printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
+ qid, atomic_read(&q->numpending));
+ return 0;
+ } else {
+ *entry = q->base + *index;
+ return 1;
+ }
+}
+
+/**
+ * aac_queue_get - get the next free QE
+ * @dev: Adapter
+ * @index: Returned index
+ * @priority: Priority of fib
+ * @fib: Fib to associate with the queue entry
+ * @wait: Wait if queue full
+ * @fibptr: Driver fib object to go with fib
+ * @nonotify: Don't notify the adapter
+ *
+ * Gets the next free QE off the requested priorty adapter command
+ * queue and associates the Fib with the QE. The QE represented by
+ * index is ready to insert on the queue when this routine returns
+ * success.
+ */
+
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+{
+ struct aac_entry * entry = NULL;
+ int map = 0;
+
+ if (qid == AdapNormCmdQueue) {
+ /* if no entries wait for some if caller wants to */
+ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+ printk(KERN_ERR "GetEntries failed\n");
+ }
+ /*
+ * Setup queue entry with a command, status and fib mapped
+ */
+ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+ map = 1;
+ } else {
+ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+ /* if no entries wait for some if caller wants to */
+ }
+ /*
+ * Setup queue entry with command, status and fib mapped
+ */
+ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+ entry->addr = hw_fib->header.SenderFibAddress;
+ /* Restore adapters pointer to the FIB */
+ hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
+ map = 0;
+ }
+ /*
+ * If MapFib is true than we need to map the Fib and put pointers
+ * in the queue entry.
+ */
+ if (map)
+ entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
+ return 0;
+}
+
+/*
+ * Define the highest level of host to adapter communication routines.
+ * These routines will support host to adapter FS commuication. These
+ * routines have no knowledge of the commuication method used. This level
+ * sends and receives FIBs. This level has no knowledge of how these FIBs
+ * get passed back and forth.
+ */
+
+/**
+ * aac_fib_send - send a fib to the adapter
+ * @command: Command to send
+ * @fibptr: The fib
+ * @size: Size of fib data area
+ * @priority: Priority of Fib
+ * @wait: Async/sync select
+ * @reply: True if a reply is wanted
+ * @callback: Called with reply
+ * @callback_data: Passed to callback
+ *
+ * Sends the requested FIB to the adapter and optionally will wait for a
+ * response FIB. If the caller does not wish to wait for a response than
+ * an event to wait on must be supplied. This event will be set when a
+ * response FIB is received from the adapter.
+ */
+
+int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+ int priority, int wait, int reply, fib_callback callback,
+ void *callback_data)
+{
+ struct aac_dev * dev = fibptr->dev;
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
+ unsigned long flags = 0;
+ unsigned long mflags = 0;
+ unsigned long sflags = 0;
+
+
+ if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
+ return -EBUSY;
+ /*
+ * There are 5 cases with the wait and response requested flags.
+ * The only invalid cases are if the caller requests to wait and
+ * does not request a response and if the caller does not want a
+ * response and the Fib is not allocated from pool. If a response
+ * is not requesed the Fib will just be deallocaed by the DPC
+ * routine when the response comes back from the adapter. No
+ * further processing will be done besides deleting the Fib. We
+ * will have a debug mode where the adapter can notify the host
+ * it had a problem and the host can log that fact.
+ */
+ fibptr->flags = 0;
+ if (wait && !reply) {
+ return -EINVAL;
+ } else if (!wait && reply) {
+ hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
+ FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
+ } else if (!wait && !reply) {
+ hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
+ FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
+ } else if (wait && reply) {
+ hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
+ FIB_COUNTER_INCREMENT(aac_config.NormalSent);
+ }
+ /*
+ * Map the fib into 32bits by using the fib number
+ */
+
+ hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
+ hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
+ /*
+ * Set FIB state to indicate where it came from and if we want a
+ * response from the adapter. Also load the command from the
+ * caller.
+ *
+ * Map the hw fib pointer as a 32bit value
+ */
+ hw_fib->header.Command = cpu_to_le16(command);
+ hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
+ /*
+ * Set the size of the Fib we want to send to the adapter
+ */
+ hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
+ if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
+ return -EMSGSIZE;
+ }
+ /*
+ * Get a queue entry connect the FIB to it and send an notify
+ * the adapter a command is ready.
+ */
+ hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
+
+ /*
+ * Fill in the Callback and CallbackContext if we are not
+ * going to wait.
+ */
+ if (!wait) {
+ fibptr->callback = callback;
+ fibptr->callback_data = callback_data;
+ fibptr->flags = FIB_CONTEXT_FLAG;
+ }
+
+ fibptr->done = 0;
+
+ FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+ dprintk((KERN_DEBUG "Fib contents:.\n"));
+ dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
+ dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
+ dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
+ dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
+ dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+ dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
+
+ if (!dev->queues)
+ return -EBUSY;
+
+ if (wait) {
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+ printk(KERN_INFO "No management Fibs Available:%d\n",
+ dev->management_fib_count);
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ return -EBUSY;
+ }
+ dev->management_fib_count++;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ }
+
+ if (dev->sync_mode) {
+ if (wait)
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (dev->sync_fib) {
+ list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ } else {
+ dev->sync_fib = fibptr;
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ if (wait) {
+ fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+ if (down_interruptible(&fibptr->event_wait)) {
+ fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
+ return -EFAULT;
+ }
+ return 0;
+ }
+ return -EINPROGRESS;
+ }
+
+ if (aac_adapter_deliver(fibptr) != 0) {
+ printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
+ if (wait) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ }
+ return -EBUSY;
+ }
+
+
+ /*
+ * If the caller wanted us to wait for response wait now.
+ */
+
+ if (wait) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ /* Only set for first known interruptable command */
+ if (wait < 0) {
+ /*
+ * *VERY* Dangerous to time out a command, the
+ * assumption is made that we have no hope of
+ * functioning because an interrupt routing or other
+ * hardware failure has occurred.
+ */
+ unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
+ while (down_trylock(&fibptr->event_wait)) {
+ int blink;
+ if (time_is_before_eq_jiffies(timeout)) {
+ struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
+ atomic_dec(&q->numpending);
+ if (wait == -1) {
+ printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
+ "Usually a result of a PCI interrupt routing problem;\n"
+ "update mother board BIOS or consider utilizing one of\n"
+ "the SAFE mode kernel options (acpi, apic etc)\n");
+ }
+ return -ETIMEDOUT;
+ }
+ if ((blink = aac_adapter_check_health(dev)) > 0) {
+ if (wait == -1) {
+ printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
+ "Usually a result of a serious unrecoverable hardware problem\n",
+ blink);
+ }
+ return -EFAULT;
+ }
+ /* We used to udelay() here but that absorbed
+ * a CPU when a timeout occured. Not very
+ * useful. */
+ cpu_relax();
+ }
+ } else if (down_interruptible(&fibptr->event_wait)) {
+ /* Do nothing ... satisfy
+ * down_interruptible must_check */
+ }
+
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ if (fibptr->done == 0) {
+ fibptr->done = 2; /* Tell interrupt we aborted */
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ return -ERESTARTSYS;
+ }
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ BUG_ON(fibptr->done == 0);
+
+ if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+ return -ETIMEDOUT;
+ return 0;
+ }
+ /*
+ * If the user does not want a response than return success otherwise
+ * return pending
+ */
+ if (reply)
+ return -EINPROGRESS;
+ else
+ return 0;
+}
+
+/**
+ * aac_consumer_get - get the top of the queue
+ * @dev: Adapter
+ * @q: Queue
+ * @entry: Return entry
+ *
+ * Will return a pointer to the entry on the top of the queue requested that
+ * we are a consumer of, and return the address of the queue entry. It does
+ * not change the state of the queue.
+ */
+
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
+{
+ u32 index;
+ int status;
+ if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
+ status = 0;
+ } else {
+ /*
+ * The consumer index must be wrapped if we have reached
+ * the end of the queue, else we just use the entry
+ * pointed to by the header index
+ */
+ if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+ index = 0;
+ else
+ index = le32_to_cpu(*q->headers.consumer);
+ *entry = q->base + index;
+ status = 1;
+ }
+ return(status);
+}
+
+/**
+ * aac_consumer_free - free consumer entry
+ * @dev: Adapter
+ * @q: Queue
+ * @qid: Queue ident
+ *
+ * Frees up the current top of the queue we are a consumer of. If the
+ * queue was full notify the producer that the queue is no longer full.
+ */
+
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
+{
+ int wasfull = 0;
+ u32 notify;
+
+ if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
+ wasfull = 1;
+
+ if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+ *q->headers.consumer = cpu_to_le32(1);
+ else
+ le32_add_cpu(q->headers.consumer, 1);
+
+ if (wasfull) {
+ switch (qid) {
+
+ case HostNormCmdQueue:
+ notify = HostNormCmdNotFull;
+ break;
+ case HostNormRespQueue:
+ notify = HostNormRespNotFull;
+ break;
+ default:
+ BUG();
+ return;
+ }
+ aac_adapter_notify(dev, notify);
+ }
+}
+
+/**
+ * aac_fib_adapter_complete - complete adapter issued fib
+ * @fibptr: fib to complete
+ * @size: size of fib
+ *
+ * Will do all necessary work to complete a FIB that was sent from
+ * the adapter.
+ */
+
+int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
+{
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
+ struct aac_dev * dev = fibptr->dev;
+ struct aac_queue * q;
+ unsigned long nointr = 0;
+ unsigned long qflags;
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ kfree(hw_fib);
+ return 0;
+ }
+
+ if (hw_fib->header.XferState == 0) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE)
+ kfree(hw_fib);
+ return 0;
+ }
+ /*
+ * If we plan to do anything check the structure type first.
+ */
+ if (hw_fib->header.StructType != FIB_MAGIC &&
+ hw_fib->header.StructType != FIB_MAGIC2 &&
+ hw_fib->header.StructType != FIB_MAGIC2_64) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE)
+ kfree(hw_fib);
+ return -EINVAL;
+ }
+ /*
+ * This block handles the case where the adapter had sent us a
+ * command and we have finished processing the command. We
+ * call completeFib when we are done processing the command
+ * and want to send a response back to the adapter. This will
+ * send the completed cdb to the adapter.
+ */
+ if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE) {
+ kfree (hw_fib);
+ } else {
+ u32 index;
+ hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
+ if (size) {
+ size += sizeof(struct aac_fibhdr);
+ if (size > le16_to_cpu(hw_fib->header.SenderSize))
+ return -EMSGSIZE;
+ hw_fib->header.Size = cpu_to_le16(size);
+ }
+ q = &dev->queues->queue[AdapNormRespQueue];
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+ *(q->headers.producer) = cpu_to_le32(index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & (int)aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormRespQueue);
+ }
+ } else {
+ printk(KERN_WARNING "aac_fib_adapter_complete: "
+ "Unknown xferstate detected.\n");
+ BUG();
+ }
+ return 0;
+}
+
+/**
+ * aac_fib_complete - fib completion handler
+ * @fib: FIB to complete
+ *
+ * Will do all necessary work to complete a FIB.
+ */
+
+int aac_fib_complete(struct fib *fibptr)
+{
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
+
+ /*
+ * Check for a fib which has already been completed
+ */
+
+ if (hw_fib->header.XferState == 0)
+ return 0;
+ /*
+ * If we plan to do anything check the structure type first.
+ */
+
+ if (hw_fib->header.StructType != FIB_MAGIC &&
+ hw_fib->header.StructType != FIB_MAGIC2 &&
+ hw_fib->header.StructType != FIB_MAGIC2_64)
+ return -EINVAL;
+ /*
+ * This block completes a cdb which orginated on the host and we
+ * just need to deallocate the cdb or reinit it. At this point the
+ * command is complete that we had sent to the adapter and this
+ * cdb could be reused.
+ */
+
+ if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
+ (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
+ {
+ fib_dealloc(fibptr);
+ }
+ else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
+ {
+ /*
+ * This handles the case when the host has aborted the I/O
+ * to the adapter because the adapter is not responding
+ */
+ fib_dealloc(fibptr);
+ } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
+ fib_dealloc(fibptr);
+ } else {
+ BUG();
+ }
+ return 0;
+}
+
+/**
+ * aac_printf - handle printf from firmware
+ * @dev: Adapter
+ * @val: Message info
+ *
+ * Print a message passed to us by the controller firmware on the
+ * Adaptec board
+ */
+
+void aac_printf(struct aac_dev *dev, u32 val)
+{
+ char *cp = dev->printfbuf;
+ if (dev->printf_enabled)
+ {
+ int length = val & 0xffff;
+ int level = (val >> 16) & 0xffff;
+
+ /*
+ * The size of the printfbuf is set in port.c
+ * There is no variable or define for it
+ */
+ if (length > 255)
+ length = 255;
+ if (cp[length] != 0)
+ cp[length] = 0;
+ if (level == LOG_AAC_HIGH_ERROR)
+ printk(KERN_WARNING "%s:%s", dev->name, cp);
+ else
+ printk(KERN_INFO "%s:%s", dev->name, cp);
+ }
+ memset(cp, 0, 256);
+}
+
+
+/**
+ * aac_handle_aif - Handle a message from the firmware
+ * @dev: Which adapter this fib is from
+ * @fibptr: Pointer to fibptr from adapter
+ *
+ * This routine handles a driver notify fib from the adapter and
+ * dispatches it to the appropriate routine for handling.
+ */
+
+#define AIF_SNIFF_TIMEOUT (500*HZ)
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
+ struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+ u32 channel, id, lun, container;
+ struct scsi_device *device;
+ enum {
+ NOTHING,
+ DELETE,
+ ADD,
+ CHANGE
+ } device_config_needed = NOTHING;
+
+ /* Sniff for container changes */
+
+ if (!dev || !dev->fsa_dev)
+ return;
+ container = channel = id = lun = (u32)-1;
+
+ /*
+ * We have set this up to try and minimize the number of
+ * re-configures that take place. As a result of this when
+ * certain AIF's come in we will set a flag waiting for another
+ * type of AIF before setting the re-config flag.
+ */
+ switch (le32_to_cpu(aifcmd->command)) {
+ case AifCmdDriverNotify:
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ case AifRawDeviceRemove:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ if (id >= dev->maximum_num_physicals) {
+ container = (u32)-1;
+ break;
+ }
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[0] ==
+ cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
+
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(
+ dev->scsi_host_ptr,
+ channel, id, lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
+ break;
+ /*
+ * Morph or Expand complete
+ */
+ case AifDenMorphComplete:
+ case AifDenVolumeExtendComplete:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+
+ /*
+ * Find the scsi_device associated with the SCSI
+ * address. Make sure we have the right array, and if
+ * so set the flag to initiate a new re-config once we
+ * see an AifEnConfigChange AIF come through.
+ */
+
+ if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ if (device) {
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ scsi_device_put(device);
+ }
+ }
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdEventNotify:
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ case AifEnBatteryEvent:
+ dev->cache_protected =
+ (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
+ break;
+ /*
+ * Add an Array.
+ */
+ case AifEnAddContainer:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = ADD;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ break;
+
+ /*
+ * Delete an Array.
+ */
+ case AifEnDeleteContainer:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = DELETE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ break;
+
+ /*
+ * Container change detected. If we currently are not
+ * waiting on something else, setup to wait on a Config Change.
+ */
+ case AifEnContainerChange:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ break;
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ break;
+
+ case AifEnConfigChange:
+ break;
+
+ case AifEnAddJBOD:
+ case AifEnDeleteJBOD:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ if (id >= dev->maximum_num_physicals) {
+ container = (u32)-1;
+ break;
+ }
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[0] ==
+ cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ channel,
+ id,
+ lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
+ break;
+
+ case AifEnEnclosureManagement:
+ /*
+ * If in JBOD mode, automatic exposure of new
+ * physical target to be suppressed until configured.
+ */
+ if (dev->jbod)
+ break;
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
+ case EM_DRIVE_INSERTION:
+ case EM_DRIVE_REMOVAL:
+ case EM_SES_DRIVE_INSERTION:
+ case EM_SES_DRIVE_REMOVAL:
+ container = le32_to_cpu(
+ ((__le32 *)aifcmd->data)[2]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ if (id >= dev->maximum_num_physicals) {
+ /* legacy dev_t ? */
+ if ((0x2000 <= id) || lun || channel ||
+ ((channel = (id >> 7) & 0x3F) >=
+ dev->maximum_num_channels))
+ break;
+ lun = (id >> 4) & 7;
+ id &= 0xF;
+ }
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ ((((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_DRIVE_INSERTION)) ||
+ (((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
+ ADD : DELETE;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdJobProgress:
+ /*
+ * These are job progress AIF's. When a Clear is being
+ * done on a container it is initially created then hidden from
+ * the OS. When the clear completes we don't get a config
+ * change so we monitor the job status complete on a clear then
+ * wait for a container change.
+ */
+
+ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+ (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
+ ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = ADD;
+ dev->fsa_dev[container].config_waiting_stamp =
+ jiffies;
+ }
+ }
+ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+ ((__le32 *)aifcmd->data)[6] == 0 &&
+ ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = DELETE;
+ dev->fsa_dev[container].config_waiting_stamp =
+ jiffies;
+ }
+ }
+ break;
+ }
+
+ container = 0;
+retry_next:
+ if (device_config_needed == NOTHING)
+ for (; container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on == 0) &&
+ (dev->fsa_dev[container].config_needed != NOTHING) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
+ device_config_needed =
+ dev->fsa_dev[container].config_needed;
+ dev->fsa_dev[container].config_needed = NOTHING;
+ channel = CONTAINER_TO_CHANNEL(container);
+ id = CONTAINER_TO_ID(container);
+ lun = CONTAINER_TO_LUN(container);
+ break;
+ }
+ }
+ if (device_config_needed == NOTHING)
+ return;
+
+ /*
+ * If we decided that a re-configuration needs to be done,
+ * schedule it here on the way out the door, please close the door
+ * behind you.
+ */
+
+ /*
+ * Find the scsi_device associated with the SCSI address,
+ * and mark it as changed, invalidating the cache. This deals
+ * with changes to existing device IDs.
+ */
+
+ if (!dev || !dev->scsi_host_ptr)
+ return;
+ /*
+ * force reload of disk info via aac_probe_container
+ */
+ if ((channel == CONTAINER_CHANNEL) &&
+ (device_config_needed != NOTHING)) {
+ if (dev->fsa_dev[container].valid == 1)
+ dev->fsa_dev[container].valid = 2;
+ aac_probe_container(dev, container);
+ }
+ device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
+ if (device) {
+ switch (device_config_needed) {
+ case DELETE:
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
+ if (scsi_device_online(device)) {
+ scsi_device_set_state(device, SDEV_OFFLINE);
+ sdev_printk(KERN_INFO, device,
+ "Device offlined - %s\n",
+ (channel == CONTAINER_CHANNEL) ?
+ "array deleted" :
+ "enclosure services event");
+ }
+#endif
+ break;
+ case ADD:
+ if (!scsi_device_online(device)) {
+ sdev_printk(KERN_INFO, device,
+ "Device online - %s\n",
+ (channel == CONTAINER_CHANNEL) ?
+ "array created" :
+ "enclosure services event");
+ scsi_device_set_state(device, SDEV_RUNNING);
+ }
+ /* FALLTHRU */
+ case CHANGE:
+ if ((channel == CONTAINER_CHANNEL)
+ && (!dev->fsa_dev[container].valid)) {
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
+ if (!scsi_device_online(device))
+ break;
+ scsi_device_set_state(device, SDEV_OFFLINE);
+ sdev_printk(KERN_INFO, device,
+ "Device offlined - %s\n",
+ "array failed");
+#endif
+ break;
+ }
+ scsi_rescan_device(&device->sdev_gendev);
+
+ default:
+ break;
+ }
+ scsi_device_put(device);
+ device_config_needed = NOTHING;
+ }
+ if (device_config_needed == ADD)
+ scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
+ if (channel == CONTAINER_CHANNEL) {
+ container++;
+ device_config_needed = NOTHING;
+ goto retry_next;
+ }
+}
+
+static int _aac_reset_adapter(struct aac_dev *aac, int forced)
+{
+ int index, quirks;
+ int retval, i;
+ struct Scsi_Host *host;
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ struct scsi_cmnd *command_list;
+ int jafo = 0;
+ int cpu;
+
+ /*
+ * Assumptions:
+ * - host is locked, unless called by the aacraid thread.
+ * (a matter of convenience, due to legacy issues surrounding
+ * eh_host_adapter_reset).
+ * - in_reset is asserted, so no new i/o is getting to the
+ * card.
+ * - The card is dead, or will be very shortly ;-/ so no new
+ * commands are completing in the interrupt service.
+ */
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ aac_adapter_disable_int(aac);
+ if (aac->thread->pid != current->pid) {
+ spin_unlock_irq(host->host_lock);
+ kthread_stop(aac->thread);
+ jafo = 1;
+ }
+
+ /*
+ * If a positive health, means in a known DEAD PANIC
+ * state and the adapter could be reset to `try again'.
+ */
+ retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
+
+ if (retval)
+ goto out;
+
+ /*
+ * Loop through the fibs, close the synchronous FIBS
+ */
+ for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+ struct fib *fib = &aac->fibs[index];
+ if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
+ unsigned long flagv;
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ up(&fib->event_wait);
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ schedule();
+ retval = 0;
+ }
+ }
+ /* Give some extra time for ioctls to complete. */
+ if (retval == 0)
+ ssleep(2);
+ index = aac->cardtype;
+
+ /*
+ * Re-initialize the adapter, first free resources, then carefully
+ * apply the initialization sequence to come back again. Only risk
+ * is a change in Firmware dropping cache, it is assumed the caller
+ * will ensure that i/o is queisced and the card is flushed in that
+ * case.
+ */
+ aac_fib_map_free(aac);
+ pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+ aac->comm_addr = NULL;
+ aac->comm_phys = 0;
+ kfree(aac->queues);
+ aac->queues = NULL;
+ cpu = cpumask_first(cpu_online_mask);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++) {
+ if (irq_set_affinity_hint(
+ aac->msixentry[i].vector,
+ NULL)) {
+ printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+ aac->name,
+ aac->id,
+ cpu);
+ }
+ cpu = cpumask_next(cpu,
+ cpu_online_mask);
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ }
+ pci_disable_msix(aac->pdev);
+ } else {
+ free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
+ kfree(aac->fsa_dev);
+ aac->fsa_dev = NULL;
+ quirks = aac_get_driver_ident(index)->quirks;
+ if (quirks & AAC_QUIRK_31BIT) {
+ if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
+ ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
+ goto out;
+ } else {
+ if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
+ ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
+ goto out;
+ }
+ if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
+ goto out;
+ if (quirks & AAC_QUIRK_31BIT)
+ if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
+ goto out;
+ if (jafo) {
+ aac->thread = kthread_run(aac_command_thread, aac, "%s",
+ aac->name);
+ if (IS_ERR(aac->thread)) {
+ retval = PTR_ERR(aac->thread);
+ goto out;
+ }
+ }
+ (void)aac_get_adapter_info(aac);
+ if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
+ host->sg_tablesize = 34;
+ host->max_sectors = (host->sg_tablesize * 8) + 112;
+ }
+ if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
+ host->sg_tablesize = 17;
+ host->max_sectors = (host->sg_tablesize * 8) + 112;
+ }
+ aac_get_config_status(aac, 1);
+ aac_get_containers(aac);
+ /*
+ * This is where the assumption that the Adapter is quiesced
+ * is important.
+ */
+ command_list = NULL;
+ __shost_for_each_device(dev, host) {
+ unsigned long flags;
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_for_each_entry(command, &dev->cmd_list, list)
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ command->SCp.buffer = (struct scatterlist *)command_list;
+ command_list = command;
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ }
+ while ((command = command_list)) {
+ command_list = (struct scsi_cmnd *)command->SCp.buffer;
+ command->SCp.buffer = NULL;
+ command->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8
+ | SAM_STAT_TASK_SET_FULL;
+ command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ command->scsi_done(command);
+ }
+ retval = 0;
+
+out:
+ aac->in_reset = 0;
+ scsi_unblock_requests(host);
+ if (jafo) {
+ spin_lock_irq(host->host_lock);
+ }
+ return retval;
+}
+
+int aac_reset_adapter(struct aac_dev * aac, int forced)
+{
+ unsigned long flagv = 0;
+ int retval;
+ struct Scsi_Host * host;
+
+ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+ return -EBUSY;
+
+ if (aac->in_reset) {
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+ return -EBUSY;
+ }
+ aac->in_reset = 1;
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+ /*
+ * Wait for all commands to complete to this specific
+ * target (block maximum 60 seconds). Although not necessary,
+ * it does make us a good storage citizen.
+ */
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ if (forced < 2) for (retval = 60; retval; --retval) {
+ struct scsi_device * dev;
+ struct scsi_cmnd * command;
+ int active = 0;
+
+ __shost_for_each_device(dev, host) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit If all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+
+ /* Quiesce build, flush cache, write through mode */
+ if (forced < 2)
+ aac_send_shutdown(aac);
+ spin_lock_irqsave(host->host_lock, flagv);
+ retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
+ spin_unlock_irqrestore(host->host_lock, flagv);
+
+ if ((forced < 2) && (retval == -ENODEV)) {
+ /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
+ struct fib * fibctx = aac_fib_alloc(aac);
+ if (fibctx) {
+ struct aac_pause *cmd;
+ int status;
+
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_pause *) fib_data(fibctx);
+
+ cmd->command = cpu_to_le32(VM_ContainerConfig);
+ cmd->type = cpu_to_le32(CT_PAUSE_IO);
+ cmd->timeout = cpu_to_le32(1);
+ cmd->min = cpu_to_le32(1);
+ cmd->noRescan = cpu_to_le32(1);
+ cmd->count = cpu_to_le32(0);
+
+ status = aac_fib_send(ContainerCommand,
+ fibctx,
+ sizeof(struct aac_pause),
+ FsaNormal,
+ -2 /* Timeout silently */, 1,
+ NULL, NULL);
+
+ if (status >= 0)
+ aac_fib_complete(fibctx);
+ /* FIB should be freed only after getting
+ * the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibctx);
+ }
+ }
+
+ return retval;
+}
+
+int aac_check_health(struct aac_dev * aac)
+{
+ int BlinkLED;
+ unsigned long time_now, flagv = 0;
+ struct list_head * entry;
+ struct Scsi_Host * host;
+
+ /* Extending the scope of fib_lock slightly to protect aac->in_reset */
+ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+ return 0;
+
+ if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+ return 0; /* OK */
+ }
+
+ aac->in_reset = 1;
+
+ /* Fake up an AIF:
+ * aac_aifcmd.command = AifCmdEventNotify = 1
+ * aac_aifcmd.seqnum = 0xFFFFFFFF
+ * aac_aifcmd.data[0] = AifEnExpEvent = 23
+ * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
+ * aac.aifcmd.data[2] = AifHighPriority = 3
+ * aac.aifcmd.data[3] = BlinkLED
+ */
+
+ time_now = jiffies/HZ;
+ entry = aac->fib_list.next;
+
+ /*
+ * For each Context that is on the
+ * fibctxList, make a copy of the
+ * fib, and then set the event to wake up the
+ * thread that is waiting for it.
+ */
+ while (entry != &aac->fib_list) {
+ /*
+ * Extract the fibctx
+ */
+ struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
+ struct hw_fib * hw_fib;
+ struct fib * fib;
+ /*
+ * Check if the queue is getting
+ * backlogged
+ */
+ if (fibctx->count > 20) {
+ /*
+ * It's *not* jiffies folks,
+ * but jiffies / HZ, so do not
+ * panic ...
+ */
+ u32 time_last = fibctx->jiffies;
+ /*
+ * Has it been > 2 minutes
+ * since the last read off
+ * the queue?
+ */
+ if ((time_now - time_last) > aif_timeout) {
+ entry = entry->next;
+ aac_close_fib_context(aac, fibctx);
+ continue;
+ }
+ }
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock
+ */
+ hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+ fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
+ if (fib && hw_fib) {
+ struct aac_aifcmd * aif;
+
+ fib->hw_fib_va = hw_fib;
+ fib->dev = aac;
+ aac_fib_init(fib);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof (struct fib);
+ fib->data = hw_fib->data;
+ aif = (struct aac_aifcmd *)hw_fib->data;
+ aif->command = cpu_to_le32(AifCmdEventNotify);
+ aif->seqnum = cpu_to_le32(0xFFFFFFFF);
+ ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
+ ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
+ ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
+ ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
+
+ /*
+ * Put the FIB onto the
+ * fibctx's fibs
+ */
+ list_add_tail(&fib->fiblink, &fibctx->fib_list);
+ fibctx->count++;
+ /*
+ * Set the event to wake up the
+ * thread that will waiting.
+ */
+ up(&fibctx->wait_sem);
+ } else {
+ printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+ kfree(fib);
+ kfree(hw_fib);
+ }
+ entry = entry->next;
+ }
+
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+ if (BlinkLED < 0) {
+ printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+ goto out;
+ }
+
+ printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+
+ if (!aac_check_reset || ((aac_check_reset == 1) &&
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET)))
+ goto out;
+ host = aac->scsi_host_ptr;
+ if (aac->thread->pid != current->pid)
+ spin_lock_irqsave(host->host_lock, flagv);
+ BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
+ if (aac->thread->pid != current->pid)
+ spin_unlock_irqrestore(host->host_lock, flagv);
+ return BlinkLED;
+
+out:
+ aac->in_reset = 0;
+ return BlinkLED;
+}
+
+
+/**
+ * aac_command_thread - command processing thread
+ * @dev: Adapter to monitor
+ *
+ * Waits on the commandready event in it's queue. When the event gets set
+ * it will pull FIBs off it's queue. It will continue to pull FIBs off
+ * until the queue is empty. When the queue is empty it will wait for
+ * more FIBs.
+ */
+
+int aac_command_thread(void *data)
+{
+ struct aac_dev *dev = data;
+ struct hw_fib *hw_fib, *hw_newfib;
+ struct fib *fib, *newfib;
+ struct aac_fib_context *fibctx;
+ unsigned long flags;
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long next_jiffies = jiffies + HZ;
+ unsigned long next_check_jiffies = next_jiffies;
+ long difference = HZ;
+
+ /*
+ * We can only have one thread per adapter for AIF's.
+ */
+ if (dev->aif_thread)
+ return -EINVAL;
+
+ /*
+ * Let the DPC know it has a place to send the AIF's to.
+ */
+ dev->aif_thread = 1;
+ add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ dprintk ((KERN_INFO "aac_command_thread start\n"));
+ while (1) {
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
+ struct list_head *entry;
+ struct aac_aifcmd * aifcmd;
+
+ set_current_state(TASK_RUNNING);
+
+ entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
+ list_del(entry);
+
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ fib = list_entry(entry, struct fib, fiblink);
+ /*
+ * We will process the FIB here or pass it to a
+ * worker thread that is TBD. We Really can't
+ * do anything at this point since we don't have
+ * anything defined for this thread to do.
+ */
+ hw_fib = fib->hw_fib_va;
+ memset(fib, 0, sizeof(struct fib));
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+ /*
+ * We only handle AifRequest fibs from the adapter.
+ */
+ aifcmd = (struct aac_aifcmd *) hw_fib->data;
+ if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
+ /* Handle Driver Notify Events */
+ aac_handle_aif(dev, fib);
+ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+ aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+ } else {
+ /* The u32 here is important and intended. We are using
+ 32bit wrapping time to fit the adapter field */
+
+ u32 time_now, time_last;
+ unsigned long flagv;
+ unsigned num;
+ struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+ struct fib ** fib_pool, ** fib_p;
+
+ /* Sniff events */
+ if ((aifcmd->command ==
+ cpu_to_le32(AifCmdEventNotify)) ||
+ (aifcmd->command ==
+ cpu_to_le32(AifCmdJobProgress))) {
+ aac_handle_aif(dev, fib);
+ }
+
+ time_now = jiffies/HZ;
+
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock. We take the estimate
+ * and pre-allocate a set of fibs outside the
+ * lock.
+ */
+ num = le32_to_cpu(dev->init->AdapterFibsSize)
+ / sizeof(struct hw_fib); /* some extra */
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ while (entry != &dev->fib_list) {
+ entry = entry->next;
+ ++num;
+ }
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ hw_fib_pool = NULL;
+ fib_pool = NULL;
+ if (num
+ && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+ && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+ --hw_fib_p;
+ break;
+ }
+ if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+ kfree(*(--hw_fib_p));
+ break;
+ }
+ }
+ if ((num = hw_fib_p - hw_fib_pool) == 0) {
+ kfree(fib_pool);
+ fib_pool = NULL;
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
+ } else {
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ /*
+ * For each Context that is on the
+ * fibctxList, make a copy of the
+ * fib, and then set the event to wake up the
+ * thread that is waiting for it.
+ */
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (entry != &dev->fib_list) {
+ /*
+ * Extract the fibctx
+ */
+ fibctx = list_entry(entry, struct aac_fib_context, next);
+ /*
+ * Check if the queue is getting
+ * backlogged
+ */
+ if (fibctx->count > 20)
+ {
+ /*
+ * It's *not* jiffies folks,
+ * but jiffies / HZ so do not
+ * panic ...
+ */
+ time_last = fibctx->jiffies;
+ /*
+ * Has it been > 2 minutes
+ * since the last read off
+ * the queue?
+ */
+ if ((time_now - time_last) > aif_timeout) {
+ entry = entry->next;
+ aac_close_fib_context(dev, fibctx);
+ continue;
+ }
+ }
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock
+ */
+ if (hw_fib_p < &hw_fib_pool[num]) {
+ hw_newfib = *hw_fib_p;
+ *(hw_fib_p++) = NULL;
+ newfib = *fib_p;
+ *(fib_p++) = NULL;
+ /*
+ * Make the copy of the FIB
+ */
+ memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
+ memcpy(newfib, fib, sizeof(struct fib));
+ newfib->hw_fib_va = hw_newfib;
+ /*
+ * Put the FIB onto the
+ * fibctx's fibs
+ */
+ list_add_tail(&newfib->fiblink, &fibctx->fib_list);
+ fibctx->count++;
+ /*
+ * Set the event to wake up the
+ * thread that is waiting.
+ */
+ up(&fibctx->wait_sem);
+ } else {
+ printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+ }
+ entry = entry->next;
+ }
+ /*
+ * Set the status of this FIB
+ */
+ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+ aac_fib_adapter_complete(fib, sizeof(u32));
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ /* Free up the remaining resources */
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ kfree(*hw_fib_p);
+ kfree(*fib_p);
+ ++fib_p;
+ ++hw_fib_p;
+ }
+ kfree(hw_fib_pool);
+ kfree(fib_pool);
+ }
+ kfree(fib);
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ }
+ /*
+ * There are no more AIF's
+ */
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
+
+ /*
+ * Background activity
+ */
+ if ((time_before(next_check_jiffies,next_jiffies))
+ && ((difference = next_check_jiffies - jiffies) <= 0)) {
+ next_check_jiffies = next_jiffies;
+ if (aac_check_health(dev) == 0) {
+ difference = ((long)(unsigned)check_interval)
+ * HZ;
+ next_check_jiffies = jiffies + difference;
+ } else if (!dev->queues)
+ break;
+ }
+ if (!time_before(next_check_jiffies,next_jiffies)
+ && ((difference = next_jiffies - jiffies) <= 0)) {
+ struct timeval now;
+ int ret;
+
+ /* Don't even try to talk to adapter if its sick */
+ ret = aac_check_health(dev);
+ if (!ret && !dev->queues)
+ break;
+ next_check_jiffies = jiffies
+ + ((long)(unsigned)check_interval)
+ * HZ;
+ do_gettimeofday(&now);
+
+ /* Synchronize our watches */
+ if (((1000000 - (1000000 / HZ)) > now.tv_usec)
+ && (now.tv_usec > (1000000 / HZ)))
+ difference = (((1000000 - now.tv_usec) * HZ)
+ + 500000) / 1000000;
+ else if (ret == 0) {
+ struct fib *fibptr;
+
+ if ((fibptr = aac_fib_alloc(dev))) {
+ int status;
+ __le32 *info;
+
+ aac_fib_init(fibptr);
+
+ info = (__le32 *) fib_data(fibptr);
+ if (now.tv_usec > 500000)
+ ++now.tv_sec;
+
+ *info = cpu_to_le32(now.tv_sec);
+
+ status = aac_fib_send(SendHostTime,
+ fibptr,
+ sizeof(*info),
+ FsaNormal,
+ 1, 1,
+ NULL,
+ NULL);
+ /* Do not set XferState to zero unless
+ * receives a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+ /* FIB should be freed only after
+ * getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
+ }
+ difference = (long)(unsigned)update_interval*HZ;
+ } else {
+ /* retry shortly */
+ difference = 10 * HZ;
+ }
+ next_jiffies = jiffies + difference;
+ if (time_before(next_check_jiffies,next_jiffies))
+ difference = next_check_jiffies - jiffies;
+ }
+ if (difference <= 0)
+ difference = 1;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(difference);
+
+ if (kthread_should_stop())
+ break;
+ }
+ if (dev->queues)
+ remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
+ dev->aif_thread = 0;
+ return 0;
+}
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
new file mode 100644
index 000000000..da9d9936e
--- /dev/null
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -0,0 +1,425 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * dpcsup.c
+ *
+ * Abstract: All DPC processing routines for the cyclone board occur here.
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <linux/semaphore.h>
+
+#include "aacraid.h"
+
+/**
+ * aac_response_normal - Handle command replies
+ * @q: Queue to read from
+ *
+ * This DPC routine will be run when the adapter interrupts us to let us
+ * know there is a response on our normal priority queue. We will pull off
+ * all QE there are and wake up all the waiters before exiting. We will
+ * take a spinlock out on the queue before operating on it.
+ */
+
+unsigned int aac_response_normal(struct aac_queue * q)
+{
+ struct aac_dev * dev = q->dev;
+ struct aac_entry *entry;
+ struct hw_fib * hwfib;
+ struct fib * fib;
+ int consumed = 0;
+ unsigned long flags, mflags;
+
+ spin_lock_irqsave(q->lock, flags);
+ /*
+ * Keep pulling response QEs off the response queue and waking
+ * up the waiters until there are no more QEs. We then return
+ * back to the system. If no response was requesed we just
+ * deallocate the Fib here and continue.
+ */
+ while(aac_consumer_get(dev, q, &entry))
+ {
+ int fast;
+ u32 index = le32_to_cpu(entry->addr);
+ fast = index & 0x01;
+ fib = &dev->fibs[index >> 2];
+ hwfib = fib->hw_fib_va;
+
+ aac_consumer_free(dev, q, HostNormRespQueue);
+ /*
+ * Remove this fib from the Outstanding I/O queue.
+ * But only if it has not already been timed out.
+ *
+ * If the fib has been timed out already, then just
+ * continue. The caller has already been notified that
+ * the fib timed out.
+ */
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
+
+ if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+ spin_unlock_irqrestore(q->lock, flags);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ spin_lock_irqsave(q->lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(q->lock, flags);
+
+ if (fast) {
+ /*
+ * Doctor the fib
+ */
+ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+ hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+ }
+
+ FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+ if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+ {
+ __le32 *pstatus = (__le32 *)hwfib->data;
+ if (*pstatus & cpu_to_le32(0xffff0000))
+ *pstatus = cpu_to_le32(ST_OK);
+ }
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
+ {
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+ FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+ else
+ FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+ /*
+ * NOTE: we cannot touch the fib after this
+ * call, because it may have been deallocated.
+ */
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
+ fib->callback(fib->callback_data, fib);
+ } else {
+ unsigned long flagv;
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (!fib->done) {
+ fib->done = 1;
+ up(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+ }
+ consumed++;
+ spin_lock_irqsave(q->lock, flags);
+ }
+
+ if (consumed > aac_config.peak_fibs)
+ aac_config.peak_fibs = consumed;
+ if (consumed == 0)
+ aac_config.zero_fibs++;
+
+ spin_unlock_irqrestore(q->lock, flags);
+ return 0;
+}
+
+
+/**
+ * aac_command_normal - handle commands
+ * @q: queue to process
+ *
+ * This DPC routine will be queued when the adapter interrupts us to
+ * let us know there is a command on our normal priority queue. We will
+ * pull off all QE there are and wake up all the waiters before exiting.
+ * We will take a spinlock out on the queue before operating on it.
+ */
+
+unsigned int aac_command_normal(struct aac_queue *q)
+{
+ struct aac_dev * dev = q->dev;
+ struct aac_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->lock, flags);
+
+ /*
+ * Keep pulling response QEs off the response queue and waking
+ * up the waiters until there are no more QEs. We then return
+ * back to the system.
+ */
+ while(aac_consumer_get(dev, q, &entry))
+ {
+ struct fib fibctx;
+ struct hw_fib * hw_fib;
+ u32 index;
+ struct fib *fib = &fibctx;
+
+ index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
+ hw_fib = &dev->aif_base_va[index];
+
+ /*
+ * Allocate a FIB at all costs. For non queued stuff
+ * we can just use the stack so we are happy. We need
+ * a fib object in order to manage the linked lists
+ */
+ if (dev->aif_thread)
+ if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
+ fib = &fibctx;
+
+ memset(fib, 0, sizeof(struct fib));
+ INIT_LIST_HEAD(&fib->fiblink);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+
+
+ if (dev->aif_thread && fib != &fibctx) {
+ list_add_tail(&fib->fiblink, &q->cmdq);
+ aac_consumer_free(dev, q, HostNormCmdQueue);
+ wake_up_interruptible(&q->cmdready);
+ } else {
+ aac_consumer_free(dev, q, HostNormCmdQueue);
+ spin_unlock_irqrestore(q->lock, flags);
+ /*
+ * Set the status of this FIB
+ */
+ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+ aac_fib_adapter_complete(fib, sizeof(u32));
+ spin_lock_irqsave(q->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(q->lock, flags);
+ return 0;
+}
+
+/*
+ *
+ * aac_aif_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the AIFs - new method (SRC)
+ *
+ */
+
+static void aac_aif_callback(void *context, struct fib * fibptr)
+{
+ struct fib *fibctx;
+ struct aac_dev *dev;
+ struct aac_aifcmd *cmd;
+ int status;
+
+ fibctx = (struct fib *)context;
+ BUG_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
+ if (fibptr->hw_fib_va->header.XferState &
+ cpu_to_le32(NoMoreAifDataAvailable)) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return;
+ }
+
+ aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+ aac_fib_init(fibctx);
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ status = aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+}
+
+
+/**
+ * aac_intr_normal - Handle command replies
+ * @dev: Device
+ * @index: completion reference
+ *
+ * This DPC routine will be run when the adapter interrupts us to let us
+ * know there is a response on our normal priority queue. We will pull off
+ * all QE there are and wake up all the waiters before exiting.
+ */
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
+ int isAif, int isFastResponse, struct hw_fib *aif_fib)
+{
+ unsigned long mflags;
+ dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
+ if (isAif == 1) { /* AIF - common */
+ struct hw_fib * hw_fib;
+ struct fib * fib;
+ struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
+ unsigned long flags;
+
+ /*
+ * Allocate a FIB. For non queued stuff we can just use
+ * the stack so we are happy. We need a fib object in order to
+ * manage the linked lists.
+ */
+ if ((!dev->aif_thread)
+ || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
+ return 1;
+ if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
+ kfree (fib);
+ return 1;
+ }
+ if (aif_fib != NULL) {
+ memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+ } else {
+ memcpy(hw_fib,
+ (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
+ index), sizeof(struct hw_fib));
+ }
+ INIT_LIST_HEAD(&fib->fiblink);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+
+ spin_lock_irqsave(q->lock, flags);
+ list_add_tail(&fib->fiblink, &q->cmdq);
+ wake_up_interruptible(&q->cmdready);
+ spin_unlock_irqrestore(q->lock, flags);
+ return 1;
+ } else if (isAif == 2) { /* AIF - new (SRC) */
+ struct fib *fibctx;
+ struct aac_aifcmd *cmd;
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return 1;
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ return aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+ } else {
+ struct fib *fib = &dev->fibs[index];
+ struct hw_fib * hwfib = fib->hw_fib_va;
+
+ /*
+ * Remove this fib from the Outstanding I/O queue.
+ * But only if it has not already been timed out.
+ *
+ * If the fib has been timed out already, then just
+ * continue. The caller has already been notified that
+ * the fib timed out.
+ */
+ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
+
+ if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return 0;
+ }
+
+ if (isFastResponse) {
+ /*
+ * Doctor the fib
+ */
+ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+ hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+ }
+
+ FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+ if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+ {
+ __le32 *pstatus = (__le32 *)hwfib->data;
+ if (*pstatus & cpu_to_le32(0xffff0000))
+ *pstatus = cpu_to_le32(ST_OK);
+ }
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
+ {
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+ FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+ else
+ FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+ /*
+ * NOTE: we cannot touch the fib after this
+ * call, because it may have been deallocated.
+ */
+ if (likely(fib->callback && fib->callback_data)) {
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
+ fib->callback(fib->callback_data, fib);
+ } else {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+ } else {
+ unsigned long flagv;
+ dprintk((KERN_INFO "event_wait up\n"));
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (!fib->done) {
+ fib->done = 1;
+ up(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+
+ }
+ return 0;
+ }
+}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
new file mode 100644
index 000000000..9eec02733
--- /dev/null
+++ b/drivers/scsi/aacraid/linit.c
@@ -0,0 +1,1390 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * linit.c
+ *
+ * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
+ */
+
+
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_eh.h>
+
+#include "aacraid.h"
+
+#define AAC_DRIVER_VERSION "1.2-1"
+#ifndef AAC_DRIVER_BRANCH
+#define AAC_DRIVER_BRANCH ""
+#endif
+#define AAC_DRIVERNAME "aacraid"
+
+#ifdef AAC_DRIVER_BUILD
+#define _str(x) #x
+#define str(x) _str(x)
+#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
+#else
+#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
+#endif
+
+MODULE_AUTHOR("Red Hat Inc and Adaptec");
+MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
+ "Adaptec Advanced Raid Products, "
+ "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
+
+static DEFINE_MUTEX(aac_mutex);
+static LIST_HEAD(aac_devices);
+static int aac_cfg_major = -1;
+char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
+
+/*
+ * Because of the way Linux names scsi devices, the order in this table has
+ * become important. Check for on-board Raid first, add-in cards second.
+ *
+ * Note: The last field is used to index into aac_drivers below.
+ */
+static const struct pci_device_id aac_pci_tbl[] = {
+ { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
+ { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
+ { 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
+ { 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
+ { 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
+ { 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+ { 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+ { 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+ { 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+ { 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
+ { 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
+ { 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
+ { 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
+ { 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
+ { 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
+ { 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
+
+ { 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
+ { 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
+ { 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
+ { 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
+ { 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
+ { 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
+ { 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
+ { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
+ { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
+ { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
+ { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
+ { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
+ { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
+ { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
+ { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+ { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+ { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+ { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+ { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+ { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+ { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+ { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
+ { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
+ { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
+ { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
+ { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
+ { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
+ { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
+ { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
+ { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
+ { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
+ { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
+
+ { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
+ { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
+ { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
+ { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
+ { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
+
+ { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
+ { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
+ { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
+ { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
+ { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+ { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
+ { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
+ { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
+ { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
+
+/*
+ * dmb - For now we add the number of channels to this structure.
+ * In the future we should add a fib that reports the number of channels
+ * for the card. At that time we can remove the channels from here
+ */
+static struct aac_driver_ident aac_drivers[] = {
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
+ { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
+ { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
+
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
+ { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
+ { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
+ { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+ { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
+ { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
+ { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
+ { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
+
+ { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
+ { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
+ { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
+ { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
+ { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
+
+ { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
+ { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
+ { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
+};
+
+/**
+ * aac_queuecommand - queue a SCSI command
+ * @cmd: SCSI command to queue
+ * @done: Function to call on command completion
+ *
+ * Queues a command for execution by the associated Host Adapter.
+ *
+ * TODO: unify with aac_scsi_cmd().
+ */
+
+static int aac_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *cmd)
+{
+ int r = 0;
+ cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
+ r = (aac_scsi_cmd(cmd) ? FAILED : 0);
+ return r;
+}
+
+/**
+ * aac_info - Returns the host adapter name
+ * @shost: Scsi host to report on
+ *
+ * Returns a static string describing the device in question
+ */
+
+static const char *aac_info(struct Scsi_Host *shost)
+{
+ struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
+ return aac_drivers[dev->cardtype].name;
+}
+
+/**
+ * aac_get_driver_ident
+ * @devtype: index into lookup table
+ *
+ * Returns a pointer to the entry in the driver lookup table.
+ */
+
+struct aac_driver_ident* aac_get_driver_ident(int devtype)
+{
+ return &aac_drivers[devtype];
+}
+
+/**
+ * aac_biosparm - return BIOS parameters for disk
+ * @sdev: The scsi device corresponding to the disk
+ * @bdev: the block device corresponding to the disk
+ * @capacity: the sector capacity of the disk
+ * @geom: geometry block to fill in
+ *
+ * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
+ * The default disk geometry is 64 heads, 32 sectors, and the appropriate
+ * number of cylinders so as not to exceed drive capacity. In order for
+ * disks equal to or larger than 1 GB to be addressable by the BIOS
+ * without exceeding the BIOS limitation of 1024 cylinders, Extended
+ * Translation should be enabled. With Extended Translation enabled,
+ * drives between 1 GB inclusive and 2 GB exclusive are given a disk
+ * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
+ * are given a disk geometry of 255 heads and 63 sectors. However, if
+ * the BIOS detects that the Extended Translation setting does not match
+ * the geometry in the partition table, then the translation inferred
+ * from the partition table will be used by the BIOS, and a warning may
+ * be displayed.
+ */
+
+static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int *geom)
+{
+ struct diskparm *param = (struct diskparm *)geom;
+ unsigned char *buf;
+
+ dprintk((KERN_DEBUG "aac_biosparm.\n"));
+
+ /*
+ * Assuming extended translation is enabled - #REVISIT#
+ */
+ if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
+ if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
+ param->heads = 255;
+ param->sectors = 63;
+ } else {
+ param->heads = 128;
+ param->sectors = 32;
+ }
+ } else {
+ param->heads = 64;
+ param->sectors = 32;
+ }
+
+ param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
+
+ /*
+ * Read the first 1024 bytes from the disk device, if the boot
+ * sector partition table is valid, search for a partition table
+ * entry whose end_head matches one of the standard geometry
+ * translations ( 64/32, 128/32, 255/63 ).
+ */
+ buf = scsi_bios_ptable(bdev);
+ if (!buf)
+ return 0;
+ if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
+ struct partition *first = (struct partition * )buf;
+ struct partition *entry = first;
+ int saved_cylinders = param->cylinders;
+ int num;
+ unsigned char end_head, end_sec;
+
+ for(num = 0; num < 4; num++) {
+ end_head = entry->end_head;
+ end_sec = entry->end_sector & 0x3f;
+
+ if(end_head == 63) {
+ param->heads = 64;
+ param->sectors = 32;
+ break;
+ } else if(end_head == 127) {
+ param->heads = 128;
+ param->sectors = 32;
+ break;
+ } else if(end_head == 254) {
+ param->heads = 255;
+ param->sectors = 63;
+ break;
+ }
+ entry++;
+ }
+
+ if (num == 4) {
+ end_head = first->end_head;
+ end_sec = first->end_sector & 0x3f;
+ }
+
+ param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
+ if (num < 4 && end_sec == param->sectors) {
+ if (param->cylinders != saved_cylinders)
+ dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
+ param->heads, param->sectors, num));
+ } else if (end_head > 0 || end_sec > 0) {
+ dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
+ end_head + 1, end_sec, num));
+ dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
+ param->heads, param->sectors));
+ }
+ }
+ kfree(buf);
+ return 0;
+}
+
+/**
+ * aac_slave_configure - compute queue depths
+ * @sdev: SCSI device we are considering
+ *
+ * Selects queue depths for each target device based on the host adapter's
+ * total capacity and the queue depth supported by the target device.
+ * A queue depth of one automatically disables tagged queueing.
+ */
+
+static int aac_slave_configure(struct scsi_device *sdev)
+{
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+ if (aac->jbod && (sdev->type == TYPE_DISK))
+ sdev->removable = 1;
+ if ((sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
+ (!aac->jbod || sdev->inq_periph_qual) &&
+ (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+ if (expose_physicals == 0)
+ return -ENXIO;
+ if (expose_physicals < 0)
+ sdev->no_uld_attach = 1;
+ }
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
+ !sdev->no_uld_attach) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num_lsu = 0;
+ unsigned num_one = 0;
+ unsigned depth;
+ unsigned cid;
+
+ /*
+ * Firmware has an individual device recovery time typically
+ * of 35 seconds, give us a margin.
+ */
+ if (sdev->request_queue->rq_timeout < (45 * HZ))
+ blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ for (cid = 0; cid < aac->maximum_num_containers; ++cid)
+ if (aac->fsa_dev[cid].valid)
+ ++num_lsu;
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (!aac->raid_scsi_mode ||
+ (sdev_channel(sdev) != 2)) &&
+ !dev->no_uld_attach) {
+ if ((sdev_channel(dev) != CONTAINER_CHANNEL)
+ || !aac->fsa_dev[sdev_id(dev)].valid)
+ ++num_lsu;
+ } else
+ ++num_one;
+ }
+ if (num_lsu == 0)
+ ++num_lsu;
+ depth = (host->can_queue - num_one) / num_lsu;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ scsi_change_queue_depth(sdev, depth);
+ } else
+ scsi_change_queue_depth(sdev, 1);
+
+ return 0;
+}
+
+/**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+ * @depth: desired queue depth
+ *
+ * Alters queue depths for target device based on the host adapter's
+ * total capacity and the queue depth supported by the target device.
+ */
+
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num = 0;
+
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (sdev_channel(dev) == CONTAINER_CHANNEL))
+ ++num;
+ ++num;
+ }
+ if (num >= host->can_queue)
+ num = host->can_queue - 1;
+ if (depth > (host->can_queue - num))
+ depth = host->can_queue - num;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ return scsi_change_queue_depth(sdev, depth);
+ }
+
+ return scsi_change_queue_depth(sdev, 1);
+}
+
+static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+ if (sdev_channel(sdev) != CONTAINER_CHANNEL)
+ return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
+ ? "Hidden\n" :
+ ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
+}
+
+static struct device_attribute aac_raid_level_attr = {
+ .attr = {
+ .name = "level",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_raid_level
+};
+
+static struct device_attribute *aac_dev_attrs[] = {
+ &aac_raid_level_attr,
+ NULL,
+};
+
+static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
+{
+ struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ return aac_do_ioctl(dev, cmd, arg);
+}
+
+static int aac_eh_abort(struct scsi_cmnd* cmd)
+{
+ struct scsi_device * dev = cmd->device;
+ struct Scsi_Host * host = dev->host;
+ struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+ int count;
+ int ret = FAILED;
+
+ printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n",
+ AAC_DRIVERNAME,
+ host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
+ switch (cmd->cmnd[0]) {
+ case SERVICE_ACTION_IN_16:
+ if (!(aac->raw_io_interface) ||
+ !(aac->raw_io_64) ||
+ ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ case INQUIRY:
+ case READ_CAPACITY:
+ /* Mark associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct fib * fib = &aac->fibs[count];
+ if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ (fib->callback_data == cmd)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ ret = SUCCESS;
+ }
+ }
+ break;
+ case TEST_UNIT_READY:
+ /* Mark associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct scsi_cmnd * command;
+ struct fib * fib = &aac->fibs[count];
+ if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ ((command = fib->callback_data)) &&
+ (command->device == cmd->device)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ if (command == cmd)
+ ret = SUCCESS;
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * aac_eh_reset - Reset command handling
+ * @scsi_cmd: SCSI command block causing the reset
+ *
+ */
+static int aac_eh_reset(struct scsi_cmnd* cmd)
+{
+ struct scsi_device * dev = cmd->device;
+ struct Scsi_Host * host = dev->host;
+ struct scsi_cmnd * command;
+ int count;
+ struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+ unsigned long flags;
+
+ /* Mark the associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct fib * fib = &aac->fibs[count];
+ if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ (fib->callback_data == cmd)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ }
+ }
+ printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
+ AAC_DRIVERNAME);
+
+ if ((count = aac_check_health(aac)))
+ return count;
+ /*
+ * Wait for all commands to complete to this specific
+ * target (block maximum 60 seconds).
+ */
+ for (count = 60; count; --count) {
+ int active = aac->in_reset;
+
+ if (active == 0)
+ __shost_for_each_device(dev, host) {
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if ((command != cmd) &&
+ (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit If all the commands are complete
+ */
+ if (active == 0)
+ return SUCCESS;
+ ssleep(1);
+ }
+ printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+ /*
+ * This adapter needs a blind reset, only do so for Adapters that
+ * support a register, instead of a commanded, reset.
+ */
+ if (((aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_MU_RESET) ||
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) &&
+ aac_check_reset &&
+ ((aac_check_reset != 1) ||
+ !(aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET)))
+ aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
+ return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
+}
+
+/**
+ * aac_cfg_open - open a configuration file
+ * @inode: inode being opened
+ * @file: file handle attached
+ *
+ * Called when the configuration device is opened. Does the needed
+ * set up on the handle and then returns
+ *
+ * Bugs: This needs extending to check a given adapter is present
+ * so we can support hot plugging, and to ref count adapters.
+ */
+
+static int aac_cfg_open(struct inode *inode, struct file *file)
+{
+ struct aac_dev *aac;
+ unsigned minor_number = iminor(inode);
+ int err = -ENODEV;
+
+ mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */
+ list_for_each_entry(aac, &aac_devices, entry) {
+ if (aac->id == minor_number) {
+ file->private_data = aac;
+ err = 0;
+ break;
+ }
+ }
+ mutex_unlock(&aac_mutex);
+
+ return err;
+}
+
+/**
+ * aac_cfg_ioctl - AAC configuration request
+ * @inode: inode of device
+ * @file: file handle
+ * @cmd: ioctl command code
+ * @arg: argument
+ *
+ * Handles a configuration ioctl. Currently this involves wrapping it
+ * up and feeding it into the nasty windowsalike glue layer.
+ *
+ * Bugs: Needs locking against parallel ioctls lower down
+ * Bugs: Needs to handle hot plugging
+ */
+
+static long aac_cfg_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct aac_dev *aac;
+ aac = (struct aac_dev *)file->private_data;
+ if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
+ return -EPERM;
+ mutex_lock(&aac_mutex);
+ ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+ mutex_unlock(&aac_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
+{
+ long ret;
+ mutex_lock(&aac_mutex);
+ switch (cmd) {
+ case FSACTL_MINIPORT_REV_CHECK:
+ case FSACTL_SENDFIB:
+ case FSACTL_OPEN_GET_ADAPTER_FIB:
+ case FSACTL_CLOSE_GET_ADAPTER_FIB:
+ case FSACTL_SEND_RAW_SRB:
+ case FSACTL_GET_PCI_INFO:
+ case FSACTL_QUERY_DISK:
+ case FSACTL_DELETE_DISK:
+ case FSACTL_FORCE_DELETE_DISK:
+ case FSACTL_GET_CONTAINERS:
+ case FSACTL_SEND_LARGE_FIB:
+ ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
+ break;
+
+ case FSACTL_GET_NEXT_ADAPTER_FIB: {
+ struct fib_ioctl __user *f;
+
+ f = compat_alloc_user_space(sizeof(*f));
+ ret = 0;
+ if (clear_user(f, sizeof(*f)))
+ ret = -EFAULT;
+ if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
+ ret = -EFAULT;
+ if (!ret)
+ ret = aac_do_ioctl(dev, cmd, f);
+ break;
+ }
+
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ mutex_unlock(&aac_mutex);
+ return ret;
+}
+
+static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
+}
+
+static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ return aac_compat_do_ioctl(file->private_data, cmd, arg);
+}
+#endif
+
+static ssize_t aac_show_model(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len;
+
+ if (dev->supplement_adapter_info.AdapterTypeText[0]) {
+ char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ while (*cp && *cp != ' ')
+ ++cp;
+ while (*cp == ' ')
+ ++cp;
+ len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
+ } else
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ aac_drivers[dev->cardtype].model);
+ return len;
+}
+
+static ssize_t aac_show_vendor(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len;
+
+ if (dev->supplement_adapter_info.AdapterTypeText[0]) {
+ char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ while (*cp && *cp != ' ')
+ ++cp;
+ len = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
+ dev->supplement_adapter_info.AdapterTypeText);
+ } else
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ aac_drivers[dev->cardtype].vname);
+ return len;
+}
+
+static ssize_t aac_show_flags(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ int len = 0;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
+
+ if (nblank(dprintk(x)))
+ len = snprintf(buf, PAGE_SIZE, "dprintk\n");
+#ifdef AAC_DETAILED_STATUS_INFO
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "AAC_DETAILED_STATUS_INFO\n");
+#endif
+ if (dev->raw_io_interface && dev->raw_io_64)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "SAI_READ_CAPACITY_16\n");
+ if (dev->jbod)
+ len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ if (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_POWER_MANAGEMENT)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_POWER_MANAGEMENT\n");
+ if (dev->msi)
+ len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
+ return len;
+}
+
+static ssize_t aac_show_kernel_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len, tmp;
+
+ tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
+ le32_to_cpu(dev->adapter_info.kernelbuild));
+ return len;
+}
+
+static ssize_t aac_show_monitor_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len, tmp;
+
+ tmp = le32_to_cpu(dev->adapter_info.monitorrev);
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
+ le32_to_cpu(dev->adapter_info.monitorbuild));
+ return len;
+}
+
+static ssize_t aac_show_bios_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len, tmp;
+
+ tmp = le32_to_cpu(dev->adapter_info.biosrev);
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
+ le32_to_cpu(dev->adapter_info.biosbuild));
+ return len;
+}
+
+static ssize_t aac_show_serial_number(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len = 0;
+
+ if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
+ len = snprintf(buf, 16, "%06X\n",
+ le32_to_cpu(dev->adapter_info.serial[0]));
+ if (len &&
+ !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
+ sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
+ buf, len-1))
+ len = snprintf(buf, 16, "%.*s\n",
+ (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
+ dev->supplement_adapter_info.MfgPcbaSerialNo);
+
+ return min(len, 16);
+}
+
+static ssize_t aac_show_max_channel(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ class_to_shost(device)->max_channel);
+}
+
+static ssize_t aac_show_max_id(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ class_to_shost(device)->max_id);
+}
+
+static ssize_t aac_store_reset_adapter(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval = -EACCES;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return retval;
+ retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!');
+ if (retval >= 0)
+ retval = count;
+ return retval;
+}
+
+static ssize_t aac_show_reset_adapter(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len, tmp;
+
+ tmp = aac_adapter_check_health(dev);
+ if ((tmp == 0) && dev->in_reset)
+ tmp = -EBUSY;
+ len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
+ return len;
+}
+
+static struct device_attribute aac_model = {
+ .attr = {
+ .name = "model",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_model,
+};
+static struct device_attribute aac_vendor = {
+ .attr = {
+ .name = "vendor",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_vendor,
+};
+static struct device_attribute aac_flags = {
+ .attr = {
+ .name = "flags",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_flags,
+};
+static struct device_attribute aac_kernel_version = {
+ .attr = {
+ .name = "hba_kernel_version",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_kernel_version,
+};
+static struct device_attribute aac_monitor_version = {
+ .attr = {
+ .name = "hba_monitor_version",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_monitor_version,
+};
+static struct device_attribute aac_bios_version = {
+ .attr = {
+ .name = "hba_bios_version",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_bios_version,
+};
+static struct device_attribute aac_serial_number = {
+ .attr = {
+ .name = "serial_number",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_serial_number,
+};
+static struct device_attribute aac_max_channel = {
+ .attr = {
+ .name = "max_channel",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_max_channel,
+};
+static struct device_attribute aac_max_id = {
+ .attr = {
+ .name = "max_id",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_max_id,
+};
+static struct device_attribute aac_reset = {
+ .attr = {
+ .name = "reset_host",
+ .mode = S_IWUSR|S_IRUGO,
+ },
+ .store = aac_store_reset_adapter,
+ .show = aac_show_reset_adapter,
+};
+
+static struct device_attribute *aac_attrs[] = {
+ &aac_model,
+ &aac_vendor,
+ &aac_flags,
+ &aac_kernel_version,
+ &aac_monitor_version,
+ &aac_bios_version,
+ &aac_serial_number,
+ &aac_max_channel,
+ &aac_max_id,
+ &aac_reset,
+ NULL
+};
+
+ssize_t aac_get_serial_number(struct device *device, char *buf)
+{
+ return aac_show_serial_number(device, &aac_serial_number, buf);
+}
+
+static const struct file_operations aac_cfg_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = aac_cfg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = aac_compat_cfg_ioctl,
+#endif
+ .open = aac_cfg_open,
+ .llseek = noop_llseek,
+};
+
+static struct scsi_host_template aac_driver_template = {
+ .module = THIS_MODULE,
+ .name = "AAC",
+ .proc_name = AAC_DRIVERNAME,
+ .info = aac_info,
+ .ioctl = aac_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = aac_compat_ioctl,
+#endif
+ .queuecommand = aac_queuecommand,
+ .bios_param = aac_biosparm,
+ .shost_attrs = aac_attrs,
+ .slave_configure = aac_slave_configure,
+ .change_queue_depth = aac_change_queue_depth,
+ .sdev_attrs = aac_dev_attrs,
+ .eh_abort_handler = aac_eh_abort,
+ .eh_host_reset_handler = aac_eh_reset,
+ .can_queue = AAC_NUM_IO_FIB,
+ .this_id = MAXIMUM_NUM_CONTAINERS,
+ .sg_tablesize = 16,
+ .max_sectors = 128,
+#if (AAC_NUM_IO_FIB > 256)
+ .cmd_per_lun = 256,
+#else
+ .cmd_per_lun = AAC_NUM_IO_FIB,
+#endif
+ .use_clustering = ENABLE_CLUSTERING,
+ .emulated = 1,
+ .no_write_same = 1,
+};
+
+static void __aac_shutdown(struct aac_dev * aac)
+{
+ int i;
+ int cpu;
+
+ if (aac->aif_thread) {
+ int i;
+ /* Clear out events first */
+ for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
+ struct fib *fib = &aac->fibs[i];
+ if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
+ up(&fib->event_wait);
+ }
+ kthread_stop(aac->thread);
+ }
+ aac_send_shutdown(aac);
+ aac_adapter_disable_int(aac);
+ cpu = cpumask_first(cpu_online_mask);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++) {
+ if (irq_set_affinity_hint(
+ aac->msixentry[i].vector,
+ NULL)) {
+ printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
+ aac->name,
+ aac->id,
+ cpu);
+ }
+ cpu = cpumask_next(cpu,
+ cpu_online_mask);
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ }
+ } else {
+ free_irq(aac->pdev->irq,
+ &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
+ else if (aac->max_msix > 1)
+ pci_disable_msix(aac->pdev);
+}
+
+static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ unsigned index = id->driver_data;
+ struct Scsi_Host *shost;
+ struct aac_dev *aac;
+ struct list_head *insert = &aac_devices;
+ int error = -ENODEV;
+ int unique_id = 0;
+ u64 dmamask;
+ extern int aac_sync_mode;
+
+ list_for_each_entry(aac, &aac_devices, entry) {
+ if (aac->id > unique_id)
+ break;
+ insert = &aac->entry;
+ unique_id++;
+ }
+
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ error = pci_enable_device(pdev);
+ if (error)
+ goto out;
+ error = -ENODEV;
+
+ /*
+ * If the quirk31 bit is set, the adapter needs adapter
+ * to driver communication memory to be allocated below 2gig
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+ dmamask = DMA_BIT_MASK(31);
+ else
+ dmamask = DMA_BIT_MASK(32);
+
+ if (pci_set_dma_mask(pdev, dmamask) ||
+ pci_set_consistent_dma_mask(pdev, dmamask))
+ goto out_disable_pdev;
+
+ pci_set_master(pdev);
+
+ shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
+ if (!shost)
+ goto out_disable_pdev;
+
+ shost->irq = pdev->irq;
+ shost->unique_id = unique_id;
+ shost->max_cmd_len = 16;
+ shost->use_cmd_list = 1;
+
+ aac = (struct aac_dev *)shost->hostdata;
+ aac->base_start = pci_resource_start(pdev, 0);
+ aac->scsi_host_ptr = shost;
+ aac->pdev = pdev;
+ aac->name = aac_driver_template.name;
+ aac->id = shost->unique_id;
+ aac->cardtype = index;
+ INIT_LIST_HEAD(&aac->entry);
+
+ aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
+ if (!aac->fibs)
+ goto out_free_host;
+ spin_lock_init(&aac->fib_lock);
+
+ /*
+ * Map in the registers from the adapter.
+ */
+ aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
+ if ((*aac_drivers[index].init)(aac))
+ goto out_unmap;
+
+ if (aac->sync_mode) {
+ if (aac_sync_mode)
+ printk(KERN_INFO "%s%d: Sync. mode enforced "
+ "by driver parameter. This will cause "
+ "a significant performance decrease!\n",
+ aac->name,
+ aac->id);
+ else
+ printk(KERN_INFO "%s%d: Async. mode not supported "
+ "by current driver, sync. mode enforced."
+ "\nPlease update driver to get full performance.\n",
+ aac->name,
+ aac->id);
+ }
+
+ /*
+ * Start any kernel threads needed
+ */
+ aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
+ if (IS_ERR(aac->thread)) {
+ printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+ error = PTR_ERR(aac->thread);
+ aac->thread = NULL;
+ goto out_deinit;
+ }
+
+ /*
+ * If we had set a smaller DMA mask earlier, set it to 4gig
+ * now since the adapter can dma data to at least a 4gig
+ * address space.
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ goto out_deinit;
+
+ aac->maximum_num_channels = aac_drivers[index].channels;
+ error = aac_get_adapter_info(aac);
+ if (error < 0)
+ goto out_deinit;
+
+ /*
+ * Lets override negotiations and drop the maximum SG limit to 34
+ */
+ if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
+ (shost->sg_tablesize > 34)) {
+ shost->sg_tablesize = 34;
+ shost->max_sectors = (shost->sg_tablesize * 8) + 112;
+ }
+
+ if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
+ (shost->sg_tablesize > 17)) {
+ shost->sg_tablesize = 17;
+ shost->max_sectors = (shost->sg_tablesize * 8) + 112;
+ }
+
+ error = pci_set_dma_max_seg_size(pdev,
+ (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
+ (shost->max_sectors << 9) : 65536);
+ if (error)
+ goto out_deinit;
+
+ /*
+ * Firmware printf works only with older firmware.
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
+ aac->printf_enabled = 1;
+ else
+ aac->printf_enabled = 0;
+
+ /*
+ * max channel will be the physical channels plus 1 virtual channel
+ * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
+ * physical channels are address by their actual physical number+1
+ */
+ if (aac->nondasd_support || expose_physicals || aac->jbod)
+ shost->max_channel = aac->maximum_num_channels;
+ else
+ shost->max_channel = 0;
+
+ aac_get_config_status(aac, 0);
+ aac_get_containers(aac);
+ list_add(&aac->entry, insert);
+
+ shost->max_id = aac->maximum_num_containers;
+ if (shost->max_id < aac->maximum_num_physicals)
+ shost->max_id = aac->maximum_num_physicals;
+ if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
+ shost->max_id = MAXIMUM_NUM_CONTAINERS;
+ else
+ shost->this_id = shost->max_id;
+
+ /*
+ * dmb - we may need to move the setting of these parms somewhere else once
+ * we get a fib that can report the actual numbers
+ */
+ shost->max_lun = AAC_MAX_LUN;
+
+ pci_set_drvdata(pdev, shost);
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+ goto out_deinit;
+ scsi_scan_host(shost);
+
+ return 0;
+
+ out_deinit:
+ __aac_shutdown(aac);
+ out_unmap:
+ aac_fib_map_free(aac);
+ if (aac->comm_addr)
+ pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
+ aac->comm_phys);
+ kfree(aac->queues);
+ aac_adapter_ioremap(aac, 0);
+ kfree(aac->fibs);
+ kfree(aac->fsa_dev);
+ out_free_host:
+ scsi_host_put(shost);
+ out_disable_pdev:
+ pci_disable_device(pdev);
+ out:
+ return error;
+}
+
+static void aac_shutdown(struct pci_dev *dev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(dev);
+ scsi_block_requests(shost);
+ __aac_shutdown((struct aac_dev *)shost->hostdata);
+}
+
+static void aac_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+ scsi_remove_host(shost);
+
+ __aac_shutdown(aac);
+ aac_fib_map_free(aac);
+ pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
+ aac->comm_phys);
+ kfree(aac->queues);
+
+ aac_adapter_ioremap(aac, 0);
+
+ kfree(aac->fibs);
+ kfree(aac->fsa_dev);
+
+ list_del(&aac->entry);
+ scsi_host_put(shost);
+ pci_disable_device(pdev);
+ if (list_empty(&aac_devices)) {
+ unregister_chrdev(aac_cfg_major, "aac");
+ aac_cfg_major = -1;
+ }
+}
+
+static struct pci_driver aac_pci_driver = {
+ .name = AAC_DRIVERNAME,
+ .id_table = aac_pci_tbl,
+ .probe = aac_probe_one,
+ .remove = aac_remove_one,
+ .shutdown = aac_shutdown,
+};
+
+static int __init aac_init(void)
+{
+ int error;
+
+ printk(KERN_INFO "Adaptec %s driver %s\n",
+ AAC_DRIVERNAME, aac_driver_version);
+
+ error = pci_register_driver(&aac_pci_driver);
+ if (error < 0)
+ return error;
+
+ aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
+ if (aac_cfg_major < 0) {
+ printk(KERN_WARNING
+ "aacraid: unable to register \"aac\" device.\n");
+ }
+
+ return 0;
+}
+
+static void __exit aac_exit(void)
+{
+ if (aac_cfg_major > -1)
+ unregister_chrdev(aac_cfg_major, "aac");
+ pci_unregister_driver(&aac_pci_driver);
+}
+
+module_init(aac_init);
+module_exit(aac_exit);
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
new file mode 100644
index 000000000..6c53b1d8b
--- /dev/null
+++ b/drivers/scsi/aacraid/nark.c
@@ -0,0 +1,84 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * nark.c
+ *
+ * Abstract: Hardware Device Interface for NEMER/ARK
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/**
+ * aac_nark_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.rx);
+ dev->regs.rx = NULL;
+ iounmap(dev->base);
+ dev->base = NULL;
+ return 0;
+ }
+ dev->base_start = pci_resource_start(dev->pdev, 2);
+ dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
+ ((u64)pci_resource_start(dev->pdev, 1) << 32),
+ sizeof(struct rx_registers) - sizeof(struct rx_inbound));
+ dev->base = NULL;
+ if (dev->regs.rx == NULL)
+ return -1;
+ dev->base = ioremap(dev->base_start, size);
+ if (dev->base == NULL) {
+ iounmap(dev->regs.rx);
+ dev->regs.rx = NULL;
+ return -1;
+ }
+ dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs;
+ return 0;
+}
+
+/**
+ * aac_nark_init - initialize an NEMER/ARK Split Bar card
+ * @dev: device to configure
+ *
+ */
+
+int aac_nark_init(struct aac_dev * dev)
+{
+ /*
+ * Fill in the function dispatch table.
+ */
+ dev->a_ops.adapter_ioremap = aac_nark_ioremap;
+ dev->a_ops.adapter_comm = aac_rx_select_comm;
+
+ return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
new file mode 100644
index 000000000..7d8013fee
--- /dev/null
+++ b/drivers/scsi/aacraid/rkt.c
@@ -0,0 +1,107 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * rkt.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/blkdev.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB)
+
+/**
+ * aac_rkt_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+
+static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
+{
+ int retval;
+ retval = aac_rx_select_comm(dev, comm);
+ if (comm == AAC_COMM_MESSAGE) {
+ /*
+ * FIB Setup has already been done, but we can minimize the
+ * damage by at least ensuring the OS never issues more
+ * commands than we can handle. The Rocket adapters currently
+ * can only handle 246 commands and 8 AIFs at the same time,
+ * and in fact do notify us accordingly if we negotiate the
+ * FIB size. The problem that causes us to add this check is
+ * to ensure that we do not overdo it with the adapter when a
+ * hard coded FIB override is being utilized. This special
+ * case warrants this half baked, but convenient, check here.
+ */
+ if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
+ dev->init->MaxIoCommands =
+ cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
+ dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
+ }
+ }
+ return retval;
+}
+
+/**
+ * aac_rkt_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.rkt);
+ return 0;
+ }
+ dev->base = dev->regs.rkt = ioremap(dev->base_start, size);
+ if (dev->base == NULL)
+ return -1;
+ dev->IndexRegs = &dev->regs.rkt->IndexRegs;
+ return 0;
+}
+
+/**
+ * aac_rkt_init - initialize an i960 based AAC card
+ * @dev: device to configure
+ *
+ * Allocate and set up resources for the i960 based AAC variants. The
+ * device_interface in the commregion will be allocated and linked
+ * to the comm region.
+ */
+
+int aac_rkt_init(struct aac_dev *dev)
+{
+ /*
+ * Fill in the function dispatch table.
+ */
+ dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
+ dev->a_ops.adapter_comm = aac_rkt_select_comm;
+
+ return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
new file mode 100644
index 000000000..9570612b8
--- /dev/null
+++ b/drivers/scsi/aacraid/rx.c
@@ -0,0 +1,676 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * rx.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
+{
+ struct aac_dev *dev = dev_id;
+ unsigned long bellbits;
+ u8 intstat = rx_readb(dev, MUnit.OISR);
+
+ /*
+ * Read mask and invert because drawbridge is reversed.
+ * This allows us to only service interrupts that have
+ * been enabled.
+ * Check to see if this is our interrupt. If it isn't just return
+ */
+ if (likely(intstat & ~(dev->OIMR))) {
+ bellbits = rx_readl(dev, OutboundDoorbellReg);
+ if (unlikely(bellbits & DoorBellPrintfReady)) {
+ aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
+ rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+ rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+ }
+ else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
+ rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+ aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+ }
+ else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
+ rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+ aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+ }
+ else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
+ rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+ }
+ else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
+ rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+ rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
+{
+ int isAif, isFastResponse, isSpecial;
+ struct aac_dev *dev = dev_id;
+ u32 Index = rx_readl(dev, MUnit.OutboundQueue);
+ if (unlikely(Index == 0xFFFFFFFFL))
+ Index = rx_readl(dev, MUnit.OutboundQueue);
+ if (likely(Index != 0xFFFFFFFFL)) {
+ do {
+ isAif = isFastResponse = isSpecial = 0;
+ if (Index & 0x00000002L) {
+ isAif = 1;
+ if (Index == 0xFFFFFFFEL)
+ isSpecial = 1;
+ Index &= ~0x00000002L;
+ } else {
+ if (Index & 0x00000001L)
+ isFastResponse = 1;
+ Index >>= 2;
+ }
+ if (!isSpecial) {
+ if (unlikely(aac_intr_normal(dev,
+ Index, isAif,
+ isFastResponse, NULL))) {
+ rx_writel(dev,
+ MUnit.OutboundQueue,
+ Index);
+ rx_writel(dev,
+ MUnit.ODR,
+ DoorBellAdapterNormRespReady);
+ }
+ }
+ Index = rx_readl(dev, MUnit.OutboundQueue);
+ } while (Index != 0xFFFFFFFFL);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * aac_rx_disable_interrupt - Disable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_disable_interrupt(struct aac_dev *dev)
+{
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+}
+
+/**
+ * aac_rx_enable_interrupt_producer - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
+{
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+}
+
+/**
+ * aac_rx_enable_interrupt_message - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
+{
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+}
+
+/**
+ * rx_sync_cmd - send a command and wait
+ * @dev: Adapter
+ * @command: Command to execute
+ * @p1: first parameter
+ * @ret: adapter status
+ *
+ * This routine will send a synchronous command to the adapter and wait
+ * for its completion.
+ */
+
+static int rx_sync_cmd(struct aac_dev *dev, u32 command,
+ u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+ u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+ unsigned long start;
+ int ok;
+ /*
+ * Write the command into Mailbox 0
+ */
+ writel(command, &dev->IndexRegs->Mailbox[0]);
+ /*
+ * Write the parameters into Mailboxes 1 - 6
+ */
+ writel(p1, &dev->IndexRegs->Mailbox[1]);
+ writel(p2, &dev->IndexRegs->Mailbox[2]);
+ writel(p3, &dev->IndexRegs->Mailbox[3]);
+ writel(p4, &dev->IndexRegs->Mailbox[4]);
+ /*
+ * Clear the synch command doorbell to start on a clean slate.
+ */
+ rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+ /*
+ * Disable doorbell interrupts
+ */
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+ /*
+ * Force the completion of the mask register write before issuing
+ * the interrupt.
+ */
+ rx_readb (dev, MUnit.OIMR);
+ /*
+ * Signal that there is a new synch command
+ */
+ rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+
+ ok = 0;
+ start = jiffies;
+
+ /*
+ * Wait up to 30 seconds
+ */
+ while (time_before(jiffies, start+30*HZ))
+ {
+ udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
+ /*
+ * Mon960 will set doorbell0 bit when it has completed the command.
+ */
+ if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+ /*
+ * Clear the doorbell.
+ */
+ rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+ ok = 1;
+ break;
+ }
+ /*
+ * Yield the processor in case we are slow
+ */
+ msleep(1);
+ }
+ if (unlikely(ok != 1)) {
+ /*
+ * Restore interrupt mask even though we timed out
+ */
+ aac_adapter_enable_int(dev);
+ return -ETIMEDOUT;
+ }
+ /*
+ * Pull the synch status from Mailbox 0.
+ */
+ if (status)
+ *status = readl(&dev->IndexRegs->Mailbox[0]);
+ if (r1)
+ *r1 = readl(&dev->IndexRegs->Mailbox[1]);
+ if (r2)
+ *r2 = readl(&dev->IndexRegs->Mailbox[2]);
+ if (r3)
+ *r3 = readl(&dev->IndexRegs->Mailbox[3]);
+ if (r4)
+ *r4 = readl(&dev->IndexRegs->Mailbox[4]);
+ /*
+ * Clear the synch command doorbell.
+ */
+ rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+ /*
+ * Restore interrupt mask
+ */
+ aac_adapter_enable_int(dev);
+ return 0;
+
+}
+
+/**
+ * aac_rx_interrupt_adapter - interrupt adapter
+ * @dev: Adapter
+ *
+ * Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_rx_interrupt_adapter(struct aac_dev *dev)
+{
+ rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_rx_notify_adapter - send an event to the adapter
+ * @dev: Adapter
+ * @event: Event to send
+ *
+ * Notify the i960 that something it probably cares about has
+ * happened.
+ */
+
+static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
+{
+ switch (event) {
+
+ case AdapNormCmdQue:
+ rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+ break;
+ case HostNormRespNotFull:
+ rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+ break;
+ case AdapNormRespQue:
+ rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+ break;
+ case HostNormCmdNotFull:
+ rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+ break;
+ case HostShutdown:
+ break;
+ case FastIo:
+ rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+ break;
+ case AdapPrintfDone:
+ rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/**
+ * aac_rx_start_adapter - activate adapter
+ * @dev: Adapter
+ *
+ * Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_rx_start_adapter(struct aac_dev *dev)
+{
+ struct aac_init *init;
+
+ init = dev->init;
+ init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+ // We can only use a 32 bit address here
+ rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
+ 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_rx_check_health
+ * @dev: device to check if healthy
+ *
+ * Will attempt to determine if the specified adapter is alive and
+ * capable of handling requests, returning 0 if alive.
+ */
+static int aac_rx_check_health(struct aac_dev *dev)
+{
+ u32 status = rx_readl(dev, MUnit.OMRx[0]);
+
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ if (unlikely(status & SELF_TEST_FAILED))
+ return -1;
+ /*
+ * Check to see if the board panic'd.
+ */
+ if (unlikely(status & KERNEL_PANIC)) {
+ char * buffer;
+ struct POSTSTATUS {
+ __le32 Post_Command;
+ __le32 Post_Address;
+ } * post;
+ dma_addr_t paddr, baddr;
+ int ret;
+
+ if (likely((status & 0xFF000000L) == 0xBC000000L))
+ return (status >> 16) & 0xFF;
+ buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
+ ret = -2;
+ if (unlikely(buffer == NULL))
+ return ret;
+ post = pci_alloc_consistent(dev->pdev,
+ sizeof(struct POSTSTATUS), &paddr);
+ if (unlikely(post == NULL)) {
+ pci_free_consistent(dev->pdev, 512, buffer, baddr);
+ return ret;
+ }
+ memset(buffer, 0, 512);
+ post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
+ post->Post_Address = cpu_to_le32(baddr);
+ rx_writel(dev, MUnit.IMRx[0], paddr);
+ rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
+ post, paddr);
+ if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
+ ret = (hex_to_bin(buffer[2]) << 4) +
+ hex_to_bin(buffer[3]);
+ }
+ pci_free_consistent(dev->pdev, 512, buffer, baddr);
+ return ret;
+ }
+ /*
+ * Wait for the adapter to be up and running.
+ */
+ if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+ return -3;
+ /*
+ * Everything is OK
+ */
+ return 0;
+}
+
+/**
+ * aac_rx_deliver_producer
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+int aac_rx_deliver_producer(struct fib * fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ u32 Index;
+ unsigned long nointr = 0;
+
+ aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
+
+ atomic_inc(&q->numpending);
+ *(q->headers.producer) = cpu_to_le32(Index + 1);
+ if (!(nointr & aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormCmdQueue);
+
+ return 0;
+}
+
+/**
+ * aac_rx_deliver_message
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+static int aac_rx_deliver_message(struct fib * fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ u32 Index;
+ u64 addr;
+ volatile void __iomem *device;
+
+ unsigned long count = 10000000L; /* 50 seconds */
+ atomic_inc(&q->numpending);
+ for(;;) {
+ Index = rx_readl(dev, MUnit.InboundQueue);
+ if (unlikely(Index == 0xFFFFFFFFL))
+ Index = rx_readl(dev, MUnit.InboundQueue);
+ if (likely(Index != 0xFFFFFFFFL))
+ break;
+ if (--count == 0) {
+ atomic_dec(&q->numpending);
+ return -ETIMEDOUT;
+ }
+ udelay(5);
+ }
+ device = dev->base + Index;
+ addr = fib->hw_fib_pa;
+ writel((u32)(addr & 0xffffffff), device);
+ device += sizeof(u32);
+ writel((u32)(addr >> 32), device);
+ device += sizeof(u32);
+ writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
+ rx_writel(dev, MUnit.InboundQueue, Index);
+ return 0;
+}
+
+/**
+ * aac_rx_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.rx);
+ return 0;
+ }
+ dev->base = dev->regs.rx = ioremap(dev->base_start, size);
+ if (dev->base == NULL)
+ return -1;
+ dev->IndexRegs = &dev->regs.rx->IndexRegs;
+ return 0;
+}
+
+static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
+{
+ u32 var = 0;
+
+ if (!(dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ else {
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+ if (!bled && (var != 0x00000001) && (var != 0x3803000F))
+ bled = -EINVAL;
+ }
+ if (bled && (bled != -ETIMEDOUT))
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+
+ if (bled && (bled != -ETIMEDOUT))
+ return -EINVAL;
+ }
+ if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+ rx_writel(dev, MUnit.reserved2, 3);
+ msleep(5000); /* Delay 5 seconds */
+ var = 0x00000001;
+ }
+ if (bled && (var != 0x00000001))
+ return -EINVAL;
+ ssleep(5);
+ if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
+ return -ENODEV;
+ if (startup_timeout < 300)
+ startup_timeout = 300;
+ return 0;
+}
+
+/**
+ * aac_rx_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+
+int aac_rx_select_comm(struct aac_dev *dev, int comm)
+{
+ switch (comm) {
+ case AAC_COMM_PRODUCER:
+ dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
+ dev->a_ops.adapter_intr = aac_rx_intr_producer;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ break;
+ case AAC_COMM_MESSAGE:
+ dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
+ dev->a_ops.adapter_intr = aac_rx_intr_message;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_message;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * aac_rx_init - initialize an i960 based AAC card
+ * @dev: device to configure
+ *
+ * Allocate and set up resources for the i960 based AAC variants. The
+ * device_interface in the commregion will be allocated and linked
+ * to the comm region.
+ */
+
+int _aac_rx_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ const char * name = dev->name;
+
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
+ dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
+ if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
+ !aac_rx_restart_adapter(dev, 0))
+ /* Make sure the Hardware FIFO is empty */
+ while ((++restart < 512) &&
+ (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ status = rx_readl(dev, MUnit.OMRx[0]);
+ if (status & KERNEL_PANIC) {
+ if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = rx_readl(dev, MUnit.OMRx[0]);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
+ {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_rx_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_rx_check_health;
+ dev->a_ops.adapter_restart = aac_rx_restart_adapter;
+
+ /*
+ * First clear out all interrupts. Then enable the one's that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_PRODUCER);
+ aac_adapter_disable_int(dev);
+ rx_writel(dev, MUnit.ODR, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ aac_adapter_comm(dev, dev->comm_interface);
+ dev->sync_mode = 0; /* sync. mode not supported */
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", dev) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = dev->base_start;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
+ aac_adapter_enable_int(dev);
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_rx_start_adapter(dev);
+
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
+
+int aac_rx_init(struct aac_dev *dev)
+{
+ /*
+ * Fill in the function dispatch table.
+ */
+ dev->a_ops.adapter_ioremap = aac_rx_ioremap;
+ dev->a_ops.adapter_comm = aac_rx_select_comm;
+
+ return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
new file mode 100644
index 000000000..e66477c98
--- /dev/null
+++ b/drivers/scsi/aacraid/sa.c
@@ -0,0 +1,416 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * sa.c
+ *
+ * Abstract: Drawbridge specific support functions
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_sa_intr(int irq, void *dev_id)
+{
+ struct aac_dev *dev = dev_id;
+ unsigned short intstat, mask;
+
+ intstat = sa_readw(dev, DoorbellReg_p);
+ /*
+ * Read mask and invert because drawbridge is reversed.
+ * This allows us to only service interrupts that have been enabled.
+ */
+ mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
+
+ /* Check to see if this is our interrupt. If it isn't just return */
+
+ if (intstat & mask) {
+ if (intstat & PrintfReady) {
+ aac_printf(dev, sa_readl(dev, Mailbox5));
+ sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
+ sa_writew(dev, DoorbellReg_s, PrintfDone);
+ } else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
+ sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
+ aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+ } else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
+ sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
+ aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+ } else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
+ sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
+ } else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
+ sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * aac_sa_disable_interrupt - disable interrupt
+ * @dev: Which adapter to enable.
+ */
+
+static void aac_sa_disable_interrupt (struct aac_dev *dev)
+{
+ sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
+}
+
+/**
+ * aac_sa_enable_interrupt - enable interrupt
+ * @dev: Which adapter to enable.
+ */
+
+static void aac_sa_enable_interrupt (struct aac_dev *dev)
+{
+ sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
+ DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+}
+
+/**
+ * aac_sa_notify_adapter - handle adapter notification
+ * @dev: Adapter that notification is for
+ * @event: Event to notidy
+ *
+ * Notify the adapter of an event
+ */
+
+static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
+{
+ switch (event) {
+
+ case AdapNormCmdQue:
+ sa_writew(dev, DoorbellReg_s,DOORBELL_1);
+ break;
+ case HostNormRespNotFull:
+ sa_writew(dev, DoorbellReg_s,DOORBELL_4);
+ break;
+ case AdapNormRespQue:
+ sa_writew(dev, DoorbellReg_s,DOORBELL_2);
+ break;
+ case HostNormCmdNotFull:
+ sa_writew(dev, DoorbellReg_s,DOORBELL_3);
+ break;
+ case HostShutdown:
+ /*
+ sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ */
+ break;
+ case FastIo:
+ sa_writew(dev, DoorbellReg_s,DOORBELL_6);
+ break;
+ case AdapPrintfDone:
+ sa_writew(dev, DoorbellReg_s,DOORBELL_5);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+
+/**
+ * sa_sync_cmd - send a command and wait
+ * @dev: Adapter
+ * @command: Command to execute
+ * @p1: first parameter
+ * @ret: adapter status
+ *
+ * This routine will send a synchronous command to the adapter and wait
+ * for its completion.
+ */
+
+static int sa_sync_cmd(struct aac_dev *dev, u32 command,
+ u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+ u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
+{
+ unsigned long start;
+ int ok;
+ /*
+ * Write the Command into Mailbox 0
+ */
+ sa_writel(dev, Mailbox0, command);
+ /*
+ * Write the parameters into Mailboxes 1 - 4
+ */
+ sa_writel(dev, Mailbox1, p1);
+ sa_writel(dev, Mailbox2, p2);
+ sa_writel(dev, Mailbox3, p3);
+ sa_writel(dev, Mailbox4, p4);
+
+ /*
+ * Clear the synch command doorbell to start on a clean slate.
+ */
+ sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+ /*
+ * Signal that there is a new synch command
+ */
+ sa_writew(dev, DoorbellReg_s, DOORBELL_0);
+
+ ok = 0;
+ start = jiffies;
+
+ while(time_before(jiffies, start+30*HZ))
+ {
+ /*
+ * Delay 5uS so that the monitor gets access
+ */
+ udelay(5);
+ /*
+ * Mon110 will set doorbell0 bit when it has
+ * completed the command.
+ */
+ if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
+ ok = 1;
+ break;
+ }
+ msleep(1);
+ }
+
+ if (ok != 1)
+ return -ETIMEDOUT;
+ /*
+ * Clear the synch command doorbell.
+ */
+ sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+ /*
+ * Pull the synch status from Mailbox 0.
+ */
+ if (ret)
+ *ret = sa_readl(dev, Mailbox0);
+ if (r1)
+ *r1 = sa_readl(dev, Mailbox1);
+ if (r2)
+ *r2 = sa_readl(dev, Mailbox2);
+ if (r3)
+ *r3 = sa_readl(dev, Mailbox3);
+ if (r4)
+ *r4 = sa_readl(dev, Mailbox4);
+ return 0;
+}
+
+/**
+ * aac_sa_interrupt_adapter - interrupt an adapter
+ * @dev: Which adapter to enable.
+ *
+ * Breakpoint an adapter.
+ */
+
+static void aac_sa_interrupt_adapter (struct aac_dev *dev)
+{
+ sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_sa_start_adapter - activate adapter
+ * @dev: Adapter
+ *
+ * Start up processing on an ARM based AAC adapter
+ */
+
+static void aac_sa_start_adapter(struct aac_dev *dev)
+{
+ struct aac_init *init;
+ /*
+ * Fill in the remaining pieces of the init.
+ */
+ init = dev->init;
+ init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+ /* We can only use a 32 bit address here */
+ sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+ (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+}
+
+static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
+{
+ return -EINVAL;
+}
+
+/**
+ * aac_sa_check_health
+ * @dev: device to check if healthy
+ *
+ * Will attempt to determine if the specified adapter is alive and
+ * capable of handling requests, returning 0 if alive.
+ */
+static int aac_sa_check_health(struct aac_dev *dev)
+{
+ long status = sa_readl(dev, Mailbox7);
+
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ if (status & SELF_TEST_FAILED)
+ return -1;
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ if (status & KERNEL_PANIC)
+ return -2;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ if (!(status & KERNEL_UP_AND_RUNNING))
+ return -3;
+ /*
+ * Everything is OK
+ */
+ return 0;
+}
+
+/**
+ * aac_sa_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.sa);
+ return 0;
+ }
+ dev->base = dev->regs.sa = ioremap(dev->base_start, size);
+ return (dev->base == NULL) ? -1 : 0;
+}
+
+/**
+ * aac_sa_init - initialize an ARM based AAC card
+ * @dev: device to configure
+ *
+ * Allocate and set up resources for the ARM based AAC variants. The
+ * device_interface in the commregion will be allocated and linked
+ * to the comm region.
+ */
+
+int aac_sa_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int instance;
+ const char *name;
+
+ instance = dev->id;
+ name = dev->name;
+
+ if (aac_sa_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
+ printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
+ printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes.
+ */
+ while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
+ if (time_after(jiffies, start+startup_timeout*HZ)) {
+ status = sa_readl(dev, Mailbox7);
+ printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ name, instance, status);
+ goto error_iounmap;
+ }
+ msleep(1);
+ }
+
+ /*
+ * Fill in the function dispatch table.
+ */
+
+ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+ dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
+ /*
+ * First clear out all interrupts. Then enable the one's that
+ * we can handle.
+ */
+ aac_adapter_disable_int(dev);
+ aac_adapter_enable_int(dev);
+
+ if(aac_init_adapter(dev) == NULL)
+ goto error_irq;
+ dev->sync_mode = 0; /* sync. mode not supported */
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", (void *)dev) < 0) {
+ printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = dev->base_start;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
+ aac_adapter_enable_int(dev);
+
+ /*
+ * Tell the adapter that all is configure, and it can start
+ * accepting requests
+ */
+ aac_sa_start_adapter(dev);
+ return 0;
+
+error_irq:
+ aac_sa_disable_interrupt(dev);
+ free_irq(dev->pdev->irq, (void *)dev);
+
+error_iounmap:
+
+ return -1;
+}
+
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
new file mode 100644
index 000000000..4596e9dd7
--- /dev/null
+++ b/drivers/scsi/aacraid/src.c
@@ -0,0 +1,1063 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * src.c
+ *
+ * Abstract: Hardware Device Interface for PMC SRC based controllers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static int aac_src_get_sync_status(struct aac_dev *dev);
+
+irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+{
+ struct aac_msix_ctx *ctx;
+ struct aac_dev *dev;
+ unsigned long bellbits, bellbits_shifted;
+ int vector_no;
+ int isFastResponse, mode;
+ u32 index, handle;
+
+ ctx = (struct aac_msix_ctx *)dev_id;
+ dev = ctx->dev;
+ vector_no = ctx->vector_no;
+
+ if (dev->msi_enabled) {
+ mode = AAC_INT_MODE_MSI;
+ if (vector_no == 0) {
+ bellbits = src_readl(dev, MUnit.ODR_MSI);
+ if (bellbits & 0x40000)
+ mode |= AAC_INT_MODE_AIF;
+ if (bellbits & 0x1000)
+ mode |= AAC_INT_MODE_SYNC;
+ }
+ } else {
+ mode = AAC_INT_MODE_INTX;
+ bellbits = src_readl(dev, MUnit.ODR_R);
+ if (bellbits & PmDoorBellResponseSent) {
+ bellbits = PmDoorBellResponseSent;
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ } else {
+ bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+
+ if (bellbits_shifted & DoorBellAifPending)
+ mode |= AAC_INT_MODE_AIF;
+ else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
+ mode |= AAC_INT_MODE_SYNC;
+ }
+ }
+
+ if (mode & AAC_INT_MODE_SYNC) {
+ unsigned long sflags;
+ struct list_head *entry;
+ int send_it = 0;
+ extern int aac_sync_mode;
+
+ if (!aac_sync_mode && !dev->msi_enabled) {
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ }
+
+ if (dev->sync_fib) {
+ if (dev->sync_fib->callback)
+ dev->sync_fib->callback(dev->sync_fib->callback_data,
+ dev->sync_fib);
+ spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+ if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+ dev->management_fib_count--;
+ up(&dev->sync_fib->event_wait);
+ }
+ spin_unlock_irqrestore(&dev->sync_fib->event_lock,
+ sflags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (!list_empty(&dev->sync_fib_list)) {
+ entry = dev->sync_fib_list.next;
+ dev->sync_fib = list_entry(entry,
+ struct fib,
+ fiblink);
+ list_del(entry);
+ send_it = 1;
+ } else {
+ dev->sync_fib = NULL;
+ }
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ if (send_it) {
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)dev->sync_fib->hw_fib_pa,
+ 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ }
+ if (!dev->msi_enabled)
+ mode = 0;
+
+ }
+
+ if (mode & AAC_INT_MODE_AIF) {
+ /* handle AIF */
+ aac_intr_normal(dev, 0, 2, 0, NULL);
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
+ mode = 0;
+ }
+
+ if (mode) {
+ index = dev->host_rrq_idx[vector_no];
+
+ for (;;) {
+ isFastResponse = 0;
+ /* remove toggle bit (31) */
+ handle = (dev->host_rrq[index] & 0x7fffffff);
+ /* check fast response bit (30) */
+ if (handle & 0x40000000)
+ isFastResponse = 1;
+ handle &= 0x0000ffff;
+ if (handle == 0)
+ break;
+ if (dev->msi_enabled && dev->max_msix > 1)
+ atomic_dec(&dev->rrq_outstanding[vector_no]);
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ dev->host_rrq[index++] = 0;
+ if (index == (vector_no + 1) * dev->vector_cap)
+ index = vector_no * dev->vector_cap;
+ dev->host_rrq_idx[vector_no] = index;
+ }
+ mode = 0;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * aac_src_disable_interrupt - Disable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_src_disable_interrupt(struct aac_dev *dev)
+{
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+}
+
+/**
+ * aac_src_enable_interrupt_message - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_src_enable_interrupt_message(struct aac_dev *dev)
+{
+ aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
+}
+
+/**
+ * src_sync_cmd - send a command and wait
+ * @dev: Adapter
+ * @command: Command to execute
+ * @p1: first parameter
+ * @ret: adapter status
+ *
+ * This routine will send a synchronous command to the adapter and wait
+ * for its completion.
+ */
+
+static int src_sync_cmd(struct aac_dev *dev, u32 command,
+ u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+ u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+ unsigned long start;
+ unsigned long delay;
+ int ok;
+
+ /*
+ * Write the command into Mailbox 0
+ */
+ writel(command, &dev->IndexRegs->Mailbox[0]);
+ /*
+ * Write the parameters into Mailboxes 1 - 6
+ */
+ writel(p1, &dev->IndexRegs->Mailbox[1]);
+ writel(p2, &dev->IndexRegs->Mailbox[2]);
+ writel(p3, &dev->IndexRegs->Mailbox[3]);
+ writel(p4, &dev->IndexRegs->Mailbox[4]);
+
+ /*
+ * Clear the synch command doorbell to start on a clean slate.
+ */
+ if (!dev->msi_enabled)
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+ /*
+ * Disable doorbell interrupts
+ */
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+
+ /*
+ * Force the completion of the mask register write before issuing
+ * the interrupt.
+ */
+ src_readl(dev, MUnit.OIMR);
+
+ /*
+ * Signal that there is a new synch command
+ */
+ src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
+
+ if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
+ ok = 0;
+ start = jiffies;
+
+ if (command == IOP_RESET_ALWAYS) {
+ /* Wait up to 10 sec */
+ delay = 10*HZ;
+ } else {
+ /* Wait up to 5 minutes */
+ delay = 300*HZ;
+ }
+ while (time_before(jiffies, start+delay)) {
+ udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
+ /*
+ * Mon960 will set doorbell0 bit when it has completed the command.
+ */
+ if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
+ /*
+ * Clear the doorbell.
+ */
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev,
+ AAC_CLEAR_SYNC_BIT);
+ else
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ ok = 1;
+ break;
+ }
+ /*
+ * Yield the processor in case we are slow
+ */
+ msleep(1);
+ }
+ if (unlikely(ok != 1)) {
+ /*
+ * Restore interrupt mask even though we timed out
+ */
+ aac_adapter_enable_int(dev);
+ return -ETIMEDOUT;
+ }
+ /*
+ * Pull the synch status from Mailbox 0.
+ */
+ if (status)
+ *status = readl(&dev->IndexRegs->Mailbox[0]);
+ if (r1)
+ *r1 = readl(&dev->IndexRegs->Mailbox[1]);
+ if (r2)
+ *r2 = readl(&dev->IndexRegs->Mailbox[2]);
+ if (r3)
+ *r3 = readl(&dev->IndexRegs->Mailbox[3]);
+ if (r4)
+ *r4 = readl(&dev->IndexRegs->Mailbox[4]);
+ if (command == GET_COMM_PREFERRED_SETTINGS)
+ dev->max_msix =
+ readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
+ /*
+ * Clear the synch command doorbell.
+ */
+ if (!dev->msi_enabled)
+ src_writel(dev,
+ MUnit.ODR_C,
+ OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ }
+
+ /*
+ * Restore interrupt mask
+ */
+ aac_adapter_enable_int(dev);
+ return 0;
+}
+
+/**
+ * aac_src_interrupt_adapter - interrupt adapter
+ * @dev: Adapter
+ *
+ * Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_src_interrupt_adapter(struct aac_dev *dev)
+{
+ src_sync_cmd(dev, BREAKPOINT_REQUEST,
+ 0, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_src_notify_adapter - send an event to the adapter
+ * @dev: Adapter
+ * @event: Event to send
+ *
+ * Notify the i960 that something it probably cares about has
+ * happened.
+ */
+
+static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
+{
+ switch (event) {
+
+ case AdapNormCmdQue:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
+ break;
+ case HostNormRespNotFull:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
+ break;
+ case AdapNormRespQue:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
+ break;
+ case HostNormCmdNotFull:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
+ break;
+ case FastIo:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
+ break;
+ case AdapPrintfDone:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/**
+ * aac_src_start_adapter - activate adapter
+ * @dev: Adapter
+ *
+ * Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_src_start_adapter(struct aac_dev *dev)
+{
+ struct aac_init *init;
+ int i;
+
+ /* reset host_rrq_idx first */
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->host_rrq_idx[i] = i * dev->vector_cap;
+ atomic_set(&dev->rrq_outstanding[i], 0);
+ }
+ dev->fibs_pushed_no = 0;
+
+ init = dev->init;
+ init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+
+ /* We can only use a 32 bit address here */
+ src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
+ 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_src_check_health
+ * @dev: device to check if healthy
+ *
+ * Will attempt to determine if the specified adapter is alive and
+ * capable of handling requests, returning 0 if alive.
+ */
+static int aac_src_check_health(struct aac_dev *dev)
+{
+ u32 status = src_readl(dev, MUnit.OMR);
+
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ if (unlikely(status & SELF_TEST_FAILED))
+ return -1;
+
+ /*
+ * Check to see if the board panic'd.
+ */
+ if (unlikely(status & KERNEL_PANIC))
+ return (status >> 16) & 0xFF;
+ /*
+ * Wait for the adapter to be up and running.
+ */
+ if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+ return -3;
+ /*
+ * Everything is OK
+ */
+ return 0;
+}
+
+/**
+ * aac_src_deliver_message
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+static int aac_src_deliver_message(struct fib *fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ u32 fibsize;
+ dma_addr_t address;
+ struct aac_fib_xporthdr *pFibX;
+ u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+
+ atomic_inc(&q->numpending);
+
+ if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
+ dev->max_msix > 1) {
+ u_int16_t vector_no, first_choice = 0xffff;
+
+ vector_no = dev->fibs_pushed_no % dev->max_msix;
+ do {
+ vector_no += 1;
+ if (vector_no == dev->max_msix)
+ vector_no = 1;
+ if (atomic_read(&dev->rrq_outstanding[vector_no]) <
+ dev->vector_cap)
+ break;
+ if (0xffff == first_choice)
+ first_choice = vector_no;
+ else if (vector_no == first_choice)
+ break;
+ } while (1);
+ if (vector_no == first_choice)
+ vector_no = 0;
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+ if (dev->fibs_pushed_no == 0xffffffff)
+ dev->fibs_pushed_no = 0;
+ else
+ dev->fibs_pushed_no++;
+ fib->hw_fib_va->header.Handle += (vector_no << 16);
+ }
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ /* Calculate the amount to the fibsize bits */
+ fibsize = (hdr_size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ return -EMSGSIZE;
+ /* New FIB header, 32-bit */
+ address = fib->hw_fib_pa;
+ fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+ fib->hw_fib_va->header.SenderFibAddress = (u32)address;
+ fib->hw_fib_va->header.u.TimeStamp = 0;
+ BUG_ON(upper_32_bits(address) != 0L);
+ address |= fibsize;
+ } else {
+ /* Calculate the amount to the fibsize bits */
+ fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ return -EMSGSIZE;
+
+ /* Fill XPORT header */
+ pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
+ pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
+ pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
+ pFibX->Size = cpu_to_le32(hdr_size);
+
+ /*
+ * The xport header has been 32-byte aligned for us so that fibsize
+ * can be masked out of this address by hardware. -- BenC
+ */
+ address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
+ if (address & (ALIGN32 - 1))
+ return -EINVAL;
+ address |= fibsize;
+ }
+
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
+
+ return 0;
+}
+
+/**
+ * aac_src_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_src_ioremap(struct aac_dev *dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
+ iounmap(dev->regs.src.bar0);
+ dev->base = dev->regs.src.bar0 = NULL;
+ return 0;
+ }
+ dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
+ AAC_MIN_SRC_BAR1_SIZE);
+ dev->base = NULL;
+ if (dev->regs.src.bar1 == NULL)
+ return -1;
+ dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
+ if (dev->base == NULL) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
+ return -1;
+ }
+ dev->IndexRegs = &((struct src_registers __iomem *)
+ dev->base)->u.tupelo.IndexRegs;
+ return 0;
+}
+
+/**
+ * aac_srcv_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.src.bar0);
+ dev->base = dev->regs.src.bar0 = NULL;
+ return 0;
+ }
+ dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
+ if (dev->base == NULL)
+ return -1;
+ dev->IndexRegs = &((struct src_registers __iomem *)
+ dev->base)->u.denali.IndexRegs;
+ return 0;
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
+{
+ u32 var, reset_mask;
+
+ if (bled >= 0) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
+ if ((bled || (var != 0x00000001)) &&
+ !dev->doorbell_mask)
+ return -EINVAL;
+ else if (dev->doorbell_mask) {
+ reset_mask = dev->doorbell_mask;
+ bled = 0;
+ var = 0x00000001;
+ }
+
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
+ aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+ dev->msi_enabled = 0;
+ msleep(5000); /* Delay 5 seconds */
+ }
+
+ if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) {
+ src_writel(dev, MUnit.IDR, reset_mask);
+ ssleep(45);
+ } else {
+ src_writel(dev, MUnit.IDR, 0x100);
+ ssleep(45);
+ }
+ }
+
+ if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+ return -ENODEV;
+
+ if (startup_timeout < 300)
+ startup_timeout = 300;
+
+ return 0;
+}
+
+/**
+ * aac_src_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+int aac_src_select_comm(struct aac_dev *dev, int comm)
+{
+ switch (comm) {
+ case AAC_COMM_MESSAGE:
+ dev->a_ops.adapter_intr = aac_src_intr_message;
+ dev->a_ops.adapter_deliver = aac_src_deliver_message;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * aac_src_init - initialize an Cardinal Frey Bar card
+ * @dev: device to configure
+ *
+ */
+
+int aac_src_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ const char *name = dev->name;
+
+ dev->a_ops.adapter_ioremap = aac_src_ioremap;
+ dev->a_ops.adapter_comm = aac_src_select_comm;
+
+ dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ if ((aac_reset_devices || reset_devices) &&
+ !aac_src_restart_adapter(dev, 0))
+ ++restart;
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & KERNEL_PANIC) {
+ if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) &
+ KERNEL_UP_AND_RUNNING)) {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_src_restart_adapter(dev,
+ aac_src_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_src_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_src_check_health;
+ dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+ /*
+ * First clear out all interrupts. Then enable the one's that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+ aac_adapter_disable_int(dev);
+ src_writel(dev, MUnit.ODR_C, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+ goto error_iounmap;
+
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {
+
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = pci_resource_start(dev->pdev, 2);
+ dev->dbg_base_mapped = dev->regs.src.bar1;
+ dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+
+ aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode) {
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+ }
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
+
+/**
+ * aac_srcv_init - initialize an SRCv card
+ * @dev: device to configure
+ *
+ */
+
+int aac_srcv_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ int i, j;
+ const char *name = dev->name;
+ int cpu;
+
+ dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
+ dev->a_ops.adapter_comm = aac_src_select_comm;
+
+ dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ if ((aac_reset_devices || reset_devices) &&
+ !aac_src_restart_adapter(dev, 0))
+ ++restart;
+ /*
+ * Check to see if flash update is running.
+ * Wait for the adapter to be up and running. Wait up to 5 minutes
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & FLASH_UPD_PENDING) {
+ start = jiffies;
+ do {
+ status = src_readl(dev, MUnit.OMR);
+ if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
+ printk(KERN_ERR "%s%d: adapter flash update failed.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ } while (!(status & FLASH_UPD_SUCCESS) &&
+ !(status & FLASH_UPD_FAILED));
+ /* Delay 10 seconds.
+ * Because right now FW is doing a soft reset,
+ * do not read scratch pad register at this time
+ */
+ ssleep(10);
+ }
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & KERNEL_PANIC) {
+ if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) &
+ KERNEL_UP_AND_RUNNING) ||
+ status == 0xffffffff) {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_src_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_src_check_health;
+ dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+ /*
+ * First clear out all interrupts. Then enable the one's that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+ aac_adapter_disable_int(dev);
+ src_writel(dev, MUnit.ODR_C, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
+ goto error_iounmap;
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
+ if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->aac_msix[i].vector_no = i;
+ dev->aac_msix[i].dev = dev;
+
+ if (request_irq(dev->msixentry[i].vector,
+ dev->a_ops.adapter_intr,
+ 0,
+ "aacraid",
+ &(dev->aac_msix[i]))) {
+ printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
+ name, instance, i);
+ for (j = 0 ; j < i ; j++)
+ free_irq(dev->msixentry[j].vector,
+ &(dev->aac_msix[j]));
+ pci_disable_msix(dev->pdev);
+ goto error_iounmap;
+ }
+ if (irq_set_affinity_hint(
+ dev->msixentry[i].vector,
+ get_cpu_mask(cpu))) {
+ printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
+ name, instance, cpu);
+ }
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ } else {
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED,
+ "aacraid",
+ &(dev->aac_msix[0])) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ }
+ dev->dbg_base = dev->base_start;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+
+ aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode) {
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+ }
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
+
+void aac_src_access_devreg(struct aac_dev *dev, int mode)
+{
+ u_int32_t val;
+
+ switch (mode) {
+ case AAC_ENABLE_INTERRUPT:
+ src_writel(dev,
+ MUnit.OIMR,
+ dev->OIMR = (dev->msi_enabled ?
+ AAC_INT_ENABLE_TYPE1_MSIX :
+ AAC_INT_ENABLE_TYPE1_INTX));
+ break;
+
+ case AAC_DISABLE_INTERRUPT:
+ src_writel(dev,
+ MUnit.OIMR,
+ dev->OIMR = AAC_INT_DISABLE_ALL);
+ break;
+
+ case AAC_ENABLE_MSIX:
+ /* set bit 6 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x40;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ /* unmask int. */
+ val = PMC_ALL_INTERRUPT_BITS;
+ src_writel(dev, MUnit.IOAR, val);
+ val = src_readl(dev, MUnit.OIMR);
+ src_writel(dev,
+ MUnit.OIMR,
+ val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
+ break;
+
+ case AAC_DISABLE_MSIX:
+ /* reset bit 6 */
+ val = src_readl(dev, MUnit.IDR);
+ val &= ~0x40;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_CLEAR_AIF_BIT:
+ /* set bit 5 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x20;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_CLEAR_SYNC_BIT:
+ /* set bit 4 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x10;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ break;
+
+ case AAC_ENABLE_INTX:
+ /* set bit 7 */
+ val = src_readl(dev, MUnit.IDR);
+ val |= 0x80;
+ src_writel(dev, MUnit.IDR, val);
+ src_readl(dev, MUnit.IDR);
+ /* unmask int. */
+ val = PMC_ALL_INTERRUPT_BITS;
+ src_writel(dev, MUnit.IOAR, val);
+ src_readl(dev, MUnit.IOAR);
+ val = src_readl(dev, MUnit.OIMR);
+ src_writel(dev, MUnit.OIMR,
+ val & (~(PMC_GLOBAL_INT_BIT2)));
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int aac_src_get_sync_status(struct aac_dev *dev)
+{
+
+ int val;
+
+ if (dev->msi_enabled)
+ val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
+ else
+ val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
+
+ return val;
+}
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
new file mode 100644
index 000000000..68d2320db
--- /dev/null
+++ b/drivers/scsi/advansys.c
@@ -0,0 +1,12319 @@
+#define DRV_NAME "advansys"
+#define ASC_VERSION "3.4" /* AdvanSys Driver Version */
+
+/*
+ * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-2000 Advanced System Products, Inc.
+ * Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
+ * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx>
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys)
+ * changed its name to ConnectCom Solutions, Inc.
+ * On June 18, 2001 Initio Corp. acquired ConnectCom's SCSI assets
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/isa.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+/* FIXME:
+ *
+ * 1. Although all of the necessary command mapping places have the
+ * appropriate dma_map.. APIs, the driver still processes its internal
+ * queue using bus_to_virt() and virt_to_bus() which are illegal under
+ * the API. The entire queue processing structure will need to be
+ * altered to fix this.
+ * 2. Need to add memory mapping workaround. Test the memory mapping.
+ * If it doesn't work revert to I/O port access. Can a test be done
+ * safely?
+ * 3. Handle an interrupt not working. Keep an interrupt counter in
+ * the interrupt handler. In the timeout function if the interrupt
+ * has not occurred then print a message and run in polled mode.
+ * 4. Need to add support for target mode commands, cf. CAM XPT.
+ * 5. check DMA mapping functions for failure
+ * 6. Use scsi_transport_spi
+ * 7. advansys_info is not safe against multiple simultaneous callers
+ * 8. Add module_param to override ISA/VLB ioport array
+ */
+#warning this driver is still not properly converted to the DMA API
+
+/* Enable driver /proc statistics. */
+#define ADVANSYS_STATS
+
+/* Enable driver tracing. */
+#undef ADVANSYS_DEBUG
+
+/*
+ * Portable Data Types
+ *
+ * Any instance where a 32-bit long or pointer type is assumed
+ * for precision or HW defined structures, the following define
+ * types must be used. In Linux the char, short, and int types
+ * are all consistent at 8, 16, and 32 bits respectively. Pointers
+ * and long types are 64 bits on Alpha and UltraSPARC.
+ */
+#define ASC_PADDR __u32 /* Physical/Bus address data type. */
+#define ASC_VADDR __u32 /* Virtual address data type. */
+#define ASC_DCNT __u32 /* Unsigned Data count type. */
+#define ASC_SDCNT __s32 /* Signed Data count type. */
+
+typedef unsigned char uchar;
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#define ERR (-1)
+#define UW_ERR (uint)(0xFFFF)
+#define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0)
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_1200A 0x1100
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
+#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
+#define PCI_DEVICE_ID_38C0800_REV1 0x2500
+#define PCI_DEVICE_ID_38C1600_REV1 0x2700
+
+/*
+ * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists.
+ * The SRB structure will have to be changed and the ASC_SRB2SCSIQ()
+ * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the
+ * SRB structure.
+ */
+#define CC_VERY_LONG_SG_LIST 0
+#define ASC_SRB2SCSIQ(srb_ptr) (srb_ptr)
+
+#define PortAddr unsigned int /* port address size */
+#define inp(port) inb(port)
+#define outp(port, byte) outb((byte), (port))
+
+#define inpw(port) inw(port)
+#define outpw(port, word) outw((word), (port))
+
+#define ASC_MAX_SG_QUEUE 7
+#define ASC_MAX_SG_LIST 255
+
+#define ASC_CS_TYPE unsigned short
+
+#define ASC_IS_ISA (0x0001)
+#define ASC_IS_ISAPNP (0x0081)
+#define ASC_IS_EISA (0x0002)
+#define ASC_IS_PCI (0x0004)
+#define ASC_IS_PCI_ULTRA (0x0104)
+#define ASC_IS_PCMCIA (0x0008)
+#define ASC_IS_MCA (0x0020)
+#define ASC_IS_VL (0x0040)
+#define ASC_IS_WIDESCSI_16 (0x0100)
+#define ASC_IS_WIDESCSI_32 (0x0200)
+#define ASC_IS_BIG_ENDIAN (0x8000)
+
+#define ASC_CHIP_MIN_VER_VL (0x01)
+#define ASC_CHIP_MAX_VER_VL (0x07)
+#define ASC_CHIP_MIN_VER_PCI (0x09)
+#define ASC_CHIP_MAX_VER_PCI (0x0F)
+#define ASC_CHIP_VER_PCI_BIT (0x08)
+#define ASC_CHIP_MIN_VER_ISA (0x11)
+#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
+#define ASC_CHIP_MAX_VER_ISA (0x27)
+#define ASC_CHIP_VER_ISA_BIT (0x30)
+#define ASC_CHIP_VER_ISAPNP_BIT (0x20)
+#define ASC_CHIP_VER_ASYN_BUG (0x21)
+#define ASC_CHIP_VER_PCI 0x08
+#define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02)
+#define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03)
+#define ASC_CHIP_MIN_VER_EISA (0x41)
+#define ASC_CHIP_MAX_VER_EISA (0x47)
+#define ASC_CHIP_VER_EISA_BIT (0x40)
+#define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3)
+#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL)
+#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL)
+#define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL)
+
+#define ASC_SCSI_ID_BITS 3
+#define ASC_SCSI_TIX_TYPE uchar
+#define ASC_ALL_DEVICE_BIT_SET 0xFF
+#define ASC_SCSI_BIT_ID_TYPE uchar
+#define ASC_MAX_TID 7
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFF
+#define ASC_MAX_SENSE_LEN 32
+#define ASC_MIN_SENSE_LEN 14
+#define ASC_SCSI_RESET_HOLD_TIME_US 60
+
+/*
+ * Narrow boards only support 12-byte commands, while wide boards
+ * extend to 16-byte commands.
+ */
+#define ASC_MAX_CDB_LEN 12
+#define ADV_MAX_CDB_LEN 16
+
+#define MS_SDTR_LEN 0x03
+#define MS_WDTR_LEN 0x02
+
+#define ASC_SG_LIST_PER_Q 7
+#define QS_FREE 0x00
+#define QS_READY 0x01
+#define QS_DISC1 0x02
+#define QS_DISC2 0x04
+#define QS_BUSY 0x08
+#define QS_ABORTED 0x40
+#define QS_DONE 0x80
+#define QC_NO_CALLBACK 0x01
+#define QC_SG_SWAP_QUEUE 0x02
+#define QC_SG_HEAD 0x04
+#define QC_DATA_IN 0x08
+#define QC_DATA_OUT 0x10
+#define QC_URGENT 0x20
+#define QC_MSG_OUT 0x40
+#define QC_REQ_SENSE 0x80
+#define QCSG_SG_XFER_LIST 0x02
+#define QCSG_SG_XFER_MORE 0x04
+#define QCSG_SG_XFER_END 0x08
+#define QD_IN_PROGRESS 0x00
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+#define QD_INVALID_REQUEST 0x80
+#define QD_INVALID_HOST_NUM 0x81
+#define QD_INVALID_DEVICE 0x82
+#define QD_ERR_INTERNAL 0xFF
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_DATA_UNDER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14
+#define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21
+#define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22
+#define QHSTA_D_HOST_ABORT_FAILED 0x23
+#define QHSTA_D_EXE_SCSI_Q_FAILED 0x24
+#define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25
+#define QHSTA_D_ASPI_NO_BUF_POOL 0x26
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_TARGET_STATUS_BUSY 0x45
+#define QHSTA_M_BAD_TAG_CODE 0x46
+#define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47
+#define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48
+#define QHSTA_D_LRAM_CMP_ERROR 0x81
+#define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1
+#define ASC_FLAG_SCSIQ_REQ 0x01
+#define ASC_FLAG_BIOS_SCSIQ_REQ 0x02
+#define ASC_FLAG_BIOS_ASYNC_IO 0x04
+#define ASC_FLAG_SRB_LINEAR_ADDR 0x08
+#define ASC_FLAG_WIN16 0x10
+#define ASC_FLAG_WIN32 0x20
+#define ASC_FLAG_ISA_OVER_16MB 0x40
+#define ASC_FLAG_DOS_VM_CALLBACK 0x80
+#define ASC_TAG_FLAG_EXTRA_BYTES 0x10
+#define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04
+#define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08
+#define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40
+#define ASC_SCSIQ_CPY_BEG 4
+#define ASC_SCSIQ_SGHD_CPY_BEG 2
+#define ASC_SCSIQ_B_FWD 0
+#define ASC_SCSIQ_B_BWD 1
+#define ASC_SCSIQ_B_STATUS 2
+#define ASC_SCSIQ_B_QNO 3
+#define ASC_SCSIQ_B_CNTL 4
+#define ASC_SCSIQ_B_SG_QUEUE_CNT 5
+#define ASC_SCSIQ_D_DATA_ADDR 8
+#define ASC_SCSIQ_D_DATA_CNT 12
+#define ASC_SCSIQ_B_SENSE_LEN 20
+#define ASC_SCSIQ_DONE_INFO_BEG 22
+#define ASC_SCSIQ_D_SRBPTR 22
+#define ASC_SCSIQ_B_TARGET_IX 26
+#define ASC_SCSIQ_B_CDB_LEN 28
+#define ASC_SCSIQ_B_TAG_CODE 29
+#define ASC_SCSIQ_W_VM_ID 30
+#define ASC_SCSIQ_DONE_STATUS 32
+#define ASC_SCSIQ_HOST_STATUS 33
+#define ASC_SCSIQ_SCSI_STATUS 34
+#define ASC_SCSIQ_CDB_BEG 36
+#define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56
+#define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60
+#define ASC_SCSIQ_B_FIRST_SG_WK_QP 48
+#define ASC_SCSIQ_B_SG_WK_QP 49
+#define ASC_SCSIQ_B_SG_WK_IX 50
+#define ASC_SCSIQ_W_ALT_DC1 52
+#define ASC_SCSIQ_B_LIST_CNT 6
+#define ASC_SCSIQ_B_CUR_LIST_CNT 7
+#define ASC_SGQ_B_SG_CNTL 4
+#define ASC_SGQ_B_SG_HEAD_QP 5
+#define ASC_SGQ_B_SG_LIST_CNT 6
+#define ASC_SGQ_B_SG_CUR_LIST_CNT 7
+#define ASC_SGQ_LIST_BEG 8
+#define ASC_DEF_SCSI1_QNG 4
+#define ASC_MAX_SCSI1_QNG 4
+#define ASC_DEF_SCSI2_QNG 16
+#define ASC_MAX_SCSI2_QNG 32
+#define ASC_TAG_CODE_MASK 0x23
+#define ASC_STOP_REQ_RISC_STOP 0x01
+#define ASC_STOP_ACK_RISC_STOP 0x03
+#define ASC_STOP_CLEAN_UP_BUSY_Q 0x10
+#define ASC_STOP_CLEAN_UP_DISC_Q 0x20
+#define ASC_STOP_HOST_REQ_RISC_HALT 0x40
+#define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS))
+#define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid))
+#define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID))
+#define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID)
+#define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID)
+#define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN)
+#define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6))
+
+typedef struct asc_scsiq_1 {
+ uchar status;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_queue_cnt;
+ uchar target_id;
+ uchar target_lun;
+ ASC_PADDR data_addr;
+ ASC_DCNT data_cnt;
+ ASC_PADDR sense_addr;
+ uchar sense_len;
+ uchar extra_bytes;
+} ASC_SCSIQ_1;
+
+typedef struct asc_scsiq_2 {
+ ASC_VADDR srb_ptr;
+ uchar target_ix;
+ uchar flag;
+ uchar cdb_len;
+ uchar tag_code;
+ ushort vm_id;
+} ASC_SCSIQ_2;
+
+typedef struct asc_scsiq_3 {
+ uchar done_stat;
+ uchar host_stat;
+ uchar scsi_stat;
+ uchar scsi_msg;
+} ASC_SCSIQ_3;
+
+typedef struct asc_scsiq_4 {
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar y_first_sg_list_qp;
+ uchar y_working_sg_qp;
+ uchar y_working_sg_ix;
+ uchar y_res;
+ ushort x_req_count;
+ ushort x_reconnect_rtn;
+ ASC_PADDR x_saved_data_addr;
+ ASC_DCNT x_saved_data_cnt;
+} ASC_SCSIQ_4;
+
+typedef struct asc_q_done_info {
+ ASC_SCSIQ_2 d2;
+ ASC_SCSIQ_3 d3;
+ uchar q_status;
+ uchar q_no;
+ uchar cntl;
+ uchar sense_len;
+ uchar extra_bytes;
+ uchar res;
+ ASC_DCNT remain_bytes;
+} ASC_QDONE_INFO;
+
+typedef struct asc_sg_list {
+ ASC_PADDR addr;
+ ASC_DCNT bytes;
+} ASC_SG_LIST;
+
+typedef struct asc_sg_head {
+ ushort entry_cnt;
+ ushort queue_cnt;
+ ushort entry_to_copy;
+ ushort res;
+ ASC_SG_LIST sg_list[0];
+} ASC_SG_HEAD;
+
+typedef struct asc_scsi_q {
+ ASC_SCSIQ_1 q1;
+ ASC_SCSIQ_2 q2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ ushort remain_sg_entry_cnt;
+ ushort next_sg_index;
+} ASC_SCSI_Q;
+
+typedef struct asc_scsi_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ uchar *sense_ptr;
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_REQ_Q;
+
+typedef struct asc_scsi_bios_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ uchar *sense_ptr;
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_BIOS_REQ_Q;
+
+typedef struct asc_risc_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SCSIQ_1 i1;
+ ASC_SCSIQ_2 i2;
+ ASC_SCSIQ_3 i3;
+ ASC_SCSIQ_4 i4;
+} ASC_RISC_Q;
+
+typedef struct asc_sg_list_q {
+ uchar seq_no;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_head_qp;
+ uchar sg_list_cnt;
+ uchar sg_cur_list_cnt;
+} ASC_SG_LIST_Q;
+
+typedef struct asc_risc_sg_list_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SG_LIST_Q sg;
+ ASC_SG_LIST sg_list[7];
+} ASC_RISC_SG_LIST_Q;
+
+#define ASCQ_ERR_Q_STATUS 0x0D
+#define ASCQ_ERR_CUR_QNG 0x17
+#define ASCQ_ERR_SG_Q_LINKS 0x18
+#define ASCQ_ERR_ISR_RE_ENTRY 0x1A
+#define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B
+#define ASCQ_ERR_ISR_ON_CRITICAL 0x1C
+
+/*
+ * Warning code values are set in ASC_DVC_VAR 'warn_code'.
+ */
+#define ASC_WARN_NO_ERROR 0x0000
+#define ASC_WARN_IO_PORT_ROTATE 0x0001
+#define ASC_WARN_EEPROM_CHKSUM 0x0002
+#define ASC_WARN_IRQ_MODIFIED 0x0004
+#define ASC_WARN_AUTO_CONFIG 0x0008
+#define ASC_WARN_CMD_QNG_CONFLICT 0x0010
+#define ASC_WARN_EEPROM_RECOVER 0x0020
+#define ASC_WARN_CFG_MSW_RECOVER 0x0040
+
+/*
+ * Error code values are set in {ASC/ADV}_DVC_VAR 'err_code'.
+ */
+#define ASC_IERR_NO_CARRIER 0x0001 /* No more carrier memory */
+#define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */
+#define ASC_IERR_SET_PC_ADDR 0x0004
+#define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */
+#define ASC_IERR_ILLEGAL_CONNECTION 0x0010 /* Illegal cable connection */
+#define ASC_IERR_SINGLE_END_DEVICE 0x0020 /* SE device on DIFF bus */
+#define ASC_IERR_REVERSED_CABLE 0x0040 /* Narrow flat cable reversed */
+#define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */
+#define ASC_IERR_HVD_DEVICE 0x0100 /* HVD device on LVD port */
+#define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */
+#define ASC_IERR_NO_BUS_TYPE 0x0400
+#define ASC_IERR_BIST_PRE_TEST 0x0800 /* BIST pre-test error */
+#define ASC_IERR_BIST_RAM_TEST 0x1000 /* BIST RAM test error */
+#define ASC_IERR_BAD_CHIPTYPE 0x2000 /* Invalid chip_type setting */
+
+#define ASC_DEF_MAX_TOTAL_QNG (0xF0)
+#define ASC_MIN_TAG_Q_PER_DVC (0x04)
+#define ASC_MIN_FREE_Q (0x02)
+#define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q))
+#define ASC_MAX_TOTAL_QNG 240
+#define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16
+#define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8
+#define ASC_MAX_PCI_INRAM_TOTAL_QNG 20
+#define ASC_MAX_INRAM_TAG_QNG 16
+#define ASC_IOADR_GAP 0x10
+#define ASC_SYN_MAX_OFFSET 0x0F
+#define ASC_DEF_SDTR_OFFSET 0x0F
+#define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02
+#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
+
+/* The narrow chip only supports a limited selection of transfer rates.
+ * These are encoded in the range 0..7 or 0..15 depending whether the chip
+ * is Ultra-capable or not. These tables let us convert from one to the other.
+ */
+static const unsigned char asc_syn_xfer_period[8] = {
+ 25, 30, 35, 40, 50, 60, 70, 85
+};
+
+static const unsigned char asc_syn_ultra_xfer_period[16] = {
+ 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107
+};
+
+typedef struct ext_msg {
+ uchar msg_type;
+ uchar msg_len;
+ uchar msg_req;
+ union {
+ struct {
+ uchar sdtr_xfer_period;
+ uchar sdtr_req_ack_offset;
+ } sdtr;
+ struct {
+ uchar wdtr_width;
+ } wdtr;
+ struct {
+ uchar mdp_b3;
+ uchar mdp_b2;
+ uchar mdp_b1;
+ uchar mdp_b0;
+ } mdp;
+ } u_ext_msg;
+ uchar res;
+} EXT_MSG;
+
+#define xfer_period u_ext_msg.sdtr.sdtr_xfer_period
+#define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset
+#define wdtr_width u_ext_msg.wdtr.wdtr_width
+#define mdp_b3 u_ext_msg.mdp_b3
+#define mdp_b2 u_ext_msg.mdp_b2
+#define mdp_b1 u_ext_msg.mdp_b1
+#define mdp_b0 u_ext_msg.mdp_b0
+
+typedef struct asc_dvc_cfg {
+ ASC_SCSI_BIT_ID_TYPE can_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled;
+ ASC_SCSI_BIT_ID_TYPE disc_enable;
+ ASC_SCSI_BIT_ID_TYPE sdtr_enable;
+ uchar chip_scsi_id;
+ uchar isa_dma_speed;
+ uchar isa_dma_channel;
+ uchar chip_version;
+ ushort mcode_date;
+ ushort mcode_version;
+ uchar max_tag_qng[ASC_MAX_TID + 1];
+ uchar sdtr_period_offset[ASC_MAX_TID + 1];
+ uchar adapter_info[6];
+} ASC_DVC_CFG;
+
+#define ASC_DEF_DVC_CNTL 0xFFFF
+#define ASC_DEF_CHIP_SCSI_ID 7
+#define ASC_DEF_ISA_DMA_SPEED 4
+#define ASC_INIT_STATE_BEG_GET_CFG 0x0001
+#define ASC_INIT_STATE_END_GET_CFG 0x0002
+#define ASC_INIT_STATE_BEG_SET_CFG 0x0004
+#define ASC_INIT_STATE_END_SET_CFG 0x0008
+#define ASC_INIT_STATE_BEG_LOAD_MC 0x0010
+#define ASC_INIT_STATE_END_LOAD_MC 0x0020
+#define ASC_INIT_STATE_BEG_INQUIRY 0x0040
+#define ASC_INIT_STATE_END_INQUIRY 0x0080
+#define ASC_INIT_RESET_SCSI_DONE 0x0100
+#define ASC_INIT_STATE_WITHOUT_EEP 0x8000
+#define ASC_BUG_FIX_IF_NOT_DWB 0x0001
+#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002
+#define ASC_MIN_TAGGED_CMD 7
+#define ASC_MAX_SCSI_RESET_WAIT 30
+#define ASC_OVERRUN_BSIZE 64
+
+struct asc_dvc_var; /* Forward Declaration. */
+
+typedef struct asc_dvc_var {
+ PortAddr iop_base;
+ ushort err_code;
+ ushort dvc_cntl;
+ ushort bug_fix_cntl;
+ ushort bus_type;
+ ASC_SCSI_BIT_ID_TYPE init_sdtr;
+ ASC_SCSI_BIT_ID_TYPE sdtr_done;
+ ASC_SCSI_BIT_ID_TYPE use_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE unit_not_ready;
+ ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
+ ASC_SCSI_BIT_ID_TYPE start_motor;
+ uchar *overrun_buf;
+ dma_addr_t overrun_dma;
+ uchar scsi_reset_wait;
+ uchar chip_no;
+ char is_in_int;
+ uchar max_total_qng;
+ uchar cur_total_qng;
+ uchar in_critical_cnt;
+ uchar last_q_shortage;
+ ushort init_state;
+ uchar cur_dvc_qng[ASC_MAX_TID + 1];
+ uchar max_dvc_qng[ASC_MAX_TID + 1];
+ ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1];
+ ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1];
+ const uchar *sdtr_period_tbl;
+ ASC_DVC_CFG *cfg;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always;
+ char redo_scam;
+ ushort res2;
+ uchar dos_int13_table[ASC_MAX_TID + 1];
+ ASC_DCNT max_dma_count;
+ ASC_SCSI_BIT_ID_TYPE no_scam;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer;
+ uchar min_sdtr_index;
+ uchar max_sdtr_index;
+ struct asc_board *drv_ptr;
+ int ptr_map_count;
+ void **ptr_map;
+ ASC_DCNT uc_break;
+} ASC_DVC_VAR;
+
+typedef struct asc_dvc_inq_info {
+ uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_DVC_INQ_INFO;
+
+typedef struct asc_cap_info {
+ ASC_DCNT lba;
+ ASC_DCNT blk_size;
+} ASC_CAP_INFO;
+
+typedef struct asc_cap_info_array {
+ ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_CAP_INFO_ARRAY;
+
+#define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001
+#define ASC_MCNTL_NULL_TARGET (ushort)0x0002
+#define ASC_CNTL_INITIATOR (ushort)0x0001
+#define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002
+#define ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004
+#define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008
+#define ASC_CNTL_NO_SCAM (ushort)0x0010
+#define ASC_CNTL_INT_MULTI_Q (ushort)0x0080
+#define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040
+#define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100
+#define ASC_CNTL_RESET_SCSI (ushort)0x0200
+#define ASC_CNTL_INIT_INQUIRY (ushort)0x0400
+#define ASC_CNTL_INIT_VERBOSE (ushort)0x0800
+#define ASC_CNTL_SCSI_PARITY (ushort)0x1000
+#define ASC_CNTL_BURST_MODE (ushort)0x2000
+#define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000
+#define ASC_EEP_DVC_CFG_BEG_VL 2
+#define ASC_EEP_MAX_DVC_ADDR_VL 15
+#define ASC_EEP_DVC_CFG_BEG 32
+#define ASC_EEP_MAX_DVC_ADDR 45
+#define ASC_EEP_MAX_RETRY 20
+
+/*
+ * These macros keep the chip SCSI id and ISA DMA speed
+ * bitfields in board order. C bitfields aren't portable
+ * between big and little-endian platforms so they are
+ * not used.
+ */
+
+#define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f)
+#define ASC_EEP_GET_DMA_SPD(cfg) (((cfg)->id_speed & 0xf0) >> 4)
+#define ASC_EEP_SET_CHIP_ID(cfg, sid) \
+ ((cfg)->id_speed = ((cfg)->id_speed & 0xf0) | ((sid) & ASC_MAX_TID))
+#define ASC_EEP_SET_DMA_SPD(cfg, spd) \
+ ((cfg)->id_speed = ((cfg)->id_speed & 0x0f) | ((spd) & 0x0f) << 4)
+
+typedef struct asceep_config {
+ ushort cfg_lsw;
+ ushort cfg_msw;
+ uchar init_sdtr;
+ uchar disc_enable;
+ uchar use_cmd_qng;
+ uchar start_motor;
+ uchar max_total_qng;
+ uchar max_tag_qng;
+ uchar bios_scan;
+ uchar power_up_wait;
+ uchar no_scam;
+ uchar id_speed; /* low order 4 bits is chip scsi id */
+ /* high order 4 bits is isa dma speed */
+ uchar dos_int13_table[ASC_MAX_TID + 1];
+ uchar adapter_info[6];
+ ushort cntl;
+ ushort chksum;
+} ASCEEP_CONFIG;
+
+#define ASC_EEP_CMD_READ 0x80
+#define ASC_EEP_CMD_WRITE 0x40
+#define ASC_EEP_CMD_WRITE_ABLE 0x30
+#define ASC_EEP_CMD_WRITE_DISABLE 0x00
+#define ASCV_MSGOUT_BEG 0x0000
+#define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3)
+#define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4)
+#define ASCV_BREAK_SAVED_CODE (ushort)0x0006
+#define ASCV_MSGIN_BEG (ASCV_MSGOUT_BEG+8)
+#define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3)
+#define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4)
+#define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8)
+#define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8)
+#define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020
+#define ASCV_BREAK_ADDR (ushort)0x0028
+#define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A
+#define ASCV_BREAK_CONTROL (ushort)0x002C
+#define ASCV_BREAK_HIT_COUNT (ushort)0x002E
+
+#define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030
+#define ASCV_MCODE_CHKSUM_W (ushort)0x0032
+#define ASCV_MCODE_SIZE_W (ushort)0x0034
+#define ASCV_STOP_CODE_B (ushort)0x0036
+#define ASCV_DVC_ERR_CODE_B (ushort)0x0037
+#define ASCV_OVERRUN_PADDR_D (ushort)0x0038
+#define ASCV_OVERRUN_BSIZE_D (ushort)0x003C
+#define ASCV_HALTCODE_W (ushort)0x0040
+#define ASCV_CHKSUM_W (ushort)0x0042
+#define ASCV_MC_DATE_W (ushort)0x0044
+#define ASCV_MC_VER_W (ushort)0x0046
+#define ASCV_NEXTRDY_B (ushort)0x0048
+#define ASCV_DONENEXT_B (ushort)0x0049
+#define ASCV_USE_TAGGED_QNG_B (ushort)0x004A
+#define ASCV_SCSIBUSY_B (ushort)0x004B
+#define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C
+#define ASCV_CURCDB_B (ushort)0x004D
+#define ASCV_RCLUN_B (ushort)0x004E
+#define ASCV_BUSY_QHEAD_B (ushort)0x004F
+#define ASCV_DISC1_QHEAD_B (ushort)0x0050
+#define ASCV_DISC_ENABLE_B (ushort)0x0052
+#define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053
+#define ASCV_HOSTSCSI_ID_B (ushort)0x0055
+#define ASCV_MCODE_CNTL_B (ushort)0x0056
+#define ASCV_NULL_TARGET_B (ushort)0x0057
+#define ASCV_FREE_Q_HEAD_W (ushort)0x0058
+#define ASCV_DONE_Q_TAIL_W (ushort)0x005A
+#define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1)
+#define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1)
+#define ASCV_HOST_FLAG_B (ushort)0x005D
+#define ASCV_TOTAL_READY_Q_B (ushort)0x0064
+#define ASCV_VER_SERIAL_B (ushort)0x0065
+#define ASCV_HALTCODE_SAVED_W (ushort)0x0066
+#define ASCV_WTM_FLAG_B (ushort)0x0068
+#define ASCV_RISC_FLAG_B (ushort)0x006A
+#define ASCV_REQ_SG_LIST_QP (ushort)0x006B
+#define ASC_HOST_FLAG_IN_ISR 0x01
+#define ASC_HOST_FLAG_ACK_INT 0x02
+#define ASC_RISC_FLAG_GEN_INT 0x01
+#define ASC_RISC_FLAG_REQ_SG_LIST 0x02
+#define IOP_CTRL (0x0F)
+#define IOP_STATUS (0x0E)
+#define IOP_INT_ACK IOP_STATUS
+#define IOP_REG_IFC (0x0D)
+#define IOP_SYN_OFFSET (0x0B)
+#define IOP_EXTRA_CONTROL (0x0D)
+#define IOP_REG_PC (0x0C)
+#define IOP_RAM_ADDR (0x0A)
+#define IOP_RAM_DATA (0x08)
+#define IOP_EEP_DATA (0x06)
+#define IOP_EEP_CMD (0x07)
+#define IOP_VERSION (0x03)
+#define IOP_CONFIG_HIGH (0x04)
+#define IOP_CONFIG_LOW (0x02)
+#define IOP_SIG_BYTE (0x01)
+#define IOP_SIG_WORD (0x00)
+#define IOP_REG_DC1 (0x0E)
+#define IOP_REG_DC0 (0x0C)
+#define IOP_REG_SB (0x0B)
+#define IOP_REG_DA1 (0x0A)
+#define IOP_REG_DA0 (0x08)
+#define IOP_REG_SC (0x09)
+#define IOP_DMA_SPEED (0x07)
+#define IOP_REG_FLAG (0x07)
+#define IOP_FIFO_H (0x06)
+#define IOP_FIFO_L (0x04)
+#define IOP_REG_ID (0x05)
+#define IOP_REG_QP (0x03)
+#define IOP_REG_IH (0x02)
+#define IOP_REG_IX (0x01)
+#define IOP_REG_AX (0x00)
+#define IFC_REG_LOCK (0x00)
+#define IFC_REG_UNLOCK (0x09)
+#define IFC_WR_EN_FILTER (0x10)
+#define IFC_RD_NO_EEPROM (0x10)
+#define IFC_SLEW_RATE (0x20)
+#define IFC_ACT_NEG (0x40)
+#define IFC_INP_FILTER (0x80)
+#define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK)
+#define SC_SEL (uchar)(0x80)
+#define SC_BSY (uchar)(0x40)
+#define SC_ACK (uchar)(0x20)
+#define SC_REQ (uchar)(0x10)
+#define SC_ATN (uchar)(0x08)
+#define SC_IO (uchar)(0x04)
+#define SC_CD (uchar)(0x02)
+#define SC_MSG (uchar)(0x01)
+#define SEC_SCSI_CTL (uchar)(0x80)
+#define SEC_ACTIVE_NEGATE (uchar)(0x40)
+#define SEC_SLEW_RATE (uchar)(0x20)
+#define SEC_ENABLE_FILTER (uchar)(0x10)
+#define ASC_HALT_EXTMSG_IN (ushort)0x8000
+#define ASC_HALT_CHK_CONDITION (ushort)0x8100
+#define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200
+#define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300
+#define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400
+#define ASC_HALT_SDTR_REJECTED (ushort)0x4000
+#define ASC_HALT_HOST_COPY_SG_LIST_TO_RISC ( ushort )0x2000
+#define ASC_MAX_QNO 0xF8
+#define ASC_DATA_SEC_BEG (ushort)0x0080
+#define ASC_DATA_SEC_END (ushort)0x0080
+#define ASC_CODE_SEC_BEG (ushort)0x0080
+#define ASC_CODE_SEC_END (ushort)0x0080
+#define ASC_QADR_BEG (0x4000)
+#define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64)
+#define ASC_QADR_END (ushort)0x7FFF
+#define ASC_QLAST_ADR (ushort)0x7FC0
+#define ASC_QBLK_SIZE 0x40
+#define ASC_BIOS_DATA_QBEG 0xF8
+#define ASC_MIN_ACTIVE_QNO 0x01
+#define ASC_QLINK_END 0xFF
+#define ASC_EEPROM_WORDS 0x10
+#define ASC_MAX_MGS_LEN 0x10
+#define ASC_BIOS_ADDR_DEF 0xDC00
+#define ASC_BIOS_SIZE 0x3800
+#define ASC_BIOS_RAM_OFF 0x3800
+#define ASC_BIOS_RAM_SIZE 0x800
+#define ASC_BIOS_MIN_ADDR 0xC000
+#define ASC_BIOS_MAX_ADDR 0xEC00
+#define ASC_BIOS_BANK_SIZE 0x0400
+#define ASC_MCODE_START_ADDR 0x0080
+#define ASC_CFG0_HOST_INT_ON 0x0020
+#define ASC_CFG0_BIOS_ON 0x0040
+#define ASC_CFG0_VERA_BURST_ON 0x0080
+#define ASC_CFG0_SCSI_PARITY_ON 0x0800
+#define ASC_CFG1_SCSI_TARGET_ON 0x0080
+#define ASC_CFG1_LRAM_8BITS_ON 0x0800
+#define ASC_CFG_MSW_CLR_MASK 0x3080
+#define CSW_TEST1 (ASC_CS_TYPE)0x8000
+#define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000
+#define CSW_RESERVED1 (ASC_CS_TYPE)0x2000
+#define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000
+#define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800
+#define CSW_TEST2 (ASC_CS_TYPE)0x0400
+#define CSW_TEST3 (ASC_CS_TYPE)0x0200
+#define CSW_RESERVED2 (ASC_CS_TYPE)0x0100
+#define CSW_DMA_DONE (ASC_CS_TYPE)0x0080
+#define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040
+#define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020
+#define CSW_HALTED (ASC_CS_TYPE)0x0010
+#define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008
+#define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004
+#define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002
+#define CSW_INT_PENDING (ASC_CS_TYPE)0x0001
+#define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000
+#define CIW_INT_ACK (ASC_CS_TYPE)0x0100
+#define CIW_TEST1 (ASC_CS_TYPE)0x0200
+#define CIW_TEST2 (ASC_CS_TYPE)0x0400
+#define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800
+#define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000
+#define CC_CHIP_RESET (uchar)0x80
+#define CC_SCSI_RESET (uchar)0x40
+#define CC_HALT (uchar)0x20
+#define CC_SINGLE_STEP (uchar)0x10
+#define CC_DMA_ABLE (uchar)0x08
+#define CC_TEST (uchar)0x04
+#define CC_BANK_ONE (uchar)0x02
+#define CC_DIAG (uchar)0x01
+#define ASC_1000_ID0W 0x04C1
+#define ASC_1000_ID0W_FIX 0x00C1
+#define ASC_1000_ID1B 0x25
+#define ASC_EISA_REV_IOP_MASK (0x0C83)
+#define ASC_EISA_CFG_IOP_MASK (0x0C86)
+#define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000)
+#define INS_HALTINT (ushort)0x6281
+#define INS_HALT (ushort)0x6280
+#define INS_SINT (ushort)0x6200
+#define INS_RFLAG_WTM (ushort)0x7380
+#define ASC_MC_SAVE_CODE_WSIZE 0x500
+#define ASC_MC_SAVE_DATA_WSIZE 0x40
+
+typedef struct asc_mc_saved {
+ ushort data[ASC_MC_SAVE_DATA_WSIZE];
+ ushort code[ASC_MC_SAVE_CODE_WSIZE];
+} ASC_MC_SAVED;
+
+#define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B)
+#define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val)
+#define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W)
+#define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W)
+#define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val)
+#define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val)
+#define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B)
+#define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B)
+#define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val)
+#define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val)
+#define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id), (data))
+#define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id))
+#define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id), data)
+#define AscGetMCodeInitSDTRAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id))
+#define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE)
+#define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD)
+#define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION)
+#define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW)
+#define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH)
+#define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data)
+#define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data)
+#define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD)
+#define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data)
+#define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA)
+#define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data)
+#define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR))
+#define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr)
+#define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA)
+#define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data)
+#define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC)
+#define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data)
+#define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS)
+#define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val)
+#define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL)
+#define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val)
+#define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET)
+#define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data)
+#define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data)
+#define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC)
+#define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH))
+#define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID)
+#define AscGetExtraControl(port) (uchar)inp((port)+IOP_EXTRA_CONTROL)
+#define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data)
+#define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX)
+#define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data)
+#define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX)
+#define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data)
+#define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH)
+#define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data)
+#define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP)
+#define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data)
+#define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_REG_FIFO_L)
+#define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_REG_FIFO_L, data)
+#define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_REG_FIFO_H)
+#define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_REG_FIFO_H, data)
+#define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED)
+#define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data)
+#define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0)
+#define AscWriteChipDA0(port) outpw((port)+IOP_REG_DA0, data)
+#define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1)
+#define AscWriteChipDA1(port) outpw((port)+IOP_REG_DA1, data)
+#define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0)
+#define AscWriteChipDC0(port) outpw((port)+IOP_REG_DC0, data)
+#define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1)
+#define AscWriteChipDC1(port) outpw((port)+IOP_REG_DC1, data)
+#define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID)
+#define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data)
+
+/*
+ * Portable Data Types
+ *
+ * Any instance where a 32-bit long or pointer type is assumed
+ * for precision or HW defined structures, the following define
+ * types must be used. In Linux the char, short, and int types
+ * are all consistent at 8, 16, and 32 bits respectively. Pointers
+ * and long types are 64 bits on Alpha and UltraSPARC.
+ */
+#define ADV_PADDR __u32 /* Physical address data type. */
+#define ADV_VADDR __u32 /* Virtual address data type. */
+#define ADV_DCNT __u32 /* Unsigned Data count type. */
+#define ADV_SDCNT __s32 /* Signed Data count type. */
+
+/*
+ * These macros are used to convert a virtual address to a
+ * 32-bit value. This currently can be used on Linux Alpha
+ * which uses 64-bit virtual address but a 32-bit bus address.
+ * This is likely to break in the future, but doing this now
+ * will give us time to change the HW and FW to handle 64-bit
+ * addresses.
+ */
+#define ADV_VADDR_TO_U32 virt_to_bus
+#define ADV_U32_TO_VADDR bus_to_virt
+
+#define AdvPortAddr void __iomem * /* Virtual memory address size */
+
+/*
+ * Define Adv Library required memory access macros.
+ */
+#define ADV_MEM_READB(addr) readb(addr)
+#define ADV_MEM_READW(addr) readw(addr)
+#define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr)
+#define ADV_MEM_WRITEW(addr, word) writew(word, addr)
+#define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr)
+
+#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15)
+
+/*
+ * Define total number of simultaneous maximum element scatter-gather
+ * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the
+ * maximum number of outstanding commands per wide host adapter. Each
+ * command uses one or more ADV_SG_BLOCK each with 15 scatter-gather
+ * elements. Allow each command to have at least one ADV_SG_BLOCK structure.
+ * This allows about 15 commands to have the maximum 17 ADV_SG_BLOCK
+ * structures or 255 scatter-gather elements.
+ */
+#define ADV_TOT_SG_BLOCK ASC_DEF_MAX_HOST_QNG
+
+/*
+ * Define maximum number of scatter-gather elements per request.
+ */
+#define ADV_MAX_SG_LIST 255
+#define NO_OF_SG_PER_BLOCK 15
+
+#define ADV_EEP_DVC_CFG_BEGIN (0x00)
+#define ADV_EEP_DVC_CFG_END (0x15)
+#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */
+#define ADV_EEP_MAX_WORD_ADDR (0x1E)
+
+#define ADV_EEP_DELAY_MS 100
+
+#define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */
+#define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */
+/*
+ * For the ASC3550 Bit 13 is Termination Polarity control bit.
+ * For later ICs Bit 13 controls whether the CIS (Card Information
+ * Service Section) is loaded from EEPROM.
+ */
+#define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */
+#define ADV_EEPROM_CIS_LD 0x2000 /* EEPROM Bit 13 */
+/*
+ * ASC38C1600 Bit 11
+ *
+ * If EEPROM Bit 11 is 0 for Function 0, then Function 0 will specify
+ * INT A in the PCI Configuration Space Int Pin field. If it is 1, then
+ * Function 0 will specify INT B.
+ *
+ * If EEPROM Bit 11 is 0 for Function 1, then Function 1 will specify
+ * INT B in the PCI Configuration Space Int Pin field. If it is 1, then
+ * Function 1 will specify INT A.
+ */
+#define ADV_EEPROM_INTAB 0x0800 /* EEPROM Bit 11 */
+
+typedef struct adveep_3550_config {
+ /* Word Offset, Description */
+
+ ushort cfg_lsw; /* 00 power up initialization */
+ /* bit 13 set - Term Polarity Control */
+ /* bit 14 set - BIOS Enable */
+ /* bit 15 set - Big Endian Mode */
+ ushort cfg_msw; /* 01 unused */
+ ushort disc_enable; /* 02 disconnect enable */
+ ushort wdtr_able; /* 03 Wide DTR able */
+ ushort sdtr_able; /* 04 Synchronous DTR able */
+ ushort start_motor; /* 05 send start up motor */
+ ushort tagqng_able; /* 06 tag queuing able */
+ ushort bios_scan; /* 07 BIOS device control */
+ ushort scam_tolerant; /* 08 no scam */
+
+ uchar adapter_scsi_id; /* 09 Host Adapter ID */
+ uchar bios_boot_delay; /* power up wait */
+
+ uchar scsi_reset_delay; /* 10 reset delay */
+ uchar bios_id_lun; /* first boot device scsi id & lun */
+ /* high nibble is lun */
+ /* low nibble is scsi id */
+
+ uchar termination; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ uchar reserved1; /* reserved byte (not used) */
+
+ ushort bios_ctrl; /* 12 BIOS control bits */
+ /* bit 0 BIOS don't act as initiator. */
+ /* bit 1 BIOS > 1 GB support */
+ /* bit 2 BIOS > 2 Disk Support */
+ /* bit 3 BIOS don't support removables */
+ /* bit 4 BIOS support bootable CD */
+ /* bit 5 BIOS scan enabled */
+ /* bit 6 BIOS support multiple LUNs */
+ /* bit 7 BIOS display of message */
+ /* bit 8 SCAM disabled */
+ /* bit 9 Reset SCSI bus during init. */
+ /* bit 10 */
+ /* bit 11 No verbose initialization. */
+ /* bit 12 SCSI parity enabled */
+ /* bit 13 */
+ /* bit 14 */
+ /* bit 15 */
+ ushort ultra_able; /* 13 ULTRA speed able */
+ ushort reserved2; /* 14 reserved */
+ uchar max_host_qng; /* 15 maximum host queuing */
+ uchar max_dvc_qng; /* maximum per device queuing */
+ ushort dvc_cntl; /* 16 control bit for driver */
+ ushort bug_fix; /* 17 control bit for bug fix */
+ ushort serial_number_word1; /* 18 Board serial number word 1 */
+ ushort serial_number_word2; /* 19 Board serial number word 2 */
+ ushort serial_number_word3; /* 20 Board serial number word 3 */
+ ushort check_sum; /* 21 EEP check sum */
+ uchar oem_name[16]; /* 22 OEM name */
+ ushort dvc_err_code; /* 30 last device driver error code */
+ ushort adv_err_code; /* 31 last uc and Adv Lib error code */
+ ushort adv_err_addr; /* 32 last uc error address */
+ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */
+ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */
+ ushort saved_adv_err_addr; /* 35 saved last uc error address */
+ ushort num_of_err; /* 36 number of error */
+} ADVEEP_3550_CONFIG;
+
+typedef struct adveep_38C0800_config {
+ /* Word Offset, Description */
+
+ ushort cfg_lsw; /* 00 power up initialization */
+ /* bit 13 set - Load CIS */
+ /* bit 14 set - BIOS Enable */
+ /* bit 15 set - Big Endian Mode */
+ ushort cfg_msw; /* 01 unused */
+ ushort disc_enable; /* 02 disconnect enable */
+ ushort wdtr_able; /* 03 Wide DTR able */
+ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */
+ ushort start_motor; /* 05 send start up motor */
+ ushort tagqng_able; /* 06 tag queuing able */
+ ushort bios_scan; /* 07 BIOS device control */
+ ushort scam_tolerant; /* 08 no scam */
+
+ uchar adapter_scsi_id; /* 09 Host Adapter ID */
+ uchar bios_boot_delay; /* power up wait */
+
+ uchar scsi_reset_delay; /* 10 reset delay */
+ uchar bios_id_lun; /* first boot device scsi id & lun */
+ /* high nibble is lun */
+ /* low nibble is scsi id */
+
+ uchar termination_se; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ uchar termination_lvd; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ ushort bios_ctrl; /* 12 BIOS control bits */
+ /* bit 0 BIOS don't act as initiator. */
+ /* bit 1 BIOS > 1 GB support */
+ /* bit 2 BIOS > 2 Disk Support */
+ /* bit 3 BIOS don't support removables */
+ /* bit 4 BIOS support bootable CD */
+ /* bit 5 BIOS scan enabled */
+ /* bit 6 BIOS support multiple LUNs */
+ /* bit 7 BIOS display of message */
+ /* bit 8 SCAM disabled */
+ /* bit 9 Reset SCSI bus during init. */
+ /* bit 10 */
+ /* bit 11 No verbose initialization. */
+ /* bit 12 SCSI parity enabled */
+ /* bit 13 */
+ /* bit 14 */
+ /* bit 15 */
+ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */
+ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */
+ uchar max_host_qng; /* 15 maximum host queueing */
+ uchar max_dvc_qng; /* maximum per device queuing */
+ ushort dvc_cntl; /* 16 control bit for driver */
+ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */
+ ushort serial_number_word1; /* 18 Board serial number word 1 */
+ ushort serial_number_word2; /* 19 Board serial number word 2 */
+ ushort serial_number_word3; /* 20 Board serial number word 3 */
+ ushort check_sum; /* 21 EEP check sum */
+ uchar oem_name[16]; /* 22 OEM name */
+ ushort dvc_err_code; /* 30 last device driver error code */
+ ushort adv_err_code; /* 31 last uc and Adv Lib error code */
+ ushort adv_err_addr; /* 32 last uc error address */
+ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */
+ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */
+ ushort saved_adv_err_addr; /* 35 saved last uc error address */
+ ushort reserved36; /* 36 reserved */
+ ushort reserved37; /* 37 reserved */
+ ushort reserved38; /* 38 reserved */
+ ushort reserved39; /* 39 reserved */
+ ushort reserved40; /* 40 reserved */
+ ushort reserved41; /* 41 reserved */
+ ushort reserved42; /* 42 reserved */
+ ushort reserved43; /* 43 reserved */
+ ushort reserved44; /* 44 reserved */
+ ushort reserved45; /* 45 reserved */
+ ushort reserved46; /* 46 reserved */
+ ushort reserved47; /* 47 reserved */
+ ushort reserved48; /* 48 reserved */
+ ushort reserved49; /* 49 reserved */
+ ushort reserved50; /* 50 reserved */
+ ushort reserved51; /* 51 reserved */
+ ushort reserved52; /* 52 reserved */
+ ushort reserved53; /* 53 reserved */
+ ushort reserved54; /* 54 reserved */
+ ushort reserved55; /* 55 reserved */
+ ushort cisptr_lsw; /* 56 CIS PTR LSW */
+ ushort cisprt_msw; /* 57 CIS PTR MSW */
+ ushort subsysvid; /* 58 SubSystem Vendor ID */
+ ushort subsysid; /* 59 SubSystem ID */
+ ushort reserved60; /* 60 reserved */
+ ushort reserved61; /* 61 reserved */
+ ushort reserved62; /* 62 reserved */
+ ushort reserved63; /* 63 reserved */
+} ADVEEP_38C0800_CONFIG;
+
+typedef struct adveep_38C1600_config {
+ /* Word Offset, Description */
+
+ ushort cfg_lsw; /* 00 power up initialization */
+ /* bit 11 set - Func. 0 INTB, Func. 1 INTA */
+ /* clear - Func. 0 INTA, Func. 1 INTB */
+ /* bit 13 set - Load CIS */
+ /* bit 14 set - BIOS Enable */
+ /* bit 15 set - Big Endian Mode */
+ ushort cfg_msw; /* 01 unused */
+ ushort disc_enable; /* 02 disconnect enable */
+ ushort wdtr_able; /* 03 Wide DTR able */
+ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */
+ ushort start_motor; /* 05 send start up motor */
+ ushort tagqng_able; /* 06 tag queuing able */
+ ushort bios_scan; /* 07 BIOS device control */
+ ushort scam_tolerant; /* 08 no scam */
+
+ uchar adapter_scsi_id; /* 09 Host Adapter ID */
+ uchar bios_boot_delay; /* power up wait */
+
+ uchar scsi_reset_delay; /* 10 reset delay */
+ uchar bios_id_lun; /* first boot device scsi id & lun */
+ /* high nibble is lun */
+ /* low nibble is scsi id */
+
+ uchar termination_se; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ uchar termination_lvd; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ ushort bios_ctrl; /* 12 BIOS control bits */
+ /* bit 0 BIOS don't act as initiator. */
+ /* bit 1 BIOS > 1 GB support */
+ /* bit 2 BIOS > 2 Disk Support */
+ /* bit 3 BIOS don't support removables */
+ /* bit 4 BIOS support bootable CD */
+ /* bit 5 BIOS scan enabled */
+ /* bit 6 BIOS support multiple LUNs */
+ /* bit 7 BIOS display of message */
+ /* bit 8 SCAM disabled */
+ /* bit 9 Reset SCSI bus during init. */
+ /* bit 10 Basic Integrity Checking disabled */
+ /* bit 11 No verbose initialization. */
+ /* bit 12 SCSI parity enabled */
+ /* bit 13 AIPP (Asyn. Info. Ph. Prot.) dis. */
+ /* bit 14 */
+ /* bit 15 */
+ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */
+ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */
+ uchar max_host_qng; /* 15 maximum host queueing */
+ uchar max_dvc_qng; /* maximum per device queuing */
+ ushort dvc_cntl; /* 16 control bit for driver */
+ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */
+ ushort serial_number_word1; /* 18 Board serial number word 1 */
+ ushort serial_number_word2; /* 19 Board serial number word 2 */
+ ushort serial_number_word3; /* 20 Board serial number word 3 */
+ ushort check_sum; /* 21 EEP check sum */
+ uchar oem_name[16]; /* 22 OEM name */
+ ushort dvc_err_code; /* 30 last device driver error code */
+ ushort adv_err_code; /* 31 last uc and Adv Lib error code */
+ ushort adv_err_addr; /* 32 last uc error address */
+ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */
+ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */
+ ushort saved_adv_err_addr; /* 35 saved last uc error address */
+ ushort reserved36; /* 36 reserved */
+ ushort reserved37; /* 37 reserved */
+ ushort reserved38; /* 38 reserved */
+ ushort reserved39; /* 39 reserved */
+ ushort reserved40; /* 40 reserved */
+ ushort reserved41; /* 41 reserved */
+ ushort reserved42; /* 42 reserved */
+ ushort reserved43; /* 43 reserved */
+ ushort reserved44; /* 44 reserved */
+ ushort reserved45; /* 45 reserved */
+ ushort reserved46; /* 46 reserved */
+ ushort reserved47; /* 47 reserved */
+ ushort reserved48; /* 48 reserved */
+ ushort reserved49; /* 49 reserved */
+ ushort reserved50; /* 50 reserved */
+ ushort reserved51; /* 51 reserved */
+ ushort reserved52; /* 52 reserved */
+ ushort reserved53; /* 53 reserved */
+ ushort reserved54; /* 54 reserved */
+ ushort reserved55; /* 55 reserved */
+ ushort cisptr_lsw; /* 56 CIS PTR LSW */
+ ushort cisprt_msw; /* 57 CIS PTR MSW */
+ ushort subsysvid; /* 58 SubSystem Vendor ID */
+ ushort subsysid; /* 59 SubSystem ID */
+ ushort reserved60; /* 60 reserved */
+ ushort reserved61; /* 61 reserved */
+ ushort reserved62; /* 62 reserved */
+ ushort reserved63; /* 63 reserved */
+} ADVEEP_38C1600_CONFIG;
+
+/*
+ * EEPROM Commands
+ */
+#define ASC_EEP_CMD_DONE 0x0200
+
+/* bios_ctrl */
+#define BIOS_CTRL_BIOS 0x0001
+#define BIOS_CTRL_EXTENDED_XLAT 0x0002
+#define BIOS_CTRL_GT_2_DISK 0x0004
+#define BIOS_CTRL_BIOS_REMOVABLE 0x0008
+#define BIOS_CTRL_BOOTABLE_CD 0x0010
+#define BIOS_CTRL_MULTIPLE_LUN 0x0040
+#define BIOS_CTRL_DISPLAY_MSG 0x0080
+#define BIOS_CTRL_NO_SCAM 0x0100
+#define BIOS_CTRL_RESET_SCSI_BUS 0x0200
+#define BIOS_CTRL_INIT_VERBOSE 0x0800
+#define BIOS_CTRL_SCSI_PARITY 0x1000
+#define BIOS_CTRL_AIPP_DIS 0x2000
+
+#define ADV_3550_MEMSIZE 0x2000 /* 8 KB Internal Memory */
+
+#define ADV_38C0800_MEMSIZE 0x4000 /* 16 KB Internal Memory */
+
+/*
+ * XXX - Since ASC38C1600 Rev.3 has a local RAM failure issue, there is
+ * a special 16K Adv Library and Microcode version. After the issue is
+ * resolved, should restore 32K support.
+ *
+ * #define ADV_38C1600_MEMSIZE 0x8000L * 32 KB Internal Memory *
+ */
+#define ADV_38C1600_MEMSIZE 0x4000 /* 16 KB Internal Memory */
+
+/*
+ * Byte I/O register address from base of 'iop_base'.
+ */
+#define IOPB_INTR_STATUS_REG 0x00
+#define IOPB_CHIP_ID_1 0x01
+#define IOPB_INTR_ENABLES 0x02
+#define IOPB_CHIP_TYPE_REV 0x03
+#define IOPB_RES_ADDR_4 0x04
+#define IOPB_RES_ADDR_5 0x05
+#define IOPB_RAM_DATA 0x06
+#define IOPB_RES_ADDR_7 0x07
+#define IOPB_FLAG_REG 0x08
+#define IOPB_RES_ADDR_9 0x09
+#define IOPB_RISC_CSR 0x0A
+#define IOPB_RES_ADDR_B 0x0B
+#define IOPB_RES_ADDR_C 0x0C
+#define IOPB_RES_ADDR_D 0x0D
+#define IOPB_SOFT_OVER_WR 0x0E
+#define IOPB_RES_ADDR_F 0x0F
+#define IOPB_MEM_CFG 0x10
+#define IOPB_RES_ADDR_11 0x11
+#define IOPB_GPIO_DATA 0x12
+#define IOPB_RES_ADDR_13 0x13
+#define IOPB_FLASH_PAGE 0x14
+#define IOPB_RES_ADDR_15 0x15
+#define IOPB_GPIO_CNTL 0x16
+#define IOPB_RES_ADDR_17 0x17
+#define IOPB_FLASH_DATA 0x18
+#define IOPB_RES_ADDR_19 0x19
+#define IOPB_RES_ADDR_1A 0x1A
+#define IOPB_RES_ADDR_1B 0x1B
+#define IOPB_RES_ADDR_1C 0x1C
+#define IOPB_RES_ADDR_1D 0x1D
+#define IOPB_RES_ADDR_1E 0x1E
+#define IOPB_RES_ADDR_1F 0x1F
+#define IOPB_DMA_CFG0 0x20
+#define IOPB_DMA_CFG1 0x21
+#define IOPB_TICKLE 0x22
+#define IOPB_DMA_REG_WR 0x23
+#define IOPB_SDMA_STATUS 0x24
+#define IOPB_SCSI_BYTE_CNT 0x25
+#define IOPB_HOST_BYTE_CNT 0x26
+#define IOPB_BYTE_LEFT_TO_XFER 0x27
+#define IOPB_BYTE_TO_XFER_0 0x28
+#define IOPB_BYTE_TO_XFER_1 0x29
+#define IOPB_BYTE_TO_XFER_2 0x2A
+#define IOPB_BYTE_TO_XFER_3 0x2B
+#define IOPB_ACC_GRP 0x2C
+#define IOPB_RES_ADDR_2D 0x2D
+#define IOPB_DEV_ID 0x2E
+#define IOPB_RES_ADDR_2F 0x2F
+#define IOPB_SCSI_DATA 0x30
+#define IOPB_RES_ADDR_31 0x31
+#define IOPB_RES_ADDR_32 0x32
+#define IOPB_SCSI_DATA_HSHK 0x33
+#define IOPB_SCSI_CTRL 0x34
+#define IOPB_RES_ADDR_35 0x35
+#define IOPB_RES_ADDR_36 0x36
+#define IOPB_RES_ADDR_37 0x37
+#define IOPB_RAM_BIST 0x38
+#define IOPB_PLL_TEST 0x39
+#define IOPB_PCI_INT_CFG 0x3A
+#define IOPB_RES_ADDR_3B 0x3B
+#define IOPB_RFIFO_CNT 0x3C
+#define IOPB_RES_ADDR_3D 0x3D
+#define IOPB_RES_ADDR_3E 0x3E
+#define IOPB_RES_ADDR_3F 0x3F
+
+/*
+ * Word I/O register address from base of 'iop_base'.
+ */
+#define IOPW_CHIP_ID_0 0x00 /* CID0 */
+#define IOPW_CTRL_REG 0x02 /* CC */
+#define IOPW_RAM_ADDR 0x04 /* LA */
+#define IOPW_RAM_DATA 0x06 /* LD */
+#define IOPW_RES_ADDR_08 0x08
+#define IOPW_RISC_CSR 0x0A /* CSR */
+#define IOPW_SCSI_CFG0 0x0C /* CFG0 */
+#define IOPW_SCSI_CFG1 0x0E /* CFG1 */
+#define IOPW_RES_ADDR_10 0x10
+#define IOPW_SEL_MASK 0x12 /* SM */
+#define IOPW_RES_ADDR_14 0x14
+#define IOPW_FLASH_ADDR 0x16 /* FA */
+#define IOPW_RES_ADDR_18 0x18
+#define IOPW_EE_CMD 0x1A /* EC */
+#define IOPW_EE_DATA 0x1C /* ED */
+#define IOPW_SFIFO_CNT 0x1E /* SFC */
+#define IOPW_RES_ADDR_20 0x20
+#define IOPW_Q_BASE 0x22 /* QB */
+#define IOPW_QP 0x24 /* QP */
+#define IOPW_IX 0x26 /* IX */
+#define IOPW_SP 0x28 /* SP */
+#define IOPW_PC 0x2A /* PC */
+#define IOPW_RES_ADDR_2C 0x2C
+#define IOPW_RES_ADDR_2E 0x2E
+#define IOPW_SCSI_DATA 0x30 /* SD */
+#define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */
+#define IOPW_SCSI_CTRL 0x34 /* SC */
+#define IOPW_HSHK_CFG 0x36 /* HCFG */
+#define IOPW_SXFR_STATUS 0x36 /* SXS */
+#define IOPW_SXFR_CNTL 0x38 /* SXL */
+#define IOPW_SXFR_CNTH 0x3A /* SXH */
+#define IOPW_RES_ADDR_3C 0x3C
+#define IOPW_RFIFO_DATA 0x3E /* RFD */
+
+/*
+ * Doubleword I/O register address from base of 'iop_base'.
+ */
+#define IOPDW_RES_ADDR_0 0x00
+#define IOPDW_RAM_DATA 0x04
+#define IOPDW_RES_ADDR_8 0x08
+#define IOPDW_RES_ADDR_C 0x0C
+#define IOPDW_RES_ADDR_10 0x10
+#define IOPDW_COMMA 0x14
+#define IOPDW_COMMB 0x18
+#define IOPDW_RES_ADDR_1C 0x1C
+#define IOPDW_SDMA_ADDR0 0x20
+#define IOPDW_SDMA_ADDR1 0x24
+#define IOPDW_SDMA_COUNT 0x28
+#define IOPDW_SDMA_ERROR 0x2C
+#define IOPDW_RDMA_ADDR0 0x30
+#define IOPDW_RDMA_ADDR1 0x34
+#define IOPDW_RDMA_COUNT 0x38
+#define IOPDW_RDMA_ERROR 0x3C
+
+#define ADV_CHIP_ID_BYTE 0x25
+#define ADV_CHIP_ID_WORD 0x04C1
+
+#define ADV_INTR_ENABLE_HOST_INTR 0x01
+#define ADV_INTR_ENABLE_SEL_INTR 0x02
+#define ADV_INTR_ENABLE_DPR_INTR 0x04
+#define ADV_INTR_ENABLE_RTA_INTR 0x08
+#define ADV_INTR_ENABLE_RMA_INTR 0x10
+#define ADV_INTR_ENABLE_RST_INTR 0x20
+#define ADV_INTR_ENABLE_DPE_INTR 0x40
+#define ADV_INTR_ENABLE_GLOBAL_INTR 0x80
+
+#define ADV_INTR_STATUS_INTRA 0x01
+#define ADV_INTR_STATUS_INTRB 0x02
+#define ADV_INTR_STATUS_INTRC 0x04
+
+#define ADV_RISC_CSR_STOP (0x0000)
+#define ADV_RISC_TEST_COND (0x2000)
+#define ADV_RISC_CSR_RUN (0x4000)
+#define ADV_RISC_CSR_SINGLE_STEP (0x8000)
+
+#define ADV_CTRL_REG_HOST_INTR 0x0100
+#define ADV_CTRL_REG_SEL_INTR 0x0200
+#define ADV_CTRL_REG_DPR_INTR 0x0400
+#define ADV_CTRL_REG_RTA_INTR 0x0800
+#define ADV_CTRL_REG_RMA_INTR 0x1000
+#define ADV_CTRL_REG_RES_BIT14 0x2000
+#define ADV_CTRL_REG_DPE_INTR 0x4000
+#define ADV_CTRL_REG_POWER_DONE 0x8000
+#define ADV_CTRL_REG_ANY_INTR 0xFF00
+
+#define ADV_CTRL_REG_CMD_RESET 0x00C6
+#define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5
+#define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4
+#define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3
+#define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2
+
+#define ADV_TICKLE_NOP 0x00
+#define ADV_TICKLE_A 0x01
+#define ADV_TICKLE_B 0x02
+#define ADV_TICKLE_C 0x03
+
+#define AdvIsIntPending(port) \
+ (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR)
+
+/*
+ * SCSI_CFG0 Register bit definitions
+ */
+#define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. */
+#define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */
+#define EVEN_PARITY 0x1000 /* Select Even Parity */
+#define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */
+#define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */
+#define PRIM_MODE 0x0100 /* Primitive SCSI mode */
+#define SCAM_EN 0x0080 /* Enable SCAM selection */
+#define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */
+#define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */
+#define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */
+#define OUR_ID 0x000F /* SCSI ID */
+
+/*
+ * SCSI_CFG1 Register bit definitions
+ */
+#define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */
+#define TERM_POL 0x2000 /* Terminator Polarity Ctrl. MIO:13, EEP:13 */
+#define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */
+#define FILTER_SEL 0x0C00 /* Filter Period Selection */
+#define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */
+#define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */
+#define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */
+#define ACTIVE_DBL 0x0200 /* Disable Active Negation */
+#define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */
+#define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */
+#define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */
+#define TERM_CTL 0x0030 /* External SCSI Termination Bits */
+#define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */
+#define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */
+#define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */
+
+/*
+ * Addendum for ASC-38C0800 Chip
+ *
+ * The ASC-38C1600 Chip uses the same definitions except that the
+ * bus mode override bits [12:10] have been moved to byte register
+ * offset 0xE (IOPB_SOFT_OVER_WR) bits [12:10]. The [12:10] bits in
+ * SCSI_CFG1 are read-only and always available. Bit 14 (DIS_TERM_DRV)
+ * is not needed. The [12:10] bits in IOPB_SOFT_OVER_WR are write-only.
+ * Also each ASC-38C1600 function or channel uses only cable bits [5:4]
+ * and [1:0]. Bits [14], [7:6], [3:2] are unused.
+ */
+#define DIS_TERM_DRV 0x4000 /* 1: Read c_det[3:0], 0: cannot read */
+#define HVD_LVD_SE 0x1C00 /* Device Detect Bits */
+#define HVD 0x1000 /* HVD Device Detect */
+#define LVD 0x0800 /* LVD Device Detect */
+#define SE 0x0400 /* SE Device Detect */
+#define TERM_LVD 0x00C0 /* LVD Termination Bits */
+#define TERM_LVD_HI 0x0080 /* Enable LVD Upper Termination */
+#define TERM_LVD_LO 0x0040 /* Enable LVD Lower Termination */
+#define TERM_SE 0x0030 /* SE Termination Bits */
+#define TERM_SE_HI 0x0020 /* Enable SE Upper Termination */
+#define TERM_SE_LO 0x0010 /* Enable SE Lower Termination */
+#define C_DET_LVD 0x000C /* LVD Cable Detect Bits */
+#define C_DET3 0x0008 /* Cable Detect for LVD External Wide */
+#define C_DET2 0x0004 /* Cable Detect for LVD Internal Wide */
+#define C_DET_SE 0x0003 /* SE Cable Detect Bits */
+#define C_DET1 0x0002 /* Cable Detect for SE Internal Wide */
+#define C_DET0 0x0001 /* Cable Detect for SE Internal Narrow */
+
+#define CABLE_ILLEGAL_A 0x7
+ /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */
+
+#define CABLE_ILLEGAL_B 0xB
+ /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */
+
+/*
+ * MEM_CFG Register bit definitions
+ */
+#define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */
+#define FAST_EE_CLK 0x20 /* Diagnostic Bit */
+#define RAM_SZ 0x1C /* Specify size of RAM to RISC */
+#define RAM_SZ_2KB 0x00 /* 2 KB */
+#define RAM_SZ_4KB 0x04 /* 4 KB */
+#define RAM_SZ_8KB 0x08 /* 8 KB */
+#define RAM_SZ_16KB 0x0C /* 16 KB */
+#define RAM_SZ_32KB 0x10 /* 32 KB */
+#define RAM_SZ_64KB 0x14 /* 64 KB */
+
+/*
+ * DMA_CFG0 Register bit definitions
+ *
+ * This register is only accessible to the host.
+ */
+#define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */
+#define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */
+#define FIFO_THRESH_16B 0x00 /* 16 bytes */
+#define FIFO_THRESH_32B 0x20 /* 32 bytes */
+#define FIFO_THRESH_48B 0x30 /* 48 bytes */
+#define FIFO_THRESH_64B 0x40 /* 64 bytes */
+#define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */
+#define FIFO_THRESH_96B 0x60 /* 96 bytes */
+#define FIFO_THRESH_112B 0x70 /* 112 bytes */
+#define START_CTL 0x0C /* DMA start conditions */
+#define START_CTL_TH 0x00 /* Wait threshold level (default) */
+#define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */
+#define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */
+#define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */
+#define READ_CMD 0x03 /* Memory Read Method */
+#define READ_CMD_MR 0x00 /* Memory Read */
+#define READ_CMD_MRL 0x02 /* Memory Read Long */
+#define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */
+
+/*
+ * ASC-38C0800 RAM BIST Register bit definitions
+ */
+#define RAM_TEST_MODE 0x80
+#define PRE_TEST_MODE 0x40
+#define NORMAL_MODE 0x00
+#define RAM_TEST_DONE 0x10
+#define RAM_TEST_STATUS 0x0F
+#define RAM_TEST_HOST_ERROR 0x08
+#define RAM_TEST_INTRAM_ERROR 0x04
+#define RAM_TEST_RISC_ERROR 0x02
+#define RAM_TEST_SCSI_ERROR 0x01
+#define RAM_TEST_SUCCESS 0x00
+#define PRE_TEST_VALUE 0x05
+#define NORMAL_VALUE 0x00
+
+/*
+ * ASC38C1600 Definitions
+ *
+ * IOPB_PCI_INT_CFG Bit Field Definitions
+ */
+
+#define INTAB_LD 0x80 /* Value loaded from EEPROM Bit 11. */
+
+/*
+ * Bit 1 can be set to change the interrupt for the Function to operate in
+ * Totem Pole mode. By default Bit 1 is 0 and the interrupt operates in
+ * Open Drain mode. Both functions of the ASC38C1600 must be set to the same
+ * mode, otherwise the operating mode is undefined.
+ */
+#define TOTEMPOLE 0x02
+
+/*
+ * Bit 0 can be used to change the Int Pin for the Function. The value is
+ * 0 by default for both Functions with Function 0 using INT A and Function
+ * B using INT B. For Function 0 if set, INT B is used. For Function 1 if set,
+ * INT A is used.
+ *
+ * EEPROM Word 0 Bit 11 for each Function may change the initial Int Pin
+ * value specified in the PCI Configuration Space.
+ */
+#define INTAB 0x01
+
+/*
+ * Adv Library Status Definitions
+ */
+#define ADV_TRUE 1
+#define ADV_FALSE 0
+#define ADV_SUCCESS 1
+#define ADV_BUSY 0
+#define ADV_ERROR (-1)
+
+/*
+ * ADV_DVC_VAR 'warn_code' values
+ */
+#define ASC_WARN_BUSRESET_ERROR 0x0001 /* SCSI Bus Reset error */
+#define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */
+#define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */
+#define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */
+
+#define ADV_MAX_TID 15 /* max. target identifier */
+#define ADV_MAX_LUN 7 /* max. logical unit number */
+
+/*
+ * Fixed locations of microcode operating variables.
+ */
+#define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */
+#define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */
+#define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */
+#define ASC_MC_VERSION_DATE 0x0038 /* microcode version */
+#define ASC_MC_VERSION_NUM 0x003A /* microcode number */
+#define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */
+#define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */
+#define ASC_MC_BIOS_SIGNATURE 0x0058 /* BIOS Signature 0x55AA */
+#define ASC_MC_BIOS_VERSION 0x005A /* BIOS Version (2 bytes) */
+#define ASC_MC_SDTR_SPEED1 0x0090 /* SDTR Speed for TID 0-3 */
+#define ASC_MC_SDTR_SPEED2 0x0092 /* SDTR Speed for TID 4-7 */
+#define ASC_MC_SDTR_SPEED3 0x0094 /* SDTR Speed for TID 8-11 */
+#define ASC_MC_SDTR_SPEED4 0x0096 /* SDTR Speed for TID 12-15 */
+#define ASC_MC_CHIP_TYPE 0x009A
+#define ASC_MC_INTRB_CODE 0x009B
+#define ASC_MC_WDTR_ABLE 0x009C
+#define ASC_MC_SDTR_ABLE 0x009E
+#define ASC_MC_TAGQNG_ABLE 0x00A0
+#define ASC_MC_DISC_ENABLE 0x00A2
+#define ASC_MC_IDLE_CMD_STATUS 0x00A4
+#define ASC_MC_IDLE_CMD 0x00A6
+#define ASC_MC_IDLE_CMD_PARAMETER 0x00A8
+#define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC
+#define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE
+#define ASC_MC_DEFAULT_MEM_CFG 0x00B0
+#define ASC_MC_DEFAULT_SEL_MASK 0x00B2
+#define ASC_MC_SDTR_DONE 0x00B6
+#define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0
+#define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0
+#define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100
+#define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */
+#define ASC_MC_WDTR_DONE 0x0124
+#define ASC_MC_CAM_MODE_MASK 0x015E /* CAM mode TID bitmask. */
+#define ASC_MC_ICQ 0x0160
+#define ASC_MC_IRQ 0x0164
+#define ASC_MC_PPR_ABLE 0x017A
+
+/*
+ * BIOS LRAM variable absolute offsets.
+ */
+#define BIOS_CODESEG 0x54
+#define BIOS_CODELEN 0x56
+#define BIOS_SIGNATURE 0x58
+#define BIOS_VERSION 0x5A
+
+/*
+ * Microcode Control Flags
+ *
+ * Flags set by the Adv Library in RISC variable 'control_flag' (0x122)
+ * and handled by the microcode.
+ */
+#define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */
+#define CONTROL_FLAG_ENABLE_AIPP 0x0002 /* Enabled AIPP checking. */
+
+/*
+ * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format
+ */
+#define HSHK_CFG_WIDE_XFR 0x8000
+#define HSHK_CFG_RATE 0x0F00
+#define HSHK_CFG_OFFSET 0x001F
+
+#define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */
+#define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */
+#define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */
+#define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */
+
+#define ASC_QC_DATA_CHECK 0x01 /* Require ASC_QC_DATA_OUT set or clear. */
+#define ASC_QC_DATA_OUT 0x02 /* Data out DMA transfer. */
+#define ASC_QC_START_MOTOR 0x04 /* Send auto-start motor before request. */
+#define ASC_QC_NO_OVERRUN 0x08 /* Don't report overrun. */
+#define ASC_QC_FREEZE_TIDQ 0x10 /* Freeze TID queue after request. XXX TBD */
+
+#define ASC_QSC_NO_DISC 0x01 /* Don't allow disconnect for request. */
+#define ASC_QSC_NO_TAGMSG 0x02 /* Don't allow tag queuing for request. */
+#define ASC_QSC_NO_SYNC 0x04 /* Don't use Synch. transfer on request. */
+#define ASC_QSC_NO_WIDE 0x08 /* Don't use Wide transfer on request. */
+#define ASC_QSC_REDO_DTR 0x10 /* Renegotiate WDTR/SDTR before request. */
+/*
+ * Note: If a Tag Message is to be sent and neither ASC_QSC_HEAD_TAG or
+ * ASC_QSC_ORDERED_TAG is set, then a Simple Tag Message (0x20) is used.
+ */
+#define ASC_QSC_HEAD_TAG 0x40 /* Use Head Tag Message (0x21). */
+#define ASC_QSC_ORDERED_TAG 0x80 /* Use Ordered Tag Message (0x22). */
+
+/*
+ * All fields here are accessed by the board microcode and need to be
+ * little-endian.
+ */
+typedef struct adv_carr_t {
+ ADV_VADDR carr_va; /* Carrier Virtual Address */
+ ADV_PADDR carr_pa; /* Carrier Physical Address */
+ ADV_VADDR areq_vpa; /* ASC_SCSI_REQ_Q Virtual or Physical Address */
+ /*
+ * next_vpa [31:4] Carrier Virtual or Physical Next Pointer
+ *
+ * next_vpa [3:1] Reserved Bits
+ * next_vpa [0] Done Flag set in Response Queue.
+ */
+ ADV_VADDR next_vpa;
+} ADV_CARR_T;
+
+/*
+ * Mask used to eliminate low 4 bits of carrier 'next_vpa' field.
+ */
+#define ASC_NEXT_VPA_MASK 0xFFFFFFF0
+
+#define ASC_RQ_DONE 0x00000001
+#define ASC_RQ_GOOD 0x00000002
+#define ASC_CQ_STOPPER 0x00000000
+
+#define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK)
+
+#define ADV_CARRIER_NUM_PAGE_CROSSING \
+ (((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE)
+
+#define ADV_CARRIER_BUFSIZE \
+ ((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T))
+
+/*
+ * ASC_SCSI_REQ_Q 'a_flag' definitions
+ *
+ * The Adv Library should limit use to the lower nibble (4 bits) of
+ * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
+ */
+#define ADV_POLL_REQUEST 0x01 /* poll for request completion */
+#define ADV_SCSIQ_DONE 0x02 /* request done */
+#define ADV_DONT_RETRY 0x08 /* don't do retry */
+
+#define ADV_CHIP_ASC3550 0x01 /* Ultra-Wide IC */
+#define ADV_CHIP_ASC38C0800 0x02 /* Ultra2-Wide/LVD IC */
+#define ADV_CHIP_ASC38C1600 0x03 /* Ultra3-Wide/LVD2 IC */
+
+/*
+ * Adapter temporary configuration structure
+ *
+ * This structure can be discarded after initialization. Don't add
+ * fields here needed after initialization.
+ *
+ * Field naming convention:
+ *
+ * *_enable indicates the field enables or disables a feature. The
+ * value of the field is never reset.
+ */
+typedef struct adv_dvc_cfg {
+ ushort disc_enable; /* enable disconnection */
+ uchar chip_version; /* chip version */
+ uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */
+ ushort control_flag; /* Microcode Control Flag */
+ ushort mcode_date; /* Microcode date */
+ ushort mcode_version; /* Microcode version */
+ ushort serial1; /* EEPROM serial number word 1 */
+ ushort serial2; /* EEPROM serial number word 2 */
+ ushort serial3; /* EEPROM serial number word 3 */
+} ADV_DVC_CFG;
+
+struct adv_dvc_var;
+struct adv_scsi_req_q;
+
+typedef struct asc_sg_block {
+ uchar reserved1;
+ uchar reserved2;
+ uchar reserved3;
+ uchar sg_cnt; /* Valid entries in block. */
+ ADV_PADDR sg_ptr; /* Pointer to next sg block. */
+ struct {
+ ADV_PADDR sg_addr; /* SG element address. */
+ ADV_DCNT sg_count; /* SG element count. */
+ } sg_list[NO_OF_SG_PER_BLOCK];
+} ADV_SG_BLOCK;
+
+/*
+ * ADV_SCSI_REQ_Q - microcode request structure
+ *
+ * All fields in this structure up to byte 60 are used by the microcode.
+ * The microcode makes assumptions about the size and ordering of fields
+ * in this structure. Do not change the structure definition here without
+ * coordinating the change with the microcode.
+ *
+ * All fields accessed by microcode must be maintained in little_endian
+ * order.
+ */
+typedef struct adv_scsi_req_q {
+ uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */
+ uchar target_cmd;
+ uchar target_id; /* Device target identifier. */
+ uchar target_lun; /* Device target logical unit number. */
+ ADV_PADDR data_addr; /* Data buffer physical address. */
+ ADV_DCNT data_cnt; /* Data count. Ucode sets to residual. */
+ ADV_PADDR sense_addr;
+ ADV_PADDR carr_pa;
+ uchar mflag;
+ uchar sense_len;
+ uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */
+ uchar scsi_cntl;
+ uchar done_status; /* Completion status. */
+ uchar scsi_status; /* SCSI status byte. */
+ uchar host_status; /* Ucode host status. */
+ uchar sg_working_ix;
+ uchar cdb[12]; /* SCSI CDB bytes 0-11. */
+ ADV_PADDR sg_real_addr; /* SG list physical address. */
+ ADV_PADDR scsiq_rptr;
+ uchar cdb16[4]; /* SCSI CDB bytes 12-15. */
+ ADV_VADDR scsiq_ptr;
+ ADV_VADDR carr_va;
+ /*
+ * End of microcode structure - 60 bytes. The rest of the structure
+ * is used by the Adv Library and ignored by the microcode.
+ */
+ ADV_VADDR srb_ptr;
+ ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */
+ char *vdata_addr; /* Data buffer virtual address. */
+ uchar a_flag;
+ uchar pad[2]; /* Pad out to a word boundary. */
+} ADV_SCSI_REQ_Q;
+
+/*
+ * The following two structures are used to process Wide Board requests.
+ *
+ * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
+ * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the
+ * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the
+ * Mid-Level SCSI request structure.
+ *
+ * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each
+ * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux
+ * up to 255 scatter-gather elements may be used per request or
+ * ADV_SCSI_REQ_Q.
+ *
+ * Both structures must be 32 byte aligned.
+ */
+typedef struct adv_sgblk {
+ ADV_SG_BLOCK sg_block; /* Sgblock structure. */
+ uchar align[32]; /* Sgblock structure padding. */
+ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */
+} adv_sgblk_t;
+
+typedef struct adv_req {
+ ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */
+ uchar align[32]; /* Request structure padding. */
+ struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */
+ adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */
+ struct adv_req *next_reqp; /* Next Request Structure. */
+} adv_req_t;
+
+/*
+ * Adapter operation variable structure.
+ *
+ * One structure is required per host adapter.
+ *
+ * Field naming convention:
+ *
+ * *_able indicates both whether a feature should be enabled or disabled
+ * and whether a device isi capable of the feature. At initialization
+ * this field may be set, but later if a device is found to be incapable
+ * of the feature, the field is cleared.
+ */
+typedef struct adv_dvc_var {
+ AdvPortAddr iop_base; /* I/O port address */
+ ushort err_code; /* fatal error code */
+ ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */
+ ushort wdtr_able; /* try WDTR for a device */
+ ushort sdtr_able; /* try SDTR for a device */
+ ushort ultra_able; /* try SDTR Ultra speed for a device */
+ ushort sdtr_speed1; /* EEPROM SDTR Speed for TID 0-3 */
+ ushort sdtr_speed2; /* EEPROM SDTR Speed for TID 4-7 */
+ ushort sdtr_speed3; /* EEPROM SDTR Speed for TID 8-11 */
+ ushort sdtr_speed4; /* EEPROM SDTR Speed for TID 12-15 */
+ ushort tagqng_able; /* try tagged queuing with a device */
+ ushort ppr_able; /* PPR message capable per TID bitmask. */
+ uchar max_dvc_qng; /* maximum number of tagged commands per device */
+ ushort start_motor; /* start motor command allowed */
+ uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */
+ uchar chip_no; /* should be assigned by caller */
+ uchar max_host_qng; /* maximum number of Q'ed command allowed */
+ ushort no_scam; /* scam_tolerant of EEPROM */
+ struct asc_board *drv_ptr; /* driver pointer to private structure */
+ uchar chip_scsi_id; /* chip SCSI target ID */
+ uchar chip_type;
+ uchar bist_err_code;
+ ADV_CARR_T *carrier_buf;
+ ADV_CARR_T *carr_freelist; /* Carrier free list. */
+ ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */
+ ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. */
+ ushort carr_pending_cnt; /* Count of pending carriers. */
+ struct adv_req *orig_reqp; /* adv_req_t memory block. */
+ /*
+ * Note: The following fields will not be used after initialization. The
+ * driver may discard the buffer after initialization is done.
+ */
+ ADV_DVC_CFG *cfg; /* temporary configuration structure */
+} ADV_DVC_VAR;
+
+/*
+ * Microcode idle loop commands
+ */
+#define IDLE_CMD_COMPLETED 0
+#define IDLE_CMD_STOP_CHIP 0x0001
+#define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002
+#define IDLE_CMD_SEND_INT 0x0004
+#define IDLE_CMD_ABORT 0x0008
+#define IDLE_CMD_DEVICE_RESET 0x0010
+#define IDLE_CMD_SCSI_RESET_START 0x0020 /* Assert SCSI Bus Reset */
+#define IDLE_CMD_SCSI_RESET_END 0x0040 /* Deassert SCSI Bus Reset */
+#define IDLE_CMD_SCSIREQ 0x0080
+
+#define IDLE_CMD_STATUS_SUCCESS 0x0001
+#define IDLE_CMD_STATUS_FAILURE 0x0002
+
+/*
+ * AdvSendIdleCmd() flag definitions.
+ */
+#define ADV_NOWAIT 0x01
+
+/*
+ * Wait loop time out values.
+ */
+#define SCSI_WAIT_100_MSEC 100UL /* 100 milliseconds */
+#define SCSI_US_PER_MSEC 1000 /* microseconds per millisecond */
+#define SCSI_MAX_RETRY 10 /* retry count */
+
+#define ADV_ASYNC_RDMA_FAILURE 0x01 /* Fatal RDMA failure. */
+#define ADV_ASYNC_SCSI_BUS_RESET_DET 0x02 /* Detected SCSI Bus Reset. */
+#define ADV_ASYNC_CARRIER_READY_FAILURE 0x03 /* Carrier Ready failure. */
+#define ADV_RDMA_IN_CARR_AND_Q_INVALID 0x04 /* RDMAed-in data invalid. */
+
+#define ADV_HOST_SCSI_BUS_RESET 0x80 /* Host Initiated SCSI Bus Reset. */
+
+/* Read byte from a register. */
+#define AdvReadByteRegister(iop_base, reg_off) \
+ (ADV_MEM_READB((iop_base) + (reg_off)))
+
+/* Write byte to a register. */
+#define AdvWriteByteRegister(iop_base, reg_off, byte) \
+ (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte)))
+
+/* Read word (2 bytes) from a register. */
+#define AdvReadWordRegister(iop_base, reg_off) \
+ (ADV_MEM_READW((iop_base) + (reg_off)))
+
+/* Write word (2 bytes) to a register. */
+#define AdvWriteWordRegister(iop_base, reg_off, word) \
+ (ADV_MEM_WRITEW((iop_base) + (reg_off), (word)))
+
+/* Write dword (4 bytes) to a register. */
+#define AdvWriteDWordRegister(iop_base, reg_off, dword) \
+ (ADV_MEM_WRITEDW((iop_base) + (reg_off), (dword)))
+
+/* Read byte from LRAM. */
+#define AdvReadByteLram(iop_base, addr, byte) \
+do { \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \
+} while (0)
+
+/* Write byte to LRAM. */
+#define AdvWriteByteLram(iop_base, addr, byte) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte)))
+
+/* Read word (2 bytes) from LRAM. */
+#define AdvReadWordLram(iop_base, addr, word) \
+do { \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (word) = (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)); \
+} while (0)
+
+/* Write word (2 bytes) to LRAM. */
+#define AdvWriteWordLram(iop_base, addr, word) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))
+
+/* Write little-endian double word (4 bytes) to LRAM */
+/* Because of unspecified C language ordering don't use auto-increment. */
+#define AdvWriteDWordLramNoSwap(iop_base, addr, dword) \
+ ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
+ cpu_to_le16((ushort) ((dword) & 0xFFFF)))), \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
+ cpu_to_le16((ushort) ((dword >> 16) & 0xFFFF)))))
+
+/* Read word (2 bytes) from LRAM assuming that the address is already set. */
+#define AdvReadWordAutoIncLram(iop_base) \
+ (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA))
+
+/* Write word (2 bytes) to LRAM assuming that the address is already set. */
+#define AdvWriteWordAutoIncLram(iop_base, word) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))
+
+/*
+ * Define macro to check for Condor signature.
+ *
+ * Evaluate to ADV_TRUE if a Condor chip is found the specified port
+ * address 'iop_base'. Otherwise evalue to ADV_FALSE.
+ */
+#define AdvFindSignature(iop_base) \
+ (((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \
+ ADV_CHIP_ID_BYTE) && \
+ (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \
+ ADV_CHIP_ID_WORD)) ? ADV_TRUE : ADV_FALSE)
+
+/*
+ * Define macro to Return the version number of the chip at 'iop_base'.
+ *
+ * The second parameter 'bus_type' is currently unused.
+ */
+#define AdvGetChipVersion(iop_base, bus_type) \
+ AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)
+
+/*
+ * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must
+ * match the ASC_SCSI_REQ_Q 'srb_ptr' field.
+ *
+ * If the request has not yet been sent to the device it will simply be
+ * aborted from RISC memory. If the request is disconnected it will be
+ * aborted on reselection by sending an Abort Message to the target ID.
+ *
+ * Return value:
+ * ADV_TRUE(1) - Queue was successfully aborted.
+ * ADV_FALSE(0) - Queue was not found on the active queue list.
+ */
+#define AdvAbortQueue(asc_dvc, scsiq) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
+ (ADV_DCNT) (scsiq))
+
+/*
+ * Send a Bus Device Reset Message to the specified target ID.
+ *
+ * All outstanding commands will be purged if sending the
+ * Bus Device Reset Message is successful.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - All requests on the target are purged.
+ * ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests
+ * are not purged.
+ */
+#define AdvResetDevice(asc_dvc, target_id) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
+ (ADV_DCNT) (target_id))
+
+/*
+ * SCSI Wide Type definition.
+ */
+#define ADV_SCSI_BIT_ID_TYPE ushort
+
+/*
+ * AdvInitScsiTarget() 'cntl_flag' options.
+ */
+#define ADV_SCAN_LUN 0x01
+#define ADV_CAPINFO_NOLUN 0x02
+
+/*
+ * Convert target id to target id bit mask.
+ */
+#define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID))
+
+/*
+ * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values.
+ */
+
+#define QD_NO_STATUS 0x00 /* Request not completed yet. */
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_QUEUE_ABORTED 0x15
+#define QHSTA_M_SXFR_SDMA_ERR 0x16 /* SXFR_STATUS SCSI DMA Error */
+#define QHSTA_M_SXFR_SXFR_PERR 0x17 /* SXFR_STATUS SCSI Bus Parity Error */
+#define QHSTA_M_RDMA_PERR 0x18 /* RISC PCI DMA parity error */
+#define QHSTA_M_SXFR_OFF_UFLW 0x19 /* SXFR_STATUS Offset Underflow */
+#define QHSTA_M_SXFR_OFF_OFLW 0x20 /* SXFR_STATUS Offset Overflow */
+#define QHSTA_M_SXFR_WD_TMO 0x21 /* SXFR_STATUS Watchdog Timeout */
+#define QHSTA_M_SXFR_DESELECTED 0x22 /* SXFR_STATUS Deselected */
+/* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */
+#define QHSTA_M_SXFR_XFR_OFLW 0x12 /* SXFR_STATUS Transfer Overflow */
+#define QHSTA_M_SXFR_XFR_PH_ERR 0x24 /* SXFR_STATUS Transfer Phase Error */
+#define QHSTA_M_SXFR_UNKNOWN_ERROR 0x25 /* SXFR_STATUS Unknown Error */
+#define QHSTA_M_SCSI_BUS_RESET 0x30 /* Request aborted from SBR */
+#define QHSTA_M_SCSI_BUS_RESET_UNSOL 0x31 /* Request aborted from unsol. SBR */
+#define QHSTA_M_BUS_DEVICE_RESET 0x32 /* Request aborted from BDR */
+#define QHSTA_M_DIRECTION_ERR 0x35 /* Data Phase mismatch */
+#define QHSTA_M_DIRECTION_ERR_HUNG 0x36 /* Data Phase mismatch and bus hang */
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */
+#define QHSTA_M_FROZEN_TIDQ 0x46 /* TID Queue frozen. */
+#define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */
+
+/* Return the address that is aligned at the next doubleword >= to 'addr'. */
+#define ADV_8BALIGN(addr) (((ulong) (addr) + 0x7) & ~0x7)
+#define ADV_16BALIGN(addr) (((ulong) (addr) + 0xF) & ~0xF)
+#define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F)
+
+/*
+ * Total contiguous memory needed for driver SG blocks.
+ *
+ * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum
+ * number of scatter-gather elements the driver supports in a
+ * single request.
+ */
+
+#define ADV_SG_LIST_MAX_BYTE_SIZE \
+ (sizeof(ADV_SG_BLOCK) * \
+ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK))
+
+/* struct asc_board flags */
+#define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */
+
+#define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0)
+
+#define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */
+
+#define ASC_INFO_SIZE 128 /* advansys_info() line size */
+
+/* Asc Library return codes */
+#define ASC_TRUE 1
+#define ASC_FALSE 0
+#define ASC_NOERROR 1
+#define ASC_BUSY 0
+#define ASC_ERROR (-1)
+
+/* struct scsi_cmnd function return codes */
+#define STATUS_BYTE(byte) (byte)
+#define MSG_BYTE(byte) ((byte) << 8)
+#define HOST_BYTE(byte) ((byte) << 16)
+#define DRIVER_BYTE(byte) ((byte) << 24)
+
+#define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1)
+#ifndef ADVANSYS_STATS
+#define ASC_STATS_ADD(shost, counter, count)
+#else /* ADVANSYS_STATS */
+#define ASC_STATS_ADD(shost, counter, count) \
+ (((struct asc_board *) shost_priv(shost))->asc_stats.counter += (count))
+#endif /* ADVANSYS_STATS */
+
+/* If the result wraps when calculating tenths, return 0. */
+#define ASC_TENTHS(num, den) \
+ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \
+ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den)))))
+
+/*
+ * Display a message to the console.
+ */
+#define ASC_PRINT(s) \
+ { \
+ printk("advansys: "); \
+ printk(s); \
+ }
+
+#define ASC_PRINT1(s, a1) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1)); \
+ }
+
+#define ASC_PRINT2(s, a1, a2) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2)); \
+ }
+
+#define ASC_PRINT3(s, a1, a2, a3) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2), (a3)); \
+ }
+
+#define ASC_PRINT4(s, a1, a2, a3, a4) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2), (a3), (a4)); \
+ }
+
+#ifndef ADVANSYS_DEBUG
+
+#define ASC_DBG(lvl, s...)
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s)
+#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone)
+#define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_HEX(lvl, name, start, length)
+#define ASC_DBG_PRT_CDB(lvl, cdb, len)
+#define ASC_DBG_PRT_SENSE(lvl, sense, len)
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len)
+
+#else /* ADVANSYS_DEBUG */
+
+/*
+ * Debugging Message Levels:
+ * 0: Errors Only
+ * 1: High-Level Tracing
+ * 2-N: Verbose Tracing
+ */
+
+#define ASC_DBG(lvl, format, arg...) { \
+ if (asc_dbglvl >= (lvl)) \
+ printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
+ __func__ , ## arg); \
+}
+
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_host(s); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_asc_scsi_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_asc_qdone_info(qdone); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_adv_scsi_req_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_HEX(lvl, name, start, length) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_hex((name), (start), (length)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_CDB(lvl, cdb, len) \
+ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len));
+
+#define ASC_DBG_PRT_SENSE(lvl, sense, len) \
+ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len));
+
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \
+ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len));
+#endif /* ADVANSYS_DEBUG */
+
+#ifdef ADVANSYS_STATS
+
+/* Per board statistics structure */
+struct asc_stats {
+ /* Driver Entrypoint Statistics */
+ ADV_DCNT queuecommand; /* # calls to advansys_queuecommand() */
+ ADV_DCNT reset; /* # calls to advansys_eh_bus_reset() */
+ ADV_DCNT biosparam; /* # calls to advansys_biosparam() */
+ ADV_DCNT interrupt; /* # advansys_interrupt() calls */
+ ADV_DCNT callback; /* # calls to asc/adv_isr_callback() */
+ ADV_DCNT done; /* # calls to request's scsi_done function */
+ ADV_DCNT build_error; /* # asc/adv_build_req() ASC_ERROR returns. */
+ ADV_DCNT adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */
+ ADV_DCNT adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */
+ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */
+ ADV_DCNT exe_noerror; /* # ASC_NOERROR returns. */
+ ADV_DCNT exe_busy; /* # ASC_BUSY returns. */
+ ADV_DCNT exe_error; /* # ASC_ERROR returns. */
+ ADV_DCNT exe_unknown; /* # unknown returns. */
+ /* Data Transfer Statistics */
+ ADV_DCNT xfer_cnt; /* # I/O requests received */
+ ADV_DCNT xfer_elem; /* # scatter-gather elements */
+ ADV_DCNT xfer_sect; /* # 512-byte blocks */
+};
+#endif /* ADVANSYS_STATS */
+
+/*
+ * Structure allocated for each board.
+ *
+ * This structure is allocated by scsi_host_alloc() at the end
+ * of the 'Scsi_Host' structure starting at the 'hostdata'
+ * field. It is guaranteed to be allocated from DMA-able memory.
+ */
+struct asc_board {
+ struct device *dev;
+ uint flags; /* Board flags */
+ unsigned int irq;
+ union {
+ ASC_DVC_VAR asc_dvc_var; /* Narrow board */
+ ADV_DVC_VAR adv_dvc_var; /* Wide board */
+ } dvc_var;
+ union {
+ ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */
+ ADV_DVC_CFG adv_dvc_cfg; /* Wide board */
+ } dvc_cfg;
+ ushort asc_n_io_port; /* Number I/O ports. */
+ ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */
+ ushort reqcnt[ADV_MAX_TID + 1]; /* Starvation request count */
+ ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */
+ ushort queue_full_cnt[ADV_MAX_TID + 1]; /* Queue full count */
+ union {
+ ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */
+ ADVEEP_3550_CONFIG adv_3550_eep; /* 3550 EEPROM config. */
+ ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */
+ ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */
+ } eep_config;
+ ulong last_reset; /* Saved last reset time */
+ /* /proc/scsi/advansys/[0...] */
+#ifdef ADVANSYS_STATS
+ struct asc_stats asc_stats; /* Board statistics */
+#endif /* ADVANSYS_STATS */
+ /*
+ * The following fields are used only for Narrow Boards.
+ */
+ uchar sdtr_data[ASC_MAX_TID + 1]; /* SDTR information */
+ /*
+ * The following fields are used only for Wide Boards.
+ */
+ void __iomem *ioremap_addr; /* I/O Memory remap address. */
+ ushort ioport; /* I/O Port address. */
+ adv_req_t *adv_reqp; /* Request structures. */
+ adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */
+ ushort bios_signature; /* BIOS Signature. */
+ ushort bios_version; /* BIOS Version. */
+ ushort bios_codeseg; /* BIOS Code Segment. */
+ ushort bios_codelen; /* BIOS Code Segment Length. */
+};
+
+#define asc_dvc_to_board(asc_dvc) container_of(asc_dvc, struct asc_board, \
+ dvc_var.asc_dvc_var)
+#define adv_dvc_to_board(adv_dvc) container_of(adv_dvc, struct asc_board, \
+ dvc_var.adv_dvc_var)
+#define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev)
+
+#ifdef ADVANSYS_DEBUG
+static int asc_dbglvl = 3;
+
+/*
+ * asc_prt_asc_dvc_var()
+ */
+static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h)
+{
+ printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h);
+
+ printk(" iop_base 0x%x, err_code 0x%x, dvc_cntl 0x%x, bug_fix_cntl "
+ "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl);
+
+ printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type,
+ (unsigned)h->init_sdtr);
+
+ printk(" sdtr_done 0x%x, use_tagged_qng 0x%x, unit_not_ready 0x%x, "
+ "chip_no 0x%x,\n", (unsigned)h->sdtr_done,
+ (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready,
+ (unsigned)h->chip_no);
+
+ printk(" queue_full_or_busy 0x%x, start_motor 0x%x, scsi_reset_wait "
+ "%u,\n", (unsigned)h->queue_full_or_busy,
+ (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait);
+
+ printk(" is_in_int %u, max_total_qng %u, cur_total_qng %u, "
+ "in_critical_cnt %u,\n", (unsigned)h->is_in_int,
+ (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng,
+ (unsigned)h->in_critical_cnt);
+
+ printk(" last_q_shortage %u, init_state 0x%x, no_scam 0x%x, "
+ "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage,
+ (unsigned)h->init_state, (unsigned)h->no_scam,
+ (unsigned)h->pci_fix_asyn_xfer);
+
+ printk(" cfg 0x%lx\n", (ulong)h->cfg);
+}
+
+/*
+ * asc_prt_asc_dvc_cfg()
+ */
+static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h)
+{
+ printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h);
+
+ printk(" can_tagged_qng 0x%x, cmd_qng_enabled 0x%x,\n",
+ h->can_tagged_qng, h->cmd_qng_enabled);
+ printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n",
+ h->disc_enable, h->sdtr_enable);
+
+ printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, "
+ "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed,
+ h->isa_dma_channel, h->chip_version);
+
+ printk(" mcode_date 0x%x, mcode_version %d\n",
+ h->mcode_date, h->mcode_version);
+}
+
+/*
+ * asc_prt_adv_dvc_var()
+ *
+ * Display an ADV_DVC_VAR structure.
+ */
+static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h)
+{
+ printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h);
+
+ printk(" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n",
+ (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able);
+
+ printk(" sdtr_able 0x%x, wdtr_able 0x%x\n",
+ (unsigned)h->sdtr_able, (unsigned)h->wdtr_able);
+
+ printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n",
+ (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait);
+
+ printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lxn\n",
+ (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng,
+ (ulong)h->carr_freelist);
+
+ printk(" icq_sp 0x%lx, irq_sp 0x%lx\n",
+ (ulong)h->icq_sp, (ulong)h->irq_sp);
+
+ printk(" no_scam 0x%x, tagqng_able 0x%x\n",
+ (unsigned)h->no_scam, (unsigned)h->tagqng_able);
+
+ printk(" chip_scsi_id 0x%x, cfg 0x%lx\n",
+ (unsigned)h->chip_scsi_id, (ulong)h->cfg);
+}
+
+/*
+ * asc_prt_adv_dvc_cfg()
+ *
+ * Display an ADV_DVC_CFG structure.
+ */
+static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h)
+{
+ printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h);
+
+ printk(" disc_enable 0x%x, termination 0x%x\n",
+ h->disc_enable, h->termination);
+
+ printk(" chip_version 0x%x, mcode_date 0x%x\n",
+ h->chip_version, h->mcode_date);
+
+ printk(" mcode_version 0x%x, control_flag 0x%x\n",
+ h->mcode_version, h->control_flag);
+}
+
+/*
+ * asc_prt_scsi_host()
+ */
+static void asc_prt_scsi_host(struct Scsi_Host *s)
+{
+ struct asc_board *boardp = shost_priv(s);
+
+ printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
+ printk(" host_busy %u, host_no %d,\n",
+ atomic_read(&s->host_busy), s->host_no);
+
+ printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
+ (ulong)s->base, (ulong)s->io_port, boardp->irq);
+
+ printk(" dma_channel %d, this_id %d, can_queue %d,\n",
+ s->dma_channel, s->this_id, s->can_queue);
+
+ printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n",
+ s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma);
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var);
+ asc_prt_asc_dvc_cfg(&boardp->dvc_cfg.asc_dvc_cfg);
+ } else {
+ asc_prt_adv_dvc_var(&boardp->dvc_var.adv_dvc_var);
+ asc_prt_adv_dvc_cfg(&boardp->dvc_cfg.adv_dvc_cfg);
+ }
+}
+
+/*
+ * asc_prt_hex()
+ *
+ * Print hexadecimal output in 4 byte groupings 32 bytes
+ * or 8 double-words per line.
+ */
+static void asc_prt_hex(char *f, uchar *s, int l)
+{
+ int i;
+ int j;
+ int k;
+ int m;
+
+ printk("%s: (%d bytes)\n", f, l);
+
+ for (i = 0; i < l; i += 32) {
+
+ /* Display a maximum of 8 double-words per line. */
+ if ((k = (l - i) / 4) >= 8) {
+ k = 8;
+ m = 0;
+ } else {
+ m = (l - i) % 4;
+ }
+
+ for (j = 0; j < k; j++) {
+ printk(" %2.2X%2.2X%2.2X%2.2X",
+ (unsigned)s[i + (j * 4)],
+ (unsigned)s[i + (j * 4) + 1],
+ (unsigned)s[i + (j * 4) + 2],
+ (unsigned)s[i + (j * 4) + 3]);
+ }
+
+ switch (m) {
+ case 0:
+ default:
+ break;
+ case 1:
+ printk(" %2.2X", (unsigned)s[i + (j * 4)]);
+ break;
+ case 2:
+ printk(" %2.2X%2.2X",
+ (unsigned)s[i + (j * 4)],
+ (unsigned)s[i + (j * 4) + 1]);
+ break;
+ case 3:
+ printk(" %2.2X%2.2X%2.2X",
+ (unsigned)s[i + (j * 4) + 1],
+ (unsigned)s[i + (j * 4) + 2],
+ (unsigned)s[i + (j * 4) + 3]);
+ break;
+ }
+
+ printk("\n");
+ }
+}
+
+/*
+ * asc_prt_asc_scsi_q()
+ */
+static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
+{
+ ASC_SG_HEAD *sgp;
+ int i;
+
+ printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q);
+
+ printk
+ (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n",
+ q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr,
+ q->q2.tag_code);
+
+ printk
+ (" data_addr 0x%lx, data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n",
+ (ulong)le32_to_cpu(q->q1.data_addr),
+ (ulong)le32_to_cpu(q->q1.data_cnt),
+ (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len);
+
+ printk(" cdbptr 0x%lx, cdb_len %u, sg_head 0x%lx, sg_queue_cnt %u\n",
+ (ulong)q->cdbptr, q->q2.cdb_len,
+ (ulong)q->sg_head, q->q1.sg_queue_cnt);
+
+ if (q->sg_head) {
+ sgp = q->sg_head;
+ printk("ASC_SG_HEAD at addr 0x%lx\n", (ulong)sgp);
+ printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt,
+ sgp->queue_cnt);
+ for (i = 0; i < sgp->entry_cnt; i++) {
+ printk(" [%u]: addr 0x%lx, bytes %lu\n",
+ i, (ulong)le32_to_cpu(sgp->sg_list[i].addr),
+ (ulong)le32_to_cpu(sgp->sg_list[i].bytes));
+ }
+
+ }
+}
+
+/*
+ * asc_prt_asc_qdone_info()
+ */
+static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q)
+{
+ printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q);
+ printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n",
+ (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len,
+ q->d2.tag_code);
+ printk
+ (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n",
+ q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg);
+}
+
+/*
+ * asc_prt_adv_sgblock()
+ *
+ * Display an ADV_SG_BLOCK structure.
+ */
+static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
+{
+ int i;
+
+ printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n",
+ (ulong)b, sgblockno);
+ printk(" sg_cnt %u, sg_ptr 0x%lx\n",
+ b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr));
+ BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK);
+ if (b->sg_ptr != 0)
+ BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK);
+ for (i = 0; i < b->sg_cnt; i++) {
+ printk(" [%u]: sg_addr 0x%lx, sg_count 0x%lx\n",
+ i, (ulong)b->sg_list[i].sg_addr,
+ (ulong)b->sg_list[i].sg_count);
+ }
+}
+
+/*
+ * asc_prt_adv_scsi_req_q()
+ *
+ * Display an ADV_SCSI_REQ_Q structure.
+ */
+static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
+{
+ int sg_blk_cnt;
+ struct asc_sg_block *sg_ptr;
+
+ printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q);
+
+ printk(" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n",
+ q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag);
+
+ printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n",
+ q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr);
+
+ printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n",
+ (ulong)le32_to_cpu(q->data_cnt),
+ (ulong)le32_to_cpu(q->sense_addr), q->sense_len);
+
+ printk
+ (" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n",
+ q->cdb_len, q->done_status, q->host_status, q->scsi_status);
+
+ printk(" sg_working_ix 0x%x, target_cmd %u\n",
+ q->sg_working_ix, q->target_cmd);
+
+ printk(" scsiq_rptr 0x%lx, sg_real_addr 0x%lx, sg_list_ptr 0x%lx\n",
+ (ulong)le32_to_cpu(q->scsiq_rptr),
+ (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr);
+
+ /* Display the request's ADV_SG_BLOCK structures. */
+ if (q->sg_list_ptr != NULL) {
+ sg_blk_cnt = 0;
+ while (1) {
+ /*
+ * 'sg_ptr' is a physical address. Convert it to a virtual
+ * address by indexing 'sg_blk_cnt' into the virtual address
+ * array 'sg_list_ptr'.
+ *
+ * XXX - Assumes all SG physical blocks are virtually contiguous.
+ */
+ sg_ptr =
+ &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]);
+ asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr);
+ if (sg_ptr->sg_ptr == 0) {
+ break;
+ }
+ sg_blk_cnt++;
+ }
+ }
+}
+#endif /* ADVANSYS_DEBUG */
+
+/*
+ * The advansys chip/microcode contains a 32-bit identifier for each command
+ * known as the 'srb'. I don't know what it stands for. The driver used
+ * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it
+ * with bus_to_virt. Now the driver keeps a per-host map of integers to
+ * pointers. It auto-expands when full, unless it can't allocate memory.
+ * Note that an srb of 0 is treated specially by the chip/firmware, hence
+ * the return of i+1 in this routine, and the corresponding subtraction in
+ * the inverse routine.
+ */
+#define BAD_SRB 0
+static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr)
+{
+ int i;
+ void **new_ptr;
+
+ for (i = 0; i < asc_dvc->ptr_map_count; i++) {
+ if (!asc_dvc->ptr_map[i])
+ goto out;
+ }
+
+ if (asc_dvc->ptr_map_count == 0)
+ asc_dvc->ptr_map_count = 1;
+ else
+ asc_dvc->ptr_map_count *= 2;
+
+ new_ptr = krealloc(asc_dvc->ptr_map,
+ asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC);
+ if (!new_ptr)
+ return BAD_SRB;
+ asc_dvc->ptr_map = new_ptr;
+ out:
+ ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i);
+ asc_dvc->ptr_map[i] = ptr;
+ return i + 1;
+}
+
+static void * advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb)
+{
+ void *ptr;
+
+ srb--;
+ if (srb >= asc_dvc->ptr_map_count) {
+ printk("advansys: bad SRB %u, max %u\n", srb,
+ asc_dvc->ptr_map_count);
+ return NULL;
+ }
+ ptr = asc_dvc->ptr_map[srb];
+ asc_dvc->ptr_map[srb] = NULL;
+ ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb);
+ return ptr;
+}
+
+/*
+ * advansys_info()
+ *
+ * Return suitable for printing on the console with the argument
+ * adapter's configuration information.
+ *
+ * Note: The information line should not exceed ASC_INFO_SIZE bytes,
+ * otherwise the static 'info' array will be overrun.
+ */
+static const char *advansys_info(struct Scsi_Host *shost)
+{
+ static char info[ASC_INFO_SIZE];
+ struct asc_board *boardp = shost_priv(shost);
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ char *busname;
+ char *widename = NULL;
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ ASC_DBG(1, "begin\n");
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) ==
+ ASC_IS_ISAPNP) {
+ busname = "ISA PnP";
+ } else {
+ busname = "ISA";
+ }
+ sprintf(info,
+ "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X",
+ ASC_VERSION, busname,
+ (ulong)shost->io_port,
+ (ulong)shost->io_port + ASC_IOADR_GAP - 1,
+ boardp->irq, shost->dma_channel);
+ } else {
+ if (asc_dvc_varp->bus_type & ASC_IS_VL) {
+ busname = "VL";
+ } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
+ busname = "EISA";
+ } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
+ == ASC_IS_PCI_ULTRA) {
+ busname = "PCI Ultra";
+ } else {
+ busname = "PCI";
+ }
+ } else {
+ busname = "?";
+ shost_printk(KERN_ERR, shost, "unknown bus "
+ "type %d\n", asc_dvc_varp->bus_type);
+ }
+ sprintf(info,
+ "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
+ ASC_VERSION, busname, (ulong)shost->io_port,
+ (ulong)shost->io_port + ASC_IOADR_GAP - 1,
+ boardp->irq);
+ }
+ } else {
+ /*
+ * Wide Adapter Information
+ *
+ * Memory-mapped I/O is used instead of I/O space to access
+ * the adapter, but display the I/O Port range. The Memory
+ * I/O address is displayed through the driver /proc file.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ widename = "Ultra-Wide";
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ widename = "Ultra2-Wide";
+ } else {
+ widename = "Ultra3-Wide";
+ }
+ sprintf(info,
+ "AdvanSys SCSI %s: PCI %s: PCIMEM 0x%lX-0x%lX, IRQ 0x%X",
+ ASC_VERSION, widename, (ulong)adv_dvc_varp->iop_base,
+ (ulong)adv_dvc_varp->iop_base + boardp->asc_n_io_port - 1, boardp->irq);
+ }
+ BUG_ON(strlen(info) >= ASC_INFO_SIZE);
+ ASC_DBG(1, "end\n");
+ return info;
+}
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * asc_prt_board_devices()
+ *
+ * Print driver information for devices attached to the board.
+ */
+static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ int chip_scsi_id;
+ int i;
+
+ seq_printf(m,
+ "\nDevice Information for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+
+ seq_puts(m, "Target IDs Detected:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i))
+ seq_printf(m, " %X,", i);
+ }
+ seq_printf(m, " (%X=Host Adapter)\n", chip_scsi_id);
+}
+
+/*
+ * Display Wide Board BIOS Information.
+ */
+static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ ushort major, minor, letter;
+
+ seq_puts(m, "\nROM BIOS Version: ");
+
+ /*
+ * If the BIOS saved a valid signature, then fill in
+ * the BIOS code segment base address.
+ */
+ if (boardp->bios_signature != 0x55AA) {
+ seq_puts(m, "Disabled or Pre-3.1\n"
+ "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"
+ "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
+ } else {
+ major = (boardp->bios_version >> 12) & 0xF;
+ minor = (boardp->bios_version >> 8) & 0xF;
+ letter = (boardp->bios_version & 0xFF);
+
+ seq_printf(m, "%d.%d%c\n",
+ major, minor,
+ letter >= 26 ? '?' : letter + 'A');
+ /*
+ * Current available ROM BIOS release is 3.1I for UW
+ * and 3.2I for U2W. This code doesn't differentiate
+ * UW and U2W boards.
+ */
+ if (major < 3 || (major <= 3 && minor < 1) ||
+ (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
+ seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"
+ "ftp://ftp.connectcom.net/pub\n");
+ }
+ }
+}
+
+/*
+ * Add serial number to information bar if signature AAh
+ * is found in at bit 15-9 (7 bits) of word 1.
+ *
+ * Serial Number consists fo 12 alpha-numeric digits.
+ *
+ * 1 - Product type (A,B,C,D..) Word0: 15-13 (3 bits)
+ * 2 - MFG Location (A,B,C,D..) Word0: 12-10 (3 bits)
+ * 3-4 - Product ID (0-99) Word0: 9-0 (10 bits)
+ * 5 - Product revision (A-J) Word0: " "
+ *
+ * Signature Word1: 15-9 (7 bits)
+ * 6 - Year (0-9) Word1: 8-6 (3 bits) & Word2: 15 (1 bit)
+ * 7-8 - Week of the year (1-52) Word1: 5-0 (6 bits)
+ *
+ * 9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits)
+ *
+ * Note 1: Only production cards will have a serial number.
+ *
+ * Note 2: Signature is most significant 7 bits (0xFE).
+ *
+ * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE.
+ */
+static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
+{
+ ushort w, num;
+
+ if ((serialnum[1] & 0xFE00) != ((ushort)0xAA << 8)) {
+ return ASC_FALSE;
+ } else {
+ /*
+ * First word - 6 digits.
+ */
+ w = serialnum[0];
+
+ /* Product type - 1st digit. */
+ if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') {
+ /* Product type is P=Prototype */
+ *cp += 0x8;
+ }
+ cp++;
+
+ /* Manufacturing location - 2nd digit. */
+ *cp++ = 'A' + ((w & 0x1C00) >> 10);
+
+ /* Product ID - 3rd, 4th digits. */
+ num = w & 0x3FF;
+ *cp++ = '0' + (num / 100);
+ num %= 100;
+ *cp++ = '0' + (num / 10);
+
+ /* Product revision - 5th digit. */
+ *cp++ = 'A' + (num % 10);
+
+ /*
+ * Second word
+ */
+ w = serialnum[1];
+
+ /*
+ * Year - 6th digit.
+ *
+ * If bit 15 of third word is set, then the
+ * last digit of the year is greater than 7.
+ */
+ if (serialnum[2] & 0x8000) {
+ *cp++ = '8' + ((w & 0x1C0) >> 6);
+ } else {
+ *cp++ = '0' + ((w & 0x1C0) >> 6);
+ }
+
+ /* Week of year - 7th, 8th digits. */
+ num = w & 0x003F;
+ *cp++ = '0' + num / 10;
+ num %= 10;
+ *cp++ = '0' + num;
+
+ /*
+ * Third word
+ */
+ w = serialnum[2] & 0x7FFF;
+
+ /* Serial number - 9th digit. */
+ *cp++ = 'A' + (w / 1000);
+
+ /* 10th, 11th, 12th digits. */
+ num = w % 1000;
+ *cp++ = '0' + num / 100;
+ num %= 100;
+ *cp++ = '0' + num / 10;
+ num %= 10;
+ *cp++ = '0' + num;
+
+ *cp = '\0'; /* Null Terminate the string. */
+ return ASC_TRUE;
+ }
+}
+
+/*
+ * asc_prt_asc_board_eeprom()
+ *
+ * Print board EEPROM configuration.
+ */
+static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ ASC_DVC_VAR *asc_dvc_varp;
+ ASCEEP_CONFIG *ep;
+ int i;
+#ifdef CONFIG_ISA
+ int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
+#endif /* CONFIG_ISA */
+ uchar serialstr[13];
+
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ ep = &boardp->eep_config.asc_eep;
+
+ seq_printf(m,
+ "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr)
+ == ASC_TRUE)
+ seq_printf(m, " Serial Number: %s\n", serialstr);
+ else if (ep->adapter_info[5] == 0xBB)
+ seq_puts(m,
+ " Default Settings Used for EEPROM-less Adapter.\n");
+ else
+ seq_puts(m, " Serial Number Signature Not Present.\n");
+
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng,
+ ep->max_tag_qng);
+
+ seq_printf(m,
+ " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam);
+
+ seq_puts(m, " Target ID: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %d", i);
+
+ seq_puts(m, "\n Disconnects: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+
+ seq_puts(m, "\n Command Queuing: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+
+ seq_puts(m, "\n Start Motor: ");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+
+ seq_puts(m, "\n Synchronous Transfer:");
+ for (i = 0; i <= ASC_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_putc(m, '\n');
+
+#ifdef CONFIG_ISA
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ seq_printf(m,
+ " Host ISA DMA speed: %d MB/S\n",
+ isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
+ }
+#endif /* CONFIG_ISA */
+}
+
+/*
+ * asc_prt_adv_board_eeprom()
+ *
+ * Print board EEPROM configuration.
+ */
+static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ ADV_DVC_VAR *adv_dvc_varp;
+ int i;
+ char *termstr;
+ uchar serialstr[13];
+ ADVEEP_3550_CONFIG *ep_3550 = NULL;
+ ADVEEP_38C0800_CONFIG *ep_38C0800 = NULL;
+ ADVEEP_38C1600_CONFIG *ep_38C1600 = NULL;
+ ushort word;
+ ushort *wordp;
+ ushort sdtr_speed = 0;
+
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ ep_3550 = &boardp->eep_config.adv_3550_eep;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ ep_38C0800 = &boardp->eep_config.adv_38C0800_eep;
+ } else {
+ ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;
+ }
+
+ seq_printf(m,
+ "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ wordp = &ep_3550->serial_number_word1;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ wordp = &ep_38C0800->serial_number_word1;
+ } else {
+ wordp = &ep_38C1600->serial_number_word1;
+ }
+
+ if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE)
+ seq_printf(m, " Serial Number: %s\n", serialstr);
+ else
+ seq_puts(m, " Serial Number Signature Not Present.\n");
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep_3550->adapter_scsi_id,
+ ep_3550->max_host_qng, ep_3550->max_dvc_qng);
+ else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800)
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep_38C0800->adapter_scsi_id,
+ ep_38C0800->max_host_qng,
+ ep_38C0800->max_dvc_qng);
+ else
+ seq_printf(m,
+ " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep_38C1600->adapter_scsi_id,
+ ep_38C1600->max_host_qng,
+ ep_38C1600->max_dvc_qng);
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ word = ep_3550->termination;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ word = ep_38C0800->termination_lvd;
+ } else {
+ word = ep_38C1600->termination_lvd;
+ }
+ switch (word) {
+ case 1:
+ termstr = "Low Off/High Off";
+ break;
+ case 2:
+ termstr = "Low Off/High On";
+ break;
+ case 3:
+ termstr = "Low On/High On";
+ break;
+ default:
+ case 0:
+ termstr = "Automatic";
+ break;
+ }
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550)
+ seq_printf(m,
+ " termination: %u (%s), bios_ctrl: 0x%x\n",
+ ep_3550->termination, termstr,
+ ep_3550->bios_ctrl);
+ else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800)
+ seq_printf(m,
+ " termination: %u (%s), bios_ctrl: 0x%x\n",
+ ep_38C0800->termination_lvd, termstr,
+ ep_38C0800->bios_ctrl);
+ else
+ seq_printf(m,
+ " termination: %u (%s), bios_ctrl: 0x%x\n",
+ ep_38C1600->termination_lvd, termstr,
+ ep_38C1600->bios_ctrl);
+
+ seq_puts(m, " Target ID: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %X", i);
+ seq_putc(m, '\n');
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ word = ep_3550->disc_enable;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ word = ep_38C0800->disc_enable;
+ } else {
+ word = ep_38C1600->disc_enable;
+ }
+ seq_puts(m, " Disconnects: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_putc(m, '\n');
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ word = ep_3550->tagqng_able;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ word = ep_38C0800->tagqng_able;
+ } else {
+ word = ep_38C1600->tagqng_able;
+ }
+ seq_puts(m, " Command Queuing: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_putc(m, '\n');
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ word = ep_3550->start_motor;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ word = ep_38C0800->start_motor;
+ } else {
+ word = ep_38C1600->start_motor;
+ }
+ seq_puts(m, " Start Motor: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_putc(m, '\n');
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ seq_puts(m, " Synchronous Transfer:");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
+ 'Y' : 'N');
+ seq_putc(m, '\n');
+ }
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ seq_puts(m, " Ultra Transfer: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i))
+ ? 'Y' : 'N');
+ seq_putc(m, '\n');
+ }
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ word = ep_3550->wdtr_able;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ word = ep_38C0800->wdtr_able;
+ } else {
+ word = ep_38C1600->wdtr_able;
+ }
+ seq_puts(m, " Wide Transfer: ");
+ for (i = 0; i <= ADV_MAX_TID; i++)
+ seq_printf(m, " %c",
+ (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ seq_putc(m, '\n');
+
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
+ adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
+ seq_puts(m, " Synchronous Transfer Speed (Mhz):\n ");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ char *speed_str;
+
+ if (i == 0) {
+ sdtr_speed = adv_dvc_varp->sdtr_speed1;
+ } else if (i == 4) {
+ sdtr_speed = adv_dvc_varp->sdtr_speed2;
+ } else if (i == 8) {
+ sdtr_speed = adv_dvc_varp->sdtr_speed3;
+ } else if (i == 12) {
+ sdtr_speed = adv_dvc_varp->sdtr_speed4;
+ }
+ switch (sdtr_speed & ADV_MAX_TID) {
+ case 0:
+ speed_str = "Off";
+ break;
+ case 1:
+ speed_str = " 5";
+ break;
+ case 2:
+ speed_str = " 10";
+ break;
+ case 3:
+ speed_str = " 20";
+ break;
+ case 4:
+ speed_str = " 40";
+ break;
+ case 5:
+ speed_str = " 80";
+ break;
+ default:
+ speed_str = "Unk";
+ break;
+ }
+ seq_printf(m, "%X:%s ", i, speed_str);
+ if (i == 7)
+ seq_puts(m, "\n ");
+ sdtr_speed >>= 4;
+ }
+ seq_putc(m, '\n');
+ }
+}
+
+/*
+ * asc_prt_driver_conf()
+ */
+static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ int chip_scsi_id;
+
+ seq_printf(m,
+ "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ seq_printf(m,
+ " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
+ atomic_read(&shost->host_busy), shost->max_id,
+ shost->max_lun, shost->max_channel);
+
+ seq_printf(m,
+ " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
+ shost->unique_id, shost->can_queue, shost->this_id,
+ shost->sg_tablesize, shost->cmd_per_lun);
+
+ seq_printf(m,
+ " unchecked_isa_dma %d, use_clustering %d\n",
+ shost->unchecked_isa_dma, shost->use_clustering);
+
+ seq_printf(m,
+ " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
+ boardp->flags, boardp->last_reset, jiffies,
+ boardp->asc_n_io_port);
+
+ seq_printf(m, " io_port 0x%lx\n", shost->io_port);
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+}
+
+/*
+ * asc_prt_asc_board_info()
+ *
+ * Print dynamic board configuration information.
+ */
+static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ int chip_scsi_id;
+ ASC_DVC_VAR *v;
+ ASC_DVC_CFG *c;
+ int i;
+ int renegotiate = 0;
+
+ v = &boardp->dvc_var.asc_dvc_var;
+ c = &boardp->dvc_cfg.asc_dvc_cfg;
+ chip_scsi_id = c->chip_scsi_id;
+
+ seq_printf(m,
+ "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ seq_printf(m, " chip_version %u, mcode_date 0x%x, "
+ "mcode_version 0x%x, err_code %u\n",
+ c->chip_version, c->mcode_date, c->mcode_version,
+ v->err_code);
+
+ /* Current number of commands waiting for the host. */
+ seq_printf(m,
+ " Total Command Pending: %d\n", v->cur_total_qng);
+
+ seq_puts(m, " Command Queuing:");
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ seq_printf(m, " %X:%c",
+ i,
+ (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ }
+
+ /* Current number of commands waiting for a device. */
+ seq_puts(m, "\n Command Queue Pending:");
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]);
+ }
+
+ /* Current limit on number of commands that can be sent to a device. */
+ seq_puts(m, "\n Command Queue Limit:");
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]);
+ }
+
+ /* Indicate whether the device has returned queue full status. */
+ seq_puts(m, "\n Command Queue Full:");
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if (boardp->queue_full & ADV_TID_TO_TIDMASK(i))
+ seq_printf(m, " %X:Y-%d",
+ i, boardp->queue_full_cnt[i]);
+ else
+ seq_printf(m, " %X:N", i);
+ }
+
+ seq_puts(m, "\n Synchronous Transfer:");
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ seq_printf(m, " %X:%c",
+ i,
+ (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ }
+ seq_putc(m, '\n');
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ uchar syn_period_ix;
+
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
+ ((v->init_sdtr & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ seq_printf(m, " %X:", i);
+
+ if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
+ seq_puts(m, " Asynchronous");
+ } else {
+ syn_period_ix =
+ (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
+ 1);
+
+ seq_printf(m,
+ " Transfer Period Factor: %d (%d.%d Mhz),",
+ v->sdtr_period_tbl[syn_period_ix],
+ 250 / v->sdtr_period_tbl[syn_period_ix],
+ ASC_TENTHS(250,
+ v->sdtr_period_tbl[syn_period_ix]));
+
+ seq_printf(m, " REQ/ACK Offset: %d",
+ boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET);
+ }
+
+ if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
+ seq_puts(m, "*\n");
+ renegotiate = 1;
+ } else {
+ seq_putc(m, '\n');
+ }
+ }
+
+ if (renegotiate) {
+ seq_puts(m, " * = Re-negotiation pending before next command.\n");
+ }
+}
+
+/*
+ * asc_prt_adv_board_info()
+ *
+ * Print dynamic board configuration information.
+ */
+static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ int i;
+ ADV_DVC_VAR *v;
+ ADV_DVC_CFG *c;
+ AdvPortAddr iop_base;
+ ushort chip_scsi_id;
+ ushort lramword;
+ uchar lrambyte;
+ ushort tagqng_able;
+ ushort sdtr_able, wdtr_able;
+ ushort wdtr_done, sdtr_done;
+ ushort period = 0;
+ int renegotiate = 0;
+
+ v = &boardp->dvc_var.adv_dvc_var;
+ c = &boardp->dvc_cfg.adv_dvc_cfg;
+ iop_base = v->iop_base;
+ chip_scsi_id = v->chip_scsi_id;
+
+ seq_printf(m,
+ "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ seq_printf(m,
+ " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
+ (unsigned long)v->iop_base,
+ AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT,
+ v->err_code);
+
+ seq_printf(m, " chip_version %u, mcode_date 0x%x, "
+ "mcode_version 0x%x\n", c->chip_version,
+ c->mcode_date, c->mcode_version);
+
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
+ seq_puts(m, " Queuing Enabled:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ seq_printf(m, " %X:%c",
+ i,
+ (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ }
+
+ seq_puts(m, "\n Queue Limit:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i,
+ lrambyte);
+
+ seq_printf(m, " %X:%d", i, lrambyte);
+ }
+
+ seq_puts(m, "\n Command Pending:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i,
+ lrambyte);
+
+ seq_printf(m, " %X:%d", i, lrambyte);
+ }
+ seq_putc(m, '\n');
+
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ seq_puts(m, " Wide Enabled:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ seq_printf(m, " %X:%c",
+ i,
+ (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ }
+ seq_putc(m, '\n');
+
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
+ seq_puts(m, " Transfer Bit Width:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadWordLram(iop_base,
+ ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
+ lramword);
+
+ seq_printf(m, " %X:%d",
+ i, (lramword & 0x8000) ? 16 : 8);
+
+ if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
+ (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
+ seq_putc(m, '*');
+ renegotiate = 1;
+ }
+ }
+ seq_putc(m, '\n');
+
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ seq_puts(m, " Synchronous Enabled:");
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ seq_printf(m, " %X:%c",
+ i,
+ (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ }
+ seq_putc(m, '\n');
+
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+
+ AdvReadWordLram(iop_base,
+ ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
+ lramword);
+ lramword &= ~0x8000;
+
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
+ ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ seq_printf(m, " %X:", i);
+
+ if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */
+ seq_puts(m, " Asynchronous");
+ } else {
+ seq_puts(m, " Transfer Period Factor: ");
+
+ if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */
+ seq_puts(m, "9 (80.0 Mhz),");
+ } else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */
+ seq_puts(m, "10 (40.0 Mhz),");
+ } else { /* 20 Mhz or below. */
+
+ period = (((lramword >> 8) * 25) + 50) / 4;
+
+ if (period == 0) { /* Should never happen. */
+ seq_printf(m, "%d (? Mhz), ", period);
+ } else {
+ seq_printf(m,
+ "%d (%d.%d Mhz),",
+ period, 250 / period,
+ ASC_TENTHS(250, period));
+ }
+ }
+
+ seq_printf(m, " REQ/ACK Offset: %d",
+ lramword & 0x1F);
+ }
+
+ if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
+ seq_puts(m, "*\n");
+ renegotiate = 1;
+ } else {
+ seq_putc(m, '\n');
+ }
+ }
+
+ if (renegotiate) {
+ seq_puts(m, " * = Re-negotiation pending before next command.\n");
+ }
+}
+
+#ifdef ADVANSYS_STATS
+/*
+ * asc_prt_board_stats()
+ */
+static void asc_prt_board_stats(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+ struct asc_stats *s = &boardp->asc_stats;
+
+ seq_printf(m,
+ "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n",
+ shost->host_no);
+
+ seq_printf(m,
+ " queuecommand %u, reset %u, biosparam %u, interrupt %u\n",
+ s->queuecommand, s->reset, s->biosparam,
+ s->interrupt);
+
+ seq_printf(m,
+ " callback %u, done %u, build_error %u, build_noreq %u, build_nosg %u\n",
+ s->callback, s->done, s->build_error,
+ s->adv_build_noreq, s->adv_build_nosg);
+
+ seq_printf(m,
+ " exe_noerror %u, exe_busy %u, exe_error %u, exe_unknown %u\n",
+ s->exe_noerror, s->exe_busy, s->exe_error,
+ s->exe_unknown);
+
+ /*
+ * Display data transfer statistics.
+ */
+ if (s->xfer_cnt > 0) {
+ seq_printf(m, " xfer_cnt %u, xfer_elem %u, ",
+ s->xfer_cnt, s->xfer_elem);
+
+ seq_printf(m, "xfer_bytes %u.%01u kb\n",
+ s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2));
+
+ /* Scatter gather transfer statistics */
+ seq_printf(m, " avg_num_elem %u.%01u, ",
+ s->xfer_elem / s->xfer_cnt,
+ ASC_TENTHS(s->xfer_elem, s->xfer_cnt));
+
+ seq_printf(m, "avg_elem_size %u.%01u kb, ",
+ (s->xfer_sect / 2) / s->xfer_elem,
+ ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem));
+
+ seq_printf(m, "avg_xfer_size %u.%01u kb\n",
+ (s->xfer_sect / 2) / s->xfer_cnt,
+ ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt));
+ }
+}
+#endif /* ADVANSYS_STATS */
+
+/*
+ * advansys_show_info() - /proc/scsi/advansys/{0,1,2,3,...}
+ *
+ * m: seq_file to print into
+ * shost: Scsi_Host
+ *
+ * Return the number of bytes read from or written to a
+ * /proc/scsi/advansys/[0...] file.
+ */
+static int
+advansys_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct asc_board *boardp = shost_priv(shost);
+
+ ASC_DBG(1, "begin\n");
+
+ /*
+ * User read of /proc/scsi/advansys/[0...] file.
+ */
+
+ /*
+ * Get board configuration information.
+ *
+ * advansys_info() returns the board string from its own static buffer.
+ */
+ /* Copy board information. */
+ seq_printf(m, "%s\n", (char *)advansys_info(shost));
+ /*
+ * Display Wide Board BIOS Information.
+ */
+ if (!ASC_NARROW_BOARD(boardp))
+ asc_prt_adv_bios(m, shost);
+
+ /*
+ * Display driver information for each device attached to the board.
+ */
+ asc_prt_board_devices(m, shost);
+
+ /*
+ * Display EEPROM configuration for the board.
+ */
+ if (ASC_NARROW_BOARD(boardp))
+ asc_prt_asc_board_eeprom(m, shost);
+ else
+ asc_prt_adv_board_eeprom(m, shost);
+
+ /*
+ * Display driver configuration and information for the board.
+ */
+ asc_prt_driver_conf(m, shost);
+
+#ifdef ADVANSYS_STATS
+ /*
+ * Display driver statistics for the board.
+ */
+ asc_prt_board_stats(m, shost);
+#endif /* ADVANSYS_STATS */
+
+ /*
+ * Display Asc Library dynamic configuration information
+ * for the board.
+ */
+ if (ASC_NARROW_BOARD(boardp))
+ asc_prt_asc_board_info(m, shost);
+ else
+ asc_prt_adv_board_info(m, shost);
+ return 0;
+}
+#endif /* CONFIG_PROC_FS */
+
+static void asc_scsi_done(struct scsi_cmnd *scp)
+{
+ scsi_dma_unmap(scp);
+ ASC_STATS(scp->device->host, done);
+ scp->scsi_done(scp);
+}
+
+static void AscSetBank(PortAddr iop_base, uchar bank)
+{
+ uchar val;
+
+ val = AscGetChipControl(iop_base) &
+ (~
+ (CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET |
+ CC_CHIP_RESET));
+ if (bank == 1) {
+ val |= CC_BANK_ONE;
+ } else if (bank == 2) {
+ val |= CC_DIAG | CC_BANK_ONE;
+ } else {
+ val &= ~CC_BANK_ONE;
+ }
+ AscSetChipControl(iop_base, val);
+}
+
+static void AscSetChipIH(PortAddr iop_base, ushort ins_code)
+{
+ AscSetBank(iop_base, 1);
+ AscWriteChipIH(iop_base, ins_code);
+ AscSetBank(iop_base, 0);
+}
+
+static int AscStartChip(PortAddr iop_base)
+{
+ AscSetChipControl(iop_base, 0);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ return (0);
+ }
+ return (1);
+}
+
+static int AscStopChip(PortAddr iop_base)
+{
+ uchar cc_val;
+
+ cc_val =
+ AscGetChipControl(iop_base) &
+ (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG));
+ AscSetChipControl(iop_base, (uchar)(cc_val | CC_HALT));
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
+ return (0);
+ }
+ return (1);
+}
+
+static int AscIsChipHalted(PortAddr iop_base)
+{
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
+{
+ PortAddr iop_base;
+ int i = 10;
+
+ iop_base = asc_dvc->iop_base;
+ while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE)
+ && (i-- > 0)) {
+ mdelay(100);
+ }
+ AscStopChip(iop_base);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT);
+ udelay(60);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT);
+ AscSetChipControl(iop_base, CC_HALT);
+ mdelay(200);
+ AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ return (AscIsChipHalted(iop_base));
+}
+
+static int AscFindSignature(PortAddr iop_base)
+{
+ ushort sig_word;
+
+ ASC_DBG(1, "AscGetChipSignatureByte(0x%x) 0x%x\n",
+ iop_base, AscGetChipSignatureByte(iop_base));
+ if (AscGetChipSignatureByte(iop_base) == (uchar)ASC_1000_ID1B) {
+ ASC_DBG(1, "AscGetChipSignatureWord(0x%x) 0x%x\n",
+ iop_base, AscGetChipSignatureWord(iop_base));
+ sig_word = AscGetChipSignatureWord(iop_base);
+ if ((sig_word == (ushort)ASC_1000_ID0W) ||
+ (sig_word == (ushort)ASC_1000_ID0W_FIX)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static void AscEnableInterrupt(PortAddr iop_base)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON);
+}
+
+static void AscDisableInterrupt(PortAddr iop_base)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON));
+}
+
+static uchar AscReadLramByte(PortAddr iop_base, ushort addr)
+{
+ unsigned char byte_data;
+ unsigned short word_data;
+
+ if (isodd_word(addr)) {
+ AscSetChipLramAddr(iop_base, addr - 1);
+ word_data = AscGetChipLramData(iop_base);
+ byte_data = (word_data >> 8) & 0xFF;
+ } else {
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ byte_data = word_data & 0xFF;
+ }
+ return byte_data;
+}
+
+static ushort AscReadLramWord(PortAddr iop_base, ushort addr)
+{
+ ushort word_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ return (word_data);
+}
+
+#if CC_VERY_LONG_SG_LIST
+static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr)
+{
+ ushort val_low, val_high;
+ ASC_DCNT dword_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ val_low = AscGetChipLramData(iop_base);
+ val_high = AscGetChipLramData(iop_base);
+ dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low;
+ return (dword_data);
+}
+#endif /* CC_VERY_LONG_SG_LIST */
+
+static void
+AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words)
+{
+ int i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++) {
+ AscSetChipLramData(iop_base, set_wval);
+ }
+}
+
+static void AscWriteLramWord(PortAddr iop_base, ushort addr, ushort word_val)
+{
+ AscSetChipLramAddr(iop_base, addr);
+ AscSetChipLramData(iop_base, word_val);
+}
+
+static void AscWriteLramByte(PortAddr iop_base, ushort addr, uchar byte_val)
+{
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ addr--;
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0x00FF;
+ word_data |= (((ushort)byte_val << 8) & 0xFF00);
+ } else {
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0xFF00;
+ word_data |= ((ushort)byte_val & 0x00FF);
+ }
+ AscWriteLramWord(iop_base, addr, word_data);
+}
+
+/*
+ * Copy 2 bytes to LRAM.
+ *
+ * The source data is assumed to be in little-endian order in memory
+ * and is maintained in little-endian order when written to LRAM.
+ */
+static void
+AscMemWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
+ const uchar *s_buffer, int words)
+{
+ int i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < 2 * words; i += 2) {
+ /*
+ * On a little-endian system the second argument below
+ * produces a little-endian ushort which is written to
+ * LRAM in little-endian order. On a big-endian system
+ * the second argument produces a big-endian ushort which
+ * is "transparently" byte-swapped by outpw() and written
+ * in little-endian order to LRAM.
+ */
+ outpw(iop_base + IOP_RAM_DATA,
+ ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);
+ }
+}
+
+/*
+ * Copy 4 bytes to LRAM.
+ *
+ * The source data is assumed to be in little-endian order in memory
+ * and is maintained in little-endian order when written to LRAM.
+ */
+static void
+AscMemDWordCopyPtrToLram(PortAddr iop_base,
+ ushort s_addr, uchar *s_buffer, int dwords)
+{
+ int i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < 4 * dwords; i += 4) {
+ outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]); /* LSW */
+ outpw(iop_base + IOP_RAM_DATA, ((ushort)s_buffer[i + 3] << 8) | s_buffer[i + 2]); /* MSW */
+ }
+}
+
+/*
+ * Copy 2 bytes from LRAM.
+ *
+ * The source data is assumed to be in little-endian order in LRAM
+ * and is maintained in little-endian order when written to memory.
+ */
+static void
+AscMemWordCopyPtrFromLram(PortAddr iop_base,
+ ushort s_addr, uchar *d_buffer, int words)
+{
+ int i;
+ ushort word;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < 2 * words; i += 2) {
+ word = inpw(iop_base + IOP_RAM_DATA);
+ d_buffer[i] = word & 0xff;
+ d_buffer[i + 1] = (word >> 8) & 0xff;
+ }
+}
+
+static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
+{
+ ASC_DCNT sum;
+ int i;
+
+ sum = 0L;
+ for (i = 0; i < words; i++, s_addr += 2) {
+ sum += AscReadLramWord(iop_base, s_addr);
+ }
+ return (sum);
+}
+
+static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
+{
+ uchar i;
+ ushort s_addr;
+ PortAddr iop_base;
+ ushort warn_code;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
+ (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) *
+ 64) >> 1));
+ i = ASC_MIN_ACTIVE_QNO;
+ s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
+ (uchar)(i + 1));
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
+ (uchar)(asc_dvc->max_total_qng));
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
+ (uchar)i);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
+ (uchar)(i + 1));
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
+ (uchar)(i - 1));
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
+ (uchar)i);
+ }
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
+ (uchar)ASC_QLINK_END);
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
+ (uchar)(asc_dvc->max_total_qng - 1));
+ AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
+ (uchar)asc_dvc->max_total_qng);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ for (; i <= (uchar)(asc_dvc->max_total_qng + 3);
+ i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base,
+ (ushort)(s_addr + (ushort)ASC_SCSIQ_B_FWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort)(s_addr + (ushort)ASC_SCSIQ_B_BWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO), i);
+ }
+ return warn_code;
+}
+
+static ASC_DCNT
+AscLoadMicroCode(PortAddr iop_base, ushort s_addr,
+ const uchar *mcode_buf, ushort mcode_size)
+{
+ ASC_DCNT chksum;
+ ushort mcode_word_size;
+ ushort mcode_chksum;
+
+ /* Write the microcode buffer starting at LRAM address 0. */
+ mcode_word_size = (ushort)(mcode_size >> 1);
+ AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
+ AscMemWordCopyPtrToLram(iop_base, s_addr, mcode_buf, mcode_word_size);
+
+ chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
+ ASC_DBG(1, "chksum 0x%lx\n", (ulong)chksum);
+ mcode_chksum = (ushort)AscMemSumLramWord(iop_base,
+ (ushort)ASC_CODE_SEC_BEG,
+ (ushort)((mcode_size -
+ s_addr - (ushort)
+ ASC_CODE_SEC_BEG) /
+ 2));
+ ASC_DBG(1, "mcode_chksum 0x%lx\n", (ulong)mcode_chksum);
+ AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
+ AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
+ return chksum;
+}
+
+static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc)
+{
+ PortAddr iop_base;
+ int i;
+ ushort lram_addr;
+
+ iop_base = asc_dvc->iop_base;
+ AscPutRiscVarFreeQHead(iop_base, 1);
+ AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+ AscPutVarFreeQHead(iop_base, 1);
+ AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+ AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B,
+ (uchar)((int)asc_dvc->max_total_qng + 1));
+ AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B,
+ (uchar)((int)asc_dvc->max_total_qng + 2));
+ AscWriteLramByte(iop_base, (ushort)ASCV_TOTAL_READY_Q_B,
+ asc_dvc->max_total_qng);
+ AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
+ AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
+ AscPutQDoneInProgress(iop_base, 0);
+ lram_addr = ASC_QADR_BEG;
+ for (i = 0; i < 32; i++, lram_addr += 2) {
+ AscWriteLramWord(iop_base, lram_addr, 0);
+ }
+}
+
+static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
+{
+ int i;
+ ushort warn_code;
+ PortAddr iop_base;
+ ASC_PADDR phy_addr;
+ ASC_DCNT phy_size;
+ struct asc_board *board = asc_dvc_to_board(asc_dvc);
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ AscPutMCodeInitSDTRAtID(iop_base, i,
+ asc_dvc->cfg->sdtr_period_offset[i]);
+ }
+
+ AscInitQLinkVar(asc_dvc);
+ AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B,
+ ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));
+
+ /* Ensure overrun buffer is aligned on an 8 byte boundary. */
+ BUG_ON((unsigned long)asc_dvc->overrun_buf & 7);
+ asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf,
+ ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) {
+ warn_code = -ENOMEM;
+ goto err_dma_map;
+ }
+ phy_addr = cpu_to_le32(asc_dvc->overrun_dma);
+ AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D,
+ (uchar *)&phy_addr, 1);
+ phy_size = cpu_to_le32(ASC_OVERRUN_BSIZE);
+ AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_BSIZE_D,
+ (uchar *)&phy_size, 1);
+
+ asc_dvc->cfg->mcode_date =
+ AscReadLramWord(iop_base, (ushort)ASCV_MC_DATE_W);
+ asc_dvc->cfg->mcode_version =
+ AscReadLramWord(iop_base, (ushort)ASCV_MC_VER_W);
+
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ warn_code = UW_ERR;
+ goto err_mcode_start;
+ }
+ if (AscStartChip(iop_base) != 1) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ warn_code = UW_ERR;
+ goto err_mcode_start;
+ }
+
+ return warn_code;
+
+err_mcode_start:
+ dma_unmap_single(board->dev, asc_dvc->overrun_dma,
+ ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ asc_dvc->overrun_dma = 0;
+ return warn_code;
+}
+
+static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
+{
+ const struct firmware *fw;
+ const char fwname[] = "/*(DEBLOBBED)*/";
+ int err;
+ unsigned long chksum;
+ ushort warn_code;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
+ !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
+ AscResetChipAndScsiBus(asc_dvc);
+ mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */
+ }
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
+ if (asc_dvc->err_code != 0)
+ return UW_ERR;
+ if (!AscFindSignature(asc_dvc->iop_base)) {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return warn_code;
+ }
+ AscDisableInterrupt(iop_base);
+ warn_code |= AscInitLram(asc_dvc);
+ if (asc_dvc->err_code != 0)
+ return UW_ERR;
+
+ err = reject_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fwname, err);
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return err;
+ }
+ if (fw->size < 4) {
+ printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
+ fw->size, fwname);
+ release_firmware(fw);
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return -EINVAL;
+ }
+ chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
+ (fw->data[1] << 8) | fw->data[0];
+ ASC_DBG(1, "_asc_mcode_chksum 0x%lx\n", (ulong)chksum);
+ if (AscLoadMicroCode(iop_base, 0, &fw->data[4],
+ fw->size - 4) != chksum) {
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ release_firmware(fw);
+ return warn_code;
+ }
+ release_firmware(fw);
+ warn_code |= AscInitMicroCodeVar(asc_dvc);
+ if (!asc_dvc->overrun_dma)
+ return warn_code;
+ asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
+ AscEnableInterrupt(iop_base);
+ return warn_code;
+}
+
+/*
+ * Load the Microcode
+ *
+ * Write the microcode image to RISC memory starting at address 0.
+ *
+ * The microcode is stored compressed in the following format:
+ *
+ * 254 word (508 byte) table indexed by byte code followed
+ * by the following byte codes:
+ *
+ * 1-Byte Code:
+ * 00: Emit word 0 in table.
+ * 01: Emit word 1 in table.
+ * .
+ * FD: Emit word 253 in table.
+ *
+ * Multi-Byte Code:
+ * FE WW WW: (3 byte code) Word to emit is the next word WW WW.
+ * FF BB WW WW: (4 byte code) Emit BB count times next word WW WW.
+ *
+ * Returns 0 or an error if the checksum doesn't match
+ */
+static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
+ int size, int memsize, int chksum)
+{
+ int i, j, end, len = 0;
+ ADV_DCNT sum;
+
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
+
+ for (i = 253 * 2; i < size; i++) {
+ if (buf[i] == 0xff) {
+ unsigned short word = (buf[i + 3] << 8) | buf[i + 2];
+ for (j = 0; j < buf[i + 1]; j++) {
+ AdvWriteWordAutoIncLram(iop_base, word);
+ len += 2;
+ }
+ i += 3;
+ } else if (buf[i] == 0xfe) {
+ unsigned short word = (buf[i + 2] << 8) | buf[i + 1];
+ AdvWriteWordAutoIncLram(iop_base, word);
+ i += 2;
+ len += 2;
+ } else {
+ unsigned int off = buf[i] * 2;
+ unsigned short word = (buf[off + 1] << 8) | buf[off];
+ AdvWriteWordAutoIncLram(iop_base, word);
+ len += 2;
+ }
+ }
+
+ end = len;
+
+ while (len < memsize) {
+ AdvWriteWordAutoIncLram(iop_base, 0);
+ len += 2;
+ }
+
+ /* Verify the microcode checksum. */
+ sum = 0;
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
+
+ for (len = 0; len < end; len += 2) {
+ sum += AdvReadWordAutoIncLram(iop_base);
+ }
+
+ if (sum != chksum)
+ return ASC_IERR_MCODE_CHKSUM;
+
+ return 0;
+}
+
+static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
+{
+ ADV_CARR_T *carrp;
+ ADV_SDCNT buf_size;
+ ADV_PADDR carr_paddr;
+
+ carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf);
+ asc_dvc->carr_freelist = NULL;
+ if (carrp == asc_dvc->carrier_buf) {
+ buf_size = ADV_CARRIER_BUFSIZE;
+ } else {
+ buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T);
+ }
+
+ do {
+ /* Get physical address of the carrier 'carrp'. */
+ carr_paddr = cpu_to_le32(virt_to_bus(carrp));
+
+ buf_size -= sizeof(ADV_CARR_T);
+
+ carrp->carr_pa = carr_paddr;
+ carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp));
+
+ /*
+ * Insert the carrier at the beginning of the freelist.
+ */
+ carrp->next_vpa =
+ cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
+ asc_dvc->carr_freelist = carrp;
+
+ carrp++;
+ } while (buf_size > 0);
+}
+
+/*
+ * Send an idle command to the chip and wait for completion.
+ *
+ * Command completion is polled for once per microsecond.
+ *
+ * The function can be called from anywhere including an interrupt handler.
+ * But the function is not re-entrant, so it uses the DvcEnter/LeaveCritical()
+ * functions to prevent reentrancy.
+ *
+ * Return Values:
+ * ADV_TRUE - command completed successfully
+ * ADV_FALSE - command failed
+ * ADV_ERROR - command timed out
+ */
+static int
+AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
+ ushort idle_cmd, ADV_DCNT idle_cmd_parameter)
+{
+ int result;
+ ADV_DCNT i, j;
+ AdvPortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Clear the idle command status which is set by the microcode
+ * to a non-zero value to indicate when the command is completed.
+ * The non-zero result is one of the IDLE_CMD_STATUS_* values
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, (ushort)0);
+
+ /*
+ * Write the idle command value after the idle command parameter
+ * has been written to avoid a race condition. If the order is not
+ * followed, the microcode may process the idle command before the
+ * parameters have been written to LRAM.
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IDLE_CMD_PARAMETER,
+ cpu_to_le32(idle_cmd_parameter));
+ AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd);
+
+ /*
+ * Tickle the RISC to tell it to process the idle command.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_B);
+ if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
+ /*
+ * Clear the tickle value. In the ASC-3550 the RISC flag
+ * command 'clr_tickle_b' does not work unless the host
+ * value is cleared.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP);
+ }
+
+ /* Wait for up to 100 millisecond for the idle command to timeout. */
+ for (i = 0; i < SCSI_WAIT_100_MSEC; i++) {
+ /* Poll once each microsecond for command completion. */
+ for (j = 0; j < SCSI_US_PER_MSEC; j++) {
+ AdvReadWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS,
+ result);
+ if (result != 0)
+ return result;
+ udelay(1);
+ }
+ }
+
+ BUG(); /* The idle command should never timeout. */
+ return ADV_ERROR;
+}
+
+/*
+ * Reset SCSI Bus and purge all outstanding requests.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - All requests are purged and SCSI Bus is reset.
+ * ADV_FALSE(0) - Microcode command failed.
+ * ADV_ERROR(-1) - Microcode command timed-out. Microcode or IC
+ * may be hung which requires driver recovery.
+ */
+static int AdvResetSB(ADV_DVC_VAR *asc_dvc)
+{
+ int status;
+
+ /*
+ * Send the SCSI Bus Reset idle start idle command which asserts
+ * the SCSI Bus Reset signal.
+ */
+ status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START, 0L);
+ if (status != ADV_TRUE) {
+ return status;
+ }
+
+ /*
+ * Delay for the specified SCSI Bus Reset hold time.
+ *
+ * The hold time delay is done on the host because the RISC has no
+ * microsecond accurate timer.
+ */
+ udelay(ASC_SCSI_RESET_HOLD_TIME_US);
+
+ /*
+ * Send the SCSI Bus Reset end idle command which de-asserts
+ * the SCSI Bus Reset signal and purges any pending requests.
+ */
+ status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L);
+ if (status != ADV_TRUE) {
+ return status;
+ }
+
+ mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */
+
+ return status;
+}
+
+/*
+ * Initialize the ASC-3550.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Needed after initialization for error recovery.
+ */
+static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
+{
+ const struct firmware *fw;
+ const char fwname[] = "/*(DEBLOBBED)*/";
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ int begin_addr;
+ int end_addr;
+ ushort code_sum;
+ int word;
+ int i;
+ int err;
+ unsigned long chksum;
+ ushort scsi_cfg1;
+ uchar tid;
+ ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */
+ ushort wdtr_able = 0, sdtr_able, tagqng_able;
+ uchar max_cmd[ADV_MAX_TID + 1];
+
+ /* If there is already an error, don't continue. */
+ if (asc_dvc->err_code != 0)
+ return ADV_ERROR;
+
+ /*
+ * The caller must set 'chip_type' to ADV_CHIP_ASC3550.
+ */
+ if (asc_dvc->chip_type != ADV_CHIP_ASC3550) {
+ asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
+ return ADV_ERROR;
+ }
+
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Save the RISC memory BIOS region before writing the microcode.
+ * The BIOS may already be loaded and using its RISC LRAM region
+ * so its region must be saved and restored.
+ *
+ * Note: This code makes the assumption, which is currently true,
+ * that a chip reset does not clear RISC LRAM.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
+ AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
+ bios_mem[i]);
+ }
+
+ /*
+ * Save current per TID negotiated values.
+ */
+ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) {
+ ushort bios_version, major, minor;
+
+ bios_version =
+ bios_mem[(ASC_MC_BIOS_VERSION - ASC_MC_BIOSMEM) / 2];
+ major = (bios_version >> 12) & 0xF;
+ minor = (bios_version >> 8) & 0xF;
+ if (major < 3 || (major == 3 && minor == 1)) {
+ /* BIOS 3.1 and earlier location of 'wdtr_able' variable. */
+ AdvReadWordLram(iop_base, 0x120, wdtr_able);
+ } else {
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ }
+ }
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+
+ err = reject_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fwname, err);
+ asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
+ return err;
+ }
+ if (fw->size < 4) {
+ printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
+ fw->size, fwname);
+ release_firmware(fw);
+ asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
+ return -EINVAL;
+ }
+ chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
+ (fw->data[1] << 8) | fw->data[0];
+ asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
+ fw->size - 4, ADV_3550_MEMSIZE,
+ chksum);
+ release_firmware(fw);
+ if (asc_dvc->err_code)
+ return ADV_ERROR;
+
+ /*
+ * Restore the RISC memory BIOS region.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
+ AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
+ bios_mem[i]);
+ }
+
+ /*
+ * Calculate and write the microcode code checksum to the microcode
+ * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
+ AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
+ code_sum = 0;
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
+ for (word = begin_addr; word < end_addr; word += 2) {
+ code_sum += AdvReadWordAutoIncLram(iop_base);
+ }
+ AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);
+
+ /*
+ * Read and save microcode version and date.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
+ asc_dvc->cfg->mcode_date);
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
+ asc_dvc->cfg->mcode_version);
+
+ /*
+ * Set the chip type to indicate the ASC3550.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC3550);
+
+ /*
+ * If the PCI Configuration Command Register "Parity Error Response
+ * Control" Bit was clear (0), then set the microcode variable
+ * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
+ * to ignore DMA parity errors.
+ */
+ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
+ AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ word |= CONTROL_FLAG_IGNORE_PERR;
+ AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ }
+
+ /*
+ * For ASC-3550, setting the START_CTL_EMFU [3:2] bits sets a FIFO
+ * threshold of 128 bytes. This register is only accessible to the host.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
+ START_CTL_EMFU | READ_CMD_MRM);
+
+ /*
+ * Microcode operating variables for WDTR, SDTR, and command tag
+ * queuing will be set in slave_configure() based on what a
+ * device reports it is capable of in Inquiry byte 7.
+ *
+ * If SCSI Bus Resets have been disabled, then directly set
+ * SDTR and WDTR from the EEPROM configuration. This will allow
+ * the BIOS and warm boot to work without a SCSI bus hang on
+ * the Inquiry caused by host and target mismatched DTR values.
+ * Without the SCSI Bus Reset, before an Inquiry a device can't
+ * be assumed to be in Asynchronous, Narrow mode.
+ */
+ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
+ asc_dvc->wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
+ asc_dvc->sdtr_able);
+ }
+
+ /*
+ * Set microcode operating variables for SDTR_SPEED1, SDTR_SPEED2,
+ * SDTR_SPEED3, and SDTR_SPEED4 based on the ULTRA EEPROM per TID
+ * bitmask. These values determine the maximum SDTR speed negotiated
+ * with a device.
+ *
+ * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
+ * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
+ * without determining here whether the device supports SDTR.
+ *
+ * 4-bit speed SDTR speed name
+ * =========== ===============
+ * 0000b (0x0) SDTR disabled
+ * 0001b (0x1) 5 Mhz
+ * 0010b (0x2) 10 Mhz
+ * 0011b (0x3) 20 Mhz (Ultra)
+ * 0100b (0x4) 40 Mhz (LVD/Ultra2)
+ * 0101b (0x5) 80 Mhz (LVD2/Ultra3)
+ * 0110b (0x6) Undefined
+ * .
+ * 1111b (0xF) Undefined
+ */
+ word = 0;
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ if (ADV_TID_TO_TIDMASK(tid) & asc_dvc->ultra_able) {
+ /* Set Ultra speed for TID 'tid'. */
+ word |= (0x3 << (4 * (tid % 4)));
+ } else {
+ /* Set Fast speed for TID 'tid'. */
+ word |= (0x2 << (4 * (tid % 4)));
+ }
+ if (tid == 3) { /* Check if done with sdtr_speed1. */
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, word);
+ word = 0;
+ } else if (tid == 7) { /* Check if done with sdtr_speed2. */
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, word);
+ word = 0;
+ } else if (tid == 11) { /* Check if done with sdtr_speed3. */
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, word);
+ word = 0;
+ } else if (tid == 15) { /* Check if done with sdtr_speed4. */
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, word);
+ /* End of loop. */
+ }
+ }
+
+ /*
+ * Set microcode operating variable for the disconnect per TID bitmask.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
+ asc_dvc->cfg->disc_enable);
+
+ /*
+ * Set SCSI_CFG0 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG0 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
+ PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
+ asc_dvc->chip_scsi_id);
+
+ /*
+ * Determine SCSI_CFG1 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+
+ /* Read current SCSI_CFG1 Register value. */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+
+ /*
+ * If all three connectors are in use, return an error.
+ */
+ if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 ||
+ (scsi_cfg1 & CABLE_ILLEGAL_B) == 0) {
+ asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If the internal narrow cable is reversed all of the SCSI_CTRL
+ * register signals will be set. Check for and return an error if
+ * this condition is found.
+ */
+ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
+ asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If this is a differential board and a single-ended device
+ * is attached to one of the connectors, return an error.
+ */
+ if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0) {
+ asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If automatic termination control is enabled, then set the
+ * termination value based on a table listed in a_condor.h.
+ *
+ * If manual termination was specified with an EEPROM setting
+ * then 'termination' was set-up in AdvInitFrom3550EEPROM() and
+ * is ready to be 'ored' into SCSI_CFG1.
+ */
+ if (asc_dvc->cfg->termination == 0) {
+ /*
+ * The software always controls termination by setting TERM_CTL_SEL.
+ * If TERM_CTL_SEL were set to 0, the hardware would set termination.
+ */
+ asc_dvc->cfg->termination |= TERM_CTL_SEL;
+
+ switch (scsi_cfg1 & CABLE_DETECT) {
+ /* TERM_CTL_H: on, TERM_CTL_L: on */
+ case 0x3:
+ case 0x7:
+ case 0xB:
+ case 0xD:
+ case 0xE:
+ case 0xF:
+ asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L);
+ break;
+
+ /* TERM_CTL_H: on, TERM_CTL_L: off */
+ case 0x1:
+ case 0x5:
+ case 0x9:
+ case 0xA:
+ case 0xC:
+ asc_dvc->cfg->termination |= TERM_CTL_H;
+ break;
+
+ /* TERM_CTL_H: off, TERM_CTL_L: off */
+ case 0x2:
+ case 0x6:
+ break;
+ }
+ }
+
+ /*
+ * Clear any set TERM_CTL_H and TERM_CTL_L bits.
+ */
+ scsi_cfg1 &= ~TERM_CTL;
+
+ /*
+ * Invert the TERM_CTL_H and TERM_CTL_L bits and then
+ * set 'scsi_cfg1'. The TERM_POL bit does not need to be
+ * referenced, because the hardware internally inverts
+ * the Termination High and Low bits if TERM_POL is set.
+ */
+ scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL));
+
+ /*
+ * Set SCSI_CFG1 Microcode Default Value
+ *
+ * Set filter value and possibly modified termination control
+ * bits in the Microcode SCSI_CFG1 Register Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1,
+ FLTR_DISABLE | scsi_cfg1);
+
+ /*
+ * Set MEM_CFG Microcode Default Value
+ *
+ * The microcode will set the MEM_CFG register using this value
+ * after it is started below.
+ *
+ * MEM_CFG may be accessed as a word or byte, but only bits 0-7
+ * are defined.
+ *
+ * ASC-3550 has 8KB internal memory.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
+ BIOS_EN | RAM_SZ_8KB);
+
+ /*
+ * Set SEL_MASK Microcode Default Value
+ *
+ * The microcode will set the SEL_MASK register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
+ ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));
+
+ AdvBuildCarrierFreelist(asc_dvc);
+
+ /*
+ * Set-up the Host->RISC Initiator Command Queue (ICQ).
+ */
+
+ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
+ return ADV_ERROR;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
+
+ /*
+ * The first command issued will be placed in the stopper carrier.
+ */
+ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Set RISC ICQ physical address start value.
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa);
+
+ /*
+ * Set-up the RISC->Host Initiator Response Queue (IRQ).
+ */
+ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
+ return ADV_ERROR;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
+
+ /*
+ * The first command completed by the RISC will be placed in
+ * the stopper.
+ *
+ * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
+ * completed the RISC will set the ASC_RQ_STOPPER bit.
+ */
+ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Set RISC IRQ physical address start value.
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa);
+ asc_dvc->carr_pending_cnt = 0;
+
+ AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
+ (ADV_INTR_ENABLE_HOST_INTR |
+ ADV_INTR_ENABLE_GLOBAL_INTR));
+
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
+ AdvWriteWordRegister(iop_base, IOPW_PC, word);
+
+ /* finally, finally, gentlemen, start your engine */
+ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);
+
+ /*
+ * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
+ * Resets should be performed. The RISC has to be running
+ * to issue a SCSI Bus Reset.
+ */
+ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) {
+ /*
+ * If the BIOS Signature is present in memory, restore the
+ * BIOS Handshake Configuration Table and do not perform
+ * a SCSI Bus Reset.
+ */
+ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] ==
+ 0x55AA) {
+ /*
+ * Restore per TID negotiated values.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
+ tagqng_able);
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ AdvWriteByteLram(iop_base,
+ ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+ } else {
+ if (AdvResetSB(asc_dvc) != ADV_TRUE) {
+ warn_code = ASC_WARN_BUSRESET_ERROR;
+ }
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Initialize the ASC-38C0800.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Needed after initialization for error recovery.
+ */
+static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
+{
+ const struct firmware *fw;
+ const char fwname[] = "/*(DEBLOBBED)*/";
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ int begin_addr;
+ int end_addr;
+ ushort code_sum;
+ int word;
+ int i;
+ int err;
+ unsigned long chksum;
+ ushort scsi_cfg1;
+ uchar byte;
+ uchar tid;
+ ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */
+ ushort wdtr_able, sdtr_able, tagqng_able;
+ uchar max_cmd[ADV_MAX_TID + 1];
+
+ /* If there is already an error, don't continue. */
+ if (asc_dvc->err_code != 0)
+ return ADV_ERROR;
+
+ /*
+ * The caller must set 'chip_type' to ADV_CHIP_ASC38C0800.
+ */
+ if (asc_dvc->chip_type != ADV_CHIP_ASC38C0800) {
+ asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
+ return ADV_ERROR;
+ }
+
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Save the RISC memory BIOS region before writing the microcode.
+ * The BIOS may already be loaded and using its RISC LRAM region
+ * so its region must be saved and restored.
+ *
+ * Note: This code makes the assumption, which is currently true,
+ * that a chip reset does not clear RISC LRAM.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
+ AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
+ bios_mem[i]);
+ }
+
+ /*
+ * Save current per TID negotiated values.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+
+ /*
+ * RAM BIST (RAM Built-In Self Test)
+ *
+ * Address : I/O base + offset 0x38h register (byte).
+ * Function: Bit 7-6(RW) : RAM mode
+ * Normal Mode : 0x00
+ * Pre-test Mode : 0x40
+ * RAM Test Mode : 0x80
+ * Bit 5 : unused
+ * Bit 4(RO) : Done bit
+ * Bit 3-0(RO) : Status
+ * Host Error : 0x08
+ * Int_RAM Error : 0x04
+ * RISC Error : 0x02
+ * SCSI Error : 0x01
+ * No Error : 0x00
+ *
+ * Note: RAM BIST code should be put right here, before loading the
+ * microcode and after saving the RISC memory BIOS region.
+ */
+
+ /*
+ * LRAM Pre-test
+ *
+ * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds.
+ * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return
+ * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset
+ * to NORMAL_MODE, return an error too.
+ */
+ for (i = 0; i < 2; i++) {
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE);
+ mdelay(10); /* Wait for 10ms before reading back. */
+ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
+ if ((byte & RAM_TEST_DONE) == 0
+ || (byte & 0x0F) != PRE_TEST_VALUE) {
+ asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
+ return ADV_ERROR;
+ }
+
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
+ mdelay(10); /* Wait for 10ms before reading back. */
+ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST)
+ != NORMAL_VALUE) {
+ asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
+ return ADV_ERROR;
+ }
+ }
+
+ /*
+ * LRAM Test - It takes about 1.5 ms to run through the test.
+ *
+ * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds.
+ * If Done bit not set or Status not 0, save register byte, set the
+ * err_code, and return an error.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE);
+ mdelay(10); /* Wait for 10ms before checking status. */
+
+ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
+ if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) {
+ /* Get here if Done bit not set or Status not 0. */
+ asc_dvc->bist_err_code = byte; /* for BIOS display message */
+ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST;
+ return ADV_ERROR;
+ }
+
+ /* We need to reset back to normal mode after LRAM test passes. */
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
+
+ err = reject_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fwname, err);
+ asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
+ return err;
+ }
+ if (fw->size < 4) {
+ printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
+ fw->size, fwname);
+ release_firmware(fw);
+ asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
+ return -EINVAL;
+ }
+ chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
+ (fw->data[1] << 8) | fw->data[0];
+ asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
+ fw->size - 4, ADV_38C0800_MEMSIZE,
+ chksum);
+ release_firmware(fw);
+ if (asc_dvc->err_code)
+ return ADV_ERROR;
+
+ /*
+ * Restore the RISC memory BIOS region.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
+ AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
+ bios_mem[i]);
+ }
+
+ /*
+ * Calculate and write the microcode code checksum to the microcode
+ * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
+ AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
+ code_sum = 0;
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
+ for (word = begin_addr; word < end_addr; word += 2) {
+ code_sum += AdvReadWordAutoIncLram(iop_base);
+ }
+ AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);
+
+ /*
+ * Read microcode version and date.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
+ asc_dvc->cfg->mcode_date);
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
+ asc_dvc->cfg->mcode_version);
+
+ /*
+ * Set the chip type to indicate the ASC38C0800.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C0800);
+
+ /*
+ * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register.
+ * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current
+ * cable detection and then we are able to read C_DET[3:0].
+ *
+ * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1
+ * Microcode Default Value' section below.
+ */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1,
+ scsi_cfg1 | DIS_TERM_DRV);
+
+ /*
+ * If the PCI Configuration Command Register "Parity Error Response
+ * Control" Bit was clear (0), then set the microcode variable
+ * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
+ * to ignore DMA parity errors.
+ */
+ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
+ AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ word |= CONTROL_FLAG_IGNORE_PERR;
+ AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ }
+
+ /*
+ * For ASC-38C0800, set FIFO_THRESH_80B [6:4] bits and START_CTL_TH [3:2]
+ * bits for the default FIFO threshold.
+ *
+ * Note: ASC-38C0800 FIFO threshold has been changed to 256 bytes.
+ *
+ * For DMA Errata #4 set the BC_THRESH_ENB bit.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
+ BC_THRESH_ENB | FIFO_THRESH_80B | START_CTL_TH |
+ READ_CMD_MRM);
+
+ /*
+ * Microcode operating variables for WDTR, SDTR, and command tag
+ * queuing will be set in slave_configure() based on what a
+ * device reports it is capable of in Inquiry byte 7.
+ *
+ * If SCSI Bus Resets have been disabled, then directly set
+ * SDTR and WDTR from the EEPROM configuration. This will allow
+ * the BIOS and warm boot to work without a SCSI bus hang on
+ * the Inquiry caused by host and target mismatched DTR values.
+ * Without the SCSI Bus Reset, before an Inquiry a device can't
+ * be assumed to be in Asynchronous, Narrow mode.
+ */
+ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
+ asc_dvc->wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
+ asc_dvc->sdtr_able);
+ }
+
+ /*
+ * Set microcode operating variables for DISC and SDTR_SPEED1,
+ * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM
+ * configuration values.
+ *
+ * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
+ * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
+ * without determining here whether the device supports SDTR.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
+ asc_dvc->cfg->disc_enable);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4);
+
+ /*
+ * Set SCSI_CFG0 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG0 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
+ PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
+ asc_dvc->chip_scsi_id);
+
+ /*
+ * Determine SCSI_CFG1 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+
+ /* Read current SCSI_CFG1 Register value. */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+
+ /*
+ * If the internal narrow cable is reversed all of the SCSI_CTRL
+ * register signals will be set. Check for and return an error if
+ * this condition is found.
+ */
+ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
+ asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * All kind of combinations of devices attached to one of four
+ * connectors are acceptable except HVD device attached. For example,
+ * LVD device can be attached to SE connector while SE device attached
+ * to LVD connector. If LVD device attached to SE connector, it only
+ * runs up to Ultra speed.
+ *
+ * If an HVD device is attached to one of LVD connectors, return an
+ * error. However, there is no way to detect HVD device attached to
+ * SE connectors.
+ */
+ if (scsi_cfg1 & HVD) {
+ asc_dvc->err_code = ASC_IERR_HVD_DEVICE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If either SE or LVD automatic termination control is enabled, then
+ * set the termination value based on a table listed in a_condor.h.
+ *
+ * If manual termination was specified with an EEPROM setting then
+ * 'termination' was set-up in AdvInitFrom38C0800EEPROM() and is ready
+ * to be 'ored' into SCSI_CFG1.
+ */
+ if ((asc_dvc->cfg->termination & TERM_SE) == 0) {
+ /* SE automatic termination control is enabled. */
+ switch (scsi_cfg1 & C_DET_SE) {
+ /* TERM_SE_HI: on, TERM_SE_LO: on */
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ asc_dvc->cfg->termination |= TERM_SE;
+ break;
+
+ /* TERM_SE_HI: on, TERM_SE_LO: off */
+ case 0x0:
+ asc_dvc->cfg->termination |= TERM_SE_HI;
+ break;
+ }
+ }
+
+ if ((asc_dvc->cfg->termination & TERM_LVD) == 0) {
+ /* LVD automatic termination control is enabled. */
+ switch (scsi_cfg1 & C_DET_LVD) {
+ /* TERM_LVD_HI: on, TERM_LVD_LO: on */
+ case 0x4:
+ case 0x8:
+ case 0xC:
+ asc_dvc->cfg->termination |= TERM_LVD;
+ break;
+
+ /* TERM_LVD_HI: off, TERM_LVD_LO: off */
+ case 0x0:
+ break;
+ }
+ }
+
+ /*
+ * Clear any set TERM_SE and TERM_LVD bits.
+ */
+ scsi_cfg1 &= (~TERM_SE & ~TERM_LVD);
+
+ /*
+ * Invert the TERM_SE and TERM_LVD bits and then set 'scsi_cfg1'.
+ */
+ scsi_cfg1 |= (~asc_dvc->cfg->termination & 0xF0);
+
+ /*
+ * Clear BIG_ENDIAN, DIS_TERM_DRV, Terminator Polarity and HVD/LVD/SE
+ * bits and set possibly modified termination control bits in the
+ * Microcode SCSI_CFG1 Register Value.
+ */
+ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL & ~HVD_LVD_SE);
+
+ /*
+ * Set SCSI_CFG1 Microcode Default Value
+ *
+ * Set possibly modified termination control and reset DIS_TERM_DRV
+ * bits in the Microcode SCSI_CFG1 Register Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1);
+
+ /*
+ * Set MEM_CFG Microcode Default Value
+ *
+ * The microcode will set the MEM_CFG register using this value
+ * after it is started below.
+ *
+ * MEM_CFG may be accessed as a word or byte, but only bits 0-7
+ * are defined.
+ *
+ * ASC-38C0800 has 16KB internal memory.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
+ BIOS_EN | RAM_SZ_16KB);
+
+ /*
+ * Set SEL_MASK Microcode Default Value
+ *
+ * The microcode will set the SEL_MASK register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
+ ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));
+
+ AdvBuildCarrierFreelist(asc_dvc);
+
+ /*
+ * Set-up the Host->RISC Initiator Command Queue (ICQ).
+ */
+
+ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
+ return ADV_ERROR;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
+
+ /*
+ * The first command issued will be placed in the stopper carrier.
+ */
+ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Set RISC ICQ physical address start value.
+ * carr_pa is LE, must be native before write
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa);
+
+ /*
+ * Set-up the RISC->Host Initiator Response Queue (IRQ).
+ */
+ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
+ return ADV_ERROR;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
+
+ /*
+ * The first command completed by the RISC will be placed in
+ * the stopper.
+ *
+ * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
+ * completed the RISC will set the ASC_RQ_STOPPER bit.
+ */
+ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Set RISC IRQ physical address start value.
+ *
+ * carr_pa is LE, must be native before write *
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa);
+ asc_dvc->carr_pending_cnt = 0;
+
+ AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
+ (ADV_INTR_ENABLE_HOST_INTR |
+ ADV_INTR_ENABLE_GLOBAL_INTR));
+
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
+ AdvWriteWordRegister(iop_base, IOPW_PC, word);
+
+ /* finally, finally, gentlemen, start your engine */
+ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);
+
+ /*
+ * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
+ * Resets should be performed. The RISC has to be running
+ * to issue a SCSI Bus Reset.
+ */
+ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) {
+ /*
+ * If the BIOS Signature is present in memory, restore the
+ * BIOS Handshake Configuration Table and do not perform
+ * a SCSI Bus Reset.
+ */
+ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] ==
+ 0x55AA) {
+ /*
+ * Restore per TID negotiated values.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
+ tagqng_able);
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ AdvWriteByteLram(iop_base,
+ ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+ } else {
+ if (AdvResetSB(asc_dvc) != ADV_TRUE) {
+ warn_code = ASC_WARN_BUSRESET_ERROR;
+ }
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Initialize the ASC-38C1600.
+ *
+ * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Needed after initialization for error recovery.
+ */
+static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
+{
+ const struct firmware *fw;
+ const char fwname[] = "/*(DEBLOBBED)*/";
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ int begin_addr;
+ int end_addr;
+ ushort code_sum;
+ long word;
+ int i;
+ int err;
+ unsigned long chksum;
+ ushort scsi_cfg1;
+ uchar byte;
+ uchar tid;
+ ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */
+ ushort wdtr_able, sdtr_able, ppr_able, tagqng_able;
+ uchar max_cmd[ASC_MAX_TID + 1];
+
+ /* If there is already an error, don't continue. */
+ if (asc_dvc->err_code != 0) {
+ return ADV_ERROR;
+ }
+
+ /*
+ * The caller must set 'chip_type' to ADV_CHIP_ASC38C1600.
+ */
+ if (asc_dvc->chip_type != ADV_CHIP_ASC38C1600) {
+ asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE;
+ return ADV_ERROR;
+ }
+
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Save the RISC memory BIOS region before writing the microcode.
+ * The BIOS may already be loaded and using its RISC LRAM region
+ * so its region must be saved and restored.
+ *
+ * Note: This code makes the assumption, which is currently true,
+ * that a chip reset does not clear RISC LRAM.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
+ AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
+ bios_mem[i]);
+ }
+
+ /*
+ * Save current per TID negotiated values.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
+ for (tid = 0; tid <= ASC_MAX_TID; tid++) {
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+
+ /*
+ * RAM BIST (Built-In Self Test)
+ *
+ * Address : I/O base + offset 0x38h register (byte).
+ * Function: Bit 7-6(RW) : RAM mode
+ * Normal Mode : 0x00
+ * Pre-test Mode : 0x40
+ * RAM Test Mode : 0x80
+ * Bit 5 : unused
+ * Bit 4(RO) : Done bit
+ * Bit 3-0(RO) : Status
+ * Host Error : 0x08
+ * Int_RAM Error : 0x04
+ * RISC Error : 0x02
+ * SCSI Error : 0x01
+ * No Error : 0x00
+ *
+ * Note: RAM BIST code should be put right here, before loading the
+ * microcode and after saving the RISC memory BIOS region.
+ */
+
+ /*
+ * LRAM Pre-test
+ *
+ * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds.
+ * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return
+ * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset
+ * to NORMAL_MODE, return an error too.
+ */
+ for (i = 0; i < 2; i++) {
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE);
+ mdelay(10); /* Wait for 10ms before reading back. */
+ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
+ if ((byte & RAM_TEST_DONE) == 0
+ || (byte & 0x0F) != PRE_TEST_VALUE) {
+ asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
+ return ADV_ERROR;
+ }
+
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
+ mdelay(10); /* Wait for 10ms before reading back. */
+ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST)
+ != NORMAL_VALUE) {
+ asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST;
+ return ADV_ERROR;
+ }
+ }
+
+ /*
+ * LRAM Test - It takes about 1.5 ms to run through the test.
+ *
+ * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds.
+ * If Done bit not set or Status not 0, save register byte, set the
+ * err_code, and return an error.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE);
+ mdelay(10); /* Wait for 10ms before checking status. */
+
+ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST);
+ if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) {
+ /* Get here if Done bit not set or Status not 0. */
+ asc_dvc->bist_err_code = byte; /* for BIOS display message */
+ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST;
+ return ADV_ERROR;
+ }
+
+ /* We need to reset back to normal mode after LRAM test passes. */
+ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE);
+
+ err = reject_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fwname, err);
+ asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
+ return err;
+ }
+ if (fw->size < 4) {
+ printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
+ fw->size, fwname);
+ release_firmware(fw);
+ asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM;
+ return -EINVAL;
+ }
+ chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
+ (fw->data[1] << 8) | fw->data[0];
+ asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4],
+ fw->size - 4, ADV_38C1600_MEMSIZE,
+ chksum);
+ release_firmware(fw);
+ if (asc_dvc->err_code)
+ return ADV_ERROR;
+
+ /*
+ * Restore the RISC memory BIOS region.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) {
+ AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i),
+ bios_mem[i]);
+ }
+
+ /*
+ * Calculate and write the microcode code checksum to the microcode
+ * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
+ AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
+ code_sum = 0;
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr);
+ for (word = begin_addr; word < end_addr; word += 2) {
+ code_sum += AdvReadWordAutoIncLram(iop_base);
+ }
+ AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);
+
+ /*
+ * Read microcode version and date.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE,
+ asc_dvc->cfg->mcode_date);
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM,
+ asc_dvc->cfg->mcode_version);
+
+ /*
+ * Set the chip type to indicate the ASC38C1600.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C1600);
+
+ /*
+ * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register.
+ * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current
+ * cable detection and then we are able to read C_DET[3:0].
+ *
+ * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1
+ * Microcode Default Value' section below.
+ */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1,
+ scsi_cfg1 | DIS_TERM_DRV);
+
+ /*
+ * If the PCI Configuration Command Register "Parity Error Response
+ * Control" Bit was clear (0), then set the microcode variable
+ * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
+ * to ignore DMA parity errors.
+ */
+ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) {
+ AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ word |= CONTROL_FLAG_IGNORE_PERR;
+ AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ }
+
+ /*
+ * If the BIOS control flag AIPP (Asynchronous Information
+ * Phase Protection) disable bit is not set, then set the firmware
+ * 'control_flag' CONTROL_FLAG_ENABLE_AIPP bit to enable
+ * AIPP checking and encoding.
+ */
+ if ((asc_dvc->bios_ctrl & BIOS_CTRL_AIPP_DIS) == 0) {
+ AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ word |= CONTROL_FLAG_ENABLE_AIPP;
+ AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ }
+
+ /*
+ * For ASC-38C1600 use DMA_CFG0 default values: FIFO_THRESH_80B [6:4],
+ * and START_CTL_TH [3:2].
+ */
+ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
+ FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM);
+
+ /*
+ * Microcode operating variables for WDTR, SDTR, and command tag
+ * queuing will be set in slave_configure() based on what a
+ * device reports it is capable of in Inquiry byte 7.
+ *
+ * If SCSI Bus Resets have been disabled, then directly set
+ * SDTR and WDTR from the EEPROM configuration. This will allow
+ * the BIOS and warm boot to work without a SCSI bus hang on
+ * the Inquiry caused by host and target mismatched DTR values.
+ * Without the SCSI Bus Reset, before an Inquiry a device can't
+ * be assumed to be in Asynchronous, Narrow mode.
+ */
+ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) {
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
+ asc_dvc->wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
+ asc_dvc->sdtr_able);
+ }
+
+ /*
+ * Set microcode operating variables for DISC and SDTR_SPEED1,
+ * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM
+ * configuration values.
+ *
+ * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2,
+ * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them
+ * without determining here whether the device supports SDTR.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE,
+ asc_dvc->cfg->disc_enable);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4);
+
+ /*
+ * Set SCSI_CFG0 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG0 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
+ PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN |
+ asc_dvc->chip_scsi_id);
+
+ /*
+ * Calculate SCSI_CFG1 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ *
+ * Each ASC-38C1600 function has only two cable detect bits.
+ * The bus mode override bits are in IOPB_SOFT_OVER_WR.
+ */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+
+ /*
+ * If the cable is reversed all of the SCSI_CTRL register signals
+ * will be set. Check for and return an error if this condition is
+ * found.
+ */
+ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) {
+ asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * Each ASC-38C1600 function has two connectors. Only an HVD device
+ * can not be connected to either connector. An LVD device or SE device
+ * may be connected to either connecor. If an SE device is connected,
+ * then at most Ultra speed (20 Mhz) can be used on both connectors.
+ *
+ * If an HVD device is attached, return an error.
+ */
+ if (scsi_cfg1 & HVD) {
+ asc_dvc->err_code |= ASC_IERR_HVD_DEVICE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * Each function in the ASC-38C1600 uses only the SE cable detect and
+ * termination because there are two connectors for each function. Each
+ * function may use either LVD or SE mode. Corresponding the SE automatic
+ * termination control EEPROM bits are used for each function. Each
+ * function has its own EEPROM. If SE automatic control is enabled for
+ * the function, then set the termination value based on a table listed
+ * in a_condor.h.
+ *
+ * If manual termination is specified in the EEPROM for the function,
+ * then 'termination' was set-up in AscInitFrom38C1600EEPROM() and is
+ * ready to be 'ored' into SCSI_CFG1.
+ */
+ if ((asc_dvc->cfg->termination & TERM_SE) == 0) {
+ struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc);
+ /* SE automatic termination control is enabled. */
+ switch (scsi_cfg1 & C_DET_SE) {
+ /* TERM_SE_HI: on, TERM_SE_LO: on */
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ asc_dvc->cfg->termination |= TERM_SE;
+ break;
+
+ case 0x0:
+ if (PCI_FUNC(pdev->devfn) == 0) {
+ /* Function 0 - TERM_SE_HI: off, TERM_SE_LO: off */
+ } else {
+ /* Function 1 - TERM_SE_HI: on, TERM_SE_LO: off */
+ asc_dvc->cfg->termination |= TERM_SE_HI;
+ }
+ break;
+ }
+ }
+
+ /*
+ * Clear any set TERM_SE bits.
+ */
+ scsi_cfg1 &= ~TERM_SE;
+
+ /*
+ * Invert the TERM_SE bits and then set 'scsi_cfg1'.
+ */
+ scsi_cfg1 |= (~asc_dvc->cfg->termination & TERM_SE);
+
+ /*
+ * Clear Big Endian and Terminator Polarity bits and set possibly
+ * modified termination control bits in the Microcode SCSI_CFG1
+ * Register Value.
+ *
+ * Big Endian bit is not used even on big endian machines.
+ */
+ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL);
+
+ /*
+ * Set SCSI_CFG1 Microcode Default Value
+ *
+ * Set possibly modified termination control bits in the Microcode
+ * SCSI_CFG1 Register Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1);
+
+ /*
+ * Set MEM_CFG Microcode Default Value
+ *
+ * The microcode will set the MEM_CFG register using this value
+ * after it is started below.
+ *
+ * MEM_CFG may be accessed as a word or byte, but only bits 0-7
+ * are defined.
+ *
+ * ASC-38C1600 has 32KB internal memory.
+ *
+ * XXX - Since ASC38C1600 Rev.3 has a Local RAM failure issue, we come
+ * out a special 16K Adv Library and Microcode version. After the issue
+ * resolved, we should turn back to the 32K support. Both a_condor.h and
+ * mcode.sas files also need to be updated.
+ *
+ * AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
+ * BIOS_EN | RAM_SZ_32KB);
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG,
+ BIOS_EN | RAM_SZ_16KB);
+
+ /*
+ * Set SEL_MASK Microcode Default Value
+ *
+ * The microcode will set the SEL_MASK register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
+ ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));
+
+ AdvBuildCarrierFreelist(asc_dvc);
+
+ /*
+ * Set-up the Host->RISC Initiator Command Queue (ICQ).
+ */
+ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
+ return ADV_ERROR;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa));
+
+ /*
+ * The first command issued will be placed in the stopper carrier.
+ */
+ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Set RISC ICQ physical address start value. Initialize the
+ * COMMA register to the same value otherwise the RISC will
+ * prematurely detect a command is available.
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa);
+ AdvWriteDWordRegister(iop_base, IOPDW_COMMA,
+ le32_to_cpu(asc_dvc->icq_sp->carr_pa));
+
+ /*
+ * Set-up the RISC->Host Initiator Response Queue (IRQ).
+ */
+ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) {
+ asc_dvc->err_code |= ASC_IERR_NO_CARRIER;
+ return ADV_ERROR;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa));
+
+ /*
+ * The first command completed by the RISC will be placed in
+ * the stopper.
+ *
+ * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is
+ * completed the RISC will set the ASC_RQ_STOPPER bit.
+ */
+ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Set RISC IRQ physical address start value.
+ */
+ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa);
+ asc_dvc->carr_pending_cnt = 0;
+
+ AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
+ (ADV_INTR_ENABLE_HOST_INTR |
+ ADV_INTR_ENABLE_GLOBAL_INTR));
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
+ AdvWriteWordRegister(iop_base, IOPW_PC, word);
+
+ /* finally, finally, gentlemen, start your engine */
+ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);
+
+ /*
+ * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
+ * Resets should be performed. The RISC has to be running
+ * to issue a SCSI Bus Reset.
+ */
+ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) {
+ /*
+ * If the BIOS Signature is present in memory, restore the
+ * per TID microcode operating variables.
+ */
+ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] ==
+ 0x55AA) {
+ /*
+ * Restore per TID negotiated values.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
+ tagqng_able);
+ for (tid = 0; tid <= ASC_MAX_TID; tid++) {
+ AdvWriteByteLram(iop_base,
+ ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+ } else {
+ if (AdvResetSB(asc_dvc) != ADV_TRUE) {
+ warn_code = ASC_WARN_BUSRESET_ERROR;
+ }
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Reset chip and SCSI Bus.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - Chip re-initialization and SCSI Bus Reset successful.
+ * ADV_FALSE(0) - Chip re-initialization and SCSI Bus Reset failure.
+ */
+static int AdvResetChipAndSB(ADV_DVC_VAR *asc_dvc)
+{
+ int status;
+ ushort wdtr_able, sdtr_able, tagqng_able;
+ ushort ppr_able = 0;
+ uchar tid, max_cmd[ADV_MAX_TID + 1];
+ AdvPortAddr iop_base;
+ ushort bios_sig;
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Save current per TID negotiated values.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
+ AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
+ }
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+
+ /*
+ * Force the AdvInitAsc3550/38C0800Driver() function to
+ * perform a SCSI Bus Reset by clearing the BIOS signature word.
+ * The initialization functions assumes a SCSI Bus Reset is not
+ * needed if the BIOS signature word is present.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);
+ AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, 0);
+
+ /*
+ * Stop chip and reset it.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_STOP);
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET);
+ mdelay(100);
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
+ ADV_CTRL_REG_CMD_WR_IO_REG);
+
+ /*
+ * Reset Adv Library error code, if any, and try
+ * re-initializing the chip.
+ */
+ asc_dvc->err_code = 0;
+ if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
+ status = AdvInitAsc38C1600Driver(asc_dvc);
+ } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
+ status = AdvInitAsc38C0800Driver(asc_dvc);
+ } else {
+ status = AdvInitAsc3550Driver(asc_dvc);
+ }
+
+ /* Translate initialization return value to status value. */
+ if (status == 0) {
+ status = ADV_TRUE;
+ } else {
+ status = ADV_FALSE;
+ }
+
+ /*
+ * Restore the BIOS signature word.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);
+
+ /*
+ * Restore per TID negotiated values.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
+ AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
+ }
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ max_cmd[tid]);
+ }
+
+ return status;
+}
+
+/*
+ * adv_async_callback() - Adv Library asynchronous event callback function.
+ */
+static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
+{
+ switch (code) {
+ case ADV_ASYNC_SCSI_BUS_RESET_DET:
+ /*
+ * The firmware detected a SCSI Bus reset.
+ */
+ ASC_DBG(0, "ADV_ASYNC_SCSI_BUS_RESET_DET\n");
+ break;
+
+ case ADV_ASYNC_RDMA_FAILURE:
+ /*
+ * Handle RDMA failure by resetting the SCSI Bus and
+ * possibly the chip if it is unresponsive. Log the error
+ * with a unique code.
+ */
+ ASC_DBG(0, "ADV_ASYNC_RDMA_FAILURE\n");
+ AdvResetChipAndSB(adv_dvc_varp);
+ break;
+
+ case ADV_HOST_SCSI_BUS_RESET:
+ /*
+ * Host generated SCSI bus reset occurred.
+ */
+ ASC_DBG(0, "ADV_HOST_SCSI_BUS_RESET\n");
+ break;
+
+ default:
+ ASC_DBG(0, "unknown code 0x%x\n", code);
+ break;
+ }
+}
+
+/*
+ * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR().
+ *
+ * Callback function for the Wide SCSI Adv Library.
+ */
+static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
+{
+ struct asc_board *boardp;
+ adv_req_t *reqp;
+ adv_sgblk_t *sgblkp;
+ struct scsi_cmnd *scp;
+ struct Scsi_Host *shost;
+ ADV_DCNT resid_cnt;
+
+ ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n",
+ (ulong)adv_dvc_varp, (ulong)scsiqp);
+ ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
+
+ /*
+ * Get the adv_req_t structure for the command that has been
+ * completed. The adv_req_t structure actually contains the
+ * completed ADV_SCSI_REQ_Q structure.
+ */
+ reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr);
+ ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
+ if (reqp == NULL) {
+ ASC_PRINT("adv_isr_callback: reqp is NULL\n");
+ return;
+ }
+
+ /*
+ * Get the struct scsi_cmnd structure and Scsi_Host structure for the
+ * command that has been completed.
+ *
+ * Note: The adv_req_t request structure and adv_sgblk_t structure,
+ * if any, are dropped, because a board structure pointer can not be
+ * determined.
+ */
+ scp = reqp->cmndp;
+ ASC_DBG(1, "scp 0x%p\n", scp);
+ if (scp == NULL) {
+ ASC_PRINT
+ ("adv_isr_callback: scp is NULL; adv_req_t dropped.\n");
+ return;
+ }
+ ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+ shost = scp->device->host;
+ ASC_STATS(shost, callback);
+ ASC_DBG(1, "shost 0x%p\n", shost);
+
+ boardp = shost_priv(shost);
+ BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var);
+
+ /*
+ * 'done_status' contains the command's ending status.
+ */
+ switch (scsiqp->done_status) {
+ case QD_NO_ERROR:
+ ASC_DBG(2, "QD_NO_ERROR\n");
+ scp->result = 0;
+
+ /*
+ * Check for an underrun condition.
+ *
+ * If there was no error and an underrun condition, then
+ * then return the number of underrun bytes.
+ */
+ resid_cnt = le32_to_cpu(scsiqp->data_cnt);
+ if (scsi_bufflen(scp) != 0 && resid_cnt != 0 &&
+ resid_cnt <= scsi_bufflen(scp)) {
+ ASC_DBG(1, "underrun condition %lu bytes\n",
+ (ulong)resid_cnt);
+ scsi_set_resid(scp, resid_cnt);
+ }
+ break;
+
+ case QD_WITH_ERROR:
+ ASC_DBG(2, "QD_WITH_ERROR\n");
+ switch (scsiqp->host_status) {
+ case QHSTA_NO_ERROR:
+ if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
+ ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
+ ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+ /*
+ * Note: The 'status_byte()' macro used by
+ * target drivers defined in scsi.h shifts the
+ * status byte returned by host drivers right
+ * by 1 bit. This is why target drivers also
+ * use right shifted status byte definitions.
+ * For instance target drivers use
+ * CHECK_CONDITION, defined to 0x1, instead of
+ * the SCSI defined check condition value of
+ * 0x2. Host drivers are supposed to return
+ * the status byte as it is defined by SCSI.
+ */
+ scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+ STATUS_BYTE(scsiqp->scsi_status);
+ } else {
+ scp->result = STATUS_BYTE(scsiqp->scsi_status);
+ }
+ break;
+
+ default:
+ /* Some other QHSTA error occurred. */
+ ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
+ scp->result = HOST_BYTE(DID_BAD_TARGET);
+ break;
+ }
+ break;
+
+ case QD_ABORTED_BY_HOST:
+ ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
+ scp->result =
+ HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
+ break;
+
+ default:
+ ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
+ scp->result =
+ HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
+ break;
+ }
+
+ /*
+ * If the 'init_tidmask' bit isn't already set for the target and the
+ * current request finished normally, then set the bit for the target
+ * to indicate that a device is present.
+ */
+ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
+ scsiqp->done_status == QD_NO_ERROR &&
+ scsiqp->host_status == QHSTA_NO_ERROR) {
+ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
+ }
+
+ asc_scsi_done(scp);
+
+ /*
+ * Free all 'adv_sgblk_t' structures allocated for the request.
+ */
+ while ((sgblkp = reqp->sgblkp) != NULL) {
+ /* Remove 'sgblkp' from the request list. */
+ reqp->sgblkp = sgblkp->next_sgblkp;
+
+ /* Add 'sgblkp' to the board free list. */
+ sgblkp->next_sgblkp = boardp->adv_sgblkp;
+ boardp->adv_sgblkp = sgblkp;
+ }
+
+ /*
+ * Free the adv_req_t structure used with the command by adding
+ * it back to the board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ ASC_DBG(1, "done\n");
+}
+
+/*
+ * Adv Library Interrupt Service Routine
+ *
+ * This function is called by a driver's interrupt service routine.
+ * The function disables and re-enables interrupts.
+ *
+ * When a microcode idle command is completed, the ADV_DVC_VAR
+ * 'idle_cmd_done' field is set to ADV_TRUE.
+ *
+ * Note: AdvISR() can be called when interrupts are disabled or even
+ * when there is no hardware interrupt condition present. It will
+ * always check for completed idle commands and microcode requests.
+ * This is an important feature that shouldn't be changed because it
+ * allows commands to be completed from polling mode loops.
+ *
+ * Return:
+ * ADV_TRUE(1) - interrupt was pending
+ * ADV_FALSE(0) - no interrupt was pending
+ */
+static int AdvISR(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ uchar int_stat;
+ ushort target_bit;
+ ADV_CARR_T *free_carrp;
+ ADV_VADDR irq_next_vpa;
+ ADV_SCSI_REQ_Q *scsiq;
+
+ iop_base = asc_dvc->iop_base;
+
+ /* Reading the register clears the interrupt. */
+ int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG);
+
+ if ((int_stat & (ADV_INTR_STATUS_INTRA | ADV_INTR_STATUS_INTRB |
+ ADV_INTR_STATUS_INTRC)) == 0) {
+ return ADV_FALSE;
+ }
+
+ /*
+ * Notify the driver of an asynchronous microcode condition by
+ * calling the adv_async_callback function. The function
+ * is passed the microcode ASC_MC_INTRB_CODE byte value.
+ */
+ if (int_stat & ADV_INTR_STATUS_INTRB) {
+ uchar intrb_code;
+
+ AdvReadByteLram(iop_base, ASC_MC_INTRB_CODE, intrb_code);
+
+ if (asc_dvc->chip_type == ADV_CHIP_ASC3550 ||
+ asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
+ if (intrb_code == ADV_ASYNC_CARRIER_READY_FAILURE &&
+ asc_dvc->carr_pending_cnt != 0) {
+ AdvWriteByteRegister(iop_base, IOPB_TICKLE,
+ ADV_TICKLE_A);
+ if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
+ AdvWriteByteRegister(iop_base,
+ IOPB_TICKLE,
+ ADV_TICKLE_NOP);
+ }
+ }
+ }
+
+ adv_async_callback(asc_dvc, intrb_code);
+ }
+
+ /*
+ * Check if the IRQ stopper carrier contains a completed request.
+ */
+ while (((irq_next_vpa =
+ le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) {
+ /*
+ * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure.
+ * The RISC will have set 'areq_vpa' to a virtual address.
+ *
+ * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr
+ * field to the carrier ADV_CARR_T.areq_vpa field. The conversion
+ * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr'
+ * in AdvExeScsiQueue().
+ */
+ scsiq = (ADV_SCSI_REQ_Q *)
+ ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa));
+
+ /*
+ * Request finished with good status and the queue was not
+ * DMAed to host memory by the firmware. Set all status fields
+ * to indicate good status.
+ */
+ if ((irq_next_vpa & ASC_RQ_GOOD) != 0) {
+ scsiq->done_status = QD_NO_ERROR;
+ scsiq->host_status = scsiq->scsi_status = 0;
+ scsiq->data_cnt = 0L;
+ }
+
+ /*
+ * Advance the stopper pointer to the next carrier
+ * ignoring the lower four bits. Free the previous
+ * stopper carrier.
+ */
+ free_carrp = asc_dvc->irq_sp;
+ asc_dvc->irq_sp = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa));
+
+ free_carrp->next_vpa =
+ cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
+ asc_dvc->carr_freelist = free_carrp;
+ asc_dvc->carr_pending_cnt--;
+
+ target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id);
+
+ /*
+ * Clear request microcode control flag.
+ */
+ scsiq->cntl = 0;
+
+ /*
+ * Notify the driver of the completed request by passing
+ * the ADV_SCSI_REQ_Q pointer to its callback function.
+ */
+ scsiq->a_flag |= ADV_SCSIQ_DONE;
+ adv_isr_callback(asc_dvc, scsiq);
+ /*
+ * Note: After the driver callback function is called, 'scsiq'
+ * can no longer be referenced.
+ *
+ * Fall through and continue processing other completed
+ * requests...
+ */
+ }
+ return ADV_TRUE;
+}
+
+static int AscSetLibErrorCode(ASC_DVC_VAR *asc_dvc, ushort err_code)
+{
+ if (asc_dvc->err_code == 0) {
+ asc_dvc->err_code = err_code;
+ AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W,
+ err_code);
+ }
+ return err_code;
+}
+
+static void AscAckInterrupt(PortAddr iop_base)
+{
+ uchar host_flag;
+ uchar risc_flag;
+ ushort loop;
+
+ loop = 0;
+ do {
+ risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B);
+ if (loop++ > 0x7FFF) {
+ break;
+ }
+ } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0);
+ host_flag =
+ AscReadLramByte(iop_base,
+ ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar)(host_flag | ASC_HOST_FLAG_ACK_INT));
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ loop = 0;
+ while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) {
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ if (loop++ > 3) {
+ break;
+ }
+ }
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+}
+
+static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
+{
+ const uchar *period_table;
+ int max_index;
+ int min_index;
+ int i;
+
+ period_table = asc_dvc->sdtr_period_tbl;
+ max_index = (int)asc_dvc->max_sdtr_index;
+ min_index = (int)asc_dvc->min_sdtr_index;
+ if ((syn_time <= period_table[max_index])) {
+ for (i = min_index; i < (max_index - 1); i++) {
+ if (syn_time <= period_table[i]) {
+ return (uchar)i;
+ }
+ }
+ return (uchar)max_index;
+ } else {
+ return (uchar)(max_index + 1);
+ }
+}
+
+static uchar
+AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
+{
+ EXT_MSG sdtr_buf;
+ uchar sdtr_period_index;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ sdtr_buf.msg_type = EXTENDED_MESSAGE;
+ sdtr_buf.msg_len = MS_SDTR_LEN;
+ sdtr_buf.msg_req = EXTENDED_SDTR;
+ sdtr_buf.xfer_period = sdtr_period;
+ sdtr_offset &= ASC_SYN_MAX_OFFSET;
+ sdtr_buf.req_ack_offset = sdtr_offset;
+ sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+ if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
+ AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
+ (uchar *)&sdtr_buf,
+ sizeof(EXT_MSG) >> 1);
+ return ((sdtr_period_index << 4) | sdtr_offset);
+ } else {
+ sdtr_buf.req_ack_offset = 0;
+ AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
+ (uchar *)&sdtr_buf,
+ sizeof(EXT_MSG) >> 1);
+ return 0;
+ }
+}
+
+static uchar
+AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset)
+{
+ uchar byte;
+ uchar sdtr_period_ix;
+
+ sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+ if (sdtr_period_ix > asc_dvc->max_sdtr_index)
+ return 0xFF;
+ byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET);
+ return byte;
+}
+
+static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data)
+{
+ ASC_SCSI_BIT_ID_TYPE org_id;
+ int i;
+ int sta = TRUE;
+
+ AscSetBank(iop_base, 1);
+ org_id = AscReadChipDvcID(iop_base);
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if (org_id == (0x01 << i))
+ break;
+ }
+ org_id = (ASC_SCSI_BIT_ID_TYPE) i;
+ AscWriteChipDvcID(iop_base, id);
+ if (AscReadChipDvcID(iop_base) == (0x01 << id)) {
+ AscSetBank(iop_base, 0);
+ AscSetChipSyn(iop_base, sdtr_data);
+ if (AscGetChipSyn(iop_base) != sdtr_data) {
+ sta = FALSE;
+ }
+ } else {
+ sta = FALSE;
+ }
+ AscSetBank(iop_base, 1);
+ AscWriteChipDvcID(iop_base, org_id);
+ AscSetBank(iop_base, 0);
+ return (sta);
+}
+
+static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no)
+{
+ AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+ AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
+}
+
+static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
+{
+ EXT_MSG ext_msg;
+ EXT_MSG out_msg;
+ ushort halt_q_addr;
+ int sdtr_accept;
+ ushort int_halt_code;
+ ASC_SCSI_BIT_ID_TYPE scsi_busy;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ PortAddr iop_base;
+ uchar tag_code;
+ uchar q_status;
+ uchar halt_qp;
+ uchar sdtr_data;
+ uchar target_ix;
+ uchar q_cntl, tid_no;
+ uchar cur_dvc_qng;
+ uchar asyn_sdtr;
+ uchar scsi_status;
+ struct asc_board *boardp;
+
+ BUG_ON(!asc_dvc->drv_ptr);
+ boardp = asc_dvc->drv_ptr;
+
+ iop_base = asc_dvc->iop_base;
+ int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W);
+
+ halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B);
+ halt_q_addr = ASC_QNO_TO_QADDR(halt_qp);
+ target_ix = AscReadLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_TARGET_IX));
+ q_cntl = AscReadLramByte(iop_base,
+ (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL));
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ target_id = (uchar)ASC_TID_TO_TARGET_ID(tid_no);
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB;
+ } else {
+ asyn_sdtr = 0;
+ }
+ if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) {
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ AscSetChipSDTR(iop_base, 0, tid_no);
+ boardp->sdtr_data[tid_no] = 0;
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) {
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ boardp->sdtr_data[tid_no] = asyn_sdtr;
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_EXTMSG_IN) {
+ AscMemWordCopyPtrFromLram(iop_base,
+ ASCV_MSGIN_BEG,
+ (uchar *)&ext_msg,
+ sizeof(EXT_MSG) >> 1);
+
+ if (ext_msg.msg_type == EXTENDED_MESSAGE &&
+ ext_msg.msg_req == EXTENDED_SDTR &&
+ ext_msg.msg_len == MS_SDTR_LEN) {
+ sdtr_accept = TRUE;
+ if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {
+
+ sdtr_accept = FALSE;
+ ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET;
+ }
+ if ((ext_msg.xfer_period <
+ asc_dvc->sdtr_period_tbl[asc_dvc->min_sdtr_index])
+ || (ext_msg.xfer_period >
+ asc_dvc->sdtr_period_tbl[asc_dvc->
+ max_sdtr_index])) {
+ sdtr_accept = FALSE;
+ ext_msg.xfer_period =
+ asc_dvc->sdtr_period_tbl[asc_dvc->
+ min_sdtr_index];
+ }
+ if (sdtr_accept) {
+ sdtr_data =
+ AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
+ ext_msg.req_ack_offset);
+ if ((sdtr_data == 0xFF)) {
+
+ q_cntl |= QC_MSG_OUT;
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr,
+ tid_no);
+ boardp->sdtr_data[tid_no] = asyn_sdtr;
+ }
+ }
+ if (ext_msg.req_ack_offset == 0) {
+
+ q_cntl &= ~QC_MSG_OUT;
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ } else {
+ if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
+ q_cntl &= ~QC_MSG_OUT;
+ asc_dvc->sdtr_done |= target_id;
+ asc_dvc->init_sdtr |= target_id;
+ asc_dvc->pci_fix_asyn_xfer &=
+ ~target_id;
+ sdtr_data =
+ AscCalSDTRData(asc_dvc,
+ ext_msg.xfer_period,
+ ext_msg.
+ req_ack_offset);
+ AscSetChipSDTR(iop_base, sdtr_data,
+ tid_no);
+ boardp->sdtr_data[tid_no] = sdtr_data;
+ } else {
+ q_cntl |= QC_MSG_OUT;
+ AscMsgOutSDTR(asc_dvc,
+ ext_msg.xfer_period,
+ ext_msg.req_ack_offset);
+ asc_dvc->pci_fix_asyn_xfer &=
+ ~target_id;
+ sdtr_data =
+ AscCalSDTRData(asc_dvc,
+ ext_msg.xfer_period,
+ ext_msg.
+ req_ack_offset);
+ AscSetChipSDTR(iop_base, sdtr_data,
+ tid_no);
+ boardp->sdtr_data[tid_no] = sdtr_data;
+ asc_dvc->sdtr_done |= target_id;
+ asc_dvc->init_sdtr |= target_id;
+ }
+ }
+
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (ext_msg.msg_type == EXTENDED_MESSAGE &&
+ ext_msg.msg_req == EXTENDED_WDTR &&
+ ext_msg.msg_len == MS_WDTR_LEN) {
+
+ ext_msg.wdtr_width = 0;
+ AscMemWordCopyPtrToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (uchar *)&ext_msg,
+ sizeof(EXT_MSG) >> 1);
+ q_cntl |= QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else {
+
+ ext_msg.msg_type = MESSAGE_REJECT;
+ AscMemWordCopyPtrToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (uchar *)&ext_msg,
+ sizeof(EXT_MSG) >> 1);
+ q_cntl |= QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+ } else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
+
+ q_cntl |= QC_REQ_SENSE;
+
+ if ((asc_dvc->init_sdtr & target_id) != 0) {
+
+ asc_dvc->sdtr_done &= ~target_id;
+
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ q_cntl |= QC_MSG_OUT;
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->
+ sdtr_period_tbl[(sdtr_data >> 4) &
+ (uchar)(asc_dvc->
+ max_sdtr_index -
+ 1)],
+ (uchar)(sdtr_data & (uchar)
+ ASC_SYN_MAX_OFFSET));
+ }
+
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_CNTL), q_cntl);
+
+ tag_code = AscReadLramByte(iop_base,
+ (ushort)(halt_q_addr + (ushort)
+ ASC_SCSIQ_B_TAG_CODE));
+ tag_code &= 0xDC;
+ if ((asc_dvc->pci_fix_asyn_xfer & target_id)
+ && !(asc_dvc->pci_fix_asyn_xfer_always & target_id)
+ ) {
+
+ tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT
+ | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
+
+ }
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_TAG_CODE),
+ tag_code);
+
+ q_status = AscReadLramByte(iop_base,
+ (ushort)(halt_q_addr + (ushort)
+ ASC_SCSIQ_B_STATUS));
+ q_status |= (QS_READY | QS_BUSY);
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_STATUS),
+ q_status);
+
+ scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B);
+ scsi_busy &= ~target_id;
+ AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy);
+
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
+
+ AscMemWordCopyPtrFromLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (uchar *)&out_msg,
+ sizeof(EXT_MSG) >> 1);
+
+ if ((out_msg.msg_type == EXTENDED_MESSAGE) &&
+ (out_msg.msg_len == MS_SDTR_LEN) &&
+ (out_msg.msg_req == EXTENDED_SDTR)) {
+
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ boardp->sdtr_data[tid_no] = asyn_sdtr;
+ }
+ q_cntl &= ~QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort)(halt_q_addr +
+ (ushort)ASC_SCSIQ_B_CNTL), q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
+
+ scsi_status = AscReadLramByte(iop_base,
+ (ushort)((ushort)halt_q_addr +
+ (ushort)
+ ASC_SCSIQ_SCSI_STATUS));
+ cur_dvc_qng =
+ AscReadLramByte(iop_base,
+ (ushort)((ushort)ASC_QADR_BEG +
+ (ushort)target_ix));
+ if ((cur_dvc_qng > 0) && (asc_dvc->cur_dvc_qng[tid_no] > 0)) {
+
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort)ASCV_SCSIBUSY_B);
+ scsi_busy |= target_id;
+ AscWriteLramByte(iop_base,
+ (ushort)ASCV_SCSIBUSY_B, scsi_busy);
+ asc_dvc->queue_full_or_busy |= target_id;
+
+ if (scsi_status == SAM_STAT_TASK_SET_FULL) {
+ if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
+ cur_dvc_qng -= 1;
+ asc_dvc->max_dvc_qng[tid_no] =
+ cur_dvc_qng;
+
+ AscWriteLramByte(iop_base,
+ (ushort)((ushort)
+ ASCV_MAX_DVC_QNG_BEG
+ + (ushort)
+ tid_no),
+ cur_dvc_qng);
+
+ /*
+ * Set the device queue depth to the
+ * number of active requests when the
+ * QUEUE FULL condition was encountered.
+ */
+ boardp->queue_full |= target_id;
+ boardp->queue_full_cnt[tid_no] =
+ cur_dvc_qng;
+ }
+ }
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+#if CC_VERY_LONG_SG_LIST
+ else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) {
+ uchar q_no;
+ ushort q_addr;
+ uchar sg_wk_q_no;
+ uchar first_sg_wk_q_no;
+ ASC_SCSI_Q *scsiq; /* Ptr to driver request. */
+ ASC_SG_HEAD *sg_head; /* Ptr to driver SG request. */
+ ASC_SG_LIST_Q scsi_sg_q; /* Structure written to queue. */
+ ushort sg_list_dwords;
+ ushort sg_entry_cnt;
+ uchar next_qp;
+ int i;
+
+ q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP);
+ if (q_no == ASC_QLINK_END)
+ return 0;
+
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+
+ /*
+ * Convert the request's SRB pointer to a host ASC_SCSI_REQ
+ * structure pointer using a macro provided by the driver.
+ * The ASC_SCSI_REQ pointer provides a pointer to the
+ * host ASC_SG_HEAD structure.
+ */
+ /* Read request's SRB pointer. */
+ scsiq = (ASC_SCSI_Q *)
+ ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base,
+ (ushort)
+ (q_addr +
+ ASC_SCSIQ_D_SRBPTR))));
+
+ /*
+ * Get request's first and working SG queue.
+ */
+ sg_wk_q_no = AscReadLramByte(iop_base,
+ (ushort)(q_addr +
+ ASC_SCSIQ_B_SG_WK_QP));
+
+ first_sg_wk_q_no = AscReadLramByte(iop_base,
+ (ushort)(q_addr +
+ ASC_SCSIQ_B_FIRST_SG_WK_QP));
+
+ /*
+ * Reset request's working SG queue back to the
+ * first SG queue.
+ */
+ AscWriteLramByte(iop_base,
+ (ushort)(q_addr +
+ (ushort)ASC_SCSIQ_B_SG_WK_QP),
+ first_sg_wk_q_no);
+
+ sg_head = scsiq->sg_head;
+
+ /*
+ * Set sg_entry_cnt to the number of SG elements
+ * that will be completed on this interrupt.
+ *
+ * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1
+ * SG elements. The data_cnt and data_addr fields which
+ * add 1 to the SG element capacity are not used when
+ * restarting SG handling after a halt.
+ */
+ if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) {
+ sg_entry_cnt = ASC_MAX_SG_LIST - 1;
+
+ /*
+ * Keep track of remaining number of SG elements that
+ * will need to be handled on the next interrupt.
+ */
+ scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1);
+ } else {
+ sg_entry_cnt = scsiq->remain_sg_entry_cnt;
+ scsiq->remain_sg_entry_cnt = 0;
+ }
+
+ /*
+ * Copy SG elements into the list of allocated SG queues.
+ *
+ * Last index completed is saved in scsiq->next_sg_index.
+ */
+ next_qp = first_sg_wk_q_no;
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ scsi_sg_q.sg_head_qp = q_no;
+ scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
+ for (i = 0; i < sg_head->queue_cnt; i++) {
+ scsi_sg_q.seq_no = i + 1;
+ if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
+ sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
+ sg_entry_cnt -= ASC_SG_LIST_PER_Q;
+ /*
+ * After very first SG queue RISC FW uses next
+ * SG queue first element then checks sg_list_cnt
+ * against zero and then decrements, so set
+ * sg_list_cnt 1 less than number of SG elements
+ * in each SG queue.
+ */
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ scsi_sg_q.sg_cur_list_cnt =
+ ASC_SG_LIST_PER_Q - 1;
+ } else {
+ /*
+ * This is the last SG queue in the list of
+ * allocated SG queues. If there are more
+ * SG elements than will fit in the allocated
+ * queues, then set the QCSG_SG_XFER_MORE flag.
+ */
+ if (scsiq->remain_sg_entry_cnt != 0) {
+ scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
+ } else {
+ scsi_sg_q.cntl |= QCSG_SG_XFER_END;
+ }
+ /* equals sg_entry_cnt * 2 */
+ sg_list_dwords = sg_entry_cnt << 1;
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
+ sg_entry_cnt = 0;
+ }
+
+ scsi_sg_q.q_no = next_qp;
+ AscMemWordCopyPtrToLram(iop_base,
+ q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
+ (uchar *)&scsi_sg_q,
+ sizeof(ASC_SG_LIST_Q) >> 1);
+
+ AscMemDWordCopyPtrToLram(iop_base,
+ q_addr + ASC_SGQ_LIST_BEG,
+ (uchar *)&sg_head->
+ sg_list[scsiq->next_sg_index],
+ sg_list_dwords);
+
+ scsiq->next_sg_index += ASC_SG_LIST_PER_Q;
+
+ /*
+ * If the just completed SG queue contained the
+ * last SG element, then no more SG queues need
+ * to be written.
+ */
+ if (scsi_sg_q.cntl & QCSG_SG_XFER_END) {
+ break;
+ }
+
+ next_qp = AscReadLramByte(iop_base,
+ (ushort)(q_addr +
+ ASC_SCSIQ_B_FWD));
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ }
+
+ /*
+ * Clear the halt condition so the RISC will be restarted
+ * after the return.
+ */
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+#endif /* CC_VERY_LONG_SG_LIST */
+ return (0);
+}
+
+/*
+ * void
+ * DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Input an ASC_QDONE_INFO structure from the chip
+ */
+static void
+DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words)
+{
+ int i;
+ ushort word;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < 2 * words; i += 2) {
+ if (i == 10) {
+ continue;
+ }
+ word = inpw(iop_base + IOP_RAM_DATA);
+ inbuf[i] = word & 0xff;
+ inbuf[i + 1] = (word >> 8) & 0xff;
+ }
+ ASC_DBG_PRT_HEX(2, "DvcGetQinfo", inbuf, 2 * words);
+}
+
+static uchar
+_AscCopyLramScsiDoneQ(PortAddr iop_base,
+ ushort q_addr,
+ ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count)
+{
+ ushort _val;
+ uchar sg_queue_cnt;
+
+ DvcGetQinfo(iop_base,
+ q_addr + ASC_SCSIQ_DONE_INFO_BEG,
+ (uchar *)scsiq,
+ (sizeof(ASC_SCSIQ_2) + sizeof(ASC_SCSIQ_3)) / 2);
+
+ _val = AscReadLramWord(iop_base,
+ (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS));
+ scsiq->q_status = (uchar)_val;
+ scsiq->q_no = (uchar)(_val >> 8);
+ _val = AscReadLramWord(iop_base,
+ (ushort)(q_addr + (ushort)ASC_SCSIQ_B_CNTL));
+ scsiq->cntl = (uchar)_val;
+ sg_queue_cnt = (uchar)(_val >> 8);
+ _val = AscReadLramWord(iop_base,
+ (ushort)(q_addr +
+ (ushort)ASC_SCSIQ_B_SENSE_LEN));
+ scsiq->sense_len = (uchar)_val;
+ scsiq->extra_bytes = (uchar)(_val >> 8);
+
+ /*
+ * Read high word of remain bytes from alternate location.
+ */
+ scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base,
+ (ushort)(q_addr +
+ (ushort)
+ ASC_SCSIQ_W_ALT_DC1)))
+ << 16);
+ /*
+ * Read low word of remain bytes from original location.
+ */
+ scsiq->remain_bytes += AscReadLramWord(iop_base,
+ (ushort)(q_addr + (ushort)
+ ASC_SCSIQ_DW_REMAIN_XFER_CNT));
+
+ scsiq->remain_bytes &= max_dma_count;
+ return sg_queue_cnt;
+}
+
+/*
+ * asc_isr_callback() - Second Level Interrupt Handler called by AscISR().
+ *
+ * Interrupt callback function for the Narrow SCSI Asc Library.
+ */
+static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
+{
+ struct asc_board *boardp;
+ struct scsi_cmnd *scp;
+ struct Scsi_Host *shost;
+
+ ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep);
+ ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep);
+
+ scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr);
+ if (!scp)
+ return;
+
+ ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+ shost = scp->device->host;
+ ASC_STATS(shost, callback);
+ ASC_DBG(1, "shost 0x%p\n", shost);
+
+ boardp = shost_priv(shost);
+ BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);
+
+ dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ /*
+ * 'qdonep' contains the command's ending status.
+ */
+ switch (qdonep->d3.done_stat) {
+ case QD_NO_ERROR:
+ ASC_DBG(2, "QD_NO_ERROR\n");
+ scp->result = 0;
+
+ /*
+ * Check for an underrun condition.
+ *
+ * If there was no error and an underrun condition, then
+ * return the number of underrun bytes.
+ */
+ if (scsi_bufflen(scp) != 0 && qdonep->remain_bytes != 0 &&
+ qdonep->remain_bytes <= scsi_bufflen(scp)) {
+ ASC_DBG(1, "underrun condition %u bytes\n",
+ (unsigned)qdonep->remain_bytes);
+ scsi_set_resid(scp, qdonep->remain_bytes);
+ }
+ break;
+
+ case QD_WITH_ERROR:
+ ASC_DBG(2, "QD_WITH_ERROR\n");
+ switch (qdonep->d3.host_stat) {
+ case QHSTA_NO_ERROR:
+ if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
+ ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
+ ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+ /*
+ * Note: The 'status_byte()' macro used by
+ * target drivers defined in scsi.h shifts the
+ * status byte returned by host drivers right
+ * by 1 bit. This is why target drivers also
+ * use right shifted status byte definitions.
+ * For instance target drivers use
+ * CHECK_CONDITION, defined to 0x1, instead of
+ * the SCSI defined check condition value of
+ * 0x2. Host drivers are supposed to return
+ * the status byte as it is defined by SCSI.
+ */
+ scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ } else {
+ scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
+ }
+ break;
+
+ default:
+ /* QHSTA error occurred */
+ ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat);
+ scp->result = HOST_BYTE(DID_BAD_TARGET);
+ break;
+ }
+ break;
+
+ case QD_ABORTED_BY_HOST:
+ ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
+ scp->result =
+ HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.
+ scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+
+ default:
+ ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
+ scp->result =
+ HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.
+ scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+ }
+
+ /*
+ * If the 'init_tidmask' bit isn't already set for the target and the
+ * current request finished normally, then set the bit for the target
+ * to indicate that a device is present.
+ */
+ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
+ qdonep->d3.done_stat == QD_NO_ERROR &&
+ qdonep->d3.host_stat == QHSTA_NO_ERROR) {
+ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
+ }
+
+ asc_scsi_done(scp);
+}
+
+static int AscIsrQDone(ASC_DVC_VAR *asc_dvc)
+{
+ uchar next_qp;
+ uchar n_q_used;
+ uchar sg_list_qp;
+ uchar sg_queue_cnt;
+ uchar q_cnt;
+ uchar done_q_tail;
+ uchar tid_no;
+ ASC_SCSI_BIT_ID_TYPE scsi_busy;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort sg_q_addr;
+ uchar cur_target_qng;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO *scsiq;
+ int false_overrun;
+
+ iop_base = asc_dvc->iop_base;
+ n_q_used = 1;
+ scsiq = (ASC_QDONE_INFO *)&scsiq_buf;
+ done_q_tail = (uchar)AscGetVarDoneQTail(iop_base);
+ q_addr = ASC_QNO_TO_QADDR(done_q_tail);
+ next_qp = AscReadLramByte(iop_base,
+ (ushort)(q_addr + (ushort)ASC_SCSIQ_B_FWD));
+ if (next_qp != ASC_QLINK_END) {
+ AscPutVarDoneQTail(iop_base, next_qp);
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq,
+ asc_dvc->max_dma_count);
+ AscWriteLramByte(iop_base,
+ (ushort)(q_addr +
+ (ushort)ASC_SCSIQ_B_STATUS),
+ (uchar)(scsiq->
+ q_status & (uchar)~(QS_READY |
+ QS_ABORTED)));
+ tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix);
+ target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix);
+ if ((scsiq->cntl & QC_SG_HEAD) != 0) {
+ sg_q_addr = q_addr;
+ sg_list_qp = next_qp;
+ for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) {
+ sg_list_qp = AscReadLramByte(iop_base,
+ (ushort)(sg_q_addr
+ + (ushort)
+ ASC_SCSIQ_B_FWD));
+ sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp);
+ if (sg_list_qp == ASC_QLINK_END) {
+ AscSetLibErrorCode(asc_dvc,
+ ASCQ_ERR_SG_Q_LINKS);
+ scsiq->d3.done_stat = QD_WITH_ERROR;
+ scsiq->d3.host_stat =
+ QHSTA_D_QDONE_SG_LIST_CORRUPTED;
+ goto FATAL_ERR_QDONE;
+ }
+ AscWriteLramByte(iop_base,
+ (ushort)(sg_q_addr + (ushort)
+ ASC_SCSIQ_B_STATUS),
+ QS_FREE);
+ }
+ n_q_used = sg_queue_cnt + 1;
+ AscPutVarDoneQTail(iop_base, sg_list_qp);
+ }
+ if (asc_dvc->queue_full_or_busy & target_id) {
+ cur_target_qng = AscReadLramByte(iop_base,
+ (ushort)((ushort)
+ ASC_QADR_BEG
+ + (ushort)
+ scsiq->d2.
+ target_ix));
+ if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) {
+ scsi_busy = AscReadLramByte(iop_base, (ushort)
+ ASCV_SCSIBUSY_B);
+ scsi_busy &= ~target_id;
+ AscWriteLramByte(iop_base,
+ (ushort)ASCV_SCSIBUSY_B,
+ scsi_busy);
+ asc_dvc->queue_full_or_busy &= ~target_id;
+ }
+ }
+ if (asc_dvc->cur_total_qng >= n_q_used) {
+ asc_dvc->cur_total_qng -= n_q_used;
+ if (asc_dvc->cur_dvc_qng[tid_no] != 0) {
+ asc_dvc->cur_dvc_qng[tid_no]--;
+ }
+ } else {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG);
+ scsiq->d3.done_stat = QD_WITH_ERROR;
+ goto FATAL_ERR_QDONE;
+ }
+ if ((scsiq->d2.srb_ptr == 0UL) ||
+ ((scsiq->q_status & QS_ABORTED) != 0)) {
+ return (0x11);
+ } else if (scsiq->q_status == QS_DONE) {
+ false_overrun = FALSE;
+ if (scsiq->extra_bytes != 0) {
+ scsiq->remain_bytes +=
+ (ADV_DCNT)scsiq->extra_bytes;
+ }
+ if (scsiq->d3.done_stat == QD_WITH_ERROR) {
+ if (scsiq->d3.host_stat ==
+ QHSTA_M_DATA_OVER_RUN) {
+ if ((scsiq->
+ cntl & (QC_DATA_IN | QC_DATA_OUT))
+ == 0) {
+ scsiq->d3.done_stat =
+ QD_NO_ERROR;
+ scsiq->d3.host_stat =
+ QHSTA_NO_ERROR;
+ } else if (false_overrun) {
+ scsiq->d3.done_stat =
+ QD_NO_ERROR;
+ scsiq->d3.host_stat =
+ QHSTA_NO_ERROR;
+ }
+ } else if (scsiq->d3.host_stat ==
+ QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) {
+ AscStopChip(iop_base);
+ AscSetChipControl(iop_base,
+ (uchar)(CC_SCSI_RESET
+ | CC_HALT));
+ udelay(60);
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base,
+ CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ AscSetChipControl(iop_base, 0);
+ }
+ }
+ if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
+ asc_isr_callback(asc_dvc, scsiq);
+ } else {
+ if ((AscReadLramByte(iop_base,
+ (ushort)(q_addr + (ushort)
+ ASC_SCSIQ_CDB_BEG))
+ == START_STOP)) {
+ asc_dvc->unit_not_ready &= ~target_id;
+ if (scsiq->d3.done_stat != QD_NO_ERROR) {
+ asc_dvc->start_motor &=
+ ~target_id;
+ }
+ }
+ }
+ return (1);
+ } else {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS);
+ FATAL_ERR_QDONE:
+ if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
+ asc_isr_callback(asc_dvc, scsiq);
+ }
+ return (0x80);
+ }
+ }
+ return (0);
+}
+
+static int AscISR(ASC_DVC_VAR *asc_dvc)
+{
+ ASC_CS_TYPE chipstat;
+ PortAddr iop_base;
+ ushort saved_ram_addr;
+ uchar ctrl_reg;
+ uchar saved_ctrl_reg;
+ int int_pending;
+ int status;
+ uchar host_flag;
+
+ iop_base = asc_dvc->iop_base;
+ int_pending = FALSE;
+
+ if (AscIsIntPending(iop_base) == 0)
+ return int_pending;
+
+ if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) {
+ return ERR;
+ }
+ if (asc_dvc->in_critical_cnt != 0) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
+ return ERR;
+ }
+ if (asc_dvc->is_in_int) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
+ return ERR;
+ }
+ asc_dvc->is_in_int = TRUE;
+ ctrl_reg = AscGetChipControl(iop_base);
+ saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
+ CC_SINGLE_STEP | CC_DIAG | CC_TEST));
+ chipstat = AscGetChipStatus(iop_base);
+ if (chipstat & CSW_SCSI_RESET_LATCH) {
+ if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
+ int i = 10;
+ int_pending = TRUE;
+ asc_dvc->sdtr_done = 0;
+ saved_ctrl_reg &= (uchar)(~CC_HALT);
+ while ((AscGetChipStatus(iop_base) &
+ CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) {
+ mdelay(100);
+ }
+ AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT));
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ chipstat = AscGetChipStatus(iop_base);
+ }
+ }
+ saved_ram_addr = AscGetChipLramAddr(iop_base);
+ host_flag = AscReadLramByte(iop_base,
+ ASCV_HOST_FLAG_B) &
+ (uchar)(~ASC_HOST_FLAG_IN_ISR);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR));
+ if ((chipstat & CSW_INT_PENDING) || (int_pending)) {
+ AscAckInterrupt(iop_base);
+ int_pending = TRUE;
+ if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) {
+ if (AscIsrChipHalted(asc_dvc) == ERR) {
+ goto ISR_REPORT_QDONE_FATAL_ERROR;
+ } else {
+ saved_ctrl_reg &= (uchar)(~CC_HALT);
+ }
+ } else {
+ ISR_REPORT_QDONE_FATAL_ERROR:
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
+ while (((status =
+ AscIsrQDone(asc_dvc)) & 0x01) != 0) {
+ }
+ } else {
+ do {
+ if ((status =
+ AscIsrQDone(asc_dvc)) == 1) {
+ break;
+ }
+ } while (status == 0x11);
+ }
+ if ((status & 0x80) != 0)
+ int_pending = ERR;
+ }
+ }
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+ AscSetChipLramAddr(iop_base, saved_ram_addr);
+ AscSetChipControl(iop_base, saved_ctrl_reg);
+ asc_dvc->is_in_int = FALSE;
+ return int_pending;
+}
+
+/*
+ * advansys_reset()
+ *
+ * Reset the bus associated with the command 'scp'.
+ *
+ * This function runs its own thread. Interrupts must be blocked but
+ * sleeping is allowed and no locking other than for host structures is
+ * required. Returns SUCCESS or FAILED.
+ */
+static int advansys_reset(struct scsi_cmnd *scp)
+{
+ struct Scsi_Host *shost = scp->device->host;
+ struct asc_board *boardp = shost_priv(shost);
+ unsigned long flags;
+ int status;
+ int ret = SUCCESS;
+
+ ASC_DBG(1, "0x%p\n", scp);
+
+ ASC_STATS(shost, reset);
+
+ scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n");
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
+
+ /* Reset the chip and SCSI bus. */
+ ASC_DBG(1, "before AscInitAsc1000Driver()\n");
+ status = AscInitAsc1000Driver(asc_dvc);
+
+ /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */
+ if (asc_dvc->err_code || !asc_dvc->overrun_dma) {
+ scmd_printk(KERN_INFO, scp, "SCSI bus reset error: "
+ "0x%x, status: 0x%x\n", asc_dvc->err_code,
+ status);
+ ret = FAILED;
+ } else if (status) {
+ scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: "
+ "0x%x\n", status);
+ } else {
+ scmd_printk(KERN_INFO, scp, "SCSI bus reset "
+ "successful\n");
+ }
+
+ ASC_DBG(1, "after AscInitAsc1000Driver()\n");
+ spin_lock_irqsave(shost->host_lock, flags);
+ } else {
+ /*
+ * If the suggest reset bus flags are set, then reset the bus.
+ * Otherwise only reset the device.
+ */
+ ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
+
+ /*
+ * Reset the target's SCSI bus.
+ */
+ ASC_DBG(1, "before AdvResetChipAndSB()\n");
+ switch (AdvResetChipAndSB(adv_dvc)) {
+ case ASC_TRUE:
+ scmd_printk(KERN_INFO, scp, "SCSI bus reset "
+ "successful\n");
+ break;
+ case ASC_FALSE:
+ default:
+ scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n");
+ ret = FAILED;
+ break;
+ }
+ spin_lock_irqsave(shost->host_lock, flags);
+ AdvISR(adv_dvc);
+ }
+
+ /* Save the time of the most recently completed reset. */
+ boardp->last_reset = jiffies;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ ASC_DBG(1, "ret %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * advansys_biosparam()
+ *
+ * Translate disk drive geometry if the "BIOS greater than 1 GB"
+ * support is enabled for a drive.
+ *
+ * ip (information pointer) is an int array with the following definition:
+ * ip[0]: heads
+ * ip[1]: sectors
+ * ip[2]: cylinders
+ */
+static int
+advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int ip[])
+{
+ struct asc_board *boardp = shost_priv(sdev->host);
+
+ ASC_DBG(1, "begin\n");
+ ASC_STATS(sdev->host, biosparam);
+ if (ASC_NARROW_BOARD(boardp)) {
+ if ((boardp->dvc_var.asc_dvc_var.dvc_cntl &
+ ASC_CNTL_BIOS_GT_1GB) && capacity > 0x200000) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+ } else {
+ if ((boardp->dvc_var.adv_dvc_var.bios_ctrl &
+ BIOS_CTRL_EXTENDED_XLAT) && capacity > 0x200000) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+ }
+ ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
+ ASC_DBG(1, "end\n");
+ return 0;
+}
+
+/*
+ * First-level interrupt handler.
+ *
+ * 'dev_id' is a pointer to the interrupting adapter's Scsi_Host.
+ */
+static irqreturn_t advansys_interrupt(int irq, void *dev_id)
+{
+ struct Scsi_Host *shost = dev_id;
+ struct asc_board *boardp = shost_priv(shost);
+ irqreturn_t result = IRQ_NONE;
+
+ ASC_DBG(2, "boardp 0x%p\n", boardp);
+ spin_lock(shost->host_lock);
+ if (ASC_NARROW_BOARD(boardp)) {
+ if (AscIsIntPending(shost->io_port)) {
+ result = IRQ_HANDLED;
+ ASC_STATS(shost, interrupt);
+ ASC_DBG(1, "before AscISR()\n");
+ AscISR(&boardp->dvc_var.asc_dvc_var);
+ }
+ } else {
+ ASC_DBG(1, "before AdvISR()\n");
+ if (AdvISR(&boardp->dvc_var.adv_dvc_var)) {
+ result = IRQ_HANDLED;
+ ASC_STATS(shost, interrupt);
+ }
+ }
+ spin_unlock(shost->host_lock);
+
+ ASC_DBG(1, "end\n");
+ return result;
+}
+
+static int AscHostReqRiscHalt(PortAddr iop_base)
+{
+ int count = 0;
+ int sta = 0;
+ uchar saved_stop_code;
+
+ if (AscIsChipHalted(iop_base))
+ return (1);
+ saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
+ do {
+ if (AscIsChipHalted(iop_base)) {
+ sta = 1;
+ break;
+ }
+ mdelay(100);
+ } while (count++ < 20);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
+ return (sta);
+}
+
+static int
+AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data)
+{
+ int sta = FALSE;
+
+ if (AscHostReqRiscHalt(iop_base)) {
+ sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+ AscStartChip(iop_base);
+ }
+ return sta;
+}
+
+static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
+{
+ char type = sdev->type;
+ ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id;
+
+ if (!(asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN))
+ return;
+ if (asc_dvc->init_sdtr & tid_bits)
+ return;
+
+ if ((type == TYPE_ROM) && (strncmp(sdev->vendor, "HP ", 3) == 0))
+ asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;
+
+ asc_dvc->pci_fix_asyn_xfer |= tid_bits;
+ if ((type == TYPE_PROCESSOR) || (type == TYPE_SCANNER) ||
+ (type == TYPE_ROM) || (type == TYPE_TAPE))
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+
+ if (asc_dvc->pci_fix_asyn_xfer & tid_bits)
+ AscSetRunChipSynRegAtID(asc_dvc->iop_base, sdev->id,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+}
+
+static void
+advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
+{
+ ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
+ ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;
+
+ if (sdev->lun == 0) {
+ ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr;
+ if ((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) {
+ asc_dvc->init_sdtr |= tid_bit;
+ } else {
+ asc_dvc->init_sdtr &= ~tid_bit;
+ }
+
+ if (orig_init_sdtr != asc_dvc->init_sdtr)
+ AscAsyncFix(asc_dvc, sdev);
+ }
+
+ if (sdev->tagged_supported) {
+ if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) {
+ if (sdev->lun == 0) {
+ asc_dvc->cfg->can_tagged_qng |= tid_bit;
+ asc_dvc->use_tagged_qng |= tid_bit;
+ }
+ scsi_change_queue_depth(sdev,
+ asc_dvc->max_dvc_qng[sdev->id]);
+ }
+ } else {
+ if (sdev->lun == 0) {
+ asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
+ asc_dvc->use_tagged_qng &= ~tid_bit;
+ }
+ }
+
+ if ((sdev->lun == 0) &&
+ (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) {
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
+ asc_dvc->use_tagged_qng);
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
+ asc_dvc->cfg->can_tagged_qng);
+
+ asc_dvc->max_dvc_qng[sdev->id] =
+ asc_dvc->cfg->max_tag_qng[sdev->id];
+ AscWriteLramByte(asc_dvc->iop_base,
+ (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id),
+ asc_dvc->max_dvc_qng[sdev->id]);
+ }
+}
+
+/*
+ * Wide Transfers
+ *
+ * If the EEPROM enabled WDTR for the device and the device supports wide
+ * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and
+ * write the new value to the microcode.
+ */
+static void
+advansys_wide_enable_wdtr(AdvPortAddr iop_base, unsigned short tidmask)
+{
+ unsigned short cfg_word;
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+ if ((cfg_word & tidmask) != 0)
+ return;
+
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+
+ /*
+ * Clear the microcode SDTR and WDTR negotiation done indicators for
+ * the target to cause it to negotiate with the new setting set above.
+ * WDTR when accepted causes the target to enter asynchronous mode, so
+ * SDTR must be negotiated.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+}
+
+/*
+ * Synchronous Transfers
+ *
+ * If the EEPROM enabled SDTR for the device and the device
+ * supports synchronous transfers, then turn on the device's
+ * 'sdtr_able' bit. Write the new value to the microcode.
+ */
+static void
+advansys_wide_enable_sdtr(AdvPortAddr iop_base, unsigned short tidmask)
+{
+ unsigned short cfg_word;
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+ if ((cfg_word & tidmask) != 0)
+ return;
+
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+
+ /*
+ * Clear the microcode "SDTR negotiation" done indicator for the
+ * target to cause it to negotiate with the new setting set above.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+}
+
+/*
+ * PPR (Parallel Protocol Request) Capable
+ *
+ * If the device supports DT mode, then it must be PPR capable.
+ * The PPR message will be used in place of the SDTR and WDTR
+ * messages to negotiate synchronous speed and offset, transfer
+ * width, and protocol options.
+ */
+static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
+ AdvPortAddr iop_base, unsigned short tidmask)
+{
+ AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
+ adv_dvc->ppr_able |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
+}
+
+static void
+advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
+{
+ AdvPortAddr iop_base = adv_dvc->iop_base;
+ unsigned short tidmask = 1 << sdev->id;
+
+ if (sdev->lun == 0) {
+ /*
+ * Handle WDTR, SDTR, and Tag Queuing. If the feature
+ * is enabled in the EEPROM and the device supports the
+ * feature, then enable it in the microcode.
+ */
+
+ if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr)
+ advansys_wide_enable_wdtr(iop_base, tidmask);
+ if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr)
+ advansys_wide_enable_sdtr(iop_base, tidmask);
+ if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr)
+ advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask);
+
+ /*
+ * Tag Queuing is disabled for the BIOS which runs in polled
+ * mode and would see no benefit from Tag Queuing. Also by
+ * disabling Tag Queuing in the BIOS devices with Tag Queuing
+ * bugs will at least work with the BIOS.
+ */
+ if ((adv_dvc->tagqng_able & tidmask) &&
+ sdev->tagged_supported) {
+ unsigned short cfg_word;
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
+ cfg_word);
+ AdvWriteByteLram(iop_base,
+ ASC_MC_NUMBER_OF_MAX_CMD + sdev->id,
+ adv_dvc->max_dvc_qng);
+ }
+ }
+
+ if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported)
+ scsi_change_queue_depth(sdev, adv_dvc->max_dvc_qng);
+}
+
+/*
+ * Set the number of commands to queue per device for the
+ * specified host adapter.
+ */
+static int advansys_slave_configure(struct scsi_device *sdev)
+{
+ struct asc_board *boardp = shost_priv(sdev->host);
+
+ if (ASC_NARROW_BOARD(boardp))
+ advansys_narrow_slave_configure(sdev,
+ &boardp->dvc_var.asc_dvc_var);
+ else
+ advansys_wide_slave_configure(sdev,
+ &boardp->dvc_var.adv_dvc_var);
+
+ return 0;
+}
+
+static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
+{
+ struct asc_board *board = shost_priv(scp->device->host);
+ scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ dma_cache_sync(board->dev, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ return cpu_to_le32(scp->SCp.dma_handle);
+}
+
+static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
+ struct asc_scsi_q *asc_scsi_q)
+{
+ struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var;
+ int use_sg;
+
+ memset(asc_scsi_q, 0, sizeof(*asc_scsi_q));
+
+ /*
+ * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'.
+ */
+ asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp);
+ if (asc_scsi_q->q2.srb_ptr == BAD_SRB) {
+ scp->result = HOST_BYTE(DID_SOFT_ERROR);
+ return ASC_ERROR;
+ }
+
+ /*
+ * Build the ASC_SCSI_Q request.
+ */
+ asc_scsi_q->cdbptr = &scp->cmnd[0];
+ asc_scsi_q->q2.cdb_len = scp->cmd_len;
+ asc_scsi_q->q1.target_id = ASC_TID_TO_TARGET_ID(scp->device->id);
+ asc_scsi_q->q1.target_lun = scp->device->lun;
+ asc_scsi_q->q2.target_ix =
+ ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
+ asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
+ asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ /*
+ * If there are any outstanding requests for the current target,
+ * then every 255th request send an ORDERED request. This heuristic
+ * tries to retain the benefit of request sorting while preventing
+ * request starvation. 255 is the max number of tags or pending commands
+ * a device may have outstanding.
+ *
+ * The request count is incremented below for every successfully
+ * started request.
+ *
+ */
+ if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
+ (boardp->reqcnt[scp->device->id] % 255) == 0) {
+ asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG;
+ } else {
+ asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG;
+ }
+
+ /* Build ASC_SCSI_Q */
+ use_sg = scsi_dma_map(scp);
+ if (use_sg != 0) {
+ int sgcnt;
+ struct scatterlist *slp;
+ struct asc_sg_head *asc_sg_head;
+
+ if (use_sg > scp->device->host->sg_tablesize) {
+ scmd_printk(KERN_ERR, scp, "use_sg %d > "
+ "sg_tablesize %d\n", use_sg,
+ scp->device->host->sg_tablesize);
+ scsi_dma_unmap(scp);
+ scp->result = HOST_BYTE(DID_ERROR);
+ return ASC_ERROR;
+ }
+
+ asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) +
+ use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC);
+ if (!asc_sg_head) {
+ scsi_dma_unmap(scp);
+ scp->result = HOST_BYTE(DID_SOFT_ERROR);
+ return ASC_ERROR;
+ }
+
+ asc_scsi_q->q1.cntl |= QC_SG_HEAD;
+ asc_scsi_q->sg_head = asc_sg_head;
+ asc_scsi_q->q1.data_cnt = 0;
+ asc_scsi_q->q1.data_addr = 0;
+ /* This is a byte value, otherwise it would need to be swapped. */
+ asc_sg_head->entry_cnt = asc_scsi_q->q1.sg_queue_cnt = use_sg;
+ ASC_STATS_ADD(scp->device->host, xfer_elem,
+ asc_sg_head->entry_cnt);
+
+ /*
+ * Convert scatter-gather list into ASC_SG_HEAD list.
+ */
+ scsi_for_each_sg(scp, slp, use_sg, sgcnt) {
+ asc_sg_head->sg_list[sgcnt].addr =
+ cpu_to_le32(sg_dma_address(slp));
+ asc_sg_head->sg_list[sgcnt].bytes =
+ cpu_to_le32(sg_dma_len(slp));
+ ASC_STATS_ADD(scp->device->host, xfer_sect,
+ DIV_ROUND_UP(sg_dma_len(slp), 512));
+ }
+ }
+
+ ASC_STATS(scp->device->host, xfer_cnt);
+
+ ASC_DBG_PRT_ASC_SCSI_Q(2, asc_scsi_q);
+ ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+ return ASC_NOERROR;
+}
+
+/*
+ * Build scatter-gather list for Adv Library (Wide Board).
+ *
+ * Additional ADV_SG_BLOCK structures will need to be allocated
+ * if the total number of scatter-gather elements exceeds
+ * NO_OF_SG_PER_BLOCK (15). The ADV_SG_BLOCK structures are
+ * assumed to be physically contiguous.
+ *
+ * Return:
+ * ADV_SUCCESS(1) - SG List successfully created
+ * ADV_ERROR(-1) - SG List creation failed
+ */
+static int
+adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp,
+ int use_sg)
+{
+ adv_sgblk_t *sgblkp;
+ ADV_SCSI_REQ_Q *scsiqp;
+ struct scatterlist *slp;
+ int sg_elem_cnt;
+ ADV_SG_BLOCK *sg_block, *prev_sg_block;
+ ADV_PADDR sg_block_paddr;
+ int i;
+
+ scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
+ slp = scsi_sglist(scp);
+ sg_elem_cnt = use_sg;
+ prev_sg_block = NULL;
+ reqp->sgblkp = NULL;
+
+ for (;;) {
+ /*
+ * Allocate a 'adv_sgblk_t' structure from the board free
+ * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK
+ * (15) scatter-gather elements.
+ */
+ if ((sgblkp = boardp->adv_sgblkp) == NULL) {
+ ASC_DBG(1, "no free adv_sgblk_t\n");
+ ASC_STATS(scp->device->host, adv_build_nosg);
+
+ /*
+ * Allocation failed. Free 'adv_sgblk_t' structures
+ * already allocated for the request.
+ */
+ while ((sgblkp = reqp->sgblkp) != NULL) {
+ /* Remove 'sgblkp' from the request list. */
+ reqp->sgblkp = sgblkp->next_sgblkp;
+
+ /* Add 'sgblkp' to the board free list. */
+ sgblkp->next_sgblkp = boardp->adv_sgblkp;
+ boardp->adv_sgblkp = sgblkp;
+ }
+ return ASC_BUSY;
+ }
+
+ /* Complete 'adv_sgblk_t' board allocation. */
+ boardp->adv_sgblkp = sgblkp->next_sgblkp;
+ sgblkp->next_sgblkp = NULL;
+
+ /*
+ * Get 8 byte aligned virtual and physical addresses
+ * for the allocated ADV_SG_BLOCK structure.
+ */
+ sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block);
+ sg_block_paddr = virt_to_bus(sg_block);
+
+ /*
+ * Check if this is the first 'adv_sgblk_t' for the
+ * request.
+ */
+ if (reqp->sgblkp == NULL) {
+ /* Request's first scatter-gather block. */
+ reqp->sgblkp = sgblkp;
+
+ /*
+ * Set ADV_SCSI_REQ_T ADV_SG_BLOCK virtual and physical
+ * address pointers.
+ */
+ scsiqp->sg_list_ptr = sg_block;
+ scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr);
+ } else {
+ /* Request's second or later scatter-gather block. */
+ sgblkp->next_sgblkp = reqp->sgblkp;
+ reqp->sgblkp = sgblkp;
+
+ /*
+ * Point the previous ADV_SG_BLOCK structure to
+ * the newly allocated ADV_SG_BLOCK structure.
+ */
+ prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr);
+ }
+
+ for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
+ sg_block->sg_list[i].sg_addr =
+ cpu_to_le32(sg_dma_address(slp));
+ sg_block->sg_list[i].sg_count =
+ cpu_to_le32(sg_dma_len(slp));
+ ASC_STATS_ADD(scp->device->host, xfer_sect,
+ DIV_ROUND_UP(sg_dma_len(slp), 512));
+
+ if (--sg_elem_cnt == 0) { /* Last ADV_SG_BLOCK and scatter-gather entry. */
+ sg_block->sg_cnt = i + 1;
+ sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
+ return ADV_SUCCESS;
+ }
+ slp++;
+ }
+ sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
+ prev_sg_block = sg_block;
+ }
+}
+
+/*
+ * Build a request structure for the Adv Library (Wide Board).
+ *
+ * If an adv_req_t can not be allocated to issue the request,
+ * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
+ *
+ * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the
+ * microcode for DMA addresses or math operations are byte swapped
+ * to little-endian order.
+ */
+static int
+adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
+ ADV_SCSI_REQ_Q **adv_scsiqpp)
+{
+ adv_req_t *reqp;
+ ADV_SCSI_REQ_Q *scsiqp;
+ int i;
+ int ret;
+ int use_sg;
+
+ /*
+ * Allocate an adv_req_t structure from the board to execute
+ * the command.
+ */
+ if (boardp->adv_reqp == NULL) {
+ ASC_DBG(1, "no free adv_req_t\n");
+ ASC_STATS(scp->device->host, adv_build_noreq);
+ return ASC_BUSY;
+ } else {
+ reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp->next_reqp;
+ reqp->next_reqp = NULL;
+ }
+
+ /*
+ * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
+ */
+ scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);
+
+ /*
+ * Initialize the structure.
+ */
+ scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0;
+
+ /*
+ * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
+ */
+ scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp);
+
+ /*
+ * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure.
+ */
+ reqp->cmndp = scp;
+
+ /*
+ * Build the ADV_SCSI_REQ_Q request.
+ */
+
+ /* Set CDB length and copy it to the request structure. */
+ scsiqp->cdb_len = scp->cmd_len;
+ /* Copy first 12 CDB bytes to cdb[]. */
+ for (i = 0; i < scp->cmd_len && i < 12; i++) {
+ scsiqp->cdb[i] = scp->cmnd[i];
+ }
+ /* Copy last 4 CDB bytes, if present, to cdb16[]. */
+ for (; i < scp->cmd_len; i++) {
+ scsiqp->cdb16[i - 12] = scp->cmnd[i];
+ }
+
+ scsiqp->target_id = scp->device->id;
+ scsiqp->target_lun = scp->device->lun;
+
+ scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
+ scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ /* Build ADV_SCSI_REQ_Q */
+
+ use_sg = scsi_dma_map(scp);
+ if (use_sg == 0) {
+ /* Zero-length transfer */
+ reqp->sgblkp = NULL;
+ scsiqp->data_cnt = 0;
+ scsiqp->vdata_addr = NULL;
+
+ scsiqp->data_addr = 0;
+ scsiqp->sg_list_ptr = NULL;
+ scsiqp->sg_real_addr = 0;
+ } else {
+ if (use_sg > ADV_MAX_SG_LIST) {
+ scmd_printk(KERN_ERR, scp, "use_sg %d > "
+ "ADV_MAX_SG_LIST %d\n", use_sg,
+ scp->device->host->sg_tablesize);
+ scsi_dma_unmap(scp);
+ scp->result = HOST_BYTE(DID_ERROR);
+
+ /*
+ * Free the 'adv_req_t' structure by adding it back
+ * to the board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ return ASC_ERROR;
+ }
+
+ scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp));
+
+ ret = adv_get_sglist(boardp, reqp, scp, use_sg);
+ if (ret != ADV_SUCCESS) {
+ /*
+ * Free the adv_req_t structure by adding it back to
+ * the board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ return ret;
+ }
+
+ ASC_STATS_ADD(scp->device->host, xfer_elem, use_sg);
+ }
+
+ ASC_STATS(scp->device->host, xfer_cnt);
+
+ ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
+ ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+ *adv_scsiqpp = scsiqp;
+
+ return ASC_NOERROR;
+}
+
+static int AscSgListToQueue(int sg_list)
+{
+ int n_sg_list_qs;
+
+ n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q);
+ if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0)
+ n_sg_list_qs++;
+ return n_sg_list_qs + 1;
+}
+
+static uint
+AscGetNumOfFreeQueue(ASC_DVC_VAR *asc_dvc, uchar target_ix, uchar n_qs)
+{
+ uint cur_used_qs;
+ uint cur_free_qs;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ uchar tid_no;
+
+ target_id = ASC_TIX_TO_TARGET_ID(target_ix);
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ if ((asc_dvc->unit_not_ready & target_id) ||
+ (asc_dvc->queue_full_or_busy & target_id)) {
+ return 0;
+ }
+ if (n_qs == 1) {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) asc_dvc->last_q_shortage + (uint) ASC_MIN_FREE_Q;
+ } else {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) ASC_MIN_FREE_Q;
+ }
+ if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
+ cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
+ if (asc_dvc->cur_dvc_qng[tid_no] >=
+ asc_dvc->max_dvc_qng[tid_no]) {
+ return 0;
+ }
+ return cur_free_qs;
+ }
+ if (n_qs > 1) {
+ if ((n_qs > asc_dvc->last_q_shortage)
+ && (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) {
+ asc_dvc->last_q_shortage = n_qs;
+ }
+ }
+ return 0;
+}
+
+static uchar AscAllocFreeQueue(PortAddr iop_base, uchar free_q_head)
+{
+ ushort q_addr;
+ uchar next_qp;
+ uchar q_status;
+
+ q_addr = ASC_QNO_TO_QADDR(free_q_head);
+ q_status = (uchar)AscReadLramByte(iop_base,
+ (ushort)(q_addr +
+ ASC_SCSIQ_B_STATUS));
+ next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD));
+ if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END))
+ return next_qp;
+ return ASC_QLINK_END;
+}
+
+static uchar
+AscAllocMultipleFreeQueue(PortAddr iop_base, uchar free_q_head, uchar n_free_q)
+{
+ uchar i;
+
+ for (i = 0; i < n_free_q; i++) {
+ free_q_head = AscAllocFreeQueue(iop_base, free_q_head);
+ if (free_q_head == ASC_QLINK_END)
+ break;
+ }
+ return free_q_head;
+}
+
+/*
+ * void
+ * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Output an ASC_SCSI_Q structure to the chip
+ */
+static void
+DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
+{
+ int i;
+
+ ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", outbuf, 2 * words);
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < 2 * words; i += 2) {
+ if (i == 4 || i == 20) {
+ continue;
+ }
+ outpw(iop_base + IOP_RAM_DATA,
+ ((ushort)outbuf[i + 1] << 8) | outbuf[i]);
+ }
+}
+
+static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
+{
+ ushort q_addr;
+ uchar tid_no;
+ uchar sdtr_data;
+ uchar syn_period_ix;
+ uchar syn_offset;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
+ ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
+ tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ syn_period_ix =
+ (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1);
+ syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->sdtr_period_tbl[syn_period_ix],
+ syn_offset);
+ scsiq->q1.cntl |= QC_MSG_OUT;
+ }
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
+ scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
+ }
+ scsiq->q1.status = QS_FREE;
+ AscMemWordCopyPtrToLram(iop_base,
+ q_addr + ASC_SCSIQ_CDB_BEG,
+ (uchar *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1);
+
+ DvcPutScsiQ(iop_base,
+ q_addr + ASC_SCSIQ_CPY_BEG,
+ (uchar *)&scsiq->q1.cntl,
+ ((sizeof(ASC_SCSIQ_1) + sizeof(ASC_SCSIQ_2)) / 2) - 1);
+ AscWriteLramWord(iop_base,
+ (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS),
+ (ushort)(((ushort)scsiq->q1.
+ q_no << 8) | (ushort)QS_READY));
+ return 1;
+}
+
+static int
+AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
+{
+ int sta;
+ int i;
+ ASC_SG_HEAD *sg_head;
+ ASC_SG_LIST_Q scsi_sg_q;
+ ASC_DCNT saved_data_addr;
+ ASC_DCNT saved_data_cnt;
+ PortAddr iop_base;
+ ushort sg_list_dwords;
+ ushort sg_index;
+ ushort sg_entry_cnt;
+ ushort q_addr;
+ uchar next_qp;
+
+ iop_base = asc_dvc->iop_base;
+ sg_head = scsiq->sg_head;
+ saved_data_addr = scsiq->q1.data_addr;
+ saved_data_cnt = scsiq->q1.data_cnt;
+ scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes;
+#if CC_VERY_LONG_SG_LIST
+ /*
+ * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST
+ * then not all SG elements will fit in the allocated queues.
+ * The rest of the SG elements will be copied when the RISC
+ * completes the SG elements that fit and halts.
+ */
+ if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
+ /*
+ * Set sg_entry_cnt to be the number of SG elements that
+ * will fit in the allocated SG queues. It is minus 1, because
+ * the first SG element is handled above. ASC_MAX_SG_LIST is
+ * already inflated by 1 to account for this. For example it
+ * may be 50 which is 1 + 7 queues * 7 SG elements.
+ */
+ sg_entry_cnt = ASC_MAX_SG_LIST - 1;
+
+ /*
+ * Keep track of remaining number of SG elements that will
+ * need to be handled from a_isr.c.
+ */
+ scsiq->remain_sg_entry_cnt =
+ sg_head->entry_cnt - ASC_MAX_SG_LIST;
+ } else {
+#endif /* CC_VERY_LONG_SG_LIST */
+ /*
+ * Set sg_entry_cnt to be the number of SG elements that
+ * will fit in the allocated SG queues. It is minus 1, because
+ * the first SG element is handled above.
+ */
+ sg_entry_cnt = sg_head->entry_cnt - 1;
+#if CC_VERY_LONG_SG_LIST
+ }
+#endif /* CC_VERY_LONG_SG_LIST */
+ if (sg_entry_cnt != 0) {
+ scsiq->q1.cntl |= QC_SG_HEAD;
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ sg_index = 1;
+ scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
+ scsi_sg_q.sg_head_qp = q_no;
+ scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
+ for (i = 0; i < sg_head->queue_cnt; i++) {
+ scsi_sg_q.seq_no = i + 1;
+ if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
+ sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
+ sg_entry_cnt -= ASC_SG_LIST_PER_Q;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt =
+ ASC_SG_LIST_PER_Q;
+ scsi_sg_q.sg_cur_list_cnt =
+ ASC_SG_LIST_PER_Q;
+ } else {
+ scsi_sg_q.sg_list_cnt =
+ ASC_SG_LIST_PER_Q - 1;
+ scsi_sg_q.sg_cur_list_cnt =
+ ASC_SG_LIST_PER_Q - 1;
+ }
+ } else {
+#if CC_VERY_LONG_SG_LIST
+ /*
+ * This is the last SG queue in the list of
+ * allocated SG queues. If there are more
+ * SG elements than will fit in the allocated
+ * queues, then set the QCSG_SG_XFER_MORE flag.
+ */
+ if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
+ scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
+ } else {
+#endif /* CC_VERY_LONG_SG_LIST */
+ scsi_sg_q.cntl |= QCSG_SG_XFER_END;
+#if CC_VERY_LONG_SG_LIST
+ }
+#endif /* CC_VERY_LONG_SG_LIST */
+ sg_list_dwords = sg_entry_cnt << 1;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt;
+ scsi_sg_q.sg_cur_list_cnt =
+ sg_entry_cnt;
+ } else {
+ scsi_sg_q.sg_list_cnt =
+ sg_entry_cnt - 1;
+ scsi_sg_q.sg_cur_list_cnt =
+ sg_entry_cnt - 1;
+ }
+ sg_entry_cnt = 0;
+ }
+ next_qp = AscReadLramByte(iop_base,
+ (ushort)(q_addr +
+ ASC_SCSIQ_B_FWD));
+ scsi_sg_q.q_no = next_qp;
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ AscMemWordCopyPtrToLram(iop_base,
+ q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
+ (uchar *)&scsi_sg_q,
+ sizeof(ASC_SG_LIST_Q) >> 1);
+ AscMemDWordCopyPtrToLram(iop_base,
+ q_addr + ASC_SGQ_LIST_BEG,
+ (uchar *)&sg_head->
+ sg_list[sg_index],
+ sg_list_dwords);
+ sg_index += ASC_SG_LIST_PER_Q;
+ scsiq->next_sg_index = sg_index;
+ }
+ } else {
+ scsiq->q1.cntl &= ~QC_SG_HEAD;
+ }
+ sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
+ scsiq->q1.data_addr = saved_data_addr;
+ scsiq->q1.data_cnt = saved_data_cnt;
+ return (sta);
+}
+
+static int
+AscSendScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar n_q_required)
+{
+ PortAddr iop_base;
+ uchar free_q_head;
+ uchar next_qp;
+ uchar tid_no;
+ uchar target_ix;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ sta = 0;
+ free_q_head = (uchar)AscGetVarFreeQHead(iop_base);
+ if (n_q_required > 1) {
+ next_qp = AscAllocMultipleFreeQueue(iop_base, free_q_head,
+ (uchar)n_q_required);
+ if (next_qp != ASC_QLINK_END) {
+ asc_dvc->last_q_shortage = 0;
+ scsiq->sg_head->queue_cnt = n_q_required - 1;
+ scsiq->q1.q_no = free_q_head;
+ sta = AscPutReadySgListQueue(asc_dvc, scsiq,
+ free_q_head);
+ }
+ } else if (n_q_required == 1) {
+ next_qp = AscAllocFreeQueue(iop_base, free_q_head);
+ if (next_qp != ASC_QLINK_END) {
+ scsiq->q1.q_no = free_q_head;
+ sta = AscPutReadyQueue(asc_dvc, scsiq, free_q_head);
+ }
+ }
+ if (sta == 1) {
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng += n_q_required;
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return sta;
+}
+
+#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16
+static uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] = {
+ INQUIRY,
+ REQUEST_SENSE,
+ READ_CAPACITY,
+ READ_TOC,
+ MODE_SELECT,
+ MODE_SENSE,
+ MODE_SELECT_10,
+ MODE_SENSE_10,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF
+};
+
+static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
+{
+ PortAddr iop_base;
+ int sta;
+ int n_q_required;
+ int disable_syn_offset_one_fix;
+ int i;
+ ASC_PADDR addr;
+ ushort sg_entry_cnt = 0;
+ ushort sg_entry_cnt_minus_one = 0;
+ uchar target_ix;
+ uchar tid_no;
+ uchar sdtr_data;
+ uchar extra_bytes;
+ uchar scsi_cmd;
+ uchar disable_cmd;
+ ASC_SG_HEAD *sg_head;
+ ASC_DCNT data_cnt;
+
+ iop_base = asc_dvc->iop_base;
+ sg_head = scsiq->sg_head;
+ if (asc_dvc->err_code != 0)
+ return (ERR);
+ scsiq->q1.q_no = 0;
+ if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
+ scsiq->q1.extra_bytes = 0;
+ }
+ sta = 0;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ n_q_required = 1;
+ if (scsiq->cdbptr[0] == REQUEST_SENSE) {
+ if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) {
+ asc_dvc->sdtr_done &= ~scsiq->q1.target_id;
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->
+ sdtr_period_tbl[(sdtr_data >> 4) &
+ (uchar)(asc_dvc->
+ max_sdtr_index -
+ 1)],
+ (uchar)(sdtr_data & (uchar)
+ ASC_SYN_MAX_OFFSET));
+ scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
+ }
+ }
+ if (asc_dvc->in_critical_cnt != 0) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
+ return (ERR);
+ }
+ asc_dvc->in_critical_cnt++;
+ if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+ if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
+ asc_dvc->in_critical_cnt--;
+ return (ERR);
+ }
+#if !CC_VERY_LONG_SG_LIST
+ if (sg_entry_cnt > ASC_MAX_SG_LIST) {
+ asc_dvc->in_critical_cnt--;
+ return (ERR);
+ }
+#endif /* !CC_VERY_LONG_SG_LIST */
+ if (sg_entry_cnt == 1) {
+ scsiq->q1.data_addr =
+ (ADV_PADDR)sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt =
+ (ADV_DCNT)sg_head->sg_list[0].bytes;
+ scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
+ }
+ sg_entry_cnt_minus_one = sg_entry_cnt - 1;
+ }
+ scsi_cmd = scsiq->cdbptr[0];
+ disable_syn_offset_one_fix = FALSE;
+ if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
+ !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
+ if (scsiq->q1.cntl & QC_SG_HEAD) {
+ data_cnt = 0;
+ for (i = 0; i < sg_entry_cnt; i++) {
+ data_cnt +=
+ (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i].
+ bytes);
+ }
+ } else {
+ data_cnt = le32_to_cpu(scsiq->q1.data_cnt);
+ }
+ if (data_cnt != 0UL) {
+ if (data_cnt < 512UL) {
+ disable_syn_offset_one_fix = TRUE;
+ } else {
+ for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST;
+ i++) {
+ disable_cmd =
+ _syn_offset_one_disable_cmd[i];
+ if (disable_cmd == 0xFF) {
+ break;
+ }
+ if (scsi_cmd == disable_cmd) {
+ disable_syn_offset_one_fix =
+ TRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (disable_syn_offset_one_fix) {
+ scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
+ scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
+ ASC_TAG_FLAG_DISABLE_DISCONNECT);
+ } else {
+ scsiq->q2.tag_code &= 0x27;
+ }
+ if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
+ if ((scsi_cmd == READ_6) ||
+ (scsi_cmd == READ_10)) {
+ addr =
+ (ADV_PADDR)le32_to_cpu(sg_head->
+ sg_list
+ [sg_entry_cnt_minus_one].
+ addr) +
+ (ADV_DCNT)le32_to_cpu(sg_head->
+ sg_list
+ [sg_entry_cnt_minus_one].
+ bytes);
+ extra_bytes =
+ (uchar)((ushort)addr & 0x0003);
+ if ((extra_bytes != 0)
+ &&
+ ((scsiq->q2.
+ tag_code &
+ ASC_TAG_FLAG_EXTRA_BYTES)
+ == 0)) {
+ scsiq->q2.tag_code |=
+ ASC_TAG_FLAG_EXTRA_BYTES;
+ scsiq->q1.extra_bytes =
+ extra_bytes;
+ data_cnt =
+ le32_to_cpu(sg_head->
+ sg_list
+ [sg_entry_cnt_minus_one].
+ bytes);
+ data_cnt -=
+ (ASC_DCNT) extra_bytes;
+ sg_head->
+ sg_list
+ [sg_entry_cnt_minus_one].
+ bytes =
+ cpu_to_le32(data_cnt);
+ }
+ }
+ }
+ }
+ sg_head->entry_to_copy = sg_head->entry_cnt;
+#if CC_VERY_LONG_SG_LIST
+ /*
+ * Set the sg_entry_cnt to the maximum possible. The rest of
+ * the SG elements will be copied when the RISC completes the
+ * SG elements that fit and halts.
+ */
+ if (sg_entry_cnt > ASC_MAX_SG_LIST) {
+ sg_entry_cnt = ASC_MAX_SG_LIST;
+ }
+#endif /* CC_VERY_LONG_SG_LIST */
+ n_q_required = AscSgListToQueue(sg_entry_cnt);
+ if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
+ (uint) n_q_required)
+ || ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+ if ((sta =
+ AscSendScsiQueue(asc_dvc, scsiq,
+ n_q_required)) == 1) {
+ asc_dvc->in_critical_cnt--;
+ return (sta);
+ }
+ }
+ } else {
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
+ if ((scsi_cmd == READ_6) ||
+ (scsi_cmd == READ_10)) {
+ addr =
+ le32_to_cpu(scsiq->q1.data_addr) +
+ le32_to_cpu(scsiq->q1.data_cnt);
+ extra_bytes =
+ (uchar)((ushort)addr & 0x0003);
+ if ((extra_bytes != 0)
+ &&
+ ((scsiq->q2.
+ tag_code &
+ ASC_TAG_FLAG_EXTRA_BYTES)
+ == 0)) {
+ data_cnt =
+ le32_to_cpu(scsiq->q1.
+ data_cnt);
+ if (((ushort)data_cnt & 0x01FF)
+ == 0) {
+ scsiq->q2.tag_code |=
+ ASC_TAG_FLAG_EXTRA_BYTES;
+ data_cnt -= (ASC_DCNT)
+ extra_bytes;
+ scsiq->q1.data_cnt =
+ cpu_to_le32
+ (data_cnt);
+ scsiq->q1.extra_bytes =
+ extra_bytes;
+ }
+ }
+ }
+ }
+ }
+ n_q_required = 1;
+ if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
+ ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+ if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+ n_q_required)) == 1) {
+ asc_dvc->in_critical_cnt--;
+ return (sta);
+ }
+ }
+ }
+ asc_dvc->in_critical_cnt--;
+ return (sta);
+}
+
+/*
+ * AdvExeScsiQueue() - Send a request to the RISC microcode program.
+ *
+ * Allocate a carrier structure, point the carrier to the ADV_SCSI_REQ_Q,
+ * add the carrier to the ICQ (Initiator Command Queue), and tickle the
+ * RISC to notify it a new command is ready to be executed.
+ *
+ * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be
+ * set to SCSI_MAX_RETRY.
+ *
+ * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode
+ * for DMA addresses or math operations are byte swapped to little-endian
+ * order.
+ *
+ * Return:
+ * ADV_SUCCESS(1) - The request was successfully queued.
+ * ADV_BUSY(0) - Resource unavailable; Retry again after pending
+ * request completes.
+ * ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure
+ * host IC error.
+ */
+static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
+{
+ AdvPortAddr iop_base;
+ ADV_PADDR req_paddr;
+ ADV_CARR_T *new_carrp;
+
+ /*
+ * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID.
+ */
+ if (scsiq->target_id > ADV_MAX_TID) {
+ scsiq->host_status = QHSTA_M_INVALID_DEVICE;
+ scsiq->done_status = QD_WITH_ERROR;
+ return ADV_ERROR;
+ }
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Allocate a carrier ensuring at least one carrier always
+ * remains on the freelist and initialize fields.
+ */
+ if ((new_carrp = asc_dvc->carr_freelist) == NULL) {
+ return ADV_BUSY;
+ }
+ asc_dvc->carr_freelist = (ADV_CARR_T *)
+ ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa));
+ asc_dvc->carr_pending_cnt++;
+
+ /*
+ * Set the carrier to be a stopper by setting 'next_vpa'
+ * to the stopper value. The current stopper will be changed
+ * below to point to the new stopper.
+ */
+ new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);
+
+ /*
+ * Clear the ADV_SCSI_REQ_Q done flag.
+ */
+ scsiq->a_flag &= ~ADV_SCSIQ_DONE;
+
+ req_paddr = virt_to_bus(scsiq);
+ BUG_ON(req_paddr & 31);
+ /* Wait for assertion before making little-endian */
+ req_paddr = cpu_to_le32(req_paddr);
+
+ /* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */
+ scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq));
+ scsiq->scsiq_rptr = req_paddr;
+
+ scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp));
+ /*
+ * Every ADV_CARR_T.carr_pa is byte swapped to little-endian
+ * order during initialization.
+ */
+ scsiq->carr_pa = asc_dvc->icq_sp->carr_pa;
+
+ /*
+ * Use the current stopper to send the ADV_SCSI_REQ_Q command to
+ * the microcode. The newly allocated stopper will become the new
+ * stopper.
+ */
+ asc_dvc->icq_sp->areq_vpa = req_paddr;
+
+ /*
+ * Set the 'next_vpa' pointer for the old stopper to be the
+ * physical address of the new stopper. The RISC can only
+ * follow physical addresses.
+ */
+ asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa;
+
+ /*
+ * Set the host adapter stopper pointer to point to the new carrier.
+ */
+ asc_dvc->icq_sp = new_carrp;
+
+ if (asc_dvc->chip_type == ADV_CHIP_ASC3550 ||
+ asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
+ /*
+ * Tickle the RISC to tell it to read its Command Queue Head pointer.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A);
+ if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
+ /*
+ * Clear the tickle value. In the ASC-3550 the RISC flag
+ * command 'clr_tickle_a' does not work unless the host
+ * value is cleared.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_TICKLE,
+ ADV_TICKLE_NOP);
+ }
+ } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
+ /*
+ * Notify the RISC a carrier is ready by writing the physical
+ * address of the new carrier stopper to the COMMA register.
+ */
+ AdvWriteDWordRegister(iop_base, IOPDW_COMMA,
+ le32_to_cpu(new_carrp->carr_pa));
+ }
+
+ return ADV_SUCCESS;
+}
+
+/*
+ * Execute a single 'Scsi_Cmnd'.
+ */
+static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
+{
+ int ret, err_code;
+ struct asc_board *boardp = shost_priv(scp->device->host);
+
+ ASC_DBG(1, "scp 0x%p\n", scp);
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
+ struct asc_scsi_q asc_scsi_q;
+
+ /* asc_build_req() can not return ASC_BUSY. */
+ ret = asc_build_req(boardp, scp, &asc_scsi_q);
+ if (ret == ASC_ERROR) {
+ ASC_STATS(scp->device->host, build_error);
+ return ASC_ERROR;
+ }
+
+ ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q);
+ kfree(asc_scsi_q.sg_head);
+ err_code = asc_dvc->err_code;
+ } else {
+ ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
+ ADV_SCSI_REQ_Q *adv_scsiqp;
+
+ switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
+ case ASC_NOERROR:
+ ASC_DBG(3, "adv_build_req ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ ASC_DBG(1, "adv_build_req ASC_BUSY\n");
+ /*
+ * The asc_stats fields 'adv_build_noreq' and
+ * 'adv_build_nosg' count wide board busy conditions.
+ * They are updated in adv_build_req and
+ * adv_get_sglist, respectively.
+ */
+ return ASC_BUSY;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "adv_build_req ASC_ERROR\n");
+ ASC_STATS(scp->device->host, build_error);
+ return ASC_ERROR;
+ }
+
+ ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp);
+ err_code = adv_dvc->err_code;
+ }
+
+ switch (ret) {
+ case ASC_NOERROR:
+ ASC_STATS(scp->device->host, exe_noerror);
+ /*
+ * Increment monotonically increasing per device
+ * successful request counter. Wrapping doesn't matter.
+ */
+ boardp->reqcnt[scp->device->id]++;
+ ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ ASC_STATS(scp->device->host, exe_busy);
+ break;
+ case ASC_ERROR:
+ scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, "
+ "err_code 0x%x\n", err_code);
+ ASC_STATS(scp->device->host, exe_error);
+ scp->result = HOST_BYTE(DID_ERROR);
+ break;
+ default:
+ scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, "
+ "err_code 0x%x\n", err_code);
+ ASC_STATS(scp->device->host, exe_unknown);
+ scp->result = HOST_BYTE(DID_ERROR);
+ break;
+ }
+
+ ASC_DBG(1, "end\n");
+ return ret;
+}
+
+/*
+ * advansys_queuecommand() - interrupt-driven I/O entrypoint.
+ *
+ * This function always returns 0. Command return status is saved
+ * in the 'scp' result field.
+ */
+static int
+advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *shost = scp->device->host;
+ int asc_res, result = 0;
+
+ ASC_STATS(shost, queuecommand);
+ scp->scsi_done = done;
+
+ asc_res = asc_execute_scsi_cmnd(scp);
+
+ switch (asc_res) {
+ case ASC_NOERROR:
+ break;
+ case ASC_BUSY:
+ result = SCSI_MLQUEUE_HOST_BUSY;
+ break;
+ case ASC_ERROR:
+ default:
+ asc_scsi_done(scp);
+ break;
+ }
+
+ return result;
+}
+
+static DEF_SCSI_QCMD(advansys_queuecommand)
+
+static ushort AscGetEisaChipCfg(PortAddr iop_base)
+{
+ PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) (ASC_EISA_CFG_IOP_MASK);
+ return inpw(eisa_cfg_iop);
+}
+
+/*
+ * Return the BIOS address of the adapter at the specified
+ * I/O port and with the specified bus type.
+ */
+static unsigned short AscGetChipBiosAddress(PortAddr iop_base,
+ unsigned short bus_type)
+{
+ unsigned short cfg_lsw;
+ unsigned short bios_addr;
+
+ /*
+ * The PCI BIOS is re-located by the motherboard BIOS. Because
+ * of this the driver can not determine where a PCI BIOS is
+ * loaded and executes.
+ */
+ if (bus_type & ASC_IS_PCI)
+ return 0;
+
+ if ((bus_type & ASC_IS_EISA) != 0) {
+ cfg_lsw = AscGetEisaChipCfg(iop_base);
+ cfg_lsw &= 0x000F;
+ bios_addr = ASC_BIOS_MIN_ADDR + cfg_lsw * ASC_BIOS_BANK_SIZE;
+ return bios_addr;
+ }
+
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+
+ /*
+ * ISA PnP uses the top bit as the 32K BIOS flag
+ */
+ if (bus_type == ASC_IS_ISAPNP)
+ cfg_lsw &= 0x7FFF;
+ bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
+ return bios_addr;
+}
+
+static uchar AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
+{
+ ushort cfg_lsw;
+
+ if (AscGetChipScsiID(iop_base) == new_host_id) {
+ return (new_host_id);
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ cfg_lsw &= 0xF8FF;
+ cfg_lsw |= (ushort)((new_host_id & ASC_MAX_TID) << 8);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetChipScsiID(iop_base));
+}
+
+static unsigned char AscGetChipScsiCtrl(PortAddr iop_base)
+{
+ unsigned char sc;
+
+ AscSetBank(iop_base, 1);
+ sc = inp(iop_base + IOP_REG_SC);
+ AscSetBank(iop_base, 0);
+ return sc;
+}
+
+static unsigned char AscGetChipVersion(PortAddr iop_base,
+ unsigned short bus_type)
+{
+ if (bus_type & ASC_IS_EISA) {
+ PortAddr eisa_iop;
+ unsigned char revision;
+ eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) ASC_EISA_REV_IOP_MASK;
+ revision = inp(eisa_iop);
+ return ASC_CHIP_MIN_VER_EISA - 1 + revision;
+ }
+ return AscGetChipVerNo(iop_base);
+}
+
+#ifdef CONFIG_ISA
+static void AscEnableIsaDma(uchar dma_channel)
+{
+ if (dma_channel < 4) {
+ outp(0x000B, (ushort)(0xC0 | dma_channel));
+ outp(0x000A, dma_channel);
+ } else if (dma_channel < 8) {
+ outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
+ outp(0x00D4, (ushort)(dma_channel - 4));
+ }
+}
+#endif /* CONFIG_ISA */
+
+static int AscStopQueueExe(PortAddr iop_base)
+{
+ int count = 0;
+
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_REQ_RISC_STOP);
+ do {
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) &
+ ASC_STOP_ACK_RISC_STOP) {
+ return (1);
+ }
+ mdelay(100);
+ } while (count++ < 20);
+ }
+ return (0);
+}
+
+static ASC_DCNT AscGetMaxDmaCount(ushort bus_type)
+{
+ if (bus_type & ASC_IS_ISA)
+ return ASC_MAX_ISA_DMA_COUNT;
+ else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
+ return ASC_MAX_VL_DMA_COUNT;
+ return ASC_MAX_PCI_DMA_COUNT;
+}
+
+#ifdef CONFIG_ISA
+static ushort AscGetIsaDmaChannel(PortAddr iop_base)
+{
+ ushort channel;
+
+ channel = AscGetChipCfgLsw(iop_base) & 0x0003;
+ if (channel == 0x03)
+ return (0);
+ else if (channel == 0x00)
+ return (7);
+ return (channel + 4);
+}
+
+static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
+{
+ ushort cfg_lsw;
+ uchar value;
+
+ if ((dma_channel >= 5) && (dma_channel <= 7)) {
+ if (dma_channel == 7)
+ value = 0x00;
+ else
+ value = dma_channel - 4;
+ cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
+ cfg_lsw |= value;
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetIsaDmaChannel(iop_base));
+ }
+ return 0;
+}
+
+static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
+{
+ uchar speed_value;
+
+ AscSetBank(iop_base, 1);
+ speed_value = AscReadChipDmaSpeed(iop_base);
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 0);
+ return speed_value;
+}
+
+static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
+{
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 1);
+ AscWriteChipDmaSpeed(iop_base, speed_value);
+ AscSetBank(iop_base, 0);
+ return AscGetIsaDmaSpeed(iop_base);
+}
+#endif /* CONFIG_ISA */
+
+static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
+{
+ int i;
+ PortAddr iop_base;
+ ushort warn_code;
+ uchar chip_version;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ asc_dvc->err_code = 0;
+ if ((asc_dvc->bus_type &
+ (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
+ asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
+ }
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base, 0);
+ asc_dvc->bug_fix_cntl = 0;
+ asc_dvc->pci_fix_asyn_xfer = 0;
+ asc_dvc->pci_fix_asyn_xfer_always = 0;
+ /* asc_dvc->init_state initialized in AscInitGetConfig(). */
+ asc_dvc->sdtr_done = 0;
+ asc_dvc->cur_total_qng = 0;
+ asc_dvc->is_in_int = 0;
+ asc_dvc->in_critical_cnt = 0;
+ asc_dvc->last_q_shortage = 0;
+ asc_dvc->use_tagged_qng = 0;
+ asc_dvc->no_scam = 0;
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+ asc_dvc->redo_scam = 0;
+ asc_dvc->res2 = 0;
+ asc_dvc->min_sdtr_index = 0;
+ asc_dvc->cfg->can_tagged_qng = 0;
+ asc_dvc->cfg->cmd_qng_enabled = 0;
+ asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL;
+ asc_dvc->init_sdtr = 0;
+ asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG;
+ asc_dvc->scsi_reset_wait = 3;
+ asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type);
+ asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID;
+ chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type);
+ asc_dvc->cfg->chip_version = chip_version;
+ asc_dvc->sdtr_period_tbl = asc_syn_xfer_period;
+ asc_dvc->max_sdtr_index = 7;
+ if ((asc_dvc->bus_type & ASC_IS_PCI) &&
+ (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) {
+ asc_dvc->bus_type = ASC_IS_PCI_ULTRA;
+ asc_dvc->sdtr_period_tbl = asc_syn_ultra_xfer_period;
+ asc_dvc->max_sdtr_index = 15;
+ if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150) {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
+ } else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE |
+ SEC_ENABLE_FILTER));
+ }
+ }
+ if (asc_dvc->bus_type == ASC_IS_PCI) {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
+ }
+
+ asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
+#ifdef CONFIG_ISA
+ if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
+ if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
+ AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
+ asc_dvc->bus_type = ASC_IS_ISAPNP;
+ }
+ asc_dvc->cfg->isa_dma_channel =
+ (uchar)AscGetIsaDmaChannel(iop_base);
+ }
+#endif /* CONFIG_ISA */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cur_dvc_qng[i] = 0;
+ asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
+ asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *)0L;
+ asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L;
+ asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
+ }
+ return warn_code;
+}
+
+static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
+{
+ int retry;
+
+ for (retry = 0; retry < ASC_EEP_MAX_RETRY; retry++) {
+ unsigned char read_back;
+ AscSetChipEEPCmd(iop_base, cmd_reg);
+ mdelay(1);
+ read_back = AscGetChipEEPCmd(iop_base);
+ if (read_back == cmd_reg)
+ return 1;
+ }
+ return 0;
+}
+
+static void AscWaitEEPRead(void)
+{
+ mdelay(1);
+}
+
+static ushort AscReadEEPWord(PortAddr iop_base, uchar addr)
+{
+ ushort read_wval;
+ uchar cmd_reg;
+
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ cmd_reg = addr | ASC_EEP_CMD_READ;
+ AscWriteEEPCmdReg(iop_base, cmd_reg);
+ AscWaitEEPRead();
+ read_wval = AscGetChipEEPData(iop_base);
+ AscWaitEEPRead();
+ return read_wval;
+}
+
+static ushort AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
+{
+ ushort wval;
+ ushort sum;
+ ushort *wbuf;
+ int cfg_beg;
+ int cfg_end;
+ int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2;
+ int s_addr;
+
+ wbuf = (ushort *)cfg_buf;
+ sum = 0;
+ /* Read two config words; Byte-swapping done by AscReadEEPWord(). */
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr);
+ sum += *wbuf;
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar)s_addr);
+ if (s_addr <= uchar_end_in_config) {
+ /*
+ * Swap all char fields - must unswap bytes already swapped
+ * by AscReadEEPWord().
+ */
+ *wbuf = le16_to_cpu(wval);
+ } else {
+ /* Don't swap word field at the end - cntl field. */
+ *wbuf = wval;
+ }
+ sum += wval; /* Checksum treats all EEPROM data as words. */
+ }
+ /*
+ * Read the checksum word which will be compared against 'sum'
+ * by the caller. Word field already swapped.
+ */
+ *wbuf = AscReadEEPWord(iop_base, (uchar)s_addr);
+ return sum;
+}
+
+static int AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort saved_word;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ sta = 0;
+ q_addr = ASC_QNO_TO_QADDR(241);
+ saved_word = AscReadLramWord(iop_base, q_addr);
+ AscSetChipLramAddr(iop_base, q_addr);
+ AscSetChipLramData(iop_base, 0x55AA);
+ mdelay(10);
+ AscSetChipLramAddr(iop_base, q_addr);
+ if (AscGetChipLramData(iop_base) == 0x55AA) {
+ sta = 1;
+ AscWriteLramWord(iop_base, q_addr, saved_word);
+ }
+ return (sta);
+}
+
+static void AscWaitEEPWrite(void)
+{
+ mdelay(20);
+}
+
+static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
+{
+ ushort read_back;
+ int retry;
+
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPData(iop_base, data_reg);
+ mdelay(1);
+ read_back = AscGetChipEEPData(iop_base);
+ if (read_back == data_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+static ushort AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
+{
+ ushort read_wval;
+
+ read_wval = AscReadEEPWord(iop_base, addr);
+ if (read_wval != word_val) {
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE);
+ AscWaitEEPRead();
+ AscWriteEEPDataReg(iop_base, word_val);
+ AscWaitEEPRead();
+ AscWriteEEPCmdReg(iop_base,
+ (uchar)((uchar)ASC_EEP_CMD_WRITE | addr));
+ AscWaitEEPWrite();
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ return (AscReadEEPWord(iop_base, addr));
+ }
+ return (read_wval);
+}
+
+static int AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
+{
+ int n_error;
+ ushort *wbuf;
+ ushort word;
+ ushort sum;
+ int s_addr;
+ int cfg_beg;
+ int cfg_end;
+ int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2;
+
+ wbuf = (ushort *)cfg_buf;
+ n_error = 0;
+ sum = 0;
+ /* Write two config words; AscWriteEEPWord() will swap bytes. */
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
+ if (s_addr <= uchar_end_in_config) {
+ /*
+ * This is a char field. Swap char fields before they are
+ * swapped again by AscWriteEEPWord().
+ */
+ word = cpu_to_le16(*wbuf);
+ if (word !=
+ AscWriteEEPWord(iop_base, (uchar)s_addr, word)) {
+ n_error++;
+ }
+ } else {
+ /* Don't swap word field at the end - cntl field. */
+ if (*wbuf !=
+ AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ sum += *wbuf; /* Checksum calculated from word values. */
+ }
+ /* Write checksum word. It will be swapped by AscWriteEEPWord(). */
+ *wbuf = sum;
+ if (sum != AscWriteEEPWord(iop_base, (uchar)s_addr, sum)) {
+ n_error++;
+ }
+
+ /* Read EEPROM back again. */
+ wbuf = (ushort *)cfg_buf;
+ /*
+ * Read two config words; Byte-swapping done by AscReadEEPWord().
+ */
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar)s_addr)) {
+ n_error++;
+ }
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
+ if (s_addr <= uchar_end_in_config) {
+ /*
+ * Swap all char fields. Must unswap bytes already swapped
+ * by AscReadEEPWord().
+ */
+ word =
+ le16_to_cpu(AscReadEEPWord
+ (iop_base, (uchar)s_addr));
+ } else {
+ /* Don't swap word field at the end - cntl field. */
+ word = AscReadEEPWord(iop_base, (uchar)s_addr);
+ }
+ if (*wbuf != word) {
+ n_error++;
+ }
+ }
+ /* Read checksum; Byte swapping not needed. */
+ if (AscReadEEPWord(iop_base, (uchar)s_addr) != sum) {
+ n_error++;
+ }
+ return n_error;
+}
+
+static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
+{
+ int retry;
+ int n_error;
+
+ retry = 0;
+ while (TRUE) {
+ if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
+ bus_type)) == 0) {
+ break;
+ }
+ if (++retry > ASC_EEP_MAX_RETRY) {
+ break;
+ }
+ }
+ return n_error;
+}
+
+static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
+{
+ ASCEEP_CONFIG eep_config_buf;
+ ASCEEP_CONFIG *eep_config;
+ PortAddr iop_base;
+ ushort chksum;
+ ushort warn_code;
+ ushort cfg_msw, cfg_lsw;
+ int i;
+ int write_eep = 0;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
+ AscStopQueueExe(iop_base);
+ if ((AscStopChip(iop_base) == FALSE) ||
+ (AscGetChipScsiCtrl(iop_base) != 0)) {
+ asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
+ AscResetChipAndScsiBus(asc_dvc);
+ mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */
+ }
+ if (AscIsChipHalted(iop_base) == FALSE) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ eep_config = (ASCEEP_CONFIG *)&eep_config_buf;
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
+ ASC_DBG(1, "chksum 0x%x\n", chksum);
+ if (chksum == 0) {
+ chksum = 0xaa55;
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+ if (asc_dvc->cfg->chip_version == 3) {
+ if (eep_config->cfg_lsw != cfg_lsw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_lsw =
+ AscGetChipCfgLsw(iop_base);
+ }
+ if (eep_config->cfg_msw != cfg_msw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_msw =
+ AscGetChipCfgMsw(iop_base);
+ }
+ }
+ }
+ eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
+ eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON;
+ ASC_DBG(1, "eep_config->chksum 0x%x\n", eep_config->chksum);
+ if (chksum != eep_config->chksum) {
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type) ==
+ ASC_CHIP_VER_PCI_ULTRA_3050) {
+ ASC_DBG(1, "chksum error ignored; EEPROM-less board\n");
+ eep_config->init_sdtr = 0xFF;
+ eep_config->disc_enable = 0xFF;
+ eep_config->start_motor = 0xFF;
+ eep_config->use_cmd_qng = 0;
+ eep_config->max_total_qng = 0xF0;
+ eep_config->max_tag_qng = 0x20;
+ eep_config->cntl = 0xBFFF;
+ ASC_EEP_SET_CHIP_ID(eep_config, 7);
+ eep_config->no_scam = 0;
+ eep_config->adapter_info[0] = 0;
+ eep_config->adapter_info[1] = 0;
+ eep_config->adapter_info[2] = 0;
+ eep_config->adapter_info[3] = 0;
+ eep_config->adapter_info[4] = 0;
+ /* Indicate EEPROM-less board. */
+ eep_config->adapter_info[5] = 0xBB;
+ } else {
+ ASC_PRINT
+ ("AscInitFromEEP: EEPROM checksum error; Will try to re-write EEPROM.\n");
+ write_eep = 1;
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+ }
+ }
+ asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr;
+ asc_dvc->cfg->disc_enable = eep_config->disc_enable;
+ asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
+ asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config);
+ asc_dvc->start_motor = eep_config->start_motor;
+ asc_dvc->dvc_cntl = eep_config->cntl;
+ asc_dvc->no_scam = eep_config->no_scam;
+ asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0];
+ asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1];
+ asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2];
+ asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3];
+ asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4];
+ asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5];
+ if (!AscTestExternalLram(asc_dvc)) {
+ if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) ==
+ ASC_IS_PCI_ULTRA)) {
+ eep_config->max_total_qng =
+ ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng =
+ ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG;
+ } else {
+ eep_config->cfg_msw |= 0x0800;
+ cfg_msw |= 0x0800;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG;
+ }
+ } else {
+ }
+ if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MIN_TOTAL_QNG;
+ }
+ if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MAX_TOTAL_QNG;
+ }
+ if (eep_config->max_tag_qng > eep_config->max_total_qng) {
+ eep_config->max_tag_qng = eep_config->max_total_qng;
+ }
+ if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) {
+ eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC;
+ }
+ asc_dvc->max_total_qng = eep_config->max_total_qng;
+ if ((eep_config->use_cmd_qng & eep_config->disc_enable) !=
+ eep_config->use_cmd_qng) {
+ eep_config->disc_enable = eep_config->use_cmd_qng;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ ASC_EEP_SET_CHIP_ID(eep_config,
+ ASC_EEP_GET_CHIP_ID(eep_config) & ASC_MAX_TID);
+ asc_dvc->cfg->chip_scsi_id = ASC_EEP_GET_CHIP_ID(eep_config);
+ if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) &&
+ !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) {
+ asc_dvc->min_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX;
+ }
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i];
+ asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng;
+ asc_dvc->cfg->sdtr_period_offset[i] =
+ (uchar)(ASC_DEF_SDTR_OFFSET |
+ (asc_dvc->min_sdtr_index << 4));
+ }
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ if (write_eep) {
+ if ((i = AscSetEEPConfig(iop_base, eep_config,
+ asc_dvc->bus_type)) != 0) {
+ ASC_PRINT1
+ ("AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n",
+ i);
+ } else {
+ ASC_PRINT
+ ("AscInitFromEEP: Successfully re-wrote EEPROM.\n");
+ }
+ }
+ return (warn_code);
+}
+
+static int AscInitGetConfig(struct Scsi_Host *shost)
+{
+ struct asc_board *board = shost_priv(shost);
+ ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
+ unsigned short warn_code = 0;
+
+ asc_dvc->init_state = ASC_INIT_STATE_BEG_GET_CFG;
+ if (asc_dvc->err_code != 0)
+ return asc_dvc->err_code;
+
+ if (AscFindSignature(asc_dvc->iop_base)) {
+ warn_code |= AscInitAscDvcVar(asc_dvc);
+ warn_code |= AscInitFromEEP(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
+ if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
+ asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+
+ switch (warn_code) {
+ case 0: /* No error */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ shost_printk(KERN_WARNING, shost, "I/O port address "
+ "modified\n");
+ break;
+ case ASC_WARN_AUTO_CONFIG:
+ shost_printk(KERN_WARNING, shost, "I/O port increment switch "
+ "enabled\n");
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n");
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ shost_printk(KERN_WARNING, shost, "IRQ modified\n");
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ shost_printk(KERN_WARNING, shost, "tag queuing enabled w/o "
+ "disconnects\n");
+ break;
+ default:
+ shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n",
+ warn_code);
+ break;
+ }
+
+ if (asc_dvc->err_code != 0)
+ shost_printk(KERN_ERR, shost, "error 0x%x at init_state "
+ "0x%x\n", asc_dvc->err_code, asc_dvc->init_state);
+
+ return asc_dvc->err_code;
+}
+
+static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
+{
+ struct asc_board *board = shost_priv(shost);
+ ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
+ PortAddr iop_base = asc_dvc->iop_base;
+ unsigned short cfg_msw;
+ unsigned short warn_code = 0;
+
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG;
+ if (asc_dvc->err_code != 0)
+ return asc_dvc->err_code;
+ if (!AscFindSignature(asc_dvc->iop_base)) {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return asc_dvc->err_code;
+ }
+
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) !=
+ asc_dvc->cfg->cmd_qng_enabled) {
+ asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+ }
+#ifdef CONFIG_PCI
+ if (asc_dvc->bus_type & ASC_IS_PCI) {
+ cfg_msw &= 0xFFC0;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) {
+ } else {
+ if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) ||
+ (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
+ asc_dvc->bug_fix_cntl |=
+ ASC_BUG_FIX_ASYN_USE_SYN;
+ }
+ }
+ } else
+#endif /* CONFIG_PCI */
+ if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
+ == ASC_CHIP_VER_ASYN_BUG) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
+ }
+ }
+ if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
+ asc_dvc->cfg->chip_scsi_id) {
+ asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
+ }
+#ifdef CONFIG_ISA
+ if (asc_dvc->bus_type & ASC_IS_ISA) {
+ AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
+ AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
+ }
+#endif /* CONFIG_ISA */
+
+ asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
+
+ switch (warn_code) {
+ case 0: /* No error. */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ shost_printk(KERN_WARNING, shost, "I/O port address "
+ "modified\n");
+ break;
+ case ASC_WARN_AUTO_CONFIG:
+ shost_printk(KERN_WARNING, shost, "I/O port increment switch "
+ "enabled\n");
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n");
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ shost_printk(KERN_WARNING, shost, "IRQ modified\n");
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ shost_printk(KERN_WARNING, shost, "tag queuing w/o "
+ "disconnects\n");
+ break;
+ default:
+ shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n",
+ warn_code);
+ break;
+ }
+
+ if (asc_dvc->err_code != 0)
+ shost_printk(KERN_ERR, shost, "error 0x%x at init_state "
+ "0x%x\n", asc_dvc->err_code, asc_dvc->init_state);
+
+ return asc_dvc->err_code;
+}
+
+/*
+ * EEPROM Configuration.
+ *
+ * All drivers should use this structure to set the default EEPROM
+ * configuration. The BIOS now uses this structure when it is built.
+ * Additional structure information can be found in a_condor.h where
+ * the structure is defined.
+ *
+ * The *_Field_IsChar structs are needed to correct for endianness.
+ * These values are read from the board 16 bits at a time directly
+ * into the structs. Because some fields are char, the values will be
+ * in the wrong order. The *_Field_IsChar tells when to flip the
+ * bytes. Data read and written to PCI memory is automatically swapped
+ * on big-endian platforms so char fields read as words are actually being
+ * unswapped on big-endian platforms.
+ */
+static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = {
+ ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */
+ 0x0000, /* cfg_msw */
+ 0xFFFF, /* disc_enable */
+ 0xFFFF, /* wdtr_able */
+ 0xFFFF, /* sdtr_able */
+ 0xFFFF, /* start_motor */
+ 0xFFFF, /* tagqng_able */
+ 0xFFFF, /* bios_scan */
+ 0, /* scam_tolerant */
+ 7, /* adapter_scsi_id */
+ 0, /* bios_boot_delay */
+ 3, /* scsi_reset_delay */
+ 0, /* bios_id_lun */
+ 0, /* termination */
+ 0, /* reserved1 */
+ 0xFFE7, /* bios_ctrl */
+ 0xFFFF, /* ultra_able */
+ 0, /* reserved2 */
+ ASC_DEF_MAX_HOST_QNG, /* max_host_qng */
+ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */
+ 0, /* dvc_cntl */
+ 0, /* bug_fix */
+ 0, /* serial_number_word1 */
+ 0, /* serial_number_word2 */
+ 0, /* serial_number_word3 */
+ 0, /* check_sum */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , /* oem_name[16] */
+ 0, /* dvc_err_code */
+ 0, /* adv_err_code */
+ 0, /* adv_err_addr */
+ 0, /* saved_dvc_err_code */
+ 0, /* saved_adv_err_code */
+ 0, /* saved_adv_err_addr */
+ 0 /* num_of_err */
+};
+
+static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar = {
+ 0, /* cfg_lsw */
+ 0, /* cfg_msw */
+ 0, /* -disc_enable */
+ 0, /* wdtr_able */
+ 0, /* sdtr_able */
+ 0, /* start_motor */
+ 0, /* tagqng_able */
+ 0, /* bios_scan */
+ 0, /* scam_tolerant */
+ 1, /* adapter_scsi_id */
+ 1, /* bios_boot_delay */
+ 1, /* scsi_reset_delay */
+ 1, /* bios_id_lun */
+ 1, /* termination */
+ 1, /* reserved1 */
+ 0, /* bios_ctrl */
+ 0, /* ultra_able */
+ 0, /* reserved2 */
+ 1, /* max_host_qng */
+ 1, /* max_dvc_qng */
+ 0, /* dvc_cntl */
+ 0, /* bug_fix */
+ 0, /* serial_number_word1 */
+ 0, /* serial_number_word2 */
+ 0, /* serial_number_word3 */
+ 0, /* check_sum */
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ , /* oem_name[16] */
+ 0, /* dvc_err_code */
+ 0, /* adv_err_code */
+ 0, /* adv_err_addr */
+ 0, /* saved_dvc_err_code */
+ 0, /* saved_adv_err_code */
+ 0, /* saved_adv_err_addr */
+ 0 /* num_of_err */
+};
+
+static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config = {
+ ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */
+ 0x0000, /* 01 cfg_msw */
+ 0xFFFF, /* 02 disc_enable */
+ 0xFFFF, /* 03 wdtr_able */
+ 0x4444, /* 04 sdtr_speed1 */
+ 0xFFFF, /* 05 start_motor */
+ 0xFFFF, /* 06 tagqng_able */
+ 0xFFFF, /* 07 bios_scan */
+ 0, /* 08 scam_tolerant */
+ 7, /* 09 adapter_scsi_id */
+ 0, /* bios_boot_delay */
+ 3, /* 10 scsi_reset_delay */
+ 0, /* bios_id_lun */
+ 0, /* 11 termination_se */
+ 0, /* termination_lvd */
+ 0xFFE7, /* 12 bios_ctrl */
+ 0x4444, /* 13 sdtr_speed2 */
+ 0x4444, /* 14 sdtr_speed3 */
+ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */
+ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */
+ 0, /* 16 dvc_cntl */
+ 0x4444, /* 17 sdtr_speed4 */
+ 0, /* 18 serial_number_word1 */
+ 0, /* 19 serial_number_word2 */
+ 0, /* 20 serial_number_word3 */
+ 0, /* 21 check_sum */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , /* 22-29 oem_name[16] */
+ 0, /* 30 dvc_err_code */
+ 0, /* 31 adv_err_code */
+ 0, /* 32 adv_err_addr */
+ 0, /* 33 saved_dvc_err_code */
+ 0, /* 34 saved_adv_err_code */
+ 0, /* 35 saved_adv_err_addr */
+ 0, /* 36 reserved */
+ 0, /* 37 reserved */
+ 0, /* 38 reserved */
+ 0, /* 39 reserved */
+ 0, /* 40 reserved */
+ 0, /* 41 reserved */
+ 0, /* 42 reserved */
+ 0, /* 43 reserved */
+ 0, /* 44 reserved */
+ 0, /* 45 reserved */
+ 0, /* 46 reserved */
+ 0, /* 47 reserved */
+ 0, /* 48 reserved */
+ 0, /* 49 reserved */
+ 0, /* 50 reserved */
+ 0, /* 51 reserved */
+ 0, /* 52 reserved */
+ 0, /* 53 reserved */
+ 0, /* 54 reserved */
+ 0, /* 55 reserved */
+ 0, /* 56 cisptr_lsw */
+ 0, /* 57 cisprt_msw */
+ PCI_VENDOR_ID_ASP, /* 58 subsysvid */
+ PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */
+ 0, /* 60 reserved */
+ 0, /* 61 reserved */
+ 0, /* 62 reserved */
+ 0 /* 63 reserved */
+};
+
+static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar = {
+ 0, /* 00 cfg_lsw */
+ 0, /* 01 cfg_msw */
+ 0, /* 02 disc_enable */
+ 0, /* 03 wdtr_able */
+ 0, /* 04 sdtr_speed1 */
+ 0, /* 05 start_motor */
+ 0, /* 06 tagqng_able */
+ 0, /* 07 bios_scan */
+ 0, /* 08 scam_tolerant */
+ 1, /* 09 adapter_scsi_id */
+ 1, /* bios_boot_delay */
+ 1, /* 10 scsi_reset_delay */
+ 1, /* bios_id_lun */
+ 1, /* 11 termination_se */
+ 1, /* termination_lvd */
+ 0, /* 12 bios_ctrl */
+ 0, /* 13 sdtr_speed2 */
+ 0, /* 14 sdtr_speed3 */
+ 1, /* 15 max_host_qng */
+ 1, /* max_dvc_qng */
+ 0, /* 16 dvc_cntl */
+ 0, /* 17 sdtr_speed4 */
+ 0, /* 18 serial_number_word1 */
+ 0, /* 19 serial_number_word2 */
+ 0, /* 20 serial_number_word3 */
+ 0, /* 21 check_sum */
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ , /* 22-29 oem_name[16] */
+ 0, /* 30 dvc_err_code */
+ 0, /* 31 adv_err_code */
+ 0, /* 32 adv_err_addr */
+ 0, /* 33 saved_dvc_err_code */
+ 0, /* 34 saved_adv_err_code */
+ 0, /* 35 saved_adv_err_addr */
+ 0, /* 36 reserved */
+ 0, /* 37 reserved */
+ 0, /* 38 reserved */
+ 0, /* 39 reserved */
+ 0, /* 40 reserved */
+ 0, /* 41 reserved */
+ 0, /* 42 reserved */
+ 0, /* 43 reserved */
+ 0, /* 44 reserved */
+ 0, /* 45 reserved */
+ 0, /* 46 reserved */
+ 0, /* 47 reserved */
+ 0, /* 48 reserved */
+ 0, /* 49 reserved */
+ 0, /* 50 reserved */
+ 0, /* 51 reserved */
+ 0, /* 52 reserved */
+ 0, /* 53 reserved */
+ 0, /* 54 reserved */
+ 0, /* 55 reserved */
+ 0, /* 56 cisptr_lsw */
+ 0, /* 57 cisprt_msw */
+ 0, /* 58 subsysvid */
+ 0, /* 59 subsysid */
+ 0, /* 60 reserved */
+ 0, /* 61 reserved */
+ 0, /* 62 reserved */
+ 0 /* 63 reserved */
+};
+
+static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config = {
+ ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */
+ 0x0000, /* 01 cfg_msw */
+ 0xFFFF, /* 02 disc_enable */
+ 0xFFFF, /* 03 wdtr_able */
+ 0x5555, /* 04 sdtr_speed1 */
+ 0xFFFF, /* 05 start_motor */
+ 0xFFFF, /* 06 tagqng_able */
+ 0xFFFF, /* 07 bios_scan */
+ 0, /* 08 scam_tolerant */
+ 7, /* 09 adapter_scsi_id */
+ 0, /* bios_boot_delay */
+ 3, /* 10 scsi_reset_delay */
+ 0, /* bios_id_lun */
+ 0, /* 11 termination_se */
+ 0, /* termination_lvd */
+ 0xFFE7, /* 12 bios_ctrl */
+ 0x5555, /* 13 sdtr_speed2 */
+ 0x5555, /* 14 sdtr_speed3 */
+ ASC_DEF_MAX_HOST_QNG, /* 15 max_host_qng */
+ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */
+ 0, /* 16 dvc_cntl */
+ 0x5555, /* 17 sdtr_speed4 */
+ 0, /* 18 serial_number_word1 */
+ 0, /* 19 serial_number_word2 */
+ 0, /* 20 serial_number_word3 */
+ 0, /* 21 check_sum */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ , /* 22-29 oem_name[16] */
+ 0, /* 30 dvc_err_code */
+ 0, /* 31 adv_err_code */
+ 0, /* 32 adv_err_addr */
+ 0, /* 33 saved_dvc_err_code */
+ 0, /* 34 saved_adv_err_code */
+ 0, /* 35 saved_adv_err_addr */
+ 0, /* 36 reserved */
+ 0, /* 37 reserved */
+ 0, /* 38 reserved */
+ 0, /* 39 reserved */
+ 0, /* 40 reserved */
+ 0, /* 41 reserved */
+ 0, /* 42 reserved */
+ 0, /* 43 reserved */
+ 0, /* 44 reserved */
+ 0, /* 45 reserved */
+ 0, /* 46 reserved */
+ 0, /* 47 reserved */
+ 0, /* 48 reserved */
+ 0, /* 49 reserved */
+ 0, /* 50 reserved */
+ 0, /* 51 reserved */
+ 0, /* 52 reserved */
+ 0, /* 53 reserved */
+ 0, /* 54 reserved */
+ 0, /* 55 reserved */
+ 0, /* 56 cisptr_lsw */
+ 0, /* 57 cisprt_msw */
+ PCI_VENDOR_ID_ASP, /* 58 subsysvid */
+ PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */
+ 0, /* 60 reserved */
+ 0, /* 61 reserved */
+ 0, /* 62 reserved */
+ 0 /* 63 reserved */
+};
+
+static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = {
+ 0, /* 00 cfg_lsw */
+ 0, /* 01 cfg_msw */
+ 0, /* 02 disc_enable */
+ 0, /* 03 wdtr_able */
+ 0, /* 04 sdtr_speed1 */
+ 0, /* 05 start_motor */
+ 0, /* 06 tagqng_able */
+ 0, /* 07 bios_scan */
+ 0, /* 08 scam_tolerant */
+ 1, /* 09 adapter_scsi_id */
+ 1, /* bios_boot_delay */
+ 1, /* 10 scsi_reset_delay */
+ 1, /* bios_id_lun */
+ 1, /* 11 termination_se */
+ 1, /* termination_lvd */
+ 0, /* 12 bios_ctrl */
+ 0, /* 13 sdtr_speed2 */
+ 0, /* 14 sdtr_speed3 */
+ 1, /* 15 max_host_qng */
+ 1, /* max_dvc_qng */
+ 0, /* 16 dvc_cntl */
+ 0, /* 17 sdtr_speed4 */
+ 0, /* 18 serial_number_word1 */
+ 0, /* 19 serial_number_word2 */
+ 0, /* 20 serial_number_word3 */
+ 0, /* 21 check_sum */
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ , /* 22-29 oem_name[16] */
+ 0, /* 30 dvc_err_code */
+ 0, /* 31 adv_err_code */
+ 0, /* 32 adv_err_addr */
+ 0, /* 33 saved_dvc_err_code */
+ 0, /* 34 saved_adv_err_code */
+ 0, /* 35 saved_adv_err_addr */
+ 0, /* 36 reserved */
+ 0, /* 37 reserved */
+ 0, /* 38 reserved */
+ 0, /* 39 reserved */
+ 0, /* 40 reserved */
+ 0, /* 41 reserved */
+ 0, /* 42 reserved */
+ 0, /* 43 reserved */
+ 0, /* 44 reserved */
+ 0, /* 45 reserved */
+ 0, /* 46 reserved */
+ 0, /* 47 reserved */
+ 0, /* 48 reserved */
+ 0, /* 49 reserved */
+ 0, /* 50 reserved */
+ 0, /* 51 reserved */
+ 0, /* 52 reserved */
+ 0, /* 53 reserved */
+ 0, /* 54 reserved */
+ 0, /* 55 reserved */
+ 0, /* 56 cisptr_lsw */
+ 0, /* 57 cisprt_msw */
+ 0, /* 58 subsysvid */
+ 0, /* 59 subsysid */
+ 0, /* 60 reserved */
+ 0, /* 61 reserved */
+ 0, /* 62 reserved */
+ 0 /* 63 reserved */
+};
+
+#ifdef CONFIG_PCI
+/*
+ * Wait for EEPROM command to complete
+ */
+static void AdvWaitEEPCmd(AdvPortAddr iop_base)
+{
+ int eep_delay_ms;
+
+ for (eep_delay_ms = 0; eep_delay_ms < ADV_EEP_DELAY_MS; eep_delay_ms++) {
+ if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) &
+ ASC_EEP_CMD_DONE) {
+ break;
+ }
+ mdelay(1);
+ }
+ if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) ==
+ 0)
+ BUG();
+}
+
+/*
+ * Read the EEPROM from specified location
+ */
+static ushort AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
+{
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_READ | eep_word_addr);
+ AdvWaitEEPCmd(iop_base);
+ return AdvReadWordRegister(iop_base, IOPW_EE_DATA);
+}
+
+/*
+ * Write the EEPROM from 'cfg_buf'.
+ */
+static void AdvSet3550EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_3550_CONFIG *cfg_buf)
+{
+ ushort *wbuf;
+ ushort addr, chksum;
+ ushort *charfields;
+
+ wbuf = (ushort *)cfg_buf;
+ charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
+ chksum = 0;
+
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
+ AdvWaitEEPCmd(iop_base);
+
+ /*
+ * Write EEPROM from word 0 to word 20.
+ */
+ for (addr = ADV_EEP_DVC_CFG_BEGIN;
+ addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
+ ushort word;
+
+ if (*charfields++) {
+ word = cpu_to_le16(*wbuf);
+ } else {
+ word = *wbuf;
+ }
+ chksum += *wbuf; /* Checksum is calculated from word values. */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ mdelay(ADV_EEP_DELAY_MS);
+ }
+
+ /*
+ * Write EEPROM checksum at word 21.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ wbuf++;
+ charfields++;
+
+ /*
+ * Write EEPROM OEM name at words 22 to 29.
+ */
+ for (addr = ADV_EEP_DVC_CTL_BEGIN;
+ addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
+ ushort word;
+
+ if (*charfields++) {
+ word = cpu_to_le16(*wbuf);
+ } else {
+ word = *wbuf;
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
+ AdvWaitEEPCmd(iop_base);
+}
+
+/*
+ * Write the EEPROM from 'cfg_buf'.
+ */
+static void AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C0800_CONFIG *cfg_buf)
+{
+ ushort *wbuf;
+ ushort *charfields;
+ ushort addr, chksum;
+
+ wbuf = (ushort *)cfg_buf;
+ charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
+ chksum = 0;
+
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
+ AdvWaitEEPCmd(iop_base);
+
+ /*
+ * Write EEPROM from word 0 to word 20.
+ */
+ for (addr = ADV_EEP_DVC_CFG_BEGIN;
+ addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
+ ushort word;
+
+ if (*charfields++) {
+ word = cpu_to_le16(*wbuf);
+ } else {
+ word = *wbuf;
+ }
+ chksum += *wbuf; /* Checksum is calculated from word values. */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ mdelay(ADV_EEP_DELAY_MS);
+ }
+
+ /*
+ * Write EEPROM checksum at word 21.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ wbuf++;
+ charfields++;
+
+ /*
+ * Write EEPROM OEM name at words 22 to 29.
+ */
+ for (addr = ADV_EEP_DVC_CTL_BEGIN;
+ addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
+ ushort word;
+
+ if (*charfields++) {
+ word = cpu_to_le16(*wbuf);
+ } else {
+ word = *wbuf;
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
+ AdvWaitEEPCmd(iop_base);
+}
+
+/*
+ * Write the EEPROM from 'cfg_buf'.
+ */
+static void AdvSet38C1600EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C1600_CONFIG *cfg_buf)
+{
+ ushort *wbuf;
+ ushort *charfields;
+ ushort addr, chksum;
+
+ wbuf = (ushort *)cfg_buf;
+ charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
+ chksum = 0;
+
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
+ AdvWaitEEPCmd(iop_base);
+
+ /*
+ * Write EEPROM from word 0 to word 20.
+ */
+ for (addr = ADV_EEP_DVC_CFG_BEGIN;
+ addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
+ ushort word;
+
+ if (*charfields++) {
+ word = cpu_to_le16(*wbuf);
+ } else {
+ word = *wbuf;
+ }
+ chksum += *wbuf; /* Checksum is calculated from word values. */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ mdelay(ADV_EEP_DELAY_MS);
+ }
+
+ /*
+ * Write EEPROM checksum at word 21.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ wbuf++;
+ charfields++;
+
+ /*
+ * Write EEPROM OEM name at words 22 to 29.
+ */
+ for (addr = ADV_EEP_DVC_CTL_BEGIN;
+ addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
+ ushort word;
+
+ if (*charfields++) {
+ word = cpu_to_le16(*wbuf);
+ } else {
+ word = *wbuf;
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
+ AdvWaitEEPCmd(iop_base);
+}
+
+/*
+ * Read EEPROM configuration into the specified buffer.
+ *
+ * Return a checksum based on the EEPROM configuration read.
+ */
+static ushort AdvGet3550EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_3550_CONFIG *cfg_buf)
+{
+ ushort wval, chksum;
+ ushort *wbuf;
+ int eep_addr;
+ ushort *charfields;
+
+ charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
+ wbuf = (ushort *)cfg_buf;
+ chksum = 0;
+
+ for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
+ eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
+ wval = AdvReadEEPWord(iop_base, eep_addr);
+ chksum += wval; /* Checksum is calculated from word values. */
+ if (*charfields++) {
+ *wbuf = le16_to_cpu(wval);
+ } else {
+ *wbuf = wval;
+ }
+ }
+ /* Read checksum word. */
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ wbuf++;
+ charfields++;
+
+ /* Read rest of EEPROM not covered by the checksum. */
+ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
+ eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ if (*charfields++) {
+ *wbuf = le16_to_cpu(*wbuf);
+ }
+ }
+ return chksum;
+}
+
+/*
+ * Read EEPROM configuration into the specified buffer.
+ *
+ * Return a checksum based on the EEPROM configuration read.
+ */
+static ushort AdvGet38C0800EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C0800_CONFIG *cfg_buf)
+{
+ ushort wval, chksum;
+ ushort *wbuf;
+ int eep_addr;
+ ushort *charfields;
+
+ charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
+ wbuf = (ushort *)cfg_buf;
+ chksum = 0;
+
+ for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
+ eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
+ wval = AdvReadEEPWord(iop_base, eep_addr);
+ chksum += wval; /* Checksum is calculated from word values. */
+ if (*charfields++) {
+ *wbuf = le16_to_cpu(wval);
+ } else {
+ *wbuf = wval;
+ }
+ }
+ /* Read checksum word. */
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ wbuf++;
+ charfields++;
+
+ /* Read rest of EEPROM not covered by the checksum. */
+ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
+ eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ if (*charfields++) {
+ *wbuf = le16_to_cpu(*wbuf);
+ }
+ }
+ return chksum;
+}
+
+/*
+ * Read EEPROM configuration into the specified buffer.
+ *
+ * Return a checksum based on the EEPROM configuration read.
+ */
+static ushort AdvGet38C1600EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C1600_CONFIG *cfg_buf)
+{
+ ushort wval, chksum;
+ ushort *wbuf;
+ int eep_addr;
+ ushort *charfields;
+
+ charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
+ wbuf = (ushort *)cfg_buf;
+ chksum = 0;
+
+ for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
+ eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
+ wval = AdvReadEEPWord(iop_base, eep_addr);
+ chksum += wval; /* Checksum is calculated from word values. */
+ if (*charfields++) {
+ *wbuf = le16_to_cpu(wval);
+ } else {
+ *wbuf = wval;
+ }
+ }
+ /* Read checksum word. */
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ wbuf++;
+ charfields++;
+
+ /* Read rest of EEPROM not covered by the checksum. */
+ for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
+ eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ if (*charfields++) {
+ *wbuf = le16_to_cpu(*wbuf);
+ }
+ }
+ return chksum;
+}
+
+/*
+ * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and
+ * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
+ * all of this is done.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Note: Chip is stopped on entry.
+ */
+static int AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ADVEEP_3550_CONFIG eep_config;
+
+ iop_base = asc_dvc->iop_base;
+
+ warn_code = 0;
+
+ /*
+ * Read the board's EEPROM configuration.
+ *
+ * Set default values if a bad checksum is found.
+ */
+ if (AdvGet3550EEPConfig(iop_base, &eep_config) != eep_config.check_sum) {
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+
+ /*
+ * Set EEPROM default values.
+ */
+ memcpy(&eep_config, &Default_3550_EEPROM_Config,
+ sizeof(ADVEEP_3550_CONFIG));
+
+ /*
+ * Assume the 6 byte board serial number that was read from
+ * EEPROM is correct even if the EEPROM checksum failed.
+ */
+ eep_config.serial_number_word3 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
+
+ eep_config.serial_number_word2 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
+
+ eep_config.serial_number_word1 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);
+
+ AdvSet3550EEPConfig(iop_base, &eep_config);
+ }
+ /*
+ * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the
+ * EEPROM configuration that was read.
+ *
+ * This is the mapping of EEPROM fields to Adv Library fields.
+ */
+ asc_dvc->wdtr_able = eep_config.wdtr_able;
+ asc_dvc->sdtr_able = eep_config.sdtr_able;
+ asc_dvc->ultra_able = eep_config.ultra_able;
+ asc_dvc->tagqng_able = eep_config.tagqng_able;
+ asc_dvc->cfg->disc_enable = eep_config.disc_enable;
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+ asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
+ asc_dvc->start_motor = eep_config.start_motor;
+ asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
+ asc_dvc->bios_ctrl = eep_config.bios_ctrl;
+ asc_dvc->no_scam = eep_config.scam_tolerant;
+ asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
+ asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
+ asc_dvc->cfg->serial3 = eep_config.serial_number_word3;
+
+ /*
+ * Set the host maximum queuing (max. 253, min. 16) and the per device
+ * maximum queuing (max. 63, min. 4).
+ */
+ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_host_qng == 0) {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else {
+ eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
+ }
+ }
+
+ if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_dvc_qng == 0) {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else {
+ eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
+ }
+ }
+
+ /*
+ * If 'max_dvc_qng' is greater than 'max_host_qng', then
+ * set 'max_dvc_qng' to 'max_host_qng'.
+ */
+ if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
+ eep_config.max_dvc_qng = eep_config.max_host_qng;
+ }
+
+ /*
+ * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng'
+ * values based on possibly adjusted EEPROM values.
+ */
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+
+ /*
+ * If the EEPROM 'termination' field is set to automatic (0), then set
+ * the ADV_DVC_CFG 'termination' field to automatic also.
+ *
+ * If the termination is specified with a non-zero 'termination'
+ * value check that a legal value is set and set the ADV_DVC_CFG
+ * 'termination' field appropriately.
+ */
+ if (eep_config.termination == 0) {
+ asc_dvc->cfg->termination = 0; /* auto termination */
+ } else {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination == 1) {
+ asc_dvc->cfg->termination = TERM_CTL_SEL;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination == 2) {
+ asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination == 3) {
+ asc_dvc->cfg->termination =
+ TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L;
+ } else {
+ /*
+ * The EEPROM 'termination' field contains a bad value. Use
+ * automatic termination instead.
+ */
+ asc_dvc->cfg->termination = 0;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and
+ * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
+ * all of this is done.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Note: Chip is stopped on entry.
+ */
+static int AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ADVEEP_38C0800_CONFIG eep_config;
+ uchar tid, termination;
+ ushort sdtr_speed = 0;
+
+ iop_base = asc_dvc->iop_base;
+
+ warn_code = 0;
+
+ /*
+ * Read the board's EEPROM configuration.
+ *
+ * Set default values if a bad checksum is found.
+ */
+ if (AdvGet38C0800EEPConfig(iop_base, &eep_config) !=
+ eep_config.check_sum) {
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+
+ /*
+ * Set EEPROM default values.
+ */
+ memcpy(&eep_config, &Default_38C0800_EEPROM_Config,
+ sizeof(ADVEEP_38C0800_CONFIG));
+
+ /*
+ * Assume the 6 byte board serial number that was read from
+ * EEPROM is correct even if the EEPROM checksum failed.
+ */
+ eep_config.serial_number_word3 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
+
+ eep_config.serial_number_word2 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
+
+ eep_config.serial_number_word1 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);
+
+ AdvSet38C0800EEPConfig(iop_base, &eep_config);
+ }
+ /*
+ * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the
+ * EEPROM configuration that was read.
+ *
+ * This is the mapping of EEPROM fields to Adv Library fields.
+ */
+ asc_dvc->wdtr_able = eep_config.wdtr_able;
+ asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1;
+ asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2;
+ asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3;
+ asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4;
+ asc_dvc->tagqng_able = eep_config.tagqng_able;
+ asc_dvc->cfg->disc_enable = eep_config.disc_enable;
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+ asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
+ asc_dvc->start_motor = eep_config.start_motor;
+ asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
+ asc_dvc->bios_ctrl = eep_config.bios_ctrl;
+ asc_dvc->no_scam = eep_config.scam_tolerant;
+ asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
+ asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
+ asc_dvc->cfg->serial3 = eep_config.serial_number_word3;
+
+ /*
+ * For every Target ID if any of its 'sdtr_speed[1234]' bits
+ * are set, then set an 'sdtr_able' bit for it.
+ */
+ asc_dvc->sdtr_able = 0;
+ for (tid = 0; tid <= ADV_MAX_TID; tid++) {
+ if (tid == 0) {
+ sdtr_speed = asc_dvc->sdtr_speed1;
+ } else if (tid == 4) {
+ sdtr_speed = asc_dvc->sdtr_speed2;
+ } else if (tid == 8) {
+ sdtr_speed = asc_dvc->sdtr_speed3;
+ } else if (tid == 12) {
+ sdtr_speed = asc_dvc->sdtr_speed4;
+ }
+ if (sdtr_speed & ADV_MAX_TID) {
+ asc_dvc->sdtr_able |= (1 << tid);
+ }
+ sdtr_speed >>= 4;
+ }
+
+ /*
+ * Set the host maximum queuing (max. 253, min. 16) and the per device
+ * maximum queuing (max. 63, min. 4).
+ */
+ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_host_qng == 0) {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else {
+ eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
+ }
+ }
+
+ if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_dvc_qng == 0) {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else {
+ eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
+ }
+ }
+
+ /*
+ * If 'max_dvc_qng' is greater than 'max_host_qng', then
+ * set 'max_dvc_qng' to 'max_host_qng'.
+ */
+ if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
+ eep_config.max_dvc_qng = eep_config.max_host_qng;
+ }
+
+ /*
+ * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng'
+ * values based on possibly adjusted EEPROM values.
+ */
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+
+ /*
+ * If the EEPROM 'termination' field is set to automatic (0), then set
+ * the ADV_DVC_CFG 'termination' field to automatic also.
+ *
+ * If the termination is specified with a non-zero 'termination'
+ * value check that a legal value is set and set the ADV_DVC_CFG
+ * 'termination' field appropriately.
+ */
+ if (eep_config.termination_se == 0) {
+ termination = 0; /* auto termination for SE */
+ } else {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination_se == 1) {
+ termination = 0;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination_se == 2) {
+ termination = TERM_SE_HI;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination_se == 3) {
+ termination = TERM_SE;
+ } else {
+ /*
+ * The EEPROM 'termination_se' field contains a bad value.
+ * Use automatic termination instead.
+ */
+ termination = 0;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ if (eep_config.termination_lvd == 0) {
+ asc_dvc->cfg->termination = termination; /* auto termination for LVD */
+ } else {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination_lvd == 1) {
+ asc_dvc->cfg->termination = termination;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination_lvd == 2) {
+ asc_dvc->cfg->termination = termination | TERM_LVD_HI;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination_lvd == 3) {
+ asc_dvc->cfg->termination = termination | TERM_LVD;
+ } else {
+ /*
+ * The EEPROM 'termination_lvd' field contains a bad value.
+ * Use automatic termination instead.
+ */
+ asc_dvc->cfg->termination = termination;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Read the board's EEPROM configuration. Set fields in ASC_DVC_VAR and
+ * ASC_DVC_CFG based on the EEPROM settings. The chip is stopped while
+ * all of this is done.
+ *
+ * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Note: Chip is stopped on entry.
+ */
+static int AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ADVEEP_38C1600_CONFIG eep_config;
+ uchar tid, termination;
+ ushort sdtr_speed = 0;
+
+ iop_base = asc_dvc->iop_base;
+
+ warn_code = 0;
+
+ /*
+ * Read the board's EEPROM configuration.
+ *
+ * Set default values if a bad checksum is found.
+ */
+ if (AdvGet38C1600EEPConfig(iop_base, &eep_config) !=
+ eep_config.check_sum) {
+ struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc);
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+
+ /*
+ * Set EEPROM default values.
+ */
+ memcpy(&eep_config, &Default_38C1600_EEPROM_Config,
+ sizeof(ADVEEP_38C1600_CONFIG));
+
+ if (PCI_FUNC(pdev->devfn) != 0) {
+ u8 ints;
+ /*
+ * Disable Bit 14 (BIOS_ENABLE) to fix SPARC Ultra 60
+ * and old Mac system booting problem. The Expansion
+ * ROM must be disabled in Function 1 for these systems
+ */
+ eep_config.cfg_lsw &= ~ADV_EEPROM_BIOS_ENABLE;
+ /*
+ * Clear the INTAB (bit 11) if the GPIO 0 input
+ * indicates the Function 1 interrupt line is wired
+ * to INTB.
+ *
+ * Set/Clear Bit 11 (INTAB) from the GPIO bit 0 input:
+ * 1 - Function 1 interrupt line wired to INT A.
+ * 0 - Function 1 interrupt line wired to INT B.
+ *
+ * Note: Function 0 is always wired to INTA.
+ * Put all 5 GPIO bits in input mode and then read
+ * their input values.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_GPIO_CNTL, 0);
+ ints = AdvReadByteRegister(iop_base, IOPB_GPIO_DATA);
+ if ((ints & 0x01) == 0)
+ eep_config.cfg_lsw &= ~ADV_EEPROM_INTAB;
+ }
+
+ /*
+ * Assume the 6 byte board serial number that was read from
+ * EEPROM is correct even if the EEPROM checksum failed.
+ */
+ eep_config.serial_number_word3 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
+ eep_config.serial_number_word2 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
+ eep_config.serial_number_word1 =
+ AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);
+
+ AdvSet38C1600EEPConfig(iop_base, &eep_config);
+ }
+
+ /*
+ * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the
+ * EEPROM configuration that was read.
+ *
+ * This is the mapping of EEPROM fields to Adv Library fields.
+ */
+ asc_dvc->wdtr_able = eep_config.wdtr_able;
+ asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1;
+ asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2;
+ asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3;
+ asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4;
+ asc_dvc->ppr_able = 0;
+ asc_dvc->tagqng_able = eep_config.tagqng_able;
+ asc_dvc->cfg->disc_enable = eep_config.disc_enable;
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+ asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ASC_MAX_TID);
+ asc_dvc->start_motor = eep_config.start_motor;
+ asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
+ asc_dvc->bios_ctrl = eep_config.bios_ctrl;
+ asc_dvc->no_scam = eep_config.scam_tolerant;
+
+ /*
+ * For every Target ID if any of its 'sdtr_speed[1234]' bits
+ * are set, then set an 'sdtr_able' bit for it.
+ */
+ asc_dvc->sdtr_able = 0;
+ for (tid = 0; tid <= ASC_MAX_TID; tid++) {
+ if (tid == 0) {
+ sdtr_speed = asc_dvc->sdtr_speed1;
+ } else if (tid == 4) {
+ sdtr_speed = asc_dvc->sdtr_speed2;
+ } else if (tid == 8) {
+ sdtr_speed = asc_dvc->sdtr_speed3;
+ } else if (tid == 12) {
+ sdtr_speed = asc_dvc->sdtr_speed4;
+ }
+ if (sdtr_speed & ASC_MAX_TID) {
+ asc_dvc->sdtr_able |= (1 << tid);
+ }
+ sdtr_speed >>= 4;
+ }
+
+ /*
+ * Set the host maximum queuing (max. 253, min. 16) and the per device
+ * maximum queuing (max. 63, min. 4).
+ */
+ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_host_qng == 0) {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else {
+ eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
+ }
+ }
+
+ if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_dvc_qng == 0) {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else {
+ eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
+ }
+ }
+
+ /*
+ * If 'max_dvc_qng' is greater than 'max_host_qng', then
+ * set 'max_dvc_qng' to 'max_host_qng'.
+ */
+ if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
+ eep_config.max_dvc_qng = eep_config.max_host_qng;
+ }
+
+ /*
+ * Set ASC_DVC_VAR 'max_host_qng' and ASC_DVC_VAR 'max_dvc_qng'
+ * values based on possibly adjusted EEPROM values.
+ */
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+
+ /*
+ * If the EEPROM 'termination' field is set to automatic (0), then set
+ * the ASC_DVC_CFG 'termination' field to automatic also.
+ *
+ * If the termination is specified with a non-zero 'termination'
+ * value check that a legal value is set and set the ASC_DVC_CFG
+ * 'termination' field appropriately.
+ */
+ if (eep_config.termination_se == 0) {
+ termination = 0; /* auto termination for SE */
+ } else {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination_se == 1) {
+ termination = 0;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination_se == 2) {
+ termination = TERM_SE_HI;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination_se == 3) {
+ termination = TERM_SE;
+ } else {
+ /*
+ * The EEPROM 'termination_se' field contains a bad value.
+ * Use automatic termination instead.
+ */
+ termination = 0;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ if (eep_config.termination_lvd == 0) {
+ asc_dvc->cfg->termination = termination; /* auto termination for LVD */
+ } else {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination_lvd == 1) {
+ asc_dvc->cfg->termination = termination;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination_lvd == 2) {
+ asc_dvc->cfg->termination = termination | TERM_LVD_HI;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination_lvd == 3) {
+ asc_dvc->cfg->termination = termination | TERM_LVD;
+ } else {
+ /*
+ * The EEPROM 'termination_lvd' field contains a bad value.
+ * Use automatic termination instead.
+ */
+ asc_dvc->cfg->termination = termination;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Initialize the ADV_DVC_VAR structure.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ */
+static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
+{
+ struct asc_board *board = shost_priv(shost);
+ ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var;
+ unsigned short warn_code = 0;
+ AdvPortAddr iop_base = asc_dvc->iop_base;
+ u16 cmd;
+ int status;
+
+ asc_dvc->err_code = 0;
+
+ /*
+ * Save the state of the PCI Configuration Command Register
+ * "Parity Error Response Control" Bit. If the bit is clear (0),
+ * in AdvInitAsc3550/38C0800Driver() tell the microcode to ignore
+ * DMA parity errors.
+ */
+ asc_dvc->cfg->control_flag = 0;
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_PARITY) == 0)
+ asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR;
+
+ asc_dvc->cfg->chip_version =
+ AdvGetChipVersion(iop_base, asc_dvc->bus_type);
+
+ ASC_DBG(1, "iopb_chip_id_1: 0x%x 0x%x\n",
+ (ushort)AdvReadByteRegister(iop_base, IOPB_CHIP_ID_1),
+ (ushort)ADV_CHIP_ID_BYTE);
+
+ ASC_DBG(1, "iopw_chip_id_0: 0x%x 0x%x\n",
+ (ushort)AdvReadWordRegister(iop_base, IOPW_CHIP_ID_0),
+ (ushort)ADV_CHIP_ID_WORD);
+
+ /*
+ * Reset the chip to start and allow register writes.
+ */
+ if (AdvFindSignature(iop_base) == 0) {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return ADV_ERROR;
+ } else {
+ /*
+ * The caller must set 'chip_type' to a valid setting.
+ */
+ if (asc_dvc->chip_type != ADV_CHIP_ASC3550 &&
+ asc_dvc->chip_type != ADV_CHIP_ASC38C0800 &&
+ asc_dvc->chip_type != ADV_CHIP_ASC38C1600) {
+ asc_dvc->err_code |= ASC_IERR_BAD_CHIPTYPE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * Reset Chip.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
+ ADV_CTRL_REG_CMD_RESET);
+ mdelay(100);
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
+ ADV_CTRL_REG_CMD_WR_IO_REG);
+
+ if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
+ status = AdvInitFrom38C1600EEP(asc_dvc);
+ } else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
+ status = AdvInitFrom38C0800EEP(asc_dvc);
+ } else {
+ status = AdvInitFrom3550EEP(asc_dvc);
+ }
+ warn_code |= status;
+ }
+
+ if (warn_code != 0)
+ shost_printk(KERN_WARNING, shost, "warning: 0x%x\n", warn_code);
+
+ if (asc_dvc->err_code)
+ shost_printk(KERN_ERR, shost, "error code 0x%x\n",
+ asc_dvc->err_code);
+
+ return asc_dvc->err_code;
+}
+#endif
+
+static struct scsi_host_template advansys_template = {
+ .proc_name = DRV_NAME,
+#ifdef CONFIG_PROC_FS
+ .show_info = advansys_show_info,
+#endif
+ .name = DRV_NAME,
+ .info = advansys_info,
+ .queuecommand = advansys_queuecommand,
+ .eh_bus_reset_handler = advansys_reset,
+ .bios_param = advansys_biosparam,
+ .slave_configure = advansys_slave_configure,
+ /*
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma'
+ * must be set. The flag will be cleared in advansys_board_found
+ * for non-ISA adapters.
+ */
+ .unchecked_isa_dma = 1,
+ /*
+ * All adapters controlled by this driver are capable of large
+ * scatter-gather lists. According to the mid-level SCSI documentation
+ * this obviates any performance gain provided by setting
+ * 'use_clustering'. But empirically while CPU utilization is increased
+ * by enabling clustering, I/O throughput increases as well.
+ */
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int advansys_wide_init_chip(struct Scsi_Host *shost)
+{
+ struct asc_board *board = shost_priv(shost);
+ struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
+ int req_cnt = 0;
+ adv_req_t *reqp = NULL;
+ int sg_cnt = 0;
+ adv_sgblk_t *sgp;
+ int warn_code, err_code;
+
+ /*
+ * Allocate buffer carrier structures. The total size
+ * is about 4 KB, so allocate all at once.
+ */
+ adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL);
+ ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf);
+
+ if (!adv_dvc->carrier_buf)
+ goto kmalloc_failed;
+
+ /*
+ * Allocate up to 'max_host_qng' request structures for the Wide
+ * board. The total size is about 16 KB, so allocate all at once.
+ * If the allocation fails decrement and try again.
+ */
+ for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) {
+ reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL);
+
+ ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt,
+ (ulong)sizeof(adv_req_t) * req_cnt);
+
+ if (reqp)
+ break;
+ }
+
+ if (!reqp)
+ goto kmalloc_failed;
+
+ adv_dvc->orig_reqp = reqp;
+
+ /*
+ * Allocate up to ADV_TOT_SG_BLOCK request structures for
+ * the Wide board. Each structure is about 136 bytes.
+ */
+ board->adv_sgblkp = NULL;
+ for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) {
+ sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL);
+
+ if (!sgp)
+ break;
+
+ sgp->next_sgblkp = board->adv_sgblkp;
+ board->adv_sgblkp = sgp;
+
+ }
+
+ ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t),
+ sizeof(adv_sgblk_t) * sg_cnt);
+
+ if (!board->adv_sgblkp)
+ goto kmalloc_failed;
+
+ /*
+ * Point 'adv_reqp' to the request structures and
+ * link them together.
+ */
+ req_cnt--;
+ reqp[req_cnt].next_reqp = NULL;
+ for (; req_cnt > 0; req_cnt--) {
+ reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
+ }
+ board->adv_reqp = &reqp[0];
+
+ if (adv_dvc->chip_type == ADV_CHIP_ASC3550) {
+ ASC_DBG(2, "AdvInitAsc3550Driver()\n");
+ warn_code = AdvInitAsc3550Driver(adv_dvc);
+ } else if (adv_dvc->chip_type == ADV_CHIP_ASC38C0800) {
+ ASC_DBG(2, "AdvInitAsc38C0800Driver()\n");
+ warn_code = AdvInitAsc38C0800Driver(adv_dvc);
+ } else {
+ ASC_DBG(2, "AdvInitAsc38C1600Driver()\n");
+ warn_code = AdvInitAsc38C1600Driver(adv_dvc);
+ }
+ err_code = adv_dvc->err_code;
+
+ if (warn_code || err_code) {
+ shost_printk(KERN_WARNING, shost, "error: warn 0x%x, error "
+ "0x%x\n", warn_code, err_code);
+ }
+
+ goto exit;
+
+ kmalloc_failed:
+ shost_printk(KERN_ERR, shost, "error: kmalloc() failed\n");
+ err_code = ADV_ERROR;
+ exit:
+ return err_code;
+}
+
+static void advansys_wide_free_mem(struct asc_board *board)
+{
+ struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
+ kfree(adv_dvc->carrier_buf);
+ adv_dvc->carrier_buf = NULL;
+ kfree(adv_dvc->orig_reqp);
+ adv_dvc->orig_reqp = board->adv_reqp = NULL;
+ while (board->adv_sgblkp) {
+ adv_sgblk_t *sgp = board->adv_sgblkp;
+ board->adv_sgblkp = sgp->next_sgblkp;
+ kfree(sgp);
+ }
+}
+
+static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
+ int bus_type)
+{
+ struct pci_dev *pdev;
+ struct asc_board *boardp = shost_priv(shost);
+ ASC_DVC_VAR *asc_dvc_varp = NULL;
+ ADV_DVC_VAR *adv_dvc_varp = NULL;
+ int share_irq, warn_code, ret;
+
+ pdev = (bus_type == ASC_IS_PCI) ? to_pci_dev(boardp->dev) : NULL;
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DBG(1, "narrow board\n");
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ asc_dvc_varp->bus_type = bus_type;
+ asc_dvc_varp->drv_ptr = boardp;
+ asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg;
+ asc_dvc_varp->iop_base = iop;
+ } else {
+#ifdef CONFIG_PCI
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ adv_dvc_varp->drv_ptr = boardp;
+ adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg;
+ if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW) {
+ ASC_DBG(1, "wide board ASC-3550\n");
+ adv_dvc_varp->chip_type = ADV_CHIP_ASC3550;
+ } else if (pdev->device == PCI_DEVICE_ID_38C0800_REV1) {
+ ASC_DBG(1, "wide board ASC-38C0800\n");
+ adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800;
+ } else {
+ ASC_DBG(1, "wide board ASC-38C1600\n");
+ adv_dvc_varp->chip_type = ADV_CHIP_ASC38C1600;
+ }
+
+ boardp->asc_n_io_port = pci_resource_len(pdev, 1);
+ boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
+ if (!boardp->ioremap_addr) {
+ shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
+ "returned NULL\n",
+ (long)pci_resource_start(pdev, 1),
+ boardp->asc_n_io_port);
+ ret = -ENODEV;
+ goto err_shost;
+ }
+ adv_dvc_varp->iop_base = (AdvPortAddr)boardp->ioremap_addr;
+ ASC_DBG(1, "iop_base: 0x%p\n", adv_dvc_varp->iop_base);
+
+ /*
+ * Even though it isn't used to access wide boards, other
+ * than for the debug line below, save I/O Port address so
+ * that it can be reported.
+ */
+ boardp->ioport = iop;
+
+ ASC_DBG(1, "iopb_chip_id_1 0x%x, iopw_chip_id_0 0x%x\n",
+ (ushort)inp(iop + 1), (ushort)inpw(iop));
+#endif /* CONFIG_PCI */
+ }
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Set the board bus type and PCI IRQ before
+ * calling AscInitGetConfig().
+ */
+ switch (asc_dvc_varp->bus_type) {
+#ifdef CONFIG_ISA
+ case ASC_IS_ISA:
+ shost->unchecked_isa_dma = TRUE;
+ share_irq = 0;
+ break;
+ case ASC_IS_VL:
+ shost->unchecked_isa_dma = FALSE;
+ share_irq = 0;
+ break;
+ case ASC_IS_EISA:
+ shost->unchecked_isa_dma = FALSE;
+ share_irq = IRQF_SHARED;
+ break;
+#endif /* CONFIG_ISA */
+#ifdef CONFIG_PCI
+ case ASC_IS_PCI:
+ shost->unchecked_isa_dma = FALSE;
+ share_irq = IRQF_SHARED;
+ break;
+#endif /* CONFIG_PCI */
+ default:
+ shost_printk(KERN_ERR, shost, "unknown adapter type: "
+ "%d\n", asc_dvc_varp->bus_type);
+ shost->unchecked_isa_dma = TRUE;
+ share_irq = 0;
+ break;
+ }
+
+ /*
+ * NOTE: AscInitGetConfig() may change the board's
+ * bus_type value. The bus_type value should no
+ * longer be used. If the bus_type field must be
+ * referenced only use the bit-wise AND operator "&".
+ */
+ ASC_DBG(2, "AscInitGetConfig()\n");
+ ret = AscInitGetConfig(shost) ? -ENODEV : 0;
+ } else {
+#ifdef CONFIG_PCI
+ /*
+ * For Wide boards set PCI information before calling
+ * AdvInitGetConfig().
+ */
+ shost->unchecked_isa_dma = FALSE;
+ share_irq = IRQF_SHARED;
+ ASC_DBG(2, "AdvInitGetConfig()\n");
+
+ ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#endif /* CONFIG_PCI */
+ }
+
+ if (ret)
+ goto err_unmap;
+
+ /*
+ * Save the EEPROM configuration so that it can be displayed
+ * from /proc/scsi/advansys/[0...].
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+
+ ASCEEP_CONFIG *ep;
+
+ /*
+ * Set the adapter's target id bit in the 'init_tidmask' field.
+ */
+ boardp->init_tidmask |=
+ ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id);
+
+ /*
+ * Save EEPROM settings for the board.
+ */
+ ep = &boardp->eep_config.asc_eep;
+
+ ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
+ ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
+ ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
+ ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed);
+ ep->start_motor = asc_dvc_varp->start_motor;
+ ep->cntl = asc_dvc_varp->dvc_cntl;
+ ep->no_scam = asc_dvc_varp->no_scam;
+ ep->max_total_qng = asc_dvc_varp->max_total_qng;
+ ASC_EEP_SET_CHIP_ID(ep, asc_dvc_varp->cfg->chip_scsi_id);
+ /* 'max_tag_qng' is set to the same value for every device. */
+ ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0];
+ ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0];
+ ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1];
+ ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2];
+ ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3];
+ ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4];
+ ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5];
+
+ /*
+ * Modify board configuration.
+ */
+ ASC_DBG(2, "AscInitSetConfig()\n");
+ ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0;
+ if (ret)
+ goto err_unmap;
+ } else {
+ ADVEEP_3550_CONFIG *ep_3550;
+ ADVEEP_38C0800_CONFIG *ep_38C0800;
+ ADVEEP_38C1600_CONFIG *ep_38C1600;
+
+ /*
+ * Save Wide EEP Configuration Information.
+ */
+ if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
+ ep_3550 = &boardp->eep_config.adv_3550_eep;
+
+ ep_3550->adapter_scsi_id = adv_dvc_varp->chip_scsi_id;
+ ep_3550->max_host_qng = adv_dvc_varp->max_host_qng;
+ ep_3550->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
+ ep_3550->termination = adv_dvc_varp->cfg->termination;
+ ep_3550->disc_enable = adv_dvc_varp->cfg->disc_enable;
+ ep_3550->bios_ctrl = adv_dvc_varp->bios_ctrl;
+ ep_3550->wdtr_able = adv_dvc_varp->wdtr_able;
+ ep_3550->sdtr_able = adv_dvc_varp->sdtr_able;
+ ep_3550->ultra_able = adv_dvc_varp->ultra_able;
+ ep_3550->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep_3550->start_motor = adv_dvc_varp->start_motor;
+ ep_3550->scsi_reset_delay =
+ adv_dvc_varp->scsi_reset_wait;
+ ep_3550->serial_number_word1 =
+ adv_dvc_varp->cfg->serial1;
+ ep_3550->serial_number_word2 =
+ adv_dvc_varp->cfg->serial2;
+ ep_3550->serial_number_word3 =
+ adv_dvc_varp->cfg->serial3;
+ } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
+ ep_38C0800 = &boardp->eep_config.adv_38C0800_eep;
+
+ ep_38C0800->adapter_scsi_id =
+ adv_dvc_varp->chip_scsi_id;
+ ep_38C0800->max_host_qng = adv_dvc_varp->max_host_qng;
+ ep_38C0800->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
+ ep_38C0800->termination_lvd =
+ adv_dvc_varp->cfg->termination;
+ ep_38C0800->disc_enable =
+ adv_dvc_varp->cfg->disc_enable;
+ ep_38C0800->bios_ctrl = adv_dvc_varp->bios_ctrl;
+ ep_38C0800->wdtr_able = adv_dvc_varp->wdtr_able;
+ ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep_38C0800->sdtr_speed1 = adv_dvc_varp->sdtr_speed1;
+ ep_38C0800->sdtr_speed2 = adv_dvc_varp->sdtr_speed2;
+ ep_38C0800->sdtr_speed3 = adv_dvc_varp->sdtr_speed3;
+ ep_38C0800->sdtr_speed4 = adv_dvc_varp->sdtr_speed4;
+ ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep_38C0800->start_motor = adv_dvc_varp->start_motor;
+ ep_38C0800->scsi_reset_delay =
+ adv_dvc_varp->scsi_reset_wait;
+ ep_38C0800->serial_number_word1 =
+ adv_dvc_varp->cfg->serial1;
+ ep_38C0800->serial_number_word2 =
+ adv_dvc_varp->cfg->serial2;
+ ep_38C0800->serial_number_word3 =
+ adv_dvc_varp->cfg->serial3;
+ } else {
+ ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;
+
+ ep_38C1600->adapter_scsi_id =
+ adv_dvc_varp->chip_scsi_id;
+ ep_38C1600->max_host_qng = adv_dvc_varp->max_host_qng;
+ ep_38C1600->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
+ ep_38C1600->termination_lvd =
+ adv_dvc_varp->cfg->termination;
+ ep_38C1600->disc_enable =
+ adv_dvc_varp->cfg->disc_enable;
+ ep_38C1600->bios_ctrl = adv_dvc_varp->bios_ctrl;
+ ep_38C1600->wdtr_able = adv_dvc_varp->wdtr_able;
+ ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep_38C1600->sdtr_speed1 = adv_dvc_varp->sdtr_speed1;
+ ep_38C1600->sdtr_speed2 = adv_dvc_varp->sdtr_speed2;
+ ep_38C1600->sdtr_speed3 = adv_dvc_varp->sdtr_speed3;
+ ep_38C1600->sdtr_speed4 = adv_dvc_varp->sdtr_speed4;
+ ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep_38C1600->start_motor = adv_dvc_varp->start_motor;
+ ep_38C1600->scsi_reset_delay =
+ adv_dvc_varp->scsi_reset_wait;
+ ep_38C1600->serial_number_word1 =
+ adv_dvc_varp->cfg->serial1;
+ ep_38C1600->serial_number_word2 =
+ adv_dvc_varp->cfg->serial2;
+ ep_38C1600->serial_number_word3 =
+ adv_dvc_varp->cfg->serial3;
+ }
+
+ /*
+ * Set the adapter's target id bit in the 'init_tidmask' field.
+ */
+ boardp->init_tidmask |=
+ ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id);
+ }
+
+ /*
+ * Channels are numbered beginning with 0. For AdvanSys one host
+ * structure supports one channel. Multi-channel boards have a
+ * separate host structure for each channel.
+ */
+ shost->max_channel = 0;
+ if (ASC_NARROW_BOARD(boardp)) {
+ shost->max_id = ASC_MAX_TID + 1;
+ shost->max_lun = ASC_MAX_LUN + 1;
+ shost->max_cmd_len = ASC_MAX_CDB_LEN;
+
+ shost->io_port = asc_dvc_varp->iop_base;
+ boardp->asc_n_io_port = ASC_IOADR_GAP;
+ shost->this_id = asc_dvc_varp->cfg->chip_scsi_id;
+
+ /* Set maximum number of queues the adapter can handle. */
+ shost->can_queue = asc_dvc_varp->max_total_qng;
+ } else {
+ shost->max_id = ADV_MAX_TID + 1;
+ shost->max_lun = ADV_MAX_LUN + 1;
+ shost->max_cmd_len = ADV_MAX_CDB_LEN;
+
+ /*
+ * Save the I/O Port address and length even though
+ * I/O ports are not used to access Wide boards.
+ * Instead the Wide boards are accessed with
+ * PCI Memory Mapped I/O.
+ */
+ shost->io_port = iop;
+
+ shost->this_id = adv_dvc_varp->chip_scsi_id;
+
+ /* Set maximum number of queues the adapter can handle. */
+ shost->can_queue = adv_dvc_varp->max_host_qng;
+ }
+
+ /*
+ * Following v1.3.89, 'cmd_per_lun' is no longer needed
+ * and should be set to zero.
+ *
+ * But because of a bug introduced in v1.3.89 if the driver is
+ * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
+ * SCSI function 'allocate_device' will panic. To allow the driver
+ * to work as a module in these kernels set 'cmd_per_lun' to 1.
+ *
+ * Note: This is wrong. cmd_per_lun should be set to the depth
+ * you want on untagged devices always.
+ #ifdef MODULE
+ */
+ shost->cmd_per_lun = 1;
+/* #else
+ shost->cmd_per_lun = 0;
+#endif */
+
+ /*
+ * Set the maximum number of scatter-gather elements the
+ * adapter can handle.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Allow two commands with 'sg_tablesize' scatter-gather
+ * elements to be executed simultaneously. This value is
+ * the theoretical hardware limit. It may be decreased
+ * below.
+ */
+ shost->sg_tablesize =
+ (((asc_dvc_varp->max_total_qng - 2) / 2) *
+ ASC_SG_LIST_PER_Q) + 1;
+ } else {
+ shost->sg_tablesize = ADV_MAX_SG_LIST;
+ }
+
+ /*
+ * The value of 'sg_tablesize' can not exceed the SCSI
+ * mid-level driver definition of SG_ALL. SG_ALL also
+ * must not be exceeded, because it is used to define the
+ * size of the scatter-gather table in 'struct asc_sg_head'.
+ */
+ if (shost->sg_tablesize > SG_ALL) {
+ shost->sg_tablesize = SG_ALL;
+ }
+
+ ASC_DBG(1, "sg_tablesize: %d\n", shost->sg_tablesize);
+
+ /* BIOS start address. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ shost->base = AscGetChipBiosAddress(asc_dvc_varp->iop_base,
+ asc_dvc_varp->bus_type);
+ } else {
+ /*
+ * Fill-in BIOS board variables. The Wide BIOS saves
+ * information in LRAM that is used by the driver.
+ */
+ AdvReadWordLram(adv_dvc_varp->iop_base,
+ BIOS_SIGNATURE, boardp->bios_signature);
+ AdvReadWordLram(adv_dvc_varp->iop_base,
+ BIOS_VERSION, boardp->bios_version);
+ AdvReadWordLram(adv_dvc_varp->iop_base,
+ BIOS_CODESEG, boardp->bios_codeseg);
+ AdvReadWordLram(adv_dvc_varp->iop_base,
+ BIOS_CODELEN, boardp->bios_codelen);
+
+ ASC_DBG(1, "bios_signature 0x%x, bios_version 0x%x\n",
+ boardp->bios_signature, boardp->bios_version);
+
+ ASC_DBG(1, "bios_codeseg 0x%x, bios_codelen 0x%x\n",
+ boardp->bios_codeseg, boardp->bios_codelen);
+
+ /*
+ * If the BIOS saved a valid signature, then fill in
+ * the BIOS code segment base address.
+ */
+ if (boardp->bios_signature == 0x55AA) {
+ /*
+ * Convert x86 realmode code segment to a linear
+ * address by shifting left 4.
+ */
+ shost->base = ((ulong)boardp->bios_codeseg << 4);
+ } else {
+ shost->base = 0;
+ }
+ }
+
+ /*
+ * Register Board Resources - I/O Port, DMA, IRQ
+ */
+
+ /* Register DMA Channel for Narrow boards. */
+ shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */
+#ifdef CONFIG_ISA
+ if (ASC_NARROW_BOARD(boardp)) {
+ /* Register DMA channel for ISA bus. */
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
+ ret = request_dma(shost->dma_channel, DRV_NAME);
+ if (ret) {
+ shost_printk(KERN_ERR, shost, "request_dma() "
+ "%d failed %d\n",
+ shost->dma_channel, ret);
+ goto err_unmap;
+ }
+ AscEnableIsaDma(shost->dma_channel);
+ }
+ }
+#endif /* CONFIG_ISA */
+
+ /* Register IRQ Number. */
+ ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost);
+
+ ret = request_irq(boardp->irq, advansys_interrupt, share_irq,
+ DRV_NAME, shost);
+
+ if (ret) {
+ if (ret == -EBUSY) {
+ shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
+ "already in use\n", boardp->irq);
+ } else if (ret == -EINVAL) {
+ shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
+ "not valid\n", boardp->irq);
+ } else {
+ shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
+ "failed with %d\n", boardp->irq, ret);
+ }
+ goto err_free_dma;
+ }
+
+ /*
+ * Initialize board RISC chip and enable interrupts.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DBG(2, "AscInitAsc1000Driver()\n");
+
+ asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
+ if (!asc_dvc_varp->overrun_buf) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
+ warn_code = AscInitAsc1000Driver(asc_dvc_varp);
+
+ if (warn_code || asc_dvc_varp->err_code) {
+ shost_printk(KERN_ERR, shost, "error: init_state 0x%x, "
+ "warn 0x%x, error 0x%x\n",
+ asc_dvc_varp->init_state, warn_code,
+ asc_dvc_varp->err_code);
+ if (!asc_dvc_varp->overrun_dma) {
+ ret = -ENODEV;
+ goto err_free_mem;
+ }
+ }
+ } else {
+ if (advansys_wide_init_chip(shost)) {
+ ret = -ENODEV;
+ goto err_free_mem;
+ }
+ }
+
+ ASC_DBG_PRT_SCSI_HOST(2, shost);
+
+ ret = scsi_add_host(shost, boardp->dev);
+ if (ret)
+ goto err_free_mem;
+
+ scsi_scan_host(shost);
+ return 0;
+
+ err_free_mem:
+ if (ASC_NARROW_BOARD(boardp)) {
+ if (asc_dvc_varp->overrun_dma)
+ dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma,
+ ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+ kfree(asc_dvc_varp->overrun_buf);
+ } else
+ advansys_wide_free_mem(boardp);
+ err_free_irq:
+ free_irq(boardp->irq, shost);
+ err_free_dma:
+#ifdef CONFIG_ISA
+ if (shost->dma_channel != NO_ISA_DMA)
+ free_dma(shost->dma_channel);
+#endif
+ err_unmap:
+ if (boardp->ioremap_addr)
+ iounmap(boardp->ioremap_addr);
+ err_shost:
+ return ret;
+}
+
+/*
+ * advansys_release()
+ *
+ * Release resources allocated for a single AdvanSys adapter.
+ */
+static int advansys_release(struct Scsi_Host *shost)
+{
+ struct asc_board *board = shost_priv(shost);
+ ASC_DBG(1, "begin\n");
+ scsi_remove_host(shost);
+ free_irq(board->irq, shost);
+#ifdef CONFIG_ISA
+ if (shost->dma_channel != NO_ISA_DMA) {
+ ASC_DBG(1, "free_dma()\n");
+ free_dma(shost->dma_channel);
+ }
+#endif
+ if (ASC_NARROW_BOARD(board)) {
+ dma_unmap_single(board->dev,
+ board->dvc_var.asc_dvc_var.overrun_dma,
+ ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+ kfree(board->dvc_var.asc_dvc_var.overrun_buf);
+ } else {
+ iounmap(board->ioremap_addr);
+ advansys_wide_free_mem(board);
+ }
+ scsi_host_put(shost);
+ ASC_DBG(1, "end\n");
+ return 0;
+}
+
+#define ASC_IOADR_TABLE_MAX_IX 11
+
+static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = {
+ 0x100, 0x0110, 0x120, 0x0130, 0x140, 0x0150, 0x0190,
+ 0x0210, 0x0230, 0x0250, 0x0330
+};
+
+/*
+ * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw. It decodes as:
+ * 00: 10
+ * 01: 11
+ * 10: 12
+ * 11: 15
+ */
+static unsigned int advansys_isa_irq_no(PortAddr iop_base)
+{
+ unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
+ unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
+ if (chip_irq == 13)
+ chip_irq = 15;
+ return chip_irq;
+}
+
+static int advansys_isa_probe(struct device *dev, unsigned int id)
+{
+ int err = -ENODEV;
+ PortAddr iop_base = _asc_def_iop_base[id];
+ struct Scsi_Host *shost;
+ struct asc_board *board;
+
+ if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) {
+ ASC_DBG(1, "I/O port 0x%x busy\n", iop_base);
+ return -ENODEV;
+ }
+ ASC_DBG(1, "probing I/O port 0x%x\n", iop_base);
+ if (!AscFindSignature(iop_base))
+ goto release_region;
+ if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT))
+ goto release_region;
+
+ err = -ENOMEM;
+ shost = scsi_host_alloc(&advansys_template, sizeof(*board));
+ if (!shost)
+ goto release_region;
+
+ board = shost_priv(shost);
+ board->irq = advansys_isa_irq_no(iop_base);
+ board->dev = dev;
+
+ err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
+ if (err)
+ goto free_host;
+
+ dev_set_drvdata(dev, shost);
+ return 0;
+
+ free_host:
+ scsi_host_put(shost);
+ release_region:
+ release_region(iop_base, ASC_IOADR_GAP);
+ return err;
+}
+
+static int advansys_isa_remove(struct device *dev, unsigned int id)
+{
+ int ioport = _asc_def_iop_base[id];
+ advansys_release(dev_get_drvdata(dev));
+ release_region(ioport, ASC_IOADR_GAP);
+ return 0;
+}
+
+static struct isa_driver advansys_isa_driver = {
+ .probe = advansys_isa_probe,
+ .remove = advansys_isa_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+};
+
+/*
+ * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as:
+ * 000: invalid
+ * 001: 10
+ * 010: 11
+ * 011: 12
+ * 100: invalid
+ * 101: 14
+ * 110: 15
+ * 111: invalid
+ */
+static unsigned int advansys_vlb_irq_no(PortAddr iop_base)
+{
+ unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
+ unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9;
+ if ((chip_irq < 10) || (chip_irq == 13) || (chip_irq > 15))
+ return 0;
+ return chip_irq;
+}
+
+static int advansys_vlb_probe(struct device *dev, unsigned int id)
+{
+ int err = -ENODEV;
+ PortAddr iop_base = _asc_def_iop_base[id];
+ struct Scsi_Host *shost;
+ struct asc_board *board;
+
+ if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) {
+ ASC_DBG(1, "I/O port 0x%x busy\n", iop_base);
+ return -ENODEV;
+ }
+ ASC_DBG(1, "probing I/O port 0x%x\n", iop_base);
+ if (!AscFindSignature(iop_base))
+ goto release_region;
+ /*
+ * I don't think this condition can actually happen, but the old
+ * driver did it, and the chances of finding a VLB setup in 2007
+ * to do testing with is slight to none.
+ */
+ if (AscGetChipVersion(iop_base, ASC_IS_VL) > ASC_CHIP_MAX_VER_VL)
+ goto release_region;
+
+ err = -ENOMEM;
+ shost = scsi_host_alloc(&advansys_template, sizeof(*board));
+ if (!shost)
+ goto release_region;
+
+ board = shost_priv(shost);
+ board->irq = advansys_vlb_irq_no(iop_base);
+ board->dev = dev;
+
+ err = advansys_board_found(shost, iop_base, ASC_IS_VL);
+ if (err)
+ goto free_host;
+
+ dev_set_drvdata(dev, shost);
+ return 0;
+
+ free_host:
+ scsi_host_put(shost);
+ release_region:
+ release_region(iop_base, ASC_IOADR_GAP);
+ return -ENODEV;
+}
+
+static struct isa_driver advansys_vlb_driver = {
+ .probe = advansys_vlb_probe,
+ .remove = advansys_isa_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "advansys_vlb",
+ },
+};
+
+static struct eisa_device_id advansys_eisa_table[] = {
+ { "ABP7401" },
+ { "ABP7501" },
+ { "" }
+};
+
+MODULE_DEVICE_TABLE(eisa, advansys_eisa_table);
+
+/*
+ * EISA is a little more tricky than PCI; each EISA device may have two
+ * channels, and this driver is written to make each channel its own Scsi_Host
+ */
+struct eisa_scsi_data {
+ struct Scsi_Host *host[2];
+};
+
+/*
+ * The EISA IRQ number is found in bits 8 to 10 of the CfgLsw. It decodes as:
+ * 000: 10
+ * 001: 11
+ * 010: 12
+ * 011: invalid
+ * 100: 14
+ * 101: 15
+ * 110: invalid
+ * 111: invalid
+ */
+static unsigned int advansys_eisa_irq_no(struct eisa_device *edev)
+{
+ unsigned short cfg_lsw = inw(edev->base_addr + 0xc86);
+ unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10;
+ if ((chip_irq == 13) || (chip_irq > 15))
+ return 0;
+ return chip_irq;
+}
+
+static int advansys_eisa_probe(struct device *dev)
+{
+ int i, ioport, irq = 0;
+ int err;
+ struct eisa_device *edev = to_eisa_device(dev);
+ struct eisa_scsi_data *data;
+
+ err = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto fail;
+ ioport = edev->base_addr + 0xc30;
+
+ err = -ENODEV;
+ for (i = 0; i < 2; i++, ioport += 0x20) {
+ struct asc_board *board;
+ struct Scsi_Host *shost;
+ if (!request_region(ioport, ASC_IOADR_GAP, DRV_NAME)) {
+ printk(KERN_WARNING "Region %x-%x busy\n", ioport,
+ ioport + ASC_IOADR_GAP - 1);
+ continue;
+ }
+ if (!AscFindSignature(ioport)) {
+ release_region(ioport, ASC_IOADR_GAP);
+ continue;
+ }
+
+ /*
+ * I don't know why we need to do this for EISA chips, but
+ * not for any others. It looks to be equivalent to
+ * AscGetChipCfgMsw, but I may have overlooked something,
+ * so I'm not converting it until I get an EISA board to
+ * test with.
+ */
+ inw(ioport + 4);
+
+ if (!irq)
+ irq = advansys_eisa_irq_no(edev);
+
+ err = -ENOMEM;
+ shost = scsi_host_alloc(&advansys_template, sizeof(*board));
+ if (!shost)
+ goto release_region;
+
+ board = shost_priv(shost);
+ board->irq = irq;
+ board->dev = dev;
+
+ err = advansys_board_found(shost, ioport, ASC_IS_EISA);
+ if (!err) {
+ data->host[i] = shost;
+ continue;
+ }
+
+ scsi_host_put(shost);
+ release_region:
+ release_region(ioport, ASC_IOADR_GAP);
+ break;
+ }
+
+ if (err)
+ goto free_data;
+ dev_set_drvdata(dev, data);
+ return 0;
+
+ free_data:
+ kfree(data->host[0]);
+ kfree(data->host[1]);
+ kfree(data);
+ fail:
+ return err;
+}
+
+static int advansys_eisa_remove(struct device *dev)
+{
+ int i;
+ struct eisa_scsi_data *data = dev_get_drvdata(dev);
+
+ for (i = 0; i < 2; i++) {
+ int ioport;
+ struct Scsi_Host *shost = data->host[i];
+ if (!shost)
+ continue;
+ ioport = shost->io_port;
+ advansys_release(shost);
+ release_region(ioport, ASC_IOADR_GAP);
+ }
+
+ kfree(data);
+ return 0;
+}
+
+static struct eisa_driver advansys_eisa_driver = {
+ .id_table = advansys_eisa_table,
+ .driver = {
+ .name = DRV_NAME,
+ .probe = advansys_eisa_probe,
+ .remove = advansys_eisa_remove,
+ }
+};
+
+/* PCI Devices supported by this driver */
+static struct pci_device_id advansys_pci_tbl[] = {
+ {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
+
+static void advansys_set_latency(struct pci_dev *pdev)
+{
+ if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) ||
+ (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) {
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0);
+ } else {
+ u8 latency;
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency);
+ if (latency < 0x20)
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
+ }
+}
+
+static int advansys_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err, ioport;
+ struct Scsi_Host *shost;
+ struct asc_board *board;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto fail;
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto disable_device;
+ pci_set_master(pdev);
+ advansys_set_latency(pdev);
+
+ err = -ENODEV;
+ if (pci_resource_len(pdev, 0) == 0)
+ goto release_region;
+
+ ioport = pci_resource_start(pdev, 0);
+
+ err = -ENOMEM;
+ shost = scsi_host_alloc(&advansys_template, sizeof(*board));
+ if (!shost)
+ goto release_region;
+
+ board = shost_priv(shost);
+ board->irq = pdev->irq;
+ board->dev = &pdev->dev;
+
+ if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW ||
+ pdev->device == PCI_DEVICE_ID_38C0800_REV1 ||
+ pdev->device == PCI_DEVICE_ID_38C1600_REV1) {
+ board->flags |= ASC_IS_WIDE_BOARD;
+ }
+
+ err = advansys_board_found(shost, ioport, ASC_IS_PCI);
+ if (err)
+ goto free_host;
+
+ pci_set_drvdata(pdev, shost);
+ return 0;
+
+ free_host:
+ scsi_host_put(shost);
+ release_region:
+ pci_release_regions(pdev);
+ disable_device:
+ pci_disable_device(pdev);
+ fail:
+ return err;
+}
+
+static void advansys_pci_remove(struct pci_dev *pdev)
+{
+ advansys_release(pci_get_drvdata(pdev));
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver advansys_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = advansys_pci_tbl,
+ .probe = advansys_pci_probe,
+ .remove = advansys_pci_remove,
+};
+
+static int __init advansys_init(void)
+{
+ int error;
+
+ error = isa_register_driver(&advansys_isa_driver,
+ ASC_IOADR_TABLE_MAX_IX);
+ if (error)
+ goto fail;
+
+ error = isa_register_driver(&advansys_vlb_driver,
+ ASC_IOADR_TABLE_MAX_IX);
+ if (error)
+ goto unregister_isa;
+
+ error = eisa_driver_register(&advansys_eisa_driver);
+ if (error)
+ goto unregister_vlb;
+
+ error = pci_register_driver(&advansys_pci_driver);
+ if (error)
+ goto unregister_eisa;
+
+ return 0;
+
+ unregister_eisa:
+ eisa_driver_unregister(&advansys_eisa_driver);
+ unregister_vlb:
+ isa_unregister_driver(&advansys_vlb_driver);
+ unregister_isa:
+ isa_unregister_driver(&advansys_isa_driver);
+ fail:
+ return error;
+}
+
+static void __exit advansys_exit(void)
+{
+ pci_unregister_driver(&advansys_pci_driver);
+ eisa_driver_unregister(&advansys_eisa_driver);
+ isa_unregister_driver(&advansys_vlb_driver);
+ isa_unregister_driver(&advansys_isa_driver);
+}
+
+module_init(advansys_init);
+module_exit(advansys_exit);
+
+MODULE_LICENSE("GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
new file mode 100644
index 000000000..e31c460a1
--- /dev/null
+++ b/drivers/scsi/aha152x.c
@@ -0,0 +1,3398 @@
+/* aha152x.c -- Adaptec AHA-152x driver
+ * Author: Jürgen E. Fischer, fischer@norbit.de
+ * Copyright 1993-2004 Jürgen E. Fischer
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ *
+ * $Id: aha152x.c,v 2.7 2004/01/24 11:42:59 fischer Exp $
+ *
+ * $Log: aha152x.c,v $
+ * Revision 2.7 2004/01/24 11:42:59 fischer
+ * - gather code that is not used by PCMCIA at the end
+ * - move request_region for !PCMCIA case to detection
+ * - migration to new scsi host api (remove legacy code)
+ * - free host scribble before scsi_done
+ * - fix error handling
+ * - one isapnp device added to id_table
+ *
+ * Revision 2.6 2003/10/30 20:52:47 fischer
+ * - interfaces changes for kernel 2.6
+ * - aha152x_probe_one introduced for pcmcia stub
+ * - fixed pnpdev handling
+ * - instead of allocation a new one, reuse command for request sense after check condition and reset
+ * - fixes race in is_complete
+ *
+ * Revision 2.5 2002/04/14 11:24:53 fischer
+ * - isapnp support
+ * - abort fixed
+ * - 2.5 support
+ *
+ * Revision 2.4 2000/12/16 12:53:56 fischer
+ * - allow REQUEST SENSE to be queued
+ * - handle shared PCI interrupts
+ *
+ * Revision 2.3 2000/11/04 16:40:26 fischer
+ * - handle data overruns
+ * - extend timeout for data phases
+ *
+ * Revision 2.2 2000/08/08 19:54:53 fischer
+ * - minor changes
+ *
+ * Revision 2.1 2000/05/17 16:23:17 fischer
+ * - signature update
+ * - fix for data out w/o scatter gather
+ *
+ * Revision 2.0 1999/12/25 15:07:32 fischer
+ * - interrupt routine completly reworked
+ * - basic support for new eh code
+ *
+ * Revision 1.21 1999/11/10 23:46:36 fischer
+ * - default to synchronous operation
+ * - synchronous negotiation fixed
+ * - added timeout to loops
+ * - debugging output can be controlled through procfs
+ *
+ * Revision 1.20 1999/11/07 18:37:31 fischer
+ * - synchronous operation works
+ * - resid support for sg driver
+ *
+ * Revision 1.19 1999/11/02 22:39:59 fischer
+ * - moved leading comments to README.aha152x
+ * - new additional module parameters
+ * - updates for 2.3
+ * - support for the Tripace TC1550 controller
+ * - interrupt handling changed
+ *
+ * Revision 1.18 1996/09/07 20:10:40 fischer
+ * - fixed can_queue handling (multiple outstanding commands working again)
+ *
+ * Revision 1.17 1996/08/17 16:05:14 fischer
+ * - biosparam improved
+ * - interrupt verification
+ * - updated documentation
+ * - cleanups
+ *
+ * Revision 1.16 1996/06/09 00:04:56 root
+ * - added configuration symbols for insmod (aha152x/aha152x1)
+ *
+ * Revision 1.15 1996/04/30 14:52:06 fischer
+ * - proc info fixed
+ * - support for extended translation for >1GB disks
+ *
+ * Revision 1.14 1996/01/17 15:11:20 fischer
+ * - fixed lockup in MESSAGE IN phase after reconnection
+ *
+ * Revision 1.13 1996/01/09 02:15:53 fischer
+ * - some cleanups
+ * - moved request_irq behind controller initialization
+ * (to avoid spurious interrupts)
+ *
+ * Revision 1.12 1995/12/16 12:26:07 fischer
+ * - barrier()s added
+ * - configurable RESET delay added
+ *
+ * Revision 1.11 1995/12/06 21:18:35 fischer
+ * - some minor updates
+ *
+ * Revision 1.10 1995/07/22 19:18:45 fischer
+ * - support for 2 controllers
+ * - started synchronous data transfers (not working yet)
+ *
+ * Revision 1.9 1995/03/18 09:20:24 root
+ * - patches for PCMCIA and modules
+ *
+ * Revision 1.8 1995/01/21 22:07:19 root
+ * - snarf_region => request_region
+ * - aha152x_intr interface change
+ *
+ * Revision 1.7 1995/01/02 23:19:36 root
+ * - updated COMMAND_SIZE to cmd_len
+ * - changed sti() to restore_flags()
+ * - fixed some #ifdef which generated warnings
+ *
+ * Revision 1.6 1994/11/24 20:35:27 root
+ * - problem with odd number of bytes in fifo fixed
+ *
+ * Revision 1.5 1994/10/30 14:39:56 root
+ * - abort code fixed
+ * - debugging improved
+ *
+ * Revision 1.4 1994/09/12 11:33:01 root
+ * - irqaction to request_irq
+ * - abortion updated
+ *
+ * Revision 1.3 1994/08/04 13:53:05 root
+ * - updates for mid-level-driver changes
+ * - accept unexpected BUSFREE phase as error condition
+ * - parity check now configurable
+ *
+ * Revision 1.2 1994/07/03 12:56:36 root
+ * - cleaned up debugging code
+ * - more tweaking on reset delays
+ * - updated abort/reset code (pretty untested...)
+ *
+ * Revision 1.1 1994/05/28 21:18:49 root
+ * - update for mid-level interface change (abort-reset)
+ * - delays after resets adjusted for some slow devices
+ *
+ * Revision 1.0 1994/03/25 12:52:00 root
+ * - Fixed "more data than expected" problem
+ * - added new BIOS signatures
+ *
+ * Revision 0.102 1994/01/31 20:44:12 root
+ * - minor changes in insw/outsw handling
+ *
+ * Revision 0.101 1993/12/13 01:16:27 root
+ * - fixed STATUS phase (non-GOOD stati were dropped sometimes;
+ * fixes problems with CD-ROM sector size detection & media change)
+ *
+ * Revision 0.100 1993/12/10 16:58:47 root
+ * - fix for unsuccessful selections in case of non-continuous id assignments
+ * on the scsi bus.
+ *
+ * Revision 0.99 1993/10/24 16:19:59 root
+ * - fixed DATA IN (rare read errors gone)
+ *
+ * Revision 0.98 1993/10/17 12:54:44 root
+ * - fixed some recent fixes (shame on me)
+ * - moved initialization of scratch area to aha152x_queue
+ *
+ * Revision 0.97 1993/10/09 18:53:53 root
+ * - DATA IN fixed. Rarely left data in the fifo.
+ *
+ * Revision 0.96 1993/10/03 00:53:59 root
+ * - minor changes on DATA IN
+ *
+ * Revision 0.95 1993/09/24 10:36:01 root
+ * - change handling of MSGI after reselection
+ * - fixed sti/cli
+ * - minor changes
+ *
+ * Revision 0.94 1993/09/18 14:08:22 root
+ * - fixed bug in multiple outstanding command code
+ * - changed detection
+ * - support for kernel command line configuration
+ * - reset corrected
+ * - changed message handling
+ *
+ * Revision 0.93 1993/09/15 20:41:19 root
+ * - fixed bugs with multiple outstanding commands
+ *
+ * Revision 0.92 1993/09/13 02:46:33 root
+ * - multiple outstanding commands work (no problems with IBM drive)
+ *
+ * Revision 0.91 1993/09/12 20:51:46 root
+ * added multiple outstanding commands
+ * (some problem with this $%&? IBM device remain)
+ *
+ * Revision 0.9 1993/09/12 11:11:22 root
+ * - corrected auto-configuration
+ * - changed the auto-configuration (added some '#define's)
+ * - added support for dis-/reconnection
+ *
+ * Revision 0.8 1993/09/06 23:09:39 root
+ * - added support for the drive activity light
+ * - minor changes
+ *
+ * Revision 0.7 1993/09/05 14:30:15 root
+ * - improved phase detection
+ * - now using the new snarf_region code of 0.99pl13
+ *
+ * Revision 0.6 1993/09/02 11:01:38 root
+ * first public release; added some signatures and biosparam()
+ *
+ * Revision 0.5 1993/08/30 10:23:30 root
+ * fixed timing problems with my IBM drive
+ *
+ * Revision 0.4 1993/08/29 14:06:52 root
+ * fixed some problems with timeouts due incomplete commands
+ *
+ * Revision 0.3 1993/08/28 15:55:03 root
+ * writing data works too. mounted and worked on a dos partition
+ *
+ * Revision 0.2 1993/08/27 22:42:07 root
+ * reading data works. Mounted a msdos partition.
+ *
+ * Revision 0.1 1993/08/25 13:38:30 root
+ * first "damn thing doesn't work" version
+ *
+ * Revision 0.0 1993/08/14 19:54:25 root
+ * empty function bodies; detect() works.
+ *
+ *
+ **************************************************************************
+
+ see Documentation/scsi/aha152x.txt for configuration details
+
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/isapnp.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <scsi/scsicam.h>
+
+#include "scsi.h"
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_eh.h>
+#include "aha152x.h"
+
+static LIST_HEAD(aha152x_host_list);
+
+
+/* DEFINES */
+
+/* For PCMCIA cards, always use AUTOCONF */
+#if defined(PCMCIA) || defined(MODULE)
+#if !defined(AUTOCONF)
+#define AUTOCONF
+#endif
+#endif
+
+#if !defined(AUTOCONF) && !defined(SETUP0)
+#error define AUTOCONF or SETUP0
+#endif
+
+#define DO_LOCK(flags) spin_lock_irqsave(&QLOCK,flags)
+#define DO_UNLOCK(flags) spin_unlock_irqrestore(&QLOCK,flags)
+
+#define LEAD "(scsi%d:%d:%d) "
+#define INFO_LEAD KERN_INFO LEAD
+#define CMDINFO(cmd) \
+ (cmd) ? ((cmd)->device->host->host_no) : -1, \
+ (cmd) ? ((cmd)->device->id & 0x0f) : -1, \
+ (cmd) ? ((u8)(cmd)->device->lun & 0x07) : -1
+
+static inline void
+CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
+{
+ scsi_set_resid(cmd, scsi_get_resid(cmd) + inc);
+}
+
+#define DELAY_DEFAULT 1000
+
+#if defined(PCMCIA)
+#define IRQ_MIN 0
+#define IRQ_MAX 16
+#else
+#define IRQ_MIN 9
+#if defined(__PPC)
+#define IRQ_MAX (nr_irqs-1)
+#else
+#define IRQ_MAX 12
+#endif
+#endif
+
+enum {
+ not_issued = 0x0001, /* command not yet issued */
+ selecting = 0x0002, /* target is being selected */
+ identified = 0x0004, /* IDENTIFY was sent */
+ disconnected = 0x0008, /* target disconnected */
+ completed = 0x0010, /* target sent COMMAND COMPLETE */
+ aborted = 0x0020, /* ABORT was sent */
+ resetted = 0x0040, /* BUS DEVICE RESET was sent */
+ spiordy = 0x0080, /* waiting for SPIORDY to raise */
+ syncneg = 0x0100, /* synchronous negotiation in progress */
+ aborting = 0x0200, /* ABORT is pending */
+ resetting = 0x0400, /* BUS DEVICE RESET is pending */
+ check_condition = 0x0800, /* requesting sense after CHECK CONDITION */
+};
+
+MODULE_AUTHOR("Jürgen Fischer");
+MODULE_DESCRIPTION(AHA152X_REVID);
+MODULE_LICENSE("GPL");
+
+#if !defined(PCMCIA)
+#if defined(MODULE)
+static int io[] = {0, 0};
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io,"base io address of controller");
+
+static int irq[] = {0, 0};
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq,"interrupt for controller");
+
+static int scsiid[] = {7, 7};
+module_param_array(scsiid, int, NULL, 0);
+MODULE_PARM_DESC(scsiid,"scsi id of controller");
+
+static int reconnect[] = {1, 1};
+module_param_array(reconnect, int, NULL, 0);
+MODULE_PARM_DESC(reconnect,"allow targets to disconnect");
+
+static int parity[] = {1, 1};
+module_param_array(parity, int, NULL, 0);
+MODULE_PARM_DESC(parity,"use scsi parity");
+
+static int sync[] = {1, 1};
+module_param_array(sync, int, NULL, 0);
+MODULE_PARM_DESC(sync,"use synchronous transfers");
+
+static int delay[] = {DELAY_DEFAULT, DELAY_DEFAULT};
+module_param_array(delay, int, NULL, 0);
+MODULE_PARM_DESC(delay,"scsi reset delay");
+
+static int exttrans[] = {0, 0};
+module_param_array(exttrans, int, NULL, 0);
+MODULE_PARM_DESC(exttrans,"use extended translation");
+
+static int aha152x[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0};
+module_param_array(aha152x, int, NULL, 0);
+MODULE_PARM_DESC(aha152x, "parameters for first controller");
+
+static int aha152x1[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0};
+module_param_array(aha152x1, int, NULL, 0);
+MODULE_PARM_DESC(aha152x1, "parameters for second controller");
+#endif /* MODULE */
+
+#ifdef __ISAPNP__
+static struct isapnp_device_id id_table[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 },
+ { ISAPNP_DEVICE_SINGLE_END, }
+};
+MODULE_DEVICE_TABLE(isapnp, id_table);
+#endif /* ISAPNP */
+
+#endif /* !PCMCIA */
+
+static struct scsi_host_template aha152x_driver_template;
+
+/*
+ * internal states of the host
+ *
+ */
+enum aha152x_state {
+ idle=0,
+ unknown,
+ seldo,
+ seldi,
+ selto,
+ busfree,
+ msgo,
+ cmd,
+ msgi,
+ status,
+ datai,
+ datao,
+ parerr,
+ rsti,
+ maxstate
+};
+
+/*
+ * current state information of the host
+ *
+ */
+struct aha152x_hostdata {
+ Scsi_Cmnd *issue_SC;
+ /* pending commands to issue */
+
+ Scsi_Cmnd *current_SC;
+ /* current command on the bus */
+
+ Scsi_Cmnd *disconnected_SC;
+ /* commands that disconnected */
+
+ Scsi_Cmnd *done_SC;
+ /* command that was completed */
+
+ spinlock_t lock;
+ /* host lock */
+
+#if defined(AHA152X_STAT)
+ int total_commands;
+ int disconnections;
+ int busfree_without_any_action;
+ int busfree_without_old_command;
+ int busfree_without_new_command;
+ int busfree_without_done_command;
+ int busfree_with_check_condition;
+ int count[maxstate];
+ int count_trans[maxstate];
+ unsigned long time[maxstate];
+#endif
+
+ int commands; /* current number of commands */
+
+ int reconnect; /* disconnection allowed */
+ int parity; /* parity checking enabled */
+ int synchronous; /* synchronous transferes enabled */
+ int delay; /* reset out delay */
+ int ext_trans; /* extended translation enabled */
+
+ int swint; /* software-interrupt was fired during detect() */
+ int service; /* bh needs to be run */
+ int in_intr; /* bh is running */
+
+ /* current state,
+ previous state,
+ last state different from current state */
+ enum aha152x_state state, prevstate, laststate;
+
+ int target;
+ /* reconnecting target */
+
+ unsigned char syncrate[8];
+ /* current synchronous transfer agreements */
+
+ unsigned char syncneg[8];
+ /* 0: no negotiation;
+ * 1: negotiation in progress;
+ * 2: negotiation completed
+ */
+
+ int cmd_i;
+ /* number of sent bytes of current command */
+
+ int msgi_len;
+ /* number of received message bytes */
+ unsigned char msgi[256];
+ /* received message bytes */
+
+ int msgo_i, msgo_len;
+ /* number of sent bytes and length of current messages */
+ unsigned char msgo[256];
+ /* pending messages */
+
+ int data_len;
+ /* number of sent/received bytes in dataphase */
+
+ unsigned long io_port0;
+ unsigned long io_port1;
+
+#ifdef __ISAPNP__
+ struct pnp_dev *pnpdev;
+#endif
+ struct list_head host_list;
+};
+
+
+/*
+ * host specific command extension
+ *
+ */
+struct aha152x_scdata {
+ Scsi_Cmnd *next; /* next sc in queue */
+ struct completion *done;/* semaphore to block on */
+ struct scsi_eh_save ses;
+};
+
+/* access macros for hostdata */
+
+#define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata)
+
+#define HOSTNO ((shpnt)->host_no)
+
+#define CURRENT_SC (HOSTDATA(shpnt)->current_SC)
+#define DONE_SC (HOSTDATA(shpnt)->done_SC)
+#define ISSUE_SC (HOSTDATA(shpnt)->issue_SC)
+#define DISCONNECTED_SC (HOSTDATA(shpnt)->disconnected_SC)
+#define QLOCK (HOSTDATA(shpnt)->lock)
+#define QLOCKER (HOSTDATA(shpnt)->locker)
+#define QLOCKERL (HOSTDATA(shpnt)->lockerl)
+
+#define STATE (HOSTDATA(shpnt)->state)
+#define PREVSTATE (HOSTDATA(shpnt)->prevstate)
+#define LASTSTATE (HOSTDATA(shpnt)->laststate)
+
+#define RECONN_TARGET (HOSTDATA(shpnt)->target)
+
+#define CMD_I (HOSTDATA(shpnt)->cmd_i)
+
+#define MSGO(i) (HOSTDATA(shpnt)->msgo[i])
+#define MSGO_I (HOSTDATA(shpnt)->msgo_i)
+#define MSGOLEN (HOSTDATA(shpnt)->msgo_len)
+#define ADDMSGO(x) (MSGOLEN<256 ? (void)(MSGO(MSGOLEN++)=x) : aha152x_error(shpnt,"MSGO overflow"))
+
+#define MSGI(i) (HOSTDATA(shpnt)->msgi[i])
+#define MSGILEN (HOSTDATA(shpnt)->msgi_len)
+#define ADDMSGI(x) (MSGILEN<256 ? (void)(MSGI(MSGILEN++)=x) : aha152x_error(shpnt,"MSGI overflow"))
+
+#define DATA_LEN (HOSTDATA(shpnt)->data_len)
+
+#define SYNCRATE (HOSTDATA(shpnt)->syncrate[CURRENT_SC->device->id])
+#define SYNCNEG (HOSTDATA(shpnt)->syncneg[CURRENT_SC->device->id])
+
+#define DELAY (HOSTDATA(shpnt)->delay)
+#define EXT_TRANS (HOSTDATA(shpnt)->ext_trans)
+#define TC1550 (HOSTDATA(shpnt)->tc1550)
+#define RECONNECT (HOSTDATA(shpnt)->reconnect)
+#define PARITY (HOSTDATA(shpnt)->parity)
+#define SYNCHRONOUS (HOSTDATA(shpnt)->synchronous)
+
+#define HOSTIOPORT0 (HOSTDATA(shpnt)->io_port0)
+#define HOSTIOPORT1 (HOSTDATA(shpnt)->io_port1)
+
+#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble)
+#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
+#define SCSEM(SCpnt) SCDATA(SCpnt)->done
+
+#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
+
+/* state handling */
+static void seldi_run(struct Scsi_Host *shpnt);
+static void seldo_run(struct Scsi_Host *shpnt);
+static void selto_run(struct Scsi_Host *shpnt);
+static void busfree_run(struct Scsi_Host *shpnt);
+
+static void msgo_init(struct Scsi_Host *shpnt);
+static void msgo_run(struct Scsi_Host *shpnt);
+static void msgo_end(struct Scsi_Host *shpnt);
+
+static void cmd_init(struct Scsi_Host *shpnt);
+static void cmd_run(struct Scsi_Host *shpnt);
+static void cmd_end(struct Scsi_Host *shpnt);
+
+static void datai_init(struct Scsi_Host *shpnt);
+static void datai_run(struct Scsi_Host *shpnt);
+static void datai_end(struct Scsi_Host *shpnt);
+
+static void datao_init(struct Scsi_Host *shpnt);
+static void datao_run(struct Scsi_Host *shpnt);
+static void datao_end(struct Scsi_Host *shpnt);
+
+static void status_run(struct Scsi_Host *shpnt);
+
+static void msgi_run(struct Scsi_Host *shpnt);
+static void msgi_end(struct Scsi_Host *shpnt);
+
+static void parerr_run(struct Scsi_Host *shpnt);
+static void rsti_run(struct Scsi_Host *shpnt);
+
+static void is_complete(struct Scsi_Host *shpnt);
+
+/*
+ * driver states
+ *
+ */
+static struct {
+ char *name;
+ void (*init)(struct Scsi_Host *);
+ void (*run)(struct Scsi_Host *);
+ void (*end)(struct Scsi_Host *);
+ int spio;
+} states[] = {
+ { "idle", NULL, NULL, NULL, 0},
+ { "unknown", NULL, NULL, NULL, 0},
+ { "seldo", NULL, seldo_run, NULL, 0},
+ { "seldi", NULL, seldi_run, NULL, 0},
+ { "selto", NULL, selto_run, NULL, 0},
+ { "busfree", NULL, busfree_run, NULL, 0},
+ { "msgo", msgo_init, msgo_run, msgo_end, 1},
+ { "cmd", cmd_init, cmd_run, cmd_end, 1},
+ { "msgi", NULL, msgi_run, msgi_end, 1},
+ { "status", NULL, status_run, NULL, 1},
+ { "datai", datai_init, datai_run, datai_end, 0},
+ { "datao", datao_init, datao_run, datao_end, 0},
+ { "parerr", NULL, parerr_run, NULL, 0},
+ { "rsti", NULL, rsti_run, NULL, 0},
+};
+
+/* setup & interrupt */
+static irqreturn_t intr(int irq, void *dev_id);
+static void reset_ports(struct Scsi_Host *shpnt);
+static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
+static void done(struct Scsi_Host *shpnt, int error);
+
+/* diagnostics */
+static void show_command(Scsi_Cmnd * ptr);
+static void show_queues(struct Scsi_Host *shpnt);
+static void disp_enintr(struct Scsi_Host *shpnt);
+
+
+/*
+ * queue services:
+ *
+ */
+static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+{
+ Scsi_Cmnd *end;
+
+ SCNEXT(new_SC) = NULL;
+ if (!*SC)
+ *SC = new_SC;
+ else {
+ for (end = *SC; SCNEXT(end); end = SCNEXT(end))
+ ;
+ SCNEXT(end) = new_SC;
+ }
+}
+
+static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
+{
+ Scsi_Cmnd *ptr;
+
+ ptr = *SC;
+ if (ptr) {
+ *SC = SCNEXT(*SC);
+ SCNEXT(ptr)=NULL;
+ }
+ return ptr;
+}
+
+static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
+{
+ Scsi_Cmnd *ptr, *prev;
+
+ for (ptr = *SC, prev = NULL;
+ ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
+ prev = ptr, ptr = SCNEXT(ptr))
+ ;
+
+ if (ptr) {
+ if (prev)
+ SCNEXT(prev) = SCNEXT(ptr);
+ else
+ *SC = SCNEXT(ptr);
+
+ SCNEXT(ptr)=NULL;
+ }
+
+ return ptr;
+}
+
+static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
+{
+ Scsi_Cmnd *ptr, *prev;
+
+ for (ptr = *SC, prev = NULL;
+ ptr && SCp!=ptr;
+ prev = ptr, ptr = SCNEXT(ptr))
+ ;
+
+ if (ptr) {
+ if (prev)
+ SCNEXT(prev) = SCNEXT(ptr);
+ else
+ *SC = SCNEXT(ptr);
+
+ SCNEXT(ptr)=NULL;
+ }
+
+ return ptr;
+}
+
+static irqreturn_t swintr(int irqno, void *dev_id)
+{
+ struct Scsi_Host *shpnt = dev_id;
+
+ HOSTDATA(shpnt)->swint++;
+
+ SETPORT(DMACNTRL0, INTEN);
+ return IRQ_HANDLED;
+}
+
+struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
+{
+ struct Scsi_Host *shpnt;
+
+ shpnt = scsi_host_alloc(&aha152x_driver_template, sizeof(struct aha152x_hostdata));
+ if (!shpnt) {
+ printk(KERN_ERR "aha152x: scsi_host_alloc failed\n");
+ return NULL;
+ }
+
+ memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt));
+ INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list);
+
+ /* need to have host registered before triggering any interrupt */
+ list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);
+
+ shpnt->io_port = setup->io_port;
+ shpnt->n_io_port = IO_RANGE;
+ shpnt->irq = setup->irq;
+
+ if (!setup->tc1550) {
+ HOSTIOPORT0 = setup->io_port;
+ HOSTIOPORT1 = setup->io_port;
+ } else {
+ HOSTIOPORT0 = setup->io_port+0x10;
+ HOSTIOPORT1 = setup->io_port-0x10;
+ }
+
+ spin_lock_init(&QLOCK);
+ RECONNECT = setup->reconnect;
+ SYNCHRONOUS = setup->synchronous;
+ PARITY = setup->parity;
+ DELAY = setup->delay;
+ EXT_TRANS = setup->ext_trans;
+
+ SETPORT(SCSIID, setup->scsiid << 4);
+ shpnt->this_id = setup->scsiid;
+
+ if (setup->reconnect)
+ shpnt->can_queue = AHA152X_MAXQUEUE;
+
+ /* RESET OUT */
+ printk("aha152x: resetting bus...\n");
+ SETPORT(SCSISEQ, SCSIRSTO);
+ mdelay(256);
+ SETPORT(SCSISEQ, 0);
+ mdelay(DELAY);
+
+ reset_ports(shpnt);
+
+ printk(KERN_INFO
+ "aha152x%d%s: "
+ "vital data: rev=%x, "
+ "io=0x%03lx (0x%03lx/0x%03lx), "
+ "irq=%d, "
+ "scsiid=%d, "
+ "reconnect=%s, "
+ "parity=%s, "
+ "synchronous=%s, "
+ "delay=%d, "
+ "extended translation=%s\n",
+ shpnt->host_no, setup->tc1550 ? " (tc1550 mode)" : "",
+ GETPORT(REV) & 0x7,
+ shpnt->io_port, HOSTIOPORT0, HOSTIOPORT1,
+ shpnt->irq,
+ shpnt->this_id,
+ RECONNECT ? "enabled" : "disabled",
+ PARITY ? "enabled" : "disabled",
+ SYNCHRONOUS ? "enabled" : "disabled",
+ DELAY,
+ EXT_TRANS ? "enabled" : "disabled");
+
+ /* not expecting any interrupts */
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, 0);
+
+ if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {
+ printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
+ goto out_host_put;
+ }
+
+ HOSTDATA(shpnt)->swint = 0;
+
+ printk(KERN_INFO "aha152x%d: trying software interrupt, ", shpnt->host_no);
+
+ mb();
+ SETPORT(DMACNTRL0, SWINT|INTEN);
+ mdelay(1000);
+ free_irq(shpnt->irq, shpnt);
+
+ if (!HOSTDATA(shpnt)->swint) {
+ if (TESTHI(DMASTAT, INTSTAT)) {
+ printk("lost.\n");
+ } else {
+ printk("failed.\n");
+ }
+
+ SETPORT(DMACNTRL0, INTEN);
+
+ printk(KERN_ERR "aha152x%d: irq %d possibly wrong. "
+ "Please verify.\n", shpnt->host_no, shpnt->irq);
+ goto out_host_put;
+ }
+ printk("ok.\n");
+
+
+ /* clear interrupts */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {
+ printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
+ goto out_host_put;
+ }
+
+ if( scsi_add_host(shpnt, NULL) ) {
+ free_irq(shpnt->irq, shpnt);
+ printk(KERN_ERR "aha152x%d: failed to add host.\n", shpnt->host_no);
+ goto out_host_put;
+ }
+
+ scsi_scan_host(shpnt);
+
+ return shpnt;
+
+out_host_put:
+ list_del(&HOSTDATA(shpnt)->host_list);
+ scsi_host_put(shpnt);
+
+ return NULL;
+}
+
+void aha152x_release(struct Scsi_Host *shpnt)
+{
+ if (!shpnt)
+ return;
+
+ scsi_remove_host(shpnt);
+ if (shpnt->irq)
+ free_irq(shpnt->irq, shpnt);
+
+#if !defined(PCMCIA)
+ if (shpnt->io_port)
+ release_region(shpnt->io_port, IO_RANGE);
+#endif
+
+#ifdef __ISAPNP__
+ if (HOSTDATA(shpnt)->pnpdev)
+ pnp_device_detach(HOSTDATA(shpnt)->pnpdev);
+#endif
+
+ list_del(&HOSTDATA(shpnt)->host_list);
+ scsi_host_put(shpnt);
+}
+
+
+/*
+ * setup controller to generate interrupts depending
+ * on current state (lock has to be acquired)
+ *
+ */
+static int setup_expected_interrupts(struct Scsi_Host *shpnt)
+{
+ if(CURRENT_SC) {
+ CURRENT_SC->SCp.phase |= 1 << 16;
+
+ if(CURRENT_SC->SCp.phase & selecting) {
+ SETPORT(SSTAT1, SELTO);
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+ } else {
+ SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
+ }
+ } else if(STATE==seldi) {
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
+ } else {
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ENSCSIRST | ( (ISSUE_SC||DONE_SC) ? ENBUSFREE : 0));
+ }
+
+ if(!HOSTDATA(shpnt)->in_intr)
+ SETBITS(DMACNTRL0, INTEN);
+
+ return TESTHI(DMASTAT, INTSTAT);
+}
+
+
+/*
+ * Queue a command and setup interrupts for a free bus.
+ */
+static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
+ int phase, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shpnt = SCpnt->device->host;
+ unsigned long flags;
+
+ SCpnt->scsi_done = done;
+ SCpnt->SCp.phase = not_issued | phase;
+ SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
+ SCpnt->SCp.Message = 0;
+ SCpnt->SCp.have_data_in = 0;
+ SCpnt->SCp.sent_command = 0;
+
+ if(SCpnt->SCp.phase & (resetting|check_condition)) {
+ if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
+ scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
+ return FAILED;
+ }
+ } else {
+ SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
+ if(!SCpnt->host_scribble) {
+ scmd_printk(KERN_ERR, SCpnt, "allocation failed\n");
+ return FAILED;
+ }
+ }
+
+ SCNEXT(SCpnt) = NULL;
+ SCSEM(SCpnt) = complete;
+
+ /* setup scratch area
+ SCp.ptr : buffer pointer
+ SCp.this_residual : buffer length
+ SCp.buffer : next buffer
+ SCp.buffers_residual : left buffers in list
+ SCp.phase : current state of the command */
+
+ if ((phase & resetting) || !scsi_sglist(SCpnt)) {
+ SCpnt->SCp.ptr = NULL;
+ SCpnt->SCp.this_residual = 0;
+ scsi_set_resid(SCpnt, 0);
+ SCpnt->SCp.buffer = NULL;
+ SCpnt->SCp.buffers_residual = 0;
+ } else {
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+ SCpnt->SCp.buffer = scsi_sglist(SCpnt);
+ SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
+ }
+
+ DO_LOCK(flags);
+
+#if defined(AHA152X_STAT)
+ HOSTDATA(shpnt)->total_commands++;
+#endif
+
+ /* Turn led on, when this is the first command. */
+ HOSTDATA(shpnt)->commands++;
+ if (HOSTDATA(shpnt)->commands==1)
+ SETPORT(PORTA, 1);
+
+ append_SC(&ISSUE_SC, SCpnt);
+
+ if(!HOSTDATA(shpnt)->in_intr)
+ setup_expected_interrupts(shpnt);
+
+ DO_UNLOCK(flags);
+
+ return 0;
+}
+
+/*
+ * queue a command
+ *
+ */
+static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ return aha152x_internal_queue(SCpnt, NULL, 0, done);
+}
+
+static DEF_SCSI_QCMD(aha152x_queue)
+
+
+/*
+ *
+ */
+static void reset_done(Scsi_Cmnd *SCpnt)
+{
+ if(SCSEM(SCpnt)) {
+ complete(SCSEM(SCpnt));
+ } else {
+ printk(KERN_ERR "aha152x: reset_done w/o completion\n");
+ }
+}
+
+/*
+ * Abort a command
+ *
+ */
+static int aha152x_abort(Scsi_Cmnd *SCpnt)
+{
+ struct Scsi_Host *shpnt = SCpnt->device->host;
+ Scsi_Cmnd *ptr;
+ unsigned long flags;
+
+ DO_LOCK(flags);
+
+ ptr=remove_SC(&ISSUE_SC, SCpnt);
+
+ if(ptr) {
+ HOSTDATA(shpnt)->commands--;
+ if (!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0);
+ DO_UNLOCK(flags);
+
+ kfree(SCpnt->host_scribble);
+ SCpnt->host_scribble=NULL;
+
+ return SUCCESS;
+ }
+
+ DO_UNLOCK(flags);
+
+ /*
+ * FIXME:
+ * for current command: queue ABORT for message out and raise ATN
+ * for disconnected command: pseudo SC with ABORT message or ABORT on reselection?
+ *
+ */
+
+ scmd_printk(KERN_ERR, SCpnt,
+ "cannot abort running or disconnected command\n");
+
+ return FAILED;
+}
+
+/*
+ * Reset a device
+ *
+ */
+static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
+{
+ struct Scsi_Host *shpnt = SCpnt->device->host;
+ DECLARE_COMPLETION(done);
+ int ret, issued, disconnected;
+ unsigned char old_cmd_len = SCpnt->cmd_len;
+ unsigned long flags;
+ unsigned long timeleft;
+
+ if(CURRENT_SC==SCpnt) {
+ scmd_printk(KERN_ERR, SCpnt, "cannot reset current device\n");
+ return FAILED;
+ }
+
+ DO_LOCK(flags);
+ issued = remove_SC(&ISSUE_SC, SCpnt) == NULL;
+ disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt);
+ DO_UNLOCK(flags);
+
+ SCpnt->cmd_len = 0;
+
+ aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
+
+ timeleft = wait_for_completion_timeout(&done, 100*HZ);
+ if (!timeleft) {
+ /* remove command from issue queue */
+ DO_LOCK(flags);
+ remove_SC(&ISSUE_SC, SCpnt);
+ DO_UNLOCK(flags);
+ }
+
+ SCpnt->cmd_len = old_cmd_len;
+
+ DO_LOCK(flags);
+
+ if(SCpnt->SCp.phase & resetted) {
+ HOSTDATA(shpnt)->commands--;
+ if (!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0);
+ kfree(SCpnt->host_scribble);
+ SCpnt->host_scribble=NULL;
+
+ ret = SUCCESS;
+ } else {
+ /* requeue */
+ if(!issued) {
+ append_SC(&ISSUE_SC, SCpnt);
+ } else if(disconnected) {
+ append_SC(&DISCONNECTED_SC, SCpnt);
+ }
+
+ ret = FAILED;
+ }
+
+ DO_UNLOCK(flags);
+ return ret;
+}
+
+static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
+{
+ Scsi_Cmnd *ptr;
+
+ ptr=*SCs;
+ while(ptr) {
+ Scsi_Cmnd *next;
+
+ if(SCDATA(ptr)) {
+ next = SCNEXT(ptr);
+ } else {
+ scmd_printk(KERN_DEBUG, ptr,
+ "queue corrupted at %p\n", ptr);
+ next = NULL;
+ }
+
+ if (!ptr->device->soft_reset) {
+ remove_SC(SCs, ptr);
+ HOSTDATA(shpnt)->commands--;
+ kfree(ptr->host_scribble);
+ ptr->host_scribble=NULL;
+ }
+
+ ptr = next;
+ }
+}
+
+/*
+ * Reset the bus
+ *
+ */
+static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
+{
+ unsigned long flags;
+
+ DO_LOCK(flags);
+
+ free_hard_reset_SCs(shpnt, &ISSUE_SC);
+ free_hard_reset_SCs(shpnt, &DISCONNECTED_SC);
+
+ SETPORT(SCSISEQ, SCSIRSTO);
+ mdelay(256);
+ SETPORT(SCSISEQ, 0);
+ mdelay(DELAY);
+
+ setup_expected_interrupts(shpnt);
+ if(HOSTDATA(shpnt)->commands==0)
+ SETPORT(PORTA, 0);
+
+ DO_UNLOCK(flags);
+
+ return SUCCESS;
+}
+
+/*
+ * Reset the bus
+ *
+ */
+static int aha152x_bus_reset(Scsi_Cmnd *SCpnt)
+{
+ return aha152x_bus_reset_host(SCpnt->device->host);
+}
+
+/*
+ * Restore default values to the AIC-6260 registers and reset the fifos
+ *
+ */
+static void reset_ports(struct Scsi_Host *shpnt)
+{
+ unsigned long flags;
+
+ /* disable interrupts */
+ SETPORT(DMACNTRL0, RSTFIFO);
+
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL1, 0);
+ SETPORT(SCSISIG, 0);
+ SETRATE(0);
+
+ /* clear all interrupt conditions */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ SETPORT(SSTAT4, SYNCERR | FWERR | FRERR);
+
+ SETPORT(DMACNTRL0, 0);
+ SETPORT(DMACNTRL1, 0);
+
+ SETPORT(BRSTCNTRL, 0xf1);
+
+ /* clear SCSI fifos and transfer count */
+ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1);
+
+ DO_LOCK(flags);
+ setup_expected_interrupts(shpnt);
+ DO_UNLOCK(flags);
+}
+
+/*
+ * Reset the host (bus and controller)
+ *
+ */
+int aha152x_host_reset_host(struct Scsi_Host *shpnt)
+{
+ aha152x_bus_reset_host(shpnt);
+ reset_ports(shpnt);
+
+ return SUCCESS;
+}
+
+/*
+ * Reset the host (bus and controller)
+ *
+ */
+static int aha152x_host_reset(Scsi_Cmnd *SCpnt)
+{
+ return aha152x_host_reset_host(SCpnt->device->host);
+}
+
+/*
+ * Return the "logical geometry"
+ *
+ */
+static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int *info_array)
+{
+ struct Scsi_Host *shpnt = sdev->host;
+
+ /* try default translation */
+ info_array[0] = 64;
+ info_array[1] = 32;
+ info_array[2] = (unsigned long)capacity / (64 * 32);
+
+ /* for disks >1GB do some guessing */
+ if (info_array[2] >= 1024) {
+ int info[3];
+
+ /* try to figure out the geometry from the partition table */
+ if (scsicam_bios_param(bdev, capacity, info) < 0 ||
+ !((info[0] == 64 && info[1] == 32) || (info[0] == 255 && info[1] == 63))) {
+ if (EXT_TRANS) {
+ printk(KERN_NOTICE
+ "aha152x: unable to verify geometry for disk with >1GB.\n"
+ " using extended translation.\n");
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = (unsigned long)capacity / (255 * 63);
+ } else {
+ printk(KERN_NOTICE
+ "aha152x: unable to verify geometry for disk with >1GB.\n"
+ " Using default translation. Please verify yourself.\n"
+ " Perhaps you need to enable extended translation in the driver.\n"
+ " See Documentation/scsi/aha152x.txt for details.\n");
+ }
+ } else {
+ info_array[0] = info[0];
+ info_array[1] = info[1];
+ info_array[2] = info[2];
+
+ if (info[0] == 255 && !EXT_TRANS) {
+ printk(KERN_NOTICE
+ "aha152x: current partition table is using extended translation.\n"
+ " using it also, although it's not explicitly enabled.\n");
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Internal done function
+ *
+ */
+static void done(struct Scsi_Host *shpnt, int error)
+{
+ if (CURRENT_SC) {
+ if(DONE_SC)
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "there's already a completed command %p "
+ "- will cause abort\n", DONE_SC);
+
+ DONE_SC = CURRENT_SC;
+ CURRENT_SC = NULL;
+ DONE_SC->result = error;
+ } else
+ printk(KERN_ERR "aha152x: done() called outside of command\n");
+}
+
+static struct work_struct aha152x_tq;
+
+/*
+ * Run service completions on the card with interrupts enabled.
+ *
+ */
+static void run(struct work_struct *work)
+{
+ struct aha152x_hostdata *hd;
+
+ list_for_each_entry(hd, &aha152x_host_list, host_list) {
+ struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
+
+ is_complete(shost);
+ }
+}
+
+/*
+ * Interrupt handler
+ *
+ */
+static irqreturn_t intr(int irqno, void *dev_id)
+{
+ struct Scsi_Host *shpnt = dev_id;
+ unsigned long flags;
+ unsigned char rev, dmacntrl0;
+
+ /*
+ * Read a couple of registers that are known to not be all 1's. If
+ * we read all 1's (-1), that means that either:
+ *
+ * a. The host adapter chip has gone bad, and we cannot control it,
+ * OR
+ * b. The host adapter is a PCMCIA card that has been ejected
+ *
+ * In either case, we cannot do anything with the host adapter at
+ * this point in time. So just ignore the interrupt and return.
+ * In the latter case, the interrupt might actually be meant for
+ * someone else sharing this IRQ, and that driver will handle it.
+ */
+ rev = GETPORT(REV);
+ dmacntrl0 = GETPORT(DMACNTRL0);
+ if ((rev == 0xFF) && (dmacntrl0 == 0xFF))
+ return IRQ_NONE;
+
+ if( TESTLO(DMASTAT, INTSTAT) )
+ return IRQ_NONE;
+
+ /* no more interrupts from the controller, while we're busy.
+ INTEN is restored by the BH handler */
+ CLRBITS(DMACNTRL0, INTEN);
+
+ DO_LOCK(flags);
+ if( HOSTDATA(shpnt)->service==0 ) {
+ HOSTDATA(shpnt)->service=1;
+
+ /* Poke the BH handler */
+ INIT_WORK(&aha152x_tq, run);
+ schedule_work(&aha152x_tq);
+ }
+ DO_UNLOCK(flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * busfree phase
+ * - handle completition/disconnection/error of current command
+ * - start selection for next command (if any)
+ */
+static void busfree_run(struct Scsi_Host *shpnt)
+{
+ unsigned long flags;
+#if defined(AHA152X_STAT)
+ int action=0;
+#endif
+
+ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+ if(CURRENT_SC) {
+#if defined(AHA152X_STAT)
+ action++;
+#endif
+ CURRENT_SC->SCp.phase &= ~syncneg;
+
+ if(CURRENT_SC->SCp.phase & completed) {
+ /* target sent COMMAND COMPLETE */
+ done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
+
+ } else if(CURRENT_SC->SCp.phase & aborted) {
+ done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16));
+
+ } else if(CURRENT_SC->SCp.phase & resetted) {
+ done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16));
+
+ } else if(CURRENT_SC->SCp.phase & disconnected) {
+ /* target sent DISCONNECT */
+#if defined(AHA152X_STAT)
+ HOSTDATA(shpnt)->disconnections++;
+#endif
+ append_SC(&DISCONNECTED_SC, CURRENT_SC);
+ CURRENT_SC->SCp.phase |= 1 << 16;
+ CURRENT_SC = NULL;
+
+ } else {
+ done(shpnt, DID_ERROR << 16);
+ }
+#if defined(AHA152X_STAT)
+ } else {
+ HOSTDATA(shpnt)->busfree_without_old_command++;
+#endif
+ }
+
+ DO_LOCK(flags);
+
+ if(DONE_SC) {
+#if defined(AHA152X_STAT)
+ action++;
+#endif
+
+ if(DONE_SC->SCp.phase & check_condition) {
+ struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
+ struct aha152x_scdata *sc = SCDATA(cmd);
+
+ scsi_eh_restore_cmnd(cmd, &sc->ses);
+
+ cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
+
+ HOSTDATA(shpnt)->commands--;
+ if (!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0); /* turn led off */
+ } else if(DONE_SC->SCp.Status==SAM_STAT_CHECK_CONDITION) {
+#if defined(AHA152X_STAT)
+ HOSTDATA(shpnt)->busfree_with_check_condition++;
+#endif
+
+ if(!(DONE_SC->SCp.phase & not_issued)) {
+ struct aha152x_scdata *sc;
+ Scsi_Cmnd *ptr = DONE_SC;
+ DONE_SC=NULL;
+
+ sc = SCDATA(ptr);
+ /* It was allocated in aha152x_internal_queue? */
+ BUG_ON(!sc);
+ scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
+
+ DO_UNLOCK(flags);
+ aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
+ DO_LOCK(flags);
+ }
+ }
+
+ if(DONE_SC && DONE_SC->scsi_done) {
+ Scsi_Cmnd *ptr = DONE_SC;
+ DONE_SC=NULL;
+
+ /* turn led off, when no commands are in the driver */
+ HOSTDATA(shpnt)->commands--;
+ if (!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0); /* turn led off */
+
+ if(ptr->scsi_done != reset_done) {
+ kfree(ptr->host_scribble);
+ ptr->host_scribble=NULL;
+ }
+
+ DO_UNLOCK(flags);
+ ptr->scsi_done(ptr);
+ DO_LOCK(flags);
+ }
+
+ DONE_SC=NULL;
+#if defined(AHA152X_STAT)
+ } else {
+ HOSTDATA(shpnt)->busfree_without_done_command++;
+#endif
+ }
+
+ if(ISSUE_SC)
+ CURRENT_SC = remove_first_SC(&ISSUE_SC);
+
+ DO_UNLOCK(flags);
+
+ if(CURRENT_SC) {
+#if defined(AHA152X_STAT)
+ action++;
+#endif
+ CURRENT_SC->SCp.phase |= selecting;
+
+ /* clear selection timeout */
+ SETPORT(SSTAT1, SELTO);
+
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->device->id);
+ SETPORT(SXFRCTL1, (PARITY ? ENSPCHK : 0 ) | ENSTIMER);
+ SETPORT(SCSISEQ, ENSELO | ENAUTOATNO | (DISCONNECTED_SC ? ENRESELI : 0));
+ } else {
+#if defined(AHA152X_STAT)
+ HOSTDATA(shpnt)->busfree_without_new_command++;
+#endif
+ SETPORT(SCSISEQ, DISCONNECTED_SC ? ENRESELI : 0);
+ }
+
+#if defined(AHA152X_STAT)
+ if(!action)
+ HOSTDATA(shpnt)->busfree_without_any_action++;
+#endif
+}
+
+/*
+ * Selection done (OUT)
+ * - queue IDENTIFY message and SDTR to selected target for message out
+ * (ATN asserted automagically via ENAUTOATNO in busfree())
+ */
+static void seldo_run(struct Scsi_Host *shpnt)
+{
+ SETPORT(SCSISIG, 0);
+ SETPORT(SSTAT1, CLRBUSFREE);
+ SETPORT(SSTAT1, CLRPHASECHG);
+
+ CURRENT_SC->SCp.phase &= ~(selecting|not_issued);
+
+ SETPORT(SCSISEQ, 0);
+
+ if (TESTLO(SSTAT0, SELDO)) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "aha152x: passing bus free condition\n");
+ done(shpnt, DID_NO_CONNECT << 16);
+ return;
+ }
+
+ SETPORT(SSTAT0, CLRSELDO);
+
+ ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
+
+ if (CURRENT_SC->SCp.phase & aborting) {
+ ADDMSGO(ABORT);
+ } else if (CURRENT_SC->SCp.phase & resetting) {
+ ADDMSGO(BUS_DEVICE_RESET);
+ } else if (SYNCNEG==0 && SYNCHRONOUS) {
+ CURRENT_SC->SCp.phase |= syncneg;
+ MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
+ SYNCNEG=1; /* negotiation in progress */
+ }
+
+ SETRATE(SYNCRATE);
+}
+
+/*
+ * Selection timeout
+ * - return command to mid-level with failure cause
+ *
+ */
+static void selto_run(struct Scsi_Host *shpnt)
+{
+ SETPORT(SCSISEQ, 0);
+ SETPORT(SSTAT1, CLRSELTIMO);
+
+ if (!CURRENT_SC)
+ return;
+
+ CURRENT_SC->SCp.phase &= ~selecting;
+
+ if (CURRENT_SC->SCp.phase & aborted)
+ done(shpnt, DID_ABORT << 16);
+ else if (TESTLO(SSTAT0, SELINGO))
+ done(shpnt, DID_BUS_BUSY << 16);
+ else
+ /* ARBITRATION won, but SELECTION failed */
+ done(shpnt, DID_NO_CONNECT << 16);
+}
+
+/*
+ * Selection in done
+ * - put current command back to issue queue
+ * (reconnection of a disconnected nexus instead
+ * of successful selection out)
+ *
+ */
+static void seldi_run(struct Scsi_Host *shpnt)
+{
+ int selid;
+ int target;
+ unsigned long flags;
+
+ SETPORT(SCSISIG, 0);
+ SETPORT(SSTAT0, CLRSELDI);
+ SETPORT(SSTAT1, CLRBUSFREE);
+ SETPORT(SSTAT1, CLRPHASECHG);
+
+ if(CURRENT_SC) {
+ if(!(CURRENT_SC->SCp.phase & not_issued))
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "command should not have been issued yet\n");
+
+ DO_LOCK(flags);
+ append_SC(&ISSUE_SC, CURRENT_SC);
+ DO_UNLOCK(flags);
+
+ CURRENT_SC = NULL;
+ }
+
+ if (!DISCONNECTED_SC)
+ return;
+
+ RECONN_TARGET=-1;
+
+ selid = GETPORT(SELID) & ~(1 << shpnt->this_id);
+
+ if (selid==0) {
+ shost_printk(KERN_INFO, shpnt,
+ "target id unknown (%02x)\n", selid);
+ return;
+ }
+
+ for(target=7; !(selid & (1 << target)); target--)
+ ;
+
+ if(selid & ~(1 << target)) {
+ shost_printk(KERN_INFO, shpnt,
+ "multiple targets reconnected (%02x)\n", selid);
+ }
+
+
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | target);
+ SETPORT(SCSISEQ, 0);
+
+ SETRATE(HOSTDATA(shpnt)->syncrate[target]);
+
+ RECONN_TARGET=target;
+}
+
+/*
+ * message in phase
+ * - handle initial message after reconnection to identify
+ * reconnecting nexus
+ * - queue command on DISCONNECTED_SC on DISCONNECT message
+ * - set completed flag on COMMAND COMPLETE
+ * (other completition code moved to busfree_run)
+ * - handle response to SDTR
+ * - clear synchronous transfer agreements on BUS RESET
+ *
+ * FIXME: what about SAVE POINTERS, RESTORE POINTERS?
+ *
+ */
+static void msgi_run(struct Scsi_Host *shpnt)
+{
+ for(;;) {
+ int sstat1 = GETPORT(SSTAT1);
+
+ if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
+ return;
+
+ if (TESTLO(SSTAT0, SPIORDY))
+ return;
+
+ ADDMSGI(GETPORT(SCSIDAT));
+
+ if(!CURRENT_SC) {
+ if(LASTSTATE!=seldi) {
+ shost_printk(KERN_ERR, shpnt,
+ "message in w/o current command"
+ " not after reselection\n");
+ }
+
+ /*
+ * Handle reselection
+ */
+ if(!(MSGI(0) & IDENTIFY_BASE)) {
+ shost_printk(KERN_ERR, shpnt,
+ "target didn't identify after reselection\n");
+ continue;
+ }
+
+ CURRENT_SC = remove_lun_SC(&DISCONNECTED_SC, RECONN_TARGET, MSGI(0) & 0x3f);
+
+ if (!CURRENT_SC) {
+ show_queues(shpnt);
+ shost_printk(KERN_ERR, shpnt,
+ "no disconnected command"
+ " for target %d/%d\n",
+ RECONN_TARGET, MSGI(0) & 0x3f);
+ continue;
+ }
+
+ CURRENT_SC->SCp.Message = MSGI(0);
+ CURRENT_SC->SCp.phase &= ~disconnected;
+
+ MSGILEN=0;
+
+ /* next message if any */
+ continue;
+ }
+
+ CURRENT_SC->SCp.Message = MSGI(0);
+
+ switch (MSGI(0)) {
+ case DISCONNECT:
+ if (!RECONNECT)
+ scmd_printk(KERN_WARNING, CURRENT_SC,
+ "target was not allowed to disconnect\n");
+
+ CURRENT_SC->SCp.phase |= disconnected;
+ break;
+
+ case COMMAND_COMPLETE:
+ CURRENT_SC->SCp.phase |= completed;
+ break;
+
+ case MESSAGE_REJECT:
+ if (SYNCNEG==1) {
+ scmd_printk(KERN_INFO, CURRENT_SC,
+ "Synchronous Data Transfer Request"
+ " was rejected\n");
+ SYNCNEG=2; /* negotiation completed */
+ } else
+ scmd_printk(KERN_INFO, CURRENT_SC,
+ "inbound message (MESSAGE REJECT)\n");
+ break;
+
+ case SAVE_POINTERS:
+ break;
+
+ case RESTORE_POINTERS:
+ break;
+
+ case EXTENDED_MESSAGE:
+ if(MSGILEN<2 || MSGILEN<MSGI(1)+2) {
+ /* not yet completed */
+ continue;
+ }
+
+ switch (MSGI(2)) {
+ case EXTENDED_SDTR:
+ {
+ long ticks;
+
+ if (MSGI(1) != 3) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "SDTR message length!=3\n");
+ break;
+ }
+
+ if (!HOSTDATA(shpnt)->synchronous)
+ break;
+
+ printk(INFO_LEAD, CMDINFO(CURRENT_SC));
+ spi_print_msg(&MSGI(0));
+ printk("\n");
+
+ ticks = (MSGI(3) * 4 + 49) / 50;
+
+ if (syncneg) {
+ /* negotiation in progress */
+ if (ticks > 9 || MSGI(4) < 1 || MSGI(4) > 8) {
+ ADDMSGO(MESSAGE_REJECT);
+ scmd_printk(KERN_INFO,
+ CURRENT_SC,
+ "received Synchronous Data Transfer Request invalid - rejected\n");
+ break;
+ }
+
+ SYNCRATE |= ((ticks - 2) << 4) + MSGI(4);
+ } else if (ticks <= 9 && MSGI(4) >= 1) {
+ ADDMSGO(EXTENDED_MESSAGE);
+ ADDMSGO(3);
+ ADDMSGO(EXTENDED_SDTR);
+ if (ticks < 4) {
+ ticks = 4;
+ ADDMSGO(50);
+ } else
+ ADDMSGO(MSGI(3));
+
+ if (MSGI(4) > 8)
+ MSGI(4) = 8;
+
+ ADDMSGO(MSGI(4));
+
+ SYNCRATE |= ((ticks - 2) << 4) + MSGI(4);
+ } else {
+ /* requested SDTR is too slow, do it asynchronously */
+ scmd_printk(KERN_INFO,
+ CURRENT_SC,
+ "Synchronous Data Transfer Request too slow - Rejecting\n");
+ ADDMSGO(MESSAGE_REJECT);
+ }
+
+ /* negotiation completed */
+ SYNCNEG=2;
+ SETRATE(SYNCRATE);
+ }
+ break;
+
+ case BUS_DEVICE_RESET:
+ {
+ int i;
+
+ for(i=0; i<8; i++) {
+ HOSTDATA(shpnt)->syncrate[i]=0;
+ HOSTDATA(shpnt)->syncneg[i]=0;
+ }
+
+ }
+ break;
+
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ case EXTENDED_WDTR:
+ default:
+ ADDMSGO(MESSAGE_REJECT);
+ break;
+ }
+ break;
+ }
+
+ MSGILEN=0;
+ }
+}
+
+static void msgi_end(struct Scsi_Host *shpnt)
+{
+ if(MSGILEN>0)
+ scmd_printk(KERN_WARNING, CURRENT_SC,
+ "target left before message completed (%d)\n",
+ MSGILEN);
+
+ if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE))
+ SETPORT(SCSISIG, P_MSGI | SIG_ATNO);
+}
+
+/*
+ * message out phase
+ *
+ */
+static void msgo_init(struct Scsi_Host *shpnt)
+{
+ if(MSGOLEN==0) {
+ if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) {
+ ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
+ } else {
+ scmd_printk(KERN_INFO, CURRENT_SC,
+ "unexpected MESSAGE OUT phase; rejecting\n");
+ ADDMSGO(MESSAGE_REJECT);
+ }
+ }
+
+}
+
+/*
+ * message out phase
+ *
+ */
+static void msgo_run(struct Scsi_Host *shpnt)
+{
+ while(MSGO_I<MSGOLEN) {
+ if (TESTLO(SSTAT0, SPIORDY))
+ return;
+
+ if (MSGO_I==MSGOLEN-1) {
+ /* Leave MESSAGE OUT after transfer */
+ SETPORT(SSTAT1, CLRATNO);
+ }
+
+
+ if (MSGO(MSGO_I) & IDENTIFY_BASE)
+ CURRENT_SC->SCp.phase |= identified;
+
+ if (MSGO(MSGO_I)==ABORT)
+ CURRENT_SC->SCp.phase |= aborted;
+
+ if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
+ CURRENT_SC->SCp.phase |= resetted;
+
+ SETPORT(SCSIDAT, MSGO(MSGO_I++));
+ }
+}
+
+static void msgo_end(struct Scsi_Host *shpnt)
+{
+ if(MSGO_I<MSGOLEN) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "message sent incompletely (%d/%d)\n",
+ MSGO_I, MSGOLEN);
+ if(SYNCNEG==1) {
+ scmd_printk(KERN_INFO, CURRENT_SC,
+ "Synchronous Data Transfer Request was rejected\n");
+ SYNCNEG=2;
+ }
+ }
+
+ MSGO_I = 0;
+ MSGOLEN = 0;
+}
+
+/*
+ * command phase
+ *
+ */
+static void cmd_init(struct Scsi_Host *shpnt)
+{
+ if (CURRENT_SC->SCp.sent_command) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "command already sent\n");
+ done(shpnt, DID_ERROR << 16);
+ return;
+ }
+
+ CMD_I=0;
+}
+
+/*
+ * command phase
+ *
+ */
+static void cmd_run(struct Scsi_Host *shpnt)
+{
+ while(CMD_I<CURRENT_SC->cmd_len) {
+ if (TESTLO(SSTAT0, SPIORDY))
+ return;
+
+ SETPORT(SCSIDAT, CURRENT_SC->cmnd[CMD_I++]);
+ }
+}
+
+static void cmd_end(struct Scsi_Host *shpnt)
+{
+ if(CMD_I<CURRENT_SC->cmd_len)
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "command sent incompletely (%d/%d)\n",
+ CMD_I, CURRENT_SC->cmd_len);
+ else
+ CURRENT_SC->SCp.sent_command++;
+}
+
+/*
+ * status phase
+ *
+ */
+static void status_run(struct Scsi_Host *shpnt)
+{
+ if (TESTLO(SSTAT0, SPIORDY))
+ return;
+
+ CURRENT_SC->SCp.Status = GETPORT(SCSIDAT);
+
+}
+
+/*
+ * data in phase
+ *
+ */
+static void datai_init(struct Scsi_Host *shpnt)
+{
+ SETPORT(DMACNTRL0, RSTFIFO);
+ SETPORT(DMACNTRL0, RSTFIFO|ENDMA);
+
+ SETPORT(SXFRCTL0, CH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE);
+
+ DATA_LEN=0;
+}
+
+static void datai_run(struct Scsi_Host *shpnt)
+{
+ unsigned long the_time;
+ int fifodata, data_count;
+
+ /*
+ * loop while the phase persists or the fifos are not empty
+ *
+ */
+ while(TESTLO(DMASTAT, INTSTAT) || TESTLO(DMASTAT, DFIFOEMP) || TESTLO(SSTAT2, SEMPTY)) {
+ /* FIXME: maybe this should be done by setting up
+ * STCNT to trigger ENSWRAP interrupt, instead of
+ * polling for DFIFOFULL
+ */
+ the_time=jiffies + 100*HZ;
+ while(TESTLO(DMASTAT, DFIFOFULL|INTSTAT) && time_before(jiffies,the_time))
+ barrier();
+
+ if(TESTLO(DMASTAT, DFIFOFULL|INTSTAT)) {
+ scmd_printk(KERN_ERR, CURRENT_SC, "datai timeout\n");
+ break;
+ }
+
+ if(TESTHI(DMASTAT, DFIFOFULL)) {
+ fifodata = 128;
+ } else {
+ the_time=jiffies + 100*HZ;
+ while(TESTLO(SSTAT2, SEMPTY) && time_before(jiffies,the_time))
+ barrier();
+
+ if(TESTLO(SSTAT2, SEMPTY)) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "datai sempty timeout");
+ break;
+ }
+
+ fifodata = GETPORT(FIFOSTAT);
+ }
+
+ if(CURRENT_SC->SCp.this_residual>0) {
+ while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) {
+ data_count = fifodata > CURRENT_SC->SCp.this_residual ?
+ CURRENT_SC->SCp.this_residual :
+ fifodata;
+ fifodata -= data_count;
+
+ if (data_count & 1) {
+ SETPORT(DMACNTRL0, ENDMA|_8BIT);
+ *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
+ CURRENT_SC->SCp.this_residual--;
+ DATA_LEN++;
+ SETPORT(DMACNTRL0, ENDMA);
+ }
+
+ if (data_count > 1) {
+ data_count >>= 1;
+ insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ DATA_LEN += 2 * data_count;
+ }
+
+ if (CURRENT_SC->SCp.this_residual == 0 &&
+ CURRENT_SC->SCp.buffers_residual > 0) {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ }
+ }
+ } else if (fifodata > 0) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "no buffers left for %d(%d) bytes"
+ " (data overrun!?)\n",
+ fifodata, GETPORT(FIFOSTAT));
+ SETPORT(DMACNTRL0, ENDMA|_8BIT);
+ while(fifodata>0) {
+ int data;
+ data=GETPORT(DATAPORT);
+ fifodata--;
+ DATA_LEN++;
+ }
+ SETPORT(DMACNTRL0, ENDMA|_8BIT);
+ }
+ }
+
+ if(TESTLO(DMASTAT, INTSTAT) ||
+ TESTLO(DMASTAT, DFIFOEMP) ||
+ TESTLO(SSTAT2, SEMPTY) ||
+ GETPORT(FIFOSTAT)>0) {
+ /*
+ * something went wrong, if there's something left in the fifos
+ * or the phase didn't change
+ */
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "fifos should be empty and phase should have changed\n");
+ }
+
+ if(DATA_LEN!=GETSTCNT()) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "manual transfer count differs from automatic "
+ "(count=%d;stcnt=%d;diff=%d;fifostat=%d)",
+ DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN,
+ GETPORT(FIFOSTAT));
+ mdelay(10000);
+ }
+}
+
+static void datai_end(struct Scsi_Host *shpnt)
+{
+ CMD_INC_RESID(CURRENT_SC, -GETSTCNT());
+
+ SETPORT(SXFRCTL0, CH1|CLRSTCNT);
+ SETPORT(DMACNTRL0, 0);
+}
+
+/*
+ * data out phase
+ *
+ */
+static void datao_init(struct Scsi_Host *shpnt)
+{
+ SETPORT(DMACNTRL0, WRITE_READ | RSTFIFO);
+ SETPORT(DMACNTRL0, WRITE_READ | ENDMA);
+
+ SETPORT(SXFRCTL0, CH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE );
+
+ DATA_LEN = scsi_get_resid(CURRENT_SC);
+}
+
+static void datao_run(struct Scsi_Host *shpnt)
+{
+ unsigned long the_time;
+ int data_count;
+
+ /* until phase changes or all data sent */
+ while(TESTLO(DMASTAT, INTSTAT) && CURRENT_SC->SCp.this_residual>0) {
+ data_count = 128;
+ if(data_count > CURRENT_SC->SCp.this_residual)
+ data_count=CURRENT_SC->SCp.this_residual;
+
+ if(TESTLO(DMASTAT, DFIFOEMP)) {
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "datao fifo not empty (%d)",
+ GETPORT(FIFOSTAT));
+ break;
+ }
+
+ if(data_count & 1) {
+ SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
+ SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
+ CURRENT_SC->SCp.this_residual--;
+ CMD_INC_RESID(CURRENT_SC, -1);
+ SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
+ }
+
+ if(data_count > 1) {
+ data_count >>= 1;
+ outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ CMD_INC_RESID(CURRENT_SC, -2 * data_count);
+ }
+
+ if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ }
+
+ the_time=jiffies + 100*HZ;
+ while(TESTLO(DMASTAT, DFIFOEMP|INTSTAT) && time_before(jiffies,the_time))
+ barrier();
+
+ if(TESTLO(DMASTAT, DFIFOEMP|INTSTAT)) {
+ scmd_printk(KERN_ERR, CURRENT_SC, "dataout timeout\n");
+ break;
+ }
+ }
+}
+
+static void datao_end(struct Scsi_Host *shpnt)
+{
+ if(TESTLO(DMASTAT, DFIFOEMP)) {
+ int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
+ GETSTCNT();
+
+ CMD_INC_RESID(CURRENT_SC, data_count);
+
+ data_count -= CURRENT_SC->SCp.ptr -
+ SG_ADDRESS(CURRENT_SC->SCp.buffer);
+ while(data_count>0) {
+ CURRENT_SC->SCp.buffer--;
+ CURRENT_SC->SCp.buffers_residual++;
+ data_count -= CURRENT_SC->SCp.buffer->length;
+ }
+ CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
+ data_count;
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
+ data_count;
+ }
+
+ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(DMACNTRL0, 0);
+}
+
+/*
+ * figure out what state we're in
+ *
+ */
+static int update_state(struct Scsi_Host *shpnt)
+{
+ int dataphase=0;
+ unsigned int stat0 = GETPORT(SSTAT0);
+ unsigned int stat1 = GETPORT(SSTAT1);
+
+ PREVSTATE = STATE;
+ STATE=unknown;
+
+ if(stat1 & SCSIRSTI) {
+ STATE=rsti;
+ SETPORT(SCSISEQ,0);
+ SETPORT(SSTAT1,SCSIRSTI);
+ } else if (stat0 & SELDI && PREVSTATE == busfree) {
+ STATE=seldi;
+ } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) {
+ STATE=seldo;
+ } else if(stat1 & SELTO) {
+ STATE=selto;
+ } else if(stat1 & BUSFREE) {
+ STATE=busfree;
+ SETPORT(SSTAT1,BUSFREE);
+ } else if(stat1 & SCSIPERR) {
+ STATE=parerr;
+ SETPORT(SSTAT1,SCSIPERR);
+ } else if(stat1 & REQINIT) {
+ switch(GETPORT(SCSISIG) & P_MASK) {
+ case P_MSGI: STATE=msgi; break;
+ case P_MSGO: STATE=msgo; break;
+ case P_DATAO: STATE=datao; break;
+ case P_DATAI: STATE=datai; break;
+ case P_STATUS: STATE=status; break;
+ case P_CMD: STATE=cmd; break;
+ }
+ dataphase=1;
+ }
+
+ if((stat0 & SELDI) && STATE!=seldi && !dataphase) {
+ scmd_printk(KERN_INFO, CURRENT_SC, "reselection missed?");
+ }
+
+ if(STATE!=PREVSTATE) {
+ LASTSTATE=PREVSTATE;
+ }
+
+ return dataphase;
+}
+
+/*
+ * handle parity error
+ *
+ * FIXME: in which phase?
+ *
+ */
+static void parerr_run(struct Scsi_Host *shpnt)
+{
+ scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n");
+ done(shpnt, DID_PARITY << 16);
+}
+
+/*
+ * handle reset in
+ *
+ */
+static void rsti_run(struct Scsi_Host *shpnt)
+{
+ Scsi_Cmnd *ptr;
+
+ shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n");
+
+ ptr=DISCONNECTED_SC;
+ while(ptr) {
+ Scsi_Cmnd *next = SCNEXT(ptr);
+
+ if (!ptr->device->soft_reset) {
+ remove_SC(&DISCONNECTED_SC, ptr);
+
+ kfree(ptr->host_scribble);
+ ptr->host_scribble=NULL;
+
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(ptr);
+ }
+
+ ptr = next;
+ }
+
+ if(CURRENT_SC && !CURRENT_SC->device->soft_reset)
+ done(shpnt, DID_RESET << 16 );
+}
+
+
+/*
+ * bottom-half handler
+ *
+ */
+static void is_complete(struct Scsi_Host *shpnt)
+{
+ int dataphase;
+ unsigned long flags;
+ int pending;
+
+ if(!shpnt)
+ return;
+
+ DO_LOCK(flags);
+
+ if( HOSTDATA(shpnt)->service==0 ) {
+ DO_UNLOCK(flags);
+ return;
+ }
+
+ HOSTDATA(shpnt)->service = 0;
+
+ if(HOSTDATA(shpnt)->in_intr) {
+ DO_UNLOCK(flags);
+ /* aha152x_error never returns.. */
+ aha152x_error(shpnt, "bottom-half already running!?");
+ }
+ HOSTDATA(shpnt)->in_intr++;
+
+ /*
+ * loop while there are interrupt conditions pending
+ *
+ */
+ do {
+ unsigned long start = jiffies;
+ DO_UNLOCK(flags);
+
+ dataphase=update_state(shpnt);
+
+ /*
+ * end previous state
+ *
+ */
+ if(PREVSTATE!=STATE && states[PREVSTATE].end)
+ states[PREVSTATE].end(shpnt);
+
+ /*
+ * disable SPIO mode if previous phase used it
+ * and this one doesn't
+ *
+ */
+ if(states[PREVSTATE].spio && !states[STATE].spio) {
+ SETPORT(SXFRCTL0, CH1);
+ SETPORT(DMACNTRL0, 0);
+ if(CURRENT_SC)
+ CURRENT_SC->SCp.phase &= ~spiordy;
+ }
+
+ /*
+ * accept current dataphase phase
+ *
+ */
+ if(dataphase) {
+ SETPORT(SSTAT0, REQINIT);
+ SETPORT(SCSISIG, GETPORT(SCSISIG) & P_MASK);
+ SETPORT(SSTAT1, PHASECHG);
+ }
+
+ /*
+ * enable SPIO mode if previous didn't use it
+ * and this one does
+ *
+ */
+ if(!states[PREVSTATE].spio && states[STATE].spio) {
+ SETPORT(DMACNTRL0, 0);
+ SETPORT(SXFRCTL0, CH1|SPIOEN);
+ if(CURRENT_SC)
+ CURRENT_SC->SCp.phase |= spiordy;
+ }
+
+ /*
+ * initialize for new state
+ *
+ */
+ if(PREVSTATE!=STATE && states[STATE].init)
+ states[STATE].init(shpnt);
+
+ /*
+ * handle current state
+ *
+ */
+ if(states[STATE].run)
+ states[STATE].run(shpnt);
+ else
+ scmd_printk(KERN_ERR, CURRENT_SC,
+ "unexpected state (%x)\n", STATE);
+
+ /*
+ * setup controller to interrupt on
+ * the next expected condition and
+ * loop if it's already there
+ *
+ */
+ DO_LOCK(flags);
+ pending=setup_expected_interrupts(shpnt);
+#if defined(AHA152X_STAT)
+ HOSTDATA(shpnt)->count[STATE]++;
+ if(PREVSTATE!=STATE)
+ HOSTDATA(shpnt)->count_trans[STATE]++;
+ HOSTDATA(shpnt)->time[STATE] += jiffies-start;
+#endif
+
+ } while(pending);
+
+ /*
+ * enable interrupts and leave bottom-half
+ *
+ */
+ HOSTDATA(shpnt)->in_intr--;
+ SETBITS(DMACNTRL0, INTEN);
+ DO_UNLOCK(flags);
+}
+
+
+/*
+ * Dump the current driver status and panic
+ */
+static void aha152x_error(struct Scsi_Host *shpnt, char *msg)
+{
+ shost_printk(KERN_EMERG, shpnt, "%s\n", msg);
+ show_queues(shpnt);
+ panic("aha152x panic\n");
+}
+
+/*
+ * display enabled interrupts
+ */
+static void disp_enintr(struct Scsi_Host *shpnt)
+{
+ int s0, s1;
+
+ s0 = GETPORT(SIMODE0);
+ s1 = GETPORT(SIMODE1);
+
+ shost_printk(KERN_DEBUG, shpnt,
+ "enabled interrupts (%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
+ (s0 & ENSELDO) ? "ENSELDO " : "",
+ (s0 & ENSELDI) ? "ENSELDI " : "",
+ (s0 & ENSELINGO) ? "ENSELINGO " : "",
+ (s0 & ENSWRAP) ? "ENSWRAP " : "",
+ (s0 & ENSDONE) ? "ENSDONE " : "",
+ (s0 & ENSPIORDY) ? "ENSPIORDY " : "",
+ (s0 & ENDMADONE) ? "ENDMADONE " : "",
+ (s1 & ENSELTIMO) ? "ENSELTIMO " : "",
+ (s1 & ENATNTARG) ? "ENATNTARG " : "",
+ (s1 & ENPHASEMIS) ? "ENPHASEMIS " : "",
+ (s1 & ENBUSFREE) ? "ENBUSFREE " : "",
+ (s1 & ENSCSIPERR) ? "ENSCSIPERR " : "",
+ (s1 & ENPHASECHG) ? "ENPHASECHG " : "",
+ (s1 & ENREQINIT) ? "ENREQINIT " : "");
+}
+
+/*
+ * Show the command data of a command
+ */
+static void show_command(Scsi_Cmnd *ptr)
+{
+ scsi_print_command(ptr);
+ scmd_printk(KERN_DEBUG, ptr,
+ "request_bufflen=%d; resid=%d; "
+ "phase |%s%s%s%s%s%s%s%s%s; next=0x%p",
+ scsi_bufflen(ptr), scsi_get_resid(ptr),
+ (ptr->SCp.phase & not_issued) ? "not issued|" : "",
+ (ptr->SCp.phase & selecting) ? "selecting|" : "",
+ (ptr->SCp.phase & identified) ? "identified|" : "",
+ (ptr->SCp.phase & disconnected) ? "disconnected|" : "",
+ (ptr->SCp.phase & completed) ? "completed|" : "",
+ (ptr->SCp.phase & spiordy) ? "spiordy|" : "",
+ (ptr->SCp.phase & syncneg) ? "syncneg|" : "",
+ (ptr->SCp.phase & aborted) ? "aborted|" : "",
+ (ptr->SCp.phase & resetted) ? "resetted|" : "",
+ (SCDATA(ptr)) ? SCNEXT(ptr) : NULL);
+}
+
+/*
+ * Dump the queued data
+ */
+static void show_queues(struct Scsi_Host *shpnt)
+{
+ Scsi_Cmnd *ptr;
+ unsigned long flags;
+
+ DO_LOCK(flags);
+ printk(KERN_DEBUG "\nqueue status:\nissue_SC:\n");
+ for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr))
+ show_command(ptr);
+ DO_UNLOCK(flags);
+
+ printk(KERN_DEBUG "current_SC:\n");
+ if (CURRENT_SC)
+ show_command(CURRENT_SC);
+ else
+ printk(KERN_DEBUG "none\n");
+
+ printk(KERN_DEBUG "disconnected_SC:\n");
+ for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL)
+ show_command(ptr);
+
+ disp_enintr(shpnt);
+}
+
+static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
+{
+ int i;
+
+ seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
+ ptr, ptr->device->id, (u8)ptr->device->lun);
+
+ for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
+ seq_printf(m, "0x%02x ", ptr->cmnd[i]);
+
+ seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
+ scsi_get_resid(ptr), ptr->SCp.this_residual,
+ ptr->SCp.buffers_residual);
+
+ if (ptr->SCp.phase & not_issued)
+ seq_puts(m, "not issued|");
+ if (ptr->SCp.phase & selecting)
+ seq_puts(m, "selecting|");
+ if (ptr->SCp.phase & disconnected)
+ seq_puts(m, "disconnected|");
+ if (ptr->SCp.phase & aborted)
+ seq_puts(m, "aborted|");
+ if (ptr->SCp.phase & identified)
+ seq_puts(m, "identified|");
+ if (ptr->SCp.phase & completed)
+ seq_puts(m, "completed|");
+ if (ptr->SCp.phase & spiordy)
+ seq_puts(m, "spiordy|");
+ if (ptr->SCp.phase & syncneg)
+ seq_puts(m, "syncneg|");
+ seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
+}
+
+static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt)
+{
+ int s;
+
+ seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name);
+
+ s = GETPORT(SCSISEQ);
+ seq_puts(m, "SCSISEQ( ");
+ if (s & TEMODEO)
+ seq_puts(m, "TARGET MODE ");
+ if (s & ENSELO)
+ seq_puts(m, "SELO ");
+ if (s & ENSELI)
+ seq_puts(m, "SELI ");
+ if (s & ENRESELI)
+ seq_puts(m, "RESELI ");
+ if (s & ENAUTOATNO)
+ seq_puts(m, "AUTOATNO ");
+ if (s & ENAUTOATNI)
+ seq_puts(m, "AUTOATNI ");
+ if (s & ENAUTOATNP)
+ seq_puts(m, "AUTOATNP ");
+ if (s & SCSIRSTO)
+ seq_puts(m, "SCSIRSTO ");
+ seq_puts(m, ");");
+
+ seq_puts(m, " SCSISIG(");
+ s = GETPORT(SCSISIG);
+ switch (s & P_MASK) {
+ case P_DATAO:
+ seq_puts(m, "DATA OUT");
+ break;
+ case P_DATAI:
+ seq_puts(m, "DATA IN");
+ break;
+ case P_CMD:
+ seq_puts(m, "COMMAND");
+ break;
+ case P_STATUS:
+ seq_puts(m, "STATUS");
+ break;
+ case P_MSGO:
+ seq_puts(m, "MESSAGE OUT");
+ break;
+ case P_MSGI:
+ seq_puts(m, "MESSAGE IN");
+ break;
+ default:
+ seq_puts(m, "*invalid*");
+ break;
+ }
+
+ seq_puts(m, "); ");
+
+ seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+
+ seq_puts(m, "SSTAT( ");
+ s = GETPORT(SSTAT0);
+ if (s & TARGET)
+ seq_puts(m, "TARGET ");
+ if (s & SELDO)
+ seq_puts(m, "SELDO ");
+ if (s & SELDI)
+ seq_puts(m, "SELDI ");
+ if (s & SELINGO)
+ seq_puts(m, "SELINGO ");
+ if (s & SWRAP)
+ seq_puts(m, "SWRAP ");
+ if (s & SDONE)
+ seq_puts(m, "SDONE ");
+ if (s & SPIORDY)
+ seq_puts(m, "SPIORDY ");
+ if (s & DMADONE)
+ seq_puts(m, "DMADONE ");
+
+ s = GETPORT(SSTAT1);
+ if (s & SELTO)
+ seq_puts(m, "SELTO ");
+ if (s & ATNTARG)
+ seq_puts(m, "ATNTARG ");
+ if (s & SCSIRSTI)
+ seq_puts(m, "SCSIRSTI ");
+ if (s & PHASEMIS)
+ seq_puts(m, "PHASEMIS ");
+ if (s & BUSFREE)
+ seq_puts(m, "BUSFREE ");
+ if (s & SCSIPERR)
+ seq_puts(m, "SCSIPERR ");
+ if (s & PHASECHG)
+ seq_puts(m, "PHASECHG ");
+ if (s & REQINIT)
+ seq_puts(m, "REQINIT ");
+ seq_puts(m, "); ");
+
+
+ seq_puts(m, "SSTAT( ");
+
+ s = GETPORT(SSTAT0) & GETPORT(SIMODE0);
+
+ if (s & TARGET)
+ seq_puts(m, "TARGET ");
+ if (s & SELDO)
+ seq_puts(m, "SELDO ");
+ if (s & SELDI)
+ seq_puts(m, "SELDI ");
+ if (s & SELINGO)
+ seq_puts(m, "SELINGO ");
+ if (s & SWRAP)
+ seq_puts(m, "SWRAP ");
+ if (s & SDONE)
+ seq_puts(m, "SDONE ");
+ if (s & SPIORDY)
+ seq_puts(m, "SPIORDY ");
+ if (s & DMADONE)
+ seq_puts(m, "DMADONE ");
+
+ s = GETPORT(SSTAT1) & GETPORT(SIMODE1);
+
+ if (s & SELTO)
+ seq_puts(m, "SELTO ");
+ if (s & ATNTARG)
+ seq_puts(m, "ATNTARG ");
+ if (s & SCSIRSTI)
+ seq_puts(m, "SCSIRSTI ");
+ if (s & PHASEMIS)
+ seq_puts(m, "PHASEMIS ");
+ if (s & BUSFREE)
+ seq_puts(m, "BUSFREE ");
+ if (s & SCSIPERR)
+ seq_puts(m, "SCSIPERR ");
+ if (s & PHASECHG)
+ seq_puts(m, "PHASECHG ");
+ if (s & REQINIT)
+ seq_puts(m, "REQINIT ");
+ seq_puts(m, "); ");
+
+ seq_puts(m, "SXFRCTL0( ");
+
+ s = GETPORT(SXFRCTL0);
+ if (s & SCSIEN)
+ seq_puts(m, "SCSIEN ");
+ if (s & DMAEN)
+ seq_puts(m, "DMAEN ");
+ if (s & CH1)
+ seq_puts(m, "CH1 ");
+ if (s & CLRSTCNT)
+ seq_puts(m, "CLRSTCNT ");
+ if (s & SPIOEN)
+ seq_puts(m, "SPIOEN ");
+ if (s & CLRCH1)
+ seq_puts(m, "CLRCH1 ");
+ seq_puts(m, "); ");
+
+ seq_puts(m, "SIGNAL( ");
+
+ s = GETPORT(SCSISIG);
+ if (s & SIG_ATNI)
+ seq_puts(m, "ATNI ");
+ if (s & SIG_SELI)
+ seq_puts(m, "SELI ");
+ if (s & SIG_BSYI)
+ seq_puts(m, "BSYI ");
+ if (s & SIG_REQI)
+ seq_puts(m, "REQI ");
+ if (s & SIG_ACKI)
+ seq_puts(m, "ACKI ");
+ seq_puts(m, "); ");
+
+ seq_printf(m, "SELID(%02x), ", GETPORT(SELID));
+
+ seq_printf(m, "STCNT(%d), ", GETSTCNT());
+
+ seq_puts(m, "SSTAT2( ");
+
+ s = GETPORT(SSTAT2);
+ if (s & SOFFSET)
+ seq_puts(m, "SOFFSET ");
+ if (s & SEMPTY)
+ seq_puts(m, "SEMPTY ");
+ if (s & SFULL)
+ seq_puts(m, "SFULL ");
+ seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT));
+
+ s = GETPORT(SSTAT3);
+ seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
+
+ seq_puts(m, "SSTAT4( ");
+ s = GETPORT(SSTAT4);
+ if (s & SYNCERR)
+ seq_puts(m, "SYNCERR ");
+ if (s & FWERR)
+ seq_puts(m, "FWERR ");
+ if (s & FRERR)
+ seq_puts(m, "FRERR ");
+ seq_puts(m, "); ");
+
+ seq_puts(m, "DMACNTRL0( ");
+ s = GETPORT(DMACNTRL0);
+ seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT");
+ seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO");
+ seq_printf(m, "%s ", s & WRITE_READ ? "WRITE" : "READ");
+ if (s & ENDMA)
+ seq_puts(m, "ENDMA ");
+ if (s & INTEN)
+ seq_puts(m, "INTEN ");
+ if (s & RSTFIFO)
+ seq_puts(m, "RSTFIFO ");
+ if (s & SWINT)
+ seq_puts(m, "SWINT ");
+ seq_puts(m, "); ");
+
+ seq_puts(m, "DMASTAT( ");
+ s = GETPORT(DMASTAT);
+ if (s & ATDONE)
+ seq_puts(m, "ATDONE ");
+ if (s & WORDRDY)
+ seq_puts(m, "WORDRDY ");
+ if (s & DFIFOFULL)
+ seq_puts(m, "DFIFOFULL ");
+ if (s & DFIFOEMP)
+ seq_puts(m, "DFIFOEMP ");
+ seq_puts(m, ")\n");
+
+ seq_puts(m, "enabled interrupts( ");
+
+ s = GETPORT(SIMODE0);
+ if (s & ENSELDO)
+ seq_puts(m, "ENSELDO ");
+ if (s & ENSELDI)
+ seq_puts(m, "ENSELDI ");
+ if (s & ENSELINGO)
+ seq_puts(m, "ENSELINGO ");
+ if (s & ENSWRAP)
+ seq_puts(m, "ENSWRAP ");
+ if (s & ENSDONE)
+ seq_puts(m, "ENSDONE ");
+ if (s & ENSPIORDY)
+ seq_puts(m, "ENSPIORDY ");
+ if (s & ENDMADONE)
+ seq_puts(m, "ENDMADONE ");
+
+ s = GETPORT(SIMODE1);
+ if (s & ENSELTIMO)
+ seq_puts(m, "ENSELTIMO ");
+ if (s & ENATNTARG)
+ seq_puts(m, "ENATNTARG ");
+ if (s & ENPHASEMIS)
+ seq_puts(m, "ENPHASEMIS ");
+ if (s & ENBUSFREE)
+ seq_puts(m, "ENBUSFREE ");
+ if (s & ENSCSIPERR)
+ seq_puts(m, "ENSCSIPERR ");
+ if (s & ENPHASECHG)
+ seq_puts(m, "ENPHASECHG ");
+ if (s & ENREQINIT)
+ seq_puts(m, "ENREQINIT ");
+ seq_puts(m, ")\n");
+}
+
+static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
+{
+ if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0)
+ return -EINVAL;
+
+#if defined(AHA152X_STAT)
+ if(length>13 && strncmp("reset", buffer+8, 5)==0) {
+ int i;
+
+ HOSTDATA(shpnt)->total_commands=0;
+ HOSTDATA(shpnt)->disconnections=0;
+ HOSTDATA(shpnt)->busfree_without_any_action=0;
+ HOSTDATA(shpnt)->busfree_without_old_command=0;
+ HOSTDATA(shpnt)->busfree_without_new_command=0;
+ HOSTDATA(shpnt)->busfree_without_done_command=0;
+ HOSTDATA(shpnt)->busfree_with_check_condition=0;
+ for (i = idle; i<maxstate; i++) {
+ HOSTDATA(shpnt)->count[i]=0;
+ HOSTDATA(shpnt)->count_trans[i]=0;
+ HOSTDATA(shpnt)->time[i]=0;
+ }
+
+ shost_printk(KERN_INFO, shpnt, "aha152x: stats reset.\n");
+
+ } else
+#endif
+ {
+ return -EINVAL;
+ }
+
+
+ return length;
+}
+
+static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
+{
+ int i;
+ Scsi_Cmnd *ptr;
+ unsigned long flags;
+
+ seq_puts(m, AHA152X_REVID "\n");
+
+ seq_printf(m, "ioports 0x%04lx to 0x%04lx\n",
+ shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1);
+ seq_printf(m, "interrupt 0x%02x\n", shpnt->irq);
+ seq_printf(m, "disconnection/reconnection %s\n",
+ RECONNECT ? "enabled" : "disabled");
+ seq_printf(m, "parity checking %s\n",
+ PARITY ? "enabled" : "disabled");
+ seq_printf(m, "synchronous transfers %s\n",
+ SYNCHRONOUS ? "enabled" : "disabled");
+ seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands);
+
+ if(SYNCHRONOUS) {
+ seq_puts(m, "synchronously operating targets (tick=50 ns):\n");
+ for (i = 0; i < 8; i++)
+ if (HOSTDATA(shpnt)->syncrate[i] & 0x7f)
+ seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2),
+ (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50,
+ HOSTDATA(shpnt)->syncrate[i] & 0x0f);
+ }
+ seq_puts(m, "\nqueue status:\n");
+ DO_LOCK(flags);
+ if (ISSUE_SC) {
+ seq_puts(m, "not yet issued commands:\n");
+ for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr))
+ get_command(m, ptr);
+ } else
+ seq_puts(m, "no not yet issued commands\n");
+ DO_UNLOCK(flags);
+
+ if (CURRENT_SC) {
+ seq_puts(m, "current command:\n");
+ get_command(m, CURRENT_SC);
+ } else
+ seq_puts(m, "no current command\n");
+
+ if (DISCONNECTED_SC) {
+ seq_puts(m, "disconnected commands:\n");
+ for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr))
+ get_command(m, ptr);
+ } else
+ seq_puts(m, "no disconnected commands\n");
+
+ get_ports(m, shpnt);
+
+#if defined(AHA152X_STAT)
+ seq_printf(m, "statistics:\n"
+ "total commands: %d\n"
+ "disconnections: %d\n"
+ "busfree with check condition: %d\n"
+ "busfree without old command: %d\n"
+ "busfree without new command: %d\n"
+ "busfree without done command: %d\n"
+ "busfree without any action: %d\n"
+ "state "
+ "transitions "
+ "count "
+ "time\n",
+ HOSTDATA(shpnt)->total_commands,
+ HOSTDATA(shpnt)->disconnections,
+ HOSTDATA(shpnt)->busfree_with_check_condition,
+ HOSTDATA(shpnt)->busfree_without_old_command,
+ HOSTDATA(shpnt)->busfree_without_new_command,
+ HOSTDATA(shpnt)->busfree_without_done_command,
+ HOSTDATA(shpnt)->busfree_without_any_action);
+ for(i=0; i<maxstate; i++) {
+ seq_printf(m, "%-10s %-12d %-12d %-12ld\n",
+ states[i].name,
+ HOSTDATA(shpnt)->count_trans[i],
+ HOSTDATA(shpnt)->count[i],
+ HOSTDATA(shpnt)->time[i]);
+ }
+#endif
+ return 0;
+}
+
+static int aha152x_adjust_queue(struct scsi_device *device)
+{
+ blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
+ return 0;
+}
+
+static struct scsi_host_template aha152x_driver_template = {
+ .module = THIS_MODULE,
+ .name = AHA152X_REVID,
+ .proc_name = "aha152x",
+ .show_info = aha152x_show_info,
+ .write_info = aha152x_set_info,
+ .queuecommand = aha152x_queue,
+ .eh_abort_handler = aha152x_abort,
+ .eh_device_reset_handler = aha152x_device_reset,
+ .eh_bus_reset_handler = aha152x_bus_reset,
+ .eh_host_reset_handler = aha152x_host_reset,
+ .bios_param = aha152x_biosparam,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+ .slave_alloc = aha152x_adjust_queue,
+};
+
+#if !defined(PCMCIA)
+static int setup_count;
+static struct aha152x_setup setup[2];
+
+/* possible i/o addresses for the AIC-6260; default first */
+static unsigned short ports[] = { 0x340, 0x140 };
+
+#if !defined(SKIP_BIOSTEST)
+/* possible locations for the Adaptec BIOS; defaults first */
+static unsigned int addresses[] =
+{
+ 0xdc000, /* default first */
+ 0xc8000,
+ 0xcc000,
+ 0xd0000,
+ 0xd4000,
+ 0xd8000,
+ 0xe0000,
+ 0xeb800, /* VTech Platinum SMP */
+ 0xf0000,
+};
+
+/* signatures for various AIC-6[23]60 based controllers.
+ The point in detecting signatures is to avoid useless and maybe
+ harmful probes on ports. I'm not sure that all listed boards pass
+ auto-configuration. For those which fail the BIOS signature is
+ obsolete, because user intervention to supply the configuration is
+ needed anyway. May be an information whether or not the BIOS supports
+ extended translation could be also useful here. */
+static struct signature {
+ unsigned char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] =
+{
+ { "Adaptec AHA-1520 BIOS", 0x102e, 21 },
+ /* Adaptec 152x */
+ { "Adaptec AHA-1520B", 0x000b, 17 },
+ /* Adaptec 152x rev B */
+ { "Adaptec AHA-1520B", 0x0026, 17 },
+ /* Iomega Jaz Jet ISA (AIC6370Q) */
+ { "Adaptec ASW-B626 BIOS", 0x1029, 21 },
+ /* on-board controller */
+ { "Adaptec BIOS: ASW-B626", 0x000f, 22 },
+ /* on-board controller */
+ { "Adaptec ASW-B626 S2", 0x2e6c, 19 },
+ /* on-board controller */
+ { "Adaptec BIOS:AIC-6360", 0x000c, 21 },
+ /* on-board controller */
+ { "ScsiPro SP-360 BIOS", 0x2873, 19 },
+ /* ScsiPro-Controller */
+ { "GA-400 LOCAL BUS SCSI BIOS", 0x102e, 26 },
+ /* Gigabyte Local-Bus-SCSI */
+ { "Adaptec BIOS:AVA-282X", 0x000c, 21 },
+ /* Adaptec 282x */
+ { "Adaptec IBM Dock II SCSI", 0x2edd, 24 },
+ /* IBM Thinkpad Dock II */
+ { "Adaptec BIOS:AHA-1532P", 0x001c, 22 },
+ /* IBM Thinkpad Dock II SCSI */
+ { "DTC3520A Host Adapter BIOS", 0x318a, 26 },
+ /* DTC 3520A ISA SCSI */
+};
+#endif /* !SKIP_BIOSTEST */
+
+/*
+ * Test, if port_base is valid.
+ *
+ */
+static int aha152x_porttest(int io_port)
+{
+ int i;
+
+ SETPORT(io_port + O_DMACNTRL1, 0); /* reset stack pointer */
+ for (i = 0; i < 16; i++)
+ SETPORT(io_port + O_STACK, i);
+
+ SETPORT(io_port + O_DMACNTRL1, 0); /* reset stack pointer */
+ for (i = 0; i < 16 && GETPORT(io_port + O_STACK) == i; i++)
+ ;
+
+ return (i == 16);
+}
+
+static int tc1550_porttest(int io_port)
+{
+ int i;
+
+ SETPORT(io_port + O_TC_DMACNTRL1, 0); /* reset stack pointer */
+ for (i = 0; i < 16; i++)
+ SETPORT(io_port + O_STACK, i);
+
+ SETPORT(io_port + O_TC_DMACNTRL1, 0); /* reset stack pointer */
+ for (i = 0; i < 16 && GETPORT(io_port + O_TC_STACK) == i; i++)
+ ;
+
+ return (i == 16);
+}
+
+
+static int checksetup(struct aha152x_setup *setup)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(ports) && (setup->io_port != ports[i]); i++)
+ ;
+
+ if (i == ARRAY_SIZE(ports))
+ return 0;
+
+ if (!request_region(setup->io_port, IO_RANGE, "aha152x")) {
+ printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup->io_port);
+ return 0;
+ }
+
+ if( aha152x_porttest(setup->io_port) ) {
+ setup->tc1550=0;
+ } else if( tc1550_porttest(setup->io_port) ) {
+ setup->tc1550=1;
+ } else {
+ release_region(setup->io_port, IO_RANGE);
+ return 0;
+ }
+
+ release_region(setup->io_port, IO_RANGE);
+
+ if ((setup->irq < IRQ_MIN) || (setup->irq > IRQ_MAX))
+ return 0;
+
+ if ((setup->scsiid < 0) || (setup->scsiid > 7))
+ return 0;
+
+ if ((setup->reconnect < 0) || (setup->reconnect > 1))
+ return 0;
+
+ if ((setup->parity < 0) || (setup->parity > 1))
+ return 0;
+
+ if ((setup->synchronous < 0) || (setup->synchronous > 1))
+ return 0;
+
+ if ((setup->ext_trans < 0) || (setup->ext_trans > 1))
+ return 0;
+
+
+ return 1;
+}
+
+
+static int __init aha152x_init(void)
+{
+ int i, j, ok;
+#if defined(AUTOCONF)
+ aha152x_config conf;
+#endif
+#ifdef __ISAPNP__
+ struct pnp_dev *dev=NULL, *pnpdev[2] = {NULL, NULL};
+#endif
+
+ if ( setup_count ) {
+ printk(KERN_INFO "aha152x: processing commandline: ");
+
+ for (i = 0; i<setup_count; i++) {
+ if (!checksetup(&setup[i])) {
+ printk(KERN_ERR "\naha152x: %s\n", setup[i].conf);
+ printk(KERN_ERR "aha152x: invalid line\n");
+ }
+ }
+ printk("ok\n");
+ }
+
+#if defined(SETUP0)
+ if (setup_count < ARRAY_SIZE(setup)) {
+ struct aha152x_setup override = SETUP0;
+
+ if (setup_count == 0 || (override.io_port != setup[0].io_port)) {
+ if (!checksetup(&override)) {
+ printk(KERN_ERR "\naha152x: invalid override SETUP0={0x%x,%d,%d,%d,%d,%d,%d,%d}\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay,
+ override.ext_trans);
+ } else
+ setup[setup_count++] = override;
+ }
+ }
+#endif
+
+#if defined(SETUP1)
+ if (setup_count < ARRAY_SIZE(setup)) {
+ struct aha152x_setup override = SETUP1;
+
+ if (setup_count == 0 || (override.io_port != setup[0].io_port)) {
+ if (!checksetup(&override)) {
+ printk(KERN_ERR "\naha152x: invalid override SETUP1={0x%x,%d,%d,%d,%d,%d,%d,%d}\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay,
+ override.ext_trans);
+ } else
+ setup[setup_count++] = override;
+ }
+ }
+#endif
+
+#if defined(MODULE)
+ if (setup_count<ARRAY_SIZE(setup) && (aha152x[0]!=0 || io[0]!=0 || irq[0]!=0)) {
+ if(aha152x[0]!=0) {
+ setup[setup_count].conf = "";
+ setup[setup_count].io_port = aha152x[0];
+ setup[setup_count].irq = aha152x[1];
+ setup[setup_count].scsiid = aha152x[2];
+ setup[setup_count].reconnect = aha152x[3];
+ setup[setup_count].parity = aha152x[4];
+ setup[setup_count].synchronous = aha152x[5];
+ setup[setup_count].delay = aha152x[6];
+ setup[setup_count].ext_trans = aha152x[7];
+ } else if (io[0] != 0 || irq[0] != 0) {
+ if(io[0]!=0) setup[setup_count].io_port = io[0];
+ if(irq[0]!=0) setup[setup_count].irq = irq[0];
+
+ setup[setup_count].scsiid = scsiid[0];
+ setup[setup_count].reconnect = reconnect[0];
+ setup[setup_count].parity = parity[0];
+ setup[setup_count].synchronous = sync[0];
+ setup[setup_count].delay = delay[0];
+ setup[setup_count].ext_trans = exttrans[0];
+ }
+
+ if (checksetup(&setup[setup_count]))
+ setup_count++;
+ else
+ printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n",
+ setup[setup_count].io_port,
+ setup[setup_count].irq,
+ setup[setup_count].scsiid,
+ setup[setup_count].reconnect,
+ setup[setup_count].parity,
+ setup[setup_count].synchronous,
+ setup[setup_count].delay,
+ setup[setup_count].ext_trans);
+ }
+
+ if (setup_count<ARRAY_SIZE(setup) && (aha152x1[0]!=0 || io[1]!=0 || irq[1]!=0)) {
+ if(aha152x1[0]!=0) {
+ setup[setup_count].conf = "";
+ setup[setup_count].io_port = aha152x1[0];
+ setup[setup_count].irq = aha152x1[1];
+ setup[setup_count].scsiid = aha152x1[2];
+ setup[setup_count].reconnect = aha152x1[3];
+ setup[setup_count].parity = aha152x1[4];
+ setup[setup_count].synchronous = aha152x1[5];
+ setup[setup_count].delay = aha152x1[6];
+ setup[setup_count].ext_trans = aha152x1[7];
+ } else if (io[1] != 0 || irq[1] != 0) {
+ if(io[1]!=0) setup[setup_count].io_port = io[1];
+ if(irq[1]!=0) setup[setup_count].irq = irq[1];
+
+ setup[setup_count].scsiid = scsiid[1];
+ setup[setup_count].reconnect = reconnect[1];
+ setup[setup_count].parity = parity[1];
+ setup[setup_count].synchronous = sync[1];
+ setup[setup_count].delay = delay[1];
+ setup[setup_count].ext_trans = exttrans[1];
+ }
+ if (checksetup(&setup[setup_count]))
+ setup_count++;
+ else
+ printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n",
+ setup[setup_count].io_port,
+ setup[setup_count].irq,
+ setup[setup_count].scsiid,
+ setup[setup_count].reconnect,
+ setup[setup_count].parity,
+ setup[setup_count].synchronous,
+ setup[setup_count].delay,
+ setup[setup_count].ext_trans);
+ }
+#endif
+
+#ifdef __ISAPNP__
+ for(i=0; setup_count<ARRAY_SIZE(setup) && id_table[i].vendor; i++) {
+ while ( setup_count<ARRAY_SIZE(setup) &&
+ (dev=pnp_find_dev(NULL, id_table[i].vendor, id_table[i].function, dev)) ) {
+ if (pnp_device_attach(dev) < 0)
+ continue;
+
+ if (pnp_activate_dev(dev) < 0) {
+ pnp_device_detach(dev);
+ continue;
+ }
+
+ if (!pnp_port_valid(dev, 0)) {
+ pnp_device_detach(dev);
+ continue;
+ }
+
+ if (setup_count==1 && pnp_port_start(dev, 0)==setup[0].io_port) {
+ pnp_device_detach(dev);
+ continue;
+ }
+
+ setup[setup_count].io_port = pnp_port_start(dev, 0);
+ setup[setup_count].irq = pnp_irq(dev, 0);
+ setup[setup_count].scsiid = 7;
+ setup[setup_count].reconnect = 1;
+ setup[setup_count].parity = 1;
+ setup[setup_count].synchronous = 1;
+ setup[setup_count].delay = DELAY_DEFAULT;
+ setup[setup_count].ext_trans = 0;
+#if defined(__ISAPNP__)
+ pnpdev[setup_count] = dev;
+#endif
+ printk (KERN_INFO
+ "aha152x: found ISAPnP adapter at io=0x%03x, irq=%d\n",
+ setup[setup_count].io_port, setup[setup_count].irq);
+ setup_count++;
+ }
+ }
+#endif
+
+#if defined(AUTOCONF)
+ if (setup_count<ARRAY_SIZE(setup)) {
+#if !defined(SKIP_BIOSTEST)
+ ok = 0;
+ for (i = 0; i < ARRAY_SIZE(addresses) && !ok; i++) {
+ void __iomem *p = ioremap(addresses[i], 0x4000);
+ if (!p)
+ continue;
+ for (j = 0; j<ARRAY_SIZE(signatures) && !ok; j++)
+ ok = check_signature(p + signatures[j].sig_offset,
+ signatures[j].signature, signatures[j].sig_length);
+ iounmap(p);
+ }
+ if (!ok && setup_count == 0)
+ return -ENODEV;
+
+ printk(KERN_INFO "aha152x: BIOS test: passed, ");
+#else
+ printk(KERN_INFO "aha152x: ");
+#endif /* !SKIP_BIOSTEST */
+
+ ok = 0;
+ for (i = 0; i < ARRAY_SIZE(ports) && setup_count < 2; i++) {
+ if ((setup_count == 1) && (setup[0].io_port == ports[i]))
+ continue;
+
+ if (!request_region(ports[i], IO_RANGE, "aha152x")) {
+ printk(KERN_ERR "aha152x: io port 0x%x busy.\n", ports[i]);
+ continue;
+ }
+
+ if (aha152x_porttest(ports[i])) {
+ setup[setup_count].tc1550 = 0;
+
+ conf.cf_port =
+ (GETPORT(ports[i] + O_PORTA) << 8) + GETPORT(ports[i] + O_PORTB);
+ } else if (tc1550_porttest(ports[i])) {
+ setup[setup_count].tc1550 = 1;
+
+ conf.cf_port =
+ (GETPORT(ports[i] + O_TC_PORTA) << 8) + GETPORT(ports[i] + O_TC_PORTB);
+ } else {
+ release_region(ports[i], IO_RANGE);
+ continue;
+ }
+
+ release_region(ports[i], IO_RANGE);
+
+ ok++;
+ setup[setup_count].io_port = ports[i];
+ setup[setup_count].irq = IRQ_MIN + conf.cf_irq;
+ setup[setup_count].scsiid = conf.cf_id;
+ setup[setup_count].reconnect = conf.cf_tardisc;
+ setup[setup_count].parity = !conf.cf_parity;
+ setup[setup_count].synchronous = conf.cf_syncneg;
+ setup[setup_count].delay = DELAY_DEFAULT;
+ setup[setup_count].ext_trans = 0;
+ setup_count++;
+
+ }
+
+ if (ok)
+ printk("auto configuration: ok, ");
+ }
+#endif
+
+ printk("%d controller(s) configured\n", setup_count);
+
+ for (i=0; i<setup_count; i++) {
+ if ( request_region(setup[i].io_port, IO_RANGE, "aha152x") ) {
+ struct Scsi_Host *shpnt = aha152x_probe_one(&setup[i]);
+
+ if( !shpnt ) {
+ release_region(setup[i].io_port, IO_RANGE);
+#if defined(__ISAPNP__)
+ } else if( pnpdev[i] ) {
+ HOSTDATA(shpnt)->pnpdev=pnpdev[i];
+ pnpdev[i]=NULL;
+#endif
+ }
+ } else {
+ printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup[i].io_port);
+ }
+
+#if defined(__ISAPNP__)
+ if( pnpdev[i] )
+ pnp_device_detach(pnpdev[i]);
+#endif
+ }
+
+ return 0;
+}
+
+static void __exit aha152x_exit(void)
+{
+ struct aha152x_hostdata *hd, *tmp;
+
+ list_for_each_entry_safe(hd, tmp, &aha152x_host_list, host_list) {
+ struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
+
+ aha152x_release(shost);
+ }
+}
+
+module_init(aha152x_init);
+module_exit(aha152x_exit);
+
+#if !defined(MODULE)
+static int __init aha152x_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+
+ if(setup_count>=ARRAY_SIZE(setup)) {
+ printk(KERN_ERR "aha152x: you can only configure up to two controllers\n");
+ return 1;
+ }
+
+ setup[setup_count].conf = str;
+ setup[setup_count].io_port = ints[0] >= 1 ? ints[1] : 0x340;
+ setup[setup_count].irq = ints[0] >= 2 ? ints[2] : 11;
+ setup[setup_count].scsiid = ints[0] >= 3 ? ints[3] : 7;
+ setup[setup_count].reconnect = ints[0] >= 4 ? ints[4] : 1;
+ setup[setup_count].parity = ints[0] >= 5 ? ints[5] : 1;
+ setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
+ setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
+ setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
+ if (ints[0] > 8) { /*}*/
+ printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
+ } else {
+ setup_count++;
+ return 0;
+ }
+
+ return 1;
+}
+__setup("aha152x=", aha152x_setup);
+#endif
+
+#endif /* !PCMCIA */
diff --git a/drivers/scsi/aha152x.h b/drivers/scsi/aha152x.h
new file mode 100644
index 000000000..ac4bfa438
--- /dev/null
+++ b/drivers/scsi/aha152x.h
@@ -0,0 +1,337 @@
+#ifndef _AHA152X_H
+#define _AHA152X_H
+
+/*
+ * $Id: aha152x.h,v 2.7 2004/01/24 11:39:03 fischer Exp $
+ */
+
+/* number of queueable commands
+ (unless we support more than 1 cmd_per_lun this should do) */
+#define AHA152X_MAXQUEUE 7
+
+#define AHA152X_REVID "Adaptec 152x SCSI driver; $Revision: 2.7 $"
+
+/* port addresses */
+#define SCSISEQ (HOSTIOPORT0+0x00) /* SCSI sequence control */
+#define SXFRCTL0 (HOSTIOPORT0+0x01) /* SCSI transfer control 0 */
+#define SXFRCTL1 (HOSTIOPORT0+0x02) /* SCSI transfer control 1 */
+#define SCSISIG (HOSTIOPORT0+0x03) /* SCSI signal in/out */
+#define SCSIRATE (HOSTIOPORT0+0x04) /* SCSI rate control */
+#define SELID (HOSTIOPORT0+0x05) /* selection/reselection ID */
+#define SCSIID SELID /* SCSI ID */
+#define SCSIDAT (HOSTIOPORT0+0x06) /* SCSI latched data */
+#define SCSIBUS (HOSTIOPORT0+0x07) /* SCSI data bus */
+#define STCNT0 (HOSTIOPORT0+0x08) /* SCSI transfer count 0 */
+#define STCNT1 (HOSTIOPORT0+0x09) /* SCSI transfer count 1 */
+#define STCNT2 (HOSTIOPORT0+0x0a) /* SCSI transfer count 2 */
+#define SSTAT0 (HOSTIOPORT0+0x0b) /* SCSI interrupt status 0 */
+#define SSTAT1 (HOSTIOPORT0+0x0c) /* SCSI interrupt status 1 */
+#define SSTAT2 (HOSTIOPORT0+0x0d) /* SCSI interrupt status 2 */
+#define SCSITEST (HOSTIOPORT0+0x0e) /* SCSI test control */
+#define SSTAT3 SCSITEST /* SCSI interrupt status 3 */
+#define SSTAT4 (HOSTIOPORT0+0x0f) /* SCSI status 4 */
+#define SIMODE0 (HOSTIOPORT1+0x10) /* SCSI interrupt mode 0 */
+#define SIMODE1 (HOSTIOPORT1+0x11) /* SCSI interrupt mode 1 */
+#define DMACNTRL0 (HOSTIOPORT1+0x12) /* DMA control 0 */
+#define DMACNTRL1 (HOSTIOPORT1+0x13) /* DMA control 1 */
+#define DMASTAT (HOSTIOPORT1+0x14) /* DMA status */
+#define FIFOSTAT (HOSTIOPORT1+0x15) /* FIFO status */
+#define DATAPORT (HOSTIOPORT1+0x16) /* DATA port */
+#define BRSTCNTRL (HOSTIOPORT1+0x18) /* burst control */
+#define PORTA (HOSTIOPORT1+0x1a) /* PORT A */
+#define PORTB (HOSTIOPORT1+0x1b) /* PORT B */
+#define REV (HOSTIOPORT1+0x1c) /* revision */
+#define STACK (HOSTIOPORT1+0x1d) /* stack */
+#define TEST (HOSTIOPORT1+0x1e) /* test register */
+
+#define IO_RANGE 0x20
+
+/* used in aha152x_porttest */
+#define O_PORTA 0x1a /* PORT A */
+#define O_PORTB 0x1b /* PORT B */
+#define O_DMACNTRL1 0x13 /* DMA control 1 */
+#define O_STACK 0x1d /* stack */
+
+/* used in tc1550_porttest */
+#define O_TC_PORTA 0x0a /* PORT A */
+#define O_TC_PORTB 0x0b /* PORT B */
+#define O_TC_DMACNTRL1 0x03 /* DMA control 1 */
+#define O_TC_STACK 0x0d /* stack */
+
+/* bits and bitmasks to ports */
+
+/* SCSI sequence control */
+#define TEMODEO 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRESELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+/* SCSI transfer control 0 */
+#define SCSIEN 0x80
+#define DMAEN 0x40
+#define CH1 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define CLRCH1 0x02
+
+/* SCSI transfer control 1 */
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18 /* mask */
+#define STIMESEL_ 3
+#define ENSTIMER 0x04
+#define BYTEALIGN 0x02
+
+/* SCSI signal IN */
+#define SIG_CDI 0x80
+#define SIG_IOI 0x40
+#define SIG_MSGI 0x20
+#define SIG_ATNI 0x10
+#define SIG_SELI 0x08
+#define SIG_BSYI 0x04
+#define SIG_REQI 0x02
+#define SIG_ACKI 0x01
+
+/* SCSI Phases */
+#define P_MASK (SIG_MSGI|SIG_CDI|SIG_IOI)
+#define P_DATAO (0)
+#define P_DATAI (SIG_IOI)
+#define P_CMD (SIG_CDI)
+#define P_STATUS (SIG_CDI|SIG_IOI)
+#define P_MSGO (SIG_MSGI|SIG_CDI)
+#define P_MSGI (SIG_MSGI|SIG_CDI|SIG_IOI)
+
+/* SCSI signal OUT */
+#define SIG_CDO 0x80
+#define SIG_IOO 0x40
+#define SIG_MSGO 0x20
+#define SIG_ATNO 0x10
+#define SIG_SELO 0x08
+#define SIG_BSYO 0x04
+#define SIG_REQO 0x02
+#define SIG_ACKO 0x01
+
+/* SCSI rate control */
+#define SXFR 0x70 /* mask */
+#define SXFR_ 4
+#define SOFS 0x0f /* mask */
+
+/* SCSI ID */
+#define OID 0x70
+#define OID_ 4
+#define TID 0x07
+
+/* SCSI transfer count */
+#define GETSTCNT() ( (GETPORT(STCNT2)<<16) \
+ + (GETPORT(STCNT1)<< 8) \
+ + GETPORT(STCNT0) )
+
+#define SETSTCNT(X) { SETPORT(STCNT2, ((X) & 0xFF0000) >> 16); \
+ SETPORT(STCNT1, ((X) & 0x00FF00) >> 8); \
+ SETPORT(STCNT0, ((X) & 0x0000FF) ); }
+
+/* SCSI interrupt status */
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define SWRAP 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define SETSDONE 0x80
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+#define CLRSDONE 0x04
+#define CLRSPIORDY 0x02
+#define CLRDMADONE 0x01
+
+/* SCSI status 1 */
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define CLRSELTIMO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+/* SCSI status 2 */
+#define SOFFSET 0x20
+#define SEMPTY 0x10
+#define SFULL 0x08
+#define SFCNT 0x07 /* mask */
+
+/* SCSI status 3 */
+#define SCSICNT 0xf0 /* mask */
+#define SCSICNT_ 4
+#define OFFCNT 0x0f /* mask */
+
+/* SCSI TEST control */
+#define SCTESTU 0x08
+#define SCTESTD 0x04
+#define STCTEST 0x01
+
+/* SCSI status 4 */
+#define SYNCERR 0x04
+#define FWERR 0x02
+#define FRERR 0x01
+
+#define CLRSYNCERR 0x04
+#define CLRFWERR 0x02
+#define CLRFRERR 0x01
+
+/* SCSI interrupt mode 0 */
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+/* SCSI interrupt mode 1 */
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+/* DMA control 0 */
+#define ENDMA 0x80
+#define _8BIT 0x40
+#define DMA 0x20
+#define WRITE_READ 0x08
+#define INTEN 0x04
+#define RSTFIFO 0x02
+#define SWINT 0x01
+
+/* DMA control 1 */
+#define PWRDWN 0x80
+#define STK 0x07 /* mask */
+
+/* DMA status */
+#define ATDONE 0x80
+#define WORDRDY 0x40
+#define INTSTAT 0x20
+#define DFIFOFULL 0x10
+#define DFIFOEMP 0x08
+
+/* BURST control */
+#define BON 0xf0
+#define BOFF 0x0f
+
+/* TEST REGISTER */
+#define BOFFTMR 0x40
+#define BONTMR 0x20
+#define STCNTH 0x10
+#define STCNTM 0x08
+#define STCNTL 0x04
+#define SCSIBLK 0x02
+#define DMABLK 0x01
+
+/* On the AHA-152x board PORTA and PORTB contain
+ some information about the board's configuration. */
+typedef union {
+ struct {
+ unsigned reserved:2; /* reserved */
+ unsigned tardisc:1; /* Target disconnect: 0=disabled, 1=enabled */
+ unsigned syncneg:1; /* Initial sync neg: 0=disabled, 1=enabled */
+ unsigned msgclasses:2; /* Message classes
+ 0=#4
+ 1=#0, #1, #2, #3, #4
+ 2=#0, #3, #4
+ 3=#0, #4
+ */
+ unsigned boot:1; /* boot: 0=disabled, 1=enabled */
+ unsigned dma:1; /* Transfer mode: 0=PIO; 1=DMA */
+ unsigned id:3; /* SCSI-id */
+ unsigned irq:2; /* IRQ-Channel: 0,3=12, 1=10, 2=11 */
+ unsigned dmachan:2; /* DMA-Channel: 0=0, 1=5, 2=6, 3=7 */
+ unsigned parity:1; /* SCSI-parity: 1=enabled 0=disabled */
+ } fields;
+ unsigned short port;
+} aha152x_config ;
+
+#define cf_parity fields.parity
+#define cf_dmachan fields.dmachan
+#define cf_irq fields.irq
+#define cf_id fields.id
+#define cf_dma fields.dma
+#define cf_boot fields.boot
+#define cf_msgclasses fields.msgclasses
+#define cf_syncneg fields.syncneg
+#define cf_tardisc fields.tardisc
+#define cf_port port
+
+/* Some macros to manipulate ports and their bits */
+
+#define SETPORT(PORT, VAL) outb( (VAL), (PORT) )
+#define GETPORT(PORT) inb( PORT )
+#define SETBITS(PORT, BITS) outb( (inb(PORT) | (BITS)), (PORT) )
+#define CLRBITS(PORT, BITS) outb( (inb(PORT) & ~(BITS)), (PORT) )
+#define TESTHI(PORT, BITS) ((inb(PORT) & (BITS)) == (BITS))
+#define TESTLO(PORT, BITS) ((inb(PORT) & (BITS)) == 0)
+
+#define SETRATE(RATE) SETPORT(SCSIRATE,(RATE) & 0x7f)
+
+#if defined(AHA152X_DEBUG)
+enum {
+ debug_procinfo = 0x0001,
+ debug_queue = 0x0002,
+ debug_locking = 0x0004,
+ debug_intr = 0x0008,
+ debug_selection = 0x0010,
+ debug_msgo = 0x0020,
+ debug_msgi = 0x0040,
+ debug_status = 0x0080,
+ debug_cmd = 0x0100,
+ debug_datai = 0x0200,
+ debug_datao = 0x0400,
+ debug_eh = 0x0800,
+ debug_done = 0x1000,
+ debug_phases = 0x2000,
+};
+#endif
+
+/* for the pcmcia stub */
+struct aha152x_setup {
+ int io_port;
+ int irq;
+ int scsiid;
+ int reconnect;
+ int parity;
+ int synchronous;
+ int delay;
+ int ext_trans;
+ int tc1550;
+#if defined(AHA152X_DEBUG)
+ int debug;
+#endif
+ char *conf;
+};
+
+struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *);
+void aha152x_release(struct Scsi_Host *);
+int aha152x_host_reset_host(struct Scsi_Host *);
+
+#endif /* _AHA152X_H */
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
new file mode 100644
index 000000000..b95d2779f
--- /dev/null
+++ b/drivers/scsi/aha1542.c
@@ -0,0 +1,1072 @@
+/*
+ * Driver for Adaptec AHA-1542 SCSI host adapters
+ *
+ * Copyright (C) 1992 Tommy Thorn
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * Copyright (C) 2015 Ondrej Zary
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/dma.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "aha1542.h"
+
+#define MAXBOARDS 4
+
+static bool isapnp = 1;
+module_param(isapnp, bool, 0);
+MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)");
+
+static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 };
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)");
+
+/* time AHA spends on the AT-bus during data transfer */
+static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */
+module_param_array(bus_on, int, NULL, 0);
+MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])");
+
+/* time AHA spends off the bus (not to monopolize it) during data transfer */
+static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */
+module_param_array(bus_off, int, NULL, 0);
+MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])");
+
+/* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */
+static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 };
+module_param_array(dma_speed, int, NULL, 0);
+MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])");
+
+#define BIOS_TRANSLATION_6432 1 /* Default case these days */
+#define BIOS_TRANSLATION_25563 2 /* Big disk case */
+
+struct aha1542_hostdata {
+ /* This will effectively start both of them at the first mailbox */
+ int bios_translation; /* Mapping bios uses - for compatibility */
+ int aha1542_last_mbi_used;
+ int aha1542_last_mbo_used;
+ struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
+ struct mailbox mb[2 * AHA1542_MAILBOXES];
+ struct ccb ccb[AHA1542_MAILBOXES];
+};
+
+static inline void aha1542_intr_reset(u16 base)
+{
+ outb(IRST, CONTROL(base));
+}
+
+static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout)
+{
+ bool delayed = true;
+
+ if (timeout == 0) {
+ timeout = 3000000;
+ delayed = false;
+ }
+
+ while (1) {
+ u8 bits = inb(port) & mask;
+ if ((bits & allof) == allof && ((bits & noneof) == 0))
+ break;
+ if (delayed)
+ mdelay(1);
+ if (--timeout == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static int aha1542_outb(unsigned int base, u8 val)
+{
+ if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
+ return 1;
+ outb(val, DATA(base));
+
+ return 0;
+}
+
+static int aha1542_out(unsigned int base, u8 *buf, int len)
+{
+ while (len--) {
+ if (!wait_mask(STATUS(base), CDF, 0, CDF, 0))
+ return 1;
+ outb(*buf++, DATA(base));
+ }
+ if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0))
+ return 1;
+
+ return 0;
+}
+
+/* Only used at boot time, so we do not need to worry about latency as much
+ here */
+
+static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout)
+{
+ while (len--) {
+ if (!wait_mask(STATUS(base), DF, DF, 0, timeout))
+ return 1;
+ *buf++ = inb(DATA(base));
+ }
+ return 0;
+}
+
+static int makecode(unsigned hosterr, unsigned scsierr)
+{
+ switch (hosterr) {
+ case 0x0:
+ case 0xa: /* Linked command complete without error and linked normally */
+ case 0xb: /* Linked command complete without error, interrupt generated */
+ hosterr = 0;
+ break;
+
+ case 0x11: /* Selection time out-The initiator selection or target
+ reselection was not complete within the SCSI Time out period */
+ hosterr = DID_TIME_OUT;
+ break;
+
+ case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
+ than was allocated by the Data Length field or the sum of the
+ Scatter / Gather Data Length fields. */
+
+ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
+
+ case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was
+ invalid. This usually indicates a software failure. */
+
+ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid.
+ This usually indicates a software failure. */
+
+ case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set
+ of linked CCB's does not specify the same logical unit number as
+ the first. */
+ case 0x18: /* Invalid Target Direction received from Host-The direction of a
+ Target Mode CCB was invalid. */
+
+ case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was
+ received to service data transfer between the same target LUN
+ and initiator SCSI ID in the same direction. */
+
+ case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
+ length segment or invalid segment list boundaries was received.
+ A CCB parameter was invalid. */
+#ifdef DEBUG
+ printk("Aha1542: %x %x\n", hosterr, scsierr);
+#endif
+ hosterr = DID_ERROR; /* Couldn't find any better */
+ break;
+
+ case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
+ phase sequence was requested by the target. The host adapter
+ will generate a SCSI Reset Condition, notifying the host with
+ a SCRD interrupt */
+ hosterr = DID_RESET;
+ break;
+ default:
+ printk(KERN_ERR "aha1542: makecode: unknown hoststatus %x\n", hosterr);
+ break;
+ }
+ return scsierr | (hosterr << 16);
+}
+
+static int aha1542_test_port(struct Scsi_Host *sh)
+{
+ u8 inquiry_result[4];
+ int i;
+
+ /* Quick and dirty test for presence of the card. */
+ if (inb(STATUS(sh->io_port)) == 0xff)
+ return 0;
+
+ /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */
+
+ /* In case some other card was probing here, reset interrupts */
+ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
+
+ outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port));
+
+ mdelay(20); /* Wait a little bit for things to settle down. */
+
+ /* Expect INIT and IDLE, any of the others are bad */
+ if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0))
+ return 0;
+
+ /* Shouldn't have generated any interrupts during reset */
+ if (inb(INTRFLAGS(sh->io_port)) & INTRMASK)
+ return 0;
+
+ /* Perform a host adapter inquiry instead so we do not need to set
+ up the mailboxes ahead of time */
+
+ aha1542_outb(sh->io_port, CMD_INQUIRY);
+
+ for (i = 0; i < 4; i++) {
+ if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
+ return 0;
+ inquiry_result[i] = inb(DATA(sh->io_port));
+ }
+
+ /* Reading port should reset DF */
+ if (inb(STATUS(sh->io_port)) & DF)
+ return 0;
+
+ /* When HACC, command is completed, and we're though testing */
+ if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0))
+ return 0;
+
+ /* Clear interrupts */
+ outb(IRST, CONTROL(sh->io_port));
+
+ return 1;
+}
+
+static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
+{
+ struct Scsi_Host *sh = dev_id;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ void (*my_done)(struct scsi_cmnd *) = NULL;
+ int errstatus, mbi, mbo, mbistatus;
+ int number_serviced;
+ unsigned long flags;
+ struct scsi_cmnd *tmp_cmd;
+ int flag;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
+
+#ifdef DEBUG
+ {
+ flag = inb(INTRFLAGS(sh->io_port));
+ shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: ");
+ if (!(flag & ANYINTR))
+ printk("no interrupt?");
+ if (flag & MBIF)
+ printk("MBIF ");
+ if (flag & MBOA)
+ printk("MBOF ");
+ if (flag & HACC)
+ printk("HACC ");
+ if (flag & SCRD)
+ printk("SCRD ");
+ printk("status %02x\n", inb(STATUS(sh->io_port)));
+ };
+#endif
+ number_serviced = 0;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ while (1) {
+ flag = inb(INTRFLAGS(sh->io_port));
+
+ /* Check for unusual interrupts. If any of these happen, we should
+ probably do something special, but for now just printing a message
+ is sufficient. A SCSI reset detected is something that we really
+ need to deal with in some way. */
+ if (flag & ~MBIF) {
+ if (flag & MBOA)
+ printk("MBOF ");
+ if (flag & HACC)
+ printk("HACC ");
+ if (flag & SCRD)
+ printk("SCRD ");
+ }
+ aha1542_intr_reset(sh->io_port);
+
+ mbi = aha1542->aha1542_last_mbi_used + 1;
+ if (mbi >= 2 * AHA1542_MAILBOXES)
+ mbi = AHA1542_MAILBOXES;
+
+ do {
+ if (mb[mbi].status != 0)
+ break;
+ mbi++;
+ if (mbi >= 2 * AHA1542_MAILBOXES)
+ mbi = AHA1542_MAILBOXES;
+ } while (mbi != aha1542->aha1542_last_mbi_used);
+
+ if (mb[mbi].status == 0) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ /* Hmm, no mail. Must have read it the last time around */
+ if (!number_serviced)
+ shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
+ return IRQ_HANDLED;
+ };
+
+ mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
+ mbistatus = mb[mbi].status;
+ mb[mbi].status = 0;
+ aha1542->aha1542_last_mbi_used = mbi;
+
+#ifdef DEBUG
+ if (ccb[mbo].tarstat | ccb[mbo].hastat)
+ shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n",
+ ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
+#endif
+
+ if (mbistatus == 3)
+ continue; /* Aborted command not found */
+
+#ifdef DEBUG
+ shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi);
+#endif
+
+ tmp_cmd = aha1542->int_cmds[mbo];
+
+ if (!tmp_cmd || !tmp_cmd->scsi_done) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
+ shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
+ ccb[mbo].hastat, ccb[mbo].idlun, mbo);
+ return IRQ_HANDLED;
+ }
+ my_done = tmp_cmd->scsi_done;
+ kfree(tmp_cmd->host_scribble);
+ tmp_cmd->host_scribble = NULL;
+ /* Fetch the sense data, and tuck it away, in the required slot. The
+ Adaptec automatically fetches it, and there is no guarantee that
+ we will still have it in the cdb when we come back */
+ if (ccb[mbo].tarstat == 2)
+ memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
+ SCSI_SENSE_BUFFERSIZE);
+
+
+ /* is there mail :-) */
+
+ /* more error checking left out here */
+ if (mbistatus != 1)
+ /* This is surely wrong, but I don't know what's right */
+ errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat);
+ else
+ errstatus = 0;
+
+#ifdef DEBUG
+ if (errstatus)
+ shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus,
+ ccb[mbo].hastat, ccb[mbo].tarstat);
+ if (ccb[mbo].tarstat == 2)
+ print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12);
+ if (errstatus)
+ printk("aha1542_intr_handle: returning %6x\n", errstatus);
+#endif
+ tmp_cmd->result = errstatus;
+ aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
+ far as queuecommand is concerned */
+ my_done(tmp_cmd);
+ number_serviced++;
+ };
+}
+
+static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+{
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ u8 direction;
+ u8 target = cmd->device->id;
+ u8 lun = cmd->device->lun;
+ unsigned long flags;
+ int bufflen = scsi_bufflen(cmd);
+ int mbo, sg_count;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
+ struct chain *cptr;
+
+ if (*cmd->cmnd == REQUEST_SENSE) {
+ /* Don't do the command - we have the sense data already */
+ cmd->result = 0;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+#ifdef DEBUG
+ {
+ int i = -1;
+ if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10)
+ i = xscsi2int(cmd->cmnd + 2);
+ else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6)
+ i = scsi2int(cmd->cmnd + 2);
+ shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d",
+ target, *cmd->cmnd, i, bufflen);
+ print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
+ }
+#endif
+ if (bufflen) { /* allocate memory before taking host_lock */
+ sg_count = scsi_sg_count(cmd);
+ cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
+ if (!cptr)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* Use the outgoing mailboxes in a round-robin fashion, because this
+ is how the host adapter will scan for them */
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ mbo = aha1542->aha1542_last_mbo_used + 1;
+ if (mbo >= AHA1542_MAILBOXES)
+ mbo = 0;
+
+ do {
+ if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
+ break;
+ mbo++;
+ if (mbo >= AHA1542_MAILBOXES)
+ mbo = 0;
+ } while (mbo != aha1542->aha1542_last_mbo_used);
+
+ if (mb[mbo].status || aha1542->int_cmds[mbo])
+ panic("Unable to find empty mailbox for aha1542.\n");
+
+ aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from
+ screwing with this cdb. */
+
+ aha1542->aha1542_last_mbo_used = mbo;
+
+#ifdef DEBUG
+ shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
+#endif
+
+ any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
+
+ memset(&ccb[mbo], 0, sizeof(struct ccb));
+
+ ccb[mbo].cdblen = cmd->cmd_len;
+
+ direction = 0;
+ if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6)
+ direction = 8;
+ else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6)
+ direction = 16;
+
+ memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);
+
+ if (bufflen) {
+ struct scatterlist *sg;
+ int i;
+
+ ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
+ cmd->host_scribble = (void *)cptr;
+ scsi_for_each_sg(cmd, sg, sg_count, i) {
+ any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
+ + sg->offset);
+ any2scsi(cptr[i].datalen, sg->length);
+ };
+ any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
+ any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
+#ifdef DEBUG
+ shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
+ print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
+#endif
+ } else {
+ ccb[mbo].op = 0; /* SCSI Initiator Command */
+ cmd->host_scribble = NULL;
+ any2scsi(ccb[mbo].datalen, 0);
+ any2scsi(ccb[mbo].dataptr, 0);
+ };
+ ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */
+ ccb[mbo].rsalen = 16;
+ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
+ ccb[mbo].commlinkid = 0;
+
+#ifdef DEBUG
+ print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10);
+ printk("aha1542_queuecommand: now waiting for interrupt ");
+#endif
+ mb[mbo].status = 1;
+ aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ return 0;
+}
+
+/* Initialize mailboxes */
+static void setup_mailboxes(struct Scsi_Host *sh)
+{
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ int i;
+ struct mailbox *mb = aha1542->mb;
+ struct ccb *ccb = aha1542->ccb;
+
+ u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+
+ for (i = 0; i < AHA1542_MAILBOXES; i++) {
+ mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0;
+ any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i]));
+ };
+ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
+ any2scsi((mb_cmd + 2), isa_virt_to_bus(mb));
+ if (aha1542_out(sh->io_port, mb_cmd, 5))
+ shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
+ aha1542_intr_reset(sh->io_port);
+}
+
+static int aha1542_getconfig(struct Scsi_Host *sh)
+{
+ u8 inquiry_result[3];
+ int i;
+ i = inb(STATUS(sh->io_port));
+ if (i & DF) {
+ i = inb(DATA(sh->io_port));
+ };
+ aha1542_outb(sh->io_port, CMD_RETCONF);
+ aha1542_in(sh->io_port, inquiry_result, 3, 0);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
+ shost_printk(KERN_ERR, sh, "error querying board settings\n");
+ aha1542_intr_reset(sh->io_port);
+ switch (inquiry_result[0]) {
+ case 0x80:
+ sh->dma_channel = 7;
+ break;
+ case 0x40:
+ sh->dma_channel = 6;
+ break;
+ case 0x20:
+ sh->dma_channel = 5;
+ break;
+ case 0x01:
+ sh->dma_channel = 0;
+ break;
+ case 0:
+ /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
+ Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
+ sh->dma_channel = 0xFF;
+ break;
+ default:
+ shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
+ return -1;
+ };
+ switch (inquiry_result[1]) {
+ case 0x40:
+ sh->irq = 15;
+ break;
+ case 0x20:
+ sh->irq = 14;
+ break;
+ case 0x8:
+ sh->irq = 12;
+ break;
+ case 0x4:
+ sh->irq = 11;
+ break;
+ case 0x2:
+ sh->irq = 10;
+ break;
+ case 0x1:
+ sh->irq = 9;
+ break;
+ default:
+ shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
+ return -1;
+ };
+ sh->this_id = inquiry_result[2] & 7;
+ return 0;
+}
+
+/* This function should only be called for 1542C boards - we can detect
+ the special firmware settings and unlock the board */
+
+static int aha1542_mbenable(struct Scsi_Host *sh)
+{
+ static u8 mbenable_cmd[3];
+ static u8 mbenable_result[2];
+ int retval;
+
+ retval = BIOS_TRANSLATION_6432;
+
+ aha1542_outb(sh->io_port, CMD_EXTBIOS);
+ if (aha1542_in(sh->io_port, mbenable_result, 2, 100))
+ return retval;
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100))
+ goto fail;
+ aha1542_intr_reset(sh->io_port);
+
+ if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
+ mbenable_cmd[0] = CMD_MBENABLE;
+ mbenable_cmd[1] = 0;
+ mbenable_cmd[2] = mbenable_result[1];
+
+ if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03))
+ retval = BIOS_TRANSLATION_25563;
+
+ if (aha1542_out(sh->io_port, mbenable_cmd, 3))
+ goto fail;
+ };
+ while (0) {
+fail:
+ shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
+ }
+ aha1542_intr_reset(sh->io_port);
+ return retval;
+}
+
+/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
+static int aha1542_query(struct Scsi_Host *sh)
+{
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ u8 inquiry_result[4];
+ int i;
+ i = inb(STATUS(sh->io_port));
+ if (i & DF) {
+ i = inb(DATA(sh->io_port));
+ };
+ aha1542_outb(sh->io_port, CMD_INQUIRY);
+ aha1542_in(sh->io_port, inquiry_result, 4, 0);
+ if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
+ shost_printk(KERN_ERR, sh, "error querying card type\n");
+ aha1542_intr_reset(sh->io_port);
+
+ aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */
+
+ /* For an AHA1740 series board, we ignore the board since there is a
+ hardware bug which can lead to wrong blocks being returned if the board
+ is operating in the 1542 emulation mode. Since there is an extended mode
+ driver, we simply ignore the board and let the 1740 driver pick it up.
+ */
+
+ if (inquiry_result[0] == 0x43) {
+ shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
+ return 1;
+ };
+
+ /* Always call this - boards that do not support extended bios translation
+ will ignore the command, and we will set the proper default */
+
+ aha1542->bios_translation = aha1542_mbenable(sh);
+
+ return 0;
+}
+
+static u8 dma_speed_hw(int dma_speed)
+{
+ switch (dma_speed) {
+ case 5:
+ return 0x00;
+ case 6:
+ return 0x04;
+ case 7:
+ return 0x01;
+ case 8:
+ return 0x02;
+ case 10:
+ return 0x03;
+ }
+
+ return 0xff; /* invalid */
+}
+
+/* Set the Bus on/off-times as not to ruin floppy performance */
+static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed)
+{
+ if (bus_on > 0) {
+ u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) };
+
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, oncmd, 2))
+ goto fail;
+ }
+
+ if (bus_off > 0) {
+ u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) };
+
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, offcmd, 2))
+ goto fail;
+ }
+
+ if (dma_speed_hw(dma_speed) != 0xff) {
+ u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) };
+
+ aha1542_intr_reset(sh->io_port);
+ if (aha1542_out(sh->io_port, dmacmd, 2))
+ goto fail;
+ }
+ aha1542_intr_reset(sh->io_port);
+ return;
+fail:
+ shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n");
+ aha1542_intr_reset(sh->io_port);
+}
+
+/* return non-zero on detection */
+static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct device *pdev, int indx)
+{
+ unsigned int base_io = io[indx];
+ struct Scsi_Host *sh;
+ struct aha1542_hostdata *aha1542;
+ char dma_info[] = "no DMA";
+
+ if (base_io == 0)
+ return NULL;
+
+ if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542"))
+ return NULL;
+
+ sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata));
+ if (!sh)
+ goto release;
+ aha1542 = shost_priv(sh);
+
+ sh->unique_id = base_io;
+ sh->io_port = base_io;
+ sh->n_io_port = AHA1542_REGION_SIZE;
+ aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1;
+ aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1;
+
+ if (!aha1542_test_port(sh))
+ goto unregister;
+
+ aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]);
+ if (aha1542_query(sh))
+ goto unregister;
+ if (aha1542_getconfig(sh) == -1)
+ goto unregister;
+
+ if (sh->dma_channel != 0xFF)
+ snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel);
+ shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n",
+ sh->this_id, base_io, sh->irq, dma_info);
+ if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
+ shost_printk(KERN_INFO, sh, "Using extended bios translation\n");
+
+ setup_mailboxes(sh);
+
+ if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
+ shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
+ goto unregister;
+ }
+ if (sh->dma_channel != 0xFF) {
+ if (request_dma(sh->dma_channel, "aha1542")) {
+ shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n");
+ goto free_irq;
+ }
+ if (sh->dma_channel == 0 || sh->dma_channel >= 5) {
+ set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE);
+ enable_dma(sh->dma_channel);
+ }
+ }
+
+ if (scsi_add_host(sh, pdev))
+ goto free_dma;
+
+ scsi_scan_host(sh);
+
+ return sh;
+free_dma:
+ if (sh->dma_channel != 0xff)
+ free_dma(sh->dma_channel);
+free_irq:
+ free_irq(sh->irq, sh);
+unregister:
+ scsi_host_put(sh);
+release:
+ release_region(base_io, AHA1542_REGION_SIZE);
+
+ return NULL;
+}
+
+static int aha1542_release(struct Scsi_Host *sh)
+{
+ scsi_remove_host(sh);
+ if (sh->dma_channel != 0xff)
+ free_dma(sh->dma_channel);
+ if (sh->irq)
+ free_irq(sh->irq, sh);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ scsi_host_put(sh);
+ return 0;
+}
+
+
+/*
+ * This is a device reset. This is handled by sending a special command
+ * to the device.
+ */
+static int aha1542_dev_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ unsigned long flags;
+ struct mailbox *mb = aha1542->mb;
+ u8 target = cmd->device->id;
+ u8 lun = cmd->device->lun;
+ int mbo;
+ struct ccb *ccb = aha1542->ccb;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ mbo = aha1542->aha1542_last_mbo_used + 1;
+ if (mbo >= AHA1542_MAILBOXES)
+ mbo = 0;
+
+ do {
+ if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
+ break;
+ mbo++;
+ if (mbo >= AHA1542_MAILBOXES)
+ mbo = 0;
+ } while (mbo != aha1542->aha1542_last_mbo_used);
+
+ if (mb[mbo].status || aha1542->int_cmds[mbo])
+ panic("Unable to find empty mailbox for aha1542.\n");
+
+ aha1542->int_cmds[mbo] = cmd; /* This will effectively
+ prevent someone else from
+ screwing with this cdb. */
+
+ aha1542->aha1542_last_mbo_used = mbo;
+
+ any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */
+
+ memset(&ccb[mbo], 0, sizeof(struct ccb));
+
+ ccb[mbo].op = 0x81; /* BUS DEVICE RESET */
+
+ ccb[mbo].idlun = (target & 7) << 5 | (lun & 7); /*SCSI Target Id */
+
+ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
+ ccb[mbo].commlinkid = 0;
+
+ /*
+ * Now tell the 1542 to flush all pending commands for this
+ * target
+ */
+ aha1542_outb(sh->io_port, CMD_START_SCSI);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ scmd_printk(KERN_WARNING, cmd,
+ "Trying device reset for target\n");
+
+ return SUCCESS;
+}
+
+static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
+{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct aha1542_hostdata *aha1542 = shost_priv(sh);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ /*
+ * This does a scsi reset for all devices on the bus.
+ * In principle, we could also reset the 1542 - should
+ * we do this? Try this first, and we can add that later
+ * if it turns out to be useful.
+ */
+ outb(reset_cmd, CONTROL(cmd->device->host->io_port));
+
+ if (!wait_mask(STATUS(cmd->device->host->io_port),
+ STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) {
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ return FAILED;
+ }
+
+ /*
+ * We need to do this too before the 1542 can interact with
+ * us again after host reset.
+ */
+ if (reset_cmd & HRST)
+ setup_mailboxes(cmd->device->host);
+
+ /*
+ * Now try to pick up the pieces. For all pending commands,
+ * free any internal data structures, and basically clear things
+ * out. We do not try and restart any commands or anything -
+ * the strategy handler takes care of that crap.
+ */
+ shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no);
+
+ for (i = 0; i < AHA1542_MAILBOXES; i++) {
+ if (aha1542->int_cmds[i] != NULL) {
+ struct scsi_cmnd *tmp_cmd;
+ tmp_cmd = aha1542->int_cmds[i];
+
+ if (tmp_cmd->device->soft_reset) {
+ /*
+ * If this device implements the soft reset option,
+ * then it is still holding onto the command, and
+ * may yet complete it. In this case, we don't
+ * flush the data.
+ */
+ continue;
+ }
+ kfree(tmp_cmd->host_scribble);
+ tmp_cmd->host_scribble = NULL;
+ aha1542->int_cmds[i] = NULL;
+ aha1542->mb[i].status = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ return SUCCESS;
+}
+
+static int aha1542_bus_reset(struct scsi_cmnd *cmd)
+{
+ return aha1542_reset(cmd, SCRST);
+}
+
+static int aha1542_host_reset(struct scsi_cmnd *cmd)
+{
+ return aha1542_reset(cmd, HRST | SCRST);
+}
+
+static int aha1542_biosparam(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int geom[])
+{
+ struct aha1542_hostdata *aha1542 = shost_priv(sdev->host);
+
+ if (capacity >= 0x200000 &&
+ aha1542->bios_translation == BIOS_TRANSLATION_25563) {
+ /* Please verify that this is the same as what DOS returns */
+ geom[0] = 255; /* heads */
+ geom[1] = 63; /* sectors */
+ } else {
+ geom[0] = 64; /* heads */
+ geom[1] = 32; /* sectors */
+ }
+ geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */
+
+ return 0;
+}
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = "aha1542",
+ .name = "Adaptec 1542",
+ .queuecommand = aha1542_queuecommand,
+ .eh_device_reset_handler= aha1542_dev_reset,
+ .eh_bus_reset_handler = aha1542_bus_reset,
+ .eh_host_reset_handler = aha1542_host_reset,
+ .bios_param = aha1542_biosparam,
+ .can_queue = AHA1542_MAILBOXES,
+ .this_id = 7,
+ .sg_tablesize = 16,
+ .cmd_per_lun = 1,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
+{
+ struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev);
+
+ if (!sh)
+ return 0;
+
+ dev_set_drvdata(pdev, sh);
+ return 1;
+}
+
+static int aha1542_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ aha1542_release(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct isa_driver aha1542_isa_driver = {
+ .match = aha1542_isa_match,
+ .remove = aha1542_isa_remove,
+ .driver = {
+ .name = "aha1542"
+ },
+};
+static int isa_registered;
+
+#ifdef CONFIG_PNP
+static struct pnp_device_id aha1542_pnp_ids[] = {
+ { .id = "ADP1542" },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids);
+
+static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
+{
+ int indx;
+ struct Scsi_Host *sh;
+
+ for (indx = 0; indx < ARRAY_SIZE(io); indx++) {
+ if (io[indx])
+ continue;
+
+ if (pnp_activate_dev(pdev) < 0)
+ continue;
+
+ io[indx] = pnp_port_start(pdev, 0);
+
+ /* The card can be queried for its DMA, we have
+ the DMA set up that is enough */
+
+ dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]);
+ }
+
+ sh = aha1542_hw_init(&driver_template, &pdev->dev, indx);
+ if (!sh)
+ return -ENODEV;
+
+ pnp_set_drvdata(pdev, sh);
+ return 0;
+}
+
+static void aha1542_pnp_remove(struct pnp_dev *pdev)
+{
+ aha1542_release(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver aha1542_pnp_driver = {
+ .name = "aha1542",
+ .id_table = aha1542_pnp_ids,
+ .probe = aha1542_pnp_probe,
+ .remove = aha1542_pnp_remove,
+};
+static int pnp_registered;
+#endif /* CONFIG_PNP */
+
+static int __init aha1542_init(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_PNP
+ if (isapnp) {
+ ret = pnp_register_driver(&aha1542_pnp_driver);
+ if (!ret)
+ pnp_registered = 1;
+ }
+#endif
+ ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS);
+ if (!ret)
+ isa_registered = 1;
+
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ ret = 0;
+#endif
+ if (isa_registered)
+ ret = 0;
+
+ return ret;
+}
+
+static void __exit aha1542_exit(void)
+{
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ pnp_unregister_driver(&aha1542_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&aha1542_isa_driver);
+}
+
+module_init(aha1542_init);
+module_exit(aha1542_exit);
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
new file mode 100644
index 000000000..0fe9bae1b
--- /dev/null
+++ b/drivers/scsi/aha1542.h
@@ -0,0 +1,102 @@
+#ifndef _AHA1542_H_
+#define _AHA1542_H_
+
+#include <linux/types.h>
+
+/* I/O Port interface 4.2 */
+/* READ */
+#define STATUS(base) base
+#define STST BIT(7) /* Self Test in Progress */
+#define DIAGF BIT(6) /* Internal Diagnostic Failure */
+#define INIT BIT(5) /* Mailbox Initialization Required */
+#define IDLE BIT(4) /* SCSI Host Adapter Idle */
+#define CDF BIT(3) /* Command/Data Out Port Full */
+#define DF BIT(2) /* Data In Port Full */
+/* BIT(1) is reserved */
+#define INVDCMD BIT(0) /* Invalid H A Command */
+#define STATMASK (STST | DIAGF | INIT | IDLE | CDF | DF | INVDCMD)
+
+#define INTRFLAGS(base) (STATUS(base)+2)
+#define ANYINTR BIT(7) /* Any Interrupt */
+#define SCRD BIT(3) /* SCSI Reset Detected */
+#define HACC BIT(2) /* HA Command Complete */
+#define MBOA BIT(1) /* MBO Empty */
+#define MBIF BIT(0) /* MBI Full */
+#define INTRMASK (ANYINTR | SCRD | HACC | MBOA | MBIF)
+
+/* WRITE */
+#define CONTROL(base) STATUS(base)
+#define HRST BIT(7) /* Hard Reset */
+#define SRST BIT(6) /* Soft Reset */
+#define IRST BIT(5) /* Interrupt Reset */
+#define SCRST BIT(4) /* SCSI Bus Reset */
+
+/* READ/WRITE */
+#define DATA(base) (STATUS(base)+1)
+#define CMD_NOP 0x00 /* No Operation */
+#define CMD_MBINIT 0x01 /* Mailbox Initialization */
+#define CMD_START_SCSI 0x02 /* Start SCSI Command */
+#define CMD_INQUIRY 0x04 /* Adapter Inquiry */
+#define CMD_EMBOI 0x05 /* Enable MailBox Out Interrupt */
+#define CMD_BUSON_TIME 0x07 /* Set Bus-On Time */
+#define CMD_BUSOFF_TIME 0x08 /* Set Bus-Off Time */
+#define CMD_DMASPEED 0x09 /* Set AT Bus Transfer Speed */
+#define CMD_RETDEVS 0x0a /* Return Installed Devices */
+#define CMD_RETCONF 0x0b /* Return Configuration Data */
+#define CMD_RETSETUP 0x0d /* Return Setup Data */
+#define CMD_ECHO 0x1f /* ECHO Command Data */
+
+#define CMD_EXTBIOS 0x28 /* Return extend bios information only 1542C */
+#define CMD_MBENABLE 0x29 /* Set Mailbox Interface enable only 1542C */
+
+/* Mailbox Definition 5.2.1 and 5.2.2 */
+struct mailbox {
+ u8 status; /* Command/Status */
+ u8 ccbptr[3]; /* msb, .., lsb */
+};
+
+/* This is used with scatter-gather */
+struct chain {
+ u8 datalen[3]; /* Size of this part of chain */
+ u8 dataptr[3]; /* Location of data */
+};
+
+/* These belong in scsi.h also */
+static inline void any2scsi(u8 *p, u32 v)
+{
+ p[0] = v >> 16;
+ p[1] = v >> 8;
+ p[2] = v;
+}
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+
+struct ccb { /* Command Control Block 5.3 */
+ u8 op; /* Command Control Block Operation Code */
+ u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ u8 cdblen; /* SCSI Command Length */
+ u8 rsalen; /* Request Sense Allocation Length/Disable */
+ u8 datalen[3]; /* Data Length (msb, .., lsb) */
+ u8 dataptr[3]; /* Data Pointer */
+ u8 linkptr[3]; /* Link Pointer */
+ u8 commlinkid; /* Command Linking Identifier */
+ u8 hastat; /* Host Adapter Status (HASTAT) */
+ u8 tarstat; /* Target Device Status */
+ u8 reserved[2];
+ u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */
+ /* REQUEST SENSE */
+};
+
+#define AHA1542_REGION_SIZE 4
+#define AHA1542_MAILBOXES 8
+
+#endif /* _AHA1542_H_ */
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
new file mode 100644
index 000000000..31ace4bef
--- /dev/null
+++ b/drivers/scsi/aha1740.c
@@ -0,0 +1,678 @@
+/* $Id$
+ * 1993/03/31
+ * linux/kernel/aha1740.c
+ *
+ * Based loosely on aha1542.c which is
+ * Copyright (C) 1992 Tommy Thorn and
+ * Modified by Eric Youngdale
+ *
+ * This file is aha1740.c, written and
+ * Copyright (C) 1992,1993 Brad McLean
+ * brad@saturn.gaylord.com or brad@bradpc.gaylord.com.
+ *
+ * Modifications to makecode and queuecommand
+ * for proper handling of multiple devices courteously
+ * provided by Michael Weller, March, 1993
+ *
+ * Multiple adapter support, extended translation detection,
+ * update to current scsi subsystem changes, proc fs support,
+ * working (!) module support based on patches from Andreas Arens,
+ * by Andreas Degert <ad@papyrus.hamburg.com>, 2/1997
+ *
+ * aha1740_makecode may still need even more work
+ * if it doesn't work for your devices, take a look.
+ *
+ * Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Converted to EISA and generic DMA APIs by Marc Zyngier
+ * <maz@wild-wind.fr.eu.org>, 4/2003.
+ *
+ * Shared interrupt support added by Rask Ingemann Lambertsen
+ * <rask@sygehus.dk>, 10/2003
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/eisa.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "aha1740.h"
+
+/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
+ IT WORK, THEN:
+#define DEBUG
+*/
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+struct aha1740_hostdata {
+ struct eisa_device *edev;
+ unsigned int translation;
+ unsigned int last_ecb_used;
+ dma_addr_t ecb_dma_addr;
+ struct ecb ecb[AHA1740_ECBS];
+};
+
+struct aha1740_sg {
+ struct aha1740_chain sg_chain[AHA1740_SCATTER];
+ dma_addr_t sg_dma_addr;
+ dma_addr_t buf_dma_addr;
+};
+
+#define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata)
+
+static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host,
+ dma_addr_t dma)
+{
+ struct aha1740_hostdata *hdata = HOSTDATA (host);
+ dma_addr_t offset;
+
+ offset = dma - hdata->ecb_dma_addr;
+
+ return (struct ecb *)(((char *) hdata->ecb) + (unsigned int) offset);
+}
+
+static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu)
+{
+ struct aha1740_hostdata *hdata = HOSTDATA (host);
+ dma_addr_t offset;
+
+ offset = (char *) cpu - (char *) hdata->ecb;
+
+ return hdata->ecb_dma_addr + offset;
+}
+
+static int aha1740_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
+{
+ struct aha1740_hostdata *host = HOSTDATA(shpnt);
+ seq_printf(m, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n"
+ "Extended translation %sabled.\n",
+ shpnt->io_port, shpnt->irq, host->edev->slot,
+ host->translation ? "en" : "dis");
+ return 0;
+}
+
+static int aha1740_makecode(unchar *sense, unchar *status)
+{
+ struct statusword
+ {
+ ushort don:1, /* Command Done - No Error */
+ du:1, /* Data underrun */
+ :1, qf:1, /* Queue full */
+ sc:1, /* Specification Check */
+ dor:1, /* Data overrun */
+ ch:1, /* Chaining Halted */
+ intr:1, /* Interrupt issued */
+ asa:1, /* Additional Status Available */
+ sns:1, /* Sense information Stored */
+ :1, ini:1, /* Initialization Required */
+ me:1, /* Major error or exception */
+ :1, eca:1, /* Extended Contingent alliance */
+ :1;
+ } status_word;
+ int retval = DID_OK;
+
+ status_word = * (struct statusword *) status;
+#ifdef DEBUG
+ printk("makecode from %x,%x,%x,%x %x,%x,%x,%x",
+ status[0], status[1], status[2], status[3],
+ sense[0], sense[1], sense[2], sense[3]);
+#endif
+ if (!status_word.don) { /* Anything abnormal was detected */
+ if ( (status[1]&0x18) || status_word.sc ) {
+ /*Additional info available*/
+ /* Use the supplied info for further diagnostics */
+ switch ( status[2] ) {
+ case 0x12:
+ if ( status_word.dor )
+ retval=DID_ERROR; /* It's an Overrun */
+ /* If not overrun, assume underrun and
+ * ignore it! */
+ case 0x00: /* No info, assume no error, should
+ * not occur */
+ break;
+ case 0x11:
+ case 0x21:
+ retval=DID_TIME_OUT;
+ break;
+ case 0x0a:
+ retval=DID_BAD_TARGET;
+ break;
+ case 0x04:
+ case 0x05:
+ retval=DID_ABORT;
+ /* Either by this driver or the
+ * AHA1740 itself */
+ break;
+ default:
+ retval=DID_ERROR; /* No further
+ * diagnostics
+ * possible */
+ }
+ } else {
+ /* Michael suggests, and Brad concurs: */
+ if ( status_word.qf ) {
+ retval = DID_TIME_OUT; /* forces a redo */
+ /* I think this specific one should
+ * not happen -Brad */
+ printk("aha1740.c: WARNING: AHA1740 queue overflow!\n");
+ } else
+ if ( status[0]&0x60 ) {
+ /* Didn't find a better error */
+ retval = DID_ERROR;
+ }
+ /* In any other case return DID_OK so for example
+ CONDITION_CHECKS make it through to the appropriate
+ device driver */
+ }
+ }
+ /* Under all circumstances supply the target status -Michael */
+ return status[3] | retval << 16;
+}
+
+static int aha1740_test_port(unsigned int base)
+{
+ if ( inb(PORTADR(base)) & PORTADDR_ENH )
+ return 1; /* Okay, we're all set */
+
+ printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n");
+ return 0;
+}
+
+/* A "high" level interrupt handler */
+static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
+{
+ struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
+ void (*my_done)(Scsi_Cmnd *);
+ int errstatus, adapstat;
+ int number_serviced;
+ struct ecb *ecbptr;
+ Scsi_Cmnd *SCtmp;
+ unsigned int base;
+ unsigned long flags;
+ int handled = 0;
+ struct aha1740_sg *sgptr;
+ struct eisa_device *edev;
+
+ if (!host)
+ panic("aha1740.c: Irq from unknown host!\n");
+ spin_lock_irqsave(host->host_lock, flags);
+ base = host->io_port;
+ number_serviced = 0;
+ edev = HOSTDATA(host)->edev;
+
+ while(inb(G2STAT(base)) & G2STAT_INTPEND) {
+ handled = 1;
+ DEB(printk("aha1740_intr top of loop.\n"));
+ adapstat = inb(G2INTST(base));
+ ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base)));
+ outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */
+
+ switch ( adapstat & G2INTST_MASK ) {
+ case G2INTST_CCBRETRY:
+ case G2INTST_CCBERROR:
+ case G2INTST_CCBGOOD:
+ /* Host Ready -> Mailbox in complete */
+ outb(G2CNTRL_HRDY,G2CNTRL(base));
+ if (!ecbptr) {
+ printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT(base)),adapstat,
+ inb(G2INTST(base)), number_serviced++);
+ continue;
+ }
+ SCtmp = ecbptr->SCpnt;
+ if (!SCtmp) {
+ printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT(base)),adapstat,
+ inb(G2INTST(base)), number_serviced++);
+ continue;
+ }
+ sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
+ scsi_dma_unmap(SCtmp);
+
+ /* Free the sg block */
+ dma_free_coherent (&edev->dev,
+ sizeof (struct aha1740_sg),
+ SCtmp->host_scribble,
+ sgptr->sg_dma_addr);
+
+ /* Fetch the sense data, and tuck it away, in
+ the required slot. The Adaptec
+ automatically fetches it, and there is no
+ guarantee that we will still have it in the
+ cdb when we come back */
+ if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
+ memcpy(SCtmp->sense_buffer, ecbptr->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
+ } else
+ errstatus = 0;
+ DEB(if (errstatus)
+ printk("aha1740_intr_handle: returning %6x\n",
+ errstatus));
+ SCtmp->result = errstatus;
+ my_done = ecbptr->done;
+ memset(ecbptr,0,sizeof(struct ecb));
+ if ( my_done )
+ my_done(SCtmp);
+ break;
+
+ case G2INTST_HARDFAIL:
+ printk(KERN_ALERT "aha1740 hardware failure!\n");
+ panic("aha1740.c"); /* Goodbye */
+
+ case G2INTST_ASNEVENT:
+ printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n",
+ adapstat,
+ inb(MBOXIN0(base)),
+ inb(MBOXIN1(base)),
+ inb(MBOXIN2(base)),
+ inb(MBOXIN3(base))); /* Say What? */
+ /* Host Ready -> Mailbox in complete */
+ outb(G2CNTRL_HRDY,G2CNTRL(base));
+ break;
+
+ case G2INTST_CMDGOOD:
+ /* set immediate command success flag here: */
+ break;
+
+ case G2INTST_CMDERROR:
+ /* Set immediate command failure flag here: */
+ break;
+ }
+ number_serviced++;
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = scmd_id(SCpnt);
+ struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
+ unsigned long flags;
+ dma_addr_t sg_dma;
+ struct aha1740_sg *sgptr;
+ int ecbno, nseg;
+ DEB(int i);
+
+ if(*cmd == REQUEST_SENSE) {
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ }
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ",
+ target, *cmd, i, bufflen);
+ printk("scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+#endif
+
+ /* locate an available ecb */
+ spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
+ ecbno = host->last_ecb_used + 1; /* An optimization */
+ if (ecbno >= AHA1740_ECBS)
+ ecbno = 0;
+ do {
+ if (!host->ecb[ecbno].cmdw)
+ break;
+ ecbno++;
+ if (ecbno >= AHA1740_ECBS)
+ ecbno = 0;
+ } while (ecbno != host->last_ecb_used);
+
+ if (host->ecb[ecbno].cmdw)
+ panic("Unable to find empty ecb for aha1740.\n");
+
+ host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command
+ doubles as reserved flag */
+
+ host->last_ecb_used = ecbno;
+ spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...", ecbno, done);
+#endif
+
+ host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command
+ * Descriptor Block
+ * Length */
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 1;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 0;
+
+ memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len);
+
+ SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev,
+ sizeof (struct aha1740_sg),
+ &sg_dma, GFP_ATOMIC);
+ if(SCpnt->host_scribble == NULL) {
+ printk(KERN_WARNING "aha1740: out of memory in queuecommand!\n");
+ return 1;
+ }
+ sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
+ sgptr->sg_dma_addr = sg_dma;
+
+ nseg = scsi_dma_map(SCpnt);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
+ struct aha1740_chain * cptr;
+ int i;
+ DEB(unsigned char * ptr);
+
+ host->ecb[ecbno].sg = 1; /* SCSI Initiator Command
+ * w/scatter-gather*/
+ cptr = sgptr->sg_chain;
+ scsi_for_each_sg(SCpnt, sg, nseg, i) {
+ cptr[i].datalen = sg_dma_len (sg);
+ cptr[i].dataptr = sg_dma_address (sg);
+ }
+ host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
+ host->ecb[ecbno].dataptr = sg_dma;
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<24;i++) printk("%02x ", ptr[i]);
+#endif
+ } else {
+ host->ecb[ecbno].datalen = 0;
+ host->ecb[ecbno].dataptr = 0;
+ }
+ host->ecb[ecbno].lun = SCpnt->device->lun;
+ host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
+ host->ecb[ecbno].dir = direction;
+ host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */
+ host->ecb[ecbno].senselen = 12;
+ host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host,
+ host->ecb[ecbno].sense);
+ host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host,
+ host->ecb[ecbno].status);
+ host->ecb[ecbno].done = done;
+ host->ecb[ecbno].SCpnt = SCpnt;
+#ifdef DEBUG
+ {
+ int i;
+ printk("aha1740_command: sending.. ");
+ for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++)
+ printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]);
+ }
+ printk("\n");
+#endif
+ if (done) {
+ /* The Adaptec Spec says the card is so fast that the loops
+ will only be executed once in the code below. Even if this
+ was true with the fastest processors when the spec was
+ written, it doesn't seem to be true with today's fast
+ processors. We print a warning if the code is executed more
+ often than LOOPCNT_WARN. If this happens, it should be
+ investigated. If the count reaches LOOPCNT_MAX, we assume
+ something is broken; since there is no way to return an
+ error (the return value is ignored by the mid-level scsi
+ layer) we have to panic (and maybe that's the best thing we
+ can do then anyhow). */
+
+#define LOOPCNT_WARN 10 /* excessive mbxout wait -> syslog-msg */
+#define LOOPCNT_MAX 1000000 /* mbxout deadlock -> panic() after ~ 2 sec. */
+ int loopcnt;
+ unsigned int base = SCpnt->device->host->io_port;
+ DEB(printk("aha1740[%d] critical section\n",ecbno));
+
+ spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
+ for (loopcnt = 0; ; loopcnt++) {
+ if (inb(G2STAT(base)) & G2STAT_MBXOUT) break;
+ if (loopcnt == LOOPCNT_WARN) {
+ printk("aha1740[%d]_mbxout wait!\n",ecbno);
+ }
+ if (loopcnt == LOOPCNT_MAX)
+ panic("aha1740.c: mbxout busy!\n");
+ }
+ outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno),
+ MBOXOUT0(base));
+ for (loopcnt = 0; ; loopcnt++) {
+ if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break;
+ if (loopcnt == LOOPCNT_WARN) {
+ printk("aha1740[%d]_attn wait!\n",ecbno);
+ }
+ if (loopcnt == LOOPCNT_MAX)
+ panic("aha1740.c: attn wait failed!\n");
+ }
+ outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */
+ spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
+ DEB(printk("aha1740[%d] request queued.\n",ecbno));
+ } else
+ printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n");
+ return 0;
+}
+
+static DEF_SCSI_QCMD(aha1740_queuecommand)
+
+/* Query the board for its irq_level and irq_type. Nothing else matters
+ in enhanced mode on an EISA bus. */
+
+static void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
+ unsigned int *irq_type,
+ unsigned int *translation)
+{
+ static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };
+
+ *irq_level = intab[inb(INTDEF(base)) & 0x7];
+ *irq_type = (inb(INTDEF(base)) & 0x8) >> 3;
+ *translation = inb(RESV1(base)) & 0x1;
+ outb(inb(INTDEF(base)) | 0x10, INTDEF(base));
+}
+
+static int aha1740_biosparam(struct scsi_device *sdev,
+ struct block_device *dev,
+ sector_t capacity, int* ip)
+{
+ int size = capacity;
+ int extended = HOSTDATA(sdev->host)->translation;
+
+ DEB(printk("aha1740_biosparam\n"));
+ if (extended && (ip[2] > 1024)) {
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size / (255 * 63);
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ }
+ return 0;
+}
+
+static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
+{
+/*
+ * From Alan Cox :
+ * The AHA1740 has firmware handled abort/reset handling. The "head in
+ * sand" kernel code is correct for once 8)
+ *
+ * So we define a dummy handler just to keep the kernel SCSI code as
+ * quiet as possible...
+ */
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template aha1740_template = {
+ .module = THIS_MODULE,
+ .proc_name = "aha1740",
+ .show_info = aha1740_show_info,
+ .name = "Adaptec 174x (EISA)",
+ .queuecommand = aha1740_queuecommand,
+ .bios_param = aha1740_biosparam,
+ .can_queue = AHA1740_ECBS,
+ .this_id = 7,
+ .sg_tablesize = AHA1740_SCATTER,
+ .cmd_per_lun = AHA1740_CMDLUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = aha1740_eh_abort_handler,
+};
+
+static int aha1740_probe (struct device *dev)
+{
+ int slotbase, rc;
+ unsigned int irq_level, irq_type, translation;
+ struct Scsi_Host *shpnt;
+ struct aha1740_hostdata *host;
+ struct eisa_device *edev = to_eisa_device (dev);
+
+ DEB(printk("aha1740_probe: \n"));
+
+ slotbase = edev->base_addr + EISA_VENDOR_ID_OFFSET;
+ if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */
+ return -EBUSY;
+ if (!aha1740_test_port(slotbase))
+ goto err_release_region;
+ aha1740_getconfig(slotbase,&irq_level,&irq_type,&translation);
+ if ((inb(G2STAT(slotbase)) &
+ (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT) {
+ /* If the card isn't ready, hard reset it */
+ outb(G2CNTRL_HRST, G2CNTRL(slotbase));
+ outb(0, G2CNTRL(slotbase));
+ }
+ printk(KERN_INFO "Configuring slot %d at IO:%x, IRQ %u (%s)\n",
+ edev->slot, slotbase, irq_level, irq_type ? "edge" : "level");
+ printk(KERN_INFO "aha174x: Extended translation %sabled.\n",
+ translation ? "en" : "dis");
+ shpnt = scsi_host_alloc(&aha1740_template,
+ sizeof(struct aha1740_hostdata));
+ if(shpnt == NULL)
+ goto err_release_region;
+
+ shpnt->base = 0;
+ shpnt->io_port = slotbase;
+ shpnt->n_io_port = SLOTSIZE;
+ shpnt->irq = irq_level;
+ shpnt->dma_channel = 0xff;
+ host = HOSTDATA(shpnt);
+ host->edev = edev;
+ host->translation = translation;
+ host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb,
+ sizeof (host->ecb),
+ DMA_BIDIRECTIONAL);
+ if (!host->ecb_dma_addr) {
+ printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
+ scsi_unregister (shpnt);
+ goto err_host_put;
+ }
+
+ DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level));
+ if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 0 : IRQF_SHARED,
+ "aha1740",shpnt)) {
+ printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n",
+ irq_level);
+ goto err_unmap;
+ }
+
+ eisa_set_drvdata (edev, shpnt);
+
+ rc = scsi_add_host (shpnt, dev);
+ if (rc)
+ goto err_irq;
+
+ scsi_scan_host (shpnt);
+ return 0;
+
+ err_irq:
+ free_irq(irq_level, shpnt);
+ err_unmap:
+ dma_unmap_single (&edev->dev, host->ecb_dma_addr,
+ sizeof (host->ecb), DMA_BIDIRECTIONAL);
+ err_host_put:
+ scsi_host_put (shpnt);
+ err_release_region:
+ release_region(slotbase, SLOTSIZE);
+
+ return -ENODEV;
+}
+
+static int aha1740_remove (struct device *dev)
+{
+ struct Scsi_Host *shpnt = dev_get_drvdata(dev);
+ struct aha1740_hostdata *host = HOSTDATA (shpnt);
+
+ scsi_remove_host(shpnt);
+
+ free_irq (shpnt->irq, shpnt);
+ dma_unmap_single (dev, host->ecb_dma_addr,
+ sizeof (host->ecb), DMA_BIDIRECTIONAL);
+ release_region (shpnt->io_port, SLOTSIZE);
+
+ scsi_host_put (shpnt);
+
+ return 0;
+}
+
+static struct eisa_device_id aha1740_ids[] = {
+ { "ADP0000" }, /* 1740 */
+ { "ADP0001" }, /* 1740A */
+ { "ADP0002" }, /* 1742A */
+ { "ADP0400" }, /* 1744 */
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, aha1740_ids);
+
+static struct eisa_driver aha1740_driver = {
+ .id_table = aha1740_ids,
+ .driver = {
+ .name = "aha1740",
+ .probe = aha1740_probe,
+ .remove = aha1740_remove,
+ },
+};
+
+static __init int aha1740_init (void)
+{
+ return eisa_driver_register (&aha1740_driver);
+}
+
+static __exit void aha1740_exit (void)
+{
+ eisa_driver_unregister (&aha1740_driver);
+}
+
+module_init (aha1740_init);
+module_exit (aha1740_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
new file mode 100644
index 000000000..af23fd6bd
--- /dev/null
+++ b/drivers/scsi/aha1740.h
@@ -0,0 +1,154 @@
+#ifndef _AHA1740_H
+
+/* $Id$
+ *
+ * Header file for the adaptec 1740 driver for Linux
+ *
+ * With minor revisions 3/31/93
+ * Written and (C) 1992,1993 Brad McLean. See aha1740.c
+ * for more info
+ *
+ */
+
+#include <linux/types.h>
+
+#define SLOTSIZE 0x5c
+
+/* EISA configuration registers & values */
+#define HID0(base) (base + 0x0)
+#define HID1(base) (base + 0x1)
+#define HID2(base) (base + 0x2)
+#define HID3(base) (base + 0x3)
+#define EBCNTRL(base) (base + 0x4)
+#define PORTADR(base) (base + 0x40)
+#define BIOSADR(base) (base + 0x41)
+#define INTDEF(base) (base + 0x42)
+#define SCSIDEF(base) (base + 0x43)
+#define BUSDEF(base) (base + 0x44)
+#define RESV0(base) (base + 0x45)
+#define RESV1(base) (base + 0x46)
+#define RESV2(base) (base + 0x47)
+
+#define HID_MFG "ADP"
+#define HID_PRD 0
+#define HID_REV 2
+#define EBCNTRL_VALUE 1
+#define PORTADDR_ENH 0x80
+/* READ */
+#define G2INTST(base) (base + 0x56)
+#define G2STAT(base) (base + 0x57)
+#define MBOXIN0(base) (base + 0x58)
+#define MBOXIN1(base) (base + 0x59)
+#define MBOXIN2(base) (base + 0x5a)
+#define MBOXIN3(base) (base + 0x5b)
+#define G2STAT2(base) (base + 0x5c)
+
+#define G2INTST_MASK 0xf0 /* isolate the status */
+#define G2INTST_CCBGOOD 0x10 /* CCB Completed */
+#define G2INTST_CCBRETRY 0x50 /* CCB Completed with a retry */
+#define G2INTST_HARDFAIL 0x70 /* Adapter Hardware Failure */
+#define G2INTST_CMDGOOD 0xa0 /* Immediate command success */
+#define G2INTST_CCBERROR 0xc0 /* CCB Completed with error */
+#define G2INTST_ASNEVENT 0xd0 /* Asynchronous Event Notification */
+#define G2INTST_CMDERROR 0xe0 /* Immediate command error */
+
+#define G2STAT_MBXOUT 4 /* Mailbox Out Empty Bit */
+#define G2STAT_INTPEND 2 /* Interrupt Pending Bit */
+#define G2STAT_BUSY 1 /* Busy Bit (attention pending) */
+
+#define G2STAT2_READY 0 /* Host Ready Bit */
+
+/* WRITE (and ReadBack) */
+#define MBOXOUT0(base) (base + 0x50)
+#define MBOXOUT1(base) (base + 0x51)
+#define MBOXOUT2(base) (base + 0x52)
+#define MBOXOUT3(base) (base + 0x53)
+#define ATTN(base) (base + 0x54)
+#define G2CNTRL(base) (base + 0x55)
+
+#define ATTN_IMMED 0x10 /* Immediate Command */
+#define ATTN_START 0x40 /* Start CCB */
+#define ATTN_ABORT 0x50 /* Abort CCB */
+
+#define G2CNTRL_HRST 0x80 /* Hard Reset */
+#define G2CNTRL_IRST 0x40 /* Clear EISA Interrupt */
+#define G2CNTRL_HRDY 0x20 /* Sets HOST ready */
+
+/* This is used with scatter-gather */
+struct aha1740_chain {
+ u32 dataptr; /* Location of data */
+ u32 datalen; /* Size of this part of chain */
+};
+
+/* These belong in scsi.h */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define MAX_STATUS 32
+
+struct ecb { /* Enhanced Control Block 6.1 */
+ u16 cmdw; /* Command Word */
+ /* Flag Word 1 */
+ u16 cne:1, /* Control Block Chaining */
+ :6, di:1, /* Disable Interrupt */
+ :2, ses:1, /* Suppress Underrun error */
+ :1, sg:1, /* Scatter/Gather */
+ :1, dsb:1, /* Disable Status Block */
+ ars:1; /* Automatic Request Sense */
+ /* Flag Word 2 */
+ u16 lun:3, /* Logical Unit */
+ tag:1, /* Tagged Queuing */
+ tt:2, /* Tag Type */
+ nd:1, /* No Disconnect */
+ :1, dat:1, /* Data transfer - check direction */
+ dir:1, /* Direction of transfer 1 = datain */
+ st:1, /* Suppress Transfer */
+ chk:1, /* Calculate Checksum */
+ :2, rec:1,:1; /* Error Recovery */
+ u16 nil0; /* nothing */
+ u32 dataptr; /* Data or Scatter List ptr */
+ u32 datalen; /* Data or Scatter List len */
+ u32 statusptr; /* Status Block ptr */
+ u32 linkptr; /* Chain Address */
+ u32 nil1; /* nothing */
+ u32 senseptr; /* Sense Info Pointer */
+ u8 senselen; /* Sense Length */
+ u8 cdblen; /* CDB Length */
+ u16 datacheck; /* Data checksum */
+ u8 cdb[MAX_CDB]; /* CDB area */
+/* Hardware defined portion ends here, rest is driver defined */
+ u8 sense[MAX_SENSE]; /* Sense area */
+ u8 status[MAX_STATUS]; /* Status area */
+ Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done) (Scsi_Cmnd *); /* Completion Function */
+};
+
+#define AHA1740CMD_NOP 0x00 /* No OP */
+#define AHA1740CMD_INIT 0x01 /* Initiator SCSI Command */
+#define AHA1740CMD_DIAG 0x05 /* Run Diagnostic Command */
+#define AHA1740CMD_SCSI 0x06 /* Initialize SCSI */
+#define AHA1740CMD_SENSE 0x08 /* Read Sense Information */
+#define AHA1740CMD_DOWN 0x09 /* Download Firmware (yeah, I bet!) */
+#define AHA1740CMD_RINQ 0x0a /* Read Host Adapter Inquiry Data */
+#define AHA1740CMD_TARG 0x10 /* Target SCSI Command */
+
+#define AHA1740_ECBS 32
+#define AHA1740_SCATTER 16
+#define AHA1740_CMDLUN 1
+
+#endif
diff --git a/drivers/scsi/aic7xxx/.gitignore b/drivers/scsi/aic7xxx/.gitignore
new file mode 100644
index 000000000..b8ee24d57
--- /dev/null
+++ b/drivers/scsi/aic7xxx/.gitignore
@@ -0,0 +1,6 @@
+aic79xx_reg.h
+aic79xx_reg_print.c
+aic79xx_seq.h
+aic7xxx_reg.h
+aic7xxx_reg_print.c
+aic7xxx_seq.h
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
new file mode 100644
index 000000000..3b3d59910
--- /dev/null
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -0,0 +1,85 @@
+#
+# AIC79XX 2.5.X Kernel configuration File.
+# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic79xx#4 $
+#
+config SCSI_AIC79XX
+ tristate "Adaptec AIC79xx U320 support"
+ depends on PCI && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This driver supports all of Adaptec's Ultra 320 PCI-X
+ based SCSI controllers.
+
+config AIC79XX_CMDS_PER_DEVICE
+ int "Maximum number of TCQ commands per device"
+ depends on SCSI_AIC79XX
+ default "32"
+ ---help---
+ Specify the number of commands you would like to allocate per SCSI
+ device when Tagged Command Queueing (TCQ) is enabled on that device.
+
+ This is an upper bound value for the number of tagged transactions
+ to be used for any device. The aic7xxx driver will automatically
+ vary this number based on device behavior. For devices with a
+ fixed maximum, the driver will eventually lock to this maximum
+ and display a console message indicating this value.
+
+ Due to resource allocation issues in the Linux SCSI mid-layer, using
+ a high number of commands per device may result in memory allocation
+ failures when many devices are attached to the system. For this reason,
+ the default is set to 32. Higher values may result in higher performance
+ on some devices. The upper bound is 253. 0 disables tagged queueing.
+
+ Per device tag depth can be controlled via the kernel command line
+ "tag_info" option. See Documentation/scsi/aic79xx.txt for details.
+
+config AIC79XX_RESET_DELAY_MS
+ int "Initial bus reset delay in milli-seconds"
+ depends on SCSI_AIC79XX
+ default "5000"
+ ---help---
+ The number of milliseconds to delay after an initial bus reset.
+ The bus settle delay following all error recovery actions is
+ dictated by the SCSI layer and is not affected by this value.
+
+ Default: 5000 (5 seconds)
+
+config AIC79XX_BUILD_FIRMWARE
+ bool "Build Adapter Firmware with Kernel Build"
+ depends on SCSI_AIC79XX && !PREVENT_FIRMWARE_BUILD
+ help
+ This option should only be enabled if you are modifying the firmware
+ source to the aic79xx driver and wish to have the generated firmware
+ include files updated during a normal kernel build. The assembler
+ for the firmware requires lex and yacc or their equivalents, as well
+ as the db v1 library. You may have to install additional packages
+ or modify the assembler Makefile or the files it includes if your
+ build environment is different than that of the author.
+
+config AIC79XX_DEBUG_ENABLE
+ bool "Compile in Debugging Code"
+ depends on SCSI_AIC79XX
+ default y
+ help
+ Compile in aic79xx debugging code that can be useful in diagnosing
+ driver errors.
+
+config AIC79XX_DEBUG_MASK
+ int "Debug code enable mask (16383 for all debugging)"
+ depends on SCSI_AIC79XX
+ default "0"
+ help
+ Bit mask of debug options that is only valid if the
+ CONFIG_AIC79XX_DEBUG_ENABLE option is enabled. The bits in this mask
+ are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the
+ variable ahd_debug in that file to find them.
+
+config AIC79XX_REG_PRETTY_PRINT
+ bool "Decode registers during diagnostics"
+ depends on SCSI_AIC79XX
+ default y
+ help
+ Compile in register value tables for the output of expanded register
+ contents in diagnostics. This make it much easier to understand debug
+ output without having to refer to a data book and/or the aic7xxx.reg
+ file.
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
new file mode 100644
index 000000000..55ac55ee6
--- /dev/null
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -0,0 +1,90 @@
+#
+# AIC7XXX and AIC79XX 2.5.X Kernel configuration File.
+# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
+#
+config SCSI_AIC7XXX
+ tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
+ depends on (PCI || EISA) && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ This driver supports all of Adaptec's Fast through Ultra 160 PCI
+ based SCSI controllers as well as the aic7770 based EISA and VLB
+ SCSI controllers (the 274x and 284x series). For AAA and ARO based
+ configurations, only SCSI functionality is provided.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aic7xxx.
+
+config AIC7XXX_CMDS_PER_DEVICE
+ int "Maximum number of TCQ commands per device"
+ depends on SCSI_AIC7XXX
+ default "32"
+ ---help---
+ Specify the number of commands you would like to allocate per SCSI
+ device when Tagged Command Queueing (TCQ) is enabled on that device.
+
+ This is an upper bound value for the number of tagged transactions
+ to be used for any device. The aic7xxx driver will automatically
+ vary this number based on device behavior. For devices with a
+ fixed maximum, the driver will eventually lock to this maximum
+ and display a console message indicating this value.
+
+ Due to resource allocation issues in the Linux SCSI mid-layer, using
+ a high number of commands per device may result in memory allocation
+ failures when many devices are attached to the system. For this reason,
+ the default is set to 32. Higher values may result in higher performance
+ on some devices. The upper bound is 253. 0 disables tagged queueing.
+
+ Per device tag depth can be controlled via the kernel command line
+ "tag_info" option. See Documentation/scsi/aic7xxx.txt for details.
+
+config AIC7XXX_RESET_DELAY_MS
+ int "Initial bus reset delay in milli-seconds"
+ depends on SCSI_AIC7XXX
+ default "5000"
+ ---help---
+ The number of milliseconds to delay after an initial bus reset.
+ The bus settle delay following all error recovery actions is
+ dictated by the SCSI layer and is not affected by this value.
+
+ Default: 5000 (5 seconds)
+
+config AIC7XXX_BUILD_FIRMWARE
+ bool "Build Adapter Firmware with Kernel Build"
+ depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD
+ help
+ This option should only be enabled if you are modifying the firmware
+ source to the aic7xxx driver and wish to have the generated firmware
+ include files updated during a normal kernel build. The assembler
+ for the firmware requires lex and yacc or their equivalents, as well
+ as the db v1 library. You may have to install additional packages
+ or modify the assembler Makefile or the files it includes if your
+ build environment is different than that of the author.
+
+config AIC7XXX_DEBUG_ENABLE
+ bool "Compile in Debugging Code"
+ depends on SCSI_AIC7XXX
+ default y
+ help
+ Compile in aic7xxx debugging code that can be useful in diagnosing
+ driver errors.
+
+config AIC7XXX_DEBUG_MASK
+ int "Debug code enable mask (2047 for all debugging)"
+ depends on SCSI_AIC7XXX
+ default "0"
+ help
+ Bit mask of debug options that is only valid if the
+ CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
+ are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
+ variable ahc_debug in that file to find them.
+
+config AIC7XXX_REG_PRETTY_PRINT
+ bool "Decode registers during diagnostics"
+ depends on SCSI_AIC7XXX
+ default y
+ help
+ Compile in register value tables for the output of expanded register
+ contents in diagnostics. This make it much easier to understand debug
+ output without having to refer to a data book and/or the aic7xxx.reg
+ file.
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
new file mode 100644
index 000000000..741d81861
--- /dev/null
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -0,0 +1,85 @@
+#
+# Makefile for the Linux aic7xxx SCSI driver.
+#
+# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Makefile#8 $
+#
+
+# Let kbuild descend into aicasm when cleaning
+subdir- += aicasm
+
+obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx.o
+obj-$(CONFIG_SCSI_AIC79XX) += aic79xx.o
+
+# Core Fast -> U160 files
+aic7xxx-y += aic7xxx_core.o \
+ aic7xxx_93cx6.o
+aic7xxx-$(CONFIG_EISA) += aic7770.o
+aic7xxx-$(CONFIG_PCI) += aic7xxx_pci.o
+aic7xxx-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += aic7xxx_reg_print.o
+
+# Platform Specific Fast -> U160 Files
+aic7xxx-y += aic7xxx_osm.o \
+ aic7xxx_proc.o
+aic7xxx-$(CONFIG_EISA) += aic7770_osm.o
+aic7xxx-$(CONFIG_PCI) += aic7xxx_osm_pci.o
+
+# Core U320 files
+aic79xx-y += aic79xx_core.o \
+ aic79xx_pci.o
+aic79xx-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += aic79xx_reg_print.o
+
+# Platform Specific U320 Files
+aic79xx-y += aic79xx_osm.o \
+ aic79xx_proc.o \
+ aic79xx_osm_pci.o
+
+ccflags-y += -Idrivers/scsi
+ifdef WARNINGS_BECOME_ERRORS
+ccflags-y += -Werror
+endif
+
+# Files generated that shall be removed upon make clean
+clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c
+clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c
+
+# Dependencies for generated files need to be listed explicitly
+
+$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h $(obj)/aic7xxx_reg.h
+$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h $(obj)/aic79xx_reg.h
+
+aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h
+aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c
+
+aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
+ -p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h
+
+ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
+$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
+ $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
+ $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
+ $(src)/aic7xxx.seq
+
+$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
+else
+$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
+endif
+
+aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_reg.h
+aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += $(obj)/aic79xx_reg_print.c
+
+aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
+ -p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h
+
+ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
+$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
+ $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
+ $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
+ $(src)/aic79xx.seq
+
+$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
+else
+$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
+endif
+
+$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
+ $(MAKE) -C $(src)/aicasm
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
new file mode 100644
index 000000000..5000bd69c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -0,0 +1,391 @@
+/*
+ * Product specific probe and attach routines for:
+ * 27/284X and aic7770 motherboard SCSI controllers
+ *
+ * Copyright (c) 1994-1998, 2000, 2001 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7770.c#32 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aic7xxx_93cx6.h>
+#endif
+
+#define ID_AIC7770 0x04907770
+#define ID_AHA_274x 0x04907771
+#define ID_AHA_284xB 0x04907756 /* BIOS enabled */
+#define ID_AHA_284x 0x04907757 /* BIOS disabled*/
+#define ID_OLV_274x 0x04907782 /* Olivetti OEM */
+#define ID_OLV_274xD 0x04907783 /* Olivetti OEM (Differential) */
+
+static int aic7770_chip_init(struct ahc_softc *ahc);
+static int aha2840_load_seeprom(struct ahc_softc *ahc);
+static ahc_device_setup_t ahc_aic7770_VL_setup;
+static ahc_device_setup_t ahc_aic7770_EISA_setup;
+static ahc_device_setup_t ahc_aic7770_setup;
+
+struct aic7770_identity aic7770_ident_table[] =
+{
+ {
+ ID_AHA_274x,
+ 0xFFFFFFFF,
+ "Adaptec 274X SCSI adapter",
+ ahc_aic7770_EISA_setup
+ },
+ {
+ ID_AHA_284xB,
+ 0xFFFFFFFE,
+ "Adaptec 284X SCSI adapter",
+ ahc_aic7770_VL_setup
+ },
+ {
+ ID_AHA_284x,
+ 0xFFFFFFFE,
+ "Adaptec 284X SCSI adapter (BIOS Disabled)",
+ ahc_aic7770_VL_setup
+ },
+ {
+ ID_OLV_274x,
+ 0xFFFFFFFF,
+ "Adaptec (Olivetti OEM) 274X SCSI adapter",
+ ahc_aic7770_EISA_setup
+ },
+ {
+ ID_OLV_274xD,
+ 0xFFFFFFFF,
+ "Adaptec (Olivetti OEM) 274X Differential SCSI adapter",
+ ahc_aic7770_EISA_setup
+ },
+ /* Generic chip probes for devices we don't know 'exactly' */
+ {
+ ID_AIC7770,
+ 0xFFFFFFFF,
+ "Adaptec aic7770 SCSI adapter",
+ ahc_aic7770_EISA_setup
+ }
+};
+const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table);
+
+struct aic7770_identity *
+aic7770_find_device(uint32_t id)
+{
+ struct aic7770_identity *entry;
+ int i;
+
+ for (i = 0; i < ahc_num_aic7770_devs; i++) {
+ entry = &aic7770_ident_table[i];
+ if (entry->full_id == (id & entry->id_mask))
+ return (entry);
+ }
+ return (NULL);
+}
+
+int
+aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
+{
+ int error;
+ int have_seeprom;
+ u_int hostconf;
+ u_int irq;
+ u_int intdef;
+
+ error = entry->setup(ahc);
+ have_seeprom = 0;
+ if (error != 0)
+ return (error);
+
+ error = aic7770_map_registers(ahc, io);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Before we continue probing the card, ensure that
+ * its interrupts are *disabled*. We don't want
+ * a misstep to hang the machine in an interrupt
+ * storm.
+ */
+ ahc_intr_enable(ahc, FALSE);
+
+ ahc->description = entry->name;
+ error = ahc_softc_init(ahc);
+ if (error != 0)
+ return (error);
+
+ ahc->bus_chip_init = aic7770_chip_init;
+
+ error = ahc_reset(ahc, /*reinit*/FALSE);
+ if (error != 0)
+ return (error);
+
+ /* Make sure we have a valid interrupt vector */
+ intdef = ahc_inb(ahc, INTDEF);
+ irq = intdef & VECTOR;
+ switch (irq) {
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 14:
+ case 15:
+ break;
+ default:
+ printk("aic7770_config: invalid irq setting %d\n", intdef);
+ return (ENXIO);
+ }
+
+ if ((intdef & EDGE_TRIG) != 0)
+ ahc->flags |= AHC_EDGE_INTERRUPT;
+
+ switch (ahc->chip & (AHC_EISA|AHC_VL)) {
+ case AHC_EISA:
+ {
+ u_int biosctrl;
+ u_int scsiconf;
+ u_int scsiconf1;
+
+ biosctrl = ahc_inb(ahc, HA_274_BIOSCTRL);
+ scsiconf = ahc_inb(ahc, SCSICONF);
+ scsiconf1 = ahc_inb(ahc, SCSICONF + 1);
+
+ /* Get the primary channel information */
+ if ((biosctrl & CHANNEL_B_PRIMARY) != 0)
+ ahc->flags |= 1;
+
+ if ((biosctrl & BIOSMODE) == BIOSDISABLED) {
+ ahc->flags |= AHC_USEDEFAULTS;
+ } else {
+ if ((ahc->features & AHC_WIDE) != 0) {
+ ahc->our_id = scsiconf1 & HWSCSIID;
+ if (scsiconf & TERM_ENB)
+ ahc->flags |= AHC_TERM_ENB_A;
+ } else {
+ ahc->our_id = scsiconf & HSCSIID;
+ ahc->our_id_b = scsiconf1 & HSCSIID;
+ if (scsiconf & TERM_ENB)
+ ahc->flags |= AHC_TERM_ENB_A;
+ if (scsiconf1 & TERM_ENB)
+ ahc->flags |= AHC_TERM_ENB_B;
+ }
+ }
+ if ((ahc_inb(ahc, HA_274_BIOSGLOBAL) & HA_274_EXTENDED_TRANS))
+ ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B;
+ break;
+ }
+ case AHC_VL:
+ {
+ have_seeprom = aha2840_load_seeprom(ahc);
+ break;
+ }
+ default:
+ break;
+ }
+ if (have_seeprom == 0) {
+ kfree(ahc->seep_config);
+ ahc->seep_config = NULL;
+ }
+
+ /*
+ * Ensure autoflush is enabled
+ */
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS);
+
+ /* Setup the FIFO threshold and the bus off time */
+ hostconf = ahc_inb(ahc, HOSTCONF);
+ ahc_outb(ahc, BUSSPD, hostconf & DFTHRSH);
+ ahc_outb(ahc, BUSTIME, (hostconf << 2) & BOFF);
+
+ ahc->bus_softc.aic7770_softc.busspd = hostconf & DFTHRSH;
+ ahc->bus_softc.aic7770_softc.bustime = (hostconf << 2) & BOFF;
+
+ /*
+ * Generic aic7xxx initialization.
+ */
+ error = ahc_init(ahc);
+ if (error != 0)
+ return (error);
+
+ error = aic7770_map_int(ahc, irq);
+ if (error != 0)
+ return (error);
+
+ ahc->init_level++;
+
+ /*
+ * Enable the board's BUS drivers
+ */
+ ahc_outb(ahc, BCTL, ENABLE);
+ return (0);
+}
+
+static int
+aic7770_chip_init(struct ahc_softc *ahc)
+{
+ ahc_outb(ahc, BUSSPD, ahc->bus_softc.aic7770_softc.busspd);
+ ahc_outb(ahc, BUSTIME, ahc->bus_softc.aic7770_softc.bustime);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS);
+ ahc_outb(ahc, BCTL, ENABLE);
+ return (ahc_chip_init(ahc));
+}
+
+/*
+ * Read the 284x SEEPROM.
+ */
+static int
+aha2840_load_seeprom(struct ahc_softc *ahc)
+{
+ struct seeprom_descriptor sd;
+ struct seeprom_config *sc;
+ int have_seeprom;
+ uint8_t scsi_conf;
+
+ sd.sd_ahc = ahc;
+ sd.sd_control_offset = SEECTL_2840;
+ sd.sd_status_offset = STATUS_2840;
+ sd.sd_dataout_offset = STATUS_2840;
+ sd.sd_chip = C46;
+ sd.sd_MS = 0;
+ sd.sd_RDY = EEPROM_TF;
+ sd.sd_CS = CS_2840;
+ sd.sd_CK = CK_2840;
+ sd.sd_DO = DO_2840;
+ sd.sd_DI = DI_2840;
+ sc = ahc->seep_config;
+
+ if (bootverbose)
+ printk("%s: Reading SEEPROM...", ahc_name(ahc));
+ have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
+ /*start_addr*/0, sizeof(*sc)/2);
+
+ if (have_seeprom) {
+
+ if (ahc_verify_cksum(sc) == 0) {
+ if(bootverbose)
+ printk ("checksum error\n");
+ have_seeprom = 0;
+ } else if (bootverbose) {
+ printk("done.\n");
+ }
+ }
+
+ if (!have_seeprom) {
+ if (bootverbose)
+ printk("%s: No SEEPROM available\n", ahc_name(ahc));
+ ahc->flags |= AHC_USEDEFAULTS;
+ } else {
+ /*
+ * Put the data we've collected down into SRAM
+ * where ahc_init will find it.
+ */
+ int i;
+ int max_targ;
+ uint16_t discenable;
+
+ max_targ = (ahc->features & AHC_WIDE) != 0 ? 16 : 8;
+ discenable = 0;
+ for (i = 0; i < max_targ; i++){
+ uint8_t target_settings;
+
+ target_settings = (sc->device_flags[i] & CFXFER) << 4;
+ if (sc->device_flags[i] & CFSYNCH)
+ target_settings |= SOFS;
+ if (sc->device_flags[i] & CFWIDEB)
+ target_settings |= WIDEXFER;
+ if (sc->device_flags[i] & CFDISC)
+ discenable |= (0x01 << i);
+ ahc_outb(ahc, TARG_SCSIRATE + i, target_settings);
+ }
+ ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff));
+ ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff));
+
+ ahc->our_id = sc->brtime_id & CFSCSIID;
+
+ scsi_conf = (ahc->our_id & 0x7);
+ if (sc->adapter_control & CFSPARITY)
+ scsi_conf |= ENSPCHK;
+ if (sc->adapter_control & CFRESETB)
+ scsi_conf |= RESET_SCSI;
+
+ if (sc->bios_control & CF284XEXTEND)
+ ahc->flags |= AHC_EXTENDED_TRANS_A;
+ /* Set SCSICONF info */
+ ahc_outb(ahc, SCSICONF, scsi_conf);
+
+ if (sc->adapter_control & CF284XSTERM)
+ ahc->flags |= AHC_TERM_ENB_A;
+ }
+ return (have_seeprom);
+}
+
+static int
+ahc_aic7770_VL_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7770_setup(ahc);
+ ahc->chip |= AHC_VL;
+ return (error);
+}
+
+static int
+ahc_aic7770_EISA_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7770_setup(ahc);
+ ahc->chip |= AHC_EISA;
+ return (error);
+}
+
+static int
+ahc_aic7770_setup(struct ahc_softc *ahc)
+{
+ ahc->channel = 'A';
+ ahc->channel_b = 'B';
+ ahc->chip = AHC_AIC7770;
+ ahc->features = AHC_AIC7770_FE;
+ ahc->bugs |= AHC_TMODE_WIDEODD_BUG;
+ ahc->flags |= AHC_PAGESCBS;
+ ahc->instruction_ram_size = 448;
+ return (0);
+}
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
new file mode 100644
index 000000000..3d401d02c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -0,0 +1,155 @@
+/*
+ * Linux driver attachment glue for aic7770 based controllers.
+ *
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7770_osm.c#14 $
+ */
+
+#include "aic7xxx_osm.h"
+
+#include <linux/device.h>
+#include <linux/eisa.h>
+
+int
+aic7770_map_registers(struct ahc_softc *ahc, u_int port)
+{
+ /*
+ * Lock out other contenders for our i/o space.
+ */
+ if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx"))
+ return (ENOMEM);
+ ahc->tag = BUS_SPACE_PIO;
+ ahc->bsh.ioport = port;
+ return (0);
+}
+
+int
+aic7770_map_int(struct ahc_softc *ahc, u_int irq)
+{
+ int error;
+ int shared;
+
+ shared = 0;
+ if ((ahc->flags & AHC_EDGE_INTERRUPT) == 0)
+ shared = IRQF_SHARED;
+
+ error = request_irq(irq, ahc_linux_isr, shared, "aic7xxx", ahc);
+ if (error == 0)
+ ahc->platform_data->irq = irq;
+
+ return (-error);
+}
+
+static int
+aic7770_probe(struct device *dev)
+{
+ struct eisa_device *edev = to_eisa_device(dev);
+ u_int eisaBase = edev->base_addr+AHC_EISA_SLOT_OFFSET;
+ struct ahc_softc *ahc;
+ char buf[80];
+ char *name;
+ int error;
+
+ sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
+ name = kstrdup(buf, GFP_ATOMIC);
+ if (name == NULL)
+ return (ENOMEM);
+ ahc = ahc_alloc(&aic7xxx_driver_template, name);
+ if (ahc == NULL)
+ return (ENOMEM);
+ error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
+ eisaBase);
+ if (error != 0) {
+ ahc->bsh.ioport = 0;
+ ahc_free(ahc);
+ return (error);
+ }
+
+ dev_set_drvdata(dev, ahc);
+
+ error = ahc_linux_register_host(ahc, &aic7xxx_driver_template);
+ return (error);
+}
+
+static int
+aic7770_remove(struct device *dev)
+{
+ struct ahc_softc *ahc = dev_get_drvdata(dev);
+ u_long s;
+
+ if (ahc->platform_data && ahc->platform_data->host)
+ scsi_remove_host(ahc->platform_data->host);
+
+ ahc_lock(ahc, &s);
+ ahc_intr_enable(ahc, FALSE);
+ ahc_unlock(ahc, &s);
+
+ ahc_free(ahc);
+ return 0;
+}
+
+static struct eisa_device_id aic7770_ids[] = {
+ { "ADP7771", 0 }, /* AHA 274x */
+ { "ADP7756", 1 }, /* AHA 284x BIOS enabled */
+ { "ADP7757", 2 }, /* AHA 284x BIOS disabled */
+ { "ADP7782", 3 }, /* AHA 274x Olivetti OEM */
+ { "ADP7783", 4 }, /* AHA 274x Olivetti OEM (Differential) */
+ { "ADP7770", 5 }, /* AIC7770 generic */
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, aic7770_ids);
+
+static struct eisa_driver aic7770_driver = {
+ .id_table = aic7770_ids,
+ .driver = {
+ .name = "aic7xxx",
+ .probe = aic7770_probe,
+ .remove = aic7770_remove,
+ }
+};
+
+int
+ahc_linux_eisa_init(void)
+{
+ return eisa_driver_register(&aic7770_driver);
+}
+
+void
+ahc_linux_eisa_exit(void)
+{
+ eisa_driver_unregister(&aic7770_driver);
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
new file mode 100644
index 000000000..df2e0e536
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -0,0 +1,1478 @@
+/*
+ * Core definitions and data structures shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2002 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#109 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC79XX_H_
+#define _AIC79XX_H_
+
+/* Register Definitions */
+#include "aic79xx_reg.h"
+
+/************************* Forward Declarations *******************************/
+struct ahd_platform_data;
+struct scb_platform_data;
+
+/****************************** Useful Macros *********************************/
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define ALL_CHANNELS '\0'
+#define ALL_TARGETS_MASK 0xFFFF
+#define INITIATOR_WILDCARD (~0)
+#define SCB_LIST_NULL 0xFF00
+#define SCB_LIST_NULL_LE (ahd_htole16(SCB_LIST_NULL))
+#define QOUTFIFO_ENTRY_VALID 0x80
+#define SCBID_IS_NULL(scbid) (((scbid) & 0xFF00 ) == SCB_LIST_NULL)
+
+#define SCSIID_TARGET(ahd, scsiid) \
+ (((scsiid) & TID) >> TID_SHIFT)
+#define SCSIID_OUR_ID(scsiid) \
+ ((scsiid) & OID)
+#define SCSIID_CHANNEL(ahd, scsiid) ('A')
+#define SCB_IS_SCSIBUS_B(ahd, scb) (0)
+#define SCB_GET_OUR_ID(scb) \
+ SCSIID_OUR_ID((scb)->hscb->scsiid)
+#define SCB_GET_TARGET(ahd, scb) \
+ SCSIID_TARGET((ahd), (scb)->hscb->scsiid)
+#define SCB_GET_CHANNEL(ahd, scb) \
+ SCSIID_CHANNEL(ahd, (scb)->hscb->scsiid)
+#define SCB_GET_LUN(scb) \
+ ((scb)->hscb->lun)
+#define SCB_GET_TARGET_OFFSET(ahd, scb) \
+ SCB_GET_TARGET(ahd, scb)
+#define SCB_GET_TARGET_MASK(ahd, scb) \
+ (0x01 << (SCB_GET_TARGET_OFFSET(ahd, scb)))
+#ifdef AHD_DEBUG
+#define SCB_IS_SILENT(scb) \
+ ((ahd_debug & AHD_SHOW_MASKED_ERRORS) == 0 \
+ && (((scb)->flags & SCB_SILENT) != 0))
+#else
+#define SCB_IS_SILENT(scb) \
+ (((scb)->flags & SCB_SILENT) != 0)
+#endif
+/*
+ * TCLs have the following format: TTTTLLLLLLLL
+ */
+#define TCL_TARGET_OFFSET(tcl) \
+ ((((tcl) >> 4) & TID) >> 4)
+#define TCL_LUN(tcl) \
+ (tcl & (AHD_NUM_LUNS - 1))
+#define BUILD_TCL(scsiid, lun) \
+ ((lun) | (((scsiid) & TID) << 4))
+#define BUILD_TCL_RAW(target, channel, lun) \
+ ((lun) | ((target) << 8))
+
+#define SCB_GET_TAG(scb) \
+ ahd_le16toh(scb->hscb->tag)
+
+#ifndef AHD_TARGET_MODE
+#undef AHD_TMODE_ENABLE
+#define AHD_TMODE_ENABLE 0
+#endif
+
+#define AHD_BUILD_COL_IDX(target, lun) \
+ ((((u8)lun) << 4) | target)
+
+#define AHD_GET_SCB_COL_IDX(ahd, scb) \
+ ((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb))
+
+#define AHD_SET_SCB_COL_IDX(scb, col_idx) \
+do { \
+ (scb)->hscb->scsiid = ((col_idx) << TID_SHIFT) & TID; \
+ (scb)->hscb->lun = ((col_idx) >> 4) & (AHD_NUM_LUNS_NONPKT-1); \
+} while (0)
+
+#define AHD_COPY_SCB_COL_IDX(dst, src) \
+do { \
+ dst->hscb->scsiid = src->hscb->scsiid; \
+ dst->hscb->lun = src->hscb->lun; \
+} while (0)
+
+#define AHD_NEVER_COL_IDX 0xFFFF
+
+/**************************** Driver Constants ********************************/
+/*
+ * The maximum number of supported targets.
+ */
+#define AHD_NUM_TARGETS 16
+
+/*
+ * The maximum number of supported luns.
+ * The identify message only supports 64 luns in non-packetized transfers.
+ * You can have 2^64 luns when information unit transfers are enabled,
+ * but until we see a need to support that many, we support 256.
+ */
+#define AHD_NUM_LUNS_NONPKT 64
+#define AHD_NUM_LUNS 256
+
+/*
+ * The maximum transfer per S/G segment.
+ */
+#define AHD_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */
+
+/*
+ * The maximum amount of SCB storage in hardware on a controller.
+ * This value represents an upper bound. Due to software design,
+ * we may not be able to use this number.
+ */
+#define AHD_SCB_MAX 512
+
+/*
+ * The maximum number of concurrent transactions supported per driver instance.
+ * Sequencer Control Blocks (SCBs) store per-transaction information.
+ */
+#define AHD_MAX_QUEUE AHD_SCB_MAX
+
+/*
+ * Define the size of our QIN and QOUT FIFOs. They must be a power of 2
+ * in size and accommodate as many transactions as can be queued concurrently.
+ */
+#define AHD_QIN_SIZE AHD_MAX_QUEUE
+#define AHD_QOUT_SIZE AHD_MAX_QUEUE
+
+#define AHD_QIN_WRAP(x) ((x) & (AHD_QIN_SIZE-1))
+/*
+ * The maximum amount of SCB storage we allocate in host memory.
+ */
+#define AHD_SCB_MAX_ALLOC AHD_MAX_QUEUE
+
+/*
+ * Ring Buffer of incoming target commands.
+ * We allocate 256 to simplify the logic in the sequencer
+ * by using the natural wrap point of an 8bit counter.
+ */
+#define AHD_TMODE_CMDS 256
+
+/* Reset line assertion time in us */
+#define AHD_BUSRESET_DELAY 25
+
+/******************* Chip Characteristics/Operating Settings *****************/
+/*
+ * Chip Type
+ * The chip order is from least sophisticated to most sophisticated.
+ */
+typedef enum {
+ AHD_NONE = 0x0000,
+ AHD_CHIPID_MASK = 0x00FF,
+ AHD_AIC7901 = 0x0001,
+ AHD_AIC7902 = 0x0002,
+ AHD_AIC7901A = 0x0003,
+ AHD_PCI = 0x0100, /* Bus type PCI */
+ AHD_PCIX = 0x0200, /* Bus type PCIX */
+ AHD_BUS_MASK = 0x0F00
+} ahd_chip;
+
+/*
+ * Features available in each chip type.
+ */
+typedef enum {
+ AHD_FENONE = 0x00000,
+ AHD_WIDE = 0x00001,/* Wide Channel */
+ AHD_AIC79XXB_SLOWCRC = 0x00002,/* SLOWCRC bit should be set */
+ AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */
+ AHD_TARGETMODE = 0x01000,/* Has tested target mode support */
+ AHD_MULTIROLE = 0x02000,/* Space for two roles at a time */
+ AHD_RTI = 0x04000,/* Retained Training Support */
+ AHD_NEW_IOCELL_OPTS = 0x08000,/* More Signal knobs in the IOCELL */
+ AHD_NEW_DFCNTRL_OPTS = 0x10000,/* SCSIENWRDIS bit */
+ AHD_FAST_CDB_DELIVERY = 0x20000,/* CDB acks released to Output Sync */
+ AHD_REMOVABLE = 0x00000,/* Hot-Swap supported - None so far*/
+ AHD_AIC7901_FE = AHD_FENONE,
+ AHD_AIC7901A_FE = AHD_FENONE,
+ AHD_AIC7902_FE = AHD_MULTI_FUNC
+} ahd_feature;
+
+/*
+ * Bugs in the silicon that we work around in software.
+ */
+typedef enum {
+ AHD_BUGNONE = 0x0000,
+ /*
+ * Rev A hardware fails to update LAST/CURR/NEXTSCB
+ * correctly in certain packetized selection cases.
+ */
+ AHD_SENT_SCB_UPDATE_BUG = 0x0001,
+ /* The wrong SCB is accessed to check the abort pending bit. */
+ AHD_ABORT_LQI_BUG = 0x0002,
+ /* Packetized bitbucket crosses packet boundaries. */
+ AHD_PKT_BITBUCKET_BUG = 0x0004,
+ /* The selection timer runs twice as long as its setting. */
+ AHD_LONG_SETIMO_BUG = 0x0008,
+ /* The Non-LQ CRC error status is delayed until phase change. */
+ AHD_NLQICRC_DELAYED_BUG = 0x0010,
+ /* The chip must be reset for all outgoing bus resets. */
+ AHD_SCSIRST_BUG = 0x0020,
+ /* Some PCIX fields must be saved and restored across chip reset. */
+ AHD_PCIX_CHIPRST_BUG = 0x0040,
+ /* MMAPIO is not functional in PCI-X mode. */
+ AHD_PCIX_MMAPIO_BUG = 0x0080,
+ /* Reads to SCBRAM fail to reset the discard timer. */
+ AHD_PCIX_SCBRAM_RD_BUG = 0x0100,
+ /* Bug workarounds that can be disabled on non-PCIX busses. */
+ AHD_PCIX_BUG_MASK = AHD_PCIX_CHIPRST_BUG
+ | AHD_PCIX_MMAPIO_BUG
+ | AHD_PCIX_SCBRAM_RD_BUG,
+ /*
+ * LQOSTOP0 status set even for forced selections with ATN
+ * to perform non-packetized message delivery.
+ */
+ AHD_LQO_ATNO_BUG = 0x0200,
+ /* FIFO auto-flush does not always trigger. */
+ AHD_AUTOFLUSH_BUG = 0x0400,
+ /* The CLRLQO registers are not self-clearing. */
+ AHD_CLRLQO_AUTOCLR_BUG = 0x0800,
+ /* The PACKETIZED status bit refers to the previous connection. */
+ AHD_PKTIZED_STATUS_BUG = 0x1000,
+ /* "Short Luns" are not placed into outgoing LQ packets correctly. */
+ AHD_PKT_LUN_BUG = 0x2000,
+ /*
+ * Only the FIFO allocated to the non-packetized connection may
+ * be in use during a non-packetzied connection.
+ */
+ AHD_NONPACKFIFO_BUG = 0x4000,
+ /*
+ * Writing to a DFF SCBPTR register may fail if concurent with
+ * a hardware write to the other DFF SCBPTR register. This is
+ * not currently a concern in our sequencer since all chips with
+ * this bug have the AHD_NONPACKFIFO_BUG and all writes of concern
+ * occur in non-packetized connections.
+ */
+ AHD_MDFF_WSCBPTR_BUG = 0x8000,
+ /* SGHADDR updates are slow. */
+ AHD_REG_SLOW_SETTLE_BUG = 0x10000,
+ /*
+ * Changing the MODE_PTR coincident with an interrupt that
+ * switches to a different mode will cause the interrupt to
+ * be in the mode written outside of interrupt context.
+ */
+ AHD_SET_MODE_BUG = 0x20000,
+ /* Non-packetized busfree revision does not work. */
+ AHD_BUSFREEREV_BUG = 0x40000,
+ /*
+ * Paced transfers are indicated with a non-standard PPR
+ * option bit in the neg table, 160MHz is indicated by
+ * sync factor 0x7, and the offset if off by a factor of 2.
+ */
+ AHD_PACED_NEGTABLE_BUG = 0x80000,
+ /* LQOOVERRUN false positives. */
+ AHD_LQOOVERRUN_BUG = 0x100000,
+ /*
+ * Controller write to INTSTAT will lose to a host
+ * write to CLRINT.
+ */
+ AHD_INTCOLLISION_BUG = 0x200000,
+ /*
+ * The GEM318 violates the SCSI spec by not waiting
+ * the mandated bus settle delay between phase changes
+ * in some situations. Some aic79xx chip revs. are more
+ * strict in this regard and will treat REQ assertions
+ * that fall within the bus settle delay window as
+ * glitches. This flag tells the firmware to tolerate
+ * early REQ assertions.
+ */
+ AHD_EARLY_REQ_BUG = 0x400000,
+ /*
+ * The LED does not stay on long enough in packetized modes.
+ */
+ AHD_FAINT_LED_BUG = 0x800000
+} ahd_bug;
+
+/*
+ * Configuration specific settings.
+ * The driver determines these settings by probing the
+ * chip/controller's configuration.
+ */
+typedef enum {
+ AHD_FNONE = 0x00000,
+ AHD_BOOT_CHANNEL = 0x00001,/* We were set as the boot channel. */
+ AHD_USEDEFAULTS = 0x00004,/*
+ * For cards without an seeprom
+ * or a BIOS to initialize the chip's
+ * SRAM, we use the default target
+ * settings.
+ */
+ AHD_SEQUENCER_DEBUG = 0x00008,
+ AHD_RESET_BUS_A = 0x00010,
+ AHD_EXTENDED_TRANS_A = 0x00020,
+ AHD_TERM_ENB_A = 0x00040,
+ AHD_SPCHK_ENB_A = 0x00080,
+ AHD_STPWLEVEL_A = 0x00100,
+ AHD_INITIATORROLE = 0x00200,/*
+ * Allow initiator operations on
+ * this controller.
+ */
+ AHD_TARGETROLE = 0x00400,/*
+ * Allow target operations on this
+ * controller.
+ */
+ AHD_RESOURCE_SHORTAGE = 0x00800,
+ AHD_TQINFIFO_BLOCKED = 0x01000,/* Blocked waiting for ATIOs */
+ AHD_INT50_SPEEDFLEX = 0x02000,/*
+ * Internal 50pin connector
+ * sits behind an aic3860
+ */
+ AHD_BIOS_ENABLED = 0x04000,
+ AHD_ALL_INTERRUPTS = 0x08000,
+ AHD_39BIT_ADDRESSING = 0x10000,/* Use 39 bit addressing scheme. */
+ AHD_64BIT_ADDRESSING = 0x20000,/* Use 64 bit addressing scheme. */
+ AHD_CURRENT_SENSING = 0x40000,
+ AHD_SCB_CONFIG_USED = 0x80000,/* No SEEPROM but SCB had info. */
+ AHD_HP_BOARD = 0x100000,
+ AHD_BUS_RESET_ACTIVE = 0x200000,
+ AHD_UPDATE_PEND_CMDS = 0x400000,
+ AHD_RUNNING_QOUTFIFO = 0x800000,
+ AHD_HAD_FIRST_SEL = 0x1000000
+} ahd_flag;
+
+/************************* Hardware SCB Definition ***************************/
+
+/*
+ * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB
+ * consists of a "hardware SCB" mirroring the fields available on the card
+ * and additional information the kernel stores for each transaction.
+ *
+ * To minimize space utilization, a portion of the hardware scb stores
+ * different data during different portions of a SCSI transaction.
+ * As initialized by the host driver for the initiator role, this area
+ * contains the SCSI cdb (or a pointer to the cdb) to be executed. After
+ * the cdb has been presented to the target, this area serves to store
+ * residual transfer information and the SCSI status byte.
+ * For the target role, the contents of this area do not change, but
+ * still serve a different purpose than for the initiator role. See
+ * struct target_data for details.
+ */
+
+/*
+ * Status information embedded in the shared poriton of
+ * an SCB after passing the cdb to the target. The kernel
+ * driver will only read this data for transactions that
+ * complete abnormally.
+ */
+struct initiator_status {
+ uint32_t residual_datacnt; /* Residual in the current S/G seg */
+ uint32_t residual_sgptr; /* The next S/G for this transfer */
+ uint8_t scsi_status; /* Standard SCSI status byte */
+};
+
+struct target_status {
+ uint32_t residual_datacnt; /* Residual in the current S/G seg */
+ uint32_t residual_sgptr; /* The next S/G for this transfer */
+ uint8_t scsi_status; /* SCSI status to give to initiator */
+ uint8_t target_phases; /* Bitmap of phases to execute */
+ uint8_t data_phase; /* Data-In or Data-Out */
+ uint8_t initiator_tag; /* Initiator's transaction tag */
+};
+
+/*
+ * Initiator mode SCB shared data area.
+ * If the embedded CDB is 12 bytes or less, we embed
+ * the sense buffer address in the SCB. This allows
+ * us to retrieve sense information without interrupting
+ * the host in packetized mode.
+ */
+typedef uint32_t sense_addr_t;
+#define MAX_CDB_LEN 16
+#define MAX_CDB_LEN_WITH_SENSE_ADDR (MAX_CDB_LEN - sizeof(sense_addr_t))
+union initiator_data {
+ struct {
+ uint64_t cdbptr;
+ uint8_t cdblen;
+ } cdb_from_host;
+ uint8_t cdb[MAX_CDB_LEN];
+ struct {
+ uint8_t cdb[MAX_CDB_LEN_WITH_SENSE_ADDR];
+ sense_addr_t sense_addr;
+ } cdb_plus_saddr;
+};
+
+/*
+ * Target mode version of the shared data SCB segment.
+ */
+struct target_data {
+ uint32_t spare[2];
+ uint8_t scsi_status; /* SCSI status to give to initiator */
+ uint8_t target_phases; /* Bitmap of phases to execute */
+ uint8_t data_phase; /* Data-In or Data-Out */
+ uint8_t initiator_tag; /* Initiator's transaction tag */
+};
+
+struct hardware_scb {
+/*0*/ union {
+ union initiator_data idata;
+ struct target_data tdata;
+ struct initiator_status istatus;
+ struct target_status tstatus;
+ } shared_data;
+/*
+ * A word about residuals.
+ * The scb is presented to the sequencer with the dataptr and datacnt
+ * fields initialized to the contents of the first S/G element to
+ * transfer. The sgptr field is initialized to the bus address for
+ * the S/G element that follows the first in the in core S/G array
+ * or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid
+ * S/G entry for this transfer (single S/G element transfer with the
+ * first elements address and length preloaded in the dataptr/datacnt
+ * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL.
+ * The SG_FULL_RESID flag ensures that the residual will be correctly
+ * noted even if no data transfers occur. Once the data phase is entered,
+ * the residual sgptr and datacnt are loaded from the sgptr and the
+ * datacnt fields. After each S/G element's dataptr and length are
+ * loaded into the hardware, the residual sgptr is advanced. After
+ * each S/G element is expired, its datacnt field is checked to see
+ * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the
+ * residual sg ptr and the transfer is considered complete. If the
+ * sequencer determines that there is a residual in the tranfer, or
+ * there is non-zero status, it will set the SG_STATUS_VALID flag in
+ * sgptr and dma the scb back into host memory. To sumarize:
+ *
+ * Sequencer:
+ * o A residual has occurred if SG_FULL_RESID is set in sgptr,
+ * or residual_sgptr does not have SG_LIST_NULL set.
+ *
+ * o We are transferring the last segment if residual_datacnt has
+ * the SG_LAST_SEG flag set.
+ *
+ * Host:
+ * o A residual can only have occurred if a completed scb has the
+ * SG_STATUS_VALID flag set. Inspection of the SCSI status field,
+ * the residual_datacnt, and the residual_sgptr field will tell
+ * for sure.
+ *
+ * o residual_sgptr and sgptr refer to the "next" sg entry
+ * and so may point beyond the last valid sg entry for the
+ * transfer.
+ */
+#define SG_PTR_MASK 0xFFFFFFF8
+/*16*/ uint16_t tag; /* Reused by Sequencer. */
+/*18*/ uint8_t control; /* See SCB_CONTROL in aic79xx.reg for details */
+/*19*/ uint8_t scsiid; /*
+ * Selection out Id
+ * Our Id (bits 0-3) Their ID (bits 4-7)
+ */
+/*20*/ uint8_t lun;
+/*21*/ uint8_t task_attribute;
+/*22*/ uint8_t cdb_len;
+/*23*/ uint8_t task_management;
+/*24*/ uint64_t dataptr;
+/*32*/ uint32_t datacnt; /* Byte 3 is spare. */
+/*36*/ uint32_t sgptr;
+/*40*/ uint32_t hscb_busaddr;
+/*44*/ uint32_t next_hscb_busaddr;
+/********** Long lun field only downloaded for full 8 byte lun support ********/
+/*48*/ uint8_t pkt_long_lun[8];
+/******* Fields below are not Downloaded (Sequencer may use for scratch) ******/
+/*56*/ uint8_t spare[8];
+};
+
+/************************ Kernel SCB Definitions ******************************/
+/*
+ * Some fields of the SCB are OS dependent. Here we collect the
+ * definitions for elements that all OS platforms need to include
+ * in there SCB definition.
+ */
+
+/*
+ * Definition of a scatter/gather element as transferred to the controller.
+ * The aic7xxx chips only support a 24bit length. We use the top byte of
+ * the length to store additional address bits and a flag to indicate
+ * that a given segment terminates the transfer. This gives us an
+ * addressable range of 512GB on machines with 64bit PCI or with chips
+ * that can support dual address cycles on 32bit PCI busses.
+ */
+struct ahd_dma_seg {
+ uint32_t addr;
+ uint32_t len;
+#define AHD_DMA_LAST_SEG 0x80000000
+#define AHD_SG_HIGH_ADDR_MASK 0x7F000000
+#define AHD_SG_LEN_MASK 0x00FFFFFF
+};
+
+struct ahd_dma64_seg {
+ uint64_t addr;
+ uint32_t len;
+ uint32_t pad;
+};
+
+struct map_node {
+ bus_dmamap_t dmamap;
+ dma_addr_t physaddr;
+ uint8_t *vaddr;
+ SLIST_ENTRY(map_node) links;
+};
+
+/*
+ * The current state of this SCB.
+ */
+typedef enum {
+ SCB_FLAG_NONE = 0x00000,
+ SCB_TRANSMISSION_ERROR = 0x00001,/*
+ * We detected a parity or CRC
+ * error that has effected the
+ * payload of the command. This
+ * flag is checked when normal
+ * status is returned to catch
+ * the case of a target not
+ * responding to our attempt
+ * to report the error.
+ */
+ SCB_OTHERTCL_TIMEOUT = 0x00002,/*
+ * Another device was active
+ * during the first timeout for
+ * this SCB so we gave ourselves
+ * an additional timeout period
+ * in case it was hogging the
+ * bus.
+ */
+ SCB_DEVICE_RESET = 0x00004,
+ SCB_SENSE = 0x00008,
+ SCB_CDB32_PTR = 0x00010,
+ SCB_RECOVERY_SCB = 0x00020,
+ SCB_AUTO_NEGOTIATE = 0x00040,/* Negotiate to achieve goal. */
+ SCB_NEGOTIATE = 0x00080,/* Negotiation forced for command. */
+ SCB_ABORT = 0x00100,
+ SCB_ACTIVE = 0x00200,
+ SCB_TARGET_IMMEDIATE = 0x00400,
+ SCB_PACKETIZED = 0x00800,
+ SCB_EXPECT_PPR_BUSFREE = 0x01000,
+ SCB_PKT_SENSE = 0x02000,
+ SCB_EXTERNAL_RESET = 0x04000,/* Device was reset externally */
+ SCB_ON_COL_LIST = 0x08000,
+ SCB_SILENT = 0x10000 /*
+ * Be quiet about transmission type
+ * errors. They are expected and we
+ * don't want to upset the user. This
+ * flag is typically used during DV.
+ */
+} scb_flag;
+
+struct scb {
+ struct hardware_scb *hscb;
+ union {
+ SLIST_ENTRY(scb) sle;
+ LIST_ENTRY(scb) le;
+ TAILQ_ENTRY(scb) tqe;
+ } links;
+ union {
+ SLIST_ENTRY(scb) sle;
+ LIST_ENTRY(scb) le;
+ TAILQ_ENTRY(scb) tqe;
+ } links2;
+#define pending_links links2.le
+#define collision_links links2.le
+ struct scb *col_scb;
+ ahd_io_ctx_t io_ctx;
+ struct ahd_softc *ahd_softc;
+ scb_flag flags;
+#ifndef __linux__
+ bus_dmamap_t dmamap;
+#endif
+ struct scb_platform_data *platform_data;
+ struct map_node *hscb_map;
+ struct map_node *sg_map;
+ struct map_node *sense_map;
+ void *sg_list;
+ uint8_t *sense_data;
+ dma_addr_t sg_list_busaddr;
+ dma_addr_t sense_busaddr;
+ u_int sg_count;/* How full ahd_dma_seg is */
+#define AHD_MAX_LQ_CRC_ERRORS 5
+ u_int crc_retry_count;
+};
+
+TAILQ_HEAD(scb_tailq, scb);
+LIST_HEAD(scb_list, scb);
+
+struct scb_data {
+ /*
+ * TAILQ of lists of free SCBs grouped by device
+ * collision domains.
+ */
+ struct scb_tailq free_scbs;
+
+ /*
+ * Per-device lists of SCBs whose tag ID would collide
+ * with an already active tag on the device.
+ */
+ struct scb_list free_scb_lists[AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT];
+
+ /*
+ * SCBs that will not collide with any active device.
+ */
+ struct scb_list any_dev_free_scb_list;
+
+ /*
+ * Mapping from tag to SCB.
+ */
+ struct scb *scbindex[AHD_SCB_MAX];
+
+ /*
+ * "Bus" addresses of our data structures.
+ */
+ bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */
+ bus_dma_tag_t sg_dmat; /* dmat for our sg segments */
+ bus_dma_tag_t sense_dmat; /* dmat for our sense buffers */
+ SLIST_HEAD(, map_node) hscb_maps;
+ SLIST_HEAD(, map_node) sg_maps;
+ SLIST_HEAD(, map_node) sense_maps;
+ int scbs_left; /* unallocated scbs in head map_node */
+ int sgs_left; /* unallocated sgs in head map_node */
+ int sense_left; /* unallocated sense in head map_node */
+ uint16_t numscbs;
+ uint16_t maxhscbs; /* Number of SCBs on the card */
+ uint8_t init_level; /*
+ * How far we've initialized
+ * this structure.
+ */
+};
+
+/************************ Target Mode Definitions *****************************/
+
+/*
+ * Connection descriptor for select-in requests in target mode.
+ */
+struct target_cmd {
+ uint8_t scsiid; /* Our ID and the initiator's ID */
+ uint8_t identify; /* Identify message */
+ uint8_t bytes[22]; /*
+ * Bytes contains any additional message
+ * bytes terminated by 0xFF. The remainder
+ * is the cdb to execute.
+ */
+ uint8_t cmd_valid; /*
+ * When a command is complete, the firmware
+ * will set cmd_valid to all bits set.
+ * After the host has seen the command,
+ * the bits are cleared. This allows us
+ * to just peek at host memory to determine
+ * if more work is complete. cmd_valid is on
+ * an 8 byte boundary to simplify setting
+ * it on aic7880 hardware which only has
+ * limited direct access to the DMA FIFO.
+ */
+ uint8_t pad[7];
+};
+
+/*
+ * Number of events we can buffer up if we run out
+ * of immediate notify ccbs.
+ */
+#define AHD_TMODE_EVENT_BUFFER_SIZE 8
+struct ahd_tmode_event {
+ uint8_t initiator_id;
+ uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */
+#define EVENT_TYPE_BUS_RESET 0xFF
+ uint8_t event_arg;
+};
+
+/*
+ * Per enabled lun target mode state.
+ * As this state is directly influenced by the host OS'es target mode
+ * environment, we let the OS module define it. Forward declare the
+ * structure here so we can store arrays of them, etc. in OS neutral
+ * data structures.
+ */
+#ifdef AHD_TARGET_MODE
+struct ahd_tmode_lstate {
+ struct cam_path *path;
+ struct ccb_hdr_slist accept_tios;
+ struct ccb_hdr_slist immed_notifies;
+ struct ahd_tmode_event event_buffer[AHD_TMODE_EVENT_BUFFER_SIZE];
+ uint8_t event_r_idx;
+ uint8_t event_w_idx;
+};
+#else
+struct ahd_tmode_lstate;
+#endif
+
+/******************** Transfer Negotiation Datastructures *********************/
+#define AHD_TRANS_CUR 0x01 /* Modify current neogtiation status */
+#define AHD_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */
+#define AHD_TRANS_GOAL 0x04 /* Modify negotiation goal */
+#define AHD_TRANS_USER 0x08 /* Modify user negotiation settings */
+#define AHD_PERIOD_10MHz 0x19
+
+#define AHD_WIDTH_UNKNOWN 0xFF
+#define AHD_PERIOD_UNKNOWN 0xFF
+#define AHD_OFFSET_UNKNOWN 0xFF
+#define AHD_PPR_OPTS_UNKNOWN 0xFF
+
+/*
+ * Transfer Negotiation Information.
+ */
+struct ahd_transinfo {
+ uint8_t protocol_version; /* SCSI Revision level */
+ uint8_t transport_version; /* SPI Revision level */
+ uint8_t width; /* Bus width */
+ uint8_t period; /* Sync rate factor */
+ uint8_t offset; /* Sync offset */
+ uint8_t ppr_options; /* Parallel Protocol Request options */
+};
+
+/*
+ * Per-initiator current, goal and user transfer negotiation information. */
+struct ahd_initiator_tinfo {
+ struct ahd_transinfo curr;
+ struct ahd_transinfo goal;
+ struct ahd_transinfo user;
+};
+
+/*
+ * Per enabled target ID state.
+ * Pointers to lun target state as well as sync/wide negotiation information
+ * for each initiator<->target mapping. For the initiator role we pretend
+ * that we are the target and the targets are the initiators since the
+ * negotiation is the same regardless of role.
+ */
+struct ahd_tmode_tstate {
+ struct ahd_tmode_lstate* enabled_luns[AHD_NUM_LUNS];
+ struct ahd_initiator_tinfo transinfo[AHD_NUM_TARGETS];
+
+ /*
+ * Per initiator state bitmasks.
+ */
+ uint16_t auto_negotiate;/* Auto Negotiation Required */
+ uint16_t discenable; /* Disconnection allowed */
+ uint16_t tagenable; /* Tagged Queuing allowed */
+};
+
+/*
+ * Points of interest along the negotiated transfer scale.
+ */
+#define AHD_SYNCRATE_160 0x8
+#define AHD_SYNCRATE_PACED 0x8
+#define AHD_SYNCRATE_DT 0x9
+#define AHD_SYNCRATE_ULTRA2 0xa
+#define AHD_SYNCRATE_ULTRA 0xc
+#define AHD_SYNCRATE_FAST 0x19
+#define AHD_SYNCRATE_MIN_DT AHD_SYNCRATE_FAST
+#define AHD_SYNCRATE_SYNC 0x32
+#define AHD_SYNCRATE_MIN 0x60
+#define AHD_SYNCRATE_ASYNC 0xFF
+#define AHD_SYNCRATE_MAX AHD_SYNCRATE_160
+
+/* Safe and valid period for async negotiations. */
+#define AHD_ASYNC_XFER_PERIOD 0x44
+
+/*
+ * In RevA, the synctable uses a 120MHz rate for the period
+ * factor 8 and 160MHz for the period factor 7. The 120MHz
+ * rate never made it into the official SCSI spec, so we must
+ * compensate when setting the negotiation table for Rev A
+ * parts.
+ */
+#define AHD_SYNCRATE_REVA_120 0x8
+#define AHD_SYNCRATE_REVA_160 0x7
+
+/***************************** Lookup Tables **********************************/
+/*
+ * Phase -> name and message out response
+ * to parity errors in each phase table.
+ */
+struct ahd_phase_table_entry {
+ uint8_t phase;
+ uint8_t mesg_out; /* Message response to parity errors */
+ const char *phasemsg;
+};
+
+/************************** Serial EEPROM Format ******************************/
+
+struct seeprom_config {
+/*
+ * Per SCSI ID Configuration Flags
+ */
+ uint16_t device_flags[16]; /* words 0-15 */
+#define CFXFER 0x003F /* synchronous transfer rate */
+#define CFXFER_ASYNC 0x3F
+#define CFQAS 0x0040 /* Negotiate QAS */
+#define CFPACKETIZED 0x0080 /* Negotiate Packetized Transfers */
+#define CFSTART 0x0100 /* send start unit SCSI command */
+#define CFINCBIOS 0x0200 /* include in BIOS scan */
+#define CFDISC 0x0400 /* enable disconnection */
+#define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */
+#define CFWIDEB 0x1000 /* wide bus device */
+#define CFHOSTMANAGED 0x8000 /* Managed by a RAID controller */
+
+/*
+ * BIOS Control Bits
+ */
+ uint16_t bios_control; /* word 16 */
+#define CFSUPREM 0x0001 /* support all removeable drives */
+#define CFSUPREMB 0x0002 /* support removeable boot drives */
+#define CFBIOSSTATE 0x000C /* BIOS Action State */
+#define CFBS_DISABLED 0x00
+#define CFBS_ENABLED 0x04
+#define CFBS_DISABLED_SCAN 0x08
+#define CFENABLEDV 0x0010 /* Perform Domain Validation */
+#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
+#define CFSPARITY 0x0040 /* SCSI parity */
+#define CFEXTEND 0x0080 /* extended translation enabled */
+#define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */
+#define CFMSG_LEVEL 0x0600 /* BIOS Message Level */
+#define CFMSG_VERBOSE 0x0000
+#define CFMSG_SILENT 0x0200
+#define CFMSG_DIAG 0x0400
+#define CFRESETB 0x0800 /* reset SCSI bus at boot */
+/* UNUSED 0xf000 */
+
+/*
+ * Host Adapter Control Bits
+ */
+ uint16_t adapter_control; /* word 17 */
+#define CFAUTOTERM 0x0001 /* Perform Auto termination */
+#define CFSTERM 0x0002 /* SCSI low byte termination */
+#define CFWSTERM 0x0004 /* SCSI high byte termination */
+#define CFSEAUTOTERM 0x0008 /* Ultra2 Perform secondary Auto Term*/
+#define CFSELOWTERM 0x0010 /* Ultra2 secondary low term */
+#define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */
+#define CFSTPWLEVEL 0x0040 /* Termination level control */
+#define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */
+#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
+#define CFCLUSTERENB 0x8000 /* Cluster Enable */
+
+/*
+ * Bus Release Time, Host Adapter ID
+ */
+ uint16_t brtime_id; /* word 18 */
+#define CFSCSIID 0x000f /* host adapter SCSI ID */
+/* UNUSED 0x00f0 */
+#define CFBRTIME 0xff00 /* bus release time/PCI Latency Time */
+
+/*
+ * Maximum targets
+ */
+ uint16_t max_targets; /* word 19 */
+#define CFMAXTARG 0x00ff /* maximum targets */
+#define CFBOOTLUN 0x0f00 /* Lun to boot from */
+#define CFBOOTID 0xf000 /* Target to boot from */
+ uint16_t res_1[10]; /* words 20-29 */
+ uint16_t signature; /* BIOS Signature */
+#define CFSIGNATURE 0x400
+ uint16_t checksum; /* word 31 */
+};
+
+/*
+ * Vital Product Data used during POST and by the BIOS.
+ */
+struct vpd_config {
+ uint8_t bios_flags;
+#define VPDMASTERBIOS 0x0001
+#define VPDBOOTHOST 0x0002
+ uint8_t reserved_1[21];
+ uint8_t resource_type;
+ uint8_t resource_len[2];
+ uint8_t resource_data[8];
+ uint8_t vpd_tag;
+ uint16_t vpd_len;
+ uint8_t vpd_keyword[2];
+ uint8_t length;
+ uint8_t revision;
+ uint8_t device_flags;
+ uint8_t termination_menus[2];
+ uint8_t fifo_threshold;
+ uint8_t end_tag;
+ uint8_t vpd_checksum;
+ uint16_t default_target_flags;
+ uint16_t default_bios_flags;
+ uint16_t default_ctrl_flags;
+ uint8_t default_irq;
+ uint8_t pci_lattime;
+ uint8_t max_target;
+ uint8_t boot_lun;
+ uint16_t signature;
+ uint8_t reserved_2;
+ uint8_t checksum;
+ uint8_t reserved_3[4];
+};
+
+/****************************** Flexport Logic ********************************/
+#define FLXADDR_TERMCTL 0x0
+#define FLX_TERMCTL_ENSECHIGH 0x8
+#define FLX_TERMCTL_ENSECLOW 0x4
+#define FLX_TERMCTL_ENPRIHIGH 0x2
+#define FLX_TERMCTL_ENPRILOW 0x1
+#define FLXADDR_ROMSTAT_CURSENSECTL 0x1
+#define FLX_ROMSTAT_SEECFG 0xF0
+#define FLX_ROMSTAT_EECFG 0x0F
+#define FLX_ROMSTAT_SEE_93C66 0x00
+#define FLX_ROMSTAT_SEE_NONE 0xF0
+#define FLX_ROMSTAT_EE_512x8 0x0
+#define FLX_ROMSTAT_EE_1MBx8 0x1
+#define FLX_ROMSTAT_EE_2MBx8 0x2
+#define FLX_ROMSTAT_EE_4MBx8 0x3
+#define FLX_ROMSTAT_EE_16MBx8 0x4
+#define CURSENSE_ENB 0x1
+#define FLXADDR_FLEXSTAT 0x2
+#define FLX_FSTAT_BUSY 0x1
+#define FLXADDR_CURRENT_STAT 0x4
+#define FLX_CSTAT_SEC_HIGH 0xC0
+#define FLX_CSTAT_SEC_LOW 0x30
+#define FLX_CSTAT_PRI_HIGH 0x0C
+#define FLX_CSTAT_PRI_LOW 0x03
+#define FLX_CSTAT_MASK 0x03
+#define FLX_CSTAT_SHIFT 2
+#define FLX_CSTAT_OKAY 0x0
+#define FLX_CSTAT_OVER 0x1
+#define FLX_CSTAT_UNDER 0x2
+#define FLX_CSTAT_INVALID 0x3
+
+int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+ u_int start_addr, u_int count, int bstream);
+
+int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+ u_int start_addr, u_int count);
+int ahd_verify_cksum(struct seeprom_config *sc);
+int ahd_acquire_seeprom(struct ahd_softc *ahd);
+void ahd_release_seeprom(struct ahd_softc *ahd);
+
+/**************************** Message Buffer *********************************/
+typedef enum {
+ MSG_FLAG_NONE = 0x00,
+ MSG_FLAG_EXPECT_PPR_BUSFREE = 0x01,
+ MSG_FLAG_IU_REQ_CHANGED = 0x02,
+ MSG_FLAG_EXPECT_IDE_BUSFREE = 0x04,
+ MSG_FLAG_EXPECT_QASREJ_BUSFREE = 0x08,
+ MSG_FLAG_PACKETIZED = 0x10
+} ahd_msg_flags;
+
+typedef enum {
+ MSG_TYPE_NONE = 0x00,
+ MSG_TYPE_INITIATOR_MSGOUT = 0x01,
+ MSG_TYPE_INITIATOR_MSGIN = 0x02,
+ MSG_TYPE_TARGET_MSGOUT = 0x03,
+ MSG_TYPE_TARGET_MSGIN = 0x04
+} ahd_msg_type;
+
+typedef enum {
+ MSGLOOP_IN_PROG,
+ MSGLOOP_MSGCOMPLETE,
+ MSGLOOP_TERMINATED
+} msg_loop_stat;
+
+/*********************** Software Configuration Structure *********************/
+struct ahd_suspend_channel_state {
+ uint8_t scsiseq;
+ uint8_t sxfrctl0;
+ uint8_t sxfrctl1;
+ uint8_t simode0;
+ uint8_t simode1;
+ uint8_t seltimer;
+ uint8_t seqctl;
+};
+
+struct ahd_suspend_pci_state {
+ uint32_t devconfig;
+ uint8_t command;
+ uint8_t csize_lattime;
+};
+
+struct ahd_suspend_state {
+ struct ahd_suspend_channel_state channel[2];
+ struct ahd_suspend_pci_state pci_state;
+ uint8_t optionmode;
+ uint8_t dscommand0;
+ uint8_t dspcistatus;
+ /* hsmailbox */
+ uint8_t crccontrol1;
+ uint8_t scbbaddr;
+ /* Host and sequencer SCB counts */
+ uint8_t dff_thrsh;
+ uint8_t *scratch_ram;
+ uint8_t *btt;
+};
+
+typedef void (*ahd_bus_intr_t)(struct ahd_softc *);
+
+typedef enum {
+ AHD_MODE_DFF0,
+ AHD_MODE_DFF1,
+ AHD_MODE_CCHAN,
+ AHD_MODE_SCSI,
+ AHD_MODE_CFG,
+ AHD_MODE_UNKNOWN
+} ahd_mode;
+
+#define AHD_MK_MSK(x) (0x01 << (x))
+#define AHD_MODE_DFF0_MSK AHD_MK_MSK(AHD_MODE_DFF0)
+#define AHD_MODE_DFF1_MSK AHD_MK_MSK(AHD_MODE_DFF1)
+#define AHD_MODE_CCHAN_MSK AHD_MK_MSK(AHD_MODE_CCHAN)
+#define AHD_MODE_SCSI_MSK AHD_MK_MSK(AHD_MODE_SCSI)
+#define AHD_MODE_CFG_MSK AHD_MK_MSK(AHD_MODE_CFG)
+#define AHD_MODE_UNKNOWN_MSK AHD_MK_MSK(AHD_MODE_UNKNOWN)
+#define AHD_MODE_ANY_MSK (~0)
+
+typedef uint8_t ahd_mode_state;
+
+typedef void ahd_callback_t (void *);
+
+struct ahd_completion
+{
+ uint16_t tag;
+ uint8_t sg_status;
+ uint8_t valid_tag;
+};
+
+struct ahd_softc {
+ bus_space_tag_t tags[2];
+ bus_space_handle_t bshs[2];
+#ifndef __linux__
+ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
+#endif
+ struct scb_data scb_data;
+
+ struct hardware_scb *next_queued_hscb;
+ struct map_node *next_queued_hscb_map;
+
+ /*
+ * SCBs that have been sent to the controller
+ */
+ LIST_HEAD(, scb) pending_scbs;
+
+ /*
+ * Current register window mode information.
+ */
+ ahd_mode dst_mode;
+ ahd_mode src_mode;
+
+ /*
+ * Saved register window mode information
+ * used for restore on next unpause.
+ */
+ ahd_mode saved_dst_mode;
+ ahd_mode saved_src_mode;
+
+ /*
+ * Platform specific data.
+ */
+ struct ahd_platform_data *platform_data;
+
+ /*
+ * Platform specific device information.
+ */
+ ahd_dev_softc_t dev_softc;
+
+ /*
+ * Bus specific device information.
+ */
+ ahd_bus_intr_t bus_intr;
+
+ /*
+ * Target mode related state kept on a per enabled lun basis.
+ * Targets that are not enabled will have null entries.
+ * As an initiator, we keep one target entry for our initiator
+ * ID to store our sync/wide transfer settings.
+ */
+ struct ahd_tmode_tstate *enabled_targets[AHD_NUM_TARGETS];
+
+ /*
+ * The black hole device responsible for handling requests for
+ * disabled luns on enabled targets.
+ */
+ struct ahd_tmode_lstate *black_hole;
+
+ /*
+ * Device instance currently on the bus awaiting a continue TIO
+ * for a command that was not given the disconnect priveledge.
+ */
+ struct ahd_tmode_lstate *pending_device;
+
+ /*
+ * Timer handles for timer driven callbacks.
+ */
+ ahd_timer_t reset_timer;
+ ahd_timer_t stat_timer;
+
+ /*
+ * Statistics.
+ */
+#define AHD_STAT_UPDATE_US 250000 /* 250ms */
+#define AHD_STAT_BUCKETS 4
+ u_int cmdcmplt_bucket;
+ uint32_t cmdcmplt_counts[AHD_STAT_BUCKETS];
+ uint32_t cmdcmplt_total;
+
+ /*
+ * Card characteristics
+ */
+ ahd_chip chip;
+ ahd_feature features;
+ ahd_bug bugs;
+ ahd_flag flags;
+ struct seeprom_config *seep_config;
+
+ /* Command Queues */
+ struct ahd_completion *qoutfifo;
+ uint16_t qoutfifonext;
+ uint16_t qoutfifonext_valid_tag;
+ uint16_t qinfifonext;
+ uint16_t qinfifo[AHD_SCB_MAX];
+
+ /*
+ * Our qfreeze count. The sequencer compares
+ * this value with its own counter to determine
+ * whether to allow selections to occur.
+ */
+ uint16_t qfreeze_cnt;
+
+ /* Values to store in the SEQCTL register for pause and unpause */
+ uint8_t unpause;
+ uint8_t pause;
+
+ /* Critical Section Data */
+ struct cs *critical_sections;
+ u_int num_critical_sections;
+
+ /* Buffer for handling packetized bitbucket. */
+ uint8_t *overrun_buf;
+
+ /* Links for chaining softcs */
+ TAILQ_ENTRY(ahd_softc) links;
+
+ /* Channel Names ('A', 'B', etc.) */
+ char channel;
+
+ /* Initiator Bus ID */
+ uint8_t our_id;
+
+ /*
+ * Target incoming command FIFO.
+ */
+ struct target_cmd *targetcmds;
+ uint8_t tqinfifonext;
+
+ /*
+ * Cached verson of the hs_mailbox so we can avoid
+ * pausing the sequencer during mailbox updates.
+ */
+ uint8_t hs_mailbox;
+
+ /*
+ * Incoming and outgoing message handling.
+ */
+ uint8_t send_msg_perror;
+ ahd_msg_flags msg_flags;
+ ahd_msg_type msg_type;
+ uint8_t msgout_buf[12];/* Message we are sending */
+ uint8_t msgin_buf[12];/* Message we are receiving */
+ u_int msgout_len; /* Length of message to send */
+ u_int msgout_index; /* Current index in msgout */
+ u_int msgin_index; /* Current index in msgin */
+
+ /*
+ * Mapping information for data structures shared
+ * between the sequencer and kernel.
+ */
+ bus_dma_tag_t parent_dmat;
+ bus_dma_tag_t shared_data_dmat;
+ struct map_node shared_data_map;
+
+ /* Information saved through suspend/resume cycles */
+ struct ahd_suspend_state suspend_state;
+
+ /* Number of enabled target mode device on this card */
+ u_int enabled_luns;
+
+ /* Initialization level of this data structure */
+ u_int init_level;
+
+ /* PCI cacheline size. */
+ u_int pci_cachesize;
+
+ /* IO Cell Parameters */
+ uint8_t iocell_opts[AHD_NUM_PER_DEV_ANNEXCOLS];
+
+ u_int stack_size;
+ uint16_t *saved_stack;
+
+ /* Per-Unit descriptive information */
+ const char *description;
+ const char *bus_description;
+ char *name;
+ int unit;
+
+ /* Selection Timer settings */
+ int seltime;
+
+ /*
+ * Interrupt coalescing settings.
+ */
+#define AHD_INT_COALESCING_TIMER_DEFAULT 250 /*us*/
+#define AHD_INT_COALESCING_MAXCMDS_DEFAULT 10
+#define AHD_INT_COALESCING_MAXCMDS_MAX 127
+#define AHD_INT_COALESCING_MINCMDS_DEFAULT 5
+#define AHD_INT_COALESCING_MINCMDS_MAX 127
+#define AHD_INT_COALESCING_THRESHOLD_DEFAULT 2000
+#define AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT 1000
+ u_int int_coalescing_timer;
+ u_int int_coalescing_maxcmds;
+ u_int int_coalescing_mincmds;
+ u_int int_coalescing_threshold;
+ u_int int_coalescing_stop_threshold;
+
+ uint16_t user_discenable;/* Disconnection allowed */
+ uint16_t user_tagenable;/* Tagged Queuing allowed */
+};
+
+/*************************** IO Cell Configuration ****************************/
+#define AHD_PRECOMP_SLEW_INDEX \
+ (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0)
+
+#define AHD_AMPLITUDE_INDEX \
+ (AHD_ANNEXCOL_AMPLITUDE - AHD_ANNEXCOL_PER_DEV0)
+
+#define AHD_SET_SLEWRATE(ahd, new_slew) \
+do { \
+ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK; \
+ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \
+ (((new_slew) << AHD_SLEWRATE_SHIFT) & AHD_SLEWRATE_MASK); \
+} while (0)
+
+#define AHD_SET_PRECOMP(ahd, new_pcomp) \
+do { \
+ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; \
+ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \
+ (((new_pcomp) << AHD_PRECOMP_SHIFT) & AHD_PRECOMP_MASK); \
+} while (0)
+
+#define AHD_SET_AMPLITUDE(ahd, new_amp) \
+do { \
+ (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] &= ~AHD_AMPLITUDE_MASK; \
+ (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] |= \
+ (((new_amp) << AHD_AMPLITUDE_SHIFT) & AHD_AMPLITUDE_MASK); \
+} while (0)
+
+/************************ Active Device Information ***************************/
+typedef enum {
+ ROLE_UNKNOWN,
+ ROLE_INITIATOR,
+ ROLE_TARGET
+} role_t;
+
+struct ahd_devinfo {
+ int our_scsiid;
+ int target_offset;
+ uint16_t target_mask;
+ u_int target;
+ u_int lun;
+ char channel;
+ role_t role; /*
+ * Only guaranteed to be correct if not
+ * in the busfree state.
+ */
+};
+
+/****************************** PCI Structures ********************************/
+#define AHD_PCI_IOADDR0 PCIR_BAR(0) /* I/O BAR*/
+#define AHD_PCI_MEMADDR PCIR_BAR(1) /* Memory BAR */
+#define AHD_PCI_IOADDR1 PCIR_BAR(3) /* Second I/O BAR */
+
+typedef int (ahd_device_setup_t)(struct ahd_softc *);
+
+struct ahd_pci_identity {
+ uint64_t full_id;
+ uint64_t id_mask;
+ const char *name;
+ ahd_device_setup_t *setup;
+};
+
+/***************************** VL/EISA Declarations ***************************/
+struct aic7770_identity {
+ uint32_t full_id;
+ uint32_t id_mask;
+ const char *name;
+ ahd_device_setup_t *setup;
+};
+extern struct aic7770_identity aic7770_ident_table [];
+extern const int ahd_num_aic7770_devs;
+
+#define AHD_EISA_SLOT_OFFSET 0xc00
+#define AHD_EISA_IOSIZE 0x100
+
+/*************************** Function Declarations ****************************/
+/******************************************************************************/
+
+/***************************** PCI Front End *********************************/
+const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
+int ahd_pci_config(struct ahd_softc *,
+ const struct ahd_pci_identity *);
+int ahd_pci_test_register_access(struct ahd_softc *);
+#ifdef CONFIG_PM
+void ahd_pci_suspend(struct ahd_softc *);
+void ahd_pci_resume(struct ahd_softc *);
+#endif
+
+/************************** SCB and SCB queue management **********************/
+void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
+ struct scb *scb);
+
+/****************************** Initialization ********************************/
+struct ahd_softc *ahd_alloc(void *platform_arg, char *name);
+int ahd_softc_init(struct ahd_softc *);
+void ahd_controller_info(struct ahd_softc *ahd, char *buf);
+int ahd_init(struct ahd_softc *ahd);
+#ifdef CONFIG_PM
+int ahd_suspend(struct ahd_softc *ahd);
+void ahd_resume(struct ahd_softc *ahd);
+#endif
+int ahd_default_config(struct ahd_softc *ahd);
+int ahd_parse_vpddata(struct ahd_softc *ahd,
+ struct vpd_config *vpd);
+int ahd_parse_cfgdata(struct ahd_softc *ahd,
+ struct seeprom_config *sc);
+void ahd_intr_enable(struct ahd_softc *ahd, int enable);
+void ahd_pause_and_flushwork(struct ahd_softc *ahd);
+void ahd_set_unit(struct ahd_softc *, int);
+void ahd_set_name(struct ahd_softc *, char *);
+struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);
+void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb);
+void ahd_free(struct ahd_softc *ahd);
+int ahd_reset(struct ahd_softc *ahd, int reinit);
+int ahd_write_flexport(struct ahd_softc *ahd,
+ u_int addr, u_int value);
+int ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
+ uint8_t *value);
+
+/***************************** Error Recovery *********************************/
+typedef enum {
+ SEARCH_COMPLETE,
+ SEARCH_COUNT,
+ SEARCH_REMOVE,
+ SEARCH_PRINT
+} ahd_search_action;
+int ahd_search_qinfifo(struct ahd_softc *ahd, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status,
+ ahd_search_action action);
+int ahd_search_disc_list(struct ahd_softc *ahd, int target,
+ char channel, int lun, u_int tag,
+ int stop_on_first, int remove,
+ int save_state);
+int ahd_reset_channel(struct ahd_softc *ahd, char channel,
+ int initiate_reset);
+/*************************** Utility Functions ********************************/
+void ahd_compile_devinfo(struct ahd_devinfo *devinfo,
+ u_int our_id, u_int target,
+ u_int lun, char channel,
+ role_t role);
+/************************** Transfer Negotiation ******************************/
+void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
+ u_int *ppr_options, u_int maxsync);
+/*
+ * Negotiation types. These are used to qualify if we should renegotiate
+ * even if our goal and current transport parameters are identical.
+ */
+typedef enum {
+ AHD_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */
+ AHD_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */
+ AHD_NEG_ALWAYS /* Renegotiat even if goal is async. */
+} ahd_neg_type;
+int ahd_update_neg_request(struct ahd_softc*,
+ struct ahd_devinfo*,
+ struct ahd_tmode_tstate*,
+ struct ahd_initiator_tinfo*,
+ ahd_neg_type);
+void ahd_set_width(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ u_int width, u_int type, int paused);
+void ahd_set_syncrate(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ u_int period, u_int offset,
+ u_int ppr_options,
+ u_int type, int paused);
+typedef enum {
+ AHD_QUEUE_NONE,
+ AHD_QUEUE_BASIC,
+ AHD_QUEUE_TAGGED
+} ahd_queue_alg;
+
+/**************************** Target Mode *************************************/
+#ifdef AHD_TARGET_MODE
+void ahd_send_lstate_events(struct ahd_softc *,
+ struct ahd_tmode_lstate *);
+void ahd_handle_en_lun(struct ahd_softc *ahd,
+ struct cam_sim *sim, union ccb *ccb);
+cam_status ahd_find_tmode_devs(struct ahd_softc *ahd,
+ struct cam_sim *sim, union ccb *ccb,
+ struct ahd_tmode_tstate **tstate,
+ struct ahd_tmode_lstate **lstate,
+ int notfound_failure);
+#ifndef AHD_TMODE_ENABLE
+#define AHD_TMODE_ENABLE 0
+#endif
+#endif
+/******************************* Debug ***************************************/
+#ifdef AHD_DEBUG
+extern uint32_t ahd_debug;
+#define AHD_SHOW_MISC 0x00001
+#define AHD_SHOW_SENSE 0x00002
+#define AHD_SHOW_RECOVERY 0x00004
+#define AHD_DUMP_SEEPROM 0x00008
+#define AHD_SHOW_TERMCTL 0x00010
+#define AHD_SHOW_MEMORY 0x00020
+#define AHD_SHOW_MESSAGES 0x00040
+#define AHD_SHOW_MODEPTR 0x00080
+#define AHD_SHOW_SELTO 0x00100
+#define AHD_SHOW_FIFOS 0x00200
+#define AHD_SHOW_QFULL 0x00400
+#define AHD_SHOW_DV 0x00800
+#define AHD_SHOW_MASKED_ERRORS 0x01000
+#define AHD_SHOW_QUEUE 0x02000
+#define AHD_SHOW_TQIN 0x04000
+#define AHD_SHOW_SG 0x08000
+#define AHD_SHOW_INT_COALESCING 0x10000
+#define AHD_DEBUG_SEQUENCER 0x20000
+#endif
+void ahd_print_devinfo(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+void ahd_dump_card_state(struct ahd_softc *ahd);
+int ahd_print_register(const ahd_reg_parse_entry_t *table,
+ u_int num_entries,
+ const char *name,
+ u_int address,
+ u_int value,
+ u_int *cur_column,
+ u_int wrap_point);
+#endif /* _AIC79XX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
new file mode 100644
index 000000000..7e12c31cc
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -0,0 +1,4281 @@
+/*
+ * Aic79xx register and scratch ram definitions.
+ *
+ * Copyright (c) 1994-2001, 2004 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $"
+
+/*
+ * This file is processed by the aic7xxx_asm utility for use in assembling
+ * firmware for the aic79xx family of SCSI host adapters as well as to generate
+ * a C header file for use in the kernel portion of the Aic79xx driver.
+ */
+
+/* Register window Modes */
+#define M_DFF0 0
+#define M_DFF1 1
+#define M_CCHAN 2
+#define M_SCSI 3
+#define M_CFG 4
+#define M_DST_SHIFT 4
+
+#define MK_MODE(src, dst) ((src) | ((dst) << M_DST_SHIFT))
+#define SET_MODE(src, dst) \
+ SET_SRC_MODE src; \
+ SET_DST_MODE dst; \
+ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \
+ mvi MK_MODE(src, dst) call set_mode_work_around; \
+ } else { \
+ mvi MODE_PTR, MK_MODE(src, dst); \
+ }
+
+#define RESTORE_MODE(mode) \
+ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \
+ mov mode call set_mode_work_around; \
+ } else { \
+ mov MODE_PTR, mode; \
+ }
+
+#define SET_SEQINTCODE(code) \
+ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { \
+ mvi code call set_seqint_work_around; \
+ } else { \
+ mvi SEQINTCODE, code; \
+ }
+
+/*
+ * Registers marked "dont_generate_debug_code" are not (yet) referenced
+ * from the driver code, and this keyword inhibit generation
+ * of debug code for them.
+ *
+ * REG_PRETTY_PRINT config will complain if dont_generate_debug_code
+ * is added to the register which is referenced in the driver.
+ * Unreferenced register with no dont_generate_debug_code will result
+ * in dead code. No warning is issued.
+ */
+
+/*
+ * Mode Pointer
+ * Controls which of the 5, 512byte, address spaces should be used
+ * as the source and destination of any register accesses in our
+ * register window.
+ */
+register MODE_PTR {
+ address 0x000
+ access_mode RW
+ field DST_MODE 0x70
+ field SRC_MODE 0x07
+ mode_pointer
+ dont_generate_debug_code
+}
+
+const SRC_MODE_SHIFT 0
+const DST_MODE_SHIFT 4
+
+/*
+ * Host Interrupt Status
+ */
+register INTSTAT {
+ address 0x001
+ access_mode RW
+ field HWERRINT 0x80
+ field BRKADRINT 0x40
+ field SWTMINT 0x20
+ field PCIINT 0x10
+ field SCSIINT 0x08
+ field SEQINT 0x04
+ field CMDCMPLT 0x02
+ field SPLTINT 0x01
+ mask INT_PEND 0xFF
+}
+
+/*
+ * Sequencer Interrupt Code
+ */
+register SEQINTCODE {
+ address 0x002
+ access_mode RW
+ field {
+ NO_SEQINT, /* No seqint pending. */
+ BAD_PHASE, /* unknown scsi bus phase */
+ SEND_REJECT, /* sending a message reject */
+ PROTO_VIOLATION, /* Protocol Violation */
+ NO_MATCH, /* no cmd match for reconnect */
+ IGN_WIDE_RES, /* Complex IGN Wide Res Msg */
+ PDATA_REINIT, /*
+ * Returned to data phase
+ * that requires data
+ * transfer pointers to be
+ * recalculated from the
+ * transfer residual.
+ */
+ HOST_MSG_LOOP, /*
+ * The bus is ready for the
+ * host to perform another
+ * message transaction. This
+ * mechanism is used for things
+ * like sync/wide negotiation
+ * that require a kernel based
+ * message state engine.
+ */
+ BAD_STATUS, /* Bad status from target */
+ DATA_OVERRUN, /*
+ * Target attempted to write
+ * beyond the bounds of its
+ * command.
+ */
+ MKMSG_FAILED, /*
+ * Target completed command
+ * without honoring our ATN
+ * request to issue a message.
+ */
+ MISSED_BUSFREE, /*
+ * The sequencer never saw
+ * the bus go free after
+ * either a command complete
+ * or disconnect message.
+ */
+ DUMP_CARD_STATE,
+ ILLEGAL_PHASE,
+ INVALID_SEQINT,
+ CFG4ISTAT_INTR,
+ STATUS_OVERRUN,
+ CFG4OVERRUN,
+ ENTERING_NONPACK,
+ TASKMGMT_FUNC_COMPLETE, /*
+ * Task management function
+ * request completed with
+ * an expected busfree.
+ */
+ TASKMGMT_CMD_CMPLT_OKAY, /*
+ * A command with a non-zero
+ * task management function
+ * has completed via the normal
+ * command completion method
+ * for commands with a zero
+ * task management function.
+ * This happens when an attempt
+ * to abort a command loses
+ * the race for the command to
+ * complete normally.
+ */
+ TRACEPOINT0,
+ TRACEPOINT1,
+ TRACEPOINT2,
+ TRACEPOINT3,
+ SAW_HWERR,
+ BAD_SCB_STATUS
+ }
+ dont_generate_debug_code
+}
+
+/*
+ * Clear Host Interrupt
+ */
+register CLRINT {
+ address 0x003
+ access_mode WO
+ count 19
+ field CLRHWERRINT 0x80 /* Rev B or greater */
+ field CLRBRKADRINT 0x40
+ field CLRSWTMINT 0x20
+ field CLRPCIINT 0x10
+ field CLRSCSIINT 0x08
+ field CLRSEQINT 0x04
+ field CLRCMDINT 0x02
+ field CLRSPLTINT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Error Register
+ */
+register ERROR {
+ address 0x004
+ access_mode RO
+ field CIOPARERR 0x80
+ field CIOACCESFAIL 0x40 /* Rev B or greater */
+ field MPARERR 0x20
+ field DPARERR 0x10
+ field SQPARERR 0x08
+ field ILLOPCODE 0x04
+ field DSCTMOUT 0x02
+ dont_generate_debug_code
+}
+
+/*
+ * Clear Error
+ */
+register CLRERR {
+ address 0x004
+ access_mode WO
+ field CLRCIOPARERR 0x80
+ field CLRCIOACCESFAIL 0x40 /* Rev B or greater */
+ field CLRMPARERR 0x20
+ field CLRDPARERR 0x10
+ field CLRSQPARERR 0x08
+ field CLRILLOPCODE 0x04
+ field CLRDSCTMOUT 0x02
+}
+
+/*
+ * Host Control Register
+ * Overall host control of the device.
+ */
+register HCNTRL {
+ address 0x005
+ access_mode RW
+ count 12
+ field SEQ_RESET 0x80 /* Rev B or greater */
+ field POWRDN 0x40
+ field SWINT 0x10
+ field SWTIMER_START_B 0x08 /* Rev B or greater */
+ field PAUSE 0x04
+ field INTEN 0x02
+ field CHIPRST 0x01
+ field CHIPRSTACK 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Host New SCB Queue Offset
+ */
+register HNSCB_QOFF {
+ address 0x006
+ access_mode RW
+ size 2
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Host Empty SCB Queue Offset
+ */
+register HESCB_QOFF {
+ address 0x008
+ access_mode RW
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Host Mailbox
+ */
+register HS_MAILBOX {
+ address 0x00B
+ access_mode RW
+ mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */
+ mask ENINT_COALESCE 0x40 /* Perform interrupt coalescing */
+}
+
+/*
+ * Sequencer Interrupt Status
+ */
+register SEQINTSTAT {
+ address 0x00C
+ count 1
+ access_mode RO
+ field SEQ_SWTMRTO 0x10
+ field SEQ_SEQINT 0x08
+ field SEQ_SCSIINT 0x04
+ field SEQ_PCIINT 0x02
+ field SEQ_SPLTINT 0x01
+}
+
+/*
+ * Clear SEQ Interrupt
+ */
+register CLRSEQINTSTAT {
+ address 0x00C
+ access_mode WO
+ field CLRSEQ_SWTMRTO 0x10
+ field CLRSEQ_SEQINT 0x08
+ field CLRSEQ_SCSIINT 0x04
+ field CLRSEQ_PCIINT 0x02
+ field CLRSEQ_SPLTINT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Software Timer
+ */
+register SWTIMER {
+ address 0x00E
+ access_mode RW
+ size 2
+ dont_generate_debug_code
+}
+
+/*
+ * SEQ New SCB Queue Offset
+ */
+register SNSCB_QOFF {
+ address 0x010
+ access_mode RW
+ size 2
+ modes M_CCHAN
+ dont_generate_debug_code
+}
+
+/*
+ * SEQ Empty SCB Queue Offset
+ */
+register SESCB_QOFF {
+ address 0x012
+ count 2
+ access_mode RW
+ modes M_CCHAN
+ dont_generate_debug_code
+}
+
+/*
+ * SEQ Done SCB Queue Offset
+ */
+register SDSCB_QOFF {
+ address 0x014
+ access_mode RW
+ modes M_CCHAN
+ size 2
+ dont_generate_debug_code
+}
+
+/*
+ * Queue Offset Control & Status
+ */
+register QOFF_CTLSTA {
+ address 0x016
+ access_mode RW
+ modes M_CCHAN
+ field EMPTY_SCB_AVAIL 0x80
+ field NEW_SCB_AVAIL 0x40
+ field SDSCB_ROLLOVR 0x20
+ field HS_MAILBOX_ACT 0x10
+ field SCB_QSIZE 0x0F {
+ SCB_QSIZE_4,
+ SCB_QSIZE_8,
+ SCB_QSIZE_16,
+ SCB_QSIZE_32,
+ SCB_QSIZE_64,
+ SCB_QSIZE_128,
+ SCB_QSIZE_256,
+ SCB_QSIZE_512,
+ SCB_QSIZE_1024,
+ SCB_QSIZE_2048,
+ SCB_QSIZE_4096,
+ SCB_QSIZE_8192,
+ SCB_QSIZE_16384
+ }
+ dont_generate_debug_code
+}
+
+/*
+ * Interrupt Control
+ */
+register INTCTL {
+ address 0x018
+ access_mode RW
+ field SWTMINTMASK 0x80
+ field SWTMINTEN 0x40
+ field SWTIMER_START 0x20
+ field AUTOCLRCMDINT 0x10
+ field PCIINTEN 0x08
+ field SCSIINTEN 0x04
+ field SEQINTEN 0x02
+ field SPLTINTEN 0x01
+}
+
+/*
+ * Data FIFO Control
+ */
+register DFCNTRL {
+ address 0x019
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ count 11
+ field PRELOADEN 0x80
+ field SCSIENWRDIS 0x40 /* Rev B only. */
+ field SCSIEN 0x20
+ field SCSIENACK 0x20
+ field HDMAEN 0x08
+ field HDMAENACK 0x08
+ field DIRECTION 0x04
+ field DIRECTIONACK 0x04
+ field FIFOFLUSH 0x02
+ field FIFOFLUSHACK 0x02
+ field DIRECTIONEN 0x01
+}
+
+/*
+ * Device Space Command 0
+ */
+register DSCOMMAND0 {
+ address 0x019
+ count 1
+ access_mode RW
+ modes M_CFG
+ field CACHETHEN 0x80 /* Cache Threshold enable */
+ field DPARCKEN 0x40 /* Data Parity Check Enable */
+ field MPARCKEN 0x20 /* Memory Parity Check Enable */
+ field EXTREQLCK 0x10 /* External Request Lock */
+ field DISABLE_TWATE 0x02 /* Rev B or greater */
+ field CIOPARCKEN 0x01 /* Internal bus parity error enable */
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO Status
+ */
+register DFSTATUS {
+ address 0x01A
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field PRELOAD_AVAIL 0x80
+ field PKT_PRELOAD_AVAIL 0x40
+ field MREQPEND 0x10
+ field HDONE 0x08
+ field DFTHRESH 0x04
+ field FIFOFULL 0x02
+ field FIFOEMP 0x01
+}
+
+/*
+ * S/G Cache Pointer
+ */
+register SG_CACHE_PRE {
+ address 0x01B
+ access_mode WO
+ modes M_DFF0, M_DFF1
+ field SG_ADDR_MASK 0xf8
+ field ODD_SEG 0x04
+ field LAST_SEG 0x02
+ dont_generate_debug_code
+}
+
+register SG_CACHE_SHADOW {
+ address 0x01B
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field SG_ADDR_MASK 0xf8
+ field ODD_SEG 0x04
+ field LAST_SEG 0x02
+ field LAST_SEG_DONE 0x01
+}
+
+/*
+ * Arbiter Control
+ */
+register ARBCTL {
+ address 0x01B
+ access_mode RW
+ modes M_CFG
+ field RESET_HARB 0x80
+ field RETRY_SWEN 0x08
+ field USE_TIME 0x07
+}
+
+/*
+ * Data Channel Host Address
+ */
+register HADDR {
+ address 0x070
+ access_mode RW
+ size 8
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * Host Overlay DMA Address
+ */
+register HODMAADR {
+ address 0x070
+ access_mode RW
+ size 8
+ modes M_SCSI
+}
+
+/*
+ * PCI PLL Delay.
+ */
+register PLLDELAY {
+ address 0x070
+ access_mode RW
+ size 1
+ modes M_CFG
+ field SPLIT_DROP_REQ 0x80
+}
+
+/*
+ * Data Channel Host Count
+ */
+register HCNT {
+ address 0x078
+ access_mode RW
+ size 3
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * Host Overlay DMA Count
+ */
+register HODMACNT {
+ address 0x078
+ access_mode RW
+ size 2
+ modes M_SCSI
+}
+
+/*
+ * Host Overlay DMA Enable
+ */
+register HODMAEN {
+ address 0x07A
+ access_mode RW
+ modes M_SCSI
+}
+
+/*
+ * Scatter/Gather Host Address
+ */
+register SGHADDR {
+ address 0x07C
+ access_mode RW
+ size 8
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * SCB Host Address
+ */
+register SCBHADDR {
+ address 0x07C
+ access_mode RW
+ size 8
+ modes M_CCHAN
+ dont_generate_debug_code
+}
+
+/*
+ * Scatter/Gather Host Count
+ */
+register SGHCNT {
+ address 0x084
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * SCB Host Count
+ */
+register SCBHCNT {
+ address 0x084
+ access_mode RW
+ modes M_CCHAN
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO Threshold
+ */
+register DFF_THRSH {
+ address 0x088
+ access_mode RW
+ modes M_CFG
+ count 1
+ field WR_DFTHRSH 0x70 {
+ WR_DFTHRSH_MIN,
+ WR_DFTHRSH_25,
+ WR_DFTHRSH_50,
+ WR_DFTHRSH_63,
+ WR_DFTHRSH_75,
+ WR_DFTHRSH_85,
+ WR_DFTHRSH_90,
+ WR_DFTHRSH_MAX
+ }
+ field RD_DFTHRSH 0x07 {
+ RD_DFTHRSH_MIN,
+ RD_DFTHRSH_25,
+ RD_DFTHRSH_50,
+ RD_DFTHRSH_63,
+ RD_DFTHRSH_75,
+ RD_DFTHRSH_85,
+ RD_DFTHRSH_90,
+ RD_DFTHRSH_MAX
+ }
+ dont_generate_debug_code
+}
+
+/*
+ * ROM Address
+ */
+register ROMADDR {
+ address 0x08A
+ access_mode RW
+ size 3
+}
+
+/*
+ * ROM Control
+ */
+register ROMCNTRL {
+ address 0x08D
+ access_mode RW
+ field ROMOP 0xE0
+ field ROMSPD 0x18
+ field REPEAT 0x02
+ field RDY 0x01
+}
+
+/*
+ * ROM Data
+ */
+register ROMDATA {
+ address 0x08E
+ access_mode RW
+}
+
+/*
+ * Data Channel Receive Message 0
+ */
+register DCHRXMSG0 {
+ address 0x090
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field CDNUM 0xF8
+ field CFNUM 0x07
+}
+
+/*
+ * CMC Receive Message 0
+ */
+register CMCRXMSG0 {
+ address 0x090
+ access_mode RO
+ modes M_CCHAN
+ field CDNUM 0xF8
+ field CFNUM 0x07
+}
+
+/*
+ * Overlay Receive Message 0
+ */
+register OVLYRXMSG0 {
+ address 0x090
+ access_mode RO
+ modes M_SCSI
+ field CDNUM 0xF8
+ field CFNUM 0x07
+}
+
+/*
+ * Relaxed Order Enable
+ */
+register ROENABLE {
+ address 0x090
+ access_mode RW
+ modes M_CFG
+ field MSIROEN 0x20
+ field OVLYROEN 0x10
+ field CMCROEN 0x08
+ field SGROEN 0x04
+ field DCH1ROEN 0x02
+ field DCH0ROEN 0x01
+}
+
+/*
+ * Data Channel Receive Message 1
+ */
+register DCHRXMSG1 {
+ address 0x091
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field CBNUM 0xFF
+}
+
+/*
+ * CMC Receive Message 1
+ */
+register CMCRXMSG1 {
+ address 0x091
+ access_mode RO
+ modes M_CCHAN
+ field CBNUM 0xFF
+}
+
+/*
+ * Overlay Receive Message 1
+ */
+register OVLYRXMSG1 {
+ address 0x091
+ access_mode RO
+ modes M_SCSI
+ field CBNUM 0xFF
+}
+
+/*
+ * No Snoop Enable
+ */
+register NSENABLE {
+ address 0x091
+ access_mode RW
+ modes M_CFG
+ field MSINSEN 0x20
+ field OVLYNSEN 0x10
+ field CMCNSEN 0x08
+ field SGNSEN 0x04
+ field DCH1NSEN 0x02
+ field DCH0NSEN 0x01
+}
+
+/*
+ * Data Channel Receive Message 2
+ */
+register DCHRXMSG2 {
+ address 0x092
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field MINDEX 0xFF
+}
+
+/*
+ * CMC Receive Message 2
+ */
+register CMCRXMSG2 {
+ address 0x092
+ access_mode RO
+ modes M_CCHAN
+ field MINDEX 0xFF
+}
+
+/*
+ * Overlay Receive Message 2
+ */
+register OVLYRXMSG2 {
+ address 0x092
+ access_mode RO
+ modes M_SCSI
+ field MINDEX 0xFF
+}
+
+/*
+ * Outstanding Split Transactions
+ */
+register OST {
+ address 0x092
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Data Channel Receive Message 3
+ */
+register DCHRXMSG3 {
+ address 0x093
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field MCLASS 0x0F
+}
+
+/*
+ * CMC Receive Message 3
+ */
+register CMCRXMSG3 {
+ address 0x093
+ access_mode RO
+ modes M_CCHAN
+ field MCLASS 0x0F
+}
+
+/*
+ * Overlay Receive Message 3
+ */
+register OVLYRXMSG3 {
+ address 0x093
+ access_mode RO
+ modes M_SCSI
+ field MCLASS 0x0F
+}
+
+/*
+ * PCI-X Control
+ */
+register PCIXCTL {
+ address 0x093
+ access_mode RW
+ modes M_CFG
+ count 1
+ field SERRPULSE 0x80
+ field UNEXPSCIEN 0x20
+ field SPLTSMADIS 0x10
+ field SPLTSTADIS 0x08
+ field SRSPDPEEN 0x04
+ field TSCSERREN 0x02
+ field CMPABCDIS 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * CMC Sequencer Byte Count
+ */
+register CMCSEQBCNT {
+ address 0x094
+ access_mode RO
+ modes M_CCHAN
+}
+
+/*
+ * Overlay Sequencer Byte Count
+ */
+register OVLYSEQBCNT {
+ address 0x094
+ access_mode RO
+ modes M_SCSI
+}
+
+/*
+ * Data Channel Sequencer Byte Count
+ */
+register DCHSEQBCNT {
+ address 0x094
+ access_mode RO
+ size 2
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Data Channel Split Status 0
+ */
+register DCHSPLTSTAT0 {
+ address 0x096
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ count 2
+ field STAETERM 0x80
+ field SCBCERR 0x40
+ field SCADERR 0x20
+ field SCDATBUCKET 0x10
+ field CNTNOTCMPLT 0x08
+ field RXOVRUN 0x04
+ field RXSCEMSG 0x02
+ field RXSPLTRSP 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * CMC Split Status 0
+ */
+register CMCSPLTSTAT0 {
+ address 0x096
+ access_mode RW
+ modes M_CCHAN
+ field STAETERM 0x80
+ field SCBCERR 0x40
+ field SCADERR 0x20
+ field SCDATBUCKET 0x10
+ field CNTNOTCMPLT 0x08
+ field RXOVRUN 0x04
+ field RXSCEMSG 0x02
+ field RXSPLTRSP 0x01
+}
+
+/*
+ * Overlay Split Status 0
+ */
+register OVLYSPLTSTAT0 {
+ address 0x096
+ access_mode RW
+ modes M_SCSI
+ field STAETERM 0x80
+ field SCBCERR 0x40
+ field SCADERR 0x20
+ field SCDATBUCKET 0x10
+ field CNTNOTCMPLT 0x08
+ field RXOVRUN 0x04
+ field RXSCEMSG 0x02
+ field RXSPLTRSP 0x01
+}
+
+/*
+ * Data Channel Split Status 1
+ */
+register DCHSPLTSTAT1 {
+ address 0x097
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ count 2
+ field RXDATABUCKET 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * CMC Split Status 1
+ */
+register CMCSPLTSTAT1 {
+ address 0x097
+ access_mode RW
+ modes M_CCHAN
+ field RXDATABUCKET 0x01
+}
+
+/*
+ * Overlay Split Status 1
+ */
+register OVLYSPLTSTAT1 {
+ address 0x097
+ access_mode RW
+ modes M_SCSI
+ field RXDATABUCKET 0x01
+}
+
+/*
+ * S/G Receive Message 0
+ */
+register SGRXMSG0 {
+ address 0x098
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field CDNUM 0xF8
+ field CFNUM 0x07
+}
+
+/*
+ * S/G Receive Message 1
+ */
+register SGRXMSG1 {
+ address 0x099
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field CBNUM 0xFF
+}
+
+/*
+ * S/G Receive Message 2
+ */
+register SGRXMSG2 {
+ address 0x09A
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field MINDEX 0xFF
+}
+
+/*
+ * S/G Receive Message 3
+ */
+register SGRXMSG3 {
+ address 0x09B
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field MCLASS 0x0F
+}
+
+/*
+ * Slave Split Out Address 0
+ */
+register SLVSPLTOUTADR0 {
+ address 0x098
+ access_mode RO
+ modes M_SCSI
+ field LOWER_ADDR 0x7F
+}
+
+/*
+ * Slave Split Out Address 1
+ */
+register SLVSPLTOUTADR1 {
+ address 0x099
+ access_mode RO
+ modes M_SCSI
+ field REQ_DNUM 0xF8
+ field REQ_FNUM 0x07
+}
+
+/*
+ * Slave Split Out Address 2
+ */
+register SLVSPLTOUTADR2 {
+ address 0x09A
+ access_mode RO
+ modes M_SCSI
+ field REQ_BNUM 0xFF
+}
+
+/*
+ * Slave Split Out Address 3
+ */
+register SLVSPLTOUTADR3 {
+ address 0x09B
+ access_mode RO
+ modes M_SCSI
+ field RLXORD 020
+ field TAG_NUM 0x1F
+}
+
+/*
+ * SG Sequencer Byte Count
+ */
+register SGSEQBCNT {
+ address 0x09C
+ access_mode RO
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Slave Split Out Attribute 0
+ */
+register SLVSPLTOUTATTR0 {
+ address 0x09C
+ access_mode RO
+ modes M_SCSI
+ field LOWER_BCNT 0xFF
+}
+
+/*
+ * Slave Split Out Attribute 1
+ */
+register SLVSPLTOUTATTR1 {
+ address 0x09D
+ access_mode RO
+ modes M_SCSI
+ field CMPLT_DNUM 0xF8
+ field CMPLT_FNUM 0x07
+}
+
+/*
+ * Slave Split Out Attribute 2
+ */
+register SLVSPLTOUTATTR2 {
+ address 0x09E
+ access_mode RO
+ size 2
+ modes M_SCSI
+ field CMPLT_BNUM 0xFF
+}
+/*
+ * S/G Split Status 0
+ */
+register SGSPLTSTAT0 {
+ address 0x09E
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ count 2
+ field STAETERM 0x80
+ field SCBCERR 0x40
+ field SCADERR 0x20
+ field SCDATBUCKET 0x10
+ field CNTNOTCMPLT 0x08
+ field RXOVRUN 0x04
+ field RXSCEMSG 0x02
+ field RXSPLTRSP 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * S/G Split Status 1
+ */
+register SGSPLTSTAT1 {
+ address 0x09F
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ count 2
+ field RXDATABUCKET 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Special Function
+ */
+register SFUNCT {
+ address 0x09f
+ access_mode RW
+ modes M_CFG
+ field TEST_GROUP 0xF0
+ field TEST_NUM 0x0F
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO 0 PCI Status
+ */
+register DF0PCISTAT {
+ address 0x0A0
+ access_mode RW
+ modes M_CFG
+ count 1
+ field DPE 0x80
+ field SSE 0x40
+ field RMA 0x20
+ field RTA 0x10
+ field SCAAPERR 0x08
+ field RDPERR 0x04
+ field TWATERR 0x02
+ field DPR 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO 1 PCI Status
+ */
+register DF1PCISTAT {
+ address 0x0A1
+ access_mode RW
+ modes M_CFG
+ field DPE 0x80
+ field SSE 0x40
+ field RMA 0x20
+ field RTA 0x10
+ field SCAAPERR 0x08
+ field RDPERR 0x04
+ field TWATERR 0x02
+ field DPR 0x01
+}
+
+/*
+ * S/G PCI Status
+ */
+register SGPCISTAT {
+ address 0x0A2
+ access_mode RW
+ modes M_CFG
+ field DPE 0x80
+ field SSE 0x40
+ field RMA 0x20
+ field RTA 0x10
+ field SCAAPERR 0x08
+ field RDPERR 0x04
+ field DPR 0x01
+}
+
+/*
+ * CMC PCI Status
+ */
+register CMCPCISTAT {
+ address 0x0A3
+ access_mode RW
+ modes M_CFG
+ field DPE 0x80
+ field SSE 0x40
+ field RMA 0x20
+ field RTA 0x10
+ field SCAAPERR 0x08
+ field RDPERR 0x04
+ field TWATERR 0x02
+ field DPR 0x01
+}
+
+/*
+ * Overlay PCI Status
+ */
+register OVLYPCISTAT {
+ address 0x0A4
+ access_mode RW
+ modes M_CFG
+ field DPE 0x80
+ field SSE 0x40
+ field RMA 0x20
+ field RTA 0x10
+ field SCAAPERR 0x08
+ field RDPERR 0x04
+ field DPR 0x01
+}
+
+/*
+ * PCI Status for MSI Master DMA Transfer
+ */
+register MSIPCISTAT {
+ address 0x0A6
+ access_mode RW
+ modes M_CFG
+ field SSE 0x40
+ field RMA 0x20
+ field RTA 0x10
+ field CLRPENDMSI 0x08
+ field TWATERR 0x02
+ field DPR 0x01
+}
+
+/*
+ * PCI Status for Target
+ */
+register TARGPCISTAT {
+ address 0x0A7
+ access_mode RW
+ modes M_CFG
+ count 5
+ field DPE 0x80
+ field SSE 0x40
+ field STA 0x08
+ field TWATERR 0x02
+ dont_generate_debug_code
+}
+
+/*
+ * LQ Packet In
+ * The last LQ Packet received
+ */
+register LQIN {
+ address 0x020
+ access_mode RW
+ size 20
+ count 2
+ modes M_DFF0, M_DFF1, M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * SCB Type Pointer
+ * SCB offset for Target Mode SCB type information
+ */
+register TYPEPTR {
+ address 0x020
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Queue Tag Pointer
+ * SCB offset to the Two Byte tag identifier used for target mode.
+ */
+register TAGPTR {
+ address 0x021
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Logical Unit Number Pointer
+ * SCB offset to the LSB (little endian) of the lun field.
+ */
+register LUNPTR {
+ address 0x022
+ access_mode RW
+ modes M_CFG
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Data Length Pointer
+ * SCB offset for the 4 byte data length field in target mode.
+ */
+register DATALENPTR {
+ address 0x023
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Status Length Pointer
+ * SCB offset to the two byte status field in target SCBs.
+ */
+register STATLENPTR {
+ address 0x024
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Command Length Pointer
+ * Scb offset for the CDB length field in initiator SCBs.
+ */
+register CMDLENPTR {
+ address 0x025
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Task Attribute Pointer
+ * Scb offset for the byte field specifying the attribute byte
+ * to be used in command packets.
+ */
+register ATTRPTR {
+ address 0x026
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Task Management Flags Pointer
+ * Scb offset for the byte field specifying the attribute flags
+ * byte to be used in command packets.
+ */
+register FLAGPTR {
+ address 0x027
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Command Pointer
+ * Scb offset for the first byte in the CDB for initiator SCBs.
+ */
+register CMDPTR {
+ address 0x028
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Queue Next Pointer
+ * Scb offset for the 2 byte "next scb link".
+ */
+register QNEXTPTR {
+ address 0x029
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI ID Pointer
+ * Scb offset to the value to place in the SCSIID register
+ * during target mode connections.
+ */
+register IDPTR {
+ address 0x02A
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Command Aborted Byte Pointer
+ * Offset to the SCB flags field that includes the
+ * "SCB aborted" status bit.
+ */
+register ABRTBYTEPTR {
+ address 0x02B
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Command Aborted Bit Pointer
+ * Bit offset in the SCB flags field for "SCB aborted" status.
+ */
+register ABRTBITPTR {
+ address 0x02C
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Rev B or greater.
+ */
+register MAXCMDBYTES {
+ address 0x02D
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Rev B or greater.
+ */
+register MAXCMD2RCV {
+ address 0x02E
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Rev B or greater.
+ */
+register SHORTTHRESH {
+ address 0x02F
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * Logical Unit Number Length
+ * The length, in bytes, of the SCB lun field.
+ */
+register LUNLEN {
+ address 0x030
+ access_mode RW
+ modes M_CFG
+ count 2
+ mask ILUNLEN 0x0F
+ mask TLUNLEN 0xF0
+ dont_generate_debug_code
+}
+const LUNLEN_SINGLE_LEVEL_LUN 0xF
+
+/*
+ * CDB Limit
+ * The size, in bytes, of the embedded CDB field in initator SCBs.
+ */
+register CDBLIMIT {
+ address 0x031
+ access_mode RW
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Maximum Commands
+ * The maximum number of commands to issue during a
+ * single packetized connection.
+ */
+register MAXCMD {
+ address 0x032
+ access_mode RW
+ modes M_CFG
+ count 9
+ dont_generate_debug_code
+}
+
+/*
+ * Maximum Command Counter
+ * The number of commands already sent during this connection
+ */
+register MAXCMDCNT {
+ address 0x033
+ access_mode RW
+ modes M_CFG
+ dont_generate_debug_code
+}
+
+/*
+ * LQ Packet Reserved Bytes
+ * The bytes to be sent in the currently reserved fileds
+ * of all LQ packets.
+ */
+register LQRSVD01 {
+ address 0x034
+ access_mode RW
+ modes M_SCSI
+}
+register LQRSVD16 {
+ address 0x035
+ access_mode RW
+ modes M_SCSI
+}
+register LQRSVD17 {
+ address 0x036
+ access_mode RW
+ modes M_SCSI
+}
+
+/*
+ * Command Reserved 0
+ * The byte to be sent for the reserved byte 0 of
+ * outgoing command packets.
+ */
+register CMDRSVD0 {
+ address 0x037
+ access_mode RW
+ modes M_CFG
+}
+
+/*
+ * LQ Manager Control 0
+ */
+register LQCTL0 {
+ address 0x038
+ access_mode RW
+ modes M_CFG
+ field LQITARGCLT 0xC0
+ field LQIINITGCLT 0x30
+ field LQ0TARGCLT 0x0C
+ field LQ0INITGCLT 0x03
+}
+
+/*
+ * LQ Manager Control 1
+ */
+register LQCTL1 {
+ address 0x038
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 2
+ field PCI2PCI 0x04
+ field SINGLECMD 0x02
+ field ABORTPENDING 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQ Manager Control 2
+ */
+register LQCTL2 {
+ address 0x039
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 5
+ field LQIRETRY 0x80
+ field LQICONTINUE 0x40
+ field LQITOIDLE 0x20
+ field LQIPAUSE 0x10
+ field LQORETRY 0x08
+ field LQOCONTINUE 0x04
+ field LQOTOIDLE 0x02
+ field LQOPAUSE 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI RAM BIST0
+ */
+register SCSBIST0 {
+ address 0x039
+ access_mode RW
+ modes M_CFG
+ field GSBISTERR 0x40
+ field GSBISTDONE 0x20
+ field GSBISTRUN 0x10
+ field OSBISTERR 0x04
+ field OSBISTDONE 0x02
+ field OSBISTRUN 0x01
+}
+
+/*
+ * SCSI Sequence Control0
+ */
+register SCSISEQ0 {
+ address 0x03A
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ field TEMODEO 0x80
+ field ENSELO 0x40
+ field ENARBO 0x20
+ field FORCEBUSFREE 0x10
+ field SCSIRSTO 0x01
+}
+
+/*
+ * SCSI RAM BIST 1
+ */
+register SCSBIST1 {
+ address 0x03A
+ access_mode RW
+ modes M_CFG
+ field NTBISTERR 0x04
+ field NTBISTDONE 0x02
+ field NTBISTRUN 0x01
+}
+
+/*
+ * SCSI Sequence Control 1
+ */
+register SCSISEQ1 {
+ address 0x03B
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 8
+ field MANUALCTL 0x40
+ field ENSELI 0x20
+ field ENRSELI 0x10
+ field MANUALP 0x0C
+ field ENAUTOATNP 0x02
+ field ALTSTIM 0x01
+}
+
+/*
+ * SCSI Transfer Control 0
+ */
+register SXFRCTL0 {
+ address 0x03C
+ access_mode RW
+ modes M_SCSI
+ field DFON 0x80
+ field DFPEXP 0x40
+ field BIOSCANCELEN 0x10
+ field SPIOEN 0x08
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Transfer Control 1
+ */
+register SXFRCTL1 {
+ address 0x03D
+ access_mode RW
+ modes M_SCSI
+ field BITBUCKET 0x80
+ field ENSACHK 0x40
+ field ENSPCHK 0x20
+ field STIMESEL 0x18
+ field ENSTIMER 0x04
+ field ACTNEGEN 0x02
+ field STPWEN 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Transfer Control 2
+ */
+register SXFRCTL2 {
+ address 0x03E
+ access_mode RW
+ modes M_SCSI
+ field AUTORSTDIS 0x10
+ field CMDDMAEN 0x08
+ field ASU 0x07
+}
+
+/*
+ * SCSI Bus Initiator IDs
+ * Bitmask of observed initiators on the bus.
+ */
+register BUSINITID {
+ address 0x03C
+ access_mode RW
+ modes M_CFG
+ size 2
+}
+
+/*
+ * Data Length Counters
+ * Packet byte counter.
+ */
+register DLCOUNT {
+ address 0x03C
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ size 3
+}
+
+/*
+ * Data FIFO Status
+ */
+register DFFSTAT {
+ address 0x03F
+ access_mode RW
+ modes M_SCSI
+ field FIFO1FREE 0x20
+ field FIFO0FREE 0x10
+ /*
+ * On the B, this enum only works
+ * in the read direction. For writes,
+ * you must use the B version of the
+ * CURRFIFO_0 definition which is defined
+ * as a constant outside of this register
+ * definition to avoid confusing the
+ * register pretty printing code.
+ */
+ enum CURRFIFO 0x03 {
+ CURRFIFO_0,
+ CURRFIFO_1,
+ CURRFIFO_NONE 0x3
+ }
+}
+
+const B_CURRFIFO_0 0x2
+
+/*
+ * SCSI Bus Target IDs
+ * Bitmask of observed targets on the bus.
+ */
+register BUSTARGID {
+ address 0x03E
+ access_mode RW
+ modes M_CFG
+ size 2
+}
+
+/*
+ * SCSI Control Signal Out
+ */
+register SCSISIGO {
+ address 0x040
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ field CDO 0x80
+ field IOO 0x40
+ field MSGO 0x20
+ field ATNO 0x10
+ field SELO 0x08
+ field BSYO 0x04
+ field REQO 0x02
+ field ACKO 0x01
+/*
+ * Possible phases to write into SCSISIG0
+ */
+ enum PHASE_MASK CDO|IOO|MSGO {
+ P_DATAOUT 0x0,
+ P_DATAIN IOO,
+ P_DATAOUT_DT P_DATAOUT|MSGO,
+ P_DATAIN_DT P_DATAIN|MSGO,
+ P_COMMAND CDO,
+ P_MESGOUT CDO|MSGO,
+ P_STATUS CDO|IOO,
+ P_MESGIN CDO|IOO|MSGO
+ }
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Control Signal In
+ */
+register SCSISIGI {
+ address 0x041
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field CDI 0x80
+ field IOI 0x40
+ field MSGI 0x20
+ field ATNI 0x10
+ field SELI 0x08
+ field BSYI 0x04
+ field REQI 0x02
+ field ACKI 0x01
+/*
+ * Possible phases in SCSISIGI
+ */
+ enum PHASE_MASK CDO|IOO|MSGO {
+ P_DATAOUT 0x0,
+ P_DATAIN IOO,
+ P_DATAOUT_DT P_DATAOUT|MSGO,
+ P_DATAIN_DT P_DATAIN|MSGO,
+ P_COMMAND CDO,
+ P_MESGOUT CDO|MSGO,
+ P_STATUS CDO|IOO,
+ P_MESGIN CDO|IOO|MSGO
+ }
+}
+
+/*
+ * Multiple Target IDs
+ * Bitmask of ids to respond as a target.
+ */
+register MULTARGID {
+ address 0x040
+ access_mode RW
+ modes M_CFG
+ size 2
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Phase
+ */
+register SCSIPHASE {
+ address 0x042
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field STATUS_PHASE 0x20
+ field COMMAND_PHASE 0x10
+ field MSG_IN_PHASE 0x08
+ field MSG_OUT_PHASE 0x04
+ field DATA_PHASE_MASK 0x03 {
+ DATA_OUT_PHASE 0x01,
+ DATA_IN_PHASE 0x02
+ }
+}
+
+/*
+ * SCSI Data 0 Image
+ */
+register SCSIDAT0_IMG {
+ address 0x043
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * SCSI Latched Data
+ */
+register SCSIDAT {
+ address 0x044
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ size 2
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Data Bus
+ */
+register SCSIBUS {
+ address 0x046
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ size 2
+}
+
+/*
+ * Target ID In
+ */
+register TARGIDIN {
+ address 0x048
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 2
+ field CLKOUT 0x80
+ field TARGID 0x0F
+ dont_generate_debug_code
+}
+
+/*
+ * Selection/Reselection ID
+ * Upper four bits are the device id. The ONEBIT is set when the re/selecting
+ * device did not set its own ID.
+ */
+register SELID {
+ address 0x049
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ field SELID_MASK 0xf0
+ field ONEBIT 0x08
+}
+
+/*
+ * SCSI Block Control
+ * Controls Bus type and channel selection. SELWIDE allows for the
+ * coexistence of 8bit and 16bit devices on a wide bus.
+ */
+register SBLKCTL {
+ address 0x04A
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ field DIAGLEDEN 0x80
+ field DIAGLEDON 0x40
+ field ENAB40 0x08 /* LVD transceiver active */
+ field ENAB20 0x04 /* SE/HVD transceiver active */
+ field SELWIDE 0x02
+ dont_generate_debug_code
+}
+
+/*
+ * Option Mode
+ */
+register OPTIONMODE {
+ address 0x04A
+ access_mode RW
+ modes M_CFG
+ count 4
+ field BIOSCANCTL 0x80
+ field AUTOACKEN 0x40
+ field BIASCANCTL 0x20
+ field BUSFREEREV 0x10
+ field ENDGFORMCHK 0x04
+ field AUTO_MSGOUT_DE 0x02
+ mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Status 0
+ */
+register SSTAT0 {
+ address 0x04B
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field TARGET 0x80 /* Board acting as target */
+ field SELDO 0x40 /* Selection Done */
+ field SELDI 0x20 /* Board has been selected */
+ field SELINGO 0x10 /* Selection In Progress */
+ field IOERR 0x08 /* LVD Tranceiver mode changed */
+ field OVERRUN 0x04 /* SCSI Offset overrun detected */
+ field SPIORDY 0x02 /* SCSI PIO Ready */
+ field ARBDO 0x01 /* Arbitration Done Out */
+}
+
+/*
+ * Clear SCSI Interrupt 0
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
+ */
+register CLRSINT0 {
+ address 0x04B
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field CLRSELDO 0x40
+ field CLRSELDI 0x20
+ field CLRSELINGO 0x10
+ field CLRIOERR 0x08
+ field CLROVERRUN 0x04
+ field CLRSPIORDY 0x02
+ field CLRARBDO 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Interrupt Mode 0
+ * Setting any bit will enable the corresponding function
+ * in SIMODE0 to interrupt via the IRQ pin.
+ */
+register SIMODE0 {
+ address 0x04B
+ access_mode RW
+ modes M_CFG
+ count 8
+ field ENSELDO 0x40
+ field ENSELDI 0x20
+ field ENSELINGO 0x10
+ field ENIOERR 0x08
+ field ENOVERRUN 0x04
+ field ENSPIORDY 0x02
+ field ENARBDO 0x01
+}
+
+/*
+ * SCSI Status 1
+ */
+register SSTAT1 {
+ address 0x04C
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field SELTO 0x80
+ field ATNTARG 0x40
+ field SCSIRSTI 0x20
+ field PHASEMIS 0x10
+ field BUSFREE 0x08
+ field SCSIPERR 0x04
+ field STRB2FAST 0x02
+ field REQINIT 0x01
+}
+
+/*
+ * Clear SCSI Interrupt 1
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
+ */
+register CLRSINT1 {
+ address 0x04C
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field CLRSELTIMEO 0x80
+ field CLRATNO 0x40
+ field CLRSCSIRSTI 0x20
+ field CLRBUSFREE 0x08
+ field CLRSCSIPERR 0x04
+ field CLRSTRB2FAST 0x02
+ field CLRREQINIT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Status 2
+ */
+register SSTAT2 {
+ address 0x04d
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field BUSFREETIME 0xc0 {
+ BUSFREE_LQO 0x40,
+ BUSFREE_DFF0 0x80,
+ BUSFREE_DFF1 0xC0
+ }
+ field NONPACKREQ 0x20
+ field EXP_ACTIVE 0x10 /* SCSI Expander Active */
+ field BSYX 0x08 /* Busy Expander */
+ field WIDE_RES 0x04 /* Modes 0 and 1 only */
+ field SDONE 0x02 /* Modes 0 and 1 only */
+ field DMADONE 0x01 /* Modes 0 and 1 only */
+}
+
+/*
+ * Clear SCSI Interrupt 2
+ */
+register CLRSINT2 {
+ address 0x04D
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field CLRNONPACKREQ 0x20
+ field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */
+ field CLRSDONE 0x02 /* Modes 0 and 1 only */
+ field CLRDMADONE 0x01 /* Modes 0 and 1 only */
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Interrupt Mode 2
+ */
+register SIMODE2 {
+ address 0x04D
+ access_mode RW
+ modes M_CFG
+ field ENWIDE_RES 0x04
+ field ENSDONE 0x02
+ field ENDMADONE 0x01
+}
+
+/*
+ * Physical Error Diagnosis
+ */
+register PERRDIAG {
+ address 0x04E
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 3
+ field HIZERO 0x80
+ field HIPERR 0x40
+ field PREVPHASE 0x20
+ field PARITYERR 0x10
+ field AIPERR 0x08
+ field CRCERR 0x04
+ field DGFORMERR 0x02
+ field DTERR 0x01
+}
+
+/*
+ * LQI Manager Current State
+ */
+register LQISTATE {
+ address 0x04E
+ access_mode RO
+ modes M_CFG
+ count 6
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Offset Count
+ */
+register SOFFCNT {
+ address 0x04F
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 1
+}
+
+/*
+ * LQO Manager Current State
+ */
+register LQOSTATE {
+ address 0x04F
+ access_mode RO
+ modes M_CFG
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * LQI Manager Status
+ */
+register LQISTAT0 {
+ address 0x050
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 2
+ field LQIATNQAS 0x20
+ field LQICRCT1 0x10
+ field LQICRCT2 0x08
+ field LQIBADLQT 0x04
+ field LQIATNLQ 0x02
+ field LQIATNCMD 0x01
+}
+
+/*
+ * Clear LQI Interrupts 0
+ */
+register CLRLQIINT0 {
+ address 0x050
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 1
+ field CLRLQIATNQAS 0x20
+ field CLRLQICRCT1 0x10
+ field CLRLQICRCT2 0x08
+ field CLRLQIBADLQT 0x04
+ field CLRLQIATNLQ 0x02
+ field CLRLQIATNCMD 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQI Manager Interrupt Mode 0
+ */
+register LQIMODE0 {
+ address 0x050
+ access_mode RW
+ modes M_CFG
+ count 3
+ field ENLQIATNQASK 0x20
+ field ENLQICRCT1 0x10
+ field ENLQICRCT2 0x08
+ field ENLQIBADLQT 0x04
+ field ENLQIATNLQ 0x02
+ field ENLQIATNCMD 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQI Manager Status 1
+ */
+register LQISTAT1 {
+ address 0x051
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 3
+ field LQIPHASE_LQ 0x80
+ field LQIPHASE_NLQ 0x40
+ field LQIABORT 0x20
+ field LQICRCI_LQ 0x10
+ field LQICRCI_NLQ 0x08
+ field LQIBADLQI 0x04
+ field LQIOVERI_LQ 0x02
+ field LQIOVERI_NLQ 0x01
+}
+
+/*
+ * Clear LQI Manager Interrupts1
+ */
+register CLRLQIINT1 {
+ address 0x051
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 4
+ field CLRLQIPHASE_LQ 0x80
+ field CLRLQIPHASE_NLQ 0x40
+ field CLRLIQABORT 0x20
+ field CLRLQICRCI_LQ 0x10
+ field CLRLQICRCI_NLQ 0x08
+ field CLRLQIBADLQI 0x04
+ field CLRLQIOVERI_LQ 0x02
+ field CLRLQIOVERI_NLQ 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQI Manager Interrupt Mode 1
+ */
+register LQIMODE1 {
+ address 0x051
+ access_mode RW
+ modes M_CFG
+ count 4
+ field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */
+ field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */
+ field ENLIQABORT 0x20
+ field ENLQICRCI_LQ 0x10 /* LQICRCI1 */
+ field ENLQICRCI_NLQ 0x08 /* LQICRCI2 */
+ field ENLQIBADLQI 0x04
+ field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */
+ field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */
+ dont_generate_debug_code
+}
+
+/*
+ * LQI Manager Status 2
+ */
+register LQISTAT2 {
+ address 0x052
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field PACKETIZED 0x80
+ field LQIPHASE_OUTPKT 0x40
+ field LQIWORKONLQ 0x20
+ field LQIWAITFIFO 0x10
+ field LQISTOPPKT 0x08
+ field LQISTOPLQ 0x04
+ field LQISTOPCMD 0x02
+ field LQIGSAVAIL 0x01
+}
+
+/*
+ * SCSI Status 3
+ */
+register SSTAT3 {
+ address 0x053
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 3
+ field NTRAMPERR 0x02
+ field OSRAMPERR 0x01
+}
+
+/*
+ * Clear SCSI Status 3
+ */
+register CLRSINT3 {
+ address 0x053
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 3
+ field CLRNTRAMPERR 0x02
+ field CLROSRAMPERR 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Interrupt Mode 3
+ */
+register SIMODE3 {
+ address 0x053
+ access_mode RW
+ modes M_CFG
+ count 4
+ field ENNTRAMPERR 0x02
+ field ENOSRAMPERR 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQO Manager Status 0
+ */
+register LQOSTAT0 {
+ address 0x054
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 2
+ field LQOTARGSCBPERR 0x10
+ field LQOSTOPT2 0x08
+ field LQOATNLQ 0x04
+ field LQOATNPKT 0x02
+ field LQOTCRC 0x01
+}
+
+/*
+ * Clear LQO Manager interrupt 0
+ */
+register CLRLQOINT0 {
+ address 0x054
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 3
+ field CLRLQOTARGSCBPERR 0x10
+ field CLRLQOSTOPT2 0x08
+ field CLRLQOATNLQ 0x04
+ field CLRLQOATNPKT 0x02
+ field CLRLQOTCRC 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQO Manager Interrupt Mode 0
+ */
+register LQOMODE0 {
+ address 0x054
+ access_mode RW
+ modes M_CFG
+ count 4
+ field ENLQOTARGSCBPERR 0x10
+ field ENLQOSTOPT2 0x08
+ field ENLQOATNLQ 0x04
+ field ENLQOATNPKT 0x02
+ field ENLQOTCRC 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQO Manager Status 1
+ */
+register LQOSTAT1 {
+ address 0x055
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field LQOINITSCBPERR 0x10
+ field LQOSTOPI2 0x08
+ field LQOBADQAS 0x04
+ field LQOBUSFREE 0x02
+ field LQOPHACHGINPKT 0x01
+}
+
+/*
+ * Clear LOQ Interrupt 1
+ */
+register CLRLQOINT1 {
+ address 0x055
+ access_mode WO
+ modes M_DFF0, M_DFF1, M_SCSI
+ count 7
+ field CLRLQOINITSCBPERR 0x10
+ field CLRLQOSTOPI2 0x08
+ field CLRLQOBADQAS 0x04
+ field CLRLQOBUSFREE 0x02
+ field CLRLQOPHACHGINPKT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQO Manager Interrupt Mode 1
+ */
+register LQOMODE1 {
+ address 0x055
+ access_mode RW
+ modes M_CFG
+ count 4
+ field ENLQOINITSCBPERR 0x10
+ field ENLQOSTOPI2 0x08
+ field ENLQOBADQAS 0x04
+ field ENLQOBUSFREE 0x02
+ field ENLQOPHACHGINPKT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * LQO Manager Status 2
+ */
+register LQOSTAT2 {
+ address 0x056
+ access_mode RO
+ modes M_DFF0, M_DFF1, M_SCSI
+ field LQOPKT 0xE0
+ field LQOWAITFIFO 0x10
+ field LQOPHACHGOUTPKT 0x02 /* outside of packet boundaries. */
+ field LQOSTOP0 0x01 /* Stopped after sending all packets */
+}
+
+/*
+ * Output Synchronizer Space Count
+ */
+register OS_SPACE_CNT {
+ address 0x056
+ access_mode RO
+ modes M_CFG
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Interrupt Mode 1
+ * Setting any bit will enable the corresponding function
+ * in SIMODE1 to interrupt via the IRQ pin.
+ */
+register SIMODE1 {
+ address 0x057
+ access_mode RW
+ modes M_DFF0, M_DFF1, M_SCSI
+ field ENSELTIMO 0x80
+ field ENATNTARG 0x40
+ field ENSCSIRST 0x20
+ field ENPHASEMIS 0x10
+ field ENBUSFREE 0x08
+ field ENSCSIPERR 0x04
+ field ENSTRB2FAST 0x02
+ field ENREQINIT 0x01
+}
+
+/*
+ * Good Status FIFO
+ */
+register GSFIFO {
+ address 0x058
+ access_mode RO
+ size 2
+ modes M_DFF0, M_DFF1, M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO SCSI Transfer Control
+ */
+register DFFSXFRCTL {
+ address 0x05A
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ field DFFBITBUCKET 0x08
+ field CLRSHCNT 0x04
+ field CLRCHN 0x02
+ field RSTCHN 0x01
+}
+
+/*
+ * Next SCSI Control Block
+ */
+register NEXTSCB {
+ address 0x05A
+ access_mode RW
+ size 2
+ modes M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * LQO SCSI Control
+ * (Rev B only.)
+ */
+register LQOSCSCTL {
+ address 0x05A
+ access_mode RW
+ size 1
+ modes M_CFG
+ count 1
+ field LQOH2A_VERSION 0x80
+ field LQOBUSETDLY 0x40
+ field LQONOHOLDLACK 0x02
+ field LQONOCHKOVER 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SEQ Interrupts
+ */
+register SEQINTSRC {
+ address 0x05B
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field CTXTDONE 0x40
+ field SAVEPTRS 0x20
+ field CFG4DATA 0x10
+ field CFG4ISTAT 0x08
+ field CFG4TSTAT 0x04
+ field CFG4ICMD 0x02
+ field CFG4TCMD 0x01
+}
+
+/*
+ * Clear Arp Interrupts
+ */
+register CLRSEQINTSRC {
+ address 0x05B
+ access_mode WO
+ modes M_DFF0, M_DFF1
+ field CLRCTXTDONE 0x40
+ field CLRSAVEPTRS 0x20
+ field CLRCFG4DATA 0x10
+ field CLRCFG4ISTAT 0x08
+ field CLRCFG4TSTAT 0x04
+ field CLRCFG4ICMD 0x02
+ field CLRCFG4TCMD 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SEQ Interrupt Enabled (Shared)
+ */
+register SEQIMODE {
+ address 0x05C
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ field ENCTXTDONE 0x40
+ field ENSAVEPTRS 0x20
+ field ENCFG4DATA 0x10
+ field ENCFG4ISTAT 0x08
+ field ENCFG4TSTAT 0x04
+ field ENCFG4ICMD 0x02
+ field ENCFG4TCMD 0x01
+}
+
+/*
+ * Current SCSI Control Block
+ */
+register CURRSCB {
+ address 0x05C
+ access_mode RW
+ size 2
+ modes M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO Status
+ */
+register MDFFSTAT {
+ address 0x05D
+ access_mode RO
+ modes M_DFF0, M_DFF1
+ field SHCNTNEGATIVE 0x40 /* Rev B or higher */
+ field SHCNTMINUS1 0x20 /* Rev B or higher */
+ field LASTSDONE 0x10
+ field SHVALID 0x08
+ field DLZERO 0x04 /* FIFO data ends on packet boundary. */
+ field DATAINFIFO 0x02
+ field FIFOFREE 0x01
+}
+
+/*
+ * CRC Control
+ */
+register CRCCONTROL {
+ address 0x05d
+ access_mode RW
+ modes M_CFG
+ field CRCVALCHKEN 0x40
+}
+
+/*
+ * SCSI Test Control
+ */
+register SCSITEST {
+ address 0x05E
+ access_mode RW
+ modes M_CFG
+ field CNTRTEST 0x08
+ field SEL_TXPLL_DEBUG 0x04
+}
+
+/*
+ * Data FIFO Queue Tag
+ */
+register DFFTAG {
+ address 0x05E
+ access_mode RW
+ size 2
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Last SCSI Control Block
+ */
+register LASTSCB {
+ address 0x05E
+ access_mode RW
+ size 2
+ modes M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI I/O Cell Power-down Control
+ */
+register IOPDNCTL {
+ address 0x05F
+ access_mode RW
+ modes M_CFG
+ field DISABLE_OE 0x80
+ field PDN_IDIST 0x04
+ field PDN_DIFFSENSE 0x01
+}
+
+/*
+ * Shadow Host Address.
+ */
+register SHADDR {
+ address 0x060
+ access_mode RO
+ size 8
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * Data Group CRC Interval.
+ */
+register DGRPCRCI {
+ address 0x060
+ access_mode RW
+ size 2
+ modes M_CFG
+}
+
+/*
+ * Data Transfer Negotiation Address
+ */
+register NEGOADDR {
+ address 0x060
+ access_mode RW
+ modes M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * Data Transfer Negotiation Data - Period Byte
+ */
+register NEGPERIOD {
+ address 0x061
+ access_mode RW
+ modes M_SCSI
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Packetized CRC Interval
+ */
+register PACKCRCI {
+ address 0x062
+ access_mode RW
+ size 2
+ modes M_CFG
+}
+
+/*
+ * Data Transfer Negotiation Data - Offset Byte
+ */
+register NEGOFFSET {
+ address 0x062
+ access_mode RW
+ modes M_SCSI
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Data Transfer Negotiation Data - PPR Options
+ */
+register NEGPPROPTS {
+ address 0x063
+ access_mode RW
+ modes M_SCSI
+ count 1
+ field PPROPT_PACE 0x08
+ field PPROPT_QAS 0x04
+ field PPROPT_DT 0x02
+ field PPROPT_IUT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Data Transfer Negotiation Data - Connection Options
+ */
+register NEGCONOPTS {
+ address 0x064
+ access_mode RW
+ modes M_SCSI
+ field ENSNAPSHOT 0x40
+ field RTI_WRTDIS 0x20
+ field RTI_OVRDTRN 0x10
+ field ENSLOWCRC 0x08
+ field ENAUTOATNI 0x04
+ field ENAUTOATNO 0x02
+ field WIDEXFER 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Negotiation Table Annex Column Index.
+ */
+register ANNEXCOL {
+ address 0x065
+ access_mode RW
+ modes M_SCSI
+ count 7
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Check
+ * (Rev. B only)
+ */
+register SCSCHKN {
+ address 0x066
+ access_mode RW
+ modes M_CFG
+ count 1
+ field BIDICHKDIS 0x80
+ field STSELSKIDDIS 0x40
+ field CURRFIFODEF 0x20
+ field WIDERESEN 0x10
+ field SDONEMSKDIS 0x08
+ field DFFACTCLR 0x04
+ field SHVALIDSTDIS 0x02
+ field LSTSGCLRDIS 0x01
+ dont_generate_debug_code
+}
+
+const AHD_ANNEXCOL_PER_DEV0 4
+const AHD_NUM_PER_DEV_ANNEXCOLS 4
+const AHD_ANNEXCOL_PRECOMP_SLEW 4
+const AHD_PRECOMP_MASK 0x07
+const AHD_PRECOMP_SHIFT 0
+const AHD_PRECOMP_CUTBACK_17 0x04
+const AHD_PRECOMP_CUTBACK_29 0x06
+const AHD_PRECOMP_CUTBACK_37 0x07
+const AHD_SLEWRATE_MASK 0x78
+const AHD_SLEWRATE_SHIFT 3
+/*
+ * Rev A has only a single bit (high bit of field) of slew adjustment.
+ * Rev B has 4 bits. The current default happens to be the same for both.
+ */
+const AHD_SLEWRATE_DEF_REVA 0x08
+const AHD_SLEWRATE_DEF_REVB 0x08
+
+/* Rev A does not have any amplitude setting. */
+const AHD_ANNEXCOL_AMPLITUDE 6
+const AHD_AMPLITUDE_MASK 0x7
+const AHD_AMPLITUDE_SHIFT 0
+const AHD_AMPLITUDE_DEF 0x7
+
+/*
+ * Negotiation Table Annex Data Port.
+ */
+register ANNEXDAT {
+ address 0x066
+ access_mode RW
+ modes M_SCSI
+ count 3
+ dont_generate_debug_code
+}
+
+/*
+ * Initiator's Own Id.
+ * The SCSI ID to use for Selection Out and seen during a reselection..
+ */
+register IOWNID {
+ address 0x067
+ access_mode RW
+ modes M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * 960MHz Phase-Locked Loop Control 0
+ */
+register PLL960CTL0 {
+ address 0x068
+ access_mode RW
+ modes M_CFG
+ field PLL_VCOSEL 0x80
+ field PLL_PWDN 0x40
+ field PLL_NS 0x30
+ field PLL_ENLUD 0x08
+ field PLL_ENLPF 0x04
+ field PLL_DLPF 0x02
+ field PLL_ENFBM 0x01
+}
+
+/*
+ * Target Own Id
+ */
+register TOWNID {
+ address 0x069
+ access_mode RW
+ modes M_SCSI
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * 960MHz Phase-Locked Loop Control 1
+ */
+register PLL960CTL1 {
+ address 0x069
+ access_mode RW
+ modes M_CFG
+ field PLL_CNTEN 0x80
+ field PLL_CNTCLR 0x40
+ field PLL_RST 0x01
+}
+
+/*
+ * Expander Signature
+ */
+register XSIG {
+ address 0x06A
+ access_mode RW
+ modes M_SCSI
+}
+
+/*
+ * Shadow Byte Count
+ */
+register SHCNT {
+ address 0x068
+ access_mode RW
+ size 3
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * Selection Out ID
+ */
+register SELOID {
+ address 0x06B
+ access_mode RW
+ modes M_SCSI
+}
+
+/*
+ * 960-MHz Phase-Locked Loop Test Count
+ */
+register PLL960CNT0 {
+ address 0x06A
+ access_mode RO
+ size 2
+ modes M_CFG
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Control 0
+ */
+register PLL400CTL0 {
+ address 0x06C
+ access_mode RW
+ modes M_CFG
+ field PLL_VCOSEL 0x80
+ field PLL_PWDN 0x40
+ field PLL_NS 0x30
+ field PLL_ENLUD 0x08
+ field PLL_ENLPF 0x04
+ field PLL_DLPF 0x02
+ field PLL_ENFBM 0x01
+}
+
+/*
+ * Arbitration Fairness
+ */
+register FAIRNESS {
+ address 0x06C
+ access_mode RW
+ size 2
+ modes M_SCSI
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Control 1
+ */
+register PLL400CTL1 {
+ address 0x06D
+ access_mode RW
+ modes M_CFG
+ field PLL_CNTEN 0x80
+ field PLL_CNTCLR 0x40
+ field PLL_RST 0x01
+}
+
+/*
+ * Arbitration Unfairness
+ */
+register UNFAIRNESS {
+ address 0x06E
+ access_mode RW
+ size 2
+ modes M_SCSI
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Test Count
+ */
+register PLL400CNT0 {
+ address 0x06E
+ access_mode RO
+ size 2
+ modes M_CFG
+}
+
+/*
+ * SCB Page Pointer
+ */
+register SCBPTR {
+ address 0x0A8
+ access_mode RW
+ size 2
+ modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * CMC SCB Array Count
+ * Number of bytes to transfer between CMC SCB memory and SCBRAM.
+ * Transfers must be 8byte aligned and sized.
+ */
+register CCSCBACNT {
+ address 0x0AB
+ access_mode RW
+ modes M_CCHAN
+}
+
+/*
+ * SCB Autopointer
+ * SCB-Next Address Snooping logic. When an SCB is transferred to
+ * the card, the next SCB address to be used by the CMC array can
+ * be autoloaded from that transfer.
+ */
+register SCBAUTOPTR {
+ address 0x0AB
+ access_mode RW
+ modes M_CFG
+ count 1
+ field AUSCBPTR_EN 0x80
+ field SCBPTR_ADDR 0x38
+ field SCBPTR_OFF 0x07
+ dont_generate_debug_code
+}
+
+/*
+ * CMC SG Ram Address Pointer
+ */
+register CCSGADDR {
+ address 0x0AC
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * CMC SCB RAM Address Pointer
+ */
+register CCSCBADDR {
+ address 0x0AC
+ access_mode RW
+ modes M_CCHAN
+ dont_generate_debug_code
+}
+
+/*
+ * CMC SCB Ram Back-up Address Pointer
+ * Indicates the true stop location of transfers halted prior
+ * to SCBHCNT going to 0.
+ */
+register CCSCBADR_BK {
+ address 0x0AC
+ access_mode RO
+ modes M_CFG
+}
+
+/*
+ * CMC SG Control
+ */
+register CCSGCTL {
+ address 0x0AD
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ field CCSGDONE 0x80
+ field SG_CACHE_AVAIL 0x10
+ field CCSGENACK 0x08
+ mask CCSGEN 0x0C
+ field SG_FETCH_REQ 0x02
+ field CCSGRESET 0x01
+}
+
+/*
+ * CMD SCB Control
+ */
+register CCSCBCTL {
+ address 0x0AD
+ access_mode RW
+ modes M_CCHAN
+ field CCSCBDONE 0x80
+ field ARRDONE 0x40
+ field CCARREN 0x10
+ field CCSCBEN 0x08
+ field CCSCBDIR 0x04
+ field CCSCBRESET 0x01
+}
+
+/*
+ * CMC Ram BIST
+ */
+register CMC_RAMBIST {
+ address 0x0AD
+ access_mode RW
+ modes M_CFG
+ field SG_ELEMENT_SIZE 0x80
+ field SCBRAMBIST_FAIL 0x40
+ field SG_BIST_FAIL 0x20
+ field SG_BIST_EN 0x10
+ field CMC_BUFFER_BIST_FAIL 0x02
+ field CMC_BUFFER_BIST_EN 0x01
+}
+
+/*
+ * CMC SG RAM Data Port
+ */
+register CCSGRAM {
+ address 0x0B0
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * CMC SCB RAM Data Port
+ */
+register CCSCBRAM {
+ address 0x0B0
+ access_mode RW
+ modes M_CCHAN
+ dont_generate_debug_code
+}
+
+/*
+ * Flex DMA Address.
+ */
+register FLEXADR {
+ address 0x0B0
+ access_mode RW
+ size 3
+ modes M_SCSI
+}
+
+/*
+ * Flex DMA Byte Count
+ */
+register FLEXCNT {
+ address 0x0B3
+ access_mode RW
+ size 2
+ modes M_SCSI
+}
+
+/*
+ * Flex DMA Status
+ */
+register FLEXDMASTAT {
+ address 0x0B5
+ access_mode RW
+ modes M_SCSI
+ field FLEXDMAERR 0x02
+ field FLEXDMADONE 0x01
+}
+
+/*
+ * Flex DMA Data Port
+ */
+register FLEXDATA {
+ address 0x0B6
+ access_mode RW
+ modes M_SCSI
+}
+
+/*
+ * Board Data
+ */
+register BRDDAT {
+ address 0x0B8
+ access_mode RW
+ modes M_SCSI
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Board Control
+ */
+register BRDCTL {
+ address 0x0B9
+ access_mode RW
+ modes M_SCSI
+ count 7
+ field FLXARBACK 0x80
+ field FLXARBREQ 0x40
+ field BRDADDR 0x38
+ field BRDEN 0x04
+ field BRDRW 0x02
+ field BRDSTB 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Serial EEPROM Address
+ */
+register SEEADR {
+ address 0x0BA
+ access_mode RW
+ modes M_SCSI
+ count 4
+ dont_generate_debug_code
+}
+
+/*
+ * Serial EEPROM Data
+ */
+register SEEDAT {
+ address 0x0BC
+ access_mode RW
+ size 2
+ modes M_SCSI
+ count 4
+ dont_generate_debug_code
+}
+
+/*
+ * Serial EEPROM Status
+ */
+register SEESTAT {
+ address 0x0BE
+ access_mode RO
+ modes M_SCSI
+ count 1
+ field INIT_DONE 0x80
+ field SEEOPCODE 0x70
+ field LDALTID_L 0x08
+ field SEEARBACK 0x04
+ field SEEBUSY 0x02
+ field SEESTART 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Serial EEPROM Control
+ */
+register SEECTL {
+ address 0x0BE
+ access_mode RW
+ modes M_SCSI
+ count 4
+ field SEEOPCODE 0x70 {
+ SEEOP_ERASE 0x70,
+ SEEOP_READ 0x60,
+ SEEOP_WRITE 0x50,
+ /*
+ * The following four commands use special
+ * addresses for differentiation.
+ */
+ SEEOP_ERAL 0x40
+ }
+ mask SEEOP_EWEN 0x40
+ mask SEEOP_WALL 0x40
+ mask SEEOP_EWDS 0x40
+ field SEERST 0x02
+ field SEESTART 0x01
+ dont_generate_debug_code
+}
+
+const SEEOP_ERAL_ADDR 0x80
+const SEEOP_EWEN_ADDR 0xC0
+const SEEOP_WRAL_ADDR 0x40
+const SEEOP_EWDS_ADDR 0x00
+
+/*
+ * SCB Counter
+ */
+register SCBCNT {
+ address 0x0BF
+ access_mode RW
+ modes M_SCSI
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO Write Address
+ * Pointer to the next QWD location to be written to the data FIFO.
+ */
+register DFWADDR {
+ address 0x0C0
+ access_mode RW
+ size 2
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * DSP Filter Control
+ */
+register DSPFLTRCTL {
+ address 0x0C0
+ access_mode RW
+ modes M_CFG
+ field FLTRDISABLE 0x20
+ field EDGESENSE 0x10
+ field DSPFCNTSEL 0x0F
+}
+
+/*
+ * DSP Data Channel Control
+ */
+register DSPDATACTL {
+ address 0x0C1
+ access_mode RW
+ modes M_CFG
+ count 3
+ field BYPASSENAB 0x80
+ field DESQDIS 0x10
+ field RCVROFFSTDIS 0x04
+ field XMITOFFSTDIS 0x02
+ dont_generate_debug_code
+}
+
+/*
+ * Data FIFO Read Address
+ * Pointer to the next QWD location to be read from the data FIFO.
+ */
+register DFRADDR {
+ address 0x0C2
+ access_mode RW
+ size 2
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * DSP REQ Control
+ */
+register DSPREQCTL {
+ address 0x0C2
+ access_mode RW
+ modes M_CFG
+ field MANREQCTL 0xC0
+ field MANREQDLY 0x3F
+}
+
+/*
+ * DSP ACK Control
+ */
+register DSPACKCTL {
+ address 0x0C3
+ access_mode RW
+ modes M_CFG
+ field MANACKCTL 0xC0
+ field MANACKDLY 0x3F
+}
+
+/*
+ * Data FIFO Data
+ * Read/Write byte port into the data FIFO. The read and write
+ * FIFO pointers increment with each read and write respectively
+ * to this port.
+ */
+register DFDAT {
+ address 0x0C4
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ dont_generate_debug_code
+}
+
+/*
+ * DSP Channel Select
+ */
+register DSPSELECT {
+ address 0x0C4
+ access_mode RW
+ modes M_CFG
+ count 1
+ field AUTOINCEN 0x80
+ field DSPSEL 0x1F
+ dont_generate_debug_code
+}
+
+const NUMDSPS 0x14
+
+/*
+ * Write Bias Control
+ */
+register WRTBIASCTL {
+ address 0x0C5
+ access_mode WO
+ modes M_CFG
+ count 3
+ field AUTOXBCDIS 0x80
+ field XMITMANVAL 0x3F
+ dont_generate_debug_code
+}
+
+/*
+ * Currently the WRTBIASCTL is the same as the default.
+ */
+const WRTBIASCTL_HP_DEFAULT 0x0
+
+/*
+ * Receiver Bias Control
+ */
+register RCVRBIOSCTL {
+ address 0x0C6
+ access_mode WO
+ modes M_CFG
+ field AUTORBCDIS 0x80
+ field RCVRMANVAL 0x3F
+}
+
+/*
+ * Write Bias Calculator
+ */
+register WRTBIASCALC {
+ address 0x0C7
+ access_mode RO
+ modes M_CFG
+}
+
+/*
+ * Data FIFO Pointers
+ * Contains the byte offset from DFWADDR and DWRADDR to the current
+ * FIFO write/read locations.
+ */
+register DFPTRS {
+ address 0x0C8
+ access_mode RW
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Receiver Bias Calculator
+ */
+register RCVRBIASCALC {
+ address 0x0C8
+ access_mode RO
+ modes M_CFG
+}
+
+/*
+ * Data FIFO Backup Read Pointer
+ * Contains the data FIFO address to be restored if the last
+ * data accessed from the data FIFO was not transferred successfully.
+ */
+register DFBKPTR {
+ address 0x0C9
+ access_mode RW
+ size 2
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Skew Calculator
+ */
+register SKEWCALC {
+ address 0x0C9
+ access_mode RO
+ modes M_CFG
+}
+
+/*
+ * Data FIFO Debug Control
+ */
+register DFDBCTL {
+ address 0x0CB
+ access_mode RW
+ modes M_DFF0, M_DFF1
+ field DFF_CIO_WR_RDY 0x20
+ field DFF_CIO_RD_RDY 0x10
+ field DFF_DIR_ERR 0x08
+ field DFF_RAMBIST_FAIL 0x04
+ field DFF_RAMBIST_DONE 0x02
+ field DFF_RAMBIST_EN 0x01
+}
+
+/*
+ * Data FIFO Space Count
+ * Number of FIFO locations that are free.
+ */
+register DFSCNT {
+ address 0x0CC
+ access_mode RO
+ size 2
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Data FIFO Byte Count
+ * Number of filled FIFO locations.
+ */
+register DFBCNT {
+ address 0x0CE
+ access_mode RO
+ size 2
+ modes M_DFF0, M_DFF1
+}
+
+/*
+ * Sequencer Program Overlay Address.
+ * Low address must be written prior to high address.
+ */
+register OVLYADDR {
+ address 0x0D4
+ modes M_SCSI
+ size 2
+ access_mode RW
+}
+
+/*
+ * Sequencer Control 0
+ * Error detection mode, speed configuration,
+ * single step, breakpoints and program load.
+ */
+register SEQCTL0 {
+ address 0x0D6
+ access_mode RW
+ count 11
+ field PERRORDIS 0x80
+ field PAUSEDIS 0x40
+ field FAILDIS 0x20
+ field FASTMODE 0x10
+ field BRKADRINTEN 0x08
+ field STEP 0x04
+ field SEQRESET 0x02
+ field LOADRAM 0x01
+}
+
+/*
+ * Sequencer Control 1
+ * Instruction RAM Diagnostics
+ */
+register SEQCTL1 {
+ address 0x0D7
+ access_mode RW
+ field OVRLAY_DATA_CHK 0x08
+ field RAMBIST_DONE 0x04
+ field RAMBIST_FAIL 0x02
+ field RAMBIST_EN 0x01
+}
+
+/*
+ * Sequencer Flags
+ * Zero and Carry state of the ALU.
+ */
+register FLAGS {
+ address 0x0D8
+ access_mode RO
+ count 23
+ field ZERO 0x02
+ field CARRY 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Sequencer Interrupt Control
+ */
+register SEQINTCTL {
+ address 0x0D9
+ access_mode RW
+ field INTVEC1DSL 0x80
+ field INT1_CONTEXT 0x20
+ field SCS_SEQ_INT1M1 0x10
+ field SCS_SEQ_INT1M0 0x08
+ field INTMASK2 0x04
+ field INTMASK1 0x02
+ field IRET 0x01
+}
+
+/*
+ * Sequencer RAM Data Port
+ * Single byte window into the Sequencer Instruction Ram area starting
+ * at the address specified by OVLYADDR. To write a full instruction word,
+ * simply write four bytes in succession. OVLYADDR will increment after the
+ * most significant instrution byte (the byte with the parity bit) is written.
+ */
+register SEQRAM {
+ address 0x0DA
+ access_mode RW
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Sequencer Program Counter
+ * Low byte must be written prior to high byte.
+ */
+register PRGMCNT {
+ address 0x0DE
+ access_mode RW
+ size 2
+ count 5
+ dont_generate_debug_code
+}
+
+/*
+ * Accumulator
+ */
+register ACCUM {
+ address 0x0E0
+ access_mode RW
+ accumulator
+ dont_generate_debug_code
+}
+
+/*
+ * Source Index Register
+ * Incrementing index for reads of SINDIR and the destination (low byte only)
+ * for any immediate operands passed in jmp, jc, jnc, call instructions.
+ * Example:
+ * mvi 0xFF call some_routine;
+ *
+ * Will set SINDEX[0] to 0xFF and call the routine "some_routine.
+ */
+register SINDEX {
+ address 0x0E2
+ access_mode RW
+ size 2
+ sindex
+ dont_generate_debug_code
+}
+
+/*
+ * Destination Index Register
+ * Incrementing index for writes to DINDIR. Can be used as a scratch register.
+ */
+register DINDEX {
+ address 0x0E4
+ access_mode RW
+ size 2
+ dont_generate_debug_code
+}
+
+/*
+ * Break Address
+ * Sequencer instruction breakpoint address address.
+ */
+register BRKADDR0 {
+ address 0x0E6
+ access_mode RW
+}
+
+register BRKADDR1 {
+ address 0x0E6
+ access_mode RW
+ field BRKDIS 0x80 /* Disable Breakpoint */
+}
+
+/*
+ * All Ones
+ * All reads to this register return the value 0xFF.
+ */
+register ALLONES {
+ address 0x0E8
+ access_mode RO
+ allones
+ dont_generate_debug_code
+}
+
+/*
+ * All Zeros
+ * All reads to this register return the value 0.
+ */
+register ALLZEROS {
+ address 0x0EA
+ access_mode RO
+ allzeros
+ dont_generate_debug_code
+}
+
+/*
+ * No Destination
+ * Writes to this register have no effect.
+ */
+register NONE {
+ address 0x0EA
+ access_mode WO
+ none
+ dont_generate_debug_code
+}
+
+/*
+ * Source Index Indirect
+ * Reading this register is equivalent to reading (register_base + SINDEX) and
+ * incrementing SINDEX by 1.
+ */
+register SINDIR {
+ address 0x0EC
+ access_mode RO
+ dont_generate_debug_code
+}
+
+/*
+ * Destination Index Indirect
+ * Writing this register is equivalent to writing to (register_base + DINDEX)
+ * and incrementing DINDEX by 1.
+ */
+register DINDIR {
+ address 0x0ED
+ access_mode WO
+ dont_generate_debug_code
+}
+
+/*
+ * Function One
+ * 2's complement to bit value conversion. Write the 2's complement value
+ * (0-7 only) to the top nibble and retrieve the bit indexed by that value
+ * on the next read of this register.
+ * Example:
+ * Write 0x60
+ * Read 0x40
+ */
+register FUNCTION1 {
+ address 0x0F0
+ access_mode RW
+}
+
+/*
+ * Stack
+ * Window into the stack. Each stack location is 10 bits wide reported
+ * low byte followed by high byte. There are 8 stack locations.
+ */
+register STACK {
+ address 0x0F2
+ access_mode RW
+ dont_generate_debug_code
+}
+
+/*
+ * Interrupt Vector 1 Address
+ * Interrupt branch address for SCS SEQ_INT1 mode 0 and 1 interrupts.
+ */
+register INTVEC1_ADDR {
+ address 0x0F4
+ access_mode RW
+ size 2
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Current Address
+ * Address of the SEQRAM instruction currently executing instruction.
+ */
+register CURADDR {
+ address 0x0F4
+ access_mode RW
+ size 2
+ modes M_SCSI
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Interrupt Vector 2 Address
+ * Interrupt branch address for HST_SEQ_INT2 interrupts.
+ */
+register INTVEC2_ADDR {
+ address 0x0F6
+ access_mode RW
+ size 2
+ modes M_CFG
+ count 1
+ dont_generate_debug_code
+}
+
+/*
+ * Last Address
+ * Address of the SEQRAM instruction executed prior to the current instruction.
+ */
+register LASTADDR {
+ address 0x0F6
+ access_mode RW
+ size 2
+ modes M_SCSI
+}
+
+register AHD_PCI_CONFIG_BASE {
+ address 0x100
+ access_mode RW
+ size 256
+ modes M_CFG
+}
+
+/* ---------------------- Scratch RAM Offsets ------------------------- */
+scratch_ram {
+ /* Mode Specific */
+ address 0x0A0
+ size 8
+ modes 0, 1, 2, 3
+ REG0 {
+ size 2
+ dont_generate_debug_code
+ }
+ REG1 {
+ size 2
+ }
+ REG_ISR {
+ size 2
+ dont_generate_debug_code
+ }
+ SG_STATE {
+ size 1
+ field SEGS_AVAIL 0x01
+ field LOADING_NEEDED 0x02
+ field FETCH_INPROG 0x04
+ }
+ /*
+ * Track whether the transfer byte count for
+ * the current data phase is odd.
+ */
+ DATA_COUNT_ODD {
+ size 1
+ }
+}
+
+scratch_ram {
+ /* Mode Specific */
+ address 0x0F8
+ size 8
+ modes 0, 1, 2, 3
+ LONGJMP_ADDR {
+ size 2
+ dont_generate_debug_code
+ }
+ ACCUM_SAVE {
+ size 1
+ dont_generate_debug_code
+ }
+}
+
+
+scratch_ram {
+ address 0x100
+ size 128
+ modes 0, 1, 2, 3
+ /*
+ * Per "other-id" execution queues. We use an array of
+ * tail pointers into lists of SCBs sorted by "other-id".
+ * The execution head pointer threads the head SCBs for
+ * each list.
+ */
+ WAITING_SCB_TAILS {
+ size 32
+ dont_generate_debug_code
+ }
+ WAITING_TID_HEAD {
+ size 2
+ dont_generate_debug_code
+ }
+ WAITING_TID_TAIL {
+ size 2
+ dont_generate_debug_code
+ }
+ /*
+ * SCBID of the next SCB in the new SCB queue.
+ */
+ NEXT_QUEUED_SCB_ADDR {
+ size 4
+ dont_generate_debug_code
+ }
+ /*
+ * head of list of SCBs that have
+ * completed but have not been
+ * put into the qoutfifo.
+ */
+ COMPLETE_SCB_HEAD {
+ size 2
+ dont_generate_debug_code
+ }
+ /*
+ * The list of completed SCBs in
+ * the active DMA.
+ */
+ COMPLETE_SCB_DMAINPROG_HEAD {
+ size 2
+ dont_generate_debug_code
+ }
+ /*
+ * head of list of SCBs that have
+ * completed but need to be uploaded
+ * to the host prior to being completed.
+ */
+ COMPLETE_DMA_SCB_HEAD {
+ size 2
+ dont_generate_debug_code
+ }
+ /*
+ * tail of list of SCBs that have
+ * completed but need to be uploaded
+ * to the host prior to being completed.
+ */
+ COMPLETE_DMA_SCB_TAIL {
+ size 2
+ dont_generate_debug_code
+ }
+ /*
+ * head of list of SCBs that have
+ * been uploaded to the host, but cannot
+ * be completed until the QFREEZE is in
+ * full effect (i.e. no selections pending).
+ */
+ COMPLETE_ON_QFREEZE_HEAD {
+ size 2
+ dont_generate_debug_code
+ }
+ /*
+ * Counting semaphore to prevent new select-outs
+ * The queue is frozen so long as the sequencer
+ * and kernel freeze counts differ.
+ */
+ QFREEZE_COUNT {
+ size 2
+ }
+ KERNEL_QFREEZE_COUNT {
+ size 2
+ }
+ /*
+ * Mode to restore on legacy idle loop exit.
+ */
+ SAVED_MODE {
+ size 1
+ }
+ /*
+ * Single byte buffer used to designate the type or message
+ * to send to a target.
+ */
+ MSG_OUT {
+ size 1
+ dont_generate_debug_code
+ }
+ /* Parameters for DMA Logic */
+ DMAPARAMS {
+ size 1
+ count 8
+ field PRELOADEN 0x80
+ field WIDEODD 0x40
+ field SCSIEN 0x20
+ field SDMAEN 0x10
+ field SDMAENACK 0x10
+ field HDMAEN 0x08
+ field HDMAENACK 0x08
+ field DIRECTION 0x04 /* Set indicates PCI->SCSI */
+ field FIFOFLUSH 0x02
+ field FIFORESET 0x01
+ dont_generate_debug_code
+ }
+ SEQ_FLAGS {
+ size 1
+ field NOT_IDENTIFIED 0x80
+ field NO_CDB_SENT 0x40
+ field TARGET_CMD_IS_TAGGED 0x40
+ field DPHASE 0x20
+ /* Target flags */
+ field TARG_CMD_PENDING 0x10
+ field CMDPHASE_PENDING 0x08
+ field DPHASE_PENDING 0x04
+ field SPHASE_PENDING 0x02
+ field NO_DISCONNECT 0x01
+ }
+ /*
+ * Temporary storage for the
+ * target/channel/lun of a
+ * reconnecting target
+ */
+ SAVED_SCSIID {
+ size 1
+ dont_generate_debug_code
+ }
+ SAVED_LUN {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * The last bus phase as seen by the sequencer.
+ */
+ LASTPHASE {
+ size 1
+ field CDI 0x80
+ field IOI 0x40
+ field MSGI 0x20
+ field P_BUSFREE 0x01
+ enum PHASE_MASK CDO|IOO|MSGO {
+ P_DATAOUT 0x0,
+ P_DATAIN IOO,
+ P_DATAOUT_DT P_DATAOUT|MSGO,
+ P_DATAIN_DT P_DATAIN|MSGO,
+ P_COMMAND CDO,
+ P_MESGOUT CDO|MSGO,
+ P_STATUS CDO|IOO,
+ P_MESGIN CDO|IOO|MSGO
+ }
+ }
+ /*
+ * Value to "or" into the SCBPTR[1] value to
+ * indicate that an entry in the QINFIFO is valid.
+ */
+ QOUTFIFO_ENTRY_VALID_TAG {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * Kernel and sequencer offsets into the queue of
+ * incoming target mode command descriptors. The
+ * queue is full when the KERNEL_TQINPOS == TQINPOS.
+ */
+ KERNEL_TQINPOS {
+ size 1
+ count 1
+ dont_generate_debug_code
+ }
+ TQINPOS {
+ size 1
+ count 8
+ dont_generate_debug_code
+ }
+ /*
+ * Base address of our shared data with the kernel driver in host
+ * memory. This includes the qoutfifo and target mode
+ * incoming command queue.
+ */
+ SHARED_DATA_ADDR {
+ size 4
+ dont_generate_debug_code
+ }
+ /*
+ * Pointer to location in host memory for next
+ * position in the qoutfifo.
+ */
+ QOUTFIFO_NEXT_ADDR {
+ size 4
+ dont_generate_debug_code
+ }
+ ARG_1 {
+ size 1
+ mask SEND_MSG 0x80
+ mask SEND_SENSE 0x40
+ mask SEND_REJ 0x20
+ mask MSGOUT_PHASEMIS 0x10
+ mask EXIT_MSG_LOOP 0x08
+ mask CONT_MSG_LOOP_WRITE 0x04
+ mask CONT_MSG_LOOP_READ 0x03
+ mask CONT_MSG_LOOP_TARG 0x02
+ alias RETURN_1
+ dont_generate_debug_code
+ }
+ ARG_2 {
+ size 1
+ count 1
+ alias RETURN_2
+ dont_generate_debug_code
+ }
+
+ /*
+ * Snapshot of MSG_OUT taken after each message is sent.
+ */
+ LAST_MSG {
+ size 1
+ dont_generate_debug_code
+ }
+
+ /*
+ * Sequences the kernel driver has okayed for us. This allows
+ * the driver to do things like prevent initiator or target
+ * operations.
+ */
+ SCSISEQ_TEMPLATE {
+ size 1
+ count 7
+ field MANUALCTL 0x40
+ field ENSELI 0x20
+ field ENRSELI 0x10
+ field MANUALP 0x0C
+ field ENAUTOATNP 0x02
+ field ALTSTIM 0x01
+ dont_generate_debug_code
+ }
+
+ /*
+ * The initiator specified tag for this target mode transaction.
+ */
+ INITIATOR_TAG {
+ size 1
+ count 1
+ dont_generate_debug_code
+ }
+
+ SEQ_FLAGS2 {
+ size 1
+ field PENDING_MK_MESSAGE 0x01
+ field TARGET_MSG_PENDING 0x02
+ field SELECTOUT_QFROZEN 0x04
+ }
+
+ ALLOCFIFO_SCBPTR {
+ size 2
+ dont_generate_debug_code
+ }
+
+ /*
+ * The maximum amount of time to wait, when interrupt coalescing
+ * is enabled, before issuing a CMDCMPLT interrupt for a completed
+ * command.
+ */
+ INT_COALESCING_TIMER {
+ size 2
+ dont_generate_debug_code
+ }
+
+ /*
+ * The maximum number of commands to coalesce into a single interrupt.
+ * Actually the 2's complement of that value to simplify sequencer
+ * code.
+ */
+ INT_COALESCING_MAXCMDS {
+ size 1
+ dont_generate_debug_code
+ }
+
+ /*
+ * The minimum number of commands still outstanding required
+ * to continue coalescing (2's complement of value).
+ */
+ INT_COALESCING_MINCMDS {
+ size 1
+ dont_generate_debug_code
+ }
+
+ /*
+ * Number of commands "in-flight".
+ */
+ CMDS_PENDING {
+ size 2
+ dont_generate_debug_code
+ }
+
+ /*
+ * The count of commands that have been coalesced.
+ */
+ INT_COALESCING_CMDCOUNT {
+ size 1
+ dont_generate_debug_code
+ }
+
+ /*
+ * Since the HS_MAIBOX is self clearing, copy its contents to
+ * this position in scratch ram every time it changes.
+ */
+ LOCAL_HS_MAILBOX {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * Target-mode CDB type to CDB length table used
+ * in non-packetized operation.
+ */
+ CMDSIZE_TABLE {
+ size 8
+ count 8
+ dont_generate_debug_code
+ }
+ /*
+ * When an SCB with the MK_MESSAGE flag is
+ * queued to the controller, it cannot enter
+ * the waiting for selection list until the
+ * selections for any previously queued
+ * commands to that target complete. During
+ * the wait, the MK_MESSAGE SCB is queued
+ * here.
+ */
+ MK_MESSAGE_SCB {
+ size 2
+ }
+ /*
+ * Saved SCSIID of MK_MESSAGE_SCB to avoid
+ * an extra SCBPTR operation when deciding
+ * if the MK_MESSAGE_SCB can be run.
+ */
+ MK_MESSAGE_SCSIID {
+ size 1
+ }
+}
+
+/************************* Hardware SCB Definition ****************************/
+scb {
+ address 0x180
+ size 64
+ modes 0, 1, 2, 3
+ SCB_RESIDUAL_DATACNT {
+ size 4
+ alias SCB_CDB_STORE
+ alias SCB_HOST_CDB_PTR
+ dont_generate_debug_code
+ }
+ SCB_RESIDUAL_SGPTR {
+ size 4
+ field SG_ADDR_MASK 0xf8 /* In the last byte */
+ field SG_OVERRUN_RESID 0x02 /* In the first byte */
+ field SG_LIST_NULL 0x01 /* In the first byte */
+ dont_generate_debug_code
+ }
+ SCB_SCSI_STATUS {
+ size 1
+ alias SCB_HOST_CDB_LEN
+ dont_generate_debug_code
+ }
+ SCB_TARGET_PHASES {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_TARGET_DATA_DIR {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_TARGET_ITAG {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_SENSE_BUSADDR {
+ /*
+ * Only valid if CDB length is less than 13 bytes or
+ * we are using a CDB pointer. Otherwise contains
+ * the last 4 bytes of embedded cdb information.
+ */
+ size 4
+ alias SCB_NEXT_COMPLETE
+ dont_generate_debug_code
+ }
+ SCB_TAG {
+ alias SCB_FIFO_USE_COUNT
+ size 2
+ dont_generate_debug_code
+ }
+ SCB_CONTROL {
+ size 1
+ field TARGET_SCB 0x80
+ field DISCENB 0x40
+ field TAG_ENB 0x20
+ field MK_MESSAGE 0x10
+ field STATUS_RCVD 0x08
+ field DISCONNECTED 0x04
+ field SCB_TAG_TYPE 0x03
+ }
+ SCB_SCSIID {
+ size 1
+ field TID 0xF0
+ field OID 0x0F
+ }
+ SCB_LUN {
+ size 1
+ field LID 0xff
+ dont_generate_debug_code
+ }
+ SCB_TASK_ATTRIBUTE {
+ size 1
+ /*
+ * Overloaded field for non-packetized
+ * ignore wide residue message handling.
+ */
+ field SCB_XFERLEN_ODD 0x01
+ dont_generate_debug_code
+ }
+ SCB_CDB_LEN {
+ size 1
+ field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */
+ dont_generate_debug_code
+ }
+ SCB_TASK_MANAGEMENT {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_DATAPTR {
+ size 8
+ dont_generate_debug_code
+ }
+ SCB_DATACNT {
+ /*
+ * The last byte is really the high address bits for
+ * the data address.
+ */
+ size 4
+ field SG_LAST_SEG 0x80 /* In the fourth byte */
+ field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
+ dont_generate_debug_code
+ }
+ SCB_SGPTR {
+ size 4
+ field SG_STATUS_VALID 0x04 /* In the first byte */
+ field SG_FULL_RESID 0x02 /* In the first byte */
+ field SG_LIST_NULL 0x01 /* In the first byte */
+ dont_generate_debug_code
+ }
+ SCB_BUSADDR {
+ size 4
+ dont_generate_debug_code
+ }
+ SCB_NEXT {
+ alias SCB_NEXT_SCB_BUSADDR
+ size 2
+ dont_generate_debug_code
+ }
+ SCB_NEXT2 {
+ size 2
+ dont_generate_debug_code
+ }
+ SCB_SPARE {
+ size 8
+ alias SCB_PKT_LUN
+ }
+ SCB_DISCONNECTED_LISTS {
+ size 8
+ dont_generate_debug_code
+ }
+}
+
+/*********************************** Constants ********************************/
+const MK_MESSAGE_BIT_OFFSET 4
+const TID_SHIFT 4
+const TARGET_CMD_CMPLT 0xfe
+const INVALID_ADDR 0x80
+#define SCB_LIST_NULL 0xff
+#define QOUTFIFO_ENTRY_VALID_TOGGLE 0x80
+
+const CCSGADDR_MAX 0x80
+const CCSCBADDR_MAX 0x80
+const CCSGRAM_MAXSEGS 16
+
+/* Selection Timeout Timer Constants */
+const STIMESEL_SHIFT 3
+const STIMESEL_MIN 0x18
+const STIMESEL_BUG_ADJ 0x8
+
+/* WDTR Message values */
+const BUS_8_BIT 0x00
+const BUS_16_BIT 0x01
+const BUS_32_BIT 0x02
+
+/* Offset maximums */
+const MAX_OFFSET 0xfe
+const MAX_OFFSET_PACED 0xfe
+const MAX_OFFSET_PACED_BUG 0x7f
+/*
+ * Some 160 devices incorrectly accept 0xfe as a
+ * sync offset, but will overrun this value. Limit
+ * to 0x7f for speed lower than U320 which will
+ * avoid the persistent sync offset overruns.
+ */
+const MAX_OFFSET_NON_PACED 0x7f
+const HOST_MSG 0xff
+
+/*
+ * The size of our sense buffers.
+ * Sense buffer mapping can be handled in either of two ways.
+ * The first is to allocate a dmamap for each transaction.
+ * Depending on the architecture, dmamaps can be costly. The
+ * alternative is to statically map the buffers in much the same
+ * way we handle our scatter gather lists. The driver implements
+ * the later.
+ */
+const AHD_SENSE_BUFSIZE 256
+
+/* Target mode command processing constants */
+const CMD_GROUP_CODE_SHIFT 0x05
+
+const STATUS_BUSY 0x08
+const STATUS_QUEUE_FULL 0x28
+const STATUS_PKT_SENSE 0xFF
+const TARGET_DATA_IN 1
+
+const SCB_TRANSFER_SIZE_FULL_LUN 56
+const SCB_TRANSFER_SIZE_1BYTE_LUN 48
+/* PKT_OVERRUN_BUFSIZE must be a multiple of 256 less than 64K */
+const PKT_OVERRUN_BUFSIZE 512
+
+/*
+ * Timer parameters.
+ */
+const AHD_TIMER_US_PER_TICK 25
+const AHD_TIMER_MAX_TICKS 0xFFFF
+const AHD_TIMER_MAX_US (AHD_TIMER_MAX_TICKS * AHD_TIMER_US_PER_TICK)
+
+/*
+ * Downloaded (kernel inserted) constants
+ */
+const SG_PREFETCH_CNT download
+const SG_PREFETCH_CNT_LIMIT download
+const SG_PREFETCH_ALIGN_MASK download
+const SG_PREFETCH_ADDR_MASK download
+const SG_SIZEOF download
+const PKT_OVERRUN_BUFOFFSET download
+const SCB_TRANSFER_SIZE download
+const CACHELINE_MASK download
+
+/*
+ * BIOS SCB offsets
+ */
+const NVRAM_SCB_OFFSET 0x2C
diff --git a/drivers/scsi/aic7xxx/aic79xx.seq b/drivers/scsi/aic7xxx/aic79xx.seq
new file mode 100644
index 000000000..3a36d9362
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx.seq
@@ -0,0 +1,2290 @@
+/*
+ * Adaptec U320 device driver firmware for Linux and FreeBSD.
+ *
+ * Copyright (c) 1994-2001, 2004 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $"
+PATCH_ARG_LIST = "struct ahd_softc *ahd"
+PREFIX = "ahd_"
+
+#include "aic79xx.reg"
+#include "scsi_message.h"
+
+restart:
+if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+ test SEQINTCODE, 0xFF jz idle_loop;
+ SET_SEQINTCODE(NO_SEQINT)
+}
+
+idle_loop:
+
+ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+ /*
+ * Convert ERROR status into a sequencer
+ * interrupt to handle the case of an
+ * interrupt collision on the hardware
+ * setting of HWERR.
+ */
+ test ERROR, 0xFF jz no_error_set;
+ SET_SEQINTCODE(SAW_HWERR)
+no_error_set:
+ }
+ SET_MODE(M_SCSI, M_SCSI)
+ test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus;
+ test SEQ_FLAGS2, SELECTOUT_QFROZEN jz check_waiting_list;
+ /*
+ * If the kernel has caught up with us, thaw the queue.
+ */
+ mov A, KERNEL_QFREEZE_COUNT;
+ cmp QFREEZE_COUNT, A jne check_frozen_completions;
+ mov A, KERNEL_QFREEZE_COUNT[1];
+ cmp QFREEZE_COUNT[1], A jne check_frozen_completions;
+ and SEQ_FLAGS2, ~SELECTOUT_QFROZEN;
+ jmp check_waiting_list;
+check_frozen_completions:
+ test SSTAT0, SELDO|SELINGO jnz idle_loop_checkbus;
+BEGIN_CRITICAL;
+ /*
+ * If we have completions stalled waiting for the qfreeze
+ * to take effect, move them over to the complete_scb list
+ * now that no selections are pending.
+ */
+ cmp COMPLETE_ON_QFREEZE_HEAD[1],SCB_LIST_NULL je idle_loop_checkbus;
+ /*
+ * Find the end of the qfreeze list. The first element has
+ * to be treated specially.
+ */
+ bmov SCBPTR, COMPLETE_ON_QFREEZE_HEAD, 2;
+ cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je join_lists;
+ /*
+ * Now the normal loop.
+ */
+ bmov SCBPTR, SCB_NEXT_COMPLETE, 2;
+ cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . - 1;
+join_lists:
+ bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+ bmov COMPLETE_SCB_HEAD, COMPLETE_ON_QFREEZE_HEAD, 2;
+ mvi COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL;
+ jmp idle_loop_checkbus;
+check_waiting_list:
+ cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
+ /*
+ * ENSELO is cleared by a SELDO, so we must test for SELDO
+ * one last time.
+ */
+ test SSTAT0, SELDO jnz select_out;
+ call start_selection;
+idle_loop_checkbus:
+ test SSTAT0, SELDO jnz select_out;
+END_CRITICAL;
+ test SSTAT0, SELDI jnz select_in;
+ test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq;
+ test SCSISIGO, ATNO jz idle_loop_check_nonpackreq;
+ call unexpected_nonpkt_phase_find_ctxt;
+idle_loop_check_nonpackreq:
+ test SSTAT2, NONPACKREQ jz . + 2;
+ call unexpected_nonpkt_phase_find_ctxt;
+ if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
+ /*
+ * On Rev A. hardware, the busy LED is only
+ * turned on automaically during selections
+ * and re-selections. Make the LED status
+ * more useful by forcing it to be on so
+ * long as one of our data FIFOs is active.
+ */
+ and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
+ cmp A, FIFO0FREE|FIFO1FREE jne . + 3;
+ and SBLKCTL, ~DIAGLEDEN|DIAGLEDON;
+ jmp . + 2;
+ or SBLKCTL, DIAGLEDEN|DIAGLEDON;
+ }
+ call idle_loop_gsfifo_in_scsi_mode;
+ call idle_loop_service_fifos;
+ call idle_loop_cchan;
+ jmp idle_loop;
+
+idle_loop_gsfifo:
+ SET_MODE(M_SCSI, M_SCSI)
+BEGIN_CRITICAL;
+idle_loop_gsfifo_in_scsi_mode:
+ test LQISTAT2, LQIGSAVAIL jz return;
+ /*
+ * We have received good status for this transaction. There may
+ * still be data in our FIFOs draining to the host. Complete
+ * the SCB only if all data has transferred to the host.
+ */
+good_status_IU_done:
+ bmov SCBPTR, GSFIFO, 2;
+ clr SCB_SCSI_STATUS;
+ /*
+ * If a command completed before an attempted task management
+ * function completed, notify the host after disabling any
+ * pending select-outs.
+ */
+ test SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally;
+ test SSTAT0, SELDO|SELINGO jnz . + 2;
+ and SCSISEQ0, ~ENSELO;
+ SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
+gsfifo_complete_normally:
+ or SCB_CONTROL, STATUS_RCVD;
+
+ /*
+ * Since this status did not consume a FIFO, we have to
+ * be a bit more dilligent in how we check for FIFOs pertaining
+ * to this transaction. There are two states that a FIFO still
+ * transferring data may be in.
+ *
+ * 1) Configured and draining to the host, with a FIFO handler.
+ * 2) Pending cfg4data, fifo not empty.
+ *
+ * Case 1 can be detected by noticing a non-zero FIFO active
+ * count in the SCB. In this case, we allow the routine servicing
+ * the FIFO to complete the SCB.
+ *
+ * Case 2 implies either a pending or yet to occur save data
+ * pointers for this same context in the other FIFO. So, if
+ * we detect case 1, we will properly defer the post of the SCB
+ * and achieve the desired result. The pending cfg4data will
+ * notice that status has been received and complete the SCB.
+ */
+ test SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode;
+ call complete;
+END_CRITICAL;
+ jmp idle_loop_gsfifo_in_scsi_mode;
+
+idle_loop_service_fifos:
+ SET_MODE(M_DFF0, M_DFF0)
+BEGIN_CRITICAL;
+ test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo;
+ call longjmp;
+END_CRITICAL;
+idle_loop_next_fifo:
+ SET_MODE(M_DFF1, M_DFF1)
+BEGIN_CRITICAL;
+ test LONGJMP_ADDR[1], INVALID_ADDR jz longjmp;
+END_CRITICAL;
+return:
+ ret;
+
+idle_loop_cchan:
+ SET_MODE(M_CCHAN, M_CCHAN)
+ test QOFF_CTLSTA, HS_MAILBOX_ACT jz hs_mailbox_empty;
+ or QOFF_CTLSTA, HS_MAILBOX_ACT;
+ mov LOCAL_HS_MAILBOX, HS_MAILBOX;
+hs_mailbox_empty:
+BEGIN_CRITICAL;
+ test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle;
+ test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog;
+ test CCSCBCTL, CCSCBDONE jz return;
+ /* FALLTHROUGH */
+scbdma_tohost_done:
+ test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone;
+ /*
+ * An SCB has been successfully uploaded to the host.
+ * If the SCB was uploaded for some reason other than
+ * bad SCSI status (currently only for underruns), we
+ * queue the SCB for normal completion. Otherwise, we
+ * wait until any select-out activity has halted, and
+ * then queue the completion.
+ */
+ and CCSCBCTL, ~(CCARREN|CCSCBEN);
+ bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+ cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . + 2;
+ mvi COMPLETE_DMA_SCB_TAIL[1], SCB_LIST_NULL;
+ test SCB_SCSI_STATUS, 0xff jz scbdma_queue_completion;
+ bmov SCB_NEXT_COMPLETE, COMPLETE_ON_QFREEZE_HEAD, 2;
+ bmov COMPLETE_ON_QFREEZE_HEAD, SCBPTR, 2 ret;
+scbdma_queue_completion:
+ bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+ bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
+fill_qoutfifo_dmadone:
+ and CCSCBCTL, ~(CCARREN|CCSCBEN);
+ call qoutfifo_updated;
+ mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL;
+ bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4;
+ test QOFF_CTLSTA, SDSCB_ROLLOVR jz return;
+ bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4;
+ xor QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret;
+END_CRITICAL;
+
+qoutfifo_updated:
+ /*
+ * If there are more commands waiting to be dma'ed
+ * to the host, always coalesce. Otherwise honor the
+ * host's wishes.
+ */
+ cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
+ cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
+ test LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt;
+
+ /*
+ * If we have relatively few commands outstanding, don't
+ * bother waiting for another command to complete.
+ */
+ test CMDS_PENDING[1], 0xFF jnz coalesce_by_count;
+ /* Add -1 so that jnc means <= not just < */
+ add A, -1, INT_COALESCING_MINCMDS;
+ add NONE, A, CMDS_PENDING;
+ jnc issue_cmdcmplt;
+
+ /*
+ * If coalescing, only coalesce up to the limit
+ * provided by the host driver.
+ */
+coalesce_by_count:
+ mov A, INT_COALESCING_MAXCMDS;
+ add NONE, A, INT_COALESCING_CMDCOUNT;
+ jc issue_cmdcmplt;
+ /*
+ * If the timer is not currently active,
+ * fire it up.
+ */
+ test INTCTL, SWTMINTMASK jz return;
+ bmov SWTIMER, INT_COALESCING_TIMER, 2;
+ mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
+ or INTCTL, SWTMINTEN|SWTIMER_START;
+ and INTCTL, ~SWTMINTMASK ret;
+
+issue_cmdcmplt:
+ mvi INTSTAT, CMDCMPLT;
+ clr INT_COALESCING_CMDCOUNT;
+ or INTCTL, SWTMINTMASK ret;
+
+BEGIN_CRITICAL;
+fetch_new_scb_inprog:
+ test CCSCBCTL, ARRDONE jz return;
+fetch_new_scb_done:
+ and CCSCBCTL, ~(CCARREN|CCSCBEN);
+ clr A;
+ add CMDS_PENDING, 1;
+ adc CMDS_PENDING[1], A;
+ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
+ /*
+ * "Short Luns" are not placed into outgoing LQ
+ * packets in the correct byte order. Use a full
+ * sized lun field instead and fill it with the
+ * one byte of lun information we support.
+ */
+ mov SCB_PKT_LUN[6], SCB_LUN;
+ }
+ /*
+ * The FIFO use count field is shared with the
+ * tag set by the host so that our SCB dma engine
+ * knows the correct location to store the SCB.
+ * Set it to zero before processing the SCB.
+ */
+ clr SCB_FIFO_USE_COUNT;
+ /* Update the next SCB address to download. */
+ bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4;
+ /*
+ * NULL out the SCB links since these fields
+ * occupy the same location as SCB_NEXT_SCB_BUSADDR.
+ */
+ mvi SCB_NEXT[1], SCB_LIST_NULL;
+ mvi SCB_NEXT2[1], SCB_LIST_NULL;
+ /* Increment our position in the QINFIFO. */
+ mov NONE, SNSCB_QOFF;
+
+ /*
+ * Save SCBID of this SCB in REG0 since
+ * SCBPTR will be clobbered during target
+ * list updates. We also record the SCB's
+ * flags so that we can refer to them even
+ * after SCBPTR has been changed.
+ */
+ bmov REG0, SCBPTR, 2;
+ mov A, SCB_CONTROL;
+
+ /*
+ * Find the tail SCB of the execution queue
+ * for this target.
+ */
+ shr SINDEX, 3, SCB_SCSIID;
+ and SINDEX, ~0x1;
+ mvi SINDEX[1], (WAITING_SCB_TAILS >> 8);
+ bmov DINDEX, SINDEX, 2;
+ bmov SCBPTR, SINDIR, 2;
+
+ /*
+ * Update the tail to point to the new SCB.
+ */
+ bmov DINDIR, REG0, 2;
+
+ /*
+ * If the queue was empty, queue this SCB as
+ * the first for this target.
+ */
+ cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb;
+
+ /*
+ * SCBs that want to send messages must always be
+ * at the head of their per-target queue so that
+ * ATN can be asserted even if the current
+ * negotiation agreement is packetized. If the
+ * target queue is empty, the SCB can be queued
+ * immediately. If the queue is not empty, we must
+ * wait for it to empty before entering this SCB
+ * into the waiting for selection queue. Otherwise
+ * our batching and round-robin selection scheme
+ * could allow commands to be queued out of order.
+ * To simplify the implementation, we stop pulling
+ * new commands from the host until the MK_MESSAGE
+ * SCB can be queued to the waiting for selection
+ * list.
+ */
+ test A, MK_MESSAGE jz batch_scb;
+
+ /*
+ * If the last SCB is also a MK_MESSAGE SCB, then
+ * order is preserved even if we batch.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz batch_scb;
+
+ /*
+ * Defer this SCB and stop fetching new SCBs until
+ * it can be queued. Since the SCB_SCSIID of the
+ * tail SCB must be the same as that of the newly
+ * queued SCB, there is no need to restore the SCBID
+ * here.
+ */
+ or SEQ_FLAGS2, PENDING_MK_MESSAGE;
+ bmov MK_MESSAGE_SCB, REG0, 2;
+ mov MK_MESSAGE_SCSIID, SCB_SCSIID ret;
+
+batch_scb:
+ /*
+ * Otherwise just update the previous tail SCB to
+ * point to the new tail.
+ */
+ bmov SCB_NEXT, REG0, 2 ret;
+
+first_new_target_scb:
+ /*
+ * Append SCB to the tail of the waiting for
+ * selection list.
+ */
+ cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb;
+ bmov SCBPTR, WAITING_TID_TAIL, 2;
+ bmov SCB_NEXT2, REG0, 2;
+ bmov WAITING_TID_TAIL, REG0, 2 ret;
+first_new_scb:
+ /*
+ * Whole list is empty, so the head of
+ * the list must be initialized too.
+ */
+ bmov WAITING_TID_HEAD, REG0, 2;
+ bmov WAITING_TID_TAIL, REG0, 2 ret;
+END_CRITICAL;
+
+scbdma_idle:
+ /*
+ * Don't bother downloading new SCBs to execute
+ * if select-outs are currently frozen or we have
+ * a MK_MESSAGE SCB waiting to enter the queue.
+ */
+ test SEQ_FLAGS2, SELECTOUT_QFROZEN|PENDING_MK_MESSAGE
+ jnz scbdma_no_new_scbs;
+BEGIN_CRITICAL;
+ test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
+scbdma_no_new_scbs:
+ cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
+ cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
+ /* FALLTHROUGH */
+fill_qoutfifo:
+ /*
+ * Keep track of the SCBs we are dmaing just
+ * in case the DMA fails or is aborted.
+ */
+ bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2;
+ mvi CCSCBCTL, CCSCBRESET;
+ bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4;
+ mov A, QOUTFIFO_NEXT_ADDR;
+ bmov SCBPTR, COMPLETE_SCB_HEAD, 2;
+fill_qoutfifo_loop:
+ bmov CCSCBRAM, SCBPTR, 2;
+ mov CCSCBRAM, SCB_SGPTR[0];
+ mov CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG;
+ mov NONE, SDSCB_QOFF;
+ inc INT_COALESCING_CMDCOUNT;
+ add CMDS_PENDING, -1;
+ adc CMDS_PENDING[1], -1;
+ cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done;
+ cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done;
+ test QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done;
+ /*
+ * Don't cross an ADB or Cachline boundary when DMA'ing
+ * completion entries. In PCI mode, at least in 32/33
+ * configurations, the SCB DMA engine may lose its place
+ * in the data-stream should the target force a retry on
+ * something other than an 8byte aligned boundary. In
+ * PCI-X mode, we do this to avoid split transactions since
+ * many chipsets seem to be unable to format proper split
+ * completions to continue the data transfer.
+ */
+ add SINDEX, A, CCSCBADDR;
+ test SINDEX, CACHELINE_MASK jz fill_qoutfifo_done;
+ bmov SCBPTR, SCB_NEXT_COMPLETE, 2;
+ jmp fill_qoutfifo_loop;
+fill_qoutfifo_done:
+ mov SCBHCNT, CCSCBADDR;
+ mvi CCSCBCTL, CCSCBEN|CCSCBRESET;
+ bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+ mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret;
+
+fetch_new_scb:
+ bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4;
+ mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb;
+dma_complete_scb:
+ bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2;
+ bmov SCBHADDR, SCB_BUSADDR, 4;
+ mvi CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb;
+
+/*
+ * Either post or fetch an SCB from host memory. The caller
+ * is responsible for polling for transfer completion.
+ *
+ * Prerequisits: Mode == M_CCHAN
+ * SINDEX contains CCSCBCTL flags
+ * SCBHADDR set to Host SCB address
+ * SCBPTR set to SCB src location on "push" operations
+ */
+SET_SRC_MODE M_CCHAN;
+SET_DST_MODE M_CCHAN;
+dma_scb:
+ mvi SCBHCNT, SCB_TRANSFER_SIZE;
+ mov CCSCBCTL, SINDEX ret;
+
+setjmp:
+ /*
+ * At least on the A, a return in the same
+ * instruction as the bmov results in a return
+ * to the caller, not to the new address at the
+ * top of the stack. Since we want the latter
+ * (we use setjmp to register a handler from an
+ * interrupt context but not invoke that handler
+ * until we return to our idle loop), use a
+ * separate ret instruction.
+ */
+ bmov LONGJMP_ADDR, STACK, 2;
+ ret;
+setjmp_inline:
+ bmov LONGJMP_ADDR, STACK, 2;
+longjmp:
+ bmov STACK, LONGJMP_ADDR, 2 ret;
+END_CRITICAL;
+
+/*************************** Chip Bug Work Arounds ****************************/
+/*
+ * Must disable interrupts when setting the mode pointer
+ * register as an interrupt occurring mid update will
+ * fail to store the new mode value for restoration on
+ * an iret.
+ */
+if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
+set_mode_work_around:
+ mvi SEQINTCTL, INTVEC1DSL;
+ mov MODE_PTR, SINDEX;
+ clr SEQINTCTL ret;
+}
+
+
+if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+set_seqint_work_around:
+ mov SEQINTCODE, SINDEX;
+ mvi SEQINTCODE, NO_SEQINT ret;
+}
+
+/************************ Packetized LongJmp Routines *************************/
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+start_selection:
+BEGIN_CRITICAL;
+ if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
+ /*
+ * Razor #494
+ * Rev A hardware fails to update LAST/CURR/NEXTSCB
+ * correctly after a packetized selection in several
+ * situations:
+ *
+ * 1) If only one command existed in the queue, the
+ * LAST/CURR/NEXTSCB are unchanged.
+ *
+ * 2) In a non QAS, protocol allowed phase change,
+ * the queue is shifted 1 too far. LASTSCB is
+ * the last SCB that was correctly processed.
+ *
+ * 3) In the QAS case, if the full list of commands
+ * was successfully sent, NEXTSCB is NULL and neither
+ * CURRSCB nor LASTSCB can be trusted. We must
+ * manually walk the list counting MAXCMDCNT elements
+ * to find the last SCB that was sent correctly.
+ *
+ * To simplify the workaround for this bug in SELDO
+ * handling, we initialize LASTSCB prior to enabling
+ * selection so we can rely on it even for case #1 above.
+ */
+ bmov LASTSCB, WAITING_TID_HEAD, 2;
+ }
+ bmov CURRSCB, WAITING_TID_HEAD, 2;
+ bmov SCBPTR, WAITING_TID_HEAD, 2;
+ shr SELOID, 4, SCB_SCSIID;
+ /*
+ * If we want to send a message to the device, ensure
+ * we are selecting with atn regardless of our packetized
+ * agreement. Since SPI4 only allows target reset or PPR
+ * messages if this is a packetized connection, the change
+ * to our negotiation table entry for this selection will
+ * be cleared when the message is acted on.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz . + 3;
+ mov NEGOADDR, SELOID;
+ or NEGCONOPTS, ENAUTOATNO;
+ or SCSISEQ0, ENSELO ret;
+END_CRITICAL;
+
+/*
+ * Allocate a FIFO for a non-packetized transaction.
+ * In RevA hardware, both FIFOs must be free before we
+ * can allocate a FIFO for a non-packetized transaction.
+ */
+allocate_fifo_loop:
+ /*
+ * Do whatever work is required to free a FIFO.
+ */
+ call idle_loop_service_fifos;
+ SET_MODE(M_SCSI, M_SCSI)
+allocate_fifo:
+ if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) {
+ and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
+ cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop;
+ } else {
+ test DFFSTAT, FIFO1FREE jnz allocate_fifo1;
+ test DFFSTAT, FIFO0FREE jz allocate_fifo_loop;
+ mvi DFFSTAT, B_CURRFIFO_0;
+ SET_MODE(M_DFF0, M_DFF0)
+ bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
+ }
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+allocate_fifo1:
+ mvi DFFSTAT, CURRFIFO_1;
+ SET_MODE(M_DFF1, M_DFF1)
+ bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
+
+/*
+ * We have been reselected as an initiator
+ * or selected as a target.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+select_in:
+ if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
+ /*
+ * On Rev A. hardware, the busy LED is only
+ * turned on automaically during selections
+ * and re-selections. Make the LED status
+ * more useful by forcing it to be on from
+ * the point of selection until our idle
+ * loop determines that neither of our FIFOs
+ * are busy. This handles the non-packetized
+ * case nicely as we will not return to the
+ * idle loop until the busfree at the end of
+ * each transaction.
+ */
+ or SBLKCTL, DIAGLEDEN|DIAGLEDON;
+ }
+ if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+ /*
+ * Test to ensure that the bus has not
+ * already gone free prior to clearing
+ * any stale busfree status. This avoids
+ * a window whereby a busfree just after
+ * a selection could be missed.
+ */
+ test SCSISIGI, BSYI jz . + 2;
+ mvi CLRSINT1,CLRBUSFREE;
+ or SIMODE1, ENBUSFREE;
+ }
+ or SXFRCTL0, SPIOEN;
+ and SAVED_SCSIID, SELID_MASK, SELID;
+ and A, OID, IOWNID;
+ or SAVED_SCSIID, A;
+ mvi CLRSINT0, CLRSELDI;
+ jmp ITloop;
+
+/*
+ * We have successfully selected out.
+ *
+ * Clear SELDO.
+ * Dequeue all SCBs sent from the waiting queue
+ * Requeue all SCBs *not* sent to the tail of the waiting queue
+ * Take Razor #494 into account for above.
+ *
+ * In Packetized Mode:
+ * Return to the idle loop. Our interrupt handler will take
+ * care of any incoming L_Qs.
+ *
+ * In Non-Packetize Mode:
+ * Continue to our normal state machine.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+select_out:
+BEGIN_CRITICAL;
+ if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
+ /*
+ * On Rev A. hardware, the busy LED is only
+ * turned on automaically during selections
+ * and re-selections. Make the LED status
+ * more useful by forcing it to be on from
+ * the point of re-selection until our idle
+ * loop determines that neither of our FIFOs
+ * are busy. This handles the non-packetized
+ * case nicely as we will not return to the
+ * idle loop until the busfree at the end of
+ * each transaction.
+ */
+ or SBLKCTL, DIAGLEDEN|DIAGLEDON;
+ }
+ /* Clear out all SCBs that have been successfully sent. */
+ if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
+ /*
+ * For packetized, the LQO manager clears ENSELO on
+ * the assertion of SELDO. If we are non-packetized,
+ * LASTSCB and CURRSCB are accurate.
+ */
+ test SCSISEQ0, ENSELO jnz use_lastscb;
+
+ /*
+ * The update is correct for LQOSTAT1 errors. All
+ * but LQOBUSFREE are handled by kernel interrupts.
+ * If we see LQOBUSFREE, return to the idle loop.
+ * Once we are out of the select_out critical section,
+ * the kernel will cleanup the LQOBUSFREE and we will
+ * eventually restart the selection if appropriate.
+ */
+ test LQOSTAT1, LQOBUSFREE jnz idle_loop;
+
+ /*
+ * On a phase change oustside of packet boundaries,
+ * LASTSCB points to the currently active SCB context
+ * on the bus.
+ */
+ test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb;
+
+ /*
+ * If the hardware has traversed the whole list, NEXTSCB
+ * will be NULL, CURRSCB and LASTSCB cannot be trusted,
+ * but MAXCMDCNT is accurate. If we stop part way through
+ * the list or only had one command to issue, NEXTSCB[1] is
+ * not NULL and LASTSCB is the last command to go out.
+ */
+ cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb;
+
+ /*
+ * Brute force walk.
+ */
+ bmov SCBPTR, WAITING_TID_HEAD, 2;
+ mvi SEQINTCTL, INTVEC1DSL;
+ mvi MODE_PTR, MK_MODE(M_CFG, M_CFG);
+ mov A, MAXCMDCNT;
+ mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI);
+ clr SEQINTCTL;
+find_lastscb_loop:
+ dec A;
+ test A, 0xFF jz found_last_sent_scb;
+ bmov SCBPTR, SCB_NEXT, 2;
+ jmp find_lastscb_loop;
+use_lastscb:
+ bmov SCBPTR, LASTSCB, 2;
+found_last_sent_scb:
+ bmov CURRSCB, SCBPTR, 2;
+curscb_ww_done:
+ } else {
+ bmov SCBPTR, CURRSCB, 2;
+ }
+
+ /*
+ * The whole list made it. Clear our tail pointer to indicate
+ * that the per-target selection queue is now empty.
+ */
+ cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_clear_tail;
+
+ /*
+ * Requeue any SCBs not sent, to the tail of the waiting Q.
+ * We know that neither the per-TID list nor the list of
+ * TIDs is empty. Use this knowledge to our advantage and
+ * queue the remainder to the tail of the global execution
+ * queue.
+ */
+ bmov REG0, SCB_NEXT, 2;
+select_out_queue_remainder:
+ bmov SCBPTR, WAITING_TID_TAIL, 2;
+ bmov SCB_NEXT2, REG0, 2;
+ bmov WAITING_TID_TAIL, REG0, 2;
+ jmp select_out_inc_tid_q;
+
+select_out_clear_tail:
+ /*
+ * Queue any pending MK_MESSAGE SCB for this target now
+ * that the queue is empty.
+ */
+ test SEQ_FLAGS2, PENDING_MK_MESSAGE jz select_out_no_mk_message_scb;
+ mov A, MK_MESSAGE_SCSIID;
+ cmp SCB_SCSIID, A jne select_out_no_mk_message_scb;
+ and SEQ_FLAGS2, ~PENDING_MK_MESSAGE;
+ bmov REG0, MK_MESSAGE_SCB, 2;
+ jmp select_out_queue_remainder;
+
+select_out_no_mk_message_scb:
+ /*
+ * Clear this target's execution tail and increment the queue.
+ */
+ shr DINDEX, 3, SCB_SCSIID;
+ or DINDEX, 1; /* Want only the second byte */
+ mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
+ mvi DINDIR, SCB_LIST_NULL;
+select_out_inc_tid_q:
+ bmov SCBPTR, WAITING_TID_HEAD, 2;
+ bmov WAITING_TID_HEAD, SCB_NEXT2, 2;
+ cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2;
+ mvi WAITING_TID_TAIL[1], SCB_LIST_NULL;
+ bmov SCBPTR, CURRSCB, 2;
+ mvi CLRSINT0, CLRSELDO;
+ test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_mode_cleared;
+ test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_mode_cleared;
+
+ /*
+ * If this is a packetized connection, return to our
+ * idle_loop and let our interrupt handler deal with
+ * any connection setup/teardown issues. The only
+ * exceptions are the case of MK_MESSAGE and task management
+ * SCBs.
+ */
+ if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) {
+ /*
+ * In the A, the LQO manager transitions to LQOSTOP0 even if
+ * we have selected out with ATN asserted and the target
+ * REQs in a non-packet phase.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz select_out_no_message;
+ test SCSISIGO, ATNO jnz select_out_non_packetized;
+select_out_no_message:
+ }
+ test LQOSTAT2, LQOSTOP0 jz select_out_non_packetized;
+ test SCB_TASK_MANAGEMENT, 0xFF jz idle_loop;
+ SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE)
+ jmp idle_loop;
+
+select_out_non_packetized:
+ /* Non packetized request. */
+ and SCSISEQ0, ~ENSELO;
+ if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+ /*
+ * Test to ensure that the bus has not
+ * already gone free prior to clearing
+ * any stale busfree status. This avoids
+ * a window whereby a busfree just after
+ * a selection could be missed.
+ */
+ test SCSISIGI, BSYI jz . + 2;
+ mvi CLRSINT1,CLRBUSFREE;
+ or SIMODE1, ENBUSFREE;
+ }
+ mov SAVED_SCSIID, SCB_SCSIID;
+ mov SAVED_LUN, SCB_LUN;
+ mvi SEQ_FLAGS, NO_CDB_SENT;
+END_CRITICAL;
+ or SXFRCTL0, SPIOEN;
+
+ /*
+ * As soon as we get a successful selection, the target
+ * should go into the message out phase since we have ATN
+ * asserted.
+ */
+ mvi MSG_OUT, MSG_IDENTIFYFLAG;
+
+ /*
+ * Main loop for information transfer phases. Wait for the
+ * target to assert REQ before checking MSG, C/D and I/O for
+ * the bus phase.
+ */
+mesgin_phasemis:
+ITloop:
+ call phase_lock;
+
+ mov A, LASTPHASE;
+
+ test A, ~P_DATAIN_DT jz p_data;
+ cmp A,P_COMMAND je p_command;
+ cmp A,P_MESGOUT je p_mesgout;
+ cmp A,P_STATUS je p_status;
+ cmp A,P_MESGIN je p_mesgin;
+
+ SET_SEQINTCODE(BAD_PHASE)
+ jmp ITloop; /* Try reading the bus again. */
+
+/*
+ * Command phase. Set up the DMA registers and let 'er rip.
+ */
+p_command:
+ test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
+ SET_SEQINTCODE(PROTO_VIOLATION)
+p_command_okay:
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz p_command_allocate_fifo;
+ /*
+ * Command retry. Free our current FIFO and
+ * re-allocate a FIFO so transfer state is
+ * reset.
+ */
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
+ SET_MODE(M_SCSI, M_SCSI)
+p_command_allocate_fifo:
+ bmov ALLOCFIFO_SCBPTR, SCBPTR, 2;
+ call allocate_fifo;
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+ add NONE, -17, SCB_CDB_LEN;
+ jnc p_command_embedded;
+p_command_from_host:
+ bmov HADDR[0], SCB_HOST_CDB_PTR, 9;
+ mvi SG_CACHE_PRE, LAST_SEG;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+ jmp p_command_xfer;
+p_command_embedded:
+ bmov SHCNT[0], SCB_CDB_LEN, 1;
+ bmov DFDAT, SCB_CDB_STORE, 16;
+ mvi DFCNTRL, SCSIEN;
+p_command_xfer:
+ and SEQ_FLAGS, ~NO_CDB_SENT;
+ if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) {
+ /*
+ * To speed up CDB delivery in Rev B, all CDB acks
+ * are "released" to the output sync as soon as the
+ * command phase starts. There is only one problem
+ * with this approach. If the target changes phase
+ * before all data are sent, we have left over acks
+ * that can go out on the bus in a data phase. Due
+ * to other chip contraints, this only happens if
+ * the target goes to data-in, but if the acks go
+ * out before we can test SDONE, we'll think that
+ * the transfer has completed successfully. Work
+ * around this by taking advantage of the 400ns or
+ * 800ns dead time between command phase and the REQ
+ * of the new phase. If the transfer has completed
+ * successfully, SCSIEN should fall *long* before we
+ * see a phase change. We thus treat any phasemiss
+ * that occurs before SCSIEN falls as an incomplete
+ * transfer.
+ */
+ test SSTAT1, PHASEMIS jnz p_command_xfer_failed;
+ test DFCNTRL, SCSIEN jnz . - 1;
+ } else {
+ test DFCNTRL, SCSIEN jnz .;
+ }
+ /*
+ * DMA Channel automatically disabled.
+ * Don't allow a data phase if the command
+ * was not fully transferred.
+ */
+ test SSTAT2, SDONE jnz ITloop;
+p_command_xfer_failed:
+ or SEQ_FLAGS, NO_CDB_SENT;
+ jmp ITloop;
+
+
+/*
+ * Status phase. Wait for the data byte to appear, then read it
+ * and store it into the SCB.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+p_status:
+ test SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation;
+p_status_okay:
+ mov SCB_SCSI_STATUS, SCSIDAT;
+ or SCB_CONTROL, STATUS_RCVD;
+ jmp ITloop;
+
+/*
+ * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full
+ * indentify message sequence and send it to the target. The host may
+ * override this behavior by setting the MK_MESSAGE bit in the SCB
+ * control byte. This will cause us to interrupt the host and allow
+ * it to handle the message phase completely on its own. If the bit
+ * associated with this target is set, we will also interrupt the host,
+ * thereby allowing it to send a message on the next selection regardless
+ * of the transaction being sent.
+ *
+ * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message.
+ * This is done to allow the host to send messages outside of an identify
+ * sequence while protecting the seqencer from testing the MK_MESSAGE bit
+ * on an SCB that might not be for the current nexus. (For example, a
+ * BDR message in response to a bad reselection would leave us pointed to
+ * an SCB that doesn't have anything to do with the current target).
+ *
+ * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
+ * bus device reset).
+ *
+ * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
+ * in case the target decides to put us in this phase for some strange
+ * reason.
+ */
+p_mesgout_retry:
+ /* Turn on ATN for the retry */
+ mvi SCSISIGO, ATNO;
+p_mesgout:
+ mov SINDEX, MSG_OUT;
+ cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
+ test SCB_CONTROL,MK_MESSAGE jnz host_message_loop;
+p_mesgout_identify:
+ or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN;
+ test SCB_CONTROL, DISCENB jnz . + 2;
+ and SINDEX, ~DISCENB;
+/*
+ * Send a tag message if TAG_ENB is set in the SCB control block.
+ * Use SCB_NONPACKET_TAG as the tag value.
+ */
+p_mesgout_tag:
+ test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte;
+ mov SCSIDAT, SINDEX; /* Send the identify message */
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
+ and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
+ mov SCBPTR jmp p_mesgout_onebyte;
+/*
+ * Interrupt the driver, and allow it to handle this message
+ * phase and any required retries.
+ */
+p_mesgout_from_host:
+ cmp SINDEX, HOST_MSG jne p_mesgout_onebyte;
+ jmp host_message_loop;
+
+p_mesgout_onebyte:
+ mvi CLRSINT1, CLRATNO;
+ mov SCSIDAT, SINDEX;
+
+/*
+ * If the next bus phase after ATN drops is message out, it means
+ * that the target is requesting that the last message(s) be resent.
+ */
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;
+
+p_mesgout_done:
+ mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */
+ mov LAST_MSG, MSG_OUT;
+ mvi MSG_OUT, MSG_NOOP; /* No message left */
+ jmp ITloop;
+
+/*
+ * Message in phase. Bytes are read using Automatic PIO mode.
+ */
+p_mesgin:
+ /* read the 1st message byte */
+ mvi ACCUM call inb_first;
+
+ test A,MSG_IDENTIFYFLAG jnz mesgin_identify;
+ cmp A,MSG_DISCONNECT je mesgin_disconnect;
+ cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs;
+ cmp ALLZEROS,A je mesgin_complete;
+ cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs;
+ cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue;
+ cmp A,MSG_NOOP je mesgin_done;
+
+/*
+ * Pushed message loop to allow the kernel to
+ * run it's own message state engine. To avoid an
+ * extra nop instruction after signaling the kernel,
+ * we perform the phase_lock before checking to see
+ * if we should exit the loop and skip the phase_lock
+ * in the ITloop. Performing back to back phase_locks
+ * shouldn't hurt, but why do it twice...
+ */
+host_message_loop:
+ call phase_lock; /* Benign the first time through. */
+ SET_SEQINTCODE(HOST_MSG_LOOP)
+ cmp RETURN_1, EXIT_MSG_LOOP je ITloop;
+ cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . + 3;
+ mov SCSIDAT, RETURN_2;
+ jmp host_message_loop;
+ /* Must be CONT_MSG_LOOP_READ */
+ mov NONE, SCSIDAT; /* ACK Byte */
+ jmp host_message_loop;
+
+mesgin_ign_wide_residue:
+ mov SAVED_MODE, MODE_PTR;
+ SET_MODE(M_SCSI, M_SCSI)
+ shr NEGOADDR, 4, SAVED_SCSIID;
+ mov A, NEGCONOPTS;
+ RESTORE_MODE(SAVED_MODE)
+ test A, WIDEXFER jz mesgin_reject;
+ /* Pull the residue byte */
+ mvi REG0 call inb_next;
+ cmp REG0, 0x01 jne mesgin_reject;
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
+ test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done;
+ SET_SEQINTCODE(IGN_WIDE_RES)
+ jmp mesgin_done;
+
+mesgin_proto_violation:
+ SET_SEQINTCODE(PROTO_VIOLATION)
+ jmp mesgin_done;
+mesgin_reject:
+ mvi MSG_MESSAGE_REJECT call mk_mesg;
+mesgin_done:
+ mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
+ jmp ITloop;
+
+#define INDEX_DISC_LIST(scsiid, lun) \
+ and A, 0xC0, scsiid; \
+ or SCBPTR, A, lun; \
+ clr SCBPTR[1]; \
+ and SINDEX, 0x30, scsiid; \
+ shr SINDEX, 3; /* Multiply by 2 */ \
+ add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF); \
+ mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF)
+
+mesgin_identify:
+ /*
+ * Determine whether a target is using tagged or non-tagged
+ * transactions by first looking at the transaction stored in
+ * the per-device, disconnected array. If there is no untagged
+ * transaction for this target, this must be a tagged transaction.
+ */
+ and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
+ INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
+ bmov DINDEX, SINDEX, 2;
+ bmov REG0, SINDIR, 2;
+ cmp REG0[1], SCB_LIST_NULL je snoop_tag;
+ /* Untagged. Clear the busy table entry and setup the SCB. */
+ bmov DINDIR, ALLONES, 2;
+ bmov SCBPTR, REG0, 2;
+ jmp setup_SCB;
+
+/*
+ * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
+ * If we get one, we use the tag returned to find the proper
+ * SCB. After receiving the tag, look for the SCB at SCB locations tag and
+ * tag + 256.
+ */
+snoop_tag:
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x80;
+ }
+ mov NONE, SCSIDAT; /* ACK Identify MSG */
+ call phase_lock;
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x1;
+ }
+ cmp LASTPHASE, P_MESGIN jne not_found_ITloop;
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x2;
+ }
+ cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found;
+get_tag:
+ clr SCBPTR[1];
+ mvi SCBPTR call inb_next; /* tag value */
+verify_scb:
+ test SCB_CONTROL,DISCONNECTED jz verify_other_scb;
+ mov A, SAVED_SCSIID;
+ cmp SCB_SCSIID, A jne verify_other_scb;
+ mov A, SAVED_LUN;
+ cmp SCB_LUN, A je setup_SCB_disconnected;
+verify_other_scb:
+ xor SCBPTR[1], 1;
+ test SCBPTR[1], 0xFF jnz verify_scb;
+ jmp not_found;
+
+/*
+ * Ensure that the SCB the tag points to is for
+ * an SCB transaction to the reconnecting target.
+ */
+setup_SCB:
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x10;
+ }
+ test SCB_CONTROL,DISCONNECTED jz not_found;
+setup_SCB_disconnected:
+ and SCB_CONTROL,~DISCONNECTED;
+ clr SEQ_FLAGS; /* make note of IDENTIFY */
+ test SCB_SGPTR, SG_LIST_NULL jnz . + 3;
+ bmov ALLOCFIFO_SCBPTR, SCBPTR, 2;
+ call allocate_fifo;
+ /* See if the host wants to send a message upon reconnection */
+ test SCB_CONTROL, MK_MESSAGE jz mesgin_done;
+ mvi HOST_MSG call mk_mesg;
+ jmp mesgin_done;
+
+not_found:
+ SET_SEQINTCODE(NO_MATCH)
+ jmp mesgin_done;
+
+not_found_ITloop:
+ SET_SEQINTCODE(NO_MATCH)
+ jmp ITloop;
+
+/*
+ * We received a "command complete" message. Put the SCB on the complete
+ * queue and trigger a completion interrupt via the idle loop. Before doing
+ * so, check to see if there is a residual or the status byte is something
+ * other than STATUS_GOOD (0). In either of these conditions, we upload the
+ * SCB back to the host so it can process this information.
+ */
+mesgin_complete:
+
+ /*
+ * If ATN is raised, we still want to give the target a message.
+ * Perhaps there was a parity error on this last message byte.
+ * Either way, the target should take us to message out phase
+ * and then attempt to complete the command again. We should use a
+ * critical section here to guard against a timeout triggering
+ * for this command and setting ATN while we are still processing
+ * the completion.
+ test SCSISIGI, ATNI jnz mesgin_done;
+ */
+
+ /*
+ * If we are identified and have successfully sent the CDB,
+ * any status will do. Optimize this fast path.
+ */
+ test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted;
+
+ /*
+ * If the target never sent an identify message but instead went
+ * to mesgin to give an invalid message, let the host abort us.
+ */
+ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
+
+ /*
+ * If we recevied good status but never successfully sent the
+ * cdb, abort the command.
+ */
+ test SCB_SCSI_STATUS,0xff jnz complete_accepted;
+ test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
+complete_accepted:
+
+ /*
+ * See if we attempted to deliver a message but the target ingnored us.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz complete_nomsg;
+ SET_SEQINTCODE(MKMSG_FAILED)
+complete_nomsg:
+ call queue_scb_completion;
+ jmp await_busfree;
+
+BEGIN_CRITICAL;
+freeze_queue:
+ /* Cancel any pending select-out. */
+ test SSTAT0, SELDO|SELINGO jnz . + 2;
+ and SCSISEQ0, ~ENSELO;
+ mov ACCUM_SAVE, A;
+ clr A;
+ add QFREEZE_COUNT, 1;
+ adc QFREEZE_COUNT[1], A;
+ or SEQ_FLAGS2, SELECTOUT_QFROZEN;
+ mov A, ACCUM_SAVE ret;
+END_CRITICAL;
+
+/*
+ * Complete the current FIFO's SCB if data for this same
+ * SCB is not transferring in the other FIFO.
+ */
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+pkt_complete_scb_if_fifos_idle:
+ bmov ARG_1, SCBPTR, 2;
+ mvi DFFSXFRCTL, CLRCHN;
+ SET_MODE(M_SCSI, M_SCSI)
+ bmov SCBPTR, ARG_1, 2;
+ test SCB_FIFO_USE_COUNT, 0xFF jnz return;
+queue_scb_completion:
+ test SCB_SCSI_STATUS,0xff jnz bad_status;
+ /*
+ * Check for residuals
+ */
+ test SCB_SGPTR, SG_LIST_NULL jnz complete; /* No xfer */
+ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
+ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
+complete:
+BEGIN_CRITICAL;
+ bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+ bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
+END_CRITICAL;
+bad_status:
+ cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb;
+ call freeze_queue;
+upload_scb:
+ /*
+ * Restore SCB TAG since we reuse this field
+ * in the sequencer. We don't want to corrupt
+ * it on the host.
+ */
+ bmov SCB_TAG, SCBPTR, 2;
+BEGIN_CRITICAL;
+ or SCB_SGPTR, SG_STATUS_VALID;
+ mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL;
+ cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne add_dma_scb_tail;
+ bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2;
+ bmov COMPLETE_DMA_SCB_TAIL, SCBPTR, 2 ret;
+add_dma_scb_tail:
+ bmov REG0, SCBPTR, 2;
+ bmov SCBPTR, COMPLETE_DMA_SCB_TAIL, 2;
+ bmov SCB_NEXT_COMPLETE, REG0, 2;
+ bmov COMPLETE_DMA_SCB_TAIL, REG0, 2 ret;
+END_CRITICAL;
+
+/*
+ * Is it a disconnect message? Set a flag in the SCB to remind us
+ * and await the bus going free. If this is an untagged transaction
+ * store the SCB id for it in our untagged target table for lookup on
+ * a reselection.
+ */
+mesgin_disconnect:
+ /*
+ * If ATN is raised, we still want to give the target a message.
+ * Perhaps there was a parity error on this last message byte
+ * or we want to abort this command. Either way, the target
+ * should take us to message out phase and then attempt to
+ * disconnect again.
+ * XXX - Wait for more testing.
+ test SCSISIGI, ATNI jnz mesgin_done;
+ */
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
+ jnz mesgin_proto_violation;
+ or SCB_CONTROL,DISCONNECTED;
+ test SCB_CONTROL, TAG_ENB jnz await_busfree;
+queue_disc_scb:
+ bmov REG0, SCBPTR, 2;
+ INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
+ bmov DINDEX, SINDEX, 2;
+ bmov DINDIR, REG0, 2;
+ bmov SCBPTR, REG0, 2;
+ /* FALLTHROUGH */
+await_busfree:
+ and SIMODE1, ~ENBUSFREE;
+ if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) {
+ /*
+ * In the BUSFREEREV_BUG case, the
+ * busfree status was cleared at the
+ * beginning of the connection.
+ */
+ mvi CLRSINT1,CLRBUSFREE;
+ }
+ mov NONE, SCSIDAT; /* Ack the last byte */
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz await_busfree_not_m_dff;
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+await_busfree_clrchn:
+ mvi DFFSXFRCTL, CLRCHN;
+await_busfree_not_m_dff:
+ /* clear target specific flags */
+ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
+ test SSTAT1,REQINIT|BUSFREE jz .;
+ /*
+ * We only set BUSFREE status once either a new
+ * phase has been detected or we are really
+ * BUSFREE. This allows the driver to know
+ * that we are active on the bus even though
+ * no identified transaction exists should a
+ * timeout occur while awaiting busfree.
+ */
+ mvi LASTPHASE, P_BUSFREE;
+ test SSTAT1, BUSFREE jnz idle_loop;
+ SET_SEQINTCODE(MISSED_BUSFREE)
+
+
+/*
+ * Save data pointers message:
+ * Copying RAM values back to SCB, for Save Data Pointers message, but
+ * only if we've actually been into a data phase to change them. This
+ * protects against bogus data in scratch ram and the residual counts
+ * since they are only initialized when we go into data_in or data_out.
+ * Ack the message as soon as possible.
+ */
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+mesgin_sdptrs:
+ mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
+ test SEQ_FLAGS, DPHASE jz ITloop;
+ call save_pointers;
+ jmp ITloop;
+
+save_pointers:
+ /*
+ * If we are asked to save our position at the end of the
+ * transfer, just mark us at the end rather than perform a
+ * full save.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full;
+ or SCB_SGPTR, SG_LIST_NULL ret;
+
+save_pointers_full:
+ /*
+ * The SCB_DATAPTR becomes the current SHADDR.
+ * All other information comes directly from our residual
+ * state.
+ */
+ bmov SCB_DATAPTR, SHADDR, 8;
+ bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret;
+
+/*
+ * Restore pointers message? Data pointers are recopied from the
+ * SCB anytime we enter a data phase for the first time, so all
+ * we need to do is clear the DPHASE flag and let the data phase
+ * code do the rest. We also reset/reallocate the FIFO to make
+ * sure we have a clean start for the next data or command phase.
+ */
+mesgin_rdptrs:
+ and SEQ_FLAGS, ~DPHASE;
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo;
+ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
+ SET_MODE(M_SCSI, M_SCSI)
+msgin_rdptrs_get_fifo:
+ call allocate_fifo;
+ jmp mesgin_done;
+
+phase_lock:
+ if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) {
+ /*
+ * Don't ignore persistent REQ assertions just because
+ * they were asserted within the bus settle delay window.
+ * This allows us to tolerate devices like the GEM318
+ * that violate the SCSI spec. We are careful not to
+ * count REQ while we are waiting for it to fall during
+ * an async phase due to our asserted ACK. Each
+ * sequencer instruction takes ~25ns, so the REQ must
+ * last at least 100ns in order to be counted as a true
+ * REQ.
+ */
+ test SCSIPHASE, 0xFF jnz phase_locked;
+ test SCSISIGI, ACKI jnz phase_lock;
+ test SCSISIGI, REQI jz phase_lock;
+ test SCSIPHASE, 0xFF jnz phase_locked;
+ test SCSISIGI, ACKI jnz phase_lock;
+ test SCSISIGI, REQI jz phase_lock;
+phase_locked:
+ } else {
+ test SCSIPHASE, 0xFF jz .;
+ }
+ test SSTAT1, SCSIPERR jnz phase_lock;
+phase_lock_latch_phase:
+ and LASTPHASE, PHASE_MASK, SCSISIGI ret;
+
+/*
+ * Functions to read data in Automatic PIO mode.
+ *
+ * An ACK is not sent on input from the target until SCSIDATL is read from.
+ * So we wait until SCSIDATL is latched (the usual way), then read the data
+ * byte directly off the bus using SCSIBUSL. When we have pulled the ATN
+ * line, or we just want to acknowledge the byte, then we do a dummy read
+ * from SCISDATL. The SCSI spec guarantees that the target will hold the
+ * data byte on the bus until we send our ACK.
+ *
+ * The assumption here is that these are called in a particular sequence,
+ * and that REQ is already set when inb_first is called. inb_{first,next}
+ * use the same calling convention as inb.
+ */
+inb_next:
+ mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
+inb_next_wait:
+ /*
+ * If there is a parity error, wait for the kernel to
+ * see the interrupt and prepare our message response
+ * before continuing.
+ */
+ test SCSIPHASE, 0xFF jz .;
+ test SSTAT1, SCSIPERR jnz inb_next_wait;
+inb_next_check_phase:
+ and LASTPHASE, PHASE_MASK, SCSISIGI;
+ cmp LASTPHASE, P_MESGIN jne mesgin_phasemis;
+inb_first:
+ clr DINDEX[1];
+ mov DINDEX,SINDEX;
+ mov DINDIR,SCSIBUS ret; /*read byte directly from bus*/
+inb_last:
+ mov NONE,SCSIDAT ret; /*dummy read from latch to ACK*/
+
+mk_mesg:
+ mvi SCSISIGO, ATNO;
+ mov MSG_OUT,SINDEX ret;
+
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+disable_ccsgen:
+ test SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done;
+ clr CCSGCTL;
+disable_ccsgen_fetch_done:
+ clr SG_STATE ret;
+
+service_fifo:
+ /*
+ * Do we have any prefetch left???
+ */
+ test SG_STATE, SEGS_AVAIL jnz idle_sg_avail;
+
+ /*
+ * Can this FIFO have access to the S/G cache yet?
+ */
+ test CCSGCTL, SG_CACHE_AVAIL jz return;
+
+ /* Did we just finish fetching segs? */
+ test CCSGCTL, CCSGDONE jnz idle_sgfetch_complete;
+
+ /* Are we actively fetching segments? */
+ test CCSGCTL, CCSGENACK jnz return;
+
+ /*
+ * Should the other FIFO get the S/G cache first? If
+ * both FIFOs have been allocated since we last checked
+ * any FIFO, it is important that we service a FIFO
+ * that is not actively on the bus first. This guarantees
+ * that a FIFO will be freed to handle snapshot requests for
+ * any FIFO that is still on the bus. Chips with RTI do not
+ * perform snapshots, so don't bother with this test there.
+ */
+ if ((ahd->features & AHD_RTI) == 0) {
+ /*
+ * If we're not still receiving SCSI data,
+ * it is safe to allocate the S/G cache to
+ * this FIFO.
+ */
+ test DFCNTRL, SCSIEN jz idle_sgfetch_start;
+
+ /*
+ * Switch to the other FIFO. Non-RTI chips
+ * also have the "set mode" bug, so we must
+ * disable interrupts during the switch.
+ */
+ mvi SEQINTCTL, INTVEC1DSL;
+ xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
+
+ /*
+ * If the other FIFO needs loading, then it
+ * must not have claimed the S/G cache yet
+ * (SG_CACHE_AVAIL would have been cleared in
+ * the original FIFO mode and we test this above).
+ * Return to the idle loop so we can process the
+ * FIFO not currently on the bus first.
+ */
+ test SG_STATE, LOADING_NEEDED jz idle_sgfetch_okay;
+ clr SEQINTCTL ret;
+idle_sgfetch_okay:
+ xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
+ clr SEQINTCTL;
+ }
+
+idle_sgfetch_start:
+ /*
+ * We fetch a "cacheline aligned" and sized amount of data
+ * so we don't end up referencing a non-existent page.
+ * Cacheline aligned is in quotes because the kernel will
+ * set the prefetch amount to a reasonable level if the
+ * cacheline size is unknown.
+ */
+ bmov SGHADDR, SCB_RESIDUAL_SGPTR, 4;
+ mvi SGHCNT, SG_PREFETCH_CNT;
+ if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) {
+ /*
+ * Need two instructions between "touches" of SGHADDR.
+ */
+ nop;
+ }
+ and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
+ mvi CCSGCTL, CCSGEN|CCSGRESET;
+ or SG_STATE, FETCH_INPROG ret;
+idle_sgfetch_complete:
+ /*
+ * Guard against SG_CACHE_AVAIL activating during sg fetch
+ * request in the other FIFO.
+ */
+ test SG_STATE, FETCH_INPROG jz return;
+ clr CCSGCTL;
+ and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
+ mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
+idle_sg_avail:
+ /* Does the hardware have space for another SG entry? */
+ test DFSTATUS, PRELOAD_AVAIL jz return;
+ /*
+ * On the A, preloading a segment before HDMAENACK
+ * comes true can clobber the shadow address of the
+ * first segment in the S/G FIFO. Wait until it is
+ * safe to proceed.
+ */
+ if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) {
+ test DFCNTRL, HDMAENACK jz return;
+ }
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ bmov HADDR, CCSGRAM, 8;
+ } else {
+ bmov HADDR, CCSGRAM, 4;
+ }
+ bmov HCNT, CCSGRAM, 3;
+ bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
+ if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+ and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3];
+ }
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ /* Skip 4 bytes of pad. */
+ add CCSGADDR, 4;
+ }
+sg_advance:
+ clr A; /* add sizeof(struct scatter) */
+ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
+ adc SCB_RESIDUAL_SGPTR[1],A;
+ adc SCB_RESIDUAL_SGPTR[2],A;
+ adc SCB_RESIDUAL_SGPTR[3],A;
+ mov SINDEX, SCB_RESIDUAL_SGPTR[0];
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3;
+ or SINDEX, LAST_SEG;
+ clr SG_STATE;
+ mov SG_CACHE_PRE, SINDEX;
+ if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
+ /*
+ * Use SCSIENWRDIS so that SCSIEN is never
+ * modified by this operation.
+ */
+ or DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS;
+ } else {
+ or DFCNTRL, PRELOADEN|HDMAEN;
+ }
+ /*
+ * Do we have another segment in the cache?
+ */
+ add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR;
+ jnc return;
+ and SG_STATE, ~SEGS_AVAIL ret;
+
+/*
+ * Initialize the DMA address and counter from the SCB.
+ */
+load_first_seg:
+ bmov HADDR, SCB_DATAPTR, 11;
+ and REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0];
+ test SCB_DATACNT[3], SG_LAST_SEG jz . + 2;
+ or REG_ISR, LAST_SEG;
+ mov SG_CACHE_PRE, REG_ISR;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+ /*
+ * Since we've are entering a data phase, we will
+ * rely on the SCB_RESID* fields. Initialize the
+ * residual and clear the full residual flag.
+ */
+ and SCB_SGPTR[0], ~SG_FULL_RESID;
+ bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
+ /* If we need more S/G elements, tell the idle loop */
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2;
+ mvi SG_STATE, LOADING_NEEDED ret;
+ clr SG_STATE ret;
+
+p_data_handle_xfer:
+ call setjmp;
+ test SG_STATE, LOADING_NEEDED jnz service_fifo;
+p_data_clear_handler:
+ or LONGJMP_ADDR[1], INVALID_ADDR ret;
+
+p_data:
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed;
+ SET_SEQINTCODE(PROTO_VIOLATION)
+p_data_allowed:
+
+ test SEQ_FLAGS, DPHASE jz data_phase_initialize;
+
+ /*
+ * If we re-enter the data phase after going through another
+ * phase, our transfer location has almost certainly been
+ * corrupted by the interveining, non-data, transfers. Ask
+ * the host driver to fix us up based on the transfer residual
+ * unless we already know that we should be bitbucketing.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
+ SET_SEQINTCODE(PDATA_REINIT)
+ jmp data_phase_inbounds;
+
+p_data_bitbucket:
+ /*
+ * Turn on `Bit Bucket' mode, wait until the target takes
+ * us to another phase, and then notify the host.
+ */
+ mov SAVED_MODE, MODE_PTR;
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz bitbucket_not_m_dff;
+ /*
+ * Ensure that any FIFO contents are cleared out and the
+ * FIFO free'd prior to starting the BITBUCKET. BITBUCKET
+ * doesn't discard data already in the FIFO.
+ */
+ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
+ SET_MODE(M_SCSI, M_SCSI)
+bitbucket_not_m_dff:
+ or SXFRCTL1,BITBUCKET;
+ /* Wait for non-data phase. */
+ test SCSIPHASE, ~DATA_PHASE_MASK jz .;
+ and SXFRCTL1, ~BITBUCKET;
+ RESTORE_MODE(SAVED_MODE)
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+ SET_SEQINTCODE(DATA_OVERRUN)
+ jmp ITloop;
+
+data_phase_initialize:
+ test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
+ call load_first_seg;
+data_phase_inbounds:
+ /* We have seen a data phase at least once. */
+ or SEQ_FLAGS, DPHASE;
+ mov SAVED_MODE, MODE_PTR;
+ test SG_STATE, LOADING_NEEDED jz data_group_dma_loop;
+ call p_data_handle_xfer;
+data_group_dma_loop:
+ /*
+ * The transfer is complete if either the last segment
+ * completes or the target changes phase. Both conditions
+ * will clear SCSIEN.
+ */
+ call idle_loop_service_fifos;
+ call idle_loop_cchan;
+ call idle_loop_gsfifo;
+ RESTORE_MODE(SAVED_MODE)
+ test DFCNTRL, SCSIEN jnz data_group_dma_loop;
+
+data_group_dmafinish:
+ /*
+ * The transfer has terminated either due to a phase
+ * change, and/or the completion of the last segment.
+ * We have two goals here. Do as much other work
+ * as possible while the data fifo drains on a read
+ * and respond as quickly as possible to the standard
+ * messages (save data pointers/disconnect and command
+ * complete) that usually follow a data phase.
+ */
+ call calc_residual;
+
+ /*
+ * Go ahead and shut down the DMA engine now.
+ */
+ test DFCNTRL, DIRECTION jnz data_phase_finish;
+data_group_fifoflush:
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ /*
+ * We have enabled the auto-ack feature. This means
+ * that the controller may have already transferred
+ * some overrun bytes into the data FIFO and acked them
+ * on the bus. The only way to detect this situation is
+ * to wait for LAST_SEG_DONE to come true on a completed
+ * transfer and then test to see if the data FIFO is
+ * non-empty. We know there is more data yet to transfer
+ * if SG_LIST_NULL is not yet set, thus there cannot be
+ * an overrun.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
+ test DFSTATUS, FIFOEMP jnz data_phase_finish;
+ /* Overrun */
+ jmp p_data;
+data_phase_finish:
+ /*
+ * If the target has left us in data phase, loop through
+ * the dma code again. We will only loop if there is a
+ * data overrun.
+ */
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ test SSTAT0, TARGET jnz data_phase_done;
+ }
+ if ((ahd->flags & AHD_INITIATORROLE) != 0) {
+ test SSTAT1, REQINIT jz .;
+ test SCSIPHASE, DATA_PHASE_MASK jnz p_data;
+ }
+
+data_phase_done:
+ /* Kill off any pending prefetch */
+ call disable_ccsgen;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ test SEQ_FLAGS, DPHASE_PENDING jz ITloop;
+ /*
+ and SEQ_FLAGS, ~DPHASE_PENDING;
+ * For data-in phases, wait for any pending acks from the
+ * initiator before changing phase. We only need to
+ * send Ignore Wide Residue messages for data-in phases.
+ test DFCNTRL, DIRECTION jz target_ITloop;
+ test SSTAT1, REQINIT jnz .;
+ test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop;
+ SET_MODE(M_SCSI, M_SCSI)
+ test NEGCONOPTS, WIDEXFER jz target_ITloop;
+ */
+ /*
+ * Issue an Ignore Wide Residue Message.
+ mvi P_MESGIN|BSYO call change_phase;
+ mvi MSG_IGN_WIDE_RESIDUE call target_outb;
+ mvi 1 call target_outb;
+ jmp target_ITloop;
+ */
+ } else {
+ jmp ITloop;
+ }
+
+/*
+ * We assume that, even though data may still be
+ * transferring to the host, that the SCSI side of
+ * the DMA engine is now in a static state. This
+ * allows us to update our notion of where we are
+ * in this transfer.
+ *
+ * If, by chance, we stopped before being able
+ * to fetch additional segments for this transfer,
+ * yet the last S/G was completely exhausted,
+ * call our idle loop until it is able to load
+ * another segment. This will allow us to immediately
+ * pickup on the next segment on the next data phase.
+ *
+ * If we happened to stop on the last segment, then
+ * our residual information is still correct from
+ * the idle loop and there is no need to perform
+ * any fixups.
+ */
+residual_before_last_seg:
+ test MDFFSTAT, SHVALID jnz sgptr_fixup;
+ /*
+ * Can never happen from an interrupt as the packetized
+ * hardware will only interrupt us once SHVALID or
+ * LAST_SEG_DONE.
+ */
+ call idle_loop_service_fifos;
+ RESTORE_MODE(SAVED_MODE)
+ /* FALLTHROUGH */
+calc_residual:
+ test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg;
+ /* Record if we've consumed all S/G entries */
+ test MDFFSTAT, SHVALID jz . + 2;
+ bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
+ or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret;
+
+sgptr_fixup:
+ /*
+ * Fixup the residual next S/G pointer. The S/G preload
+ * feature of the chip allows us to load two elements
+ * in addition to the currently active element. We
+ * store the bottom byte of the next S/G pointer in
+ * the SG_CACHE_PTR register so we can restore the
+ * correct value when the DMA completes. If the next
+ * sg ptr value has advanced to the point where higher
+ * bytes in the address have been affected, fix them
+ * too.
+ */
+ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
+ test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
+ add SCB_RESIDUAL_SGPTR[1], -1;
+ adc SCB_RESIDUAL_SGPTR[2], -1;
+ adc SCB_RESIDUAL_SGPTR[3], -1;
+sgptr_fixup_done:
+ and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
+ clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */
+ bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
+
+export timer_isr:
+ call issue_cmdcmplt;
+ mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
+ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
+ /*
+ * In H2A4, the mode pointer is not saved
+ * for intvec2, but is restored on iret.
+ * This can lead to the restoration of a
+ * bogus mode ptr. Manually clear the
+ * intmask bits and do a normal return
+ * to compensate.
+ */
+ and SEQINTCTL, ~(INTMASK2|INTMASK1) ret;
+ } else {
+ or SEQINTCTL, IRET ret;
+ }
+
+export seq_isr:
+ if ((ahd->features & AHD_RTI) == 0) {
+ /*
+ * On RevA Silicon, if the target returns us to data-out
+ * after we have already trained for data-out, it is
+ * possible for us to transition the free running clock to
+ * data-valid before the required 100ns P1 setup time (8 P1
+ * assertions in fast-160 mode). This will only happen if
+ * this L-Q is a continuation of a data transfer for which
+ * we have already prefetched data into our FIFO (LQ/Data
+ * followed by LQ/Data for the same write transaction).
+ * This can cause some target implementations to miss the
+ * first few data transfers on the bus. We detect this
+ * situation by noticing that this is the first data transfer
+ * after an LQ (LQIWORKONLQ true), that the data transfer is
+ * a continuation of a transfer already setup in our FIFO
+ * (SAVEPTRS interrupt), and that the transaction is a write
+ * (DIRECTION set in DFCNTRL). The delay is performed by
+ * disabling SCSIEN until we see the first REQ from the
+ * target.
+ *
+ * First instruction in an ISR cannot be a branch on
+ * Rev A. Snapshot LQISTAT2 so the status is not missed
+ * and deffer the test by one instruction.
+ */
+ mov REG_ISR, LQISTAT2;
+ test REG_ISR, LQIWORKONLQ jz main_isr;
+ test SEQINTSRC, SAVEPTRS jz main_isr;
+ test LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo;
+ /*
+ * Switch to the active FIFO after clearing the snapshot
+ * savepointer in the current FIFO. We do this so that
+ * a pending CTXTDONE or SAVEPTR is visible in the active
+ * FIFO. This status is the only way we can detect if we
+ * have lost the race (e.g. host paused us) and our attempts
+ * to disable the channel occurred after all REQs were
+ * already seen and acked (REQINIT never comes true).
+ */
+ mvi DFFSXFRCTL, CLRCHN;
+ xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
+ test DFCNTRL, DIRECTION jz interrupt_return;
+ and DFCNTRL, ~SCSIEN;
+snapshot_wait_data_valid:
+ test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz interrupt_return;
+ test SSTAT1, REQINIT jz snapshot_wait_data_valid;
+snapshot_data_valid:
+ or DFCNTRL, SCSIEN;
+ or SEQINTCTL, IRET ret;
+snapshot_saveptr:
+ mvi DFFSXFRCTL, CLRCHN;
+ or SEQINTCTL, IRET ret;
+main_isr:
+ }
+ test SEQINTSRC, CFG4DATA jnz cfg4data_intr;
+ test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr;
+ test SEQINTSRC, SAVEPTRS jnz saveptr_intr;
+ test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr;
+ SET_SEQINTCODE(INVALID_SEQINT)
+
+/*
+ * There are two types of save pointers interrupts:
+ * The first is a snapshot save pointers where the current FIFO is not
+ * active and contains a snapshot of the current poniter information.
+ * This happens between packets in a stream for a single L_Q. Since we
+ * are not performing a pointer save, we can safely clear the channel
+ * so it can be used for other transactions. On RTI capable controllers,
+ * where snapshots can, and are, disabled, the code to handle this type
+ * of snapshot is not active.
+ *
+ * The second case is a save pointers on an active FIFO which occurs
+ * if the target changes to a new L_Q or busfrees/QASes and the transfer
+ * has a residual. This should occur coincident with a ctxtdone. We
+ * disable the interrupt and allow our active routine to handle the
+ * save.
+ */
+saveptr_intr:
+ if ((ahd->features & AHD_RTI) == 0) {
+ test LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr;
+ }
+saveptr_active_fifo:
+ and SEQIMODE, ~ENSAVEPTRS;
+ or SEQINTCTL, IRET ret;
+
+cfg4data_intr:
+ test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count;
+ call load_first_seg;
+ call pkt_handle_xfer;
+ inc SCB_FIFO_USE_COUNT;
+interrupt_return:
+ or SEQINTCTL, IRET ret;
+
+cfg4istat_intr:
+ call freeze_queue;
+ add NONE, -13, SCB_CDB_LEN;
+ jnc cfg4istat_have_sense_addr;
+ test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr;
+ /*
+ * Host sets up address/count and enables transfer.
+ */
+ SET_SEQINTCODE(CFG4ISTAT_INTR)
+ jmp cfg4istat_setup_handler;
+cfg4istat_have_sense_addr:
+ bmov HADDR, SCB_SENSE_BUSADDR, 4;
+ mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8);
+ mvi SG_CACHE_PRE, LAST_SEG;
+ mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN;
+cfg4istat_setup_handler:
+ /*
+ * Status pkt is transferring to host.
+ * Wait in idle loop for transfer to complete.
+ * If a command completed before an attempted
+ * task management function completed, notify the host.
+ */
+ test SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func;
+ SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
+cfg4istat_no_taskmgmt_func:
+ call pkt_handle_status;
+ or SEQINTCTL, IRET ret;
+
+cfg4icmd_intr:
+ /*
+ * In the case of DMAing a CDB from the host, the normal
+ * CDB buffer is formatted with an 8 byte address followed
+ * by a 1 byte count.
+ */
+ bmov HADDR[0], SCB_HOST_CDB_PTR, 9;
+ mvi SG_CACHE_PRE, LAST_SEG;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+ call pkt_handle_cdb;
+ or SEQINTCTL, IRET ret;
+
+/*
+ * See if the target has gone on in this context creating an
+ * overrun condition. For the write case, the hardware cannot
+ * ack bytes until data are provided. So, if the target begins
+ * another packet without changing contexts, implying we are
+ * not sitting on a packet boundary, we are in an overrun
+ * situation. For the read case, the hardware will continue to
+ * ack bytes into the FIFO, and may even ack the last overrun packet
+ * into the FIFO. If the FIFO should become non-empty, we are in
+ * a read overrun case.
+ */
+#define check_overrun \
+ /* Not on a packet boundary. */ \
+ test MDFFSTAT, DLZERO jz pkt_handle_overrun; \
+ test DFSTATUS, FIFOEMP jz pkt_handle_overrun
+
+pkt_handle_xfer:
+ test SG_STATE, LOADING_NEEDED jz pkt_last_seg;
+ call setjmp;
+ test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
+ test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
+ test SCSISIGO, ATNO jnz . + 2;
+ test SSTAT2, NONPACKREQ jz pkt_service_fifo;
+ /*
+ * Defer handling of this NONPACKREQ until we
+ * can be sure it pertains to this FIFO. SAVEPTRS
+ * will not be asserted if the NONPACKREQ is for us,
+ * so we must simulate it if shadow is valid. If
+ * shadow is not valid, keep running this FIFO until we
+ * have satisfied the transfer by loading segments and
+ * waiting for either shadow valid or last_seg_done.
+ */
+ test MDFFSTAT, SHVALID jnz pkt_saveptrs;
+pkt_service_fifo:
+ test SG_STATE, LOADING_NEEDED jnz service_fifo;
+pkt_last_seg:
+ call setjmp;
+ test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done;
+ test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
+ test SCSISIGO, ATNO jnz . + 2;
+ test SSTAT2, NONPACKREQ jz return;
+ test MDFFSTAT, SHVALID jz return;
+ /* FALLTHROUGH */
+
+/*
+ * Either a SAVEPTRS interrupt condition is pending for this FIFO
+ * or we have a pending NONPACKREQ for this FIFO. We differentiate
+ * between the two by capturing the state of the SAVEPTRS interrupt
+ * prior to clearing this status and executing the common code for
+ * these two cases.
+ */
+pkt_saveptrs:
+BEGIN_CRITICAL;
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ mov REG0, SEQINTSRC;
+ call calc_residual;
+ call save_pointers;
+ mvi CLRSEQINTSRC, CLRSAVEPTRS;
+ call disable_ccsgen;
+ or SEQIMODE, ENSAVEPTRS;
+ test DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status;
+ test DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status;
+ /*
+ * Keep a handler around for this FIFO until it drains
+ * to the host to guarantee that we don't complete the
+ * command to the host before the data arrives.
+ */
+pkt_saveptrs_wait_fifoemp:
+ call setjmp;
+ test DFSTATUS, FIFOEMP jz return;
+pkt_saveptrs_check_status:
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ test REG0, SAVEPTRS jz unexpected_nonpkt_phase;
+ dec SCB_FIFO_USE_COUNT;
+ test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
+ mvi DFFSXFRCTL, CLRCHN ret;
+
+/*
+ * LAST_SEG_DONE status has been seen in the current FIFO.
+ * This indicates that all of the allowed data for this
+ * command has transferred across the SCSI and host buses.
+ * Check for overrun and see if we can complete this command.
+ */
+pkt_last_seg_done:
+ /*
+ * Mark transfer as completed.
+ */
+ or SCB_SGPTR, SG_LIST_NULL;
+
+ /*
+ * Wait for the current context to finish to verify that
+ * no overrun condition has occurred.
+ */
+ test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
+ call setjmp;
+pkt_wait_ctxt_done_loop:
+ test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
+ /*
+ * A sufficiently large overrun or a NONPACKREQ may
+ * prevent CTXTDONE from ever asserting, so we must
+ * poll for these statuses too.
+ */
+ check_overrun;
+ test SSTAT2, NONPACKREQ jz return;
+ test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
+ /* FALLTHROUGH */
+
+pkt_ctxt_done:
+ check_overrun;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ /*
+ * If status has been received, it is safe to skip
+ * the check to see if another FIFO is active because
+ * LAST_SEG_DONE has been observed. However, we check
+ * the FIFO anyway since it costs us only one extra
+ * instruction to leverage common code to perform the
+ * SCB completion.
+ */
+ dec SCB_FIFO_USE_COUNT;
+ test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
+ mvi DFFSXFRCTL, CLRCHN ret;
+END_CRITICAL;
+
+/*
+ * Must wait until CDB xfer is over before issuing the
+ * clear channel.
+ */
+pkt_handle_cdb:
+ call setjmp;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jz return;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ mvi DFFSXFRCTL, CLRCHN ret;
+
+/*
+ * Watch over the status transfer. Our host sense buffer is
+ * large enough to take the maximum allowed status packet.
+ * None-the-less, we must still catch and report overruns to
+ * the host. Additionally, properly catch unexpected non-packet
+ * phases that are typically caused by CRC errors in status packet
+ * transmission.
+ */
+pkt_handle_status:
+ call setjmp;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
+ test SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
+pkt_status_IU_done:
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ test DFSTATUS, FIFOEMP jz return;
+BEGIN_CRITICAL;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE;
+ or SCB_CONTROL, STATUS_RCVD;
+ jmp pkt_complete_scb_if_fifos_idle;
+END_CRITICAL;
+pkt_status_check_overrun:
+ /*
+ * Status PKT overruns are uncerimoniously recovered with a
+ * bus reset. If we've overrun, let the host know so that
+ * recovery can be performed.
+ *
+ * LAST_SEG_DONE has been observed. If either CTXTDONE or
+ * a NONPACKREQ phase change have occurred and the FIFO is
+ * empty, there is no overrun.
+ */
+ test DFSTATUS, FIFOEMP jz pkt_status_report_overrun;
+ test SEQINTSRC, CTXTDONE jz . + 2;
+ test DFSTATUS, FIFOEMP jnz pkt_status_IU_done;
+ test SCSIPHASE, ~DATA_PHASE_MASK jz return;
+ test DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq;
+pkt_status_report_overrun:
+ SET_SEQINTCODE(STATUS_OVERRUN)
+ /* SEQUENCER RESTARTED */
+pkt_status_check_nonpackreq:
+ /*
+ * CTXTDONE may be held off if a NONPACKREQ is associated with
+ * the current context. If a NONPACKREQ is observed, decide
+ * if it is for the current context. If it is for the current
+ * context, we must defer NONPACKREQ processing until all data
+ * has transferred to the host.
+ */
+ test SCSIPHASE, ~DATA_PHASE_MASK jz return;
+ test SCSISIGO, ATNO jnz . + 2;
+ test SSTAT2, NONPACKREQ jz return;
+ test SEQINTSRC, CTXTDONE jnz pkt_status_IU_done;
+ test DFSTATUS, FIFOEMP jz return;
+ /*
+ * The unexpected nonpkt phase handler assumes that any
+ * data channel use will have a FIFO reference count. It
+ * turns out that the status handler doesn't need a references
+ * count since the status received flag, and thus completion
+ * processing, cannot be set until the handler is finished.
+ * We increment the count here to make the nonpkt handler
+ * happy.
+ */
+ inc SCB_FIFO_USE_COUNT;
+ /* FALLTHROUGH */
+
+/*
+ * Nonpackreq is a polled status. It can come true in three situations:
+ * we have received an L_Q, we have sent one or more L_Qs, or there is no
+ * L_Q context associated with this REQ (REQ occurs immediately after a
+ * (re)selection). Routines that know that the context responsible for this
+ * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the
+ * top level idle loop, we exhaust all active contexts prior to determining that
+ * we simply do not have the full I_T_L_Q for this phase.
+ */
+unexpected_nonpkt_phase_find_ctxt:
+ /*
+ * This nonpackreq is most likely associated with one of the tags
+ * in a FIFO or an outgoing LQ. Only treat it as an I_T only
+ * nonpackreq if we've cleared out the FIFOs and handled any
+ * pending SELDO.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+ and A, FIFO1FREE|FIFO0FREE, DFFSTAT;
+ cmp A, FIFO1FREE|FIFO0FREE jne return;
+ test SSTAT0, SELDO jnz return;
+ mvi SCBPTR[1], SCB_LIST_NULL;
+unexpected_nonpkt_phase:
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz unexpected_nonpkt_mode_cleared;
+SET_SRC_MODE M_DFF0;
+SET_DST_MODE M_DFF0;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ dec SCB_FIFO_USE_COUNT;
+ mvi DFFSXFRCTL, CLRCHN;
+unexpected_nonpkt_mode_cleared:
+ mvi CLRSINT2, CLRNONPACKREQ;
+ if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+ /*
+ * Test to ensure that the bus has not
+ * already gone free prior to clearing
+ * any stale busfree status. This avoids
+ * a window whereby a busfree just after
+ * a selection could be missed.
+ */
+ test SCSISIGI, BSYI jz . + 2;
+ mvi CLRSINT1,CLRBUSFREE;
+ or SIMODE1, ENBUSFREE;
+ }
+ test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase;
+ SET_SEQINTCODE(ENTERING_NONPACK)
+ jmp ITloop;
+
+illegal_phase:
+ SET_SEQINTCODE(ILLEGAL_PHASE)
+ jmp ITloop;
+
+/*
+ * We have entered an overrun situation. If we have working
+ * BITBUCKET, flip that on and let the hardware eat any overrun
+ * data. Otherwise use an overrun buffer in the host to simulate
+ * BITBUCKET.
+ */
+pkt_handle_overrun_inc_use_count:
+ inc SCB_FIFO_USE_COUNT;
+pkt_handle_overrun:
+ SET_SEQINTCODE(CFG4OVERRUN)
+ call freeze_queue;
+ if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) {
+ or DFFSXFRCTL, DFFBITBUCKET;
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+ } else {
+ call load_overrun_buf;
+ mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN);
+ }
+ call setjmp;
+ if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
+ test DFSTATUS, PRELOAD_AVAIL jz overrun_load_done;
+ call load_overrun_buf;
+ or DFCNTRL, PRELOADEN;
+overrun_load_done:
+ test SEQINTSRC, CTXTDONE jnz pkt_overrun_end;
+ } else {
+ test DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end;
+ }
+ test SSTAT2, NONPACKREQ jz return;
+pkt_overrun_end:
+ or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID;
+ test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
+ dec SCB_FIFO_USE_COUNT;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
+ mvi DFFSXFRCTL, CLRCHN ret;
+
+if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
+load_overrun_buf:
+ /*
+ * Load a dummy segment if preload space is available.
+ */
+ mov HADDR[0], SHARED_DATA_ADDR;
+ add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1];
+ mov ACCUM_SAVE, A;
+ clr A;
+ adc HADDR[2], A, SHARED_DATA_ADDR[2];
+ adc HADDR[3], A, SHARED_DATA_ADDR[3];
+ mov A, ACCUM_SAVE;
+ bmov HADDR[4], ALLZEROS, 4;
+ /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */
+ clr HCNT[0];
+ mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF);
+ clr HCNT[2] ret;
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
new file mode 100644
index 000000000..109e2c99e
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -0,0 +1,10822 @@
+/*
+ * Core routines and tables shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2002 Justin T. Gibbs.
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
+ */
+
+#ifdef __linux__
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include "aicasm/aicasm_insformat.h"
+#else
+#include <dev/aic7xxx/aic79xx_osm.h>
+#include <dev/aic7xxx/aic79xx_inline.h>
+#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
+#endif
+
+
+/***************************** Lookup Tables **********************************/
+static const char *const ahd_chip_names[] =
+{
+ "NONE",
+ "aic7901",
+ "aic7902",
+ "aic7901A"
+};
+static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
+
+/*
+ * Hardware error codes.
+ */
+struct ahd_hard_error_entry {
+ uint8_t errno;
+ const char *errmesg;
+};
+
+static const struct ahd_hard_error_entry ahd_hard_errors[] = {
+ { DSCTMOUT, "Discard Timer has timed out" },
+ { ILLOPCODE, "Illegal Opcode in sequencer program" },
+ { SQPARERR, "Sequencer Parity Error" },
+ { DPARERR, "Data-path Parity Error" },
+ { MPARERR, "Scratch or SCB Memory Parity Error" },
+ { CIOPARERR, "CIOBUS Parity Error" },
+};
+static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
+
+static const struct ahd_phase_table_entry ahd_phase_table[] =
+{
+ { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
+ { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
+ { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
+ { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
+ { P_COMMAND, MSG_NOOP, "in Command phase" },
+ { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
+ { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
+ { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
+ { P_BUSFREE, MSG_NOOP, "while idle" },
+ { 0, MSG_NOOP, "in unknown phase" }
+};
+
+/*
+ * In most cases we only wish to itterate over real phases, so
+ * exclude the last element from the count.
+ */
+static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1;
+
+/* Our Sequencer Program */
+#include "aic79xx_seq.h"
+
+/**************************** Function Declarations ***************************/
+static void ahd_handle_transmission_error(struct ahd_softc *ahd);
+static void ahd_handle_lqiphase_error(struct ahd_softc *ahd,
+ u_int lqistat1);
+static int ahd_handle_pkt_busfree(struct ahd_softc *ahd,
+ u_int busfreetime);
+static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
+static void ahd_handle_proto_violation(struct ahd_softc *ahd);
+static void ahd_force_renegotiation(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+
+static struct ahd_tmode_tstate*
+ ahd_alloc_tstate(struct ahd_softc *ahd,
+ u_int scsi_id, char channel);
+#ifdef AHD_TARGET_MODE
+static void ahd_free_tstate(struct ahd_softc *ahd,
+ u_int scsi_id, char channel, int force);
+#endif
+static void ahd_devlimited_syncrate(struct ahd_softc *ahd,
+ struct ahd_initiator_tinfo *,
+ u_int *period,
+ u_int *ppr_options,
+ role_t role);
+static void ahd_update_neg_table(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ struct ahd_transinfo *tinfo);
+static void ahd_update_pending_scbs(struct ahd_softc *ahd);
+static void ahd_fetch_devinfo(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+static void ahd_scb_devinfo(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ struct scb *scb);
+static void ahd_setup_initiator_msgout(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ struct scb *scb);
+static void ahd_build_transfer_msg(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+static void ahd_construct_sdtr(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ u_int period, u_int offset);
+static void ahd_construct_wdtr(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ u_int bus_width);
+static void ahd_construct_ppr(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ u_int period, u_int offset,
+ u_int bus_width, u_int ppr_options);
+static void ahd_clear_msg_state(struct ahd_softc *ahd);
+static void ahd_handle_message_phase(struct ahd_softc *ahd);
+typedef enum {
+ AHDMSG_1B,
+ AHDMSG_2B,
+ AHDMSG_EXT
+} ahd_msgtype;
+static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
+ u_int msgval, int full);
+static int ahd_parse_msg(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+static int ahd_handle_msg_reject(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
+static void ahd_handle_devreset(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ u_int lun, cam_status status,
+ char *message, int verbose_level);
+#ifdef AHD_TARGET_MODE
+static void ahd_setup_target_msgin(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo,
+ struct scb *scb);
+#endif
+
+static u_int ahd_sglist_size(struct ahd_softc *ahd);
+static u_int ahd_sglist_allocsize(struct ahd_softc *ahd);
+static bus_dmamap_callback_t
+ ahd_dmamap_cb;
+static void ahd_initialize_hscbs(struct ahd_softc *ahd);
+static int ahd_init_scbdata(struct ahd_softc *ahd);
+static void ahd_fini_scbdata(struct ahd_softc *ahd);
+static void ahd_setup_iocell_workaround(struct ahd_softc *ahd);
+static void ahd_iocell_first_selection(struct ahd_softc *ahd);
+static void ahd_add_col_list(struct ahd_softc *ahd,
+ struct scb *scb, u_int col_idx);
+static void ahd_rem_col_list(struct ahd_softc *ahd,
+ struct scb *scb);
+static void ahd_chip_init(struct ahd_softc *ahd);
+static void ahd_qinfifo_requeue(struct ahd_softc *ahd,
+ struct scb *prev_scb,
+ struct scb *scb);
+static int ahd_qinfifo_count(struct ahd_softc *ahd);
+static int ahd_search_scb_list(struct ahd_softc *ahd, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status,
+ ahd_search_action action,
+ u_int *list_head, u_int *list_tail,
+ u_int tid);
+static void ahd_stitch_tid_list(struct ahd_softc *ahd,
+ u_int tid_prev, u_int tid_cur,
+ u_int tid_next);
+static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
+ u_int scbid);
+static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
+ u_int prev, u_int next, u_int tid);
+static void ahd_reset_current_bus(struct ahd_softc *ahd);
+static ahd_callback_t ahd_stat_timer;
+#ifdef AHD_DUMP_SEQ
+static void ahd_dumpseq(struct ahd_softc *ahd);
+#endif
+static void ahd_loadseq(struct ahd_softc *ahd);
+static int ahd_check_patch(struct ahd_softc *ahd,
+ const struct patch **start_patch,
+ u_int start_instr, u_int *skip_addr);
+static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
+ u_int address);
+static void ahd_download_instr(struct ahd_softc *ahd,
+ u_int instrptr, uint8_t *dconsts);
+static int ahd_probe_stack_size(struct ahd_softc *ahd);
+static int ahd_scb_active_in_fifo(struct ahd_softc *ahd,
+ struct scb *scb);
+static void ahd_run_data_fifo(struct ahd_softc *ahd,
+ struct scb *scb);
+
+#ifdef AHD_TARGET_MODE
+static void ahd_queue_lstate_event(struct ahd_softc *ahd,
+ struct ahd_tmode_lstate *lstate,
+ u_int initiator_id,
+ u_int event_type,
+ u_int event_arg);
+static void ahd_update_scsiid(struct ahd_softc *ahd,
+ u_int targid_mask);
+static int ahd_handle_target_cmd(struct ahd_softc *ahd,
+ struct target_cmd *cmd);
+#endif
+
+static int ahd_abort_scbs(struct ahd_softc *ahd, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status);
+static void ahd_alloc_scbs(struct ahd_softc *ahd);
+static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl,
+ u_int scbid);
+static void ahd_calc_residual(struct ahd_softc *ahd,
+ struct scb *scb);
+static void ahd_clear_critical_section(struct ahd_softc *ahd);
+static void ahd_clear_intstat(struct ahd_softc *ahd);
+static void ahd_enable_coalescing(struct ahd_softc *ahd,
+ int enable);
+static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl);
+static void ahd_freeze_devq(struct ahd_softc *ahd,
+ struct scb *scb);
+static void ahd_handle_scb_status(struct ahd_softc *ahd,
+ struct scb *scb);
+static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
+static void ahd_shutdown(void *arg);
+static void ahd_update_coalescing_values(struct ahd_softc *ahd,
+ u_int timer,
+ u_int maxcmds,
+ u_int mincmds);
+static int ahd_verify_vpd_cksum(struct vpd_config *vpd);
+static int ahd_wait_seeprom(struct ahd_softc *ahd);
+static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
+ int target, char channel, int lun,
+ u_int tag, role_t role);
+
+static void ahd_reset_cmds_pending(struct ahd_softc *ahd);
+
+/*************************** Interrupt Services *******************************/
+static void ahd_run_qoutfifo(struct ahd_softc *ahd);
+#ifdef AHD_TARGET_MODE
+static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
+#endif
+static void ahd_handle_hwerrint(struct ahd_softc *ahd);
+static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
+static void ahd_handle_scsiint(struct ahd_softc *ahd,
+ u_int intstat);
+
+/************************ Sequencer Execution Control *************************/
+void
+ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+ if (ahd->src_mode == src && ahd->dst_mode == dst)
+ return;
+#ifdef AHD_DEBUG
+ if (ahd->src_mode == AHD_MODE_UNKNOWN
+ || ahd->dst_mode == AHD_MODE_UNKNOWN)
+ panic("Setting mode prior to saving it.\n");
+ if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+ printk("%s: Setting mode 0x%x\n", ahd_name(ahd),
+ ahd_build_mode_state(ahd, src, dst));
+#endif
+ ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
+ ahd->src_mode = src;
+ ahd->dst_mode = dst;
+}
+
+static void
+ahd_update_modes(struct ahd_softc *ahd)
+{
+ ahd_mode_state mode_ptr;
+ ahd_mode src;
+ ahd_mode dst;
+
+ mode_ptr = ahd_inb(ahd, MODE_PTR);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+ printk("Reading mode 0x%x\n", mode_ptr);
+#endif
+ ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
+ ahd_known_modes(ahd, src, dst);
+}
+
+static void
+ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
+ ahd_mode dstmode, const char *file, int line)
+{
+#ifdef AHD_DEBUG
+ if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
+ || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
+ panic("%s:%s:%d: Mode assertion failed.\n",
+ ahd_name(ahd), file, line);
+ }
+#endif
+}
+
+#define AHD_ASSERT_MODES(ahd, source, dest) \
+ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
+
+ahd_mode_state
+ahd_save_modes(struct ahd_softc *ahd)
+{
+ if (ahd->src_mode == AHD_MODE_UNKNOWN
+ || ahd->dst_mode == AHD_MODE_UNKNOWN)
+ ahd_update_modes(ahd);
+
+ return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
+}
+
+void
+ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
+{
+ ahd_mode src;
+ ahd_mode dst;
+
+ ahd_extract_mode_state(ahd, state, &src, &dst);
+ ahd_set_modes(ahd, src, dst);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahd_is_paused(struct ahd_softc *ahd)
+{
+ return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop. The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahd_pause(struct ahd_softc *ahd)
+{
+ ahd_outb(ahd, HCNTRL, ahd->pause);
+
+ /*
+ * Since the sequencer can disable pausing in a critical section, we
+ * must loop until it actually stops.
+ */
+ while (ahd_is_paused(ahd) == 0)
+ ;
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted. If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahd_unpause(struct ahd_softc *ahd)
+{
+ /*
+ * Automatically restore our modes to those saved
+ * prior to the first change of the mode.
+ */
+ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
+ && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
+ if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
+ ahd_reset_cmds_pending(ahd);
+ ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+ }
+
+ if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
+ ahd_outb(ahd, HCNTRL, ahd->unpause);
+
+ ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
+}
+
+/*********************** Scatter Gather List Handling *************************/
+void *
+ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, dma_addr_t addr, bus_size_t len, int last)
+{
+ scb->sg_count++;
+ if (sizeof(dma_addr_t) > 4
+ && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)sgptr;
+ sg->addr = ahd_htole64(addr);
+ sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ } else {
+ struct ahd_dma_seg *sg;
+
+ sg = (struct ahd_dma_seg *)sgptr;
+ sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
+ sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
+ | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ }
+}
+
+static void
+ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
+{
+ /* XXX Handle target mode SCBs. */
+ scb->crc_retry_count = 0;
+ if ((scb->flags & SCB_PACKETIZED) != 0) {
+ /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
+ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
+ } else {
+ if (ahd_get_transfer_length(scb) & 0x01)
+ scb->hscb->task_attribute = SCB_XFERLEN_ODD;
+ else
+ scb->hscb->task_attribute = 0;
+ }
+
+ if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
+ || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
+ scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
+ ahd_htole32(scb->sense_busaddr);
+}
+
+static void
+ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ /*
+ * Copy the first SG into the "current" data ponter area.
+ */
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)scb->sg_list;
+ scb->hscb->dataptr = sg->addr;
+ scb->hscb->datacnt = sg->len;
+ } else {
+ struct ahd_dma_seg *sg;
+ uint32_t *dataptr_words;
+
+ sg = (struct ahd_dma_seg *)scb->sg_list;
+ dataptr_words = (uint32_t*)&scb->hscb->dataptr;
+ dataptr_words[0] = sg->addr;
+ dataptr_words[1] = 0;
+ if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+ uint64_t high_addr;
+
+ high_addr = ahd_le32toh(sg->len) & 0x7F000000;
+ scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
+ }
+ scb->hscb->datacnt = sg->len;
+ }
+ /*
+ * Note where to find the SG entries in bus space.
+ * We also set the full residual flag which the
+ * sequencer will clear as soon as a data transfer
+ * occurs.
+ */
+ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
+}
+
+static void
+ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
+ scb->hscb->dataptr = 0;
+ scb->hscb->datacnt = 0;
+}
+
+/************************** Memory mapping routines ***************************/
+static void *
+ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
+{
+ dma_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
+ return ((uint8_t *)scb->sg_list + sg_offset);
+}
+
+static uint32_t
+ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
+{
+ dma_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
+ - ahd_sg_size(ahd);
+
+ return (scb->sg_list_busaddr + sg_offset);
+}
+
+static void
+ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
+ scb->hscb_map->dmamap,
+ /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
+ scb->sg_map->dmamap,
+ /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
+ /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
+}
+
+static void
+ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
+ scb->sense_map->dmamap,
+ /*offset*/scb->sense_busaddr,
+ /*len*/AHD_SENSE_BUFSIZE, op);
+}
+
+#ifdef AHD_TARGET_MODE
+static uint32_t
+ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
+{
+ return (((uint8_t *)&ahd->targetcmds[index])
+ - (uint8_t *)ahd->qoutfifo);
+}
+#endif
+
+/*********************** Miscellaneous Support Functions ***********************/
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahd_initiator_tinfo *
+ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
+ u_int remote_id, struct ahd_tmode_tstate **tstate)
+{
+ /*
+ * Transfer data structures are stored from the perspective
+ * of the target role. Since the parameters for a connection
+ * in the initiator role to a given target are the same as
+ * when the roles are reversed, we pretend we are the target.
+ */
+ if (channel == 'B')
+ our_id += 8;
+ *tstate = ahd->enabled_targets[our_id];
+ return (&(*tstate)->transinfo[remote_id]);
+}
+
+uint16_t
+ahd_inw(struct ahd_softc *ahd, u_int port)
+{
+ /*
+ * Read high byte first as some registers increment
+ * or have other side effects when the low byte is
+ * read.
+ */
+ uint16_t r = ahd_inb(ahd, port+1) << 8;
+ return r | ahd_inb(ahd, port);
+}
+
+void
+ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
+{
+ /*
+ * Write low byte first to accommodate registers
+ * such as PRGMCNT where the order maters.
+ */
+ ahd_outb(ahd, port, value & 0xFF);
+ ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahd_inl(struct ahd_softc *ahd, u_int port)
+{
+ return ((ahd_inb(ahd, port))
+ | (ahd_inb(ahd, port+1) << 8)
+ | (ahd_inb(ahd, port+2) << 16)
+ | (ahd_inb(ahd, port+3) << 24));
+}
+
+void
+ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
+{
+ ahd_outb(ahd, port, (value) & 0xFF);
+ ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
+ ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
+ ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahd_inq(struct ahd_softc *ahd, u_int port)
+{
+ return ((ahd_inb(ahd, port))
+ | (ahd_inb(ahd, port+1) << 8)
+ | (ahd_inb(ahd, port+2) << 16)
+ | (ahd_inb(ahd, port+3) << 24)
+ | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
+ | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
+ | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
+ | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
+}
+
+void
+ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
+{
+ ahd_outb(ahd, port, value & 0xFF);
+ ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+ ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
+ ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
+ ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
+ ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
+ ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
+ ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
+}
+
+u_int
+ahd_get_scbptr(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
+}
+
+void
+ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
+ ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hnscb_qoff(struct ahd_softc *ahd)
+{
+ return (ahd_inw_atomic(ahd, HNSCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ ahd_outw_atomic(ahd, HNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hescb_qoff(struct ahd_softc *ahd)
+{
+ return (ahd_inb(ahd, HESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ ahd_outb(ahd, HESCB_QOFF, value);
+}
+
+static u_int
+ahd_get_snscb_qoff(struct ahd_softc *ahd)
+{
+ u_int oldvalue;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ oldvalue = ahd_inw(ahd, SNSCB_QOFF);
+ ahd_outw(ahd, SNSCB_QOFF, oldvalue);
+ return (oldvalue);
+}
+
+static void
+ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outw(ahd, SNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sescb_qoff(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ return (ahd_inb(ahd, SESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outb(ahd, SESCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sdscb_qoff(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
+}
+#endif
+
+static void
+ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
+ ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
+}
+
+u_int
+ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ u_int value;
+
+ /*
+ * Workaround PCI-X Rev A. hardware bug.
+ * After a host read of SCB memory, the chip
+ * may become confused into thinking prefetch
+ * was required. This starts the discard timer
+ * running and can cause an unexpected discard
+ * timer interrupt. The work around is to read
+ * a normal register prior to the exhaustion of
+ * the discard timer. The mode pointer register
+ * has no side effects and so serves well for
+ * this purpose.
+ *
+ * Razor #528
+ */
+ value = ahd_inb(ahd, offset);
+ if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
+ ahd_inb(ahd, MODE_PTR);
+ return (value);
+}
+
+u_int
+ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inb_scbram(ahd, offset)
+ | (ahd_inb_scbram(ahd, offset+1) << 8));
+}
+
+static uint32_t
+ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inw_scbram(ahd, offset)
+ | (ahd_inw_scbram(ahd, offset+2) << 16));
+}
+
+static uint64_t
+ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inl_scbram(ahd, offset)
+ | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
+}
+
+struct scb *
+ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
+{
+ struct scb* scb;
+
+ if (tag >= AHD_SCB_MAX)
+ return (NULL);
+ scb = ahd->scb_data.scbindex[tag];
+ if (scb != NULL)
+ ahd_sync_scb(ahd, scb,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ return (scb);
+}
+
+static void
+ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct hardware_scb *q_hscb;
+ struct map_node *q_hscb_map;
+ uint32_t saved_hscb_busaddr;
+
+ /*
+ * Our queuing method is a bit tricky. The card
+ * knows in advance which HSCB (by address) to download,
+ * and we can't disappoint it. To achieve this, the next
+ * HSCB to download is saved off in ahd->next_queued_hscb.
+ * When we are called to queue "an arbitrary scb",
+ * we copy the contents of the incoming HSCB to the one
+ * the sequencer knows about, swap HSCB pointers and
+ * finally assign the SCB to the tag indexed location
+ * in the scb_array. This makes sure that we can still
+ * locate the correct SCB by SCB_TAG.
+ */
+ q_hscb = ahd->next_queued_hscb;
+ q_hscb_map = ahd->next_queued_hscb_map;
+ saved_hscb_busaddr = q_hscb->hscb_busaddr;
+ memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+ q_hscb->hscb_busaddr = saved_hscb_busaddr;
+ q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+
+ /* Now swap HSCB pointers. */
+ ahd->next_queued_hscb = scb->hscb;
+ ahd->next_queued_hscb_map = scb->hscb_map;
+ scb->hscb = q_hscb;
+ scb->hscb_map = q_hscb_map;
+
+ /* Now define the mapping from tag to SCB in the scbindex */
+ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+void
+ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ ahd_swap_with_next_hscb(ahd, scb);
+
+ if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
+ panic("Attempt to queue invalid SCB tag %x\n",
+ SCB_GET_TAG(scb));
+
+ /*
+ * Keep a history of SCBs we've downloaded in the qinfifo.
+ */
+ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+ ahd->qinfifonext++;
+
+ if (scb->sg_count != 0)
+ ahd_setup_data_scb(ahd, scb);
+ else
+ ahd_setup_noxfer_scb(ahd, scb);
+ ahd_setup_scb_common(ahd, scb);
+
+ /*
+ * Make sure our data is consistent from the
+ * perspective of the adapter.
+ */
+ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
+ uint64_t host_dataptr;
+
+ host_dataptr = ahd_le64toh(scb->hscb->dataptr);
+ printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+ ahd_name(ahd),
+ SCB_GET_TAG(scb), scb->hscb->scsiid,
+ ahd_le32toh(scb->hscb->hscb_busaddr),
+ (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
+ (u_int)(host_dataptr & 0xFFFFFFFF),
+ ahd_le32toh(scb->hscb->datacnt));
+ }
+#endif
+ /* Tell the adapter about the newly queued SCB */
+ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ /*offset*/0,
+ /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
+}
+
+static void
+ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
+{
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+ ahd_targetcmd_offset(ahd, 0),
+ sizeof(struct target_cmd) * AHD_TMODE_CMDS,
+ op);
+ }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHD_RUN_QOUTFIFO 0x1
+#define AHD_RUN_TQINFIFO 0x2
+static u_int
+ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
+{
+ u_int retval;
+
+ retval = 0;
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
+ /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
+ if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
+ == ahd->qoutfifonext_valid_tag)
+ retval |= AHD_RUN_QOUTFIFO;
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0
+ && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+ ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
+ /*len*/sizeof(struct target_cmd),
+ BUS_DMASYNC_POSTREAD);
+ if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
+ retval |= AHD_RUN_TQINFIFO;
+ }
+#endif
+ return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahd_intr(struct ahd_softc *ahd)
+{
+ u_int intstat;
+
+ if ((ahd->pause & INTEN) == 0) {
+ /*
+ * Our interrupt is not enabled on the chip
+ * and may be disabled for re-entrancy reasons,
+ * so just return. This is likely just a shared
+ * interrupt.
+ */
+ return (0);
+ }
+
+ /*
+ * Instead of directly reading the interrupt status register,
+ * infer the cause of the interrupt by checking our in-core
+ * completion queues. This avoids a costly PCI bus read in
+ * most cases.
+ */
+ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
+ && (ahd_check_cmdcmpltqueues(ahd) != 0))
+ intstat = CMDCMPLT;
+ else
+ intstat = ahd_inb(ahd, INTSTAT);
+
+ if ((intstat & INT_PEND) == 0)
+ return (0);
+
+ if (intstat & CMDCMPLT) {
+ ahd_outb(ahd, CLRINT, CLRCMDINT);
+
+ /*
+ * Ensure that the chip sees that we've cleared
+ * this interrupt before we walk the output fifo.
+ * Otherwise, we may, due to posted bus writes,
+ * clear the interrupt after we finish the scan,
+ * and after the sequencer has added new entries
+ * and asserted the interrupt again.
+ */
+ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+ if (ahd_is_paused(ahd)) {
+ /*
+ * Potentially lost SEQINT.
+ * If SEQINTCODE is non-zero,
+ * simulate the SEQINT.
+ */
+ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
+ intstat |= SEQINT;
+ }
+ } else {
+ ahd_flush_device_writes(ahd);
+ }
+ ahd_run_qoutfifo(ahd);
+ ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
+ ahd->cmdcmplt_total++;
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0)
+ ahd_run_tqinfifo(ahd, /*paused*/FALSE);
+#endif
+ }
+
+ /*
+ * Handle statuses that may invalidate our cached
+ * copy of INTSTAT separately.
+ */
+ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
+ /* Hot eject. Do nothing */
+ } else if (intstat & HWERRINT) {
+ ahd_handle_hwerrint(ahd);
+ } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
+ ahd->bus_intr(ahd);
+ } else {
+
+ if ((intstat & SEQINT) != 0)
+ ahd_handle_seqint(ahd, intstat);
+
+ if ((intstat & SCSIINT) != 0)
+ ahd_handle_scsiint(ahd, intstat);
+ }
+ return (1);
+}
+
+/******************************** Private Inlines *****************************/
+static inline void
+ahd_assert_atn(struct ahd_softc *ahd)
+{
+ ahd_outb(ahd, SCSISIGO, ATNO);
+}
+
+/*
+ * Determine if the current connection has a packetized
+ * agreement. This does not necessarily mean that we
+ * are currently in a packetized transfer. We could
+ * just as easily be sending or receiving a message.
+ */
+static int
+ahd_currently_packetized(struct ahd_softc *ahd)
+{
+ ahd_mode_state saved_modes;
+ int packetized;
+
+ saved_modes = ahd_save_modes(ahd);
+ if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
+ /*
+ * The packetized bit refers to the last
+ * connection, not the current one. Check
+ * for non-zero LQISTATE instead.
+ */
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ packetized = ahd_inb(ahd, LQISTATE) != 0;
+ } else {
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
+ }
+ ahd_restore_modes(ahd, saved_modes);
+ return (packetized);
+}
+
+static inline int
+ahd_set_active_fifo(struct ahd_softc *ahd)
+{
+ u_int active_fifo;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
+ switch (active_fifo) {
+ case 0:
+ case 1:
+ ahd_set_modes(ahd, active_fifo, active_fifo);
+ return (1);
+ default:
+ return (0);
+ }
+}
+
+static inline void
+ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
+{
+ ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
+}
+
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static inline void
+ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
+{
+ uint32_t sgptr;
+
+ sgptr = ahd_le32toh(scb->hscb->sgptr);
+ if ((sgptr & SG_STATUS_VALID) != 0)
+ ahd_calc_residual(ahd, scb);
+}
+
+static inline void
+ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ uint32_t sgptr;
+
+ sgptr = ahd_le32toh(scb->hscb->sgptr);
+ if ((sgptr & SG_STATUS_VALID) != 0)
+ ahd_handle_scb_status(ahd, scb);
+ else
+ ahd_done(ahd, scb);
+}
+
+
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+static void
+ahd_restart(struct ahd_softc *ahd)
+{
+
+ ahd_pause(ahd);
+
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ /* No more pending messages */
+ ahd_clear_msg_state(ahd);
+ ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */
+ ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */
+ ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
+ ahd_outb(ahd, SEQINTCTL, 0);
+ ahd_outb(ahd, LASTPHASE, P_BUSFREE);
+ ahd_outb(ahd, SEQ_FLAGS, 0);
+ ahd_outb(ahd, SAVED_SCSIID, 0xFF);
+ ahd_outb(ahd, SAVED_LUN, 0xFF);
+
+ /*
+ * Ensure that the sequencer's idea of TQINPOS
+ * matches our own. The sequencer increments TQINPOS
+ * only after it sees a DMA complete and a reset could
+ * occur before the increment leaving the kernel to believe
+ * the command arrived but the sequencer to not.
+ */
+ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
+
+ /* Always allow reselection */
+ ahd_outb(ahd, SCSISEQ1,
+ ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+ /*
+ * Clear any pending sequencer interrupt. It is no
+ * longer relevant since we're resetting the Program
+ * Counter.
+ */
+ ahd_outb(ahd, CLRINT, CLRSEQINT);
+
+ ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
+ ahd_unpause(ahd);
+}
+
+static void
+ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
+{
+ ahd_mode_state saved_modes;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
+ printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
+#endif
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, fifo, fifo);
+ ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
+ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
+ ahd_outb(ahd, CCSGCTL, CCSGRESET);
+ ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
+ ahd_outb(ahd, SG_STATE, 0);
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+/************************* Input/Output Queues ********************************/
+/*
+ * Flush and completed commands that are sitting in the command
+ * complete queues down on the chip but have yet to be dma'ed back up.
+ */
+static void
+ahd_flush_qoutfifo(struct ahd_softc *ahd)
+{
+ struct scb *scb;
+ ahd_mode_state saved_modes;
+ u_int saved_scbptr;
+ u_int ccscbctl;
+ u_int scbid;
+ u_int next_scbid;
+
+ saved_modes = ahd_save_modes(ahd);
+
+ /*
+ * Flush the good status FIFO for completed packetized commands.
+ */
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ saved_scbptr = ahd_get_scbptr(ahd);
+ while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
+ u_int fifo_mode;
+ u_int i;
+
+ scbid = ahd_inw(ahd, GSFIFO);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: Warning - GSFIFO SCB %d invalid\n",
+ ahd_name(ahd), scbid);
+ continue;
+ }
+ /*
+ * Determine if this transaction is still active in
+ * any FIFO. If it is, we must flush that FIFO to
+ * the host before completing the command.
+ */
+ fifo_mode = 0;
+rescan_fifos:
+ for (i = 0; i < 2; i++) {
+ /* Toggle to the other mode. */
+ fifo_mode ^= 1;
+ ahd_set_modes(ahd, fifo_mode, fifo_mode);
+
+ if (ahd_scb_active_in_fifo(ahd, scb) == 0)
+ continue;
+
+ ahd_run_data_fifo(ahd, scb);
+
+ /*
+ * Running this FIFO may cause a CFG4DATA for
+ * this same transaction to assert in the other
+ * FIFO or a new snapshot SAVEPTRS interrupt
+ * in this FIFO. Even running a FIFO may not
+ * clear the transaction if we are still waiting
+ * for data to drain to the host. We must loop
+ * until the transaction is not active in either
+ * FIFO just to be sure. Reset our loop counter
+ * so we will visit both FIFOs again before
+ * declaring this transaction finished. We
+ * also delay a bit so that status has a chance
+ * to change before we look at this FIFO again.
+ */
+ ahd_delay(200);
+ goto rescan_fifos;
+ }
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_set_scbptr(ahd, scbid);
+ if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
+ && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
+ || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
+ & SG_LIST_NULL) != 0)) {
+ u_int comp_head;
+
+ /*
+ * The transfer completed with a residual.
+ * Place this SCB on the complete DMA list
+ * so that we update our in-core copy of the
+ * SCB before completing the command.
+ */
+ ahd_outb(ahd, SCB_SCSI_STATUS, 0);
+ ahd_outb(ahd, SCB_SGPTR,
+ ahd_inb_scbram(ahd, SCB_SGPTR)
+ | SG_STATUS_VALID);
+ ahd_outw(ahd, SCB_TAG, scbid);
+ ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
+ comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
+ if (SCBID_IS_NULL(comp_head)) {
+ ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
+ ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
+ } else {
+ u_int tail;
+
+ tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
+ ahd_set_scbptr(ahd, tail);
+ ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
+ ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
+ ahd_set_scbptr(ahd, scbid);
+ }
+ } else
+ ahd_complete_scb(ahd, scb);
+ }
+ ahd_set_scbptr(ahd, saved_scbptr);
+
+ /*
+ * Setup for command channel portion of flush.
+ */
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+ /*
+ * Wait for any inprogress DMA to complete and clear DMA state
+ * if this is for an SCB in the qinfifo.
+ */
+ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
+
+ if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
+ if ((ccscbctl & ARRDONE) != 0)
+ break;
+ } else if ((ccscbctl & CCSCBDONE) != 0)
+ break;
+ ahd_delay(200);
+ }
+ /*
+ * We leave the sequencer to cleanup in the case of DMA's to
+ * update the qoutfifo. In all other cases (DMA's to the
+ * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
+ * we disable the DMA engine so that the sequencer will not
+ * attempt to handle the DMA completion.
+ */
+ if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
+ ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
+
+ /*
+ * Complete any SCBs that just finished
+ * being DMA'ed into the qoutfifo.
+ */
+ ahd_run_qoutfifo(ahd);
+
+ saved_scbptr = ahd_get_scbptr(ahd);
+ /*
+ * Manually update/complete any completed SCBs that are waiting to be
+ * DMA'ed back up to the host.
+ */
+ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
+ while (!SCBID_IS_NULL(scbid)) {
+ uint8_t *hscb_ptr;
+ u_int i;
+
+ ahd_set_scbptr(ahd, scbid);
+ next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: Warning - DMA-up and complete "
+ "SCB %d invalid\n", ahd_name(ahd), scbid);
+ continue;
+ }
+ hscb_ptr = (uint8_t *)scb->hscb;
+ for (i = 0; i < sizeof(struct hardware_scb); i++)
+ *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
+
+ ahd_complete_scb(ahd, scb);
+ scbid = next_scbid;
+ }
+ ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
+ ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
+
+ scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
+ while (!SCBID_IS_NULL(scbid)) {
+
+ ahd_set_scbptr(ahd, scbid);
+ next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: Warning - Complete Qfrz SCB %d invalid\n",
+ ahd_name(ahd), scbid);
+ continue;
+ }
+
+ ahd_complete_scb(ahd, scb);
+ scbid = next_scbid;
+ }
+ ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
+
+ scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
+ while (!SCBID_IS_NULL(scbid)) {
+
+ ahd_set_scbptr(ahd, scbid);
+ next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: Warning - Complete SCB %d invalid\n",
+ ahd_name(ahd), scbid);
+ continue;
+ }
+
+ ahd_complete_scb(ahd, scb);
+ scbid = next_scbid;
+ }
+ ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
+
+ /*
+ * Restore state.
+ */
+ ahd_set_scbptr(ahd, saved_scbptr);
+ ahd_restore_modes(ahd, saved_modes);
+ ahd->flags |= AHD_UPDATE_PEND_CMDS;
+}
+
+/*
+ * Determine if an SCB for a packetized transaction
+ * is active in a FIFO.
+ */
+static int
+ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
+{
+
+ /*
+ * The FIFO is only active for our transaction if
+ * the SCBPTR matches the SCB's ID and the firmware
+ * has installed a handler for the FIFO or we have
+ * a pending SAVEPTRS or CFG4DATA interrupt.
+ */
+ if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
+ || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
+ && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
+ return (0);
+
+ return (1);
+}
+
+/*
+ * Run a data fifo to completion for a transaction we know
+ * has completed across the SCSI bus (good status has been
+ * received). We are already set to the correct FIFO mode
+ * on entry to this routine.
+ *
+ * This function attempts to operate exactly as the firmware
+ * would when running this FIFO. Care must be taken to update
+ * this routine any time the firmware's FIFO algorithm is
+ * changed.
+ */
+static void
+ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
+{
+ u_int seqintsrc;
+
+ seqintsrc = ahd_inb(ahd, SEQINTSRC);
+ if ((seqintsrc & CFG4DATA) != 0) {
+ uint32_t datacnt;
+ uint32_t sgptr;
+
+ /*
+ * Clear full residual flag.
+ */
+ sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
+ ahd_outb(ahd, SCB_SGPTR, sgptr);
+
+ /*
+ * Load datacnt and address.
+ */
+ datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
+ if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
+ sgptr |= LAST_SEG;
+ ahd_outb(ahd, SG_STATE, 0);
+ } else
+ ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
+ ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
+ ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
+ ahd_outb(ahd, SG_CACHE_PRE, sgptr);
+ ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
+
+ /*
+ * Initialize Residual Fields.
+ */
+ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
+ ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);
+
+ /*
+ * Mark the SCB as having a FIFO in use.
+ */
+ ahd_outb(ahd, SCB_FIFO_USE_COUNT,
+ ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);
+
+ /*
+ * Install a "fake" handler for this FIFO.
+ */
+ ahd_outw(ahd, LONGJMP_ADDR, 0);
+
+ /*
+ * Notify the hardware that we have satisfied
+ * this sequencer interrupt.
+ */
+ ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
+ } else if ((seqintsrc & SAVEPTRS) != 0) {
+ uint32_t sgptr;
+ uint32_t resid;
+
+ if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
+ /*
+ * Snapshot Save Pointers. All that
+ * is necessary to clear the snapshot
+ * is a CLRCHN.
+ */
+ goto clrchn;
+ }
+
+ /*
+ * Disable S/G fetch so the DMA engine
+ * is available to future users.
+ */
+ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
+ ahd_outb(ahd, CCSGCTL, 0);
+ ahd_outb(ahd, SG_STATE, 0);
+
+ /*
+ * Flush the data FIFO. Strickly only
+ * necessary for Rev A parts.
+ */
+ ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);
+
+ /*
+ * Calculate residual.
+ */
+ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+ resid = ahd_inl(ahd, SHCNT);
+ resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
+ ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
+ if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
+ /*
+ * Must back up to the correct S/G element.
+ * Typically this just means resetting our
+ * low byte to the offset in the SG_CACHE,
+ * but if we wrapped, we have to correct
+ * the other bytes of the sgptr too.
+ */
+ if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
+ && (sgptr & 0x80) == 0)
+ sgptr -= 0x100;
+ sgptr &= ~0xFF;
+ sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
+ & SG_ADDR_MASK;
+ ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
+ ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
+ } else if ((resid & AHD_SG_LEN_MASK) == 0) {
+ ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
+ sgptr | SG_LIST_NULL);
+ }
+ /*
+ * Save Pointers.
+ */
+ ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
+ ahd_outl(ahd, SCB_DATACNT, resid);
+ ahd_outl(ahd, SCB_SGPTR, sgptr);
+ ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
+ ahd_outb(ahd, SEQIMODE,
+ ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
+ /*
+ * If the data is to the SCSI bus, we are
+ * done, otherwise wait for FIFOEMP.
+ */
+ if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
+ goto clrchn;
+ } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
+ uint32_t sgptr;
+ uint64_t data_addr;
+ uint32_t data_len;
+ u_int dfcntrl;
+
+ /*
+ * Disable S/G fetch so the DMA engine
+ * is available to future users. We won't
+ * be using the DMA engine to load segments.
+ */
+ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
+ ahd_outb(ahd, CCSGCTL, 0);
+ ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
+ }
+
+ /*
+ * Wait for the DMA engine to notice that the
+ * host transfer is enabled and that there is
+ * space in the S/G FIFO for new segments before
+ * loading more segments.
+ */
+ if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
+ && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {
+
+ /*
+ * Determine the offset of the next S/G
+ * element to load.
+ */
+ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+ sgptr &= SG_PTR_MASK;
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+ data_addr = sg->addr;
+ data_len = sg->len;
+ sgptr += sizeof(*sg);
+ } else {
+ struct ahd_dma_seg *sg;
+
+ sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+ data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
+ data_addr <<= 8;
+ data_addr |= sg->addr;
+ data_len = sg->len;
+ sgptr += sizeof(*sg);
+ }
+
+ /*
+ * Update residual information.
+ */
+ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
+ ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
+
+ /*
+ * Load the S/G.
+ */
+ if (data_len & AHD_DMA_LAST_SEG) {
+ sgptr |= LAST_SEG;
+ ahd_outb(ahd, SG_STATE, 0);
+ }
+ ahd_outq(ahd, HADDR, data_addr);
+ ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
+ ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);
+
+ /*
+ * Advertise the segment to the hardware.
+ */
+ dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
+ if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
+ /*
+ * Use SCSIENWRDIS so that SCSIEN
+ * is never modified by this
+ * operation.
+ */
+ dfcntrl |= SCSIENWRDIS;
+ }
+ ahd_outb(ahd, DFCNTRL, dfcntrl);
+ }
+ } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {
+
+ /*
+ * Transfer completed to the end of SG list
+ * and has flushed to the host.
+ */
+ ahd_outb(ahd, SCB_SGPTR,
+ ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
+ goto clrchn;
+ } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
+clrchn:
+ /*
+ * Clear any handler for this FIFO, decrement
+ * the FIFO use count for the SCB, and release
+ * the FIFO.
+ */
+ ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
+ ahd_outb(ahd, SCB_FIFO_USE_COUNT,
+ ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
+ ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
+ }
+}
+
+/*
+ * Look for entries in the QoutFIFO that have completed.
+ * The valid_tag completion field indicates the validity
+ * of the entry - the valid value toggles each time through
+ * the queue. We use the sg_status field in the completion
+ * entry to avoid referencing the hscb if the completion
+ * occurred with no errors and no residual. sg_status is
+ * a copy of the first byte (little endian) of the sgptr
+ * hscb field.
+ */
+static void
+ahd_run_qoutfifo(struct ahd_softc *ahd)
+{
+ struct ahd_completion *completion;
+ struct scb *scb;
+ u_int scb_index;
+
+ if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
+ panic("ahd_run_qoutfifo recursion");
+ ahd->flags |= AHD_RUNNING_QOUTFIFO;
+ ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
+ for (;;) {
+ completion = &ahd->qoutfifo[ahd->qoutfifonext];
+
+ if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
+ break;
+
+ scb_index = ahd_le16toh(completion->tag);
+ scb = ahd_lookup_scb(ahd, scb_index);
+ if (scb == NULL) {
+ printk("%s: WARNING no command for scb %d "
+ "(cmdcmplt)\nQOUTPOS = %d\n",
+ ahd_name(ahd), scb_index,
+ ahd->qoutfifonext);
+ ahd_dump_card_state(ahd);
+ } else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
+ ahd_handle_scb_status(ahd, scb);
+ } else {
+ ahd_done(ahd, scb);
+ }
+
+ ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
+ if (ahd->qoutfifonext == 0)
+ ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
+ }
+ ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
+}
+
+/************************* Interrupt Handling *********************************/
+static void
+ahd_handle_hwerrint(struct ahd_softc *ahd)
+{
+ /*
+ * Some catastrophic hardware error has occurred.
+ * Print it for the user and disable the controller.
+ */
+ int i;
+ int error;
+
+ error = ahd_inb(ahd, ERROR);
+ for (i = 0; i < num_errors; i++) {
+ if ((error & ahd_hard_errors[i].errno) != 0)
+ printk("%s: hwerrint, %s\n",
+ ahd_name(ahd), ahd_hard_errors[i].errmesg);
+ }
+
+ ahd_dump_card_state(ahd);
+ panic("BRKADRINT");
+
+ /* Tell everyone that this HBA is no longer available */
+ ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
+ CAM_NO_HBA);
+
+ /* Tell the system that this controller has gone away. */
+ ahd_free(ahd);
+}
+
+#ifdef AHD_DEBUG
+static void
+ahd_dump_sglist(struct scb *scb)
+{
+ int i;
+
+ if (scb->sg_count > 0) {
+ if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg_list;
+
+ sg_list = (struct ahd_dma64_seg*)scb->sg_list;
+ for (i = 0; i < scb->sg_count; i++) {
+ uint64_t addr;
+ uint32_t len;
+
+ addr = ahd_le64toh(sg_list[i].addr);
+ len = ahd_le32toh(sg_list[i].len);
+ printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
+ i,
+ (uint32_t)((addr >> 32) & 0xFFFFFFFF),
+ (uint32_t)(addr & 0xFFFFFFFF),
+ sg_list[i].len & AHD_SG_LEN_MASK,
+ (sg_list[i].len & AHD_DMA_LAST_SEG)
+ ? " Last" : "");
+ }
+ } else {
+ struct ahd_dma_seg *sg_list;
+
+ sg_list = (struct ahd_dma_seg*)scb->sg_list;
+ for (i = 0; i < scb->sg_count; i++) {
+ uint32_t len;
+
+ len = ahd_le32toh(sg_list[i].len);
+ printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
+ i,
+ (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
+ ahd_le32toh(sg_list[i].addr),
+ len & AHD_SG_LEN_MASK,
+ len & AHD_DMA_LAST_SEG ? " Last" : "");
+ }
+ }
+ }
+}
+#endif /* AHD_DEBUG */
+
+static void
+ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
+{
+ u_int seqintcode;
+
+ /*
+ * Save the sequencer interrupt code and clear the SEQINT
+ * bit. We will unpause the sequencer, if appropriate,
+ * after servicing the request.
+ */
+ seqintcode = ahd_inb(ahd, SEQINTCODE);
+ ahd_outb(ahd, CLRINT, CLRSEQINT);
+ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+ /*
+ * Unpause the sequencer and let it clear
+ * SEQINT by writing NO_SEQINT to it. This
+ * will cause the sequencer to be paused again,
+ * which is the expected state of this routine.
+ */
+ ahd_unpause(ahd);
+ while (!ahd_is_paused(ahd))
+ ;
+ ahd_outb(ahd, CLRINT, CLRSEQINT);
+ }
+ ahd_update_modes(ahd);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("%s: Handle Seqint Called for code %d\n",
+ ahd_name(ahd), seqintcode);
+#endif
+ switch (seqintcode) {
+ case ENTERING_NONPACK:
+ {
+ struct scb *scb;
+ u_int scbid;
+
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ /*
+ * Somehow need to know if this
+ * is from a selection or reselection.
+ * From that, we can determine target
+ * ID so we at least have an I_T nexus.
+ */
+ } else {
+ ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
+ ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
+ ahd_outb(ahd, SEQ_FLAGS, 0x0);
+ }
+ if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
+ && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
+ /*
+ * Phase change after read stream with
+ * CRC error with P0 asserted on last
+ * packet.
+ */
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+ printk("%s: Assuming LQIPHASE_NLQ with "
+ "P0 assertion\n", ahd_name(ahd));
+#endif
+ }
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+ printk("%s: Entering NONPACK\n", ahd_name(ahd));
+#endif
+ break;
+ }
+ case INVALID_SEQINT:
+ printk("%s: Invalid Sequencer interrupt occurred, "
+ "resetting channel.\n",
+ ahd_name(ahd));
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+ ahd_dump_card_state(ahd);
+#endif
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ break;
+ case STATUS_OVERRUN:
+ {
+ struct scb *scb;
+ u_int scbid;
+
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL)
+ ahd_print_path(ahd, scb);
+ else
+ printk("%s: ", ahd_name(ahd));
+ printk("SCB %d Packetized Status Overrun", scbid);
+ ahd_dump_card_state(ahd);
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ break;
+ }
+ case CFG4ISTAT_INTR:
+ {
+ struct scb *scb;
+ u_int scbid;
+
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ ahd_dump_card_state(ahd);
+ printk("CFG4ISTAT: Free SCB %d referenced", scbid);
+ panic("For safety");
+ }
+ ahd_outq(ahd, HADDR, scb->sense_busaddr);
+ ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
+ ahd_outb(ahd, HCNT + 2, 0);
+ ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
+ ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
+ break;
+ }
+ case ILLEGAL_PHASE:
+ {
+ u_int bus_phase;
+
+ bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+ printk("%s: ILLEGAL_PHASE 0x%x\n",
+ ahd_name(ahd), bus_phase);
+
+ switch (bus_phase) {
+ case P_DATAOUT:
+ case P_DATAIN:
+ case P_DATAOUT_DT:
+ case P_DATAIN_DT:
+ case P_MESGOUT:
+ case P_STATUS:
+ case P_MESGIN:
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ printk("%s: Issued Bus Reset.\n", ahd_name(ahd));
+ break;
+ case P_COMMAND:
+ {
+ struct ahd_devinfo devinfo;
+ struct scb *scb;
+ struct ahd_initiator_tinfo *targ_info;
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_transinfo *tinfo;
+ u_int scbid;
+
+ /*
+ * If a target takes us into the command phase
+ * assume that it has been externally reset and
+ * has thus lost our previous packetized negotiation
+ * agreement. Since we have not sent an identify
+ * message and may not have fully qualified the
+ * connection, we change our command to TUR, assert
+ * ATN and ABORT the task when we go to message in
+ * phase. The OSM will see the REQUEUE_REQUEST
+ * status and retry the command.
+ */
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("Invalid phase with no valid SCB. "
+ "Resetting bus.\n");
+ ahd_reset_channel(ahd, 'A',
+ /*Initiate Reset*/TRUE);
+ break;
+ }
+ ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
+ SCB_GET_TARGET(ahd, scb),
+ SCB_GET_LUN(scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ ROLE_INITIATOR);
+ targ_info = ahd_fetch_transinfo(ahd,
+ devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target,
+ &tstate);
+ tinfo = &targ_info->curr;
+ ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_ACTIVE, /*paused*/TRUE);
+ ahd_set_syncrate(ahd, &devinfo, /*period*/0,
+ /*offset*/0, /*ppr_options*/0,
+ AHD_TRANS_ACTIVE, /*paused*/TRUE);
+ /* Hand-craft TUR command */
+ ahd_outb(ahd, SCB_CDB_STORE, 0);
+ ahd_outb(ahd, SCB_CDB_STORE+1, 0);
+ ahd_outb(ahd, SCB_CDB_STORE+2, 0);
+ ahd_outb(ahd, SCB_CDB_STORE+3, 0);
+ ahd_outb(ahd, SCB_CDB_STORE+4, 0);
+ ahd_outb(ahd, SCB_CDB_STORE+5, 0);
+ ahd_outb(ahd, SCB_CDB_LEN, 6);
+ scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
+ scb->hscb->control |= MK_MESSAGE;
+ ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
+ ahd_outb(ahd, MSG_OUT, HOST_MSG);
+ ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
+ /*
+ * The lun is 0, regardless of the SCB's lun
+ * as we have not sent an identify message.
+ */
+ ahd_outb(ahd, SAVED_LUN, 0);
+ ahd_outb(ahd, SEQ_FLAGS, 0);
+ ahd_assert_atn(ahd);
+ scb->flags &= ~SCB_PACKETIZED;
+ scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET;
+ ahd_freeze_devq(ahd, scb);
+ ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+ ahd_freeze_scb(scb);
+
+ /* Notify XPT */
+ ahd_send_async(ahd, devinfo.channel, devinfo.target,
+ CAM_LUN_WILDCARD, AC_SENT_BDR);
+
+ /*
+ * Allow the sequencer to continue with
+ * non-pack processing.
+ */
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
+ if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+ ahd_outb(ahd, CLRLQOINT1, 0);
+ }
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("Unexpected command phase from "
+ "packetized target\n");
+ }
+#endif
+ break;
+ }
+ }
+ break;
+ }
+ case CFG4OVERRUN:
+ {
+ struct scb *scb;
+ u_int scb_index;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+ printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
+ ahd_inb(ahd, MODE_PTR));
+ }
+#endif
+ scb_index = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scb_index);
+ if (scb == NULL) {
+ /*
+ * Attempt to transfer to an SCB that is
+ * not outstanding.
+ */
+ ahd_assert_atn(ahd);
+ ahd_outb(ahd, MSG_OUT, HOST_MSG);
+ ahd->msgout_buf[0] = MSG_ABORT_TASK;
+ ahd->msgout_len = 1;
+ ahd->msgout_index = 0;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ /*
+ * Clear status received flag to prevent any
+ * attempt to complete this bogus SCB.
+ */
+ ahd_outb(ahd, SCB_CONTROL,
+ ahd_inb_scbram(ahd, SCB_CONTROL)
+ & ~STATUS_RCVD);
+ }
+ break;
+ }
+ case DUMP_CARD_STATE:
+ {
+ ahd_dump_card_state(ahd);
+ break;
+ }
+ case PDATA_REINIT:
+ {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+ printk("%s: PDATA_REINIT - DFCNTRL = 0x%x "
+ "SG_CACHE_SHADOW = 0x%x\n",
+ ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
+ ahd_inb(ahd, SG_CACHE_SHADOW));
+ }
+#endif
+ ahd_reinitialize_dataptrs(ahd);
+ break;
+ }
+ case HOST_MSG_LOOP:
+ {
+ struct ahd_devinfo devinfo;
+
+ /*
+ * The sequencer has encountered a message phase
+ * that requires host assistance for completion.
+ * While handling the message phase(s), we will be
+ * notified by the sequencer after each byte is
+ * transferred so we can track bus phase changes.
+ *
+ * If this is the first time we've seen a HOST_MSG_LOOP
+ * interrupt, initialize the state of the host message
+ * loop.
+ */
+ ahd_fetch_devinfo(ahd, &devinfo);
+ if (ahd->msg_type == MSG_TYPE_NONE) {
+ struct scb *scb;
+ u_int scb_index;
+ u_int bus_phase;
+
+ bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+ if (bus_phase != P_MESGIN
+ && bus_phase != P_MESGOUT) {
+ printk("ahd_intr: HOST_MSG_LOOP bad "
+ "phase 0x%x\n", bus_phase);
+ /*
+ * Probably transitioned to bus free before
+ * we got here. Just punt the message.
+ */
+ ahd_dump_card_state(ahd);
+ ahd_clear_intstat(ahd);
+ ahd_restart(ahd);
+ return;
+ }
+
+ scb_index = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scb_index);
+ if (devinfo.role == ROLE_INITIATOR) {
+ if (bus_phase == P_MESGOUT)
+ ahd_setup_initiator_msgout(ahd,
+ &devinfo,
+ scb);
+ else {
+ ahd->msg_type =
+ MSG_TYPE_INITIATOR_MSGIN;
+ ahd->msgin_index = 0;
+ }
+ }
+#ifdef AHD_TARGET_MODE
+ else {
+ if (bus_phase == P_MESGOUT) {
+ ahd->msg_type =
+ MSG_TYPE_TARGET_MSGOUT;
+ ahd->msgin_index = 0;
+ }
+ else
+ ahd_setup_target_msgin(ahd,
+ &devinfo,
+ scb);
+ }
+#endif
+ }
+
+ ahd_handle_message_phase(ahd);
+ break;
+ }
+ case NO_MATCH:
+ {
+ /* Ensure we don't leave the selection hardware on */
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+
+ printk("%s:%c:%d: no active SCB for reconnecting "
+ "target - issuing BUS DEVICE RESET\n",
+ ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
+ printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
+ "REG0 == 0x%x ACCUM = 0x%x\n",
+ ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
+ ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
+ printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
+ "SINDEX == 0x%x\n",
+ ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
+ ahd_find_busy_tcl(ahd,
+ BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
+ ahd_inb(ahd, SAVED_LUN))),
+ ahd_inw(ahd, SINDEX));
+ printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
+ "SCB_CONTROL == 0x%x\n",
+ ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
+ ahd_inb_scbram(ahd, SCB_LUN),
+ ahd_inb_scbram(ahd, SCB_CONTROL));
+ printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
+ ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
+ printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
+ printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
+ ahd_dump_card_state(ahd);
+ ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
+ ahd->msgout_len = 1;
+ ahd->msgout_index = 0;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ ahd_outb(ahd, MSG_OUT, HOST_MSG);
+ ahd_assert_atn(ahd);
+ break;
+ }
+ case PROTO_VIOLATION:
+ {
+ ahd_handle_proto_violation(ahd);
+ break;
+ }
+ case IGN_WIDE_RES:
+ {
+ struct ahd_devinfo devinfo;
+
+ ahd_fetch_devinfo(ahd, &devinfo);
+ ahd_handle_ign_wide_residue(ahd, &devinfo);
+ break;
+ }
+ case BAD_PHASE:
+ {
+ u_int lastphase;
+
+ lastphase = ahd_inb(ahd, LASTPHASE);
+ printk("%s:%c:%d: unknown scsi bus phase %x, "
+ "lastphase = 0x%x. Attempting to continue\n",
+ ahd_name(ahd), 'A',
+ SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
+ lastphase, ahd_inb(ahd, SCSISIGI));
+ break;
+ }
+ case MISSED_BUSFREE:
+ {
+ u_int lastphase;
+
+ lastphase = ahd_inb(ahd, LASTPHASE);
+ printk("%s:%c:%d: Missed busfree. "
+ "Lastphase = 0x%x, Curphase = 0x%x\n",
+ ahd_name(ahd), 'A',
+ SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
+ lastphase, ahd_inb(ahd, SCSISIGI));
+ ahd_restart(ahd);
+ return;
+ }
+ case DATA_OVERRUN:
+ {
+ /*
+ * When the sequencer detects an overrun, it
+ * places the controller in "BITBUCKET" mode
+ * and allows the target to complete its transfer.
+ * Unfortunately, none of the counters get updated
+ * when the controller is in this mode, so we have
+ * no way of knowing how large the overrun was.
+ */
+ struct scb *scb;
+ u_int scbindex;
+#ifdef AHD_DEBUG
+ u_int lastphase;
+#endif
+
+ scbindex = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbindex);
+#ifdef AHD_DEBUG
+ lastphase = ahd_inb(ahd, LASTPHASE);
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("data overrun detected %s. Tag == 0x%x.\n",
+ ahd_lookup_phase_entry(lastphase)->phasemsg,
+ SCB_GET_TAG(scb));
+ ahd_print_path(ahd, scb);
+ printk("%s seen Data Phase. Length = %ld. "
+ "NumSGs = %d.\n",
+ ahd_inb(ahd, SEQ_FLAGS) & DPHASE
+ ? "Have" : "Haven't",
+ ahd_get_transfer_length(scb), scb->sg_count);
+ ahd_dump_sglist(scb);
+ }
+#endif
+
+ /*
+ * Set this and it will take effect when the
+ * target does a command complete.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+ ahd_freeze_scb(scb);
+ break;
+ }
+ case MKMSG_FAILED:
+ {
+ struct ahd_devinfo devinfo;
+ struct scb *scb;
+ u_int scbid;
+
+ ahd_fetch_devinfo(ahd, &devinfo);
+ printk("%s:%c:%d:%d: Attempt to issue message failed\n",
+ ahd_name(ahd), devinfo.channel, devinfo.target,
+ devinfo.lun);
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL
+ && (scb->flags & SCB_RECOVERY_SCB) != 0)
+ /*
+ * Ensure that we didn't put a second instance of this
+ * SCB into the QINFIFO.
+ */
+ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+ ROLE_INITIATOR, /*status*/0,
+ SEARCH_REMOVE);
+ ahd_outb(ahd, SCB_CONTROL,
+ ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
+ break;
+ }
+ case TASKMGMT_FUNC_COMPLETE:
+ {
+ u_int scbid;
+ struct scb *scb;
+
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL) {
+ u_int lun;
+ u_int tag;
+ cam_status error;
+
+ ahd_print_path(ahd, scb);
+ printk("Task Management Func 0x%x Complete\n",
+ scb->hscb->task_management);
+ lun = CAM_LUN_WILDCARD;
+ tag = SCB_LIST_NULL;
+
+ switch (scb->hscb->task_management) {
+ case SIU_TASKMGMT_ABORT_TASK:
+ tag = SCB_GET_TAG(scb);
+ case SIU_TASKMGMT_ABORT_TASK_SET:
+ case SIU_TASKMGMT_CLEAR_TASK_SET:
+ lun = scb->hscb->lun;
+ error = CAM_REQ_ABORTED;
+ ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
+ 'A', lun, tag, ROLE_INITIATOR,
+ error);
+ break;
+ case SIU_TASKMGMT_LUN_RESET:
+ lun = scb->hscb->lun;
+ case SIU_TASKMGMT_TARGET_RESET:
+ {
+ struct ahd_devinfo devinfo;
+
+ ahd_scb_devinfo(ahd, &devinfo, scb);
+ error = CAM_BDR_SENT;
+ ahd_handle_devreset(ahd, &devinfo, lun,
+ CAM_BDR_SENT,
+ lun != CAM_LUN_WILDCARD
+ ? "Lun Reset"
+ : "Target Reset",
+ /*verbose_level*/0);
+ break;
+ }
+ default:
+ panic("Unexpected TaskMgmt Func\n");
+ break;
+ }
+ }
+ break;
+ }
+ case TASKMGMT_CMD_CMPLT_OKAY:
+ {
+ u_int scbid;
+ struct scb *scb;
+
+ /*
+ * An ABORT TASK TMF failed to be delivered before
+ * the targeted command completed normally.
+ */
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL) {
+ /*
+ * Remove the second instance of this SCB from
+ * the QINFIFO if it is still there.
+ */
+ ahd_print_path(ahd, scb);
+ printk("SCB completes before TMF\n");
+ /*
+ * Handle losing the race. Wait until any
+ * current selection completes. We will then
+ * set the TMF back to zero in this SCB so that
+ * the sequencer doesn't bother to issue another
+ * sequencer interrupt for its completion.
+ */
+ while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
+ && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
+ && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
+ ;
+ ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
+ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+ ROLE_INITIATOR, /*status*/0,
+ SEARCH_REMOVE);
+ }
+ break;
+ }
+ case TRACEPOINT0:
+ case TRACEPOINT1:
+ case TRACEPOINT2:
+ case TRACEPOINT3:
+ printk("%s: Tracepoint %d\n", ahd_name(ahd),
+ seqintcode - TRACEPOINT0);
+ break;
+ case NO_SEQINT:
+ break;
+ case SAW_HWERR:
+ ahd_handle_hwerrint(ahd);
+ break;
+ default:
+ printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
+ seqintcode);
+ break;
+ }
+ /*
+ * The sequencer is paused immediately on
+ * a SEQINT, so we should restart it when
+ * we're done.
+ */
+ ahd_unpause(ahd);
+}
+
+static void
+ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
+{
+ struct scb *scb;
+ u_int status0;
+ u_int status3;
+ u_int status;
+ u_int lqistat1;
+ u_int lqostat0;
+ u_int scbid;
+ u_int busfreetime;
+
+ ahd_update_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
+ status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
+ status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+ lqistat1 = ahd_inb(ahd, LQISTAT1);
+ lqostat0 = ahd_inb(ahd, LQOSTAT0);
+ busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
+
+ /*
+ * Ignore external resets after a bus reset.
+ */
+ if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) {
+ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
+ return;
+ }
+
+ /*
+ * Clear bus reset flag
+ */
+ ahd->flags &= ~AHD_BUS_RESET_ACTIVE;
+
+ if ((status0 & (SELDI|SELDO)) != 0) {
+ u_int simode0;
+
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ simode0 = ahd_inb(ahd, SIMODE0);
+ status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ }
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL
+ && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+ scb = NULL;
+
+ if ((status0 & IOERR) != 0) {
+ u_int now_lvd;
+
+ now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
+ printk("%s: Transceiver State Has Changed to %s mode\n",
+ ahd_name(ahd), now_lvd ? "LVD" : "SE");
+ ahd_outb(ahd, CLRSINT0, CLRIOERR);
+ /*
+ * A change in I/O mode is equivalent to a bus reset.
+ */
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ ahd_pause(ahd);
+ ahd_setup_iocell_workaround(ahd);
+ ahd_unpause(ahd);
+ } else if ((status0 & OVERRUN) != 0) {
+
+ printk("%s: SCSI offset overrun detected. Resetting bus.\n",
+ ahd_name(ahd));
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ } else if ((status & SCSIRSTI) != 0) {
+
+ printk("%s: Someone reset channel A\n", ahd_name(ahd));
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
+ } else if ((status & SCSIPERR) != 0) {
+
+ /* Make sure the sequencer is in a safe location. */
+ ahd_clear_critical_section(ahd);
+
+ ahd_handle_transmission_error(ahd);
+ } else if (lqostat0 != 0) {
+
+ printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
+ ahd_outb(ahd, CLRLQOINT0, lqostat0);
+ if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
+ ahd_outb(ahd, CLRLQOINT1, 0);
+ } else if ((status & SELTO) != 0) {
+ /* Stop the selection */
+ ahd_outb(ahd, SCSISEQ0, 0);
+
+ /* Make sure the sequencer is in a safe location. */
+ ahd_clear_critical_section(ahd);
+
+ /* No more pending messages */
+ ahd_clear_msg_state(ahd);
+
+ /* Clear interrupt state */
+ ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
+
+ /*
+ * Although the driver does not care about the
+ * 'Selection in Progress' status bit, the busy
+ * LED does. SELINGO is only cleared by a successful
+ * selection, so we must manually clear it to insure
+ * the LED turns off just incase no future successful
+ * selections occur (e.g. no devices on the bus).
+ */
+ ahd_outb(ahd, CLRSINT0, CLRSELINGO);
+
+ scbid = ahd_inw(ahd, WAITING_TID_HEAD);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: ahd_intr - referenced scb not "
+ "valid during SELTO scb(0x%x)\n",
+ ahd_name(ahd), scbid);
+ ahd_dump_card_state(ahd);
+ } else {
+ struct ahd_devinfo devinfo;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("Saw Selection Timeout for SCB 0x%x\n",
+ scbid);
+ }
+#endif
+ ahd_scb_devinfo(ahd, &devinfo, scb);
+ ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
+ ahd_freeze_devq(ahd, scb);
+
+ /*
+ * Cancel any pending transactions on the device
+ * now that it seems to be missing. This will
+ * also revert us to async/narrow transfers until
+ * we can renegotiate with the device.
+ */
+ ahd_handle_devreset(ahd, &devinfo,
+ CAM_LUN_WILDCARD,
+ CAM_SEL_TIMEOUT,
+ "Selection Timeout",
+ /*verbose_level*/1);
+ }
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ ahd_iocell_first_selection(ahd);
+ ahd_unpause(ahd);
+ } else if ((status0 & (SELDI|SELDO)) != 0) {
+
+ ahd_iocell_first_selection(ahd);
+ ahd_unpause(ahd);
+ } else if (status3 != 0) {
+ printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
+ ahd_name(ahd), status3);
+ ahd_outb(ahd, CLRSINT3, status3);
+ } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
+
+ /* Make sure the sequencer is in a safe location. */
+ ahd_clear_critical_section(ahd);
+
+ ahd_handle_lqiphase_error(ahd, lqistat1);
+ } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
+ /*
+ * This status can be delayed during some
+ * streaming operations. The SCSIPHASE
+ * handler has already dealt with this case
+ * so just clear the error.
+ */
+ ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
+ } else if ((status & BUSFREE) != 0
+ || (lqistat1 & LQOBUSFREE) != 0) {
+ u_int lqostat1;
+ int restart;
+ int clear_fifo;
+ int packetized;
+ u_int mode;
+
+ /*
+ * Clear our selection hardware as soon as possible.
+ * We may have an entry in the waiting Q for this target,
+ * that is affected by this busfree and we don't want to
+ * go about selecting the target while we handle the event.
+ */
+ ahd_outb(ahd, SCSISEQ0, 0);
+
+ /* Make sure the sequencer is in a safe location. */
+ ahd_clear_critical_section(ahd);
+
+ /*
+ * Determine what we were up to at the time of
+ * the busfree.
+ */
+ mode = AHD_MODE_SCSI;
+ busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
+ lqostat1 = ahd_inb(ahd, LQOSTAT1);
+ switch (busfreetime) {
+ case BUSFREE_DFF0:
+ case BUSFREE_DFF1:
+ {
+ mode = busfreetime == BUSFREE_DFF0
+ ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
+ ahd_set_modes(ahd, mode, mode);
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: Invalid SCB %d in DFF%d "
+ "during unexpected busfree\n",
+ ahd_name(ahd), scbid, mode);
+ packetized = 0;
+ } else
+ packetized = (scb->flags & SCB_PACKETIZED) != 0;
+ clear_fifo = 1;
+ break;
+ }
+ case BUSFREE_LQO:
+ clear_fifo = 0;
+ packetized = 1;
+ break;
+ default:
+ clear_fifo = 0;
+ packetized = (lqostat1 & LQOBUSFREE) != 0;
+ if (!packetized
+ && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
+ && (ahd_inb(ahd, SSTAT0) & SELDI) == 0
+ && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
+ || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
+ /*
+ * Assume packetized if we are not
+ * on the bus in a non-packetized
+ * capacity and any pending selection
+ * was a packetized selection.
+ */
+ packetized = 1;
+ break;
+ }
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("Saw Busfree. Busfreetime = 0x%x.\n",
+ busfreetime);
+#endif
+ /*
+ * Busfrees that occur in non-packetized phases are
+ * handled by the nonpkt_busfree handler.
+ */
+ if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
+ restart = ahd_handle_pkt_busfree(ahd, busfreetime);
+ } else {
+ packetized = 0;
+ restart = ahd_handle_nonpkt_busfree(ahd);
+ }
+ /*
+ * Clear the busfree interrupt status. The setting of
+ * the interrupt is a pulse, so in a perfect world, we
+ * would not need to muck with the ENBUSFREE logic. This
+ * would ensure that if the bus moves on to another
+ * connection, busfree protection is still in force. If
+ * BUSFREEREV is broken, however, we must manually clear
+ * the ENBUSFREE if the busfree occurred during a non-pack
+ * connection so that we don't get false positives during
+ * future, packetized, connections.
+ */
+ ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
+ if (packetized == 0
+ && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
+ ahd_outb(ahd, SIMODE1,
+ ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);
+
+ if (clear_fifo)
+ ahd_clear_fifo(ahd, mode);
+
+ ahd_clear_msg_state(ahd);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ if (restart) {
+ ahd_restart(ahd);
+ } else {
+ ahd_unpause(ahd);
+ }
+ } else {
+ printk("%s: Missing case in ahd_handle_scsiint. status = %x\n",
+ ahd_name(ahd), status);
+ ahd_dump_card_state(ahd);
+ ahd_clear_intstat(ahd);
+ ahd_unpause(ahd);
+ }
+}
+
+static void
+ahd_handle_transmission_error(struct ahd_softc *ahd)
+{
+ struct scb *scb;
+ u_int scbid;
+ u_int lqistat1;
+ u_int lqistat2;
+ u_int msg_out;
+ u_int curphase;
+ u_int lastphase;
+ u_int perrdiag;
+ u_int cur_col;
+ int silent;
+
+ scb = NULL;
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
+ lqistat2 = ahd_inb(ahd, LQISTAT2);
+ if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
+ && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
+ u_int lqistate;
+
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ lqistate = ahd_inb(ahd, LQISTATE);
+ if ((lqistate >= 0x1E && lqistate <= 0x24)
+ || (lqistate == 0x29)) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+ printk("%s: NLQCRC found via LQISTATE\n",
+ ahd_name(ahd));
+ }
+#endif
+ lqistat1 |= LQICRCI_NLQ;
+ }
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ }
+
+ ahd_outb(ahd, CLRLQIINT1, lqistat1);
+ lastphase = ahd_inb(ahd, LASTPHASE);
+ curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+ perrdiag = ahd_inb(ahd, PERRDIAG);
+ msg_out = MSG_INITIATOR_DET_ERR;
+ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
+
+ /*
+ * Try to find the SCB associated with this error.
+ */
+ silent = FALSE;
+ if (lqistat1 == 0
+ || (lqistat1 & LQICRCI_NLQ) != 0) {
+ if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
+ ahd_set_active_fifo(ahd);
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL && SCB_IS_SILENT(scb))
+ silent = TRUE;
+ }
+
+ cur_col = 0;
+ if (silent == FALSE) {
+ printk("%s: Transmission error detected\n", ahd_name(ahd));
+ ahd_lqistat1_print(lqistat1, &cur_col, 50);
+ ahd_lastphase_print(lastphase, &cur_col, 50);
+ ahd_scsisigi_print(curphase, &cur_col, 50);
+ ahd_perrdiag_print(perrdiag, &cur_col, 50);
+ printk("\n");
+ ahd_dump_card_state(ahd);
+ }
+
+ if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
+ if (silent == FALSE) {
+ printk("%s: Gross protocol error during incoming "
+ "packet. lqistat1 == 0x%x. Resetting bus.\n",
+ ahd_name(ahd), lqistat1);
+ }
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ return;
+ } else if ((lqistat1 & LQICRCI_LQ) != 0) {
+ /*
+ * A CRC error has been detected on an incoming LQ.
+ * The bus is currently hung on the last ACK.
+ * Hit LQIRETRY to release the last ack, and
+ * wait for the sequencer to determine that ATNO
+ * is asserted while in message out to take us
+ * to our host message loop. No NONPACKREQ or
+ * LQIPHASE type errors will occur in this
+ * scenario. After this first LQIRETRY, the LQI
+ * manager will be in ISELO where it will
+ * happily sit until another packet phase begins.
+ * Unexpected bus free detection is enabled
+ * through any phases that occur after we release
+ * this last ack until the LQI manager sees a
+ * packet phase. This implies we may have to
+ * ignore a perfectly valid "unexected busfree"
+ * after our "initiator detected error" message is
+ * sent. A busfree is the expected response after
+ * we tell the target that it's L_Q was corrupted.
+ * (SPI4R09 10.7.3.3.3)
+ */
+ ahd_outb(ahd, LQCTL2, LQIRETRY);
+ printk("LQIRetry for LQICRCI_LQ to release ACK\n");
+ } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
+ /*
+ * We detected a CRC error in a NON-LQ packet.
+ * The hardware has varying behavior in this situation
+ * depending on whether this packet was part of a
+ * stream or not.
+ *
+ * PKT by PKT mode:
+ * The hardware has already acked the complete packet.
+ * If the target honors our outstanding ATN condition,
+ * we should be (or soon will be) in MSGOUT phase.
+ * This will trigger the LQIPHASE_LQ status bit as the
+ * hardware was expecting another LQ. Unexpected
+ * busfree detection is enabled. Once LQIPHASE_LQ is
+ * true (first entry into host message loop is much
+ * the same), we must clear LQIPHASE_LQ and hit
+ * LQIRETRY so the hardware is ready to handle
+ * a future LQ. NONPACKREQ will not be asserted again
+ * once we hit LQIRETRY until another packet is
+ * processed. The target may either go busfree
+ * or start another packet in response to our message.
+ *
+ * Read Streaming P0 asserted:
+ * If we raise ATN and the target completes the entire
+ * stream (P0 asserted during the last packet), the
+ * hardware will ack all data and return to the ISTART
+ * state. When the target reponds to our ATN condition,
+ * LQIPHASE_LQ will be asserted. We should respond to
+ * this with an LQIRETRY to prepare for any future
+ * packets. NONPACKREQ will not be asserted again
+ * once we hit LQIRETRY until another packet is
+ * processed. The target may either go busfree or
+ * start another packet in response to our message.
+ * Busfree detection is enabled.
+ *
+ * Read Streaming P0 not asserted:
+ * If we raise ATN and the target transitions to
+ * MSGOUT in or after a packet where P0 is not
+ * asserted, the hardware will assert LQIPHASE_NLQ.
+ * We should respond to the LQIPHASE_NLQ with an
+ * LQIRETRY. Should the target stay in a non-pkt
+ * phase after we send our message, the hardware
+ * will assert LQIPHASE_LQ. Recovery is then just as
+ * listed above for the read streaming with P0 asserted.
+ * Busfree detection is enabled.
+ */
+ if (silent == FALSE)
+ printk("LQICRC_NLQ\n");
+ if (scb == NULL) {
+ printk("%s: No SCB valid for LQICRC_NLQ. "
+ "Resetting bus\n", ahd_name(ahd));
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ return;
+ }
+ } else if ((lqistat1 & LQIBADLQI) != 0) {
+ printk("Need to handle BADLQI!\n");
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ return;
+ } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
+ if ((curphase & ~P_DATAIN_DT) != 0) {
+ /* Ack the byte. So we can continue. */
+ if (silent == FALSE)
+ printk("Acking %s to clear perror\n",
+ ahd_lookup_phase_entry(curphase)->phasemsg);
+ ahd_inb(ahd, SCSIDAT);
+ }
+
+ if (curphase == P_MESGIN)
+ msg_out = MSG_PARITY_ERROR;
+ }
+
+ /*
+ * We've set the hardware to assert ATN if we
+ * get a parity error on "in" phases, so all we
+ * need to do is stuff the message buffer with
+ * the appropriate message. "In" phases have set
+ * mesg_out to something other than MSG_NOP.
+ */
+ ahd->send_msg_perror = msg_out;
+ if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
+ scb->flags |= SCB_TRANSMISSION_ERROR;
+ ahd_outb(ahd, MSG_OUT, HOST_MSG);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ ahd_unpause(ahd);
+}
+
+static void
+ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
+{
+ /*
+ * Clear the sources of the interrupts.
+ */
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, CLRLQIINT1, lqistat1);
+
+ /*
+ * If the "illegal" phase changes were in response
+ * to our ATN to flag a CRC error, AND we ended up
+ * on packet boundaries, clear the error, restart the
+ * LQI manager as appropriate, and go on our merry
+ * way toward sending the message. Otherwise, reset
+ * the bus to clear the error.
+ */
+ ahd_set_active_fifo(ahd);
+ if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
+ && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
+ if ((lqistat1 & LQIPHASE_LQ) != 0) {
+ printk("LQIRETRY for LQIPHASE_LQ\n");
+ ahd_outb(ahd, LQCTL2, LQIRETRY);
+ } else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
+ printk("LQIRETRY for LQIPHASE_NLQ\n");
+ ahd_outb(ahd, LQCTL2, LQIRETRY);
+ } else
+ panic("ahd_handle_lqiphase_error: No phase errors\n");
+ ahd_dump_card_state(ahd);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ ahd_unpause(ahd);
+ } else {
+ printk("Resetting Channel for LQI Phase error\n");
+ ahd_dump_card_state(ahd);
+ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+ }
+}
+
+/*
+ * Packetized unexpected or expected busfree.
+ * Entered in mode based on busfreetime.
+ */
+static int
+ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
+{
+ u_int lqostat1;
+
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ lqostat1 = ahd_inb(ahd, LQOSTAT1);
+ if ((lqostat1 & LQOBUSFREE) != 0) {
+ struct scb *scb;
+ u_int scbid;
+ u_int saved_scbptr;
+ u_int waiting_h;
+ u_int waiting_t;
+ u_int next;
+
+ /*
+ * The LQO manager detected an unexpected busfree
+ * either:
+ *
+ * 1) During an outgoing LQ.
+ * 2) After an outgoing LQ but before the first
+ * REQ of the command packet.
+ * 3) During an outgoing command packet.
+ *
+ * In all cases, CURRSCB is pointing to the
+ * SCB that encountered the failure. Clean
+ * up the queue, clear SELDO and LQOBUSFREE,
+ * and allow the sequencer to restart the select
+ * out at its lesure.
+ */
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ scbid = ahd_inw(ahd, CURRSCB);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL)
+ panic("SCB not valid during LQOBUSFREE");
+ /*
+ * Clear the status.
+ */
+ ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
+ if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
+ ahd_outb(ahd, CLRLQOINT1, 0);
+ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+ ahd_flush_device_writes(ahd);
+ ahd_outb(ahd, CLRSINT0, CLRSELDO);
+
+ /*
+ * Return the LQO manager to its idle loop. It will
+ * not do this automatically if the busfree occurs
+ * after the first REQ of either the LQ or command
+ * packet or between the LQ and command packet.
+ */
+ ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
+
+ /*
+ * Update the waiting for selection queue so
+ * we restart on the correct SCB.
+ */
+ waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
+ saved_scbptr = ahd_get_scbptr(ahd);
+ if (waiting_h != scbid) {
+
+ ahd_outw(ahd, WAITING_TID_HEAD, scbid);
+ waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
+ if (waiting_t == waiting_h) {
+ ahd_outw(ahd, WAITING_TID_TAIL, scbid);
+ next = SCB_LIST_NULL;
+ } else {
+ ahd_set_scbptr(ahd, waiting_h);
+ next = ahd_inw_scbram(ahd, SCB_NEXT2);
+ }
+ ahd_set_scbptr(ahd, scbid);
+ ahd_outw(ahd, SCB_NEXT2, next);
+ }
+ ahd_set_scbptr(ahd, saved_scbptr);
+ if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
+ if (SCB_IS_SILENT(scb) == FALSE) {
+ ahd_print_path(ahd, scb);
+ printk("Probable outgoing LQ CRC error. "
+ "Retrying command\n");
+ }
+ scb->crc_retry_count++;
+ } else {
+ ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
+ ahd_freeze_scb(scb);
+ ahd_freeze_devq(ahd, scb);
+ }
+ /* Return unpausing the sequencer. */
+ return (0);
+ } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
+ /*
+ * Ignore what are really parity errors that
+ * occur on the last REQ of a free running
+ * clock prior to going busfree. Some drives
+ * do not properly active negate just before
+ * going busfree resulting in a parity glitch.
+ */
+ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
+ printk("%s: Parity on last REQ detected "
+ "during busfree phase.\n",
+ ahd_name(ahd));
+#endif
+ /* Return unpausing the sequencer. */
+ return (0);
+ }
+ if (ahd->src_mode != AHD_MODE_SCSI) {
+ u_int scbid;
+ struct scb *scb;
+
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ ahd_print_path(ahd, scb);
+ printk("Unexpected PKT busfree condition\n");
+ ahd_dump_card_state(ahd);
+ ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
+ SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+ ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
+
+ /* Return restarting the sequencer. */
+ return (1);
+ }
+ printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
+ ahd_dump_card_state(ahd);
+ /* Restart the sequencer. */
+ return (1);
+}
+
+/*
+ * Non-packetized unexpected or expected busfree.
+ */
+static int
+ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
+{
+ struct ahd_devinfo devinfo;
+ struct scb *scb;
+ u_int lastphase;
+ u_int saved_scsiid;
+ u_int saved_lun;
+ u_int target;
+ u_int initiator_role_id;
+ u_int scbid;
+ u_int ppr_busfree;
+ int printerror;
+
+ /*
+ * Look at what phase we were last in. If its message out,
+ * chances are pretty good that the busfree was in response
+ * to one of our abort requests.
+ */
+ lastphase = ahd_inb(ahd, LASTPHASE);
+ saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+ saved_lun = ahd_inb(ahd, SAVED_LUN);
+ target = SCSIID_TARGET(ahd, saved_scsiid);
+ initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
+ ahd_compile_devinfo(&devinfo, initiator_role_id,
+ target, saved_lun, 'A', ROLE_INITIATOR);
+ printerror = 1;
+
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb != NULL
+ && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+ scb = NULL;
+
+ ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
+ if (lastphase == P_MESGOUT) {
+ u_int tag;
+
+ tag = SCB_LIST_NULL;
+ if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
+ || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
+ int found;
+ int sent_msg;
+
+ if (scb == NULL) {
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("Abort for unidentified "
+ "connection completed.\n");
+ /* restart the sequencer. */
+ return (1);
+ }
+ sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
+ ahd_print_path(ahd, scb);
+ printk("SCB %d - Abort%s Completed.\n",
+ SCB_GET_TAG(scb),
+ sent_msg == MSG_ABORT_TAG ? "" : " Tag");
+
+ if (sent_msg == MSG_ABORT_TAG)
+ tag = SCB_GET_TAG(scb);
+
+ if ((scb->flags & SCB_EXTERNAL_RESET) != 0) {
+ /*
+ * This abort is in response to an
+ * unexpected switch to command phase
+ * for a packetized connection. Since
+ * the identify message was never sent,
+ * "saved lun" is 0. We really want to
+ * abort only the SCB that encountered
+ * this error, which could have a different
+ * lun. The SCB will be retried so the OS
+ * will see the UA after renegotiating to
+ * packetized.
+ */
+ tag = SCB_GET_TAG(scb);
+ saved_lun = scb->hscb->lun;
+ }
+ found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
+ tag, ROLE_INITIATOR,
+ CAM_REQ_ABORTED);
+ printk("found == 0x%x\n", found);
+ printerror = 0;
+ } else if (ahd_sent_msg(ahd, AHDMSG_1B,
+ MSG_BUS_DEV_RESET, TRUE)) {
+#ifdef __FreeBSD__
+ /*
+ * Don't mark the user's request for this BDR
+ * as completing with CAM_BDR_SENT. CAM3
+ * specifies CAM_REQ_CMP.
+ */
+ if (scb != NULL
+ && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
+ && ahd_match_scb(ahd, scb, target, 'A',
+ CAM_LUN_WILDCARD, SCB_LIST_NULL,
+ ROLE_INITIATOR))
+ ahd_set_transaction_status(scb, CAM_REQ_CMP);
+#endif
+ ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
+ CAM_BDR_SENT, "Bus Device Reset",
+ /*verbose_level*/0);
+ printerror = 0;
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
+ && ppr_busfree == 0) {
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+
+ /*
+ * PPR Rejected.
+ *
+ * If the previous negotiation was packetized,
+ * this could be because the device has been
+ * reset without our knowledge. Force our
+ * current negotiation to async and retry the
+ * negotiation. Otherwise retry the command
+ * with non-ppr negotiation.
+ */
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("PPR negotiation rejected busfree.\n");
+#endif
+ tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target, &tstate);
+ if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
+ ahd_set_width(ahd, &devinfo,
+ MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_CUR,
+ /*paused*/TRUE);
+ ahd_set_syncrate(ahd, &devinfo,
+ /*period*/0, /*offset*/0,
+ /*ppr_options*/0,
+ AHD_TRANS_CUR,
+ /*paused*/TRUE);
+ /*
+ * The expect PPR busfree handler below
+ * will effect the retry and necessary
+ * abort.
+ */
+ } else {
+ tinfo->curr.transport_version = 2;
+ tinfo->goal.transport_version = 2;
+ tinfo->goal.ppr_options = 0;
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting
+ * for selection queue that may
+ * also be for this target so that
+ * command ordering is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
+ printerror = 0;
+ }
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
+ && ppr_busfree == 0) {
+ /*
+ * Negotiation Rejected. Go-narrow and
+ * retry command.
+ */
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("WDTR negotiation rejected busfree.\n");
+#endif
+ ahd_set_width(ahd, &devinfo,
+ MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_CUR|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting for
+ * selection queue that may also be for
+ * this target so that command ordering
+ * is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
+ printerror = 0;
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
+ && ppr_busfree == 0) {
+ /*
+ * Negotiation Rejected. Go-async and
+ * retry command.
+ */
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("SDTR negotiation rejected busfree.\n");
+#endif
+ ahd_set_syncrate(ahd, &devinfo,
+ /*period*/0, /*offset*/0,
+ /*ppr_options*/0,
+ AHD_TRANS_CUR|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting for
+ * selection queue that may also be for
+ * this target so that command ordering
+ * is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
+ printerror = 0;
+ } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
+ && ahd_sent_msg(ahd, AHDMSG_1B,
+ MSG_INITIATOR_DET_ERR, TRUE)) {
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("Expected IDE Busfree\n");
+#endif
+ printerror = 0;
+ } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
+ && ahd_sent_msg(ahd, AHDMSG_1B,
+ MSG_MESSAGE_REJECT, TRUE)) {
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("Expected QAS Reject Busfree\n");
+#endif
+ printerror = 0;
+ }
+ }
+
+ /*
+ * The busfree required flag is honored at the end of
+ * the message phases. We check it last in case we
+ * had to send some other message that caused a busfree.
+ */
+ if (scb != NULL && printerror != 0
+ && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
+ && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
+
+ ahd_freeze_devq(ahd, scb);
+ ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+ ahd_freeze_scb(scb);
+ if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
+ ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_LIST_NULL,
+ ROLE_INITIATOR, CAM_REQ_ABORTED);
+ } else {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("PPR Negotiation Busfree.\n");
+#endif
+ ahd_done(ahd, scb);
+ }
+ printerror = 0;
+ }
+ if (printerror != 0) {
+ int aborted;
+
+ aborted = 0;
+ if (scb != NULL) {
+ u_int tag;
+
+ if ((scb->hscb->control & TAG_ENB) != 0)
+ tag = SCB_GET_TAG(scb);
+ else
+ tag = SCB_LIST_NULL;
+ ahd_print_path(ahd, scb);
+ aborted = ahd_abort_scbs(ahd, target, 'A',
+ SCB_GET_LUN(scb), tag,
+ ROLE_INITIATOR,
+ CAM_UNEXP_BUSFREE);
+ } else {
+ /*
+ * We had not fully identified this connection,
+ * so we cannot abort anything.
+ */
+ printk("%s: ", ahd_name(ahd));
+ }
+ printk("Unexpected busfree %s, %d SCBs aborted, "
+ "PRGMCNT == 0x%x\n",
+ ahd_lookup_phase_entry(lastphase)->phasemsg,
+ aborted,
+ ahd_inw(ahd, PRGMCNT));
+ ahd_dump_card_state(ahd);
+ if (lastphase != P_BUSFREE)
+ ahd_force_renegotiation(ahd, &devinfo);
+ }
+ /* Always restart the sequencer. */
+ return (1);
+}
+
+static void
+ahd_handle_proto_violation(struct ahd_softc *ahd)
+{
+ struct ahd_devinfo devinfo;
+ struct scb *scb;
+ u_int scbid;
+ u_int seq_flags;
+ u_int curphase;
+ u_int lastphase;
+ int found;
+
+ ahd_fetch_devinfo(ahd, &devinfo);
+ scbid = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scbid);
+ seq_flags = ahd_inb(ahd, SEQ_FLAGS);
+ curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+ lastphase = ahd_inb(ahd, LASTPHASE);
+ if ((seq_flags & NOT_IDENTIFIED) != 0) {
+
+ /*
+ * The reconnecting target either did not send an
+ * identify message, or did, but we didn't find an SCB
+ * to match.
+ */
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("Target did not send an IDENTIFY message. "
+ "LASTPHASE = 0x%x.\n", lastphase);
+ scb = NULL;
+ } else if (scb == NULL) {
+ /*
+ * We don't seem to have an SCB active for this
+ * transaction. Print an error and reset the bus.
+ */
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("No SCB found during protocol violation\n");
+ goto proto_violation_reset;
+ } else {
+ ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
+ if ((seq_flags & NO_CDB_SENT) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("No or incomplete CDB sent to device.\n");
+ } else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
+ & STATUS_RCVD) == 0) {
+ /*
+ * The target never bothered to provide status to
+ * us prior to completing the command. Since we don't
+ * know the disposition of this command, we must attempt
+ * to abort it. Assert ATN and prepare to send an abort
+ * message.
+ */
+ ahd_print_path(ahd, scb);
+ printk("Completed command without status.\n");
+ } else {
+ ahd_print_path(ahd, scb);
+ printk("Unknown protocol violation.\n");
+ ahd_dump_card_state(ahd);
+ }
+ }
+ if ((lastphase & ~P_DATAIN_DT) == 0
+ || lastphase == P_COMMAND) {
+proto_violation_reset:
+ /*
+ * Target either went directly to data
+ * phase or didn't respond to our ATN.
+ * The only safe thing to do is to blow
+ * it away with a bus reset.
+ */
+ found = ahd_reset_channel(ahd, 'A', TRUE);
+ printk("%s: Issued Channel %c Bus Reset. "
+ "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
+ } else {
+ /*
+ * Leave the selection hardware off in case
+ * this abort attempt will affect yet to
+ * be sent commands.
+ */
+ ahd_outb(ahd, SCSISEQ0,
+ ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+ ahd_assert_atn(ahd);
+ ahd_outb(ahd, MSG_OUT, HOST_MSG);
+ if (scb == NULL) {
+ ahd_print_devinfo(ahd, &devinfo);
+ ahd->msgout_buf[0] = MSG_ABORT_TASK;
+ ahd->msgout_len = 1;
+ ahd->msgout_index = 0;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ } else {
+ ahd_print_path(ahd, scb);
+ scb->flags |= SCB_ABORT;
+ }
+ printk("Protocol violation %s. Attempting to abort.\n",
+ ahd_lookup_phase_entry(curphase)->phasemsg);
+ }
+}
+
+/*
+ * Force renegotiation to occur the next time we initiate
+ * a command to the current device.
+ */
+static void
+ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ struct ahd_initiator_tinfo *targ_info;
+ struct ahd_tmode_tstate *tstate;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ ahd_print_devinfo(ahd, devinfo);
+ printk("Forcing renegotiation\n");
+ }
+#endif
+ targ_info = ahd_fetch_transinfo(ahd,
+ devinfo->channel,
+ devinfo->our_scsiid,
+ devinfo->target,
+ &tstate);
+ ahd_update_neg_request(ahd, devinfo, tstate,
+ targ_info, AHD_NEG_IF_NON_ASYNC);
+}
+
+#define AHD_MAX_STEPS 2000
+static void
+ahd_clear_critical_section(struct ahd_softc *ahd)
+{
+ ahd_mode_state saved_modes;
+ int stepping;
+ int steps;
+ int first_instr;
+ u_int simode0;
+ u_int simode1;
+ u_int simode3;
+ u_int lqimode0;
+ u_int lqimode1;
+ u_int lqomode0;
+ u_int lqomode1;
+
+ if (ahd->num_critical_sections == 0)
+ return;
+
+ stepping = FALSE;
+ steps = 0;
+ first_instr = 0;
+ simode0 = 0;
+ simode1 = 0;
+ simode3 = 0;
+ lqimode0 = 0;
+ lqimode1 = 0;
+ lqomode0 = 0;
+ lqomode1 = 0;
+ saved_modes = ahd_save_modes(ahd);
+ for (;;) {
+ struct cs *cs;
+ u_int seqaddr;
+ u_int i;
+
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ seqaddr = ahd_inw(ahd, CURADDR);
+
+ cs = ahd->critical_sections;
+ for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
+
+ if (cs->begin < seqaddr && cs->end >= seqaddr)
+ break;
+ }
+
+ if (i == ahd->num_critical_sections)
+ break;
+
+ if (steps > AHD_MAX_STEPS) {
+ printk("%s: Infinite loop in critical section\n"
+ "%s: First Instruction 0x%x now 0x%x\n",
+ ahd_name(ahd), ahd_name(ahd), first_instr,
+ seqaddr);
+ ahd_dump_card_state(ahd);
+ panic("critical section loop");
+ }
+
+ steps++;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("%s: Single stepping at 0x%x\n", ahd_name(ahd),
+ seqaddr);
+#endif
+ if (stepping == FALSE) {
+
+ first_instr = seqaddr;
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ simode0 = ahd_inb(ahd, SIMODE0);
+ simode3 = ahd_inb(ahd, SIMODE3);
+ lqimode0 = ahd_inb(ahd, LQIMODE0);
+ lqimode1 = ahd_inb(ahd, LQIMODE1);
+ lqomode0 = ahd_inb(ahd, LQOMODE0);
+ lqomode1 = ahd_inb(ahd, LQOMODE1);
+ ahd_outb(ahd, SIMODE0, 0);
+ ahd_outb(ahd, SIMODE3, 0);
+ ahd_outb(ahd, LQIMODE0, 0);
+ ahd_outb(ahd, LQIMODE1, 0);
+ ahd_outb(ahd, LQOMODE0, 0);
+ ahd_outb(ahd, LQOMODE1, 0);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ simode1 = ahd_inb(ahd, SIMODE1);
+ /*
+ * We don't clear ENBUSFREE. Unfortunately
+ * we cannot re-enable busfree detection within
+ * the current connection, so we must leave it
+ * on while single stepping.
+ */
+ ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
+ ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
+ stepping = TRUE;
+ }
+ ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+ ahd_outb(ahd, HCNTRL, ahd->unpause);
+ while (!ahd_is_paused(ahd))
+ ahd_delay(200);
+ ahd_update_modes(ahd);
+ }
+ if (stepping) {
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ ahd_outb(ahd, SIMODE0, simode0);
+ ahd_outb(ahd, SIMODE3, simode3);
+ ahd_outb(ahd, LQIMODE0, lqimode0);
+ ahd_outb(ahd, LQIMODE1, lqimode1);
+ ahd_outb(ahd, LQOMODE0, lqomode0);
+ ahd_outb(ahd, LQOMODE1, lqomode1);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
+ ahd_outb(ahd, SIMODE1, simode1);
+ /*
+ * SCSIINT seems to glitch occasionally when
+ * the interrupt masks are restored. Clear SCSIINT
+ * one more time so that only persistent errors
+ * are seen as a real interrupt.
+ */
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ }
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+/*
+ * Clear any pending interrupt status.
+ */
+static void
+ahd_clear_intstat(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ /* Clear any interrupt conditions this may have caused */
+ ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
+ |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
+ ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
+ |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
+ |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
+ ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
+ |CLRLQOATNPKT|CLRLQOTCRC);
+ ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
+ |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
+ if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+ ahd_outb(ahd, CLRLQOINT0, 0);
+ ahd_outb(ahd, CLRLQOINT1, 0);
+ }
+ ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
+ ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
+ |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
+ ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
+ |CLRIOERR|CLROVERRUN);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+}
+
+/**************************** Debugging Routines ******************************/
+#ifdef AHD_DEBUG
+uint32_t ahd_debug = AHD_DEBUG_OPTS;
+#endif
+
+#if 0
+void
+ahd_print_scb(struct scb *scb)
+{
+ struct hardware_scb *hscb;
+ int i;
+
+ hscb = scb->hscb;
+ printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
+ (void *)scb,
+ hscb->control,
+ hscb->scsiid,
+ hscb->lun,
+ hscb->cdb_len);
+ printk("Shared Data: ");
+ for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
+ printk("%#02x", hscb->shared_data.idata.cdb[i]);
+ printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
+ (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
+ (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF),
+ ahd_le32toh(hscb->datacnt),
+ ahd_le32toh(hscb->sgptr),
+ SCB_GET_TAG(scb));
+ ahd_dump_sglist(scb);
+}
+#endif /* 0 */
+
+/************************* Transfer Negotiation *******************************/
+/*
+ * Allocate per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static struct ahd_tmode_tstate *
+ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
+{
+ struct ahd_tmode_tstate *master_tstate;
+ struct ahd_tmode_tstate *tstate;
+ int i;
+
+ master_tstate = ahd->enabled_targets[ahd->our_id];
+ if (ahd->enabled_targets[scsi_id] != NULL
+ && ahd->enabled_targets[scsi_id] != master_tstate)
+ panic("%s: ahd_alloc_tstate - Target already allocated",
+ ahd_name(ahd));
+ tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC);
+ if (tstate == NULL)
+ return (NULL);
+
+ /*
+ * If we have allocated a master tstate, copy user settings from
+ * the master tstate (taken from SRAM or the EEPROM) for this
+ * channel, but reset our current and goal settings to async/narrow
+ * until an initiator talks to us.
+ */
+ if (master_tstate != NULL) {
+ memcpy(tstate, master_tstate, sizeof(*tstate));
+ memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
+ for (i = 0; i < 16; i++) {
+ memset(&tstate->transinfo[i].curr, 0,
+ sizeof(tstate->transinfo[i].curr));
+ memset(&tstate->transinfo[i].goal, 0,
+ sizeof(tstate->transinfo[i].goal));
+ }
+ } else
+ memset(tstate, 0, sizeof(*tstate));
+ ahd->enabled_targets[scsi_id] = tstate;
+ return (tstate);
+}
+
+#ifdef AHD_TARGET_MODE
+/*
+ * Free per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static void
+ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
+{
+ struct ahd_tmode_tstate *tstate;
+
+ /*
+ * Don't clean up our "master" tstate.
+ * It has our default user settings.
+ */
+ if (scsi_id == ahd->our_id
+ && force == FALSE)
+ return;
+
+ tstate = ahd->enabled_targets[scsi_id];
+ if (tstate != NULL)
+ kfree(tstate);
+ ahd->enabled_targets[scsi_id] = NULL;
+}
+#endif
+
+/*
+ * Called when we have an active connection to a target on the bus,
+ * this function finds the nearest period to the input period limited
+ * by the capabilities of the bus connectivity of and sync settings for
+ * the target.
+ */
+static void
+ahd_devlimited_syncrate(struct ahd_softc *ahd,
+ struct ahd_initiator_tinfo *tinfo,
+ u_int *period, u_int *ppr_options, role_t role)
+{
+ struct ahd_transinfo *transinfo;
+ u_int maxsync;
+
+ if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
+ && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
+ maxsync = AHD_SYNCRATE_PACED;
+ } else {
+ maxsync = AHD_SYNCRATE_ULTRA;
+ /* Can't do DT related options on an SE bus */
+ *ppr_options &= MSG_EXT_PPR_QAS_REQ;
+ }
+ /*
+ * Never allow a value higher than our current goal
+ * period otherwise we may allow a target initiated
+ * negotiation to go above the limit as set by the
+ * user. In the case of an initiator initiated
+ * sync negotiation, we limit based on the user
+ * setting. This allows the system to still accept
+ * incoming negotiations even if target initiated
+ * negotiation is not performed.
+ */
+ if (role == ROLE_TARGET)
+ transinfo = &tinfo->user;
+ else
+ transinfo = &tinfo->goal;
+ *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
+ if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
+ maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2);
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+ }
+ if (transinfo->period == 0) {
+ *period = 0;
+ *ppr_options = 0;
+ } else {
+ *period = max(*period, (u_int)transinfo->period);
+ ahd_find_syncrate(ahd, period, ppr_options, maxsync);
+ }
+}
+
+/*
+ * Look up the valid period to SCSIRATE conversion in our table.
+ * Return the period and offset that should be sent to the target
+ * if this was the beginning of an SDTR.
+ */
+void
+ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
+ u_int *ppr_options, u_int maxsync)
+{
+ if (*period < maxsync)
+ *period = maxsync;
+
+ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
+ && *period > AHD_SYNCRATE_MIN_DT)
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+
+ if (*period > AHD_SYNCRATE_MIN)
+ *period = 0;
+
+ /* Honor PPR option conformance rules. */
+ if (*period > AHD_SYNCRATE_PACED)
+ *ppr_options &= ~MSG_EXT_PPR_RTI;
+
+ if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
+ *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
+
+ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
+ *ppr_options &= MSG_EXT_PPR_QAS_REQ;
+
+ /* Skip all PACED only entries if IU is not available */
+ if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
+ && *period < AHD_SYNCRATE_DT)
+ *period = AHD_SYNCRATE_DT;
+
+ /* Skip all DT only entries if DT is not available */
+ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+ && *period < AHD_SYNCRATE_ULTRA2)
+ *period = AHD_SYNCRATE_ULTRA2;
+}
+
+/*
+ * Truncate the given synchronous offset to a value the
+ * current adapter type and syncrate are capable of.
+ */
+static void
+ahd_validate_offset(struct ahd_softc *ahd,
+ struct ahd_initiator_tinfo *tinfo,
+ u_int period, u_int *offset, int wide,
+ role_t role)
+{
+ u_int maxoffset;
+
+ /* Limit offset to what we can do */
+ if (period == 0)
+ maxoffset = 0;
+ else if (period <= AHD_SYNCRATE_PACED) {
+ if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
+ maxoffset = MAX_OFFSET_PACED_BUG;
+ else
+ maxoffset = MAX_OFFSET_PACED;
+ } else
+ maxoffset = MAX_OFFSET_NON_PACED;
+ *offset = min(*offset, maxoffset);
+ if (tinfo != NULL) {
+ if (role == ROLE_TARGET)
+ *offset = min(*offset, (u_int)tinfo->user.offset);
+ else
+ *offset = min(*offset, (u_int)tinfo->goal.offset);
+ }
+}
+
+/*
+ * Truncate the given transfer width parameter to a value the
+ * current adapter type is capable of.
+ */
+static void
+ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
+ u_int *bus_width, role_t role)
+{
+ switch (*bus_width) {
+ default:
+ if (ahd->features & AHD_WIDE) {
+ /* Respond Wide */
+ *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ /* FALLTHROUGH */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ break;
+ }
+ if (tinfo != NULL) {
+ if (role == ROLE_TARGET)
+ *bus_width = min((u_int)tinfo->user.width, *bus_width);
+ else
+ *bus_width = min((u_int)tinfo->goal.width, *bus_width);
+ }
+}
+
+/*
+ * Update the bitmask of targets for which the controller should
+ * negotiate with at the next convenient opportunity. This currently
+ * means the next time we send the initial identify messages for
+ * a new transaction.
+ */
+int
+ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct ahd_tmode_tstate *tstate,
+ struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
+{
+ u_int auto_negotiate_orig;
+
+ auto_negotiate_orig = tstate->auto_negotiate;
+ if (neg_type == AHD_NEG_ALWAYS) {
+ /*
+ * Force our "current" settings to be
+ * unknown so that unless a bus reset
+ * occurs the need to renegotiate is
+ * recorded persistently.
+ */
+ if ((ahd->features & AHD_WIDE) != 0)
+ tinfo->curr.width = AHD_WIDTH_UNKNOWN;
+ tinfo->curr.period = AHD_PERIOD_UNKNOWN;
+ tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
+ }
+ if (tinfo->curr.period != tinfo->goal.period
+ || tinfo->curr.width != tinfo->goal.width
+ || tinfo->curr.offset != tinfo->goal.offset
+ || tinfo->curr.ppr_options != tinfo->goal.ppr_options
+ || (neg_type == AHD_NEG_IF_NON_ASYNC
+ && (tinfo->goal.offset != 0
+ || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
+ || tinfo->goal.ppr_options != 0)))
+ tstate->auto_negotiate |= devinfo->target_mask;
+ else
+ tstate->auto_negotiate &= ~devinfo->target_mask;
+
+ return (auto_negotiate_orig != tstate->auto_negotiate);
+}
+
+/*
+ * Update the user/goal/curr tables of synchronous negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller. In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int period, u_int offset, u_int ppr_options,
+ u_int type, int paused)
+{
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ u_int old_period;
+ u_int old_offset;
+ u_int old_ppr;
+ int active;
+ int update_needed;
+
+ active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
+ update_needed = 0;
+
+ if (period == 0 || offset == 0) {
+ period = 0;
+ offset = 0;
+ }
+
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ if ((type & AHD_TRANS_USER) != 0) {
+ tinfo->user.period = period;
+ tinfo->user.offset = offset;
+ tinfo->user.ppr_options = ppr_options;
+ }
+
+ if ((type & AHD_TRANS_GOAL) != 0) {
+ tinfo->goal.period = period;
+ tinfo->goal.offset = offset;
+ tinfo->goal.ppr_options = ppr_options;
+ }
+
+ old_period = tinfo->curr.period;
+ old_offset = tinfo->curr.offset;
+ old_ppr = tinfo->curr.ppr_options;
+
+ if ((type & AHD_TRANS_CUR) != 0
+ && (old_period != period
+ || old_offset != offset
+ || old_ppr != ppr_options)) {
+
+ update_needed++;
+
+ tinfo->curr.period = period;
+ tinfo->curr.offset = offset;
+ tinfo->curr.ppr_options = ppr_options;
+
+ ahd_send_async(ahd, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
+ if (bootverbose) {
+ if (offset != 0) {
+ int options;
+
+ printk("%s: target %d synchronous with "
+ "period = 0x%x, offset = 0x%x",
+ ahd_name(ahd), devinfo->target,
+ period, offset);
+ options = 0;
+ if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
+ printk("(RDSTRM");
+ options++;
+ }
+ if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
+ printk("%s", options ? "|DT" : "(DT");
+ options++;
+ }
+ if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+ printk("%s", options ? "|IU" : "(IU");
+ options++;
+ }
+ if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
+ printk("%s", options ? "|RTI" : "(RTI");
+ options++;
+ }
+ if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
+ printk("%s", options ? "|QAS" : "(QAS");
+ options++;
+ }
+ if (options != 0)
+ printk(")\n");
+ else
+ printk("\n");
+ } else {
+ printk("%s: target %d using "
+ "asynchronous transfers%s\n",
+ ahd_name(ahd), devinfo->target,
+ (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
+ ? "(QAS)" : "");
+ }
+ }
+ }
+ /*
+ * Always refresh the neg-table to handle the case of the
+ * sequencer setting the ENATNO bit for a MK_MESSAGE request.
+ * We will always renegotiate in that case if this is a
+ * packetized request. Also manage the busfree expected flag
+ * from this common routine so that we catch changes due to
+ * WDTR or SDTR messages.
+ */
+ if ((type & AHD_TRANS_CUR) != 0) {
+ if (!paused)
+ ahd_pause(ahd);
+ ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
+ if (!paused)
+ ahd_unpause(ahd);
+ if (ahd->msg_type != MSG_TYPE_NONE) {
+ if ((old_ppr & MSG_EXT_PPR_IU_REQ)
+ != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ ahd_print_devinfo(ahd, devinfo);
+ printk("Expecting IU Change busfree\n");
+ }
+#endif
+ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
+ | MSG_FLAG_IU_REQ_CHANGED;
+ }
+ if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("PPR with IU_REQ outstanding\n");
+#endif
+ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
+ }
+ }
+ }
+
+ update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
+ tinfo, AHD_NEG_TO_GOAL);
+
+ if (update_needed && active)
+ ahd_update_pending_scbs(ahd);
+}
+
+/*
+ * Update the user/goal/curr tables of wide negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller. In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int width, u_int type, int paused)
+{
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ u_int oldwidth;
+ int active;
+ int update_needed;
+
+ active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
+ update_needed = 0;
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ if ((type & AHD_TRANS_USER) != 0)
+ tinfo->user.width = width;
+
+ if ((type & AHD_TRANS_GOAL) != 0)
+ tinfo->goal.width = width;
+
+ oldwidth = tinfo->curr.width;
+ if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
+
+ update_needed++;
+
+ tinfo->curr.width = width;
+ ahd_send_async(ahd, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
+ if (bootverbose) {
+ printk("%s: target %d using %dbit transfers\n",
+ ahd_name(ahd), devinfo->target,
+ 8 * (0x01 << width));
+ }
+ }
+
+ if ((type & AHD_TRANS_CUR) != 0) {
+ if (!paused)
+ ahd_pause(ahd);
+ ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
+ if (!paused)
+ ahd_unpause(ahd);
+ }
+
+ update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
+ tinfo, AHD_NEG_TO_GOAL);
+ if (update_needed && active)
+ ahd_update_pending_scbs(ahd);
+
+}
+
+/*
+ * Update the current state of tagged queuing for a given target.
+ */
+static void
+ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+ struct ahd_devinfo *devinfo, ahd_queue_alg alg)
+{
+ struct scsi_device *sdev = cmd->device;
+
+ ahd_platform_set_tags(ahd, sdev, devinfo, alg);
+ ahd_send_async(ahd, devinfo->channel, devinfo->target,
+ devinfo->lun, AC_TRANSFER_NEG);
+}
+
+static void
+ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct ahd_transinfo *tinfo)
+{
+ ahd_mode_state saved_modes;
+ u_int period;
+ u_int ppr_opts;
+ u_int con_opts;
+ u_int offset;
+ u_int saved_negoaddr;
+ uint8_t iocell_opts[sizeof(ahd->iocell_opts)];
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ saved_negoaddr = ahd_inb(ahd, NEGOADDR);
+ ahd_outb(ahd, NEGOADDR, devinfo->target);
+ period = tinfo->period;
+ offset = tinfo->offset;
+ memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
+ ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
+ |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
+ con_opts = 0;
+ if (period == 0)
+ period = AHD_SYNCRATE_ASYNC;
+ if (period == AHD_SYNCRATE_160) {
+
+ if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
+ /*
+ * When the SPI4 spec was finalized, PACE transfers
+ * was not made a configurable option in the PPR
+ * message. Instead it is assumed to be enabled for
+ * any syncrate faster than 80MHz. Nevertheless,
+ * Harpoon2A4 allows this to be configurable.
+ *
+ * Harpoon2A4 also assumes at most 2 data bytes per
+ * negotiated REQ/ACK offset. Paced transfers take
+ * 4, so we must adjust our offset.
+ */
+ ppr_opts |= PPROPT_PACE;
+ offset *= 2;
+
+ /*
+ * Harpoon2A assumed that there would be a
+ * fallback rate between 160MHz and 80MHz,
+ * so 7 is used as the period factor rather
+ * than 8 for 160MHz.
+ */
+ period = AHD_SYNCRATE_REVA_160;
+ }
+ if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
+ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
+ ~AHD_PRECOMP_MASK;
+ } else {
+ /*
+ * Precomp should be disabled for non-paced transfers.
+ */
+ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;
+
+ if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
+ && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
+ && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
+ /*
+ * Slow down our CRC interval to be
+ * compatible with non-packetized
+ * U160 devices that can't handle a
+ * CRC at full speed.
+ */
+ con_opts |= ENSLOWCRC;
+ }
+
+ if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
+ /*
+ * On H2A4, revert to a slower slewrate
+ * on non-paced transfers.
+ */
+ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
+ ~AHD_SLEWRATE_MASK;
+ }
+ }
+
+ ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
+ ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
+ ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
+ ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);
+
+ ahd_outb(ahd, NEGPERIOD, period);
+ ahd_outb(ahd, NEGPPROPTS, ppr_opts);
+ ahd_outb(ahd, NEGOFFSET, offset);
+
+ if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
+ con_opts |= WIDEXFER;
+
+ /*
+ * Slow down our CRC interval to be
+ * compatible with packetized U320 devices
+ * that can't handle a CRC at full speed
+ */
+ if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
+ con_opts |= ENSLOWCRC;
+ }
+
+ /*
+ * During packetized transfers, the target will
+ * give us the opportunity to send command packets
+ * without us asserting attention.
+ */
+ if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
+ con_opts |= ENAUTOATNO;
+ ahd_outb(ahd, NEGCONOPTS, con_opts);
+ ahd_outb(ahd, NEGOADDR, saved_negoaddr);
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+/*
+ * When the transfer settings for a connection change, setup for
+ * negotiation in pending SCBs to effect the change as quickly as
+ * possible. We also cancel any negotiations that are scheduled
+ * for inflight SCBs that have not been started yet.
+ */
+static void
+ahd_update_pending_scbs(struct ahd_softc *ahd)
+{
+ struct scb *pending_scb;
+ int pending_scb_count;
+ int paused;
+ u_int saved_scbptr;
+ ahd_mode_state saved_modes;
+
+ /*
+ * Traverse the pending SCB list and ensure that all of the
+ * SCBs there have the proper settings. We can only safely
+ * clear the negotiation required flag (setting requires the
+ * execution queue to be modified) and this is only possible
+ * if we are not already attempting to select out for this
+ * SCB. For this reason, all callers only call this routine
+ * if we are changing the negotiation settings for the currently
+ * active transaction on the bus.
+ */
+ pending_scb_count = 0;
+ LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
+ struct ahd_devinfo devinfo;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+
+ ahd_scb_devinfo(ahd, &devinfo, pending_scb);
+ tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target, &tstate);
+ if ((tstate->auto_negotiate & devinfo.target_mask) == 0
+ && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
+ pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
+ pending_scb->hscb->control &= ~MK_MESSAGE;
+ }
+ ahd_sync_scb(ahd, pending_scb,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ pending_scb_count++;
+ }
+
+ if (pending_scb_count == 0)
+ return;
+
+ if (ahd_is_paused(ahd)) {
+ paused = 1;
+ } else {
+ paused = 0;
+ ahd_pause(ahd);
+ }
+
+ /*
+ * Force the sequencer to reinitialize the selection for
+ * the command at the head of the execution queue if it
+ * has already been setup. The negotiation changes may
+ * effect whether we select-out with ATN. It is only
+ * safe to clear ENSELO when the bus is not free and no
+ * selection is in progres or completed.
+ */
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
+ && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
+ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+ saved_scbptr = ahd_get_scbptr(ahd);
+ /* Ensure that the hscbs down on the card match the new information */
+ LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
+ u_int scb_tag;
+ u_int control;
+
+ scb_tag = SCB_GET_TAG(pending_scb);
+ ahd_set_scbptr(ahd, scb_tag);
+ control = ahd_inb_scbram(ahd, SCB_CONTROL);
+ control &= ~MK_MESSAGE;
+ control |= pending_scb->hscb->control & MK_MESSAGE;
+ ahd_outb(ahd, SCB_CONTROL, control);
+ }
+ ahd_set_scbptr(ahd, saved_scbptr);
+ ahd_restore_modes(ahd, saved_modes);
+
+ if (paused == 0)
+ ahd_unpause(ahd);
+}
+
+/**************************** Pathing Information *****************************/
+static void
+ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ ahd_mode_state saved_modes;
+ u_int saved_scsiid;
+ role_t role;
+ int our_id;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ if (ahd_inb(ahd, SSTAT0) & TARGET)
+ role = ROLE_TARGET;
+ else
+ role = ROLE_INITIATOR;
+
+ if (role == ROLE_TARGET
+ && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
+ /* We were selected, so pull our id from TARGIDIN */
+ our_id = ahd_inb(ahd, TARGIDIN) & OID;
+ } else if (role == ROLE_TARGET)
+ our_id = ahd_inb(ahd, TOWNID);
+ else
+ our_id = ahd_inb(ahd, IOWNID);
+
+ saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+ ahd_compile_devinfo(devinfo,
+ our_id,
+ SCSIID_TARGET(ahd, saved_scsiid),
+ ahd_inb(ahd, SAVED_LUN),
+ SCSIID_CHANNEL(ahd, saved_scsiid),
+ role);
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+void
+ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
+ devinfo->target, devinfo->lun);
+}
+
+static const struct ahd_phase_table_entry*
+ahd_lookup_phase_entry(int phase)
+{
+ const struct ahd_phase_table_entry *entry;
+ const struct ahd_phase_table_entry *last_entry;
+
+ /*
+ * num_phases doesn't include the default entry which
+ * will be returned if the phase doesn't match.
+ */
+ last_entry = &ahd_phase_table[num_phases];
+ for (entry = ahd_phase_table; entry < last_entry; entry++) {
+ if (phase == entry->phase)
+ break;
+ }
+ return (entry);
+}
+
+void
+ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
+ u_int lun, char channel, role_t role)
+{
+ devinfo->our_scsiid = our_id;
+ devinfo->target = target;
+ devinfo->lun = lun;
+ devinfo->target_offset = target;
+ devinfo->channel = channel;
+ devinfo->role = role;
+ if (channel == 'B')
+ devinfo->target_offset += 8;
+ devinfo->target_mask = (0x01 << devinfo->target_offset);
+}
+
+static void
+ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct scb *scb)
+{
+ role_t role;
+ int our_id;
+
+ our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
+ role = ROLE_INITIATOR;
+ if ((scb->hscb->control & TARGET_SCB) != 0)
+ role = ROLE_TARGET;
+ ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
+}
+
+
+/************************ Message Phase Processing ****************************/
+/*
+ * When an initiator transaction with the MK_MESSAGE flag either reconnects
+ * or enters the initial message out phase, we are interrupted. Fill our
+ * outgoing message buffer with the appropriate message and beging handing
+ * the message phase(s) manually.
+ */
+static void
+ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct scb *scb)
+{
+ /*
+ * To facilitate adding multiple messages together,
+ * each routine should increment the index and len
+ * variables instead of setting them explicitly.
+ */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+
+ if (ahd_currently_packetized(ahd))
+ ahd->msg_flags |= MSG_FLAG_PACKETIZED;
+
+ if (ahd->send_msg_perror
+ && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
+ ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
+ ahd->msgout_len++;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("Setting up for Parity Error delivery\n");
+#endif
+ return;
+ } else if (scb == NULL) {
+ printk("%s: WARNING. No pending message for "
+ "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
+ ahd->msgout_len++;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ return;
+ }
+
+ if ((scb->flags & SCB_DEVICE_RESET) == 0
+ && (scb->flags & SCB_PACKETIZED) == 0
+ && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
+ u_int identify_msg;
+
+ identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
+ if ((scb->hscb->control & DISCENB) != 0)
+ identify_msg |= MSG_IDENTIFY_DISCFLAG;
+ ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
+ ahd->msgout_len++;
+
+ if ((scb->hscb->control & TAG_ENB) != 0) {
+ ahd->msgout_buf[ahd->msgout_index++] =
+ scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
+ ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
+ ahd->msgout_len += 2;
+ }
+ }
+
+ if (scb->flags & SCB_DEVICE_RESET) {
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahd->msgout_len++;
+ ahd_print_path(ahd, scb);
+ printk("Bus Device Reset Message Sent\n");
+ /*
+ * Clear our selection hardware in advance of
+ * the busfree. We may have an entry in the waiting
+ * Q for this target, and we don't want to go about
+ * selecting while we handle the busfree and blow it
+ * away.
+ */
+ ahd_outb(ahd, SCSISEQ0, 0);
+ } else if ((scb->flags & SCB_ABORT) != 0) {
+
+ if ((scb->hscb->control & TAG_ENB) != 0) {
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
+ } else {
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
+ }
+ ahd->msgout_len++;
+ ahd_print_path(ahd, scb);
+ printk("Abort%s Message Sent\n",
+ (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
+ /*
+ * Clear our selection hardware in advance of
+ * the busfree. We may have an entry in the waiting
+ * Q for this target, and we don't want to go about
+ * selecting while we handle the busfree and blow it
+ * away.
+ */
+ ahd_outb(ahd, SCSISEQ0, 0);
+ } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
+ ahd_build_transfer_msg(ahd, devinfo);
+ /*
+ * Clear our selection hardware in advance of potential
+ * PPR IU status change busfree. We may have an entry in
+ * the waiting Q for this target, and we don't want to go
+ * about selecting while we handle the busfree and blow
+ * it away.
+ */
+ ahd_outb(ahd, SCSISEQ0, 0);
+ } else {
+ printk("ahd_intr: AWAITING_MSG for an SCB that "
+ "does not have a waiting message\n");
+ printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
+ devinfo->target_mask);
+ panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
+ "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
+ ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
+ scb->flags);
+ }
+
+ /*
+ * Clear the MK_MESSAGE flag from the SCB so we aren't
+ * asked to send this message again.
+ */
+ ahd_outb(ahd, SCB_CONTROL,
+ ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
+ scb->hscb->control &= ~MK_MESSAGE;
+ ahd->msgout_index = 0;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+}
+
+/*
+ * Build an appropriate transfer negotiation message for the
+ * currently active target.
+ */
+static void
+ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ /*
+ * We need to initiate transfer negotiations.
+ * If our current and goal settings are identical,
+ * we want to renegotiate due to a check condition.
+ */
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ int dowide;
+ int dosync;
+ int doppr;
+ u_int period;
+ u_int ppr_options;
+ u_int offset;
+
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+ /*
+ * Filter our period based on the current connection.
+ * If we can't perform DT transfers on this segment (not in LVD
+ * mode for instance), then our decision to issue a PPR message
+ * may change.
+ */
+ period = tinfo->goal.period;
+ offset = tinfo->goal.offset;
+ ppr_options = tinfo->goal.ppr_options;
+ /* Target initiated PPR is not allowed in the SCSI spec */
+ if (devinfo->role == ROLE_TARGET)
+ ppr_options = 0;
+ ahd_devlimited_syncrate(ahd, tinfo, &period,
+ &ppr_options, devinfo->role);
+ dowide = tinfo->curr.width != tinfo->goal.width;
+ dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
+ /*
+ * Only use PPR if we have options that need it, even if the device
+ * claims to support it. There might be an expander in the way
+ * that doesn't.
+ */
+ doppr = ppr_options != 0;
+
+ if (!dowide && !dosync && !doppr) {
+ dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
+ dosync = tinfo->goal.offset != 0;
+ }
+
+ if (!dowide && !dosync && !doppr) {
+ /*
+ * Force async with a WDTR message if we have a wide bus,
+ * or just issue an SDTR with a 0 offset.
+ */
+ if ((ahd->features & AHD_WIDE) != 0)
+ dowide = 1;
+ else
+ dosync = 1;
+
+ if (bootverbose) {
+ ahd_print_devinfo(ahd, devinfo);
+ printk("Ensuring async\n");
+ }
+ }
+ /* Target initiated PPR is not allowed in the SCSI spec */
+ if (devinfo->role == ROLE_TARGET)
+ doppr = 0;
+
+ /*
+ * Both the PPR message and SDTR message require the
+ * goal syncrate to be limited to what the target device
+ * is capable of handling (based on whether an LVD->SE
+ * expander is on the bus), so combine these two cases.
+ * Regardless, guarantee that if we are using WDTR and SDTR
+ * messages that WDTR comes first.
+ */
+ if (doppr || (dosync && !dowide)) {
+
+ offset = tinfo->goal.offset;
+ ahd_validate_offset(ahd, tinfo, period, &offset,
+ doppr ? tinfo->goal.width
+ : tinfo->curr.width,
+ devinfo->role);
+ if (doppr) {
+ ahd_construct_ppr(ahd, devinfo, period, offset,
+ tinfo->goal.width, ppr_options);
+ } else {
+ ahd_construct_sdtr(ahd, devinfo, period, offset);
+ }
+ } else {
+ ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
+ }
+}
+
+/*
+ * Build a synchronous negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int period, u_int offset)
+{
+ if (offset == 0)
+ period = AHD_ASYNC_XFER_PERIOD;
+ ahd->msgout_index += spi_populate_sync_msg(
+ ahd->msgout_buf + ahd->msgout_index, period, offset);
+ ahd->msgout_len += 5;
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ devinfo->lun, period, offset);
+ }
+}
+
+/*
+ * Build a wide negotiateion message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int bus_width)
+{
+ ahd->msgout_index += spi_populate_width_msg(
+ ahd->msgout_buf + ahd->msgout_index, bus_width);
+ ahd->msgout_len += 4;
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Sending WDTR %x\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ devinfo->lun, bus_width);
+ }
+}
+
+/*
+ * Build a parallel protocol request message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int period, u_int offset, u_int bus_width,
+ u_int ppr_options)
+{
+ /*
+ * Always request precompensation from
+ * the other target if we are running
+ * at paced syncrates.
+ */
+ if (period <= AHD_SYNCRATE_PACED)
+ ppr_options |= MSG_EXT_PPR_PCOMP_EN;
+ if (offset == 0)
+ period = AHD_ASYNC_XFER_PERIOD;
+ ahd->msgout_index += spi_populate_ppr_msg(
+ ahd->msgout_buf + ahd->msgout_index, period, offset,
+ bus_width, ppr_options);
+ ahd->msgout_len += 8;
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
+ "offset %x, ppr_options %x\n", ahd_name(ahd),
+ devinfo->channel, devinfo->target, devinfo->lun,
+ bus_width, period, offset, ppr_options);
+ }
+}
+
+/*
+ * Clear any active message state.
+ */
+static void
+ahd_clear_msg_state(struct ahd_softc *ahd)
+{
+ ahd_mode_state saved_modes;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd->send_msg_perror = 0;
+ ahd->msg_flags = MSG_FLAG_NONE;
+ ahd->msgout_len = 0;
+ ahd->msgin_index = 0;
+ ahd->msg_type = MSG_TYPE_NONE;
+ if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
+ /*
+ * The target didn't care to respond to our
+ * message request, so clear ATN.
+ */
+ ahd_outb(ahd, CLRSINT1, CLRATNO);
+ }
+ ahd_outb(ahd, MSG_OUT, MSG_NOOP);
+ ahd_outb(ahd, SEQ_FLAGS2,
+ ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+/*
+ * Manual message loop handler.
+ */
+static void
+ahd_handle_message_phase(struct ahd_softc *ahd)
+{
+ struct ahd_devinfo devinfo;
+ u_int bus_phase;
+ int end_session;
+
+ ahd_fetch_devinfo(ahd, &devinfo);
+ end_session = FALSE;
+ bus_phase = ahd_inb(ahd, LASTPHASE);
+
+ if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
+ printk("LQIRETRY for LQIPHASE_OUTPKT\n");
+ ahd_outb(ahd, LQCTL2, LQIRETRY);
+ }
+reswitch:
+ switch (ahd->msg_type) {
+ case MSG_TYPE_INITIATOR_MSGOUT:
+ {
+ int lastbyte;
+ int phasemis;
+ int msgdone;
+
+ if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
+ panic("HOST_MSG_LOOP interrupt with no active message");
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("INITIATOR_MSG_OUT");
+ }
+#endif
+ phasemis = bus_phase != P_MESGOUT;
+ if (phasemis) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ printk(" PHASEMIS %s\n",
+ ahd_lookup_phase_entry(bus_phase)
+ ->phasemsg);
+ }
+#endif
+ if (bus_phase == P_MESGIN) {
+ /*
+ * Change gears and see if
+ * this messages is of interest to
+ * us or should be passed back to
+ * the sequencer.
+ */
+ ahd_outb(ahd, CLRSINT1, CLRATNO);
+ ahd->send_msg_perror = 0;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
+ ahd->msgin_index = 0;
+ goto reswitch;
+ }
+ end_session = TRUE;
+ break;
+ }
+
+ if (ahd->send_msg_perror) {
+ ahd_outb(ahd, CLRSINT1, CLRATNO);
+ ahd_outb(ahd, CLRSINT1, CLRREQINIT);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk(" byte 0x%x\n", ahd->send_msg_perror);
+#endif
+ /*
+ * If we are notifying the target of a CRC error
+ * during packetized operations, the target is
+ * within its rights to acknowledge our message
+ * with a busfree.
+ */
+ if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
+ && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
+ ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
+
+ ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
+ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
+ break;
+ }
+
+ msgdone = ahd->msgout_index == ahd->msgout_len;
+ if (msgdone) {
+ /*
+ * The target has requested a retry.
+ * Re-assert ATN, reset our message index to
+ * 0, and try again.
+ */
+ ahd->msgout_index = 0;
+ ahd_assert_atn(ahd);
+ }
+
+ lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
+ if (lastbyte) {
+ /* Last byte is signified by dropping ATN */
+ ahd_outb(ahd, CLRSINT1, CLRATNO);
+ }
+
+ /*
+ * Clear our interrupt status and present
+ * the next byte on the bus.
+ */
+ ahd_outb(ahd, CLRSINT1, CLRREQINIT);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk(" byte 0x%x\n",
+ ahd->msgout_buf[ahd->msgout_index]);
+#endif
+ ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
+ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
+ break;
+ }
+ case MSG_TYPE_INITIATOR_MSGIN:
+ {
+ int phasemis;
+ int message_done;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("INITIATOR_MSG_IN");
+ }
+#endif
+ phasemis = bus_phase != P_MESGIN;
+ if (phasemis) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ printk(" PHASEMIS %s\n",
+ ahd_lookup_phase_entry(bus_phase)
+ ->phasemsg);
+ }
+#endif
+ ahd->msgin_index = 0;
+ if (bus_phase == P_MESGOUT
+ && (ahd->send_msg_perror != 0
+ || (ahd->msgout_len != 0
+ && ahd->msgout_index == 0))) {
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ goto reswitch;
+ }
+ end_session = TRUE;
+ break;
+ }
+
+ /* Pull the byte in without acking it */
+ ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk(" byte 0x%x\n",
+ ahd->msgin_buf[ahd->msgin_index]);
+#endif
+
+ message_done = ahd_parse_msg(ahd, &devinfo);
+
+ if (message_done) {
+ /*
+ * Clear our incoming message buffer in case there
+ * is another message following this one.
+ */
+ ahd->msgin_index = 0;
+
+ /*
+ * If this message illicited a response,
+ * assert ATN so the target takes us to the
+ * message out phase.
+ */
+ if (ahd->msgout_len != 0) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("Asserting ATN for response\n");
+ }
+#endif
+ ahd_assert_atn(ahd);
+ }
+ } else
+ ahd->msgin_index++;
+
+ if (message_done == MSGLOOP_TERMINATED) {
+ end_session = TRUE;
+ } else {
+ /* Ack the byte */
+ ahd_outb(ahd, CLRSINT1, CLRREQINIT);
+ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
+ }
+ break;
+ }
+ case MSG_TYPE_TARGET_MSGIN:
+ {
+ int msgdone;
+ int msgout_request;
+
+ /*
+ * By default, the message loop will continue.
+ */
+ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
+
+ if (ahd->msgout_len == 0)
+ panic("Target MSGIN with no active message");
+
+ /*
+ * If we interrupted a mesgout session, the initiator
+ * will not know this until our first REQ. So, we
+ * only honor mesgout requests after we've sent our
+ * first byte.
+ */
+ if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
+ && ahd->msgout_index > 0)
+ msgout_request = TRUE;
+ else
+ msgout_request = FALSE;
+
+ if (msgout_request) {
+
+ /*
+ * Change gears and see if
+ * this messages is of interest to
+ * us or should be passed back to
+ * the sequencer.
+ */
+ ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
+ ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
+ ahd->msgin_index = 0;
+ /* Dummy read to REQ for first byte */
+ ahd_inb(ahd, SCSIDAT);
+ ahd_outb(ahd, SXFRCTL0,
+ ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+ break;
+ }
+
+ msgdone = ahd->msgout_index == ahd->msgout_len;
+ if (msgdone) {
+ ahd_outb(ahd, SXFRCTL0,
+ ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
+ end_session = TRUE;
+ break;
+ }
+
+ /*
+ * Present the next byte on the bus.
+ */
+ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+ ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
+ break;
+ }
+ case MSG_TYPE_TARGET_MSGOUT:
+ {
+ int lastbyte;
+ int msgdone;
+
+ /*
+ * By default, the message loop will continue.
+ */
+ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
+
+ /*
+ * The initiator signals that this is
+ * the last byte by dropping ATN.
+ */
+ lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;
+
+ /*
+ * Read the latched byte, but turn off SPIOEN first
+ * so that we don't inadvertently cause a REQ for the
+ * next byte.
+ */
+ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
+ ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
+ msgdone = ahd_parse_msg(ahd, &devinfo);
+ if (msgdone == MSGLOOP_TERMINATED) {
+ /*
+ * The message is *really* done in that it caused
+ * us to go to bus free. The sequencer has already
+ * been reset at this point, so pull the ejection
+ * handle.
+ */
+ return;
+ }
+
+ ahd->msgin_index++;
+
+ /*
+ * XXX Read spec about initiator dropping ATN too soon
+ * and use msgdone to detect it.
+ */
+ if (msgdone == MSGLOOP_MSGCOMPLETE) {
+ ahd->msgin_index = 0;
+
+ /*
+ * If this message illicited a response, transition
+ * to the Message in phase and send it.
+ */
+ if (ahd->msgout_len != 0) {
+ ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
+ ahd_outb(ahd, SXFRCTL0,
+ ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+ ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
+ ahd->msgin_index = 0;
+ break;
+ }
+ }
+
+ if (lastbyte)
+ end_session = TRUE;
+ else {
+ /* Ask for the next byte. */
+ ahd_outb(ahd, SXFRCTL0,
+ ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+ }
+
+ break;
+ }
+ default:
+ panic("Unknown REQINIT message type");
+ }
+
+ if (end_session) {
+ if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
+ printk("%s: Returning to Idle Loop\n",
+ ahd_name(ahd));
+ ahd_clear_msg_state(ahd);
+
+ /*
+ * Perform the equivalent of a clear_target_state.
+ */
+ ahd_outb(ahd, LASTPHASE, P_BUSFREE);
+ ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
+ ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
+ } else {
+ ahd_clear_msg_state(ahd);
+ ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
+ }
+ }
+}
+
+/*
+ * See if we sent a particular extended message to the target.
+ * If "full" is true, return true only if the target saw the full
+ * message. If "full" is false, return true if the target saw at
+ * least the first byte of the message.
+ */
+static int
+ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
+{
+ int found;
+ u_int index;
+
+ found = FALSE;
+ index = 0;
+
+ while (index < ahd->msgout_len) {
+ if (ahd->msgout_buf[index] == MSG_EXTENDED) {
+ u_int end_index;
+
+ end_index = index + 1 + ahd->msgout_buf[index + 1];
+ if (ahd->msgout_buf[index+2] == msgval
+ && type == AHDMSG_EXT) {
+
+ if (full) {
+ if (ahd->msgout_index > end_index)
+ found = TRUE;
+ } else if (ahd->msgout_index > index)
+ found = TRUE;
+ }
+ index = end_index;
+ } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
+ && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+
+ /* Skip tag type and tag id or residue param*/
+ index += 2;
+ } else {
+ /* Single byte message */
+ if (type == AHDMSG_1B
+ && ahd->msgout_index > index
+ && (ahd->msgout_buf[index] == msgval
+ || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
+ && msgval == MSG_IDENTIFYFLAG)))
+ found = TRUE;
+ index++;
+ }
+
+ if (found)
+ break;
+ }
+ return (found);
+}
+
+/*
+ * Wait for a complete incoming message, parse it, and respond accordingly.
+ */
+static int
+ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ int reject;
+ int done;
+ int response;
+
+ done = MSGLOOP_IN_PROG;
+ response = FALSE;
+ reject = FALSE;
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ /*
+ * Parse as much of the message as is available,
+ * rejecting it if we don't support it. When
+ * the entire message is available and has been
+ * handled, return MSGLOOP_MSGCOMPLETE, indicating
+ * that we have parsed an entire message.
+ *
+ * In the case of extended messages, we accept the length
+ * byte outright and perform more checking once we know the
+ * extended message type.
+ */
+ switch (ahd->msgin_buf[0]) {
+ case MSG_DISCONNECT:
+ case MSG_SAVEDATAPOINTER:
+ case MSG_CMDCOMPLETE:
+ case MSG_RESTOREPOINTERS:
+ case MSG_IGN_WIDE_RESIDUE:
+ /*
+ * End our message loop as these are messages
+ * the sequencer handles on its own.
+ */
+ done = MSGLOOP_TERMINATED;
+ break;
+ case MSG_MESSAGE_REJECT:
+ response = ahd_handle_msg_reject(ahd, devinfo);
+ /* FALLTHROUGH */
+ case MSG_NOOP:
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ case MSG_EXTENDED:
+ {
+ /* Wait for enough of the message to begin validation */
+ if (ahd->msgin_index < 2)
+ break;
+ switch (ahd->msgin_buf[2]) {
+ case MSG_EXT_SDTR:
+ {
+ u_int period;
+ u_int ppr_options;
+ u_int offset;
+ u_int saved_offset;
+
+ if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have both args before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_SDTR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
+ break;
+
+ period = ahd->msgin_buf[3];
+ ppr_options = 0;
+ saved_offset = offset = ahd->msgin_buf[4];
+ ahd_devlimited_syncrate(ahd, tinfo, &period,
+ &ppr_options, devinfo->role);
+ ahd_validate_offset(ahd, tinfo, period, &offset,
+ tinfo->curr.width, devinfo->role);
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Received "
+ "SDTR period %x, offset %x\n\t"
+ "Filtered to period %x, offset %x\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ ahd->msgin_buf[3], saved_offset,
+ period, offset);
+ }
+ ahd_set_syncrate(ahd, devinfo, period,
+ offset, ppr_options,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+
+ /*
+ * See if we initiated Sync Negotiation
+ * and didn't have to fall down to async
+ * transfers.
+ */
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+ /* We started it */
+ if (saved_offset != offset) {
+ /* Went too low - force async */
+ reject = TRUE;
+ }
+ } else {
+ /*
+ * Send our own SDTR in reply
+ */
+ if (bootverbose
+ && devinfo->role == ROLE_INITIATOR) {
+ printk("(%s:%c:%d:%d): Target "
+ "Initiated SDTR\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_construct_sdtr(ahd, devinfo,
+ period, offset);
+ ahd->msgout_index = 0;
+ response = TRUE;
+ }
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ }
+ case MSG_EXT_WDTR:
+ {
+ u_int bus_width;
+ u_int saved_width;
+ u_int sending_reply;
+
+ sending_reply = FALSE;
+ if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have our arg before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_WDTR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
+ break;
+
+ bus_width = ahd->msgin_buf[3];
+ saved_width = bus_width;
+ ahd_validate_width(ahd, tinfo, &bus_width,
+ devinfo->role);
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Received WDTR "
+ "%x filtered to %x\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ saved_width, bus_width);
+ }
+
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+ /*
+ * Don't send a WDTR back to the
+ * target, since we asked first.
+ * If the width went higher than our
+ * request, reject it.
+ */
+ if (saved_width > bus_width) {
+ reject = TRUE;
+ printk("(%s:%c:%d:%d): requested %dBit "
+ "transfers. Rejecting...\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ 8 * (0x01 << bus_width));
+ bus_width = 0;
+ }
+ } else {
+ /*
+ * Send our own WDTR in reply
+ */
+ if (bootverbose
+ && devinfo->role == ROLE_INITIATOR) {
+ printk("(%s:%c:%d:%d): Target "
+ "Initiated WDTR\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_construct_wdtr(ahd, devinfo, bus_width);
+ ahd->msgout_index = 0;
+ response = TRUE;
+ sending_reply = TRUE;
+ }
+ /*
+ * After a wide message, we are async, but
+ * some devices don't seem to honor this portion
+ * of the spec. Force a renegotiation of the
+ * sync component of our transfer agreement even
+ * if our goal is async. By updating our width
+ * after forcing the negotiation, we avoid
+ * renegotiating for width.
+ */
+ ahd_update_neg_request(ahd, devinfo, tstate,
+ tinfo, AHD_NEG_ALWAYS);
+ ahd_set_width(ahd, devinfo, bus_width,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ if (sending_reply == FALSE && reject == FALSE) {
+
+ /*
+ * We will always have an SDTR to send.
+ */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_build_transfer_msg(ahd, devinfo);
+ ahd->msgout_index = 0;
+ response = TRUE;
+ }
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ }
+ case MSG_EXT_PPR:
+ {
+ u_int period;
+ u_int offset;
+ u_int bus_width;
+ u_int ppr_options;
+ u_int saved_width;
+ u_int saved_offset;
+ u_int saved_ppr_options;
+
+ if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have all args before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_PPR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
+ break;
+
+ period = ahd->msgin_buf[3];
+ offset = ahd->msgin_buf[5];
+ bus_width = ahd->msgin_buf[6];
+ saved_width = bus_width;
+ ppr_options = ahd->msgin_buf[7];
+ /*
+ * According to the spec, a DT only
+ * period factor with no DT option
+ * set implies async.
+ */
+ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+ && period <= 9)
+ offset = 0;
+ saved_ppr_options = ppr_options;
+ saved_offset = offset;
+
+ /*
+ * Transfer options are only available if we
+ * are negotiating wide.
+ */
+ if (bus_width == 0)
+ ppr_options &= MSG_EXT_PPR_QAS_REQ;
+
+ ahd_validate_width(ahd, tinfo, &bus_width,
+ devinfo->role);
+ ahd_devlimited_syncrate(ahd, tinfo, &period,
+ &ppr_options, devinfo->role);
+ ahd_validate_offset(ahd, tinfo, period, &offset,
+ bus_width, devinfo->role);
+
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
+ /*
+ * If we are unable to do any of the
+ * requested options (we went too low),
+ * then we'll have to reject the message.
+ */
+ if (saved_width > bus_width
+ || saved_offset != offset
+ || saved_ppr_options != ppr_options) {
+ reject = TRUE;
+ period = 0;
+ offset = 0;
+ bus_width = 0;
+ ppr_options = 0;
+ }
+ } else {
+ if (devinfo->role != ROLE_TARGET)
+ printk("(%s:%c:%d:%d): Target "
+ "Initiated PPR\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ else
+ printk("(%s:%c:%d:%d): Initiator "
+ "Initiated PPR\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_construct_ppr(ahd, devinfo, period, offset,
+ bus_width, ppr_options);
+ ahd->msgout_index = 0;
+ response = TRUE;
+ }
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Received PPR width %x, "
+ "period %x, offset %x,options %x\n"
+ "\tFiltered to width %x, period %x, "
+ "offset %x, options %x\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ saved_width, ahd->msgin_buf[3],
+ saved_offset, saved_ppr_options,
+ bus_width, period, offset, ppr_options);
+ }
+ ahd_set_width(ahd, devinfo, bus_width,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ ahd_set_syncrate(ahd, devinfo, period,
+ offset, ppr_options,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ }
+ default:
+ /* Unknown extended message. Reject it. */
+ reject = TRUE;
+ break;
+ }
+ break;
+ }
+#ifdef AHD_TARGET_MODE
+ case MSG_BUS_DEV_RESET:
+ ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
+ CAM_BDR_SENT,
+ "Bus Device Reset Received",
+ /*verbose_level*/0);
+ ahd_restart(ahd);
+ done = MSGLOOP_TERMINATED;
+ break;
+ case MSG_ABORT_TAG:
+ case MSG_ABORT:
+ case MSG_CLEAR_QUEUE:
+ {
+ int tag;
+
+ /* Target mode messages */
+ if (devinfo->role != ROLE_TARGET) {
+ reject = TRUE;
+ break;
+ }
+ tag = SCB_LIST_NULL;
+ if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
+ tag = ahd_inb(ahd, INITIATOR_TAG);
+ ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
+ devinfo->lun, tag, ROLE_TARGET,
+ CAM_REQ_ABORTED);
+
+ tstate = ahd->enabled_targets[devinfo->our_scsiid];
+ if (tstate != NULL) {
+ struct ahd_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[devinfo->lun];
+ if (lstate != NULL) {
+ ahd_queue_lstate_event(ahd, lstate,
+ devinfo->our_scsiid,
+ ahd->msgin_buf[0],
+ /*arg*/tag);
+ ahd_send_lstate_events(ahd, lstate);
+ }
+ }
+ ahd_restart(ahd);
+ done = MSGLOOP_TERMINATED;
+ break;
+ }
+#endif
+ case MSG_QAS_REQUEST:
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+ printk("%s: QAS request. SCSISIGI == 0x%x\n",
+ ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
+#endif
+ ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
+ /* FALLTHROUGH */
+ case MSG_TERM_IO_PROC:
+ default:
+ reject = TRUE;
+ break;
+ }
+
+ if (reject) {
+ /*
+ * Setup to reject the message.
+ */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 1;
+ ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
+ done = MSGLOOP_MSGCOMPLETE;
+ response = TRUE;
+ }
+
+ if (done != MSGLOOP_IN_PROG && !response)
+ /* Clear the outgoing message buffer */
+ ahd->msgout_len = 0;
+
+ return (done);
+}
+
+/*
+ * Process a message reject message.
+ */
+static int
+ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ /*
+ * What we care about here is if we had an
+ * outstanding SDTR or WDTR message for this
+ * target. If we did, this is a signal that
+ * the target is refusing negotiation.
+ */
+ struct scb *scb;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ u_int scb_index;
+ u_int last_msg;
+ int response = 0;
+
+ scb_index = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scb_index);
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
+ devinfo->our_scsiid,
+ devinfo->target, &tstate);
+ /* Might be necessary */
+ last_msg = ahd_inb(ahd, LAST_MSG);
+
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
+ && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
+ /*
+ * Target may not like our SPI-4 PPR Options.
+ * Attempt to negotiate 80MHz which will turn
+ * off these options.
+ */
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): PPR Rejected. "
+ "Trying simple U160 PPR\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ tinfo->goal.period = AHD_SYNCRATE_DT;
+ tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
+ | MSG_EXT_PPR_QAS_REQ
+ | MSG_EXT_PPR_DT_REQ;
+ } else {
+ /*
+ * Target does not support the PPR message.
+ * Attempt to negotiate SPI-2 style.
+ */
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): PPR Rejected. "
+ "Trying WDTR/SDTR\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ tinfo->goal.ppr_options = 0;
+ tinfo->curr.transport_version = 2;
+ tinfo->goal.transport_version = 2;
+ }
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_build_transfer_msg(ahd, devinfo);
+ ahd->msgout_index = 0;
+ response = 1;
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+
+ /* note 8bit xfers */
+ printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
+ "8bit transfers\n", ahd_name(ahd),
+ devinfo->channel, devinfo->target, devinfo->lun);
+ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ /*
+ * No need to clear the sync rate. If the target
+ * did not accept the command, our syncrate is
+ * unaffected. If the target started the negotiation,
+ * but rejected our response, we already cleared the
+ * sync rate before sending our WDTR.
+ */
+ if (tinfo->goal.offset != tinfo->curr.offset) {
+
+ /* Start the sync negotiation */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_build_transfer_msg(ahd, devinfo);
+ ahd->msgout_index = 0;
+ response = 1;
+ }
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ /* note asynch xfers and clear flag */
+ ahd_set_syncrate(ahd, devinfo, /*period*/0,
+ /*offset*/0, /*ppr_options*/0,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
+ "Using asynchronous transfers\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ int tag_type;
+ int mask;
+
+ tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+
+ if (tag_type == MSG_SIMPLE_TASK) {
+ printk("(%s:%c:%d:%d): refuses tagged commands. "
+ "Performing non-tagged I/O\n", ahd_name(ahd),
+ devinfo->channel, devinfo->target, devinfo->lun);
+ ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
+ mask = ~0x23;
+ } else {
+ printk("(%s:%c:%d:%d): refuses %s tagged commands. "
+ "Performing simple queue tagged I/O only\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ devinfo->lun, tag_type == MSG_ORDERED_TASK
+ ? "ordered" : "head of queue");
+ ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
+ mask = ~0x03;
+ }
+
+ /*
+ * Resend the identify for this CCB as the target
+ * may believe that the selection is invalid otherwise.
+ */
+ ahd_outb(ahd, SCB_CONTROL,
+ ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
+ scb->hscb->control &= mask;
+ ahd_set_transaction_tag(scb, /*enabled*/FALSE,
+ /*type*/MSG_SIMPLE_TASK);
+ ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
+ ahd_assert_atn(ahd);
+ ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
+ SCB_GET_TAG(scb));
+
+ /*
+ * Requeue all tagged commands for this target
+ * currently in our possession so they can be
+ * converted to untagged commands.
+ */
+ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
+ ROLE_INITIATOR, CAM_REQUEUE_REQ,
+ SEARCH_COMPLETE);
+ } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
+ /*
+ * Most likely the device believes that we had
+ * previously negotiated packetized.
+ */
+ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
+ | MSG_FLAG_IU_REQ_CHANGED;
+
+ ahd_force_renegotiation(ahd, devinfo);
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_build_transfer_msg(ahd, devinfo);
+ ahd->msgout_index = 0;
+ response = 1;
+ } else {
+ /*
+ * Otherwise, we ignore it.
+ */
+ printk("%s:%c:%d: Message reject for %x -- ignored\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ last_msg);
+ }
+ return (response);
+}
+
+/*
+ * Process an ingnore wide residue message.
+ */
+static void
+ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ u_int scb_index;
+ struct scb *scb;
+
+ scb_index = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scb_index);
+ /*
+ * XXX Actually check data direction in the sequencer?
+ * Perhaps add datadir to some spare bits in the hscb?
+ */
+ if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
+ || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
+ /*
+ * Ignore the message if we haven't
+ * seen an appropriate data phase yet.
+ */
+ } else {
+ /*
+ * If the residual occurred on the last
+ * transfer and the transfer request was
+ * expected to end on an odd count, do
+ * nothing. Otherwise, subtract a byte
+ * and update the residual count accordingly.
+ */
+ uint32_t sgptr;
+
+ sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
+ if ((sgptr & SG_LIST_NULL) != 0
+ && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
+ & SCB_XFERLEN_ODD) != 0) {
+ /*
+ * If the residual occurred on the last
+ * transfer and the transfer request was
+ * expected to end on an odd count, do
+ * nothing.
+ */
+ } else {
+ uint32_t data_cnt;
+ uint64_t data_addr;
+ uint32_t sglen;
+
+ /* Pull in the rest of the sgptr */
+ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+ data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
+ if ((sgptr & SG_LIST_NULL) != 0) {
+ /*
+ * The residual data count is not updated
+ * for the command run to completion case.
+ * Explicitly zero the count.
+ */
+ data_cnt &= ~AHD_SG_LEN_MASK;
+ }
+ data_addr = ahd_inq(ahd, SHADDR);
+ data_cnt += 1;
+ data_addr -= 1;
+ sgptr &= SG_PTR_MASK;
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+ /*
+ * The residual sg ptr points to the next S/G
+ * to load so we must go back one.
+ */
+ sg--;
+ sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
+ if (sg != scb->sg_list
+ && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
+
+ sg--;
+ sglen = ahd_le32toh(sg->len);
+ /*
+ * Preserve High Address and SG_LIST
+ * bits while setting the count to 1.
+ */
+ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
+ data_addr = ahd_le64toh(sg->addr)
+ + (sglen & AHD_SG_LEN_MASK)
+ - 1;
+
+ /*
+ * Increment sg so it points to the
+ * "next" sg.
+ */
+ sg++;
+ sgptr = ahd_sg_virt_to_bus(ahd, scb,
+ sg);
+ }
+ } else {
+ struct ahd_dma_seg *sg;
+
+ sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+ /*
+ * The residual sg ptr points to the next S/G
+ * to load so we must go back one.
+ */
+ sg--;
+ sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
+ if (sg != scb->sg_list
+ && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
+
+ sg--;
+ sglen = ahd_le32toh(sg->len);
+ /*
+ * Preserve High Address and SG_LIST
+ * bits while setting the count to 1.
+ */
+ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
+ data_addr = ahd_le32toh(sg->addr)
+ + (sglen & AHD_SG_LEN_MASK)
+ - 1;
+
+ /*
+ * Increment sg so it points to the
+ * "next" sg.
+ */
+ sg++;
+ sgptr = ahd_sg_virt_to_bus(ahd, scb,
+ sg);
+ }
+ }
+ /*
+ * Toggle the "oddness" of the transfer length
+ * to handle this mid-transfer ignore wide
+ * residue. This ensures that the oddness is
+ * correct for subsequent data transfers.
+ */
+ ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
+ ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
+ ^ SCB_XFERLEN_ODD);
+
+ ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
+ ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
+ /*
+ * The FIFO's pointers will be updated if/when the
+ * sequencer re-enters a data phase.
+ */
+ }
+ }
+}
+
+
+/*
+ * Reinitialize the data pointers for the active transfer
+ * based on its current residual.
+ */
+static void
+ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
+{
+ struct scb *scb;
+ ahd_mode_state saved_modes;
+ u_int scb_index;
+ u_int wait;
+ uint32_t sgptr;
+ uint32_t resid;
+ uint64_t dataptr;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
+ AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);
+
+ scb_index = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scb_index);
+
+ /*
+ * Release and reacquire the FIFO so we
+ * have a clean slate.
+ */
+ ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
+ wait = 1000;
+ while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
+ ahd_delay(100);
+ if (wait == 0) {
+ ahd_print_path(ahd, scb);
+ printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
+ ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
+ }
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, DFFSTAT,
+ ahd_inb(ahd, DFFSTAT)
+ | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));
+
+ /*
+ * Determine initial values for data_addr and data_cnt
+ * for resuming the data phase.
+ */
+ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+ sgptr &= SG_PTR_MASK;
+
+ resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
+ | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
+ | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);
+
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+ /* The residual sg_ptr always points to the next sg */
+ sg--;
+
+ dataptr = ahd_le64toh(sg->addr)
+ + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+ - resid;
+ ahd_outl(ahd, HADDR + 4, dataptr >> 32);
+ } else {
+ struct ahd_dma_seg *sg;
+
+ sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+ /* The residual sg_ptr always points to the next sg */
+ sg--;
+
+ dataptr = ahd_le32toh(sg->addr)
+ + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+ - resid;
+ ahd_outb(ahd, HADDR + 4,
+ (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
+ }
+ ahd_outl(ahd, HADDR, dataptr);
+ ahd_outb(ahd, HCNT + 2, resid >> 16);
+ ahd_outb(ahd, HCNT + 1, resid >> 8);
+ ahd_outb(ahd, HCNT, resid);
+}
+
+/*
+ * Handle the effects of issuing a bus device reset message.
+ */
+static void
+ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int lun, cam_status status, char *message,
+ int verbose_level)
+{
+#ifdef AHD_TARGET_MODE
+ struct ahd_tmode_tstate* tstate;
+#endif
+ int found;
+
+ found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
+ lun, SCB_LIST_NULL, devinfo->role,
+ status);
+
+#ifdef AHD_TARGET_MODE
+ /*
+ * Send an immediate notify ccb to all target mord peripheral
+ * drivers affected by this action.
+ */
+ tstate = ahd->enabled_targets[devinfo->our_scsiid];
+ if (tstate != NULL) {
+ u_int cur_lun;
+ u_int max_lun;
+
+ if (lun != CAM_LUN_WILDCARD) {
+ cur_lun = 0;
+ max_lun = AHD_NUM_LUNS - 1;
+ } else {
+ cur_lun = lun;
+ max_lun = lun;
+ }
+ for (;cur_lun <= max_lun; cur_lun++) {
+ struct ahd_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[cur_lun];
+ if (lstate == NULL)
+ continue;
+
+ ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
+ MSG_BUS_DEV_RESET, /*arg*/0);
+ ahd_send_lstate_events(ahd, lstate);
+ }
+ }
+#endif
+
+ /*
+ * Go back to async/narrow transfers and renegotiate.
+ */
+ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_CUR, /*paused*/TRUE);
+ ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
+ /*ppr_options*/0, AHD_TRANS_CUR,
+ /*paused*/TRUE);
+
+ if (status != CAM_SEL_TIMEOUT)
+ ahd_send_async(ahd, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_SENT_BDR);
+
+ if (message != NULL && bootverbose)
+ printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
+ message, devinfo->channel, devinfo->target, found);
+}
+
+#ifdef AHD_TARGET_MODE
+static void
+ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct scb *scb)
+{
+
+ /*
+ * To facilitate adding multiple messages together,
+ * each routine should increment the index and len
+ * variables instead of setting them explicitly.
+ */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+
+ if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
+ ahd_build_transfer_msg(ahd, devinfo);
+ else
+ panic("ahd_intr: AWAITING target message with no message");
+
+ ahd->msgout_index = 0;
+ ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
+}
+#endif
+/**************************** Initialization **********************************/
+static u_int
+ahd_sglist_size(struct ahd_softc *ahd)
+{
+ bus_size_t list_size;
+
+ list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG;
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+ list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG;
+ return (list_size);
+}
+
+/*
+ * Calculate the optimum S/G List allocation size. S/G elements used
+ * for a given transaction must be physically contiguous. Assume the
+ * OS will allocate full pages to us, so it doesn't make sense to request
+ * less than a page.
+ */
+static u_int
+ahd_sglist_allocsize(struct ahd_softc *ahd)
+{
+ bus_size_t sg_list_increment;
+ bus_size_t sg_list_size;
+ bus_size_t max_list_size;
+ bus_size_t best_list_size;
+
+ /* Start out with the minimum required for AHD_NSEG. */
+ sg_list_increment = ahd_sglist_size(ahd);
+ sg_list_size = sg_list_increment;
+
+ /* Get us as close as possible to a page in size. */
+ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
+ sg_list_size += sg_list_increment;
+
+ /*
+ * Try to reduce the amount of wastage by allocating
+ * multiple pages.
+ */
+ best_list_size = sg_list_size;
+ max_list_size = roundup(sg_list_increment, PAGE_SIZE);
+ if (max_list_size < 4 * PAGE_SIZE)
+ max_list_size = 4 * PAGE_SIZE;
+ if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment))
+ max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment);
+ while ((sg_list_size + sg_list_increment) <= max_list_size
+ && (sg_list_size % PAGE_SIZE) != 0) {
+ bus_size_t new_mod;
+ bus_size_t best_mod;
+
+ sg_list_size += sg_list_increment;
+ new_mod = sg_list_size % PAGE_SIZE;
+ best_mod = best_list_size % PAGE_SIZE;
+ if (new_mod > best_mod || new_mod == 0) {
+ best_list_size = sg_list_size;
+ }
+ }
+ return (best_list_size);
+}
+
+/*
+ * Allocate a controller structure for a new device
+ * and perform initial initializion.
+ */
+struct ahd_softc *
+ahd_alloc(void *platform_arg, char *name)
+{
+ struct ahd_softc *ahd;
+
+#ifndef __FreeBSD__
+ ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
+ if (!ahd) {
+ printk("aic7xxx: cannot malloc softc!\n");
+ kfree(name);
+ return NULL;
+ }
+#else
+ ahd = device_get_softc((device_t)platform_arg);
+#endif
+ memset(ahd, 0, sizeof(*ahd));
+ ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
+ if (ahd->seep_config == NULL) {
+#ifndef __FreeBSD__
+ kfree(ahd);
+#endif
+ kfree(name);
+ return (NULL);
+ }
+ LIST_INIT(&ahd->pending_scbs);
+ /* We don't know our unit number until the OSM sets it */
+ ahd->name = name;
+ ahd->unit = -1;
+ ahd->description = NULL;
+ ahd->bus_description = NULL;
+ ahd->channel = 'A';
+ ahd->chip = AHD_NONE;
+ ahd->features = AHD_FENONE;
+ ahd->bugs = AHD_BUGNONE;
+ ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
+ | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
+ ahd_timer_init(&ahd->reset_timer);
+ ahd_timer_init(&ahd->stat_timer);
+ ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
+ ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
+ ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
+ ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT;
+ ahd->int_coalescing_stop_threshold =
+ AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT;
+
+ if (ahd_platform_alloc(ahd, platform_arg) != 0) {
+ ahd_free(ahd);
+ ahd = NULL;
+ }
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MEMORY) != 0) {
+ printk("%s: scb size = 0x%x, hscb size = 0x%x\n",
+ ahd_name(ahd), (u_int)sizeof(struct scb),
+ (u_int)sizeof(struct hardware_scb));
+ }
+#endif
+ return (ahd);
+}
+
+int
+ahd_softc_init(struct ahd_softc *ahd)
+{
+
+ ahd->unpause = 0;
+ ahd->pause = PAUSE;
+ return (0);
+}
+
+void
+ahd_set_unit(struct ahd_softc *ahd, int unit)
+{
+ ahd->unit = unit;
+}
+
+void
+ahd_set_name(struct ahd_softc *ahd, char *name)
+{
+ if (ahd->name != NULL)
+ kfree(ahd->name);
+ ahd->name = name;
+}
+
+void
+ahd_free(struct ahd_softc *ahd)
+{
+ int i;
+
+ switch (ahd->init_level) {
+ default:
+ case 5:
+ ahd_shutdown(ahd);
+ /* FALLTHROUGH */
+ case 4:
+ ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap);
+ /* FALLTHROUGH */
+ case 3:
+ ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
+ ahd->shared_data_map.dmamap);
+ ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap);
+ /* FALLTHROUGH */
+ case 2:
+ ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
+ case 1:
+#ifndef __linux__
+ ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
+#endif
+ break;
+ case 0:
+ break;
+ }
+
+#ifndef __linux__
+ ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
+#endif
+ ahd_platform_free(ahd);
+ ahd_fini_scbdata(ahd);
+ for (i = 0; i < AHD_NUM_TARGETS; i++) {
+ struct ahd_tmode_tstate *tstate;
+
+ tstate = ahd->enabled_targets[i];
+ if (tstate != NULL) {
+#ifdef AHD_TARGET_MODE
+ int j;
+
+ for (j = 0; j < AHD_NUM_LUNS; j++) {
+ struct ahd_tmode_lstate *lstate;
+
+ lstate = tstate->enabled_luns[j];
+ if (lstate != NULL) {
+ xpt_free_path(lstate->path);
+ kfree(lstate);
+ }
+ }
+#endif
+ kfree(tstate);
+ }
+ }
+#ifdef AHD_TARGET_MODE
+ if (ahd->black_hole != NULL) {
+ xpt_free_path(ahd->black_hole->path);
+ kfree(ahd->black_hole);
+ }
+#endif
+ if (ahd->name != NULL)
+ kfree(ahd->name);
+ if (ahd->seep_config != NULL)
+ kfree(ahd->seep_config);
+ if (ahd->saved_stack != NULL)
+ kfree(ahd->saved_stack);
+#ifndef __FreeBSD__
+ kfree(ahd);
+#endif
+ return;
+}
+
+static void
+ahd_shutdown(void *arg)
+{
+ struct ahd_softc *ahd;
+
+ ahd = (struct ahd_softc *)arg;
+
+ /*
+ * Stop periodic timer callbacks.
+ */
+ ahd_timer_stop(&ahd->reset_timer);
+ ahd_timer_stop(&ahd->stat_timer);
+
+ /* This will reset most registers to 0, but not all */
+ ahd_reset(ahd, /*reinit*/FALSE);
+}
+
+/*
+ * Reset the controller and record some information about it
+ * that is only available just after a reset. If "reinit" is
+ * non-zero, this reset occurred after initial configuration
+ * and the caller requests that the chip be fully reinitialized
+ * to a runable state. Chip interrupts are *not* enabled after
+ * a reinitialization. The caller must enable interrupts via
+ * ahd_intr_enable().
+ */
+int
+ahd_reset(struct ahd_softc *ahd, int reinit)
+{
+ u_int sxfrctl1;
+ int wait;
+ uint32_t cmd;
+
+ /*
+ * Preserve the value of the SXFRCTL1 register for all channels.
+ * It contains settings that affect termination and we don't want
+ * to disturb the integrity of the bus.
+ */
+ ahd_pause(ahd);
+ ahd_update_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ sxfrctl1 = ahd_inb(ahd, SXFRCTL1);
+
+ cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+ if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
+ uint32_t mod_cmd;
+
+ /*
+ * A4 Razor #632
+ * During the assertion of CHIPRST, the chip
+ * does not disable its parity logic prior to
+ * the start of the reset. This may cause a
+ * parity error to be detected and thus a
+ * spurious SERR or PERR assertion. Disble
+ * PERR and SERR responses during the CHIPRST.
+ */
+ mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+ mod_cmd, /*bytes*/2);
+ }
+ ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);
+
+ /*
+ * Ensure that the reset has finished. We delay 1000us
+ * prior to reading the register to make sure the chip
+ * has sufficiently completed its reset to handle register
+ * accesses.
+ */
+ wait = 1000;
+ do {
+ ahd_delay(1000);
+ } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));
+
+ if (wait == 0) {
+ printk("%s: WARNING - Failed chip reset! "
+ "Trying to initialize anyway.\n", ahd_name(ahd));
+ }
+ ahd_outb(ahd, HCNTRL, ahd->pause);
+
+ if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
+ /*
+ * Clear any latched PCI error status and restore
+ * previous SERR and PERR response enables.
+ */
+ ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+ 0xFF, /*bytes*/1);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+ cmd, /*bytes*/2);
+ }
+
+ /*
+ * Mode should be SCSI after a chip reset, but lets
+ * set it just to be safe. We touch the MODE_PTR
+ * register directly so as to bypass the lazy update
+ * code in ahd_set_modes().
+ */
+ ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, MODE_PTR,
+ ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));
+
+ /*
+ * Restore SXFRCTL1.
+ *
+ * We must always initialize STPWEN to 1 before we
+ * restore the saved values. STPWEN is initialized
+ * to a tri-state condition which can only be cleared
+ * by turning it on.
+ */
+ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
+ ahd_outb(ahd, SXFRCTL1, sxfrctl1);
+
+ /* Determine chip configuration */
+ ahd->features &= ~AHD_WIDE;
+ if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
+ ahd->features |= AHD_WIDE;
+
+ /*
+ * If a recovery action has forced a chip reset,
+ * re-initialize the chip to our liking.
+ */
+ if (reinit != 0)
+ ahd_chip_init(ahd);
+
+ return (0);
+}
+
+/*
+ * Determine the number of SCBs available on the controller
+ */
+static int
+ahd_probe_scbs(struct ahd_softc *ahd) {
+ int i;
+
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ for (i = 0; i < AHD_SCB_MAX; i++) {
+ int j;
+
+ ahd_set_scbptr(ahd, i);
+ ahd_outw(ahd, SCB_BASE, i);
+ for (j = 2; j < 64; j++)
+ ahd_outb(ahd, SCB_BASE+j, 0);
+ /* Start out life as unallocated (needing an abort) */
+ ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
+ if (ahd_inw_scbram(ahd, SCB_BASE) != i)
+ break;
+ ahd_set_scbptr(ahd, 0);
+ if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
+ break;
+ }
+ return (i);
+}
+
+static void
+ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ dma_addr_t *baddr;
+
+ baddr = (dma_addr_t *)arg;
+ *baddr = segs->ds_addr;
+}
+
+static void
+ahd_initialize_hscbs(struct ahd_softc *ahd)
+{
+ int i;
+
+ for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
+ ahd_set_scbptr(ahd, i);
+
+ /* Clear the control byte. */
+ ahd_outb(ahd, SCB_CONTROL, 0);
+
+ /* Set the next pointer */
+ ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
+ }
+}
+
+static int
+ahd_init_scbdata(struct ahd_softc *ahd)
+{
+ struct scb_data *scb_data;
+ int i;
+
+ scb_data = &ahd->scb_data;
+ TAILQ_INIT(&scb_data->free_scbs);
+ for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
+ LIST_INIT(&scb_data->free_scb_lists[i]);
+ LIST_INIT(&scb_data->any_dev_free_scb_list);
+ SLIST_INIT(&scb_data->hscb_maps);
+ SLIST_INIT(&scb_data->sg_maps);
+ SLIST_INIT(&scb_data->sense_maps);
+
+ /* Determine the number of hardware SCBs and initialize them */
+ scb_data->maxhscbs = ahd_probe_scbs(ahd);
+ if (scb_data->maxhscbs == 0) {
+ printk("%s: No SCB space found\n", ahd_name(ahd));
+ return (ENXIO);
+ }
+
+ ahd_initialize_hscbs(ahd);
+
+ /*
+ * Create our DMA tags. These tags define the kinds of device
+ * accessible memory allocations and memory mappings we will
+ * need to perform during normal operation.
+ *
+ * Unless we need to further restrict the allocation, we rely
+ * on the restrictions of the parent dmat, hence the common
+ * use of MAXADDR and MAXSIZE.
+ */
+
+ /* DMA tag for our hardware scb structures */
+ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ PAGE_SIZE, /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &scb_data->hscb_dmat) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* DMA tag for our S/G structures. */
+ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ ahd_sglist_allocsize(ahd), /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &scb_data->sg_dmat) != 0) {
+ goto error_exit;
+ }
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
+ printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
+ ahd_sglist_allocsize(ahd));
+#endif
+
+ scb_data->init_level++;
+
+ /* DMA tag for our sense buffers. We allocate in page sized chunks */
+ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ PAGE_SIZE, /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &scb_data->sense_dmat) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* Perform initial CCB allocation */
+ ahd_alloc_scbs(ahd);
+
+ if (scb_data->numscbs == 0) {
+ printk("%s: ahd_init_scbdata - "
+ "Unable to allocate initial scbs\n",
+ ahd_name(ahd));
+ goto error_exit;
+ }
+
+ /*
+ * Note that we were successful
+ */
+ return (0);
+
+error_exit:
+
+ return (ENOMEM);
+}
+
+static struct scb *
+ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
+{
+ struct scb *scb;
+
+ /*
+ * Look on the pending list.
+ */
+ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+ if (SCB_GET_TAG(scb) == tag)
+ return (scb);
+ }
+
+ /*
+ * Then on all of the collision free lists.
+ */
+ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
+ struct scb *list_scb;
+
+ list_scb = scb;
+ do {
+ if (SCB_GET_TAG(list_scb) == tag)
+ return (list_scb);
+ list_scb = LIST_NEXT(list_scb, collision_links);
+ } while (list_scb);
+ }
+
+ /*
+ * And finally on the generic free list.
+ */
+ LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
+ if (SCB_GET_TAG(scb) == tag)
+ return (scb);
+ }
+
+ return (NULL);
+}
+
+static void
+ahd_fini_scbdata(struct ahd_softc *ahd)
+{
+ struct scb_data *scb_data;
+
+ scb_data = &ahd->scb_data;
+ if (scb_data == NULL)
+ return;
+
+ switch (scb_data->init_level) {
+ default:
+ case 7:
+ {
+ struct map_node *sns_map;
+
+ while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
+ SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
+ ahd_dmamap_unload(ahd, scb_data->sense_dmat,
+ sns_map->dmamap);
+ ahd_dmamem_free(ahd, scb_data->sense_dmat,
+ sns_map->vaddr, sns_map->dmamap);
+ kfree(sns_map);
+ }
+ ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
+ /* FALLTHROUGH */
+ }
+ case 6:
+ {
+ struct map_node *sg_map;
+
+ while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
+ SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
+ ahd_dmamap_unload(ahd, scb_data->sg_dmat,
+ sg_map->dmamap);
+ ahd_dmamem_free(ahd, scb_data->sg_dmat,
+ sg_map->vaddr, sg_map->dmamap);
+ kfree(sg_map);
+ }
+ ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
+ /* FALLTHROUGH */
+ }
+ case 5:
+ {
+ struct map_node *hscb_map;
+
+ while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
+ SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
+ ahd_dmamap_unload(ahd, scb_data->hscb_dmat,
+ hscb_map->dmamap);
+ ahd_dmamem_free(ahd, scb_data->hscb_dmat,
+ hscb_map->vaddr, hscb_map->dmamap);
+ kfree(hscb_map);
+ }
+ ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
+ /* FALLTHROUGH */
+ }
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ case 0:
+ break;
+ }
+}
+
+/*
+ * DSP filter Bypass must be enabled until the first selection
+ * after a change in bus mode (Razor #491 and #493).
+ */
+static void
+ahd_setup_iocell_workaround(struct ahd_softc *ahd)
+{
+ ahd_mode_state saved_modes;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
+ | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
+ ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("%s: Setting up iocell workaround\n", ahd_name(ahd));
+#endif
+ ahd_restore_modes(ahd, saved_modes);
+ ahd->flags &= ~AHD_HAD_FIRST_SEL;
+}
+
+static void
+ahd_iocell_first_selection(struct ahd_softc *ahd)
+{
+ ahd_mode_state saved_modes;
+ u_int sblkctl;
+
+ if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
+ return;
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ sblkctl = ahd_inb(ahd, SBLKCTL);
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("%s: iocell first selection\n", ahd_name(ahd));
+#endif
+ if ((sblkctl & ENAB40) != 0) {
+ ahd_outb(ahd, DSPDATACTL,
+ ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("%s: BYPASS now disabled\n", ahd_name(ahd));
+#endif
+ }
+ ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+ ahd_restore_modes(ahd, saved_modes);
+ ahd->flags |= AHD_HAD_FIRST_SEL;
+}
+
+/*************************** SCB Management ***********************************/
+static void
+ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
+{
+ struct scb_list *free_list;
+ struct scb_tailq *free_tailq;
+ struct scb *first_scb;
+
+ scb->flags |= SCB_ON_COL_LIST;
+ AHD_SET_SCB_COL_IDX(scb, col_idx);
+ free_list = &ahd->scb_data.free_scb_lists[col_idx];
+ free_tailq = &ahd->scb_data.free_scbs;
+ first_scb = LIST_FIRST(free_list);
+ if (first_scb != NULL) {
+ LIST_INSERT_AFTER(first_scb, scb, collision_links);
+ } else {
+ LIST_INSERT_HEAD(free_list, scb, collision_links);
+ TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
+ }
+}
+
+static void
+ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct scb_list *free_list;
+ struct scb_tailq *free_tailq;
+ struct scb *first_scb;
+ u_int col_idx;
+
+ scb->flags &= ~SCB_ON_COL_LIST;
+ col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
+ free_list = &ahd->scb_data.free_scb_lists[col_idx];
+ free_tailq = &ahd->scb_data.free_scbs;
+ first_scb = LIST_FIRST(free_list);
+ if (first_scb == scb) {
+ struct scb *next_scb;
+
+ /*
+ * Maintain order in the collision free
+ * lists for fairness if this device has
+ * other colliding tags active.
+ */
+ next_scb = LIST_NEXT(scb, collision_links);
+ if (next_scb != NULL) {
+ TAILQ_INSERT_AFTER(free_tailq, scb,
+ next_scb, links.tqe);
+ }
+ TAILQ_REMOVE(free_tailq, scb, links.tqe);
+ }
+ LIST_REMOVE(scb, collision_links);
+}
+
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+struct scb *
+ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
+{
+ struct scb *scb;
+ int tries;
+
+ tries = 0;
+look_again:
+ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
+ if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
+ ahd_rem_col_list(ahd, scb);
+ goto found;
+ }
+ }
+ if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {
+
+ if (tries++ != 0)
+ return (NULL);
+ ahd_alloc_scbs(ahd);
+ goto look_again;
+ }
+ LIST_REMOVE(scb, links.le);
+ if (col_idx != AHD_NEVER_COL_IDX
+ && (scb->col_scb != NULL)
+ && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
+ LIST_REMOVE(scb->col_scb, links.le);
+ ahd_add_col_list(ahd, scb->col_scb, col_idx);
+ }
+found:
+ scb->flags |= SCB_ACTIVE;
+ return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+void
+ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ /* Clean up for the next user */
+ scb->flags = SCB_FLAG_NONE;
+ scb->hscb->control = 0;
+ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;
+
+ if (scb->col_scb == NULL) {
+
+ /*
+ * No collision possible. Just free normally.
+ */
+ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+ scb, links.le);
+ } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {
+
+ /*
+ * The SCB we might have collided with is on
+ * a free collision list. Put both SCBs on
+ * the generic list.
+ */
+ ahd_rem_col_list(ahd, scb->col_scb);
+ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+ scb, links.le);
+ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+ scb->col_scb, links.le);
+ } else if ((scb->col_scb->flags
+ & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
+ && (scb->col_scb->hscb->control & TAG_ENB) != 0) {
+
+ /*
+ * The SCB we might collide with on the next allocation
+ * is still active in a non-packetized, tagged, context.
+ * Put us on the SCB collision list.
+ */
+ ahd_add_col_list(ahd, scb,
+ AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
+ } else {
+ /*
+ * The SCB we might collide with on the next allocation
+ * is either active in a packetized context, or free.
+ * Since we can't collide, put this SCB on the generic
+ * free list.
+ */
+ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+ scb, links.le);
+ }
+
+ ahd_platform_scb_free(ahd, scb);
+}
+
+static void
+ahd_alloc_scbs(struct ahd_softc *ahd)
+{
+ struct scb_data *scb_data;
+ struct scb *next_scb;
+ struct hardware_scb *hscb;
+ struct map_node *hscb_map;
+ struct map_node *sg_map;
+ struct map_node *sense_map;
+ uint8_t *segs;
+ uint8_t *sense_data;
+ dma_addr_t hscb_busaddr;
+ dma_addr_t sg_busaddr;
+ dma_addr_t sense_busaddr;
+ int newcount;
+ int i;
+
+ scb_data = &ahd->scb_data;
+ if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
+ /* Can't allocate any more */
+ return;
+
+ if (scb_data->scbs_left != 0) {
+ int offset;
+
+ offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
+ hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
+ hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
+ hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb));
+ } else {
+ hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC);
+
+ if (hscb_map == NULL)
+ return;
+
+ /* Allocate the next batch of hardware SCBs */
+ if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat,
+ (void **)&hscb_map->vaddr,
+ BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) {
+ kfree(hscb_map);
+ return;
+ }
+
+ SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);
+
+ ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap,
+ hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
+ &hscb_map->physaddr, /*flags*/0);
+
+ hscb = (struct hardware_scb *)hscb_map->vaddr;
+ hscb_busaddr = hscb_map->physaddr;
+ scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
+ }
+
+ if (scb_data->sgs_left != 0) {
+ int offset;
+
+ offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
+ - scb_data->sgs_left) * ahd_sglist_size(ahd);
+ sg_map = SLIST_FIRST(&scb_data->sg_maps);
+ segs = sg_map->vaddr + offset;
+ sg_busaddr = sg_map->physaddr + offset;
+ } else {
+ sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);
+
+ if (sg_map == NULL)
+ return;
+
+ /* Allocate the next batch of S/G lists */
+ if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat,
+ (void **)&sg_map->vaddr,
+ BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) {
+ kfree(sg_map);
+ return;
+ }
+
+ SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
+
+ ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap,
+ sg_map->vaddr, ahd_sglist_allocsize(ahd),
+ ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0);
+
+ segs = sg_map->vaddr;
+ sg_busaddr = sg_map->physaddr;
+ scb_data->sgs_left =
+ ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_MEMORY)
+ printk("Mapped SG data\n");
+#endif
+ }
+
+ if (scb_data->sense_left != 0) {
+ int offset;
+
+ offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
+ sense_map = SLIST_FIRST(&scb_data->sense_maps);
+ sense_data = sense_map->vaddr + offset;
+ sense_busaddr = sense_map->physaddr + offset;
+ } else {
+ sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC);
+
+ if (sense_map == NULL)
+ return;
+
+ /* Allocate the next batch of sense buffers */
+ if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat,
+ (void **)&sense_map->vaddr,
+ BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
+ kfree(sense_map);
+ return;
+ }
+
+ SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);
+
+ ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap,
+ sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
+ &sense_map->physaddr, /*flags*/0);
+
+ sense_data = sense_map->vaddr;
+ sense_busaddr = sense_map->physaddr;
+ scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_MEMORY)
+ printk("Mapped sense data\n");
+#endif
+ }
+
+ newcount = min(scb_data->sense_left, scb_data->scbs_left);
+ newcount = min(newcount, scb_data->sgs_left);
+ newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
+ for (i = 0; i < newcount; i++) {
+ struct scb_platform_data *pdata;
+ u_int col_tag;
+#ifndef __linux__
+ int error;
+#endif
+
+ next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
+ if (next_scb == NULL)
+ break;
+
+ pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
+ if (pdata == NULL) {
+ kfree(next_scb);
+ break;
+ }
+ next_scb->platform_data = pdata;
+ next_scb->hscb_map = hscb_map;
+ next_scb->sg_map = sg_map;
+ next_scb->sense_map = sense_map;
+ next_scb->sg_list = segs;
+ next_scb->sense_data = sense_data;
+ next_scb->sense_busaddr = sense_busaddr;
+ memset(hscb, 0, sizeof(*hscb));
+ next_scb->hscb = hscb;
+ hscb->hscb_busaddr = ahd_htole32(hscb_busaddr);
+
+ /*
+ * The sequencer always starts with the second entry.
+ * The first entry is embedded in the scb.
+ */
+ next_scb->sg_list_busaddr = sg_busaddr;
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+ next_scb->sg_list_busaddr
+ += sizeof(struct ahd_dma64_seg);
+ else
+ next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
+ next_scb->ahd_softc = ahd;
+ next_scb->flags = SCB_FLAG_NONE;
+#ifndef __linux__
+ error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
+ &next_scb->dmamap);
+ if (error != 0) {
+ kfree(next_scb);
+ kfree(pdata);
+ break;
+ }
+#endif
+ next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
+ col_tag = scb_data->numscbs ^ 0x100;
+ next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
+ if (next_scb->col_scb != NULL)
+ next_scb->col_scb->col_scb = next_scb;
+ ahd_free_scb(ahd, next_scb);
+ hscb++;
+ hscb_busaddr += sizeof(*hscb);
+ segs += ahd_sglist_size(ahd);
+ sg_busaddr += ahd_sglist_size(ahd);
+ sense_data += AHD_SENSE_BUFSIZE;
+ sense_busaddr += AHD_SENSE_BUFSIZE;
+ scb_data->numscbs++;
+ scb_data->sense_left--;
+ scb_data->scbs_left--;
+ scb_data->sgs_left--;
+ }
+}
+
+void
+ahd_controller_info(struct ahd_softc *ahd, char *buf)
+{
+ const char *speed;
+ const char *type;
+ int len;
+
+ len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
+ buf += len;
+
+ speed = "Ultra320 ";
+ if ((ahd->features & AHD_WIDE) != 0) {
+ type = "Wide ";
+ } else {
+ type = "Single ";
+ }
+ len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
+ speed, type, ahd->channel, ahd->our_id);
+ buf += len;
+
+ sprintf(buf, "%s, %d SCBs", ahd->bus_description,
+ ahd->scb_data.maxhscbs);
+}
+
+static const char *channel_strings[] = {
+ "Primary Low",
+ "Primary High",
+ "Secondary Low",
+ "Secondary High"
+};
+
+static const char *termstat_strings[] = {
+ "Terminated Correctly",
+ "Over Terminated",
+ "Under Terminated",
+ "Not Configured"
+};
+
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+typedef void ahd_linux_callback_t (u_long);
+
+static void
+ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+{
+ struct ahd_softc *ahd;
+
+ ahd = (struct ahd_softc *)arg;
+ del_timer(timer);
+ timer->data = (u_long)arg;
+ timer->expires = jiffies + (usec * HZ)/1000000;
+ timer->function = (ahd_linux_callback_t*)func;
+ add_timer(timer);
+}
+
+/*
+ * Start the board, ready for normal operation
+ */
+int
+ahd_init(struct ahd_softc *ahd)
+{
+ uint8_t *next_vaddr;
+ dma_addr_t next_baddr;
+ size_t driver_data_size;
+ int i;
+ int error;
+ u_int warn_user;
+ uint8_t current_sensing;
+ uint8_t fstat;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+ ahd->stack_size = ahd_probe_stack_size(ahd);
+ ahd->saved_stack = kmalloc(ahd->stack_size * sizeof(uint16_t), GFP_ATOMIC);
+ if (ahd->saved_stack == NULL)
+ return (ENOMEM);
+
+ /*
+ * Verify that the compiler hasn't over-aggressively
+ * padded important structures.
+ */
+ if (sizeof(struct hardware_scb) != 64)
+ panic("Hardware SCB size is incorrect");
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
+ ahd->flags |= AHD_SEQUENCER_DEBUG;
+#endif
+
+ /*
+ * Default to allowing initiator operations.
+ */
+ ahd->flags |= AHD_INITIATORROLE;
+
+ /*
+ * Only allow target mode features if this unit has them enabled.
+ */
+ if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
+ ahd->features &= ~AHD_TARGETMODE;
+
+#ifndef __linux__
+ /* DMA tag for mapping buffers into device visible space. */
+ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
+ ? (dma_addr_t)0x7FFFFFFFFFULL
+ : BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
+ /*nsegments*/AHD_NSEG,
+ /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
+ /*flags*/BUS_DMA_ALLOCNOW,
+ &ahd->buffer_dmat) != 0) {
+ return (ENOMEM);
+ }
+#endif
+
+ ahd->init_level++;
+
+ /*
+ * DMA tag for our command fifos and other data in system memory
+ * the card's sequencer must be able to access. For initiator
+ * roles, we need to allocate space for the qoutfifo. When providing
+ * for the target mode role, we must additionally provide space for
+ * the incoming target command fifo.
+ */
+ driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo)
+ + sizeof(struct hardware_scb);
+ if ((ahd->features & AHD_TARGETMODE) != 0)
+ driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+ if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
+ driver_data_size += PKT_OVERRUN_BUFSIZE;
+ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ driver_data_size,
+ /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &ahd->shared_data_dmat) != 0) {
+ return (ENOMEM);
+ }
+
+ ahd->init_level++;
+
+ /* Allocation of driver data */
+ if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat,
+ (void **)&ahd->shared_data_map.vaddr,
+ BUS_DMA_NOWAIT,
+ &ahd->shared_data_map.dmamap) != 0) {
+ return (ENOMEM);
+ }
+
+ ahd->init_level++;
+
+ /* And permanently map it in */
+ ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ ahd->shared_data_map.vaddr, driver_data_size,
+ ahd_dmamap_cb, &ahd->shared_data_map.physaddr,
+ /*flags*/0);
+ ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
+ next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
+ next_baddr = ahd->shared_data_map.physaddr
+ + AHD_QOUT_SIZE*sizeof(struct ahd_completion);
+ if ((ahd->features & AHD_TARGETMODE) != 0) {
+ ahd->targetcmds = (struct target_cmd *)next_vaddr;
+ next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+ next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+ }
+
+ if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
+ ahd->overrun_buf = next_vaddr;
+ next_vaddr += PKT_OVERRUN_BUFSIZE;
+ next_baddr += PKT_OVERRUN_BUFSIZE;
+ }
+
+ /*
+ * We need one SCB to serve as the "next SCB". Since the
+ * tag identifier in this SCB will never be used, there is
+ * no point in using a valid HSCB tag from an SCB pulled from
+ * the standard free pool. So, we allocate this "sentinel"
+ * specially from the DMA safe memory chunk used for the QOUTFIFO.
+ */
+ ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
+ ahd->next_queued_hscb_map = &ahd->shared_data_map;
+ ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr);
+
+ ahd->init_level++;
+
+ /* Allocate SCB data now that buffer_dmat is initialized */
+ if (ahd_init_scbdata(ahd) != 0)
+ return (ENOMEM);
+
+ if ((ahd->flags & AHD_INITIATORROLE) == 0)
+ ahd->flags &= ~AHD_RESET_BUS_A;
+
+ /*
+ * Before committing these settings to the chip, give
+ * the OSM one last chance to modify our configuration.
+ */
+ ahd_platform_init(ahd);
+
+ /* Bring up the chip. */
+ ahd_chip_init(ahd);
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+ if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
+ goto init_done;
+
+ /*
+ * Verify termination based on current draw and
+ * warn user if the bus is over/under terminated.
+ */
+ error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
+ CURSENSE_ENB);
+ if (error != 0) {
+ printk("%s: current sensing timeout 1\n", ahd_name(ahd));
+ goto init_done;
+ }
+ for (i = 20, fstat = FLX_FSTAT_BUSY;
+ (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
+ error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
+ if (error != 0) {
+ printk("%s: current sensing timeout 2\n",
+ ahd_name(ahd));
+ goto init_done;
+ }
+ }
+ if (i == 0) {
+ printk("%s: Timedout during current-sensing test\n",
+ ahd_name(ahd));
+ goto init_done;
+ }
+
+ /* Latch Current Sensing status. */
+ error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
+ if (error != 0) {
+ printk("%s: current sensing timeout 3\n", ahd_name(ahd));
+ goto init_done;
+ }
+
+ /* Diable current sensing. */
+ ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
+ printk("%s: current_sensing == 0x%x\n",
+ ahd_name(ahd), current_sensing);
+ }
+#endif
+ warn_user = 0;
+ for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
+ u_int term_stat;
+
+ term_stat = (current_sensing & FLX_CSTAT_MASK);
+ switch (term_stat) {
+ case FLX_CSTAT_OVER:
+ case FLX_CSTAT_UNDER:
+ warn_user++;
+ case FLX_CSTAT_INVALID:
+ case FLX_CSTAT_OKAY:
+ if (warn_user == 0 && bootverbose == 0)
+ break;
+ printk("%s: %s Channel %s\n", ahd_name(ahd),
+ channel_strings[i], termstat_strings[term_stat]);
+ break;
+ }
+ }
+ if (warn_user) {
+ printk("%s: WARNING. Termination is not configured correctly.\n"
+ "%s: WARNING. SCSI bus operations may FAIL.\n",
+ ahd_name(ahd), ahd_name(ahd));
+ }
+init_done:
+ ahd_restart(ahd);
+ ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
+ ahd_stat_timer, ahd);
+ return (0);
+}
+
+/*
+ * (Re)initialize chip state after a chip reset.
+ */
+static void
+ahd_chip_init(struct ahd_softc *ahd)
+{
+ uint32_t busaddr;
+ u_int sxfrctl1;
+ u_int scsiseq_template;
+ u_int wait;
+ u_int i;
+ u_int target;
+
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ /*
+ * Take the LED out of diagnostic mode
+ */
+ ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));
+
+ /*
+ * Return HS_MAILBOX to its default value.
+ */
+ ahd->hs_mailbox = 0;
+ ahd_outb(ahd, HS_MAILBOX, 0);
+
+ /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
+ ahd_outb(ahd, IOWNID, ahd->our_id);
+ ahd_outb(ahd, TOWNID, ahd->our_id);
+ sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
+ sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
+ if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
+ && (ahd->seltime != STIMESEL_MIN)) {
+ /*
+ * The selection timer duration is twice as long
+ * as it should be. Halve it by adding "1" to
+ * the user specified setting.
+ */
+ sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
+ } else {
+ sxfrctl1 |= ahd->seltime;
+ }
+
+ ahd_outb(ahd, SXFRCTL0, DFON);
+ ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
+ ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
+
+ /*
+ * Now that termination is set, wait for up
+ * to 500ms for our transceivers to settle. If
+ * the adapter does not have a cable attached,
+ * the transceivers may never settle, so don't
+ * complain if we fail here.
+ */
+ for (wait = 10000;
+ (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
+ wait--)
+ ahd_delay(100);
+
+ /* Clear any false bus resets due to the transceivers settling */
+ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+
+ /* Initialize mode specific S/G state. */
+ for (i = 0; i < 2; i++) {
+ ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
+ ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
+ ahd_outb(ahd, SG_STATE, 0);
+ ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
+ ahd_outb(ahd, SEQIMODE,
+ ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
+ |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
+ }
+
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
+ ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
+ ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
+ ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
+ if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+ ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
+ } else {
+ ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
+ }
+ ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
+ if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
+ /*
+ * Do not issue a target abort when a split completion
+ * error occurs. Let our PCIX interrupt handler deal
+ * with it instead. H2A4 Razor #625
+ */
+ ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);
+
+ if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
+ ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);
+
+ /*
+ * Tweak IOCELL settings.
+ */
+ if ((ahd->flags & AHD_HP_BOARD) != 0) {
+ for (i = 0; i < NUMDSPS; i++) {
+ ahd_outb(ahd, DSPSELECT, i);
+ ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
+ }
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
+ WRTBIASCTL_HP_DEFAULT);
+#endif
+ }
+ ahd_setup_iocell_workaround(ahd);
+
+ /*
+ * Enable LQI Manager interrupts.
+ */
+ ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
+ | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
+ | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
+ ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
+ /*
+ * We choose to have the sequencer catch LQOPHCHGINPKT errors
+ * manually for the command phase at the start of a packetized
+ * selection case. ENLQOBUSFREE should be made redundant by
+ * the BUSFREE interrupt, but it seems that some LQOBUSFREE
+ * events fail to assert the BUSFREE interrupt so we must
+ * also enable LQOBUSFREE interrupts.
+ */
+ ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE);
+
+ /*
+ * Setup sequencer interrupt handlers.
+ */
+ ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
+ ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));
+
+ /*
+ * Setup SCB Offset registers.
+ */
+ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
+ ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
+ pkt_long_lun));
+ } else {
+ ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
+ }
+ ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
+ ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
+ ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
+ ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
+ shared_data.idata.cdb));
+ ahd_outb(ahd, QNEXTPTR,
+ offsetof(struct hardware_scb, next_hscb_busaddr));
+ ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
+ ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
+ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
+ ahd_outb(ahd, LUNLEN,
+ sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
+ } else {
+ ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
+ }
+ ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
+ ahd_outb(ahd, MAXCMD, 0xFF);
+ ahd_outb(ahd, SCBAUTOPTR,
+ AUSCBPTR_EN | offsetof(struct hardware_scb, tag));
+
+ /* We haven't been enabled for target mode yet. */
+ ahd_outb(ahd, MULTARGID, 0);
+ ahd_outb(ahd, MULTARGID + 1, 0);
+
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ /* Initialize the negotiation table. */
+ if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
+ /*
+ * Clear the spare bytes in the neg table to avoid
+ * spurious parity errors.
+ */
+ for (target = 0; target < AHD_NUM_TARGETS; target++) {
+ ahd_outb(ahd, NEGOADDR, target);
+ ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
+ for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
+ ahd_outb(ahd, ANNEXDAT, 0);
+ }
+ }
+ for (target = 0; target < AHD_NUM_TARGETS; target++) {
+ struct ahd_devinfo devinfo;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ target, &tstate);
+ ahd_compile_devinfo(&devinfo, ahd->our_id,
+ target, CAM_LUN_WILDCARD,
+ 'A', ROLE_INITIATOR);
+ ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
+ }
+
+ ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
+ ahd_outb(ahd, CLRINT, CLRSCSIINT);
+
+#ifdef NEEDS_MORE_TESTING
+ /*
+ * Always enable abort on incoming L_Qs if this feature is
+ * supported. We use this to catch invalid SCB references.
+ */
+ if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
+ ahd_outb(ahd, LQCTL1, ABORTPENDING);
+ else
+#endif
+ ahd_outb(ahd, LQCTL1, 0);
+
+ /* All of our queues are empty */
+ ahd->qoutfifonext = 0;
+ ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID;
+ ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID);
+ for (i = 0; i < AHD_QOUT_SIZE; i++)
+ ahd->qoutfifo[i].valid_tag = 0;
+ ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);
+
+ ahd->qinfifonext = 0;
+ for (i = 0; i < AHD_QIN_SIZE; i++)
+ ahd->qinfifo[i] = SCB_LIST_NULL;
+
+ if ((ahd->features & AHD_TARGETMODE) != 0) {
+ /* All target command blocks start out invalid. */
+ for (i = 0; i < AHD_TMODE_CMDS; i++)
+ ahd->targetcmds[i].cmd_valid = 0;
+ ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
+ ahd->tqinfifonext = 1;
+ ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
+ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
+ }
+
+ /* Initialize Scratch Ram. */
+ ahd_outb(ahd, SEQ_FLAGS, 0);
+ ahd_outb(ahd, SEQ_FLAGS2, 0);
+
+ /* We don't have any waiting selections */
+ ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
+ ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
+ ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL);
+ ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF);
+ for (i = 0; i < AHD_NUM_TARGETS; i++)
+ ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);
+
+ /*
+ * Nobody is waiting to be DMAed into the QOUTFIFO.
+ */
+ ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
+ ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
+ ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
+ ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
+ ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
+
+ /*
+ * The Freeze Count is 0.
+ */
+ ahd->qfreeze_cnt = 0;
+ ahd_outw(ahd, QFREEZE_COUNT, 0);
+ ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0);
+
+ /*
+ * Tell the sequencer where it can find our arrays in memory.
+ */
+ busaddr = ahd->shared_data_map.physaddr;
+ ahd_outl(ahd, SHARED_DATA_ADDR, busaddr);
+ ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr);
+
+ /*
+ * Setup the allowed SCSI Sequences based on operational mode.
+ * If we are a target, we'll enable select in operations once
+ * we've had a lun enabled.
+ */
+ scsiseq_template = ENAUTOATNP;
+ if ((ahd->flags & AHD_INITIATORROLE) != 0)
+ scsiseq_template |= ENRSELI;
+ ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);
+
+ /* There are no busy SCBs yet. */
+ for (target = 0; target < AHD_NUM_TARGETS; target++) {
+ int lun;
+
+ for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
+ ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
+ }
+
+ /*
+ * Initialize the group code to command length table.
+ * Vendor Unique codes are set to 0 so we only capture
+ * the first byte of the cdb. These can be overridden
+ * when target mode is enabled.
+ */
+ ahd_outb(ahd, CMDSIZE_TABLE, 5);
+ ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
+ ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
+ ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
+ ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
+ ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
+ ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
+ ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
+
+ /* Tell the sequencer of our initial queue positions */
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+ ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
+ ahd->qinfifonext = 0;
+ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+ ahd_set_hescb_qoff(ahd, 0);
+ ahd_set_snscb_qoff(ahd, 0);
+ ahd_set_sescb_qoff(ahd, 0);
+ ahd_set_sdscb_qoff(ahd, 0);
+
+ /*
+ * Tell the sequencer which SCB will be the next one it receives.
+ */
+ busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
+ ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
+
+ /*
+ * Default to coalescing disabled.
+ */
+ ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
+ ahd_outw(ahd, CMDS_PENDING, 0);
+ ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
+ ahd->int_coalescing_maxcmds,
+ ahd->int_coalescing_mincmds);
+ ahd_enable_coalescing(ahd, FALSE);
+
+ ahd_loadseq(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
+ u_int negodat3 = ahd_inb(ahd, NEGCONOPTS);
+
+ negodat3 |= ENSLOWCRC;
+ ahd_outb(ahd, NEGCONOPTS, negodat3);
+ negodat3 = ahd_inb(ahd, NEGCONOPTS);
+ if (!(negodat3 & ENSLOWCRC))
+ printk("aic79xx: failed to set the SLOWCRC bit\n");
+ else
+ printk("aic79xx: SLOWCRC bit set\n");
+ }
+}
+
+/*
+ * Setup default device and controller settings.
+ * This should only be called if our probe has
+ * determined that no configuration data is available.
+ */
+int
+ahd_default_config(struct ahd_softc *ahd)
+{
+ int targ;
+
+ ahd->our_id = 7;
+
+ /*
+ * Allocate a tstate to house information for our
+ * initiator presence on the bus as well as the user
+ * data for any target mode initiator.
+ */
+ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
+ printk("%s: unable to allocate ahd_tmode_tstate. "
+ "Failing attach\n", ahd_name(ahd));
+ return (ENOMEM);
+ }
+
+ for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
+ struct ahd_devinfo devinfo;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ uint16_t target_mask;
+
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ targ, &tstate);
+ /*
+ * We support SPC2 and SPI4.
+ */
+ tinfo->user.protocol_version = 4;
+ tinfo->user.transport_version = 4;
+
+ target_mask = 0x01 << targ;
+ ahd->user_discenable |= target_mask;
+ tstate->discenable |= target_mask;
+ ahd->user_tagenable |= target_mask;
+#ifdef AHD_FORCE_160
+ tinfo->user.period = AHD_SYNCRATE_DT;
+#else
+ tinfo->user.period = AHD_SYNCRATE_160;
+#endif
+ tinfo->user.offset = MAX_OFFSET;
+ tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
+ | MSG_EXT_PPR_WR_FLOW
+ | MSG_EXT_PPR_HOLD_MCS
+ | MSG_EXT_PPR_IU_REQ
+ | MSG_EXT_PPR_QAS_REQ
+ | MSG_EXT_PPR_DT_REQ;
+ if ((ahd->features & AHD_RTI) != 0)
+ tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;
+
+ tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
+
+ /*
+ * Start out Async/Narrow/Untagged and with
+ * conservative protocol support.
+ */
+ tinfo->goal.protocol_version = 2;
+ tinfo->goal.transport_version = 2;
+ tinfo->curr.protocol_version = 2;
+ tinfo->curr.transport_version = 2;
+ ahd_compile_devinfo(&devinfo, ahd->our_id,
+ targ, CAM_LUN_WILDCARD,
+ 'A', ROLE_INITIATOR);
+ tstate->tagenable &= ~target_mask;
+ ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
+ ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
+ /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ }
+ return (0);
+}
+
+/*
+ * Parse device configuration information.
+ */
+int
+ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
+{
+ int targ;
+ int max_targ;
+
+ max_targ = sc->max_targets & CFMAXTARG;
+ ahd->our_id = sc->brtime_id & CFSCSIID;
+
+ /*
+ * Allocate a tstate to house information for our
+ * initiator presence on the bus as well as the user
+ * data for any target mode initiator.
+ */
+ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
+ printk("%s: unable to allocate ahd_tmode_tstate. "
+ "Failing attach\n", ahd_name(ahd));
+ return (ENOMEM);
+ }
+
+ for (targ = 0; targ < max_targ; targ++) {
+ struct ahd_devinfo devinfo;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_transinfo *user_tinfo;
+ struct ahd_tmode_tstate *tstate;
+ uint16_t target_mask;
+
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ targ, &tstate);
+ user_tinfo = &tinfo->user;
+
+ /*
+ * We support SPC2 and SPI4.
+ */
+ tinfo->user.protocol_version = 4;
+ tinfo->user.transport_version = 4;
+
+ target_mask = 0x01 << targ;
+ ahd->user_discenable &= ~target_mask;
+ tstate->discenable &= ~target_mask;
+ ahd->user_tagenable &= ~target_mask;
+ if (sc->device_flags[targ] & CFDISC) {
+ tstate->discenable |= target_mask;
+ ahd->user_discenable |= target_mask;
+ ahd->user_tagenable |= target_mask;
+ } else {
+ /*
+ * Cannot be packetized without disconnection.
+ */
+ sc->device_flags[targ] &= ~CFPACKETIZED;
+ }
+
+ user_tinfo->ppr_options = 0;
+ user_tinfo->period = (sc->device_flags[targ] & CFXFER);
+ if (user_tinfo->period < CFXFER_ASYNC) {
+ if (user_tinfo->period <= AHD_PERIOD_10MHz)
+ user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
+ user_tinfo->offset = MAX_OFFSET;
+ } else {
+ user_tinfo->offset = 0;
+ user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
+ }
+#ifdef AHD_FORCE_160
+ if (user_tinfo->period <= AHD_SYNCRATE_160)
+ user_tinfo->period = AHD_SYNCRATE_DT;
+#endif
+
+ if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
+ user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
+ | MSG_EXT_PPR_WR_FLOW
+ | MSG_EXT_PPR_HOLD_MCS
+ | MSG_EXT_PPR_IU_REQ;
+ if ((ahd->features & AHD_RTI) != 0)
+ user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
+ }
+
+ if ((sc->device_flags[targ] & CFQAS) != 0)
+ user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;
+
+ if ((sc->device_flags[targ] & CFWIDEB) != 0)
+ user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
+ else
+ user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0)
+ printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
+ user_tinfo->period, user_tinfo->offset,
+ user_tinfo->ppr_options);
+#endif
+ /*
+ * Start out Async/Narrow/Untagged and with
+ * conservative protocol support.
+ */
+ tstate->tagenable &= ~target_mask;
+ tinfo->goal.protocol_version = 2;
+ tinfo->goal.transport_version = 2;
+ tinfo->curr.protocol_version = 2;
+ tinfo->curr.transport_version = 2;
+ ahd_compile_devinfo(&devinfo, ahd->our_id,
+ targ, CAM_LUN_WILDCARD,
+ 'A', ROLE_INITIATOR);
+ ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
+ ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
+ /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ }
+
+ ahd->flags &= ~AHD_SPCHK_ENB_A;
+ if (sc->bios_control & CFSPARITY)
+ ahd->flags |= AHD_SPCHK_ENB_A;
+
+ ahd->flags &= ~AHD_RESET_BUS_A;
+ if (sc->bios_control & CFRESETB)
+ ahd->flags |= AHD_RESET_BUS_A;
+
+ ahd->flags &= ~AHD_EXTENDED_TRANS_A;
+ if (sc->bios_control & CFEXTEND)
+ ahd->flags |= AHD_EXTENDED_TRANS_A;
+
+ ahd->flags &= ~AHD_BIOS_ENABLED;
+ if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
+ ahd->flags |= AHD_BIOS_ENABLED;
+
+ ahd->flags &= ~AHD_STPWLEVEL_A;
+ if ((sc->adapter_control & CFSTPWLEVEL) != 0)
+ ahd->flags |= AHD_STPWLEVEL_A;
+
+ return (0);
+}
+
+/*
+ * Parse device configuration information.
+ */
+int
+ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
+{
+ int error;
+
+ error = ahd_verify_vpd_cksum(vpd);
+ if (error == 0)
+ return (EINVAL);
+ if ((vpd->bios_flags & VPDBOOTHOST) != 0)
+ ahd->flags |= AHD_BOOT_CHANNEL;
+ return (0);
+}
+
+void
+ahd_intr_enable(struct ahd_softc *ahd, int enable)
+{
+ u_int hcntrl;
+
+ hcntrl = ahd_inb(ahd, HCNTRL);
+ hcntrl &= ~INTEN;
+ ahd->pause &= ~INTEN;
+ ahd->unpause &= ~INTEN;
+ if (enable) {
+ hcntrl |= INTEN;
+ ahd->pause |= INTEN;
+ ahd->unpause |= INTEN;
+ }
+ ahd_outb(ahd, HCNTRL, hcntrl);
+}
+
+static void
+ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
+ u_int mincmds)
+{
+ if (timer > AHD_TIMER_MAX_US)
+ timer = AHD_TIMER_MAX_US;
+ ahd->int_coalescing_timer = timer;
+
+ if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
+ maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
+ if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
+ mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
+ ahd->int_coalescing_maxcmds = maxcmds;
+ ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
+ ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
+ ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
+}
+
+static void
+ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
+{
+
+ ahd->hs_mailbox &= ~ENINT_COALESCE;
+ if (enable)
+ ahd->hs_mailbox |= ENINT_COALESCE;
+ ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
+ ahd_flush_device_writes(ahd);
+ ahd_run_qoutfifo(ahd);
+}
+
+/*
+ * Ensure that the card is paused in a location
+ * outside of all critical sections and that all
+ * pending work is completed prior to returning.
+ * This routine should only be called from outside
+ * an interrupt context.
+ */
+void
+ahd_pause_and_flushwork(struct ahd_softc *ahd)
+{
+ u_int intstat;
+ u_int maxloops;
+
+ maxloops = 1000;
+ ahd->flags |= AHD_ALL_INTERRUPTS;
+ ahd_pause(ahd);
+ /*
+ * Freeze the outgoing selections. We do this only
+ * until we are safely paused without further selections
+ * pending.
+ */
+ ahd->qfreeze_cnt--;
+ ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
+ ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
+ do {
+
+ ahd_unpause(ahd);
+ /*
+ * Give the sequencer some time to service
+ * any active selections.
+ */
+ ahd_delay(500);
+
+ ahd_intr(ahd);
+ ahd_pause(ahd);
+ intstat = ahd_inb(ahd, INTSTAT);
+ if ((intstat & INT_PEND) == 0) {
+ ahd_clear_critical_section(ahd);
+ intstat = ahd_inb(ahd, INTSTAT);
+ }
+ } while (--maxloops
+ && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
+ && ((intstat & INT_PEND) != 0
+ || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
+ || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));
+
+ if (maxloops == 0) {
+ printk("Infinite interrupt loop, INTSTAT = %x",
+ ahd_inb(ahd, INTSTAT));
+ }
+ ahd->qfreeze_cnt++;
+ ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
+
+ ahd_flush_qoutfifo(ahd);
+
+ ahd->flags &= ~AHD_ALL_INTERRUPTS;
+}
+
+#ifdef CONFIG_PM
+int
+ahd_suspend(struct ahd_softc *ahd)
+{
+
+ ahd_pause_and_flushwork(ahd);
+
+ if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
+ ahd_unpause(ahd);
+ return (EBUSY);
+ }
+ ahd_shutdown(ahd);
+ return (0);
+}
+
+void
+ahd_resume(struct ahd_softc *ahd)
+{
+
+ ahd_reset(ahd, /*reinit*/TRUE);
+ ahd_intr_enable(ahd, TRUE);
+ ahd_restart(ahd);
+}
+#endif
+
+/************************** Busy Target Table *********************************/
+/*
+ * Set SCBPTR to the SCB that contains the busy
+ * table entry for TCL. Return the offset into
+ * the SCB that contains the entry for TCL.
+ * saved_scbid is dereferenced and set to the
+ * scbid that should be restored once manipualtion
+ * of the TCL entry is complete.
+ */
+static inline u_int
+ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
+{
+ /*
+ * Index to the SCB that contains the busy entry.
+ */
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ *saved_scbid = ahd_get_scbptr(ahd);
+ ahd_set_scbptr(ahd, TCL_LUN(tcl)
+ | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));
+
+ /*
+ * And now calculate the SCB offset to the entry.
+ * Each entry is 2 bytes wide, hence the
+ * multiplication by 2.
+ */
+ return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS);
+}
+
+/*
+ * Return the untagged transaction id for a given target/channel lun.
+ */
+static u_int
+ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
+{
+ u_int scbid;
+ u_int scb_offset;
+ u_int saved_scbptr;
+
+ scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
+ scbid = ahd_inw_scbram(ahd, scb_offset);
+ ahd_set_scbptr(ahd, saved_scbptr);
+ return (scbid);
+}
+
+static void
+ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
+{
+ u_int scb_offset;
+ u_int saved_scbptr;
+
+ scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
+ ahd_outw(ahd, scb_offset, scbid);
+ ahd_set_scbptr(ahd, saved_scbptr);
+}
+
+/************************** SCB and SCB queue management **********************/
+static int
+ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
+ char channel, int lun, u_int tag, role_t role)
+{
+ int targ = SCB_GET_TARGET(ahd, scb);
+ char chan = SCB_GET_CHANNEL(ahd, scb);
+ int slun = SCB_GET_LUN(scb);
+ int match;
+
+ match = ((chan == channel) || (channel == ALL_CHANNELS));
+ if (match != 0)
+ match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
+ if (match != 0)
+ match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
+ if (match != 0) {
+#ifdef AHD_TARGET_MODE
+ int group;
+
+ group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
+ if (role == ROLE_INITIATOR) {
+ match = (group != XPT_FC_GROUP_TMODE)
+ && ((tag == SCB_GET_TAG(scb))
+ || (tag == SCB_LIST_NULL));
+ } else if (role == ROLE_TARGET) {
+ match = (group == XPT_FC_GROUP_TMODE)
+ && ((tag == scb->io_ctx->csio.tag_id)
+ || (tag == SCB_LIST_NULL));
+ }
+#else /* !AHD_TARGET_MODE */
+ match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
+#endif /* AHD_TARGET_MODE */
+ }
+
+ return match;
+}
+
+static void
+ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
+{
+ int target;
+ char channel;
+ int lun;
+
+ target = SCB_GET_TARGET(ahd, scb);
+ lun = SCB_GET_LUN(scb);
+ channel = SCB_GET_CHANNEL(ahd, scb);
+
+ ahd_search_qinfifo(ahd, target, channel, lun,
+ /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
+ CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+ ahd_platform_freeze_devq(ahd, scb);
+}
+
+void
+ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct scb *prev_scb;
+ ahd_mode_state saved_modes;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+ prev_scb = NULL;
+ if (ahd_qinfifo_count(ahd) != 0) {
+ u_int prev_tag;
+ u_int prev_pos;
+
+ prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
+ prev_tag = ahd->qinfifo[prev_pos];
+ prev_scb = ahd_lookup_scb(ahd, prev_tag);
+ }
+ ahd_qinfifo_requeue(ahd, prev_scb, scb);
+ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+static void
+ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
+ struct scb *scb)
+{
+ if (prev_scb == NULL) {
+ uint32_t busaddr;
+
+ busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
+ ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
+ } else {
+ prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+ ahd_sync_scb(ahd, prev_scb,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ }
+ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+ ahd->qinfifonext++;
+ scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
+ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+}
+
+static int
+ahd_qinfifo_count(struct ahd_softc *ahd)
+{
+ u_int qinpos;
+ u_int wrap_qinpos;
+ u_int wrap_qinfifonext;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ qinpos = ahd_get_snscb_qoff(ahd);
+ wrap_qinpos = AHD_QIN_WRAP(qinpos);
+ wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);
+ if (wrap_qinfifonext >= wrap_qinpos)
+ return (wrap_qinfifonext - wrap_qinpos);
+ else
+ return (wrap_qinfifonext
+ + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
+}
+
+static void
+ahd_reset_cmds_pending(struct ahd_softc *ahd)
+{
+ struct scb *scb;
+ ahd_mode_state saved_modes;
+ u_int pending_cmds;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+ /*
+ * Don't count any commands as outstanding that the
+ * sequencer has already marked for completion.
+ */
+ ahd_flush_qoutfifo(ahd);
+
+ pending_cmds = 0;
+ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+ pending_cmds++;
+ }
+ ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
+ ahd_restore_modes(ahd, saved_modes);
+ ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
+}
+
+static void
+ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
+{
+ cam_status ostat;
+ cam_status cstat;
+
+ ostat = ahd_get_transaction_status(scb);
+ if (ostat == CAM_REQ_INPROG)
+ ahd_set_transaction_status(scb, status);
+ cstat = ahd_get_transaction_status(scb);
+ if (cstat != CAM_REQ_CMP)
+ ahd_freeze_scb(scb);
+ ahd_done(ahd, scb);
+}
+
+int
+ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status,
+ ahd_search_action action)
+{
+ struct scb *scb;
+ struct scb *mk_msg_scb;
+ struct scb *prev_scb;
+ ahd_mode_state saved_modes;
+ u_int qinstart;
+ u_int qinpos;
+ u_int qintail;
+ u_int tid_next;
+ u_int tid_prev;
+ u_int scbid;
+ u_int seq_flags2;
+ u_int savedscbptr;
+ uint32_t busaddr;
+ int found;
+ int targets;
+
+ /* Must be in CCHAN mode */
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+ /*
+ * Halt any pending SCB DMA. The sequencer will reinitiate
+ * this dma if the qinfifo is not empty once we unpause.
+ */
+ if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
+ == (CCARREN|CCSCBEN|CCSCBDIR)) {
+ ahd_outb(ahd, CCSCBCTL,
+ ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
+ while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
+ ;
+ }
+ /* Determine sequencer's position in the qinfifo. */
+ qintail = AHD_QIN_WRAP(ahd->qinfifonext);
+ qinstart = ahd_get_snscb_qoff(ahd);
+ qinpos = AHD_QIN_WRAP(qinstart);
+ found = 0;
+ prev_scb = NULL;
+
+ if (action == SEARCH_PRINT) {
+ printk("qinstart = %d qinfifonext = %d\nQINFIFO:",
+ qinstart, ahd->qinfifonext);
+ }
+
+ /*
+ * Start with an empty queue. Entries that are not chosen
+ * for removal will be re-added to the queue as we go.
+ */
+ ahd->qinfifonext = qinstart;
+ busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
+ ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
+
+ while (qinpos != qintail) {
+ scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
+ if (scb == NULL) {
+ printk("qinpos = %d, SCB index = %d\n",
+ qinpos, ahd->qinfifo[qinpos]);
+ panic("Loop 1\n");
+ }
+
+ if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
+ /*
+ * We found an scb that needs to be acted on.
+ */
+ found++;
+ switch (action) {
+ case SEARCH_COMPLETE:
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB in qinfifo\n");
+ ahd_done_with_status(ahd, scb, status);
+ /* FALLTHROUGH */
+ case SEARCH_REMOVE:
+ break;
+ case SEARCH_PRINT:
+ printk(" 0x%x", ahd->qinfifo[qinpos]);
+ /* FALLTHROUGH */
+ case SEARCH_COUNT:
+ ahd_qinfifo_requeue(ahd, prev_scb, scb);
+ prev_scb = scb;
+ break;
+ }
+ } else {
+ ahd_qinfifo_requeue(ahd, prev_scb, scb);
+ prev_scb = scb;
+ }
+ qinpos = AHD_QIN_WRAP(qinpos+1);
+ }
+
+ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+
+ if (action == SEARCH_PRINT)
+ printk("\nWAITING_TID_QUEUES:\n");
+
+ /*
+ * Search waiting for selection lists. We traverse the
+ * list of "their ids" waiting for selection and, if
+ * appropriate, traverse the SCBs of each "their id"
+ * looking for matches.
+ */
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2);
+ if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) {
+ scbid = ahd_inw(ahd, MK_MESSAGE_SCB);
+ mk_msg_scb = ahd_lookup_scb(ahd, scbid);
+ } else
+ mk_msg_scb = NULL;
+ savedscbptr = ahd_get_scbptr(ahd);
+ tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
+ tid_prev = SCB_LIST_NULL;
+ targets = 0;
+ for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
+ u_int tid_head;
+ u_int tid_tail;
+
+ targets++;
+ if (targets > AHD_NUM_TARGETS)
+ panic("TID LIST LOOP");
+
+ if (scbid >= ahd->scb_data.numscbs) {
+ printk("%s: Waiting TID List inconsistency. "
+ "SCB index == 0x%x, yet numscbs == 0x%x.",
+ ahd_name(ahd), scbid, ahd->scb_data.numscbs);
+ ahd_dump_card_state(ahd);
+ panic("for safety");
+ }
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: SCB = 0x%x Not Active!\n",
+ ahd_name(ahd), scbid);
+ panic("Waiting TID List traversal\n");
+ }
+ ahd_set_scbptr(ahd, scbid);
+ tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
+ if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
+ SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
+ tid_prev = scbid;
+ continue;
+ }
+
+ /*
+ * We found a list of scbs that needs to be searched.
+ */
+ if (action == SEARCH_PRINT)
+ printk(" %d ( ", SCB_GET_TARGET(ahd, scb));
+ tid_head = scbid;
+ found += ahd_search_scb_list(ahd, target, channel,
+ lun, tag, role, status,
+ action, &tid_head, &tid_tail,
+ SCB_GET_TARGET(ahd, scb));
+ /*
+ * Check any MK_MESSAGE SCB that is still waiting to
+ * enter this target's waiting for selection queue.
+ */
+ if (mk_msg_scb != NULL
+ && ahd_match_scb(ahd, mk_msg_scb, target, channel,
+ lun, tag, role)) {
+
+ /*
+ * We found an scb that needs to be acted on.
+ */
+ found++;
+ switch (action) {
+ case SEARCH_COMPLETE:
+ if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB pending MK_MSG\n");
+ ahd_done_with_status(ahd, mk_msg_scb, status);
+ /* FALLTHROUGH */
+ case SEARCH_REMOVE:
+ {
+ u_int tail_offset;
+
+ printk("Removing MK_MSG scb\n");
+
+ /*
+ * Reset our tail to the tail of the
+ * main per-target list.
+ */
+ tail_offset = WAITING_SCB_TAILS
+ + (2 * SCB_GET_TARGET(ahd, mk_msg_scb));
+ ahd_outw(ahd, tail_offset, tid_tail);
+
+ seq_flags2 &= ~PENDING_MK_MESSAGE;
+ ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
+ ahd_outw(ahd, CMDS_PENDING,
+ ahd_inw(ahd, CMDS_PENDING)-1);
+ mk_msg_scb = NULL;
+ break;
+ }
+ case SEARCH_PRINT:
+ printk(" 0x%x", SCB_GET_TAG(scb));
+ /* FALLTHROUGH */
+ case SEARCH_COUNT:
+ break;
+ }
+ }
+
+ if (mk_msg_scb != NULL
+ && SCBID_IS_NULL(tid_head)
+ && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
+ SCB_LIST_NULL, ROLE_UNKNOWN)) {
+
+ /*
+ * When removing the last SCB for a target
+ * queue with a pending MK_MESSAGE scb, we
+ * must queue the MK_MESSAGE scb.
+ */
+ printk("Queueing mk_msg_scb\n");
+ tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
+ seq_flags2 &= ~PENDING_MK_MESSAGE;
+ ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
+ mk_msg_scb = NULL;
+ }
+ if (tid_head != scbid)
+ ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
+ if (!SCBID_IS_NULL(tid_head))
+ tid_prev = tid_head;
+ if (action == SEARCH_PRINT)
+ printk(")\n");
+ }
+
+ /* Restore saved state. */
+ ahd_set_scbptr(ahd, savedscbptr);
+ ahd_restore_modes(ahd, saved_modes);
+ return (found);
+}
+
+static int
+ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status,
+ ahd_search_action action, u_int *list_head,
+ u_int *list_tail, u_int tid)
+{
+ struct scb *scb;
+ u_int scbid;
+ u_int next;
+ u_int prev;
+ int found;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ found = 0;
+ prev = SCB_LIST_NULL;
+ next = *list_head;
+ *list_tail = SCB_LIST_NULL;
+ for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
+ if (scbid >= ahd->scb_data.numscbs) {
+ printk("%s:SCB List inconsistency. "
+ "SCB == 0x%x, yet numscbs == 0x%x.",
+ ahd_name(ahd), scbid, ahd->scb_data.numscbs);
+ ahd_dump_card_state(ahd);
+ panic("for safety");
+ }
+ scb = ahd_lookup_scb(ahd, scbid);
+ if (scb == NULL) {
+ printk("%s: SCB = %d Not Active!\n",
+ ahd_name(ahd), scbid);
+ panic("Waiting List traversal\n");
+ }
+ ahd_set_scbptr(ahd, scbid);
+ *list_tail = scbid;
+ next = ahd_inw_scbram(ahd, SCB_NEXT);
+ if (ahd_match_scb(ahd, scb, target, channel,
+ lun, SCB_LIST_NULL, role) == 0) {
+ prev = scbid;
+ continue;
+ }
+ found++;
+ switch (action) {
+ case SEARCH_COMPLETE:
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB in Waiting List\n");
+ ahd_done_with_status(ahd, scb, status);
+ /* FALLTHROUGH */
+ case SEARCH_REMOVE:
+ ahd_rem_wscb(ahd, scbid, prev, next, tid);
+ *list_tail = prev;
+ if (SCBID_IS_NULL(prev))
+ *list_head = next;
+ break;
+ case SEARCH_PRINT:
+ printk("0x%x ", scbid);
+ case SEARCH_COUNT:
+ prev = scbid;
+ break;
+ }
+ if (found > AHD_SCB_MAX)
+ panic("SCB LIST LOOP");
+ }
+ if (action == SEARCH_COMPLETE
+ || action == SEARCH_REMOVE)
+ ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
+ return (found);
+}
+
+static void
+ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
+ u_int tid_cur, u_int tid_next)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+ if (SCBID_IS_NULL(tid_cur)) {
+
+ /* Bypass current TID list */
+ if (SCBID_IS_NULL(tid_prev)) {
+ ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
+ } else {
+ ahd_set_scbptr(ahd, tid_prev);
+ ahd_outw(ahd, SCB_NEXT2, tid_next);
+ }
+ if (SCBID_IS_NULL(tid_next))
+ ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
+ } else {
+
+ /* Stitch through tid_cur */
+ if (SCBID_IS_NULL(tid_prev)) {
+ ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
+ } else {
+ ahd_set_scbptr(ahd, tid_prev);
+ ahd_outw(ahd, SCB_NEXT2, tid_cur);
+ }
+ ahd_set_scbptr(ahd, tid_cur);
+ ahd_outw(ahd, SCB_NEXT2, tid_next);
+
+ if (SCBID_IS_NULL(tid_next))
+ ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
+ }
+}
+
+/*
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ */
+static u_int
+ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
+ u_int prev, u_int next, u_int tid)
+{
+ u_int tail_offset;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ if (!SCBID_IS_NULL(prev)) {
+ ahd_set_scbptr(ahd, prev);
+ ahd_outw(ahd, SCB_NEXT, next);
+ }
+
+ /*
+ * SCBs that have MK_MESSAGE set in them may
+ * cause the tail pointer to be updated without
+ * setting the next pointer of the previous tail.
+ * Only clear the tail if the removed SCB was
+ * the tail.
+ */
+ tail_offset = WAITING_SCB_TAILS + (2 * tid);
+ if (SCBID_IS_NULL(next)
+ && ahd_inw(ahd, tail_offset) == scbid)
+ ahd_outw(ahd, tail_offset, prev);
+
+ ahd_add_scb_to_free_list(ahd, scbid);
+ return (next);
+}
+
+/*
+ * Add the SCB as selected by SCBPTR onto the on chip list of
+ * free hardware SCBs. This list is empty/unused if we are not
+ * performing SCB paging.
+ */
+static void
+ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
+{
+/* XXX Need some other mechanism to designate "free". */
+ /*
+ * Invalidate the tag so that our abort
+ * routines don't think it's active.
+ ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
+ */
+}
+
+/******************************** Error Handling ******************************/
+/*
+ * Abort all SCBs that match the given description (target/channel/lun/tag),
+ * setting their status to the passed in status if the status has not already
+ * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
+ * is paused before it is called.
+ */
+static int
+ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status)
+{
+ struct scb *scbp;
+ struct scb *scbp_next;
+ u_int i, j;
+ u_int maxtarget;
+ u_int minlun;
+ u_int maxlun;
+ int found;
+ ahd_mode_state saved_modes;
+
+ /* restore this when we're done */
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
+ role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+ /*
+ * Clean out the busy target table for any untagged commands.
+ */
+ i = 0;
+ maxtarget = 16;
+ if (target != CAM_TARGET_WILDCARD) {
+ i = target;
+ if (channel == 'B')
+ i += 8;
+ maxtarget = i + 1;
+ }
+
+ if (lun == CAM_LUN_WILDCARD) {
+ minlun = 0;
+ maxlun = AHD_NUM_LUNS_NONPKT;
+ } else if (lun >= AHD_NUM_LUNS_NONPKT) {
+ minlun = maxlun = 0;
+ } else {
+ minlun = lun;
+ maxlun = lun + 1;
+ }
+
+ if (role != ROLE_TARGET) {
+ for (;i < maxtarget; i++) {
+ for (j = minlun;j < maxlun; j++) {
+ u_int scbid;
+ u_int tcl;
+
+ tcl = BUILD_TCL_RAW(i, 'A', j);
+ scbid = ahd_find_busy_tcl(ahd, tcl);
+ scbp = ahd_lookup_scb(ahd, scbid);
+ if (scbp == NULL
+ || ahd_match_scb(ahd, scbp, target, channel,
+ lun, tag, role) == 0)
+ continue;
+ ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
+ }
+ }
+ }
+
+ /*
+ * Don't abort commands that have already completed,
+ * but haven't quite made it up to the host yet.
+ */
+ ahd_flush_qoutfifo(ahd);
+
+ /*
+ * Go through the pending CCB list and look for
+ * commands for this target that are still active.
+ * These are other tagged commands that were
+ * disconnected when the reset occurred.
+ */
+ scbp_next = LIST_FIRST(&ahd->pending_scbs);
+ while (scbp_next != NULL) {
+ scbp = scbp_next;
+ scbp_next = LIST_NEXT(scbp, pending_links);
+ if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
+ cam_status ostat;
+
+ ostat = ahd_get_transaction_status(scbp);
+ if (ostat == CAM_REQ_INPROG)
+ ahd_set_transaction_status(scbp, status);
+ if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
+ ahd_freeze_scb(scbp);
+ if ((scbp->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB on pending list\n");
+ ahd_done(ahd, scbp);
+ found++;
+ }
+ }
+ ahd_restore_modes(ahd, saved_modes);
+ ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
+ ahd->flags |= AHD_UPDATE_PEND_CMDS;
+ return found;
+}
+
+static void
+ahd_reset_current_bus(struct ahd_softc *ahd)
+{
+ uint8_t scsiseq;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
+ scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
+ ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
+ ahd_flush_device_writes(ahd);
+ ahd_delay(AHD_BUSRESET_DELAY);
+ /* Turn off the bus reset */
+ ahd_outb(ahd, SCSISEQ0, scsiseq);
+ ahd_flush_device_writes(ahd);
+ ahd_delay(AHD_BUSRESET_DELAY);
+ if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
+ /*
+ * 2A Razor #474
+ * Certain chip state is not cleared for
+ * SCSI bus resets that we initiate, so
+ * we must reset the chip.
+ */
+ ahd_reset(ahd, /*reinit*/TRUE);
+ ahd_intr_enable(ahd, /*enable*/TRUE);
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ }
+
+ ahd_clear_intstat(ahd);
+}
+
+int
+ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
+{
+ struct ahd_devinfo caminfo;
+ u_int initiator;
+ u_int target;
+ u_int max_scsiid;
+ int found;
+ u_int fifo;
+ u_int next_fifo;
+ uint8_t scsiseq;
+
+ /*
+ * Check if the last bus reset is cleared
+ */
+ if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
+ printk("%s: bus reset still active\n",
+ ahd_name(ahd));
+ return 0;
+ }
+ ahd->flags |= AHD_BUS_RESET_ACTIVE;
+
+ ahd->pending_device = NULL;
+
+ ahd_compile_devinfo(&caminfo,
+ CAM_TARGET_WILDCARD,
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD,
+ channel, ROLE_UNKNOWN);
+ ahd_pause(ahd);
+
+ /* Make sure the sequencer is in a safe location. */
+ ahd_clear_critical_section(ahd);
+
+ /*
+ * Run our command complete fifos to ensure that we perform
+ * completion processing on any commands that 'completed'
+ * before the reset occurred.
+ */
+ ahd_run_qoutfifo(ahd);
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ ahd_run_tqinfifo(ahd, /*paused*/TRUE);
+ }
+#endif
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ /*
+ * Disable selections so no automatic hardware
+ * functions will modify chip state.
+ */
+ ahd_outb(ahd, SCSISEQ0, 0);
+ ahd_outb(ahd, SCSISEQ1, 0);
+
+ /*
+ * Safely shut down our DMA engines. Always start with
+ * the FIFO that is not currently active (if any are
+ * actively connected).
+ */
+ next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
+ if (next_fifo > CURRFIFO_1)
+ /* If disconneced, arbitrarily start with FIFO1. */
+ next_fifo = fifo = 0;
+ do {
+ next_fifo ^= CURRFIFO_1;
+ ahd_set_modes(ahd, next_fifo, next_fifo);
+ ahd_outb(ahd, DFCNTRL,
+ ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
+ while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
+ ahd_delay(10);
+ /*
+ * Set CURRFIFO to the now inactive channel.
+ */
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, DFFSTAT, next_fifo);
+ } while (next_fifo != fifo);
+
+ /*
+ * Reset the bus if we are initiating this reset
+ */
+ ahd_clear_msg_state(ahd);
+ ahd_outb(ahd, SIMODE1,
+ ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
+
+ if (initiate_reset)
+ ahd_reset_current_bus(ahd);
+
+ ahd_clear_intstat(ahd);
+
+ /*
+ * Clean up all the state information for the
+ * pending transactions on this bus.
+ */
+ found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL,
+ ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
+
+ /*
+ * Cleanup anything left in the FIFOs.
+ */
+ ahd_clear_fifo(ahd, 0);
+ ahd_clear_fifo(ahd, 1);
+
+ /*
+ * Clear SCSI interrupt status
+ */
+ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
+
+ /*
+ * Reenable selections
+ */
+ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
+ scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
+ ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
+
+ max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
+#ifdef AHD_TARGET_MODE
+ /*
+ * Send an immediate notify ccb to all target more peripheral
+ * drivers affected by this action.
+ */
+ for (target = 0; target <= max_scsiid; target++) {
+ struct ahd_tmode_tstate* tstate;
+ u_int lun;
+
+ tstate = ahd->enabled_targets[target];
+ if (tstate == NULL)
+ continue;
+ for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+ struct ahd_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[lun];
+ if (lstate == NULL)
+ continue;
+
+ ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
+ EVENT_TYPE_BUS_RESET, /*arg*/0);
+ ahd_send_lstate_events(ahd, lstate);
+ }
+ }
+#endif
+ /*
+ * Revert to async/narrow transfers until we renegotiate.
+ */
+ for (target = 0; target <= max_scsiid; target++) {
+
+ if (ahd->enabled_targets[target] == NULL)
+ continue;
+ for (initiator = 0; initiator <= max_scsiid; initiator++) {
+ struct ahd_devinfo devinfo;
+
+ ahd_compile_devinfo(&devinfo, target, initiator,
+ CAM_LUN_WILDCARD,
+ 'A', ROLE_UNKNOWN);
+ ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_CUR, /*paused*/TRUE);
+ ahd_set_syncrate(ahd, &devinfo, /*period*/0,
+ /*offset*/0, /*ppr_options*/0,
+ AHD_TRANS_CUR, /*paused*/TRUE);
+ }
+ }
+
+ /* Notify the XPT that a bus reset occurred */
+ ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD, AC_BUS_RESET);
+
+ ahd_restart(ahd);
+
+ return (found);
+}
+
+/**************************** Statistics Processing ***************************/
+static void
+ahd_stat_timer(void *arg)
+{
+ struct ahd_softc *ahd = arg;
+ u_long s;
+ int enint_coal;
+
+ ahd_lock(ahd, &s);
+
+ enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
+ if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
+ enint_coal |= ENINT_COALESCE;
+ else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
+ enint_coal &= ~ENINT_COALESCE;
+
+ if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
+ ahd_enable_coalescing(ahd, enint_coal);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
+ printk("%s: Interrupt coalescing "
+ "now %sabled. Cmds %d\n",
+ ahd_name(ahd),
+ (enint_coal & ENINT_COALESCE) ? "en" : "dis",
+ ahd->cmdcmplt_total);
+#endif
+ }
+
+ ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
+ ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
+ ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
+ ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
+ ahd_stat_timer, ahd);
+ ahd_unlock(ahd, &s);
+}
+
+/****************************** Status Processing *****************************/
+
+static void
+ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+ int paused;
+
+ /*
+ * The sequencer freezes its select-out queue
+ * anytime a SCSI status error occurs. We must
+ * handle the error and increment our qfreeze count
+ * to allow the sequencer to continue. We don't
+ * bother clearing critical sections here since all
+ * operations are on data structures that the sequencer
+ * is not touching once the queue is frozen.
+ */
+ hscb = scb->hscb;
+
+ if (ahd_is_paused(ahd)) {
+ paused = 1;
+ } else {
+ paused = 0;
+ ahd_pause(ahd);
+ }
+
+ /* Freeze the queue until the client sees the error. */
+ ahd_freeze_devq(ahd, scb);
+ ahd_freeze_scb(scb);
+ ahd->qfreeze_cnt++;
+ ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
+
+ if (paused == 0)
+ ahd_unpause(ahd);
+
+ /* Don't want to clobber the original sense code */
+ if ((scb->flags & SCB_SENSE) != 0) {
+ /*
+ * Clear the SCB_SENSE Flag and perform
+ * a normal command completion.
+ */
+ scb->flags &= ~SCB_SENSE;
+ ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+ ahd_done(ahd, scb);
+ return;
+ }
+ ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
+ ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
+ switch (hscb->shared_data.istatus.scsi_status) {
+ case STATUS_PKT_SENSE:
+ {
+ struct scsi_status_iu_header *siu;
+
+ ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
+ siu = (struct scsi_status_iu_header *)scb->sense_data;
+ ahd_set_scsi_status(scb, siu->status);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("SCB 0x%x Received PKT Status of 0x%x\n",
+ SCB_GET_TAG(scb), siu->status);
+ printk("\tflags = 0x%x, sense len = 0x%x, "
+ "pktfail = 0x%x\n",
+ siu->flags, scsi_4btoul(siu->sense_length),
+ scsi_4btoul(siu->pkt_failures_length));
+ }
+#endif
+ if ((siu->flags & SIU_RSPVALID) != 0) {
+ ahd_print_path(ahd, scb);
+ if (scsi_4btoul(siu->pkt_failures_length) < 4) {
+ printk("Unable to parse pkt_failures\n");
+ } else {
+
+ switch (SIU_PKTFAIL_CODE(siu)) {
+ case SIU_PFC_NONE:
+ printk("No packet failure found\n");
+ break;
+ case SIU_PFC_CIU_FIELDS_INVALID:
+ printk("Invalid Command IU Field\n");
+ break;
+ case SIU_PFC_TMF_NOT_SUPPORTED:
+ printk("TMF not supported\n");
+ break;
+ case SIU_PFC_TMF_FAILED:
+ printk("TMF failed\n");
+ break;
+ case SIU_PFC_INVALID_TYPE_CODE:
+ printk("Invalid L_Q Type code\n");
+ break;
+ case SIU_PFC_ILLEGAL_REQUEST:
+ printk("Illegal request\n");
+ default:
+ break;
+ }
+ }
+ if (siu->status == SCSI_STATUS_OK)
+ ahd_set_transaction_status(scb,
+ CAM_REQ_CMP_ERR);
+ }
+ if ((siu->flags & SIU_SNSVALID) != 0) {
+ scb->flags |= SCB_PKT_SENSE;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_SENSE) != 0)
+ printk("Sense data available\n");
+#endif
+ }
+ ahd_done(ahd, scb);
+ break;
+ }
+ case SCSI_STATUS_CMD_TERMINATED:
+ case SCSI_STATUS_CHECK_COND:
+ {
+ struct ahd_devinfo devinfo;
+ struct ahd_dma_seg *sg;
+ struct scsi_sense *sc;
+ struct ahd_initiator_tinfo *targ_info;
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_transinfo *tinfo;
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_SENSE) {
+ ahd_print_path(ahd, scb);
+ printk("SCB %d: requests Check Status\n",
+ SCB_GET_TAG(scb));
+ }
+#endif
+
+ if (ahd_perform_autosense(scb) == 0)
+ break;
+
+ ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
+ SCB_GET_TARGET(ahd, scb),
+ SCB_GET_LUN(scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ ROLE_INITIATOR);
+ targ_info = ahd_fetch_transinfo(ahd,
+ devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target,
+ &tstate);
+ tinfo = &targ_info->curr;
+ sg = scb->sg_list;
+ sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
+ /*
+ * Save off the residual if there is one.
+ */
+ ahd_update_residual(ahd, scb);
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_SENSE) {
+ ahd_print_path(ahd, scb);
+ printk("Sending Sense\n");
+ }
+#endif
+ scb->sg_count = 0;
+ sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
+ ahd_get_sense_bufsize(ahd, scb),
+ /*last*/TRUE);
+ sc->opcode = REQUEST_SENSE;
+ sc->byte2 = 0;
+ if (tinfo->protocol_version <= SCSI_REV_2
+ && SCB_GET_LUN(scb) < 8)
+ sc->byte2 = SCB_GET_LUN(scb) << 5;
+ sc->unused[0] = 0;
+ sc->unused[1] = 0;
+ sc->length = ahd_get_sense_bufsize(ahd, scb);
+ sc->control = 0;
+
+ /*
+ * We can't allow the target to disconnect.
+ * This will be an untagged transaction and
+ * having the target disconnect will make this
+ * transaction indestinguishable from outstanding
+ * tagged transactions.
+ */
+ hscb->control = 0;
+
+ /*
+ * This request sense could be because the
+ * the device lost power or in some other
+ * way has lost our transfer negotiations.
+ * Renegotiate if appropriate. Unit attention
+ * errors will be reported before any data
+ * phases occur.
+ */
+ if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
+ ahd_update_neg_request(ahd, &devinfo,
+ tstate, targ_info,
+ AHD_NEG_IF_NON_ASYNC);
+ }
+ if (tstate->auto_negotiate & devinfo.target_mask) {
+ hscb->control |= MK_MESSAGE;
+ scb->flags &=
+ ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
+ scb->flags |= SCB_AUTO_NEGOTIATE;
+ }
+ hscb->cdb_len = sizeof(*sc);
+ ahd_setup_data_scb(ahd, scb);
+ scb->flags |= SCB_SENSE;
+ ahd_queue_scb(ahd, scb);
+ break;
+ }
+ case SCSI_STATUS_OK:
+ printk("%s: Interrupted for status of 0???\n",
+ ahd_name(ahd));
+ /* FALLTHROUGH */
+ default:
+ ahd_done(ahd, scb);
+ break;
+ }
+}
+
+static void
+ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
+{
+ if (scb->hscb->shared_data.istatus.scsi_status != 0) {
+ ahd_handle_scsi_status(ahd, scb);
+ } else {
+ ahd_calc_residual(ahd, scb);
+ ahd_done(ahd, scb);
+ }
+}
+
+/*
+ * Calculate the residual for a just completed SCB.
+ */
+static void
+ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+ struct initiator_status *spkt;
+ uint32_t sgptr;
+ uint32_t resid_sgptr;
+ uint32_t resid;
+
+ /*
+ * 5 cases.
+ * 1) No residual.
+ * SG_STATUS_VALID clear in sgptr.
+ * 2) Transferless command
+ * 3) Never performed any transfers.
+ * sgptr has SG_FULL_RESID set.
+ * 4) No residual but target did not
+ * save data pointers after the
+ * last transfer, so sgptr was
+ * never updated.
+ * 5) We have a partial residual.
+ * Use residual_sgptr to determine
+ * where we are.
+ */
+
+ hscb = scb->hscb;
+ sgptr = ahd_le32toh(hscb->sgptr);
+ if ((sgptr & SG_STATUS_VALID) == 0)
+ /* Case 1 */
+ return;
+ sgptr &= ~SG_STATUS_VALID;
+
+ if ((sgptr & SG_LIST_NULL) != 0)
+ /* Case 2 */
+ return;
+
+ /*
+ * Residual fields are the same in both
+ * target and initiator status packets,
+ * so we can always use the initiator fields
+ * regardless of the role for this SCB.
+ */
+ spkt = &hscb->shared_data.istatus;
+ resid_sgptr = ahd_le32toh(spkt->residual_sgptr);
+ if ((sgptr & SG_FULL_RESID) != 0) {
+ /* Case 3 */
+ resid = ahd_get_transfer_length(scb);
+ } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
+ /* Case 4 */
+ return;
+ } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("data overrun detected Tag == 0x%x.\n",
+ SCB_GET_TAG(scb));
+ ahd_freeze_devq(ahd, scb);
+ ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+ ahd_freeze_scb(scb);
+ return;
+ } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
+ panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
+ /* NOTREACHED */
+ } else {
+ struct ahd_dma_seg *sg;
+
+ /*
+ * Remainder of the SG where the transfer
+ * stopped.
+ */
+ resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
+ sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
+
+ /* The residual sg_ptr always points to the next sg */
+ sg--;
+
+ /*
+ * Add up the contents of all residual
+ * SG segments that are after the SG where
+ * the transfer stopped.
+ */
+ while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
+ sg++;
+ resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
+ }
+ }
+ if ((scb->flags & SCB_SENSE) == 0)
+ ahd_set_residual(scb, resid);
+ else
+ ahd_set_sense_residual(scb, resid);
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("Handled %sResidual of %d bytes\n",
+ (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
+ }
+#endif
+}
+
+/******************************* Target Mode **********************************/
+#ifdef AHD_TARGET_MODE
+/*
+ * Add a target mode event to this lun's queue
+ */
+static void
+ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
+ u_int initiator_id, u_int event_type, u_int event_arg)
+{
+ struct ahd_tmode_event *event;
+ int pending;
+
+ xpt_freeze_devq(lstate->path, /*count*/1);
+ if (lstate->event_w_idx >= lstate->event_r_idx)
+ pending = lstate->event_w_idx - lstate->event_r_idx;
+ else
+ pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1
+ - (lstate->event_r_idx - lstate->event_w_idx);
+
+ if (event_type == EVENT_TYPE_BUS_RESET
+ || event_type == MSG_BUS_DEV_RESET) {
+ /*
+ * Any earlier events are irrelevant, so reset our buffer.
+ * This has the effect of allowing us to deal with reset
+ * floods (an external device holding down the reset line)
+ * without losing the event that is really interesting.
+ */
+ lstate->event_r_idx = 0;
+ lstate->event_w_idx = 0;
+ xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
+ }
+
+ if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
+ xpt_print_path(lstate->path);
+ printk("immediate event %x:%x lost\n",
+ lstate->event_buffer[lstate->event_r_idx].event_type,
+ lstate->event_buffer[lstate->event_r_idx].event_arg);
+ lstate->event_r_idx++;
+ if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
+ lstate->event_r_idx = 0;
+ xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
+ }
+
+ event = &lstate->event_buffer[lstate->event_w_idx];
+ event->initiator_id = initiator_id;
+ event->event_type = event_type;
+ event->event_arg = event_arg;
+ lstate->event_w_idx++;
+ if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
+ lstate->event_w_idx = 0;
+}
+
+/*
+ * Send any target mode events queued up waiting
+ * for immediate notify resources.
+ */
+void
+ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
+{
+ struct ccb_hdr *ccbh;
+ struct ccb_immed_notify *inot;
+
+ while (lstate->event_r_idx != lstate->event_w_idx
+ && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
+ struct ahd_tmode_event *event;
+
+ event = &lstate->event_buffer[lstate->event_r_idx];
+ SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
+ inot = (struct ccb_immed_notify *)ccbh;
+ switch (event->event_type) {
+ case EVENT_TYPE_BUS_RESET:
+ ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
+ break;
+ default:
+ ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
+ inot->message_args[0] = event->event_type;
+ inot->message_args[1] = event->event_arg;
+ break;
+ }
+ inot->initiator_id = event->initiator_id;
+ inot->sense_len = 0;
+ xpt_done((union ccb *)inot);
+ lstate->event_r_idx++;
+ if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
+ lstate->event_r_idx = 0;
+ }
+}
+#endif
+
+/******************** Sequencer Program Patching/Download *********************/
+
+#ifdef AHD_DUMP_SEQ
+void
+ahd_dumpseq(struct ahd_softc* ahd)
+{
+ int i;
+ int max_prog;
+
+ max_prog = 2048;
+
+ ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+ ahd_outw(ahd, PRGMCNT, 0);
+ for (i = 0; i < max_prog; i++) {
+ uint8_t ins_bytes[4];
+
+ ahd_insb(ahd, SEQRAM, ins_bytes, 4);
+ printk("0x%08x\n", ins_bytes[0] << 24
+ | ins_bytes[1] << 16
+ | ins_bytes[2] << 8
+ | ins_bytes[3]);
+ }
+}
+#endif
+
+static void
+ahd_loadseq(struct ahd_softc *ahd)
+{
+ struct cs cs_table[num_critical_sections];
+ u_int begin_set[num_critical_sections];
+ u_int end_set[num_critical_sections];
+ const struct patch *cur_patch;
+ u_int cs_count;
+ u_int cur_cs;
+ u_int i;
+ int downloaded;
+ u_int skip_addr;
+ u_int sg_prefetch_cnt;
+ u_int sg_prefetch_cnt_limit;
+ u_int sg_prefetch_align;
+ u_int sg_size;
+ u_int cacheline_mask;
+ uint8_t download_consts[DOWNLOAD_CONST_COUNT];
+
+ if (bootverbose)
+ printk("%s: Downloading Sequencer Program...",
+ ahd_name(ahd));
+
+#if DOWNLOAD_CONST_COUNT != 8
+#error "Download Const Mismatch"
+#endif
+ /*
+ * Start out with 0 critical sections
+ * that apply to this firmware load.
+ */
+ cs_count = 0;
+ cur_cs = 0;
+ memset(begin_set, 0, sizeof(begin_set));
+ memset(end_set, 0, sizeof(end_set));
+
+ /*
+ * Setup downloadable constant table.
+ *
+ * The computation for the S/G prefetch variables is
+ * a bit complicated. We would like to always fetch
+ * in terms of cachelined sized increments. However,
+ * if the cacheline is not an even multiple of the
+ * SG element size or is larger than our SG RAM, using
+ * just the cache size might leave us with only a portion
+ * of an SG element at the tail of a prefetch. If the
+ * cacheline is larger than our S/G prefetch buffer less
+ * the size of an SG element, we may round down to a cacheline
+ * that doesn't contain any or all of the S/G of interest
+ * within the bounds of our S/G ram. Provide variables to
+ * the sequencer that will allow it to handle these edge
+ * cases.
+ */
+ /* Start by aligning to the nearest cacheline. */
+ sg_prefetch_align = ahd->pci_cachesize;
+ if (sg_prefetch_align == 0)
+ sg_prefetch_align = 8;
+ /* Round down to the nearest power of 2. */
+ while (powerof2(sg_prefetch_align) == 0)
+ sg_prefetch_align--;
+
+ cacheline_mask = sg_prefetch_align - 1;
+
+ /*
+ * If the cacheline boundary is greater than half our prefetch RAM
+ * we risk not being able to fetch even a single complete S/G
+ * segment if we align to that boundary.
+ */
+ if (sg_prefetch_align > CCSGADDR_MAX/2)
+ sg_prefetch_align = CCSGADDR_MAX/2;
+ /* Start by fetching a single cacheline. */
+ sg_prefetch_cnt = sg_prefetch_align;
+ /*
+ * Increment the prefetch count by cachelines until
+ * at least one S/G element will fit.
+ */
+ sg_size = sizeof(struct ahd_dma_seg);
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+ sg_size = sizeof(struct ahd_dma64_seg);
+ while (sg_prefetch_cnt < sg_size)
+ sg_prefetch_cnt += sg_prefetch_align;
+ /*
+ * If the cacheline is not an even multiple of
+ * the S/G size, we may only get a partial S/G when
+ * we align. Add a cacheline if this is the case.
+ */
+ if ((sg_prefetch_align % sg_size) != 0
+ && (sg_prefetch_cnt < CCSGADDR_MAX))
+ sg_prefetch_cnt += sg_prefetch_align;
+ /*
+ * Lastly, compute a value that the sequencer can use
+ * to determine if the remainder of the CCSGRAM buffer
+ * has a full S/G element in it.
+ */
+ sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
+ download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
+ download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
+ download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
+ download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
+ download_consts[SG_SIZEOF] = sg_size;
+ download_consts[PKT_OVERRUN_BUFOFFSET] =
+ (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
+ download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
+ download_consts[CACHELINE_MASK] = cacheline_mask;
+ cur_patch = patches;
+ downloaded = 0;
+ skip_addr = 0;
+ ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+ ahd_outw(ahd, PRGMCNT, 0);
+
+ for (i = 0; i < sizeof(seqprog)/4; i++) {
+ if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
+ /*
+ * Don't download this instruction as it
+ * is in a patch that was removed.
+ */
+ continue;
+ }
+ /*
+ * Move through the CS table until we find a CS
+ * that might apply to this instruction.
+ */
+ for (; cur_cs < num_critical_sections; cur_cs++) {
+ if (critical_sections[cur_cs].end <= i) {
+ if (begin_set[cs_count] == TRUE
+ && end_set[cs_count] == FALSE) {
+ cs_table[cs_count].end = downloaded;
+ end_set[cs_count] = TRUE;
+ cs_count++;
+ }
+ continue;
+ }
+ if (critical_sections[cur_cs].begin <= i
+ && begin_set[cs_count] == FALSE) {
+ cs_table[cs_count].begin = downloaded;
+ begin_set[cs_count] = TRUE;
+ }
+ break;
+ }
+ ahd_download_instr(ahd, i, download_consts);
+ downloaded++;
+ }
+
+ ahd->num_critical_sections = cs_count;
+ if (cs_count != 0) {
+
+ cs_count *= sizeof(struct cs);
+ ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ if (ahd->critical_sections == NULL)
+ panic("ahd_loadseq: Could not malloc");
+ memcpy(ahd->critical_sections, cs_table, cs_count);
+ }
+ ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
+
+ if (bootverbose) {
+ printk(" %d instructions downloaded\n", downloaded);
+ printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
+ ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
+ }
+}
+
+static int
+ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
+ u_int start_instr, u_int *skip_addr)
+{
+ const struct patch *cur_patch;
+ const struct patch *last_patch;
+ u_int num_patches;
+
+ num_patches = ARRAY_SIZE(patches);
+ last_patch = &patches[num_patches];
+ cur_patch = *start_patch;
+
+ while (cur_patch < last_patch && start_instr == cur_patch->begin) {
+
+ if (cur_patch->patch_func(ahd) == 0) {
+
+ /* Start rejecting code */
+ *skip_addr = start_instr + cur_patch->skip_instr;
+ cur_patch += cur_patch->skip_patch;
+ } else {
+ /* Accepted this patch. Advance to the next
+ * one and wait for our intruction pointer to
+ * hit this point.
+ */
+ cur_patch++;
+ }
+ }
+
+ *start_patch = cur_patch;
+ if (start_instr < *skip_addr)
+ /* Still skipping */
+ return (0);
+
+ return (1);
+}
+
+static u_int
+ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
+{
+ const struct patch *cur_patch;
+ int address_offset;
+ u_int skip_addr;
+ u_int i;
+
+ address_offset = 0;
+ cur_patch = patches;
+ skip_addr = 0;
+
+ for (i = 0; i < address;) {
+
+ ahd_check_patch(ahd, &cur_patch, i, &skip_addr);
+
+ if (skip_addr > i) {
+ int end_addr;
+
+ end_addr = min(address, skip_addr);
+ address_offset += end_addr - i;
+ i = skip_addr;
+ } else {
+ i++;
+ }
+ }
+ return (address - address_offset);
+}
+
+static void
+ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
+{
+ union ins_formats instr;
+ struct ins_format1 *fmt1_ins;
+ struct ins_format3 *fmt3_ins;
+ u_int opcode;
+
+ /*
+ * The firmware is always compiled into a little endian format.
+ */
+ instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
+
+ fmt1_ins = &instr.format1;
+ fmt3_ins = NULL;
+
+ /* Pull the opcode */
+ opcode = instr.format1.opcode;
+ switch (opcode) {
+ case AIC_OP_JMP:
+ case AIC_OP_JC:
+ case AIC_OP_JNC:
+ case AIC_OP_CALL:
+ case AIC_OP_JNE:
+ case AIC_OP_JNZ:
+ case AIC_OP_JE:
+ case AIC_OP_JZ:
+ {
+ fmt3_ins = &instr.format3;
+ fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
+ /* FALLTHROUGH */
+ }
+ case AIC_OP_OR:
+ case AIC_OP_AND:
+ case AIC_OP_XOR:
+ case AIC_OP_ADD:
+ case AIC_OP_ADC:
+ case AIC_OP_BMOV:
+ if (fmt1_ins->parity != 0) {
+ fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
+ }
+ fmt1_ins->parity = 0;
+ /* FALLTHROUGH */
+ case AIC_OP_ROL:
+ {
+ int i, count;
+
+ /* Calculate odd parity for the instruction */
+ for (i = 0, count = 0; i < 31; i++) {
+ uint32_t mask;
+
+ mask = 0x01 << i;
+ if ((instr.integer & mask) != 0)
+ count++;
+ }
+ if ((count & 0x01) == 0)
+ instr.format1.parity = 1;
+
+ /* The sequencer is a little endian cpu */
+ instr.integer = ahd_htole32(instr.integer);
+ ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
+ break;
+ }
+ default:
+ panic("Unknown opcode encountered in seq program");
+ break;
+ }
+}
+
+static int
+ahd_probe_stack_size(struct ahd_softc *ahd)
+{
+ int last_probe;
+
+ last_probe = 0;
+ while (1) {
+ int i;
+
+ /*
+ * We avoid using 0 as a pattern to avoid
+ * confusion if the stack implementation
+ * "back-fills" with zeros when "poping'
+ * entries.
+ */
+ for (i = 1; i <= last_probe+1; i++) {
+ ahd_outb(ahd, STACK, i & 0xFF);
+ ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
+ }
+
+ /* Verify */
+ for (i = last_probe+1; i > 0; i--) {
+ u_int stack_entry;
+
+ stack_entry = ahd_inb(ahd, STACK)
+ |(ahd_inb(ahd, STACK) << 8);
+ if (stack_entry != i)
+ goto sized;
+ }
+ last_probe++;
+ }
+sized:
+ return (last_probe);
+}
+
+int
+ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
+ const char *name, u_int address, u_int value,
+ u_int *cur_column, u_int wrap_point)
+{
+ int printed;
+ u_int printed_mask;
+
+ if (cur_column != NULL && *cur_column >= wrap_point) {
+ printk("\n");
+ *cur_column = 0;
+ }
+ printed = printk("%s[0x%x]", name, value);
+ if (table == NULL) {
+ printed += printk(" ");
+ *cur_column += printed;
+ return (printed);
+ }
+ printed_mask = 0;
+ while (printed_mask != 0xFF) {
+ int entry;
+
+ for (entry = 0; entry < num_entries; entry++) {
+ if (((value & table[entry].mask)
+ != table[entry].value)
+ || ((printed_mask & table[entry].mask)
+ == table[entry].mask))
+ continue;
+
+ printed += printk("%s%s",
+ printed_mask == 0 ? ":(" : "|",
+ table[entry].name);
+ printed_mask |= table[entry].mask;
+
+ break;
+ }
+ if (entry >= num_entries)
+ break;
+ }
+ if (printed_mask != 0)
+ printed += printk(") ");
+ else
+ printed += printk(" ");
+ if (cur_column != NULL)
+ *cur_column += printed;
+ return (printed);
+}
+
+void
+ahd_dump_card_state(struct ahd_softc *ahd)
+{
+ struct scb *scb;
+ ahd_mode_state saved_modes;
+ u_int dffstat;
+ int paused;
+ u_int scb_index;
+ u_int saved_scb_index;
+ u_int cur_col;
+ int i;
+
+ if (ahd_is_paused(ahd)) {
+ paused = 1;
+ } else {
+ paused = 0;
+ ahd_pause(ahd);
+ }
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
+ "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
+ ahd_name(ahd),
+ ahd_inw(ahd, CURADDR),
+ ahd_build_mode_state(ahd, ahd->saved_src_mode,
+ ahd->saved_dst_mode));
+ if (paused)
+ printk("Card was paused\n");
+
+ if (ahd_check_cmdcmpltqueues(ahd))
+ printk("Completions are pending\n");
+
+ /*
+ * Mode independent registers.
+ */
+ cur_col = 0;
+ ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50);
+ ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50);
+ ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50);
+ ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
+ ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
+ ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
+ ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
+ ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
+ ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
+ ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
+ ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
+ ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
+ ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
+ ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
+ ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
+ ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
+ ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
+ ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
+ ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50);
+ ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT),
+ &cur_col, 50);
+ ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50);
+ ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID),
+ &cur_col, 50);
+ ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
+ ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
+ ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
+ ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
+ ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
+ ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
+ ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
+ ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
+ ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
+ ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
+ ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
+ ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
+ printk("\n");
+ printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
+ "CURRSCB 0x%x NEXTSCB 0x%x\n",
+ ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
+ ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
+ ahd_inw(ahd, NEXTSCB));
+ cur_col = 0;
+ /* QINFIFO */
+ ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL,
+ ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
+ saved_scb_index = ahd_get_scbptr(ahd);
+ printk("Pending list:");
+ i = 0;
+ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+ if (i++ > AHD_SCB_MAX)
+ break;
+ cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
+ ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
+ ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
+ ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
+ &cur_col, 60);
+ ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
+ &cur_col, 60);
+ }
+ printk("\nTotal %d\n", i);
+
+ printk("Kernel Free SCB list: ");
+ i = 0;
+ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
+ struct scb *list_scb;
+
+ list_scb = scb;
+ do {
+ printk("%d ", SCB_GET_TAG(list_scb));
+ list_scb = LIST_NEXT(list_scb, collision_links);
+ } while (list_scb && i++ < AHD_SCB_MAX);
+ }
+
+ LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
+ if (i++ > AHD_SCB_MAX)
+ break;
+ printk("%d ", SCB_GET_TAG(scb));
+ }
+ printk("\n");
+
+ printk("Sequencer Complete DMA-inprog list: ");
+ scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
+ i = 0;
+ while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+ ahd_set_scbptr(ahd, scb_index);
+ printk("%d ", scb_index);
+ scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ }
+ printk("\n");
+
+ printk("Sequencer Complete list: ");
+ scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
+ i = 0;
+ while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+ ahd_set_scbptr(ahd, scb_index);
+ printk("%d ", scb_index);
+ scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ }
+ printk("\n");
+
+
+ printk("Sequencer DMA-Up and Complete list: ");
+ scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
+ i = 0;
+ while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+ ahd_set_scbptr(ahd, scb_index);
+ printk("%d ", scb_index);
+ scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ }
+ printk("\n");
+ printk("Sequencer On QFreeze and Complete list: ");
+ scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
+ i = 0;
+ while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+ ahd_set_scbptr(ahd, scb_index);
+ printk("%d ", scb_index);
+ scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+ }
+ printk("\n");
+ ahd_set_scbptr(ahd, saved_scb_index);
+ dffstat = ahd_inb(ahd, DFFSTAT);
+ for (i = 0; i < 2; i++) {
+#ifdef AHD_DEBUG
+ struct scb *fifo_scb;
+#endif
+ u_int fifo_scbptr;
+
+ ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
+ fifo_scbptr = ahd_get_scbptr(ahd);
+ printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
+ ahd_name(ahd), i,
+ (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
+ ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
+ cur_col = 0;
+ ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
+ ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
+ ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
+ ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
+ ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
+ &cur_col, 50);
+ ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
+ ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
+ ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
+ ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
+ if (cur_col > 50) {
+ printk("\n");
+ cur_col = 0;
+ }
+ cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ",
+ ahd_inl(ahd, SHADDR+4),
+ ahd_inl(ahd, SHADDR),
+ (ahd_inb(ahd, SHCNT)
+ | (ahd_inb(ahd, SHCNT + 1) << 8)
+ | (ahd_inb(ahd, SHCNT + 2) << 16)));
+ if (cur_col > 50) {
+ printk("\n");
+ cur_col = 0;
+ }
+ cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ",
+ ahd_inl(ahd, HADDR+4),
+ ahd_inl(ahd, HADDR),
+ (ahd_inb(ahd, HCNT)
+ | (ahd_inb(ahd, HCNT + 1) << 8)
+ | (ahd_inb(ahd, HCNT + 2) << 16)));
+ ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_SG) != 0) {
+ fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
+ if (fifo_scb != NULL)
+ ahd_dump_sglist(fifo_scb);
+ }
+#endif
+ }
+ printk("\nLQIN: ");
+ for (i = 0; i < 20; i++)
+ printk("0x%x ", ahd_inb(ahd, LQIN + i));
+ printk("\n");
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
+ ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
+ ahd_inb(ahd, OPTIONMODE));
+ printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
+ ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
+ ahd_inb(ahd, MAXCMDCNT));
+ printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
+ ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
+ ahd_inb(ahd, SAVED_LUN));
+ ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
+ printk("\n");
+ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+ cur_col = 0;
+ ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
+ printk("\n");
+ ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+ printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
+ ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
+ ahd_inw(ahd, DINDEX));
+ printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
+ ahd_name(ahd), ahd_get_scbptr(ahd),
+ ahd_inw_scbram(ahd, SCB_NEXT),
+ ahd_inw_scbram(ahd, SCB_NEXT2));
+ printk("CDB %x %x %x %x %x %x\n",
+ ahd_inb_scbram(ahd, SCB_CDB_STORE),
+ ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
+ ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
+ ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
+ ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
+ ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
+ printk("STACK:");
+ for (i = 0; i < ahd->stack_size; i++) {
+ ahd->saved_stack[i] =
+ ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
+ printk(" 0x%x", ahd->saved_stack[i]);
+ }
+ for (i = ahd->stack_size-1; i >= 0; i--) {
+ ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
+ ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
+ }
+ printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
+ ahd_restore_modes(ahd, saved_modes);
+ if (paused == 0)
+ ahd_unpause(ahd);
+}
+
+#if 0
+void
+ahd_dump_scbs(struct ahd_softc *ahd)
+{
+ ahd_mode_state saved_modes;
+ u_int saved_scb_index;
+ int i;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ saved_scb_index = ahd_get_scbptr(ahd);
+ for (i = 0; i < AHD_SCB_MAX; i++) {
+ ahd_set_scbptr(ahd, i);
+ printk("%3d", i);
+ printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
+ ahd_inb_scbram(ahd, SCB_CONTROL),
+ ahd_inb_scbram(ahd, SCB_SCSIID),
+ ahd_inw_scbram(ahd, SCB_NEXT),
+ ahd_inw_scbram(ahd, SCB_NEXT2),
+ ahd_inl_scbram(ahd, SCB_SGPTR),
+ ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
+ }
+ printk("\n");
+ ahd_set_scbptr(ahd, saved_scb_index);
+ ahd_restore_modes(ahd, saved_modes);
+}
+#endif /* 0 */
+
+/**************************** Flexport Logic **********************************/
+/*
+ * Read count 16bit words from 16bit word address start_addr from the
+ * SEEPROM attached to the controller, into buf, using the controller's
+ * SEEPROM reading state machine. Optionally treat the data as a byte
+ * stream in terms of byte order.
+ */
+int
+ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+ u_int start_addr, u_int count, int bytestream)
+{
+ u_int cur_addr;
+ u_int end_addr;
+ int error;
+
+ /*
+ * If we never make it through the loop even once,
+ * we were passed invalid arguments.
+ */
+ error = EINVAL;
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ end_addr = start_addr + count;
+ for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
+
+ ahd_outb(ahd, SEEADR, cur_addr);
+ ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
+
+ error = ahd_wait_seeprom(ahd);
+ if (error)
+ break;
+ if (bytestream != 0) {
+ uint8_t *bytestream_ptr;
+
+ bytestream_ptr = (uint8_t *)buf;
+ *bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
+ *bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
+ } else {
+ /*
+ * ahd_inw() already handles machine byte order.
+ */
+ *buf = ahd_inw(ahd, SEEDAT);
+ }
+ buf++;
+ }
+ return (error);
+}
+
+/*
+ * Write count 16bit words from buf, into SEEPROM attache to the
+ * controller starting at 16bit word address start_addr, using the
+ * controller's SEEPROM writing state machine.
+ */
+int
+ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+ u_int start_addr, u_int count)
+{
+ u_int cur_addr;
+ u_int end_addr;
+ int error;
+ int retval;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ error = ENOENT;
+
+ /* Place the chip into write-enable mode */
+ ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
+ ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
+ error = ahd_wait_seeprom(ahd);
+ if (error)
+ return (error);
+
+ /*
+ * Write the data. If we don't get through the loop at
+ * least once, the arguments were invalid.
+ */
+ retval = EINVAL;
+ end_addr = start_addr + count;
+ for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
+ ahd_outw(ahd, SEEDAT, *buf++);
+ ahd_outb(ahd, SEEADR, cur_addr);
+ ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
+
+ retval = ahd_wait_seeprom(ahd);
+ if (retval)
+ break;
+ }
+
+ /*
+ * Disable writes.
+ */
+ ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
+ ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
+ error = ahd_wait_seeprom(ahd);
+ if (error)
+ return (error);
+ return (retval);
+}
+
+/*
+ * Wait ~100us for the serial eeprom to satisfy our request.
+ */
+static int
+ahd_wait_seeprom(struct ahd_softc *ahd)
+{
+ int cnt;
+
+ cnt = 5000;
+ while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
+ ahd_delay(5);
+
+ if (cnt == 0)
+ return (ETIMEDOUT);
+ return (0);
+}
+
+/*
+ * Validate the two checksums in the per_channel
+ * vital product data struct.
+ */
+static int
+ahd_verify_vpd_cksum(struct vpd_config *vpd)
+{
+ int i;
+ int maxaddr;
+ uint32_t checksum;
+ uint8_t *vpdarray;
+
+ vpdarray = (uint8_t *)vpd;
+ maxaddr = offsetof(struct vpd_config, vpd_checksum);
+ checksum = 0;
+ for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
+ checksum = checksum + vpdarray[i];
+ if (checksum == 0
+ || (-checksum & 0xFF) != vpd->vpd_checksum)
+ return (0);
+
+ checksum = 0;
+ maxaddr = offsetof(struct vpd_config, checksum);
+ for (i = offsetof(struct vpd_config, default_target_flags);
+ i < maxaddr; i++)
+ checksum = checksum + vpdarray[i];
+ if (checksum == 0
+ || (-checksum & 0xFF) != vpd->checksum)
+ return (0);
+ return (1);
+}
+
+int
+ahd_verify_cksum(struct seeprom_config *sc)
+{
+ int i;
+ int maxaddr;
+ uint32_t checksum;
+ uint16_t *scarray;
+
+ maxaddr = (sizeof(*sc)/2) - 1;
+ checksum = 0;
+ scarray = (uint16_t *)sc;
+
+ for (i = 0; i < maxaddr; i++)
+ checksum = checksum + scarray[i];
+ if (checksum == 0
+ || (checksum & 0xFFFF) != sc->checksum) {
+ return (0);
+ } else {
+ return (1);
+ }
+}
+
+int
+ahd_acquire_seeprom(struct ahd_softc *ahd)
+{
+ /*
+ * We should be able to determine the SEEPROM type
+ * from the flexport logic, but unfortunately not
+ * all implementations have this logic and there is
+ * no programatic method for determining if the logic
+ * is present.
+ */
+ return (1);
+#if 0
+ uint8_t seetype;
+ int error;
+
+ error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
+ if (error != 0
+ || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
+ return (0);
+ return (1);
+#endif
+}
+
+void
+ahd_release_seeprom(struct ahd_softc *ahd)
+{
+ /* Currently a no-op */
+}
+
+/*
+ * Wait at most 2 seconds for flexport arbitration to succeed.
+ */
+static int
+ahd_wait_flexport(struct ahd_softc *ahd)
+{
+ int cnt;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ cnt = 1000000 * 2 / 5;
+ while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
+ ahd_delay(5);
+
+ if (cnt == 0)
+ return (ETIMEDOUT);
+ return (0);
+}
+
+int
+ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
+{
+ int error;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ if (addr > 7)
+ panic("ahd_write_flexport: address out of range");
+ ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
+ error = ahd_wait_flexport(ahd);
+ if (error != 0)
+ return (error);
+ ahd_outb(ahd, BRDDAT, value);
+ ahd_flush_device_writes(ahd);
+ ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
+ ahd_flush_device_writes(ahd);
+ ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
+ ahd_flush_device_writes(ahd);
+ ahd_outb(ahd, BRDCTL, 0);
+ ahd_flush_device_writes(ahd);
+ return (0);
+}
+
+int
+ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
+{
+ int error;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ if (addr > 7)
+ panic("ahd_read_flexport: address out of range");
+ ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
+ error = ahd_wait_flexport(ahd);
+ if (error != 0)
+ return (error);
+ *value = ahd_inb(ahd, BRDDAT);
+ ahd_outb(ahd, BRDCTL, 0);
+ ahd_flush_device_writes(ahd);
+ return (0);
+}
+
+/************************* Target Mode ****************************************/
+#ifdef AHD_TARGET_MODE
+cam_status
+ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
+ struct ahd_tmode_tstate **tstate,
+ struct ahd_tmode_lstate **lstate,
+ int notfound_failure)
+{
+
+ if ((ahd->features & AHD_TARGETMODE) == 0)
+ return (CAM_REQ_INVALID);
+
+ /*
+ * Handle the 'black hole' device that sucks up
+ * requests to unattached luns on enabled targets.
+ */
+ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
+ && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
+ *tstate = NULL;
+ *lstate = ahd->black_hole;
+ } else {
+ u_int max_id;
+
+ max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
+ if (ccb->ccb_h.target_id >= max_id)
+ return (CAM_TID_INVALID);
+
+ if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
+ return (CAM_LUN_INVALID);
+
+ *tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
+ *lstate = NULL;
+ if (*tstate != NULL)
+ *lstate =
+ (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
+ }
+
+ if (notfound_failure != 0 && *lstate == NULL)
+ return (CAM_PATH_INVALID);
+
+ return (CAM_REQ_CMP);
+}
+
+void
+ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
+{
+#if NOT_YET
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_tmode_lstate *lstate;
+ struct ccb_en_lun *cel;
+ cam_status status;
+ u_int target;
+ u_int lun;
+ u_int target_mask;
+ u_long s;
+ char channel;
+
+ status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
+ /*notfound_failure*/FALSE);
+
+ if (status != CAM_REQ_CMP) {
+ ccb->ccb_h.status = status;
+ return;
+ }
+
+ if ((ahd->features & AHD_MULTIROLE) != 0) {
+ u_int our_id;
+
+ our_id = ahd->our_id;
+ if (ccb->ccb_h.target_id != our_id) {
+ if ((ahd->features & AHD_MULTI_TID) != 0
+ && (ahd->flags & AHD_INITIATORROLE) != 0) {
+ /*
+ * Only allow additional targets if
+ * the initiator role is disabled.
+ * The hardware cannot handle a re-select-in
+ * on the initiator id during a re-select-out
+ * on a different target id.
+ */
+ status = CAM_TID_INVALID;
+ } else if ((ahd->flags & AHD_INITIATORROLE) != 0
+ || ahd->enabled_luns > 0) {
+ /*
+ * Only allow our target id to change
+ * if the initiator role is not configured
+ * and there are no enabled luns which
+ * are attached to the currently registered
+ * scsi id.
+ */
+ status = CAM_TID_INVALID;
+ }
+ }
+ }
+
+ if (status != CAM_REQ_CMP) {
+ ccb->ccb_h.status = status;
+ return;
+ }
+
+ /*
+ * We now have an id that is valid.
+ * If we aren't in target mode, switch modes.
+ */
+ if ((ahd->flags & AHD_TARGETROLE) == 0
+ && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
+ u_long s;
+
+ printk("Configuring Target Mode\n");
+ ahd_lock(ahd, &s);
+ if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
+ ccb->ccb_h.status = CAM_BUSY;
+ ahd_unlock(ahd, &s);
+ return;
+ }
+ ahd->flags |= AHD_TARGETROLE;
+ if ((ahd->features & AHD_MULTIROLE) == 0)
+ ahd->flags &= ~AHD_INITIATORROLE;
+ ahd_pause(ahd);
+ ahd_loadseq(ahd);
+ ahd_restart(ahd);
+ ahd_unlock(ahd, &s);
+ }
+ cel = &ccb->cel;
+ target = ccb->ccb_h.target_id;
+ lun = ccb->ccb_h.target_lun;
+ channel = SIM_CHANNEL(ahd, sim);
+ target_mask = 0x01 << target;
+ if (channel == 'B')
+ target_mask <<= 8;
+
+ if (cel->enable != 0) {
+ u_int scsiseq1;
+
+ /* Are we already enabled?? */
+ if (lstate != NULL) {
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Lun already enabled\n");
+ ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
+ return;
+ }
+
+ if (cel->grp6_len != 0
+ || cel->grp7_len != 0) {
+ /*
+ * Don't (yet?) support vendor
+ * specific commands.
+ */
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ printk("Non-zero Group Codes\n");
+ return;
+ }
+
+ /*
+ * Seems to be okay.
+ * Setup our data structures.
+ */
+ if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
+ tstate = ahd_alloc_tstate(ahd, target, channel);
+ if (tstate == NULL) {
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Couldn't allocate tstate\n");
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ return;
+ }
+ }
+ lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
+ if (lstate == NULL) {
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Couldn't allocate lstate\n");
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ return;
+ }
+ status = xpt_create_path(&lstate->path, /*periph*/NULL,
+ xpt_path_path_id(ccb->ccb_h.path),
+ xpt_path_target_id(ccb->ccb_h.path),
+ xpt_path_lun_id(ccb->ccb_h.path));
+ if (status != CAM_REQ_CMP) {
+ kfree(lstate);
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Couldn't allocate path\n");
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ return;
+ }
+ SLIST_INIT(&lstate->accept_tios);
+ SLIST_INIT(&lstate->immed_notifies);
+ ahd_lock(ahd, &s);
+ ahd_pause(ahd);
+ if (target != CAM_TARGET_WILDCARD) {
+ tstate->enabled_luns[lun] = lstate;
+ ahd->enabled_luns++;
+
+ if ((ahd->features & AHD_MULTI_TID) != 0) {
+ u_int targid_mask;
+
+ targid_mask = ahd_inw(ahd, TARGID);
+ targid_mask |= target_mask;
+ ahd_outw(ahd, TARGID, targid_mask);
+ ahd_update_scsiid(ahd, targid_mask);
+ } else {
+ u_int our_id;
+ char channel;
+
+ channel = SIM_CHANNEL(ahd, sim);
+ our_id = SIM_SCSI_ID(ahd, sim);
+
+ /*
+ * This can only happen if selections
+ * are not enabled
+ */
+ if (target != our_id) {
+ u_int sblkctl;
+ char cur_channel;
+ int swap;
+
+ sblkctl = ahd_inb(ahd, SBLKCTL);
+ cur_channel = (sblkctl & SELBUSB)
+ ? 'B' : 'A';
+ if ((ahd->features & AHD_TWIN) == 0)
+ cur_channel = 'A';
+ swap = cur_channel != channel;
+ ahd->our_id = target;
+
+ if (swap)
+ ahd_outb(ahd, SBLKCTL,
+ sblkctl ^ SELBUSB);
+
+ ahd_outb(ahd, SCSIID, target);
+
+ if (swap)
+ ahd_outb(ahd, SBLKCTL, sblkctl);
+ }
+ }
+ } else
+ ahd->black_hole = lstate;
+ /* Allow select-in operations */
+ if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
+ scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
+ scsiseq1 |= ENSELI;
+ ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
+ scsiseq1 = ahd_inb(ahd, SCSISEQ1);
+ scsiseq1 |= ENSELI;
+ ahd_outb(ahd, SCSISEQ1, scsiseq1);
+ }
+ ahd_unpause(ahd);
+ ahd_unlock(ahd, &s);
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Lun now enabled for target mode\n");
+ } else {
+ struct scb *scb;
+ int i, empty;
+
+ if (lstate == NULL) {
+ ccb->ccb_h.status = CAM_LUN_INVALID;
+ return;
+ }
+
+ ahd_lock(ahd, &s);
+
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+ struct ccb_hdr *ccbh;
+
+ ccbh = &scb->io_ctx->ccb_h;
+ if (ccbh->func_code == XPT_CONT_TARGET_IO
+ && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
+ printk("CTIO pending\n");
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ ahd_unlock(ahd, &s);
+ return;
+ }
+ }
+
+ if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
+ printk("ATIOs pending\n");
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ }
+
+ if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
+ printk("INOTs pending\n");
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ }
+
+ if (ccb->ccb_h.status != CAM_REQ_CMP) {
+ ahd_unlock(ahd, &s);
+ return;
+ }
+
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Target mode disabled\n");
+ xpt_free_path(lstate->path);
+ kfree(lstate);
+
+ ahd_pause(ahd);
+ /* Can we clean up the target too? */
+ if (target != CAM_TARGET_WILDCARD) {
+ tstate->enabled_luns[lun] = NULL;
+ ahd->enabled_luns--;
+ for (empty = 1, i = 0; i < 8; i++)
+ if (tstate->enabled_luns[i] != NULL) {
+ empty = 0;
+ break;
+ }
+
+ if (empty) {
+ ahd_free_tstate(ahd, target, channel,
+ /*force*/FALSE);
+ if (ahd->features & AHD_MULTI_TID) {
+ u_int targid_mask;
+
+ targid_mask = ahd_inw(ahd, TARGID);
+ targid_mask &= ~target_mask;
+ ahd_outw(ahd, TARGID, targid_mask);
+ ahd_update_scsiid(ahd, targid_mask);
+ }
+ }
+ } else {
+
+ ahd->black_hole = NULL;
+
+ /*
+ * We can't allow selections without
+ * our black hole device.
+ */
+ empty = TRUE;
+ }
+ if (ahd->enabled_luns == 0) {
+ /* Disallow select-in */
+ u_int scsiseq1;
+
+ scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
+ scsiseq1 &= ~ENSELI;
+ ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
+ scsiseq1 = ahd_inb(ahd, SCSISEQ1);
+ scsiseq1 &= ~ENSELI;
+ ahd_outb(ahd, SCSISEQ1, scsiseq1);
+
+ if ((ahd->features & AHD_MULTIROLE) == 0) {
+ printk("Configuring Initiator Mode\n");
+ ahd->flags &= ~AHD_TARGETROLE;
+ ahd->flags |= AHD_INITIATORROLE;
+ ahd_pause(ahd);
+ ahd_loadseq(ahd);
+ ahd_restart(ahd);
+ /*
+ * Unpaused. The extra unpause
+ * that follows is harmless.
+ */
+ }
+ }
+ ahd_unpause(ahd);
+ ahd_unlock(ahd, &s);
+ }
+#endif
+}
+
+static void
+ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
+{
+#if NOT_YET
+ u_int scsiid_mask;
+ u_int scsiid;
+
+ if ((ahd->features & AHD_MULTI_TID) == 0)
+ panic("ahd_update_scsiid called on non-multitid unit\n");
+
+ /*
+ * Since we will rely on the TARGID mask
+ * for selection enables, ensure that OID
+ * in SCSIID is not set to some other ID
+ * that we don't want to allow selections on.
+ */
+ if ((ahd->features & AHD_ULTRA2) != 0)
+ scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
+ else
+ scsiid = ahd_inb(ahd, SCSIID);
+ scsiid_mask = 0x1 << (scsiid & OID);
+ if ((targid_mask & scsiid_mask) == 0) {
+ u_int our_id;
+
+ /* ffs counts from 1 */
+ our_id = ffs(targid_mask);
+ if (our_id == 0)
+ our_id = ahd->our_id;
+ else
+ our_id--;
+ scsiid &= TID;
+ scsiid |= our_id;
+ }
+ if ((ahd->features & AHD_ULTRA2) != 0)
+ ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
+ else
+ ahd_outb(ahd, SCSIID, scsiid);
+#endif
+}
+
+static void
+ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
+{
+ struct target_cmd *cmd;
+
+ ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
+ while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {
+
+ /*
+ * Only advance through the queue if we
+ * have the resources to process the command.
+ */
+ if (ahd_handle_target_cmd(ahd, cmd) != 0)
+ break;
+
+ cmd->cmd_valid = 0;
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+ ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
+ sizeof(struct target_cmd),
+ BUS_DMASYNC_PREREAD);
+ ahd->tqinfifonext++;
+
+ /*
+ * Lazily update our position in the target mode incoming
+ * command queue as seen by the sequencer.
+ */
+ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
+ u_int hs_mailbox;
+
+ hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
+ hs_mailbox &= ~HOST_TQINPOS;
+ hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
+ ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
+ }
+ }
+}
+
+static int
+ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
+{
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_tmode_lstate *lstate;
+ struct ccb_accept_tio *atio;
+ uint8_t *byte;
+ int initiator;
+ int target;
+ int lun;
+
+ initiator = SCSIID_TARGET(ahd, cmd->scsiid);
+ target = SCSIID_OUR_ID(cmd->scsiid);
+ lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);
+
+ byte = cmd->bytes;
+ tstate = ahd->enabled_targets[target];
+ lstate = NULL;
+ if (tstate != NULL)
+ lstate = tstate->enabled_luns[lun];
+
+ /*
+ * Commands for disabled luns go to the black hole driver.
+ */
+ if (lstate == NULL)
+ lstate = ahd->black_hole;
+
+ atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
+ if (atio == NULL) {
+ ahd->flags |= AHD_TQINFIFO_BLOCKED;
+ /*
+ * Wait for more ATIOs from the peripheral driver for this lun.
+ */
+ return (1);
+ } else
+ ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_TQIN) != 0)
+ printk("Incoming command from %d for %d:%d%s\n",
+ initiator, target, lun,
+ lstate == ahd->black_hole ? "(Black Holed)" : "");
+#endif
+ SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
+
+ if (lstate == ahd->black_hole) {
+ /* Fill in the wildcards */
+ atio->ccb_h.target_id = target;
+ atio->ccb_h.target_lun = lun;
+ }
+
+ /*
+ * Package it up and send it off to
+ * whomever has this lun enabled.
+ */
+ atio->sense_len = 0;
+ atio->init_id = initiator;
+ if (byte[0] != 0xFF) {
+ /* Tag was included */
+ atio->tag_action = *byte++;
+ atio->tag_id = *byte++;
+ atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
+ } else {
+ atio->ccb_h.flags = 0;
+ }
+ byte++;
+
+ /* Okay. Now determine the cdb size based on the command code */
+ switch (*byte >> CMD_GROUP_CODE_SHIFT) {
+ case 0:
+ atio->cdb_len = 6;
+ break;
+ case 1:
+ case 2:
+ atio->cdb_len = 10;
+ break;
+ case 4:
+ atio->cdb_len = 16;
+ break;
+ case 5:
+ atio->cdb_len = 12;
+ break;
+ case 3:
+ default:
+ /* Only copy the opcode. */
+ atio->cdb_len = 1;
+ printk("Reserved or VU command code type encountered\n");
+ break;
+ }
+
+ memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
+
+ atio->ccb_h.status |= CAM_CDB_RECVD;
+
+ if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
+ /*
+ * We weren't allowed to disconnect.
+ * We're hanging on the bus until a
+ * continue target I/O comes in response
+ * to this accept tio.
+ */
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_TQIN) != 0)
+ printk("Received Immediate Command %d:%d:%d - %p\n",
+ initiator, target, lun, ahd->pending_device);
+#endif
+ ahd->pending_device = lstate;
+ ahd_freeze_ccb((union ccb *)atio);
+ atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
+ }
+ xpt_done((union ccb*)atio);
+ return (0);
+}
+
+#endif
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
new file mode 100644
index 000000000..09335a3c8
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -0,0 +1,172 @@
+/*
+ * Inline routines shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#59 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC79XX_INLINE_H_
+#define _AIC79XX_INLINE_H_
+
+/******************************** Debugging ***********************************/
+static inline char *ahd_name(struct ahd_softc *ahd);
+
+static inline char *ahd_name(struct ahd_softc *ahd)
+{
+ return (ahd->name);
+}
+
+/************************ Sequencer Execution Control *************************/
+static inline void ahd_known_modes(struct ahd_softc *ahd,
+ ahd_mode src, ahd_mode dst);
+static inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
+ ahd_mode src,
+ ahd_mode dst);
+static inline void ahd_extract_mode_state(struct ahd_softc *ahd,
+ ahd_mode_state state,
+ ahd_mode *src, ahd_mode *dst);
+
+void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
+ ahd_mode dst);
+ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
+void ahd_restore_modes(struct ahd_softc *ahd,
+ ahd_mode_state state);
+int ahd_is_paused(struct ahd_softc *ahd);
+void ahd_pause(struct ahd_softc *ahd);
+void ahd_unpause(struct ahd_softc *ahd);
+
+static inline void
+ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+ ahd->src_mode = src;
+ ahd->dst_mode = dst;
+ ahd->saved_src_mode = src;
+ ahd->saved_dst_mode = dst;
+}
+
+static inline ahd_mode_state
+ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+ return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
+}
+
+static inline void
+ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
+ ahd_mode *src, ahd_mode *dst)
+{
+ *src = (state & SRC_MODE) >> SRC_MODE_SHIFT;
+ *dst = (state & DST_MODE) >> DST_MODE_SHIFT;
+}
+
+/*********************** Scatter Gather List Handling *************************/
+void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, dma_addr_t addr,
+ bus_size_t len, int last);
+
+/************************** Memory mapping routines ***************************/
+static inline size_t ahd_sg_size(struct ahd_softc *ahd);
+
+void ahd_sync_sglist(struct ahd_softc *ahd,
+ struct scb *scb, int op);
+
+static inline size_t ahd_sg_size(struct ahd_softc *ahd)
+{
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+ return (sizeof(struct ahd_dma64_seg));
+ return (sizeof(struct ahd_dma_seg));
+}
+
+/*********************** Miscellaneous Support Functions ***********************/
+struct ahd_initiator_tinfo *
+ ahd_fetch_transinfo(struct ahd_softc *ahd,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahd_tmode_tstate **tstate);
+uint16_t
+ ahd_inw(struct ahd_softc *ahd, u_int port);
+void ahd_outw(struct ahd_softc *ahd, u_int port,
+ u_int value);
+uint32_t
+ ahd_inl(struct ahd_softc *ahd, u_int port);
+void ahd_outl(struct ahd_softc *ahd, u_int port,
+ uint32_t value);
+uint64_t
+ ahd_inq(struct ahd_softc *ahd, u_int port);
+void ahd_outq(struct ahd_softc *ahd, u_int port,
+ uint64_t value);
+u_int ahd_get_scbptr(struct ahd_softc *ahd);
+void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
+u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
+u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
+struct scb *
+ ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
+void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+
+static inline uint8_t *ahd_get_sense_buf(struct ahd_softc *ahd,
+ struct scb *scb);
+static inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd,
+ struct scb *scb);
+
+#if 0 /* unused */
+
+#define AHD_COPY_COL_IDX(dst, src) \
+do { \
+ dst->hscb->scsiid = src->hscb->scsiid; \
+ dst->hscb->lun = src->hscb->lun; \
+} while (0)
+
+#endif
+
+static inline uint8_t *
+ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
+{
+ return (scb->sense_data);
+}
+
+static inline uint32_t
+ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
+{
+ return (scb->sense_busaddr);
+}
+
+/************************** Interrupt Processing ******************************/
+int ahd_intr(struct ahd_softc *ahd);
+
+#endif /* _AIC79XX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
new file mode 100644
index 000000000..ce96a0be3
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -0,0 +1,2886 @@
+/*
+ * Adaptec AIC79xx device driver for Linux.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $
+ *
+ * --------------------------------------------------------------------------
+ * Copyright (c) 1994-2000 Justin T. Gibbs.
+ * Copyright (c) 1997-1999 Doug Ledford
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include <scsi/scsicam.h>
+
+static struct scsi_transport_template *ahd_linux_transport_template = NULL;
+
+#include <linux/init.h> /* __setup */
+#include <linux/mm.h> /* For fetching system memory size */
+#include <linux/blkdev.h> /* For block_size() */
+#include <linux/delay.h> /* For ssleep/msleep */
+#include <linux/device.h>
+#include <linux/slab.h>
+
+/*
+ * Bucket size for counting good commands in between bad ones.
+ */
+#define AHD_LINUX_ERR_THRESH 1000
+
+/*
+ * Set this to the delay in seconds after SCSI bus reset.
+ * Note, we honor this only for the initial bus reset.
+ * The scsi error recovery code performs its own bus settle
+ * delay handling for error recovery actions.
+ */
+#ifdef CONFIG_AIC79XX_RESET_DELAY_MS
+#define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
+#else
+#define AIC79XX_RESET_DELAY 5000
+#endif
+
+/*
+ * To change the default number of tagged transactions allowed per-device,
+ * add a line to the lilo.conf file like:
+ * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * The tag_commands is an array of 16 to allow for wide and twin adapters.
+ * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
+ * for channel 1.
+ */
+typedef struct {
+ uint16_t tag_commands[16]; /* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Modify this as you see fit for your system.
+ *
+ * 0 tagged queuing disabled
+ * 1 <= n <= 253 n == max tags ever dispatched.
+ *
+ * The driver will throttle the number of commands dispatched to a
+ * device if it returns queue full. For devices with a fixed maximum
+ * queue depth, the driver will eventually determine this depth and
+ * lock it in (a console message is printed to indicate that a lock
+ * has occurred). On some devices, queue full is returned for a temporary
+ * resource shortage. These devices will return queue full at varying
+ * depths. The driver will throttle back when the queue fulls occur and
+ * attempt to slowly increase the depth over time as the device recovers
+ * from the resource shortage.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic79xx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to attempt to use up to 64 tags for ID 1.
+ *
+ * The third line is the same as the first line.
+ *
+ * The fourth line disables tagged queueing for devices 0 and 3. It
+ * enables tagged queueing for the other IDs, with 16 commands/LUN
+ * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
+ * IDs 2, 5-7, and 9-15.
+ */
+
+/*
+ * NOTE: The below structure is for reference only, the actual structure
+ * to modify in order to change things is just below this comment block.
+adapter_tag_info_t aic79xx_tag_info[] =
+{
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
+};
+*/
+
+#ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE
+#define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE
+#else
+#define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE
+#endif
+
+#define AIC79XX_CONFIGED_TAG_COMMANDS { \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
+ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE \
+}
+
+/*
+ * By default, use the number of commands specified by
+ * the users kernel configuration.
+ */
+static adapter_tag_info_t aic79xx_tag_info[] =
+{
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS},
+ {AIC79XX_CONFIGED_TAG_COMMANDS}
+};
+
+/*
+ * The I/O cell on the chip is very configurable in respect to its analog
+ * characteristics. Set the defaults here; they can be overriden with
+ * the proper insmod parameters.
+ */
+struct ahd_linux_iocell_opts
+{
+ uint8_t precomp;
+ uint8_t slewrate;
+ uint8_t amplitude;
+};
+#define AIC79XX_DEFAULT_PRECOMP 0xFF
+#define AIC79XX_DEFAULT_SLEWRATE 0xFF
+#define AIC79XX_DEFAULT_AMPLITUDE 0xFF
+#define AIC79XX_DEFAULT_IOOPTS \
+{ \
+ AIC79XX_DEFAULT_PRECOMP, \
+ AIC79XX_DEFAULT_SLEWRATE, \
+ AIC79XX_DEFAULT_AMPLITUDE \
+}
+#define AIC79XX_PRECOMP_INDEX 0
+#define AIC79XX_SLEWRATE_INDEX 1
+#define AIC79XX_AMPLITUDE_INDEX 2
+static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+{
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS,
+ AIC79XX_DEFAULT_IOOPTS
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW DID_ERROR
+
+void
+ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
+{
+ printk("(scsi%d:%c:%d:%d): ",
+ ahd->platform_data->host->host_no,
+ scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X',
+ scb != NULL ? SCB_GET_TARGET(ahd, scb) : -1,
+ scb != NULL ? SCB_GET_LUN(scb) : -1);
+}
+
+/*
+ * XXX - these options apply unilaterally to _all_ adapters
+ * cards in the system. This should be fixed. Exceptions to this
+ * rule are noted in the comments.
+ */
+
+/*
+ * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
+ * has no effect on any later resets that might occur due to things like
+ * SCSI bus timeouts.
+ */
+static uint32_t aic79xx_no_reset;
+
+/*
+ * Should we force EXTENDED translation on a controller.
+ * 0 == Use whatever is in the SEEPROM or default to off
+ * 1 == Use whatever is in the SEEPROM or default to on
+ */
+static uint32_t aic79xx_extended;
+
+/*
+ * PCI bus parity checking of the Adaptec controllers. This is somewhat
+ * dubious at best. To my knowledge, this option has never actually
+ * solved a PCI parity problem, but on certain machines with broken PCI
+ * chipset configurations, it can generate tons of false error messages.
+ * It's included in the driver for completeness.
+ * 0 = Shut off PCI parity check
+ * non-0 = Enable PCI parity check
+ *
+ * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
+ * variable to -1 you would actually want to simply pass the variable
+ * name without a number. That will invert the 0 which will result in
+ * -1.
+ */
+static uint32_t aic79xx_pci_parity = ~0;
+
+/*
+ * There are lots of broken chipsets in the world. Some of them will
+ * violate the PCI spec when we issue byte sized memory writes to our
+ * controller. I/O mapped register access, if allowed by the given
+ * platform, will work in almost all cases.
+ */
+uint32_t aic79xx_allow_memio = ~0;
+
+/*
+ * So that we can set how long each device is given as a selection timeout.
+ * The table of values goes like this:
+ * 0 - 256ms
+ * 1 - 128ms
+ * 2 - 64ms
+ * 3 - 32ms
+ * We default to 256ms because some older devices need a longer time
+ * to respond to initial selection.
+ */
+static uint32_t aic79xx_seltime;
+
+/*
+ * Certain devices do not perform any aging on commands. Should the
+ * device be saturated by commands in one portion of the disk, it is
+ * possible for transactions on far away sectors to never be serviced.
+ * To handle these devices, we can periodically send an ordered tag to
+ * force all outstanding transactions to be serviced prior to a new
+ * transaction.
+ */
+static uint32_t aic79xx_periodic_otag;
+
+/* Some storage boxes are using an LSI chip which has a bug making it
+ * impossible to use aic79xx Rev B chip in 320 speeds. The following
+ * storage boxes have been reported to be buggy:
+ * EonStor 3U 16-Bay: U16U-G3A3
+ * EonStor 2U 12-Bay: U12U-G3A3
+ * SentinelRAID: 2500F R5 / R6
+ * SentinelRAID: 2500F R1
+ * SentinelRAID: 2500F/1500F
+ * SentinelRAID: 150F
+ *
+ * To get around this LSI bug, you can set your board to 160 mode
+ * or you can enable the SLOWCRC bit.
+ */
+uint32_t aic79xx_slowcrc;
+
+/*
+ * Module information and settable options.
+ */
+static char *aic79xx = NULL;
+
+MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("Adaptec AIC790X U320 SCSI Host Bus Adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(AIC79XX_DRIVER_VERSION);
+module_param(aic79xx, charp, 0444);
+MODULE_PARM_DESC(aic79xx,
+"period-delimited options string:\n"
+" verbose Enable verbose/diagnostic logging\n"
+" allow_memio Allow device registers to be memory mapped\n"
+" debug Bitmask of debug values to enable\n"
+" no_reset Suppress initial bus resets\n"
+" extended Enable extended geometry on all controllers\n"
+" periodic_otag Send an ordered tagged transaction\n"
+" periodically to prevent tag starvation.\n"
+" This may be required by some older disk\n"
+" or drives/RAID arrays.\n"
+" tag_info:<tag_str> Set per-target tag depth\n"
+" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
+" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
+" precomp:<pcomp_list> Set the signal precompensation (0-7).\n"
+" amplitude:<int> Set the signal amplitude (0-7).\n"
+" seltime:<int> Selection Timeout:\n"
+" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
+" slowcrc Turn on the SLOWCRC bit (Rev B only)\n"
+"\n"
+" Sample modprobe configuration file:\n"
+" # Enable verbose logging\n"
+" # Set tag depth on Controller 2/Target 2 to 10 tags\n"
+" # Shorten the selection timeout to 128ms\n"
+"\n"
+" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
+);
+
+static void ahd_linux_handle_scsi_status(struct ahd_softc *,
+ struct scsi_device *,
+ struct scb *);
+static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
+ struct scsi_cmnd *cmd);
+static int ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd);
+static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
+static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo);
+static void ahd_linux_device_queue_depth(struct scsi_device *);
+static int ahd_linux_run_command(struct ahd_softc*,
+ struct ahd_linux_device *,
+ struct scsi_cmnd *);
+static void ahd_linux_setup_tag_info_global(char *p);
+static int aic79xx_setup(char *c);
+static void ahd_freeze_simq(struct ahd_softc *ahd);
+static void ahd_release_simq(struct ahd_softc *ahd);
+
+static int ahd_linux_unit;
+
+
+/************************** OS Utility Wrappers *******************************/
+void ahd_delay(long);
+void
+ahd_delay(long usec)
+{
+ /*
+ * udelay on Linux can have problems for
+ * multi-millisecond waits. Wait at most
+ * 1024us per call.
+ */
+ while (usec > 0) {
+ udelay(usec % 1024);
+ usec -= 1024;
+ }
+}
+
+
+/***************************** Low Level I/O **********************************/
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
+ long port, uint16_t val);
+void ahd_outsb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+void ahd_insb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+
+uint8_t
+ahd_inb(struct ahd_softc * ahd, long port)
+{
+ uint8_t x;
+
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ x = readb(ahd->bshs[0].maddr + port);
+ } else {
+ x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+ }
+ mb();
+ return (x);
+}
+
+#if 0 /* unused */
+static uint16_t
+ahd_inw_atomic(struct ahd_softc * ahd, long port)
+{
+ uint8_t x;
+
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ x = readw(ahd->bshs[0].maddr + port);
+ } else {
+ x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+ }
+ mb();
+ return (x);
+}
+#endif
+
+void
+ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
+{
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ writeb(val, ahd->bshs[0].maddr + port);
+ } else {
+ outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+ }
+ mb();
+}
+
+void
+ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
+{
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ writew(val, ahd->bshs[0].maddr + port);
+ } else {
+ outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+ }
+ mb();
+}
+
+void
+ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ ahd_outb(ahd, port, *array++);
+}
+
+void
+ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ *array++ = ahd_inb(ahd, port);
+}
+
+/******************************* PCI Routines *********************************/
+uint32_t
+ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
+{
+ switch (width) {
+ case 1:
+ {
+ uint8_t retval;
+
+ pci_read_config_byte(pci, reg, &retval);
+ return (retval);
+ }
+ case 2:
+ {
+ uint16_t retval;
+ pci_read_config_word(pci, reg, &retval);
+ return (retval);
+ }
+ case 4:
+ {
+ uint32_t retval;
+ pci_read_config_dword(pci, reg, &retval);
+ return (retval);
+ }
+ default:
+ panic("ahd_pci_read_config: Read size too big");
+ /* NOTREACHED */
+ return (0);
+ }
+}
+
+void
+ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+ switch (width) {
+ case 1:
+ pci_write_config_byte(pci, reg, value);
+ break;
+ case 2:
+ pci_write_config_word(pci, reg, value);
+ break;
+ case 4:
+ pci_write_config_dword(pci, reg, value);
+ break;
+ default:
+ panic("ahd_pci_write_config: Write size too big");
+ /* NOTREACHED */
+ }
+}
+
+/****************************** Inlines ***************************************/
+static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
+
+static void
+ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = scb->io_ctx;
+ ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
+ scsi_dma_unmap(cmd);
+}
+
+/******************************** Macros **************************************/
+#define BUILD_SCSIID(ahd, cmd) \
+ (((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id)
+
+/*
+ * Return a string describing the driver.
+ */
+static const char *
+ahd_linux_info(struct Scsi_Host *host)
+{
+ static char buffer[512];
+ char ahd_info[256];
+ char *bp;
+ struct ahd_softc *ahd;
+
+ bp = &buffer[0];
+ ahd = *(struct ahd_softc **)host->hostdata;
+ memset(bp, 0, sizeof(buffer));
+ strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
+ " <");
+ strcat(bp, ahd->description);
+ strcat(bp, ">\n"
+ " ");
+ ahd_controller_info(ahd, ahd_info);
+ strcat(bp, ahd_info);
+
+ return (bp);
+}
+
+/*
+ * Queue an SCB to the controller.
+ */
+static int
+ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+{
+ struct ahd_softc *ahd;
+ struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
+ int rtn = SCSI_MLQUEUE_HOST_BUSY;
+
+ ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+
+ cmd->scsi_done = scsi_done;
+ cmd->result = CAM_REQ_INPROG << 16;
+ rtn = ahd_linux_run_command(ahd, dev, cmd);
+
+ return rtn;
+}
+
+static DEF_SCSI_QCMD(ahd_linux_queue)
+
+static struct scsi_target **
+ahd_linux_target_in_softc(struct scsi_target *starget)
+{
+ struct ahd_softc *ahd =
+ *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
+ unsigned int target_offset;
+
+ target_offset = starget->id;
+ if (starget->channel != 0)
+ target_offset += 8;
+
+ return &ahd->platform_data->starget[target_offset];
+}
+
+static int
+ahd_linux_target_alloc(struct scsi_target *starget)
+{
+ struct ahd_softc *ahd =
+ *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
+ struct seeprom_config *sc = ahd->seep_config;
+ unsigned long flags;
+ struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
+ struct ahd_devinfo devinfo;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ char channel = starget->channel + 'A';
+
+ ahd_lock(ahd, &flags);
+
+ BUG_ON(*ahd_targp != NULL);
+
+ *ahd_targp = starget;
+
+ if (sc) {
+ int flags = sc->device_flags[starget->id];
+
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ starget->id, &tstate);
+
+ if ((flags & CFPACKETIZED) == 0) {
+ /* don't negotiate packetized (IU) transfers */
+ spi_max_iu(starget) = 0;
+ } else {
+ if ((ahd->features & AHD_RTI) == 0)
+ spi_rti(starget) = 0;
+ }
+
+ if ((flags & CFQAS) == 0)
+ spi_max_qas(starget) = 0;
+
+ /* Transinfo values have been set to BIOS settings */
+ spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
+ spi_min_period(starget) = tinfo->user.period;
+ spi_max_offset(starget) = tinfo->user.offset;
+ }
+
+ tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
+ starget->id, &tstate);
+ ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id,
+ CAM_LUN_WILDCARD, channel,
+ ROLE_INITIATOR);
+ ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
+ AHD_TRANS_GOAL, /*paused*/FALSE);
+ ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHD_TRANS_GOAL, /*paused*/FALSE);
+ ahd_unlock(ahd, &flags);
+
+ return 0;
+}
+
+static void
+ahd_linux_target_destroy(struct scsi_target *starget)
+{
+ struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
+
+ *ahd_targp = NULL;
+}
+
+static int
+ahd_linux_slave_alloc(struct scsi_device *sdev)
+{
+ struct ahd_softc *ahd =
+ *((struct ahd_softc **)sdev->host->hostdata);
+ struct ahd_linux_device *dev;
+
+ if (bootverbose)
+ printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
+
+ dev = scsi_transport_device_data(sdev);
+ memset(dev, 0, sizeof(*dev));
+
+ /*
+ * We start out life using untagged
+ * transactions of which we allow one.
+ */
+ dev->openings = 1;
+
+ /*
+ * Set maxtags to 0. This will be changed if we
+ * later determine that we are dealing with
+ * a tagged queuing capable device.
+ */
+ dev->maxtags = 0;
+
+ return (0);
+}
+
+static int
+ahd_linux_slave_configure(struct scsi_device *sdev)
+{
+ struct ahd_softc *ahd;
+
+ ahd = *((struct ahd_softc **)sdev->host->hostdata);
+ if (bootverbose)
+ sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
+
+ ahd_linux_device_queue_depth(sdev);
+
+ /* Initial Domain Validation */
+ if (!spi_initial_dv(sdev->sdev_target))
+ spi_dv_device(sdev);
+
+ return 0;
+}
+
+#if defined(__i386__)
+/*
+ * Return the disk geometry for the given SCSI device.
+ */
+static int
+ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ uint8_t *bh;
+ int heads;
+ int sectors;
+ int cylinders;
+ int ret;
+ int extended;
+ struct ahd_softc *ahd;
+
+ ahd = *((struct ahd_softc **)sdev->host->hostdata);
+
+ bh = scsi_bios_ptable(bdev);
+ if (bh) {
+ ret = scsi_partsize(bh, capacity,
+ &geom[2], &geom[0], &geom[1]);
+ kfree(bh);
+ if (ret != -1)
+ return (ret);
+ }
+ heads = 64;
+ sectors = 32;
+ cylinders = aic_sector_div(capacity, heads, sectors);
+
+ if (aic79xx_extended != 0)
+ extended = 1;
+ else
+ extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0;
+ if (extended && cylinders >= 1024) {
+ heads = 255;
+ sectors = 63;
+ cylinders = aic_sector_div(capacity, heads, sectors);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return (0);
+}
+#endif
+
+/*
+ * Abort the current SCSI command(s).
+ */
+static int
+ahd_linux_abort(struct scsi_cmnd *cmd)
+{
+ int error;
+
+ error = ahd_linux_queue_abort_cmd(cmd);
+
+ return error;
+}
+
+/*
+ * Attempt to send a target reset message to the device that timed out.
+ */
+static int
+ahd_linux_dev_reset(struct scsi_cmnd *cmd)
+{
+ struct ahd_softc *ahd;
+ struct ahd_linux_device *dev;
+ struct scb *reset_scb;
+ u_int cdb_byte;
+ int retval = SUCCESS;
+ int paused;
+ int wait;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ reset_scb = NULL;
+ paused = FALSE;
+ wait = FALSE;
+ ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+
+ scmd_printk(KERN_INFO, cmd,
+ "Attempting to queue a TARGET RESET message:");
+
+ printk("CDB:");
+ for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+ printk(" 0x%x", cmd->cmnd[cdb_byte]);
+ printk("\n");
+
+ /*
+ * Determine if we currently own this command.
+ */
+ dev = scsi_transport_device_data(cmd->device);
+
+ if (dev == NULL) {
+ /*
+ * No target device for this command exists,
+ * so we must not still own the command.
+ */
+ scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
+ return SUCCESS;
+ }
+
+ /*
+ * Generate us a new SCB
+ */
+ reset_scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX);
+ if (!reset_scb) {
+ scmd_printk(KERN_INFO, cmd, "No SCB available\n");
+ return FAILED;
+ }
+
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ cmd->device->id, &tstate);
+ reset_scb->io_ctx = cmd;
+ reset_scb->platform_data->dev = dev;
+ reset_scb->sg_count = 0;
+ ahd_set_residual(reset_scb, 0);
+ ahd_set_sense_residual(reset_scb, 0);
+ reset_scb->platform_data->xfer_len = 0;
+ reset_scb->hscb->control = 0;
+ reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd);
+ reset_scb->hscb->lun = cmd->device->lun;
+ reset_scb->hscb->cdb_len = 0;
+ reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET;
+ reset_scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
+ if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+ reset_scb->flags |= SCB_PACKETIZED;
+ } else {
+ reset_scb->hscb->control |= MK_MESSAGE;
+ }
+ dev->openings--;
+ dev->active++;
+ dev->commands_issued++;
+
+ ahd_lock(ahd, &flags);
+
+ LIST_INSERT_HEAD(&ahd->pending_scbs, reset_scb, pending_links);
+ ahd_queue_scb(ahd, reset_scb);
+
+ ahd->platform_data->eh_done = &done;
+ ahd_unlock(ahd, &flags);
+
+ printk("%s: Device reset code sleeping\n", ahd_name(ahd));
+ if (!wait_for_completion_timeout(&done, 5 * HZ)) {
+ ahd_lock(ahd, &flags);
+ ahd->platform_data->eh_done = NULL;
+ ahd_unlock(ahd, &flags);
+ printk("%s: Device reset timer expired (active %d)\n",
+ ahd_name(ahd), dev->active);
+ retval = FAILED;
+ }
+ printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
+
+ return (retval);
+}
+
+/*
+ * Reset the SCSI bus.
+ */
+static int
+ahd_linux_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct ahd_softc *ahd;
+ int found;
+ unsigned long flags;
+
+ ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+ printk("%s: Bus reset called for cmd %p\n",
+ ahd_name(ahd), cmd);
+#endif
+ ahd_lock(ahd, &flags);
+
+ found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
+ /*initiate reset*/TRUE);
+ ahd_unlock(ahd, &flags);
+
+ if (bootverbose)
+ printk("%s: SCSI bus reset delivered. "
+ "%d SCBs aborted.\n", ahd_name(ahd), found);
+
+ return (SUCCESS);
+}
+
+struct scsi_host_template aic79xx_driver_template = {
+ .module = THIS_MODULE,
+ .name = "aic79xx",
+ .proc_name = "aic79xx",
+ .show_info = ahd_linux_show_info,
+ .write_info = ahd_proc_write_seeprom,
+ .info = ahd_linux_info,
+ .queuecommand = ahd_linux_queue,
+ .eh_abort_handler = ahd_linux_abort,
+ .eh_device_reset_handler = ahd_linux_dev_reset,
+ .eh_bus_reset_handler = ahd_linux_bus_reset,
+#if defined(__i386__)
+ .bios_param = ahd_linux_biosparam,
+#endif
+ .can_queue = AHD_MAX_QUEUE,
+ .this_id = -1,
+ .max_sectors = 8192,
+ .cmd_per_lun = 2,
+ .use_clustering = ENABLE_CLUSTERING,
+ .slave_alloc = ahd_linux_slave_alloc,
+ .slave_configure = ahd_linux_slave_configure,
+ .target_alloc = ahd_linux_target_alloc,
+ .target_destroy = ahd_linux_target_destroy,
+ .use_blk_tags = 1,
+};
+
+/******************************** Bus DMA *************************************/
+int
+ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
+ bus_size_t alignment, bus_size_t boundary,
+ dma_addr_t lowaddr, dma_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg,
+ bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
+{
+ bus_dma_tag_t dmat;
+
+ dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
+ if (dmat == NULL)
+ return (ENOMEM);
+
+ /*
+ * Linux is very simplistic about DMA memory. For now don't
+ * maintain all specification information. Once Linux supplies
+ * better facilities for doing these operations, or the
+ * needs of this particular driver change, we might need to do
+ * more here.
+ */
+ dmat->alignment = alignment;
+ dmat->boundary = boundary;
+ dmat->maxsize = maxsize;
+ *ret_tag = dmat;
+ return (0);
+}
+
+void
+ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
+{
+ kfree(dmat);
+}
+
+int
+ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
+ int flags, bus_dmamap_t *mapp)
+{
+ *vaddr = pci_alloc_consistent(ahd->dev_softc,
+ dmat->maxsize, mapp);
+ if (*vaddr == NULL)
+ return (ENOMEM);
+ return(0);
+}
+
+void
+ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
+ void* vaddr, bus_dmamap_t map)
+{
+ pci_free_consistent(ahd->dev_softc, dmat->maxsize,
+ vaddr, map);
+}
+
+int
+ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
+ void *cb_arg, int flags)
+{
+ /*
+ * Assume for now that this will only be used during
+ * initialization and not for per-transaction buffer mapping.
+ */
+ bus_dma_segment_t stack_sg;
+
+ stack_sg.ds_addr = map;
+ stack_sg.ds_len = dmat->maxsize;
+ cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
+ return (0);
+}
+
+void
+ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+}
+
+int
+ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ /* Nothing to do */
+ return (0);
+}
+
+/********************* Platform Dependent Functions ***************************/
+static void
+ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
+{
+
+ if ((instance >= 0)
+ && (instance < ARRAY_SIZE(aic79xx_iocell_info))) {
+ uint8_t *iocell_info;
+
+ iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
+ iocell_info[index] = value & 0xFFFF;
+ if (bootverbose)
+ printk("iocell[%d:%ld] = %d\n", instance, index, value);
+ }
+}
+
+static void
+ahd_linux_setup_tag_info_global(char *p)
+{
+ int tags, i, j;
+
+ tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
+ printk("Setting Global Tags= %d\n", tags);
+
+ for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) {
+ for (j = 0; j < AHD_NUM_TARGETS; j++) {
+ aic79xx_tag_info[i].tag_commands[j] = tags;
+ }
+ }
+}
+
+static void
+ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
+{
+
+ if ((instance >= 0) && (targ >= 0)
+ && (instance < ARRAY_SIZE(aic79xx_tag_info))
+ && (targ < AHD_NUM_TARGETS)) {
+ aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
+ if (bootverbose)
+ printk("tag_info[%d:%d] = %d\n", instance, targ, value);
+ }
+}
+
+static char *
+ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
+ void (*callback)(u_long, int, int, int32_t),
+ u_long callback_arg)
+{
+ char *tok_end;
+ char *tok_end2;
+ int i;
+ int instance;
+ int targ;
+ int done;
+ char tok_list[] = {'.', ',', '{', '}', '\0'};
+
+ /* All options use a ':' name/arg separator */
+ if (*opt_arg != ':')
+ return (opt_arg);
+ opt_arg++;
+ instance = -1;
+ targ = -1;
+ done = FALSE;
+ /*
+ * Restore separator that may be in
+ * the middle of our option argument.
+ */
+ tok_end = strchr(opt_arg, '\0');
+ if (tok_end < end)
+ *tok_end = ',';
+ while (!done) {
+ switch (*opt_arg) {
+ case '{':
+ if (instance == -1) {
+ instance = 0;
+ } else {
+ if (depth > 1) {
+ if (targ == -1)
+ targ = 0;
+ } else {
+ printk("Malformed Option %s\n",
+ opt_name);
+ done = TRUE;
+ }
+ }
+ opt_arg++;
+ break;
+ case '}':
+ if (targ != -1)
+ targ = -1;
+ else if (instance != -1)
+ instance = -1;
+ opt_arg++;
+ break;
+ case ',':
+ case '.':
+ if (instance == -1)
+ done = TRUE;
+ else if (targ >= 0)
+ targ++;
+ else if (instance >= 0)
+ instance++;
+ opt_arg++;
+ break;
+ case '\0':
+ done = TRUE;
+ break;
+ default:
+ tok_end = end;
+ for (i = 0; tok_list[i]; i++) {
+ tok_end2 = strchr(opt_arg, tok_list[i]);
+ if ((tok_end2) && (tok_end2 < tok_end))
+ tok_end = tok_end2;
+ }
+ callback(callback_arg, instance, targ,
+ simple_strtol(opt_arg, NULL, 0));
+ opt_arg = tok_end;
+ break;
+ }
+ }
+ return (opt_arg);
+}
+
+/*
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * ie. aic79xx=stpwlev:1,extended
+ */
+static int
+aic79xx_setup(char *s)
+{
+ int i, n;
+ char *p;
+ char *end;
+
+ static const struct {
+ const char *name;
+ uint32_t *flag;
+ } options[] = {
+ { "extended", &aic79xx_extended },
+ { "no_reset", &aic79xx_no_reset },
+ { "verbose", &aic79xx_verbose },
+ { "allow_memio", &aic79xx_allow_memio},
+#ifdef AHD_DEBUG
+ { "debug", &ahd_debug },
+#endif
+ { "periodic_otag", &aic79xx_periodic_otag },
+ { "pci_parity", &aic79xx_pci_parity },
+ { "seltime", &aic79xx_seltime },
+ { "tag_info", NULL },
+ { "global_tag_depth", NULL},
+ { "slewrate", NULL },
+ { "precomp", NULL },
+ { "amplitude", NULL },
+ { "slowcrc", &aic79xx_slowcrc },
+ };
+
+ end = strchr(s, '\0');
+
+ /*
+ * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE
+ * will never be 0 in this case.
+ */
+ n = 0;
+
+ while ((p = strsep(&s, ",.")) != NULL) {
+ if (*p == '\0')
+ continue;
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+
+ n = strlen(options[i].name);
+ if (strncmp(options[i].name, p, n) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(options))
+ continue;
+
+ if (strncmp(p, "global_tag_depth", n) == 0) {
+ ahd_linux_setup_tag_info_global(p + n);
+ } else if (strncmp(p, "tag_info", n) == 0) {
+ s = ahd_parse_brace_option("tag_info", p + n, end,
+ 2, ahd_linux_setup_tag_info, 0);
+ } else if (strncmp(p, "slewrate", n) == 0) {
+ s = ahd_parse_brace_option("slewrate",
+ p + n, end, 1, ahd_linux_setup_iocell_info,
+ AIC79XX_SLEWRATE_INDEX);
+ } else if (strncmp(p, "precomp", n) == 0) {
+ s = ahd_parse_brace_option("precomp",
+ p + n, end, 1, ahd_linux_setup_iocell_info,
+ AIC79XX_PRECOMP_INDEX);
+ } else if (strncmp(p, "amplitude", n) == 0) {
+ s = ahd_parse_brace_option("amplitude",
+ p + n, end, 1, ahd_linux_setup_iocell_info,
+ AIC79XX_AMPLITUDE_INDEX);
+ } else if (p[n] == ':') {
+ *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+ } else if (!strncmp(p, "verbose", n)) {
+ *(options[i].flag) = 1;
+ } else {
+ *(options[i].flag) ^= 0xFFFFFFFF;
+ }
+ }
+ return 1;
+}
+
+__setup("aic79xx=", aic79xx_setup);
+
+uint32_t aic79xx_verbose;
+
+int
+ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template)
+{
+ char buf[80];
+ struct Scsi_Host *host;
+ char *new_name;
+ u_long s;
+ int retval;
+
+ template->name = ahd->description;
+ host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
+ if (host == NULL)
+ return (ENOMEM);
+
+ *((struct ahd_softc **)host->hostdata) = ahd;
+ ahd->platform_data->host = host;
+ host->can_queue = AHD_MAX_QUEUE;
+ host->cmd_per_lun = 2;
+ host->sg_tablesize = AHD_NSEG;
+ host->this_id = ahd->our_id;
+ host->irq = ahd->platform_data->irq;
+ host->max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
+ host->max_lun = AHD_NUM_LUNS;
+ host->max_channel = 0;
+ host->sg_tablesize = AHD_NSEG;
+ ahd_lock(ahd, &s);
+ ahd_set_unit(ahd, ahd_linux_unit++);
+ ahd_unlock(ahd, &s);
+ sprintf(buf, "scsi%d", host->host_no);
+ new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
+ if (new_name != NULL) {
+ strcpy(new_name, buf);
+ ahd_set_name(ahd, new_name);
+ }
+ host->unique_id = ahd->unit;
+ ahd_linux_initialize_scsi_bus(ahd);
+ ahd_intr_enable(ahd, TRUE);
+
+ host->transportt = ahd_linux_transport_template;
+
+ retval = scsi_add_host(host, &ahd->dev_softc->dev);
+ if (retval) {
+ printk(KERN_WARNING "aic79xx: scsi_add_host failed\n");
+ scsi_host_put(host);
+ return retval;
+ }
+
+ scsi_scan_host(host);
+ return 0;
+}
+
+/*
+ * Place the SCSI bus into a known state by either resetting it,
+ * or forcing transfer negotiations on the next command to any
+ * target.
+ */
+static void
+ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
+{
+ u_int target_id;
+ u_int numtarg;
+ unsigned long s;
+
+ target_id = 0;
+ numtarg = 0;
+
+ if (aic79xx_no_reset != 0)
+ ahd->flags &= ~AHD_RESET_BUS_A;
+
+ if ((ahd->flags & AHD_RESET_BUS_A) != 0)
+ ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE);
+ else
+ numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
+
+ ahd_lock(ahd, &s);
+
+ /*
+ * Force negotiation to async for all targets that
+ * will not see an initial bus reset.
+ */
+ for (; target_id < numtarg; target_id++) {
+ struct ahd_devinfo devinfo;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ target_id, &tstate);
+ ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
+ CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
+ ahd_update_neg_request(ahd, &devinfo, tstate,
+ tinfo, AHD_NEG_ALWAYS);
+ }
+ ahd_unlock(ahd, &s);
+ /* Give the bus some time to recover */
+ if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
+ ahd_freeze_simq(ahd);
+ msleep(AIC79XX_RESET_DELAY);
+ ahd_release_simq(ahd);
+ }
+}
+
+int
+ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
+{
+ ahd->platform_data =
+ kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
+ if (ahd->platform_data == NULL)
+ return (ENOMEM);
+ ahd->platform_data->irq = AHD_LINUX_NOIRQ;
+ ahd_lockinit(ahd);
+ ahd->seltime = (aic79xx_seltime & 0x3) << 4;
+ return (0);
+}
+
+void
+ahd_platform_free(struct ahd_softc *ahd)
+{
+ struct scsi_target *starget;
+ int i;
+
+ if (ahd->platform_data != NULL) {
+ /* destroy all of the device and target objects */
+ for (i = 0; i < AHD_NUM_TARGETS; i++) {
+ starget = ahd->platform_data->starget[i];
+ if (starget != NULL) {
+ ahd->platform_data->starget[i] = NULL;
+ }
+ }
+
+ if (ahd->platform_data->irq != AHD_LINUX_NOIRQ)
+ free_irq(ahd->platform_data->irq, ahd);
+ if (ahd->tags[0] == BUS_SPACE_PIO
+ && ahd->bshs[0].ioport != 0)
+ release_region(ahd->bshs[0].ioport, 256);
+ if (ahd->tags[1] == BUS_SPACE_PIO
+ && ahd->bshs[1].ioport != 0)
+ release_region(ahd->bshs[1].ioport, 256);
+ if (ahd->tags[0] == BUS_SPACE_MEMIO
+ && ahd->bshs[0].maddr != NULL) {
+ iounmap(ahd->bshs[0].maddr);
+ release_mem_region(ahd->platform_data->mem_busaddr,
+ 0x1000);
+ }
+ if (ahd->platform_data->host)
+ scsi_host_put(ahd->platform_data->host);
+
+ kfree(ahd->platform_data);
+ }
+}
+
+void
+ahd_platform_init(struct ahd_softc *ahd)
+{
+ /*
+ * Lookup and commit any modified IO Cell options.
+ */
+ if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
+ const struct ahd_linux_iocell_opts *iocell_opts;
+
+ iocell_opts = &aic79xx_iocell_info[ahd->unit];
+ if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
+ AHD_SET_PRECOMP(ahd, iocell_opts->precomp);
+ if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE)
+ AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate);
+ if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE)
+ AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude);
+ }
+
+}
+
+void
+ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
+{
+ ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_LIST_NULL,
+ ROLE_UNKNOWN, CAM_REQUEUE_REQ);
+}
+
+void
+ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
+ struct ahd_devinfo *devinfo, ahd_queue_alg alg)
+{
+ struct ahd_linux_device *dev;
+ int was_queuing;
+ int now_queuing;
+
+ if (sdev == NULL)
+ return;
+
+ dev = scsi_transport_device_data(sdev);
+
+ if (dev == NULL)
+ return;
+ was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
+ switch (alg) {
+ default:
+ case AHD_QUEUE_NONE:
+ now_queuing = 0;
+ break;
+ case AHD_QUEUE_BASIC:
+ now_queuing = AHD_DEV_Q_BASIC;
+ break;
+ case AHD_QUEUE_TAGGED:
+ now_queuing = AHD_DEV_Q_TAGGED;
+ break;
+ }
+ if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0
+ && (was_queuing != now_queuing)
+ && (dev->active != 0)) {
+ dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY;
+ dev->qfrozen++;
+ }
+
+ dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG);
+ if (now_queuing) {
+ u_int usertags;
+
+ usertags = ahd_linux_user_tagdepth(ahd, devinfo);
+ if (!was_queuing) {
+ /*
+ * Start out aggressively and allow our
+ * dynamic queue depth algorithm to take
+ * care of the rest.
+ */
+ dev->maxtags = usertags;
+ dev->openings = dev->maxtags - dev->active;
+ }
+ if (dev->maxtags == 0) {
+ /*
+ * Queueing is disabled by the user.
+ */
+ dev->openings = 1;
+ } else if (alg == AHD_QUEUE_TAGGED) {
+ dev->flags |= AHD_DEV_Q_TAGGED;
+ if (aic79xx_periodic_otag != 0)
+ dev->flags |= AHD_DEV_PERIODIC_OTAG;
+ } else
+ dev->flags |= AHD_DEV_Q_BASIC;
+ } else {
+ /* We can only have one opening. */
+ dev->maxtags = 0;
+ dev->openings = 1 - dev->active;
+ }
+
+ switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
+ case AHD_DEV_Q_BASIC:
+ case AHD_DEV_Q_TAGGED:
+ scsi_change_queue_depth(sdev,
+ dev->openings + dev->active);
+ break;
+ default:
+ /*
+ * We allow the OS to queue 2 untagged transactions to
+ * us at any time even though we can only execute them
+ * serially on the controller/device. This should
+ * remove some latency.
+ */
+ scsi_change_queue_depth(sdev, 1);
+ break;
+ }
+}
+
+int
+ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status)
+{
+ return 0;
+}
+
+static u_int
+ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ static int warned_user;
+ u_int tags;
+
+ tags = 0;
+ if ((ahd->user_discenable & devinfo->target_mask) != 0) {
+ if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) {
+
+ if (warned_user == 0) {
+ printk(KERN_WARNING
+"aic79xx: WARNING: Insufficient tag_info instances\n"
+"aic79xx: for installed controllers. Using defaults\n"
+"aic79xx: Please update the aic79xx_tag_info array in\n"
+"aic79xx: the aic79xx_osm.c source file.\n");
+ warned_user++;
+ }
+ tags = AHD_MAX_QUEUE;
+ } else {
+ adapter_tag_info_t *tag_info;
+
+ tag_info = &aic79xx_tag_info[ahd->unit];
+ tags = tag_info->tag_commands[devinfo->target_offset];
+ if (tags > AHD_MAX_QUEUE)
+ tags = AHD_MAX_QUEUE;
+ }
+ }
+ return (tags);
+}
+
+/*
+ * Determines the queue depth for a given device.
+ */
+static void
+ahd_linux_device_queue_depth(struct scsi_device *sdev)
+{
+ struct ahd_devinfo devinfo;
+ u_int tags;
+ struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata);
+
+ ahd_compile_devinfo(&devinfo,
+ ahd->our_id,
+ sdev->sdev_target->id, sdev->lun,
+ sdev->sdev_target->channel == 0 ? 'A' : 'B',
+ ROLE_INITIATOR);
+ tags = ahd_linux_user_tagdepth(ahd, &devinfo);
+ if (tags != 0 && sdev->tagged_supported != 0) {
+
+ ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED);
+ ahd_send_async(ahd, devinfo.channel, devinfo.target,
+ devinfo.lun, AC_TRANSFER_NEG);
+ ahd_print_devinfo(ahd, &devinfo);
+ printk("Tagged Queuing enabled. Depth %d\n", tags);
+ } else {
+ ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
+ ahd_send_async(ahd, devinfo.channel, devinfo.target,
+ devinfo.lun, AC_TRANSFER_NEG);
+ }
+}
+
+static int
+ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
+ struct scsi_cmnd *cmd)
+{
+ struct scb *scb;
+ struct hardware_scb *hscb;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ u_int col_idx;
+ uint16_t mask;
+ unsigned long flags;
+ int nseg;
+
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ ahd_lock(ahd, &flags);
+
+ /*
+ * Get an scb to use.
+ */
+ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+ cmd->device->id, &tstate);
+ if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
+ || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+ col_idx = AHD_NEVER_COL_IDX;
+ } else {
+ col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
+ cmd->device->lun);
+ }
+ if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
+ ahd->flags |= AHD_RESOURCE_SHORTAGE;
+ ahd_unlock(ahd, &flags);
+ scsi_dma_unmap(cmd);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ scb->io_ctx = cmd;
+ scb->platform_data->dev = dev;
+ hscb = scb->hscb;
+ cmd->host_scribble = (char *)scb;
+
+ /*
+ * Fill out basics of the HSCB.
+ */
+ hscb->control = 0;
+ hscb->scsiid = BUILD_SCSIID(ahd, cmd);
+ hscb->lun = cmd->device->lun;
+ scb->hscb->task_management = 0;
+ mask = SCB_GET_TARGET_MASK(ahd, scb);
+
+ if ((ahd->user_discenable & mask) != 0)
+ hscb->control |= DISCENB;
+
+ if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
+ scb->flags |= SCB_PACKETIZED;
+
+ if ((tstate->auto_negotiate & mask) != 0) {
+ scb->flags |= SCB_AUTO_NEGOTIATE;
+ scb->hscb->control |= MK_MESSAGE;
+ }
+
+ if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
+ if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
+ && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
+ hscb->control |= MSG_ORDERED_TASK;
+ dev->commands_since_idle_or_otag = 0;
+ } else {
+ hscb->control |= MSG_SIMPLE_TASK;
+ }
+ }
+
+ hscb->cdb_len = cmd->cmd_len;
+ memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);
+
+ scb->platform_data->xfer_len = 0;
+ ahd_set_residual(scb, 0);
+ ahd_set_sense_residual(scb, 0);
+ scb->sg_count = 0;
+
+ if (nseg > 0) {
+ void *sg = scb->sg_list;
+ struct scatterlist *cur_seg;
+ int i;
+
+ scb->platform_data->xfer_len = 0;
+
+ scsi_for_each_sg(cmd, cur_seg, nseg, i) {
+ dma_addr_t addr;
+ bus_size_t len;
+
+ addr = sg_dma_address(cur_seg);
+ len = sg_dma_len(cur_seg);
+ scb->platform_data->xfer_len += len;
+ sg = ahd_sg_setup(ahd, scb, sg, addr, len,
+ i == (nseg - 1));
+ }
+ }
+
+ LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
+ dev->openings--;
+ dev->active++;
+ dev->commands_issued++;
+
+ if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
+ dev->commands_since_idle_or_otag++;
+ scb->flags |= SCB_ACTIVE;
+ ahd_queue_scb(ahd, scb);
+
+ ahd_unlock(ahd, &flags);
+
+ return 0;
+}
+
+/*
+ * SCSI controller interrupt handler.
+ */
+irqreturn_t
+ahd_linux_isr(int irq, void *dev_id)
+{
+ struct ahd_softc *ahd;
+ u_long flags;
+ int ours;
+
+ ahd = (struct ahd_softc *) dev_id;
+ ahd_lock(ahd, &flags);
+ ours = ahd_intr(ahd);
+ ahd_unlock(ahd, &flags);
+ return IRQ_RETVAL(ours);
+}
+
+void
+ahd_send_async(struct ahd_softc *ahd, char channel,
+ u_int target, u_int lun, ac_code code)
+{
+ switch (code) {
+ case AC_TRANSFER_NEG:
+ {
+ struct scsi_target *starget;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ unsigned int target_ppr_options;
+
+ BUG_ON(target == CAM_TARGET_WILDCARD);
+
+ tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
+ target, &tstate);
+
+ /*
+ * Don't bother reporting results while
+ * negotiations are still pending.
+ */
+ if (tinfo->curr.period != tinfo->goal.period
+ || tinfo->curr.width != tinfo->goal.width
+ || tinfo->curr.offset != tinfo->goal.offset
+ || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
+ if (bootverbose == 0)
+ break;
+
+ /*
+ * Don't bother reporting results that
+ * are identical to those last reported.
+ */
+ starget = ahd->platform_data->starget[target];
+ if (starget == NULL)
+ break;
+
+ target_ppr_options =
+ (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
+ + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
+ + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0)
+ + (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0)
+ + (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0)
+ + (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0)
+ + (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0)
+ + (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0);
+
+ if (tinfo->curr.period == spi_period(starget)
+ && tinfo->curr.width == spi_width(starget)
+ && tinfo->curr.offset == spi_offset(starget)
+ && tinfo->curr.ppr_options == target_ppr_options)
+ if (bootverbose == 0)
+ break;
+
+ spi_period(starget) = tinfo->curr.period;
+ spi_width(starget) = tinfo->curr.width;
+ spi_offset(starget) = tinfo->curr.offset;
+ spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
+ spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
+ spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
+ spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
+ spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
+ spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0;
+ spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
+ spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
+ spi_display_xfer_agreement(starget);
+ break;
+ }
+ case AC_SENT_BDR:
+ {
+ WARN_ON(lun != CAM_LUN_WILDCARD);
+ scsi_report_device_reset(ahd->platform_data->host,
+ channel - 'A', target);
+ break;
+ }
+ case AC_BUS_RESET:
+ if (ahd->platform_data->host != NULL) {
+ scsi_report_bus_reset(ahd->platform_data->host,
+ channel - 'A');
+ }
+ break;
+ default:
+ panic("ahd_send_async: Unexpected async event");
+ }
+}
+
+/*
+ * Calls the higher level scsi done function and frees the scb.
+ */
+void
+ahd_done(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct scsi_cmnd *cmd;
+ struct ahd_linux_device *dev;
+
+ if ((scb->flags & SCB_ACTIVE) == 0) {
+ printk("SCB %d done'd twice\n", SCB_GET_TAG(scb));
+ ahd_dump_card_state(ahd);
+ panic("Stopping for safety");
+ }
+ LIST_REMOVE(scb, pending_links);
+ cmd = scb->io_ctx;
+ dev = scb->platform_data->dev;
+ dev->active--;
+ dev->openings++;
+ if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
+ cmd->result &= ~(CAM_DEV_QFRZN << 16);
+ dev->qfrozen--;
+ }
+ ahd_linux_unmap_scb(ahd, scb);
+
+ /*
+ * Guard against stale sense data.
+ * The Linux mid-layer assumes that sense
+ * was retrieved anytime the first byte of
+ * the sense buffer looks "sane".
+ */
+ cmd->sense_buffer[0] = 0;
+ if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
+ uint32_t amount_xferred;
+
+ amount_xferred =
+ ahd_get_transfer_length(scb) - ahd_get_residual(scb);
+ if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MISC) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("Set CAM_UNCOR_PARITY\n");
+ }
+#endif
+ ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
+#ifdef AHD_REPORT_UNDERFLOWS
+ /*
+ * This code is disabled by default as some
+ * clients of the SCSI system do not properly
+ * initialize the underflow parameter. This
+ * results in spurious termination of commands
+ * that complete as expected (e.g. underflow is
+ * allowed as command can return variable amounts
+ * of data.
+ */
+ } else if (amount_xferred < scb->io_ctx->underflow) {
+ u_int i;
+
+ ahd_print_path(ahd, scb);
+ printk("CDB:");
+ for (i = 0; i < scb->io_ctx->cmd_len; i++)
+ printk(" 0x%x", scb->io_ctx->cmnd[i]);
+ printk("\n");
+ ahd_print_path(ahd, scb);
+ printk("Saw underflow (%ld of %ld bytes). "
+ "Treated as error\n",
+ ahd_get_residual(scb),
+ ahd_get_transfer_length(scb));
+ ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+#endif
+ } else {
+ ahd_set_transaction_status(scb, CAM_REQ_CMP);
+ }
+ } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
+ ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
+ }
+
+ if (dev->openings == 1
+ && ahd_get_transaction_status(scb) == CAM_REQ_CMP
+ && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+ dev->tag_success_count++;
+ /*
+ * Some devices deal with temporary internal resource
+ * shortages by returning queue full. When the queue
+ * full occurrs, we throttle back. Slowly try to get
+ * back to our previous queue depth.
+ */
+ if ((dev->openings + dev->active) < dev->maxtags
+ && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
+ dev->tag_success_count = 0;
+ dev->openings++;
+ }
+
+ if (dev->active == 0)
+ dev->commands_since_idle_or_otag = 0;
+
+ if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
+ printk("Recovery SCB completes\n");
+ if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
+ || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
+ ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
+
+ if (ahd->platform_data->eh_done)
+ complete(ahd->platform_data->eh_done);
+ }
+
+ ahd_free_scb(ahd, scb);
+ ahd_linux_queue_cmd_complete(ahd, cmd);
+}
+
+static void
+ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
+ struct scsi_device *sdev, struct scb *scb)
+{
+ struct ahd_devinfo devinfo;
+ struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
+
+ ahd_compile_devinfo(&devinfo,
+ ahd->our_id,
+ sdev->sdev_target->id, sdev->lun,
+ sdev->sdev_target->channel == 0 ? 'A' : 'B',
+ ROLE_INITIATOR);
+
+ /*
+ * We don't currently trust the mid-layer to
+ * properly deal with queue full or busy. So,
+ * when one occurs, we tell the mid-layer to
+ * unconditionally requeue the command to us
+ * so that we can retry it ourselves. We also
+ * implement our own throttling mechanism so
+ * we don't clobber the device with too many
+ * commands.
+ */
+ switch (ahd_get_scsi_status(scb)) {
+ default:
+ break;
+ case SCSI_STATUS_CHECK_COND:
+ case SCSI_STATUS_CMD_TERMINATED:
+ {
+ struct scsi_cmnd *cmd;
+
+ /*
+ * Copy sense information to the OS's cmd
+ * structure if it is available.
+ */
+ cmd = scb->io_ctx;
+ if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
+ struct scsi_status_iu_header *siu;
+ u_int sense_size;
+ u_int sense_offset;
+
+ if (scb->flags & SCB_SENSE) {
+ sense_size = min(sizeof(struct scsi_sense_data)
+ - ahd_get_sense_residual(scb),
+ (u_long)SCSI_SENSE_BUFFERSIZE);
+ sense_offset = 0;
+ } else {
+ /*
+ * Copy only the sense data into the provided
+ * buffer.
+ */
+ siu = (struct scsi_status_iu_header *)
+ scb->sense_data;
+ sense_size = min_t(size_t,
+ scsi_4btoul(siu->sense_length),
+ SCSI_SENSE_BUFFERSIZE);
+ sense_offset = SIU_SENSE_OFFSET(siu);
+ }
+
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->sense_buffer,
+ ahd_get_sense_buf(ahd, scb)
+ + sense_offset, sense_size);
+ cmd->result |= (DRIVER_SENSE << 24);
+
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_SENSE) {
+ int i;
+
+ printk("Copied %d bytes of sense data at %d:",
+ sense_size, sense_offset);
+ for (i = 0; i < sense_size; i++) {
+ if ((i & 0xF) == 0)
+ printk("\n");
+ printk("0x%x ", cmd->sense_buffer[i]);
+ }
+ printk("\n");
+ }
+#endif
+ }
+ break;
+ }
+ case SCSI_STATUS_QUEUE_FULL:
+ /*
+ * By the time the core driver has returned this
+ * command, all other commands that were queued
+ * to us but not the device have been returned.
+ * This ensures that dev->active is equal to
+ * the number of commands actually queued to
+ * the device.
+ */
+ dev->tag_success_count = 0;
+ if (dev->active != 0) {
+ /*
+ * Drop our opening count to the number
+ * of commands currently outstanding.
+ */
+ dev->openings = 0;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
+ ahd_print_path(ahd, scb);
+ printk("Dropping tag count to %d\n",
+ dev->active);
+ }
+#endif
+ if (dev->active == dev->tags_on_last_queuefull) {
+
+ dev->last_queuefull_same_count++;
+ /*
+ * If we repeatedly see a queue full
+ * at the same queue depth, this
+ * device has a fixed number of tag
+ * slots. Lock in this tag depth
+ * so we stop seeing queue fulls from
+ * this device.
+ */
+ if (dev->last_queuefull_same_count
+ == AHD_LOCK_TAGS_COUNT) {
+ dev->maxtags = dev->active;
+ ahd_print_path(ahd, scb);
+ printk("Locking max tag count at %d\n",
+ dev->active);
+ }
+ } else {
+ dev->tags_on_last_queuefull = dev->active;
+ dev->last_queuefull_same_count = 0;
+ }
+ ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+ ahd_set_scsi_status(scb, SCSI_STATUS_OK);
+ ahd_platform_set_tags(ahd, sdev, &devinfo,
+ (dev->flags & AHD_DEV_Q_BASIC)
+ ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
+ break;
+ }
+ /*
+ * Drop down to a single opening, and treat this
+ * as if the target returned BUSY SCSI status.
+ */
+ dev->openings = 1;
+ ahd_platform_set_tags(ahd, sdev, &devinfo,
+ (dev->flags & AHD_DEV_Q_BASIC)
+ ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
+ ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
+ }
+}
+
+static void
+ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
+{
+ int status;
+ int new_status = DID_OK;
+ int do_fallback = 0;
+ int scsi_status;
+
+ /*
+ * Map CAM error codes into Linux Error codes. We
+ * avoid the conversion so that the DV code has the
+ * full error information available when making
+ * state change decisions.
+ */
+
+ status = ahd_cmd_get_transaction_status(cmd);
+ switch (status) {
+ case CAM_REQ_INPROG:
+ case CAM_REQ_CMP:
+ new_status = DID_OK;
+ break;
+ case CAM_AUTOSENSE_FAIL:
+ new_status = DID_ERROR;
+ /* Fallthrough */
+ case CAM_SCSI_STATUS_ERROR:
+ scsi_status = ahd_cmd_get_scsi_status(cmd);
+
+ switch(scsi_status) {
+ case SCSI_STATUS_CMD_TERMINATED:
+ case SCSI_STATUS_CHECK_COND:
+ if ((cmd->result >> 24) != DRIVER_SENSE) {
+ do_fallback = 1;
+ } else {
+ struct scsi_sense_data *sense;
+
+ sense = (struct scsi_sense_data *)
+ cmd->sense_buffer;
+ if (sense->extra_len >= 5 &&
+ (sense->add_sense_code == 0x47
+ || sense->add_sense_code == 0x48))
+ do_fallback = 1;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case CAM_REQ_ABORTED:
+ new_status = DID_ABORT;
+ break;
+ case CAM_BUSY:
+ new_status = DID_BUS_BUSY;
+ break;
+ case CAM_REQ_INVALID:
+ case CAM_PATH_INVALID:
+ new_status = DID_BAD_TARGET;
+ break;
+ case CAM_SEL_TIMEOUT:
+ new_status = DID_NO_CONNECT;
+ break;
+ case CAM_SCSI_BUS_RESET:
+ case CAM_BDR_SENT:
+ new_status = DID_RESET;
+ break;
+ case CAM_UNCOR_PARITY:
+ new_status = DID_PARITY;
+ do_fallback = 1;
+ break;
+ case CAM_CMD_TIMEOUT:
+ new_status = DID_TIME_OUT;
+ do_fallback = 1;
+ break;
+ case CAM_REQ_CMP_ERR:
+ case CAM_UNEXP_BUSFREE:
+ case CAM_DATA_RUN_ERR:
+ new_status = DID_ERROR;
+ do_fallback = 1;
+ break;
+ case CAM_UA_ABORT:
+ case CAM_NO_HBA:
+ case CAM_SEQUENCE_FAIL:
+ case CAM_CCB_LEN_ERR:
+ case CAM_PROVIDE_FAIL:
+ case CAM_REQ_TERMIO:
+ case CAM_UNREC_HBA_ERROR:
+ case CAM_REQ_TOO_BIG:
+ new_status = DID_ERROR;
+ break;
+ case CAM_REQUEUE_REQ:
+ new_status = DID_REQUEUE;
+ break;
+ default:
+ /* We should never get here */
+ new_status = DID_ERROR;
+ break;
+ }
+
+ if (do_fallback) {
+ printk("%s: device overrun (status %x) on %d:%d:%d\n",
+ ahd_name(ahd), status, cmd->device->channel,
+ cmd->device->id, (u8)cmd->device->lun);
+ }
+
+ ahd_cmd_set_transaction_status(cmd, new_status);
+
+ cmd->scsi_done(cmd);
+}
+
+static void
+ahd_freeze_simq(struct ahd_softc *ahd)
+{
+ scsi_block_requests(ahd->platform_data->host);
+}
+
+static void
+ahd_release_simq(struct ahd_softc *ahd)
+{
+ scsi_unblock_requests(ahd->platform_data->host);
+}
+
+static int
+ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
+{
+ struct ahd_softc *ahd;
+ struct ahd_linux_device *dev;
+ struct scb *pending_scb;
+ u_int saved_scbptr;
+ u_int active_scbptr;
+ u_int last_phase;
+ u_int saved_scsiid;
+ u_int cdb_byte;
+ int retval;
+ int was_paused;
+ int paused;
+ int wait;
+ int disconnected;
+ ahd_mode_state saved_modes;
+ unsigned long flags;
+
+ pending_scb = NULL;
+ paused = FALSE;
+ wait = FALSE;
+ ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+
+ scmd_printk(KERN_INFO, cmd,
+ "Attempting to queue an ABORT message:");
+
+ printk("CDB:");
+ for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+ printk(" 0x%x", cmd->cmnd[cdb_byte]);
+ printk("\n");
+
+ ahd_lock(ahd, &flags);
+
+ /*
+ * First determine if we currently own this command.
+ * Start by searching the device queue. If not found
+ * there, check the pending_scb list. If not found
+ * at all, and the system wanted us to just abort the
+ * command, return success.
+ */
+ dev = scsi_transport_device_data(cmd->device);
+
+ if (dev == NULL) {
+ /*
+ * No target device for this command exists,
+ * so we must not still own the command.
+ */
+ scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
+ retval = SUCCESS;
+ goto no_cmd;
+ }
+
+ /*
+ * See if we can find a matching cmd in the pending list.
+ */
+ LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
+ if (pending_scb->io_ctx == cmd)
+ break;
+ }
+
+ if (pending_scb == NULL) {
+ scmd_printk(KERN_INFO, cmd, "Command not found\n");
+ goto no_cmd;
+ }
+
+ if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
+ /*
+ * We can't queue two recovery actions using the same SCB
+ */
+ retval = FAILED;
+ goto done;
+ }
+
+ /*
+ * Ensure that the card doesn't do anything
+ * behind our back. Also make sure that we
+ * didn't "just" miss an interrupt that would
+ * affect this cmd.
+ */
+ was_paused = ahd_is_paused(ahd);
+ ahd_pause_and_flushwork(ahd);
+ paused = TRUE;
+
+ if ((pending_scb->flags & SCB_ACTIVE) == 0) {
+ scmd_printk(KERN_INFO, cmd, "Command already completed\n");
+ goto no_cmd;
+ }
+
+ printk("%s: At time of recovery, card was %spaused\n",
+ ahd_name(ahd), was_paused ? "" : "not ");
+ ahd_dump_card_state(ahd);
+
+ disconnected = TRUE;
+ if (ahd_search_qinfifo(ahd, cmd->device->id,
+ cmd->device->channel + 'A',
+ cmd->device->lun,
+ pending_scb->hscb->tag,
+ ROLE_INITIATOR, CAM_REQ_ABORTED,
+ SEARCH_COMPLETE) > 0) {
+ printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
+ ahd_name(ahd), cmd->device->channel,
+ cmd->device->id, (u8)cmd->device->lun);
+ retval = SUCCESS;
+ goto done;
+ }
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ last_phase = ahd_inb(ahd, LASTPHASE);
+ saved_scbptr = ahd_get_scbptr(ahd);
+ active_scbptr = saved_scbptr;
+ if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
+ struct scb *bus_scb;
+
+ bus_scb = ahd_lookup_scb(ahd, active_scbptr);
+ if (bus_scb == pending_scb)
+ disconnected = FALSE;
+ }
+
+ /*
+ * At this point, pending_scb is the scb associated with the
+ * passed in command. That command is currently active on the
+ * bus or is in the disconnected state.
+ */
+ saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+ if (last_phase != P_BUSFREE
+ && SCB_GET_TAG(pending_scb) == active_scbptr) {
+
+ /*
+ * We're active on the bus, so assert ATN
+ * and hope that the target responds.
+ */
+ pending_scb = ahd_lookup_scb(ahd, active_scbptr);
+ pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
+ ahd_outb(ahd, MSG_OUT, HOST_MSG);
+ ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
+ scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
+ wait = TRUE;
+ } else if (disconnected) {
+
+ /*
+ * Actually re-queue this SCB in an attempt
+ * to select the device before it reconnects.
+ */
+ pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
+ ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
+ pending_scb->hscb->cdb_len = 0;
+ pending_scb->hscb->task_attribute = 0;
+ pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
+
+ if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
+ /*
+ * Mark the SCB has having an outstanding
+ * task management function. Should the command
+ * complete normally before the task management
+ * function can be sent, the host will be notified
+ * to abort our requeued SCB.
+ */
+ ahd_outb(ahd, SCB_TASK_MANAGEMENT,
+ pending_scb->hscb->task_management);
+ } else {
+ /*
+ * If non-packetized, set the MK_MESSAGE control
+ * bit indicating that we desire to send a message.
+ * We also set the disconnected flag since there is
+ * no guarantee that our SCB control byte matches
+ * the version on the card. We don't want the
+ * sequencer to abort the command thinking an
+ * unsolicited reselection occurred.
+ */
+ pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
+
+ /*
+ * The sequencer will never re-reference the
+ * in-core SCB. To make sure we are notified
+ * during reselection, set the MK_MESSAGE flag in
+ * the card's copy of the SCB.
+ */
+ ahd_outb(ahd, SCB_CONTROL,
+ ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
+ }
+
+ /*
+ * Clear out any entries in the QINFIFO first
+ * so we are the next SCB for this target
+ * to run.
+ */
+ ahd_search_qinfifo(ahd, cmd->device->id,
+ cmd->device->channel + 'A', cmd->device->lun,
+ SCB_LIST_NULL, ROLE_INITIATOR,
+ CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+ ahd_qinfifo_requeue_tail(ahd, pending_scb);
+ ahd_set_scbptr(ahd, saved_scbptr);
+ ahd_print_path(ahd, pending_scb);
+ printk("Device is disconnected, re-queuing SCB\n");
+ wait = TRUE;
+ } else {
+ scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
+ retval = FAILED;
+ goto done;
+ }
+
+no_cmd:
+ /*
+ * Our assumption is that if we don't have the command, no
+ * recovery action was required, so we return success. Again,
+ * the semantics of the mid-layer recovery engine are not
+ * well defined, so this may change in time.
+ */
+ retval = SUCCESS;
+done:
+ if (paused)
+ ahd_unpause(ahd);
+ if (wait) {
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ ahd->platform_data->eh_done = &done;
+ ahd_unlock(ahd, &flags);
+
+ printk("%s: Recovery code sleeping\n", ahd_name(ahd));
+ if (!wait_for_completion_timeout(&done, 5 * HZ)) {
+ ahd_lock(ahd, &flags);
+ ahd->platform_data->eh_done = NULL;
+ ahd_unlock(ahd, &flags);
+ printk("%s: Timer Expired (active %d)\n",
+ ahd_name(ahd), dev->active);
+ retval = FAILED;
+ }
+ printk("Recovery code awake\n");
+ } else
+ ahd_unlock(ahd, &flags);
+
+ if (retval != SUCCESS)
+ printk("%s: Command abort returning 0x%x\n",
+ ahd_name(ahd), retval);
+
+ return retval;
+}
+
+static void ahd_linux_set_width(struct scsi_target *starget, int width)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_devinfo devinfo;
+ unsigned long flags;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_lock(ahd, &flags);
+ ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_period(struct scsi_target *starget, int period)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options;
+ unsigned int dt;
+ unsigned long flags;
+ unsigned long offset = tinfo->goal.offset;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: set period to %d\n", ahd_name(ahd), period);
+#endif
+ if (offset == 0)
+ offset = MAX_OFFSET;
+
+ if (period < 8)
+ period = 8;
+ if (period < 10) {
+ if (spi_max_width(starget)) {
+ ppr_options |= MSG_EXT_PPR_DT_REQ;
+ if (period == 8)
+ ppr_options |= MSG_EXT_PPR_IU_REQ;
+ } else
+ period = 10;
+ }
+
+ dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+
+ /* all PPR requests apart from QAS require wide transfers */
+ if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
+ if (spi_width(starget) == 0)
+ ppr_options &= MSG_EXT_PPR_QAS_REQ;
+ }
+
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = 0;
+ unsigned int period = 0;
+ unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: set offset to %d\n", ahd_name(ahd), offset);
+#endif
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ if (offset != 0) {
+ period = tinfo->goal.period;
+ ppr_options = tinfo->goal.ppr_options;
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+ }
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options,
+ AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_DT_REQ;
+ unsigned int period = tinfo->goal.period;
+ unsigned int width = tinfo->goal.width;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s DT\n", ahd_name(ahd),
+ dt ? "enabling" : "disabling");
+#endif
+ if (dt && spi_max_width(starget)) {
+ ppr_options |= MSG_EXT_PPR_DT_REQ;
+ if (!width)
+ ahd_linux_set_width(starget, 1);
+ } else {
+ if (period <= 9)
+ period = 10; /* If resetting DT, period must be >= 25ns */
+ /* IU is invalid without DT set */
+ ppr_options &= ~MSG_EXT_PPR_IU_REQ;
+ }
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_QAS_REQ;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s QAS\n", ahd_name(ahd),
+ qas ? "enabling" : "disabling");
+#endif
+
+ if (qas) {
+ ppr_options |= MSG_EXT_PPR_QAS_REQ;
+ }
+
+ dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_IU_REQ;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s IU\n", ahd_name(ahd),
+ iu ? "enabling" : "disabling");
+#endif
+
+ if (iu && spi_max_width(starget)) {
+ ppr_options |= MSG_EXT_PPR_IU_REQ;
+ ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */
+ }
+
+ dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_RD_STRM;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s Read Streaming\n", ahd_name(ahd),
+ rdstrm ? "enabling" : "disabling");
+#endif
+
+ if (rdstrm && spi_max_width(starget))
+ ppr_options |= MSG_EXT_PPR_RD_STRM;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_WR_FLOW;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s Write Flow Control\n", ahd_name(ahd),
+ wrflow ? "enabling" : "disabling");
+#endif
+
+ if (wrflow && spi_max_width(starget))
+ ppr_options |= MSG_EXT_PPR_WR_FLOW;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_RTI;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+ unsigned long flags;
+
+ if ((ahd->features & AHD_RTI) == 0) {
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: RTI not available\n", ahd_name(ahd));
+#endif
+ return;
+ }
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s RTI\n", ahd_name(ahd),
+ rti ? "enabling" : "disabling");
+#endif
+
+ if (rti && spi_max_width(starget))
+ ppr_options |= MSG_EXT_PPR_RTI;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_PCOMP_EN;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+ unsigned long flags;
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_DV) != 0)
+ printk("%s: %s Precompensation\n", ahd_name(ahd),
+ pcomp ? "Enable" : "Disable");
+#endif
+
+ if (pcomp && spi_max_width(starget)) {
+ uint8_t precomp;
+
+ if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
+ const struct ahd_linux_iocell_opts *iocell_opts;
+
+ iocell_opts = &aic79xx_iocell_info[ahd->unit];
+ precomp = iocell_opts->precomp;
+ } else {
+ precomp = AIC79XX_DEFAULT_PRECOMP;
+ }
+ ppr_options |= MSG_EXT_PPR_PCOMP_EN;
+ AHD_SET_PRECOMP(ahd, precomp);
+ } else {
+ AHD_SET_PRECOMP(ahd, 0);
+ }
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_initiator_tinfo *tinfo
+ = ahd_fetch_transinfo(ahd,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahd_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_HOLD_MCS;
+ unsigned int period = tinfo->goal.period;
+ unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
+ unsigned long flags;
+
+ if (hold && spi_max_width(starget))
+ ppr_options |= MSG_EXT_PPR_HOLD_MCS;
+
+ ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahd_find_syncrate(ahd, &period, &ppr_options,
+ dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
+
+ ahd_lock(ahd, &flags);
+ ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
+ ppr_options, AHD_TRANS_GOAL, FALSE);
+ ahd_unlock(ahd, &flags);
+}
+
+static void ahd_linux_get_signalling(struct Scsi_Host *shost)
+{
+ struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
+ unsigned long flags;
+ u8 mode;
+
+ ahd_lock(ahd, &flags);
+ ahd_pause(ahd);
+ mode = ahd_inb(ahd, SBLKCTL);
+ ahd_unpause(ahd);
+ ahd_unlock(ahd, &flags);
+
+ if (mode & ENAB40)
+ spi_signalling(shost) = SPI_SIGNAL_LVD;
+ else if (mode & ENAB20)
+ spi_signalling(shost) = SPI_SIGNAL_SE;
+ else
+ spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
+}
+
+static struct spi_function_template ahd_linux_transport_functions = {
+ .set_offset = ahd_linux_set_offset,
+ .show_offset = 1,
+ .set_period = ahd_linux_set_period,
+ .show_period = 1,
+ .set_width = ahd_linux_set_width,
+ .show_width = 1,
+ .set_dt = ahd_linux_set_dt,
+ .show_dt = 1,
+ .set_iu = ahd_linux_set_iu,
+ .show_iu = 1,
+ .set_qas = ahd_linux_set_qas,
+ .show_qas = 1,
+ .set_rd_strm = ahd_linux_set_rd_strm,
+ .show_rd_strm = 1,
+ .set_wr_flow = ahd_linux_set_wr_flow,
+ .show_wr_flow = 1,
+ .set_rti = ahd_linux_set_rti,
+ .show_rti = 1,
+ .set_pcomp_en = ahd_linux_set_pcomp_en,
+ .show_pcomp_en = 1,
+ .set_hold_mcs = ahd_linux_set_hold_mcs,
+ .show_hold_mcs = 1,
+ .get_signalling = ahd_linux_get_signalling,
+};
+
+static int __init
+ahd_linux_init(void)
+{
+ int error = 0;
+
+ /*
+ * If we've been passed any parameters, process them now.
+ */
+ if (aic79xx)
+ aic79xx_setup(aic79xx);
+
+ ahd_linux_transport_template =
+ spi_attach_transport(&ahd_linux_transport_functions);
+ if (!ahd_linux_transport_template)
+ return -ENODEV;
+
+ scsi_transport_reserve_device(ahd_linux_transport_template,
+ sizeof(struct ahd_linux_device));
+
+ error = ahd_linux_pci_init();
+ if (error)
+ spi_release_transport(ahd_linux_transport_template);
+ return error;
+}
+
+static void __exit
+ahd_linux_exit(void)
+{
+ ahd_linux_pci_exit();
+ spi_release_transport(ahd_linux_transport_template);
+}
+
+module_init(ahd_linux_init);
+module_exit(ahd_linux_exit);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
new file mode 100644
index 000000000..c58fa33c6
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -0,0 +1,695 @@
+/*
+ * Adaptec AIC79xx device driver for Linux.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#166 $
+ *
+ */
+#ifndef _AIC79XX_LINUX_H_
+#define _AIC79XX_LINUX_H_
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+/* Core SCSI definitions */
+#define AIC_LIB_PREFIX ahd
+
+/* Name space conflict with BSD queue macros */
+#ifdef LIST_HEAD
+#undef LIST_HEAD
+#endif
+
+#include "cam.h"
+#include "queue.h"
+#include "scsi_message.h"
+#include "scsi_iu.h"
+#include "aiclib.h"
+
+/*********************************** Debugging ********************************/
+#ifdef CONFIG_AIC79XX_DEBUG_ENABLE
+#ifdef CONFIG_AIC79XX_DEBUG_MASK
+#define AHD_DEBUG 1
+#define AHD_DEBUG_OPTS CONFIG_AIC79XX_DEBUG_MASK
+#else
+/*
+ * Compile in debugging code, but do not enable any printfs.
+ */
+#define AHD_DEBUG 1
+#define AHD_DEBUG_OPTS 0
+#endif
+/* No debugging code. */
+#endif
+
+/********************************** Misc Macros *******************************/
+#define powerof2(x) ((((x)-1)&(x))==0)
+
+/************************* Forward Declarations *******************************/
+struct ahd_softc;
+typedef struct pci_dev *ahd_dev_softc_t;
+typedef struct scsi_cmnd *ahd_io_ctx_t;
+
+/******************************* Byte Order ***********************************/
+#define ahd_htobe16(x) cpu_to_be16(x)
+#define ahd_htobe32(x) cpu_to_be32(x)
+#define ahd_htobe64(x) cpu_to_be64(x)
+#define ahd_htole16(x) cpu_to_le16(x)
+#define ahd_htole32(x) cpu_to_le32(x)
+#define ahd_htole64(x) cpu_to_le64(x)
+
+#define ahd_be16toh(x) be16_to_cpu(x)
+#define ahd_be32toh(x) be32_to_cpu(x)
+#define ahd_be64toh(x) be64_to_cpu(x)
+#define ahd_le16toh(x) le16_to_cpu(x)
+#define ahd_le32toh(x) le32_to_cpu(x)
+#define ahd_le64toh(x) le64_to_cpu(x)
+
+/************************* Configuration Data *********************************/
+extern uint32_t aic79xx_allow_memio;
+extern struct scsi_host_template aic79xx_driver_template;
+
+/***************************** Bus Space/DMA **********************************/
+
+typedef uint32_t bus_size_t;
+
+typedef enum {
+ BUS_SPACE_MEMIO,
+ BUS_SPACE_PIO
+} bus_space_tag_t;
+
+typedef union {
+ u_long ioport;
+ volatile uint8_t __iomem *maddr;
+} bus_space_handle_t;
+
+typedef struct bus_dma_segment
+{
+ dma_addr_t ds_addr;
+ bus_size_t ds_len;
+} bus_dma_segment_t;
+
+struct ahd_linux_dma_tag
+{
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_size_t maxsize;
+};
+typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
+
+typedef dma_addr_t bus_dmamap_t;
+
+typedef int bus_dma_filter_t(void*, dma_addr_t);
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+
+#define BUS_DMA_WAITOK 0x0
+#define BUS_DMA_NOWAIT 0x1
+#define BUS_DMA_ALLOCNOW 0x2
+#define BUS_DMA_LOAD_SEGS 0x4 /*
+ * Argument is an S/G list not
+ * a single buffer.
+ */
+
+#define BUS_SPACE_MAXADDR 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
+
+int ahd_dma_tag_create(struct ahd_softc *, bus_dma_tag_t /*parent*/,
+ bus_size_t /*alignment*/, bus_size_t /*boundary*/,
+ dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
+ bus_dma_filter_t*/*filter*/, void */*filterarg*/,
+ bus_size_t /*maxsize*/, int /*nsegments*/,
+ bus_size_t /*maxsegsz*/, int /*flags*/,
+ bus_dma_tag_t */*dma_tagp*/);
+
+void ahd_dma_tag_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/);
+
+int ahd_dmamem_alloc(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
+ void** /*vaddr*/, int /*flags*/,
+ bus_dmamap_t* /*mapp*/);
+
+void ahd_dmamem_free(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
+ void* /*vaddr*/, bus_dmamap_t /*map*/);
+
+void ahd_dmamap_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/,
+ bus_dmamap_t /*map*/);
+
+int ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t /*dmat*/,
+ bus_dmamap_t /*map*/, void * /*buf*/,
+ bus_size_t /*buflen*/, bus_dmamap_callback_t *,
+ void */*callback_arg*/, int /*flags*/);
+
+int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
+
+/*
+ * Operations performed by ahd_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+/*
+ * XXX
+ * ahd_dmamap_sync is only used on buffers allocated with
+ * the pci_alloc_consistent() API. Although I'm not sure how
+ * this works on architectures with a write buffer, Linux does
+ * not have an API to sync "coherent" memory. Perhaps we need
+ * to do an mb()?
+ */
+#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op)
+
+/************************** Timer DataStructures ******************************/
+typedef struct timer_list ahd_timer_t;
+
+/********************************** Includes **********************************/
+#ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT
+#define AIC_DEBUG_REGISTERS 1
+#else
+#define AIC_DEBUG_REGISTERS 0
+#endif
+#include "aic79xx.h"
+
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+
+/***************************** SMP support ************************************/
+#include <linux/spinlock.h>
+
+#define AIC79XX_DRIVER_VERSION "3.0"
+
+/*************************** Device Data Structures ***************************/
+/*
+ * A per probed device structure used to deal with some error recovery
+ * scenarios that the Linux mid-layer code just doesn't know how to
+ * handle. The structure allocated for a device only becomes persistent
+ * after a successfully completed inquiry command to the target when
+ * that inquiry data indicates a lun is present.
+ */
+
+typedef enum {
+ AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
+ AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
+ AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
+ AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
+} ahd_linux_dev_flags;
+
+struct ahd_linux_device {
+ TAILQ_ENTRY(ahd_linux_device) links;
+
+ /*
+ * The number of transactions currently
+ * queued to the device.
+ */
+ int active;
+
+ /*
+ * The currently allowed number of
+ * transactions that can be queued to
+ * the device. Must be signed for
+ * conversion from tagged to untagged
+ * mode where the device may have more
+ * than one outstanding active transaction.
+ */
+ int openings;
+
+ /*
+ * A positive count indicates that this
+ * device's queue is halted.
+ */
+ u_int qfrozen;
+
+ /*
+ * Cumulative command counter.
+ */
+ u_long commands_issued;
+
+ /*
+ * The number of tagged transactions when
+ * running at our current opening level
+ * that have been successfully received by
+ * this device since the last QUEUE FULL.
+ */
+ u_int tag_success_count;
+#define AHD_TAG_SUCCESS_INTERVAL 50
+
+ ahd_linux_dev_flags flags;
+
+ /*
+ * Per device timer.
+ */
+ struct timer_list timer;
+
+ /*
+ * The high limit for the tags variable.
+ */
+ u_int maxtags;
+
+ /*
+ * The computed number of tags outstanding
+ * at the time of the last QUEUE FULL event.
+ */
+ u_int tags_on_last_queuefull;
+
+ /*
+ * How many times we have seen a queue full
+ * with the same number of tags. This is used
+ * to stop our adaptive queue depth algorithm
+ * on devices with a fixed number of tags.
+ */
+ u_int last_queuefull_same_count;
+#define AHD_LOCK_TAGS_COUNT 50
+
+ /*
+ * How many transactions have been queued
+ * without the device going idle. We use
+ * this statistic to determine when to issue
+ * an ordered tag to prevent transaction
+ * starvation. This statistic is only updated
+ * if the AHD_DEV_PERIODIC_OTAG flag is set
+ * on this device.
+ */
+ u_int commands_since_idle_or_otag;
+#define AHD_OTAG_THRESH 500
+};
+
+/********************* Definitions Required by the Core ***********************/
+/*
+ * Number of SG segments we require. So long as the S/G segments for
+ * a particular transaction are allocated in a physically contiguous
+ * manner and are allocated below 4GB, the number of S/G segments is
+ * unrestricted.
+ */
+#define AHD_NSEG 128
+
+/*
+ * Per-SCB OSM storage.
+ */
+struct scb_platform_data {
+ struct ahd_linux_device *dev;
+ dma_addr_t buf_busaddr;
+ uint32_t xfer_len;
+ uint32_t sense_resid; /* Auto-Sense residual */
+};
+
+/*
+ * Define a structure used for each host adapter. All members are
+ * aligned on a boundary >= the size of the member to honor the
+ * alignment restrictions of the various platforms supported by
+ * this driver.
+ */
+struct ahd_platform_data {
+ /*
+ * Fields accessed from interrupt context.
+ */
+ struct scsi_target *starget[AHD_NUM_TARGETS];
+
+ spinlock_t spin_lock;
+ struct completion *eh_done;
+ struct Scsi_Host *host; /* pointer to scsi host */
+#define AHD_LINUX_NOIRQ ((uint32_t)~0)
+ uint32_t irq; /* IRQ for this adapter */
+ uint32_t bios_address;
+ resource_size_t mem_busaddr; /* Mem Base Addr */
+};
+
+void ahd_delay(long);
+
+/***************************** Low Level I/O **********************************/
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
+ long port, uint16_t val);
+void ahd_outsb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+void ahd_insb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+
+/**************************** Initialization **********************************/
+int ahd_linux_register_host(struct ahd_softc *,
+ struct scsi_host_template *);
+
+/******************************** Locking *************************************/
+static inline void
+ahd_lockinit(struct ahd_softc *ahd)
+{
+ spin_lock_init(&ahd->platform_data->spin_lock);
+}
+
+static inline void
+ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
+{
+ spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
+}
+
+static inline void
+ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
+}
+
+/******************************* PCI Definitions ******************************/
+/*
+ * PCIM_xxx: mask to locate subfield in register
+ * PCIR_xxx: config register offset
+ * PCIC_xxx: device class
+ * PCIS_xxx: device subclass
+ * PCIP_xxx: device programming interface
+ * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
+ * PCID_xxx: device ID
+ */
+#define PCIR_DEVVENDOR 0x00
+#define PCIR_VENDOR 0x00
+#define PCIR_DEVICE 0x02
+#define PCIR_COMMAND 0x04
+#define PCIM_CMD_PORTEN 0x0001
+#define PCIM_CMD_MEMEN 0x0002
+#define PCIM_CMD_BUSMASTEREN 0x0004
+#define PCIM_CMD_MWRICEN 0x0010
+#define PCIM_CMD_PERRESPEN 0x0040
+#define PCIM_CMD_SERRESPEN 0x0100
+#define PCIR_STATUS 0x06
+#define PCIR_REVID 0x08
+#define PCIR_PROGIF 0x09
+#define PCIR_SUBCLASS 0x0a
+#define PCIR_CLASS 0x0b
+#define PCIR_CACHELNSZ 0x0c
+#define PCIR_LATTIMER 0x0d
+#define PCIR_HEADERTYPE 0x0e
+#define PCIM_MFDEV 0x80
+#define PCIR_BIST 0x0f
+#define PCIR_CAP_PTR 0x34
+
+/* config registers for header type 0 devices */
+#define PCIR_MAPS 0x10
+#define PCIR_SUBVEND_0 0x2c
+#define PCIR_SUBDEV_0 0x2e
+
+/****************************** PCI-X definitions *****************************/
+#define PCIXR_COMMAND 0x96
+#define PCIXR_DEVADDR 0x98
+#define PCIXM_DEVADDR_FNUM 0x0003 /* Function Number */
+#define PCIXM_DEVADDR_DNUM 0x00F8 /* Device Number */
+#define PCIXM_DEVADDR_BNUM 0xFF00 /* Bus Number */
+#define PCIXR_STATUS 0x9A
+#define PCIXM_STATUS_64BIT 0x0001 /* Active 64bit connection to device. */
+#define PCIXM_STATUS_133CAP 0x0002 /* Device is 133MHz capable */
+#define PCIXM_STATUS_SCDISC 0x0004 /* Split Completion Discarded */
+#define PCIXM_STATUS_UNEXPSC 0x0008 /* Unexpected Split Completion */
+#define PCIXM_STATUS_CMPLEXDEV 0x0010 /* Device Complexity (set == bridge) */
+#define PCIXM_STATUS_MAXMRDBC 0x0060 /* Maximum Burst Read Count */
+#define PCIXM_STATUS_MAXSPLITS 0x0380 /* Maximum Split Transactions */
+#define PCIXM_STATUS_MAXCRDS 0x1C00 /* Maximum Cumulative Read Size */
+#define PCIXM_STATUS_RCVDSCEM 0x2000 /* Received a Split Comp w/Error msg */
+
+typedef enum
+{
+ AHD_POWER_STATE_D0,
+ AHD_POWER_STATE_D1,
+ AHD_POWER_STATE_D2,
+ AHD_POWER_STATE_D3
+} ahd_power_state;
+
+void ahd_power_state_change(struct ahd_softc *ahd,
+ ahd_power_state new_state);
+
+/******************************* PCI Routines *********************************/
+int ahd_linux_pci_init(void);
+void ahd_linux_pci_exit(void);
+int ahd_pci_map_registers(struct ahd_softc *ahd);
+int ahd_pci_map_int(struct ahd_softc *ahd);
+
+uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
+ int reg, int width);
+void ahd_pci_write_config(ahd_dev_softc_t pci,
+ int reg, uint32_t value,
+ int width);
+
+static inline int ahd_get_pci_function(ahd_dev_softc_t);
+static inline int
+ahd_get_pci_function(ahd_dev_softc_t pci)
+{
+ return (PCI_FUNC(pci->devfn));
+}
+
+static inline int ahd_get_pci_slot(ahd_dev_softc_t);
+static inline int
+ahd_get_pci_slot(ahd_dev_softc_t pci)
+{
+ return (PCI_SLOT(pci->devfn));
+}
+
+static inline int ahd_get_pci_bus(ahd_dev_softc_t);
+static inline int
+ahd_get_pci_bus(ahd_dev_softc_t pci)
+{
+ return (pci->bus->number);
+}
+
+static inline void ahd_flush_device_writes(struct ahd_softc *);
+static inline void
+ahd_flush_device_writes(struct ahd_softc *ahd)
+{
+ /* XXX Is this sufficient for all architectures??? */
+ ahd_inb(ahd, INTSTAT);
+}
+
+/**************************** Proc FS Support *********************************/
+int ahd_proc_write_seeprom(struct Scsi_Host *, char *, int);
+int ahd_linux_show_info(struct seq_file *,struct Scsi_Host *);
+
+/*********************** Transaction Access Wrappers **************************/
+static inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
+static inline void ahd_set_transaction_status(struct scb *, uint32_t);
+static inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
+static inline void ahd_set_scsi_status(struct scb *, uint32_t);
+static inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
+static inline uint32_t ahd_get_transaction_status(struct scb *);
+static inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
+static inline uint32_t ahd_get_scsi_status(struct scb *);
+static inline void ahd_set_transaction_tag(struct scb *, int, u_int);
+static inline u_long ahd_get_transfer_length(struct scb *);
+static inline int ahd_get_transfer_dir(struct scb *);
+static inline void ahd_set_residual(struct scb *, u_long);
+static inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
+static inline u_long ahd_get_residual(struct scb *);
+static inline u_long ahd_get_sense_residual(struct scb *);
+static inline int ahd_perform_autosense(struct scb *);
+static inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
+ struct scb *);
+static inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
+ struct ahd_devinfo *);
+static inline void ahd_platform_scb_free(struct ahd_softc *ahd,
+ struct scb *scb);
+static inline void ahd_freeze_scb(struct scb *scb);
+
+static inline
+void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
+{
+ cmd->result &= ~(CAM_STATUS_MASK << 16);
+ cmd->result |= status << 16;
+}
+
+static inline
+void ahd_set_transaction_status(struct scb *scb, uint32_t status)
+{
+ ahd_cmd_set_transaction_status(scb->io_ctx,status);
+}
+
+static inline
+void ahd_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
+{
+ cmd->result &= ~0xFFFF;
+ cmd->result |= status;
+}
+
+static inline
+void ahd_set_scsi_status(struct scb *scb, uint32_t status)
+{
+ ahd_cmd_set_scsi_status(scb->io_ctx, status);
+}
+
+static inline
+uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd)
+{
+ return ((cmd->result >> 16) & CAM_STATUS_MASK);
+}
+
+static inline
+uint32_t ahd_get_transaction_status(struct scb *scb)
+{
+ return (ahd_cmd_get_transaction_status(scb->io_ctx));
+}
+
+static inline
+uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd)
+{
+ return (cmd->result & 0xFFFF);
+}
+
+static inline
+uint32_t ahd_get_scsi_status(struct scb *scb)
+{
+ return (ahd_cmd_get_scsi_status(scb->io_ctx));
+}
+
+static inline
+void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
+{
+ /*
+ * Nothing to do for linux as the incoming transaction
+ * has no concept of tag/non tagged, etc.
+ */
+}
+
+static inline
+u_long ahd_get_transfer_length(struct scb *scb)
+{
+ return (scb->platform_data->xfer_len);
+}
+
+static inline
+int ahd_get_transfer_dir(struct scb *scb)
+{
+ return (scb->io_ctx->sc_data_direction);
+}
+
+static inline
+void ahd_set_residual(struct scb *scb, u_long resid)
+{
+ scsi_set_resid(scb->io_ctx, resid);
+}
+
+static inline
+void ahd_set_sense_residual(struct scb *scb, u_long resid)
+{
+ scb->platform_data->sense_resid = resid;
+}
+
+static inline
+u_long ahd_get_residual(struct scb *scb)
+{
+ return scsi_get_resid(scb->io_ctx);
+}
+
+static inline
+u_long ahd_get_sense_residual(struct scb *scb)
+{
+ return (scb->platform_data->sense_resid);
+}
+
+static inline
+int ahd_perform_autosense(struct scb *scb)
+{
+ /*
+ * We always perform autosense in Linux.
+ * On other platforms this is set on a
+ * per-transaction basis.
+ */
+ return (1);
+}
+
+static inline uint32_t
+ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb)
+{
+ return (sizeof(struct scsi_sense_data));
+}
+
+static inline void
+ahd_notify_xfer_settings_change(struct ahd_softc *ahd,
+ struct ahd_devinfo *devinfo)
+{
+ /* Nothing to do here for linux */
+}
+
+static inline void
+ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb)
+{
+ ahd->flags &= ~AHD_RESOURCE_SHORTAGE;
+}
+
+int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
+void ahd_platform_free(struct ahd_softc *ahd);
+void ahd_platform_init(struct ahd_softc *ahd);
+void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
+
+static inline void
+ahd_freeze_scb(struct scb *scb)
+{
+ if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
+}
+
+void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
+ struct ahd_devinfo *devinfo, ahd_queue_alg);
+int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status);
+irqreturn_t
+ ahd_linux_isr(int irq, void *dev_id);
+void ahd_done(struct ahd_softc*, struct scb*);
+void ahd_send_async(struct ahd_softc *, char channel,
+ u_int target, u_int lun, ac_code);
+void ahd_print_path(struct ahd_softc *, struct scb *);
+
+#ifdef CONFIG_PCI
+#define AHD_PCI_CONFIG 1
+#else
+#define AHD_PCI_CONFIG 0
+#endif
+#define bootverbose aic79xx_verbose
+extern uint32_t aic79xx_verbose;
+
+#endif /* _AIC79XX_LINUX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
new file mode 100644
index 000000000..8466aa784
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -0,0 +1,397 @@
+/*
+ * Linux driver attachment glue for PCI based U320 controllers.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm_pci.c#25 $
+ */
+
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include "aic79xx_pci.h"
+
+/* Define the macro locally since it's different for different class of chips.
+ */
+#define ID(x) \
+ ID2C(x), \
+ ID2C(IDIROC(x))
+
+static const struct pci_device_id ahd_linux_pci_id_table[] = {
+ /* aic7901 based controllers */
+ ID(ID_AHA_29320A),
+ ID(ID_AHA_29320ALP),
+ ID(ID_AHA_29320LPE),
+ /* aic7902 based controllers */
+ ID(ID_AHA_29320),
+ ID(ID_AHA_29320B),
+ ID(ID_AHA_29320LP),
+ ID(ID_AHA_39320),
+ ID(ID_AHA_39320_B),
+ ID(ID_AHA_39320A),
+ ID(ID_AHA_39320D),
+ ID(ID_AHA_39320D_HP),
+ ID(ID_AHA_39320D_B),
+ ID(ID_AHA_39320D_B_HP),
+ /* Generic chip probes for devices we don't know exactly. */
+ ID16(ID_AIC7901 & ID_9005_GENERIC_MASK),
+ ID(ID_AIC7901A & ID_DEV_VENDOR_MASK),
+ ID16(ID_AIC7902 & ID_9005_GENERIC_MASK),
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
+
+#ifdef CONFIG_PM
+static int
+ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ahd_softc *ahd = pci_get_drvdata(pdev);
+ int rc;
+
+ if ((rc = ahd_suspend(ahd)))
+ return rc;
+
+ ahd_pci_suspend(ahd);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ if (mesg.event & PM_EVENT_SLEEP)
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return rc;
+}
+
+static int
+ahd_linux_pci_dev_resume(struct pci_dev *pdev)
+{
+ struct ahd_softc *ahd = pci_get_drvdata(pdev);
+ int rc;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if ((rc = pci_enable_device(pdev))) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to enable device after resume (%d)\n", rc);
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ ahd_pci_resume(ahd);
+
+ ahd_resume(ahd);
+
+ return rc;
+}
+#endif
+
+static void
+ahd_linux_pci_dev_remove(struct pci_dev *pdev)
+{
+ struct ahd_softc *ahd = pci_get_drvdata(pdev);
+ u_long s;
+
+ if (ahd->platform_data && ahd->platform_data->host)
+ scsi_remove_host(ahd->platform_data->host);
+
+ ahd_lock(ahd, &s);
+ ahd_intr_enable(ahd, FALSE);
+ ahd_unlock(ahd, &s);
+ ahd_free(ahd);
+}
+
+static void
+ahd_linux_pci_inherit_flags(struct ahd_softc *ahd)
+{
+ struct pci_dev *pdev = ahd->dev_softc, *master_pdev;
+ unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+
+ master_pdev = pci_get_slot(pdev->bus, master_devfn);
+ if (master_pdev) {
+ struct ahd_softc *master = pci_get_drvdata(master_pdev);
+ if (master) {
+ ahd->flags &= ~AHD_BIOS_ENABLED;
+ ahd->flags |= master->flags & AHD_BIOS_ENABLED;
+ } else
+ printk(KERN_ERR "aic79xx: no multichannel peer found!\n");
+ pci_dev_put(master_pdev);
+ }
+}
+
+static int
+ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ char buf[80];
+ struct ahd_softc *ahd;
+ ahd_dev_softc_t pci;
+ const struct ahd_pci_identity *entry;
+ char *name;
+ int error;
+ struct device *dev = &pdev->dev;
+
+ pci = pdev;
+ entry = ahd_find_pci_device(pci);
+ if (entry == NULL)
+ return (-ENODEV);
+
+ /*
+ * Allocate a softc for this card and
+ * set it up for attachment by our
+ * common detect routine.
+ */
+ sprintf(buf, "ahd_pci:%d:%d:%d",
+ ahd_get_pci_bus(pci),
+ ahd_get_pci_slot(pci),
+ ahd_get_pci_function(pci));
+ name = kstrdup(buf, GFP_ATOMIC);
+ if (name == NULL)
+ return (-ENOMEM);
+ ahd = ahd_alloc(NULL, name);
+ if (ahd == NULL)
+ return (-ENOMEM);
+ if (pci_enable_device(pdev)) {
+ ahd_free(ahd);
+ return (-ENODEV);
+ }
+ pci_set_master(pdev);
+
+ if (sizeof(dma_addr_t) > 4) {
+ const u64 required_mask = dma_get_required_mask(dev);
+
+ if (required_mask > DMA_BIT_MASK(39) &&
+ dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
+ ahd->flags |= AHD_64BIT_ADDRESSING;
+ else if (required_mask > DMA_BIT_MASK(32) &&
+ dma_set_mask(dev, DMA_BIT_MASK(39)) == 0)
+ ahd->flags |= AHD_39BIT_ADDRESSING;
+ else
+ dma_set_mask(dev, DMA_BIT_MASK(32));
+ } else {
+ dma_set_mask(dev, DMA_BIT_MASK(32));
+ }
+ ahd->dev_softc = pci;
+ error = ahd_pci_config(ahd, entry);
+ if (error != 0) {
+ ahd_free(ahd);
+ return (-error);
+ }
+
+ /*
+ * Second Function PCI devices need to inherit some
+ * * settings from function 0.
+ */
+ if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
+ ahd_linux_pci_inherit_flags(ahd);
+
+ pci_set_drvdata(pdev, ahd);
+
+ ahd_linux_register_host(ahd, &aic79xx_driver_template);
+ return (0);
+}
+
+static struct pci_driver aic79xx_pci_driver = {
+ .name = "aic79xx",
+ .probe = ahd_linux_pci_dev_probe,
+#ifdef CONFIG_PM
+ .suspend = ahd_linux_pci_dev_suspend,
+ .resume = ahd_linux_pci_dev_resume,
+#endif
+ .remove = ahd_linux_pci_dev_remove,
+ .id_table = ahd_linux_pci_id_table
+};
+
+int
+ahd_linux_pci_init(void)
+{
+ return pci_register_driver(&aic79xx_pci_driver);
+}
+
+void
+ahd_linux_pci_exit(void)
+{
+ pci_unregister_driver(&aic79xx_pci_driver);
+}
+
+static int
+ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
+ resource_size_t *base2)
+{
+ *base = pci_resource_start(ahd->dev_softc, 0);
+ /*
+ * This is really the 3rd bar and should be at index 2,
+ * but the Linux PCI code doesn't know how to "count" 64bit
+ * bars.
+ */
+ *base2 = pci_resource_start(ahd->dev_softc, 3);
+ if (*base == 0 || *base2 == 0)
+ return (ENOMEM);
+ if (!request_region(*base, 256, "aic79xx"))
+ return (ENOMEM);
+ if (!request_region(*base2, 256, "aic79xx")) {
+ release_region(*base, 256);
+ return (ENOMEM);
+ }
+ return (0);
+}
+
+static int
+ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
+ resource_size_t *bus_addr,
+ uint8_t __iomem **maddr)
+{
+ resource_size_t start;
+ resource_size_t base_page;
+ u_long base_offset;
+ int error = 0;
+
+ if (aic79xx_allow_memio == 0)
+ return (ENOMEM);
+
+ if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) != 0)
+ return (ENOMEM);
+
+ start = pci_resource_start(ahd->dev_softc, 1);
+ base_page = start & PAGE_MASK;
+ base_offset = start - base_page;
+ if (start != 0) {
+ *bus_addr = start;
+ if (!request_mem_region(start, 0x1000, "aic79xx"))
+ error = ENOMEM;
+ if (!error) {
+ *maddr = ioremap_nocache(base_page, base_offset + 512);
+ if (*maddr == NULL) {
+ error = ENOMEM;
+ release_mem_region(start, 0x1000);
+ } else
+ *maddr += base_offset;
+ }
+ } else
+ error = ENOMEM;
+ return (error);
+}
+
+int
+ahd_pci_map_registers(struct ahd_softc *ahd)
+{
+ uint32_t command;
+ resource_size_t base;
+ uint8_t __iomem *maddr;
+ int error;
+
+ /*
+ * If its allowed, we prefer memory mapped access.
+ */
+ command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, 4);
+ command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN);
+ base = 0;
+ maddr = NULL;
+ error = ahd_linux_pci_reserve_mem_region(ahd, &base, &maddr);
+ if (error == 0) {
+ ahd->platform_data->mem_busaddr = base;
+ ahd->tags[0] = BUS_SPACE_MEMIO;
+ ahd->bshs[0].maddr = maddr;
+ ahd->tags[1] = BUS_SPACE_MEMIO;
+ ahd->bshs[1].maddr = maddr + 0x100;
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+ command | PCIM_CMD_MEMEN, 4);
+
+ if (ahd_pci_test_register_access(ahd) != 0) {
+
+ printk("aic79xx: PCI Device %d:%d:%d "
+ "failed memory mapped test. Using PIO.\n",
+ ahd_get_pci_bus(ahd->dev_softc),
+ ahd_get_pci_slot(ahd->dev_softc),
+ ahd_get_pci_function(ahd->dev_softc));
+ iounmap(maddr);
+ release_mem_region(ahd->platform_data->mem_busaddr,
+ 0x1000);
+ ahd->bshs[0].maddr = NULL;
+ maddr = NULL;
+ } else
+ command |= PCIM_CMD_MEMEN;
+ } else if (bootverbose) {
+ printk("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
+ "unavailable. Cannot memory map device.\n",
+ ahd_get_pci_bus(ahd->dev_softc),
+ ahd_get_pci_slot(ahd->dev_softc),
+ ahd_get_pci_function(ahd->dev_softc),
+ (unsigned long long)base);
+ }
+
+ if (maddr == NULL) {
+ resource_size_t base2;
+
+ error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
+ if (error == 0) {
+ ahd->tags[0] = BUS_SPACE_PIO;
+ ahd->tags[1] = BUS_SPACE_PIO;
+ ahd->bshs[0].ioport = (u_long)base;
+ ahd->bshs[1].ioport = (u_long)base2;
+ command |= PCIM_CMD_PORTEN;
+ } else {
+ printk("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
+ "0x%llx unavailable. Cannot map device.\n",
+ ahd_get_pci_bus(ahd->dev_softc),
+ ahd_get_pci_slot(ahd->dev_softc),
+ ahd_get_pci_function(ahd->dev_softc),
+ (unsigned long long)base,
+ (unsigned long long)base2);
+ }
+ }
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
+ return (error);
+}
+
+int
+ahd_pci_map_int(struct ahd_softc *ahd)
+{
+ int error;
+
+ error = request_irq(ahd->dev_softc->irq, ahd_linux_isr,
+ IRQF_SHARED, "aic79xx", ahd);
+ if (!error)
+ ahd->platform_data->irq = ahd->dev_softc->irq;
+
+ return (-error);
+}
+
+void
+ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state)
+{
+ pci_set_power_state(ahd->dev_softc, new_state);
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
new file mode 100644
index 000000000..cc9bd26f5
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -0,0 +1,1014 @@
+/*
+ * Product specific probe and attach routines for:
+ * aic7901 and aic7902 SCSI controllers
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
+ */
+
+#ifdef __linux__
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#else
+#include <dev/aic7xxx/aic79xx_osm.h>
+#include <dev/aic7xxx/aic79xx_inline.h>
+#endif
+
+#include "aic79xx_pci.h"
+
+static inline uint64_t
+ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
+{
+ uint64_t id;
+
+ id = subvendor
+ | (subdevice << 16)
+ | ((uint64_t)vendor << 32)
+ | ((uint64_t)device << 48);
+
+ return (id);
+}
+
+#define ID_AIC7902_PCI_REV_A4 0x3
+#define ID_AIC7902_PCI_REV_B0 0x10
+#define SUBID_HP 0x0E11
+
+#define DEVID_9005_HOSTRAID(id) ((id) & 0x80)
+
+#define DEVID_9005_TYPE(id) ((id) & 0xF)
+#define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */
+#define DEVID_9005_TYPE_HBA_2EXT 0x1 /* 2 External Ports */
+#define DEVID_9005_TYPE_IROC 0x8 /* Raid(0,1,10) Card */
+#define DEVID_9005_TYPE_MB 0xF /* On Motherboard */
+
+#define DEVID_9005_MFUNC(id) ((id) & 0x10)
+
+#define DEVID_9005_PACKETIZED(id) ((id) & 0x8000)
+
+#define SUBID_9005_TYPE(id) ((id) & 0xF)
+#define SUBID_9005_TYPE_HBA 0x0 /* Standard Card */
+#define SUBID_9005_TYPE_MB 0xF /* On Motherboard */
+
+#define SUBID_9005_AUTOTERM(id) (((id) & 0x10) == 0)
+
+#define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20)
+
+#define SUBID_9005_SEEPTYPE(id) (((id) & 0x0C0) >> 6)
+#define SUBID_9005_SEEPTYPE_NONE 0x0
+#define SUBID_9005_SEEPTYPE_4K 0x1
+
+static ahd_device_setup_t ahd_aic7901_setup;
+static ahd_device_setup_t ahd_aic7901A_setup;
+static ahd_device_setup_t ahd_aic7902_setup;
+static ahd_device_setup_t ahd_aic790X_setup;
+
+static const struct ahd_pci_identity ahd_pci_ident_table[] =
+{
+ /* aic7901 based controllers */
+ {
+ ID_AHA_29320A,
+ ID_ALL_MASK,
+ "Adaptec 29320A Ultra320 SCSI adapter",
+ ahd_aic7901_setup
+ },
+ {
+ ID_AHA_29320ALP,
+ ID_ALL_MASK,
+ "Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
+ ahd_aic7901_setup
+ },
+ {
+ ID_AHA_29320LPE,
+ ID_ALL_MASK,
+ "Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
+ ahd_aic7901_setup
+ },
+ /* aic7901A based controllers */
+ {
+ ID_AHA_29320LP,
+ ID_ALL_MASK,
+ "Adaptec 29320LP Ultra320 SCSI adapter",
+ ahd_aic7901A_setup
+ },
+ /* aic7902 based controllers */
+ {
+ ID_AHA_29320,
+ ID_ALL_MASK,
+ "Adaptec 29320 Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_29320B,
+ ID_ALL_MASK,
+ "Adaptec 29320B Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320,
+ ID_ALL_MASK,
+ "Adaptec 39320 Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320_B,
+ ID_ALL_MASK,
+ "Adaptec 39320 Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320_B_DELL,
+ ID_ALL_MASK,
+ "Adaptec (Dell OEM) 39320 Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320A,
+ ID_ALL_MASK,
+ "Adaptec 39320A Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320D,
+ ID_ALL_MASK,
+ "Adaptec 39320D Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320D_HP,
+ ID_ALL_MASK,
+ "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320D_B,
+ ID_ALL_MASK,
+ "Adaptec 39320D Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ {
+ ID_AHA_39320D_B_HP,
+ ID_ALL_MASK,
+ "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ },
+ /* Generic chip probes for devices we don't know 'exactly' */
+ {
+ ID_AIC7901 & ID_9005_GENERIC_MASK,
+ ID_9005_GENERIC_MASK,
+ "Adaptec AIC7901 Ultra320 SCSI adapter",
+ ahd_aic7901_setup
+ },
+ {
+ ID_AIC7901A & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec AIC7901A Ultra320 SCSI adapter",
+ ahd_aic7901A_setup
+ },
+ {
+ ID_AIC7902 & ID_9005_GENERIC_MASK,
+ ID_9005_GENERIC_MASK,
+ "Adaptec AIC7902 Ultra320 SCSI adapter",
+ ahd_aic7902_setup
+ }
+};
+
+static const u_int ahd_num_pci_devs = ARRAY_SIZE(ahd_pci_ident_table);
+
+#define DEVCONFIG 0x40
+#define PCIXINITPAT 0x0000E000ul
+#define PCIXINIT_PCI33_66 0x0000E000ul
+#define PCIXINIT_PCIX50_66 0x0000C000ul
+#define PCIXINIT_PCIX66_100 0x0000A000ul
+#define PCIXINIT_PCIX100_133 0x00008000ul
+#define PCI_BUS_MODES_INDEX(devconfig) \
+ (((devconfig) & PCIXINITPAT) >> 13)
+static const char *pci_bus_modes[] =
+{
+ "PCI bus mode unknown",
+ "PCI bus mode unknown",
+ "PCI bus mode unknown",
+ "PCI bus mode unknown",
+ "PCI-X 101-133MHz",
+ "PCI-X 67-100MHz",
+ "PCI-X 50-66MHz",
+ "PCI 33 or 66MHz"
+};
+
+#define TESTMODE 0x00000800ul
+#define IRDY_RST 0x00000200ul
+#define FRAME_RST 0x00000100ul
+#define PCI64BIT 0x00000080ul
+#define MRDCEN 0x00000040ul
+#define ENDIANSEL 0x00000020ul
+#define MIXQWENDIANEN 0x00000008ul
+#define DACEN 0x00000004ul
+#define STPWLEVEL 0x00000002ul
+#define QWENDIANSEL 0x00000001ul
+
+#define DEVCONFIG1 0x44
+#define PREQDIS 0x01
+
+#define CSIZE_LATTIME 0x0c
+#define CACHESIZE 0x000000fful
+#define LATTIME 0x0000ff00ul
+
+static int ahd_check_extport(struct ahd_softc *ahd);
+static void ahd_configure_termination(struct ahd_softc *ahd,
+ u_int adapter_control);
+static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
+static void ahd_pci_intr(struct ahd_softc *ahd);
+
+const struct ahd_pci_identity *
+ahd_find_pci_device(ahd_dev_softc_t pci)
+{
+ uint64_t full_id;
+ uint16_t device;
+ uint16_t vendor;
+ uint16_t subdevice;
+ uint16_t subvendor;
+ const struct ahd_pci_identity *entry;
+ u_int i;
+
+ vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
+ device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
+ subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
+ subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ full_id = ahd_compose_id(device,
+ vendor,
+ subdevice,
+ subvendor);
+
+ /*
+ * Controllers, mask out the IROC/HostRAID bit
+ */
+
+ full_id &= ID_ALL_IROC_MASK;
+
+ for (i = 0; i < ahd_num_pci_devs; i++) {
+ entry = &ahd_pci_ident_table[i];
+ if (entry->full_id == (full_id & entry->id_mask)) {
+ /* Honor exclusion entries. */
+ if (entry->name == NULL)
+ return (NULL);
+ return (entry);
+ }
+ }
+ return (NULL);
+}
+
+int
+ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
+{
+ struct scb_data *shared_scb_data;
+ u_int command;
+ uint32_t devconfig;
+ uint16_t subvendor;
+ int error;
+
+ shared_scb_data = NULL;
+ ahd->description = entry->name;
+ /*
+ * Record if this is an HP board.
+ */
+ subvendor = ahd_pci_read_config(ahd->dev_softc,
+ PCIR_SUBVEND_0, /*bytes*/2);
+ if (subvendor == SUBID_HP)
+ ahd->flags |= AHD_HP_BOARD;
+
+ error = entry->setup(ahd);
+ if (error != 0)
+ return (error);
+
+ devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4);
+ if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) {
+ ahd->chip |= AHD_PCI;
+ /* Disable PCIX workarounds when running in PCI mode. */
+ ahd->bugs &= ~AHD_PCIX_BUG_MASK;
+ } else {
+ ahd->chip |= AHD_PCIX;
+ }
+ ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)];
+
+ ahd_power_state_change(ahd, AHD_POWER_STATE_D0);
+
+ error = ahd_pci_map_registers(ahd);
+ if (error != 0)
+ return (error);
+
+ /*
+ * If we need to support high memory, enable dual
+ * address cycles. This bit must be set to enable
+ * high address bit generation even if we are on a
+ * 64bit bus (PCI64BIT set in devconfig).
+ */
+ if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
+ if (bootverbose)
+ printk("%s: Enabling 39Bit Addressing\n",
+ ahd_name(ahd));
+ devconfig = ahd_pci_read_config(ahd->dev_softc,
+ DEVCONFIG, /*bytes*/4);
+ devconfig |= DACEN;
+ ahd_pci_write_config(ahd->dev_softc, DEVCONFIG,
+ devconfig, /*bytes*/4);
+ }
+
+ /* Ensure busmastering is enabled */
+ command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+ command |= PCIM_CMD_BUSMASTEREN;
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2);
+
+ error = ahd_softc_init(ahd);
+ if (error != 0)
+ return (error);
+
+ ahd->bus_intr = ahd_pci_intr;
+
+ error = ahd_reset(ahd, /*reinit*/FALSE);
+ if (error != 0)
+ return (ENXIO);
+
+ ahd->pci_cachesize =
+ ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME,
+ /*bytes*/1) & CACHESIZE;
+ ahd->pci_cachesize *= 4;
+
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ /* See if we have a SEEPROM and perform auto-term */
+ error = ahd_check_extport(ahd);
+ if (error != 0)
+ return (error);
+
+ /* Core initialization */
+ error = ahd_init(ahd);
+ if (error != 0)
+ return (error);
+ ahd->init_level++;
+
+ /*
+ * Allow interrupts now that we are completely setup.
+ */
+ return ahd_pci_map_int(ahd);
+}
+
+#ifdef CONFIG_PM
+void
+ahd_pci_suspend(struct ahd_softc *ahd)
+{
+ /*
+ * Save chip register configuration data for chip resets
+ * that occur during runtime and resume events.
+ */
+ ahd->suspend_state.pci_state.devconfig =
+ ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4);
+ ahd->suspend_state.pci_state.command =
+ ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1);
+ ahd->suspend_state.pci_state.csize_lattime =
+ ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1);
+
+}
+
+void
+ahd_pci_resume(struct ahd_softc *ahd)
+{
+ ahd_pci_write_config(ahd->dev_softc, DEVCONFIG,
+ ahd->suspend_state.pci_state.devconfig, /*bytes*/4);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+ ahd->suspend_state.pci_state.command, /*bytes*/1);
+ ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME,
+ ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1);
+}
+#endif
+
+/*
+ * Perform some simple tests that should catch situations where
+ * our registers are invalidly mapped.
+ */
+int
+ahd_pci_test_register_access(struct ahd_softc *ahd)
+{
+ uint32_t cmd;
+ u_int targpcistat;
+ u_int pci_status1;
+ int error;
+ uint8_t hcntrl;
+
+ error = EIO;
+
+ /*
+ * Enable PCI error interrupt status, but suppress NMIs
+ * generated by SERR raised due to target aborts.
+ */
+ cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+ cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2);
+
+ /*
+ * First a simple test to see if any
+ * registers can be read. Reading
+ * HCNTRL has no side effects and has
+ * at least one bit that is guaranteed to
+ * be zero so it is a good register to
+ * use for this test.
+ */
+ hcntrl = ahd_inb(ahd, HCNTRL);
+ if (hcntrl == 0xFF)
+ goto fail;
+
+ /*
+ * Next create a situation where write combining
+ * or read prefetching could be initiated by the
+ * CPU or host bridge. Our device does not support
+ * either, so look for data corruption and/or flaged
+ * PCI errors. First pause without causing another
+ * chip reset.
+ */
+ hcntrl &= ~CHIPRST;
+ ahd_outb(ahd, HCNTRL, hcntrl|PAUSE);
+ while (ahd_is_paused(ahd) == 0)
+ ;
+
+ /* Clear any PCI errors that occurred before our driver attached. */
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ targpcistat = ahd_inb(ahd, TARGPCISTAT);
+ ahd_outb(ahd, TARGPCISTAT, targpcistat);
+ pci_status1 = ahd_pci_read_config(ahd->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+ pci_status1, /*bytes*/1);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_outb(ahd, CLRINT, CLRPCIINT);
+
+ ahd_outb(ahd, SEQCTL0, PERRORDIS);
+ ahd_outl(ahd, SRAM_BASE, 0x5aa555aa);
+ if (ahd_inl(ahd, SRAM_BASE) != 0x5aa555aa)
+ goto fail;
+
+ if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ targpcistat = ahd_inb(ahd, TARGPCISTAT);
+ if ((targpcistat & STA) != 0)
+ goto fail;
+ }
+
+ error = 0;
+
+fail:
+ if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
+
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ targpcistat = ahd_inb(ahd, TARGPCISTAT);
+
+ /* Silently clear any latched errors. */
+ ahd_outb(ahd, TARGPCISTAT, targpcistat);
+ pci_status1 = ahd_pci_read_config(ahd->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+ pci_status1, /*bytes*/1);
+ ahd_outb(ahd, CLRINT, CLRPCIINT);
+ }
+ ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2);
+ return (error);
+}
+
+/*
+ * Check the external port logic for a serial eeprom
+ * and termination/cable detection contrls.
+ */
+static int
+ahd_check_extport(struct ahd_softc *ahd)
+{
+ struct vpd_config vpd;
+ struct seeprom_config *sc;
+ u_int adapter_control;
+ int have_seeprom;
+ int error;
+
+ sc = ahd->seep_config;
+ have_seeprom = ahd_acquire_seeprom(ahd);
+ if (have_seeprom) {
+ u_int start_addr;
+
+ /*
+ * Fetch VPD for this function and parse it.
+ */
+ if (bootverbose)
+ printk("%s: Reading VPD from SEEPROM...",
+ ahd_name(ahd));
+
+ /* Address is always in units of 16bit words */
+ start_addr = ((2 * sizeof(*sc))
+ + (sizeof(vpd) * (ahd->channel - 'A'))) / 2;
+
+ error = ahd_read_seeprom(ahd, (uint16_t *)&vpd,
+ start_addr, sizeof(vpd)/2,
+ /*bytestream*/TRUE);
+ if (error == 0)
+ error = ahd_parse_vpddata(ahd, &vpd);
+ if (bootverbose)
+ printk("%s: VPD parsing %s\n",
+ ahd_name(ahd),
+ error == 0 ? "successful" : "failed");
+
+ if (bootverbose)
+ printk("%s: Reading SEEPROM...", ahd_name(ahd));
+
+ /* Address is always in units of 16bit words */
+ start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A');
+
+ error = ahd_read_seeprom(ahd, (uint16_t *)sc,
+ start_addr, sizeof(*sc)/2,
+ /*bytestream*/FALSE);
+
+ if (error != 0) {
+ printk("Unable to read SEEPROM\n");
+ have_seeprom = 0;
+ } else {
+ have_seeprom = ahd_verify_cksum(sc);
+
+ if (bootverbose) {
+ if (have_seeprom == 0)
+ printk ("checksum error\n");
+ else
+ printk ("done.\n");
+ }
+ }
+ ahd_release_seeprom(ahd);
+ }
+
+ if (!have_seeprom) {
+ u_int nvram_scb;
+
+ /*
+ * Pull scratch ram settings and treat them as
+ * if they are the contents of an seeprom if
+ * the 'ADPT', 'BIOS', or 'ASPI' signature is found
+ * in SCB 0xFF. We manually compose the data as 16bit
+ * values to avoid endian issues.
+ */
+ ahd_set_scbptr(ahd, 0xFF);
+ nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET);
+ if (nvram_scb != 0xFF
+ && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A'
+ && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D'
+ && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P'
+ && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T')
+ || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B'
+ && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I'
+ && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O'
+ && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S')
+ || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A'
+ && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S'
+ && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P'
+ && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) {
+ uint16_t *sc_data;
+ int i;
+
+ ahd_set_scbptr(ahd, nvram_scb);
+ sc_data = (uint16_t *)sc;
+ for (i = 0; i < 64; i += 2)
+ *sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i);
+ have_seeprom = ahd_verify_cksum(sc);
+ if (have_seeprom)
+ ahd->flags |= AHD_SCB_CONFIG_USED;
+ }
+ }
+
+#ifdef AHD_DEBUG
+ if (have_seeprom != 0
+ && (ahd_debug & AHD_DUMP_SEEPROM) != 0) {
+ uint16_t *sc_data;
+ int i;
+
+ printk("%s: Seeprom Contents:", ahd_name(ahd));
+ sc_data = (uint16_t *)sc;
+ for (i = 0; i < (sizeof(*sc)); i += 2)
+ printk("\n\t0x%.4x", sc_data[i]);
+ printk("\n");
+ }
+#endif
+
+ if (!have_seeprom) {
+ if (bootverbose)
+ printk("%s: No SEEPROM available.\n", ahd_name(ahd));
+ ahd->flags |= AHD_USEDEFAULTS;
+ error = ahd_default_config(ahd);
+ adapter_control = CFAUTOTERM|CFSEAUTOTERM;
+ kfree(ahd->seep_config);
+ ahd->seep_config = NULL;
+ } else {
+ error = ahd_parse_cfgdata(ahd, sc);
+ adapter_control = sc->adapter_control;
+ }
+ if (error != 0)
+ return (error);
+
+ ahd_configure_termination(ahd, adapter_control);
+
+ return (0);
+}
+
+static void
+ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
+{
+ int error;
+ u_int sxfrctl1;
+ uint8_t termctl;
+ uint32_t devconfig;
+
+ devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4);
+ devconfig &= ~STPWLEVEL;
+ if ((ahd->flags & AHD_STPWLEVEL_A) != 0)
+ devconfig |= STPWLEVEL;
+ if (bootverbose)
+ printk("%s: STPWLEVEL is %s\n",
+ ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off");
+ ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
+
+ /* Make sure current sensing is off. */
+ if ((ahd->flags & AHD_CURRENT_SENSING) != 0) {
+ (void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
+ }
+
+ /*
+ * Read to sense. Write to set.
+ */
+ error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl);
+ if ((adapter_control & CFAUTOTERM) == 0) {
+ if (bootverbose)
+ printk("%s: Manual Primary Termination\n",
+ ahd_name(ahd));
+ termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH);
+ if ((adapter_control & CFSTERM) != 0)
+ termctl |= FLX_TERMCTL_ENPRILOW;
+ if ((adapter_control & CFWSTERM) != 0)
+ termctl |= FLX_TERMCTL_ENPRIHIGH;
+ } else if (error != 0) {
+ printk("%s: Primary Auto-Term Sensing failed! "
+ "Using Defaults.\n", ahd_name(ahd));
+ termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH;
+ }
+
+ if ((adapter_control & CFSEAUTOTERM) == 0) {
+ if (bootverbose)
+ printk("%s: Manual Secondary Termination\n",
+ ahd_name(ahd));
+ termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH);
+ if ((adapter_control & CFSELOWTERM) != 0)
+ termctl |= FLX_TERMCTL_ENSECLOW;
+ if ((adapter_control & CFSEHIGHTERM) != 0)
+ termctl |= FLX_TERMCTL_ENSECHIGH;
+ } else if (error != 0) {
+ printk("%s: Secondary Auto-Term Sensing failed! "
+ "Using Defaults.\n", ahd_name(ahd));
+ termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH;
+ }
+
+ /*
+ * Now set the termination based on what we found.
+ */
+ sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN;
+ ahd->flags &= ~AHD_TERM_ENB_A;
+ if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) {
+ ahd->flags |= AHD_TERM_ENB_A;
+ sxfrctl1 |= STPWEN;
+ }
+ /* Must set the latch once in order to be effective. */
+ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
+ ahd_outb(ahd, SXFRCTL1, sxfrctl1);
+
+ error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl);
+ if (error != 0) {
+ printk("%s: Unable to set termination settings!\n",
+ ahd_name(ahd));
+ } else if (bootverbose) {
+ printk("%s: Primary High byte termination %sabled\n",
+ ahd_name(ahd),
+ (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis");
+
+ printk("%s: Primary Low byte termination %sabled\n",
+ ahd_name(ahd),
+ (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis");
+
+ printk("%s: Secondary High byte termination %sabled\n",
+ ahd_name(ahd),
+ (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis");
+
+ printk("%s: Secondary Low byte termination %sabled\n",
+ ahd_name(ahd),
+ (termctl & FLX_TERMCTL_ENSECLOW) ? "En" : "Dis");
+ }
+ return;
+}
+
+#define DPE 0x80
+#define SSE 0x40
+#define RMA 0x20
+#define RTA 0x10
+#define STA 0x08
+#define DPR 0x01
+
+static const char *split_status_source[] =
+{
+ "DFF0",
+ "DFF1",
+ "OVLY",
+ "CMC",
+};
+
+static const char *pci_status_source[] =
+{
+ "DFF0",
+ "DFF1",
+ "SG",
+ "CMC",
+ "OVLY",
+ "NONE",
+ "MSI",
+ "TARG"
+};
+
+static const char *split_status_strings[] =
+{
+ "%s: Received split response in %s.\n",
+ "%s: Received split completion error message in %s\n",
+ "%s: Receive overrun in %s\n",
+ "%s: Count not complete in %s\n",
+ "%s: Split completion data bucket in %s\n",
+ "%s: Split completion address error in %s\n",
+ "%s: Split completion byte count error in %s\n",
+ "%s: Signaled Target-abort to early terminate a split in %s\n"
+};
+
+static const char *pci_status_strings[] =
+{
+ "%s: Data Parity Error has been reported via PERR# in %s\n",
+ "%s: Target initial wait state error in %s\n",
+ "%s: Split completion read data parity error in %s\n",
+ "%s: Split completion address attribute parity error in %s\n",
+ "%s: Received a Target Abort in %s\n",
+ "%s: Received a Master Abort in %s\n",
+ "%s: Signal System Error Detected in %s\n",
+ "%s: Address or Write Phase Parity Error Detected in %s.\n"
+};
+
+static void
+ahd_pci_intr(struct ahd_softc *ahd)
+{
+ uint8_t pci_status[8];
+ ahd_mode_state saved_modes;
+ u_int pci_status1;
+ u_int intstat;
+ u_int i;
+ u_int reg;
+
+ intstat = ahd_inb(ahd, INTSTAT);
+
+ if ((intstat & SPLTINT) != 0)
+ ahd_pci_split_intr(ahd, intstat);
+
+ if ((intstat & PCIINT) == 0)
+ return;
+
+ printk("%s: PCI error Interrupt\n", ahd_name(ahd));
+ saved_modes = ahd_save_modes(ahd);
+ ahd_dump_card_state(ahd);
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) {
+
+ if (i == 5)
+ continue;
+ pci_status[i] = ahd_inb(ahd, reg);
+ /* Clear latched errors. So our interrupt deasserts. */
+ ahd_outb(ahd, reg, pci_status[i]);
+ }
+
+ for (i = 0; i < 8; i++) {
+ u_int bit;
+
+ if (i == 5)
+ continue;
+
+ for (bit = 0; bit < 8; bit++) {
+
+ if ((pci_status[i] & (0x1 << bit)) != 0) {
+ const char *s;
+
+ s = pci_status_strings[bit];
+ if (i == 7/*TARG*/ && bit == 3)
+ s = "%s: Signaled Target Abort\n";
+ printk(s, ahd_name(ahd), pci_status_source[i]);
+ }
+ }
+ }
+ pci_status1 = ahd_pci_read_config(ahd->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+ ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+ pci_status1, /*bytes*/1);
+ ahd_restore_modes(ahd, saved_modes);
+ ahd_outb(ahd, CLRINT, CLRPCIINT);
+ ahd_unpause(ahd);
+}
+
+static void
+ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
+{
+ uint8_t split_status[4];
+ uint8_t split_status1[4];
+ uint8_t sg_split_status[2];
+ uint8_t sg_split_status1[2];
+ ahd_mode_state saved_modes;
+ u_int i;
+ uint16_t pcix_status;
+
+ /*
+ * Check for splits in all modes. Modes 0 and 1
+ * additionally have SG engine splits to look at.
+ */
+ pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS,
+ /*bytes*/2);
+ printk("%s: PCI Split Interrupt - PCI-X status = 0x%x\n",
+ ahd_name(ahd), pcix_status);
+ saved_modes = ahd_save_modes(ahd);
+ for (i = 0; i < 4; i++) {
+ ahd_set_modes(ahd, i, i);
+
+ split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0);
+ split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1);
+ /* Clear latched errors. So our interrupt deasserts. */
+ ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]);
+ ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]);
+ if (i > 1)
+ continue;
+ sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0);
+ sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1);
+ /* Clear latched errors. So our interrupt deasserts. */
+ ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]);
+ ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]);
+ }
+
+ for (i = 0; i < 4; i++) {
+ u_int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+
+ if ((split_status[i] & (0x1 << bit)) != 0)
+ printk(split_status_strings[bit], ahd_name(ahd),
+ split_status_source[i]);
+
+ if (i > 1)
+ continue;
+
+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
+ }
+ }
+ /*
+ * Clear PCI-X status bits.
+ */
+ ahd_pci_write_config(ahd->dev_softc, PCIXR_STATUS,
+ pcix_status, /*bytes*/2);
+ ahd_outb(ahd, CLRINT, CLRSPLTINT);
+ ahd_restore_modes(ahd, saved_modes);
+}
+
+static int
+ahd_aic7901_setup(struct ahd_softc *ahd)
+{
+
+ ahd->chip = AHD_AIC7901;
+ ahd->features = AHD_AIC7901_FE;
+ return (ahd_aic790X_setup(ahd));
+}
+
+static int
+ahd_aic7901A_setup(struct ahd_softc *ahd)
+{
+
+ ahd->chip = AHD_AIC7901A;
+ ahd->features = AHD_AIC7901A_FE;
+ return (ahd_aic790X_setup(ahd));
+}
+
+static int
+ahd_aic7902_setup(struct ahd_softc *ahd)
+{
+ ahd->chip = AHD_AIC7902;
+ ahd->features = AHD_AIC7902_FE;
+ return (ahd_aic790X_setup(ahd));
+}
+
+static int
+ahd_aic790X_setup(struct ahd_softc *ahd)
+{
+ ahd_dev_softc_t pci;
+ u_int rev;
+
+ pci = ahd->dev_softc;
+ rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+ if (rev < ID_AIC7902_PCI_REV_A4) {
+ printk("%s: Unable to attach to unsupported chip revision %d\n",
+ ahd_name(ahd), rev);
+ ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2);
+ return (ENXIO);
+ }
+ ahd->channel = ahd_get_pci_function(pci) + 'A';
+ if (rev < ID_AIC7902_PCI_REV_B0) {
+ /*
+ * Enable A series workarounds.
+ */
+ ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG
+ | AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG
+ | AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG
+ | AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG
+ | AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG
+ | AHD_PCIX_CHIPRST_BUG|AHD_PCIX_SCBRAM_RD_BUG
+ | AHD_PKTIZED_STATUS_BUG|AHD_PKT_LUN_BUG
+ | AHD_MDFF_WSCBPTR_BUG|AHD_REG_SLOW_SETTLE_BUG
+ | AHD_SET_MODE_BUG|AHD_BUSFREEREV_BUG
+ | AHD_NONPACKFIFO_BUG|AHD_PACED_NEGTABLE_BUG
+ | AHD_FAINT_LED_BUG;
+
+ /*
+ * IO Cell parameter setup.
+ */
+ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29);
+
+ if ((ahd->flags & AHD_HP_BOARD) == 0)
+ AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA);
+ } else {
+ /* This is revision B and newer. */
+ extern uint32_t aic79xx_slowcrc;
+ u_int devconfig1;
+
+ ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS
+ | AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY
+ | AHD_BUSFREEREV_BUG;
+ ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
+
+ /* If the user requested that the SLOWCRC bit to be set. */
+ if (aic79xx_slowcrc)
+ ahd->features |= AHD_AIC79XXB_SLOWCRC;
+
+ /*
+ * Some issues have been resolved in the 7901B.
+ */
+ if ((ahd->features & AHD_MULTI_FUNC) != 0)
+ ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG;
+
+ /*
+ * IO Cell parameter setup.
+ */
+ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29);
+ AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB);
+ AHD_SET_AMPLITUDE(ahd, AHD_AMPLITUDE_DEF);
+
+ /*
+ * Set the PREQDIS bit for H2B which disables some workaround
+ * that doesn't work on regular PCI busses.
+ * XXX - Find out exactly what this does from the hardware
+ * folks!
+ */
+ devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1);
+ ahd_pci_write_config(pci, DEVCONFIG1,
+ devconfig1|PREQDIS, /*bytes*/1);
+ devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1);
+ }
+
+ return (0);
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.h b/drivers/scsi/aic7xxx/aic79xx_pci.h
new file mode 100644
index 000000000..16b7c70a6
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.h
@@ -0,0 +1,72 @@
+/*
+ * Adaptec AIC79xx device driver for Linux.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id$
+ *
+ */
+#ifndef _AIC79XX_PCI_H_
+#define _AIC79XX_PCI_H_
+
+#define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull
+#define ID_ALL_IROC_MASK 0xFF7FFFFFFFFFFFFFull
+#define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull
+#define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull
+#define ID_9005_GENERIC_IROC_MASK 0xFF70FFFF00000000ull
+
+#define ID_AIC7901 0x800F9005FFFF9005ull
+#define ID_AHA_29320A 0x8000900500609005ull
+#define ID_AHA_29320ALP 0x8017900500449005ull
+#define ID_AHA_29320LPE 0x8017900500459005ull
+
+#define ID_AIC7901A 0x801E9005FFFF9005ull
+#define ID_AHA_29320LP 0x8014900500449005ull
+
+#define ID_AIC7902 0x801F9005FFFF9005ull
+#define ID_AIC7902_B 0x801D9005FFFF9005ull
+#define ID_AHA_39320 0x8010900500409005ull
+#define ID_AHA_29320 0x8012900500429005ull
+#define ID_AHA_29320B 0x8013900500439005ull
+#define ID_AHA_39320_B 0x8015900500409005ull
+#define ID_AHA_39320_B_DELL 0x8015900501681028ull
+#define ID_AHA_39320A 0x8016900500409005ull
+#define ID_AHA_39320D 0x8011900500419005ull
+#define ID_AHA_39320D_B 0x801C900500419005ull
+#define ID_AHA_39320D_HP 0x8011900500AC0E11ull
+#define ID_AHA_39320D_B_HP 0x801C900500AC0E11ull
+
+#endif /* _AIC79XX_PCI_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
new file mode 100644
index 000000000..add2da581
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * String handling code courtesy of Gerard Roudier's <groudier@club-internet.fr>
+ * sym driver.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_proc.c#19 $
+ */
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+
+static void ahd_dump_target_state(struct ahd_softc *ahd,
+ struct seq_file *m,
+ u_int our_id, char channel,
+ u_int target_id);
+static void ahd_dump_device_state(struct seq_file *m,
+ struct scsi_device *sdev);
+
+/*
+ * Table of syncrates that don't follow the "divisible by 4"
+ * rule. This table will be expanded in future SCSI specs.
+ */
+static const struct {
+ u_int period_factor;
+ u_int period; /* in 100ths of ns */
+} scsi_syncrates[] = {
+ { 0x08, 625 }, /* FAST-160 */
+ { 0x09, 1250 }, /* FAST-80 */
+ { 0x0a, 2500 }, /* FAST-40 40MHz */
+ { 0x0b, 3030 }, /* FAST-40 33MHz */
+ { 0x0c, 5000 } /* FAST-20 */
+};
+
+/*
+ * Return the frequency in kHz corresponding to the given
+ * sync period factor.
+ */
+static u_int
+ahd_calc_syncsrate(u_int period_factor)
+{
+ int i;
+
+ /* See if the period is in the "exception" table */
+ for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) {
+
+ if (period_factor == scsi_syncrates[i].period_factor) {
+ /* Period in kHz */
+ return (100000000 / scsi_syncrates[i].period);
+ }
+ }
+
+ /*
+ * Wasn't in the table, so use the standard
+ * 4 times conversion.
+ */
+ return (10000000 / (period_factor * 4 * 10));
+}
+
+static void
+ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
+{
+ u_int speed;
+ u_int freq;
+ u_int mb;
+
+ if (tinfo->period == AHD_PERIOD_UNKNOWN) {
+ seq_puts(m, "Renegotiation Pending\n");
+ return;
+ }
+ speed = 3300;
+ freq = 0;
+ if (tinfo->offset != 0) {
+ freq = ahd_calc_syncsrate(tinfo->period);
+ speed = freq;
+ }
+ speed *= (0x01 << tinfo->width);
+ mb = speed / 1000;
+ if (mb > 0)
+ seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
+ else
+ seq_printf(m, "%dKB/s transfers", speed);
+
+ if (freq != 0) {
+ int printed_options;
+
+ printed_options = 0;
+ seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000);
+ if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
+ seq_puts(m, " RDSTRM");
+ printed_options++;
+ }
+ if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
+ seq_puts(m, printed_options ? "|DT" : " DT");
+ printed_options++;
+ }
+ if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+ seq_puts(m, printed_options ? "|IU" : " IU");
+ printed_options++;
+ }
+ if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) {
+ seq_puts(m, printed_options ? "|RTI" : " RTI");
+ printed_options++;
+ }
+ if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
+ seq_puts(m, printed_options ? "|QAS" : " QAS");
+ printed_options++;
+ }
+ }
+
+ if (tinfo->width > 0) {
+ if (freq != 0) {
+ seq_puts(m, ", ");
+ } else {
+ seq_puts(m, " (");
+ }
+ seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
+ } else if (freq != 0) {
+ seq_putc(m, ')');
+ }
+ seq_putc(m, '\n');
+}
+
+static void
+ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m,
+ u_int our_id, char channel, u_int target_id)
+{
+ struct scsi_target *starget;
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ int lun;
+
+ tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
+ target_id, &tstate);
+ seq_printf(m, "Target %d Negotiation Settings\n", target_id);
+ seq_puts(m, "\tUser: ");
+ ahd_format_transinfo(m, &tinfo->user);
+ starget = ahd->platform_data->starget[target_id];
+ if (starget == NULL)
+ return;
+
+ seq_puts(m, "\tGoal: ");
+ ahd_format_transinfo(m, &tinfo->goal);
+ seq_puts(m, "\tCurr: ");
+ ahd_format_transinfo(m, &tinfo->curr);
+
+ for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+ struct scsi_device *dev;
+
+ dev = scsi_device_lookup_by_target(starget, lun);
+
+ if (dev == NULL)
+ continue;
+
+ ahd_dump_device_state(m, dev);
+ }
+}
+
+static void
+ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev)
+{
+ struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
+
+ seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n",
+ sdev->sdev_target->channel + 'A',
+ sdev->sdev_target->id, (u8)sdev->lun);
+
+ seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued);
+ seq_printf(m, "\t\tCommands Active %d\n", dev->active);
+ seq_printf(m, "\t\tCommand Openings %d\n", dev->openings);
+ seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags);
+ seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
+}
+
+int
+ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
+{
+ struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
+ ahd_mode_state saved_modes;
+ int have_seeprom;
+ u_long s;
+ int paused;
+ int written;
+
+ /* Default to failure. */
+ written = -EINVAL;
+ ahd_lock(ahd, &s);
+ paused = ahd_is_paused(ahd);
+ if (!paused)
+ ahd_pause(ahd);
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ if (length != sizeof(struct seeprom_config)) {
+ printk("ahd_proc_write_seeprom: incorrect buffer size\n");
+ goto done;
+ }
+
+ have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer);
+ if (have_seeprom == 0) {
+ printk("ahd_proc_write_seeprom: cksum verification failed\n");
+ goto done;
+ }
+
+ have_seeprom = ahd_acquire_seeprom(ahd);
+ if (!have_seeprom) {
+ printk("ahd_proc_write_seeprom: No Serial EEPROM\n");
+ goto done;
+ } else {
+ u_int start_addr;
+
+ if (ahd->seep_config == NULL) {
+ ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
+ if (ahd->seep_config == NULL) {
+ printk("aic79xx: Unable to allocate serial "
+ "eeprom buffer. Write failing\n");
+ goto done;
+ }
+ }
+ printk("aic79xx: Writing Serial EEPROM\n");
+ start_addr = 32 * (ahd->channel - 'A');
+ ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr,
+ sizeof(struct seeprom_config)/2);
+ ahd_read_seeprom(ahd, (uint16_t *)ahd->seep_config,
+ start_addr, sizeof(struct seeprom_config)/2,
+ /*ByteStream*/FALSE);
+ ahd_release_seeprom(ahd);
+ written = length;
+ }
+
+done:
+ ahd_restore_modes(ahd, saved_modes);
+ if (!paused)
+ ahd_unpause(ahd);
+ ahd_unlock(ahd, &s);
+ return (written);
+}
+/*
+ * Return information to handle /proc support for the driver.
+ */
+int
+ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
+ char ahd_info[256];
+ u_int max_targ;
+ u_int i;
+
+ seq_printf(m, "Adaptec AIC79xx driver version: %s\n",
+ AIC79XX_DRIVER_VERSION);
+ seq_printf(m, "%s\n", ahd->description);
+ ahd_controller_info(ahd, ahd_info);
+ seq_printf(m, "%s\n", ahd_info);
+ seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n",
+ ahd->scb_data.numscbs, AHD_NSEG);
+
+ max_targ = 16;
+
+ if (ahd->seep_config == NULL)
+ seq_puts(m, "No Serial EEPROM\n");
+ else {
+ seq_puts(m, "Serial EEPROM:\n");
+ for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) {
+ if (((i % 8) == 0) && (i != 0)) {
+ seq_putc(m, '\n');
+ }
+ seq_printf(m, "0x%.4x ",
+ ((uint16_t*)ahd->seep_config)[i]);
+ }
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+
+ if ((ahd->features & AHD_WIDE) == 0)
+ max_targ = 8;
+
+ for (i = 0; i < max_targ; i++) {
+
+ ahd_dump_target_state(ahd, m, ahd->our_id, 'A',
+ /*target_id*/i);
+ }
+ return 0;
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
new file mode 100644
index 000000000..cdcead071
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -0,0 +1,2685 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ * from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
+ */
+typedef int (ahd_reg_print_t)(u_int, u_int *, u_int);
+typedef struct ahd_reg_parse_entry {
+ char *name;
+ uint8_t value;
+ uint8_t mask;
+} ahd_reg_parse_entry_t;
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mode_ptr_print;
+#else
+#define ahd_mode_ptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MODE_PTR", 0x00, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intstat_print;
+#else
+#define ahd_intstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INTSTAT", 0x01, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintcode_print;
+#else
+#define ahd_seqintcode_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQINTCODE", 0x02, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_error_print;
+#else
+#define ahd_error_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ERROR", 0x04, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hescb_qoff_print;
+#else
+#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "HESCB_QOFF", 0x08, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hs_mailbox_print;
+#else
+#define ahd_hs_mailbox_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "HS_MAILBOX", 0x0b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintstat_print;
+#else
+#define ahd_seqintstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrseqintstat_print;
+#else
+#define ahd_clrseqintstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_swtimer_print;
+#else
+#define ahd_swtimer_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SWTIMER", 0x0e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sescb_qoff_print;
+#else
+#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SESCB_QOFF", 0x12, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intctl_print;
+#else
+#define ahd_intctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INTCTL", 0x18, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfcntrl_print;
+#else
+#define ahd_dfcntrl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DFCNTRL", 0x19, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfstatus_print;
+#else
+#define ahd_dfstatus_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DFSTATUS", 0x1a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sg_cache_shadow_print;
+#else
+#define ahd_sg_cache_shadow_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SG_CACHE_SHADOW", 0x1b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqin_print;
+#else
+#define ahd_lqin_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQIN", 0x20, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lunptr_print;
+#else
+#define ahd_lunptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LUNPTR", 0x22, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdlenptr_print;
+#else
+#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CMDLENPTR", 0x25, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_attrptr_print;
+#else
+#define ahd_attrptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ATTRPTR", 0x26, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flagptr_print;
+#else
+#define ahd_flagptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "FLAGPTR", 0x27, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdptr_print;
+#else
+#define ahd_cmdptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CMDPTR", 0x28, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qnextptr_print;
+#else
+#define ahd_qnextptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "QNEXTPTR", 0x29, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_abrtbyteptr_print;
+#else
+#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ABRTBYTEPTR", 0x2b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_abrtbitptr_print;
+#else
+#define ahd_abrtbitptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ABRTBITPTR", 0x2c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lunlen_print;
+#else
+#define ahd_lunlen_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LUNLEN", 0x30, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cdblimit_print;
+#else
+#define ahd_cdblimit_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CDBLIMIT", 0x31, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_maxcmd_print;
+#else
+#define ahd_maxcmd_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MAXCMD", 0x32, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_maxcmdcnt_print;
+#else
+#define ahd_maxcmdcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MAXCMDCNT", 0x33, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqctl1_print;
+#else
+#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQCTL1", 0x38, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqctl2_print;
+#else
+#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQCTL2", 0x39, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiseq0_print;
+#else
+#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSISEQ0", 0x3a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiseq1_print;
+#else
+#define ahd_scsiseq1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSISEQ1", 0x3b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sxfrctl0_print;
+#else
+#define ahd_sxfrctl0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SXFRCTL0", 0x3c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dffstat_print;
+#else
+#define ahd_dffstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DFFSTAT", 0x3f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_multargid_print;
+#else
+#define ahd_multargid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsisigi_print;
+#else
+#define ahd_scsisigi_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSISIGI", 0x41, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiphase_print;
+#else
+#define ahd_scsiphase_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSIPHASE", 0x42, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsidat_print;
+#else
+#define ahd_scsidat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSIDAT", 0x44, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsibus_print;
+#else
+#define ahd_scsibus_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSIBUS", 0x46, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_targidin_print;
+#else
+#define ahd_targidin_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "TARGIDIN", 0x48, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_selid_print;
+#else
+#define ahd_selid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SELID", 0x49, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sblkctl_print;
+#else
+#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SBLKCTL", 0x4a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat0_print;
+#else
+#define ahd_sstat0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode0_print;
+#else
+#define ahd_simode0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat1_print;
+#else
+#define ahd_sstat1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SSTAT1", 0x4c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat2_print;
+#else
+#define ahd_sstat2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrsint2_print;
+#else
+#define ahd_clrsint2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSINT2", 0x4d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_perrdiag_print;
+#else
+#define ahd_perrdiag_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "PERRDIAG", 0x4e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistate_print;
+#else
+#define ahd_lqistate_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQISTATE", 0x4e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_soffcnt_print;
+#else
+#define ahd_soffcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SOFFCNT", 0x4f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostate_print;
+#else
+#define ahd_lqostate_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOSTATE", 0x4f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistat0_print;
+#else
+#define ahd_lqistat0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQISTAT0", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqiint0_print;
+#else
+#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqimode0_print;
+#else
+#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqimode1_print;
+#else
+#define ahd_lqimode1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQIMODE1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistat1_print;
+#else
+#define ahd_lqistat1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQISTAT1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqiint1_print;
+#else
+#define ahd_clrlqiint1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQIINT1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistat2_print;
+#else
+#define ahd_lqistat2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQISTAT2", 0x52, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat3_print;
+#else
+#define ahd_sstat3_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SSTAT3", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode3_print;
+#else
+#define ahd_simode3_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SIMODE3", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrsint3_print;
+#else
+#define ahd_clrsint3_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSINT3", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostat0_print;
+#else
+#define ahd_lqostat0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOSTAT0", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqoint0_print;
+#else
+#define ahd_clrlqoint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQOINT0", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqomode0_print;
+#else
+#define ahd_lqomode0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOMODE0", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqomode1_print;
+#else
+#define ahd_lqomode1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOMODE1", 0x55, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostat1_print;
+#else
+#define ahd_lqostat1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOSTAT1", 0x55, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqoint1_print;
+#else
+#define ahd_clrlqoint1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQOINT1", 0x55, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostat2_print;
+#else
+#define ahd_lqostat2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOSTAT2", 0x56, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_os_space_cnt_print;
+#else
+#define ahd_os_space_cnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "OS_SPACE_CNT", 0x56, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode1_print;
+#else
+#define ahd_simode1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SIMODE1", 0x57, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_gsfifo_print;
+#else
+#define ahd_gsfifo_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "GSFIFO", 0x58, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dffsxfrctl_print;
+#else
+#define ahd_dffsxfrctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DFFSXFRCTL", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqoscsctl_print;
+#else
+#define ahd_lqoscsctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQOSCSCTL", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_nextscb_print;
+#else
+#define ahd_nextscb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEXTSCB", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrseqintsrc_print;
+#else
+#define ahd_clrseqintsrc_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSEQINTSRC", 0x5b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintsrc_print;
+#else
+#define ahd_seqintsrc_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_currscb_print;
+#else
+#define ahd_currscb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqimode_print;
+#else
+#define ahd_seqimode_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mdffstat_print;
+#else
+#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lastscb_print;
+#else
+#define ahd_lastscb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LASTSCB", 0x5e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negoaddr_print;
+#else
+#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEGOADDR", 0x60, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negperiod_print;
+#else
+#define ahd_negperiod_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEGPERIOD", 0x61, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negoffset_print;
+#else
+#define ahd_negoffset_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEGOFFSET", 0x62, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negppropts_print;
+#else
+#define ahd_negppropts_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEGPPROPTS", 0x63, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negconopts_print;
+#else
+#define ahd_negconopts_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEGCONOPTS", 0x64, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_annexcol_print;
+#else
+#define ahd_annexcol_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ANNEXCOL", 0x65, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_annexdat_print;
+#else
+#define ahd_annexdat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ANNEXDAT", 0x66, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scschkn_print;
+#else
+#define ahd_scschkn_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSCHKN", 0x66, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_iownid_print;
+#else
+#define ahd_iownid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "IOWNID", 0x67, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_shcnt_print;
+#else
+#define ahd_shcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SHCNT", 0x68, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_townid_print;
+#else
+#define ahd_townid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "TOWNID", 0x69, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seloid_print;
+#else
+#define ahd_seloid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbhaddr_print;
+#else
+#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCBHADDR", 0x7c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sghaddr_print;
+#else
+#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbhcnt_print;
+#else
+#define ahd_scbhcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCBHCNT", 0x84, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sghcnt_print;
+#else
+#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pcixctl_print;
+#else
+#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchspltstat0_print;
+#else
+#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchspltstat1_print;
+#else
+#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgspltstat0_print;
+#else
+#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgspltstat1_print;
+#else
+#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_df0pcistat_print;
+#else
+#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_reg0_print;
+#else
+#define ahd_reg0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_reg_isr_print;
+#else
+#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "REG_ISR", 0xa4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sg_state_print;
+#else
+#define ahd_sg_state_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_targpcistat_print;
+#else
+#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "TARGPCISTAT", 0xa7, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbautoptr_print;
+#else
+#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCBAUTOPTR", 0xab, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbaddr_print;
+#else
+#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CCSCBADDR", 0xac, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbctl_print;
+#else
+#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CCSCBCTL", 0xad, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccsgctl_print;
+#else
+#define ahd_ccsgctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CCSGCTL", 0xad, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbram_print;
+#else
+#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CCSCBRAM", 0xb0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_brddat_print;
+#else
+#define ahd_brddat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "BRDDAT", 0xb8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seeadr_print;
+#else
+#define ahd_seeadr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEEADR", 0xba, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seedat_print;
+#else
+#define ahd_seedat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEEDAT", 0xbc, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seectl_print;
+#else
+#define ahd_seectl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEECTL", 0xbe, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seestat_print;
+#else
+#define ahd_seestat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEESTAT", 0xbe, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspdatactl_print;
+#else
+#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DSPDATACTL", 0xc1, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspselect_print;
+#else
+#define ahd_dspselect_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "DSPSELECT", 0xc4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_wrtbiasctl_print;
+#else
+#define ahd_wrtbiasctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WRTBIASCTL", 0xc5, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqctl0_print;
+#else
+#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQCTL0", 0xd6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintctl_print;
+#else
+#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQINTCTL", 0xd9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_prgmcnt_print;
+#else
+#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "PRGMCNT", 0xde, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_none_print;
+#else
+#define ahd_none_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NONE", 0xea, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intvec1_addr_print;
+#else
+#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INTVEC1_ADDR", 0xf4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_curaddr_print;
+#else
+#define ahd_curaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CURADDR", 0xf4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intvec2_addr_print;
+#else
+#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INTVEC2_ADDR", 0xf6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_longjmp_addr_print;
+#else
+#define ahd_longjmp_addr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LONGJMP_ADDR", 0xf8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_accum_save_print;
+#else
+#define ahd_accum_save_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ACCUM_SAVE", 0xfa, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_waiting_scb_tails_print;
+#else
+#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sram_base_print;
+#else
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_waiting_tid_head_print;
+#else
+#define ahd_waiting_tid_head_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WAITING_TID_HEAD", 0x120, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_waiting_tid_tail_print;
+#else
+#define ahd_waiting_tid_tail_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WAITING_TID_TAIL", 0x122, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_next_queued_scb_addr_print;
+#else
+#define ahd_next_queued_scb_addr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR", 0x124, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_scb_head_print;
+#else
+#define ahd_complete_scb_head_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD", 0x128, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_scb_dmainprog_head_print;
+#else
+#define ahd_complete_scb_dmainprog_head_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD", 0x12a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_dma_scb_head_print;
+#else
+#define ahd_complete_dma_scb_head_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD", 0x12c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_dma_scb_tail_print;
+#else
+#define ahd_complete_dma_scb_tail_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL", 0x12e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_on_qfreeze_head_print;
+#else
+#define ahd_complete_on_qfreeze_head_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD", 0x130, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qfreeze_count_print;
+#else
+#define ahd_qfreeze_count_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "QFREEZE_COUNT", 0x132, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_kernel_qfreeze_count_print;
+#else
+#define ahd_kernel_qfreeze_count_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "KERNEL_QFREEZE_COUNT", 0x134, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_saved_mode_print;
+#else
+#define ahd_saved_mode_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SAVED_MODE", 0x136, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_msg_out_print;
+#else
+#define ahd_msg_out_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MSG_OUT", 0x137, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seq_flags_print;
+#else
+#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQ_FLAGS", 0x139, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lastphase_print;
+#else
+#define ahd_lastphase_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LASTPHASE", 0x13c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qoutfifo_entry_valid_tag_print;
+#else
+#define ahd_qoutfifo_entry_valid_tag_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG", 0x13d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_kernel_tqinpos_print;
+#else
+#define ahd_kernel_tqinpos_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "KERNEL_TQINPOS", 0x13e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qoutfifo_next_addr_print;
+#else
+#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR", 0x144, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_last_msg_print;
+#else
+#define ahd_last_msg_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LAST_MSG", 0x14a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiseq_template_print;
+#else
+#define ahd_scsiseq_template_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x14b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_initiator_tag_print;
+#else
+#define ahd_initiator_tag_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INITIATOR_TAG", 0x14c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seq_flags2_print;
+#else
+#define ahd_seq_flags2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQ_FLAGS2", 0x14d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_allocfifo_scbptr_print;
+#else
+#define ahd_allocfifo_scbptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR", 0x14e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_timer_print;
+#else
+#define ahd_int_coalescing_timer_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INT_COALESCING_TIMER", 0x150, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_maxcmds_print;
+#else
+#define ahd_int_coalescing_maxcmds_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS", 0x152, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_mincmds_print;
+#else
+#define ahd_int_coalescing_mincmds_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS", 0x153, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmds_pending_print;
+#else
+#define ahd_cmds_pending_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CMDS_PENDING", 0x154, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_cmdcount_print;
+#else
+#define ahd_int_coalescing_cmdcount_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT", 0x156, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_local_hs_mailbox_print;
+#else
+#define ahd_local_hs_mailbox_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX", 0x157, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdsize_table_print;
+#else
+#define ahd_cmdsize_table_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CMDSIZE_TABLE", 0x158, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mk_message_scb_print;
+#else
+#define ahd_mk_message_scb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 0x160, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mk_message_scsiid_print;
+#else
+#define ahd_mk_message_scsiid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID", 0x162, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_base_print;
+#else
+#define ahd_scb_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_residual_datacnt_print;
+#else
+#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_sense_busaddr_print;
+#else
+#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 0x18c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_tag_print;
+#else
+#define ahd_scb_tag_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_TAG", 0x190, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_control_print;
+#else
+#define ahd_scb_control_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_CONTROL", 0x192, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_scsiid_print;
+#else
+#define ahd_scb_scsiid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_lun_print;
+#else
+#define ahd_scb_lun_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_LUN", 0x194, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_task_attribute_print;
+#else
+#define ahd_scb_task_attribute_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_TASK_ATTRIBUTE", 0x195, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_task_management_print;
+#else
+#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT", 0x197, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_dataptr_print;
+#else
+#define ahd_scb_dataptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_DATAPTR", 0x198, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_datacnt_print;
+#else
+#define ahd_scb_datacnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_DATACNT", 0x1a0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_sgptr_print;
+#else
+#define ahd_scb_sgptr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_SGPTR", 0x1a4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_busaddr_print;
+#else
+#define ahd_scb_busaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_BUSADDR", 0x1a8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_next2_print;
+#else
+#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_NEXT2", 0x1ae, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_disconnected_lists_print;
+#else
+#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 0x1b8, regvalue, cur_col, wrap)
+#endif
+
+
+#define MODE_PTR 0x00
+#define DST_MODE 0x70
+#define SRC_MODE 0x07
+
+#define INTSTAT 0x01
+#define INT_PEND 0xff
+#define HWERRINT 0x80
+#define BRKADRINT 0x40
+#define SWTMINT 0x20
+#define PCIINT 0x10
+#define SCSIINT 0x08
+#define SEQINT 0x04
+#define CMDCMPLT 0x02
+#define SPLTINT 0x01
+
+#define SEQINTCODE 0x02
+#define BAD_SCB_STATUS 0x1a
+#define SAW_HWERR 0x19
+#define TRACEPOINT3 0x18
+#define TRACEPOINT2 0x17
+#define TRACEPOINT1 0x16
+#define TRACEPOINT0 0x15
+#define TASKMGMT_CMD_CMPLT_OKAY 0x14
+#define TASKMGMT_FUNC_COMPLETE 0x13
+#define ENTERING_NONPACK 0x12
+#define CFG4OVERRUN 0x11
+#define STATUS_OVERRUN 0x10
+#define CFG4ISTAT_INTR 0x0f
+#define INVALID_SEQINT 0x0e
+#define ILLEGAL_PHASE 0x0d
+#define DUMP_CARD_STATE 0x0c
+#define MISSED_BUSFREE 0x0b
+#define MKMSG_FAILED 0x0a
+#define DATA_OVERRUN 0x09
+#define BAD_STATUS 0x08
+#define HOST_MSG_LOOP 0x07
+#define PDATA_REINIT 0x06
+#define IGN_WIDE_RES 0x05
+#define NO_MATCH 0x04
+#define PROTO_VIOLATION 0x03
+#define SEND_REJECT 0x02
+#define BAD_PHASE 0x01
+#define NO_SEQINT 0x00
+
+#define CLRINT 0x03
+#define CLRHWERRINT 0x80
+#define CLRBRKADRINT 0x40
+#define CLRSWTMINT 0x20
+#define CLRPCIINT 0x10
+#define CLRSCSIINT 0x08
+#define CLRSEQINT 0x04
+#define CLRCMDINT 0x02
+#define CLRSPLTINT 0x01
+
+#define ERROR 0x04
+#define CIOPARERR 0x80
+#define CIOACCESFAIL 0x40
+#define MPARERR 0x20
+#define DPARERR 0x10
+#define SQPARERR 0x08
+#define ILLOPCODE 0x04
+#define DSCTMOUT 0x02
+
+#define CLRERR 0x04
+#define CLRCIOPARERR 0x80
+#define CLRCIOACCESFAIL 0x40
+#define CLRMPARERR 0x20
+#define CLRDPARERR 0x10
+#define CLRSQPARERR 0x08
+#define CLRILLOPCODE 0x04
+#define CLRDSCTMOUT 0x02
+
+#define HCNTRL 0x05
+#define SEQ_RESET 0x80
+#define POWRDN 0x40
+#define SWINT 0x10
+#define SWTIMER_START_B 0x08
+#define PAUSE 0x04
+#define INTEN 0x02
+#define CHIPRST 0x01
+#define CHIPRSTACK 0x01
+
+#define HNSCB_QOFF 0x06
+
+#define HESCB_QOFF 0x08
+
+#define HS_MAILBOX 0x0b
+#define HOST_TQINPOS 0x80
+#define ENINT_COALESCE 0x40
+
+#define SEQINTSTAT 0x0c
+#define SEQ_SWTMRTO 0x10
+#define SEQ_SEQINT 0x08
+#define SEQ_SCSIINT 0x04
+#define SEQ_PCIINT 0x02
+#define SEQ_SPLTINT 0x01
+
+#define CLRSEQINTSTAT 0x0c
+#define CLRSEQ_SWTMRTO 0x10
+#define CLRSEQ_SEQINT 0x08
+#define CLRSEQ_SCSIINT 0x04
+#define CLRSEQ_PCIINT 0x02
+#define CLRSEQ_SPLTINT 0x01
+
+#define SWTIMER 0x0e
+
+#define SNSCB_QOFF 0x10
+
+#define SESCB_QOFF 0x12
+
+#define SDSCB_QOFF 0x14
+
+#define QOFF_CTLSTA 0x16
+#define EMPTY_SCB_AVAIL 0x80
+#define NEW_SCB_AVAIL 0x40
+#define SDSCB_ROLLOVR 0x20
+#define HS_MAILBOX_ACT 0x10
+#define SCB_QSIZE 0x0f
+#define SCB_QSIZE_16384 0x0c
+#define SCB_QSIZE_8192 0x0b
+#define SCB_QSIZE_4096 0x0a
+#define SCB_QSIZE_2048 0x09
+#define SCB_QSIZE_1024 0x08
+#define SCB_QSIZE_512 0x07
+#define SCB_QSIZE_256 0x06
+#define SCB_QSIZE_128 0x05
+#define SCB_QSIZE_64 0x04
+#define SCB_QSIZE_32 0x03
+#define SCB_QSIZE_16 0x02
+#define SCB_QSIZE_8 0x01
+#define SCB_QSIZE_4 0x00
+
+#define INTCTL 0x18
+#define SWTMINTMASK 0x80
+#define SWTMINTEN 0x40
+#define SWTIMER_START 0x20
+#define AUTOCLRCMDINT 0x10
+#define PCIINTEN 0x08
+#define SCSIINTEN 0x04
+#define SEQINTEN 0x02
+#define SPLTINTEN 0x01
+
+#define DFCNTRL 0x19
+#define SCSIENWRDIS 0x40
+#define SCSIENACK 0x20
+#define DIRECTIONACK 0x04
+#define FIFOFLUSHACK 0x02
+#define DIRECTIONEN 0x01
+
+#define DSCOMMAND0 0x19
+#define CACHETHEN 0x80
+#define DPARCKEN 0x40
+#define MPARCKEN 0x20
+#define EXTREQLCK 0x10
+#define DISABLE_TWATE 0x02
+#define CIOPARCKEN 0x01
+
+#define DFSTATUS 0x1a
+#define PRELOAD_AVAIL 0x80
+#define PKT_PRELOAD_AVAIL 0x40
+#define MREQPEND 0x10
+#define HDONE 0x08
+#define DFTHRESH 0x04
+#define FIFOFULL 0x02
+#define FIFOEMP 0x01
+
+#define SG_CACHE_SHADOW 0x1b
+#define ODD_SEG 0x04
+#define LAST_SEG 0x02
+#define LAST_SEG_DONE 0x01
+
+#define ARBCTL 0x1b
+#define RESET_HARB 0x80
+#define RETRY_SWEN 0x08
+#define USE_TIME 0x07
+
+#define SG_CACHE_PRE 0x1b
+
+#define LQIN 0x20
+
+#define TYPEPTR 0x20
+
+#define TAGPTR 0x21
+
+#define LUNPTR 0x22
+
+#define DATALENPTR 0x23
+
+#define STATLENPTR 0x24
+
+#define CMDLENPTR 0x25
+
+#define ATTRPTR 0x26
+
+#define FLAGPTR 0x27
+
+#define CMDPTR 0x28
+
+#define QNEXTPTR 0x29
+
+#define IDPTR 0x2a
+
+#define ABRTBYTEPTR 0x2b
+
+#define ABRTBITPTR 0x2c
+
+#define MAXCMDBYTES 0x2d
+
+#define MAXCMD2RCV 0x2e
+
+#define SHORTTHRESH 0x2f
+
+#define LUNLEN 0x30
+#define TLUNLEN 0xf0
+#define ILUNLEN 0x0f
+
+#define CDBLIMIT 0x31
+
+#define MAXCMD 0x32
+
+#define MAXCMDCNT 0x33
+
+#define LQRSVD01 0x34
+
+#define LQRSVD16 0x35
+
+#define LQRSVD17 0x36
+
+#define CMDRSVD0 0x37
+
+#define LQCTL0 0x38
+#define LQITARGCLT 0xc0
+#define LQIINITGCLT 0x30
+#define LQ0TARGCLT 0x0c
+#define LQ0INITGCLT 0x03
+
+#define LQCTL1 0x38
+#define PCI2PCI 0x04
+#define SINGLECMD 0x02
+#define ABORTPENDING 0x01
+
+#define SCSBIST0 0x39
+#define GSBISTERR 0x40
+#define GSBISTDONE 0x20
+#define GSBISTRUN 0x10
+#define OSBISTERR 0x04
+#define OSBISTDONE 0x02
+#define OSBISTRUN 0x01
+
+#define LQCTL2 0x39
+#define LQIRETRY 0x80
+#define LQICONTINUE 0x40
+#define LQITOIDLE 0x20
+#define LQIPAUSE 0x10
+#define LQORETRY 0x08
+#define LQOCONTINUE 0x04
+#define LQOTOIDLE 0x02
+#define LQOPAUSE 0x01
+
+#define SCSBIST1 0x3a
+#define NTBISTERR 0x04
+#define NTBISTDONE 0x02
+#define NTBISTRUN 0x01
+
+#define SCSISEQ0 0x3a
+#define TEMODEO 0x80
+#define ENSELO 0x40
+#define ENARBO 0x20
+#define FORCEBUSFREE 0x10
+#define SCSIRSTO 0x01
+
+#define SCSISEQ1 0x3b
+
+#define SXFRCTL0 0x3c
+#define DFON 0x80
+#define DFPEXP 0x40
+#define BIOSCANCELEN 0x10
+#define SPIOEN 0x08
+
+#define DLCOUNT 0x3c
+
+#define BUSINITID 0x3c
+
+#define SXFRCTL1 0x3d
+#define BITBUCKET 0x80
+#define ENSACHK 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18
+#define ENSTIMER 0x04
+#define ACTNEGEN 0x02
+#define STPWEN 0x01
+
+#define BUSTARGID 0x3e
+
+#define SXFRCTL2 0x3e
+#define AUTORSTDIS 0x10
+#define CMDDMAEN 0x08
+#define ASU 0x07
+
+#define DFFSTAT 0x3f
+#define CURRFIFO 0x03
+#define FIFO1FREE 0x20
+#define FIFO0FREE 0x10
+#define CURRFIFO_NONE 0x03
+#define CURRFIFO_1 0x01
+#define CURRFIFO_0 0x00
+
+#define SCSISIGO 0x40
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+#define MULTARGID 0x40
+
+#define SCSISIGI 0x41
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+#define SCSIPHASE 0x42
+#define STATUS_PHASE 0x20
+#define COMMAND_PHASE 0x10
+#define MSG_IN_PHASE 0x08
+#define MSG_OUT_PHASE 0x04
+#define DATA_PHASE_MASK 0x03
+#define DATA_IN_PHASE 0x02
+#define DATA_OUT_PHASE 0x01
+
+#define SCSIDAT0_IMG 0x43
+
+#define SCSIDAT 0x44
+
+#define SCSIBUS 0x46
+
+#define TARGIDIN 0x48
+#define CLKOUT 0x80
+#define TARGID 0x0f
+
+#define SELID 0x49
+#define SELID_MASK 0xf0
+#define ONEBIT 0x08
+
+#define OPTIONMODE 0x4a
+#define OPTIONMODE_DEFAULTS 0x02
+#define BIOSCANCTL 0x80
+#define AUTOACKEN 0x40
+#define BIASCANCTL 0x20
+#define BUSFREEREV 0x10
+#define ENDGFORMCHK 0x04
+#define AUTO_MSGOUT_DE 0x02
+
+#define SBLKCTL 0x4a
+#define DIAGLEDEN 0x80
+#define DIAGLEDON 0x40
+#define ENAB40 0x08
+#define ENAB20 0x04
+#define SELWIDE 0x02
+
+#define CLRSINT0 0x4b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRIOERR 0x08
+#define CLROVERRUN 0x04
+#define CLRSPIORDY 0x02
+#define CLRARBDO 0x01
+
+#define SSTAT0 0x4b
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define IOERR 0x08
+#define OVERRUN 0x04
+#define SPIORDY 0x02
+#define ARBDO 0x01
+
+#define SIMODE0 0x4b
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENIOERR 0x08
+#define ENOVERRUN 0x04
+#define ENSPIORDY 0x02
+#define ENARBDO 0x01
+
+#define CLRSINT1 0x4c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRSTRB2FAST 0x02
+#define CLRREQINIT 0x01
+
+#define SSTAT1 0x4c
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define STRB2FAST 0x02
+#define REQINIT 0x01
+
+#define SSTAT2 0x4d
+#define BUSFREETIME 0xc0
+#define NONPACKREQ 0x20
+#define EXP_ACTIVE 0x10
+#define BSYX 0x08
+#define WIDE_RES 0x04
+#define SDONE 0x02
+#define DMADONE 0x01
+#define BUSFREE_DFF1 0xc0
+#define BUSFREE_DFF0 0x80
+#define BUSFREE_LQO 0x40
+
+#define SIMODE2 0x4d
+#define ENWIDE_RES 0x04
+#define ENSDONE 0x02
+#define ENDMADONE 0x01
+
+#define CLRSINT2 0x4d
+#define CLRNONPACKREQ 0x20
+#define CLRWIDE_RES 0x04
+#define CLRSDONE 0x02
+#define CLRDMADONE 0x01
+
+#define PERRDIAG 0x4e
+#define HIZERO 0x80
+#define HIPERR 0x40
+#define PREVPHASE 0x20
+#define PARITYERR 0x10
+#define AIPERR 0x08
+#define CRCERR 0x04
+#define DGFORMERR 0x02
+#define DTERR 0x01
+
+#define LQISTATE 0x4e
+
+#define SOFFCNT 0x4f
+
+#define LQOSTATE 0x4f
+
+#define LQISTAT0 0x50
+#define LQIATNQAS 0x20
+#define LQICRCT1 0x10
+#define LQICRCT2 0x08
+#define LQIBADLQT 0x04
+#define LQIATNLQ 0x02
+#define LQIATNCMD 0x01
+
+#define CLRLQIINT0 0x50
+#define CLRLQIATNQAS 0x20
+#define CLRLQICRCT1 0x10
+#define CLRLQICRCT2 0x08
+#define CLRLQIBADLQT 0x04
+#define CLRLQIATNLQ 0x02
+#define CLRLQIATNCMD 0x01
+
+#define LQIMODE0 0x50
+#define ENLQIATNQASK 0x20
+#define ENLQICRCT1 0x10
+#define ENLQICRCT2 0x08
+#define ENLQIBADLQT 0x04
+#define ENLQIATNLQ 0x02
+#define ENLQIATNCMD 0x01
+
+#define LQIMODE1 0x51
+#define ENLQIPHASE_LQ 0x80
+#define ENLQIPHASE_NLQ 0x40
+#define ENLIQABORT 0x20
+#define ENLQICRCI_LQ 0x10
+#define ENLQICRCI_NLQ 0x08
+#define ENLQIBADLQI 0x04
+#define ENLQIOVERI_LQ 0x02
+#define ENLQIOVERI_NLQ 0x01
+
+#define LQISTAT1 0x51
+#define LQIPHASE_LQ 0x80
+#define LQIPHASE_NLQ 0x40
+#define LQIABORT 0x20
+#define LQICRCI_LQ 0x10
+#define LQICRCI_NLQ 0x08
+#define LQIBADLQI 0x04
+#define LQIOVERI_LQ 0x02
+#define LQIOVERI_NLQ 0x01
+
+#define CLRLQIINT1 0x51
+#define CLRLQIPHASE_LQ 0x80
+#define CLRLQIPHASE_NLQ 0x40
+#define CLRLIQABORT 0x20
+#define CLRLQICRCI_LQ 0x10
+#define CLRLQICRCI_NLQ 0x08
+#define CLRLQIBADLQI 0x04
+#define CLRLQIOVERI_LQ 0x02
+#define CLRLQIOVERI_NLQ 0x01
+
+#define LQISTAT2 0x52
+#define PACKETIZED 0x80
+#define LQIPHASE_OUTPKT 0x40
+#define LQIWORKONLQ 0x20
+#define LQIWAITFIFO 0x10
+#define LQISTOPPKT 0x08
+#define LQISTOPLQ 0x04
+#define LQISTOPCMD 0x02
+#define LQIGSAVAIL 0x01
+
+#define SSTAT3 0x53
+#define NTRAMPERR 0x02
+#define OSRAMPERR 0x01
+
+#define SIMODE3 0x53
+#define ENNTRAMPERR 0x02
+#define ENOSRAMPERR 0x01
+
+#define CLRSINT3 0x53
+#define CLRNTRAMPERR 0x02
+#define CLROSRAMPERR 0x01
+
+#define LQOSTAT0 0x54
+#define LQOTARGSCBPERR 0x10
+#define LQOSTOPT2 0x08
+#define LQOATNLQ 0x04
+#define LQOATNPKT 0x02
+#define LQOTCRC 0x01
+
+#define CLRLQOINT0 0x54
+#define CLRLQOTARGSCBPERR 0x10
+#define CLRLQOSTOPT2 0x08
+#define CLRLQOATNLQ 0x04
+#define CLRLQOATNPKT 0x02
+#define CLRLQOTCRC 0x01
+
+#define LQOMODE0 0x54
+#define ENLQOTARGSCBPERR 0x10
+#define ENLQOSTOPT2 0x08
+#define ENLQOATNLQ 0x04
+#define ENLQOATNPKT 0x02
+#define ENLQOTCRC 0x01
+
+#define LQOMODE1 0x55
+#define ENLQOINITSCBPERR 0x10
+#define ENLQOSTOPI2 0x08
+#define ENLQOBADQAS 0x04
+#define ENLQOBUSFREE 0x02
+#define ENLQOPHACHGINPKT 0x01
+
+#define LQOSTAT1 0x55
+#define LQOINITSCBPERR 0x10
+#define LQOSTOPI2 0x08
+#define LQOBADQAS 0x04
+#define LQOBUSFREE 0x02
+#define LQOPHACHGINPKT 0x01
+
+#define CLRLQOINT1 0x55
+#define CLRLQOINITSCBPERR 0x10
+#define CLRLQOSTOPI2 0x08
+#define CLRLQOBADQAS 0x04
+#define CLRLQOBUSFREE 0x02
+#define CLRLQOPHACHGINPKT 0x01
+
+#define LQOSTAT2 0x56
+#define LQOPKT 0xe0
+#define LQOWAITFIFO 0x10
+#define LQOPHACHGOUTPKT 0x02
+#define LQOSTOP0 0x01
+
+#define OS_SPACE_CNT 0x56
+
+#define SIMODE1 0x57
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENSTRB2FAST 0x02
+#define ENREQINIT 0x01
+
+#define GSFIFO 0x58
+
+#define DFFSXFRCTL 0x5a
+#define DFFBITBUCKET 0x08
+#define CLRSHCNT 0x04
+#define CLRCHN 0x02
+#define RSTCHN 0x01
+
+#define LQOSCSCTL 0x5a
+#define LQOH2A_VERSION 0x80
+#define LQOBUSETDLY 0x40
+#define LQONOHOLDLACK 0x02
+#define LQONOCHKOVER 0x01
+
+#define NEXTSCB 0x5a
+
+#define CLRSEQINTSRC 0x5b
+#define CLRCTXTDONE 0x40
+#define CLRSAVEPTRS 0x20
+#define CLRCFG4DATA 0x10
+#define CLRCFG4ISTAT 0x08
+#define CLRCFG4TSTAT 0x04
+#define CLRCFG4ICMD 0x02
+#define CLRCFG4TCMD 0x01
+
+#define SEQINTSRC 0x5b
+#define CTXTDONE 0x40
+#define SAVEPTRS 0x20
+#define CFG4DATA 0x10
+#define CFG4ISTAT 0x08
+#define CFG4TSTAT 0x04
+#define CFG4ICMD 0x02
+#define CFG4TCMD 0x01
+
+#define CURRSCB 0x5c
+
+#define SEQIMODE 0x5c
+#define ENCTXTDONE 0x40
+#define ENSAVEPTRS 0x20
+#define ENCFG4DATA 0x10
+#define ENCFG4ISTAT 0x08
+#define ENCFG4TSTAT 0x04
+#define ENCFG4ICMD 0x02
+#define ENCFG4TCMD 0x01
+
+#define MDFFSTAT 0x5d
+#define SHCNTNEGATIVE 0x40
+#define SHCNTMINUS1 0x20
+#define LASTSDONE 0x10
+#define SHVALID 0x08
+#define DLZERO 0x04
+#define DATAINFIFO 0x02
+#define FIFOFREE 0x01
+
+#define CRCCONTROL 0x5d
+#define CRCVALCHKEN 0x40
+
+#define DFFTAG 0x5e
+
+#define LASTSCB 0x5e
+
+#define SCSITEST 0x5e
+#define CNTRTEST 0x08
+#define SEL_TXPLL_DEBUG 0x04
+
+#define IOPDNCTL 0x5f
+#define DISABLE_OE 0x80
+#define PDN_IDIST 0x04
+#define PDN_DIFFSENSE 0x01
+
+#define SHADDR 0x60
+
+#define NEGOADDR 0x60
+
+#define DGRPCRCI 0x60
+
+#define NEGPERIOD 0x61
+
+#define PACKCRCI 0x62
+
+#define NEGOFFSET 0x62
+
+#define NEGPPROPTS 0x63
+#define PPROPT_PACE 0x08
+#define PPROPT_QAS 0x04
+#define PPROPT_DT 0x02
+#define PPROPT_IUT 0x01
+
+#define NEGCONOPTS 0x64
+#define ENSNAPSHOT 0x40
+#define RTI_WRTDIS 0x20
+#define RTI_OVRDTRN 0x10
+#define ENSLOWCRC 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNO 0x02
+#define WIDEXFER 0x01
+
+#define ANNEXCOL 0x65
+
+#define ANNEXDAT 0x66
+
+#define SCSCHKN 0x66
+#define BIDICHKDIS 0x80
+#define STSELSKIDDIS 0x40
+#define CURRFIFODEF 0x20
+#define WIDERESEN 0x10
+#define SDONEMSKDIS 0x08
+#define DFFACTCLR 0x04
+#define SHVALIDSTDIS 0x02
+#define LSTSGCLRDIS 0x01
+
+#define IOWNID 0x67
+
+#define PLL960CTL0 0x68
+
+#define SHCNT 0x68
+
+#define TOWNID 0x69
+
+#define PLL960CTL1 0x69
+
+#define PLL960CNT0 0x6a
+
+#define XSIG 0x6a
+
+#define SELOID 0x6b
+
+#define PLL400CTL0 0x6c
+#define PLL_VCOSEL 0x80
+#define PLL_PWDN 0x40
+#define PLL_NS 0x30
+#define PLL_ENLUD 0x08
+#define PLL_ENLPF 0x04
+#define PLL_DLPF 0x02
+#define PLL_ENFBM 0x01
+
+#define FAIRNESS 0x6c
+
+#define PLL400CTL1 0x6d
+#define PLL_CNTEN 0x80
+#define PLL_CNTCLR 0x40
+#define PLL_RST 0x01
+
+#define UNFAIRNESS 0x6e
+
+#define PLL400CNT0 0x6e
+
+#define HADDR 0x70
+
+#define PLLDELAY 0x70
+#define SPLIT_DROP_REQ 0x80
+
+#define HODMAADR 0x70
+
+#define HODMACNT 0x78
+
+#define HCNT 0x78
+
+#define HODMAEN 0x7a
+
+#define SCBHADDR 0x7c
+
+#define SGHADDR 0x7c
+
+#define SCBHCNT 0x84
+
+#define SGHCNT 0x84
+
+#define DFF_THRSH 0x88
+#define WR_DFTHRSH 0x70
+#define RD_DFTHRSH 0x07
+#define WR_DFTHRSH_MAX 0x70
+#define WR_DFTHRSH_90 0x60
+#define WR_DFTHRSH_85 0x50
+#define WR_DFTHRSH_75 0x40
+#define WR_DFTHRSH_63 0x30
+#define WR_DFTHRSH_50 0x20
+#define WR_DFTHRSH_25 0x10
+#define RD_DFTHRSH_MAX 0x07
+#define RD_DFTHRSH_90 0x06
+#define RD_DFTHRSH_85 0x05
+#define RD_DFTHRSH_75 0x04
+#define RD_DFTHRSH_63 0x03
+#define RD_DFTHRSH_50 0x02
+#define RD_DFTHRSH_25 0x01
+#define RD_DFTHRSH_MIN 0x00
+#define WR_DFTHRSH_MIN 0x00
+
+#define ROMADDR 0x8a
+
+#define ROMCNTRL 0x8d
+#define ROMOP 0xe0
+#define ROMSPD 0x18
+#define REPEAT 0x02
+#define RDY 0x01
+
+#define ROMDATA 0x8e
+
+#define CMCRXMSG0 0x90
+
+#define ROENABLE 0x90
+#define MSIROEN 0x20
+#define OVLYROEN 0x10
+#define CMCROEN 0x08
+#define SGROEN 0x04
+#define DCH1ROEN 0x02
+#define DCH0ROEN 0x01
+
+#define OVLYRXMSG0 0x90
+
+#define DCHRXMSG0 0x90
+
+#define OVLYRXMSG1 0x91
+
+#define NSENABLE 0x91
+#define MSINSEN 0x20
+#define OVLYNSEN 0x10
+#define CMCNSEN 0x08
+#define SGNSEN 0x04
+#define DCH1NSEN 0x02
+#define DCH0NSEN 0x01
+
+#define CMCRXMSG1 0x91
+
+#define DCHRXMSG1 0x91
+
+#define DCHRXMSG2 0x92
+
+#define CMCRXMSG2 0x92
+
+#define OST 0x92
+
+#define OVLYRXMSG2 0x92
+
+#define DCHRXMSG3 0x93
+
+#define OVLYRXMSG3 0x93
+
+#define CMCRXMSG3 0x93
+
+#define PCIXCTL 0x93
+#define SERRPULSE 0x80
+#define UNEXPSCIEN 0x20
+#define SPLTSMADIS 0x10
+#define SPLTSTADIS 0x08
+#define SRSPDPEEN 0x04
+#define TSCSERREN 0x02
+#define CMPABCDIS 0x01
+
+#define OVLYSEQBCNT 0x94
+
+#define DCHSEQBCNT 0x94
+
+#define CMCSEQBCNT 0x94
+
+#define CMCSPLTSTAT0 0x96
+
+#define DCHSPLTSTAT0 0x96
+
+#define OVLYSPLTSTAT0 0x96
+
+#define CMCSPLTSTAT1 0x97
+
+#define OVLYSPLTSTAT1 0x97
+
+#define DCHSPLTSTAT1 0x97
+
+#define SGRXMSG0 0x98
+#define CDNUM 0xf8
+#define CFNUM 0x07
+
+#define SLVSPLTOUTADR0 0x98
+#define LOWER_ADDR 0x7f
+
+#define SGRXMSG1 0x99
+#define CBNUM 0xff
+
+#define SLVSPLTOUTADR1 0x99
+#define REQ_DNUM 0xf8
+#define REQ_FNUM 0x07
+
+#define SGRXMSG2 0x9a
+#define MINDEX 0xff
+
+#define SLVSPLTOUTADR2 0x9a
+#define REQ_BNUM 0xff
+
+#define SGRXMSG3 0x9b
+#define MCLASS 0x0f
+
+#define SLVSPLTOUTADR3 0x9b
+#define TAG_NUM 0x1f
+#define RLXORD 0x10
+
+#define SGSEQBCNT 0x9c
+
+#define SLVSPLTOUTATTR0 0x9c
+#define LOWER_BCNT 0xff
+
+#define SLVSPLTOUTATTR1 0x9d
+#define CMPLT_DNUM 0xf8
+#define CMPLT_FNUM 0x07
+
+#define SLVSPLTOUTATTR2 0x9e
+#define CMPLT_BNUM 0xff
+
+#define SGSPLTSTAT0 0x9e
+#define STAETERM 0x80
+#define SCBCERR 0x40
+#define SCADERR 0x20
+#define SCDATBUCKET 0x10
+#define CNTNOTCMPLT 0x08
+#define RXOVRUN 0x04
+#define RXSCEMSG 0x02
+#define RXSPLTRSP 0x01
+
+#define SGSPLTSTAT1 0x9f
+#define RXDATABUCKET 0x01
+
+#define SFUNCT 0x9f
+#define TEST_GROUP 0xf0
+#define TEST_NUM 0x0f
+
+#define DF0PCISTAT 0xa0
+
+#define REG0 0xa0
+
+#define DF1PCISTAT 0xa1
+
+#define SGPCISTAT 0xa2
+
+#define REG1 0xa2
+
+#define CMCPCISTAT 0xa3
+
+#define OVLYPCISTAT 0xa4
+#define SCAAPERR 0x08
+#define RDPERR 0x04
+
+#define REG_ISR 0xa4
+
+#define SG_STATE 0xa6
+#define FETCH_INPROG 0x04
+#define LOADING_NEEDED 0x02
+#define SEGS_AVAIL 0x01
+
+#define MSIPCISTAT 0xa6
+#define RMA 0x20
+#define RTA 0x10
+#define CLRPENDMSI 0x08
+#define DPR 0x01
+
+#define TARGPCISTAT 0xa7
+#define DPE 0x80
+#define SSE 0x40
+#define STA 0x08
+#define TWATERR 0x02
+
+#define DATA_COUNT_ODD 0xa7
+
+#define SCBPTR 0xa8
+
+#define CCSCBACNT 0xab
+
+#define SCBAUTOPTR 0xab
+#define AUSCBPTR_EN 0x80
+#define SCBPTR_ADDR 0x38
+#define SCBPTR_OFF 0x07
+
+#define CCSGADDR 0xac
+
+#define CCSCBADR_BK 0xac
+
+#define CCSCBADDR 0xac
+
+#define CMC_RAMBIST 0xad
+#define SG_ELEMENT_SIZE 0x80
+#define SCBRAMBIST_FAIL 0x40
+#define SG_BIST_FAIL 0x20
+#define SG_BIST_EN 0x10
+#define CMC_BUFFER_BIST_FAIL 0x02
+#define CMC_BUFFER_BIST_EN 0x01
+
+#define CCSCBCTL 0xad
+#define CCSCBDONE 0x80
+#define ARRDONE 0x40
+#define CCARREN 0x10
+#define CCSCBEN 0x08
+#define CCSCBDIR 0x04
+#define CCSCBRESET 0x01
+
+#define CCSGCTL 0xad
+#define CCSGEN 0x0c
+#define CCSGDONE 0x80
+#define SG_CACHE_AVAIL 0x10
+#define CCSGENACK 0x08
+#define SG_FETCH_REQ 0x02
+#define CCSGRESET 0x01
+
+#define CCSGRAM 0xb0
+
+#define FLEXADR 0xb0
+
+#define CCSCBRAM 0xb0
+
+#define FLEXCNT 0xb3
+
+#define FLEXDMASTAT 0xb5
+#define FLEXDMAERR 0x02
+#define FLEXDMADONE 0x01
+
+#define FLEXDATA 0xb6
+
+#define BRDDAT 0xb8
+
+#define BRDCTL 0xb9
+#define FLXARBACK 0x80
+#define FLXARBREQ 0x40
+#define BRDADDR 0x38
+#define BRDEN 0x04
+#define BRDRW 0x02
+#define BRDSTB 0x01
+
+#define SEEADR 0xba
+
+#define SEEDAT 0xbc
+
+#define SEECTL 0xbe
+#define SEEOP_WALL 0x40
+#define SEEOP_EWEN 0x40
+#define SEEOP_EWDS 0x40
+#define SEEOPCODE 0x70
+#define SEERST 0x02
+#define SEESTART 0x01
+#define SEEOP_ERASE 0x70
+#define SEEOP_READ 0x60
+#define SEEOP_WRITE 0x50
+#define SEEOP_ERAL 0x40
+
+#define SEESTAT 0xbe
+#define INIT_DONE 0x80
+#define LDALTID_L 0x08
+#define SEEARBACK 0x04
+#define SEEBUSY 0x02
+
+#define SCBCNT 0xbf
+
+#define DFWADDR 0xc0
+
+#define DSPFLTRCTL 0xc0
+#define FLTRDISABLE 0x20
+#define EDGESENSE 0x10
+#define DSPFCNTSEL 0x0f
+
+#define DSPDATACTL 0xc1
+#define BYPASSENAB 0x80
+#define DESQDIS 0x10
+#define RCVROFFSTDIS 0x04
+#define XMITOFFSTDIS 0x02
+
+#define DFRADDR 0xc2
+
+#define DSPREQCTL 0xc2
+#define MANREQCTL 0xc0
+#define MANREQDLY 0x3f
+
+#define DSPACKCTL 0xc3
+#define MANACKCTL 0xc0
+#define MANACKDLY 0x3f
+
+#define DFDAT 0xc4
+
+#define DSPSELECT 0xc4
+#define AUTOINCEN 0x80
+#define DSPSEL 0x1f
+
+#define WRTBIASCTL 0xc5
+#define AUTOXBCDIS 0x80
+#define XMITMANVAL 0x3f
+
+#define RCVRBIOSCTL 0xc6
+#define AUTORBCDIS 0x80
+#define RCVRMANVAL 0x3f
+
+#define WRTBIASCALC 0xc7
+
+#define RCVRBIASCALC 0xc8
+
+#define DFPTRS 0xc8
+
+#define SKEWCALC 0xc9
+
+#define DFBKPTR 0xc9
+
+#define DFDBCTL 0xcb
+#define DFF_CIO_WR_RDY 0x20
+#define DFF_CIO_RD_RDY 0x10
+#define DFF_DIR_ERR 0x08
+#define DFF_RAMBIST_FAIL 0x04
+#define DFF_RAMBIST_DONE 0x02
+#define DFF_RAMBIST_EN 0x01
+
+#define DFSCNT 0xcc
+
+#define DFBCNT 0xce
+
+#define OVLYADDR 0xd4
+
+#define SEQCTL0 0xd6
+#define PERRORDIS 0x80
+#define PAUSEDIS 0x40
+#define FAILDIS 0x20
+#define FASTMODE 0x10
+#define BRKADRINTEN 0x08
+#define STEP 0x04
+#define SEQRESET 0x02
+#define LOADRAM 0x01
+
+#define SEQCTL1 0xd7
+#define OVRLAY_DATA_CHK 0x08
+#define RAMBIST_DONE 0x04
+#define RAMBIST_FAIL 0x02
+#define RAMBIST_EN 0x01
+
+#define FLAGS 0xd8
+#define ZERO 0x02
+#define CARRY 0x01
+
+#define SEQINTCTL 0xd9
+#define INTVEC1DSL 0x80
+#define INT1_CONTEXT 0x20
+#define SCS_SEQ_INT1M1 0x10
+#define SCS_SEQ_INT1M0 0x08
+#define INTMASK2 0x04
+#define INTMASK1 0x02
+#define IRET 0x01
+
+#define SEQRAM 0xda
+
+#define PRGMCNT 0xde
+
+#define ACCUM 0xe0
+
+#define SINDEX 0xe2
+
+#define DINDEX 0xe4
+
+#define BRKADDR0 0xe6
+
+#define BRKADDR1 0xe6
+#define BRKDIS 0x80
+
+#define ALLONES 0xe8
+
+#define ALLZEROS 0xea
+
+#define NONE 0xea
+
+#define SINDIR 0xec
+
+#define DINDIR 0xed
+
+#define FUNCTION1 0xf0
+
+#define STACK 0xf2
+
+#define INTVEC1_ADDR 0xf4
+
+#define CURADDR 0xf4
+
+#define LASTADDR 0xf6
+
+#define INTVEC2_ADDR 0xf6
+
+#define LONGJMP_ADDR 0xf8
+
+#define ACCUM_SAVE 0xfa
+
+#define WAITING_SCB_TAILS 0x100
+
+#define AHD_PCI_CONFIG_BASE 0x100
+
+#define SRAM_BASE 0x100
+
+#define WAITING_TID_HEAD 0x120
+
+#define WAITING_TID_TAIL 0x122
+
+#define NEXT_QUEUED_SCB_ADDR 0x124
+
+#define COMPLETE_SCB_HEAD 0x128
+
+#define COMPLETE_SCB_DMAINPROG_HEAD 0x12a
+
+#define COMPLETE_DMA_SCB_HEAD 0x12c
+
+#define COMPLETE_DMA_SCB_TAIL 0x12e
+
+#define COMPLETE_ON_QFREEZE_HEAD 0x130
+
+#define QFREEZE_COUNT 0x132
+
+#define KERNEL_QFREEZE_COUNT 0x134
+
+#define SAVED_MODE 0x136
+
+#define MSG_OUT 0x137
+
+#define DMAPARAMS 0x138
+#define PRELOADEN 0x80
+#define WIDEODD 0x40
+#define SCSIEN 0x20
+#define SDMAEN 0x10
+#define SDMAENACK 0x10
+#define HDMAEN 0x08
+#define HDMAENACK 0x08
+#define DIRECTION 0x04
+#define FIFOFLUSH 0x02
+#define FIFORESET 0x01
+
+#define SEQ_FLAGS 0x139
+#define NOT_IDENTIFIED 0x80
+#define NO_CDB_SENT 0x40
+#define TARGET_CMD_IS_TAGGED 0x40
+#define DPHASE 0x20
+#define TARG_CMD_PENDING 0x10
+#define CMDPHASE_PENDING 0x08
+#define DPHASE_PENDING 0x04
+#define SPHASE_PENDING 0x02
+#define NO_DISCONNECT 0x01
+
+#define SAVED_SCSIID 0x13a
+
+#define SAVED_LUN 0x13b
+
+#define LASTPHASE 0x13c
+#define PHASE_MASK 0xe0
+#define CDI 0x80
+#define IOI 0x40
+#define MSGI 0x20
+#define P_BUSFREE 0x01
+#define P_MESGIN 0xe0
+#define P_STATUS 0xc0
+#define P_MESGOUT 0xa0
+#define P_COMMAND 0x80
+#define P_DATAIN_DT 0x60
+#define P_DATAIN 0x40
+#define P_DATAOUT_DT 0x20
+#define P_DATAOUT 0x00
+
+#define QOUTFIFO_ENTRY_VALID_TAG 0x13d
+
+#define KERNEL_TQINPOS 0x13e
+
+#define TQINPOS 0x13f
+
+#define SHARED_DATA_ADDR 0x140
+
+#define QOUTFIFO_NEXT_ADDR 0x144
+
+#define ARG_1 0x148
+#define RETURN_1 0x148
+#define SEND_MSG 0x80
+#define SEND_SENSE 0x40
+#define SEND_REJ 0x20
+#define MSGOUT_PHASEMIS 0x10
+#define EXIT_MSG_LOOP 0x08
+#define CONT_MSG_LOOP_WRITE 0x04
+#define CONT_MSG_LOOP_READ 0x03
+#define CONT_MSG_LOOP_TARG 0x02
+
+#define ARG_2 0x149
+#define RETURN_2 0x149
+
+#define LAST_MSG 0x14a
+
+#define SCSISEQ_TEMPLATE 0x14b
+#define MANUALCTL 0x40
+#define ENSELI 0x20
+#define ENRSELI 0x10
+#define MANUALP 0x0c
+#define ENAUTOATNP 0x02
+#define ALTSTIM 0x01
+
+#define INITIATOR_TAG 0x14c
+
+#define SEQ_FLAGS2 0x14d
+#define SELECTOUT_QFROZEN 0x04
+#define TARGET_MSG_PENDING 0x02
+#define PENDING_MK_MESSAGE 0x01
+
+#define ALLOCFIFO_SCBPTR 0x14e
+
+#define INT_COALESCING_TIMER 0x150
+
+#define INT_COALESCING_MAXCMDS 0x152
+
+#define INT_COALESCING_MINCMDS 0x153
+
+#define CMDS_PENDING 0x154
+
+#define INT_COALESCING_CMDCOUNT 0x156
+
+#define LOCAL_HS_MAILBOX 0x157
+
+#define CMDSIZE_TABLE 0x158
+
+#define MK_MESSAGE_SCB 0x160
+
+#define MK_MESSAGE_SCSIID 0x162
+
+#define SCB_BASE 0x180
+
+#define SCB_RESIDUAL_DATACNT 0x180
+#define SCB_CDB_STORE 0x180
+#define SCB_HOST_CDB_PTR 0x180
+
+#define SCB_RESIDUAL_SGPTR 0x184
+#define SG_ADDR_MASK 0xf8
+#define SG_OVERRUN_RESID 0x02
+
+#define SCB_SCSI_STATUS 0x188
+#define SCB_HOST_CDB_LEN 0x188
+
+#define SCB_TARGET_PHASES 0x189
+
+#define SCB_TARGET_DATA_DIR 0x18a
+
+#define SCB_TARGET_ITAG 0x18b
+
+#define SCB_SENSE_BUSADDR 0x18c
+#define SCB_NEXT_COMPLETE 0x18c
+
+#define SCB_TAG 0x190
+#define SCB_FIFO_USE_COUNT 0x190
+
+#define SCB_CONTROL 0x192
+#define TARGET_SCB 0x80
+#define DISCENB 0x40
+#define TAG_ENB 0x20
+#define MK_MESSAGE 0x10
+#define STATUS_RCVD 0x08
+#define DISCONNECTED 0x04
+#define SCB_TAG_TYPE 0x03
+
+#define SCB_SCSIID 0x193
+#define TID 0xf0
+#define OID 0x0f
+
+#define SCB_LUN 0x194
+#define LID 0xff
+
+#define SCB_TASK_ATTRIBUTE 0x195
+#define SCB_XFERLEN_ODD 0x01
+
+#define SCB_CDB_LEN 0x196
+#define SCB_CDB_LEN_PTR 0x80
+
+#define SCB_TASK_MANAGEMENT 0x197
+
+#define SCB_DATAPTR 0x198
+
+#define SCB_DATACNT 0x1a0
+#define SG_LAST_SEG 0x80
+#define SG_HIGH_ADDR_BITS 0x7f
+
+#define SCB_SGPTR 0x1a4
+#define SG_STATUS_VALID 0x04
+#define SG_FULL_RESID 0x02
+#define SG_LIST_NULL 0x01
+
+#define SCB_BUSADDR 0x1a8
+
+#define SCB_NEXT 0x1ac
+#define SCB_NEXT_SCB_BUSADDR 0x1ac
+
+#define SCB_NEXT2 0x1ae
+
+#define SCB_SPARE 0x1b0
+#define SCB_PKT_LUN 0x1b0
+
+#define SCB_DISCONNECTED_LISTS 0x1b8
+
+
+#define AHD_TIMER_MAX_US 0x18ffe7
+#define AHD_TIMER_MAX_TICKS 0xffff
+#define AHD_SENSE_BUFSIZE 0x100
+#define BUS_8_BIT 0x00
+#define TARGET_CMD_CMPLT 0xfe
+#define SEEOP_WRAL_ADDR 0x40
+#define AHD_AMPLITUDE_DEF 0x07
+#define AHD_PRECOMP_CUTBACK_37 0x07
+#define AHD_PRECOMP_SHIFT 0x00
+#define AHD_ANNEXCOL_PRECOMP_SLEW 0x04
+#define AHD_TIMER_US_PER_TICK 0x19
+#define SCB_TRANSFER_SIZE_FULL_LUN 0x38
+#define STATUS_QUEUE_FULL 0x28
+#define STATUS_BUSY 0x08
+#define MAX_OFFSET_NON_PACED 0x7f
+#define MAX_OFFSET_PACED 0xfe
+#define BUS_32_BIT 0x02
+#define CCSGADDR_MAX 0x80
+#define TID_SHIFT 0x04
+#define MK_MESSAGE_BIT_OFFSET 0x04
+#define WRTBIASCTL_HP_DEFAULT 0x00
+#define SEEOP_EWDS_ADDR 0x00
+#define AHD_AMPLITUDE_SHIFT 0x00
+#define AHD_AMPLITUDE_MASK 0x07
+#define AHD_ANNEXCOL_AMPLITUDE 0x06
+#define AHD_SLEWRATE_DEF_REVA 0x08
+#define AHD_SLEWRATE_SHIFT 0x03
+#define AHD_SLEWRATE_MASK 0x78
+#define AHD_PRECOMP_CUTBACK_29 0x06
+#define AHD_NUM_PER_DEV_ANNEXCOLS 0x04
+#define B_CURRFIFO_0 0x02
+#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
+#define NVRAM_SCB_OFFSET 0x2c
+#define STATUS_PKT_SENSE 0xff
+#define CMD_GROUP_CODE_SHIFT 0x05
+#define MAX_OFFSET_PACED_BUG 0x7f
+#define STIMESEL_BUG_ADJ 0x08
+#define STIMESEL_MIN 0x18
+#define STIMESEL_SHIFT 0x03
+#define CCSGRAM_MAXSEGS 0x10
+#define INVALID_ADDR 0x80
+#define SEEOP_ERAL_ADDR 0x80
+#define AHD_SLEWRATE_DEF_REVB 0x08
+#define AHD_PRECOMP_CUTBACK_17 0x04
+#define AHD_PRECOMP_MASK 0x07
+#define SRC_MODE_SHIFT 0x00
+#define PKT_OVERRUN_BUFSIZE 0x200
+#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
+#define TARGET_DATA_IN 0x01
+#define HOST_MSG 0xff
+#define MAX_OFFSET 0xfe
+#define BUS_16_BIT 0x01
+#define CCSCBADDR_MAX 0x80
+#define NUMDSPS 0x14
+#define SEEOP_EWEN_ADDR 0xc0
+#define AHD_ANNEXCOL_PER_DEV0 0x04
+#define DST_MODE_SHIFT 0x04
+
+
+/* Downloaded Constant Definitions */
+#define CACHELINE_MASK 0x07
+#define SCB_TRANSFER_SIZE 0x06
+#define PKT_OVERRUN_BUFOFFSET 0x05
+#define SG_SIZEOF 0x04
+#define SG_PREFETCH_ADDR_MASK 0x03
+#define SG_PREFETCH_ALIGN_MASK 0x02
+#define SG_PREFETCH_CNT_LIMIT 0x01
+#define SG_PREFETCH_CNT 0x00
+#define DOWNLOAD_CONST_COUNT 0x08
+
+
+/* Exported Labels */
+#define LABEL_seq_isr 0x28f
+#define LABEL_timer_isr 0x28b
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
new file mode 100644
index 000000000..f5ea715d6
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -0,0 +1,745 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ * from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
+ */
+
+#include "aic79xx_osm.h"
+
+static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
+ { "SPLTINT", 0x01, 0x01 },
+ { "CMDCMPLT", 0x02, 0x02 },
+ { "SEQINT", 0x04, 0x04 },
+ { "SCSIINT", 0x08, 0x08 },
+ { "PCIINT", 0x10, 0x10 },
+ { "SWTMINT", 0x20, 0x20 },
+ { "BRKADRINT", 0x40, 0x40 },
+ { "HWERRINT", 0x80, 0x80 },
+ { "INT_PEND", 0xff, 0xff }
+};
+
+int
+ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(INTSTAT_parse_table, 9, "INTSTAT",
+ 0x01, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+ { "ENINT_COALESCE", 0x40, 0x40 },
+ { "HOST_TQINPOS", 0x80, 0x80 }
+};
+
+int
+ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(HS_MAILBOX_parse_table, 2, "HS_MAILBOX",
+ 0x0b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
+ { "SEQ_SPLTINT", 0x01, 0x01 },
+ { "SEQ_PCIINT", 0x02, 0x02 },
+ { "SEQ_SCSIINT", 0x04, 0x04 },
+ { "SEQ_SEQINT", 0x08, 0x08 },
+ { "SEQ_SWTMRTO", 0x10, 0x10 }
+};
+
+int
+ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQINTSTAT_parse_table, 5, "SEQINTSTAT",
+ 0x0c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
+ { "SPLTINTEN", 0x01, 0x01 },
+ { "SEQINTEN", 0x02, 0x02 },
+ { "SCSIINTEN", 0x04, 0x04 },
+ { "PCIINTEN", 0x08, 0x08 },
+ { "AUTOCLRCMDINT", 0x10, 0x10 },
+ { "SWTIMER_START", 0x20, 0x20 },
+ { "SWTMINTEN", 0x40, 0x40 },
+ { "SWTMINTMASK", 0x80, 0x80 }
+};
+
+int
+ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(INTCTL_parse_table, 8, "INTCTL",
+ 0x18, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
+ { "DIRECTIONEN", 0x01, 0x01 },
+ { "FIFOFLUSH", 0x02, 0x02 },
+ { "FIFOFLUSHACK", 0x02, 0x02 },
+ { "DIRECTION", 0x04, 0x04 },
+ { "DIRECTIONACK", 0x04, 0x04 },
+ { "HDMAEN", 0x08, 0x08 },
+ { "HDMAENACK", 0x08, 0x08 },
+ { "SCSIEN", 0x20, 0x20 },
+ { "SCSIENACK", 0x20, 0x20 },
+ { "SCSIENWRDIS", 0x40, 0x40 },
+ { "PRELOADEN", 0x80, 0x80 }
+};
+
+int
+ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(DFCNTRL_parse_table, 11, "DFCNTRL",
+ 0x19, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
+ { "FIFOEMP", 0x01, 0x01 },
+ { "FIFOFULL", 0x02, 0x02 },
+ { "DFTHRESH", 0x04, 0x04 },
+ { "HDONE", 0x08, 0x08 },
+ { "MREQPEND", 0x10, 0x10 },
+ { "PKT_PRELOAD_AVAIL", 0x40, 0x40 },
+ { "PRELOAD_AVAIL", 0x80, 0x80 }
+};
+
+int
+ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(DFSTATUS_parse_table, 7, "DFSTATUS",
+ 0x1a, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+ { "LAST_SEG_DONE", 0x01, 0x01 },
+ { "LAST_SEG", 0x02, 0x02 },
+ { "ODD_SEG", 0x04, 0x04 },
+ { "SG_ADDR_MASK", 0xf8, 0xf8 }
+};
+
+int
+ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SG_CACHE_SHADOW_parse_table, 4, "SG_CACHE_SHADOW",
+ 0x1b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
+ { "SCSIRSTO", 0x01, 0x01 },
+ { "FORCEBUSFREE", 0x10, 0x10 },
+ { "ENARBO", 0x20, 0x20 },
+ { "ENSELO", 0x40, 0x40 },
+ { "TEMODEO", 0x80, 0x80 }
+};
+
+int
+ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SCSISEQ0_parse_table, 5, "SCSISEQ0",
+ 0x3a, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
+ { "ALTSTIM", 0x01, 0x01 },
+ { "ENAUTOATNP", 0x02, 0x02 },
+ { "MANUALP", 0x0c, 0x0c },
+ { "ENRSELI", 0x10, 0x10 },
+ { "ENSELI", 0x20, 0x20 },
+ { "MANUALCTL", 0x40, 0x40 }
+};
+
+int
+ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SCSISEQ1_parse_table, 6, "SCSISEQ1",
+ 0x3b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
+ { "CURRFIFO_0", 0x00, 0x03 },
+ { "CURRFIFO_1", 0x01, 0x03 },
+ { "CURRFIFO_NONE", 0x03, 0x03 },
+ { "FIFO0FREE", 0x10, 0x10 },
+ { "FIFO1FREE", 0x20, 0x20 },
+ { "CURRFIFO", 0x03, 0x03 }
+};
+
+int
+ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(DFFSTAT_parse_table, 6, "DFFSTAT",
+ 0x3f, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
+ { "P_DATAOUT", 0x00, 0xe0 },
+ { "P_DATAOUT_DT", 0x20, 0xe0 },
+ { "P_DATAIN", 0x40, 0xe0 },
+ { "P_DATAIN_DT", 0x60, 0xe0 },
+ { "P_COMMAND", 0x80, 0xe0 },
+ { "P_MESGOUT", 0xa0, 0xe0 },
+ { "P_STATUS", 0xc0, 0xe0 },
+ { "P_MESGIN", 0xe0, 0xe0 },
+ { "ACKI", 0x01, 0x01 },
+ { "REQI", 0x02, 0x02 },
+ { "BSYI", 0x04, 0x04 },
+ { "SELI", 0x08, 0x08 },
+ { "ATNI", 0x10, 0x10 },
+ { "MSGI", 0x20, 0x20 },
+ { "IOI", 0x40, 0x40 },
+ { "CDI", 0x80, 0x80 },
+ { "PHASE_MASK", 0xe0, 0xe0 }
+};
+
+int
+ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SCSISIGI_parse_table, 17, "SCSISIGI",
+ 0x41, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+ { "DATA_OUT_PHASE", 0x01, 0x03 },
+ { "DATA_IN_PHASE", 0x02, 0x03 },
+ { "DATA_PHASE_MASK", 0x03, 0x03 },
+ { "MSG_OUT_PHASE", 0x04, 0x04 },
+ { "MSG_IN_PHASE", 0x08, 0x08 },
+ { "COMMAND_PHASE", 0x10, 0x10 },
+ { "STATUS_PHASE", 0x20, 0x20 }
+};
+
+int
+ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SCSIPHASE_parse_table, 7, "SCSIPHASE",
+ 0x42, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "SCSIBUS",
+ 0x46, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SELID_parse_table[] = {
+ { "ONEBIT", 0x08, 0x08 },
+ { "SELID_MASK", 0xf0, 0xf0 }
+};
+
+int
+ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SELID_parse_table, 2, "SELID",
+ 0x49, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
+ { "ARBDO", 0x01, 0x01 },
+ { "SPIORDY", 0x02, 0x02 },
+ { "OVERRUN", 0x04, 0x04 },
+ { "IOERR", 0x08, 0x08 },
+ { "SELINGO", 0x10, 0x10 },
+ { "SELDI", 0x20, 0x20 },
+ { "SELDO", 0x40, 0x40 },
+ { "TARGET", 0x80, 0x80 }
+};
+
+int
+ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SSTAT0_parse_table, 8, "SSTAT0",
+ 0x4b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+ { "ENARBDO", 0x01, 0x01 },
+ { "ENSPIORDY", 0x02, 0x02 },
+ { "ENOVERRUN", 0x04, 0x04 },
+ { "ENIOERR", 0x08, 0x08 },
+ { "ENSELINGO", 0x10, 0x10 },
+ { "ENSELDI", 0x20, 0x20 },
+ { "ENSELDO", 0x40, 0x40 }
+};
+
+int
+ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0",
+ 0x4b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
+ { "REQINIT", 0x01, 0x01 },
+ { "STRB2FAST", 0x02, 0x02 },
+ { "SCSIPERR", 0x04, 0x04 },
+ { "BUSFREE", 0x08, 0x08 },
+ { "PHASEMIS", 0x10, 0x10 },
+ { "SCSIRSTI", 0x20, 0x20 },
+ { "ATNTARG", 0x40, 0x40 },
+ { "SELTO", 0x80, 0x80 }
+};
+
+int
+ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SSTAT1_parse_table, 8, "SSTAT1",
+ 0x4c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
+ { "BUSFREE_LQO", 0x40, 0xc0 },
+ { "BUSFREE_DFF0", 0x80, 0xc0 },
+ { "BUSFREE_DFF1", 0xc0, 0xc0 },
+ { "DMADONE", 0x01, 0x01 },
+ { "SDONE", 0x02, 0x02 },
+ { "WIDE_RES", 0x04, 0x04 },
+ { "BSYX", 0x08, 0x08 },
+ { "EXP_ACTIVE", 0x10, 0x10 },
+ { "NONPACKREQ", 0x20, 0x20 },
+ { "BUSFREETIME", 0xc0, 0xc0 }
+};
+
+int
+ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SSTAT2_parse_table, 10, "SSTAT2",
+ 0x4d, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
+ { "DTERR", 0x01, 0x01 },
+ { "DGFORMERR", 0x02, 0x02 },
+ { "CRCERR", 0x04, 0x04 },
+ { "AIPERR", 0x08, 0x08 },
+ { "PARITYERR", 0x10, 0x10 },
+ { "PREVPHASE", 0x20, 0x20 },
+ { "HIPERR", 0x40, 0x40 },
+ { "HIZERO", 0x80, 0x80 }
+};
+
+int
+ahd_perrdiag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(PERRDIAG_parse_table, 8, "PERRDIAG",
+ 0x4e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "SOFFCNT",
+ 0x4f, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
+ { "LQIATNCMD", 0x01, 0x01 },
+ { "LQIATNLQ", 0x02, 0x02 },
+ { "LQIBADLQT", 0x04, 0x04 },
+ { "LQICRCT2", 0x08, 0x08 },
+ { "LQICRCT1", 0x10, 0x10 },
+ { "LQIATNQAS", 0x20, 0x20 }
+};
+
+int
+ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LQISTAT0_parse_table, 6, "LQISTAT0",
+ 0x50, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
+ { "LQIOVERI_NLQ", 0x01, 0x01 },
+ { "LQIOVERI_LQ", 0x02, 0x02 },
+ { "LQIBADLQI", 0x04, 0x04 },
+ { "LQICRCI_NLQ", 0x08, 0x08 },
+ { "LQICRCI_LQ", 0x10, 0x10 },
+ { "LQIABORT", 0x20, 0x20 },
+ { "LQIPHASE_NLQ", 0x40, 0x40 },
+ { "LQIPHASE_LQ", 0x80, 0x80 }
+};
+
+int
+ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LQISTAT1_parse_table, 8, "LQISTAT1",
+ 0x51, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
+ { "LQIGSAVAIL", 0x01, 0x01 },
+ { "LQISTOPCMD", 0x02, 0x02 },
+ { "LQISTOPLQ", 0x04, 0x04 },
+ { "LQISTOPPKT", 0x08, 0x08 },
+ { "LQIWAITFIFO", 0x10, 0x10 },
+ { "LQIWORKONLQ", 0x20, 0x20 },
+ { "LQIPHASE_OUTPKT", 0x40, 0x40 },
+ { "PACKETIZED", 0x80, 0x80 }
+};
+
+int
+ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LQISTAT2_parse_table, 8, "LQISTAT2",
+ 0x52, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
+ { "OSRAMPERR", 0x01, 0x01 },
+ { "NTRAMPERR", 0x02, 0x02 }
+};
+
+int
+ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SSTAT3_parse_table, 2, "SSTAT3",
+ 0x53, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
+ { "LQOTCRC", 0x01, 0x01 },
+ { "LQOATNPKT", 0x02, 0x02 },
+ { "LQOATNLQ", 0x04, 0x04 },
+ { "LQOSTOPT2", 0x08, 0x08 },
+ { "LQOTARGSCBPERR", 0x10, 0x10 }
+};
+
+int
+ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LQOSTAT0_parse_table, 5, "LQOSTAT0",
+ 0x54, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
+ { "LQOPHACHGINPKT", 0x01, 0x01 },
+ { "LQOBUSFREE", 0x02, 0x02 },
+ { "LQOBADQAS", 0x04, 0x04 },
+ { "LQOSTOPI2", 0x08, 0x08 },
+ { "LQOINITSCBPERR", 0x10, 0x10 }
+};
+
+int
+ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LQOSTAT1_parse_table, 5, "LQOSTAT1",
+ 0x55, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
+ { "LQOSTOP0", 0x01, 0x01 },
+ { "LQOPHACHGOUTPKT", 0x02, 0x02 },
+ { "LQOWAITFIFO", 0x10, 0x10 },
+ { "LQOPKT", 0xe0, 0xe0 }
+};
+
+int
+ahd_lqostat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LQOSTAT2_parse_table, 4, "LQOSTAT2",
+ 0x56, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
+ { "ENREQINIT", 0x01, 0x01 },
+ { "ENSTRB2FAST", 0x02, 0x02 },
+ { "ENSCSIPERR", 0x04, 0x04 },
+ { "ENBUSFREE", 0x08, 0x08 },
+ { "ENPHASEMIS", 0x10, 0x10 },
+ { "ENSCSIRST", 0x20, 0x20 },
+ { "ENATNTARG", 0x40, 0x40 },
+ { "ENSELTIMO", 0x80, 0x80 }
+};
+
+int
+ahd_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SIMODE1_parse_table, 8, "SIMODE1",
+ 0x57, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
+ { "RSTCHN", 0x01, 0x01 },
+ { "CLRCHN", 0x02, 0x02 },
+ { "CLRSHCNT", 0x04, 0x04 },
+ { "DFFBITBUCKET", 0x08, 0x08 }
+};
+
+int
+ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(DFFSXFRCTL_parse_table, 4, "DFFSXFRCTL",
+ 0x5a, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
+ { "CFG4TCMD", 0x01, 0x01 },
+ { "CFG4ICMD", 0x02, 0x02 },
+ { "CFG4TSTAT", 0x04, 0x04 },
+ { "CFG4ISTAT", 0x08, 0x08 },
+ { "CFG4DATA", 0x10, 0x10 },
+ { "SAVEPTRS", 0x20, 0x20 },
+ { "CTXTDONE", 0x40, 0x40 }
+};
+
+int
+ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQINTSRC_parse_table, 7, "SEQINTSRC",
+ 0x5b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+ { "ENCFG4TCMD", 0x01, 0x01 },
+ { "ENCFG4ICMD", 0x02, 0x02 },
+ { "ENCFG4TSTAT", 0x04, 0x04 },
+ { "ENCFG4ISTAT", 0x08, 0x08 },
+ { "ENCFG4DATA", 0x10, 0x10 },
+ { "ENSAVEPTRS", 0x20, 0x20 },
+ { "ENCTXTDONE", 0x40, 0x40 }
+};
+
+int
+ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQIMODE_parse_table, 7, "SEQIMODE",
+ 0x5c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+ { "FIFOFREE", 0x01, 0x01 },
+ { "DATAINFIFO", 0x02, 0x02 },
+ { "DLZERO", 0x04, 0x04 },
+ { "SHVALID", 0x08, 0x08 },
+ { "LASTSDONE", 0x10, 0x10 },
+ { "SHCNTMINUS1", 0x20, 0x20 },
+ { "SHCNTNEGATIVE", 0x40, 0x40 }
+};
+
+int
+ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(MDFFSTAT_parse_table, 7, "MDFFSTAT",
+ 0x5d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "SELOID",
+ 0x6b, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
+ { "SEGS_AVAIL", 0x01, 0x01 },
+ { "LOADING_NEEDED", 0x02, 0x02 },
+ { "FETCH_INPROG", 0x04, 0x04 }
+};
+
+int
+ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SG_STATE_parse_table, 3, "SG_STATE",
+ 0xa6, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+ { "CCSCBRESET", 0x01, 0x01 },
+ { "CCSCBDIR", 0x04, 0x04 },
+ { "CCSCBEN", 0x08, 0x08 },
+ { "CCARREN", 0x10, 0x10 },
+ { "ARRDONE", 0x40, 0x40 },
+ { "CCSCBDONE", 0x80, 0x80 }
+};
+
+int
+ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL",
+ 0xad, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
+ { "CCSGRESET", 0x01, 0x01 },
+ { "SG_FETCH_REQ", 0x02, 0x02 },
+ { "CCSGENACK", 0x08, 0x08 },
+ { "SG_CACHE_AVAIL", 0x10, 0x10 },
+ { "CCSGDONE", 0x80, 0x80 },
+ { "CCSGEN", 0x0c, 0x0c }
+};
+
+int
+ahd_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CCSGCTL_parse_table, 6, "CCSGCTL",
+ 0xad, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
+ { "LOADRAM", 0x01, 0x01 },
+ { "SEQRESET", 0x02, 0x02 },
+ { "STEP", 0x04, 0x04 },
+ { "BRKADRINTEN", 0x08, 0x08 },
+ { "FASTMODE", 0x10, 0x10 },
+ { "FAILDIS", 0x20, 0x20 },
+ { "PAUSEDIS", 0x40, 0x40 },
+ { "PERRORDIS", 0x80, 0x80 }
+};
+
+int
+ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQCTL0_parse_table, 8, "SEQCTL0",
+ 0xd6, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
+ { "IRET", 0x01, 0x01 },
+ { "INTMASK1", 0x02, 0x02 },
+ { "INTMASK2", 0x04, 0x04 },
+ { "SCS_SEQ_INT1M0", 0x08, 0x08 },
+ { "SCS_SEQ_INT1M1", 0x10, 0x10 },
+ { "INT1_CONTEXT", 0x20, 0x20 },
+ { "INTVEC1DSL", 0x80, 0x80 }
+};
+
+int
+ahd_seqintctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQINTCTL_parse_table, 7, "SEQINTCTL",
+ 0xd9, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "SRAM_BASE",
+ 0x100, regvalue, cur_col, wrap));
+}
+
+int
+ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "QFREEZE_COUNT",
+ 0x132, regvalue, cur_col, wrap));
+}
+
+int
+ahd_kernel_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "KERNEL_QFREEZE_COUNT",
+ 0x134, regvalue, cur_col, wrap));
+}
+
+int
+ahd_saved_mode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "SAVED_MODE",
+ 0x136, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+ { "NO_DISCONNECT", 0x01, 0x01 },
+ { "SPHASE_PENDING", 0x02, 0x02 },
+ { "DPHASE_PENDING", 0x04, 0x04 },
+ { "CMDPHASE_PENDING", 0x08, 0x08 },
+ { "TARG_CMD_PENDING", 0x10, 0x10 },
+ { "DPHASE", 0x20, 0x20 },
+ { "NO_CDB_SENT", 0x40, 0x40 },
+ { "TARGET_CMD_IS_TAGGED",0x40, 0x40 },
+ { "NOT_IDENTIFIED", 0x80, 0x80 }
+};
+
+int
+ahd_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQ_FLAGS_parse_table, 9, "SEQ_FLAGS",
+ 0x139, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
+ { "P_DATAOUT", 0x00, 0xe0 },
+ { "P_DATAOUT_DT", 0x20, 0xe0 },
+ { "P_DATAIN", 0x40, 0xe0 },
+ { "P_DATAIN_DT", 0x60, 0xe0 },
+ { "P_COMMAND", 0x80, 0xe0 },
+ { "P_MESGOUT", 0xa0, 0xe0 },
+ { "P_STATUS", 0xc0, 0xe0 },
+ { "P_MESGIN", 0xe0, 0xe0 },
+ { "P_BUSFREE", 0x01, 0x01 },
+ { "MSGI", 0x20, 0x20 },
+ { "IOI", 0x40, 0x40 },
+ { "CDI", 0x80, 0x80 },
+ { "PHASE_MASK", 0xe0, 0xe0 }
+};
+
+int
+ahd_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(LASTPHASE_parse_table, 13, "LASTPHASE",
+ 0x13c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+ { "PENDING_MK_MESSAGE", 0x01, 0x01 },
+ { "TARGET_MSG_PENDING", 0x02, 0x02 },
+ { "SELECTOUT_QFROZEN", 0x04, 0x04 }
+};
+
+int
+ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SEQ_FLAGS2_parse_table, 3, "SEQ_FLAGS2",
+ 0x14d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB",
+ 0x160, regvalue, cur_col, wrap));
+}
+
+int
+ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID",
+ 0x162, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "SCB_BASE",
+ 0x180, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+ { "SCB_TAG_TYPE", 0x03, 0x03 },
+ { "DISCONNECTED", 0x04, 0x04 },
+ { "STATUS_RCVD", 0x08, 0x08 },
+ { "MK_MESSAGE", 0x10, 0x10 },
+ { "TAG_ENB", 0x20, 0x20 },
+ { "DISCENB", 0x40, 0x40 },
+ { "TARGET_SCB", 0x80, 0x80 }
+};
+
+int
+ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SCB_CONTROL_parse_table, 7, "SCB_CONTROL",
+ 0x192, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+ { "OID", 0x0f, 0x0f },
+ { "TID", 0xf0, 0xf0 }
+};
+
+int
+ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(SCB_SCSIID_parse_table, 2, "SCB_SCSIID",
+ 0x193, regvalue, cur_col, wrap));
+}
+
diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
new file mode 100644
index 000000000..4b51e2323
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
@@ -0,0 +1,1190 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ * from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
+ */
+static const uint8_t seqprog[] = {
+ 0xff, 0x02, 0x06, 0x78,
+ 0x00, 0xea, 0x6e, 0x59,
+ 0x01, 0xea, 0x04, 0x30,
+ 0xff, 0x04, 0x0c, 0x78,
+ 0x19, 0xea, 0x6e, 0x59,
+ 0x19, 0xea, 0x04, 0x00,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x60, 0x3a, 0x3a, 0x68,
+ 0x04, 0x4d, 0x35, 0x78,
+ 0x01, 0x34, 0xc1, 0x31,
+ 0x00, 0x32, 0x21, 0x60,
+ 0x01, 0x35, 0xc1, 0x31,
+ 0x00, 0x33, 0x21, 0x60,
+ 0xfb, 0x4d, 0x9b, 0x0a,
+ 0x00, 0xe2, 0x34, 0x40,
+ 0x50, 0x4b, 0x3a, 0x68,
+ 0xff, 0x31, 0x3b, 0x70,
+ 0x02, 0x30, 0x51, 0x31,
+ 0xff, 0x8d, 0x2d, 0x70,
+ 0x02, 0x8c, 0x51, 0x31,
+ 0xff, 0x8d, 0x29, 0x60,
+ 0x02, 0x28, 0x19, 0x33,
+ 0x02, 0x30, 0x51, 0x32,
+ 0xff, 0xea, 0x62, 0x02,
+ 0x00, 0xe2, 0x3a, 0x40,
+ 0xff, 0x21, 0x3b, 0x70,
+ 0x40, 0x4b, 0xb4, 0x69,
+ 0x00, 0xe2, 0x72, 0x59,
+ 0x40, 0x4b, 0xb4, 0x69,
+ 0x20, 0x4b, 0xa0, 0x69,
+ 0xfc, 0x42, 0x44, 0x78,
+ 0x10, 0x40, 0x44, 0x78,
+ 0x00, 0xe2, 0x10, 0x5e,
+ 0x20, 0x4d, 0x48, 0x78,
+ 0x00, 0xe2, 0x10, 0x5e,
+ 0x30, 0x3f, 0xc0, 0x09,
+ 0x30, 0xe0, 0x50, 0x60,
+ 0x7f, 0x4a, 0x94, 0x08,
+ 0x00, 0xe2, 0x52, 0x40,
+ 0xc0, 0x4a, 0x94, 0x00,
+ 0x00, 0xe2, 0x5e, 0x58,
+ 0x00, 0xe2, 0x76, 0x58,
+ 0x00, 0xe2, 0x86, 0x58,
+ 0x00, 0xe2, 0x06, 0x40,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x01, 0x52, 0x84, 0x78,
+ 0x02, 0x58, 0x50, 0x31,
+ 0xff, 0xea, 0x10, 0x0b,
+ 0xff, 0x97, 0x6f, 0x78,
+ 0x50, 0x4b, 0x6a, 0x68,
+ 0xbf, 0x3a, 0x74, 0x08,
+ 0x14, 0xea, 0x6e, 0x59,
+ 0x14, 0xea, 0x04, 0x00,
+ 0x08, 0x92, 0x25, 0x03,
+ 0xff, 0x90, 0x5f, 0x68,
+ 0x00, 0xe2, 0x8a, 0x5b,
+ 0x00, 0xe2, 0x5e, 0x40,
+ 0x00, 0xea, 0x68, 0x59,
+ 0x01, 0xea, 0x00, 0x30,
+ 0x80, 0xf9, 0x7e, 0x68,
+ 0x00, 0xe2, 0x66, 0x59,
+ 0x11, 0xea, 0x68, 0x59,
+ 0x11, 0xea, 0x00, 0x00,
+ 0x80, 0xf9, 0x66, 0x79,
+ 0xff, 0xea, 0xd4, 0x0d,
+ 0x22, 0xea, 0x68, 0x59,
+ 0x22, 0xea, 0x00, 0x00,
+ 0x10, 0x16, 0x90, 0x78,
+ 0x10, 0x16, 0x2c, 0x00,
+ 0x01, 0x0b, 0xae, 0x32,
+ 0x18, 0xad, 0x1c, 0x79,
+ 0x04, 0xad, 0xdc, 0x68,
+ 0x80, 0xad, 0x84, 0x78,
+ 0x10, 0xad, 0xaa, 0x78,
+ 0xe7, 0xad, 0x5a, 0x09,
+ 0x02, 0x8c, 0x59, 0x32,
+ 0xff, 0x8d, 0xa1, 0x60,
+ 0xff, 0xea, 0x5e, 0x02,
+ 0xff, 0x88, 0xa7, 0x78,
+ 0x02, 0x30, 0x19, 0x33,
+ 0x02, 0xa8, 0x60, 0x36,
+ 0x02, 0x28, 0x19, 0x33,
+ 0x02, 0xa8, 0x50, 0x36,
+ 0xe7, 0xad, 0x5a, 0x09,
+ 0x00, 0xe2, 0xb8, 0x58,
+ 0xff, 0xea, 0x56, 0x02,
+ 0x04, 0x7c, 0x88, 0x32,
+ 0x20, 0x16, 0x84, 0x78,
+ 0x04, 0x40, 0x89, 0x32,
+ 0x80, 0x3d, 0x7b, 0x16,
+ 0xff, 0x2d, 0xc7, 0x60,
+ 0xff, 0x29, 0xc7, 0x60,
+ 0x40, 0x57, 0xd7, 0x78,
+ 0xff, 0x55, 0xc7, 0x68,
+ 0xff, 0x53, 0xc1, 0x19,
+ 0x00, 0x54, 0xd5, 0x19,
+ 0x00, 0xe2, 0xd6, 0x50,
+ 0x01, 0x52, 0xc1, 0x31,
+ 0x00, 0x56, 0xd5, 0x19,
+ 0x00, 0xe2, 0xd6, 0x48,
+ 0x80, 0x18, 0x84, 0x78,
+ 0x02, 0x50, 0x1d, 0x30,
+ 0x10, 0xea, 0x18, 0x00,
+ 0x60, 0x18, 0x30, 0x00,
+ 0x7f, 0x18, 0x30, 0x0c,
+ 0x02, 0xea, 0x02, 0x00,
+ 0xff, 0xea, 0xac, 0x0a,
+ 0x80, 0x18, 0x30, 0x04,
+ 0x40, 0xad, 0x84, 0x78,
+ 0xe7, 0xad, 0x5a, 0x09,
+ 0xff, 0xea, 0xc0, 0x09,
+ 0x01, 0x54, 0xa9, 0x1a,
+ 0x00, 0x55, 0xab, 0x22,
+ 0x01, 0x94, 0x6d, 0x33,
+ 0xff, 0xea, 0x20, 0x0b,
+ 0x04, 0xac, 0x49, 0x32,
+ 0xff, 0xea, 0x5a, 0x03,
+ 0xff, 0xea, 0x5e, 0x03,
+ 0x01, 0x10, 0xd4, 0x31,
+ 0x02, 0xa8, 0x40, 0x31,
+ 0x01, 0x92, 0xc1, 0x31,
+ 0x3d, 0x93, 0xc5, 0x29,
+ 0xfe, 0xe2, 0xc4, 0x09,
+ 0x01, 0xea, 0xc6, 0x01,
+ 0x02, 0xe2, 0xc8, 0x31,
+ 0x02, 0xec, 0x50, 0x31,
+ 0x02, 0xa0, 0xda, 0x31,
+ 0xff, 0xa9, 0x10, 0x71,
+ 0x10, 0xe0, 0x0e, 0x79,
+ 0x10, 0x92, 0x0f, 0x79,
+ 0x01, 0x4d, 0x9b, 0x02,
+ 0x02, 0xa0, 0xc0, 0x32,
+ 0x01, 0x93, 0xc5, 0x36,
+ 0x02, 0xa0, 0x58, 0x37,
+ 0xff, 0x21, 0x19, 0x71,
+ 0x02, 0x22, 0x51, 0x31,
+ 0x02, 0xa0, 0x5c, 0x33,
+ 0x02, 0xa0, 0x44, 0x36,
+ 0x02, 0xa0, 0x40, 0x32,
+ 0x02, 0xa0, 0x44, 0x36,
+ 0x05, 0x4d, 0x21, 0x69,
+ 0x40, 0x16, 0x52, 0x69,
+ 0xff, 0x2d, 0x57, 0x61,
+ 0xff, 0x29, 0x85, 0x70,
+ 0x02, 0x28, 0x55, 0x32,
+ 0x01, 0xea, 0x5a, 0x01,
+ 0x04, 0x44, 0xf9, 0x30,
+ 0x01, 0x44, 0xc1, 0x31,
+ 0x02, 0x28, 0x51, 0x31,
+ 0x02, 0xa8, 0x60, 0x31,
+ 0x01, 0xa4, 0x61, 0x31,
+ 0x01, 0x3d, 0x61, 0x31,
+ 0x01, 0x14, 0xd4, 0x31,
+ 0x01, 0x56, 0xad, 0x1a,
+ 0xff, 0x54, 0xa9, 0x1a,
+ 0xff, 0x55, 0xab, 0x22,
+ 0xff, 0x8d, 0x4b, 0x71,
+ 0x80, 0xac, 0x4a, 0x71,
+ 0x20, 0x16, 0x4a, 0x69,
+ 0x00, 0xac, 0xc4, 0x19,
+ 0x07, 0xe2, 0x4a, 0xf9,
+ 0x02, 0x8c, 0x51, 0x31,
+ 0x00, 0xe2, 0x2e, 0x41,
+ 0x01, 0xac, 0x08, 0x31,
+ 0x09, 0xea, 0x5a, 0x01,
+ 0x02, 0x8c, 0x51, 0x32,
+ 0xff, 0xea, 0x1a, 0x07,
+ 0x04, 0x24, 0xf9, 0x30,
+ 0x1d, 0xea, 0x5c, 0x41,
+ 0x02, 0x2c, 0x51, 0x31,
+ 0x04, 0xa8, 0xf9, 0x30,
+ 0x19, 0xea, 0x5c, 0x41,
+ 0x06, 0xea, 0x08, 0x81,
+ 0x01, 0xe2, 0x5a, 0x35,
+ 0x02, 0xf2, 0xf0, 0x31,
+ 0xff, 0xea, 0xd4, 0x0d,
+ 0x02, 0xf2, 0xf0, 0x31,
+ 0x02, 0xf8, 0xe4, 0x35,
+ 0x80, 0xea, 0xb2, 0x01,
+ 0x01, 0xe2, 0x00, 0x30,
+ 0xff, 0xea, 0xb2, 0x0d,
+ 0x01, 0xe2, 0x04, 0x30,
+ 0x01, 0xea, 0x04, 0x34,
+ 0x02, 0x20, 0xbd, 0x30,
+ 0x02, 0x20, 0xb9, 0x30,
+ 0x02, 0x20, 0x51, 0x31,
+ 0x4c, 0x93, 0xd7, 0x28,
+ 0x10, 0x92, 0x81, 0x79,
+ 0x01, 0x6b, 0xc0, 0x30,
+ 0x02, 0x64, 0xc8, 0x00,
+ 0x40, 0x3a, 0x74, 0x04,
+ 0x00, 0xe2, 0x76, 0x58,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x30, 0x3f, 0xc0, 0x09,
+ 0x30, 0xe0, 0x82, 0x61,
+ 0x20, 0x3f, 0x98, 0x69,
+ 0x10, 0x3f, 0x82, 0x79,
+ 0x02, 0xea, 0x7e, 0x00,
+ 0x00, 0xea, 0x68, 0x59,
+ 0x01, 0xea, 0x00, 0x30,
+ 0x02, 0x4e, 0x51, 0x35,
+ 0x01, 0xea, 0x7e, 0x00,
+ 0x11, 0xea, 0x68, 0x59,
+ 0x11, 0xea, 0x00, 0x00,
+ 0x02, 0x4e, 0x51, 0x35,
+ 0xc0, 0x4a, 0x94, 0x00,
+ 0x04, 0x41, 0xa6, 0x79,
+ 0x08, 0xea, 0x98, 0x00,
+ 0x08, 0x57, 0xae, 0x00,
+ 0x08, 0x3c, 0x78, 0x00,
+ 0xf0, 0x49, 0x74, 0x0a,
+ 0x0f, 0x67, 0xc0, 0x09,
+ 0x00, 0x3a, 0x75, 0x02,
+ 0x20, 0xea, 0x96, 0x00,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0xc0, 0x4a, 0x94, 0x00,
+ 0x40, 0x3a, 0xd2, 0x69,
+ 0x02, 0x55, 0x06, 0x68,
+ 0x02, 0x56, 0xd2, 0x69,
+ 0xff, 0x5b, 0xd2, 0x61,
+ 0x02, 0x20, 0x51, 0x31,
+ 0x80, 0xea, 0xb2, 0x01,
+ 0x44, 0xea, 0x00, 0x00,
+ 0x01, 0x33, 0xc0, 0x31,
+ 0x33, 0xea, 0x00, 0x00,
+ 0xff, 0xea, 0xb2, 0x09,
+ 0xff, 0xe0, 0xc0, 0x19,
+ 0xff, 0xe0, 0xd4, 0x79,
+ 0x02, 0xac, 0x51, 0x31,
+ 0x00, 0xe2, 0xca, 0x41,
+ 0x02, 0x5e, 0x50, 0x31,
+ 0x02, 0xa8, 0xb8, 0x30,
+ 0x02, 0x5c, 0x50, 0x31,
+ 0xff, 0xad, 0xe5, 0x71,
+ 0x02, 0xac, 0x41, 0x31,
+ 0x02, 0x22, 0x51, 0x31,
+ 0x02, 0xa0, 0x5c, 0x33,
+ 0x02, 0xa0, 0x44, 0x32,
+ 0x00, 0xe2, 0xf8, 0x41,
+ 0x01, 0x4d, 0xf1, 0x79,
+ 0x01, 0x62, 0xc1, 0x31,
+ 0x00, 0x93, 0xf1, 0x61,
+ 0xfe, 0x4d, 0x9b, 0x0a,
+ 0x02, 0x60, 0x41, 0x31,
+ 0x00, 0xe2, 0xdc, 0x41,
+ 0x3d, 0x93, 0xc9, 0x29,
+ 0x01, 0xe4, 0xc8, 0x01,
+ 0x01, 0xea, 0xca, 0x01,
+ 0xff, 0xea, 0xda, 0x01,
+ 0x02, 0x20, 0x51, 0x31,
+ 0x02, 0xae, 0x41, 0x32,
+ 0xff, 0x21, 0x01, 0x62,
+ 0xff, 0xea, 0x46, 0x02,
+ 0x02, 0x5c, 0x50, 0x31,
+ 0x40, 0xea, 0x96, 0x00,
+ 0x02, 0x56, 0x20, 0x6e,
+ 0x01, 0x55, 0x20, 0x6e,
+ 0x10, 0x92, 0x0d, 0x7a,
+ 0x10, 0x40, 0x16, 0x6a,
+ 0x01, 0x56, 0x16, 0x7a,
+ 0xff, 0x97, 0x07, 0x78,
+ 0x13, 0xea, 0x6e, 0x59,
+ 0x13, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x06, 0x40,
+ 0xbf, 0x3a, 0x74, 0x08,
+ 0x04, 0x41, 0x1c, 0x7a,
+ 0x08, 0xea, 0x98, 0x00,
+ 0x08, 0x57, 0xae, 0x00,
+ 0x01, 0x93, 0x75, 0x32,
+ 0x01, 0x94, 0x77, 0x32,
+ 0x40, 0xea, 0x72, 0x02,
+ 0x08, 0x3c, 0x78, 0x00,
+ 0x80, 0xea, 0x6e, 0x02,
+ 0x00, 0xe2, 0xf6, 0x5b,
+ 0x01, 0x3c, 0xc1, 0x31,
+ 0x9f, 0xe0, 0x98, 0x7c,
+ 0x80, 0xe0, 0x3c, 0x72,
+ 0xa0, 0xe0, 0x78, 0x72,
+ 0xc0, 0xe0, 0x6e, 0x72,
+ 0xe0, 0xe0, 0xa8, 0x72,
+ 0x01, 0xea, 0x6e, 0x59,
+ 0x01, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x80, 0x39, 0x43, 0x7a,
+ 0x03, 0xea, 0x6e, 0x59,
+ 0x03, 0xea, 0x04, 0x00,
+ 0xee, 0x00, 0x4a, 0x6a,
+ 0x05, 0xea, 0xb4, 0x00,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x02, 0xa8, 0x9c, 0x32,
+ 0x00, 0xe2, 0x88, 0x59,
+ 0xef, 0x96, 0xd5, 0x19,
+ 0x00, 0xe2, 0x5a, 0x52,
+ 0x09, 0x80, 0xe1, 0x30,
+ 0x02, 0xea, 0x36, 0x00,
+ 0xa8, 0xea, 0x32, 0x00,
+ 0x00, 0xe2, 0x60, 0x42,
+ 0x01, 0x96, 0xd1, 0x30,
+ 0x10, 0x80, 0x89, 0x31,
+ 0x20, 0xea, 0x32, 0x00,
+ 0xbf, 0x39, 0x73, 0x0a,
+ 0x10, 0x4c, 0x6a, 0x6a,
+ 0x20, 0x19, 0x62, 0x6a,
+ 0x20, 0x19, 0x66, 0x6a,
+ 0x02, 0x4d, 0x28, 0x6a,
+ 0x40, 0x39, 0x73, 0x02,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x80, 0x39, 0xe9, 0x6a,
+ 0x01, 0x44, 0x10, 0x33,
+ 0x08, 0x92, 0x25, 0x03,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x10, 0xea, 0x80, 0x00,
+ 0x01, 0x37, 0xc5, 0x31,
+ 0x80, 0xe2, 0x94, 0x62,
+ 0x10, 0x92, 0xb9, 0x6a,
+ 0xc0, 0x94, 0xc5, 0x01,
+ 0x40, 0x92, 0x85, 0x6a,
+ 0xbf, 0xe2, 0xc4, 0x09,
+ 0x20, 0x92, 0x99, 0x7a,
+ 0x01, 0xe2, 0x88, 0x30,
+ 0x00, 0xe2, 0xf6, 0x5b,
+ 0xa0, 0x3c, 0xa1, 0x62,
+ 0x23, 0x92, 0x89, 0x08,
+ 0x00, 0xe2, 0xf6, 0x5b,
+ 0xa0, 0x3c, 0xa1, 0x62,
+ 0x00, 0xa8, 0x98, 0x42,
+ 0xff, 0xe2, 0x98, 0x62,
+ 0x00, 0xe2, 0xb8, 0x42,
+ 0x40, 0xea, 0x98, 0x00,
+ 0x01, 0xe2, 0x88, 0x30,
+ 0x00, 0xe2, 0xf6, 0x5b,
+ 0xa0, 0x3c, 0x77, 0x72,
+ 0x40, 0xea, 0x98, 0x00,
+ 0x01, 0x37, 0x95, 0x32,
+ 0x08, 0xea, 0x6e, 0x02,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0xe0, 0xea, 0x12, 0x5c,
+ 0x80, 0xe0, 0xf4, 0x6a,
+ 0x04, 0xe0, 0xa6, 0x73,
+ 0x02, 0xe0, 0xd8, 0x73,
+ 0x00, 0xea, 0x52, 0x73,
+ 0x03, 0xe0, 0xe8, 0x73,
+ 0x23, 0xe0, 0xca, 0x72,
+ 0x08, 0xe0, 0xf0, 0x72,
+ 0x00, 0xe2, 0xf6, 0x5b,
+ 0x07, 0xea, 0x6e, 0x59,
+ 0x07, 0xea, 0x04, 0x00,
+ 0x08, 0x48, 0x29, 0x72,
+ 0x04, 0x48, 0xc7, 0x62,
+ 0x01, 0x49, 0x89, 0x30,
+ 0x00, 0xe2, 0xb8, 0x42,
+ 0x01, 0x44, 0xd4, 0x31,
+ 0x00, 0xe2, 0xb8, 0x42,
+ 0x01, 0x00, 0x6c, 0x32,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x4c, 0x3a, 0xc1, 0x28,
+ 0x01, 0x64, 0xc0, 0x31,
+ 0x00, 0x36, 0x69, 0x59,
+ 0x01, 0x36, 0x01, 0x30,
+ 0x01, 0xe0, 0xee, 0x7a,
+ 0xa0, 0xea, 0x08, 0x5c,
+ 0x01, 0xa0, 0xee, 0x62,
+ 0x01, 0x84, 0xe3, 0x7a,
+ 0x01, 0x95, 0xf1, 0x6a,
+ 0x05, 0xea, 0x6e, 0x59,
+ 0x05, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0xf0, 0x42,
+ 0x03, 0xea, 0x6e, 0x59,
+ 0x03, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0xf0, 0x42,
+ 0x07, 0xea, 0x1a, 0x5c,
+ 0x01, 0x44, 0xd4, 0x31,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x3f, 0xe0, 0x76, 0x0a,
+ 0xc0, 0x3a, 0xc1, 0x09,
+ 0x00, 0x3b, 0x51, 0x01,
+ 0xff, 0xea, 0x52, 0x09,
+ 0x30, 0x3a, 0xc5, 0x09,
+ 0x3d, 0xe2, 0xc4, 0x29,
+ 0xb8, 0xe2, 0xc4, 0x19,
+ 0x01, 0xea, 0xc6, 0x01,
+ 0x02, 0xe2, 0xc8, 0x31,
+ 0x02, 0xec, 0x40, 0x31,
+ 0xff, 0xa1, 0x10, 0x73,
+ 0x02, 0xe8, 0xda, 0x31,
+ 0x02, 0xa0, 0x50, 0x31,
+ 0x00, 0xe2, 0x32, 0x43,
+ 0x80, 0x39, 0x73, 0x02,
+ 0x01, 0x44, 0xd4, 0x31,
+ 0x00, 0xe2, 0xf6, 0x5b,
+ 0x01, 0x39, 0x73, 0x02,
+ 0xe0, 0x3c, 0x4d, 0x63,
+ 0x02, 0x39, 0x73, 0x02,
+ 0x20, 0x46, 0x46, 0x63,
+ 0xff, 0xea, 0x52, 0x09,
+ 0xa8, 0xea, 0x08, 0x5c,
+ 0x04, 0x92, 0x2d, 0x7b,
+ 0x01, 0x3a, 0xc1, 0x31,
+ 0x00, 0x93, 0x2d, 0x63,
+ 0x01, 0x3b, 0xc1, 0x31,
+ 0x00, 0x94, 0x37, 0x73,
+ 0x01, 0xa9, 0x52, 0x11,
+ 0xff, 0xa9, 0x22, 0x6b,
+ 0x00, 0xe2, 0x46, 0x43,
+ 0x10, 0x39, 0x73, 0x02,
+ 0x04, 0x92, 0x47, 0x7b,
+ 0xfb, 0x92, 0x25, 0x0b,
+ 0xff, 0xea, 0x72, 0x0a,
+ 0x01, 0xa4, 0x41, 0x6b,
+ 0x02, 0xa8, 0x9c, 0x32,
+ 0x00, 0xe2, 0x88, 0x59,
+ 0x10, 0x92, 0xf1, 0x7a,
+ 0xff, 0xea, 0x1a, 0x5c,
+ 0x00, 0xe2, 0xf0, 0x42,
+ 0x04, 0xea, 0x6e, 0x59,
+ 0x04, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0xf0, 0x42,
+ 0x04, 0xea, 0x6e, 0x59,
+ 0x04, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x08, 0x92, 0xe9, 0x7a,
+ 0xc0, 0x39, 0x5d, 0x7b,
+ 0x80, 0x39, 0xe9, 0x6a,
+ 0xff, 0x88, 0x5d, 0x6b,
+ 0x40, 0x39, 0xe9, 0x6a,
+ 0x10, 0x92, 0x63, 0x7b,
+ 0x0a, 0xea, 0x6e, 0x59,
+ 0x0a, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x82, 0x5b,
+ 0x00, 0xe2, 0xc2, 0x43,
+ 0x50, 0x4b, 0x6a, 0x6b,
+ 0xbf, 0x3a, 0x74, 0x08,
+ 0x01, 0xe0, 0xf4, 0x31,
+ 0xff, 0xea, 0xc0, 0x09,
+ 0x01, 0x32, 0x65, 0x1a,
+ 0x00, 0x33, 0x67, 0x22,
+ 0x04, 0x4d, 0x9b, 0x02,
+ 0x01, 0xfa, 0xc0, 0x35,
+ 0x02, 0xa8, 0x90, 0x32,
+ 0x02, 0xea, 0xb4, 0x00,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x02, 0x48, 0x51, 0x31,
+ 0xff, 0x90, 0x85, 0x68,
+ 0xff, 0x88, 0x8f, 0x6b,
+ 0x01, 0xa4, 0x8b, 0x6b,
+ 0x02, 0xa4, 0x93, 0x6b,
+ 0x01, 0x84, 0x93, 0x7b,
+ 0x02, 0x28, 0x19, 0x33,
+ 0x02, 0xa8, 0x50, 0x36,
+ 0xff, 0x88, 0x93, 0x73,
+ 0x00, 0xe2, 0x66, 0x5b,
+ 0x02, 0xa8, 0x20, 0x33,
+ 0x04, 0xa4, 0x49, 0x03,
+ 0xff, 0xea, 0x1a, 0x03,
+ 0xff, 0x2d, 0x9f, 0x63,
+ 0x02, 0xa8, 0x58, 0x32,
+ 0x02, 0xa8, 0x5c, 0x36,
+ 0x02, 0xa8, 0x40, 0x31,
+ 0x02, 0x2e, 0x51, 0x31,
+ 0x02, 0xa0, 0x18, 0x33,
+ 0x02, 0xa0, 0x5c, 0x36,
+ 0xc0, 0x39, 0xe9, 0x6a,
+ 0x04, 0x92, 0x25, 0x03,
+ 0x20, 0x92, 0xc3, 0x6b,
+ 0x02, 0xa8, 0x40, 0x31,
+ 0xc0, 0x3a, 0xc1, 0x09,
+ 0x00, 0x3b, 0x51, 0x01,
+ 0xff, 0xea, 0x52, 0x09,
+ 0x30, 0x3a, 0xc5, 0x09,
+ 0x3d, 0xe2, 0xc4, 0x29,
+ 0xb8, 0xe2, 0xc4, 0x19,
+ 0x01, 0xea, 0xc6, 0x01,
+ 0x02, 0xe2, 0xc8, 0x31,
+ 0x02, 0xa0, 0xda, 0x31,
+ 0x02, 0xa0, 0x50, 0x31,
+ 0xf7, 0x57, 0xae, 0x08,
+ 0x08, 0xea, 0x98, 0x00,
+ 0x01, 0x44, 0xd4, 0x31,
+ 0xee, 0x00, 0xcc, 0x6b,
+ 0x02, 0xea, 0xb4, 0x00,
+ 0xc0, 0xea, 0x72, 0x02,
+ 0x09, 0x4c, 0xce, 0x7b,
+ 0x01, 0xea, 0x78, 0x02,
+ 0x08, 0x4c, 0x06, 0x68,
+ 0x0b, 0xea, 0x6e, 0x59,
+ 0x0b, 0xea, 0x04, 0x00,
+ 0x01, 0x44, 0xd4, 0x31,
+ 0x20, 0x39, 0x29, 0x7a,
+ 0x00, 0xe2, 0xe0, 0x5b,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x01, 0x84, 0xe5, 0x7b,
+ 0x01, 0xa4, 0x49, 0x07,
+ 0x08, 0x60, 0x30, 0x33,
+ 0x08, 0x80, 0x41, 0x37,
+ 0xdf, 0x39, 0x73, 0x0a,
+ 0xee, 0x00, 0xf2, 0x6b,
+ 0x05, 0xea, 0xb4, 0x00,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x00, 0xe2, 0x88, 0x59,
+ 0x00, 0xe2, 0xf0, 0x42,
+ 0xff, 0x42, 0x02, 0x6c,
+ 0x01, 0x41, 0xf6, 0x6b,
+ 0x02, 0x41, 0xf6, 0x7b,
+ 0xff, 0x42, 0x02, 0x6c,
+ 0x01, 0x41, 0xf6, 0x6b,
+ 0x02, 0x41, 0xf6, 0x7b,
+ 0xff, 0x42, 0x02, 0x7c,
+ 0x04, 0x4c, 0xf6, 0x6b,
+ 0xe0, 0x41, 0x78, 0x0e,
+ 0x01, 0x44, 0xd4, 0x31,
+ 0xff, 0x42, 0x0a, 0x7c,
+ 0x04, 0x4c, 0x0a, 0x6c,
+ 0xe0, 0x41, 0x78, 0x0a,
+ 0xe0, 0x3c, 0x29, 0x62,
+ 0xff, 0xea, 0xca, 0x09,
+ 0x01, 0xe2, 0xc8, 0x31,
+ 0x01, 0x46, 0xda, 0x35,
+ 0x01, 0x44, 0xd4, 0x35,
+ 0x10, 0xea, 0x80, 0x00,
+ 0x01, 0xe2, 0x6e, 0x36,
+ 0x04, 0xa6, 0x22, 0x7c,
+ 0xff, 0xea, 0x5a, 0x09,
+ 0xff, 0xea, 0x4c, 0x0d,
+ 0x01, 0xa6, 0x4e, 0x6c,
+ 0x10, 0xad, 0x84, 0x78,
+ 0x80, 0xad, 0x46, 0x6c,
+ 0x08, 0xad, 0x84, 0x68,
+ 0x20, 0x19, 0x3a, 0x7c,
+ 0x80, 0xea, 0xb2, 0x01,
+ 0x11, 0x00, 0x00, 0x10,
+ 0x02, 0xa6, 0x36, 0x7c,
+ 0xff, 0xea, 0xb2, 0x0d,
+ 0x11, 0x00, 0x00, 0x10,
+ 0xff, 0xea, 0xb2, 0x09,
+ 0x04, 0x84, 0xf9, 0x30,
+ 0x00, 0xea, 0x08, 0x81,
+ 0xff, 0xea, 0xd4, 0x09,
+ 0x02, 0x84, 0xf9, 0x88,
+ 0x0d, 0xea, 0x5a, 0x01,
+ 0x04, 0xa6, 0x4c, 0x05,
+ 0x04, 0xa6, 0x84, 0x78,
+ 0xff, 0xea, 0x5a, 0x09,
+ 0x03, 0x84, 0x59, 0x89,
+ 0x03, 0xea, 0x4c, 0x01,
+ 0x80, 0x1a, 0x84, 0x78,
+ 0x08, 0x19, 0x84, 0x78,
+ 0x08, 0xb0, 0xe0, 0x30,
+ 0x04, 0xb0, 0xe0, 0x30,
+ 0x03, 0xb0, 0xf0, 0x30,
+ 0x01, 0xb0, 0x06, 0x33,
+ 0x7f, 0x83, 0xe9, 0x08,
+ 0x04, 0xac, 0x58, 0x19,
+ 0xff, 0xea, 0xc0, 0x09,
+ 0x04, 0x84, 0x09, 0x9b,
+ 0x00, 0x85, 0x0b, 0x23,
+ 0x00, 0x86, 0x0d, 0x23,
+ 0x00, 0x87, 0x0f, 0x23,
+ 0x01, 0x84, 0xc5, 0x31,
+ 0x80, 0x83, 0x71, 0x7c,
+ 0x02, 0xe2, 0xc4, 0x01,
+ 0xff, 0xea, 0x4c, 0x09,
+ 0x01, 0xe2, 0x36, 0x30,
+ 0xc8, 0x19, 0x32, 0x00,
+ 0x88, 0x19, 0x32, 0x00,
+ 0x01, 0xac, 0xd4, 0x99,
+ 0x00, 0xe2, 0x84, 0x50,
+ 0xfe, 0xa6, 0x4c, 0x0d,
+ 0x0b, 0x98, 0xe1, 0x30,
+ 0xfd, 0xa4, 0x49, 0x09,
+ 0x80, 0xa3, 0x85, 0x7c,
+ 0x02, 0xa4, 0x48, 0x01,
+ 0x01, 0xa4, 0x36, 0x30,
+ 0xa8, 0xea, 0x32, 0x00,
+ 0xfd, 0xa4, 0x49, 0x0b,
+ 0x05, 0xa3, 0x07, 0x33,
+ 0x80, 0x83, 0x91, 0x6c,
+ 0x02, 0xea, 0x4c, 0x05,
+ 0xff, 0xea, 0x4c, 0x0d,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x02, 0xa6, 0x24, 0x6c,
+ 0x80, 0xf9, 0xf2, 0x05,
+ 0xc0, 0x39, 0x9f, 0x7c,
+ 0x03, 0xea, 0x6e, 0x59,
+ 0x03, 0xea, 0x04, 0x00,
+ 0x20, 0x39, 0xc3, 0x7c,
+ 0x01, 0x84, 0xa9, 0x6c,
+ 0x06, 0xea, 0x6e, 0x59,
+ 0x06, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0xc6, 0x44,
+ 0x01, 0x00, 0x6c, 0x32,
+ 0xee, 0x00, 0xb2, 0x6c,
+ 0x05, 0xea, 0xb4, 0x00,
+ 0x33, 0xea, 0x68, 0x59,
+ 0x33, 0xea, 0x00, 0x00,
+ 0x80, 0x3d, 0x7a, 0x00,
+ 0xfc, 0x42, 0xb4, 0x7c,
+ 0x7f, 0x3d, 0x7a, 0x08,
+ 0x00, 0x36, 0x69, 0x59,
+ 0x01, 0x36, 0x01, 0x30,
+ 0x09, 0xea, 0x6e, 0x59,
+ 0x09, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x01, 0xa4, 0xa9, 0x6c,
+ 0x00, 0xe2, 0x7c, 0x5c,
+ 0x20, 0x39, 0x73, 0x02,
+ 0x01, 0x00, 0x6c, 0x32,
+ 0x02, 0xa6, 0xce, 0x7c,
+ 0x00, 0xe2, 0x92, 0x5c,
+ 0x00, 0xe2, 0x76, 0x58,
+ 0x00, 0xe2, 0x86, 0x58,
+ 0x00, 0xe2, 0x5a, 0x58,
+ 0x00, 0x36, 0x69, 0x59,
+ 0x01, 0x36, 0x01, 0x30,
+ 0x20, 0x19, 0xce, 0x6c,
+ 0x00, 0xe2, 0xfe, 0x5c,
+ 0x04, 0x19, 0xe8, 0x6c,
+ 0x02, 0x19, 0x32, 0x00,
+ 0x01, 0x84, 0xe9, 0x7c,
+ 0x01, 0x1b, 0xe2, 0x7c,
+ 0x01, 0x1a, 0xe8, 0x6c,
+ 0x00, 0xe2, 0x98, 0x44,
+ 0x80, 0x4b, 0xee, 0x6c,
+ 0x01, 0x4c, 0xea, 0x7c,
+ 0x03, 0x42, 0x98, 0x6c,
+ 0x00, 0xe2, 0x1e, 0x5c,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0x04, 0x39, 0x29, 0x7a,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x08, 0x5d, 0x06, 0x6d,
+ 0x00, 0xe2, 0x76, 0x58,
+ 0x00, 0x36, 0x69, 0x59,
+ 0x01, 0x36, 0x01, 0x30,
+ 0x02, 0x1b, 0xf6, 0x7c,
+ 0x08, 0x5d, 0x04, 0x7d,
+ 0x03, 0x68, 0x00, 0x37,
+ 0x01, 0x84, 0x09, 0x07,
+ 0x80, 0x1b, 0x10, 0x7d,
+ 0x80, 0x84, 0x11, 0x6d,
+ 0xff, 0x85, 0x0b, 0x1b,
+ 0xff, 0x86, 0x0d, 0x23,
+ 0xff, 0x87, 0x0f, 0x23,
+ 0xf8, 0x1b, 0x08, 0x0b,
+ 0xff, 0xea, 0x06, 0x0b,
+ 0x03, 0x68, 0x00, 0x37,
+ 0x00, 0xe2, 0xd6, 0x58,
+ 0x10, 0xea, 0x18, 0x00,
+ 0xf9, 0xd9, 0xb2, 0x0d,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x01, 0x52, 0x48, 0x31,
+ 0x20, 0xa4, 0x3a, 0x7d,
+ 0x20, 0x5b, 0x3a, 0x7d,
+ 0x80, 0xf9, 0x48, 0x7d,
+ 0x02, 0xea, 0xb4, 0x00,
+ 0x11, 0x00, 0x00, 0x10,
+ 0x04, 0x19, 0x54, 0x7d,
+ 0xdf, 0x19, 0x32, 0x08,
+ 0x60, 0x5b, 0x54, 0x6d,
+ 0x01, 0x4c, 0x2e, 0x7d,
+ 0x20, 0x19, 0x32, 0x00,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x02, 0xea, 0xb4, 0x00,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x10, 0x5b, 0x4c, 0x6d,
+ 0x08, 0x5b, 0x56, 0x6d,
+ 0x20, 0x5b, 0x46, 0x6d,
+ 0x02, 0x5b, 0x76, 0x6d,
+ 0x0e, 0xea, 0x6e, 0x59,
+ 0x0e, 0xea, 0x04, 0x00,
+ 0x80, 0xf9, 0x36, 0x6d,
+ 0xdf, 0x5c, 0xb8, 0x08,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x01, 0xa4, 0x37, 0x6e,
+ 0x00, 0xe2, 0x7c, 0x5c,
+ 0x00, 0xe2, 0x80, 0x5d,
+ 0x01, 0x90, 0x21, 0x1b,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x00, 0xe2, 0x66, 0x5b,
+ 0xf3, 0x96, 0xd5, 0x19,
+ 0x00, 0xe2, 0x64, 0x55,
+ 0x80, 0x96, 0x65, 0x6d,
+ 0x0f, 0xea, 0x6e, 0x59,
+ 0x0f, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x6c, 0x45,
+ 0x04, 0x8c, 0xe1, 0x30,
+ 0x01, 0xea, 0xf2, 0x00,
+ 0x02, 0xea, 0x36, 0x00,
+ 0xa8, 0xea, 0x32, 0x00,
+ 0xff, 0x97, 0x73, 0x7d,
+ 0x14, 0xea, 0x6e, 0x59,
+ 0x14, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0xe2, 0x5d,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x09, 0x80, 0xe1, 0x30,
+ 0x02, 0xea, 0x36, 0x00,
+ 0xa8, 0xea, 0x32, 0x00,
+ 0x00, 0xe2, 0xda, 0x5d,
+ 0x01, 0xd9, 0xb2, 0x05,
+ 0x02, 0xa6, 0x90, 0x7d,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x20, 0x5b, 0x9e, 0x6d,
+ 0xfc, 0x42, 0x8a, 0x7d,
+ 0x10, 0x40, 0x8c, 0x6d,
+ 0x20, 0x4d, 0x8e, 0x7d,
+ 0x08, 0x5d, 0x9e, 0x6d,
+ 0x02, 0xa6, 0x24, 0x6c,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x20, 0x5b, 0x9e, 0x6d,
+ 0x01, 0x1b, 0xbe, 0x6d,
+ 0xfc, 0x42, 0x9a, 0x7d,
+ 0x10, 0x40, 0x9c, 0x6d,
+ 0x20, 0x4d, 0x84, 0x78,
+ 0x08, 0x5d, 0x84, 0x78,
+ 0x02, 0x19, 0x32, 0x00,
+ 0x01, 0x5b, 0x40, 0x31,
+ 0x00, 0xe2, 0xfe, 0x5c,
+ 0x00, 0xe2, 0xe0, 0x5b,
+ 0x20, 0xea, 0xb6, 0x00,
+ 0x00, 0xe2, 0x1e, 0x5c,
+ 0x20, 0x5c, 0xb8, 0x00,
+ 0x04, 0x19, 0xb4, 0x6d,
+ 0x01, 0x1a, 0xb4, 0x6d,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x01, 0x1a, 0x84, 0x78,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0x20, 0xa0, 0x18, 0x7e,
+ 0xff, 0x90, 0x21, 0x1b,
+ 0x08, 0x92, 0x77, 0x6b,
+ 0x02, 0xea, 0xb4, 0x04,
+ 0x01, 0xa4, 0x49, 0x03,
+ 0x40, 0x5b, 0xce, 0x6d,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x40, 0x5b, 0xce, 0x6d,
+ 0x04, 0x5d, 0x38, 0x7e,
+ 0x01, 0x1a, 0x38, 0x7e,
+ 0x20, 0x4d, 0x84, 0x78,
+ 0x40, 0x5b, 0x18, 0x7e,
+ 0x04, 0x5d, 0x38, 0x7e,
+ 0x01, 0x1a, 0x38, 0x7e,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0xff, 0x90, 0x21, 0x1b,
+ 0x08, 0x92, 0x77, 0x6b,
+ 0x02, 0xea, 0xb4, 0x04,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x01, 0x1b, 0x84, 0x78,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0x02, 0xea, 0xb4, 0x04,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x01, 0x1b, 0xf6, 0x6d,
+ 0x40, 0x5b, 0x04, 0x7e,
+ 0x01, 0x1b, 0xf6, 0x6d,
+ 0x02, 0x19, 0x32, 0x00,
+ 0x01, 0x1a, 0x84, 0x78,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0xff, 0xea, 0x10, 0x03,
+ 0x08, 0x92, 0x25, 0x03,
+ 0x00, 0xe2, 0x76, 0x43,
+ 0x01, 0x1a, 0x00, 0x7e,
+ 0x40, 0x5b, 0xfc, 0x7d,
+ 0x01, 0x1a, 0xea, 0x6d,
+ 0xfc, 0x42, 0x84, 0x78,
+ 0x01, 0x1a, 0x04, 0x6e,
+ 0x10, 0xea, 0x6e, 0x59,
+ 0x10, 0xea, 0x04, 0x00,
+ 0xfc, 0x42, 0x84, 0x78,
+ 0x10, 0x40, 0x0a, 0x6e,
+ 0x20, 0x4d, 0x84, 0x78,
+ 0x40, 0x5b, 0xea, 0x6d,
+ 0x01, 0x1a, 0x84, 0x78,
+ 0x01, 0x90, 0x21, 0x1b,
+ 0x30, 0x3f, 0xc0, 0x09,
+ 0x30, 0xe0, 0x84, 0x60,
+ 0x40, 0x4b, 0x84, 0x68,
+ 0xff, 0xea, 0x52, 0x01,
+ 0xee, 0x00, 0x20, 0x6e,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0xff, 0x90, 0x21, 0x1b,
+ 0x02, 0xea, 0xb4, 0x00,
+ 0x20, 0xea, 0x9a, 0x00,
+ 0x04, 0x41, 0x26, 0x7e,
+ 0x08, 0xea, 0x98, 0x00,
+ 0x08, 0x57, 0xae, 0x00,
+ 0xf3, 0x42, 0x30, 0x6e,
+ 0x12, 0xea, 0x6e, 0x59,
+ 0x12, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x0d, 0xea, 0x6e, 0x59,
+ 0x0d, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x28, 0x42,
+ 0x01, 0x90, 0x21, 0x1b,
+ 0x11, 0xea, 0x6e, 0x59,
+ 0x11, 0xea, 0x04, 0x00,
+ 0x00, 0xe2, 0x66, 0x5b,
+ 0x08, 0x5a, 0xb4, 0x00,
+ 0x00, 0xe2, 0x5e, 0x5e,
+ 0xa8, 0xea, 0x32, 0x00,
+ 0x00, 0xe2, 0x60, 0x59,
+ 0x80, 0x1a, 0x4c, 0x7e,
+ 0x00, 0xe2, 0x5e, 0x5e,
+ 0x80, 0x19, 0x32, 0x00,
+ 0x40, 0x5b, 0x52, 0x6e,
+ 0x08, 0x5a, 0x52, 0x7e,
+ 0x20, 0x4d, 0x84, 0x78,
+ 0x02, 0x84, 0x09, 0x03,
+ 0x40, 0x5b, 0x18, 0x7e,
+ 0xff, 0x90, 0x21, 0x1b,
+ 0x80, 0xf9, 0xf2, 0x01,
+ 0x08, 0x92, 0x77, 0x6b,
+ 0x02, 0xea, 0xb4, 0x04,
+ 0x01, 0x40, 0xe1, 0x30,
+ 0x05, 0x41, 0xe3, 0x98,
+ 0x01, 0xe0, 0xf4, 0x31,
+ 0xff, 0xea, 0xc0, 0x09,
+ 0x00, 0x42, 0xe5, 0x20,
+ 0x00, 0x43, 0xe7, 0x20,
+ 0x01, 0xfa, 0xc0, 0x31,
+ 0x04, 0xea, 0xe8, 0x30,
+ 0xff, 0xea, 0xf0, 0x08,
+ 0x02, 0xea, 0xf2, 0x00,
+ 0xff, 0xea, 0xf4, 0x0c
+};
+
+typedef int ahd_patch_func_t (struct ahd_softc *ahd);
+static ahd_patch_func_t ahd_patch23_func;
+
+static int
+ahd_patch23_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch22_func;
+
+static int
+ahd_patch22_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0);
+}
+
+static ahd_patch_func_t ahd_patch21_func;
+
+static int
+ahd_patch21_func(struct ahd_softc *ahd)
+{
+ return ((ahd->flags & AHD_INITIATORROLE) != 0);
+}
+
+static ahd_patch_func_t ahd_patch20_func;
+
+static int
+ahd_patch20_func(struct ahd_softc *ahd)
+{
+ return ((ahd->flags & AHD_TARGETROLE) != 0);
+}
+
+static ahd_patch_func_t ahd_patch19_func;
+
+static int
+ahd_patch19_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch18_func;
+
+static int
+ahd_patch18_func(struct ahd_softc *ahd)
+{
+ return ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0);
+}
+
+static ahd_patch_func_t ahd_patch17_func;
+
+static int
+ahd_patch17_func(struct ahd_softc *ahd)
+{
+ return ((ahd->flags & AHD_39BIT_ADDRESSING) != 0);
+}
+
+static ahd_patch_func_t ahd_patch16_func;
+
+static int
+ahd_patch16_func(struct ahd_softc *ahd)
+{
+ return ((ahd->flags & AHD_64BIT_ADDRESSING) != 0);
+}
+
+static ahd_patch_func_t ahd_patch15_func;
+
+static int
+ahd_patch15_func(struct ahd_softc *ahd)
+{
+ return ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0);
+}
+
+static ahd_patch_func_t ahd_patch14_func;
+
+static int
+ahd_patch14_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch13_func;
+
+static int
+ahd_patch13_func(struct ahd_softc *ahd)
+{
+ return ((ahd->features & AHD_RTI) == 0);
+}
+
+static ahd_patch_func_t ahd_patch12_func;
+
+static int
+ahd_patch12_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch11_func;
+
+static int
+ahd_patch11_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0);
+}
+
+static ahd_patch_func_t ahd_patch10_func;
+
+static int
+ahd_patch10_func(struct ahd_softc *ahd)
+{
+ return ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch9_func;
+
+static int
+ahd_patch9_func(struct ahd_softc *ahd)
+{
+ return ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0);
+}
+
+static ahd_patch_func_t ahd_patch8_func;
+
+static int
+ahd_patch8_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch7_func;
+
+static int
+ahd_patch7_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch6_func;
+
+static int
+ahd_patch6_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch5_func;
+
+static int
+ahd_patch5_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch4_func;
+
+static int
+ahd_patch4_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_PKT_LUN_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch3_func;
+
+static int
+ahd_patch3_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_FAINT_LED_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch2_func;
+
+static int
+ahd_patch2_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_SET_MODE_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch1_func;
+
+static int
+ahd_patch1_func(struct ahd_softc *ahd)
+{
+ return ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch0_func;
+
+static int
+ahd_patch0_func(struct ahd_softc *ahd)
+{
+ return (0);
+}
+
+static const struct patch {
+ ahd_patch_func_t *patch_func;
+ uint32_t begin :10,
+ skip_instr :10,
+ skip_patch :12;
+} patches[] = {
+ { ahd_patch1_func, 0, 3, 3 },
+ { ahd_patch1_func, 1, 1, 2 },
+ { ahd_patch0_func, 2, 1, 1 },
+ { ahd_patch1_func, 3, 3, 3 },
+ { ahd_patch1_func, 4, 1, 2 },
+ { ahd_patch0_func, 5, 1, 1 },
+ { ahd_patch2_func, 6, 1, 2 },
+ { ahd_patch0_func, 7, 1, 1 },
+ { ahd_patch3_func, 36, 5, 1 },
+ { ahd_patch2_func, 45, 1, 2 },
+ { ahd_patch0_func, 46, 1, 1 },
+ { ahd_patch1_func, 53, 1, 2 },
+ { ahd_patch0_func, 54, 1, 1 },
+ { ahd_patch2_func, 59, 1, 2 },
+ { ahd_patch0_func, 60, 1, 1 },
+ { ahd_patch2_func, 63, 1, 2 },
+ { ahd_patch0_func, 64, 1, 1 },
+ { ahd_patch2_func, 67, 1, 2 },
+ { ahd_patch0_func, 68, 1, 1 },
+ { ahd_patch4_func, 115, 1, 1 },
+ { ahd_patch2_func, 180, 3, 1 },
+ { ahd_patch1_func, 183, 2, 1 },
+ { ahd_patch5_func, 185, 1, 1 },
+ { ahd_patch2_func, 194, 1, 2 },
+ { ahd_patch0_func, 195, 1, 1 },
+ { ahd_patch6_func, 196, 2, 2 },
+ { ahd_patch0_func, 198, 6, 3 },
+ { ahd_patch2_func, 201, 1, 2 },
+ { ahd_patch0_func, 202, 1, 1 },
+ { ahd_patch2_func, 205, 1, 2 },
+ { ahd_patch0_func, 206, 1, 1 },
+ { ahd_patch3_func, 208, 1, 1 },
+ { ahd_patch7_func, 209, 3, 1 },
+ { ahd_patch3_func, 218, 1, 1 },
+ { ahd_patch5_func, 219, 16, 2 },
+ { ahd_patch0_func, 235, 1, 1 },
+ { ahd_patch8_func, 260, 2, 1 },
+ { ahd_patch1_func, 264, 1, 2 },
+ { ahd_patch0_func, 265, 1, 1 },
+ { ahd_patch7_func, 268, 3, 1 },
+ { ahd_patch1_func, 283, 1, 2 },
+ { ahd_patch0_func, 284, 1, 1 },
+ { ahd_patch1_func, 287, 1, 2 },
+ { ahd_patch0_func, 288, 1, 1 },
+ { ahd_patch2_func, 291, 1, 2 },
+ { ahd_patch0_func, 292, 1, 1 },
+ { ahd_patch9_func, 305, 2, 2 },
+ { ahd_patch0_func, 307, 1, 1 },
+ { ahd_patch1_func, 349, 1, 2 },
+ { ahd_patch0_func, 350, 1, 1 },
+ { ahd_patch2_func, 358, 1, 2 },
+ { ahd_patch0_func, 359, 1, 1 },
+ { ahd_patch2_func, 362, 1, 2 },
+ { ahd_patch0_func, 363, 1, 1 },
+ { ahd_patch1_func, 369, 1, 2 },
+ { ahd_patch0_func, 370, 1, 1 },
+ { ahd_patch1_func, 372, 1, 2 },
+ { ahd_patch0_func, 373, 1, 1 },
+ { ahd_patch10_func, 392, 1, 1 },
+ { ahd_patch10_func, 395, 1, 1 },
+ { ahd_patch10_func, 397, 1, 1 },
+ { ahd_patch10_func, 409, 1, 1 },
+ { ahd_patch1_func, 419, 1, 2 },
+ { ahd_patch0_func, 420, 1, 1 },
+ { ahd_patch1_func, 422, 1, 2 },
+ { ahd_patch0_func, 423, 1, 1 },
+ { ahd_patch1_func, 431, 1, 2 },
+ { ahd_patch0_func, 432, 1, 1 },
+ { ahd_patch2_func, 445, 1, 2 },
+ { ahd_patch0_func, 446, 1, 1 },
+ { ahd_patch11_func, 482, 1, 1 },
+ { ahd_patch1_func, 490, 1, 2 },
+ { ahd_patch0_func, 491, 1, 1 },
+ { ahd_patch2_func, 503, 1, 2 },
+ { ahd_patch0_func, 504, 1, 1 },
+ { ahd_patch12_func, 507, 6, 2 },
+ { ahd_patch0_func, 513, 1, 1 },
+ { ahd_patch13_func, 534, 7, 1 },
+ { ahd_patch14_func, 543, 1, 1 },
+ { ahd_patch15_func, 552, 1, 1 },
+ { ahd_patch16_func, 553, 1, 2 },
+ { ahd_patch0_func, 554, 1, 1 },
+ { ahd_patch17_func, 557, 1, 1 },
+ { ahd_patch16_func, 558, 1, 1 },
+ { ahd_patch18_func, 569, 1, 2 },
+ { ahd_patch0_func, 570, 1, 1 },
+ { ahd_patch1_func, 589, 1, 2 },
+ { ahd_patch0_func, 590, 1, 1 },
+ { ahd_patch1_func, 593, 1, 2 },
+ { ahd_patch0_func, 594, 1, 1 },
+ { ahd_patch2_func, 599, 1, 2 },
+ { ahd_patch0_func, 600, 1, 1 },
+ { ahd_patch2_func, 604, 1, 2 },
+ { ahd_patch0_func, 605, 1, 1 },
+ { ahd_patch1_func, 606, 1, 2 },
+ { ahd_patch0_func, 607, 1, 1 },
+ { ahd_patch2_func, 618, 1, 2 },
+ { ahd_patch0_func, 619, 1, 1 },
+ { ahd_patch19_func, 623, 1, 1 },
+ { ahd_patch20_func, 628, 1, 1 },
+ { ahd_patch21_func, 629, 2, 1 },
+ { ahd_patch20_func, 633, 1, 2 },
+ { ahd_patch0_func, 634, 1, 1 },
+ { ahd_patch2_func, 637, 1, 2 },
+ { ahd_patch0_func, 638, 1, 1 },
+ { ahd_patch2_func, 653, 1, 2 },
+ { ahd_patch0_func, 654, 1, 1 },
+ { ahd_patch13_func, 655, 14, 1 },
+ { ahd_patch1_func, 673, 1, 2 },
+ { ahd_patch0_func, 674, 1, 1 },
+ { ahd_patch13_func, 675, 1, 1 },
+ { ahd_patch1_func, 687, 1, 2 },
+ { ahd_patch0_func, 688, 1, 1 },
+ { ahd_patch1_func, 695, 1, 2 },
+ { ahd_patch0_func, 696, 1, 1 },
+ { ahd_patch19_func, 719, 1, 1 },
+ { ahd_patch19_func, 757, 1, 1 },
+ { ahd_patch1_func, 768, 1, 2 },
+ { ahd_patch0_func, 769, 1, 1 },
+ { ahd_patch7_func, 785, 3, 1 },
+ { ahd_patch1_func, 789, 1, 2 },
+ { ahd_patch0_func, 790, 1, 1 },
+ { ahd_patch1_func, 792, 1, 2 },
+ { ahd_patch0_func, 793, 1, 1 },
+ { ahd_patch1_func, 796, 1, 2 },
+ { ahd_patch0_func, 797, 1, 1 },
+ { ahd_patch22_func, 799, 1, 2 },
+ { ahd_patch0_func, 800, 2, 1 },
+ { ahd_patch23_func, 803, 4, 2 },
+ { ahd_patch0_func, 807, 1, 1 },
+ { ahd_patch23_func, 815, 11, 1 }
+};
+
+static const struct cs {
+ uint16_t begin;
+ uint16_t end;
+} critical_sections[] = {
+ { 17, 30 },
+ { 47, 58 },
+ { 61, 63 },
+ { 65, 66 },
+ { 72, 92 },
+ { 110, 142 },
+ { 143, 180 },
+ { 185, 193 },
+ { 218, 274 },
+ { 435, 443 },
+ { 453, 455 },
+ { 458, 467 },
+ { 719, 749 },
+ { 759, 763 }
+};
+
+static const int num_critical_sections = sizeof(critical_sections)
+ / sizeof(*critical_sections);
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
new file mode 100644
index 000000000..f69577464
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -0,0 +1,1284 @@
+/*
+ * Core definitions and data structures shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC7XXX_H_
+#define _AIC7XXX_H_
+
+/* Register Definitions */
+#include "aic7xxx_reg.h"
+
+/************************* Forward Declarations *******************************/
+struct ahc_platform_data;
+struct scb_platform_data;
+struct seeprom_descriptor;
+
+/****************************** Useful Macros *********************************/
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define ALL_CHANNELS '\0'
+#define ALL_TARGETS_MASK 0xFFFF
+#define INITIATOR_WILDCARD (~0)
+
+#define SCSIID_TARGET(ahc, scsiid) \
+ (((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \
+ >> TID_SHIFT)
+#define SCSIID_OUR_ID(scsiid) \
+ ((scsiid) & OID)
+#define SCSIID_CHANNEL(ahc, scsiid) \
+ ((((ahc)->features & AHC_TWIN) != 0) \
+ ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \
+ : 'A')
+#define SCB_IS_SCSIBUS_B(ahc, scb) \
+ (SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B')
+#define SCB_GET_OUR_ID(scb) \
+ SCSIID_OUR_ID((scb)->hscb->scsiid)
+#define SCB_GET_TARGET(ahc, scb) \
+ SCSIID_TARGET((ahc), (scb)->hscb->scsiid)
+#define SCB_GET_CHANNEL(ahc, scb) \
+ SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid)
+#define SCB_GET_LUN(scb) \
+ ((scb)->hscb->lun & LID)
+#define SCB_GET_TARGET_OFFSET(ahc, scb) \
+ (SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 8 : 0))
+#define SCB_GET_TARGET_MASK(ahc, scb) \
+ (0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb)))
+#ifdef AHC_DEBUG
+#define SCB_IS_SILENT(scb) \
+ ((ahc_debug & AHC_SHOW_MASKED_ERRORS) == 0 \
+ && (((scb)->flags & SCB_SILENT) != 0))
+#else
+#define SCB_IS_SILENT(scb) \
+ (((scb)->flags & SCB_SILENT) != 0)
+#endif
+#define TCL_TARGET_OFFSET(tcl) \
+ ((((tcl) >> 4) & TID) >> 4)
+#define TCL_LUN(tcl) \
+ (tcl & (AHC_NUM_LUNS - 1))
+#define BUILD_TCL(scsiid, lun) \
+ ((lun) | (((scsiid) & TID) << 4))
+
+#ifndef AHC_TARGET_MODE
+#undef AHC_TMODE_ENABLE
+#define AHC_TMODE_ENABLE 0
+#endif
+
+/**************************** Driver Constants ********************************/
+/*
+ * The maximum number of supported targets.
+ */
+#define AHC_NUM_TARGETS 16
+
+/*
+ * The maximum number of supported luns.
+ * The identify message only supports 64 luns in SPI3.
+ * You can have 2^64 luns when information unit transfers are enabled,
+ * but it is doubtful this driver will ever support IUTs.
+ */
+#define AHC_NUM_LUNS 64
+
+/*
+ * The maximum transfer per S/G segment.
+ */
+#define AHC_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */
+
+/*
+ * The maximum amount of SCB storage in hardware on a controller.
+ * This value represents an upper bound. Controllers vary in the number
+ * they actually support.
+ */
+#define AHC_SCB_MAX 255
+
+/*
+ * The maximum number of concurrent transactions supported per driver instance.
+ * Sequencer Control Blocks (SCBs) store per-transaction information. Although
+ * the space for SCBs on the host adapter varies by model, the driver will
+ * page the SCBs between host and controller memory as needed. We are limited
+ * to 253 because:
+ * 1) The 8bit nature of the RISC engine holds us to an 8bit value.
+ * 2) We reserve one value, 255, to represent the invalid element.
+ * 3) Our input queue scheme requires one SCB to always be reserved
+ * in advance of queuing any SCBs. This takes us down to 254.
+ * 4) To handle our output queue correctly on machines that only
+ * support 32bit stores, we must clear the array 4 bytes at a
+ * time. To avoid colliding with a DMA write from the sequencer,
+ * we must be sure that 4 slots are empty when we write to clear
+ * the queue. This reduces us to 253 SCBs: 1 that just completed
+ * and the known three additional empty slots in the queue that
+ * precede it.
+ */
+#define AHC_MAX_QUEUE 253
+
+/*
+ * The maximum amount of SCB storage we allocate in host memory. This
+ * number should reflect the 1 additional SCB we require to handle our
+ * qinfifo mechanism.
+ */
+#define AHC_SCB_MAX_ALLOC (AHC_MAX_QUEUE+1)
+
+/*
+ * Ring Buffer of incoming target commands.
+ * We allocate 256 to simplify the logic in the sequencer
+ * by using the natural wrap point of an 8bit counter.
+ */
+#define AHC_TMODE_CMDS 256
+
+/* Reset line assertion time in us */
+#define AHC_BUSRESET_DELAY 25
+
+/******************* Chip Characteristics/Operating Settings *****************/
+/*
+ * Chip Type
+ * The chip order is from least sophisticated to most sophisticated.
+ */
+typedef enum {
+ AHC_NONE = 0x0000,
+ AHC_CHIPID_MASK = 0x00FF,
+ AHC_AIC7770 = 0x0001,
+ AHC_AIC7850 = 0x0002,
+ AHC_AIC7855 = 0x0003,
+ AHC_AIC7859 = 0x0004,
+ AHC_AIC7860 = 0x0005,
+ AHC_AIC7870 = 0x0006,
+ AHC_AIC7880 = 0x0007,
+ AHC_AIC7895 = 0x0008,
+ AHC_AIC7895C = 0x0009,
+ AHC_AIC7890 = 0x000a,
+ AHC_AIC7896 = 0x000b,
+ AHC_AIC7892 = 0x000c,
+ AHC_AIC7899 = 0x000d,
+ AHC_VL = 0x0100, /* Bus type VL */
+ AHC_EISA = 0x0200, /* Bus type EISA */
+ AHC_PCI = 0x0400, /* Bus type PCI */
+ AHC_BUS_MASK = 0x0F00
+} ahc_chip;
+
+/*
+ * Features available in each chip type.
+ */
+typedef enum {
+ AHC_FENONE = 0x00000,
+ AHC_ULTRA = 0x00001, /* Supports 20MHz Transfers */
+ AHC_ULTRA2 = 0x00002, /* Supports 40MHz Transfers */
+ AHC_WIDE = 0x00004, /* Wide Channel */
+ AHC_TWIN = 0x00008, /* Twin Channel */
+ AHC_MORE_SRAM = 0x00010, /* 80 bytes instead of 64 */
+ AHC_CMD_CHAN = 0x00020, /* Has a Command DMA Channel */
+ AHC_QUEUE_REGS = 0x00040, /* Has Queue management registers */
+ AHC_SG_PRELOAD = 0x00080, /* Can perform auto-SG preload */
+ AHC_SPIOCAP = 0x00100, /* Has a Serial Port I/O Cap Register */
+ AHC_MULTI_TID = 0x00200, /* Has bitmask of TIDs for select-in */
+ AHC_HS_MAILBOX = 0x00400, /* Has HS_MAILBOX register */
+ AHC_DT = 0x00800, /* Double Transition transfers */
+ AHC_NEW_TERMCTL = 0x01000, /* Newer termination scheme */
+ AHC_MULTI_FUNC = 0x02000, /* Multi-Function Twin Channel Device */
+ AHC_LARGE_SCBS = 0x04000, /* 64byte SCBs */
+ AHC_AUTORATE = 0x08000, /* Automatic update of SCSIRATE/OFFSET*/
+ AHC_AUTOPAUSE = 0x10000, /* Automatic pause on register access */
+ AHC_TARGETMODE = 0x20000, /* Has tested target mode support */
+ AHC_MULTIROLE = 0x40000, /* Space for two roles at a time */
+ AHC_REMOVABLE = 0x80000, /* Hot-Swap supported */
+ AHC_HVD = 0x100000, /* HVD rather than SE */
+ AHC_AIC7770_FE = AHC_FENONE,
+ /*
+ * The real 7850 does not support Ultra modes, but there are
+ * several cards that use the generic 7850 PCI ID even though
+ * they are using an Ultra capable chip (7859/7860). We start
+ * out with the AHC_ULTRA feature set and then check the DEVSTATUS
+ * register to determine if the capability is really present.
+ */
+ AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA,
+ AHC_AIC7860_FE = AHC_AIC7850_FE,
+ AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE,
+ AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA,
+ /*
+ * Although we have space for both the initiator and
+ * target roles on ULTRA2 chips, we currently disable
+ * the initiator role to allow multi-scsi-id target mode
+ * configurations. We can only respond on the same SCSI
+ * ID as our initiator role if we allow initiator operation.
+ * At some point, we should add a configuration knob to
+ * allow both roles to be loaded.
+ */
+ AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2
+ |AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_MULTI_TID
+ |AHC_HS_MAILBOX|AHC_NEW_TERMCTL|AHC_LARGE_SCBS
+ |AHC_TARGETMODE,
+ AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_DT|AHC_AUTORATE|AHC_AUTOPAUSE,
+ AHC_AIC7895_FE = AHC_AIC7880_FE|AHC_MORE_SRAM|AHC_AUTOPAUSE
+ |AHC_CMD_CHAN|AHC_MULTI_FUNC|AHC_LARGE_SCBS,
+ AHC_AIC7895C_FE = AHC_AIC7895_FE|AHC_MULTI_TID,
+ AHC_AIC7896_FE = AHC_AIC7890_FE|AHC_MULTI_FUNC,
+ AHC_AIC7899_FE = AHC_AIC7892_FE|AHC_MULTI_FUNC
+} ahc_feature;
+
+/*
+ * Bugs in the silicon that we work around in software.
+ */
+typedef enum {
+ AHC_BUGNONE = 0x00,
+ /*
+ * On all chips prior to the U2 product line,
+ * the WIDEODD S/G segment feature does not
+ * work during scsi->HostBus transfers.
+ */
+ AHC_TMODE_WIDEODD_BUG = 0x01,
+ /*
+ * On the aic7890/91 Rev 0 chips, the autoflush
+ * feature does not work. A manual flush of
+ * the DMA FIFO is required.
+ */
+ AHC_AUTOFLUSH_BUG = 0x02,
+ /*
+ * On many chips, cacheline streaming does not work.
+ */
+ AHC_CACHETHEN_BUG = 0x04,
+ /*
+ * On the aic7896/97 chips, cacheline
+ * streaming must be enabled.
+ */
+ AHC_CACHETHEN_DIS_BUG = 0x08,
+ /*
+ * PCI 2.1 Retry failure on non-empty data fifo.
+ */
+ AHC_PCI_2_1_RETRY_BUG = 0x10,
+ /*
+ * Controller does not handle cacheline residuals
+ * properly on S/G segments if PCI MWI instructions
+ * are allowed.
+ */
+ AHC_PCI_MWI_BUG = 0x20,
+ /*
+ * An SCB upload using the SCB channel's
+ * auto array entry copy feature may
+ * corrupt data. This appears to only
+ * occur on 66MHz systems.
+ */
+ AHC_SCBCHAN_UPLOAD_BUG = 0x40
+} ahc_bug;
+
+/*
+ * Configuration specific settings.
+ * The driver determines these settings by probing the
+ * chip/controller's configuration.
+ */
+typedef enum {
+ AHC_FNONE = 0x000,
+ AHC_PRIMARY_CHANNEL = 0x003, /*
+ * The channel that should
+ * be probed first.
+ */
+ AHC_USEDEFAULTS = 0x004, /*
+ * For cards without an seeprom
+ * or a BIOS to initialize the chip's
+ * SRAM, we use the default target
+ * settings.
+ */
+ AHC_SEQUENCER_DEBUG = 0x008,
+ AHC_SHARED_SRAM = 0x010,
+ AHC_LARGE_SEEPROM = 0x020, /* Uses C56_66 not C46 */
+ AHC_RESET_BUS_A = 0x040,
+ AHC_RESET_BUS_B = 0x080,
+ AHC_EXTENDED_TRANS_A = 0x100,
+ AHC_EXTENDED_TRANS_B = 0x200,
+ AHC_TERM_ENB_A = 0x400,
+ AHC_TERM_ENB_B = 0x800,
+ AHC_INITIATORROLE = 0x1000, /*
+ * Allow initiator operations on
+ * this controller.
+ */
+ AHC_TARGETROLE = 0x2000, /*
+ * Allow target operations on this
+ * controller.
+ */
+ AHC_NEWEEPROM_FMT = 0x4000,
+ AHC_TQINFIFO_BLOCKED = 0x10000, /* Blocked waiting for ATIOs */
+ AHC_INT50_SPEEDFLEX = 0x20000, /*
+ * Internal 50pin connector
+ * sits behind an aic3860
+ */
+ AHC_SCB_BTT = 0x40000, /*
+ * The busy targets table is
+ * stored in SCB space rather
+ * than SRAM.
+ */
+ AHC_BIOS_ENABLED = 0x80000,
+ AHC_ALL_INTERRUPTS = 0x100000,
+ AHC_PAGESCBS = 0x400000, /* Enable SCB paging */
+ AHC_EDGE_INTERRUPT = 0x800000, /* Device uses edge triggered ints */
+ AHC_39BIT_ADDRESSING = 0x1000000, /* Use 39 bit addressing scheme. */
+ AHC_LSCBS_ENABLED = 0x2000000, /* 64Byte SCBs enabled */
+ AHC_SCB_CONFIG_USED = 0x4000000, /* No SEEPROM but SCB2 had info. */
+ AHC_NO_BIOS_INIT = 0x8000000, /* No BIOS left over settings. */
+ AHC_DISABLE_PCI_PERR = 0x10000000,
+ AHC_HAS_TERM_LOGIC = 0x20000000
+} ahc_flag;
+
+/************************* Hardware SCB Definition ***************************/
+
+/*
+ * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB
+ * consists of a "hardware SCB" mirroring the fields available on the card
+ * and additional information the kernel stores for each transaction.
+ *
+ * To minimize space utilization, a portion of the hardware scb stores
+ * different data during different portions of a SCSI transaction.
+ * As initialized by the host driver for the initiator role, this area
+ * contains the SCSI cdb (or a pointer to the cdb) to be executed. After
+ * the cdb has been presented to the target, this area serves to store
+ * residual transfer information and the SCSI status byte.
+ * For the target role, the contents of this area do not change, but
+ * still serve a different purpose than for the initiator role. See
+ * struct target_data for details.
+ */
+
+/*
+ * Status information embedded in the shared poriton of
+ * an SCB after passing the cdb to the target. The kernel
+ * driver will only read this data for transactions that
+ * complete abnormally (non-zero status byte).
+ */
+struct status_pkt {
+ uint32_t residual_datacnt; /* Residual in the current S/G seg */
+ uint32_t residual_sg_ptr; /* The next S/G for this transfer */
+ uint8_t scsi_status; /* Standard SCSI status byte */
+};
+
+/*
+ * Target mode version of the shared data SCB segment.
+ */
+struct target_data {
+ uint32_t residual_datacnt; /* Residual in the current S/G seg */
+ uint32_t residual_sg_ptr; /* The next S/G for this transfer */
+ uint8_t scsi_status; /* SCSI status to give to initiator */
+ uint8_t target_phases; /* Bitmap of phases to execute */
+ uint8_t data_phase; /* Data-In or Data-Out */
+ uint8_t initiator_tag; /* Initiator's transaction tag */
+};
+
+struct hardware_scb {
+/*0*/ union {
+ /*
+ * If the cdb is 12 bytes or less, we embed it directly
+ * in the SCB. For longer cdbs, we embed the address
+ * of the cdb payload as seen by the chip and a DMA
+ * is used to pull it in.
+ */
+ uint8_t cdb[12];
+ uint32_t cdb_ptr;
+ struct status_pkt status;
+ struct target_data tdata;
+ } shared_data;
+/*
+ * A word about residuals.
+ * The scb is presented to the sequencer with the dataptr and datacnt
+ * fields initialized to the contents of the first S/G element to
+ * transfer. The sgptr field is initialized to the bus address for
+ * the S/G element that follows the first in the in core S/G array
+ * or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid
+ * S/G entry for this transfer (single S/G element transfer with the
+ * first elements address and length preloaded in the dataptr/datacnt
+ * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL.
+ * The SG_FULL_RESID flag ensures that the residual will be correctly
+ * noted even if no data transfers occur. Once the data phase is entered,
+ * the residual sgptr and datacnt are loaded from the sgptr and the
+ * datacnt fields. After each S/G element's dataptr and length are
+ * loaded into the hardware, the residual sgptr is advanced. After
+ * each S/G element is expired, its datacnt field is checked to see
+ * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the
+ * residual sg ptr and the transfer is considered complete. If the
+ * sequencer determines that there is a residual in the tranfer, it
+ * will set the SG_RESID_VALID flag in sgptr and dma the scb back into
+ * host memory. To sumarize:
+ *
+ * Sequencer:
+ * o A residual has occurred if SG_FULL_RESID is set in sgptr,
+ * or residual_sgptr does not have SG_LIST_NULL set.
+ *
+ * o We are transferring the last segment if residual_datacnt has
+ * the SG_LAST_SEG flag set.
+ *
+ * Host:
+ * o A residual has occurred if a completed scb has the
+ * SG_RESID_VALID flag set.
+ *
+ * o residual_sgptr and sgptr refer to the "next" sg entry
+ * and so may point beyond the last valid sg entry for the
+ * transfer.
+ */
+/*12*/ uint32_t dataptr;
+/*16*/ uint32_t datacnt; /*
+ * Byte 3 (numbered from 0) of
+ * the datacnt is really the
+ * 4th byte in that data address.
+ */
+/*20*/ uint32_t sgptr;
+#define SG_PTR_MASK 0xFFFFFFF8
+/*24*/ uint8_t control; /* See SCB_CONTROL in aic7xxx.reg for details */
+/*25*/ uint8_t scsiid; /* what to load in the SCSIID register */
+/*26*/ uint8_t lun;
+/*27*/ uint8_t tag; /*
+ * Index into our kernel SCB array.
+ * Also used as the tag for tagged I/O
+ */
+/*28*/ uint8_t cdb_len;
+/*29*/ uint8_t scsirate; /* Value for SCSIRATE register */
+/*30*/ uint8_t scsioffset; /* Value for SCSIOFFSET register */
+/*31*/ uint8_t next; /*
+ * Used for threading SCBs in the
+ * "Waiting for Selection" and
+ * "Disconnected SCB" lists down
+ * in the sequencer.
+ */
+/*32*/ uint8_t cdb32[32]; /*
+ * CDB storage for cdbs of size
+ * 13->32. We store them here
+ * because hardware scbs are
+ * allocated from DMA safe
+ * memory so we are guaranteed
+ * the controller can access
+ * this data.
+ */
+};
+
+/************************ Kernel SCB Definitions ******************************/
+/*
+ * Some fields of the SCB are OS dependent. Here we collect the
+ * definitions for elements that all OS platforms need to include
+ * in there SCB definition.
+ */
+
+/*
+ * Definition of a scatter/gather element as transferred to the controller.
+ * The aic7xxx chips only support a 24bit length. We use the top byte of
+ * the length to store additional address bits and a flag to indicate
+ * that a given segment terminates the transfer. This gives us an
+ * addressable range of 512GB on machines with 64bit PCI or with chips
+ * that can support dual address cycles on 32bit PCI busses.
+ */
+struct ahc_dma_seg {
+ uint32_t addr;
+ uint32_t len;
+#define AHC_DMA_LAST_SEG 0x80000000
+#define AHC_SG_HIGH_ADDR_MASK 0x7F000000
+#define AHC_SG_LEN_MASK 0x00FFFFFF
+};
+
+struct sg_map_node {
+ bus_dmamap_t sg_dmamap;
+ dma_addr_t sg_physaddr;
+ struct ahc_dma_seg* sg_vaddr;
+ SLIST_ENTRY(sg_map_node) links;
+};
+
+/*
+ * The current state of this SCB.
+ */
+typedef enum {
+ SCB_FREE = 0x0000,
+ SCB_OTHERTCL_TIMEOUT = 0x0002,/*
+ * Another device was active
+ * during the first timeout for
+ * this SCB so we gave ourselves
+ * an additional timeout period
+ * in case it was hogging the
+ * bus.
+ */
+ SCB_DEVICE_RESET = 0x0004,
+ SCB_SENSE = 0x0008,
+ SCB_CDB32_PTR = 0x0010,
+ SCB_RECOVERY_SCB = 0x0020,
+ SCB_AUTO_NEGOTIATE = 0x0040,/* Negotiate to achieve goal. */
+ SCB_NEGOTIATE = 0x0080,/* Negotiation forced for command. */
+ SCB_ABORT = 0x0100,
+ SCB_UNTAGGEDQ = 0x0200,
+ SCB_ACTIVE = 0x0400,
+ SCB_TARGET_IMMEDIATE = 0x0800,
+ SCB_TRANSMISSION_ERROR = 0x1000,/*
+ * We detected a parity or CRC
+ * error that has effected the
+ * payload of the command. This
+ * flag is checked when normal
+ * status is returned to catch
+ * the case of a target not
+ * responding to our attempt
+ * to report the error.
+ */
+ SCB_TARGET_SCB = 0x2000,
+ SCB_SILENT = 0x4000 /*
+ * Be quiet about transmission type
+ * errors. They are expected and we
+ * don't want to upset the user. This
+ * flag is typically used during DV.
+ */
+} scb_flag;
+
+struct scb {
+ struct hardware_scb *hscb;
+ union {
+ SLIST_ENTRY(scb) sle;
+ TAILQ_ENTRY(scb) tqe;
+ } links;
+ LIST_ENTRY(scb) pending_links;
+ ahc_io_ctx_t io_ctx;
+ struct ahc_softc *ahc_softc;
+ scb_flag flags;
+#ifndef __linux__
+ bus_dmamap_t dmamap;
+#endif
+ struct scb_platform_data *platform_data;
+ struct sg_map_node *sg_map;
+ struct ahc_dma_seg *sg_list;
+ dma_addr_t sg_list_phys;
+ u_int sg_count;/* How full ahc_dma_seg is */
+};
+
+struct scb_data {
+ SLIST_HEAD(, scb) free_scbs; /*
+ * Pool of SCBs ready to be assigned
+ * commands to execute.
+ */
+ struct scb *scbindex[256]; /*
+ * Mapping from tag to SCB.
+ * As tag identifiers are an
+ * 8bit value, we provide space
+ * for all possible tag values.
+ * Any lookups to entries at or
+ * above AHC_SCB_MAX_ALLOC will
+ * always fail.
+ */
+ struct hardware_scb *hscbs; /* Array of hardware SCBs */
+ struct scb *scbarray; /* Array of kernel SCBs */
+ struct scsi_sense_data *sense; /* Per SCB sense data */
+
+ /*
+ * "Bus" addresses of our data structures.
+ */
+ bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */
+ bus_dmamap_t hscb_dmamap;
+ dma_addr_t hscb_busaddr;
+ bus_dma_tag_t sense_dmat;
+ bus_dmamap_t sense_dmamap;
+ dma_addr_t sense_busaddr;
+ bus_dma_tag_t sg_dmat; /* dmat for our sg segments */
+ SLIST_HEAD(, sg_map_node) sg_maps;
+ uint8_t numscbs;
+ uint8_t maxhscbs; /* Number of SCBs on the card */
+ uint8_t init_level; /*
+ * How far we've initialized
+ * this structure.
+ */
+};
+
+/************************ Target Mode Definitions *****************************/
+
+/*
+ * Connection descriptor for select-in requests in target mode.
+ */
+struct target_cmd {
+ uint8_t scsiid; /* Our ID and the initiator's ID */
+ uint8_t identify; /* Identify message */
+ uint8_t bytes[22]; /*
+ * Bytes contains any additional message
+ * bytes terminated by 0xFF. The remainder
+ * is the cdb to execute.
+ */
+ uint8_t cmd_valid; /*
+ * When a command is complete, the firmware
+ * will set cmd_valid to all bits set.
+ * After the host has seen the command,
+ * the bits are cleared. This allows us
+ * to just peek at host memory to determine
+ * if more work is complete. cmd_valid is on
+ * an 8 byte boundary to simplify setting
+ * it on aic7880 hardware which only has
+ * limited direct access to the DMA FIFO.
+ */
+ uint8_t pad[7];
+};
+
+/*
+ * Number of events we can buffer up if we run out
+ * of immediate notify ccbs.
+ */
+#define AHC_TMODE_EVENT_BUFFER_SIZE 8
+struct ahc_tmode_event {
+ uint8_t initiator_id;
+ uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */
+#define EVENT_TYPE_BUS_RESET 0xFF
+ uint8_t event_arg;
+};
+
+/*
+ * Per enabled lun target mode state.
+ * As this state is directly influenced by the host OS'es target mode
+ * environment, we let the OS module define it. Forward declare the
+ * structure here so we can store arrays of them, etc. in OS neutral
+ * data structures.
+ */
+#ifdef AHC_TARGET_MODE
+struct ahc_tmode_lstate {
+ struct cam_path *path;
+ struct ccb_hdr_slist accept_tios;
+ struct ccb_hdr_slist immed_notifies;
+ struct ahc_tmode_event event_buffer[AHC_TMODE_EVENT_BUFFER_SIZE];
+ uint8_t event_r_idx;
+ uint8_t event_w_idx;
+};
+#else
+struct ahc_tmode_lstate;
+#endif
+
+/******************** Transfer Negotiation Datastructures *********************/
+#define AHC_TRANS_CUR 0x01 /* Modify current neogtiation status */
+#define AHC_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */
+#define AHC_TRANS_GOAL 0x04 /* Modify negotiation goal */
+#define AHC_TRANS_USER 0x08 /* Modify user negotiation settings */
+
+#define AHC_WIDTH_UNKNOWN 0xFF
+#define AHC_PERIOD_UNKNOWN 0xFF
+#define AHC_OFFSET_UNKNOWN 0xFF
+#define AHC_PPR_OPTS_UNKNOWN 0xFF
+
+/*
+ * Transfer Negotiation Information.
+ */
+struct ahc_transinfo {
+ uint8_t protocol_version; /* SCSI Revision level */
+ uint8_t transport_version; /* SPI Revision level */
+ uint8_t width; /* Bus width */
+ uint8_t period; /* Sync rate factor */
+ uint8_t offset; /* Sync offset */
+ uint8_t ppr_options; /* Parallel Protocol Request options */
+};
+
+/*
+ * Per-initiator current, goal and user transfer negotiation information. */
+struct ahc_initiator_tinfo {
+ uint8_t scsirate; /* Computed value for SCSIRATE reg */
+ struct ahc_transinfo curr;
+ struct ahc_transinfo goal;
+ struct ahc_transinfo user;
+};
+
+/*
+ * Per enabled target ID state.
+ * Pointers to lun target state as well as sync/wide negotiation information
+ * for each initiator<->target mapping. For the initiator role we pretend
+ * that we are the target and the targets are the initiators since the
+ * negotiation is the same regardless of role.
+ */
+struct ahc_tmode_tstate {
+ struct ahc_tmode_lstate* enabled_luns[AHC_NUM_LUNS];
+ struct ahc_initiator_tinfo transinfo[AHC_NUM_TARGETS];
+
+ /*
+ * Per initiator state bitmasks.
+ */
+ uint16_t auto_negotiate;/* Auto Negotiation Required */
+ uint16_t ultraenb; /* Using ultra sync rate */
+ uint16_t discenable; /* Disconnection allowed */
+ uint16_t tagenable; /* Tagged Queuing allowed */
+};
+
+/*
+ * Data structure for our table of allowed synchronous transfer rates.
+ */
+struct ahc_syncrate {
+ u_int sxfr_u2; /* Value of the SXFR parameter for Ultra2+ Chips */
+ u_int sxfr; /* Value of the SXFR parameter for <= Ultra Chips */
+#define ULTRA_SXFR 0x100 /* Rate Requires Ultra Mode set */
+#define ST_SXFR 0x010 /* Rate Single Transition Only */
+#define DT_SXFR 0x040 /* Rate Double Transition Only */
+ uint8_t period; /* Period to send to SCSI target */
+ const char *rate;
+};
+
+/* Safe and valid period for async negotiations. */
+#define AHC_ASYNC_XFER_PERIOD 0x45
+#define AHC_ULTRA2_XFER_PERIOD 0x0a
+
+/*
+ * Indexes into our table of syncronous transfer rates.
+ */
+#define AHC_SYNCRATE_DT 0
+#define AHC_SYNCRATE_ULTRA2 1
+#define AHC_SYNCRATE_ULTRA 3
+#define AHC_SYNCRATE_FAST 6
+#define AHC_SYNCRATE_MAX AHC_SYNCRATE_DT
+#define AHC_SYNCRATE_MIN 13
+
+/***************************** Lookup Tables **********************************/
+/*
+ * Phase -> name and message out response
+ * to parity errors in each phase table.
+ */
+struct ahc_phase_table_entry {
+ uint8_t phase;
+ uint8_t mesg_out; /* Message response to parity errors */
+ char *phasemsg;
+};
+
+/************************** Serial EEPROM Format ******************************/
+
+struct seeprom_config {
+/*
+ * Per SCSI ID Configuration Flags
+ */
+ uint16_t device_flags[16]; /* words 0-15 */
+#define CFXFER 0x0007 /* synchronous transfer rate */
+#define CFSYNCH 0x0008 /* enable synchronous transfer */
+#define CFDISC 0x0010 /* enable disconnection */
+#define CFWIDEB 0x0020 /* wide bus device */
+#define CFSYNCHISULTRA 0x0040 /* CFSYNCH is an ultra offset (2940AU)*/
+#define CFSYNCSINGLE 0x0080 /* Single-Transition signalling */
+#define CFSTART 0x0100 /* send start unit SCSI command */
+#define CFINCBIOS 0x0200 /* include in BIOS scan */
+#define CFRNFOUND 0x0400 /* report even if not found */
+#define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */
+#define CFWBCACHEENB 0x4000 /* Enable W-Behind Cache on disks */
+#define CFWBCACHENOP 0xc000 /* Don't touch W-Behind Cache */
+
+/*
+ * BIOS Control Bits
+ */
+ uint16_t bios_control; /* word 16 */
+#define CFSUPREM 0x0001 /* support all removeable drives */
+#define CFSUPREMB 0x0002 /* support removeable boot drives */
+#define CFBIOSEN 0x0004 /* BIOS enabled */
+#define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */
+#define CFSM2DRV 0x0010 /* support more than two drives */
+#define CFSTPWLEVEL 0x0010 /* Termination level control */
+#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
+#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
+#define CFTERM_MENU 0x0040 /* BIOS displays termination menu */
+#define CFEXTEND 0x0080 /* extended translation enabled */
+#define CFSCAMEN 0x0100 /* SCAM enable */
+#define CFMSG_LEVEL 0x0600 /* BIOS Message Level */
+#define CFMSG_VERBOSE 0x0000
+#define CFMSG_SILENT 0x0200
+#define CFMSG_DIAG 0x0400
+#define CFBOOTCD 0x0800 /* Support Bootable CD-ROM */
+/* UNUSED 0xff00 */
+
+/*
+ * Host Adapter Control Bits
+ */
+ uint16_t adapter_control; /* word 17 */
+#define CFAUTOTERM 0x0001 /* Perform Auto termination */
+#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable */
+#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */
+#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */
+#define CFSTERM 0x0004 /* SCSI low byte termination */
+#define CFWSTERM 0x0008 /* SCSI high byte termination */
+#define CFSPARITY 0x0010 /* SCSI parity */
+#define CF284XSTERM 0x0020 /* SCSI low byte term (284x cards) */
+#define CFMULTILUN 0x0020
+#define CFRESETB 0x0040 /* reset SCSI bus at boot */
+#define CFCLUSTERENB 0x0080 /* Cluster Enable */
+#define CFBOOTCHAN 0x0300 /* probe this channel first */
+#define CFBOOTCHANSHIFT 8
+#define CFSEAUTOTERM 0x0400 /* Ultra2 Perform secondary Auto Term*/
+#define CFSELOWTERM 0x0800 /* Ultra2 secondary low term */
+#define CFSEHIGHTERM 0x1000 /* Ultra2 secondary high term */
+#define CFENABLEDV 0x4000 /* Perform Domain Validation*/
+
+/*
+ * Bus Release Time, Host Adapter ID
+ */
+ uint16_t brtime_id; /* word 18 */
+#define CFSCSIID 0x000f /* host adapter SCSI ID */
+/* UNUSED 0x00f0 */
+#define CFBRTIME 0xff00 /* bus release time */
+
+/*
+ * Maximum targets
+ */
+ uint16_t max_targets; /* word 19 */
+#define CFMAXTARG 0x00ff /* maximum targets */
+#define CFBOOTLUN 0x0f00 /* Lun to boot from */
+#define CFBOOTID 0xf000 /* Target to boot from */
+ uint16_t res_1[10]; /* words 20-29 */
+ uint16_t signature; /* Signature == 0x250 */
+#define CFSIGNATURE 0x250
+#define CFSIGNATURE2 0x300
+ uint16_t checksum; /* word 31 */
+};
+
+/**************************** Message Buffer *********************************/
+typedef enum {
+ MSG_TYPE_NONE = 0x00,
+ MSG_TYPE_INITIATOR_MSGOUT = 0x01,
+ MSG_TYPE_INITIATOR_MSGIN = 0x02,
+ MSG_TYPE_TARGET_MSGOUT = 0x03,
+ MSG_TYPE_TARGET_MSGIN = 0x04
+} ahc_msg_type;
+
+typedef enum {
+ MSGLOOP_IN_PROG,
+ MSGLOOP_MSGCOMPLETE,
+ MSGLOOP_TERMINATED
+} msg_loop_stat;
+
+/*********************** Software Configuration Structure *********************/
+TAILQ_HEAD(scb_tailq, scb);
+
+struct ahc_aic7770_softc {
+ /*
+ * Saved register state used for chip_init().
+ */
+ uint8_t busspd;
+ uint8_t bustime;
+};
+
+struct ahc_pci_softc {
+ /*
+ * Saved register state used for chip_init().
+ */
+ uint32_t devconfig;
+ uint16_t targcrccnt;
+ uint8_t command;
+ uint8_t csize_lattime;
+ uint8_t optionmode;
+ uint8_t crccontrol1;
+ uint8_t dscommand0;
+ uint8_t dspcistatus;
+ uint8_t scbbaddr;
+ uint8_t dff_thrsh;
+};
+
+union ahc_bus_softc {
+ struct ahc_aic7770_softc aic7770_softc;
+ struct ahc_pci_softc pci_softc;
+};
+
+typedef void (*ahc_bus_intr_t)(struct ahc_softc *);
+typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *);
+typedef int (*ahc_bus_suspend_t)(struct ahc_softc *);
+typedef int (*ahc_bus_resume_t)(struct ahc_softc *);
+typedef void ahc_callback_t (void *);
+
+struct ahc_softc {
+ bus_space_tag_t tag;
+ bus_space_handle_t bsh;
+#ifndef __linux__
+ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
+#endif
+ struct scb_data *scb_data;
+
+ struct scb *next_queued_scb;
+
+ /*
+ * SCBs that have been sent to the controller
+ */
+ LIST_HEAD(, scb) pending_scbs;
+
+ /*
+ * Counting lock for deferring the release of additional
+ * untagged transactions from the untagged_queues. When
+ * the lock is decremented to 0, all queues in the
+ * untagged_queues array are run.
+ */
+ u_int untagged_queue_lock;
+
+ /*
+ * Per-target queue of untagged-transactions. The
+ * transaction at the head of the queue is the
+ * currently pending untagged transaction for the
+ * target. The driver only allows a single untagged
+ * transaction per target.
+ */
+ struct scb_tailq untagged_queues[AHC_NUM_TARGETS];
+
+ /*
+ * Bus attachment specific data.
+ */
+ union ahc_bus_softc bus_softc;
+
+ /*
+ * Platform specific data.
+ */
+ struct ahc_platform_data *platform_data;
+
+ /*
+ * Platform specific device information.
+ */
+ ahc_dev_softc_t dev_softc;
+
+ /*
+ * Bus specific device information.
+ */
+ ahc_bus_intr_t bus_intr;
+
+ /*
+ * Bus specific initialization required
+ * after a chip reset.
+ */
+ ahc_bus_chip_init_t bus_chip_init;
+
+ /*
+ * Target mode related state kept on a per enabled lun basis.
+ * Targets that are not enabled will have null entries.
+ * As an initiator, we keep one target entry for our initiator
+ * ID to store our sync/wide transfer settings.
+ */
+ struct ahc_tmode_tstate *enabled_targets[AHC_NUM_TARGETS];
+
+ /*
+ * The black hole device responsible for handling requests for
+ * disabled luns on enabled targets.
+ */
+ struct ahc_tmode_lstate *black_hole;
+
+ /*
+ * Device instance currently on the bus awaiting a continue TIO
+ * for a command that was not given the disconnect priveledge.
+ */
+ struct ahc_tmode_lstate *pending_device;
+
+ /*
+ * Card characteristics
+ */
+ ahc_chip chip;
+ ahc_feature features;
+ ahc_bug bugs;
+ ahc_flag flags;
+ struct seeprom_config *seep_config;
+
+ /* Values to store in the SEQCTL register for pause and unpause */
+ uint8_t unpause;
+ uint8_t pause;
+
+ /* Command Queues */
+ uint8_t qoutfifonext;
+ uint8_t qinfifonext;
+ uint8_t *qoutfifo;
+ uint8_t *qinfifo;
+
+ /* Critical Section Data */
+ struct cs *critical_sections;
+ u_int num_critical_sections;
+
+ /* Channel Names ('A', 'B', etc.) */
+ char channel;
+ char channel_b;
+
+ /* Initiator Bus ID */
+ uint8_t our_id;
+ uint8_t our_id_b;
+
+ /*
+ * PCI error detection.
+ */
+ int unsolicited_ints;
+
+ /*
+ * Target incoming command FIFO.
+ */
+ struct target_cmd *targetcmds;
+ uint8_t tqinfifonext;
+
+ /*
+ * Cached copy of the sequencer control register.
+ */
+ uint8_t seqctl;
+
+ /*
+ * Incoming and outgoing message handling.
+ */
+ uint8_t send_msg_perror;
+ ahc_msg_type msg_type;
+ uint8_t msgout_buf[12];/* Message we are sending */
+ uint8_t msgin_buf[12];/* Message we are receiving */
+ u_int msgout_len; /* Length of message to send */
+ u_int msgout_index; /* Current index in msgout */
+ u_int msgin_index; /* Current index in msgin */
+
+ /*
+ * Mapping information for data structures shared
+ * between the sequencer and kernel.
+ */
+ bus_dma_tag_t parent_dmat;
+ bus_dma_tag_t shared_data_dmat;
+ bus_dmamap_t shared_data_dmamap;
+ dma_addr_t shared_data_busaddr;
+
+ /*
+ * Bus address of the one byte buffer used to
+ * work-around a DMA bug for chips <= aic7880
+ * in target mode.
+ */
+ dma_addr_t dma_bug_buf;
+
+ /* Number of enabled target mode device on this card */
+ u_int enabled_luns;
+
+ /* Initialization level of this data structure */
+ u_int init_level;
+
+ /* PCI cacheline size. */
+ u_int pci_cachesize;
+
+ /*
+ * Count of parity errors we have seen as a target.
+ * We auto-disable parity error checking after seeing
+ * AHC_PCI_TARGET_PERR_THRESH number of errors.
+ */
+ u_int pci_target_perr_count;
+#define AHC_PCI_TARGET_PERR_THRESH 10
+
+ /* Maximum number of sequencer instructions supported. */
+ u_int instruction_ram_size;
+
+ /* Per-Unit descriptive information */
+ const char *description;
+ char *name;
+ int unit;
+
+ /* Selection Timer settings */
+ int seltime;
+ int seltime_b;
+
+ uint16_t user_discenable;/* Disconnection allowed */
+ uint16_t user_tagenable;/* Tagged Queuing allowed */
+};
+
+/************************ Active Device Information ***************************/
+typedef enum {
+ ROLE_UNKNOWN,
+ ROLE_INITIATOR,
+ ROLE_TARGET
+} role_t;
+
+struct ahc_devinfo {
+ int our_scsiid;
+ int target_offset;
+ uint16_t target_mask;
+ u_int target;
+ u_int lun;
+ char channel;
+ role_t role; /*
+ * Only guaranteed to be correct if not
+ * in the busfree state.
+ */
+};
+
+/****************************** PCI Structures ********************************/
+typedef int (ahc_device_setup_t)(struct ahc_softc *);
+
+struct ahc_pci_identity {
+ uint64_t full_id;
+ uint64_t id_mask;
+ const char *name;
+ ahc_device_setup_t *setup;
+};
+
+/***************************** VL/EISA Declarations ***************************/
+struct aic7770_identity {
+ uint32_t full_id;
+ uint32_t id_mask;
+ const char *name;
+ ahc_device_setup_t *setup;
+};
+extern struct aic7770_identity aic7770_ident_table[];
+extern const int ahc_num_aic7770_devs;
+
+#define AHC_EISA_SLOT_OFFSET 0xc00
+#define AHC_EISA_IOSIZE 0x100
+
+/*************************** Function Declarations ****************************/
+/******************************************************************************/
+
+/***************************** PCI Front End *********************************/
+const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
+int ahc_pci_config(struct ahc_softc *,
+ const struct ahc_pci_identity *);
+int ahc_pci_test_register_access(struct ahc_softc *);
+#ifdef CONFIG_PM
+void ahc_pci_resume(struct ahc_softc *ahc);
+#endif
+
+/*************************** EISA/VL Front End ********************************/
+struct aic7770_identity *aic7770_find_device(uint32_t);
+int aic7770_config(struct ahc_softc *ahc,
+ struct aic7770_identity *,
+ u_int port);
+
+/************************** SCB and SCB queue management **********************/
+int ahc_probe_scbs(struct ahc_softc *);
+void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
+ struct scb *scb);
+int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
+ int target, char channel, int lun,
+ u_int tag, role_t role);
+
+/****************************** Initialization ********************************/
+struct ahc_softc *ahc_alloc(void *platform_arg, char *name);
+int ahc_softc_init(struct ahc_softc *);
+void ahc_controller_info(struct ahc_softc *ahc, char *buf);
+int ahc_chip_init(struct ahc_softc *ahc);
+int ahc_init(struct ahc_softc *ahc);
+void ahc_intr_enable(struct ahc_softc *ahc, int enable);
+void ahc_pause_and_flushwork(struct ahc_softc *ahc);
+#ifdef CONFIG_PM
+int ahc_suspend(struct ahc_softc *ahc);
+int ahc_resume(struct ahc_softc *ahc);
+#endif
+void ahc_set_unit(struct ahc_softc *, int);
+void ahc_set_name(struct ahc_softc *, char *);
+void ahc_free(struct ahc_softc *ahc);
+int ahc_reset(struct ahc_softc *ahc, int reinit);
+
+/***************************** Error Recovery *********************************/
+typedef enum {
+ SEARCH_COMPLETE,
+ SEARCH_COUNT,
+ SEARCH_REMOVE
+} ahc_search_action;
+int ahc_search_qinfifo(struct ahc_softc *ahc, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status,
+ ahc_search_action action);
+int ahc_search_untagged_queues(struct ahc_softc *ahc,
+ ahc_io_ctx_t ctx,
+ int target, char channel,
+ int lun, uint32_t status,
+ ahc_search_action action);
+int ahc_search_disc_list(struct ahc_softc *ahc, int target,
+ char channel, int lun, u_int tag,
+ int stop_on_first, int remove,
+ int save_state);
+int ahc_reset_channel(struct ahc_softc *ahc, char channel,
+ int initiate_reset);
+
+/*************************** Utility Functions ********************************/
+void ahc_compile_devinfo(struct ahc_devinfo *devinfo,
+ u_int our_id, u_int target,
+ u_int lun, char channel,
+ role_t role);
+/************************** Transfer Negotiation ******************************/
+const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+ u_int *ppr_options, u_int maxsync);
+u_int ahc_find_period(struct ahc_softc *ahc,
+ u_int scsirate, u_int maxsync);
+/*
+ * Negotiation types. These are used to qualify if we should renegotiate
+ * even if our goal and current transport parameters are identical.
+ */
+typedef enum {
+ AHC_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */
+ AHC_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */
+ AHC_NEG_ALWAYS /* Renegotiat even if goal is async. */
+} ahc_neg_type;
+int ahc_update_neg_request(struct ahc_softc*,
+ struct ahc_devinfo*,
+ struct ahc_tmode_tstate*,
+ struct ahc_initiator_tinfo*,
+ ahc_neg_type);
+void ahc_set_width(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ u_int width, u_int type, int paused);
+void ahc_set_syncrate(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ const struct ahc_syncrate *syncrate,
+ u_int period, u_int offset,
+ u_int ppr_options,
+ u_int type, int paused);
+typedef enum {
+ AHC_QUEUE_NONE,
+ AHC_QUEUE_BASIC,
+ AHC_QUEUE_TAGGED
+} ahc_queue_alg;
+
+/**************************** Target Mode *************************************/
+#ifdef AHC_TARGET_MODE
+void ahc_send_lstate_events(struct ahc_softc *,
+ struct ahc_tmode_lstate *);
+void ahc_handle_en_lun(struct ahc_softc *ahc,
+ struct cam_sim *sim, union ccb *ccb);
+cam_status ahc_find_tmode_devs(struct ahc_softc *ahc,
+ struct cam_sim *sim, union ccb *ccb,
+ struct ahc_tmode_tstate **tstate,
+ struct ahc_tmode_lstate **lstate,
+ int notfound_failure);
+#ifndef AHC_TMODE_ENABLE
+#define AHC_TMODE_ENABLE 0
+#endif
+#endif
+/******************************* Debug ***************************************/
+#ifdef AHC_DEBUG
+extern uint32_t ahc_debug;
+#define AHC_SHOW_MISC 0x0001
+#define AHC_SHOW_SENSE 0x0002
+#define AHC_DUMP_SEEPROM 0x0004
+#define AHC_SHOW_TERMCTL 0x0008
+#define AHC_SHOW_MEMORY 0x0010
+#define AHC_SHOW_MESSAGES 0x0020
+#define AHC_SHOW_DV 0x0040
+#define AHC_SHOW_SELTO 0x0080
+#define AHC_SHOW_QFULL 0x0200
+#define AHC_SHOW_QUEUE 0x0400
+#define AHC_SHOW_TQIN 0x0800
+#define AHC_SHOW_MASKED_ERRORS 0x1000
+#define AHC_DEBUG_SEQUENCER 0x2000
+#endif
+void ahc_print_devinfo(struct ahc_softc *ahc,
+ struct ahc_devinfo *dev);
+void ahc_dump_card_state(struct ahc_softc *ahc);
+int ahc_print_register(const ahc_reg_parse_entry_t *table,
+ u_int num_entries,
+ const char *name,
+ u_int address,
+ u_int value,
+ u_int *cur_column,
+ u_int wrap_point);
+/******************************* SEEPROM *************************************/
+int ahc_acquire_seeprom(struct ahc_softc *ahc,
+ struct seeprom_descriptor *sd);
+void ahc_release_seeprom(struct seeprom_descriptor *sd);
+#endif /* _AIC7XXX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
new file mode 100644
index 000000000..ba0b411d0
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -0,0 +1,1761 @@
+/*
+ * Aic7xxx register and scratch ram definitions.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
+
+/*
+ * This file is processed by the aic7xxx_asm utility for use in assembling
+ * firmware for the aic7xxx family of SCSI host adapters as well as to generate
+ * a C header file for use in the kernel portion of the Aic7xxx driver.
+ *
+ * All page numbers refer to the Adaptec AIC-7770 Data Book available from
+ * Adaptec's Technical Documents Department 1-800-934-2766
+ */
+
+/*
+ * Registers marked "dont_generate_debug_code" are not (yet) referenced
+ * from the driver code, and this keyword inhibit generation
+ * of debug code for them.
+ *
+ * REG_PRETTY_PRINT config will complain if dont_generate_debug_code
+ * is added to the register which is referenced in the driver.
+ * Unreferenced register with no dont_generate_debug_code will result
+ * in dead code. No warning is issued.
+ */
+
+/*
+ * SCSI Sequence Control (p. 3-11).
+ * Each bit, when set starts a specific SCSI sequence on the bus
+ */
+register SCSISEQ {
+ address 0x000
+ access_mode RW
+ field TEMODE 0x80
+ field ENSELO 0x40
+ field ENSELI 0x20
+ field ENRSELI 0x10
+ field ENAUTOATNO 0x08
+ field ENAUTOATNI 0x04
+ field ENAUTOATNP 0x02
+ field SCSIRSTO 0x01
+}
+
+/*
+ * SCSI Transfer Control 0 Register (pp. 3-13).
+ * Controls the SCSI module data path.
+ */
+register SXFRCTL0 {
+ address 0x001
+ access_mode RW
+ field DFON 0x80
+ field DFPEXP 0x40
+ field FAST20 0x20
+ field CLRSTCNT 0x10
+ field SPIOEN 0x08
+ field SCAMEN 0x04
+ field CLRCHN 0x02
+}
+
+/*
+ * SCSI Transfer Control 1 Register (pp. 3-14,15).
+ * Controls the SCSI module data path.
+ */
+register SXFRCTL1 {
+ address 0x002
+ access_mode RW
+ field BITBUCKET 0x80
+ field SWRAPEN 0x40
+ field ENSPCHK 0x20
+ mask STIMESEL 0x18
+ field ENSTIMER 0x04
+ field ACTNEGEN 0x02
+ field STPWEN 0x01 /* Powered Termination */
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Control Signal Read Register (p. 3-15).
+ * Reads the actual state of the SCSI bus pins
+ */
+register SCSISIGI {
+ address 0x003
+ access_mode RO
+ field CDI 0x80
+ field IOI 0x40
+ field MSGI 0x20
+ field ATNI 0x10
+ field SELI 0x08
+ field BSYI 0x04
+ field REQI 0x02
+ field ACKI 0x01
+/*
+ * Possible phases in SCSISIGI
+ */
+ mask PHASE_MASK CDI|IOI|MSGI
+ mask P_DATAOUT 0x00
+ mask P_DATAIN IOI
+ mask P_DATAOUT_DT P_DATAOUT|MSGI
+ mask P_DATAIN_DT P_DATAIN|MSGI
+ mask P_COMMAND CDI
+ mask P_MESGOUT CDI|MSGI
+ mask P_STATUS CDI|IOI
+ mask P_MESGIN CDI|IOI|MSGI
+}
+
+/*
+ * SCSI Control Signal Write Register (p. 3-16).
+ * Writing to this register modifies the control signals on the bus. Only
+ * those signals that are allowed in the current mode (Initiator/Target) are
+ * asserted.
+ */
+register SCSISIGO {
+ address 0x003
+ access_mode WO
+ field CDO 0x80
+ field IOO 0x40
+ field MSGO 0x20
+ field ATNO 0x10
+ field SELO 0x08
+ field BSYO 0x04
+ field REQO 0x02
+ field ACKO 0x01
+/*
+ * Possible phases to write into SCSISIG0
+ */
+ mask PHASE_MASK CDI|IOI|MSGI
+ mask P_DATAOUT 0x00
+ mask P_DATAIN IOI
+ mask P_COMMAND CDI
+ mask P_MESGOUT CDI|MSGI
+ mask P_STATUS CDI|IOI
+ mask P_MESGIN CDI|IOI|MSGI
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Rate Control (p. 3-17).
+ * Contents of this register determine the Synchronous SCSI data transfer
+ * rate and the maximum synchronous Req/Ack offset. An offset of 0 in the
+ * SOFS (3:0) bits disables synchronous data transfers. Any offset value
+ * greater than 0 enables synchronous transfers.
+ */
+register SCSIRATE {
+ address 0x004
+ access_mode RW
+ field WIDEXFER 0x80 /* Wide transfer control */
+ field ENABLE_CRC 0x40 /* CRC for D-Phases */
+ field SINGLE_EDGE 0x10 /* Disable DT Transfers */
+ mask SXFR 0x70 /* Sync transfer rate */
+ mask SXFR_ULTRA2 0x0f /* Sync transfer rate */
+ mask SOFS 0x0f /* Sync offset */
+}
+
+/*
+ * SCSI ID (p. 3-18).
+ * Contains the ID of the board and the current target on the
+ * selected channel.
+ */
+register SCSIID {
+ address 0x005
+ access_mode RW
+ mask TID 0xf0 /* Target ID mask */
+ mask TWIN_TID 0x70
+ field TWIN_CHNLB 0x80
+ mask OID 0x0f /* Our ID mask */
+ /*
+ * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book)
+ * The aic7890/91 allow an offset of up to 127 transfers in both wide
+ * and narrow mode.
+ */
+ alias SCSIOFFSET
+ mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Latched Data (p. 3-19).
+ * Read/Write latches used to transfer data on the SCSI bus during
+ * Automatic or Manual PIO mode. SCSIDATH can be used for the
+ * upper byte of a 16bit wide asynchronouse data phase transfer.
+ */
+register SCSIDATL {
+ address 0x006
+ access_mode RW
+ dont_generate_debug_code
+}
+
+register SCSIDATH {
+ address 0x007
+ access_mode RW
+}
+
+/*
+ * SCSI Transfer Count (pp. 3-19,20)
+ * These registers count down the number of bytes transferred
+ * across the SCSI bus. The counter is decremented only once
+ * the data has been safely transferred. SDONE in SSTAT0 is
+ * set when STCNT goes to 0
+ */
+register STCNT {
+ address 0x008
+ size 3
+ access_mode RW
+ dont_generate_debug_code
+}
+
+/* ALT_MODE registers (Ultra2 and Ultra160 chips) */
+register SXFRCTL2 {
+ address 0x013
+ access_mode RW
+ field AUTORSTDIS 0x10
+ field CMDDMAEN 0x08
+ mask ASYNC_SETUP 0x07
+}
+
+/* ALT_MODE register on Ultra160 chips */
+register OPTIONMODE {
+ address 0x008
+ access_mode RW
+ count 2
+ field AUTORATEEN 0x80
+ field AUTOACKEN 0x40
+ field ATNMGMNTEN 0x20
+ field BUSFREEREV 0x10
+ field EXPPHASEDIS 0x08
+ field SCSIDATL_IMGEN 0x04
+ field AUTO_MSGOUT_DE 0x02
+ field DIS_MSGIN_DUALEDGE 0x01
+ mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE
+ dont_generate_debug_code
+}
+
+/* ALT_MODE register on Ultra160 chips */
+register TARGCRCCNT {
+ address 0x00a
+ size 2
+ access_mode RW
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Clear SCSI Interrupt 0 (p. 3-20)
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
+ */
+register CLRSINT0 {
+ address 0x00b
+ access_mode WO
+ field CLRSELDO 0x40
+ field CLRSELDI 0x20
+ field CLRSELINGO 0x10
+ field CLRSWRAP 0x08
+ field CLRIOERR 0x08 /* Ultra2 Only */
+ field CLRSPIORDY 0x02
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Status 0 (p. 3-21)
+ * Contains one set of SCSI Interrupt codes
+ * These are most likely of interest to the sequencer
+ */
+register SSTAT0 {
+ address 0x00b
+ access_mode RO
+ field TARGET 0x80 /* Board acting as target */
+ field SELDO 0x40 /* Selection Done */
+ field SELDI 0x20 /* Board has been selected */
+ field SELINGO 0x10 /* Selection In Progress */
+ field SWRAP 0x08 /* 24bit counter wrap */
+ field IOERR 0x08 /* LVD Tranceiver mode changed */
+ field SDONE 0x04 /* STCNT = 0x000000 */
+ field SPIORDY 0x02 /* SCSI PIO Ready */
+ field DMADONE 0x01 /* DMA transfer completed */
+}
+
+/*
+ * Clear SCSI Interrupt 1 (p. 3-23)
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
+ */
+register CLRSINT1 {
+ address 0x00c
+ access_mode WO
+ field CLRSELTIMEO 0x80
+ field CLRATNO 0x40
+ field CLRSCSIRSTI 0x20
+ field CLRBUSFREE 0x08
+ field CLRSCSIPERR 0x04
+ field CLRPHASECHG 0x02
+ field CLRREQINIT 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Status 1 (p. 3-24)
+ */
+register SSTAT1 {
+ address 0x00c
+ access_mode RO
+ field SELTO 0x80
+ field ATNTARG 0x40
+ field SCSIRSTI 0x20
+ field PHASEMIS 0x10
+ field BUSFREE 0x08
+ field SCSIPERR 0x04
+ field PHASECHG 0x02
+ field REQINIT 0x01
+}
+
+/*
+ * SCSI Status 2 (pp. 3-25,26)
+ */
+register SSTAT2 {
+ address 0x00d
+ access_mode RO
+ field OVERRUN 0x80
+ field SHVALID 0x40 /* Shadow Layer non-zero */
+ field EXP_ACTIVE 0x10 /* SCSI Expander Active */
+ field CRCVALERR 0x08 /* CRC doesn't match (U3 only) */
+ field CRCENDERR 0x04 /* No terminal CRC packet (U3 only) */
+ field CRCREQERR 0x02 /* Illegal CRC packet req (U3 only) */
+ field DUAL_EDGE_ERR 0x01 /* Incorrect data phase (U3 only) */
+ mask SFCNT 0x1f
+}
+
+/*
+ * SCSI Status 3 (p. 3-26)
+ */
+register SSTAT3 {
+ address 0x00e
+ access_mode RO
+ count 2
+ mask SCSICNT 0xf0
+ mask OFFCNT 0x0f
+ mask U2OFFCNT 0x7f
+}
+
+/*
+ * SCSI ID for the aic7890/91 chips
+ */
+register SCSIID_ULTRA2 {
+ address 0x00f
+ access_mode RW
+ mask TID 0xf0 /* Target ID mask */
+ mask OID 0x0f /* Our ID mask */
+ dont_generate_debug_code
+}
+
+/*
+ * SCSI Interrupt Mode 1 (p. 3-28)
+ * Setting any bit will enable the corresponding function
+ * in SIMODE0 to interrupt via the IRQ pin.
+ */
+register SIMODE0 {
+ address 0x010
+ access_mode RW
+ count 2
+ field ENSELDO 0x40
+ field ENSELDI 0x20
+ field ENSELINGO 0x10
+ field ENSWRAP 0x08
+ field ENIOERR 0x08 /* LVD Tranceiver mode changes */
+ field ENSDONE 0x04
+ field ENSPIORDY 0x02
+ field ENDMADONE 0x01
+}
+
+/*
+ * SCSI Interrupt Mode 1 (pp. 3-28,29)
+ * Setting any bit will enable the corresponding function
+ * in SIMODE1 to interrupt via the IRQ pin.
+ */
+register SIMODE1 {
+ address 0x011
+ access_mode RW
+ field ENSELTIMO 0x80
+ field ENATNTARG 0x40
+ field ENSCSIRST 0x20
+ field ENPHASEMIS 0x10
+ field ENBUSFREE 0x08
+ field ENSCSIPERR 0x04
+ field ENPHASECHG 0x02
+ field ENREQINIT 0x01
+}
+
+/*
+ * SCSI Data Bus (High) (p. 3-29)
+ * This register reads data on the SCSI Data bus directly.
+ */
+register SCSIBUSL {
+ address 0x012
+ access_mode RW
+}
+
+register SCSIBUSH {
+ address 0x013
+ access_mode RW
+}
+
+/*
+ * SCSI/Host Address (p. 3-30)
+ * These registers hold the host address for the byte about to be
+ * transferred on the SCSI bus. They are counted up in the same
+ * manner as STCNT is counted down. SHADDR should always be used
+ * to determine the address of the last byte transferred since HADDR
+ * can be skewed by write ahead.
+ */
+register SHADDR {
+ address 0x014
+ size 4
+ access_mode RO
+ dont_generate_debug_code
+}
+
+/*
+ * Selection Timeout Timer (p. 3-30)
+ */
+register SELTIMER {
+ address 0x018
+ access_mode RW
+ count 1
+ field STAGE6 0x20
+ field STAGE5 0x10
+ field STAGE4 0x08
+ field STAGE3 0x04
+ field STAGE2 0x02
+ field STAGE1 0x01
+ alias TARGIDIN
+ dont_generate_debug_code
+}
+
+/*
+ * Selection/Reselection ID (p. 3-31)
+ * Upper four bits are the device id. The ONEBIT is set when the re/selecting
+ * device did not set its own ID.
+ */
+register SELID {
+ address 0x019
+ access_mode RW
+ mask SELID_MASK 0xf0
+ field ONEBIT 0x08
+ dont_generate_debug_code
+}
+
+register SCAMCTL {
+ address 0x01a
+ access_mode RW
+ field ENSCAMSELO 0x80
+ field CLRSCAMSELID 0x40
+ field ALTSTIM 0x20
+ field DFLTTID 0x10
+ mask SCAMLVL 0x03
+}
+
+/*
+ * Target Mode Selecting in ID bitmask (aic7890/91/96/97)
+ */
+register TARGID {
+ address 0x01b
+ size 2
+ access_mode RW
+ count 14
+ dont_generate_debug_code
+}
+
+/*
+ * Serial Port I/O Cabability register (p. 4-95 aic7860 Data Book)
+ * Indicates if external logic has been attached to the chip to
+ * perform the tasks of accessing a serial eeprom, testing termination
+ * strength, and performing cable detection. On the aic7860, most of
+ * these features are handled on chip, but on the aic7855 an attached
+ * aic3800 does the grunt work.
+ */
+register SPIOCAP {
+ address 0x01b
+ access_mode RW
+ count 10
+ field SOFT1 0x80
+ field SOFT0 0x40
+ field SOFTCMDEN 0x20
+ field EXT_BRDCTL 0x10 /* External Board control */
+ field SEEPROM 0x08 /* External serial eeprom logic */
+ field EEPROM 0x04 /* Writable external BIOS ROM */
+ field ROM 0x02 /* Logic for accessing external ROM */
+ field SSPIOCPS 0x01 /* Termination and cable detection */
+ dont_generate_debug_code
+}
+
+register BRDCTL {
+ address 0x01d
+ count 11
+ field BRDDAT7 0x80
+ field BRDDAT6 0x40
+ field BRDDAT5 0x20
+ field BRDSTB 0x10
+ field BRDCS 0x08
+ field BRDRW 0x04
+ field BRDCTL1 0x02
+ field BRDCTL0 0x01
+ /* 7890 Definitions */
+ field BRDDAT4 0x10
+ field BRDDAT3 0x08
+ field BRDDAT2 0x04
+ field BRDRW_ULTRA2 0x02
+ field BRDSTB_ULTRA2 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Serial EEPROM Control (p. 4-92 in 7870 Databook)
+ * Controls the reading and writing of an external serial 1-bit
+ * EEPROM Device. In order to access the serial EEPROM, you must
+ * first set the SEEMS bit that generates a request to the memory
+ * port for access to the serial EEPROM device. When the memory
+ * port is not busy servicing another request, it reconfigures
+ * to allow access to the serial EEPROM. When this happens, SEERDY
+ * gets set high to verify that the memory port access has been
+ * granted.
+ *
+ * After successful arbitration for the memory port, the SEECS bit of
+ * the SEECTL register is connected to the chip select. The SEECK,
+ * SEEDO, and SEEDI are connected to the clock, data out, and data in
+ * lines respectively. The SEERDY bit of SEECTL is useful in that it
+ * gives us an 800 nsec timer. After a write to the SEECTL register,
+ * the SEERDY goes high 800 nsec later. The one exception to this is
+ * when we first request access to the memory port. The SEERDY goes
+ * high to signify that access has been granted and, for this case, has
+ * no implied timing.
+ *
+ * See 93cx6.c for detailed information on the protocol necessary to
+ * read the serial EEPROM.
+ */
+register SEECTL {
+ address 0x01e
+ count 11
+ field EXTARBACK 0x80
+ field EXTARBREQ 0x40
+ field SEEMS 0x20
+ field SEERDY 0x10
+ field SEECS 0x08
+ field SEECK 0x04
+ field SEEDO 0x02
+ field SEEDI 0x01
+ dont_generate_debug_code
+}
+/*
+ * SCSI Block Control (p. 3-32)
+ * Controls Bus type and channel selection. In a twin channel configuration
+ * addresses 0x00-0x1e are gated to the appropriate channel based on this
+ * register. SELWIDE allows for the coexistence of 8bit and 16bit devices
+ * on a wide bus.
+ */
+register SBLKCTL {
+ address 0x01f
+ access_mode RW
+ field DIAGLEDEN 0x80 /* Aic78X0 only */
+ field DIAGLEDON 0x40 /* Aic78X0 only */
+ field AUTOFLUSHDIS 0x20
+ field SELBUSB 0x08
+ field ENAB40 0x08 /* LVD transceiver active */
+ field ENAB20 0x04 /* SE/HVD transceiver active */
+ field SELWIDE 0x02
+ field XCVR 0x01 /* External transceiver active */
+}
+
+/*
+ * Sequencer Control (p. 3-33)
+ * Error detection mode and speed configuration
+ */
+register SEQCTL {
+ address 0x060
+ access_mode RW
+ count 15
+ field PERRORDIS 0x80
+ field PAUSEDIS 0x40
+ field FAILDIS 0x20
+ field FASTMODE 0x10
+ field BRKADRINTEN 0x08
+ field STEP 0x04
+ field SEQRESET 0x02
+ field LOADRAM 0x01
+}
+
+/*
+ * Sequencer RAM Data (p. 3-34)
+ * Single byte window into the Scratch Ram area starting at the address
+ * specified by SEQADDR0 and SEQADDR1. To write a full word, simply write
+ * four bytes in succession. The SEQADDRs will increment after the most
+ * significant byte is written
+ */
+register SEQRAM {
+ address 0x061
+ access_mode RW
+ count 2
+ dont_generate_debug_code
+}
+
+/*
+ * Sequencer Address Registers (p. 3-35)
+ * Only the first bit of SEQADDR1 holds addressing information
+ */
+register SEQADDR0 {
+ address 0x062
+ access_mode RW
+ dont_generate_debug_code
+}
+
+register SEQADDR1 {
+ address 0x063
+ access_mode RW
+ count 8
+ mask SEQADDR1_MASK 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Accumulator
+ * We cheat by passing arguments in the Accumulator up to the kernel driver
+ */
+register ACCUM {
+ address 0x064
+ access_mode RW
+ accumulator
+ dont_generate_debug_code
+}
+
+register SINDEX {
+ address 0x065
+ access_mode RW
+ sindex
+ dont_generate_debug_code
+}
+
+register DINDEX {
+ address 0x066
+ access_mode RW
+ dont_generate_debug_code
+}
+
+register ALLONES {
+ address 0x069
+ access_mode RO
+ allones
+ dont_generate_debug_code
+}
+
+register ALLZEROS {
+ address 0x06a
+ access_mode RO
+ allzeros
+ dont_generate_debug_code
+}
+
+register NONE {
+ address 0x06a
+ access_mode WO
+ none
+ dont_generate_debug_code
+}
+
+register FLAGS {
+ address 0x06b
+ access_mode RO
+ count 18
+ field ZERO 0x02
+ field CARRY 0x01
+ dont_generate_debug_code
+}
+
+register SINDIR {
+ address 0x06c
+ access_mode RO
+ dont_generate_debug_code
+}
+
+register DINDIR {
+ address 0x06d
+ access_mode WO
+ dont_generate_debug_code
+}
+
+register FUNCTION1 {
+ address 0x06e
+ access_mode RW
+}
+
+register STACK {
+ address 0x06f
+ access_mode RO
+ count 5
+ dont_generate_debug_code
+}
+
+const STACK_SIZE 4
+
+/*
+ * Board Control (p. 3-43)
+ */
+register BCTL {
+ address 0x084
+ access_mode RW
+ field ACE 0x08
+ field ENABLE 0x01
+}
+
+/*
+ * On the aic78X0 chips, Board Control is replaced by the DSCommand
+ * register (p. 4-64)
+ */
+register DSCOMMAND0 {
+ address 0x084
+ access_mode RW
+ count 7
+ field CACHETHEN 0x80 /* Cache Threshold enable */
+ field DPARCKEN 0x40 /* Data Parity Check Enable */
+ field MPARCKEN 0x20 /* Memory Parity Check Enable */
+ field EXTREQLCK 0x10 /* External Request Lock */
+ /* aic7890/91/96/97 only */
+ field INTSCBRAMSEL 0x08 /* Internal SCB RAM Select */
+ field RAMPS 0x04 /* External SCB RAM Present */
+ field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */
+ field CIOPARCKEN 0x01 /* Internal bus parity error enable */
+ dont_generate_debug_code
+}
+
+register DSCOMMAND1 {
+ address 0x085
+ access_mode RW
+ mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */
+ field HADDLDSEL1 0x02 /* Host Address Load Select Bits */
+ field HADDLDSEL0 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Bus On/Off Time (p. 3-44) aic7770 only
+ */
+register BUSTIME {
+ address 0x085
+ access_mode RW
+ count 2
+ mask BOFF 0xf0
+ mask BON 0x0f
+ dont_generate_debug_code
+}
+
+/*
+ * Bus Speed (p. 3-45) aic7770 only
+ */
+register BUSSPD {
+ address 0x086
+ access_mode RW
+ count 2
+ mask DFTHRSH 0xc0
+ mask STBOFF 0x38
+ mask STBON 0x07
+ mask DFTHRSH_100 0xc0
+ mask DFTHRSH_75 0x80
+ dont_generate_debug_code
+}
+
+/* aic7850/55/60/70/80/95 only */
+register DSPCISTATUS {
+ address 0x086
+ count 4
+ mask DFTHRSH_100 0xc0
+ dont_generate_debug_code
+}
+
+/* aic7890/91/96/97 only */
+register HS_MAILBOX {
+ address 0x086
+ mask HOST_MAILBOX 0xF0
+ mask SEQ_MAILBOX 0x0F
+ mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */
+ dont_generate_debug_code
+}
+
+const HOST_MAILBOX_SHIFT 4
+const SEQ_MAILBOX_SHIFT 0
+
+/*
+ * Host Control (p. 3-47) R/W
+ * Overall host control of the device.
+ */
+register HCNTRL {
+ address 0x087
+ access_mode RW
+ count 14
+ field POWRDN 0x40
+ field SWINT 0x10
+ field IRQMS 0x08
+ field PAUSE 0x04
+ field INTEN 0x02
+ field CHIPRST 0x01
+ field CHIPRSTACK 0x01
+ dont_generate_debug_code
+}
+
+/*
+ * Host Address (p. 3-48)
+ * This register contains the address of the byte about
+ * to be transferred across the host bus.
+ */
+register HADDR {
+ address 0x088
+ size 4
+ access_mode RW
+ dont_generate_debug_code
+}
+
+register HCNT {
+ address 0x08c
+ size 3
+ access_mode RW
+ dont_generate_debug_code
+}
+
+/*
+ * SCB Pointer (p. 3-49)
+ * Gate one of the SCBs into the SCBARRAY window.
+ */
+register SCBPTR {
+ address 0x090
+ access_mode RW
+ dont_generate_debug_code
+}
+
+/*
+ * Interrupt Status (p. 3-50)
+ * Status for system interrupts
+ */
+register INTSTAT {
+ address 0x091
+ access_mode RW
+ field BRKADRINT 0x08
+ field SCSIINT 0x04
+ field CMDCMPLT 0x02
+ field SEQINT 0x01
+ mask BAD_PHASE SEQINT /* unknown scsi bus phase */
+ mask SEND_REJECT 0x10|SEQINT /* sending a message reject */
+ mask PROTO_VIOLATION 0x20|SEQINT /* SCSI protocol violation */
+ mask NO_MATCH 0x30|SEQINT /* no cmd match for reconnect */
+ mask IGN_WIDE_RES 0x40|SEQINT /* Complex IGN Wide Res Msg */
+ mask PDATA_REINIT 0x50|SEQINT /*
+ * Returned to data phase
+ * that requires data
+ * transfer pointers to be
+ * recalculated from the
+ * transfer residual.
+ */
+ mask HOST_MSG_LOOP 0x60|SEQINT /*
+ * The bus is ready for the
+ * host to perform another
+ * message transaction. This
+ * mechanism is used for things
+ * like sync/wide negotiation
+ * that require a kernel based
+ * message state engine.
+ */
+ mask BAD_STATUS 0x70|SEQINT /* Bad status from target */
+ mask PERR_DETECTED 0x80|SEQINT /*
+ * Either the phase_lock
+ * or inb_next routine has
+ * noticed a parity error.
+ */
+ mask DATA_OVERRUN 0x90|SEQINT /*
+ * Target attempted to write
+ * beyond the bounds of its
+ * command.
+ */
+ mask MKMSG_FAILED 0xa0|SEQINT /*
+ * Target completed command
+ * without honoring our ATN
+ * request to issue a message.
+ */
+ mask MISSED_BUSFREE 0xb0|SEQINT /*
+ * The sequencer never saw
+ * the bus go free after
+ * either a command complete
+ * or disconnect message.
+ */
+ mask SCB_MISMATCH 0xc0|SEQINT /*
+ * Downloaded SCB's tag does
+ * not match the entry we
+ * intended to download.
+ */
+ mask NO_FREE_SCB 0xd0|SEQINT /*
+ * get_free_or_disc_scb failed.
+ */
+ mask OUT_OF_RANGE 0xe0|SEQINT
+
+ mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */
+ mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
+ dont_generate_debug_code
+}
+
+/*
+ * Hard Error (p. 3-53)
+ * Reporting of catastrophic errors. You usually cannot recover from
+ * these without a full board reset.
+ */
+register ERROR {
+ address 0x092
+ access_mode RO
+ count 26
+ field CIOPARERR 0x80 /* Ultra2 only */
+ field PCIERRSTAT 0x40 /* PCI only */
+ field MPARERR 0x20 /* PCI only */
+ field DPARERR 0x10 /* PCI only */
+ field SQPARERR 0x08
+ field ILLOPCODE 0x04
+ field ILLSADDR 0x02
+ field ILLHADDR 0x01
+}
+
+/*
+ * Clear Interrupt Status (p. 3-52)
+ */
+register CLRINT {
+ address 0x092
+ access_mode WO
+ count 24
+ field CLRPARERR 0x10 /* PCI only */
+ field CLRBRKADRINT 0x08
+ field CLRSCSIINT 0x04
+ field CLRCMDINT 0x02
+ field CLRSEQINT 0x01
+ dont_generate_debug_code
+}
+
+register DFCNTRL {
+ address 0x093
+ access_mode RW
+ field PRELOADEN 0x80 /* aic7890 only */
+ field WIDEODD 0x40
+ field SCSIEN 0x20
+ field SDMAEN 0x10
+ field SDMAENACK 0x10
+ field HDMAEN 0x08
+ field HDMAENACK 0x08
+ field DIRECTION 0x04
+ field FIFOFLUSH 0x02
+ field FIFORESET 0x01
+}
+
+register DFSTATUS {
+ address 0x094
+ access_mode RO
+ field PRELOAD_AVAIL 0x80
+ field DFCACHETH 0x40
+ field FIFOQWDEMP 0x20
+ field MREQPEND 0x10
+ field HDONE 0x08
+ field DFTHRESH 0x04
+ field FIFOFULL 0x02
+ field FIFOEMP 0x01
+}
+
+register DFWADDR {
+ address 0x95
+ access_mode RW
+ dont_generate_debug_code
+}
+
+register DFRADDR {
+ address 0x97
+ access_mode RW
+}
+
+register DFDAT {
+ address 0x099
+ access_mode RW
+ dont_generate_debug_code
+}
+
+/*
+ * SCB Auto Increment (p. 3-59)
+ * Byte offset into the SCB Array and an optional bit to allow auto
+ * incrementing of the address during download and upload operations
+ */
+register SCBCNT {
+ address 0x09a
+ access_mode RW
+ count 1
+ field SCBAUTO 0x80
+ mask SCBCNT_MASK 0x1f
+ dont_generate_debug_code
+}
+
+/*
+ * Queue In FIFO (p. 3-60)
+ * Input queue for queued SCBs (commands that the seqencer has yet to start)
+ */
+register QINFIFO {
+ address 0x09b
+ access_mode RW
+ count 12
+ dont_generate_debug_code
+}
+
+/*
+ * Queue In Count (p. 3-60)
+ * Number of queued SCBs
+ */
+register QINCNT {
+ address 0x09c
+ access_mode RO
+}
+
+/*
+ * Queue Out FIFO (p. 3-61)
+ * Queue of SCBs that have completed and await the host
+ */
+register QOUTFIFO {
+ address 0x09d
+ access_mode WO
+ count 7
+ dont_generate_debug_code
+}
+
+register CRCCONTROL1 {
+ address 0x09d
+ access_mode RW
+ count 3
+ field CRCONSEEN 0x80
+ field CRCVALCHKEN 0x40
+ field CRCENDCHKEN 0x20
+ field CRCREQCHKEN 0x10
+ field TARGCRCENDEN 0x08
+ field TARGCRCCNTEN 0x04
+ dont_generate_debug_code
+}
+
+
+/*
+ * Queue Out Count (p. 3-61)
+ * Number of queued SCBs in the Out FIFO
+ */
+register QOUTCNT {
+ address 0x09e
+ access_mode RO
+}
+
+register SCSIPHASE {
+ address 0x09e
+ access_mode RO
+ field STATUS_PHASE 0x20
+ field COMMAND_PHASE 0x10
+ field MSG_IN_PHASE 0x08
+ field MSG_OUT_PHASE 0x04
+ field DATA_IN_PHASE 0x02
+ field DATA_OUT_PHASE 0x01
+ mask DATA_PHASE_MASK 0x03
+}
+
+/*
+ * Special Function
+ */
+register SFUNCT {
+ address 0x09f
+ access_mode RW
+ count 4
+ field ALT_MODE 0x80
+ dont_generate_debug_code
+}
+
+/*
+ * SCB Definition (p. 5-4)
+ */
+scb {
+ address 0x0a0
+ size 64
+
+ SCB_CDB_PTR {
+ size 4
+ alias SCB_RESIDUAL_DATACNT
+ alias SCB_CDB_STORE
+ dont_generate_debug_code
+ }
+ SCB_RESIDUAL_SGPTR {
+ size 4
+ dont_generate_debug_code
+ }
+ SCB_SCSI_STATUS {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_TARGET_PHASES {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_TARGET_DATA_DIR {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_TARGET_ITAG {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_DATAPTR {
+ size 4
+ dont_generate_debug_code
+ }
+ SCB_DATACNT {
+ /*
+ * The last byte is really the high address bits for
+ * the data address.
+ */
+ size 4
+ field SG_LAST_SEG 0x80 /* In the fourth byte */
+ mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
+ dont_generate_debug_code
+ }
+ SCB_SGPTR {
+ size 4
+ field SG_RESID_VALID 0x04 /* In the first byte */
+ field SG_FULL_RESID 0x02 /* In the first byte */
+ field SG_LIST_NULL 0x01 /* In the first byte */
+ dont_generate_debug_code
+ }
+ SCB_CONTROL {
+ size 1
+ field TARGET_SCB 0x80
+ field STATUS_RCVD 0x80
+ field DISCENB 0x40
+ field TAG_ENB 0x20
+ field MK_MESSAGE 0x10
+ field ULTRAENB 0x08
+ field DISCONNECTED 0x04
+ mask SCB_TAG_TYPE 0x03
+ }
+ SCB_SCSIID {
+ size 1
+ field TWIN_CHNLB 0x80
+ mask TWIN_TID 0x70
+ mask TID 0xf0
+ mask OID 0x0f
+ }
+ SCB_LUN {
+ field SCB_XFERLEN_ODD 0x80
+ mask LID 0x3f
+ size 1
+ }
+ SCB_TAG {
+ size 1
+ }
+ SCB_CDB_LEN {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_SCSIRATE {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_SCSIOFFSET {
+ size 1
+ count 1
+ dont_generate_debug_code
+ }
+ SCB_NEXT {
+ size 1
+ dont_generate_debug_code
+ }
+ SCB_64_SPARE {
+ size 16
+ }
+ SCB_64_BTT {
+ size 16
+ dont_generate_debug_code
+ }
+}
+
+const SCB_UPLOAD_SIZE 32
+const SCB_DOWNLOAD_SIZE 32
+const SCB_DOWNLOAD_SIZE_64 48
+
+const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
+
+/* --------------------- AHA-2840-only definitions -------------------- */
+
+register SEECTL_2840 {
+ address 0x0c0
+ access_mode RW
+ count 2
+ field CS_2840 0x04
+ field CK_2840 0x02
+ field DO_2840 0x01
+ dont_generate_debug_code
+}
+
+register STATUS_2840 {
+ address 0x0c1
+ access_mode RW
+ count 4
+ field EEPROM_TF 0x80
+ mask BIOS_SEL 0x60
+ mask ADSEL 0x1e
+ field DI_2840 0x01
+ dont_generate_debug_code
+}
+
+/* --------------------- AIC-7870-only definitions -------------------- */
+
+register CCHADDR {
+ address 0x0E0
+ size 8
+ dont_generate_debug_code
+}
+
+register CCHCNT {
+ address 0x0E8
+ dont_generate_debug_code
+}
+
+register CCSGRAM {
+ address 0x0E9
+ dont_generate_debug_code
+}
+
+register CCSGADDR {
+ address 0x0EA
+ dont_generate_debug_code
+}
+
+register CCSGCTL {
+ address 0x0EB
+ field CCSGDONE 0x80
+ field CCSGEN 0x08
+ field SG_FETCH_NEEDED 0x02 /* Bit used for software state */
+ field CCSGRESET 0x01
+ dont_generate_debug_code
+}
+
+register CCSCBCNT {
+ address 0xEF
+ count 1
+ dont_generate_debug_code
+}
+
+register CCSCBCTL {
+ address 0x0EE
+ field CCSCBDONE 0x80
+ field ARRDONE 0x40 /* SCB Array prefetch done */
+ field CCARREN 0x10
+ field CCSCBEN 0x08
+ field CCSCBDIR 0x04
+ field CCSCBRESET 0x01
+ dont_generate_debug_code
+}
+
+register CCSCBADDR {
+ address 0x0ED
+ dont_generate_debug_code
+}
+
+register CCSCBRAM {
+ address 0xEC
+ dont_generate_debug_code
+}
+
+/*
+ * SCB bank address (7895/7896/97 only)
+ */
+register SCBBADDR {
+ address 0x0F0
+ access_mode RW
+ count 3
+ dont_generate_debug_code
+}
+
+register CCSCBPTR {
+ address 0x0F1
+ dont_generate_debug_code
+}
+
+register HNSCB_QOFF {
+ address 0x0F4
+ count 4
+ dont_generate_debug_code
+}
+
+register SNSCB_QOFF {
+ address 0x0F6
+ dont_generate_debug_code
+}
+
+register SDSCB_QOFF {
+ address 0x0F8
+ dont_generate_debug_code
+}
+
+register QOFF_CTLSTA {
+ address 0x0FA
+ field SCB_AVAIL 0x40
+ field SNSCB_ROLLOVER 0x20
+ field SDSCB_ROLLOVER 0x10
+ mask SCB_QSIZE 0x07
+ mask SCB_QSIZE_256 0x06
+ dont_generate_debug_code
+}
+
+register DFF_THRSH {
+ address 0x0FB
+ mask WR_DFTHRSH 0x70
+ mask RD_DFTHRSH 0x07
+ mask RD_DFTHRSH_MIN 0x00
+ mask RD_DFTHRSH_25 0x01
+ mask RD_DFTHRSH_50 0x02
+ mask RD_DFTHRSH_63 0x03
+ mask RD_DFTHRSH_75 0x04
+ mask RD_DFTHRSH_85 0x05
+ mask RD_DFTHRSH_90 0x06
+ mask RD_DFTHRSH_MAX 0x07
+ mask WR_DFTHRSH_MIN 0x00
+ mask WR_DFTHRSH_25 0x10
+ mask WR_DFTHRSH_50 0x20
+ mask WR_DFTHRSH_63 0x30
+ mask WR_DFTHRSH_75 0x40
+ mask WR_DFTHRSH_85 0x50
+ mask WR_DFTHRSH_90 0x60
+ mask WR_DFTHRSH_MAX 0x70
+ count 4
+ dont_generate_debug_code
+}
+
+register SG_CACHE_PRE {
+ access_mode WO
+ address 0x0fc
+ mask SG_ADDR_MASK 0xf8
+ field LAST_SEG 0x02
+ field LAST_SEG_DONE 0x01
+ dont_generate_debug_code
+}
+
+register SG_CACHE_SHADOW {
+ access_mode RO
+ address 0x0fc
+ mask SG_ADDR_MASK 0xf8
+ field LAST_SEG 0x02
+ field LAST_SEG_DONE 0x01
+ dont_generate_debug_code
+}
+/* ---------------------- Scratch RAM Offsets ------------------------- */
+/* These offsets are either to values that are initialized by the board's
+ * BIOS or are specified by the sequencer code.
+ *
+ * The host adapter card (at least the BIOS) uses 20-2f for SCSI
+ * device information, 32-33 and 5a-5f as well. As it turns out, the
+ * BIOS trashes 20-2f, writing the synchronous negotiation results
+ * on top of the BIOS values, so we re-use those for our per-target
+ * scratchspace (actually a value that can be copied directly into
+ * SCSIRATE). The kernel driver will enable synchronous negotiation
+ * for all targets that have a value other than 0 in the lower four
+ * bits of the target scratch space. This should work regardless of
+ * whether the bios has been installed.
+ */
+
+scratch_ram {
+ address 0x020
+ size 58
+
+ /*
+ * 1 byte per target starting at this address for configuration values
+ */
+ BUSY_TARGETS {
+ alias TARG_SCSIRATE
+ size 16
+ dont_generate_debug_code
+ }
+ /*
+ * Bit vector of targets that have ULTRA enabled as set by
+ * the BIOS. The Sequencer relies on a per-SCB field to
+ * control whether to enable Ultra transfers or not. During
+ * initialization, we read this field and reuse it for 2
+ * entries in the busy target table.
+ */
+ ULTRA_ENB {
+ alias CMDSIZE_TABLE
+ size 2
+ count 2
+ dont_generate_debug_code
+ }
+ /*
+ * Bit vector of targets that have disconnection disabled as set by
+ * the BIOS. The Sequencer relies in a per-SCB field to control the
+ * disconnect priveldge. During initialization, we read this field
+ * and reuse it for 2 entries in the busy target table.
+ */
+ DISC_DSB {
+ size 2
+ count 6
+ dont_generate_debug_code
+ }
+ CMDSIZE_TABLE_TAIL {
+ size 4
+ }
+ /*
+ * Partial transfer past cacheline end to be
+ * transferred using an extra S/G.
+ */
+ MWI_RESIDUAL {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * SCBID of the next SCB to be started by the controller.
+ */
+ NEXT_QUEUED_SCB {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * Single byte buffer used to designate the type or message
+ * to send to a target.
+ */
+ MSG_OUT {
+ size 1
+ dont_generate_debug_code
+ }
+ /* Parameters for DMA Logic */
+ DMAPARAMS {
+ size 1
+ count 12
+ field PRELOADEN 0x80
+ field WIDEODD 0x40
+ field SCSIEN 0x20
+ field SDMAEN 0x10
+ field SDMAENACK 0x10
+ field HDMAEN 0x08
+ field HDMAENACK 0x08
+ field DIRECTION 0x04 /* Set indicates PCI->SCSI */
+ field FIFOFLUSH 0x02
+ field FIFORESET 0x01
+ dont_generate_debug_code
+ }
+ SEQ_FLAGS {
+ size 1
+ field NOT_IDENTIFIED 0x80
+ field NO_CDB_SENT 0x40
+ field TARGET_CMD_IS_TAGGED 0x40
+ field DPHASE 0x20
+ /* Target flags */
+ field TARG_CMD_PENDING 0x10
+ field CMDPHASE_PENDING 0x08
+ field DPHASE_PENDING 0x04
+ field SPHASE_PENDING 0x02
+ field NO_DISCONNECT 0x01
+ }
+ /*
+ * Temporary storage for the
+ * target/channel/lun of a
+ * reconnecting target
+ */
+ SAVED_SCSIID {
+ size 1
+ dont_generate_debug_code
+ }
+ SAVED_LUN {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * The last bus phase as seen by the sequencer.
+ */
+ LASTPHASE {
+ size 1
+ field CDI 0x80
+ field IOI 0x40
+ field MSGI 0x20
+ mask PHASE_MASK CDI|IOI|MSGI
+ mask P_DATAOUT 0x00
+ mask P_DATAIN IOI
+ mask P_COMMAND CDI
+ mask P_MESGOUT CDI|MSGI
+ mask P_STATUS CDI|IOI
+ mask P_MESGIN CDI|IOI|MSGI
+ mask P_BUSFREE 0x01
+ }
+ /*
+ * head of list of SCBs awaiting
+ * selection
+ */
+ WAITING_SCBH {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * head of list of SCBs that are
+ * disconnected. Used for SCB
+ * paging.
+ */
+ DISCONNECTED_SCBH {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * head of list of SCBs that are
+ * not in use. Used for SCB paging.
+ */
+ FREE_SCBH {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * head of list of SCBs that have
+ * completed but have not been
+ * put into the qoutfifo.
+ */
+ COMPLETE_SCBH {
+ size 1
+ }
+ /*
+ * Address of the hardware scb array in the host.
+ */
+ HSCB_ADDR {
+ size 4
+ dont_generate_debug_code
+ }
+ /*
+ * Base address of our shared data with the kernel driver in host
+ * memory. This includes the qoutfifo and target mode
+ * incoming command queue.
+ */
+ SHARED_DATA_ADDR {
+ size 4
+ dont_generate_debug_code
+ }
+ KERNEL_QINPOS {
+ size 1
+ dont_generate_debug_code
+ }
+ QINPOS {
+ size 1
+ dont_generate_debug_code
+ }
+ QOUTPOS {
+ size 1
+ dont_generate_debug_code
+ }
+ /*
+ * Kernel and sequencer offsets into the queue of
+ * incoming target mode command descriptors. The
+ * queue is full when the KERNEL_TQINPOS == TQINPOS.
+ */
+ KERNEL_TQINPOS {
+ size 1
+ dont_generate_debug_code
+ }
+ TQINPOS {
+ size 1
+ dont_generate_debug_code
+ }
+ ARG_1 {
+ size 1
+ count 1
+ mask SEND_MSG 0x80
+ mask SEND_SENSE 0x40
+ mask SEND_REJ 0x20
+ mask MSGOUT_PHASEMIS 0x10
+ mask EXIT_MSG_LOOP 0x08
+ mask CONT_MSG_LOOP 0x04
+ mask CONT_TARG_SESSION 0x02
+ alias RETURN_1
+ dont_generate_debug_code
+ }
+ ARG_2 {
+ size 1
+ alias RETURN_2
+ dont_generate_debug_code
+ }
+
+ /*
+ * Snapshot of MSG_OUT taken after each message is sent.
+ */
+ LAST_MSG {
+ size 1
+ alias TARG_IMMEDIATE_SCB
+ dont_generate_debug_code
+ }
+
+ /*
+ * Sequences the kernel driver has okayed for us. This allows
+ * the driver to do things like prevent initiator or target
+ * operations.
+ */
+ SCSISEQ_TEMPLATE {
+ size 1
+ field ENSELO 0x40
+ field ENSELI 0x20
+ field ENRSELI 0x10
+ field ENAUTOATNO 0x08
+ field ENAUTOATNI 0x04
+ field ENAUTOATNP 0x02
+ dont_generate_debug_code
+ }
+}
+
+scratch_ram {
+ address 0x056
+ size 4
+ /*
+ * These scratch ram locations are initialized by the 274X BIOS.
+ * We reuse them after capturing the BIOS settings during
+ * initialization.
+ */
+
+ /*
+ * The initiator specified tag for this target mode transaction.
+ */
+ HA_274_BIOSGLOBAL {
+ size 1
+ field HA_274_EXTENDED_TRANS 0x01
+ alias INITIATOR_TAG
+ count 1
+ dont_generate_debug_code
+ }
+
+ SEQ_FLAGS2 {
+ size 1
+ field SCB_DMA 0x01
+ field TARGET_MSG_PENDING 0x02
+ dont_generate_debug_code
+ }
+}
+
+scratch_ram {
+ address 0x05a
+ size 6
+ /*
+ * These are reserved registers in the card's scratch ram on the 2742.
+ * The EISA configuraiton chip is mapped here. On Rev E. of the
+ * aic7770, the sequencer can use this area for scratch, but the
+ * host cannot directly access these registers. On later chips, this
+ * area can be read and written by both the host and the sequencer.
+ * Even on later chips, many of these locations are initialized by
+ * the BIOS.
+ */
+ SCSICONF {
+ size 1
+ count 12
+ field TERM_ENB 0x80
+ field RESET_SCSI 0x40
+ field ENSPCHK 0x20
+ mask HSCSIID 0x07 /* our SCSI ID */
+ mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */
+ dont_generate_debug_code
+ }
+ INTDEF {
+ address 0x05c
+ size 1
+ count 1
+ field EDGE_TRIG 0x80
+ mask VECTOR 0x0f
+ dont_generate_debug_code
+ }
+ HOSTCONF {
+ address 0x05d
+ size 1
+ count 1
+ dont_generate_debug_code
+ }
+ HA_274_BIOSCTRL {
+ address 0x05f
+ size 1
+ count 1
+ mask BIOSMODE 0x30
+ mask BIOSDISABLED 0x30
+ field CHANNEL_B_PRIMARY 0x08
+ dont_generate_debug_code
+ }
+}
+
+scratch_ram {
+ address 0x070
+ size 16
+
+ /*
+ * Per target SCSI offset values for Ultra2 controllers.
+ */
+ TARG_OFFSET {
+ size 16
+ count 1
+ dont_generate_debug_code
+ }
+}
+
+const TID_SHIFT 4
+const SCB_LIST_NULL 0xff
+const TARGET_CMD_CMPLT 0xfe
+
+const CCSGADDR_MAX 0x80
+const CCSGRAM_MAXSEGS 16
+
+/* WDTR Message values */
+const BUS_8_BIT 0x00
+const BUS_16_BIT 0x01
+const BUS_32_BIT 0x02
+
+/* Offset maximums */
+const MAX_OFFSET_8BIT 0x0f
+const MAX_OFFSET_16BIT 0x08
+const MAX_OFFSET_ULTRA2 0x7f
+const MAX_OFFSET 0x7f
+const HOST_MSG 0xff
+
+/* Target mode command processing constants */
+const CMD_GROUP_CODE_SHIFT 0x05
+
+const STATUS_BUSY 0x08
+const STATUS_QUEUE_FULL 0x28
+const TARGET_DATA_IN 1
+
+/*
+ * Downloaded (kernel inserted) constants
+ */
+/* Offsets into the SCBID array where different data is stored */
+const QOUTFIFO_OFFSET download
+const QINFIFO_OFFSET download
+const CACHESIZE_MASK download
+const INVERTED_CACHESIZE_MASK download
+const SG_PREFETCH_CNT download
+const SG_PREFETCH_ALIGN_MASK download
+const SG_PREFETCH_ADDR_MASK download
diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq
new file mode 100644
index 000000000..e60041e8f
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx.seq
@@ -0,0 +1,2399 @@
+/*
+ * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $"
+PATCH_ARG_LIST = "struct ahc_softc *ahc"
+PREFIX = "ahc_"
+
+#include "aic7xxx.reg"
+#include "scsi_message.h"
+
+/*
+ * A few words on the waiting SCB list:
+ * After starting the selection hardware, we check for reconnecting targets
+ * as well as for our selection to complete just in case the reselection wins
+ * bus arbitration. The problem with this is that we must keep track of the
+ * SCB that we've already pulled from the QINFIFO and started the selection
+ * on just in case the reselection wins so that we can retry the selection at
+ * a later time. This problem cannot be resolved by holding a single entry
+ * in scratch ram since a reconnecting target can request sense and this will
+ * create yet another SCB waiting for selection. The solution used here is to
+ * use byte 27 of the SCB as a pseudo-next pointer and to thread a list
+ * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes,
+ * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to
+ * this list every time a request sense occurs or after completing a non-tagged
+ * command for which a second SCB has been queued. The sequencer will
+ * automatically consume the entries.
+ */
+
+bus_free_sel:
+ /*
+ * Turn off the selection hardware. We need to reset the
+ * selection request in order to perform a new selection.
+ */
+ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP;
+ and SIMODE1, ~ENBUSFREE;
+poll_for_work:
+ call clear_target_state;
+ and SXFRCTL0, ~SPIOEN;
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ clr SCSIBUSL;
+ }
+ test SCSISEQ, ENSELO jnz poll_for_selection;
+ if ((ahc->features & AHC_TWIN) != 0) {
+ xor SBLKCTL,SELBUSB; /* Toggle to the other bus */
+ test SCSISEQ, ENSELO jnz poll_for_selection;
+ }
+ cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting;
+poll_for_work_loop:
+ if ((ahc->features & AHC_TWIN) != 0) {
+ xor SBLKCTL,SELBUSB; /* Toggle to the other bus */
+ }
+ test SSTAT0, SELDO|SELDI jnz selection;
+test_queue:
+ /* Has the driver posted any work for us? */
+BEGIN_CRITICAL;
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop;
+ } else {
+ mov A, QINPOS;
+ cmp KERNEL_QINPOS, A je poll_for_work_loop;
+ }
+ mov ARG_1, NEXT_QUEUED_SCB;
+
+ /*
+ * We have at least one queued SCB now and we don't have any
+ * SCBs in the list of SCBs awaiting selection. Allocate a
+ * card SCB for the host's SCB and get to work on it.
+ */
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ mov ALLZEROS call get_free_or_disc_scb;
+ } else {
+ /* In the non-paging case, the SCBID == hardware SCB index */
+ mov SCBPTR, ARG_1;
+ }
+ or SEQ_FLAGS2, SCB_DMA;
+END_CRITICAL;
+dma_queued_scb:
+ /*
+ * DMA the SCB from host ram into the current SCB location.
+ */
+ mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
+ mov ARG_1 call dma_scb;
+ /*
+ * Check one last time to see if this SCB was canceled
+ * before we completed the DMA operation. If it was,
+ * the QINFIFO next pointer will not match our saved
+ * value.
+ */
+ mov A, ARG_1;
+BEGIN_CRITICAL;
+ cmp NEXT_QUEUED_SCB, A jne abort_qinscb;
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ cmp SCB_TAG, A je . + 2;
+ mvi SCB_MISMATCH call set_seqint;
+ }
+ mov NEXT_QUEUED_SCB, SCB_NEXT;
+ mov SCB_NEXT,WAITING_SCBH;
+ mov WAITING_SCBH, SCBPTR;
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ mov NONE, SNSCB_QOFF;
+ } else {
+ inc QINPOS;
+ }
+ and SEQ_FLAGS2, ~SCB_DMA;
+END_CRITICAL;
+start_waiting:
+ /*
+ * Start the first entry on the waiting SCB list.
+ */
+ mov SCBPTR, WAITING_SCBH;
+ call start_selection;
+
+poll_for_selection:
+ /*
+ * Twin channel devices cannot handle things like SELTO
+ * interrupts on the "background" channel. So, while
+ * selecting, keep polling the current channel until
+ * either a selection or reselection occurs.
+ */
+ test SSTAT0, SELDO|SELDI jz poll_for_selection;
+
+selection:
+ /*
+ * We aren't expecting a bus free, so interrupt
+ * the kernel driver if it happens.
+ */
+ mvi CLRSINT1,CLRBUSFREE;
+ if ((ahc->features & AHC_DT) == 0) {
+ or SIMODE1, ENBUSFREE;
+ }
+
+ /*
+ * Guard against a bus free after (re)selection
+ * but prior to enabling the busfree interrupt. SELDI
+ * and SELDO will be cleared in that case.
+ */
+ test SSTAT0, SELDI|SELDO jz bus_free_sel;
+ test SSTAT0,SELDO jnz select_out;
+select_in:
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+ test SSTAT0, TARGET jz initiator_reselect;
+ }
+ mvi CLRSINT0, CLRSELDI;
+
+ /*
+ * We've just been selected. Assert BSY and
+ * setup the phase for receiving messages
+ * from the target.
+ */
+ mvi SCSISIGO, P_MESGOUT|BSYO;
+
+ /*
+ * Setup the DMA for sending the identify and
+ * command information.
+ */
+ mvi SEQ_FLAGS, CMDPHASE_PENDING;
+
+ mov A, TQINPOS;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mvi DINDEX, CCHADDR;
+ mvi SHARED_DATA_ADDR call set_32byte_addr;
+ mvi CCSCBCTL, CCSCBRESET;
+ } else {
+ mvi DINDEX, HADDR;
+ mvi SHARED_DATA_ADDR call set_32byte_addr;
+ mvi DFCNTRL, FIFORESET;
+ }
+
+ /* Initiator that selected us */
+ and SAVED_SCSIID, SELID_MASK, SELID;
+ /* The Target ID we were selected at */
+ if ((ahc->features & AHC_MULTI_TID) != 0) {
+ and A, OID, TARGIDIN;
+ } else if ((ahc->features & AHC_ULTRA2) != 0) {
+ and A, OID, SCSIID_ULTRA2;
+ } else {
+ and A, OID, SCSIID;
+ }
+ or SAVED_SCSIID, A;
+ if ((ahc->features & AHC_TWIN) != 0) {
+ test SBLKCTL, SELBUSB jz . + 2;
+ or SAVED_SCSIID, TWIN_CHNLB;
+ }
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mov CCSCBRAM, SAVED_SCSIID;
+ } else {
+ mov DFDAT, SAVED_SCSIID;
+ }
+
+ /*
+ * If ATN isn't asserted, the target isn't interested
+ * in talking to us. Go directly to bus free.
+ * XXX SCSI-1 may require us to assume lun 0 if
+ * ATN is false.
+ */
+ test SCSISIGI, ATNI jz target_busfree;
+
+ /*
+ * Watch ATN closely now as we pull in messages from the
+ * initiator. We follow the guidlines from section 6.5
+ * of the SCSI-2 spec for what messages are allowed when.
+ */
+ call target_inb;
+
+ /*
+ * Our first message must be one of IDENTIFY, ABORT, or
+ * BUS_DEVICE_RESET.
+ */
+ test DINDEX, MSG_IDENTIFYFLAG jz host_target_message_loop;
+ /* Store for host */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mov CCSCBRAM, DINDEX;
+ } else {
+ mov DFDAT, DINDEX;
+ }
+ and SAVED_LUN, MSG_IDENTIFY_LUNMASK, DINDEX;
+
+ /* Remember for disconnection decision */
+ test DINDEX, MSG_IDENTIFY_DISCFLAG jnz . + 2;
+ /* XXX Honor per target settings too */
+ or SEQ_FLAGS, NO_DISCONNECT;
+
+ test SCSISIGI, ATNI jz ident_messages_done;
+ call target_inb;
+ /*
+ * If this is a tagged request, the tagged message must
+ * immediately follow the identify. We test for a valid
+ * tag message by seeing if it is >= MSG_SIMPLE_Q_TAG and
+ * < MSG_IGN_WIDE_RESIDUE.
+ */
+ add A, -MSG_SIMPLE_Q_TAG, DINDEX;
+ jnc ident_messages_done_msg_pending;
+ add A, -MSG_IGN_WIDE_RESIDUE, DINDEX;
+ jc ident_messages_done_msg_pending;
+
+ /* Store for host */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mov CCSCBRAM, DINDEX;
+ } else {
+ mov DFDAT, DINDEX;
+ }
+
+ /*
+ * If the initiator doesn't feel like providing a tag number,
+ * we've got a failed selection and must transition to bus
+ * free.
+ */
+ test SCSISIGI, ATNI jz target_busfree;
+
+ /*
+ * Store the tag for the host.
+ */
+ call target_inb;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mov CCSCBRAM, DINDEX;
+ } else {
+ mov DFDAT, DINDEX;
+ }
+ mov INITIATOR_TAG, DINDEX;
+ or SEQ_FLAGS, TARGET_CMD_IS_TAGGED;
+
+ident_messages_done:
+ /* Terminate the ident list */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mvi CCSCBRAM, SCB_LIST_NULL;
+ } else {
+ mvi DFDAT, SCB_LIST_NULL;
+ }
+ or SEQ_FLAGS, TARG_CMD_PENDING;
+ test SEQ_FLAGS2, TARGET_MSG_PENDING
+ jnz target_mesgout_pending;
+ test SCSISIGI, ATNI jnz target_mesgout_continue;
+ jmp target_ITloop;
+
+
+ident_messages_done_msg_pending:
+ or SEQ_FLAGS2, TARGET_MSG_PENDING;
+ jmp ident_messages_done;
+
+ /*
+ * Pushed message loop to allow the kernel to
+ * run it's own target mode message state engine.
+ */
+host_target_message_loop:
+ mvi HOST_MSG_LOOP call set_seqint;
+ cmp RETURN_1, EXIT_MSG_LOOP je target_ITloop;
+ test SSTAT0, SPIORDY jz .;
+ jmp host_target_message_loop;
+ }
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+/*
+ * Reselection has been initiated by a target. Make a note that we've been
+ * reselected, but haven't seen an IDENTIFY message from the target yet.
+ */
+initiator_reselect:
+ /* XXX test for and handle ONE BIT condition */
+ or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN;
+ and SAVED_SCSIID, SELID_MASK, SELID;
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ and A, OID, SCSIID_ULTRA2;
+ } else {
+ and A, OID, SCSIID;
+ }
+ or SAVED_SCSIID, A;
+ if ((ahc->features & AHC_TWIN) != 0) {
+ test SBLKCTL, SELBUSB jz . + 2;
+ or SAVED_SCSIID, TWIN_CHNLB;
+ }
+ mvi CLRSINT0, CLRSELDI;
+ jmp ITloop;
+}
+
+abort_qinscb:
+ call add_scb_to_free_list;
+ jmp poll_for_work_loop;
+
+start_selection:
+ /*
+ * If bus reset interrupts have been disabled (from a previous
+ * reset), re-enable them now. Resets are only of interest
+ * when we have outstanding transactions, so we can safely
+ * defer re-enabling the interrupt until, as an initiator,
+ * we start sending out transactions again.
+ */
+ test SIMODE1, ENSCSIRST jnz . + 3;
+ mvi CLRSINT1, CLRSCSIRSTI;
+ or SIMODE1, ENSCSIRST;
+ if ((ahc->features & AHC_TWIN) != 0) {
+ and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */
+ test SCB_SCSIID, TWIN_CHNLB jz . + 2;
+ or SINDEX, SELBUSB;
+ mov SBLKCTL,SINDEX; /* select channel */
+ }
+initialize_scsiid:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ mov SCSIID_ULTRA2, SCB_SCSIID;
+ } else if ((ahc->features & AHC_TWIN) != 0) {
+ and SCSIID, TWIN_TID|OID, SCB_SCSIID;
+ } else {
+ mov SCSIID, SCB_SCSIID;
+ }
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ mov SINDEX, SCSISEQ_TEMPLATE;
+ test SCB_CONTROL, TARGET_SCB jz . + 2;
+ or SINDEX, TEMODE;
+ mov SCSISEQ, SINDEX ret;
+ } else {
+ mov SCSISEQ, SCSISEQ_TEMPLATE ret;
+ }
+
+/*
+ * Initialize transfer settings with SCB provided settings.
+ */
+set_transfer_settings:
+ if ((ahc->features & AHC_ULTRA) != 0) {
+ test SCB_CONTROL, ULTRAENB jz . + 2;
+ or SXFRCTL0, FAST20;
+ }
+ /*
+ * Initialize SCSIRATE with the appropriate value for this target.
+ */
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ bmov SCSIRATE, SCB_SCSIRATE, 2 ret;
+ } else {
+ mov SCSIRATE, SCB_SCSIRATE ret;
+ }
+
+if ((ahc->flags & AHC_TARGETROLE) != 0) {
+/*
+ * We carefully toggle SPIOEN to allow us to return the
+ * message byte we receive so it can be checked prior to
+ * driving REQ on the bus for the next byte.
+ */
+target_inb:
+ /*
+ * Drive REQ on the bus by enabling SCSI PIO.
+ */
+ or SXFRCTL0, SPIOEN;
+ /* Wait for the byte */
+ test SSTAT0, SPIORDY jz .;
+ /* Prevent our read from triggering another REQ */
+ and SXFRCTL0, ~SPIOEN;
+ /* Save latched contents */
+ mov DINDEX, SCSIDATL ret;
+}
+
+/*
+ * After the selection, remove this SCB from the "waiting SCB"
+ * list. This is achieved by simply moving our "next" pointer into
+ * WAITING_SCBH. Our next pointer will be set to null the next time this
+ * SCB is used, so don't bother with it now.
+ */
+select_out:
+ /* Turn off the selection hardware */
+ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP, SCSISEQ;
+ mov SCBPTR, WAITING_SCBH;
+ mov WAITING_SCBH,SCB_NEXT;
+ mov SAVED_SCSIID, SCB_SCSIID;
+ and SAVED_LUN, LID, SCB_LUN;
+ call set_transfer_settings;
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ test SSTAT0, TARGET jz initiator_select;
+
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
+
+ /*
+ * Put tag in connonical location since not
+ * all connections have an SCB.
+ */
+ mov INITIATOR_TAG, SCB_TARGET_ITAG;
+
+ /*
+ * We've just re-selected an initiator.
+ * Assert BSY and setup the phase for
+ * sending our identify messages.
+ */
+ mvi P_MESGIN|BSYO call change_phase;
+ mvi CLRSINT0, CLRSELDO;
+
+ /*
+ * Start out with a simple identify message.
+ */
+ or SAVED_LUN, MSG_IDENTIFYFLAG call target_outb;
+
+ /*
+ * If we are the result of a tagged command, send
+ * a simple Q tag and the tag id.
+ */
+ test SCB_CONTROL, TAG_ENB jz . + 3;
+ mvi MSG_SIMPLE_Q_TAG call target_outb;
+ mov SCB_TARGET_ITAG call target_outb;
+target_synccmd:
+ /*
+ * Now determine what phases the host wants us
+ * to go through.
+ */
+ mov SEQ_FLAGS, SCB_TARGET_PHASES;
+
+ test SCB_CONTROL, MK_MESSAGE jz target_ITloop;
+ mvi P_MESGIN|BSYO call change_phase;
+ jmp host_target_message_loop;
+target_ITloop:
+ /*
+ * Start honoring ATN signals now that
+ * we properly identified ourselves.
+ */
+ test SCSISIGI, ATNI jnz target_mesgout;
+ test SEQ_FLAGS, CMDPHASE_PENDING jnz target_cmdphase;
+ test SEQ_FLAGS, DPHASE_PENDING jnz target_dphase;
+ test SEQ_FLAGS, SPHASE_PENDING jnz target_sphase;
+
+ /*
+ * No more work to do. Either disconnect or not depending
+ * on the state of NO_DISCONNECT.
+ */
+ test SEQ_FLAGS, NO_DISCONNECT jz target_disconnect;
+ mvi TARG_IMMEDIATE_SCB, SCB_LIST_NULL;
+ call complete_target_cmd;
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ mov ALLZEROS call get_free_or_disc_scb;
+ }
+ cmp TARG_IMMEDIATE_SCB, SCB_LIST_NULL je .;
+ mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
+ mov TARG_IMMEDIATE_SCB call dma_scb;
+ call set_transfer_settings;
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
+ jmp target_synccmd;
+
+target_mesgout:
+ mvi SCSISIGO, P_MESGOUT|BSYO;
+target_mesgout_continue:
+ call target_inb;
+target_mesgout_pending:
+ and SEQ_FLAGS2, ~TARGET_MSG_PENDING;
+ /* Local Processing goes here... */
+ jmp host_target_message_loop;
+
+target_disconnect:
+ mvi P_MESGIN|BSYO call change_phase;
+ test SEQ_FLAGS, DPHASE jz . + 2;
+ mvi MSG_SAVEDATAPOINTER call target_outb;
+ mvi MSG_DISCONNECT call target_outb;
+
+target_busfree_wait:
+ /* Wait for preceding I/O session to complete. */
+ test SCSISIGI, ACKI jnz .;
+target_busfree:
+ and SIMODE1, ~ENBUSFREE;
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ clr SCSIBUSL;
+ }
+ clr SCSISIGO;
+ mvi LASTPHASE, P_BUSFREE;
+ call complete_target_cmd;
+ jmp poll_for_work;
+
+target_cmdphase:
+ /*
+ * The target has dropped ATN (doesn't want to abort or BDR)
+ * and we believe this selection to be valid. If the ring
+ * buffer for new commands is full, return busy or queue full.
+ */
+ if ((ahc->features & AHC_HS_MAILBOX) != 0) {
+ and A, HOST_TQINPOS, HS_MAILBOX;
+ } else {
+ mov A, KERNEL_TQINPOS;
+ }
+ cmp TQINPOS, A jne tqinfifo_has_space;
+ mvi P_STATUS|BSYO call change_phase;
+ test SEQ_FLAGS, TARGET_CMD_IS_TAGGED jz . + 3;
+ mvi STATUS_QUEUE_FULL call target_outb;
+ jmp target_busfree_wait;
+ mvi STATUS_BUSY call target_outb;
+ jmp target_busfree_wait;
+tqinfifo_has_space:
+ mvi P_COMMAND|BSYO call change_phase;
+ call target_inb;
+ mov A, DINDEX;
+ /* Store for host */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mov CCSCBRAM, A;
+ } else {
+ mov DFDAT, A;
+ }
+
+ /*
+ * Determine the number of bytes to read
+ * based on the command group code via table lookup.
+ * We reuse the first 8 bytes of the TARG_SCSIRATE
+ * BIOS array for this table. Count is one less than
+ * the total for the command since we've already fetched
+ * the first byte.
+ */
+ shr A, CMD_GROUP_CODE_SHIFT;
+ add SINDEX, CMDSIZE_TABLE, A;
+ mov A, SINDIR;
+
+ test A, 0xFF jz command_phase_done;
+ or SXFRCTL0, SPIOEN;
+command_loop:
+ test SSTAT0, SPIORDY jz .;
+ cmp A, 1 jne . + 2;
+ and SXFRCTL0, ~SPIOEN; /* Last Byte */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mov CCSCBRAM, SCSIDATL;
+ } else {
+ mov DFDAT, SCSIDATL;
+ }
+ dec A;
+ test A, 0xFF jnz command_loop;
+
+command_phase_done:
+ and SEQ_FLAGS, ~CMDPHASE_PENDING;
+ jmp target_ITloop;
+
+target_dphase:
+ /*
+ * Data phases on the bus are from the
+ * perspective of the initiator. The dma
+ * code looks at LASTPHASE to determine the
+ * data direction of the DMA. Toggle it for
+ * target transfers.
+ */
+ xor LASTPHASE, IOI, SCB_TARGET_DATA_DIR;
+ or SCB_TARGET_DATA_DIR, BSYO call change_phase;
+ jmp p_data;
+
+target_sphase:
+ mvi P_STATUS|BSYO call change_phase;
+ mvi LASTPHASE, P_STATUS;
+ mov SCB_SCSI_STATUS call target_outb;
+ /* XXX Watch for ATN or parity errors??? */
+ mvi SCSISIGO, P_MESGIN|BSYO;
+ /* MSG_CMDCMPLT is 0, but we can't do an immediate of 0 */
+ mov ALLZEROS call target_outb;
+ jmp target_busfree_wait;
+
+complete_target_cmd:
+ test SEQ_FLAGS, TARG_CMD_PENDING jnz . + 2;
+ mov SCB_TAG jmp complete_post;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ /* Set the valid byte */
+ mvi CCSCBADDR, 24;
+ mov CCSCBRAM, ALLONES;
+ mvi CCHCNT, 28;
+ or CCSCBCTL, CCSCBEN|CCSCBRESET;
+ test CCSCBCTL, CCSCBDONE jz .;
+ clr CCSCBCTL;
+ } else {
+ /* Set the valid byte */
+ or DFCNTRL, FIFORESET;
+ mvi DFWADDR, 3; /* Third 64bit word or byte 24 */
+ mov DFDAT, ALLONES;
+ mvi 28 call set_hcnt;
+ or DFCNTRL, HDMAEN|FIFOFLUSH;
+ call dma_finish;
+ }
+ inc TQINPOS;
+ mvi INTSTAT,CMDCMPLT ret;
+ }
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+initiator_select:
+ or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN;
+ /*
+ * As soon as we get a successful selection, the target
+ * should go into the message out phase since we have ATN
+ * asserted.
+ */
+ mvi MSG_OUT, MSG_IDENTIFYFLAG;
+ mvi SEQ_FLAGS, NO_CDB_SENT;
+ mvi CLRSINT0, CLRSELDO;
+
+ /*
+ * Main loop for information transfer phases. Wait for the
+ * target to assert REQ before checking MSG, C/D and I/O for
+ * the bus phase.
+ */
+mesgin_phasemis:
+ITloop:
+ call phase_lock;
+
+ mov A, LASTPHASE;
+
+ test A, ~P_DATAIN jz p_data;
+ cmp A,P_COMMAND je p_command;
+ cmp A,P_MESGOUT je p_mesgout;
+ cmp A,P_STATUS je p_status;
+ cmp A,P_MESGIN je p_mesgin;
+
+ mvi BAD_PHASE call set_seqint;
+ jmp ITloop; /* Try reading the bus again. */
+
+await_busfree:
+ and SIMODE1, ~ENBUSFREE;
+ mov NONE, SCSIDATL; /* Ack the last byte */
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ clr SCSIBUSL; /* Prevent bit leakage durint SELTO */
+ }
+ and SXFRCTL0, ~SPIOEN;
+ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
+ test SSTAT1,REQINIT|BUSFREE jz .;
+ test SSTAT1, BUSFREE jnz poll_for_work;
+ mvi MISSED_BUSFREE call set_seqint;
+}
+
+clear_target_state:
+ /*
+ * We assume that the kernel driver may reset us
+ * at any time, even in the middle of a DMA, so
+ * clear DFCNTRL too.
+ */
+ clr DFCNTRL;
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
+
+ /*
+ * We don't know the target we will connect to,
+ * so default to narrow transfers to avoid
+ * parity problems.
+ */
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ bmov SCSIRATE, ALLZEROS, 2;
+ } else {
+ clr SCSIRATE;
+ if ((ahc->features & AHC_ULTRA) != 0) {
+ and SXFRCTL0, ~(FAST20);
+ }
+ }
+ mvi LASTPHASE, P_BUSFREE;
+ /* clear target specific flags */
+ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret;
+
+sg_advance:
+ clr A; /* add sizeof(struct scatter) */
+ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
+ adc SCB_RESIDUAL_SGPTR[1],A;
+ adc SCB_RESIDUAL_SGPTR[2],A;
+ adc SCB_RESIDUAL_SGPTR[3],A ret;
+
+if ((ahc->features & AHC_CMD_CHAN) != 0) {
+disable_ccsgen:
+ test CCSGCTL, CCSGEN jz return;
+ test CCSGCTL, CCSGDONE jz .;
+disable_ccsgen_fetch_done:
+ clr CCSGCTL;
+ test CCSGCTL, CCSGEN jnz .;
+ ret;
+idle_loop:
+ /*
+ * Do we need any more segments for this transfer?
+ */
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz return;
+
+ /* Did we just finish fetching segs? */
+ cmp CCSGCTL, CCSGEN|CCSGDONE je idle_sgfetch_complete;
+
+ /* Are we actively fetching segments? */
+ test CCSGCTL, CCSGEN jnz return;
+
+ /*
+ * Do we have any prefetch left???
+ */
+ cmp CCSGADDR, SG_PREFETCH_CNT jne idle_sg_avail;
+
+ /*
+ * Need to fetch segments, but we can only do that
+ * if the command channel is completely idle. Make
+ * sure we don't have an SCB prefetch going on.
+ */
+ test CCSCBCTL, CCSCBEN jnz return;
+
+ /*
+ * We fetch a "cacheline aligned" and sized amount of data
+ * so we don't end up referencing a non-existent page.
+ * Cacheline aligned is in quotes because the kernel will
+ * set the prefetch amount to a reasonable level if the
+ * cacheline size is unknown.
+ */
+ mvi CCHCNT, SG_PREFETCH_CNT;
+ and CCHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
+ bmov CCHADDR[1], SCB_RESIDUAL_SGPTR[1], 3;
+ mvi CCSGCTL, CCSGEN|CCSGRESET ret;
+idle_sgfetch_complete:
+ call disable_ccsgen_fetch_done;
+ and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
+idle_sg_avail:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ /* Does the hardware have space for another SG entry? */
+ test DFSTATUS, PRELOAD_AVAIL jz return;
+ bmov HADDR, CCSGRAM, 7;
+ bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr;
+ }
+ call sg_advance;
+ mov SINDEX, SCB_RESIDUAL_SGPTR[0];
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2;
+ or SINDEX, LAST_SEG;
+ mov SG_CACHE_PRE, SINDEX;
+ /* Load the segment */
+ or DFCNTRL, PRELOADEN;
+ }
+ ret;
+}
+
+if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) {
+/*
+ * Calculate the trailing portion of this S/G segment that cannot
+ * be transferred using memory write and invalidate PCI transactions.
+ * XXX Can we optimize this for PCI writes only???
+ */
+calc_mwi_residual:
+ /*
+ * If the ending address is on a cacheline boundary,
+ * there is no need for an extra segment.
+ */
+ mov A, HCNT[0];
+ add A, A, HADDR[0];
+ and A, CACHESIZE_MASK;
+ test A, 0xFF jz return;
+
+ /*
+ * If the transfer is less than a cachline,
+ * there is no need for an extra segment.
+ */
+ test HCNT[1], 0xFF jnz calc_mwi_residual_final;
+ test HCNT[2], 0xFF jnz calc_mwi_residual_final;
+ add NONE, INVERTED_CACHESIZE_MASK, HCNT[0];
+ jnc return;
+
+calc_mwi_residual_final:
+ mov MWI_RESIDUAL, A;
+ not A;
+ inc A;
+ add HCNT[0], A;
+ adc HCNT[1], -1;
+ adc HCNT[2], -1 ret;
+}
+
+p_data:
+ test SEQ_FLAGS,NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed;
+ mvi PROTO_VIOLATION call set_seqint;
+p_data_allowed:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN;
+ } else {
+ mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET;
+ }
+ test LASTPHASE, IOI jnz . + 2;
+ or DMAPARAMS, DIRECTION;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ /* We don't have any valid S/G elements */
+ mvi CCSGADDR, SG_PREFETCH_CNT;
+ }
+ test SEQ_FLAGS, DPHASE jz data_phase_initialize;
+
+ /*
+ * If we re-enter the data phase after going through another
+ * phase, our transfer location has almost certainly been
+ * corrupted by the interveining, non-data, transfers. Ask
+ * the host driver to fix us up based on the transfer residual.
+ */
+ mvi PDATA_REINIT call set_seqint;
+ jmp data_phase_loop;
+
+data_phase_initialize:
+ /* We have seen a data phase for the first time */
+ or SEQ_FLAGS, DPHASE;
+
+ /*
+ * Initialize the DMA address and counter from the SCB.
+ * Also set SCB_RESIDUAL_SGPTR, including the LAST_SEG
+ * flag in the highest byte of the data count. We cannot
+ * modify the saved values in the SCB until we see a save
+ * data pointers message.
+ */
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ /* The lowest address byte must be loaded last. */
+ mov SCB_DATACNT[3] call set_hhaddr;
+ }
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov HADDR, SCB_DATAPTR, 7;
+ bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
+ } else {
+ mvi DINDEX, HADDR;
+ mvi SCB_DATAPTR call bcopy_7;
+ mvi DINDEX, SCB_RESIDUAL_DATACNT + 3;
+ mvi SCB_DATACNT + 3 call bcopy_5;
+ }
+ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) {
+ call calc_mwi_residual;
+ }
+ and SCB_RESIDUAL_SGPTR[0], ~SG_FULL_RESID;
+
+ if ((ahc->features & AHC_ULTRA2) == 0) {
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov STCNT, HCNT, 3;
+ } else {
+ call set_stcnt_from_hcnt;
+ }
+ }
+
+data_phase_loop:
+ /* Guard against overruns */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_inbounds;
+
+ /*
+ * Turn on `Bit Bucket' mode, wait until the target takes
+ * us to another phase, and then notify the host.
+ */
+ and DMAPARAMS, DIRECTION;
+ mov DFCNTRL, DMAPARAMS;
+ or SXFRCTL1,BITBUCKET;
+ if ((ahc->features & AHC_DT) == 0) {
+ test SSTAT1,PHASEMIS jz .;
+ } else {
+ test SCSIPHASE, DATA_PHASE_MASK jnz .;
+ }
+ and SXFRCTL1, ~BITBUCKET;
+ mvi DATA_OVERRUN call set_seqint;
+ jmp ITloop;
+
+data_phase_inbounds:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ mov SINDEX, SCB_RESIDUAL_SGPTR[0];
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2;
+ or SINDEX, LAST_SEG;
+ mov SG_CACHE_PRE, SINDEX;
+ mov DFCNTRL, DMAPARAMS;
+ultra2_dma_loop:
+ call idle_loop;
+ /*
+ * The transfer is complete if either the last segment
+ * completes or the target changes phase.
+ */
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz ultra2_dmafinish;
+ if ((ahc->features & AHC_DT) == 0) {
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ /*
+ * As a target, we control the phases,
+ * so ignore PHASEMIS.
+ */
+ test SSTAT0, TARGET jnz ultra2_dma_loop;
+ }
+ if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+ test SSTAT1,PHASEMIS jz ultra2_dma_loop;
+ }
+ } else {
+ test DFCNTRL, SCSIEN jnz ultra2_dma_loop;
+ }
+
+ultra2_dmafinish:
+ /*
+ * The transfer has terminated either due to a phase
+ * change, and/or the completion of the last segment.
+ * We have two goals here. Do as much other work
+ * as possible while the data fifo drains on a read
+ * and respond as quickly as possible to the standard
+ * messages (save data pointers/disconnect and command
+ * complete) that usually follow a data phase.
+ */
+ if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) {
+ /*
+ * On chips with broken auto-flush, start
+ * the flushing process now. We'll poke
+ * the chip from time to time to keep the
+ * flush process going as we complete the
+ * data phase.
+ */
+ or DFCNTRL, FIFOFLUSH;
+ }
+ /*
+ * We assume that, even though data may still be
+ * transferring to the host, that the SCSI side of
+ * the DMA engine is now in a static state. This
+ * allows us to update our notion of where we are
+ * in this transfer.
+ *
+ * If, by chance, we stopped before being able
+ * to fetch additional segments for this transfer,
+ * yet the last S/G was completely exhausted,
+ * call our idle loop until it is able to load
+ * another segment. This will allow us to immediately
+ * pickup on the next segment on the next data phase.
+ *
+ * If we happened to stop on the last segment, then
+ * our residual information is still correct from
+ * the idle loop and there is no need to perform
+ * any fixups.
+ */
+ultra2_ensure_sg:
+ test SG_CACHE_SHADOW, LAST_SEG jz ultra2_shvalid;
+ /* Record if we've consumed all S/G entries */
+ test SSTAT2, SHVALID jnz residuals_correct;
+ or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL;
+ jmp residuals_correct;
+
+ultra2_shvalid:
+ test SSTAT2, SHVALID jnz sgptr_fixup;
+ call idle_loop;
+ jmp ultra2_ensure_sg;
+
+sgptr_fixup:
+ /*
+ * Fixup the residual next S/G pointer. The S/G preload
+ * feature of the chip allows us to load two elements
+ * in addition to the currently active element. We
+ * store the bottom byte of the next S/G pointer in
+ * the SG_CACEPTR register so we can restore the
+ * correct value when the DMA completes. If the next
+ * sg ptr value has advanced to the point where higher
+ * bytes in the address have been affected, fix them
+ * too.
+ */
+ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
+ test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
+ add SCB_RESIDUAL_SGPTR[1], -1;
+ adc SCB_RESIDUAL_SGPTR[2], -1;
+ adc SCB_RESIDUAL_SGPTR[3], -1;
+sgptr_fixup_done:
+ and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
+ /* We are not the last seg */
+ and SCB_RESIDUAL_DATACNT[3], ~SG_LAST_SEG;
+residuals_correct:
+ /*
+ * Go ahead and shut down the DMA engine now.
+ * In the future, we'll want to handle end of
+ * transfer messages prior to doing this, but this
+ * requires similar restructuring for pre-ULTRA2
+ * controllers.
+ */
+ test DMAPARAMS, DIRECTION jnz ultra2_fifoempty;
+ultra2_fifoflush:
+ if ((ahc->features & AHC_DT) == 0) {
+ if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) {
+ /*
+ * On Rev A of the aic7890, the autoflush
+ * feature doesn't function correctly.
+ * Perform an explicit manual flush. During
+ * a manual flush, the FIFOEMP bit becomes
+ * true every time the PCI FIFO empties
+ * regardless of the state of the SCSI FIFO.
+ * It can take up to 4 clock cycles for the
+ * SCSI FIFO to get data into the PCI FIFO
+ * and for FIFOEMP to de-assert. Here we
+ * guard against this condition by making
+ * sure the FIFOEMP bit stays on for 5 full
+ * clock cycles.
+ */
+ or DFCNTRL, FIFOFLUSH;
+ test DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+ test DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+ test DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+ test DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+ }
+ test DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+ } else {
+ /*
+ * We enable the auto-ack feature on DT capable
+ * controllers. This means that the controller may
+ * have already transferred some overrun bytes into
+ * the data FIFO and acked them on the bus. The only
+ * way to detect this situation is to wait for
+ * LAST_SEG_DONE to come true on a completed transfer
+ * and then test to see if the data FIFO is non-empty.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL
+ jz ultra2_wait_fifoemp;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
+ /*
+ * FIFOEMP can lag LAST_SEG_DONE. Wait a few
+ * clocks before calling this an overrun.
+ */
+ test DFSTATUS, FIFOEMP jnz ultra2_fifoempty;
+ test DFSTATUS, FIFOEMP jnz ultra2_fifoempty;
+ test DFSTATUS, FIFOEMP jnz ultra2_fifoempty;
+ /* Overrun */
+ jmp data_phase_loop;
+ultra2_wait_fifoemp:
+ test DFSTATUS, FIFOEMP jz .;
+ }
+ultra2_fifoempty:
+ /* Don't clobber an inprogress host data transfer */
+ test DFSTATUS, MREQPEND jnz ultra2_fifoempty;
+ultra2_dmahalt:
+ and DFCNTRL, ~(SCSIEN|HDMAEN);
+ test DFCNTRL, SCSIEN|HDMAEN jnz .;
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ /*
+ * Keep HHADDR cleared for future, 32bit addressed
+ * only, DMA operations.
+ *
+ * Due to bayonette style S/G handling, our residual
+ * data must be "fixed up" once the transfer is halted.
+ * Here we fixup the HSHADDR stored in the high byte
+ * of the residual data cnt. By postponing the fixup,
+ * we can batch the clearing of HADDR with the fixup.
+ * If we halted on the last segment, the residual is
+ * already correct. If we are not on the last
+ * segment, copy the high address directly from HSHADDR.
+ * We don't need to worry about maintaining the
+ * SG_LAST_SEG flag as it will always be false in the
+ * case where an update is required.
+ */
+ or DSCOMMAND1, HADDLDSEL0;
+ test SG_CACHE_SHADOW, LAST_SEG jnz . + 2;
+ mov SCB_RESIDUAL_DATACNT[3], SHADDR;
+ clr HADDR;
+ and DSCOMMAND1, ~HADDLDSEL0;
+ }
+ } else {
+ /* If we are the last SG block, tell the hardware. */
+ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+ && ahc->pci_cachesize != 0) {
+ test MWI_RESIDUAL, 0xFF jnz dma_mid_sg;
+ }
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg;
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ test SSTAT0, TARGET jz dma_last_sg;
+ if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) {
+ test DMAPARAMS, DIRECTION jz dma_mid_sg;
+ }
+ }
+dma_last_sg:
+ and DMAPARAMS, ~WIDEODD;
+dma_mid_sg:
+ /* Start DMA data transfer. */
+ mov DFCNTRL, DMAPARAMS;
+dma_loop:
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ call idle_loop;
+ }
+ test SSTAT0,DMADONE jnz dma_dmadone;
+ test SSTAT1,PHASEMIS jz dma_loop; /* ie. underrun */
+dma_phasemis:
+ /*
+ * We will be "done" DMAing when the transfer count goes to
+ * zero, or the target changes the phase (in light of this,
+ * it makes sense that the DMA circuitry doesn't ACK when
+ * PHASEMIS is active). If we are doing a SCSI->Host transfer,
+ * the data FIFO should be flushed auto-magically on STCNT=0
+ * or a phase change, so just wait for FIFO empty status.
+ */
+dma_checkfifo:
+ test DFCNTRL,DIRECTION jnz dma_fifoempty;
+dma_fifoflush:
+ test DFSTATUS,FIFOEMP jz dma_fifoflush;
+dma_fifoempty:
+ /* Don't clobber an inprogress host data transfer */
+ test DFSTATUS, MREQPEND jnz dma_fifoempty;
+
+ /*
+ * Now shut off the DMA and make sure that the DMA
+ * hardware has actually stopped. Touching the DMA
+ * counters, etc. while a DMA is active will result
+ * in an ILLSADDR exception.
+ */
+dma_dmadone:
+ and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN);
+dma_halt:
+ /*
+ * Some revisions of the aic78XX have a problem where, if the
+ * data fifo is full, but the PCI input latch is not empty,
+ * HDMAEN cannot be cleared. The fix used here is to drain
+ * the prefetched but unused data from the data fifo until
+ * there is space for the input latch to drain.
+ */
+ if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) {
+ mov NONE, DFDAT;
+ }
+ test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt;
+
+ /* See if we have completed this last segment */
+ test STCNT[0], 0xff jnz data_phase_finish;
+ test STCNT[1], 0xff jnz data_phase_finish;
+ test STCNT[2], 0xff jnz data_phase_finish;
+
+ /*
+ * Advance the scatter-gather pointers if needed
+ */
+ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+ && ahc->pci_cachesize != 0) {
+ test MWI_RESIDUAL, 0xFF jz no_mwi_resid;
+ /*
+ * Reload HADDR from SHADDR and setup the
+ * count to be the size of our residual.
+ */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov HADDR, SHADDR, 4;
+ mov HCNT, MWI_RESIDUAL;
+ bmov HCNT[1], ALLZEROS, 2;
+ } else {
+ mvi DINDEX, HADDR;
+ mvi SHADDR call bcopy_4;
+ mov MWI_RESIDUAL call set_hcnt;
+ }
+ clr MWI_RESIDUAL;
+ jmp sg_load_done;
+no_mwi_resid:
+ }
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz sg_load;
+ or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL;
+ jmp data_phase_finish;
+sg_load:
+ /*
+ * Load the next SG element's data address and length
+ * into the DMA engine. If we don't have hardware
+ * to perform a prefetch, we'll have to fetch the
+ * segment from host memory first.
+ */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ /* Wait for the idle loop to complete */
+ test CCSGCTL, CCSGEN jz . + 3;
+ call idle_loop;
+ test CCSGCTL, CCSGEN jnz . - 1;
+ bmov HADDR, CCSGRAM, 7;
+ /*
+ * Workaround for flaky external SCB RAM
+ * on certain aic7895 setups. It seems
+ * unable to handle direct transfers from
+ * S/G ram to certain SCB locations.
+ */
+ mov SINDEX, CCSGRAM;
+ mov SCB_RESIDUAL_DATACNT[3], SINDEX;
+ } else {
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ mov ALLZEROS call set_hhaddr;
+ }
+ mvi DINDEX, HADDR;
+ mvi SCB_RESIDUAL_SGPTR call bcopy_4;
+
+ mvi SG_SIZEOF call set_hcnt;
+
+ or DFCNTRL, HDMAEN|DIRECTION|FIFORESET;
+
+ call dma_finish;
+
+ mvi DINDEX, HADDR;
+ call dfdat_in_7;
+ mov SCB_RESIDUAL_DATACNT[3], DFDAT;
+ }
+
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr;
+
+ /*
+ * The lowest address byte must be loaded
+ * last as it triggers the computation of
+ * some items in the PCI block. The ULTRA2
+ * chips do this on PRELOAD.
+ */
+ mov HADDR, HADDR;
+ }
+ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+ && ahc->pci_cachesize != 0) {
+ call calc_mwi_residual;
+ }
+
+ /* Point to the new next sg in memory */
+ call sg_advance;
+
+sg_load_done:
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov STCNT, HCNT, 3;
+ } else {
+ call set_stcnt_from_hcnt;
+ }
+
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ test SSTAT0, TARGET jnz data_phase_loop;
+ }
+ }
+data_phase_finish:
+ /*
+ * If the target has left us in data phase, loop through
+ * the dma code again. In the case of ULTRA2 adapters,
+ * we should only loop if there is a data overrun. For
+ * all other adapters, we'll loop after each S/G element
+ * is loaded as well as if there is an overrun.
+ */
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ test SSTAT0, TARGET jnz data_phase_done;
+ }
+ if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+ test SSTAT1, REQINIT jz .;
+ if ((ahc->features & AHC_DT) == 0) {
+ test SSTAT1,PHASEMIS jz data_phase_loop;
+ } else {
+ test SCSIPHASE, DATA_PHASE_MASK jnz data_phase_loop;
+ }
+ }
+
+data_phase_done:
+ /*
+ * After a DMA finishes, save the SG and STCNT residuals back into
+ * the SCB. We use STCNT instead of HCNT, since it's a reflection
+ * of how many bytes were transferred on the SCSI (as opposed to the
+ * host) bus.
+ */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ /* Kill off any pending prefetch */
+ call disable_ccsgen;
+ }
+
+ if ((ahc->features & AHC_ULTRA2) == 0) {
+ /*
+ * Clear the high address byte so that all other DMA
+ * operations, which use 32bit addressing, can assume
+ * HHADDR is 0.
+ */
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ mov ALLZEROS call set_hhaddr;
+ }
+ }
+
+ /*
+ * Update our residual information before the information is
+ * lost by some other type of SCSI I/O (e.g. PIO). If we have
+ * transferred all data, no update is needed.
+ *
+ */
+ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jnz residual_update_done;
+ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+ && ahc->pci_cachesize != 0) {
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ test MWI_RESIDUAL, 0xFF jz bmov_resid;
+ }
+ mov A, MWI_RESIDUAL;
+ add SCB_RESIDUAL_DATACNT[0], A, STCNT[0];
+ clr A;
+ adc SCB_RESIDUAL_DATACNT[1], A, STCNT[1];
+ adc SCB_RESIDUAL_DATACNT[2], A, STCNT[2];
+ clr MWI_RESIDUAL;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ jmp . + 2;
+bmov_resid:
+ bmov SCB_RESIDUAL_DATACNT, STCNT, 3;
+ }
+ } else if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov SCB_RESIDUAL_DATACNT, STCNT, 3;
+ } else {
+ mov SCB_RESIDUAL_DATACNT[0], STCNT[0];
+ mov SCB_RESIDUAL_DATACNT[1], STCNT[1];
+ mov SCB_RESIDUAL_DATACNT[2], STCNT[2];
+ }
+residual_update_done:
+ /*
+ * Since we've been through a data phase, the SCB_RESID* fields
+ * are now initialized. Clear the full residual flag.
+ */
+ and SCB_SGPTR[0], ~SG_FULL_RESID;
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ /* Clear the channel in case we return to data phase later */
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
+ }
+
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ test SEQ_FLAGS, DPHASE_PENDING jz ITloop;
+ and SEQ_FLAGS, ~DPHASE_PENDING;
+ /*
+ * For data-in phases, wait for any pending acks from the
+ * initiator before changing phase. We only need to
+ * send Ignore Wide Residue messages for data-in phases.
+ */
+ test DFCNTRL, DIRECTION jz target_ITloop;
+ test SSTAT1, REQINIT jnz .;
+ test SCB_LUN, SCB_XFERLEN_ODD jz target_ITloop;
+ test SCSIRATE, WIDEXFER jz target_ITloop;
+ /*
+ * Issue an Ignore Wide Residue Message.
+ */
+ mvi P_MESGIN|BSYO call change_phase;
+ mvi MSG_IGN_WIDE_RESIDUE call target_outb;
+ mvi 1 call target_outb;
+ jmp target_ITloop;
+ } else {
+ jmp ITloop;
+ }
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+/*
+ * Command phase. Set up the DMA registers and let 'er rip.
+ */
+p_command:
+ test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
+ mvi PROTO_VIOLATION call set_seqint;
+p_command_okay:
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ bmov HCNT[0], SCB_CDB_LEN, 1;
+ bmov HCNT[1], ALLZEROS, 2;
+ mvi SG_CACHE_PRE, LAST_SEG;
+ } else if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov STCNT[0], SCB_CDB_LEN, 1;
+ bmov STCNT[1], ALLZEROS, 2;
+ } else {
+ mov STCNT[0], SCB_CDB_LEN;
+ clr STCNT[1];
+ clr STCNT[2];
+ }
+ add NONE, -13, SCB_CDB_LEN;
+ mvi SCB_CDB_STORE jnc p_command_embedded;
+p_command_from_host:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ bmov HADDR[0], SCB_CDB_PTR, 4;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION);
+ } else {
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov HADDR[0], SCB_CDB_PTR, 4;
+ bmov HCNT, STCNT, 3;
+ } else {
+ mvi DINDEX, HADDR;
+ mvi SCB_CDB_PTR call bcopy_4;
+ mov SCB_CDB_LEN call set_hcnt;
+ }
+ mvi DFCNTRL, (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET);
+ }
+ jmp p_command_xfer;
+p_command_embedded:
+ /*
+ * The data fifo seems to require 4 byte aligned
+ * transfers from the sequencer. Force this to
+ * be the case by clearing HADDR[0] even though
+ * we aren't going to touch host memory.
+ */
+ clr HADDR[0];
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|DIRECTION);
+ bmov DFDAT, SCB_CDB_STORE, 12;
+ } else if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ /*
+ * On the 7895 the data FIFO will
+ * get corrupted if you try to dump
+ * data from external SCB memory into
+ * the FIFO while it is enabled. So,
+ * fill the fifo and then enable SCSI
+ * transfers.
+ */
+ mvi DFCNTRL, (DIRECTION|FIFORESET);
+ } else {
+ mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET);
+ }
+ bmov DFDAT, SCB_CDB_STORE, 12;
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFOFLUSH);
+ } else {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ } else {
+ mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET);
+ call copy_to_fifo_6;
+ call copy_to_fifo_6;
+ or DFCNTRL, FIFOFLUSH;
+ }
+p_command_xfer:
+ and SEQ_FLAGS, ~NO_CDB_SENT;
+ if ((ahc->features & AHC_DT) == 0) {
+ test SSTAT0, SDONE jnz . + 2;
+ test SSTAT1, PHASEMIS jz . - 1;
+ /*
+ * Wait for our ACK to go-away on it's own
+ * instead of being killed by SCSIEN getting cleared.
+ */
+ test SCSISIGI, ACKI jnz .;
+ } else {
+ test DFCNTRL, SCSIEN jnz .;
+ }
+ test SSTAT0, SDONE jnz p_command_successful;
+ /*
+ * Don't allow a data phase if the command
+ * was not fully transferred.
+ */
+ or SEQ_FLAGS, NO_CDB_SENT;
+p_command_successful:
+ and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN);
+ test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz .;
+ jmp ITloop;
+
+/*
+ * Status phase. Wait for the data byte to appear, then read it
+ * and store it into the SCB.
+ */
+p_status:
+ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
+p_status_okay:
+ mov SCB_SCSI_STATUS, SCSIDATL;
+ or SCB_CONTROL, STATUS_RCVD;
+ jmp ITloop;
+
+/*
+ * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full
+ * indentify message sequence and send it to the target. The host may
+ * override this behavior by setting the MK_MESSAGE bit in the SCB
+ * control byte. This will cause us to interrupt the host and allow
+ * it to handle the message phase completely on its own. If the bit
+ * associated with this target is set, we will also interrupt the host,
+ * thereby allowing it to send a message on the next selection regardless
+ * of the transaction being sent.
+ *
+ * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message.
+ * This is done to allow the host to send messages outside of an identify
+ * sequence while protecting the seqencer from testing the MK_MESSAGE bit
+ * on an SCB that might not be for the current nexus. (For example, a
+ * BDR message in response to a bad reselection would leave us pointed to
+ * an SCB that doesn't have anything to do with the current target).
+ *
+ * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
+ * bus device reset).
+ *
+ * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
+ * in case the target decides to put us in this phase for some strange
+ * reason.
+ */
+p_mesgout_retry:
+ /* Turn on ATN for the retry */
+ if ((ahc->features & AHC_DT) == 0) {
+ or SCSISIGO, ATNO, LASTPHASE;
+ } else {
+ mvi SCSISIGO, ATNO;
+ }
+p_mesgout:
+ mov SINDEX, MSG_OUT;
+ cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
+ test SCB_CONTROL,MK_MESSAGE jnz host_message_loop;
+p_mesgout_identify:
+ or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SAVED_LUN;
+ test SCB_CONTROL, DISCENB jnz . + 2;
+ and SINDEX, ~DISCENB;
+/*
+ * Send a tag message if TAG_ENB is set in the SCB control block.
+ * Use SCB_TAG (the position in the kernel's SCB array) as the tag value.
+ */
+p_mesgout_tag:
+ test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte;
+ mov SCSIDATL, SINDEX; /* Send the identify message */
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
+ and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
+ mov SCB_TAG jmp p_mesgout_onebyte;
+/*
+ * Interrupt the driver, and allow it to handle this message
+ * phase and any required retries.
+ */
+p_mesgout_from_host:
+ cmp SINDEX, HOST_MSG jne p_mesgout_onebyte;
+ jmp host_message_loop;
+
+p_mesgout_onebyte:
+ mvi CLRSINT1, CLRATNO;
+ mov SCSIDATL, SINDEX;
+
+/*
+ * If the next bus phase after ATN drops is message out, it means
+ * that the target is requesting that the last message(s) be resent.
+ */
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;
+
+p_mesgout_done:
+ mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */
+ mov LAST_MSG, MSG_OUT;
+ mvi MSG_OUT, MSG_NOOP; /* No message left */
+ jmp ITloop;
+
+/*
+ * Message in phase. Bytes are read using Automatic PIO mode.
+ */
+p_mesgin:
+ mvi ACCUM call inb_first; /* read the 1st message byte */
+
+ test A,MSG_IDENTIFYFLAG jnz mesgin_identify;
+ cmp A,MSG_DISCONNECT je mesgin_disconnect;
+ cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs;
+ cmp ALLZEROS,A je mesgin_complete;
+ cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs;
+ cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue;
+ cmp A,MSG_NOOP je mesgin_done;
+
+/*
+ * Pushed message loop to allow the kernel to
+ * run it's own message state engine. To avoid an
+ * extra nop instruction after signaling the kernel,
+ * we perform the phase_lock before checking to see
+ * if we should exit the loop and skip the phase_lock
+ * in the ITloop. Performing back to back phase_locks
+ * shouldn't hurt, but why do it twice...
+ */
+host_message_loop:
+ mvi HOST_MSG_LOOP call set_seqint;
+ call phase_lock;
+ cmp RETURN_1, EXIT_MSG_LOOP je ITloop + 1;
+ jmp host_message_loop;
+
+mesgin_ign_wide_residue:
+if ((ahc->features & AHC_WIDE) != 0) {
+ test SCSIRATE, WIDEXFER jz mesgin_reject;
+ /* Pull the residue byte */
+ mvi ARG_1 call inb_next;
+ cmp ARG_1, 0x01 jne mesgin_reject;
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
+ test SCB_LUN, SCB_XFERLEN_ODD jnz mesgin_done;
+ mvi IGN_WIDE_RES call set_seqint;
+ jmp mesgin_done;
+}
+
+mesgin_proto_violation:
+ mvi PROTO_VIOLATION call set_seqint;
+ jmp mesgin_done;
+mesgin_reject:
+ mvi MSG_MESSAGE_REJECT call mk_mesg;
+mesgin_done:
+ mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
+ jmp ITloop;
+
+/*
+ * We received a "command complete" message. Put the SCB_TAG into the QOUTFIFO,
+ * and trigger a completion interrupt. Before doing so, check to see if there
+ * is a residual or the status byte is something other than STATUS_GOOD (0).
+ * In either of these conditions, we upload the SCB back to the host so it can
+ * process this information. In the case of a non zero status byte, we
+ * additionally interrupt the kernel driver synchronously, allowing it to
+ * decide if sense should be retrieved. If the kernel driver wishes to request
+ * sense, it will fill the kernel SCB with a request sense command, requeue
+ * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting
+ * RETURN_1 to SEND_SENSE.
+ */
+mesgin_complete:
+
+ /*
+ * If ATN is raised, we still want to give the target a message.
+ * Perhaps there was a parity error on this last message byte.
+ * Either way, the target should take us to message out phase
+ * and then attempt to complete the command again. We should use a
+ * critical section here to guard against a timeout triggering
+ * for this command and setting ATN while we are still processing
+ * the completion.
+ test SCSISIGI, ATNI jnz mesgin_done;
+ */
+
+ /*
+ * If we are identified and have successfully sent the CDB,
+ * any status will do. Optimize this fast path.
+ */
+ test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted;
+
+ /*
+ * If the target never sent an identify message but instead went
+ * to mesgin to give an invalid message, let the host abort us.
+ */
+ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
+
+ /*
+ * If we recevied good status but never successfully sent the
+ * cdb, abort the command.
+ */
+ test SCB_SCSI_STATUS,0xff jnz complete_accepted;
+ test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
+
+complete_accepted:
+ /*
+ * See if we attempted to deliver a message but the target ingnored us.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz . + 2;
+ mvi MKMSG_FAILED call set_seqint;
+
+ /*
+ * Check for residuals
+ */
+ test SCB_SGPTR, SG_LIST_NULL jnz check_status;/* No xfer */
+ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
+ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
+check_status:
+ test SCB_SCSI_STATUS,0xff jz complete; /* Good Status? */
+upload_scb:
+ or SCB_SGPTR, SG_RESID_VALID;
+ mvi DMAPARAMS, FIFORESET;
+ mov SCB_TAG call dma_scb;
+ test SCB_SCSI_STATUS, 0xff jz complete; /* Just a residual? */
+ mvi BAD_STATUS call set_seqint; /* let driver know */
+ cmp RETURN_1, SEND_SENSE jne complete;
+ call add_scb_to_free_list;
+ jmp await_busfree;
+complete:
+ mov SCB_TAG call complete_post;
+ jmp await_busfree;
+}
+
+complete_post:
+ /* Post the SCBID in SINDEX and issue an interrupt */
+ call add_scb_to_free_list;
+ mov ARG_1, SINDEX;
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ mov A, SDSCB_QOFF;
+ } else {
+ mov A, QOUTPOS;
+ }
+ mvi QOUTFIFO_OFFSET call post_byte_setup;
+ mov ARG_1 call post_byte;
+ if ((ahc->features & AHC_QUEUE_REGS) == 0) {
+ inc QOUTPOS;
+ }
+ mvi INTSTAT,CMDCMPLT ret;
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+/*
+ * Is it a disconnect message? Set a flag in the SCB to remind us
+ * and await the bus going free. If this is an untagged transaction
+ * store the SCB id for it in our untagged target table for lookup on
+ * a reselection.
+ */
+mesgin_disconnect:
+ /*
+ * If ATN is raised, we still want to give the target a message.
+ * Perhaps there was a parity error on this last message byte
+ * or we want to abort this command. Either way, the target
+ * should take us to message out phase and then attempt to
+ * disconnect again.
+ * XXX - Wait for more testing.
+ test SCSISIGI, ATNI jnz mesgin_done;
+ */
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
+ jnz mesgin_proto_violation;
+ or SCB_CONTROL,DISCONNECTED;
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ call add_scb_to_disc_list;
+ }
+ test SCB_CONTROL, TAG_ENB jnz await_busfree;
+ mov ARG_1, SCB_TAG;
+ and SAVED_LUN, LID, SCB_LUN;
+ mov SCB_SCSIID call set_busy_target;
+ jmp await_busfree;
+
+/*
+ * Save data pointers message:
+ * Copying RAM values back to SCB, for Save Data Pointers message, but
+ * only if we've actually been into a data phase to change them. This
+ * protects against bogus data in scratch ram and the residual counts
+ * since they are only initialized when we go into data_in or data_out.
+ * Ack the message as soon as possible. For chips without S/G pipelining,
+ * we can only ack the message after SHADDR has been saved. On these
+ * chips, SHADDR increments with every bus transaction, even PIO.
+ */
+mesgin_sdptrs:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
+ test SEQ_FLAGS, DPHASE jz ITloop;
+ } else {
+ test SEQ_FLAGS, DPHASE jz mesgin_done;
+ }
+
+ /*
+ * If we are asked to save our position at the end of the
+ * transfer, just mark us at the end rather than perform a
+ * full save.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz mesgin_sdptrs_full;
+ or SCB_SGPTR, SG_LIST_NULL;
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ jmp ITloop;
+ } else {
+ jmp mesgin_done;
+ }
+
+mesgin_sdptrs_full:
+
+ /*
+ * The SCB_SGPTR becomes the next one we'll download,
+ * and the SCB_DATAPTR becomes the current SHADDR.
+ * Use the residual number since STCNT is corrupted by
+ * any message transfer.
+ */
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov SCB_DATAPTR, SHADDR, 4;
+ if ((ahc->features & AHC_ULTRA2) == 0) {
+ mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
+ }
+ bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8;
+ } else {
+ mvi DINDEX, SCB_DATAPTR;
+ mvi SHADDR call bcopy_4;
+ mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
+ mvi SCB_RESIDUAL_DATACNT call bcopy_8;
+ }
+ jmp ITloop;
+
+/*
+ * Restore pointers message? Data pointers are recopied from the
+ * SCB anytime we enter a data phase for the first time, so all
+ * we need to do is clear the DPHASE flag and let the data phase
+ * code do the rest. We also reset/reallocate the FIFO to make
+ * sure we have a clean start for the next data or command phase.
+ */
+mesgin_rdptrs:
+ and SEQ_FLAGS, ~DPHASE; /*
+ * We'll reload them
+ * the next time through
+ * the dataphase.
+ */
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
+ jmp mesgin_done;
+
+/*
+ * Index into our Busy Target table. SINDEX and DINDEX are modified
+ * upon return. SCBPTR may be modified by this action.
+ */
+set_busy_target:
+ shr DINDEX, 4, SINDEX;
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ mov SCBPTR, SAVED_LUN;
+ add DINDEX, SCB_64_BTT;
+ } else {
+ add DINDEX, BUSY_TARGETS;
+ }
+ mov DINDIR, ARG_1 ret;
+
+/*
+ * Identify message? For a reconnecting target, this tells us the lun
+ * that the reconnection is for - find the correct SCB and switch to it,
+ * clearing the "disconnected" bit so we don't "find" it by accident later.
+ */
+mesgin_identify:
+ /*
+ * Determine whether a target is using tagged or non-tagged
+ * transactions by first looking at the transaction stored in
+ * the busy target array. If there is no untagged transaction
+ * for this target or the transaction is for a different lun, then
+ * this must be a tagged transaction.
+ */
+ shr SINDEX, 4, SAVED_SCSIID;
+ and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ add SINDEX, SCB_64_BTT;
+ mov SCBPTR, SAVED_LUN;
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ add NONE, -SCB_64_BTT, SINDEX;
+ jc . + 2;
+ mvi INTSTAT, OUT_OF_RANGE;
+ nop;
+ add NONE, -(SCB_64_BTT + 16), SINDEX;
+ jnc . + 2;
+ mvi INTSTAT, OUT_OF_RANGE;
+ nop;
+ }
+ } else {
+ add SINDEX, BUSY_TARGETS;
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ add NONE, -BUSY_TARGETS, SINDEX;
+ jc . + 2;
+ mvi INTSTAT, OUT_OF_RANGE;
+ nop;
+ add NONE, -(BUSY_TARGETS + 16), SINDEX;
+ jnc . + 2;
+ mvi INTSTAT, OUT_OF_RANGE;
+ nop;
+ }
+ }
+ mov ARG_1, SINDIR;
+ cmp ARG_1, SCB_LIST_NULL je snoop_tag;
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ mov ARG_1 call findSCB;
+ } else {
+ mov SCBPTR, ARG_1;
+ }
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ jmp setup_SCB_id_lun_okay;
+ } else {
+ /*
+ * We only allow one untagged command per-target
+ * at a time. So, if the lun doesn't match, look
+ * for a tag message.
+ */
+ and A, LID, SCB_LUN;
+ cmp SAVED_LUN, A je setup_SCB_id_lun_okay;
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ /*
+ * findSCB removes the SCB from the
+ * disconnected list, so we must replace
+ * it there should this SCB be for another
+ * lun.
+ */
+ call cleanup_scb;
+ }
+ }
+
+/*
+ * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
+ * If we get one, we use the tag returned to find the proper
+ * SCB. With SCB paging, we must search for non-tagged
+ * transactions since the SCB may exist in any slot. If we're not
+ * using SCB paging, we can use the tag as the direct index to the
+ * SCB.
+ */
+snoop_tag:
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x80;
+ }
+ mov NONE,SCSIDATL; /* ACK Identify MSG */
+ call phase_lock;
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x1;
+ }
+ cmp LASTPHASE, P_MESGIN jne not_found;
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x2;
+ }
+ cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found;
+get_tag:
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ mvi ARG_1 call inb_next; /* tag value */
+ mov ARG_1 call findSCB;
+ } else {
+ mvi ARG_1 call inb_next; /* tag value */
+ mov SCBPTR, ARG_1;
+ }
+
+/*
+ * Ensure that the SCB the tag points to is for
+ * an SCB transaction to the reconnecting target.
+ */
+setup_SCB:
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x4;
+ }
+ mov A, SCB_SCSIID;
+ cmp SAVED_SCSIID, A jne not_found_cleanup_scb;
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x8;
+ }
+setup_SCB_id_okay:
+ and A, LID, SCB_LUN;
+ cmp SAVED_LUN, A jne not_found_cleanup_scb;
+setup_SCB_id_lun_okay:
+ if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x10;
+ }
+ test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb;
+ and SCB_CONTROL,~DISCONNECTED;
+ test SCB_CONTROL, TAG_ENB jnz setup_SCB_tagged;
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ mov A, SCBPTR;
+ }
+ mvi ARG_1, SCB_LIST_NULL;
+ mov SAVED_SCSIID call set_busy_target;
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ mov SCBPTR, A;
+ }
+setup_SCB_tagged:
+ clr SEQ_FLAGS; /* make note of IDENTIFY */
+ call set_transfer_settings;
+ /* See if the host wants to send a message upon reconnection */
+ test SCB_CONTROL, MK_MESSAGE jz mesgin_done;
+ mvi HOST_MSG call mk_mesg;
+ jmp mesgin_done;
+
+not_found_cleanup_scb:
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ call cleanup_scb;
+ }
+not_found:
+ mvi NO_MATCH call set_seqint;
+ jmp mesgin_done;
+
+mk_mesg:
+ if ((ahc->features & AHC_DT) == 0) {
+ or SCSISIGO, ATNO, LASTPHASE;
+ } else {
+ mvi SCSISIGO, ATNO;
+ }
+ mov MSG_OUT,SINDEX ret;
+
+/*
+ * Functions to read data in Automatic PIO mode.
+ *
+ * According to Adaptec's documentation, an ACK is not sent on input from
+ * the target until SCSIDATL is read from. So we wait until SCSIDATL is
+ * latched (the usual way), then read the data byte directly off the bus
+ * using SCSIBUSL. When we have pulled the ATN line, or we just want to
+ * acknowledge the byte, then we do a dummy read from SCISDATL. The SCSI
+ * spec guarantees that the target will hold the data byte on the bus until
+ * we send our ACK.
+ *
+ * The assumption here is that these are called in a particular sequence,
+ * and that REQ is already set when inb_first is called. inb_{first,next}
+ * use the same calling convention as inb.
+ */
+inb_next_wait_perr:
+ mvi PERR_DETECTED call set_seqint;
+ jmp inb_next_wait;
+inb_next:
+ mov NONE,SCSIDATL; /*dummy read from latch to ACK*/
+inb_next_wait:
+ /*
+ * If there is a parity error, wait for the kernel to
+ * see the interrupt and prepare our message response
+ * before continuing.
+ */
+ test SSTAT1, REQINIT jz inb_next_wait;
+ test SSTAT1, SCSIPERR jnz inb_next_wait_perr;
+inb_next_check_phase:
+ and LASTPHASE, PHASE_MASK, SCSISIGI;
+ cmp LASTPHASE, P_MESGIN jne mesgin_phasemis;
+inb_first:
+ mov DINDEX,SINDEX;
+ mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/
+inb_last:
+ mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/
+}
+
+if ((ahc->flags & AHC_TARGETROLE) != 0) {
+/*
+ * Change to a new phase. If we are changing the state of the I/O signal,
+ * from out to in, wait an additional data release delay before continuing.
+ */
+change_phase:
+ /* Wait for preceding I/O session to complete. */
+ test SCSISIGI, ACKI jnz .;
+
+ /* Change the phase */
+ and DINDEX, IOI, SCSISIGI;
+ mov SCSISIGO, SINDEX;
+ and A, IOI, SINDEX;
+
+ /*
+ * If the data direction has changed, from
+ * out (initiator driving) to in (target driving),
+ * we must wait at least a data release delay plus
+ * the normal bus settle delay. [SCSI III SPI 10.11.0]
+ */
+ cmp DINDEX, A je change_phase_wait;
+ test SINDEX, IOI jz change_phase_wait;
+ call change_phase_wait;
+change_phase_wait:
+ nop;
+ nop;
+ nop;
+ nop ret;
+
+/*
+ * Send a byte to an initiator in Automatic PIO mode.
+ */
+target_outb:
+ or SXFRCTL0, SPIOEN;
+ test SSTAT0, SPIORDY jz .;
+ mov SCSIDATL, SINDEX;
+ test SSTAT0, SPIORDY jz .;
+ and SXFRCTL0, ~SPIOEN ret;
+}
+
+/*
+ * Locate a disconnected SCB by SCBID. Upon return, SCBPTR and SINDEX will
+ * be set to the position of the SCB. If the SCB cannot be found locally,
+ * it will be paged in from host memory. RETURN_2 stores the address of the
+ * preceding SCB in the disconnected list which can be used to speed up
+ * removal of the found SCB from the disconnected list.
+ */
+if ((ahc->flags & AHC_PAGESCBS) != 0) {
+BEGIN_CRITICAL;
+findSCB:
+ mov A, SINDEX; /* Tag passed in SINDEX */
+ cmp DISCONNECTED_SCBH, SCB_LIST_NULL je findSCB_notFound;
+ mov SCBPTR, DISCONNECTED_SCBH; /* Initialize SCBPTR */
+ mvi ARG_2, SCB_LIST_NULL; /* Head of list */
+ jmp findSCB_loop;
+findSCB_next:
+ cmp SCB_NEXT, SCB_LIST_NULL je findSCB_notFound;
+ mov ARG_2, SCBPTR;
+ mov SCBPTR,SCB_NEXT;
+findSCB_loop:
+ cmp SCB_TAG, A jne findSCB_next;
+rem_scb_from_disc_list:
+ cmp ARG_2, SCB_LIST_NULL je rHead;
+ mov DINDEX, SCB_NEXT;
+ mov SINDEX, SCBPTR;
+ mov SCBPTR, ARG_2;
+ mov SCB_NEXT, DINDEX;
+ mov SCBPTR, SINDEX ret;
+rHead:
+ mov DISCONNECTED_SCBH,SCB_NEXT ret;
+END_CRITICAL;
+findSCB_notFound:
+ /*
+ * We didn't find it. Page in the SCB.
+ */
+ mov ARG_1, A; /* Save tag */
+ mov ALLZEROS call get_free_or_disc_scb;
+ mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
+ mov ARG_1 jmp dma_scb;
+}
+
+/*
+ * Prepare the hardware to post a byte to host memory given an
+ * index of (A + (256 * SINDEX)) and a base address of SHARED_DATA_ADDR.
+ */
+post_byte_setup:
+ mov ARG_2, SINDEX;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mvi DINDEX, CCHADDR;
+ mvi SHARED_DATA_ADDR call set_1byte_addr;
+ mvi CCHCNT, 1;
+ mvi CCSCBCTL, CCSCBRESET ret;
+ } else {
+ mvi DINDEX, HADDR;
+ mvi SHARED_DATA_ADDR call set_1byte_addr;
+ mvi 1 call set_hcnt;
+ mvi DFCNTRL, FIFORESET ret;
+ }
+
+post_byte:
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ bmov CCSCBRAM, SINDEX, 1;
+ or CCSCBCTL, CCSCBEN|CCSCBRESET;
+ test CCSCBCTL, CCSCBDONE jz .;
+ clr CCSCBCTL ret;
+ } else {
+ mov DFDAT, SINDEX;
+ or DFCNTRL, HDMAEN|FIFOFLUSH;
+ jmp dma_finish;
+ }
+
+phase_lock_perr:
+ mvi PERR_DETECTED call set_seqint;
+phase_lock:
+ /*
+ * If there is a parity error, wait for the kernel to
+ * see the interrupt and prepare our message response
+ * before continuing.
+ */
+ test SSTAT1, REQINIT jz phase_lock;
+ test SSTAT1, SCSIPERR jnz phase_lock_perr;
+phase_lock_latch_phase:
+ if ((ahc->features & AHC_DT) == 0) {
+ and SCSISIGO, PHASE_MASK, SCSISIGI;
+ }
+ and LASTPHASE, PHASE_MASK, SCSISIGI ret;
+
+if ((ahc->features & AHC_CMD_CHAN) == 0) {
+set_hcnt:
+ mov HCNT[0], SINDEX;
+clear_hcnt:
+ clr HCNT[1];
+ clr HCNT[2] ret;
+
+set_stcnt_from_hcnt:
+ mov STCNT[0], HCNT[0];
+ mov STCNT[1], HCNT[1];
+ mov STCNT[2], HCNT[2] ret;
+
+bcopy_8:
+ mov DINDIR, SINDIR;
+bcopy_7:
+ mov DINDIR, SINDIR;
+ mov DINDIR, SINDIR;
+bcopy_5:
+ mov DINDIR, SINDIR;
+bcopy_4:
+ mov DINDIR, SINDIR;
+bcopy_3:
+ mov DINDIR, SINDIR;
+ mov DINDIR, SINDIR;
+ mov DINDIR, SINDIR ret;
+}
+
+if ((ahc->flags & AHC_TARGETROLE) != 0) {
+/*
+ * Setup addr assuming that A is an index into
+ * an array of 32byte objects, SINDEX contains
+ * the base address of that array, and DINDEX
+ * contains the base address of the location
+ * to store the indexed address.
+ */
+set_32byte_addr:
+ shr ARG_2, 3, A;
+ shl A, 5;
+ jmp set_1byte_addr;
+}
+
+/*
+ * Setup addr assuming that A is an index into
+ * an array of 64byte objects, SINDEX contains
+ * the base address of that array, and DINDEX
+ * contains the base address of the location
+ * to store the indexed address.
+ */
+set_64byte_addr:
+ shr ARG_2, 2, A;
+ shl A, 6;
+
+/*
+ * Setup addr assuming that A + (ARG_2 * 256) is an
+ * index into an array of 1byte objects, SINDEX contains
+ * the base address of that array, and DINDEX contains
+ * the base address of the location to store the computed
+ * address.
+ */
+set_1byte_addr:
+ add DINDIR, A, SINDIR;
+ mov A, ARG_2;
+ adc DINDIR, A, SINDIR;
+ clr A;
+ adc DINDIR, A, SINDIR;
+ adc DINDIR, A, SINDIR ret;
+
+/*
+ * Either post or fetch an SCB from host memory based on the
+ * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX.
+ */
+dma_scb:
+ mov A, SINDEX;
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ mvi DINDEX, CCHADDR;
+ mvi HSCB_ADDR call set_64byte_addr;
+ mov CCSCBPTR, SCBPTR;
+ test DMAPARAMS, DIRECTION jz dma_scb_tohost;
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ mvi CCHCNT, SCB_DOWNLOAD_SIZE_64;
+ } else {
+ mvi CCHCNT, SCB_DOWNLOAD_SIZE;
+ }
+ mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET;
+ cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .;
+ jmp dma_scb_finish;
+dma_scb_tohost:
+ mvi CCHCNT, SCB_UPLOAD_SIZE;
+ if ((ahc->features & AHC_ULTRA2) == 0) {
+ mvi CCSCBCTL, CCSCBRESET;
+ bmov CCSCBRAM, SCB_BASE, SCB_UPLOAD_SIZE;
+ or CCSCBCTL, CCSCBEN|CCSCBRESET;
+ test CCSCBCTL, CCSCBDONE jz .;
+ } else if ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0) {
+ mvi CCSCBCTL, CCARREN|CCSCBRESET;
+ cmp CCSCBCTL, ARRDONE|CCARREN jne .;
+ mvi CCHCNT, SCB_UPLOAD_SIZE;
+ mvi CCSCBCTL, CCSCBEN|CCSCBRESET;
+ cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .;
+ } else {
+ mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET;
+ cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .;
+ }
+dma_scb_finish:
+ clr CCSCBCTL;
+ test CCSCBCTL, CCARREN|CCSCBEN jnz .;
+ ret;
+ } else {
+ mvi DINDEX, HADDR;
+ mvi HSCB_ADDR call set_64byte_addr;
+ mvi SCB_DOWNLOAD_SIZE call set_hcnt;
+ mov DFCNTRL, DMAPARAMS;
+ test DMAPARAMS, DIRECTION jnz dma_scb_fromhost;
+ /* Fill it with the SCB data */
+copy_scb_tofifo:
+ mvi SINDEX, SCB_BASE;
+ add A, SCB_DOWNLOAD_SIZE, SINDEX;
+copy_scb_tofifo_loop:
+ call copy_to_fifo_8;
+ cmp SINDEX, A jne copy_scb_tofifo_loop;
+ or DFCNTRL, HDMAEN|FIFOFLUSH;
+ jmp dma_finish;
+dma_scb_fromhost:
+ mvi DINDEX, SCB_BASE;
+ if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) {
+ /*
+ * The PCI module will only issue a PCI
+ * retry if the data FIFO is empty. If the
+ * host disconnects in the middle of a
+ * transfer, we must empty the fifo of all
+ * available data to force the chip to
+ * continue the transfer. This does not
+ * happen for SCSI transfers as the SCSI module
+ * will drain the FIFO as data are made available.
+ * When the hang occurs, we know that a multiple
+ * of 8 bytes is in the FIFO because the PCI
+ * module has an 8 byte input latch that only
+ * dumps to the FIFO when HCNT == 0 or the
+ * latch is full.
+ */
+ clr A;
+ /* Wait for at least 8 bytes of data to arrive. */
+dma_scb_hang_fifo:
+ test DFSTATUS, FIFOQWDEMP jnz dma_scb_hang_fifo;
+dma_scb_hang_wait:
+ test DFSTATUS, MREQPEND jnz dma_scb_hang_wait;
+ test DFSTATUS, HDONE jnz dma_scb_hang_dma_done;
+ test DFSTATUS, HDONE jnz dma_scb_hang_dma_done;
+ test DFSTATUS, HDONE jnz dma_scb_hang_dma_done;
+ /*
+ * The PCI module no longer intends to perform
+ * a PCI transaction. Drain the fifo.
+ */
+dma_scb_hang_dma_drain_fifo:
+ not A, HCNT;
+ add A, SCB_DOWNLOAD_SIZE+SCB_BASE+1;
+ and A, ~0x7;
+ mov DINDIR,DFDAT;
+ cmp DINDEX, A jne . - 1;
+ cmp DINDEX, SCB_DOWNLOAD_SIZE+SCB_BASE
+ je dma_finish_nowait;
+ /* Restore A as the lines left to transfer. */
+ add A, -SCB_BASE, DINDEX;
+ shr A, 3;
+ jmp dma_scb_hang_fifo;
+dma_scb_hang_dma_done:
+ and DFCNTRL, ~HDMAEN;
+ test DFCNTRL, HDMAEN jnz .;
+ add SEQADDR0, A;
+ } else {
+ call dma_finish;
+ }
+ call dfdat_in_8;
+ call dfdat_in_8;
+ call dfdat_in_8;
+dfdat_in_8:
+ mov DINDIR,DFDAT;
+dfdat_in_7:
+ mov DINDIR,DFDAT;
+ mov DINDIR,DFDAT;
+ mov DINDIR,DFDAT;
+ mov DINDIR,DFDAT;
+ mov DINDIR,DFDAT;
+dfdat_in_2:
+ mov DINDIR,DFDAT;
+ mov DINDIR,DFDAT ret;
+ }
+
+copy_to_fifo_8:
+ mov DFDAT,SINDIR;
+ mov DFDAT,SINDIR;
+copy_to_fifo_6:
+ mov DFDAT,SINDIR;
+copy_to_fifo_5:
+ mov DFDAT,SINDIR;
+copy_to_fifo_4:
+ mov DFDAT,SINDIR;
+ mov DFDAT,SINDIR;
+ mov DFDAT,SINDIR;
+ mov DFDAT,SINDIR ret;
+
+/*
+ * Wait for DMA from host memory to data FIFO to complete, then disable
+ * DMA and wait for it to acknowledge that it's off.
+ */
+dma_finish:
+ test DFSTATUS,HDONE jz dma_finish;
+dma_finish_nowait:
+ /* Turn off DMA */
+ and DFCNTRL, ~HDMAEN;
+ test DFCNTRL, HDMAEN jnz .;
+ ret;
+
+/*
+ * Restore an SCB that failed to match an incoming reselection
+ * to the correct/safe state. If the SCB is for a disconnected
+ * transaction, it must be returned to the disconnected list.
+ * If it is not in the disconnected state, it must be free.
+ */
+cleanup_scb:
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ test SCB_CONTROL,DISCONNECTED jnz add_scb_to_disc_list;
+ }
+add_scb_to_free_list:
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+BEGIN_CRITICAL;
+ mov SCB_NEXT, FREE_SCBH;
+ mvi SCB_TAG, SCB_LIST_NULL;
+ mov FREE_SCBH, SCBPTR ret;
+END_CRITICAL;
+ } else {
+ mvi SCB_TAG, SCB_LIST_NULL ret;
+ }
+
+if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+set_hhaddr:
+ or DSCOMMAND1, HADDLDSEL0;
+ and HADDR, SG_HIGH_ADDR_BITS, SINDEX;
+ and DSCOMMAND1, ~HADDLDSEL0 ret;
+}
+
+if ((ahc->flags & AHC_PAGESCBS) != 0) {
+get_free_or_disc_scb:
+BEGIN_CRITICAL;
+ cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb;
+ cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb;
+return_error:
+ mvi NO_FREE_SCB call set_seqint;
+ mvi SINDEX, SCB_LIST_NULL ret;
+dequeue_disc_scb:
+ mov SCBPTR, DISCONNECTED_SCBH;
+ mov DISCONNECTED_SCBH, SCB_NEXT;
+END_CRITICAL;
+ mvi DMAPARAMS, FIFORESET;
+ mov SCB_TAG jmp dma_scb;
+BEGIN_CRITICAL;
+dequeue_free_scb:
+ mov SCBPTR, FREE_SCBH;
+ mov FREE_SCBH, SCB_NEXT ret;
+END_CRITICAL;
+
+add_scb_to_disc_list:
+/*
+ * Link this SCB into the DISCONNECTED list. This list holds the
+ * candidates for paging out an SCB if one is needed for a new command.
+ * Modifying the disconnected list is a critical(pause dissabled) section.
+ */
+BEGIN_CRITICAL;
+ mov SCB_NEXT, DISCONNECTED_SCBH;
+ mov DISCONNECTED_SCBH, SCBPTR ret;
+END_CRITICAL;
+}
+set_seqint:
+ mov INTSTAT, SINDEX;
+ nop;
+return:
+ ret;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
new file mode 100644
index 000000000..9e85a7ef9
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -0,0 +1,324 @@
+/*
+ * Interface for the 93C66/56/46/26/06 serial eeprom parts.
+ *
+ * Copyright (c) 1995, 1996 Daniel M. Eischen
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $
+ */
+
+/*
+ * The instruction set of the 93C66/56/46/26/06 chips are as follows:
+ *
+ * Start OP *
+ * Function Bit Code Address** Data Description
+ * -------------------------------------------------------------------
+ * READ 1 10 A5 - A0 Reads data stored in memory,
+ * starting at specified address
+ * EWEN 1 00 11XXXX Write enable must precede
+ * all programming modes
+ * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0
+ * WRITE 1 01 A5 - A0 D15 - D0 Writes register
+ * ERAL 1 00 10XXXX Erase all registers
+ * WRAL 1 00 01XXXX D15 - D0 Writes to all registers
+ * EWDS 1 00 00XXXX Disables all programming
+ * instructions
+ * *Note: A value of X for address is a don't care condition.
+ * **Note: There are 8 address bits for the 93C56/66 chips unlike
+ * the 93C46/26/06 chips which have 6 address bits.
+ *
+ * The 93C46 has a four wire interface: clock, chip select, data in, and
+ * data out. In order to perform one of the above functions, you need
+ * to enable the chip select for a clock period (typically a minimum of
+ * 1 usec, with the clock high and low a minimum of 750 and 250 nsec
+ * respectively). While the chip select remains high, you can clock in
+ * the instructions (above) starting with the start bit, followed by the
+ * OP code, Address, and Data (if needed). For the READ instruction, the
+ * requested 16-bit register contents is read from the data out line but
+ * is preceded by an initial zero (leading 0, followed by 16-bits, MSB
+ * first). The clock cycling from low to high initiates the next data
+ * bit to be sent from the chip.
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aic7xxx_93cx6.h>
+#endif
+
+/*
+ * Right now, we only have to read the SEEPROM. But we make it easier to
+ * add other 93Cx6 functions.
+ */
+struct seeprom_cmd {
+ uint8_t len;
+ uint8_t bits[11];
+};
+
+/* Short opcodes for the c46 */
+static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+
+/* Long opcodes for the C56/C66 */
+static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+
+/* Common opcodes */
+static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
+static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
+/*
+ * Wait for the SEERDY to go high; about 800 ns.
+ */
+#define CLOCK_PULSE(sd, rdy) \
+ while ((SEEPROM_STATUS_INB(sd) & rdy) == 0) { \
+ ; /* Do nothing */ \
+ } \
+ (void)SEEPROM_INB(sd); /* Clear clock */
+
+/*
+ * Send a START condition and the given command
+ */
+static void
+send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd)
+{
+ uint8_t temp;
+ int i = 0;
+
+ /* Send chip select for one clock cycle. */
+ temp = sd->sd_MS ^ sd->sd_CS;
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+
+ for (i = 0; i < cmd->len; i++) {
+ if (cmd->bits[i] != 0)
+ temp ^= sd->sd_DO;
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ if (cmd->bits[i] != 0)
+ temp ^= sd->sd_DO;
+ }
+}
+
+/*
+ * Clear CS put the chip in the reset state, where it can wait for new commands.
+ */
+static void
+reset_seeprom(struct seeprom_descriptor *sd)
+{
+ uint8_t temp;
+
+ temp = sd->sd_MS;
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+}
+
+/*
+ * Read the serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ */
+int
+ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+ u_int start_addr, u_int count)
+{
+ int i = 0;
+ u_int k = 0;
+ uint16_t v;
+ uint8_t temp;
+
+ /*
+ * Read the requested registers of the seeprom. The loop
+ * will range from 0 to count-1.
+ */
+ for (k = start_addr; k < count + start_addr; k++) {
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ send_seeprom_cmd(sd, &seeprom_read);
+
+ /* Send the 6 or 8 bit address (MSB first, LSB last). */
+ temp = sd->sd_MS ^ sd->sd_CS;
+ for (i = (sd->sd_chip - 1); i >= 0; i--) {
+ if ((k & (1 << i)) != 0)
+ temp ^= sd->sd_DO;
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ if ((k & (1 << i)) != 0)
+ temp ^= sd->sd_DO;
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ v = 0;
+ for (i = 16; i >= 0; i--) {
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ v <<= 1;
+ if (SEEPROM_DATA_INB(sd) & sd->sd_DI)
+ v |= 1;
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ }
+
+ buf[k - start_addr] = v;
+
+ /* Reset the chip select for the next command cycle. */
+ reset_seeprom(sd);
+ }
+#ifdef AHC_DUMP_EEPROM
+ printk("\nSerial EEPROM:\n\t");
+ for (k = 0; k < count; k = k + 1) {
+ if (((k % 8) == 0) && (k != 0)) {
+ printk(KERN_CONT "\n\t");
+ }
+ printk(KERN_CONT " 0x%x", buf[k]);
+ }
+ printk(KERN_CONT "\n");
+#endif
+ return (1);
+}
+
+/*
+ * Write the serial EEPROM and return 1 if successful and 0 if
+ * not successful.
+ */
+int
+ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+ u_int start_addr, u_int count)
+{
+ const struct seeprom_cmd *ewen, *ewds;
+ uint16_t v;
+ uint8_t temp;
+ int i, k;
+
+ /* Place the chip into write-enable mode */
+ if (sd->sd_chip == C46) {
+ ewen = &seeprom_ewen;
+ ewds = &seeprom_ewds;
+ } else if (sd->sd_chip == C56_66) {
+ ewen = &seeprom_long_ewen;
+ ewds = &seeprom_long_ewds;
+ } else {
+ printk("ahc_write_seeprom: unsupported seeprom type %d\n",
+ sd->sd_chip);
+ return (0);
+ }
+
+ send_seeprom_cmd(sd, ewen);
+ reset_seeprom(sd);
+
+ /* Write all requested data out to the seeprom. */
+ temp = sd->sd_MS ^ sd->sd_CS;
+ for (k = start_addr; k < count + start_addr; k++) {
+ /* Send the write command */
+ send_seeprom_cmd(sd, &seeprom_write);
+
+ /* Send the 6 or 8 bit address (MSB first). */
+ for (i = (sd->sd_chip - 1); i >= 0; i--) {
+ if ((k & (1 << i)) != 0)
+ temp ^= sd->sd_DO;
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ if ((k & (1 << i)) != 0)
+ temp ^= sd->sd_DO;
+ }
+
+ /* Write the 16 bit value, MSB first */
+ v = buf[k - start_addr];
+ for (i = 15; i >= 0; i--) {
+ if ((v & (1 << i)) != 0)
+ temp ^= sd->sd_DO;
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ if ((v & (1 << i)) != 0)
+ temp ^= sd->sd_DO;
+ }
+
+ /* Wait for the chip to complete the write */
+ temp = sd->sd_MS;
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ temp = sd->sd_MS ^ sd->sd_CS;
+ do {
+ SEEPROM_OUTB(sd, temp);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+ CLOCK_PULSE(sd, sd->sd_RDY);
+ } while ((SEEPROM_DATA_INB(sd) & sd->sd_DI) == 0);
+
+ reset_seeprom(sd);
+ }
+
+ /* Put the chip back into write-protect mode */
+ send_seeprom_cmd(sd, ewds);
+ reset_seeprom(sd);
+
+ return (1);
+}
+
+int
+ahc_verify_cksum(struct seeprom_config *sc)
+{
+ int i;
+ int maxaddr;
+ uint32_t checksum;
+ uint16_t *scarray;
+
+ maxaddr = (sizeof(*sc)/2) - 1;
+ checksum = 0;
+ scarray = (uint16_t *)sc;
+
+ for (i = 0; i < maxaddr; i++)
+ checksum = checksum + scarray[i];
+ if (checksum == 0
+ || (checksum & 0xFFFF) != sc->checksum) {
+ return (0);
+ } else {
+ return(1);
+ }
+}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.h b/drivers/scsi/aic7xxx/aic7xxx_93cx6.h
new file mode 100644
index 000000000..859c43ccd
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.h
@@ -0,0 +1,102 @@
+/*
+ * Interface to the 93C46/56 serial EEPROM that is used to store BIOS
+ * settings for the aic7xxx based adaptec SCSI controllers. It can
+ * also be used for 93C26 and 93C06 serial EEPROMS.
+ *
+ * Copyright (c) 1994, 1995, 2000 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.h#12 $
+ *
+ * $FreeBSD$
+ */
+#ifndef _AIC7XXX_93CX6_H_
+#define _AIC7XXX_93CX6_H_
+
+typedef enum {
+ C46 = 6,
+ C56_66 = 8
+} seeprom_chip_t;
+
+struct seeprom_descriptor {
+ struct ahc_softc *sd_ahc;
+ u_int sd_control_offset;
+ u_int sd_status_offset;
+ u_int sd_dataout_offset;
+ seeprom_chip_t sd_chip;
+ uint16_t sd_MS;
+ uint16_t sd_RDY;
+ uint16_t sd_CS;
+ uint16_t sd_CK;
+ uint16_t sd_DO;
+ uint16_t sd_DI;
+};
+
+/*
+ * This function will read count 16-bit words from the serial EEPROM and
+ * return their value in buf. The port address of the aic7xxx serial EEPROM
+ * control register is passed in as offset. The following parameters are
+ * also passed in:
+ *
+ * CS - Chip select
+ * CK - Clock
+ * DO - Data out
+ * DI - Data in
+ * RDY - SEEPROM ready
+ * MS - Memory port mode select
+ *
+ * A failed read attempt returns 0, and a successful read returns 1.
+ */
+
+#define SEEPROM_INB(sd) \
+ ahc_inb(sd->sd_ahc, sd->sd_control_offset)
+#define SEEPROM_OUTB(sd, value) \
+do { \
+ ahc_outb(sd->sd_ahc, sd->sd_control_offset, value); \
+ ahc_flush_device_writes(sd->sd_ahc); \
+} while(0)
+
+#define SEEPROM_STATUS_INB(sd) \
+ ahc_inb(sd->sd_ahc, sd->sd_status_offset)
+#define SEEPROM_DATA_INB(sd) \
+ ahc_inb(sd->sd_ahc, sd->sd_dataout_offset)
+
+int ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+ u_int start_addr, u_int count);
+int ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+ u_int start_addr, u_int count);
+int ahc_verify_cksum(struct seeprom_config *sc);
+
+#endif /* _AIC7XXX_93CX6_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
new file mode 100644
index 000000000..c4829d84b
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -0,0 +1,7971 @@
+/*
+ * Core routines and tables shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2002 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aicasm/aicasm_insformat.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
+#endif
+
+/***************************** Lookup Tables **********************************/
+static const char *const ahc_chip_names[] = {
+ "NONE",
+ "aic7770",
+ "aic7850",
+ "aic7855",
+ "aic7859",
+ "aic7860",
+ "aic7870",
+ "aic7880",
+ "aic7895",
+ "aic7895C",
+ "aic7890/91",
+ "aic7896/97",
+ "aic7892",
+ "aic7899"
+};
+static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
+
+/*
+ * Hardware error codes.
+ */
+struct ahc_hard_error_entry {
+ uint8_t errno;
+ const char *errmesg;
+};
+
+static const struct ahc_hard_error_entry ahc_hard_errors[] = {
+ { ILLHADDR, "Illegal Host Access" },
+ { ILLSADDR, "Illegal Sequencer Address referrenced" },
+ { ILLOPCODE, "Illegal Opcode in sequencer program" },
+ { SQPARERR, "Sequencer Parity Error" },
+ { DPARERR, "Data-path Parity Error" },
+ { MPARERR, "Scratch or SCB Memory Parity Error" },
+ { PCIERRSTAT, "PCI Error detected" },
+ { CIOPARERR, "CIOBUS Parity Error" },
+};
+static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
+
+static const struct ahc_phase_table_entry ahc_phase_table[] =
+{
+ { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
+ { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
+ { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
+ { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
+ { P_COMMAND, MSG_NOOP, "in Command phase" },
+ { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
+ { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
+ { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
+ { P_BUSFREE, MSG_NOOP, "while idle" },
+ { 0, MSG_NOOP, "in unknown phase" }
+};
+
+/*
+ * In most cases we only wish to itterate over real phases, so
+ * exclude the last element from the count.
+ */
+static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
+
+/*
+ * Valid SCSIRATE values. (p. 3-17)
+ * Provides a mapping of tranfer periods in ns to the proper value to
+ * stick in the scsixfer reg.
+ */
+static const struct ahc_syncrate ahc_syncrates[] =
+{
+ /* ultra2 fast/ultra period rate */
+ { 0x42, 0x000, 9, "80.0" },
+ { 0x03, 0x000, 10, "40.0" },
+ { 0x04, 0x000, 11, "33.0" },
+ { 0x05, 0x100, 12, "20.0" },
+ { 0x06, 0x110, 15, "16.0" },
+ { 0x07, 0x120, 18, "13.4" },
+ { 0x08, 0x000, 25, "10.0" },
+ { 0x19, 0x010, 31, "8.0" },
+ { 0x1a, 0x020, 37, "6.67" },
+ { 0x1b, 0x030, 43, "5.7" },
+ { 0x1c, 0x040, 50, "5.0" },
+ { 0x00, 0x050, 56, "4.4" },
+ { 0x00, 0x060, 62, "4.0" },
+ { 0x00, 0x070, 68, "3.6" },
+ { 0x00, 0x000, 0, NULL }
+};
+
+/* Our Sequencer Program */
+#include "aic7xxx_seq.h"
+
+/**************************** Function Declarations ***************************/
+static void ahc_force_renegotiation(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static struct ahc_tmode_tstate*
+ ahc_alloc_tstate(struct ahc_softc *ahc,
+ u_int scsi_id, char channel);
+#ifdef AHC_TARGET_MODE
+static void ahc_free_tstate(struct ahc_softc *ahc,
+ u_int scsi_id, char channel, int force);
+#endif
+static const struct ahc_syncrate*
+ ahc_devlimited_syncrate(struct ahc_softc *ahc,
+ struct ahc_initiator_tinfo *,
+ u_int *period,
+ u_int *ppr_options,
+ role_t role);
+static void ahc_update_pending_scbs(struct ahc_softc *ahc);
+static void ahc_fetch_devinfo(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static void ahc_scb_devinfo(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ struct scb *scb);
+static void ahc_assert_atn(struct ahc_softc *ahc);
+static void ahc_setup_initiator_msgout(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ struct scb *scb);
+static void ahc_build_transfer_msg(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static void ahc_construct_sdtr(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ u_int period, u_int offset);
+static void ahc_construct_wdtr(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ u_int bus_width);
+static void ahc_construct_ppr(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ u_int period, u_int offset,
+ u_int bus_width, u_int ppr_options);
+static void ahc_clear_msg_state(struct ahc_softc *ahc);
+static void ahc_handle_proto_violation(struct ahc_softc *ahc);
+static void ahc_handle_message_phase(struct ahc_softc *ahc);
+typedef enum {
+ AHCMSG_1B,
+ AHCMSG_2B,
+ AHCMSG_EXT
+} ahc_msgtype;
+static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
+ u_int msgval, int full);
+static int ahc_parse_msg(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static int ahc_handle_msg_reject(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
+static void ahc_handle_devreset(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ cam_status status, char *message,
+ int verbose_level);
+#ifdef AHC_TARGET_MODE
+static void ahc_setup_target_msgin(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo,
+ struct scb *scb);
+#endif
+
+static bus_dmamap_callback_t ahc_dmamap_cb;
+static void ahc_build_free_scb_list(struct ahc_softc *ahc);
+static int ahc_init_scbdata(struct ahc_softc *ahc);
+static void ahc_fini_scbdata(struct ahc_softc *ahc);
+static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
+ struct scb *prev_scb,
+ struct scb *scb);
+static int ahc_qinfifo_count(struct ahc_softc *ahc);
+static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
+ u_int prev, u_int scbptr);
+static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
+static u_int ahc_rem_wscb(struct ahc_softc *ahc,
+ u_int scbpos, u_int prev);
+static void ahc_reset_current_bus(struct ahc_softc *ahc);
+#ifdef AHC_DUMP_SEQ
+static void ahc_dumpseq(struct ahc_softc *ahc);
+#endif
+static int ahc_loadseq(struct ahc_softc *ahc);
+static int ahc_check_patch(struct ahc_softc *ahc,
+ const struct patch **start_patch,
+ u_int start_instr, u_int *skip_addr);
+static void ahc_download_instr(struct ahc_softc *ahc,
+ u_int instrptr, uint8_t *dconsts);
+#ifdef AHC_TARGET_MODE
+static void ahc_queue_lstate_event(struct ahc_softc *ahc,
+ struct ahc_tmode_lstate *lstate,
+ u_int initiator_id,
+ u_int event_type,
+ u_int event_arg);
+static void ahc_update_scsiid(struct ahc_softc *ahc,
+ u_int targid_mask);
+static int ahc_handle_target_cmd(struct ahc_softc *ahc,
+ struct target_cmd *cmd);
+#endif
+
+static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void ahc_busy_tcl(struct ahc_softc *ahc,
+ u_int tcl, u_int busyid);
+
+/************************** SCB and SCB queue management **********************/
+static void ahc_run_untagged_queues(struct ahc_softc *ahc);
+static void ahc_run_untagged_queue(struct ahc_softc *ahc,
+ struct scb_tailq *queue);
+
+/****************************** Initialization ********************************/
+static void ahc_alloc_scbs(struct ahc_softc *ahc);
+static void ahc_shutdown(void *arg);
+
+/*************************** Interrupt Services *******************************/
+static void ahc_clear_intstat(struct ahc_softc *ahc);
+static void ahc_run_qoutfifo(struct ahc_softc *ahc);
+#ifdef AHC_TARGET_MODE
+static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
+#endif
+static void ahc_handle_brkadrint(struct ahc_softc *ahc);
+static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
+static void ahc_handle_scsiint(struct ahc_softc *ahc,
+ u_int intstat);
+static void ahc_clear_critical_section(struct ahc_softc *ahc);
+
+/***************************** Error Recovery *********************************/
+static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status);
+static void ahc_calc_residual(struct ahc_softc *ahc,
+ struct scb *scb);
+
+/*********************** Untagged Transaction Routines ************************/
+static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
+static inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
+
+/*
+ * Block our completion routine from starting the next untagged
+ * transaction for this target or target lun.
+ */
+static inline void
+ahc_freeze_untagged_queues(struct ahc_softc *ahc)
+{
+ if ((ahc->flags & AHC_SCB_BTT) == 0)
+ ahc->untagged_queue_lock++;
+}
+
+/*
+ * Allow the next untagged transaction for this target or target lun
+ * to be executed. We use a counting semaphore to allow the lock
+ * to be acquired recursively. Once the count drops to zero, the
+ * transaction queues will be run.
+ */
+static inline void
+ahc_release_untagged_queues(struct ahc_softc *ahc)
+{
+ if ((ahc->flags & AHC_SCB_BTT) == 0) {
+ ahc->untagged_queue_lock--;
+ if (ahc->untagged_queue_lock == 0)
+ ahc_run_untagged_queues(ahc);
+ }
+}
+
+/************************* Sequencer Execution Control ************************/
+/*
+ * Work around any chip bugs related to halting sequencer execution.
+ * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
+ * reading a register that will set this signal and deassert it.
+ * Without this workaround, if the chip is paused, by an interrupt or
+ * manual pause while accessing scb ram, accesses to certain registers
+ * will hang the system (infinite pci retries).
+ */
+static void
+ahc_pause_bug_fix(struct ahc_softc *ahc)
+{
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ (void)ahc_inb(ahc, CCSCBCTL);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahc_is_paused(struct ahc_softc *ahc)
+{
+ return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop. The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahc_pause(struct ahc_softc *ahc)
+{
+ ahc_outb(ahc, HCNTRL, ahc->pause);
+
+ /*
+ * Since the sequencer can disable pausing in a critical section, we
+ * must loop until it actually stops.
+ */
+ while (ahc_is_paused(ahc) == 0)
+ ;
+
+ ahc_pause_bug_fix(ahc);
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted. If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahc_unpause(struct ahc_softc *ahc)
+{
+ if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
+ ahc_outb(ahc, HCNTRL, ahc->unpause);
+}
+
+/************************** Memory mapping routines ***************************/
+static struct ahc_dma_seg *
+ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
+{
+ int sg_index;
+
+ sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_index++;
+
+ return (&scb->sg_list[sg_index]);
+}
+
+static uint32_t
+ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
+{
+ int sg_index;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_index = sg - &scb->sg_list[1];
+
+ return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
+}
+
+static uint32_t
+ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
+{
+ return (ahc->scb_data->hscb_busaddr
+ + (sizeof(struct hardware_scb) * index));
+}
+
+static void
+ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+ ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
+ ahc->scb_data->hscb_dmamap,
+ /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
+ /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
+ * sizeof(struct ahc_dma_seg),
+ /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
+}
+
+#ifdef AHC_TARGET_MODE
+static uint32_t
+ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
+{
+ return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
+}
+#endif
+
+/*********************** Miscellaneous Support Functions ***********************/
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static void
+ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+ uint32_t sgptr;
+
+ sgptr = ahc_le32toh(scb->hscb->sgptr);
+ if ((sgptr & SG_RESID_VALID) != 0)
+ ahc_calc_residual(ahc, scb);
+}
+
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahc_initiator_tinfo *
+ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
+ u_int remote_id, struct ahc_tmode_tstate **tstate)
+{
+ /*
+ * Transfer data structures are stored from the perspective
+ * of the target role. Since the parameters for a connection
+ * in the initiator role to a given target are the same as
+ * when the roles are reversed, we pretend we are the target.
+ */
+ if (channel == 'B')
+ our_id += 8;
+ *tstate = ahc->enabled_targets[our_id];
+ return (&(*tstate)->transinfo[remote_id]);
+}
+
+uint16_t
+ahc_inw(struct ahc_softc *ahc, u_int port)
+{
+ uint16_t r = ahc_inb(ahc, port+1) << 8;
+ return r | ahc_inb(ahc, port);
+}
+
+void
+ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
+{
+ ahc_outb(ahc, port, value & 0xFF);
+ ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahc_inl(struct ahc_softc *ahc, u_int port)
+{
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+ | (ahc_inb(ahc, port+3) << 24));
+}
+
+void
+ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
+{
+ ahc_outb(ahc, port, (value) & 0xFF);
+ ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
+ ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
+ ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahc_inq(struct ahc_softc *ahc, u_int port)
+{
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+ | (ahc_inb(ahc, port+3) << 24)
+ | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
+ | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
+ | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
+ | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
+}
+
+void
+ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
+{
+ ahc_outb(ahc, port, value & 0xFF);
+ ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+ ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
+ ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
+ ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
+ ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
+ ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
+ ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
+}
+
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+struct scb *
+ahc_get_scb(struct ahc_softc *ahc)
+{
+ struct scb *scb;
+
+ if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
+ ahc_alloc_scbs(ahc);
+ scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
+ if (scb == NULL)
+ return (NULL);
+ }
+ SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
+ return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+void
+ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+
+ hscb = scb->hscb;
+ /* Clean up for the next user */
+ ahc->scb_data->scbindex[hscb->tag] = NULL;
+ scb->flags = SCB_FREE;
+ hscb->control = 0;
+
+ SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
+
+ /* Notify the OSM that a resource is now available. */
+ ahc_platform_scb_free(ahc, scb);
+}
+
+struct scb *
+ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
+{
+ struct scb* scb;
+
+ scb = ahc->scb_data->scbindex[tag];
+ if (scb != NULL)
+ ahc_sync_scb(ahc, scb,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ return (scb);
+}
+
+static void
+ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *q_hscb;
+ u_int saved_tag;
+
+ /*
+ * Our queuing method is a bit tricky. The card
+ * knows in advance which HSCB to download, and we
+ * can't disappoint it. To achieve this, the next
+ * SCB to download is saved off in ahc->next_queued_scb.
+ * When we are called to queue "an arbitrary scb",
+ * we copy the contents of the incoming HSCB to the one
+ * the sequencer knows about, swap HSCB pointers and
+ * finally assign the SCB to the tag indexed location
+ * in the scb_array. This makes sure that we can still
+ * locate the correct SCB by SCB_TAG.
+ */
+ q_hscb = ahc->next_queued_scb->hscb;
+ saved_tag = q_hscb->tag;
+ memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+ if ((scb->flags & SCB_CDB32_PTR) != 0) {
+ q_hscb->shared_data.cdb_ptr =
+ ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
+ + offsetof(struct hardware_scb, cdb32));
+ }
+ q_hscb->tag = saved_tag;
+ q_hscb->next = scb->hscb->tag;
+
+ /* Now swap HSCB pointers. */
+ ahc->next_queued_scb->hscb = scb->hscb;
+ scb->hscb = q_hscb;
+
+ /* Now define the mapping from tag to SCB in the scbindex */
+ ahc->scb_data->scbindex[scb->hscb->tag] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+void
+ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ ahc_swap_with_next_hscb(ahc, scb);
+
+ if (scb->hscb->tag == SCB_LIST_NULL
+ || scb->hscb->next == SCB_LIST_NULL)
+ panic("Attempt to queue invalid SCB tag %x:%x\n",
+ scb->hscb->tag, scb->hscb->next);
+
+ /*
+ * Setup data "oddness".
+ */
+ scb->hscb->lun &= LID;
+ if (ahc_get_transfer_length(scb) & 0x1)
+ scb->hscb->lun |= SCB_XFERLEN_ODD;
+
+ /*
+ * Keep a history of SCBs we've downloaded in the qinfifo.
+ */
+ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+
+ /*
+ * Make sure our data is consistent from the
+ * perspective of the adapter.
+ */
+ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ /* Tell the adapter about the newly queued SCB */
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+ } else {
+ if ((ahc->features & AHC_AUTOPAUSE) == 0)
+ ahc_pause(ahc);
+ ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+ if ((ahc->features & AHC_AUTOPAUSE) == 0)
+ ahc_unpause(ahc);
+ }
+}
+
+struct scsi_sense_data *
+ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
+{
+ int offset;
+
+ offset = scb - ahc->scb_data->scbarray;
+ return (&ahc->scb_data->sense[offset]);
+}
+
+static uint32_t
+ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
+{
+ int offset;
+
+ offset = scb - ahc->scb_data->scbarray;
+ return (ahc->scb_data->sense_busaddr
+ + (offset * sizeof(struct scsi_sense_data)));
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
+{
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ /*offset*/0, /*len*/256, op);
+}
+
+static void
+ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
+{
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+ ahc_targetcmd_offset(ahc, 0),
+ sizeof(struct target_cmd) * AHC_TMODE_CMDS,
+ op);
+ }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHC_RUN_QOUTFIFO 0x1
+#define AHC_RUN_TQINFIFO 0x2
+static u_int
+ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
+{
+ u_int retval;
+
+ retval = 0;
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ /*offset*/ahc->qoutfifonext, /*len*/1,
+ BUS_DMASYNC_POSTREAD);
+ if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
+ retval |= AHC_RUN_QOUTFIFO;
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0
+ && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+ ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
+ /*len*/sizeof(struct target_cmd),
+ BUS_DMASYNC_POSTREAD);
+ if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
+ retval |= AHC_RUN_TQINFIFO;
+ }
+#endif
+ return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahc_intr(struct ahc_softc *ahc)
+{
+ u_int intstat;
+
+ if ((ahc->pause & INTEN) == 0) {
+ /*
+ * Our interrupt is not enabled on the chip
+ * and may be disabled for re-entrancy reasons,
+ * so just return. This is likely just a shared
+ * interrupt.
+ */
+ return (0);
+ }
+ /*
+ * Instead of directly reading the interrupt status register,
+ * infer the cause of the interrupt by checking our in-core
+ * completion queues. This avoids a costly PCI bus read in
+ * most cases.
+ */
+ if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
+ && (ahc_check_cmdcmpltqueues(ahc) != 0))
+ intstat = CMDCMPLT;
+ else {
+ intstat = ahc_inb(ahc, INTSTAT);
+ }
+
+ if ((intstat & INT_PEND) == 0) {
+#if AHC_PCI_CONFIG > 0
+ if (ahc->unsolicited_ints > 500) {
+ ahc->unsolicited_ints = 0;
+ if ((ahc->chip & AHC_PCI) != 0
+ && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
+ ahc->bus_intr(ahc);
+ }
+#endif
+ ahc->unsolicited_ints++;
+ return (0);
+ }
+ ahc->unsolicited_ints = 0;
+
+ if (intstat & CMDCMPLT) {
+ ahc_outb(ahc, CLRINT, CLRCMDINT);
+
+ /*
+ * Ensure that the chip sees that we've cleared
+ * this interrupt before we walk the output fifo.
+ * Otherwise, we may, due to posted bus writes,
+ * clear the interrupt after we finish the scan,
+ * and after the sequencer has added new entries
+ * and asserted the interrupt again.
+ */
+ ahc_flush_device_writes(ahc);
+ ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0)
+ ahc_run_tqinfifo(ahc, /*paused*/FALSE);
+#endif
+ }
+
+ /*
+ * Handle statuses that may invalidate our cached
+ * copy of INTSTAT separately.
+ */
+ if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
+ /* Hot eject. Do nothing */
+ } else if (intstat & BRKADRINT) {
+ ahc_handle_brkadrint(ahc);
+ } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
+
+ ahc_pause_bug_fix(ahc);
+
+ if ((intstat & SEQINT) != 0)
+ ahc_handle_seqint(ahc, intstat);
+
+ if ((intstat & SCSIINT) != 0)
+ ahc_handle_scsiint(ahc, intstat);
+ }
+ return (1);
+}
+
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+static void
+ahc_restart(struct ahc_softc *ahc)
+{
+ uint8_t sblkctl;
+
+ ahc_pause(ahc);
+
+ /* No more pending messages. */
+ ahc_clear_msg_state(ahc);
+
+ ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */
+ ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */
+ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
+ ahc_outb(ahc, LASTPHASE, P_BUSFREE);
+ ahc_outb(ahc, SAVED_SCSIID, 0xFF);
+ ahc_outb(ahc, SAVED_LUN, 0xFF);
+
+ /*
+ * Ensure that the sequencer's idea of TQINPOS
+ * matches our own. The sequencer increments TQINPOS
+ * only after it sees a DMA complete and a reset could
+ * occur before the increment leaving the kernel to believe
+ * the command arrived but the sequencer to not.
+ */
+ ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
+
+ /* Always allow reselection */
+ ahc_outb(ahc, SCSISEQ,
+ ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
+ if ((ahc->features & AHC_CMD_CHAN) != 0) {
+ /* Ensure that no DMA operations are in progress */
+ ahc_outb(ahc, CCSCBCNT, 0);
+ ahc_outb(ahc, CCSGCTL, 0);
+ ahc_outb(ahc, CCSCBCTL, 0);
+ }
+ /*
+ * If we were in the process of DMA'ing SCB data into
+ * an SCB, replace that SCB on the free list. This prevents
+ * an SCB leak.
+ */
+ if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
+ ahc_add_curscb_to_free_list(ahc);
+ ahc_outb(ahc, SEQ_FLAGS2,
+ ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
+ }
+
+ /*
+ * Clear any pending sequencer interrupt. It is no
+ * longer relevant since we're resetting the Program
+ * Counter.
+ */
+ ahc_outb(ahc, CLRINT, CLRSEQINT);
+
+ ahc_outb(ahc, MWI_RESIDUAL, 0);
+ ahc_outb(ahc, SEQCTL, ahc->seqctl);
+ ahc_outb(ahc, SEQADDR0, 0);
+ ahc_outb(ahc, SEQADDR1, 0);
+
+ /*
+ * Take the LED out of diagnostic mode on PM resume, too
+ */
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
+
+ ahc_unpause(ahc);
+}
+
+/************************* Input/Output Queues ********************************/
+static void
+ahc_run_qoutfifo(struct ahc_softc *ahc)
+{
+ struct scb *scb;
+ u_int scb_index;
+
+ ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
+ while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
+
+ scb_index = ahc->qoutfifo[ahc->qoutfifonext];
+ if ((ahc->qoutfifonext & 0x03) == 0x03) {
+ u_int modnext;
+
+ /*
+ * Clear 32bits of QOUTFIFO at a time
+ * so that we don't clobber an incoming
+ * byte DMA to the array on architectures
+ * that only support 32bit load and store
+ * operations.
+ */
+ modnext = ahc->qoutfifonext & ~0x3;
+ *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+ /*offset*/modnext, /*len*/4,
+ BUS_DMASYNC_PREREAD);
+ }
+ ahc->qoutfifonext++;
+
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (scb == NULL) {
+ printk("%s: WARNING no command for scb %d "
+ "(cmdcmplt)\nQOUTPOS = %d\n",
+ ahc_name(ahc), scb_index,
+ (ahc->qoutfifonext - 1) & 0xFF);
+ continue;
+ }
+
+ /*
+ * Save off the residual
+ * if there is one.
+ */
+ ahc_update_residual(ahc, scb);
+ ahc_done(ahc, scb);
+ }
+}
+
+static void
+ahc_run_untagged_queues(struct ahc_softc *ahc)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
+}
+
+static void
+ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
+{
+ struct scb *scb;
+
+ if (ahc->untagged_queue_lock != 0)
+ return;
+
+ if ((scb = TAILQ_FIRST(queue)) != NULL
+ && (scb->flags & SCB_ACTIVE) == 0) {
+ scb->flags |= SCB_ACTIVE;
+ ahc_queue_scb(ahc, scb);
+ }
+}
+
+/************************* Interrupt Handling *********************************/
+static void
+ahc_handle_brkadrint(struct ahc_softc *ahc)
+{
+ /*
+ * We upset the sequencer :-(
+ * Lookup the error message
+ */
+ int i;
+ int error;
+
+ error = ahc_inb(ahc, ERROR);
+ for (i = 0; error != 1 && i < num_errors; i++)
+ error >>= 1;
+ printk("%s: brkadrint, %s at seqaddr = 0x%x\n",
+ ahc_name(ahc), ahc_hard_errors[i].errmesg,
+ ahc_inb(ahc, SEQADDR0) |
+ (ahc_inb(ahc, SEQADDR1) << 8));
+
+ ahc_dump_card_state(ahc);
+
+ /* Tell everyone that this HBA is no longer available */
+ ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
+ CAM_NO_HBA);
+
+ /* Disable all interrupt sources by resetting the controller */
+ ahc_shutdown(ahc);
+}
+
+static void
+ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
+{
+ struct scb *scb;
+ struct ahc_devinfo devinfo;
+
+ ahc_fetch_devinfo(ahc, &devinfo);
+
+ /*
+ * Clear the upper byte that holds SEQINT status
+ * codes and clear the SEQINT bit. We will unpause
+ * the sequencer, if appropriate, after servicing
+ * the request.
+ */
+ ahc_outb(ahc, CLRINT, CLRSEQINT);
+ switch (intstat & SEQINT_MASK) {
+ case BAD_STATUS:
+ {
+ u_int scb_index;
+ struct hardware_scb *hscb;
+
+ /*
+ * Set the default return value to 0 (don't
+ * send sense). The sense code will change
+ * this if needed.
+ */
+ ahc_outb(ahc, RETURN_1, 0);
+
+ /*
+ * The sequencer will notify us when a command
+ * has an error that would be of interest to
+ * the kernel. This allows us to leave the sequencer
+ * running in the common case of command completes
+ * without error. The sequencer will already have
+ * dma'd the SCB back up to us, so we can reference
+ * the in kernel copy directly.
+ */
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (scb == NULL) {
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("ahc_intr - referenced scb "
+ "not valid during seqint 0x%x scb(%d)\n",
+ intstat, scb_index);
+ ahc_dump_card_state(ahc);
+ panic("for safety");
+ goto unpause;
+ }
+
+ hscb = scb->hscb;
+
+ /* Don't want to clobber the original sense code */
+ if ((scb->flags & SCB_SENSE) != 0) {
+ /*
+ * Clear the SCB_SENSE Flag and have
+ * the sequencer do a normal command
+ * complete.
+ */
+ scb->flags &= ~SCB_SENSE;
+ ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+ break;
+ }
+ ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
+ /* Freeze the queue until the client sees the error. */
+ ahc_freeze_devq(ahc, scb);
+ ahc_freeze_scb(scb);
+ ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
+ switch (hscb->shared_data.status.scsi_status) {
+ case SCSI_STATUS_OK:
+ printk("%s: Interrupted for status of 0???\n",
+ ahc_name(ahc));
+ break;
+ case SCSI_STATUS_CMD_TERMINATED:
+ case SCSI_STATUS_CHECK_COND:
+ {
+ struct ahc_dma_seg *sg;
+ struct scsi_sense *sc;
+ struct ahc_initiator_tinfo *targ_info;
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_transinfo *tinfo;
+#ifdef AHC_DEBUG
+ if (ahc_debug & AHC_SHOW_SENSE) {
+ ahc_print_path(ahc, scb);
+ printk("SCB %d: requests Check Status\n",
+ scb->hscb->tag);
+ }
+#endif
+
+ if (ahc_perform_autosense(scb) == 0)
+ break;
+
+ targ_info = ahc_fetch_transinfo(ahc,
+ devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target,
+ &tstate);
+ tinfo = &targ_info->curr;
+ sg = scb->sg_list;
+ sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
+ /*
+ * Save off the residual if there is one.
+ */
+ ahc_update_residual(ahc, scb);
+#ifdef AHC_DEBUG
+ if (ahc_debug & AHC_SHOW_SENSE) {
+ ahc_print_path(ahc, scb);
+ printk("Sending Sense\n");
+ }
+#endif
+ sg->addr = ahc_get_sense_bufaddr(ahc, scb);
+ sg->len = ahc_get_sense_bufsize(ahc, scb);
+ sg->len |= AHC_DMA_LAST_SEG;
+
+ /* Fixup byte order */
+ sg->addr = ahc_htole32(sg->addr);
+ sg->len = ahc_htole32(sg->len);
+
+ sc->opcode = REQUEST_SENSE;
+ sc->byte2 = 0;
+ if (tinfo->protocol_version <= SCSI_REV_2
+ && SCB_GET_LUN(scb) < 8)
+ sc->byte2 = SCB_GET_LUN(scb) << 5;
+ sc->unused[0] = 0;
+ sc->unused[1] = 0;
+ sc->length = sg->len;
+ sc->control = 0;
+
+ /*
+ * We can't allow the target to disconnect.
+ * This will be an untagged transaction and
+ * having the target disconnect will make this
+ * transaction indestinguishable from outstanding
+ * tagged transactions.
+ */
+ hscb->control = 0;
+
+ /*
+ * This request sense could be because the
+ * the device lost power or in some other
+ * way has lost our transfer negotiations.
+ * Renegotiate if appropriate. Unit attention
+ * errors will be reported before any data
+ * phases occur.
+ */
+ if (ahc_get_residual(scb)
+ == ahc_get_transfer_length(scb)) {
+ ahc_update_neg_request(ahc, &devinfo,
+ tstate, targ_info,
+ AHC_NEG_IF_NON_ASYNC);
+ }
+ if (tstate->auto_negotiate & devinfo.target_mask) {
+ hscb->control |= MK_MESSAGE;
+ scb->flags &= ~SCB_NEGOTIATE;
+ scb->flags |= SCB_AUTO_NEGOTIATE;
+ }
+ hscb->cdb_len = sizeof(*sc);
+ hscb->dataptr = sg->addr;
+ hscb->datacnt = sg->len;
+ hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
+ hscb->sgptr = ahc_htole32(hscb->sgptr);
+ scb->sg_count = 1;
+ scb->flags |= SCB_SENSE;
+ ahc_qinfifo_requeue_tail(ahc, scb);
+ ahc_outb(ahc, RETURN_1, SEND_SENSE);
+ /*
+ * Ensure we have enough time to actually
+ * retrieve the sense.
+ */
+ ahc_scb_timer_reset(scb, 5 * 1000000);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+ case NO_MATCH:
+ {
+ /* Ensure we don't leave the selection hardware on */
+ ahc_outb(ahc, SCSISEQ,
+ ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
+
+ printk("%s:%c:%d: no active SCB for reconnecting "
+ "target - issuing BUS DEVICE RESET\n",
+ ahc_name(ahc), devinfo.channel, devinfo.target);
+ printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
+ "ARG_1 == 0x%x ACCUM = 0x%x\n",
+ ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
+ ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
+ printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
+ "SINDEX == 0x%x\n",
+ ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
+ ahc_index_busy_tcl(ahc,
+ BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
+ ahc_inb(ahc, SAVED_LUN))),
+ ahc_inb(ahc, SINDEX));
+ printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
+ "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
+ ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
+ ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
+ ahc_inb(ahc, SCB_CONTROL));
+ printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
+ ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
+ printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
+ printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
+ ahc_dump_card_state(ahc);
+ ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
+ ahc->msgout_len = 1;
+ ahc->msgout_index = 0;
+ ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ ahc_outb(ahc, MSG_OUT, HOST_MSG);
+ ahc_assert_atn(ahc);
+ break;
+ }
+ case SEND_REJECT:
+ {
+ u_int rejbyte = ahc_inb(ahc, ACCUM);
+ printk("%s:%c:%d: Warning - unknown message received from "
+ "target (0x%x). Rejecting\n",
+ ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
+ break;
+ }
+ case PROTO_VIOLATION:
+ {
+ ahc_handle_proto_violation(ahc);
+ break;
+ }
+ case IGN_WIDE_RES:
+ ahc_handle_ign_wide_residue(ahc, &devinfo);
+ break;
+ case PDATA_REINIT:
+ ahc_reinitialize_dataptrs(ahc);
+ break;
+ case BAD_PHASE:
+ {
+ u_int lastphase;
+
+ lastphase = ahc_inb(ahc, LASTPHASE);
+ printk("%s:%c:%d: unknown scsi bus phase %x, "
+ "lastphase = 0x%x. Attempting to continue\n",
+ ahc_name(ahc), devinfo.channel, devinfo.target,
+ lastphase, ahc_inb(ahc, SCSISIGI));
+ break;
+ }
+ case MISSED_BUSFREE:
+ {
+ u_int lastphase;
+
+ lastphase = ahc_inb(ahc, LASTPHASE);
+ printk("%s:%c:%d: Missed busfree. "
+ "Lastphase = 0x%x, Curphase = 0x%x\n",
+ ahc_name(ahc), devinfo.channel, devinfo.target,
+ lastphase, ahc_inb(ahc, SCSISIGI));
+ ahc_restart(ahc);
+ return;
+ }
+ case HOST_MSG_LOOP:
+ {
+ /*
+ * The sequencer has encountered a message phase
+ * that requires host assistance for completion.
+ * While handling the message phase(s), we will be
+ * notified by the sequencer after each byte is
+ * transferred so we can track bus phase changes.
+ *
+ * If this is the first time we've seen a HOST_MSG_LOOP
+ * interrupt, initialize the state of the host message
+ * loop.
+ */
+ if (ahc->msg_type == MSG_TYPE_NONE) {
+ struct scb *scb;
+ u_int scb_index;
+ u_int bus_phase;
+
+ bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+ if (bus_phase != P_MESGIN
+ && bus_phase != P_MESGOUT) {
+ printk("ahc_intr: HOST_MSG_LOOP bad "
+ "phase 0x%x\n",
+ bus_phase);
+ /*
+ * Probably transitioned to bus free before
+ * we got here. Just punt the message.
+ */
+ ahc_clear_intstat(ahc);
+ ahc_restart(ahc);
+ return;
+ }
+
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (devinfo.role == ROLE_INITIATOR) {
+ if (bus_phase == P_MESGOUT) {
+ if (scb == NULL)
+ panic("HOST_MSG_LOOP with "
+ "invalid SCB %x\n",
+ scb_index);
+
+ ahc_setup_initiator_msgout(ahc,
+ &devinfo,
+ scb);
+ } else {
+ ahc->msg_type =
+ MSG_TYPE_INITIATOR_MSGIN;
+ ahc->msgin_index = 0;
+ }
+ }
+#ifdef AHC_TARGET_MODE
+ else {
+ if (bus_phase == P_MESGOUT) {
+ ahc->msg_type =
+ MSG_TYPE_TARGET_MSGOUT;
+ ahc->msgin_index = 0;
+ }
+ else
+ ahc_setup_target_msgin(ahc,
+ &devinfo,
+ scb);
+ }
+#endif
+ }
+
+ ahc_handle_message_phase(ahc);
+ break;
+ }
+ case PERR_DETECTED:
+ {
+ /*
+ * If we've cleared the parity error interrupt
+ * but the sequencer still believes that SCSIPERR
+ * is true, it must be that the parity error is
+ * for the currently presented byte on the bus,
+ * and we are not in a phase (data-in) where we will
+ * eventually ack this byte. Ack the byte and
+ * throw it away in the hope that the target will
+ * take us to message out to deliver the appropriate
+ * error message.
+ */
+ if ((intstat & SCSIINT) == 0
+ && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
+
+ if ((ahc->features & AHC_DT) == 0) {
+ u_int curphase;
+
+ /*
+ * The hardware will only let you ack bytes
+ * if the expected phase in SCSISIGO matches
+ * the current phase. Make sure this is
+ * currently the case.
+ */
+ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+ ahc_outb(ahc, LASTPHASE, curphase);
+ ahc_outb(ahc, SCSISIGO, curphase);
+ }
+ if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
+ int wait;
+
+ /*
+ * In a data phase. Faster to bitbucket
+ * the data than to individually ack each
+ * byte. This is also the only strategy
+ * that will work with AUTOACK enabled.
+ */
+ ahc_outb(ahc, SXFRCTL1,
+ ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
+ wait = 5000;
+ while (--wait != 0) {
+ if ((ahc_inb(ahc, SCSISIGI)
+ & (CDI|MSGI)) != 0)
+ break;
+ ahc_delay(100);
+ }
+ ahc_outb(ahc, SXFRCTL1,
+ ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
+ if (wait == 0) {
+ struct scb *scb;
+ u_int scb_index;
+
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("Unable to clear parity error. "
+ "Resetting bus.\n");
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (scb != NULL)
+ ahc_set_transaction_status(scb,
+ CAM_UNCOR_PARITY);
+ ahc_reset_channel(ahc, devinfo.channel,
+ /*init reset*/TRUE);
+ }
+ } else {
+ ahc_inb(ahc, SCSIDATL);
+ }
+ }
+ break;
+ }
+ case DATA_OVERRUN:
+ {
+ /*
+ * When the sequencer detects an overrun, it
+ * places the controller in "BITBUCKET" mode
+ * and allows the target to complete its transfer.
+ * Unfortunately, none of the counters get updated
+ * when the controller is in this mode, so we have
+ * no way of knowing how large the overrun was.
+ */
+ u_int scbindex = ahc_inb(ahc, SCB_TAG);
+ u_int lastphase = ahc_inb(ahc, LASTPHASE);
+ u_int i;
+
+ scb = ahc_lookup_scb(ahc, scbindex);
+ for (i = 0; i < num_phases; i++) {
+ if (lastphase == ahc_phase_table[i].phase)
+ break;
+ }
+ ahc_print_path(ahc, scb);
+ printk("data overrun detected %s."
+ " Tag == 0x%x.\n",
+ ahc_phase_table[i].phasemsg,
+ scb->hscb->tag);
+ ahc_print_path(ahc, scb);
+ printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
+ ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
+ ahc_get_transfer_length(scb), scb->sg_count);
+ if (scb->sg_count > 0) {
+ for (i = 0; i < scb->sg_count; i++) {
+
+ printk("sg[%d] - Addr 0x%x%x : Length %d\n",
+ i,
+ (ahc_le32toh(scb->sg_list[i].len) >> 24
+ & SG_HIGH_ADDR_BITS),
+ ahc_le32toh(scb->sg_list[i].addr),
+ ahc_le32toh(scb->sg_list[i].len)
+ & AHC_SG_LEN_MASK);
+ }
+ }
+ /*
+ * Set this and it will take effect when the
+ * target does a command complete.
+ */
+ ahc_freeze_devq(ahc, scb);
+ if ((scb->flags & SCB_SENSE) == 0) {
+ ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+ } else {
+ scb->flags &= ~SCB_SENSE;
+ ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+ }
+ ahc_freeze_scb(scb);
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ /*
+ * Clear the channel in case we return
+ * to data phase later.
+ */
+ ahc_outb(ahc, SXFRCTL0,
+ ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
+ ahc_outb(ahc, SXFRCTL0,
+ ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
+ }
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ u_int dscommand1;
+
+ /* Ensure HHADDR is 0 for future DMA operations. */
+ dscommand1 = ahc_inb(ahc, DSCOMMAND1);
+ ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
+ ahc_outb(ahc, HADDR, 0);
+ ahc_outb(ahc, DSCOMMAND1, dscommand1);
+ }
+ break;
+ }
+ case MKMSG_FAILED:
+ {
+ u_int scbindex;
+
+ printk("%s:%c:%d:%d: Attempt to issue message failed\n",
+ ahc_name(ahc), devinfo.channel, devinfo.target,
+ devinfo.lun);
+ scbindex = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scbindex);
+ if (scb != NULL
+ && (scb->flags & SCB_RECOVERY_SCB) != 0)
+ /*
+ * Ensure that we didn't put a second instance of this
+ * SCB into the QINFIFO.
+ */
+ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
+ SCB_GET_CHANNEL(ahc, scb),
+ SCB_GET_LUN(scb), scb->hscb->tag,
+ ROLE_INITIATOR, /*status*/0,
+ SEARCH_REMOVE);
+ break;
+ }
+ case NO_FREE_SCB:
+ {
+ printk("%s: No free or disconnected SCBs\n", ahc_name(ahc));
+ ahc_dump_card_state(ahc);
+ panic("for safety");
+ break;
+ }
+ case SCB_MISMATCH:
+ {
+ u_int scbptr;
+
+ scbptr = ahc_inb(ahc, SCBPTR);
+ printk("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
+ scbptr, ahc_inb(ahc, ARG_1),
+ ahc->scb_data->hscbs[scbptr].tag);
+ ahc_dump_card_state(ahc);
+ panic("for safety");
+ break;
+ }
+ case OUT_OF_RANGE:
+ {
+ printk("%s: BTT calculation out of range\n", ahc_name(ahc));
+ printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
+ "ARG_1 == 0x%x ACCUM = 0x%x\n",
+ ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
+ ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
+ printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
+ "SINDEX == 0x%x\n, A == 0x%x\n",
+ ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
+ ahc_index_busy_tcl(ahc,
+ BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
+ ahc_inb(ahc, SAVED_LUN))),
+ ahc_inb(ahc, SINDEX),
+ ahc_inb(ahc, ACCUM));
+ printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
+ "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
+ ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
+ ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
+ ahc_inb(ahc, SCB_CONTROL));
+ printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
+ ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
+ ahc_dump_card_state(ahc);
+ panic("for safety");
+ break;
+ }
+ default:
+ printk("ahc_intr: seqint, "
+ "intstat == 0x%x, scsisigi = 0x%x\n",
+ intstat, ahc_inb(ahc, SCSISIGI));
+ break;
+ }
+unpause:
+ /*
+ * The sequencer is paused immediately on
+ * a SEQINT, so we should restart it when
+ * we're done.
+ */
+ ahc_unpause(ahc);
+}
+
+static void
+ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
+{
+ u_int scb_index;
+ u_int status0;
+ u_int status;
+ struct scb *scb;
+ char cur_channel;
+ char intr_channel;
+
+ if ((ahc->features & AHC_TWIN) != 0
+ && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
+ cur_channel = 'B';
+ else
+ cur_channel = 'A';
+ intr_channel = cur_channel;
+
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ status0 = ahc_inb(ahc, SSTAT0) & IOERR;
+ else
+ status0 = 0;
+ status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+ if (status == 0 && status0 == 0) {
+ if ((ahc->features & AHC_TWIN) != 0) {
+ /* Try the other channel */
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ status = ahc_inb(ahc, SSTAT1)
+ & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+ intr_channel = (cur_channel == 'A') ? 'B' : 'A';
+ }
+ if (status == 0) {
+ printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ ahc_unpause(ahc);
+ return;
+ }
+ }
+
+ /* Make sure the sequencer is in a safe location. */
+ ahc_clear_critical_section(ahc);
+
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (scb != NULL
+ && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+ scb = NULL;
+
+ if ((ahc->features & AHC_ULTRA2) != 0
+ && (status0 & IOERR) != 0) {
+ int now_lvd;
+
+ now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
+ printk("%s: Transceiver State Has Changed to %s mode\n",
+ ahc_name(ahc), now_lvd ? "LVD" : "SE");
+ ahc_outb(ahc, CLRSINT0, CLRIOERR);
+ /*
+ * When transitioning to SE mode, the reset line
+ * glitches, triggering an arbitration bug in some
+ * Ultra2 controllers. This bug is cleared when we
+ * assert the reset line. Since a reset glitch has
+ * already occurred with this transition and a
+ * transceiver state change is handled just like
+ * a bus reset anyway, asserting the reset line
+ * ourselves is safe.
+ */
+ ahc_reset_channel(ahc, intr_channel,
+ /*Initiate Reset*/now_lvd == 0);
+ } else if ((status & SCSIRSTI) != 0) {
+ printk("%s: Someone reset channel %c\n",
+ ahc_name(ahc), intr_channel);
+ if (intr_channel != cur_channel)
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
+ } else if ((status & SCSIPERR) != 0) {
+ /*
+ * Determine the bus phase and queue an appropriate message.
+ * SCSIPERR is latched true as soon as a parity error
+ * occurs. If the sequencer acked the transfer that
+ * caused the parity error and the currently presented
+ * transfer on the bus has correct parity, SCSIPERR will
+ * be cleared by CLRSCSIPERR. Use this to determine if
+ * we should look at the last phase the sequencer recorded,
+ * or the current phase presented on the bus.
+ */
+ struct ahc_devinfo devinfo;
+ u_int mesg_out;
+ u_int curphase;
+ u_int errorphase;
+ u_int lastphase;
+ u_int scsirate;
+ u_int i;
+ u_int sstat2;
+ int silent;
+
+ lastphase = ahc_inb(ahc, LASTPHASE);
+ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+ sstat2 = ahc_inb(ahc, SSTAT2);
+ ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
+ /*
+ * For all phases save DATA, the sequencer won't
+ * automatically ack a byte that has a parity error
+ * in it. So the only way that the current phase
+ * could be 'data-in' is if the parity error is for
+ * an already acked byte in the data phase. During
+ * synchronous data-in transfers, we may actually
+ * ack bytes before latching the current phase in
+ * LASTPHASE, leading to the discrepancy between
+ * curphase and lastphase.
+ */
+ if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
+ || curphase == P_DATAIN || curphase == P_DATAIN_DT)
+ errorphase = curphase;
+ else
+ errorphase = lastphase;
+
+ for (i = 0; i < num_phases; i++) {
+ if (errorphase == ahc_phase_table[i].phase)
+ break;
+ }
+ mesg_out = ahc_phase_table[i].mesg_out;
+ silent = FALSE;
+ if (scb != NULL) {
+ if (SCB_IS_SILENT(scb))
+ silent = TRUE;
+ else
+ ahc_print_path(ahc, scb);
+ scb->flags |= SCB_TRANSMISSION_ERROR;
+ } else
+ printk("%s:%c:%d: ", ahc_name(ahc), intr_channel,
+ SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
+ scsirate = ahc_inb(ahc, SCSIRATE);
+ if (silent == FALSE) {
+ printk("parity error detected %s. "
+ "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
+ ahc_phase_table[i].phasemsg,
+ ahc_inw(ahc, SEQADDR0),
+ scsirate);
+ if ((ahc->features & AHC_DT) != 0) {
+ if ((sstat2 & CRCVALERR) != 0)
+ printk("\tCRC Value Mismatch\n");
+ if ((sstat2 & CRCENDERR) != 0)
+ printk("\tNo terminal CRC packet "
+ "recevied\n");
+ if ((sstat2 & CRCREQERR) != 0)
+ printk("\tIllegal CRC packet "
+ "request\n");
+ if ((sstat2 & DUAL_EDGE_ERR) != 0)
+ printk("\tUnexpected %sDT Data Phase\n",
+ (scsirate & SINGLE_EDGE)
+ ? "" : "non-");
+ }
+ }
+
+ if ((ahc->features & AHC_DT) != 0
+ && (sstat2 & DUAL_EDGE_ERR) != 0) {
+ /*
+ * This error applies regardless of
+ * data direction, so ignore the value
+ * in the phase table.
+ */
+ mesg_out = MSG_INITIATOR_DET_ERR;
+ }
+
+ /*
+ * We've set the hardware to assert ATN if we
+ * get a parity error on "in" phases, so all we
+ * need to do is stuff the message buffer with
+ * the appropriate message. "In" phases have set
+ * mesg_out to something other than MSG_NOP.
+ */
+ if (mesg_out != MSG_NOOP) {
+ if (ahc->msg_type != MSG_TYPE_NONE)
+ ahc->send_msg_perror = TRUE;
+ else
+ ahc_outb(ahc, MSG_OUT, mesg_out);
+ }
+ /*
+ * Force a renegotiation with this target just in
+ * case we are out of sync for some external reason
+ * unknown (or unreported) by the target.
+ */
+ ahc_fetch_devinfo(ahc, &devinfo);
+ ahc_force_renegotiation(ahc, &devinfo);
+
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ ahc_unpause(ahc);
+ } else if ((status & SELTO) != 0) {
+ u_int scbptr;
+
+ /* Stop the selection */
+ ahc_outb(ahc, SCSISEQ, 0);
+
+ /* No more pending messages */
+ ahc_clear_msg_state(ahc);
+
+ /* Clear interrupt state */
+ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
+ ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
+
+ /*
+ * Although the driver does not care about the
+ * 'Selection in Progress' status bit, the busy
+ * LED does. SELINGO is only cleared by a successful
+ * selection, so we must manually clear it to insure
+ * the LED turns off just incase no future successful
+ * selections occur (e.g. no devices on the bus).
+ */
+ ahc_outb(ahc, CLRSINT0, CLRSELINGO);
+
+ scbptr = ahc_inb(ahc, WAITING_SCBH);
+ ahc_outb(ahc, SCBPTR, scbptr);
+ scb_index = ahc_inb(ahc, SCB_TAG);
+
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (scb == NULL) {
+ printk("%s: ahc_intr - referenced scb not "
+ "valid during SELTO scb(%d, %d)\n",
+ ahc_name(ahc), scbptr, scb_index);
+ ahc_dump_card_state(ahc);
+ } else {
+ struct ahc_devinfo devinfo;
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
+ ahc_print_path(ahc, scb);
+ printk("Saw Selection Timeout for SCB 0x%x\n",
+ scb_index);
+ }
+#endif
+ ahc_scb_devinfo(ahc, &devinfo, scb);
+ ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
+ ahc_freeze_devq(ahc, scb);
+
+ /*
+ * Cancel any pending transactions on the device
+ * now that it seems to be missing. This will
+ * also revert us to async/narrow transfers until
+ * we can renegotiate with the device.
+ */
+ ahc_handle_devreset(ahc, &devinfo,
+ CAM_SEL_TIMEOUT,
+ "Selection Timeout",
+ /*verbose_level*/1);
+ }
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ ahc_restart(ahc);
+ } else if ((status & BUSFREE) != 0
+ && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
+ struct ahc_devinfo devinfo;
+ u_int lastphase;
+ u_int saved_scsiid;
+ u_int saved_lun;
+ u_int target;
+ u_int initiator_role_id;
+ char channel;
+ int printerror;
+
+ /*
+ * Clear our selection hardware as soon as possible.
+ * We may have an entry in the waiting Q for this target,
+ * that is affected by this busfree and we don't want to
+ * go about selecting the target while we handle the event.
+ */
+ ahc_outb(ahc, SCSISEQ,
+ ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
+
+ /*
+ * Disable busfree interrupts and clear the busfree
+ * interrupt status. We do this here so that several
+ * bus transactions occur prior to clearing the SCSIINT
+ * latch. It can take a bit for the clearing to take effect.
+ */
+ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
+ ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
+
+ /*
+ * Look at what phase we were last in.
+ * If its message out, chances are pretty good
+ * that the busfree was in response to one of
+ * our abort requests.
+ */
+ lastphase = ahc_inb(ahc, LASTPHASE);
+ saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
+ saved_lun = ahc_inb(ahc, SAVED_LUN);
+ target = SCSIID_TARGET(ahc, saved_scsiid);
+ initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
+ channel = SCSIID_CHANNEL(ahc, saved_scsiid);
+ ahc_compile_devinfo(&devinfo, initiator_role_id,
+ target, saved_lun, channel, ROLE_INITIATOR);
+ printerror = 1;
+
+ if (lastphase == P_MESGOUT) {
+ u_int tag;
+
+ tag = SCB_LIST_NULL;
+ if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
+ || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
+ if (ahc->msgout_buf[ahc->msgout_index - 1]
+ == MSG_ABORT_TAG)
+ tag = scb->hscb->tag;
+ ahc_print_path(ahc, scb);
+ printk("SCB %d - Abort%s Completed.\n",
+ scb->hscb->tag, tag == SCB_LIST_NULL ?
+ "" : " Tag");
+ ahc_abort_scbs(ahc, target, channel,
+ saved_lun, tag,
+ ROLE_INITIATOR,
+ CAM_REQ_ABORTED);
+ printerror = 0;
+ } else if (ahc_sent_msg(ahc, AHCMSG_1B,
+ MSG_BUS_DEV_RESET, TRUE)) {
+#ifdef __FreeBSD__
+ /*
+ * Don't mark the user's request for this BDR
+ * as completing with CAM_BDR_SENT. CAM3
+ * specifies CAM_REQ_CMP.
+ */
+ if (scb != NULL
+ && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
+ && ahc_match_scb(ahc, scb, target, channel,
+ CAM_LUN_WILDCARD,
+ SCB_LIST_NULL,
+ ROLE_INITIATOR)) {
+ ahc_set_transaction_status(scb, CAM_REQ_CMP);
+ }
+#endif
+ ahc_compile_devinfo(&devinfo,
+ initiator_role_id,
+ target,
+ CAM_LUN_WILDCARD,
+ channel,
+ ROLE_INITIATOR);
+ ahc_handle_devreset(ahc, &devinfo,
+ CAM_BDR_SENT,
+ "Bus Device Reset",
+ /*verbose_level*/0);
+ printerror = 0;
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
+ MSG_EXT_PPR, FALSE)) {
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+
+ /*
+ * PPR Rejected. Try non-ppr negotiation
+ * and retry command.
+ */
+ tinfo = ahc_fetch_transinfo(ahc,
+ devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target,
+ &tstate);
+ tinfo->curr.transport_version = 2;
+ tinfo->goal.transport_version = 2;
+ tinfo->goal.ppr_options = 0;
+ ahc_qinfifo_requeue_tail(ahc, scb);
+ printerror = 0;
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
+ MSG_EXT_WDTR, FALSE)) {
+ /*
+ * Negotiation Rejected. Go-narrow and
+ * retry command.
+ */
+ ahc_set_width(ahc, &devinfo,
+ MSG_EXT_WDTR_BUS_8_BIT,
+ AHC_TRANS_CUR|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ ahc_qinfifo_requeue_tail(ahc, scb);
+ printerror = 0;
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
+ MSG_EXT_SDTR, FALSE)) {
+ /*
+ * Negotiation Rejected. Go-async and
+ * retry command.
+ */
+ ahc_set_syncrate(ahc, &devinfo,
+ /*syncrate*/NULL,
+ /*period*/0, /*offset*/0,
+ /*ppr_options*/0,
+ AHC_TRANS_CUR|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ ahc_qinfifo_requeue_tail(ahc, scb);
+ printerror = 0;
+ }
+ }
+ if (printerror != 0) {
+ u_int i;
+
+ if (scb != NULL) {
+ u_int tag;
+
+ if ((scb->hscb->control & TAG_ENB) != 0)
+ tag = scb->hscb->tag;
+ else
+ tag = SCB_LIST_NULL;
+ ahc_print_path(ahc, scb);
+ ahc_abort_scbs(ahc, target, channel,
+ SCB_GET_LUN(scb), tag,
+ ROLE_INITIATOR,
+ CAM_UNEXP_BUSFREE);
+ } else {
+ /*
+ * We had not fully identified this connection,
+ * so we cannot abort anything.
+ */
+ printk("%s: ", ahc_name(ahc));
+ }
+ for (i = 0; i < num_phases; i++) {
+ if (lastphase == ahc_phase_table[i].phase)
+ break;
+ }
+ if (lastphase != P_BUSFREE) {
+ /*
+ * Renegotiate with this device at the
+ * next opportunity just in case this busfree
+ * is due to a negotiation mismatch with the
+ * device.
+ */
+ ahc_force_renegotiation(ahc, &devinfo);
+ }
+ printk("Unexpected busfree %s\n"
+ "SEQADDR == 0x%x\n",
+ ahc_phase_table[i].phasemsg,
+ ahc_inb(ahc, SEQADDR0)
+ | (ahc_inb(ahc, SEQADDR1) << 8));
+ }
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ ahc_restart(ahc);
+ } else {
+ printk("%s: Missing case in ahc_handle_scsiint. status = %x\n",
+ ahc_name(ahc), status);
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ }
+}
+
+/*
+ * Force renegotiation to occur the next time we initiate
+ * a command to the current device.
+ */
+static void
+ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ struct ahc_initiator_tinfo *targ_info;
+ struct ahc_tmode_tstate *tstate;
+
+ targ_info = ahc_fetch_transinfo(ahc,
+ devinfo->channel,
+ devinfo->our_scsiid,
+ devinfo->target,
+ &tstate);
+ ahc_update_neg_request(ahc, devinfo, tstate,
+ targ_info, AHC_NEG_IF_NON_ASYNC);
+}
+
+#define AHC_MAX_STEPS 2000
+static void
+ahc_clear_critical_section(struct ahc_softc *ahc)
+{
+ int stepping;
+ int steps;
+ u_int simode0;
+ u_int simode1;
+
+ if (ahc->num_critical_sections == 0)
+ return;
+
+ stepping = FALSE;
+ steps = 0;
+ simode0 = 0;
+ simode1 = 0;
+ for (;;) {
+ struct cs *cs;
+ u_int seqaddr;
+ u_int i;
+
+ seqaddr = ahc_inb(ahc, SEQADDR0)
+ | (ahc_inb(ahc, SEQADDR1) << 8);
+
+ /*
+ * Seqaddr represents the next instruction to execute,
+ * so we are really executing the instruction just
+ * before it.
+ */
+ if (seqaddr != 0)
+ seqaddr -= 1;
+ cs = ahc->critical_sections;
+ for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
+
+ if (cs->begin < seqaddr && cs->end >= seqaddr)
+ break;
+ }
+
+ if (i == ahc->num_critical_sections)
+ break;
+
+ if (steps > AHC_MAX_STEPS) {
+ printk("%s: Infinite loop in critical section\n",
+ ahc_name(ahc));
+ ahc_dump_card_state(ahc);
+ panic("critical section loop");
+ }
+
+ steps++;
+ if (stepping == FALSE) {
+
+ /*
+ * Disable all interrupt sources so that the
+ * sequencer will not be stuck by a pausing
+ * interrupt condition while we attempt to
+ * leave a critical section.
+ */
+ simode0 = ahc_inb(ahc, SIMODE0);
+ ahc_outb(ahc, SIMODE0, 0);
+ simode1 = ahc_inb(ahc, SIMODE1);
+ if ((ahc->features & AHC_DT) != 0)
+ /*
+ * On DT class controllers, we
+ * use the enhanced busfree logic.
+ * Unfortunately we cannot re-enable
+ * busfree detection within the
+ * current connection, so we must
+ * leave it on while single stepping.
+ */
+ ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
+ else
+ ahc_outb(ahc, SIMODE1, 0);
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
+ stepping = TRUE;
+ }
+ if ((ahc->features & AHC_DT) != 0) {
+ ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ }
+ ahc_outb(ahc, HCNTRL, ahc->unpause);
+ while (!ahc_is_paused(ahc))
+ ahc_delay(200);
+ }
+ if (stepping) {
+ ahc_outb(ahc, SIMODE0, simode0);
+ ahc_outb(ahc, SIMODE1, simode1);
+ ahc_outb(ahc, SEQCTL, ahc->seqctl);
+ }
+}
+
+/*
+ * Clear any pending interrupt status.
+ */
+static void
+ahc_clear_intstat(struct ahc_softc *ahc)
+{
+ /* Clear any interrupt conditions this may have caused */
+ ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
+ |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
+ CLRREQINIT);
+ ahc_flush_device_writes(ahc);
+ ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
+ ahc_flush_device_writes(ahc);
+ ahc_outb(ahc, CLRINT, CLRSCSIINT);
+ ahc_flush_device_writes(ahc);
+}
+
+/**************************** Debugging Routines ******************************/
+#ifdef AHC_DEBUG
+uint32_t ahc_debug = AHC_DEBUG_OPTS;
+#endif
+
+#if 0 /* unused */
+static void
+ahc_print_scb(struct scb *scb)
+{
+ int i;
+
+ struct hardware_scb *hscb = scb->hscb;
+
+ printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
+ (void *)scb,
+ hscb->control,
+ hscb->scsiid,
+ hscb->lun,
+ hscb->cdb_len);
+ printk("Shared Data: ");
+ for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
+ printk("%#02x", hscb->shared_data.cdb[i]);
+ printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
+ ahc_le32toh(hscb->dataptr),
+ ahc_le32toh(hscb->datacnt),
+ ahc_le32toh(hscb->sgptr),
+ hscb->tag);
+ if (scb->sg_count > 0) {
+ for (i = 0; i < scb->sg_count; i++) {
+ printk("sg[%d] - Addr 0x%x%x : Length %d\n",
+ i,
+ (ahc_le32toh(scb->sg_list[i].len) >> 24
+ & SG_HIGH_ADDR_BITS),
+ ahc_le32toh(scb->sg_list[i].addr),
+ ahc_le32toh(scb->sg_list[i].len));
+ }
+ }
+}
+#endif
+
+/************************* Transfer Negotiation *******************************/
+/*
+ * Allocate per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static struct ahc_tmode_tstate *
+ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
+{
+ struct ahc_tmode_tstate *master_tstate;
+ struct ahc_tmode_tstate *tstate;
+ int i;
+
+ master_tstate = ahc->enabled_targets[ahc->our_id];
+ if (channel == 'B') {
+ scsi_id += 8;
+ master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
+ }
+ if (ahc->enabled_targets[scsi_id] != NULL
+ && ahc->enabled_targets[scsi_id] != master_tstate)
+ panic("%s: ahc_alloc_tstate - Target already allocated",
+ ahc_name(ahc));
+ tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC);
+ if (tstate == NULL)
+ return (NULL);
+
+ /*
+ * If we have allocated a master tstate, copy user settings from
+ * the master tstate (taken from SRAM or the EEPROM) for this
+ * channel, but reset our current and goal settings to async/narrow
+ * until an initiator talks to us.
+ */
+ if (master_tstate != NULL) {
+ memcpy(tstate, master_tstate, sizeof(*tstate));
+ memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
+ tstate->ultraenb = 0;
+ for (i = 0; i < AHC_NUM_TARGETS; i++) {
+ memset(&tstate->transinfo[i].curr, 0,
+ sizeof(tstate->transinfo[i].curr));
+ memset(&tstate->transinfo[i].goal, 0,
+ sizeof(tstate->transinfo[i].goal));
+ }
+ } else
+ memset(tstate, 0, sizeof(*tstate));
+ ahc->enabled_targets[scsi_id] = tstate;
+ return (tstate);
+}
+
+#ifdef AHC_TARGET_MODE
+/*
+ * Free per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static void
+ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
+{
+ struct ahc_tmode_tstate *tstate;
+
+ /*
+ * Don't clean up our "master" tstate.
+ * It has our default user settings.
+ */
+ if (((channel == 'B' && scsi_id == ahc->our_id_b)
+ || (channel == 'A' && scsi_id == ahc->our_id))
+ && force == FALSE)
+ return;
+
+ if (channel == 'B')
+ scsi_id += 8;
+ tstate = ahc->enabled_targets[scsi_id];
+ if (tstate != NULL)
+ kfree(tstate);
+ ahc->enabled_targets[scsi_id] = NULL;
+}
+#endif
+
+/*
+ * Called when we have an active connection to a target on the bus,
+ * this function finds the nearest syncrate to the input period limited
+ * by the capabilities of the bus connectivity of and sync settings for
+ * the target.
+ */
+const struct ahc_syncrate *
+ahc_devlimited_syncrate(struct ahc_softc *ahc,
+ struct ahc_initiator_tinfo *tinfo,
+ u_int *period, u_int *ppr_options, role_t role)
+{
+ struct ahc_transinfo *transinfo;
+ u_int maxsync;
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
+ && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
+ maxsync = AHC_SYNCRATE_DT;
+ } else {
+ maxsync = AHC_SYNCRATE_ULTRA;
+ /* Can't do DT on an SE bus */
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+ }
+ } else if ((ahc->features & AHC_ULTRA) != 0) {
+ maxsync = AHC_SYNCRATE_ULTRA;
+ } else {
+ maxsync = AHC_SYNCRATE_FAST;
+ }
+ /*
+ * Never allow a value higher than our current goal
+ * period otherwise we may allow a target initiated
+ * negotiation to go above the limit as set by the
+ * user. In the case of an initiator initiated
+ * sync negotiation, we limit based on the user
+ * setting. This allows the system to still accept
+ * incoming negotiations even if target initiated
+ * negotiation is not performed.
+ */
+ if (role == ROLE_TARGET)
+ transinfo = &tinfo->user;
+ else
+ transinfo = &tinfo->goal;
+ *ppr_options &= transinfo->ppr_options;
+ if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
+ maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2);
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+ }
+ if (transinfo->period == 0) {
+ *period = 0;
+ *ppr_options = 0;
+ return (NULL);
+ }
+ *period = max(*period, (u_int)transinfo->period);
+ return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
+}
+
+/*
+ * Look up the valid period to SCSIRATE conversion in our table.
+ * Return the period and offset that should be sent to the target
+ * if this was the beginning of an SDTR.
+ */
+const struct ahc_syncrate *
+ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+ u_int *ppr_options, u_int maxsync)
+{
+ const struct ahc_syncrate *syncrate;
+
+ if ((ahc->features & AHC_DT) == 0)
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+
+ /* Skip all DT only entries if DT is not available */
+ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+ && maxsync < AHC_SYNCRATE_ULTRA2)
+ maxsync = AHC_SYNCRATE_ULTRA2;
+
+ /* Now set the maxsync based on the card capabilities
+ * DT is already done above */
+ if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
+ && maxsync < AHC_SYNCRATE_ULTRA)
+ maxsync = AHC_SYNCRATE_ULTRA;
+ if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
+ && maxsync < AHC_SYNCRATE_FAST)
+ maxsync = AHC_SYNCRATE_FAST;
+
+ for (syncrate = &ahc_syncrates[maxsync];
+ syncrate->rate != NULL;
+ syncrate++) {
+
+ /*
+ * The Ultra2 table doesn't go as low
+ * as for the Fast/Ultra cards.
+ */
+ if ((ahc->features & AHC_ULTRA2) != 0
+ && (syncrate->sxfr_u2 == 0))
+ break;
+
+ if (*period <= syncrate->period) {
+ /*
+ * When responding to a target that requests
+ * sync, the requested rate may fall between
+ * two rates that we can output, but still be
+ * a rate that we can receive. Because of this,
+ * we want to respond to the target with
+ * the same rate that it sent to us even
+ * if the period we use to send data to it
+ * is lower. Only lower the response period
+ * if we must.
+ */
+ if (syncrate == &ahc_syncrates[maxsync])
+ *period = syncrate->period;
+
+ /*
+ * At some speeds, we only support
+ * ST transfers.
+ */
+ if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+ break;
+ }
+ }
+
+ if ((*period == 0)
+ || (syncrate->rate == NULL)
+ || ((ahc->features & AHC_ULTRA2) != 0
+ && (syncrate->sxfr_u2 == 0))) {
+ /* Use asynchronous transfers. */
+ *period = 0;
+ syncrate = NULL;
+ *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+ }
+ return (syncrate);
+}
+
+/*
+ * Convert from an entry in our syncrate table to the SCSI equivalent
+ * sync "period" factor.
+ */
+u_int
+ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
+{
+ const struct ahc_syncrate *syncrate;
+
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ scsirate &= SXFR_ULTRA2;
+ else
+ scsirate &= SXFR;
+
+ /* now set maxsync based on card capabilities */
+ if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2)
+ maxsync = AHC_SYNCRATE_ULTRA2;
+ if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
+ && maxsync < AHC_SYNCRATE_ULTRA)
+ maxsync = AHC_SYNCRATE_ULTRA;
+ if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
+ && maxsync < AHC_SYNCRATE_FAST)
+ maxsync = AHC_SYNCRATE_FAST;
+
+
+ syncrate = &ahc_syncrates[maxsync];
+ while (syncrate->rate != NULL) {
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ if (syncrate->sxfr_u2 == 0)
+ break;
+ else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
+ return (syncrate->period);
+ } else if (scsirate == (syncrate->sxfr & SXFR)) {
+ return (syncrate->period);
+ }
+ syncrate++;
+ }
+ return (0); /* async */
+}
+
+/*
+ * Truncate the given synchronous offset to a value the
+ * current adapter type and syncrate are capable of.
+ */
+static void
+ahc_validate_offset(struct ahc_softc *ahc,
+ struct ahc_initiator_tinfo *tinfo,
+ const struct ahc_syncrate *syncrate,
+ u_int *offset, int wide, role_t role)
+{
+ u_int maxoffset;
+
+ /* Limit offset to what we can do */
+ if (syncrate == NULL) {
+ maxoffset = 0;
+ } else if ((ahc->features & AHC_ULTRA2) != 0) {
+ maxoffset = MAX_OFFSET_ULTRA2;
+ } else {
+ if (wide)
+ maxoffset = MAX_OFFSET_16BIT;
+ else
+ maxoffset = MAX_OFFSET_8BIT;
+ }
+ *offset = min(*offset, maxoffset);
+ if (tinfo != NULL) {
+ if (role == ROLE_TARGET)
+ *offset = min(*offset, (u_int)tinfo->user.offset);
+ else
+ *offset = min(*offset, (u_int)tinfo->goal.offset);
+ }
+}
+
+/*
+ * Truncate the given transfer width parameter to a value the
+ * current adapter type is capable of.
+ */
+static void
+ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
+ u_int *bus_width, role_t role)
+{
+ switch (*bus_width) {
+ default:
+ if (ahc->features & AHC_WIDE) {
+ /* Respond Wide */
+ *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ /* FALLTHROUGH */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ break;
+ }
+ if (tinfo != NULL) {
+ if (role == ROLE_TARGET)
+ *bus_width = min((u_int)tinfo->user.width, *bus_width);
+ else
+ *bus_width = min((u_int)tinfo->goal.width, *bus_width);
+ }
+}
+
+/*
+ * Update the bitmask of targets for which the controller should
+ * negotiate with at the next convenient opportunity. This currently
+ * means the next time we send the initial identify messages for
+ * a new transaction.
+ */
+int
+ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ struct ahc_tmode_tstate *tstate,
+ struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
+{
+ u_int auto_negotiate_orig;
+
+ auto_negotiate_orig = tstate->auto_negotiate;
+ if (neg_type == AHC_NEG_ALWAYS) {
+ /*
+ * Force our "current" settings to be
+ * unknown so that unless a bus reset
+ * occurs the need to renegotiate is
+ * recorded persistently.
+ */
+ if ((ahc->features & AHC_WIDE) != 0)
+ tinfo->curr.width = AHC_WIDTH_UNKNOWN;
+ tinfo->curr.period = AHC_PERIOD_UNKNOWN;
+ tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
+ }
+ if (tinfo->curr.period != tinfo->goal.period
+ || tinfo->curr.width != tinfo->goal.width
+ || tinfo->curr.offset != tinfo->goal.offset
+ || tinfo->curr.ppr_options != tinfo->goal.ppr_options
+ || (neg_type == AHC_NEG_IF_NON_ASYNC
+ && (tinfo->goal.offset != 0
+ || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
+ || tinfo->goal.ppr_options != 0)))
+ tstate->auto_negotiate |= devinfo->target_mask;
+ else
+ tstate->auto_negotiate &= ~devinfo->target_mask;
+
+ return (auto_negotiate_orig != tstate->auto_negotiate);
+}
+
+/*
+ * Update the user/goal/curr tables of synchronous negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller. In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ const struct ahc_syncrate *syncrate, u_int period,
+ u_int offset, u_int ppr_options, u_int type, int paused)
+{
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ u_int old_period;
+ u_int old_offset;
+ u_int old_ppr;
+ int active;
+ int update_needed;
+
+ active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
+ update_needed = 0;
+
+ if (syncrate == NULL) {
+ period = 0;
+ offset = 0;
+ }
+
+ tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ if ((type & AHC_TRANS_USER) != 0) {
+ tinfo->user.period = period;
+ tinfo->user.offset = offset;
+ tinfo->user.ppr_options = ppr_options;
+ }
+
+ if ((type & AHC_TRANS_GOAL) != 0) {
+ tinfo->goal.period = period;
+ tinfo->goal.offset = offset;
+ tinfo->goal.ppr_options = ppr_options;
+ }
+
+ old_period = tinfo->curr.period;
+ old_offset = tinfo->curr.offset;
+ old_ppr = tinfo->curr.ppr_options;
+
+ if ((type & AHC_TRANS_CUR) != 0
+ && (old_period != period
+ || old_offset != offset
+ || old_ppr != ppr_options)) {
+ u_int scsirate;
+
+ update_needed++;
+ scsirate = tinfo->scsirate;
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+
+ scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
+ if (syncrate != NULL) {
+ scsirate |= syncrate->sxfr_u2;
+ if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
+ scsirate |= ENABLE_CRC;
+ else
+ scsirate |= SINGLE_EDGE;
+ }
+ } else {
+
+ scsirate &= ~(SXFR|SOFS);
+ /*
+ * Ensure Ultra mode is set properly for
+ * this target.
+ */
+ tstate->ultraenb &= ~devinfo->target_mask;
+ if (syncrate != NULL) {
+ if (syncrate->sxfr & ULTRA_SXFR) {
+ tstate->ultraenb |=
+ devinfo->target_mask;
+ }
+ scsirate |= syncrate->sxfr & SXFR;
+ scsirate |= offset & SOFS;
+ }
+ if (active) {
+ u_int sxfrctl0;
+
+ sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
+ sxfrctl0 &= ~FAST20;
+ if (tstate->ultraenb & devinfo->target_mask)
+ sxfrctl0 |= FAST20;
+ ahc_outb(ahc, SXFRCTL0, sxfrctl0);
+ }
+ }
+ if (active) {
+ ahc_outb(ahc, SCSIRATE, scsirate);
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc_outb(ahc, SCSIOFFSET, offset);
+ }
+
+ tinfo->scsirate = scsirate;
+ tinfo->curr.period = period;
+ tinfo->curr.offset = offset;
+ tinfo->curr.ppr_options = ppr_options;
+
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
+ if (bootverbose) {
+ if (offset != 0) {
+ printk("%s: target %d synchronous at %sMHz%s, "
+ "offset = 0x%x\n", ahc_name(ahc),
+ devinfo->target, syncrate->rate,
+ (ppr_options & MSG_EXT_PPR_DT_REQ)
+ ? " DT" : "", offset);
+ } else {
+ printk("%s: target %d using "
+ "asynchronous transfers\n",
+ ahc_name(ahc), devinfo->target);
+ }
+ }
+ }
+
+ update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
+ tinfo, AHC_NEG_TO_GOAL);
+
+ if (update_needed)
+ ahc_update_pending_scbs(ahc);
+}
+
+/*
+ * Update the user/goal/curr tables of wide negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller. In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ u_int width, u_int type, int paused)
+{
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ u_int oldwidth;
+ int active;
+ int update_needed;
+
+ active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
+ update_needed = 0;
+ tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ if ((type & AHC_TRANS_USER) != 0)
+ tinfo->user.width = width;
+
+ if ((type & AHC_TRANS_GOAL) != 0)
+ tinfo->goal.width = width;
+
+ oldwidth = tinfo->curr.width;
+ if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
+ u_int scsirate;
+
+ update_needed++;
+ scsirate = tinfo->scsirate;
+ scsirate &= ~WIDEXFER;
+ if (width == MSG_EXT_WDTR_BUS_16_BIT)
+ scsirate |= WIDEXFER;
+
+ tinfo->scsirate = scsirate;
+
+ if (active)
+ ahc_outb(ahc, SCSIRATE, scsirate);
+
+ tinfo->curr.width = width;
+
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
+ if (bootverbose) {
+ printk("%s: target %d using %dbit transfers\n",
+ ahc_name(ahc), devinfo->target,
+ 8 * (0x01 << width));
+ }
+ }
+
+ update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
+ tinfo, AHC_NEG_TO_GOAL);
+ if (update_needed)
+ ahc_update_pending_scbs(ahc);
+}
+
+/*
+ * Update the current state of tagged queuing for a given target.
+ */
+static void
+ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+ struct ahc_devinfo *devinfo, ahc_queue_alg alg)
+{
+ struct scsi_device *sdev = cmd->device;
+
+ ahc_platform_set_tags(ahc, sdev, devinfo, alg);
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ devinfo->lun, AC_TRANSFER_NEG);
+}
+
+/*
+ * When the transfer settings for a connection change, update any
+ * in-transit SCBs to contain the new data so the hardware will
+ * be set correctly during future (re)selections.
+ */
+static void
+ahc_update_pending_scbs(struct ahc_softc *ahc)
+{
+ struct scb *pending_scb;
+ int pending_scb_count;
+ int i;
+ int paused;
+ u_int saved_scbptr;
+
+ /*
+ * Traverse the pending SCB list and ensure that all of the
+ * SCBs there have the proper settings.
+ */
+ pending_scb_count = 0;
+ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
+ struct ahc_devinfo devinfo;
+ struct hardware_scb *pending_hscb;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+
+ ahc_scb_devinfo(ahc, &devinfo, pending_scb);
+ tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target, &tstate);
+ pending_hscb = pending_scb->hscb;
+ pending_hscb->control &= ~ULTRAENB;
+ if ((tstate->ultraenb & devinfo.target_mask) != 0)
+ pending_hscb->control |= ULTRAENB;
+ pending_hscb->scsirate = tinfo->scsirate;
+ pending_hscb->scsioffset = tinfo->curr.offset;
+ if ((tstate->auto_negotiate & devinfo.target_mask) == 0
+ && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
+ pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
+ pending_hscb->control &= ~MK_MESSAGE;
+ }
+ ahc_sync_scb(ahc, pending_scb,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ pending_scb_count++;
+ }
+
+ if (pending_scb_count == 0)
+ return;
+
+ if (ahc_is_paused(ahc)) {
+ paused = 1;
+ } else {
+ paused = 0;
+ ahc_pause(ahc);
+ }
+
+ saved_scbptr = ahc_inb(ahc, SCBPTR);
+ /* Ensure that the hscbs down on the card match the new information */
+ for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+ struct hardware_scb *pending_hscb;
+ u_int control;
+ u_int scb_tag;
+
+ ahc_outb(ahc, SCBPTR, i);
+ scb_tag = ahc_inb(ahc, SCB_TAG);
+ pending_scb = ahc_lookup_scb(ahc, scb_tag);
+ if (pending_scb == NULL)
+ continue;
+
+ pending_hscb = pending_scb->hscb;
+ control = ahc_inb(ahc, SCB_CONTROL);
+ control &= ~(ULTRAENB|MK_MESSAGE);
+ control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
+ ahc_outb(ahc, SCB_CONTROL, control);
+ ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
+ ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
+ }
+ ahc_outb(ahc, SCBPTR, saved_scbptr);
+
+ if (paused == 0)
+ ahc_unpause(ahc);
+}
+
+/**************************** Pathing Information *****************************/
+static void
+ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ u_int saved_scsiid;
+ role_t role;
+ int our_id;
+
+ if (ahc_inb(ahc, SSTAT0) & TARGET)
+ role = ROLE_TARGET;
+ else
+ role = ROLE_INITIATOR;
+
+ if (role == ROLE_TARGET
+ && (ahc->features & AHC_MULTI_TID) != 0
+ && (ahc_inb(ahc, SEQ_FLAGS)
+ & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
+ /* We were selected, so pull our id from TARGIDIN */
+ our_id = ahc_inb(ahc, TARGIDIN) & OID;
+ } else if ((ahc->features & AHC_ULTRA2) != 0)
+ our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
+ else
+ our_id = ahc_inb(ahc, SCSIID) & OID;
+
+ saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
+ ahc_compile_devinfo(devinfo,
+ our_id,
+ SCSIID_TARGET(ahc, saved_scsiid),
+ ahc_inb(ahc, SAVED_LUN),
+ SCSIID_CHANNEL(ahc, saved_scsiid),
+ role);
+}
+
+static const struct ahc_phase_table_entry*
+ahc_lookup_phase_entry(int phase)
+{
+ const struct ahc_phase_table_entry *entry;
+ const struct ahc_phase_table_entry *last_entry;
+
+ /*
+ * num_phases doesn't include the default entry which
+ * will be returned if the phase doesn't match.
+ */
+ last_entry = &ahc_phase_table[num_phases];
+ for (entry = ahc_phase_table; entry < last_entry; entry++) {
+ if (phase == entry->phase)
+ break;
+ }
+ return (entry);
+}
+
+void
+ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
+ u_int lun, char channel, role_t role)
+{
+ devinfo->our_scsiid = our_id;
+ devinfo->target = target;
+ devinfo->lun = lun;
+ devinfo->target_offset = target;
+ devinfo->channel = channel;
+ devinfo->role = role;
+ if (channel == 'B')
+ devinfo->target_offset += 8;
+ devinfo->target_mask = (0x01 << devinfo->target_offset);
+}
+
+void
+ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+}
+
+static void
+ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ struct scb *scb)
+{
+ role_t role;
+ int our_id;
+
+ our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
+ role = ROLE_INITIATOR;
+ if ((scb->flags & SCB_TARGET_SCB) != 0)
+ role = ROLE_TARGET;
+ ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
+ SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
+}
+
+
+/************************ Message Phase Processing ****************************/
+static void
+ahc_assert_atn(struct ahc_softc *ahc)
+{
+ u_int scsisigo;
+
+ scsisigo = ATNO;
+ if ((ahc->features & AHC_DT) == 0)
+ scsisigo |= ahc_inb(ahc, SCSISIGI);
+ ahc_outb(ahc, SCSISIGO, scsisigo);
+}
+
+/*
+ * When an initiator transaction with the MK_MESSAGE flag either reconnects
+ * or enters the initial message out phase, we are interrupted. Fill our
+ * outgoing message buffer with the appropriate message and beging handing
+ * the message phase(s) manually.
+ */
+static void
+ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ struct scb *scb)
+{
+ /*
+ * To facilitate adding multiple messages together,
+ * each routine should increment the index and len
+ * variables instead of setting them explicitly.
+ */
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+
+ if ((scb->flags & SCB_DEVICE_RESET) == 0
+ && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
+ u_int identify_msg;
+
+ identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
+ if ((scb->hscb->control & DISCENB) != 0)
+ identify_msg |= MSG_IDENTIFY_DISCFLAG;
+ ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
+ ahc->msgout_len++;
+
+ if ((scb->hscb->control & TAG_ENB) != 0) {
+ ahc->msgout_buf[ahc->msgout_index++] =
+ scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
+ ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
+ ahc->msgout_len += 2;
+ }
+ }
+
+ if (scb->flags & SCB_DEVICE_RESET) {
+ ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahc->msgout_len++;
+ ahc_print_path(ahc, scb);
+ printk("Bus Device Reset Message Sent\n");
+ /*
+ * Clear our selection hardware in advance of
+ * the busfree. We may have an entry in the waiting
+ * Q for this target, and we don't want to go about
+ * selecting while we handle the busfree and blow it
+ * away.
+ */
+ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
+ } else if ((scb->flags & SCB_ABORT) != 0) {
+ if ((scb->hscb->control & TAG_ENB) != 0)
+ ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
+ else
+ ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
+ ahc->msgout_len++;
+ ahc_print_path(ahc, scb);
+ printk("Abort%s Message Sent\n",
+ (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
+ /*
+ * Clear our selection hardware in advance of
+ * the busfree. We may have an entry in the waiting
+ * Q for this target, and we don't want to go about
+ * selecting while we handle the busfree and blow it
+ * away.
+ */
+ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
+ } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
+ ahc_build_transfer_msg(ahc, devinfo);
+ } else {
+ printk("ahc_intr: AWAITING_MSG for an SCB that "
+ "does not have a waiting message\n");
+ printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
+ devinfo->target_mask);
+ panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
+ "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
+ ahc_inb(ahc, MSG_OUT), scb->flags);
+ }
+
+ /*
+ * Clear the MK_MESSAGE flag from the SCB so we aren't
+ * asked to send this message again.
+ */
+ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
+ scb->hscb->control &= ~MK_MESSAGE;
+ ahc->msgout_index = 0;
+ ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+}
+
+/*
+ * Build an appropriate transfer negotiation message for the
+ * currently active target.
+ */
+static void
+ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ /*
+ * We need to initiate transfer negotiations.
+ * If our current and goal settings are identical,
+ * we want to renegotiate due to a check condition.
+ */
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ const struct ahc_syncrate *rate;
+ int dowide;
+ int dosync;
+ int doppr;
+ u_int period;
+ u_int ppr_options;
+ u_int offset;
+
+ tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+ /*
+ * Filter our period based on the current connection.
+ * If we can't perform DT transfers on this segment (not in LVD
+ * mode for instance), then our decision to issue a PPR message
+ * may change.
+ */
+ period = tinfo->goal.period;
+ offset = tinfo->goal.offset;
+ ppr_options = tinfo->goal.ppr_options;
+ /* Target initiated PPR is not allowed in the SCSI spec */
+ if (devinfo->role == ROLE_TARGET)
+ ppr_options = 0;
+ rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
+ &ppr_options, devinfo->role);
+ dowide = tinfo->curr.width != tinfo->goal.width;
+ dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
+ /*
+ * Only use PPR if we have options that need it, even if the device
+ * claims to support it. There might be an expander in the way
+ * that doesn't.
+ */
+ doppr = ppr_options != 0;
+
+ if (!dowide && !dosync && !doppr) {
+ dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
+ dosync = tinfo->goal.offset != 0;
+ }
+
+ if (!dowide && !dosync && !doppr) {
+ /*
+ * Force async with a WDTR message if we have a wide bus,
+ * or just issue an SDTR with a 0 offset.
+ */
+ if ((ahc->features & AHC_WIDE) != 0)
+ dowide = 1;
+ else
+ dosync = 1;
+
+ if (bootverbose) {
+ ahc_print_devinfo(ahc, devinfo);
+ printk("Ensuring async\n");
+ }
+ }
+
+ /* Target initiated PPR is not allowed in the SCSI spec */
+ if (devinfo->role == ROLE_TARGET)
+ doppr = 0;
+
+ /*
+ * Both the PPR message and SDTR message require the
+ * goal syncrate to be limited to what the target device
+ * is capable of handling (based on whether an LVD->SE
+ * expander is on the bus), so combine these two cases.
+ * Regardless, guarantee that if we are using WDTR and SDTR
+ * messages that WDTR comes first.
+ */
+ if (doppr || (dosync && !dowide)) {
+
+ offset = tinfo->goal.offset;
+ ahc_validate_offset(ahc, tinfo, rate, &offset,
+ doppr ? tinfo->goal.width
+ : tinfo->curr.width,
+ devinfo->role);
+ if (doppr) {
+ ahc_construct_ppr(ahc, devinfo, period, offset,
+ tinfo->goal.width, ppr_options);
+ } else {
+ ahc_construct_sdtr(ahc, devinfo, period, offset);
+ }
+ } else {
+ ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
+ }
+}
+
+/*
+ * Build a synchronous negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ u_int period, u_int offset)
+{
+ if (offset == 0)
+ period = AHC_ASYNC_XFER_PERIOD;
+ ahc->msgout_index += spi_populate_sync_msg(
+ ahc->msgout_buf + ahc->msgout_index, period, offset);
+ ahc->msgout_len += 5;
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
+ ahc_name(ahc), devinfo->channel, devinfo->target,
+ devinfo->lun, period, offset);
+ }
+}
+
+/*
+ * Build a wide negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ u_int bus_width)
+{
+ ahc->msgout_index += spi_populate_width_msg(
+ ahc->msgout_buf + ahc->msgout_index, bus_width);
+ ahc->msgout_len += 4;
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Sending WDTR %x\n",
+ ahc_name(ahc), devinfo->channel, devinfo->target,
+ devinfo->lun, bus_width);
+ }
+}
+
+/*
+ * Build a parallel protocol request message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ u_int period, u_int offset, u_int bus_width,
+ u_int ppr_options)
+{
+ if (offset == 0)
+ period = AHC_ASYNC_XFER_PERIOD;
+ ahc->msgout_index += spi_populate_ppr_msg(
+ ahc->msgout_buf + ahc->msgout_index, period, offset,
+ bus_width, ppr_options);
+ ahc->msgout_len += 8;
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
+ "offset %x, ppr_options %x\n", ahc_name(ahc),
+ devinfo->channel, devinfo->target, devinfo->lun,
+ bus_width, period, offset, ppr_options);
+ }
+}
+
+/*
+ * Clear any active message state.
+ */
+static void
+ahc_clear_msg_state(struct ahc_softc *ahc)
+{
+ ahc->msgout_len = 0;
+ ahc->msgin_index = 0;
+ ahc->msg_type = MSG_TYPE_NONE;
+ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
+ /*
+ * The target didn't care to respond to our
+ * message request, so clear ATN.
+ */
+ ahc_outb(ahc, CLRSINT1, CLRATNO);
+ }
+ ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+ ahc_outb(ahc, SEQ_FLAGS2,
+ ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
+}
+
+static void
+ahc_handle_proto_violation(struct ahc_softc *ahc)
+{
+ struct ahc_devinfo devinfo;
+ struct scb *scb;
+ u_int scbid;
+ u_int seq_flags;
+ u_int curphase;
+ u_int lastphase;
+ int found;
+
+ ahc_fetch_devinfo(ahc, &devinfo);
+ scbid = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scbid);
+ seq_flags = ahc_inb(ahc, SEQ_FLAGS);
+ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+ lastphase = ahc_inb(ahc, LASTPHASE);
+ if ((seq_flags & NOT_IDENTIFIED) != 0) {
+
+ /*
+ * The reconnecting target either did not send an
+ * identify message, or did, but we didn't find an SCB
+ * to match.
+ */
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("Target did not send an IDENTIFY message. "
+ "LASTPHASE = 0x%x.\n", lastphase);
+ scb = NULL;
+ } else if (scb == NULL) {
+ /*
+ * We don't seem to have an SCB active for this
+ * transaction. Print an error and reset the bus.
+ */
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("No SCB found during protocol violation\n");
+ goto proto_violation_reset;
+ } else {
+ ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
+ if ((seq_flags & NO_CDB_SENT) != 0) {
+ ahc_print_path(ahc, scb);
+ printk("No or incomplete CDB sent to device.\n");
+ } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
+ /*
+ * The target never bothered to provide status to
+ * us prior to completing the command. Since we don't
+ * know the disposition of this command, we must attempt
+ * to abort it. Assert ATN and prepare to send an abort
+ * message.
+ */
+ ahc_print_path(ahc, scb);
+ printk("Completed command without status.\n");
+ } else {
+ ahc_print_path(ahc, scb);
+ printk("Unknown protocol violation.\n");
+ ahc_dump_card_state(ahc);
+ }
+ }
+ if ((lastphase & ~P_DATAIN_DT) == 0
+ || lastphase == P_COMMAND) {
+proto_violation_reset:
+ /*
+ * Target either went directly to data/command
+ * phase or didn't respond to our ATN.
+ * The only safe thing to do is to blow
+ * it away with a bus reset.
+ */
+ found = ahc_reset_channel(ahc, 'A', TRUE);
+ printk("%s: Issued Channel %c Bus Reset. "
+ "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
+ } else {
+ /*
+ * Leave the selection hardware off in case
+ * this abort attempt will affect yet to
+ * be sent commands.
+ */
+ ahc_outb(ahc, SCSISEQ,
+ ahc_inb(ahc, SCSISEQ) & ~ENSELO);
+ ahc_assert_atn(ahc);
+ ahc_outb(ahc, MSG_OUT, HOST_MSG);
+ if (scb == NULL) {
+ ahc_print_devinfo(ahc, &devinfo);
+ ahc->msgout_buf[0] = MSG_ABORT_TASK;
+ ahc->msgout_len = 1;
+ ahc->msgout_index = 0;
+ ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ } else {
+ ahc_print_path(ahc, scb);
+ scb->flags |= SCB_ABORT;
+ }
+ printk("Protocol violation %s. Attempting to abort.\n",
+ ahc_lookup_phase_entry(curphase)->phasemsg);
+ }
+}
+
+/*
+ * Manual message loop handler.
+ */
+static void
+ahc_handle_message_phase(struct ahc_softc *ahc)
+{
+ struct ahc_devinfo devinfo;
+ u_int bus_phase;
+ int end_session;
+
+ ahc_fetch_devinfo(ahc, &devinfo);
+ end_session = FALSE;
+ bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+
+reswitch:
+ switch (ahc->msg_type) {
+ case MSG_TYPE_INITIATOR_MSGOUT:
+ {
+ int lastbyte;
+ int phasemis;
+ int msgdone;
+
+ if (ahc->msgout_len == 0)
+ panic("HOST_MSG_LOOP interrupt with no active message");
+
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("INITIATOR_MSG_OUT");
+ }
+#endif
+ phasemis = bus_phase != P_MESGOUT;
+ if (phasemis) {
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+ printk(" PHASEMIS %s\n",
+ ahc_lookup_phase_entry(bus_phase)
+ ->phasemsg);
+ }
+#endif
+ if (bus_phase == P_MESGIN) {
+ /*
+ * Change gears and see if
+ * this messages is of interest to
+ * us or should be passed back to
+ * the sequencer.
+ */
+ ahc_outb(ahc, CLRSINT1, CLRATNO);
+ ahc->send_msg_perror = FALSE;
+ ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
+ ahc->msgin_index = 0;
+ goto reswitch;
+ }
+ end_session = TRUE;
+ break;
+ }
+
+ if (ahc->send_msg_perror) {
+ ahc_outb(ahc, CLRSINT1, CLRATNO);
+ ahc_outb(ahc, CLRSINT1, CLRREQINIT);
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+ printk(" byte 0x%x\n", ahc->send_msg_perror);
+#endif
+ ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
+ break;
+ }
+
+ msgdone = ahc->msgout_index == ahc->msgout_len;
+ if (msgdone) {
+ /*
+ * The target has requested a retry.
+ * Re-assert ATN, reset our message index to
+ * 0, and try again.
+ */
+ ahc->msgout_index = 0;
+ ahc_assert_atn(ahc);
+ }
+
+ lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
+ if (lastbyte) {
+ /* Last byte is signified by dropping ATN */
+ ahc_outb(ahc, CLRSINT1, CLRATNO);
+ }
+
+ /*
+ * Clear our interrupt status and present
+ * the next byte on the bus.
+ */
+ ahc_outb(ahc, CLRSINT1, CLRREQINIT);
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+ printk(" byte 0x%x\n",
+ ahc->msgout_buf[ahc->msgout_index]);
+#endif
+ ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
+ break;
+ }
+ case MSG_TYPE_INITIATOR_MSGIN:
+ {
+ int phasemis;
+ int message_done;
+
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("INITIATOR_MSG_IN");
+ }
+#endif
+ phasemis = bus_phase != P_MESGIN;
+ if (phasemis) {
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+ printk(" PHASEMIS %s\n",
+ ahc_lookup_phase_entry(bus_phase)
+ ->phasemsg);
+ }
+#endif
+ ahc->msgin_index = 0;
+ if (bus_phase == P_MESGOUT
+ && (ahc->send_msg_perror == TRUE
+ || (ahc->msgout_len != 0
+ && ahc->msgout_index == 0))) {
+ ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ goto reswitch;
+ }
+ end_session = TRUE;
+ break;
+ }
+
+ /* Pull the byte in without acking it */
+ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+ printk(" byte 0x%x\n",
+ ahc->msgin_buf[ahc->msgin_index]);
+#endif
+
+ message_done = ahc_parse_msg(ahc, &devinfo);
+
+ if (message_done) {
+ /*
+ * Clear our incoming message buffer in case there
+ * is another message following this one.
+ */
+ ahc->msgin_index = 0;
+
+ /*
+ * If this message illicited a response,
+ * assert ATN so the target takes us to the
+ * message out phase.
+ */
+ if (ahc->msgout_len != 0) {
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("Asserting ATN for response\n");
+ }
+#endif
+ ahc_assert_atn(ahc);
+ }
+ } else
+ ahc->msgin_index++;
+
+ if (message_done == MSGLOOP_TERMINATED) {
+ end_session = TRUE;
+ } else {
+ /* Ack the byte */
+ ahc_outb(ahc, CLRSINT1, CLRREQINIT);
+ ahc_inb(ahc, SCSIDATL);
+ }
+ break;
+ }
+ case MSG_TYPE_TARGET_MSGIN:
+ {
+ int msgdone;
+ int msgout_request;
+
+ if (ahc->msgout_len == 0)
+ panic("Target MSGIN with no active message");
+
+ /*
+ * If we interrupted a mesgout session, the initiator
+ * will not know this until our first REQ. So, we
+ * only honor mesgout requests after we've sent our
+ * first byte.
+ */
+ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
+ && ahc->msgout_index > 0)
+ msgout_request = TRUE;
+ else
+ msgout_request = FALSE;
+
+ if (msgout_request) {
+
+ /*
+ * Change gears and see if
+ * this messages is of interest to
+ * us or should be passed back to
+ * the sequencer.
+ */
+ ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
+ ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
+ ahc->msgin_index = 0;
+ /* Dummy read to REQ for first byte */
+ ahc_inb(ahc, SCSIDATL);
+ ahc_outb(ahc, SXFRCTL0,
+ ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+ break;
+ }
+
+ msgdone = ahc->msgout_index == ahc->msgout_len;
+ if (msgdone) {
+ ahc_outb(ahc, SXFRCTL0,
+ ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
+ end_session = TRUE;
+ break;
+ }
+
+ /*
+ * Present the next byte on the bus.
+ */
+ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+ ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
+ break;
+ }
+ case MSG_TYPE_TARGET_MSGOUT:
+ {
+ int lastbyte;
+ int msgdone;
+
+ /*
+ * The initiator signals that this is
+ * the last byte by dropping ATN.
+ */
+ lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
+
+ /*
+ * Read the latched byte, but turn off SPIOEN first
+ * so that we don't inadvertently cause a REQ for the
+ * next byte.
+ */
+ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
+ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
+ msgdone = ahc_parse_msg(ahc, &devinfo);
+ if (msgdone == MSGLOOP_TERMINATED) {
+ /*
+ * The message is *really* done in that it caused
+ * us to go to bus free. The sequencer has already
+ * been reset at this point, so pull the ejection
+ * handle.
+ */
+ return;
+ }
+
+ ahc->msgin_index++;
+
+ /*
+ * XXX Read spec about initiator dropping ATN too soon
+ * and use msgdone to detect it.
+ */
+ if (msgdone == MSGLOOP_MSGCOMPLETE) {
+ ahc->msgin_index = 0;
+
+ /*
+ * If this message illicited a response, transition
+ * to the Message in phase and send it.
+ */
+ if (ahc->msgout_len != 0) {
+ ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
+ ahc_outb(ahc, SXFRCTL0,
+ ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+ ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
+ ahc->msgin_index = 0;
+ break;
+ }
+ }
+
+ if (lastbyte)
+ end_session = TRUE;
+ else {
+ /* Ask for the next byte. */
+ ahc_outb(ahc, SXFRCTL0,
+ ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+ }
+
+ break;
+ }
+ default:
+ panic("Unknown REQINIT message type");
+ }
+
+ if (end_session) {
+ ahc_clear_msg_state(ahc);
+ ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
+ } else
+ ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
+}
+
+/*
+ * See if we sent a particular extended message to the target.
+ * If "full" is true, return true only if the target saw the full
+ * message. If "full" is false, return true if the target saw at
+ * least the first byte of the message.
+ */
+static int
+ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
+{
+ int found;
+ u_int index;
+
+ found = FALSE;
+ index = 0;
+
+ while (index < ahc->msgout_len) {
+ if (ahc->msgout_buf[index] == MSG_EXTENDED) {
+ u_int end_index;
+
+ end_index = index + 1 + ahc->msgout_buf[index + 1];
+ if (ahc->msgout_buf[index+2] == msgval
+ && type == AHCMSG_EXT) {
+
+ if (full) {
+ if (ahc->msgout_index > end_index)
+ found = TRUE;
+ } else if (ahc->msgout_index > index)
+ found = TRUE;
+ }
+ index = end_index;
+ } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
+ && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+
+ /* Skip tag type and tag id or residue param*/
+ index += 2;
+ } else {
+ /* Single byte message */
+ if (type == AHCMSG_1B
+ && ahc->msgout_buf[index] == msgval
+ && ahc->msgout_index > index)
+ found = TRUE;
+ index++;
+ }
+
+ if (found)
+ break;
+ }
+ return (found);
+}
+
+/*
+ * Wait for a complete incoming message, parse it, and respond accordingly.
+ */
+static int
+ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ int reject;
+ int done;
+ int response;
+ u_int targ_scsirate;
+
+ done = MSGLOOP_IN_PROG;
+ response = FALSE;
+ reject = FALSE;
+ tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+ targ_scsirate = tinfo->scsirate;
+
+ /*
+ * Parse as much of the message as is available,
+ * rejecting it if we don't support it. When
+ * the entire message is available and has been
+ * handled, return MSGLOOP_MSGCOMPLETE, indicating
+ * that we have parsed an entire message.
+ *
+ * In the case of extended messages, we accept the length
+ * byte outright and perform more checking once we know the
+ * extended message type.
+ */
+ switch (ahc->msgin_buf[0]) {
+ case MSG_DISCONNECT:
+ case MSG_SAVEDATAPOINTER:
+ case MSG_CMDCOMPLETE:
+ case MSG_RESTOREPOINTERS:
+ case MSG_IGN_WIDE_RESIDUE:
+ /*
+ * End our message loop as these are messages
+ * the sequencer handles on its own.
+ */
+ done = MSGLOOP_TERMINATED;
+ break;
+ case MSG_MESSAGE_REJECT:
+ response = ahc_handle_msg_reject(ahc, devinfo);
+ /* FALLTHROUGH */
+ case MSG_NOOP:
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ case MSG_EXTENDED:
+ {
+ /* Wait for enough of the message to begin validation */
+ if (ahc->msgin_index < 2)
+ break;
+ switch (ahc->msgin_buf[2]) {
+ case MSG_EXT_SDTR:
+ {
+ const struct ahc_syncrate *syncrate;
+ u_int period;
+ u_int ppr_options;
+ u_int offset;
+ u_int saved_offset;
+
+ if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have both args before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_SDTR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
+ break;
+
+ period = ahc->msgin_buf[3];
+ ppr_options = 0;
+ saved_offset = offset = ahc->msgin_buf[4];
+ syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
+ &ppr_options,
+ devinfo->role);
+ ahc_validate_offset(ahc, tinfo, syncrate, &offset,
+ targ_scsirate & WIDEXFER,
+ devinfo->role);
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Received "
+ "SDTR period %x, offset %x\n\t"
+ "Filtered to period %x, offset %x\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ ahc->msgin_buf[3], saved_offset,
+ period, offset);
+ }
+ ahc_set_syncrate(ahc, devinfo,
+ syncrate, period,
+ offset, ppr_options,
+ AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+
+ /*
+ * See if we initiated Sync Negotiation
+ * and didn't have to fall down to async
+ * transfers.
+ */
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+ /* We started it */
+ if (saved_offset != offset) {
+ /* Went too low - force async */
+ reject = TRUE;
+ }
+ } else {
+ /*
+ * Send our own SDTR in reply
+ */
+ if (bootverbose
+ && devinfo->role == ROLE_INITIATOR) {
+ printk("(%s:%c:%d:%d): Target "
+ "Initiated SDTR\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+ ahc_construct_sdtr(ahc, devinfo,
+ period, offset);
+ ahc->msgout_index = 0;
+ response = TRUE;
+ }
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ }
+ case MSG_EXT_WDTR:
+ {
+ u_int bus_width;
+ u_int saved_width;
+ u_int sending_reply;
+
+ sending_reply = FALSE;
+ if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have our arg before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_WDTR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
+ break;
+
+ bus_width = ahc->msgin_buf[3];
+ saved_width = bus_width;
+ ahc_validate_width(ahc, tinfo, &bus_width,
+ devinfo->role);
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Received WDTR "
+ "%x filtered to %x\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ saved_width, bus_width);
+ }
+
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+ /*
+ * Don't send a WDTR back to the
+ * target, since we asked first.
+ * If the width went higher than our
+ * request, reject it.
+ */
+ if (saved_width > bus_width) {
+ reject = TRUE;
+ printk("(%s:%c:%d:%d): requested %dBit "
+ "transfers. Rejecting...\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ 8 * (0x01 << bus_width));
+ bus_width = 0;
+ }
+ } else {
+ /*
+ * Send our own WDTR in reply
+ */
+ if (bootverbose
+ && devinfo->role == ROLE_INITIATOR) {
+ printk("(%s:%c:%d:%d): Target "
+ "Initiated WDTR\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+ ahc_construct_wdtr(ahc, devinfo, bus_width);
+ ahc->msgout_index = 0;
+ response = TRUE;
+ sending_reply = TRUE;
+ }
+ /*
+ * After a wide message, we are async, but
+ * some devices don't seem to honor this portion
+ * of the spec. Force a renegotiation of the
+ * sync component of our transfer agreement even
+ * if our goal is async. By updating our width
+ * after forcing the negotiation, we avoid
+ * renegotiating for width.
+ */
+ ahc_update_neg_request(ahc, devinfo, tstate,
+ tinfo, AHC_NEG_ALWAYS);
+ ahc_set_width(ahc, devinfo, bus_width,
+ AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ if (sending_reply == FALSE && reject == FALSE) {
+
+ /*
+ * We will always have an SDTR to send.
+ */
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+ ahc_build_transfer_msg(ahc, devinfo);
+ ahc->msgout_index = 0;
+ response = TRUE;
+ }
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ }
+ case MSG_EXT_PPR:
+ {
+ const struct ahc_syncrate *syncrate;
+ u_int period;
+ u_int offset;
+ u_int bus_width;
+ u_int ppr_options;
+ u_int saved_width;
+ u_int saved_offset;
+ u_int saved_ppr_options;
+
+ if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have all args before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_PPR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
+ break;
+
+ period = ahc->msgin_buf[3];
+ offset = ahc->msgin_buf[5];
+ bus_width = ahc->msgin_buf[6];
+ saved_width = bus_width;
+ ppr_options = ahc->msgin_buf[7];
+ /*
+ * According to the spec, a DT only
+ * period factor with no DT option
+ * set implies async.
+ */
+ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+ && period == 9)
+ offset = 0;
+ saved_ppr_options = ppr_options;
+ saved_offset = offset;
+
+ /*
+ * Mask out any options we don't support
+ * on any controller. Transfer options are
+ * only available if we are negotiating wide.
+ */
+ ppr_options &= MSG_EXT_PPR_DT_REQ;
+ if (bus_width == 0)
+ ppr_options = 0;
+
+ ahc_validate_width(ahc, tinfo, &bus_width,
+ devinfo->role);
+ syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
+ &ppr_options,
+ devinfo->role);
+ ahc_validate_offset(ahc, tinfo, syncrate,
+ &offset, bus_width,
+ devinfo->role);
+
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
+ /*
+ * If we are unable to do any of the
+ * requested options (we went too low),
+ * then we'll have to reject the message.
+ */
+ if (saved_width > bus_width
+ || saved_offset != offset
+ || saved_ppr_options != ppr_options) {
+ reject = TRUE;
+ period = 0;
+ offset = 0;
+ bus_width = 0;
+ ppr_options = 0;
+ syncrate = NULL;
+ }
+ } else {
+ if (devinfo->role != ROLE_TARGET)
+ printk("(%s:%c:%d:%d): Target "
+ "Initiated PPR\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ else
+ printk("(%s:%c:%d:%d): Initiator "
+ "Initiated PPR\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+ ahc_construct_ppr(ahc, devinfo, period, offset,
+ bus_width, ppr_options);
+ ahc->msgout_index = 0;
+ response = TRUE;
+ }
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): Received PPR width %x, "
+ "period %x, offset %x,options %x\n"
+ "\tFiltered to width %x, period %x, "
+ "offset %x, options %x\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ saved_width, ahc->msgin_buf[3],
+ saved_offset, saved_ppr_options,
+ bus_width, period, offset, ppr_options);
+ }
+ ahc_set_width(ahc, devinfo, bus_width,
+ AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ ahc_set_syncrate(ahc, devinfo,
+ syncrate, period,
+ offset, ppr_options,
+ AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ }
+ default:
+ /* Unknown extended message. Reject it. */
+ reject = TRUE;
+ break;
+ }
+ break;
+ }
+#ifdef AHC_TARGET_MODE
+ case MSG_BUS_DEV_RESET:
+ ahc_handle_devreset(ahc, devinfo,
+ CAM_BDR_SENT,
+ "Bus Device Reset Received",
+ /*verbose_level*/0);
+ ahc_restart(ahc);
+ done = MSGLOOP_TERMINATED;
+ break;
+ case MSG_ABORT_TAG:
+ case MSG_ABORT:
+ case MSG_CLEAR_QUEUE:
+ {
+ int tag;
+
+ /* Target mode messages */
+ if (devinfo->role != ROLE_TARGET) {
+ reject = TRUE;
+ break;
+ }
+ tag = SCB_LIST_NULL;
+ if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
+ tag = ahc_inb(ahc, INITIATOR_TAG);
+ ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
+ devinfo->lun, tag, ROLE_TARGET,
+ CAM_REQ_ABORTED);
+
+ tstate = ahc->enabled_targets[devinfo->our_scsiid];
+ if (tstate != NULL) {
+ struct ahc_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[devinfo->lun];
+ if (lstate != NULL) {
+ ahc_queue_lstate_event(ahc, lstate,
+ devinfo->our_scsiid,
+ ahc->msgin_buf[0],
+ /*arg*/tag);
+ ahc_send_lstate_events(ahc, lstate);
+ }
+ }
+ ahc_restart(ahc);
+ done = MSGLOOP_TERMINATED;
+ break;
+ }
+#endif
+ case MSG_TERM_IO_PROC:
+ default:
+ reject = TRUE;
+ break;
+ }
+
+ if (reject) {
+ /*
+ * Setup to reject the message.
+ */
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 1;
+ ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
+ done = MSGLOOP_MSGCOMPLETE;
+ response = TRUE;
+ }
+
+ if (done != MSGLOOP_IN_PROG && !response)
+ /* Clear the outgoing message buffer */
+ ahc->msgout_len = 0;
+
+ return (done);
+}
+
+/*
+ * Process a message reject message.
+ */
+static int
+ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ /*
+ * What we care about here is if we had an
+ * outstanding SDTR or WDTR message for this
+ * target. If we did, this is a signal that
+ * the target is refusing negotiation.
+ */
+ struct scb *scb;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ u_int scb_index;
+ u_int last_msg;
+ int response = 0;
+
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
+ devinfo->our_scsiid,
+ devinfo->target, &tstate);
+ /* Might be necessary */
+ last_msg = ahc_inb(ahc, LAST_MSG);
+
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
+ /*
+ * Target does not support the PPR message.
+ * Attempt to negotiate SPI-2 style.
+ */
+ if (bootverbose) {
+ printk("(%s:%c:%d:%d): PPR Rejected. "
+ "Trying WDTR/SDTR\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ }
+ tinfo->goal.ppr_options = 0;
+ tinfo->curr.transport_version = 2;
+ tinfo->goal.transport_version = 2;
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+ ahc_build_transfer_msg(ahc, devinfo);
+ ahc->msgout_index = 0;
+ response = 1;
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+
+ /* note 8bit xfers */
+ printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
+ "8bit transfers\n", ahc_name(ahc),
+ devinfo->channel, devinfo->target, devinfo->lun);
+ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ /*
+ * No need to clear the sync rate. If the target
+ * did not accept the command, our syncrate is
+ * unaffected. If the target started the negotiation,
+ * but rejected our response, we already cleared the
+ * sync rate before sending our WDTR.
+ */
+ if (tinfo->goal.offset != tinfo->curr.offset) {
+
+ /* Start the sync negotiation */
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+ ahc_build_transfer_msg(ahc, devinfo);
+ ahc->msgout_index = 0;
+ response = 1;
+ }
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ /* note asynch xfers and clear flag */
+ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
+ /*offset*/0, /*ppr_options*/0,
+ AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+ /*paused*/TRUE);
+ printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
+ "Using asynchronous transfers\n",
+ ahc_name(ahc), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ int tag_type;
+ int mask;
+
+ tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+
+ if (tag_type == MSG_SIMPLE_TASK) {
+ printk("(%s:%c:%d:%d): refuses tagged commands. "
+ "Performing non-tagged I/O\n", ahc_name(ahc),
+ devinfo->channel, devinfo->target, devinfo->lun);
+ ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE);
+ mask = ~0x23;
+ } else {
+ printk("(%s:%c:%d:%d): refuses %s tagged commands. "
+ "Performing simple queue tagged I/O only\n",
+ ahc_name(ahc), devinfo->channel, devinfo->target,
+ devinfo->lun, tag_type == MSG_ORDERED_TASK
+ ? "ordered" : "head of queue");
+ ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC);
+ mask = ~0x03;
+ }
+
+ /*
+ * Resend the identify for this CCB as the target
+ * may believe that the selection is invalid otherwise.
+ */
+ ahc_outb(ahc, SCB_CONTROL,
+ ahc_inb(ahc, SCB_CONTROL) & mask);
+ scb->hscb->control &= mask;
+ ahc_set_transaction_tag(scb, /*enabled*/FALSE,
+ /*type*/MSG_SIMPLE_TASK);
+ ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
+ ahc_assert_atn(ahc);
+
+ /*
+ * This transaction is now at the head of
+ * the untagged queue for this target.
+ */
+ if ((ahc->flags & AHC_SCB_BTT) == 0) {
+ struct scb_tailq *untagged_q;
+
+ untagged_q =
+ &(ahc->untagged_queues[devinfo->target_offset]);
+ TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
+ scb->flags |= SCB_UNTAGGEDQ;
+ }
+ ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
+ scb->hscb->tag);
+
+ /*
+ * Requeue all tagged commands for this target
+ * currently in our possession so they can be
+ * converted to untagged commands.
+ */
+ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
+ SCB_GET_CHANNEL(ahc, scb),
+ SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
+ ROLE_INITIATOR, CAM_REQUEUE_REQ,
+ SEARCH_COMPLETE);
+ } else {
+ /*
+ * Otherwise, we ignore it.
+ */
+ printk("%s:%c:%d: Message reject for %x -- ignored\n",
+ ahc_name(ahc), devinfo->channel, devinfo->target,
+ last_msg);
+ }
+ return (response);
+}
+
+/*
+ * Process an ingnore wide residue message.
+ */
+static void
+ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ u_int scb_index;
+ struct scb *scb;
+
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ /*
+ * XXX Actually check data direction in the sequencer?
+ * Perhaps add datadir to some spare bits in the hscb?
+ */
+ if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
+ || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
+ /*
+ * Ignore the message if we haven't
+ * seen an appropriate data phase yet.
+ */
+ } else {
+ /*
+ * If the residual occurred on the last
+ * transfer and the transfer request was
+ * expected to end on an odd count, do
+ * nothing. Otherwise, subtract a byte
+ * and update the residual count accordingly.
+ */
+ uint32_t sgptr;
+
+ sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
+ if ((sgptr & SG_LIST_NULL) != 0
+ && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
+ /*
+ * If the residual occurred on the last
+ * transfer and the transfer request was
+ * expected to end on an odd count, do
+ * nothing.
+ */
+ } else {
+ struct ahc_dma_seg *sg;
+ uint32_t data_cnt;
+ uint32_t data_addr;
+ uint32_t sglen;
+
+ /* Pull in all of the sgptr */
+ sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
+ data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);
+
+ if ((sgptr & SG_LIST_NULL) != 0) {
+ /*
+ * The residual data count is not updated
+ * for the command run to completion case.
+ * Explicitly zero the count.
+ */
+ data_cnt &= ~AHC_SG_LEN_MASK;
+ }
+
+ data_addr = ahc_inl(ahc, SHADDR);
+
+ data_cnt += 1;
+ data_addr -= 1;
+ sgptr &= SG_PTR_MASK;
+
+ sg = ahc_sg_bus_to_virt(scb, sgptr);
+
+ /*
+ * The residual sg ptr points to the next S/G
+ * to load so we must go back one.
+ */
+ sg--;
+ sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
+ if (sg != scb->sg_list
+ && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
+
+ sg--;
+ sglen = ahc_le32toh(sg->len);
+ /*
+ * Preserve High Address and SG_LIST bits
+ * while setting the count to 1.
+ */
+ data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
+ data_addr = ahc_le32toh(sg->addr)
+ + (sglen & AHC_SG_LEN_MASK) - 1;
+
+ /*
+ * Increment sg so it points to the
+ * "next" sg.
+ */
+ sg++;
+ sgptr = ahc_sg_virt_to_bus(scb, sg);
+ }
+ ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
+ ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
+ /*
+ * Toggle the "oddness" of the transfer length
+ * to handle this mid-transfer ignore wide
+ * residue. This ensures that the oddness is
+ * correct for subsequent data transfers.
+ */
+ ahc_outb(ahc, SCB_LUN,
+ ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
+ }
+ }
+}
+
+
+/*
+ * Reinitialize the data pointers for the active transfer
+ * based on its current residual.
+ */
+static void
+ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
+{
+ struct scb *scb;
+ struct ahc_dma_seg *sg;
+ u_int scb_index;
+ uint32_t sgptr;
+ uint32_t resid;
+ uint32_t dataptr;
+
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ scb = ahc_lookup_scb(ahc, scb_index);
+ sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
+ | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
+ | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
+ | ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
+
+ sgptr &= SG_PTR_MASK;
+ sg = ahc_sg_bus_to_virt(scb, sgptr);
+
+ /* The residual sg_ptr always points to the next sg */
+ sg--;
+
+ resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
+ | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
+ | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
+
+ dataptr = ahc_le32toh(sg->addr)
+ + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
+ - resid;
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+ u_int dscommand1;
+
+ dscommand1 = ahc_inb(ahc, DSCOMMAND1);
+ ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
+ ahc_outb(ahc, HADDR,
+ (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
+ ahc_outb(ahc, DSCOMMAND1, dscommand1);
+ }
+ ahc_outb(ahc, HADDR + 3, dataptr >> 24);
+ ahc_outb(ahc, HADDR + 2, dataptr >> 16);
+ ahc_outb(ahc, HADDR + 1, dataptr >> 8);
+ ahc_outb(ahc, HADDR, dataptr);
+ ahc_outb(ahc, HCNT + 2, resid >> 16);
+ ahc_outb(ahc, HCNT + 1, resid >> 8);
+ ahc_outb(ahc, HCNT, resid);
+ if ((ahc->features & AHC_ULTRA2) == 0) {
+ ahc_outb(ahc, STCNT + 2, resid >> 16);
+ ahc_outb(ahc, STCNT + 1, resid >> 8);
+ ahc_outb(ahc, STCNT, resid);
+ }
+}
+
+/*
+ * Handle the effects of issuing a bus device reset message.
+ */
+static void
+ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ cam_status status, char *message, int verbose_level)
+{
+#ifdef AHC_TARGET_MODE
+ struct ahc_tmode_tstate* tstate;
+ u_int lun;
+#endif
+ int found;
+
+ found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
+ status);
+
+#ifdef AHC_TARGET_MODE
+ /*
+ * Send an immediate notify ccb to all target mord peripheral
+ * drivers affected by this action.
+ */
+ tstate = ahc->enabled_targets[devinfo->our_scsiid];
+ if (tstate != NULL) {
+ for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+ struct ahc_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[lun];
+ if (lstate == NULL)
+ continue;
+
+ ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
+ MSG_BUS_DEV_RESET, /*arg*/0);
+ ahc_send_lstate_events(ahc, lstate);
+ }
+ }
+#endif
+
+ /*
+ * Go back to async/narrow transfers and renegotiate.
+ */
+ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHC_TRANS_CUR, /*paused*/TRUE);
+ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
+ /*period*/0, /*offset*/0, /*ppr_options*/0,
+ AHC_TRANS_CUR, /*paused*/TRUE);
+
+ if (status != CAM_SEL_TIMEOUT)
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_SENT_BDR);
+
+ if (message != NULL
+ && (verbose_level <= bootverbose))
+ printk("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
+ message, devinfo->channel, devinfo->target, found);
+}
+
+#ifdef AHC_TARGET_MODE
+static void
+ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+ struct scb *scb)
+{
+
+ /*
+ * To facilitate adding multiple messages together,
+ * each routine should increment the index and len
+ * variables instead of setting them explicitly.
+ */
+ ahc->msgout_index = 0;
+ ahc->msgout_len = 0;
+
+ if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
+ ahc_build_transfer_msg(ahc, devinfo);
+ else
+ panic("ahc_intr: AWAITING target message with no message");
+
+ ahc->msgout_index = 0;
+ ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
+}
+#endif
+/**************************** Initialization **********************************/
+/*
+ * Allocate a controller structure for a new device
+ * and perform initial initializion.
+ */
+struct ahc_softc *
+ahc_alloc(void *platform_arg, char *name)
+{
+ struct ahc_softc *ahc;
+ int i;
+
+#ifndef __FreeBSD__
+ ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
+ if (!ahc) {
+ printk("aic7xxx: cannot malloc softc!\n");
+ kfree(name);
+ return NULL;
+ }
+#else
+ ahc = device_get_softc((device_t)platform_arg);
+#endif
+ memset(ahc, 0, sizeof(*ahc));
+ ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
+ if (ahc->seep_config == NULL) {
+#ifndef __FreeBSD__
+ kfree(ahc);
+#endif
+ kfree(name);
+ return (NULL);
+ }
+ LIST_INIT(&ahc->pending_scbs);
+ /* We don't know our unit number until the OSM sets it */
+ ahc->name = name;
+ ahc->unit = -1;
+ ahc->description = NULL;
+ ahc->channel = 'A';
+ ahc->channel_b = 'B';
+ ahc->chip = AHC_NONE;
+ ahc->features = AHC_FENONE;
+ ahc->bugs = AHC_BUGNONE;
+ ahc->flags = AHC_FNONE;
+ /*
+ * Default to all error reporting enabled with the
+ * sequencer operating at its fastest speed.
+ * The bus attach code may modify this.
+ */
+ ahc->seqctl = FASTMODE;
+
+ for (i = 0; i < AHC_NUM_TARGETS; i++)
+ TAILQ_INIT(&ahc->untagged_queues[i]);
+ if (ahc_platform_alloc(ahc, platform_arg) != 0) {
+ ahc_free(ahc);
+ ahc = NULL;
+ }
+ return (ahc);
+}
+
+int
+ahc_softc_init(struct ahc_softc *ahc)
+{
+
+ /* The IRQMS bit is only valid on VL and EISA chips */
+ if ((ahc->chip & AHC_PCI) == 0)
+ ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
+ else
+ ahc->unpause = 0;
+ ahc->pause = ahc->unpause | PAUSE;
+ /* XXX The shared scb data stuff should be deprecated */
+ if (ahc->scb_data == NULL) {
+ ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
+ if (ahc->scb_data == NULL)
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+void
+ahc_set_unit(struct ahc_softc *ahc, int unit)
+{
+ ahc->unit = unit;
+}
+
+void
+ahc_set_name(struct ahc_softc *ahc, char *name)
+{
+ if (ahc->name != NULL)
+ kfree(ahc->name);
+ ahc->name = name;
+}
+
+void
+ahc_free(struct ahc_softc *ahc)
+{
+ int i;
+
+ switch (ahc->init_level) {
+ default:
+ case 5:
+ ahc_shutdown(ahc);
+ /* FALLTHROUGH */
+ case 4:
+ ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap);
+ /* FALLTHROUGH */
+ case 3:
+ ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
+ ahc->shared_data_dmamap);
+ ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap);
+ /* FALLTHROUGH */
+ case 2:
+ ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
+ case 1:
+#ifndef __linux__
+ ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
+#endif
+ break;
+ case 0:
+ break;
+ }
+
+#ifndef __linux__
+ ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
+#endif
+ ahc_platform_free(ahc);
+ ahc_fini_scbdata(ahc);
+ for (i = 0; i < AHC_NUM_TARGETS; i++) {
+ struct ahc_tmode_tstate *tstate;
+
+ tstate = ahc->enabled_targets[i];
+ if (tstate != NULL) {
+#ifdef AHC_TARGET_MODE
+ int j;
+
+ for (j = 0; j < AHC_NUM_LUNS; j++) {
+ struct ahc_tmode_lstate *lstate;
+
+ lstate = tstate->enabled_luns[j];
+ if (lstate != NULL) {
+ xpt_free_path(lstate->path);
+ kfree(lstate);
+ }
+ }
+#endif
+ kfree(tstate);
+ }
+ }
+#ifdef AHC_TARGET_MODE
+ if (ahc->black_hole != NULL) {
+ xpt_free_path(ahc->black_hole->path);
+ kfree(ahc->black_hole);
+ }
+#endif
+ if (ahc->name != NULL)
+ kfree(ahc->name);
+ if (ahc->seep_config != NULL)
+ kfree(ahc->seep_config);
+#ifndef __FreeBSD__
+ kfree(ahc);
+#endif
+ return;
+}
+
+static void
+ahc_shutdown(void *arg)
+{
+ struct ahc_softc *ahc;
+ int i;
+
+ ahc = (struct ahc_softc *)arg;
+
+ /* This will reset most registers to 0, but not all */
+ ahc_reset(ahc, /*reinit*/FALSE);
+ ahc_outb(ahc, SCSISEQ, 0);
+ ahc_outb(ahc, SXFRCTL0, 0);
+ ahc_outb(ahc, DSPCISTATUS, 0);
+
+ for (i = TARG_SCSIRATE; i < SCSICONF; i++)
+ ahc_outb(ahc, i, 0);
+}
+
+/*
+ * Reset the controller and record some information about it
+ * that is only available just after a reset. If "reinit" is
+ * non-zero, this reset occurred after initial configuration
+ * and the caller requests that the chip be fully reinitialized
+ * to a runable state. Chip interrupts are *not* enabled after
+ * a reinitialization. The caller must enable interrupts via
+ * ahc_intr_enable().
+ */
+int
+ahc_reset(struct ahc_softc *ahc, int reinit)
+{
+ u_int sblkctl;
+ u_int sxfrctl1_a, sxfrctl1_b;
+ int error;
+ int wait;
+
+ /*
+ * Preserve the value of the SXFRCTL1 register for all channels.
+ * It contains settings that affect termination and we don't want
+ * to disturb the integrity of the bus.
+ */
+ ahc_pause(ahc);
+ sxfrctl1_b = 0;
+ if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
+ u_int sblkctl;
+
+ /*
+ * Save channel B's settings in case this chip
+ * is setup for TWIN channel operation.
+ */
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
+ sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
+ ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
+ }
+ sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
+
+ ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
+
+ /*
+ * Ensure that the reset has finished. We delay 1000us
+ * prior to reading the register to make sure the chip
+ * has sufficiently completed its reset to handle register
+ * accesses.
+ */
+ wait = 1000;
+ do {
+ ahc_delay(1000);
+ } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
+
+ if (wait == 0) {
+ printk("%s: WARNING - Failed chip reset! "
+ "Trying to initialize anyway.\n", ahc_name(ahc));
+ }
+ ahc_outb(ahc, HCNTRL, ahc->pause);
+
+ /* Determine channel configuration */
+ sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
+ /* No Twin Channel PCI cards */
+ if ((ahc->chip & AHC_PCI) != 0)
+ sblkctl &= ~SELBUSB;
+ switch (sblkctl) {
+ case 0:
+ /* Single Narrow Channel */
+ break;
+ case 2:
+ /* Wide Channel */
+ ahc->features |= AHC_WIDE;
+ break;
+ case 8:
+ /* Twin Channel */
+ ahc->features |= AHC_TWIN;
+ break;
+ default:
+ printk(" Unsupported adapter type. Ignoring\n");
+ return(-1);
+ }
+
+ /*
+ * Reload sxfrctl1.
+ *
+ * We must always initialize STPWEN to 1 before we
+ * restore the saved values. STPWEN is initialized
+ * to a tri-state condition which can only be cleared
+ * by turning it on.
+ */
+ if ((ahc->features & AHC_TWIN) != 0) {
+ u_int sblkctl;
+
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
+ ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
+ ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
+ }
+ ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
+
+ error = 0;
+ if (reinit != 0)
+ /*
+ * If a recovery action has forced a chip reset,
+ * re-initialize the chip to our liking.
+ */
+ error = ahc->bus_chip_init(ahc);
+#ifdef AHC_DUMP_SEQ
+ else
+ ahc_dumpseq(ahc);
+#endif
+
+ return (error);
+}
+
+/*
+ * Determine the number of SCBs available on the controller
+ */
+int
+ahc_probe_scbs(struct ahc_softc *ahc) {
+ int i;
+
+ for (i = 0; i < AHC_SCB_MAX; i++) {
+
+ ahc_outb(ahc, SCBPTR, i);
+ ahc_outb(ahc, SCB_BASE, i);
+ if (ahc_inb(ahc, SCB_BASE) != i)
+ break;
+ ahc_outb(ahc, SCBPTR, 0);
+ if (ahc_inb(ahc, SCB_BASE) != 0)
+ break;
+ }
+ return (i);
+}
+
+static void
+ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ dma_addr_t *baddr;
+
+ baddr = (dma_addr_t *)arg;
+ *baddr = segs->ds_addr;
+}
+
+static void
+ahc_build_free_scb_list(struct ahc_softc *ahc)
+{
+ int scbsize;
+ int i;
+
+ scbsize = 32;
+ if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
+ scbsize = 64;
+
+ for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+ int j;
+
+ ahc_outb(ahc, SCBPTR, i);
+
+ /*
+ * Touch all SCB bytes to avoid parity errors
+ * should one of our debugging routines read
+ * an otherwise uninitiatlized byte.
+ */
+ for (j = 0; j < scbsize; j++)
+ ahc_outb(ahc, SCB_BASE+j, 0xFF);
+
+ /* Clear the control byte. */
+ ahc_outb(ahc, SCB_CONTROL, 0);
+
+ /* Set the next pointer */
+ if ((ahc->flags & AHC_PAGESCBS) != 0)
+ ahc_outb(ahc, SCB_NEXT, i+1);
+ else
+ ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
+
+ /* Make the tag number, SCSIID, and lun invalid */
+ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
+ ahc_outb(ahc, SCB_SCSIID, 0xFF);
+ ahc_outb(ahc, SCB_LUN, 0xFF);
+ }
+
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ /* SCB 0 heads the free list. */
+ ahc_outb(ahc, FREE_SCBH, 0);
+ } else {
+ /* No free list. */
+ ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
+ }
+
+ /* Make sure that the last SCB terminates the free list */
+ ahc_outb(ahc, SCBPTR, i-1);
+ ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
+}
+
+static int
+ahc_init_scbdata(struct ahc_softc *ahc)
+{
+ struct scb_data *scb_data;
+
+ scb_data = ahc->scb_data;
+ SLIST_INIT(&scb_data->free_scbs);
+ SLIST_INIT(&scb_data->sg_maps);
+
+ /* Allocate SCB resources */
+ scb_data->scbarray = kzalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
+ GFP_ATOMIC);
+ if (scb_data->scbarray == NULL)
+ return (ENOMEM);
+
+ /* Determine the number of hardware SCBs and initialize them */
+
+ scb_data->maxhscbs = ahc_probe_scbs(ahc);
+ if (ahc->scb_data->maxhscbs == 0) {
+ printk("%s: No SCB space found\n", ahc_name(ahc));
+ return (ENXIO);
+ }
+
+ /*
+ * Create our DMA tags. These tags define the kinds of device
+ * accessible memory allocations and memory mappings we will
+ * need to perform during normal operation.
+ *
+ * Unless we need to further restrict the allocation, we rely
+ * on the restrictions of the parent dmat, hence the common
+ * use of MAXADDR and MAXSIZE.
+ */
+
+ /* DMA tag for our hardware scb structures */
+ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
+ /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &scb_data->hscb_dmat) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* Allocation for our hscbs */
+ if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
+ (void **)&scb_data->hscbs,
+ BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* And permanently map them */
+ ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
+ scb_data->hscbs,
+ AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
+ ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
+
+ scb_data->init_level++;
+
+ /* DMA tag for our sense buffers */
+ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
+ /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &scb_data->sense_dmat) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* Allocate them */
+ if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
+ (void **)&scb_data->sense,
+ BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* And permanently map them */
+ ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
+ scb_data->sense,
+ AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
+ ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
+
+ scb_data->init_level++;
+
+ /* DMA tag for our S/G structures. We allocate in page sized chunks */
+ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ PAGE_SIZE, /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &scb_data->sg_dmat) != 0) {
+ goto error_exit;
+ }
+
+ scb_data->init_level++;
+
+ /* Perform initial CCB allocation */
+ memset(scb_data->hscbs, 0,
+ AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
+ ahc_alloc_scbs(ahc);
+
+ if (scb_data->numscbs == 0) {
+ printk("%s: ahc_init_scbdata - "
+ "Unable to allocate initial scbs\n",
+ ahc_name(ahc));
+ goto error_exit;
+ }
+
+ /*
+ * Reserve the next queued SCB.
+ */
+ ahc->next_queued_scb = ahc_get_scb(ahc);
+
+ /*
+ * Note that we were successful
+ */
+ return (0);
+
+error_exit:
+
+ return (ENOMEM);
+}
+
+static void
+ahc_fini_scbdata(struct ahc_softc *ahc)
+{
+ struct scb_data *scb_data;
+
+ scb_data = ahc->scb_data;
+ if (scb_data == NULL)
+ return;
+
+ switch (scb_data->init_level) {
+ default:
+ case 7:
+ {
+ struct sg_map_node *sg_map;
+
+ while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
+ SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
+ ahc_dmamap_unload(ahc, scb_data->sg_dmat,
+ sg_map->sg_dmamap);
+ ahc_dmamem_free(ahc, scb_data->sg_dmat,
+ sg_map->sg_vaddr,
+ sg_map->sg_dmamap);
+ kfree(sg_map);
+ }
+ ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
+ }
+ case 6:
+ ahc_dmamap_unload(ahc, scb_data->sense_dmat,
+ scb_data->sense_dmamap);
+ case 5:
+ ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
+ scb_data->sense_dmamap);
+ ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
+ scb_data->sense_dmamap);
+ case 4:
+ ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
+ case 3:
+ ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
+ scb_data->hscb_dmamap);
+ case 2:
+ ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
+ scb_data->hscb_dmamap);
+ ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
+ scb_data->hscb_dmamap);
+ case 1:
+ ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
+ break;
+ case 0:
+ break;
+ }
+ if (scb_data->scbarray != NULL)
+ kfree(scb_data->scbarray);
+}
+
+static void
+ahc_alloc_scbs(struct ahc_softc *ahc)
+{
+ struct scb_data *scb_data;
+ struct scb *next_scb;
+ struct sg_map_node *sg_map;
+ dma_addr_t physaddr;
+ struct ahc_dma_seg *segs;
+ int newcount;
+ int i;
+
+ scb_data = ahc->scb_data;
+ if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
+ /* Can't allocate any more */
+ return;
+
+ next_scb = &scb_data->scbarray[scb_data->numscbs];
+
+ sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);
+
+ if (sg_map == NULL)
+ return;
+
+ /* Allocate S/G space for the next batch of SCBS */
+ if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
+ (void **)&sg_map->sg_vaddr,
+ BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
+ kfree(sg_map);
+ return;
+ }
+
+ SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
+
+ ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
+ sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
+ &sg_map->sg_physaddr, /*flags*/0);
+
+ segs = sg_map->sg_vaddr;
+ physaddr = sg_map->sg_physaddr;
+
+ newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
+ newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
+ for (i = 0; i < newcount; i++) {
+ struct scb_platform_data *pdata;
+#ifndef __linux__
+ int error;
+#endif
+ pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
+ if (pdata == NULL)
+ break;
+ next_scb->platform_data = pdata;
+ next_scb->sg_map = sg_map;
+ next_scb->sg_list = segs;
+ /*
+ * The sequencer always starts with the second entry.
+ * The first entry is embedded in the scb.
+ */
+ next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
+ next_scb->ahc_softc = ahc;
+ next_scb->flags = SCB_FREE;
+#ifndef __linux__
+ error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
+ &next_scb->dmamap);
+ if (error != 0)
+ break;
+#endif
+ next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
+ next_scb->hscb->tag = ahc->scb_data->numscbs;
+ SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
+ next_scb, links.sle);
+ segs += AHC_NSEG;
+ physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
+ next_scb++;
+ ahc->scb_data->numscbs++;
+ }
+}
+
+void
+ahc_controller_info(struct ahc_softc *ahc, char *buf)
+{
+ int len;
+
+ len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
+ buf += len;
+ if ((ahc->features & AHC_TWIN) != 0)
+ len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
+ "B SCSI Id=%d, primary %c, ",
+ ahc->our_id, ahc->our_id_b,
+ (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
+ else {
+ const char *speed;
+ const char *type;
+
+ speed = "";
+ if ((ahc->features & AHC_ULTRA) != 0) {
+ speed = "Ultra ";
+ } else if ((ahc->features & AHC_DT) != 0) {
+ speed = "Ultra160 ";
+ } else if ((ahc->features & AHC_ULTRA2) != 0) {
+ speed = "Ultra2 ";
+ }
+ if ((ahc->features & AHC_WIDE) != 0) {
+ type = "Wide";
+ } else {
+ type = "Single";
+ }
+ len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
+ speed, type, ahc->channel, ahc->our_id);
+ }
+ buf += len;
+
+ if ((ahc->flags & AHC_PAGESCBS) != 0)
+ sprintf(buf, "%d/%d SCBs",
+ ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
+ else
+ sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
+}
+
+int
+ahc_chip_init(struct ahc_softc *ahc)
+{
+ int term;
+ int error;
+ u_int i;
+ u_int scsi_conf;
+ u_int scsiseq_template;
+ uint32_t physaddr;
+
+ ahc_outb(ahc, SEQ_FLAGS, 0);
+ ahc_outb(ahc, SEQ_FLAGS2, 0);
+
+ /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
+ if (ahc->features & AHC_TWIN) {
+
+ /*
+ * Setup Channel B first.
+ */
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
+ term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
+ ahc_outb(ahc, SCSIID, ahc->our_id_b);
+ scsi_conf = ahc_inb(ahc, SCSICONF + 1);
+ ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
+ |term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
+ ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
+ ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
+
+ /* Select Channel A */
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
+ }
+ term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
+ else
+ ahc_outb(ahc, SCSIID, ahc->our_id);
+ scsi_conf = ahc_inb(ahc, SCSICONF);
+ ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
+ |term|ahc->seltime
+ |ENSTIMER|ACTNEGEN);
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
+ ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
+ ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
+
+ /* There are no untagged SCBs active yet. */
+ for (i = 0; i < 16; i++) {
+ ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ int lun;
+
+ /*
+ * The SCB based BTT allows an entry per
+ * target and lun pair.
+ */
+ for (lun = 1; lun < AHC_NUM_LUNS; lun++)
+ ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
+ }
+ }
+
+ /* All of our queues are empty */
+ for (i = 0; i < 256; i++)
+ ahc->qoutfifo[i] = SCB_LIST_NULL;
+ ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
+
+ for (i = 0; i < 256; i++)
+ ahc->qinfifo[i] = SCB_LIST_NULL;
+
+ if ((ahc->features & AHC_MULTI_TID) != 0) {
+ ahc_outb(ahc, TARGID, 0);
+ ahc_outb(ahc, TARGID + 1, 0);
+ }
+
+ /*
+ * Tell the sequencer where it can find our arrays in memory.
+ */
+ physaddr = ahc->scb_data->hscb_busaddr;
+ ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
+ ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
+ ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
+ ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
+
+ physaddr = ahc->shared_data_busaddr;
+ ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
+ ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
+ ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
+ ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
+
+ /*
+ * Initialize the group code to command length table.
+ * This overrides the values in TARG_SCSIRATE, so only
+ * setup the table after we have processed that information.
+ */
+ ahc_outb(ahc, CMDSIZE_TABLE, 5);
+ ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
+ ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
+ ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
+ ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
+ ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
+ ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
+ ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
+
+ if ((ahc->features & AHC_HS_MAILBOX) != 0)
+ ahc_outb(ahc, HS_MAILBOX, 0);
+
+ /* Tell the sequencer of our initial queue positions */
+ if ((ahc->features & AHC_TARGETMODE) != 0) {
+ ahc->tqinfifonext = 1;
+ ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
+ ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
+ }
+ ahc->qinfifonext = 0;
+ ahc->qoutfifonext = 0;
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
+ ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+ ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
+ ahc_outb(ahc, SDSCB_QOFF, 0);
+ } else {
+ ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+ ahc_outb(ahc, QINPOS, ahc->qinfifonext);
+ ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
+ }
+
+ /* We don't have any waiting selections */
+ ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
+
+ /* Our disconnection list is empty too */
+ ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
+
+ /* Message out buffer starts empty */
+ ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+
+ /*
+ * Setup the allowed SCSI Sequences based on operational mode.
+ * If we are a target, we'll enable select in operations once
+ * we've had a lun enabled.
+ */
+ scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
+ if ((ahc->flags & AHC_INITIATORROLE) != 0)
+ scsiseq_template |= ENRSELI;
+ ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
+
+ /* Initialize our list of free SCBs. */
+ ahc_build_free_scb_list(ahc);
+
+ /*
+ * Tell the sequencer which SCB will be the next one it receives.
+ */
+ ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
+
+ /*
+ * Load the Sequencer program and Enable the adapter
+ * in "fast" mode.
+ */
+ if (bootverbose)
+ printk("%s: Downloading Sequencer Program...",
+ ahc_name(ahc));
+
+ error = ahc_loadseq(ahc);
+ if (error != 0)
+ return (error);
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ int wait;
+
+ /*
+ * Wait for up to 500ms for our transceivers
+ * to settle. If the adapter does not have
+ * a cable attached, the transceivers may
+ * never settle, so don't complain if we
+ * fail here.
+ */
+ for (wait = 5000;
+ (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
+ wait--)
+ ahc_delay(100);
+ }
+ ahc_restart(ahc);
+ return (0);
+}
+
+/*
+ * Start the board, ready for normal operation
+ */
+int
+ahc_init(struct ahc_softc *ahc)
+{
+ int max_targ;
+ u_int i;
+ u_int scsi_conf;
+ u_int ultraenb;
+ u_int discenable;
+ u_int tagenable;
+ size_t driver_data_size;
+
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
+ ahc->flags |= AHC_SEQUENCER_DEBUG;
+#endif
+
+#ifdef AHC_PRINT_SRAM
+ printk("Scratch Ram:");
+ for (i = 0x20; i < 0x5f; i++) {
+ if (((i % 8) == 0) && (i != 0)) {
+ printk ("\n ");
+ }
+ printk (" 0x%x", ahc_inb(ahc, i));
+ }
+ if ((ahc->features & AHC_MORE_SRAM) != 0) {
+ for (i = 0x70; i < 0x7f; i++) {
+ if (((i % 8) == 0) && (i != 0)) {
+ printk ("\n ");
+ }
+ printk (" 0x%x", ahc_inb(ahc, i));
+ }
+ }
+ printk ("\n");
+ /*
+ * Reading uninitialized scratch ram may
+ * generate parity errors.
+ */
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+#endif
+ max_targ = 15;
+
+ /*
+ * Assume we have a board at this stage and it has been reset.
+ */
+ if ((ahc->flags & AHC_USEDEFAULTS) != 0)
+ ahc->our_id = ahc->our_id_b = 7;
+
+ /*
+ * Default to allowing initiator operations.
+ */
+ ahc->flags |= AHC_INITIATORROLE;
+
+ /*
+ * Only allow target mode features if this unit has them enabled.
+ */
+ if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
+ ahc->features &= ~AHC_TARGETMODE;
+
+#ifndef __linux__
+ /* DMA tag for mapping buffers into device visible space. */
+ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
+ ? (dma_addr_t)0x7FFFFFFFFFULL
+ : BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
+ /*nsegments*/AHC_NSEG,
+ /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
+ /*flags*/BUS_DMA_ALLOCNOW,
+ &ahc->buffer_dmat) != 0) {
+ return (ENOMEM);
+ }
+#endif
+
+ ahc->init_level++;
+
+ /*
+ * DMA tag for our command fifos and other data in system memory
+ * the card's sequencer must be able to access. For initiator
+ * roles, we need to allocate space for the qinfifo and qoutfifo.
+ * The qinfifo and qoutfifo are composed of 256 1 byte elements.
+ * When providing for the target mode role, we must additionally
+ * provide space for the incoming target command fifo and an extra
+ * byte to deal with a dma bug in some chip versions.
+ */
+ driver_data_size = 2 * 256 * sizeof(uint8_t);
+ if ((ahc->features & AHC_TARGETMODE) != 0)
+ driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
+ + /*DMA WideOdd Bug Buffer*/1;
+ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+ /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ driver_data_size,
+ /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &ahc->shared_data_dmat) != 0) {
+ return (ENOMEM);
+ }
+
+ ahc->init_level++;
+
+ /* Allocation of driver data */
+ if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
+ (void **)&ahc->qoutfifo,
+ BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
+ return (ENOMEM);
+ }
+
+ ahc->init_level++;
+
+ /* And permanently map it in */
+ ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
+ &ahc->shared_data_busaddr, /*flags*/0);
+
+ if ((ahc->features & AHC_TARGETMODE) != 0) {
+ ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
+ ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
+ ahc->dma_bug_buf = ahc->shared_data_busaddr
+ + driver_data_size - 1;
+ /* All target command blocks start out invalid. */
+ for (i = 0; i < AHC_TMODE_CMDS; i++)
+ ahc->targetcmds[i].cmd_valid = 0;
+ ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
+ ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
+ }
+ ahc->qinfifo = &ahc->qoutfifo[256];
+
+ ahc->init_level++;
+
+ /* Allocate SCB data now that buffer_dmat is initialized */
+ if (ahc->scb_data->maxhscbs == 0)
+ if (ahc_init_scbdata(ahc) != 0)
+ return (ENOMEM);
+
+ /*
+ * Allocate a tstate to house information for our
+ * initiator presence on the bus as well as the user
+ * data for any target mode initiator.
+ */
+ if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
+ printk("%s: unable to allocate ahc_tmode_tstate. "
+ "Failing attach\n", ahc_name(ahc));
+ return (ENOMEM);
+ }
+
+ if ((ahc->features & AHC_TWIN) != 0) {
+ if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
+ printk("%s: unable to allocate ahc_tmode_tstate. "
+ "Failing attach\n", ahc_name(ahc));
+ return (ENOMEM);
+ }
+ }
+
+ if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
+ ahc->flags |= AHC_PAGESCBS;
+ } else {
+ ahc->flags &= ~AHC_PAGESCBS;
+ }
+
+#ifdef AHC_DEBUG
+ if (ahc_debug & AHC_SHOW_MISC) {
+ printk("%s: hardware scb %u bytes; kernel scb %u bytes; "
+ "ahc_dma %u bytes\n",
+ ahc_name(ahc),
+ (u_int)sizeof(struct hardware_scb),
+ (u_int)sizeof(struct scb),
+ (u_int)sizeof(struct ahc_dma_seg));
+ }
+#endif /* AHC_DEBUG */
+
+ /*
+ * Look at the information that board initialization or
+ * the board bios has left us.
+ */
+ if (ahc->features & AHC_TWIN) {
+ scsi_conf = ahc_inb(ahc, SCSICONF + 1);
+ if ((scsi_conf & RESET_SCSI) != 0
+ && (ahc->flags & AHC_INITIATORROLE) != 0)
+ ahc->flags |= AHC_RESET_BUS_B;
+ }
+
+ scsi_conf = ahc_inb(ahc, SCSICONF);
+ if ((scsi_conf & RESET_SCSI) != 0
+ && (ahc->flags & AHC_INITIATORROLE) != 0)
+ ahc->flags |= AHC_RESET_BUS_A;
+
+ ultraenb = 0;
+ tagenable = ALL_TARGETS_MASK;
+
+ /* Grab the disconnection disable table and invert it for our needs */
+ if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
+ printk("%s: Host Adapter Bios disabled. Using default SCSI "
+ "device parameters\n", ahc_name(ahc));
+ ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
+ AHC_TERM_ENB_A|AHC_TERM_ENB_B;
+ discenable = ALL_TARGETS_MASK;
+ if ((ahc->features & AHC_ULTRA) != 0)
+ ultraenb = ALL_TARGETS_MASK;
+ } else {
+ discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
+ | ahc_inb(ahc, DISC_DSB));
+ if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
+ ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
+ | ahc_inb(ahc, ULTRA_ENB);
+ }
+
+ if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
+ max_targ = 7;
+
+ for (i = 0; i <= max_targ; i++) {
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ u_int our_id;
+ u_int target_id;
+ char channel;
+
+ channel = 'A';
+ our_id = ahc->our_id;
+ target_id = i;
+ if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
+ channel = 'B';
+ our_id = ahc->our_id_b;
+ target_id = i % 8;
+ }
+ tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+ target_id, &tstate);
+ /* Default to async narrow across the board */
+ memset(tinfo, 0, sizeof(*tinfo));
+ if (ahc->flags & AHC_USEDEFAULTS) {
+ if ((ahc->features & AHC_WIDE) != 0)
+ tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
+
+ /*
+ * These will be truncated when we determine the
+ * connection type we have with the target.
+ */
+ tinfo->user.period = ahc_syncrates->period;
+ tinfo->user.offset = MAX_OFFSET;
+ } else {
+ u_int scsirate;
+ uint16_t mask;
+
+ /* Take the settings leftover in scratch RAM. */
+ scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
+ mask = (0x01 << i);
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ u_int offset;
+ u_int maxsync;
+
+ if ((scsirate & SOFS) == 0x0F) {
+ /*
+ * Haven't negotiated yet,
+ * so the format is different.
+ */
+ scsirate = (scsirate & SXFR) >> 4
+ | (ultraenb & mask)
+ ? 0x08 : 0x0
+ | (scsirate & WIDEXFER);
+ offset = MAX_OFFSET_ULTRA2;
+ } else
+ offset = ahc_inb(ahc, TARG_OFFSET + i);
+ if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
+ /* Set to the lowest sync rate, 5MHz */
+ scsirate |= 0x1c;
+ maxsync = AHC_SYNCRATE_ULTRA2;
+ if ((ahc->features & AHC_DT) != 0)
+ maxsync = AHC_SYNCRATE_DT;
+ tinfo->user.period =
+ ahc_find_period(ahc, scsirate, maxsync);
+ if (offset == 0)
+ tinfo->user.period = 0;
+ else
+ tinfo->user.offset = MAX_OFFSET;
+ if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
+ && (ahc->features & AHC_DT) != 0)
+ tinfo->user.ppr_options =
+ MSG_EXT_PPR_DT_REQ;
+ } else if ((scsirate & SOFS) != 0) {
+ if ((scsirate & SXFR) == 0x40
+ && (ultraenb & mask) != 0) {
+ /* Treat 10MHz as a non-ultra speed */
+ scsirate &= ~SXFR;
+ ultraenb &= ~mask;
+ }
+ tinfo->user.period =
+ ahc_find_period(ahc, scsirate,
+ (ultraenb & mask)
+ ? AHC_SYNCRATE_ULTRA
+ : AHC_SYNCRATE_FAST);
+ if (tinfo->user.period != 0)
+ tinfo->user.offset = MAX_OFFSET;
+ }
+ if (tinfo->user.period == 0)
+ tinfo->user.offset = 0;
+ if ((scsirate & WIDEXFER) != 0
+ && (ahc->features & AHC_WIDE) != 0)
+ tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
+ tinfo->user.protocol_version = 4;
+ if ((ahc->features & AHC_DT) != 0)
+ tinfo->user.transport_version = 3;
+ else
+ tinfo->user.transport_version = 2;
+ tinfo->goal.protocol_version = 2;
+ tinfo->goal.transport_version = 2;
+ tinfo->curr.protocol_version = 2;
+ tinfo->curr.transport_version = 2;
+ }
+ tstate->ultraenb = 0;
+ }
+ ahc->user_discenable = discenable;
+ ahc->user_tagenable = tagenable;
+
+ return (ahc->bus_chip_init(ahc));
+}
+
+void
+ahc_intr_enable(struct ahc_softc *ahc, int enable)
+{
+ u_int hcntrl;
+
+ hcntrl = ahc_inb(ahc, HCNTRL);
+ hcntrl &= ~INTEN;
+ ahc->pause &= ~INTEN;
+ ahc->unpause &= ~INTEN;
+ if (enable) {
+ hcntrl |= INTEN;
+ ahc->pause |= INTEN;
+ ahc->unpause |= INTEN;
+ }
+ ahc_outb(ahc, HCNTRL, hcntrl);
+}
+
+/*
+ * Ensure that the card is paused in a location
+ * outside of all critical sections and that all
+ * pending work is completed prior to returning.
+ * This routine should only be called from outside
+ * an interrupt context.
+ */
+void
+ahc_pause_and_flushwork(struct ahc_softc *ahc)
+{
+ int intstat;
+ int maxloops;
+ int paused;
+
+ maxloops = 1000;
+ ahc->flags |= AHC_ALL_INTERRUPTS;
+ paused = FALSE;
+ do {
+ if (paused) {
+ ahc_unpause(ahc);
+ /*
+ * Give the sequencer some time to service
+ * any active selections.
+ */
+ ahc_delay(500);
+ }
+ ahc_intr(ahc);
+ ahc_pause(ahc);
+ paused = TRUE;
+ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
+ intstat = ahc_inb(ahc, INTSTAT);
+ if ((intstat & INT_PEND) == 0) {
+ ahc_clear_critical_section(ahc);
+ intstat = ahc_inb(ahc, INTSTAT);
+ }
+ } while (--maxloops
+ && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
+ && ((intstat & INT_PEND) != 0
+ || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
+ if (maxloops == 0) {
+ printk("Infinite interrupt loop, INTSTAT = %x",
+ ahc_inb(ahc, INTSTAT));
+ }
+ ahc_platform_flushwork(ahc);
+ ahc->flags &= ~AHC_ALL_INTERRUPTS;
+}
+
+#ifdef CONFIG_PM
+int
+ahc_suspend(struct ahc_softc *ahc)
+{
+
+ ahc_pause_and_flushwork(ahc);
+
+ if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
+ ahc_unpause(ahc);
+ return (EBUSY);
+ }
+
+#ifdef AHC_TARGET_MODE
+ /*
+ * XXX What about ATIOs that have not yet been serviced?
+ * Perhaps we should just refuse to be suspended if we
+ * are acting in a target role.
+ */
+ if (ahc->pending_device != NULL) {
+ ahc_unpause(ahc);
+ return (EBUSY);
+ }
+#endif
+ ahc_shutdown(ahc);
+ return (0);
+}
+
+int
+ahc_resume(struct ahc_softc *ahc)
+{
+
+ ahc_reset(ahc, /*reinit*/TRUE);
+ ahc_intr_enable(ahc, TRUE);
+ ahc_restart(ahc);
+ return (0);
+}
+#endif
+/************************** Busy Target Table *********************************/
+/*
+ * Return the untagged transaction id for a given target/channel lun.
+ * Optionally, clear the entry.
+ */
+static u_int
+ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
+{
+ u_int scbid;
+ u_int target_offset;
+
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ u_int saved_scbptr;
+
+ saved_scbptr = ahc_inb(ahc, SCBPTR);
+ ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
+ scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
+ ahc_outb(ahc, SCBPTR, saved_scbptr);
+ } else {
+ target_offset = TCL_TARGET_OFFSET(tcl);
+ scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
+ }
+
+ return (scbid);
+}
+
+static void
+ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
+{
+ u_int target_offset;
+
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ u_int saved_scbptr;
+
+ saved_scbptr = ahc_inb(ahc, SCBPTR);
+ ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
+ ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
+ ahc_outb(ahc, SCBPTR, saved_scbptr);
+ } else {
+ target_offset = TCL_TARGET_OFFSET(tcl);
+ ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
+ }
+}
+
+static void
+ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
+{
+ u_int target_offset;
+
+ if ((ahc->flags & AHC_SCB_BTT) != 0) {
+ u_int saved_scbptr;
+
+ saved_scbptr = ahc_inb(ahc, SCBPTR);
+ ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
+ ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
+ ahc_outb(ahc, SCBPTR, saved_scbptr);
+ } else {
+ target_offset = TCL_TARGET_OFFSET(tcl);
+ ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
+ }
+}
+
+/************************** SCB and SCB queue management **********************/
+int
+ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
+ char channel, int lun, u_int tag, role_t role)
+{
+ int targ = SCB_GET_TARGET(ahc, scb);
+ char chan = SCB_GET_CHANNEL(ahc, scb);
+ int slun = SCB_GET_LUN(scb);
+ int match;
+
+ match = ((chan == channel) || (channel == ALL_CHANNELS));
+ if (match != 0)
+ match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
+ if (match != 0)
+ match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
+ if (match != 0) {
+#ifdef AHC_TARGET_MODE
+ int group;
+
+ group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
+ if (role == ROLE_INITIATOR) {
+ match = (group != XPT_FC_GROUP_TMODE)
+ && ((tag == scb->hscb->tag)
+ || (tag == SCB_LIST_NULL));
+ } else if (role == ROLE_TARGET) {
+ match = (group == XPT_FC_GROUP_TMODE)
+ && ((tag == scb->io_ctx->csio.tag_id)
+ || (tag == SCB_LIST_NULL));
+ }
+#else /* !AHC_TARGET_MODE */
+ match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
+#endif /* AHC_TARGET_MODE */
+ }
+
+ return match;
+}
+
+static void
+ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
+{
+ int target;
+ char channel;
+ int lun;
+
+ target = SCB_GET_TARGET(ahc, scb);
+ lun = SCB_GET_LUN(scb);
+ channel = SCB_GET_CHANNEL(ahc, scb);
+
+ ahc_search_qinfifo(ahc, target, channel, lun,
+ /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
+ CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+ ahc_platform_freeze_devq(ahc, scb);
+}
+
+void
+ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct scb *prev_scb;
+
+ prev_scb = NULL;
+ if (ahc_qinfifo_count(ahc) != 0) {
+ u_int prev_tag;
+ uint8_t prev_pos;
+
+ prev_pos = ahc->qinfifonext - 1;
+ prev_tag = ahc->qinfifo[prev_pos];
+ prev_scb = ahc_lookup_scb(ahc, prev_tag);
+ }
+ ahc_qinfifo_requeue(ahc, prev_scb, scb);
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+ } else {
+ ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+ }
+}
+
+static void
+ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
+ struct scb *scb)
+{
+ if (prev_scb == NULL) {
+ ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
+ } else {
+ prev_scb->hscb->next = scb->hscb->tag;
+ ahc_sync_scb(ahc, prev_scb,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ }
+ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+ scb->hscb->next = ahc->next_queued_scb->hscb->tag;
+ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+}
+
+static int
+ahc_qinfifo_count(struct ahc_softc *ahc)
+{
+ uint8_t qinpos;
+ uint8_t diff;
+
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ qinpos = ahc_inb(ahc, SNSCB_QOFF);
+ ahc_outb(ahc, SNSCB_QOFF, qinpos);
+ } else
+ qinpos = ahc_inb(ahc, QINPOS);
+ diff = ahc->qinfifonext - qinpos;
+ return (diff);
+}
+
+int
+ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status,
+ ahc_search_action action)
+{
+ struct scb *scb;
+ struct scb *prev_scb;
+ uint8_t qinstart;
+ uint8_t qinpos;
+ uint8_t qintail;
+ uint8_t next;
+ uint8_t prev;
+ uint8_t curscbptr;
+ int found;
+ int have_qregs;
+
+ qintail = ahc->qinfifonext;
+ have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
+ if (have_qregs) {
+ qinstart = ahc_inb(ahc, SNSCB_QOFF);
+ ahc_outb(ahc, SNSCB_QOFF, qinstart);
+ } else
+ qinstart = ahc_inb(ahc, QINPOS);
+ qinpos = qinstart;
+ found = 0;
+ prev_scb = NULL;
+
+ if (action == SEARCH_COMPLETE) {
+ /*
+ * Don't attempt to run any queued untagged transactions
+ * until we are done with the abort process.
+ */
+ ahc_freeze_untagged_queues(ahc);
+ }
+
+ /*
+ * Start with an empty queue. Entries that are not chosen
+ * for removal will be re-added to the queue as we go.
+ */
+ ahc->qinfifonext = qinpos;
+ ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
+
+ while (qinpos != qintail) {
+ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
+ if (scb == NULL) {
+ printk("qinpos = %d, SCB index = %d\n",
+ qinpos, ahc->qinfifo[qinpos]);
+ panic("Loop 1\n");
+ }
+
+ if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
+ /*
+ * We found an scb that needs to be acted on.
+ */
+ found++;
+ switch (action) {
+ case SEARCH_COMPLETE:
+ {
+ cam_status ostat;
+ cam_status cstat;
+
+ ostat = ahc_get_transaction_status(scb);
+ if (ostat == CAM_REQ_INPROG)
+ ahc_set_transaction_status(scb, status);
+ cstat = ahc_get_transaction_status(scb);
+ if (cstat != CAM_REQ_CMP)
+ ahc_freeze_scb(scb);
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB in qinfifo\n");
+ ahc_done(ahc, scb);
+
+ /* FALLTHROUGH */
+ }
+ case SEARCH_REMOVE:
+ break;
+ case SEARCH_COUNT:
+ ahc_qinfifo_requeue(ahc, prev_scb, scb);
+ prev_scb = scb;
+ break;
+ }
+ } else {
+ ahc_qinfifo_requeue(ahc, prev_scb, scb);
+ prev_scb = scb;
+ }
+ qinpos++;
+ }
+
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+ } else {
+ ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+ }
+
+ if (action != SEARCH_COUNT
+ && (found != 0)
+ && (qinstart != ahc->qinfifonext)) {
+ /*
+ * The sequencer may be in the process of dmaing
+ * down the SCB at the beginning of the queue.
+ * This could be problematic if either the first,
+ * or the second SCB is removed from the queue
+ * (the first SCB includes a pointer to the "next"
+ * SCB to dma). If we have removed any entries, swap
+ * the first element in the queue with the next HSCB
+ * so the sequencer will notice that NEXT_QUEUED_SCB
+ * has changed during its dma attempt and will retry
+ * the DMA.
+ */
+ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
+
+ if (scb == NULL) {
+ printk("found = %d, qinstart = %d, qinfifionext = %d\n",
+ found, qinstart, ahc->qinfifonext);
+ panic("First/Second Qinfifo fixup\n");
+ }
+ /*
+ * ahc_swap_with_next_hscb forces our next pointer to
+ * point to the reserved SCB for future commands. Save
+ * and restore our original next pointer to maintain
+ * queue integrity.
+ */
+ next = scb->hscb->next;
+ ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
+ ahc_swap_with_next_hscb(ahc, scb);
+ scb->hscb->next = next;
+ ahc->qinfifo[qinstart] = scb->hscb->tag;
+
+ /* Tell the card about the new head of the qinfifo. */
+ ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
+
+ /* Fixup the tail "next" pointer. */
+ qintail = ahc->qinfifonext - 1;
+ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
+ scb->hscb->next = ahc->next_queued_scb->hscb->tag;
+ }
+
+ /*
+ * Search waiting for selection list.
+ */
+ curscbptr = ahc_inb(ahc, SCBPTR);
+ next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */
+ prev = SCB_LIST_NULL;
+
+ while (next != SCB_LIST_NULL) {
+ uint8_t scb_index;
+
+ ahc_outb(ahc, SCBPTR, next);
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ if (scb_index >= ahc->scb_data->numscbs) {
+ printk("Waiting List inconsistency. "
+ "SCB index == %d, yet numscbs == %d.",
+ scb_index, ahc->scb_data->numscbs);
+ ahc_dump_card_state(ahc);
+ panic("for safety");
+ }
+ scb = ahc_lookup_scb(ahc, scb_index);
+ if (scb == NULL) {
+ printk("scb_index = %d, next = %d\n",
+ scb_index, next);
+ panic("Waiting List traversal\n");
+ }
+ if (ahc_match_scb(ahc, scb, target, channel,
+ lun, SCB_LIST_NULL, role)) {
+ /*
+ * We found an scb that needs to be acted on.
+ */
+ found++;
+ switch (action) {
+ case SEARCH_COMPLETE:
+ {
+ cam_status ostat;
+ cam_status cstat;
+
+ ostat = ahc_get_transaction_status(scb);
+ if (ostat == CAM_REQ_INPROG)
+ ahc_set_transaction_status(scb,
+ status);
+ cstat = ahc_get_transaction_status(scb);
+ if (cstat != CAM_REQ_CMP)
+ ahc_freeze_scb(scb);
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB in Waiting List\n");
+ ahc_done(ahc, scb);
+ /* FALLTHROUGH */
+ }
+ case SEARCH_REMOVE:
+ next = ahc_rem_wscb(ahc, next, prev);
+ break;
+ case SEARCH_COUNT:
+ prev = next;
+ next = ahc_inb(ahc, SCB_NEXT);
+ break;
+ }
+ } else {
+
+ prev = next;
+ next = ahc_inb(ahc, SCB_NEXT);
+ }
+ }
+ ahc_outb(ahc, SCBPTR, curscbptr);
+
+ found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
+ channel, lun, status, action);
+
+ if (action == SEARCH_COMPLETE)
+ ahc_release_untagged_queues(ahc);
+ return (found);
+}
+
+int
+ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
+ int target, char channel, int lun, uint32_t status,
+ ahc_search_action action)
+{
+ struct scb *scb;
+ int maxtarget;
+ int found;
+ int i;
+
+ if (action == SEARCH_COMPLETE) {
+ /*
+ * Don't attempt to run any queued untagged transactions
+ * until we are done with the abort process.
+ */
+ ahc_freeze_untagged_queues(ahc);
+ }
+
+ found = 0;
+ i = 0;
+ if ((ahc->flags & AHC_SCB_BTT) == 0) {
+
+ maxtarget = 16;
+ if (target != CAM_TARGET_WILDCARD) {
+
+ i = target;
+ if (channel == 'B')
+ i += 8;
+ maxtarget = i + 1;
+ }
+ } else {
+ maxtarget = 0;
+ }
+
+ for (; i < maxtarget; i++) {
+ struct scb_tailq *untagged_q;
+ struct scb *next_scb;
+
+ untagged_q = &(ahc->untagged_queues[i]);
+ next_scb = TAILQ_FIRST(untagged_q);
+ while (next_scb != NULL) {
+
+ scb = next_scb;
+ next_scb = TAILQ_NEXT(scb, links.tqe);
+
+ /*
+ * The head of the list may be the currently
+ * active untagged command for a device.
+ * We're only searching for commands that
+ * have not been started. A transaction
+ * marked active but still in the qinfifo
+ * is removed by the qinfifo scanning code
+ * above.
+ */
+ if ((scb->flags & SCB_ACTIVE) != 0)
+ continue;
+
+ if (ahc_match_scb(ahc, scb, target, channel, lun,
+ SCB_LIST_NULL, ROLE_INITIATOR) == 0
+ || (ctx != NULL && ctx != scb->io_ctx))
+ continue;
+
+ /*
+ * We found an scb that needs to be acted on.
+ */
+ found++;
+ switch (action) {
+ case SEARCH_COMPLETE:
+ {
+ cam_status ostat;
+ cam_status cstat;
+
+ ostat = ahc_get_transaction_status(scb);
+ if (ostat == CAM_REQ_INPROG)
+ ahc_set_transaction_status(scb, status);
+ cstat = ahc_get_transaction_status(scb);
+ if (cstat != CAM_REQ_CMP)
+ ahc_freeze_scb(scb);
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB in untaggedQ\n");
+ ahc_done(ahc, scb);
+ break;
+ }
+ case SEARCH_REMOVE:
+ scb->flags &= ~SCB_UNTAGGEDQ;
+ TAILQ_REMOVE(untagged_q, scb, links.tqe);
+ break;
+ case SEARCH_COUNT:
+ break;
+ }
+ }
+ }
+
+ if (action == SEARCH_COMPLETE)
+ ahc_release_untagged_queues(ahc);
+ return (found);
+}
+
+int
+ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
+ int lun, u_int tag, int stop_on_first, int remove,
+ int save_state)
+{
+ struct scb *scbp;
+ u_int next;
+ u_int prev;
+ u_int count;
+ u_int active_scb;
+
+ count = 0;
+ next = ahc_inb(ahc, DISCONNECTED_SCBH);
+ prev = SCB_LIST_NULL;
+
+ if (save_state) {
+ /* restore this when we're done */
+ active_scb = ahc_inb(ahc, SCBPTR);
+ } else
+ /* Silence compiler */
+ active_scb = SCB_LIST_NULL;
+
+ while (next != SCB_LIST_NULL) {
+ u_int scb_index;
+
+ ahc_outb(ahc, SCBPTR, next);
+ scb_index = ahc_inb(ahc, SCB_TAG);
+ if (scb_index >= ahc->scb_data->numscbs) {
+ printk("Disconnected List inconsistency. "
+ "SCB index == %d, yet numscbs == %d.",
+ scb_index, ahc->scb_data->numscbs);
+ ahc_dump_card_state(ahc);
+ panic("for safety");
+ }
+
+ if (next == prev) {
+ panic("Disconnected List Loop. "
+ "cur SCBPTR == %x, prev SCBPTR == %x.",
+ next, prev);
+ }
+ scbp = ahc_lookup_scb(ahc, scb_index);
+ if (ahc_match_scb(ahc, scbp, target, channel, lun,
+ tag, ROLE_INITIATOR)) {
+ count++;
+ if (remove) {
+ next =
+ ahc_rem_scb_from_disc_list(ahc, prev, next);
+ } else {
+ prev = next;
+ next = ahc_inb(ahc, SCB_NEXT);
+ }
+ if (stop_on_first)
+ break;
+ } else {
+ prev = next;
+ next = ahc_inb(ahc, SCB_NEXT);
+ }
+ }
+ if (save_state)
+ ahc_outb(ahc, SCBPTR, active_scb);
+ return (count);
+}
+
+/*
+ * Remove an SCB from the on chip list of disconnected transactions.
+ * This is empty/unused if we are not performing SCB paging.
+ */
+static u_int
+ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
+{
+ u_int next;
+
+ ahc_outb(ahc, SCBPTR, scbptr);
+ next = ahc_inb(ahc, SCB_NEXT);
+
+ ahc_outb(ahc, SCB_CONTROL, 0);
+
+ ahc_add_curscb_to_free_list(ahc);
+
+ if (prev != SCB_LIST_NULL) {
+ ahc_outb(ahc, SCBPTR, prev);
+ ahc_outb(ahc, SCB_NEXT, next);
+ } else
+ ahc_outb(ahc, DISCONNECTED_SCBH, next);
+
+ return (next);
+}
+
+/*
+ * Add the SCB as selected by SCBPTR onto the on chip list of
+ * free hardware SCBs. This list is empty/unused if we are not
+ * performing SCB paging.
+ */
+static void
+ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
+{
+ /*
+ * Invalidate the tag so that our abort
+ * routines don't think it's active.
+ */
+ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
+
+ if ((ahc->flags & AHC_PAGESCBS) != 0) {
+ ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
+ ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
+ }
+}
+
+/*
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ */
+static u_int
+ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
+{
+ u_int curscb, next;
+
+ /*
+ * Select the SCB we want to abort and
+ * pull the next pointer out of it.
+ */
+ curscb = ahc_inb(ahc, SCBPTR);
+ ahc_outb(ahc, SCBPTR, scbpos);
+ next = ahc_inb(ahc, SCB_NEXT);
+
+ /* Clear the necessary fields */
+ ahc_outb(ahc, SCB_CONTROL, 0);
+
+ ahc_add_curscb_to_free_list(ahc);
+
+ /* update the waiting list */
+ if (prev == SCB_LIST_NULL) {
+ /* First in the list */
+ ahc_outb(ahc, WAITING_SCBH, next);
+
+ /*
+ * Ensure we aren't attempting to perform
+ * selection for this entry.
+ */
+ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
+ } else {
+ /*
+ * Select the scb that pointed to us
+ * and update its next pointer.
+ */
+ ahc_outb(ahc, SCBPTR, prev);
+ ahc_outb(ahc, SCB_NEXT, next);
+ }
+
+ /*
+ * Point us back at the original scb position.
+ */
+ ahc_outb(ahc, SCBPTR, curscb);
+ return next;
+}
+
+/******************************** Error Handling ******************************/
+/*
+ * Abort all SCBs that match the given description (target/channel/lun/tag),
+ * setting their status to the passed in status if the status has not already
+ * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
+ * is paused before it is called.
+ */
+static int
+ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status)
+{
+ struct scb *scbp;
+ struct scb *scbp_next;
+ u_int active_scb;
+ int i, j;
+ int maxtarget;
+ int minlun;
+ int maxlun;
+
+ int found;
+
+ /*
+ * Don't attempt to run any queued untagged transactions
+ * until we are done with the abort process.
+ */
+ ahc_freeze_untagged_queues(ahc);
+
+ /* restore this when we're done */
+ active_scb = ahc_inb(ahc, SCBPTR);
+
+ found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
+ role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+ /*
+ * Clean out the busy target table for any untagged commands.
+ */
+ i = 0;
+ maxtarget = 16;
+ if (target != CAM_TARGET_WILDCARD) {
+ i = target;
+ if (channel == 'B')
+ i += 8;
+ maxtarget = i + 1;
+ }
+
+ if (lun == CAM_LUN_WILDCARD) {
+
+ /*
+ * Unless we are using an SCB based
+ * busy targets table, there is only
+ * one table entry for all luns of
+ * a target.
+ */
+ minlun = 0;
+ maxlun = 1;
+ if ((ahc->flags & AHC_SCB_BTT) != 0)
+ maxlun = AHC_NUM_LUNS;
+ } else {
+ minlun = lun;
+ maxlun = lun + 1;
+ }
+
+ if (role != ROLE_TARGET) {
+ for (;i < maxtarget; i++) {
+ for (j = minlun;j < maxlun; j++) {
+ u_int scbid;
+ u_int tcl;
+
+ tcl = BUILD_TCL(i << 4, j);
+ scbid = ahc_index_busy_tcl(ahc, tcl);
+ scbp = ahc_lookup_scb(ahc, scbid);
+ if (scbp == NULL
+ || ahc_match_scb(ahc, scbp, target, channel,
+ lun, tag, role) == 0)
+ continue;
+ ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
+ }
+ }
+
+ /*
+ * Go through the disconnected list and remove any entries we
+ * have queued for completion, 0'ing their control byte too.
+ * We save the active SCB and restore it ourselves, so there
+ * is no reason for this search to restore it too.
+ */
+ ahc_search_disc_list(ahc, target, channel, lun, tag,
+ /*stop_on_first*/FALSE, /*remove*/TRUE,
+ /*save_state*/FALSE);
+ }
+
+ /*
+ * Go through the hardware SCB array looking for commands that
+ * were active but not on any list. In some cases, these remnants
+ * might not still have mappings in the scbindex array (e.g. unexpected
+ * bus free with the same scb queued for an abort). Don't hold this
+ * against them.
+ */
+ for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+ u_int scbid;
+
+ ahc_outb(ahc, SCBPTR, i);
+ scbid = ahc_inb(ahc, SCB_TAG);
+ scbp = ahc_lookup_scb(ahc, scbid);
+ if ((scbp == NULL && scbid != SCB_LIST_NULL)
+ || (scbp != NULL
+ && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
+ ahc_add_curscb_to_free_list(ahc);
+ }
+
+ /*
+ * Go through the pending CCB list and look for
+ * commands for this target that are still active.
+ * These are other tagged commands that were
+ * disconnected when the reset occurred.
+ */
+ scbp_next = LIST_FIRST(&ahc->pending_scbs);
+ while (scbp_next != NULL) {
+ scbp = scbp_next;
+ scbp_next = LIST_NEXT(scbp, pending_links);
+ if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
+ cam_status ostat;
+
+ ostat = ahc_get_transaction_status(scbp);
+ if (ostat == CAM_REQ_INPROG)
+ ahc_set_transaction_status(scbp, status);
+ if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
+ ahc_freeze_scb(scbp);
+ if ((scbp->flags & SCB_ACTIVE) == 0)
+ printk("Inactive SCB on pending list\n");
+ ahc_done(ahc, scbp);
+ found++;
+ }
+ }
+ ahc_outb(ahc, SCBPTR, active_scb);
+ ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
+ ahc_release_untagged_queues(ahc);
+ return found;
+}
+
+static void
+ahc_reset_current_bus(struct ahc_softc *ahc)
+{
+ uint8_t scsiseq;
+
+ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
+ scsiseq = ahc_inb(ahc, SCSISEQ);
+ ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
+ ahc_flush_device_writes(ahc);
+ ahc_delay(AHC_BUSRESET_DELAY);
+ /* Turn off the bus reset */
+ ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
+
+ ahc_clear_intstat(ahc);
+
+ /* Re-enable reset interrupts */
+ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
+}
+
+int
+ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
+{
+ struct ahc_devinfo devinfo;
+ u_int initiator, target, max_scsiid;
+ u_int sblkctl;
+ u_int scsiseq;
+ u_int simode1;
+ int found;
+ int restart_needed;
+ char cur_channel;
+
+ ahc->pending_device = NULL;
+
+ ahc_compile_devinfo(&devinfo,
+ CAM_TARGET_WILDCARD,
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD,
+ channel, ROLE_UNKNOWN);
+ ahc_pause(ahc);
+
+ /* Make sure the sequencer is in a safe location. */
+ ahc_clear_critical_section(ahc);
+
+ /*
+ * Run our command complete fifos to ensure that we perform
+ * completion processing on any commands that 'completed'
+ * before the reset occurred.
+ */
+ ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+ /*
+ * XXX - In Twin mode, the tqinfifo may have commands
+ * for an unaffected channel in it. However, if
+ * we have run out of ATIO resources to drain that
+ * queue, we may not get them all out here. Further,
+ * the blocked transactions for the reset channel
+ * should just be killed off, irrespecitve of whether
+ * we are blocked on ATIO resources. Write a routine
+ * to compact the tqinfifo appropriately.
+ */
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ ahc_run_tqinfifo(ahc, /*paused*/TRUE);
+ }
+#endif
+
+ /*
+ * Reset the bus if we are initiating this reset
+ */
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ cur_channel = 'A';
+ if ((ahc->features & AHC_TWIN) != 0
+ && ((sblkctl & SELBUSB) != 0))
+ cur_channel = 'B';
+ scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
+ if (cur_channel != channel) {
+ /* Case 1: Command for another bus is active
+ * Stealthily reset the other bus without
+ * upsetting the current bus.
+ */
+ ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
+ simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
+#ifdef AHC_TARGET_MODE
+ /*
+ * Bus resets clear ENSELI, so we cannot
+ * defer re-enabling bus reset interrupts
+ * if we are in target mode.
+ */
+ if ((ahc->flags & AHC_TARGETROLE) != 0)
+ simode1 |= ENSCSIRST;
+#endif
+ ahc_outb(ahc, SIMODE1, simode1);
+ if (initiate_reset)
+ ahc_reset_current_bus(ahc);
+ ahc_clear_intstat(ahc);
+ ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
+ ahc_outb(ahc, SBLKCTL, sblkctl);
+ restart_needed = FALSE;
+ } else {
+ /* Case 2: A command from this bus is active or we're idle */
+ simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
+#ifdef AHC_TARGET_MODE
+ /*
+ * Bus resets clear ENSELI, so we cannot
+ * defer re-enabling bus reset interrupts
+ * if we are in target mode.
+ */
+ if ((ahc->flags & AHC_TARGETROLE) != 0)
+ simode1 |= ENSCSIRST;
+#endif
+ ahc_outb(ahc, SIMODE1, simode1);
+ if (initiate_reset)
+ ahc_reset_current_bus(ahc);
+ ahc_clear_intstat(ahc);
+ ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
+ restart_needed = TRUE;
+ }
+
+ /*
+ * Clean up all the state information for the
+ * pending transactions on this bus.
+ */
+ found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL,
+ ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
+
+ max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
+
+#ifdef AHC_TARGET_MODE
+ /*
+ * Send an immediate notify ccb to all target more peripheral
+ * drivers affected by this action.
+ */
+ for (target = 0; target <= max_scsiid; target++) {
+ struct ahc_tmode_tstate* tstate;
+ u_int lun;
+
+ tstate = ahc->enabled_targets[target];
+ if (tstate == NULL)
+ continue;
+ for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+ struct ahc_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[lun];
+ if (lstate == NULL)
+ continue;
+
+ ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
+ EVENT_TYPE_BUS_RESET, /*arg*/0);
+ ahc_send_lstate_events(ahc, lstate);
+ }
+ }
+#endif
+ /* Notify the XPT that a bus reset occurred */
+ ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD, AC_BUS_RESET);
+
+ /*
+ * Revert to async/narrow transfers until we renegotiate.
+ */
+ for (target = 0; target <= max_scsiid; target++) {
+
+ if (ahc->enabled_targets[target] == NULL)
+ continue;
+ for (initiator = 0; initiator <= max_scsiid; initiator++) {
+ struct ahc_devinfo devinfo;
+
+ ahc_compile_devinfo(&devinfo, target, initiator,
+ CAM_LUN_WILDCARD,
+ channel, ROLE_UNKNOWN);
+ ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHC_TRANS_CUR, /*paused*/TRUE);
+ ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
+ /*period*/0, /*offset*/0,
+ /*ppr_options*/0, AHC_TRANS_CUR,
+ /*paused*/TRUE);
+ }
+ }
+
+ if (restart_needed)
+ ahc_restart(ahc);
+ else
+ ahc_unpause(ahc);
+ return found;
+}
+
+
+/***************************** Residual Processing ****************************/
+/*
+ * Calculate the residual for a just completed SCB.
+ */
+static void
+ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+ struct status_pkt *spkt;
+ uint32_t sgptr;
+ uint32_t resid_sgptr;
+ uint32_t resid;
+
+ /*
+ * 5 cases.
+ * 1) No residual.
+ * SG_RESID_VALID clear in sgptr.
+ * 2) Transferless command
+ * 3) Never performed any transfers.
+ * sgptr has SG_FULL_RESID set.
+ * 4) No residual but target did not
+ * save data pointers after the
+ * last transfer, so sgptr was
+ * never updated.
+ * 5) We have a partial residual.
+ * Use residual_sgptr to determine
+ * where we are.
+ */
+
+ hscb = scb->hscb;
+ sgptr = ahc_le32toh(hscb->sgptr);
+ if ((sgptr & SG_RESID_VALID) == 0)
+ /* Case 1 */
+ return;
+ sgptr &= ~SG_RESID_VALID;
+
+ if ((sgptr & SG_LIST_NULL) != 0)
+ /* Case 2 */
+ return;
+
+ spkt = &hscb->shared_data.status;
+ resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
+ if ((sgptr & SG_FULL_RESID) != 0) {
+ /* Case 3 */
+ resid = ahc_get_transfer_length(scb);
+ } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
+ /* Case 4 */
+ return;
+ } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
+ panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
+ } else {
+ struct ahc_dma_seg *sg;
+
+ /*
+ * Remainder of the SG where the transfer
+ * stopped.
+ */
+ resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
+ sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
+
+ /* The residual sg_ptr always points to the next sg */
+ sg--;
+
+ /*
+ * Add up the contents of all residual
+ * SG segments that are after the SG where
+ * the transfer stopped.
+ */
+ while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
+ sg++;
+ resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
+ }
+ }
+ if ((scb->flags & SCB_SENSE) == 0)
+ ahc_set_residual(scb, resid);
+ else
+ ahc_set_sense_residual(scb, resid);
+
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MISC) != 0) {
+ ahc_print_path(ahc, scb);
+ printk("Handled %sResidual of %d bytes\n",
+ (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
+ }
+#endif
+}
+
+/******************************* Target Mode **********************************/
+#ifdef AHC_TARGET_MODE
+/*
+ * Add a target mode event to this lun's queue
+ */
+static void
+ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
+ u_int initiator_id, u_int event_type, u_int event_arg)
+{
+ struct ahc_tmode_event *event;
+ int pending;
+
+ xpt_freeze_devq(lstate->path, /*count*/1);
+ if (lstate->event_w_idx >= lstate->event_r_idx)
+ pending = lstate->event_w_idx - lstate->event_r_idx;
+ else
+ pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
+ - (lstate->event_r_idx - lstate->event_w_idx);
+
+ if (event_type == EVENT_TYPE_BUS_RESET
+ || event_type == MSG_BUS_DEV_RESET) {
+ /*
+ * Any earlier events are irrelevant, so reset our buffer.
+ * This has the effect of allowing us to deal with reset
+ * floods (an external device holding down the reset line)
+ * without losing the event that is really interesting.
+ */
+ lstate->event_r_idx = 0;
+ lstate->event_w_idx = 0;
+ xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
+ }
+
+ if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
+ xpt_print_path(lstate->path);
+ printk("immediate event %x:%x lost\n",
+ lstate->event_buffer[lstate->event_r_idx].event_type,
+ lstate->event_buffer[lstate->event_r_idx].event_arg);
+ lstate->event_r_idx++;
+ if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
+ lstate->event_r_idx = 0;
+ xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
+ }
+
+ event = &lstate->event_buffer[lstate->event_w_idx];
+ event->initiator_id = initiator_id;
+ event->event_type = event_type;
+ event->event_arg = event_arg;
+ lstate->event_w_idx++;
+ if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
+ lstate->event_w_idx = 0;
+}
+
+/*
+ * Send any target mode events queued up waiting
+ * for immediate notify resources.
+ */
+void
+ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
+{
+ struct ccb_hdr *ccbh;
+ struct ccb_immed_notify *inot;
+
+ while (lstate->event_r_idx != lstate->event_w_idx
+ && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
+ struct ahc_tmode_event *event;
+
+ event = &lstate->event_buffer[lstate->event_r_idx];
+ SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
+ inot = (struct ccb_immed_notify *)ccbh;
+ switch (event->event_type) {
+ case EVENT_TYPE_BUS_RESET:
+ ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
+ break;
+ default:
+ ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
+ inot->message_args[0] = event->event_type;
+ inot->message_args[1] = event->event_arg;
+ break;
+ }
+ inot->initiator_id = event->initiator_id;
+ inot->sense_len = 0;
+ xpt_done((union ccb *)inot);
+ lstate->event_r_idx++;
+ if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
+ lstate->event_r_idx = 0;
+ }
+}
+#endif
+
+/******************** Sequencer Program Patching/Download *********************/
+
+#ifdef AHC_DUMP_SEQ
+void
+ahc_dumpseq(struct ahc_softc* ahc)
+{
+ int i;
+
+ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+ ahc_outb(ahc, SEQADDR0, 0);
+ ahc_outb(ahc, SEQADDR1, 0);
+ for (i = 0; i < ahc->instruction_ram_size; i++) {
+ uint8_t ins_bytes[4];
+
+ ahc_insb(ahc, SEQRAM, ins_bytes, 4);
+ printk("0x%08x\n", ins_bytes[0] << 24
+ | ins_bytes[1] << 16
+ | ins_bytes[2] << 8
+ | ins_bytes[3]);
+ }
+}
+#endif
+
+static int
+ahc_loadseq(struct ahc_softc *ahc)
+{
+ struct cs cs_table[num_critical_sections];
+ u_int begin_set[num_critical_sections];
+ u_int end_set[num_critical_sections];
+ const struct patch *cur_patch;
+ u_int cs_count;
+ u_int cur_cs;
+ u_int i;
+ u_int skip_addr;
+ u_int sg_prefetch_cnt;
+ int downloaded;
+ uint8_t download_consts[7];
+
+ /*
+ * Start out with 0 critical sections
+ * that apply to this firmware load.
+ */
+ cs_count = 0;
+ cur_cs = 0;
+ memset(begin_set, 0, sizeof(begin_set));
+ memset(end_set, 0, sizeof(end_set));
+
+ /* Setup downloadable constant table */
+ download_consts[QOUTFIFO_OFFSET] = 0;
+ if (ahc->targetcmds != NULL)
+ download_consts[QOUTFIFO_OFFSET] += 32;
+ download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
+ download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
+ download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
+ sg_prefetch_cnt = ahc->pci_cachesize;
+ if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
+ sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
+ download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
+ download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
+ download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
+
+ cur_patch = patches;
+ downloaded = 0;
+ skip_addr = 0;
+ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+ ahc_outb(ahc, SEQADDR0, 0);
+ ahc_outb(ahc, SEQADDR1, 0);
+
+ for (i = 0; i < sizeof(seqprog)/4; i++) {
+ if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
+ /*
+ * Don't download this instruction as it
+ * is in a patch that was removed.
+ */
+ continue;
+ }
+
+ if (downloaded == ahc->instruction_ram_size) {
+ /*
+ * We're about to exceed the instruction
+ * storage capacity for this chip. Fail
+ * the load.
+ */
+ printk("\n%s: Program too large for instruction memory "
+ "size of %d!\n", ahc_name(ahc),
+ ahc->instruction_ram_size);
+ return (ENOMEM);
+ }
+
+ /*
+ * Move through the CS table until we find a CS
+ * that might apply to this instruction.
+ */
+ for (; cur_cs < num_critical_sections; cur_cs++) {
+ if (critical_sections[cur_cs].end <= i) {
+ if (begin_set[cs_count] == TRUE
+ && end_set[cs_count] == FALSE) {
+ cs_table[cs_count].end = downloaded;
+ end_set[cs_count] = TRUE;
+ cs_count++;
+ }
+ continue;
+ }
+ if (critical_sections[cur_cs].begin <= i
+ && begin_set[cs_count] == FALSE) {
+ cs_table[cs_count].begin = downloaded;
+ begin_set[cs_count] = TRUE;
+ }
+ break;
+ }
+ ahc_download_instr(ahc, i, download_consts);
+ downloaded++;
+ }
+
+ ahc->num_critical_sections = cs_count;
+ if (cs_count != 0) {
+
+ cs_count *= sizeof(struct cs);
+ ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ if (ahc->critical_sections == NULL)
+ panic("ahc_loadseq: Could not malloc");
+ memcpy(ahc->critical_sections, cs_table, cs_count);
+ }
+ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
+
+ if (bootverbose) {
+ printk(" %d instructions downloaded\n", downloaded);
+ printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
+ ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
+ }
+ return (0);
+}
+
+static int
+ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
+ u_int start_instr, u_int *skip_addr)
+{
+ const struct patch *cur_patch;
+ const struct patch *last_patch;
+ u_int num_patches;
+
+ num_patches = ARRAY_SIZE(patches);
+ last_patch = &patches[num_patches];
+ cur_patch = *start_patch;
+
+ while (cur_patch < last_patch && start_instr == cur_patch->begin) {
+
+ if (cur_patch->patch_func(ahc) == 0) {
+
+ /* Start rejecting code */
+ *skip_addr = start_instr + cur_patch->skip_instr;
+ cur_patch += cur_patch->skip_patch;
+ } else {
+ /* Accepted this patch. Advance to the next
+ * one and wait for our intruction pointer to
+ * hit this point.
+ */
+ cur_patch++;
+ }
+ }
+
+ *start_patch = cur_patch;
+ if (start_instr < *skip_addr)
+ /* Still skipping */
+ return (0);
+
+ return (1);
+}
+
+static void
+ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
+{
+ union ins_formats instr;
+ struct ins_format1 *fmt1_ins;
+ struct ins_format3 *fmt3_ins;
+ u_int opcode;
+
+ /*
+ * The firmware is always compiled into a little endian format.
+ */
+ instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
+
+ fmt1_ins = &instr.format1;
+ fmt3_ins = NULL;
+
+ /* Pull the opcode */
+ opcode = instr.format1.opcode;
+ switch (opcode) {
+ case AIC_OP_JMP:
+ case AIC_OP_JC:
+ case AIC_OP_JNC:
+ case AIC_OP_CALL:
+ case AIC_OP_JNE:
+ case AIC_OP_JNZ:
+ case AIC_OP_JE:
+ case AIC_OP_JZ:
+ {
+ const struct patch *cur_patch;
+ int address_offset;
+ u_int address;
+ u_int skip_addr;
+ u_int i;
+
+ fmt3_ins = &instr.format3;
+ address_offset = 0;
+ address = fmt3_ins->address;
+ cur_patch = patches;
+ skip_addr = 0;
+
+ for (i = 0; i < address;) {
+
+ ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
+
+ if (skip_addr > i) {
+ int end_addr;
+
+ end_addr = min(address, skip_addr);
+ address_offset += end_addr - i;
+ i = skip_addr;
+ } else {
+ i++;
+ }
+ }
+ address -= address_offset;
+ fmt3_ins->address = address;
+ /* FALLTHROUGH */
+ }
+ case AIC_OP_OR:
+ case AIC_OP_AND:
+ case AIC_OP_XOR:
+ case AIC_OP_ADD:
+ case AIC_OP_ADC:
+ case AIC_OP_BMOV:
+ if (fmt1_ins->parity != 0) {
+ fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
+ }
+ fmt1_ins->parity = 0;
+ if ((ahc->features & AHC_CMD_CHAN) == 0
+ && opcode == AIC_OP_BMOV) {
+ /*
+ * Block move was added at the same time
+ * as the command channel. Verify that
+ * this is only a move of a single element
+ * and convert the BMOV to a MOV
+ * (AND with an immediate of FF).
+ */
+ if (fmt1_ins->immediate != 1)
+ panic("%s: BMOV not supported\n",
+ ahc_name(ahc));
+ fmt1_ins->opcode = AIC_OP_AND;
+ fmt1_ins->immediate = 0xff;
+ }
+ /* FALLTHROUGH */
+ case AIC_OP_ROL:
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ int i, count;
+
+ /* Calculate odd parity for the instruction */
+ for (i = 0, count = 0; i < 31; i++) {
+ uint32_t mask;
+
+ mask = 0x01 << i;
+ if ((instr.integer & mask) != 0)
+ count++;
+ }
+ if ((count & 0x01) == 0)
+ instr.format1.parity = 1;
+ } else {
+ /* Compress the instruction for older sequencers */
+ if (fmt3_ins != NULL) {
+ instr.integer =
+ fmt3_ins->immediate
+ | (fmt3_ins->source << 8)
+ | (fmt3_ins->address << 16)
+ | (fmt3_ins->opcode << 25);
+ } else {
+ instr.integer =
+ fmt1_ins->immediate
+ | (fmt1_ins->source << 8)
+ | (fmt1_ins->destination << 16)
+ | (fmt1_ins->ret << 24)
+ | (fmt1_ins->opcode << 25);
+ }
+ }
+ /* The sequencer is a little endian cpu */
+ instr.integer = ahc_htole32(instr.integer);
+ ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
+ break;
+ default:
+ panic("Unknown opcode encountered in seq program");
+ break;
+ }
+}
+
+int
+ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
+ const char *name, u_int address, u_int value,
+ u_int *cur_column, u_int wrap_point)
+{
+ int printed;
+ u_int printed_mask;
+
+ if (cur_column != NULL && *cur_column >= wrap_point) {
+ printk("\n");
+ *cur_column = 0;
+ }
+ printed = printk("%s[0x%x]", name, value);
+ if (table == NULL) {
+ printed += printk(" ");
+ *cur_column += printed;
+ return (printed);
+ }
+ printed_mask = 0;
+ while (printed_mask != 0xFF) {
+ int entry;
+
+ for (entry = 0; entry < num_entries; entry++) {
+ if (((value & table[entry].mask)
+ != table[entry].value)
+ || ((printed_mask & table[entry].mask)
+ == table[entry].mask))
+ continue;
+
+ printed += printk("%s%s",
+ printed_mask == 0 ? ":(" : "|",
+ table[entry].name);
+ printed_mask |= table[entry].mask;
+
+ break;
+ }
+ if (entry >= num_entries)
+ break;
+ }
+ if (printed_mask != 0)
+ printed += printk(") ");
+ else
+ printed += printk(" ");
+ if (cur_column != NULL)
+ *cur_column += printed;
+ return (printed);
+}
+
+void
+ahc_dump_card_state(struct ahc_softc *ahc)
+{
+ struct scb *scb;
+ struct scb_tailq *untagged_q;
+ u_int cur_col;
+ int paused;
+ int target;
+ int maxtarget;
+ int i;
+ uint8_t last_phase;
+ uint8_t qinpos;
+ uint8_t qintail;
+ uint8_t qoutpos;
+ uint8_t scb_index;
+ uint8_t saved_scbptr;
+
+ if (ahc_is_paused(ahc)) {
+ paused = 1;
+ } else {
+ paused = 0;
+ ahc_pause(ahc);
+ }
+
+ saved_scbptr = ahc_inb(ahc, SCBPTR);
+ last_phase = ahc_inb(ahc, LASTPHASE);
+ printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
+ "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
+ ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
+ ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
+ if (paused)
+ printk("Card was paused\n");
+ printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
+ ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
+ ahc_inb(ahc, ARG_2));
+ printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
+ ahc_inb(ahc, SCBPTR));
+ cur_col = 0;
+ if ((ahc->features & AHC_DT) != 0)
+ ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
+ ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
+ ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
+ ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
+ ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
+ ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
+ ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
+ ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
+ ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
+ ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
+ ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
+ ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
+ ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
+ ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
+ ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
+ ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
+ ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
+ ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
+ ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
+ if (cur_col != 0)
+ printk("\n");
+ printk("STACK:");
+ for (i = 0; i < STACK_SIZE; i++)
+ printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
+ printk("\nSCB count = %d\n", ahc->scb_data->numscbs);
+ printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
+ printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
+ /* QINFIFO */
+ printk("QINFIFO entries: ");
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ qinpos = ahc_inb(ahc, SNSCB_QOFF);
+ ahc_outb(ahc, SNSCB_QOFF, qinpos);
+ } else
+ qinpos = ahc_inb(ahc, QINPOS);
+ qintail = ahc->qinfifonext;
+ while (qinpos != qintail) {
+ printk("%d ", ahc->qinfifo[qinpos]);
+ qinpos++;
+ }
+ printk("\n");
+
+ printk("Waiting Queue entries: ");
+ scb_index = ahc_inb(ahc, WAITING_SCBH);
+ i = 0;
+ while (scb_index != SCB_LIST_NULL && i++ < 256) {
+ ahc_outb(ahc, SCBPTR, scb_index);
+ printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
+ scb_index = ahc_inb(ahc, SCB_NEXT);
+ }
+ printk("\n");
+
+ printk("Disconnected Queue entries: ");
+ scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
+ i = 0;
+ while (scb_index != SCB_LIST_NULL && i++ < 256) {
+ ahc_outb(ahc, SCBPTR, scb_index);
+ printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
+ scb_index = ahc_inb(ahc, SCB_NEXT);
+ }
+ printk("\n");
+
+ ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
+ printk("QOUTFIFO entries: ");
+ qoutpos = ahc->qoutfifonext;
+ i = 0;
+ while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
+ printk("%d ", ahc->qoutfifo[qoutpos]);
+ qoutpos++;
+ }
+ printk("\n");
+
+ printk("Sequencer Free SCB List: ");
+ scb_index = ahc_inb(ahc, FREE_SCBH);
+ i = 0;
+ while (scb_index != SCB_LIST_NULL && i++ < 256) {
+ ahc_outb(ahc, SCBPTR, scb_index);
+ printk("%d ", scb_index);
+ scb_index = ahc_inb(ahc, SCB_NEXT);
+ }
+ printk("\n");
+
+ printk("Sequencer SCB Info: ");
+ for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+ ahc_outb(ahc, SCBPTR, i);
+ cur_col = printk("\n%3d ", i);
+
+ ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
+ ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
+ ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
+ ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
+ }
+ printk("\n");
+
+ printk("Pending list: ");
+ i = 0;
+ LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+ if (i++ > 256)
+ break;
+ cur_col = printk("\n%3d ", scb->hscb->tag);
+ ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
+ ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
+ ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
+ if ((ahc->flags & AHC_PAGESCBS) == 0) {
+ ahc_outb(ahc, SCBPTR, scb->hscb->tag);
+ printk("(");
+ ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
+ &cur_col, 60);
+ ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
+ printk(")");
+ }
+ }
+ printk("\n");
+
+ printk("Kernel Free SCB list: ");
+ i = 0;
+ SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
+ if (i++ > 256)
+ break;
+ printk("%d ", scb->hscb->tag);
+ }
+ printk("\n");
+
+ maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
+ for (target = 0; target <= maxtarget; target++) {
+ untagged_q = &ahc->untagged_queues[target];
+ if (TAILQ_FIRST(untagged_q) == NULL)
+ continue;
+ printk("Untagged Q(%d): ", target);
+ i = 0;
+ TAILQ_FOREACH(scb, untagged_q, links.tqe) {
+ if (i++ > 256)
+ break;
+ printk("%d ", scb->hscb->tag);
+ }
+ printk("\n");
+ }
+
+ ahc_platform_dump_card_state(ahc);
+ printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
+ ahc_outb(ahc, SCBPTR, saved_scbptr);
+ if (paused == 0)
+ ahc_unpause(ahc);
+}
+
+/************************* Target Mode ****************************************/
+#ifdef AHC_TARGET_MODE
+cam_status
+ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
+ struct ahc_tmode_tstate **tstate,
+ struct ahc_tmode_lstate **lstate,
+ int notfound_failure)
+{
+
+ if ((ahc->features & AHC_TARGETMODE) == 0)
+ return (CAM_REQ_INVALID);
+
+ /*
+ * Handle the 'black hole' device that sucks up
+ * requests to unattached luns on enabled targets.
+ */
+ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
+ && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
+ *tstate = NULL;
+ *lstate = ahc->black_hole;
+ } else {
+ u_int max_id;
+
+ max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
+ if (ccb->ccb_h.target_id >= max_id)
+ return (CAM_TID_INVALID);
+
+ if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
+ return (CAM_LUN_INVALID);
+
+ *tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
+ *lstate = NULL;
+ if (*tstate != NULL)
+ *lstate =
+ (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
+ }
+
+ if (notfound_failure != 0 && *lstate == NULL)
+ return (CAM_PATH_INVALID);
+
+ return (CAM_REQ_CMP);
+}
+
+void
+ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
+{
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_tmode_lstate *lstate;
+ struct ccb_en_lun *cel;
+ cam_status status;
+ u_long s;
+ u_int target;
+ u_int lun;
+ u_int target_mask;
+ u_int our_id;
+ int error;
+ char channel;
+
+ status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
+ /*notfound_failure*/FALSE);
+
+ if (status != CAM_REQ_CMP) {
+ ccb->ccb_h.status = status;
+ return;
+ }
+
+ if (cam_sim_bus(sim) == 0)
+ our_id = ahc->our_id;
+ else
+ our_id = ahc->our_id_b;
+
+ if (ccb->ccb_h.target_id != our_id) {
+ /*
+ * our_id represents our initiator ID, or
+ * the ID of the first target to have an
+ * enabled lun in target mode. There are
+ * two cases that may preclude enabling a
+ * target id other than our_id.
+ *
+ * o our_id is for an active initiator role.
+ * Since the hardware does not support
+ * reselections to the initiator role at
+ * anything other than our_id, and our_id
+ * is used by the hardware to indicate the
+ * ID to use for both select-out and
+ * reselect-out operations, the only target
+ * ID we can support in this mode is our_id.
+ *
+ * o The MULTARGID feature is not available and
+ * a previous target mode ID has been enabled.
+ */
+ if ((ahc->features & AHC_MULTIROLE) != 0) {
+
+ if ((ahc->features & AHC_MULTI_TID) != 0
+ && (ahc->flags & AHC_INITIATORROLE) != 0) {
+ /*
+ * Only allow additional targets if
+ * the initiator role is disabled.
+ * The hardware cannot handle a re-select-in
+ * on the initiator id during a re-select-out
+ * on a different target id.
+ */
+ status = CAM_TID_INVALID;
+ } else if ((ahc->flags & AHC_INITIATORROLE) != 0
+ || ahc->enabled_luns > 0) {
+ /*
+ * Only allow our target id to change
+ * if the initiator role is not configured
+ * and there are no enabled luns which
+ * are attached to the currently registered
+ * scsi id.
+ */
+ status = CAM_TID_INVALID;
+ }
+ } else if ((ahc->features & AHC_MULTI_TID) == 0
+ && ahc->enabled_luns > 0) {
+
+ status = CAM_TID_INVALID;
+ }
+ }
+
+ if (status != CAM_REQ_CMP) {
+ ccb->ccb_h.status = status;
+ return;
+ }
+
+ /*
+ * We now have an id that is valid.
+ * If we aren't in target mode, switch modes.
+ */
+ if ((ahc->flags & AHC_TARGETROLE) == 0
+ && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
+ u_long s;
+ ahc_flag saved_flags;
+
+ printk("Configuring Target Mode\n");
+ ahc_lock(ahc, &s);
+ if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
+ ccb->ccb_h.status = CAM_BUSY;
+ ahc_unlock(ahc, &s);
+ return;
+ }
+ saved_flags = ahc->flags;
+ ahc->flags |= AHC_TARGETROLE;
+ if ((ahc->features & AHC_MULTIROLE) == 0)
+ ahc->flags &= ~AHC_INITIATORROLE;
+ ahc_pause(ahc);
+ error = ahc_loadseq(ahc);
+ if (error != 0) {
+ /*
+ * Restore original configuration and notify
+ * the caller that we cannot support target mode.
+ * Since the adapter started out in this
+ * configuration, the firmware load will succeed,
+ * so there is no point in checking ahc_loadseq's
+ * return value.
+ */
+ ahc->flags = saved_flags;
+ (void)ahc_loadseq(ahc);
+ ahc_restart(ahc);
+ ahc_unlock(ahc, &s);
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ return;
+ }
+ ahc_restart(ahc);
+ ahc_unlock(ahc, &s);
+ }
+ cel = &ccb->cel;
+ target = ccb->ccb_h.target_id;
+ lun = ccb->ccb_h.target_lun;
+ channel = SIM_CHANNEL(ahc, sim);
+ target_mask = 0x01 << target;
+ if (channel == 'B')
+ target_mask <<= 8;
+
+ if (cel->enable != 0) {
+ u_int scsiseq;
+
+ /* Are we already enabled?? */
+ if (lstate != NULL) {
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Lun already enabled\n");
+ ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
+ return;
+ }
+
+ if (cel->grp6_len != 0
+ || cel->grp7_len != 0) {
+ /*
+ * Don't (yet?) support vendor
+ * specific commands.
+ */
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ printk("Non-zero Group Codes\n");
+ return;
+ }
+
+ /*
+ * Seems to be okay.
+ * Setup our data structures.
+ */
+ if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
+ tstate = ahc_alloc_tstate(ahc, target, channel);
+ if (tstate == NULL) {
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Couldn't allocate tstate\n");
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ return;
+ }
+ }
+ lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
+ if (lstate == NULL) {
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Couldn't allocate lstate\n");
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ return;
+ }
+ status = xpt_create_path(&lstate->path, /*periph*/NULL,
+ xpt_path_path_id(ccb->ccb_h.path),
+ xpt_path_target_id(ccb->ccb_h.path),
+ xpt_path_lun_id(ccb->ccb_h.path));
+ if (status != CAM_REQ_CMP) {
+ kfree(lstate);
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Couldn't allocate path\n");
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ return;
+ }
+ SLIST_INIT(&lstate->accept_tios);
+ SLIST_INIT(&lstate->immed_notifies);
+ ahc_lock(ahc, &s);
+ ahc_pause(ahc);
+ if (target != CAM_TARGET_WILDCARD) {
+ tstate->enabled_luns[lun] = lstate;
+ ahc->enabled_luns++;
+
+ if ((ahc->features & AHC_MULTI_TID) != 0) {
+ u_int targid_mask;
+
+ targid_mask = ahc_inb(ahc, TARGID)
+ | (ahc_inb(ahc, TARGID + 1) << 8);
+
+ targid_mask |= target_mask;
+ ahc_outb(ahc, TARGID, targid_mask);
+ ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
+
+ ahc_update_scsiid(ahc, targid_mask);
+ } else {
+ u_int our_id;
+ char channel;
+
+ channel = SIM_CHANNEL(ahc, sim);
+ our_id = SIM_SCSI_ID(ahc, sim);
+
+ /*
+ * This can only happen if selections
+ * are not enabled
+ */
+ if (target != our_id) {
+ u_int sblkctl;
+ char cur_channel;
+ int swap;
+
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ cur_channel = (sblkctl & SELBUSB)
+ ? 'B' : 'A';
+ if ((ahc->features & AHC_TWIN) == 0)
+ cur_channel = 'A';
+ swap = cur_channel != channel;
+ if (channel == 'A')
+ ahc->our_id = target;
+ else
+ ahc->our_id_b = target;
+
+ if (swap)
+ ahc_outb(ahc, SBLKCTL,
+ sblkctl ^ SELBUSB);
+
+ ahc_outb(ahc, SCSIID, target);
+
+ if (swap)
+ ahc_outb(ahc, SBLKCTL, sblkctl);
+ }
+ }
+ } else
+ ahc->black_hole = lstate;
+ /* Allow select-in operations */
+ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
+ scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
+ scsiseq |= ENSELI;
+ ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
+ scsiseq = ahc_inb(ahc, SCSISEQ);
+ scsiseq |= ENSELI;
+ ahc_outb(ahc, SCSISEQ, scsiseq);
+ }
+ ahc_unpause(ahc);
+ ahc_unlock(ahc, &s);
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Lun now enabled for target mode\n");
+ } else {
+ struct scb *scb;
+ int i, empty;
+
+ if (lstate == NULL) {
+ ccb->ccb_h.status = CAM_LUN_INVALID;
+ return;
+ }
+
+ ahc_lock(ahc, &s);
+
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+ struct ccb_hdr *ccbh;
+
+ ccbh = &scb->io_ctx->ccb_h;
+ if (ccbh->func_code == XPT_CONT_TARGET_IO
+ && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
+ printk("CTIO pending\n");
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ ahc_unlock(ahc, &s);
+ return;
+ }
+ }
+
+ if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
+ printk("ATIOs pending\n");
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ }
+
+ if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
+ printk("INOTs pending\n");
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ }
+
+ if (ccb->ccb_h.status != CAM_REQ_CMP) {
+ ahc_unlock(ahc, &s);
+ return;
+ }
+
+ xpt_print_path(ccb->ccb_h.path);
+ printk("Target mode disabled\n");
+ xpt_free_path(lstate->path);
+ kfree(lstate);
+
+ ahc_pause(ahc);
+ /* Can we clean up the target too? */
+ if (target != CAM_TARGET_WILDCARD) {
+ tstate->enabled_luns[lun] = NULL;
+ ahc->enabled_luns--;
+ for (empty = 1, i = 0; i < 8; i++)
+ if (tstate->enabled_luns[i] != NULL) {
+ empty = 0;
+ break;
+ }
+
+ if (empty) {
+ ahc_free_tstate(ahc, target, channel,
+ /*force*/FALSE);
+ if (ahc->features & AHC_MULTI_TID) {
+ u_int targid_mask;
+
+ targid_mask = ahc_inb(ahc, TARGID)
+ | (ahc_inb(ahc, TARGID + 1)
+ << 8);
+
+ targid_mask &= ~target_mask;
+ ahc_outb(ahc, TARGID, targid_mask);
+ ahc_outb(ahc, TARGID+1,
+ (targid_mask >> 8));
+ ahc_update_scsiid(ahc, targid_mask);
+ }
+ }
+ } else {
+
+ ahc->black_hole = NULL;
+
+ /*
+ * We can't allow selections without
+ * our black hole device.
+ */
+ empty = TRUE;
+ }
+ if (ahc->enabled_luns == 0) {
+ /* Disallow select-in */
+ u_int scsiseq;
+
+ scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
+ scsiseq &= ~ENSELI;
+ ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
+ scsiseq = ahc_inb(ahc, SCSISEQ);
+ scsiseq &= ~ENSELI;
+ ahc_outb(ahc, SCSISEQ, scsiseq);
+
+ if ((ahc->features & AHC_MULTIROLE) == 0) {
+ printk("Configuring Initiator Mode\n");
+ ahc->flags &= ~AHC_TARGETROLE;
+ ahc->flags |= AHC_INITIATORROLE;
+ /*
+ * Returning to a configuration that
+ * fit previously will always succeed.
+ */
+ (void)ahc_loadseq(ahc);
+ ahc_restart(ahc);
+ /*
+ * Unpaused. The extra unpause
+ * that follows is harmless.
+ */
+ }
+ }
+ ahc_unpause(ahc);
+ ahc_unlock(ahc, &s);
+ }
+}
+
+static void
+ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
+{
+ u_int scsiid_mask;
+ u_int scsiid;
+
+ if ((ahc->features & AHC_MULTI_TID) == 0)
+ panic("ahc_update_scsiid called on non-multitid unit\n");
+
+ /*
+ * Since we will rely on the TARGID mask
+ * for selection enables, ensure that OID
+ * in SCSIID is not set to some other ID
+ * that we don't want to allow selections on.
+ */
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
+ else
+ scsiid = ahc_inb(ahc, SCSIID);
+ scsiid_mask = 0x1 << (scsiid & OID);
+ if ((targid_mask & scsiid_mask) == 0) {
+ u_int our_id;
+
+ /* ffs counts from 1 */
+ our_id = ffs(targid_mask);
+ if (our_id == 0)
+ our_id = ahc->our_id;
+ else
+ our_id--;
+ scsiid &= TID;
+ scsiid |= our_id;
+ }
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
+ else
+ ahc_outb(ahc, SCSIID, scsiid);
+}
+
+static void
+ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
+{
+ struct target_cmd *cmd;
+
+ /*
+ * If the card supports auto-access pause,
+ * we can access the card directly regardless
+ * of whether it is paused or not.
+ */
+ if ((ahc->features & AHC_AUTOPAUSE) != 0)
+ paused = TRUE;
+
+ ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
+ while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
+
+ /*
+ * Only advance through the queue if we
+ * have the resources to process the command.
+ */
+ if (ahc_handle_target_cmd(ahc, cmd) != 0)
+ break;
+
+ cmd->cmd_valid = 0;
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+ ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
+ sizeof(struct target_cmd),
+ BUS_DMASYNC_PREREAD);
+ ahc->tqinfifonext++;
+
+ /*
+ * Lazily update our position in the target mode incoming
+ * command queue as seen by the sequencer.
+ */
+ if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
+ if ((ahc->features & AHC_HS_MAILBOX) != 0) {
+ u_int hs_mailbox;
+
+ hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
+ hs_mailbox &= ~HOST_TQINPOS;
+ hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
+ ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
+ } else {
+ if (!paused)
+ ahc_pause(ahc);
+ ahc_outb(ahc, KERNEL_TQINPOS,
+ ahc->tqinfifonext & HOST_TQINPOS);
+ if (!paused)
+ ahc_unpause(ahc);
+ }
+ }
+ }
+}
+
+static int
+ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
+{
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_tmode_lstate *lstate;
+ struct ccb_accept_tio *atio;
+ uint8_t *byte;
+ int initiator;
+ int target;
+ int lun;
+
+ initiator = SCSIID_TARGET(ahc, cmd->scsiid);
+ target = SCSIID_OUR_ID(cmd->scsiid);
+ lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);
+
+ byte = cmd->bytes;
+ tstate = ahc->enabled_targets[target];
+ lstate = NULL;
+ if (tstate != NULL)
+ lstate = tstate->enabled_luns[lun];
+
+ /*
+ * Commands for disabled luns go to the black hole driver.
+ */
+ if (lstate == NULL)
+ lstate = ahc->black_hole;
+
+ atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
+ if (atio == NULL) {
+ ahc->flags |= AHC_TQINFIFO_BLOCKED;
+ /*
+ * Wait for more ATIOs from the peripheral driver for this lun.
+ */
+ if (bootverbose)
+ printk("%s: ATIOs exhausted\n", ahc_name(ahc));
+ return (1);
+ } else
+ ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
+#if 0
+ printk("Incoming command from %d for %d:%d%s\n",
+ initiator, target, lun,
+ lstate == ahc->black_hole ? "(Black Holed)" : "");
+#endif
+ SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
+
+ if (lstate == ahc->black_hole) {
+ /* Fill in the wildcards */
+ atio->ccb_h.target_id = target;
+ atio->ccb_h.target_lun = lun;
+ }
+
+ /*
+ * Package it up and send it off to
+ * whomever has this lun enabled.
+ */
+ atio->sense_len = 0;
+ atio->init_id = initiator;
+ if (byte[0] != 0xFF) {
+ /* Tag was included */
+ atio->tag_action = *byte++;
+ atio->tag_id = *byte++;
+ atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
+ } else {
+ atio->ccb_h.flags = 0;
+ }
+ byte++;
+
+ /* Okay. Now determine the cdb size based on the command code */
+ switch (*byte >> CMD_GROUP_CODE_SHIFT) {
+ case 0:
+ atio->cdb_len = 6;
+ break;
+ case 1:
+ case 2:
+ atio->cdb_len = 10;
+ break;
+ case 4:
+ atio->cdb_len = 16;
+ break;
+ case 5:
+ atio->cdb_len = 12;
+ break;
+ case 3:
+ default:
+ /* Only copy the opcode. */
+ atio->cdb_len = 1;
+ printk("Reserved or VU command code type encountered\n");
+ break;
+ }
+
+ memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
+
+ atio->ccb_h.status |= CAM_CDB_RECVD;
+
+ if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
+ /*
+ * We weren't allowed to disconnect.
+ * We're hanging on the bus until a
+ * continue target I/O comes in response
+ * to this accept tio.
+ */
+#if 0
+ printk("Received Immediate Command %d:%d:%d - %p\n",
+ initiator, target, lun, ahc->pending_device);
+#endif
+ ahc->pending_device = lstate;
+ ahc_freeze_ccb((union ccb *)atio);
+ atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
+ }
+ xpt_done((union ccb*)atio);
+ return (0);
+}
+
+#endif
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
new file mode 100644
index 000000000..0b57b783e
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -0,0 +1,97 @@
+/*
+ * Inline routines shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC7XXX_INLINE_H_
+#define _AIC7XXX_INLINE_H_
+
+/************************* Sequencer Execution Control ************************/
+int ahc_is_paused(struct ahc_softc *ahc);
+void ahc_pause(struct ahc_softc *ahc);
+void ahc_unpause(struct ahc_softc *ahc);
+
+/************************** Memory mapping routines ***************************/
+void ahc_sync_sglist(struct ahc_softc *ahc,
+ struct scb *scb, int op);
+
+/******************************** Debugging ***********************************/
+static inline char *ahc_name(struct ahc_softc *ahc);
+
+static inline char *ahc_name(struct ahc_softc *ahc)
+{
+ return (ahc->name);
+}
+
+/*********************** Miscellaneous Support Functions ***********************/
+
+struct ahc_initiator_tinfo *
+ ahc_fetch_transinfo(struct ahc_softc *ahc,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahc_tmode_tstate **tstate);
+uint16_t
+ ahc_inw(struct ahc_softc *ahc, u_int port);
+void ahc_outw(struct ahc_softc *ahc, u_int port,
+ u_int value);
+uint32_t
+ ahc_inl(struct ahc_softc *ahc, u_int port);
+void ahc_outl(struct ahc_softc *ahc, u_int port,
+ uint32_t value);
+uint64_t
+ ahc_inq(struct ahc_softc *ahc, u_int port);
+void ahc_outq(struct ahc_softc *ahc, u_int port,
+ uint64_t value);
+struct scb*
+ ahc_get_scb(struct ahc_softc *ahc);
+void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scb *
+ ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
+void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scsi_sense_data *
+ ahc_get_sense_buf(struct ahc_softc *ahc,
+ struct scb *scb);
+
+/************************** Interrupt Processing ******************************/
+int ahc_intr(struct ahc_softc *ahc);
+
+#endif /* _AIC7XXX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
new file mode 100644
index 000000000..a2f2c774c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -0,0 +1,2598 @@
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
+ * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
+ * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
+ * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
+ * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
+ * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
+ * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
+ * ANSI SCSI-2 specification (draft 10c), ...
+ *
+ * --------------------------------------------------------------------------
+ *
+ * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
+ *
+ * Substantially modified to include support for wide and twin bus
+ * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
+ * SCB paging, and other rework of the code.
+ *
+ * --------------------------------------------------------------------------
+ * Copyright (c) 1994-2000 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ *---------------------------------------------------------------------------
+ *
+ * Thanks also go to (in alphabetical order) the following:
+ *
+ * Rory Bolt - Sequencer bug fixes
+ * Jay Estabrook - Initial DEC Alpha support
+ * Doug Ledford - Much needed abort/reset bug fixes
+ * Kai Makisara - DMAing of SCBs
+ *
+ * A Boot time option was also added for not resetting the scsi bus.
+ *
+ * Form: aic7xxx=extended
+ * aic7xxx=no_reset
+ * aic7xxx=verbose
+ *
+ * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
+ *
+ * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
+ */
+
+/*
+ * Further driver modifications made by Doug Ledford <dledford@redhat.com>
+ *
+ * Copyright (c) 1997-1999 Doug Ledford
+ *
+ * These changes are released under the same licensing terms as the FreeBSD
+ * driver written by Justin Gibbs. Please see his Copyright notice above
+ * for the exact terms and conditions covering my changes as well as the
+ * warranty statement.
+ *
+ * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
+ * but are not limited to:
+ *
+ * 1: Import of the latest FreeBSD sequencer code for this driver
+ * 2: Modification of kernel code to accommodate different sequencer semantics
+ * 3: Extensive changes throughout kernel portion of driver to improve
+ * abort/reset processing and error hanndling
+ * 4: Other work contributed by various people on the Internet
+ * 5: Changes to printk information and verbosity selection code
+ * 6: General reliability related changes, especially in IRQ management
+ * 7: Modifications to the default probe/attach order for supported cards
+ * 8: SMP friendliness has been improved
+ *
+ */
+
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include <scsi/scsicam.h>
+
+static struct scsi_transport_template *ahc_linux_transport_template = NULL;
+
+#include <linux/init.h> /* __setup */
+#include <linux/mm.h> /* For fetching system memory size */
+#include <linux/blkdev.h> /* For block_size() */
+#include <linux/delay.h> /* For ssleep/msleep */
+#include <linux/slab.h>
+
+
+/*
+ * Set this to the delay in seconds after SCSI bus reset.
+ * Note, we honor this only for the initial bus reset.
+ * The scsi error recovery code performs its own bus settle
+ * delay handling for error recovery actions.
+ */
+#ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
+#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
+#else
+#define AIC7XXX_RESET_DELAY 5000
+#endif
+
+/*
+ * To change the default number of tagged transactions allowed per-device,
+ * add a line to the lilo.conf file like:
+ * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * The tag_commands is an array of 16 to allow for wide and twin adapters.
+ * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
+ * for channel 1.
+ */
+typedef struct {
+ uint8_t tag_commands[16]; /* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Modify this as you see fit for your system.
+ *
+ * 0 tagged queuing disabled
+ * 1 <= n <= 253 n == max tags ever dispatched.
+ *
+ * The driver will throttle the number of commands dispatched to a
+ * device if it returns queue full. For devices with a fixed maximum
+ * queue depth, the driver will eventually determine this depth and
+ * lock it in (a console message is printed to indicate that a lock
+ * has occurred). On some devices, queue full is returned for a temporary
+ * resource shortage. These devices will return queue full at varying
+ * depths. The driver will throttle back when the queue fulls occur and
+ * attempt to slowly increase the depth over time as the device recovers
+ * from the resource shortage.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic7xxx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to attempt to use up to 64 tags for ID 1.
+ *
+ * The third line is the same as the first line.
+ *
+ * The fourth line disables tagged queueing for devices 0 and 3. It
+ * enables tagged queueing for the other IDs, with 16 commands/LUN
+ * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
+ * IDs 2, 5-7, and 9-15.
+ */
+
+/*
+ * NOTE: The below structure is for reference only, the actual structure
+ * to modify in order to change things is just below this comment block.
+adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
+};
+*/
+
+#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#else
+#define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
+#endif
+
+#define AIC7XXX_CONFIGED_TAG_COMMANDS { \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
+ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE \
+}
+
+/*
+ * By default, use the number of commands specified by
+ * the users kernel configuration.
+ */
+static adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS},
+ {AIC7XXX_CONFIGED_TAG_COMMANDS}
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW DID_ERROR
+
+void
+ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
+{
+ printk("(scsi%d:%c:%d:%d): ",
+ ahc->platform_data->host->host_no,
+ scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
+ scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
+ scb != NULL ? SCB_GET_LUN(scb) : -1);
+}
+
+/*
+ * XXX - these options apply unilaterally to _all_ 274x/284x/294x
+ * cards in the system. This should be fixed. Exceptions to this
+ * rule are noted in the comments.
+ */
+
+/*
+ * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
+ * has no effect on any later resets that might occur due to things like
+ * SCSI bus timeouts.
+ */
+static uint32_t aic7xxx_no_reset;
+
+/*
+ * Should we force EXTENDED translation on a controller.
+ * 0 == Use whatever is in the SEEPROM or default to off
+ * 1 == Use whatever is in the SEEPROM or default to on
+ */
+static uint32_t aic7xxx_extended;
+
+/*
+ * PCI bus parity checking of the Adaptec controllers. This is somewhat
+ * dubious at best. To my knowledge, this option has never actually
+ * solved a PCI parity problem, but on certain machines with broken PCI
+ * chipset configurations where stray PCI transactions with bad parity are
+ * the norm rather than the exception, the error messages can be overwhelming.
+ * It's included in the driver for completeness.
+ * 0 = Shut off PCI parity check
+ * non-0 = reverse polarity pci parity checking
+ */
+static uint32_t aic7xxx_pci_parity = ~0;
+
+/*
+ * There are lots of broken chipsets in the world. Some of them will
+ * violate the PCI spec when we issue byte sized memory writes to our
+ * controller. I/O mapped register access, if allowed by the given
+ * platform, will work in almost all cases.
+ */
+uint32_t aic7xxx_allow_memio = ~0;
+
+/*
+ * So that we can set how long each device is given as a selection timeout.
+ * The table of values goes like this:
+ * 0 - 256ms
+ * 1 - 128ms
+ * 2 - 64ms
+ * 3 - 32ms
+ * We default to 256ms because some older devices need a longer time
+ * to respond to initial selection.
+ */
+static uint32_t aic7xxx_seltime;
+
+/*
+ * Certain devices do not perform any aging on commands. Should the
+ * device be saturated by commands in one portion of the disk, it is
+ * possible for transactions on far away sectors to never be serviced.
+ * To handle these devices, we can periodically send an ordered tag to
+ * force all outstanding transactions to be serviced prior to a new
+ * transaction.
+ */
+static uint32_t aic7xxx_periodic_otag;
+
+/*
+ * Module information and settable options.
+ */
+static char *aic7xxx = NULL;
+
+MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
+module_param(aic7xxx, charp, 0444);
+MODULE_PARM_DESC(aic7xxx,
+"period-delimited options string:\n"
+" verbose Enable verbose/diagnostic logging\n"
+" allow_memio Allow device registers to be memory mapped\n"
+" debug Bitmask of debug values to enable\n"
+" no_probe Toggle EISA/VLB controller probing\n"
+" probe_eisa_vl Toggle EISA/VLB controller probing\n"
+" no_reset Suppress initial bus resets\n"
+" extended Enable extended geometry on all controllers\n"
+" periodic_otag Send an ordered tagged transaction\n"
+" periodically to prevent tag starvation.\n"
+" This may be required by some older disk\n"
+" drives or RAID arrays.\n"
+" tag_info:<tag_str> Set per-target tag depth\n"
+" global_tag_depth:<int> Global tag depth for every target\n"
+" on every bus\n"
+" seltime:<int> Selection Timeout\n"
+" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
+"\n"
+" Sample modprobe configuration file:\n"
+" # Toggle EISA/VLB probing\n"
+" # Set tag depth on Controller 1/Target 1 to 10 tags\n"
+" # Shorten the selection timeout to 128ms\n"
+"\n"
+" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
+);
+
+static void ahc_linux_handle_scsi_status(struct ahc_softc *,
+ struct scsi_device *,
+ struct scb *);
+static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
+ struct scsi_cmnd *cmd);
+static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
+static void ahc_linux_release_simq(struct ahc_softc *ahc);
+static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
+static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
+static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo);
+static void ahc_linux_device_queue_depth(struct scsi_device *);
+static int ahc_linux_run_command(struct ahc_softc*,
+ struct ahc_linux_device *,
+ struct scsi_cmnd *);
+static void ahc_linux_setup_tag_info_global(char *p);
+static int aic7xxx_setup(char *s);
+
+static int ahc_linux_unit;
+
+
+/************************** OS Utility Wrappers *******************************/
+void
+ahc_delay(long usec)
+{
+ /*
+ * udelay on Linux can have problems for
+ * multi-millisecond waits. Wait at most
+ * 1024us per call.
+ */
+ while (usec > 0) {
+ udelay(usec % 1024);
+ usec -= 1024;
+ }
+}
+
+/***************************** Low Level I/O **********************************/
+uint8_t
+ahc_inb(struct ahc_softc * ahc, long port)
+{
+ uint8_t x;
+
+ if (ahc->tag == BUS_SPACE_MEMIO) {
+ x = readb(ahc->bsh.maddr + port);
+ } else {
+ x = inb(ahc->bsh.ioport + port);
+ }
+ mb();
+ return (x);
+}
+
+void
+ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
+{
+ if (ahc->tag == BUS_SPACE_MEMIO) {
+ writeb(val, ahc->bsh.maddr + port);
+ } else {
+ outb(val, ahc->bsh.ioport + port);
+ }
+ mb();
+}
+
+void
+ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ ahc_outb(ahc, port, *array++);
+}
+
+void
+ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ *array++ = ahc_inb(ahc, port);
+}
+
+/********************************* Inlines ************************************/
+static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
+
+static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+ struct ahc_dma_seg *sg,
+ dma_addr_t addr, bus_size_t len);
+
+static void
+ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = scb->io_ctx;
+ ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
+
+ scsi_dma_unmap(cmd);
+}
+
+static int
+ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+ struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
+{
+ int consumed;
+
+ if ((scb->sg_count + 1) > AHC_NSEG)
+ panic("Too few segs for dma mapping. "
+ "Increase AHC_NSEG\n");
+
+ consumed = 1;
+ sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
+ scb->platform_data->xfer_len += len;
+
+ if (sizeof(dma_addr_t) > 4
+ && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
+ len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;
+
+ sg->len = ahc_htole32(len);
+ return (consumed);
+}
+
+/*
+ * Return a string describing the driver.
+ */
+static const char *
+ahc_linux_info(struct Scsi_Host *host)
+{
+ static char buffer[512];
+ char ahc_info[256];
+ char *bp;
+ struct ahc_softc *ahc;
+
+ bp = &buffer[0];
+ ahc = *(struct ahc_softc **)host->hostdata;
+ memset(bp, 0, sizeof(buffer));
+ strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
+ " <");
+ strcat(bp, ahc->description);
+ strcat(bp, ">\n"
+ " ");
+ ahc_controller_info(ahc, ahc_info);
+ strcat(bp, ahc_info);
+ strcat(bp, "\n");
+
+ return (bp);
+}
+
+/*
+ * Queue an SCB to the controller.
+ */
+static int
+ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
+{
+ struct ahc_softc *ahc;
+ struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
+ int rtn = SCSI_MLQUEUE_HOST_BUSY;
+ unsigned long flags;
+
+ ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+
+ ahc_lock(ahc, &flags);
+ if (ahc->platform_data->qfrozen == 0) {
+ cmd->scsi_done = scsi_done;
+ cmd->result = CAM_REQ_INPROG << 16;
+ rtn = ahc_linux_run_command(ahc, dev, cmd);
+ }
+ ahc_unlock(ahc, &flags);
+
+ return rtn;
+}
+
+static DEF_SCSI_QCMD(ahc_linux_queue)
+
+static inline struct scsi_target **
+ahc_linux_target_in_softc(struct scsi_target *starget)
+{
+ struct ahc_softc *ahc =
+ *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata);
+ unsigned int target_offset;
+
+ target_offset = starget->id;
+ if (starget->channel != 0)
+ target_offset += 8;
+
+ return &ahc->platform_data->starget[target_offset];
+}
+
+static int
+ahc_linux_target_alloc(struct scsi_target *starget)
+{
+ struct ahc_softc *ahc =
+ *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata);
+ struct seeprom_config *sc = ahc->seep_config;
+ unsigned long flags;
+ struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
+ unsigned short scsirate;
+ struct ahc_devinfo devinfo;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ char channel = starget->channel + 'A';
+ unsigned int our_id = ahc->our_id;
+ unsigned int target_offset;
+
+ target_offset = starget->id;
+ if (starget->channel != 0)
+ target_offset += 8;
+
+ if (starget->channel)
+ our_id = ahc->our_id_b;
+
+ ahc_lock(ahc, &flags);
+
+ BUG_ON(*ahc_targp != NULL);
+
+ *ahc_targp = starget;
+
+ if (sc) {
+ int maxsync = AHC_SYNCRATE_DT;
+ int ultra = 0;
+ int flags = sc->device_flags[target_offset];
+
+ if (ahc->flags & AHC_NEWEEPROM_FMT) {
+ if (flags & CFSYNCHISULTRA)
+ ultra = 1;
+ } else if (flags & CFULTRAEN)
+ ultra = 1;
+ /* AIC nutcase; 10MHz appears as ultra = 1, CFXFER = 0x04
+ * change it to ultra=0, CFXFER = 0 */
+ if(ultra && (flags & CFXFER) == 0x04) {
+ ultra = 0;
+ flags &= ~CFXFER;
+ }
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0);
+ } else {
+ scsirate = (flags & CFXFER) << 4;
+ maxsync = ultra ? AHC_SYNCRATE_ULTRA :
+ AHC_SYNCRATE_FAST;
+ }
+ spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
+ if (!(flags & CFSYNCH))
+ spi_max_offset(starget) = 0;
+ spi_min_period(starget) =
+ ahc_find_period(ahc, scsirate, maxsync);
+
+ tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id,
+ starget->id, &tstate);
+ }
+ ahc_compile_devinfo(&devinfo, our_id, starget->id,
+ CAM_LUN_WILDCARD, channel,
+ ROLE_INITIATOR);
+ ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
+ AHC_TRANS_GOAL, /*paused*/FALSE);
+ ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+ AHC_TRANS_GOAL, /*paused*/FALSE);
+ ahc_unlock(ahc, &flags);
+
+ return 0;
+}
+
+static void
+ahc_linux_target_destroy(struct scsi_target *starget)
+{
+ struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
+
+ *ahc_targp = NULL;
+}
+
+static int
+ahc_linux_slave_alloc(struct scsi_device *sdev)
+{
+ struct ahc_softc *ahc =
+ *((struct ahc_softc **)sdev->host->hostdata);
+ struct scsi_target *starget = sdev->sdev_target;
+ struct ahc_linux_device *dev;
+
+ if (bootverbose)
+ printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id);
+
+ dev = scsi_transport_device_data(sdev);
+ memset(dev, 0, sizeof(*dev));
+
+ /*
+ * We start out life using untagged
+ * transactions of which we allow one.
+ */
+ dev->openings = 1;
+
+ /*
+ * Set maxtags to 0. This will be changed if we
+ * later determine that we are dealing with
+ * a tagged queuing capable device.
+ */
+ dev->maxtags = 0;
+
+ spi_period(starget) = 0;
+
+ return 0;
+}
+
+static int
+ahc_linux_slave_configure(struct scsi_device *sdev)
+{
+ struct ahc_softc *ahc;
+
+ ahc = *((struct ahc_softc **)sdev->host->hostdata);
+
+ if (bootverbose)
+ sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
+
+ ahc_linux_device_queue_depth(sdev);
+
+ /* Initial Domain Validation */
+ if (!spi_initial_dv(sdev->sdev_target))
+ spi_dv_device(sdev);
+
+ return 0;
+}
+
+#if defined(__i386__)
+/*
+ * Return the disk geometry for the given SCSI device.
+ */
+static int
+ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ uint8_t *bh;
+ int heads;
+ int sectors;
+ int cylinders;
+ int ret;
+ int extended;
+ struct ahc_softc *ahc;
+ u_int channel;
+
+ ahc = *((struct ahc_softc **)sdev->host->hostdata);
+ channel = sdev_channel(sdev);
+
+ bh = scsi_bios_ptable(bdev);
+ if (bh) {
+ ret = scsi_partsize(bh, capacity,
+ &geom[2], &geom[0], &geom[1]);
+ kfree(bh);
+ if (ret != -1)
+ return (ret);
+ }
+ heads = 64;
+ sectors = 32;
+ cylinders = aic_sector_div(capacity, heads, sectors);
+
+ if (aic7xxx_extended != 0)
+ extended = 1;
+ else if (channel == 0)
+ extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
+ else
+ extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
+ if (extended && cylinders >= 1024) {
+ heads = 255;
+ sectors = 63;
+ cylinders = aic_sector_div(capacity, heads, sectors);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return (0);
+}
+#endif
+
+/*
+ * Abort the current SCSI command(s).
+ */
+static int
+ahc_linux_abort(struct scsi_cmnd *cmd)
+{
+ int error;
+
+ error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
+ if (error != 0)
+ printk("aic7xxx_abort returns 0x%x\n", error);
+ return (error);
+}
+
+/*
+ * Attempt to send a target reset message to the device that timed out.
+ */
+static int
+ahc_linux_dev_reset(struct scsi_cmnd *cmd)
+{
+ int error;
+
+ error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
+ if (error != 0)
+ printk("aic7xxx_dev_reset returns 0x%x\n", error);
+ return (error);
+}
+
+/*
+ * Reset the SCSI bus.
+ */
+static int
+ahc_linux_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct ahc_softc *ahc;
+ int found;
+ unsigned long flags;
+
+ ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+
+ ahc_lock(ahc, &flags);
+ found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A',
+ /*initiate reset*/TRUE);
+ ahc_unlock(ahc, &flags);
+
+ if (bootverbose)
+ printk("%s: SCSI bus reset delivered. "
+ "%d SCBs aborted.\n", ahc_name(ahc), found);
+
+ return SUCCESS;
+}
+
+struct scsi_host_template aic7xxx_driver_template = {
+ .module = THIS_MODULE,
+ .name = "aic7xxx",
+ .proc_name = "aic7xxx",
+ .show_info = ahc_linux_show_info,
+ .write_info = ahc_proc_write_seeprom,
+ .info = ahc_linux_info,
+ .queuecommand = ahc_linux_queue,
+ .eh_abort_handler = ahc_linux_abort,
+ .eh_device_reset_handler = ahc_linux_dev_reset,
+ .eh_bus_reset_handler = ahc_linux_bus_reset,
+#if defined(__i386__)
+ .bios_param = ahc_linux_biosparam,
+#endif
+ .can_queue = AHC_MAX_QUEUE,
+ .this_id = -1,
+ .max_sectors = 8192,
+ .cmd_per_lun = 2,
+ .use_clustering = ENABLE_CLUSTERING,
+ .slave_alloc = ahc_linux_slave_alloc,
+ .slave_configure = ahc_linux_slave_configure,
+ .target_alloc = ahc_linux_target_alloc,
+ .target_destroy = ahc_linux_target_destroy,
+ .use_blk_tags = 1,
+};
+
+/**************************** Tasklet Handler *********************************/
+
+/******************************** Macros **************************************/
+#define BUILD_SCSIID(ahc, cmd) \
+ ((((cmd)->device->id << TID_SHIFT) & TID) \
+ | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
+ | (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
+
+/******************************** Bus DMA *************************************/
+int
+ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
+ bus_size_t alignment, bus_size_t boundary,
+ dma_addr_t lowaddr, dma_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg,
+ bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
+{
+ bus_dma_tag_t dmat;
+
+ dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
+ if (dmat == NULL)
+ return (ENOMEM);
+
+ /*
+ * Linux is very simplistic about DMA memory. For now don't
+ * maintain all specification information. Once Linux supplies
+ * better facilities for doing these operations, or the
+ * needs of this particular driver change, we might need to do
+ * more here.
+ */
+ dmat->alignment = alignment;
+ dmat->boundary = boundary;
+ dmat->maxsize = maxsize;
+ *ret_tag = dmat;
+ return (0);
+}
+
+void
+ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
+{
+ kfree(dmat);
+}
+
+int
+ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
+ int flags, bus_dmamap_t *mapp)
+{
+ *vaddr = pci_alloc_consistent(ahc->dev_softc,
+ dmat->maxsize, mapp);
+ if (*vaddr == NULL)
+ return ENOMEM;
+ return 0;
+}
+
+void
+ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
+ void* vaddr, bus_dmamap_t map)
+{
+ pci_free_consistent(ahc->dev_softc, dmat->maxsize,
+ vaddr, map);
+}
+
+int
+ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
+ void *cb_arg, int flags)
+{
+ /*
+ * Assume for now that this will only be used during
+ * initialization and not for per-transaction buffer mapping.
+ */
+ bus_dma_segment_t stack_sg;
+
+ stack_sg.ds_addr = map;
+ stack_sg.ds_len = dmat->maxsize;
+ cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
+ return (0);
+}
+
+void
+ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+}
+
+int
+ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ /* Nothing to do */
+ return (0);
+}
+
+static void
+ahc_linux_setup_tag_info_global(char *p)
+{
+ int tags, i, j;
+
+ tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
+ printk("Setting Global Tags= %d\n", tags);
+
+ for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) {
+ for (j = 0; j < AHC_NUM_TARGETS; j++) {
+ aic7xxx_tag_info[i].tag_commands[j] = tags;
+ }
+ }
+}
+
+static void
+ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
+{
+
+ if ((instance >= 0) && (targ >= 0)
+ && (instance < ARRAY_SIZE(aic7xxx_tag_info))
+ && (targ < AHC_NUM_TARGETS)) {
+ aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
+ if (bootverbose)
+ printk("tag_info[%d:%d] = %d\n", instance, targ, value);
+ }
+}
+
+static char *
+ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
+ void (*callback)(u_long, int, int, int32_t),
+ u_long callback_arg)
+{
+ char *tok_end;
+ char *tok_end2;
+ int i;
+ int instance;
+ int targ;
+ int done;
+ char tok_list[] = {'.', ',', '{', '}', '\0'};
+
+ /* All options use a ':' name/arg separator */
+ if (*opt_arg != ':')
+ return (opt_arg);
+ opt_arg++;
+ instance = -1;
+ targ = -1;
+ done = FALSE;
+ /*
+ * Restore separator that may be in
+ * the middle of our option argument.
+ */
+ tok_end = strchr(opt_arg, '\0');
+ if (tok_end < end)
+ *tok_end = ',';
+ while (!done) {
+ switch (*opt_arg) {
+ case '{':
+ if (instance == -1) {
+ instance = 0;
+ } else {
+ if (depth > 1) {
+ if (targ == -1)
+ targ = 0;
+ } else {
+ printk("Malformed Option %s\n",
+ opt_name);
+ done = TRUE;
+ }
+ }
+ opt_arg++;
+ break;
+ case '}':
+ if (targ != -1)
+ targ = -1;
+ else if (instance != -1)
+ instance = -1;
+ opt_arg++;
+ break;
+ case ',':
+ case '.':
+ if (instance == -1)
+ done = TRUE;
+ else if (targ >= 0)
+ targ++;
+ else if (instance >= 0)
+ instance++;
+ opt_arg++;
+ break;
+ case '\0':
+ done = TRUE;
+ break;
+ default:
+ tok_end = end;
+ for (i = 0; tok_list[i]; i++) {
+ tok_end2 = strchr(opt_arg, tok_list[i]);
+ if ((tok_end2) && (tok_end2 < tok_end))
+ tok_end = tok_end2;
+ }
+ callback(callback_arg, instance, targ,
+ simple_strtol(opt_arg, NULL, 0));
+ opt_arg = tok_end;
+ break;
+ }
+ }
+ return (opt_arg);
+}
+
+/*
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * ie. aic7xxx=stpwlev:1,extended
+ */
+static int
+aic7xxx_setup(char *s)
+{
+ int i, n;
+ char *p;
+ char *end;
+
+ static const struct {
+ const char *name;
+ uint32_t *flag;
+ } options[] = {
+ { "extended", &aic7xxx_extended },
+ { "no_reset", &aic7xxx_no_reset },
+ { "verbose", &aic7xxx_verbose },
+ { "allow_memio", &aic7xxx_allow_memio},
+#ifdef AHC_DEBUG
+ { "debug", &ahc_debug },
+#endif
+ { "periodic_otag", &aic7xxx_periodic_otag },
+ { "pci_parity", &aic7xxx_pci_parity },
+ { "seltime", &aic7xxx_seltime },
+ { "tag_info", NULL },
+ { "global_tag_depth", NULL },
+ { "dv", NULL }
+ };
+
+ end = strchr(s, '\0');
+
+ /*
+ * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE
+ * will never be 0 in this case.
+ */
+ n = 0;
+
+ while ((p = strsep(&s, ",.")) != NULL) {
+ if (*p == '\0')
+ continue;
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+
+ n = strlen(options[i].name);
+ if (strncmp(options[i].name, p, n) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(options))
+ continue;
+
+ if (strncmp(p, "global_tag_depth", n) == 0) {
+ ahc_linux_setup_tag_info_global(p + n);
+ } else if (strncmp(p, "tag_info", n) == 0) {
+ s = ahc_parse_brace_option("tag_info", p + n, end,
+ 2, ahc_linux_setup_tag_info, 0);
+ } else if (p[n] == ':') {
+ *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+ } else if (strncmp(p, "verbose", n) == 0) {
+ *(options[i].flag) = 1;
+ } else {
+ *(options[i].flag) ^= 0xFFFFFFFF;
+ }
+ }
+ return 1;
+}
+
+__setup("aic7xxx=", aic7xxx_setup);
+
+uint32_t aic7xxx_verbose;
+
+int
+ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template)
+{
+ char buf[80];
+ struct Scsi_Host *host;
+ char *new_name;
+ u_long s;
+ int retval;
+
+ template->name = ahc->description;
+ host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
+ if (host == NULL)
+ return (ENOMEM);
+
+ *((struct ahc_softc **)host->hostdata) = ahc;
+ ahc->platform_data->host = host;
+ host->can_queue = AHC_MAX_QUEUE;
+ host->cmd_per_lun = 2;
+ /* XXX No way to communicate the ID for multiple channels */
+ host->this_id = ahc->our_id;
+ host->irq = ahc->platform_data->irq;
+ host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
+ host->max_lun = AHC_NUM_LUNS;
+ host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
+ host->sg_tablesize = AHC_NSEG;
+ ahc_lock(ahc, &s);
+ ahc_set_unit(ahc, ahc_linux_unit++);
+ ahc_unlock(ahc, &s);
+ sprintf(buf, "scsi%d", host->host_no);
+ new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
+ if (new_name != NULL) {
+ strcpy(new_name, buf);
+ ahc_set_name(ahc, new_name);
+ }
+ host->unique_id = ahc->unit;
+ ahc_linux_initialize_scsi_bus(ahc);
+ ahc_intr_enable(ahc, TRUE);
+
+ host->transportt = ahc_linux_transport_template;
+
+ retval = scsi_add_host(host,
+ (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+ if (retval) {
+ printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
+ scsi_host_put(host);
+ return retval;
+ }
+
+ scsi_scan_host(host);
+ return 0;
+}
+
+/*
+ * Place the SCSI bus into a known state by either resetting it,
+ * or forcing transfer negotiations on the next command to any
+ * target.
+ */
+void
+ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
+{
+ int i;
+ int numtarg;
+ unsigned long s;
+
+ i = 0;
+ numtarg = 0;
+
+ ahc_lock(ahc, &s);
+
+ if (aic7xxx_no_reset != 0)
+ ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);
+
+ if ((ahc->flags & AHC_RESET_BUS_A) != 0)
+ ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
+ else
+ numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
+
+ if ((ahc->features & AHC_TWIN) != 0) {
+
+ if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
+ ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
+ } else {
+ if (numtarg == 0)
+ i = 8;
+ numtarg += 8;
+ }
+ }
+
+ /*
+ * Force negotiation to async for all targets that
+ * will not see an initial bus reset.
+ */
+ for (; i < numtarg; i++) {
+ struct ahc_devinfo devinfo;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ u_int our_id;
+ u_int target_id;
+ char channel;
+
+ channel = 'A';
+ our_id = ahc->our_id;
+ target_id = i;
+ if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
+ channel = 'B';
+ our_id = ahc->our_id_b;
+ target_id = i % 8;
+ }
+ tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+ target_id, &tstate);
+ ahc_compile_devinfo(&devinfo, our_id, target_id,
+ CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
+ ahc_update_neg_request(ahc, &devinfo, tstate,
+ tinfo, AHC_NEG_ALWAYS);
+ }
+ ahc_unlock(ahc, &s);
+ /* Give the bus some time to recover */
+ if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
+ ahc_linux_freeze_simq(ahc);
+ msleep(AIC7XXX_RESET_DELAY);
+ ahc_linux_release_simq(ahc);
+ }
+}
+
+int
+ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
+{
+
+ ahc->platform_data =
+ kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
+ if (ahc->platform_data == NULL)
+ return (ENOMEM);
+ ahc->platform_data->irq = AHC_LINUX_NOIRQ;
+ ahc_lockinit(ahc);
+ ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
+ ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
+ if (aic7xxx_pci_parity == 0)
+ ahc->flags |= AHC_DISABLE_PCI_PERR;
+
+ return (0);
+}
+
+void
+ahc_platform_free(struct ahc_softc *ahc)
+{
+ struct scsi_target *starget;
+ int i;
+
+ if (ahc->platform_data != NULL) {
+ /* destroy all of the device and target objects */
+ for (i = 0; i < AHC_NUM_TARGETS; i++) {
+ starget = ahc->platform_data->starget[i];
+ if (starget != NULL) {
+ ahc->platform_data->starget[i] = NULL;
+ }
+ }
+
+ if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
+ free_irq(ahc->platform_data->irq, ahc);
+ if (ahc->tag == BUS_SPACE_PIO
+ && ahc->bsh.ioport != 0)
+ release_region(ahc->bsh.ioport, 256);
+ if (ahc->tag == BUS_SPACE_MEMIO
+ && ahc->bsh.maddr != NULL) {
+ iounmap(ahc->bsh.maddr);
+ release_mem_region(ahc->platform_data->mem_busaddr,
+ 0x1000);
+ }
+
+ if (ahc->platform_data->host)
+ scsi_host_put(ahc->platform_data->host);
+
+ kfree(ahc->platform_data);
+ }
+}
+
+void
+ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
+{
+ ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
+ SCB_GET_CHANNEL(ahc, scb),
+ SCB_GET_LUN(scb), SCB_LIST_NULL,
+ ROLE_UNKNOWN, CAM_REQUEUE_REQ);
+}
+
+void
+ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
+ struct ahc_devinfo *devinfo, ahc_queue_alg alg)
+{
+ struct ahc_linux_device *dev;
+ int was_queuing;
+ int now_queuing;
+
+ if (sdev == NULL)
+ return;
+ dev = scsi_transport_device_data(sdev);
+
+ was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
+ switch (alg) {
+ default:
+ case AHC_QUEUE_NONE:
+ now_queuing = 0;
+ break;
+ case AHC_QUEUE_BASIC:
+ now_queuing = AHC_DEV_Q_BASIC;
+ break;
+ case AHC_QUEUE_TAGGED:
+ now_queuing = AHC_DEV_Q_TAGGED;
+ break;
+ }
+ if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
+ && (was_queuing != now_queuing)
+ && (dev->active != 0)) {
+ dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
+ dev->qfrozen++;
+ }
+
+ dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
+ if (now_queuing) {
+ u_int usertags;
+
+ usertags = ahc_linux_user_tagdepth(ahc, devinfo);
+ if (!was_queuing) {
+ /*
+ * Start out aggressively and allow our
+ * dynamic queue depth algorithm to take
+ * care of the rest.
+ */
+ dev->maxtags = usertags;
+ dev->openings = dev->maxtags - dev->active;
+ }
+ if (dev->maxtags == 0) {
+ /*
+ * Queueing is disabled by the user.
+ */
+ dev->openings = 1;
+ } else if (alg == AHC_QUEUE_TAGGED) {
+ dev->flags |= AHC_DEV_Q_TAGGED;
+ if (aic7xxx_periodic_otag != 0)
+ dev->flags |= AHC_DEV_PERIODIC_OTAG;
+ } else
+ dev->flags |= AHC_DEV_Q_BASIC;
+ } else {
+ /* We can only have one opening. */
+ dev->maxtags = 0;
+ dev->openings = 1 - dev->active;
+ }
+ switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
+ case AHC_DEV_Q_BASIC:
+ case AHC_DEV_Q_TAGGED:
+ scsi_change_queue_depth(sdev,
+ dev->openings + dev->active);
+ default:
+ /*
+ * We allow the OS to queue 2 untagged transactions to
+ * us at any time even though we can only execute them
+ * serially on the controller/device. This should
+ * remove some latency.
+ */
+ scsi_change_queue_depth(sdev, 2);
+ break;
+ }
+}
+
+int
+ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
+ int lun, u_int tag, role_t role, uint32_t status)
+{
+ return 0;
+}
+
+static u_int
+ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+ static int warned_user;
+ u_int tags;
+
+ tags = 0;
+ if ((ahc->user_discenable & devinfo->target_mask) != 0) {
+ if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) {
+ if (warned_user == 0) {
+
+ printk(KERN_WARNING
+"aic7xxx: WARNING: Insufficient tag_info instances\n"
+"aic7xxx: for installed controllers. Using defaults\n"
+"aic7xxx: Please update the aic7xxx_tag_info array in\n"
+"aic7xxx: the aic7xxx_osm..c source file.\n");
+ warned_user++;
+ }
+ tags = AHC_MAX_QUEUE;
+ } else {
+ adapter_tag_info_t *tag_info;
+
+ tag_info = &aic7xxx_tag_info[ahc->unit];
+ tags = tag_info->tag_commands[devinfo->target_offset];
+ if (tags > AHC_MAX_QUEUE)
+ tags = AHC_MAX_QUEUE;
+ }
+ }
+ return (tags);
+}
+
+/*
+ * Determines the queue depth for a given device.
+ */
+static void
+ahc_linux_device_queue_depth(struct scsi_device *sdev)
+{
+ struct ahc_devinfo devinfo;
+ u_int tags;
+ struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata);
+
+ ahc_compile_devinfo(&devinfo,
+ sdev->sdev_target->channel == 0
+ ? ahc->our_id : ahc->our_id_b,
+ sdev->sdev_target->id, sdev->lun,
+ sdev->sdev_target->channel == 0 ? 'A' : 'B',
+ ROLE_INITIATOR);
+ tags = ahc_linux_user_tagdepth(ahc, &devinfo);
+ if (tags != 0 && sdev->tagged_supported != 0) {
+
+ ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_TAGGED);
+ ahc_send_async(ahc, devinfo.channel, devinfo.target,
+ devinfo.lun, AC_TRANSFER_NEG);
+ ahc_print_devinfo(ahc, &devinfo);
+ printk("Tagged Queuing enabled. Depth %d\n", tags);
+ } else {
+ ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE);
+ ahc_send_async(ahc, devinfo.channel, devinfo.target,
+ devinfo.lun, AC_TRANSFER_NEG);
+ }
+}
+
+static int
+ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
+ struct scsi_cmnd *cmd)
+{
+ struct scb *scb;
+ struct hardware_scb *hscb;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ uint16_t mask;
+ struct scb_tailq *untagged_q = NULL;
+ int nseg;
+
+ /*
+ * Schedule us to run later. The only reason we are not
+ * running is because the whole controller Q is frozen.
+ */
+ if (ahc->platform_data->qfrozen != 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * We only allow one untagged transaction
+ * per target in the initiator role unless
+ * we are storing a full busy target *lun*
+ * table in SCB space.
+ */
+ if (!(cmd->flags & SCMD_TAGGED)
+ && (ahc->features & AHC_SCB_BTT) == 0) {
+ int target_offset;
+
+ target_offset = cmd->device->id + cmd->device->channel * 8;
+ untagged_q = &(ahc->untagged_queues[target_offset]);
+ if (!TAILQ_EMPTY(untagged_q))
+ /* if we're already executing an untagged command
+ * we're busy to another */
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Get an scb to use.
+ */
+ scb = ahc_get_scb(ahc);
+ if (!scb) {
+ scsi_dma_unmap(cmd);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ scb->io_ctx = cmd;
+ scb->platform_data->dev = dev;
+ hscb = scb->hscb;
+ cmd->host_scribble = (char *)scb;
+
+ /*
+ * Fill out basics of the HSCB.
+ */
+ hscb->control = 0;
+ hscb->scsiid = BUILD_SCSIID(ahc, cmd);
+ hscb->lun = cmd->device->lun;
+ mask = SCB_GET_TARGET_MASK(ahc, scb);
+ tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
+ SCB_GET_OUR_ID(scb),
+ SCB_GET_TARGET(ahc, scb), &tstate);
+ hscb->scsirate = tinfo->scsirate;
+ hscb->scsioffset = tinfo->curr.offset;
+ if ((tstate->ultraenb & mask) != 0)
+ hscb->control |= ULTRAENB;
+
+ if ((ahc->user_discenable & mask) != 0)
+ hscb->control |= DISCENB;
+
+ if ((tstate->auto_negotiate & mask) != 0) {
+ scb->flags |= SCB_AUTO_NEGOTIATE;
+ scb->hscb->control |= MK_MESSAGE;
+ }
+
+ if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
+ if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
+ && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
+ hscb->control |= MSG_ORDERED_TASK;
+ dev->commands_since_idle_or_otag = 0;
+ } else {
+ hscb->control |= MSG_SIMPLE_TASK;
+ }
+ }
+
+ hscb->cdb_len = cmd->cmd_len;
+ if (hscb->cdb_len <= 12) {
+ memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
+ } else {
+ memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
+ scb->flags |= SCB_CDB32_PTR;
+ }
+
+ scb->platform_data->xfer_len = 0;
+ ahc_set_residual(scb, 0);
+ ahc_set_sense_residual(scb, 0);
+ scb->sg_count = 0;
+
+ if (nseg > 0) {
+ struct ahc_dma_seg *sg;
+ struct scatterlist *cur_seg;
+ int i;
+
+ /* Copy the segments into the SG list. */
+ sg = scb->sg_list;
+ /*
+ * The sg_count may be larger than nseg if
+ * a transfer crosses a 32bit page.
+ */
+ scsi_for_each_sg(cmd, cur_seg, nseg, i) {
+ dma_addr_t addr;
+ bus_size_t len;
+ int consumed;
+
+ addr = sg_dma_address(cur_seg);
+ len = sg_dma_len(cur_seg);
+ consumed = ahc_linux_map_seg(ahc, scb,
+ sg, addr, len);
+ sg += consumed;
+ scb->sg_count += consumed;
+ }
+ sg--;
+ sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
+
+ /*
+ * Reset the sg list pointer.
+ */
+ scb->hscb->sgptr =
+ ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
+
+ /*
+ * Copy the first SG into the "current"
+ * data pointer area.
+ */
+ scb->hscb->dataptr = scb->sg_list->addr;
+ scb->hscb->datacnt = scb->sg_list->len;
+ } else {
+ scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
+ scb->hscb->dataptr = 0;
+ scb->hscb->datacnt = 0;
+ scb->sg_count = 0;
+ }
+
+ LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
+ dev->openings--;
+ dev->active++;
+ dev->commands_issued++;
+ if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
+ dev->commands_since_idle_or_otag++;
+
+ scb->flags |= SCB_ACTIVE;
+ if (untagged_q) {
+ TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
+ scb->flags |= SCB_UNTAGGEDQ;
+ }
+ ahc_queue_scb(ahc, scb);
+ return 0;
+}
+
+/*
+ * SCSI controller interrupt handler.
+ */
+irqreturn_t
+ahc_linux_isr(int irq, void *dev_id)
+{
+ struct ahc_softc *ahc;
+ u_long flags;
+ int ours;
+
+ ahc = (struct ahc_softc *) dev_id;
+ ahc_lock(ahc, &flags);
+ ours = ahc_intr(ahc);
+ ahc_unlock(ahc, &flags);
+ return IRQ_RETVAL(ours);
+}
+
+void
+ahc_platform_flushwork(struct ahc_softc *ahc)
+{
+
+}
+
+void
+ahc_send_async(struct ahc_softc *ahc, char channel,
+ u_int target, u_int lun, ac_code code)
+{
+ switch (code) {
+ case AC_TRANSFER_NEG:
+ {
+ struct scsi_target *starget;
+ struct ahc_linux_target *targ;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ int target_offset;
+ unsigned int target_ppr_options;
+
+ BUG_ON(target == CAM_TARGET_WILDCARD);
+
+ tinfo = ahc_fetch_transinfo(ahc, channel,
+ channel == 'A' ? ahc->our_id
+ : ahc->our_id_b,
+ target, &tstate);
+
+ /*
+ * Don't bother reporting results while
+ * negotiations are still pending.
+ */
+ if (tinfo->curr.period != tinfo->goal.period
+ || tinfo->curr.width != tinfo->goal.width
+ || tinfo->curr.offset != tinfo->goal.offset
+ || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
+ if (bootverbose == 0)
+ break;
+
+ /*
+ * Don't bother reporting results that
+ * are identical to those last reported.
+ */
+ target_offset = target;
+ if (channel == 'B')
+ target_offset += 8;
+ starget = ahc->platform_data->starget[target_offset];
+ if (starget == NULL)
+ break;
+ targ = scsi_transport_target_data(starget);
+
+ target_ppr_options =
+ (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
+ + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
+ + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0);
+
+ if (tinfo->curr.period == spi_period(starget)
+ && tinfo->curr.width == spi_width(starget)
+ && tinfo->curr.offset == spi_offset(starget)
+ && tinfo->curr.ppr_options == target_ppr_options)
+ if (bootverbose == 0)
+ break;
+
+ spi_period(starget) = tinfo->curr.period;
+ spi_width(starget) = tinfo->curr.width;
+ spi_offset(starget) = tinfo->curr.offset;
+ spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
+ spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
+ spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
+ spi_display_xfer_agreement(starget);
+ break;
+ }
+ case AC_SENT_BDR:
+ {
+ WARN_ON(lun != CAM_LUN_WILDCARD);
+ scsi_report_device_reset(ahc->platform_data->host,
+ channel - 'A', target);
+ break;
+ }
+ case AC_BUS_RESET:
+ if (ahc->platform_data->host != NULL) {
+ scsi_report_bus_reset(ahc->platform_data->host,
+ channel - 'A');
+ }
+ break;
+ default:
+ panic("ahc_send_async: Unexpected async event");
+ }
+}
+
+/*
+ * Calls the higher level scsi done function and frees the scb.
+ */
+void
+ahc_done(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct scsi_cmnd *cmd;
+ struct ahc_linux_device *dev;
+
+ LIST_REMOVE(scb, pending_links);
+ if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
+ struct scb_tailq *untagged_q;
+ int target_offset;
+
+ target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
+ untagged_q = &(ahc->untagged_queues[target_offset]);
+ TAILQ_REMOVE(untagged_q, scb, links.tqe);
+ BUG_ON(!TAILQ_EMPTY(untagged_q));
+ } else if ((scb->flags & SCB_ACTIVE) == 0) {
+ /*
+ * Transactions aborted from the untagged queue may
+ * not have been dispatched to the controller, so
+ * only check the SCB_ACTIVE flag for tagged transactions.
+ */
+ printk("SCB %d done'd twice\n", scb->hscb->tag);
+ ahc_dump_card_state(ahc);
+ panic("Stopping for safety");
+ }
+ cmd = scb->io_ctx;
+ dev = scb->platform_data->dev;
+ dev->active--;
+ dev->openings++;
+ if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
+ cmd->result &= ~(CAM_DEV_QFRZN << 16);
+ dev->qfrozen--;
+ }
+ ahc_linux_unmap_scb(ahc, scb);
+
+ /*
+ * Guard against stale sense data.
+ * The Linux mid-layer assumes that sense
+ * was retrieved anytime the first byte of
+ * the sense buffer looks "sane".
+ */
+ cmd->sense_buffer[0] = 0;
+ if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
+ uint32_t amount_xferred;
+
+ amount_xferred =
+ ahc_get_transfer_length(scb) - ahc_get_residual(scb);
+ if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
+#ifdef AHC_DEBUG
+ if ((ahc_debug & AHC_SHOW_MISC) != 0) {
+ ahc_print_path(ahc, scb);
+ printk("Set CAM_UNCOR_PARITY\n");
+ }
+#endif
+ ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
+#ifdef AHC_REPORT_UNDERFLOWS
+ /*
+ * This code is disabled by default as some
+ * clients of the SCSI system do not properly
+ * initialize the underflow parameter. This
+ * results in spurious termination of commands
+ * that complete as expected (e.g. underflow is
+ * allowed as command can return variable amounts
+ * of data.
+ */
+ } else if (amount_xferred < scb->io_ctx->underflow) {
+ u_int i;
+
+ ahc_print_path(ahc, scb);
+ printk("CDB:");
+ for (i = 0; i < scb->io_ctx->cmd_len; i++)
+ printk(" 0x%x", scb->io_ctx->cmnd[i]);
+ printk("\n");
+ ahc_print_path(ahc, scb);
+ printk("Saw underflow (%ld of %ld bytes). "
+ "Treated as error\n",
+ ahc_get_residual(scb),
+ ahc_get_transfer_length(scb));
+ ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+#endif
+ } else {
+ ahc_set_transaction_status(scb, CAM_REQ_CMP);
+ }
+ } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
+ ahc_linux_handle_scsi_status(ahc, cmd->device, scb);
+ }
+
+ if (dev->openings == 1
+ && ahc_get_transaction_status(scb) == CAM_REQ_CMP
+ && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+ dev->tag_success_count++;
+ /*
+ * Some devices deal with temporary internal resource
+ * shortages by returning queue full. When the queue
+ * full occurrs, we throttle back. Slowly try to get
+ * back to our previous queue depth.
+ */
+ if ((dev->openings + dev->active) < dev->maxtags
+ && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
+ dev->tag_success_count = 0;
+ dev->openings++;
+ }
+
+ if (dev->active == 0)
+ dev->commands_since_idle_or_otag = 0;
+
+ if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
+ printk("Recovery SCB completes\n");
+ if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
+ || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
+ ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
+
+ if (ahc->platform_data->eh_done)
+ complete(ahc->platform_data->eh_done);
+ }
+
+ ahc_free_scb(ahc, scb);
+ ahc_linux_queue_cmd_complete(ahc, cmd);
+}
+
+static void
+ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
+ struct scsi_device *sdev, struct scb *scb)
+{
+ struct ahc_devinfo devinfo;
+ struct ahc_linux_device *dev = scsi_transport_device_data(sdev);
+
+ ahc_compile_devinfo(&devinfo,
+ ahc->our_id,
+ sdev->sdev_target->id, sdev->lun,
+ sdev->sdev_target->channel == 0 ? 'A' : 'B',
+ ROLE_INITIATOR);
+
+ /*
+ * We don't currently trust the mid-layer to
+ * properly deal with queue full or busy. So,
+ * when one occurs, we tell the mid-layer to
+ * unconditionally requeue the command to us
+ * so that we can retry it ourselves. We also
+ * implement our own throttling mechanism so
+ * we don't clobber the device with too many
+ * commands.
+ */
+ switch (ahc_get_scsi_status(scb)) {
+ default:
+ break;
+ case SCSI_STATUS_CHECK_COND:
+ case SCSI_STATUS_CMD_TERMINATED:
+ {
+ struct scsi_cmnd *cmd;
+
+ /*
+ * Copy sense information to the OS's cmd
+ * structure if it is available.
+ */
+ cmd = scb->io_ctx;
+ if (scb->flags & SCB_SENSE) {
+ u_int sense_size;
+
+ sense_size = min(sizeof(struct scsi_sense_data)
+ - ahc_get_sense_residual(scb),
+ (u_long)SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->sense_buffer,
+ ahc_get_sense_buf(ahc, scb), sense_size);
+ if (sense_size < SCSI_SENSE_BUFFERSIZE)
+ memset(&cmd->sense_buffer[sense_size], 0,
+ SCSI_SENSE_BUFFERSIZE - sense_size);
+ cmd->result |= (DRIVER_SENSE << 24);
+#ifdef AHC_DEBUG
+ if (ahc_debug & AHC_SHOW_SENSE) {
+ int i;
+
+ printk("Copied %d bytes of sense data:",
+ sense_size);
+ for (i = 0; i < sense_size; i++) {
+ if ((i & 0xF) == 0)
+ printk("\n");
+ printk("0x%x ", cmd->sense_buffer[i]);
+ }
+ printk("\n");
+ }
+#endif
+ }
+ break;
+ }
+ case SCSI_STATUS_QUEUE_FULL:
+ {
+ /*
+ * By the time the core driver has returned this
+ * command, all other commands that were queued
+ * to us but not the device have been returned.
+ * This ensures that dev->active is equal to
+ * the number of commands actually queued to
+ * the device.
+ */
+ dev->tag_success_count = 0;
+ if (dev->active != 0) {
+ /*
+ * Drop our opening count to the number
+ * of commands currently outstanding.
+ */
+ dev->openings = 0;
+/*
+ ahc_print_path(ahc, scb);
+ printk("Dropping tag count to %d\n", dev->active);
+ */
+ if (dev->active == dev->tags_on_last_queuefull) {
+
+ dev->last_queuefull_same_count++;
+ /*
+ * If we repeatedly see a queue full
+ * at the same queue depth, this
+ * device has a fixed number of tag
+ * slots. Lock in this tag depth
+ * so we stop seeing queue fulls from
+ * this device.
+ */
+ if (dev->last_queuefull_same_count
+ == AHC_LOCK_TAGS_COUNT) {
+ dev->maxtags = dev->active;
+ ahc_print_path(ahc, scb);
+ printk("Locking max tag count at %d\n",
+ dev->active);
+ }
+ } else {
+ dev->tags_on_last_queuefull = dev->active;
+ dev->last_queuefull_same_count = 0;
+ }
+ ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
+ ahc_set_scsi_status(scb, SCSI_STATUS_OK);
+ ahc_platform_set_tags(ahc, sdev, &devinfo,
+ (dev->flags & AHC_DEV_Q_BASIC)
+ ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
+ break;
+ }
+ /*
+ * Drop down to a single opening, and treat this
+ * as if the target returned BUSY SCSI status.
+ */
+ dev->openings = 1;
+ ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
+ ahc_platform_set_tags(ahc, sdev, &devinfo,
+ (dev->flags & AHC_DEV_Q_BASIC)
+ ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
+ break;
+ }
+ }
+}
+
+static void
+ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
+{
+ /*
+ * Map CAM error codes into Linux Error codes. We
+ * avoid the conversion so that the DV code has the
+ * full error information available when making
+ * state change decisions.
+ */
+ {
+ u_int new_status;
+
+ switch (ahc_cmd_get_transaction_status(cmd)) {
+ case CAM_REQ_INPROG:
+ case CAM_REQ_CMP:
+ case CAM_SCSI_STATUS_ERROR:
+ new_status = DID_OK;
+ break;
+ case CAM_REQ_ABORTED:
+ new_status = DID_ABORT;
+ break;
+ case CAM_BUSY:
+ new_status = DID_BUS_BUSY;
+ break;
+ case CAM_REQ_INVALID:
+ case CAM_PATH_INVALID:
+ new_status = DID_BAD_TARGET;
+ break;
+ case CAM_SEL_TIMEOUT:
+ new_status = DID_NO_CONNECT;
+ break;
+ case CAM_SCSI_BUS_RESET:
+ case CAM_BDR_SENT:
+ new_status = DID_RESET;
+ break;
+ case CAM_UNCOR_PARITY:
+ new_status = DID_PARITY;
+ break;
+ case CAM_CMD_TIMEOUT:
+ new_status = DID_TIME_OUT;
+ break;
+ case CAM_UA_ABORT:
+ case CAM_REQ_CMP_ERR:
+ case CAM_AUTOSENSE_FAIL:
+ case CAM_NO_HBA:
+ case CAM_DATA_RUN_ERR:
+ case CAM_UNEXP_BUSFREE:
+ case CAM_SEQUENCE_FAIL:
+ case CAM_CCB_LEN_ERR:
+ case CAM_PROVIDE_FAIL:
+ case CAM_REQ_TERMIO:
+ case CAM_UNREC_HBA_ERROR:
+ case CAM_REQ_TOO_BIG:
+ new_status = DID_ERROR;
+ break;
+ case CAM_REQUEUE_REQ:
+ new_status = DID_REQUEUE;
+ break;
+ default:
+ /* We should never get here */
+ new_status = DID_ERROR;
+ break;
+ }
+
+ ahc_cmd_set_transaction_status(cmd, new_status);
+ }
+
+ cmd->scsi_done(cmd);
+}
+
+static void
+ahc_linux_freeze_simq(struct ahc_softc *ahc)
+{
+ unsigned long s;
+
+ ahc_lock(ahc, &s);
+ ahc->platform_data->qfrozen++;
+ if (ahc->platform_data->qfrozen == 1) {
+ scsi_block_requests(ahc->platform_data->host);
+
+ /* XXX What about Twin channels? */
+ ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL,
+ ROLE_INITIATOR, CAM_REQUEUE_REQ);
+ }
+ ahc_unlock(ahc, &s);
+}
+
+static void
+ahc_linux_release_simq(struct ahc_softc *ahc)
+{
+ u_long s;
+ int unblock_reqs;
+
+ unblock_reqs = 0;
+ ahc_lock(ahc, &s);
+ if (ahc->platform_data->qfrozen > 0)
+ ahc->platform_data->qfrozen--;
+ if (ahc->platform_data->qfrozen == 0)
+ unblock_reqs = 1;
+ ahc_unlock(ahc, &s);
+ /*
+ * There is still a race here. The mid-layer
+ * should keep its own freeze count and use
+ * a bottom half handler to run the queues
+ * so we can unblock with our own lock held.
+ */
+ if (unblock_reqs)
+ scsi_unblock_requests(ahc->platform_data->host);
+}
+
+static int
+ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
+{
+ struct ahc_softc *ahc;
+ struct ahc_linux_device *dev;
+ struct scb *pending_scb;
+ u_int saved_scbptr;
+ u_int active_scb_index;
+ u_int last_phase;
+ u_int saved_scsiid;
+ u_int cdb_byte;
+ int retval;
+ int was_paused;
+ int paused;
+ int wait;
+ int disconnected;
+ unsigned long flags;
+
+ pending_scb = NULL;
+ paused = FALSE;
+ wait = FALSE;
+ ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+
+ scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
+ flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
+
+ printk("CDB:");
+ for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+ printk(" 0x%x", cmd->cmnd[cdb_byte]);
+ printk("\n");
+
+ ahc_lock(ahc, &flags);
+
+ /*
+ * First determine if we currently own this command.
+ * Start by searching the device queue. If not found
+ * there, check the pending_scb list. If not found
+ * at all, and the system wanted us to just abort the
+ * command, return success.
+ */
+ dev = scsi_transport_device_data(cmd->device);
+
+ if (dev == NULL) {
+ /*
+ * No target device for this command exists,
+ * so we must not still own the command.
+ */
+ printk("%s:%d:%d:%d: Is not an active device\n",
+ ahc_name(ahc), cmd->device->channel, cmd->device->id,
+ (u8)cmd->device->lun);
+ retval = SUCCESS;
+ goto no_cmd;
+ }
+
+ if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
+ && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
+ cmd->device->channel + 'A',
+ (u8)cmd->device->lun,
+ CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
+ printk("%s:%d:%d:%d: Command found on untagged queue\n",
+ ahc_name(ahc), cmd->device->channel, cmd->device->id,
+ (u8)cmd->device->lun);
+ retval = SUCCESS;
+ goto done;
+ }
+
+ /*
+ * See if we can find a matching cmd in the pending list.
+ */
+ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
+ if (pending_scb->io_ctx == cmd)
+ break;
+ }
+
+ if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
+
+ /* Any SCB for this device will do for a target reset */
+ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
+ if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
+ scmd_channel(cmd) + 'A',
+ CAM_LUN_WILDCARD,
+ SCB_LIST_NULL, ROLE_INITIATOR))
+ break;
+ }
+ }
+
+ if (pending_scb == NULL) {
+ scmd_printk(KERN_INFO, cmd, "Command not found\n");
+ goto no_cmd;
+ }
+
+ if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
+ /*
+ * We can't queue two recovery actions using the same SCB
+ */
+ retval = FAILED;
+ goto done;
+ }
+
+ /*
+ * Ensure that the card doesn't do anything
+ * behind our back and that we didn't "just" miss
+ * an interrupt that would affect this cmd.
+ */
+ was_paused = ahc_is_paused(ahc);
+ ahc_pause_and_flushwork(ahc);
+ paused = TRUE;
+
+ if ((pending_scb->flags & SCB_ACTIVE) == 0) {
+ scmd_printk(KERN_INFO, cmd, "Command already completed\n");
+ goto no_cmd;
+ }
+
+ printk("%s: At time of recovery, card was %spaused\n",
+ ahc_name(ahc), was_paused ? "" : "not ");
+ ahc_dump_card_state(ahc);
+
+ disconnected = TRUE;
+ if (flag == SCB_ABORT) {
+ if (ahc_search_qinfifo(ahc, cmd->device->id,
+ cmd->device->channel + 'A',
+ cmd->device->lun,
+ pending_scb->hscb->tag,
+ ROLE_INITIATOR, CAM_REQ_ABORTED,
+ SEARCH_COMPLETE) > 0) {
+ printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
+ ahc_name(ahc), cmd->device->channel,
+ cmd->device->id, (u8)cmd->device->lun);
+ retval = SUCCESS;
+ goto done;
+ }
+ } else if (ahc_search_qinfifo(ahc, cmd->device->id,
+ cmd->device->channel + 'A',
+ cmd->device->lun,
+ pending_scb->hscb->tag,
+ ROLE_INITIATOR, /*status*/0,
+ SEARCH_COUNT) > 0) {
+ disconnected = FALSE;
+ }
+
+ if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
+ struct scb *bus_scb;
+
+ bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
+ if (bus_scb == pending_scb)
+ disconnected = FALSE;
+ else if (flag != SCB_ABORT
+ && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
+ && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
+ disconnected = FALSE;
+ }
+
+ /*
+ * At this point, pending_scb is the scb associated with the
+ * passed in command. That command is currently active on the
+ * bus, is in the disconnected state, or we're hoping to find
+ * a command for the same target active on the bus to abuse to
+ * send a BDR. Queue the appropriate message based on which of
+ * these states we are in.
+ */
+ last_phase = ahc_inb(ahc, LASTPHASE);
+ saved_scbptr = ahc_inb(ahc, SCBPTR);
+ active_scb_index = ahc_inb(ahc, SCB_TAG);
+ saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
+ if (last_phase != P_BUSFREE
+ && (pending_scb->hscb->tag == active_scb_index
+ || (flag == SCB_DEVICE_RESET
+ && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) {
+
+ /*
+ * We're active on the bus, so assert ATN
+ * and hope that the target responds.
+ */
+ pending_scb = ahc_lookup_scb(ahc, active_scb_index);
+ pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+ ahc_outb(ahc, MSG_OUT, HOST_MSG);
+ ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
+ scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
+ wait = TRUE;
+ } else if (disconnected) {
+
+ /*
+ * Actually re-queue this SCB in an attempt
+ * to select the device before it reconnects.
+ * In either case (selection or reselection),
+ * we will now issue the approprate message
+ * to the timed-out device.
+ *
+ * Set the MK_MESSAGE control bit indicating
+ * that we desire to send a message. We
+ * also set the disconnected flag since
+ * in the paging case there is no guarantee
+ * that our SCB control byte matches the
+ * version on the card. We don't want the
+ * sequencer to abort the command thinking
+ * an unsolicited reselection occurred.
+ */
+ pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
+ pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+
+ /*
+ * Remove any cached copy of this SCB in the
+ * disconnected list in preparation for the
+ * queuing of our abort SCB. We use the
+ * same element in the SCB, SCB_NEXT, for
+ * both the qinfifo and the disconnected list.
+ */
+ ahc_search_disc_list(ahc, cmd->device->id,
+ cmd->device->channel + 'A',
+ cmd->device->lun, pending_scb->hscb->tag,
+ /*stop_on_first*/TRUE,
+ /*remove*/TRUE,
+ /*save_state*/FALSE);
+
+ /*
+ * In the non-paging case, the sequencer will
+ * never re-reference the in-core SCB.
+ * To make sure we are notified during
+ * reselection, set the MK_MESSAGE flag in
+ * the card's copy of the SCB.
+ */
+ if ((ahc->flags & AHC_PAGESCBS) == 0) {
+ ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
+ ahc_outb(ahc, SCB_CONTROL,
+ ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
+ }
+
+ /*
+ * Clear out any entries in the QINFIFO first
+ * so we are the next SCB for this target
+ * to run.
+ */
+ ahc_search_qinfifo(ahc, cmd->device->id,
+ cmd->device->channel + 'A',
+ cmd->device->lun, SCB_LIST_NULL,
+ ROLE_INITIATOR, CAM_REQUEUE_REQ,
+ SEARCH_COMPLETE);
+ ahc_qinfifo_requeue_tail(ahc, pending_scb);
+ ahc_outb(ahc, SCBPTR, saved_scbptr);
+ ahc_print_path(ahc, pending_scb);
+ printk("Device is disconnected, re-queuing SCB\n");
+ wait = TRUE;
+ } else {
+ scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
+ retval = FAILED;
+ goto done;
+ }
+
+no_cmd:
+ /*
+ * Our assumption is that if we don't have the command, no
+ * recovery action was required, so we return success. Again,
+ * the semantics of the mid-layer recovery engine are not
+ * well defined, so this may change in time.
+ */
+ retval = SUCCESS;
+done:
+ if (paused)
+ ahc_unpause(ahc);
+ if (wait) {
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ ahc->platform_data->eh_done = &done;
+ ahc_unlock(ahc, &flags);
+
+ printk("Recovery code sleeping\n");
+ if (!wait_for_completion_timeout(&done, 5 * HZ)) {
+ ahc_lock(ahc, &flags);
+ ahc->platform_data->eh_done = NULL;
+ ahc_unlock(ahc, &flags);
+
+ printk("Timer Expired\n");
+ retval = FAILED;
+ }
+ printk("Recovery code awake\n");
+ } else
+ ahc_unlock(ahc, &flags);
+ return (retval);
+}
+
+void
+ahc_platform_dump_card_state(struct ahc_softc *ahc)
+{
+}
+
+static void ahc_linux_set_width(struct scsi_target *starget, int width)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+ struct ahc_devinfo devinfo;
+ unsigned long flags;
+
+ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ ahc_lock(ahc, &flags);
+ ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
+ ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_set_period(struct scsi_target *starget, int period)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_initiator_tinfo *tinfo
+ = ahc_fetch_transinfo(ahc,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahc_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options;
+ unsigned long flags;
+ unsigned long offset = tinfo->goal.offset;
+ const struct ahc_syncrate *syncrate;
+
+ if (offset == 0)
+ offset = MAX_OFFSET;
+
+ if (period < 9)
+ period = 9; /* 12.5ns is our minimum */
+ if (period == 9) {
+ if (spi_max_width(starget))
+ ppr_options |= MSG_EXT_PPR_DT_REQ;
+ else
+ /* need wide for DT and need DT for 12.5 ns */
+ period = 10;
+ }
+
+ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+
+ /* all PPR requests apart from QAS require wide transfers */
+ if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
+ if (spi_width(starget) == 0)
+ ppr_options &= MSG_EXT_PPR_QAS_REQ;
+ }
+
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ ahc_lock(ahc, &flags);
+ ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
+ ppr_options, AHC_TRANS_GOAL, FALSE);
+ ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_initiator_tinfo *tinfo
+ = ahc_fetch_transinfo(ahc,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahc_devinfo devinfo;
+ unsigned int ppr_options = 0;
+ unsigned int period = 0;
+ unsigned long flags;
+ const struct ahc_syncrate *syncrate = NULL;
+
+ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ if (offset != 0) {
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ period = tinfo->goal.period;
+ ppr_options = tinfo->goal.ppr_options;
+ }
+ ahc_lock(ahc, &flags);
+ ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
+ ppr_options, AHC_TRANS_GOAL, FALSE);
+ ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_initiator_tinfo *tinfo
+ = ahc_fetch_transinfo(ahc,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahc_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_DT_REQ;
+ unsigned int period = tinfo->goal.period;
+ unsigned int width = tinfo->goal.width;
+ unsigned long flags;
+ const struct ahc_syncrate *syncrate;
+
+ if (dt && spi_max_width(starget)) {
+ ppr_options |= MSG_EXT_PPR_DT_REQ;
+ if (!width)
+ ahc_linux_set_width(starget, 1);
+ } else if (period == 9)
+ period = 10; /* if resetting DT, period must be >= 25ns */
+
+ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT);
+ ahc_lock(ahc, &flags);
+ ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
+ ppr_options, AHC_TRANS_GOAL, FALSE);
+ ahc_unlock(ahc, &flags);
+}
+
+#if 0
+/* FIXME: This code claims to support IU and QAS. However, the actual
+ * sequencer code and aic7xxx_core have no support for these parameters and
+ * will get into a bad state if they're negotiated. Do not enable this
+ * unless you know what you're doing */
+static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_initiator_tinfo *tinfo
+ = ahc_fetch_transinfo(ahc,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahc_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_QAS_REQ;
+ unsigned int period = tinfo->goal.period;
+ unsigned long flags;
+ struct ahc_syncrate *syncrate;
+
+ if (qas)
+ ppr_options |= MSG_EXT_PPR_QAS_REQ;
+
+ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ ahc_lock(ahc, &flags);
+ ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
+ ppr_options, AHC_TRANS_GOAL, FALSE);
+ ahc_unlock(ahc, &flags);
+}
+
+static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+ struct ahc_tmode_tstate *tstate;
+ struct ahc_initiator_tinfo *tinfo
+ = ahc_fetch_transinfo(ahc,
+ starget->channel + 'A',
+ shost->this_id, starget->id, &tstate);
+ struct ahc_devinfo devinfo;
+ unsigned int ppr_options = tinfo->goal.ppr_options
+ & ~MSG_EXT_PPR_IU_REQ;
+ unsigned int period = tinfo->goal.period;
+ unsigned long flags;
+ struct ahc_syncrate *syncrate;
+
+ if (iu)
+ ppr_options |= MSG_EXT_PPR_IU_REQ;
+
+ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+ starget->channel + 'A', ROLE_INITIATOR);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ ahc_lock(ahc, &flags);
+ ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
+ ppr_options, AHC_TRANS_GOAL, FALSE);
+ ahc_unlock(ahc, &flags);
+}
+#endif
+
+static void ahc_linux_get_signalling(struct Scsi_Host *shost)
+{
+ struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
+ unsigned long flags;
+ u8 mode;
+
+ if (!(ahc->features & AHC_ULTRA2)) {
+ /* non-LVD chipset, may not have SBLKCTL reg */
+ spi_signalling(shost) =
+ ahc->features & AHC_HVD ?
+ SPI_SIGNAL_HVD :
+ SPI_SIGNAL_SE;
+ return;
+ }
+
+ ahc_lock(ahc, &flags);
+ ahc_pause(ahc);
+ mode = ahc_inb(ahc, SBLKCTL);
+ ahc_unpause(ahc);
+ ahc_unlock(ahc, &flags);
+
+ if (mode & ENAB40)
+ spi_signalling(shost) = SPI_SIGNAL_LVD;
+ else if (mode & ENAB20)
+ spi_signalling(shost) = SPI_SIGNAL_SE;
+ else
+ spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
+}
+
+static struct spi_function_template ahc_linux_transport_functions = {
+ .set_offset = ahc_linux_set_offset,
+ .show_offset = 1,
+ .set_period = ahc_linux_set_period,
+ .show_period = 1,
+ .set_width = ahc_linux_set_width,
+ .show_width = 1,
+ .set_dt = ahc_linux_set_dt,
+ .show_dt = 1,
+#if 0
+ .set_iu = ahc_linux_set_iu,
+ .show_iu = 1,
+ .set_qas = ahc_linux_set_qas,
+ .show_qas = 1,
+#endif
+ .get_signalling = ahc_linux_get_signalling,
+};
+
+
+
+static int __init
+ahc_linux_init(void)
+{
+ /*
+ * If we've been passed any parameters, process them now.
+ */
+ if (aic7xxx)
+ aic7xxx_setup(aic7xxx);
+
+ ahc_linux_transport_template =
+ spi_attach_transport(&ahc_linux_transport_functions);
+ if (!ahc_linux_transport_template)
+ return -ENODEV;
+
+ scsi_transport_reserve_device(ahc_linux_transport_template,
+ sizeof(struct ahc_linux_device));
+
+ ahc_linux_pci_init();
+ ahc_linux_eisa_init();
+ return 0;
+}
+
+static void
+ahc_linux_exit(void)
+{
+ ahc_linux_pci_exit();
+ ahc_linux_eisa_exit();
+ spi_release_transport(ahc_linux_transport_template);
+}
+
+module_init(ahc_linux_init);
+module_exit(ahc_linux_exit);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
new file mode 100644
index 000000000..bc4cca92f
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -0,0 +1,705 @@
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#151 $
+ *
+ */
+#ifndef _AIC7XXX_LINUX_H_
+#define _AIC7XXX_LINUX_H_
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+/* Core SCSI definitions */
+#define AIC_LIB_PREFIX ahc
+
+/* Name space conflict with BSD queue macros */
+#ifdef LIST_HEAD
+#undef LIST_HEAD
+#endif
+
+#include "cam.h"
+#include "queue.h"
+#include "scsi_message.h"
+#include "aiclib.h"
+
+/*********************************** Debugging ********************************/
+#ifdef CONFIG_AIC7XXX_DEBUG_ENABLE
+#ifdef CONFIG_AIC7XXX_DEBUG_MASK
+#define AHC_DEBUG 1
+#define AHC_DEBUG_OPTS CONFIG_AIC7XXX_DEBUG_MASK
+#else
+/*
+ * Compile in debugging code, but do not enable any printfs.
+ */
+#define AHC_DEBUG 1
+#endif
+/* No debugging code. */
+#endif
+
+/************************* Forward Declarations *******************************/
+struct ahc_softc;
+typedef struct pci_dev *ahc_dev_softc_t;
+typedef struct scsi_cmnd *ahc_io_ctx_t;
+
+/******************************* Byte Order ***********************************/
+#define ahc_htobe16(x) cpu_to_be16(x)
+#define ahc_htobe32(x) cpu_to_be32(x)
+#define ahc_htobe64(x) cpu_to_be64(x)
+#define ahc_htole16(x) cpu_to_le16(x)
+#define ahc_htole32(x) cpu_to_le32(x)
+#define ahc_htole64(x) cpu_to_le64(x)
+
+#define ahc_be16toh(x) be16_to_cpu(x)
+#define ahc_be32toh(x) be32_to_cpu(x)
+#define ahc_be64toh(x) be64_to_cpu(x)
+#define ahc_le16toh(x) le16_to_cpu(x)
+#define ahc_le32toh(x) le32_to_cpu(x)
+#define ahc_le64toh(x) le64_to_cpu(x)
+
+/************************* Configuration Data *********************************/
+extern u_int aic7xxx_no_probe;
+extern u_int aic7xxx_allow_memio;
+extern struct scsi_host_template aic7xxx_driver_template;
+
+/***************************** Bus Space/DMA **********************************/
+
+typedef uint32_t bus_size_t;
+
+typedef enum {
+ BUS_SPACE_MEMIO,
+ BUS_SPACE_PIO
+} bus_space_tag_t;
+
+typedef union {
+ u_long ioport;
+ volatile uint8_t __iomem *maddr;
+} bus_space_handle_t;
+
+typedef struct bus_dma_segment
+{
+ dma_addr_t ds_addr;
+ bus_size_t ds_len;
+} bus_dma_segment_t;
+
+struct ahc_linux_dma_tag
+{
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_size_t maxsize;
+};
+typedef struct ahc_linux_dma_tag* bus_dma_tag_t;
+
+typedef dma_addr_t bus_dmamap_t;
+
+typedef int bus_dma_filter_t(void*, dma_addr_t);
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+
+#define BUS_DMA_WAITOK 0x0
+#define BUS_DMA_NOWAIT 0x1
+#define BUS_DMA_ALLOCNOW 0x2
+#define BUS_DMA_LOAD_SEGS 0x4 /*
+ * Argument is an S/G list not
+ * a single buffer.
+ */
+
+#define BUS_SPACE_MAXADDR 0xFFFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
+
+int ahc_dma_tag_create(struct ahc_softc *, bus_dma_tag_t /*parent*/,
+ bus_size_t /*alignment*/, bus_size_t /*boundary*/,
+ dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
+ bus_dma_filter_t*/*filter*/, void */*filterarg*/,
+ bus_size_t /*maxsize*/, int /*nsegments*/,
+ bus_size_t /*maxsegsz*/, int /*flags*/,
+ bus_dma_tag_t */*dma_tagp*/);
+
+void ahc_dma_tag_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/);
+
+int ahc_dmamem_alloc(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
+ void** /*vaddr*/, int /*flags*/,
+ bus_dmamap_t* /*mapp*/);
+
+void ahc_dmamem_free(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
+ void* /*vaddr*/, bus_dmamap_t /*map*/);
+
+void ahc_dmamap_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/,
+ bus_dmamap_t /*map*/);
+
+int ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t /*dmat*/,
+ bus_dmamap_t /*map*/, void * /*buf*/,
+ bus_size_t /*buflen*/, bus_dmamap_callback_t *,
+ void */*callback_arg*/, int /*flags*/);
+
+int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
+
+/*
+ * Operations performed by ahc_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+/*
+ * XXX
+ * ahc_dmamap_sync is only used on buffers allocated with
+ * the pci_alloc_consistent() API. Although I'm not sure how
+ * this works on architectures with a write buffer, Linux does
+ * not have an API to sync "coherent" memory. Perhaps we need
+ * to do an mb()?
+ */
+#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op)
+
+/********************************** Includes **********************************/
+#ifdef CONFIG_AIC7XXX_REG_PRETTY_PRINT
+#define AIC_DEBUG_REGISTERS 1
+#else
+#define AIC_DEBUG_REGISTERS 0
+#endif
+#include "aic7xxx.h"
+
+/***************************** Timer Facilities *******************************/
+static inline void
+ahc_scb_timer_reset(struct scb *scb, u_int usec)
+{
+}
+
+/***************************** SMP support ************************************/
+#include <linux/spinlock.h>
+
+#define AIC7XXX_DRIVER_VERSION "7.0"
+
+/*************************** Device Data Structures ***************************/
+/*
+ * A per probed device structure used to deal with some error recovery
+ * scenarios that the Linux mid-layer code just doesn't know how to
+ * handle. The structure allocated for a device only becomes persistent
+ * after a successfully completed inquiry command to the target when
+ * that inquiry data indicates a lun is present.
+ */
+typedef enum {
+ AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
+ AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
+ AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
+ AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
+} ahc_linux_dev_flags;
+
+struct ahc_linux_device {
+ /*
+ * The number of transactions currently
+ * queued to the device.
+ */
+ int active;
+
+ /*
+ * The currently allowed number of
+ * transactions that can be queued to
+ * the device. Must be signed for
+ * conversion from tagged to untagged
+ * mode where the device may have more
+ * than one outstanding active transaction.
+ */
+ int openings;
+
+ /*
+ * A positive count indicates that this
+ * device's queue is halted.
+ */
+ u_int qfrozen;
+
+ /*
+ * Cumulative command counter.
+ */
+ u_long commands_issued;
+
+ /*
+ * The number of tagged transactions when
+ * running at our current opening level
+ * that have been successfully received by
+ * this device since the last QUEUE FULL.
+ */
+ u_int tag_success_count;
+#define AHC_TAG_SUCCESS_INTERVAL 50
+
+ ahc_linux_dev_flags flags;
+
+ /*
+ * The high limit for the tags variable.
+ */
+ u_int maxtags;
+
+ /*
+ * The computed number of tags outstanding
+ * at the time of the last QUEUE FULL event.
+ */
+ u_int tags_on_last_queuefull;
+
+ /*
+ * How many times we have seen a queue full
+ * with the same number of tags. This is used
+ * to stop our adaptive queue depth algorithm
+ * on devices with a fixed number of tags.
+ */
+ u_int last_queuefull_same_count;
+#define AHC_LOCK_TAGS_COUNT 50
+
+ /*
+ * How many transactions have been queued
+ * without the device going idle. We use
+ * this statistic to determine when to issue
+ * an ordered tag to prevent transaction
+ * starvation. This statistic is only updated
+ * if the AHC_DEV_PERIODIC_OTAG flag is set
+ * on this device.
+ */
+ u_int commands_since_idle_or_otag;
+#define AHC_OTAG_THRESH 500
+};
+
+/********************* Definitions Required by the Core ***********************/
+/*
+ * Number of SG segments we require. So long as the S/G segments for
+ * a particular transaction are allocated in a physically contiguous
+ * manner and are allocated below 4GB, the number of S/G segments is
+ * unrestricted.
+ */
+#define AHC_NSEG 128
+
+/*
+ * Per-SCB OSM storage.
+ */
+struct scb_platform_data {
+ struct ahc_linux_device *dev;
+ dma_addr_t buf_busaddr;
+ uint32_t xfer_len;
+ uint32_t sense_resid; /* Auto-Sense residual */
+};
+
+/*
+ * Define a structure used for each host adapter. All members are
+ * aligned on a boundary >= the size of the member to honor the
+ * alignment restrictions of the various platforms supported by
+ * this driver.
+ */
+struct ahc_platform_data {
+ /*
+ * Fields accessed from interrupt context.
+ */
+ struct scsi_target *starget[AHC_NUM_TARGETS];
+
+ spinlock_t spin_lock;
+ u_int qfrozen;
+ struct completion *eh_done;
+ struct Scsi_Host *host; /* pointer to scsi host */
+#define AHC_LINUX_NOIRQ ((uint32_t)~0)
+ uint32_t irq; /* IRQ for this adapter */
+ uint32_t bios_address;
+ resource_size_t mem_busaddr; /* Mem Base Addr */
+};
+
+void ahc_delay(long);
+
+
+/***************************** Low Level I/O **********************************/
+uint8_t ahc_inb(struct ahc_softc * ahc, long port);
+void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
+void ahc_outsb(struct ahc_softc * ahc, long port,
+ uint8_t *, int count);
+void ahc_insb(struct ahc_softc * ahc, long port,
+ uint8_t *, int count);
+
+/**************************** Initialization **********************************/
+int ahc_linux_register_host(struct ahc_softc *,
+ struct scsi_host_template *);
+
+/******************************** Locking *************************************/
+/* Lock protecting internal data structures */
+
+static inline void
+ahc_lockinit(struct ahc_softc *ahc)
+{
+ spin_lock_init(&ahc->platform_data->spin_lock);
+}
+
+static inline void
+ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
+{
+ spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
+}
+
+static inline void
+ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
+}
+
+/******************************* PCI Definitions ******************************/
+/*
+ * PCIM_xxx: mask to locate subfield in register
+ * PCIR_xxx: config register offset
+ * PCIC_xxx: device class
+ * PCIS_xxx: device subclass
+ * PCIP_xxx: device programming interface
+ * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
+ * PCID_xxx: device ID
+ */
+#define PCIR_DEVVENDOR 0x00
+#define PCIR_VENDOR 0x00
+#define PCIR_DEVICE 0x02
+#define PCIR_COMMAND 0x04
+#define PCIM_CMD_PORTEN 0x0001
+#define PCIM_CMD_MEMEN 0x0002
+#define PCIM_CMD_BUSMASTEREN 0x0004
+#define PCIM_CMD_MWRICEN 0x0010
+#define PCIM_CMD_PERRESPEN 0x0040
+#define PCIM_CMD_SERRESPEN 0x0100
+#define PCIR_STATUS 0x06
+#define PCIR_REVID 0x08
+#define PCIR_PROGIF 0x09
+#define PCIR_SUBCLASS 0x0a
+#define PCIR_CLASS 0x0b
+#define PCIR_CACHELNSZ 0x0c
+#define PCIR_LATTIMER 0x0d
+#define PCIR_HEADERTYPE 0x0e
+#define PCIM_MFDEV 0x80
+#define PCIR_BIST 0x0f
+#define PCIR_CAP_PTR 0x34
+
+/* config registers for header type 0 devices */
+#define PCIR_MAPS 0x10
+#define PCIR_SUBVEND_0 0x2c
+#define PCIR_SUBDEV_0 0x2e
+
+typedef enum
+{
+ AHC_POWER_STATE_D0,
+ AHC_POWER_STATE_D1,
+ AHC_POWER_STATE_D2,
+ AHC_POWER_STATE_D3
+} ahc_power_state;
+
+/**************************** VL/EISA Routines ********************************/
+#ifdef CONFIG_EISA
+int ahc_linux_eisa_init(void);
+void ahc_linux_eisa_exit(void);
+int aic7770_map_registers(struct ahc_softc *ahc,
+ u_int port);
+int aic7770_map_int(struct ahc_softc *ahc, u_int irq);
+#else
+static inline int ahc_linux_eisa_init(void) {
+ return -ENODEV;
+}
+static inline void ahc_linux_eisa_exit(void) {
+}
+#endif
+
+/******************************* PCI Routines *********************************/
+#ifdef CONFIG_PCI
+int ahc_linux_pci_init(void);
+void ahc_linux_pci_exit(void);
+int ahc_pci_map_registers(struct ahc_softc *ahc);
+int ahc_pci_map_int(struct ahc_softc *ahc);
+
+uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
+ int reg, int width);
+
+void ahc_pci_write_config(ahc_dev_softc_t pci,
+ int reg, uint32_t value,
+ int width);
+
+static inline int ahc_get_pci_function(ahc_dev_softc_t);
+static inline int
+ahc_get_pci_function(ahc_dev_softc_t pci)
+{
+ return (PCI_FUNC(pci->devfn));
+}
+
+static inline int ahc_get_pci_slot(ahc_dev_softc_t);
+static inline int
+ahc_get_pci_slot(ahc_dev_softc_t pci)
+{
+ return (PCI_SLOT(pci->devfn));
+}
+
+static inline int ahc_get_pci_bus(ahc_dev_softc_t);
+static inline int
+ahc_get_pci_bus(ahc_dev_softc_t pci)
+{
+ return (pci->bus->number);
+}
+#else
+static inline int ahc_linux_pci_init(void) {
+ return 0;
+}
+static inline void ahc_linux_pci_exit(void) {
+}
+#endif
+
+static inline void ahc_flush_device_writes(struct ahc_softc *);
+static inline void
+ahc_flush_device_writes(struct ahc_softc *ahc)
+{
+ /* XXX Is this sufficient for all architectures??? */
+ ahc_inb(ahc, INTSTAT);
+}
+
+/**************************** Proc FS Support *********************************/
+int ahc_proc_write_seeprom(struct Scsi_Host *, char *, int);
+int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *);
+
+/*************************** Domain Validation ********************************/
+/*********************** Transaction Access Wrappers *************************/
+static inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
+static inline void ahc_set_transaction_status(struct scb *, uint32_t);
+static inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
+static inline void ahc_set_scsi_status(struct scb *, uint32_t);
+static inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
+static inline uint32_t ahc_get_transaction_status(struct scb *);
+static inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
+static inline uint32_t ahc_get_scsi_status(struct scb *);
+static inline void ahc_set_transaction_tag(struct scb *, int, u_int);
+static inline u_long ahc_get_transfer_length(struct scb *);
+static inline int ahc_get_transfer_dir(struct scb *);
+static inline void ahc_set_residual(struct scb *, u_long);
+static inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
+static inline u_long ahc_get_residual(struct scb *);
+static inline u_long ahc_get_sense_residual(struct scb *);
+static inline int ahc_perform_autosense(struct scb *);
+static inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
+ struct scb *);
+static inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
+ struct ahc_devinfo *);
+static inline void ahc_platform_scb_free(struct ahc_softc *ahc,
+ struct scb *scb);
+static inline void ahc_freeze_scb(struct scb *scb);
+
+static inline
+void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
+{
+ cmd->result &= ~(CAM_STATUS_MASK << 16);
+ cmd->result |= status << 16;
+}
+
+static inline
+void ahc_set_transaction_status(struct scb *scb, uint32_t status)
+{
+ ahc_cmd_set_transaction_status(scb->io_ctx,status);
+}
+
+static inline
+void ahc_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
+{
+ cmd->result &= ~0xFFFF;
+ cmd->result |= status;
+}
+
+static inline
+void ahc_set_scsi_status(struct scb *scb, uint32_t status)
+{
+ ahc_cmd_set_scsi_status(scb->io_ctx, status);
+}
+
+static inline
+uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd)
+{
+ return ((cmd->result >> 16) & CAM_STATUS_MASK);
+}
+
+static inline
+uint32_t ahc_get_transaction_status(struct scb *scb)
+{
+ return (ahc_cmd_get_transaction_status(scb->io_ctx));
+}
+
+static inline
+uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd)
+{
+ return (cmd->result & 0xFFFF);
+}
+
+static inline
+uint32_t ahc_get_scsi_status(struct scb *scb)
+{
+ return (ahc_cmd_get_scsi_status(scb->io_ctx));
+}
+
+static inline
+void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
+{
+ /*
+ * Nothing to do for linux as the incoming transaction
+ * has no concept of tag/non tagged, etc.
+ */
+}
+
+static inline
+u_long ahc_get_transfer_length(struct scb *scb)
+{
+ return (scb->platform_data->xfer_len);
+}
+
+static inline
+int ahc_get_transfer_dir(struct scb *scb)
+{
+ return (scb->io_ctx->sc_data_direction);
+}
+
+static inline
+void ahc_set_residual(struct scb *scb, u_long resid)
+{
+ scsi_set_resid(scb->io_ctx, resid);
+}
+
+static inline
+void ahc_set_sense_residual(struct scb *scb, u_long resid)
+{
+ scb->platform_data->sense_resid = resid;
+}
+
+static inline
+u_long ahc_get_residual(struct scb *scb)
+{
+ return scsi_get_resid(scb->io_ctx);
+}
+
+static inline
+u_long ahc_get_sense_residual(struct scb *scb)
+{
+ return (scb->platform_data->sense_resid);
+}
+
+static inline
+int ahc_perform_autosense(struct scb *scb)
+{
+ /*
+ * We always perform autosense in Linux.
+ * On other platforms this is set on a
+ * per-transaction basis.
+ */
+ return (1);
+}
+
+static inline uint32_t
+ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb)
+{
+ return (sizeof(struct scsi_sense_data));
+}
+
+static inline void
+ahc_notify_xfer_settings_change(struct ahc_softc *ahc,
+ struct ahc_devinfo *devinfo)
+{
+ /* Nothing to do here for linux */
+}
+
+static inline void
+ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb)
+{
+}
+
+int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg);
+void ahc_platform_free(struct ahc_softc *ahc);
+void ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+
+static inline void
+ahc_freeze_scb(struct scb *scb)
+{
+ if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
+}
+
+void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
+ struct ahc_devinfo *devinfo, ahc_queue_alg);
+int ahc_platform_abort_scbs(struct ahc_softc *ahc, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status);
+irqreturn_t
+ ahc_linux_isr(int irq, void *dev_id);
+void ahc_platform_flushwork(struct ahc_softc *ahc);
+void ahc_done(struct ahc_softc*, struct scb*);
+void ahc_send_async(struct ahc_softc *, char channel,
+ u_int target, u_int lun, ac_code);
+void ahc_print_path(struct ahc_softc *, struct scb *);
+void ahc_platform_dump_card_state(struct ahc_softc *ahc);
+
+#ifdef CONFIG_PCI
+#define AHC_PCI_CONFIG 1
+#else
+#define AHC_PCI_CONFIG 0
+#endif
+#define bootverbose aic7xxx_verbose
+extern u_int aic7xxx_verbose;
+#endif /* _AIC7XXX_LINUX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
new file mode 100644
index 000000000..0fc14dac7
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -0,0 +1,470 @@
+/*
+ * Linux driver attachment glue for PCI based controllers.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c#47 $
+ */
+
+#include "aic7xxx_osm.h"
+#include "aic7xxx_pci.h"
+
+/* Define the macro locally since it's different for different class of chips.
+*/
+#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI)
+
+static const struct pci_device_id ahc_linux_pci_id_table[] = {
+ /* aic7850 based controllers */
+ ID(ID_AHA_2902_04_10_15_20C_30C),
+ /* aic7860 based controllers */
+ ID(ID_AHA_2930CU),
+ ID(ID_AHA_1480A & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK),
+ /* aic7870 based controllers */
+ ID(ID_AHA_2940),
+ ID(ID_AHA_3940),
+ ID(ID_AHA_398X),
+ ID(ID_AHA_2944),
+ ID(ID_AHA_3944),
+ ID(ID_AHA_4944),
+ /* aic7880 based controllers */
+ ID(ID_AHA_2940U & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_3940U & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2944U & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_3944U & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_398XU & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_4944U & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2930U & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK),
+ ID(ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK),
+ /* aic7890 based controllers */
+ ID(ID_AHA_2930U2),
+ ID(ID_AHA_2940U2B),
+ ID(ID_AHA_2940U2_OEM),
+ ID(ID_AHA_2940U2),
+ ID(ID_AHA_2950U2B),
+ ID16(ID_AIC7890_ARO & ID_AIC7895_ARO_MASK),
+ ID(ID_AAA_131U2),
+ /* aic7890 based controllers */
+ ID(ID_AHA_29160),
+ ID(ID_AHA_29160_CPQ),
+ ID(ID_AHA_29160N),
+ ID(ID_AHA_29160C),
+ ID(ID_AHA_29160B),
+ ID(ID_AHA_19160B),
+ ID(ID_AIC7892_ARO),
+ /* aic7892 based controllers */
+ ID(ID_AHA_2940U_DUAL),
+ ID(ID_AHA_3940AU),
+ ID(ID_AHA_3944AU),
+ ID(ID_AIC7895_ARO),
+ ID(ID_AHA_3950U2B_0),
+ ID(ID_AHA_3950U2B_1),
+ ID(ID_AHA_3950U2D_0),
+ ID(ID_AHA_3950U2D_1),
+ ID(ID_AIC7896_ARO),
+ /* aic7899 based controllers */
+ ID(ID_AHA_3960D),
+ ID(ID_AHA_3960D_CPQ),
+ ID(ID_AIC7899_ARO),
+ /* Generic chip probes for devices we don't know exactly. */
+ ID(ID_AIC7850 & ID_DEV_VENDOR_MASK),
+ ID(ID_AIC7855 & ID_DEV_VENDOR_MASK),
+ ID(ID_AIC7859 & ID_DEV_VENDOR_MASK),
+ ID(ID_AIC7860 & ID_DEV_VENDOR_MASK),
+ ID(ID_AIC7870 & ID_DEV_VENDOR_MASK),
+ ID(ID_AIC7880 & ID_DEV_VENDOR_MASK),
+ ID16(ID_AIC7890 & ID_9005_GENERIC_MASK),
+ ID16(ID_AIC7892 & ID_9005_GENERIC_MASK),
+ ID(ID_AIC7895 & ID_DEV_VENDOR_MASK),
+ ID16(ID_AIC7896 & ID_9005_GENERIC_MASK),
+ ID16(ID_AIC7899 & ID_9005_GENERIC_MASK),
+ ID(ID_AIC7810 & ID_DEV_VENDOR_MASK),
+ ID(ID_AIC7815 & ID_DEV_VENDOR_MASK),
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
+
+#ifdef CONFIG_PM
+static int
+ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ahc_softc *ahc = pci_get_drvdata(pdev);
+ int rc;
+
+ if ((rc = ahc_suspend(ahc)))
+ return rc;
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ if (mesg.event & PM_EVENT_SLEEP)
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return rc;
+}
+
+static int
+ahc_linux_pci_dev_resume(struct pci_dev *pdev)
+{
+ struct ahc_softc *ahc = pci_get_drvdata(pdev);
+ int rc;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if ((rc = pci_enable_device(pdev))) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to enable device after resume (%d)\n", rc);
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ ahc_pci_resume(ahc);
+
+ return (ahc_resume(ahc));
+}
+#endif
+
+static void
+ahc_linux_pci_dev_remove(struct pci_dev *pdev)
+{
+ struct ahc_softc *ahc = pci_get_drvdata(pdev);
+ u_long s;
+
+ if (ahc->platform_data && ahc->platform_data->host)
+ scsi_remove_host(ahc->platform_data->host);
+
+ ahc_lock(ahc, &s);
+ ahc_intr_enable(ahc, FALSE);
+ ahc_unlock(ahc, &s);
+ ahc_free(ahc);
+}
+
+static void
+ahc_linux_pci_inherit_flags(struct ahc_softc *ahc)
+{
+ struct pci_dev *pdev = ahc->dev_softc, *master_pdev;
+ unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+
+ master_pdev = pci_get_slot(pdev->bus, master_devfn);
+ if (master_pdev) {
+ struct ahc_softc *master = pci_get_drvdata(master_pdev);
+ if (master) {
+ ahc->flags &= ~AHC_BIOS_ENABLED;
+ ahc->flags |= master->flags & AHC_BIOS_ENABLED;
+
+ ahc->flags &= ~AHC_PRIMARY_CHANNEL;
+ ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL;
+ } else
+ printk(KERN_ERR "aic7xxx: no multichannel peer found!\n");
+ pci_dev_put(master_pdev);
+ }
+}
+
+static int
+ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ char buf[80];
+ const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
+ struct ahc_softc *ahc;
+ ahc_dev_softc_t pci;
+ const struct ahc_pci_identity *entry;
+ char *name;
+ int error;
+ struct device *dev = &pdev->dev;
+
+ pci = pdev;
+ entry = ahc_find_pci_device(pci);
+ if (entry == NULL)
+ return (-ENODEV);
+
+ /*
+ * Allocate a softc for this card and
+ * set it up for attachment by our
+ * common detect routine.
+ */
+ sprintf(buf, "ahc_pci:%d:%d:%d",
+ ahc_get_pci_bus(pci),
+ ahc_get_pci_slot(pci),
+ ahc_get_pci_function(pci));
+ name = kstrdup(buf, GFP_ATOMIC);
+ if (name == NULL)
+ return (-ENOMEM);
+ ahc = ahc_alloc(NULL, name);
+ if (ahc == NULL)
+ return (-ENOMEM);
+ if (pci_enable_device(pdev)) {
+ ahc_free(ahc);
+ return (-ENODEV);
+ }
+ pci_set_master(pdev);
+
+ if (sizeof(dma_addr_t) > 4
+ && ahc->features & AHC_LARGE_SCBS
+ && dma_set_mask(dev, mask_39bit) == 0
+ && dma_get_required_mask(dev) > DMA_BIT_MASK(32)) {
+ ahc->flags |= AHC_39BIT_ADDRESSING;
+ } else {
+ if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ ahc_free(ahc);
+ printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
+ return (-ENODEV);
+ }
+ }
+ ahc->dev_softc = pci;
+ error = ahc_pci_config(ahc, entry);
+ if (error != 0) {
+ ahc_free(ahc);
+ return (-error);
+ }
+
+ /*
+ * Second Function PCI devices need to inherit some
+ * settings from function 0.
+ */
+ if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
+ ahc_linux_pci_inherit_flags(ahc);
+
+ pci_set_drvdata(pdev, ahc);
+ ahc_linux_register_host(ahc, &aic7xxx_driver_template);
+ return (0);
+}
+
+/******************************* PCI Routines *********************************/
+uint32_t
+ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
+{
+ switch (width) {
+ case 1:
+ {
+ uint8_t retval;
+
+ pci_read_config_byte(pci, reg, &retval);
+ return (retval);
+ }
+ case 2:
+ {
+ uint16_t retval;
+ pci_read_config_word(pci, reg, &retval);
+ return (retval);
+ }
+ case 4:
+ {
+ uint32_t retval;
+ pci_read_config_dword(pci, reg, &retval);
+ return (retval);
+ }
+ default:
+ panic("ahc_pci_read_config: Read size too big");
+ /* NOTREACHED */
+ return (0);
+ }
+}
+
+void
+ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+ switch (width) {
+ case 1:
+ pci_write_config_byte(pci, reg, value);
+ break;
+ case 2:
+ pci_write_config_word(pci, reg, value);
+ break;
+ case 4:
+ pci_write_config_dword(pci, reg, value);
+ break;
+ default:
+ panic("ahc_pci_write_config: Write size too big");
+ /* NOTREACHED */
+ }
+}
+
+
+static struct pci_driver aic7xxx_pci_driver = {
+ .name = "aic7xxx",
+ .probe = ahc_linux_pci_dev_probe,
+#ifdef CONFIG_PM
+ .suspend = ahc_linux_pci_dev_suspend,
+ .resume = ahc_linux_pci_dev_resume,
+#endif
+ .remove = ahc_linux_pci_dev_remove,
+ .id_table = ahc_linux_pci_id_table
+};
+
+int
+ahc_linux_pci_init(void)
+{
+ return pci_register_driver(&aic7xxx_pci_driver);
+}
+
+void
+ahc_linux_pci_exit(void)
+{
+ pci_unregister_driver(&aic7xxx_pci_driver);
+}
+
+static int
+ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
+{
+ if (aic7xxx_allow_memio == 0)
+ return (ENOMEM);
+
+ *base = pci_resource_start(ahc->dev_softc, 0);
+ if (*base == 0)
+ return (ENOMEM);
+ if (!request_region(*base, 256, "aic7xxx"))
+ return (ENOMEM);
+ return (0);
+}
+
+static int
+ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
+ resource_size_t *bus_addr,
+ uint8_t __iomem **maddr)
+{
+ resource_size_t start;
+ int error;
+
+ error = 0;
+ start = pci_resource_start(ahc->dev_softc, 1);
+ if (start != 0) {
+ *bus_addr = start;
+ if (!request_mem_region(start, 0x1000, "aic7xxx"))
+ error = ENOMEM;
+ if (error == 0) {
+ *maddr = ioremap_nocache(start, 256);
+ if (*maddr == NULL) {
+ error = ENOMEM;
+ release_mem_region(start, 0x1000);
+ }
+ }
+ } else
+ error = ENOMEM;
+ return (error);
+}
+
+int
+ahc_pci_map_registers(struct ahc_softc *ahc)
+{
+ uint32_t command;
+ resource_size_t base;
+ uint8_t __iomem *maddr;
+ int error;
+
+ /*
+ * If its allowed, we prefer memory mapped access.
+ */
+ command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4);
+ command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN);
+ base = 0;
+ maddr = NULL;
+ error = ahc_linux_pci_reserve_mem_region(ahc, &base, &maddr);
+ if (error == 0) {
+ ahc->platform_data->mem_busaddr = base;
+ ahc->tag = BUS_SPACE_MEMIO;
+ ahc->bsh.maddr = maddr;
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+ command | PCIM_CMD_MEMEN, 4);
+
+ /*
+ * Do a quick test to see if memory mapped
+ * I/O is functioning correctly.
+ */
+ if (ahc_pci_test_register_access(ahc) != 0) {
+
+ printk("aic7xxx: PCI Device %d:%d:%d "
+ "failed memory mapped test. Using PIO.\n",
+ ahc_get_pci_bus(ahc->dev_softc),
+ ahc_get_pci_slot(ahc->dev_softc),
+ ahc_get_pci_function(ahc->dev_softc));
+ iounmap(maddr);
+ release_mem_region(ahc->platform_data->mem_busaddr,
+ 0x1000);
+ ahc->bsh.maddr = NULL;
+ maddr = NULL;
+ } else
+ command |= PCIM_CMD_MEMEN;
+ } else {
+ printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
+ "unavailable. Cannot memory map device.\n",
+ ahc_get_pci_bus(ahc->dev_softc),
+ ahc_get_pci_slot(ahc->dev_softc),
+ ahc_get_pci_function(ahc->dev_softc),
+ (unsigned long long)base);
+ }
+
+ /*
+ * We always prefer memory mapped access.
+ */
+ if (maddr == NULL) {
+
+ error = ahc_linux_pci_reserve_io_region(ahc, &base);
+ if (error == 0) {
+ ahc->tag = BUS_SPACE_PIO;
+ ahc->bsh.ioport = (u_long)base;
+ command |= PCIM_CMD_PORTEN;
+ } else {
+ printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
+ "unavailable. Cannot map device.\n",
+ ahc_get_pci_bus(ahc->dev_softc),
+ ahc_get_pci_slot(ahc->dev_softc),
+ ahc_get_pci_function(ahc->dev_softc),
+ (unsigned long long)base);
+ }
+ }
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
+ return (error);
+}
+
+int
+ahc_pci_map_int(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = request_irq(ahc->dev_softc->irq, ahc_linux_isr,
+ IRQF_SHARED, "aic7xxx", ahc);
+ if (error == 0)
+ ahc->platform_data->irq = ahc->dev_softc->irq;
+
+ return (-error);
+}
+
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
new file mode 100644
index 000000000..22d5a949e
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -0,0 +1,2469 @@
+/*
+ * Product specific probe and attach routines for:
+ * 3940, 2940, aic7895, aic7890, aic7880,
+ * aic7870, aic7860 and aic7850 SCSI controllers
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aic7xxx_93cx6.h>
+#endif
+
+#include "aic7xxx_pci.h"
+
+static inline uint64_t
+ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
+{
+ uint64_t id;
+
+ id = subvendor
+ | (subdevice << 16)
+ | ((uint64_t)vendor << 32)
+ | ((uint64_t)device << 48);
+
+ return (id);
+}
+
+#define AHC_PCI_IOADDR PCIR_MAPS /* I/O Address */
+#define AHC_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */
+
+#define DEVID_9005_TYPE(id) ((id) & 0xF)
+#define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */
+#define DEVID_9005_TYPE_AAA 0x3 /* RAID Card */
+#define DEVID_9005_TYPE_SISL 0x5 /* Container ROMB */
+#define DEVID_9005_TYPE_MB 0xF /* On Motherboard */
+
+#define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
+#define DEVID_9005_MAXRATE_U160 0x0
+#define DEVID_9005_MAXRATE_ULTRA2 0x1
+#define DEVID_9005_MAXRATE_ULTRA 0x2
+#define DEVID_9005_MAXRATE_FAST 0x3
+
+#define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6)
+
+#define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8)
+#define DEVID_9005_CLASS_SPI 0x0 /* Parallel SCSI */
+
+#define SUBID_9005_TYPE(id) ((id) & 0xF)
+#define SUBID_9005_TYPE_MB 0xF /* On Motherboard */
+#define SUBID_9005_TYPE_CARD 0x0 /* Standard Card */
+#define SUBID_9005_TYPE_LCCARD 0x1 /* Low Cost Card */
+#define SUBID_9005_TYPE_RAID 0x3 /* Combined with Raid */
+
+#define SUBID_9005_TYPE_KNOWN(id) \
+ ((((id) & 0xF) == SUBID_9005_TYPE_MB) \
+ || (((id) & 0xF) == SUBID_9005_TYPE_CARD) \
+ || (((id) & 0xF) == SUBID_9005_TYPE_LCCARD) \
+ || (((id) & 0xF) == SUBID_9005_TYPE_RAID))
+
+#define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
+#define SUBID_9005_MAXRATE_ULTRA2 0x0
+#define SUBID_9005_MAXRATE_ULTRA 0x1
+#define SUBID_9005_MAXRATE_U160 0x2
+#define SUBID_9005_MAXRATE_RESERVED 0x3
+
+#define SUBID_9005_SEEPTYPE(id) \
+ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
+ ? ((id) & 0xC0) >> 6 \
+ : ((id) & 0x300) >> 8)
+#define SUBID_9005_SEEPTYPE_NONE 0x0
+#define SUBID_9005_SEEPTYPE_1K 0x1
+#define SUBID_9005_SEEPTYPE_2K_4K 0x2
+#define SUBID_9005_SEEPTYPE_RESERVED 0x3
+#define SUBID_9005_AUTOTERM(id) \
+ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
+ ? (((id) & 0x400) >> 10) == 0 \
+ : (((id) & 0x40) >> 6) == 0)
+
+#define SUBID_9005_NUMCHAN(id) \
+ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
+ ? ((id) & 0x300) >> 8 \
+ : ((id) & 0xC00) >> 10)
+
+#define SUBID_9005_LEGACYCONN(id) \
+ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
+ ? 0 \
+ : ((id) & 0x80) >> 7)
+
+#define SUBID_9005_MFUNCENB(id) \
+ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \
+ ? ((id) & 0x800) >> 11 \
+ : ((id) & 0x1000) >> 12)
+/*
+ * Informational only. Should use chip register to be
+ * certain, but may be use in identification strings.
+ */
+#define SUBID_9005_CARD_SCSIWIDTH_MASK 0x2000
+#define SUBID_9005_CARD_PCIWIDTH_MASK 0x4000
+#define SUBID_9005_CARD_SEDIFF_MASK 0x8000
+
+static ahc_device_setup_t ahc_aic785X_setup;
+static ahc_device_setup_t ahc_aic7860_setup;
+static ahc_device_setup_t ahc_apa1480_setup;
+static ahc_device_setup_t ahc_aic7870_setup;
+static ahc_device_setup_t ahc_aic7870h_setup;
+static ahc_device_setup_t ahc_aha394X_setup;
+static ahc_device_setup_t ahc_aha394Xh_setup;
+static ahc_device_setup_t ahc_aha494X_setup;
+static ahc_device_setup_t ahc_aha494Xh_setup;
+static ahc_device_setup_t ahc_aha398X_setup;
+static ahc_device_setup_t ahc_aic7880_setup;
+static ahc_device_setup_t ahc_aic7880h_setup;
+static ahc_device_setup_t ahc_aha2940Pro_setup;
+static ahc_device_setup_t ahc_aha394XU_setup;
+static ahc_device_setup_t ahc_aha394XUh_setup;
+static ahc_device_setup_t ahc_aha398XU_setup;
+static ahc_device_setup_t ahc_aic7890_setup;
+static ahc_device_setup_t ahc_aic7892_setup;
+static ahc_device_setup_t ahc_aic7895_setup;
+static ahc_device_setup_t ahc_aic7895h_setup;
+static ahc_device_setup_t ahc_aic7896_setup;
+static ahc_device_setup_t ahc_aic7899_setup;
+static ahc_device_setup_t ahc_aha29160C_setup;
+static ahc_device_setup_t ahc_raid_setup;
+static ahc_device_setup_t ahc_aha394XX_setup;
+static ahc_device_setup_t ahc_aha494XX_setup;
+static ahc_device_setup_t ahc_aha398XX_setup;
+
+static const struct ahc_pci_identity ahc_pci_ident_table[] = {
+ /* aic7850 based controllers */
+ {
+ ID_AHA_2902_04_10_15_20C_30C,
+ ID_ALL_MASK,
+ "Adaptec 2902/04/10/15/20C/30C SCSI adapter",
+ ahc_aic785X_setup
+ },
+ /* aic7860 based controllers */
+ {
+ ID_AHA_2930CU,
+ ID_ALL_MASK,
+ "Adaptec 2930CU SCSI adapter",
+ ahc_aic7860_setup
+ },
+ {
+ ID_AHA_1480A & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 1480A Ultra SCSI adapter",
+ ahc_apa1480_setup
+ },
+ {
+ ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2940A Ultra SCSI adapter",
+ ahc_aic7860_setup
+ },
+ {
+ ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2940A/CN Ultra SCSI adapter",
+ ahc_aic7860_setup
+ },
+ {
+ ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2930C Ultra SCSI adapter (VAR)",
+ ahc_aic7860_setup
+ },
+ /* aic7870 based controllers */
+ {
+ ID_AHA_2940,
+ ID_ALL_MASK,
+ "Adaptec 2940 SCSI adapter",
+ ahc_aic7870_setup
+ },
+ {
+ ID_AHA_3940,
+ ID_ALL_MASK,
+ "Adaptec 3940 SCSI adapter",
+ ahc_aha394X_setup
+ },
+ {
+ ID_AHA_398X,
+ ID_ALL_MASK,
+ "Adaptec 398X SCSI RAID adapter",
+ ahc_aha398X_setup
+ },
+ {
+ ID_AHA_2944,
+ ID_ALL_MASK,
+ "Adaptec 2944 SCSI adapter",
+ ahc_aic7870h_setup
+ },
+ {
+ ID_AHA_3944,
+ ID_ALL_MASK,
+ "Adaptec 3944 SCSI adapter",
+ ahc_aha394Xh_setup
+ },
+ {
+ ID_AHA_4944,
+ ID_ALL_MASK,
+ "Adaptec 4944 SCSI adapter",
+ ahc_aha494Xh_setup
+ },
+ /* aic7880 based controllers */
+ {
+ ID_AHA_2940U & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2940 Ultra SCSI adapter",
+ ahc_aic7880_setup
+ },
+ {
+ ID_AHA_3940U & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 3940 Ultra SCSI adapter",
+ ahc_aha394XU_setup
+ },
+ {
+ ID_AHA_2944U & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2944 Ultra SCSI adapter",
+ ahc_aic7880h_setup
+ },
+ {
+ ID_AHA_3944U & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 3944 Ultra SCSI adapter",
+ ahc_aha394XUh_setup
+ },
+ {
+ ID_AHA_398XU & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 398X Ultra SCSI RAID adapter",
+ ahc_aha398XU_setup
+ },
+ {
+ /*
+ * XXX Don't know the slot numbers
+ * so we can't identify channels
+ */
+ ID_AHA_4944U & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 4944 Ultra SCSI adapter",
+ ahc_aic7880h_setup
+ },
+ {
+ ID_AHA_2930U & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2930 Ultra SCSI adapter",
+ ahc_aic7880_setup
+ },
+ {
+ ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2940 Pro Ultra SCSI adapter",
+ ahc_aha2940Pro_setup
+ },
+ {
+ ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec 2940/CN Ultra SCSI adapter",
+ ahc_aic7880_setup
+ },
+ /* Ignore all SISL (AAC on MB) based controllers. */
+ {
+ ID_9005_SISL_ID,
+ ID_9005_SISL_MASK,
+ NULL,
+ NULL
+ },
+ /* aic7890 based controllers */
+ {
+ ID_AHA_2930U2,
+ ID_ALL_MASK,
+ "Adaptec 2930 Ultra2 SCSI adapter",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AHA_2940U2B,
+ ID_ALL_MASK,
+ "Adaptec 2940B Ultra2 SCSI adapter",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AHA_2940U2_OEM,
+ ID_ALL_MASK,
+ "Adaptec 2940 Ultra2 SCSI adapter (OEM)",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AHA_2940U2,
+ ID_ALL_MASK,
+ "Adaptec 2940 Ultra2 SCSI adapter",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AHA_2950U2B,
+ ID_ALL_MASK,
+ "Adaptec 2950 Ultra2 SCSI adapter",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AIC7890_ARO,
+ ID_ALL_MASK,
+ "Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AAA_131U2,
+ ID_ALL_MASK,
+ "Adaptec AAA-131 Ultra2 RAID adapter",
+ ahc_aic7890_setup
+ },
+ /* aic7892 based controllers */
+ {
+ ID_AHA_29160,
+ ID_ALL_MASK,
+ "Adaptec 29160 Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AHA_29160_CPQ,
+ ID_ALL_MASK,
+ "Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AHA_29160N,
+ ID_ALL_MASK,
+ "Adaptec 29160N Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AHA_29160C,
+ ID_ALL_MASK,
+ "Adaptec 29160C Ultra160 SCSI adapter",
+ ahc_aha29160C_setup
+ },
+ {
+ ID_AHA_29160B,
+ ID_ALL_MASK,
+ "Adaptec 29160B Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AHA_19160B,
+ ID_ALL_MASK,
+ "Adaptec 19160B Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AIC7892_ARO,
+ ID_ALL_MASK,
+ "Adaptec aic7892 Ultra160 SCSI adapter (ARO)",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AHA_2915_30LP,
+ ID_ALL_MASK,
+ "Adaptec 2915/30LP Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ /* aic7895 based controllers */
+ {
+ ID_AHA_2940U_DUAL,
+ ID_ALL_MASK,
+ "Adaptec 2940/DUAL Ultra SCSI adapter",
+ ahc_aic7895_setup
+ },
+ {
+ ID_AHA_3940AU,
+ ID_ALL_MASK,
+ "Adaptec 3940A Ultra SCSI adapter",
+ ahc_aic7895_setup
+ },
+ {
+ ID_AHA_3944AU,
+ ID_ALL_MASK,
+ "Adaptec 3944A Ultra SCSI adapter",
+ ahc_aic7895h_setup
+ },
+ {
+ ID_AIC7895_ARO,
+ ID_AIC7895_ARO_MASK,
+ "Adaptec aic7895 Ultra SCSI adapter (ARO)",
+ ahc_aic7895_setup
+ },
+ /* aic7896/97 based controllers */
+ {
+ ID_AHA_3950U2B_0,
+ ID_ALL_MASK,
+ "Adaptec 3950B Ultra2 SCSI adapter",
+ ahc_aic7896_setup
+ },
+ {
+ ID_AHA_3950U2B_1,
+ ID_ALL_MASK,
+ "Adaptec 3950B Ultra2 SCSI adapter",
+ ahc_aic7896_setup
+ },
+ {
+ ID_AHA_3950U2D_0,
+ ID_ALL_MASK,
+ "Adaptec 3950D Ultra2 SCSI adapter",
+ ahc_aic7896_setup
+ },
+ {
+ ID_AHA_3950U2D_1,
+ ID_ALL_MASK,
+ "Adaptec 3950D Ultra2 SCSI adapter",
+ ahc_aic7896_setup
+ },
+ {
+ ID_AIC7896_ARO,
+ ID_ALL_MASK,
+ "Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)",
+ ahc_aic7896_setup
+ },
+ /* aic7899 based controllers */
+ {
+ ID_AHA_3960D,
+ ID_ALL_MASK,
+ "Adaptec 3960D Ultra160 SCSI adapter",
+ ahc_aic7899_setup
+ },
+ {
+ ID_AHA_3960D_CPQ,
+ ID_ALL_MASK,
+ "Adaptec (Compaq OEM) 3960D Ultra160 SCSI adapter",
+ ahc_aic7899_setup
+ },
+ {
+ ID_AIC7899_ARO,
+ ID_ALL_MASK,
+ "Adaptec aic7899 Ultra160 SCSI adapter (ARO)",
+ ahc_aic7899_setup
+ },
+ /* Generic chip probes for devices we don't know 'exactly' */
+ {
+ ID_AIC7850 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7850 SCSI adapter",
+ ahc_aic785X_setup
+ },
+ {
+ ID_AIC7855 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7855 SCSI adapter",
+ ahc_aic785X_setup
+ },
+ {
+ ID_AIC7859 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7859 SCSI adapter",
+ ahc_aic7860_setup
+ },
+ {
+ ID_AIC7860 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7860 Ultra SCSI adapter",
+ ahc_aic7860_setup
+ },
+ {
+ ID_AIC7870 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7870 SCSI adapter",
+ ahc_aic7870_setup
+ },
+ {
+ ID_AIC7880 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7880 Ultra SCSI adapter",
+ ahc_aic7880_setup
+ },
+ {
+ ID_AIC7890 & ID_9005_GENERIC_MASK,
+ ID_9005_GENERIC_MASK,
+ "Adaptec aic7890/91 Ultra2 SCSI adapter",
+ ahc_aic7890_setup
+ },
+ {
+ ID_AIC7892 & ID_9005_GENERIC_MASK,
+ ID_9005_GENERIC_MASK,
+ "Adaptec aic7892 Ultra160 SCSI adapter",
+ ahc_aic7892_setup
+ },
+ {
+ ID_AIC7895 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7895 Ultra SCSI adapter",
+ ahc_aic7895_setup
+ },
+ {
+ ID_AIC7896 & ID_9005_GENERIC_MASK,
+ ID_9005_GENERIC_MASK,
+ "Adaptec aic7896/97 Ultra2 SCSI adapter",
+ ahc_aic7896_setup
+ },
+ {
+ ID_AIC7899 & ID_9005_GENERIC_MASK,
+ ID_9005_GENERIC_MASK,
+ "Adaptec aic7899 Ultra160 SCSI adapter",
+ ahc_aic7899_setup
+ },
+ {
+ ID_AIC7810 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7810 RAID memory controller",
+ ahc_raid_setup
+ },
+ {
+ ID_AIC7815 & ID_DEV_VENDOR_MASK,
+ ID_DEV_VENDOR_MASK,
+ "Adaptec aic7815 RAID memory controller",
+ ahc_raid_setup
+ }
+};
+
+static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table);
+
+#define AHC_394X_SLOT_CHANNEL_A 4
+#define AHC_394X_SLOT_CHANNEL_B 5
+
+#define AHC_398X_SLOT_CHANNEL_A 4
+#define AHC_398X_SLOT_CHANNEL_B 8
+#define AHC_398X_SLOT_CHANNEL_C 12
+
+#define AHC_494X_SLOT_CHANNEL_A 4
+#define AHC_494X_SLOT_CHANNEL_B 5
+#define AHC_494X_SLOT_CHANNEL_C 6
+#define AHC_494X_SLOT_CHANNEL_D 7
+
+#define DEVCONFIG 0x40
+#define PCIERRGENDIS 0x80000000ul
+#define SCBSIZE32 0x00010000ul /* aic789X only */
+#define REXTVALID 0x00001000ul /* ultra cards only */
+#define MPORTMODE 0x00000400ul /* aic7870+ only */
+#define RAMPSM 0x00000200ul /* aic7870+ only */
+#define VOLSENSE 0x00000100ul
+#define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/
+#define SCBRAMSEL 0x00000080ul
+#define MRDCEN 0x00000040ul
+#define EXTSCBTIME 0x00000020ul /* aic7870 only */
+#define EXTSCBPEN 0x00000010ul /* aic7870 only */
+#define BERREN 0x00000008ul
+#define DACEN 0x00000004ul
+#define STPWLEVEL 0x00000002ul
+#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
+
+#define CSIZE_LATTIME 0x0c
+#define CACHESIZE 0x0000003ful /* only 5 bits */
+#define LATTIME 0x0000ff00ul
+
+/* PCI STATUS definitions */
+#define DPE 0x80
+#define SSE 0x40
+#define RMA 0x20
+#define RTA 0x10
+#define STA 0x08
+#define DPR 0x01
+
+static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
+ uint16_t subvendor, uint16_t subdevice);
+static int ahc_ext_scbram_present(struct ahc_softc *ahc);
+static void ahc_scbram_config(struct ahc_softc *ahc, int enable,
+ int pcheck, int fast, int large);
+static void ahc_probe_ext_scbram(struct ahc_softc *ahc);
+static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1);
+static void ahc_parse_pci_eeprom(struct ahc_softc *ahc,
+ struct seeprom_config *sc);
+static void configure_termination(struct ahc_softc *ahc,
+ struct seeprom_descriptor *sd,
+ u_int adapter_control,
+ u_int *sxfrctl1);
+
+static void ahc_new_term_detect(struct ahc_softc *ahc,
+ int *enableSEC_low,
+ int *enableSEC_high,
+ int *enablePRI_low,
+ int *enablePRI_high,
+ int *eeprom_present);
+static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+ int *internal68_present,
+ int *externalcable_present,
+ int *eeprom_present);
+static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+ int *externalcable_present,
+ int *eeprom_present);
+static void write_brdctl(struct ahc_softc *ahc, uint8_t value);
+static uint8_t read_brdctl(struct ahc_softc *ahc);
+static void ahc_pci_intr(struct ahc_softc *ahc);
+static int ahc_pci_chip_init(struct ahc_softc *ahc);
+
+static int
+ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
+ uint16_t subdevice, uint16_t subvendor)
+{
+ int result;
+
+ /* Default to invalid. */
+ result = 0;
+ if (vendor == 0x9005
+ && subvendor == 0x9005
+ && subdevice != device
+ && SUBID_9005_TYPE_KNOWN(subdevice) != 0) {
+
+ switch (SUBID_9005_TYPE(subdevice)) {
+ case SUBID_9005_TYPE_MB:
+ break;
+ case SUBID_9005_TYPE_CARD:
+ case SUBID_9005_TYPE_LCCARD:
+ /*
+ * Currently only trust Adaptec cards to
+ * get the sub device info correct.
+ */
+ if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA)
+ result = 1;
+ break;
+ case SUBID_9005_TYPE_RAID:
+ break;
+ default:
+ break;
+ }
+ }
+ return (result);
+}
+
+const struct ahc_pci_identity *
+ahc_find_pci_device(ahc_dev_softc_t pci)
+{
+ uint64_t full_id;
+ uint16_t device;
+ uint16_t vendor;
+ uint16_t subdevice;
+ uint16_t subvendor;
+ const struct ahc_pci_identity *entry;
+ u_int i;
+
+ vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
+ device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
+ subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
+ subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
+
+ /*
+ * If the second function is not hooked up, ignore it.
+ * Unfortunately, not all MB vendors implement the
+ * subdevice ID as per the Adaptec spec, so do our best
+ * to sanity check it prior to accepting the subdevice
+ * ID as valid.
+ */
+ if (ahc_get_pci_function(pci) > 0
+ && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
+ && SUBID_9005_MFUNCENB(subdevice) == 0)
+ return (NULL);
+
+ for (i = 0; i < ahc_num_pci_devs; i++) {
+ entry = &ahc_pci_ident_table[i];
+ if (entry->full_id == (full_id & entry->id_mask)) {
+ /* Honor exclusion entries. */
+ if (entry->name == NULL)
+ return (NULL);
+ return (entry);
+ }
+ }
+ return (NULL);
+}
+
+int
+ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
+{
+ u_int command;
+ u_int our_id;
+ u_int sxfrctl1;
+ u_int scsiseq;
+ u_int dscommand0;
+ uint32_t devconfig;
+ int error;
+ uint8_t sblkctl;
+
+ our_id = 0;
+ error = entry->setup(ahc);
+ if (error != 0)
+ return (error);
+ ahc->chip |= AHC_PCI;
+ ahc->description = entry->name;
+
+ pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
+
+ error = ahc_pci_map_registers(ahc);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Before we continue probing the card, ensure that
+ * its interrupts are *disabled*. We don't want
+ * a misstep to hang the machine in an interrupt
+ * storm.
+ */
+ ahc_intr_enable(ahc, FALSE);
+
+ devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
+
+ /*
+ * If we need to support high memory, enable dual
+ * address cycles. This bit must be set to enable
+ * high address bit generation even if we are on a
+ * 64bit bus (PCI64BIT set in devconfig).
+ */
+ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+
+ if (bootverbose)
+ printk("%s: Enabling 39Bit Addressing\n",
+ ahc_name(ahc));
+ devconfig |= DACEN;
+ }
+
+ /* Ensure that pci error generation, a test feature, is disabled. */
+ devconfig |= PCIERRGENDIS;
+
+ ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
+
+ /* Ensure busmastering is enabled */
+ command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
+ command |= PCIM_CMD_BUSMASTEREN;
+
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2);
+
+ /* On all PCI adapters, we allow SCB paging */
+ ahc->flags |= AHC_PAGESCBS;
+
+ error = ahc_softc_init(ahc);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Disable PCI parity error checking. Users typically
+ * do this to work around broken PCI chipsets that get
+ * the parity timing wrong and thus generate lots of spurious
+ * errors. The chip only allows us to disable *all* parity
+ * error reporting when doing this, so CIO bus, scb ram, and
+ * scratch ram parity errors will be ignored too.
+ */
+ if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0)
+ ahc->seqctl |= FAILDIS;
+
+ ahc->bus_intr = ahc_pci_intr;
+ ahc->bus_chip_init = ahc_pci_chip_init;
+
+ /* Remember how the card was setup in case there is no SEEPROM */
+ if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) {
+ ahc_pause(ahc);
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
+ else
+ our_id = ahc_inb(ahc, SCSIID) & OID;
+ sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN;
+ scsiseq = ahc_inb(ahc, SCSISEQ);
+ } else {
+ sxfrctl1 = STPWEN;
+ our_id = 7;
+ scsiseq = 0;
+ }
+
+ error = ahc_reset(ahc, /*reinit*/FALSE);
+ if (error != 0)
+ return (ENXIO);
+
+ if ((ahc->features & AHC_DT) != 0) {
+ u_int sfunct;
+
+ /* Perform ALT-Mode Setup */
+ sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
+ ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
+ ahc_outb(ahc, OPTIONMODE,
+ OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS);
+ ahc_outb(ahc, SFUNCT, sfunct);
+
+ /* Normal mode setup */
+ ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN
+ |TARGCRCENDEN);
+ }
+
+ dscommand0 = ahc_inb(ahc, DSCOMMAND0);
+ dscommand0 |= MPARCKEN|CACHETHEN;
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+
+ /*
+ * DPARCKEN doesn't work correctly on
+ * some MBs so don't use it.
+ */
+ dscommand0 &= ~DPARCKEN;
+ }
+
+ /*
+ * Handle chips that must have cache line
+ * streaming (dis/en)abled.
+ */
+ if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0)
+ dscommand0 |= CACHETHEN;
+
+ if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0)
+ dscommand0 &= ~CACHETHEN;
+
+ ahc_outb(ahc, DSCOMMAND0, dscommand0);
+
+ ahc->pci_cachesize =
+ ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME,
+ /*bytes*/1) & CACHESIZE;
+ ahc->pci_cachesize *= 4;
+
+ if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0
+ && ahc->pci_cachesize == 4) {
+
+ ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+ 0, /*bytes*/1);
+ ahc->pci_cachesize = 0;
+ }
+
+ /*
+ * We cannot perform ULTRA speeds without the presence
+ * of the external precision resistor.
+ */
+ if ((ahc->features & AHC_ULTRA) != 0) {
+ uint32_t devconfig;
+
+ devconfig = ahc_pci_read_config(ahc->dev_softc,
+ DEVCONFIG, /*bytes*/4);
+ if ((devconfig & REXTVALID) == 0)
+ ahc->features &= ~AHC_ULTRA;
+ }
+
+ /* See if we have a SEEPROM and perform auto-term */
+ check_extport(ahc, &sxfrctl1);
+
+ /*
+ * Take the LED out of diagnostic mode
+ */
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
+
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX);
+ } else {
+ ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100);
+ }
+
+ if (ahc->flags & AHC_USEDEFAULTS) {
+ /*
+ * PCI Adapter default setup
+ * Should only be used if the adapter does not have
+ * a SEEPROM.
+ */
+ /* See if someone else set us up already */
+ if ((ahc->flags & AHC_NO_BIOS_INIT) == 0
+ && scsiseq != 0) {
+ printk("%s: Using left over BIOS settings\n",
+ ahc_name(ahc));
+ ahc->flags &= ~AHC_USEDEFAULTS;
+ ahc->flags |= AHC_BIOS_ENABLED;
+ } else {
+ /*
+ * Assume only one connector and always turn
+ * on termination.
+ */
+ our_id = 0x07;
+ sxfrctl1 = STPWEN;
+ }
+ ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI);
+
+ ahc->our_id = our_id;
+ }
+
+ /*
+ * Take a look to see if we have external SRAM.
+ * We currently do not attempt to use SRAM that is
+ * shared among multiple controllers.
+ */
+ ahc_probe_ext_scbram(ahc);
+
+ /*
+ * Record our termination setting for the
+ * generic initialization routine.
+ */
+ if ((sxfrctl1 & STPWEN) != 0)
+ ahc->flags |= AHC_TERM_ENB_A;
+
+ /*
+ * Save chip register configuration data for chip resets
+ * that occur during runtime and resume events.
+ */
+ ahc->bus_softc.pci_softc.devconfig =
+ ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
+ ahc->bus_softc.pci_softc.command =
+ ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1);
+ ahc->bus_softc.pci_softc.csize_lattime =
+ ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1);
+ ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
+ ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
+ if ((ahc->features & AHC_DT) != 0) {
+ u_int sfunct;
+
+ sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
+ ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
+ ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE);
+ ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT);
+ ahc_outb(ahc, SFUNCT, sfunct);
+ ahc->bus_softc.pci_softc.crccontrol1 =
+ ahc_inb(ahc, CRCCONTROL1);
+ }
+ if ((ahc->features & AHC_MULTI_FUNC) != 0)
+ ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR);
+
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
+
+ /* Core initialization */
+ error = ahc_init(ahc);
+ if (error != 0)
+ return (error);
+ ahc->init_level++;
+
+ /*
+ * Allow interrupts now that we are completely setup.
+ */
+ return ahc_pci_map_int(ahc);
+}
+
+/*
+ * Test for the presence of external sram in an
+ * "unshared" configuration.
+ */
+static int
+ahc_ext_scbram_present(struct ahc_softc *ahc)
+{
+ u_int chip;
+ int ramps;
+ int single_user;
+ uint32_t devconfig;
+
+ chip = ahc->chip & AHC_CHIPID_MASK;
+ devconfig = ahc_pci_read_config(ahc->dev_softc,
+ DEVCONFIG, /*bytes*/4);
+ single_user = (devconfig & MPORTMODE) != 0;
+
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0;
+ else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C)
+ /*
+ * External SCBRAM arbitration is flakey
+ * on these chips. Unfortunately this means
+ * we don't use the extra SCB ram space on the
+ * 3940AUW.
+ */
+ ramps = 0;
+ else if (chip >= AHC_AIC7870)
+ ramps = (devconfig & RAMPSM) != 0;
+ else
+ ramps = 0;
+
+ if (ramps && single_user)
+ return (1);
+ return (0);
+}
+
+/*
+ * Enable external scbram.
+ */
+static void
+ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck,
+ int fast, int large)
+{
+ uint32_t devconfig;
+
+ if (ahc->features & AHC_MULTI_FUNC) {
+ /*
+ * Set the SCB Base addr (highest address bit)
+ * depending on which channel we are.
+ */
+ ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc));
+ }
+
+ ahc->flags &= ~AHC_LSCBS_ENABLED;
+ if (large)
+ ahc->flags |= AHC_LSCBS_ENABLED;
+ devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ u_int dscommand0;
+
+ dscommand0 = ahc_inb(ahc, DSCOMMAND0);
+ if (enable)
+ dscommand0 &= ~INTSCBRAMSEL;
+ else
+ dscommand0 |= INTSCBRAMSEL;
+ if (large)
+ dscommand0 &= ~USCBSIZE32;
+ else
+ dscommand0 |= USCBSIZE32;
+ ahc_outb(ahc, DSCOMMAND0, dscommand0);
+ } else {
+ if (fast)
+ devconfig &= ~EXTSCBTIME;
+ else
+ devconfig |= EXTSCBTIME;
+ if (enable)
+ devconfig &= ~SCBRAMSEL;
+ else
+ devconfig |= SCBRAMSEL;
+ if (large)
+ devconfig &= ~SCBSIZE32;
+ else
+ devconfig |= SCBSIZE32;
+ }
+ if (pcheck)
+ devconfig |= EXTSCBPEN;
+ else
+ devconfig &= ~EXTSCBPEN;
+
+ ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
+}
+
+/*
+ * Take a look to see if we have external SRAM.
+ * We currently do not attempt to use SRAM that is
+ * shared among multiple controllers.
+ */
+static void
+ahc_probe_ext_scbram(struct ahc_softc *ahc)
+{
+ int num_scbs;
+ int test_num_scbs;
+ int enable;
+ int pcheck;
+ int fast;
+ int large;
+
+ enable = FALSE;
+ pcheck = FALSE;
+ fast = FALSE;
+ large = FALSE;
+ num_scbs = 0;
+
+ if (ahc_ext_scbram_present(ahc) == 0)
+ goto done;
+
+ /*
+ * Probe for the best parameters to use.
+ */
+ ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large);
+ num_scbs = ahc_probe_scbs(ahc);
+ if (num_scbs == 0) {
+ /* The SRAM wasn't really present. */
+ goto done;
+ }
+ enable = TRUE;
+
+ /*
+ * Clear any outstanding parity error
+ * and ensure that parity error reporting
+ * is enabled.
+ */
+ ahc_outb(ahc, SEQCTL, 0);
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+
+ /* Now see if we can do parity */
+ ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large);
+ num_scbs = ahc_probe_scbs(ahc);
+ if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
+ || (ahc_inb(ahc, ERROR) & MPARERR) == 0)
+ pcheck = TRUE;
+
+ /* Clear any resulting parity error */
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+
+ /* Now see if we can do fast timing */
+ ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large);
+ test_num_scbs = ahc_probe_scbs(ahc);
+ if (test_num_scbs == num_scbs
+ && ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
+ || (ahc_inb(ahc, ERROR) & MPARERR) == 0))
+ fast = TRUE;
+
+ /*
+ * See if we can use large SCBs and still maintain
+ * the same overall count of SCBs.
+ */
+ if ((ahc->features & AHC_LARGE_SCBS) != 0) {
+ ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE);
+ test_num_scbs = ahc_probe_scbs(ahc);
+ if (test_num_scbs >= num_scbs) {
+ large = TRUE;
+ num_scbs = test_num_scbs;
+ if (num_scbs >= 64) {
+ /*
+ * We have enough space to move the
+ * "busy targets table" into SCB space
+ * and make it qualify all the way to the
+ * lun level.
+ */
+ ahc->flags |= AHC_SCB_BTT;
+ }
+ }
+ }
+done:
+ /*
+ * Disable parity error reporting until we
+ * can load instruction ram.
+ */
+ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
+ /* Clear any latched parity error */
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+ if (bootverbose && enable) {
+ printk("%s: External SRAM, %s access%s, %dbytes/SCB\n",
+ ahc_name(ahc), fast ? "fast" : "slow",
+ pcheck ? ", parity checking enabled" : "",
+ large ? 64 : 32);
+ }
+ ahc_scbram_config(ahc, enable, pcheck, fast, large);
+}
+
+/*
+ * Perform some simple tests that should catch situations where
+ * our registers are invalidly mapped.
+ */
+int
+ahc_pci_test_register_access(struct ahc_softc *ahc)
+{
+ int error;
+ u_int status1;
+ uint32_t cmd;
+ uint8_t hcntrl;
+
+ error = EIO;
+
+ /*
+ * Enable PCI error interrupt status, but suppress NMIs
+ * generated by SERR raised due to target aborts.
+ */
+ cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+ cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2);
+
+ /*
+ * First a simple test to see if any
+ * registers can be read. Reading
+ * HCNTRL has no side effects and has
+ * at least one bit that is guaranteed to
+ * be zero so it is a good register to
+ * use for this test.
+ */
+ hcntrl = ahc_inb(ahc, HCNTRL);
+
+ if (hcntrl == 0xFF)
+ goto fail;
+
+ if ((hcntrl & CHIPRST) != 0) {
+ /*
+ * The chip has not been initialized since
+ * PCI/EISA/VLB bus reset. Don't trust
+ * "left over BIOS data".
+ */
+ ahc->flags |= AHC_NO_BIOS_INIT;
+ }
+
+ /*
+ * Next create a situation where write combining
+ * or read prefetching could be initiated by the
+ * CPU or host bridge. Our device does not support
+ * either, so look for data corruption and/or flagged
+ * PCI errors. First pause without causing another
+ * chip reset.
+ */
+ hcntrl &= ~CHIPRST;
+ ahc_outb(ahc, HCNTRL, hcntrl|PAUSE);
+ while (ahc_is_paused(ahc) == 0)
+ ;
+
+ /* Clear any PCI errors that occurred before our driver attached. */
+ status1 = ahc_pci_read_config(ahc->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+ ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
+ status1, /*bytes*/1);
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+
+ ahc_outb(ahc, SEQCTL, PERRORDIS);
+ ahc_outb(ahc, SCBPTR, 0);
+ ahc_outl(ahc, SCB_BASE, 0x5aa555aa);
+ if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa)
+ goto fail;
+
+ status1 = ahc_pci_read_config(ahc->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+ if ((status1 & STA) != 0)
+ goto fail;
+
+ error = 0;
+
+fail:
+ /* Silently clear any latched errors. */
+ status1 = ahc_pci_read_config(ahc->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+ ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
+ status1, /*bytes*/1);
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2);
+ return (error);
+}
+
+/*
+ * Check the external port logic for a serial eeprom
+ * and termination/cable detection contrls.
+ */
+static void
+check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
+{
+ struct seeprom_descriptor sd;
+ struct seeprom_config *sc;
+ int have_seeprom;
+ int have_autoterm;
+
+ sd.sd_ahc = ahc;
+ sd.sd_control_offset = SEECTL;
+ sd.sd_status_offset = SEECTL;
+ sd.sd_dataout_offset = SEECTL;
+ sc = ahc->seep_config;
+
+ /*
+ * For some multi-channel devices, the c46 is simply too
+ * small to work. For the other controller types, we can
+ * get our information from either SEEPROM type. Set the
+ * type to start our probe with accordingly.
+ */
+ if (ahc->flags & AHC_LARGE_SEEPROM)
+ sd.sd_chip = C56_66;
+ else
+ sd.sd_chip = C46;
+
+ sd.sd_MS = SEEMS;
+ sd.sd_RDY = SEERDY;
+ sd.sd_CS = SEECS;
+ sd.sd_CK = SEECK;
+ sd.sd_DO = SEEDO;
+ sd.sd_DI = SEEDI;
+
+ have_seeprom = ahc_acquire_seeprom(ahc, &sd);
+ if (have_seeprom) {
+
+ if (bootverbose)
+ printk("%s: Reading SEEPROM...", ahc_name(ahc));
+
+ for (;;) {
+ u_int start_addr;
+
+ start_addr = 32 * (ahc->channel - 'A');
+
+ have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
+ start_addr,
+ sizeof(*sc)/2);
+
+ if (have_seeprom)
+ have_seeprom = ahc_verify_cksum(sc);
+
+ if (have_seeprom != 0 || sd.sd_chip == C56_66) {
+ if (bootverbose) {
+ if (have_seeprom == 0)
+ printk ("checksum error\n");
+ else
+ printk ("done.\n");
+ }
+ break;
+ }
+ sd.sd_chip = C56_66;
+ }
+ ahc_release_seeprom(&sd);
+
+ /* Remember the SEEPROM type for later */
+ if (sd.sd_chip == C56_66)
+ ahc->flags |= AHC_LARGE_SEEPROM;
+ }
+
+ if (!have_seeprom) {
+ /*
+ * Pull scratch ram settings and treat them as
+ * if they are the contents of an seeprom if
+ * the 'ADPT' signature is found in SCB2.
+ * We manually compose the data as 16bit values
+ * to avoid endian issues.
+ */
+ ahc_outb(ahc, SCBPTR, 2);
+ if (ahc_inb(ahc, SCB_BASE) == 'A'
+ && ahc_inb(ahc, SCB_BASE + 1) == 'D'
+ && ahc_inb(ahc, SCB_BASE + 2) == 'P'
+ && ahc_inb(ahc, SCB_BASE + 3) == 'T') {
+ uint16_t *sc_data;
+ int i;
+
+ sc_data = (uint16_t *)sc;
+ for (i = 0; i < 32; i++, sc_data++) {
+ int j;
+
+ j = i * 2;
+ *sc_data = ahc_inb(ahc, SRAM_BASE + j)
+ | ahc_inb(ahc, SRAM_BASE + j + 1) << 8;
+ }
+ have_seeprom = ahc_verify_cksum(sc);
+ if (have_seeprom)
+ ahc->flags |= AHC_SCB_CONFIG_USED;
+ }
+ /*
+ * Clear any SCB parity errors in case this data and
+ * its associated parity was not initialized by the BIOS
+ */
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+ }
+
+ if (!have_seeprom) {
+ if (bootverbose)
+ printk("%s: No SEEPROM available.\n", ahc_name(ahc));
+ ahc->flags |= AHC_USEDEFAULTS;
+ kfree(ahc->seep_config);
+ ahc->seep_config = NULL;
+ sc = NULL;
+ } else {
+ ahc_parse_pci_eeprom(ahc, sc);
+ }
+
+ /*
+ * Cards that have the external logic necessary to talk to
+ * a SEEPROM, are almost certain to have the remaining logic
+ * necessary for auto-termination control. This assumption
+ * hasn't failed yet...
+ */
+ have_autoterm = have_seeprom;
+
+ /*
+ * Some low-cost chips have SEEPROM and auto-term control built
+ * in, instead of using a GAL. They can tell us directly
+ * if the termination logic is enabled.
+ */
+ if ((ahc->features & AHC_SPIOCAP) != 0) {
+ if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0)
+ have_autoterm = FALSE;
+ }
+
+ if (have_autoterm) {
+ ahc->flags |= AHC_HAS_TERM_LOGIC;
+ ahc_acquire_seeprom(ahc, &sd);
+ configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1);
+ ahc_release_seeprom(&sd);
+ } else if (have_seeprom) {
+ *sxfrctl1 &= ~STPWEN;
+ if ((sc->adapter_control & CFSTERM) != 0)
+ *sxfrctl1 |= STPWEN;
+ if (bootverbose)
+ printk("%s: Low byte termination %sabled\n",
+ ahc_name(ahc),
+ (*sxfrctl1 & STPWEN) ? "en" : "dis");
+ }
+}
+
+static void
+ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc)
+{
+ /*
+ * Put the data we've collected down into SRAM
+ * where ahc_init will find it.
+ */
+ int i;
+ int max_targ = sc->max_targets & CFMAXTARG;
+ u_int scsi_conf;
+ uint16_t discenable;
+ uint16_t ultraenb;
+
+ discenable = 0;
+ ultraenb = 0;
+ if ((sc->adapter_control & CFULTRAEN) != 0) {
+ /*
+ * Determine if this adapter has a "newstyle"
+ * SEEPROM format.
+ */
+ for (i = 0; i < max_targ; i++) {
+ if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) {
+ ahc->flags |= AHC_NEWEEPROM_FMT;
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < max_targ; i++) {
+ u_int scsirate;
+ uint16_t target_mask;
+
+ target_mask = 0x01 << i;
+ if (sc->device_flags[i] & CFDISC)
+ discenable |= target_mask;
+ if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) {
+ if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0)
+ ultraenb |= target_mask;
+ } else if ((sc->adapter_control & CFULTRAEN) != 0) {
+ ultraenb |= target_mask;
+ }
+ if ((sc->device_flags[i] & CFXFER) == 0x04
+ && (ultraenb & target_mask) != 0) {
+ /* Treat 10MHz as a non-ultra speed */
+ sc->device_flags[i] &= ~CFXFER;
+ ultraenb &= ~target_mask;
+ }
+ if ((ahc->features & AHC_ULTRA2) != 0) {
+ u_int offset;
+
+ if (sc->device_flags[i] & CFSYNCH)
+ offset = MAX_OFFSET_ULTRA2;
+ else
+ offset = 0;
+ ahc_outb(ahc, TARG_OFFSET + i, offset);
+
+ /*
+ * The ultra enable bits contain the
+ * high bit of the ultra2 sync rate
+ * field.
+ */
+ scsirate = (sc->device_flags[i] & CFXFER)
+ | ((ultraenb & target_mask) ? 0x8 : 0x0);
+ if (sc->device_flags[i] & CFWIDEB)
+ scsirate |= WIDEXFER;
+ } else {
+ scsirate = (sc->device_flags[i] & CFXFER) << 4;
+ if (sc->device_flags[i] & CFSYNCH)
+ scsirate |= SOFS;
+ if (sc->device_flags[i] & CFWIDEB)
+ scsirate |= WIDEXFER;
+ }
+ ahc_outb(ahc, TARG_SCSIRATE + i, scsirate);
+ }
+ ahc->our_id = sc->brtime_id & CFSCSIID;
+
+ scsi_conf = (ahc->our_id & 0x7);
+ if (sc->adapter_control & CFSPARITY)
+ scsi_conf |= ENSPCHK;
+ if (sc->adapter_control & CFRESETB)
+ scsi_conf |= RESET_SCSI;
+
+ ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT;
+
+ if (sc->bios_control & CFEXTEND)
+ ahc->flags |= AHC_EXTENDED_TRANS_A;
+
+ if (sc->bios_control & CFBIOSEN)
+ ahc->flags |= AHC_BIOS_ENABLED;
+ if (ahc->features & AHC_ULTRA
+ && (ahc->flags & AHC_NEWEEPROM_FMT) == 0) {
+ /* Should we enable Ultra mode? */
+ if (!(sc->adapter_control & CFULTRAEN))
+ /* Treat us as a non-ultra card */
+ ultraenb = 0;
+ }
+
+ if (sc->signature == CFSIGNATURE
+ || sc->signature == CFSIGNATURE2) {
+ uint32_t devconfig;
+
+ /* Honor the STPWLEVEL settings */
+ devconfig = ahc_pci_read_config(ahc->dev_softc,
+ DEVCONFIG, /*bytes*/4);
+ devconfig &= ~STPWLEVEL;
+ if ((sc->bios_control & CFSTPWLEVEL) != 0)
+ devconfig |= STPWLEVEL;
+ ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+ devconfig, /*bytes*/4);
+ }
+ /* Set SCSICONF info */
+ ahc_outb(ahc, SCSICONF, scsi_conf);
+ ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff));
+ ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff));
+ ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff);
+ ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff);
+}
+
+static void
+configure_termination(struct ahc_softc *ahc,
+ struct seeprom_descriptor *sd,
+ u_int adapter_control,
+ u_int *sxfrctl1)
+{
+ uint8_t brddat;
+
+ brddat = 0;
+
+ /*
+ * Update the settings in sxfrctl1 to match the
+ * termination settings
+ */
+ *sxfrctl1 = 0;
+
+ /*
+ * SEECS must be on for the GALS to latch
+ * the data properly. Be sure to leave MS
+ * on or we will release the seeprom.
+ */
+ SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS);
+ if ((adapter_control & CFAUTOTERM) != 0
+ || (ahc->features & AHC_NEW_TERMCTL) != 0) {
+ int internal50_present;
+ int internal68_present;
+ int externalcable_present;
+ int eeprom_present;
+ int enableSEC_low;
+ int enableSEC_high;
+ int enablePRI_low;
+ int enablePRI_high;
+ int sum;
+
+ enableSEC_low = 0;
+ enableSEC_high = 0;
+ enablePRI_low = 0;
+ enablePRI_high = 0;
+ if ((ahc->features & AHC_NEW_TERMCTL) != 0) {
+ ahc_new_term_detect(ahc, &enableSEC_low,
+ &enableSEC_high,
+ &enablePRI_low,
+ &enablePRI_high,
+ &eeprom_present);
+ if ((adapter_control & CFSEAUTOTERM) == 0) {
+ if (bootverbose)
+ printk("%s: Manual SE Termination\n",
+ ahc_name(ahc));
+ enableSEC_low = (adapter_control & CFSELOWTERM);
+ enableSEC_high =
+ (adapter_control & CFSEHIGHTERM);
+ }
+ if ((adapter_control & CFAUTOTERM) == 0) {
+ if (bootverbose)
+ printk("%s: Manual LVD Termination\n",
+ ahc_name(ahc));
+ enablePRI_low = (adapter_control & CFSTERM);
+ enablePRI_high = (adapter_control & CFWSTERM);
+ }
+ /* Make the table calculations below happy */
+ internal50_present = 0;
+ internal68_present = 1;
+ externalcable_present = 1;
+ } else if ((ahc->features & AHC_SPIOCAP) != 0) {
+ aic785X_cable_detect(ahc, &internal50_present,
+ &externalcable_present,
+ &eeprom_present);
+ /* Can never support a wide connector. */
+ internal68_present = 0;
+ } else {
+ aic787X_cable_detect(ahc, &internal50_present,
+ &internal68_present,
+ &externalcable_present,
+ &eeprom_present);
+ }
+
+ if ((ahc->features & AHC_WIDE) == 0)
+ internal68_present = 0;
+
+ if (bootverbose
+ && (ahc->features & AHC_ULTRA2) == 0) {
+ printk("%s: internal 50 cable %s present",
+ ahc_name(ahc),
+ internal50_present ? "is":"not");
+
+ if ((ahc->features & AHC_WIDE) != 0)
+ printk(", internal 68 cable %s present",
+ internal68_present ? "is":"not");
+ printk("\n%s: external cable %s present\n",
+ ahc_name(ahc),
+ externalcable_present ? "is":"not");
+ }
+ if (bootverbose)
+ printk("%s: BIOS eeprom %s present\n",
+ ahc_name(ahc), eeprom_present ? "is" : "not");
+
+ if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) {
+ /*
+ * The 50 pin connector is a separate bus,
+ * so force it to always be terminated.
+ * In the future, perform current sensing
+ * to determine if we are in the middle of
+ * a properly terminated bus.
+ */
+ internal50_present = 0;
+ }
+
+ /*
+ * Now set the termination based on what
+ * we found.
+ * Flash Enable = BRDDAT7
+ * Secondary High Term Enable = BRDDAT6
+ * Secondary Low Term Enable = BRDDAT5 (7890)
+ * Primary High Term Enable = BRDDAT4 (7890)
+ */
+ if ((ahc->features & AHC_ULTRA2) == 0
+ && (internal50_present != 0)
+ && (internal68_present != 0)
+ && (externalcable_present != 0)) {
+ printk("%s: Illegal cable configuration!!. "
+ "Only two connectors on the "
+ "adapter may be used at a "
+ "time!\n", ahc_name(ahc));
+
+ /*
+ * Pretend there are no cables in the hope
+ * that having all of the termination on
+ * gives us a more stable bus.
+ */
+ internal50_present = 0;
+ internal68_present = 0;
+ externalcable_present = 0;
+ }
+
+ if ((ahc->features & AHC_WIDE) != 0
+ && ((externalcable_present == 0)
+ || (internal68_present == 0)
+ || (enableSEC_high != 0))) {
+ brddat |= BRDDAT6;
+ if (bootverbose) {
+ if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
+ printk("%s: 68 pin termination "
+ "Enabled\n", ahc_name(ahc));
+ else
+ printk("%s: %sHigh byte termination "
+ "Enabled\n", ahc_name(ahc),
+ enableSEC_high ? "Secondary "
+ : "");
+ }
+ }
+
+ sum = internal50_present + internal68_present
+ + externalcable_present;
+ if (sum < 2 || (enableSEC_low != 0)) {
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ brddat |= BRDDAT5;
+ else
+ *sxfrctl1 |= STPWEN;
+ if (bootverbose) {
+ if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
+ printk("%s: 50 pin termination "
+ "Enabled\n", ahc_name(ahc));
+ else
+ printk("%s: %sLow byte termination "
+ "Enabled\n", ahc_name(ahc),
+ enableSEC_low ? "Secondary "
+ : "");
+ }
+ }
+
+ if (enablePRI_low != 0) {
+ *sxfrctl1 |= STPWEN;
+ if (bootverbose)
+ printk("%s: Primary Low Byte termination "
+ "Enabled\n", ahc_name(ahc));
+ }
+
+ /*
+ * Setup STPWEN before setting up the rest of
+ * the termination per the tech note on the U160 cards.
+ */
+ ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
+
+ if (enablePRI_high != 0) {
+ brddat |= BRDDAT4;
+ if (bootverbose)
+ printk("%s: Primary High Byte "
+ "termination Enabled\n",
+ ahc_name(ahc));
+ }
+
+ write_brdctl(ahc, brddat);
+
+ } else {
+ if ((adapter_control & CFSTERM) != 0) {
+ *sxfrctl1 |= STPWEN;
+
+ if (bootverbose)
+ printk("%s: %sLow byte termination Enabled\n",
+ ahc_name(ahc),
+ (ahc->features & AHC_ULTRA2) ? "Primary "
+ : "");
+ }
+
+ if ((adapter_control & CFWSTERM) != 0
+ && (ahc->features & AHC_WIDE) != 0) {
+ brddat |= BRDDAT6;
+ if (bootverbose)
+ printk("%s: %sHigh byte termination Enabled\n",
+ ahc_name(ahc),
+ (ahc->features & AHC_ULTRA2)
+ ? "Secondary " : "");
+ }
+
+ /*
+ * Setup STPWEN before setting up the rest of
+ * the termination per the tech note on the U160 cards.
+ */
+ ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
+
+ if ((ahc->features & AHC_WIDE) != 0)
+ write_brdctl(ahc, brddat);
+ }
+ SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */
+}
+
+static void
+ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low,
+ int *enableSEC_high, int *enablePRI_low,
+ int *enablePRI_high, int *eeprom_present)
+{
+ uint8_t brdctl;
+
+ /*
+ * BRDDAT7 = Eeprom
+ * BRDDAT6 = Enable Secondary High Byte termination
+ * BRDDAT5 = Enable Secondary Low Byte termination
+ * BRDDAT4 = Enable Primary high byte termination
+ * BRDDAT3 = Enable Primary low byte termination
+ */
+ brdctl = read_brdctl(ahc);
+ *eeprom_present = brdctl & BRDDAT7;
+ *enableSEC_high = (brdctl & BRDDAT6);
+ *enableSEC_low = (brdctl & BRDDAT5);
+ *enablePRI_high = (brdctl & BRDDAT4);
+ *enablePRI_low = (brdctl & BRDDAT3);
+}
+
+static void
+aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+ int *internal68_present, int *externalcable_present,
+ int *eeprom_present)
+{
+ uint8_t brdctl;
+
+ /*
+ * First read the status of our cables.
+ * Set the rom bank to 0 since the
+ * bank setting serves as a multiplexor
+ * for the cable detection logic.
+ * BRDDAT5 controls the bank switch.
+ */
+ write_brdctl(ahc, 0);
+
+ /*
+ * Now read the state of the internal
+ * connectors. BRDDAT6 is INT50 and
+ * BRDDAT7 is INT68.
+ */
+ brdctl = read_brdctl(ahc);
+ *internal50_present = (brdctl & BRDDAT6) ? 0 : 1;
+ *internal68_present = (brdctl & BRDDAT7) ? 0 : 1;
+
+ /*
+ * Set the rom bank to 1 and determine
+ * the other signals.
+ */
+ write_brdctl(ahc, BRDDAT5);
+
+ /*
+ * Now read the state of the external
+ * connectors. BRDDAT6 is EXT68 and
+ * BRDDAT7 is EPROMPS.
+ */
+ brdctl = read_brdctl(ahc);
+ *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
+ *eeprom_present = (brdctl & BRDDAT7) ? 1 : 0;
+}
+
+static void
+aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+ int *externalcable_present, int *eeprom_present)
+{
+ uint8_t brdctl;
+ uint8_t spiocap;
+
+ spiocap = ahc_inb(ahc, SPIOCAP);
+ spiocap &= ~SOFTCMDEN;
+ spiocap |= EXT_BRDCTL;
+ ahc_outb(ahc, SPIOCAP, spiocap);
+ ahc_outb(ahc, BRDCTL, BRDRW|BRDCS);
+ ahc_flush_device_writes(ahc);
+ ahc_delay(500);
+ ahc_outb(ahc, BRDCTL, 0);
+ ahc_flush_device_writes(ahc);
+ ahc_delay(500);
+ brdctl = ahc_inb(ahc, BRDCTL);
+ *internal50_present = (brdctl & BRDDAT5) ? 0 : 1;
+ *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
+ *eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0;
+}
+
+int
+ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd)
+{
+ int wait;
+
+ if ((ahc->features & AHC_SPIOCAP) != 0
+ && (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0)
+ return (0);
+
+ /*
+ * Request access of the memory port. When access is
+ * granted, SEERDY will go high. We use a 1 second
+ * timeout which should be near 1 second more than
+ * is needed. Reason: after the chip reset, there
+ * should be no contention.
+ */
+ SEEPROM_OUTB(sd, sd->sd_MS);
+ wait = 1000; /* 1 second timeout in msec */
+ while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) {
+ ahc_delay(1000); /* delay 1 msec */
+ }
+ if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) {
+ SEEPROM_OUTB(sd, 0);
+ return (0);
+ }
+ return(1);
+}
+
+void
+ahc_release_seeprom(struct seeprom_descriptor *sd)
+{
+ /* Release access to the memory port and the serial EEPROM. */
+ SEEPROM_OUTB(sd, 0);
+}
+
+static void
+write_brdctl(struct ahc_softc *ahc, uint8_t value)
+{
+ uint8_t brdctl;
+
+ if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
+ brdctl = BRDSTB;
+ if (ahc->channel == 'B')
+ brdctl |= BRDCS;
+ } else if ((ahc->features & AHC_ULTRA2) != 0) {
+ brdctl = 0;
+ } else {
+ brdctl = BRDSTB|BRDCS;
+ }
+ ahc_outb(ahc, BRDCTL, brdctl);
+ ahc_flush_device_writes(ahc);
+ brdctl |= value;
+ ahc_outb(ahc, BRDCTL, brdctl);
+ ahc_flush_device_writes(ahc);
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ brdctl |= BRDSTB_ULTRA2;
+ else
+ brdctl &= ~BRDSTB;
+ ahc_outb(ahc, BRDCTL, brdctl);
+ ahc_flush_device_writes(ahc);
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ brdctl = 0;
+ else
+ brdctl &= ~BRDCS;
+ ahc_outb(ahc, BRDCTL, brdctl);
+}
+
+static uint8_t
+read_brdctl(struct ahc_softc *ahc)
+{
+ uint8_t brdctl;
+ uint8_t value;
+
+ if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
+ brdctl = BRDRW;
+ if (ahc->channel == 'B')
+ brdctl |= BRDCS;
+ } else if ((ahc->features & AHC_ULTRA2) != 0) {
+ brdctl = BRDRW_ULTRA2;
+ } else {
+ brdctl = BRDRW|BRDCS;
+ }
+ ahc_outb(ahc, BRDCTL, brdctl);
+ ahc_flush_device_writes(ahc);
+ value = ahc_inb(ahc, BRDCTL);
+ ahc_outb(ahc, BRDCTL, 0);
+ return (value);
+}
+
+static void
+ahc_pci_intr(struct ahc_softc *ahc)
+{
+ u_int error;
+ u_int status1;
+
+ error = ahc_inb(ahc, ERROR);
+ if ((error & PCIERRSTAT) == 0)
+ return;
+
+ status1 = ahc_pci_read_config(ahc->dev_softc,
+ PCIR_STATUS + 1, /*bytes*/1);
+
+ printk("%s: PCI error Interrupt at seqaddr = 0x%x\n",
+ ahc_name(ahc),
+ ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
+
+ if (status1 & DPE) {
+ ahc->pci_target_perr_count++;
+ printk("%s: Data Parity Error Detected during address "
+ "or write data phase\n", ahc_name(ahc));
+ }
+ if (status1 & SSE) {
+ printk("%s: Signal System Error Detected\n", ahc_name(ahc));
+ }
+ if (status1 & RMA) {
+ printk("%s: Received a Master Abort\n", ahc_name(ahc));
+ }
+ if (status1 & RTA) {
+ printk("%s: Received a Target Abort\n", ahc_name(ahc));
+ }
+ if (status1 & STA) {
+ printk("%s: Signaled a Target Abort\n", ahc_name(ahc));
+ }
+ if (status1 & DPR) {
+ printk("%s: Data Parity Error has been reported via PERR#\n",
+ ahc_name(ahc));
+ }
+
+ /* Clear latched errors. */
+ ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
+ status1, /*bytes*/1);
+
+ if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) {
+ printk("%s: Latched PCIERR interrupt with "
+ "no status bits set\n", ahc_name(ahc));
+ } else {
+ ahc_outb(ahc, CLRINT, CLRPARERR);
+ }
+
+ if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) {
+ printk(
+"%s: WARNING WARNING WARNING WARNING\n"
+"%s: Too many PCI parity errors observed as a target.\n"
+"%s: Some device on this bus is generating bad parity.\n"
+"%s: This is an error *observed by*, not *generated by*, this controller.\n"
+"%s: PCI parity error checking has been disabled.\n"
+"%s: WARNING WARNING WARNING WARNING\n",
+ ahc_name(ahc), ahc_name(ahc), ahc_name(ahc),
+ ahc_name(ahc), ahc_name(ahc), ahc_name(ahc));
+ ahc->seqctl |= FAILDIS;
+ ahc_outb(ahc, SEQCTL, ahc->seqctl);
+ }
+ ahc_unpause(ahc);
+}
+
+static int
+ahc_pci_chip_init(struct ahc_softc *ahc)
+{
+ ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0);
+ ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus);
+ if ((ahc->features & AHC_DT) != 0) {
+ u_int sfunct;
+
+ sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
+ ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
+ ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode);
+ ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt);
+ ahc_outb(ahc, SFUNCT, sfunct);
+ ahc_outb(ahc, CRCCONTROL1,
+ ahc->bus_softc.pci_softc.crccontrol1);
+ }
+ if ((ahc->features & AHC_MULTI_FUNC) != 0)
+ ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr);
+
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh);
+
+ return (ahc_chip_init(ahc));
+}
+
+#ifdef CONFIG_PM
+void
+ahc_pci_resume(struct ahc_softc *ahc)
+{
+ /*
+ * We assume that the OS has restored our register
+ * mappings, etc. Just update the config space registers
+ * that the OS doesn't know about and rely on our chip
+ * reset handler to handle the rest.
+ */
+ ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+ ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+ ahc->bus_softc.pci_softc.command, /*bytes*/1);
+ ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+ ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
+ if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
+ struct seeprom_descriptor sd;
+ u_int sxfrctl1;
+
+ sd.sd_ahc = ahc;
+ sd.sd_control_offset = SEECTL;
+ sd.sd_status_offset = SEECTL;
+ sd.sd_dataout_offset = SEECTL;
+
+ ahc_acquire_seeprom(ahc, &sd);
+ configure_termination(ahc, &sd,
+ ahc->seep_config->adapter_control,
+ &sxfrctl1);
+ ahc_release_seeprom(&sd);
+ }
+}
+#endif
+
+static int
+ahc_aic785X_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+ uint8_t rev;
+
+ pci = ahc->dev_softc;
+ ahc->channel = 'A';
+ ahc->chip = AHC_AIC7850;
+ ahc->features = AHC_AIC7850_FE;
+ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+ rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+ if (rev >= 1)
+ ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
+ ahc->instruction_ram_size = 512;
+ return (0);
+}
+
+static int
+ahc_aic7860_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+ uint8_t rev;
+
+ pci = ahc->dev_softc;
+ ahc->channel = 'A';
+ ahc->chip = AHC_AIC7860;
+ ahc->features = AHC_AIC7860_FE;
+ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+ rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+ if (rev >= 1)
+ ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
+ ahc->instruction_ram_size = 512;
+ return (0);
+}
+
+static int
+ahc_apa1480_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7860_setup(ahc);
+ if (error != 0)
+ return (error);
+ ahc->features |= AHC_REMOVABLE;
+ return (0);
+}
+
+static int
+ahc_aic7870_setup(struct ahc_softc *ahc)
+{
+
+ ahc->channel = 'A';
+ ahc->chip = AHC_AIC7870;
+ ahc->features = AHC_AIC7870_FE;
+ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+ ahc->instruction_ram_size = 512;
+ return (0);
+}
+
+static int
+ahc_aic7870h_setup(struct ahc_softc *ahc)
+{
+ int error = ahc_aic7870_setup(ahc);
+
+ ahc->features |= AHC_HVD;
+
+ return error;
+}
+
+static int
+ahc_aha394X_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7870_setup(ahc);
+ if (error == 0)
+ error = ahc_aha394XX_setup(ahc);
+ return (error);
+}
+
+static int
+ahc_aha394Xh_setup(struct ahc_softc *ahc)
+{
+ int error = ahc_aha394X_setup(ahc);
+
+ ahc->features |= AHC_HVD;
+
+ return error;
+}
+
+static int
+ahc_aha398X_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7870_setup(ahc);
+ if (error == 0)
+ error = ahc_aha398XX_setup(ahc);
+ return (error);
+}
+
+static int
+ahc_aha494X_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7870_setup(ahc);
+ if (error == 0)
+ error = ahc_aha494XX_setup(ahc);
+ return (error);
+}
+
+static int
+ahc_aha494Xh_setup(struct ahc_softc *ahc)
+{
+ int error = ahc_aha494X_setup(ahc);
+
+ ahc->features |= AHC_HVD;
+
+ return error;
+}
+
+static int
+ahc_aic7880_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+ uint8_t rev;
+
+ pci = ahc->dev_softc;
+ ahc->channel = 'A';
+ ahc->chip = AHC_AIC7880;
+ ahc->features = AHC_AIC7880_FE;
+ ahc->bugs |= AHC_TMODE_WIDEODD_BUG;
+ rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+ if (rev >= 1) {
+ ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
+ } else {
+ ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+ }
+ ahc->instruction_ram_size = 512;
+ return (0);
+}
+
+static int
+ahc_aic7880h_setup(struct ahc_softc *ahc)
+{
+ int error = ahc_aic7880_setup(ahc);
+
+ ahc->features |= AHC_HVD;
+
+ return error;
+}
+
+
+static int
+ahc_aha2940Pro_setup(struct ahc_softc *ahc)
+{
+
+ ahc->flags |= AHC_INT50_SPEEDFLEX;
+ return (ahc_aic7880_setup(ahc));
+}
+
+static int
+ahc_aha394XU_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7880_setup(ahc);
+ if (error == 0)
+ error = ahc_aha394XX_setup(ahc);
+ return (error);
+}
+
+static int
+ahc_aha394XUh_setup(struct ahc_softc *ahc)
+{
+ int error = ahc_aha394XU_setup(ahc);
+
+ ahc->features |= AHC_HVD;
+
+ return error;
+}
+
+static int
+ahc_aha398XU_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7880_setup(ahc);
+ if (error == 0)
+ error = ahc_aha398XX_setup(ahc);
+ return (error);
+}
+
+static int
+ahc_aic7890_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+ uint8_t rev;
+
+ pci = ahc->dev_softc;
+ ahc->channel = 'A';
+ ahc->chip = AHC_AIC7890;
+ ahc->features = AHC_AIC7890_FE;
+ ahc->flags |= AHC_NEWEEPROM_FMT;
+ rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+ if (rev == 0)
+ ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG;
+ ahc->instruction_ram_size = 768;
+ return (0);
+}
+
+static int
+ahc_aic7892_setup(struct ahc_softc *ahc)
+{
+
+ ahc->channel = 'A';
+ ahc->chip = AHC_AIC7892;
+ ahc->features = AHC_AIC7892_FE;
+ ahc->flags |= AHC_NEWEEPROM_FMT;
+ ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
+ ahc->instruction_ram_size = 1024;
+ return (0);
+}
+
+static int
+ahc_aic7895_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+ uint8_t rev;
+
+ pci = ahc->dev_softc;
+ ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
+ /*
+ * The 'C' revision of the aic7895 has a few additional features.
+ */
+ rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+ if (rev >= 4) {
+ ahc->chip = AHC_AIC7895C;
+ ahc->features = AHC_AIC7895C_FE;
+ } else {
+ u_int command;
+
+ ahc->chip = AHC_AIC7895;
+ ahc->features = AHC_AIC7895_FE;
+
+ /*
+ * The BIOS disables the use of MWI transactions
+ * since it does not have the MWI bug work around
+ * we have. Disabling MWI reduces performance, so
+ * turn it on again.
+ */
+ command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1);
+ command |= PCIM_CMD_MWRICEN;
+ ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1);
+ ahc->bugs |= AHC_PCI_MWI_BUG;
+ }
+ /*
+ * XXX Does CACHETHEN really not work??? What about PCI retry?
+ * on C level chips. Need to test, but for now, play it safe.
+ */
+ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG
+ | AHC_CACHETHEN_BUG;
+
+#if 0
+ uint32_t devconfig;
+
+ /*
+ * Cachesize must also be zero due to stray DAC
+ * problem when sitting behind some bridges.
+ */
+ ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1);
+ devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1);
+ devconfig |= MRDCEN;
+ ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1);
+#endif
+ ahc->flags |= AHC_NEWEEPROM_FMT;
+ ahc->instruction_ram_size = 512;
+ return (0);
+}
+
+static int
+ahc_aic7895h_setup(struct ahc_softc *ahc)
+{
+ int error = ahc_aic7895_setup(ahc);
+
+ ahc->features |= AHC_HVD;
+
+ return error;
+}
+
+static int
+ahc_aic7896_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+
+ pci = ahc->dev_softc;
+ ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
+ ahc->chip = AHC_AIC7896;
+ ahc->features = AHC_AIC7896_FE;
+ ahc->flags |= AHC_NEWEEPROM_FMT;
+ ahc->bugs |= AHC_CACHETHEN_DIS_BUG;
+ ahc->instruction_ram_size = 768;
+ return (0);
+}
+
+static int
+ahc_aic7899_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+
+ pci = ahc->dev_softc;
+ ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
+ ahc->chip = AHC_AIC7899;
+ ahc->features = AHC_AIC7899_FE;
+ ahc->flags |= AHC_NEWEEPROM_FMT;
+ ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
+ ahc->instruction_ram_size = 1024;
+ return (0);
+}
+
+static int
+ahc_aha29160C_setup(struct ahc_softc *ahc)
+{
+ int error;
+
+ error = ahc_aic7899_setup(ahc);
+ if (error != 0)
+ return (error);
+ ahc->features |= AHC_REMOVABLE;
+ return (0);
+}
+
+static int
+ahc_raid_setup(struct ahc_softc *ahc)
+{
+ printk("RAID functionality unsupported\n");
+ return (ENXIO);
+}
+
+static int
+ahc_aha394XX_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+
+ pci = ahc->dev_softc;
+ switch (ahc_get_pci_slot(pci)) {
+ case AHC_394X_SLOT_CHANNEL_A:
+ ahc->channel = 'A';
+ break;
+ case AHC_394X_SLOT_CHANNEL_B:
+ ahc->channel = 'B';
+ break;
+ default:
+ printk("adapter at unexpected slot %d\n"
+ "unable to map to a channel\n",
+ ahc_get_pci_slot(pci));
+ ahc->channel = 'A';
+ }
+ return (0);
+}
+
+static int
+ahc_aha398XX_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+
+ pci = ahc->dev_softc;
+ switch (ahc_get_pci_slot(pci)) {
+ case AHC_398X_SLOT_CHANNEL_A:
+ ahc->channel = 'A';
+ break;
+ case AHC_398X_SLOT_CHANNEL_B:
+ ahc->channel = 'B';
+ break;
+ case AHC_398X_SLOT_CHANNEL_C:
+ ahc->channel = 'C';
+ break;
+ default:
+ printk("adapter at unexpected slot %d\n"
+ "unable to map to a channel\n",
+ ahc_get_pci_slot(pci));
+ ahc->channel = 'A';
+ break;
+ }
+ ahc->flags |= AHC_LARGE_SEEPROM;
+ return (0);
+}
+
+static int
+ahc_aha494XX_setup(struct ahc_softc *ahc)
+{
+ ahc_dev_softc_t pci;
+
+ pci = ahc->dev_softc;
+ switch (ahc_get_pci_slot(pci)) {
+ case AHC_494X_SLOT_CHANNEL_A:
+ ahc->channel = 'A';
+ break;
+ case AHC_494X_SLOT_CHANNEL_B:
+ ahc->channel = 'B';
+ break;
+ case AHC_494X_SLOT_CHANNEL_C:
+ ahc->channel = 'C';
+ break;
+ case AHC_494X_SLOT_CHANNEL_D:
+ ahc->channel = 'D';
+ break;
+ default:
+ printk("adapter at unexpected slot %d\n"
+ "unable to map to a channel\n",
+ ahc_get_pci_slot(pci));
+ ahc->channel = 'A';
+ }
+ ahc->flags |= AHC_LARGE_SEEPROM;
+ return (0);
+}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.h b/drivers/scsi/aic7xxx/aic7xxx_pci.h
new file mode 100644
index 000000000..263f85da4
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.h
@@ -0,0 +1,125 @@
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id$
+ *
+ */
+#ifndef _AIC7XXX_PCI_H_
+#define _AIC7XXX_PCI_H_
+
+#define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull
+#define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull
+#define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull
+#define ID_9005_SISL_MASK 0x000FFFFF00000000ull
+#define ID_9005_SISL_ID 0x0005900500000000ull
+#define ID_AIC7850 0x5078900400000000ull
+#define ID_AHA_2902_04_10_15_20C_30C 0x5078900478509004ull
+#define ID_AIC7855 0x5578900400000000ull
+#define ID_AIC7859 0x3860900400000000ull
+#define ID_AHA_2930CU 0x3860900438699004ull
+#define ID_AIC7860 0x6078900400000000ull
+#define ID_AIC7860C 0x6078900478609004ull
+#define ID_AHA_1480A 0x6075900400000000ull
+#define ID_AHA_2940AU_0 0x6178900400000000ull
+#define ID_AHA_2940AU_1 0x6178900478619004ull
+#define ID_AHA_2940AU_CN 0x2178900478219004ull
+#define ID_AHA_2930C_VAR 0x6038900438689004ull
+
+#define ID_AIC7870 0x7078900400000000ull
+#define ID_AHA_2940 0x7178900400000000ull
+#define ID_AHA_3940 0x7278900400000000ull
+#define ID_AHA_398X 0x7378900400000000ull
+#define ID_AHA_2944 0x7478900400000000ull
+#define ID_AHA_3944 0x7578900400000000ull
+#define ID_AHA_4944 0x7678900400000000ull
+
+#define ID_AIC7880 0x8078900400000000ull
+#define ID_AIC7880_B 0x8078900478809004ull
+#define ID_AHA_2940U 0x8178900400000000ull
+#define ID_AHA_3940U 0x8278900400000000ull
+#define ID_AHA_2944U 0x8478900400000000ull
+#define ID_AHA_3944U 0x8578900400000000ull
+#define ID_AHA_398XU 0x8378900400000000ull
+#define ID_AHA_4944U 0x8678900400000000ull
+#define ID_AHA_2940UB 0x8178900478819004ull
+#define ID_AHA_2930U 0x8878900478889004ull
+#define ID_AHA_2940U_PRO 0x8778900478879004ull
+#define ID_AHA_2940U_CN 0x0078900478009004ull
+
+#define ID_AIC7895 0x7895900478959004ull
+#define ID_AIC7895_ARO 0x7890900478939004ull
+#define ID_AIC7895_ARO_MASK 0xFFF0FFFFFFFFFFFFull
+#define ID_AHA_2940U_DUAL 0x7895900478919004ull
+#define ID_AHA_3940AU 0x7895900478929004ull
+#define ID_AHA_3944AU 0x7895900478949004ull
+
+#define ID_AIC7890 0x001F9005000F9005ull
+#define ID_AIC7890_ARO 0x00139005000F9005ull
+#define ID_AAA_131U2 0x0013900500039005ull
+#define ID_AHA_2930U2 0x0011900501819005ull
+#define ID_AHA_2940U2B 0x00109005A1009005ull
+#define ID_AHA_2940U2_OEM 0x0010900521809005ull
+#define ID_AHA_2940U2 0x00109005A1809005ull
+#define ID_AHA_2950U2B 0x00109005E1009005ull
+
+#define ID_AIC7892 0x008F9005FFFF9005ull
+#define ID_AIC7892_ARO 0x00839005FFFF9005ull
+#define ID_AHA_29160 0x00809005E2A09005ull
+#define ID_AHA_29160_CPQ 0x00809005E2A00E11ull
+#define ID_AHA_29160N 0x0080900562A09005ull
+#define ID_AHA_29160C 0x0080900562209005ull
+#define ID_AHA_29160B 0x00809005E2209005ull
+#define ID_AHA_19160B 0x0081900562A19005ull
+#define ID_AHA_2915_30LP 0x0082900502109005ull
+
+#define ID_AIC7896 0x005F9005FFFF9005ull
+#define ID_AIC7896_ARO 0x00539005FFFF9005ull
+#define ID_AHA_3950U2B_0 0x00509005FFFF9005ull
+#define ID_AHA_3950U2B_1 0x00509005F5009005ull
+#define ID_AHA_3950U2D_0 0x00519005FFFF9005ull
+#define ID_AHA_3950U2D_1 0x00519005B5009005ull
+
+#define ID_AIC7899 0x00CF9005FFFF9005ull
+#define ID_AIC7899_ARO 0x00C39005FFFF9005ull
+#define ID_AHA_3960D 0x00C09005F6209005ull
+#define ID_AHA_3960D_CPQ 0x00C09005F6200E11ull
+
+#define ID_AIC7810 0x1078900400000000ull
+#define ID_AIC7815 0x7815900400000000ull
+
+#endif /* _AIC7XXX_PCI_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
new file mode 100644
index 000000000..18459605d
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * String handling code courtesy of Gerard Roudier's <groudier@club-internet.fr>
+ * sym driver.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_proc.c#29 $
+ */
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+
+static void ahc_dump_target_state(struct ahc_softc *ahc,
+ struct seq_file *m,
+ u_int our_id, char channel,
+ u_int target_id, u_int target_offset);
+static void ahc_dump_device_state(struct seq_file *m,
+ struct scsi_device *dev);
+
+/*
+ * Table of syncrates that don't follow the "divisible by 4"
+ * rule. This table will be expanded in future SCSI specs.
+ */
+static const struct {
+ u_int period_factor;
+ u_int period; /* in 100ths of ns */
+} scsi_syncrates[] = {
+ { 0x08, 625 }, /* FAST-160 */
+ { 0x09, 1250 }, /* FAST-80 */
+ { 0x0a, 2500 }, /* FAST-40 40MHz */
+ { 0x0b, 3030 }, /* FAST-40 33MHz */
+ { 0x0c, 5000 } /* FAST-20 */
+};
+
+/*
+ * Return the frequency in kHz corresponding to the given
+ * sync period factor.
+ */
+static u_int
+ahc_calc_syncsrate(u_int period_factor)
+{
+ int i;
+
+ /* See if the period is in the "exception" table */
+ for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) {
+
+ if (period_factor == scsi_syncrates[i].period_factor) {
+ /* Period in kHz */
+ return (100000000 / scsi_syncrates[i].period);
+ }
+ }
+
+ /*
+ * Wasn't in the table, so use the standard
+ * 4 times conversion.
+ */
+ return (10000000 / (period_factor * 4 * 10));
+}
+
+static void
+ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo)
+{
+ u_int speed;
+ u_int freq;
+ u_int mb;
+
+ speed = 3300;
+ freq = 0;
+ if (tinfo->offset != 0) {
+ freq = ahc_calc_syncsrate(tinfo->period);
+ speed = freq;
+ }
+ speed *= (0x01 << tinfo->width);
+ mb = speed / 1000;
+ if (mb > 0)
+ seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
+ else
+ seq_printf(m, "%dKB/s transfers", speed);
+
+ if (freq != 0) {
+ seq_printf(m, " (%d.%03dMHz%s, offset %d",
+ freq / 1000, freq % 1000,
+ (tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
+ ? " DT" : "", tinfo->offset);
+ }
+
+ if (tinfo->width > 0) {
+ if (freq != 0) {
+ seq_puts(m, ", ");
+ } else {
+ seq_puts(m, " (");
+ }
+ seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width));
+ } else if (freq != 0) {
+ seq_putc(m, ')');
+ }
+ seq_putc(m, '\n');
+}
+
+static void
+ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m,
+ u_int our_id, char channel, u_int target_id,
+ u_int target_offset)
+{
+ struct scsi_target *starget;
+ struct ahc_initiator_tinfo *tinfo;
+ struct ahc_tmode_tstate *tstate;
+ int lun;
+
+ tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+ target_id, &tstate);
+ if ((ahc->features & AHC_TWIN) != 0)
+ seq_printf(m, "Channel %c ", channel);
+ seq_printf(m, "Target %d Negotiation Settings\n", target_id);
+ seq_puts(m, "\tUser: ");
+ ahc_format_transinfo(m, &tinfo->user);
+ starget = ahc->platform_data->starget[target_offset];
+ if (!starget)
+ return;
+
+ seq_puts(m, "\tGoal: ");
+ ahc_format_transinfo(m, &tinfo->goal);
+ seq_puts(m, "\tCurr: ");
+ ahc_format_transinfo(m, &tinfo->curr);
+
+ for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup_by_target(starget, lun);
+
+ if (sdev == NULL)
+ continue;
+
+ ahc_dump_device_state(m, sdev);
+ }
+}
+
+static void
+ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev)
+{
+ struct ahc_linux_device *dev = scsi_transport_device_data(sdev);
+
+ seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n",
+ sdev->sdev_target->channel + 'A',
+ sdev->sdev_target->id, (u8)sdev->lun);
+
+ seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued);
+ seq_printf(m, "\t\tCommands Active %d\n", dev->active);
+ seq_printf(m, "\t\tCommand Openings %d\n", dev->openings);
+ seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags);
+ seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
+}
+
+int
+ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
+{
+ struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
+ struct seeprom_descriptor sd;
+ int have_seeprom;
+ u_long s;
+ int paused;
+ int written;
+
+ /* Default to failure. */
+ written = -EINVAL;
+ ahc_lock(ahc, &s);
+ paused = ahc_is_paused(ahc);
+ if (!paused)
+ ahc_pause(ahc);
+
+ if (length != sizeof(struct seeprom_config)) {
+ printk("ahc_proc_write_seeprom: incorrect buffer size\n");
+ goto done;
+ }
+
+ have_seeprom = ahc_verify_cksum((struct seeprom_config*)buffer);
+ if (have_seeprom == 0) {
+ printk("ahc_proc_write_seeprom: cksum verification failed\n");
+ goto done;
+ }
+
+ sd.sd_ahc = ahc;
+#if AHC_PCI_CONFIG > 0
+ if ((ahc->chip & AHC_PCI) != 0) {
+ sd.sd_control_offset = SEECTL;
+ sd.sd_status_offset = SEECTL;
+ sd.sd_dataout_offset = SEECTL;
+ if (ahc->flags & AHC_LARGE_SEEPROM)
+ sd.sd_chip = C56_66;
+ else
+ sd.sd_chip = C46;
+ sd.sd_MS = SEEMS;
+ sd.sd_RDY = SEERDY;
+ sd.sd_CS = SEECS;
+ sd.sd_CK = SEECK;
+ sd.sd_DO = SEEDO;
+ sd.sd_DI = SEEDI;
+ have_seeprom = ahc_acquire_seeprom(ahc, &sd);
+ } else
+#endif
+ if ((ahc->chip & AHC_VL) != 0) {
+ sd.sd_control_offset = SEECTL_2840;
+ sd.sd_status_offset = STATUS_2840;
+ sd.sd_dataout_offset = STATUS_2840;
+ sd.sd_chip = C46;
+ sd.sd_MS = 0;
+ sd.sd_RDY = EEPROM_TF;
+ sd.sd_CS = CS_2840;
+ sd.sd_CK = CK_2840;
+ sd.sd_DO = DO_2840;
+ sd.sd_DI = DI_2840;
+ have_seeprom = TRUE;
+ } else {
+ printk("ahc_proc_write_seeprom: unsupported adapter type\n");
+ goto done;
+ }
+
+ if (!have_seeprom) {
+ printk("ahc_proc_write_seeprom: No Serial EEPROM\n");
+ goto done;
+ } else {
+ u_int start_addr;
+
+ if (ahc->seep_config == NULL) {
+ ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
+ if (ahc->seep_config == NULL) {
+ printk("aic7xxx: Unable to allocate serial "
+ "eeprom buffer. Write failing\n");
+ goto done;
+ }
+ }
+ printk("aic7xxx: Writing Serial EEPROM\n");
+ start_addr = 32 * (ahc->channel - 'A');
+ ahc_write_seeprom(&sd, (u_int16_t *)buffer, start_addr,
+ sizeof(struct seeprom_config)/2);
+ ahc_read_seeprom(&sd, (uint16_t *)ahc->seep_config,
+ start_addr, sizeof(struct seeprom_config)/2);
+#if AHC_PCI_CONFIG > 0
+ if ((ahc->chip & AHC_VL) == 0)
+ ahc_release_seeprom(&sd);
+#endif
+ written = length;
+ }
+
+done:
+ if (!paused)
+ ahc_unpause(ahc);
+ ahc_unlock(ahc, &s);
+ return (written);
+}
+
+/*
+ * Return information to handle /proc support for the driver.
+ */
+int
+ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
+ char ahc_info[256];
+ u_int max_targ;
+ u_int i;
+
+ seq_printf(m, "Adaptec AIC7xxx driver version: %s\n",
+ AIC7XXX_DRIVER_VERSION);
+ seq_printf(m, "%s\n", ahc->description);
+ ahc_controller_info(ahc, ahc_info);
+ seq_printf(m, "%s\n", ahc_info);
+ seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n",
+ ahc->scb_data->numscbs, AHC_NSEG);
+
+
+ if (ahc->seep_config == NULL)
+ seq_puts(m, "No Serial EEPROM\n");
+ else {
+ seq_puts(m, "Serial EEPROM:\n");
+ for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) {
+ if (((i % 8) == 0) && (i != 0)) {
+ seq_putc(m, '\n');
+ }
+ seq_printf(m, "0x%.4x ",
+ ((uint16_t*)ahc->seep_config)[i]);
+ }
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+
+ max_targ = 16;
+ if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
+ max_targ = 8;
+
+ for (i = 0; i < max_targ; i++) {
+ u_int our_id;
+ u_int target_id;
+ char channel;
+
+ channel = 'A';
+ our_id = ahc->our_id;
+ target_id = i;
+ if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
+ channel = 'B';
+ our_id = ahc->our_id_b;
+ target_id = i % 8;
+ }
+
+ ahc_dump_target_state(ahc, m, our_id,
+ channel, target_id, i);
+ }
+ return 0;
+}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
new file mode 100644
index 000000000..e821082a4
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -0,0 +1,912 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ * from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
+ */
+typedef int (ahc_reg_print_t)(u_int, u_int *, u_int);
+typedef struct ahc_reg_parse_entry {
+ char *name;
+ uint8_t value;
+ uint8_t mask;
+} ahc_reg_parse_entry_t;
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiseq_print;
+#else
+#define ahc_scsiseq_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCSISEQ", 0x00, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sxfrctl0_print;
+#else
+#define ahc_sxfrctl0_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SXFRCTL0", 0x01, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsisigi_print;
+#else
+#define ahc_scsisigi_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCSISIGI", 0x03, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsirate_print;
+#else
+#define ahc_scsirate_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCSIRATE", 0x04, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat0_print;
+#else
+#define ahc_sstat0_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SSTAT0", 0x0b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat1_print;
+#else
+#define ahc_sstat1_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SSTAT1", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat2_print;
+#else
+#define ahc_sstat2_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SSTAT2", 0x0d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat3_print;
+#else
+#define ahc_sstat3_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SSTAT3", 0x0e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_simode0_print;
+#else
+#define ahc_simode0_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SIMODE0", 0x10, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_simode1_print;
+#else
+#define ahc_simode1_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SIMODE1", 0x11, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsibusl_print;
+#else
+#define ahc_scsibusl_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCSIBUSL", 0x12, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sblkctl_print;
+#else
+#define ahc_sblkctl_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SBLKCTL", 0x1f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seq_flags_print;
+#else
+#define ahc_seq_flags_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SEQ_FLAGS", 0x3c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_lastphase_print;
+#else
+#define ahc_lastphase_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "LASTPHASE", 0x3f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seqctl_print;
+#else
+#define ahc_seqctl_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SEQCTL", 0x60, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sram_base_print;
+#else
+#define ahc_sram_base_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SRAM_BASE", 0x70, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_error_print;
+#else
+#define ahc_error_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "ERROR", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfcntrl_print;
+#else
+#define ahc_dfcntrl_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "DFCNTRL", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfstatus_print;
+#else
+#define ahc_dfstatus_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "DFSTATUS", 0x94, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiphase_print;
+#else
+#define ahc_scsiphase_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCSIPHASE", 0x9e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_base_print;
+#else
+#define ahc_scb_base_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCB_BASE", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_control_print;
+#else
+#define ahc_scb_control_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCB_CONTROL", 0xb8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_scsiid_print;
+#else
+#define ahc_scb_scsiid_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCB_SCSIID", 0xb9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_lun_print;
+#else
+#define ahc_scb_lun_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCB_LUN", 0xba, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_tag_print;
+#else
+#define ahc_scb_tag_print(regvalue, cur_col, wrap) \
+ ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap)
+#endif
+
+
+#define SCSISEQ 0x00
+#define TEMODE 0x80
+#define SCSIRSTO 0x01
+
+#define SXFRCTL0 0x01
+#define DFON 0x80
+#define DFPEXP 0x40
+#define FAST20 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define SCAMEN 0x04
+#define CLRCHN 0x02
+
+#define SXFRCTL1 0x02
+#define STIMESEL 0x18
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSTIMER 0x04
+#define ACTNEGEN 0x02
+#define STPWEN 0x01
+
+#define SCSISIGO 0x03
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+#define SCSISIGI 0x03
+#define P_DATAIN_DT 0x60
+#define P_DATAOUT_DT 0x20
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+#define SCSIRATE 0x04
+#define SXFR 0x70
+#define SOFS 0x0f
+#define SXFR_ULTRA2 0x0f
+#define WIDEXFER 0x80
+#define ENABLE_CRC 0x40
+#define SINGLE_EDGE 0x10
+
+#define SCSIID 0x05
+#define SCSIOFFSET 0x05
+#define SOFS_ULTRA2 0x7f
+
+#define SCSIDATL 0x06
+
+#define SCSIDATH 0x07
+
+#define STCNT 0x08
+
+#define OPTIONMODE 0x08
+#define OPTIONMODE_DEFAULTS 0x03
+#define AUTORATEEN 0x80
+#define AUTOACKEN 0x40
+#define ATNMGMNTEN 0x20
+#define BUSFREEREV 0x10
+#define EXPPHASEDIS 0x08
+#define SCSIDATL_IMGEN 0x04
+#define AUTO_MSGOUT_DE 0x02
+#define DIS_MSGIN_DUALEDGE 0x01
+
+#define TARGCRCCNT 0x0a
+
+#define CLRSINT0 0x0b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRIOERR 0x08
+#define CLRSWRAP 0x08
+#define CLRSPIORDY 0x02
+
+#define SSTAT0 0x0b
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define SWRAP 0x08
+#define IOERR 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define CLRSINT1 0x0c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+#define SSTAT1 0x0c
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define SSTAT2 0x0d
+#define SFCNT 0x1f
+#define OVERRUN 0x80
+#define SHVALID 0x40
+#define EXP_ACTIVE 0x10
+#define CRCVALERR 0x08
+#define CRCENDERR 0x04
+#define CRCREQERR 0x02
+#define DUAL_EDGE_ERR 0x01
+
+#define SSTAT3 0x0e
+#define SCSICNT 0xf0
+#define U2OFFCNT 0x7f
+#define OFFCNT 0x0f
+
+#define SCSIID_ULTRA2 0x0f
+
+#define SIMODE0 0x10
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENIOERR 0x08
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+#define SIMODE1 0x11
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+#define SCSIBUSL 0x12
+
+#define SCSIBUSH 0x13
+
+#define SXFRCTL2 0x13
+#define ASYNC_SETUP 0x07
+#define AUTORSTDIS 0x10
+#define CMDDMAEN 0x08
+
+#define SHADDR 0x14
+
+#define SELTIMER 0x18
+#define TARGIDIN 0x18
+#define STAGE6 0x20
+#define STAGE5 0x10
+#define STAGE4 0x08
+#define STAGE3 0x04
+#define STAGE2 0x02
+#define STAGE1 0x01
+
+#define SELID 0x19
+#define SELID_MASK 0xf0
+#define ONEBIT 0x08
+
+#define SCAMCTL 0x1a
+#define SCAMLVL 0x03
+#define ENSCAMSELO 0x80
+#define CLRSCAMSELID 0x40
+#define ALTSTIM 0x20
+#define DFLTTID 0x10
+
+#define TARGID 0x1b
+
+#define SPIOCAP 0x1b
+#define SOFT1 0x80
+#define SOFT0 0x40
+#define SOFTCMDEN 0x20
+#define EXT_BRDCTL 0x10
+#define SEEPROM 0x08
+#define EEPROM 0x04
+#define ROM 0x02
+#define SSPIOCPS 0x01
+
+#define BRDCTL 0x1d
+#define BRDDAT7 0x80
+#define BRDDAT6 0x40
+#define BRDDAT5 0x20
+#define BRDDAT4 0x10
+#define BRDSTB 0x10
+#define BRDDAT3 0x08
+#define BRDCS 0x08
+#define BRDDAT2 0x04
+#define BRDRW 0x04
+#define BRDRW_ULTRA2 0x02
+#define BRDCTL1 0x02
+#define BRDCTL0 0x01
+#define BRDSTB_ULTRA2 0x01
+
+#define SEECTL 0x1e
+#define EXTARBACK 0x80
+#define EXTARBREQ 0x40
+#define SEEMS 0x20
+#define SEERDY 0x10
+#define SEECS 0x08
+#define SEECK 0x04
+#define SEEDO 0x02
+#define SEEDI 0x01
+
+#define SBLKCTL 0x1f
+#define DIAGLEDEN 0x80
+#define DIAGLEDON 0x40
+#define AUTOFLUSHDIS 0x20
+#define ENAB40 0x08
+#define SELBUSB 0x08
+#define ENAB20 0x04
+#define SELWIDE 0x02
+#define XCVR 0x01
+
+#define BUSY_TARGETS 0x20
+#define TARG_SCSIRATE 0x20
+
+#define ULTRA_ENB 0x30
+#define CMDSIZE_TABLE 0x30
+
+#define DISC_DSB 0x32
+
+#define CMDSIZE_TABLE_TAIL 0x34
+
+#define MWI_RESIDUAL 0x38
+
+#define NEXT_QUEUED_SCB 0x39
+
+#define MSG_OUT 0x3a
+
+#define DMAPARAMS 0x3b
+#define PRELOADEN 0x80
+#define WIDEODD 0x40
+#define SCSIEN 0x20
+#define SDMAEN 0x10
+#define SDMAENACK 0x10
+#define HDMAEN 0x08
+#define HDMAENACK 0x08
+#define DIRECTION 0x04
+#define FIFOFLUSH 0x02
+#define FIFORESET 0x01
+
+#define SEQ_FLAGS 0x3c
+#define NOT_IDENTIFIED 0x80
+#define NO_CDB_SENT 0x40
+#define TARGET_CMD_IS_TAGGED 0x40
+#define DPHASE 0x20
+#define TARG_CMD_PENDING 0x10
+#define CMDPHASE_PENDING 0x08
+#define DPHASE_PENDING 0x04
+#define SPHASE_PENDING 0x02
+#define NO_DISCONNECT 0x01
+
+#define SAVED_SCSIID 0x3d
+
+#define SAVED_LUN 0x3e
+
+#define LASTPHASE 0x3f
+#define P_MESGIN 0xe0
+#define PHASE_MASK 0xe0
+#define P_STATUS 0xc0
+#define P_MESGOUT 0xa0
+#define P_COMMAND 0x80
+#define P_DATAIN 0x40
+#define P_BUSFREE 0x01
+#define P_DATAOUT 0x00
+#define CDI 0x80
+#define IOI 0x40
+#define MSGI 0x20
+
+#define WAITING_SCBH 0x40
+
+#define DISCONNECTED_SCBH 0x41
+
+#define FREE_SCBH 0x42
+
+#define COMPLETE_SCBH 0x43
+
+#define HSCB_ADDR 0x44
+
+#define SHARED_DATA_ADDR 0x48
+
+#define KERNEL_QINPOS 0x4c
+
+#define QINPOS 0x4d
+
+#define QOUTPOS 0x4e
+
+#define KERNEL_TQINPOS 0x4f
+
+#define TQINPOS 0x50
+
+#define ARG_1 0x51
+#define RETURN_1 0x51
+#define SEND_MSG 0x80
+#define SEND_SENSE 0x40
+#define SEND_REJ 0x20
+#define MSGOUT_PHASEMIS 0x10
+#define EXIT_MSG_LOOP 0x08
+#define CONT_MSG_LOOP 0x04
+#define CONT_TARG_SESSION 0x02
+
+#define ARG_2 0x52
+#define RETURN_2 0x52
+
+#define LAST_MSG 0x53
+#define TARG_IMMEDIATE_SCB 0x53
+
+#define SCSISEQ_TEMPLATE 0x54
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRSELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+
+#define HA_274_BIOSGLOBAL 0x56
+#define INITIATOR_TAG 0x56
+#define HA_274_EXTENDED_TRANS 0x01
+
+#define SEQ_FLAGS2 0x57
+#define TARGET_MSG_PENDING 0x02
+#define SCB_DMA 0x01
+
+#define SCSICONF 0x5a
+#define HWSCSIID 0x0f
+#define HSCSIID 0x07
+#define TERM_ENB 0x80
+#define RESET_SCSI 0x40
+#define ENSPCHK 0x20
+
+#define INTDEF 0x5c
+#define VECTOR 0x0f
+#define EDGE_TRIG 0x80
+
+#define HOSTCONF 0x5d
+
+#define HA_274_BIOSCTRL 0x5f
+#define BIOSDISABLED 0x30
+#define BIOSMODE 0x30
+#define CHANNEL_B_PRIMARY 0x08
+
+#define SEQCTL 0x60
+#define PERRORDIS 0x80
+#define PAUSEDIS 0x40
+#define FAILDIS 0x20
+#define FASTMODE 0x10
+#define BRKADRINTEN 0x08
+#define STEP 0x04
+#define SEQRESET 0x02
+#define LOADRAM 0x01
+
+#define SEQRAM 0x61
+
+#define SEQADDR0 0x62
+
+#define SEQADDR1 0x63
+#define SEQADDR1_MASK 0x01
+
+#define ACCUM 0x64
+
+#define SINDEX 0x65
+
+#define DINDEX 0x66
+
+#define ALLONES 0x69
+
+#define ALLZEROS 0x6a
+
+#define NONE 0x6a
+
+#define FLAGS 0x6b
+#define ZERO 0x02
+#define CARRY 0x01
+
+#define SINDIR 0x6c
+
+#define DINDIR 0x6d
+
+#define FUNCTION1 0x6e
+
+#define STACK 0x6f
+
+#define TARG_OFFSET 0x70
+
+#define SRAM_BASE 0x70
+
+#define BCTL 0x84
+#define ACE 0x08
+#define ENABLE 0x01
+
+#define DSCOMMAND0 0x84
+#define CACHETHEN 0x80
+#define DPARCKEN 0x40
+#define MPARCKEN 0x20
+#define EXTREQLCK 0x10
+#define INTSCBRAMSEL 0x08
+#define RAMPS 0x04
+#define USCBSIZE32 0x02
+#define CIOPARCKEN 0x01
+
+#define BUSTIME 0x85
+#define BOFF 0xf0
+#define BON 0x0f
+
+#define DSCOMMAND1 0x85
+#define DSLATT 0xfc
+#define HADDLDSEL1 0x02
+#define HADDLDSEL0 0x01
+
+#define BUSSPD 0x86
+#define DFTHRSH 0xc0
+#define DFTHRSH_75 0x80
+#define STBOFF 0x38
+#define STBON 0x07
+
+#define HS_MAILBOX 0x86
+#define HOST_MAILBOX 0xf0
+#define HOST_TQINPOS 0x80
+#define SEQ_MAILBOX 0x0f
+
+#define DSPCISTATUS 0x86
+#define DFTHRSH_100 0xc0
+
+#define HCNTRL 0x87
+#define POWRDN 0x40
+#define SWINT 0x10
+#define IRQMS 0x08
+#define PAUSE 0x04
+#define INTEN 0x02
+#define CHIPRST 0x01
+#define CHIPRSTACK 0x01
+
+#define HADDR 0x88
+
+#define HCNT 0x8c
+
+#define SCBPTR 0x90
+
+#define INTSTAT 0x91
+#define SEQINT_MASK 0xf1
+#define OUT_OF_RANGE 0xe1
+#define NO_FREE_SCB 0xd1
+#define SCB_MISMATCH 0xc1
+#define MISSED_BUSFREE 0xb1
+#define MKMSG_FAILED 0xa1
+#define DATA_OVERRUN 0x91
+#define PERR_DETECTED 0x81
+#define BAD_STATUS 0x71
+#define HOST_MSG_LOOP 0x61
+#define PDATA_REINIT 0x51
+#define IGN_WIDE_RES 0x41
+#define NO_MATCH 0x31
+#define PROTO_VIOLATION 0x21
+#define SEND_REJECT 0x11
+#define INT_PEND 0x0f
+#define BAD_PHASE 0x01
+#define BRKADRINT 0x08
+#define SCSIINT 0x04
+#define CMDCMPLT 0x02
+#define SEQINT 0x01
+
+#define CLRINT 0x92
+#define CLRPARERR 0x10
+#define CLRBRKADRINT 0x08
+#define CLRSCSIINT 0x04
+#define CLRCMDINT 0x02
+#define CLRSEQINT 0x01
+
+#define ERROR 0x92
+#define CIOPARERR 0x80
+#define PCIERRSTAT 0x40
+#define MPARERR 0x20
+#define DPARERR 0x10
+#define SQPARERR 0x08
+#define ILLOPCODE 0x04
+#define ILLSADDR 0x02
+#define ILLHADDR 0x01
+
+#define DFCNTRL 0x93
+
+#define DFSTATUS 0x94
+#define PRELOAD_AVAIL 0x80
+#define DFCACHETH 0x40
+#define FIFOQWDEMP 0x20
+#define MREQPEND 0x10
+#define HDONE 0x08
+#define DFTHRESH 0x04
+#define FIFOFULL 0x02
+#define FIFOEMP 0x01
+
+#define DFWADDR 0x95
+
+#define DFRADDR 0x97
+
+#define DFDAT 0x99
+
+#define SCBCNT 0x9a
+#define SCBCNT_MASK 0x1f
+#define SCBAUTO 0x80
+
+#define QINFIFO 0x9b
+
+#define QINCNT 0x9c
+
+#define QOUTFIFO 0x9d
+
+#define CRCCONTROL1 0x9d
+#define CRCONSEEN 0x80
+#define CRCVALCHKEN 0x40
+#define CRCENDCHKEN 0x20
+#define CRCREQCHKEN 0x10
+#define TARGCRCENDEN 0x08
+#define TARGCRCCNTEN 0x04
+
+#define QOUTCNT 0x9e
+
+#define SCSIPHASE 0x9e
+#define DATA_PHASE_MASK 0x03
+#define STATUS_PHASE 0x20
+#define COMMAND_PHASE 0x10
+#define MSG_IN_PHASE 0x08
+#define MSG_OUT_PHASE 0x04
+#define DATA_IN_PHASE 0x02
+#define DATA_OUT_PHASE 0x01
+
+#define SFUNCT 0x9f
+#define ALT_MODE 0x80
+
+#define SCB_BASE 0xa0
+
+#define SCB_CDB_PTR 0xa0
+#define SCB_RESIDUAL_DATACNT 0xa0
+#define SCB_CDB_STORE 0xa0
+
+#define SCB_RESIDUAL_SGPTR 0xa4
+
+#define SCB_SCSI_STATUS 0xa8
+
+#define SCB_TARGET_PHASES 0xa9
+
+#define SCB_TARGET_DATA_DIR 0xaa
+
+#define SCB_TARGET_ITAG 0xab
+
+#define SCB_DATAPTR 0xac
+
+#define SCB_DATACNT 0xb0
+#define SG_HIGH_ADDR_BITS 0x7f
+#define SG_LAST_SEG 0x80
+
+#define SCB_SGPTR 0xb4
+#define SG_RESID_VALID 0x04
+#define SG_FULL_RESID 0x02
+#define SG_LIST_NULL 0x01
+
+#define SCB_CONTROL 0xb8
+#define SCB_TAG_TYPE 0x03
+#define STATUS_RCVD 0x80
+#define TARGET_SCB 0x80
+#define DISCENB 0x40
+#define TAG_ENB 0x20
+#define MK_MESSAGE 0x10
+#define ULTRAENB 0x08
+#define DISCONNECTED 0x04
+
+#define SCB_SCSIID 0xb9
+#define TID 0xf0
+#define TWIN_TID 0x70
+#define OID 0x0f
+#define TWIN_CHNLB 0x80
+
+#define SCB_LUN 0xba
+#define LID 0x3f
+#define SCB_XFERLEN_ODD 0x80
+
+#define SCB_TAG 0xbb
+
+#define SCB_CDB_LEN 0xbc
+
+#define SCB_SCSIRATE 0xbd
+
+#define SCB_SCSIOFFSET 0xbe
+
+#define SCB_NEXT 0xbf
+
+#define SCB_64_SPARE 0xc0
+
+#define SEECTL_2840 0xc0
+#define CS_2840 0x04
+#define CK_2840 0x02
+#define DO_2840 0x01
+
+#define STATUS_2840 0xc1
+#define BIOS_SEL 0x60
+#define ADSEL 0x1e
+#define EEPROM_TF 0x80
+#define DI_2840 0x01
+
+#define SCB_64_BTT 0xd0
+
+#define CCHADDR 0xe0
+
+#define CCHCNT 0xe8
+
+#define CCSGRAM 0xe9
+
+#define CCSGADDR 0xea
+
+#define CCSGCTL 0xeb
+#define CCSGDONE 0x80
+#define CCSGEN 0x08
+#define SG_FETCH_NEEDED 0x02
+#define CCSGRESET 0x01
+
+#define CCSCBRAM 0xec
+
+#define CCSCBADDR 0xed
+
+#define CCSCBCTL 0xee
+#define CCSCBDONE 0x80
+#define ARRDONE 0x40
+#define CCARREN 0x10
+#define CCSCBEN 0x08
+#define CCSCBDIR 0x04
+#define CCSCBRESET 0x01
+
+#define CCSCBCNT 0xef
+
+#define SCBBADDR 0xf0
+
+#define CCSCBPTR 0xf1
+
+#define HNSCB_QOFF 0xf4
+
+#define SNSCB_QOFF 0xf6
+
+#define SDSCB_QOFF 0xf8
+
+#define QOFF_CTLSTA 0xfa
+#define SCB_QSIZE 0x07
+#define SCB_QSIZE_256 0x06
+#define SCB_AVAIL 0x40
+#define SNSCB_ROLLOVER 0x20
+#define SDSCB_ROLLOVER 0x10
+
+#define DFF_THRSH 0xfb
+#define WR_DFTHRSH 0x70
+#define WR_DFTHRSH_MAX 0x70
+#define WR_DFTHRSH_90 0x60
+#define WR_DFTHRSH_85 0x50
+#define WR_DFTHRSH_75 0x40
+#define WR_DFTHRSH_63 0x30
+#define WR_DFTHRSH_50 0x20
+#define WR_DFTHRSH_25 0x10
+#define RD_DFTHRSH 0x07
+#define RD_DFTHRSH_MAX 0x07
+#define RD_DFTHRSH_90 0x06
+#define RD_DFTHRSH_85 0x05
+#define RD_DFTHRSH_75 0x04
+#define RD_DFTHRSH_63 0x03
+#define RD_DFTHRSH_50 0x02
+#define RD_DFTHRSH_25 0x01
+#define RD_DFTHRSH_MIN 0x00
+#define WR_DFTHRSH_MIN 0x00
+
+#define SG_CACHE_SHADOW 0xfc
+#define SG_ADDR_MASK 0xf8
+#define LAST_SEG 0x02
+#define LAST_SEG_DONE 0x01
+
+#define SG_CACHE_PRE 0xfc
+
+
+#define MAX_OFFSET_ULTRA2 0x7f
+#define MAX_OFFSET_16BIT 0x08
+#define BUS_8_BIT 0x00
+#define TARGET_CMD_CMPLT 0xfe
+#define STATUS_QUEUE_FULL 0x28
+#define STATUS_BUSY 0x08
+#define MAX_OFFSET_8BIT 0x0f
+#define BUS_32_BIT 0x02
+#define CCSGADDR_MAX 0x80
+#define TID_SHIFT 0x04
+#define SCB_DOWNLOAD_SIZE_64 0x30
+#define HOST_MAILBOX_SHIFT 0x04
+#define CMD_GROUP_CODE_SHIFT 0x05
+#define CCSGRAM_MAXSEGS 0x10
+#define SCB_LIST_NULL 0xff
+#define SG_SIZEOF 0x08
+#define SCB_DOWNLOAD_SIZE 0x20
+#define SEQ_MAILBOX_SHIFT 0x00
+#define TARGET_DATA_IN 0x01
+#define HOST_MSG 0xff
+#define MAX_OFFSET 0x7f
+#define BUS_16_BIT 0x01
+#define SCB_UPLOAD_SIZE 0x20
+#define STACK_SIZE 0x04
+
+
+/* Downloaded Constant Definitions */
+#define INVERTED_CACHESIZE_MASK 0x03
+#define SG_PREFETCH_ADDR_MASK 0x06
+#define SG_PREFETCH_ALIGN_MASK 0x05
+#define QOUTFIFO_OFFSET 0x00
+#define SG_PREFETCH_CNT 0x04
+#define CACHESIZE_MASK 0x02
+#define QINFIFO_OFFSET 0x01
+#define DOWNLOAD_CONST_COUNT 0x07
+
+
+/* Exported Labels */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
new file mode 100644
index 000000000..9f9b88047
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -0,0 +1,413 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ * from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
+ */
+
+#include "aic7xxx_osm.h"
+
+static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
+ { "SCSIRSTO", 0x01, 0x01 },
+ { "ENAUTOATNP", 0x02, 0x02 },
+ { "ENAUTOATNI", 0x04, 0x04 },
+ { "ENAUTOATNO", 0x08, 0x08 },
+ { "ENRSELI", 0x10, 0x10 },
+ { "ENSELI", 0x20, 0x20 },
+ { "ENSELO", 0x40, 0x40 },
+ { "TEMODE", 0x80, 0x80 }
+};
+
+int
+ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCSISEQ_parse_table, 8, "SCSISEQ",
+ 0x00, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+ { "CLRCHN", 0x02, 0x02 },
+ { "SCAMEN", 0x04, 0x04 },
+ { "SPIOEN", 0x08, 0x08 },
+ { "CLRSTCNT", 0x10, 0x10 },
+ { "FAST20", 0x20, 0x20 },
+ { "DFPEXP", 0x40, 0x40 },
+ { "DFON", 0x80, 0x80 }
+};
+
+int
+ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SXFRCTL0_parse_table, 7, "SXFRCTL0",
+ 0x01, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
+ { "ACKI", 0x01, 0x01 },
+ { "REQI", 0x02, 0x02 },
+ { "BSYI", 0x04, 0x04 },
+ { "SELI", 0x08, 0x08 },
+ { "ATNI", 0x10, 0x10 },
+ { "MSGI", 0x20, 0x20 },
+ { "IOI", 0x40, 0x40 },
+ { "CDI", 0x80, 0x80 },
+ { "P_DATAOUT", 0x00, 0x00 },
+ { "P_DATAOUT_DT", 0x20, 0x20 },
+ { "P_DATAIN", 0x40, 0x40 },
+ { "P_DATAIN_DT", 0x60, 0x60 },
+ { "P_COMMAND", 0x80, 0x80 },
+ { "P_MESGOUT", 0xa0, 0xa0 },
+ { "P_STATUS", 0xc0, 0xc0 },
+ { "PHASE_MASK", 0xe0, 0xe0 },
+ { "P_MESGIN", 0xe0, 0xe0 }
+};
+
+int
+ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCSISIGI_parse_table, 17, "SCSISIGI",
+ 0x03, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
+ { "SINGLE_EDGE", 0x10, 0x10 },
+ { "ENABLE_CRC", 0x40, 0x40 },
+ { "WIDEXFER", 0x80, 0x80 },
+ { "SXFR_ULTRA2", 0x0f, 0x0f },
+ { "SOFS", 0x0f, 0x0f },
+ { "SXFR", 0x70, 0x70 }
+};
+
+int
+ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCSIRATE_parse_table, 6, "SCSIRATE",
+ 0x04, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
+ { "DMADONE", 0x01, 0x01 },
+ { "SPIORDY", 0x02, 0x02 },
+ { "SDONE", 0x04, 0x04 },
+ { "SWRAP", 0x08, 0x08 },
+ { "IOERR", 0x08, 0x08 },
+ { "SELINGO", 0x10, 0x10 },
+ { "SELDI", 0x20, 0x20 },
+ { "SELDO", 0x40, 0x40 },
+ { "TARGET", 0x80, 0x80 }
+};
+
+int
+ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SSTAT0_parse_table, 9, "SSTAT0",
+ 0x0b, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
+ { "REQINIT", 0x01, 0x01 },
+ { "PHASECHG", 0x02, 0x02 },
+ { "SCSIPERR", 0x04, 0x04 },
+ { "BUSFREE", 0x08, 0x08 },
+ { "PHASEMIS", 0x10, 0x10 },
+ { "SCSIRSTI", 0x20, 0x20 },
+ { "ATNTARG", 0x40, 0x40 },
+ { "SELTO", 0x80, 0x80 }
+};
+
+int
+ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SSTAT1_parse_table, 8, "SSTAT1",
+ 0x0c, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
+ { "DUAL_EDGE_ERR", 0x01, 0x01 },
+ { "CRCREQERR", 0x02, 0x02 },
+ { "CRCENDERR", 0x04, 0x04 },
+ { "CRCVALERR", 0x08, 0x08 },
+ { "EXP_ACTIVE", 0x10, 0x10 },
+ { "SHVALID", 0x40, 0x40 },
+ { "OVERRUN", 0x80, 0x80 },
+ { "SFCNT", 0x1f, 0x1f }
+};
+
+int
+ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SSTAT2_parse_table, 8, "SSTAT2",
+ 0x0d, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
+ { "OFFCNT", 0x0f, 0x0f },
+ { "U2OFFCNT", 0x7f, 0x7f },
+ { "SCSICNT", 0xf0, 0xf0 }
+};
+
+int
+ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SSTAT3_parse_table, 3, "SSTAT3",
+ 0x0e, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
+ { "ENDMADONE", 0x01, 0x01 },
+ { "ENSPIORDY", 0x02, 0x02 },
+ { "ENSDONE", 0x04, 0x04 },
+ { "ENSWRAP", 0x08, 0x08 },
+ { "ENIOERR", 0x08, 0x08 },
+ { "ENSELINGO", 0x10, 0x10 },
+ { "ENSELDI", 0x20, 0x20 },
+ { "ENSELDO", 0x40, 0x40 }
+};
+
+int
+ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SIMODE0_parse_table, 8, "SIMODE0",
+ 0x10, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
+ { "ENREQINIT", 0x01, 0x01 },
+ { "ENPHASECHG", 0x02, 0x02 },
+ { "ENSCSIPERR", 0x04, 0x04 },
+ { "ENBUSFREE", 0x08, 0x08 },
+ { "ENPHASEMIS", 0x10, 0x10 },
+ { "ENSCSIRST", 0x20, 0x20 },
+ { "ENATNTARG", 0x40, 0x40 },
+ { "ENSELTIMO", 0x80, 0x80 }
+};
+
+int
+ahc_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SIMODE1_parse_table, 8, "SIMODE1",
+ 0x11, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(NULL, 0, "SCSIBUSL",
+ 0x12, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
+ { "XCVR", 0x01, 0x01 },
+ { "SELWIDE", 0x02, 0x02 },
+ { "ENAB20", 0x04, 0x04 },
+ { "SELBUSB", 0x08, 0x08 },
+ { "ENAB40", 0x08, 0x08 },
+ { "AUTOFLUSHDIS", 0x20, 0x20 },
+ { "DIAGLEDON", 0x40, 0x40 },
+ { "DIAGLEDEN", 0x80, 0x80 }
+};
+
+int
+ahc_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SBLKCTL_parse_table, 8, "SBLKCTL",
+ 0x1f, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+ { "NO_DISCONNECT", 0x01, 0x01 },
+ { "SPHASE_PENDING", 0x02, 0x02 },
+ { "DPHASE_PENDING", 0x04, 0x04 },
+ { "CMDPHASE_PENDING", 0x08, 0x08 },
+ { "TARG_CMD_PENDING", 0x10, 0x10 },
+ { "DPHASE", 0x20, 0x20 },
+ { "NO_CDB_SENT", 0x40, 0x40 },
+ { "TARGET_CMD_IS_TAGGED",0x40, 0x40 },
+ { "NOT_IDENTIFIED", 0x80, 0x80 }
+};
+
+int
+ahc_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SEQ_FLAGS_parse_table, 9, "SEQ_FLAGS",
+ 0x3c, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
+ { "MSGI", 0x20, 0x20 },
+ { "IOI", 0x40, 0x40 },
+ { "CDI", 0x80, 0x80 },
+ { "P_DATAOUT", 0x00, 0x00 },
+ { "P_BUSFREE", 0x01, 0x01 },
+ { "P_DATAIN", 0x40, 0x40 },
+ { "P_COMMAND", 0x80, 0x80 },
+ { "P_MESGOUT", 0xa0, 0xa0 },
+ { "P_STATUS", 0xc0, 0xc0 },
+ { "PHASE_MASK", 0xe0, 0xe0 },
+ { "P_MESGIN", 0xe0, 0xe0 }
+};
+
+int
+ahc_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(LASTPHASE_parse_table, 11, "LASTPHASE",
+ 0x3f, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
+ { "LOADRAM", 0x01, 0x01 },
+ { "SEQRESET", 0x02, 0x02 },
+ { "STEP", 0x04, 0x04 },
+ { "BRKADRINTEN", 0x08, 0x08 },
+ { "FASTMODE", 0x10, 0x10 },
+ { "FAILDIS", 0x20, 0x20 },
+ { "PAUSEDIS", 0x40, 0x40 },
+ { "PERRORDIS", 0x80, 0x80 }
+};
+
+int
+ahc_seqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SEQCTL_parse_table, 8, "SEQCTL",
+ 0x60, regvalue, cur_col, wrap));
+}
+
+int
+ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(NULL, 0, "SRAM_BASE",
+ 0x70, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
+ { "ILLHADDR", 0x01, 0x01 },
+ { "ILLSADDR", 0x02, 0x02 },
+ { "ILLOPCODE", 0x04, 0x04 },
+ { "SQPARERR", 0x08, 0x08 },
+ { "DPARERR", 0x10, 0x10 },
+ { "MPARERR", 0x20, 0x20 },
+ { "PCIERRSTAT", 0x40, 0x40 },
+ { "CIOPARERR", 0x80, 0x80 }
+};
+
+int
+ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(ERROR_parse_table, 8, "ERROR",
+ 0x92, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
+ { "FIFORESET", 0x01, 0x01 },
+ { "FIFOFLUSH", 0x02, 0x02 },
+ { "DIRECTION", 0x04, 0x04 },
+ { "HDMAEN", 0x08, 0x08 },
+ { "HDMAENACK", 0x08, 0x08 },
+ { "SDMAEN", 0x10, 0x10 },
+ { "SDMAENACK", 0x10, 0x10 },
+ { "SCSIEN", 0x20, 0x20 },
+ { "WIDEODD", 0x40, 0x40 },
+ { "PRELOADEN", 0x80, 0x80 }
+};
+
+int
+ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(DFCNTRL_parse_table, 10, "DFCNTRL",
+ 0x93, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
+ { "FIFOEMP", 0x01, 0x01 },
+ { "FIFOFULL", 0x02, 0x02 },
+ { "DFTHRESH", 0x04, 0x04 },
+ { "HDONE", 0x08, 0x08 },
+ { "MREQPEND", 0x10, 0x10 },
+ { "FIFOQWDEMP", 0x20, 0x20 },
+ { "DFCACHETH", 0x40, 0x40 },
+ { "PRELOAD_AVAIL", 0x80, 0x80 }
+};
+
+int
+ahc_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(DFSTATUS_parse_table, 8, "DFSTATUS",
+ 0x94, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+ { "DATA_OUT_PHASE", 0x01, 0x01 },
+ { "DATA_IN_PHASE", 0x02, 0x02 },
+ { "MSG_OUT_PHASE", 0x04, 0x04 },
+ { "MSG_IN_PHASE", 0x08, 0x08 },
+ { "COMMAND_PHASE", 0x10, 0x10 },
+ { "STATUS_PHASE", 0x20, 0x20 },
+ { "DATA_PHASE_MASK", 0x03, 0x03 }
+};
+
+int
+ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCSIPHASE_parse_table, 7, "SCSIPHASE",
+ 0x9e, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(NULL, 0, "SCB_BASE",
+ 0xa0, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+ { "DISCONNECTED", 0x04, 0x04 },
+ { "ULTRAENB", 0x08, 0x08 },
+ { "MK_MESSAGE", 0x10, 0x10 },
+ { "TAG_ENB", 0x20, 0x20 },
+ { "DISCENB", 0x40, 0x40 },
+ { "TARGET_SCB", 0x80, 0x80 },
+ { "STATUS_RCVD", 0x80, 0x80 },
+ { "SCB_TAG_TYPE", 0x03, 0x03 }
+};
+
+int
+ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCB_CONTROL_parse_table, 8, "SCB_CONTROL",
+ 0xb8, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+ { "TWIN_CHNLB", 0x80, 0x80 },
+ { "OID", 0x0f, 0x0f },
+ { "TWIN_TID", 0x70, 0x70 },
+ { "TID", 0xf0, 0xf0 }
+};
+
+int
+ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCB_SCSIID_parse_table, 4, "SCB_SCSIID",
+ 0xb9, regvalue, cur_col, wrap));
+}
+
+static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
+ { "SCB_XFERLEN_ODD", 0x80, 0x80 },
+ { "LID", 0x3f, 0x3f }
+};
+
+int
+ahc_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(SCB_LUN_parse_table, 2, "SCB_LUN",
+ 0xba, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahc_print_register(NULL, 0, "SCB_TAG",
+ 0xbb, regvalue, cur_col, wrap));
+}
+
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
new file mode 100644
index 000000000..07e93fbae
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -0,0 +1,1308 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ * from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
+ */
+static const uint8_t seqprog[] = {
+ 0xb2, 0x00, 0x00, 0x08,
+ 0xf7, 0x11, 0x22, 0x08,
+ 0x00, 0x65, 0xee, 0x59,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x6a, 0x24, 0x08,
+ 0x40, 0x00, 0x40, 0x68,
+ 0x08, 0x1f, 0x3e, 0x10,
+ 0x40, 0x00, 0x40, 0x68,
+ 0xff, 0x40, 0x3c, 0x60,
+ 0x08, 0x1f, 0x3e, 0x10,
+ 0x60, 0x0b, 0x42, 0x68,
+ 0x40, 0xfa, 0x12, 0x78,
+ 0x01, 0x4d, 0xc8, 0x30,
+ 0x00, 0x4c, 0x12, 0x70,
+ 0x01, 0x39, 0xa2, 0x30,
+ 0x00, 0x6a, 0xc2, 0x5e,
+ 0x01, 0x51, 0x20, 0x31,
+ 0x01, 0x57, 0xae, 0x00,
+ 0x0d, 0x6a, 0x76, 0x00,
+ 0x00, 0x51, 0x14, 0x5e,
+ 0x01, 0x51, 0xc8, 0x30,
+ 0x00, 0x39, 0xc8, 0x60,
+ 0x00, 0xbb, 0x30, 0x70,
+ 0xc1, 0x6a, 0xda, 0x5e,
+ 0x01, 0xbf, 0x72, 0x30,
+ 0x01, 0x40, 0x7e, 0x31,
+ 0x01, 0x90, 0x80, 0x30,
+ 0x01, 0xf6, 0xd4, 0x30,
+ 0x01, 0x4d, 0x9a, 0x18,
+ 0xfe, 0x57, 0xae, 0x08,
+ 0x01, 0x40, 0x20, 0x31,
+ 0x00, 0x65, 0xcc, 0x58,
+ 0x60, 0x0b, 0x40, 0x78,
+ 0x08, 0x6a, 0x18, 0x00,
+ 0x08, 0x11, 0x22, 0x00,
+ 0x60, 0x0b, 0x00, 0x78,
+ 0x40, 0x0b, 0xfa, 0x68,
+ 0x80, 0x0b, 0xb6, 0x78,
+ 0x20, 0x6a, 0x16, 0x00,
+ 0xa4, 0x6a, 0x06, 0x00,
+ 0x08, 0x6a, 0x78, 0x00,
+ 0x01, 0x50, 0xc8, 0x30,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x48, 0x6a, 0xfe, 0x5d,
+ 0x01, 0x6a, 0xdc, 0x01,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x48, 0x6a, 0xfe, 0x5d,
+ 0x01, 0x6a, 0x26, 0x01,
+ 0xf0, 0x19, 0x7a, 0x08,
+ 0x0f, 0x18, 0xc8, 0x08,
+ 0x0f, 0x0f, 0xc8, 0x08,
+ 0x0f, 0x05, 0xc8, 0x08,
+ 0x00, 0x3d, 0x7a, 0x00,
+ 0x08, 0x1f, 0x6e, 0x78,
+ 0x80, 0x3d, 0x7a, 0x00,
+ 0x01, 0x3d, 0xd8, 0x31,
+ 0x01, 0x3d, 0x32, 0x31,
+ 0x10, 0x03, 0x4e, 0x79,
+ 0x00, 0x65, 0xf2, 0x58,
+ 0x80, 0x66, 0xae, 0x78,
+ 0x01, 0x66, 0xd8, 0x31,
+ 0x01, 0x66, 0x32, 0x31,
+ 0x3f, 0x66, 0x7c, 0x08,
+ 0x40, 0x66, 0x82, 0x68,
+ 0x01, 0x3c, 0x78, 0x00,
+ 0x10, 0x03, 0x9e, 0x78,
+ 0x00, 0x65, 0xf2, 0x58,
+ 0xe0, 0x66, 0xc8, 0x18,
+ 0x00, 0x65, 0xaa, 0x50,
+ 0xdd, 0x66, 0xc8, 0x18,
+ 0x00, 0x65, 0xaa, 0x48,
+ 0x01, 0x66, 0xd8, 0x31,
+ 0x01, 0x66, 0x32, 0x31,
+ 0x10, 0x03, 0x4e, 0x79,
+ 0x00, 0x65, 0xf2, 0x58,
+ 0x01, 0x66, 0xd8, 0x31,
+ 0x01, 0x66, 0x32, 0x31,
+ 0x01, 0x66, 0xac, 0x30,
+ 0x40, 0x3c, 0x78, 0x00,
+ 0xff, 0x6a, 0xd8, 0x01,
+ 0xff, 0x6a, 0x32, 0x01,
+ 0x10, 0x3c, 0x78, 0x00,
+ 0x02, 0x57, 0x40, 0x69,
+ 0x10, 0x03, 0x3e, 0x69,
+ 0x00, 0x65, 0x20, 0x41,
+ 0x02, 0x57, 0xae, 0x00,
+ 0x00, 0x65, 0x9e, 0x40,
+ 0x61, 0x6a, 0xda, 0x5e,
+ 0x08, 0x51, 0x20, 0x71,
+ 0x02, 0x0b, 0xb2, 0x78,
+ 0x00, 0x65, 0xae, 0x40,
+ 0x1a, 0x01, 0x02, 0x00,
+ 0xf0, 0x19, 0x7a, 0x08,
+ 0x0f, 0x0f, 0xc8, 0x08,
+ 0x0f, 0x05, 0xc8, 0x08,
+ 0x00, 0x3d, 0x7a, 0x00,
+ 0x08, 0x1f, 0xc4, 0x78,
+ 0x80, 0x3d, 0x7a, 0x00,
+ 0x20, 0x6a, 0x16, 0x00,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x00, 0x65, 0xb4, 0x5e,
+ 0x00, 0x65, 0x12, 0x40,
+ 0x20, 0x11, 0xd2, 0x68,
+ 0x20, 0x6a, 0x18, 0x00,
+ 0x20, 0x11, 0x22, 0x00,
+ 0xf7, 0x1f, 0xca, 0x08,
+ 0x80, 0xb9, 0xd8, 0x78,
+ 0x08, 0x65, 0xca, 0x00,
+ 0x01, 0x65, 0x3e, 0x30,
+ 0x01, 0xb9, 0x1e, 0x30,
+ 0x7f, 0xb9, 0x0a, 0x08,
+ 0x01, 0xb9, 0x0a, 0x30,
+ 0x01, 0x54, 0xca, 0x30,
+ 0x80, 0xb8, 0xe6, 0x78,
+ 0x80, 0x65, 0xca, 0x00,
+ 0x01, 0x65, 0x00, 0x34,
+ 0x01, 0x54, 0x00, 0x34,
+ 0x08, 0xb8, 0xee, 0x78,
+ 0x20, 0x01, 0x02, 0x00,
+ 0x02, 0xbd, 0x08, 0x34,
+ 0x01, 0xbd, 0x08, 0x34,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0xf4, 0x78,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0x01, 0x06, 0xcc, 0x34,
+ 0xb2, 0x00, 0x00, 0x08,
+ 0x01, 0x40, 0x20, 0x31,
+ 0x01, 0xbf, 0x80, 0x30,
+ 0x01, 0xb9, 0x7a, 0x30,
+ 0x3f, 0xba, 0x7c, 0x08,
+ 0x00, 0x65, 0xea, 0x58,
+ 0x80, 0x0b, 0xc4, 0x79,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x01, 0xab, 0xac, 0x30,
+ 0xe4, 0x6a, 0x70, 0x5d,
+ 0x40, 0x6a, 0x16, 0x00,
+ 0x80, 0x3e, 0x86, 0x5d,
+ 0x20, 0xb8, 0x18, 0x79,
+ 0x20, 0x6a, 0x86, 0x5d,
+ 0x00, 0xab, 0x86, 0x5d,
+ 0x01, 0xa9, 0x78, 0x30,
+ 0x10, 0xb8, 0x20, 0x79,
+ 0xe4, 0x6a, 0x70, 0x5d,
+ 0x00, 0x65, 0xae, 0x40,
+ 0x10, 0x03, 0x3c, 0x69,
+ 0x08, 0x3c, 0x5a, 0x69,
+ 0x04, 0x3c, 0x92, 0x69,
+ 0x02, 0x3c, 0x98, 0x69,
+ 0x01, 0x3c, 0x44, 0x79,
+ 0xff, 0x6a, 0xa6, 0x00,
+ 0x00, 0x65, 0xa4, 0x59,
+ 0x00, 0x6a, 0xc2, 0x5e,
+ 0xff, 0x53, 0x30, 0x71,
+ 0x0d, 0x6a, 0x76, 0x00,
+ 0x00, 0x53, 0x14, 0x5e,
+ 0x00, 0x65, 0xea, 0x58,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xa4, 0x6a, 0x06, 0x00,
+ 0x00, 0x65, 0xf2, 0x58,
+ 0xfd, 0x57, 0xae, 0x08,
+ 0x00, 0x65, 0xae, 0x40,
+ 0xe4, 0x6a, 0x70, 0x5d,
+ 0x20, 0x3c, 0x4a, 0x79,
+ 0x02, 0x6a, 0x86, 0x5d,
+ 0x04, 0x6a, 0x86, 0x5d,
+ 0x01, 0x03, 0x4c, 0x69,
+ 0xf7, 0x11, 0x22, 0x08,
+ 0xff, 0x6a, 0x24, 0x08,
+ 0xff, 0x6a, 0x06, 0x08,
+ 0x01, 0x6a, 0x7e, 0x00,
+ 0x00, 0x65, 0xa4, 0x59,
+ 0x00, 0x65, 0x04, 0x40,
+ 0x80, 0x86, 0xc8, 0x08,
+ 0x01, 0x4f, 0xc8, 0x30,
+ 0x00, 0x50, 0x6c, 0x61,
+ 0xc4, 0x6a, 0x70, 0x5d,
+ 0x40, 0x3c, 0x68, 0x79,
+ 0x28, 0x6a, 0x86, 0x5d,
+ 0x00, 0x65, 0x4c, 0x41,
+ 0x08, 0x6a, 0x86, 0x5d,
+ 0x00, 0x65, 0x4c, 0x41,
+ 0x84, 0x6a, 0x70, 0x5d,
+ 0x00, 0x65, 0xf2, 0x58,
+ 0x01, 0x66, 0xc8, 0x30,
+ 0x01, 0x64, 0xd8, 0x31,
+ 0x01, 0x64, 0x32, 0x31,
+ 0x5b, 0x64, 0xc8, 0x28,
+ 0x30, 0x64, 0xca, 0x18,
+ 0x01, 0x6c, 0xc8, 0x30,
+ 0xff, 0x64, 0x8e, 0x79,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0x80, 0x79,
+ 0x01, 0x64, 0x86, 0x61,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0x01, 0x06, 0xd8, 0x31,
+ 0x01, 0x06, 0x32, 0x31,
+ 0xff, 0x64, 0xc8, 0x18,
+ 0xff, 0x64, 0x80, 0x69,
+ 0xf7, 0x3c, 0x78, 0x08,
+ 0x00, 0x65, 0x20, 0x41,
+ 0x40, 0xaa, 0x7e, 0x10,
+ 0x04, 0xaa, 0x70, 0x5d,
+ 0x00, 0x65, 0x58, 0x42,
+ 0xc4, 0x6a, 0x70, 0x5d,
+ 0xc0, 0x6a, 0x7e, 0x00,
+ 0x00, 0xa8, 0x86, 0x5d,
+ 0xe4, 0x6a, 0x06, 0x00,
+ 0x00, 0x6a, 0x86, 0x5d,
+ 0x00, 0x65, 0x4c, 0x41,
+ 0x10, 0x3c, 0xa8, 0x69,
+ 0x00, 0xbb, 0x8c, 0x44,
+ 0x18, 0x6a, 0xda, 0x01,
+ 0x01, 0x69, 0xd8, 0x31,
+ 0x1c, 0x6a, 0xd0, 0x01,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0xb0, 0x79,
+ 0xff, 0x6a, 0xdc, 0x09,
+ 0x01, 0x93, 0x26, 0x01,
+ 0x03, 0x6a, 0x2a, 0x01,
+ 0x01, 0x69, 0x32, 0x31,
+ 0x1c, 0x6a, 0xe2, 0x5d,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0xaa, 0x5e,
+ 0x01, 0x50, 0xa0, 0x18,
+ 0x02, 0x6a, 0x22, 0x05,
+ 0x1a, 0x01, 0x02, 0x00,
+ 0x80, 0x6a, 0x74, 0x00,
+ 0x40, 0x6a, 0x78, 0x00,
+ 0x40, 0x6a, 0x16, 0x00,
+ 0x00, 0x65, 0xda, 0x5d,
+ 0x01, 0x3f, 0xc8, 0x30,
+ 0xbf, 0x64, 0x58, 0x7a,
+ 0x80, 0x64, 0xa0, 0x73,
+ 0xa0, 0x64, 0x02, 0x74,
+ 0xc0, 0x64, 0xf6, 0x73,
+ 0xe0, 0x64, 0x32, 0x74,
+ 0x01, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0xf7, 0x11, 0x22, 0x08,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0xff, 0x6a, 0x24, 0x08,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xc0, 0x6a, 0x78, 0x00,
+ 0x09, 0x0c, 0xe8, 0x79,
+ 0x08, 0x0c, 0x04, 0x68,
+ 0xb1, 0x6a, 0xda, 0x5e,
+ 0xff, 0x6a, 0x26, 0x09,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x02, 0x6a, 0x08, 0x30,
+ 0xff, 0x6a, 0x08, 0x08,
+ 0xdf, 0x01, 0x02, 0x08,
+ 0x01, 0x6a, 0x7e, 0x00,
+ 0xc0, 0x6a, 0x78, 0x04,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x08, 0xa4, 0x48, 0x19,
+ 0x00, 0xa5, 0x4a, 0x21,
+ 0x00, 0xa6, 0x4c, 0x21,
+ 0x00, 0xa7, 0x4e, 0x25,
+ 0x08, 0xeb, 0xde, 0x7e,
+ 0x80, 0xeb, 0x08, 0x7a,
+ 0xff, 0x6a, 0xd6, 0x09,
+ 0x08, 0xeb, 0x0c, 0x6a,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x80, 0xa3, 0xde, 0x6e,
+ 0x88, 0xeb, 0x22, 0x72,
+ 0x08, 0xeb, 0xde, 0x6e,
+ 0x04, 0xea, 0x26, 0xe2,
+ 0x08, 0xee, 0xde, 0x6e,
+ 0x04, 0x6a, 0xd0, 0x81,
+ 0x05, 0xa4, 0xc0, 0x89,
+ 0x03, 0xa5, 0xc2, 0x31,
+ 0x09, 0x6a, 0xd6, 0x05,
+ 0x00, 0x65, 0x0a, 0x5a,
+ 0x06, 0xa4, 0xd4, 0x89,
+ 0x80, 0x94, 0xde, 0x7e,
+ 0x07, 0xe9, 0x10, 0x31,
+ 0x01, 0xe9, 0x46, 0x31,
+ 0x00, 0xa3, 0xbc, 0x5e,
+ 0x00, 0x65, 0xfc, 0x59,
+ 0x01, 0xa4, 0xca, 0x30,
+ 0x80, 0xa3, 0x36, 0x7a,
+ 0x02, 0x65, 0xca, 0x00,
+ 0x01, 0x65, 0xf8, 0x31,
+ 0x80, 0x93, 0x26, 0x01,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x01, 0x8c, 0xc8, 0x30,
+ 0x00, 0x88, 0xc8, 0x18,
+ 0x02, 0x64, 0xc8, 0x88,
+ 0xff, 0x64, 0xde, 0x7e,
+ 0xff, 0x8d, 0x4c, 0x6a,
+ 0xff, 0x8e, 0x4c, 0x6a,
+ 0x03, 0x8c, 0xd4, 0x98,
+ 0x00, 0x65, 0xde, 0x56,
+ 0x01, 0x64, 0x70, 0x30,
+ 0xff, 0x64, 0xc8, 0x10,
+ 0x01, 0x64, 0xc8, 0x18,
+ 0x00, 0x8c, 0x18, 0x19,
+ 0xff, 0x8d, 0x1a, 0x21,
+ 0xff, 0x8e, 0x1c, 0x25,
+ 0xc0, 0x3c, 0x5c, 0x7a,
+ 0x21, 0x6a, 0xda, 0x5e,
+ 0xa8, 0x6a, 0x76, 0x00,
+ 0x79, 0x6a, 0x76, 0x00,
+ 0x40, 0x3f, 0x64, 0x6a,
+ 0x04, 0x3b, 0x76, 0x00,
+ 0x04, 0x6a, 0xd4, 0x81,
+ 0x20, 0x3c, 0x6c, 0x7a,
+ 0x51, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0x84, 0x42,
+ 0x20, 0x3c, 0x78, 0x00,
+ 0x00, 0xb3, 0xbc, 0x5e,
+ 0x07, 0xac, 0x10, 0x31,
+ 0x05, 0xb3, 0x46, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xac, 0x6a, 0xf0, 0x5d,
+ 0xa3, 0x6a, 0xcc, 0x00,
+ 0xb3, 0x6a, 0xf4, 0x5d,
+ 0x00, 0x65, 0x3c, 0x5a,
+ 0xfd, 0xa4, 0x48, 0x09,
+ 0x03, 0x8c, 0x10, 0x30,
+ 0x00, 0x65, 0xe8, 0x5d,
+ 0x01, 0xa4, 0x96, 0x7a,
+ 0x04, 0x3b, 0x76, 0x08,
+ 0x01, 0x3b, 0x26, 0x31,
+ 0x80, 0x02, 0x04, 0x00,
+ 0x10, 0x0c, 0x8c, 0x7a,
+ 0x03, 0x9e, 0x8e, 0x6a,
+ 0x7f, 0x02, 0x04, 0x08,
+ 0x91, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x01, 0xa4, 0xca, 0x30,
+ 0x80, 0xa3, 0x9c, 0x7a,
+ 0x02, 0x65, 0xca, 0x00,
+ 0x01, 0x65, 0xf8, 0x31,
+ 0x01, 0x3b, 0x26, 0x31,
+ 0x00, 0x65, 0x10, 0x5a,
+ 0x01, 0xfc, 0xaa, 0x6a,
+ 0x80, 0x0b, 0xa0, 0x6a,
+ 0x10, 0x0c, 0xa0, 0x7a,
+ 0x20, 0x93, 0xa0, 0x6a,
+ 0x02, 0x93, 0x26, 0x01,
+ 0x02, 0xfc, 0xb4, 0x7a,
+ 0x40, 0x0d, 0xc8, 0x6a,
+ 0x01, 0xa4, 0x48, 0x01,
+ 0x00, 0x65, 0xc8, 0x42,
+ 0x40, 0x0d, 0xba, 0x6a,
+ 0x00, 0x65, 0x10, 0x5a,
+ 0x00, 0x65, 0xac, 0x42,
+ 0x80, 0xfc, 0xc4, 0x7a,
+ 0x80, 0xa4, 0xc4, 0x6a,
+ 0xff, 0xa5, 0x4a, 0x19,
+ 0xff, 0xa6, 0x4c, 0x21,
+ 0xff, 0xa7, 0x4e, 0x21,
+ 0xf8, 0xfc, 0x48, 0x09,
+ 0x7f, 0xa3, 0x46, 0x09,
+ 0x04, 0x3b, 0xe4, 0x6a,
+ 0x02, 0x93, 0x26, 0x01,
+ 0x01, 0x94, 0xca, 0x7a,
+ 0x01, 0x94, 0xca, 0x7a,
+ 0x01, 0x94, 0xca, 0x7a,
+ 0x01, 0x94, 0xca, 0x7a,
+ 0x01, 0x94, 0xca, 0x7a,
+ 0x01, 0xa4, 0xe2, 0x7a,
+ 0x01, 0xfc, 0xd8, 0x7a,
+ 0x01, 0x94, 0xe4, 0x6a,
+ 0x01, 0x94, 0xe4, 0x6a,
+ 0x01, 0x94, 0xe4, 0x6a,
+ 0x00, 0x65, 0x84, 0x42,
+ 0x01, 0x94, 0xe2, 0x7a,
+ 0x10, 0x94, 0xe4, 0x6a,
+ 0xd7, 0x93, 0x26, 0x09,
+ 0x28, 0x93, 0xe8, 0x6a,
+ 0x01, 0x85, 0x0a, 0x01,
+ 0x02, 0xfc, 0xf0, 0x6a,
+ 0x01, 0x14, 0x46, 0x31,
+ 0xff, 0x6a, 0x10, 0x09,
+ 0xfe, 0x85, 0x0a, 0x09,
+ 0xff, 0x38, 0xfe, 0x6a,
+ 0x80, 0xa3, 0xfe, 0x7a,
+ 0x80, 0x0b, 0xfc, 0x7a,
+ 0x04, 0x3b, 0xfe, 0x7a,
+ 0xbf, 0x3b, 0x76, 0x08,
+ 0x01, 0x3b, 0x26, 0x31,
+ 0x00, 0x65, 0x10, 0x5a,
+ 0x01, 0x0b, 0x0c, 0x6b,
+ 0x10, 0x0c, 0x00, 0x7b,
+ 0x04, 0x93, 0x0a, 0x6b,
+ 0x01, 0x94, 0x08, 0x7b,
+ 0x10, 0x94, 0x0a, 0x6b,
+ 0xc7, 0x93, 0x26, 0x09,
+ 0x01, 0x99, 0xd4, 0x30,
+ 0x38, 0x93, 0x0e, 0x6b,
+ 0xff, 0x08, 0x5c, 0x6b,
+ 0xff, 0x09, 0x5c, 0x6b,
+ 0xff, 0x0a, 0x5c, 0x6b,
+ 0xff, 0x38, 0x2a, 0x7b,
+ 0x04, 0x14, 0x10, 0x31,
+ 0x01, 0x38, 0x18, 0x31,
+ 0x02, 0x6a, 0x1a, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x14, 0x6a, 0xf6, 0x5d,
+ 0x00, 0x38, 0xe2, 0x5d,
+ 0xff, 0x6a, 0x70, 0x08,
+ 0x00, 0x65, 0x56, 0x43,
+ 0x80, 0xa3, 0x30, 0x7b,
+ 0x01, 0xa4, 0x48, 0x01,
+ 0x00, 0x65, 0x5c, 0x43,
+ 0x08, 0xeb, 0x36, 0x7b,
+ 0x00, 0x65, 0x10, 0x5a,
+ 0x08, 0xeb, 0x32, 0x6b,
+ 0x07, 0xe9, 0x10, 0x31,
+ 0x01, 0xe9, 0xca, 0x30,
+ 0x01, 0x65, 0x46, 0x31,
+ 0x00, 0x6a, 0xbc, 0x5e,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xa4, 0x6a, 0xf6, 0x5d,
+ 0x08, 0x6a, 0xe2, 0x5d,
+ 0x0d, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0xaa, 0x5e,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x00, 0x65, 0x8c, 0x5e,
+ 0x01, 0x99, 0x46, 0x31,
+ 0x00, 0xa3, 0xbc, 0x5e,
+ 0x01, 0x88, 0x10, 0x31,
+ 0x00, 0x65, 0x3c, 0x5a,
+ 0x00, 0x65, 0xfc, 0x59,
+ 0x03, 0x8c, 0x10, 0x30,
+ 0x00, 0x65, 0xe8, 0x5d,
+ 0x80, 0x0b, 0x84, 0x6a,
+ 0x80, 0x0b, 0x64, 0x6b,
+ 0x01, 0x0c, 0x5e, 0x7b,
+ 0x10, 0x0c, 0x84, 0x7a,
+ 0x03, 0x9e, 0x84, 0x6a,
+ 0x00, 0x65, 0x06, 0x5a,
+ 0x00, 0x6a, 0xbc, 0x5e,
+ 0x01, 0xa4, 0x84, 0x6b,
+ 0xff, 0x38, 0x7a, 0x7b,
+ 0x01, 0x38, 0xc8, 0x30,
+ 0x00, 0x08, 0x40, 0x19,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x00, 0x09, 0x42, 0x21,
+ 0x00, 0x0a, 0x44, 0x21,
+ 0xff, 0x6a, 0x70, 0x08,
+ 0x00, 0x65, 0x7c, 0x43,
+ 0x03, 0x08, 0x40, 0x31,
+ 0x03, 0x08, 0x40, 0x31,
+ 0x01, 0x08, 0x40, 0x31,
+ 0x01, 0x09, 0x42, 0x31,
+ 0x01, 0x0a, 0x44, 0x31,
+ 0xfd, 0xb4, 0x68, 0x09,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x04, 0x3c, 0xcc, 0x79,
+ 0xfb, 0x3c, 0x78, 0x08,
+ 0x04, 0x93, 0x20, 0x79,
+ 0x01, 0x0c, 0x90, 0x6b,
+ 0x80, 0xba, 0x20, 0x79,
+ 0x80, 0x04, 0x20, 0x79,
+ 0xe4, 0x6a, 0x70, 0x5d,
+ 0x23, 0x6a, 0x86, 0x5d,
+ 0x01, 0x6a, 0x86, 0x5d,
+ 0x00, 0x65, 0x20, 0x41,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x80, 0x3c, 0xa4, 0x7b,
+ 0x21, 0x6a, 0xda, 0x5e,
+ 0x01, 0xbc, 0x18, 0x31,
+ 0x02, 0x6a, 0x1a, 0x31,
+ 0x02, 0x6a, 0xf8, 0x01,
+ 0x01, 0xbc, 0x10, 0x30,
+ 0x02, 0x6a, 0x12, 0x30,
+ 0x01, 0xbc, 0x10, 0x30,
+ 0xff, 0x6a, 0x12, 0x08,
+ 0xff, 0x6a, 0x14, 0x08,
+ 0xf3, 0xbc, 0xd4, 0x18,
+ 0xa0, 0x6a, 0xca, 0x53,
+ 0x04, 0xa0, 0x10, 0x31,
+ 0xac, 0x6a, 0x26, 0x01,
+ 0x04, 0xa0, 0x10, 0x31,
+ 0x03, 0x08, 0x18, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xa0, 0x6a, 0xf6, 0x5d,
+ 0x00, 0xbc, 0xe2, 0x5d,
+ 0x3d, 0x6a, 0x26, 0x01,
+ 0x00, 0x65, 0xe2, 0x43,
+ 0xff, 0x6a, 0x10, 0x09,
+ 0xa4, 0x6a, 0x26, 0x01,
+ 0x0c, 0xa0, 0x32, 0x31,
+ 0x05, 0x6a, 0x26, 0x01,
+ 0x35, 0x6a, 0x26, 0x01,
+ 0x0c, 0xa0, 0x32, 0x31,
+ 0x36, 0x6a, 0x26, 0x01,
+ 0x02, 0x93, 0x26, 0x01,
+ 0x35, 0x6a, 0x26, 0x01,
+ 0x00, 0x65, 0x9e, 0x5e,
+ 0x00, 0x65, 0x9e, 0x5e,
+ 0x02, 0x93, 0x26, 0x01,
+ 0xbf, 0x3c, 0x78, 0x08,
+ 0x04, 0x0b, 0xe8, 0x6b,
+ 0x10, 0x0c, 0xe4, 0x7b,
+ 0x01, 0x03, 0xe8, 0x6b,
+ 0x20, 0x93, 0xea, 0x6b,
+ 0x04, 0x0b, 0xf0, 0x6b,
+ 0x40, 0x3c, 0x78, 0x00,
+ 0xc7, 0x93, 0x26, 0x09,
+ 0x38, 0x93, 0xf2, 0x6b,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x80, 0x3c, 0x58, 0x6c,
+ 0x01, 0x06, 0x50, 0x31,
+ 0x80, 0xb8, 0x70, 0x01,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x10, 0x3f, 0x06, 0x00,
+ 0x10, 0x6a, 0x06, 0x00,
+ 0x01, 0x3a, 0xca, 0x30,
+ 0x80, 0x65, 0x1e, 0x64,
+ 0x10, 0xb8, 0x42, 0x6c,
+ 0xc0, 0x3e, 0xca, 0x00,
+ 0x40, 0xb8, 0x0e, 0x6c,
+ 0xbf, 0x65, 0xca, 0x08,
+ 0x20, 0xb8, 0x22, 0x7c,
+ 0x01, 0x65, 0x0c, 0x30,
+ 0x00, 0x65, 0xda, 0x5d,
+ 0xa0, 0x3f, 0x2a, 0x64,
+ 0x23, 0xb8, 0x0c, 0x08,
+ 0x00, 0x65, 0xda, 0x5d,
+ 0xa0, 0x3f, 0x2a, 0x64,
+ 0x00, 0xbb, 0x22, 0x44,
+ 0xff, 0x65, 0x22, 0x64,
+ 0x00, 0x65, 0x42, 0x44,
+ 0x40, 0x6a, 0x18, 0x00,
+ 0x01, 0x65, 0x0c, 0x30,
+ 0x00, 0x65, 0xda, 0x5d,
+ 0xa0, 0x3f, 0xfe, 0x73,
+ 0x40, 0x6a, 0x18, 0x00,
+ 0x01, 0x3a, 0xa6, 0x30,
+ 0x08, 0x6a, 0x74, 0x00,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x64, 0x6a, 0x6a, 0x5d,
+ 0x80, 0x64, 0xda, 0x6c,
+ 0x04, 0x64, 0x9c, 0x74,
+ 0x02, 0x64, 0xac, 0x74,
+ 0x00, 0x6a, 0x62, 0x74,
+ 0x03, 0x64, 0xca, 0x74,
+ 0x23, 0x64, 0x4a, 0x74,
+ 0x08, 0x64, 0x5e, 0x74,
+ 0x61, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0xda, 0x5d,
+ 0x08, 0x51, 0xce, 0x71,
+ 0x00, 0x65, 0x42, 0x44,
+ 0x80, 0x04, 0x5c, 0x7c,
+ 0x51, 0x6a, 0x60, 0x5d,
+ 0x01, 0x51, 0x5c, 0x64,
+ 0x01, 0xa4, 0x54, 0x7c,
+ 0x80, 0xba, 0x5e, 0x6c,
+ 0x41, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0x5e, 0x44,
+ 0x21, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0x5e, 0x44,
+ 0x07, 0x6a, 0x56, 0x5d,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x80, 0xb8, 0x58, 0x7c,
+ 0xc0, 0x3c, 0x6c, 0x7c,
+ 0x80, 0x3c, 0x58, 0x6c,
+ 0xff, 0xa8, 0x6c, 0x6c,
+ 0x40, 0x3c, 0x58, 0x6c,
+ 0x10, 0xb8, 0x70, 0x7c,
+ 0xa1, 0x6a, 0xda, 0x5e,
+ 0x01, 0xb4, 0x76, 0x6c,
+ 0x02, 0xb4, 0x78, 0x6c,
+ 0x01, 0xa4, 0x78, 0x7c,
+ 0xff, 0xa8, 0x88, 0x7c,
+ 0x04, 0xb4, 0x68, 0x01,
+ 0x01, 0x6a, 0x76, 0x00,
+ 0x00, 0xbb, 0x14, 0x5e,
+ 0xff, 0xa8, 0x88, 0x7c,
+ 0x71, 0x6a, 0xda, 0x5e,
+ 0x40, 0x51, 0x88, 0x64,
+ 0x00, 0x65, 0xb4, 0x5e,
+ 0x00, 0x65, 0xde, 0x41,
+ 0x00, 0xbb, 0x8c, 0x5c,
+ 0x00, 0x65, 0xde, 0x41,
+ 0x00, 0x65, 0xb4, 0x5e,
+ 0x01, 0x65, 0xa2, 0x30,
+ 0x01, 0xf8, 0xc8, 0x30,
+ 0x01, 0x4e, 0xc8, 0x30,
+ 0x00, 0x6a, 0xb8, 0xdd,
+ 0x00, 0x51, 0xca, 0x5d,
+ 0x01, 0x4e, 0x9c, 0x18,
+ 0x02, 0x6a, 0x22, 0x05,
+ 0xc0, 0x3c, 0x58, 0x6c,
+ 0x04, 0xb8, 0x70, 0x01,
+ 0x00, 0x65, 0xd6, 0x5e,
+ 0x20, 0xb8, 0xde, 0x69,
+ 0x01, 0xbb, 0xa2, 0x30,
+ 0x3f, 0xba, 0x7c, 0x08,
+ 0x00, 0xb9, 0xd0, 0x5c,
+ 0x00, 0x65, 0xde, 0x41,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0x20, 0x3c, 0xcc, 0x79,
+ 0x20, 0x3c, 0x5e, 0x7c,
+ 0x01, 0xa4, 0xba, 0x7c,
+ 0x01, 0xb4, 0x68, 0x01,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0x00, 0x65, 0x5e, 0x44,
+ 0x04, 0x14, 0x58, 0x31,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0x08, 0xa0, 0x60, 0x31,
+ 0xac, 0x6a, 0xcc, 0x00,
+ 0x14, 0x6a, 0xf6, 0x5d,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0xa0, 0x6a, 0xee, 0x5d,
+ 0x00, 0x65, 0xcc, 0x41,
+ 0xdf, 0x3c, 0x78, 0x08,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x00, 0x65, 0x5e, 0x44,
+ 0x4c, 0x65, 0xcc, 0x28,
+ 0x01, 0x3e, 0x20, 0x31,
+ 0xd0, 0x66, 0xcc, 0x18,
+ 0x20, 0x66, 0xcc, 0x18,
+ 0x01, 0x51, 0xda, 0x34,
+ 0x4c, 0x3d, 0xca, 0x28,
+ 0x3f, 0x64, 0x7c, 0x08,
+ 0xd0, 0x65, 0xca, 0x18,
+ 0x01, 0x3e, 0x20, 0x31,
+ 0x30, 0x65, 0xd4, 0x18,
+ 0x00, 0x65, 0xe8, 0x4c,
+ 0xe1, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x20, 0x65, 0xd4, 0x18,
+ 0x00, 0x65, 0xf0, 0x54,
+ 0xe1, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x20, 0x65, 0xca, 0x18,
+ 0xe0, 0x65, 0xd4, 0x18,
+ 0x00, 0x65, 0xfa, 0x4c,
+ 0xe1, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xd0, 0x65, 0xd4, 0x18,
+ 0x00, 0x65, 0x02, 0x55,
+ 0xe1, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x01, 0x6c, 0xa2, 0x30,
+ 0xff, 0x51, 0x14, 0x75,
+ 0x00, 0x51, 0x90, 0x5d,
+ 0x01, 0x51, 0x20, 0x31,
+ 0x00, 0x65, 0x36, 0x45,
+ 0x3f, 0xba, 0xc8, 0x08,
+ 0x00, 0x3e, 0x36, 0x75,
+ 0x00, 0x65, 0xb2, 0x5e,
+ 0x80, 0x3c, 0x78, 0x00,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0x00, 0x65, 0xda, 0x5d,
+ 0x01, 0x3c, 0x78, 0x00,
+ 0xe0, 0x3f, 0x52, 0x65,
+ 0x02, 0x3c, 0x78, 0x00,
+ 0x20, 0x12, 0x52, 0x65,
+ 0x51, 0x6a, 0x60, 0x5d,
+ 0x00, 0x51, 0x90, 0x5d,
+ 0x51, 0x6a, 0x60, 0x5d,
+ 0x01, 0x51, 0x20, 0x31,
+ 0x04, 0x3c, 0x78, 0x00,
+ 0x01, 0xb9, 0xc8, 0x30,
+ 0x00, 0x3d, 0x50, 0x65,
+ 0x08, 0x3c, 0x78, 0x00,
+ 0x3f, 0xba, 0xc8, 0x08,
+ 0x00, 0x3e, 0x50, 0x65,
+ 0x10, 0x3c, 0x78, 0x00,
+ 0x04, 0xb8, 0x50, 0x7d,
+ 0xfb, 0xb8, 0x70, 0x09,
+ 0x20, 0xb8, 0x46, 0x6d,
+ 0x01, 0x90, 0xc8, 0x30,
+ 0xff, 0x6a, 0xa2, 0x00,
+ 0x00, 0x3d, 0xd0, 0x5c,
+ 0x01, 0x64, 0x20, 0x31,
+ 0xff, 0x6a, 0x78, 0x08,
+ 0x00, 0x65, 0xea, 0x58,
+ 0x10, 0xb8, 0x5e, 0x7c,
+ 0xff, 0x6a, 0x56, 0x5d,
+ 0x00, 0x65, 0x5e, 0x44,
+ 0x00, 0x65, 0xb2, 0x5e,
+ 0x31, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0x5e, 0x44,
+ 0x10, 0x3f, 0x06, 0x00,
+ 0x10, 0x6a, 0x06, 0x00,
+ 0x01, 0x65, 0x74, 0x34,
+ 0x81, 0x6a, 0xda, 0x5e,
+ 0x00, 0x65, 0x62, 0x45,
+ 0x01, 0x06, 0xd4, 0x30,
+ 0x01, 0x0c, 0x62, 0x7d,
+ 0x04, 0x0c, 0x5c, 0x6d,
+ 0xe0, 0x03, 0x7e, 0x08,
+ 0xe0, 0x3f, 0xcc, 0x61,
+ 0x01, 0x65, 0xcc, 0x30,
+ 0x01, 0x12, 0xda, 0x34,
+ 0x01, 0x06, 0xd4, 0x34,
+ 0x01, 0x03, 0x70, 0x6d,
+ 0x40, 0x03, 0xcc, 0x08,
+ 0x01, 0x65, 0x06, 0x30,
+ 0x40, 0x65, 0xc8, 0x08,
+ 0x00, 0x66, 0x7e, 0x75,
+ 0x40, 0x65, 0x7e, 0x7d,
+ 0x00, 0x65, 0x7e, 0x5d,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0x88, 0x7d,
+ 0x01, 0x65, 0x0c, 0x30,
+ 0x02, 0x0b, 0x8c, 0x7d,
+ 0xf7, 0x01, 0x02, 0x0c,
+ 0x01, 0x65, 0xc8, 0x30,
+ 0xff, 0x41, 0xb0, 0x75,
+ 0x01, 0x41, 0x20, 0x31,
+ 0xff, 0x6a, 0xa4, 0x00,
+ 0x00, 0x65, 0xa0, 0x45,
+ 0xff, 0xbf, 0xb0, 0x75,
+ 0x01, 0x90, 0xa4, 0x30,
+ 0x01, 0xbf, 0x20, 0x31,
+ 0x00, 0xbb, 0x9a, 0x65,
+ 0xff, 0x52, 0xae, 0x75,
+ 0x01, 0xbf, 0xcc, 0x30,
+ 0x01, 0x90, 0xca, 0x30,
+ 0x01, 0x52, 0x20, 0x31,
+ 0x01, 0x66, 0x7e, 0x31,
+ 0x01, 0x65, 0x20, 0x35,
+ 0x01, 0xbf, 0x82, 0x34,
+ 0x01, 0x64, 0xa2, 0x30,
+ 0x00, 0x6a, 0xc2, 0x5e,
+ 0x0d, 0x6a, 0x76, 0x00,
+ 0x00, 0x51, 0x14, 0x46,
+ 0x01, 0x65, 0xa4, 0x30,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x48, 0x6a, 0x08, 0x5e,
+ 0x01, 0x6a, 0xd0, 0x01,
+ 0x01, 0x6a, 0xdc, 0x05,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x48, 0x6a, 0x08, 0x5e,
+ 0x01, 0x6a, 0xe2, 0x5d,
+ 0x01, 0x6a, 0x26, 0x05,
+ 0x01, 0x65, 0xd8, 0x31,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0xce, 0x7d,
+ 0xff, 0x6a, 0xdc, 0x0d,
+ 0x01, 0x65, 0x32, 0x31,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0xaa, 0x46,
+ 0x81, 0x6a, 0xda, 0x5e,
+ 0x01, 0x0c, 0xda, 0x7d,
+ 0x04, 0x0c, 0xd8, 0x6d,
+ 0xe0, 0x03, 0x06, 0x08,
+ 0xe0, 0x03, 0x7e, 0x0c,
+ 0x01, 0x65, 0x18, 0x31,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x0d,
+ 0x01, 0x8c, 0x10, 0x30,
+ 0x01, 0x8d, 0x12, 0x30,
+ 0x01, 0x8e, 0x14, 0x34,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x30,
+ 0x01, 0x6c, 0xda, 0x34,
+ 0x3d, 0x64, 0xa4, 0x28,
+ 0x55, 0x64, 0xc8, 0x28,
+ 0x00, 0x65, 0x08, 0x46,
+ 0x2e, 0x64, 0xa4, 0x28,
+ 0x66, 0x64, 0xc8, 0x28,
+ 0x00, 0x6c, 0xda, 0x18,
+ 0x01, 0x52, 0xc8, 0x30,
+ 0x00, 0x6c, 0xda, 0x20,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x00, 0x6c, 0xda, 0x20,
+ 0x00, 0x6c, 0xda, 0x24,
+ 0x01, 0x65, 0xc8, 0x30,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x44, 0x6a, 0x04, 0x5e,
+ 0x01, 0x90, 0xe2, 0x31,
+ 0x04, 0x3b, 0x28, 0x7e,
+ 0x30, 0x6a, 0xd0, 0x01,
+ 0x20, 0x6a, 0xd0, 0x01,
+ 0x1d, 0x6a, 0xdc, 0x01,
+ 0xdc, 0xee, 0x24, 0x66,
+ 0x00, 0x65, 0x40, 0x46,
+ 0x20, 0x6a, 0xd0, 0x01,
+ 0x01, 0x6a, 0xdc, 0x01,
+ 0x20, 0xa0, 0xd8, 0x31,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0x30, 0x7e,
+ 0x11, 0x6a, 0xdc, 0x01,
+ 0x50, 0xee, 0x34, 0x66,
+ 0x20, 0x6a, 0xd0, 0x01,
+ 0x09, 0x6a, 0xdc, 0x01,
+ 0x88, 0xee, 0x3a, 0x66,
+ 0x19, 0x6a, 0xdc, 0x01,
+ 0xd8, 0xee, 0x3e, 0x66,
+ 0xff, 0x6a, 0xdc, 0x09,
+ 0x18, 0xee, 0x42, 0x6e,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x44, 0x6a, 0x04, 0x5e,
+ 0x20, 0x6a, 0xe2, 0x5d,
+ 0x01, 0x3b, 0x26, 0x31,
+ 0x04, 0x3b, 0x5c, 0x6e,
+ 0xa0, 0x6a, 0xca, 0x00,
+ 0x20, 0x65, 0xc8, 0x18,
+ 0x00, 0x65, 0x9a, 0x5e,
+ 0x00, 0x65, 0x54, 0x66,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0xaa, 0x46,
+ 0xa0, 0x6a, 0xcc, 0x00,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x20, 0x94, 0x60, 0x6e,
+ 0x10, 0x94, 0x62, 0x6e,
+ 0x08, 0x94, 0x7c, 0x6e,
+ 0x08, 0x94, 0x7c, 0x6e,
+ 0x08, 0x94, 0x7c, 0x6e,
+ 0xff, 0x8c, 0xc8, 0x10,
+ 0xc1, 0x64, 0xc8, 0x18,
+ 0xf8, 0x64, 0xc8, 0x08,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x00, 0x66, 0x70, 0x66,
+ 0xc0, 0x66, 0xac, 0x76,
+ 0x60, 0x66, 0xc8, 0x18,
+ 0x3d, 0x64, 0xc8, 0x28,
+ 0x00, 0x65, 0x60, 0x46,
+ 0xf7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0x7e, 0x6e,
+ 0x00, 0x62, 0xc4, 0x18,
+ 0x00, 0x65, 0xaa, 0x5e,
+ 0x00, 0x65, 0x8a, 0x5e,
+ 0x00, 0x65, 0x8a, 0x5e,
+ 0x00, 0x65, 0x8a, 0x5e,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x30,
+ 0x01, 0x99, 0xda, 0x34,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x31,
+ 0x01, 0x6c, 0x32, 0x35,
+ 0x08, 0x94, 0xaa, 0x7e,
+ 0xf7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0xae, 0x6e,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x04, 0xb8, 0xd6, 0x6e,
+ 0x01, 0x42, 0x7e, 0x31,
+ 0xff, 0x6a, 0x76, 0x01,
+ 0x01, 0x90, 0x84, 0x34,
+ 0xff, 0x6a, 0x76, 0x05,
+ 0x01, 0x85, 0x0a, 0x01,
+ 0x7f, 0x65, 0x10, 0x09,
+ 0xfe, 0x85, 0x0a, 0x0d,
+ 0xff, 0x42, 0xd2, 0x66,
+ 0xff, 0x41, 0xca, 0x66,
+ 0xd1, 0x6a, 0xda, 0x5e,
+ 0xff, 0x6a, 0xca, 0x04,
+ 0x01, 0x41, 0x20, 0x31,
+ 0x01, 0xbf, 0x82, 0x30,
+ 0x01, 0x6a, 0x76, 0x00,
+ 0x00, 0xbb, 0x14, 0x46,
+ 0x01, 0x42, 0x20, 0x31,
+ 0x01, 0xbf, 0x84, 0x34,
+ 0x01, 0x41, 0x7e, 0x31,
+ 0x01, 0x90, 0x82, 0x34,
+ 0x01, 0x65, 0x22, 0x31,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xff, 0x6a, 0xd4, 0x0c
+};
+
+typedef int ahc_patch_func_t (struct ahc_softc *ahc);
+static ahc_patch_func_t ahc_patch23_func;
+
+static int
+ahc_patch23_func(struct ahc_softc *ahc)
+{
+ return ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch22_func;
+
+static int
+ahc_patch22_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_CMD_CHAN) == 0);
+}
+
+static ahc_patch_func_t ahc_patch21_func;
+
+static int
+ahc_patch21_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_QUEUE_REGS) == 0);
+}
+
+static ahc_patch_func_t ahc_patch20_func;
+
+static int
+ahc_patch20_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_WIDE) != 0);
+}
+
+static ahc_patch_func_t ahc_patch19_func;
+
+static int
+ahc_patch19_func(struct ahc_softc *ahc)
+{
+ return ((ahc->flags & AHC_SCB_BTT) != 0);
+}
+
+static ahc_patch_func_t ahc_patch18_func;
+
+static int
+ahc_patch18_func(struct ahc_softc *ahc)
+{
+ return ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch17_func;
+
+static int
+ahc_patch17_func(struct ahc_softc *ahc)
+{
+ return ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch16_func;
+
+static int
+ahc_patch16_func(struct ahc_softc *ahc)
+{
+ return ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch15_func;
+
+static int
+ahc_patch15_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_ULTRA2) == 0);
+}
+
+static ahc_patch_func_t ahc_patch14_func;
+
+static int
+ahc_patch14_func(struct ahc_softc *ahc)
+{
+ return ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0);
+}
+
+static ahc_patch_func_t ahc_patch13_func;
+
+static int
+ahc_patch13_func(struct ahc_softc *ahc)
+{
+ return ((ahc->flags & AHC_39BIT_ADDRESSING) != 0);
+}
+
+static ahc_patch_func_t ahc_patch12_func;
+
+static int
+ahc_patch12_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_HS_MAILBOX) != 0);
+}
+
+static ahc_patch_func_t ahc_patch11_func;
+
+static int
+ahc_patch11_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_ULTRA) != 0);
+}
+
+static ahc_patch_func_t ahc_patch10_func;
+
+static int
+ahc_patch10_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_MULTI_TID) != 0);
+}
+
+static ahc_patch_func_t ahc_patch9_func;
+
+static int
+ahc_patch9_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_CMD_CHAN) != 0);
+}
+
+static ahc_patch_func_t ahc_patch8_func;
+
+static int
+ahc_patch8_func(struct ahc_softc *ahc)
+{
+ return ((ahc->flags & AHC_INITIATORROLE) != 0);
+}
+
+static ahc_patch_func_t ahc_patch7_func;
+
+static int
+ahc_patch7_func(struct ahc_softc *ahc)
+{
+ return ((ahc->flags & AHC_TARGETROLE) != 0);
+}
+
+static ahc_patch_func_t ahc_patch6_func;
+
+static int
+ahc_patch6_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_DT) == 0);
+}
+
+static ahc_patch_func_t ahc_patch5_func;
+
+static int
+ahc_patch5_func(struct ahc_softc *ahc)
+{
+ return ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch4_func;
+
+static int
+ahc_patch4_func(struct ahc_softc *ahc)
+{
+ return ((ahc->flags & AHC_PAGESCBS) != 0);
+}
+
+static ahc_patch_func_t ahc_patch3_func;
+
+static int
+ahc_patch3_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_QUEUE_REGS) != 0);
+}
+
+static ahc_patch_func_t ahc_patch2_func;
+
+static int
+ahc_patch2_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_TWIN) != 0);
+}
+
+static ahc_patch_func_t ahc_patch1_func;
+
+static int
+ahc_patch1_func(struct ahc_softc *ahc)
+{
+ return ((ahc->features & AHC_ULTRA2) != 0);
+}
+
+static ahc_patch_func_t ahc_patch0_func;
+
+static int
+ahc_patch0_func(struct ahc_softc *ahc)
+{
+ return (0);
+}
+
+static const struct patch {
+ ahc_patch_func_t *patch_func;
+ uint32_t begin :10,
+ skip_instr :10,
+ skip_patch :12;
+} patches[] = {
+ { ahc_patch1_func, 4, 1, 1 },
+ { ahc_patch2_func, 6, 2, 1 },
+ { ahc_patch2_func, 9, 1, 1 },
+ { ahc_patch3_func, 11, 1, 2 },
+ { ahc_patch0_func, 12, 2, 1 },
+ { ahc_patch4_func, 15, 1, 2 },
+ { ahc_patch0_func, 16, 1, 1 },
+ { ahc_patch5_func, 22, 2, 1 },
+ { ahc_patch3_func, 27, 1, 2 },
+ { ahc_patch0_func, 28, 1, 1 },
+ { ahc_patch6_func, 34, 1, 1 },
+ { ahc_patch7_func, 37, 54, 19 },
+ { ahc_patch8_func, 37, 1, 1 },
+ { ahc_patch9_func, 42, 3, 2 },
+ { ahc_patch0_func, 45, 3, 1 },
+ { ahc_patch10_func, 49, 1, 2 },
+ { ahc_patch0_func, 50, 2, 3 },
+ { ahc_patch1_func, 50, 1, 2 },
+ { ahc_patch0_func, 51, 1, 1 },
+ { ahc_patch2_func, 53, 2, 1 },
+ { ahc_patch9_func, 55, 1, 2 },
+ { ahc_patch0_func, 56, 1, 1 },
+ { ahc_patch9_func, 60, 1, 2 },
+ { ahc_patch0_func, 61, 1, 1 },
+ { ahc_patch9_func, 71, 1, 2 },
+ { ahc_patch0_func, 72, 1, 1 },
+ { ahc_patch9_func, 75, 1, 2 },
+ { ahc_patch0_func, 76, 1, 1 },
+ { ahc_patch9_func, 79, 1, 2 },
+ { ahc_patch0_func, 80, 1, 1 },
+ { ahc_patch8_func, 91, 9, 4 },
+ { ahc_patch1_func, 93, 1, 2 },
+ { ahc_patch0_func, 94, 1, 1 },
+ { ahc_patch2_func, 96, 2, 1 },
+ { ahc_patch2_func, 105, 4, 1 },
+ { ahc_patch1_func, 109, 1, 2 },
+ { ahc_patch0_func, 110, 2, 3 },
+ { ahc_patch2_func, 110, 1, 2 },
+ { ahc_patch0_func, 111, 1, 1 },
+ { ahc_patch7_func, 112, 4, 2 },
+ { ahc_patch0_func, 116, 1, 1 },
+ { ahc_patch11_func, 117, 2, 1 },
+ { ahc_patch1_func, 119, 1, 2 },
+ { ahc_patch0_func, 120, 1, 1 },
+ { ahc_patch7_func, 121, 4, 1 },
+ { ahc_patch7_func, 131, 95, 11 },
+ { ahc_patch4_func, 151, 1, 1 },
+ { ahc_patch1_func, 168, 1, 1 },
+ { ahc_patch12_func, 173, 1, 2 },
+ { ahc_patch0_func, 174, 1, 1 },
+ { ahc_patch9_func, 185, 1, 2 },
+ { ahc_patch0_func, 186, 1, 1 },
+ { ahc_patch9_func, 195, 1, 2 },
+ { ahc_patch0_func, 196, 1, 1 },
+ { ahc_patch9_func, 212, 6, 2 },
+ { ahc_patch0_func, 218, 6, 1 },
+ { ahc_patch8_func, 226, 21, 2 },
+ { ahc_patch1_func, 241, 1, 1 },
+ { ahc_patch1_func, 249, 1, 2 },
+ { ahc_patch0_func, 250, 2, 2 },
+ { ahc_patch11_func, 251, 1, 1 },
+ { ahc_patch9_func, 259, 27, 3 },
+ { ahc_patch1_func, 275, 10, 2 },
+ { ahc_patch13_func, 278, 1, 1 },
+ { ahc_patch14_func, 286, 14, 1 },
+ { ahc_patch1_func, 302, 1, 2 },
+ { ahc_patch0_func, 303, 1, 1 },
+ { ahc_patch9_func, 306, 1, 1 },
+ { ahc_patch13_func, 311, 1, 1 },
+ { ahc_patch9_func, 312, 2, 2 },
+ { ahc_patch0_func, 314, 4, 1 },
+ { ahc_patch14_func, 318, 1, 1 },
+ { ahc_patch15_func, 320, 2, 3 },
+ { ahc_patch9_func, 320, 1, 2 },
+ { ahc_patch0_func, 321, 1, 1 },
+ { ahc_patch6_func, 326, 1, 2 },
+ { ahc_patch0_func, 327, 1, 1 },
+ { ahc_patch1_func, 331, 47, 11 },
+ { ahc_patch6_func, 338, 2, 4 },
+ { ahc_patch7_func, 338, 1, 1 },
+ { ahc_patch8_func, 339, 1, 1 },
+ { ahc_patch0_func, 340, 1, 1 },
+ { ahc_patch16_func, 341, 1, 1 },
+ { ahc_patch6_func, 357, 6, 3 },
+ { ahc_patch16_func, 357, 5, 1 },
+ { ahc_patch0_func, 363, 7, 1 },
+ { ahc_patch13_func, 373, 5, 1 },
+ { ahc_patch0_func, 378, 52, 17 },
+ { ahc_patch14_func, 378, 1, 1 },
+ { ahc_patch7_func, 380, 2, 2 },
+ { ahc_patch17_func, 381, 1, 1 },
+ { ahc_patch9_func, 384, 1, 1 },
+ { ahc_patch18_func, 391, 1, 1 },
+ { ahc_patch14_func, 396, 9, 3 },
+ { ahc_patch9_func, 397, 3, 2 },
+ { ahc_patch0_func, 400, 3, 1 },
+ { ahc_patch9_func, 408, 6, 2 },
+ { ahc_patch0_func, 414, 9, 2 },
+ { ahc_patch13_func, 414, 1, 1 },
+ { ahc_patch13_func, 423, 2, 1 },
+ { ahc_patch14_func, 425, 1, 1 },
+ { ahc_patch9_func, 427, 1, 2 },
+ { ahc_patch0_func, 428, 1, 1 },
+ { ahc_patch7_func, 429, 1, 1 },
+ { ahc_patch7_func, 430, 1, 1 },
+ { ahc_patch8_func, 431, 3, 3 },
+ { ahc_patch6_func, 432, 1, 2 },
+ { ahc_patch0_func, 433, 1, 1 },
+ { ahc_patch9_func, 434, 1, 1 },
+ { ahc_patch15_func, 435, 1, 2 },
+ { ahc_patch13_func, 435, 1, 1 },
+ { ahc_patch14_func, 437, 9, 4 },
+ { ahc_patch9_func, 437, 1, 1 },
+ { ahc_patch9_func, 444, 2, 1 },
+ { ahc_patch0_func, 446, 4, 3 },
+ { ahc_patch9_func, 446, 1, 2 },
+ { ahc_patch0_func, 447, 3, 1 },
+ { ahc_patch1_func, 451, 2, 1 },
+ { ahc_patch7_func, 453, 10, 2 },
+ { ahc_patch0_func, 463, 1, 1 },
+ { ahc_patch8_func, 464, 118, 22 },
+ { ahc_patch1_func, 466, 3, 2 },
+ { ahc_patch0_func, 469, 5, 3 },
+ { ahc_patch9_func, 469, 2, 2 },
+ { ahc_patch0_func, 471, 3, 1 },
+ { ahc_patch1_func, 476, 2, 2 },
+ { ahc_patch0_func, 478, 6, 3 },
+ { ahc_patch9_func, 478, 2, 2 },
+ { ahc_patch0_func, 480, 3, 1 },
+ { ahc_patch1_func, 486, 2, 2 },
+ { ahc_patch0_func, 488, 9, 7 },
+ { ahc_patch9_func, 488, 5, 6 },
+ { ahc_patch19_func, 488, 1, 2 },
+ { ahc_patch0_func, 489, 1, 1 },
+ { ahc_patch19_func, 491, 1, 2 },
+ { ahc_patch0_func, 492, 1, 1 },
+ { ahc_patch0_func, 493, 4, 1 },
+ { ahc_patch6_func, 498, 3, 2 },
+ { ahc_patch0_func, 501, 1, 1 },
+ { ahc_patch6_func, 511, 1, 2 },
+ { ahc_patch0_func, 512, 1, 1 },
+ { ahc_patch20_func, 549, 7, 1 },
+ { ahc_patch3_func, 584, 1, 2 },
+ { ahc_patch0_func, 585, 1, 1 },
+ { ahc_patch21_func, 588, 1, 1 },
+ { ahc_patch8_func, 590, 106, 33 },
+ { ahc_patch4_func, 592, 1, 1 },
+ { ahc_patch1_func, 598, 2, 2 },
+ { ahc_patch0_func, 600, 1, 1 },
+ { ahc_patch1_func, 603, 1, 2 },
+ { ahc_patch0_func, 604, 1, 1 },
+ { ahc_patch9_func, 605, 3, 3 },
+ { ahc_patch15_func, 606, 1, 1 },
+ { ahc_patch0_func, 608, 4, 1 },
+ { ahc_patch19_func, 617, 2, 2 },
+ { ahc_patch0_func, 619, 1, 1 },
+ { ahc_patch19_func, 623, 10, 3 },
+ { ahc_patch5_func, 625, 8, 1 },
+ { ahc_patch0_func, 633, 9, 2 },
+ { ahc_patch5_func, 634, 8, 1 },
+ { ahc_patch4_func, 644, 1, 2 },
+ { ahc_patch0_func, 645, 1, 1 },
+ { ahc_patch19_func, 646, 1, 2 },
+ { ahc_patch0_func, 647, 3, 2 },
+ { ahc_patch4_func, 649, 1, 1 },
+ { ahc_patch5_func, 650, 1, 1 },
+ { ahc_patch5_func, 653, 1, 1 },
+ { ahc_patch5_func, 655, 1, 1 },
+ { ahc_patch4_func, 657, 2, 2 },
+ { ahc_patch0_func, 659, 2, 1 },
+ { ahc_patch5_func, 661, 1, 1 },
+ { ahc_patch5_func, 664, 1, 1 },
+ { ahc_patch5_func, 667, 1, 1 },
+ { ahc_patch19_func, 671, 1, 1 },
+ { ahc_patch19_func, 674, 1, 1 },
+ { ahc_patch4_func, 680, 1, 1 },
+ { ahc_patch6_func, 683, 1, 2 },
+ { ahc_patch0_func, 684, 1, 1 },
+ { ahc_patch7_func, 696, 16, 1 },
+ { ahc_patch4_func, 712, 20, 1 },
+ { ahc_patch9_func, 733, 4, 2 },
+ { ahc_patch0_func, 737, 4, 1 },
+ { ahc_patch9_func, 741, 4, 2 },
+ { ahc_patch0_func, 745, 3, 1 },
+ { ahc_patch6_func, 751, 1, 1 },
+ { ahc_patch22_func, 753, 14, 1 },
+ { ahc_patch7_func, 767, 3, 1 },
+ { ahc_patch9_func, 779, 24, 8 },
+ { ahc_patch19_func, 783, 1, 2 },
+ { ahc_patch0_func, 784, 1, 1 },
+ { ahc_patch15_func, 789, 4, 2 },
+ { ahc_patch0_func, 793, 7, 3 },
+ { ahc_patch23_func, 793, 5, 2 },
+ { ahc_patch0_func, 798, 2, 1 },
+ { ahc_patch0_func, 803, 42, 3 },
+ { ahc_patch18_func, 815, 18, 2 },
+ { ahc_patch0_func, 833, 1, 1 },
+ { ahc_patch4_func, 857, 1, 1 },
+ { ahc_patch4_func, 858, 3, 2 },
+ { ahc_patch0_func, 861, 1, 1 },
+ { ahc_patch13_func, 862, 3, 1 },
+ { ahc_patch4_func, 865, 12, 1 }
+};
+
+static const struct cs {
+ uint16_t begin;
+ uint16_t end;
+} critical_sections[] = {
+ { 11, 18 },
+ { 21, 30 },
+ { 712, 728 },
+ { 858, 861 },
+ { 865, 871 },
+ { 873, 875 },
+ { 875, 877 }
+};
+
+static const int num_critical_sections = sizeof(critical_sections)
+ / sizeof(*critical_sections);
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
new file mode 100644
index 000000000..b98c5c105
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -0,0 +1,80 @@
+PROG= aicasm
+
+.SUFFIXES= .l .y .c .h
+
+CSRCS= aicasm.c aicasm_symbol.c
+YSRCS= aicasm_gram.y aicasm_macro_gram.y
+LSRCS= aicasm_scan.l aicasm_macro_scan.l
+
+GENHDRS= aicdb.h $(YSRCS:.y=.h)
+GENSRCS= $(YSRCS:.y=.c) $(LSRCS:.l=.c)
+
+SRCS= ${CSRCS} ${GENSRCS}
+LIBS= -ldb
+clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
+# Override default kernel CFLAGS. This is a userland app.
+AICASM_CFLAGS:= -I/usr/include -I.
+LEX= flex
+YACC= bison
+YFLAGS= -d
+
+NOMAN= noman
+
+ifneq ($(HOSTCC),)
+AICASM_CC= $(HOSTCC)
+else
+AICASM_CC= $(CC)
+endif
+
+ifdef DEBUG
+CFLAGS+= -DDEBUG -g
+YFLAGS+= -t -v
+LFLAGS= -d
+endif
+
+$(PROG): ${GENHDRS} $(SRCS)
+ $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS)
+
+aicdb.h:
+ @if [ -e "/usr/include/db4/db_185.h" ]; then \
+ echo "#include <db4/db_185.h>" > aicdb.h; \
+ elif [ -e "/usr/include/db3/db_185.h" ]; then \
+ echo "#include <db3/db_185.h>" > aicdb.h; \
+ elif [ -e "/usr/include/db2/db_185.h" ]; then \
+ echo "#include <db2/db_185.h>" > aicdb.h; \
+ elif [ -e "/usr/include/db1/db_185.h" ]; then \
+ echo "#include <db1/db_185.h>" > aicdb.h; \
+ elif [ -e "/usr/include/db/db_185.h" ]; then \
+ echo "#include <db/db_185.h>" > aicdb.h; \
+ elif [ -e "/usr/include/db_185.h" ]; then \
+ echo "#include <db_185.h>" > aicdb.h; \
+ else \
+ echo "*** Install db development libraries"; \
+ fi
+
+clean:
+ rm -f $(clean-files)
+
+# Create a dependency chain in generated files
+# to avoid concurrent invocations of the single
+# rule that builds them all.
+aicasm_gram.c: aicasm_gram.h
+aicasm_gram.c aicasm_gram.h: aicasm_gram.y
+ $(YACC) $(YFLAGS) -b $(<:.y=) $<
+ mv $(<:.y=).tab.c $(<:.y=.c)
+ mv $(<:.y=).tab.h $(<:.y=.h)
+
+# Create a dependency chain in generated files
+# to avoid concurrent invocations of the single
+# rule that builds them all.
+aicasm_macro_gram.c: aicasm_macro_gram.h
+aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y
+ $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
+ mv $(<:.y=).tab.c $(<:.y=.c)
+ mv $(<:.y=).tab.h $(<:.y=.h)
+
+aicasm_scan.c: aicasm_scan.l
+ $(LEX) $(LFLAGS) -o$@ $<
+
+aicasm_macro_scan.c: aicasm_macro_scan.l
+ $(LEX) $(LFLAGS) -Pmm -o$@ $<
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
new file mode 100644
index 000000000..2e3117aa3
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -0,0 +1,844 @@
+/*
+ * Aic7xxx SCSI host adapter firmware assembler
+ *
+ * Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#23 $
+ *
+ * $FreeBSD$
+ */
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#if linux
+#include <endian.h>
+#else
+#include <machine/endian.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_insformat.h"
+
+typedef struct patch {
+ STAILQ_ENTRY(patch) links;
+ int patch_func;
+ u_int begin;
+ u_int skip_instr;
+ u_int skip_patch;
+} patch_t;
+
+STAILQ_HEAD(patch_list, patch) patches;
+
+static void usage(void);
+static void back_patch(void);
+static void output_code(void);
+static void output_listing(char *ifilename);
+static void dump_scope(scope_t *scope);
+static void emit_patch(scope_t *scope, int patch);
+static int check_patch(patch_t **start_patch, int start_instr,
+ int *skip_addr, int *func_vals);
+
+struct path_list search_path;
+int includes_search_curdir;
+char *appname;
+char *stock_include_file;
+FILE *ofile;
+char *ofilename;
+char *regfilename;
+FILE *regfile;
+char *listfilename;
+FILE *listfile;
+char *regdiagfilename;
+FILE *regdiagfile;
+int src_mode;
+int dst_mode;
+
+static STAILQ_HEAD(,instruction) seq_program;
+struct cs_tailq cs_tailq;
+struct scope_list scope_stack;
+symlist_t patch_functions;
+
+#if DEBUG
+extern int yy_flex_debug;
+extern int mm_flex_debug;
+extern int yydebug;
+extern int mmdebug;
+#endif
+extern FILE *yyin;
+extern int yyparse(void);
+
+int main(int argc, char *argv[]);
+
+int
+main(int argc, char *argv[])
+{
+ extern char *optarg;
+ extern int optind;
+ int ch;
+ int retval;
+ char *inputfilename;
+ scope_t *sentinal;
+
+ STAILQ_INIT(&patches);
+ SLIST_INIT(&search_path);
+ STAILQ_INIT(&seq_program);
+ TAILQ_INIT(&cs_tailq);
+ SLIST_INIT(&scope_stack);
+
+ /* Set Sentinal scope node */
+ sentinal = scope_alloc();
+ sentinal->type = SCOPE_ROOT;
+
+ includes_search_curdir = 1;
+ appname = *argv;
+ regfile = NULL;
+ listfile = NULL;
+#if DEBUG
+ yy_flex_debug = 0;
+ mm_flex_debug = 0;
+ yydebug = 0;
+ mmdebug = 0;
+#endif
+ while ((ch = getopt(argc, argv, "d:i:l:n:o:p:r:I:")) != -1) {
+ switch(ch) {
+ case 'd':
+#if DEBUG
+ if (strcmp(optarg, "s") == 0) {
+ yy_flex_debug = 1;
+ mm_flex_debug = 1;
+ } else if (strcmp(optarg, "p") == 0) {
+ yydebug = 1;
+ mmdebug = 1;
+ } else {
+ fprintf(stderr, "%s: -d Requires either an "
+ "'s' or 'p' argument\n", appname);
+ usage();
+ }
+#else
+ stop("-d: Assembler not built with debugging "
+ "information", EX_SOFTWARE);
+#endif
+ break;
+ case 'i':
+ stock_include_file = optarg;
+ break;
+ case 'l':
+ /* Create a program listing */
+ if ((listfile = fopen(optarg, "w")) == NULL) {
+ perror(optarg);
+ stop(NULL, EX_CANTCREAT);
+ }
+ listfilename = optarg;
+ break;
+ case 'n':
+ /* Don't complain about the -nostdinc directrive */
+ if (strcmp(optarg, "ostdinc")) {
+ fprintf(stderr, "%s: Unknown option -%c%s\n",
+ appname, ch, optarg);
+ usage();
+ /* NOTREACHED */
+ }
+ break;
+ case 'o':
+ if ((ofile = fopen(optarg, "w")) == NULL) {
+ perror(optarg);
+ stop(NULL, EX_CANTCREAT);
+ }
+ ofilename = optarg;
+ break;
+ case 'p':
+ /* Create Register Diagnostic "printing" Functions */
+ if ((regdiagfile = fopen(optarg, "w")) == NULL) {
+ perror(optarg);
+ stop(NULL, EX_CANTCREAT);
+ }
+ regdiagfilename = optarg;
+ break;
+ case 'r':
+ if ((regfile = fopen(optarg, "w")) == NULL) {
+ perror(optarg);
+ stop(NULL, EX_CANTCREAT);
+ }
+ regfilename = optarg;
+ break;
+ case 'I':
+ {
+ path_entry_t include_dir;
+
+ if (strcmp(optarg, "-") == 0) {
+ if (includes_search_curdir == 0) {
+ fprintf(stderr, "%s: Warning - '-I-' "
+ "specified multiple "
+ "times\n", appname);
+ }
+ includes_search_curdir = 0;
+ for (include_dir = SLIST_FIRST(&search_path);
+ include_dir != NULL;
+ include_dir = SLIST_NEXT(include_dir,
+ links))
+ /*
+ * All entries before a '-I-' only
+ * apply to includes specified with
+ * quotes instead of "<>".
+ */
+ include_dir->quoted_includes_only = 1;
+ } else {
+ include_dir =
+ (path_entry_t)malloc(sizeof(*include_dir));
+ if (include_dir == NULL) {
+ perror(optarg);
+ stop(NULL, EX_OSERR);
+ }
+ include_dir->directory = strdup(optarg);
+ if (include_dir->directory == NULL) {
+ perror(optarg);
+ stop(NULL, EX_OSERR);
+ }
+ include_dir->quoted_includes_only = 0;
+ SLIST_INSERT_HEAD(&search_path, include_dir,
+ links);
+ }
+ break;
+ }
+ case '?':
+ default:
+ usage();
+ /* NOTREACHED */
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1) {
+ fprintf(stderr, "%s: No input file specifiled\n", appname);
+ usage();
+ /* NOTREACHED */
+ }
+
+ if (regdiagfile != NULL
+ && (regfile == NULL || stock_include_file == NULL)) {
+ fprintf(stderr,
+ "%s: The -p option requires the -r and -i options.\n",
+ appname);
+ usage();
+ /* NOTREACHED */
+ }
+ symtable_open();
+ inputfilename = *argv;
+ include_file(*argv, SOURCE_FILE);
+ retval = yyparse();
+ if (retval == 0) {
+ if (SLIST_FIRST(&scope_stack) == NULL
+ || SLIST_FIRST(&scope_stack)->type != SCOPE_ROOT) {
+ stop("Unterminated conditional expression", EX_DATAERR);
+ /* NOTREACHED */
+ }
+
+ /* Process outmost scope */
+ process_scope(SLIST_FIRST(&scope_stack));
+ /*
+ * Decend the tree of scopes and insert/emit
+ * patches as appropriate. We perform a depth first
+ * tranversal, recursively handling each scope.
+ */
+ /* start at the root scope */
+ dump_scope(SLIST_FIRST(&scope_stack));
+
+ /* Patch up forward jump addresses */
+ back_patch();
+
+ if (ofile != NULL)
+ output_code();
+ if (regfile != NULL)
+ symtable_dump(regfile, regdiagfile);
+ if (listfile != NULL)
+ output_listing(inputfilename);
+ }
+
+ stop(NULL, 0);
+ /* NOTREACHED */
+ return (0);
+}
+
+static void
+usage()
+{
+
+ (void)fprintf(stderr,
+"usage: %-16s [-nostdinc] [-I-] [-I directory] [-o output_file]\n"
+" [-r register_output_file [-p register_diag_file -i includefile]]\n"
+" [-l program_list_file]\n"
+" input_file\n", appname);
+ exit(EX_USAGE);
+}
+
+static void
+back_patch()
+{
+ struct instruction *cur_instr;
+
+ for (cur_instr = STAILQ_FIRST(&seq_program);
+ cur_instr != NULL;
+ cur_instr = STAILQ_NEXT(cur_instr, links)) {
+ if (cur_instr->patch_label != NULL) {
+ struct ins_format3 *f3_instr;
+ u_int address;
+
+ if (cur_instr->patch_label->type != LABEL) {
+ char buf[255];
+
+ snprintf(buf, sizeof(buf),
+ "Undefined label %s",
+ cur_instr->patch_label->name);
+ stop(buf, EX_DATAERR);
+ /* NOTREACHED */
+ }
+ f3_instr = &cur_instr->format.format3;
+ address = f3_instr->address;
+ address += cur_instr->patch_label->info.linfo->address;
+ f3_instr->address = address;
+ }
+ }
+}
+
+static void
+output_code()
+{
+ struct instruction *cur_instr;
+ patch_t *cur_patch;
+ critical_section_t *cs;
+ symbol_node_t *cur_node;
+ int instrcount;
+
+ instrcount = 0;
+ fprintf(ofile,
+"/*\n"
+" * DO NOT EDIT - This file is automatically generated\n"
+" * from the following source files:\n"
+" *\n"
+"%s */\n", versions);
+
+ fprintf(ofile, "static const uint8_t seqprog[] = {\n");
+ for (cur_instr = STAILQ_FIRST(&seq_program);
+ cur_instr != NULL;
+ cur_instr = STAILQ_NEXT(cur_instr, links)) {
+
+ fprintf(ofile, "%s\t0x%02x, 0x%02x, 0x%02x, 0x%02x",
+ cur_instr == STAILQ_FIRST(&seq_program) ? "" : ",\n",
+#ifdef __LITTLE_ENDIAN
+ cur_instr->format.bytes[0],
+ cur_instr->format.bytes[1],
+ cur_instr->format.bytes[2],
+ cur_instr->format.bytes[3]);
+#else
+ cur_instr->format.bytes[3],
+ cur_instr->format.bytes[2],
+ cur_instr->format.bytes[1],
+ cur_instr->format.bytes[0]);
+#endif
+ instrcount++;
+ }
+ fprintf(ofile, "\n};\n\n");
+
+ if (patch_arg_list == NULL)
+ stop("Patch argument list not defined",
+ EX_DATAERR);
+
+ /*
+ * Output patch information. Patch functions first.
+ */
+ fprintf(ofile,
+"typedef int %spatch_func_t (%s);\n", prefix, patch_arg_list);
+
+ for (cur_node = SLIST_FIRST(&patch_functions);
+ cur_node != NULL;
+ cur_node = SLIST_NEXT(cur_node,links)) {
+ fprintf(ofile,
+"static %spatch_func_t %spatch%d_func;\n"
+"\n"
+"static int\n"
+"%spatch%d_func(%s)\n"
+"{\n"
+" return (%s);\n"
+"}\n\n",
+ prefix,
+ prefix,
+ cur_node->symbol->info.condinfo->func_num,
+ prefix,
+ cur_node->symbol->info.condinfo->func_num,
+ patch_arg_list,
+ cur_node->symbol->name);
+ }
+
+ fprintf(ofile,
+"static const struct patch {\n"
+" %spatch_func_t *patch_func;\n"
+" uint32_t begin :10,\n"
+" skip_instr :10,\n"
+" skip_patch :12;\n"
+"} patches[] = {\n", prefix);
+
+ for (cur_patch = STAILQ_FIRST(&patches);
+ cur_patch != NULL;
+ cur_patch = STAILQ_NEXT(cur_patch,links)) {
+ fprintf(ofile, "%s\t{ %spatch%d_func, %d, %d, %d }",
+ cur_patch == STAILQ_FIRST(&patches) ? "" : ",\n",
+ prefix,
+ cur_patch->patch_func, cur_patch->begin,
+ cur_patch->skip_instr, cur_patch->skip_patch);
+ }
+
+ fprintf(ofile, "\n};\n\n");
+
+ fprintf(ofile,
+"static const struct cs {\n"
+" uint16_t begin;\n"
+" uint16_t end;\n"
+"} critical_sections[] = {\n");
+
+ for (cs = TAILQ_FIRST(&cs_tailq);
+ cs != NULL;
+ cs = TAILQ_NEXT(cs, links)) {
+ fprintf(ofile, "%s\t{ %d, %d }",
+ cs == TAILQ_FIRST(&cs_tailq) ? "" : ",\n",
+ cs->begin_addr, cs->end_addr);
+ }
+
+ fprintf(ofile, "\n};\n\n");
+
+ fprintf(ofile,
+"static const int num_critical_sections = sizeof(critical_sections)\n"
+" / sizeof(*critical_sections);\n");
+
+ fprintf(stderr, "%s: %d instructions used\n", appname, instrcount);
+}
+
+static void
+dump_scope(scope_t *scope)
+{
+ scope_t *cur_scope;
+
+ /*
+ * Emit the first patch for this scope
+ */
+ emit_patch(scope, 0);
+
+ /*
+ * Dump each scope within this one.
+ */
+ cur_scope = TAILQ_FIRST(&scope->inner_scope);
+
+ while (cur_scope != NULL) {
+
+ dump_scope(cur_scope);
+
+ cur_scope = TAILQ_NEXT(cur_scope, scope_links);
+ }
+
+ /*
+ * Emit the second, closing, patch for this scope
+ */
+ emit_patch(scope, 1);
+}
+
+void
+emit_patch(scope_t *scope, int patch)
+{
+ patch_info_t *pinfo;
+ patch_t *new_patch;
+
+ pinfo = &scope->patches[patch];
+
+ if (pinfo->skip_instr == 0)
+ /* No-Op patch */
+ return;
+
+ new_patch = (patch_t *)malloc(sizeof(*new_patch));
+
+ if (new_patch == NULL)
+ stop("Could not malloc patch structure", EX_OSERR);
+
+ memset(new_patch, 0, sizeof(*new_patch));
+
+ if (patch == 0) {
+ new_patch->patch_func = scope->func_num;
+ new_patch->begin = scope->begin_addr;
+ } else {
+ new_patch->patch_func = 0;
+ new_patch->begin = scope->end_addr;
+ }
+ new_patch->skip_instr = pinfo->skip_instr;
+ new_patch->skip_patch = pinfo->skip_patch;
+ STAILQ_INSERT_TAIL(&patches, new_patch, links);
+}
+
+void
+output_listing(char *ifilename)
+{
+ char buf[1024];
+ FILE *ifile;
+ struct instruction *cur_instr;
+ patch_t *cur_patch;
+ symbol_node_t *cur_func;
+ int *func_values;
+ int instrcount;
+ int instrptr;
+ int line;
+ int func_count;
+ int skip_addr;
+
+ instrcount = 0;
+ instrptr = 0;
+ line = 1;
+ skip_addr = 0;
+ if ((ifile = fopen(ifilename, "r")) == NULL) {
+ perror(ifilename);
+ stop(NULL, EX_DATAERR);
+ }
+
+ /*
+ * Determine which options to apply to this listing.
+ */
+ for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions);
+ cur_func != NULL;
+ cur_func = SLIST_NEXT(cur_func, links))
+ func_count++;
+
+ func_values = NULL;
+ if (func_count != 0) {
+ func_values = (int *)malloc(func_count * sizeof(int));
+
+ if (func_values == NULL)
+ stop("Could not malloc", EX_OSERR);
+
+ func_values[0] = 0; /* FALSE func */
+ func_count--;
+
+ /*
+ * Ask the user to fill in the return values for
+ * the rest of the functions.
+ */
+
+
+ for (cur_func = SLIST_FIRST(&patch_functions);
+ cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL;
+ cur_func = SLIST_NEXT(cur_func, links), func_count--) {
+ int input;
+
+ fprintf(stdout, "\n(%s)\n", cur_func->symbol->name);
+ fprintf(stdout,
+ "Enter the return value for "
+ "this expression[T/F]:");
+
+ while (1) {
+
+ input = getchar();
+ input = toupper(input);
+
+ if (input == 'T') {
+ func_values[func_count] = 1;
+ break;
+ } else if (input == 'F') {
+ func_values[func_count] = 0;
+ break;
+ }
+ }
+ if (isatty(fileno(stdin)) == 0)
+ putchar(input);
+ }
+ fprintf(stdout, "\nThanks!\n");
+ }
+
+ /* Now output the listing */
+ cur_patch = STAILQ_FIRST(&patches);
+ for (cur_instr = STAILQ_FIRST(&seq_program);
+ cur_instr != NULL;
+ cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) {
+
+ if (check_patch(&cur_patch, instrcount,
+ &skip_addr, func_values) == 0) {
+ /* Don't count this instruction as it is in a patch
+ * that was removed.
+ */
+ continue;
+ }
+
+ while (line < cur_instr->srcline) {
+ fgets(buf, sizeof(buf), ifile);
+ fprintf(listfile, " \t%s", buf);
+ line++;
+ }
+ fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr,
+#ifdef __LITTLE_ENDIAN
+ cur_instr->format.bytes[0],
+ cur_instr->format.bytes[1],
+ cur_instr->format.bytes[2],
+ cur_instr->format.bytes[3]);
+#else
+ cur_instr->format.bytes[3],
+ cur_instr->format.bytes[2],
+ cur_instr->format.bytes[1],
+ cur_instr->format.bytes[0]);
+#endif
+ /*
+ * Macro expansions can cause several instructions
+ * to be output for a single source line. Only
+ * advance the line once in these cases.
+ */
+ if (line == cur_instr->srcline) {
+ fgets(buf, sizeof(buf), ifile);
+ fprintf(listfile, "\t%s", buf);
+ line++;
+ } else {
+ fprintf(listfile, "\n");
+ }
+ instrptr++;
+ }
+ /* Dump the remainder of the file */
+ while(fgets(buf, sizeof(buf), ifile) != NULL)
+ fprintf(listfile, " %s", buf);
+
+ fclose(ifile);
+}
+
+static int
+check_patch(patch_t **start_patch, int start_instr,
+ int *skip_addr, int *func_vals)
+{
+ patch_t *cur_patch;
+
+ cur_patch = *start_patch;
+
+ while (cur_patch != NULL && start_instr == cur_patch->begin) {
+ if (func_vals[cur_patch->patch_func] == 0) {
+ int skip;
+
+ /* Start rejecting code */
+ *skip_addr = start_instr + cur_patch->skip_instr;
+ for (skip = cur_patch->skip_patch;
+ skip > 0 && cur_patch != NULL;
+ skip--)
+ cur_patch = STAILQ_NEXT(cur_patch, links);
+ } else {
+ /* Accepted this patch. Advance to the next
+ * one and wait for our intruction pointer to
+ * hit this point.
+ */
+ cur_patch = STAILQ_NEXT(cur_patch, links);
+ }
+ }
+
+ *start_patch = cur_patch;
+ if (start_instr < *skip_addr)
+ /* Still skipping */
+ return (0);
+
+ return (1);
+}
+
+/*
+ * Print out error information if appropriate, and clean up before
+ * terminating the program.
+ */
+void
+stop(const char *string, int err_code)
+{
+ if (string != NULL) {
+ fprintf(stderr, "%s: ", appname);
+ if (yyfilename != NULL) {
+ fprintf(stderr, "Stopped at file %s, line %d - ",
+ yyfilename, yylineno);
+ }
+ fprintf(stderr, "%s\n", string);
+ }
+
+ if (ofile != NULL) {
+ fclose(ofile);
+ if (err_code != 0) {
+ fprintf(stderr, "%s: Removing %s due to error\n",
+ appname, ofilename);
+ unlink(ofilename);
+ }
+ }
+
+ if (regfile != NULL) {
+ fclose(regfile);
+ if (err_code != 0) {
+ fprintf(stderr, "%s: Removing %s due to error\n",
+ appname, regfilename);
+ unlink(regfilename);
+ }
+ }
+
+ if (listfile != NULL) {
+ fclose(listfile);
+ if (err_code != 0) {
+ fprintf(stderr, "%s: Removing %s due to error\n",
+ appname, listfilename);
+ unlink(listfilename);
+ }
+ }
+
+ symlist_free(&patch_functions);
+ symtable_close();
+
+ exit(err_code);
+}
+
+struct instruction *
+seq_alloc()
+{
+ struct instruction *new_instr;
+
+ new_instr = (struct instruction *)malloc(sizeof(struct instruction));
+ if (new_instr == NULL)
+ stop("Unable to malloc instruction object", EX_SOFTWARE);
+ memset(new_instr, 0, sizeof(*new_instr));
+ STAILQ_INSERT_TAIL(&seq_program, new_instr, links);
+ new_instr->srcline = yylineno;
+ return new_instr;
+}
+
+critical_section_t *
+cs_alloc()
+{
+ critical_section_t *new_cs;
+
+ new_cs= (critical_section_t *)malloc(sizeof(critical_section_t));
+ if (new_cs == NULL)
+ stop("Unable to malloc critical_section object", EX_SOFTWARE);
+ memset(new_cs, 0, sizeof(*new_cs));
+
+ TAILQ_INSERT_TAIL(&cs_tailq, new_cs, links);
+ return new_cs;
+}
+
+scope_t *
+scope_alloc()
+{
+ scope_t *new_scope;
+
+ new_scope = (scope_t *)malloc(sizeof(scope_t));
+ if (new_scope == NULL)
+ stop("Unable to malloc scope object", EX_SOFTWARE);
+ memset(new_scope, 0, sizeof(*new_scope));
+ TAILQ_INIT(&new_scope->inner_scope);
+
+ if (SLIST_FIRST(&scope_stack) != NULL) {
+ TAILQ_INSERT_TAIL(&SLIST_FIRST(&scope_stack)->inner_scope,
+ new_scope, scope_links);
+ }
+ /* This patch is now the current scope */
+ SLIST_INSERT_HEAD(&scope_stack, new_scope, scope_stack_links);
+ return new_scope;
+}
+
+void
+process_scope(scope_t *scope)
+{
+ /*
+ * We are "leaving" this scope. We should now have
+ * enough information to process the lists of scopes
+ * we encapsulate.
+ */
+ scope_t *cur_scope;
+ u_int skip_patch_count;
+ u_int skip_instr_count;
+
+ cur_scope = TAILQ_LAST(&scope->inner_scope, scope_tailq);
+ skip_patch_count = 0;
+ skip_instr_count = 0;
+ while (cur_scope != NULL) {
+ u_int patch0_patch_skip;
+
+ patch0_patch_skip = 0;
+ switch (cur_scope->type) {
+ case SCOPE_IF:
+ case SCOPE_ELSE_IF:
+ if (skip_instr_count != 0) {
+ /* Create a tail patch */
+ patch0_patch_skip++;
+ cur_scope->patches[1].skip_patch =
+ skip_patch_count + 1;
+ cur_scope->patches[1].skip_instr =
+ skip_instr_count;
+ }
+
+ /* Count Head patch */
+ patch0_patch_skip++;
+
+ /* Count any patches contained in our inner scope */
+ patch0_patch_skip += cur_scope->inner_scope_patches;
+
+ cur_scope->patches[0].skip_patch = patch0_patch_skip;
+ cur_scope->patches[0].skip_instr =
+ cur_scope->end_addr - cur_scope->begin_addr;
+
+ skip_instr_count += cur_scope->patches[0].skip_instr;
+
+ skip_patch_count += patch0_patch_skip;
+ if (cur_scope->type == SCOPE_IF) {
+ scope->inner_scope_patches += skip_patch_count;
+ skip_patch_count = 0;
+ skip_instr_count = 0;
+ }
+ break;
+ case SCOPE_ELSE:
+ /* Count any patches contained in our innter scope */
+ skip_patch_count += cur_scope->inner_scope_patches;
+
+ skip_instr_count += cur_scope->end_addr
+ - cur_scope->begin_addr;
+ break;
+ case SCOPE_ROOT:
+ stop("Unexpected scope type encountered", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+
+ cur_scope = TAILQ_PREV(cur_scope, scope_tailq, scope_links);
+ }
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
new file mode 100644
index 000000000..51678dd46
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -0,0 +1,95 @@
+/*
+ * Assembler for the sequencer program downloaded to Aic7xxx SCSI host adapters
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.h#14 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+typedef struct path_entry {
+ char *directory;
+ int quoted_includes_only;
+ SLIST_ENTRY(path_entry) links;
+} *path_entry_t;
+
+typedef enum {
+ QUOTED_INCLUDE,
+ BRACKETED_INCLUDE,
+ SOURCE_FILE
+} include_type;
+
+SLIST_HEAD(path_list, path_entry);
+
+extern struct path_list search_path;
+extern struct cs_tailq cs_tailq;
+extern struct scope_list scope_stack;
+extern struct symlist patch_functions;
+extern int includes_search_curdir; /* False if we've seen -I- */
+extern char *appname;
+extern char *stock_include_file;
+extern int yylineno;
+extern char *yyfilename;
+extern char *prefix;
+extern char *patch_arg_list;
+extern char *versions;
+extern int src_mode;
+extern int dst_mode;
+struct symbol;
+
+void stop(const char *errstring, int err_code);
+void include_file(char *file_name, include_type type);
+void expand_macro(struct symbol *macro_symbol);
+struct instruction *seq_alloc(void);
+struct critical_section *cs_alloc(void);
+struct scope *scope_alloc(void);
+void process_scope(struct scope *);
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
new file mode 100644
index 000000000..f1586a437
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -0,0 +1,2004 @@
+%{
+/*
+ * Parser for the Aic7xxx SCSI Host adapter sequencer assembler.
+ *
+ * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_gram.y#30 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_insformat.h"
+
+int yylineno;
+char *yyfilename;
+char stock_prefix[] = "aic_";
+char *prefix = stock_prefix;
+char *patch_arg_list;
+char *versions;
+static char errbuf[255];
+static char regex_pattern[255];
+static symbol_t *cur_symbol;
+static symbol_t *field_symbol;
+static symbol_t *scb_or_sram_symbol;
+static symtype cur_symtype;
+static symbol_ref_t accumulator;
+static symbol_ref_t mode_ptr;
+static symbol_ref_t allones;
+static symbol_ref_t allzeros;
+static symbol_ref_t none;
+static symbol_ref_t sindex;
+static int instruction_ptr;
+static int num_srams;
+static int sram_or_scb_offset;
+static int download_constant_count;
+static int in_critical_section;
+static u_int enum_increment;
+static u_int enum_next_value;
+
+static void process_field(int field_type, symbol_t *sym, int mask);
+static void initialize_symbol(symbol_t *symbol);
+static void add_macro_arg(const char *argtext, int position);
+static void add_macro_body(const char *bodytext);
+static void process_register(symbol_t **p_symbol);
+static void format_1_instr(int opcode, symbol_ref_t *dest,
+ expression_t *immed, symbol_ref_t *src, int ret);
+static void format_2_instr(int opcode, symbol_ref_t *dest,
+ expression_t *places, symbol_ref_t *src, int ret);
+static void format_3_instr(int opcode, symbol_ref_t *src,
+ expression_t *immed, symbol_ref_t *address);
+static void test_readable_symbol(symbol_t *symbol);
+static void test_writable_symbol(symbol_t *symbol);
+static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op);
+static void make_expression(expression_t *immed, int value);
+static void add_conditional(symbol_t *symbol);
+static void add_version(const char *verstring);
+static int is_download_const(expression_t *immed);
+static int is_location_address(symbol_t *symbol);
+void yyerror(const char *string);
+
+#define SRAM_SYMNAME "SRAM_BASE"
+#define SCB_SYMNAME "SCB_BASE"
+%}
+
+%union {
+ u_int value;
+ char *str;
+ symbol_t *sym;
+ symbol_ref_t sym_ref;
+ expression_t expression;
+}
+
+%token T_REGISTER
+
+%token <value> T_CONST
+
+%token T_EXPORT
+
+%token T_DOWNLOAD
+
+%token T_SCB
+
+%token T_SRAM
+
+%token T_ALIAS
+
+%token T_SIZE
+
+%token T_EXPR_LSHIFT
+
+%token T_EXPR_RSHIFT
+
+%token <value> T_ADDRESS
+
+%token T_COUNT
+
+%token T_ACCESS_MODE
+
+%token T_DONT_GENERATE_DEBUG_CODE
+
+%token T_MODES
+
+%token T_DEFINE
+
+%token T_SET_SRC_MODE
+
+%token T_SET_DST_MODE
+
+%token <value> T_MODE
+
+%token T_BEGIN_CS
+
+%token T_END_CS
+
+%token T_PAD_PAGE
+
+%token T_FIELD
+
+%token T_ENUM
+
+%token T_MASK
+
+%token <value> T_NUMBER
+
+%token <str> T_PATH T_STRING T_ARG T_MACROBODY
+
+%token <sym> T_CEXPR
+
+%token T_EOF T_INCLUDE T_VERSION T_PREFIX T_PATCH_ARG_LIST
+
+%token <value> T_SHR T_SHL T_ROR T_ROL
+
+%token <value> T_MVI T_MOV T_CLR T_BMOV
+
+%token <value> T_JMP T_JC T_JNC T_JE T_JNE T_JNZ T_JZ T_CALL
+
+%token <value> T_ADD T_ADC
+
+%token <value> T_INC T_DEC
+
+%token <value> T_STC T_CLC
+
+%token <value> T_CMP T_NOT T_XOR
+
+%token <value> T_TEST T_AND
+
+%token <value> T_OR
+
+/* 16 bit extensions, not implemented
+ * %token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
+ * %token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
+ */
+%token T_RET
+
+%token T_NOP
+
+%token T_ACCUM T_ALLONES T_ALLZEROS T_NONE T_SINDEX T_MODE_PTR
+
+%token T_A
+
+%token <sym> T_SYMBOL
+
+%token T_NL
+
+%token T_IF T_ELSE T_ELSE_IF T_ENDIF
+
+%type <sym_ref> reg_symbol address destination source opt_source
+
+%type <expression> expression immediate immediate_or_a
+
+%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
+
+%type <value> mode_value mode_list macro_arglist
+
+%left '|'
+%left '&'
+%left T_EXPR_LSHIFT T_EXPR_RSHIFT
+%left '+' '-'
+%left '*' '/'
+%right '~'
+%nonassoc UMINUS
+%%
+
+program:
+ include
+| program include
+| prefix
+| program prefix
+| patch_arg_list
+| program patch_arg_list
+| version
+| program version
+| register
+| program register
+| constant
+| program constant
+| macrodefn
+| program macrodefn
+| scratch_ram
+| program scratch_ram
+| scb
+| program scb
+| label
+| program label
+| set_src_mode
+| program set_src_mode
+| set_dst_mode
+| program set_dst_mode
+| critical_section_start
+| program critical_section_start
+| critical_section_end
+| program critical_section_end
+| conditional
+| program conditional
+| code
+| program code
+;
+
+include:
+ T_INCLUDE '<' T_PATH '>'
+ {
+ include_file($3, BRACKETED_INCLUDE);
+ }
+| T_INCLUDE '"' T_PATH '"'
+ {
+ include_file($3, QUOTED_INCLUDE);
+ }
+;
+
+prefix:
+ T_PREFIX '=' T_STRING
+ {
+ if (prefix != stock_prefix)
+ stop("Prefix multiply defined",
+ EX_DATAERR);
+ prefix = strdup($3);
+ if (prefix == NULL)
+ stop("Unable to record prefix", EX_SOFTWARE);
+ }
+;
+
+patch_arg_list:
+ T_PATCH_ARG_LIST '=' T_STRING
+ {
+ if (patch_arg_list != NULL)
+ stop("Patch argument list multiply defined",
+ EX_DATAERR);
+ patch_arg_list = strdup($3);
+ if (patch_arg_list == NULL)
+ stop("Unable to record patch arg list", EX_SOFTWARE);
+ }
+;
+
+version:
+ T_VERSION '=' T_STRING
+ { add_version($3); }
+;
+
+register:
+ T_REGISTER { cur_symtype = REGISTER; } reg_definition
+;
+
+reg_definition:
+ T_SYMBOL '{'
+ {
+ if ($1->type != UNINITIALIZED) {
+ stop("Register multiply defined", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ cur_symbol = $1;
+ cur_symbol->type = cur_symtype;
+ initialize_symbol(cur_symbol);
+ }
+ reg_attribute_list
+ '}'
+ {
+ /*
+ * Default to allowing everything in for registers
+ * with no bit or mask definitions.
+ */
+ if (cur_symbol->info.rinfo->valid_bitmask == 0)
+ cur_symbol->info.rinfo->valid_bitmask = 0xFF;
+
+ if (cur_symbol->info.rinfo->size == 0)
+ cur_symbol->info.rinfo->size = 1;
+
+ /*
+ * This might be useful for registers too.
+ */
+ if (cur_symbol->type != REGISTER) {
+ if (cur_symbol->info.rinfo->address == 0)
+ cur_symbol->info.rinfo->address =
+ sram_or_scb_offset;
+ sram_or_scb_offset +=
+ cur_symbol->info.rinfo->size;
+ }
+ cur_symbol = NULL;
+ }
+;
+
+reg_attribute_list:
+ reg_attribute
+| reg_attribute_list reg_attribute
+;
+
+reg_attribute:
+ reg_address
+| size
+| count
+| access_mode
+| dont_generate_debug_code
+| modes
+| field_defn
+| enum_defn
+| mask_defn
+| alias
+| accumulator
+| mode_pointer
+| allones
+| allzeros
+| none
+| sindex
+;
+
+reg_address:
+ T_ADDRESS T_NUMBER
+ {
+ cur_symbol->info.rinfo->address = $2;
+ }
+;
+
+size:
+ T_SIZE T_NUMBER
+ {
+ cur_symbol->info.rinfo->size = $2;
+ if (scb_or_sram_symbol != NULL) {
+ u_int max_addr;
+ u_int sym_max_addr;
+
+ max_addr = scb_or_sram_symbol->info.rinfo->address
+ + scb_or_sram_symbol->info.rinfo->size;
+ sym_max_addr = cur_symbol->info.rinfo->address
+ + cur_symbol->info.rinfo->size;
+
+ if (sym_max_addr > max_addr)
+ stop("SCB or SRAM space exhausted", EX_DATAERR);
+ }
+ }
+;
+
+count:
+ T_COUNT T_NUMBER
+ {
+ cur_symbol->count += $2;
+ }
+;
+
+access_mode:
+ T_ACCESS_MODE T_MODE
+ {
+ cur_symbol->info.rinfo->mode = $2;
+ }
+;
+
+dont_generate_debug_code:
+ T_DONT_GENERATE_DEBUG_CODE
+ {
+ cur_symbol->dont_generate_debug_code = 1;
+ }
+;
+
+modes:
+ T_MODES mode_list
+ {
+ cur_symbol->info.rinfo->modes = $2;
+ }
+;
+
+mode_list:
+ mode_value
+ {
+ $$ = $1;
+ }
+| mode_list ',' mode_value
+ {
+ $$ = $1 | $3;
+ }
+;
+
+mode_value:
+ T_NUMBER
+ {
+ if ($1 > 4) {
+ stop("Valid register modes range between 0 and 4.",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+
+ $$ = (0x1 << $1);
+ }
+| T_SYMBOL
+ {
+ symbol_t *symbol;
+
+ symbol = $1;
+ if (symbol->type != CONST) {
+ stop("Only \"const\" symbols allowed in "
+ "mode definitions.", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ if (symbol->info.cinfo->value > 4) {
+ stop("Valid register modes range between 0 and 4.",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $$ = (0x1 << symbol->info.cinfo->value);
+ }
+;
+
+field_defn:
+ T_FIELD
+ {
+ field_symbol = NULL;
+ enum_next_value = 0;
+ enum_increment = 1;
+ }
+ '{' enum_entry_list '}'
+| T_FIELD T_SYMBOL expression
+ {
+ process_field(FIELD, $2, $3.value);
+ field_symbol = $2;
+ enum_next_value = 0;
+ enum_increment = 0x01 << (ffs($3.value) - 1);
+ }
+ '{' enum_entry_list '}'
+| T_FIELD T_SYMBOL expression
+ {
+ process_field(FIELD, $2, $3.value);
+ }
+;
+
+enum_defn:
+ T_ENUM
+ {
+ field_symbol = NULL;
+ enum_next_value = 0;
+ enum_increment = 1;
+ }
+ '{' enum_entry_list '}'
+| T_ENUM T_SYMBOL expression
+ {
+ process_field(ENUM, $2, $3.value);
+ field_symbol = $2;
+ enum_next_value = 0;
+ enum_increment = 0x01 << (ffs($3.value) - 1);
+ }
+ '{' enum_entry_list '}'
+;
+
+enum_entry_list:
+ enum_entry
+| enum_entry_list ',' enum_entry
+;
+
+enum_entry:
+ T_SYMBOL
+ {
+ process_field(ENUM_ENTRY, $1, enum_next_value);
+ enum_next_value += enum_increment;
+ }
+| T_SYMBOL expression
+ {
+ process_field(ENUM_ENTRY, $1, $2.value);
+ enum_next_value = $2.value + enum_increment;
+ }
+;
+
+mask_defn:
+ T_MASK T_SYMBOL expression
+ {
+ process_field(MASK, $2, $3.value);
+ }
+;
+
+alias:
+ T_ALIAS T_SYMBOL
+ {
+ if ($2->type != UNINITIALIZED) {
+ stop("Re-definition of register alias",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $2->type = ALIAS;
+ initialize_symbol($2);
+ $2->info.ainfo->parent = cur_symbol;
+ }
+;
+
+accumulator:
+ T_ACCUM
+ {
+ if (accumulator.symbol != NULL) {
+ stop("Only one accumulator definition allowed",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ accumulator.symbol = cur_symbol;
+ }
+;
+
+mode_pointer:
+ T_MODE_PTR
+ {
+ if (mode_ptr.symbol != NULL) {
+ stop("Only one mode pointer definition allowed",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ mode_ptr.symbol = cur_symbol;
+ }
+;
+
+allones:
+ T_ALLONES
+ {
+ if (allones.symbol != NULL) {
+ stop("Only one definition of allones allowed",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ allones.symbol = cur_symbol;
+ }
+;
+
+allzeros:
+ T_ALLZEROS
+ {
+ if (allzeros.symbol != NULL) {
+ stop("Only one definition of allzeros allowed",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ allzeros.symbol = cur_symbol;
+ }
+;
+
+none:
+ T_NONE
+ {
+ if (none.symbol != NULL) {
+ stop("Only one definition of none allowed",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ none.symbol = cur_symbol;
+ }
+;
+
+sindex:
+ T_SINDEX
+ {
+ if (sindex.symbol != NULL) {
+ stop("Only one definition of sindex allowed",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ sindex.symbol = cur_symbol;
+ }
+;
+
+expression:
+ expression '|' expression
+ {
+ $$.value = $1.value | $3.value;
+ symlist_merge(&$$.referenced_syms,
+ &$1.referenced_syms,
+ &$3.referenced_syms);
+ }
+| expression '&' expression
+ {
+ $$.value = $1.value & $3.value;
+ symlist_merge(&$$.referenced_syms,
+ &$1.referenced_syms,
+ &$3.referenced_syms);
+ }
+| expression '+' expression
+ {
+ $$.value = $1.value + $3.value;
+ symlist_merge(&$$.referenced_syms,
+ &$1.referenced_syms,
+ &$3.referenced_syms);
+ }
+| expression '-' expression
+ {
+ $$.value = $1.value - $3.value;
+ symlist_merge(&($$.referenced_syms),
+ &($1.referenced_syms),
+ &($3.referenced_syms));
+ }
+| expression '*' expression
+ {
+ $$.value = $1.value * $3.value;
+ symlist_merge(&($$.referenced_syms),
+ &($1.referenced_syms),
+ &($3.referenced_syms));
+ }
+| expression '/' expression
+ {
+ $$.value = $1.value / $3.value;
+ symlist_merge(&($$.referenced_syms),
+ &($1.referenced_syms),
+ &($3.referenced_syms));
+ }
+| expression T_EXPR_LSHIFT expression
+ {
+ $$.value = $1.value << $3.value;
+ symlist_merge(&$$.referenced_syms,
+ &$1.referenced_syms,
+ &$3.referenced_syms);
+ }
+| expression T_EXPR_RSHIFT expression
+ {
+ $$.value = $1.value >> $3.value;
+ symlist_merge(&$$.referenced_syms,
+ &$1.referenced_syms,
+ &$3.referenced_syms);
+ }
+| '(' expression ')'
+ {
+ $$ = $2;
+ }
+| '~' expression
+ {
+ $$ = $2;
+ $$.value = (~$$.value) & 0xFF;
+ }
+| '-' expression %prec UMINUS
+ {
+ $$ = $2;
+ $$.value = -$$.value;
+ }
+| T_NUMBER
+ {
+ $$.value = $1;
+ SLIST_INIT(&$$.referenced_syms);
+ }
+| T_SYMBOL
+ {
+ symbol_t *symbol;
+
+ symbol = $1;
+ switch (symbol->type) {
+ case ALIAS:
+ symbol = $1->info.ainfo->parent;
+ case REGISTER:
+ case SCBLOC:
+ case SRAMLOC:
+ $$.value = symbol->info.rinfo->address;
+ break;
+ case MASK:
+ case FIELD:
+ case ENUM:
+ case ENUM_ENTRY:
+ $$.value = symbol->info.finfo->value;
+ break;
+ case DOWNLOAD_CONST:
+ case CONST:
+ $$.value = symbol->info.cinfo->value;
+ break;
+ case UNINITIALIZED:
+ default:
+ {
+ snprintf(errbuf, sizeof(errbuf),
+ "Undefined symbol %s referenced",
+ symbol->name);
+ stop(errbuf, EX_DATAERR);
+ /* NOTREACHED */
+ break;
+ }
+ }
+ SLIST_INIT(&$$.referenced_syms);
+ symlist_add(&$$.referenced_syms, symbol, SYMLIST_INSERT_HEAD);
+ }
+;
+
+constant:
+ T_CONST T_SYMBOL expression
+ {
+ if ($2->type != UNINITIALIZED) {
+ stop("Re-definition of symbol as a constant",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $2->type = CONST;
+ initialize_symbol($2);
+ $2->info.cinfo->value = $3.value;
+ }
+| T_CONST T_SYMBOL T_DOWNLOAD
+ {
+ if ($1) {
+ stop("Invalid downloaded constant declaration",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ if ($2->type != UNINITIALIZED) {
+ stop("Re-definition of symbol as a downloaded constant",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $2->type = DOWNLOAD_CONST;
+ initialize_symbol($2);
+ $2->info.cinfo->value = download_constant_count++;
+ }
+;
+
+macrodefn_prologue:
+ T_DEFINE T_SYMBOL
+ {
+ if ($2->type != UNINITIALIZED) {
+ stop("Re-definition of symbol as a macro",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ cur_symbol = $2;
+ cur_symbol->type = MACRO;
+ initialize_symbol(cur_symbol);
+ }
+;
+
+macrodefn:
+ macrodefn_prologue T_MACROBODY
+ {
+ add_macro_body($2);
+ }
+| macrodefn_prologue '(' macro_arglist ')' T_MACROBODY
+ {
+ add_macro_body($5);
+ cur_symbol->info.macroinfo->narg = $3;
+ }
+;
+
+macro_arglist:
+ {
+ /* Macros can take no arguments */
+ $$ = 0;
+ }
+| T_ARG
+ {
+ $$ = 1;
+ add_macro_arg($1, 0);
+ }
+| macro_arglist ',' T_ARG
+ {
+ if ($1 == 0) {
+ stop("Comma without preceding argument in arg list",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $$ = $1 + 1;
+ add_macro_arg($3, $1);
+ }
+;
+
+scratch_ram:
+ T_SRAM '{'
+ {
+ snprintf(errbuf, sizeof(errbuf), "%s%d", SRAM_SYMNAME,
+ num_srams);
+ cur_symbol = symtable_get(SRAM_SYMNAME);
+ cur_symtype = SRAMLOC;
+ cur_symbol->type = SRAMLOC;
+ initialize_symbol(cur_symbol);
+ cur_symbol->count += 1;
+ }
+ reg_address
+ {
+ sram_or_scb_offset = cur_symbol->info.rinfo->address;
+ }
+ size
+ {
+ scb_or_sram_symbol = cur_symbol;
+ }
+ scb_or_sram_attributes
+ '}'
+ {
+ cur_symbol = NULL;
+ scb_or_sram_symbol = NULL;
+ }
+;
+
+scb:
+ T_SCB '{'
+ {
+ cur_symbol = symtable_get(SCB_SYMNAME);
+ cur_symtype = SCBLOC;
+ if (cur_symbol->type != UNINITIALIZED) {
+ stop("Only one SRAM definition allowed",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ cur_symbol->type = SCBLOC;
+ initialize_symbol(cur_symbol);
+ /* 64 bytes of SCB space */
+ cur_symbol->info.rinfo->size = 64;
+ cur_symbol->count += 1;
+ }
+ reg_address
+ {
+ sram_or_scb_offset = cur_symbol->info.rinfo->address;
+ }
+ size
+ {
+ scb_or_sram_symbol = cur_symbol;
+ }
+ scb_or_sram_attributes
+ '}'
+ {
+ cur_symbol = NULL;
+ scb_or_sram_symbol = NULL;
+ }
+;
+
+scb_or_sram_attributes:
+ /* NULL definition is okay */
+| modes
+| scb_or_sram_reg_list
+| modes scb_or_sram_reg_list
+;
+
+scb_or_sram_reg_list:
+ reg_definition
+| scb_or_sram_reg_list reg_definition
+;
+
+reg_symbol:
+ T_SYMBOL
+ {
+ process_register(&$1);
+ $$.symbol = $1;
+ $$.offset = 0;
+ }
+| T_SYMBOL '[' T_SYMBOL ']'
+ {
+ process_register(&$1);
+ if ($3->type != CONST) {
+ stop("register offset must be a constant", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ if (($3->info.cinfo->value + 1) > $1->info.rinfo->size) {
+ stop("Accessing offset beyond range of register",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $$.symbol = $1;
+ $$.offset = $3->info.cinfo->value;
+ }
+| T_SYMBOL '[' T_NUMBER ']'
+ {
+ process_register(&$1);
+ if (($3 + 1) > $1->info.rinfo->size) {
+ stop("Accessing offset beyond range of register",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $$.symbol = $1;
+ $$.offset = $3;
+ }
+| T_A
+ {
+ if (accumulator.symbol == NULL) {
+ stop("No accumulator has been defined", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $$.symbol = accumulator.symbol;
+ $$.offset = 0;
+ }
+;
+
+destination:
+ reg_symbol
+ {
+ test_writable_symbol($1.symbol);
+ $$ = $1;
+ }
+;
+
+immediate:
+ expression
+ { $$ = $1; }
+;
+
+immediate_or_a:
+ expression
+ {
+ if ($1.value == 0 && is_download_const(&$1) == 0) {
+ snprintf(errbuf, sizeof(errbuf),
+ "\nExpression evaluates to 0 and thus "
+ "references the accumulator.\n "
+ "If this is the desired effect, use 'A' "
+ "instead.\n");
+ stop(errbuf, EX_DATAERR);
+ }
+ $$ = $1;
+ }
+| T_A
+ {
+ SLIST_INIT(&$$.referenced_syms);
+ symlist_add(&$$.referenced_syms, accumulator.symbol,
+ SYMLIST_INSERT_HEAD);
+ $$.value = 0;
+ }
+;
+
+source:
+ reg_symbol
+ {
+ test_readable_symbol($1.symbol);
+ $$ = $1;
+ }
+;
+
+opt_source:
+ {
+ $$.symbol = NULL;
+ $$.offset = 0;
+ }
+| ',' source
+ { $$ = $2; }
+;
+
+ret:
+ { $$ = 0; }
+| T_RET
+ { $$ = 1; }
+;
+
+set_src_mode:
+ T_SET_SRC_MODE T_NUMBER ';'
+ {
+ src_mode = $2;
+ }
+;
+
+set_dst_mode:
+ T_SET_DST_MODE T_NUMBER ';'
+ {
+ dst_mode = $2;
+ }
+;
+
+critical_section_start:
+ T_BEGIN_CS ';'
+ {
+ critical_section_t *cs;
+
+ if (in_critical_section != FALSE) {
+ stop("Critical Section within Critical Section",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ cs = cs_alloc();
+ cs->begin_addr = instruction_ptr;
+ in_critical_section = TRUE;
+ }
+;
+
+critical_section_end:
+ T_END_CS ';'
+ {
+ critical_section_t *cs;
+
+ if (in_critical_section == FALSE) {
+ stop("Unballanced 'end_cs'", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ cs = TAILQ_LAST(&cs_tailq, cs_tailq);
+ cs->end_addr = instruction_ptr;
+ in_critical_section = FALSE;
+ }
+;
+
+export:
+ { $$ = 0; }
+| T_EXPORT
+ { $$ = 1; }
+;
+
+label:
+ export T_SYMBOL ':'
+ {
+ if ($2->type != UNINITIALIZED) {
+ stop("Program label multiply defined", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $2->type = LABEL;
+ initialize_symbol($2);
+ $2->info.linfo->address = instruction_ptr;
+ $2->info.linfo->exported = $1;
+ }
+;
+
+address:
+ T_SYMBOL
+ {
+ $$.symbol = $1;
+ $$.offset = 0;
+ }
+| T_SYMBOL '+' T_NUMBER
+ {
+ $$.symbol = $1;
+ $$.offset = $3;
+ }
+| T_SYMBOL '-' T_NUMBER
+ {
+ $$.symbol = $1;
+ $$.offset = -$3;
+ }
+| '.'
+ {
+ $$.symbol = NULL;
+ $$.offset = 0;
+ }
+| '.' '+' T_NUMBER
+ {
+ $$.symbol = NULL;
+ $$.offset = $3;
+ }
+| '.' '-' T_NUMBER
+ {
+ $$.symbol = NULL;
+ $$.offset = -$3;
+ }
+;
+
+conditional:
+ T_IF T_CEXPR '{'
+ {
+ scope_t *new_scope;
+
+ add_conditional($2);
+ new_scope = scope_alloc();
+ new_scope->type = SCOPE_IF;
+ new_scope->begin_addr = instruction_ptr;
+ new_scope->func_num = $2->info.condinfo->func_num;
+ }
+| T_ELSE T_IF T_CEXPR '{'
+ {
+ scope_t *new_scope;
+ scope_t *scope_context;
+ scope_t *last_scope;
+
+ /*
+ * Ensure that the previous scope is either an
+ * if or and else if.
+ */
+ scope_context = SLIST_FIRST(&scope_stack);
+ last_scope = TAILQ_LAST(&scope_context->inner_scope,
+ scope_tailq);
+ if (last_scope == NULL
+ || last_scope->type == T_ELSE) {
+
+ stop("'else if' without leading 'if'", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ add_conditional($3);
+ new_scope = scope_alloc();
+ new_scope->type = SCOPE_ELSE_IF;
+ new_scope->begin_addr = instruction_ptr;
+ new_scope->func_num = $3->info.condinfo->func_num;
+ }
+| T_ELSE '{'
+ {
+ scope_t *new_scope;
+ scope_t *scope_context;
+ scope_t *last_scope;
+
+ /*
+ * Ensure that the previous scope is either an
+ * if or and else if.
+ */
+ scope_context = SLIST_FIRST(&scope_stack);
+ last_scope = TAILQ_LAST(&scope_context->inner_scope,
+ scope_tailq);
+ if (last_scope == NULL
+ || last_scope->type == SCOPE_ELSE) {
+
+ stop("'else' without leading 'if'", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ new_scope = scope_alloc();
+ new_scope->type = SCOPE_ELSE;
+ new_scope->begin_addr = instruction_ptr;
+ }
+;
+
+conditional:
+ '}'
+ {
+ scope_t *scope_context;
+
+ scope_context = SLIST_FIRST(&scope_stack);
+ if (scope_context->type == SCOPE_ROOT) {
+ stop("Unexpected '}' encountered", EX_DATAERR);
+ /* NOTREACHED */
+ }
+
+ scope_context->end_addr = instruction_ptr;
+
+ /* Pop the scope */
+ SLIST_REMOVE_HEAD(&scope_stack, scope_stack_links);
+
+ process_scope(scope_context);
+
+ if (SLIST_FIRST(&scope_stack) == NULL) {
+ stop("Unexpected '}' encountered", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ }
+;
+
+f1_opcode:
+ T_AND { $$ = AIC_OP_AND; }
+| T_XOR { $$ = AIC_OP_XOR; }
+| T_ADD { $$ = AIC_OP_ADD; }
+| T_ADC { $$ = AIC_OP_ADC; }
+;
+
+code:
+ f1_opcode destination ',' immediate_or_a opt_source ret ';'
+ {
+ format_1_instr($1, &$2, &$4, &$5, $6);
+ }
+;
+
+code:
+ T_OR reg_symbol ',' immediate_or_a opt_source ret ';'
+ {
+ format_1_instr(AIC_OP_OR, &$2, &$4, &$5, $6);
+ }
+;
+
+code:
+ T_INC destination opt_source ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 1);
+ format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4);
+ }
+;
+
+code:
+ T_DEC destination opt_source ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, -1);
+ format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4);
+ }
+;
+
+code:
+ T_CLC ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, -1);
+ format_1_instr(AIC_OP_ADD, &none, &immed, &allzeros, $2);
+ }
+| T_CLC T_MVI destination ',' immediate_or_a ret ';'
+ {
+ format_1_instr(AIC_OP_ADD, &$3, &$5, &allzeros, $6);
+ }
+;
+
+code:
+ T_STC ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 1);
+ format_1_instr(AIC_OP_ADD, &none, &immed, &allones, $2);
+ }
+| T_STC destination ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 1);
+ format_1_instr(AIC_OP_ADD, &$2, &immed, &allones, $3);
+ }
+;
+
+code:
+ T_BMOV destination ',' source ',' immediate ret ';'
+ {
+ format_1_instr(AIC_OP_BMOV, &$2, &$6, &$4, $7);
+ }
+;
+
+code:
+ T_MOV destination ',' source ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 1);
+ format_1_instr(AIC_OP_BMOV, &$2, &immed, &$4, $5);
+ }
+;
+
+code:
+ T_MVI destination ',' immediate ret ';'
+ {
+ if ($4.value == 0
+ && is_download_const(&$4) == 0) {
+ expression_t immed;
+
+ /*
+ * Allow move immediates of 0 so that macros,
+ * that can't know the immediate's value and
+ * otherwise compensate, still work.
+ */
+ make_expression(&immed, 1);
+ format_1_instr(AIC_OP_BMOV, &$2, &immed, &allzeros, $5);
+ } else {
+ format_1_instr(AIC_OP_OR, &$2, &$4, &allzeros, $5);
+ }
+ }
+;
+
+code:
+ T_NOT destination opt_source ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 0xff);
+ format_1_instr(AIC_OP_XOR, &$2, &immed, &$3, $4);
+ }
+;
+
+code:
+ T_CLR destination ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 0xff);
+ format_1_instr(AIC_OP_AND, &$2, &immed, &allzeros, $3);
+ }
+;
+
+code:
+ T_NOP ret ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 0xff);
+ format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, $2);
+ }
+;
+
+code:
+ T_RET ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 0xff);
+ format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, TRUE);
+ }
+;
+
+ /*
+ * This grammar differs from the one in the aic7xxx
+ * reference manual since the grammar listed there is
+ * ambiguous and causes a shift/reduce conflict.
+ * It also seems more logical as the "immediate"
+ * argument is listed as the second arg like the
+ * other formats.
+ */
+
+f2_opcode:
+ T_SHL { $$ = AIC_OP_SHL; }
+| T_SHR { $$ = AIC_OP_SHR; }
+| T_ROL { $$ = AIC_OP_ROL; }
+| T_ROR { $$ = AIC_OP_ROR; }
+;
+
+/*
+ * 16bit opcodes, not used
+ *
+ *f4_opcode:
+ * T_OR16 { $$ = AIC_OP_OR16; }
+ *| T_AND16 { $$ = AIC_OP_AND16; }
+ *| T_XOR16 { $$ = AIC_OP_XOR16; }
+ *| T_ADD16 { $$ = AIC_OP_ADD16; }
+ *| T_ADC16 { $$ = AIC_OP_ADC16; }
+ *| T_MVI16 { $$ = AIC_OP_MVI16; }
+ *;
+ */
+
+code:
+ f2_opcode destination ',' expression opt_source ret ';'
+ {
+ format_2_instr($1, &$2, &$4, &$5, $6);
+ }
+;
+
+jmp_jc_jnc_call:
+ T_JMP { $$ = AIC_OP_JMP; }
+| T_JC { $$ = AIC_OP_JC; }
+| T_JNC { $$ = AIC_OP_JNC; }
+| T_CALL { $$ = AIC_OP_CALL; }
+;
+
+jz_jnz:
+ T_JZ { $$ = AIC_OP_JZ; }
+| T_JNZ { $$ = AIC_OP_JNZ; }
+;
+
+je_jne:
+ T_JE { $$ = AIC_OP_JE; }
+| T_JNE { $$ = AIC_OP_JNE; }
+;
+
+code:
+ jmp_jc_jnc_call address ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 0);
+ format_3_instr($1, &sindex, &immed, &$2);
+ }
+;
+
+code:
+ T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
+ {
+ type_check(&$2, &$4, AIC_OP_OR);
+ format_3_instr($5, &$2, &$4, &$6);
+ }
+;
+
+code:
+ T_TEST source ',' immediate_or_a jz_jnz address ';'
+ {
+ format_3_instr($5, &$2, &$4, &$6);
+ }
+;
+
+code:
+ T_CMP source ',' immediate_or_a je_jne address ';'
+ {
+ format_3_instr($5, &$2, &$4, &$6);
+ }
+;
+
+code:
+ T_MOV source jmp_jc_jnc_call address ';'
+ {
+ expression_t immed;
+
+ make_expression(&immed, 0);
+ format_3_instr($3, &$2, &immed, &$4);
+ }
+;
+
+code:
+ T_MVI immediate jmp_jc_jnc_call address ';'
+ {
+ format_3_instr($3, &allzeros, &$2, &$4);
+ }
+;
+
+%%
+
+static void
+process_field(int field_type, symbol_t *sym, int value)
+{
+ /*
+ * Add the current register to its
+ * symbol list, if it already exists,
+ * warn if we are setting it to a
+ * different value, or in the bit to
+ * the "allowed bits" of this register.
+ */
+ if (sym->type == UNINITIALIZED) {
+ sym->type = field_type;
+ initialize_symbol(sym);
+ sym->info.finfo->value = value;
+ if (field_type != ENUM_ENTRY) {
+ if (field_type != MASK && value == 0) {
+ stop("Empty Field, or Enum", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ sym->info.finfo->value = value;
+ sym->info.finfo->mask = value;
+ } else if (field_symbol != NULL) {
+ sym->info.finfo->mask = field_symbol->info.finfo->value;
+ } else {
+ sym->info.finfo->mask = 0xFF;
+ }
+ } else if (sym->type != field_type) {
+ stop("Field definition mirrors a definition of the same "
+ " name, but a different type", EX_DATAERR);
+ /* NOTREACHED */
+ } else if (value != sym->info.finfo->value) {
+ stop("Field redefined with a conflicting value", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ /* Fail if this symbol is already listed */
+ if (symlist_search(&(sym->info.finfo->symrefs),
+ cur_symbol->name) != NULL) {
+ stop("Field defined multiple times for register", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ symlist_add(&(sym->info.finfo->symrefs), cur_symbol,
+ SYMLIST_INSERT_HEAD);
+ cur_symbol->info.rinfo->valid_bitmask |= sym->info.finfo->mask;
+ cur_symbol->info.rinfo->typecheck_masks = TRUE;
+ symlist_add(&(cur_symbol->info.rinfo->fields), sym, SYMLIST_SORT);
+}
+
+static void
+initialize_symbol(symbol_t *symbol)
+{
+ switch (symbol->type) {
+ case UNINITIALIZED:
+ stop("Call to initialize_symbol with type field unset",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ break;
+ case REGISTER:
+ case SRAMLOC:
+ case SCBLOC:
+ symbol->info.rinfo =
+ (struct reg_info *)malloc(sizeof(struct reg_info));
+ if (symbol->info.rinfo == NULL) {
+ stop("Can't create register info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.rinfo, 0,
+ sizeof(struct reg_info));
+ SLIST_INIT(&(symbol->info.rinfo->fields));
+ /*
+ * Default to allowing access in all register modes
+ * or to the mode specified by the SCB or SRAM space
+ * we are in.
+ */
+ if (scb_or_sram_symbol != NULL)
+ symbol->info.rinfo->modes =
+ scb_or_sram_symbol->info.rinfo->modes;
+ else
+ symbol->info.rinfo->modes = ~0;
+ break;
+ case ALIAS:
+ symbol->info.ainfo =
+ (struct alias_info *)malloc(sizeof(struct alias_info));
+ if (symbol->info.ainfo == NULL) {
+ stop("Can't create alias info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.ainfo, 0,
+ sizeof(struct alias_info));
+ break;
+ case MASK:
+ case FIELD:
+ case ENUM:
+ case ENUM_ENTRY:
+ symbol->info.finfo =
+ (struct field_info *)malloc(sizeof(struct field_info));
+ if (symbol->info.finfo == NULL) {
+ stop("Can't create field info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.finfo, 0, sizeof(struct field_info));
+ SLIST_INIT(&(symbol->info.finfo->symrefs));
+ break;
+ case CONST:
+ case DOWNLOAD_CONST:
+ symbol->info.cinfo =
+ (struct const_info *)malloc(sizeof(struct const_info));
+ if (symbol->info.cinfo == NULL) {
+ stop("Can't create alias info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.cinfo, 0,
+ sizeof(struct const_info));
+ break;
+ case LABEL:
+ symbol->info.linfo =
+ (struct label_info *)malloc(sizeof(struct label_info));
+ if (symbol->info.linfo == NULL) {
+ stop("Can't create label info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.linfo, 0,
+ sizeof(struct label_info));
+ break;
+ case CONDITIONAL:
+ symbol->info.condinfo =
+ (struct cond_info *)malloc(sizeof(struct cond_info));
+ if (symbol->info.condinfo == NULL) {
+ stop("Can't create conditional info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.condinfo, 0,
+ sizeof(struct cond_info));
+ break;
+ case MACRO:
+ symbol->info.macroinfo =
+ (struct macro_info *)malloc(sizeof(struct macro_info));
+ if (symbol->info.macroinfo == NULL) {
+ stop("Can't create macro info", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ memset(symbol->info.macroinfo, 0,
+ sizeof(struct macro_info));
+ STAILQ_INIT(&symbol->info.macroinfo->args);
+ break;
+ default:
+ stop("Call to initialize_symbol with invalid symbol type",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ break;
+ }
+}
+
+static void
+add_macro_arg(const char *argtext, int argnum)
+{
+ struct macro_arg *marg;
+ int i;
+ int retval;
+
+ if (cur_symbol == NULL || cur_symbol->type != MACRO) {
+ stop("Invalid current symbol for adding macro arg",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+
+ marg = (struct macro_arg *)malloc(sizeof(*marg));
+ if (marg == NULL) {
+ stop("Can't create macro_arg structure", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ marg->replacement_text = NULL;
+ retval = snprintf(regex_pattern, sizeof(regex_pattern),
+ "[^-/A-Za-z0-9_](%s)([^-/A-Za-z0-9_]|$)",
+ argtext);
+ if (retval >= sizeof(regex_pattern)) {
+ stop("Regex text buffer too small for arg",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ retval = regcomp(&marg->arg_regex, regex_pattern, REG_EXTENDED);
+ if (retval != 0) {
+ stop("Regex compilation failed", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ STAILQ_INSERT_TAIL(&cur_symbol->info.macroinfo->args, marg, links);
+}
+
+static void
+add_macro_body(const char *bodytext)
+{
+ if (cur_symbol == NULL || cur_symbol->type != MACRO) {
+ stop("Invalid current symbol for adding macro arg",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ cur_symbol->info.macroinfo->body = strdup(bodytext);
+ if (cur_symbol->info.macroinfo->body == NULL) {
+ stop("Can't duplicate macro body text", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+}
+
+static void
+process_register(symbol_t **p_symbol)
+{
+ symbol_t *symbol = *p_symbol;
+
+ if (symbol->type == UNINITIALIZED) {
+ snprintf(errbuf, sizeof(errbuf), "Undefined register %s",
+ symbol->name);
+ stop(errbuf, EX_DATAERR);
+ /* NOTREACHED */
+ } else if (symbol->type == ALIAS) {
+ *p_symbol = symbol->info.ainfo->parent;
+ } else if ((symbol->type != REGISTER)
+ && (symbol->type != SCBLOC)
+ && (symbol->type != SRAMLOC)) {
+ snprintf(errbuf, sizeof(errbuf),
+ "Specified symbol %s is not a register",
+ symbol->name);
+ stop(errbuf, EX_DATAERR);
+ }
+}
+
+static void
+format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
+ symbol_ref_t *src, int ret)
+{
+ struct instruction *instr;
+ struct ins_format1 *f1_instr;
+
+ if (src->symbol == NULL)
+ src = dest;
+
+ /* Test register permissions */
+ test_writable_symbol(dest->symbol);
+ test_readable_symbol(src->symbol);
+
+ if (!is_location_address(dest->symbol)) {
+ /* Ensure that immediate makes sense for this destination */
+ type_check(dest, immed, opcode);
+ }
+
+ /* Allocate sequencer space for the instruction and fill it out */
+ instr = seq_alloc();
+ f1_instr = &instr->format.format1;
+ f1_instr->ret = ret ? 1 : 0;
+ f1_instr->opcode = opcode;
+ f1_instr->destination = dest->symbol->info.rinfo->address
+ + dest->offset;
+ f1_instr->source = src->symbol->info.rinfo->address
+ + src->offset;
+ f1_instr->immediate = immed->value;
+
+ if (is_download_const(immed))
+ f1_instr->parity = 1;
+ else if (dest->symbol == mode_ptr.symbol) {
+ u_int src_value;
+ u_int dst_value;
+
+ /*
+ * Attempt to update mode information if
+ * we are operating on the mode register.
+ */
+ if (src->symbol == allones.symbol)
+ src_value = 0xFF;
+ else if (src->symbol == allzeros.symbol)
+ src_value = 0;
+ else if (src->symbol == mode_ptr.symbol)
+ src_value = (dst_mode << 4) | src_mode;
+ else
+ goto cant_update;
+
+ switch (opcode) {
+ case AIC_OP_AND:
+ dst_value = src_value & immed->value;
+ break;
+ case AIC_OP_XOR:
+ dst_value = src_value ^ immed->value;
+ break;
+ case AIC_OP_ADD:
+ dst_value = (src_value + immed->value) & 0xFF;
+ break;
+ case AIC_OP_OR:
+ dst_value = src_value | immed->value;
+ break;
+ case AIC_OP_BMOV:
+ dst_value = src_value;
+ break;
+ default:
+ goto cant_update;
+ }
+ src_mode = dst_value & 0xF;
+ dst_mode = (dst_value >> 4) & 0xF;
+ }
+
+cant_update:
+ symlist_free(&immed->referenced_syms);
+ instruction_ptr++;
+}
+
+static void
+format_2_instr(int opcode, symbol_ref_t *dest, expression_t *places,
+ symbol_ref_t *src, int ret)
+{
+ struct instruction *instr;
+ struct ins_format2 *f2_instr;
+ uint8_t shift_control;
+
+ if (src->symbol == NULL)
+ src = dest;
+
+ /* Test register permissions */
+ test_writable_symbol(dest->symbol);
+ test_readable_symbol(src->symbol);
+
+ /* Allocate sequencer space for the instruction and fill it out */
+ instr = seq_alloc();
+ f2_instr = &instr->format.format2;
+ f2_instr->ret = ret ? 1 : 0;
+ f2_instr->opcode = AIC_OP_ROL;
+ f2_instr->destination = dest->symbol->info.rinfo->address
+ + dest->offset;
+ f2_instr->source = src->symbol->info.rinfo->address
+ + src->offset;
+ if (places->value > 8 || places->value <= 0) {
+ stop("illegal shift value", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ switch (opcode) {
+ case AIC_OP_SHL:
+ if (places->value == 8)
+ shift_control = 0xf0;
+ else
+ shift_control = (places->value << 4) | places->value;
+ break;
+ case AIC_OP_SHR:
+ if (places->value == 8) {
+ shift_control = 0xf8;
+ } else {
+ shift_control = (places->value << 4)
+ | (8 - places->value)
+ | 0x08;
+ }
+ break;
+ case AIC_OP_ROL:
+ shift_control = places->value & 0x7;
+ break;
+ case AIC_OP_ROR:
+ shift_control = (8 - places->value) | 0x08;
+ break;
+ default:
+ shift_control = 0; /* Quiet Compiler */
+ stop("Invalid shift operation specified", EX_SOFTWARE);
+ /* NOTREACHED */
+ break;
+ };
+ f2_instr->shift_control = shift_control;
+ symlist_free(&places->referenced_syms);
+ instruction_ptr++;
+}
+
+static void
+format_3_instr(int opcode, symbol_ref_t *src,
+ expression_t *immed, symbol_ref_t *address)
+{
+ struct instruction *instr;
+ struct ins_format3 *f3_instr;
+ int addr;
+
+ /* Test register permissions */
+ test_readable_symbol(src->symbol);
+
+ /* Allocate sequencer space for the instruction and fill it out */
+ instr = seq_alloc();
+ f3_instr = &instr->format.format3;
+ if (address->symbol == NULL) {
+ /* 'dot' reference. Use the current instruction pointer */
+ addr = instruction_ptr + address->offset;
+ } else if (address->symbol->type == UNINITIALIZED) {
+ /* forward reference */
+ addr = address->offset;
+ instr->patch_label = address->symbol;
+ } else
+ addr = address->symbol->info.linfo->address + address->offset;
+ f3_instr->opcode = opcode;
+ f3_instr->address = addr;
+ f3_instr->source = src->symbol->info.rinfo->address
+ + src->offset;
+ f3_instr->immediate = immed->value;
+
+ if (is_download_const(immed))
+ f3_instr->parity = 1;
+
+ symlist_free(&immed->referenced_syms);
+ instruction_ptr++;
+}
+
+static void
+test_readable_symbol(symbol_t *symbol)
+{
+ if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
+ snprintf(errbuf, sizeof(errbuf),
+ "Register %s unavailable in source reg mode %d",
+ symbol->name, src_mode);
+ stop(errbuf, EX_DATAERR);
+ }
+
+ if (symbol->info.rinfo->mode == WO) {
+ stop("Write Only register specified as source",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+}
+
+static void
+test_writable_symbol(symbol_t *symbol)
+{
+ if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
+ snprintf(errbuf, sizeof(errbuf),
+ "Register %s unavailable in destination reg mode %d",
+ symbol->name, dst_mode);
+ stop(errbuf, EX_DATAERR);
+ }
+
+ if (symbol->info.rinfo->mode == RO) {
+ stop("Read Only register specified as destination",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+}
+
+static void
+type_check(symbol_ref_t *sym, expression_t *expression, int opcode)
+{
+ symbol_t *symbol = sym->symbol;
+ symbol_node_t *node;
+ int and_op;
+ int8_t value, mask;
+
+ and_op = FALSE;
+ /*
+ * Make sure that we aren't attempting to write something
+ * that hasn't been defined. If this is an and operation,
+ * this is a mask, so "undefined" bits are okay.
+ */
+ if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ ||
+ opcode == AIC_OP_JZ || opcode == AIC_OP_JNE ||
+ opcode == AIC_OP_BMOV)
+ and_op = TRUE;
+
+ /*
+ * Defaulting to 8 bit logic
+ */
+ mask = (int8_t)~symbol->info.rinfo->valid_bitmask;
+ value = (int8_t)expression->value;
+
+ if (and_op == FALSE && (mask & value) != 0 ) {
+ snprintf(errbuf, sizeof(errbuf),
+ "Invalid bit(s) 0x%x in immediate written to %s",
+ (mask & value),
+ symbol->name);
+ stop(errbuf, EX_DATAERR);
+ /* NOTREACHED */
+ }
+
+ /*
+ * Now make sure that all of the symbols referenced by the
+ * expression are defined for this register.
+ */
+ if (symbol->info.rinfo->typecheck_masks != FALSE) {
+ for(node = expression->referenced_syms.slh_first;
+ node != NULL;
+ node = node->links.sle_next) {
+ if ((node->symbol->type == MASK
+ || node->symbol->type == FIELD
+ || node->symbol->type == ENUM
+ || node->symbol->type == ENUM_ENTRY)
+ && symlist_search(&node->symbol->info.finfo->symrefs,
+ symbol->name) == NULL) {
+ snprintf(errbuf, sizeof(errbuf),
+ "Invalid field or mask %s "
+ "for register %s",
+ node->symbol->name, symbol->name);
+ stop(errbuf, EX_DATAERR);
+ /* NOTREACHED */
+ }
+ }
+ }
+}
+
+static void
+make_expression(expression_t *immed, int value)
+{
+ SLIST_INIT(&immed->referenced_syms);
+ immed->value = value & 0xff;
+}
+
+static void
+add_conditional(symbol_t *symbol)
+{
+ static int numfuncs;
+
+ if (numfuncs == 0) {
+ /* add a special conditional, "0" */
+ symbol_t *false_func;
+
+ false_func = symtable_get("0");
+ if (false_func->type != UNINITIALIZED) {
+ stop("Conditional expression '0' "
+ "conflicts with a symbol", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ false_func->type = CONDITIONAL;
+ initialize_symbol(false_func);
+ false_func->info.condinfo->func_num = numfuncs++;
+ symlist_add(&patch_functions, false_func, SYMLIST_INSERT_HEAD);
+ }
+
+ /* This condition has occurred before */
+ if (symbol->type == CONDITIONAL)
+ return;
+
+ if (symbol->type != UNINITIALIZED) {
+ stop("Conditional expression conflicts with a symbol",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+
+ symbol->type = CONDITIONAL;
+ initialize_symbol(symbol);
+ symbol->info.condinfo->func_num = numfuncs++;
+ symlist_add(&patch_functions, symbol, SYMLIST_INSERT_HEAD);
+}
+
+static void
+add_version(const char *verstring)
+{
+ const char prefix[] = " * ";
+ int newlen;
+ int oldlen;
+
+ newlen = strlen(verstring) + strlen(prefix);
+ oldlen = 0;
+ if (versions != NULL)
+ oldlen = strlen(versions);
+ versions = realloc(versions, newlen + oldlen + 2);
+ if (versions == NULL)
+ stop("Can't allocate version string", EX_SOFTWARE);
+ strcpy(&versions[oldlen], prefix);
+ strcpy(&versions[oldlen + strlen(prefix)], verstring);
+ versions[newlen + oldlen] = '\n';
+ versions[newlen + oldlen + 1] = '\0';
+}
+
+void
+yyerror(const char *string)
+{
+ stop(string, EX_DATAERR);
+}
+
+static int
+is_download_const(expression_t *immed)
+{
+ if ((immed->referenced_syms.slh_first != NULL)
+ && (immed->referenced_syms.slh_first->symbol->type == DOWNLOAD_CONST))
+ return (TRUE);
+
+ return (FALSE);
+}
+
+static int
+is_location_address(symbol_t *sym)
+{
+ if (sym->type == SCBLOC ||
+ sym->type == SRAMLOC)
+ return (TRUE);
+ return (FALSE);
+}
+
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
new file mode 100644
index 000000000..8373447bd
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -0,0 +1,218 @@
+/*
+ * Instruction formats for the sequencer program downloaded to
+ * Aic7xxx SCSI host adapters
+ *
+ * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_insformat.h#12 $
+ *
+ * $FreeBSD$
+ */
+
+#include <asm/byteorder.h>
+
+/* 8bit ALU logic operations */
+struct ins_format1 {
+#ifdef __LITTLE_ENDIAN
+ uint32_t immediate : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+#else
+ uint32_t parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ immediate : 8;
+#endif
+};
+
+/* 8bit ALU shift/rotate operations */
+struct ins_format2 {
+#ifdef __LITTLE_ENDIAN
+ uint32_t shift_control : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+#else
+ uint32_t parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ shift_control : 8;
+#endif
+};
+
+/* 8bit branch control operations */
+struct ins_format3 {
+#ifdef __LITTLE_ENDIAN
+ uint32_t immediate : 8,
+ source : 9,
+ address : 10,
+ opcode : 4,
+ parity : 1;
+#else
+ uint32_t parity : 1,
+ opcode : 4,
+ address : 10,
+ source : 9,
+ immediate : 8;
+#endif
+};
+
+/* 16bit ALU logic operations */
+struct ins_format4 {
+#ifdef __LITTLE_ENDIAN
+ uint32_t opcode_ext : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+#else
+ uint32_t parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ opcode_ext : 8;
+#endif
+};
+
+/* 16bit branch control operations */
+struct ins_format5 {
+#ifdef __LITTLE_ENDIAN
+ uint32_t opcode_ext : 8,
+ source : 9,
+ address : 10,
+ opcode : 4,
+ parity : 1;
+#else
+ uint32_t parity : 1,
+ opcode : 4,
+ address : 10,
+ source : 9,
+ opcode_ext : 8;
+#endif
+};
+
+/* Far branch operations */
+struct ins_format6 {
+#ifdef __LITTLE_ENDIAN
+ uint32_t page : 3,
+ opcode_ext : 5,
+ source : 9,
+ address : 10,
+ opcode : 4,
+ parity : 1;
+#else
+ uint32_t parity : 1,
+ opcode : 4,
+ address : 10,
+ source : 9,
+ opcode_ext : 5,
+ page : 3;
+#endif
+};
+
+union ins_formats {
+ struct ins_format1 format1;
+ struct ins_format2 format2;
+ struct ins_format3 format3;
+ struct ins_format4 format4;
+ struct ins_format5 format5;
+ struct ins_format6 format6;
+ uint8_t bytes[4];
+ uint32_t integer;
+};
+struct instruction {
+ union ins_formats format;
+ u_int srcline;
+ struct symbol *patch_label;
+ STAILQ_ENTRY(instruction) links;
+};
+
+#define AIC_OP_OR 0x0
+#define AIC_OP_AND 0x1
+#define AIC_OP_XOR 0x2
+#define AIC_OP_ADD 0x3
+#define AIC_OP_ADC 0x4
+#define AIC_OP_ROL 0x5
+#define AIC_OP_BMOV 0x6
+
+#define AIC_OP_MVI16 0x7
+
+#define AIC_OP_JMP 0x8
+#define AIC_OP_JC 0x9
+#define AIC_OP_JNC 0xa
+#define AIC_OP_CALL 0xb
+#define AIC_OP_JNE 0xc
+#define AIC_OP_JNZ 0xd
+#define AIC_OP_JE 0xe
+#define AIC_OP_JZ 0xf
+
+/* Pseudo Ops */
+#define AIC_OP_SHL 0x10
+#define AIC_OP_SHR 0x20
+#define AIC_OP_ROR 0x30
+
+/* 16bit Ops. Low byte main opcode. High byte extended opcode. */
+#define AIC_OP_OR16 0x8005
+#define AIC_OP_AND16 0x8105
+#define AIC_OP_XOR16 0x8205
+#define AIC_OP_ADD16 0x8305
+#define AIC_OP_ADC16 0x8405
+#define AIC_OP_JNE16 0x8805
+#define AIC_OP_JNZ16 0x8905
+#define AIC_OP_JE16 0x8C05
+#define AIC_OP_JZ16 0x8B05
+#define AIC_OP_JMP16 0x9005
+#define AIC_OP_JC16 0x9105
+#define AIC_OP_JNC16 0x9205
+#define AIC_OP_CALL16 0x9305
+
+/* Page extension is low three bits of second opcode byte. */
+#define AIC_OP_JMPF 0xA005
+#define AIC_OP_CALLF 0xB005
+#define AIC_OP_JCF 0xC005
+#define AIC_OP_JNCF 0xD005
+#define AIC_OP_CMPXCHG 0xE005
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
new file mode 100644
index 000000000..708326df0
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -0,0 +1,165 @@
+%{
+/*
+ * Sub-parser for macro invocation in the Aic7xxx SCSI
+ * Host adapter sequencer assembler.
+ *
+ * Copyright (c) 2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_macro_gram.y#5 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_insformat.h"
+
+static symbol_t *macro_symbol;
+
+static void add_macro_arg(const char *argtext, int position);
+void mmerror(const char *string);
+
+%}
+
+%union {
+ int value;
+ char *str;
+ symbol_t *sym;
+}
+
+
+%token <str> T_ARG
+
+%token <sym> T_SYMBOL
+
+%type <value> macro_arglist
+
+%%
+
+macrocall:
+ T_SYMBOL '('
+ {
+ macro_symbol = $1;
+ }
+ macro_arglist ')'
+ {
+ if (macro_symbol->info.macroinfo->narg != $4) {
+ printf("Narg == %d", macro_symbol->info.macroinfo->narg);
+ stop("Too few arguments for macro invocation",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ macro_symbol = NULL;
+ YYACCEPT;
+ }
+;
+
+macro_arglist:
+ {
+ /* Macros can take 0 arguments */
+ $$ = 0;
+ }
+| T_ARG
+ {
+ $$ = 1;
+ add_macro_arg($1, 1);
+ }
+| macro_arglist ',' T_ARG
+ {
+ if ($1 == 0) {
+ stop("Comma without preceding argument in arg list",
+ EX_DATAERR);
+ /* NOTREACHED */
+ }
+ $$ = $1 + 1;
+ add_macro_arg($3, $$);
+ }
+;
+
+%%
+
+static void
+add_macro_arg(const char *argtext, int argnum)
+{
+ struct macro_arg *marg;
+ int i;
+
+ if (macro_symbol == NULL || macro_symbol->type != MACRO) {
+ stop("Invalid current symbol for adding macro arg",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ /*
+ * Macro Invocation. Find the appropriate argument and fill
+ * in the replace ment text for this call.
+ */
+ i = 0;
+ STAILQ_FOREACH(marg, &macro_symbol->info.macroinfo->args, links) {
+ i++;
+ if (i == argnum)
+ break;
+ }
+ if (marg == NULL) {
+ stop("Too many arguments for macro invocation", EX_DATAERR);
+ /* NOTREACHED */
+ }
+ marg->replacement_text = strdup(argtext);
+ if (marg->replacement_text == NULL) {
+ stop("Unable to replicate replacement text", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+}
+
+void
+mmerror(const char *string)
+{
+ stop(string, EX_DATAERR);
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
new file mode 100644
index 000000000..c0457b8c3
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -0,0 +1,157 @@
+%{
+/*
+ * Sub-Lexical Analyzer for macro invokation in
+ * the Aic7xxx SCSI Host adapter sequencer assembler.
+ *
+ * Copyright (c) 2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_macro_scan.l#8 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <limits.h>
+#include <regex.h>
+#include <stdio.h>
+#include <string.h>
+#include <sysexits.h>
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_macro_gram.h"
+
+#define MAX_STR_CONST 4096
+static char string_buf[MAX_STR_CONST];
+static char *string_buf_ptr;
+static int parren_count;
+static char buf[255];
+int mmlineno;
+%}
+
+WORD [A-Za-z_][-A-Za-z_0-9]*
+SPACE [ \t]+
+MCARG [^(), \t]+
+
+%x ARGLIST
+
+%%
+\n {
+ ++mmlineno;
+ }
+\r ;
+<ARGLIST>{SPACE} ;
+<ARGLIST>\( {
+ parren_count++;
+ if (parren_count == 1) {
+ string_buf_ptr = string_buf;
+ return ('(');
+ }
+ *string_buf_ptr++ = '(';
+ }
+<ARGLIST>\) {
+ if (parren_count == 1) {
+ if (string_buf_ptr != string_buf) {
+ /*
+ * Return an argument and
+ * rescan this parren so we
+ * can return it as well.
+ */
+ *string_buf_ptr = '\0';
+ mmlval.str = string_buf;
+ string_buf_ptr = string_buf;
+ unput(')');
+ return T_ARG;
+ }
+ BEGIN INITIAL;
+ return (')');
+ }
+ parren_count--;
+ *string_buf_ptr++ = ')';
+ }
+<ARGLIST>{MCARG} {
+ char *yptr;
+
+ yptr = mmtext;
+ while (*yptr)
+ *string_buf_ptr++ = *yptr++;
+ }
+<ARGLIST>\, {
+ if (string_buf_ptr != string_buf) {
+ /*
+ * Return an argument and
+ * rescan this comma so we
+ * can return it as well.
+ */
+ *string_buf_ptr = '\0';
+ mmlval.str = string_buf;
+ string_buf_ptr = string_buf;
+ unput(',');
+ return T_ARG;
+ }
+ return ',';
+ }
+{WORD}[(] {
+ /* May be a symbol or a macro invocation. */
+ mmlval.sym = symtable_get(mmtext);
+ if (mmlval.sym->type != MACRO) {
+ stop("Expecting Macro Name",
+ EX_DATAERR);
+ }
+ unput('(');
+ parren_count = 0;
+ BEGIN ARGLIST;
+ return T_SYMBOL;
+ }
+. {
+ snprintf(buf, sizeof(buf), "Invalid character "
+ "'%c'", mmtext[0]);
+ stop(buf, EX_DATAERR);
+ }
+%%
+
+int
+mmwrap()
+{
+ stop("EOF encountered in macro call", EX_DATAERR);
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
new file mode 100644
index 000000000..93c8667cd
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -0,0 +1,622 @@
+%{
+/*
+ * Lexical Analyzer for the Aic7xxx SCSI Host adapter sequencer assembler.
+ *
+ * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_scan.l#20 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <limits.h>
+#include <regex.h>
+#include <stdio.h>
+#include <string.h>
+#include <sysexits.h>
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_gram.h"
+
+/* This is used for macro body capture too, so err on the large size. */
+#define MAX_STR_CONST 4096
+static char string_buf[MAX_STR_CONST];
+static char *string_buf_ptr;
+static int parren_count;
+static int quote_count;
+static char buf[255];
+%}
+
+PATH ([/]*[-A-Za-z0-9_.])+
+WORD [A-Za-z_][-A-Za-z_0-9]*
+SPACE [ \t]+
+MCARG [^(), \t]+
+MBODY ((\\[^\n])*[^\n\\]*)+
+
+%x COMMENT
+%x CEXPR
+%x INCLUDE
+%x STRING
+%x MACRODEF
+%x MACROARGLIST
+%x MACROCALLARGS
+%x MACROBODY
+
+%%
+\n { ++yylineno; }
+\r ;
+"/*" { BEGIN COMMENT; /* Enter comment eating state */ }
+<COMMENT>"/*" { fprintf(stderr, "Warning! Comment within comment."); }
+<COMMENT>\n { ++yylineno; }
+<COMMENT>[^*/\n]* ;
+<COMMENT>"*"+[^*/\n]* ;
+<COMMENT>"/"+[^*/\n]* ;
+<COMMENT>"*"+"/" { BEGIN INITIAL; }
+if[ \t]*\( {
+ string_buf_ptr = string_buf;
+ parren_count = 1;
+ BEGIN CEXPR;
+ return T_IF;
+ }
+<CEXPR>\( { *string_buf_ptr++ = '('; parren_count++; }
+<CEXPR>\) {
+ parren_count--;
+ if (parren_count == 0) {
+ /* All done */
+ BEGIN INITIAL;
+ *string_buf_ptr = '\0';
+ yylval.sym = symtable_get(string_buf);
+ return T_CEXPR;
+ } else {
+ *string_buf_ptr++ = ')';
+ }
+ }
+<CEXPR>\n { ++yylineno; }
+<CEXPR>\r ;
+<CEXPR>[^()\n]+ {
+ char *yptr;
+
+ yptr = yytext;
+ while (*yptr != '\0') {
+ /* Remove duplicate spaces */
+ if (*yptr == '\t')
+ *yptr = ' ';
+ if (*yptr == ' '
+ && string_buf_ptr != string_buf
+ && string_buf_ptr[-1] == ' ')
+ yptr++;
+ else
+ *string_buf_ptr++ = *yptr++;
+ }
+ }
+else { return T_ELSE; }
+VERSION { return T_VERSION; }
+PREFIX { return T_PREFIX; }
+PATCH_ARG_LIST { return T_PATCH_ARG_LIST; }
+\" {
+ string_buf_ptr = string_buf;
+ BEGIN STRING;
+ }
+<STRING>[^"]+ {
+ char *yptr;
+
+ yptr = yytext;
+ while (*yptr)
+ *string_buf_ptr++ = *yptr++;
+ }
+<STRING>\" {
+ /* All done */
+ BEGIN INITIAL;
+ *string_buf_ptr = '\0';
+ yylval.str = string_buf;
+ return T_STRING;
+ }
+{SPACE} ;
+
+ /* Register/SCB/SRAM definition keywords */
+export { return T_EXPORT; }
+register { return T_REGISTER; }
+const { yylval.value = FALSE; return T_CONST; }
+download { return T_DOWNLOAD; }
+address { return T_ADDRESS; }
+count { return T_COUNT; }
+access_mode { return T_ACCESS_MODE; }
+dont_generate_debug_code { return T_DONT_GENERATE_DEBUG_CODE; }
+modes { return T_MODES; }
+RW|RO|WO {
+ if (strcmp(yytext, "RW") == 0)
+ yylval.value = RW;
+ else if (strcmp(yytext, "RO") == 0)
+ yylval.value = RO;
+ else
+ yylval.value = WO;
+ return T_MODE;
+ }
+field { return T_FIELD; }
+enum { return T_ENUM; }
+mask { return T_MASK; }
+alias { return T_ALIAS; }
+size { return T_SIZE; }
+scb { return T_SCB; }
+scratch_ram { return T_SRAM; }
+accumulator { return T_ACCUM; }
+mode_pointer { return T_MODE_PTR; }
+allones { return T_ALLONES; }
+allzeros { return T_ALLZEROS; }
+none { return T_NONE; }
+sindex { return T_SINDEX; }
+A { return T_A; }
+
+ /* Instruction Formatting */
+PAD_PAGE { return T_PAD_PAGE; }
+BEGIN_CRITICAL { return T_BEGIN_CS; }
+END_CRITICAL { return T_END_CS; }
+SET_SRC_MODE { return T_SET_SRC_MODE; }
+SET_DST_MODE { return T_SET_DST_MODE; }
+
+ /* Opcodes */
+shl { return T_SHL; }
+shr { return T_SHR; }
+ror { return T_ROR; }
+rol { return T_ROL; }
+mvi { return T_MVI; }
+mov { return T_MOV; }
+clr { return T_CLR; }
+jmp { return T_JMP; }
+jc { return T_JC; }
+jnc { return T_JNC; }
+je { return T_JE; }
+jne { return T_JNE; }
+jz { return T_JZ; }
+jnz { return T_JNZ; }
+call { return T_CALL; }
+add { return T_ADD; }
+adc { return T_ADC; }
+bmov { return T_BMOV; }
+inc { return T_INC; }
+dec { return T_DEC; }
+stc { return T_STC; }
+clc { return T_CLC; }
+cmp { return T_CMP; }
+not { return T_NOT; }
+xor { return T_XOR; }
+test { return T_TEST;}
+and { return T_AND; }
+or { return T_OR; }
+ret { return T_RET; }
+nop { return T_NOP; }
+
+ /* ARP2 16bit extensions */
+ /* or16 { return T_OR16; } */
+ /* and16 { return T_AND16; }*/
+ /* xor16 { return T_XOR16; }*/
+ /* add16 { return T_ADD16; }*/
+ /* adc16 { return T_ADC16; }*/
+ /* mvi16 { return T_MVI16; }*/
+ /* test16 { return T_TEST16; }*/
+ /* cmp16 { return T_CMP16; }*/
+ /* cmpxchg { return T_CMPXCHG; }*/
+
+ /* Allowed Symbols */
+\<\< { return T_EXPR_LSHIFT; }
+\>\> { return T_EXPR_RSHIFT; }
+[-+,:()~|&."{};<>[\]/*!=] { return yytext[0]; }
+
+ /* Number processing */
+0[0-7]* {
+ yylval.value = strtol(yytext, NULL, 8);
+ return T_NUMBER;
+ }
+
+0[xX][0-9a-fA-F]+ {
+ yylval.value = strtoul(yytext + 2, NULL, 16);
+ return T_NUMBER;
+ }
+
+[1-9][0-9]* {
+ yylval.value = strtol(yytext, NULL, 10);
+ return T_NUMBER;
+ }
+ /* Include Files */
+#include{SPACE} {
+ BEGIN INCLUDE;
+ quote_count = 0;
+ return T_INCLUDE;
+ }
+<INCLUDE>[<] { return yytext[0]; }
+<INCLUDE>[>] { BEGIN INITIAL; return yytext[0]; }
+<INCLUDE>[\"] {
+ if (quote_count != 0)
+ BEGIN INITIAL;
+ quote_count++;
+ return yytext[0];
+ }
+<INCLUDE>{PATH} {
+ char *yptr;
+
+ yptr = yytext;
+ string_buf_ptr = string_buf;
+ while (*yptr)
+ *string_buf_ptr++ = *yptr++;
+ yylval.str = string_buf;
+ *string_buf_ptr = '\0';
+ return T_PATH;
+ }
+<INCLUDE>. { stop("Invalid include line", EX_DATAERR); }
+#define{SPACE} {
+ BEGIN MACRODEF;
+ return T_DEFINE;
+ }
+<MACRODEF>{WORD}{SPACE} {
+ char *yptr;
+
+ /* Strip space and return as a normal symbol */
+ yptr = yytext;
+ while (*yptr != ' ' && *yptr != '\t')
+ yptr++;
+ *yptr = '\0';
+ yylval.sym = symtable_get(yytext);
+ string_buf_ptr = string_buf;
+ BEGIN MACROBODY;
+ return T_SYMBOL;
+ }
+<MACRODEF>{WORD}\( {
+ /*
+ * We store the symbol with its opening
+ * parren so we can differentiate macros
+ * that take args from macros with the
+ * same name that do not take args as
+ * is allowed in C.
+ */
+ BEGIN MACROARGLIST;
+ yylval.sym = symtable_get(yytext);
+ unput('(');
+ return T_SYMBOL;
+ }
+<MACROARGLIST>{WORD} {
+ yylval.str = yytext;
+ return T_ARG;
+ }
+<MACROARGLIST>{SPACE} ;
+<MACROARGLIST>[(,] {
+ return yytext[0];
+ }
+<MACROARGLIST>[)] {
+ string_buf_ptr = string_buf;
+ BEGIN MACROBODY;
+ return ')';
+ }
+<MACROARGLIST>. {
+ snprintf(buf, sizeof(buf), "Invalid character "
+ "'%c' in macro argument list",
+ yytext[0]);
+ stop(buf, EX_DATAERR);
+ }
+<MACROCALLARGS>{SPACE} ;
+<MACROCALLARGS>\( {
+ parren_count++;
+ if (parren_count == 1)
+ return ('(');
+ *string_buf_ptr++ = '(';
+ }
+<MACROCALLARGS>\) {
+ parren_count--;
+ if (parren_count == 0) {
+ BEGIN INITIAL;
+ return (')');
+ }
+ *string_buf_ptr++ = ')';
+ }
+<MACROCALLARGS>{MCARG} {
+ char *yptr;
+
+ yptr = yytext;
+ while (*yptr)
+ *string_buf_ptr++ = *yptr++;
+ }
+<MACROCALLARGS>\, {
+ if (string_buf_ptr != string_buf) {
+ /*
+ * Return an argument and
+ * rescan this comma so we
+ * can return it as well.
+ */
+ *string_buf_ptr = '\0';
+ yylval.str = string_buf;
+ string_buf_ptr = string_buf;
+ unput(',');
+ return T_ARG;
+ }
+ return ',';
+ }
+<MACROBODY>\\\n {
+ /* Eat escaped newlines. */
+ ++yylineno;
+ }
+<MACROBODY>\r ;
+<MACROBODY>\n {
+ /* Macros end on the first unescaped newline. */
+ BEGIN INITIAL;
+ *string_buf_ptr = '\0';
+ yylval.str = string_buf;
+ ++yylineno;
+ return T_MACROBODY;
+ }
+<MACROBODY>{MBODY} {
+ char *yptr;
+ char c;
+
+ yptr = yytext;
+ while (c = *yptr++) {
+ /*
+ * Strip carriage returns.
+ */
+ if (c == '\r')
+ continue;
+ *string_buf_ptr++ = c;
+ }
+ }
+{WORD}\( {
+ char *yptr;
+ char *ycopy;
+
+ /* May be a symbol or a macro invocation. */
+ yylval.sym = symtable_get(yytext);
+ if (yylval.sym->type == MACRO) {
+ YY_BUFFER_STATE old_state;
+ YY_BUFFER_STATE temp_state;
+
+ ycopy = strdup(yytext);
+ yptr = ycopy + yyleng;
+ while (yptr > ycopy)
+ unput(*--yptr);
+ old_state = YY_CURRENT_BUFFER;
+ temp_state =
+ yy_create_buffer(stdin,
+ YY_BUF_SIZE);
+ yy_switch_to_buffer(temp_state);
+ mm_switch_to_buffer(old_state);
+ mmparse();
+ mm_switch_to_buffer(temp_state);
+ yy_switch_to_buffer(old_state);
+ mm_delete_buffer(temp_state);
+ expand_macro(yylval.sym);
+ } else {
+ if (yylval.sym->type == UNINITIALIZED) {
+ /* Try without the '(' */
+ symbol_delete(yylval.sym);
+ yytext[yyleng-1] = '\0';
+ yylval.sym =
+ symtable_get(yytext);
+ }
+ unput('(');
+ return T_SYMBOL;
+ }
+ }
+{WORD} {
+ yylval.sym = symtable_get(yytext);
+ if (yylval.sym->type == MACRO) {
+ expand_macro(yylval.sym);
+ } else {
+ return T_SYMBOL;
+ }
+ }
+. {
+ snprintf(buf, sizeof(buf), "Invalid character "
+ "'%c'", yytext[0]);
+ stop(buf, EX_DATAERR);
+ }
+%%
+
+typedef struct include {
+ YY_BUFFER_STATE buffer;
+ int lineno;
+ char *filename;
+ SLIST_ENTRY(include) links;
+}include_t;
+
+SLIST_HEAD(, include) include_stack;
+
+void
+include_file(char *file_name, include_type type)
+{
+ FILE *newfile;
+ include_t *include;
+
+ newfile = NULL;
+ /* Try the current directory first */
+ if (includes_search_curdir != 0 || type == SOURCE_FILE)
+ newfile = fopen(file_name, "r");
+
+ if (newfile == NULL && type != SOURCE_FILE) {
+ path_entry_t include_dir;
+ for (include_dir = search_path.slh_first;
+ include_dir != NULL;
+ include_dir = include_dir->links.sle_next) {
+ char fullname[PATH_MAX];
+
+ if ((include_dir->quoted_includes_only == TRUE)
+ && (type != QUOTED_INCLUDE))
+ continue;
+
+ snprintf(fullname, sizeof(fullname),
+ "%s/%s", include_dir->directory, file_name);
+
+ if ((newfile = fopen(fullname, "r")) != NULL)
+ break;
+ }
+ }
+
+ if (newfile == NULL) {
+ perror(file_name);
+ stop("Unable to open input file", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+
+ if (type != SOURCE_FILE) {
+ include = (include_t *)malloc(sizeof(include_t));
+ if (include == NULL) {
+ stop("Unable to allocate include stack entry",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ include->buffer = YY_CURRENT_BUFFER;
+ include->lineno = yylineno;
+ include->filename = yyfilename;
+ SLIST_INSERT_HEAD(&include_stack, include, links);
+ }
+ yy_switch_to_buffer(yy_create_buffer(newfile, YY_BUF_SIZE));
+ yylineno = 1;
+ yyfilename = strdup(file_name);
+}
+
+static void next_substitution(struct symbol *mac_symbol, const char *body_pos,
+ const char **next_match,
+ struct macro_arg **match_marg, regmatch_t *match);
+
+void
+expand_macro(struct symbol *macro_symbol)
+{
+ struct macro_arg *marg;
+ struct macro_arg *match_marg;
+ const char *body_head;
+ const char *body_pos;
+ const char *next_match;
+
+ /*
+ * Due to the nature of unput, we must work
+ * backwards through the macro body performing
+ * any expansions.
+ */
+ body_head = macro_symbol->info.macroinfo->body;
+ body_pos = body_head + strlen(body_head);
+ while (body_pos > body_head) {
+ regmatch_t match;
+
+ next_match = body_head;
+ match_marg = NULL;
+ next_substitution(macro_symbol, body_pos, &next_match,
+ &match_marg, &match);
+
+ /* Put back everything up until the replacement. */
+ while (body_pos > next_match)
+ unput(*--body_pos);
+
+ /* Perform the replacement. */
+ if (match_marg != NULL) {
+ const char *strp;
+
+ next_match = match_marg->replacement_text;
+ strp = next_match + strlen(next_match);
+ while (strp > next_match)
+ unput(*--strp);
+
+ /* Skip past the unexpanded macro arg. */
+ body_pos -= match.rm_eo - match.rm_so;
+ }
+ }
+
+ /* Cleanup replacement text. */
+ STAILQ_FOREACH(marg, &macro_symbol->info.macroinfo->args, links) {
+ free(marg->replacement_text);
+ }
+}
+
+/*
+ * Find the next substitution in the macro working backwards from
+ * body_pos until the beginning of the macro buffer. next_match
+ * should be initialized to the beginning of the macro buffer prior
+ * to calling this routine.
+ */
+static void
+next_substitution(struct symbol *mac_symbol, const char *body_pos,
+ const char **next_match, struct macro_arg **match_marg,
+ regmatch_t *match)
+{
+ regmatch_t matches[2];
+ struct macro_arg *marg;
+ const char *search_pos;
+ int retval;
+
+ do {
+ search_pos = *next_match;
+
+ STAILQ_FOREACH(marg, &mac_symbol->info.macroinfo->args, links) {
+
+ retval = regexec(&marg->arg_regex, search_pos, 2,
+ matches, 0);
+ if (retval == 0
+ && (matches[1].rm_eo + search_pos) <= body_pos
+ && (matches[1].rm_eo + search_pos) > *next_match) {
+ *match = matches[1];
+ *next_match = match->rm_eo + search_pos;
+ *match_marg = marg;
+ }
+ }
+ } while (search_pos != *next_match);
+}
+
+int
+yywrap()
+{
+ include_t *include;
+
+ yy_delete_buffer(YY_CURRENT_BUFFER);
+ (void)fclose(yyin);
+ if (yyfilename != NULL)
+ free(yyfilename);
+ yyfilename = NULL;
+ include = include_stack.slh_first;
+ if (include != NULL) {
+ yy_switch_to_buffer(include->buffer);
+ yylineno = include->lineno;
+ yyfilename = include->filename;
+ SLIST_REMOVE_HEAD(&include_stack, links);
+ free(include);
+ return (0);
+ }
+ return (1);
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
new file mode 100644
index 000000000..232aff1fe
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -0,0 +1,693 @@
+/*
+ * Aic7xxx SCSI host adapter firmware assembler symbol table implementation
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.c#24 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#ifdef __linux__
+#include "aicdb.h"
+#else
+#include <db.h>
+#endif
+#include <fcntl.h>
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#include "aicasm_symbol.h"
+#include "aicasm.h"
+
+static DB *symtable;
+
+symbol_t *
+symbol_create(char *name)
+{
+ symbol_t *new_symbol;
+
+ new_symbol = (symbol_t *)malloc(sizeof(symbol_t));
+ if (new_symbol == NULL) {
+ perror("Unable to create new symbol");
+ exit(EX_SOFTWARE);
+ }
+ memset(new_symbol, 0, sizeof(*new_symbol));
+ new_symbol->name = strdup(name);
+ if (new_symbol->name == NULL)
+ stop("Unable to strdup symbol name", EX_SOFTWARE);
+ new_symbol->type = UNINITIALIZED;
+ new_symbol->count = 1;
+ return (new_symbol);
+}
+
+void
+symbol_delete(symbol_t *symbol)
+{
+ if (symtable != NULL) {
+ DBT key;
+
+ key.data = symbol->name;
+ key.size = strlen(symbol->name);
+ symtable->del(symtable, &key, /*flags*/0);
+ }
+ switch(symbol->type) {
+ case SCBLOC:
+ case SRAMLOC:
+ case REGISTER:
+ if (symbol->info.rinfo != NULL)
+ free(symbol->info.rinfo);
+ break;
+ case ALIAS:
+ if (symbol->info.ainfo != NULL)
+ free(symbol->info.ainfo);
+ break;
+ case MASK:
+ case FIELD:
+ case ENUM:
+ case ENUM_ENTRY:
+ if (symbol->info.finfo != NULL) {
+ symlist_free(&symbol->info.finfo->symrefs);
+ free(symbol->info.finfo);
+ }
+ break;
+ case DOWNLOAD_CONST:
+ case CONST:
+ if (symbol->info.cinfo != NULL)
+ free(symbol->info.cinfo);
+ break;
+ case LABEL:
+ if (symbol->info.linfo != NULL)
+ free(symbol->info.linfo);
+ break;
+ case UNINITIALIZED:
+ default:
+ break;
+ }
+ free(symbol->name);
+ free(symbol);
+}
+
+void
+symtable_open()
+{
+ symtable = dbopen(/*filename*/NULL,
+ O_CREAT | O_NONBLOCK | O_RDWR, /*mode*/0, DB_HASH,
+ /*openinfo*/NULL);
+
+ if (symtable == NULL) {
+ perror("Symbol table creation failed");
+ exit(EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+}
+
+void
+symtable_close()
+{
+ if (symtable != NULL) {
+ DBT key;
+ DBT data;
+
+ while (symtable->seq(symtable, &key, &data, R_FIRST) == 0) {
+ symbol_t *stored_ptr;
+
+ memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+ symbol_delete(stored_ptr);
+ }
+ symtable->close(symtable);
+ }
+}
+
+/*
+ * The semantics of get is to return an uninitialized symbol entry
+ * if a lookup fails.
+ */
+symbol_t *
+symtable_get(char *name)
+{
+ symbol_t *stored_ptr;
+ DBT key;
+ DBT data;
+ int retval;
+
+ key.data = (void *)name;
+ key.size = strlen(name);
+
+ if ((retval = symtable->get(symtable, &key, &data, /*flags*/0)) != 0) {
+ if (retval == -1) {
+ perror("Symbol table get operation failed");
+ exit(EX_SOFTWARE);
+ /* NOTREACHED */
+ } else if (retval == 1) {
+ /* Symbol wasn't found, so create a new one */
+ symbol_t *new_symbol;
+
+ new_symbol = symbol_create(name);
+ data.data = &new_symbol;
+ data.size = sizeof(new_symbol);
+ if (symtable->put(symtable, &key, &data,
+ /*flags*/0) !=0) {
+ perror("Symtable put failed");
+ exit(EX_SOFTWARE);
+ }
+ return (new_symbol);
+ } else {
+ perror("Unexpected return value from db get routine");
+ exit(EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ }
+ memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+ stored_ptr->count++;
+ data.data = &stored_ptr;
+ if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) {
+ perror("Symtable put failed");
+ exit(EX_SOFTWARE);
+ }
+ return (stored_ptr);
+}
+
+symbol_node_t *
+symlist_search(symlist_t *symlist, char *symname)
+{
+ symbol_node_t *curnode;
+
+ curnode = SLIST_FIRST(symlist);
+ while(curnode != NULL) {
+ if (strcmp(symname, curnode->symbol->name) == 0)
+ break;
+ curnode = SLIST_NEXT(curnode, links);
+ }
+ return (curnode);
+}
+
+void
+symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
+{
+ symbol_node_t *newnode;
+
+ newnode = (symbol_node_t *)malloc(sizeof(symbol_node_t));
+ if (newnode == NULL) {
+ stop("symlist_add: Unable to malloc symbol_node", EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+ newnode->symbol = symbol;
+ if (how == SYMLIST_SORT) {
+ symbol_node_t *curnode;
+ int field;
+
+ field = FALSE;
+ switch(symbol->type) {
+ case REGISTER:
+ case SCBLOC:
+ case SRAMLOC:
+ break;
+ case FIELD:
+ case MASK:
+ case ENUM:
+ case ENUM_ENTRY:
+ field = TRUE;
+ break;
+ default:
+ stop("symlist_add: Invalid symbol type for sorting",
+ EX_SOFTWARE);
+ /* NOTREACHED */
+ }
+
+ curnode = SLIST_FIRST(symlist);
+ if (curnode == NULL
+ || (field
+ && (curnode->symbol->type > newnode->symbol->type
+ || (curnode->symbol->type == newnode->symbol->type
+ && (curnode->symbol->info.finfo->value >
+ newnode->symbol->info.finfo->value))))
+ || (!field && (curnode->symbol->info.rinfo->address >
+ newnode->symbol->info.rinfo->address))) {
+ SLIST_INSERT_HEAD(symlist, newnode, links);
+ return;
+ }
+
+ while (1) {
+ if (SLIST_NEXT(curnode, links) == NULL) {
+ SLIST_INSERT_AFTER(curnode, newnode,
+ links);
+ break;
+ } else {
+ symbol_t *cursymbol;
+
+ cursymbol = SLIST_NEXT(curnode, links)->symbol;
+ if ((field
+ && (cursymbol->type > symbol->type
+ || (cursymbol->type == symbol->type
+ && (cursymbol->info.finfo->value >
+ symbol->info.finfo->value))))
+ || (!field
+ && (cursymbol->info.rinfo->address >
+ symbol->info.rinfo->address))) {
+ SLIST_INSERT_AFTER(curnode, newnode,
+ links);
+ break;
+ }
+ }
+ curnode = SLIST_NEXT(curnode, links);
+ }
+ } else {
+ SLIST_INSERT_HEAD(symlist, newnode, links);
+ }
+}
+
+void
+symlist_free(symlist_t *symlist)
+{
+ symbol_node_t *node1, *node2;
+
+ node1 = SLIST_FIRST(symlist);
+ while (node1 != NULL) {
+ node2 = SLIST_NEXT(node1, links);
+ free(node1);
+ node1 = node2;
+ }
+ SLIST_INIT(symlist);
+}
+
+void
+symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1,
+ symlist_t *symlist_src2)
+{
+ symbol_node_t *node;
+
+ *symlist_dest = *symlist_src1;
+ while((node = SLIST_FIRST(symlist_src2)) != NULL) {
+ SLIST_REMOVE_HEAD(symlist_src2, links);
+ SLIST_INSERT_HEAD(symlist_dest, node, links);
+ }
+
+ /* These are now empty */
+ SLIST_INIT(symlist_src1);
+ SLIST_INIT(symlist_src2);
+}
+
+void
+aic_print_file_prologue(FILE *ofile)
+{
+
+ if (ofile == NULL)
+ return;
+
+ fprintf(ofile,
+"/*\n"
+" * DO NOT EDIT - This file is automatically generated\n"
+" * from the following source files:\n"
+" *\n"
+"%s */\n",
+ versions);
+}
+
+void
+aic_print_include(FILE *dfile, char *include_file)
+{
+
+ if (dfile == NULL)
+ return;
+ fprintf(dfile, "\n#include \"%s\"\n\n", include_file);
+}
+
+void
+aic_print_reg_dump_types(FILE *ofile)
+{
+ if (ofile == NULL)
+ return;
+
+ fprintf(ofile,
+"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
+"typedef struct %sreg_parse_entry {\n"
+" char *name;\n"
+" uint8_t value;\n"
+" uint8_t mask;\n"
+"} %sreg_parse_entry_t;\n"
+"\n",
+ prefix, prefix, prefix);
+}
+
+static void
+aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
+{
+ if (dfile == NULL)
+ return;
+
+ fprintf(dfile,
+"static const %sreg_parse_entry_t %s_parse_table[] = {\n",
+ prefix,
+ regnode->symbol->name);
+}
+
+static void
+aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
+ symbol_node_t *regnode, u_int num_entries)
+{
+ char *lower_name;
+ char *letter;
+
+ lower_name = strdup(regnode->symbol->name);
+ if (lower_name == NULL)
+ stop("Unable to strdup symbol name", EX_SOFTWARE);
+
+ for (letter = lower_name; *letter != '\0'; letter++)
+ *letter = tolower(*letter);
+
+ if (dfile != NULL) {
+ if (num_entries != 0)
+ fprintf(dfile,
+"\n"
+"};\n"
+"\n");
+
+ fprintf(dfile,
+"int\n"
+"%s%s_print(u_int regvalue, u_int *cur_col, u_int wrap)\n"
+"{\n"
+" return (%sprint_register(%s%s, %d, \"%s\",\n"
+" 0x%02x, regvalue, cur_col, wrap));\n"
+"}\n"
+"\n",
+ prefix,
+ lower_name,
+ prefix,
+ num_entries != 0 ? regnode->symbol->name : "NULL",
+ num_entries != 0 ? "_parse_table" : "",
+ num_entries,
+ regnode->symbol->name,
+ regnode->symbol->info.rinfo->address);
+ }
+
+ fprintf(ofile,
+"#if AIC_DEBUG_REGISTERS\n"
+"%sreg_print_t %s%s_print;\n"
+"#else\n"
+"#define %s%s_print(regvalue, cur_col, wrap) \\\n"
+" %sprint_register(NULL, 0, \"%s\", 0x%02x, regvalue, cur_col, wrap)\n"
+"#endif\n"
+"\n",
+ prefix,
+ prefix,
+ lower_name,
+ prefix,
+ lower_name,
+ prefix,
+ regnode->symbol->name,
+ regnode->symbol->info.rinfo->address);
+}
+
+static void
+aic_print_reg_dump_entry(FILE *dfile, symbol_node_t *curnode)
+{
+ int num_tabs;
+
+ if (dfile == NULL)
+ return;
+
+ fprintf(dfile,
+" { \"%s\",",
+ curnode->symbol->name);
+
+ num_tabs = 3 - (strlen(curnode->symbol->name) + 5) / 8;
+
+ while (num_tabs-- > 0)
+ fputc('\t', dfile);
+ fprintf(dfile, "0x%02x, 0x%02x }",
+ curnode->symbol->info.finfo->value,
+ curnode->symbol->info.finfo->mask);
+}
+
+void
+symtable_dump(FILE *ofile, FILE *dfile)
+{
+ /*
+ * Sort the registers by address with a simple insertion sort.
+ * Put bitmasks next to the first register that defines them.
+ * Put constants at the end.
+ */
+ symlist_t registers;
+ symlist_t masks;
+ symlist_t constants;
+ symlist_t download_constants;
+ symlist_t aliases;
+ symlist_t exported_labels;
+ symbol_node_t *curnode;
+ symbol_node_t *regnode;
+ DBT key;
+ DBT data;
+ int flag;
+ int reg_count = 0, reg_used = 0;
+ u_int i;
+
+ if (symtable == NULL)
+ return;
+
+ SLIST_INIT(&registers);
+ SLIST_INIT(&masks);
+ SLIST_INIT(&constants);
+ SLIST_INIT(&download_constants);
+ SLIST_INIT(&aliases);
+ SLIST_INIT(&exported_labels);
+ flag = R_FIRST;
+ while (symtable->seq(symtable, &key, &data, flag) == 0) {
+ symbol_t *cursym;
+
+ memcpy(&cursym, data.data, sizeof(cursym));
+ switch(cursym->type) {
+ case REGISTER:
+ case SCBLOC:
+ case SRAMLOC:
+ symlist_add(&registers, cursym, SYMLIST_SORT);
+ break;
+ case MASK:
+ case FIELD:
+ case ENUM:
+ case ENUM_ENTRY:
+ symlist_add(&masks, cursym, SYMLIST_SORT);
+ break;
+ case CONST:
+ symlist_add(&constants, cursym,
+ SYMLIST_INSERT_HEAD);
+ break;
+ case DOWNLOAD_CONST:
+ symlist_add(&download_constants, cursym,
+ SYMLIST_INSERT_HEAD);
+ break;
+ case ALIAS:
+ symlist_add(&aliases, cursym,
+ SYMLIST_INSERT_HEAD);
+ break;
+ case LABEL:
+ if (cursym->info.linfo->exported == 0)
+ break;
+ symlist_add(&exported_labels, cursym,
+ SYMLIST_INSERT_HEAD);
+ break;
+ default:
+ break;
+ }
+ flag = R_NEXT;
+ }
+
+ /* Register dianostic functions/declarations first. */
+ aic_print_file_prologue(ofile);
+ aic_print_reg_dump_types(ofile);
+ aic_print_file_prologue(dfile);
+ aic_print_include(dfile, stock_include_file);
+ SLIST_FOREACH(curnode, &registers, links) {
+
+ if (curnode->symbol->dont_generate_debug_code)
+ continue;
+
+ switch(curnode->symbol->type) {
+ case REGISTER:
+ case SCBLOC:
+ case SRAMLOC:
+ {
+ symlist_t *fields;
+ symbol_node_t *fieldnode;
+ int num_entries;
+
+ num_entries = 0;
+ reg_count++;
+ if (curnode->symbol->count == 1)
+ break;
+ fields = &curnode->symbol->info.rinfo->fields;
+ SLIST_FOREACH(fieldnode, fields, links) {
+ if (num_entries == 0)
+ aic_print_reg_dump_start(dfile,
+ curnode);
+ else if (dfile != NULL)
+ fputs(",\n", dfile);
+ num_entries++;
+ aic_print_reg_dump_entry(dfile, fieldnode);
+ }
+ aic_print_reg_dump_end(ofile, dfile,
+ curnode, num_entries);
+ reg_used++;
+ }
+ default:
+ break;
+ }
+ }
+ fprintf(stderr, "%s: %d of %d register definitions used\n", appname,
+ reg_used, reg_count);
+
+ /* Fold in the masks and bits */
+ while (SLIST_FIRST(&masks) != NULL) {
+ char *regname;
+
+ curnode = SLIST_FIRST(&masks);
+ SLIST_REMOVE_HEAD(&masks, links);
+
+ regnode = SLIST_FIRST(&curnode->symbol->info.finfo->symrefs);
+ regname = regnode->symbol->name;
+ regnode = symlist_search(&registers, regname);
+ SLIST_INSERT_AFTER(regnode, curnode, links);
+ }
+
+ /* Add the aliases */
+ while (SLIST_FIRST(&aliases) != NULL) {
+ char *regname;
+
+ curnode = SLIST_FIRST(&aliases);
+ SLIST_REMOVE_HEAD(&aliases, links);
+
+ regname = curnode->symbol->info.ainfo->parent->name;
+ regnode = symlist_search(&registers, regname);
+ SLIST_INSERT_AFTER(regnode, curnode, links);
+ }
+
+ /* Output generated #defines. */
+ while (SLIST_FIRST(&registers) != NULL) {
+ symbol_node_t *curnode;
+ u_int value;
+ char *tab_str;
+ char *tab_str2;
+
+ curnode = SLIST_FIRST(&registers);
+ SLIST_REMOVE_HEAD(&registers, links);
+ switch(curnode->symbol->type) {
+ case REGISTER:
+ case SCBLOC:
+ case SRAMLOC:
+ fprintf(ofile, "\n");
+ value = curnode->symbol->info.rinfo->address;
+ tab_str = "\t";
+ tab_str2 = "\t\t";
+ break;
+ case ALIAS:
+ {
+ symbol_t *parent;
+
+ parent = curnode->symbol->info.ainfo->parent;
+ value = parent->info.rinfo->address;
+ tab_str = "\t";
+ tab_str2 = "\t\t";
+ break;
+ }
+ case MASK:
+ case FIELD:
+ case ENUM:
+ case ENUM_ENTRY:
+ value = curnode->symbol->info.finfo->value;
+ tab_str = "\t\t";
+ tab_str2 = "\t";
+ break;
+ default:
+ value = 0; /* Quiet compiler */
+ tab_str = NULL;
+ tab_str2 = NULL;
+ stop("symtable_dump: Invalid symbol type "
+ "encountered", EX_SOFTWARE);
+ break;
+ }
+ fprintf(ofile, "#define%s%-16s%s0x%02x\n",
+ tab_str, curnode->symbol->name, tab_str2,
+ value);
+ free(curnode);
+ }
+ fprintf(ofile, "\n\n");
+
+ while (SLIST_FIRST(&constants) != NULL) {
+ symbol_node_t *curnode;
+
+ curnode = SLIST_FIRST(&constants);
+ SLIST_REMOVE_HEAD(&constants, links);
+ fprintf(ofile, "#define\t%-8s\t0x%02x\n",
+ curnode->symbol->name,
+ curnode->symbol->info.cinfo->value);
+ free(curnode);
+ }
+
+ fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
+
+ for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
+ symbol_node_t *curnode;
+
+ curnode = SLIST_FIRST(&download_constants);
+ SLIST_REMOVE_HEAD(&download_constants, links);
+ fprintf(ofile, "#define\t%-8s\t0x%02x\n",
+ curnode->symbol->name,
+ curnode->symbol->info.cinfo->value);
+ free(curnode);
+ }
+ fprintf(ofile, "#define\tDOWNLOAD_CONST_COUNT\t0x%02x\n", i);
+
+ fprintf(ofile, "\n\n/* Exported Labels */\n");
+
+ while (SLIST_FIRST(&exported_labels) != NULL) {
+ symbol_node_t *curnode;
+
+ curnode = SLIST_FIRST(&exported_labels);
+ SLIST_REMOVE_HEAD(&exported_labels, links);
+ fprintf(ofile, "#define\tLABEL_%-8s\t0x%02x\n",
+ curnode->symbol->name,
+ curnode->symbol->info.linfo->address);
+ free(curnode);
+ }
+}
+
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
new file mode 100644
index 000000000..34bbcad7f
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -0,0 +1,209 @@
+/*
+ * Aic7xxx SCSI host adapter firmware assembler symbol table definitions
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.h#17 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+typedef enum {
+ UNINITIALIZED,
+ REGISTER,
+ ALIAS,
+ SCBLOC,
+ SRAMLOC,
+ ENUM_ENTRY,
+ FIELD,
+ MASK,
+ ENUM,
+ CONST,
+ DOWNLOAD_CONST,
+ LABEL,
+ CONDITIONAL,
+ MACRO
+} symtype;
+
+typedef enum {
+ RO = 0x01,
+ WO = 0x02,
+ RW = 0x03
+}amode_t;
+
+typedef SLIST_HEAD(symlist, symbol_node) symlist_t;
+
+struct reg_info {
+ u_int address;
+ int size;
+ amode_t mode;
+ symlist_t fields;
+ uint8_t valid_bitmask;
+ uint8_t modes;
+ int typecheck_masks;
+};
+
+struct field_info {
+ symlist_t symrefs;
+ uint8_t value;
+ uint8_t mask;
+};
+
+struct const_info {
+ u_int value;
+ int define;
+};
+
+struct alias_info {
+ struct symbol *parent;
+};
+
+struct label_info {
+ int address;
+ int exported;
+};
+
+struct cond_info {
+ int func_num;
+};
+
+struct macro_arg {
+ STAILQ_ENTRY(macro_arg) links;
+ regex_t arg_regex;
+ char *replacement_text;
+};
+STAILQ_HEAD(macro_arg_list, macro_arg) args;
+
+struct macro_info {
+ struct macro_arg_list args;
+ int narg;
+ const char* body;
+};
+
+typedef struct expression_info {
+ symlist_t referenced_syms;
+ int value;
+} expression_t;
+
+typedef struct symbol {
+ char *name;
+ symtype type;
+ int count;
+ union {
+ struct reg_info *rinfo;
+ struct field_info *finfo;
+ struct const_info *cinfo;
+ struct alias_info *ainfo;
+ struct label_info *linfo;
+ struct cond_info *condinfo;
+ struct macro_info *macroinfo;
+ } info;
+ int dont_generate_debug_code;
+} symbol_t;
+
+typedef struct symbol_ref {
+ symbol_t *symbol;
+ int offset;
+} symbol_ref_t;
+
+typedef struct symbol_node {
+ SLIST_ENTRY(symbol_node) links;
+ symbol_t *symbol;
+} symbol_node_t;
+
+typedef struct critical_section {
+ TAILQ_ENTRY(critical_section) links;
+ int begin_addr;
+ int end_addr;
+} critical_section_t;
+
+typedef enum {
+ SCOPE_ROOT,
+ SCOPE_IF,
+ SCOPE_ELSE_IF,
+ SCOPE_ELSE
+} scope_type;
+
+typedef struct patch_info {
+ int skip_patch;
+ int skip_instr;
+} patch_info_t;
+
+typedef struct scope {
+ SLIST_ENTRY(scope) scope_stack_links;
+ TAILQ_ENTRY(scope) scope_links;
+ TAILQ_HEAD(, scope) inner_scope;
+ scope_type type;
+ int inner_scope_patches;
+ int begin_addr;
+ int end_addr;
+ patch_info_t patches[2];
+ int func_num;
+} scope_t;
+
+TAILQ_HEAD(cs_tailq, critical_section);
+SLIST_HEAD(scope_list, scope);
+TAILQ_HEAD(scope_tailq, scope);
+
+void symbol_delete(symbol_t *symbol);
+
+void symtable_open(void);
+
+void symtable_close(void);
+
+symbol_t *
+ symtable_get(char *name);
+
+symbol_node_t *
+ symlist_search(symlist_t *symlist, char *symname);
+
+void
+ symlist_add(symlist_t *symlist, symbol_t *symbol, int how);
+#define SYMLIST_INSERT_HEAD 0x00
+#define SYMLIST_SORT 0x01
+
+void symlist_free(symlist_t *symlist);
+
+void symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1,
+ symlist_t *symlist_src2);
+void symtable_dump(FILE *ofile, FILE *dfile);
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
new file mode 100644
index 000000000..828ae3d9a
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aiclib.c
@@ -0,0 +1,34 @@
+/*
+ * Implementation of Utility functions for all SCSI device types.
+ *
+ * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $
+ * $Id$
+ */
+
+#include "aiclib.h"
+
diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h
new file mode 100644
index 000000000..f8fd198aa
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aiclib.h
@@ -0,0 +1,195 @@
+/*
+ * Largely written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * $FreeBSD: src/sys/cam/scsi/scsi_all.h,v 1.21 2002/10/08 17:12:44 ken Exp $
+ *
+ * Copyright (c) 2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id$
+ */
+
+#ifndef _AICLIB_H
+#define _AICLIB_H
+
+struct scsi_sense
+{
+ uint8_t opcode;
+ uint8_t byte2;
+ uint8_t unused[2];
+ uint8_t length;
+ uint8_t control;
+};
+
+#define SCSI_REV_0 0
+#define SCSI_REV_CCS 1
+#define SCSI_REV_2 2
+#define SCSI_REV_SPC 3
+#define SCSI_REV_SPC2 4
+
+struct scsi_sense_data
+{
+ uint8_t error_code;
+#define SSD_ERRCODE 0x7F
+#define SSD_CURRENT_ERROR 0x70
+#define SSD_DEFERRED_ERROR 0x71
+#define SSD_ERRCODE_VALID 0x80
+ uint8_t segment;
+ uint8_t flags;
+#define SSD_KEY 0x0F
+#define SSD_KEY_NO_SENSE 0x00
+#define SSD_KEY_RECOVERED_ERROR 0x01
+#define SSD_KEY_NOT_READY 0x02
+#define SSD_KEY_MEDIUM_ERROR 0x03
+#define SSD_KEY_HARDWARE_ERROR 0x04
+#define SSD_KEY_ILLEGAL_REQUEST 0x05
+#define SSD_KEY_UNIT_ATTENTION 0x06
+#define SSD_KEY_DATA_PROTECT 0x07
+#define SSD_KEY_BLANK_CHECK 0x08
+#define SSD_KEY_Vendor_Specific 0x09
+#define SSD_KEY_COPY_ABORTED 0x0a
+#define SSD_KEY_ABORTED_COMMAND 0x0b
+#define SSD_KEY_EQUAL 0x0c
+#define SSD_KEY_VOLUME_OVERFLOW 0x0d
+#define SSD_KEY_MISCOMPARE 0x0e
+#define SSD_KEY_RESERVED 0x0f
+#define SSD_ILI 0x20
+#define SSD_EOM 0x40
+#define SSD_FILEMARK 0x80
+ uint8_t info[4];
+ uint8_t extra_len;
+ uint8_t cmd_spec_info[4];
+ uint8_t add_sense_code;
+ uint8_t add_sense_code_qual;
+ uint8_t fru;
+ uint8_t sense_key_spec[3];
+#define SSD_SCS_VALID 0x80
+#define SSD_FIELDPTR_CMD 0x40
+#define SSD_BITPTR_VALID 0x08
+#define SSD_BITPTR_VALUE 0x07
+#define SSD_MIN_SIZE 18
+ uint8_t extra_bytes[14];
+#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
+};
+
+/*
+ * Status Byte
+ */
+#define SCSI_STATUS_OK 0x00
+#define SCSI_STATUS_CHECK_COND 0x02
+#define SCSI_STATUS_COND_MET 0x04
+#define SCSI_STATUS_BUSY 0x08
+#define SCSI_STATUS_INTERMED 0x10
+#define SCSI_STATUS_INTERMED_COND_MET 0x14
+#define SCSI_STATUS_RESERV_CONFLICT 0x18
+#define SCSI_STATUS_CMD_TERMINATED 0x22 /* Obsolete in SAM-2 */
+#define SCSI_STATUS_QUEUE_FULL 0x28
+#define SCSI_STATUS_ACA_ACTIVE 0x30
+#define SCSI_STATUS_TASK_ABORTED 0x40
+
+/************************* Large Disk Handling ********************************/
+static inline int
+aic_sector_div(sector_t capacity, int heads, int sectors)
+{
+ /* ugly, ugly sector_div calling convention.. */
+ sector_div(capacity, (heads * sectors));
+ return (int)capacity;
+}
+
+static inline uint32_t
+scsi_4btoul(uint8_t *bytes)
+{
+ uint32_t rv;
+
+ rv = (bytes[0] << 24) |
+ (bytes[1] << 16) |
+ (bytes[2] << 8) |
+ bytes[3];
+ return (rv);
+}
+
+/* Macros for generating the elements of the PCI ID tables. */
+
+#define GETID(v, s) (unsigned)(((v) >> (s)) & 0xFFFF ?: PCI_ANY_ID)
+
+#define ID_C(x, c) \
+{ \
+ GETID(x,32), GETID(x,48), GETID(x,0), GETID(x,16), \
+ (c) << 8, 0xFFFF00, 0 \
+}
+
+#define ID2C(x) \
+ ID_C(x, PCI_CLASS_STORAGE_SCSI), \
+ ID_C(x, PCI_CLASS_STORAGE_RAID)
+
+#define IDIROC(x) ((x) | ~ID_ALL_IROC_MASK)
+
+/* Generate IDs for all 16 possibilites.
+ * The argument has already masked out
+ * the 4 least significant bits of the device id.
+ * (e.g., mask: ID_9005_GENERIC_MASK).
+ */
+#define ID16(x) \
+ ID(x), \
+ ID((x) | 0x0001000000000000ull), \
+ ID((x) | 0x0002000000000000ull), \
+ ID((x) | 0x0003000000000000ull), \
+ ID((x) | 0x0004000000000000ull), \
+ ID((x) | 0x0005000000000000ull), \
+ ID((x) | 0x0006000000000000ull), \
+ ID((x) | 0x0007000000000000ull), \
+ ID((x) | 0x0008000000000000ull), \
+ ID((x) | 0x0009000000000000ull), \
+ ID((x) | 0x000A000000000000ull), \
+ ID((x) | 0x000B000000000000ull), \
+ ID((x) | 0x000C000000000000ull), \
+ ID((x) | 0x000D000000000000ull), \
+ ID((x) | 0x000E000000000000ull), \
+ ID((x) | 0x000F000000000000ull)
+
+#endif /*_AICLIB_H */
diff --git a/drivers/scsi/aic7xxx/cam.h b/drivers/scsi/aic7xxx/cam.h
new file mode 100644
index 000000000..687aef6ef
--- /dev/null
+++ b/drivers/scsi/aic7xxx/cam.h
@@ -0,0 +1,111 @@
+/*
+ * Data structures and definitions for the CAM system.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2000 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/cam.h#15 $
+ */
+
+#ifndef _AIC7XXX_CAM_H
+#define _AIC7XXX_CAM_H 1
+
+#include <linux/types.h>
+
+#define CAM_BUS_WILDCARD ((u_int)~0)
+#define CAM_TARGET_WILDCARD ((u_int)~0)
+#define CAM_LUN_WILDCARD ((u_int)~0)
+
+/* CAM Status field values */
+typedef enum {
+ CAM_REQ_INPROG, /* CCB request is in progress */
+ CAM_REQ_CMP, /* CCB request completed without error */
+ CAM_REQ_ABORTED, /* CCB request aborted by the host */
+ CAM_UA_ABORT, /* Unable to abort CCB request */
+ CAM_REQ_CMP_ERR, /* CCB request completed with an error */
+ CAM_BUSY, /* CAM subsystem is busy */
+ CAM_REQ_INVALID, /* CCB request was invalid */
+ CAM_PATH_INVALID, /* Supplied Path ID is invalid */
+ CAM_SEL_TIMEOUT, /* Target Selection Timeout */
+ CAM_CMD_TIMEOUT, /* Command timeout */
+ CAM_SCSI_STATUS_ERROR, /* SCSI error, look at error code in CCB */
+ CAM_SCSI_BUS_RESET, /* SCSI Bus Reset Sent/Received */
+ CAM_UNCOR_PARITY, /* Uncorrectable parity error occurred */
+ CAM_AUTOSENSE_FAIL, /* Autosense: request sense cmd fail */
+ CAM_NO_HBA, /* No HBA Detected Error */
+ CAM_DATA_RUN_ERR, /* Data Overrun error */
+ CAM_UNEXP_BUSFREE, /* Unexpected Bus Free */
+ CAM_SEQUENCE_FAIL, /* Protocol Violation */
+ CAM_CCB_LEN_ERR, /* CCB length supplied is inadequate */
+ CAM_PROVIDE_FAIL, /* Unable to provide requested capability */
+ CAM_BDR_SENT, /* A SCSI BDR msg was sent to target */
+ CAM_REQ_TERMIO, /* CCB request terminated by the host */
+ CAM_UNREC_HBA_ERROR, /* Unrecoverable Host Bus Adapter Error */
+ CAM_REQ_TOO_BIG, /* The request was too large for this host */
+ CAM_UA_TERMIO, /* Unable to terminate I/O CCB request */
+ CAM_MSG_REJECT_REC, /* Message Reject Received */
+ CAM_DEV_NOT_THERE, /* SCSI Device Not Installed/there */
+ CAM_RESRC_UNAVAIL, /* Resource Unavailable */
+ /*
+ * This request should be requeued to preserve
+ * transaction ordering. This typically occurs
+ * when the SIM recognizes an error that should
+ * freeze the queue and must place additional
+ * requests for the target at the sim level
+ * back into the XPT queue.
+ */
+ CAM_REQUEUE_REQ,
+ CAM_DEV_QFRZN = 0x40,
+
+ CAM_STATUS_MASK = 0x3F
+} cam_status;
+
+/*
+ * Definitions for the asynchronous callback CCB fields.
+ */
+typedef enum {
+ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */
+ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */
+ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */
+ AC_LOST_DEVICE = 0x100,/* A device went away */
+ AC_FOUND_DEVICE = 0x080,/* A new device was found */
+ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */
+ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */
+ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */
+ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */
+ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */
+ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */
+} ac_code;
+
+typedef enum {
+ CAM_DIR_IN = DMA_FROM_DEVICE,
+ CAM_DIR_OUT = DMA_TO_DEVICE,
+ CAM_DIR_NONE = DMA_NONE,
+} ccb_flags;
+
+#endif /* _AIC7XXX_CAM_H */
diff --git a/drivers/scsi/aic7xxx/queue.h b/drivers/scsi/aic7xxx/queue.h
new file mode 100644
index 000000000..8adf8003a
--- /dev/null
+++ b/drivers/scsi/aic7xxx/queue.h
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD: src/sys/sys/queue.h,v 1.38 2000/05/26 02:06:56 jake Exp $
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ * SLIST LIST STAILQ TAILQ CIRCLEQ
+ * _HEAD + + + + +
+ * _HEAD_INITIALIZER + + + + +
+ * _ENTRY + + + + +
+ * _INIT + + + + +
+ * _EMPTY + + + + +
+ * _FIRST + + + + +
+ * _NEXT + + + + +
+ * _PREV - - - + +
+ * _LAST - - + + +
+ * _FOREACH + + + + +
+ * _FOREACH_REVERSE - - - + +
+ * _INSERT_HEAD + + + + +
+ * _INSERT_BEFORE - + - + +
+ * _INSERT_AFTER + + + + +
+ * _INSERT_TAIL - - + + +
+ * _REMOVE_HEAD + - + - -
+ * _REMOVE + + + + +
+ *
+ */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+
+#define SLIST_FIRST(head) ((head)->slh_first)
+
+#define SLIST_FOREACH(var, head, field) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_INIT(head) do { \
+ SLIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
+ SLIST_NEXT((slistelm), field) = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
+ SLIST_FIRST((head)) = (elm); \
+} while (0)
+
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if (SLIST_FIRST((head)) == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = SLIST_FIRST((head)); \
+ while (SLIST_NEXT(curelm, field) != (elm)) \
+ curelm = SLIST_NEXT(curelm, field); \
+ SLIST_NEXT(curelm, field) = \
+ SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
+ } \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for((var) = STAILQ_FIRST((head)); \
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+#define STAILQ_INIT(head) do { \
+ STAILQ_FIRST((head)) = NULL; \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_NEXT((tqelm), field) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ STAILQ_NEXT((elm), field) = NULL; \
+ STAILQ_LAST((head)) = (elm); \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_LAST(head) (*(head)->stqh_last)
+
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ if (STAILQ_FIRST((head)) == (elm)) { \
+ STAILQ_REMOVE_HEAD(head, field); \
+ } \
+ else { \
+ struct type *curelm = STAILQ_FIRST((head)); \
+ while (STAILQ_NEXT(curelm, field) != (elm)) \
+ curelm = STAILQ_NEXT(curelm, field); \
+ if ((STAILQ_NEXT(curelm, field) = \
+ STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((curelm), field);\
+ } \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if ((STAILQ_FIRST((head)) = \
+ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
+ if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+/*
+ * List declarations.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = LIST_FIRST((head)); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_INIT(head) do { \
+ LIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+ LIST_NEXT((listelm), field)->field.le_prev = \
+ &LIST_NEXT((elm), field); \
+ LIST_NEXT((listelm), field) = (elm); \
+ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ LIST_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+ LIST_FIRST((head)) = (elm); \
+ (elm)->field.le_prev = &LIST_FIRST((head)); \
+} while (0)
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_REMOVE(elm, field) do { \
+ if (LIST_NEXT((elm), field) != NULL) \
+ LIST_NEXT((elm), field)->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = LIST_NEXT((elm), field); \
+} while (0)
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_INIT(head) do { \
+ TAILQ_FIRST((head)) = NULL; \
+ (head)->tqh_last = &TAILQ_FIRST((head)); \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_NEXT((listelm), field) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ TAILQ_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
+ TAILQ_FIRST((head))->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_FIRST((head)) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ TAILQ_NEXT((elm), field) = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+} while (0)
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if ((TAILQ_NEXT((elm), field)) != NULL) \
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
+} while (0)
+
+/*
+ * Circular queue declarations.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { (void *)&(head), (void *)&(head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
+
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for ((var) = CIRCLEQ_FIRST((head)); \
+ (var) != (void *)(head); \
+ (var) = CIRCLEQ_NEXT((var), field))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = CIRCLEQ_LAST((head)); \
+ (var) != (void *)(head); \
+ (var) = CIRCLEQ_PREV((var), field))
+
+#define CIRCLEQ_INIT(head) do { \
+ CIRCLEQ_FIRST((head)) = (void *)(head); \
+ CIRCLEQ_LAST((head)) = (void *)(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = CIRCLEQ_NEXT((listelm), field); \
+ CIRCLEQ_PREV((elm), field) = (listelm); \
+ if (CIRCLEQ_NEXT((listelm), field) == (void *)(head)) \
+ CIRCLEQ_LAST((head)) = (elm); \
+ else \
+ CIRCLEQ_PREV(CIRCLEQ_NEXT((listelm), field), field) = (elm);\
+ CIRCLEQ_NEXT((listelm), field) = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = (listelm); \
+ CIRCLEQ_PREV((elm), field) = CIRCLEQ_PREV((listelm), field); \
+ if (CIRCLEQ_PREV((listelm), field) == (void *)(head)) \
+ CIRCLEQ_FIRST((head)) = (elm); \
+ else \
+ CIRCLEQ_NEXT(CIRCLEQ_PREV((listelm), field), field) = (elm);\
+ CIRCLEQ_PREV((listelm), field) = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head)); \
+ CIRCLEQ_PREV((elm), field) = (void *)(head); \
+ if (CIRCLEQ_LAST((head)) == (void *)(head)) \
+ CIRCLEQ_LAST((head)) = (elm); \
+ else \
+ CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm); \
+ CIRCLEQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = (void *)(head); \
+ CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head)); \
+ if (CIRCLEQ_FIRST((head)) == (void *)(head)) \
+ CIRCLEQ_FIRST((head)) = (elm); \
+ else \
+ CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm); \
+ CIRCLEQ_LAST((head)) = (elm); \
+} while (0)
+
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+
+#define CIRCLEQ_NEXT(elm,field) ((elm)->field.cqe_next)
+
+#define CIRCLEQ_PREV(elm,field) ((elm)->field.cqe_prev)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if (CIRCLEQ_NEXT((elm), field) == (void *)(head)) \
+ CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field); \
+ else \
+ CIRCLEQ_PREV(CIRCLEQ_NEXT((elm), field), field) = \
+ CIRCLEQ_PREV((elm), field); \
+ if (CIRCLEQ_PREV((elm), field) == (void *)(head)) \
+ CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field); \
+ else \
+ CIRCLEQ_NEXT(CIRCLEQ_PREV((elm), field), field) = \
+ CIRCLEQ_NEXT((elm), field); \
+} while (0)
+
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/drivers/scsi/aic7xxx/scsi_iu.h b/drivers/scsi/aic7xxx/scsi_iu.h
new file mode 100644
index 000000000..0eafd3c17
--- /dev/null
+++ b/drivers/scsi/aic7xxx/scsi_iu.h
@@ -0,0 +1,39 @@
+/*
+ * This file is in the public domain.
+ */
+#ifndef _SCSI_SCSI_IU_H
+#define _SCSI_SCSI_IU_H 1
+
+struct scsi_status_iu_header
+{
+ u_int8_t reserved[2];
+ u_int8_t flags;
+#define SIU_SNSVALID 0x2
+#define SIU_RSPVALID 0x1
+ u_int8_t status;
+ u_int8_t sense_length[4];
+ u_int8_t pkt_failures_length[4];
+ u_int8_t pkt_failures[1];
+};
+
+#define SIU_PKTFAIL_OFFSET(siu) 12
+#define SIU_PKTFAIL_CODE(siu) (scsi_4btoul((siu)->pkt_failures) & 0xFF)
+#define SIU_PFC_NONE 0
+#define SIU_PFC_CIU_FIELDS_INVALID 2
+#define SIU_PFC_TMF_NOT_SUPPORTED 4
+#define SIU_PFC_TMF_FAILED 5
+#define SIU_PFC_INVALID_TYPE_CODE 6
+#define SIU_PFC_ILLEGAL_REQUEST 7
+#define SIU_SENSE_OFFSET(siu) \
+ (12 + (((siu)->flags & SIU_RSPVALID) \
+ ? scsi_4btoul((siu)->pkt_failures_length) \
+ : 0))
+
+#define SIU_TASKMGMT_NONE 0x00
+#define SIU_TASKMGMT_ABORT_TASK 0x01
+#define SIU_TASKMGMT_ABORT_TASK_SET 0x02
+#define SIU_TASKMGMT_CLEAR_TASK_SET 0x04
+#define SIU_TASKMGMT_LUN_RESET 0x08
+#define SIU_TASKMGMT_TARGET_RESET 0x20
+#define SIU_TASKMGMT_CLEAR_ACA 0x40
+#endif /*_SCSI_SCSI_IU_H*/
diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
new file mode 100644
index 000000000..75811e245
--- /dev/null
+++ b/drivers/scsi/aic7xxx/scsi_message.h
@@ -0,0 +1,70 @@
+/*
+ * This file is in the public domain.
+ * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
+ */
+
+/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
+#define MSG_CMDCOMPLETE 0x00 /* M/M */
+#define MSG_TASK_COMPLETE 0x00 /* M/M */ /* SPI3 Terminology */
+#define MSG_EXTENDED 0x01 /* O/O */
+#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
+#define MSG_RESTOREPOINTERS 0x03 /* O/O */
+#define MSG_DISCONNECT 0x04 /* O/O */
+#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
+#define MSG_ABORT 0x06 /* O/M */
+#define MSG_ABORT_TASK_SET 0x06 /* O/M */ /* SPI3 Terminology */
+#define MSG_MESSAGE_REJECT 0x07 /* M/M */
+#define MSG_NOOP 0x08 /* M/M */
+#define MSG_PARITY_ERROR 0x09 /* M/M */
+#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
+#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
+#define MSG_BUS_DEV_RESET 0x0c /* O/M */
+#define MSG_TARGET_RESET 0x0c /* O/M */ /* SPI3 Terminology */
+#define MSG_ABORT_TAG 0x0d /* O/O */
+#define MSG_ABORT_TASK 0x0d /* O/O */ /* SPI3 Terminology */
+#define MSG_CLEAR_QUEUE 0x0e /* O/O */
+#define MSG_CLEAR_TASK_SET 0x0e /* O/O */ /* SPI3 Terminology */
+#define MSG_INIT_RECOVERY 0x0f /* O/O */ /* Deprecated in SPI3 */
+#define MSG_REL_RECOVERY 0x10 /* O/O */ /* Deprecated in SPI3 */
+#define MSG_TERM_IO_PROC 0x11 /* O/O */ /* Deprecated in SPI3 */
+#define MSG_CLEAR_ACA 0x16 /* O/O */ /* SPI3 */
+#define MSG_LOGICAL_UNIT_RESET 0x17 /* O/O */ /* SPI3 */
+#define MSG_QAS_REQUEST 0x55 /* O/O */ /* SPI3 */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
+#define MSG_SIMPLE_TASK 0x20 /* O/O */ /* SPI3 Terminology */
+#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
+#define MSG_HEAD_OF_QUEUE_TASK 0x21 /* O/O */ /* SPI3 Terminology */
+#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
+#define MSG_ORDERED_TASK 0x22 /* O/O */ /* SPI3 Terminology */
+#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
+#define MSG_ACA_TASK 0x24 /* 0/0 */ /* SPI3 */
+
+/* Identify message */ /* M/M */
+#define MSG_IDENTIFYFLAG 0x80
+#define MSG_IDENTIFY_DISCFLAG 0x40
+#define MSG_IDENTIFY(lun, disc) (((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
+#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG)
+#define MSG_IDENTIFY_LUNMASK 0x3F
+
+/* Extended messages (opcode and length) */
+#define MSG_EXT_SDTR 0x01
+#define MSG_EXT_SDTR_LEN 0x03
+
+#define MSG_EXT_WDTR 0x03
+#define MSG_EXT_WDTR_LEN 0x02
+#define MSG_EXT_WDTR_BUS_8_BIT 0x00
+#define MSG_EXT_WDTR_BUS_16_BIT 0x01
+#define MSG_EXT_WDTR_BUS_32_BIT 0x02 /* Deprecated in SPI3 */
+
+#define MSG_EXT_PPR 0x04 /* SPI3 */
+#define MSG_EXT_PPR_LEN 0x06
+#define MSG_EXT_PPR_PCOMP_EN 0x80
+#define MSG_EXT_PPR_RTI 0x40
+#define MSG_EXT_PPR_RD_STRM 0x20
+#define MSG_EXT_PPR_WR_FLOW 0x10
+#define MSG_EXT_PPR_HOLD_MCS 0x08
+#define MSG_EXT_PPR_QAS_REQ 0x04
+#define MSG_EXT_PPR_DT_REQ 0x02
+#define MSG_EXT_PPR_IU_REQ 0x01
diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig
new file mode 100644
index 000000000..c83fe751d
--- /dev/null
+++ b/drivers/scsi/aic94xx/Kconfig
@@ -0,0 +1,42 @@
+#
+# Kernel configuration file for aic94xx SAS/SATA driver.
+#
+# Copyright (c) 2005 Adaptec, Inc. All rights reserved.
+# Copyright (c) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the aic94xx driver.
+#
+# The aic94xx driver is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The aic94xx driver is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Aic94xx Driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+#
+
+config SCSI_AIC94XX
+ tristate "Adaptec AIC94xx SAS/SATA support"
+ depends on PCI
+ select SCSI_SAS_LIBSAS
+ select FW_LOADER
+ help
+ This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X
+ AIC94xx chip based host adapters.
+
+config AIC94XX_DEBUG
+ bool "Compile in debug mode"
+ default y
+ depends on SCSI_AIC94XX
+ help
+ Compiles the aic94xx driver in debug mode. In debug mode,
+ the driver prints some messages to the console.
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
new file mode 100644
index 000000000..c0a15c754
--- /dev/null
+++ b/drivers/scsi/aic94xx/Makefile
@@ -0,0 +1,37 @@
+#
+# Makefile for Adaptec aic94xx SAS/SATA driver.
+#
+# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the aic94xx driver.
+#
+# The aic94xx driver is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The aic94xx driver is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with the aic94xx driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+ccflags-$(CONFIG_AIC94XX_DEBUG) := -DASD_DEBUG -DASD_ENTER_EXIT
+
+obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o
+aic94xx-y += aic94xx_init.o \
+ aic94xx_hwi.o \
+ aic94xx_reg.o \
+ aic94xx_sds.o \
+ aic94xx_seq.o \
+ aic94xx_dump.o \
+ aic94xx_scb.o \
+ aic94xx_dev.o \
+ aic94xx_tmf.o \
+ aic94xx_task.o
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
new file mode 100644
index 000000000..26d4ad9ed
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -0,0 +1,101 @@
+/*
+ * Aic94xx SAS/SATA driver header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * $Id: //depot/aic94xx/aic94xx.h#31 $
+ */
+
+#ifndef _AIC94XX_H_
+#define _AIC94XX_H_
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <scsi/libsas.h>
+
+#define ASD_DRIVER_NAME "aic94xx"
+#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver"
+
+#define asd_printk(fmt, ...) printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__)
+
+#ifdef ASD_ENTER_EXIT
+#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
+ __func__)
+#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
+ __func__)
+#else
+#define ENTER
+#define EXIT
+#endif
+
+#ifdef ASD_DEBUG
+#define ASD_DPRINTK asd_printk
+#else
+#define ASD_DPRINTK(fmt, ...)
+#endif
+
+/* 2*ITNL timeout + 1 second */
+#define AIC94XX_SCB_TIMEOUT (5*HZ)
+
+extern struct kmem_cache *asd_dma_token_cache;
+extern struct kmem_cache *asd_ascb_cache;
+
+static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
+{
+ int i;
+ for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
+ snprintf(p, 3, "%02X", sas_addr[i]);
+ *p = '\0';
+}
+
+struct asd_ha_struct;
+struct asd_ascb;
+
+int asd_read_ocm(struct asd_ha_struct *asd_ha);
+int asd_read_flash(struct asd_ha_struct *asd_ha);
+
+int asd_dev_found(struct domain_device *dev);
+void asd_dev_gone(struct domain_device *dev);
+
+void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
+
+int asd_execute_task(struct sas_task *task, gfp_t gfp_flags);
+
+void asd_set_dmamode(struct domain_device *dev);
+
+/* ---------- TMFs ---------- */
+int asd_abort_task(struct sas_task *);
+int asd_abort_task_set(struct domain_device *, u8 *lun);
+int asd_clear_aca(struct domain_device *, u8 *lun);
+int asd_clear_task_set(struct domain_device *, u8 *lun);
+int asd_lu_reset(struct domain_device *, u8 *lun);
+int asd_I_T_nexus_reset(struct domain_device *dev);
+int asd_query_task(struct sas_task *);
+
+/* ---------- Adapter and Port management ---------- */
+int asd_clear_nexus_port(struct asd_sas_port *port);
+int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha);
+
+/* ---------- Phy Management ---------- */
+int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg);
+
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
new file mode 100644
index 000000000..33072388e
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -0,0 +1,363 @@
+/*
+ * Aic94xx SAS/SATA DDB management
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * $Id: //depot/aic94xx/aic94xx_dev.c#21 $
+ */
+
+#include "aic94xx.h"
+#include "aic94xx_hwi.h"
+#include "aic94xx_reg.h"
+#include "aic94xx_sas.h"
+
+#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \
+ (_ha)->hw_prof.max_ddbs)
+#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
+#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
+
+static int asd_get_ddb(struct asd_ha_struct *asd_ha)
+{
+ int ddb, i;
+
+ ddb = FIND_FREE_DDB(asd_ha);
+ if (ddb >= asd_ha->hw_prof.max_ddbs) {
+ ddb = -ENOMEM;
+ goto out;
+ }
+ SET_DDB(ddb, asd_ha);
+
+ for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
+ asd_ddbsite_write_dword(asd_ha, ddb, i, 0);
+out:
+ return ddb;
+}
+
+#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag)
+#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr)
+#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head)
+#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type)
+#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask)
+#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags)
+#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2)
+#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail)
+#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail)
+#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb)
+#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn)
+#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts)
+#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr)
+#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask)
+#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags)
+#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status)
+#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr)
+#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout)
+
+static void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
+{
+ if (!ddb || ddb >= 0xFFFF)
+ return;
+ asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED);
+ CLEAR_DDB(ddb, asd_ha);
+}
+
+static void asd_set_ddb_type(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ int ddb = (int) (unsigned long) dev->lldd_dev;
+
+ if (dev->dev_type == SAS_SATA_PM_PORT)
+ asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
+ else if (dev->tproto)
+ asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
+ else
+ asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR);
+}
+
+static int asd_init_sata_tag_ddb(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ int ddb, i;
+
+ ddb = asd_get_ddb(asd_ha);
+ if (ddb < 0)
+ return ddb;
+
+ for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2)
+ asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
+
+ asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
+ SISTER_DDB, ddb);
+ return 0;
+}
+
+void asd_set_dmamode(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ struct ata_device *ata_dev = sas_to_ata_dev(dev);
+ int ddb = (int) (unsigned long) dev->lldd_dev;
+ u32 qdepth = 0;
+
+ if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
+ if (ata_id_has_ncq(ata_dev->id))
+ qdepth = ata_id_queue_depth(ata_dev->id);
+ asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
+ (1ULL<<qdepth)-1);
+ asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
+ }
+
+ if (qdepth > 0)
+ if (asd_init_sata_tag_ddb(dev) != 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+ ata_dev->flags |= ATA_DFLAG_NCQ_OFF;
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+ }
+}
+
+static int asd_init_sata(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ int ddb = (int) (unsigned long) dev->lldd_dev;
+
+ asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
+ if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+ dev->dev_type == SAS_SATA_PM_PORT) {
+ struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
+ dev->frame_rcvd;
+ asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
+ }
+ asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
+
+ return 0;
+}
+
+static int asd_init_target_ddb(struct domain_device *dev)
+{
+ int ddb, i;
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ u8 flags = 0;
+
+ ddb = asd_get_ddb(asd_ha);
+ if (ddb < 0)
+ return ddb;
+
+ dev->lldd_dev = (void *) (unsigned long) ddb;
+
+ asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE);
+ asd_ddbsite_write_byte(asd_ha, ddb, 1, 0);
+ asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF);
+ for (i = 0; i < SAS_ADDR_SIZE; i++)
+ asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i,
+ dev->sas_addr[i]);
+ asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF);
+ asd_set_ddb_type(dev);
+ asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
+ if (dev->port->oob_mode != SATA_OOB_MODE) {
+ flags |= OPEN_REQUIRED;
+ if ((dev->dev_type == SAS_SATA_DEV) ||
+ (dev->tproto & SAS_PROTOCOL_STP)) {
+ struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ if (rps_resp->frame_type == SMP_RESPONSE &&
+ rps_resp->function == SMP_REPORT_PHY_SATA &&
+ rps_resp->result == SMP_RESP_FUNC_ACC) {
+ if (rps_resp->rps.affil_valid)
+ flags |= STP_AFFIL_POL;
+ if (rps_resp->rps.affil_supp)
+ flags |= SUPPORTS_AFFIL;
+ }
+ } else {
+ flags |= CONCURRENT_CONN_SUPP;
+ if (!dev->parent &&
+ (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
+ asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
+ 4);
+ else
+ asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
+ dev->pathways);
+ asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
+ }
+ }
+ if (dev->dev_type == SAS_SATA_PM)
+ flags |= SATA_MULTIPORT;
+ asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
+
+ flags = 0;
+ if (dev->tproto & SAS_PROTOCOL_STP)
+ flags |= STP_CL_POL_NO_TX;
+ asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
+
+ asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
+
+ if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ i = asd_init_sata(dev);
+ if (i < 0) {
+ asd_free_ddb(asd_ha, ddb);
+ return i;
+ }
+ }
+
+ if (dev->dev_type == SAS_END_DEVICE) {
+ struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
+ if (rdev->I_T_nexus_loss_timeout > 0)
+ asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
+ min(rdev->I_T_nexus_loss_timeout,
+ (u16)ITNL_TIMEOUT_CONST));
+ else
+ asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
+ (u16)ITNL_TIMEOUT_CONST);
+ }
+ return 0;
+}
+
+static int asd_init_sata_pm_table_ddb(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ int ddb, i;
+
+ ddb = asd_get_ddb(asd_ha);
+ if (ddb < 0)
+ return ddb;
+
+ for (i = 0; i < 32; i += 2)
+ asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
+
+ asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
+ SISTER_DDB, ddb);
+
+ return 0;
+}
+
+#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags)
+#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb)
+
+/**
+ * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port
+ * dev: pointer to domain device
+ *
+ * For SATA Port Multiplier Ports we need to allocate one SATA Port
+ * Multiplier Port DDB and depending on whether the target on it
+ * supports SATA II NCQ, one SATA Tag DDB.
+ */
+static int asd_init_sata_pm_port_ddb(struct domain_device *dev)
+{
+ int ddb, i, parent_ddb, pmtable_ddb;
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ u8 flags;
+
+ ddb = asd_get_ddb(asd_ha);
+ if (ddb < 0)
+ return ddb;
+
+ asd_set_ddb_type(dev);
+ flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET;
+ asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags);
+ asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
+ asd_init_sata(dev);
+
+ parent_ddb = (int) (unsigned long) dev->parent->lldd_dev;
+ asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb);
+ pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB);
+ asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb);
+
+ if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) {
+ i = asd_init_sata_tag_ddb(dev);
+ if (i < 0) {
+ asd_free_ddb(asd_ha, ddb);
+ return i;
+ }
+ }
+ return 0;
+}
+
+static int asd_init_initiator_ddb(struct domain_device *dev)
+{
+ return -ENODEV;
+}
+
+/**
+ * asd_init_sata_pm_ddb -- SATA Port Multiplier
+ * dev: pointer to domain device
+ *
+ * For STP and direct-attached SATA Port Multipliers we need
+ * one target port DDB entry and one SATA PM table DDB entry.
+ */
+static int asd_init_sata_pm_ddb(struct domain_device *dev)
+{
+ int res = 0;
+
+ res = asd_init_target_ddb(dev);
+ if (res)
+ goto out;
+ res = asd_init_sata_pm_table_ddb(dev);
+ if (res)
+ asd_free_ddb(dev->port->ha->lldd_ha,
+ (int) (unsigned long) dev->lldd_dev);
+out:
+ return res;
+}
+
+int asd_dev_found(struct domain_device *dev)
+{
+ unsigned long flags;
+ int res = 0;
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+
+ spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
+ switch (dev->dev_type) {
+ case SAS_SATA_PM:
+ res = asd_init_sata_pm_ddb(dev);
+ break;
+ case SAS_SATA_PM_PORT:
+ res = asd_init_sata_pm_port_ddb(dev);
+ break;
+ default:
+ if (dev->tproto)
+ res = asd_init_target_ddb(dev);
+ else
+ res = asd_init_initiator_ddb(dev);
+ }
+ spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
+
+ return res;
+}
+
+void asd_dev_gone(struct domain_device *dev)
+{
+ int ddb, sister_ddb;
+ unsigned long flags;
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+
+ spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
+ ddb = (int) (unsigned long) dev->lldd_dev;
+ sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB);
+
+ if (sister_ddb != 0xFFFF)
+ asd_free_ddb(asd_ha, sister_ddb);
+ asd_free_ddb(asd_ha, ddb);
+ dev->lldd_dev = NULL;
+ spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
new file mode 100644
index 000000000..a16a77c8b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -0,0 +1,967 @@
+/*
+ * Aic94xx SAS/SATA driver dump interface.
+ *
+ * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * 2005/07/14/LT Complete overhaul of this file. Update pages, register
+ * locations, names, etc. Make use of macros. Print more information.
+ * Print all cseq and lseq mip and mdp.
+ *
+ */
+
+#include <linux/pci.h>
+#include "aic94xx.h"
+#include "aic94xx_reg.h"
+#include "aic94xx_reg_def.h"
+#include "aic94xx_sas.h"
+
+#include "aic94xx_dump.h"
+
+#ifdef ASD_DEBUG
+
+#define MD(x) (1 << (x))
+#define MODE_COMMON (1 << 31)
+#define MODE_0_7 (0xFF)
+
+static const struct lseq_cio_regs {
+ char *name;
+ u32 offs;
+ u8 width;
+ u32 mode;
+} LSEQmCIOREGS[] = {
+ {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
+ {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
+ {"LmREQMBX", 0x30, 32, MODE_COMMON },
+ {"LmRSPMBX", 0x34, 32, MODE_COMMON },
+ {"LmMnINT", 0x38, 32, MODE_0_7 },
+ {"LmMnINTEN", 0x3C, 32, MODE_0_7 },
+ {"LmXMTPRIMD", 0x40, 32, MODE_COMMON },
+ {"LmXMTPRIMCS", 0x44, 8, MODE_COMMON },
+ {"LmCONSTAT", 0x45, 8, MODE_COMMON },
+ {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) },
+ {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) },
+ {"LmMnEXPHDRP", 0x48, 8, MD(0) },
+ {"LmMnSASAALIGN", 0x48, 8, MD(1) },
+ {"LmMnMSKHDRP", 0x49, 8, MD(0) },
+ {"LmMnSTPALIGN", 0x49, 8, MD(1) },
+ {"LmMnRCVHDRP", 0x4A, 8, MD(0) },
+ {"LmMnXMTHDRP", 0x4A, 8, MD(1) },
+ {"LmALIGNMODE", 0x4B, 8, MD(1) },
+ {"LmMnEXPRCVCNT", 0x4C, 32, MD(0) },
+ {"LmMnXMTCNT", 0x4C, 32, MD(1) },
+ {"LmMnCURRTAG", 0x54, 16, MD(0) },
+ {"LmMnPREVTAG", 0x56, 16, MD(0) },
+ {"LmMnACKOFS", 0x58, 8, MD(1) },
+ {"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) },
+ {"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) },
+ {"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) },
+ {"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) },
+ {"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) },
+ {"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) },
+ {"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) },
+ {"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) },
+ {"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) },
+ {"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) },
+ {"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) },
+ {"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) },
+ {"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) },
+ {"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) },
+ {"LmMnXMTCRC", 0x74, 32, MD(1) },
+ {"LmCURRTAG", 0x74, 16, MD(0) },
+ {"LmPREVTAG", 0x76, 16, MD(0) },
+ {"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) },
+ {"LmDPTHSTAT", 0x7C, 8, MODE_COMMON },
+ {"LmMnHOLDLVL", 0x7D, 8, MD(0) },
+ {"LmMnSATAFS", 0x7E, 8, MD(1) },
+ {"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) },
+ {"LmPRMSTAT0", 0x80, 32, MODE_COMMON },
+ {"LmPRMSTAT1", 0x84, 32, MODE_COMMON },
+ {"LmGPRMINT", 0x88, 8, MODE_COMMON },
+ {"LmMnCURRSCB", 0x8A, 16, MD(0) },
+ {"LmPRMICODE", 0x8C, 32, MODE_COMMON },
+ {"LmMnRCVCNT", 0x90, 16, MD(0) },
+ {"LmMnBUFSTAT", 0x92, 16, MD(0) },
+ {"LmMnXMTHDRSIZE",0x92, 8, MD(1) },
+ {"LmMnXMTSIZE", 0x93, 8, MD(1) },
+ {"LmMnTGTXFRCNT", 0x94, 32, MD(0) },
+ {"LmMnEXPROFS", 0x98, 32, MD(0) },
+ {"LmMnXMTROFS", 0x98, 32, MD(1) },
+ {"LmMnRCVROFS", 0x9C, 32, MD(0) },
+ {"LmCONCTL", 0xA0, 16, MODE_COMMON },
+ {"LmBITLTIMER", 0xA2, 16, MODE_COMMON },
+ {"LmWWNLOW", 0xA8, 32, MODE_COMMON },
+ {"LmWWNHIGH", 0xAC, 32, MODE_COMMON },
+ {"LmMnFRMERR", 0xB0, 32, MD(0) },
+ {"LmMnFRMERREN", 0xB4, 32, MD(0) },
+ {"LmAWTIMER", 0xB8, 16, MODE_COMMON },
+ {"LmAWTCTL", 0xBA, 8, MODE_COMMON },
+ {"LmMnHDRCMPS", 0xC0, 32, MD(0) },
+ {"LmMnXMTSTAT", 0xC4, 8, MD(1) },
+ {"LmHWTSTATEN", 0xC5, 8, MODE_COMMON },
+ {"LmMnRRDYRC", 0xC6, 8, MD(0) },
+ {"LmMnRRDYTC", 0xC6, 8, MD(1) },
+ {"LmHWTSTAT", 0xC7, 8, MODE_COMMON },
+ {"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) },
+ {"LmDWSSTATUS", 0xCB, 8, MODE_COMMON },
+ {"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) },
+ {"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) },
+ {"LmXXXPRIM", 0xD4, 32, MODE_COMMON },
+ {"LmRCVASTAT", 0xD9, 8, MODE_COMMON },
+ {"LmINTDIS1", 0xDA, 8, MODE_COMMON },
+ {"LmPSTORESEL", 0xDB, 8, MODE_COMMON },
+ {"LmPSTORE", 0xDC, 32, MODE_COMMON },
+ {"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON },
+ {"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON },
+ {"LmDONETCTL", 0xF2, 16, MODE_COMMON },
+ {NULL, 0, 0, 0 }
+};
+/*
+static struct lseq_cio_regs LSEQmOOBREGS[] = {
+ {"OOB_BFLTR" ,0x100, 8, MD(5)},
+ {"OOB_INIT_MIN" ,0x102,16, MD(5)},
+ {"OOB_INIT_MAX" ,0x104,16, MD(5)},
+ {"OOB_INIT_NEG" ,0x106,16, MD(5)},
+ {"OOB_SAS_MIN" ,0x108,16, MD(5)},
+ {"OOB_SAS_MAX" ,0x10A,16, MD(5)},
+ {"OOB_SAS_NEG" ,0x10C,16, MD(5)},
+ {"OOB_WAKE_MIN" ,0x10E,16, MD(5)},
+ {"OOB_WAKE_MAX" ,0x110,16, MD(5)},
+ {"OOB_WAKE_NEG" ,0x112,16, MD(5)},
+ {"OOB_IDLE_MAX" ,0x114,16, MD(5)},
+ {"OOB_BURST_MAX" ,0x116,16, MD(5)},
+ {"OOB_XMIT_BURST" ,0x118, 8, MD(5)},
+ {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)},
+ {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)},
+ {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)},
+ {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)},
+ {"OOB_SAS_NEGO" ,0x120, 8, MD(5)},
+ {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)},
+ {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)},
+ {"OOB_DATA_KBITS" ,0x126, 8, MD(5)},
+ {"OOB_BURST_DATA" ,0x128,32, MD(5)},
+ {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)},
+ {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)},
+ {"OOB_SYNC_DATA" ,0x134,32, MD(5)},
+ {"OOB_D10_2_DATA" ,0x138,32, MD(5)},
+ {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)},
+ {"OOB_SIG_GEN" ,0x140, 8, MD(5)},
+ {"OOB_XMIT" ,0x141, 8, MD(5)},
+ {"FUNCTION_MAKS" ,0x142, 8, MD(5)},
+ {"OOB_MODE" ,0x143, 8, MD(5)},
+ {"CURRENT_STATUS" ,0x144, 8, MD(5)},
+ {"SPEED_MASK" ,0x145, 8, MD(5)},
+ {"PRIM_COUNT" ,0x146, 8, MD(5)},
+ {"OOB_SIGNALS" ,0x148, 8, MD(5)},
+ {"OOB_DATA_DET" ,0x149, 8, MD(5)},
+ {"OOB_TIME_OUT" ,0x14C, 8, MD(5)},
+ {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)},
+ {"OOB_STATUS" ,0x14E, 8, MD(5)},
+ {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)},
+ {"RCD_DELAY" ,0x151, 8, MD(5)},
+ {"COMSAS_TIMER" ,0x152, 8, MD(5)},
+ {"SNTT_DELAY" ,0x153, 8, MD(5)},
+ {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)},
+ {"SNLT_DELAY" ,0x155, 8, MD(5)},
+ {"SNWT_DELAY" ,0x156, 8, MD(5)},
+ {"ALIGN_DELAY" ,0x157, 8, MD(5)},
+ {"INT_ENABLE_0" ,0x158, 8, MD(5)},
+ {"INT_ENABLE_1" ,0x159, 8, MD(5)},
+ {"INT_ENABLE_2" ,0x15A, 8, MD(5)},
+ {"INT_ENABLE_3" ,0x15B, 8, MD(5)},
+ {"OOB_TEST_REG" ,0x15C, 8, MD(5)},
+ {"PHY_CONTROL_0" ,0x160, 8, MD(5)},
+ {"PHY_CONTROL_1" ,0x161, 8, MD(5)},
+ {"PHY_CONTROL_2" ,0x162, 8, MD(5)},
+ {"PHY_CONTROL_3" ,0x163, 8, MD(5)},
+ {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)},
+ {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)},
+ {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)},
+ {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)},
+ {"PHY_CONTROL_4" ,0x168, 8, MD(5)},
+ {"PHY_TEST" ,0x169, 8, MD(5)},
+ {"PHY_PWR_CTL" ,0x16A, 8, MD(5)},
+ {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)},
+ {"OOB_SM_CON" ,0x16C, 8, MD(5)},
+ {"ADDR_TRAP_1" ,0x16D, 8, MD(5)},
+ {"ADDR_NEXT_1" ,0x16E, 8, MD(5)},
+ {"NEXT_ST_1" ,0x16F, 8, MD(5)},
+ {"OOB_SM_STATE" ,0x170, 8, MD(5)},
+ {"ADDR_TRAP_2" ,0x171, 8, MD(5)},
+ {"ADDR_NEXT_2" ,0x172, 8, MD(5)},
+ {"NEXT_ST_2" ,0x173, 8, MD(5)},
+ {NULL, 0, 0, 0 }
+};
+*/
+#define STR_8BIT " %30s[0x%04x]:0x%02x\n"
+#define STR_16BIT " %30s[0x%04x]:0x%04x\n"
+#define STR_32BIT " %30s[0x%04x]:0x%08x\n"
+#define STR_64BIT " %30s[0x%04x]:0x%llx\n"
+
+#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \
+ asd_read_reg_byte(_ha, _r))
+#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \
+ asd_read_reg_word(_ha, _r))
+#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \
+ asd_read_reg_dword(_ha, _r))
+
+#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \
+ asd_read_reg_byte(_ha, C##_n))
+#define PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \
+ asd_read_reg_word(_ha, C##_n))
+#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \
+ asd_read_reg_dword(_ha, C##_n))
+
+#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n"
+#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n"
+#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n"
+
+#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \
+ asd_read_reg_byte(_ha, _r))
+#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \
+ asd_read_reg_word(_ha, _r))
+#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \
+ asd_read_reg_dword(_ha, _r))
+
+/* can also be used for MD when the register is mode aware already */
+#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\
+ asd_read_reg_byte(_ha, CSEQ_##_n))
+#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
+ asd_read_reg_word(_ha, CSEQ_##_n))
+#define PRINT_MIS_dword(_ha, _n) \
+ asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
+ asd_read_reg_dword(_ha, CSEQ_##_n))
+#define PRINT_MIS_qword(_ha, _n) \
+ asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR, \
+ (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \
+ | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32)))
+
+#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n)
+#define PRINT_CMDP_word(_ha, _n) \
+asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
+ #_n, \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \
+ asd_read_reg_word(_ha, CMDP_REG(_n, 7)))
+
+#define PRINT_CMDP_byte(_ha, _n) \
+asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
+ #_n, \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \
+ asd_read_reg_byte(_ha, CMDP_REG(_n, 7)))
+
+static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha)
+{
+ int mode;
+
+ asd_printk("CSEQ STATE\n");
+
+ asd_printk("ARP2 REGISTERS\n");
+
+ PRINT_CREG_32bit(asd_ha, ARP2CTL);
+ PRINT_CREG_32bit(asd_ha, ARP2INT);
+ PRINT_CREG_32bit(asd_ha, ARP2INTEN);
+ PRINT_CREG_8bit(asd_ha, MODEPTR);
+ PRINT_CREG_8bit(asd_ha, ALTMODE);
+ PRINT_CREG_8bit(asd_ha, FLAG);
+ PRINT_CREG_8bit(asd_ha, ARP2INTCTL);
+ PRINT_CREG_16bit(asd_ha, STACK);
+ PRINT_CREG_16bit(asd_ha, PRGMCNT);
+ PRINT_CREG_16bit(asd_ha, ACCUM);
+ PRINT_CREG_16bit(asd_ha, SINDEX);
+ PRINT_CREG_16bit(asd_ha, DINDEX);
+ PRINT_CREG_8bit(asd_ha, SINDIR);
+ PRINT_CREG_8bit(asd_ha, DINDIR);
+ PRINT_CREG_8bit(asd_ha, JUMLDIR);
+ PRINT_CREG_8bit(asd_ha, ARP2HALTCODE);
+ PRINT_CREG_16bit(asd_ha, CURRADDR);
+ PRINT_CREG_16bit(asd_ha, LASTADDR);
+ PRINT_CREG_16bit(asd_ha, NXTLADDR);
+
+ asd_printk("IOP REGISTERS\n");
+
+ PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL);
+ PRINT_CREG_32bit(asd_ha, MAPPEDSCR);
+
+ asd_printk("CIO REGISTERS\n");
+
+ for (mode = 0; mode < 9; mode++)
+ PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode));
+ PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15));
+
+ for (mode = 0; mode < 9; mode++)
+ PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode));
+ PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15));
+
+ for (mode = 0; mode < 8; mode++)
+ PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode));
+ for (mode = 0; mode < 8; mode++)
+ PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode));
+ for (mode = 0; mode < 8; mode++)
+ PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode));
+ for (mode = 0; mode < 8; mode++)
+ PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode));
+
+ PRINT_CREG_8bit(asd_ha, SCRATCHPAGE);
+ for (mode = 0; mode < 8; mode++)
+ PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE,
+ CMnSCRATCHPAGE(mode));
+
+ PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON);
+ PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK);
+ PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST);
+ PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE);
+ PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC);
+ PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS);
+
+ asd_printk("SCRATCH MEMORY\n");
+
+ asd_printk("MIP 4 >>>>>\n");
+ PRINT_MIS_word(asd_ha, Q_EXE_HEAD);
+ PRINT_MIS_word(asd_ha, Q_EXE_TAIL);
+ PRINT_MIS_word(asd_ha, Q_DONE_HEAD);
+ PRINT_MIS_word(asd_ha, Q_DONE_TAIL);
+ PRINT_MIS_word(asd_ha, Q_SEND_HEAD);
+ PRINT_MIS_word(asd_ha, Q_SEND_TAIL);
+ PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD);
+ PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL);
+ PRINT_MIS_word(asd_ha, Q_COPY_HEAD);
+ PRINT_MIS_word(asd_ha, Q_COPY_TAIL);
+ PRINT_MIS_word(asd_ha, REG0);
+ PRINT_MIS_word(asd_ha, REG1);
+ PRINT_MIS_dword(asd_ha, REG2);
+ PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP);
+ PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE);
+ PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT);
+
+ asd_printk("MIP 5 >>>>\n");
+ PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE);
+ PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT);
+ PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD);
+ PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL);
+ PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB);
+ PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD);
+ PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL);
+ PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET);
+
+ asd_printk("MIP 6 >>>>\n");
+ PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0);
+ PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1);
+ PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR);
+ PRINT_MIS_byte(asd_ha, INT_ROUT_MODE);
+ PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS);
+ PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX);
+ PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX);
+ PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD);
+ PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL);
+ PRINT_MIS_byte(asd_ha, FREE_SCB_MASK);
+ PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD);
+ PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL);
+ PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD);
+ PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL);
+
+ asd_printk("MIP 7 >>>>\n");
+ PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE);
+ PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT);
+ PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD);
+ PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL);
+ PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB);
+ PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD);
+ PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL);
+ PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET);
+ PRINT_MIS_word(asd_ha, PRIMITIVE_DATA);
+ PRINT_MIS_dword(asd_ha, TIMEOUT_CONST);
+
+ asd_printk("MDP 0 >>>>\n");
+ asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7");
+ PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX);
+ PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR);
+ PRINT_CMDP_word(asd_ha, Q_LINK_HEAD);
+ PRINT_CMDP_word(asd_ha, Q_LINK_TAIL);
+ PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE);
+
+ asd_printk("MDP 0 Mode 8 >>>>\n");
+ PRINT_MIS_word(asd_ha, RET_ADDR);
+ PRINT_MIS_word(asd_ha, RET_SCBPTR);
+ PRINT_MIS_word(asd_ha, SAVE_SCBPTR);
+ PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX);
+ PRINT_MIS_word(asd_ha, RESP_LEN);
+ PRINT_MIS_word(asd_ha, TMF_SCBPTR);
+ PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB);
+ PRINT_MIS_word(asd_ha, GLOBAL_HEAD);
+ PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD);
+ PRINT_MIS_byte(asd_ha, TMF_OPCODE);
+ PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS);
+ PRINT_MIS_word(asd_ha, HSB_SITE);
+ PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE);
+ PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE);
+
+ asd_printk("MDP 1 Mode 8 >>>>\n");
+ PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR);
+ PRINT_MIS_qword(asd_ha, LUN_TO_CHECK);
+
+ asd_printk("MDP 2 Mode 8 >>>>\n");
+ PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER);
+ PRINT_MIS_qword(asd_ha, HQ_DONE_BASE);
+ PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER);
+ PRINT_MIS_byte(asd_ha, HQ_DONE_PASS);
+}
+
+#define PRINT_LREG_8bit(_h, _lseq, _n) \
+ asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq)))
+#define PRINT_LREG_16bit(_h, _lseq, _n) \
+ asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq)))
+#define PRINT_LREG_32bit(_h, _lseq, _n) \
+ asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq)))
+
+#define PRINT_LMIP_byte(_h, _lseq, _n) \
+ asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
+ asd_read_reg_byte(_h, LmSEQ_##_n(_lseq)))
+#define PRINT_LMIP_word(_h, _lseq, _n) \
+ asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
+ asd_read_reg_word(_h, LmSEQ_##_n(_lseq)))
+#define PRINT_LMIP_dword(_h, _lseq, _n) \
+ asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
+ asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)))
+#define PRINT_LMIP_qword(_h, _lseq, _n) \
+ asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
+ (unsigned long long)(((unsigned long long) \
+ asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \
+ | (((unsigned long long) \
+ asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32)))
+
+static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha,
+ u32 lseq_cio_addr, int i)
+{
+ switch (LSEQmCIOREGS[i].width) {
+ case 8:
+ asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name,
+ LSEQmCIOREGS[i].offs,
+ asd_read_reg_byte(asd_ha, lseq_cio_addr +
+ LSEQmCIOREGS[i].offs));
+
+ break;
+ case 16:
+ asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name,
+ LSEQmCIOREGS[i].offs,
+ asd_read_reg_word(asd_ha, lseq_cio_addr +
+ LSEQmCIOREGS[i].offs));
+
+ break;
+ case 32:
+ asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name,
+ LSEQmCIOREGS[i].offs,
+ asd_read_reg_dword(asd_ha, lseq_cio_addr +
+ LSEQmCIOREGS[i].offs));
+ break;
+ }
+}
+
+static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
+{
+ u32 moffs;
+ int mode;
+
+ asd_printk("LSEQ %d STATE\n", lseq);
+
+ asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq);
+ PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL);
+ PRINT_LREG_32bit(asd_ha, lseq, ARP2INT);
+ PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN);
+ PRINT_LREG_8bit(asd_ha, lseq, MODEPTR);
+ PRINT_LREG_8bit(asd_ha, lseq, ALTMODE);
+ PRINT_LREG_8bit(asd_ha, lseq, FLAG);
+ PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL);
+ PRINT_LREG_16bit(asd_ha, lseq, STACK);
+ PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT);
+ PRINT_LREG_16bit(asd_ha, lseq, ACCUM);
+ PRINT_LREG_16bit(asd_ha, lseq, SINDEX);
+ PRINT_LREG_16bit(asd_ha, lseq, DINDEX);
+ PRINT_LREG_8bit(asd_ha, lseq, SINDIR);
+ PRINT_LREG_8bit(asd_ha, lseq, DINDIR);
+ PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR);
+ PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE);
+ PRINT_LREG_16bit(asd_ha, lseq, CURRADDR);
+ PRINT_LREG_16bit(asd_ha, lseq, LASTADDR);
+ PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR);
+
+ asd_printk("LSEQ%d: IOP REGISTERS\n", lseq);
+
+ PRINT_LREG_32bit(asd_ha, lseq, MODECTL);
+ PRINT_LREG_32bit(asd_ha, lseq, DBGMODE);
+ PRINT_LREG_32bit(asd_ha, lseq, CONTROL);
+ PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq));
+ PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq));
+
+ asd_printk("LSEQ%d: CIO REGISTERS\n", lseq);
+ asd_printk("Mode common:\n");
+
+ for (mode = 0; mode < 8; mode++) {
+ u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
+ int i;
+
+ for (i = 0; LSEQmCIOREGS[i].name; i++)
+ if (LSEQmCIOREGS[i].mode == MODE_COMMON)
+ asd_print_lseq_cio_reg(asd_ha,lseq_cio_addr,i);
+ }
+
+ asd_printk("Mode unique:\n");
+ for (mode = 0; mode < 8; mode++) {
+ u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
+ int i;
+
+ asd_printk("Mode %d\n", mode);
+ for (i = 0; LSEQmCIOREGS[i].name; i++) {
+ if (!(LSEQmCIOREGS[i].mode & (1 << mode)))
+ continue;
+ asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i);
+ }
+ }
+
+ asd_printk("SCRATCH MEMORY\n");
+
+ asd_printk("LSEQ%d MIP 0 >>>>\n", lseq);
+ PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD);
+ PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL);
+ PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER);
+ PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS);
+ PRINT_LMIP_dword(asd_ha, lseq, CONNECTION_STATE);
+ PRINT_LMIP_word(asd_ha, lseq, CONCTL);
+ PRINT_LMIP_byte(asd_ha, lseq, CONSTAT);
+ PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES);
+ PRINT_LMIP_word(asd_ha, lseq, REG1_ISR);
+ PRINT_LMIP_word(asd_ha, lseq, REG2_ISR);
+ PRINT_LMIP_word(asd_ha, lseq, REG3_ISR);
+ PRINT_LMIP_qword(asd_ha, lseq,REG0_ISR);
+
+ asd_printk("LSEQ%d MIP 1 >>>>\n", lseq);
+ PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0);
+ PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1);
+ PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2);
+ PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL);
+ PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL);
+ PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST);
+ PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX);
+ PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX);
+
+ asd_printk("LSEQ%d MIP 2 >>>>\n", lseq);
+ PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0);
+ PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1);
+ PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2);
+ PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL);
+ PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL);
+
+ asd_printk("LSEQ%d MIP 3 >>>>\n", lseq);
+ PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST);
+ PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT);
+ PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT);
+ PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT);
+ PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT);
+ PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT);
+ PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT);
+
+ for (mode = 0; mode < 3; mode++) {
+ asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", lseq, mode);
+ moffs = mode * LSEQ_MODE_SCRATCH_SIZE;
+
+ asd_printk(STR_16BIT, "RET_ADDR", 0,
+ asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)
+ + moffs));
+ asd_printk(STR_16BIT, "REG0_MODE", 2,
+ asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)
+ + moffs));
+ asd_printk(STR_16BIT, "MODE_FLAGS", 4,
+ asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)
+ + moffs));
+ asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
+ asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)
+ + moffs));
+ asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
+ asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)
+ + moffs));
+ asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
+ asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)
+ + moffs));
+ asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
+ asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)
+ + moffs));
+ }
+
+ asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq);
+ moffs = LSEQ_MODE5_PAGE0_OFFSET;
+ asd_printk(STR_16BIT, "RET_ADDR", 0,
+ asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs));
+ asd_printk(STR_16BIT, "REG0_MODE", 2,
+ asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs));
+ asd_printk(STR_16BIT, "MODE_FLAGS", 4,
+ asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs));
+ asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
+ asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs));
+ asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
+ asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs));
+ asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
+ asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs));
+ asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
+ asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs));
+
+ asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq);
+ PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE);
+ PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX);
+ PRINT_LMIP_word(asd_ha, lseq, RESP_LEN);
+ PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE);
+ PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE);
+ PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN);
+ PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL);
+ PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS);
+ PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE);
+ PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR);
+
+ asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq);
+ PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD);
+ PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX);
+ PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG);
+ PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS);
+ PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE);
+ PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS);
+ PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE);
+ PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR);
+
+ asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq);
+ PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER);
+ PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR);
+ PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE);
+ PRINT_LMIP_word(asd_ha, lseq, IP_BITL);
+ PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG);
+ PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH);
+
+ asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq);
+ PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS);
+ PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE);
+ PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD);
+ PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR);
+ PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS);
+ PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE);
+ PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT);
+ PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES);
+ PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES);
+ PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_TIMEOUT);
+ PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_DOWN_COUNT);
+
+ asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq);
+ PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0);
+ PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1);
+
+ asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq);
+ PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0);
+ PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1);
+
+ asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq);
+ PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT);
+ PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT);
+ PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT);
+
+ asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq);
+ PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK);
+ PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT);
+ PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK);
+ PRINT_LMIP_word(asd_ha, lseq, TAG_MASK);
+ PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG);
+ PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET);
+
+ asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq);
+ PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS);
+ PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS);
+ PRINT_LMIP_word(asd_ha, lseq, SDB_DDB);
+ PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS);
+ PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG);
+
+ asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq);
+ PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME);
+ PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL);
+
+ asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq);
+ PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS);
+
+ asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq);
+ PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS);
+ PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
+}
+
+#if 0
+
+/**
+ * asd_dump_ddb_site -- dump a CSEQ DDB site
+ * @asd_ha: pointer to host adapter structure
+ * @site_no: site number of interest
+ */
+void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
+{
+ if (site_no >= asd_ha->hw_prof.max_ddbs)
+ return;
+
+#define DDB_FIELDB(__name) \
+ asd_ddbsite_read_byte(asd_ha, site_no, \
+ offsetof(struct asd_ddb_ssp_smp_target_port, __name))
+#define DDB2_FIELDB(__name) \
+ asd_ddbsite_read_byte(asd_ha, site_no, \
+ offsetof(struct asd_ddb_stp_sata_target_port, __name))
+#define DDB_FIELDW(__name) \
+ asd_ddbsite_read_word(asd_ha, site_no, \
+ offsetof(struct asd_ddb_ssp_smp_target_port, __name))
+
+#define DDB_FIELDD(__name) \
+ asd_ddbsite_read_dword(asd_ha, site_no, \
+ offsetof(struct asd_ddb_ssp_smp_target_port, __name))
+
+ asd_printk("DDB: 0x%02x\n", site_no);
+ asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
+ asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
+ asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
+ asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
+ asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
+ asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
+ asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
+ asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
+ asd_printk("Pathway Blocked Count: 0x%02x\n",
+ DDB_FIELDB(pathway_blocked_count));
+ asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
+ asd_printk("more_compat_features: 0x%08x\n",
+ DDB_FIELDD(more_compat_features));
+ asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
+ asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
+ asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
+ asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
+ asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
+ asd_printk("Active Task Count: 0x%04x\n",
+ DDB_FIELDW(active_task_count));
+ asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
+ asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
+ asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
+}
+
+void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
+{
+#define DDB0_FIELDB(__name) \
+ asd_ddbsite_read_byte(asd_ha, 0, \
+ offsetof(struct asd_ddb_seq_shared, __name))
+#define DDB0_FIELDW(__name) \
+ asd_ddbsite_read_word(asd_ha, 0, \
+ offsetof(struct asd_ddb_seq_shared, __name))
+
+#define DDB0_FIELDD(__name) \
+ asd_ddbsite_read_dword(asd_ha,0 , \
+ offsetof(struct asd_ddb_seq_shared, __name))
+
+#define DDB0_FIELDA(__name, _o) \
+ asd_ddbsite_read_byte(asd_ha, 0, \
+ offsetof(struct asd_ddb_seq_shared, __name)+_o)
+
+
+ asd_printk("DDB: 0\n");
+ asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
+ asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
+ asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
+ asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
+ asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
+ asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
+ asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
+ asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
+ asd_printk("est_nexus_buf_thresh:%04x\n",
+ DDB0_FIELDW(est_nexus_buf_thresh));
+ asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
+ asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
+ asd_printk("port_map_by_links:%02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ DDB0_FIELDA(port_map_by_links, 0),
+ DDB0_FIELDA(port_map_by_links, 1),
+ DDB0_FIELDA(port_map_by_links, 2),
+ DDB0_FIELDA(port_map_by_links, 3),
+ DDB0_FIELDA(port_map_by_links, 4),
+ DDB0_FIELDA(port_map_by_links, 5),
+ DDB0_FIELDA(port_map_by_links, 6),
+ DDB0_FIELDA(port_map_by_links, 7));
+}
+
+static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
+{
+
+#define SCB_FIELDB(__name) \
+ asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
+ + offsetof(struct initiate_ssp_task, __name))
+#define SCB_FIELDW(__name) \
+ asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
+ + offsetof(struct initiate_ssp_task, __name))
+#define SCB_FIELDD(__name) \
+ asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
+ + offsetof(struct initiate_ssp_task, __name))
+
+ asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
+ asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
+ asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
+ asd_printk("Target Port Xfer Tag: 0x%04x.\n",
+ SCB_FIELDW(ssp_frame.tptt));
+ asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
+ asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
+}
+
+/**
+ * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
+ * @asd_ha: pointer to host adapter struct
+ */
+void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
+{
+ u16 site_no;
+
+ for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
+ u8 opcode;
+
+ if (!SCB_SITE_VALID(site_no))
+ continue;
+
+ /* We are only interested in SCB sites currently used.
+ */
+ opcode = asd_scbsite_read_byte(asd_ha, site_no,
+ offsetof(struct scb_header,
+ opcode));
+ if (opcode == 0xFF)
+ continue;
+
+ asd_printk("\nSCB: 0x%x\n", site_no);
+ asd_dump_scb_site(asd_ha, site_no);
+ }
+}
+
+#endif /* 0 */
+
+/**
+ * ads_dump_seq_state -- dump CSEQ and LSEQ states
+ * @asd_ha: pointer to host adapter structure
+ * @lseq_mask: mask of LSEQs of interest
+ */
+void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask)
+{
+ int lseq;
+
+ asd_dump_cseq_state(asd_ha);
+
+ if (lseq_mask != 0)
+ for_each_sequencer(lseq_mask, lseq_mask, lseq)
+ asd_dump_lseq_state(asd_ha, lseq);
+}
+
+void asd_dump_frame_rcvd(struct asd_phy *phy,
+ struct done_list_struct *dl)
+{
+ unsigned long flags;
+ int i;
+
+ switch ((dl->status_block[1] & 0x70) >> 3) {
+ case SAS_PROTOCOL_STP:
+ ASD_DPRINTK("STP proto device-to-host FIS:\n");
+ break;
+ default:
+ case SAS_PROTOCOL_SSP:
+ ASD_DPRINTK("SAS proto IDENTIFY:\n");
+ break;
+ }
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4)
+ ASD_DPRINTK("%02x: %02x %02x %02x %02x\n",
+ i,
+ phy->frame_rcvd[i],
+ phy->frame_rcvd[i+1],
+ phy->frame_rcvd[i+2],
+ phy->frame_rcvd[i+3]);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+}
+
+#if 0
+
+static void asd_dump_scb(struct asd_ascb *ascb, int ind)
+{
+ asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
+ "index:%d, opcode:0x%02x\n",
+ ind, ascb->dma_scb.vaddr,
+ (unsigned long long)ascb->dma_scb.dma_handle,
+ (unsigned long long)
+ le64_to_cpu(ascb->scb->header.next_scb),
+ le16_to_cpu(ascb->scb->header.index),
+ ascb->scb->header.opcode);
+}
+
+void asd_dump_scb_list(struct asd_ascb *ascb, int num)
+{
+ int i = 0;
+
+ asd_printk("dumping %d scbs:\n", num);
+
+ asd_dump_scb(ascb, i++);
+ --num;
+
+ if (num > 0 && !list_empty(&ascb->list)) {
+ struct list_head *el;
+
+ list_for_each(el, &ascb->list) {
+ struct asd_ascb *s = list_entry(el, struct asd_ascb,
+ list);
+ asd_dump_scb(s, i++);
+ if (--num <= 0)
+ break;
+ }
+ }
+}
+
+#endif /* 0 */
+
+#endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h
new file mode 100644
index 000000000..191a753d4
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.h
@@ -0,0 +1,43 @@
+/*
+ * Aic94xx SAS/SATA driver dump header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _AIC94XX_DUMP_H_
+#define _AIC94XX_DUMP_H_
+
+#ifdef ASD_DEBUG
+
+void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask);
+void asd_dump_frame_rcvd(struct asd_phy *phy,
+ struct done_list_struct *dl);
+#else /* ASD_DEBUG */
+
+static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha,
+ u8 lseq_mask) { }
+static inline void asd_dump_frame_rcvd(struct asd_phy *phy,
+ struct done_list_struct *dl) { }
+#endif /* ASD_DEBUG */
+
+#endif /* _AIC94XX_DUMP_H_ */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
new file mode 100644
index 000000000..9f636a34d
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -0,0 +1,1390 @@
+/*
+ * Aic94xx SAS/SATA driver hardware interface.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "aic94xx.h"
+#include "aic94xx_reg.h"
+#include "aic94xx_hwi.h"
+#include "aic94xx_seq.h"
+#include "aic94xx_dump.h"
+
+u32 MBAR0_SWB_SIZE;
+
+/* ---------- Initialization ---------- */
+
+static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
+{
+ /* adapter came with a sas address */
+ if (asd_ha->hw_prof.sas_addr[0])
+ return 0;
+
+ return sas_request_addr(asd_ha->sas_ha.core.shost,
+ asd_ha->hw_prof.sas_addr);
+}
+
+static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
+ continue;
+ /* Set a phy's address only if it has none.
+ */
+ ASD_DPRINTK("setting phy%d addr to %llx\n", i,
+ SAS_ADDR(asd_ha->hw_prof.sas_addr));
+ memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
+ asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
+ }
+}
+
+/* ---------- PHY initialization ---------- */
+
+static void asd_init_phy_identify(struct asd_phy *phy)
+{
+ phy->identify_frame = phy->id_frm_tok->vaddr;
+
+ memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
+
+ phy->identify_frame->dev_type = SAS_END_DEVICE;
+ if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
+ phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
+ if (phy->sas_phy.role & PHY_ROLE_TARGET)
+ phy->identify_frame->target_bits = phy->sas_phy.tproto;
+ memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
+ SAS_ADDR_SIZE);
+ phy->identify_frame->phy_id = phy->sas_phy.id;
+}
+
+static int asd_init_phy(struct asd_phy *phy)
+{
+ struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ sas_phy->enabled = 1;
+ sas_phy->class = SAS;
+ sas_phy->iproto = SAS_PROTOCOL_ALL;
+ sas_phy->tproto = 0;
+ sas_phy->type = PHY_TYPE_PHYSICAL;
+ sas_phy->role = PHY_ROLE_INITIATOR;
+ sas_phy->oob_mode = OOB_NOT_CONNECTED;
+ sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+ phy->id_frm_tok = asd_alloc_coherent(asd_ha,
+ sizeof(*phy->identify_frame),
+ GFP_KERNEL);
+ if (!phy->id_frm_tok) {
+ asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
+ return -ENOMEM;
+ } else
+ asd_init_phy_identify(phy);
+
+ memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
+
+ return 0;
+}
+
+static void asd_init_ports(struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ spin_lock_init(&asd_ha->asd_ports_lock);
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ struct asd_port *asd_port = &asd_ha->asd_ports[i];
+
+ memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE);
+ memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ asd_port->phy_mask = 0;
+ asd_port->num_phys = 0;
+ }
+}
+
+static int asd_init_phys(struct asd_ha_struct *asd_ha)
+{
+ u8 i;
+ u8 phy_mask = asd_ha->hw_prof.enabled_phys;
+
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ struct asd_phy *phy = &asd_ha->phys[i];
+
+ phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
+ phy->asd_port = NULL;
+
+ phy->sas_phy.enabled = 0;
+ phy->sas_phy.id = i;
+ phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
+ phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
+ phy->sas_phy.ha = &asd_ha->sas_ha;
+ phy->sas_phy.lldd_phy = phy;
+ }
+
+ /* Now enable and initialize only the enabled phys. */
+ for_each_phy(phy_mask, phy_mask, i) {
+ int err = asd_init_phy(&asd_ha->phys[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* ---------- Sliding windows ---------- */
+
+static int asd_init_sw(struct asd_ha_struct *asd_ha)
+{
+ struct pci_dev *pcidev = asd_ha->pcidev;
+ int err;
+ u32 v;
+
+ /* Unlock MBARs */
+ err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
+ if (err) {
+ asd_printk("couldn't access conf. space of %s\n",
+ pci_name(pcidev));
+ goto Err;
+ }
+ if (v)
+ err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
+ if (err) {
+ asd_printk("couldn't write to MBAR_KEY of %s\n",
+ pci_name(pcidev));
+ goto Err;
+ }
+
+ /* Set sliding windows A, B and C to point to proper internal
+ * memory regions.
+ */
+ pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
+ pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
+ REG_BASE_ADDR_CSEQCIO);
+ pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
+ asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
+ asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
+ asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
+ MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
+ if (!asd_ha->iospace) {
+ /* MBAR1 will point to OCM (On Chip Memory) */
+ pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
+ asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
+ }
+ spin_lock_init(&asd_ha->iolock);
+Err:
+ return err;
+}
+
+/* ---------- SCB initialization ---------- */
+
+/**
+ * asd_init_scbs - manually allocate the first SCB.
+ * @asd_ha: pointer to host adapter structure
+ *
+ * This allocates the very first SCB which would be sent to the
+ * sequencer for execution. Its bus address is written to
+ * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of
+ * the _next_ scb to be DMA-ed to the host adapter is read from the last
+ * SCB DMA-ed to the host adapter, we have to always stay one step
+ * ahead of the sequencer and keep one SCB already allocated.
+ */
+static int asd_init_scbs(struct asd_ha_struct *asd_ha)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int bitmap_bytes;
+
+ /* allocate the index array and bitmap */
+ asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
+ asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
+ sizeof(void *), GFP_KERNEL);
+ if (!asd_ha->seq.tc_index_array)
+ return -ENOMEM;
+
+ bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
+ bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
+ asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
+ if (!asd_ha->seq.tc_index_bitmap)
+ return -ENOMEM;
+
+ spin_lock_init(&seq->tc_index_lock);
+
+ seq->next_scb.size = sizeof(struct scb);
+ seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
+ &seq->next_scb.dma_handle);
+ if (!seq->next_scb.vaddr) {
+ kfree(asd_ha->seq.tc_index_bitmap);
+ kfree(asd_ha->seq.tc_index_array);
+ asd_ha->seq.tc_index_bitmap = NULL;
+ asd_ha->seq.tc_index_array = NULL;
+ return -ENOMEM;
+ }
+
+ seq->pending = 0;
+ spin_lock_init(&seq->pend_q_lock);
+ INIT_LIST_HEAD(&seq->pend_q);
+
+ return 0;
+}
+
+static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
+{
+ asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
+ asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
+ ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
+ asd_ha->hw_prof.max_scbs,
+ asd_ha->hw_prof.max_ddbs);
+}
+
+/* ---------- Done List initialization ---------- */
+
+static void asd_dl_tasklet_handler(unsigned long);
+
+static int asd_init_dl(struct asd_ha_struct *asd_ha)
+{
+ asd_ha->seq.actual_dl
+ = asd_alloc_coherent(asd_ha,
+ ASD_DL_SIZE * sizeof(struct done_list_struct),
+ GFP_KERNEL);
+ if (!asd_ha->seq.actual_dl)
+ return -ENOMEM;
+ asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
+ asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
+ asd_ha->seq.dl_next = 0;
+ tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
+ (unsigned long) asd_ha);
+
+ return 0;
+}
+
+/* ---------- EDB and ESCB init ---------- */
+
+static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int i;
+
+ seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
+ if (!seq->edb_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < seq->num_edbs; i++) {
+ seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
+ gfp_flags);
+ if (!seq->edb_arr[i])
+ goto Err_unroll;
+ memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
+ }
+
+ ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);
+
+ return 0;
+
+Err_unroll:
+ for (i-- ; i >= 0; i--)
+ asd_free_coherent(asd_ha, seq->edb_arr[i]);
+ kfree(seq->edb_arr);
+ seq->edb_arr = NULL;
+
+ return -ENOMEM;
+}
+
+static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
+ gfp_t gfp_flags)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ struct asd_ascb *escb;
+ int i, escbs;
+
+ seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
+ gfp_flags);
+ if (!seq->escb_arr)
+ return -ENOMEM;
+
+ escbs = seq->num_escbs;
+ escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
+ if (!escb) {
+ asd_printk("couldn't allocate list of escbs\n");
+ goto Err;
+ }
+ seq->num_escbs -= escbs; /* subtract what was not allocated */
+ ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);
+
+ for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
+ struct asd_ascb,
+ list)) {
+ seq->escb_arr[i] = escb;
+ escb->scb->header.opcode = EMPTY_SCB;
+ }
+
+ return 0;
+Err:
+ kfree(seq->escb_arr);
+ seq->escb_arr = NULL;
+ return -ENOMEM;
+
+}
+
+static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int i, k, z = 0;
+
+ for (i = 0; i < seq->num_escbs; i++) {
+ struct asd_ascb *ascb = seq->escb_arr[i];
+ struct empty_scb *escb = &ascb->scb->escb;
+
+ ascb->edb_index = z;
+
+ escb->num_valid = ASD_EDBS_PER_SCB;
+
+ for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
+ struct sg_el *eb = &escb->eb[k];
+ struct asd_dma_tok *edb = seq->edb_arr[z++];
+
+ memset(eb, 0, sizeof(*eb));
+ eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
+ eb->size = cpu_to_le32(((u32) edb->size));
+ }
+ }
+}
+
+/**
+ * asd_init_escbs -- allocate and initialize empty scbs
+ * @asd_ha: pointer to host adapter structure
+ *
+ * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers.
+ * They transport sense data, etc.
+ */
+static int asd_init_escbs(struct asd_ha_struct *asd_ha)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int err = 0;
+
+ /* Allocate two empty data buffers (edb) per sequencer. */
+ int edbs = 2*(1+asd_ha->hw_prof.num_phys);
+
+ seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
+ seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;
+
+ err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
+ if (err) {
+ asd_printk("couldn't allocate edbs\n");
+ return err;
+ }
+
+ err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
+ if (err) {
+ asd_printk("couldn't allocate escbs\n");
+ return err;
+ }
+
+ asd_assign_edbs2escbs(asd_ha);
+ /* In order to insure that normal SCBs do not overfill sequencer
+ * memory and leave no space for escbs (halting condition),
+ * we increment pending here by the number of escbs. However,
+ * escbs are never pending.
+ */
+ seq->pending = seq->num_escbs;
+ seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;
+
+ return 0;
+}
+
+/* ---------- HW initialization ---------- */
+
+/**
+ * asd_chip_hardrst -- hard reset the chip
+ * @asd_ha: pointer to host adapter structure
+ *
+ * This takes 16 cycles and is synchronous to CFCLK, which runs
+ * at 200 MHz, so this should take at most 80 nanoseconds.
+ */
+int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
+{
+ int i;
+ int count = 100;
+ u32 reg;
+
+ for (i = 0 ; i < 4 ; i++) {
+ asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
+ }
+
+ do {
+ udelay(1);
+ reg = asd_read_reg_dword(asd_ha, CHIMINT);
+ if (reg & HARDRSTDET) {
+ asd_write_reg_dword(asd_ha, CHIMINT,
+ HARDRSTDET|PORRSTDET);
+ return 0;
+ }
+ } while (--count > 0);
+
+ return -ENODEV;
+}
+
+/**
+ * asd_init_chip -- initialize the chip
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Hard resets the chip, disables HA interrupts, downloads the sequnecer
+ * microcode and starts the sequencers. The caller has to explicitly
+ * enable HA interrupts with asd_enable_ints(asd_ha).
+ */
+static int asd_init_chip(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ err = asd_chip_hardrst(asd_ha);
+ if (err) {
+ asd_printk("couldn't hard reset %s\n",
+ pci_name(asd_ha->pcidev));
+ goto out;
+ }
+
+ asd_disable_ints(asd_ha);
+
+ err = asd_init_seqs(asd_ha);
+ if (err) {
+ asd_printk("couldn't init seqs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto out;
+ }
+
+ err = asd_start_seqs(asd_ha);
+ if (err) {
+ asd_printk("coudln't start seqs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto out;
+ }
+out:
+ return err;
+}
+
+#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
+
+static int max_devs = 0;
+module_param_named(max_devs, max_devs, int, S_IRUGO);
+MODULE_PARM_DESC(max_devs, "\n"
+ "\tMaximum number of SAS devices to support (not LUs).\n"
+ "\tDefault: 2176, Maximum: 65663.\n");
+
+static int max_cmnds = 0;
+module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
+MODULE_PARM_DESC(max_cmnds, "\n"
+ "\tMaximum number of commands queuable.\n"
+ "\tDefault: 512, Maximum: 66047.\n");
+
+static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
+{
+ unsigned long dma_addr = OCM_BASE_ADDR;
+ u32 d;
+
+ dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
+ asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
+ d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+ d |= 4;
+ asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+ asd_ha->hw_prof.max_ddbs += MAX_DEVS;
+}
+
+static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
+{
+ dma_addr_t dma_handle;
+ unsigned long dma_addr;
+ u32 d;
+ int size;
+
+ asd_extend_devctx_ocm(asd_ha);
+
+ asd_ha->hw_prof.ddb_ext = NULL;
+ if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
+ max_devs = asd_ha->hw_prof.max_ddbs;
+ return 0;
+ }
+
+ size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
+
+ asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
+ if (!asd_ha->hw_prof.ddb_ext) {
+ asd_printk("couldn't allocate memory for %d devices\n",
+ max_devs);
+ max_devs = asd_ha->hw_prof.max_ddbs;
+ return -ENOMEM;
+ }
+ dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
+ dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
+ dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
+ dma_handle = (dma_addr_t) dma_addr;
+ asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
+ d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+ d &= ~4;
+ asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+
+ asd_ha->hw_prof.max_ddbs = max_devs;
+
+ return 0;
+}
+
+static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
+{
+ dma_addr_t dma_handle;
+ unsigned long dma_addr;
+ u32 d;
+ int size;
+
+ asd_ha->hw_prof.scb_ext = NULL;
+ if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
+ max_cmnds = asd_ha->hw_prof.max_scbs;
+ return 0;
+ }
+
+ size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
+
+ asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
+ if (!asd_ha->hw_prof.scb_ext) {
+ asd_printk("couldn't allocate memory for %d commands\n",
+ max_cmnds);
+ max_cmnds = asd_ha->hw_prof.max_scbs;
+ return -ENOMEM;
+ }
+ dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
+ dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
+ dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
+ dma_handle = (dma_addr_t) dma_addr;
+ asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
+ d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+ d &= ~1;
+ asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+
+ asd_ha->hw_prof.max_scbs = max_cmnds;
+
+ return 0;
+}
+
+/**
+ * asd_init_ctxmem -- initialize context memory
+ * asd_ha: pointer to host adapter structure
+ *
+ * This function sets the maximum number of SCBs and
+ * DDBs which can be used by the sequencer. This is normally
+ * 512 and 128 respectively. If support for more SCBs or more DDBs
+ * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
+ * initialized here to extend context memory to point to host memory,
+ * thus allowing unlimited support for SCBs and DDBs -- only limited
+ * by host memory.
+ */
+static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
+{
+ int bitmap_bytes;
+
+ asd_get_max_scb_ddb(asd_ha);
+ asd_extend_devctx(asd_ha);
+ asd_extend_cmdctx(asd_ha);
+
+ /* The kernel wants bitmaps to be unsigned long sized. */
+ bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
+ bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
+ asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
+ if (!asd_ha->hw_prof.ddb_bitmap)
+ return -ENOMEM;
+ spin_lock_init(&asd_ha->hw_prof.ddb_lock);
+
+ return 0;
+}
+
+int asd_init_hw(struct asd_ha_struct *asd_ha)
+{
+ int err;
+ u32 v;
+
+ err = asd_init_sw(asd_ha);
+ if (err)
+ return err;
+
+ err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
+ if (err) {
+ asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+ pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
+ v | SC_TMR_DIS);
+ if (err) {
+ asd_printk("couldn't disable split completion timer of %s\n",
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+
+ err = asd_read_ocm(asd_ha);
+ if (err) {
+ asd_printk("couldn't read ocm(%d)\n", err);
+ /* While suspicios, it is not an error that we
+ * couldn't read the OCM. */
+ }
+
+ err = asd_read_flash(asd_ha);
+ if (err) {
+ asd_printk("couldn't read flash(%d)\n", err);
+ /* While suspicios, it is not an error that we
+ * couldn't read FLASH memory.
+ */
+ }
+
+ asd_init_ctxmem(asd_ha);
+
+ if (asd_get_user_sas_addr(asd_ha)) {
+ asd_printk("No SAS Address provided for %s\n",
+ pci_name(asd_ha->pcidev));
+ err = -ENODEV;
+ goto Out;
+ }
+
+ asd_propagate_sas_addr(asd_ha);
+
+ err = asd_init_phys(asd_ha);
+ if (err) {
+ asd_printk("couldn't initialize phys for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Out;
+ }
+
+ asd_init_ports(asd_ha);
+
+ err = asd_init_scbs(asd_ha);
+ if (err) {
+ asd_printk("couldn't initialize scbs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Out;
+ }
+
+ err = asd_init_dl(asd_ha);
+ if (err) {
+ asd_printk("couldn't initialize the done list:%d\n",
+ err);
+ goto Out;
+ }
+
+ err = asd_init_escbs(asd_ha);
+ if (err) {
+ asd_printk("couldn't initialize escbs\n");
+ goto Out;
+ }
+
+ err = asd_init_chip(asd_ha);
+ if (err) {
+ asd_printk("couldn't init the chip\n");
+ goto Out;
+ }
+Out:
+ return err;
+}
+
+/* ---------- Chip reset ---------- */
+
+/**
+ * asd_chip_reset -- reset the host adapter, etc
+ * @asd_ha: pointer to host adapter structure of interest
+ *
+ * Called from the ISR. Hard reset the chip. Let everything
+ * timeout. This should be no different than hot-unplugging the
+ * host adapter. Once everything times out we'll init the chip with
+ * a call to asd_init_chip() and enable interrupts with asd_enable_ints().
+ * XXX finish.
+ */
+static void asd_chip_reset(struct asd_ha_struct *asd_ha)
+{
+ struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+
+ ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
+ asd_chip_hardrst(asd_ha);
+ sas_ha->notify_ha_event(sas_ha, HAE_RESET);
+}
+
+/* ---------- Done List Routines ---------- */
+
+static void asd_dl_tasklet_handler(unsigned long data)
+{
+ struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
+ struct asd_seq_data *seq = &asd_ha->seq;
+ unsigned long flags;
+
+ while (1) {
+ struct done_list_struct *dl = &seq->dl[seq->dl_next];
+ struct asd_ascb *ascb;
+
+ if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
+ break;
+
+ /* find the aSCB */
+ spin_lock_irqsave(&seq->tc_index_lock, flags);
+ ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
+ spin_unlock_irqrestore(&seq->tc_index_lock, flags);
+ if (unlikely(!ascb)) {
+ ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
+ goto next_1;
+ } else if (ascb->scb->header.opcode == EMPTY_SCB) {
+ goto out;
+ } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
+ goto next_1;
+ }
+ spin_lock_irqsave(&seq->pend_q_lock, flags);
+ list_del_init(&ascb->list);
+ seq->pending--;
+ spin_unlock_irqrestore(&seq->pend_q_lock, flags);
+ out:
+ ascb->tasklet_complete(ascb, dl);
+
+ next_1:
+ seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
+ if (!seq->dl_next)
+ seq->dl_toggle ^= DL_TOGGLE_MASK;
+ }
+}
+
+/* ---------- Interrupt Service Routines ---------- */
+
+/**
+ * asd_process_donelist_isr -- schedule processing of done list entries
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
+{
+ tasklet_schedule(&asd_ha->seq.dl_tasklet);
+}
+
+/**
+ * asd_com_sas_isr -- process device communication interrupt (COMINT)
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
+{
+ u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);
+
+ /* clear COMSTAT int */
+ asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);
+
+ if (comstat & CSBUFPERR) {
+ asd_printk("%s: command/status buffer dma parity error\n",
+ pci_name(asd_ha->pcidev));
+ } else if (comstat & CSERR) {
+ int i;
+ u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
+ dmaerr &= 0xFF;
+ asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
+ "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
+ pci_name(asd_ha->pcidev),
+ dmaerr,
+ asd_read_reg_dword(asd_ha, CSDMAADR),
+ asd_read_reg_dword(asd_ha, CSDMAADR+4));
+ asd_printk("CSBUFFER:\n");
+ for (i = 0; i < 8; i++) {
+ asd_printk("%08x %08x %08x %08x\n",
+ asd_read_reg_dword(asd_ha, CSBUFFER),
+ asd_read_reg_dword(asd_ha, CSBUFFER+4),
+ asd_read_reg_dword(asd_ha, CSBUFFER+8),
+ asd_read_reg_dword(asd_ha, CSBUFFER+12));
+ }
+ asd_dump_seq_state(asd_ha, 0);
+ } else if (comstat & OVLYERR) {
+ u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
+ dmaerr = (dmaerr >> 8) & 0xFF;
+ asd_printk("%s: overlay dma error:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ dmaerr);
+ }
+ asd_chip_reset(asd_ha);
+}
+
+static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
+{
+ static const char *halt_code[256] = {
+ "UNEXPECTED_INTERRUPT0",
+ "UNEXPECTED_INTERRUPT1",
+ "UNEXPECTED_INTERRUPT2",
+ "UNEXPECTED_INTERRUPT3",
+ "UNEXPECTED_INTERRUPT4",
+ "UNEXPECTED_INTERRUPT5",
+ "UNEXPECTED_INTERRUPT6",
+ "UNEXPECTED_INTERRUPT7",
+ "UNEXPECTED_INTERRUPT8",
+ "UNEXPECTED_INTERRUPT9",
+ "UNEXPECTED_INTERRUPT10",
+ [11 ... 19] = "unknown[11,19]",
+ "NO_FREE_SCB_AVAILABLE",
+ "INVALID_SCB_OPCODE",
+ "INVALID_MBX_OPCODE",
+ "INVALID_ATA_STATE",
+ "ATA_QUEUE_FULL",
+ "ATA_TAG_TABLE_FAULT",
+ "ATA_TAG_MASK_FAULT",
+ "BAD_LINK_QUEUE_STATE",
+ "DMA2CHIM_QUEUE_ERROR",
+ "EMPTY_SCB_LIST_FULL",
+ "unknown[30]",
+ "IN_USE_SCB_ON_FREE_LIST",
+ "BAD_OPEN_WAIT_STATE",
+ "INVALID_STP_AFFILIATION",
+ "unknown[34]",
+ "EXEC_QUEUE_ERROR",
+ "TOO_MANY_EMPTIES_NEEDED",
+ "EMPTY_REQ_QUEUE_ERROR",
+ "Q_MONIRTT_MGMT_ERROR",
+ "TARGET_MODE_FLOW_ERROR",
+ "DEVICE_QUEUE_NOT_FOUND",
+ "START_IRTT_TIMER_ERROR",
+ "ABORT_TASK_ILLEGAL_REQ",
+ [43 ... 255] = "unknown[43,255]"
+ };
+
+ if (dchstatus & CSEQINT) {
+ u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);
+
+ if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
+ asd_printk("%s: CSEQ arp2int:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ arp2int);
+ } else if (arp2int & ARP2HALTC)
+ asd_printk("%s: CSEQ halted: %s\n",
+ pci_name(asd_ha->pcidev),
+ halt_code[(arp2int>>16)&0xFF]);
+ else
+ asd_printk("%s: CARP2INT:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ arp2int);
+ }
+ if (dchstatus & LSEQINT_MASK) {
+ int lseq;
+ u8 lseq_mask = dchstatus & LSEQINT_MASK;
+
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ u32 arp2int = asd_read_reg_dword(asd_ha,
+ LmARP2INT(lseq));
+ if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
+ | ARP2CIOPERR)) {
+ asd_printk("%s: LSEQ%d arp2int:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ lseq, arp2int);
+ /* XXX we should only do lseq reset */
+ } else if (arp2int & ARP2HALTC)
+ asd_printk("%s: LSEQ%d halted: %s\n",
+ pci_name(asd_ha->pcidev),
+ lseq,halt_code[(arp2int>>16)&0xFF]);
+ else
+ asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
+ pci_name(asd_ha->pcidev), lseq,
+ arp2int);
+ }
+ }
+ asd_chip_reset(asd_ha);
+}
+
+/**
+ * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
+{
+ u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
+
+ if (dchstatus & CFIFTOERR) {
+ asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
+ asd_chip_reset(asd_ha);
+ } else
+ asd_arp2_err(asd_ha, dchstatus);
+}
+
+/**
+ * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR)
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
+{
+ u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
+
+ if (!(stat0r & ASIERR)) {
+ asd_printk("hmm, EXSI interrupted but no error?\n");
+ return;
+ }
+
+ if (stat0r & ASIFMTERR) {
+ asd_printk("ASI SEEPROM format error for %s\n",
+ pci_name(asd_ha->pcidev));
+ } else if (stat0r & ASISEECHKERR) {
+ u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
+ asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
+ stat1r & CHECKSUM_MASK,
+ pci_name(asd_ha->pcidev));
+ } else {
+ u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);
+
+ if (!(statr & CPI2ASIMSTERR_MASK)) {
+ ASD_DPRINTK("hmm, ASIERR?\n");
+ return;
+ } else {
+ u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
+ u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);
+
+ asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
+ "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
+ "master id: 0x%x, master err: 0x%x\n",
+ pci_name(asd_ha->pcidev),
+ addr, data,
+ (statr & CPI2ASIBYTECNT_MASK) >> 16,
+ (statr & CPI2ASIBYTEEN_MASK) >> 12,
+ (statr & CPI2ASITARGERR_MASK) >> 8,
+ (statr & CPI2ASITARGMID_MASK) >> 4,
+ (statr & CPI2ASIMSTERR_MASK));
+ }
+ }
+ asd_chip_reset(asd_ha);
+}
+
+/**
+ * asd_hst_pcix_isr -- process host interface interrupts
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Asserted on PCIX errors: target abort, etc.
+ */
+static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
+{
+ u16 status;
+ u32 pcix_status;
+ u32 ecc_status;
+
+ pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
+ pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
+ pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);
+
+ if (status & PCI_STATUS_DETECTED_PARITY)
+ asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
+ else if (status & PCI_STATUS_REC_MASTER_ABORT)
+ asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
+ else if (status & PCI_STATUS_REC_TARGET_ABORT)
+ asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
+ else if (status & PCI_STATUS_PARITY)
+ asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
+ else if (pcix_status & RCV_SCE) {
+ asd_printk("received split completion error for %s\n",
+ pci_name(asd_ha->pcidev));
+ pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
+ /* XXX: Abort task? */
+ return;
+ } else if (pcix_status & UNEXP_SC) {
+ asd_printk("unexpected split completion for %s\n",
+ pci_name(asd_ha->pcidev));
+ pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
+ /* ignore */
+ return;
+ } else if (pcix_status & SC_DISCARD)
+ asd_printk("split completion discarded for %s\n",
+ pci_name(asd_ha->pcidev));
+ else if (ecc_status & UNCOR_ECCERR)
+ asd_printk("uncorrectable ECC error for %s\n",
+ pci_name(asd_ha->pcidev));
+ asd_chip_reset(asd_ha);
+}
+
+/**
+ * asd_hw_isr -- host adapter interrupt service routine
+ * @irq: ignored
+ * @dev_id: pointer to host adapter structure
+ *
+ * The ISR processes done list entries and level 3 error handling.
+ */
+irqreturn_t asd_hw_isr(int irq, void *dev_id)
+{
+ struct asd_ha_struct *asd_ha = dev_id;
+ u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);
+
+ if (!chimint)
+ return IRQ_NONE;
+
+ asd_write_reg_dword(asd_ha, CHIMINT, chimint);
+ (void) asd_read_reg_dword(asd_ha, CHIMINT);
+
+ if (chimint & DLAVAIL)
+ asd_process_donelist_isr(asd_ha);
+ if (chimint & COMINT)
+ asd_com_sas_isr(asd_ha);
+ if (chimint & DEVINT)
+ asd_dch_sas_isr(asd_ha);
+ if (chimint & INITERR)
+ asd_rbi_exsi_isr(asd_ha);
+ if (chimint & HOSTERR)
+ asd_hst_pcix_isr(asd_ha);
+
+ return IRQ_HANDLED;
+}
+
+/* ---------- SCB handling ---------- */
+
+static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
+ gfp_t gfp_flags)
+{
+ extern struct kmem_cache *asd_ascb_cache;
+ struct asd_seq_data *seq = &asd_ha->seq;
+ struct asd_ascb *ascb;
+ unsigned long flags;
+
+ ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
+
+ if (ascb) {
+ ascb->dma_scb.size = sizeof(struct scb);
+ ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
+ gfp_flags,
+ &ascb->dma_scb.dma_handle);
+ if (!ascb->dma_scb.vaddr) {
+ kmem_cache_free(asd_ascb_cache, ascb);
+ return NULL;
+ }
+ memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
+ asd_init_ascb(asd_ha, ascb);
+
+ spin_lock_irqsave(&seq->tc_index_lock, flags);
+ ascb->tc_index = asd_tc_index_get(seq, ascb);
+ spin_unlock_irqrestore(&seq->tc_index_lock, flags);
+ if (ascb->tc_index == -1)
+ goto undo;
+
+ ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);
+ }
+
+ return ascb;
+undo:
+ dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
+ ascb->dma_scb.dma_handle);
+ kmem_cache_free(asd_ascb_cache, ascb);
+ ASD_DPRINTK("no index for ascb\n");
+ return NULL;
+}
+
+/**
+ * asd_ascb_alloc_list -- allocate a list of aSCBs
+ * @asd_ha: pointer to host adapter structure
+ * @num: pointer to integer number of aSCBs
+ * @gfp_flags: GFP_ flags.
+ *
+ * This is the only function which is used to allocate aSCBs.
+ * It can allocate one or many. If more than one, then they form
+ * a linked list in two ways: by their list field of the ascb struct
+ * and by the next_scb field of the scb_header.
+ *
+ * Returns NULL if no memory was available, else pointer to a list
+ * of ascbs. When this function returns, @num would be the number
+ * of SCBs which were not able to be allocated, 0 if all requested
+ * were able to be allocated.
+ */
+struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
+ *asd_ha, int *num,
+ gfp_t gfp_flags)
+{
+ struct asd_ascb *first = NULL;
+
+ for ( ; *num > 0; --*num) {
+ struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);
+
+ if (!ascb)
+ break;
+ else if (!first)
+ first = ascb;
+ else {
+ struct asd_ascb *last = list_entry(first->list.prev,
+ struct asd_ascb,
+ list);
+ list_add_tail(&ascb->list, &first->list);
+ last->scb->header.next_scb =
+ cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
+ }
+ }
+
+ return first;
+}
+
+/**
+ * asd_swap_head_scb -- swap the head scb
+ * @asd_ha: pointer to host adapter structure
+ * @ascb: pointer to the head of an ascb list
+ *
+ * The sequencer knows the DMA address of the next SCB to be DMAed to
+ * the host adapter, from initialization or from the last list DMAed.
+ * seq->next_scb keeps the address of this SCB. The sequencer will
+ * DMA to the host adapter this list of SCBs. But the head (first
+ * element) of this list is not known to the sequencer. Here we swap
+ * the head of the list with the known SCB (memcpy()).
+ * Only one memcpy() is required per list so it is in our interest
+ * to keep the list of SCB as long as possible so that the ratio
+ * of number of memcpy calls to the number of SCB DMA-ed is as small
+ * as possible.
+ *
+ * LOCKING: called with the pending list lock held.
+ */
+static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
+ struct asd_ascb *ascb)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ struct asd_ascb *last = list_entry(ascb->list.prev,
+ struct asd_ascb,
+ list);
+ struct asd_dma_tok t = ascb->dma_scb;
+
+ memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
+ ascb->dma_scb = seq->next_scb;
+ ascb->scb = ascb->dma_scb.vaddr;
+ seq->next_scb = t;
+ last->scb->header.next_scb =
+ cpu_to_le64(((u64)seq->next_scb.dma_handle));
+}
+
+/**
+ * asd_start_timers -- (add and) start timers of SCBs
+ * @list: pointer to struct list_head of the scbs
+ * @to: timeout in jiffies
+ *
+ * If an SCB in the @list has no timer function, assign the default
+ * one, then start the timer of the SCB. This function is
+ * intended to be called from asd_post_ascb_list(), just prior to
+ * posting the SCBs to the sequencer.
+ */
+static void asd_start_scb_timers(struct list_head *list)
+{
+ struct asd_ascb *ascb;
+ list_for_each_entry(ascb, list, list) {
+ if (!ascb->uldd_timer) {
+ ascb->timer.data = (unsigned long) ascb;
+ ascb->timer.function = asd_ascb_timedout;
+ ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
+ add_timer(&ascb->timer);
+ }
+ }
+}
+
+/**
+ * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
+ * @asd_ha: pointer to a host adapter structure
+ * @ascb: pointer to the first aSCB in the list
+ * @num: number of aSCBs in the list (to be posted)
+ *
+ * See queueing comment in asd_post_escb_list().
+ *
+ * Additional note on queuing: In order to minimize the ratio of memcpy()
+ * to the number of ascbs sent, we try to batch-send as many ascbs as possible
+ * in one go.
+ * Two cases are possible:
+ * A) can_queue >= num,
+ * B) can_queue < num.
+ * Case A: we can send the whole batch at once. Increment "pending"
+ * in the beginning of this function, when it is checked, in order to
+ * eliminate races when this function is called by multiple processes.
+ * Case B: should never happen.
+ */
+int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+ int num)
+{
+ unsigned long flags;
+ LIST_HEAD(list);
+ int can_queue;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
+ if (can_queue >= num)
+ asd_ha->seq.pending += num;
+ else
+ can_queue = 0;
+
+ if (!can_queue) {
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+ asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
+ return -SAS_QUEUE_FULL;
+ }
+
+ asd_swap_head_scb(asd_ha, ascb);
+
+ __list_add(&list, ascb->list.prev, &ascb->list);
+
+ asd_start_scb_timers(&list);
+
+ asd_ha->seq.scbpro += num;
+ list_splice_init(&list, asd_ha->seq.pend_q.prev);
+ asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ return 0;
+}
+
+/**
+ * asd_post_escb_list -- post a list of 1 or more empty scb
+ * @asd_ha: pointer to a host adapter structure
+ * @ascb: pointer to the first empty SCB in the list
+ * @num: number of aSCBs in the list (to be posted)
+ *
+ * This is essentially the same as asd_post_ascb_list, but we do not
+ * increment pending, add those to the pending list or get indexes.
+ * See asd_init_escbs() and asd_init_post_escbs().
+ *
+ * Since sending a list of ascbs is a superset of sending a single
+ * ascb, this function exists to generalize this. More specifically,
+ * when sending a list of those, we want to do only a _single_
+ * memcpy() at swap head, as opposed to for each ascb sent (in the
+ * case of sending them one by one). That is, we want to minimize the
+ * ratio of memcpy() operations to the number of ascbs sent. The same
+ * logic applies to asd_post_ascb_list().
+ */
+int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+ int num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ asd_swap_head_scb(asd_ha, ascb);
+ asd_ha->seq.scbpro += num;
+ asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ return 0;
+}
+
+/* ---------- LED ---------- */
+
+/**
+ * asd_turn_led -- turn on/off an LED
+ * @asd_ha: pointer to host adapter structure
+ * @phy_id: the PHY id whose LED we want to manupulate
+ * @op: 1 to turn on, 0 to turn off
+ */
+void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
+{
+ if (phy_id < ASD_MAX_PHYS) {
+ u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
+ if (op)
+ v |= LEDPOL;
+ else
+ v &= ~LEDPOL;
+ asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
+ }
+}
+
+/**
+ * asd_control_led -- enable/disable an LED on the board
+ * @asd_ha: pointer to host adapter structure
+ * @phy_id: integer, the phy id
+ * @op: integer, 1 to enable, 0 to disable the LED
+ *
+ * First we output enable the LED, then we set the source
+ * to be an external module.
+ */
+void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
+{
+ if (phy_id < ASD_MAX_PHYS) {
+ u32 v;
+
+ v = asd_read_reg_dword(asd_ha, GPIOOER);
+ if (op)
+ v |= (1 << phy_id);
+ else
+ v &= ~(1 << phy_id);
+ asd_write_reg_dword(asd_ha, GPIOOER, v);
+
+ v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
+ if (op)
+ v |= (1 << phy_id);
+ else
+ v &= ~(1 << phy_id);
+ asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
+ }
+}
+
+/* ---------- PHY enable ---------- */
+
+static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
+{
+ struct asd_phy *phy = &asd_ha->phys[phy_id];
+
+ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
+ HOTPLUG_DELAY_TIMEOUT);
+
+ /* Get defaults from manuf. sector */
+ /* XXX we need defaults for those in case MS is broken. */
+ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
+ phy->phy_desc->phy_control_0);
+ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
+ phy->phy_desc->phy_control_1);
+ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
+ phy->phy_desc->phy_control_2);
+ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
+ phy->phy_desc->phy_control_3);
+
+ asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
+ ASD_COMINIT_TIMEOUT);
+
+ asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
+ phy->id_frm_tok->dma_handle);
+
+ asd_control_led(asd_ha, phy_id, 1);
+
+ return 0;
+}
+
+int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
+{
+ u8 phy_m;
+ u8 i;
+ int num = 0, k;
+ struct asd_ascb *ascb;
+ struct asd_ascb *ascb_list;
+
+ if (!phy_mask) {
+ asd_printk("%s called with phy_mask of 0!?\n", __func__);
+ return 0;
+ }
+
+ for_each_phy(phy_mask, phy_m, i) {
+ num++;
+ asd_enable_phy(asd_ha, i);
+ }
+
+ k = num;
+ ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
+ if (!ascb_list) {
+ asd_printk("no memory for control phy ascb list\n");
+ return -ENOMEM;
+ }
+ num -= k;
+
+ ascb = ascb_list;
+ for_each_phy(phy_mask, phy_m, i) {
+ asd_build_control_phy(ascb, i, ENABLE_PHY);
+ ascb = list_entry(ascb->list.next, struct asd_ascb, list);
+ }
+ ASD_DPRINTK("posting %d control phy scbs\n", num);
+ k = asd_post_ascb_list(asd_ha, ascb_list, num);
+ if (k)
+ asd_ascb_free_list(ascb_list);
+
+ return k;
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
new file mode 100644
index 000000000..8c1c28239
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -0,0 +1,398 @@
+/*
+ * Aic94xx SAS/SATA driver hardware interface header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _AIC94XX_HWI_H_
+#define _AIC94XX_HWI_H_
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+
+#include <scsi/libsas.h>
+
+#include "aic94xx.h"
+#include "aic94xx_sas.h"
+
+/* Define ASD_MAX_PHYS to the maximum phys ever. Currently 8. */
+#define ASD_MAX_PHYS 8
+#define ASD_PCBA_SN_SIZE 12
+
+struct asd_ha_addrspace {
+ void __iomem *addr;
+ unsigned long start; /* pci resource start */
+ unsigned long len; /* pci resource len */
+ unsigned long flags; /* pci resource flags */
+
+ /* addresses internal to the host adapter */
+ u32 swa_base; /* mmspace 1 (MBAR1) uses this only */
+ u32 swb_base;
+ u32 swc_base;
+};
+
+struct bios_struct {
+ int present;
+ u8 maj;
+ u8 min;
+ u32 bld;
+};
+
+struct unit_element_struct {
+ u16 num;
+ u16 size;
+ void *area;
+};
+
+struct flash_struct {
+ u32 bar;
+ int present;
+ int wide;
+ u8 manuf;
+ u8 dev_id;
+ u8 sec_prot;
+ u8 method;
+
+ u32 dir_offs;
+};
+
+struct asd_phy_desc {
+ /* From CTRL-A settings, then set to what is appropriate */
+ u8 sas_addr[SAS_ADDR_SIZE];
+ u8 max_sas_lrate;
+ u8 min_sas_lrate;
+ u8 max_sata_lrate;
+ u8 min_sata_lrate;
+ u8 flags;
+#define ASD_CRC_DIS 1
+#define ASD_SATA_SPINUP_HOLD 2
+
+ u8 phy_control_0; /* mode 5 reg 0x160 */
+ u8 phy_control_1; /* mode 5 reg 0x161 */
+ u8 phy_control_2; /* mode 5 reg 0x162 */
+ u8 phy_control_3; /* mode 5 reg 0x163 */
+};
+
+struct asd_dma_tok {
+ void *vaddr;
+ dma_addr_t dma_handle;
+ size_t size;
+};
+
+struct hw_profile {
+ struct bios_struct bios;
+ struct unit_element_struct ue;
+ struct flash_struct flash;
+
+ u8 sas_addr[SAS_ADDR_SIZE];
+ char pcba_sn[ASD_PCBA_SN_SIZE+1];
+
+ u8 enabled_phys; /* mask of enabled phys */
+ struct asd_phy_desc phy_desc[ASD_MAX_PHYS];
+ u32 max_scbs; /* absolute sequencer scb queue size */
+ struct asd_dma_tok *scb_ext;
+ u32 max_ddbs;
+ struct asd_dma_tok *ddb_ext;
+
+ spinlock_t ddb_lock;
+ void *ddb_bitmap;
+
+ int num_phys; /* ENABLEABLE */
+ int max_phys; /* REPORTED + ENABLEABLE */
+
+ unsigned addr_range; /* max # of addrs; max # of possible ports */
+ unsigned port_name_base;
+ unsigned dev_name_base;
+ unsigned sata_name_base;
+};
+
+struct asd_ascb {
+ struct list_head list;
+ struct asd_ha_struct *ha;
+
+ struct scb *scb; /* equals dma_scb->vaddr */
+ struct asd_dma_tok dma_scb;
+ struct asd_dma_tok *sg_arr;
+
+ void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *);
+ u8 uldd_timer:1;
+
+ /* internally generated command */
+ struct timer_list timer;
+ struct completion *completion;
+ u8 tag_valid:1;
+ __be16 tag; /* error recovery only */
+
+ /* If this is an Empty SCB, index of first edb in seq->edb_arr. */
+ int edb_index;
+
+ /* Used by the timer timeout function. */
+ int tc_index;
+
+ void *uldd_task;
+};
+
+#define ASD_DL_SIZE_BITS 0x8
+#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS))
+#define ASD_DEF_DL_TOGGLE 0x01
+
+struct asd_seq_data {
+ spinlock_t pend_q_lock;
+ u16 scbpro;
+ int pending;
+ struct list_head pend_q;
+ int can_queue; /* per adapter */
+ struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */
+
+ spinlock_t tc_index_lock;
+ void **tc_index_array;
+ void *tc_index_bitmap;
+ int tc_index_bitmap_bits;
+
+ struct tasklet_struct dl_tasklet;
+ struct done_list_struct *dl; /* array of done list entries, equals */
+ struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */
+ int dl_toggle;
+ int dl_next;
+
+ int num_edbs;
+ struct asd_dma_tok **edb_arr;
+ int num_escbs;
+ struct asd_ascb **escb_arr; /* array of pointers to escbs */
+};
+
+/* This is an internal port structure. These are used to get accurate
+ * phy_mask for updating DDB 0.
+ */
+struct asd_port {
+ u8 sas_addr[SAS_ADDR_SIZE];
+ u8 attached_sas_addr[SAS_ADDR_SIZE];
+ u32 phy_mask;
+ int num_phys;
+};
+
+/* This is the Host Adapter structure. It describes the hardware
+ * SAS adapter.
+ */
+struct asd_ha_struct {
+ struct pci_dev *pcidev;
+ const char *name;
+
+ struct sas_ha_struct sas_ha;
+
+ u8 revision_id;
+
+ int iospace;
+ spinlock_t iolock;
+ struct asd_ha_addrspace io_handle[2];
+
+ struct hw_profile hw_prof;
+
+ struct asd_phy phys[ASD_MAX_PHYS];
+ spinlock_t asd_ports_lock;
+ struct asd_port asd_ports[ASD_MAX_PHYS];
+ struct asd_sas_port ports[ASD_MAX_PHYS];
+
+ struct dma_pool *scb_pool;
+
+ struct asd_seq_data seq; /* sequencer related */
+ u32 bios_status;
+ const struct firmware *bios_image;
+};
+
+/* ---------- Common macros ---------- */
+
+#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle))
+#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8) \
+ ? ((u32)((__dma_handle) >> 32)) \
+ : ((u32)0))
+
+#define dev_to_asd_ha(__dev) pci_get_drvdata(to_pci_dev(__dev))
+#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \
+ && ((__site_no) & 0xF0FF) > 0x001F)
+/* For each bit set in __lseq_mask, set __lseq to equal the bit
+ * position of the set bit and execute the statement following.
+ * __mc is the temporary mask, used as a mask "counter".
+ */
+#define for_each_sequencer(__lseq_mask, __mc, __lseq) \
+ for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
+ if (((__mc) & 1))
+#define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
+ if (((__mc) & 1))
+
+#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I)))
+
+/* ---------- DMA allocs ---------- */
+
+static inline struct asd_dma_tok *asd_dmatok_alloc(gfp_t flags)
+{
+ return kmem_cache_alloc(asd_dma_token_cache, flags);
+}
+
+static inline void asd_dmatok_free(struct asd_dma_tok *token)
+{
+ kmem_cache_free(asd_dma_token_cache, token);
+}
+
+static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct *
+ asd_ha, size_t size,
+ gfp_t flags)
+{
+ struct asd_dma_tok *token = asd_dmatok_alloc(flags);
+ if (token) {
+ token->size = size;
+ token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev,
+ token->size,
+ &token->dma_handle,
+ flags);
+ if (!token->vaddr) {
+ asd_dmatok_free(token);
+ token = NULL;
+ }
+ }
+ return token;
+}
+
+static inline void asd_free_coherent(struct asd_ha_struct *asd_ha,
+ struct asd_dma_tok *token)
+{
+ if (token) {
+ dma_free_coherent(&asd_ha->pcidev->dev, token->size,
+ token->vaddr, token->dma_handle);
+ asd_dmatok_free(token);
+ }
+}
+
+static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
+ struct asd_ascb *ascb)
+{
+ INIT_LIST_HEAD(&ascb->list);
+ ascb->scb = ascb->dma_scb.vaddr;
+ ascb->ha = asd_ha;
+ ascb->timer.function = NULL;
+ init_timer(&ascb->timer);
+ ascb->tc_index = -1;
+}
+
+/* Must be called with the tc_index_lock held!
+ */
+static inline void asd_tc_index_release(struct asd_seq_data *seq, int index)
+{
+ seq->tc_index_array[index] = NULL;
+ clear_bit(index, seq->tc_index_bitmap);
+}
+
+/* Must be called with the tc_index_lock held!
+ */
+static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr)
+{
+ int index;
+
+ index = find_first_zero_bit(seq->tc_index_bitmap,
+ seq->tc_index_bitmap_bits);
+ if (index == seq->tc_index_bitmap_bits)
+ return -1;
+
+ seq->tc_index_array[index] = ptr;
+ set_bit(index, seq->tc_index_bitmap);
+
+ return index;
+}
+
+/* Must be called with the tc_index_lock held!
+ */
+static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index)
+{
+ return seq->tc_index_array[index];
+}
+
+/**
+ * asd_ascb_free -- free a single aSCB after is has completed
+ * @ascb: pointer to the aSCB of interest
+ *
+ * This frees an aSCB after it has been executed/completed by
+ * the sequencer.
+ */
+static inline void asd_ascb_free(struct asd_ascb *ascb)
+{
+ if (ascb) {
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ unsigned long flags;
+
+ BUG_ON(!list_empty(&ascb->list));
+ spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags);
+ asd_tc_index_release(&ascb->ha->seq, ascb->tc_index);
+ spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags);
+ dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
+ ascb->dma_scb.dma_handle);
+ kmem_cache_free(asd_ascb_cache, ascb);
+ }
+}
+
+/**
+ * asd_ascb_list_free -- free a list of ascbs
+ * @ascb_list: a list of ascbs
+ *
+ * This function will free a list of ascbs allocated by asd_ascb_alloc_list.
+ * It is used when say the scb queueing function returned QUEUE_FULL,
+ * and we do not need the ascbs any more.
+ */
+static inline void asd_ascb_free_list(struct asd_ascb *ascb_list)
+{
+ LIST_HEAD(list);
+ struct list_head *n, *pos;
+
+ __list_add(&list, ascb_list->list.prev, &ascb_list->list);
+ list_for_each_safe(pos, n, &list) {
+ list_del_init(pos);
+ asd_ascb_free(list_entry(pos, struct asd_ascb, list));
+ }
+}
+
+/* ---------- Function declarations ---------- */
+
+int asd_init_hw(struct asd_ha_struct *asd_ha);
+irqreturn_t asd_hw_isr(int irq, void *dev_id);
+
+
+struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
+ *asd_ha, int *num,
+ gfp_t gfp_mask);
+
+int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+ int num);
+int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+ int num);
+
+int asd_init_post_escbs(struct asd_ha_struct *asd_ha);
+void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc);
+void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
+void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
+int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask);
+
+void asd_ascb_timedout(unsigned long data);
+int asd_chip_hardrst(struct asd_ha_struct *asd_ha);
+
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
new file mode 100644
index 000000000..02a2512b7
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -0,0 +1,1080 @@
+/*
+ * Aic94xx SAS/SATA driver initialization.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aic94xx.h"
+#include "aic94xx_reg.h"
+#include "aic94xx_hwi.h"
+#include "aic94xx_seq.h"
+#include "aic94xx_sds.h"
+
+/* The format is "version.release.patchlevel" */
+#define ASD_DRIVER_VERSION "1.0.3"
+
+static int use_msi = 0;
+module_param_named(use_msi, use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "\n"
+ "\tEnable(1) or disable(0) using PCI MSI.\n"
+ "\tDefault: 0");
+
+static struct scsi_transport_template *aic94xx_transport_template;
+static int asd_scan_finished(struct Scsi_Host *, unsigned long);
+static void asd_scan_start(struct Scsi_Host *);
+
+static struct scsi_host_template aic94xx_sht = {
+ .module = THIS_MODULE,
+ /* .name is initialized */
+ .name = "aic94xx",
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .scan_finished = asd_scan_finished,
+ .scan_start = asd_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = sas_eh_bus_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static int asd_map_memio(struct asd_ha_struct *asd_ha)
+{
+ int err, i;
+ struct asd_ha_addrspace *io_handle;
+
+ asd_ha->iospace = 0;
+ for (i = 0; i < 3; i += 2) {
+ io_handle = &asd_ha->io_handle[i==0?0:1];
+ io_handle->start = pci_resource_start(asd_ha->pcidev, i);
+ io_handle->len = pci_resource_len(asd_ha->pcidev, i);
+ io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
+ err = -ENODEV;
+ if (!io_handle->start || !io_handle->len) {
+ asd_printk("MBAR%d start or length for %s is 0.\n",
+ i==0?0:1, pci_name(asd_ha->pcidev));
+ goto Err;
+ }
+ err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
+ if (err) {
+ asd_printk("couldn't reserve memory region for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Err;
+ }
+ if (io_handle->flags & IORESOURCE_CACHEABLE)
+ io_handle->addr = ioremap(io_handle->start,
+ io_handle->len);
+ else
+ io_handle->addr = ioremap_nocache(io_handle->start,
+ io_handle->len);
+ if (!io_handle->addr) {
+ asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
+ pci_name(asd_ha->pcidev));
+ goto Err_unreq;
+ }
+ }
+
+ return 0;
+Err_unreq:
+ pci_release_region(asd_ha->pcidev, i);
+Err:
+ if (i > 0) {
+ io_handle = &asd_ha->io_handle[0];
+ iounmap(io_handle->addr);
+ pci_release_region(asd_ha->pcidev, 0);
+ }
+ return err;
+}
+
+static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
+{
+ struct asd_ha_addrspace *io_handle;
+
+ io_handle = &asd_ha->io_handle[1];
+ iounmap(io_handle->addr);
+ pci_release_region(asd_ha->pcidev, 2);
+
+ io_handle = &asd_ha->io_handle[0];
+ iounmap(io_handle->addr);
+ pci_release_region(asd_ha->pcidev, 0);
+}
+
+static int asd_map_ioport(struct asd_ha_struct *asd_ha)
+{
+ int i = PCI_IOBAR_OFFSET, err;
+ struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
+
+ asd_ha->iospace = 1;
+ io_handle->start = pci_resource_start(asd_ha->pcidev, i);
+ io_handle->len = pci_resource_len(asd_ha->pcidev, i);
+ io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
+ io_handle->addr = (void __iomem *) io_handle->start;
+ if (!io_handle->start || !io_handle->len) {
+ asd_printk("couldn't get IO ports for %s\n",
+ pci_name(asd_ha->pcidev));
+ return -ENODEV;
+ }
+ err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
+ if (err) {
+ asd_printk("couldn't reserve io space for %s\n",
+ pci_name(asd_ha->pcidev));
+ }
+
+ return err;
+}
+
+static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
+{
+ pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
+}
+
+static int asd_map_ha(struct asd_ha_struct *asd_ha)
+{
+ int err;
+ u16 cmd_reg;
+
+ err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg);
+ if (err) {
+ asd_printk("couldn't read command register of %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Err;
+ }
+
+ err = -ENODEV;
+ if (cmd_reg & PCI_COMMAND_MEMORY) {
+ if ((err = asd_map_memio(asd_ha)))
+ goto Err;
+ } else if (cmd_reg & PCI_COMMAND_IO) {
+ if ((err = asd_map_ioport(asd_ha)))
+ goto Err;
+ asd_printk("%s ioport mapped -- upgrade your hardware\n",
+ pci_name(asd_ha->pcidev));
+ } else {
+ asd_printk("no proper device access to %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Err;
+ }
+
+ return 0;
+Err:
+ return err;
+}
+
+static void asd_unmap_ha(struct asd_ha_struct *asd_ha)
+{
+ if (asd_ha->iospace)
+ asd_unmap_ioport(asd_ha);
+ else
+ asd_unmap_memio(asd_ha);
+}
+
+static const char *asd_dev_rev[30] = {
+ [0] = "A0",
+ [1] = "A1",
+ [8] = "B0",
+};
+
+static int asd_common_setup(struct asd_ha_struct *asd_ha)
+{
+ int err, i;
+
+ asd_ha->revision_id = asd_ha->pcidev->revision;
+
+ err = -ENODEV;
+ if (asd_ha->revision_id < AIC9410_DEV_REV_B0) {
+ asd_printk("%s is revision %s (%X), which is not supported\n",
+ pci_name(asd_ha->pcidev),
+ asd_dev_rev[asd_ha->revision_id],
+ asd_ha->revision_id);
+ goto Err;
+ }
+ /* Provide some sane default values. */
+ asd_ha->hw_prof.max_scbs = 512;
+ asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS;
+ asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
+ /* All phys are enabled, by default. */
+ asd_ha->hw_prof.enabled_phys = 0xFF;
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
+ SAS_LINK_RATE_3_0_GBPS;
+ asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
+ SAS_LINK_RATE_1_5_GBPS;
+ asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
+ SAS_LINK_RATE_1_5_GBPS;
+ asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
+ SAS_LINK_RATE_1_5_GBPS;
+ }
+
+ return 0;
+Err:
+ return err;
+}
+
+static int asd_aic9410_setup(struct asd_ha_struct *asd_ha)
+{
+ int err = asd_common_setup(asd_ha);
+
+ if (err)
+ return err;
+
+ asd_ha->hw_prof.addr_range = 8;
+ asd_ha->hw_prof.port_name_base = 0;
+ asd_ha->hw_prof.dev_name_base = 8;
+ asd_ha->hw_prof.sata_name_base = 16;
+
+ return 0;
+}
+
+static int asd_aic9405_setup(struct asd_ha_struct *asd_ha)
+{
+ int err = asd_common_setup(asd_ha);
+
+ if (err)
+ return err;
+
+ asd_ha->hw_prof.addr_range = 4;
+ asd_ha->hw_prof.port_name_base = 0;
+ asd_ha->hw_prof.dev_name_base = 4;
+ asd_ha->hw_prof.sata_name_base = 8;
+
+ return 0;
+}
+
+static ssize_t asd_show_dev_rev(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ asd_dev_rev[asd_ha->revision_id]);
+}
+static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+
+static ssize_t asd_show_dev_bios_build(struct device *dev,
+ struct device_attribute *attr,char *buf)
+{
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld);
+}
+static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL);
+
+static ssize_t asd_show_dev_pcba_sn(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn);
+}
+static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
+
+#define FLASH_CMD_NONE 0x00
+#define FLASH_CMD_UPDATE 0x01
+#define FLASH_CMD_VERIFY 0x02
+
+struct flash_command {
+ u8 command[8];
+ int code;
+};
+
+static struct flash_command flash_command_table[] =
+{
+ {"verify", FLASH_CMD_VERIFY},
+ {"update", FLASH_CMD_UPDATE},
+ {"", FLASH_CMD_NONE} /* Last entry should be NULL. */
+};
+
+struct error_bios {
+ char *reason;
+ int err_code;
+};
+
+static struct error_bios flash_error_table[] =
+{
+ {"Failed to open bios image file", FAIL_OPEN_BIOS_FILE},
+ {"PCI ID mismatch", FAIL_CHECK_PCI_ID},
+ {"Checksum mismatch", FAIL_CHECK_SUM},
+ {"Unknown Error", FAIL_UNKNOWN},
+ {"Failed to verify.", FAIL_VERIFY},
+ {"Failed to reset flash chip.", FAIL_RESET_FLASH},
+ {"Failed to find flash chip type.", FAIL_FIND_FLASH_ID},
+ {"Failed to erash flash chip.", FAIL_ERASE_FLASH},
+ {"Failed to program flash chip.", FAIL_WRITE_FLASH},
+ {"Flash in progress", FLASH_IN_PROGRESS},
+ {"Image file size Error", FAIL_FILE_SIZE},
+ {"Input parameter error", FAIL_PARAMETERS},
+ {"Out of memory", FAIL_OUT_MEMORY},
+ {"OK", 0} /* Last entry err_code = 0. */
+};
+
+static ssize_t asd_store_update_bios(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+ char *cmd_ptr, *filename_ptr;
+ struct bios_file_header header, *hdr_ptr;
+ int res, i;
+ u32 csum = 0;
+ int flash_command = FLASH_CMD_NONE;
+ int err = 0;
+
+ cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+
+ if (!cmd_ptr) {
+ err = FAIL_OUT_MEMORY;
+ goto out;
+ }
+
+ filename_ptr = cmd_ptr + count;
+ res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
+ if (res != 2) {
+ err = FAIL_PARAMETERS;
+ goto out1;
+ }
+
+ for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
+ if (!memcmp(flash_command_table[i].command,
+ cmd_ptr, strlen(cmd_ptr))) {
+ flash_command = flash_command_table[i].code;
+ break;
+ }
+ }
+ if (flash_command == FLASH_CMD_NONE) {
+ err = FAIL_PARAMETERS;
+ goto out1;
+ }
+
+ if (asd_ha->bios_status == FLASH_IN_PROGRESS) {
+ err = FLASH_IN_PROGRESS;
+ goto out1;
+ }
+ err = request_firmware(&asd_ha->bios_image,
+ filename_ptr,
+ &asd_ha->pcidev->dev);
+ if (err) {
+ asd_printk("Failed to load bios image file %s, error %d\n",
+ filename_ptr, err);
+ err = FAIL_OPEN_BIOS_FILE;
+ goto out1;
+ }
+
+ hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data;
+
+ if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor ||
+ hdr_ptr->contrl_id.device != asd_ha->pcidev->device) &&
+ (hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor ||
+ hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) {
+
+ ASD_DPRINTK("The PCI vendor or device id does not match\n");
+ ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x"
+ " pci vendor=%x pci dev=%x\n",
+ hdr_ptr->contrl_id.vendor,
+ hdr_ptr->contrl_id.device,
+ hdr_ptr->contrl_id.sub_vendor,
+ hdr_ptr->contrl_id.sub_device,
+ asd_ha->pcidev->vendor,
+ asd_ha->pcidev->device);
+ err = FAIL_CHECK_PCI_ID;
+ goto out2;
+ }
+
+ if (hdr_ptr->filelen != asd_ha->bios_image->size) {
+ err = FAIL_FILE_SIZE;
+ goto out2;
+ }
+
+ /* calculate checksum */
+ for (i = 0; i < hdr_ptr->filelen; i++)
+ csum += asd_ha->bios_image->data[i];
+
+ if ((csum & 0x0000ffff) != hdr_ptr->checksum) {
+ ASD_DPRINTK("BIOS file checksum mismatch\n");
+ err = FAIL_CHECK_SUM;
+ goto out2;
+ }
+ if (flash_command == FLASH_CMD_UPDATE) {
+ asd_ha->bios_status = FLASH_IN_PROGRESS;
+ err = asd_write_flash_seg(asd_ha,
+ &asd_ha->bios_image->data[sizeof(*hdr_ptr)],
+ 0, hdr_ptr->filelen-sizeof(*hdr_ptr));
+ if (!err)
+ err = asd_verify_flash_seg(asd_ha,
+ &asd_ha->bios_image->data[sizeof(*hdr_ptr)],
+ 0, hdr_ptr->filelen-sizeof(*hdr_ptr));
+ } else {
+ asd_ha->bios_status = FLASH_IN_PROGRESS;
+ err = asd_verify_flash_seg(asd_ha,
+ &asd_ha->bios_image->data[sizeof(header)],
+ 0, hdr_ptr->filelen-sizeof(header));
+ }
+
+out2:
+ release_firmware(asd_ha->bios_image);
+out1:
+ kfree(cmd_ptr);
+out:
+ asd_ha->bios_status = err;
+
+ if (!err)
+ return count;
+ else
+ return -err;
+}
+
+static ssize_t asd_show_update_bios(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+
+ for (i = 0; flash_error_table[i].err_code != 0; i++) {
+ if (flash_error_table[i].err_code == asd_ha->bios_status)
+ break;
+ }
+ if (asd_ha->bios_status != FLASH_IN_PROGRESS)
+ asd_ha->bios_status = FLASH_OK;
+
+ return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
+ flash_error_table[i].err_code,
+ flash_error_table[i].reason);
+}
+
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
+ asd_show_update_bios, asd_store_update_bios);
+
+static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ if (err)
+ return err;
+
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+ if (err)
+ goto err_rev;
+
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+ if (err)
+ goto err_biosb;
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
+ if (err)
+ goto err_update_bios;
+
+ return 0;
+
+err_update_bios:
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+err_biosb:
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+err_rev:
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ return err;
+}
+
+static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
+{
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
+}
+
+/* The first entry, 0, is used for dynamic ids, the rest for devices
+ * we know about.
+ */
+static const struct asd_pcidev_struct {
+ const char * name;
+ int (*setup)(struct asd_ha_struct *asd_ha);
+} asd_pcidev_data[] = {
+ /* Id 0 is used for dynamic ids. */
+ { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
+ .setup = asd_aic9410_setup
+ },
+ { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter",
+ .setup = asd_aic9410_setup
+ },
+ { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter",
+ .setup = asd_aic9405_setup
+ },
+};
+
+static int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
+{
+ asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool",
+ &asd_ha->pcidev->dev,
+ sizeof(struct scb),
+ 8, 0);
+ if (!asd_ha->scb_pool) {
+ asd_printk("couldn't create scb pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * asd_free_edbs -- free empty data buffers
+ * asd_ha: pointer to host adapter structure
+ */
+static void asd_free_edbs(struct asd_ha_struct *asd_ha)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int i;
+
+ for (i = 0; i < seq->num_edbs; i++)
+ asd_free_coherent(asd_ha, seq->edb_arr[i]);
+ kfree(seq->edb_arr);
+ seq->edb_arr = NULL;
+}
+
+static void asd_free_escbs(struct asd_ha_struct *asd_ha)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int i;
+
+ for (i = 0; i < seq->num_escbs; i++) {
+ if (!list_empty(&seq->escb_arr[i]->list))
+ list_del_init(&seq->escb_arr[i]->list);
+
+ asd_ascb_free(seq->escb_arr[i]);
+ }
+ kfree(seq->escb_arr);
+ seq->escb_arr = NULL;
+}
+
+static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ if (asd_ha->hw_prof.ddb_ext)
+ asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext);
+ if (asd_ha->hw_prof.scb_ext)
+ asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
+
+ if (asd_ha->hw_prof.ddb_bitmap)
+ kfree(asd_ha->hw_prof.ddb_bitmap);
+ asd_ha->hw_prof.ddb_bitmap = NULL;
+
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ struct asd_phy *phy = &asd_ha->phys[i];
+
+ asd_free_coherent(asd_ha, phy->id_frm_tok);
+ }
+ if (asd_ha->seq.escb_arr)
+ asd_free_escbs(asd_ha);
+ if (asd_ha->seq.edb_arr)
+ asd_free_edbs(asd_ha);
+ if (asd_ha->hw_prof.ue.area) {
+ kfree(asd_ha->hw_prof.ue.area);
+ asd_ha->hw_prof.ue.area = NULL;
+ }
+ if (asd_ha->seq.tc_index_array) {
+ kfree(asd_ha->seq.tc_index_array);
+ kfree(asd_ha->seq.tc_index_bitmap);
+ asd_ha->seq.tc_index_array = NULL;
+ asd_ha->seq.tc_index_bitmap = NULL;
+ }
+ if (asd_ha->seq.actual_dl) {
+ asd_free_coherent(asd_ha, asd_ha->seq.actual_dl);
+ asd_ha->seq.actual_dl = NULL;
+ asd_ha->seq.dl = NULL;
+ }
+ if (asd_ha->seq.next_scb.vaddr) {
+ dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr,
+ asd_ha->seq.next_scb.dma_handle);
+ asd_ha->seq.next_scb.vaddr = NULL;
+ }
+ dma_pool_destroy(asd_ha->scb_pool);
+ asd_ha->scb_pool = NULL;
+}
+
+struct kmem_cache *asd_dma_token_cache;
+struct kmem_cache *asd_ascb_cache;
+
+static int asd_create_global_caches(void)
+{
+ if (!asd_dma_token_cache) {
+ asd_dma_token_cache
+ = kmem_cache_create(ASD_DRIVER_NAME "_dma_token",
+ sizeof(struct asd_dma_tok),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!asd_dma_token_cache) {
+ asd_printk("couldn't create dma token cache\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (!asd_ascb_cache) {
+ asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb",
+ sizeof(struct asd_ascb),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!asd_ascb_cache) {
+ asd_printk("couldn't create ascb cache\n");
+ goto Err;
+ }
+ }
+
+ return 0;
+Err:
+ kmem_cache_destroy(asd_dma_token_cache);
+ asd_dma_token_cache = NULL;
+ return -ENOMEM;
+}
+
+static void asd_destroy_global_caches(void)
+{
+ if (asd_dma_token_cache)
+ kmem_cache_destroy(asd_dma_token_cache);
+ asd_dma_token_cache = NULL;
+
+ if (asd_ascb_cache)
+ kmem_cache_destroy(asd_ascb_cache);
+ asd_ascb_cache = NULL;
+}
+
+static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
+{
+ int i;
+ struct asd_sas_phy **sas_phys =
+ kcalloc(ASD_MAX_PHYS, sizeof(*sas_phys), GFP_KERNEL);
+ struct asd_sas_port **sas_ports =
+ kcalloc(ASD_MAX_PHYS, sizeof(*sas_ports), GFP_KERNEL);
+
+ if (!sas_phys || !sas_ports) {
+ kfree(sas_phys);
+ kfree(sas_ports);
+ return -ENOMEM;
+ }
+
+ asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
+ asd_ha->sas_ha.lldd_module = THIS_MODULE;
+ asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
+
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ sas_phys[i] = &asd_ha->phys[i].sas_phy;
+ sas_ports[i] = &asd_ha->ports[i];
+ }
+
+ asd_ha->sas_ha.sas_phy = sas_phys;
+ asd_ha->sas_ha.sas_port= sas_ports;
+ asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
+
+ return sas_register_ha(&asd_ha->sas_ha);
+}
+
+static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ err = sas_unregister_ha(&asd_ha->sas_ha);
+
+ sas_remove_host(asd_ha->sas_ha.core.shost);
+ scsi_remove_host(asd_ha->sas_ha.core.shost);
+ scsi_host_put(asd_ha->sas_ha.core.shost);
+
+ kfree(asd_ha->sas_ha.sas_phy);
+ kfree(asd_ha->sas_ha.sas_port);
+
+ return err;
+}
+
+static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ const struct asd_pcidev_struct *asd_dev;
+ unsigned asd_id = (unsigned) id->driver_data;
+ struct asd_ha_struct *asd_ha;
+ struct Scsi_Host *shost;
+ int err;
+
+ if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) {
+ asd_printk("wrong driver_data in PCI table\n");
+ return -ENODEV;
+ }
+
+ if ((err = pci_enable_device(dev))) {
+ asd_printk("couldn't enable device %s\n", pci_name(dev));
+ return err;
+ }
+
+ pci_set_master(dev);
+
+ err = -ENOMEM;
+
+ shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *));
+ if (!shost)
+ goto Err;
+
+ asd_dev = &asd_pcidev_data[asd_id];
+
+ asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL);
+ if (!asd_ha) {
+ asd_printk("out of memory\n");
+ goto Err_put;
+ }
+ asd_ha->pcidev = dev;
+ asd_ha->sas_ha.dev = &asd_ha->pcidev->dev;
+ asd_ha->sas_ha.lldd_ha = asd_ha;
+
+ asd_ha->bios_status = FLASH_OK;
+ asd_ha->name = asd_dev->name;
+ asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
+
+ SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
+ asd_ha->sas_ha.core.shost = shost;
+ shost->transportt = aic94xx_transport_template;
+ shost->max_id = ~0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = 16;
+
+ err = scsi_add_host(shost, &dev->dev);
+ if (err)
+ goto Err_free;
+
+ err = asd_dev->setup(asd_ha);
+ if (err)
+ goto Err_remove;
+
+ err = -ENODEV;
+ if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
+ ;
+ else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
+ ;
+ else {
+ asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
+ goto Err_remove;
+ }
+
+ pci_set_drvdata(dev, asd_ha);
+
+ err = asd_map_ha(asd_ha);
+ if (err)
+ goto Err_remove;
+
+ err = asd_create_ha_caches(asd_ha);
+ if (err)
+ goto Err_unmap;
+
+ err = asd_init_hw(asd_ha);
+ if (err)
+ goto Err_free_cache;
+
+ asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled "
+ "phys, flash %s, BIOS %s%d\n",
+ pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr),
+ asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys,
+ asd_ha->hw_prof.num_phys,
+ asd_ha->hw_prof.flash.present ? "present" : "not present",
+ asd_ha->hw_prof.bios.present ? "build " : "not present",
+ asd_ha->hw_prof.bios.bld);
+
+ shost->can_queue = asd_ha->seq.can_queue;
+
+ if (use_msi)
+ pci_enable_msi(asd_ha->pcidev);
+
+ err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, IRQF_SHARED,
+ ASD_DRIVER_NAME, asd_ha);
+ if (err) {
+ asd_printk("couldn't get irq %d for %s\n",
+ asd_ha->pcidev->irq, pci_name(asd_ha->pcidev));
+ goto Err_irq;
+ }
+ asd_enable_ints(asd_ha);
+
+ err = asd_init_post_escbs(asd_ha);
+ if (err) {
+ asd_printk("couldn't post escbs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Err_escbs;
+ }
+ ASD_DPRINTK("escbs posted\n");
+
+ err = asd_create_dev_attrs(asd_ha);
+ if (err)
+ goto Err_dev_attrs;
+
+ err = asd_register_sas_ha(asd_ha);
+ if (err)
+ goto Err_reg_sas;
+
+ scsi_scan_host(shost);
+
+ return 0;
+
+Err_reg_sas:
+ asd_remove_dev_attrs(asd_ha);
+Err_dev_attrs:
+Err_escbs:
+ asd_disable_ints(asd_ha);
+ free_irq(dev->irq, asd_ha);
+Err_irq:
+ if (use_msi)
+ pci_disable_msi(dev);
+ asd_chip_hardrst(asd_ha);
+Err_free_cache:
+ asd_destroy_ha_caches(asd_ha);
+Err_unmap:
+ asd_unmap_ha(asd_ha);
+Err_remove:
+ scsi_remove_host(shost);
+Err_free:
+ kfree(asd_ha);
+Err_put:
+ scsi_host_put(shost);
+Err:
+ pci_disable_device(dev);
+ return err;
+}
+
+static void asd_free_queues(struct asd_ha_struct *asd_ha)
+{
+ unsigned long flags;
+ LIST_HEAD(pending);
+ struct list_head *n, *pos;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ asd_ha->seq.pending = 0;
+ list_splice_init(&asd_ha->seq.pend_q, &pending);
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ if (!list_empty(&pending))
+ ASD_DPRINTK("Uh-oh! Pending is not empty!\n");
+
+ list_for_each_safe(pos, n, &pending) {
+ struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
+ /*
+ * Delete unexpired ascb timers. This may happen if we issue
+ * a CONTROL PHY scb to an adapter and rmmod before the scb
+ * times out. Apparently we don't wait for the CONTROL PHY
+ * to complete, so it doesn't matter if we kill the timer.
+ */
+ del_timer_sync(&ascb->timer);
+ WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
+
+ list_del_init(pos);
+ ASD_DPRINTK("freeing from pending\n");
+ asd_ascb_free(ascb);
+ }
+}
+
+static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
+{
+ u8 phy_mask = asd_ha->hw_prof.enabled_phys;
+ u8 i;
+
+ for_each_phy(phy_mask, phy_mask, i) {
+ asd_turn_led(asd_ha, i, 0);
+ asd_control_led(asd_ha, i, 0);
+ }
+}
+
+static void asd_pci_remove(struct pci_dev *dev)
+{
+ struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
+
+ if (!asd_ha)
+ return;
+
+ asd_unregister_sas_ha(asd_ha);
+
+ asd_disable_ints(asd_ha);
+
+ asd_remove_dev_attrs(asd_ha);
+
+ /* XXX more here as needed */
+
+ free_irq(dev->irq, asd_ha);
+ if (use_msi)
+ pci_disable_msi(asd_ha->pcidev);
+ asd_turn_off_leds(asd_ha);
+ asd_chip_hardrst(asd_ha);
+ asd_free_queues(asd_ha);
+ asd_destroy_ha_caches(asd_ha);
+ asd_unmap_ha(asd_ha);
+ kfree(asd_ha);
+ pci_disable_device(dev);
+ return;
+}
+
+static void asd_scan_start(struct Scsi_Host *shost)
+{
+ struct asd_ha_struct *asd_ha;
+ int err;
+
+ asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
+ if (err)
+ asd_printk("Couldn't enable phys, err:%d\n", err);
+}
+
+static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ /* give the phy enabling interrupt event time to come in (1s
+ * is empirically about all it takes) */
+ if (time < HZ)
+ return 0;
+ /* Wait for discovery to finish */
+ sas_drain_work(SHOST_TO_SAS_HA(shost));
+ return 1;
+}
+
+static ssize_t asd_version_show(struct device_driver *driver, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
+}
+static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
+
+static int asd_create_driver_attrs(struct device_driver *driver)
+{
+ return driver_create_file(driver, &driver_attr_version);
+}
+
+static void asd_remove_driver_attrs(struct device_driver *driver)
+{
+ driver_remove_file(driver, &driver_attr_version);
+}
+
+static struct sas_domain_function_template aic94xx_transport_functions = {
+ .lldd_dev_found = asd_dev_found,
+ .lldd_dev_gone = asd_dev_gone,
+
+ .lldd_execute_task = asd_execute_task,
+
+ .lldd_abort_task = asd_abort_task,
+ .lldd_abort_task_set = asd_abort_task_set,
+ .lldd_clear_aca = asd_clear_aca,
+ .lldd_clear_task_set = asd_clear_task_set,
+ .lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
+ .lldd_lu_reset = asd_lu_reset,
+ .lldd_query_task = asd_query_task,
+
+ .lldd_clear_nexus_port = asd_clear_nexus_port,
+ .lldd_clear_nexus_ha = asd_clear_nexus_ha,
+
+ .lldd_control_phy = asd_control_phy,
+
+ .lldd_ata_set_dmamode = asd_set_dmamode,
+};
+
+static const struct pci_device_id aic94xx_pci_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41E),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41F),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x430),0, 0, 2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x432),0, 0, 2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43E),0, 0, 2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43F),0, 0, 2},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, aic94xx_pci_table);
+
+static struct pci_driver aic94xx_pci_driver = {
+ .name = ASD_DRIVER_NAME,
+ .id_table = aic94xx_pci_table,
+ .probe = asd_pci_probe,
+ .remove = asd_pci_remove,
+};
+
+static int __init aic94xx_init(void)
+{
+ int err;
+
+
+ asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION,
+ ASD_DRIVER_VERSION);
+
+ err = asd_create_global_caches();
+ if (err)
+ return err;
+
+ aic94xx_transport_template =
+ sas_domain_attach_transport(&aic94xx_transport_functions);
+ if (!aic94xx_transport_template)
+ goto out_destroy_caches;
+
+ err = pci_register_driver(&aic94xx_pci_driver);
+ if (err)
+ goto out_release_transport;
+
+ err = asd_create_driver_attrs(&aic94xx_pci_driver.driver);
+ if (err)
+ goto out_unregister_pcidrv;
+
+ return err;
+
+ out_unregister_pcidrv:
+ pci_unregister_driver(&aic94xx_pci_driver);
+ out_release_transport:
+ sas_release_transport(aic94xx_transport_template);
+ out_destroy_caches:
+ asd_destroy_global_caches();
+
+ return err;
+}
+
+static void __exit aic94xx_exit(void)
+{
+ asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
+ pci_unregister_driver(&aic94xx_pci_driver);
+ sas_release_transport(aic94xx_transport_template);
+ asd_release_firmware();
+ asd_destroy_global_caches();
+ asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
+ ASD_DRIVER_VERSION);
+}
+
+module_init(aic94xx_init);
+module_exit(aic94xx_exit);
+
+MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
+MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ASD_DRIVER_VERSION);
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c
new file mode 100644
index 000000000..56b17c225
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.c
@@ -0,0 +1,331 @@
+/*
+ * Aic94xx SAS/SATA driver register access.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/pci.h>
+#include "aic94xx_reg.h"
+#include "aic94xx.h"
+
+/* Writing to device address space.
+ * Offset comes before value to remind that the operation of
+ * this function is *offs = val.
+ */
+static void asd_write_byte(struct asd_ha_struct *asd_ha,
+ unsigned long offs, u8 val)
+{
+ if (unlikely(asd_ha->iospace))
+ outb(val,
+ (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
+ else
+ writeb(val, asd_ha->io_handle[0].addr + offs);
+ wmb();
+}
+
+static void asd_write_word(struct asd_ha_struct *asd_ha,
+ unsigned long offs, u16 val)
+{
+ if (unlikely(asd_ha->iospace))
+ outw(val,
+ (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
+ else
+ writew(val, asd_ha->io_handle[0].addr + offs);
+ wmb();
+}
+
+static void asd_write_dword(struct asd_ha_struct *asd_ha,
+ unsigned long offs, u32 val)
+{
+ if (unlikely(asd_ha->iospace))
+ outl(val,
+ (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
+ else
+ writel(val, asd_ha->io_handle[0].addr + offs);
+ wmb();
+}
+
+/* Reading from device address space.
+ */
+static u8 asd_read_byte(struct asd_ha_struct *asd_ha, unsigned long offs)
+{
+ u8 val;
+ if (unlikely(asd_ha->iospace))
+ val = inb((unsigned long) asd_ha->io_handle[0].addr
+ + (offs & 0xFF));
+ else
+ val = readb(asd_ha->io_handle[0].addr + offs);
+ rmb();
+ return val;
+}
+
+static u16 asd_read_word(struct asd_ha_struct *asd_ha,
+ unsigned long offs)
+{
+ u16 val;
+ if (unlikely(asd_ha->iospace))
+ val = inw((unsigned long)asd_ha->io_handle[0].addr
+ + (offs & 0xFF));
+ else
+ val = readw(asd_ha->io_handle[0].addr + offs);
+ rmb();
+ return val;
+}
+
+static u32 asd_read_dword(struct asd_ha_struct *asd_ha,
+ unsigned long offs)
+{
+ u32 val;
+ if (unlikely(asd_ha->iospace))
+ val = inl((unsigned long) asd_ha->io_handle[0].addr
+ + (offs & 0xFF));
+ else
+ val = readl(asd_ha->io_handle[0].addr + offs);
+ rmb();
+ return val;
+}
+
+static inline u32 asd_mem_offs_swa(void)
+{
+ return 0;
+}
+
+static inline u32 asd_mem_offs_swc(void)
+{
+ return asd_mem_offs_swa() + MBAR0_SWA_SIZE;
+}
+
+static inline u32 asd_mem_offs_swb(void)
+{
+ return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20;
+}
+
+/* We know that the register wanted is in the range
+ * of the sliding window.
+ */
+#define ASD_READ_SW(ww, type, ord) \
+static type asd_read_##ww##_##ord(struct asd_ha_struct *asd_ha, \
+ u32 reg) \
+{ \
+ struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
+ u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\
+ return asd_read_##ord(asd_ha, (unsigned long)map_offs); \
+}
+
+#define ASD_WRITE_SW(ww, type, ord) \
+static void asd_write_##ww##_##ord(struct asd_ha_struct *asd_ha, \
+ u32 reg, type val) \
+{ \
+ struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
+ u32 map_offs = (reg - io_handle->ww##_base) + asd_mem_offs_##ww();\
+ asd_write_##ord(asd_ha, (unsigned long)map_offs, val); \
+}
+
+ASD_READ_SW(swa, u8, byte);
+ASD_READ_SW(swa, u16, word);
+ASD_READ_SW(swa, u32, dword);
+
+ASD_READ_SW(swb, u8, byte);
+ASD_READ_SW(swb, u16, word);
+ASD_READ_SW(swb, u32, dword);
+
+ASD_READ_SW(swc, u8, byte);
+ASD_READ_SW(swc, u16, word);
+ASD_READ_SW(swc, u32, dword);
+
+ASD_WRITE_SW(swa, u8, byte);
+ASD_WRITE_SW(swa, u16, word);
+ASD_WRITE_SW(swa, u32, dword);
+
+ASD_WRITE_SW(swb, u8, byte);
+ASD_WRITE_SW(swb, u16, word);
+ASD_WRITE_SW(swb, u32, dword);
+
+ASD_WRITE_SW(swc, u8, byte);
+ASD_WRITE_SW(swc, u16, word);
+ASD_WRITE_SW(swc, u32, dword);
+
+/*
+ * A word about sliding windows:
+ * MBAR0 is divided into sliding windows A, C and B, in that order.
+ * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes.
+ * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes.
+ * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F.
+ * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0.
+ * See asd_init_sw() in aic94xx_hwi.c
+ *
+ * We map the most common registers we'd access of the internal 4GB
+ * host adapter memory space. If a register/internal memory location
+ * is wanted which is not mapped, we slide SWB, by paging it,
+ * see asd_move_swb() in aic94xx_reg.c.
+ */
+
+/**
+ * asd_move_swb -- move sliding window B
+ * @asd_ha: pointer to host adapter structure
+ * @reg: register desired to be within range of the new window
+ */
+static void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
+{
+ u32 base = reg & ~(MBAR0_SWB_SIZE-1);
+ pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base);
+ asd_ha->io_handle[0].swb_base = base;
+}
+
+static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val)
+{
+ struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
+ BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
+ if (io_handle->swa_base <= reg
+ && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
+ asd_write_swa_byte (asd_ha, reg,val);
+ else if (io_handle->swb_base <= reg
+ && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
+ asd_write_swb_byte (asd_ha, reg, val);
+ else if (io_handle->swc_base <= reg
+ && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
+ asd_write_swc_byte (asd_ha, reg, val);
+ else {
+ /* Ok, we have to move SWB */
+ asd_move_swb(asd_ha, reg);
+ asd_write_swb_byte (asd_ha, reg, val);
+ }
+}
+
+#define ASD_WRITE_REG(type, ord) \
+void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\
+{ \
+ struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
+ unsigned long flags; \
+ BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
+ spin_lock_irqsave(&asd_ha->iolock, flags); \
+ if (io_handle->swa_base <= reg \
+ && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
+ asd_write_swa_##ord (asd_ha, reg,val); \
+ else if (io_handle->swb_base <= reg \
+ && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
+ asd_write_swb_##ord (asd_ha, reg, val); \
+ else if (io_handle->swc_base <= reg \
+ && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
+ asd_write_swc_##ord (asd_ha, reg, val); \
+ else { \
+ /* Ok, we have to move SWB */ \
+ asd_move_swb(asd_ha, reg); \
+ asd_write_swb_##ord (asd_ha, reg, val); \
+ } \
+ spin_unlock_irqrestore(&asd_ha->iolock, flags); \
+}
+
+ASD_WRITE_REG(u8, byte);
+ASD_WRITE_REG(u16,word);
+ASD_WRITE_REG(u32,dword);
+
+static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg)
+{
+ struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
+ u8 val;
+ BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
+ if (io_handle->swa_base <= reg
+ && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
+ val = asd_read_swa_byte (asd_ha, reg);
+ else if (io_handle->swb_base <= reg
+ && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
+ val = asd_read_swb_byte (asd_ha, reg);
+ else if (io_handle->swc_base <= reg
+ && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
+ val = asd_read_swc_byte (asd_ha, reg);
+ else {
+ /* Ok, we have to move SWB */
+ asd_move_swb(asd_ha, reg);
+ val = asd_read_swb_byte (asd_ha, reg);
+ }
+ return val;
+}
+
+#define ASD_READ_REG(type, ord) \
+type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \
+{ \
+ struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
+ type val; \
+ unsigned long flags; \
+ BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
+ spin_lock_irqsave(&asd_ha->iolock, flags); \
+ if (io_handle->swa_base <= reg \
+ && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
+ val = asd_read_swa_##ord (asd_ha, reg); \
+ else if (io_handle->swb_base <= reg \
+ && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
+ val = asd_read_swb_##ord (asd_ha, reg); \
+ else if (io_handle->swc_base <= reg \
+ && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
+ val = asd_read_swc_##ord (asd_ha, reg); \
+ else { \
+ /* Ok, we have to move SWB */ \
+ asd_move_swb(asd_ha, reg); \
+ val = asd_read_swb_##ord (asd_ha, reg); \
+ } \
+ spin_unlock_irqrestore(&asd_ha->iolock, flags); \
+ return val; \
+}
+
+ASD_READ_REG(u8, byte);
+ASD_READ_REG(u16,word);
+ASD_READ_REG(u32,dword);
+
+/**
+ * asd_read_reg_string -- read a string of bytes from io space memory
+ * @asd_ha: pointer to host adapter structure
+ * @dst: pointer to a destination buffer where data will be written to
+ * @offs: start offset (register) to read from
+ * @count: number of bytes to read
+ */
+void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
+ u32 offs, int count)
+{
+ u8 *p = dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->iolock, flags);
+ for ( ; count > 0; count--, offs++, p++)
+ *p = __asd_read_reg_byte(asd_ha, offs);
+ spin_unlock_irqrestore(&asd_ha->iolock, flags);
+}
+
+/**
+ * asd_write_reg_string -- write a string of bytes to io space memory
+ * @asd_ha: pointer to host adapter structure
+ * @src: pointer to source buffer where data will be read from
+ * @offs: start offset (register) to write to
+ * @count: number of bytes to write
+ */
+void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
+ u32 offs, int count)
+{
+ u8 *p = src;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->iolock, flags);
+ for ( ; count > 0; count--, offs++, p++)
+ __asd_write_reg_byte(asd_ha, offs, *p);
+ spin_unlock_irqrestore(&asd_ha->iolock, flags);
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h
new file mode 100644
index 000000000..2279307fd
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.h
@@ -0,0 +1,302 @@
+/*
+ * Aic94xx SAS/SATA driver hardware registers definitions.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _AIC94XX_REG_H_
+#define _AIC94XX_REG_H_
+
+#include <asm/io.h>
+#include "aic94xx_hwi.h"
+
+/* Values */
+#define AIC9410_DEV_REV_B0 0x8
+
+/* MBAR0, SWA, SWB, SWC, internal memory space addresses */
+#define REG_BASE_ADDR 0xB8000000
+#define REG_BASE_ADDR_CSEQCIO 0xB8002000
+#define REG_BASE_ADDR_EXSI 0xB8042800
+
+#define MBAR0_SWA_SIZE 0x58
+extern u32 MBAR0_SWB_SIZE;
+#define MBAR0_SWC_SIZE 0x8
+
+/* MBAR1, points to On Chip Memory */
+#define OCM_BASE_ADDR 0xA0000000
+#define OCM_MAX_SIZE 0x20000
+
+/* Smallest address possible to reference */
+#define ALL_BASE_ADDR OCM_BASE_ADDR
+
+/* PCI configuration space registers */
+#define PCI_IOBAR_OFFSET 4
+
+#define PCI_CONF_MBAR1 0x6C
+#define PCI_CONF_MBAR0_SWA 0x70
+#define PCI_CONF_MBAR0_SWB 0x74
+#define PCI_CONF_MBAR0_SWC 0x78
+#define PCI_CONF_MBAR_KEY 0x7C
+#define PCI_CONF_FLSH_BAR 0xB8
+
+#include "aic94xx_reg_def.h"
+
+u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg);
+u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg);
+u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg);
+
+void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val);
+void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val);
+void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val);
+
+void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
+ u32 offs, int count);
+void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
+ u32 offs, int count);
+
+#define ASD_READ_OCM(type, ord, S) \
+static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \
+ u32 offs) \
+{ \
+ struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
+ type val = read##S (io_handle->addr + (unsigned long) offs); \
+ rmb(); \
+ return val; \
+}
+
+ASD_READ_OCM(u8, byte, b);
+ASD_READ_OCM(u16,word, w);
+ASD_READ_OCM(u32,dword,l);
+
+#define ASD_WRITE_OCM(type, ord, S) \
+static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \
+ u32 offs, type val) \
+{ \
+ struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
+ write##S (val, io_handle->addr + (unsigned long) offs); \
+ return; \
+}
+
+ASD_WRITE_OCM(u8, byte, b);
+ASD_WRITE_OCM(u16,word, w);
+ASD_WRITE_OCM(u32,dword,l);
+
+#define ASD_DDBSITE_READ(type, ord) \
+static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \
+ u16 ddb_site_no, \
+ u16 offs) \
+{ \
+ asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
+ asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
+ return asd_read_reg_##ord (asd_ha, CTXACCESS); \
+}
+
+ASD_DDBSITE_READ(u32, dword);
+ASD_DDBSITE_READ(u16, word);
+
+static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha,
+ u16 ddb_site_no,
+ u16 offs)
+{
+ if (offs & 1)
+ return asd_ddbsite_read_word(asd_ha, ddb_site_no,
+ offs & ~1) >> 8;
+ else
+ return asd_ddbsite_read_word(asd_ha, ddb_site_no,
+ offs) & 0xFF;
+}
+
+
+#define ASD_DDBSITE_WRITE(type, ord) \
+static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \
+ u16 ddb_site_no, \
+ u16 offs, type val) \
+{ \
+ asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
+ asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
+ asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
+}
+
+ASD_DDBSITE_WRITE(u32, dword);
+ASD_DDBSITE_WRITE(u16, word);
+
+static inline void asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha,
+ u16 ddb_site_no,
+ u16 offs, u8 val)
+{
+ u16 base = offs & ~1;
+ u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
+ if (offs & 1)
+ rval = (val << 8) | (rval & 0xFF);
+ else
+ rval = (rval & 0xFF00) | val;
+ asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval);
+}
+
+
+#define ASD_SCBSITE_READ(type, ord) \
+static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \
+ u16 scb_site_no, \
+ u16 offs) \
+{ \
+ asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
+ asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
+ return asd_read_reg_##ord (asd_ha, CTXACCESS); \
+}
+
+ASD_SCBSITE_READ(u32, dword);
+ASD_SCBSITE_READ(u16, word);
+
+static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha,
+ u16 scb_site_no,
+ u16 offs)
+{
+ if (offs & 1)
+ return asd_scbsite_read_word(asd_ha, scb_site_no,
+ offs & ~1) >> 8;
+ else
+ return asd_scbsite_read_word(asd_ha, scb_site_no,
+ offs) & 0xFF;
+}
+
+
+#define ASD_SCBSITE_WRITE(type, ord) \
+static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \
+ u16 scb_site_no, \
+ u16 offs, type val) \
+{ \
+ asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
+ asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
+ asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
+}
+
+ASD_SCBSITE_WRITE(u32, dword);
+ASD_SCBSITE_WRITE(u16, word);
+
+static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha,
+ u16 scb_site_no,
+ u16 offs, u8 val)
+{
+ u16 base = offs & ~1;
+ u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base);
+ if (offs & 1)
+ rval = (val << 8) | (rval & 0xFF);
+ else
+ rval = (rval & 0xFF00) | val;
+ asd_scbsite_write_word(asd_ha, scb_site_no, base, rval);
+}
+
+/**
+ * asd_ddbsite_update_word -- atomically update a word in a ddb site
+ * @asd_ha: pointer to host adapter structure
+ * @ddb_site_no: the DDB site number
+ * @offs: the offset into the DDB
+ * @oldval: old value found in that offset
+ * @newval: the new value to replace it
+ *
+ * This function is used when the sequencers are running and we need to
+ * update a DDB site atomically without expensive pausing and upausing
+ * of the sequencers and accessing the DDB site through the CIO bus.
+ *
+ * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value
+ * is different than the current value at that offset.
+ */
+static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha,
+ u16 ddb_site_no, u16 offs,
+ u16 oldval, u16 newval)
+{
+ u8 done;
+ u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs);
+ if (oval != oldval)
+ return -EAGAIN;
+ asd_write_reg_word(asd_ha, AOLDDATA, oldval);
+ asd_write_reg_word(asd_ha, ANEWDATA, newval);
+ do {
+ done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL);
+ } while (!(done & ATOMICDONE));
+ if (done & ATOMICERR)
+ return -EFAULT; /* parity error */
+ else if (done & ATOMICWIN)
+ return 0; /* success */
+ else
+ return -EAGAIN; /* oldval different than current value */
+}
+
+static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha,
+ u16 ddb_site_no, u16 offs,
+ u8 _oldval, u8 _newval)
+{
+ u16 base = offs & ~1;
+ u16 oval;
+ u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
+ if (offs & 1) {
+ if ((nval >> 8) != _oldval)
+ return -EAGAIN;
+ nval = (_newval << 8) | (nval & 0xFF);
+ oval = (_oldval << 8) | (nval & 0xFF);
+ } else {
+ if ((nval & 0xFF) != _oldval)
+ return -EAGAIN;
+ nval = (nval & 0xFF00) | _newval;
+ oval = (nval & 0xFF00) | _oldval;
+ }
+ return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval);
+}
+
+static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg,
+ dma_addr_t dma_handle)
+{
+ asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle));
+ asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle));
+}
+
+static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha)
+{
+ /* DCHREVISION returns 0, possibly broken */
+ u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
+ return ctxmemsize ? 65536 : 32768;
+}
+
+static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha)
+{
+ u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
+ return ctxmemsize ? 8192 : 4096;
+}
+
+static inline void asd_disable_ints(struct asd_ha_struct *asd_ha)
+{
+ asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
+}
+
+static inline void asd_enable_ints(struct asd_ha_struct *asd_ha)
+{
+ /* Enable COM SAS interrupt on errors, COMSTAT */
+ asd_write_reg_dword(asd_ha, COMSTATEN,
+ EN_CSBUFPERR | EN_CSERR | EN_OVLYERR);
+ /* Enable DCH SAS CFIFTOERR */
+ asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR);
+ /* Enable Host Device interrupts */
+ asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN);
+}
+
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
new file mode 100644
index 000000000..dd6cc8008
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -0,0 +1,2399 @@
+/*
+ * Aic94xx SAS/SATA driver hardware registers definitions.
+ *
+ * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * Luben Tuikov: Some register value updates to make it work with the window
+ * agnostic register r/w functions. Some register corrections, sizes,
+ * etc.
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $
+ *
+ */
+
+#ifndef _ADP94XX_REG_DEF_H_
+#define _ADP94XX_REG_DEF_H_
+
+/*
+ * Common definitions.
+ */
+#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */
+#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */
+#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */
+
+/********************* COM_SAS registers definition *************************/
+
+/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h.
+ */
+
+/*
+ * CHIM Registers, Address Range : (0x00-0xFF)
+ */
+#define COMBIST (REG_BASE_ADDR + 0x00)
+
+/* bits 31:24 */
+#define L7BLKRST 0x80000000
+#define L6BLKRST 0x40000000
+#define L5BLKRST 0x20000000
+#define L4BLKRST 0x10000000
+#define L3BLKRST 0x08000000
+#define L2BLKRST 0x04000000
+#define L1BLKRST 0x02000000
+#define L0BLKRST 0x01000000
+#define LmBLKRST 0xFF000000
+#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid))
+
+#define OCMBLKRST 0x00400000
+#define CTXMEMBLKRST 0x00200000
+#define CSEQBLKRST 0x00100000
+#define EXSIBLKRST 0x00040000
+#define DPIBLKRST 0x00020000
+#define DFIFBLKRST 0x00010000
+#define HARDRST 0x00000200
+#define COMBLKRST 0x00000100
+#define FRCDFPERR 0x00000080
+#define FRCCIOPERR 0x00000020
+#define FRCBISTERR 0x00000010
+#define COMBISTEN 0x00000004
+#define COMBISTDONE 0x00000002 /* ro */
+#define COMBISTFAIL 0x00000001 /* ro */
+
+#define COMSTAT (REG_BASE_ADDR + 0x04)
+
+#define REQMBXREAD 0x00000040
+#define RSPMBXAVAIL 0x00000020
+#define CSBUFPERR 0x00000008
+#define OVLYERR 0x00000004
+#define CSERR 0x00000002
+#define OVLYDMADONE 0x00000001
+
+#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \
+ CSBUFPERR | OVLYERR | CSERR |\
+ OVLYDMADONE)
+
+#define COMSTATEN (REG_BASE_ADDR + 0x08)
+
+#define EN_REQMBXREAD 0x00000040
+#define EN_RSPMBXAVAIL 0x00000020
+#define EN_CSBUFPERR 0x00000008
+#define EN_OVLYERR 0x00000004
+#define EN_CSERR 0x00000002
+#define EN_OVLYDONE 0x00000001
+
+#define SCBPRO (REG_BASE_ADDR + 0x0C)
+
+#define SCBCONS_MASK 0xFFFF0000
+#define SCBPRO_MASK 0x0000FFFF
+
+#define CHIMREQMBX (REG_BASE_ADDR + 0x10)
+
+#define CHIMRSPMBX (REG_BASE_ADDR + 0x14)
+
+#define CHIMINT (REG_BASE_ADDR + 0x18)
+
+#define EXT_INT0 0x00000800
+#define EXT_INT1 0x00000400
+#define PORRSTDET 0x00000200
+#define HARDRSTDET 0x00000100
+#define DLAVAILQ 0x00000080 /* ro */
+#define HOSTERR 0x00000040
+#define INITERR 0x00000020
+#define DEVINT 0x00000010
+#define COMINT 0x00000008
+#define DEVTIMER2 0x00000004
+#define DEVTIMER1 0x00000002
+#define DLAVAIL 0x00000001
+
+#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\
+ DEVTIMER2 | DEVTIMER1 | DLAVAIL)
+
+#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT)
+
+#define CHIMINTEN (REG_BASE_ADDR + 0x1C)
+
+#define RST_EN_EXT_INT1 0x01000000
+#define RST_EN_EXT_INT0 0x00800000
+#define RST_EN_HOSTERR 0x00400000
+#define RST_EN_INITERR 0x00200000
+#define RST_EN_DEVINT 0x00100000
+#define RST_EN_COMINT 0x00080000
+#define RST_EN_DEVTIMER2 0x00040000
+#define RST_EN_DEVTIMER1 0x00020000
+#define RST_EN_DLAVAIL 0x00010000
+#define SET_EN_EXT_INT1 0x00000100
+#define SET_EN_EXT_INT0 0x00000080
+#define SET_EN_HOSTERR 0x00000040
+#define SET_EN_INITERR 0x00000020
+#define SET_EN_DEVINT 0x00000010
+#define SET_EN_COMINT 0x00000008
+#define SET_EN_DEVTIMER2 0x00000004
+#define SET_EN_DEVTIMER1 0x00000002
+#define SET_EN_DLAVAIL 0x00000001
+
+#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \
+ RST_EN_DEVINT | RST_EN_COMINT | \
+ RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\
+ RST_EN_DLAVAIL)
+
+#define SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\
+ SET_EN_DEVINT | SET_EN_COMINT |\
+ SET_EN_DLAVAIL)
+
+#define OVLYDMACTL (REG_BASE_ADDR + 0x20)
+
+#define OVLYADR_MASK 0x07FF0000
+#define OVLYLSEQ_MASK 0x0000FF00
+#define OVLYCSEQ 0x00000080
+#define OVLYHALTERR 0x00000040
+#define PIOCMODE 0x00000020
+#define RESETOVLYDMA 0x00000008 /* wo */
+#define STARTOVLYDMA 0x00000004
+#define STOPOVLYDMA 0x00000002 /* wo */
+#define OVLYDMAACT 0x00000001 /* ro */
+
+#define OVLYDMACNT (REG_BASE_ADDR + 0x24)
+
+#define OVLYDOMAIN1 0x20000000 /* ro */
+#define OVLYDOMAIN0 0x10000000
+#define OVLYBUFADR_MASK 0x007F0000
+#define OVLYDMACNT_MASK 0x00003FFF
+
+#define OVLYDMAADR (REG_BASE_ADDR + 0x28)
+
+#define DMAERR (REG_BASE_ADDR + 0x30)
+
+#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */
+#define CSERRSTAT_MASK 0x000000FF /* ro */
+
+#define SPIODATA (REG_BASE_ADDR + 0x34)
+
+/* 0x38 - 0x3C are reserved */
+
+#define T1CNTRLR (REG_BASE_ADDR + 0x40)
+
+#define T1DONE 0x00010000 /* ro */
+#define TIMER64 0x00000400
+#define T1ENABLE 0x00000200
+#define T1RELOAD 0x00000100
+#define T1PRESCALER_MASK 0x00000003
+
+#define T1CMPR (REG_BASE_ADDR + 0x44)
+
+#define T1CNTR (REG_BASE_ADDR + 0x48)
+
+#define T2CNTRLR (REG_BASE_ADDR + 0x4C)
+
+#define T2DONE 0x00010000 /* ro */
+#define T2ENABLE 0x00000200
+#define T2RELOAD 0x00000100
+#define T2PRESCALER_MASK 0x00000003
+
+#define T2CMPR (REG_BASE_ADDR + 0x50)
+
+#define T2CNTR (REG_BASE_ADDR + 0x54)
+
+/* 0x58h - 0xFCh are reserved */
+
+/*
+ * DCH_SAS Registers, Address Range : (0x800-0xFFF)
+ */
+#define CMDCTXBASE (REG_BASE_ADDR + 0x800)
+
+#define DEVCTXBASE (REG_BASE_ADDR + 0x808)
+
+#define CTXDOMAIN (REG_BASE_ADDR + 0x810)
+
+#define DEVCTXDOMAIN1 0x00000008 /* ro */
+#define DEVCTXDOMAIN0 0x00000004
+#define CMDCTXDOMAIN1 0x00000002 /* ro */
+#define CMDCTXDOMAIN0 0x00000001
+
+#define DCHCTL (REG_BASE_ADDR + 0x814)
+
+#define OCMBISTREPAIR 0x00080000
+#define OCMBISTEN 0x00040000
+#define OCMBISTDN 0x00020000 /* ro */
+#define OCMBISTFAIL 0x00010000 /* ro */
+#define DDBBISTEN 0x00004000
+#define DDBBISTDN 0x00002000 /* ro */
+#define DDBBISTFAIL 0x00001000 /* ro */
+#define SCBBISTEN 0x00000400
+#define SCBBISTDN 0x00000200 /* ro */
+#define SCBBISTFAIL 0x00000100 /* ro */
+
+#define MEMSEL_MASK 0x000000E0
+#define MEMSEL_CCM_LSEQ 0x00000000
+#define MEMSEL_CCM_IOP 0x00000020
+#define MEMSEL_CCM_SASCTL 0x00000040
+#define MEMSEL_DCM_LSEQ 0x00000060
+#define MEMSEL_DCM_IOP 0x00000080
+#define MEMSEL_OCM 0x000000A0
+
+#define FRCERR 0x00000010
+#define AUTORLS 0x00000001
+
+#define DCHREVISION (REG_BASE_ADDR + 0x818)
+
+#define DCHREVISION_MASK 0x000000FF
+
+#define DCHSTATUS (REG_BASE_ADDR + 0x81C)
+
+#define EN_CFIFTOERR 0x00020000
+#define CFIFTOERR 0x00000200
+#define CSEQINT 0x00000100 /* ro */
+#define LSEQ7INT 0x00000080 /* ro */
+#define LSEQ6INT 0x00000040 /* ro */
+#define LSEQ5INT 0x00000020 /* ro */
+#define LSEQ4INT 0x00000010 /* ro */
+#define LSEQ3INT 0x00000008 /* ro */
+#define LSEQ2INT 0x00000004 /* ro */
+#define LSEQ1INT 0x00000002 /* ro */
+#define LSEQ0INT 0x00000001 /* ro */
+
+#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\
+ LSEQ4INT | LSEQ3INT | LSEQ2INT |\
+ LSEQ1INT | LSEQ0INT)
+
+#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820)
+#define ENFAIRMST 0x00FF0000
+#define DISWRMST9 0x00000200
+#define DISWRMST8 0x00000100
+#define DISRDMST 0x000000FF
+
+#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824)
+/* 8 bit wide */
+#define AUTOINC 0x80
+#define ATOMICERR 0x04
+#define ATOMICWIN 0x02
+#define ATOMICDONE 0x01
+
+
+#define ALTCIOADR (REG_BASE_ADDR + 0x828)
+/* 16 bit; bits 8:0 define CIO addr space of CSEQ */
+
+#define ASCBPTR (REG_BASE_ADDR + 0x82C)
+/* 16 bit wide */
+
+#define ADDBPTR (REG_BASE_ADDR + 0x82E)
+/* 16 bit wide */
+
+#define ANEWDATA (REG_BASE_ADDR + 0x830)
+/* 16 bit */
+
+#define AOLDDATA (REG_BASE_ADDR + 0x834)
+/* 16 bit */
+
+#define CTXACCESS (REG_BASE_ADDR + 0x838)
+/* 32 bit */
+
+/* 0x83Ch - 0xFFCh are reserved */
+
+/*
+ * ARP2 External Processor Registers, Address Range : (0x00-0x1F)
+ */
+#define ARP2CTL 0x00
+
+#define FRCSCRPERR 0x00040000
+#define FRCARP2PERR 0x00020000
+#define FRCARP2ILLOPC 0x00010000
+#define ENWAITTO 0x00008000
+#define PERRORDIS 0x00004000
+#define FAILDIS 0x00002000
+#define CIOPERRDIS 0x00001000
+#define BREAKEN3 0x00000800
+#define BREAKEN2 0x00000400
+#define BREAKEN1 0x00000200
+#define BREAKEN0 0x00000100
+#define EPAUSE 0x00000008
+#define PAUSED 0x00000004 /* ro */
+#define STEP 0x00000002
+#define ARP2RESET 0x00000001 /* wo */
+
+#define ARP2INT 0x04
+
+#define HALTCODE_MASK 0x00FF0000 /* ro */
+#define ARP2WAITTO 0x00000100
+#define ARP2HALTC 0x00000080
+#define ARP2ILLOPC 0x00000040
+#define ARP2PERR 0x00000020
+#define ARP2CIOPERR 0x00000010
+#define ARP2BREAK3 0x00000008
+#define ARP2BREAK2 0x00000004
+#define ARP2BREAK1 0x00000002
+#define ARP2BREAK0 0x00000001
+
+#define ARP2INTEN 0x08
+
+#define EN_ARP2WAITTO 0x00000100
+#define EN_ARP2HALTC 0x00000080
+#define EN_ARP2ILLOPC 0x00000040
+#define EN_ARP2PERR 0x00000020
+#define EN_ARP2CIOPERR 0x00000010
+#define EN_ARP2BREAK3 0x00000008
+#define EN_ARP2BREAK2 0x00000004
+#define EN_ARP2BREAK1 0x00000002
+#define EN_ARP2BREAK0 0x00000001
+
+#define ARP2BREAKADR01 0x0C
+
+#define BREAKADR1_MASK 0x0FFF0000
+#define BREAKADR0_MASK 0x00000FFF
+
+#define ARP2BREAKADR23 0x10
+
+#define BREAKADR3_MASK 0x0FFF0000
+#define BREAKADR2_MASK 0x00000FFF
+
+/* 0x14h - 0x1Ch are reserved */
+
+/*
+ * ARP2 Registers, Address Range : (0x00-0x1F)
+ * The definitions have the same address offset for CSEQ and LmSEQ
+ * CIO Bus Registers.
+ */
+#define MODEPTR 0x00
+
+#define DSTMODE 0xF0
+#define SRCMODE 0x0F
+
+#define ALTMODE 0x01
+
+#define ALTDMODE 0xF0
+#define ALTSMODE 0x0F
+
+#define ATOMICXCHG 0x02
+
+#define FLAG 0x04
+
+#define INTCODE_MASK 0xF0
+#define ALTMODEV2 0x04
+#define CARRY_INT 0x02
+#define CARRY 0x01
+
+#define ARP2INTCTL 0x05
+
+#define PAUSEDIS 0x80
+#define RSTINTCTL 0x40
+#define POPALTMODE 0x08
+#define ALTMODEV 0x04
+#define INTMASK 0x02
+#define IRET 0x01
+
+#define STACK 0x06
+
+#define FUNCTION1 0x07
+
+#define PRGMCNT 0x08
+
+#define ACCUM 0x0A
+
+#define SINDEX 0x0C
+
+#define DINDEX 0x0E
+
+#define ALLONES 0x10
+
+#define ALLZEROS 0x11
+
+#define SINDIR 0x12
+
+#define DINDIR 0x13
+
+#define JUMLDIR 0x14
+
+#define ARP2HALTCODE 0x15
+
+#define CURRADDR 0x16
+
+#define LASTADDR 0x18
+
+#define NXTLADDR 0x1A
+
+#define DBGPORTPTR 0x1C
+
+#define DBGPORT 0x1D
+
+/*
+ * CIO Registers.
+ * The definitions have the same address offset for CSEQ and LmSEQ
+ * CIO Bus Registers.
+ */
+#define MnSCBPTR 0x20
+
+#define MnDDBPTR 0x22
+
+#define SCRATCHPAGE 0x24
+
+#define MnSCRATCHPAGE 0x25
+
+#define SCRATCHPAGESV 0x26
+
+#define MnSCRATCHPAGESV 0x27
+
+#define MnDMAERRS 0x46
+
+#define MnSGDMAERRS 0x47
+
+#define MnSGBUF 0x53
+
+#define MnSGDMASTAT 0x5b
+
+#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */
+
+#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */
+
+#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */
+
+#define MnDMAENG 0x60
+
+#define MnPIPECTL 0x61
+
+#define MnSGBADR 0x65
+
+#define MnSCB_SITE 0x100
+
+#define MnDDB_SITE 0x180
+
+/*
+ * The common definitions below have the same address offset for both
+ * CSEQ and LmSEQ.
+ */
+#define BISTCTL0 0x4C
+
+#define BISTCTL1 0x50
+
+#define MAPPEDSCR 0x800
+
+/*
+ * CSEQ Host Register, Address Range : (0x000-0xFFC)
+ */
+#define CSEQ_HOST_REG_BASE_ADR 0xB8001000
+
+#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL)
+
+#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT)
+
+#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN)
+
+#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01)
+
+#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23)
+
+#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1)
+
+#define CSEQRAMBISTEN 0x00000040
+#define CSEQRAMBISTDN 0x00000020 /* ro */
+#define CSEQRAMBISTFAIL 0x00000010 /* ro */
+#define CSEQSCRBISTEN 0x00000004
+#define CSEQSCRBISTDN 0x00000002 /* ro */
+#define CSEQSCRBISTFAIL 0x00000001 /* ro */
+
+#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR)
+
+/*
+ * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC)
+ * 16 modes, each mode is 512 bytes.
+ * Unless specified, the register should valid for all modes.
+ */
+#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO
+
+#define CSEQm_CIO_REG(Mode, Reg) \
+ (CSEQ_CIO_REG_BASE_ADR + \
+ ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg))
+
+#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR)
+
+#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE)
+
+#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG)
+
+#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG)
+
+#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL)
+
+#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK)
+
+#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1)
+
+#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT)
+
+#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM)
+
+#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX)
+
+#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX)
+
+#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES)
+
+#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS)
+
+#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR)
+
+#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR)
+
+#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR)
+
+#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE)
+
+#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR)
+
+#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR)
+
+#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR)
+
+#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR)
+
+#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT)
+
+#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE)
+
+#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR)
+
+#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR)
+
+#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE)
+
+#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28)
+
+#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C)
+
+/* mode 0-7 */
+#define MnREQMBX 0x30
+#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30)
+
+/* mode 8 */
+#define CSEQCON CSEQm_CIO_REG(8, 0x30)
+
+/* mode 0-7 */
+#define MnRSPMBX 0x34
+#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34)
+
+/* mode 8 */
+#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34)
+
+/* mode 8 */
+#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35)
+
+/* mode 8 */
+#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36)
+
+/* mode 8 */
+#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37)
+
+#define CSHALTERR 0x10
+#define RESETCSDMA 0x08 /* wo */
+#define STARTCSDMA 0x04
+#define STOPCSDMA 0x02 /* wo */
+#define CSDMAACT 0x01 /* ro */
+
+/* mode 0-7 */
+#define MnINT 0x38
+#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38)
+
+#define CMnREQMBXE 0x02
+#define CMnRSPMBXF 0x01
+#define CMnINT_MASK 0x00000003
+
+/* mode 8 */
+#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38)
+
+/* mode 0-7 */
+#define MnINTEN 0x3C
+#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C)
+
+#define EN_CMnRSPMBXF 0x01
+
+/* mode 8 */
+#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C)
+
+/* mode 8 */
+#define CSDMAADR CSEQm_CIO_REG(8, 0x40)
+
+/* mode 8 */
+#define CSDMACNT CSEQm_CIO_REG(8, 0x48)
+
+/* mode 8 */
+#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D)
+
+#define DONELISTEND 0x10
+#define DONELISTSIZE_MASK 0x0F
+#define DONELISTSIZE_8ELEM 0x01
+#define DONELISTSIZE_16ELEM 0x02
+#define DONELISTSIZE_32ELEM 0x03
+#define DONELISTSIZE_64ELEM 0x04
+#define DONELISTSIZE_128ELEM 0x05
+#define DONELISTSIZE_256ELEM 0x06
+#define DONELISTSIZE_512ELEM 0x07
+#define DONELISTSIZE_1024ELEM 0x08
+#define DONELISTSIZE_2048ELEM 0x09
+#define DONELISTSIZE_4096ELEM 0x0A
+#define DONELISTSIZE_8192ELEM 0x0B
+#define DONELISTSIZE_16384ELEM 0x0C
+
+/* mode 8 */
+#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E)
+
+/* mode 11 */
+#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50)
+
+/* mode 11 */
+#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52)
+
+/* mode 11 */
+#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54)
+
+#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60)
+
+#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61)
+
+#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62)
+
+#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64)
+
+#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68)
+
+/* mode 8, 32x32 bits, 128 bytes of mapped buffer */
+#define CSBUFFER CSEQm_CIO_REG(8, 0x80)
+
+#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0)
+
+/* mode 0-8 */
+#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0)
+
+/*
+ * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC)
+ */
+#define CSEQ_RAM_REG_BASE_ADR 0xB8004000
+
+/*
+ * The common definitions below have the same address offset for all the Link
+ * sequencers.
+ */
+#define MODECTL 0x40
+
+#define DBGMODE 0x44
+
+#define CONTROL 0x48
+#define LEDTIMER 0x00010000
+#define LEDTIMERS_10us 0x00000000
+#define LEDTIMERS_1ms 0x00000800
+#define LEDTIMERS_100ms 0x00001000
+#define LEDMODE_TXRX 0x00000000
+#define LEDMODE_CONNECTED 0x00000200
+#define LEDPOL 0x00000100
+
+#define LSEQRAM 0x1000
+
+/*
+ * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC)
+ */
+#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000
+#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000
+#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000
+#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000
+#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000
+#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000
+#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000
+#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000
+
+#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ ARP2CTL)
+
+#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ ARP2INT)
+
+#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ ARP2INTEN)
+
+#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ DBGMODE)
+
+#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ CONTROL)
+
+#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ ARP2BREAKADR01)
+
+#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ ARP2BREAKADR23)
+
+#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ MODECTL)
+
+#define LmAUTODISCI 0x08000000
+#define LmDSBLBITLT 0x04000000
+#define LmDSBLANTT 0x02000000
+#define LmDSBLCRTT 0x01000000
+#define LmDSBLCONT 0x00000100
+#define LmPRIMODE 0x00000080
+#define LmDSBLHOLD 0x00000040
+#define LmDISACK 0x00000020
+#define LmBLIND48 0x00000010
+#define LmRCVMODE_MASK 0x0000000C
+#define LmRCVMODE_PLD 0x00000000
+#define LmRCVMODE_HPC 0x00000004
+
+#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ DBGMODE)
+
+#define LmFRCPERR 0x80000000
+#define LmMEMSEL_MASK 0x30000000
+#define LmFRCRBPERR 0x00000000
+#define LmFRCTBPERR 0x10000000
+#define LmFRCSGBPERR 0x20000000
+#define LmFRCARBPERR 0x30000000
+#define LmRCVIDW 0x00080000
+#define LmINVDWERR 0x00040000
+#define LmRCVDISP 0x00004000
+#define LmDISPERR 0x00002000
+#define LmDSBLDSCR 0x00000800
+#define LmDSBLSCR 0x00000400
+#define LmFRCNAK 0x00000200
+#define LmFRCROFS 0x00000100
+#define LmFRCCRC 0x00000080
+#define LmFRMTYPE_MASK 0x00000070
+#define LmSG_DATA 0x00000000
+#define LmSG_COMMAND 0x00000010
+#define LmSG_TASK 0x00000020
+#define LmSG_TGTXFER 0x00000030
+#define LmSG_RESPONSE 0x00000040
+#define LmSG_IDENADDR 0x00000050
+#define LmSG_OPENADDR 0x00000060
+#define LmDISCRCGEN 0x00000008
+#define LmDISCRCCHK 0x00000004
+#define LmSSXMTFRM 0x00000002
+#define LmSSRCVFRM 0x00000001
+
+#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ CONTROL)
+
+#define LmSTEPXMTFRM 0x00000002
+#define LmSTEPRCVFRM 0x00000001
+
+#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
+ BISTCTL0)
+
+#define ARBBISTEN 0x40000000
+#define ARBBISTDN 0x20000000 /* ro */
+#define ARBBISTFAIL 0x10000000 /* ro */
+#define TBBISTEN 0x00000400
+#define TBBISTDN 0x00000200 /* ro */
+#define TBBISTFAIL 0x00000100 /* ro */
+#define RBBISTEN 0x00000040
+#define RBBISTDN 0x00000020 /* ro */
+#define RBBISTFAIL 0x00000010 /* ro */
+#define SGBISTEN 0x00000004
+#define SGBISTDN 0x00000002 /* ro */
+#define SGBISTFAIL 0x00000001 /* ro */
+
+#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\
+ BISTCTL1)
+
+#define LmRAMPAGE1 0x00000200
+#define LmRAMPAGE0 0x00000100
+#define LmIMEMBISTEN 0x00000040
+#define LmIMEMBISTDN 0x00000020 /* ro */
+#define LmIMEMBISTFAIL 0x00000010 /* ro */
+#define LmSCRBISTEN 0x00000004
+#define LmSCRBISTDN 0x00000002 /* ro */
+#define LmSCRBISTFAIL 0x00000001 /* ro */
+#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0)
+#define LmRAMPAGE_LSHIFT 0x8
+
+#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
+ MAPPEDSCR)
+
+#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
+ ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
+ LSEQRAM)
+
+/*
+ * LmSEQ CIO Bus Register, Address Range : (0x0000-0xFFC)
+ * 8 modes, each mode is 512 bytes.
+ * Unless specified, the register should valid for all modes.
+ */
+#define LmSEQ_CIOBUS_REG_BASE 0x2000
+
+#define LmSEQ_PHY_BASE(Mode, LinkNum) \
+ (LSEQ0_HOST_REG_BASE_ADR + \
+ (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \
+ LmSEQ_CIOBUS_REG_BASE + \
+ ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE))
+
+#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \
+ (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg))
+
+#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR)
+
+#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE)
+
+#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG)
+
+#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG)
+
+#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL)
+
+#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK)
+
+#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1)
+
+#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT)
+
+#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM)
+
+#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX)
+
+#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX)
+
+#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES)
+
+#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS)
+
+#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR)
+
+#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR)
+
+#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR)
+
+#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE)
+
+#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR)
+
+#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR)
+
+#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR)
+
+#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR)
+
+#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT)
+
+#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE)
+
+#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \
+ MnSCRATCHPAGE)
+
+#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28)
+
+#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30)
+
+#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34)
+
+#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38)
+
+#define CTXMEMSIZE 0x80000000 /* ro */
+#define LmACKREQ 0x08000000
+#define LmNAKREQ 0x04000000
+#define LmMnXMTERR 0x02000000
+#define LmM5OOBSVC 0x01000000
+#define LmHWTINT 0x00800000
+#define LmMnCTXDONE 0x00100000
+#define LmM2REQMBXF 0x00080000
+#define LmM2RSPMBXE 0x00040000
+#define LmMnDMAERR 0x00020000
+#define LmRCVPRIM 0x00010000
+#define LmRCVERR 0x00008000
+#define LmADDRRCV 0x00004000
+#define LmMnHDRMISS 0x00002000
+#define LmMnWAITSCB 0x00001000
+#define LmMnRLSSCB 0x00000800
+#define LmMnSAVECTX 0x00000400
+#define LmMnFETCHSG 0x00000200
+#define LmMnLOADCTX 0x00000100
+#define LmMnCFGICL 0x00000080
+#define LmMnCFGSATA 0x00000040
+#define LmMnCFGEXPSATA 0x00000020
+#define LmMnCFGCMPLT 0x00000010
+#define LmMnCFGRBUF 0x00000008
+#define LmMnSAVETTR 0x00000004
+#define LmMnCFGRDAT 0x00000002
+#define LmMnCFGHDR 0x00000001
+
+#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C)
+
+#define EN_LmACKREQ 0x08000000
+#define EN_LmNAKREQ 0x04000000
+#define EN_LmMnXMTERR 0x02000000
+#define EN_LmM5OOBSVC 0x01000000
+#define EN_LmHWTINT 0x00800000
+#define EN_LmMnCTXDONE 0x00100000
+#define EN_LmM2REQMBXF 0x00080000
+#define EN_LmM2RSPMBXE 0x00040000
+#define EN_LmMnDMAERR 0x00020000
+#define EN_LmRCVPRIM 0x00010000
+#define EN_LmRCVERR 0x00008000
+#define EN_LmADDRRCV 0x00004000
+#define EN_LmMnHDRMISS 0x00002000
+#define EN_LmMnWAITSCB 0x00001000
+#define EN_LmMnRLSSCB 0x00000800
+#define EN_LmMnSAVECTX 0x00000400
+#define EN_LmMnFETCHSG 0x00000200
+#define EN_LmMnLOADCTX 0x00000100
+#define EN_LmMnCFGICL 0x00000080
+#define EN_LmMnCFGSATA 0x00000040
+#define EN_LmMnCFGEXPSATA 0x00000020
+#define EN_LmMnCFGCMPLT 0x00000010
+#define EN_LmMnCFGRBUF 0x00000008
+#define EN_LmMnSAVETTR 0x00000004
+#define EN_LmMnCFGRDAT 0x00000002
+#define EN_LmMnCFGHDR 0x00000001
+
+#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \
+ EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \
+ EN_LmMnCFGHDR | EN_LmRCVERR | \
+ EN_LmADDRRCV | EN_LmMnHDRMISS | \
+ EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
+ EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
+ EN_LmHWTINT | EN_LmMnCTXDONE | \
+ EN_LmRCVPRIM | EN_LmMnCFGSATA | \
+ EN_LmMnCFGEXPSATA | EN_LmMnDMAERR)
+
+#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \
+ EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
+ EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
+ EN_LmMnXMTERR | EN_LmHWTINT | \
+ EN_LmMnCTXDONE | EN_LmRCVPRIM | \
+ EN_LmRCVERR | EN_LmMnDMAERR)
+
+#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \
+ EN_LmM2REQMBXF | EN_LmRCVPRIM | \
+ EN_LmRCVERR)
+
+#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \
+ EN_LmHWTINT | EN_LmRCVPRIM | \
+ EN_LmRCVERR)
+
+#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40)
+
+#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44)
+
+#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45)
+
+#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46)
+
+#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47)
+
+#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48)
+
+#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48)
+#define SAS_ALIGN_DEFAULT 0xFF
+
+#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49)
+
+#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49)
+#define STP_ALIGN_DEFAULT 0x1F
+
+#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A)
+
+#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A)
+
+#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B)
+
+#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B)
+
+#define LmDISALIGN 0x20
+#define LmROTSTPALIGN 0x10
+#define LmSTPALIGN 0x08
+#define LmROTNOTIFY 0x04
+#define LmDUALALIGN 0x02
+#define LmROTALIGN 0x01
+
+#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C)
+
+#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C)
+
+#define LmMnBUFSTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x4E)
+
+#define LmMnBUFPERR 0x01
+
+/* mode 0-1 */
+#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59)
+
+#define LmMnXFRLVL_128 0x05
+#define LmMnXFRLVL_256 0x04
+#define LmMnXFRLVL_512 0x03
+#define LmMnXFRLVL_1024 0x02
+#define LmMnXFRLVL_1536 0x01
+#define LmMnXFRLVL_2048 0x00
+
+ /* mode 0-1 */
+#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A)
+
+#define LmMnRESETSG 0x04
+#define LmMnSTOPSG 0x02
+#define LmMnSTARTSG 0x01
+
+/* mode 0-1 */
+#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B)
+
+/* mode 0-1 */
+#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C)
+
+#define LmMnFLUSH 0x40 /* wo */
+#define LmMnRLSRTRY 0x20 /* wo */
+#define LmMnDISCARD 0x10 /* wo */
+#define LmMnRESETDAT 0x08 /* wo */
+#define LmMnSUSDAT 0x04 /* wo */
+#define LmMnSTOPDAT 0x02 /* wo */
+#define LmMnSTARTDAT 0x01 /* wo */
+
+/* mode 0-1 */
+#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D)
+
+#define LmMnDPEMPTY 0x80
+#define LmMnFLUSHING 0x40
+#define LmMnDDMAREQ 0x20
+#define LmMnHDMAREQ 0x10
+#define LmMnDATFREE 0x08
+#define LmMnDATSUS 0x04
+#define LmMnDATACT 0x02
+#define LmMnDATEN 0x01
+
+/* mode 0-1 */
+#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E)
+
+#define LmMnDMATYPE_NORMAL 0x0000
+#define LmMnDMATYPE_HOST_ONLY_TX 0x0001
+#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002
+#define LmMnDMATYPE_INVALID 0x0003
+#define LmMnDMATYPE_MASK 0x0003
+
+#define LmMnDMAWRAP 0x0004
+#define LmMnBITBUCKET 0x0008
+#define LmMnDISHDR 0x0010
+#define LmMnSTPCRC 0x0020
+#define LmXTEST 0x0040
+#define LmMnDISCRC 0x0080
+#define LmMnENINTLK 0x0100
+#define LmMnADDRFRM 0x0400
+#define LmMnENXMTCRC 0x0800
+
+/* mode 0-1 */
+#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70)
+
+/* mode 0-1 */
+#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B)
+#define LmMnDPSEL_MASK 0x07
+#define LmMnEOLPRE 0x40
+#define LmMnEOSPRE 0x80
+
+/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
+/* Receive Mode n = 0 */
+#define LmMnHRADDR 0x00
+#define LmMnHBYTECNT 0x01
+#define LmMnHREWIND 0x02
+#define LmMnDWADDR 0x03
+#define LmMnDSPACECNT 0x04
+#define LmMnDFRMSIZE 0x05
+
+/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
+/* Transmit Mode n = 1 */
+#define LmMnHWADDR 0x00
+#define LmMnHSPACECNT 0x01
+/* #define LmMnHREWIND 0x02 */
+#define LmMnDRADDR 0x03
+#define LmMnDBYTECNT 0x04
+/* #define LmMnDFRMSIZE 0x05 */
+
+/* mode 0-1 */
+#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78)
+#define LmMnDPACC_MASK 0x00FFFFFF
+
+/* mode 0-1 */
+#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D)
+
+#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80)
+#define LmPRMSTAT0BYTE0 0x80
+#define LmPRMSTAT0BYTE1 0x81
+#define LmPRMSTAT0BYTE2 0x82
+#define LmPRMSTAT0BYTE3 0x83
+
+#define LmFRAMERCVD 0x80000000
+#define LmXFRRDYRCVD 0x40000000
+#define LmUNKNOWNP 0x20000000
+#define LmBREAK 0x10000000
+#define LmDONE 0x08000000
+#define LmOPENACPT 0x04000000
+#define LmOPENRJCT 0x02000000
+#define LmOPENRTRY 0x01000000
+#define LmCLOSERV1 0x00800000
+#define LmCLOSERV0 0x00400000
+#define LmCLOSENORM 0x00200000
+#define LmCLOSECLAF 0x00100000
+#define LmNOTIFYRV2 0x00080000
+#define LmNOTIFYRV1 0x00040000
+#define LmNOTIFYRV0 0x00020000
+#define LmNOTIFYSPIN 0x00010000
+#define LmBROADRV4 0x00008000
+#define LmBROADRV3 0x00004000
+#define LmBROADRV2 0x00002000
+#define LmBROADRV1 0x00001000
+#define LmBROADSES 0x00000800
+#define LmBROADRVCH1 0x00000400
+#define LmBROADRVCH0 0x00000200
+#define LmBROADCH 0x00000100
+#define LmAIPRVWP 0x00000080
+#define LmAIPWP 0x00000040
+#define LmAIPWD 0x00000020
+#define LmAIPWC 0x00000010
+#define LmAIPRV2 0x00000008
+#define LmAIPRV1 0x00000004
+#define LmAIPRV0 0x00000002
+#define LmAIPNRML 0x00000001
+
+#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \
+ LmBROADRVCH1)
+
+#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84)
+#define LmPRMSTAT1BYTE0 0x84
+#define LmPRMSTAT1BYTE1 0x85
+#define LmPRMSTAT1BYTE2 0x86
+#define LmPRMSTAT1BYTE3 0x87
+
+#define LmFRMRCVDSTAT 0x80000000
+#define LmBREAK_DET 0x04000000
+#define LmCLOSE_DET 0x02000000
+#define LmDONE_DET 0x01000000
+#define LmXRDY 0x00040000
+#define LmSYNCSRST 0x00020000
+#define LmSYNC 0x00010000
+#define LmXHOLD 0x00008000
+#define LmRRDY 0x00004000
+#define LmHOLD 0x00002000
+#define LmROK 0x00001000
+#define LmRIP 0x00000800
+#define LmCRBLK 0x00000400
+#define LmACK 0x00000200
+#define LmNAK 0x00000100
+#define LmHARDRST 0x00000080
+#define LmERROR 0x00000040
+#define LmRERR 0x00000020
+#define LmPMREQP 0x00000010
+#define LmPMREQS 0x00000008
+#define LmPMACK 0x00000004
+#define LmPMNAK 0x00000002
+#define LmDMAT 0x00000001
+
+/* mode 1 */
+#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E)
+#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93)
+
+/* mode 0 */
+#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0)
+
+#define LmACRCERR 0x00000800
+#define LmPHYOVRN 0x00000400
+#define LmOBOVRN 0x00000200
+#define LmMnZERODATA 0x00000100
+#define LmSATAINTLK 0x00000080
+#define LmMnCRCERR 0x00000020
+#define LmRRDYOVRN 0x00000010
+#define LmMISSSOAF 0x00000008
+#define LmMISSSOF 0x00000004
+#define LmMISSEOAF 0x00000002
+#define LmMISSEOF 0x00000001
+
+#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4)
+
+#define EN_LmACRCERR 0x00000800
+#define EN_LmPHYOVRN 0x00000400
+#define EN_LmOBOVRN 0x00000200
+#define EN_LmMnZERODATA 0x00000100
+#define EN_LmSATAINTLK 0x00000080
+#define EN_LmFRMBAD 0x00000040
+#define EN_LmMnCRCERR 0x00000020
+#define EN_LmRRDYOVRN 0x00000010
+#define EN_LmMISSSOAF 0x00000008
+#define EN_LmMISSSOF 0x00000004
+#define EN_LmMISSEOAF 0x00000002
+#define EN_LmMISSEOF 0x00000001
+
+#define LmFRMERREN_MASK (EN_LmSATAINTLK | EN_LmMnCRCERR | \
+ EN_LmRRDYOVRN | EN_LmMISSSOF | \
+ EN_LmMISSEOAF | EN_LmMISSEOF | \
+ EN_LmACRCERR | LmPHYOVRN | \
+ EN_LmOBOVRN | EN_LmMnZERODATA)
+
+#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5)
+
+#define EN_LmDONETO 0x80
+#define EN_LmINVDISP 0x40
+#define EN_LmINVDW 0x20
+#define EN_LmDWSEVENT 0x08
+#define EN_LmCRTTTO 0x04
+#define EN_LmANTTTO 0x02
+#define EN_LmBITLTTO 0x01
+
+#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \
+ EN_LmDWSEVENT | EN_LmCRTTTO | \
+ EN_LmANTTTO | EN_LmDONETO | \
+ EN_LmBITLTTO)
+
+#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7)
+
+#define LmDONETO 0x80
+#define LmINVDISP 0x40
+#define LmINVDW 0x20
+#define LmDWSEVENT 0x08
+#define LmCRTTTO 0x04
+#define LmANTTTO 0x02
+#define LmBITLTTO 0x01
+
+#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8)
+#define LmDATABUFADR_MASK 0x0FFF
+
+#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA)
+
+#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0)
+
+#define EN_LmUNKNOWNP 0x20000000
+#define EN_LmBREAK 0x10000000
+#define EN_LmDONE 0x08000000
+#define EN_LmOPENACPT 0x04000000
+#define EN_LmOPENRJCT 0x02000000
+#define EN_LmOPENRTRY 0x01000000
+#define EN_LmCLOSERV1 0x00800000
+#define EN_LmCLOSERV0 0x00400000
+#define EN_LmCLOSENORM 0x00200000
+#define EN_LmCLOSECLAF 0x00100000
+#define EN_LmNOTIFYRV2 0x00080000
+#define EN_LmNOTIFYRV1 0x00040000
+#define EN_LmNOTIFYRV0 0x00020000
+#define EN_LmNOTIFYSPIN 0x00010000
+#define EN_LmBROADRV4 0x00008000
+#define EN_LmBROADRV3 0x00004000
+#define EN_LmBROADRV2 0x00002000
+#define EN_LmBROADRV1 0x00001000
+#define EN_LmBROADRV0 0x00000800
+#define EN_LmBROADRVCH1 0x00000400
+#define EN_LmBROADRVCH0 0x00000200
+#define EN_LmBROADCH 0x00000100
+#define EN_LmAIPRVWP 0x00000080
+#define EN_LmAIPWP 0x00000040
+#define EN_LmAIPWD 0x00000020
+#define EN_LmAIPWC 0x00000010
+#define EN_LmAIPRV2 0x00000008
+#define EN_LmAIPRV1 0x00000004
+#define EN_LmAIPRV0 0x00000002
+#define EN_LmAIPNRML 0x00000001
+
+#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \
+ EN_LmDONE | EN_LmOPENACPT | \
+ EN_LmOPENRJCT | EN_LmOPENRTRY | \
+ EN_LmCLOSERV1 | EN_LmCLOSERV0 | \
+ EN_LmCLOSENORM | EN_LmCLOSECLAF | \
+ EN_LmBROADRV4 | EN_LmBROADRV3 | \
+ EN_LmBROADRV2 | EN_LmBROADRV1 | \
+ EN_LmBROADRV0 | EN_LmBROADRVCH1 | \
+ EN_LmBROADRVCH0 | EN_LmBROADCH | \
+ EN_LmAIPRVWP | EN_LmAIPWP | \
+ EN_LmAIPWD | EN_LmAIPWC | \
+ EN_LmAIPRV2 | EN_LmAIPRV1 | \
+ EN_LmAIPRV0 | EN_LmAIPNRML)
+
+#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4)
+
+#define EN_LmXRDY 0x00040000
+#define EN_LmSYNCSRST 0x00020000
+#define EN_LmSYNC 0x00010000
+#define EN_LmXHOLD 0x00008000
+#define EN_LmRRDY 0x00004000
+#define EN_LmHOLD 0x00002000
+#define EN_LmROK 0x00001000
+#define EN_LmRIP 0x00000800
+#define EN_LmCRBLK 0x00000400
+#define EN_LmACK 0x00000200
+#define EN_LmNAK 0x00000100
+#define EN_LmHARDRST 0x00000080
+#define EN_LmERROR 0x00000040
+#define EN_LmRERR 0x00000020
+#define EN_LmPMREQP 0x00000010
+#define EN_LmPMREQS 0x00000008
+#define EN_LmPMACK 0x00000004
+#define EN_LmPMNAK 0x00000002
+#define EN_LmDMAT 0x00000001
+
+#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \
+ EN_LmSYNCSRST | \
+ EN_LmPMREQP | EN_LmPMREQS | \
+ EN_LmPMACK | EN_LmPMNAK)
+
+#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8)
+
+#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC)
+
+#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0)
+
+
+/*
+ * LmSEQ CIO Bus Mode 3 Register.
+ * Mode 3: Configuration and Setup, IOP Context SCB.
+ */
+#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48)
+
+#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90)
+
+#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92)
+
+#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94)
+
+#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96)
+
+#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98)
+
+#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A)
+
+#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C)
+
+#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E)
+
+#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4)
+
+#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6)
+
+#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0)
+
+#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4)
+
+#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2)
+
+#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8)
+
+
+/*
+ * LmSEQ CIO Bus Mode 5 Registers.
+ * Mode 5: Phy/OOB Control and Status.
+ */
+#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg))
+
+#define OOB_BFLTR 0x100
+
+#define BFLTR_THR_MASK 0xF0
+#define BFLTR_TC_MASK 0x0F
+
+#define OOB_INIT_MIN 0x102
+
+#define OOB_INIT_MAX 0x104
+
+#define OOB_INIT_NEG 0x106
+
+#define OOB_SAS_MIN 0x108
+
+#define OOB_SAS_MAX 0x10A
+
+#define OOB_SAS_NEG 0x10C
+
+#define OOB_WAKE_MIN 0x10E
+
+#define OOB_WAKE_MAX 0x110
+
+#define OOB_WAKE_NEG 0x112
+
+#define OOB_IDLE_MAX 0x114
+
+#define OOB_BURST_MAX 0x116
+
+#define OOB_DATA_KBITS 0x126
+
+#define OOB_ALIGN_0_DATA 0x12C
+
+#define OOB_ALIGN_1_DATA 0x130
+
+#define D10_2_DATA_k 0x00
+#define SYNC_DATA_k 0x02
+#define ALIGN_1_DATA_k 0x04
+#define ALIGN_0_DATA_k 0x08
+#define BURST_DATA_k 0x10
+
+#define OOB_PHY_RESET_COUNT 0x13C
+
+#define OOB_SIG_GEN 0x140
+
+#define START_OOB 0x80
+#define START_DWS 0x40
+#define ALIGN_CNT3 0x30
+#define ALIGN_CNT2 0x20
+#define ALIGN_CNT1 0x10
+#define ALIGN_CNT4 0x00
+#define STOP_DWS 0x08
+#define SEND_COMSAS 0x04
+#define SEND_COMINIT 0x02
+#define SEND_COMWAKE 0x01
+
+#define OOB_XMIT 0x141
+
+#define TX_ENABLE 0x80
+#define XMIT_OOB_BURST 0x10
+#define XMIT_D10_2 0x08
+#define XMIT_SYNC 0x04
+#define XMIT_ALIGN_1 0x02
+#define XMIT_ALIGN_0 0x01
+
+#define FUNCTION_MASK 0x142
+
+#define SAS_MODE_DIS 0x80
+#define SATA_MODE_DIS 0x40
+#define SPINUP_HOLD_DIS 0x20
+#define HOT_PLUG_DIS 0x10
+#define SATA_PS_DIS 0x08
+#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS)
+
+#define OOB_MODE 0x143
+
+#define SAS_MODE 0x80
+#define SATA_MODE 0x40
+#define SLOW_CLK 0x20
+#define FORCE_XMIT_15 0x08
+#define PHY_SPEED_60 0x04
+#define PHY_SPEED_30 0x02
+#define PHY_SPEED_15 0x01
+
+#define CURRENT_STATUS 0x144
+
+#define CURRENT_OOB_DONE 0x80
+#define CURRENT_LOSS_OF_SIGNAL 0x40
+#define CURRENT_SPINUP_HOLD 0x20
+#define CURRENT_HOT_PLUG_CNCT 0x10
+#define CURRENT_GTO_TIMEOUT 0x08
+#define CURRENT_OOB_TIMEOUT 0x04
+#define CURRENT_DEVICE_PRESENT 0x02
+#define CURRENT_OOB_ERROR 0x01
+
+#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \
+ CURRENT_GTO_TIMEOUT)
+
+#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \
+ CURRENT_OOB_ERROR)
+
+#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \
+ CURRENT_HOT_PLUG_CNCT | \
+ CURRENT_DEVICE_PRESENT)
+
+#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \
+ CURRENT_DEVICE_PRESENT)
+
+#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL
+
+#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \
+ CURRENT_LOSS_OF_SIGNAL | \
+ CURRENT_SPINUP_HOLD | \
+ CURRENT_HOT_PLUG_CNCT | \
+ CURRENT_GTO_TIMEOUT | \
+ CURRENT_DEVICE_PRESENT | \
+ CURRENT_OOB_ERROR )
+
+#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \
+ CURRENT_GTO_TIMEOUT | \
+ CURRENT_OOB_TIMEOUT | \
+ CURRENT_OOB_ERROR )
+
+#define SPEED_MASK 0x145
+
+#define SATA_SPEED_30_DIS 0x10
+#define SATA_SPEED_15_DIS 0x08
+#define SAS_SPEED_60_DIS 0x04
+#define SAS_SPEED_30_DIS 0x02
+#define SAS_SPEED_15_DIS 0x01
+#define SAS_SPEED_MASK_DEFAULT 0x00
+
+#define OOB_TIMER_ENABLE 0x14D
+
+#define HOT_PLUG_EN 0x80
+#define RCD_EN 0x40
+#define COMTIMER_EN 0x20
+#define SNTT_EN 0x10
+#define SNLT_EN 0x04
+#define SNWT_EN 0x02
+#define ALIGN_EN 0x01
+
+#define OOB_STATUS 0x14E
+
+#define OOB_DONE 0x80
+#define LOSS_OF_SIGNAL 0x40 /* ro */
+#define SPINUP_HOLD 0x20
+#define HOT_PLUG_CNCT 0x10 /* ro */
+#define GTO_TIMEOUT 0x08 /* ro */
+#define OOB_TIMEOUT 0x04 /* ro */
+#define DEVICE_PRESENT 0x02 /* ro */
+#define OOB_ERROR 0x01 /* ro */
+
+#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \
+ OOB_TIMEOUT | OOB_ERROR)
+
+#define OOB_STATUS_CLEAR 0x14F
+
+#define OOB_DONE_CLR 0x80
+#define LOSS_OF_SIGNAL_CLR 0x40
+#define SPINUP_HOLD_CLR 0x20
+#define HOT_PLUG_CNCT_CLR 0x10
+#define GTO_TIMEOUT_CLR 0x08
+#define OOB_TIMEOUT_CLR 0x04
+#define OOB_ERROR_CLR 0x01
+
+#define HOT_PLUG_DELAY 0x150
+/* In 5 ms units. 20 = 100 ms. */
+#define HOTPLUG_DELAY_TIMEOUT 20
+
+
+#define INT_ENABLE_2 0x15A
+
+#define OOB_DONE_EN 0x80
+#define LOSS_OF_SIGNAL_EN 0x40
+#define SPINUP_HOLD_EN 0x20
+#define HOT_PLUG_CNCT_EN 0x10
+#define GTO_TIMEOUT_EN 0x08
+#define OOB_TIMEOUT_EN 0x04
+#define DEVICE_PRESENT_EN 0x02
+#define OOB_ERROR_EN 0x01
+
+#define PHY_CONTROL_0 0x160
+
+#define PHY_LOWPWREN_TX 0x80
+#define PHY_LOWPWREN_RX 0x40
+#define SPARE_REG_160_B5 0x20
+#define OFFSET_CANCEL_RX 0x10
+
+/* bits 3:2 */
+#define PHY_RXCOMCENTER_60V 0x00
+#define PHY_RXCOMCENTER_70V 0x04
+#define PHY_RXCOMCENTER_80V 0x08
+#define PHY_RXCOMCENTER_90V 0x0C
+#define PHY_RXCOMCENTER_MASK 0x0C
+
+#define PHY_RESET 0x02
+#define SAS_DEFAULT_SEL 0x01
+
+#define PHY_CONTROL_1 0x161
+
+/* bits 2:0 */
+#define SATA_PHY_DETLEVEL_50mv 0x00
+#define SATA_PHY_DETLEVEL_75mv 0x01
+#define SATA_PHY_DETLEVEL_100mv 0x02
+#define SATA_PHY_DETLEVEL_125mv 0x03
+#define SATA_PHY_DETLEVEL_150mv 0x04
+#define SATA_PHY_DETLEVEL_175mv 0x05
+#define SATA_PHY_DETLEVEL_200mv 0x06
+#define SATA_PHY_DETLEVEL_225mv 0x07
+#define SATA_PHY_DETLEVEL_MASK 0x07
+
+/* bits 5:3 */
+#define SAS_PHY_DETLEVEL_50mv 0x00
+#define SAS_PHY_DETLEVEL_75mv 0x08
+#define SAS_PHY_DETLEVEL_100mv 0x10
+#define SAS_PHY_DETLEVEL_125mv 0x11
+#define SAS_PHY_DETLEVEL_150mv 0x20
+#define SAS_PHY_DETLEVEL_175mv 0x21
+#define SAS_PHY_DETLEVEL_200mv 0x30
+#define SAS_PHY_DETLEVEL_225mv 0x31
+#define SAS_PHY_DETLEVEL_MASK 0x38
+
+#define PHY_CONTROL_2 0x162
+
+/* bits 7:5 */
+#define SATA_PHY_DRV_400mv 0x00
+#define SATA_PHY_DRV_450mv 0x20
+#define SATA_PHY_DRV_500mv 0x40
+#define SATA_PHY_DRV_550mv 0x60
+#define SATA_PHY_DRV_600mv 0x80
+#define SATA_PHY_DRV_650mv 0xA0
+#define SATA_PHY_DRV_725mv 0xC0
+#define SATA_PHY_DRV_800mv 0xE0
+#define SATA_PHY_DRV_MASK 0xE0
+
+/* bits 4:3 */
+#define SATA_PREEMP_0 0x00
+#define SATA_PREEMP_1 0x08
+#define SATA_PREEMP_2 0x10
+#define SATA_PREEMP_3 0x18
+#define SATA_PREEMP_MASK 0x18
+
+#define SATA_CMSH1P5 0x04
+
+/* bits 1:0 */
+#define SATA_SLEW_0 0x00
+#define SATA_SLEW_1 0x01
+#define SATA_SLEW_2 0x02
+#define SATA_SLEW_3 0x03
+#define SATA_SLEW_MASK 0x03
+
+#define PHY_CONTROL_3 0x163
+
+/* bits 7:5 */
+#define SAS_PHY_DRV_400mv 0x00
+#define SAS_PHY_DRV_450mv 0x20
+#define SAS_PHY_DRV_500mv 0x40
+#define SAS_PHY_DRV_550mv 0x60
+#define SAS_PHY_DRV_600mv 0x80
+#define SAS_PHY_DRV_650mv 0xA0
+#define SAS_PHY_DRV_725mv 0xC0
+#define SAS_PHY_DRV_800mv 0xE0
+#define SAS_PHY_DRV_MASK 0xE0
+
+/* bits 4:3 */
+#define SAS_PREEMP_0 0x00
+#define SAS_PREEMP_1 0x08
+#define SAS_PREEMP_2 0x10
+#define SAS_PREEMP_3 0x18
+#define SAS_PREEMP_MASK 0x18
+
+#define SAS_CMSH1P5 0x04
+
+/* bits 1:0 */
+#define SAS_SLEW_0 0x00
+#define SAS_SLEW_1 0x01
+#define SAS_SLEW_2 0x02
+#define SAS_SLEW_3 0x03
+#define SAS_SLEW_MASK 0x03
+
+#define PHY_CONTROL_4 0x168
+
+#define PHY_DONE_CAL_TX 0x80
+#define PHY_DONE_CAL_RX 0x40
+#define RX_TERM_LOAD_DIS 0x20
+#define TX_TERM_LOAD_DIS 0x10
+#define AUTO_TERM_CAL_DIS 0x08
+#define PHY_SIGDET_FLTR_EN 0x04
+#define OSC_FREQ 0x02
+#define PHY_START_CAL 0x01
+
+/*
+ * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
+ */
+#define PCIX_REG_BASE_ADR 0xB8040000
+
+#define PCIC_VENDOR_ID 0x00
+
+#define PCIC_DEVICE_ID 0x02
+
+#define PCIC_COMMAND 0x04
+
+#define INT_DIS 0x0400
+#define FBB_EN 0x0200 /* ro */
+#define SERR_EN 0x0100
+#define STEP_EN 0x0080 /* ro */
+#define PERR_EN 0x0040
+#define VGA_EN 0x0020 /* ro */
+#define MWI_EN 0x0010
+#define SPC_EN 0x0008
+#define MST_EN 0x0004
+#define MEM_EN 0x0002
+#define IO_EN 0x0001
+
+#define PCIC_STATUS 0x06
+
+#define PERR_DET 0x8000
+#define SERR_GEN 0x4000
+#define MABT_DET 0x2000
+#define TABT_DET 0x1000
+#define TABT_GEN 0x0800
+#define DPERR_DET 0x0100
+#define CAP_LIST 0x0010
+#define INT_STAT 0x0008
+
+#define PCIC_DEVREV_ID 0x08
+
+#define PCIC_CLASS_CODE 0x09
+
+#define PCIC_CACHELINE_SIZE 0x0C
+
+#define PCIC_MBAR0 0x10
+
+#define PCIC_MBAR0_OFFSET 0
+
+#define PCIC_MBAR1 0x18
+
+#define PCIC_MBAR1_OFFSET 2
+
+#define PCIC_IOBAR 0x20
+
+#define PCIC_IOBAR_OFFSET 4
+
+#define PCIC_SUBVENDOR_ID 0x2C
+
+#define PCIC_SUBSYTEM_ID 0x2E
+
+#define PCIX_STATUS 0x44
+#define RCV_SCE 0x20000000
+#define UNEXP_SC 0x00080000
+#define SC_DISCARD 0x00040000
+
+#define ECC_CTRL_STAT 0x48
+#define UNCOR_ECCERR 0x00000008
+
+#define PCIC_PM_CSR 0x5C
+
+#define PWR_STATE_D0 0
+#define PWR_STATE_D1 1 /* not supported */
+#define PWR_STATE_D2 2 /* not supported */
+#define PWR_STATE_D3 3
+
+#define PCIC_BASE1 0x6C /* internal use only */
+
+#define BASE1_RSVD 0xFFFFFFF8
+
+#define PCIC_BASEA 0x70 /* internal use only */
+
+#define BASEA_RSVD 0xFFFFFFC0
+#define BASEA_START 0
+
+#define PCIC_BASEB 0x74 /* internal use only */
+
+#define BASEB_RSVD 0xFFFFFF80
+#define BASEB_IOMAP_MASK 0x7F
+#define BASEB_START 0x80
+
+#define PCIC_BASEC 0x78 /* internal use only */
+
+#define BASEC_RSVD 0xFFFFFFFC
+#define BASEC_MASK 0x03
+#define BASEC_START 0x58
+
+#define PCIC_MBAR_KEY 0x7C /* internal use only */
+
+#define MBAR_KEY_MASK 0xFFFFFFFF
+
+#define PCIC_HSTPCIX_CNTRL 0xA0
+
+#define REWIND_DIS 0x0800
+#define SC_TMR_DIS 0x04000000
+
+#define PCIC_MBAR0_MASK 0xA8
+#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000
+#define PCIC_MBAR0_SIZE_SHIFT 13
+#define PCIC_MBAR0_SIZE(val) \
+ (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT)
+
+#define PCIC_FLASH_MBAR 0xB8
+
+#define PCIC_INTRPT_STAT 0xD4
+
+#define PCIC_TP_CTRL 0xFC
+
+/*
+ * EXSI Registers, Address Range: (0x00-0xFC)
+ */
+#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI
+
+#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00)
+
+#define OCMINITIALIZED 0x80000000
+#define ASIEN 0x00400000
+#define HCMODE 0x00200000
+#define PCIDEF 0x00100000
+#define COMSTOCK 0x00080000
+#define SEEPROMEND 0x00040000
+#define MSTTIMEN 0x00020000
+#define XREGEX 0x00000200
+#define NVRAMW 0x00000100
+#define NVRAMEX 0x00000080
+#define SRAMW 0x00000040
+#define SRAMEX 0x00000020
+#define FLASHW 0x00000010
+#define FLASHEX 0x00000008
+#define SEEPROMCFG 0x00000004
+#define SEEPROMTYP 0x00000002
+#define SEEPROMEX 0x00000001
+
+
+#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04)
+
+#define MODINT_EN 0x00000001
+
+
+#define PMSTATR (EXSI_REG_BASE_ADR + 0x10)
+
+#define FLASHRST 0x00000002
+#define FLASHRDY 0x00000001
+
+
+#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14)
+
+#define FLWEH_MASK 0x30000000
+#define FLWESU_MASK 0x0C000000
+#define FLWEPW_MASK 0x03F00000
+#define FLOEH_MASK 0x000C0000
+#define FLOESU_MASK 0x00030000
+#define FLOEPW_MASK 0x0000FC00
+#define FLCSH_MASK 0x00000300
+#define FLCSSU_MASK 0x000000C0
+#define FLCSPW_MASK 0x0000003F
+
+#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18)
+
+#define SRWEH_MASK 0x30000000
+#define SRWESU_MASK 0x0C000000
+#define SRWEPW_MASK 0x03F00000
+
+#define SROEH_MASK 0x000C0000
+#define SROESU_MASK 0x00030000
+#define SROEPW_MASK 0x0000FC00
+#define SRCSH_MASK 0x00000300
+#define SRCSSU_MASK 0x000000C0
+#define SRCSPW_MASK 0x0000003F
+
+#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C)
+
+#define NVWEH_MASK 0x30000000
+#define NVWESU_MASK 0x0C000000
+#define NVWEPW_MASK 0x03F00000
+#define NVOEH_MASK 0x000C0000
+#define NVOESU_MASK 0x00030000
+#define NVOEPW_MASK 0x0000FC00
+#define NVCSH_MASK 0x00000300
+#define NVCSSU_MASK 0x000000C0
+#define NVCSPW_MASK 0x0000003F
+
+#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20)
+
+#define XRWEH_MASK 0x30000000
+#define XRWESU_MASK 0x0C000000
+#define XRWEPW_MASK 0x03F00000
+#define XROEH_MASK 0x000C0000
+#define XROESU_MASK 0x00030000
+#define XROEPW_MASK 0x0000FC00
+#define XRCSH_MASK 0x00000300
+#define XRCSSU_MASK 0x000000C0
+#define XRCSPW_MASK 0x0000003F
+
+#define XREGADDR (EXSI_REG_BASE_ADR + 0x24)
+
+#define XRADDRINCEN 0x80000000
+#define XREGADD_MASK 0x007FFFFF
+
+
+#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28)
+
+#define XREGDATA_MASK 0x0000FFFF
+
+#define GPIOOER (EXSI_REG_BASE_ADR + 0x40)
+
+#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44)
+
+#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48)
+
+#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C)
+
+#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50)
+
+#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54)
+
+#define GPIO_EXTSRC 0x00000001
+
+#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0)
+
+#define SXFERDONE 0x00000100
+#define SXFERCNT_MASK 0x000000E0
+#define SCMDTYP_MASK 0x0000001C
+#define SXFERSTART 0x00000002
+#define SXFEREN 0x00000001
+
+#define SRATER (EXSI_REG_BASE_ADR + 0xA4)
+
+#define SADDRR (EXSI_REG_BASE_ADR + 0xA8)
+
+#define SADDR_MASK 0x0000FFFF
+
+#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC)
+
+#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC)
+#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD)
+#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE)
+#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF)
+
+#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0)
+
+#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0)
+#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1)
+#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2)
+#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3)
+
+#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0)
+#define ASIFMTERR 0x00000400
+#define ASISEECHKERR 0x00000200
+#define ASIERR 0x00000100
+
+#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4)
+#define CHECKSUM_MASK 0x0000FFFF
+
+#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8)
+#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC)
+#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0)
+#define CPI2ASIBYTECNT_MASK 0x00070000
+#define CPI2ASIBYTEEN_MASK 0x0000F000
+#define CPI2ASITARGERR_MASK 0x00000F00
+#define CPI2ASITARGMID_MASK 0x000000F0
+#define CPI2ASIMSTERR_MASK 0x0000000F
+
+/*
+ * XSRAM, External SRAM (DWord and any BE pattern accessible)
+ */
+#define XSRAM_REG_BASE_ADDR 0xB8100000
+#define XSRAM_SIZE 0x100000
+
+/*
+ * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF).
+ */
+#define NVRAM_REG_BASE_ADR 0xBF800000
+#define NVRAM_MAX_BASE_ADR 0x003FFFFF
+
+/* OCM base address */
+#define OCM_BASE_ADDR 0xA0000000
+#define OCM_MAX_SIZE 0x20000
+
+/*
+ * Sequencers (Central and Link) Scratch RAM page definitions.
+ */
+
+/*
+ * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024
+ * byte memory. It is dword accessible and has byte parity
+ * protection. The CSEQ accesses it in 32 byte windows, either as mode
+ * dependent or mode independent memory. Each mode has 96 bytes,
+ * (three 32 byte pages 0-2, not contiguous), leaving 128 bytes of
+ * Mode Independent memory (four 32 byte pages 3-7). Note that mode
+ * dependent scratch memory, Mode 8, page 0-3 overlaps mode
+ * independent scratch memory, pages 0-3.
+ * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and
+ * 128 bytes in mode 8,
+ * - 259 bytes of mode independent scratch, common to modes 0-15.
+ *
+ * Sequencer scratch RAM is 1024 bytes. This scratch memory is
+ * divided into mode dependent and mode independent scratch with this
+ * memory further subdivided into pages of size 32 bytes. There are 5
+ * pages (160 bytes) of mode independent scratch and 3 pages of
+ * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages
+ * 0-2 dependent scratch overlap with pages 0-2 of mode independent
+ * scratch memory.
+ *
+ * The host accesses this scratch in a different manner from the
+ * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE
+ * and CMnSCRPAGE to access the scratch memory. A flat mapping of the
+ * scratch memory is available for software convenience and to prevent
+ * corruption while the sequencer is running. This memory is mapped
+ * onto addresses 800h - BFFh, total of 400h bytes.
+ *
+ * These addresses are mapped as follows:
+ *
+ * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1
+ * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1
+ * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1
+ * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1
+ * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1
+ * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1
+ * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1
+ * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1
+ * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2
+ * Mode Independent Scratch Pages 0-2
+ * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3
+ * Mode Independent Scratch Page 3
+ * A80h-AFFh Mode Independent Scratch Pages 4-7
+ * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2
+ * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2
+ * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2
+ * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2
+ * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2
+ * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2
+ * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2
+ * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2
+ */
+
+/* General macros */
+#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */
+
+/* All macros start with offsets from base + 0x800 (CMAPPEDSCR).
+ * Mode dependent scratch page 0, mode 0.
+ * For modes 1-7 you have to do arithmetic. */
+#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000)
+#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002)
+#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004)
+#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006)
+#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008)
+
+/* Mode dependent scratch page 0 mode 8 macros. */
+#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200)
+#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202)
+#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204)
+#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206)
+#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208)
+#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A)
+#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C)
+#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E)
+#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210)
+#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212)
+#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213)
+#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A)
+#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C)
+#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E)
+
+/* Mode dependent scratch page 1 mode 8 macros. */
+#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220)
+#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228)
+
+/* Mode dependent scratch page 2 mode 8 macros */
+#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240)
+#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248)
+#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250)
+#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254)
+
+/* Mode independent scratch page 4 macros. */
+#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280)
+#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282)
+#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284)
+#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286)
+#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288)
+#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A)
+#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C)
+#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E)
+#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290)
+#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292)
+#define CSEQ_REG0 (CMAPPEDSCR + 0x0294)
+#define CSEQ_REG1 (CMAPPEDSCR + 0x0296)
+#define CSEQ_REG2 (CMAPPEDSCR + 0x0298)
+#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C)
+#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D)
+#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E)
+
+/* Mode independent scratch page 5 macros. */
+#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0)
+#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8)
+#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0)
+#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2)
+#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4)
+#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6)
+#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7)
+#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8)
+
+/* Mode independent scratch page 6 macros. */
+#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0)
+#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2)
+#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4)
+#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6)
+#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7)
+#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8)
+#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA)
+#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0)
+#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2)
+#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5)
+#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6)
+#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8)
+#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA)
+#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC)
+
+/* Mode independent scratch page 7 macros. */
+#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0)
+#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8)
+#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0)
+#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2)
+#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4)
+#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6)
+#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7)
+#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8)
+#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA)
+#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC)
+
+/***************************************************************************
+* Link m Sequencer scratch RAM is 512 bytes.
+* This scratch memory is divided into mode dependent and mode
+* independent scratch with this memory further subdivided into
+* pages of size 32 bytes. There are 4 pages (128 bytes) of
+* mode independent scratch and 4 pages of dependent scratch
+* memory for modes 0-2 (384 bytes).
+*
+* The host accesses this scratch in a different manner from the
+* link sequencer. The sequencer has to use LSEQ registers
+* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat
+* mapping of the scratch memory is available for software
+* convenience and to prevent corruption while the sequencer is
+* running. This memory is mapped onto addresses 800h - 9FFh.
+*
+* These addresses are mapped as follows:
+*
+* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2
+* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3
+* Mode Dependent Scratch Mode 5 Page 0
+* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2
+* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3
+* Mode Dependent Scratch Mode 5 Page 1
+* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2
+* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3
+* Mode Dependent Scratch Mode 5 Page 2
+* 980h-9DFh Mode Independent Scratch Pages 0-3
+* 9E0h-9FFh Mode Independent Scratch Page 3
+* Mode Dependent Scratch Mode 5 Page 3
+*
+****************************************************************************/
+/* General macros */
+#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */
+#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */
+#define LSEQ_MODE5_PAGE0_OFFSET 0x60
+
+/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */
+/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2. */
+#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000)
+#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002)
+#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004)
+
+/* Mode flag macros (byte 0) */
+#define SAS_SAVECTX_OCCURRED 0x80
+#define SAS_OOBSVC_OCCURRED 0x40
+#define SAS_OOB_DEVICE_PRESENT 0x20
+#define SAS_CFGHDR_OCCURRED 0x10
+#define SAS_RCV_INTS_ARE_DISABLED 0x08
+#define SAS_OOB_HOT_PLUG_CNCT 0x04
+#define SAS_AWAIT_OPEN_CONNECTION 0x02
+#define SAS_CFGCMPLT_OCCURRED 0x01
+
+/* Mode flag macros (byte 1) */
+#define SAS_RLSSCB_OCCURRED 0x80
+#define SAS_FORCED_HEADER_MISS 0x40
+
+#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006)
+#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008)
+#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B)
+#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C)
+
+/* Mode dependent scratch page 0 macros for mode 0 (non-common) */
+/* Absolute offsets */
+#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E)
+#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010)
+#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012)
+#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014)
+#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016)
+#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A)
+#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B)
+#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C)
+#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D)
+#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E)
+
+/* Mode dependent scratch page 0 macros for mode 1 (non-common) */
+/* Absolute offsets */
+#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E)
+#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090)
+#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092)
+#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A)
+#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B)
+#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C)
+#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x009D)
+#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E)
+
+/* Mode dependent scratch page 0 macros for mode 2 (non-common) */
+#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E)
+#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110)
+#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112)
+#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114)
+#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116)
+#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A)
+
+/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */
+/* Absolute offsets */
+#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E)
+#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F)
+#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070)
+#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072)
+#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073)
+#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074)
+#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075)
+#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076)
+#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x0078)
+#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007A)
+#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C)
+#define LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E)
+
+/* Mode dependent scratch page 1, mode 0 and mode 1 */
+#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020)
+#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030)
+#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0)
+#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0)
+
+/* Mode dependent scratch page 1 macros for mode 2 */
+/* Absolute offsets */
+#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120)
+#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124)
+#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128)
+
+/* Mode dependent scratch page 1 macros for mode 4/5 */
+#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0)
+#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1)
+#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4)
+#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5)
+#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB)
+#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0)
+#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2)
+#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4)
+
+/* Mode dependent scratch page 2 macros for mode 0 */
+/* Absolute offsets */
+#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040)
+#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B)
+#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C)
+#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E)
+#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F)
+
+/* Mode dependent scratch page 2 macros for mode 1 */
+/* Absolute offsets */
+/* byte 0 bits 1-0 are domain select. */
+#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0)
+#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8)
+#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC)
+#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4)
+
+/* Mode dependent scratch page 2 macros for mode 2 */
+/* Absolute offsets */
+#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140)
+#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144)
+#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148)
+#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C)
+#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \
+ (LmSCRATCH(LinkNum) + 0x0150)
+#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154)
+
+/* Mode dependent scratch page 2 macros for mode 5 */
+#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160)
+#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164)
+#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168)
+#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C)
+
+/* Mode dependent scratch page 3 macros for modes 0 and 1 */
+/* None defined */
+
+/* Mode dependent scratch page 3 macros for modes 2 and 5 */
+/* None defined */
+
+/* Mode Independent Scratch page 0 macros. */
+#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180)
+#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182)
+#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186)
+#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187)
+/*
+ * Currently only bit 0, SAS_DWSAQD, is used.
+ */
+#define SAS_DWSAQD 0x01 /*
+ * DWSSTATUS: DWSAQD
+ * bit las read in ISR.
+ */
+#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188)
+/* Connection states (byte 0) */
+#define SAS_WE_OPENED_CS 0x01
+#define SAS_DEVICE_OPENED_CS 0x02
+#define SAS_WE_SENT_DONE_CS 0x04
+#define SAS_DEVICE_SENT_DONE_CS 0x08
+#define SAS_WE_SENT_CLOSE_CS 0x10
+#define SAS_DEVICE_SENT_CLOSE_CS 0x20
+#define SAS_WE_SENT_BREAK_CS 0x40
+#define SAS_DEVICE_SENT_BREAK_CS 0x80
+/* Connection states (byte 1) */
+#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01
+#define SAS_AIP_RECEIVED_CS 0x02
+#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04
+#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08
+#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10
+#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20
+/* Connection states (byte 2) */
+#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01
+#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02
+#define SAS_DEVICE_SENT_DMAT_CS 0x04
+#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08
+#define SAS_CLEARING_AFFILIATION_CS 0x20
+#define SAS_RXTASK_ACTIVE_CS 0x40
+#define SAS_TXTASK_ACTIVE_CS 0x80
+/* Connection states (byte 3) */
+#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01
+#define SAS_DWS_TIMER_EXPIRED_CS 0x02
+#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04
+#define SAS_PHY_DISABLED_CS 0x08
+#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10
+#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20
+#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40
+#define SAS_DEVICE_SENT_REG_FIS_CS 0x40
+#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80
+#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\
+ SAS_DWS_TIMER_EXPIRED_CS |\
+ SAS_LINK_RESET_NOT_COMPLETE_CS|\
+ SAS_PHY_DISABLED_CS)
+
+#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\
+ SAS_PHY_EVENT_TASK_ACTIVE_CS |\
+ SAS_DEVICE_SENT_ID_FRAME_CS |\
+ SAS_DEVICE_SENT_HARD_RESET_CS)
+
+#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C)
+#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E)
+#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F)
+#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192)
+#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194)
+#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196)
+#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198)
+
+/* Mode independent scratch page 1 macros. */
+#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0)
+#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2)
+#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4)
+#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6)
+#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8)
+#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9)
+#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA)
+#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB)
+#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC)
+#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD)
+#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE)
+#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8)
+#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC)
+#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE)
+
+/* Mode independent scratch page 2 macros. */
+#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0)
+#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2)
+#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4)
+#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6)
+#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8)
+#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9)
+#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA)
+#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB)
+#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC)
+#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD)
+#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE)
+#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4)
+
+/* Mode independent scratch page 3 macros. */
+#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0)
+#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4)
+#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8)
+#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC)
+#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0)
+#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4)
+#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8)
+#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC)
+
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
new file mode 100644
index 000000000..912e6b755
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -0,0 +1,787 @@
+/*
+ * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _AIC94XX_SAS_H_
+#define _AIC94XX_SAS_H_
+
+#include <scsi/libsas.h>
+
+/* ---------- DDBs ---------- */
+/* DDBs are device descriptor blocks which describe a device in the
+ * domain that this sequencer can maintain low-level connections for
+ * us. They are be 64 bytes.
+ */
+#define ASD_MAX_DDBS 128
+
+struct asd_ddb_ssp_smp_target_port {
+ u8 conn_type; /* byte 0 */
+#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */
+
+ u8 conn_rate;
+ __be16 init_conn_tag;
+ u8 dest_sas_addr[8]; /* bytes 4-11 */
+
+ __le16 send_queue_head;
+ u8 sq_suspended;
+ u8 ddb_type; /* DDB_TYPE_TARGET */
+#define DDB_TYPE_UNUSED 0xFF
+#define DDB_TYPE_TARGET 0xFE
+#define DDB_TYPE_INITIATOR 0xFD
+#define DDB_TYPE_PM_PORT 0xFC
+
+ __le16 _r_a;
+ __be16 awt_def;
+
+ u8 compat_features; /* byte 20 */
+ u8 pathway_blocked_count;
+ __be16 arb_wait_time;
+ __be32 more_compat_features; /* byte 24 */
+
+ u8 conn_mask;
+ u8 flags; /* concurrent conn:2,2 and open:0(1) */
+#define CONCURRENT_CONN_SUPP 0x04
+#define OPEN_REQUIRED 0x01
+
+ u16 _r_b;
+ __le16 exec_queue_tail;
+ __le16 send_queue_tail;
+ __le16 sister_ddb;
+
+ __le16 _r_c;
+
+ u8 max_concurrent_conn;
+ u8 num_concurrent_conn;
+ u8 num_contexts;
+
+ u8 _r_d;
+
+ __le16 active_task_count;
+
+ u8 _r_e[9];
+
+ u8 itnl_reason; /* I_T nexus loss reason */
+
+ __le16 _r_f;
+
+ __le16 itnl_timeout;
+#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */
+
+ __le32 itnl_timestamp;
+} __attribute__ ((packed));
+
+struct asd_ddb_stp_sata_target_port {
+ u8 conn_type; /* byte 0 */
+ u8 conn_rate;
+ __be16 init_conn_tag;
+ u8 dest_sas_addr[8]; /* bytes 4-11 */
+
+ __le16 send_queue_head;
+ u8 sq_suspended;
+ u8 ddb_type; /* DDB_TYPE_TARGET */
+
+ __le16 _r_a;
+
+ __be16 awt_def;
+ u8 compat_features; /* byte 20 */
+ u8 pathway_blocked_count;
+ __be16 arb_wait_time;
+ __be32 more_compat_features; /* byte 24 */
+
+ u8 conn_mask;
+ u8 flags; /* concurrent conn:2,2 and open:0(1) */
+#define SATA_MULTIPORT 0x80
+#define SUPPORTS_AFFIL 0x40
+#define STP_AFFIL_POL 0x20
+
+ u8 _r_b;
+ u8 flags2; /* STP close policy:0 */
+#define STP_CL_POL_NO_TX 0x00
+#define STP_CL_POL_BTW_CMDS 0x01
+
+ __le16 exec_queue_tail;
+ __le16 send_queue_tail;
+ __le16 sister_ddb;
+ __le16 ata_cmd_scbptr;
+ __le32 sata_tag_alloc_mask;
+ __le16 active_task_count;
+ __le16 _r_c;
+ __le32 sata_sactive;
+ u8 num_sata_tags;
+ u8 sata_status;
+ u8 sata_ending_status;
+ u8 itnl_reason; /* I_T nexus loss reason */
+ __le16 ncq_data_scb_ptr;
+ __le16 itnl_timeout;
+ __le32 itnl_timestamp;
+} __attribute__ ((packed));
+
+/* This struct asd_ddb_init_port, describes the device descriptor block
+ * of an initiator port (when the sequencer is operating in target mode).
+ * Bytes [0,11] and [20,27] are from the OPEN address frame.
+ * The sequencer allocates an initiator port DDB entry.
+ */
+struct asd_ddb_init_port {
+ u8 conn_type; /* byte 0 */
+ u8 conn_rate;
+ __be16 init_conn_tag; /* BE */
+ u8 dest_sas_addr[8];
+ __le16 send_queue_head; /* LE, byte 12 */
+ u8 sq_suspended;
+ u8 ddb_type; /* DDB_TYPE_INITIATOR */
+ __le16 _r_a;
+ __be16 awt_def; /* BE */
+ u8 compat_features;
+ u8 pathway_blocked_count;
+ __be16 arb_wait_time; /* BE */
+ __be32 more_compat_features; /* BE */
+ u8 conn_mask;
+ u8 flags; /* == 5 */
+ u16 _r_b;
+ __le16 exec_queue_tail; /* execution queue tail */
+ __le16 send_queue_tail;
+ __le16 sister_ddb;
+ __le16 init_resp_timeout; /* initiator response timeout */
+ __le32 _r_c;
+ __le16 active_tasks; /* active task count */
+ __le16 init_list; /* initiator list link pointer */
+ __le32 _r_d;
+ u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */
+ u8 itnl_reason; /* I_T nexus loss reason */
+ __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */
+ __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */
+ __le32 itnl_timestamp;
+} __attribute__ ((packed));
+
+/* This struct asd_ddb_sata_tag, describes a look-up table to be used
+ * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8:
+ * NCQ support. This table is used by the sequencers to find the
+ * corresponding SCB, given a SATA II tag value.
+ */
+struct asd_ddb_sata_tag {
+ __le16 scb_pointer[32];
+} __attribute__ ((packed));
+
+/* This struct asd_ddb_sata_pm_table, describes a port number to
+ * connection handle look-up table. SATA targets attached to a port
+ * multiplier require a 4-bit port number value. There is one DDB
+ * entry of this type for each SATA port multiplier (sister DDB).
+ * Given a SATA PM port number, this table gives us the SATA PM Port
+ * DDB of the SATA port multiplier port (i.e. the SATA target
+ * discovered on the port).
+ */
+struct asd_ddb_sata_pm_table {
+ __le16 ddb_pointer[16];
+ __le16 _r_a[16];
+} __attribute__ ((packed));
+
+/* This struct asd_ddb_sata_pm_port, describes the SATA port multiplier
+ * port format DDB.
+ */
+struct asd_ddb_sata_pm_port {
+ u8 _r_a[15];
+ u8 ddb_type;
+ u8 _r_b[13];
+ u8 pm_port_flags;
+#define PM_PORT_MASK 0xF0
+#define PM_PORT_SET 0x02
+ u8 _r_c[6];
+ __le16 sister_ddb;
+ __le16 ata_cmd_scbptr;
+ __le32 sata_tag_alloc_mask;
+ __le16 active_task_count;
+ __le16 parent_ddb;
+ __le32 sata_sactive;
+ u8 num_sata_tags;
+ u8 sata_status;
+ u8 sata_ending_status;
+ u8 _r_d[9];
+} __attribute__ ((packed));
+
+/* This struct asd_ddb_seq_shared, describes a DDB shared by the
+ * central and link sequencers. port_map_by_links is indexed phy
+ * number [0,7]; each byte is a bit mask of all the phys that are in
+ * the same port as the indexed phy.
+ */
+struct asd_ddb_seq_shared {
+ __le16 q_free_ddb_head;
+ __le16 q_free_ddb_tail;
+ __le16 q_free_ddb_cnt;
+ __le16 q_used_ddb_head;
+ __le16 q_used_ddb_tail;
+ __le16 shared_mem_lock;
+ __le16 smp_conn_tag;
+ __le16 est_nexus_buf_cnt;
+ __le16 est_nexus_buf_thresh;
+ u32 _r_a;
+ u8 settable_max_contexts;
+ u8 _r_b[23];
+ u8 conn_not_active;
+ u8 phy_is_up;
+ u8 _r_c[8];
+ u8 port_map_by_links[8];
+} __attribute__ ((packed));
+
+/* ---------- SG Element ---------- */
+
+/* This struct sg_el, describes the hardware scatter gather buffer
+ * element. All entries are little endian. In an SCB, there are 2 of
+ * this, plus one more, called a link element of this indicating a
+ * sublist if needed.
+ *
+ * A link element has only the bus address set and the flags (DS) bit
+ * valid. The bus address points to the start of the sublist.
+ *
+ * If a sublist is needed, then that sublist should also include the 2
+ * sg_el embedded in the SCB, in which case next_sg_offset is 32,
+ * since sizeof(sg_el) = 16; EOS should be 1 and EOL 0 in this case.
+ */
+struct sg_el {
+ __le64 bus_addr;
+ __le32 size;
+ __le16 _r;
+ u8 next_sg_offs;
+ u8 flags;
+#define ASD_SG_EL_DS_MASK 0x30
+#define ASD_SG_EL_DS_OCM 0x10
+#define ASD_SG_EL_DS_HM 0x00
+#define ASD_SG_EL_LIST_MASK 0xC0
+#define ASD_SG_EL_LIST_EOL 0x40
+#define ASD_SG_EL_LIST_EOS 0x80
+} __attribute__ ((packed));
+
+/* ---------- SCBs ---------- */
+
+/* An SCB (sequencer control block) is comprised of a common header
+ * and a task part, for a total of 128 bytes. All fields are in LE
+ * order, unless otherwise noted.
+ */
+
+/* This struct scb_header, defines the SCB header format.
+ */
+struct scb_header {
+ __le64 next_scb;
+ __le16 index; /* transaction context */
+ u8 opcode;
+} __attribute__ ((packed));
+
+/* SCB opcodes: Execution queue
+ */
+#define INITIATE_SSP_TASK 0x00
+#define INITIATE_LONG_SSP_TASK 0x01
+#define INITIATE_BIDIR_SSP_TASK 0x02
+#define SCB_ABORT_TASK 0x03
+#define INITIATE_SSP_TMF 0x04
+#define SSP_TARG_GET_DATA 0x05
+#define SSP_TARG_GET_DATA_GOOD 0x06
+#define SSP_TARG_SEND_RESP 0x07
+#define QUERY_SSP_TASK 0x08
+#define INITIATE_ATA_TASK 0x09
+#define INITIATE_ATAPI_TASK 0x0a
+#define CONTROL_ATA_DEV 0x0b
+#define INITIATE_SMP_TASK 0x0c
+#define SMP_TARG_SEND_RESP 0x0f
+
+/* SCB opcodes: Send Queue
+ */
+#define SSP_TARG_SEND_DATA 0x40
+#define SSP_TARG_SEND_DATA_GOOD 0x41
+
+/* SCB opcodes: Link Queue
+ */
+#define CONTROL_PHY 0x80
+#define SEND_PRIMITIVE 0x81
+#define INITIATE_LINK_ADM_TASK 0x82
+
+/* SCB opcodes: other
+ */
+#define EMPTY_SCB 0xc0
+#define INITIATE_SEQ_ADM_TASK 0xc1
+#define EST_ICL_TARG_WINDOW 0xc2
+#define COPY_MEM 0xc3
+#define CLEAR_NEXUS 0xc4
+#define INITIATE_DDB_ADM_TASK 0xc6
+#define ESTABLISH_NEXUS_ESCB 0xd0
+
+#define LUN_SIZE 8
+
+/* See SAS spec, task IU
+ */
+struct ssp_task_iu {
+ u8 lun[LUN_SIZE]; /* BE */
+ u16 _r_a;
+ u8 tmf;
+ u8 _r_b;
+ __be16 tag; /* BE */
+ u8 _r_c[14];
+} __attribute__ ((packed));
+
+/* See SAS spec, command IU
+ */
+struct ssp_command_iu {
+ u8 lun[LUN_SIZE];
+ u8 _r_a;
+ u8 efb_prio_attr; /* enable first burst, task prio & attr */
+#define EFB_MASK 0x80
+#define TASK_PRIO_MASK 0x78
+#define TASK_ATTR_MASK 0x07
+
+ u8 _r_b;
+ u8 add_cdb_len; /* in dwords, since bit 0,1 are reserved */
+ union {
+ u8 cdb[16];
+ struct {
+ __le64 long_cdb_addr; /* bus address, LE */
+ __le32 long_cdb_size; /* LE */
+ u8 _r_c[3];
+ u8 eol_ds; /* eol:6,6, ds:5,4 */
+ } long_cdb; /* sequencer extension */
+ };
+} __attribute__ ((packed));
+
+struct xfer_rdy_iu {
+ __be32 requested_offset; /* BE */
+ __be32 write_data_len; /* BE */
+ __be32 _r_a;
+} __attribute__ ((packed));
+
+/* ---------- SCB tasks ---------- */
+
+/* This is both ssp_task and long_ssp_task
+ */
+struct initiate_ssp_task {
+ u8 proto_conn_rate; /* proto:6,4, conn_rate:3,0 */
+ __le32 total_xfer_len;
+ struct ssp_frame_hdr ssp_frame;
+ struct ssp_command_iu ssp_cmd;
+ __le16 sister_scb; /* 0xFFFF */
+ __le16 conn_handle; /* index to DDB for the intended target */
+ u8 data_dir; /* :1,0 */
+#define DATA_DIR_NONE 0x00
+#define DATA_DIR_IN 0x01
+#define DATA_DIR_OUT 0x02
+#define DATA_DIR_BYRECIPIENT 0x03
+
+ u8 _r_a;
+ u8 retry_count;
+ u8 _r_b[5];
+ struct sg_el sg_element[3]; /* 2 real and 1 link */
+} __attribute__ ((packed));
+
+/* This defines both ata_task and atapi_task.
+ * ata: C bit of FIS should be 1,
+ * atapi: C bit of FIS should be 1, and command register should be 0xA0,
+ * to indicate a packet command.
+ */
+struct initiate_ata_task {
+ u8 proto_conn_rate;
+ __le32 total_xfer_len;
+ struct host_to_dev_fis fis;
+ __le32 data_offs;
+ u8 atapi_packet[16];
+ u8 _r_a[12];
+ __le16 sister_scb;
+ __le16 conn_handle;
+ u8 ata_flags; /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */
+#define CSMI_TASK 0x40
+#define DATA_XFER_MODE_DMA 0x10
+#define ATA_Q_TYPE_MASK 0x08
+#define ATA_Q_TYPE_UNTAGGED 0x00
+#define ATA_Q_TYPE_NCQ 0x08
+
+ u8 _r_b;
+ u8 retry_count;
+ u8 _r_c;
+ u8 flags;
+#define STP_AFFIL_POLICY 0x20
+#define SET_AFFIL_POLICY 0x10
+#define RET_PARTIAL_SGLIST 0x02
+
+ u8 _r_d[3];
+ struct sg_el sg_element[3];
+} __attribute__ ((packed));
+
+struct initiate_smp_task {
+ u8 proto_conn_rate;
+ u8 _r_a[40];
+ struct sg_el smp_req;
+ __le16 sister_scb;
+ __le16 conn_handle;
+ u8 _r_c[8];
+ struct sg_el smp_resp;
+ u8 _r_d[32];
+} __attribute__ ((packed));
+
+struct control_phy {
+ u8 phy_id;
+ u8 sub_func;
+#define DISABLE_PHY 0x00
+#define ENABLE_PHY 0x01
+#define RELEASE_SPINUP_HOLD 0x02
+#define ENABLE_PHY_NO_SAS_OOB 0x03
+#define ENABLE_PHY_NO_SATA_OOB 0x04
+#define PHY_NO_OP 0x05
+#define EXECUTE_HARD_RESET 0x81
+
+ u8 func_mask;
+ u8 speed_mask;
+ u8 hot_plug_delay;
+ u8 port_type;
+ u8 flags;
+#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01
+#define DISABLE_PHY_IF_OOB_FAILS 0x02
+
+ __le32 timeout_override;
+ u8 link_reset_retries;
+ u8 _r_a[47];
+ __le16 conn_handle;
+ u8 _r_b[56];
+} __attribute__ ((packed));
+
+struct control_ata_dev {
+ u8 proto_conn_rate;
+ __le32 _r_a;
+ struct host_to_dev_fis fis;
+ u8 _r_b[32];
+ __le16 sister_scb;
+ __le16 conn_handle;
+ u8 ata_flags; /* 0 */
+ u8 _r_c[55];
+} __attribute__ ((packed));
+
+struct empty_scb {
+ u8 num_valid;
+ __le32 _r_a;
+#define ASD_EDBS_PER_SCB 7
+/* header+data+CRC+DMA suffix data */
+#define ASD_EDB_SIZE (24+1024+4+16)
+ struct sg_el eb[ASD_EDBS_PER_SCB];
+#define ELEMENT_NOT_VALID 0xC0
+} __attribute__ ((packed));
+
+struct initiate_link_adm {
+ u8 phy_id;
+ u8 sub_func;
+#define GET_LINK_ERROR_COUNT 0x00
+#define RESET_LINK_ERROR_COUNT 0x01
+#define ENABLE_NOTIFY_SPINUP_INTS 0x02
+
+ u8 _r_a[57];
+ __le16 conn_handle;
+ u8 _r_b[56];
+} __attribute__ ((packed));
+
+struct copy_memory {
+ u8 _r_a;
+ __le16 xfer_len;
+ __le16 _r_b;
+ __le64 src_busaddr;
+ u8 src_ds; /* See definition of sg_el */
+ u8 _r_c[45];
+ __le16 conn_handle;
+ __le64 _r_d;
+ __le64 dest_busaddr;
+ u8 dest_ds; /* See definition of sg_el */
+ u8 _r_e[39];
+} __attribute__ ((packed));
+
+struct abort_task {
+ u8 proto_conn_rate;
+ __le32 _r_a;
+ struct ssp_frame_hdr ssp_frame;
+ struct ssp_task_iu ssp_task;
+ __le16 sister_scb;
+ __le16 conn_handle;
+ u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */
+#define SUSPEND_DATA_TRANS 0x04
+
+ u8 _r_b;
+ u8 retry_count;
+ u8 _r_c[5];
+ __le16 index; /* Transaction context of task to be queried */
+ __le16 itnl_to;
+ u8 _r_d[44];
+} __attribute__ ((packed));
+
+struct clear_nexus {
+ u8 nexus;
+#define NEXUS_ADAPTER 0x00
+#define NEXUS_PORT 0x01
+#define NEXUS_I_T 0x02
+#define NEXUS_I_T_L 0x03
+#define NEXUS_TAG 0x04
+#define NEXUS_TRANS_CX 0x05
+#define NEXUS_SATA_TAG 0x06
+#define NEXUS_T_L 0x07
+#define NEXUS_L 0x08
+#define NEXUS_T_TAG 0x09
+
+ __le32 _r_a;
+ u8 flags;
+#define SUSPEND_TX 0x80
+#define RESUME_TX 0x40
+#define SEND_Q 0x04
+#define EXEC_Q 0x02
+#define NOTINQ 0x01
+
+ u8 _r_b[3];
+ u8 conn_mask;
+ u8 _r_c[19];
+ struct ssp_task_iu ssp_task; /* LUN and TAG */
+ __le16 _r_d;
+ __le16 conn_handle;
+ __le64 _r_e;
+ __le16 index; /* Transaction context of task to be cleared */
+ __le16 context; /* Clear nexus context */
+ u8 _r_f[44];
+} __attribute__ ((packed));
+
+struct initiate_ssp_tmf {
+ u8 proto_conn_rate;
+ __le32 _r_a;
+ struct ssp_frame_hdr ssp_frame;
+ struct ssp_task_iu ssp_task;
+ __le16 sister_scb;
+ __le16 conn_handle;
+ u8 flags; /* itnl override and suspend data tx */
+#define OVERRIDE_ITNL_TIMER 8
+
+ u8 _r_b;
+ u8 retry_count;
+ u8 _r_c[5];
+ __le16 index; /* Transaction context of task to be queried */
+ __le16 itnl_to;
+ u8 _r_d[44];
+} __attribute__ ((packed));
+
+/* Transmits an arbitrary primitive on the link.
+ * Used for NOTIFY and BROADCAST.
+ */
+struct send_prim {
+ u8 phy_id;
+ u8 wait_transmit; /* :0,0 */
+ u8 xmit_flags;
+#define XMTPSIZE_MASK 0xF0
+#define XMTPSIZE_SINGLE 0x10
+#define XMTPSIZE_REPEATED 0x20
+#define XMTPSIZE_CONT 0x20
+#define XMTPSIZE_TRIPLE 0x30
+#define XMTPSIZE_REDUNDANT 0x60
+#define XMTPSIZE_INF 0
+
+#define XMTCONTEN 0x04
+#define XMTPFRM 0x02 /* Transmit at the next frame boundary */
+#define XMTPIMM 0x01 /* Transmit immediately */
+
+ __le16 _r_a;
+ u8 prim[4]; /* K, D0, D1, D2 */
+ u8 _r_b[50];
+ __le16 conn_handle;
+ u8 _r_c[56];
+} __attribute__ ((packed));
+
+/* This describes both SSP Target Get Data and SSP Target Get Data And
+ * Send Good Response SCBs. Used when the sequencer is operating in
+ * target mode...
+ */
+struct ssp_targ_get_data {
+ u8 proto_conn_rate;
+ __le32 total_xfer_len;
+ struct ssp_frame_hdr ssp_frame;
+ struct xfer_rdy_iu xfer_rdy;
+ u8 lun[LUN_SIZE];
+ __le64 _r_a;
+ __le16 sister_scb;
+ __le16 conn_handle;
+ u8 data_dir; /* 01b */
+ u8 _r_b;
+ u8 retry_count;
+ u8 _r_c[5];
+ struct sg_el sg_element[3];
+} __attribute__ ((packed));
+
+/* ---------- The actual SCB struct ---------- */
+
+struct scb {
+ struct scb_header header;
+ union {
+ struct initiate_ssp_task ssp_task;
+ struct initiate_ata_task ata_task;
+ struct initiate_smp_task smp_task;
+ struct control_phy control_phy;
+ struct control_ata_dev control_ata_dev;
+ struct empty_scb escb;
+ struct initiate_link_adm link_adm;
+ struct copy_memory cp_mem;
+ struct abort_task abort_task;
+ struct clear_nexus clear_nexus;
+ struct initiate_ssp_tmf ssp_tmf;
+ };
+} __attribute__ ((packed));
+
+/* ---------- Done List ---------- */
+/* The done list entry opcode field is defined below.
+ * The mnemonic encoding and meaning is as follows:
+ * TC - Task Complete, status was received and acknowledged
+ * TF - Task Failed, indicates an error prior to receiving acknowledgment
+ * for the command:
+ * - no conn,
+ * - NACK or R_ERR received in response to this command,
+ * - credit blocked or not available, or in the case of SMP request,
+ * - no SMP response was received.
+ * In these four cases it is known that the target didn't receive the
+ * command.
+ * TI - Task Interrupted, error after the command was acknowledged. It is
+ * known that the command was received by the target.
+ * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK
+ * (R_ERR) was received due to loss of signal, broken connection, loss of
+ * dword sync or other reason. The application client should send the
+ * appropriate task query.
+ * TA - Task Aborted, see TF.
+ * _RESP - The completion includes an empty buffer containing status.
+ * TO - Timeout.
+ */
+#define TC_NO_ERROR 0x00
+#define TC_UNDERRUN 0x01
+#define TC_OVERRUN 0x02
+#define TF_OPEN_TO 0x03
+#define TF_OPEN_REJECT 0x04
+#define TI_BREAK 0x05
+#define TI_PROTO_ERR 0x06
+#define TC_SSP_RESP 0x07
+#define TI_PHY_DOWN 0x08
+#define TF_PHY_DOWN 0x09
+#define TC_LINK_ADM_RESP 0x0a
+#define TC_CSMI 0x0b
+#define TC_ATA_RESP 0x0c
+#define TU_PHY_DOWN 0x0d
+#define TU_BREAK 0x0e
+#define TI_SATA_TO 0x0f
+#define TI_NAK 0x10
+#define TC_CONTROL_PHY 0x11
+#define TF_BREAK 0x12
+#define TC_RESUME 0x13
+#define TI_ACK_NAK_TO 0x14
+#define TF_SMPRSP_TO 0x15
+#define TF_SMP_XMIT_RCV_ERR 0x16
+#define TC_PARTIAL_SG_LIST 0x17
+#define TU_ACK_NAK_TO 0x18
+#define TU_SATA_TO 0x19
+#define TF_NAK_RECV 0x1a
+#define TA_I_T_NEXUS_LOSS 0x1b
+#define TC_ATA_R_ERR_RECV 0x1c
+#define TF_TMF_NO_CTX 0x1d
+#define TA_ON_REQ 0x1e
+#define TF_TMF_NO_TAG 0x1f
+#define TF_TMF_TAG_FREE 0x20
+#define TF_TMF_TASK_DONE 0x21
+#define TF_TMF_NO_CONN_HANDLE 0x22
+#define TC_TASK_CLEARED 0x23
+#define TI_SYNCS_RECV 0x24
+#define TU_SYNCS_RECV 0x25
+#define TF_IRTT_TO 0x26
+#define TF_NO_SMP_CONN 0x27
+#define TF_IU_SHORT 0x28
+#define TF_DATA_OFFS_ERR 0x29
+#define TF_INV_CONN_HANDLE 0x2a
+#define TF_REQUESTED_N_PENDING 0x2b
+
+/* 0xc1 - 0xc7: empty buffer received,
+ 0xd1 - 0xd7: establish nexus empty buffer received
+*/
+/* This is the ESCB mask */
+#define ESCB_RECVD 0xC0
+
+
+/* This struct done_list_struct defines the done list entry.
+ * All fields are LE.
+ */
+struct done_list_struct {
+ __le16 index; /* aka transaction context */
+ u8 opcode;
+ u8 status_block[4];
+ u8 toggle; /* bit 0 */
+#define DL_TOGGLE_MASK 0x01
+} __attribute__ ((packed));
+
+/* ---------- PHYS ---------- */
+
+struct asd_phy {
+ struct asd_sas_phy sas_phy;
+ struct asd_phy_desc *phy_desc; /* hw profile */
+
+ struct sas_identify_frame *identify_frame;
+ struct asd_dma_tok *id_frm_tok;
+ struct asd_port *asd_port;
+
+ u8 frame_rcvd[ASD_EDB_SIZE];
+};
+
+
+#define ASD_SCB_SIZE sizeof(struct scb)
+#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port)
+
+/* Define this to 0 if you do not want NOTIFY (ENABLE SPINIP) sent.
+ * Default: 0x10 (it's a mask)
+ */
+#define ASD_NOTIFY_ENABLE_SPINUP 0x10
+
+/* If enabled, set this to the interval between transmission
+ * of NOTIFY (ENABLE SPINUP). In units of 200 us.
+ */
+#define ASD_NOTIFY_TIMEOUT 2500
+
+/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP).
+ * If 0, transmit immediately. In milliseconds.
+ */
+#define ASD_NOTIFY_DOWN_COUNT 0
+
+/* Device present timer timeout constant, 10 ms. */
+#define ASD_DEV_PRESENT_TIMEOUT 0x2710
+
+#define ASD_SATA_INTERLOCK_TIMEOUT 0
+
+/* How long to wait before shutting down an STP connection, unless
+ * an STP target sent frame(s). 50 usec.
+ * IGNORED by the sequencer (i.e. value 0 always).
+ */
+#define ASD_STP_SHUTDOWN_TIMEOUT 0x0
+
+/* ATA soft reset timer timeout. 5 usec. */
+#define ASD_SRST_ASSERT_TIMEOUT 0x05
+
+/* 31 sec */
+#define ASD_RCV_FIS_TIMEOUT 0x01D905C0
+
+#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8
+
+/* COMINIT timer */
+#define ASD_TEN_MILLISEC_TIMEOUT 0x2710
+#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT
+
+/* 1 sec */
+#define ASD_SMP_RCV_TIMEOUT 0x000F4240
+
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
new file mode 100644
index 000000000..fdac7c2fe
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -0,0 +1,937 @@
+/*
+ * Aic94xx SAS/SATA driver SCB management.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+
+#include "aic94xx.h"
+#include "aic94xx_reg.h"
+#include "aic94xx_hwi.h"
+#include "aic94xx_seq.h"
+
+#include "aic94xx_dump.h"
+
+/* ---------- EMPTY SCB ---------- */
+
+#define DL_PHY_MASK 7
+#define BYTES_DMAED 0
+#define PRIMITIVE_RECVD 0x08
+#define PHY_EVENT 0x10
+#define LINK_RESET_ERROR 0x18
+#define TIMER_EVENT 0x20
+#define REQ_TASK_ABORT 0xF0
+#define REQ_DEVICE_RESET 0xF1
+#define SIGNAL_NCQ_ERROR 0xF2
+#define CLEAR_NCQ_ERROR 0xF3
+
+#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \
+ | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
+ | CURRENT_OOB_ERROR)
+
+static void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
+{
+ struct sas_phy *sas_phy = phy->sas_phy.phy;
+
+ switch (oob_mode & 7) {
+ case PHY_SPEED_60:
+ /* FIXME: sas transport class doesn't have this */
+ phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case PHY_SPEED_30:
+ phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case PHY_SPEED_15:
+ phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ }
+ sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
+ sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate;
+ sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate;
+
+ if (oob_mode & SAS_MODE)
+ phy->sas_phy.oob_mode = SAS_OOB_MODE;
+ else if (oob_mode & SATA_MODE)
+ phy->sas_phy.oob_mode = SATA_OOB_MODE;
+}
+
+static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+ int phy_id = dl->status_block[0] & DL_PHY_MASK;
+ struct asd_phy *phy = &asd_ha->phys[phy_id];
+
+ u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS;
+ u8 oob_mode = dl->status_block[2];
+
+ switch (oob_status) {
+ case CURRENT_LOSS_OF_SIGNAL:
+ /* directly attached device was removed */
+ ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
+ asd_turn_led(asd_ha, phy_id, 0);
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ break;
+ case CURRENT_OOB_DONE:
+ /* hot plugged device */
+ asd_turn_led(asd_ha, phy_id, 1);
+ get_lrate_mode(phy, oob_mode);
+ ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
+ phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ break;
+ case CURRENT_SPINUP_HOLD:
+ /* hot plug SATA, no COMWAKE sent */
+ asd_turn_led(asd_ha, phy_id, 1);
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ break;
+ case CURRENT_GTO_TIMEOUT:
+ case CURRENT_OOB_ERROR:
+ ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id,
+ dl->status_block[1]);
+ asd_turn_led(asd_ha, phy_id, 0);
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ break;
+ }
+}
+
+/* If phys are enabled sparsely, this will do the right thing. */
+static unsigned ord_phy(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
+{
+ u8 enabled_mask = asd_ha->hw_prof.enabled_phys;
+ int i, k = 0;
+
+ for_each_phy(enabled_mask, enabled_mask, i) {
+ if (&asd_ha->phys[i] == phy)
+ return k;
+ k++;
+ }
+ return 0;
+}
+
+/**
+ * asd_get_attached_sas_addr -- extract/generate attached SAS address
+ * phy: pointer to asd_phy
+ * sas_addr: pointer to buffer where the SAS address is to be written
+ *
+ * This function extracts the SAS address from an IDENTIFY frame
+ * received. If OOB is SATA, then a SAS address is generated from the
+ * HA tables.
+ *
+ * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
+ * buffer.
+ */
+static void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
+{
+ if (phy->sas_phy.frame_rcvd[0] == 0x34
+ && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
+ struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
+ /* FIS device-to-host */
+ u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr);
+
+ addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy);
+ *(__be64 *)sas_addr = cpu_to_be64(addr);
+ } else {
+ struct sas_identify_frame *idframe =
+ (void *) phy->sas_phy.frame_rcvd;
+ memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
+ }
+}
+
+static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
+{
+ int i;
+ struct asd_port *free_port = NULL;
+ struct asd_port *port;
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->asd_ports_lock, flags);
+ if (!phy->asd_port) {
+ for (i = 0; i < ASD_MAX_PHYS; i++) {
+ port = &asd_ha->asd_ports[i];
+
+ /* Check for wide port */
+ if (port->num_phys > 0 &&
+ memcmp(port->sas_addr, sas_phy->sas_addr,
+ SAS_ADDR_SIZE) == 0 &&
+ memcmp(port->attached_sas_addr,
+ sas_phy->attached_sas_addr,
+ SAS_ADDR_SIZE) == 0) {
+ break;
+ }
+
+ /* Find a free port */
+ if (port->num_phys == 0 && free_port == NULL) {
+ free_port = port;
+ }
+ }
+
+ /* Use a free port if this doesn't form a wide port */
+ if (i >= ASD_MAX_PHYS) {
+ port = free_port;
+ BUG_ON(!port);
+ memcpy(port->sas_addr, sas_phy->sas_addr,
+ SAS_ADDR_SIZE);
+ memcpy(port->attached_sas_addr,
+ sas_phy->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ }
+ port->num_phys++;
+ port->phy_mask |= (1U << sas_phy->id);
+ phy->asd_port = port;
+ }
+ ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
+ __func__, phy->asd_port->phy_mask, sas_phy->id);
+ asd_update_port_links(asd_ha, phy);
+ spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
+}
+
+static void asd_deform_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
+{
+ struct asd_port *port = phy->asd_port;
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->asd_ports_lock, flags);
+ if (port) {
+ port->num_phys--;
+ port->phy_mask &= ~(1U << sas_phy->id);
+ phy->asd_port = NULL;
+ }
+ spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
+}
+
+static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl,
+ int edb_id, int phy_id)
+{
+ unsigned long flags;
+ int edb_el = edb_id + ascb->edb_index;
+ struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
+ struct asd_phy *phy = &ascb->ha->phys[phy_id];
+ struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
+ u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
+
+ size = min(size, (u16) sizeof(phy->frame_rcvd));
+
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size);
+ phy->sas_phy.frame_rcvd_size = size;
+ asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ asd_dump_frame_rcvd(phy, dl);
+ asd_form_port(ascb->ha, phy);
+ sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
+}
+
+static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl,
+ int phy_id)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+ struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+ struct asd_phy *phy = &asd_ha->phys[phy_id];
+ u8 lr_error = dl->status_block[1];
+ u8 retries_left = dl->status_block[2];
+
+ switch (lr_error) {
+ case 0:
+ ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id);
+ break;
+ case 1:
+ ASD_DPRINTK("phy%d: Loss of signal\n", phy_id);
+ break;
+ case 2:
+ ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id);
+ break;
+ case 3:
+ ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id);
+ break;
+ default:
+ ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n",
+ phy_id, lr_error);
+ break;
+ }
+
+ asd_turn_led(asd_ha, phy_id, 0);
+ sas_phy_disconnected(sas_phy);
+ asd_deform_port(asd_ha, phy);
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+
+ if (retries_left == 0) {
+ int num = 1;
+ struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
+ GFP_ATOMIC);
+ if (!cp) {
+ asd_printk("%s: out of memory\n", __func__);
+ goto out;
+ }
+ ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
+ phy_id);
+ asd_build_control_phy(cp, phy_id, ENABLE_PHY);
+ if (asd_post_ascb_list(ascb->ha, cp, 1) != 0)
+ asd_ascb_free(cp);
+ }
+out:
+ ;
+}
+
+static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl,
+ int phy_id)
+{
+ unsigned long flags;
+ struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
+ struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct asd_phy *phy = &asd_ha->phys[phy_id];
+ u8 reg = dl->status_block[1];
+ u32 cont = dl->status_block[2] << ((reg & 3)*8);
+
+ reg &= ~3;
+ switch (reg) {
+ case LmPRMSTAT0BYTE0:
+ switch (cont) {
+ case LmBROADCH:
+ case LmBROADRVCH0:
+ case LmBROADRVCH1:
+ case LmBROADSES:
+ ASD_DPRINTK("phy%d: BROADCAST change received:%d\n",
+ phy_id, cont);
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = ffs(cont);
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
+ break;
+
+ case LmUNKNOWNP:
+ ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id);
+ break;
+
+ default:
+ ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
+ phy_id, reg, cont);
+ break;
+ }
+ break;
+ case LmPRMSTAT1BYTE0:
+ switch (cont) {
+ case LmHARDRST:
+ ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n",
+ phy_id);
+ /* The sequencer disables all phys on that port.
+ * We have to re-enable the phys ourselves. */
+ asd_deform_port(asd_ha, phy);
+ sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ break;
+
+ default:
+ ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
+ phy_id, reg, cont);
+ break;
+ }
+ break;
+ default:
+ ASD_DPRINTK("unknown primitive register:0x%x\n",
+ dl->status_block[1]);
+ break;
+ }
+}
+
+/**
+ * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB
+ * @ascb: pointer to Empty SCB
+ * @edb_id: index [0,6] to the empty data buffer which is to be invalidated
+ *
+ * After an EDB has been invalidated, if all EDBs in this ESCB have been
+ * invalidated, the ESCB is posted back to the sequencer.
+ * Context is tasklet/IRQ.
+ */
+void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
+{
+ struct asd_seq_data *seq = &ascb->ha->seq;
+ struct empty_scb *escb = &ascb->scb->escb;
+ struct sg_el *eb = &escb->eb[edb_id];
+ struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id];
+
+ memset(edb->vaddr, 0, ASD_EDB_SIZE);
+ eb->flags |= ELEMENT_NOT_VALID;
+ escb->num_valid--;
+
+ if (escb->num_valid == 0) {
+ int i;
+ /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, "
+ "dma_handle: 0x%08llx, next: 0x%08llx, "
+ "index:%d, opcode:0x%02x\n",
+ ascb->dma_scb.vaddr,
+ (u64)ascb->dma_scb.dma_handle,
+ le64_to_cpu(ascb->scb->header.next_scb),
+ le16_to_cpu(ascb->scb->header.index),
+ ascb->scb->header.opcode);
+ */
+ escb->num_valid = ASD_EDBS_PER_SCB;
+ for (i = 0; i < ASD_EDBS_PER_SCB; i++)
+ escb->eb[i].flags = 0;
+ if (!list_empty(&ascb->list))
+ list_del_init(&ascb->list);
+ i = asd_post_escb_list(ascb->ha, ascb, 1);
+ if (i)
+ asd_printk("couldn't post escb, err:%d\n", i);
+ }
+}
+
+static void escb_tasklet_complete(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+ int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */
+ u8 sb_opcode = dl->status_block[0];
+ int phy_id = sb_opcode & DL_PHY_MASK;
+ struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+ struct asd_phy *phy = &asd_ha->phys[phy_id];
+
+ if (edb > 6 || edb < 0) {
+ ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
+ edb, dl->opcode);
+ ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
+ sb_opcode, phy_id);
+ ASD_DPRINTK("escb: vaddr: 0x%p, "
+ "dma_handle: 0x%llx, next: 0x%llx, "
+ "index:%d, opcode:0x%02x\n",
+ ascb->dma_scb.vaddr,
+ (unsigned long long)ascb->dma_scb.dma_handle,
+ (unsigned long long)
+ le64_to_cpu(ascb->scb->header.next_scb),
+ le16_to_cpu(ascb->scb->header.index),
+ ascb->scb->header.opcode);
+ }
+
+ /* Catch these before we mask off the sb_opcode bits */
+ switch (sb_opcode) {
+ case REQ_TASK_ABORT: {
+ struct asd_ascb *a, *b;
+ u16 tc_abort;
+ struct domain_device *failed_dev = NULL;
+
+ ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
+ __func__, dl->status_block[3]);
+
+ /*
+ * Find the task that caused the abort and abort it first.
+ * The sequencer won't put anything on the done list until
+ * that happens.
+ */
+ tc_abort = *((u16*)(&dl->status_block[1]));
+ tc_abort = le16_to_cpu(tc_abort);
+
+ list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
+ struct sas_task *task = a->uldd_task;
+
+ if (a->tc_index != tc_abort)
+ continue;
+
+ if (task) {
+ failed_dev = task->dev;
+ sas_task_abort(task);
+ } else {
+ ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
+ a->scb->header.opcode);
+ }
+ break;
+ }
+
+ if (!failed_dev) {
+ ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
+ __func__, tc_abort);
+ goto out;
+ }
+
+ /*
+ * Now abort everything else for that device (hba?) so
+ * that the EH will wake up and do something.
+ */
+ list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
+ struct sas_task *task = a->uldd_task;
+
+ if (task &&
+ task->dev == failed_dev &&
+ a->tc_index != tc_abort)
+ sas_task_abort(task);
+ }
+
+ goto out;
+ }
+ case REQ_DEVICE_RESET: {
+ struct asd_ascb *a;
+ u16 conn_handle;
+ unsigned long flags;
+ struct sas_task *last_dev_task = NULL;
+
+ conn_handle = *((u16*)(&dl->status_block[1]));
+ conn_handle = le16_to_cpu(conn_handle);
+
+ ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
+ dl->status_block[3]);
+
+ /* Find the last pending task for the device... */
+ list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
+ u16 x;
+ struct domain_device *dev;
+ struct sas_task *task = a->uldd_task;
+
+ if (!task)
+ continue;
+ dev = task->dev;
+
+ x = (unsigned long)dev->lldd_dev;
+ if (x == conn_handle)
+ last_dev_task = task;
+ }
+
+ if (!last_dev_task) {
+ ASD_DPRINTK("%s: Device reset for idle device %d?\n",
+ __func__, conn_handle);
+ goto out;
+ }
+
+ /* ...and set the reset flag */
+ spin_lock_irqsave(&last_dev_task->task_state_lock, flags);
+ last_dev_task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&last_dev_task->task_state_lock, flags);
+
+ /* Kill all pending tasks for the device */
+ list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
+ u16 x;
+ struct domain_device *dev;
+ struct sas_task *task = a->uldd_task;
+
+ if (!task)
+ continue;
+ dev = task->dev;
+
+ x = (unsigned long)dev->lldd_dev;
+ if (x == conn_handle)
+ sas_task_abort(task);
+ }
+
+ goto out;
+ }
+ case SIGNAL_NCQ_ERROR:
+ ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
+ goto out;
+ case CLEAR_NCQ_ERROR:
+ ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
+ goto out;
+ }
+
+ sb_opcode &= ~DL_PHY_MASK;
+
+ switch (sb_opcode) {
+ case BYTES_DMAED:
+ ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
+ asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
+ break;
+ case PRIMITIVE_RECVD:
+ ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
+ phy_id);
+ asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
+ break;
+ case PHY_EVENT:
+ ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
+ asd_phy_event_tasklet(ascb, dl);
+ break;
+ case LINK_RESET_ERROR:
+ ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
+ phy_id);
+ asd_link_reset_err_tasklet(ascb, dl, phy_id);
+ break;
+ case TIMER_EVENT:
+ ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
+ __func__, phy_id);
+ asd_turn_led(asd_ha, phy_id, 0);
+ /* the device is gone */
+ sas_phy_disconnected(sas_phy);
+ asd_deform_port(asd_ha, phy);
+ sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
+ break;
+ default:
+ ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
+ phy_id, sb_opcode);
+ ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
+ edb, dl->opcode);
+ ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
+ sb_opcode, phy_id);
+ ASD_DPRINTK("escb: vaddr: 0x%p, "
+ "dma_handle: 0x%llx, next: 0x%llx, "
+ "index:%d, opcode:0x%02x\n",
+ ascb->dma_scb.vaddr,
+ (unsigned long long)ascb->dma_scb.dma_handle,
+ (unsigned long long)
+ le64_to_cpu(ascb->scb->header.next_scb),
+ le16_to_cpu(ascb->scb->header.index),
+ ascb->scb->header.opcode);
+
+ break;
+ }
+out:
+ asd_invalidate_edb(ascb, edb);
+}
+
+int asd_init_post_escbs(struct asd_ha_struct *asd_ha)
+{
+ struct asd_seq_data *seq = &asd_ha->seq;
+ int i;
+
+ for (i = 0; i < seq->num_escbs; i++)
+ seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete;
+
+ ASD_DPRINTK("posting %d escbs\n", i);
+ return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs);
+}
+
+/* ---------- CONTROL PHY ---------- */
+
+#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \
+ | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
+ | CURRENT_OOB_ERROR)
+
+/**
+ * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb
+ * @ascb: pointer to an ascb
+ * @dl: pointer to the done list entry
+ *
+ * This function completes a CONTROL PHY scb and frees the ascb.
+ * A note on LEDs:
+ * - an LED blinks if there is IO though it,
+ * - if a device is connected to the LED, it is lit,
+ * - if no device is connected to the LED, is is dimmed (off).
+ */
+static void control_phy_tasklet_complete(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct scb *scb = ascb->scb;
+ struct control_phy *control_phy = &scb->control_phy;
+ u8 phy_id = control_phy->phy_id;
+ struct asd_phy *phy = &ascb->ha->phys[phy_id];
+
+ u8 status = dl->status_block[0];
+ u8 oob_status = dl->status_block[1];
+ u8 oob_mode = dl->status_block[2];
+ /* u8 oob_signals= dl->status_block[3]; */
+
+ if (status != 0) {
+ ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
+ __func__, phy_id, status);
+ goto out;
+ }
+
+ switch (control_phy->sub_func) {
+ case DISABLE_PHY:
+ asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
+ asd_turn_led(asd_ha, phy_id, 0);
+ asd_control_led(asd_ha, phy_id, 0);
+ ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
+ break;
+
+ case ENABLE_PHY:
+ asd_control_led(asd_ha, phy_id, 1);
+ if (oob_status & CURRENT_OOB_DONE) {
+ asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
+ get_lrate_mode(phy, oob_mode);
+ asd_turn_led(asd_ha, phy_id, 1);
+ ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
+ __func__, phy_id,phy->sas_phy.linkrate,
+ phy->sas_phy.iproto);
+ } else if (oob_status & CURRENT_SPINUP_HOLD) {
+ asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
+ asd_turn_led(asd_ha, phy_id, 1);
+ ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
+ phy_id);
+ } else if (oob_status & CURRENT_ERR_MASK) {
+ asd_turn_led(asd_ha, phy_id, 0);
+ ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
+ __func__, phy_id, oob_status);
+ } else if (oob_status & (CURRENT_HOT_PLUG_CNCT
+ | CURRENT_DEVICE_PRESENT)) {
+ asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
+ asd_turn_led(asd_ha, phy_id, 1);
+ ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
+ __func__, phy_id);
+ } else {
+ asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
+ asd_turn_led(asd_ha, phy_id, 0);
+ ASD_DPRINTK("%s: phy%d: no device present: "
+ "oob_status:0x%x\n",
+ __func__, phy_id, oob_status);
+ }
+ break;
+ case RELEASE_SPINUP_HOLD:
+ case PHY_NO_OP:
+ case EXECUTE_HARD_RESET:
+ ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
+ phy_id, control_phy->sub_func);
+ /* XXX finish */
+ break;
+ default:
+ ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
+ phy_id, control_phy->sub_func);
+ break;
+ }
+out:
+ asd_ascb_free(ascb);
+}
+
+static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
+{
+ /* disable all speeds, then enable defaults */
+ *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS
+ | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS;
+
+ switch (pd->max_sas_lrate) {
+ case SAS_LINK_RATE_6_0_GBPS:
+ *speed_mask &= ~SAS_SPEED_60_DIS;
+ default:
+ case SAS_LINK_RATE_3_0_GBPS:
+ *speed_mask &= ~SAS_SPEED_30_DIS;
+ case SAS_LINK_RATE_1_5_GBPS:
+ *speed_mask &= ~SAS_SPEED_15_DIS;
+ }
+
+ switch (pd->min_sas_lrate) {
+ case SAS_LINK_RATE_6_0_GBPS:
+ *speed_mask |= SAS_SPEED_30_DIS;
+ case SAS_LINK_RATE_3_0_GBPS:
+ *speed_mask |= SAS_SPEED_15_DIS;
+ default:
+ case SAS_LINK_RATE_1_5_GBPS:
+ /* nothing to do */
+ ;
+ }
+
+ switch (pd->max_sata_lrate) {
+ case SAS_LINK_RATE_3_0_GBPS:
+ *speed_mask &= ~SATA_SPEED_30_DIS;
+ default:
+ case SAS_LINK_RATE_1_5_GBPS:
+ *speed_mask &= ~SATA_SPEED_15_DIS;
+ }
+
+ switch (pd->min_sata_lrate) {
+ case SAS_LINK_RATE_3_0_GBPS:
+ *speed_mask |= SATA_SPEED_15_DIS;
+ default:
+ case SAS_LINK_RATE_1_5_GBPS:
+ /* nothing to do */
+ ;
+ }
+}
+
+/**
+ * asd_build_control_phy -- build a CONTROL PHY SCB
+ * @ascb: pointer to an ascb
+ * @phy_id: phy id to control, integer
+ * @subfunc: subfunction, what to actually to do the phy
+ *
+ * This function builds a CONTROL PHY scb. No allocation of any kind
+ * is performed. @ascb is allocated with the list function.
+ * The caller can override the ascb->tasklet_complete to point
+ * to its own callback function. It must call asd_ascb_free()
+ * at its tasklet complete function.
+ * See the default implementation.
+ */
+void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
+{
+ struct asd_phy *phy = &ascb->ha->phys[phy_id];
+ struct scb *scb = ascb->scb;
+ struct control_phy *control_phy = &scb->control_phy;
+
+ scb->header.opcode = CONTROL_PHY;
+ control_phy->phy_id = (u8) phy_id;
+ control_phy->sub_func = subfunc;
+
+ switch (subfunc) {
+ case EXECUTE_HARD_RESET: /* 0x81 */
+ case ENABLE_PHY: /* 0x01 */
+ /* decide hot plug delay */
+ control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT;
+
+ /* decide speed mask */
+ set_speed_mask(&control_phy->speed_mask, phy->phy_desc);
+
+ /* initiator port settings are in the hi nibble */
+ if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
+ control_phy->port_type = SAS_PROTOCOL_ALL << 4;
+ else if (phy->sas_phy.role == PHY_ROLE_TARGET)
+ control_phy->port_type = SAS_PROTOCOL_ALL;
+ else
+ control_phy->port_type =
+ (SAS_PROTOCOL_ALL << 4) | SAS_PROTOCOL_ALL;
+
+ /* link reset retries, this should be nominal */
+ control_phy->link_reset_retries = 10;
+
+ case RELEASE_SPINUP_HOLD: /* 0x02 */
+ /* decide the func_mask */
+ control_phy->func_mask = FUNCTION_MASK_DEFAULT;
+ if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD)
+ control_phy->func_mask &= ~SPINUP_HOLD_DIS;
+ else
+ control_phy->func_mask |= SPINUP_HOLD_DIS;
+ }
+
+ control_phy->conn_handle = cpu_to_le16(0xFFFF);
+
+ ascb->tasklet_complete = control_phy_tasklet_complete;
+}
+
+/* ---------- INITIATE LINK ADM TASK ---------- */
+
+#if 0
+
+static void link_adm_tasklet_complete(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ u8 opcode = dl->opcode;
+ struct initiate_link_adm *link_adm = &ascb->scb->link_adm;
+ u8 phy_id = link_adm->phy_id;
+
+ if (opcode != TC_NO_ERROR) {
+ asd_printk("phy%d: link adm task 0x%x completed with error "
+ "0x%x\n", phy_id, link_adm->sub_func, opcode);
+ }
+ ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n",
+ phy_id, link_adm->sub_func, opcode);
+
+ asd_ascb_free(ascb);
+}
+
+void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
+ u8 subfunc)
+{
+ struct scb *scb = ascb->scb;
+ struct initiate_link_adm *link_adm = &scb->link_adm;
+
+ scb->header.opcode = INITIATE_LINK_ADM_TASK;
+
+ link_adm->phy_id = phy_id;
+ link_adm->sub_func = subfunc;
+ link_adm->conn_handle = cpu_to_le16(0xFFFF);
+
+ ascb->tasklet_complete = link_adm_tasklet_complete;
+}
+
+#endif /* 0 */
+
+/* ---------- SCB timer ---------- */
+
+/**
+ * asd_ascb_timedout -- called when a pending SCB's timer has expired
+ * @data: unsigned long, a pointer to the ascb in question
+ *
+ * This is the default timeout function which does the most necessary.
+ * Upper layers can implement their own timeout function, say to free
+ * resources they have with this SCB, and then call this one at the
+ * end of their timeout function. To do this, one should initialize
+ * the ascb->timer.{function, data, expires} prior to calling the post
+ * function. The timer is started by the post function.
+ */
+void asd_ascb_timedout(unsigned long data)
+{
+ struct asd_ascb *ascb = (void *) data;
+ struct asd_seq_data *seq = &ascb->ha->seq;
+ unsigned long flags;
+
+ ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode);
+
+ spin_lock_irqsave(&seq->pend_q_lock, flags);
+ seq->pending--;
+ list_del_init(&ascb->list);
+ spin_unlock_irqrestore(&seq->pend_q_lock, flags);
+
+ asd_ascb_free(ascb);
+}
+
+/* ---------- CONTROL PHY ---------- */
+
+/* Given the spec value, return a driver value. */
+static const int phy_func_table[] = {
+ [PHY_FUNC_NOP] = PHY_NO_OP,
+ [PHY_FUNC_LINK_RESET] = ENABLE_PHY,
+ [PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET,
+ [PHY_FUNC_DISABLE] = DISABLE_PHY,
+ [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD,
+};
+
+int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
+{
+ struct asd_ha_struct *asd_ha = phy->ha->lldd_ha;
+ struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc;
+ struct asd_ascb *ascb;
+ struct sas_phy_linkrates *rates;
+ int res = 1;
+
+ switch (func) {
+ case PHY_FUNC_CLEAR_ERROR_LOG:
+ case PHY_FUNC_GET_EVENTS:
+ return -ENOSYS;
+ case PHY_FUNC_SET_LINK_RATE:
+ rates = arg;
+ if (rates->minimum_linkrate) {
+ pd->min_sas_lrate = rates->minimum_linkrate;
+ pd->min_sata_lrate = rates->minimum_linkrate;
+ }
+ if (rates->maximum_linkrate) {
+ pd->max_sas_lrate = rates->maximum_linkrate;
+ pd->max_sata_lrate = rates->maximum_linkrate;
+ }
+ func = PHY_FUNC_LINK_RESET;
+ break;
+ default:
+ break;
+ }
+
+ ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
+ if (!ascb)
+ return -ENOMEM;
+
+ asd_build_control_phy(ascb, phy->id, phy_func_table[func]);
+ res = asd_post_ascb_list(asd_ha, ascb , 1);
+ if (res)
+ asd_ascb_free(ascb);
+
+ return res;
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
new file mode 100644
index 000000000..edb43fda9
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -0,0 +1,1475 @@
+/*
+ * Aic94xx SAS/SATA driver access to shared data structures and memory
+ * maps.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "aic94xx.h"
+#include "aic94xx_reg.h"
+#include "aic94xx_sds.h"
+
+/* ---------- OCM stuff ---------- */
+
+struct asd_ocm_dir_ent {
+ u8 type;
+ u8 offs[3];
+ u8 _r1;
+ u8 size[3];
+} __attribute__ ((packed));
+
+struct asd_ocm_dir {
+ char sig[2];
+ u8 _r1[2];
+ u8 major; /* 0 */
+ u8 minor; /* 0 */
+ u8 _r2;
+ u8 num_de;
+ struct asd_ocm_dir_ent entry[15];
+} __attribute__ ((packed));
+
+#define OCM_DE_OCM_DIR 0x00
+#define OCM_DE_WIN_DRVR 0x01
+#define OCM_DE_BIOS_CHIM 0x02
+#define OCM_DE_RAID_ENGN 0x03
+#define OCM_DE_BIOS_INTL 0x04
+#define OCM_DE_BIOS_CHIM_OSM 0x05
+#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06
+#define OCM_DE_ADDC2C_RES0 0x07
+#define OCM_DE_ADDC2C_RES1 0x08
+#define OCM_DE_ADDC2C_RES2 0x09
+#define OCM_DE_ADDC2C_RES3 0x0A
+
+#define OCM_INIT_DIR_ENTRIES 5
+/***************************************************************************
+* OCM directory default
+***************************************************************************/
+static struct asd_ocm_dir OCMDirInit =
+{
+ .sig = {0x4D, 0x4F}, /* signature */
+ .num_de = OCM_INIT_DIR_ENTRIES, /* no. of directory entries */
+};
+
+/***************************************************************************
+* OCM directory Entries default
+***************************************************************************/
+static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] =
+{
+ {
+ .type = (OCM_DE_ADDC2C_RES0), /* Entry type */
+ .offs = {128}, /* Offset */
+ .size = {0, 4}, /* size */
+ },
+ {
+ .type = (OCM_DE_ADDC2C_RES1), /* Entry type */
+ .offs = {128, 4}, /* Offset */
+ .size = {0, 4}, /* size */
+ },
+ {
+ .type = (OCM_DE_ADDC2C_RES2), /* Entry type */
+ .offs = {128, 8}, /* Offset */
+ .size = {0, 4}, /* size */
+ },
+ {
+ .type = (OCM_DE_ADDC2C_RES3), /* Entry type */
+ .offs = {128, 12}, /* Offset */
+ .size = {0, 4}, /* size */
+ },
+ {
+ .type = (OCM_DE_WIN_DRVR), /* Entry type */
+ .offs = {128, 16}, /* Offset */
+ .size = {128, 235, 1}, /* size */
+ },
+};
+
+struct asd_bios_chim_struct {
+ char sig[4];
+ u8 major; /* 1 */
+ u8 minor; /* 0 */
+ u8 bios_major;
+ u8 bios_minor;
+ __le32 bios_build;
+ u8 flags;
+ u8 pci_slot;
+ __le16 ue_num;
+ __le16 ue_size;
+ u8 _r[14];
+ /* The unit element array is right here.
+ */
+} __attribute__ ((packed));
+
+/**
+ * asd_read_ocm_seg - read an on chip memory (OCM) segment
+ * @asd_ha: pointer to the host adapter structure
+ * @buffer: where to write the read data
+ * @offs: offset into OCM where to read from
+ * @size: how many bytes to read
+ *
+ * Return the number of bytes not read. Return 0 on success.
+ */
+static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
+ u32 offs, int size)
+{
+ u8 *p = buffer;
+ if (unlikely(asd_ha->iospace))
+ asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
+ else {
+ for ( ; size > 0; size--, offs++, p++)
+ *p = asd_read_ocm_byte(asd_ha, offs);
+ }
+ return size;
+}
+
+static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha,
+ struct asd_ocm_dir *dir, u32 offs)
+{
+ int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir));
+ if (err) {
+ ASD_DPRINTK("couldn't read ocm segment\n");
+ return err;
+ }
+
+ if (dir->sig[0] != 'M' || dir->sig[1] != 'O') {
+ ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n",
+ dir->sig[0], dir->sig[1]);
+ return -ENOENT;
+ }
+ if (dir->major != 0) {
+ asd_printk("unsupported major version of ocm dir:0x%x\n",
+ dir->major);
+ return -ENOENT;
+ }
+ dir->num_de &= 0xf;
+ return 0;
+}
+
+/**
+ * asd_write_ocm_seg - write an on chip memory (OCM) segment
+ * @asd_ha: pointer to the host adapter structure
+ * @buffer: where to read the write data
+ * @offs: offset into OCM to write to
+ * @size: how many bytes to write
+ *
+ * Return the number of bytes not written. Return 0 on success.
+ */
+static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
+ u32 offs, int size)
+{
+ u8 *p = buffer;
+ if (unlikely(asd_ha->iospace))
+ asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
+ else {
+ for ( ; size > 0; size--, offs++, p++)
+ asd_write_ocm_byte(asd_ha, offs, *p);
+ }
+ return;
+}
+
+#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16))
+
+static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type,
+ u32 *offs, u32 *size)
+{
+ int i;
+ struct asd_ocm_dir_ent *ent;
+
+ for (i = 0; i < dir->num_de; i++) {
+ if (dir->entry[i].type == type)
+ break;
+ }
+ if (i >= dir->num_de)
+ return -ENOENT;
+ ent = &dir->entry[i];
+ *offs = (u32) THREE_TO_NUM(ent->offs);
+ *size = (u32) THREE_TO_NUM(ent->size);
+ return 0;
+}
+
+#define OCM_BIOS_CHIM_DE 2
+#define BC_BIOS_PRESENT 1
+
+static int asd_get_bios_chim(struct asd_ha_struct *asd_ha,
+ struct asd_ocm_dir *dir)
+{
+ int err;
+ struct asd_bios_chim_struct *bc_struct;
+ u32 offs, size;
+
+ err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size);
+ if (err) {
+ ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n");
+ goto out;
+ }
+ err = -ENOMEM;
+ bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL);
+ if (!bc_struct) {
+ asd_printk("no memory for bios_chim struct\n");
+ goto out;
+ }
+ err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs,
+ sizeof(*bc_struct));
+ if (err) {
+ ASD_DPRINTK("couldn't read ocm segment\n");
+ goto out2;
+ }
+ if (strncmp(bc_struct->sig, "SOIB", 4)
+ && strncmp(bc_struct->sig, "IPSA", 4)) {
+ ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n",
+ bc_struct->sig[0], bc_struct->sig[1],
+ bc_struct->sig[2], bc_struct->sig[3]);
+ err = -ENOENT;
+ goto out2;
+ }
+ if (bc_struct->major != 1) {
+ asd_printk("BIOS_CHIM unsupported major version:0x%x\n",
+ bc_struct->major);
+ err = -ENOENT;
+ goto out2;
+ }
+ if (bc_struct->flags & BC_BIOS_PRESENT) {
+ asd_ha->hw_prof.bios.present = 1;
+ asd_ha->hw_prof.bios.maj = bc_struct->bios_major;
+ asd_ha->hw_prof.bios.min = bc_struct->bios_minor;
+ asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build);
+ ASD_DPRINTK("BIOS present (%d,%d), %d\n",
+ asd_ha->hw_prof.bios.maj,
+ asd_ha->hw_prof.bios.min,
+ asd_ha->hw_prof.bios.bld);
+ }
+ asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num);
+ asd_ha->hw_prof.ue.size= le16_to_cpu(bc_struct->ue_size);
+ ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num,
+ asd_ha->hw_prof.ue.size);
+ size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size;
+ if (size > 0) {
+ err = -ENOMEM;
+ asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL);
+ if (!asd_ha->hw_prof.ue.area)
+ goto out2;
+ err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area,
+ offs + sizeof(*bc_struct), size);
+ if (err) {
+ kfree(asd_ha->hw_prof.ue.area);
+ asd_ha->hw_prof.ue.area = NULL;
+ asd_ha->hw_prof.ue.num = 0;
+ asd_ha->hw_prof.ue.size = 0;
+ ASD_DPRINTK("couldn't read ue entries(%d)\n", err);
+ }
+ }
+out2:
+ kfree(bc_struct);
+out:
+ return err;
+}
+
+static void
+asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ /* Zero OCM */
+ for (i = 0; i < OCM_MAX_SIZE; i += 4)
+ asd_write_ocm_dword(asd_ha, i, 0);
+
+ /* Write Dir */
+ asd_write_ocm_seg(asd_ha, &OCMDirInit, 0,
+ sizeof(struct asd_ocm_dir));
+
+ /* Write Dir Entries */
+ for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++)
+ asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i],
+ sizeof(struct asd_ocm_dir) +
+ (i * sizeof(struct asd_ocm_dir_ent))
+ , sizeof(struct asd_ocm_dir_ent));
+
+}
+
+static int
+asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha)
+{
+ struct pci_dev *pcidev = asd_ha->pcidev;
+ u32 reg;
+ int err = 0;
+ u32 v;
+
+ /* check if OCM has been initialized by BIOS */
+ reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
+
+ if (!(reg & OCMINITIALIZED)) {
+ err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v);
+ if (err) {
+ asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n",
+ pci_name(pcidev));
+ goto out;
+ }
+
+ printk(KERN_INFO "OCM is not initialized by BIOS,"
+ "reinitialize it and ignore it, current IntrptStatus"
+ "is 0x%x\n", v);
+
+ if (v)
+ err = pci_write_config_dword(pcidev,
+ PCIC_INTRPT_STAT, v);
+ if (err) {
+ asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n",
+ pci_name(pcidev));
+ goto out;
+ }
+
+ asd_hwi_initialize_ocm_dir(asd_ha);
+
+ }
+out:
+ return err;
+}
+
+/**
+ * asd_read_ocm - read on chip memory (OCM)
+ * @asd_ha: pointer to the host adapter structure
+ */
+int asd_read_ocm(struct asd_ha_struct *asd_ha)
+{
+ int err;
+ struct asd_ocm_dir *dir;
+
+ if (asd_hwi_check_ocm_access(asd_ha))
+ return -1;
+
+ dir = kmalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir) {
+ asd_printk("no memory for ocm dir\n");
+ return -ENOMEM;
+ }
+
+ err = asd_read_ocm_dir(asd_ha, dir, 0);
+ if (err)
+ goto out;
+
+ err = asd_get_bios_chim(asd_ha, dir);
+out:
+ kfree(dir);
+ return err;
+}
+
+/* ---------- FLASH stuff ---------- */
+
+#define FLASH_RESET 0xF0
+
+#define ASD_FLASH_SIZE 0x200000
+#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** "
+#define FLASH_NEXT_ENTRY_OFFS 0x2000
+#define FLASH_MAX_DIR_ENTRIES 32
+
+#define FLASH_DE_TYPE_MASK 0x3FFFFFFF
+#define FLASH_DE_MS 0x120
+#define FLASH_DE_CTRL_A_USER 0xE0
+
+struct asd_flash_de {
+ __le32 type;
+ __le32 offs;
+ __le32 pad_size;
+ __le32 image_size;
+ __le32 chksum;
+ u8 _r[12];
+ u8 version[32];
+} __attribute__ ((packed));
+
+struct asd_flash_dir {
+ u8 cookie[32];
+ __le32 rev; /* 2 */
+ __le32 chksum;
+ __le32 chksum_antidote;
+ __le32 bld;
+ u8 bld_id[32]; /* build id data */
+ u8 ver_data[32]; /* date and time of build */
+ __le32 ae_mask;
+ __le32 v_mask;
+ __le32 oc_mask;
+ u8 _r[20];
+ struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES];
+} __attribute__ ((packed));
+
+struct asd_manuf_sec {
+ char sig[2]; /* 'S', 'M' */
+ u16 offs_next;
+ u8 maj; /* 0 */
+ u8 min; /* 0 */
+ u16 chksum;
+ u16 size;
+ u8 _r[6];
+ u8 sas_addr[SAS_ADDR_SIZE];
+ u8 pcba_sn[ASD_PCBA_SN_SIZE];
+ /* Here start the other segments */
+ u8 linked_list[0];
+} __attribute__ ((packed));
+
+struct asd_manuf_phy_desc {
+ u8 state; /* low 4 bits */
+#define MS_PHY_STATE_ENABLED 0
+#define MS_PHY_STATE_REPORTED 1
+#define MS_PHY_STATE_HIDDEN 2
+ u8 phy_id;
+ u16 _r;
+ u8 phy_control_0; /* mode 5 reg 0x160 */
+ u8 phy_control_1; /* mode 5 reg 0x161 */
+ u8 phy_control_2; /* mode 5 reg 0x162 */
+ u8 phy_control_3; /* mode 5 reg 0x163 */
+} __attribute__ ((packed));
+
+struct asd_manuf_phy_param {
+ char sig[2]; /* 'P', 'M' */
+ u16 next;
+ u8 maj; /* 0 */
+ u8 min; /* 2 */
+ u8 num_phy_desc; /* 8 */
+ u8 phy_desc_size; /* 8 */
+ u8 _r[3];
+ u8 usage_model_id;
+ u32 _r2;
+ struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS];
+} __attribute__ ((packed));
+
+#if 0
+static const char *asd_sb_type[] = {
+ "unknown",
+ "SGPIO",
+ [2 ... 0x7F] = "unknown",
+ [0x80] = "ADPT_I2C",
+ [0x81 ... 0xFF] = "VENDOR_UNIQUExx"
+};
+#endif
+
+struct asd_ms_sb_desc {
+ u8 type;
+ u8 node_desc_index;
+ u8 conn_desc_index;
+ u8 _recvd[0];
+} __attribute__ ((packed));
+
+#if 0
+static const char *asd_conn_type[] = {
+ [0 ... 7] = "unknown",
+ "SFF8470",
+ "SFF8482",
+ "SFF8484",
+ [0x80] = "PCIX_DAUGHTER0",
+ [0x81] = "SAS_DAUGHTER0",
+ [0x82 ... 0xFF] = "VENDOR_UNIQUExx"
+};
+
+static const char *asd_conn_location[] = {
+ "unknown",
+ "internal",
+ "external",
+ "board_to_board",
+};
+#endif
+
+struct asd_ms_conn_desc {
+ u8 type;
+ u8 location;
+ u8 num_sideband_desc;
+ u8 size_sideband_desc;
+ u32 _resvd;
+ u8 name[16];
+ struct asd_ms_sb_desc sb_desc[0];
+} __attribute__ ((packed));
+
+struct asd_nd_phy_desc {
+ u8 vp_attch_type;
+ u8 attch_specific[0];
+} __attribute__ ((packed));
+
+#if 0
+static const char *asd_node_type[] = {
+ "IOP",
+ "IO_CONTROLLER",
+ "EXPANDER",
+ "PORT_MULTIPLIER",
+ "PORT_MULTIPLEXER",
+ "MULTI_DROP_I2C_BUS",
+};
+#endif
+
+struct asd_ms_node_desc {
+ u8 type;
+ u8 num_phy_desc;
+ u8 size_phy_desc;
+ u8 _resvd;
+ u8 name[16];
+ struct asd_nd_phy_desc phy_desc[0];
+} __attribute__ ((packed));
+
+struct asd_ms_conn_map {
+ char sig[2]; /* 'M', 'C' */
+ __le16 next;
+ u8 maj; /* 0 */
+ u8 min; /* 0 */
+ __le16 cm_size; /* size of this struct */
+ u8 num_conn;
+ u8 conn_size;
+ u8 num_nodes;
+ u8 usage_model_id;
+ u32 _resvd;
+ struct asd_ms_conn_desc conn_desc[0];
+ struct asd_ms_node_desc node_desc[0];
+} __attribute__ ((packed));
+
+struct asd_ctrla_phy_entry {
+ u8 sas_addr[SAS_ADDR_SIZE];
+ u8 sas_link_rates; /* max in hi bits, min in low bits */
+ u8 flags;
+ u8 sata_link_rates;
+ u8 _r[5];
+} __attribute__ ((packed));
+
+struct asd_ctrla_phy_settings {
+ u8 id0; /* P'h'y */
+ u8 _r;
+ u16 next;
+ u8 num_phys; /* number of PHYs in the PCI function */
+ u8 _r2[3];
+ struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct asd_ll_el {
+ u8 id0;
+ u8 id1;
+ __le16 next;
+ u8 something_here[0];
+} __attribute__ ((packed));
+
+static int asd_poll_flash(struct asd_ha_struct *asd_ha)
+{
+ int c;
+ u8 d;
+
+ for (c = 5000; c > 0; c--) {
+ d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
+ d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
+ if (!d)
+ return 0;
+ udelay(5);
+ }
+ return -ENOENT;
+}
+
+static int asd_reset_flash(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ err = asd_poll_flash(asd_ha);
+ if (err)
+ return err;
+ asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET);
+ err = asd_poll_flash(asd_ha);
+
+ return err;
+}
+
+static int asd_read_flash_seg(struct asd_ha_struct *asd_ha,
+ void *buffer, u32 offs, int size)
+{
+ asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs,
+ size);
+ return 0;
+}
+
+/**
+ * asd_find_flash_dir - finds and reads the flash directory
+ * @asd_ha: pointer to the host adapter structure
+ * @flash_dir: pointer to flash directory structure
+ *
+ * If found, the flash directory segment will be copied to
+ * @flash_dir. Return 1 if found, 0 if not.
+ */
+static int asd_find_flash_dir(struct asd_ha_struct *asd_ha,
+ struct asd_flash_dir *flash_dir)
+{
+ u32 v;
+ for (v = 0; v < ASD_FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
+ asd_read_flash_seg(asd_ha, flash_dir, v,
+ sizeof(FLASH_DIR_COOKIE)-1);
+ if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE,
+ sizeof(FLASH_DIR_COOKIE)-1) == 0) {
+ asd_ha->hw_prof.flash.dir_offs = v;
+ asd_read_flash_seg(asd_ha, flash_dir, v,
+ sizeof(*flash_dir));
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int asd_flash_getid(struct asd_ha_struct *asd_ha)
+{
+ int err = 0;
+ u32 reg;
+
+ reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
+
+ if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR,
+ &asd_ha->hw_prof.flash.bar)) {
+ asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n",
+ pci_name(asd_ha->pcidev));
+ return -ENOENT;
+ }
+ asd_ha->hw_prof.flash.present = 1;
+ asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0;
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash(%d)\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static u16 asd_calc_flash_chksum(u16 *p, int size)
+{
+ u16 chksum = 0;
+
+ while (size-- > 0)
+ chksum += *p++;
+
+ return chksum;
+}
+
+
+static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type,
+ u32 *offs, u32 *size)
+{
+ int i;
+ struct asd_flash_de *de;
+
+ for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) {
+ u32 type = le32_to_cpu(flash_dir->dir_entry[i].type);
+
+ type &= FLASH_DE_TYPE_MASK;
+ if (type == entry_type)
+ break;
+ }
+ if (i >= FLASH_MAX_DIR_ENTRIES)
+ return -ENOENT;
+ de = &flash_dir->dir_entry[i];
+ *offs = le32_to_cpu(de->offs);
+ *size = le32_to_cpu(de->pad_size);
+ return 0;
+}
+
+static int asd_validate_ms(struct asd_manuf_sec *ms)
+{
+ if (ms->sig[0] != 'S' || ms->sig[1] != 'M') {
+ ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n",
+ ms->sig[0], ms->sig[1]);
+ return -ENOENT;
+ }
+ if (ms->maj != 0) {
+ asd_printk("unsupported manuf. sector. major version:%x\n",
+ ms->maj);
+ return -ENOENT;
+ }
+ ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next);
+ ms->chksum = le16_to_cpu((__force __le16) ms->chksum);
+ ms->size = le16_to_cpu((__force __le16) ms->size);
+
+ if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) {
+ asd_printk("failed manuf sector checksum\n");
+ }
+
+ return 0;
+}
+
+static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha,
+ struct asd_manuf_sec *ms)
+{
+ memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE);
+ return 0;
+}
+
+static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha,
+ struct asd_manuf_sec *ms)
+{
+ memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE);
+ asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0';
+ return 0;
+}
+
+/**
+ * asd_find_ll_by_id - find a linked list entry by its id
+ * @start: void pointer to the first element in the linked list
+ * @id0: the first byte of the id (offs 0)
+ * @id1: the second byte of the id (offs 1)
+ *
+ * @start has to be the _base_ element start, since the
+ * linked list entries's offset is from this pointer.
+ * Some linked list entries use only the first id, in which case
+ * you can pass 0xFF for the second.
+ */
+static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
+{
+ struct asd_ll_el *el = start;
+
+ do {
+ switch (id1) {
+ default:
+ if (el->id1 == id1)
+ case 0xFF:
+ if (el->id0 == id0)
+ return el;
+ }
+ el = start + le16_to_cpu(el->next);
+ } while (el != start);
+
+ return NULL;
+}
+
+/**
+ * asd_ms_get_phy_params - get phy parameters from the manufacturing sector
+ * @asd_ha: pointer to the host adapter structure
+ * @manuf_sec: pointer to the manufacturing sector
+ *
+ * The manufacturing sector contans also the linked list of sub-segments,
+ * since when it was read, its size was taken from the flash directory,
+ * not from the structure size.
+ *
+ * HIDDEN phys do not count in the total count. REPORTED phys cannot
+ * be enabled but are reported and counted towards the total.
+ * ENABLED phys are enabled by default and count towards the total.
+ * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys
+ * merely specifies the number of phys the host adapter decided to
+ * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN,
+ * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLED.
+ * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2
+ * are actually enabled (enabled by default, max number of phys
+ * enableable in this case).
+ */
+static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha,
+ struct asd_manuf_sec *manuf_sec)
+{
+ int i;
+ int en_phys = 0;
+ int rep_phys = 0;
+ struct asd_manuf_phy_param *phy_param;
+ struct asd_manuf_phy_param dflt_phy_param;
+
+ phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M');
+ if (!phy_param) {
+ ASD_DPRINTK("ms: no phy parameters found\n");
+ ASD_DPRINTK("ms: Creating default phy parameters\n");
+ dflt_phy_param.sig[0] = 'P';
+ dflt_phy_param.sig[1] = 'M';
+ dflt_phy_param.maj = 0;
+ dflt_phy_param.min = 2;
+ dflt_phy_param.num_phy_desc = 8;
+ dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc);
+ for (i =0; i < ASD_MAX_PHYS; i++) {
+ dflt_phy_param.phy_desc[i].state = 0;
+ dflt_phy_param.phy_desc[i].phy_id = i;
+ dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6;
+ dflt_phy_param.phy_desc[i].phy_control_1 = 0x10;
+ dflt_phy_param.phy_desc[i].phy_control_2 = 0x43;
+ dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb;
+ }
+
+ phy_param = &dflt_phy_param;
+
+ }
+
+ if (phy_param->maj != 0) {
+ asd_printk("unsupported manuf. phy param major version:0x%x\n",
+ phy_param->maj);
+ return -ENOENT;
+ }
+
+ ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc);
+ asd_ha->hw_prof.enabled_phys = 0;
+ for (i = 0; i < phy_param->num_phy_desc; i++) {
+ struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i];
+ switch (pd->state & 0xF) {
+ case MS_PHY_STATE_HIDDEN:
+ ASD_DPRINTK("ms: phy%d: HIDDEN\n", i);
+ continue;
+ case MS_PHY_STATE_REPORTED:
+ ASD_DPRINTK("ms: phy%d: REPORTED\n", i);
+ asd_ha->hw_prof.enabled_phys &= ~(1 << i);
+ rep_phys++;
+ continue;
+ case MS_PHY_STATE_ENABLED:
+ ASD_DPRINTK("ms: phy%d: ENABLED\n", i);
+ asd_ha->hw_prof.enabled_phys |= (1 << i);
+ en_phys++;
+ break;
+ }
+ asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0;
+ asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1;
+ asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2;
+ asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3;
+ }
+ asd_ha->hw_prof.max_phys = rep_phys + en_phys;
+ asd_ha->hw_prof.num_phys = en_phys;
+ ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n",
+ asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys);
+ ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys);
+ return 0;
+}
+
+static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha,
+ struct asd_manuf_sec *manuf_sec)
+{
+ struct asd_ms_conn_map *cm;
+
+ cm = asd_find_ll_by_id(manuf_sec, 'M', 'C');
+ if (!cm) {
+ ASD_DPRINTK("ms: no connector map found\n");
+ return 0;
+ }
+
+ if (cm->maj != 0) {
+ ASD_DPRINTK("ms: unsupported: connector map major version 0x%x"
+ "\n", cm->maj);
+ return -ENOENT;
+ }
+
+ /* XXX */
+
+ return 0;
+}
+
+
+/**
+ * asd_process_ms - find and extract information from the manufacturing sector
+ * @asd_ha: pointer to the host adapter structure
+ * @flash_dir: pointer to the flash directory
+ */
+static int asd_process_ms(struct asd_ha_struct *asd_ha,
+ struct asd_flash_dir *flash_dir)
+{
+ int err;
+ struct asd_manuf_sec *manuf_sec;
+ u32 offs, size;
+
+ err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size);
+ if (err) {
+ ASD_DPRINTK("Couldn't find the manuf. sector\n");
+ goto out;
+ }
+
+ if (size == 0)
+ goto out;
+
+ err = -ENOMEM;
+ manuf_sec = kmalloc(size, GFP_KERNEL);
+ if (!manuf_sec) {
+ ASD_DPRINTK("no mem for manuf sector\n");
+ goto out;
+ }
+
+ err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size);
+ if (err) {
+ ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n",
+ offs, size);
+ goto out2;
+ }
+
+ err = asd_validate_ms(manuf_sec);
+ if (err) {
+ ASD_DPRINTK("couldn't validate manuf sector\n");
+ goto out2;
+ }
+
+ err = asd_ms_get_sas_addr(asd_ha, manuf_sec);
+ if (err) {
+ ASD_DPRINTK("couldn't read the SAS_ADDR\n");
+ goto out2;
+ }
+ ASD_DPRINTK("manuf sect SAS_ADDR %llx\n",
+ SAS_ADDR(asd_ha->hw_prof.sas_addr));
+
+ err = asd_ms_get_pcba_sn(asd_ha, manuf_sec);
+ if (err) {
+ ASD_DPRINTK("couldn't read the PCBA SN\n");
+ goto out2;
+ }
+ ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn);
+
+ err = asd_ms_get_phy_params(asd_ha, manuf_sec);
+ if (err) {
+ ASD_DPRINTK("ms: couldn't get phy parameters\n");
+ goto out2;
+ }
+
+ err = asd_ms_get_connector_map(asd_ha, manuf_sec);
+ if (err) {
+ ASD_DPRINTK("ms: couldn't get connector map\n");
+ goto out2;
+ }
+
+out2:
+ kfree(manuf_sec);
+out:
+ return err;
+}
+
+static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha,
+ struct asd_ctrla_phy_settings *ps)
+{
+ int i;
+ for (i = 0; i < ps->num_phys; i++) {
+ struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i];
+
+ if (!PHY_ENABLED(asd_ha, i))
+ continue;
+ if (*(u64 *)pe->sas_addr == 0) {
+ asd_ha->hw_prof.enabled_phys &= ~(1 << i);
+ continue;
+ }
+ /* This is the SAS address which should be sent in IDENTIFY. */
+ memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr,
+ SAS_ADDR_SIZE);
+ asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
+ (pe->sas_link_rates & 0xF0) >> 4;
+ asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
+ (pe->sas_link_rates & 0x0F);
+ asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
+ (pe->sata_link_rates & 0xF0) >> 4;
+ asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
+ (pe->sata_link_rates & 0x0F);
+ asd_ha->hw_prof.phy_desc[i].flags = pe->flags;
+ ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x,"
+ " sata rate:0x%x-0x%x, flags:0x%x\n",
+ i,
+ SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr),
+ asd_ha->hw_prof.phy_desc[i].max_sas_lrate,
+ asd_ha->hw_prof.phy_desc[i].min_sas_lrate,
+ asd_ha->hw_prof.phy_desc[i].max_sata_lrate,
+ asd_ha->hw_prof.phy_desc[i].min_sata_lrate,
+ asd_ha->hw_prof.phy_desc[i].flags);
+ }
+
+ return 0;
+}
+
+/**
+ * asd_process_ctrl_a_user - process CTRL-A user settings
+ * @asd_ha: pointer to the host adapter structure
+ * @flash_dir: pointer to the flash directory
+ */
+static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
+ struct asd_flash_dir *flash_dir)
+{
+ int err, i;
+ u32 offs, size;
+ struct asd_ll_el *el;
+ struct asd_ctrla_phy_settings *ps;
+ struct asd_ctrla_phy_settings dflt_ps;
+
+ err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size);
+ if (err) {
+ ASD_DPRINTK("couldn't find CTRL-A user settings section\n");
+ ASD_DPRINTK("Creating default CTRL-A user settings section\n");
+
+ dflt_ps.id0 = 'h';
+ dflt_ps.num_phys = 8;
+ for (i =0; i < ASD_MAX_PHYS; i++) {
+ memcpy(dflt_ps.phy_ent[i].sas_addr,
+ asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
+ dflt_ps.phy_ent[i].sas_link_rates = 0x98;
+ dflt_ps.phy_ent[i].flags = 0x0;
+ dflt_ps.phy_ent[i].sata_link_rates = 0x0;
+ }
+
+ size = sizeof(struct asd_ctrla_phy_settings);
+ ps = &dflt_ps;
+ }
+
+ if (size == 0)
+ goto out;
+
+ err = -ENOMEM;
+ el = kmalloc(size, GFP_KERNEL);
+ if (!el) {
+ ASD_DPRINTK("no mem for ctrla user settings section\n");
+ goto out;
+ }
+
+ err = asd_read_flash_seg(asd_ha, (void *)el, offs, size);
+ if (err) {
+ ASD_DPRINTK("couldn't read ctrla phy settings section\n");
+ goto out2;
+ }
+
+ err = -ENOENT;
+ ps = asd_find_ll_by_id(el, 'h', 0xFF);
+ if (!ps) {
+ ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
+ goto out2;
+ }
+
+ err = asd_process_ctrla_phy_settings(asd_ha, ps);
+ if (err) {
+ ASD_DPRINTK("couldn't process ctrla phy settings\n");
+ goto out2;
+ }
+out2:
+ kfree(el);
+out:
+ return err;
+}
+
+/**
+ * asd_read_flash - read flash memory
+ * @asd_ha: pointer to the host adapter structure
+ */
+int asd_read_flash(struct asd_ha_struct *asd_ha)
+{
+ int err;
+ struct asd_flash_dir *flash_dir;
+
+ err = asd_flash_getid(asd_ha);
+ if (err)
+ return err;
+
+ flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL);
+ if (!flash_dir)
+ return -ENOMEM;
+
+ err = -ENOENT;
+ if (!asd_find_flash_dir(asd_ha, flash_dir)) {
+ ASD_DPRINTK("couldn't find flash directory\n");
+ goto out;
+ }
+
+ if (le32_to_cpu(flash_dir->rev) != 2) {
+ asd_printk("unsupported flash dir version:0x%x\n",
+ le32_to_cpu(flash_dir->rev));
+ goto out;
+ }
+
+ err = asd_process_ms(asd_ha, flash_dir);
+ if (err) {
+ ASD_DPRINTK("couldn't process manuf sector settings\n");
+ goto out;
+ }
+
+ err = asd_process_ctrl_a_user(asd_ha, flash_dir);
+ if (err) {
+ ASD_DPRINTK("couldn't process CTRL-A user settings\n");
+ goto out;
+ }
+
+out:
+ kfree(flash_dir);
+ return err;
+}
+
+/**
+ * asd_verify_flash_seg - verify data with flash memory
+ * @asd_ha: pointer to the host adapter structure
+ * @src: pointer to the source data to be verified
+ * @dest_offset: offset from flash memory
+ * @bytes_to_verify: total bytes to verify
+ */
+int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
+ const void *src, u32 dest_offset, u32 bytes_to_verify)
+{
+ const u8 *src_buf;
+ u8 flash_char;
+ int err;
+ u32 nv_offset, reg, i;
+
+ reg = asd_ha->hw_prof.flash.bar;
+ src_buf = NULL;
+
+ err = FLASH_OK;
+ nv_offset = dest_offset;
+ src_buf = (const u8 *)src;
+ for (i = 0; i < bytes_to_verify; i++) {
+ flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i);
+ if (flash_char != src_buf[i]) {
+ err = FAIL_VERIFY;
+ break;
+ }
+ }
+ return err;
+}
+
+/**
+ * asd_write_flash_seg - write data into flash memory
+ * @asd_ha: pointer to the host adapter structure
+ * @src: pointer to the source data to be written
+ * @dest_offset: offset from flash memory
+ * @bytes_to_write: total bytes to write
+ */
+int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
+ const void *src, u32 dest_offset, u32 bytes_to_write)
+{
+ const u8 *src_buf;
+ u32 nv_offset, reg, i;
+ int err;
+
+ reg = asd_ha->hw_prof.flash.bar;
+ src_buf = NULL;
+
+ err = asd_check_flash_type(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't find the type of flash. err=%d\n", err);
+ return err;
+ }
+
+ nv_offset = dest_offset;
+ err = asd_erase_nv_sector(asd_ha, nv_offset, bytes_to_write);
+ if (err) {
+ ASD_DPRINTK("Erase failed at offset:0x%x\n",
+ nv_offset);
+ return err;
+ }
+
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ src_buf = (const u8 *)src;
+ for (i = 0; i < bytes_to_write; i++) {
+ /* Setup program command sequence */
+ switch (asd_ha->hw_prof.flash.method) {
+ case FLASH_METHOD_A:
+ {
+ asd_write_reg_byte(asd_ha,
+ (reg + 0xAAA), 0xAA);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x555), 0x55);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0xAAA), 0xA0);
+ asd_write_reg_byte(asd_ha,
+ (reg + nv_offset + i),
+ (*(src_buf + i)));
+ break;
+ }
+ case FLASH_METHOD_B:
+ {
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x555), 0xA0);
+ asd_write_reg_byte(asd_ha,
+ (reg + nv_offset + i),
+ (*(src_buf + i)));
+ break;
+ }
+ default:
+ break;
+ }
+ if (asd_chk_write_status(asd_ha,
+ (nv_offset + i), 0) != 0) {
+ ASD_DPRINTK("aicx: Write failed at offset:0x%x\n",
+ reg + nv_offset + i);
+ return FAIL_WRITE_FLASH;
+ }
+ }
+
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+int asd_chk_write_status(struct asd_ha_struct *asd_ha,
+ u32 sector_addr, u8 erase_flag)
+{
+ u32 reg;
+ u32 loop_cnt;
+ u8 nv_data1, nv_data2;
+ u8 toggle_bit1;
+
+ /*
+ * Read from DQ2 requires sector address
+ * while it's dont care for DQ6
+ */
+ reg = asd_ha->hw_prof.flash.bar;
+
+ for (loop_cnt = 0; loop_cnt < 50000; loop_cnt++) {
+ nv_data1 = asd_read_reg_byte(asd_ha, reg);
+ nv_data2 = asd_read_reg_byte(asd_ha, reg);
+
+ toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
+ ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
+
+ if (toggle_bit1 == 0) {
+ return 0;
+ } else {
+ if (nv_data2 & FLASH_STATUS_BIT_MASK_DQ5) {
+ nv_data1 = asd_read_reg_byte(asd_ha,
+ reg);
+ nv_data2 = asd_read_reg_byte(asd_ha,
+ reg);
+ toggle_bit1 =
+ ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
+ ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
+
+ if (toggle_bit1 == 0)
+ return 0;
+ }
+ }
+
+ /*
+ * ERASE is a sector-by-sector operation and requires
+ * more time to finish while WRITE is byte-byte-byte
+ * operation and takes lesser time to finish.
+ *
+ * For some strange reason a reduced ERASE delay gives different
+ * behaviour across different spirit boards. Hence we set
+ * a optimum balance of 50mus for ERASE which works well
+ * across all boards.
+ */
+ if (erase_flag) {
+ udelay(FLASH_STATUS_ERASE_DELAY_COUNT);
+ } else {
+ udelay(FLASH_STATUS_WRITE_DELAY_COUNT);
+ }
+ }
+ return -1;
+}
+
+/**
+ * asd_hwi_erase_nv_sector - Erase the flash memory sectors.
+ * @asd_ha: pointer to the host adapter structure
+ * @flash_addr: pointer to offset from flash memory
+ * @size: total bytes to erase.
+ */
+int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, u32 flash_addr, u32 size)
+{
+ u32 reg;
+ u32 sector_addr;
+
+ reg = asd_ha->hw_prof.flash.bar;
+
+ /* sector staring address */
+ sector_addr = flash_addr & FLASH_SECTOR_SIZE_MASK;
+
+ /*
+ * Erasing an flash sector needs to be done in six consecutive
+ * write cyles.
+ */
+ while (sector_addr < flash_addr+size) {
+ switch (asd_ha->hw_prof.flash.method) {
+ case FLASH_METHOD_A:
+ asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0x80);
+ asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
+ break;
+ case FLASH_METHOD_B:
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x80);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
+ break;
+ default:
+ break;
+ }
+
+ if (asd_chk_write_status(asd_ha, sector_addr, 1) != 0)
+ return FAIL_ERASE_FLASH;
+
+ sector_addr += FLASH_SECTOR_SIZE;
+ }
+
+ return 0;
+}
+
+int asd_check_flash_type(struct asd_ha_struct *asd_ha)
+{
+ u8 manuf_id;
+ u8 dev_id;
+ u8 sec_prot;
+ u32 inc;
+ u32 reg;
+ int err;
+
+ /* get Flash memory base address */
+ reg = asd_ha->hw_prof.flash.bar;
+
+ /* Determine flash info */
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_UNKNOWN;
+ asd_ha->hw_prof.flash.manuf = FLASH_MANUF_ID_UNKNOWN;
+ asd_ha->hw_prof.flash.dev_id = FLASH_DEV_ID_UNKNOWN;
+
+ /* Get flash info. This would most likely be AMD Am29LV family flash.
+ * First try the sequence for word mode. It is the same as for
+ * 008B (byte mode only), 160B (word mode) and 800D (word mode).
+ */
+ inc = asd_ha->hw_prof.flash.wide ? 2 : 1;
+ asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA);
+ asd_write_reg_byte(asd_ha, reg + 0x555, 0x55);
+ asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90);
+ manuf_id = asd_read_reg_byte(asd_ha, reg);
+ dev_id = asd_read_reg_byte(asd_ha, reg + inc);
+ sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
+ /* Get out of autoselect mode. */
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+ ASD_DPRINTK("Flash MethodA manuf_id(0x%x) dev_id(0x%x) "
+ "sec_prot(0x%x)\n", manuf_id, dev_id, sec_prot);
+ err = asd_reset_flash(asd_ha);
+ if (err != 0)
+ return err;
+
+ switch (manuf_id) {
+ case FLASH_MANUF_ID_AMD:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_AM29LV800DT:
+ case FLASH_DEV_ID_AM29LV640MT:
+ case FLASH_DEV_ID_AM29F800B:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_ST:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_STM29W800DT:
+ case FLASH_DEV_ID_STM29LV640:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_FUJITSU:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_MBM29LV800TE:
+ case FLASH_DEV_ID_MBM29DL800TA:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_MACRONIX:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_MX29LV800BT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ }
+ break;
+ }
+
+ if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) {
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ /* Issue Unlock sequence for AM29LV008BT */
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x90);
+ manuf_id = asd_read_reg_byte(asd_ha, reg);
+ dev_id = asd_read_reg_byte(asd_ha, reg + inc);
+ sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
+
+ ASD_DPRINTK("Flash MethodB manuf_id(0x%x) dev_id(0x%x) sec_prot"
+ "(0x%x)\n", manuf_id, dev_id, sec_prot);
+
+ err = asd_reset_flash(asd_ha);
+ if (err != 0) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ switch (manuf_id) {
+ case FLASH_MANUF_ID_AMD:
+ switch (dev_id) {
+ case FLASH_DEV_ID_AM29LV008BT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_ST:
+ switch (dev_id) {
+ case FLASH_DEV_ID_STM29008:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_FUJITSU:
+ switch (dev_id) {
+ case FLASH_DEV_ID_MBM29LV008TA:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_INTEL:
+ switch (dev_id) {
+ case FLASH_DEV_ID_I28LV00TAT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_MACRONIX:
+ switch (dev_id) {
+ case FLASH_DEV_ID_I28LV00TAT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ }
+ break;
+ default:
+ return FAIL_FIND_FLASH_ID;
+ }
+ }
+
+ if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN)
+ return FAIL_FIND_FLASH_ID;
+
+ asd_ha->hw_prof.flash.manuf = manuf_id;
+ asd_ha->hw_prof.flash.dev_id = dev_id;
+ asd_ha->hw_prof.flash.sec_prot = sec_prot;
+ return 0;
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.h b/drivers/scsi/aic94xx/aic94xx_sds.h
new file mode 100644
index 000000000..a06dc0114
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.h
@@ -0,0 +1,121 @@
+/*
+ * Aic94xx SAS/SATA driver hardware interface header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Gilbert Wu <gilbert_wu@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#ifndef _AIC94XX_SDS_H_
+#define _AIC94XX_SDS_H_
+
+enum {
+ FLASH_METHOD_UNKNOWN,
+ FLASH_METHOD_A,
+ FLASH_METHOD_B
+};
+
+#define FLASH_MANUF_ID_AMD 0x01
+#define FLASH_MANUF_ID_ST 0x20
+#define FLASH_MANUF_ID_FUJITSU 0x04
+#define FLASH_MANUF_ID_MACRONIX 0xC2
+#define FLASH_MANUF_ID_INTEL 0x89
+#define FLASH_MANUF_ID_UNKNOWN 0xFF
+
+#define FLASH_DEV_ID_AM29LV008BT 0x3E
+#define FLASH_DEV_ID_AM29LV800DT 0xDA
+#define FLASH_DEV_ID_STM29W800DT 0xD7
+#define FLASH_DEV_ID_STM29LV640 0xDE
+#define FLASH_DEV_ID_STM29008 0xEA
+#define FLASH_DEV_ID_MBM29LV800TE 0xDA
+#define FLASH_DEV_ID_MBM29DL800TA 0x4A
+#define FLASH_DEV_ID_MBM29LV008TA 0x3E
+#define FLASH_DEV_ID_AM29LV640MT 0x7E
+#define FLASH_DEV_ID_AM29F800B 0xD6
+#define FLASH_DEV_ID_MX29LV800BT 0xDA
+#define FLASH_DEV_ID_MX29LV008CT 0xDA
+#define FLASH_DEV_ID_I28LV00TAT 0x3E
+#define FLASH_DEV_ID_UNKNOWN 0xFF
+
+/* status bit mask values */
+#define FLASH_STATUS_BIT_MASK_DQ6 0x40
+#define FLASH_STATUS_BIT_MASK_DQ5 0x20
+#define FLASH_STATUS_BIT_MASK_DQ2 0x04
+
+/* minimum value in micro seconds needed for checking status */
+#define FLASH_STATUS_ERASE_DELAY_COUNT 50
+#define FLASH_STATUS_WRITE_DELAY_COUNT 25
+
+#define FLASH_SECTOR_SIZE 0x010000
+#define FLASH_SECTOR_SIZE_MASK 0xffff0000
+
+#define FLASH_OK 0x000000
+#define FAIL_OPEN_BIOS_FILE 0x000100
+#define FAIL_CHECK_PCI_ID 0x000200
+#define FAIL_CHECK_SUM 0x000300
+#define FAIL_UNKNOWN 0x000400
+#define FAIL_VERIFY 0x000500
+#define FAIL_RESET_FLASH 0x000600
+#define FAIL_FIND_FLASH_ID 0x000700
+#define FAIL_ERASE_FLASH 0x000800
+#define FAIL_WRITE_FLASH 0x000900
+#define FAIL_FILE_SIZE 0x000a00
+#define FAIL_PARAMETERS 0x000b00
+#define FAIL_OUT_MEMORY 0x000c00
+#define FLASH_IN_PROGRESS 0x001000
+
+struct controller_id {
+ u32 vendor; /* PCI Vendor ID */
+ u32 device; /* PCI Device ID */
+ u32 sub_vendor; /* PCI Subvendor ID */
+ u32 sub_device; /* PCI Subdevice ID */
+};
+
+struct image_info {
+ u32 ImageId; /* Identifies the image */
+ u32 ImageOffset; /* Offset the beginning of the file */
+ u32 ImageLength; /* length of the image */
+ u32 ImageChecksum; /* Image checksum */
+ u32 ImageVersion; /* Version of the image, could be build number */
+};
+
+struct bios_file_header {
+ u8 signature[32]; /* Signature/Cookie to identify the file */
+ u32 checksum; /*Entire file checksum with this field zero */
+ u32 antidote; /* Entire file checksum with this field 0xFFFFFFFF */
+ struct controller_id contrl_id; /*PCI id to identify the controller */
+ u32 filelen; /*Length of the entire file*/
+ u32 chunk_num; /*The chunk/part number for multiple Image files */
+ u32 total_chunks; /*Total number of chunks/parts in the image file */
+ u32 num_images; /* Number of images in the file */
+ u32 build_num; /* Build number of this image */
+ struct image_info image_header;
+};
+
+int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
+ const void *src, u32 dest_offset, u32 bytes_to_verify);
+int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
+ const void *src, u32 dest_offset, u32 bytes_to_write);
+int asd_chk_write_status(struct asd_ha_struct *asd_ha,
+ u32 sector_addr, u8 erase_flag);
+int asd_check_flash_type(struct asd_ha_struct *asd_ha);
+int asd_erase_nv_sector(struct asd_ha_struct *asd_ha,
+ u32 flash_addr, u32 size);
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
new file mode 100644
index 000000000..eb041d680
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -0,0 +1,1415 @@
+/*
+ * Aic94xx SAS/SATA driver sequencer interface.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * Parts of this code adapted from David Chaw's adp94xx_seq.c.
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include "aic94xx_reg.h"
+#include "aic94xx_hwi.h"
+
+#include "aic94xx_seq.h"
+#include "aic94xx_dump.h"
+
+/* It takes no more than 0.05 us for an instruction
+ * to complete. So waiting for 1 us should be more than
+ * plenty.
+ */
+#define PAUSE_DELAY 1
+#define PAUSE_TRIES 1000
+
+static const struct firmware *sequencer_fw;
+static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
+ cseq_idle_loop, lseq_idle_loop;
+static const u8 *cseq_code, *lseq_code;
+static u32 cseq_code_size, lseq_code_size;
+
+static u16 first_scb_site_no = 0xFFFF;
+static u16 last_scb_site_no;
+
+/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
+
+/**
+ * asd_pause_cseq - pause the central sequencer
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int asd_pause_cseq(struct asd_ha_struct *asd_ha)
+{
+ int count = PAUSE_TRIES;
+ u32 arp2ctl;
+
+ arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
+ if (arp2ctl & PAUSED)
+ return 0;
+
+ asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
+ do {
+ arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
+ if (arp2ctl & PAUSED)
+ return 0;
+ udelay(PAUSE_DELAY);
+ } while (--count > 0);
+
+ ASD_DPRINTK("couldn't pause CSEQ\n");
+ return -1;
+}
+
+/**
+ * asd_unpause_cseq - unpause the central sequencer.
+ * @asd_ha: pointer to host adapter structure.
+ *
+ * Return 0 on success, negative on error.
+ */
+static int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
+{
+ u32 arp2ctl;
+ int count = PAUSE_TRIES;
+
+ arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
+ if (!(arp2ctl & PAUSED))
+ return 0;
+
+ asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
+ do {
+ arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
+ if (!(arp2ctl & PAUSED))
+ return 0;
+ udelay(PAUSE_DELAY);
+ } while (--count > 0);
+
+ ASD_DPRINTK("couldn't unpause the CSEQ\n");
+ return -1;
+}
+
+/**
+ * asd_seq_pause_lseq - pause a link sequencer
+ * @asd_ha: pointer to a host adapter structure
+ * @lseq: link sequencer of interest
+ *
+ * Return 0 on success, negative on error.
+ */
+static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
+{
+ u32 arp2ctl;
+ int count = PAUSE_TRIES;
+
+ arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
+ if (arp2ctl & PAUSED)
+ return 0;
+
+ asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
+ do {
+ arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
+ if (arp2ctl & PAUSED)
+ return 0;
+ udelay(PAUSE_DELAY);
+ } while (--count > 0);
+
+ ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
+ return -1;
+}
+
+/**
+ * asd_pause_lseq - pause the link sequencer(s)
+ * @asd_ha: pointer to host adapter structure
+ * @lseq_mask: mask of link sequencers of interest
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
+{
+ int lseq;
+ int err = 0;
+
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ err = asd_seq_pause_lseq(asd_ha, lseq);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * asd_seq_unpause_lseq - unpause a link sequencer
+ * @asd_ha: pointer to host adapter structure
+ * @lseq: link sequencer of interest
+ *
+ * Return 0 on success, negative on error.
+ */
+static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
+{
+ u32 arp2ctl;
+ int count = PAUSE_TRIES;
+
+ arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
+ if (!(arp2ctl & PAUSED))
+ return 0;
+
+ asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
+ do {
+ arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
+ if (!(arp2ctl & PAUSED))
+ return 0;
+ udelay(PAUSE_DELAY);
+ } while (--count > 0);
+
+ ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
+ return 0;
+}
+
+
+/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
+
+static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
+ u32 size)
+{
+ u32 addr = CSEQ_RAM_REG_BASE_ADR;
+ const u32 *prog = (u32 *) _prog;
+ u32 i;
+
+ for (i = 0; i < size; i += 4, prog++, addr += 4) {
+ u32 val = asd_read_reg_dword(asd_ha, addr);
+
+ if (le32_to_cpu(*prog) != val) {
+ asd_printk("%s: cseq verify failed at %u "
+ "read:0x%x, wanted:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ i, val, le32_to_cpu(*prog));
+ return -1;
+ }
+ }
+ ASD_DPRINTK("verified %d bytes, passed\n", size);
+ return 0;
+}
+
+/**
+ * asd_verify_lseq - verify the microcode of a link sequencer
+ * @asd_ha: pointer to host adapter structure
+ * @_prog: pointer to the microcode
+ * @size: size of the microcode in bytes
+ * @lseq: link sequencer of interest
+ *
+ * The link sequencer code is accessed in 4 KB pages, which are selected
+ * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
+ * The 10 KB LSEQm instruction code is mapped, page at a time, at
+ * LmSEQRAM address.
+ */
+static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
+ u32 size, int lseq)
+{
+#define LSEQ_CODEPAGE_SIZE 4096
+ int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
+ u32 page;
+ const u32 *prog = (u32 *) _prog;
+
+ for (page = 0; page < pages; page++) {
+ u32 i;
+
+ asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
+ page << LmRAMPAGE_LSHIFT);
+ for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
+ i += 4, prog++, size-=4) {
+
+ u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
+
+ if (le32_to_cpu(*prog) != val) {
+ asd_printk("%s: LSEQ%d verify failed "
+ "page:%d, offs:%d\n",
+ pci_name(asd_ha->pcidev),
+ lseq, page, i);
+ return -1;
+ }
+ }
+ }
+ ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
+ (int)((u8 *)prog-_prog));
+ return 0;
+}
+
+/**
+ * asd_verify_seq -- verify CSEQ/LSEQ microcode
+ * @asd_ha: pointer to host adapter structure
+ * @prog: pointer to microcode
+ * @size: size of the microcode
+ * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
+ *
+ * Return 0 if microcode is correct, negative on mismatch.
+ */
+static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
+ u32 size, u8 lseq_mask)
+{
+ if (lseq_mask == 0)
+ return asd_verify_cseq(asd_ha, prog, size);
+ else {
+ int lseq, err;
+
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ err = asd_verify_lseq(asd_ha, prog, size, lseq);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+#define ASD_DMA_MODE_DOWNLOAD
+#ifdef ASD_DMA_MODE_DOWNLOAD
+/* This is the size of the CSEQ Mapped instruction page */
+#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
+static int asd_download_seq(struct asd_ha_struct *asd_ha,
+ const u8 * const prog, u32 size, u8 lseq_mask)
+{
+ u32 comstaten;
+ u32 reg;
+ int page;
+ const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
+ struct asd_dma_tok *token;
+ int err = 0;
+
+ if (size % 4) {
+ asd_printk("sequencer program not multiple of 4\n");
+ return -1;
+ }
+
+ asd_pause_cseq(asd_ha);
+ asd_pause_lseq(asd_ha, 0xFF);
+
+ /* save, disable and clear interrupts */
+ comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
+ asd_write_reg_dword(asd_ha, COMSTATEN, 0);
+ asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);
+
+ asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
+ asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);
+
+ token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
+ if (!token) {
+ asd_printk("out of memory for dma SEQ download\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ ASD_DPRINTK("dma-ing %d bytes\n", size);
+
+ for (page = 0; page < pages; page++) {
+ int i;
+ u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
+ (u32)MAX_DMA_OVLY_COUNT);
+
+ memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
+ asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
+ asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
+ reg = !page ? RESETOVLYDMA : 0;
+ reg |= (STARTOVLYDMA | OVLYHALTERR);
+ reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
+ /* Start DMA. */
+ asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
+
+ for (i = PAUSE_TRIES*100; i > 0; i--) {
+ u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
+ if (!(dmadone & OVLYDMAACT))
+ break;
+ udelay(PAUSE_DELAY);
+ }
+ }
+
+ reg = asd_read_reg_dword(asd_ha, COMSTAT);
+ if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
+ || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
+ asd_printk("%s: error DMA-ing sequencer code\n",
+ pci_name(asd_ha->pcidev));
+ err = -ENODEV;
+ }
+
+ asd_free_coherent(asd_ha, token);
+ out:
+ asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);
+
+ return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
+}
+#else /* ASD_DMA_MODE_DOWNLOAD */
+static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
+ u32 size, u8 lseq_mask)
+{
+ int i;
+ u32 reg = 0;
+ const u32 *prog = (u32 *) _prog;
+
+ if (size % 4) {
+ asd_printk("sequencer program not multiple of 4\n");
+ return -1;
+ }
+
+ asd_pause_cseq(asd_ha);
+ asd_pause_lseq(asd_ha, 0xFF);
+
+ reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
+ reg |= PIOCMODE;
+
+ asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
+ asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
+
+ ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
+ lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");
+
+ for (i = 0; i < size; i += 4, prog++)
+ asd_write_reg_dword(asd_ha, SPIODATA, *prog);
+
+ reg = (reg & ~PIOCMODE) | OVLYHALTERR;
+ asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
+
+ return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
+}
+#endif /* ASD_DMA_MODE_DOWNLOAD */
+
+/**
+ * asd_seq_download_seqs - download the sequencer microcode
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Download the central and link sequencer microcode.
+ */
+static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ if (!asd_ha->hw_prof.enabled_phys) {
+ asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
+ return -ENODEV;
+ }
+
+ /* Download the CSEQ */
+ ASD_DPRINTK("downloading CSEQ...\n");
+ err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
+ if (err) {
+ asd_printk("CSEQ download failed:%d\n", err);
+ return err;
+ }
+
+ /* Download the Link Sequencers code. All of the Link Sequencers
+ * microcode can be downloaded at the same time.
+ */
+ ASD_DPRINTK("downloading LSEQs...\n");
+ err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
+ asd_ha->hw_prof.enabled_phys);
+ if (err) {
+ /* Try it one at a time */
+ u8 lseq;
+ u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
+
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ err = asd_download_seq(asd_ha, lseq_code,
+ lseq_code_size, 1<<lseq);
+ if (err)
+ break;
+ }
+ }
+ if (err)
+ asd_printk("LSEQs download failed:%d\n", err);
+
+ return err;
+}
+
+/* ---------- Initializing the chip, chip memory, etc. ---------- */
+
+/**
+ * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
+{
+ /* CSEQ Mode Independent, page 4 setup. */
+ asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
+ asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
+ {
+ u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
+ u8 val = hweight8(con);
+ asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
+ }
+ asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);
+
+ /* CSEQ Mode independent, page 5 setup. */
+ asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
+ asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);
+
+ /* CSEQ Mode independent, page 6 setup. */
+ asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
+ asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
+ asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
+ asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
+ asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
+ asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
+ /* Calculate the free scb mask. */
+ {
+ u16 cmdctx = asd_get_cmdctx_size(asd_ha);
+ cmdctx = (~((cmdctx/128)-1)) >> 8;
+ asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
+ }
+ asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
+ first_scb_site_no);
+ asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
+ last_scb_site_no);
+ asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);
+
+ /* CSEQ Mode independent, page 7 setup. */
+ asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
+ asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
+ asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
+}
+
+/**
+ * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
+{
+ int i;
+ int moffs;
+
+ moffs = CSEQ_PAGE_SIZE * 2;
+
+ /* CSEQ Mode dependent, modes 0-7, page 0 setup. */
+ for (i = 0; i < 8; i++) {
+ asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
+ asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
+ asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
+ asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
+ asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
+ }
+
+ /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */
+
+ /* CSEQ Mode dependent, mode 8, page 0 setup. */
+ asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
+ asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
+ asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
+ asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
+ asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
+ asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
+ asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
+ asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
+ asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
+ asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
+ asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
+ asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
+ (u16)last_scb_site_no+1);
+ asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
+ (u16)asd_ha->hw_prof.max_ddbs);
+
+ /* CSEQ Mode dependent, mode 8, page 1 setup. */
+ asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
+ asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);
+
+ /* CSEQ Mode dependent, mode 8, page 2 setup. */
+ /* Tell the sequencer the bus address of the first SCB. */
+ asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
+ asd_ha->seq.next_scb.dma_handle);
+ ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
+ (unsigned long long)asd_ha->seq.next_scb.dma_handle);
+
+ /* Tell the sequencer the first Done List entry address. */
+ asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
+ asd_ha->seq.actual_dl->dma_handle);
+
+ /* Initialize the Q_DONE_POINTER with the least significant
+ * 4 bytes of the first Done List address. */
+ asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
+ ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));
+
+ asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);
+
+ /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
+}
+
+/**
+ * asd_init_cseq_scratch -- setup and init CSEQ
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Setup and initialize Central sequencers. Initialize the mode
+ * independent and dependent scratch page to the default settings.
+ */
+static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
+{
+ asd_init_cseq_mip(asd_ha);
+ asd_init_cseq_mdp(asd_ha);
+}
+
+/**
+ * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
+{
+ int i;
+
+ /* LSEQ Mode independent page 0 setup. */
+ asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
+ asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
+ ASD_NOTIFY_ENABLE_SPINUP);
+ asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
+ asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);
+
+ /* LSEQ Mode independent page 1 setup. */
+ asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);
+
+ /* LSEQ Mode Independent page 2 setup. */
+ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
+ for (i = 0; i < 12; i += 4)
+ asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);
+
+ /* LSEQ Mode Independent page 3 setup. */
+
+ /* Device present timer timeout */
+ asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
+ ASD_DEV_PRESENT_TIMEOUT);
+
+ /* SATA interlock timer disabled */
+ asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
+ ASD_SATA_INTERLOCK_TIMEOUT);
+
+ /* STP shutdown timer timeout constant, IGNORED by the sequencer,
+ * always 0. */
+ asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
+ ASD_STP_SHUTDOWN_TIMEOUT);
+
+ asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
+ ASD_SRST_ASSERT_TIMEOUT);
+
+ asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
+ ASD_RCV_FIS_TIMEOUT);
+
+ asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
+ ASD_ONE_MILLISEC_TIMEOUT);
+
+ /* COM_INIT timer */
+ asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
+ ASD_TEN_MILLISEC_TIMEOUT);
+
+ asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
+ ASD_SMP_RCV_TIMEOUT);
+}
+
+/**
+ * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
+{
+ int i;
+ u32 moffs;
+ u16 ret_addr[] = {
+ 0xFFFF, /* mode 0 */
+ 0xFFFF, /* mode 1 */
+ mode2_task, /* mode 2 */
+ 0,
+ 0xFFFF, /* mode 4/5 */
+ 0xFFFF, /* mode 4/5 */
+ };
+
+ /*
+ * Mode 0,1,2 and 4/5 have common field on page 0 for the first
+ * 14 bytes.
+ */
+ for (i = 0; i < 3; i++) {
+ moffs = i * LSEQ_MODE_SCRATCH_SIZE;
+ asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
+ ret_addr[i]);
+ asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
+ asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
+ asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
+ asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
+ }
+ /*
+ * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
+ */
+ asd_write_reg_word(asd_ha,
+ LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
+ ret_addr[5]);
+ asd_write_reg_word(asd_ha,
+ LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
+ asd_write_reg_word(asd_ha,
+ LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
+ asd_write_reg_word(asd_ha,
+ LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
+ asd_write_reg_word(asd_ha,
+ LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
+ asd_write_reg_byte(asd_ha,
+ LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
+ asd_write_reg_word(asd_ha,
+ LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
+
+ /* LSEQ Mode dependent 0, page 0 setup. */
+ asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
+ (u16)asd_ha->hw_prof.max_ddbs);
+ asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
+ (u16)last_scb_site_no+1);
+ asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
+ (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
+ asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
+ (u16) LmM0INTEN_MASK & 0xFFFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);
+
+ /* LSEQ mode dependent, mode 1, page 0 setup. */
+ asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
+ asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);
+
+ /* LSEQ Mode dependent mode 2, page 0 setup */
+ asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);
+
+ /* LSEQ Mode dependent, mode 4/5, page 0 setup. */
+ asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
+ /*
+ * Set the desired interval between transmissions of the NOTIFY
+ * (ENABLE SPINUP) primitive. Must be initialized to val - 1.
+ */
+ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
+ ASD_NOTIFY_TIMEOUT - 1);
+ /* No delay for the first NOTIFY to be sent to the attached target. */
+ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
+ ASD_NOTIFY_DOWN_COUNT);
+ asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq),
+ ASD_NOTIFY_DOWN_COUNT);
+
+ /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
+ for (i = 0; i < 2; i++) {
+ int j;
+ /* Start from Page 1 of Mode 0 and 1. */
+ moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
+ /* All the fields of page 1 can be initialized to 0. */
+ for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
+ asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
+ }
+
+ /* LSEQ Mode dependent, mode 2, page 1 setup. */
+ asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);
+
+ /* LSEQ Mode dependent, mode 4/5, page 1. */
+ for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
+ asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
+ asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
+ asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);
+
+ /* LSEQ Mode dependent, mode 0, page 2 setup. */
+ asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
+ asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
+ asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);
+
+ /* LSEQ Mode Dependent 1, page 2 setup. */
+ asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);
+
+ /* LSEQ Mode Dependent 2, page 2 setup. */
+ /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
+ * i.e. always 0. */
+ asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
+ asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
+ asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);
+
+ /* LSEQ Mode Dependent 4/5, page 2 setup. */
+ asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
+ asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
+}
+
+/**
+ * asd_init_lseq_scratch -- setup and init link sequencers
+ * @asd_ha: pointer to host adapter struct
+ */
+static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
+{
+ u8 lseq;
+ u8 lseq_mask;
+
+ lseq_mask = asd_ha->hw_prof.enabled_phys;
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ asd_init_lseq_mip(asd_ha, lseq);
+ asd_init_lseq_mdp(asd_ha, lseq);
+ }
+}
+
+/**
+ * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
+ * @asd_ha: pointer to host adapter structure
+ *
+ * This should be done before initializing common CSEQ and LSEQ
+ * scratch since those areas depend on some computed values here,
+ * last_scb_site_no, etc.
+ */
+static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
+{
+ u16 site_no;
+ u16 max_scbs = 0;
+
+ for (site_no = asd_ha->hw_prof.max_scbs-1;
+ site_no != (u16) -1;
+ site_no--) {
+ u16 i;
+
+ /* Initialize all fields in the SCB site to 0. */
+ for (i = 0; i < ASD_SCB_SIZE; i += 4)
+ asd_scbsite_write_dword(asd_ha, site_no, i, 0);
+
+ /* Initialize SCB Site Opcode field to invalid. */
+ asd_scbsite_write_byte(asd_ha, site_no,
+ offsetof(struct scb_header, opcode),
+ 0xFF);
+
+ /* Initialize SCB Site Flags field to mean a response
+ * frame has been received. This means inadvertent
+ * frames received to be dropped. */
+ asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
+
+ /* Workaround needed by SEQ to fix a SATA issue is to exclude
+ * certain SCB sites from the free list. */
+ if (!SCB_SITE_VALID(site_no))
+ continue;
+
+ if (last_scb_site_no == 0)
+ last_scb_site_no = site_no;
+
+ /* For every SCB site, we need to initialize the
+ * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
+ * and SG Element Flag. */
+
+ /* Q_NEXT field of the last SCB is invalidated. */
+ asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
+
+ first_scb_site_no = site_no;
+ max_scbs++;
+ }
+ asd_ha->hw_prof.max_scbs = max_scbs;
+ ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
+ ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
+ ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
+}
+
+/**
+ * asd_init_cseq_cio - initialize CSEQ CIO registers
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
+ asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
+ asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
+ asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
+ asd_ha->seq.scbpro = 0;
+ asd_write_reg_dword(asd_ha, SCBPRO, 0);
+ asd_write_reg_dword(asd_ha, CSEQCON, 0);
+
+ /* Initialize CSEQ Mode 11 Interrupt Vectors.
+ * The addresses are 16 bit wide and in dword units.
+ * The values of their macros are in byte units.
+ * Thus we have to divide by 4. */
+ asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
+ asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
+ asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);
+
+ /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
+ asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);
+
+ /* Initialize CSEQ Scratch Page to 0x04. */
+ asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);
+
+ /* Initialize CSEQ Mode[0-8] Dependent registers. */
+ /* Initialize Scratch Page to 0. */
+ for (i = 0; i < 9; i++)
+ asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);
+
+ /* Reset the ARP2 Program Count. */
+ asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
+
+ for (i = 0; i < 8; i++) {
+ /* Initialize Mode n Link m Interrupt Enable. */
+ asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
+ /* Initialize Mode n Request Mailbox. */
+ asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
+ }
+}
+
+/**
+ * asd_init_lseq_cio -- initialize LmSEQ CIO registers
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
+{
+ u8 *sas_addr;
+ int i;
+
+ /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
+ asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);
+
+ asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);
+
+ /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
+ for (i = 0; i < 3; i++)
+ asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);
+
+ /* Initialize Mode 5 SCRATCHPAGE to 0. */
+ asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);
+
+ asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
+ /* Initialize Mode 0,1,2 and 5 Interrupt Enable and
+ * Interrupt registers. */
+ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
+ asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
+ /* Mode 1 */
+ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
+ asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
+ /* Mode 2 */
+ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
+ asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
+ /* Mode 5 */
+ asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
+ asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);
+
+ /* Enable HW Timer status. */
+ asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);
+
+ /* Enable Primitive Status 0 and 1. */
+ asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
+ asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);
+
+ /* Enable Frame Error. */
+ asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
+ asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);
+
+ /* Initialize Mode 0 Transfer Level to 512. */
+ asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
+ /* Initialize Mode 1 Transfer Level to 256. */
+ asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);
+
+ /* Initialize Program Count. */
+ asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
+
+ /* Enable Blind SG Move. */
+ asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
+ asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
+ ASD_SATA_INTERLOCK_TIMEOUT);
+
+ (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));
+
+ /* Clear Primitive Status 0 and 1. */
+ asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
+ asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);
+
+ /* Clear HW Timer status. */
+ asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);
+
+ /* Clear DMA Errors for Mode 0 and 1. */
+ asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
+ asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);
+
+ /* Clear SG DMA Errors for Mode 0 and 1. */
+ asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
+ asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);
+
+ /* Clear Mode 0 Buffer Parity Error. */
+ asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);
+
+ /* Clear Mode 0 Frame Error register. */
+ asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);
+
+ /* Reset LSEQ external interrupt arbiter. */
+ asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);
+
+ /* Set the Phy SAS for the LmSEQ WWN. */
+ sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
+ for (i = 0; i < SAS_ADDR_SIZE; i++)
+ asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);
+
+ /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
+ asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);
+
+ /* Set the Bus Inactivity Time Limit Timer. */
+ asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);
+
+ /* Enable SATA Port Multiplier. */
+ asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);
+
+ /* Initialize Interrupt Vector[0-10] address in Mode 3.
+ * See the comment on CSEQ_INT_* */
+ asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
+ asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
+ /*
+ * Program the Link LED control, applicable only for
+ * Chip Rev. B or later.
+ */
+ asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
+ (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));
+
+ /* Set the Align Rate for SAS and STP mode. */
+ asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
+ asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
+}
+
+
+/**
+ * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
+ * @asd_ha: pointer to host adapter struct
+ */
+static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
+ for (i = 0; i < 8; i++)
+ asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
+ /* Reset the external interrupt arbiter. */
+ asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
+}
+
+/**
+ * asd_init_ddb_0 -- initialize DDB 0
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Initialize DDB site 0 which is used internally by the sequencer.
+ */
+static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
+{
+ int i;
+
+ /* Zero out the DDB explicitly */
+ for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
+ asd_ddbsite_write_dword(asd_ha, 0, i, 0);
+
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
+ asd_ha->hw_prof.max_ddbs-1);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
+ asd_ddbsite_write_word(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
+ asd_ha->hw_prof.num_phys * 2);
+ asd_ddbsite_write_byte(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
+ asd_ddbsite_write_byte(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
+ asd_ddbsite_write_byte(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
+ /* DDB 0 is reserved */
+ set_bit(0, asd_ha->hw_prof.ddb_bitmap);
+}
+
+static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
+{
+ unsigned int i;
+ unsigned int ddb_site;
+
+ for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++)
+ for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
+ asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0);
+}
+
+/**
+ * asd_seq_setup_seqs -- setup and initialize central and link sequencers
+ * @asd_ha: pointer to host adapter structure
+ */
+static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
+{
+ int lseq;
+ u8 lseq_mask;
+
+ /* Initialize DDB sites */
+ asd_seq_init_ddb_sites(asd_ha);
+
+ /* Initialize SCB sites. Done first to compute some values which
+ * the rest of the init code depends on. */
+ asd_init_scb_sites(asd_ha);
+
+ /* Initialize CSEQ Scratch RAM registers. */
+ asd_init_cseq_scratch(asd_ha);
+
+ /* Initialize LmSEQ Scratch RAM registers. */
+ asd_init_lseq_scratch(asd_ha);
+
+ /* Initialize CSEQ CIO registers. */
+ asd_init_cseq_cio(asd_ha);
+
+ asd_init_ddb_0(asd_ha);
+
+ /* Initialize LmSEQ CIO registers. */
+ lseq_mask = asd_ha->hw_prof.enabled_phys;
+ for_each_sequencer(lseq_mask, lseq_mask, lseq)
+ asd_init_lseq_cio(asd_ha, lseq);
+ asd_post_init_cseq(asd_ha);
+}
+
+
+/**
+ * asd_seq_start_cseq -- start the central sequencer, CSEQ
+ * @asd_ha: pointer to host adapter structure
+ */
+static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
+{
+ /* Reset the ARP2 instruction to location zero. */
+ asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
+
+ /* Unpause the CSEQ */
+ return asd_unpause_cseq(asd_ha);
+}
+
+/**
+ * asd_seq_start_lseq -- start a link sequencer
+ * @asd_ha: pointer to host adapter structure
+ * @lseq: the link sequencer of interest
+ */
+static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
+{
+ /* Reset the ARP2 instruction to location zero. */
+ asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
+
+ /* Unpause the LmSEQ */
+ return asd_seq_unpause_lseq(asd_ha, lseq);
+}
+
+int asd_release_firmware(void)
+{
+ release_firmware(sequencer_fw);
+ return 0;
+}
+
+static int asd_request_firmware(struct asd_ha_struct *asd_ha)
+{
+ int err, i;
+ struct sequencer_file_header header;
+ const struct sequencer_file_header *hdr_ptr;
+ u32 csum = 0;
+ u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
+
+ if (sequencer_fw)
+ /* already loaded */
+ return 0;
+
+ err = reject_firmware(&sequencer_fw,
+ SAS_RAZOR_SEQUENCER_FW_FILE,
+ &asd_ha->pcidev->dev);
+ if (err)
+ return err;
+
+ hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data;
+
+ header.csum = le32_to_cpu(hdr_ptr->csum);
+ header.major = le32_to_cpu(hdr_ptr->major);
+ header.minor = le32_to_cpu(hdr_ptr->minor);
+ header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
+ header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
+ header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
+ header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
+ header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
+ header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
+ header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
+ header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
+ header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
+ header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
+ header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);
+
+ for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
+ csum += sequencer_fw->data[i];
+
+ if (csum != header.csum) {
+ asd_printk("Firmware file checksum mismatch\n");
+ return -EINVAL;
+ }
+
+ if (header.cseq_table_size != CSEQ_NUM_VECS ||
+ header.lseq_table_size != LSEQ_NUM_VECS) {
+ asd_printk("Firmware file table size mismatch\n");
+ return -EINVAL;
+ }
+
+ asd_printk("Found sequencer Firmware version %d.%d (%s)\n",
+ header.major, header.minor, hdr_ptr->version);
+
+ if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) {
+ asd_printk("Firmware Major Version Mismatch;"
+ "driver requires version %d.X",
+ SAS_RAZOR_SEQUENCER_FW_MAJOR);
+ return -EINVAL;
+ }
+
+ ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
+ ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
+ mode2_task = header.mode2_task;
+ cseq_idle_loop = header.cseq_idle_loop;
+ lseq_idle_loop = header.lseq_idle_loop;
+
+ for (i = 0; i < CSEQ_NUM_VECS; i++)
+ cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);
+
+ for (i = 0; i < LSEQ_NUM_VECS; i++)
+ lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);
+
+ cseq_code = &sequencer_fw->data[header.cseq_code_offset];
+ cseq_code_size = header.cseq_code_size;
+ lseq_code = &sequencer_fw->data[header.lseq_code_offset];
+ lseq_code_size = header.lseq_code_size;
+
+ return 0;
+}
+
+int asd_init_seqs(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ err = asd_request_firmware(asd_ha);
+
+ if (err) {
+ asd_printk("Failed to load sequencer firmware file %s, error %d\n",
+ SAS_RAZOR_SEQUENCER_FW_FILE, err);
+ return err;
+ }
+
+ err = asd_seq_download_seqs(asd_ha);
+ if (err) {
+ asd_printk("couldn't download sequencers for %s\n",
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+
+ asd_seq_setup_seqs(asd_ha);
+
+ return 0;
+}
+
+int asd_start_seqs(struct asd_ha_struct *asd_ha)
+{
+ int err;
+ u8 lseq_mask;
+ int lseq;
+
+ err = asd_seq_start_cseq(asd_ha);
+ if (err) {
+ asd_printk("couldn't start CSEQ for %s\n",
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+
+ lseq_mask = asd_ha->hw_prof.enabled_phys;
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ err = asd_seq_start_lseq(asd_ha, lseq);
+ if (err) {
+ asd_printk("coudln't start LSEQ %d for %s\n", lseq,
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * asd_update_port_links -- update port_map_by_links and phy_is_up
+ * @sas_phy: pointer to the phy which has been added to a port
+ *
+ * 1) When a link reset has completed and we got BYTES DMAED with a
+ * valid frame we call this function for that phy, to indicate that
+ * the phy is up, i.e. we update the phy_is_up in DDB 0. The
+ * sequencer checks phy_is_up when pending SCBs are to be sent, and
+ * when an open address frame has been received.
+ *
+ * 2) When we know of ports, we call this function to update the map
+ * of phys participaing in that port, i.e. we update the
+ * port_map_by_links in DDB 0. When a HARD_RESET primitive has been
+ * received, the sequencer disables all phys in that port.
+ * port_map_by_links is also used as the conn_mask byte in the
+ * initiator/target port DDB.
+ */
+void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
+{
+ const u8 phy_mask = (u8) phy->asd_port->phy_mask;
+ u8 phy_is_up;
+ u8 mask;
+ int i, err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
+ for_each_phy(phy_mask, mask, i)
+ asd_ddbsite_write_byte(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared,
+ port_map_by_links)+i,phy_mask);
+
+ for (i = 0; i < 12; i++) {
+ phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, phy_is_up));
+ err = asd_ddbsite_update_byte(asd_ha, 0,
+ offsetof(struct asd_ddb_seq_shared, phy_is_up),
+ phy_is_up,
+ phy_is_up | phy_mask);
+ if (!err)
+ break;
+ else if (err == -EFAULT) {
+ asd_printk("phy_is_up: parity error in DDB 0\n");
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
+
+ if (err)
+ asd_printk("couldn't update DDB 0:error:%d\n", err);
+}
+
+/*(DEBLOBBED)*/
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
new file mode 100644
index 000000000..e3413f302
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -0,0 +1,68 @@
+/*
+ * Aic94xx SAS/SATA driver sequencer interface header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _AIC94XX_SEQ_H_
+#define _AIC94XX_SEQ_H_
+
+#define CSEQ_NUM_VECS 3
+#define LSEQ_NUM_VECS 11
+
+#define SAS_RAZOR_SEQUENCER_FW_FILE "/*(DEBLOBBED)*/"
+#define SAS_RAZOR_SEQUENCER_FW_MAJOR 1
+
+/* Note: All quantites in the sequencer file are little endian */
+struct sequencer_file_header {
+ /* Checksum of the entire contents of the sequencer excluding
+ * these four bytes */
+ u32 csum;
+ /* numeric major version */
+ u32 major;
+ /* numeric minor version */
+ u32 minor;
+ /* version string printed by driver */
+ char version[16];
+ u32 cseq_table_offset;
+ u32 cseq_table_size;
+ u32 lseq_table_offset;
+ u32 lseq_table_size;
+ u32 cseq_code_offset;
+ u32 cseq_code_size;
+ u32 lseq_code_offset;
+ u32 lseq_code_size;
+ u16 mode2_task;
+ u16 cseq_idle_loop;
+ u16 lseq_idle_loop;
+} __attribute__((packed));
+
+#ifdef __KERNEL__
+int asd_init_seqs(struct asd_ha_struct *asd_ha);
+int asd_start_seqs(struct asd_ha_struct *asd_ha);
+int asd_release_firmware(void);
+
+void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy);
+#endif
+
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
new file mode 100644
index 000000000..cdd4ab683
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -0,0 +1,643 @@
+/*
+ * Aic94xx SAS/SATA Tasks
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/spinlock.h>
+#include "aic94xx.h"
+#include "aic94xx_sas.h"
+#include "aic94xx_hwi.h"
+
+static void asd_unbuild_ata_ascb(struct asd_ascb *a);
+static void asd_unbuild_smp_ascb(struct asd_ascb *a);
+static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
+
+static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ asd_ha->seq.can_queue += num;
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+}
+
+/* PCI_DMA_... to our direction translation.
+ */
+static const u8 data_dir_flags[] = {
+ [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
+ [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
+};
+
+static int asd_map_scatterlist(struct sas_task *task,
+ struct sg_el *sg_arr,
+ gfp_t gfp_flags)
+{
+ struct asd_ascb *ascb = task->lldd_task;
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct scatterlist *sc;
+ int num_sg, res;
+
+ if (task->data_dir == PCI_DMA_NONE)
+ return 0;
+
+ if (task->num_scatter == 0) {
+ void *p = task->scatter;
+ dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
+ task->total_xfer_len,
+ task->data_dir);
+ sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
+ sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
+ sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
+ return 0;
+ }
+
+ /* STP tasks come from libata which has already mapped
+ * the SG list */
+ if (sas_protocol_ata(task->task_proto))
+ num_sg = task->num_scatter;
+ else
+ num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+ task->num_scatter, task->data_dir);
+ if (num_sg == 0)
+ return -ENOMEM;
+
+ if (num_sg > 3) {
+ int i;
+
+ ascb->sg_arr = asd_alloc_coherent(asd_ha,
+ num_sg*sizeof(struct sg_el),
+ gfp_flags);
+ if (!ascb->sg_arr) {
+ res = -ENOMEM;
+ goto err_unmap;
+ }
+ for_each_sg(task->scatter, sc, num_sg, i) {
+ struct sg_el *sg =
+ &((struct sg_el *)ascb->sg_arr->vaddr)[i];
+ sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
+ sg->size = cpu_to_le32((u32)sg_dma_len(sc));
+ if (i == num_sg-1)
+ sg->flags |= ASD_SG_EL_LIST_EOL;
+ }
+
+ for_each_sg(task->scatter, sc, 2, i) {
+ sg_arr[i].bus_addr =
+ cpu_to_le64((u64)sg_dma_address(sc));
+ sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
+ }
+ sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
+ sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;
+
+ memset(&sg_arr[2], 0, sizeof(*sg_arr));
+ sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
+ } else {
+ int i;
+ for_each_sg(task->scatter, sc, num_sg, i) {
+ sg_arr[i].bus_addr =
+ cpu_to_le64((u64)sg_dma_address(sc));
+ sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
+ }
+ sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
+ }
+
+ return 0;
+err_unmap:
+ if (sas_protocol_ata(task->task_proto))
+ pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
+ task->data_dir);
+ return res;
+}
+
+static void asd_unmap_scatterlist(struct asd_ascb *ascb)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct sas_task *task = ascb->uldd_task;
+
+ if (task->data_dir == PCI_DMA_NONE)
+ return;
+
+ if (task->num_scatter == 0) {
+ dma_addr_t dma = (dma_addr_t)
+ le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
+ pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
+ task->data_dir);
+ return;
+ }
+
+ asd_free_coherent(asd_ha, ascb->sg_arr);
+ if (task->task_proto != SAS_PROTOCOL_STP)
+ pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
+ task->data_dir);
+}
+
+/* ---------- Task complete tasklet ---------- */
+
+static void asd_get_response_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct sas_task *task = ascb->uldd_task;
+ struct task_status_struct *ts = &task->task_status;
+ unsigned long flags;
+ struct tc_resp_sb_struct {
+ __le16 index_escb;
+ u8 len_lsb;
+ u8 flags;
+ } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
+
+/* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
+ int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
+ struct asd_ascb *escb;
+ struct asd_dma_tok *edb;
+ void *r;
+
+ spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
+ escb = asd_tc_index_find(&asd_ha->seq,
+ (int)le16_to_cpu(resp_sb->index_escb));
+ spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
+
+ if (!escb) {
+ ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
+ return;
+ }
+
+ ts->buf_valid_size = 0;
+ edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
+ r = edb->vaddr;
+ if (task->task_proto == SAS_PROTOCOL_SSP) {
+ struct ssp_response_iu *iu =
+ r + 16 + sizeof(struct ssp_frame_hdr);
+
+ ts->residual = le32_to_cpu(*(__le32 *)r);
+
+ sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
+ } else {
+ struct ata_task_resp *resp = (void *) &ts->buf[0];
+
+ ts->residual = le32_to_cpu(*(__le32 *)r);
+
+ if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+ resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
+ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
+ ts->buf_valid_size = sizeof(*resp);
+ }
+ }
+
+ asd_invalidate_edb(escb, edb_id);
+}
+
+static void asd_task_tasklet_complete(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct sas_task *task = ascb->uldd_task;
+ struct task_status_struct *ts = &task->task_status;
+ unsigned long flags;
+ u8 opcode = dl->opcode;
+
+ asd_can_dequeue(ascb->ha, 1);
+
+Again:
+ switch (opcode) {
+ case TC_NO_ERROR:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ break;
+ case TC_UNDERRUN:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
+ break;
+ case TC_OVERRUN:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ break;
+ case TC_SSP_RESP:
+ case TC_ATA_RESP:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PROTO_RESPONSE;
+ asd_get_response_tasklet(ascb, dl);
+ break;
+ case TF_OPEN_REJECT:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ if (dl->status_block[1] & 2)
+ ts->open_rej_reason = 1 + dl->status_block[2];
+ else if (dl->status_block[1] & 1)
+ ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
+ else
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case TF_OPEN_TO:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case TF_PHY_DOWN:
+ case TU_PHY_DOWN:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case TI_PHY_DOWN:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case TI_BREAK:
+ case TI_PROTO_ERR:
+ case TI_NAK:
+ case TI_ACK_NAK_TO:
+ case TF_SMP_XMIT_RCV_ERR:
+ case TC_ATA_R_ERR_RECV:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_INTERRUPTED;
+ break;
+ case TF_BREAK:
+ case TU_BREAK:
+ case TU_ACK_NAK_TO:
+ case TF_SMPRSP_TO:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case TF_NAK_RECV:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case TA_I_T_NEXUS_LOSS:
+ opcode = dl->status_block[0];
+ goto Again;
+ break;
+ case TF_INV_CONN_HANDLE:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEVICE_UNKNOWN;
+ break;
+ case TF_REQUESTED_N_PENDING:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PENDING;
+ break;
+ case TC_TASK_CLEARED:
+ case TA_ON_REQ:
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ break;
+
+ case TF_NO_SMP_CONN:
+ case TF_TMF_NO_CTX:
+ case TF_TMF_NO_TAG:
+ case TF_TMF_TAG_FREE:
+ case TF_TMF_TASK_DONE:
+ case TF_TMF_NO_CONN_HANDLE:
+ case TF_IRTT_TO:
+ case TF_IU_SHORT:
+ case TF_DATA_OFFS_ERR:
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+
+ case TC_LINK_ADM_RESP:
+ case TC_CONTROL_PHY:
+ case TC_RESUME:
+ case TC_PARTIAL_SG_LIST:
+ default:
+ ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
+ break;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ asd_unbuild_ata_ascb(ascb);
+ break;
+ case SAS_PROTOCOL_SMP:
+ asd_unbuild_smp_ascb(ascb);
+ break;
+ case SAS_PROTOCOL_SSP:
+ asd_unbuild_ssp_ascb(ascb);
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ struct completion *completion = ascb->completion;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
+ "stat 0x%x but aborted by upper layer!\n",
+ task, opcode, ts->resp, ts->stat);
+ if (completion)
+ complete(completion);
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ task->lldd_task = NULL;
+ asd_ascb_free(ascb);
+ mb();
+ task->task_done(task);
+ }
+}
+
+/* ---------- ATA ---------- */
+
+static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
+ gfp_t gfp_flags)
+{
+ struct domain_device *dev = task->dev;
+ struct scb *scb;
+ u8 flags;
+ int res = 0;
+
+ scb = ascb->scb;
+
+ if (unlikely(task->ata_task.device_control_reg_update))
+ scb->header.opcode = CONTROL_ATA_DEV;
+ else if (dev->sata_dev.class == ATA_DEV_ATAPI)
+ scb->header.opcode = INITIATE_ATAPI_TASK;
+ else
+ scb->header.opcode = INITIATE_ATA_TASK;
+
+ scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
+ if (dev->port->oob_mode == SAS_OOB_MODE)
+ scb->ata_task.proto_conn_rate |= dev->linkrate;
+
+ scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
+ scb->ata_task.fis = task->ata_task.fis;
+ if (likely(!task->ata_task.device_control_reg_update))
+ scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
+ if (dev->sata_dev.class == ATA_DEV_ATAPI)
+ memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
+ 16);
+ scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
+ scb->ata_task.conn_handle = cpu_to_le16(
+ (u16)(unsigned long)dev->lldd_dev);
+
+ if (likely(!task->ata_task.device_control_reg_update)) {
+ flags = 0;
+ if (task->ata_task.dma_xfer)
+ flags |= DATA_XFER_MODE_DMA;
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI)
+ flags |= ATA_Q_TYPE_NCQ;
+ flags |= data_dir_flags[task->data_dir];
+ scb->ata_task.ata_flags = flags;
+
+ scb->ata_task.retry_count = task->ata_task.retry_count;
+
+ flags = 0;
+ if (task->ata_task.set_affil_pol)
+ flags |= SET_AFFIL_POLICY;
+ if (task->ata_task.stp_affil_pol)
+ flags |= STP_AFFIL_POLICY;
+ scb->ata_task.flags = flags;
+ }
+ ascb->tasklet_complete = asd_task_tasklet_complete;
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ res = asd_map_scatterlist(task, scb->ata_task.sg_element,
+ gfp_flags);
+
+ return res;
+}
+
+static void asd_unbuild_ata_ascb(struct asd_ascb *a)
+{
+ asd_unmap_scatterlist(a);
+}
+
+/* ---------- SMP ---------- */
+
+static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
+ gfp_t gfp_flags)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ struct domain_device *dev = task->dev;
+ struct scb *scb;
+
+ pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+
+ scb = ascb->scb;
+
+ scb->header.opcode = INITIATE_SMP_TASK;
+
+ scb->smp_task.proto_conn_rate = dev->linkrate;
+
+ scb->smp_task.smp_req.bus_addr =
+ cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+ scb->smp_task.smp_req.size =
+ cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+
+ scb->smp_task.smp_resp.bus_addr =
+ cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
+ scb->smp_task.smp_resp.size =
+ cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+
+ scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
+ scb->smp_task.conn_handle = cpu_to_le16((u16)
+ (unsigned long)dev->lldd_dev);
+
+ ascb->tasklet_complete = asd_task_tasklet_complete;
+
+ return 0;
+}
+
+static void asd_unbuild_smp_ascb(struct asd_ascb *a)
+{
+ struct sas_task *task = a->uldd_task;
+
+ BUG_ON(!task);
+ pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+}
+
+/* ---------- SSP ---------- */
+
+static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
+ gfp_t gfp_flags)
+{
+ struct domain_device *dev = task->dev;
+ struct scb *scb;
+ int res = 0;
+
+ scb = ascb->scb;
+
+ scb->header.opcode = INITIATE_SSP_TASK;
+
+ scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */
+ scb->ssp_task.proto_conn_rate |= dev->linkrate;
+ scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
+ scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
+ memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
+ HASHED_SAS_ADDR_SIZE);
+ memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
+ dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+ scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
+
+ memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
+ if (task->ssp_task.enable_first_burst)
+ scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
+ scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+ scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+ memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
+ task->ssp_task.cmd->cmd_len);
+
+ scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
+ scb->ssp_task.conn_handle = cpu_to_le16(
+ (u16)(unsigned long)dev->lldd_dev);
+ scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
+ scb->ssp_task.retry_count = scb->ssp_task.retry_count;
+
+ ascb->tasklet_complete = asd_task_tasklet_complete;
+
+ res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
+
+ return res;
+}
+
+static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
+{
+ asd_unmap_scatterlist(a);
+}
+
+/* ---------- Execute Task ---------- */
+
+static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
+{
+ int res = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ if ((asd_ha->seq.can_queue - num) < 0)
+ res = -SAS_QUEUE_FULL;
+ else
+ asd_ha->seq.can_queue -= num;
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ return res;
+}
+
+int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
+{
+ int res = 0;
+ LIST_HEAD(alist);
+ struct sas_task *t = task;
+ struct asd_ascb *ascb = NULL, *a;
+ struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
+ unsigned long flags;
+
+ res = asd_can_queue(asd_ha, 1);
+ if (res)
+ return res;
+
+ res = 1;
+ ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
+ if (res) {
+ res = -ENOMEM;
+ goto out_err;
+ }
+
+ __list_add(&alist, ascb->list.prev, &ascb->list);
+ list_for_each_entry(a, &alist, list) {
+ a->uldd_task = t;
+ t->lldd_task = a;
+ break;
+ }
+ list_for_each_entry(a, &alist, list) {
+ t = a->uldd_task;
+ a->uldd_timer = 1;
+ if (t->task_proto & SAS_PROTOCOL_STP)
+ t->task_proto = SAS_PROTOCOL_STP;
+ switch (t->task_proto) {
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ res = asd_build_ata_ascb(a, t, gfp_flags);
+ break;
+ case SAS_PROTOCOL_SMP:
+ res = asd_build_smp_ascb(a, t, gfp_flags);
+ break;
+ case SAS_PROTOCOL_SSP:
+ res = asd_build_ssp_ascb(a, t, gfp_flags);
+ break;
+ default:
+ asd_printk("unknown sas_task proto: 0x%x\n",
+ t->task_proto);
+ res = -ENOMEM;
+ break;
+ }
+ if (res)
+ goto out_err_unmap;
+
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ }
+ list_del_init(&alist);
+
+ res = asd_post_ascb_list(asd_ha, ascb, 1);
+ if (unlikely(res)) {
+ a = NULL;
+ __list_add(&alist, ascb->list.prev, &ascb->list);
+ goto out_err_unmap;
+ }
+
+ return 0;
+out_err_unmap:
+ {
+ struct asd_ascb *b = a;
+ list_for_each_entry(a, &alist, list) {
+ if (a == b)
+ break;
+ t = a->uldd_task;
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ switch (t->task_proto) {
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ asd_unbuild_ata_ascb(a);
+ break;
+ case SAS_PROTOCOL_SMP:
+ asd_unbuild_smp_ascb(a);
+ break;
+ case SAS_PROTOCOL_SSP:
+ asd_unbuild_ssp_ascb(a);
+ default:
+ break;
+ }
+ t->lldd_task = NULL;
+ }
+ }
+ list_del_init(&alist);
+out_err:
+ if (ascb)
+ asd_ascb_free_list(ascb);
+ asd_can_dequeue(asd_ha, 1);
+ return res;
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
new file mode 100644
index 000000000..d4c35df3d
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -0,0 +1,714 @@
+/*
+ * Aic94xx Task Management Functions
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include "aic94xx.h"
+#include "aic94xx_sas.h"
+#include "aic94xx_hwi.h"
+
+/* ---------- Internal enqueue ---------- */
+
+static int asd_enqueue_internal(struct asd_ascb *ascb,
+ void (*tasklet_complete)(struct asd_ascb *,
+ struct done_list_struct *),
+ void (*timed_out)(unsigned long))
+{
+ int res;
+
+ ascb->tasklet_complete = tasklet_complete;
+ ascb->uldd_timer = 1;
+
+ ascb->timer.data = (unsigned long) ascb;
+ ascb->timer.function = timed_out;
+ ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
+
+ add_timer(&ascb->timer);
+
+ res = asd_post_ascb_list(ascb->ha, ascb, 1);
+ if (unlikely(res))
+ del_timer(&ascb->timer);
+ return res;
+}
+
+/* ---------- CLEAR NEXUS ---------- */
+
+struct tasklet_completion_status {
+ int dl_opcode;
+ int tmf_state;
+ u8 tag_valid:1;
+ __be16 tag;
+};
+
+#define DECLARE_TCS(tcs) \
+ struct tasklet_completion_status tcs = { \
+ .dl_opcode = 0, \
+ .tmf_state = 0, \
+ .tag_valid = 0, \
+ .tag = 0, \
+ }
+
+
+static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct tasklet_completion_status *tcs = ascb->uldd_task;
+ ASD_DPRINTK("%s: here\n", __func__);
+ if (!del_timer(&ascb->timer)) {
+ ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
+ return;
+ }
+ ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
+ tcs->dl_opcode = dl->opcode;
+ complete(ascb->completion);
+ asd_ascb_free(ascb);
+}
+
+static void asd_clear_nexus_timedout(unsigned long data)
+{
+ struct asd_ascb *ascb = (void *)data;
+ struct tasklet_completion_status *tcs = ascb->uldd_task;
+
+ ASD_DPRINTK("%s: here\n", __func__);
+ tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
+ complete(ascb->completion);
+}
+
+#define CLEAR_NEXUS_PRE \
+ struct asd_ascb *ascb; \
+ struct scb *scb; \
+ int res; \
+ DECLARE_COMPLETION_ONSTACK(completion); \
+ DECLARE_TCS(tcs); \
+ \
+ ASD_DPRINTK("%s: PRE\n", __func__); \
+ res = 1; \
+ ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
+ if (!ascb) \
+ return -ENOMEM; \
+ \
+ ascb->completion = &completion; \
+ ascb->uldd_task = &tcs; \
+ scb = ascb->scb; \
+ scb->header.opcode = CLEAR_NEXUS
+
+#define CLEAR_NEXUS_POST \
+ ASD_DPRINTK("%s: POST\n", __func__); \
+ res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
+ asd_clear_nexus_timedout); \
+ if (res) \
+ goto out_err; \
+ ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
+ wait_for_completion(&completion); \
+ res = tcs.dl_opcode; \
+ if (res == TC_NO_ERROR) \
+ res = TMF_RESP_FUNC_COMPLETE; \
+ return res; \
+out_err: \
+ asd_ascb_free(ascb); \
+ return res
+
+int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
+{
+ struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
+
+ CLEAR_NEXUS_PRE;
+ scb->clear_nexus.nexus = NEXUS_ADAPTER;
+ CLEAR_NEXUS_POST;
+}
+
+int asd_clear_nexus_port(struct asd_sas_port *port)
+{
+ struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
+
+ CLEAR_NEXUS_PRE;
+ scb->clear_nexus.nexus = NEXUS_PORT;
+ scb->clear_nexus.conn_mask = port->phy_mask;
+ CLEAR_NEXUS_POST;
+}
+
+enum clear_nexus_phase {
+ NEXUS_PHASE_PRE,
+ NEXUS_PHASE_POST,
+ NEXUS_PHASE_RESUME,
+};
+
+static int asd_clear_nexus_I_T(struct domain_device *dev,
+ enum clear_nexus_phase phase)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+
+ CLEAR_NEXUS_PRE;
+ scb->clear_nexus.nexus = NEXUS_I_T;
+ switch (phase) {
+ case NEXUS_PHASE_PRE:
+ scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
+ break;
+ case NEXUS_PHASE_POST:
+ scb->clear_nexus.flags = SEND_Q | NOTINQ;
+ break;
+ case NEXUS_PHASE_RESUME:
+ scb->clear_nexus.flags = RESUME_TX;
+ }
+ scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
+ dev->lldd_dev);
+ CLEAR_NEXUS_POST;
+}
+
+int asd_I_T_nexus_reset(struct domain_device *dev)
+{
+ int res, tmp_res, i;
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ /* Standard mandates link reset for ATA (type 0) and
+ * hard reset for SSP (type 1) */
+ int reset_type = (dev->dev_type == SAS_SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+
+ asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
+ /* send a hard reset */
+ ASD_DPRINTK("sending %s reset to %s\n",
+ reset_type ? "hard" : "soft", dev_name(&phy->dev));
+ res = sas_phy_reset(phy, reset_type);
+ if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
+ /* wait for the maximum settle time */
+ msleep(500);
+ /* clear all outstanding commands (keep nexus suspended) */
+ asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
+ }
+ for (i = 0 ; i < 3; i++) {
+ tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
+ if (tmp_res == TC_RESUME)
+ goto out;
+ msleep(500);
+ }
+
+ /* This is a bit of a problem: the sequencer is still suspended
+ * and is refusing to resume. Hope it will resume on a bigger hammer
+ * or the disk is lost */
+ dev_printk(KERN_ERR, &phy->dev,
+ "Failed to resume nexus after reset 0x%x\n", tmp_res);
+
+ res = TMF_RESP_FUNC_FAILED;
+ out:
+ sas_put_local_phy(phy);
+ return res;
+}
+
+static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+
+ CLEAR_NEXUS_PRE;
+ scb->clear_nexus.nexus = NEXUS_I_T_L;
+ scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
+ memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
+ scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
+ dev->lldd_dev);
+ CLEAR_NEXUS_POST;
+}
+
+static int asd_clear_nexus_tag(struct sas_task *task)
+{
+ struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
+ struct asd_ascb *tascb = task->lldd_task;
+
+ CLEAR_NEXUS_PRE;
+ scb->clear_nexus.nexus = NEXUS_TAG;
+ memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
+ scb->clear_nexus.ssp_task.tag = tascb->tag;
+ if (task->dev->tproto)
+ scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
+ task->dev->lldd_dev);
+ CLEAR_NEXUS_POST;
+}
+
+static int asd_clear_nexus_index(struct sas_task *task)
+{
+ struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
+ struct asd_ascb *tascb = task->lldd_task;
+
+ CLEAR_NEXUS_PRE;
+ scb->clear_nexus.nexus = NEXUS_TRANS_CX;
+ if (task->dev->tproto)
+ scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
+ task->dev->lldd_dev);
+ scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
+ CLEAR_NEXUS_POST;
+}
+
+/* ---------- TMFs ---------- */
+
+static void asd_tmf_timedout(unsigned long data)
+{
+ struct asd_ascb *ascb = (void *) data;
+ struct tasklet_completion_status *tcs = ascb->uldd_task;
+
+ ASD_DPRINTK("tmf timed out\n");
+ tcs->tmf_state = TMF_RESP_FUNC_FAILED;
+ complete(ascb->completion);
+}
+
+static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ unsigned long flags;
+ struct tc_resp_sb_struct {
+ __le16 index_escb;
+ u8 len_lsb;
+ u8 flags;
+ } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
+
+ int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
+ struct asd_ascb *escb;
+ struct asd_dma_tok *edb;
+ struct ssp_frame_hdr *fh;
+ struct ssp_response_iu *ru;
+ int res = TMF_RESP_FUNC_FAILED;
+
+ ASD_DPRINTK("tmf resp tasklet\n");
+
+ spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
+ escb = asd_tc_index_find(&asd_ha->seq,
+ (int)le16_to_cpu(resp_sb->index_escb));
+ spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
+
+ if (!escb) {
+ ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
+ return res;
+ }
+
+ edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
+ ascb->tag = *(__be16 *)(edb->vaddr+4);
+ fh = edb->vaddr + 16;
+ ru = edb->vaddr + 16 + sizeof(*fh);
+ res = ru->status;
+ if (ru->datapres == 1) /* Response data present */
+ res = ru->resp_data[3];
+#if 0
+ ascb->tag = fh->tag;
+#endif
+ ascb->tag_valid = 1;
+
+ asd_invalidate_edb(escb, edb_id);
+ return res;
+}
+
+static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+{
+ struct tasklet_completion_status *tcs;
+
+ if (!del_timer(&ascb->timer))
+ return;
+
+ tcs = ascb->uldd_task;
+ ASD_DPRINTK("tmf tasklet complete\n");
+
+ tcs->dl_opcode = dl->opcode;
+
+ if (dl->opcode == TC_SSP_RESP) {
+ tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
+ tcs->tag_valid = ascb->tag_valid;
+ tcs->tag = ascb->tag;
+ }
+
+ complete(ascb->completion);
+ asd_ascb_free(ascb);
+}
+
+static int asd_clear_nexus(struct sas_task *task)
+{
+ int res = TMF_RESP_FUNC_FAILED;
+ int leftover;
+ struct asd_ascb *tascb = task->lldd_task;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ unsigned long flags;
+
+ tascb->completion = &completion;
+
+ ASD_DPRINTK("task not done, clearing nexus\n");
+ if (tascb->tag_valid)
+ res = asd_clear_nexus_tag(task);
+ else
+ res = asd_clear_nexus_index(task);
+ leftover = wait_for_completion_timeout(&completion,
+ AIC94XX_SCB_TIMEOUT);
+ tascb->completion = NULL;
+ ASD_DPRINTK("came back from clear nexus\n");
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (leftover < 1)
+ res = TMF_RESP_FUNC_FAILED;
+ if (task->task_state_flags & SAS_TASK_STATE_DONE)
+ res = TMF_RESP_FUNC_COMPLETE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ return res;
+}
+
+/**
+ * asd_abort_task -- ABORT TASK TMF
+ * @task: the task to be aborted
+ *
+ * Before calling ABORT TASK the task state flags should be ORed with
+ * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
+ * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
+ *
+ * Implements the ABORT TASK TMF, I_T_L_Q nexus.
+ * Returns: SAS TMF responses (see sas_task.h),
+ * -ENOMEM,
+ * -SAS_QUEUE_FULL.
+ *
+ * When ABORT TASK returns, the caller of ABORT TASK checks first the
+ * task->task_state_flags, and then the return value of ABORT TASK.
+ *
+ * If the task has task state bit SAS_TASK_STATE_DONE set, then the
+ * task was completed successfully prior to it being aborted. The
+ * caller of ABORT TASK has responsibility to call task->task_done()
+ * xor free the task, depending on their framework. The return code
+ * is TMF_RESP_FUNC_FAILED in this case.
+ *
+ * Else the SAS_TASK_STATE_DONE bit is not set,
+ * If the return code is TMF_RESP_FUNC_COMPLETE, then
+ * the task was aborted successfully. The caller of
+ * ABORT TASK has responsibility to call task->task_done()
+ * to finish the task, xor free the task depending on their
+ * framework.
+ * else
+ * the ABORT TASK returned some kind of error. The task
+ * was _not_ cancelled. Nothing can be assumed.
+ * The caller of ABORT TASK may wish to retry.
+ */
+int asd_abort_task(struct sas_task *task)
+{
+ struct asd_ascb *tascb = task->lldd_task;
+ struct asd_ha_struct *asd_ha = tascb->ha;
+ int res = 1;
+ unsigned long flags;
+ struct asd_ascb *ascb = NULL;
+ struct scb *scb;
+ int leftover;
+ DECLARE_TCS(tcs);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ DECLARE_COMPLETION_ONSTACK(tascb_completion);
+
+ tascb->completion = &tascb_completion;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ res = TMF_RESP_FUNC_COMPLETE;
+ ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
+ goto out_done;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
+ if (!ascb)
+ return -ENOMEM;
+
+ ascb->uldd_task = &tcs;
+ ascb->completion = &completion;
+ scb = ascb->scb;
+ scb->header.opcode = SCB_ABORT_TASK;
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
+ break;
+ case SAS_PROTOCOL_SSP:
+ scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
+ scb->abort_task.proto_conn_rate |= task->dev->linkrate;
+ break;
+ case SAS_PROTOCOL_SMP:
+ break;
+ default:
+ break;
+ }
+
+ if (task->task_proto == SAS_PROTOCOL_SSP) {
+ scb->abort_task.ssp_frame.frame_type = SSP_TASK;
+ memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
+ task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+ memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
+ task->dev->port->ha->hashed_sas_addr,
+ HASHED_SAS_ADDR_SIZE);
+ scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
+
+ memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
+ scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
+ scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
+ }
+
+ scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
+ scb->abort_task.conn_handle = cpu_to_le16(
+ (u16)(unsigned long)task->dev->lldd_dev);
+ scb->abort_task.retry_count = 1;
+ scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
+ scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
+
+ res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
+ asd_tmf_timedout);
+ if (res)
+ goto out_free;
+ wait_for_completion(&completion);
+ ASD_DPRINTK("tmf came back\n");
+
+ tascb->tag = tcs.tag;
+ tascb->tag_valid = tcs.tag_valid;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ res = TMF_RESP_FUNC_COMPLETE;
+ ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
+ goto out_done;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (tcs.dl_opcode == TC_SSP_RESP) {
+ /* The task to be aborted has been sent to the device.
+ * We got a Response IU for the ABORT TASK TMF. */
+ if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
+ res = asd_clear_nexus(task);
+ else
+ res = tcs.tmf_state;
+ } else if (tcs.dl_opcode == TC_NO_ERROR &&
+ tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
+ /* timeout */
+ res = TMF_RESP_FUNC_FAILED;
+ } else {
+ /* In the following we assume that the managing layer
+ * will _never_ make a mistake, when issuing ABORT
+ * TASK.
+ */
+ switch (tcs.dl_opcode) {
+ default:
+ res = asd_clear_nexus(task);
+ /* fallthrough */
+ case TC_NO_ERROR:
+ break;
+ /* The task hasn't been sent to the device xor
+ * we never got a (sane) Response IU for the
+ * ABORT TASK TMF.
+ */
+ case TF_NAK_RECV:
+ res = TMF_RESP_INVALID_FRAME;
+ break;
+ case TF_TMF_TASK_DONE: /* done but not reported yet */
+ res = TMF_RESP_FUNC_FAILED;
+ leftover =
+ wait_for_completion_timeout(&tascb_completion,
+ AIC94XX_SCB_TIMEOUT);
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (leftover < 1)
+ res = TMF_RESP_FUNC_FAILED;
+ if (task->task_state_flags & SAS_TASK_STATE_DONE)
+ res = TMF_RESP_FUNC_COMPLETE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ break;
+ case TF_TMF_NO_TAG:
+ case TF_TMF_TAG_FREE: /* the tag is in the free list */
+ case TF_TMF_NO_CONN_HANDLE: /* no such device */
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
+ res = TMF_RESP_FUNC_ESUPP;
+ break;
+ }
+ }
+ out_done:
+ tascb->completion = NULL;
+ if (res == TMF_RESP_FUNC_COMPLETE) {
+ task->lldd_task = NULL;
+ mb();
+ asd_ascb_free(tascb);
+ }
+ ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
+ return res;
+
+ out_free:
+ asd_ascb_free(ascb);
+ ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
+ return res;
+}
+
+/**
+ * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
+ * @dev: pointer to struct domain_device of interest
+ * @lun: pointer to u8[8] which is the LUN
+ * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
+ * @index: the transaction context of the task to be queried if QT TMF
+ *
+ * This function is used to send ABORT TASK SET, CLEAR ACA,
+ * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
+ *
+ * No SCBs should be queued to the I_T_L nexus when this SCB is
+ * pending.
+ *
+ * Returns: TMF response code (see sas_task.h or the SAS spec)
+ */
+static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
+ int tmf, int index)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ struct asd_ascb *ascb;
+ int res = 1;
+ struct scb *scb;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ DECLARE_TCS(tcs);
+
+ if (!(dev->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
+ if (!ascb)
+ return -ENOMEM;
+
+ ascb->completion = &completion;
+ ascb->uldd_task = &tcs;
+ scb = ascb->scb;
+
+ if (tmf == TMF_QUERY_TASK)
+ scb->header.opcode = QUERY_SSP_TASK;
+ else
+ scb->header.opcode = INITIATE_SSP_TMF;
+
+ scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
+ scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
+ /* SSP frame header */
+ scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
+ memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
+ dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+ memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
+ dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+ scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
+ /* SSP Task IU */
+ memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
+ scb->ssp_tmf.ssp_task.tmf = tmf;
+
+ scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
+ scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
+ dev->lldd_dev);
+ scb->ssp_tmf.retry_count = 1;
+ scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
+ if (tmf == TMF_QUERY_TASK)
+ scb->ssp_tmf.index = cpu_to_le16(index);
+
+ res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
+ asd_tmf_timedout);
+ if (res)
+ goto out_err;
+ wait_for_completion(&completion);
+
+ switch (tcs.dl_opcode) {
+ case TC_NO_ERROR:
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ case TF_NAK_RECV:
+ res = TMF_RESP_INVALID_FRAME;
+ break;
+ case TF_TMF_TASK_DONE:
+ res = TMF_RESP_FUNC_FAILED;
+ break;
+ case TF_TMF_NO_TAG:
+ case TF_TMF_TAG_FREE: /* the tag is in the free list */
+ case TF_TMF_NO_CONN_HANDLE: /* no such device */
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
+ res = TMF_RESP_FUNC_ESUPP;
+ break;
+ default:
+ /* Allow TMF response codes to propagate upwards */
+ res = tcs.dl_opcode;
+ break;
+ }
+ return res;
+out_err:
+ asd_ascb_free(ascb);
+ return res;
+}
+
+int asd_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
+
+ if (res == TMF_RESP_FUNC_COMPLETE)
+ asd_clear_nexus_I_T_L(dev, lun);
+ return res;
+}
+
+int asd_clear_aca(struct domain_device *dev, u8 *lun)
+{
+ int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
+
+ if (res == TMF_RESP_FUNC_COMPLETE)
+ asd_clear_nexus_I_T_L(dev, lun);
+ return res;
+}
+
+int asd_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
+
+ if (res == TMF_RESP_FUNC_COMPLETE)
+ asd_clear_nexus_I_T_L(dev, lun);
+ return res;
+}
+
+int asd_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
+
+ if (res == TMF_RESP_FUNC_COMPLETE)
+ asd_clear_nexus_I_T_L(dev, lun);
+ return res;
+}
+
+/**
+ * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
+ * task: pointer to sas_task struct of interest
+ *
+ * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
+ * or TMF_RESP_FUNC_SUCC if the task is in the task set.
+ *
+ * Normally the management layer sets the task to aborted state,
+ * and then calls query task and then abort task.
+ */
+int asd_query_task(struct sas_task *task)
+{
+ struct asd_ascb *ascb = task->lldd_task;
+ int index;
+
+ if (ascb) {
+ index = ascb->tc_index;
+ return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
+ TMF_QUERY_TASK, index);
+ }
+ return TMF_RESP_FUNC_COMPLETE;
+}
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
new file mode 100644
index 000000000..beea30e5a
--- /dev/null
+++ b/drivers/scsi/am53c974.c
@@ -0,0 +1,582 @@
+/*
+ * AMD am53c974 driver.
+ * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "am53c974"
+#define DRV_MODULE_VERSION "1.00"
+
+static bool am53c974_debug;
+static bool am53c974_fenab = true;
+
+#define esp_dma_log(f, a...) \
+ do { \
+ if (am53c974_debug) \
+ shost_printk(KERN_DEBUG, esp->host, f, ##a); \
+ } while (0)
+
+#define ESP_DMA_CMD 0x10
+#define ESP_DMA_STC 0x11
+#define ESP_DMA_SPA 0x12
+#define ESP_DMA_WBC 0x13
+#define ESP_DMA_WAC 0x14
+#define ESP_DMA_STATUS 0x15
+#define ESP_DMA_SMDLA 0x16
+#define ESP_DMA_WMAC 0x17
+
+#define ESP_DMA_CMD_IDLE 0x00
+#define ESP_DMA_CMD_BLAST 0x01
+#define ESP_DMA_CMD_ABORT 0x02
+#define ESP_DMA_CMD_START 0x03
+#define ESP_DMA_CMD_MASK 0x03
+#define ESP_DMA_CMD_DIAG 0x04
+#define ESP_DMA_CMD_MDL 0x10
+#define ESP_DMA_CMD_INTE_P 0x20
+#define ESP_DMA_CMD_INTE_D 0x40
+#define ESP_DMA_CMD_DIR 0x80
+
+#define ESP_DMA_STAT_PWDN 0x01
+#define ESP_DMA_STAT_ERROR 0x02
+#define ESP_DMA_STAT_ABORT 0x04
+#define ESP_DMA_STAT_DONE 0x08
+#define ESP_DMA_STAT_SCSIINT 0x10
+#define ESP_DMA_STAT_BCMPLT 0x20
+
+/* EEPROM is accessed with 16-bit values */
+#define DC390_EEPROM_READ 0x80
+#define DC390_EEPROM_LEN 0x40
+
+/*
+ * DC390 EEPROM
+ *
+ * 8 * 4 bytes of per-device options
+ * followed by HBA specific options
+ */
+
+/* Per-device options */
+#define DC390_EE_MODE1 0x00
+#define DC390_EE_SPEED 0x01
+
+/* HBA-specific options */
+#define DC390_EE_ADAPT_SCSI_ID 0x40
+#define DC390_EE_MODE2 0x41
+#define DC390_EE_DELAY 0x42
+#define DC390_EE_TAG_CMD_NUM 0x43
+
+#define DC390_EE_MODE1_PARITY_CHK 0x01
+#define DC390_EE_MODE1_SYNC_NEGO 0x02
+#define DC390_EE_MODE1_EN_DISC 0x04
+#define DC390_EE_MODE1_SEND_START 0x08
+#define DC390_EE_MODE1_TCQ 0x10
+
+#define DC390_EE_MODE2_MORE_2DRV 0x01
+#define DC390_EE_MODE2_GREATER_1G 0x02
+#define DC390_EE_MODE2_RST_SCSI_BUS 0x04
+#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08
+#define DC390_EE_MODE2_NO_SEEK 0x10
+#define DC390_EE_MODE2_LUN_CHECK 0x20
+
+struct pci_esp_priv {
+ struct esp *esp;
+ u8 dma_status;
+};
+
+static void pci_esp_dma_drain(struct esp *esp);
+
+static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
+{
+ struct pci_dev *pdev = esp->dev;
+
+ return pci_get_drvdata(pdev);
+}
+
+static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ iowrite8(val, esp->regs + (reg * 4UL));
+}
+
+static u8 pci_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return ioread8(esp->regs + (reg * 4UL));
+}
+
+static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
+{
+ return iowrite32(val, esp->regs + (reg * 4UL));
+}
+
+static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return pci_map_single(esp->dev, buf, sz, dir);
+}
+
+static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ return pci_map_sg(esp->dev, sg, num_sg, dir);
+}
+
+static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ pci_unmap_single(esp->dev, addr, sz, dir);
+}
+
+static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ pci_unmap_sg(esp->dev, sg, num_sg, dir);
+}
+
+static int pci_esp_irq_pending(struct esp *esp)
+{
+ struct pci_esp_priv *pep = pci_esp_get_priv(esp);
+
+ pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS);
+ esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status);
+
+ if (pep->dma_status & (ESP_DMA_STAT_ERROR |
+ ESP_DMA_STAT_ABORT |
+ ESP_DMA_STAT_DONE |
+ ESP_DMA_STAT_SCSIINT))
+ return 1;
+
+ return 0;
+}
+
+static void pci_esp_reset_dma(struct esp *esp)
+{
+ /* Nothing to do ? */
+}
+
+static void pci_esp_dma_drain(struct esp *esp)
+{
+ u8 resid;
+ int lim = 1000;
+
+
+ if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP ||
+ (esp->sreg & ESP_STAT_PMASK) == ESP_DIP)
+ /* Data-In or Data-Out, nothing to be done */
+ return;
+
+ while (--lim > 0) {
+ resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES;
+ if (resid <= 1)
+ break;
+ cpu_relax();
+ }
+
+ /*
+ * When there is a residual BCMPLT will never be set
+ * (obviously). But we still have to issue the BLAST
+ * command, otherwise the data will not being transferred.
+ * But we'll never know when the BLAST operation is
+ * finished. So check for some time and give up eventually.
+ */
+ lim = 1000;
+ pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD);
+ while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) {
+ if (--lim == 0)
+ break;
+ cpu_relax();
+ }
+ pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
+ esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);
+ /* BLAST residual handling is currently untested */
+ if (WARN_ON_ONCE(resid == 1)) {
+ struct esp_cmd_entry *ent = esp->active_cmd;
+
+ ent->flags |= ESP_CMD_FLAG_RESIDUAL;
+ }
+}
+
+static void pci_esp_dma_invalidate(struct esp *esp)
+{
+ struct pci_esp_priv *pep = pci_esp_get_priv(esp);
+
+ esp_dma_log("invalidate DMA\n");
+
+ pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
+ pep->dma_status = 0;
+}
+
+static int pci_esp_dma_error(struct esp *esp)
+{
+ struct pci_esp_priv *pep = pci_esp_get_priv(esp);
+
+ if (pep->dma_status & ESP_DMA_STAT_ERROR) {
+ u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD);
+
+ if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START)
+ pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD);
+
+ return 1;
+ }
+ if (pep->dma_status & ESP_DMA_STAT_ABORT) {
+ pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
+ pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD);
+ return 1;
+ }
+ return 0;
+}
+
+static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ struct pci_esp_priv *pep = pci_esp_get_priv(esp);
+ u32 val = 0;
+
+ BUG_ON(!(cmd & ESP_CMD_DMA));
+
+ pep->dma_status = 0;
+
+ /* Set DMA engine to IDLE */
+ if (write)
+ /* DMA write direction logic is inverted */
+ val |= ESP_DMA_CMD_DIR;
+ pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD);
+
+ pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ if (esp->config2 & ESP_CONFIG2_FENAB)
+ pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+ pci_esp_write32(esp, esp_count, ESP_DMA_STC);
+ pci_esp_write32(esp, addr, ESP_DMA_SPA);
+
+ esp_dma_log("start dma addr[%x] count[%d:%d]\n",
+ addr, esp_count, dma_count);
+
+ scsi_esp_cmd(esp, cmd);
+ /* Send DMA Start command */
+ pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD);
+}
+
+static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+ int dma_limit = 16;
+ u32 base, end;
+
+ /*
+ * If CONFIG2_FENAB is set we can
+ * handle up to 24 bit addresses
+ */
+ if (esp->config2 & ESP_CONFIG2_FENAB)
+ dma_limit = 24;
+
+ if (dma_len > (1U << dma_limit))
+ dma_len = (1U << dma_limit);
+
+ /*
+ * Prevent crossing a 24-bit address boundary.
+ */
+ base = dma_addr & ((1U << 24) - 1U);
+ end = base + dma_len;
+ if (end > (1U << 24))
+ end = (1U <<24);
+ dma_len = end - base;
+
+ return dma_len;
+}
+
+static const struct esp_driver_ops pci_esp_ops = {
+ .esp_write8 = pci_esp_write8,
+ .esp_read8 = pci_esp_read8,
+ .map_single = pci_esp_map_single,
+ .map_sg = pci_esp_map_sg,
+ .unmap_single = pci_esp_unmap_single,
+ .unmap_sg = pci_esp_unmap_sg,
+ .irq_pending = pci_esp_irq_pending,
+ .reset_dma = pci_esp_reset_dma,
+ .dma_drain = pci_esp_dma_drain,
+ .dma_invalidate = pci_esp_dma_invalidate,
+ .send_dma_cmd = pci_esp_send_dma_cmd,
+ .dma_error = pci_esp_dma_error,
+ .dma_length_limit = pci_esp_dma_length_limit,
+};
+
+/*
+ * Read DC-390 eeprom
+ */
+static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
+{
+ u8 carry_flag = 1, j = 0x80, bval;
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ if (carry_flag) {
+ pci_write_config_byte(pdev, 0x80, 0x40);
+ bval = 0xc0;
+ } else
+ bval = 0x80;
+
+ udelay(160);
+ pci_write_config_byte(pdev, 0x80, bval);
+ udelay(160);
+ pci_write_config_byte(pdev, 0x80, 0);
+ udelay(160);
+
+ carry_flag = (cmd & j) ? 1 : 0;
+ j >>= 1;
+ }
+}
+
+static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
+{
+ int i;
+ u16 wval = 0;
+ u8 bval;
+
+ for (i = 0; i < 16; i++) {
+ wval <<= 1;
+
+ pci_write_config_byte(pdev, 0x80, 0x80);
+ udelay(160);
+ pci_write_config_byte(pdev, 0x80, 0x40);
+ udelay(160);
+ pci_read_config_byte(pdev, 0x00, &bval);
+
+ if (bval == 0x22)
+ wval |= 1;
+ }
+
+ return wval;
+}
+
+static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
+{
+ u8 cmd = DC390_EEPROM_READ, i;
+
+ for (i = 0; i < DC390_EEPROM_LEN; i++) {
+ pci_write_config_byte(pdev, 0xc0, 0);
+ udelay(160);
+
+ dc390_eeprom_prepare_read(pdev, cmd++);
+ *ptr++ = dc390_eeprom_get_data(pdev);
+
+ pci_write_config_byte(pdev, 0x80, 0);
+ pci_write_config_byte(pdev, 0x80, 0);
+ udelay(160);
+ }
+}
+
+static void dc390_check_eeprom(struct esp *esp)
+{
+ u8 EEbuf[128];
+ u16 *ptr = (u16 *)EEbuf, wval = 0;
+ int i;
+
+ dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
+
+ for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
+ wval += *ptr;
+
+ /* no Tekram EEprom found */
+ if (wval != 0x1234) {
+ struct pci_dev *pdev = esp->dev;
+ dev_printk(KERN_INFO, &pdev->dev,
+ "No valid Tekram EEprom found\n");
+ return;
+ }
+ esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID];
+ esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM];
+ if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION)
+ esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE;
+}
+
+static int pci_esp_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct scsi_host_template *hostt = &scsi_esp_template;
+ int err = -ENODEV;
+ struct Scsi_Host *shost;
+ struct esp *esp;
+ struct pci_esp_priv *pep;
+
+ if (pci_enable_device(pdev)) {
+ dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n");
+ return -ENODEV;
+ }
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "failed to set 32bit DMA mask\n");
+ goto fail_disable_device;
+ }
+
+ shost = scsi_host_alloc(hostt, sizeof(struct esp));
+ if (!shost) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "failed to allocate scsi host\n");
+ err = -ENOMEM;
+ goto fail_disable_device;
+ }
+
+ pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL);
+ if (!pep) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "failed to allocate esp_priv\n");
+ err = -ENOMEM;
+ goto fail_host_alloc;
+ }
+
+ esp = shost_priv(shost);
+ esp->host = shost;
+ esp->dev = pdev;
+ esp->ops = &pci_esp_ops;
+ /*
+ * The am53c974 HBA has a design flaw of generating
+ * spurious DMA completion interrupts when using
+ * DMA for command submission.
+ */
+ esp->flags |= ESP_FLAG_USE_FIFO;
+ /*
+ * Enable CONFIG2_FENAB to allow for large DMA transfers
+ */
+ if (am53c974_fenab)
+ esp->config2 |= ESP_CONFIG2_FENAB;
+
+ pep->esp = esp;
+
+ if (pci_request_regions(pdev, DRV_MODULE_NAME)) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "pci memory selection failed\n");
+ goto fail_priv_alloc;
+ }
+
+ esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!esp->regs) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n");
+ err = -EINVAL;
+ goto fail_release_regions;
+ }
+ esp->dma_regs = esp->regs;
+
+ pci_set_master(pdev);
+
+ esp->command_block = pci_alloc_consistent(pdev, 16,
+ &esp->command_block_dma);
+ if (!esp->command_block) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to allocate command block\n");
+ err = -ENOMEM;
+ goto fail_unmap_regs;
+ }
+
+ pci_set_drvdata(pdev, pep);
+
+ err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
+ DRV_MODULE_NAME, esp);
+ if (err < 0) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n");
+ goto fail_unmap_command_block;
+ }
+
+ esp->scsi_id = 7;
+ dc390_check_eeprom(esp);
+
+ shost->this_id = esp->scsi_id;
+ shost->max_id = 8;
+ shost->irq = pdev->irq;
+ shost->io_port = pci_resource_start(pdev, 0);
+ shost->n_io_port = pci_resource_len(pdev, 0);
+ shost->unique_id = shost->io_port;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+ /* Assume 40MHz clock */
+ esp->cfreq = 40000000;
+
+ err = scsi_esp_register(esp, &pdev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(pdev->irq, esp);
+fail_unmap_command_block:
+ pci_set_drvdata(pdev, NULL);
+ pci_free_consistent(pdev, 16, esp->command_block,
+ esp->command_block_dma);
+fail_unmap_regs:
+ pci_iounmap(pdev, esp->regs);
+fail_release_regions:
+ pci_release_regions(pdev);
+fail_priv_alloc:
+ kfree(pep);
+fail_host_alloc:
+ scsi_host_put(shost);
+fail_disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci_esp_remove_one(struct pci_dev *pdev)
+{
+ struct pci_esp_priv *pep = pci_get_drvdata(pdev);
+ struct esp *esp = pep->esp;
+
+ scsi_esp_unregister(esp);
+ free_irq(pdev->irq, esp);
+ pci_set_drvdata(pdev, NULL);
+ pci_free_consistent(pdev, 16, esp->command_block,
+ esp->command_block_dma);
+ pci_iounmap(pdev, esp->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(pep);
+
+ scsi_host_put(esp->host);
+}
+
+static struct pci_device_id am53c974_pci_tbl[] = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl);
+
+static struct pci_driver am53c974_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = am53c974_pci_tbl,
+ .probe = pci_esp_probe_one,
+ .remove = pci_esp_remove_one,
+};
+
+static int __init am53c974_module_init(void)
+{
+ return pci_register_driver(&am53c974_driver);
+}
+
+static void __exit am53c974_module_exit(void)
+{
+ pci_unregister_driver(&am53c974_driver);
+}
+
+MODULE_DESCRIPTION("AM53C974 SCSI driver");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_ALIAS("tmscsim");
+
+module_param(am53c974_debug, bool, 0644);
+MODULE_PARM_DESC(am53c974_debug, "Enable debugging");
+
+module_param(am53c974_fenab, bool, 0444);
+MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
+
+module_init(am53c974_module_init);
+module_exit(am53c974_module_exit);
diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile
new file mode 100644
index 000000000..721aced39
--- /dev/null
+++ b/drivers/scsi/arcmsr/Makefile
@@ -0,0 +1,6 @@
+# File: drivers/arcmsr/Makefile
+# Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver.
+
+arcmsr-objs := arcmsr_attr.o arcmsr_hba.o
+
+obj-$(CONFIG_SCSI_ARCMSR) := arcmsr.o
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
new file mode 100644
index 000000000..3bcaaac0a
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -0,0 +1,814 @@
+/*
+*******************************************************************************
+** O.S : Linux
+** FILE NAME : arcmsr.h
+** BY : Nick Cheng
+** Description: SCSI RAID Device Driver for
+** ARECA RAID Host adapter
+*******************************************************************************
+** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved.
+**
+** Web site: www.areca.com.tw
+** E-mail: support@areca.com.tw
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License version 2 as
+** published by the Free Software Foundation.
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*******************************************************************************
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions
+** are met:
+** 1. Redistributions of source code must retain the above copyright
+** notice, this list of conditions and the following disclaimer.
+** 2. Redistributions in binary form must reproduce the above copyright
+** notice, this list of conditions and the following disclaimer in the
+** documentation and/or other materials provided with the distribution.
+** 3. The name of the author may not be used to endorse or promote products
+** derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
+** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
+** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+**(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
+** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************
+*/
+#include <linux/interrupt.h>
+struct device_attribute;
+/*The limit of outstanding scsi command that firmware can handle*/
+#ifdef CONFIG_XEN
+ #define ARCMSR_MAX_FREECCB_NUM 160
+#define ARCMSR_MAX_OUTSTANDING_CMD 155
+#else
+ #define ARCMSR_MAX_FREECCB_NUM 320
+#define ARCMSR_MAX_OUTSTANDING_CMD 255
+#endif
+#define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140919"
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
+#define ARCMSR_MAX_XFER_SECTORS_C 304
+#define ARCMSR_MAX_TARGETID 17
+#define ARCMSR_MAX_TARGETLUN 8
+#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
+#define ARCMSR_MAX_QBUFFER 4096
+#define ARCMSR_DEFAULT_SG_ENTRIES 38
+#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
+#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
+#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
+#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMST_NUM_MSIX_VECTORS 4
+#ifndef PCI_DEVICE_ID_ARECA_1880
+#define PCI_DEVICE_ID_ARECA_1880 0x1880
+ #endif
+#ifndef PCI_DEVICE_ID_ARECA_1214
+ #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#endif
+/*
+**********************************************************************************
+**
+**********************************************************************************
+*/
+#define ARC_SUCCESS 0
+#define ARC_FAILURE 1
+/*
+*******************************************************************************
+** split 64bits dma addressing
+*******************************************************************************
+*/
+#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
+/*
+*******************************************************************************
+** MESSAGE CONTROL CODE
+*******************************************************************************
+*/
+struct CMD_MESSAGE
+{
+ uint32_t HeaderLength;
+ uint8_t Signature[8];
+ uint32_t Timeout;
+ uint32_t ControlCode;
+ uint32_t ReturnCode;
+ uint32_t Length;
+};
+/*
+*******************************************************************************
+** IOP Message Transfer Data for user space
+*******************************************************************************
+*/
+#define ARCMSR_API_DATA_BUFLEN 1032
+struct CMD_MESSAGE_FIELD
+{
+ struct CMD_MESSAGE cmdmessage;
+ uint8_t messagedatabuffer[ARCMSR_API_DATA_BUFLEN];
+};
+/* IOP message transfer */
+#define ARCMSR_MESSAGE_FAIL 0x0001
+/* DeviceType */
+#define ARECA_SATA_RAID 0x90000000
+/* FunctionCode */
+#define FUNCTION_READ_RQBUFFER 0x0801
+#define FUNCTION_WRITE_WQBUFFER 0x0802
+#define FUNCTION_CLEAR_RQBUFFER 0x0803
+#define FUNCTION_CLEAR_WQBUFFER 0x0804
+#define FUNCTION_CLEAR_ALLQBUFFER 0x0805
+#define FUNCTION_RETURN_CODE_3F 0x0806
+#define FUNCTION_SAY_HELLO 0x0807
+#define FUNCTION_SAY_GOODBYE 0x0808
+#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_HARDWARE_RESET 0x080B
+/* ARECA IO CONTROL CODE*/
+#define ARCMSR_MESSAGE_READ_RQBUFFER \
+ ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
+#define ARCMSR_MESSAGE_WRITE_WQBUFFER \
+ ARECA_SATA_RAID | FUNCTION_WRITE_WQBUFFER
+#define ARCMSR_MESSAGE_CLEAR_RQBUFFER \
+ ARECA_SATA_RAID | FUNCTION_CLEAR_RQBUFFER
+#define ARCMSR_MESSAGE_CLEAR_WQBUFFER \
+ ARECA_SATA_RAID | FUNCTION_CLEAR_WQBUFFER
+#define ARCMSR_MESSAGE_CLEAR_ALLQBUFFER \
+ ARECA_SATA_RAID | FUNCTION_CLEAR_ALLQBUFFER
+#define ARCMSR_MESSAGE_RETURN_CODE_3F \
+ ARECA_SATA_RAID | FUNCTION_RETURN_CODE_3F
+#define ARCMSR_MESSAGE_SAY_HELLO \
+ ARECA_SATA_RAID | FUNCTION_SAY_HELLO
+#define ARCMSR_MESSAGE_SAY_GOODBYE \
+ ARECA_SATA_RAID | FUNCTION_SAY_GOODBYE
+#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \
+ ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE
+/* ARECA IOCTL ReturnCode */
+#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
+#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
+#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
+#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088
+/*
+*************************************************************
+** structure for holding DMA address data
+*************************************************************
+*/
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
+struct SG32ENTRY
+{
+ __le32 length;
+ __le32 address;
+}__attribute__ ((packed));
+struct SG64ENTRY
+{
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
+}__attribute__ ((packed));
+/*
+********************************************************************
+** Q Buffer of IOP Message Transfer
+********************************************************************
+*/
+struct QBUFFER
+{
+ uint32_t data_len;
+ uint8_t data[124];
+};
+/*
+*******************************************************************************
+** FIRMWARE INFO for Intel IOP R 80331 processor (Type A)
+*******************************************************************************
+*/
+struct FIRMWARE_INFO
+{
+ uint32_t signature; /*0, 00-03*/
+ uint32_t request_len; /*1, 04-07*/
+ uint32_t numbers_queue; /*2, 08-11*/
+ uint32_t sdram_size; /*3, 12-15*/
+ uint32_t ide_channels; /*4, 16-19*/
+ char vendor[40]; /*5, 20-59*/
+ char model[8]; /*15, 60-67*/
+ char firmware_ver[16]; /*17, 68-83*/
+ char device_map[16]; /*21, 84-99*/
+ uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ uint8_t cfgSerial[16]; /*26,104-119*/
+ uint32_t cfgPicStatus; /*30,120-123*/
+};
+/* signature of set and get firmware config */
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+/* message code of inbound message register */
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+/* doorbell interrupt generator */
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+/* ccb areca cdb flag */
+#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
+/* outbound firmware ok */
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+/* ARC-1680 Bus Reset*/
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+/* ARC-1880 Bus Reset*/
+#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
+#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
+
+/*
+************************************************************************
+** SPEC. for Areca Type B adapter
+************************************************************************
+*/
+/* ARECA HBB COMMAND for its FIRMWARE */
+/* window of "instruction flags" from driver to iop */
+#define ARCMSR_DRV2IOP_DOORBELL 0x00020400
+#define ARCMSR_DRV2IOP_DOORBELL_MASK 0x00020404
+/* window of "instruction flags" from iop to driver */
+#define ARCMSR_IOP2DRV_DOORBELL 0x00020408
+#define ARCMSR_IOP2DRV_DOORBELL_MASK 0x0002040C
+/* ARECA FLAG LANGUAGE */
+/* ioctl transfer */
+#define ARCMSR_IOP2DRV_DATA_WRITE_OK 0x00000001
+/* ioctl transfer */
+#define ARCMSR_IOP2DRV_DATA_READ_OK 0x00000002
+#define ARCMSR_IOP2DRV_CDB_DONE 0x00000004
+#define ARCMSR_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_DOORBELL_HANDLE_INT 0x0000000F
+#define ARCMSR_DOORBELL_INT_CLEAR_PATTERN 0xFF00FFF0
+#define ARCMSR_MESSAGE_INT_CLEAR_PATTERN 0xFF00FFF7
+/* (ARCMSR_INBOUND_MESG0_GET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_GET_CONFIG 0x00010008
+/* (ARCMSR_INBOUND_MESG0_SET_CONFIG<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_SET_CONFIG 0x00020008
+/* (ARCMSR_INBOUND_MESG0_ABORT_CMD<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_ABORT_CMD 0x00030008
+/* (ARCMSR_INBOUND_MESG0_STOP_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_STOP_BGRB 0x00040008
+/* (ARCMSR_INBOUND_MESG0_FLUSH_CACHE<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
+/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
+#define ARCMSR_MESSAGE_START_BGRB 0x00060008
+#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
+#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
+#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
+/* ioctl transfer */
+#define ARCMSR_DRV2IOP_DATA_WRITE_OK 0x00000001
+/* ioctl transfer */
+#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
+#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
+#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
+
+/* data tunnel buffer between user space program and its firmware */
+/* user space data to iop 128bytes */
+#define ARCMSR_MESSAGE_WBUFFER 0x0000fe00
+/* iop data to user space 128bytes */
+#define ARCMSR_MESSAGE_RBUFFER 0x0000ff00
+/* iop message_rwbuffer for message command */
+#define ARCMSR_MESSAGE_RWBUFFER 0x0000fa00
+/*
+************************************************************************
+** SPEC. for Areca HBC adapter
+************************************************************************
+*/
+#define ARCMSR_HBC_ISR_THROTTLING_LEVEL 12
+#define ARCMSR_HBC_ISR_MAX_DONE_QUEUE 20
+/* Host Interrupt Mask */
+#define ARCMSR_HBCMU_UTILITY_A_ISR_MASK 0x00000001 /* When clear, the Utility_A interrupt routes to the host.*/
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK 0x00000004 /* When clear, the General Outbound Doorbell interrupt routes to the host.*/
+#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
+#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
+/* Host Interrupt Status */
+#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
+ /*
+ ** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
+ ** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
+ */
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
+ /*
+ ** Set if Outbound Doorbell register bits 30:1 have a non-zero
+ ** value. This bit clears only when Outbound Doorbell bits
+ ** 30:1 are ALL clear. Only a write to the Outbound Doorbell
+ ** Clear register clears bits in the Outbound Doorbell register.
+ */
+#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
+ /*
+ ** Set whenever the Outbound Post List Producer/Consumer
+ ** Register (FIFO) is not empty. It clears when the Outbound
+ ** Post List FIFO is empty.
+ */
+#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
+ /*
+ ** This bit indicates a SAS interrupt from a source external to
+ ** the PCIe core. This bit is not maskable.
+ */
+ /* DoorBell*/
+#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
+ /*inbound message 0 ready*/
+#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+ /*more than 12 request completed in a time*/
+#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
+ /*outbound DATA WRITE isr door bell clear*/
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
+ /*outbound DATA READ isr door bell clear*/
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
+ /*outbound message 0 ready*/
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+ /*outbound message cmd isr door bell clear*/
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
+ /*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
+#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000
+/*
+*******************************************************************************
+** SPEC. for Areca Type D adapter
+*******************************************************************************
+*/
+#define ARCMSR_ARC1214_CHIP_ID 0x00004
+#define ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION 0x00008
+#define ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK 0x00034
+#define ARCMSR_ARC1214_SAMPLE_RESET 0x00100
+#define ARCMSR_ARC1214_RESET_REQUEST 0x00108
+#define ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS 0x00200
+#define ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE 0x0020C
+#define ARCMSR_ARC1214_INBOUND_MESSAGE0 0x00400
+#define ARCMSR_ARC1214_INBOUND_MESSAGE1 0x00404
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE0 0x00420
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE1 0x00424
+#define ARCMSR_ARC1214_INBOUND_DOORBELL 0x00460
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL 0x00480
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE 0x00484
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW 0x01000
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH 0x01004
+#define ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER 0x01018
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW 0x01060
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH 0x01064
+#define ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER 0x0106C
+#define ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER 0x01070
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE 0x01088
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE 0x0108C
+#define ARCMSR_ARC1214_MESSAGE_WBUFFER 0x02000
+#define ARCMSR_ARC1214_MESSAGE_RBUFFER 0x02100
+#define ARCMSR_ARC1214_MESSAGE_RWBUFFER 0x02200
+/* Host Interrupt Mask */
+#define ARCMSR_ARC1214_ALL_INT_ENABLE 0x00001010
+#define ARCMSR_ARC1214_ALL_INT_DISABLE 0x00000000
+/* Host Interrupt Status */
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR 0x00001000
+#define ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR 0x00000010
+/* DoorBell*/
+#define ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY 0x00000001
+#define ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ 0x00000002
+/*inbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 0x00000001
+/*outbound DATA WRITE isr door bell clear*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 0x00000002
+/*outbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE 0x02000000
+/*outbound message cmd isr door bell clear*/
+/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
+#define ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK 0x80000000
+#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
+/*
+*******************************************************************************
+** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
+*******************************************************************************
+*/
+struct ARCMSR_CDB
+{
+ uint8_t Bus;
+ uint8_t TargetID;
+ uint8_t LUN;
+ uint8_t Function;
+ uint8_t CdbLength;
+ uint8_t sgcount;
+ uint8_t Flags;
+#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
+#define ARCMSR_CDB_FLAG_BIOS 0x02
+#define ARCMSR_CDB_FLAG_WRITE 0x04
+#define ARCMSR_CDB_FLAG_SIMPLEQ 0x00
+#define ARCMSR_CDB_FLAG_HEADQ 0x08
+#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
+
+ uint8_t msgPages;
+ uint32_t msgContext;
+ uint32_t DataLength;
+ uint8_t Cdb[16];
+ uint8_t DeviceStatus;
+#define ARCMSR_DEV_CHECK_CONDITION 0x02
+#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
+#define ARCMSR_DEV_ABORTED 0xF1
+#define ARCMSR_DEV_INIT_FAIL 0xF2
+
+ uint8_t SenseData[15];
+ union
+ {
+ struct SG32ENTRY sg32entry[1];
+ struct SG64ENTRY sg64entry[1];
+ } u;
+};
+/*
+*******************************************************************************
+** Messaging Unit (MU) of the Intel R 80331 I/O processor(Type A) and Type B processor
+*******************************************************************************
+*/
+struct MessageUnit_A
+{
+ uint32_t resrved0[4]; /*0000 000F*/
+ uint32_t inbound_msgaddr0; /*0010 0013*/
+ uint32_t inbound_msgaddr1; /*0014 0017*/
+ uint32_t outbound_msgaddr0; /*0018 001B*/
+ uint32_t outbound_msgaddr1; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t inbound_intstatus; /*0024 0027*/
+ uint32_t inbound_intmask; /*0028 002B*/
+ uint32_t outbound_doorbell; /*002C 002F*/
+ uint32_t outbound_intstatus; /*0030 0033*/
+ uint32_t outbound_intmask; /*0034 0037*/
+ uint32_t reserved1[2]; /*0038 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t reserved2[2]; /*0048 004F*/
+ uint32_t reserved3[492]; /*0050 07FF 492*/
+ uint32_t reserved4[128]; /*0800 09FF 128*/
+ uint32_t message_rwbuffer[256]; /*0a00 0DFF 256*/
+ uint32_t message_wbuffer[32]; /*0E00 0E7F 32*/
+ uint32_t reserved5[32]; /*0E80 0EFF 32*/
+ uint32_t message_rbuffer[32]; /*0F00 0F7F 32*/
+ uint32_t reserved6[32]; /*0F80 0FFF 32*/
+};
+
+struct MessageUnit_B
+{
+ uint32_t post_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
+ uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
+ uint32_t postq_index;
+ uint32_t doneq_index;
+ uint32_t __iomem *drv2iop_doorbell;
+ uint32_t __iomem *drv2iop_doorbell_mask;
+ uint32_t __iomem *iop2drv_doorbell;
+ uint32_t __iomem *iop2drv_doorbell_mask;
+ uint32_t __iomem *message_rwbuffer;
+ uint32_t __iomem *message_wbuffer;
+ uint32_t __iomem *message_rbuffer;
+};
+/*
+*********************************************************************
+** LSI
+*********************************************************************
+*/
+struct MessageUnit_C{
+ uint32_t message_unit_status; /*0000 0003*/
+ uint32_t slave_error_attribute; /*0004 0007*/
+ uint32_t slave_error_address; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t outbound_free_list_index; /*0068 006B*/
+ uint32_t outbound_post_list_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[34]; /*0100 0187*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t message_wbuffer[32]; /*2000 207F*/
+ uint32_t reserved3[32]; /*2080 20FF*/
+ uint32_t message_rbuffer[32]; /*2100 217F*/
+ uint32_t reserved4[32]; /*2180 21FF*/
+ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+};
+/*
+*********************************************************************
+** Messaging Unit (MU) of Type D processor
+*********************************************************************
+*/
+struct InBound_SRB {
+ uint32_t addressLow; /* pointer to SRB block */
+ uint32_t addressHigh;
+ uint32_t length; /* in DWORDs */
+ uint32_t reserved0;
+};
+
+struct OutBound_SRB {
+ uint32_t addressLow; /* pointer to SRB block */
+ uint32_t addressHigh;
+};
+
+struct MessageUnit_D {
+ struct InBound_SRB post_qbuffer[ARCMSR_MAX_ARC1214_POSTQUEUE];
+ volatile struct OutBound_SRB
+ done_qbuffer[ARCMSR_MAX_ARC1214_DONEQUEUE];
+ u16 postq_index;
+ volatile u16 doneq_index;
+ u32 __iomem *chip_id; /* 0x00004 */
+ u32 __iomem *cpu_mem_config; /* 0x00008 */
+ u32 __iomem *i2o_host_interrupt_mask; /* 0x00034 */
+ u32 __iomem *sample_at_reset; /* 0x00100 */
+ u32 __iomem *reset_request; /* 0x00108 */
+ u32 __iomem *host_int_status; /* 0x00200 */
+ u32 __iomem *pcief0_int_enable; /* 0x0020C */
+ u32 __iomem *inbound_msgaddr0; /* 0x00400 */
+ u32 __iomem *inbound_msgaddr1; /* 0x00404 */
+ u32 __iomem *outbound_msgaddr0; /* 0x00420 */
+ u32 __iomem *outbound_msgaddr1; /* 0x00424 */
+ u32 __iomem *inbound_doorbell; /* 0x00460 */
+ u32 __iomem *outbound_doorbell; /* 0x00480 */
+ u32 __iomem *outbound_doorbell_enable; /* 0x00484 */
+ u32 __iomem *inboundlist_base_low; /* 0x01000 */
+ u32 __iomem *inboundlist_base_high; /* 0x01004 */
+ u32 __iomem *inboundlist_write_pointer; /* 0x01018 */
+ u32 __iomem *outboundlist_base_low; /* 0x01060 */
+ u32 __iomem *outboundlist_base_high; /* 0x01064 */
+ u32 __iomem *outboundlist_copy_pointer; /* 0x0106C */
+ u32 __iomem *outboundlist_read_pointer; /* 0x01070 0x01072 */
+ u32 __iomem *outboundlist_interrupt_cause; /* 0x1088 */
+ u32 __iomem *outboundlist_interrupt_enable; /* 0x108C */
+ u32 __iomem *message_wbuffer; /* 0x2000 */
+ u32 __iomem *message_rbuffer; /* 0x2100 */
+ u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
+};
+/*
+*******************************************************************************
+** Adapter Control Block
+*******************************************************************************
+*/
+struct AdapterControlBlock
+{
+ uint32_t adapter_type; /* adapter A,B..... */
+ #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
+ #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
+ #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
+ #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
+ u32 roundup_ccbsize;
+ struct pci_dev * pdev;
+ struct Scsi_Host * host;
+ unsigned long vir2phy_offset;
+ struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
+ /* Offset is used in making arc cdb physical to virtual calculations */
+ uint32_t outbound_int_enable;
+ uint32_t cdb_phyaddr_hi32;
+ uint32_t reg_mu_acc_handle0;
+ spinlock_t eh_lock;
+ spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
+ union {
+ struct MessageUnit_A __iomem *pmuA;
+ struct MessageUnit_B *pmuB;
+ struct MessageUnit_C __iomem *pmuC;
+ struct MessageUnit_D *pmuD;
+ };
+ /* message unit ATU inbound base address0 */
+ void __iomem *mem_base0;
+ void __iomem *mem_base1;
+ uint32_t acb_flags;
+ u16 dev_id;
+ uint8_t adapter_index;
+ #define ACB_F_SCSISTOPADAPTER 0x0001
+ #define ACB_F_MSG_STOP_BGRB 0x0002
+ /* stop RAID background rebuild */
+ #define ACB_F_MSG_START_BGRB 0x0004
+ /* stop RAID background rebuild */
+ #define ACB_F_IOPDATA_OVERFLOW 0x0008
+ /* iop message data rqbuffer overflow */
+ #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
+ /* message clear wqbuffer */
+ #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
+ /* message clear rqbuffer */
+ #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
+ #define ACB_F_BUS_RESET 0x0080
+ #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
+
+ #define ACB_F_IOP_INITED 0x0100
+ /* iop init */
+ #define ACB_F_ABORT 0x0200
+ #define ACB_F_FIRMWARE_TRAP 0x0400
+ #define ACB_F_MSI_ENABLED 0x1000
+ #define ACB_F_MSIX_ENABLED 0x2000
+ struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
+ /* used for memory free */
+ struct list_head ccb_free_list;
+ /* head of free ccb list */
+
+ atomic_t ccboutstandingcount;
+ /*The present outstanding command number that in the IOP that
+ waiting for being handled by FW*/
+
+ void * dma_coherent;
+ /* dma_coherent used for memory free */
+ dma_addr_t dma_coherent_handle;
+ /* dma_coherent_handle used for memory free */
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
+ unsigned int uncache_size;
+ uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
+ /* data collection buffer for read from 80331 */
+ int32_t rqbuf_getIndex;
+ /* first of read buffer */
+ int32_t rqbuf_putIndex;
+ /* last of read buffer */
+ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
+ /* data collection buffer for write to 80331 */
+ int32_t wqbuf_getIndex;
+ /* first of write buffer */
+ int32_t wqbuf_putIndex;
+ /* last of write buffer */
+ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
+ /* id0 ..... id15, lun0...lun7 */
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
+ uint32_t num_resets;
+ uint32_t num_aborts;
+ uint32_t signature;
+ uint32_t firm_request_len;
+ uint32_t firm_numbers_queue;
+ uint32_t firm_sdram_size;
+ uint32_t firm_hd_channels;
+ uint32_t firm_cfg_version;
+ char firm_model[12];
+ char firm_version[20];
+ char device_map[20]; /*21,84-99*/
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
+ unsigned short fw_flag;
+ #define FW_NORMAL 0x0000
+ #define FW_BOG 0x0001
+ #define FW_DEADLOCK 0x0010
+ atomic_t rq_map_token;
+ atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int msix_vector_count;
+};/* HW_DEVICE_EXTENSION */
+/*
+*******************************************************************************
+** Command Control Block
+** this CCB length must be 32 bytes boundary
+*******************************************************************************
+*/
+struct CommandControlBlock{
+ /*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
+ struct list_head list; /*x32: 8byte, x64: 16byte*/
+ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
+ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
+ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
+ #define CCB_FLAG_READ 0x0000
+ #define CCB_FLAG_WRITE 0x0001
+ #define CCB_FLAG_ERROR 0x0002
+ #define CCB_FLAG_FLUSHCACHE 0x0004
+ #define CCB_FLAG_MASTER_ABORTED 0x0008
+ uint16_t startdone; /*x32:2byte,x32:2byte*/
+ #define ARCMSR_CCB_DONE 0x0000
+ #define ARCMSR_CCB_START 0x55AA
+ #define ARCMSR_CCB_ABORTED 0xAA55
+ #define ARCMSR_CCB_ILLEGAL 0xFFFF
+ #if BITS_PER_LONG == 64
+ /* ======================512+64 bytes======================== */
+ uint32_t reserved[5]; /*24 byte*/
+ #else
+ /* ======================512+32 bytes======================== */
+ uint32_t reserved; /*8 byte*/
+ #endif
+ /* ======================================================= */
+ struct ARCMSR_CDB arcmsr_cdb;
+};
+/*
+*******************************************************************************
+** ARECA SCSI sense data
+*******************************************************************************
+*/
+struct SENSE_DATA
+{
+ uint8_t ErrorCode:7;
+#define SCSI_SENSE_CURRENT_ERRORS 0x70
+#define SCSI_SENSE_DEFERRED_ERRORS 0x71
+ uint8_t Valid:1;
+ uint8_t SegmentNumber;
+ uint8_t SenseKey:4;
+ uint8_t Reserved:1;
+ uint8_t IncorrectLength:1;
+ uint8_t EndOfMedia:1;
+ uint8_t FileMark:1;
+ uint8_t Information[4];
+ uint8_t AdditionalSenseLength;
+ uint8_t CommandSpecificInformation[4];
+ uint8_t AdditionalSenseCode;
+ uint8_t AdditionalSenseCodeQualifier;
+ uint8_t FieldReplaceableUnitCode;
+ uint8_t SenseKeySpecific[3];
+};
+/*
+*******************************************************************************
+** Outbound Interrupt Status Register - OISR
+*******************************************************************************
+*/
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
+#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
+#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
+ (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
+ |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
+ |ARCMSR_MU_OUTBOUND_DOORBELL_INT \
+ |ARCMSR_MU_OUTBOUND_POSTQUEUE_INT \
+ |ARCMSR_MU_OUTBOUND_PCI_INT)
+/*
+*******************************************************************************
+** Outbound Interrupt Mask Register - OIMR
+*******************************************************************************
+*/
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
+#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
+#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
+
+extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
+extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
+ struct QBUFFER __iomem *);
+extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
+extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
+extern struct device_attribute *arcmsr_host_attrs[];
+extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
+void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
new file mode 100644
index 000000000..9c86481f7
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -0,0 +1,404 @@
+/*
+*******************************************************************************
+** O.S : Linux
+** FILE NAME : arcmsr_attr.c
+** BY : Nick Cheng
+** Description: attributes exported to sysfs and device host
+*******************************************************************************
+** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
+**
+** Web site: www.areca.com.tw
+** E-mail: support@areca.com.tw
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License version 2 as
+** published by the Free Software Foundation.
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*******************************************************************************
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions
+** are met:
+** 1. Redistributions of source code must retain the above copyright
+** notice, this list of conditions and the following disclaimer.
+** 2. Redistributions in binary form must reproduce the above copyright
+** notice, this list of conditions and the following disclaimer in the
+** documentation and/or other materials provided with the distribution.
+** 3. The name of the author may not be used to endorse or promote products
+** derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
+** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
+** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
+** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************
+** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+*******************************************************************************
+*/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/circ_buf.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include "arcmsr.h"
+
+struct device_attribute *arcmsr_host_attrs[];
+
+static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin,
+ char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj,struct device,kobj);
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ uint8_t *ptmpQbuffer;
+ int32_t allxfer_len = 0;
+ unsigned long flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ /* do message unit read. */
+ ptmpQbuffer = (uint8_t *)buf;
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+ unsigned int tail = acb->rqbuf_getIndex;
+ unsigned int head = acb->rqbuf_putIndex;
+ unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+ allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+ if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+ allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+ if (allxfer_len <= cnt_to_end)
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+ else {
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+ memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+ }
+ acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
+ }
+ if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+ struct QBUFFER __iomem *prbuffer;
+ acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
+ prbuffer = arcmsr_get_iop_rqbuffer(acb);
+ if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+ }
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ return allxfer_len;
+}
+
+static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin,
+ char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj,struct device,kobj);
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ int32_t user_len, cnt2end;
+ uint8_t *pQbuffer, *ptmpuserbuffer;
+ unsigned long flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (count > ARCMSR_API_DATA_BUFLEN)
+ return -EINVAL;
+ /* do message unit write. */
+ ptmpuserbuffer = (uint8_t *)buf;
+ user_len = (int32_t)count;
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
+ arcmsr_write_ioctldata2iop(acb);
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ return 0; /*need retry*/
+ } else {
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+ cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+ if (user_len > cnt2end) {
+ memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+ ptmpuserbuffer += cnt2end;
+ user_len -= cnt2end;
+ acb->wqbuf_putIndex = 0;
+ pQbuffer = acb->wqbuffer;
+ }
+ memcpy(pQbuffer, ptmpuserbuffer, user_len);
+ acb->wqbuf_putIndex += user_len;
+ acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+ acb->acb_flags &=
+ ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
+ arcmsr_write_ioctldata2iop(acb);
+ }
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ return count;
+ }
+}
+
+static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin,
+ char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj,struct device,kobj);
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ uint8_t *pQbuffer;
+ unsigned long flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ arcmsr_clear_iop2drv_rqueue_buffer(acb);
+ acb->acb_flags |=
+ (ACB_F_MESSAGE_WQBUFFER_CLEARED
+ | ACB_F_MESSAGE_RQBUFFER_CLEARED
+ | ACB_F_MESSAGE_WQBUFFER_READED);
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->wqbuf_getIndex = 0;
+ acb->wqbuf_putIndex = 0;
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ pQbuffer = acb->rqbuffer;
+ memset(pQbuffer, 0, sizeof (struct QBUFFER));
+ pQbuffer = acb->wqbuffer;
+ memset(pQbuffer, 0, sizeof (struct QBUFFER));
+ return 1;
+}
+
+static struct bin_attribute arcmsr_sysfs_message_read_attr = {
+ .attr = {
+ .name = "mu_read",
+ .mode = S_IRUSR ,
+ },
+ .size = ARCMSR_API_DATA_BUFLEN,
+ .read = arcmsr_sysfs_iop_message_read,
+};
+
+static struct bin_attribute arcmsr_sysfs_message_write_attr = {
+ .attr = {
+ .name = "mu_write",
+ .mode = S_IWUSR,
+ },
+ .size = ARCMSR_API_DATA_BUFLEN,
+ .write = arcmsr_sysfs_iop_message_write,
+};
+
+static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
+ .attr = {
+ .name = "mu_clear",
+ .mode = S_IWUSR,
+ },
+ .size = 1,
+ .write = arcmsr_sysfs_iop_message_clear,
+};
+
+int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
+{
+ struct Scsi_Host *host = acb->host;
+ int error;
+
+ error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
+ if (error) {
+ printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
+ goto error_bin_file_message_read;
+ }
+ error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
+ if (error) {
+ printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
+ goto error_bin_file_message_write;
+ }
+ error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr);
+ if (error) {
+ printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
+ goto error_bin_file_message_clear;
+ }
+ return 0;
+error_bin_file_message_clear:
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
+error_bin_file_message_write:
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
+error_bin_file_message_read:
+ return error;
+}
+
+void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb)
+{
+ struct Scsi_Host *host = acb->host;
+
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr);
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
+ sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
+}
+
+
+static ssize_t
+arcmsr_attr_host_driver_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "%s\n",
+ ARCMSR_DRIVER_VERSION);
+}
+
+static ssize_t
+arcmsr_attr_host_driver_posted_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ atomic_read(&acb->ccboutstandingcount));
+}
+
+static ssize_t
+arcmsr_attr_host_driver_reset(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ acb->num_resets);
+}
+
+static ssize_t
+arcmsr_attr_host_driver_abort(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ acb->num_aborts);
+}
+
+static ssize_t
+arcmsr_attr_host_fw_model(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+ return snprintf(buf, PAGE_SIZE,
+ "%s\n",
+ acb->firm_model);
+}
+
+static ssize_t
+arcmsr_attr_host_fw_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+
+ return snprintf(buf, PAGE_SIZE,
+ "%s\n",
+ acb->firm_version);
+}
+
+static ssize_t
+arcmsr_attr_host_fw_request_len(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ acb->firm_request_len);
+}
+
+static ssize_t
+arcmsr_attr_host_fw_numbers_queue(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ acb->firm_numbers_queue);
+}
+
+static ssize_t
+arcmsr_attr_host_fw_sdram_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ acb->firm_sdram_size);
+}
+
+static ssize_t
+arcmsr_attr_host_fw_hd_channels(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+
+ return snprintf(buf, PAGE_SIZE,
+ "%4d\n",
+ acb->firm_hd_channels);
+}
+
+static DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
+static DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
+static DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
+static DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
+static DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
+static DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
+static DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
+static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
+static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
+static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
+
+struct device_attribute *arcmsr_host_attrs[] = {
+ &dev_attr_host_driver_version,
+ &dev_attr_host_driver_posted_cmd,
+ &dev_attr_host_driver_reset,
+ &dev_attr_host_driver_abort,
+ &dev_attr_host_fw_model,
+ &dev_attr_host_fw_version,
+ &dev_attr_host_fw_request_len,
+ &dev_attr_host_fw_numbers_queue,
+ &dev_attr_host_fw_sdram_size,
+ &dev_attr_host_fw_hd_channels,
+ NULL,
+};
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
new file mode 100644
index 000000000..914c39f9f
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -0,0 +1,4027 @@
+/*
+*******************************************************************************
+** O.S : Linux
+** FILE NAME : arcmsr_hba.c
+** BY : Nick Cheng, C.L. Huang
+** Description: SCSI RAID Device Driver for Areca RAID Controller
+*******************************************************************************
+** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
+**
+** Web site: www.areca.com.tw
+** E-mail: support@areca.com.tw
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License version 2 as
+** published by the Free Software Foundation.
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*******************************************************************************
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions
+** are met:
+** 1. Redistributions of source code must retain the above copyright
+** notice, this list of conditions and the following disclaimer.
+** 2. Redistributions in binary form must reproduce the above copyright
+** notice, this list of conditions and the following disclaimer in the
+** documentation and/or other materials provided with the distribution.
+** 3. The name of the author may not be used to endorse or promote products
+** derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
+** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
+** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
+** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************
+** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+*******************************************************************************
+*/
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/pci_ids.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/circ_buf.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsicam.h>
+#include "arcmsr.h"
+MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
+MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ARCMSR_DRIVER_VERSION);
+
+#define ARCMSR_SLEEPTIME 10
+#define ARCMSR_RETRYCOUNT 12
+
+static wait_queue_head_t wait_q;
+static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ struct scsi_cmnd *cmd);
+static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
+static int arcmsr_abort(struct scsi_cmnd *);
+static int arcmsr_bus_reset(struct scsi_cmnd *);
+static int arcmsr_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int *info);
+static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static int arcmsr_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
+static int arcmsr_resume(struct pci_dev *pdev);
+static void arcmsr_remove(struct pci_dev *pdev);
+static void arcmsr_shutdown(struct pci_dev *pdev);
+static void arcmsr_iop_init(struct AdapterControlBlock *acb);
+static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
+static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
+ u32 intmask_org);
+static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
+static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_message_isr_bh_fn(struct work_struct *work);
+static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
+static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
+static const char *arcmsr_info(struct Scsi_Host *);
+static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
+static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
+static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+ if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
+ queue_depth = ARCMSR_MAX_CMD_PERLUN;
+ return scsi_change_queue_depth(sdev, queue_depth);
+}
+
+static struct scsi_host_template arcmsr_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "Areca SAS/SATA RAID driver",
+ .info = arcmsr_info,
+ .queuecommand = arcmsr_queue_command,
+ .eh_abort_handler = arcmsr_abort,
+ .eh_bus_reset_handler = arcmsr_bus_reset,
+ .bios_param = arcmsr_bios_param,
+ .change_queue_depth = arcmsr_adjust_disk_queue_depth,
+ .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
+ .this_id = ARCMSR_SCSI_INITIATOR_ID,
+ .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
+ .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
+ .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = arcmsr_host_attrs,
+ .no_write_same = 1,
+};
+
+static struct pci_device_id arcmsr_device_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
+ .driver_data = ACB_ADAPTER_TYPE_B},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
+ .driver_data = ACB_ADAPTER_TYPE_B},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
+ .driver_data = ACB_ADAPTER_TYPE_B},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
+ .driver_data = ACB_ADAPTER_TYPE_D},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
+ .driver_data = ACB_ADAPTER_TYPE_C},
+ {0, 0}, /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
+
+static struct pci_driver arcmsr_pci_driver = {
+ .name = "arcmsr",
+ .id_table = arcmsr_device_id_table,
+ .probe = arcmsr_probe,
+ .remove = arcmsr_remove,
+ .suspend = arcmsr_suspend,
+ .resume = arcmsr_resume,
+ .shutdown = arcmsr_shutdown,
+};
+/*
+****************************************************************************
+****************************************************************************
+*/
+
+static void arcmsr_free_mu(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_B:
+ case ACB_ADAPTER_TYPE_D: {
+ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ acb->dma_coherent2, acb->dma_coherent_handle2);
+ break;
+ }
+ }
+}
+
+static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
+{
+ struct pci_dev *pdev = acb->pdev;
+ switch (acb->adapter_type){
+ case ACB_ADAPTER_TYPE_A:{
+ acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
+ if (!acb->pmuA) {
+ printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+ return false;
+ }
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B:{
+ void __iomem *mem_base0, *mem_base1;
+ mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!mem_base0) {
+ printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+ return false;
+ }
+ mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
+ if (!mem_base1) {
+ iounmap(mem_base0);
+ printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+ return false;
+ }
+ acb->mem_base0 = mem_base0;
+ acb->mem_base1 = mem_base1;
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C:{
+ acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+ if (!acb->pmuC) {
+ printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+ return false;
+ }
+ if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
+ return true;
+ }
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ void __iomem *mem_base0;
+ unsigned long addr, range, flags;
+
+ addr = (unsigned long)pci_resource_start(pdev, 0);
+ range = pci_resource_len(pdev, 0);
+ flags = pci_resource_flags(pdev, 0);
+ if (flags & IORESOURCE_CACHEABLE)
+ mem_base0 = ioremap(addr, range);
+ else
+ mem_base0 = ioremap_nocache(addr, range);
+ if (!mem_base0) {
+ pr_notice("arcmsr%d: memory mapping region fail\n",
+ acb->host->host_no);
+ return false;
+ }
+ acb->mem_base0 = mem_base0;
+ break;
+ }
+ }
+ return true;
+}
+
+static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:{
+ iounmap(acb->pmuA);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B:{
+ iounmap(acb->mem_base0);
+ iounmap(acb->mem_base1);
+ }
+
+ break;
+ case ACB_ADAPTER_TYPE_C:{
+ iounmap(acb->pmuC);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ iounmap(acb->mem_base0);
+ break;
+ }
+}
+
+static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t handle_state;
+ struct AdapterControlBlock *acb = dev_id;
+
+ handle_state = arcmsr_interrupt(acb);
+ return handle_state;
+}
+
+static int arcmsr_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int *geom)
+{
+ int ret, heads, sectors, cylinders, total_capacity;
+ unsigned char *buffer;/* return copy of block device's partition table */
+
+ buffer = scsi_bios_ptable(bdev);
+ if (buffer) {
+ ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
+ kfree(buffer);
+ if (ret != -1)
+ return ret;
+ }
+ total_capacity = capacity;
+ heads = 64;
+ sectors = 32;
+ cylinders = total_capacity / (heads * sectors);
+ if (cylinders > 1024) {
+ heads = 255;
+ sectors = 63;
+ cylinders = total_capacity / (heads * sectors);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return 0;
+}
+
+static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ if (readl(&reg->outbound_intstatus) &
+ ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
+ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
+ &reg->outbound_intstatus);
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+
+ return false;
+}
+
+static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ if (readl(reg->iop2drv_doorbell)
+ & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
+ reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
+ reg->drv2iop_doorbell);
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+
+ return false;
+}
+
+static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ if (readl(&phbcmu->outbound_doorbell)
+ & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
+ &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+
+ return false;
+}
+
+static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *reg = pACB->pmuD;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ if (readl(reg->outbound_doorbell)
+ & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+ reg->outbound_doorbell);
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
+
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ int retry_count = 30;
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ do {
+ if (arcmsr_hbaA_wait_msgint_ready(acb))
+ break;
+ else {
+ retry_count--;
+ printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+ timeout, retry count down = %d \n", acb->host->host_no, retry_count);
+ }
+ } while (retry_count != 0);
+}
+
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ int retry_count = 30;
+ writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
+ do {
+ if (arcmsr_hbaB_wait_msgint_ready(acb))
+ break;
+ else {
+ retry_count--;
+ printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+ timeout,retry count down = %d \n", acb->host->host_no, retry_count);
+ }
+ } while (retry_count != 0);
+}
+
+static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
+ int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ do {
+ if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
+ break;
+ } else {
+ retry_count--;
+ printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+ timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
+ }
+ } while (retry_count != 0);
+ return;
+}
+
+static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 15;
+ struct MessageUnit_D *reg = pACB->pmuD;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
+ do {
+ if (arcmsr_hbaD_wait_msgint_ready(pACB))
+ break;
+
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
+static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_hbaA_flush_cache(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_hbaB_flush_cache(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ arcmsr_hbaC_flush_cache(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_flush_cache(acb);
+ break;
+ }
+}
+
+static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
+{
+ struct pci_dev *pdev = acb->pdev;
+ void *dma_coherent;
+ dma_addr_t dma_coherent_handle;
+ struct CommandControlBlock *ccb_tmp;
+ int i = 0, j = 0;
+ dma_addr_t cdb_phyaddr;
+ unsigned long roundup_ccbsize;
+ unsigned long max_xfer_len;
+ unsigned long max_sg_entrys;
+ uint32_t firm_config_version;
+
+ for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
+ for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
+ acb->devstate[i][j] = ARECA_RAID_GONE;
+
+ max_xfer_len = ARCMSR_MAX_XFER_LEN;
+ max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
+ firm_config_version = acb->firm_cfg_version;
+ if((firm_config_version & 0xFF) >= 3){
+ max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
+ max_sg_entrys = (max_xfer_len/4096);
+ }
+ acb->host->max_sectors = max_xfer_len/512;
+ acb->host->sg_tablesize = max_sg_entrys;
+ roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
+ acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
+ if(!dma_coherent){
+ printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
+ return -ENOMEM;
+ }
+ acb->dma_coherent = dma_coherent;
+ acb->dma_coherent_handle = dma_coherent_handle;
+ memset(dma_coherent, 0, acb->uncache_size);
+ ccb_tmp = dma_coherent;
+ acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
+ for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
+ cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ case ACB_ADAPTER_TYPE_B:
+ ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ case ACB_ADAPTER_TYPE_D:
+ ccb_tmp->cdb_phyaddr = cdb_phyaddr;
+ break;
+ }
+ acb->pccb_pool[i] = ccb_tmp;
+ ccb_tmp->acb = acb;
+ INIT_LIST_HEAD(&ccb_tmp->list);
+ list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
+ ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
+ dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
+ }
+ return 0;
+}
+
+static void arcmsr_message_isr_bh_fn(struct work_struct *work)
+{
+ struct AdapterControlBlock *acb = container_of(work,
+ struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+ char *acb_dev_map = (char *)acb->device_map;
+ uint32_t __iomem *signature = NULL;
+ char __iomem *devicemap = NULL;
+ int target, lun;
+ struct scsi_device *psdev;
+ char diff, temp;
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+
+ signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
+ }
+ atomic_inc(&acb->rq_map_token);
+ if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
+ return;
+ for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
+ target++) {
+ temp = readb(devicemap);
+ diff = (*acb_dev_map) ^ temp;
+ if (diff != 0) {
+ *acb_dev_map = temp;
+ for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
+ lun++) {
+ if ((diff & 0x01) == 1 &&
+ (temp & 0x01) == 1) {
+ scsi_add_device(acb->host,
+ 0, target, lun);
+ } else if ((diff & 0x01) == 1
+ && (temp & 0x01) == 0) {
+ psdev = scsi_device_lookup(acb->host,
+ 0, target, lun);
+ if (psdev != NULL) {
+ scsi_remove_device(psdev);
+ scsi_device_put(psdev);
+ }
+ }
+ temp >>= 1;
+ diff >>= 1;
+ }
+ }
+ devicemap++;
+ acb_dev_map++;
+ }
+}
+
+static int
+arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
+{
+ int i, j, r;
+ struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
+
+ for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
+ entries[i].entry = i;
+ r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
+ if (r < 0)
+ goto msi_int;
+ acb->msix_vector_count = r;
+ for (i = 0; i < r; i++) {
+ if (request_irq(entries[i].vector,
+ arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+ pr_warn("arcmsr%d: request_irq =%d failed!\n",
+ acb->host->host_no, entries[i].vector);
+ for (j = 0 ; j < i ; j++)
+ free_irq(entries[j].vector, acb);
+ pci_disable_msix(pdev);
+ goto msi_int;
+ }
+ acb->entries[i] = entries[i];
+ }
+ acb->acb_flags |= ACB_F_MSIX_ENABLED;
+ pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+ return SUCCESS;
+msi_int:
+ if (pci_enable_msi_exact(pdev, 1) < 0)
+ goto legacy_int;
+ if (request_irq(pdev->irq, arcmsr_do_interrupt,
+ IRQF_SHARED, "arcmsr", acb)) {
+ pr_warn("arcmsr%d: request_irq =%d failed!\n",
+ acb->host->host_no, pdev->irq);
+ pci_disable_msi(pdev);
+ goto legacy_int;
+ }
+ acb->acb_flags |= ACB_F_MSI_ENABLED;
+ pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
+ return SUCCESS;
+legacy_int:
+ if (request_irq(pdev->irq, arcmsr_do_interrupt,
+ IRQF_SHARED, "arcmsr", acb)) {
+ pr_warn("arcmsr%d: request_irq = %d failed!\n",
+ acb->host->host_no, pdev->irq);
+ return FAILED;
+ }
+ return SUCCESS;
+}
+
+static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct AdapterControlBlock *acb;
+ uint8_t bus,dev_fun;
+ int error;
+ error = pci_enable_device(pdev);
+ if(error){
+ return -ENODEV;
+ }
+ host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
+ if(!host){
+ goto pci_disable_dev;
+ }
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if(error){
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if(error){
+ printk(KERN_WARNING
+ "scsi%d: No suitable DMA mask available\n",
+ host->host_no);
+ goto scsi_host_release;
+ }
+ }
+ init_waitqueue_head(&wait_q);
+ bus = pdev->bus->number;
+ dev_fun = pdev->devfn;
+ acb = (struct AdapterControlBlock *) host->hostdata;
+ memset(acb,0,sizeof(struct AdapterControlBlock));
+ acb->pdev = pdev;
+ acb->host = host;
+ host->max_lun = ARCMSR_MAX_TARGETLUN;
+ host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
+ host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
+ host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
+ host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ host->this_id = ARCMSR_SCSI_INITIATOR_ID;
+ host->unique_id = (bus << 8) | dev_fun;
+ pci_set_drvdata(pdev, host);
+ pci_set_master(pdev);
+ error = pci_request_regions(pdev, "arcmsr");
+ if(error){
+ goto scsi_host_release;
+ }
+ spin_lock_init(&acb->eh_lock);
+ spin_lock_init(&acb->ccblist_lock);
+ spin_lock_init(&acb->postq_lock);
+ spin_lock_init(&acb->doneq_lock);
+ spin_lock_init(&acb->rqbuffer_lock);
+ spin_lock_init(&acb->wqbuffer_lock);
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_RQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
+ INIT_LIST_HEAD(&acb->ccb_free_list);
+ acb->adapter_type = id->driver_data;
+ error = arcmsr_remap_pciregion(acb);
+ if(!error){
+ goto pci_release_regs;
+ }
+ error = arcmsr_get_firmware_spec(acb);
+ if(!error){
+ goto unmap_pci_region;
+ }
+ error = arcmsr_alloc_ccb_pool(acb);
+ if(error){
+ goto free_hbb_mu;
+ }
+ error = scsi_add_host(host, &pdev->dev);
+ if(error){
+ goto free_ccb_pool;
+ }
+ if (arcmsr_request_irq(pdev, acb) == FAILED)
+ goto scsi_host_remove;
+ arcmsr_iop_init(acb);
+ INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ init_timer(&acb->eternal_timer);
+ acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ acb->eternal_timer.data = (unsigned long) acb;
+ acb->eternal_timer.function = &arcmsr_request_device_map;
+ add_timer(&acb->eternal_timer);
+ if(arcmsr_alloc_sysfs_attr(acb))
+ goto out_free_sysfs;
+ scsi_scan_host(host);
+ return 0;
+out_free_sysfs:
+ del_timer_sync(&acb->eternal_timer);
+ flush_work(&acb->arcmsr_do_message_isr_bh);
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+ arcmsr_free_irq(pdev, acb);
+scsi_host_remove:
+ scsi_remove_host(host);
+free_ccb_pool:
+ arcmsr_free_ccb_pool(acb);
+free_hbb_mu:
+ arcmsr_free_mu(acb);
+unmap_pci_region:
+ arcmsr_unmap_pciregion(acb);
+pci_release_regs:
+ pci_release_regions(pdev);
+scsi_host_release:
+ scsi_host_put(host);
+pci_disable_dev:
+ pci_disable_device(pdev);
+ return -ENODEV;
+}
+
+static void arcmsr_free_irq(struct pci_dev *pdev,
+ struct AdapterControlBlock *acb)
+{
+ int i;
+
+ if (acb->acb_flags & ACB_F_MSI_ENABLED) {
+ free_irq(pdev->irq, acb);
+ pci_disable_msi(pdev);
+ } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
+ for (i = 0; i < acb->msix_vector_count; i++)
+ free_irq(acb->entries[i].vector, acb);
+ pci_disable_msix(pdev);
+ } else
+ free_irq(pdev->irq, acb);
+}
+
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ uint32_t intmask_org;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *)host->hostdata;
+
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_free_irq(pdev, acb);
+ del_timer_sync(&acb->eternal_timer);
+ flush_work(&acb->arcmsr_do_message_isr_bh);
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+ pci_set_drvdata(pdev, host);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int arcmsr_resume(struct pci_dev *pdev)
+{
+ int error;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *)host->hostdata;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
+ pr_warn("%s: pci_enable_device error\n", __func__);
+ return -ENODEV;
+ }
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (error) {
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (error) {
+ pr_warn("scsi%d: No suitable DMA mask available\n",
+ host->host_no);
+ goto controller_unregister;
+ }
+ }
+ pci_set_master(pdev);
+ if (arcmsr_request_irq(pdev, acb) == FAILED)
+ goto controller_stop;
+ arcmsr_iop_init(acb);
+ INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ init_timer(&acb->eternal_timer);
+ acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ acb->eternal_timer.data = (unsigned long) acb;
+ acb->eternal_timer.function = &arcmsr_request_device_map;
+ add_timer(&acb->eternal_timer);
+ return 0;
+controller_stop:
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+controller_unregister:
+ scsi_remove_host(host);
+ arcmsr_free_ccb_pool(acb);
+ arcmsr_unmap_pciregion(acb);
+ pci_release_regions(pdev);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
+ return -ENODEV;
+}
+
+static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'abort all outstanding command' timeout\n"
+ , acb->host->host_no);
+ return false;
+ }
+ return true;
+}
+
+static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+
+ writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'abort all outstanding command' timeout\n"
+ , acb->host->host_no);
+ return false;
+ }
+ return true;
+}
+static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'abort all outstanding command' timeout\n"
+ , pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
+static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *reg = pACB->pmuD;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+ "command' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
+static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
+{
+ uint8_t rtnval = 0;
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ rtnval = arcmsr_hbaA_abort_allcmd(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ rtnval = arcmsr_hbaB_abort_allcmd(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_C: {
+ rtnval = arcmsr_hbaC_abort_allcmd(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_D:
+ rtnval = arcmsr_hbaD_abort_allcmd(acb);
+ break;
+ }
+ return rtnval;
+}
+
+static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
+{
+ struct scsi_cmnd *pcmd = ccb->pcmd;
+
+ scsi_dma_unmap(pcmd);
+}
+
+static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
+{
+ struct AdapterControlBlock *acb = ccb->acb;
+ struct scsi_cmnd *pcmd = ccb->pcmd;
+ unsigned long flags;
+ atomic_dec(&acb->ccboutstandingcount);
+ arcmsr_pci_unmap_dma(ccb);
+ ccb->startdone = ARCMSR_CCB_DONE;
+ spin_lock_irqsave(&acb->ccblist_lock, flags);
+ list_add_tail(&ccb->list, &acb->ccb_free_list);
+ spin_unlock_irqrestore(&acb->ccblist_lock, flags);
+ pcmd->scsi_done(pcmd);
+}
+
+static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
+{
+
+ struct scsi_cmnd *pcmd = ccb->pcmd;
+ struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
+ pcmd->result = DID_OK << 16;
+ if (sensebuffer) {
+ int sense_data_length =
+ sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
+ ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
+ memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
+ sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
+ sensebuffer->Valid = 1;
+ }
+}
+
+static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
+{
+ u32 orig_mask = 0;
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A : {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ orig_mask = readl(&reg->outbound_intmask);
+ writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
+ &reg->outbound_intmask);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B : {
+ struct MessageUnit_B *reg = acb->pmuB;
+ orig_mask = readl(reg->iop2drv_doorbell_mask);
+ writel(0, reg->iop2drv_doorbell_mask);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C:{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ /* disable all outbound interrupt */
+ orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ /* disable all outbound interrupt */
+ writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
+ }
+ break;
+ }
+ return orig_mask;
+}
+
+static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *ccb, bool error)
+{
+ uint8_t id, lun;
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!error) {
+ if (acb->devstate[id][lun] == ARECA_RAID_GONE)
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb);
+ }else{
+ switch (ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE
+ "arcmsr%d: scsi id = %d lun = %d isr get command error done, \
+ but got unknown DeviceStatus = 0x%x \n"
+ , acb->host->host_no
+ , id
+ , lun
+ , ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb);
+ break;
+ }
+ }
+}
+
+static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
+{
+ int id, lun;
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ struct scsi_cmnd *abortcmd = pCCB->pcmd;
+ if (abortcmd) {
+ id = abortcmd->device->id;
+ lun = abortcmd->device->lun;
+ abortcmd->result |= DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
+ acb->host->host_no, pCCB);
+ }
+ return;
+ }
+ printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
+ done acb = '0x%p'"
+ "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
+ " ccboutstandingcount = %d \n"
+ , acb->host->host_no
+ , acb
+ , pCCB
+ , pCCB->acb
+ , pCCB->startdone
+ , atomic_read(&acb->ccboutstandingcount));
+ return;
+ }
+ arcmsr_report_ccb_state(acb, pCCB, error);
+}
+
+static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
+{
+ int i = 0;
+ uint32_t flag_ccb, ccb_cdb_phy;
+ struct ARCMSR_CDB *pARCMSR_CDB;
+ bool error;
+ struct CommandControlBlock *pCCB;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ uint32_t outbound_intstatus;
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ /*clear and abort all outbound posted Q*/
+ writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
+ while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
+ && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ }
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ /*clear all outbound posted Q*/
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
+ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
+ flag_ccb = reg->done_qbuffer[i];
+ if (flag_ccb != 0) {
+ reg->done_qbuffer[i] = 0;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ }
+ reg->post_qbuffer[i] = 0;
+ }
+ reg->doneq_index = 0;
+ reg->postq_index = 0;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ /*need to do*/
+ flag_ccb = readl(&reg->outbound_queueport_low);
+ ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
+ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *pmu = acb->pmuD;
+ uint32_t outbound_write_pointer;
+ uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
+ unsigned long flags;
+
+ residual = atomic_read(&acb->ccboutstandingcount);
+ for (i = 0; i < residual; i++) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ outbound_write_pointer =
+ pmu->done_qbuffer[0].addressLow + 1;
+ doneq_index = pmu->doneq_index;
+ if ((doneq_index & 0xFFF) !=
+ (outbound_write_pointer & 0xFFF)) {
+ toggle = doneq_index & 0x4000;
+ index_stripped = (doneq_index & 0xFFF) + 1;
+ index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+ pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+ ((toggle ^ 0x4000) + 1);
+ doneq_index = pmu->doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ addressLow = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressLow;
+ ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+ pARCMSR_CDB = (struct ARCMSR_CDB *)
+ (acb->vir2phy_offset + ccb_cdb_phy);
+ pCCB = container_of(pARCMSR_CDB,
+ struct CommandControlBlock, arcmsr_cdb);
+ error = (addressLow &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
+ true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ writel(doneq_index,
+ pmu->outboundlist_read_pointer);
+ } else {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ mdelay(10);
+ }
+ }
+ pmu->postq_index = 0;
+ pmu->doneq_index = 0x40FF;
+ }
+ break;
+ }
+}
+
+static void arcmsr_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+ int poll_count = 0;
+ arcmsr_free_sysfs_attr(acb);
+ scsi_remove_host(host);
+ flush_work(&acb->arcmsr_do_message_isr_bh);
+ del_timer_sync(&acb->eternal_timer);
+ arcmsr_disable_outbound_ints(acb);
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+ acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+
+ for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
+ if (!atomic_read(&acb->ccboutstandingcount))
+ break;
+ arcmsr_interrupt(acb);/* FIXME: need spinlock */
+ msleep(25);
+ }
+
+ if (atomic_read(&acb->ccboutstandingcount)) {
+ int i;
+
+ arcmsr_abort_allcmd(acb);
+ arcmsr_done4abort_postqueue(acb);
+ for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ struct CommandControlBlock *ccb = acb->pccb_pool[i];
+ if (ccb->startdone == ARCMSR_CCB_START) {
+ ccb->startdone = ARCMSR_CCB_ABORTED;
+ ccb->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(ccb);
+ }
+ }
+ }
+ arcmsr_free_irq(pdev, acb);
+ arcmsr_free_ccb_pool(acb);
+ arcmsr_free_mu(acb);
+ arcmsr_unmap_pciregion(acb);
+ pci_release_regions(pdev);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
+}
+
+static void arcmsr_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *)host->hostdata;
+ del_timer_sync(&acb->eternal_timer);
+ arcmsr_disable_outbound_ints(acb);
+ arcmsr_free_irq(pdev, acb);
+ flush_work(&acb->arcmsr_do_message_isr_bh);
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+}
+
+static int arcmsr_module_init(void)
+{
+ int error = 0;
+ error = pci_register_driver(&arcmsr_pci_driver);
+ return error;
+}
+
+static void arcmsr_module_exit(void)
+{
+ pci_unregister_driver(&arcmsr_pci_driver);
+}
+module_init(arcmsr_module_init);
+module_exit(arcmsr_module_exit);
+
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
+ u32 intmask_org)
+{
+ u32 mask;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
+ ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
+ ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
+ writel(mask, &reg->outbound_intmask);
+ acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
+ ARCMSR_IOP2DRV_DATA_READ_OK |
+ ARCMSR_IOP2DRV_CDB_DONE |
+ ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
+ writel(mask, reg->iop2drv_doorbell_mask);
+ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
+ writel(intmask_org & mask, &reg->host_int_mask);
+ acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
+ writel(intmask_org | mask, reg->pcief0_int_enable);
+ break;
+ }
+ }
+}
+
+static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
+{
+ struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
+ int8_t *psge = (int8_t *)&arcmsr_cdb->u;
+ __le32 address_lo, address_hi;
+ int arccdbsize = 0x30;
+ __le32 length = 0;
+ int i;
+ struct scatterlist *sg;
+ int nseg;
+ ccb->pcmd = pcmd;
+ memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
+ arcmsr_cdb->TargetID = pcmd->device->id;
+ arcmsr_cdb->LUN = pcmd->device->lun;
+ arcmsr_cdb->Function = 1;
+ arcmsr_cdb->msgContext = 0;
+ memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
+
+ nseg = scsi_dma_map(pcmd);
+ if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
+ return FAILED;
+ scsi_for_each_sg(pcmd, sg, nseg, i) {
+ /* Get the physical address of the current data pointer */
+ length = cpu_to_le32(sg_dma_len(sg));
+ address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
+ address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
+ if (address_hi == 0) {
+ struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
+
+ pdma_sg->address = address_lo;
+ pdma_sg->length = length;
+ psge += sizeof (struct SG32ENTRY);
+ arccdbsize += sizeof (struct SG32ENTRY);
+ } else {
+ struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
+
+ pdma_sg->addresshigh = address_hi;
+ pdma_sg->address = address_lo;
+ pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
+ psge += sizeof (struct SG64ENTRY);
+ arccdbsize += sizeof (struct SG64ENTRY);
+ }
+ }
+ arcmsr_cdb->sgcount = (uint8_t)nseg;
+ arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
+ arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
+ if ( arccdbsize > 256)
+ arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
+ if (pcmd->sc_data_direction == DMA_TO_DEVICE)
+ arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
+ ccb->arc_cdb_size = arccdbsize;
+ return SUCCESS;
+}
+
+static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
+{
+ uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
+ struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
+ atomic_inc(&acb->ccboutstandingcount);
+ ccb->startdone = ARCMSR_CCB_START;
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
+ writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
+ &reg->inbound_queueport);
+ else
+ writel(cdb_phyaddr, &reg->inbound_queueport);
+ break;
+ }
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ uint32_t ending_index, index = reg->postq_index;
+
+ ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
+ reg->post_qbuffer[ending_index] = 0;
+ if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
+ reg->post_qbuffer[index] =
+ cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
+ } else {
+ reg->post_qbuffer[index] = cdb_phyaddr;
+ }
+ index++;
+ index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
+ reg->postq_index = index;
+ writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
+ uint32_t ccb_post_stamp, arc_cdb_size;
+
+ arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
+ ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
+ if (acb->cdb_phyaddr_hi32) {
+ writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
+ } else {
+ writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *pmu = acb->pmuD;
+ u16 index_stripped;
+ u16 postq_index, toggle;
+ unsigned long flags;
+ struct InBound_SRB *pinbound_srb;
+
+ spin_lock_irqsave(&acb->postq_lock, flags);
+ postq_index = pmu->postq_index;
+ pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
+ pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
+ pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
+ pinbound_srb->length = ccb->arc_cdb_size >> 2;
+ arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
+ toggle = postq_index & 0x4000;
+ index_stripped = postq_index + 1;
+ index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
+ pmu->postq_index = index_stripped ? (index_stripped | toggle) :
+ (toggle ^ 0x4000);
+ writel(postq_index, pmu->inboundlist_write_pointer);
+ spin_unlock_irqrestore(&acb->postq_lock, flags);
+ break;
+ }
+ }
+}
+
+static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ , acb->host->host_no);
+ }
+}
+
+static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
+
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ , acb->host->host_no);
+ }
+}
+
+static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ , pACB->host->host_no);
+ }
+ return;
+}
+
+static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *reg = pACB->pmuD;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(pACB))
+ pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ "timeout\n", pACB->host->host_no);
+}
+
+static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_hbaA_stop_bgrb(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_hbaB_stop_bgrb(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ arcmsr_hbaC_stop_bgrb(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_stop_bgrb(acb);
+ break;
+ }
+}
+
+static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
+{
+ dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
+}
+
+static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+
+ writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+ reg->inbound_doorbell);
+ }
+ break;
+ }
+}
+
+static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ /*
+ ** push inbound doorbell tell iop, driver data write ok
+ ** and wait reply on next hwinterrupt for next Qbuffer post
+ */
+ writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ /*
+ ** push inbound doorbell tell iop, driver data write ok
+ ** and wait reply on next hwinterrupt for next Qbuffer post
+ */
+ writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ /*
+ ** push inbound doorbell tell iop, driver data write ok
+ ** and wait reply on next hwinterrupt for next Qbuffer post
+ */
+ writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
+ reg->inbound_doorbell);
+ }
+ break;
+ }
+}
+
+struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
+{
+ struct QBUFFER __iomem *qbuffer = NULL;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
+ qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
+ }
+ break;
+ }
+ return qbuffer;
+}
+
+static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
+{
+ struct QBUFFER __iomem *pqbuffer = NULL;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
+ }
+ break;
+ }
+ return pqbuffer;
+}
+
+static uint32_t
+arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
+ struct QBUFFER __iomem *prbuffer)
+{
+ uint8_t *pQbuffer;
+ uint8_t *buf1 = NULL;
+ uint32_t __iomem *iop_data;
+ uint32_t iop_len, data_len, *buf2 = NULL;
+
+ iop_data = (uint32_t __iomem *)prbuffer->data;
+ iop_len = readl(&prbuffer->data_len);
+ if (iop_len > 0) {
+ buf1 = kmalloc(128, GFP_ATOMIC);
+ buf2 = (uint32_t *)buf1;
+ if (buf1 == NULL)
+ return 0;
+ data_len = iop_len;
+ while (data_len >= 4) {
+ *buf2++ = readl(iop_data);
+ iop_data++;
+ data_len -= 4;
+ }
+ if (data_len)
+ *buf2 = readl(iop_data);
+ buf2 = (uint32_t *)buf1;
+ }
+ while (iop_len > 0) {
+ pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
+ *pQbuffer = *buf1;
+ acb->rqbuf_putIndex++;
+ /* if last, index number set it to 0 */
+ acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ buf1++;
+ iop_len--;
+ }
+ kfree(buf2);
+ /* let IOP know data has been read */
+ arcmsr_iop_message_read(acb);
+ return 1;
+}
+
+uint32_t
+arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
+ struct QBUFFER __iomem *prbuffer) {
+
+ uint8_t *pQbuffer;
+ uint8_t __iomem *iop_data;
+ uint32_t iop_len;
+
+ if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
+ iop_data = (uint8_t __iomem *)prbuffer->data;
+ iop_len = readl(&prbuffer->data_len);
+ while (iop_len > 0) {
+ pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
+ *pQbuffer = readb(iop_data);
+ acb->rqbuf_putIndex++;
+ acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ iop_data++;
+ iop_len--;
+ }
+ arcmsr_iop_message_read(acb);
+ return 1;
+}
+
+static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
+{
+ unsigned long flags;
+ struct QBUFFER __iomem *prbuffer;
+ int32_t buf_empty_len;
+
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ prbuffer = arcmsr_get_iop_rqbuffer(acb);
+ buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
+ (ARCMSR_MAX_QBUFFER - 1);
+ if (buf_empty_len >= readl(&prbuffer->data_len)) {
+ if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+ } else
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+}
+
+static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
+{
+ uint8_t *pQbuffer;
+ struct QBUFFER __iomem *pwbuffer;
+ uint8_t *buf1 = NULL;
+ uint32_t __iomem *iop_data;
+ uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
+
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
+ buf1 = kmalloc(128, GFP_ATOMIC);
+ buf2 = (uint32_t *)buf1;
+ if (buf1 == NULL)
+ return;
+
+ acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
+ pwbuffer = arcmsr_get_iop_wqbuffer(acb);
+ iop_data = (uint32_t __iomem *)pwbuffer->data;
+ while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+ && (allxfer_len < 124)) {
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
+ *buf1 = *pQbuffer;
+ acb->wqbuf_getIndex++;
+ acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
+ buf1++;
+ allxfer_len++;
+ }
+ data_len = allxfer_len;
+ buf1 = (uint8_t *)buf2;
+ while (data_len >= 4) {
+ data = *buf2++;
+ writel(data, iop_data);
+ iop_data++;
+ data_len -= 4;
+ }
+ if (data_len) {
+ data = *buf2;
+ writel(data, iop_data);
+ }
+ writel(allxfer_len, &pwbuffer->data_len);
+ kfree(buf1);
+ arcmsr_iop_message_wrote(acb);
+ }
+}
+
+void
+arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
+{
+ uint8_t *pQbuffer;
+ struct QBUFFER __iomem *pwbuffer;
+ uint8_t __iomem *iop_data;
+ int32_t allxfer_len = 0;
+
+ if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ arcmsr_write_ioctldata2iop_in_DWORD(acb);
+ return;
+ }
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
+ acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
+ pwbuffer = arcmsr_get_iop_wqbuffer(acb);
+ iop_data = (uint8_t __iomem *)pwbuffer->data;
+ while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+ && (allxfer_len < 124)) {
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
+ writeb(*pQbuffer, iop_data);
+ acb->wqbuf_getIndex++;
+ acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
+ iop_data++;
+ allxfer_len++;
+ }
+ writel(allxfer_len, &pwbuffer->data_len);
+ arcmsr_iop_message_wrote(acb);
+ }
+}
+
+static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
+ if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+ arcmsr_write_ioctldata2iop(acb);
+ if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
+ acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+}
+
+static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t outbound_doorbell;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ do {
+ writel(outbound_doorbell, &reg->outbound_doorbell);
+ if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(acb);
+ if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(acb);
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
+ | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
+}
+static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
+ /*
+ *******************************************************************
+ ** Maybe here we need to check wrqbuffer_lock is lock or not
+ ** DOORBELL: din! don!
+ ** check if there are any mail need to pack from firmware
+ *******************************************************************
+ */
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ do {
+ writel(outbound_doorbell, &reg->outbound_doorbell_clear);
+ readl(&reg->outbound_doorbell_clear);
+ if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(pACB);
+ if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
+ arcmsr_hbaC_message_isr(pACB);
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
+}
+
+static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell;
+ struct MessageUnit_D *pmu = pACB->pmuD;
+
+ outbound_doorbell = readl(pmu->outbound_doorbell);
+ do {
+ writel(outbound_doorbell, pmu->outbound_doorbell);
+ if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
+ arcmsr_hbaD_message_isr(pACB);
+ if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(pACB);
+ outbound_doorbell = readl(pmu->outbound_doorbell);
+ } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
+ | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
+}
+
+static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t flag_ccb;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ struct ARCMSR_CDB *pARCMSR_CDB;
+ struct CommandControlBlock *pCCB;
+ bool error;
+ while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ }
+}
+static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t index;
+ uint32_t flag_ccb;
+ struct MessageUnit_B *reg = acb->pmuB;
+ struct ARCMSR_CDB *pARCMSR_CDB;
+ struct CommandControlBlock *pCCB;
+ bool error;
+ index = reg->doneq_index;
+ while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
+ reg->done_qbuffer[index] = 0;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
+ arcmsr_drain_donequeue(acb, pCCB, error);
+ index++;
+ index %= ARCMSR_MAX_HBB_POSTQUEUE;
+ reg->doneq_index = index;
+ }
+}
+
+static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_C __iomem *phbcmu;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ struct CommandControlBlock *ccb;
+ uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
+ int error;
+
+ phbcmu = acb->pmuC;
+ /* areca cdb command done */
+ /* Use correct offset and size for syncing */
+
+ while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
+ 0xFFFFFFFF) {
+ ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+ + ccb_cdb_phy);
+ ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
+ arcmsr_cdb);
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+ ? true : false;
+ /* check if command done with no error */
+ arcmsr_drain_donequeue(acb, ccb, error);
+ throttling++;
+ if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
+ writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
+ &phbcmu->inbound_doorbell);
+ throttling = 0;
+ }
+ }
+}
+
+static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
+ uint32_t addressLow, ccb_cdb_phy;
+ int error;
+ struct MessageUnit_D *pmu;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ pmu = acb->pmuD;
+ outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+ doneq_index = pmu->doneq_index;
+ if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
+ do {
+ toggle = doneq_index & 0x4000;
+ index_stripped = (doneq_index & 0xFFF) + 1;
+ index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+ pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+ ((toggle ^ 0x4000) + 1);
+ doneq_index = pmu->doneq_index;
+ addressLow = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressLow;
+ ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+ + ccb_cdb_phy);
+ ccb = container_of(arcmsr_cdb,
+ struct CommandControlBlock, arcmsr_cdb);
+ error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+ ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ writel(doneq_index, pmu->outboundlist_read_pointer);
+ } while ((doneq_index & 0xFFF) !=
+ (outbound_write_pointer & 0xFFF));
+ }
+ writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
+ pmu->outboundlist_interrupt_cause);
+ readl(pmu->outboundlist_interrupt_cause);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
+/*
+**********************************************************************************
+** Handle a message interrupt
+**
+** The only message interrupt we expect is in response to a query for the current adapter config.
+** We want this in order to compare the drivemap so that we can detect newly-attached drives.
+**********************************************************************************
+*/
+static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ /*clear interrupt and message state*/
+ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+
+ /*clear interrupt and message state*/
+ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+/*
+**********************************************************************************
+** Handle a message interrupt
+**
+** The only message interrupt we expect is in response to a query for the
+** current adapter config.
+** We want this in order to compare the drivemap so that we can detect newly-attached drives.
+**********************************************************************************
+*/
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ /*clear interrupt and message state*/
+ writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
+ readl(reg->outbound_doorbell);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t outbound_intstatus;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
+ return IRQ_NONE;
+ do {
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
+ arcmsr_hbaA_doorbell_isr(acb);
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
+ arcmsr_hbaA_postqueue_isr(acb);
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
+ arcmsr_hbaA_message_isr(acb);
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
+ | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
+ | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
+ return IRQ_HANDLED;
+}
+
+static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t outbound_doorbell;
+ struct MessageUnit_B *reg = acb->pmuB;
+ outbound_doorbell = readl(reg->iop2drv_doorbell) &
+ acb->outbound_int_enable;
+ if (!outbound_doorbell)
+ return IRQ_NONE;
+ do {
+ writel(~outbound_doorbell, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
+ arcmsr_iop2drv_data_wrote_handle(acb);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
+ arcmsr_iop2drv_data_read_handle(acb);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
+ arcmsr_hbaB_postqueue_isr(acb);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
+ arcmsr_hbaB_message_isr(acb);
+ outbound_doorbell = readl(reg->iop2drv_doorbell) &
+ acb->outbound_int_enable;
+ } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_IOP2DRV_DATA_READ_OK
+ | ARCMSR_IOP2DRV_CDB_DONE
+ | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
+ return IRQ_HANDLED;
+}
+
+static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
+ /*
+ *********************************************
+ ** check outbound intstatus
+ *********************************************
+ */
+ host_interrupt_status = readl(&phbcmu->host_int_status) &
+ (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
+ arcmsr_hbaC_doorbell_isr(pACB);
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
+ arcmsr_hbaC_postqueue_isr(pACB);
+ host_interrupt_status = readl(&phbcmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
+{
+ u32 host_interrupt_status;
+ struct MessageUnit_D *pmu = pACB->pmuD;
+
+ host_interrupt_status = readl(pmu->host_int_status) &
+ (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU post queue interrupts*/
+ if (host_interrupt_status &
+ ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
+ arcmsr_hbaD_postqueue_isr(pACB);
+ if (host_interrupt_status &
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
+ arcmsr_hbaD_doorbell_isr(pACB);
+ host_interrupt_status = readl(pmu->host_int_status);
+ } while (host_interrupt_status &
+ (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ return arcmsr_hbaA_handle_isr(acb);
+ break;
+ case ACB_ADAPTER_TYPE_B:
+ return arcmsr_hbaB_handle_isr(acb);
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ return arcmsr_hbaC_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_D:
+ return arcmsr_hbaD_handle_isr(acb);
+ default:
+ return IRQ_NONE;
+ }
+}
+
+static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
+{
+ if (acb) {
+ /* stop adapter background rebuild */
+ if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
+ uint32_t intmask_org;
+ acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_stop_adapter_bgrb(acb);
+ arcmsr_flush_adapter_cache(acb);
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ }
+ }
+}
+
+
+void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
+{
+ uint32_t i;
+
+ if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+ for (i = 0; i < 15; i++) {
+ if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+ acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ arcmsr_iop_message_read(acb);
+ mdelay(30);
+ } else if (acb->rqbuf_getIndex !=
+ acb->rqbuf_putIndex) {
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ mdelay(30);
+ } else
+ break;
+ }
+ }
+}
+
+static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ struct scsi_cmnd *cmd)
+{
+ char *buffer;
+ unsigned short use_sg;
+ int retvalue = 0, transfer_len = 0;
+ unsigned long flags;
+ struct CMD_MESSAGE_FIELD *pcmdmessagefld;
+ uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
+ (uint32_t)cmd->cmnd[6] << 16 |
+ (uint32_t)cmd->cmnd[7] << 8 |
+ (uint32_t)cmd->cmnd[8];
+ struct scatterlist *sg;
+
+ use_sg = scsi_sg_count(cmd);
+ sg = scsi_sglist(cmd);
+ buffer = kmap_atomic(sg_page(sg)) + sg->offset;
+ if (use_sg > 1) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ goto message_out;
+ }
+ transfer_len += sg->length;
+ if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
+ goto message_out;
+ }
+ pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
+ switch (controlcode) {
+ case ARCMSR_MESSAGE_READ_RQBUFFER: {
+ unsigned char *ver_addr;
+ uint8_t *ptmpQbuffer;
+ uint32_t allxfer_len = 0;
+ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+ if (!ver_addr) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ pr_info("%s: memory not enough!\n", __func__);
+ goto message_out;
+ }
+ ptmpQbuffer = ver_addr;
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+ unsigned int tail = acb->rqbuf_getIndex;
+ unsigned int head = acb->rqbuf_putIndex;
+ unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+ allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+ if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+ allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+ if (allxfer_len <= cnt_to_end)
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+ else {
+ memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+ memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+ }
+ acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
+ }
+ memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
+ allxfer_len);
+ if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+ struct QBUFFER __iomem *prbuffer;
+ acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
+ prbuffer = arcmsr_get_iop_rqbuffer(acb);
+ if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+ acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+ }
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ kfree(ver_addr);
+ pcmdmessagefld->cmdmessage.Length = allxfer_len;
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ break;
+ }
+ case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ unsigned char *ver_addr;
+ int32_t user_len, cnt2end;
+ uint8_t *pQbuffer, *ptmpuserbuffer;
+ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+ if (!ver_addr) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ goto message_out;
+ }
+ ptmpuserbuffer = ver_addr;
+ user_len = pcmdmessagefld->cmdmessage.Length;
+ memcpy(ptmpuserbuffer,
+ pcmdmessagefld->messagedatabuffer, user_len);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
+ struct SENSE_DATA *sensebuffer =
+ (struct SENSE_DATA *)cmd->sense_buffer;
+ arcmsr_write_ioctldata2iop(acb);
+ /* has error report sensedata */
+ sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
+ sensebuffer->SenseKey = ILLEGAL_REQUEST;
+ sensebuffer->AdditionalSenseLength = 0x0A;
+ sensebuffer->AdditionalSenseCode = 0x20;
+ sensebuffer->Valid = 1;
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ } else {
+ pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+ cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+ if (user_len > cnt2end) {
+ memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+ ptmpuserbuffer += cnt2end;
+ user_len -= cnt2end;
+ acb->wqbuf_putIndex = 0;
+ pQbuffer = acb->wqbuffer;
+ }
+ memcpy(pQbuffer, ptmpuserbuffer, user_len);
+ acb->wqbuf_putIndex += user_len;
+ acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+ if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+ acb->acb_flags &=
+ ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
+ arcmsr_write_ioctldata2iop(acb);
+ }
+ }
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ kfree(ver_addr);
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ break;
+ }
+ case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
+ uint8_t *pQbuffer = acb->rqbuffer;
+
+ arcmsr_clear_iop2drv_rqueue_buffer(acb);
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ break;
+ }
+ case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
+ uint8_t *pQbuffer = acb->wqbuffer;
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->wqbuf_getIndex = 0;
+ acb->wqbuf_putIndex = 0;
+ memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ break;
+ }
+ case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
+ uint8_t *pQbuffer;
+ arcmsr_clear_iop2drv_rqueue_buffer(acb);
+ spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+ acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
+ acb->rqbuf_getIndex = 0;
+ acb->rqbuf_putIndex = 0;
+ pQbuffer = acb->rqbuffer;
+ memset(pQbuffer, 0, sizeof(struct QBUFFER));
+ spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->wqbuf_getIndex = 0;
+ acb->wqbuf_putIndex = 0;
+ pQbuffer = acb->wqbuffer;
+ memset(pQbuffer, 0, sizeof(struct QBUFFER));
+ spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ break;
+ }
+ case ARCMSR_MESSAGE_RETURN_CODE_3F: {
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_3F;
+ break;
+ }
+ case ARCMSR_MESSAGE_SAY_HELLO: {
+ int8_t *hello_string = "Hello! I am ARCMSR";
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ memcpy(pcmdmessagefld->messagedatabuffer,
+ hello_string, (int16_t)strlen(hello_string));
+ break;
+ }
+ case ARCMSR_MESSAGE_SAY_GOODBYE: {
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ arcmsr_iop_parking(acb);
+ break;
+ }
+ case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
+ if (acb->fw_flag == FW_DEADLOCK)
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ else
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_OK;
+ arcmsr_flush_adapter_cache(acb);
+ break;
+ }
+ default:
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ pr_info("%s: unknown controlcode!\n", __func__);
+ }
+message_out:
+ if (use_sg) {
+ struct scatterlist *sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset);
+ }
+ return retvalue;
+}
+
+static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
+{
+ struct list_head *head = &acb->ccb_free_list;
+ struct CommandControlBlock *ccb = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&acb->ccblist_lock, flags);
+ if (!list_empty(head)) {
+ ccb = list_entry(head->next, struct CommandControlBlock, list);
+ list_del_init(&ccb->list);
+ }else{
+ spin_unlock_irqrestore(&acb->ccblist_lock, flags);
+ return NULL;
+ }
+ spin_unlock_irqrestore(&acb->ccblist_lock, flags);
+ return ccb;
+}
+
+static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
+ struct scsi_cmnd *cmd)
+{
+ switch (cmd->cmnd[0]) {
+ case INQUIRY: {
+ unsigned char inqdata[36];
+ char *buffer;
+ struct scatterlist *sg;
+
+ if (cmd->device->lun) {
+ cmd->result = (DID_TIME_OUT << 16);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ inqdata[0] = TYPE_PROCESSOR;
+ /* Periph Qualifier & Periph Dev Type */
+ inqdata[1] = 0;
+ /* rem media bit & Dev Type Modifier */
+ inqdata[2] = 0;
+ /* ISO, ECMA, & ANSI versions */
+ inqdata[4] = 31;
+ /* length of additional data */
+ strncpy(&inqdata[8], "Areca ", 8);
+ /* Vendor Identification */
+ strncpy(&inqdata[16], "RAID controller ", 16);
+ /* Product Identification */
+ strncpy(&inqdata[32], "R001", 4); /* Product Revision */
+
+ sg = scsi_sglist(cmd);
+ buffer = kmap_atomic(sg_page(sg)) + sg->offset;
+
+ memcpy(buffer, inqdata, sizeof(inqdata));
+ sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset);
+
+ cmd->scsi_done(cmd);
+ }
+ break;
+ case WRITE_BUFFER:
+ case READ_BUFFER: {
+ if (arcmsr_iop_message_xfer(acb, cmd))
+ cmd->result = (DID_ERROR << 16);
+ cmd->scsi_done(cmd);
+ }
+ break;
+ default:
+ cmd->scsi_done(cmd);
+ }
+}
+
+static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
+ void (* done)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct CommandControlBlock *ccb;
+ int target = cmd->device->id;
+ int lun = cmd->device->lun;
+ uint8_t scsicmd = cmd->cmnd[0];
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->result = 0;
+ if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
+ if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
+ cmd->result = (DID_NO_CONNECT << 16);
+ }
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ if (target == 16) {
+ /* virtual device for iop message transfer */
+ arcmsr_handle_virtual_command(acb, cmd);
+ return 0;
+ }
+ ccb = arcmsr_get_freeccb(acb);
+ if (!ccb)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
+ cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ arcmsr_post_ccb(acb, ccb);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(arcmsr_queue_command)
+
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ char *acb_firm_model = acb->firm_model;
+ char *acb_firm_version = acb->firm_version;
+ char *acb_device_map = acb->device_map;
+ char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
+ char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
+ char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
+ int count;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
+ miscellaneous data' timeout \n", acb->host->host_no);
+ return false;
+ }
+ count = 8;
+ while (count){
+ *acb_firm_model = readb(iop_firm_model);
+ acb_firm_model++;
+ iop_firm_model++;
+ count--;
+ }
+
+ count = 16;
+ while (count){
+ *acb_firm_version = readb(iop_firm_version);
+ acb_firm_version++;
+ iop_firm_version++;
+ count--;
+ }
+
+ count=16;
+ while(count){
+ *acb_device_map = readb(iop_device_map);
+ acb_device_map++;
+ iop_device_map++;
+ count--;
+ }
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+ acb->host->host_no,
+ acb->firm_model,
+ acb->firm_version);
+ acb->signature = readl(&reg->message_rwbuffer[0]);
+ acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
+ acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
+ acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
+ acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
+ acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ return true;
+}
+static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ struct pci_dev *pdev = acb->pdev;
+ void *dma_coherent;
+ dma_addr_t dma_coherent_handle;
+ char *acb_firm_model = acb->firm_model;
+ char *acb_firm_version = acb->firm_version;
+ char *acb_device_map = acb->device_map;
+ char __iomem *iop_firm_model;
+ /*firm_model,15,60-67*/
+ char __iomem *iop_firm_version;
+ /*firm_version,17,68-83*/
+ char __iomem *iop_device_map;
+ /*firm_version,21,84-99*/
+ int count;
+
+ acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent){
+ printk(KERN_NOTICE
+ "arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
+ acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ reg = (struct MessageUnit_B *)dma_coherent;
+ acb->pmuB = reg;
+ reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
+ reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
+ reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
+ reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
+ reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
+ reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
+ reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
+ iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
+ iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
+ iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
+
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
+ miscellaneous data' timeout \n", acb->host->host_no);
+ return false;
+ }
+ count = 8;
+ while (count){
+ *acb_firm_model = readb(iop_firm_model);
+ acb_firm_model++;
+ iop_firm_model++;
+ count--;
+ }
+ count = 16;
+ while (count){
+ *acb_firm_version = readb(iop_firm_version);
+ acb_firm_version++;
+ iop_firm_version++;
+ count--;
+ }
+
+ count = 16;
+ while(count){
+ *acb_device_map = readb(iop_device_map);
+ acb_device_map++;
+ iop_device_map++;
+ count--;
+ }
+
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+ acb->host->host_no,
+ acb->firm_model,
+ acb->firm_version);
+
+ acb->signature = readl(&reg->message_rwbuffer[1]);
+ /*firm_signature,1,00-03*/
+ acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
+ /*firm_request_len,1,04-07*/
+ acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
+ /*firm_numbers_queue,2,08-11*/
+ acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
+ /*firm_sdram_size,3,12-15*/
+ acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
+ /*firm_ide_channels,4,16-19*/
+ acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ /*firm_ide_channels,4,16-19*/
+ return true;
+}
+
+static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
+{
+ uint32_t intmask_org, Index, firmware_state = 0;
+ struct MessageUnit_C __iomem *reg = pACB->pmuC;
+ char *acb_firm_model = pACB->firm_model;
+ char *acb_firm_version = pACB->firm_version;
+ char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
+ char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
+ int count;
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ /* wait message ready */
+ for (Index = 0; Index < 2000; Index++) {
+ if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
+ break;
+ }
+ udelay(10);
+ } /*max 1 seconds*/
+ if (Index >= 2000) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
+ miscellaneous data' timeout \n", pACB->host->host_no);
+ return false;
+ }
+ count = 8;
+ while (count) {
+ *acb_firm_model = readb(iop_firm_model);
+ acb_firm_model++;
+ iop_firm_model++;
+ count--;
+ }
+ count = 16;
+ while (count) {
+ *acb_firm_version = readb(iop_firm_version);
+ acb_firm_version++;
+ iop_firm_version++;
+ count--;
+ }
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+ pACB->host->host_no,
+ pACB->firm_model,
+ pACB->firm_version);
+ pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
+ pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
+ pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
+ pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
+ pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ /*all interrupt service will be enable at arcmsr_iop_init*/
+ return true;
+}
+
+static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
+{
+ char *acb_firm_model = acb->firm_model;
+ char *acb_firm_version = acb->firm_version;
+ char *acb_device_map = acb->device_map;
+ char __iomem *iop_firm_model;
+ char __iomem *iop_firm_version;
+ char __iomem *iop_device_map;
+ u32 count;
+ struct MessageUnit_D *reg;
+ void *dma_coherent2;
+ dma_addr_t dma_coherent_handle2;
+ struct pci_dev *pdev = acb->pdev;
+
+ acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
+ dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle2, GFP_KERNEL);
+ if (!dma_coherent2) {
+ pr_notice("DMA allocation failed...\n");
+ return false;
+ }
+ memset(dma_coherent2, 0, acb->roundup_ccbsize);
+ acb->dma_coherent_handle2 = dma_coherent_handle2;
+ acb->dma_coherent2 = dma_coherent2;
+ reg = (struct MessageUnit_D *)dma_coherent2;
+ acb->pmuD = reg;
+ reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID;
+ reg->cpu_mem_config = acb->mem_base0 +
+ ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
+ reg->i2o_host_interrupt_mask = acb->mem_base0 +
+ ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK;
+ reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET;
+ reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST;
+ reg->host_int_status = acb->mem_base0 +
+ ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
+ reg->pcief0_int_enable = acb->mem_base0 +
+ ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE;
+ reg->inbound_msgaddr0 = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_MESSAGE0;
+ reg->inbound_msgaddr1 = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_MESSAGE1;
+ reg->outbound_msgaddr0 = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
+ reg->outbound_msgaddr1 = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_MESSAGE1;
+ reg->inbound_doorbell = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_DOORBELL;
+ reg->outbound_doorbell = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL;
+ reg->outbound_doorbell_enable = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE;
+ reg->inboundlist_base_low = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW;
+ reg->inboundlist_base_high = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH;
+ reg->inboundlist_write_pointer = acb->mem_base0 +
+ ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
+ reg->outboundlist_base_low = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW;
+ reg->outboundlist_base_high = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH;
+ reg->outboundlist_copy_pointer = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER;
+ reg->outboundlist_read_pointer = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
+ reg->outboundlist_interrupt_cause = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE;
+ reg->outboundlist_interrupt_enable = acb->mem_base0 +
+ ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE;
+ reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER;
+ reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER;
+ reg->msgcode_rwbuffer = acb->mem_base0 +
+ ARCMSR_ARC1214_MESSAGE_RWBUFFER;
+ iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
+ iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
+ iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ if (readl(acb->pmuD->outbound_doorbell) &
+ ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+ acb->pmuD->outbound_doorbell);/*clear interrupt*/
+ }
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ /* wait message ready */
+ if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", acb->host->host_no);
+ dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ acb->dma_coherent2, acb->dma_coherent_handle2);
+ return false;
+ }
+ count = 8;
+ while (count) {
+ *acb_firm_model = readb(iop_firm_model);
+ acb_firm_model++;
+ iop_firm_model++;
+ count--;
+ }
+ count = 16;
+ while (count) {
+ *acb_firm_version = readb(iop_firm_version);
+ acb_firm_version++;
+ iop_firm_version++;
+ count--;
+ }
+ count = 16;
+ while (count) {
+ *acb_device_map = readb(iop_device_map);
+ acb_device_map++;
+ iop_device_map++;
+ count--;
+ }
+ acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+ /*firm_signature,1,00-03*/
+ acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+ /*firm_request_len,1,04-07*/
+ acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+ /*firm_numbers_queue,2,08-11*/
+ acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+ /*firm_sdram_size,3,12-15*/
+ acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+ /*firm_hd_channels,4,16-19*/
+ acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
+ pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+ acb->host->host_no,
+ acb->firm_model,
+ acb->firm_version);
+ return true;
+}
+
+static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
+{
+ bool rtn = false;
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ rtn = arcmsr_hbaA_get_config(acb);
+ break;
+ case ACB_ADAPTER_TYPE_B:
+ rtn = arcmsr_hbaB_get_config(acb);
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ rtn = arcmsr_hbaC_get_config(acb);
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ rtn = arcmsr_hbaD_get_config(acb);
+ break;
+ default:
+ break;
+ }
+ if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
+ acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+ else
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ acb->host->can_queue = acb->maxOutstanding;
+ return rtn;
+}
+
+static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ struct CommandControlBlock *ccb;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
+ int rtn;
+ bool error;
+ polling_hba_ccb_retry:
+ poll_count++;
+ outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
+ while (1) {
+ if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
+ if (poll_ccb_done){
+ rtn = SUCCESS;
+ break;
+ }else {
+ msleep(25);
+ if (poll_count > 100){
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hba_ccb_retry;
+ }
+ }
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+ ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
+ poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
+ if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
+ if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
+ printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
+ " poll command abort successfully \n"
+ , acb->host->host_no
+ , ccb->pcmd->device->id
+ , (u32)ccb->pcmd->device->lun
+ , ccb);
+ ccb->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(ccb);
+ continue;
+ }
+ printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
+ " command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n"
+ , acb->host->host_no
+ , ccb
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
+ arcmsr_report_ccb_state(acb, ccb, error);
+ }
+ return rtn;
+}
+
+static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ struct CommandControlBlock *ccb;
+ uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
+ int index, rtn;
+ bool error;
+ polling_hbb_ccb_retry:
+
+ poll_count++;
+ /* clear doorbell interrupt */
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ while(1){
+ index = reg->doneq_index;
+ flag_ccb = reg->done_qbuffer[index];
+ if (flag_ccb == 0) {
+ if (poll_ccb_done){
+ rtn = SUCCESS;
+ break;
+ }else {
+ msleep(25);
+ if (poll_count > 100){
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbb_ccb_retry;
+ }
+ }
+ reg->done_qbuffer[index] = 0;
+ index++;
+ /*if last index number set it to 0 */
+ index %= ARCMSR_MAX_HBB_POSTQUEUE;
+ reg->doneq_index = index;
+ /* check if command done with no error*/
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+ ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
+ poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
+ if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
+ if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
+ printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
+ " poll command abort successfully \n"
+ ,acb->host->host_no
+ ,ccb->pcmd->device->id
+ ,(u32)ccb->pcmd->device->lun
+ ,ccb);
+ ccb->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(ccb);
+ continue;
+ }
+ printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
+ " command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n"
+ , acb->host->host_no
+ , ccb
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
+ arcmsr_report_ccb_state(acb, ccb, error);
+ }
+ return rtn;
+}
+
+static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ uint32_t flag_ccb, ccb_cdb_phy;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ bool error;
+ struct CommandControlBlock *pCCB;
+ uint32_t poll_ccb_done = 0, poll_count = 0;
+ int rtn;
+polling_hbc_ccb_retry:
+ poll_count++;
+ while (1) {
+ if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 100) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbc_ccb_retry;
+ }
+ }
+ flag_ccb = readl(&reg->outbound_queueport_low);
+ ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
+ pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ /* check ifcommand done with no error*/
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
+ " poll command abort successfully \n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
+ " command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ return rtn;
+}
+
+static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
+ int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
+ unsigned long flags;
+ struct ARCMSR_CDB *arcmsr_cdb;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_D *pmu = acb->pmuD;
+
+polling_hbaD_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+ doneq_index = pmu->doneq_index;
+ if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbaD_ccb_retry;
+ }
+ }
+ toggle = doneq_index & 0x4000;
+ index_stripped = (doneq_index & 0xFFF) + 1;
+ index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+ pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+ ((toggle ^ 0x4000) + 1);
+ doneq_index = pmu->doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
+ ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
+ ccb_cdb_phy);
+ pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
+ arcmsr_cdb);
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ if ((pCCB->acb != acb) ||
+ (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+ "abort successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+ ? true : false;
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ return rtn;
+}
+
+static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ int rtn = 0;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
+ break;
+ }
+ return rtn;
+}
+
+static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
+{
+ uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
+ dma_addr_t dma_coherent_handle;
+
+ /*
+ ********************************************************************
+ ** here we need to tell iop 331 our freeccb.HighPart
+ ** if freeccb.HighPart is not zero
+ ********************************************************************
+ */
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_B:
+ case ACB_ADAPTER_TYPE_D:
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ break;
+ default:
+ dma_coherent_handle = acb->dma_coherent_handle;
+ break;
+ }
+ cdb_phyaddr = lower_32_bits(dma_coherent_handle);
+ cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
+ acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
+ /*
+ ***********************************************************************
+ ** if adapter type B, set window of "post command Q"
+ ***********************************************************************
+ */
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ if (cdb_phyaddr_hi32 != 0) {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, \
+ &reg->message_rwbuffer[0]);
+ writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
+ &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
+ part physical address timeout\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ uint32_t __iomem *rwbuffer;
+
+ struct MessageUnit_B *reg = acb->pmuB;
+ reg->postq_index = 0;
+ reg->doneq_index = 0;
+ writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
+ acb->host->host_no);
+ return 1;
+ }
+ rwbuffer = reg->message_rwbuffer;
+ /* driver "set config" signature */
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
+ /* normal should be zero */
+ writel(cdb_phyaddr_hi32, rwbuffer++);
+ /* postQ size (256 + 8)*4 */
+ writel(cdb_phyaddr, rwbuffer++);
+ /* doneQ size (256 + 8)*4 */
+ writel(cdb_phyaddr + 1056, rwbuffer++);
+ /* ccb maxQ size must be --> [(256 + 8)*4]*/
+ writel(1056, rwbuffer);
+
+ writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
+ timeout \n",acb->host->host_no);
+ return 1;
+ }
+ writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ pr_err("arcmsr%d: can't set driver mode.\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ if (cdb_phyaddr_hi32 != 0) {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+
+ printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
+ acb->adapter_index, cdb_phyaddr_hi32);
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
+ timeout \n", acb->host->host_no);
+ return 1;
+ }
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = acb->pmuD;
+ reg->postq_index = 0;
+ reg->doneq_index = 0;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
+ writel(cdb_phyaddr_hi32, rwbuffer++);
+ writel(cdb_phyaddr, rwbuffer++);
+ writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
+ sizeof(struct InBound_SRB)), rwbuffer++);
+ writel(0x100, rwbuffer);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
+{
+ uint32_t firmware_state = 0;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ do {
+ firmware_state = readl(reg->iop2drv_doorbell);
+ } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ do {
+ firmware_state = readl(reg->outbound_msgaddr1);
+ } while ((firmware_state &
+ ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
+ }
+ break;
+ }
+}
+
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ return;
+ } else {
+ acb->fw_flag = FW_NORMAL;
+ if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ return;
+ }
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ }
+ return;
+}
+
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ return;
+ } else {
+ acb->fw_flag = FW_NORMAL;
+ if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ return;
+ }
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ }
+ return;
+}
+
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ return;
+ } else {
+ acb->fw_flag = FW_NORMAL;
+ if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ return;
+ }
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ }
+ return;
+}
+
+static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
+ ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
+ ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ mod_timer(&acb->eternal_timer,
+ jiffies + msecs_to_jiffies(6 * HZ));
+ } else {
+ acb->fw_flag = FW_NORMAL;
+ if (atomic_read(&acb->ante_token_value) ==
+ atomic_read(&acb->rq_map_token)) {
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ atomic_set(&acb->ante_token_value,
+ atomic_read(&acb->rq_map_token));
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ return;
+ }
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
+ reg->inbound_msgaddr0);
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ }
+}
+
+static void arcmsr_request_device_map(unsigned long pacb)
+{
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_hbaA_request_device_map(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_hbaB_request_device_map(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ arcmsr_hbaC_request_device_map(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_request_device_map(acb);
+ break;
+ }
+}
+
+static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ acb->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
+ rebulid' timeout \n", acb->host->host_no);
+ }
+}
+
+static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ acb->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
+ rebulid' timeout \n",acb->host->host_no);
+ }
+}
+
+static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
+ rebulid' timeout \n", pACB->host->host_no);
+ }
+ return;
+}
+
+static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_D *pmu = pACB->pmuD;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
+ if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+ "background rebulid' timeout\n", pACB->host->host_no);
+ }
+}
+
+static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ arcmsr_hbaA_start_bgrb(acb);
+ break;
+ case ACB_ADAPTER_TYPE_B:
+ arcmsr_hbaB_start_bgrb(acb);
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ arcmsr_hbaC_start_bgrb(acb);
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ arcmsr_hbaD_start_bgrb(acb);
+ break;
+ }
+}
+
+static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ uint32_t outbound_doorbell;
+ /* empty doorbell Qbuffer if door bell ringed */
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ /*clear doorbell interrupt */
+ writel(outbound_doorbell, &reg->outbound_doorbell);
+ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ /*clear interrupt and message state*/
+ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ /* let IOP know data has been read */
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ uint32_t outbound_doorbell, i;
+ /* empty doorbell Qbuffer if door bell ringed */
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ writel(outbound_doorbell, &reg->outbound_doorbell_clear);
+ writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ if (outbound_doorbell &
+ ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(outbound_doorbell,
+ &reg->outbound_doorbell_clear);
+ writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
+ &reg->inbound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ uint32_t outbound_doorbell, i;
+ /* empty doorbell Qbuffer if door bell ringed */
+ outbound_doorbell = readl(reg->outbound_doorbell);
+ writel(outbound_doorbell, reg->outbound_doorbell);
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+ reg->inbound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(reg->outbound_doorbell);
+ if (outbound_doorbell &
+ ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
+ writel(outbound_doorbell,
+ reg->outbound_doorbell);
+ writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+ reg->inbound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
+ }
+}
+
+static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ return;
+ case ACB_ADAPTER_TYPE_B:
+ {
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
+ if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
+ return;
+ }
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C:
+ return;
+ }
+ return;
+}
+
+static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
+{
+ uint8_t value[64];
+ int i, count = 0;
+ struct MessageUnit_A __iomem *pmuA = acb->pmuA;
+ struct MessageUnit_C __iomem *pmuC = acb->pmuC;
+ struct MessageUnit_D *pmuD = acb->pmuD;
+
+ /* backup pci config data */
+ printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
+ for (i = 0; i < 64; i++) {
+ pci_read_config_byte(acb->pdev, i, &value[i]);
+ }
+ /* hardware reset signal */
+ if ((acb->dev_id == 0x1680)) {
+ writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
+ } else if ((acb->dev_id == 0x1880)) {
+ do {
+ count++;
+ writel(0xF, &pmuC->write_sequence);
+ writel(0x4, &pmuC->write_sequence);
+ writel(0xB, &pmuC->write_sequence);
+ writel(0x2, &pmuC->write_sequence);
+ writel(0x7, &pmuC->write_sequence);
+ writel(0xD, &pmuC->write_sequence);
+ } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
+ writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if ((acb->dev_id == 0x1214)) {
+ writel(0x20, pmuD->reset_request);
+ } else {
+ pci_write_config_byte(acb->pdev, 0x84, 0x20);
+ }
+ msleep(2000);
+ /* write back pci config data */
+ for (i = 0; i < 64; i++) {
+ pci_write_config_byte(acb->pdev, i, value[i]);
+ }
+ msleep(1000);
+ return;
+}
+static void arcmsr_iop_init(struct AdapterControlBlock *acb)
+{
+ uint32_t intmask_org;
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_wait_firmware_ready(acb);
+ arcmsr_iop_confirm(acb);
+ /*start background rebuild*/
+ arcmsr_start_adapter_bgrb(acb);
+ /* empty doorbell Qbuffer if door bell ringed */
+ arcmsr_clear_doorbell_queue_buffer(acb);
+ arcmsr_enable_eoi_mode(acb);
+ /* enable outbound Post Queue,outbound doorbell Interrupt */
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ acb->acb_flags |= ACB_F_IOP_INITED;
+}
+
+static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
+{
+ struct CommandControlBlock *ccb;
+ uint32_t intmask_org;
+ uint8_t rtnval = 0x00;
+ int i = 0;
+ unsigned long flags;
+
+ if (atomic_read(&acb->ccboutstandingcount) != 0) {
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ /* talk to iop 331 outstanding command aborted */
+ rtnval = arcmsr_abort_allcmd(acb);
+ /* clear all outbound posted Q */
+ arcmsr_done4abort_postqueue(acb);
+ for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ ccb = acb->pccb_pool[i];
+ if (ccb->startdone == ARCMSR_CCB_START) {
+ scsi_dma_unmap(ccb->pcmd);
+ ccb->startdone = ARCMSR_CCB_DONE;
+ ccb->ccb_flags = 0;
+ spin_lock_irqsave(&acb->ccblist_lock, flags);
+ list_add_tail(&ccb->list, &acb->ccb_free_list);
+ spin_unlock_irqrestore(&acb->ccblist_lock, flags);
+ }
+ }
+ atomic_set(&acb->ccboutstandingcount, 0);
+ /* enable all outbound interrupt */
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ return rtnval;
+ }
+ return rtnval;
+}
+
+static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct AdapterControlBlock *acb;
+ uint32_t intmask_org, outbound_doorbell;
+ int retry_count = 0;
+ int rtn = FAILED;
+ acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
+ printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
+ acb->num_resets++;
+
+ switch(acb->adapter_type){
+ case ACB_ADAPTER_TYPE_A:{
+ if (acb->acb_flags & ACB_F_BUS_RESET){
+ long timeout;
+ printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
+ if (timeout) {
+ return SUCCESS;
+ }
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ struct MessageUnit_A __iomem *reg;
+ reg = acb->pmuA;
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+sleep_again:
+ ssleep(ARCMSR_SLEEPTIME);
+ if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
+ printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
+ return FAILED;
+ }
+ retry_count++;
+ goto sleep_again;
+ }
+ acb->acb_flags |= ACB_F_IOP_INITED;
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_get_firmware_spec(acb);
+ arcmsr_start_adapter_bgrb(acb);
+ /* clear Qbuffer if door bell ringed */
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
+ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
+ /* enable outbound Post Queue,outbound doorbell Interrupt */
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
+ rtn = SUCCESS;
+ }
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B:{
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = FAILED;
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
+ }
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C:{
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
+ if (timeout) {
+ return SUCCESS;
+ }
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ struct MessageUnit_C __iomem *reg;
+ reg = acb->pmuC;
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+sleep:
+ ssleep(ARCMSR_SLEEPTIME);
+ if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
+ printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
+ return FAILED;
+ }
+ retry_count++;
+ goto sleep;
+ }
+ acb->acb_flags |= ACB_F_IOP_INITED;
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_get_firmware_spec(acb);
+ arcmsr_start_adapter_bgrb(acb);
+ /* clear Qbuffer if door bell ringed */
+ arcmsr_clear_doorbell_queue_buffer(acb);
+ /* enable outbound Post Queue,outbound doorbell Interrupt */
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
+ rtn = SUCCESS;
+ }
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is an bus reset"
+ " eh proceeding.......\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ struct MessageUnit_D *reg;
+ reg = acb->pmuD;
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+ nap:
+ ssleep(ARCMSR_SLEEPTIME);
+ if ((readl(reg->sample_at_reset) & 0x80) != 0) {
+ pr_err("arcmsr%d: waiting for "
+ "hw bus reset return, retry=%d\n",
+ acb->host->host_no, retry_count);
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_err("arcmsr%d: waiting for hw bus"
+ " reset return, "
+ "RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
+ }
+ retry_count++;
+ goto nap;
+ }
+ acb->acb_flags |= ACB_F_IOP_INITED;
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_get_firmware_spec(acb);
+ arcmsr_start_adapter_bgrb(acb);
+ arcmsr_clear_doorbell_queue_buffer(acb);
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer,
+ jiffies + msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_err("arcmsr: scsi bus reset "
+ "eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer,
+ jiffies + msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
+ }
+ break;
+ }
+ }
+ return rtn;
+}
+
+static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *ccb)
+{
+ int rtn;
+ rtn = arcmsr_polling_ccbdone(acb, ccb);
+ return rtn;
+}
+
+static int arcmsr_abort(struct scsi_cmnd *cmd)
+{
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *)cmd->device->host->hostdata;
+ int i = 0;
+ int rtn = FAILED;
+ uint32_t intmask_org;
+
+ printk(KERN_NOTICE
+ "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
+ acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
+ acb->acb_flags |= ACB_F_ABORT;
+ acb->num_aborts++;
+ /*
+ ************************************************
+ ** the all interrupt service routine is locked
+ ** we need to handle it as soon as possible and exit
+ ************************************************
+ */
+ if (!atomic_read(&acb->ccboutstandingcount)) {
+ acb->acb_flags &= ~ACB_F_ABORT;
+ return rtn;
+ }
+
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ struct CommandControlBlock *ccb = acb->pccb_pool[i];
+ if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
+ ccb->startdone = ARCMSR_CCB_ABORTED;
+ rtn = arcmsr_abort_one_cmd(acb, ccb);
+ break;
+ }
+ }
+ acb->acb_flags &= ~ACB_F_ABORT;
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ return rtn;
+}
+
+static const char *arcmsr_info(struct Scsi_Host *host)
+{
+ struct AdapterControlBlock *acb =
+ (struct AdapterControlBlock *) host->hostdata;
+ static char buf[256];
+ char *type;
+ int raid6 = 1;
+ switch (acb->pdev->device) {
+ case PCI_DEVICE_ID_ARECA_1110:
+ case PCI_DEVICE_ID_ARECA_1200:
+ case PCI_DEVICE_ID_ARECA_1202:
+ case PCI_DEVICE_ID_ARECA_1210:
+ raid6 = 0;
+ /*FALLTHRU*/
+ case PCI_DEVICE_ID_ARECA_1120:
+ case PCI_DEVICE_ID_ARECA_1130:
+ case PCI_DEVICE_ID_ARECA_1160:
+ case PCI_DEVICE_ID_ARECA_1170:
+ case PCI_DEVICE_ID_ARECA_1201:
+ case PCI_DEVICE_ID_ARECA_1220:
+ case PCI_DEVICE_ID_ARECA_1230:
+ case PCI_DEVICE_ID_ARECA_1260:
+ case PCI_DEVICE_ID_ARECA_1270:
+ case PCI_DEVICE_ID_ARECA_1280:
+ type = "SATA";
+ break;
+ case PCI_DEVICE_ID_ARECA_1214:
+ case PCI_DEVICE_ID_ARECA_1380:
+ case PCI_DEVICE_ID_ARECA_1381:
+ case PCI_DEVICE_ID_ARECA_1680:
+ case PCI_DEVICE_ID_ARECA_1681:
+ case PCI_DEVICE_ID_ARECA_1880:
+ type = "SAS/SATA";
+ break;
+ default:
+ type = "unknown";
+ raid6 = 0;
+ break;
+ }
+ sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
+ type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
+ return buf;
+}
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
new file mode 100644
index 000000000..cfd172a43
--- /dev/null
+++ b/drivers/scsi/arm/Kconfig
@@ -0,0 +1,84 @@
+#
+# SCSI driver configuration for Acorn
+#
+config SCSI_ACORNSCSI_3
+ tristate "Acorn SCSI card (aka30) support"
+ depends on ARCH_ACORN && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This enables support for the Acorn SCSI card (aka30). If you have an
+ Acorn system with one of these, say Y. If unsure, say N.
+
+config SCSI_ACORNSCSI_TAGGED_QUEUE
+ bool "Support SCSI 2 Tagged queueing"
+ depends on SCSI_ACORNSCSI_3
+ help
+ Say Y here to enable tagged queuing support on the Acorn SCSI card.
+
+ This is a feature of SCSI-2 which improves performance: the host
+ adapter can send several SCSI commands to a device's queue even if
+ previous commands haven't finished yet. Some SCSI devices don't
+ implement this properly, so the safe answer is N.
+
+config SCSI_ACORNSCSI_SYNC
+ bool "Support SCSI 2 Synchronous Transfers"
+ depends on SCSI_ACORNSCSI_3
+ help
+ Say Y here to enable synchronous transfer negotiation with all
+ targets on the Acorn SCSI card.
+
+ In general, this improves performance; however some SCSI devices
+ don't implement it properly, so the safe answer is N.
+
+config SCSI_ARXESCSI
+ tristate "ARXE SCSI support"
+ depends on ARCH_ACORN && SCSI
+ help
+ Around 1991, Arxe Systems Limited released a high density floppy
+ disc interface for the Acorn Archimedes range, to allow the use of
+ HD discs from the then new A5000 on earlier models. This interface
+ was either sold on its own or with an integral SCSI controller.
+ Technical details on this NCR53c94-based device are available at
+ <http://www.cryton.demon.co.uk/acornbits/scsi_arxe.html>
+ Say Y here to compile in support for the SCSI controller.
+
+config SCSI_CUMANA_2
+ tristate "CumanaSCSI II support"
+ depends on ARCH_ACORN && SCSI
+ help
+ This enables support for the Cumana SCSI II card. If you have an
+ Acorn system with one of these, say Y. If unsure, say N.
+
+config SCSI_EESOXSCSI
+ tristate "EESOX support"
+ depends on ARCH_ACORN && SCSI
+ help
+ This enables support for the EESOX SCSI card. If you have an Acorn
+ system with one of these, say Y, otherwise say N.
+
+config SCSI_POWERTECSCSI
+ tristate "PowerTec support"
+ depends on ARCH_ACORN && SCSI
+ help
+ This enables support for the Powertec SCSI card on Acorn systems. If
+ you have one of these, say Y. If unsure, say N.
+
+comment "The following drivers are not fully supported"
+ depends on ARCH_ACORN
+
+config SCSI_CUMANA_1
+ tristate "CumanaSCSI I support"
+ depends on ARCH_ACORN && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This enables support for the Cumana SCSI I card. If you have an
+ Acorn system with one of these, say Y. If unsure, say N.
+
+config SCSI_OAK1
+ tristate "Oak SCSI support"
+ depends on ARCH_ACORN && SCSI
+ select SCSI_SPI_ATTRS
+ help
+ This enables support for the Oak SCSI card. If you have an Acorn
+ system with one of these, say Y. If unsure, say N.
+
diff --git a/drivers/scsi/arm/Makefile b/drivers/scsi/arm/Makefile
new file mode 100644
index 000000000..16c3e86a6
--- /dev/null
+++ b/drivers/scsi/arm/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for drivers/scsi/arm
+#
+
+acornscsi_mod-objs := acornscsi.o acornscsi-io.o
+
+obj-$(CONFIG_SCSI_ACORNSCSI_3) += acornscsi_mod.o queue.o msgqueue.o
+obj-$(CONFIG_SCSI_ARXESCSI) += arxescsi.o fas216.o queue.o msgqueue.o
+obj-$(CONFIG_SCSI_CUMANA_1) += cumana_1.o
+obj-$(CONFIG_SCSI_CUMANA_2) += cumana_2.o fas216.o queue.o msgqueue.o
+obj-$(CONFIG_SCSI_OAK1) += oak.o
+obj-$(CONFIG_SCSI_POWERTECSCSI) += powertec.o fas216.o queue.o msgqueue.o
+obj-$(CONFIG_SCSI_EESOXSCSI) += eesox.o fas216.o queue.o msgqueue.o
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
new file mode 100644
index 000000000..22171b211
--- /dev/null
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -0,0 +1,138 @@
+/*
+ * linux/drivers/acorn/scsi/acornscsi-io.S: Acorn SCSI card IO
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <mach/hardware.h>
+
+#if defined(__APCS_32__)
+#define LOADREGS(t,r,l...) ldm##t r, l
+#elif defined(__APCS_26__)
+#define LOADREGS(t,r,l...) ldm##t r, l##^
+#endif
+
+@ Purpose: transfer a block of data from the acorn scsi card to memory
+@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length)
+@ Returns: nothing
+
+ .align
+ENTRY(__acornscsi_in)
+ stmfd sp!, {r4 - r7, lr}
+ bic r0, r0, #3
+ mov lr, #0xff
+ orr lr, lr, #0xff00
+acornscsi_in16lp:
+ subs r2, r2, #16
+ bmi acornscsi_in8
+ ldmia r0!, {r3, r4, r5, r6}
+ and r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ and r4, r5, lr
+ orr r4, r4, r6, lsl #16
+ ldmia r0!, {r5, r6, r7, ip}
+ and r5, r5, lr
+ orr r5, r5, r6, lsl #16
+ and r6, r7, lr
+ orr r6, r6, ip, lsl #16
+ stmia r1!, {r3 - r6}
+ bne acornscsi_in16lp
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+acornscsi_in8: adds r2, r2, #8
+ bmi acornscsi_in4
+ ldmia r0!, {r3, r4, r5, r6}
+ and r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ and r4, r5, lr
+ orr r4, r4, r6, lsl #16
+ stmia r1!, {r3 - r4}
+ LOADREGS(eqfd, sp!, {r4 - r7, pc})
+ sub r2, r2, #8
+
+acornscsi_in4: adds r2, r2, #4
+ bmi acornscsi_in2
+ ldmia r0!, {r3, r4}
+ and r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ str r3, [r1], #4
+ LOADREGS(eqfd, sp!, {r4 - r7, pc})
+ sub r2, r2, #4
+
+acornscsi_in2: adds r2, r2, #2
+ ldr r3, [r0], #4
+ and r3, r3, lr
+ strb r3, [r1], #1
+ mov r3, r3, lsr #8
+ strplb r3, [r1], #1
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+@ Purpose: transfer a block of data from memory to the acorn scsi card
+@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length)
+@ Returns: nothing
+
+ENTRY(__acornscsi_out)
+ stmfd sp!, {r4 - r6, lr}
+ bic r0, r0, #3
+acornscsi_out16lp:
+ subs r2, r2, #16
+ bmi acornscsi_out8
+ ldmia r1!, {r4, r6, ip, lr}
+ mov r3, r4, lsl #16
+ orr r3, r3, r3, lsr #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r4, lsl #16
+ mov r5, r6, lsl #16
+ orr r5, r5, r5, lsr #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r6, lsl #16
+ stmia r0!, {r3, r4, r5, r6}
+ mov r3, ip, lsl #16
+ orr r3, r3, r3, lsr #16
+ mov r4, ip, lsr #16
+ orr r4, r4, r4, lsl #16
+ mov ip, lr, lsl #16
+ orr ip, ip, ip, lsr #16
+ mov lr, lr, lsr #16
+ orr lr, lr, lr, lsl #16
+ stmia r0!, {r3, r4, ip, lr}
+ bne acornscsi_out16lp
+ LOADREGS(fd, sp!, {r4 - r6, pc})
+
+acornscsi_out8: adds r2, r2, #8
+ bmi acornscsi_out4
+ ldmia r1!, {r4, r6}
+ mov r3, r4, lsl #16
+ orr r3, r3, r3, lsr #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r4, lsl #16
+ mov r5, r6, lsl #16
+ orr r5, r5, r5, lsr #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r6, lsl #16
+ stmia r0!, {r3, r4, r5, r6}
+ LOADREGS(eqfd, sp!, {r4 - r6, pc})
+
+ sub r2, r2, #8
+acornscsi_out4: adds r2, r2, #4
+ bmi acornscsi_out2
+ ldr r4, [r1], #4
+ mov r3, r4, lsl #16
+ orr r3, r3, r3, lsr #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r4, lsl #16
+ stmia r0!, {r3, r4}
+ LOADREGS(eqfd, sp!, {r4 - r6, pc})
+
+ sub r2, r2, #4
+acornscsi_out2: adds r2, r2, #2
+ ldr r3, [r1], #2
+ strb r3, [r0], #1
+ mov r3, r3, lsr #8
+ strplb r3, [r0], #1
+ LOADREGS(fd, sp!, {r4 - r6, pc})
+
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
new file mode 100644
index 000000000..deaaf8498
--- /dev/null
+++ b/drivers/scsi/arm/acornscsi.c
@@ -0,0 +1,3014 @@
+/*
+ * linux/drivers/acorn/scsi/acornscsi.c
+ *
+ * Acorn SCSI 3 driver
+ * By R.M.King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Abandoned using the Select and Transfer command since there were
+ * some nasty races between our software and the target devices that
+ * were not easy to solve, and the device errata had a lot of entries
+ * for this command, some of them quite nasty...
+ *
+ * Changelog:
+ * 26-Sep-1997 RMK Re-jigged to use the queue module.
+ * Re-coded state machine to be based on driver
+ * state not scsi state. Should be easier to debug.
+ * Added acornscsi_release to clean up properly.
+ * Updated proc/scsi reporting.
+ * 05-Oct-1997 RMK Implemented writing to SCSI devices.
+ * 06-Oct-1997 RMK Corrected small (non-serious) bug with the connect/
+ * reconnect race condition causing a warning message.
+ * 12-Oct-1997 RMK Added catch for re-entering interrupt routine.
+ * 15-Oct-1997 RMK Improved handling of commands.
+ * 27-Jun-1998 RMK Changed asm/delay.h to linux/delay.h.
+ * 13-Dec-1998 RMK Better abort code and command handling. Extra state
+ * transitions added to allow dodgy devices to work.
+ */
+#define DEBUG_NO_WRITE 1
+#define DEBUG_QUEUES 2
+#define DEBUG_DMA 4
+#define DEBUG_ABORT 8
+#define DEBUG_DISCON 16
+#define DEBUG_CONNECT 32
+#define DEBUG_PHASES 64
+#define DEBUG_WRITE 128
+#define DEBUG_LINK 256
+#define DEBUG_MESSAGES 512
+#define DEBUG_RESET 1024
+#define DEBUG_ALL (DEBUG_RESET|DEBUG_MESSAGES|DEBUG_LINK|DEBUG_WRITE|\
+ DEBUG_PHASES|DEBUG_CONNECT|DEBUG_DISCON|DEBUG_ABORT|\
+ DEBUG_DMA|DEBUG_QUEUES)
+
+/* DRIVER CONFIGURATION
+ *
+ * SCSI-II Tagged queue support.
+ *
+ * I don't have any SCSI devices that support it, so it is totally untested
+ * (except to make sure that it doesn't interfere with any non-tagging
+ * devices). It is not fully implemented either - what happens when a
+ * tagging device reconnects???
+ *
+ * You can tell if you have a device that supports tagged queueing my
+ * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
+ * as '2 TAG'.
+ *
+ * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
+ * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
+ * comment out the undef.
+ */
+#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+/*
+ * SCSI-II Synchronous transfer support.
+ *
+ * Tried and tested...
+ *
+ * SDTR_SIZE - maximum number of un-acknowledged bytes (0 = off, 12 = max)
+ * SDTR_PERIOD - period of REQ signal (min=125, max=1020)
+ * DEFAULT_PERIOD - default REQ period.
+ */
+#define SDTR_SIZE 12
+#define SDTR_PERIOD 125
+#define DEFAULT_PERIOD 500
+
+/*
+ * Debugging information
+ *
+ * DEBUG - bit mask from list above
+ * DEBUG_TARGET - is defined to the target number if you want to debug
+ * a specific target. [only recon/write/dma].
+ */
+#define DEBUG (DEBUG_RESET|DEBUG_WRITE|DEBUG_NO_WRITE)
+/* only allow writing to SCSI device 0 */
+#define NO_WRITE 0xFE
+/*#define DEBUG_TARGET 2*/
+/*
+ * Select timeout time (in 10ms units)
+ *
+ * This is the timeout used between the start of selection and the WD33C93
+ * chip deciding that the device isn't responding.
+ */
+#define TIMEOUT_TIME 10
+/*
+ * Define this if you want to have verbose explanation of SCSI
+ * status/messages.
+ */
+#undef CONFIG_ACORNSCSI_CONSTANTS
+/*
+ * Define this if you want to use the on board DMAC [don't remove this option]
+ * If not set, then use PIO mode (not currently supported).
+ */
+#define USE_DMAC
+
+/*
+ * ====================================================================================
+ */
+
+#ifdef DEBUG_TARGET
+#define DBG(cmd,xxx...) \
+ if (cmd->device->id == DEBUG_TARGET) { \
+ xxx; \
+ }
+#else
+#define DBG(cmd,xxx...) xxx
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/stringify.h>
+#include <linux/io.h>
+
+#include <asm/ecard.h>
+
+#include "../scsi.h"
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+#include "acornscsi.h"
+#include "msgqueue.h"
+#include "scsi.h"
+
+#include <scsi/scsicam.h>
+
+#define VER_MAJOR 2
+#define VER_MINOR 0
+#define VER_PATCH 6
+
+#ifndef ABORT_TAG
+#define ABORT_TAG 0xd
+#else
+#error "Yippee! ABORT TAG is now defined! Remove this error!"
+#endif
+
+#ifdef USE_DMAC
+/*
+ * DMAC setup parameters
+ */
+#define INIT_DEVCON0 (DEVCON0_RQL|DEVCON0_EXW|DEVCON0_CMP)
+#define INIT_DEVCON1 (DEVCON1_BHLD)
+#define DMAC_READ (MODECON_READ)
+#define DMAC_WRITE (MODECON_WRITE)
+#define INIT_SBICDMA (CTRL_DMABURST)
+
+#define scsi_xferred have_data_in
+
+/*
+ * Size of on-board DMA buffer
+ */
+#define DMAC_BUFFER_SIZE 65536
+#endif
+
+#define STATUS_BUFFER_TO_PRINT 24
+
+unsigned int sdtr_period = SDTR_PERIOD;
+unsigned int sdtr_size = SDTR_SIZE;
+
+static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
+ unsigned int result);
+static int acornscsi_reconnect_finish(AS_Host *host);
+static void acornscsi_dma_cleanup(AS_Host *host);
+static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+
+/* ====================================================================================
+ * Miscellaneous
+ */
+
+/* Offsets from MEMC base */
+#define SBIC_REGIDX 0x2000
+#define SBIC_REGVAL 0x2004
+#define DMAC_OFFSET 0x3000
+
+/* Offsets from FAST IOC base */
+#define INT_REG 0x2000
+#define PAGE_REG 0x3000
+
+static inline void sbic_arm_write(AS_Host *host, unsigned int reg, unsigned int value)
+{
+ writeb(reg, host->base + SBIC_REGIDX);
+ writeb(value, host->base + SBIC_REGVAL);
+}
+
+static inline int sbic_arm_read(AS_Host *host, unsigned int reg)
+{
+ if(reg == SBIC_ASR)
+ return readl(host->base + SBIC_REGIDX) & 255;
+ writeb(reg, host->base + SBIC_REGIDX);
+ return readl(host->base + SBIC_REGVAL) & 255;
+}
+
+#define sbic_arm_writenext(host, val) writeb((val), (host)->base + SBIC_REGVAL)
+#define sbic_arm_readnext(host) readb((host)->base + SBIC_REGVAL)
+
+#ifdef USE_DMAC
+#define dmac_read(host,reg) \
+ readb((host)->base + DMAC_OFFSET + ((reg) << 2))
+
+#define dmac_write(host,reg,value) \
+ ({ writeb((value), (host)->base + DMAC_OFFSET + ((reg) << 2)); })
+
+#define dmac_clearintr(host) writeb(0, (host)->fast + INT_REG)
+
+static inline unsigned int dmac_address(AS_Host *host)
+{
+ return dmac_read(host, DMAC_TXADRHI) << 16 |
+ dmac_read(host, DMAC_TXADRMD) << 8 |
+ dmac_read(host, DMAC_TXADRLO);
+}
+
+static
+void acornscsi_dumpdma(AS_Host *host, char *where)
+{
+ unsigned int mode, addr, len;
+
+ mode = dmac_read(host, DMAC_MODECON);
+ addr = dmac_address(host);
+ len = dmac_read(host, DMAC_TXCNTHI) << 8 |
+ dmac_read(host, DMAC_TXCNTLO);
+
+ printk("scsi%d: %s: DMAC %02x @%06x+%04x msk %02x, ",
+ host->host->host_no, where,
+ mode, addr, (len + 1) & 0xffff,
+ dmac_read(host, DMAC_MASKREG));
+
+ printk("DMA @%06x, ", host->dma.start_addr);
+ printk("BH @%p +%04x, ", host->scsi.SCp.ptr,
+ host->scsi.SCp.this_residual);
+ printk("DT @+%04x ST @+%04x", host->dma.transferred,
+ host->scsi.SCp.scsi_xferred);
+ printk("\n");
+}
+#endif
+
+static
+unsigned long acornscsi_sbic_xfcount(AS_Host *host)
+{
+ unsigned long length;
+
+ length = sbic_arm_read(host, SBIC_TRANSCNTH) << 16;
+ length |= sbic_arm_readnext(host) << 8;
+ length |= sbic_arm_readnext(host);
+
+ return length;
+}
+
+static int
+acornscsi_sbic_wait(AS_Host *host, int stat_mask, int stat, int timeout, char *msg)
+{
+ int asr;
+
+ do {
+ asr = sbic_arm_read(host, SBIC_ASR);
+
+ if ((asr & stat_mask) == stat)
+ return 0;
+
+ udelay(1);
+ } while (--timeout);
+
+ printk("scsi%d: timeout while %s\n", host->host->host_no, msg);
+
+ return -1;
+}
+
+static
+int acornscsi_sbic_issuecmd(AS_Host *host, int command)
+{
+ if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command"))
+ return -1;
+
+ sbic_arm_write(host, SBIC_CMND, command);
+
+ return 0;
+}
+
+static void
+acornscsi_csdelay(unsigned int cs)
+{
+ unsigned long target_jiffies, flags;
+
+ target_jiffies = jiffies + 1 + cs * HZ / 100;
+
+ local_save_flags(flags);
+ local_irq_enable();
+
+ while (time_before(jiffies, target_jiffies)) barrier();
+
+ local_irq_restore(flags);
+}
+
+static
+void acornscsi_resetcard(AS_Host *host)
+{
+ unsigned int i, timeout;
+
+ /* assert reset line */
+ host->card.page_reg = 0x80;
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
+
+ /* wait 3 cs. SCSI standard says 25ms. */
+ acornscsi_csdelay(3);
+
+ host->card.page_reg = 0;
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
+
+ /*
+ * Should get a reset from the card
+ */
+ timeout = 1000;
+ do {
+ if (readb(host->fast + INT_REG) & 8)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ if (timeout == 0)
+ printk("scsi%d: timeout while resetting card\n",
+ host->host->host_no);
+
+ sbic_arm_read(host, SBIC_ASR);
+ sbic_arm_read(host, SBIC_SSR);
+
+ /* setup sbic - WD33C93A */
+ sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id);
+ sbic_arm_write(host, SBIC_CMND, CMND_RESET);
+
+ /*
+ * Command should cause a reset interrupt
+ */
+ timeout = 1000;
+ do {
+ if (readb(host->fast + INT_REG) & 8)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ if (timeout == 0)
+ printk("scsi%d: timeout while resetting card\n",
+ host->host->host_no);
+
+ sbic_arm_read(host, SBIC_ASR);
+ if (sbic_arm_read(host, SBIC_SSR) != 0x01)
+ printk(KERN_CRIT "scsi%d: WD33C93A didn't give enhanced reset interrupt\n",
+ host->host->host_no);
+
+ sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
+ sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
+ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
+
+ host->card.page_reg = 0x40;
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
+
+ /* setup dmac - uPC71071 */
+ dmac_write(host, DMAC_INIT, 0);
+#ifdef USE_DMAC
+ dmac_write(host, DMAC_INIT, INIT_8BIT);
+ dmac_write(host, DMAC_CHANNEL, CHANNEL_0);
+ dmac_write(host, DMAC_DEVCON0, INIT_DEVCON0);
+ dmac_write(host, DMAC_DEVCON1, INIT_DEVCON1);
+#endif
+
+ host->SCpnt = NULL;
+ host->scsi.phase = PHASE_IDLE;
+ host->scsi.disconnectable = 0;
+
+ memset(host->busyluns, 0, sizeof(host->busyluns));
+
+ for (i = 0; i < 8; i++) {
+ host->device[i].sync_state = SYNC_NEGOCIATE;
+ host->device[i].disconnect_ok = 1;
+ }
+
+ /* wait 25 cs. SCSI standard says 250ms. */
+ acornscsi_csdelay(25);
+}
+
+/*=============================================================================================
+ * Utility routines (eg. debug)
+ */
+#ifdef CONFIG_ACORNSCSI_CONSTANTS
+static char *acornscsi_interrupttype[] = {
+ "rst", "suc", "p/a", "3",
+ "term", "5", "6", "7",
+ "serv", "9", "a", "b",
+ "c", "d", "e", "f"
+};
+
+static signed char acornscsi_map[] = {
+ 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 2, -1, -1, -1, -1, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, -1, -1, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 15, 16, 17, 18, 19, -1, -1, 20, 4, 5, 6, 7, 8, 9, 10, 11,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 21, 22, -1, -1, -1, 23, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+static char *acornscsi_interruptcode[] = {
+ /* 0 */
+ "reset - normal mode", /* 00 */
+ "reset - advanced mode", /* 01 */
+
+ /* 2 */
+ "sel", /* 11 */
+ "sel+xfer", /* 16 */
+ "data-out", /* 18 */
+ "data-in", /* 19 */
+ "cmd", /* 1A */
+ "stat", /* 1B */
+ "??-out", /* 1C */
+ "??-in", /* 1D */
+ "msg-out", /* 1E */
+ "msg-in", /* 1F */
+
+ /* 12 */
+ "/ACK asserted", /* 20 */
+ "save-data-ptr", /* 21 */
+ "{re}sel", /* 22 */
+
+ /* 15 */
+ "inv cmd", /* 40 */
+ "unexpected disconnect", /* 41 */
+ "sel timeout", /* 42 */
+ "P err", /* 43 */
+ "P err+ATN", /* 44 */
+ "bad status byte", /* 47 */
+
+ /* 21 */
+ "resel, no id", /* 80 */
+ "resel", /* 81 */
+ "discon", /* 85 */
+};
+
+static
+void print_scsi_status(unsigned int ssr)
+{
+ if (acornscsi_map[ssr] != -1)
+ printk("%s:%s",
+ acornscsi_interrupttype[(ssr >> 4)],
+ acornscsi_interruptcode[acornscsi_map[ssr]]);
+ else
+ printk("%X:%X", ssr >> 4, ssr & 0x0f);
+}
+#endif
+
+static
+void print_sbic_status(int asr, int ssr, int cmdphase)
+{
+#ifdef CONFIG_ACORNSCSI_CONSTANTS
+ printk("sbic: %c%c%c%c%c%c ",
+ asr & ASR_INT ? 'I' : 'i',
+ asr & ASR_LCI ? 'L' : 'l',
+ asr & ASR_BSY ? 'B' : 'b',
+ asr & ASR_CIP ? 'C' : 'c',
+ asr & ASR_PE ? 'P' : 'p',
+ asr & ASR_DBR ? 'D' : 'd');
+ printk("scsi: ");
+ print_scsi_status(ssr);
+ printk(" ph %02X\n", cmdphase);
+#else
+ printk("sbic: %02X scsi: %X:%X ph: %02X\n",
+ asr, (ssr & 0xf0)>>4, ssr & 0x0f, cmdphase);
+#endif
+}
+
+static void
+acornscsi_dumplogline(AS_Host *host, int target, int line)
+{
+ unsigned long prev;
+ signed int ptr;
+
+ ptr = host->status_ptr[target] - STATUS_BUFFER_TO_PRINT;
+ if (ptr < 0)
+ ptr += STATUS_BUFFER_SIZE;
+
+ printk("%c: %3s:", target == 8 ? 'H' : '0' + target,
+ line == 0 ? "ph" : line == 1 ? "ssr" : "int");
+
+ prev = host->status[target][ptr].when;
+
+ for (; ptr != host->status_ptr[target]; ptr = (ptr + 1) & (STATUS_BUFFER_SIZE - 1)) {
+ unsigned long time_diff;
+
+ if (!host->status[target][ptr].when)
+ continue;
+
+ switch (line) {
+ case 0:
+ printk("%c%02X", host->status[target][ptr].irq ? '-' : ' ',
+ host->status[target][ptr].ph);
+ break;
+
+ case 1:
+ printk(" %02X", host->status[target][ptr].ssr);
+ break;
+
+ case 2:
+ time_diff = host->status[target][ptr].when - prev;
+ prev = host->status[target][ptr].when;
+ if (time_diff == 0)
+ printk("==^");
+ else if (time_diff >= 100)
+ printk(" ");
+ else
+ printk(" %02ld", time_diff);
+ break;
+ }
+ }
+
+ printk("\n");
+}
+
+static
+void acornscsi_dumplog(AS_Host *host, int target)
+{
+ do {
+ acornscsi_dumplogline(host, target, 0);
+ acornscsi_dumplogline(host, target, 1);
+ acornscsi_dumplogline(host, target, 2);
+
+ if (target == 8)
+ break;
+
+ target = 8;
+ } while (1);
+}
+
+static
+char acornscsi_target(AS_Host *host)
+{
+ if (host->SCpnt)
+ return '0' + host->SCpnt->device->id;
+ return 'H';
+}
+
+/*
+ * Prototype: cmdtype_t acornscsi_cmdtype(int command)
+ * Purpose : differentiate READ from WRITE from other commands
+ * Params : command - command to interpret
+ * Returns : CMD_READ - command reads data,
+ * CMD_WRITE - command writes data,
+ * CMD_MISC - everything else
+ */
+static inline
+cmdtype_t acornscsi_cmdtype(int command)
+{
+ switch (command) {
+ case WRITE_6: case WRITE_10: case WRITE_12:
+ return CMD_WRITE;
+ case READ_6: case READ_10: case READ_12:
+ return CMD_READ;
+ default:
+ return CMD_MISC;
+ }
+}
+
+/*
+ * Prototype: int acornscsi_datadirection(int command)
+ * Purpose : differentiate between commands that have a DATA IN phase
+ * and a DATA OUT phase
+ * Params : command - command to interpret
+ * Returns : DATADIR_OUT - data out phase expected
+ * DATADIR_IN - data in phase expected
+ */
+static
+datadir_t acornscsi_datadirection(int command)
+{
+ switch (command) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG: case 0xea:
+ return DATADIR_OUT;
+ default:
+ return DATADIR_IN;
+ }
+}
+
+/*
+ * Purpose : provide values for synchronous transfers with 33C93.
+ * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * Modified by Russell King for 8MHz WD33C93A
+ */
+static struct sync_xfer_tbl {
+ unsigned int period_ns;
+ unsigned char reg_value;
+} sync_xfer_table[] = {
+ { 1, 0x20 }, { 249, 0x20 }, { 374, 0x30 },
+ { 499, 0x40 }, { 624, 0x50 }, { 749, 0x60 },
+ { 874, 0x70 }, { 999, 0x00 }, { 0, 0 }
+};
+
+/*
+ * Prototype: int acornscsi_getperiod(unsigned char syncxfer)
+ * Purpose : period for the synchronous transfer setting
+ * Params : syncxfer SYNCXFER register value
+ * Returns : period in ns.
+ */
+static
+int acornscsi_getperiod(unsigned char syncxfer)
+{
+ int i;
+
+ syncxfer &= 0xf0;
+ if (syncxfer == 0x10)
+ syncxfer = 0;
+
+ for (i = 1; sync_xfer_table[i].period_ns; i++)
+ if (syncxfer == sync_xfer_table[i].reg_value)
+ return sync_xfer_table[i].period_ns;
+ return 0;
+}
+
+/*
+ * Prototype: int round_period(unsigned int period)
+ * Purpose : return index into above table for a required REQ period
+ * Params : period - time (ns) for REQ
+ * Returns : table index
+ * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ */
+static inline
+int round_period(unsigned int period)
+{
+ int i;
+
+ for (i = 1; sync_xfer_table[i].period_ns; i++) {
+ if ((period <= sync_xfer_table[i].period_ns) &&
+ (period > sync_xfer_table[i - 1].period_ns))
+ return i;
+ }
+ return 7;
+}
+
+/*
+ * Prototype: unsigned char calc_sync_xfer(unsigned int period, unsigned int offset)
+ * Purpose : calculate value for 33c93s SYNC register
+ * Params : period - time (ns) for REQ
+ * offset - offset in bytes between REQ/ACK
+ * Returns : value for SYNC register
+ * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ */
+static
+unsigned char calc_sync_xfer(unsigned int period, unsigned int offset)
+{
+ return sync_xfer_table[round_period(period)].reg_value |
+ ((offset < SDTR_SIZE) ? offset : SDTR_SIZE);
+}
+
+/* ====================================================================================
+ * Command functions
+ */
+/*
+ * Function: acornscsi_kick(AS_Host *host)
+ * Purpose : kick next command to interface
+ * Params : host - host to send command to
+ * Returns : INTR_IDLE if idle, otherwise INTR_PROCESSING
+ * Notes : interrupts are always disabled!
+ */
+static
+intr_ret_t acornscsi_kick(AS_Host *host)
+{
+ int from_queue = 0;
+ struct scsi_cmnd *SCpnt;
+
+ /* first check to see if a command is waiting to be executed */
+ SCpnt = host->origSCpnt;
+ host->origSCpnt = NULL;
+
+ /* retrieve next command */
+ if (!SCpnt) {
+ SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns);
+ if (!SCpnt)
+ return INTR_IDLE;
+
+ from_queue = 1;
+ }
+
+ if (host->scsi.disconnectable && host->SCpnt) {
+ queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
+ host->scsi.disconnectable = 0;
+#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
+ DBG(host->SCpnt, printk("scsi%d.%c: moved command to disconnected queue\n",
+ host->host->host_no, acornscsi_target(host)));
+#endif
+ host->SCpnt = NULL;
+ }
+
+ /*
+ * If we have an interrupt pending, then we may have been reselected.
+ * In this case, we don't want to write to the registers
+ */
+ if (!(sbic_arm_read(host, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) {
+ sbic_arm_write(host, SBIC_DESTID, SCpnt->device->id);
+ sbic_arm_write(host, SBIC_CMND, CMND_SELWITHATN);
+ }
+
+ /*
+ * claim host busy - all of these must happen atomically wrt
+ * our interrupt routine. Failure means command loss.
+ */
+ host->scsi.phase = PHASE_CONNECTING;
+ host->SCpnt = SCpnt;
+ host->scsi.SCp = SCpnt->SCp;
+ host->dma.xfer_setup = 0;
+ host->dma.xfer_required = 0;
+ host->dma.xfer_done = 0;
+
+#if (DEBUG & (DEBUG_ABORT|DEBUG_CONNECT))
+ DBG(SCpnt,printk("scsi%d.%c: starting cmd %02X\n",
+ host->host->host_no, '0' + SCpnt->device->id,
+ SCpnt->cmnd[0]));
+#endif
+
+ if (from_queue) {
+#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+ /*
+ * tagged queueing - allocate a new tag to this command
+ */
+ if (SCpnt->device->simple_tags) {
+ SCpnt->device->current_tag += 1;
+ if (SCpnt->device->current_tag == 0)
+ SCpnt->device->current_tag = 1;
+ SCpnt->tag = SCpnt->device->current_tag;
+ } else
+#endif
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x07), host->busyluns);
+
+ host->stats.removes += 1;
+
+ switch (acornscsi_cmdtype(SCpnt->cmnd[0])) {
+ case CMD_WRITE:
+ host->stats.writes += 1;
+ break;
+ case CMD_READ:
+ host->stats.reads += 1;
+ break;
+ case CMD_MISC:
+ host->stats.miscs += 1;
+ break;
+ }
+ }
+
+ return INTR_PROCESSING;
+}
+
+/*
+ * Function: void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result)
+ * Purpose : complete processing for command
+ * Params : host - interface that completed
+ * result - driver byte of result
+ */
+static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
+ unsigned int result)
+{
+ struct scsi_cmnd *SCpnt = *SCpntp;
+
+ /* clean up */
+ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
+
+ host->stats.fins += 1;
+
+ if (SCpnt) {
+ *SCpntp = NULL;
+
+ acornscsi_dma_cleanup(host);
+
+ SCpnt->result = result << 16 | host->scsi.SCp.Message << 8 | host->scsi.SCp.Status;
+
+ /*
+ * In theory, this should not happen. In practice, it seems to.
+ * Only trigger an error if the device attempts to report all happy
+ * but with untransferred buffers... If we don't do something, then
+ * data loss will occur. Should we check SCpnt->underflow here?
+ * It doesn't appear to be set to something meaningful by the higher
+ * levels all the time.
+ */
+ if (result == DID_OK) {
+ int xfer_warn = 0;
+
+ if (SCpnt->underflow == 0) {
+ if (host->scsi.SCp.ptr &&
+ acornscsi_cmdtype(SCpnt->cmnd[0]) != CMD_MISC)
+ xfer_warn = 1;
+ } else {
+ if (host->scsi.SCp.scsi_xferred < SCpnt->underflow ||
+ host->scsi.SCp.scsi_xferred != host->dma.transferred)
+ xfer_warn = 1;
+ }
+
+ /* ANSI standard says: (SCSI-2 Rev 10c Sect 5.6.6)
+ * Targets which break data transfers into multiple
+ * connections shall end each successful connection
+ * (except possibly the last) with a SAVE DATA
+ * POINTER - DISCONNECT message sequence.
+ *
+ * This makes it difficult to ensure that a transfer has
+ * completed. If we reach the end of a transfer during
+ * the command, then we can only have finished the transfer.
+ * therefore, if we seem to have some data remaining, this
+ * is not a problem.
+ */
+ if (host->dma.xfer_done)
+ xfer_warn = 0;
+
+ if (xfer_warn) {
+ switch (status_byte(SCpnt->result)) {
+ case CHECK_CONDITION:
+ case COMMAND_TERMINATED:
+ case BUSY:
+ case QUEUE_FULL:
+ case RESERVATION_CONFLICT:
+ break;
+
+ default:
+ scmd_printk(KERN_ERR, SCpnt,
+ "incomplete data transfer detected: "
+ "result=%08X", SCpnt->result);
+ scsi_print_command(SCpnt);
+ acornscsi_dumpdma(host, "done");
+ acornscsi_dumplog(host, SCpnt->device->id);
+ set_host_byte(SCpnt, DID_ERROR);
+ }
+ }
+ }
+
+ if (!SCpnt->scsi_done)
+ panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no);
+
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), host->busyluns);
+
+ SCpnt->scsi_done(SCpnt);
+ } else
+ printk("scsi%d: null command in acornscsi_done", host->host->host_no);
+
+ host->scsi.phase = PHASE_IDLE;
+}
+
+/* ====================================================================================
+ * DMA routines
+ */
+/*
+ * Purpose : update SCSI Data Pointer
+ * Notes : this will only be one SG entry or less
+ */
+static
+void acornscsi_data_updateptr(AS_Host *host, struct scsi_pointer *SCp, unsigned int length)
+{
+ SCp->ptr += length;
+ SCp->this_residual -= length;
+
+ if (SCp->this_residual == 0 && next_SCp(SCp) == 0)
+ host->dma.xfer_done = 1;
+}
+
+/*
+ * Prototype: void acornscsi_data_read(AS_Host *host, char *ptr,
+ * unsigned int start_addr, unsigned int length)
+ * Purpose : read data from DMA RAM
+ * Params : host - host to transfer from
+ * ptr - DRAM address
+ * start_addr - host mem address
+ * length - number of bytes to transfer
+ * Notes : this will only be one SG entry or less
+ */
+static
+void acornscsi_data_read(AS_Host *host, char *ptr,
+ unsigned int start_addr, unsigned int length)
+{
+ extern void __acornscsi_in(void __iomem *, char *buf, int len);
+ unsigned int page, offset, len = length;
+
+ page = (start_addr >> 12);
+ offset = start_addr & ((1 << 12) - 1);
+
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
+
+ while (len > 0) {
+ unsigned int this_len;
+
+ if (len + offset > (1 << 12))
+ this_len = (1 << 12) - offset;
+ else
+ this_len = len;
+
+ __acornscsi_in(host->base + (offset << 1), ptr, this_len);
+
+ offset += this_len;
+ ptr += this_len;
+ len -= this_len;
+
+ if (offset == (1 << 12)) {
+ offset = 0;
+ page ++;
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
+ }
+ }
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
+}
+
+/*
+ * Prototype: void acornscsi_data_write(AS_Host *host, char *ptr,
+ * unsigned int start_addr, unsigned int length)
+ * Purpose : write data to DMA RAM
+ * Params : host - host to transfer from
+ * ptr - DRAM address
+ * start_addr - host mem address
+ * length - number of bytes to transfer
+ * Notes : this will only be one SG entry or less
+ */
+static
+void acornscsi_data_write(AS_Host *host, char *ptr,
+ unsigned int start_addr, unsigned int length)
+{
+ extern void __acornscsi_out(void __iomem *, char *buf, int len);
+ unsigned int page, offset, len = length;
+
+ page = (start_addr >> 12);
+ offset = start_addr & ((1 << 12) - 1);
+
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
+
+ while (len > 0) {
+ unsigned int this_len;
+
+ if (len + offset > (1 << 12))
+ this_len = (1 << 12) - offset;
+ else
+ this_len = len;
+
+ __acornscsi_out(host->base + (offset << 1), ptr, this_len);
+
+ offset += this_len;
+ ptr += this_len;
+ len -= this_len;
+
+ if (offset == (1 << 12)) {
+ offset = 0;
+ page ++;
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
+ }
+ }
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
+}
+
+/* =========================================================================================
+ * On-board DMA routines
+ */
+#ifdef USE_DMAC
+/*
+ * Prototype: void acornscsi_dmastop(AS_Host *host)
+ * Purpose : stop all DMA
+ * Params : host - host on which to stop DMA
+ * Notes : This is called when leaving DATA IN/OUT phase,
+ * or when interface is RESET
+ */
+static inline
+void acornscsi_dma_stop(AS_Host *host)
+{
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+ dmac_clearintr(host);
+
+#if (DEBUG & DEBUG_DMA)
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "stop"));
+#endif
+}
+
+/*
+ * Function: void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
+ * Purpose : setup DMA controller for data transfer
+ * Params : host - host to setup
+ * direction - data transfer direction
+ * Notes : This is called when entering DATA I/O phase, not
+ * while we're in a DATA I/O phase
+ */
+static
+void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
+{
+ unsigned int address, length, mode;
+
+ host->dma.direction = direction;
+
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+
+ if (direction == DMA_OUT) {
+#if (DEBUG & DEBUG_NO_WRITE)
+ if (NO_WRITE & (1 << host->SCpnt->device->id)) {
+ printk(KERN_CRIT "scsi%d.%c: I can't handle DMA_OUT!\n",
+ host->host->host_no, acornscsi_target(host));
+ return;
+ }
+#endif
+ mode = DMAC_WRITE;
+ } else
+ mode = DMAC_READ;
+
+ /*
+ * Allocate some buffer space, limited to half the buffer size
+ */
+ length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2);
+ if (length) {
+ host->dma.start_addr = address = host->dma.free_addr;
+ host->dma.free_addr = (host->dma.free_addr + length) &
+ (DMAC_BUFFER_SIZE - 1);
+
+ /*
+ * Transfer data to DMA memory
+ */
+ if (direction == DMA_OUT)
+ acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr,
+ length);
+
+ length -= 1;
+ dmac_write(host, DMAC_TXCNTLO, length);
+ dmac_write(host, DMAC_TXCNTHI, length >> 8);
+ dmac_write(host, DMAC_TXADRLO, address);
+ dmac_write(host, DMAC_TXADRMD, address >> 8);
+ dmac_write(host, DMAC_TXADRHI, 0);
+ dmac_write(host, DMAC_MODECON, mode);
+ dmac_write(host, DMAC_MASKREG, MASK_OFF);
+
+#if (DEBUG & DEBUG_DMA)
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "strt"));
+#endif
+ host->dma.xfer_setup = 1;
+ }
+}
+
+/*
+ * Function: void acornscsi_dma_cleanup(AS_Host *host)
+ * Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
+ * Params : host - host to finish
+ * Notes : This is called when a command is:
+ * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ * : This must not return until all transfers are completed.
+ */
+static
+void acornscsi_dma_cleanup(AS_Host *host)
+{
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+ dmac_clearintr(host);
+
+ /*
+ * Check for a pending transfer
+ */
+ if (host->dma.xfer_required) {
+ host->dma.xfer_required = 0;
+ if (host->dma.direction == DMA_IN)
+ acornscsi_data_read(host, host->dma.xfer_ptr,
+ host->dma.xfer_start, host->dma.xfer_length);
+ }
+
+ /*
+ * Has a transfer been setup?
+ */
+ if (host->dma.xfer_setup) {
+ unsigned int transferred;
+
+ host->dma.xfer_setup = 0;
+
+#if (DEBUG & DEBUG_DMA)
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "cupi"));
+#endif
+
+ /*
+ * Calculate number of bytes transferred from DMA.
+ */
+ transferred = dmac_address(host) - host->dma.start_addr;
+ host->dma.transferred += transferred;
+
+ if (host->dma.direction == DMA_IN)
+ acornscsi_data_read(host, host->scsi.SCp.ptr,
+ host->dma.start_addr, transferred);
+
+ /*
+ * Update SCSI pointers
+ */
+ acornscsi_data_updateptr(host, &host->scsi.SCp, transferred);
+#if (DEBUG & DEBUG_DMA)
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "cupo"));
+#endif
+ }
+}
+
+/*
+ * Function: void acornscsi_dmacintr(AS_Host *host)
+ * Purpose : handle interrupts from DMAC device
+ * Params : host - host to process
+ * Notes : If reading, we schedule the read to main memory &
+ * allow the transfer to continue.
+ * : If writing, we fill the onboard DMA memory from main
+ * memory.
+ * : Called whenever DMAC finished it's current transfer.
+ */
+static
+void acornscsi_dma_intr(AS_Host *host)
+{
+ unsigned int address, length, transferred;
+
+#if (DEBUG & DEBUG_DMA)
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "inti"));
+#endif
+
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+ dmac_clearintr(host);
+
+ /*
+ * Calculate amount transferred via DMA
+ */
+ transferred = dmac_address(host) - host->dma.start_addr;
+ host->dma.transferred += transferred;
+
+ /*
+ * Schedule DMA transfer off board
+ */
+ if (host->dma.direction == DMA_IN) {
+ host->dma.xfer_start = host->dma.start_addr;
+ host->dma.xfer_length = transferred;
+ host->dma.xfer_ptr = host->scsi.SCp.ptr;
+ host->dma.xfer_required = 1;
+ }
+
+ acornscsi_data_updateptr(host, &host->scsi.SCp, transferred);
+
+ /*
+ * Allocate some buffer space, limited to half the on-board RAM size
+ */
+ length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2);
+ if (length) {
+ host->dma.start_addr = address = host->dma.free_addr;
+ host->dma.free_addr = (host->dma.free_addr + length) &
+ (DMAC_BUFFER_SIZE - 1);
+
+ /*
+ * Transfer data to DMA memory
+ */
+ if (host->dma.direction == DMA_OUT)
+ acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr,
+ length);
+
+ length -= 1;
+ dmac_write(host, DMAC_TXCNTLO, length);
+ dmac_write(host, DMAC_TXCNTHI, length >> 8);
+ dmac_write(host, DMAC_TXADRLO, address);
+ dmac_write(host, DMAC_TXADRMD, address >> 8);
+ dmac_write(host, DMAC_TXADRHI, 0);
+ dmac_write(host, DMAC_MASKREG, MASK_OFF);
+
+#if (DEBUG & DEBUG_DMA)
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "into"));
+#endif
+ } else {
+ host->dma.xfer_setup = 0;
+#if 0
+ /*
+ * If the interface still wants more, then this is an error.
+ * We give it another byte, but we also attempt to raise an
+ * attention condition. We continue giving one byte until
+ * the device recognises the attention.
+ */
+ if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+
+ dmac_write(host, DMAC_TXCNTLO, 0);
+ dmac_write(host, DMAC_TXCNTHI, 0);
+ dmac_write(host, DMAC_TXADRLO, 0);
+ dmac_write(host, DMAC_TXADRMD, 0);
+ dmac_write(host, DMAC_TXADRHI, 0);
+ dmac_write(host, DMAC_MASKREG, MASK_OFF);
+ }
+#endif
+ }
+}
+
+/*
+ * Function: void acornscsi_dma_xfer(AS_Host *host)
+ * Purpose : transfer data between AcornSCSI and memory
+ * Params : host - host to process
+ */
+static
+void acornscsi_dma_xfer(AS_Host *host)
+{
+ host->dma.xfer_required = 0;
+
+ if (host->dma.direction == DMA_IN)
+ acornscsi_data_read(host, host->dma.xfer_ptr,
+ host->dma.xfer_start, host->dma.xfer_length);
+}
+
+/*
+ * Function: void acornscsi_dma_adjust(AS_Host *host)
+ * Purpose : adjust DMA pointers & count for bytes transferred to
+ * SBIC but not SCSI bus.
+ * Params : host - host to adjust DMA count for
+ */
+static
+void acornscsi_dma_adjust(AS_Host *host)
+{
+ if (host->dma.xfer_setup) {
+ signed long transferred;
+#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE))
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "adji"));
+#endif
+ /*
+ * Calculate correct DMA address - DMA is ahead of SCSI bus while
+ * writing.
+ * host->scsi.SCp.scsi_xferred is the number of bytes
+ * actually transferred to/from the SCSI bus.
+ * host->dma.transferred is the number of bytes transferred
+ * over DMA since host->dma.start_addr was last set.
+ *
+ * real_dma_addr = host->dma.start_addr + host->scsi.SCp.scsi_xferred
+ * - host->dma.transferred
+ */
+ transferred = host->scsi.SCp.scsi_xferred - host->dma.transferred;
+ if (transferred < 0)
+ printk("scsi%d.%c: Ack! DMA write correction %ld < 0!\n",
+ host->host->host_no, acornscsi_target(host), transferred);
+ else if (transferred == 0)
+ host->dma.xfer_setup = 0;
+ else {
+ transferred += host->dma.start_addr;
+ dmac_write(host, DMAC_TXADRLO, transferred);
+ dmac_write(host, DMAC_TXADRMD, transferred >> 8);
+ dmac_write(host, DMAC_TXADRHI, transferred >> 16);
+#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE))
+ DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo"));
+#endif
+ }
+ }
+}
+#endif
+
+/* =========================================================================================
+ * Data I/O
+ */
+static int
+acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int max_timeout)
+{
+ unsigned int asr, timeout = max_timeout;
+ int my_ptr = *ptr;
+
+ while (my_ptr < len) {
+ asr = sbic_arm_read(host, SBIC_ASR);
+
+ if (asr & ASR_DBR) {
+ timeout = max_timeout;
+
+ sbic_arm_write(host, SBIC_DATA, bytes[my_ptr++]);
+ } else if (asr & ASR_INT)
+ break;
+ else if (--timeout == 0)
+ break;
+ udelay(1);
+ }
+
+ *ptr = my_ptr;
+
+ return (timeout == 0) ? -1 : 0;
+}
+
+/*
+ * Function: void acornscsi_sendcommand(AS_Host *host)
+ * Purpose : send a command to a target
+ * Params : host - host which is connected to target
+ */
+static void
+acornscsi_sendcommand(AS_Host *host)
+{
+ struct scsi_cmnd *SCpnt = host->SCpnt;
+
+ sbic_arm_write(host, SBIC_TRANSCNTH, 0);
+ sbic_arm_writenext(host, 0);
+ sbic_arm_writenext(host, SCpnt->cmd_len - host->scsi.SCp.sent_command);
+
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
+
+ if (acornscsi_write_pio(host, SCpnt->cmnd,
+ (int *)&host->scsi.SCp.sent_command, SCpnt->cmd_len, 1000000))
+ printk("scsi%d: timeout while sending command\n", host->host->host_no);
+
+ host->scsi.phase = PHASE_COMMAND;
+}
+
+static
+void acornscsi_sendmessage(AS_Host *host)
+{
+ unsigned int message_length = msgqueue_msglength(&host->scsi.msgs);
+ unsigned int msgnr;
+ struct message *msg;
+
+#if (DEBUG & DEBUG_MESSAGES)
+ printk("scsi%d.%c: sending message ",
+ host->host->host_no, acornscsi_target(host));
+#endif
+
+ switch (message_length) {
+ case 0:
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT);
+
+ acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1");
+
+ sbic_arm_write(host, SBIC_DATA, NOP);
+
+ host->scsi.last_message = NOP;
+#if (DEBUG & DEBUG_MESSAGES)
+ printk("NOP");
+#endif
+ break;
+
+ case 1:
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT);
+ msg = msgqueue_getmsg(&host->scsi.msgs, 0);
+
+ acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2");
+
+ sbic_arm_write(host, SBIC_DATA, msg->msg[0]);
+
+ host->scsi.last_message = msg->msg[0];
+#if (DEBUG & DEBUG_MESSAGES)
+ spi_print_msg(msg->msg);
+#endif
+ break;
+
+ default:
+ /*
+ * ANSI standard says: (SCSI-2 Rev 10c Sect 5.6.14)
+ * 'When a target sends this (MESSAGE_REJECT) message, it
+ * shall change to MESSAGE IN phase and send this message
+ * prior to requesting additional message bytes from the
+ * initiator. This provides an interlock so that the
+ * initiator can determine which message byte is rejected.
+ */
+ sbic_arm_write(host, SBIC_TRANSCNTH, 0);
+ sbic_arm_writenext(host, 0);
+ sbic_arm_writenext(host, message_length);
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
+
+ msgnr = 0;
+ while ((msg = msgqueue_getmsg(&host->scsi.msgs, msgnr++)) != NULL) {
+ unsigned int i;
+#if (DEBUG & DEBUG_MESSAGES)
+ spi_print_msg(msg);
+#endif
+ i = 0;
+ if (acornscsi_write_pio(host, msg->msg, &i, msg->length, 1000000))
+ printk("scsi%d: timeout while sending message\n", host->host->host_no);
+
+ host->scsi.last_message = msg->msg[0];
+ if (msg->msg[0] == EXTENDED_MESSAGE)
+ host->scsi.last_message |= msg->msg[2] << 8;
+
+ if (i != msg->length)
+ break;
+ }
+ break;
+ }
+#if (DEBUG & DEBUG_MESSAGES)
+ printk("\n");
+#endif
+}
+
+/*
+ * Function: void acornscsi_readstatusbyte(AS_Host *host)
+ * Purpose : Read status byte from connected target
+ * Params : host - host connected to target
+ */
+static
+void acornscsi_readstatusbyte(AS_Host *host)
+{
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT);
+ acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte");
+ host->scsi.SCp.Status = sbic_arm_read(host, SBIC_DATA);
+}
+
+/*
+ * Function: unsigned char acornscsi_readmessagebyte(AS_Host *host)
+ * Purpose : Read one message byte from connected target
+ * Params : host - host connected to target
+ */
+static
+unsigned char acornscsi_readmessagebyte(AS_Host *host)
+{
+ unsigned char message;
+
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT);
+
+ acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte");
+
+ message = sbic_arm_read(host, SBIC_DATA);
+
+ /* wait for MSGIN-XFER-PAUSED */
+ acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte");
+
+ sbic_arm_read(host, SBIC_SSR);
+
+ return message;
+}
+
+/*
+ * Function: void acornscsi_message(AS_Host *host)
+ * Purpose : Read complete message from connected target & action message
+ * Params : host - host connected to target
+ */
+static
+void acornscsi_message(AS_Host *host)
+{
+ unsigned char message[16];
+ unsigned int msgidx = 0, msglen = 1;
+
+ do {
+ message[msgidx] = acornscsi_readmessagebyte(host);
+
+ switch (msgidx) {
+ case 0:
+ if (message[0] == EXTENDED_MESSAGE ||
+ (message[0] >= 0x20 && message[0] <= 0x2f))
+ msglen = 2;
+ break;
+
+ case 1:
+ if (message[0] == EXTENDED_MESSAGE)
+ msglen += message[msgidx];
+ break;
+ }
+ msgidx += 1;
+ if (msgidx < msglen) {
+ acornscsi_sbic_issuecmd(host, CMND_NEGATEACK);
+
+ /* wait for next msg-in */
+ acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack");
+ sbic_arm_read(host, SBIC_SSR);
+ }
+ } while (msgidx < msglen);
+
+#if (DEBUG & DEBUG_MESSAGES)
+ printk("scsi%d.%c: message in: ",
+ host->host->host_no, acornscsi_target(host));
+ spi_print_msg(message);
+ printk("\n");
+#endif
+
+ if (host->scsi.phase == PHASE_RECONNECTED) {
+ /*
+ * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
+ * 'Whenever a target reconnects to an initiator to continue
+ * a tagged I/O process, the SIMPLE QUEUE TAG message shall
+ * be sent immediately following the IDENTIFY message...'
+ */
+ if (message[0] == SIMPLE_QUEUE_TAG)
+ host->scsi.reconnected.tag = message[1];
+ if (acornscsi_reconnect_finish(host))
+ host->scsi.phase = PHASE_MSGIN;
+ }
+
+ switch (message[0]) {
+ case ABORT:
+ case ABORT_TAG:
+ case COMMAND_COMPLETE:
+ if (host->scsi.phase != PHASE_STATUSIN) {
+ printk(KERN_ERR "scsi%d.%c: command complete following non-status in phase?\n",
+ host->host->host_no, acornscsi_target(host));
+ acornscsi_dumplog(host, host->SCpnt->device->id);
+ }
+ host->scsi.phase = PHASE_DONE;
+ host->scsi.SCp.Message = message[0];
+ break;
+
+ case SAVE_POINTERS:
+ /*
+ * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.20)
+ * 'The SAVE DATA POINTER message is sent from a target to
+ * direct the initiator to copy the active data pointer to
+ * the saved data pointer for the current I/O process.
+ */
+ acornscsi_dma_cleanup(host);
+ host->SCpnt->SCp = host->scsi.SCp;
+ host->SCpnt->SCp.sent_command = 0;
+ host->scsi.phase = PHASE_MSGIN;
+ break;
+
+ case RESTORE_POINTERS:
+ /*
+ * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.19)
+ * 'The RESTORE POINTERS message is sent from a target to
+ * direct the initiator to copy the most recently saved
+ * command, data, and status pointers for the I/O process
+ * to the corresponding active pointers. The command and
+ * status pointers shall be restored to the beginning of
+ * the present command and status areas.'
+ */
+ acornscsi_dma_cleanup(host);
+ host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.phase = PHASE_MSGIN;
+ break;
+
+ case DISCONNECT:
+ /*
+ * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 6.4.2)
+ * 'On those occasions when an error or exception condition occurs
+ * and the target elects to repeat the information transfer, the
+ * target may repeat the transfer either issuing a RESTORE POINTERS
+ * message or by disconnecting without issuing a SAVE POINTERS
+ * message. When reconnection is completed, the most recent
+ * saved pointer values are restored.'
+ */
+ acornscsi_dma_cleanup(host);
+ host->scsi.phase = PHASE_DISCONNECT;
+ break;
+
+ case MESSAGE_REJECT:
+#if 0 /* this isn't needed any more */
+ /*
+ * If we were negociating sync transfer, we don't yet know if
+ * this REJECT is for the sync transfer or for the tagged queue/wide
+ * transfer. Re-initiate sync transfer negotiation now, and if
+ * we got a REJECT in response to SDTR, then it'll be set to DONE.
+ */
+ if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST)
+ host->device[host->SCpnt->device->id].sync_state = SYNC_NEGOCIATE;
+#endif
+
+ /*
+ * If we have any messages waiting to go out, then assert ATN now
+ */
+ if (msgqueue_msglength(&host->scsi.msgs))
+ acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
+
+ switch (host->scsi.last_message) {
+#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ /*
+ * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
+ * If a target does not implement tagged queuing and a queue tag
+ * message is received, it shall respond with a MESSAGE REJECT
+ * message and accept the I/O process as if it were untagged.
+ */
+ printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
+ host->host->host_no, acornscsi_target(host));
+ host->SCpnt->device->simple_tags = 0;
+ set_bit(host->SCpnt->device->id * 8 +
+ (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
+ break;
+#endif
+ case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
+ /*
+ * Target can't handle synchronous transfers
+ */
+ printk(KERN_NOTICE "scsi%d.%c: Using asynchronous transfer\n",
+ host->host->host_no, acornscsi_target(host));
+ host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA;
+ host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS;
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case QUEUE_FULL:
+ /* TODO: target queue is full */
+ break;
+
+ case SIMPLE_QUEUE_TAG:
+ /* tag queue reconnect... message[1] = queue tag. Print something to indicate something happened! */
+ printk("scsi%d.%c: reconnect queue tag %02X\n",
+ host->host->host_no, acornscsi_target(host),
+ message[1]);
+ break;
+
+ case EXTENDED_MESSAGE:
+ switch (message[2]) {
+#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
+ case EXTENDED_SDTR:
+ if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) {
+ /*
+ * We requested synchronous transfers. This isn't quite right...
+ * We can only say if this succeeded if we proceed on to execute the
+ * command from this message. If we get a MESSAGE PARITY ERROR,
+ * and the target retries fail, then we fallback to asynchronous mode
+ */
+ host->device[host->SCpnt->device->id].sync_state = SYNC_COMPLETED;
+ printk(KERN_NOTICE "scsi%d.%c: Using synchronous transfer, offset %d, %d ns\n",
+ host->host->host_no, acornscsi_target(host),
+ message[4], message[3] * 4);
+ host->device[host->SCpnt->device->id].sync_xfer =
+ calc_sync_xfer(message[3] * 4, message[4]);
+ } else {
+ unsigned char period, length;
+ /*
+ * Target requested synchronous transfers. The agreement is only
+ * to be in operation AFTER the target leaves message out phase.
+ */
+ acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
+ period = max_t(unsigned int, message[3], sdtr_period / 4);
+ length = min_t(unsigned int, message[4], sdtr_size);
+ msgqueue_addmsg(&host->scsi.msgs, 5, EXTENDED_MESSAGE, 3,
+ EXTENDED_SDTR, period, length);
+ host->device[host->SCpnt->device->id].sync_xfer =
+ calc_sync_xfer(period * 4, length);
+ }
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
+ break;
+#else
+ /* We do not accept synchronous transfers. Respond with a
+ * MESSAGE_REJECT.
+ */
+#endif
+
+ case EXTENDED_WDTR:
+ /* The WD33C93A is only 8-bit. We respond with a MESSAGE_REJECT
+ * to a wide data transfer request.
+ */
+ default:
+ acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
+ msgqueue_flush(&host->scsi.msgs);
+ msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT);
+ break;
+ }
+ break;
+
+ default: /* reject message */
+ printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n",
+ host->host->host_no, acornscsi_target(host),
+ message[0]);
+ acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
+ msgqueue_flush(&host->scsi.msgs);
+ msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT);
+ host->scsi.phase = PHASE_MSGIN;
+ break;
+ }
+ acornscsi_sbic_issuecmd(host, CMND_NEGATEACK);
+}
+
+/*
+ * Function: int acornscsi_buildmessages(AS_Host *host)
+ * Purpose : build the connection messages for a host
+ * Params : host - host to add messages to
+ */
+static
+void acornscsi_buildmessages(AS_Host *host)
+{
+#if 0
+ /* does the device need resetting? */
+ if (cmd_reset) {
+ msgqueue_addmsg(&host->scsi.msgs, 1, BUS_DEVICE_RESET);
+ return;
+ }
+#endif
+
+ msgqueue_addmsg(&host->scsi.msgs, 1,
+ IDENTIFY(host->device[host->SCpnt->device->id].disconnect_ok,
+ host->SCpnt->device->lun));
+
+#if 0
+ /* does the device need the current command aborted */
+ if (cmd_aborted) {
+ acornscsi_abortcmd(host->SCpnt->tag);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+ if (host->SCpnt->tag) {
+ unsigned int tag_type;
+
+ if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
+ host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
+ host->SCpnt->cmnd[0] == INQUIRY)
+ tag_type = HEAD_OF_QUEUE_TAG;
+ else
+ tag_type = SIMPLE_QUEUE_TAG;
+ msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
+ }
+#endif
+
+#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
+ if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
+ host->device[host->SCpnt->device->id].sync_state = SYNC_SENT_REQUEST;
+ msgqueue_addmsg(&host->scsi.msgs, 5,
+ EXTENDED_MESSAGE, 3, EXTENDED_SDTR,
+ sdtr_period / 4, sdtr_size);
+ }
+#endif
+}
+
+/*
+ * Function: int acornscsi_starttransfer(AS_Host *host)
+ * Purpose : transfer data to/from connected target
+ * Params : host - host to which target is connected
+ * Returns : 0 if failure
+ */
+static
+int acornscsi_starttransfer(AS_Host *host)
+{
+ int residual;
+
+ if (!host->scsi.SCp.ptr /*&& host->scsi.SCp.this_residual*/) {
+ printk(KERN_ERR "scsi%d.%c: null buffer passed to acornscsi_starttransfer\n",
+ host->host->host_no, acornscsi_target(host));
+ return 0;
+ }
+
+ residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
+
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
+ sbic_arm_writenext(host, residual >> 16);
+ sbic_arm_writenext(host, residual >> 8);
+ sbic_arm_writenext(host, residual);
+ acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
+ return 1;
+}
+
+/* =========================================================================================
+ * Connection & Disconnection
+ */
+/*
+ * Function : acornscsi_reconnect(AS_Host *host)
+ * Purpose : reconnect a previously disconnected command
+ * Params : host - host specific data
+ * Remarks : SCSI spec says:
+ * 'The set of active pointers is restored from the set
+ * of saved pointers upon reconnection of the I/O process'
+ */
+static
+int acornscsi_reconnect(AS_Host *host)
+{
+ unsigned int target, lun, ok = 0;
+
+ target = sbic_arm_read(host, SBIC_SOURCEID);
+
+ if (!(target & 8))
+ printk(KERN_ERR "scsi%d: invalid source id after reselection "
+ "- device fault?\n",
+ host->host->host_no);
+
+ target &= 7;
+
+ if (host->SCpnt && !host->scsi.disconnectable) {
+ printk(KERN_ERR "scsi%d.%d: reconnected while command in "
+ "progress to target %d?\n",
+ host->host->host_no, target, host->SCpnt->device->id);
+ host->SCpnt = NULL;
+ }
+
+ lun = sbic_arm_read(host, SBIC_DATA) & 7;
+
+ host->scsi.reconnected.target = target;
+ host->scsi.reconnected.lun = lun;
+ host->scsi.reconnected.tag = 0;
+
+ if (host->scsi.disconnectable && host->SCpnt &&
+ host->SCpnt->device->id == target && host->SCpnt->device->lun == lun)
+ ok = 1;
+
+ if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun))
+ ok = 1;
+
+ ADD_STATUS(target, 0x81, host->scsi.phase, 0);
+
+ if (ok) {
+ host->scsi.phase = PHASE_RECONNECTED;
+ } else {
+ /* this doesn't seem to work */
+ printk(KERN_ERR "scsi%d.%c: reselected with no command "
+ "to reconnect with\n",
+ host->host->host_no, '0' + target);
+ acornscsi_dumplog(host, target);
+ acornscsi_abortcmd(host, 0);
+ if (host->SCpnt) {
+ queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
+ host->SCpnt = NULL;
+ }
+ }
+ acornscsi_sbic_issuecmd(host, CMND_NEGATEACK);
+ return !ok;
+}
+
+/*
+ * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Purpose : finish reconnecting a command
+ * Params : host - host to complete
+ * Returns : 0 if failed
+ */
+static
+int acornscsi_reconnect_finish(AS_Host *host)
+{
+ if (host->scsi.disconnectable && host->SCpnt) {
+ host->scsi.disconnectable = 0;
+ if (host->SCpnt->device->id == host->scsi.reconnected.target &&
+ host->SCpnt->device->lun == host->scsi.reconnected.lun &&
+ host->SCpnt->tag == host->scsi.reconnected.tag) {
+#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
+ DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
+ host->host->host_no, acornscsi_target(host)));
+#endif
+ } else {
+ queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
+#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
+ DBG(host->SCpnt, printk("scsi%d.%c: had to move command "
+ "to disconnected queue\n",
+ host->host->host_no, acornscsi_target(host)));
+#endif
+ host->SCpnt = NULL;
+ }
+ }
+ if (!host->SCpnt) {
+ host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected,
+ host->scsi.reconnected.target,
+ host->scsi.reconnected.lun,
+ host->scsi.reconnected.tag);
+#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
+ DBG(host->SCpnt, printk("scsi%d.%c: had to get command",
+ host->host->host_no, acornscsi_target(host)));
+#endif
+ }
+
+ if (!host->SCpnt)
+ acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+ else {
+ /*
+ * Restore data pointer from SAVED pointers.
+ */
+ host->scsi.SCp = host->SCpnt->SCp;
+#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
+ printk(", data pointers: [%p, %X]",
+ host->scsi.SCp.ptr, host->scsi.SCp.this_residual);
+#endif
+ }
+#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
+ printk("\n");
+#endif
+
+ host->dma.transferred = host->scsi.SCp.scsi_xferred;
+
+ return host->SCpnt != NULL;
+}
+
+/*
+ * Function: void acornscsi_disconnect_unexpected(AS_Host *host)
+ * Purpose : handle an unexpected disconnect
+ * Params : host - host on which disconnect occurred
+ */
+static
+void acornscsi_disconnect_unexpected(AS_Host *host)
+{
+ printk(KERN_ERR "scsi%d.%c: unexpected disconnect\n",
+ host->host->host_no, acornscsi_target(host));
+#if (DEBUG & DEBUG_ABORT)
+ acornscsi_dumplog(host, 8);
+#endif
+
+ acornscsi_done(host, &host->SCpnt, DID_ERROR);
+}
+
+/*
+ * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag)
+ * Purpose : abort a currently executing command
+ * Params : host - host with connected command to abort
+ * tag - tag to abort
+ */
+static
+void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+{
+ host->scsi.phase = PHASE_ABORTED;
+ sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
+
+ msgqueue_flush(&host->scsi.msgs);
+#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+ if (tag)
+ msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
+ else
+#endif
+ msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+}
+
+/* ==========================================================================================
+ * Interrupt routines.
+ */
+/*
+ * Function: int acornscsi_sbicintr(AS_Host *host)
+ * Purpose : handle interrupts from SCSI device
+ * Params : host - host to process
+ * Returns : INTR_PROCESS if expecting another SBIC interrupt
+ * INTR_IDLE if no interrupt
+ * INTR_NEXT_COMMAND if we have finished processing the command
+ */
+static
+intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
+{
+ unsigned int asr, ssr;
+
+ asr = sbic_arm_read(host, SBIC_ASR);
+ if (!(asr & ASR_INT))
+ return INTR_IDLE;
+
+ ssr = sbic_arm_read(host, SBIC_SSR);
+
+#if (DEBUG & DEBUG_PHASES)
+ print_sbic_status(asr, ssr, host->scsi.phase);
+#endif
+
+ ADD_STATUS(8, ssr, host->scsi.phase, in_irq);
+
+ if (host->SCpnt && !host->scsi.disconnectable)
+ ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq);
+
+ switch (ssr) {
+ case 0x00: /* reset state - not advanced */
+ printk(KERN_ERR "scsi%d: reset in standard mode but wanted advanced mode.\n",
+ host->host->host_no);
+ /* setup sbic - WD33C93A */
+ sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id);
+ sbic_arm_write(host, SBIC_CMND, CMND_RESET);
+ return INTR_IDLE;
+
+ case 0x01: /* reset state - advanced */
+ sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
+ sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
+ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
+ msgqueue_flush(&host->scsi.msgs);
+ return INTR_IDLE;
+
+ case 0x41: /* unexpected disconnect aborted command */
+ acornscsi_disconnect_unexpected(host);
+ return INTR_NEXT_COMMAND;
+ }
+
+ switch (host->scsi.phase) {
+ case PHASE_CONNECTING: /* STATE: command removed from issue queue */
+ switch (ssr) {
+ case 0x11: /* -> PHASE_CONNECTED */
+ /* BUS FREE -> SELECTION */
+ host->scsi.phase = PHASE_CONNECTED;
+ msgqueue_flush(&host->scsi.msgs);
+ host->dma.transferred = host->scsi.SCp.scsi_xferred;
+ /* 33C93 gives next interrupt indicating bus phase */
+ asr = sbic_arm_read(host, SBIC_ASR);
+ if (!(asr & ASR_INT))
+ break;
+ ssr = sbic_arm_read(host, SBIC_SSR);
+ ADD_STATUS(8, ssr, host->scsi.phase, 1);
+ ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1);
+ goto connected;
+
+ case 0x42: /* select timed out */
+ /* -> PHASE_IDLE */
+ acornscsi_done(host, &host->SCpnt, DID_NO_CONNECT);
+ return INTR_NEXT_COMMAND;
+
+ case 0x81: /* -> PHASE_RECONNECTED or PHASE_ABORTED */
+ /* BUS FREE -> RESELECTION */
+ host->origSCpnt = host->SCpnt;
+ host->SCpnt = NULL;
+ msgqueue_flush(&host->scsi.msgs);
+ acornscsi_reconnect(host);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ }
+ return INTR_PROCESSING;
+
+ connected:
+ case PHASE_CONNECTED: /* STATE: device selected ok */
+ switch (ssr) {
+#ifdef NONSTANDARD
+ case 0x8a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */
+ /* SELECTION -> COMMAND */
+ acornscsi_sendcommand(host);
+ break;
+
+ case 0x8b: /* -> PHASE_STATUS */
+ /* SELECTION -> STATUS */
+ acornscsi_readstatusbyte(host);
+ host->scsi.phase = PHASE_STATUSIN;
+ break;
+#endif
+
+ case 0x8e: /* -> PHASE_MSGOUT */
+ /* SELECTION ->MESSAGE OUT */
+ host->scsi.phase = PHASE_MSGOUT;
+ acornscsi_buildmessages(host);
+ acornscsi_sendmessage(host);
+ break;
+
+ /* these should not happen */
+ case 0x85: /* target disconnected */
+ acornscsi_done(host, &host->SCpnt, DID_ERROR);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_MSGOUT: /* STATE: connected & sent IDENTIFY message */
+ /*
+ * SCSI standard says that MESSAGE OUT phases can be followed by a
+ * DATA phase, STATUS phase, MESSAGE IN phase or COMMAND phase
+ */
+ switch (ssr) {
+ case 0x8a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */
+ case 0x1a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */
+ /* MESSAGE OUT -> COMMAND */
+ acornscsi_sendcommand(host);
+ break;
+
+ case 0x8b: /* -> PHASE_STATUS */
+ case 0x1b: /* -> PHASE_STATUS */
+ /* MESSAGE OUT -> STATUS */
+ acornscsi_readstatusbyte(host);
+ host->scsi.phase = PHASE_STATUSIN;
+ break;
+
+ case 0x8e: /* -> PHASE_MSGOUT */
+ /* MESSAGE_OUT(MESSAGE_IN) ->MESSAGE OUT */
+ acornscsi_sendmessage(host);
+ break;
+
+ case 0x4f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */
+ case 0x1f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */
+ /* MESSAGE OUT -> MESSAGE IN */
+ acornscsi_message(host);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_MSGOUT, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_COMMAND: /* STATE: connected & command sent */
+ switch (ssr) {
+ case 0x18: /* -> PHASE_DATAOUT */
+ /* COMMAND -> DATA OUT */
+ if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_dma_setup(host, DMA_OUT);
+ if (!acornscsi_starttransfer(host))
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ host->scsi.phase = PHASE_DATAOUT;
+ return INTR_IDLE;
+
+ case 0x19: /* -> PHASE_DATAIN */
+ /* COMMAND -> DATA IN */
+ if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_dma_setup(host, DMA_IN);
+ if (!acornscsi_starttransfer(host))
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ host->scsi.phase = PHASE_DATAIN;
+ return INTR_IDLE;
+
+ case 0x1b: /* -> PHASE_STATUS */
+ /* COMMAND -> STATUS */
+ acornscsi_readstatusbyte(host);
+ host->scsi.phase = PHASE_STATUSIN;
+ break;
+
+ case 0x1e: /* -> PHASE_MSGOUT */
+ /* COMMAND -> MESSAGE OUT */
+ acornscsi_sendmessage(host);
+ break;
+
+ case 0x1f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */
+ /* COMMAND -> MESSAGE IN */
+ acornscsi_message(host);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_COMMAND, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_DISCONNECT: /* STATE: connected, received DISCONNECT msg */
+ if (ssr == 0x85) { /* -> PHASE_IDLE */
+ host->scsi.disconnectable = 1;
+ host->scsi.reconnected.tag = 0;
+ host->scsi.phase = PHASE_IDLE;
+ host->stats.disconnects += 1;
+ } else {
+ printk(KERN_ERR "scsi%d.%c: PHASE_DISCONNECT, SSR %02X instead of disconnect?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_NEXT_COMMAND;
+
+ case PHASE_IDLE: /* STATE: disconnected */
+ if (ssr == 0x81) /* -> PHASE_RECONNECTED or PHASE_ABORTED */
+ acornscsi_reconnect(host);
+ else {
+ printk(KERN_ERR "scsi%d.%c: PHASE_IDLE, SSR %02X while idle?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_RECONNECTED: /* STATE: device reconnected to initiator */
+ /*
+ * Command reconnected - if MESGIN, get message - it may be
+ * the tag. If not, get command out of disconnected queue
+ */
+ /*
+ * If we reconnected and we're not in MESSAGE IN phase after IDENTIFY,
+ * reconnect I_T_L command
+ */
+ if (ssr != 0x8f && !acornscsi_reconnect_finish(host))
+ return INTR_IDLE;
+ ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq);
+ switch (ssr) {
+ case 0x88: /* data out phase */
+ /* -> PHASE_DATAOUT */
+ /* MESSAGE IN -> DATA OUT */
+ acornscsi_dma_setup(host, DMA_OUT);
+ if (!acornscsi_starttransfer(host))
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ host->scsi.phase = PHASE_DATAOUT;
+ return INTR_IDLE;
+
+ case 0x89: /* data in phase */
+ /* -> PHASE_DATAIN */
+ /* MESSAGE IN -> DATA IN */
+ acornscsi_dma_setup(host, DMA_IN);
+ if (!acornscsi_starttransfer(host))
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ host->scsi.phase = PHASE_DATAIN;
+ return INTR_IDLE;
+
+ case 0x8a: /* command out */
+ /* MESSAGE IN -> COMMAND */
+ acornscsi_sendcommand(host);/* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */
+ break;
+
+ case 0x8b: /* status in */
+ /* -> PHASE_STATUSIN */
+ /* MESSAGE IN -> STATUS */
+ acornscsi_readstatusbyte(host);
+ host->scsi.phase = PHASE_STATUSIN;
+ break;
+
+ case 0x8e: /* message out */
+ /* -> PHASE_MSGOUT */
+ /* MESSAGE IN -> MESSAGE OUT */
+ acornscsi_sendmessage(host);
+ break;
+
+ case 0x8f: /* message in */
+ acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_RECONNECTED, SSR %02X after reconnect?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_DATAIN: /* STATE: transferred data in */
+ /*
+ * This is simple - if we disconnect then the DMA address & count is
+ * correct.
+ */
+ switch (ssr) {
+ case 0x19: /* -> PHASE_DATAIN */
+ case 0x89: /* -> PHASE_DATAIN */
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ return INTR_IDLE;
+
+ case 0x1b: /* -> PHASE_STATUSIN */
+ case 0x4b: /* -> PHASE_STATUSIN */
+ case 0x8b: /* -> PHASE_STATUSIN */
+ /* DATA IN -> STATUS */
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
+ acornscsi_sbic_xfcount(host);
+ acornscsi_dma_stop(host);
+ acornscsi_readstatusbyte(host);
+ host->scsi.phase = PHASE_STATUSIN;
+ break;
+
+ case 0x1e: /* -> PHASE_MSGOUT */
+ case 0x4e: /* -> PHASE_MSGOUT */
+ case 0x8e: /* -> PHASE_MSGOUT */
+ /* DATA IN -> MESSAGE OUT */
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
+ acornscsi_sbic_xfcount(host);
+ acornscsi_dma_stop(host);
+ acornscsi_sendmessage(host);
+ break;
+
+ case 0x1f: /* message in */
+ case 0x4f: /* message in */
+ case 0x8f: /* message in */
+ /* DATA IN -> MESSAGE IN */
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
+ acornscsi_sbic_xfcount(host);
+ acornscsi_dma_stop(host);
+ acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_DATAIN, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_DATAOUT: /* STATE: transferred data out */
+ /*
+ * This is more complicated - if we disconnect, the DMA could be 12
+ * bytes ahead of us. We need to correct this.
+ */
+ switch (ssr) {
+ case 0x18: /* -> PHASE_DATAOUT */
+ case 0x88: /* -> PHASE_DATAOUT */
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ return INTR_IDLE;
+
+ case 0x1b: /* -> PHASE_STATUSIN */
+ case 0x4b: /* -> PHASE_STATUSIN */
+ case 0x8b: /* -> PHASE_STATUSIN */
+ /* DATA OUT -> STATUS */
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
+ acornscsi_sbic_xfcount(host);
+ acornscsi_dma_stop(host);
+ acornscsi_dma_adjust(host);
+ acornscsi_readstatusbyte(host);
+ host->scsi.phase = PHASE_STATUSIN;
+ break;
+
+ case 0x1e: /* -> PHASE_MSGOUT */
+ case 0x4e: /* -> PHASE_MSGOUT */
+ case 0x8e: /* -> PHASE_MSGOUT */
+ /* DATA OUT -> MESSAGE OUT */
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
+ acornscsi_sbic_xfcount(host);
+ acornscsi_dma_stop(host);
+ acornscsi_dma_adjust(host);
+ acornscsi_sendmessage(host);
+ break;
+
+ case 0x1f: /* message in */
+ case 0x4f: /* message in */
+ case 0x8f: /* message in */
+ /* DATA OUT -> MESSAGE IN */
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
+ acornscsi_sbic_xfcount(host);
+ acornscsi_dma_stop(host);
+ acornscsi_dma_adjust(host);
+ acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_DATAOUT, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_STATUSIN: /* STATE: status in complete */
+ switch (ssr) {
+ case 0x1f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */
+ case 0x8f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */
+ /* STATUS -> MESSAGE IN */
+ acornscsi_message(host);
+ break;
+
+ case 0x1e: /* -> PHASE_MSGOUT */
+ case 0x8e: /* -> PHASE_MSGOUT */
+ /* STATUS -> MESSAGE OUT */
+ acornscsi_sendmessage(host);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_STATUSIN, SSR %02X instead of MESSAGE_IN?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_MSGIN: /* STATE: message in */
+ switch (ssr) {
+ case 0x1e: /* -> PHASE_MSGOUT */
+ case 0x4e: /* -> PHASE_MSGOUT */
+ case 0x8e: /* -> PHASE_MSGOUT */
+ /* MESSAGE IN -> MESSAGE OUT */
+ acornscsi_sendmessage(host);
+ break;
+
+ case 0x1f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */
+ case 0x2f:
+ case 0x4f:
+ case 0x8f:
+ acornscsi_message(host);
+ break;
+
+ case 0x85:
+ printk("scsi%d.%c: strange message in disconnection\n",
+ host->host->host_no, acornscsi_target(host));
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ acornscsi_done(host, &host->SCpnt, DID_ERROR);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_MSGIN, SSR %02X after message in?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_DONE: /* STATE: received status & message */
+ switch (ssr) {
+ case 0x85: /* -> PHASE_IDLE */
+ acornscsi_done(host, &host->SCpnt, DID_OK);
+ return INTR_NEXT_COMMAND;
+
+ case 0x1e:
+ case 0x8e:
+ acornscsi_sendmessage(host);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_DONE, SSR %02X instead of disconnect?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ case PHASE_ABORTED:
+ switch (ssr) {
+ case 0x85:
+ if (host->SCpnt)
+ acornscsi_done(host, &host->SCpnt, DID_ABORT);
+ else {
+ clear_bit(host->scsi.reconnected.target * 8 + host->scsi.reconnected.lun,
+ host->busyluns);
+ host->scsi.phase = PHASE_IDLE;
+ }
+ return INTR_NEXT_COMMAND;
+
+ case 0x1e:
+ case 0x2e:
+ case 0x4e:
+ case 0x8e:
+ acornscsi_sendmessage(host);
+ break;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: PHASE_ABORTED, SSR %02X?\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+
+ default:
+ printk(KERN_ERR "scsi%d.%c: unknown driver phase %d\n",
+ host->host->host_no, acornscsi_target(host), ssr);
+ acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
+ }
+ return INTR_PROCESSING;
+}
+
+/*
+ * Prototype: void acornscsi_intr(int irq, void *dev_id)
+ * Purpose : handle interrupts from Acorn SCSI card
+ * Params : irq - interrupt number
+ * dev_id - device specific data (AS_Host structure)
+ */
+static irqreturn_t
+acornscsi_intr(int irq, void *dev_id)
+{
+ AS_Host *host = (AS_Host *)dev_id;
+ intr_ret_t ret;
+ int iostatus;
+ int in_irq = 0;
+
+ do {
+ ret = INTR_IDLE;
+
+ iostatus = readb(host->fast + INT_REG);
+
+ if (iostatus & 2) {
+ acornscsi_dma_intr(host);
+ iostatus = readb(host->fast + INT_REG);
+ }
+
+ if (iostatus & 8)
+ ret = acornscsi_sbicintr(host, in_irq);
+
+ /*
+ * If we have a transfer pending, start it.
+ * Only start it if the interface has already started transferring
+ * it's data
+ */
+ if (host->dma.xfer_required)
+ acornscsi_dma_xfer(host);
+
+ if (ret == INTR_NEXT_COMMAND)
+ ret = acornscsi_kick(host);
+
+ in_irq = 1;
+ } while (ret != INTR_IDLE);
+
+ return IRQ_HANDLED;
+}
+
+/*=============================================================================================
+ * Interfaces between interrupt handler and rest of scsi code
+ */
+
+/*
+ * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ * Purpose : queues a SCSI command
+ * Params : cmd - SCSI command
+ * done - function called on completion, with pointer to command descriptor
+ * Returns : 0, or < 0 on error.
+ */
+static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
+{
+ AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
+
+ if (!done) {
+ /* there should be some way of rejecting errors like this without panicing... */
+ panic("scsi%d: queuecommand called with NULL done function [cmd=%p]",
+ host->host->host_no, SCpnt);
+ return -EINVAL;
+ }
+
+#if (DEBUG & DEBUG_NO_WRITE)
+ if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
+ printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
+ host->host->host_no, '0' + SCpnt->device->id);
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+#endif
+
+ SCpnt->scsi_done = done;
+ SCpnt->host_scribble = NULL;
+ SCpnt->result = 0;
+ SCpnt->tag = 0;
+ SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
+ SCpnt->SCp.sent_command = 0;
+ SCpnt->SCp.scsi_xferred = 0;
+
+ init_SCp(SCpnt);
+
+ host->stats.queues += 1;
+
+ {
+ unsigned long flags;
+
+ if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) {
+ SCpnt->result = DID_ERROR << 16;
+ done(SCpnt);
+ return 0;
+ }
+ local_irq_save(flags);
+ if (host->scsi.phase == PHASE_IDLE)
+ acornscsi_kick(host);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
+
+DEF_SCSI_QCMD(acornscsi_queuecmd)
+
+/*
+ * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
+ * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
+ * Params : SCpntp1 - pointer to command to return
+ * SCpntp2 - pointer to command to check
+ * result - result to pass back to mid-level done function
+ * Returns : *SCpntp2 = NULL if *SCpntp1 is the same command structure as *SCpntp2.
+ */
+static inline void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1,
+ struct scsi_cmnd **SCpntp2,
+ int result)
+{
+ struct scsi_cmnd *SCpnt = *SCpntp1;
+
+ if (SCpnt) {
+ *SCpntp1 = NULL;
+
+ SCpnt->result = result;
+ SCpnt->scsi_done(SCpnt);
+ }
+
+ if (SCpnt == *SCpntp2)
+ *SCpntp2 = NULL;
+}
+
+enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
+
+/*
+ * Prototype: enum res acornscsi_do_abort(struct scsi_cmnd *SCpnt)
+ * Purpose : abort a command on this host
+ * Params : SCpnt - command to abort
+ * Returns : our abort status
+ */
+static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
+{
+ enum res_abort res = res_not_running;
+
+ if (queue_remove_cmd(&host->queues.issue, SCpnt)) {
+ /*
+ * The command was on the issue queue, and has not been
+ * issued yet. We can remove the command from the queue,
+ * and acknowledge the abort. Neither the devices nor the
+ * interface know about the command.
+ */
+//#if (DEBUG & DEBUG_ABORT)
+ printk("on issue queue ");
+//#endif
+ res = res_success;
+ } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) {
+ /*
+ * The command was on the disconnected queue. Simply
+ * acknowledge the abort condition, and when the target
+ * reconnects, we will give it an ABORT message. The
+ * target should then disconnect, and we will clear
+ * the busylun bit.
+ */
+//#if (DEBUG & DEBUG_ABORT)
+ printk("on disconnected queue ");
+//#endif
+ res = res_success;
+ } else if (host->SCpnt == SCpnt) {
+ unsigned long flags;
+
+//#if (DEBUG & DEBUG_ABORT)
+ printk("executing ");
+//#endif
+
+ local_irq_save(flags);
+ switch (host->scsi.phase) {
+ /*
+ * If the interface is idle, and the command is 'disconnectable',
+ * then it is the same as on the disconnected queue. We simply
+ * remove all traces of the command. When the target reconnects,
+ * we will give it an ABORT message since the command could not
+ * be found. When the target finally disconnects, we will clear
+ * the busylun bit.
+ */
+ case PHASE_IDLE:
+ if (host->scsi.disconnectable) {
+ host->scsi.disconnectable = 0;
+ host->SCpnt = NULL;
+ res = res_success;
+ }
+ break;
+
+ /*
+ * If the command has connected and done nothing further,
+ * simply force a disconnect. We also need to clear the
+ * busylun bit.
+ */
+ case PHASE_CONNECTED:
+ sbic_arm_write(host, SBIC_CMND, CMND_DISCONNECT);
+ host->SCpnt = NULL;
+ res = res_success_clear;
+ break;
+
+ default:
+ acornscsi_abortcmd(host, host->SCpnt->tag);
+ res = res_snooze;
+ }
+ local_irq_restore(flags);
+ } else if (host->origSCpnt == SCpnt) {
+ /*
+ * The command will be executed next, but a command
+ * is currently using the interface. This is similar to
+ * being on the issue queue, except the busylun bit has
+ * been set.
+ */
+ host->origSCpnt = NULL;
+//#if (DEBUG & DEBUG_ABORT)
+ printk("waiting for execution ");
+//#endif
+ res = res_success_clear;
+ } else
+ printk("unknown ");
+
+ return res;
+}
+
+/*
+ * Prototype: int acornscsi_abort(struct scsi_cmnd *SCpnt)
+ * Purpose : abort a command on this host
+ * Params : SCpnt - command to abort
+ * Returns : one of SCSI_ABORT_ macros
+ */
+int acornscsi_abort(struct scsi_cmnd *SCpnt)
+{
+ AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata;
+ int result;
+
+ host->stats.aborts += 1;
+
+#if (DEBUG & DEBUG_ABORT)
+ {
+ int asr, ssr;
+ asr = sbic_arm_read(host, SBIC_ASR);
+ ssr = sbic_arm_read(host, SBIC_SSR);
+
+ printk(KERN_WARNING "acornscsi_abort: ");
+ print_sbic_status(asr, ssr, host->scsi.phase);
+ acornscsi_dumplog(host, SCpnt->device->id);
+ }
+#endif
+
+ printk("scsi%d: ", host->host->host_no);
+
+ switch (acornscsi_do_abort(host, SCpnt)) {
+ /*
+ * We managed to find the command and cleared it out.
+ * We do not expect the command to be executing on the
+ * target, but we have set the busylun bit.
+ */
+ case res_success_clear:
+//#if (DEBUG & DEBUG_ABORT)
+ printk("clear ");
+//#endif
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), host->busyluns);
+
+ /*
+ * We found the command, and cleared it out. Either
+ * the command is still known to be executing on the
+ * target, or the busylun bit is not set.
+ */
+ case res_success:
+//#if (DEBUG & DEBUG_ABORT)
+ printk("success\n");
+//#endif
+ result = SUCCESS;
+ break;
+
+ /*
+ * We did find the command, but unfortunately we couldn't
+ * unhook it from ourselves. Wait some more, and if it
+ * still doesn't complete, reset the interface.
+ */
+ case res_snooze:
+//#if (DEBUG & DEBUG_ABORT)
+ printk("snooze\n");
+//#endif
+ result = FAILED;
+ break;
+
+ /*
+ * The command could not be found (either because it completed,
+ * or it got dropped.
+ */
+ default:
+ case res_not_running:
+ acornscsi_dumplog(host, SCpnt->device->id);
+ result = FAILED;
+//#if (DEBUG & DEBUG_ABORT)
+ printk("not running\n");
+//#endif
+ break;
+ }
+
+ return result;
+}
+
+/*
+ * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt)
+ * Purpose : reset a command on this host/reset this host
+ * Params : SCpnt - command causing reset
+ * Returns : one of SCSI_RESET_ macros
+ */
+int acornscsi_bus_reset(struct scsi_cmnd *SCpnt)
+{
+ AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
+ struct scsi_cmnd *SCptr;
+
+ host->stats.resets += 1;
+
+#if (DEBUG & DEBUG_RESET)
+ {
+ int asr, ssr;
+
+ asr = sbic_arm_read(host, SBIC_ASR);
+ ssr = sbic_arm_read(host, SBIC_SSR);
+
+ printk(KERN_WARNING "acornscsi_reset: ");
+ print_sbic_status(asr, ssr, host->scsi.phase);
+ acornscsi_dumplog(host, SCpnt->device->id);
+ }
+#endif
+
+ acornscsi_dma_stop(host);
+
+ /*
+ * do hard reset. This resets all devices on this host, and so we
+ * must set the reset status on all commands.
+ */
+ acornscsi_resetcard(host);
+
+ while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL)
+ ;
+
+ return SUCCESS;
+}
+
+/*==============================================================================================
+ * initialisation & miscellaneous support
+ */
+
+/*
+ * Function: char *acornscsi_info(struct Scsi_Host *host)
+ * Purpose : return a string describing this interface
+ * Params : host - host to give information on
+ * Returns : a constant string
+ */
+const
+char *acornscsi_info(struct Scsi_Host *host)
+{
+ static char string[100], *p;
+
+ p = string;
+
+ p += sprintf(string, "%s at port %08lX irq %d v%d.%d.%d"
+#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
+ " SYNC"
+#endif
+#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+ " TAG"
+#endif
+#if (DEBUG & DEBUG_NO_WRITE)
+ " NOWRITE (" __stringify(NO_WRITE) ")"
+#endif
+ , host->hostt->name, host->io_port, host->irq,
+ VER_MAJOR, VER_MINOR, VER_PATCH);
+ return string;
+}
+
+static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+ int devidx;
+ struct scsi_device *scd;
+ AS_Host *host;
+
+ host = (AS_Host *)instance->hostdata;
+
+ seq_printf(m, "AcornSCSI driver v%d.%d.%d"
+#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
+ " SYNC"
+#endif
+#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+ " TAG"
+#endif
+#if (DEBUG & DEBUG_NO_WRITE)
+ " NOWRITE (" __stringify(NO_WRITE) ")"
+#endif
+ "\n\n", VER_MAJOR, VER_MINOR, VER_PATCH);
+
+ seq_printf(m, "SBIC: WD33C93A Address: %p IRQ : %d\n",
+ host->base + SBIC_REGIDX, host->scsi.irq);
+#ifdef USE_DMAC
+ seq_printf(m, "DMAC: uPC71071 Address: %p IRQ : %d\n\n",
+ host->base + DMAC_OFFSET, host->scsi.irq);
+#endif
+
+ seq_printf(m, "Statistics:\n"
+ "Queued commands: %-10u Issued commands: %-10u\n"
+ "Done commands : %-10u Reads : %-10u\n"
+ "Writes : %-10u Others : %-10u\n"
+ "Disconnects : %-10u Aborts : %-10u\n"
+ "Resets : %-10u\n\nLast phases:",
+ host->stats.queues, host->stats.removes,
+ host->stats.fins, host->stats.reads,
+ host->stats.writes, host->stats.miscs,
+ host->stats.disconnects, host->stats.aborts,
+ host->stats.resets);
+
+ for (devidx = 0; devidx < 9; devidx ++) {
+ unsigned int statptr, prev;
+
+ seq_printf(m, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx));
+ statptr = host->status_ptr[devidx] - 10;
+
+ if ((signed int)statptr < 0)
+ statptr += STATUS_BUFFER_SIZE;
+
+ prev = host->status[devidx][statptr].when;
+
+ for (; statptr != host->status_ptr[devidx]; statptr = (statptr + 1) & (STATUS_BUFFER_SIZE - 1)) {
+ if (host->status[devidx][statptr].when) {
+ seq_printf(m, "%c%02X:%02X+%2ld",
+ host->status[devidx][statptr].irq ? '-' : ' ',
+ host->status[devidx][statptr].ph,
+ host->status[devidx][statptr].ssr,
+ (host->status[devidx][statptr].when - prev) < 100 ?
+ (host->status[devidx][statptr].when - prev) : 99);
+ prev = host->status[devidx][statptr].when;
+ }
+ }
+ }
+
+ seq_printf(m, "\nAttached devices:\n");
+
+ shost_for_each_device(scd, instance) {
+ seq_printf(m, "Device/Lun TaggedQ Sync\n");
+ seq_printf(m, " %d/%llu ", scd->id, scd->lun);
+ if (scd->tagged_supported)
+ seq_printf(m, "%3sabled(%3d) ",
+ scd->simple_tags ? "en" : "dis",
+ scd->current_tag);
+ else
+ seq_printf(m, "unsupported ");
+
+ if (host->device[scd->id].sync_xfer & 15)
+ seq_printf(m, "offset %d, %d ns\n",
+ host->device[scd->id].sync_xfer & 15,
+ acornscsi_getperiod(host->device[scd->id].sync_xfer));
+ else
+ seq_printf(m, "async\n");
+
+ }
+ return 0;
+}
+
+static struct scsi_host_template acornscsi_template = {
+ .module = THIS_MODULE,
+ .show_info = acornscsi_show_info,
+ .name = "AcornSCSI",
+ .info = acornscsi_info,
+ .queuecommand = acornscsi_queuecmd,
+ .eh_abort_handler = acornscsi_abort,
+ .eh_bus_reset_handler = acornscsi_bus_reset,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "acornscsi",
+};
+
+static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ AS_Host *ashost;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
+ ashost = (AS_Host *)host->hostdata;
+
+ ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
+ ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!ashost->base || !ashost->fast)
+ goto out_put;
+
+ host->irq = ec->irq;
+ ashost->host = host;
+ ashost->scsi.irq = host->irq;
+
+ ec->irqaddr = ashost->fast + INT_REG;
+ ec->irqmask = 0x0a;
+
+ ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
+ if (ret) {
+ printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
+ host->host_no, ashost->scsi.irq, ret);
+ goto out_put;
+ }
+
+ memset(&ashost->stats, 0, sizeof (ashost->stats));
+ queue_initialise(&ashost->queues.issue);
+ queue_initialise(&ashost->queues.disconnected);
+ msgqueue_initialise(&ashost->scsi.msgs);
+
+ acornscsi_resetcard(ashost);
+
+ ret = scsi_add_host(host, &ec->dev);
+ if (ret)
+ goto out_irq;
+
+ scsi_scan_host(host);
+ goto out;
+
+ out_irq:
+ free_irq(host->irq, ashost);
+ msgqueue_free(&ashost->scsi.msgs);
+ queue_free(&ashost->queues.disconnected);
+ queue_free(&ashost->queues.issue);
+ out_put:
+ ecardm_iounmap(ec, ashost->fast);
+ ecardm_iounmap(ec, ashost->base);
+ scsi_host_put(host);
+ out_release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void acornscsi_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ AS_Host *ashost = (AS_Host *)host->hostdata;
+
+ ecard_set_drvdata(ec, NULL);
+ scsi_remove_host(host);
+
+ /*
+ * Put card into RESET state
+ */
+ writeb(0x80, ashost->fast + PAGE_REG);
+
+ free_irq(host->irq, ashost);
+
+ msgqueue_free(&ashost->scsi.msgs);
+ queue_free(&ashost->queues.disconnected);
+ queue_free(&ashost->queues.issue);
+ ecardm_iounmap(ec, ashost->fast);
+ ecardm_iounmap(ec, ashost->base);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id acornscsi_cids[] = {
+ { MANU_ACORN, PROD_ACORN_SCSI },
+ { 0xffff, 0xffff },
+};
+
+static struct ecard_driver acornscsi_driver = {
+ .probe = acornscsi_probe,
+ .remove = acornscsi_remove,
+ .id_table = acornscsi_cids,
+ .drv = {
+ .name = "acornscsi",
+ },
+};
+
+static int __init acornscsi_init(void)
+{
+ return ecard_register_driver(&acornscsi_driver);
+}
+
+static void __exit acornscsi_exit(void)
+{
+ ecard_remove_driver(&acornscsi_driver);
+}
+
+module_init(acornscsi_init);
+module_exit(acornscsi_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("AcornSCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/acornscsi.h b/drivers/scsi/arm/acornscsi.h
new file mode 100644
index 000000000..01bc715a3
--- /dev/null
+++ b/drivers/scsi/arm/acornscsi.h
@@ -0,0 +1,353 @@
+/*
+ * linux/drivers/acorn/scsi/acornscsi.h
+ *
+ * Copyright (C) 1997 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Acorn SCSI driver
+ */
+#ifndef ACORNSCSI_H
+#define ACORNSCSI_H
+
+/* SBIC registers */
+#define SBIC_OWNID 0
+#define OWNID_FS1 (1<<7)
+#define OWNID_FS2 (1<<6)
+#define OWNID_EHP (1<<4)
+#define OWNID_EAF (1<<3)
+
+#define SBIC_CTRL 1
+#define CTRL_DMAMODE (1<<7)
+#define CTRL_DMADBAMODE (1<<6)
+#define CTRL_DMABURST (1<<5)
+#define CTRL_DMAPOLLED 0
+#define CTRL_HHP (1<<4)
+#define CTRL_EDI (1<<3)
+#define CTRL_IDI (1<<2)
+#define CTRL_HA (1<<1)
+#define CTRL_HSP (1<<0)
+
+#define SBIC_TIMEOUT 2
+#define SBIC_TOTSECTS 3
+#define SBIC_TOTHEADS 4
+#define SBIC_TOTCYLH 5
+#define SBIC_TOTCYLL 6
+#define SBIC_LOGADDRH 7
+#define SBIC_LOGADDRM2 8
+#define SBIC_LOGADDRM1 9
+#define SBIC_LOGADDRL 10
+#define SBIC_SECTORNUM 11
+#define SBIC_HEADNUM 12
+#define SBIC_CYLH 13
+#define SBIC_CYLL 14
+#define SBIC_TARGETLUN 15
+#define TARGETLUN_TLV (1<<7)
+#define TARGETLUN_DOK (1<<6)
+
+#define SBIC_CMNDPHASE 16
+#define SBIC_SYNCHTRANSFER 17
+#define SYNCHTRANSFER_OF0 0x00
+#define SYNCHTRANSFER_OF1 0x01
+#define SYNCHTRANSFER_OF2 0x02
+#define SYNCHTRANSFER_OF3 0x03
+#define SYNCHTRANSFER_OF4 0x04
+#define SYNCHTRANSFER_OF5 0x05
+#define SYNCHTRANSFER_OF6 0x06
+#define SYNCHTRANSFER_OF7 0x07
+#define SYNCHTRANSFER_OF8 0x08
+#define SYNCHTRANSFER_OF9 0x09
+#define SYNCHTRANSFER_OF10 0x0A
+#define SYNCHTRANSFER_OF11 0x0B
+#define SYNCHTRANSFER_OF12 0x0C
+#define SYNCHTRANSFER_8DBA 0x00
+#define SYNCHTRANSFER_2DBA 0x20
+#define SYNCHTRANSFER_3DBA 0x30
+#define SYNCHTRANSFER_4DBA 0x40
+#define SYNCHTRANSFER_5DBA 0x50
+#define SYNCHTRANSFER_6DBA 0x60
+#define SYNCHTRANSFER_7DBA 0x70
+
+#define SBIC_TRANSCNTH 18
+#define SBIC_TRANSCNTM 19
+#define SBIC_TRANSCNTL 20
+#define SBIC_DESTID 21
+#define DESTID_SCC (1<<7)
+#define DESTID_DPD (1<<6)
+
+#define SBIC_SOURCEID 22
+#define SOURCEID_ER (1<<7)
+#define SOURCEID_ES (1<<6)
+#define SOURCEID_DSP (1<<5)
+#define SOURCEID_SIV (1<<4)
+
+#define SBIC_SSR 23
+#define SBIC_CMND 24
+#define CMND_RESET 0x00
+#define CMND_ABORT 0x01
+#define CMND_ASSERTATN 0x02
+#define CMND_NEGATEACK 0x03
+#define CMND_DISCONNECT 0x04
+#define CMND_RESELECT 0x05
+#define CMND_SELWITHATN 0x06
+#define CMND_SELECT 0x07
+#define CMND_SELECTATNTRANSFER 0x08
+#define CMND_SELECTTRANSFER 0x09
+#define CMND_RESELECTRXDATA 0x0A
+#define CMND_RESELECTTXDATA 0x0B
+#define CMND_WAITFORSELRECV 0x0C
+#define CMND_SENDSTATCMD 0x0D
+#define CMND_SENDDISCONNECT 0x0E
+#define CMND_SETIDI 0x0F
+#define CMND_RECEIVECMD 0x10
+#define CMND_RECEIVEDTA 0x11
+#define CMND_RECEIVEMSG 0x12
+#define CMND_RECEIVEUSP 0x13
+#define CMND_SENDCMD 0x14
+#define CMND_SENDDATA 0x15
+#define CMND_SENDMSG 0x16
+#define CMND_SENDUSP 0x17
+#define CMND_TRANSLATEADDR 0x18
+#define CMND_XFERINFO 0x20
+#define CMND_SBT (1<<7)
+
+#define SBIC_DATA 25
+#define SBIC_ASR 26
+#define ASR_INT (1<<7)
+#define ASR_LCI (1<<6)
+#define ASR_BSY (1<<5)
+#define ASR_CIP (1<<4)
+#define ASR_PE (1<<1)
+#define ASR_DBR (1<<0)
+
+/* DMAC registers */
+#define DMAC_INIT 0x00
+#define INIT_8BIT (1)
+
+#define DMAC_CHANNEL 0x80
+#define CHANNEL_0 0x00
+#define CHANNEL_1 0x01
+#define CHANNEL_2 0x02
+#define CHANNEL_3 0x03
+
+#define DMAC_TXCNTLO 0x01
+#define DMAC_TXCNTHI 0x81
+#define DMAC_TXADRLO 0x02
+#define DMAC_TXADRMD 0x82
+#define DMAC_TXADRHI 0x03
+
+#define DMAC_DEVCON0 0x04
+#define DEVCON0_AKL (1<<7)
+#define DEVCON0_RQL (1<<6)
+#define DEVCON0_EXW (1<<5)
+#define DEVCON0_ROT (1<<4)
+#define DEVCON0_CMP (1<<3)
+#define DEVCON0_DDMA (1<<2)
+#define DEVCON0_AHLD (1<<1)
+#define DEVCON0_MTM (1<<0)
+
+#define DMAC_DEVCON1 0x84
+#define DEVCON1_WEV (1<<1)
+#define DEVCON1_BHLD (1<<0)
+
+#define DMAC_MODECON 0x05
+#define MODECON_WOED 0x01
+#define MODECON_VERIFY 0x00
+#define MODECON_READ 0x04
+#define MODECON_WRITE 0x08
+#define MODECON_AUTOINIT 0x10
+#define MODECON_ADDRDIR 0x20
+#define MODECON_DEMAND 0x00
+#define MODECON_SINGLE 0x40
+#define MODECON_BLOCK 0x80
+#define MODECON_CASCADE 0xC0
+
+#define DMAC_STATUS 0x85
+#define STATUS_TC0 (1<<0)
+#define STATUS_RQ0 (1<<4)
+
+#define DMAC_TEMPLO 0x06
+#define DMAC_TEMPHI 0x86
+#define DMAC_REQREG 0x07
+#define DMAC_MASKREG 0x87
+#define MASKREG_M0 0x01
+#define MASKREG_M1 0x02
+#define MASKREG_M2 0x04
+#define MASKREG_M3 0x08
+
+/* miscellaneous internal variables */
+
+#define MASK_ON (MASKREG_M3|MASKREG_M2|MASKREG_M1|MASKREG_M0)
+#define MASK_OFF (MASKREG_M3|MASKREG_M2|MASKREG_M1)
+
+/*
+ * SCSI driver phases
+ */
+typedef enum {
+ PHASE_IDLE, /* we're not planning on doing anything */
+ PHASE_CONNECTING, /* connecting to a target */
+ PHASE_CONNECTED, /* connected to a target */
+ PHASE_MSGOUT, /* message out to device */
+ PHASE_RECONNECTED, /* reconnected */
+ PHASE_COMMANDPAUSED, /* command partly sent */
+ PHASE_COMMAND, /* command all sent */
+ PHASE_DATAOUT, /* data out to device */
+ PHASE_DATAIN, /* data in from device */
+ PHASE_STATUSIN, /* status in from device */
+ PHASE_MSGIN, /* message in from device */
+ PHASE_DONE, /* finished */
+ PHASE_ABORTED, /* aborted */
+ PHASE_DISCONNECT, /* disconnecting */
+} phase_t;
+
+/*
+ * After interrupt, what to do now
+ */
+typedef enum {
+ INTR_IDLE, /* not expecting another IRQ */
+ INTR_NEXT_COMMAND, /* start next command */
+ INTR_PROCESSING, /* interrupt routine still processing */
+} intr_ret_t;
+
+/*
+ * DMA direction
+ */
+typedef enum {
+ DMA_OUT, /* DMA from memory to chip */
+ DMA_IN /* DMA from chip to memory */
+} dmadir_t;
+
+/*
+ * Synchronous transfer state
+ */
+typedef enum { /* Synchronous transfer state */
+ SYNC_ASYNCHRONOUS, /* don't negotiate synchronous transfers*/
+ SYNC_NEGOCIATE, /* start negotiation */
+ SYNC_SENT_REQUEST, /* sent SDTR message */
+ SYNC_COMPLETED, /* received SDTR reply */
+} syncxfer_t;
+
+/*
+ * Command type
+ */
+typedef enum { /* command type */
+ CMD_READ, /* READ_6, READ_10, READ_12 */
+ CMD_WRITE, /* WRITE_6, WRITE_10, WRITE_12 */
+ CMD_MISC, /* Others */
+} cmdtype_t;
+
+/*
+ * Data phase direction
+ */
+typedef enum { /* Data direction */
+ DATADIR_IN, /* Data in phase expected */
+ DATADIR_OUT /* Data out phase expected */
+} datadir_t;
+
+#include "queue.h"
+#include "msgqueue.h"
+
+#define STATUS_BUFFER_SIZE 32
+/*
+ * This is used to dump the previous states of the SBIC
+ */
+struct status_entry {
+ unsigned long when;
+ unsigned char ssr;
+ unsigned char ph;
+ unsigned char irq;
+ unsigned char unused;
+};
+
+#define ADD_STATUS(_q,_ssr,_ph,_irq) \
+({ \
+ host->status[(_q)][host->status_ptr[(_q)]].when = jiffies; \
+ host->status[(_q)][host->status_ptr[(_q)]].ssr = (_ssr); \
+ host->status[(_q)][host->status_ptr[(_q)]].ph = (_ph); \
+ host->status[(_q)][host->status_ptr[(_q)]].irq = (_irq); \
+ host->status_ptr[(_q)] = (host->status_ptr[(_q)] + 1) & (STATUS_BUFFER_SIZE - 1); \
+})
+
+/*
+ * AcornSCSI host specific data
+ */
+typedef struct acornscsi_hostdata {
+ /* miscellaneous */
+ struct Scsi_Host *host; /* host */
+ struct scsi_cmnd *SCpnt; /* currently processing command */
+ struct scsi_cmnd *origSCpnt; /* original connecting command */
+ void __iomem *base; /* memc base address */
+ void __iomem *fast; /* fast ioc base address */
+
+ /* driver information */
+ struct {
+ unsigned int irq; /* interrupt */
+ phase_t phase; /* current phase */
+
+ struct {
+ unsigned char target; /* reconnected target */
+ unsigned char lun; /* reconnected lun */
+ unsigned char tag; /* reconnected tag */
+ } reconnected;
+
+ struct scsi_pointer SCp; /* current commands data pointer */
+
+ MsgQueue_t msgs;
+
+ unsigned short last_message; /* last message to be sent */
+ unsigned char disconnectable:1; /* this command can be disconnected */
+ } scsi;
+
+ /* statistics information */
+ struct {
+ unsigned int queues;
+ unsigned int removes;
+ unsigned int fins;
+ unsigned int reads;
+ unsigned int writes;
+ unsigned int miscs;
+ unsigned int disconnects;
+ unsigned int aborts;
+ unsigned int resets;
+ } stats;
+
+ /* queue handling */
+ struct {
+ Queue_t issue; /* issue queue */
+ Queue_t disconnected; /* disconnected command queue */
+ } queues;
+
+ /* per-device info */
+ struct {
+ unsigned char sync_xfer; /* synchronous transfer (SBIC value) */
+ syncxfer_t sync_state; /* sync xfer negotiation state */
+ unsigned char disconnect_ok:1; /* device can disconnect */
+ } device[8];
+ unsigned long busyluns[64 / sizeof(unsigned long)];/* array of bits indicating LUNs busy */
+
+ /* DMA info */
+ struct {
+ unsigned int free_addr; /* next free address */
+ unsigned int start_addr; /* start address of current transfer */
+ dmadir_t direction; /* dma direction */
+ unsigned int transferred; /* number of bytes transferred */
+ unsigned int xfer_start; /* scheduled DMA transfer start */
+ unsigned int xfer_length; /* scheduled DMA transfer length */
+ char *xfer_ptr; /* pointer to area */
+ unsigned char xfer_required:1; /* set if we need to transfer something */
+ unsigned char xfer_setup:1; /* set if DMA is setup */
+ unsigned char xfer_done:1; /* set if DMA reached end of BH list */
+ } dma;
+
+ /* card info */
+ struct {
+ unsigned char page_reg; /* current setting of page reg */
+ } card;
+
+ unsigned char status_ptr[9];
+ struct status_entry status[9][STATUS_BUFFER_SIZE];
+} AS_Host;
+
+#endif /* ACORNSCSI_H */
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
new file mode 100644
index 000000000..32d23212d
--- /dev/null
+++ b/drivers/scsi/arm/arxescsi.c
@@ -0,0 +1,358 @@
+/*
+ * linux/drivers/scsi/arm/arxescsi.c
+ *
+ * Copyright (C) 1997-2000 Russell King, Stefan Hanske
+ *
+ * This driver is based on experimentation. Hence, it may have made
+ * assumptions about the particular card that I have available, and
+ * may not be reliable!
+ *
+ * Changelog:
+ * 30-08-1997 RMK 0.0.0 Created, READONLY version as cumana_2.c
+ * 22-01-1998 RMK 0.0.1 Updated to 2.1.80
+ * 15-04-1998 RMK 0.0.1 Only do PIO if FAS216 will allow it.
+ * 11-06-1998 SH 0.0.2 Changed to support ARXE 16-bit SCSI card
+ * enabled writing
+ * 01-01-2000 SH 0.1.0 Added *real* pseudo dma writing
+ * (arxescsi_pseudo_dma_write)
+ * 02-04-2000 RMK 0.1.1 Updated for new error handling code.
+ * 22-10-2000 SH Updated for new registering scheme.
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/unistd.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/ecard.h>
+
+#include "../scsi.h"
+#include <scsi/scsi_host.h>
+#include "fas216.h"
+
+struct arxescsi_info {
+ FAS216_Info info;
+ struct expansion_card *ec;
+ void __iomem *base;
+};
+
+#define DMADATA_OFFSET (0x200)
+
+#define DMASTAT_OFFSET (0x600)
+#define DMASTAT_DRQ (1 << 0)
+
+#define CSTATUS_IRQ (1 << 0)
+
+#define VERSION "1.10 (23/01/2003 2.5.57)"
+
+/*
+ * Function: int arxescsi_dma_setup(host, SCpnt, direction, min_type)
+ * Purpose : initialises DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ * direction - DMA on to/off of card
+ * min_type - minimum DMA support that we must have for this transfer
+ * Returns : 0 if we should not set CMD_WITHDMA for transfer info command
+ */
+static fasdmatype_t
+arxescsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t direction, fasdmatype_t min_type)
+{
+ /*
+ * We don't do real DMA
+ */
+ return fasdma_pseudo;
+}
+
+static void arxescsi_pseudo_dma_write(unsigned char *addr, void __iomem *base)
+{
+ __asm__ __volatile__(
+ " stmdb sp!, {r0-r12}\n"
+ " mov r3, %0\n"
+ " mov r1, %1\n"
+ " add r2, r1, #512\n"
+ " mov r4, #256\n"
+ ".loop_1: ldmia r3!, {r6, r8, r10, r12}\n"
+ " mov r5, r6, lsl #16\n"
+ " mov r7, r8, lsl #16\n"
+ ".loop_2: ldrb r0, [r1, #1536]\n"
+ " tst r0, #1\n"
+ " beq .loop_2\n"
+ " stmia r2, {r5-r8}\n\t"
+ " mov r9, r10, lsl #16\n"
+ " mov r11, r12, lsl #16\n"
+ ".loop_3: ldrb r0, [r1, #1536]\n"
+ " tst r0, #1\n"
+ " beq .loop_3\n"
+ " stmia r2, {r9-r12}\n"
+ " subs r4, r4, #16\n"
+ " bne .loop_1\n"
+ " ldmia sp!, {r0-r12}\n"
+ :
+ : "r" (addr), "r" (base));
+}
+
+/*
+ * Function: int arxescsi_dma_pseudo(host, SCpnt, direction, transfer)
+ * Purpose : handles pseudo DMA
+ * Params : host - host
+ * SCpnt - command
+ * direction - DMA on to/off of card
+ * transfer - minimum number of bytes we expect to transfer
+ */
+static void
+arxescsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t direction, int transfer)
+{
+ struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata;
+ unsigned int length, error = 0;
+ void __iomem *base = info->info.scsi.io_base;
+ unsigned char *addr;
+
+ length = SCp->this_residual;
+ addr = SCp->ptr;
+
+ if (direction == DMA_OUT) {
+ unsigned int word;
+ while (length > 256) {
+ if (readb(base + 0x80) & STAT_INT) {
+ error = 1;
+ break;
+ }
+ arxescsi_pseudo_dma_write(addr, base);
+ addr += 256;
+ length -= 256;
+ }
+
+ if (!error)
+ while (length > 0) {
+ if (readb(base + 0x80) & STAT_INT)
+ break;
+
+ if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ))
+ continue;
+
+ word = *addr | *(addr + 1) << 8;
+
+ writew(word, base + DMADATA_OFFSET);
+ if (length > 1) {
+ addr += 2;
+ length -= 2;
+ } else {
+ addr += 1;
+ length -= 1;
+ }
+ }
+ }
+ else {
+ if (transfer && (transfer & 255)) {
+ while (length >= 256) {
+ if (readb(base + 0x80) & STAT_INT) {
+ error = 1;
+ break;
+ }
+
+ if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ))
+ continue;
+
+ readsw(base + DMADATA_OFFSET, addr, 256 >> 1);
+ addr += 256;
+ length -= 256;
+ }
+ }
+
+ if (!(error))
+ while (length > 0) {
+ unsigned long word;
+
+ if (readb(base + 0x80) & STAT_INT)
+ break;
+
+ if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ))
+ continue;
+
+ word = readw(base + DMADATA_OFFSET);
+ *addr++ = word;
+ if (--length > 0) {
+ *addr++ = word >> 8;
+ length --;
+ }
+ }
+ }
+}
+
+/*
+ * Function: int arxescsi_dma_stop(host, SCpnt)
+ * Purpose : stops DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ */
+static void arxescsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
+{
+ /*
+ * no DMA to stop
+ */
+}
+
+/*
+ * Function: const char *arxescsi_info(struct Scsi_Host * host)
+ * Purpose : returns a descriptive string about this interface,
+ * Params : host - driver host structure to return info for.
+ * Returns : pointer to a static buffer containing null terminated string.
+ */
+static const char *arxescsi_info(struct Scsi_Host *host)
+{
+ struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata;
+ static char string[150];
+
+ sprintf(string, "%s (%s) in slot %d v%s",
+ host->hostt->name, info->info.scsi.type, info->ec->slot_no,
+ VERSION);
+
+ return string;
+}
+
+static int
+arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ struct arxescsi_info *info;
+ info = (struct arxescsi_info *)host->hostdata;
+
+ seq_printf(m, "ARXE 16-bit SCSI driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
+}
+
+static struct scsi_host_template arxescsi_template = {
+ .show_info = arxescsi_show_info,
+ .name = "ARXE SCSI card",
+ .info = arxescsi_info,
+ .queuecommand = fas216_noqueue_command,
+ .eh_host_reset_handler = fas216_eh_host_reset,
+ .eh_bus_reset_handler = fas216_eh_bus_reset,
+ .eh_device_reset_handler = fas216_eh_device_reset,
+ .eh_abort_handler = fas216_eh_abort,
+ .can_queue = 0,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "arxescsi",
+};
+
+static int arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ struct arxescsi_info *info;
+ void __iomem *base;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ host = scsi_host_alloc(&arxescsi_template, sizeof(struct arxescsi_info));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ info = (struct arxescsi_info *)host->hostdata;
+ info->ec = ec;
+ info->base = base;
+
+ info->info.scsi.io_base = base + 0x2000;
+ info->info.scsi.irq = 0;
+ info->info.scsi.dma = NO_DMA;
+ info->info.scsi.io_shift = 5;
+ info->info.ifcfg.clockrate = 24; /* MHz */
+ info->info.ifcfg.select_timeout = 255;
+ info->info.ifcfg.asyncperiod = 200; /* ns */
+ info->info.ifcfg.sync_max_depth = 0;
+ info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK;
+ info->info.ifcfg.disconnect_ok = 0;
+ info->info.ifcfg.wide_max_size = 0;
+ info->info.ifcfg.capabilities = FASCAP_PSEUDODMA;
+ info->info.dma.setup = arxescsi_dma_setup;
+ info->info.dma.pseudo = arxescsi_dma_pseudo;
+ info->info.dma.stop = arxescsi_dma_stop;
+
+ ec->irqaddr = base;
+ ec->irqmask = CSTATUS_IRQ;
+
+ ret = fas216_init(host);
+ if (ret)
+ goto out_unregister;
+
+ ret = fas216_add(host, &ec->dev);
+ if (ret == 0)
+ goto out;
+
+ fas216_release(host);
+ out_unregister:
+ scsi_host_put(host);
+ out_region:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void arxescsi_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+ fas216_remove(host);
+
+ fas216_release(host);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id arxescsi_cids[] = {
+ { MANU_ARXE, PROD_ARXE_SCSI },
+ { 0xffff, 0xffff },
+};
+
+static struct ecard_driver arxescsi_driver = {
+ .probe = arxescsi_probe,
+ .remove = arxescsi_remove,
+ .id_table = arxescsi_cids,
+ .drv = {
+ .name = "arxescsi",
+ },
+};
+
+static int __init init_arxe_scsi_driver(void)
+{
+ return ecard_register_driver(&arxescsi_driver);
+}
+
+static void __exit exit_arxe_scsi_driver(void)
+{
+ ecard_remove_driver(&arxescsi_driver);
+}
+
+module_init(init_arxe_scsi_driver);
+module_exit(exit_arxe_scsi_driver);
+
+MODULE_AUTHOR("Stefan Hanske");
+MODULE_DESCRIPTION("ARXESCSI driver for Acorn machines");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
new file mode 100644
index 000000000..d28d6c0f1
--- /dev/null
+++ b/drivers/scsi/arm/cumana_1.c
@@ -0,0 +1,324 @@
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1995-2002, Russell King
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+
+#include <asm/ecard.h>
+#include <asm/io.h>
+
+#include <scsi/scsi_host.h>
+
+#include <scsi/scsicam.h>
+
+#define PSEUDO_DMA
+
+#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
+#define NCR5380_local_declare() struct Scsi_Host *_instance
+#define NCR5380_setup(instance) _instance = instance
+#define NCR5380_read(reg) cumanascsi_read(_instance, reg)
+#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value)
+#define NCR5380_intr cumanascsi_intr
+#define NCR5380_queue_command cumanascsi_queue_command
+#define NCR5380_info cumanascsi_info
+
+#define NCR5380_implementation_fields \
+ unsigned ctrl; \
+ void __iomem *base; \
+ void __iomem *dma
+
+#include "../NCR5380.h"
+
+void cumanascsi_setup(char *str, int *ints)
+{
+}
+
+#define CTRL 0x16fc
+#define STAT 0x2004
+#define L(v) (((v)<<16)|((v) & 0x0000ffff))
+#define H(v) (((v)>>16)|((v) & 0xffff0000))
+
+static inline int
+NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len)
+{
+ unsigned long *laddr;
+ void __iomem *dma = priv(host)->dma + 0x2000;
+
+ if(!len) return 0;
+
+ writeb(0x02, priv(host)->base + CTRL);
+ laddr = (unsigned long *)addr;
+ while(len >= 32)
+ {
+ unsigned int status;
+ unsigned long v;
+ status = readb(priv(host)->base + STAT);
+ if(status & 0x80)
+ goto end;
+ if(!(status & 0x40))
+ continue;
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ len -= 32;
+ if(len == 0)
+ break;
+ }
+
+ addr = (unsigned char *)laddr;
+ writeb(0x12, priv(host)->base + CTRL);
+
+ while(len > 0)
+ {
+ unsigned int status;
+ status = readb(priv(host)->base + STAT);
+ if(status & 0x80)
+ goto end;
+ if(status & 0x40)
+ {
+ writeb(*addr++, dma);
+ if(--len == 0)
+ break;
+ }
+
+ status = readb(priv(host)->base + STAT);
+ if(status & 0x80)
+ goto end;
+ if(status & 0x40)
+ {
+ writeb(*addr++, dma);
+ if(--len == 0)
+ break;
+ }
+ }
+end:
+ writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ return len;
+}
+
+static inline int
+NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len)
+{
+ unsigned long *laddr;
+ void __iomem *dma = priv(host)->dma + 0x2000;
+
+ if(!len) return 0;
+
+ writeb(0x00, priv(host)->base + CTRL);
+ laddr = (unsigned long *)addr;
+ while(len >= 32)
+ {
+ unsigned int status;
+ status = readb(priv(host)->base + STAT);
+ if(status & 0x80)
+ goto end;
+ if(!(status & 0x40))
+ continue;
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ len -= 32;
+ if(len == 0)
+ break;
+ }
+
+ addr = (unsigned char *)laddr;
+ writeb(0x10, priv(host)->base + CTRL);
+
+ while(len > 0)
+ {
+ unsigned int status;
+ status = readb(priv(host)->base + STAT);
+ if(status & 0x80)
+ goto end;
+ if(status & 0x40)
+ {
+ *addr++ = readb(dma);
+ if(--len == 0)
+ break;
+ }
+
+ status = readb(priv(host)->base + STAT);
+ if(status & 0x80)
+ goto end;
+ if(status & 0x40)
+ {
+ *addr++ = readb(dma);
+ if(--len == 0)
+ break;
+ }
+ }
+end:
+ writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+ return len;
+}
+
+static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+{
+ void __iomem *base = priv(host)->base;
+ unsigned char val;
+
+ writeb(0, base + CTRL);
+
+ val = readb(base + 0x2100 + (reg << 2));
+
+ priv(host)->ctrl = 0x40;
+ writeb(0x40, base + CTRL);
+
+ return val;
+}
+
+static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+{
+ void __iomem *base = priv(host)->base;
+
+ writeb(0, base + CTRL);
+
+ writeb(value, base + 0x2100 + (reg << 2));
+
+ priv(host)->ctrl = 0x40;
+ writeb(0x40, base + CTRL);
+}
+
+#include "../NCR5380.c"
+
+static struct scsi_host_template cumanascsi_template = {
+ .module = THIS_MODULE,
+ .name = "Cumana 16-bit SCSI",
+ .info = cumanascsi_info,
+ .queuecommand = cumanascsi_queue_command,
+ .eh_abort_handler = NCR5380_abort,
+ .eh_bus_reset_handler = NCR5380_bus_reset,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "CumanaSCSI-1",
+};
+
+static int cumanascsi1_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
+ priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+ ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+ priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->base || !priv(host)->dma) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ host->irq = ec->irq;
+
+ NCR5380_init(host, 0);
+
+ priv(host)->ctrl = 0;
+ writeb(0, priv(host)->base + CTRL);
+
+ host->n_io_port = 255;
+ if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
+ ret = -EBUSY;
+ goto out_unmap;
+ }
+
+ ret = request_irq(host->irq, cumanascsi_intr, 0,
+ "CumanaSCSI-1", host);
+ if (ret) {
+ printk("scsi%d: IRQ%d not free: %d\n",
+ host->host_no, host->irq, ret);
+ goto out_unmap;
+ }
+
+ ret = scsi_add_host(host, &ec->dev);
+ if (ret)
+ goto out_free_irq;
+
+ scsi_scan_host(host);
+ goto out;
+
+ out_free_irq:
+ free_irq(host->irq, host);
+ out_unmap:
+ iounmap(priv(host)->base);
+ iounmap(priv(host)->dma);
+ scsi_host_put(host);
+ out_release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void cumanascsi1_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ scsi_remove_host(host);
+ free_irq(host->irq, host);
+ NCR5380_exit(host);
+ iounmap(priv(host)->base);
+ iounmap(priv(host)->dma);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id cumanascsi1_cids[] = {
+ { MANU_CUMANA, PROD_CUMANA_SCSI_1 },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver cumanascsi1_driver = {
+ .probe = cumanascsi1_probe,
+ .remove = cumanascsi1_remove,
+ .id_table = cumanascsi1_cids,
+ .drv = {
+ .name = "cumanascsi1",
+ },
+};
+
+static int __init cumanascsi_init(void)
+{
+ return ecard_register_driver(&cumanascsi1_driver);
+}
+
+static void __exit cumanascsi_exit(void)
+{
+ ecard_remove_driver(&cumanascsi1_driver);
+}
+
+module_init(cumanascsi_init);
+module_exit(cumanascsi_exit);
+
+MODULE_DESCRIPTION("Cumana SCSI-1 driver for Acorn machines");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
new file mode 100644
index 000000000..abc66f526
--- /dev/null
+++ b/drivers/scsi/arm/cumana_2.c
@@ -0,0 +1,521 @@
+/*
+ * linux/drivers/acorn/scsi/cumana_2.c
+ *
+ * Copyright (C) 1997-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Changelog:
+ * 30-08-1997 RMK 0.0.0 Created, READONLY version.
+ * 22-01-1998 RMK 0.0.1 Updated to 2.1.80.
+ * 15-04-1998 RMK 0.0.1 Only do PIO if FAS216 will allow it.
+ * 02-05-1998 RMK 0.0.2 Updated & added DMA support.
+ * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h
+ * 18-08-1998 RMK 0.0.3 Fixed synchronous transfer depth.
+ * 02-04-2000 RMK 0.0.4 Updated for new error handling code.
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/dma.h>
+#include <asm/ecard.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#include "../scsi.h"
+#include <scsi/scsi_host.h>
+#include "fas216.h"
+#include "scsi.h"
+
+#include <scsi/scsicam.h>
+
+#define CUMANASCSI2_STATUS (0x0000)
+#define STATUS_INT (1 << 0)
+#define STATUS_DRQ (1 << 1)
+#define STATUS_LATCHED (1 << 3)
+
+#define CUMANASCSI2_ALATCH (0x0014)
+#define ALATCH_ENA_INT (3)
+#define ALATCH_DIS_INT (2)
+#define ALATCH_ENA_TERM (5)
+#define ALATCH_DIS_TERM (4)
+#define ALATCH_ENA_BIT32 (11)
+#define ALATCH_DIS_BIT32 (10)
+#define ALATCH_ENA_DMA (13)
+#define ALATCH_DIS_DMA (12)
+#define ALATCH_DMA_OUT (15)
+#define ALATCH_DMA_IN (14)
+
+#define CUMANASCSI2_PSEUDODMA (0x0200)
+
+#define CUMANASCSI2_FAS216_OFFSET (0x0300)
+#define CUMANASCSI2_FAS216_SHIFT 2
+
+/*
+ * Version
+ */
+#define VERSION "1.00 (13/11/2002 2.5.47)"
+
+/*
+ * Use term=0,1,0,0,0 to turn terminators on/off
+ */
+static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+#define NR_SG 256
+
+struct cumanascsi2_info {
+ FAS216_Info info;
+ struct expansion_card *ec;
+ void __iomem *base;
+ unsigned int terms; /* Terminator state */
+ struct scatterlist sg[NR_SG]; /* Scatter DMA list */
+};
+
+#define CSTATUS_IRQ (1 << 0)
+#define CSTATUS_DRQ (1 << 1)
+
+/* Prototype: void cumanascsi_2_irqenable(ec, irqnr)
+ * Purpose : Enable interrupts on Cumana SCSI 2 card
+ * Params : ec - expansion card structure
+ * : irqnr - interrupt number
+ */
+static void
+cumanascsi_2_irqenable(struct expansion_card *ec, int irqnr)
+{
+ struct cumanascsi2_info *info = ec->irq_data;
+ writeb(ALATCH_ENA_INT, info->base + CUMANASCSI2_ALATCH);
+}
+
+/* Prototype: void cumanascsi_2_irqdisable(ec, irqnr)
+ * Purpose : Disable interrupts on Cumana SCSI 2 card
+ * Params : ec - expansion card structure
+ * : irqnr - interrupt number
+ */
+static void
+cumanascsi_2_irqdisable(struct expansion_card *ec, int irqnr)
+{
+ struct cumanascsi2_info *info = ec->irq_data;
+ writeb(ALATCH_DIS_INT, info->base + CUMANASCSI2_ALATCH);
+}
+
+static const expansioncard_ops_t cumanascsi_2_ops = {
+ .irqenable = cumanascsi_2_irqenable,
+ .irqdisable = cumanascsi_2_irqdisable,
+};
+
+/* Prototype: void cumanascsi_2_terminator_ctl(host, on_off)
+ * Purpose : Turn the Cumana SCSI 2 terminators on or off
+ * Params : host - card to turn on/off
+ * : on_off - !0 to turn on, 0 to turn off
+ */
+static void
+cumanascsi_2_terminator_ctl(struct Scsi_Host *host, int on_off)
+{
+ struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
+
+ if (on_off) {
+ info->terms = 1;
+ writeb(ALATCH_ENA_TERM, info->base + CUMANASCSI2_ALATCH);
+ } else {
+ info->terms = 0;
+ writeb(ALATCH_DIS_TERM, info->base + CUMANASCSI2_ALATCH);
+ }
+}
+
+/* Prototype: void cumanascsi_2_intr(irq, *dev_id, *regs)
+ * Purpose : handle interrupts from Cumana SCSI 2 card
+ * Params : irq - interrupt number
+ * dev_id - user-defined (Scsi_Host structure)
+ */
+static irqreturn_t
+cumanascsi_2_intr(int irq, void *dev_id)
+{
+ struct cumanascsi2_info *info = dev_id;
+
+ return fas216_intr(&info->info);
+}
+
+/* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type)
+ * Purpose : initialises DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ * direction - DMA on to/off of card
+ * min_type - minimum DMA support that we must have for this transfer
+ * Returns : type of transfer to be performed
+ */
+static fasdmatype_t
+cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t direction, fasdmatype_t min_type)
+{
+ struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
+ struct device *dev = scsi_get_device(host);
+ int dmach = info->info.scsi.dma;
+
+ writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH);
+
+ if (dmach != NO_DMA &&
+ (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
+ int bufs, map_dir, dma_dir, alatch_dir;
+
+ bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
+
+ if (direction == DMA_OUT)
+ map_dir = DMA_TO_DEVICE,
+ dma_dir = DMA_MODE_WRITE,
+ alatch_dir = ALATCH_DMA_OUT;
+ else
+ map_dir = DMA_FROM_DEVICE,
+ dma_dir = DMA_MODE_READ,
+ alatch_dir = ALATCH_DMA_IN;
+
+ dma_map_sg(dev, info->sg, bufs, map_dir);
+
+ disable_dma(dmach);
+ set_dma_sg(dmach, info->sg, bufs);
+ writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH);
+ set_dma_mode(dmach, dma_dir);
+ enable_dma(dmach);
+ writeb(ALATCH_ENA_DMA, info->base + CUMANASCSI2_ALATCH);
+ writeb(ALATCH_DIS_BIT32, info->base + CUMANASCSI2_ALATCH);
+ return fasdma_real_all;
+ }
+
+ /*
+ * If we're not doing DMA,
+ * we'll do pseudo DMA
+ */
+ return fasdma_pio;
+}
+
+/*
+ * Prototype: void cumanascsi_2_dma_pseudo(host, SCpnt, direction, transfer)
+ * Purpose : handles pseudo DMA
+ * Params : host - host
+ * SCpnt - command
+ * direction - DMA on to/off of card
+ * transfer - minimum number of bytes we expect to transfer
+ */
+static void
+cumanascsi_2_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t direction, int transfer)
+{
+ struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
+ unsigned int length;
+ unsigned char *addr;
+
+ length = SCp->this_residual;
+ addr = SCp->ptr;
+
+ if (direction == DMA_OUT)
+#if 0
+ while (length > 1) {
+ unsigned long word;
+ unsigned int status = readb(info->base + CUMANASCSI2_STATUS);
+
+ if (status & STATUS_INT)
+ goto end;
+
+ if (!(status & STATUS_DRQ))
+ continue;
+
+ word = *addr | *(addr + 1) << 8;
+ writew(word, info->base + CUMANASCSI2_PSEUDODMA);
+ addr += 2;
+ length -= 2;
+ }
+#else
+ printk ("PSEUDO_OUT???\n");
+#endif
+ else {
+ if (transfer && (transfer & 255)) {
+ while (length >= 256) {
+ unsigned int status = readb(info->base + CUMANASCSI2_STATUS);
+
+ if (status & STATUS_INT)
+ return;
+
+ if (!(status & STATUS_DRQ))
+ continue;
+
+ readsw(info->base + CUMANASCSI2_PSEUDODMA,
+ addr, 256 >> 1);
+ addr += 256;
+ length -= 256;
+ }
+ }
+
+ while (length > 0) {
+ unsigned long word;
+ unsigned int status = readb(info->base + CUMANASCSI2_STATUS);
+
+ if (status & STATUS_INT)
+ return;
+
+ if (!(status & STATUS_DRQ))
+ continue;
+
+ word = readw(info->base + CUMANASCSI2_PSEUDODMA);
+ *addr++ = word;
+ if (--length > 0) {
+ *addr++ = word >> 8;
+ length --;
+ }
+ }
+ }
+}
+
+/* Prototype: int cumanascsi_2_dma_stop(host, SCpnt)
+ * Purpose : stops DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ */
+static void
+cumanascsi_2_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
+{
+ struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
+ if (info->info.scsi.dma != NO_DMA) {
+ writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH);
+ disable_dma(info->info.scsi.dma);
+ }
+}
+
+/* Prototype: const char *cumanascsi_2_info(struct Scsi_Host * host)
+ * Purpose : returns a descriptive string about this interface,
+ * Params : host - driver host structure to return info for.
+ * Returns : pointer to a static buffer containing null terminated string.
+ */
+const char *cumanascsi_2_info(struct Scsi_Host *host)
+{
+ struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
+ static char string[150];
+
+ sprintf(string, "%s (%s) in slot %d v%s terminators o%s",
+ host->hostt->name, info->info.scsi.type, info->ec->slot_no,
+ VERSION, info->terms ? "n" : "ff");
+
+ return string;
+}
+
+/* Prototype: int cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
+ * Purpose : Set a driver specific function
+ * Params : host - host to setup
+ * : buffer - buffer containing string describing operation
+ * : length - length of string
+ * Returns : -EINVAL, or 0
+ */
+static int
+cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ int ret = length;
+
+ if (length >= 11 && strncmp(buffer, "CUMANASCSI2", 11) == 0) {
+ buffer += 11;
+ length -= 11;
+
+ if (length >= 5 && strncmp(buffer, "term=", 5) == 0) {
+ if (buffer[5] == '1')
+ cumanascsi_2_terminator_ctl(host, 1);
+ else if (buffer[5] == '0')
+ cumanascsi_2_terminator_ctl(host, 0);
+ else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ struct cumanascsi2_info *info;
+ info = (struct cumanascsi2_info *)host->hostdata;
+
+ seq_printf(m, "Cumana SCSI II driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ seq_printf(m, "Term : o%s\n",
+ info->terms ? "n" : "ff");
+
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
+}
+
+static struct scsi_host_template cumanascsi2_template = {
+ .module = THIS_MODULE,
+ .show_info = cumanascsi_2_show_info,
+ .write_info = cumanascsi_2_set_proc_info,
+ .name = "Cumana SCSI II",
+ .info = cumanascsi_2_info,
+ .queuecommand = fas216_queue_command,
+ .eh_host_reset_handler = fas216_eh_host_reset,
+ .eh_bus_reset_handler = fas216_eh_bus_reset,
+ .eh_device_reset_handler = fas216_eh_device_reset,
+ .eh_abort_handler = fas216_eh_abort,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .dma_boundary = IOMD_DMA_BOUNDARY,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "cumanascsi2",
+};
+
+static int cumanascsi2_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ struct cumanascsi2_info *info;
+ void __iomem *base;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ host = scsi_host_alloc(&cumanascsi2_template,
+ sizeof(struct cumanascsi2_info));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ ecard_set_drvdata(ec, host);
+
+ info = (struct cumanascsi2_info *)host->hostdata;
+ info->ec = ec;
+ info->base = base;
+
+ cumanascsi_2_terminator_ctl(host, term[ec->slot_no]);
+
+ info->info.scsi.io_base = base + CUMANASCSI2_FAS216_OFFSET;
+ info->info.scsi.io_shift = CUMANASCSI2_FAS216_SHIFT;
+ info->info.scsi.irq = ec->irq;
+ info->info.scsi.dma = ec->dma;
+ info->info.ifcfg.clockrate = 40; /* MHz */
+ info->info.ifcfg.select_timeout = 255;
+ info->info.ifcfg.asyncperiod = 200; /* ns */
+ info->info.ifcfg.sync_max_depth = 7;
+ info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK;
+ info->info.ifcfg.disconnect_ok = 1;
+ info->info.ifcfg.wide_max_size = 0;
+ info->info.ifcfg.capabilities = FASCAP_PSEUDODMA;
+ info->info.dma.setup = cumanascsi_2_dma_setup;
+ info->info.dma.pseudo = cumanascsi_2_dma_pseudo;
+ info->info.dma.stop = cumanascsi_2_dma_stop;
+
+ ec->irqaddr = info->base + CUMANASCSI2_STATUS;
+ ec->irqmask = STATUS_INT;
+
+ ecard_setirq(ec, &cumanascsi_2_ops, info);
+
+ ret = fas216_init(host);
+ if (ret)
+ goto out_free;
+
+ ret = request_irq(ec->irq, cumanascsi_2_intr,
+ 0, "cumanascsi2", info);
+ if (ret) {
+ printk("scsi%d: IRQ%d not free: %d\n",
+ host->host_no, ec->irq, ret);
+ goto out_release;
+ }
+
+ if (info->info.scsi.dma != NO_DMA) {
+ if (request_dma(info->info.scsi.dma, "cumanascsi2")) {
+ printk("scsi%d: DMA%d not free, using PIO\n",
+ host->host_no, info->info.scsi.dma);
+ info->info.scsi.dma = NO_DMA;
+ } else {
+ set_dma_speed(info->info.scsi.dma, 180);
+ info->info.ifcfg.capabilities |= FASCAP_DMA;
+ }
+ }
+
+ ret = fas216_add(host, &ec->dev);
+ if (ret == 0)
+ goto out;
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+ free_irq(ec->irq, host);
+
+ out_release:
+ fas216_release(host);
+
+ out_free:
+ scsi_host_put(host);
+
+ out_region:
+ ecard_release_resources(ec);
+
+ out:
+ return ret;
+}
+
+static void cumanascsi2_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
+
+ ecard_set_drvdata(ec, NULL);
+ fas216_remove(host);
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+ free_irq(ec->irq, info);
+
+ fas216_release(host);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id cumanascsi2_cids[] = {
+ { MANU_CUMANA, PROD_CUMANA_SCSI_2 },
+ { 0xffff, 0xffff },
+};
+
+static struct ecard_driver cumanascsi2_driver = {
+ .probe = cumanascsi2_probe,
+ .remove = cumanascsi2_remove,
+ .id_table = cumanascsi2_cids,
+ .drv = {
+ .name = "cumanascsi2",
+ },
+};
+
+static int __init cumanascsi2_init(void)
+{
+ return ecard_register_driver(&cumanascsi2_driver);
+}
+
+static void __exit cumanascsi2_exit(void)
+{
+ ecard_remove_driver(&cumanascsi2_driver);
+}
+
+module_init(cumanascsi2_init);
+module_exit(cumanascsi2_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Cumana SCSI-2 driver for Acorn machines");
+module_param_array(term, int, NULL, 0);
+MODULE_PARM_DESC(term, "SCSI bus termination");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
new file mode 100644
index 000000000..5bf3c0d13
--- /dev/null
+++ b/drivers/scsi/arm/eesox.c
@@ -0,0 +1,645 @@
+/*
+ * linux/drivers/acorn/scsi/eesox.c
+ *
+ * Copyright (C) 1997-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on experimentation. Hence, it may have made
+ * assumptions about the particular card that I have available, and
+ * may not be reliable!
+ *
+ * Changelog:
+ * 01-10-1997 RMK Created, READONLY version
+ * 15-02-1998 RMK READ/WRITE version
+ * added DMA support and hardware definitions
+ * 14-03-1998 RMK Updated DMA support
+ * Added terminator control
+ * 15-04-1998 RMK Only do PIO if FAS216 will allow it.
+ * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h
+ * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new
+ * error handling code.
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/ecard.h>
+#include <asm/pgtable.h>
+
+#include "../scsi.h"
+#include <scsi/scsi_host.h>
+#include "fas216.h"
+#include "scsi.h"
+
+#include <scsi/scsicam.h>
+
+#define EESOX_FAS216_OFFSET 0x3000
+#define EESOX_FAS216_SHIFT 5
+
+#define EESOX_DMASTAT 0x2800
+#define EESOX_STAT_INTR 0x01
+#define EESOX_STAT_DMA 0x02
+
+#define EESOX_CONTROL 0x2800
+#define EESOX_INTR_ENABLE 0x04
+#define EESOX_TERM_ENABLE 0x02
+#define EESOX_RESET 0x01
+
+#define EESOX_DMADATA 0x3800
+
+#define VERSION "1.10 (17/01/2003 2.5.59)"
+
+/*
+ * Use term=0,1,0,0,0 to turn terminators on/off
+ */
+static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+#define NR_SG 256
+
+struct eesoxscsi_info {
+ FAS216_Info info;
+ struct expansion_card *ec;
+ void __iomem *base;
+ void __iomem *ctl_port;
+ unsigned int control;
+ struct scatterlist sg[NR_SG]; /* Scatter DMA list */
+};
+
+/* Prototype: void eesoxscsi_irqenable(ec, irqnr)
+ * Purpose : Enable interrupts on EESOX SCSI card
+ * Params : ec - expansion card structure
+ * : irqnr - interrupt number
+ */
+static void
+eesoxscsi_irqenable(struct expansion_card *ec, int irqnr)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data;
+
+ info->control |= EESOX_INTR_ENABLE;
+
+ writeb(info->control, info->ctl_port);
+}
+
+/* Prototype: void eesoxscsi_irqdisable(ec, irqnr)
+ * Purpose : Disable interrupts on EESOX SCSI card
+ * Params : ec - expansion card structure
+ * : irqnr - interrupt number
+ */
+static void
+eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data;
+
+ info->control &= ~EESOX_INTR_ENABLE;
+
+ writeb(info->control, info->ctl_port);
+}
+
+static const expansioncard_ops_t eesoxscsi_ops = {
+ .irqenable = eesoxscsi_irqenable,
+ .irqdisable = eesoxscsi_irqdisable,
+};
+
+/* Prototype: void eesoxscsi_terminator_ctl(*host, on_off)
+ * Purpose : Turn the EESOX SCSI terminators on or off
+ * Params : host - card to turn on/off
+ * : on_off - !0 to turn on, 0 to turn off
+ */
+static void
+eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+ unsigned long flags;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ if (on_off)
+ info->control |= EESOX_TERM_ENABLE;
+ else
+ info->control &= ~EESOX_TERM_ENABLE;
+
+ writeb(info->control, info->ctl_port);
+ spin_unlock_irqrestore(host->host_lock, flags);
+}
+
+/* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs)
+ * Purpose : handle interrupts from EESOX SCSI card
+ * Params : irq - interrupt number
+ * dev_id - user-defined (Scsi_Host structure)
+ */
+static irqreturn_t
+eesoxscsi_intr(int irq, void *dev_id)
+{
+ struct eesoxscsi_info *info = dev_id;
+
+ return fas216_intr(&info->info);
+}
+
+/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
+ * Purpose : initialises DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ * direction - DMA on to/off of card
+ * min_type - minimum DMA support that we must have for this transfer
+ * Returns : type of transfer to be performed
+ */
+static fasdmatype_t
+eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t direction, fasdmatype_t min_type)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+ struct device *dev = scsi_get_device(host);
+ int dmach = info->info.scsi.dma;
+
+ if (dmach != NO_DMA &&
+ (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
+ int bufs, map_dir, dma_dir;
+
+ bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
+
+ if (direction == DMA_OUT)
+ map_dir = DMA_TO_DEVICE,
+ dma_dir = DMA_MODE_WRITE;
+ else
+ map_dir = DMA_FROM_DEVICE,
+ dma_dir = DMA_MODE_READ;
+
+ dma_map_sg(dev, info->sg, bufs, map_dir);
+
+ disable_dma(dmach);
+ set_dma_sg(dmach, info->sg, bufs);
+ set_dma_mode(dmach, dma_dir);
+ enable_dma(dmach);
+ return fasdma_real_all;
+ }
+ /*
+ * We don't do DMA, we only do slow PIO
+ *
+ * Some day, we will do Pseudo DMA
+ */
+ return fasdma_pseudo;
+}
+
+static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base)
+{
+ const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET;
+ const void __iomem *reg_dmastat = base + EESOX_DMASTAT;
+ const void __iomem *reg_dmadata = base + EESOX_DMADATA;
+ register const unsigned long mask = 0xffff;
+
+ do {
+ unsigned int status;
+
+ /*
+ * Interrupt request?
+ */
+ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT));
+ if (status & STAT_INT)
+ break;
+
+ /*
+ * DMA request active?
+ */
+ status = readb(reg_dmastat);
+ if (!(status & EESOX_STAT_DMA))
+ continue;
+
+ /*
+ * Get number of bytes in FIFO
+ */
+ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF;
+ if (status > 16)
+ status = 16;
+ if (status > length)
+ status = length;
+
+ /*
+ * Align buffer.
+ */
+ if (((u32)buf) & 2 && status >= 2) {
+ *(u16 *)buf = readl(reg_dmadata);
+ buf += 2;
+ status -= 2;
+ length -= 2;
+ }
+
+ if (status >= 8) {
+ unsigned long l1, l2;
+
+ l1 = readl(reg_dmadata) & mask;
+ l1 |= readl(reg_dmadata) << 16;
+ l2 = readl(reg_dmadata) & mask;
+ l2 |= readl(reg_dmadata) << 16;
+ *(u32 *)buf = l1;
+ buf += 4;
+ *(u32 *)buf = l2;
+ buf += 4;
+ length -= 8;
+ continue;
+ }
+
+ if (status >= 4) {
+ unsigned long l1;
+
+ l1 = readl(reg_dmadata) & mask;
+ l1 |= readl(reg_dmadata) << 16;
+
+ *(u32 *)buf = l1;
+ buf += 4;
+ length -= 4;
+ continue;
+ }
+
+ if (status >= 2) {
+ *(u16 *)buf = readl(reg_dmadata);
+ buf += 2;
+ length -= 2;
+ }
+ } while (length);
+}
+
+static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base)
+{
+ const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET;
+ const void __iomem *reg_dmastat = base + EESOX_DMASTAT;
+ void __iomem *reg_dmadata = base + EESOX_DMADATA;
+
+ do {
+ unsigned int status;
+
+ /*
+ * Interrupt request?
+ */
+ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT));
+ if (status & STAT_INT)
+ break;
+
+ /*
+ * DMA request active?
+ */
+ status = readb(reg_dmastat);
+ if (!(status & EESOX_STAT_DMA))
+ continue;
+
+ /*
+ * Get number of bytes in FIFO
+ */
+ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF;
+ if (status > 16)
+ status = 16;
+ status = 16 - status;
+ if (status > length)
+ status = length;
+ status &= ~1;
+
+ /*
+ * Align buffer.
+ */
+ if (((u32)buf) & 2 && status >= 2) {
+ writel(*(u16 *)buf << 16, reg_dmadata);
+ buf += 2;
+ status -= 2;
+ length -= 2;
+ }
+
+ if (status >= 8) {
+ unsigned long l1, l2;
+
+ l1 = *(u32 *)buf;
+ buf += 4;
+ l2 = *(u32 *)buf;
+ buf += 4;
+
+ writel(l1 << 16, reg_dmadata);
+ writel(l1, reg_dmadata);
+ writel(l2 << 16, reg_dmadata);
+ writel(l2, reg_dmadata);
+ length -= 8;
+ continue;
+ }
+
+ if (status >= 4) {
+ unsigned long l1;
+
+ l1 = *(u32 *)buf;
+ buf += 4;
+
+ writel(l1 << 16, reg_dmadata);
+ writel(l1, reg_dmadata);
+ length -= 4;
+ continue;
+ }
+
+ if (status >= 2) {
+ writel(*(u16 *)buf << 16, reg_dmadata);
+ buf += 2;
+ length -= 2;
+ }
+ } while (length);
+}
+
+static void
+eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t dir, int transfer_size)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+ if (dir == DMA_IN) {
+ eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base);
+ } else {
+ eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base);
+ }
+}
+
+/* Prototype: int eesoxscsi_dma_stop(host, SCpnt)
+ * Purpose : stops DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ */
+static void
+eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+ if (info->info.scsi.dma != NO_DMA)
+ disable_dma(info->info.scsi.dma);
+}
+
+/* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host)
+ * Purpose : returns a descriptive string about this interface,
+ * Params : host - driver host structure to return info for.
+ * Returns : pointer to a static buffer containing null terminated string.
+ */
+const char *eesoxscsi_info(struct Scsi_Host *host)
+{
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+ static char string[150];
+
+ sprintf(string, "%s (%s) in slot %d v%s terminators o%s",
+ host->hostt->name, info->info.scsi.type, info->ec->slot_no,
+ VERSION, info->control & EESOX_TERM_ENABLE ? "n" : "ff");
+
+ return string;
+}
+
+/* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
+ * Purpose : Set a driver specific function
+ * Params : host - host to setup
+ * : buffer - buffer containing string describing operation
+ * : length - length of string
+ * Returns : -EINVAL, or 0
+ */
+static int
+eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ int ret = length;
+
+ if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) {
+ buffer += 9;
+ length -= 9;
+
+ if (length >= 5 && strncmp(buffer, "term=", 5) == 0) {
+ if (buffer[5] == '1')
+ eesoxscsi_terminator_ctl(host, 1);
+ else if (buffer[5] == '0')
+ eesoxscsi_terminator_ctl(host, 0);
+ else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ struct eesoxscsi_info *info;
+
+ info = (struct eesoxscsi_info *)host->hostdata;
+
+ seq_printf(m, "EESOX SCSI driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ seq_printf(m, "Term : o%s\n",
+ info->control & EESOX_TERM_ENABLE ? "n" : "ff");
+
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
+}
+
+static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+
+ return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 1 : 0);
+}
+
+static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+ unsigned long flags;
+
+ if (len > 1) {
+ spin_lock_irqsave(host->host_lock, flags);
+ if (buf[0] != '0') {
+ info->control |= EESOX_TERM_ENABLE;
+ } else {
+ info->control &= ~EESOX_TERM_ENABLE;
+ }
+ writeb(info->control, info->ctl_port);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR,
+ eesoxscsi_show_term, eesoxscsi_store_term);
+
+static struct scsi_host_template eesox_template = {
+ .module = THIS_MODULE,
+ .show_info = eesoxscsi_show_info,
+ .write_info = eesoxscsi_set_proc_info,
+ .name = "EESOX SCSI",
+ .info = eesoxscsi_info,
+ .queuecommand = fas216_queue_command,
+ .eh_host_reset_handler = fas216_eh_host_reset,
+ .eh_bus_reset_handler = fas216_eh_bus_reset,
+ .eh_device_reset_handler = fas216_eh_device_reset,
+ .eh_abort_handler = fas216_eh_abort,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .dma_boundary = IOMD_DMA_BOUNDARY,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "eesox",
+};
+
+static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ struct eesoxscsi_info *info;
+ void __iomem *base;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ host = scsi_host_alloc(&eesox_template,
+ sizeof(struct eesoxscsi_info));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ ecard_set_drvdata(ec, host);
+
+ info = (struct eesoxscsi_info *)host->hostdata;
+ info->ec = ec;
+ info->base = base;
+ info->ctl_port = base + EESOX_CONTROL;
+ info->control = term[ec->slot_no] ? EESOX_TERM_ENABLE : 0;
+ writeb(info->control, info->ctl_port);
+
+ info->info.scsi.io_base = base + EESOX_FAS216_OFFSET;
+ info->info.scsi.io_shift = EESOX_FAS216_SHIFT;
+ info->info.scsi.irq = ec->irq;
+ info->info.scsi.dma = ec->dma;
+ info->info.ifcfg.clockrate = 40; /* MHz */
+ info->info.ifcfg.select_timeout = 255;
+ info->info.ifcfg.asyncperiod = 200; /* ns */
+ info->info.ifcfg.sync_max_depth = 7;
+ info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK;
+ info->info.ifcfg.disconnect_ok = 1;
+ info->info.ifcfg.wide_max_size = 0;
+ info->info.ifcfg.capabilities = FASCAP_PSEUDODMA;
+ info->info.dma.setup = eesoxscsi_dma_setup;
+ info->info.dma.pseudo = eesoxscsi_dma_pseudo;
+ info->info.dma.stop = eesoxscsi_dma_stop;
+
+ ec->irqaddr = base + EESOX_DMASTAT;
+ ec->irqmask = EESOX_STAT_INTR;
+
+ ecard_setirq(ec, &eesoxscsi_ops, info);
+
+ device_create_file(&ec->dev, &dev_attr_bus_term);
+
+ ret = fas216_init(host);
+ if (ret)
+ goto out_free;
+
+ ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info);
+ if (ret) {
+ printk("scsi%d: IRQ%d not free: %d\n",
+ host->host_no, ec->irq, ret);
+ goto out_remove;
+ }
+
+ if (info->info.scsi.dma != NO_DMA) {
+ if (request_dma(info->info.scsi.dma, "eesox")) {
+ printk("scsi%d: DMA%d not free, DMA disabled\n",
+ host->host_no, info->info.scsi.dma);
+ info->info.scsi.dma = NO_DMA;
+ } else {
+ set_dma_speed(info->info.scsi.dma, 180);
+ info->info.ifcfg.capabilities |= FASCAP_DMA;
+ info->info.ifcfg.cntl3 |= CNTL3_BS8;
+ }
+ }
+
+ ret = fas216_add(host, &ec->dev);
+ if (ret == 0)
+ goto out;
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+ free_irq(ec->irq, host);
+
+ out_remove:
+ fas216_remove(host);
+
+ out_free:
+ device_remove_file(&ec->dev, &dev_attr_bus_term);
+ scsi_host_put(host);
+
+ out_region:
+ ecard_release_resources(ec);
+
+ out:
+ return ret;
+}
+
+static void eesoxscsi_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
+
+ ecard_set_drvdata(ec, NULL);
+ fas216_remove(host);
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+ free_irq(ec->irq, info);
+
+ device_remove_file(&ec->dev, &dev_attr_bus_term);
+
+ fas216_release(host);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id eesoxscsi_cids[] = {
+ { MANU_EESOX, PROD_EESOX_SCSI2 },
+ { 0xffff, 0xffff },
+};
+
+static struct ecard_driver eesoxscsi_driver = {
+ .probe = eesoxscsi_probe,
+ .remove = eesoxscsi_remove,
+ .id_table = eesoxscsi_cids,
+ .drv = {
+ .name = "eesoxscsi",
+ },
+};
+
+static int __init eesox_init(void)
+{
+ return ecard_register_driver(&eesoxscsi_driver);
+}
+
+static void __exit eesox_exit(void)
+{
+ ecard_remove_driver(&eesoxscsi_driver);
+}
+
+module_init(eesox_init);
+module_exit(eesox_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines");
+module_param_array(term, int, NULL, 0);
+MODULE_PARM_DESC(term, "SCSI bus termination");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
new file mode 100644
index 000000000..decdc71b6
--- /dev/null
+++ b/drivers/scsi/arm/fas216.c
@@ -0,0 +1,3032 @@
+/*
+ * linux/drivers/acorn/scsi/fas216.c
+ *
+ * Copyright (C) 1997-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on information in qlogicfas.c by Tom Zerucha, Michael Griffith, and
+ * other sources, including:
+ * the AMD Am53CF94 data sheet
+ * the AMD Am53C94 data sheet
+ *
+ * This is a generic driver. To use it, have a look at cumana_2.c. You
+ * should define your own structure that overlays FAS216_Info, eg:
+ * struct my_host_data {
+ * FAS216_Info info;
+ * ... my host specific data ...
+ * };
+ *
+ * Changelog:
+ * 30-08-1997 RMK Created
+ * 14-09-1997 RMK Started disconnect support
+ * 08-02-1998 RMK Corrected real DMA support
+ * 15-02-1998 RMK Started sync xfer support
+ * 06-04-1998 RMK Tightened conditions for printing incomplete
+ * transfers
+ * 02-05-1998 RMK Added extra checks in fas216_reset
+ * 24-05-1998 RMK Fixed synchronous transfers with period >= 200ns
+ * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h
+ * 26-08-1998 RMK Improved message support wrt MESSAGE_REJECT
+ * 02-04-2000 RMK Converted to use the new error handling, and
+ * automatically request sense data upon check
+ * condition status from targets.
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/ecard.h>
+
+#include "../scsi.h"
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include "fas216.h"
+#include "scsi.h"
+
+/* NOTE: SCSI2 Synchronous transfers *require* DMA according to
+ * the data sheet. This restriction is crazy, especially when
+ * you only want to send 16 bytes! What were the guys who
+ * designed this chip on at that time? Did they read the SCSI2
+ * spec at all? The following sections are taken from the SCSI2
+ * standard (s2r10) concerning this:
+ *
+ * > IMPLEMENTORS NOTES:
+ * > (1) Re-negotiation at every selection is not recommended, since a
+ * > significant performance impact is likely.
+ *
+ * > The implied synchronous agreement shall remain in effect until a BUS DEVICE
+ * > RESET message is received, until a hard reset condition occurs, or until one
+ * > of the two SCSI devices elects to modify the agreement. The default data
+ * > transfer mode is asynchronous data transfer mode. The default data transfer
+ * > mode is entered at power on, after a BUS DEVICE RESET message, or after a hard
+ * > reset condition.
+ *
+ * In total, this means that once you have elected to use synchronous
+ * transfers, you must always use DMA.
+ *
+ * I was thinking that this was a good chip until I found this restriction ;(
+ */
+#define SCSI2_SYNC
+#undef SCSI2_TAG
+
+#undef DEBUG_CONNECT
+#undef DEBUG_MESSAGES
+
+#undef CHECK_STRUCTURE
+
+#define LOG_CONNECT (1 << 0)
+#define LOG_BUSSERVICE (1 << 1)
+#define LOG_FUNCTIONDONE (1 << 2)
+#define LOG_MESSAGES (1 << 3)
+#define LOG_BUFFER (1 << 4)
+#define LOG_ERROR (1 << 8)
+
+static int level_mask = LOG_ERROR;
+
+module_param(level_mask, int, 0644);
+
+static int __init fas216_log_setup(char *str)
+{
+ char *s;
+
+ level_mask = 0;
+
+ while ((s = strsep(&str, ",")) != NULL) {
+ switch (s[0]) {
+ case 'a':
+ if (strcmp(s, "all") == 0)
+ level_mask |= -1;
+ break;
+ case 'b':
+ if (strncmp(s, "bus", 3) == 0)
+ level_mask |= LOG_BUSSERVICE;
+ if (strncmp(s, "buf", 3) == 0)
+ level_mask |= LOG_BUFFER;
+ break;
+ case 'c':
+ level_mask |= LOG_CONNECT;
+ break;
+ case 'e':
+ level_mask |= LOG_ERROR;
+ break;
+ case 'm':
+ level_mask |= LOG_MESSAGES;
+ break;
+ case 'n':
+ if (strcmp(s, "none") == 0)
+ level_mask = 0;
+ break;
+ case 's':
+ level_mask |= LOG_FUNCTIONDONE;
+ break;
+ }
+ }
+ return 1;
+}
+
+__setup("fas216_logging=", fas216_log_setup);
+
+static inline unsigned char fas216_readb(FAS216_Info *info, unsigned int reg)
+{
+ unsigned int off = reg << info->scsi.io_shift;
+ return readb(info->scsi.io_base + off);
+}
+
+static inline void fas216_writeb(FAS216_Info *info, unsigned int reg, unsigned int val)
+{
+ unsigned int off = reg << info->scsi.io_shift;
+ writeb(val, info->scsi.io_base + off);
+}
+
+static void fas216_dumpstate(FAS216_Info *info)
+{
+ unsigned char is, stat, inst;
+
+ is = fas216_readb(info, REG_IS);
+ stat = fas216_readb(info, REG_STAT);
+ inst = fas216_readb(info, REG_INST);
+
+ printk("FAS216: CTCL=%02X CTCM=%02X CMD=%02X STAT=%02X"
+ " INST=%02X IS=%02X CFIS=%02X",
+ fas216_readb(info, REG_CTCL),
+ fas216_readb(info, REG_CTCM),
+ fas216_readb(info, REG_CMD), stat, inst, is,
+ fas216_readb(info, REG_CFIS));
+ printk(" CNTL1=%02X CNTL2=%02X CNTL3=%02X CTCH=%02X\n",
+ fas216_readb(info, REG_CNTL1),
+ fas216_readb(info, REG_CNTL2),
+ fas216_readb(info, REG_CNTL3),
+ fas216_readb(info, REG_CTCH));
+}
+
+static void print_SCp(struct scsi_pointer *SCp, const char *prefix, const char *suffix)
+{
+ printk("%sptr %p this_residual 0x%x buffer %p buffers_residual 0x%x%s",
+ prefix, SCp->ptr, SCp->this_residual, SCp->buffer,
+ SCp->buffers_residual, suffix);
+}
+
+#ifdef CHECK_STRUCTURE
+static void fas216_dumpinfo(FAS216_Info *info)
+{
+ static int used = 0;
+ int i;
+
+ if (used++)
+ return;
+
+ printk("FAS216_Info=\n");
+ printk(" { magic_start=%lX host=%p SCpnt=%p origSCpnt=%p\n",
+ info->magic_start, info->host, info->SCpnt,
+ info->origSCpnt);
+ printk(" scsi={ io_shift=%X irq=%X cfg={ %X %X %X %X }\n",
+ info->scsi.io_shift, info->scsi.irq,
+ info->scsi.cfg[0], info->scsi.cfg[1], info->scsi.cfg[2],
+ info->scsi.cfg[3]);
+ printk(" type=%p phase=%X\n",
+ info->scsi.type, info->scsi.phase);
+ print_SCp(&info->scsi.SCp, " SCp={ ", " }\n");
+ printk(" msgs async_stp=%X disconnectable=%d aborting=%d }\n",
+ info->scsi.async_stp,
+ info->scsi.disconnectable, info->scsi.aborting);
+ printk(" stats={ queues=%X removes=%X fins=%X reads=%X writes=%X miscs=%X\n"
+ " disconnects=%X aborts=%X bus_resets=%X host_resets=%X}\n",
+ info->stats.queues, info->stats.removes, info->stats.fins,
+ info->stats.reads, info->stats.writes, info->stats.miscs,
+ info->stats.disconnects, info->stats.aborts, info->stats.bus_resets,
+ info->stats.host_resets);
+ printk(" ifcfg={ clockrate=%X select_timeout=%X asyncperiod=%X sync_max_depth=%X }\n",
+ info->ifcfg.clockrate, info->ifcfg.select_timeout,
+ info->ifcfg.asyncperiod, info->ifcfg.sync_max_depth);
+ for (i = 0; i < 8; i++) {
+ printk(" busyluns[%d]=%08lx dev[%d]={ disconnect_ok=%d stp=%X sof=%X sync_state=%X }\n",
+ i, info->busyluns[i], i,
+ info->device[i].disconnect_ok, info->device[i].stp,
+ info->device[i].sof, info->device[i].sync_state);
+ }
+ printk(" dma={ transfer_type=%X setup=%p pseudo=%p stop=%p }\n",
+ info->dma.transfer_type, info->dma.setup,
+ info->dma.pseudo, info->dma.stop);
+ printk(" internal_done=%X magic_end=%lX }\n",
+ info->internal_done, info->magic_end);
+}
+
+static void __fas216_checkmagic(FAS216_Info *info, const char *func)
+{
+ int corruption = 0;
+ if (info->magic_start != MAGIC) {
+ printk(KERN_CRIT "FAS216 Error: magic at start corrupted\n");
+ corruption++;
+ }
+ if (info->magic_end != MAGIC) {
+ printk(KERN_CRIT "FAS216 Error: magic at end corrupted\n");
+ corruption++;
+ }
+ if (corruption) {
+ fas216_dumpinfo(info);
+ panic("scsi memory space corrupted in %s", func);
+ }
+}
+#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
+#else
+#define fas216_checkmagic(info)
+#endif
+
+static const char *fas216_bus_phase(int stat)
+{
+ static const char *phases[] = {
+ "DATA OUT", "DATA IN",
+ "COMMAND", "STATUS",
+ "MISC OUT", "MISC IN",
+ "MESG OUT", "MESG IN"
+ };
+
+ return phases[stat & STAT_BUSMASK];
+}
+
+static const char *fas216_drv_phase(FAS216_Info *info)
+{
+ static const char *phases[] = {
+ [PHASE_IDLE] = "idle",
+ [PHASE_SELECTION] = "selection",
+ [PHASE_COMMAND] = "command",
+ [PHASE_DATAOUT] = "data out",
+ [PHASE_DATAIN] = "data in",
+ [PHASE_MSGIN] = "message in",
+ [PHASE_MSGIN_DISCONNECT]= "disconnect",
+ [PHASE_MSGOUT_EXPECT] = "expect message out",
+ [PHASE_MSGOUT] = "message out",
+ [PHASE_STATUS] = "status",
+ [PHASE_DONE] = "done",
+ };
+
+ if (info->scsi.phase < ARRAY_SIZE(phases) &&
+ phases[info->scsi.phase])
+ return phases[info->scsi.phase];
+ return "???";
+}
+
+static char fas216_target(FAS216_Info *info)
+{
+ if (info->SCpnt)
+ return '0' + info->SCpnt->device->id;
+ else
+ return 'H';
+}
+
+static void
+fas216_do_log(FAS216_Info *info, char target, char *fmt, va_list ap)
+{
+ static char buf[1024];
+
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ printk("scsi%d.%c: %s", info->host->host_no, target, buf);
+}
+
+static void fas216_log_command(FAS216_Info *info, int level,
+ struct scsi_cmnd *SCpnt, char *fmt, ...)
+{
+ va_list args;
+
+ if (level != 0 && !(level & level_mask))
+ return;
+
+ va_start(args, fmt);
+ fas216_do_log(info, '0' + SCpnt->device->id, fmt, args);
+ va_end(args);
+
+ scsi_print_command(SCpnt);
+}
+
+static void
+fas216_log_target(FAS216_Info *info, int level, int target, char *fmt, ...)
+{
+ va_list args;
+
+ if (level != 0 && !(level & level_mask))
+ return;
+
+ if (target < 0)
+ target = 'H';
+ else
+ target += '0';
+
+ va_start(args, fmt);
+ fas216_do_log(info, target, fmt, args);
+ va_end(args);
+
+ printk("\n");
+}
+
+static void fas216_log(FAS216_Info *info, int level, char *fmt, ...)
+{
+ va_list args;
+
+ if (level != 0 && !(level & level_mask))
+ return;
+
+ va_start(args, fmt);
+ fas216_do_log(info, fas216_target(info), fmt, args);
+ va_end(args);
+
+ printk("\n");
+}
+
+#define PH_SIZE 32
+
+static struct { int stat, ssr, isr, ph; } ph_list[PH_SIZE];
+static int ph_ptr;
+
+static void add_debug_list(int stat, int ssr, int isr, int ph)
+{
+ ph_list[ph_ptr].stat = stat;
+ ph_list[ph_ptr].ssr = ssr;
+ ph_list[ph_ptr].isr = isr;
+ ph_list[ph_ptr].ph = ph;
+
+ ph_ptr = (ph_ptr + 1) & (PH_SIZE-1);
+}
+
+static struct { int command; void *from; } cmd_list[8];
+static int cmd_ptr;
+
+static void fas216_cmd(FAS216_Info *info, unsigned int command)
+{
+ cmd_list[cmd_ptr].command = command;
+ cmd_list[cmd_ptr].from = __builtin_return_address(0);
+
+ cmd_ptr = (cmd_ptr + 1) & 7;
+
+ fas216_writeb(info, REG_CMD, command);
+}
+
+static void print_debug_list(void)
+{
+ int i;
+
+ i = ph_ptr;
+
+ printk(KERN_ERR "SCSI IRQ trail\n");
+ do {
+ printk(" %02x:%02x:%02x:%1x",
+ ph_list[i].stat, ph_list[i].ssr,
+ ph_list[i].isr, ph_list[i].ph);
+ i = (i + 1) & (PH_SIZE - 1);
+ if (((i ^ ph_ptr) & 7) == 0)
+ printk("\n");
+ } while (i != ph_ptr);
+ if ((i ^ ph_ptr) & 7)
+ printk("\n");
+
+ i = cmd_ptr;
+ printk(KERN_ERR "FAS216 commands: ");
+ do {
+ printk("%02x:%p ", cmd_list[i].command, cmd_list[i].from);
+ i = (i + 1) & 7;
+ } while (i != cmd_ptr);
+ printk("\n");
+}
+
+static void fas216_done(FAS216_Info *info, unsigned int result);
+
+/**
+ * fas216_get_last_msg - retrive last message from the list
+ * @info: interface to search
+ * @pos: current fifo position
+ *
+ * Retrieve a last message from the list, using position in fifo.
+ */
+static inline unsigned short
+fas216_get_last_msg(FAS216_Info *info, int pos)
+{
+ unsigned short packed_msg = NOP;
+ struct message *msg;
+ int msgnr = 0;
+
+ while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) {
+ if (pos >= msg->fifo)
+ break;
+ }
+
+ if (msg) {
+ if (msg->msg[0] == EXTENDED_MESSAGE)
+ packed_msg = EXTENDED_MESSAGE | msg->msg[2] << 8;
+ else
+ packed_msg = msg->msg[0];
+ }
+
+ fas216_log(info, LOG_MESSAGES,
+ "Message: %04x found at position %02x\n", packed_msg, pos);
+
+ return packed_msg;
+}
+
+/**
+ * fas216_syncperiod - calculate STP register value
+ * @info: state structure for interface connected to device
+ * @ns: period in ns (between subsequent bytes)
+ *
+ * Calculate value to be loaded into the STP register for a given period
+ * in ns. Returns a value suitable for REG_STP.
+ */
+static int fas216_syncperiod(FAS216_Info *info, int ns)
+{
+ int value = (info->ifcfg.clockrate * ns) / 1000;
+
+ fas216_checkmagic(info);
+
+ if (value < 4)
+ value = 4;
+ else if (value > 35)
+ value = 35;
+
+ return value & 31;
+}
+
+/**
+ * fas216_set_sync - setup FAS216 chip for specified transfer period.
+ * @info: state structure for interface connected to device
+ * @target: target
+ *
+ * Correctly setup FAS216 chip for specified transfer period.
+ * Notes : we need to switch the chip out of FASTSCSI mode if we have
+ * a transfer period >= 200ns - otherwise the chip will violate
+ * the SCSI timings.
+ */
+static void fas216_set_sync(FAS216_Info *info, int target)
+{
+ unsigned int cntl3;
+
+ fas216_writeb(info, REG_SOF, info->device[target].sof);
+ fas216_writeb(info, REG_STP, info->device[target].stp);
+
+ cntl3 = info->scsi.cfg[2];
+ if (info->device[target].period >= (200 / 4))
+ cntl3 = cntl3 & ~CNTL3_FASTSCSI;
+
+ fas216_writeb(info, REG_CNTL3, cntl3);
+}
+
+/* Synchronous transfer support
+ *
+ * Note: The SCSI II r10 spec says (5.6.12):
+ *
+ * (2) Due to historical problems with early host adapters that could
+ * not accept an SDTR message, some targets may not initiate synchronous
+ * negotiation after a power cycle as required by this standard. Host
+ * adapters that support synchronous mode may avoid the ensuing failure
+ * modes when the target is independently power cycled by initiating a
+ * synchronous negotiation on each REQUEST SENSE and INQUIRY command.
+ * This approach increases the SCSI bus overhead and is not recommended
+ * for new implementations. The correct method is to respond to an
+ * SDTR message with a MESSAGE REJECT message if the either the
+ * initiator or target devices does not support synchronous transfers
+ * or does not want to negotiate for synchronous transfers at the time.
+ * Using the correct method assures compatibility with wide data
+ * transfers and future enhancements.
+ *
+ * We will always initiate a synchronous transfer negotiation request on
+ * every INQUIRY or REQUEST SENSE message, unless the target itself has
+ * at some point performed a synchronous transfer negotiation request, or
+ * we have synchronous transfers disabled for this device.
+ */
+
+/**
+ * fas216_handlesync - Handle a synchronous transfer message
+ * @info: state structure for interface
+ * @msg: message from target
+ *
+ * Handle a synchronous transfer message from the target
+ */
+static void fas216_handlesync(FAS216_Info *info, char *msg)
+{
+ struct fas216_device *dev = &info->device[info->SCpnt->device->id];
+ enum { sync, async, none, reject } res = none;
+
+#ifdef SCSI2_SYNC
+ switch (msg[0]) {
+ case MESSAGE_REJECT:
+ /* Synchronous transfer request failed.
+ * Note: SCSI II r10:
+ *
+ * SCSI devices that are capable of synchronous
+ * data transfers shall not respond to an SDTR
+ * message with a MESSAGE REJECT message.
+ *
+ * Hence, if we get this condition, we disable
+ * negotiation for this device.
+ */
+ if (dev->sync_state == neg_inprogress) {
+ dev->sync_state = neg_invalid;
+ res = async;
+ }
+ break;
+
+ case EXTENDED_MESSAGE:
+ switch (dev->sync_state) {
+ /* We don't accept synchronous transfer requests.
+ * Respond with a MESSAGE_REJECT to prevent a
+ * synchronous transfer agreement from being reached.
+ */
+ case neg_invalid:
+ res = reject;
+ break;
+
+ /* We were not negotiating a synchronous transfer,
+ * but the device sent us a negotiation request.
+ * Honour the request by sending back a SDTR
+ * message containing our capability, limited by
+ * the targets capability.
+ */
+ default:
+ fas216_cmd(info, CMD_SETATN);
+ if (msg[4] > info->ifcfg.sync_max_depth)
+ msg[4] = info->ifcfg.sync_max_depth;
+ if (msg[3] < 1000 / info->ifcfg.clockrate)
+ msg[3] = 1000 / info->ifcfg.clockrate;
+
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 5,
+ EXTENDED_MESSAGE, 3, EXTENDED_SDTR,
+ msg[3], msg[4]);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+
+ /* This is wrong. The agreement is not in effect
+ * until this message is accepted by the device
+ */
+ dev->sync_state = neg_targcomplete;
+ res = sync;
+ break;
+
+ /* We initiated the synchronous transfer negotiation,
+ * and have successfully received a response from the
+ * target. The synchronous transfer agreement has been
+ * reached. Note: if the values returned are out of our
+ * bounds, we must reject the message.
+ */
+ case neg_inprogress:
+ res = reject;
+ if (msg[4] <= info->ifcfg.sync_max_depth &&
+ msg[3] >= 1000 / info->ifcfg.clockrate) {
+ dev->sync_state = neg_complete;
+ res = sync;
+ }
+ break;
+ }
+ }
+#else
+ res = reject;
+#endif
+
+ switch (res) {
+ case sync:
+ dev->period = msg[3];
+ dev->sof = msg[4];
+ dev->stp = fas216_syncperiod(info, msg[3] * 4);
+ fas216_set_sync(info, info->SCpnt->device->id);
+ break;
+
+ case reject:
+ fas216_cmd(info, CMD_SETATN);
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+
+ case async:
+ dev->period = info->ifcfg.asyncperiod / 4;
+ dev->sof = 0;
+ dev->stp = info->scsi.async_stp;
+ fas216_set_sync(info, info->SCpnt->device->id);
+ break;
+
+ case none:
+ break;
+ }
+}
+
+/**
+ * fas216_updateptrs - update data pointers after transfer suspended/paused
+ * @info: interface's local pointer to update
+ * @bytes_transferred: number of bytes transferred
+ *
+ * Update data pointers after transfer suspended/paused
+ */
+static void fas216_updateptrs(FAS216_Info *info, int bytes_transferred)
+{
+ struct scsi_pointer *SCp = &info->scsi.SCp;
+
+ fas216_checkmagic(info);
+
+ BUG_ON(bytes_transferred < 0);
+
+ SCp->phase -= bytes_transferred;
+
+ while (bytes_transferred != 0) {
+ if (SCp->this_residual > bytes_transferred)
+ break;
+ /*
+ * We have used up this buffer. Move on to the
+ * next buffer.
+ */
+ bytes_transferred -= SCp->this_residual;
+ if (!next_SCp(SCp) && bytes_transferred) {
+ printk(KERN_WARNING "scsi%d.%c: out of buffers\n",
+ info->host->host_no, '0' + info->SCpnt->device->id);
+ return;
+ }
+ }
+
+ SCp->this_residual -= bytes_transferred;
+ if (SCp->this_residual)
+ SCp->ptr += bytes_transferred;
+ else
+ SCp->ptr = NULL;
+}
+
+/**
+ * fas216_pio - transfer data off of/on to card using programmed IO
+ * @info: interface to transfer data to/from
+ * @direction: direction to transfer data (DMA_OUT/DMA_IN)
+ *
+ * Transfer data off of/on to card using programmed IO.
+ * Notes: this is incredibly slow.
+ */
+static void fas216_pio(FAS216_Info *info, fasdmadir_t direction)
+{
+ struct scsi_pointer *SCp = &info->scsi.SCp;
+
+ fas216_checkmagic(info);
+
+ if (direction == DMA_OUT)
+ fas216_writeb(info, REG_FF, get_next_SCp_byte(SCp));
+ else
+ put_next_SCp_byte(SCp, fas216_readb(info, REG_FF));
+
+ if (SCp->this_residual == 0)
+ next_SCp(SCp);
+}
+
+static void fas216_set_stc(FAS216_Info *info, unsigned int length)
+{
+ fas216_writeb(info, REG_STCL, length);
+ fas216_writeb(info, REG_STCM, length >> 8);
+ fas216_writeb(info, REG_STCH, length >> 16);
+}
+
+static unsigned int fas216_get_ctc(FAS216_Info *info)
+{
+ return fas216_readb(info, REG_CTCL) +
+ (fas216_readb(info, REG_CTCM) << 8) +
+ (fas216_readb(info, REG_CTCH) << 16);
+}
+
+/**
+ * fas216_cleanuptransfer - clean up after a transfer has completed.
+ * @info: interface to clean up
+ *
+ * Update the data pointers according to the number of bytes transferred
+ * on the SCSI bus.
+ */
+static void fas216_cleanuptransfer(FAS216_Info *info)
+{
+ unsigned long total, residual, fifo;
+ fasdmatype_t dmatype = info->dma.transfer_type;
+
+ info->dma.transfer_type = fasdma_none;
+
+ /*
+ * PIO transfers do not need to be cleaned up.
+ */
+ if (dmatype == fasdma_pio || dmatype == fasdma_none)
+ return;
+
+ if (dmatype == fasdma_real_all)
+ total = info->scsi.SCp.phase;
+ else
+ total = info->scsi.SCp.this_residual;
+
+ residual = fas216_get_ctc(info);
+
+ fifo = fas216_readb(info, REG_CFIS) & CFIS_CF;
+
+ fas216_log(info, LOG_BUFFER, "cleaning up from previous "
+ "transfer: length 0x%06x, residual 0x%x, fifo %d",
+ total, residual, fifo);
+
+ /*
+ * If we were performing Data-Out, the transfer counter
+ * counts down each time a byte is transferred by the
+ * host to the FIFO. This means we must include the
+ * bytes left in the FIFO from the transfer counter.
+ */
+ if (info->scsi.phase == PHASE_DATAOUT)
+ residual += fifo;
+
+ fas216_updateptrs(info, total - residual);
+}
+
+/**
+ * fas216_transfer - Perform a DMA/PIO transfer off of/on to card
+ * @info: interface from which device disconnected from
+ *
+ * Start a DMA/PIO transfer off of/on to card
+ */
+static void fas216_transfer(FAS216_Info *info)
+{
+ fasdmadir_t direction;
+ fasdmatype_t dmatype;
+
+ fas216_log(info, LOG_BUFFER,
+ "starttransfer: buffer %p length 0x%06x reqlen 0x%06x",
+ info->scsi.SCp.ptr, info->scsi.SCp.this_residual,
+ info->scsi.SCp.phase);
+
+ if (!info->scsi.SCp.ptr) {
+ fas216_log(info, LOG_ERROR, "null buffer passed to "
+ "fas216_starttransfer");
+ print_SCp(&info->scsi.SCp, "SCp: ", "\n");
+ print_SCp(&info->SCpnt->SCp, "Cmnd SCp: ", "\n");
+ return;
+ }
+
+ /*
+ * If we have a synchronous transfer agreement in effect, we must
+ * use DMA mode. If we are using asynchronous transfers, we may
+ * use DMA mode or PIO mode.
+ */
+ if (info->device[info->SCpnt->device->id].sof)
+ dmatype = fasdma_real_all;
+ else
+ dmatype = fasdma_pio;
+
+ if (info->scsi.phase == PHASE_DATAOUT)
+ direction = DMA_OUT;
+ else
+ direction = DMA_IN;
+
+ if (info->dma.setup)
+ dmatype = info->dma.setup(info->host, &info->scsi.SCp,
+ direction, dmatype);
+ info->dma.transfer_type = dmatype;
+
+ if (dmatype == fasdma_real_all)
+ fas216_set_stc(info, info->scsi.SCp.phase);
+ else
+ fas216_set_stc(info, info->scsi.SCp.this_residual);
+
+ switch (dmatype) {
+ case fasdma_pio:
+ fas216_log(info, LOG_BUFFER, "PIO transfer");
+ fas216_writeb(info, REG_SOF, 0);
+ fas216_writeb(info, REG_STP, info->scsi.async_stp);
+ fas216_cmd(info, CMD_TRANSFERINFO);
+ fas216_pio(info, direction);
+ break;
+
+ case fasdma_pseudo:
+ fas216_log(info, LOG_BUFFER, "pseudo transfer");
+ fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA);
+ info->dma.pseudo(info->host, &info->scsi.SCp,
+ direction, info->SCpnt->transfersize);
+ break;
+
+ case fasdma_real_block:
+ fas216_log(info, LOG_BUFFER, "block dma transfer");
+ fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA);
+ break;
+
+ case fasdma_real_all:
+ fas216_log(info, LOG_BUFFER, "total dma transfer");
+ fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA);
+ break;
+
+ default:
+ fas216_log(info, LOG_BUFFER | LOG_ERROR,
+ "invalid FAS216 DMA type");
+ break;
+ }
+}
+
+/**
+ * fas216_stoptransfer - Stop a DMA transfer onto / off of the card
+ * @info: interface from which device disconnected from
+ *
+ * Called when we switch away from DATA IN or DATA OUT phases.
+ */
+static void fas216_stoptransfer(FAS216_Info *info)
+{
+ fas216_checkmagic(info);
+
+ if (info->dma.transfer_type == fasdma_real_all ||
+ info->dma.transfer_type == fasdma_real_block)
+ info->dma.stop(info->host, &info->scsi.SCp);
+
+ fas216_cleanuptransfer(info);
+
+ if (info->scsi.phase == PHASE_DATAIN) {
+ unsigned int fifo;
+
+ /*
+ * If we were performing Data-In, then the FIFO counter
+ * contains the number of bytes not transferred via DMA
+ * from the on-board FIFO. Read them manually.
+ */
+ fifo = fas216_readb(info, REG_CFIS) & CFIS_CF;
+ while (fifo && info->scsi.SCp.ptr) {
+ *info->scsi.SCp.ptr = fas216_readb(info, REG_FF);
+ fas216_updateptrs(info, 1);
+ fifo--;
+ }
+ } else {
+ /*
+ * After a Data-Out phase, there may be unsent
+ * bytes left in the FIFO. Flush them out.
+ */
+ fas216_cmd(info, CMD_FLUSHFIFO);
+ }
+}
+
+static void fas216_aborttransfer(FAS216_Info *info)
+{
+ fas216_checkmagic(info);
+
+ if (info->dma.transfer_type == fasdma_real_all ||
+ info->dma.transfer_type == fasdma_real_block)
+ info->dma.stop(info->host, &info->scsi.SCp);
+
+ info->dma.transfer_type = fasdma_none;
+ fas216_cmd(info, CMD_FLUSHFIFO);
+}
+
+static void fas216_kick(FAS216_Info *info);
+
+/**
+ * fas216_disconnected_intr - handle device disconnection
+ * @info: interface from which device disconnected from
+ *
+ * Handle device disconnection
+ */
+static void fas216_disconnect_intr(FAS216_Info *info)
+{
+ unsigned long flags;
+
+ fas216_checkmagic(info);
+
+ fas216_log(info, LOG_CONNECT, "disconnect phase=%02x",
+ info->scsi.phase);
+
+ msgqueue_flush(&info->scsi.msgs);
+
+ switch (info->scsi.phase) {
+ case PHASE_SELECTION: /* while selecting - no target */
+ case PHASE_SELSTEPS:
+ fas216_done(info, DID_NO_CONNECT);
+ break;
+
+ case PHASE_MSGIN_DISCONNECT: /* message in - disconnecting */
+ info->scsi.disconnectable = 1;
+ info->scsi.phase = PHASE_IDLE;
+ info->stats.disconnects += 1;
+ spin_lock_irqsave(&info->host_lock, flags);
+ if (info->scsi.phase == PHASE_IDLE)
+ fas216_kick(info);
+ spin_unlock_irqrestore(&info->host_lock, flags);
+ break;
+
+ case PHASE_DONE: /* at end of command - complete */
+ fas216_done(info, DID_OK);
+ break;
+
+ case PHASE_MSGOUT: /* message out - possible ABORT message */
+ if (fas216_get_last_msg(info, info->scsi.msgin_fifo) == ABORT) {
+ info->scsi.aborting = 0;
+ fas216_done(info, DID_ABORT);
+ break;
+ }
+
+ default: /* huh? */
+ printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
+ info->host->host_no, fas216_target(info), fas216_drv_phase(info));
+ print_debug_list();
+ fas216_stoptransfer(info);
+ fas216_done(info, DID_ERROR);
+ break;
+ }
+}
+
+/**
+ * fas216_reselected_intr - start reconnection of a device
+ * @info: interface which was reselected
+ *
+ * Start reconnection of a device
+ */
+static void
+fas216_reselected_intr(FAS216_Info *info)
+{
+ unsigned int cfis, i;
+ unsigned char msg[4];
+ unsigned char target, lun, tag;
+
+ fas216_checkmagic(info);
+
+ WARN_ON(info->scsi.phase == PHASE_SELECTION ||
+ info->scsi.phase == PHASE_SELSTEPS);
+
+ cfis = fas216_readb(info, REG_CFIS);
+
+ fas216_log(info, LOG_CONNECT, "reconnect phase=%02x cfis=%02x",
+ info->scsi.phase, cfis);
+
+ cfis &= CFIS_CF;
+
+ if (cfis < 2 || cfis > 4) {
+ printk(KERN_ERR "scsi%d.H: incorrect number of bytes after reselect\n",
+ info->host->host_no);
+ goto bad_message;
+ }
+
+ for (i = 0; i < cfis; i++)
+ msg[i] = fas216_readb(info, REG_FF);
+
+ if (!(msg[0] & (1 << info->host->this_id)) ||
+ !(msg[1] & 0x80))
+ goto initiator_error;
+
+ target = msg[0] & ~(1 << info->host->this_id);
+ target = ffs(target) - 1;
+ lun = msg[1] & 7;
+ tag = 0;
+
+ if (cfis >= 3) {
+ if (msg[2] != SIMPLE_QUEUE_TAG)
+ goto initiator_error;
+
+ tag = msg[3];
+ }
+
+ /* set up for synchronous transfers */
+ fas216_writeb(info, REG_SDID, target);
+ fas216_set_sync(info, target);
+ msgqueue_flush(&info->scsi.msgs);
+
+ fas216_log(info, LOG_CONNECT, "Reconnected: target %1x lun %1x tag %02x",
+ target, lun, tag);
+
+ if (info->scsi.disconnectable && info->SCpnt) {
+ info->scsi.disconnectable = 0;
+ if (info->SCpnt->device->id == target &&
+ info->SCpnt->device->lun == lun &&
+ info->SCpnt->tag == tag) {
+ fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
+ } else {
+ queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
+ fas216_log(info, LOG_CONNECT, "had to move command to disconnected queue");
+ info->SCpnt = NULL;
+ }
+ }
+ if (!info->SCpnt) {
+ info->SCpnt = queue_remove_tgtluntag(&info->queues.disconnected,
+ target, lun, tag);
+ fas216_log(info, LOG_CONNECT, "had to get command");
+ }
+
+ if (info->SCpnt) {
+ /*
+ * Restore data pointer from SAVED data pointer
+ */
+ info->scsi.SCp = info->SCpnt->SCp;
+
+ fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]",
+ info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
+ info->scsi.phase = PHASE_MSGIN;
+ } else {
+ /*
+ * Our command structure not found - abort the
+ * command on the target. Since we have no
+ * record of this command, we can't send
+ * an INITIATOR DETECTED ERROR message.
+ */
+ fas216_cmd(info, CMD_SETATN);
+
+#if 0
+ if (tag)
+ msgqueue_addmsg(&info->scsi.msgs, 2, ABORT_TAG, tag);
+ else
+#endif
+ msgqueue_addmsg(&info->scsi.msgs, 1, ABORT);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+ info->scsi.aborting = 1;
+ }
+
+ fas216_cmd(info, CMD_MSGACCEPTED);
+ return;
+
+ initiator_error:
+ printk(KERN_ERR "scsi%d.H: error during reselection: bytes",
+ info->host->host_no);
+ for (i = 0; i < cfis; i++)
+ printk(" %02x", msg[i]);
+ printk("\n");
+ bad_message:
+ fas216_cmd(info, CMD_SETATN);
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+ fas216_cmd(info, CMD_MSGACCEPTED);
+}
+
+static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen)
+{
+ int i;
+
+ switch (message[0]) {
+ case COMMAND_COMPLETE:
+ if (msglen != 1)
+ goto unrecognised;
+
+ printk(KERN_ERR "scsi%d.%c: command complete with no "
+ "status in MESSAGE_IN?\n",
+ info->host->host_no, fas216_target(info));
+ break;
+
+ case SAVE_POINTERS:
+ if (msglen != 1)
+ goto unrecognised;
+
+ /*
+ * Save current data pointer to SAVED data pointer
+ * SCSI II standard says that we must not acknowledge
+ * this until we have really saved pointers.
+ * NOTE: we DO NOT save the command nor status pointers
+ * as required by the SCSI II standard. These always
+ * point to the start of their respective areas.
+ */
+ info->SCpnt->SCp = info->scsi.SCp;
+ info->SCpnt->SCp.sent_command = 0;
+ fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
+ "save data pointers: [%p, %X]",
+ info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
+ break;
+
+ case RESTORE_POINTERS:
+ if (msglen != 1)
+ goto unrecognised;
+
+ /*
+ * Restore current data pointer from SAVED data pointer
+ */
+ info->scsi.SCp = info->SCpnt->SCp;
+ fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
+ "restore data pointers: [%p, 0x%x]",
+ info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
+ break;
+
+ case DISCONNECT:
+ if (msglen != 1)
+ goto unrecognised;
+
+ info->scsi.phase = PHASE_MSGIN_DISCONNECT;
+ break;
+
+ case MESSAGE_REJECT:
+ if (msglen != 1)
+ goto unrecognised;
+
+ switch (fas216_get_last_msg(info, info->scsi.msgin_fifo)) {
+ case EXTENDED_MESSAGE | EXTENDED_SDTR << 8:
+ fas216_handlesync(info, message);
+ break;
+
+ default:
+ fas216_log(info, 0, "reject, last message 0x%04x",
+ fas216_get_last_msg(info, info->scsi.msgin_fifo));
+ }
+ break;
+
+ case NOP:
+ break;
+
+ case EXTENDED_MESSAGE:
+ if (msglen < 3)
+ goto unrecognised;
+
+ switch (message[2]) {
+ case EXTENDED_SDTR: /* Sync transfer negotiation request/reply */
+ fas216_handlesync(info, message);
+ break;
+
+ default:
+ goto unrecognised;
+ }
+ break;
+
+ default:
+ goto unrecognised;
+ }
+ return;
+
+unrecognised:
+ fas216_log(info, 0, "unrecognised message, rejecting");
+ printk("scsi%d.%c: message was", info->host->host_no, fas216_target(info));
+ for (i = 0; i < msglen; i++)
+ printk("%s%02X", i & 31 ? " " : "\n ", message[i]);
+ printk("\n");
+
+ /*
+ * Something strange seems to be happening here -
+ * I can't use SETATN since the chip gives me an
+ * invalid command interrupt when I do. Weird.
+ */
+fas216_cmd(info, CMD_NOP);
+fas216_dumpstate(info);
+ fas216_cmd(info, CMD_SETATN);
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+fas216_dumpstate(info);
+}
+
+static int fas216_wait_cmd(FAS216_Info *info, int cmd)
+{
+ int tout;
+ int stat;
+
+ fas216_cmd(info, cmd);
+
+ for (tout = 1000; tout; tout -= 1) {
+ stat = fas216_readb(info, REG_STAT);
+ if (stat & (STAT_INT|STAT_PARITYERROR))
+ break;
+ udelay(1);
+ }
+
+ return stat;
+}
+
+static int fas216_get_msg_byte(FAS216_Info *info)
+{
+ unsigned int stat = fas216_wait_cmd(info, CMD_MSGACCEPTED);
+
+ if ((stat & STAT_INT) == 0)
+ goto timedout;
+
+ if ((stat & STAT_BUSMASK) != STAT_MESGIN)
+ goto unexpected_phase_change;
+
+ fas216_readb(info, REG_INST);
+
+ stat = fas216_wait_cmd(info, CMD_TRANSFERINFO);
+
+ if ((stat & STAT_INT) == 0)
+ goto timedout;
+
+ if (stat & STAT_PARITYERROR)
+ goto parity_error;
+
+ if ((stat & STAT_BUSMASK) != STAT_MESGIN)
+ goto unexpected_phase_change;
+
+ fas216_readb(info, REG_INST);
+
+ return fas216_readb(info, REG_FF);
+
+timedout:
+ fas216_log(info, LOG_ERROR, "timed out waiting for message byte");
+ return -1;
+
+unexpected_phase_change:
+ fas216_log(info, LOG_ERROR, "unexpected phase change: status = %02x", stat);
+ return -2;
+
+parity_error:
+ fas216_log(info, LOG_ERROR, "parity error during message in phase");
+ return -3;
+}
+
+/**
+ * fas216_message - handle a function done interrupt from FAS216 chip
+ * @info: interface which caused function done interrupt
+ *
+ * Handle a function done interrupt from FAS216 chip
+ */
+static void fas216_message(FAS216_Info *info)
+{
+ unsigned char *message = info->scsi.message;
+ unsigned int msglen = 1;
+ int msgbyte = 0;
+
+ fas216_checkmagic(info);
+
+ message[0] = fas216_readb(info, REG_FF);
+
+ if (message[0] == EXTENDED_MESSAGE) {
+ msgbyte = fas216_get_msg_byte(info);
+
+ if (msgbyte >= 0) {
+ message[1] = msgbyte;
+
+ for (msglen = 2; msglen < message[1] + 2; msglen++) {
+ msgbyte = fas216_get_msg_byte(info);
+
+ if (msgbyte >= 0)
+ message[msglen] = msgbyte;
+ else
+ break;
+ }
+ }
+ }
+
+ if (msgbyte == -3)
+ goto parity_error;
+
+#ifdef DEBUG_MESSAGES
+ {
+ int i;
+
+ printk("scsi%d.%c: message in: ",
+ info->host->host_no, fas216_target(info));
+ for (i = 0; i < msglen; i++)
+ printk("%02X ", message[i]);
+ printk("\n");
+ }
+#endif
+
+ fas216_parse_message(info, message, msglen);
+ fas216_cmd(info, CMD_MSGACCEPTED);
+ return;
+
+parity_error:
+ fas216_cmd(info, CMD_SETATN);
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, MSG_PARITY_ERROR);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+ fas216_cmd(info, CMD_MSGACCEPTED);
+ return;
+}
+
+/**
+ * fas216_send_command - send command after all message bytes have been sent
+ * @info: interface which caused bus service
+ *
+ * Send a command to a target after all message bytes have been sent
+ */
+static void fas216_send_command(FAS216_Info *info)
+{
+ int i;
+
+ fas216_checkmagic(info);
+
+ fas216_cmd(info, CMD_NOP|CMD_WITHDMA);
+ fas216_cmd(info, CMD_FLUSHFIFO);
+
+ /* load command */
+ for (i = info->scsi.SCp.sent_command; i < info->SCpnt->cmd_len; i++)
+ fas216_writeb(info, REG_FF, info->SCpnt->cmnd[i]);
+
+ fas216_cmd(info, CMD_TRANSFERINFO);
+
+ info->scsi.phase = PHASE_COMMAND;
+}
+
+/**
+ * fas216_send_messageout - handle bus service to send a message
+ * @info: interface which caused bus service
+ *
+ * Handle bus service to send a message.
+ * Note: We do not allow the device to change the data direction!
+ */
+static void fas216_send_messageout(FAS216_Info *info, int start)
+{
+ unsigned int tot_msglen = msgqueue_msglength(&info->scsi.msgs);
+
+ fas216_checkmagic(info);
+
+ fas216_cmd(info, CMD_FLUSHFIFO);
+
+ if (tot_msglen) {
+ struct message *msg;
+ int msgnr = 0;
+
+ while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) {
+ int i;
+
+ for (i = start; i < msg->length; i++)
+ fas216_writeb(info, REG_FF, msg->msg[i]);
+
+ msg->fifo = tot_msglen - (fas216_readb(info, REG_CFIS) & CFIS_CF);
+ start = 0;
+ }
+ } else
+ fas216_writeb(info, REG_FF, NOP);
+
+ fas216_cmd(info, CMD_TRANSFERINFO);
+
+ info->scsi.phase = PHASE_MSGOUT;
+}
+
+/**
+ * fas216_busservice_intr - handle bus service interrupt from FAS216 chip
+ * @info: interface which caused bus service interrupt
+ * @stat: Status register contents
+ * @is: SCSI Status register contents
+ *
+ * Handle a bus service interrupt from FAS216 chip
+ */
+static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigned int is)
+{
+ fas216_checkmagic(info);
+
+ fas216_log(info, LOG_BUSSERVICE,
+ "bus service: stat=%02x is=%02x phase=%02x",
+ stat, is, info->scsi.phase);
+
+ switch (info->scsi.phase) {
+ case PHASE_SELECTION:
+ if ((is & IS_BITS) != IS_MSGBYTESENT)
+ goto bad_is;
+ break;
+
+ case PHASE_SELSTEPS:
+ switch (is & IS_BITS) {
+ case IS_SELARB:
+ case IS_MSGBYTESENT:
+ goto bad_is;
+
+ case IS_NOTCOMMAND:
+ case IS_EARLYPHASE:
+ if ((stat & STAT_BUSMASK) == STAT_MESGIN)
+ break;
+ goto bad_is;
+
+ case IS_COMPLETE:
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ fas216_cmd(info, CMD_NOP);
+
+#define STATE(st,ph) ((ph) << 3 | (st))
+ /* This table describes the legal SCSI state transitions,
+ * as described by the SCSI II spec.
+ */
+ switch (STATE(stat & STAT_BUSMASK, info->scsi.phase)) {
+ case STATE(STAT_DATAIN, PHASE_SELSTEPS):/* Sel w/ steps -> Data In */
+ case STATE(STAT_DATAIN, PHASE_MSGOUT): /* Message Out -> Data In */
+ case STATE(STAT_DATAIN, PHASE_COMMAND): /* Command -> Data In */
+ case STATE(STAT_DATAIN, PHASE_MSGIN): /* Message In -> Data In */
+ info->scsi.phase = PHASE_DATAIN;
+ fas216_transfer(info);
+ return;
+
+ case STATE(STAT_DATAIN, PHASE_DATAIN): /* Data In -> Data In */
+ case STATE(STAT_DATAOUT, PHASE_DATAOUT):/* Data Out -> Data Out */
+ fas216_cleanuptransfer(info);
+ fas216_transfer(info);
+ return;
+
+ case STATE(STAT_DATAOUT, PHASE_SELSTEPS):/* Sel w/ steps-> Data Out */
+ case STATE(STAT_DATAOUT, PHASE_MSGOUT): /* Message Out -> Data Out */
+ case STATE(STAT_DATAOUT, PHASE_COMMAND):/* Command -> Data Out */
+ case STATE(STAT_DATAOUT, PHASE_MSGIN): /* Message In -> Data Out */
+ fas216_cmd(info, CMD_FLUSHFIFO);
+ info->scsi.phase = PHASE_DATAOUT;
+ fas216_transfer(info);
+ return;
+
+ case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */
+ case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */
+ fas216_stoptransfer(info);
+ case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */
+ case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */
+ case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */
+ case STATE(STAT_STATUS, PHASE_MSGIN): /* Message In -> Status */
+ fas216_cmd(info, CMD_INITCMDCOMPLETE);
+ info->scsi.phase = PHASE_STATUS;
+ return;
+
+ case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */
+ case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */
+ fas216_stoptransfer(info);
+ case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */
+ case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */
+ case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */
+ info->scsi.msgin_fifo = fas216_readb(info, REG_CFIS) & CFIS_CF;
+ fas216_cmd(info, CMD_FLUSHFIFO);
+ fas216_cmd(info, CMD_TRANSFERINFO);
+ info->scsi.phase = PHASE_MSGIN;
+ return;
+
+ case STATE(STAT_MESGIN, PHASE_MSGIN):
+ info->scsi.msgin_fifo = fas216_readb(info, REG_CFIS) & CFIS_CF;
+ fas216_cmd(info, CMD_TRANSFERINFO);
+ return;
+
+ case STATE(STAT_COMMAND, PHASE_MSGOUT): /* Message Out -> Command */
+ case STATE(STAT_COMMAND, PHASE_MSGIN): /* Message In -> Command */
+ fas216_send_command(info);
+ info->scsi.phase = PHASE_COMMAND;
+ return;
+
+
+ /*
+ * Selection -> Message Out
+ */
+ case STATE(STAT_MESGOUT, PHASE_SELECTION):
+ fas216_send_messageout(info, 1);
+ return;
+
+ /*
+ * Message Out -> Message Out
+ */
+ case STATE(STAT_MESGOUT, PHASE_SELSTEPS):
+ case STATE(STAT_MESGOUT, PHASE_MSGOUT):
+ /*
+ * If we get another message out phase, this usually
+ * means some parity error occurred. Resend complete
+ * set of messages. If we have more than one byte to
+ * send, we need to assert ATN again.
+ */
+ if (info->device[info->SCpnt->device->id].parity_check) {
+ /*
+ * We were testing... good, the device
+ * supports parity checking.
+ */
+ info->device[info->SCpnt->device->id].parity_check = 0;
+ info->device[info->SCpnt->device->id].parity_enabled = 1;
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]);
+ }
+
+ if (msgqueue_msglength(&info->scsi.msgs) > 1)
+ fas216_cmd(info, CMD_SETATN);
+ /*FALLTHROUGH*/
+
+ /*
+ * Any -> Message Out
+ */
+ case STATE(STAT_MESGOUT, PHASE_MSGOUT_EXPECT):
+ fas216_send_messageout(info, 0);
+ return;
+
+ /* Error recovery rules.
+ * These either attempt to abort or retry the operation.
+ * TODO: we need more of these
+ */
+ case STATE(STAT_COMMAND, PHASE_COMMAND):/* Command -> Command */
+ /* error - we've sent out all the command bytes
+ * we have.
+ * NOTE: we need SAVE DATA POINTERS/RESTORE DATA POINTERS
+ * to include the command bytes sent for this to work
+ * correctly.
+ */
+ printk(KERN_ERR "scsi%d.%c: "
+ "target trying to receive more command bytes\n",
+ info->host->host_no, fas216_target(info));
+ fas216_cmd(info, CMD_SETATN);
+ fas216_set_stc(info, 15);
+ fas216_cmd(info, CMD_PADBYTES | CMD_WITHDMA);
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+ return;
+ }
+
+ if (info->scsi.phase == PHASE_MSGIN_DISCONNECT) {
+ printk(KERN_ERR "scsi%d.%c: disconnect message received, but bus service %s?\n",
+ info->host->host_no, fas216_target(info),
+ fas216_bus_phase(stat));
+ msgqueue_flush(&info->scsi.msgs);
+ fas216_cmd(info, CMD_SETATN);
+ msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR);
+ info->scsi.phase = PHASE_MSGOUT_EXPECT;
+ info->scsi.aborting = 1;
+ fas216_cmd(info, CMD_TRANSFERINFO);
+ return;
+ }
+ printk(KERN_ERR "scsi%d.%c: bus phase %s after %s?\n",
+ info->host->host_no, fas216_target(info),
+ fas216_bus_phase(stat),
+ fas216_drv_phase(info));
+ print_debug_list();
+ return;
+
+bad_is:
+ fas216_log(info, 0, "bus service at step %d?", is & IS_BITS);
+ fas216_dumpstate(info);
+ print_debug_list();
+
+ fas216_done(info, DID_ERROR);
+}
+
+/**
+ * fas216_funcdone_intr - handle a function done interrupt from FAS216 chip
+ * @info: interface which caused function done interrupt
+ * @stat: Status register contents
+ * @is: SCSI Status register contents
+ *
+ * Handle a function done interrupt from FAS216 chip
+ */
+static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned int is)
+{
+ unsigned int fifo_len = fas216_readb(info, REG_CFIS) & CFIS_CF;
+
+ fas216_checkmagic(info);
+
+ fas216_log(info, LOG_FUNCTIONDONE,
+ "function done: stat=%02x is=%02x phase=%02x",
+ stat, is, info->scsi.phase);
+
+ switch (info->scsi.phase) {
+ case PHASE_STATUS: /* status phase - read status and msg */
+ if (fifo_len != 2) {
+ fas216_log(info, 0, "odd number of bytes in FIFO: %d", fifo_len);
+ }
+ /*
+ * Read status then message byte.
+ */
+ info->scsi.SCp.Status = fas216_readb(info, REG_FF);
+ info->scsi.SCp.Message = fas216_readb(info, REG_FF);
+ info->scsi.phase = PHASE_DONE;
+ fas216_cmd(info, CMD_MSGACCEPTED);
+ break;
+
+ case PHASE_IDLE:
+ case PHASE_SELECTION:
+ case PHASE_SELSTEPS:
+ break;
+
+ case PHASE_MSGIN: /* message in phase */
+ if ((stat & STAT_BUSMASK) == STAT_MESGIN) {
+ info->scsi.msgin_fifo = fifo_len;
+ fas216_message(info);
+ break;
+ }
+
+ default:
+ fas216_log(info, 0, "internal phase %s for function done?"
+ " What do I do with this?",
+ fas216_target(info), fas216_drv_phase(info));
+ }
+}
+
+static void fas216_bus_reset(FAS216_Info *info)
+{
+ neg_t sync_state;
+ int i;
+
+ msgqueue_flush(&info->scsi.msgs);
+
+ sync_state = neg_invalid;
+
+#ifdef SCSI2_SYNC
+ if (info->ifcfg.capabilities & (FASCAP_DMA|FASCAP_PSEUDODMA))
+ sync_state = neg_wait;
+#endif
+
+ info->scsi.phase = PHASE_IDLE;
+ info->SCpnt = NULL; /* bug! */
+ memset(&info->scsi.SCp, 0, sizeof(info->scsi.SCp));
+
+ for (i = 0; i < 8; i++) {
+ info->device[i].disconnect_ok = info->ifcfg.disconnect_ok;
+ info->device[i].sync_state = sync_state;
+ info->device[i].period = info->ifcfg.asyncperiod / 4;
+ info->device[i].stp = info->scsi.async_stp;
+ info->device[i].sof = 0;
+ info->device[i].wide_xfer = 0;
+ }
+
+ info->rst_bus_status = 1;
+ wake_up(&info->eh_wait);
+}
+
+/**
+ * fas216_intr - handle interrupts to progress a command
+ * @info: interface to service
+ *
+ * Handle interrupts from the interface to progress a command
+ */
+irqreturn_t fas216_intr(FAS216_Info *info)
+{
+ unsigned char inst, is, stat;
+ int handled = IRQ_NONE;
+
+ fas216_checkmagic(info);
+
+ stat = fas216_readb(info, REG_STAT);
+ is = fas216_readb(info, REG_IS);
+ inst = fas216_readb(info, REG_INST);
+
+ add_debug_list(stat, is, inst, info->scsi.phase);
+
+ if (stat & STAT_INT) {
+ if (inst & INST_BUSRESET) {
+ fas216_log(info, 0, "bus reset detected");
+ fas216_bus_reset(info);
+ scsi_report_bus_reset(info->host, 0);
+ } else if (inst & INST_ILLEGALCMD) {
+ fas216_log(info, LOG_ERROR, "illegal command given\n");
+ fas216_dumpstate(info);
+ print_debug_list();
+ } else if (inst & INST_DISCONNECT)
+ fas216_disconnect_intr(info);
+ else if (inst & INST_RESELECTED) /* reselected */
+ fas216_reselected_intr(info);
+ else if (inst & INST_BUSSERVICE) /* bus service request */
+ fas216_busservice_intr(info, stat, is);
+ else if (inst & INST_FUNCDONE) /* function done */
+ fas216_funcdone_intr(info, stat, is);
+ else
+ fas216_log(info, 0, "unknown interrupt received:"
+ " phase %s inst %02X is %02X stat %02X",
+ fas216_drv_phase(info), inst, is, stat);
+ handled = IRQ_HANDLED;
+ }
+ return handled;
+}
+
+static void __fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
+{
+ int tot_msglen;
+
+ /* following what the ESP driver says */
+ fas216_set_stc(info, 0);
+ fas216_cmd(info, CMD_NOP | CMD_WITHDMA);
+
+ /* flush FIFO */
+ fas216_cmd(info, CMD_FLUSHFIFO);
+
+ /* load bus-id and timeout */
+ fas216_writeb(info, REG_SDID, BUSID(SCpnt->device->id));
+ fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout);
+
+ /* synchronous transfers */
+ fas216_set_sync(info, SCpnt->device->id);
+
+ tot_msglen = msgqueue_msglength(&info->scsi.msgs);
+
+#ifdef DEBUG_MESSAGES
+ {
+ struct message *msg;
+ int msgnr = 0, i;
+
+ printk("scsi%d.%c: message out: ",
+ info->host->host_no, '0' + SCpnt->device->id);
+ while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) {
+ printk("{ ");
+ for (i = 0; i < msg->length; i++)
+ printk("%02x ", msg->msg[i]);
+ printk("} ");
+ }
+ printk("\n");
+ }
+#endif
+
+ if (tot_msglen == 1 || tot_msglen == 3) {
+ /*
+ * We have an easy message length to send...
+ */
+ struct message *msg;
+ int msgnr = 0, i;
+
+ info->scsi.phase = PHASE_SELSTEPS;
+
+ /* load message bytes */
+ while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) {
+ for (i = 0; i < msg->length; i++)
+ fas216_writeb(info, REG_FF, msg->msg[i]);
+ msg->fifo = tot_msglen - (fas216_readb(info, REG_CFIS) & CFIS_CF);
+ }
+
+ /* load command */
+ for (i = 0; i < SCpnt->cmd_len; i++)
+ fas216_writeb(info, REG_FF, SCpnt->cmnd[i]);
+
+ if (tot_msglen == 1)
+ fas216_cmd(info, CMD_SELECTATN);
+ else
+ fas216_cmd(info, CMD_SELECTATN3);
+ } else {
+ /*
+ * We have an unusual number of message bytes to send.
+ * Load first byte into fifo, and issue SELECT with ATN and
+ * stop steps.
+ */
+ struct message *msg = msgqueue_getmsg(&info->scsi.msgs, 0);
+
+ fas216_writeb(info, REG_FF, msg->msg[0]);
+ msg->fifo = 1;
+
+ fas216_cmd(info, CMD_SELECTATNSTOP);
+ }
+}
+
+/*
+ * Decide whether we need to perform a parity test on this device.
+ * Can also be used to force parity error conditions during initial
+ * information transfer phase (message out) for test purposes.
+ */
+static int parity_test(FAS216_Info *info, int target)
+{
+#if 0
+ if (target == 3) {
+ info->device[target].parity_check = 0;
+ return 1;
+ }
+#endif
+ return info->device[target].parity_check;
+}
+
+static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
+{
+ int disconnect_ok;
+
+ /*
+ * claim host busy
+ */
+ info->scsi.phase = PHASE_SELECTION;
+ info->scsi.SCp = SCpnt->SCp;
+ info->SCpnt = SCpnt;
+ info->dma.transfer_type = fasdma_none;
+
+ if (parity_test(info, SCpnt->device->id))
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0] | CNTL1_PTE);
+ else
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]);
+
+ /*
+ * Don't allow request sense commands to disconnect.
+ */
+ disconnect_ok = SCpnt->cmnd[0] != REQUEST_SENSE &&
+ info->device[SCpnt->device->id].disconnect_ok;
+
+ /*
+ * build outgoing message bytes
+ */
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, IDENTIFY(disconnect_ok, SCpnt->device->lun));
+
+ /*
+ * add tag message if required
+ */
+ if (SCpnt->tag)
+ msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+
+ do {
+#ifdef SCSI2_SYNC
+ if ((info->device[SCpnt->device->id].sync_state == neg_wait ||
+ info->device[SCpnt->device->id].sync_state == neg_complete) &&
+ (SCpnt->cmnd[0] == REQUEST_SENSE ||
+ SCpnt->cmnd[0] == INQUIRY)) {
+ info->device[SCpnt->device->id].sync_state = neg_inprogress;
+ msgqueue_addmsg(&info->scsi.msgs, 5,
+ EXTENDED_MESSAGE, 3, EXTENDED_SDTR,
+ 1000 / info->ifcfg.clockrate,
+ info->ifcfg.sync_max_depth);
+ break;
+ }
+#endif
+ } while (0);
+
+ __fas216_start_command(info, SCpnt);
+}
+
+static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
+{
+#ifdef SCSI2_TAG
+ /*
+ * tagged queuing - allocate a new tag to this command
+ */
+ if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
+ SCpnt->cmnd[0] != INQUIRY) {
+ SCpnt->device->current_tag += 1;
+ if (SCpnt->device->current_tag == 0)
+ SCpnt->device->current_tag = 1;
+ SCpnt->tag = SCpnt->device->current_tag;
+ } else
+#endif
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+
+ info->stats.removes += 1;
+ switch (SCpnt->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ info->stats.writes += 1;
+ break;
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ info->stats.reads += 1;
+ break;
+ default:
+ info->stats.miscs += 1;
+ break;
+ }
+}
+
+static void fas216_do_bus_device_reset(FAS216_Info *info,
+ struct scsi_cmnd *SCpnt)
+{
+ struct message *msg;
+
+ /*
+ * claim host busy
+ */
+ info->scsi.phase = PHASE_SELECTION;
+ info->scsi.SCp = SCpnt->SCp;
+ info->SCpnt = SCpnt;
+ info->dma.transfer_type = fasdma_none;
+
+ fas216_log(info, LOG_ERROR, "sending bus device reset");
+
+ msgqueue_flush(&info->scsi.msgs);
+ msgqueue_addmsg(&info->scsi.msgs, 1, BUS_DEVICE_RESET);
+
+ /* following what the ESP driver says */
+ fas216_set_stc(info, 0);
+ fas216_cmd(info, CMD_NOP | CMD_WITHDMA);
+
+ /* flush FIFO */
+ fas216_cmd(info, CMD_FLUSHFIFO);
+
+ /* load bus-id and timeout */
+ fas216_writeb(info, REG_SDID, BUSID(SCpnt->device->id));
+ fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout);
+
+ /* synchronous transfers */
+ fas216_set_sync(info, SCpnt->device->id);
+
+ msg = msgqueue_getmsg(&info->scsi.msgs, 0);
+
+ fas216_writeb(info, REG_FF, BUS_DEVICE_RESET);
+ msg->fifo = 1;
+
+ fas216_cmd(info, CMD_SELECTATNSTOP);
+}
+
+/**
+ * fas216_kick - kick a command to the interface
+ * @info: our host interface to kick
+ *
+ * Kick a command to the interface, interface should be idle.
+ * Notes: Interrupts are always disabled!
+ */
+static void fas216_kick(FAS216_Info *info)
+{
+ struct scsi_cmnd *SCpnt = NULL;
+#define TYPE_OTHER 0
+#define TYPE_RESET 1
+#define TYPE_QUEUE 2
+ int where_from = TYPE_OTHER;
+
+ fas216_checkmagic(info);
+
+ /*
+ * Obtain the next command to process.
+ */
+ do {
+ if (info->rstSCpnt) {
+ SCpnt = info->rstSCpnt;
+ /* don't remove it */
+ where_from = TYPE_RESET;
+ break;
+ }
+
+ if (info->reqSCpnt) {
+ SCpnt = info->reqSCpnt;
+ info->reqSCpnt = NULL;
+ break;
+ }
+
+ if (info->origSCpnt) {
+ SCpnt = info->origSCpnt;
+ info->origSCpnt = NULL;
+ break;
+ }
+
+ /* retrieve next command */
+ if (!SCpnt) {
+ SCpnt = queue_remove_exclude(&info->queues.issue,
+ info->busyluns);
+ where_from = TYPE_QUEUE;
+ break;
+ }
+ } while (0);
+
+ if (!SCpnt) {
+ /*
+ * no command pending, so enable reselection.
+ */
+ fas216_cmd(info, CMD_ENABLESEL);
+ return;
+ }
+
+ /*
+ * We're going to start a command, so disable reselection
+ */
+ fas216_cmd(info, CMD_DISABLESEL);
+
+ if (info->scsi.disconnectable && info->SCpnt) {
+ fas216_log(info, LOG_CONNECT,
+ "moved command for %d to disconnected queue",
+ info->SCpnt->device->id);
+ queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
+ info->scsi.disconnectable = 0;
+ info->SCpnt = NULL;
+ }
+
+ fas216_log_command(info, LOG_CONNECT | LOG_MESSAGES, SCpnt,
+ "starting");
+
+ switch (where_from) {
+ case TYPE_QUEUE:
+ fas216_allocate_tag(info, SCpnt);
+ case TYPE_OTHER:
+ fas216_start_command(info, SCpnt);
+ break;
+ case TYPE_RESET:
+ fas216_do_bus_device_reset(info, SCpnt);
+ break;
+ }
+
+ fas216_log(info, LOG_CONNECT, "select: data pointers [%p, %X]",
+ info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
+
+ /*
+ * should now get either DISCONNECT or
+ * (FUNCTION DONE with BUS SERVICE) interrupt
+ */
+}
+
+/*
+ * Clean up from issuing a BUS DEVICE RESET message to a device.
+ */
+static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
+ unsigned int result)
+{
+ fas216_log(info, LOG_ERROR, "fas216 device reset complete");
+
+ info->rstSCpnt = NULL;
+ info->rst_dev_status = 1;
+ wake_up(&info->eh_wait);
+}
+
+/**
+ * fas216_rq_sns_done - Finish processing automatic request sense command
+ * @info: interface that completed
+ * @SCpnt: command that completed
+ * @result: driver byte of result
+ *
+ * Finish processing automatic request sense command
+ */
+static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
+ unsigned int result)
+{
+ fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
+ "request sense complete, result=0x%04x%02x%02x",
+ result, SCpnt->SCp.Message, SCpnt->SCp.Status);
+
+ if (result != DID_OK || SCpnt->SCp.Status != GOOD)
+ /*
+ * Something went wrong. Make sure that we don't
+ * have valid data in the sense buffer that could
+ * confuse the higher levels.
+ */
+ memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
+//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
+ /*
+ * Note that we don't set SCpnt->result, since that should
+ * reflect the status of the command that we were asked by
+ * the upper layers to process. This would have been set
+ * correctly by fas216_std_done.
+ */
+ scsi_eh_restore_cmnd(SCpnt, &info->ses);
+ SCpnt->scsi_done(SCpnt);
+}
+
+/**
+ * fas216_std_done - finish processing of standard command
+ * @info: interface that completed
+ * @SCpnt: command that completed
+ * @result: driver byte of result
+ *
+ * Finish processing of standard command
+ */
+static void
+fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
+{
+ info->stats.fins += 1;
+
+ SCpnt->result = result << 16 | info->scsi.SCp.Message << 8 |
+ info->scsi.SCp.Status;
+
+ fas216_log_command(info, LOG_CONNECT, SCpnt,
+ "command complete, result=0x%08x", SCpnt->result);
+
+ /*
+ * If the driver detected an error, we're all done.
+ */
+ if (host_byte(SCpnt->result) != DID_OK ||
+ msg_byte(SCpnt->result) != COMMAND_COMPLETE)
+ goto done;
+
+ /*
+ * If the command returned CHECK_CONDITION or COMMAND_TERMINATED
+ * status, request the sense information.
+ */
+ if (status_byte(SCpnt->result) == CHECK_CONDITION ||
+ status_byte(SCpnt->result) == COMMAND_TERMINATED)
+ goto request_sense;
+
+ /*
+ * If the command did not complete with GOOD status,
+ * we are all done here.
+ */
+ if (status_byte(SCpnt->result) != GOOD)
+ goto done;
+
+ /*
+ * We have successfully completed a command. Make sure that
+ * we do not have any buffers left to transfer. The world
+ * is not perfect, and we seem to occasionally hit this.
+ * It can be indicative of a buggy driver, target or the upper
+ * levels of the SCSI code.
+ */
+ if (info->scsi.SCp.ptr) {
+ switch (SCpnt->cmnd[0]) {
+ case INQUIRY:
+ case START_STOP:
+ case MODE_SENSE:
+ break;
+
+ default:
+ scmd_printk(KERN_ERR, SCpnt,
+ "incomplete data transfer detected: res=%08X ptr=%p len=%X\n",
+ SCpnt->result, info->scsi.SCp.ptr,
+ info->scsi.SCp.this_residual);
+ scsi_print_command(SCpnt);
+ set_host_byte(SCpnt, DID_ERROR);
+ goto request_sense;
+ }
+ }
+
+done:
+ if (SCpnt->scsi_done) {
+ SCpnt->scsi_done(SCpnt);
+ return;
+ }
+
+ panic("scsi%d.H: null scsi_done function in fas216_done",
+ info->host->host_no);
+
+
+request_sense:
+ if (SCpnt->cmnd[0] == REQUEST_SENSE)
+ goto done;
+
+ scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0);
+ fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
+ "requesting sense");
+ init_SCp(SCpnt);
+ SCpnt->SCp.Message = 0;
+ SCpnt->SCp.Status = 0;
+ SCpnt->tag = 0;
+ SCpnt->host_scribble = (void *)fas216_rq_sns_done;
+
+ /*
+ * Place this command into the high priority "request
+ * sense" slot. This will be the very next command
+ * executed, unless a target connects to us.
+ */
+ if (info->reqSCpnt)
+ printk(KERN_WARNING "scsi%d.%c: losing request command\n",
+ info->host->host_no, '0' + SCpnt->device->id);
+ info->reqSCpnt = SCpnt;
+}
+
+/**
+ * fas216_done - complete processing for current command
+ * @info: interface that completed
+ * @result: driver byte of result
+ *
+ * Complete processing for current command
+ */
+static void fas216_done(FAS216_Info *info, unsigned int result)
+{
+ void (*fn)(FAS216_Info *, struct scsi_cmnd *, unsigned int);
+ struct scsi_cmnd *SCpnt;
+ unsigned long flags;
+
+ fas216_checkmagic(info);
+
+ if (!info->SCpnt)
+ goto no_command;
+
+ SCpnt = info->SCpnt;
+ info->SCpnt = NULL;
+ info->scsi.phase = PHASE_IDLE;
+
+ if (info->scsi.aborting) {
+ fas216_log(info, 0, "uncaught abort - returning DID_ABORT");
+ result = DID_ABORT;
+ info->scsi.aborting = 0;
+ }
+
+ /*
+ * Sanity check the completion - if we have zero bytes left
+ * to transfer, we should not have a valid pointer.
+ */
+ if (info->scsi.SCp.ptr && info->scsi.SCp.this_residual == 0) {
+ scmd_printk(KERN_INFO, SCpnt,
+ "zero bytes left to transfer, but buffer pointer still valid: ptr=%p len=%08x\n",
+ info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
+ info->scsi.SCp.ptr = NULL;
+ scsi_print_command(SCpnt);
+ }
+
+ /*
+ * Clear down this command as completed. If we need to request
+ * the sense information, fas216_kick will re-assert the busy
+ * status.
+ */
+ info->device[SCpnt->device->id].parity_check = 0;
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+
+ fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble;
+ fn(info, SCpnt, result);
+
+ if (info->scsi.irq) {
+ spin_lock_irqsave(&info->host_lock, flags);
+ if (info->scsi.phase == PHASE_IDLE)
+ fas216_kick(info);
+ spin_unlock_irqrestore(&info->host_lock, flags);
+ }
+ return;
+
+no_command:
+ panic("scsi%d.H: null command in fas216_done",
+ info->host->host_no);
+}
+
+/**
+ * fas216_queue_command - queue a command for adapter to process.
+ * @SCpnt: Command to queue
+ * @done: done function to call once command is complete
+ *
+ * Queue a command for adapter to process.
+ * Returns: 0 on success, else error.
+ * Notes: io_request_lock is held, interrupts are disabled.
+ */
+static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+ int result;
+
+ fas216_checkmagic(info);
+
+ fas216_log_command(info, LOG_CONNECT, SCpnt,
+ "received command (%p)", SCpnt);
+
+ SCpnt->scsi_done = done;
+ SCpnt->host_scribble = (void *)fas216_std_done;
+ SCpnt->result = 0;
+
+ init_SCp(SCpnt);
+
+ info->stats.queues += 1;
+ SCpnt->tag = 0;
+
+ spin_lock(&info->host_lock);
+
+ /*
+ * Add command into execute queue and let it complete under
+ * whatever scheme we're using.
+ */
+ result = !queue_add_cmd_ordered(&info->queues.issue, SCpnt);
+
+ /*
+ * If we successfully added the command,
+ * kick the interface to get it moving.
+ */
+ if (result == 0 && info->scsi.phase == PHASE_IDLE)
+ fas216_kick(info);
+ spin_unlock(&info->host_lock);
+
+ fas216_log_target(info, LOG_CONNECT, -1, "queue %s",
+ result ? "failure" : "success");
+
+ return result;
+}
+
+DEF_SCSI_QCMD(fas216_queue_command)
+
+/**
+ * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command
+ * @SCpnt: Command to wake
+ *
+ * Trigger restart of a waiting thread in fas216_command
+ */
+static void fas216_internal_done(struct scsi_cmnd *SCpnt)
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+
+ fas216_checkmagic(info);
+
+ info->internal_done = 1;
+}
+
+/**
+ * fas216_noqueue_command - process a command for the adapter.
+ * @SCpnt: Command to queue
+ *
+ * Queue a command for adapter to process.
+ * Returns: scsi result code.
+ * Notes: io_request_lock is held, interrupts are disabled.
+ */
+static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+
+ fas216_checkmagic(info);
+
+ /*
+ * We should only be using this if we don't have an interrupt.
+ * Provide some "incentive" to use the queueing code.
+ */
+ BUG_ON(info->scsi.irq);
+
+ info->internal_done = 0;
+ fas216_queue_command_lck(SCpnt, fas216_internal_done);
+
+ /*
+ * This wastes time, since we can't return until the command is
+ * complete. We can't sleep either since we may get re-entered!
+ * However, we must re-enable interrupts, or else we'll be
+ * waiting forever.
+ */
+ spin_unlock_irq(info->host->host_lock);
+
+ while (!info->internal_done) {
+ /*
+ * If we don't have an IRQ, then we must poll the card for
+ * it's interrupt, and use that to call this driver's
+ * interrupt routine. That way, we keep the command
+ * progressing. Maybe we can add some intelligence here
+ * and go to sleep if we know that the device is going
+ * to be some time (eg, disconnected).
+ */
+ if (fas216_readb(info, REG_STAT) & STAT_INT) {
+ spin_lock_irq(info->host->host_lock);
+ fas216_intr(info);
+ spin_unlock_irq(info->host->host_lock);
+ }
+ }
+
+ spin_lock_irq(info->host->host_lock);
+
+ done(SCpnt);
+
+ return 0;
+}
+
+DEF_SCSI_QCMD(fas216_noqueue_command)
+
+/*
+ * Error handler timeout function. Indicate that we timed out,
+ * and wake up any error handler process so it can continue.
+ */
+static void fas216_eh_timer(unsigned long data)
+{
+ FAS216_Info *info = (FAS216_Info *)data;
+
+ fas216_log(info, LOG_ERROR, "error handling timed out\n");
+
+ del_timer(&info->eh_timer);
+
+ if (info->rst_bus_status == 0)
+ info->rst_bus_status = -1;
+ if (info->rst_dev_status == 0)
+ info->rst_dev_status = -1;
+
+ wake_up(&info->eh_wait);
+}
+
+enum res_find {
+ res_failed, /* not found */
+ res_success, /* command on issue queue */
+ res_hw_abort /* command on disconnected dev */
+};
+
+/**
+ * fas216_do_abort - decide how to abort a command
+ * @SCpnt: command to abort
+ *
+ * Decide how to abort a command.
+ * Returns: abort status
+ */
+static enum res_find fas216_find_command(FAS216_Info *info,
+ struct scsi_cmnd *SCpnt)
+{
+ enum res_find res = res_failed;
+
+ if (queue_remove_cmd(&info->queues.issue, SCpnt)) {
+ /*
+ * The command was on the issue queue, and has not been
+ * issued yet. We can remove the command from the queue,
+ * and acknowledge the abort. Neither the device nor the
+ * interface know about the command.
+ */
+ printk("on issue queue ");
+
+ res = res_success;
+ } else if (queue_remove_cmd(&info->queues.disconnected, SCpnt)) {
+ /*
+ * The command was on the disconnected queue. We must
+ * reconnect with the device if possible, and send it
+ * an abort message.
+ */
+ printk("on disconnected queue ");
+
+ res = res_hw_abort;
+ } else if (info->SCpnt == SCpnt) {
+ printk("executing ");
+
+ switch (info->scsi.phase) {
+ /*
+ * If the interface is idle, and the command is 'disconnectable',
+ * then it is the same as on the disconnected queue.
+ */
+ case PHASE_IDLE:
+ if (info->scsi.disconnectable) {
+ info->scsi.disconnectable = 0;
+ info->SCpnt = NULL;
+ res = res_hw_abort;
+ }
+ break;
+
+ default:
+ break;
+ }
+ } else if (info->origSCpnt == SCpnt) {
+ /*
+ * The command will be executed next, but a command
+ * is currently using the interface. This is similar to
+ * being on the issue queue, except the busylun bit has
+ * been set.
+ */
+ info->origSCpnt = NULL;
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+ printk("waiting for execution ");
+ res = res_success;
+ } else
+ printk("unknown ");
+
+ return res;
+}
+
+/**
+ * fas216_eh_abort - abort this command
+ * @SCpnt: command to abort
+ *
+ * Abort this command.
+ * Returns: FAILED if unable to abort
+ * Notes: io_request_lock is taken, and irqs are disabled
+ */
+int fas216_eh_abort(struct scsi_cmnd *SCpnt)
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+ int result = FAILED;
+
+ fas216_checkmagic(info);
+
+ info->stats.aborts += 1;
+
+ scmd_printk(KERN_WARNING, SCpnt, "abort command\n");
+
+ print_debug_list();
+ fas216_dumpstate(info);
+
+ switch (fas216_find_command(info, SCpnt)) {
+ /*
+ * We found the command, and cleared it out. Either
+ * the command is still known to be executing on the
+ * target, or the busylun bit is not set.
+ */
+ case res_success:
+ scmd_printk(KERN_WARNING, SCpnt, "abort %p success\n", SCpnt);
+ result = SUCCESS;
+ break;
+
+ /*
+ * We need to reconnect to the target and send it an
+ * ABORT or ABORT_TAG message. We can only do this
+ * if the bus is free.
+ */
+ case res_hw_abort:
+
+ /*
+ * We are unable to abort the command for some reason.
+ */
+ default:
+ case res_failed:
+ scmd_printk(KERN_WARNING, SCpnt, "abort %p failed\n", SCpnt);
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * fas216_eh_device_reset - Reset the device associated with this command
+ * @SCpnt: command specifing device to reset
+ *
+ * Reset the device associated with this command.
+ * Returns: FAILED if unable to reset.
+ * Notes: We won't be re-entered, so we'll only have one device
+ * reset on the go at one time.
+ */
+int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+ unsigned long flags;
+ int i, res = FAILED, target = SCpnt->device->id;
+
+ fas216_log(info, LOG_ERROR, "device reset for target %d", target);
+
+ spin_lock_irqsave(&info->host_lock, flags);
+
+ do {
+ /*
+ * If we are currently connected to a device, and
+ * it is the device we want to reset, there is
+ * nothing we can do here. Chances are it is stuck,
+ * and we need a bus reset.
+ */
+ if (info->SCpnt && !info->scsi.disconnectable &&
+ info->SCpnt->device->id == SCpnt->device->id)
+ break;
+
+ /*
+ * We're going to be resetting this device. Remove
+ * all pending commands from the driver. By doing
+ * so, we guarantee that we won't touch the command
+ * structures except to process the reset request.
+ */
+ queue_remove_all_target(&info->queues.issue, target);
+ queue_remove_all_target(&info->queues.disconnected, target);
+ if (info->origSCpnt && info->origSCpnt->device->id == target)
+ info->origSCpnt = NULL;
+ if (info->reqSCpnt && info->reqSCpnt->device->id == target)
+ info->reqSCpnt = NULL;
+ for (i = 0; i < 8; i++)
+ clear_bit(target * 8 + i, info->busyluns);
+
+ /*
+ * Hijack this SCSI command structure to send
+ * a bus device reset message to this device.
+ */
+ SCpnt->host_scribble = (void *)fas216_devicereset_done;
+
+ info->rst_dev_status = 0;
+ info->rstSCpnt = SCpnt;
+
+ if (info->scsi.phase == PHASE_IDLE)
+ fas216_kick(info);
+
+ mod_timer(&info->eh_timer, jiffies + 30 * HZ);
+ spin_unlock_irqrestore(&info->host_lock, flags);
+
+ /*
+ * Wait up to 30 seconds for the reset to complete.
+ */
+ wait_event(info->eh_wait, info->rst_dev_status);
+
+ del_timer_sync(&info->eh_timer);
+ spin_lock_irqsave(&info->host_lock, flags);
+ info->rstSCpnt = NULL;
+
+ if (info->rst_dev_status == 1)
+ res = SUCCESS;
+ } while (0);
+
+ SCpnt->host_scribble = NULL;
+ spin_unlock_irqrestore(&info->host_lock, flags);
+
+ fas216_log(info, LOG_ERROR, "device reset complete: %s\n",
+ res == SUCCESS ? "success" : "failed");
+
+ return res;
+}
+
+/**
+ * fas216_eh_bus_reset - Reset the bus associated with the command
+ * @SCpnt: command specifing bus to reset
+ *
+ * Reset the bus associated with the command.
+ * Returns: FAILED if unable to reset.
+ * Notes: Further commands are blocked.
+ */
+int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt)
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+ unsigned long flags;
+ struct scsi_device *SDpnt;
+
+ fas216_checkmagic(info);
+ fas216_log(info, LOG_ERROR, "resetting bus");
+
+ info->stats.bus_resets += 1;
+
+ spin_lock_irqsave(&info->host_lock, flags);
+
+ /*
+ * Stop all activity on this interface.
+ */
+ fas216_aborttransfer(info);
+ fas216_writeb(info, REG_CNTL3, info->scsi.cfg[2]);
+
+ /*
+ * Clear any pending interrupts.
+ */
+ while (fas216_readb(info, REG_STAT) & STAT_INT)
+ fas216_readb(info, REG_INST);
+
+ info->rst_bus_status = 0;
+
+ /*
+ * For each attached hard-reset device, clear out
+ * all command structures. Leave the running
+ * command in place.
+ */
+ shost_for_each_device(SDpnt, info->host) {
+ int i;
+
+ if (SDpnt->soft_reset)
+ continue;
+
+ queue_remove_all_target(&info->queues.issue, SDpnt->id);
+ queue_remove_all_target(&info->queues.disconnected, SDpnt->id);
+ if (info->origSCpnt && info->origSCpnt->device->id == SDpnt->id)
+ info->origSCpnt = NULL;
+ if (info->reqSCpnt && info->reqSCpnt->device->id == SDpnt->id)
+ info->reqSCpnt = NULL;
+ info->SCpnt = NULL;
+
+ for (i = 0; i < 8; i++)
+ clear_bit(SDpnt->id * 8 + i, info->busyluns);
+ }
+
+ info->scsi.phase = PHASE_IDLE;
+
+ /*
+ * Reset the SCSI bus. Device cleanup happens in
+ * the interrupt handler.
+ */
+ fas216_cmd(info, CMD_RESETSCSI);
+
+ mod_timer(&info->eh_timer, jiffies + HZ);
+ spin_unlock_irqrestore(&info->host_lock, flags);
+
+ /*
+ * Wait one second for the interrupt.
+ */
+ wait_event(info->eh_wait, info->rst_bus_status);
+ del_timer_sync(&info->eh_timer);
+
+ fas216_log(info, LOG_ERROR, "bus reset complete: %s\n",
+ info->rst_bus_status == 1 ? "success" : "failed");
+
+ return info->rst_bus_status == 1 ? SUCCESS : FAILED;
+}
+
+/**
+ * fas216_init_chip - Initialise FAS216 state after reset
+ * @info: state structure for interface
+ *
+ * Initialise FAS216 state after reset
+ */
+static void fas216_init_chip(FAS216_Info *info)
+{
+ unsigned int clock = ((info->ifcfg.clockrate - 1) / 5 + 1) & 7;
+ fas216_writeb(info, REG_CLKF, clock);
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]);
+ fas216_writeb(info, REG_CNTL2, info->scsi.cfg[1]);
+ fas216_writeb(info, REG_CNTL3, info->scsi.cfg[2]);
+ fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout);
+ fas216_writeb(info, REG_SOF, 0);
+ fas216_writeb(info, REG_STP, info->scsi.async_stp);
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]);
+}
+
+/**
+ * fas216_eh_host_reset - Reset the host associated with this command
+ * @SCpnt: command specifing host to reset
+ *
+ * Reset the host associated with this command.
+ * Returns: FAILED if unable to reset.
+ * Notes: io_request_lock is taken, and irqs are disabled
+ */
+int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
+{
+ FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
+
+ spin_lock_irq(info->host->host_lock);
+
+ fas216_checkmagic(info);
+
+ fas216_log(info, LOG_ERROR, "resetting host");
+
+ /*
+ * Reset the SCSI chip.
+ */
+ fas216_cmd(info, CMD_RESETCHIP);
+
+ /*
+ * Ugly ugly ugly!
+ * We need to release the host_lock and enable
+ * IRQs if we sleep, but we must relock and disable
+ * IRQs after the sleep.
+ */
+ spin_unlock_irq(info->host->host_lock);
+ msleep(50 * 1000/100);
+ spin_lock_irq(info->host->host_lock);
+
+ /*
+ * Release the SCSI reset.
+ */
+ fas216_cmd(info, CMD_NOP);
+
+ fas216_init_chip(info);
+
+ spin_unlock_irq(info->host->host_lock);
+ return SUCCESS;
+}
+
+#define TYPE_UNKNOWN 0
+#define TYPE_NCR53C90 1
+#define TYPE_NCR53C90A 2
+#define TYPE_NCR53C9x 3
+#define TYPE_Am53CF94 4
+#define TYPE_EmFAS216 5
+#define TYPE_QLFAS216 6
+
+static char *chip_types[] = {
+ "unknown",
+ "NS NCR53C90",
+ "NS NCR53C90A",
+ "NS NCR53C9x",
+ "AMD Am53CF94",
+ "Emulex FAS216",
+ "QLogic FAS216"
+};
+
+static int fas216_detect_type(FAS216_Info *info)
+{
+ int family, rev;
+
+ /*
+ * Reset the chip.
+ */
+ fas216_writeb(info, REG_CMD, CMD_RESETCHIP);
+ udelay(50);
+ fas216_writeb(info, REG_CMD, CMD_NOP);
+
+ /*
+ * Check to see if control reg 2 is present.
+ */
+ fas216_writeb(info, REG_CNTL3, 0);
+ fas216_writeb(info, REG_CNTL2, CNTL2_S2FE);
+
+ /*
+ * If we are unable to read back control reg 2
+ * correctly, it is not present, and we have a
+ * NCR53C90.
+ */
+ if ((fas216_readb(info, REG_CNTL2) & (~0xe0)) != CNTL2_S2FE)
+ return TYPE_NCR53C90;
+
+ /*
+ * Now, check control register 3
+ */
+ fas216_writeb(info, REG_CNTL2, 0);
+ fas216_writeb(info, REG_CNTL3, 0);
+ fas216_writeb(info, REG_CNTL3, 5);
+
+ /*
+ * If we are unable to read the register back
+ * correctly, we have a NCR53C90A
+ */
+ if (fas216_readb(info, REG_CNTL3) != 5)
+ return TYPE_NCR53C90A;
+
+ /*
+ * Now read the ID from the chip.
+ */
+ fas216_writeb(info, REG_CNTL3, 0);
+
+ fas216_writeb(info, REG_CNTL3, CNTL3_ADIDCHK);
+ fas216_writeb(info, REG_CNTL3, 0);
+
+ fas216_writeb(info, REG_CMD, CMD_RESETCHIP);
+ udelay(50);
+ fas216_writeb(info, REG_CMD, CMD_WITHDMA | CMD_NOP);
+
+ fas216_writeb(info, REG_CNTL2, CNTL2_ENF);
+ fas216_writeb(info, REG_CMD, CMD_RESETCHIP);
+ udelay(50);
+ fas216_writeb(info, REG_CMD, CMD_NOP);
+
+ rev = fas216_readb(info, REG_ID);
+ family = rev >> 3;
+ rev &= 7;
+
+ switch (family) {
+ case 0x01:
+ if (rev == 4)
+ return TYPE_Am53CF94;
+ break;
+
+ case 0x02:
+ switch (rev) {
+ case 2:
+ return TYPE_EmFAS216;
+ case 3:
+ return TYPE_QLFAS216;
+ }
+ break;
+
+ default:
+ break;
+ }
+ printk("family %x rev %x\n", family, rev);
+ return TYPE_NCR53C9x;
+}
+
+/**
+ * fas216_reset_state - Initialise driver internal state
+ * @info: state to initialise
+ *
+ * Initialise driver internal state
+ */
+static void fas216_reset_state(FAS216_Info *info)
+{
+ int i;
+
+ fas216_checkmagic(info);
+
+ fas216_bus_reset(info);
+
+ /*
+ * Clear out all stale info in our state structure
+ */
+ memset(info->busyluns, 0, sizeof(info->busyluns));
+ info->scsi.disconnectable = 0;
+ info->scsi.aborting = 0;
+
+ for (i = 0; i < 8; i++) {
+ info->device[i].parity_enabled = 0;
+ info->device[i].parity_check = 1;
+ }
+
+ /*
+ * Drain all commands on disconnected queue
+ */
+ while (queue_remove(&info->queues.disconnected) != NULL);
+
+ /*
+ * Remove executing commands.
+ */
+ info->SCpnt = NULL;
+ info->reqSCpnt = NULL;
+ info->rstSCpnt = NULL;
+ info->origSCpnt = NULL;
+}
+
+/**
+ * fas216_init - initialise FAS/NCR/AMD SCSI structures.
+ * @host: a driver-specific filled-out structure
+ *
+ * Initialise FAS/NCR/AMD SCSI structures.
+ * Returns: 0 on success
+ */
+int fas216_init(struct Scsi_Host *host)
+{
+ FAS216_Info *info = (FAS216_Info *)host->hostdata;
+
+ info->magic_start = MAGIC;
+ info->magic_end = MAGIC;
+ info->host = host;
+ info->scsi.cfg[0] = host->this_id | CNTL1_PERE;
+ info->scsi.cfg[1] = CNTL2_ENF | CNTL2_S2FE;
+ info->scsi.cfg[2] = info->ifcfg.cntl3 |
+ CNTL3_ADIDCHK | CNTL3_QTAG | CNTL3_G2CB | CNTL3_LBTM;
+ info->scsi.async_stp = fas216_syncperiod(info, info->ifcfg.asyncperiod);
+
+ info->rst_dev_status = -1;
+ info->rst_bus_status = -1;
+ init_waitqueue_head(&info->eh_wait);
+ init_timer(&info->eh_timer);
+ info->eh_timer.data = (unsigned long)info;
+ info->eh_timer.function = fas216_eh_timer;
+
+ spin_lock_init(&info->host_lock);
+
+ memset(&info->stats, 0, sizeof(info->stats));
+
+ msgqueue_initialise(&info->scsi.msgs);
+
+ if (!queue_initialise(&info->queues.issue))
+ return -ENOMEM;
+
+ if (!queue_initialise(&info->queues.disconnected)) {
+ queue_free(&info->queues.issue);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * fas216_add - initialise FAS/NCR/AMD SCSI ic.
+ * @host: a driver-specific filled-out structure
+ * @dev: parent device
+ *
+ * Initialise FAS/NCR/AMD SCSI ic.
+ * Returns: 0 on success
+ */
+int fas216_add(struct Scsi_Host *host, struct device *dev)
+{
+ FAS216_Info *info = (FAS216_Info *)host->hostdata;
+ int type, ret;
+
+ if (info->ifcfg.clockrate <= 10 || info->ifcfg.clockrate > 40) {
+ printk(KERN_CRIT "fas216: invalid clock rate %u MHz\n",
+ info->ifcfg.clockrate);
+ return -EINVAL;
+ }
+
+ fas216_reset_state(info);
+ type = fas216_detect_type(info);
+ info->scsi.type = chip_types[type];
+
+ udelay(300);
+
+ /*
+ * Initialise the chip correctly.
+ */
+ fas216_init_chip(info);
+
+ /*
+ * Reset the SCSI bus. We don't want to see
+ * the resulting reset interrupt, so mask it
+ * out.
+ */
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0] | CNTL1_DISR);
+ fas216_writeb(info, REG_CMD, CMD_RESETSCSI);
+
+ /*
+ * scsi standard says wait 250ms
+ */
+ spin_unlock_irq(info->host->host_lock);
+ msleep(100*1000/100);
+ spin_lock_irq(info->host->host_lock);
+
+ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]);
+ fas216_readb(info, REG_INST);
+
+ fas216_checkmagic(info);
+
+ ret = scsi_add_host(host, dev);
+ if (ret)
+ fas216_writeb(info, REG_CMD, CMD_RESETCHIP);
+ else
+ scsi_scan_host(host);
+
+ return ret;
+}
+
+void fas216_remove(struct Scsi_Host *host)
+{
+ FAS216_Info *info = (FAS216_Info *)host->hostdata;
+
+ fas216_checkmagic(info);
+ scsi_remove_host(host);
+
+ fas216_writeb(info, REG_CMD, CMD_RESETCHIP);
+ scsi_host_put(host);
+}
+
+/**
+ * fas216_release - release all resources for FAS/NCR/AMD SCSI ic.
+ * @host: a driver-specific filled-out structure
+ *
+ * release all resources and put everything to bed for FAS/NCR/AMD SCSI ic.
+ */
+void fas216_release(struct Scsi_Host *host)
+{
+ FAS216_Info *info = (FAS216_Info *)host->hostdata;
+
+ queue_free(&info->queues.disconnected);
+ queue_free(&info->queues.issue);
+}
+
+void fas216_print_host(FAS216_Info *info, struct seq_file *m)
+{
+ seq_printf(m,
+ "\n"
+ "Chip : %s\n"
+ " Address: 0x%p\n"
+ " IRQ : %d\n"
+ " DMA : %d\n",
+ info->scsi.type, info->scsi.io_base,
+ info->scsi.irq, info->scsi.dma);
+}
+
+void fas216_print_stats(FAS216_Info *info, struct seq_file *m)
+{
+ seq_printf(m, "\n"
+ "Command Statistics:\n"
+ " Queued : %u\n"
+ " Issued : %u\n"
+ " Completed : %u\n"
+ " Reads : %u\n"
+ " Writes : %u\n"
+ " Others : %u\n"
+ " Disconnects: %u\n"
+ " Aborts : %u\n"
+ " Bus resets : %u\n"
+ " Host resets: %u\n",
+ info->stats.queues, info->stats.removes,
+ info->stats.fins, info->stats.reads,
+ info->stats.writes, info->stats.miscs,
+ info->stats.disconnects, info->stats.aborts,
+ info->stats.bus_resets, info->stats.host_resets);
+}
+
+void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
+{
+ struct fas216_device *dev;
+ struct scsi_device *scd;
+
+ seq_puts(m, "Device/Lun TaggedQ Parity Sync\n");
+
+ shost_for_each_device(scd, info->host) {
+ dev = &info->device[scd->id];
+ seq_printf(m, " %d/%llu ", scd->id, scd->lun);
+ if (scd->tagged_supported)
+ seq_printf(m, "%3sabled(%3d) ",
+ scd->simple_tags ? "en" : "dis",
+ scd->current_tag);
+ else
+ seq_puts(m, "unsupported ");
+
+ seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis");
+
+ if (dev->sof)
+ seq_printf(m, "offset %d, %d ns\n",
+ dev->sof, dev->period * 4);
+ else
+ seq_puts(m, "async\n");
+ }
+}
+
+EXPORT_SYMBOL(fas216_init);
+EXPORT_SYMBOL(fas216_add);
+EXPORT_SYMBOL(fas216_queue_command);
+EXPORT_SYMBOL(fas216_noqueue_command);
+EXPORT_SYMBOL(fas216_intr);
+EXPORT_SYMBOL(fas216_remove);
+EXPORT_SYMBOL(fas216_release);
+EXPORT_SYMBOL(fas216_eh_abort);
+EXPORT_SYMBOL(fas216_eh_device_reset);
+EXPORT_SYMBOL(fas216_eh_bus_reset);
+EXPORT_SYMBOL(fas216_eh_host_reset);
+EXPORT_SYMBOL(fas216_print_host);
+EXPORT_SYMBOL(fas216_print_stats);
+EXPORT_SYMBOL(fas216_print_devices);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Generic FAS216/NCR53C9x driver core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
new file mode 100644
index 000000000..c57c16ef8
--- /dev/null
+++ b/drivers/scsi/arm/fas216.h
@@ -0,0 +1,393 @@
+/*
+ * linux/drivers/acorn/scsi/fas216.h
+ *
+ * Copyright (C) 1997-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * FAS216 generic driver
+ */
+#ifndef FAS216_H
+#define FAS216_H
+
+#include <scsi/scsi_eh.h>
+
+#include "queue.h"
+#include "msgqueue.h"
+
+/* FAS register definitions */
+
+/* transfer count low */
+#define REG_CTCL (0)
+#define REG_STCL (0)
+
+/* transfer count medium */
+#define REG_CTCM (1)
+#define REG_STCM (1)
+
+/* fifo data */
+#define REG_FF (2)
+
+/* command */
+#define REG_CMD (3)
+#define CMD_NOP 0x00
+#define CMD_FLUSHFIFO 0x01
+#define CMD_RESETCHIP 0x02
+#define CMD_RESETSCSI 0x03
+
+#define CMD_TRANSFERINFO 0x10
+#define CMD_INITCMDCOMPLETE 0x11
+#define CMD_MSGACCEPTED 0x12
+#define CMD_PADBYTES 0x18
+#define CMD_SETATN 0x1a
+#define CMD_RSETATN 0x1b
+
+#define CMD_SELECTWOATN 0x41
+#define CMD_SELECTATN 0x42
+#define CMD_SELECTATNSTOP 0x43
+#define CMD_ENABLESEL 0x44
+#define CMD_DISABLESEL 0x45
+#define CMD_SELECTATN3 0x46
+#define CMD_RESEL3 0x47
+
+#define CMD_WITHDMA 0x80
+
+/* status register (read) */
+#define REG_STAT (4)
+#define STAT_IO (1 << 0) /* IO phase */
+#define STAT_CD (1 << 1) /* CD phase */
+#define STAT_MSG (1 << 2) /* MSG phase */
+#define STAT_TRANSFERDONE (1 << 3) /* Transfer completed */
+#define STAT_TRANSFERCNTZ (1 << 4) /* Transfer counter is zero */
+#define STAT_PARITYERROR (1 << 5) /* Parity error */
+#define STAT_REALBAD (1 << 6) /* Something bad */
+#define STAT_INT (1 << 7) /* Interrupt */
+
+#define STAT_BUSMASK (STAT_MSG|STAT_CD|STAT_IO)
+#define STAT_DATAOUT (0) /* Data out */
+#define STAT_DATAIN (STAT_IO) /* Data in */
+#define STAT_COMMAND (STAT_CD) /* Command out */
+#define STAT_STATUS (STAT_CD|STAT_IO) /* Status In */
+#define STAT_MESGOUT (STAT_MSG|STAT_CD) /* Message out */
+#define STAT_MESGIN (STAT_MSG|STAT_CD|STAT_IO) /* Message In */
+
+/* bus ID for select / reselect */
+#define REG_SDID (4)
+#define BUSID(target) ((target) & 7)
+
+/* Interrupt status register (read) */
+#define REG_INST (5)
+#define INST_SELWOATN (1 << 0) /* Select w/o ATN */
+#define INST_SELATN (1 << 1) /* Select w/ATN */
+#define INST_RESELECTED (1 << 2) /* Reselected */
+#define INST_FUNCDONE (1 << 3) /* Function done */
+#define INST_BUSSERVICE (1 << 4) /* Bus service */
+#define INST_DISCONNECT (1 << 5) /* Disconnect */
+#define INST_ILLEGALCMD (1 << 6) /* Illegal command */
+#define INST_BUSRESET (1 << 7) /* SCSI Bus reset */
+
+/* Timeout register (write) */
+#define REG_STIM (5)
+
+/* Sequence step register (read) */
+#define REG_IS (6)
+#define IS_BITS 0x07
+#define IS_SELARB 0x00 /* Select & Arb ok */
+#define IS_MSGBYTESENT 0x01 /* One byte message sent*/
+#define IS_NOTCOMMAND 0x02 /* Not in command state */
+#define IS_EARLYPHASE 0x03 /* Early phase change */
+#define IS_COMPLETE 0x04 /* Command ok */
+#define IS_SOF 0x08 /* Sync off flag */
+
+/* Transfer period step (write) */
+#define REG_STP (6)
+
+/* Synchronous Offset (write) */
+#define REG_SOF (7)
+
+/* Fifo state register (read) */
+#define REG_CFIS (7)
+#define CFIS_CF 0x1f /* Num bytes in FIFO */
+#define CFIS_IS 0xe0 /* Step */
+
+/* config register 1 */
+#define REG_CNTL1 (8)
+#define CNTL1_CID (7 << 0) /* Chip ID */
+#define CNTL1_STE (1 << 3) /* Self test enable */
+#define CNTL1_PERE (1 << 4) /* Parity enable reporting en. */
+#define CNTL1_PTE (1 << 5) /* Parity test enable */
+#define CNTL1_DISR (1 << 6) /* Disable Irq on SCSI reset */
+#define CNTL1_ETM (1 << 7) /* Extended Timing Mode */
+
+/* Clock conversion factor (read) */
+#define REG_CLKF (9)
+#define CLKF_F37MHZ 0x00 /* 35.01 - 40 MHz */
+#define CLKF_F10MHZ 0x02 /* 10 MHz */
+#define CLKF_F12MHZ 0x03 /* 10.01 - 15 MHz */
+#define CLKF_F17MHZ 0x04 /* 15.01 - 20 MHz */
+#define CLKF_F22MHZ 0x05 /* 20.01 - 25 MHz */
+#define CLKF_F27MHZ 0x06 /* 25.01 - 30 MHz */
+#define CLKF_F32MHZ 0x07 /* 30.01 - 35 MHz */
+
+/* Chip test register (write) */
+#define REG_FTM (10)
+#define TEST_FTM 0x01 /* Force target mode */
+#define TEST_FIM 0x02 /* Force initiator mode */
+#define TEST_FHI 0x04 /* Force high impedance mode */
+
+/* Configuration register 2 (read/write) */
+#define REG_CNTL2 (11)
+#define CNTL2_PGDP (1 << 0) /* Pass Th/Generate Data Parity */
+#define CNTL2_PGRP (1 << 1) /* Pass Th/Generate Reg Parity */
+#define CNTL2_ACDPE (1 << 2) /* Abort on Cmd/Data Parity Err */
+#define CNTL2_S2FE (1 << 3) /* SCSI2 Features Enable */
+#define CNTL2_TSDR (1 << 4) /* Tristate DREQ */
+#define CNTL2_SBO (1 << 5) /* Select Byte Order */
+#define CNTL2_ENF (1 << 6) /* Enable features */
+#define CNTL2_DAE (1 << 7) /* Data Alignment Enable */
+
+/* Configuration register 3 (read/write) */
+#define REG_CNTL3 (12)
+#define CNTL3_BS8 (1 << 0) /* Burst size 8 */
+#define CNTL3_MDM (1 << 1) /* Modify DMA mode */
+#define CNTL3_LBTM (1 << 2) /* Last Byte Transfer mode */
+#define CNTL3_FASTCLK (1 << 3) /* Fast SCSI clocking */
+#define CNTL3_FASTSCSI (1 << 4) /* Fast SCSI */
+#define CNTL3_G2CB (1 << 5) /* Group2 SCSI support */
+#define CNTL3_QTAG (1 << 6) /* Enable 3 byte msgs */
+#define CNTL3_ADIDCHK (1 << 7) /* Additional ID check */
+
+/* High transfer count (read/write) */
+#define REG_CTCH (14)
+#define REG_STCH (14)
+
+/* ID register (read only) */
+#define REG_ID (14)
+
+/* Data alignment */
+#define REG_DAL (15)
+
+typedef enum {
+ PHASE_IDLE, /* we're not planning on doing anything */
+ PHASE_SELECTION, /* selecting a device */
+ PHASE_SELSTEPS, /* selection with command steps */
+ PHASE_COMMAND, /* command sent */
+ PHASE_MESSAGESENT, /* selected, and we're sending cmd */
+ PHASE_DATAOUT, /* data out to device */
+ PHASE_DATAIN, /* data in from device */
+ PHASE_MSGIN, /* message in from device */
+ PHASE_MSGIN_DISCONNECT, /* disconnecting from bus */
+ PHASE_MSGOUT, /* after message out phase */
+ PHASE_MSGOUT_EXPECT, /* expecting message out */
+ PHASE_STATUS, /* status from device */
+ PHASE_DONE /* Command complete */
+} phase_t;
+
+typedef enum {
+ DMA_OUT, /* DMA from memory to chip */
+ DMA_IN /* DMA from chip to memory */
+} fasdmadir_t;
+
+typedef enum {
+ fasdma_none, /* No dma */
+ fasdma_pio, /* PIO mode */
+ fasdma_pseudo, /* Pseudo DMA */
+ fasdma_real_block, /* Real DMA, on block by block basis */
+ fasdma_real_all /* Real DMA, on request by request */
+} fasdmatype_t;
+
+typedef enum {
+ neg_wait, /* Negotiate with device */
+ neg_inprogress, /* Negotiation sent */
+ neg_complete, /* Negotiation complete */
+ neg_targcomplete, /* Target completed negotiation */
+ neg_invalid /* Negotiation not supported */
+} neg_t;
+
+#define MAGIC 0x441296bdUL
+#define NR_MSGS 8
+
+#define FASCAP_DMA (1 << 0)
+#define FASCAP_PSEUDODMA (1 << 1)
+
+typedef struct {
+ unsigned long magic_start;
+ spinlock_t host_lock;
+ struct Scsi_Host *host; /* host */
+ struct scsi_cmnd *SCpnt; /* currently processing command */
+ struct scsi_cmnd *origSCpnt; /* original connecting command */
+ struct scsi_cmnd *reqSCpnt; /* request sense command */
+ struct scsi_cmnd *rstSCpnt; /* reset command */
+ struct scsi_cmnd *pending_SCpnt[8]; /* per-device pending commands */
+ int next_pending; /* next pending device */
+
+ /*
+ * Error recovery
+ */
+ wait_queue_head_t eh_wait;
+ struct timer_list eh_timer;
+ unsigned int rst_dev_status;
+ unsigned int rst_bus_status;
+
+ /* driver information */
+ struct {
+ phase_t phase; /* current phase */
+ void __iomem *io_base; /* iomem base of FAS216 */
+ unsigned int io_shift; /* shift to adjust reg offsets by */
+ unsigned char cfg[4]; /* configuration registers */
+ const char *type; /* chip type */
+ unsigned int irq; /* interrupt */
+ int dma; /* dma channel */
+
+ struct scsi_pointer SCp; /* current commands data pointer */
+
+ MsgQueue_t msgs; /* message queue for connected device */
+
+ unsigned int async_stp; /* Async transfer STP value */
+ unsigned char msgin_fifo; /* bytes in fifo at time of message in */
+ unsigned char message[256]; /* last message received from device */
+
+ unsigned char disconnectable:1; /* this command can be disconnected */
+ unsigned char aborting:1; /* aborting command */
+ } scsi;
+
+ /* statistics information */
+ struct {
+ unsigned int queues;
+ unsigned int removes;
+ unsigned int fins;
+ unsigned int reads;
+ unsigned int writes;
+ unsigned int miscs;
+ unsigned int disconnects;
+ unsigned int aborts;
+ unsigned int bus_resets;
+ unsigned int host_resets;
+ } stats;
+
+ /* configuration information */
+ struct {
+ unsigned char clockrate; /* clock rate of FAS device (MHz) */
+ unsigned char select_timeout; /* timeout (R5) */
+ unsigned char sync_max_depth; /* Synchronous xfer max fifo depth */
+ unsigned char wide_max_size; /* Maximum wide transfer size */
+ unsigned char cntl3; /* Control Reg 3 */
+ unsigned int asyncperiod; /* Async transfer period (ns) */
+ unsigned int capabilities; /* driver capabilities */
+ unsigned int disconnect_ok:1; /* Disconnects allowed? */
+ } ifcfg;
+
+ /* queue handling */
+ struct {
+ Queue_t issue; /* issue queue */
+ Queue_t disconnected; /* disconnected command queue */
+ } queues;
+
+ /* per-device info */
+ struct fas216_device {
+ unsigned char disconnect_ok:1; /* device can disconnect */
+ unsigned char parity_enabled:1; /* parity checking enabled */
+ unsigned char parity_check:1; /* need to check parity checking */
+ unsigned char period; /* sync xfer period in (*4ns) */
+ unsigned char stp; /* synchronous transfer period */
+ unsigned char sof; /* synchronous offset register */
+ unsigned char wide_xfer; /* currently negociated wide transfer */
+ neg_t sync_state; /* synchronous transfer mode */
+ neg_t wide_state; /* wide transfer mode */
+ } device[8];
+ unsigned long busyluns[64/sizeof(unsigned long)];/* array of bits indicating LUNs busy */
+
+ /* dma */
+ struct {
+ fasdmatype_t transfer_type; /* current type of DMA transfer */
+ fasdmatype_t (*setup) (struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_dma);
+ void (*pseudo)(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, int transfer);
+ void (*stop) (struct Scsi_Host *host, struct scsi_pointer *SCp);
+ } dma;
+
+ /* miscellaneous */
+ int internal_done; /* flag to indicate request done */
+ struct scsi_eh_save ses; /* holds request sense restore info */
+ unsigned long magic_end;
+} FAS216_Info;
+
+/* Function: int fas216_init (struct Scsi_Host *instance)
+ * Purpose : initialise FAS/NCR/AMD SCSI structures.
+ * Params : instance - a driver-specific filled-out structure
+ * Returns : 0 on success
+ */
+extern int fas216_init (struct Scsi_Host *instance);
+
+/* Function: int fas216_add (struct Scsi_Host *instance, struct device *dev)
+ * Purpose : initialise FAS/NCR/AMD SCSI ic.
+ * Params : instance - a driver-specific filled-out structure
+ * Returns : 0 on success
+ */
+extern int fas216_add (struct Scsi_Host *instance, struct device *dev);
+
+/* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
+ * Purpose : queue a command for adapter to process.
+ * Params : h - host adapter
+ * : SCpnt - Command to queue
+ * Returns : 0 - success, else error
+ */
+extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
+
+/* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt)
+ * Purpose : queue a command for adapter to process, and process it to completion.
+ * Params : h - host adapter
+ * : SCpnt - Command to queue
+ * Returns : 0 - success, else error
+ */
+extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *);
+
+/* Function: irqreturn_t fas216_intr (FAS216_Info *info)
+ * Purpose : handle interrupts from the interface to progress a command
+ * Params : info - interface to service
+ */
+extern irqreturn_t fas216_intr (FAS216_Info *info);
+
+extern void fas216_remove (struct Scsi_Host *instance);
+
+/* Function: void fas216_release (struct Scsi_Host *instance)
+ * Purpose : release all resources and put everything to bed for FAS/NCR/AMD SCSI ic.
+ * Params : instance - a driver-specific filled-out structure
+ * Returns : 0 on success
+ */
+extern void fas216_release (struct Scsi_Host *instance);
+
+extern void fas216_print_host(FAS216_Info *info, struct seq_file *m);
+extern void fas216_print_stats(FAS216_Info *info, struct seq_file *m);
+extern void fas216_print_devices(FAS216_Info *info, struct seq_file *m);
+
+/* Function: int fas216_eh_abort(struct scsi_cmnd *SCpnt)
+ * Purpose : abort this command
+ * Params : SCpnt - command to abort
+ * Returns : FAILED if unable to abort
+ */
+extern int fas216_eh_abort(struct scsi_cmnd *SCpnt);
+
+/* Function: int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
+ * Purpose : Reset the device associated with this command
+ * Params : SCpnt - command specifing device to reset
+ * Returns : FAILED if unable to reset
+ */
+extern int fas216_eh_device_reset(struct scsi_cmnd *SCpnt);
+
+/* Function: int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt)
+ * Purpose : Reset the complete bus associated with this command
+ * Params : SCpnt - command specifing bus to reset
+ * Returns : FAILED if unable to reset
+ */
+extern int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt);
+
+/* Function: int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
+ * Purpose : Reset the host associated with this command
+ * Params : SCpnt - command specifing host to reset
+ * Returns : FAILED if unable to reset
+ */
+extern int fas216_eh_host_reset(struct scsi_cmnd *SCpnt);
+
+#endif /* FAS216_H */
diff --git a/drivers/scsi/arm/msgqueue.c b/drivers/scsi/arm/msgqueue.c
new file mode 100644
index 000000000..7c95c7582
--- /dev/null
+++ b/drivers/scsi/arm/msgqueue.c
@@ -0,0 +1,171 @@
+/*
+ * linux/drivers/acorn/scsi/msgqueue.c
+ *
+ * Copyright (C) 1997-1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * message queue handling
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+
+#include "msgqueue.h"
+
+/*
+ * Function: struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq)
+ * Purpose : Allocate a message queue entry
+ * Params : msgq - message queue to claim entry for
+ * Returns : message queue entry or NULL.
+ */
+static struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq)
+{
+ struct msgqueue_entry *mq;
+
+ if ((mq = msgq->free) != NULL)
+ msgq->free = mq->next;
+
+ return mq;
+}
+
+/*
+ * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
+ * Purpose : free a message queue entry
+ * Params : msgq - message queue to free entry from
+ * mq - message queue entry to free
+ */
+static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
+{
+ if (mq) {
+ mq->next = msgq->free;
+ msgq->free = mq;
+ }
+}
+
+/*
+ * Function: void msgqueue_initialise(MsgQueue_t *msgq)
+ * Purpose : initialise a message queue
+ * Params : msgq - queue to initialise
+ */
+void msgqueue_initialise(MsgQueue_t *msgq)
+{
+ int i;
+
+ msgq->qe = NULL;
+ msgq->free = &msgq->entries[0];
+
+ for (i = 0; i < NR_MESSAGES; i++)
+ msgq->entries[i].next = &msgq->entries[i + 1];
+
+ msgq->entries[NR_MESSAGES - 1].next = NULL;
+}
+
+
+/*
+ * Function: void msgqueue_free(MsgQueue_t *msgq)
+ * Purpose : free a queue
+ * Params : msgq - queue to free
+ */
+void msgqueue_free(MsgQueue_t *msgq)
+{
+}
+
+/*
+ * Function: int msgqueue_msglength(MsgQueue_t *msgq)
+ * Purpose : calculate the total length of all messages on the message queue
+ * Params : msgq - queue to examine
+ * Returns : number of bytes of messages in queue
+ */
+int msgqueue_msglength(MsgQueue_t *msgq)
+{
+ struct msgqueue_entry *mq = msgq->qe;
+ int length = 0;
+
+ for (mq = msgq->qe; mq; mq = mq->next)
+ length += mq->msg.length;
+
+ return length;
+}
+
+/*
+ * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno)
+ * Purpose : return a message
+ * Params : msgq - queue to obtain message from
+ * : msgno - message number
+ * Returns : pointer to message string, or NULL
+ */
+struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno)
+{
+ struct msgqueue_entry *mq;
+
+ for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--);
+
+ return mq ? &mq->msg : NULL;
+}
+
+/*
+ * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...)
+ * Purpose : add a message onto a message queue
+ * Params : msgq - queue to add message on
+ * length - length of message
+ * ... - message bytes
+ * Returns : != 0 if successful
+ */
+int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...)
+{
+ struct msgqueue_entry *mq = mqe_alloc(msgq);
+ va_list ap;
+
+ if (mq) {
+ struct msgqueue_entry **mqp;
+ int i;
+
+ va_start(ap, length);
+ for (i = 0; i < length; i++)
+ mq->msg.msg[i] = va_arg(ap, unsigned int);
+ va_end(ap);
+
+ mq->msg.length = length;
+ mq->msg.fifo = 0;
+ mq->next = NULL;
+
+ mqp = &msgq->qe;
+ while (*mqp)
+ mqp = &(*mqp)->next;
+
+ *mqp = mq;
+ }
+
+ return mq != NULL;
+}
+
+/*
+ * Function: void msgqueue_flush(MsgQueue_t *msgq)
+ * Purpose : flush all messages from message queue
+ * Params : msgq - queue to flush
+ */
+void msgqueue_flush(MsgQueue_t *msgq)
+{
+ struct msgqueue_entry *mq, *mqnext;
+
+ for (mq = msgq->qe; mq; mq = mqnext) {
+ mqnext = mq->next;
+ mqe_free(msgq, mq);
+ }
+ msgq->qe = NULL;
+}
+
+EXPORT_SYMBOL(msgqueue_initialise);
+EXPORT_SYMBOL(msgqueue_free);
+EXPORT_SYMBOL(msgqueue_msglength);
+EXPORT_SYMBOL(msgqueue_getmsg);
+EXPORT_SYMBOL(msgqueue_addmsg);
+EXPORT_SYMBOL(msgqueue_flush);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("SCSI message queue handling");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/msgqueue.h b/drivers/scsi/arm/msgqueue.h
new file mode 100644
index 000000000..41c7333df
--- /dev/null
+++ b/drivers/scsi/arm/msgqueue.h
@@ -0,0 +1,82 @@
+/*
+ * linux/drivers/acorn/scsi/msgqueue.h
+ *
+ * Copyright (C) 1997 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * message queue handling
+ */
+#ifndef MSGQUEUE_H
+#define MSGQUEUE_H
+
+struct message {
+ char msg[8];
+ int length;
+ int fifo;
+};
+
+struct msgqueue_entry {
+ struct message msg;
+ struct msgqueue_entry *next;
+};
+
+#define NR_MESSAGES 4
+
+typedef struct {
+ struct msgqueue_entry *qe;
+ struct msgqueue_entry *free;
+ struct msgqueue_entry entries[NR_MESSAGES];
+} MsgQueue_t;
+
+/*
+ * Function: void msgqueue_initialise(MsgQueue_t *msgq)
+ * Purpose : initialise a message queue
+ * Params : msgq - queue to initialise
+ */
+extern void msgqueue_initialise(MsgQueue_t *msgq);
+
+/*
+ * Function: void msgqueue_free(MsgQueue_t *msgq)
+ * Purpose : free a queue
+ * Params : msgq - queue to free
+ */
+extern void msgqueue_free(MsgQueue_t *msgq);
+
+/*
+ * Function: int msgqueue_msglength(MsgQueue_t *msgq)
+ * Purpose : calculate the total length of all messages on the message queue
+ * Params : msgq - queue to examine
+ * Returns : number of bytes of messages in queue
+ */
+extern int msgqueue_msglength(MsgQueue_t *msgq);
+
+/*
+ * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno)
+ * Purpose : return a message & its length
+ * Params : msgq - queue to obtain message from
+ * : msgno - message number
+ * Returns : pointer to message string, or NULL
+ */
+extern struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno);
+
+/*
+ * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...)
+ * Purpose : add a message onto a message queue
+ * Params : msgq - queue to add message on
+ * length - length of message
+ * ... - message bytes
+ * Returns : != 0 if successful
+ */
+extern int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...);
+
+/*
+ * Function: void msgqueue_flush(MsgQueue_t *msgq)
+ * Purpose : flush all messages from message queue
+ * Params : msgq - queue to flush
+ */
+extern void msgqueue_flush(MsgQueue_t *msgq);
+
+#endif
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
new file mode 100644
index 000000000..7c6fa1479
--- /dev/null
+++ b/drivers/scsi/arm/oak.c
@@ -0,0 +1,207 @@
+/*
+ * Oak Generic NCR5380 driver
+ *
+ * Copyright 1995-2002, Russell King
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+
+#include <asm/ecard.h>
+#include <asm/io.h>
+
+#include <scsi/scsi_host.h>
+
+/*#define PSEUDO_DMA*/
+#define DONT_USE_INTR
+
+#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
+#define NCR5380_local_declare() void __iomem *_base
+#define NCR5380_setup(host) _base = priv(host)->base
+
+#define NCR5380_read(reg) readb(_base + ((reg) << 2))
+#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2))
+#define NCR5380_queue_command oakscsi_queue_command
+#define NCR5380_info oakscsi_info
+#define NCR5380_show_info oakscsi_show_info
+
+#define NCR5380_implementation_fields \
+ void __iomem *base
+
+#include "../NCR5380.h"
+
+#undef START_DMA_INITIATOR_RECEIVE_REG
+#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7)
+
+#define STAT ((128 + 16) << 2)
+#define DATA ((128 + 8) << 2)
+
+static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr,
+ int len)
+{
+ void __iomem *base = priv(instance)->base;
+
+printk("writing %p len %d\n",addr, len);
+ if(!len) return -1;
+
+ while(1)
+ {
+ int status;
+ while (((status = readw(base + STAT)) & 0x100)==0);
+ }
+}
+
+static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr,
+ int len)
+{
+ void __iomem *base = priv(instance)->base;
+printk("reading %p len %d\n", addr, len);
+ while(len > 0)
+ {
+ unsigned int status, timeout;
+ unsigned long b;
+
+ timeout = 0x01FFFFFF;
+
+ while (((status = readw(base + STAT)) & 0x100)==0)
+ {
+ timeout--;
+ if(status & 0x200 || !timeout)
+ {
+ printk("status = %08X\n", status);
+ return 1;
+ }
+ }
+
+ if(len >= 128)
+ {
+ readsw(base + DATA, addr, 128);
+ addr += 128;
+ len -= 128;
+ }
+ else
+ {
+ b = (unsigned long) readw(base + DATA);
+ *addr ++ = b;
+ len -= 1;
+ if(len)
+ *addr ++ = b>>8;
+ len -= 1;
+ }
+ }
+ return 0;
+}
+
+#undef STAT
+#undef DATA
+
+#include "../NCR5380.c"
+
+static struct scsi_host_template oakscsi_template = {
+ .module = THIS_MODULE,
+ .show_info = oakscsi_show_info,
+ .name = "Oak 16-bit SCSI",
+ .info = oakscsi_info,
+ .queuecommand = oakscsi_queue_command,
+ .eh_abort_handler = NCR5380_abort,
+ .eh_bus_reset_handler = NCR5380_bus_reset,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "oakscsi",
+};
+
+static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ int ret = -ENOMEM;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata));
+ if (!host) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->base) {
+ ret = -ENOMEM;
+ goto unreg;
+ }
+
+ host->irq = NO_IRQ;
+ host->n_io_port = 255;
+
+ NCR5380_init(host, 0);
+
+ ret = scsi_add_host(host, &ec->dev);
+ if (ret)
+ goto out_unmap;
+
+ scsi_scan_host(host);
+ goto out;
+
+ out_unmap:
+ iounmap(priv(host)->base);
+ unreg:
+ scsi_host_put(host);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void oakscsi_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+ scsi_remove_host(host);
+
+ NCR5380_exit(host);
+ iounmap(priv(host)->base);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id oakscsi_cids[] = {
+ { MANU_OAK, PROD_OAK_SCSI },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver oakscsi_driver = {
+ .probe = oakscsi_probe,
+ .remove = oakscsi_remove,
+ .id_table = oakscsi_cids,
+ .drv = {
+ .name = "oakscsi",
+ },
+};
+
+static int __init oakscsi_init(void)
+{
+ return ecard_register_driver(&oakscsi_driver);
+}
+
+static void __exit oakscsi_exit(void)
+{
+ ecard_remove_driver(&oakscsi_driver);
+}
+
+module_init(oakscsi_init);
+module_exit(oakscsi_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Oak SCSI driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
new file mode 100644
index 000000000..5e1b73e1b
--- /dev/null
+++ b/drivers/scsi/arm/powertec.c
@@ -0,0 +1,451 @@
+/*
+ * linux/drivers/acorn/scsi/powertec.c
+ *
+ * Copyright (C) 1997-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/dma.h>
+#include <asm/ecard.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#include "../scsi.h"
+#include <scsi/scsi_host.h>
+#include "fas216.h"
+#include "scsi.h"
+
+#include <scsi/scsicam.h>
+
+#define POWERTEC_FAS216_OFFSET 0x3000
+#define POWERTEC_FAS216_SHIFT 6
+
+#define POWERTEC_INTR_STATUS 0x2000
+#define POWERTEC_INTR_BIT 0x80
+
+#define POWERTEC_RESET_CONTROL 0x1018
+#define POWERTEC_RESET_BIT 1
+
+#define POWERTEC_TERM_CONTROL 0x2018
+#define POWERTEC_TERM_ENABLE 1
+
+#define POWERTEC_INTR_CONTROL 0x101c
+#define POWERTEC_INTR_ENABLE 1
+#define POWERTEC_INTR_DISABLE 0
+
+#define VERSION "1.10 (19/01/2003 2.5.59)"
+
+/*
+ * Use term=0,1,0,0,0 to turn terminators on/off.
+ * One entry per slot.
+ */
+static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+#define NR_SG 256
+
+struct powertec_info {
+ FAS216_Info info;
+ struct expansion_card *ec;
+ void __iomem *base;
+ unsigned int term_ctl;
+ struct scatterlist sg[NR_SG];
+};
+
+/* Prototype: void powertecscsi_irqenable(ec, irqnr)
+ * Purpose : Enable interrupts on Powertec SCSI card
+ * Params : ec - expansion card structure
+ * : irqnr - interrupt number
+ */
+static void
+powertecscsi_irqenable(struct expansion_card *ec, int irqnr)
+{
+ struct powertec_info *info = ec->irq_data;
+ writeb(POWERTEC_INTR_ENABLE, info->base + POWERTEC_INTR_CONTROL);
+}
+
+/* Prototype: void powertecscsi_irqdisable(ec, irqnr)
+ * Purpose : Disable interrupts on Powertec SCSI card
+ * Params : ec - expansion card structure
+ * : irqnr - interrupt number
+ */
+static void
+powertecscsi_irqdisable(struct expansion_card *ec, int irqnr)
+{
+ struct powertec_info *info = ec->irq_data;
+ writeb(POWERTEC_INTR_DISABLE, info->base + POWERTEC_INTR_CONTROL);
+}
+
+static const expansioncard_ops_t powertecscsi_ops = {
+ .irqenable = powertecscsi_irqenable,
+ .irqdisable = powertecscsi_irqdisable,
+};
+
+/* Prototype: void powertecscsi_terminator_ctl(host, on_off)
+ * Purpose : Turn the Powertec SCSI terminators on or off
+ * Params : host - card to turn on/off
+ * : on_off - !0 to turn on, 0 to turn off
+ */
+static void
+powertecscsi_terminator_ctl(struct Scsi_Host *host, int on_off)
+{
+ struct powertec_info *info = (struct powertec_info *)host->hostdata;
+
+ info->term_ctl = on_off ? POWERTEC_TERM_ENABLE : 0;
+ writeb(info->term_ctl, info->base + POWERTEC_TERM_CONTROL);
+}
+
+/* Prototype: void powertecscsi_intr(irq, *dev_id, *regs)
+ * Purpose : handle interrupts from Powertec SCSI card
+ * Params : irq - interrupt number
+ * dev_id - user-defined (Scsi_Host structure)
+ */
+static irqreturn_t powertecscsi_intr(int irq, void *dev_id)
+{
+ struct powertec_info *info = dev_id;
+
+ return fas216_intr(&info->info);
+}
+
+/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
+ * Purpose : initialises DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ * direction - DMA on to/off of card
+ * min_type - minimum DMA support that we must have for this transfer
+ * Returns : type of transfer to be performed
+ */
+static fasdmatype_t
+powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
+ fasdmadir_t direction, fasdmatype_t min_type)
+{
+ struct powertec_info *info = (struct powertec_info *)host->hostdata;
+ struct device *dev = scsi_get_device(host);
+ int dmach = info->info.scsi.dma;
+
+ if (info->info.ifcfg.capabilities & FASCAP_DMA &&
+ min_type == fasdma_real_all) {
+ int bufs, map_dir, dma_dir;
+
+ bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
+
+ if (direction == DMA_OUT)
+ map_dir = DMA_TO_DEVICE,
+ dma_dir = DMA_MODE_WRITE;
+ else
+ map_dir = DMA_FROM_DEVICE,
+ dma_dir = DMA_MODE_READ;
+
+ dma_map_sg(dev, info->sg, bufs, map_dir);
+
+ disable_dma(dmach);
+ set_dma_sg(dmach, info->sg, bufs);
+ set_dma_mode(dmach, dma_dir);
+ enable_dma(dmach);
+ return fasdma_real_all;
+ }
+
+ /*
+ * If we're not doing DMA,
+ * we'll do slow PIO
+ */
+ return fasdma_pio;
+}
+
+/* Prototype: int powertecscsi_dma_stop(host, SCpnt)
+ * Purpose : stops DMA/PIO
+ * Params : host - host
+ * SCpnt - command
+ */
+static void
+powertecscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
+{
+ struct powertec_info *info = (struct powertec_info *)host->hostdata;
+ if (info->info.scsi.dma != NO_DMA)
+ disable_dma(info->info.scsi.dma);
+}
+
+/* Prototype: const char *powertecscsi_info(struct Scsi_Host * host)
+ * Purpose : returns a descriptive string about this interface,
+ * Params : host - driver host structure to return info for.
+ * Returns : pointer to a static buffer containing null terminated string.
+ */
+const char *powertecscsi_info(struct Scsi_Host *host)
+{
+ struct powertec_info *info = (struct powertec_info *)host->hostdata;
+ static char string[150];
+
+ sprintf(string, "%s (%s) in slot %d v%s terminators o%s",
+ host->hostt->name, info->info.scsi.type, info->ec->slot_no,
+ VERSION, info->term_ctl ? "n" : "ff");
+
+ return string;
+}
+
+/* Prototype: int powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
+ * Purpose : Set a driver specific function
+ * Params : host - host to setup
+ * : buffer - buffer containing string describing operation
+ * : length - length of string
+ * Returns : -EINVAL, or 0
+ */
+static int
+powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ int ret = length;
+
+ if (length >= 12 && strncmp(buffer, "POWERTECSCSI", 12) == 0) {
+ buffer += 12;
+ length -= 12;
+
+ if (length >= 5 && strncmp(buffer, "term=", 5) == 0) {
+ if (buffer[5] == '1')
+ powertecscsi_terminator_ctl(host, 1);
+ else if (buffer[5] == '0')
+ powertecscsi_terminator_ctl(host, 0);
+ else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* Prototype: int powertecscsi_proc_info(char *buffer, char **start, off_t offset,
+ * int length, int host_no, int inout)
+ * Purpose : Return information about the driver to a user process accessing
+ * the /proc filesystem.
+ * Params : buffer - a buffer to write information to
+ * start - a pointer into this buffer set by this routine to the start
+ * of the required information.
+ * offset - offset into information that we have read up to.
+ * length - length of buffer
+ * inout - 0 for reading, 1 for writing.
+ * Returns : length of data written to buffer.
+ */
+static int powertecscsi_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ struct powertec_info *info;
+
+ info = (struct powertec_info *)host->hostdata;
+
+ seq_printf(m, "PowerTec SCSI driver v%s\n", VERSION);
+ fas216_print_host(&info->info, m);
+ seq_printf(m, "Term : o%s\n",
+ info->term_ctl ? "n" : "ff");
+
+ fas216_print_stats(&info->info, m);
+ fas216_print_devices(&info->info, m);
+ return 0;
+}
+
+static ssize_t powertecscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ struct powertec_info *info = (struct powertec_info *)host->hostdata;
+
+ return sprintf(buf, "%d\n", info->term_ctl ? 1 : 0);
+}
+
+static ssize_t
+powertecscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+
+ if (len > 1)
+ powertecscsi_terminator_ctl(host, buf[0] != '0');
+
+ return len;
+}
+
+static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR,
+ powertecscsi_show_term, powertecscsi_store_term);
+
+static struct scsi_host_template powertecscsi_template = {
+ .module = THIS_MODULE,
+ .show_info = powertecscsi_show_info,
+ .write_info = powertecscsi_set_proc_info,
+ .name = "PowerTec SCSI",
+ .info = powertecscsi_info,
+ .queuecommand = fas216_queue_command,
+ .eh_host_reset_handler = fas216_eh_host_reset,
+ .eh_bus_reset_handler = fas216_eh_bus_reset,
+ .eh_device_reset_handler = fas216_eh_device_reset,
+ .eh_abort_handler = fas216_eh_abort,
+
+ .can_queue = 8,
+ .this_id = 7,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .dma_boundary = IOMD_DMA_BOUNDARY,
+ .cmd_per_lun = 2,
+ .use_clustering = ENABLE_CLUSTERING,
+ .proc_name = "powertec",
+};
+
+static int powertecscsi_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
+{
+ struct Scsi_Host *host;
+ struct powertec_info *info;
+ void __iomem *base;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ host = scsi_host_alloc(&powertecscsi_template,
+ sizeof (struct powertec_info));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ ecard_set_drvdata(ec, host);
+
+ info = (struct powertec_info *)host->hostdata;
+ info->base = base;
+ powertecscsi_terminator_ctl(host, term[ec->slot_no]);
+
+ info->ec = ec;
+ info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET;
+ info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT;
+ info->info.scsi.irq = ec->irq;
+ info->info.scsi.dma = ec->dma;
+ info->info.ifcfg.clockrate = 40; /* MHz */
+ info->info.ifcfg.select_timeout = 255;
+ info->info.ifcfg.asyncperiod = 200; /* ns */
+ info->info.ifcfg.sync_max_depth = 7;
+ info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK;
+ info->info.ifcfg.disconnect_ok = 1;
+ info->info.ifcfg.wide_max_size = 0;
+ info->info.ifcfg.capabilities = 0;
+ info->info.dma.setup = powertecscsi_dma_setup;
+ info->info.dma.pseudo = NULL;
+ info->info.dma.stop = powertecscsi_dma_stop;
+
+ ec->irqaddr = base + POWERTEC_INTR_STATUS;
+ ec->irqmask = POWERTEC_INTR_BIT;
+
+ ecard_setirq(ec, &powertecscsi_ops, info);
+
+ device_create_file(&ec->dev, &dev_attr_bus_term);
+
+ ret = fas216_init(host);
+ if (ret)
+ goto out_free;
+
+ ret = request_irq(ec->irq, powertecscsi_intr,
+ 0, "powertec", info);
+ if (ret) {
+ printk("scsi%d: IRQ%d not free: %d\n",
+ host->host_no, ec->irq, ret);
+ goto out_release;
+ }
+
+ if (info->info.scsi.dma != NO_DMA) {
+ if (request_dma(info->info.scsi.dma, "powertec")) {
+ printk("scsi%d: DMA%d not free, using PIO\n",
+ host->host_no, info->info.scsi.dma);
+ info->info.scsi.dma = NO_DMA;
+ } else {
+ set_dma_speed(info->info.scsi.dma, 180);
+ info->info.ifcfg.capabilities |= FASCAP_DMA;
+ }
+ }
+
+ ret = fas216_add(host, &ec->dev);
+ if (ret == 0)
+ goto out;
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+ free_irq(ec->irq, host);
+
+ out_release:
+ fas216_release(host);
+
+ out_free:
+ device_remove_file(&ec->dev, &dev_attr_bus_term);
+ scsi_host_put(host);
+
+ out_region:
+ ecard_release_resources(ec);
+
+ out:
+ return ret;
+}
+
+static void powertecscsi_remove(struct expansion_card *ec)
+{
+ struct Scsi_Host *host = ecard_get_drvdata(ec);
+ struct powertec_info *info = (struct powertec_info *)host->hostdata;
+
+ ecard_set_drvdata(ec, NULL);
+ fas216_remove(host);
+
+ device_remove_file(&ec->dev, &dev_attr_bus_term);
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+ free_irq(ec->irq, info);
+
+ fas216_release(host);
+ scsi_host_put(host);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id powertecscsi_cids[] = {
+ { MANU_ALSYSTEMS, PROD_ALSYS_SCSIATAPI },
+ { 0xffff, 0xffff },
+};
+
+static struct ecard_driver powertecscsi_driver = {
+ .probe = powertecscsi_probe,
+ .remove = powertecscsi_remove,
+ .id_table = powertecscsi_cids,
+ .drv = {
+ .name = "powertecscsi",
+ },
+};
+
+static int __init powertecscsi_init(void)
+{
+ return ecard_register_driver(&powertecscsi_driver);
+}
+
+static void __exit powertecscsi_exit(void)
+{
+ ecard_remove_driver(&powertecscsi_driver);
+}
+
+module_init(powertecscsi_init);
+module_exit(powertecscsi_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Powertec SCSI driver");
+module_param_array(term, int, NULL, 0);
+MODULE_PARM_DESC(term, "SCSI bus termination");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
new file mode 100644
index 000000000..3441ce3eb
--- /dev/null
+++ b/drivers/scsi/arm/queue.c
@@ -0,0 +1,318 @@
+/*
+ * linux/drivers/acorn/scsi/queue.c: queue handling primitives
+ *
+ * Copyright (C) 1997-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Changelog:
+ * 15-Sep-1997 RMK Created.
+ * 11-Oct-1997 RMK Corrected problem with queue_remove_exclude
+ * not updating internal linked list properly
+ * (was causing commands to go missing).
+ * 30-Aug-2000 RMK Use Linux list handling and spinlocks
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/init.h>
+
+#include "../scsi.h"
+
+#define DEBUG
+
+typedef struct queue_entry {
+ struct list_head list;
+ struct scsi_cmnd *SCpnt;
+#ifdef DEBUG
+ unsigned long magic;
+#endif
+} QE_t;
+
+#ifdef DEBUG
+#define QUEUE_MAGIC_FREE 0xf7e1c9a3
+#define QUEUE_MAGIC_USED 0xf7e1cc33
+
+#define SET_MAGIC(q,m) ((q)->magic = (m))
+#define BAD_MAGIC(q,m) ((q)->magic != (m))
+#else
+#define SET_MAGIC(q,m) do { } while (0)
+#define BAD_MAGIC(q,m) (0)
+#endif
+
+#include "queue.h"
+
+#define NR_QE 32
+
+/*
+ * Function: void queue_initialise (Queue_t *queue)
+ * Purpose : initialise a queue
+ * Params : queue - queue to initialise
+ */
+int queue_initialise (Queue_t *queue)
+{
+ unsigned int nqueues = NR_QE;
+ QE_t *q;
+
+ spin_lock_init(&queue->queue_lock);
+ INIT_LIST_HEAD(&queue->head);
+ INIT_LIST_HEAD(&queue->free);
+
+ /*
+ * If life was easier, then SCpnt would have a
+ * host-available list head, and we wouldn't
+ * need to keep free lists or allocate this
+ * memory.
+ */
+ queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL);
+ if (q) {
+ for (; nqueues; q++, nqueues--) {
+ SET_MAGIC(q, QUEUE_MAGIC_FREE);
+ q->SCpnt = NULL;
+ list_add(&q->list, &queue->free);
+ }
+ }
+
+ return queue->alloc != NULL;
+}
+
+/*
+ * Function: void queue_free (Queue_t *queue)
+ * Purpose : free a queue
+ * Params : queue - queue to free
+ */
+void queue_free (Queue_t *queue)
+{
+ if (!list_empty(&queue->head))
+ printk(KERN_WARNING "freeing non-empty queue %p\n", queue);
+ kfree(queue->alloc);
+}
+
+
+/*
+ * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
+ * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head.
+ * Params : queue - destination queue
+ * SCpnt - command to add
+ * head - add command to head of queue
+ * Returns : 0 on error, !0 on success
+ */
+int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
+{
+ unsigned long flags;
+ struct list_head *l;
+ QE_t *q;
+ int ret = 0;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ if (list_empty(&queue->free))
+ goto empty;
+
+ l = queue->free.next;
+ list_del(l);
+
+ q = list_entry(l, QE_t, list);
+ BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE));
+
+ SET_MAGIC(q, QUEUE_MAGIC_USED);
+ q->SCpnt = SCpnt;
+
+ if (head)
+ list_add(l, &queue->head);
+ else
+ list_add_tail(l, &queue->head);
+
+ ret = 1;
+empty:
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+ return ret;
+}
+
+static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
+{
+ QE_t *q;
+
+ /*
+ * Move the entry from the "used" list onto the "free" list
+ */
+ list_del(ent);
+ q = list_entry(ent, QE_t, list);
+ BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED));
+
+ SET_MAGIC(q, QUEUE_MAGIC_FREE);
+ list_add(ent, &queue->free);
+
+ return q->SCpnt;
+}
+
+/*
+ * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude)
+ * Purpose : remove a SCSI command from a queue
+ * Params : queue - queue to remove command from
+ * exclude - bit array of target&lun which is busy
+ * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
+ */
+struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
+{
+ unsigned long flags;
+ struct list_head *l;
+ struct scsi_cmnd *SCpnt = NULL;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ list_for_each(l, &queue->head) {
+ QE_t *q = list_entry(l, QE_t, list);
+ if (!test_bit(q->SCpnt->device->id * 8 +
+ (u8)(q->SCpnt->device->lun & 0x7), exclude)) {
+ SCpnt = __queue_remove(queue, l);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+
+ return SCpnt;
+}
+
+/*
+ * Function: struct scsi_cmnd *queue_remove (queue)
+ * Purpose : removes first SCSI command from a queue
+ * Params : queue - queue to remove command from
+ * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
+ */
+struct scsi_cmnd *queue_remove(Queue_t *queue)
+{
+ unsigned long flags;
+ struct scsi_cmnd *SCpnt = NULL;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ if (!list_empty(&queue->head))
+ SCpnt = __queue_remove(queue, queue->head.next);
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+
+ return SCpnt;
+}
+
+/*
+ * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
+ * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
+ * Params : queue - queue to remove command from
+ * target - target that we want
+ * lun - lun on device
+ * tag - tag on device
+ * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements
+ */
+struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
+ int tag)
+{
+ unsigned long flags;
+ struct list_head *l;
+ struct scsi_cmnd *SCpnt = NULL;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ list_for_each(l, &queue->head) {
+ QE_t *q = list_entry(l, QE_t, list);
+ if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
+ q->SCpnt->tag == tag) {
+ SCpnt = __queue_remove(queue, l);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+
+ return SCpnt;
+}
+
+/*
+ * Function: queue_remove_all_target(queue, target)
+ * Purpose : remove all SCSI commands from the queue for a specified target
+ * Params : queue - queue to remove command from
+ * target - target device id
+ * Returns : nothing
+ */
+void queue_remove_all_target(Queue_t *queue, int target)
+{
+ unsigned long flags;
+ struct list_head *l;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ list_for_each(l, &queue->head) {
+ QE_t *q = list_entry(l, QE_t, list);
+ if (q->SCpnt->device->id == target)
+ __queue_remove(queue, l);
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+}
+
+/*
+ * Function: int queue_probetgtlun (queue, target, lun)
+ * Purpose : check to see if we have a command in the queue for the specified
+ * target/lun.
+ * Params : queue - queue to look in
+ * target - target we want to probe
+ * lun - lun on target
+ * Returns : 0 if not found, != 0 if found
+ */
+int queue_probetgtlun (Queue_t *queue, int target, int lun)
+{
+ unsigned long flags;
+ struct list_head *l;
+ int found = 0;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ list_for_each(l, &queue->head) {
+ QE_t *q = list_entry(l, QE_t, list);
+ if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+
+ return found;
+}
+
+/*
+ * Function: int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
+ * Purpose : remove a specific command from the queues
+ * Params : queue - queue to look in
+ * SCpnt - command to find
+ * Returns : 0 if not found
+ */
+int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
+{
+ unsigned long flags;
+ struct list_head *l;
+ int found = 0;
+
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ list_for_each(l, &queue->head) {
+ QE_t *q = list_entry(l, QE_t, list);
+ if (q->SCpnt == SCpnt) {
+ __queue_remove(queue, l);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+
+ return found;
+}
+
+EXPORT_SYMBOL(queue_initialise);
+EXPORT_SYMBOL(queue_free);
+EXPORT_SYMBOL(__queue_add);
+EXPORT_SYMBOL(queue_remove);
+EXPORT_SYMBOL(queue_remove_exclude);
+EXPORT_SYMBOL(queue_remove_tgtluntag);
+EXPORT_SYMBOL(queue_remove_cmd);
+EXPORT_SYMBOL(queue_remove_all_target);
+EXPORT_SYMBOL(queue_probetgtlun);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("SCSI command queueing");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/arm/queue.h b/drivers/scsi/arm/queue.h
new file mode 100644
index 000000000..3c519c923
--- /dev/null
+++ b/drivers/scsi/arm/queue.h
@@ -0,0 +1,107 @@
+/*
+ * linux/drivers/acorn/scsi/queue.h: queue handling
+ *
+ * Copyright (C) 1997 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef QUEUE_H
+#define QUEUE_H
+
+typedef struct {
+ struct list_head head;
+ struct list_head free;
+ spinlock_t queue_lock;
+ void *alloc; /* start of allocated mem */
+} Queue_t;
+
+/*
+ * Function: void queue_initialise (Queue_t *queue)
+ * Purpose : initialise a queue
+ * Params : queue - queue to initialise
+ */
+extern int queue_initialise (Queue_t *queue);
+
+/*
+ * Function: void queue_free (Queue_t *queue)
+ * Purpose : free a queue
+ * Params : queue - queue to free
+ */
+extern void queue_free (Queue_t *queue);
+
+/*
+ * Function: struct scsi_cmnd *queue_remove (queue)
+ * Purpose : removes first SCSI command from a queue
+ * Params : queue - queue to remove command from
+ * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
+ */
+extern struct scsi_cmnd *queue_remove (Queue_t *queue);
+
+/*
+ * Function: struct scsi_cmnd *queue_remove_exclude_ref (queue, exclude)
+ * Purpose : remove a SCSI command from a queue
+ * Params : queue - queue to remove command from
+ * exclude - array of busy LUNs
+ * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available
+ */
+extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
+ unsigned long *exclude);
+
+#define queue_add_cmd_ordered(queue,SCpnt) \
+ __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
+#define queue_add_cmd_tail(queue,SCpnt) \
+ __queue_add(queue,SCpnt,0)
+/*
+ * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
+ * Purpose : Add a new command onto a queue
+ * Params : queue - destination queue
+ * SCpnt - command to add
+ * head - add command to head of queue
+ * Returns : 0 on error, !0 on success
+ */
+extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
+
+/*
+ * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
+ * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
+ * Params : queue - queue to remove command from
+ * target - target that we want
+ * lun - lun on device
+ * tag - tag on device
+ * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements
+ */
+extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
+ int lun, int tag);
+
+/*
+ * Function: queue_remove_all_target(queue, target)
+ * Purpose : remove all SCSI commands from the queue for a specified target
+ * Params : queue - queue to remove command from
+ * target - target device id
+ * Returns : nothing
+ */
+extern void queue_remove_all_target(Queue_t *queue, int target);
+
+/*
+ * Function: int queue_probetgtlun (queue, target, lun)
+ * Purpose : check to see if we have a command in the queue for the specified
+ * target/lun.
+ * Params : queue - queue to look in
+ * target - target we want to probe
+ * lun - lun on target
+ * Returns : 0 if not found, != 0 if found
+ */
+extern int queue_probetgtlun (Queue_t *queue, int target, int lun);
+
+/*
+ * Function: int queue_remove_cmd (Queue_t *queue, struct scsi_cmnd *SCpnt)
+ * Purpose : remove a specific command from the queues
+ * Params : queue - queue to look in
+ * SCpnt - command to find
+ * Returns : 0 if not found
+ */
+int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt);
+
+#endif /* QUEUE_H */
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
new file mode 100644
index 000000000..138a521ba
--- /dev/null
+++ b/drivers/scsi/arm/scsi.h
@@ -0,0 +1,128 @@
+/*
+ * linux/drivers/acorn/scsi/scsi.h
+ *
+ * Copyright (C) 2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Commonly used scsi driver functions.
+ */
+
+#include <linux/scatterlist.h>
+
+#define BELT_AND_BRACES
+
+/*
+ * The scatter-gather list handling. This contains all
+ * the yucky stuff that needs to be fixed properly.
+ */
+
+/*
+ * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
+ * entries of uninitialized memory. SCp is from scsi-ml and has a valid
+ * (possibly chained) sg-list
+ */
+static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
+{
+ int bufs = SCp->buffers_residual;
+
+ /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg().
+ * and to remove this BUG_ON. Use min() in-its-place
+ */
+ BUG_ON(bufs + 1 > max);
+
+ sg_set_buf(sg, SCp->ptr, SCp->this_residual);
+
+ if (bufs) {
+ struct scatterlist *src_sg;
+ unsigned i;
+
+ for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
+ *(++sg) = *src_sg;
+ sg_mark_end(sg);
+ }
+
+ return bufs + 1;
+}
+
+static inline int next_SCp(struct scsi_pointer *SCp)
+{
+ int ret = SCp->buffers_residual;
+ if (ret) {
+ SCp->buffer = sg_next(SCp->buffer);
+ SCp->buffers_residual--;
+ SCp->ptr = sg_virt(SCp->buffer);
+ SCp->this_residual = SCp->buffer->length;
+ } else {
+ SCp->ptr = NULL;
+ SCp->this_residual = 0;
+ }
+ return ret;
+}
+
+static inline unsigned char get_next_SCp_byte(struct scsi_pointer *SCp)
+{
+ char c = *SCp->ptr;
+
+ SCp->ptr += 1;
+ SCp->this_residual -= 1;
+
+ return c;
+}
+
+static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
+{
+ *SCp->ptr = c;
+ SCp->ptr += 1;
+ SCp->this_residual -= 1;
+}
+
+static inline void init_SCp(struct scsi_cmnd *SCpnt)
+{
+ memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
+
+ if (scsi_bufflen(SCpnt)) {
+ unsigned long len = 0;
+
+ SCpnt->SCp.buffer = scsi_sglist(SCpnt);
+ SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
+ SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.phase = scsi_bufflen(SCpnt);
+
+#ifdef BELT_AND_BRACES
+ { /*
+ * Calculate correct buffer length. Some commands
+ * come in with the wrong scsi_bufflen.
+ */
+ struct scatterlist *sg;
+ unsigned i, sg_count = scsi_sg_count(SCpnt);
+
+ scsi_for_each_sg(SCpnt, sg, sg_count, i)
+ len += sg->length;
+
+ if (scsi_bufflen(SCpnt) != len) {
+ printk(KERN_WARNING
+ "scsi%d.%c: bad request buffer "
+ "length %d, should be %ld\n",
+ SCpnt->device->host->host_no,
+ '0' + SCpnt->device->id,
+ scsi_bufflen(SCpnt), len);
+ /*
+ * FIXME: Totaly naive fixup. We should abort
+ * with error
+ */
+ SCpnt->SCp.phase =
+ min_t(unsigned long, len,
+ scsi_bufflen(SCpnt));
+ }
+ }
+#endif
+ } else {
+ SCpnt->SCp.ptr = NULL;
+ SCpnt->SCp.this_residual = 0;
+ SCpnt->SCp.phase = 0;
+ }
+}
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
new file mode 100644
index 000000000..db87ece6e
--- /dev/null
+++ b/drivers/scsi/atari_NCR5380.c
@@ -0,0 +1,2927 @@
+/*
+ * NCR 5380 generic driver routines. These should make it *trivial*
+ * to implement 5380 SCSI drivers under Linux with a non-trantor
+ * architecture.
+ *
+ * Note that these routines also work with NR53c400 family chips.
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * ++roman: To port the 5380 driver to the Atari, I had to do some changes in
+ * this file, too:
+ *
+ * - Some of the debug statements were incorrect (undefined variables and the
+ * like). I fixed that.
+ *
+ * - In information_transfer(), I think a #ifdef was wrong. Looking at the
+ * possible DMA transfer size should also happen for REAL_DMA. I added this
+ * in the #if statement.
+ *
+ * - When using real DMA, information_transfer() should return in a DATAOUT
+ * phase after starting the DMA. It has nothing more to do.
+ *
+ * - The interrupt service routine should run main after end of DMA, too (not
+ * only after RESELECTION interrupts). Additionally, it should _not_ test
+ * for more interrupts after running main, since a DMA process may have
+ * been started and interrupts are turned on now. The new int could happen
+ * inside the execution of NCR5380_intr(), leading to recursive
+ * calls.
+ *
+ * - I've added a function merge_contiguous_buffers() that tries to
+ * merge scatter-gather buffers that are located at contiguous
+ * physical addresses and can be processed with the same DMA setup.
+ * Since most scatter-gather operations work on a page (4K) of
+ * 4 buffers (1K), in more than 90% of all cases three interrupts and
+ * DMA setup actions are saved.
+ *
+ * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
+ * and USLEEP, because these were messing up readability and will never be
+ * needed for Atari SCSI.
+ *
+ * - I've revised the NCR5380_main() calling scheme (relax the 'main_running'
+ * stuff), and 'main' is executed in a bottom half if awoken by an
+ * interrupt.
+ *
+ * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..."
+ * constructs. In my eyes, this made the source rather unreadable, so I
+ * finally replaced that by the *_PRINTK() macros.
+ *
+ */
+
+/*
+ * Further development / testing that should be done :
+ * 1. Test linked command handling code after Eric is ready with
+ * the high level code.
+ */
+
+/* Adapted for the sun3 by Sam Creasey. */
+
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_transport_spi.h>
+
+#if (NDEBUG & NDEBUG_LISTS)
+#define LIST(x, y) \
+ do { \
+ printk("LINE:%d Adding %p to %p\n", \
+ __LINE__, (void*)(x), (void*)(y)); \
+ if ((x) == (y)) \
+ udelay(5); \
+ } while (0)
+#define REMOVE(w, x, y, z) \
+ do { \
+ printk("LINE:%d Removing: %p->%p %p->%p \n", \
+ __LINE__, (void*)(w), (void*)(x), \
+ (void*)(y), (void*)(z)); \
+ if ((x) == (y)) \
+ udelay(5); \
+ } while (0)
+#else
+#define LIST(x,y)
+#define REMOVE(w,x,y,z)
+#endif
+
+#ifndef notyet
+#undef LINKED
+#endif
+
+/*
+ * Design
+ *
+ * This is a generic 5380 driver. To use it on a different platform,
+ * one simply writes appropriate system specific macros (ie, data
+ * transfer - some PC's will use the I/O bus, 68K's must use
+ * memory mapped) and drops this file in their 'C' wrapper.
+ *
+ * As far as command queueing, two queues are maintained for
+ * each 5380 in the system - commands that haven't been issued yet,
+ * and commands that are currently executing. This means that an
+ * unlimited number of commands may be queued, letting
+ * more commands propagate from the higher driver levels giving higher
+ * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
+ * allowing multiple commands to propagate all the way to a SCSI-II device
+ * while a command is already executing.
+ *
+ *
+ * Issues specific to the NCR5380 :
+ *
+ * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
+ * piece of hardware that requires you to sit in a loop polling for
+ * the REQ signal as long as you are connected. Some devices are
+ * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
+ * while doing long seek operations.
+ *
+ * The workaround for this is to keep track of devices that have
+ * disconnected. If the device hasn't disconnected, for commands that
+ * should disconnect, we do something like
+ *
+ * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
+ *
+ * Some tweaking of N and M needs to be done. An algorithm based
+ * on "time to data" would give the best results as long as short time
+ * to datas (ie, on the same track) were considered, however these
+ * broken devices are the exception rather than the rule and I'd rather
+ * spend my time optimizing for the normal case.
+ *
+ * Architecture :
+ *
+ * At the heart of the design is a coroutine, NCR5380_main,
+ * which is started from a workqueue for each NCR5380 host in the
+ * system. It attempts to establish I_T_L or I_T_L_Q nexuses by
+ * removing the commands from the issue queue and calling
+ * NCR5380_select() if a nexus is not established.
+ *
+ * Once a nexus is established, the NCR5380_information_transfer()
+ * phase goes through the various phases as instructed by the target.
+ * if the target goes into MSG IN and sends a DISCONNECT message,
+ * the command structure is placed into the per instance disconnected
+ * queue, and NCR5380_main tries to find more work. If the target is
+ * idle for too long, the system will try to sleep.
+ *
+ * If a command has disconnected, eventually an interrupt will trigger,
+ * calling NCR5380_intr() which will in turn call NCR5380_reselect
+ * to reestablish a nexus. This will run main if necessary.
+ *
+ * On command termination, the done function will be called as
+ * appropriate.
+ *
+ * SCSI pointers are maintained in the SCp field of SCSI command
+ * structures, being initialized after the command is connected
+ * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
+ * Note that in violation of the standard, an implicit SAVE POINTERS operation
+ * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
+ */
+
+/*
+ * Using this file :
+ * This file a skeleton Linux SCSI driver for the NCR 5380 series
+ * of chips. To use it, you write an architecture specific functions
+ * and macros and include this file in your driver.
+ *
+ * These macros control options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
+ * transceivers.
+ *
+ * LINKED - if defined, linked commands are supported.
+ *
+ * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
+ *
+ * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
+ *
+ * These macros MUST be defined :
+ *
+ * NCR5380_read(register) - read from the specified register
+ *
+ * NCR5380_write(register, value) - write to the specific register
+ *
+ * NCR5380_implementation_fields - additional fields needed for this
+ * specific implementation of the NCR5380
+ *
+ * Either real DMA *or* pseudo DMA may be implemented
+ * REAL functions :
+ * NCR5380_REAL_DMA should be defined if real DMA is to be used.
+ * Note that the DMA setup functions should return the number of bytes
+ * that they were able to program the controller for.
+ *
+ * Also note that generic i386/PC versions of these macros are
+ * available as NCR5380_i386_dma_write_setup,
+ * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
+ *
+ * NCR5380_dma_write_setup(instance, src, count) - initialize
+ * NCR5380_dma_read_setup(instance, dst, count) - initialize
+ * NCR5380_dma_residual(instance); - residual count
+ *
+ * PSEUDO functions :
+ * NCR5380_pwrite(instance, src, count)
+ * NCR5380_pread(instance, dst, count);
+ *
+ * The generic driver is initialized by calling NCR5380_init(instance),
+ * after setting the appropriate host specific fields and ID. If the
+ * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
+ * possible) function may be used.
+ */
+
+/* Macros ease life... :-) */
+#define SETUP_HOSTDATA(in) \
+ struct NCR5380_hostdata *hostdata = \
+ (struct NCR5380_hostdata *)(in)->hostdata
+#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
+
+#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
+#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next))
+#define NEXTADDR(cmd) ((struct scsi_cmnd **)&(cmd)->host_scribble)
+
+#define HOSTNO instance->host_no
+#define H_NO(cmd) (cmd)->device->host->host_no
+
+#ifdef SUPPORT_TAGS
+
+/*
+ * Functions for handling tagged queuing
+ * =====================================
+ *
+ * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
+ *
+ * Using consecutive numbers for the tags is no good idea in my eyes. There
+ * could be wrong re-usings if the counter (8 bit!) wraps and some early
+ * command has been preempted for a long time. My solution: a bitfield for
+ * remembering used tags.
+ *
+ * There's also the problem that each target has a certain queue size, but we
+ * cannot know it in advance :-( We just see a QUEUE_FULL status being
+ * returned. So, in this case, the driver internal queue size assumption is
+ * reduced to the number of active tags if QUEUE_FULL is returned by the
+ * target. The command is returned to the mid-level, but with status changed
+ * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL
+ * correctly.
+ *
+ * We're also not allowed running tagged commands as long as an untagged
+ * command is active. And REQUEST SENSE commands after a contingent allegiance
+ * condition _must_ be untagged. To keep track whether an untagged command has
+ * been issued, the host->busy array is still employed, as it is without
+ * support for tagged queuing.
+ *
+ * One could suspect that there are possible race conditions between
+ * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
+ * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
+ * which already guaranteed to be running at most once. It is also the only
+ * place where tags/LUNs are allocated. So no other allocation can slip
+ * between that pair, there could only happen a reselection, which can free a
+ * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
+ * important: the tag bit must be cleared before 'nr_allocated' is decreased.
+ */
+
+static void __init init_tags(struct NCR5380_hostdata *hostdata)
+{
+ int target, lun;
+ struct tag_alloc *ta;
+
+ if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
+ return;
+
+ for (target = 0; target < 8; ++target) {
+ for (lun = 0; lun < 8; ++lun) {
+ ta = &hostdata->TagAlloc[target][lun];
+ bitmap_zero(ta->allocated, MAX_TAGS);
+ ta->nr_allocated = 0;
+ /* At the beginning, assume the maximum queue size we could
+ * support (MAX_TAGS). This value will be decreased if the target
+ * returns QUEUE_FULL status.
+ */
+ ta->queue_size = MAX_TAGS;
+ }
+ }
+}
+
+
+/* Check if we can issue a command to this LUN: First see if the LUN is marked
+ * busy by an untagged command. If the command should use tagged queuing, also
+ * check that there is a free tag and the target's queue won't overflow. This
+ * function should be called with interrupts disabled to avoid race
+ * conditions.
+ */
+
+static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
+{
+ u8 lun = cmd->device->lun;
+ SETUP_HOSTDATA(cmd->device->host);
+
+ if (hostdata->busy[cmd->device->id] & (1 << lun))
+ return 1;
+ if (!should_be_tagged ||
+ !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
+ !cmd->device->tagged_supported)
+ return 0;
+ if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
+ hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
+ H_NO(cmd), cmd->device->id, lun);
+ return 1;
+ }
+ return 0;
+}
+
+
+/* Allocate a tag for a command (there are no checks anymore, check_lun_busy()
+ * must be called before!), or reserve the LUN in 'busy' if the command is
+ * untagged.
+ */
+
+static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
+{
+ u8 lun = cmd->device->lun;
+ SETUP_HOSTDATA(cmd->device->host);
+
+ /* If we or the target don't support tagged queuing, allocate the LUN for
+ * an untagged command.
+ */
+ if (!should_be_tagged ||
+ !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
+ !cmd->device->tagged_supported) {
+ cmd->tag = TAG_NONE;
+ hostdata->busy[cmd->device->id] |= (1 << lun);
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
+ "command\n", H_NO(cmd), cmd->device->id, lun);
+ } else {
+ struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
+
+ cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
+ set_bit(cmd->tag, ta->allocated);
+ ta->nr_allocated++;
+ dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
+ "(now %d tags in use)\n",
+ H_NO(cmd), cmd->tag, cmd->device->id,
+ lun, ta->nr_allocated);
+ }
+}
+
+
+/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
+ * unlock the LUN.
+ */
+
+static void cmd_free_tag(struct scsi_cmnd *cmd)
+{
+ u8 lun = cmd->device->lun;
+ SETUP_HOSTDATA(cmd->device->host);
+
+ if (cmd->tag == TAG_NONE) {
+ hostdata->busy[cmd->device->id] &= ~(1 << lun);
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
+ H_NO(cmd), cmd->device->id, lun);
+ } else if (cmd->tag >= MAX_TAGS) {
+ printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
+ H_NO(cmd), cmd->tag);
+ } else {
+ struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
+ clear_bit(cmd->tag, ta->allocated);
+ ta->nr_allocated--;
+ dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
+ H_NO(cmd), cmd->tag, cmd->device->id, lun);
+ }
+}
+
+
+static void free_all_tags(struct NCR5380_hostdata *hostdata)
+{
+ int target, lun;
+ struct tag_alloc *ta;
+
+ if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
+ return;
+
+ for (target = 0; target < 8; ++target) {
+ for (lun = 0; lun < 8; ++lun) {
+ ta = &hostdata->TagAlloc[target][lun];
+ bitmap_zero(ta->allocated, MAX_TAGS);
+ ta->nr_allocated = 0;
+ }
+ }
+}
+
+#endif /* SUPPORT_TAGS */
+
+
+/*
+ * Function: void merge_contiguous_buffers( struct scsi_cmnd *cmd )
+ *
+ * Purpose: Try to merge several scatter-gather requests into one DMA
+ * transfer. This is possible if the scatter buffers lie on
+ * physical contiguous addresses.
+ *
+ * Parameters: struct scsi_cmnd *cmd
+ * The command to work on. The first scatter buffer's data are
+ * assumed to be already transferred into ptr/this_residual.
+ */
+
+static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
+{
+#if !defined(CONFIG_SUN3)
+ unsigned long endaddr;
+#if (NDEBUG & NDEBUG_MERGING)
+ unsigned long oldlen = cmd->SCp.this_residual;
+ int cnt = 1;
+#endif
+
+ for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
+ cmd->SCp.buffers_residual &&
+ virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
+ dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
+ page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
+#if (NDEBUG & NDEBUG_MERGING)
+ ++cnt;
+#endif
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual += cmd->SCp.buffer->length;
+ endaddr += cmd->SCp.buffer->length;
+ }
+#if (NDEBUG & NDEBUG_MERGING)
+ if (oldlen != cmd->SCp.this_residual)
+ dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
+ cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
+#endif
+#endif /* !defined(CONFIG_SUN3) */
+}
+
+/**
+ * initialize_SCp - init the scsi pointer field
+ * @cmd: command block to set up
+ *
+ * Set up the internal fields in the SCSI command.
+ */
+
+static inline void initialize_SCp(struct scsi_cmnd *cmd)
+{
+ /*
+ * Initialize the Scsi Pointer field so that all of the commands in the
+ * various queues are valid.
+ */
+
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ /* ++roman: Try to merge some scatter-buffers if they are at
+ * contiguous physical addresses.
+ */
+ merge_contiguous_buffers(cmd);
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
+ }
+}
+
+#include <linux/delay.h>
+
+#if NDEBUG
+static struct {
+ unsigned char mask;
+ const char *name;
+} signals[] = {
+ { SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
+ { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
+ { SR_SEL, "SEL" }, {0, NULL}
+}, basrs[] = {
+ {BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
+}, icrs[] = {
+ {ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
+ {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
+ {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
+ {0, NULL}
+}, mrs[] = {
+ {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
+ {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
+ "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
+ {MR_MONITOR_BSY, "MODE MONITOR BSY"},
+ {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
+ {0, NULL}
+};
+
+/**
+ * NCR5380_print - print scsi bus signals
+ * @instance: adapter state to dump
+ *
+ * Print the SCSI bus signals for debugging purposes
+ */
+
+static void NCR5380_print(struct Scsi_Host *instance)
+{
+ unsigned char status, data, basr, mr, icr, i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ data = NCR5380_read(CURRENT_SCSI_DATA_REG);
+ status = NCR5380_read(STATUS_REG);
+ mr = NCR5380_read(MODE_REG);
+ icr = NCR5380_read(INITIATOR_COMMAND_REG);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ local_irq_restore(flags);
+ printk("STATUS_REG: %02x ", status);
+ for (i = 0; signals[i].mask; ++i)
+ if (status & signals[i].mask)
+ printk(",%s", signals[i].name);
+ printk("\nBASR: %02x ", basr);
+ for (i = 0; basrs[i].mask; ++i)
+ if (basr & basrs[i].mask)
+ printk(",%s", basrs[i].name);
+ printk("\nICR: %02x ", icr);
+ for (i = 0; icrs[i].mask; ++i)
+ if (icr & icrs[i].mask)
+ printk(",%s", icrs[i].name);
+ printk("\nMODE: %02x ", mr);
+ for (i = 0; mrs[i].mask; ++i)
+ if (mr & mrs[i].mask)
+ printk(",%s", mrs[i].name);
+ printk("\n");
+}
+
+static struct {
+ unsigned char value;
+ const char *name;
+} phases[] = {
+ {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+ {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+ {PHASE_UNKNOWN, "UNKNOWN"}
+};
+
+/**
+ * NCR5380_print_phase - show SCSI phase
+ * @instance: adapter to dump
+ *
+ * Print the current SCSI phase for debugging purposes
+ *
+ * Locks: none
+ */
+
+static void NCR5380_print_phase(struct Scsi_Host *instance)
+{
+ unsigned char status;
+ int i;
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i)
+ ;
+ printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
+ }
+}
+
+#endif
+
+/*
+ * ++roman: New scheme of calling NCR5380_main()
+ *
+ * If we're not in an interrupt, we can call our main directly, it cannot be
+ * already running. Else, we queue it on a task queue, if not 'main_running'
+ * tells us that a lower level is already executing it. This way,
+ * 'main_running' needs not be protected in a special way.
+ *
+ * queue_main() is a utility function for putting our main onto the task
+ * queue, if main_running is false. It should be called only from a
+ * interrupt or bottom half.
+ */
+
+#include <linux/gfp.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+static inline void queue_main(struct NCR5380_hostdata *hostdata)
+{
+ if (!hostdata->main_running) {
+ /* If in interrupt and NCR5380_main() not already running,
+ queue it on the 'immediate' task queue, to be processed
+ immediately after the current interrupt processing has
+ finished. */
+ schedule_work(&hostdata->main_task);
+ }
+ /* else: nothing to do: the running NCR5380_main() will pick up
+ any newly queued command. */
+}
+
+/**
+ * NCR58380_info - report driver and host information
+ * @instance: relevant scsi host instance
+ *
+ * For use as the host template info() handler.
+ *
+ * Locks: none
+ */
+
+static const char *NCR5380_info(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ return hostdata->info;
+}
+
+static void prepare_info(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ snprintf(hostdata->info, sizeof(hostdata->info),
+ "%s, io_port 0x%lx, n_io_port %d, "
+ "base 0x%lx, irq %d, "
+ "can_queue %d, cmd_per_lun %d, "
+ "sg_tablesize %d, this_id %d, "
+ "flags { %s}, "
+ "options { %s} ",
+ instance->hostt->name, instance->io_port, instance->n_io_port,
+ instance->base, instance->irq,
+ instance->can_queue, instance->cmd_per_lun,
+ instance->sg_tablesize, instance->this_id,
+ hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
+#ifdef DIFFERENTIAL
+ "DIFFERENTIAL "
+#endif
+#ifdef REAL_DMA
+ "REAL_DMA "
+#endif
+#ifdef PARITY
+ "PARITY "
+#endif
+#ifdef SUPPORT_TAGS
+ "SUPPORT_TAGS "
+#endif
+ "");
+}
+
+/**
+ * NCR5380_print_status - dump controller info
+ * @instance: controller to dump
+ *
+ * Print commands in the various queues, called from NCR5380_abort
+ * to aid debugging.
+ */
+
+static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd)
+{
+ int i, s;
+ unsigned char *command;
+ printk("scsi%d: destination target %d, lun %llu\n",
+ H_NO(cmd), cmd->device->id, cmd->device->lun);
+ printk(KERN_CONT " command = ");
+ command = cmd->cmnd;
+ printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
+ for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ printk(KERN_CONT " %02x", command[i]);
+ printk("\n");
+}
+
+static void NCR5380_print_status(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata;
+ struct scsi_cmnd *ptr;
+ unsigned long flags;
+
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+ hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+ local_irq_save(flags);
+ printk("NCR5380: coroutine is%s running.\n",
+ hostdata->main_running ? "" : "n't");
+ if (!hostdata->connected)
+ printk("scsi%d: no currently connected command\n", HOSTNO);
+ else
+ lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected);
+ printk("scsi%d: issue_queue\n", HOSTNO);
+ for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
+ lprint_Scsi_Cmnd(ptr);
+
+ printk("scsi%d: disconnected_queue\n", HOSTNO);
+ for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = NEXT(ptr))
+ lprint_Scsi_Cmnd(ptr);
+
+ local_irq_restore(flags);
+ printk("\n");
+}
+
+static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
+{
+ int i, s;
+ unsigned char *command;
+ seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
+ H_NO(cmd), cmd->device->id, cmd->device->lun);
+ seq_puts(m, " command = ");
+ command = cmd->cmnd;
+ seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
+ for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ seq_printf(m, " %02x", command[i]);
+ seq_putc(m, '\n');
+}
+
+static int __maybe_unused NCR5380_show_info(struct seq_file *m,
+ struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata;
+ struct scsi_cmnd *ptr;
+ unsigned long flags;
+
+ hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+ local_irq_save(flags);
+ seq_printf(m, "NCR5380: coroutine is%s running.\n",
+ hostdata->main_running ? "" : "n't");
+ if (!hostdata->connected)
+ seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
+ else
+ show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
+ seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
+ for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
+ show_Scsi_Cmnd(ptr, m);
+
+ seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
+ for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = NEXT(ptr))
+ show_Scsi_Cmnd(ptr, m);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+/**
+ * NCR5380_init - initialise an NCR5380
+ * @instance: adapter to configure
+ * @flags: control flags
+ *
+ * Initializes *instance and corresponding 5380 chip,
+ * with flags OR'd into the initial flags value.
+ *
+ * Notes : I assume that the host, hostno, and id bits have been
+ * set correctly. I don't care about the irq and other fields.
+ *
+ * Returns 0 for success
+ */
+
+static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
+{
+ int i;
+ SETUP_HOSTDATA(instance);
+
+ hostdata->host = instance;
+ hostdata->aborted = 0;
+ hostdata->id_mask = 1 << instance->this_id;
+ hostdata->id_higher_mask = 0;
+ for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
+ if (i > hostdata->id_mask)
+ hostdata->id_higher_mask |= i;
+ for (i = 0; i < 8; ++i)
+ hostdata->busy[i] = 0;
+#ifdef SUPPORT_TAGS
+ init_tags(hostdata);
+#endif
+#if defined (REAL_DMA)
+ hostdata->dma_len = 0;
+#endif
+ hostdata->targets_present = 0;
+ hostdata->connected = NULL;
+ hostdata->issue_queue = NULL;
+ hostdata->disconnected_queue = NULL;
+ hostdata->flags = flags;
+
+ INIT_WORK(&hostdata->main_task, NCR5380_main);
+
+ prepare_info(instance);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+ return 0;
+}
+
+/**
+ * NCR5380_exit - remove an NCR5380
+ * @instance: adapter to remove
+ *
+ * Assumes that no more work can be queued (e.g. by NCR5380_intr).
+ */
+
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ cancel_work_sync(&hostdata->main_task);
+}
+
+/**
+ * NCR5380_queue_command - queue a command
+ * @instance: the relevant SCSI adapter
+ * @cmd: SCSI command
+ *
+ * cmd is added to the per instance issue_queue, with minor
+ * twiddling done to the host specific fields of cmd. If the
+ * main coroutine is not running, it is restarted.
+ */
+
+static int NCR5380_queue_command(struct Scsi_Host *instance,
+ struct scsi_cmnd *cmd)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ struct scsi_cmnd *tmp;
+ unsigned long flags;
+
+#if (NDEBUG & NDEBUG_NO_WRITE)
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
+ H_NO(cmd));
+ cmd->result = (DID_ERROR << 16);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
+
+ /*
+ * We use the host_scribble field as a pointer to the next command
+ * in a queue
+ */
+
+ SET_NEXT(cmd, NULL);
+ cmd->result = 0;
+
+ /*
+ * Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists.
+ */
+
+ /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
+ * Otherwise a running NCR5380_main may steal the lock.
+ * Lock before actually inserting due to fairness reasons explained in
+ * atari_scsi.c. If we insert first, then it's impossible for this driver
+ * to release the lock.
+ * Stop timer for this command while waiting for the lock, or timeouts
+ * may happen (and they really do), and it's no good if the command doesn't
+ * appear in any of the queues.
+ * ++roman: Just disabling the NCR interrupt isn't sufficient here,
+ * because also a timer int can trigger an abort or reset, which would
+ * alter queues and touch the lock.
+ */
+ if (!NCR5380_acquire_dma_irq(instance))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ local_irq_save(flags);
+
+ /*
+ * Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists.
+ */
+
+ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ LIST(cmd, hostdata->issue_queue);
+ SET_NEXT(cmd, hostdata->issue_queue);
+ hostdata->issue_queue = cmd;
+ } else {
+ for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
+ NEXT(tmp); tmp = NEXT(tmp))
+ ;
+ LIST(cmd, tmp);
+ SET_NEXT(tmp, cmd);
+ }
+ local_irq_restore(flags);
+
+ dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
+ (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
+
+ /* If queue_command() is called from an interrupt (real one or bottom
+ * half), we let queue_main() do the job of taking care about main. If it
+ * is already running, this is a no-op, else main will be queued.
+ *
+ * If we're not in an interrupt, we can call NCR5380_main()
+ * unconditionally, because it cannot be already running.
+ */
+ if (in_interrupt() || irqs_disabled())
+ queue_main(hostdata);
+ else
+ NCR5380_main(&hostdata->main_task);
+ return 0;
+}
+
+static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ /* Caller does the locking needed to set & test these data atomically */
+ if (!hostdata->disconnected_queue &&
+ !hostdata->issue_queue &&
+ !hostdata->connected &&
+ !hostdata->retain_dma_intr)
+ NCR5380_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_main - NCR state machines
+ *
+ * NCR5380_main is a coroutine that runs as long as more work can
+ * be done on the NCR5380 host adapters in a system. Both
+ * NCR5380_queue_command() and NCR5380_intr() will try to start it
+ * in case it is not running.
+ *
+ * Locks: called as its own thread with no locks held.
+ */
+
+static void NCR5380_main(struct work_struct *work)
+{
+ struct NCR5380_hostdata *hostdata =
+ container_of(work, struct NCR5380_hostdata, main_task);
+ struct Scsi_Host *instance = hostdata->host;
+ struct scsi_cmnd *tmp, *prev;
+ int done;
+ unsigned long flags;
+
+ /*
+ * We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set main_running to 0 and exit.
+ *
+ * Interrupts are enabled before doing various other internal
+ * instructions, after we've decided that we need to run through
+ * the loop again.
+ *
+ * this should prevent any race conditions.
+ *
+ * ++roman: Just disabling the NCR interrupt isn't sufficient here,
+ * because also a timer int can trigger an abort or reset, which can
+ * alter queues and touch the Falcon lock.
+ */
+
+ /* Tell int handlers main() is now already executing. Note that
+ no races are possible here. If an int comes in before
+ 'main_running' is set here, and queues/executes main via the
+ task queue, it doesn't do any harm, just this instance of main
+ won't find any work left to do. */
+ if (hostdata->main_running)
+ return;
+ hostdata->main_running = 1;
+
+ local_save_flags(flags);
+ do {
+ local_irq_disable(); /* Freeze request queues */
+ done = 1;
+
+ if (!hostdata->connected) {
+ dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
+ /*
+ * Search through the issue_queue for a command destined
+ * for a target that's not busy.
+ */
+#if (NDEBUG & NDEBUG_LISTS)
+ for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
+ tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
+ ;
+ /*printk("%p ", tmp);*/
+ if ((tmp == prev) && tmp)
+ printk(" LOOP\n");
+ /* else printk("\n"); */
+#endif
+ for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
+ prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
+ u8 lun = tmp->device->lun;
+
+ dprintk(NDEBUG_LISTS,
+ "MAIN tmp=%p target=%d busy=%d lun=%d\n",
+ tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)],
+ lun);
+ /* When we find one, remove it from the issue queue. */
+ /* ++guenther: possible race with Falcon locking */
+ if (
+#ifdef SUPPORT_TAGS
+ !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
+#else
+ !(hostdata->busy[tmp->device->id] & (1 << lun))
+#endif
+ ) {
+ /* ++guenther: just to be sure, this must be atomic */
+ local_irq_disable();
+ if (prev) {
+ REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
+ SET_NEXT(prev, NEXT(tmp));
+ } else {
+ REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
+ hostdata->issue_queue = NEXT(tmp);
+ }
+ SET_NEXT(tmp, NULL);
+ hostdata->retain_dma_intr++;
+
+ /* reenable interrupts after finding one */
+ local_irq_restore(flags);
+
+ /*
+ * Attempt to establish an I_T_L nexus here.
+ * On success, instance->hostdata->connected is set.
+ * On failure, we must add the command back to the
+ * issue queue so we can keep trying.
+ */
+ dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
+ "lun %d removed from issue_queue\n",
+ HOSTNO, tmp->device->id, lun);
+ /*
+ * REQUEST SENSE commands are issued without tagged
+ * queueing, even on SCSI-II devices because the
+ * contingent allegiance condition exists for the
+ * entire unit.
+ */
+ /* ++roman: ...and the standard also requires that
+ * REQUEST SENSE command are untagged.
+ */
+
+#ifdef SUPPORT_TAGS
+ cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
+#endif
+ if (!NCR5380_select(instance, tmp)) {
+ local_irq_disable();
+ hostdata->retain_dma_intr--;
+ /* release if target did not response! */
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+ break;
+ } else {
+ local_irq_disable();
+ LIST(tmp, hostdata->issue_queue);
+ SET_NEXT(tmp, hostdata->issue_queue);
+ hostdata->issue_queue = tmp;
+#ifdef SUPPORT_TAGS
+ cmd_free_tag(tmp);
+#endif
+ hostdata->retain_dma_intr--;
+ local_irq_restore(flags);
+ dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
+ "returned to issue_queue\n", HOSTNO);
+ if (hostdata->connected)
+ break;
+ }
+ } /* if target/lun/target queue is not busy */
+ } /* for issue_queue */
+ } /* if (!hostdata->connected) */
+
+ if (hostdata->connected
+#ifdef REAL_DMA
+ && !hostdata->dma_len
+#endif
+ ) {
+ local_irq_restore(flags);
+ dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
+ HOSTNO);
+ NCR5380_information_transfer(instance);
+ dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
+ done = 0;
+ }
+ } while (!done);
+
+ /* Better allow ints _after_ 'main_running' has been cleared, else
+ an interrupt could believe we'll pick up the work it left for
+ us, but we won't see it anymore here... */
+ hostdata->main_running = 0;
+ local_irq_restore(flags);
+}
+
+
+#ifdef REAL_DMA
+/*
+ * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
+ *
+ * Purpose : Called by interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would finish the DMA transfer).
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ */
+
+static void NCR5380_dma_complete(struct Scsi_Host *instance)
+{
+ SETUP_HOSTDATA(instance);
+ int transferred;
+ unsigned char **data;
+ volatile int *count;
+ int saved_data = 0, overrun = 0;
+ unsigned char p;
+
+ if (!hostdata->connected) {
+ printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
+ "no connected cmd\n", HOSTNO);
+ return;
+ }
+
+ if (hostdata->read_overruns) {
+ p = hostdata->connected->SCp.phase;
+ if (p & SR_IO) {
+ udelay(10);
+ if ((NCR5380_read(BUS_AND_STATUS_REG) &
+ (BASR_PHASE_MATCH|BASR_ACK)) ==
+ (BASR_PHASE_MATCH|BASR_ACK)) {
+ saved_data = NCR5380_read(INPUT_DATA_REG);
+ overrun = 1;
+ dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
+ }
+ }
+ }
+
+ dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
+ HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
+ NCR5380_read(STATUS_REG));
+
+#if defined(CONFIG_SUN3)
+ if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+ pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
+ instance->host_no);
+ BUG();
+ }
+
+ /* make sure we're not stuck in a data phase */
+ if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
+ (BASR_PHASE_MATCH | BASR_ACK)) {
+ pr_err("scsi%d: BASR %02x\n", instance->host_no,
+ NCR5380_read(BUS_AND_STATUS_REG));
+ pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
+ instance->host_no);
+ BUG();
+ }
+#endif
+
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+ hostdata->dma_len = 0;
+
+ data = (unsigned char **)&hostdata->connected->SCp.ptr;
+ count = &hostdata->connected->SCp.this_residual;
+ *data += transferred;
+ *count -= transferred;
+
+ if (hostdata->read_overruns) {
+ int cnt, toPIO;
+
+ if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
+ cnt = toPIO = hostdata->read_overruns;
+ if (overrun) {
+ dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
+ *(*data)++ = saved_data;
+ (*count)--;
+ cnt--;
+ toPIO--;
+ }
+ dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
+ NCR5380_transfer_pio(instance, &p, &cnt, data);
+ *count -= toPIO - cnt;
+ }
+ }
+}
+#endif /* REAL_DMA */
+
+
+/**
+ * NCR5380_intr - generic NCR5380 irq handler
+ * @irq: interrupt number
+ * @dev_id: device info
+ *
+ * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
+ * from the disconnected queue, and restarting NCR5380_main()
+ * as required.
+ */
+
+static irqreturn_t NCR5380_intr(int irq, void *dev_id)
+{
+ struct Scsi_Host *instance = dev_id;
+ int done = 1, handled = 0;
+ unsigned char basr;
+
+ dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
+
+ /* Look for pending interrupts */
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
+ /* dispatch to appropriate routine if found and done=0 */
+ if (basr & BASR_IRQ) {
+ NCR5380_dprint(NDEBUG_INTR, instance);
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
+ done = 0;
+ dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
+ NCR5380_reselect(instance);
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if (basr & BASR_PARITY_ERROR) {
+ dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
+ dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+ /*
+ * The rest of the interrupt conditions can occur only during a
+ * DMA transfer
+ */
+
+#if defined(REAL_DMA)
+ /*
+ * We should only get PHASE MISMATCH and EOP interrupts if we have
+ * DMA enabled, so do a sanity check based on the current setting
+ * of the MODE register.
+ */
+
+ if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
+ ((basr & BASR_END_DMA_TRANSFER) ||
+ !(basr & BASR_PHASE_MATCH))) {
+
+ dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
+ NCR5380_dma_complete( instance );
+ done = 0;
+ } else
+#endif /* REAL_DMA */
+ {
+/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
+ if (basr & BASR_PHASE_MATCH)
+ dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
+ "BASR 0x%x, MR 0x%x, SR 0x%x\n",
+ HOSTNO, basr, NCR5380_read(MODE_REG),
+ NCR5380_read(STATUS_REG));
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
+ }
+ } /* if !(SELECTION || PARITY) */
+ handled = 1;
+ } /* BASR & IRQ */ else {
+ printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
+ "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
+ NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
+ }
+
+ if (!done) {
+ dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
+ /* Put a call to NCR5380_main() on the queue... */
+ queue_main(shost_priv(instance));
+ }
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Function : int NCR5380_select(struct Scsi_Host *instance,
+ * struct scsi_cmnd *cmd)
+ *
+ * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
+ * including ARBITRATION, SELECTION, and initial message out for
+ * IDENTIFY and queue messages.
+ *
+ * Inputs : instance - instantiation of the 5380 driver on which this
+ * target lives, cmd - SCSI command to execute.
+ *
+ * Returns : -1 if selection could not execute for some reason,
+ * 0 if selection succeeded or failed because the target
+ * did not respond.
+ *
+ * Side effects :
+ * If bus busy, arbitration failed, etc, NCR5380_select() will exit
+ * with registers as they should have been on entry - ie
+ * SELECT_ENABLE will be set appropriately, the NCR5380
+ * will cease to drive any SCSI bus signals.
+ *
+ * If successful : I_T_L or I_T_L_Q nexus will be established,
+ * instance->connected will be set to cmd.
+ * SELECT interrupt will be disabled.
+ *
+ * If failed (no target) : cmd->scsi_done() will be called, and the
+ * cmd->result host byte set to DID_BAD_TARGET.
+ */
+
+static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+{
+ SETUP_HOSTDATA(instance);
+ unsigned char tmp[3], phase;
+ unsigned char *data;
+ int len;
+ unsigned long timeout;
+ unsigned long flags;
+
+ hostdata->restart_select = 0;
+ NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
+ instance->this_id);
+
+ /*
+ * Set the phase bits to 0, otherwise the NCR5380 won't drive the
+ * data bus during SELECTION.
+ */
+
+ local_irq_save(flags);
+ if (hostdata->connected) {
+ local_irq_restore(flags);
+ return -1;
+ }
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /*
+ * Start arbitration.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(MODE_REG, MR_ARBITRATE);
+
+ local_irq_restore(flags);
+
+ /* Wait for arbitration logic to complete */
+#if defined(NCR_TIMEOUT)
+ {
+ unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
+
+ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
+ time_before(jiffies, timeout) && !hostdata->connected)
+ ;
+ if (time_after_eq(jiffies, timeout)) {
+ printk("scsi : arbitration timeout at %d\n", __LINE__);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ }
+#else /* NCR_TIMEOUT */
+ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
+ !hostdata->connected)
+ ;
+#endif
+
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
+
+ if (hostdata->connected) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ return -1;
+ }
+ /*
+ * The arbitration delay is 2.2us, but this is a minimum and there is
+ * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
+ * the integral nature of udelay().
+ *
+ */
+
+ udelay(3);
+
+ /* Check for lost arbitration */
+ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
+ (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
+ (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
+ hostdata->connected) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
+ HOSTNO);
+ return -1;
+ }
+
+ /* after/during arbitration, BSY should be asserted.
+ IBM DPES-31080 Version S31Q works now */
+ /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
+ NCR5380_write(INITIATOR_COMMAND_REG,
+ ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY);
+
+ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
+ hostdata->connected) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
+ HOSTNO);
+ return -1;
+ }
+
+ /*
+ * Again, bus clear + bus settle time is 1.2us, however, this is
+ * a minimum so we'll udelay ceil(1.2)
+ */
+
+#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
+ /* ++roman: But some targets (see above :-) seem to need a bit more... */
+ udelay(15);
+#else
+ udelay(2);
+#endif
+
+ if (hostdata->connected) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ return -1;
+ }
+
+ dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
+
+ /*
+ * Now that we have won arbitration, start Selection process, asserting
+ * the host and target ID's on the SCSI bus.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));
+
+ /*
+ * Raise ATN while SEL is true before BSY goes false from arbitration,
+ * since this is the only way to guarantee that we'll get a MESSAGE OUT
+ * phase immediately after selection.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
+ NCR5380_write(MODE_REG, MR_BASE);
+
+ /*
+ * Reselect interrupts must be turned off prior to the dropping of BSY,
+ * otherwise we will trigger an interrupt.
+ */
+
+ if (hostdata->connected) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ return -1;
+ }
+
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+ /*
+ * The initiator shall then wait at least two deskew delays and release
+ * the BSY signal.
+ */
+ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
+
+ /* Reset BSY */
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
+ ICR_ASSERT_ATN | ICR_ASSERT_SEL));
+
+ /*
+ * Something weird happens when we cease to drive BSY - looks
+ * like the board/chip is letting us do another read before the
+ * appropriate propagation delay has expired, and we're confusing
+ * a BSY signal from ourselves as the target's response to SELECTION.
+ *
+ * A small delay (the 'C++' frontend breaks the pipeline with an
+ * unnecessary jump, making it work on my 386-33/Trantor T128, the
+ * tighter 'C' code breaks and requires this) solves the problem -
+ * the 1 us delay is arbitrary, and only used because this delay will
+ * be the same on other platforms and since it works here, it should
+ * work there.
+ *
+ * wingel suggests that this could be due to failing to wait
+ * one deskew delay.
+ */
+
+ udelay(1);
+
+ dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
+
+ /*
+ * The SCSI specification calls for a 250 ms timeout for the actual
+ * selection.
+ */
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ /*
+ * XXX very interesting - we're seeing a bounce where the BSY we
+ * asserted is being reflected / still asserted (propagation delay?)
+ * and it's detecting as true. Sigh.
+ */
+
+#if 0
+ /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
+ * IO while SEL is true. But again, there are some disks out the in the
+ * world that do that nevertheless. (Somebody claimed that this announces
+ * reselection capability of the target.) So we better skip that test and
+ * only wait for BSY... (Famous german words: Der Klügere gibt nach :-)
+ */
+
+ while (time_before(jiffies, timeout) &&
+ !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)))
+ ;
+
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_reselect(instance);
+ printk(KERN_ERR "scsi%d: reselection after won arbitration?\n",
+ HOSTNO);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+#else
+ while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY))
+ ;
+#endif
+
+ /*
+ * No less than two deskew delays after the initiator detects the
+ * BSY signal is true, it shall release the SEL signal and may
+ * change the DATA BUS. -wingel
+ */
+
+ udelay(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ if (hostdata->targets_present & (1 << cmd->device->id)) {
+ printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
+ if (hostdata->restart_select)
+ printk(KERN_NOTICE "\trestart select\n");
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ cmd->result = DID_BAD_TARGET << 16;
+#ifdef SUPPORT_TAGS
+ cmd_free_tag(cmd);
+#endif
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return 0;
+ }
+
+ hostdata->targets_present |= (1 << cmd->device->id);
+
+ /*
+ * Since we followed the SCSI spec, and raised ATN while SEL
+ * was true but before BSY was false during selection, the information
+ * transfer phase should be a MESSAGE OUT phase so that we can send the
+ * IDENTIFY message.
+ *
+ * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
+ * message (2 bytes) with a tag ID that we increment with every command
+ * until it wraps back to 0.
+ *
+ * XXX - it turns out that there are some broken SCSI-II devices,
+ * which claim to support tagged queuing but fail when more than
+ * some number of commands are issued at once.
+ */
+
+ /* Wait for start of REQ/ACK handshake */
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ))
+ ;
+
+ dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
+ HOSTNO, cmd->device->id);
+ tmp[0] = IDENTIFY(1, cmd->device->lun);
+
+#ifdef SUPPORT_TAGS
+ if (cmd->tag != TAG_NONE) {
+ tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
+ tmp[2] = cmd->tag;
+ len = 3;
+ } else
+ len = 1;
+#else
+ len = 1;
+ cmd->tag = 0;
+#endif /* SUPPORT_TAGS */
+
+ /* Send message(s) */
+ data = tmp;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
+ /* XXX need to handle errors here */
+ hostdata->connected = cmd;
+#ifndef SUPPORT_TAGS
+ hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+#endif
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_INTR;
+#endif
+
+ initialize_SCp(cmd);
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using polled I/O
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * maximum number of bytes, 0 if all bytes are transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ * XXX Note : handling for bus free may be useful.
+ */
+
+/*
+ * Note : this code is not as quick as it could be, however it
+ * IS 100% reliable, and for the actual data transfer where speed
+ * counts, we will always do a pseudo DMA or DMA transfer.
+ */
+
+static int NCR5380_transfer_pio(struct Scsi_Host *instance,
+ unsigned char *phase, int *count,
+ unsigned char **data)
+{
+ register unsigned char p = *phase, tmp;
+ register int c = *count;
+ register unsigned char *d = *data;
+
+ /*
+ * The NCR5380 chip will only drive the SCSI bus when the
+ * phase specified in the appropriate bits of the TARGET COMMAND
+ * REGISTER match the STATUS REGISTER
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+ do {
+ /*
+ * Wait for assertion of REQ, after which the phase bits will be
+ * valid
+ */
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
+ ;
+
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
+
+ /* Check for phase mismatch */
+ if ((tmp & PHASE_MASK) != p) {
+ dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
+ NCR5380_dprint_phase(NDEBUG_PIO, instance);
+ break;
+ }
+
+ /* Do actual transfer from SCSI bus to / from memory */
+ if (!(p & SR_IO))
+ NCR5380_write(OUTPUT_DATA_REG, *d);
+ else
+ *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
+
+ ++d;
+
+ /*
+ * The SCSI standard suggests that in MSGOUT phase, the initiator
+ * should drop ATN on the last byte of the message phase
+ * after REQ has been asserted for the handshake but before
+ * the initiator raises ACK.
+ */
+
+ if (!(p & SR_IO)) {
+ if (!((p & SR_MSG) && c > 1)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+ NCR5380_dprint(NDEBUG_PIO, instance);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ACK);
+ } else {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN);
+ NCR5380_dprint(NDEBUG_PIO, instance);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ }
+ } else {
+ NCR5380_dprint(NDEBUG_PIO, instance);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+ }
+
+ while (NCR5380_read(STATUS_REG) & SR_REQ)
+ ;
+
+ dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
+
+ /*
+ * We have several special cases to consider during REQ/ACK handshaking :
+ * 1. We were in MSGOUT phase, and we are on the last byte of the
+ * message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in a MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+ * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
+ if (!(p == PHASE_MSGIN && c == 1)) {
+ if (p == PHASE_MSGOUT && c > 1)
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ else
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+ } while (--c);
+
+ dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
+
+ *count = c;
+ *data = d;
+ tmp = NCR5380_read(STATUS_REG);
+ /* The phase read from the bus is valid if either REQ is (already)
+ * asserted or if ACK hasn't been released yet. The latter is the case if
+ * we're in MSGIN and all wanted bytes have been received.
+ */
+ if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
+ *phase = tmp & PHASE_MASK;
+ else
+ *phase = PHASE_UNKNOWN;
+
+ if (!c || (*phase == p))
+ return 0;
+ else
+ return -1;
+}
+
+/*
+ * Function : do_abort (Scsi_Host *host)
+ *
+ * Purpose : abort the currently established nexus. Should only be
+ * called from a routine which can drop into a
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int do_abort(struct Scsi_Host *instance)
+{
+ unsigned char tmp, *msgptr, phase;
+ int len;
+
+ /* Request message out phase */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ /*
+ * Wait for the target to indicate a valid phase by asserting
+ * REQ. Once this happens, we'll have either a MSGOUT phase
+ * and can immediately send the ABORT message, or we'll have some
+ * other phase and will have to source/sink data.
+ *
+ * We really don't care what value was on the bus or what value
+ * the target sees, so we just handshake.
+ */
+
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
+ ;
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ)
+ ;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ }
+
+ tmp = ABORT;
+ msgptr = &tmp;
+ len = 1;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+
+ /*
+ * If we got here, and the command completed successfully,
+ * we're about to go into bus free state.
+ */
+
+ return len ? -1 : 0;
+}
+
+#if defined(REAL_DMA)
+/*
+ * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using either real
+ * or pseudo DMA.
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when different phase is entered without transferring
+ * maximum number of bytes, 0 if all bytes or transferred or exit
+ * is in same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ */
+
+
+static int NCR5380_transfer_dma(struct Scsi_Host *instance,
+ unsigned char *phase, int *count,
+ unsigned char **data)
+{
+ SETUP_HOSTDATA(instance);
+ register int c = *count;
+ register unsigned char p = *phase;
+ unsigned long flags;
+
+#if defined(CONFIG_SUN3)
+ /* sanity check */
+ if (!sun3_dma_setup_done) {
+ pr_err("scsi%d: transfer_dma without setup!\n",
+ instance->host_no);
+ BUG();
+ }
+ hostdata->dma_len = c;
+
+ dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+ instance->host_no, (p & SR_IO) ? "reading" : "writing",
+ c, (p & SR_IO) ? "to" : "from", *data);
+
+ /* netbsd turns off ints here, why not be safe and do it too */
+ local_irq_save(flags);
+
+ /* send start chain */
+ sun3scsi_dma_start(c, *data);
+
+ if (p & SR_IO) {
+ NCR5380_write(TARGET_COMMAND_REG, 1);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(INITIATOR_COMMAND_REG, 0);
+ NCR5380_write(MODE_REG,
+ (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
+ NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
+ } else {
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
+ NCR5380_write(MODE_REG,
+ (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
+ NCR5380_write(START_DMA_SEND_REG, 0);
+ }
+
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
+
+ local_irq_restore(flags);
+
+ sun3_dma_active = 1;
+
+#else /* !defined(CONFIG_SUN3) */
+ register unsigned char *d = *data;
+ unsigned char tmp;
+
+ if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
+ *phase = tmp;
+ return -1;
+ }
+
+ if (hostdata->read_overruns && (p & SR_IO))
+ c -= hostdata->read_overruns;
+
+ dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+ HOSTNO, (p & SR_IO) ? "reading" : "writing",
+ c, (p & SR_IO) ? "to" : "from", d);
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+#ifdef REAL_DMA
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
+#endif /* def REAL_DMA */
+
+ if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
+ /* On the Medusa, it is a must to initialize the DMA before
+ * starting the NCR. This is also the cleaner way for the TT.
+ */
+ local_irq_save(flags);
+ hostdata->dma_len = (p & SR_IO) ?
+ NCR5380_dma_read_setup(instance, d, c) :
+ NCR5380_dma_write_setup(instance, d, c);
+ local_irq_restore(flags);
+ }
+
+ if (p & SR_IO)
+ NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
+ else {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+ NCR5380_write(START_DMA_SEND_REG, 0);
+ }
+
+ if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
+ /* On the Falcon, the DMA setup must be done after the last */
+ /* NCR access, else the DMA setup gets trashed!
+ */
+ local_irq_save(flags);
+ hostdata->dma_len = (p & SR_IO) ?
+ NCR5380_dma_read_setup(instance, d, c) :
+ NCR5380_dma_write_setup(instance, d, c);
+ local_irq_restore(flags);
+ }
+#endif /* !defined(CONFIG_SUN3) */
+
+ return 0;
+}
+#endif /* defined(REAL_DMA) */
+
+/*
+ * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
+ *
+ * Purpose : run through the various SCSI phases and do as the target
+ * directs us to. Operates on the currently connected command,
+ * instance->connected.
+ *
+ * Inputs : instance, instance for which we are doing commands
+ *
+ * Side effects : SCSI things happen, the disconnected queue will be
+ * modified if a command disconnects, *instance->connected will
+ * change.
+ *
+ * XXX Note : we need to watch for bus free or a reset condition here
+ * to recover from an unexpected bus free condition.
+ */
+
+static void NCR5380_information_transfer(struct Scsi_Host *instance)
+{
+ SETUP_HOSTDATA(instance);
+ unsigned long flags;
+ unsigned char msgout = NOP;
+ int sink = 0;
+ int len;
+#if defined(REAL_DMA)
+ int transfersize;
+#endif
+ unsigned char *data;
+ unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
+ struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
+
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_INTR;
+#endif
+
+ while (1) {
+ tmp = NCR5380_read(STATUS_REG);
+ /* We only have a valid SCSI phase when REQ is asserted */
+ if (tmp & SR_REQ) {
+ phase = (tmp & PHASE_MASK);
+ if (phase != old_phase) {
+ old_phase = phase;
+ NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
+ }
+#if defined(CONFIG_SUN3)
+ if (phase == PHASE_CMDOUT) {
+#if defined(REAL_DMA)
+ void *d;
+ unsigned long count;
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ count = cmd->SCp.buffer->length;
+ d = sg_virt(cmd->SCp.buffer);
+ } else {
+ count = cmd->SCp.this_residual;
+ d = cmd->SCp.ptr;
+ }
+ /* this command setup for dma yet? */
+ if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) {
+ if (cmd->request->cmd_type == REQ_TYPE_FS) {
+ sun3scsi_dma_setup(d, count,
+ rq_data_dir(cmd->request));
+ sun3_dma_setup_done = cmd;
+ }
+ }
+#endif
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_INTR;
+#endif
+ }
+#endif /* CONFIG_SUN3 */
+
+ if (sink && (phase != PHASE_MSGOUT)) {
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ)
+ ;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 0;
+ continue;
+ }
+
+ switch (phase) {
+ case PHASE_DATAOUT:
+#if (NDEBUG & NDEBUG_NO_DATAOUT)
+ printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
+ "aborted\n", HOSTNO);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ return;
+#endif
+ case PHASE_DATAIN:
+ /*
+ * If there is no room left in the current buffer in the
+ * scatter-gather list, move onto the next one.
+ */
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ /* ++roman: Try to merge some scatter-buffers if
+ * they are at contiguous physical addresses.
+ */
+ merge_contiguous_buffers(cmd);
+ dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
+ HOSTNO, cmd->SCp.this_residual,
+ cmd->SCp.buffers_residual);
+ }
+
+ /*
+ * The preferred transfer method is going to be
+ * PSEUDO-DMA for systems that are strictly PIO,
+ * since we can let the hardware do the handshaking.
+ *
+ * For this to work, we need to know the transfersize
+ * ahead of time, since the pseudo-DMA code will sit
+ * in an unconditional loop.
+ */
+
+ /* ++roman: I suggest, this should be
+ * #if def(REAL_DMA)
+ * instead of leaving REAL_DMA out.
+ */
+
+#if defined(REAL_DMA)
+ if (
+#if !defined(CONFIG_SUN3)
+ !cmd->device->borken &&
+#endif
+ (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) {
+ len = transfersize;
+ cmd->SCp.phase = phase;
+ if (NCR5380_transfer_dma(instance, &phase,
+ &len, (unsigned char **)&cmd->SCp.ptr)) {
+ /*
+ * If the watchdog timer fires, all future
+ * accesses to this device will use the
+ * polled-IO. */
+ scmd_printk(KERN_INFO, cmd,
+ "switching to slow handshake\n");
+ cmd->device->borken = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ /* XXX - need to source or sink data here, as appropriate */
+ } else {
+#ifdef REAL_DMA
+ /* ++roman: When using real DMA,
+ * information_transfer() should return after
+ * starting DMA since it has nothing more to
+ * do.
+ */
+ return;
+#else
+ cmd->SCp.this_residual -= transfersize - len;
+#endif
+ }
+ } else
+#endif /* defined(REAL_DMA) */
+ NCR5380_transfer_pio(instance, &phase,
+ (int *)&cmd->SCp.this_residual,
+ (unsigned char **)&cmd->SCp.ptr);
+#if defined(CONFIG_SUN3) && defined(REAL_DMA)
+ /* if we had intended to dma that command clear it */
+ if (sun3_dma_setup_done == cmd)
+ sun3_dma_setup_done = NULL;
+#endif
+ break;
+ case PHASE_MSGIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Message = tmp;
+
+ switch (tmp) {
+ /*
+ * Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ *
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands.
+ */
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command "
+ "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
+
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /*
+ * Sanity check : A linked command should only terminate
+ * with one of these messages if there are more linked
+ * commands available.
+ */
+
+ if (!cmd->next_link) {
+ printk(KERN_NOTICE "scsi%d: target %d lun %llu "
+ "linked command complete, no next_link\n",
+ HOSTNO, cmd->device->id, cmd->device->lun);
+ sink = 1;
+ do_abort(instance);
+ return;
+ }
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process; copy it
+ * and don't free it! */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
+ "done, calling scsi_done().\n",
+ HOSTNO, cmd->device->id, cmd->device->lun);
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+#endif /* def LINKED */
+ case ABORT:
+ case COMMAND_COMPLETE:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
+ "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
+
+ local_irq_save(flags);
+ hostdata->retain_dma_intr++;
+ hostdata->connected = NULL;
+#ifdef SUPPORT_TAGS
+ cmd_free_tag(cmd);
+ if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
+ /* Turn a QUEUE FULL status into BUSY, I think the
+ * mid level cannot handle QUEUE FULL :-( (The
+ * command is retried after BUSY). Also update our
+ * queue size to the number of currently issued
+ * commands now.
+ */
+ /* ++Andreas: the mid level code knows about
+ QUEUE_FULL now. */
+ struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun];
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
+ "QUEUE_FULL after %d commands\n",
+ HOSTNO, cmd->device->id, cmd->device->lun,
+ ta->nr_allocated);
+ if (ta->queue_size > ta->nr_allocated)
+ ta->nr_allocated = ta->queue_size;
+ }
+#else
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+#endif
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
+ /*
+ * I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ *
+ * If it was a REQUEST SENSE command, we need some way to
+ * differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the
+ * result code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed
+ */
+
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (status_byte(cmd->SCp.Status) != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+
+ if ((cmd->cmnd[0] == REQUEST_SENSE) &&
+ hostdata->ses.cmd_len) {
+ scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ hostdata->ses.cmd_len = 0 ;
+ }
+
+ if ((cmd->cmnd[0] != REQUEST_SENSE) &&
+ (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
+ scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
+
+ dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
+
+ LIST(cmd,hostdata->issue_queue);
+ SET_NEXT(cmd, hostdata->issue_queue);
+ hostdata->issue_queue = (struct scsi_cmnd *) cmd;
+ dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
+ "issue queue\n", H_NO(cmd));
+ } else {
+ cmd->scsi_done(cmd);
+ }
+
+ local_irq_restore(flags);
+
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+
+ local_irq_save(flags);
+ hostdata->retain_dma_intr--;
+ /* ++roman: For Falcon SCSI, release the lock on the
+ * ST-DMA here if no other commands are waiting on the
+ * disconnected queue.
+ */
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+ return;
+ case MESSAGE_REJECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ switch (hostdata->last_message) {
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ /* The target obviously doesn't support tagged
+ * queuing, even though it announced this ability in
+ * its INQUIRY data ?!? (maybe only this LUN?) Ok,
+ * clear 'tagged_supported' and lock the LUN, since
+ * the command is treated as untagged further on.
+ */
+ cmd->device->tagged_supported = 0;
+ hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+ cmd->tag = TAG_NONE;
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected "
+ "QUEUE_TAG message; tagged queuing "
+ "disabled\n",
+ HOSTNO, cmd->device->id, cmd->device->lun);
+ break;
+ }
+ break;
+ case DISCONNECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ local_irq_save(flags);
+ cmd->device->disconnect = 1;
+ LIST(cmd,hostdata->disconnected_queue);
+ SET_NEXT(cmd, hostdata->disconnected_queue);
+ hostdata->connected = NULL;
+ hostdata->disconnected_queue = cmd;
+ local_irq_restore(flags);
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was "
+ "moved from connected to the "
+ "disconnected_queue\n", HOSTNO,
+ cmd->device->id, cmd->device->lun);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /* Wait for bus free to avoid nasty timeouts */
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
+ return;
+ /*
+ * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible.
+ */
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ break;
+ case EXTENDED_MESSAGE:
+ /*
+ * Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't
+ * include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ *
+ * Start the extended message buffer with the EXTENDED_MESSAGE
+ * byte, since spi_print_msg() wants the whole thing.
+ */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ /* Accept first byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
+
+ len = 2;
+ data = extended_msg + 1;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+ (int)extended_msg[1], (int)extended_msg[2]);
+
+ if (!len && extended_msg[1] <=
+ (sizeof(extended_msg) - 1)) {
+ /* Accept third byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = extended_msg[1] - 1;
+ data = extended_msg + 3;
+ phase = PHASE_MSGIN;
+
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
+ HOSTNO, len);
+
+ switch (extended_msg[2]) {
+ case EXTENDED_SDTR:
+ case EXTENDED_WDTR:
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ tmp = 0;
+ }
+ } else if (len) {
+ printk(KERN_NOTICE "scsi%d: error receiving "
+ "extended message\n", HOSTNO);
+ tmp = 0;
+ } else {
+ printk(KERN_NOTICE "scsi%d: extended message "
+ "code %02x length %d is too long\n",
+ HOSTNO, extended_msg[2], extended_msg[1]);
+ tmp = 0;
+ }
+ /* Fall through to reject message */
+
+ /*
+ * If we get something weird that we aren't expecting,
+ * reject it.
+ */
+ default:
+ if (!tmp) {
+ printk(KERN_INFO "scsi%d: rejecting message ",
+ instance->host_no);
+ spi_print_msg(extended_msg);
+ printk("\n");
+ } else if (tmp != EXTENDED_MESSAGE)
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown message %02x\n",
+ tmp);
+ else
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown extended message code %02x, length %d\n",
+ extended_msg[1], extended_msg[0]);
+
+ msgout = MESSAGE_REJECT;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ break;
+ } /* switch (tmp) */
+ break;
+ case PHASE_MSGOUT:
+ len = 1;
+ data = &msgout;
+ hostdata->last_message = msgout;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (msgout == ABORT) {
+ local_irq_save(flags);
+#ifdef SUPPORT_TAGS
+ cmd_free_tag(cmd);
+#else
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+#endif
+ hostdata->connected = NULL;
+ cmd->result = DID_ERROR << 16;
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ msgout = NOP;
+ break;
+ case PHASE_CMDOUT:
+ len = cmd->cmd_len;
+ data = cmd->cmnd;
+ /*
+ * XXX for performance reasons, on machines with a
+ * PSEUDO-DMA architecture we should probably
+ * use the dma transfer function.
+ */
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ break;
+ case PHASE_STATIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Status = tmp;
+ break;
+ default:
+ printk("scsi%d: unknown phase\n", HOSTNO);
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ } /* switch(phase) */
+ } /* if (tmp * SR_REQ) */
+ } /* while (1) */
+}
+
+/*
+ * Function : void NCR5380_reselect (struct Scsi_Host *instance)
+ *
+ * Purpose : does reselection, initializing the instance->connected
+ * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
+ * nexus has been reestablished,
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ */
+
+
+/* it might eventually prove necessary to do a dma setup on
+ reselection, but it doesn't seem to be needed now -- sam */
+
+static void NCR5380_reselect(struct Scsi_Host *instance)
+{
+ SETUP_HOSTDATA(instance);
+ unsigned char target_mask;
+ unsigned char lun;
+#ifdef SUPPORT_TAGS
+ unsigned char tag;
+#endif
+ unsigned char msg[3];
+ int __maybe_unused len;
+ unsigned char __maybe_unused *data, __maybe_unused phase;
+ struct scsi_cmnd *tmp = NULL, *prev;
+
+ /*
+ * Disable arbitration, etc. since the host adapter obviously
+ * lost, and tell an interrupted NCR5380_select() to restart.
+ */
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ hostdata->restart_select = 1;
+
+ target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
+
+ dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
+
+ /*
+ * At this point, we have detected that our SCSI ID is on the bus,
+ * SEL is true and BSY was false for at least one bus settle delay
+ * (400 ns).
+ *
+ * We must assert BSY ourselves, until the target drops the SEL
+ * signal.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
+
+ while (NCR5380_read(STATUS_REG) & SR_SEL)
+ ;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * Wait for target to go into MSGIN.
+ */
+
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ))
+ ;
+
+#if defined(CONFIG_SUN3) && defined(REAL_DMA)
+ /* acknowledge toggle to MSGIN */
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
+
+ /* peek at the byte without really hitting the bus */
+ msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
+#else
+ len = 1;
+ data = msg;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+#endif
+
+ if (!(msg[0] & 0x80)) {
+ printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
+ spi_print_msg(msg);
+ do_abort(instance);
+ return;
+ }
+ lun = (msg[0] & 0x07);
+
+#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3)
+ /* If the phase is still MSGIN, the target wants to send some more
+ * messages. In case it supports tagged queuing, this is probably a
+ * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
+ */
+ tag = TAG_NONE;
+ if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) {
+ /* Accept previous IDENTIFY message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = 2;
+ data = msg + 1;
+ if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
+ msg[1] == SIMPLE_QUEUE_TAG)
+ tag = msg[2];
+ dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
+ "reselection\n", HOSTNO, target_mask, lun, tag);
+ }
+#endif
+
+ /*
+ * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue.
+ */
+
+ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = NEXT(tmp)) {
+ if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
+#ifdef SUPPORT_TAGS
+ && (tag == tmp->tag)
+#endif
+ ) {
+ if (prev) {
+ REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
+ SET_NEXT(prev, NEXT(tmp));
+ } else {
+ REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
+ hostdata->disconnected_queue = NEXT(tmp);
+ }
+ SET_NEXT(tmp, NULL);
+ break;
+ }
+ }
+
+ if (!tmp) {
+ printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
+#ifdef SUPPORT_TAGS
+ "tag %d "
+#endif
+ "not in disconnected_queue.\n",
+ HOSTNO, target_mask, lun
+#ifdef SUPPORT_TAGS
+ , tag
+#endif
+ );
+ /*
+ * Since we have an established nexus that we can't do anything
+ * with, we must abort it.
+ */
+ do_abort(instance);
+ return;
+ }
+
+#if defined(CONFIG_SUN3) && defined(REAL_DMA)
+ /* engage dma setup for the command we just saw */
+ {
+ void *d;
+ unsigned long count;
+
+ if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
+ count = tmp->SCp.buffer->length;
+ d = sg_virt(tmp->SCp.buffer);
+ } else {
+ count = tmp->SCp.this_residual;
+ d = tmp->SCp.ptr;
+ }
+ /* setup this command for dma if not already */
+ if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) {
+ sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
+ sun3_dma_setup_done = tmp;
+ }
+ }
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+#endif
+
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3)
+ /* If the phase is still MSGIN, the target wants to send some more
+ * messages. In case it supports tagged queuing, this is probably a
+ * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
+ */
+ tag = TAG_NONE;
+ if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
+ /* Accept previous IDENTIFY message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = 2;
+ data = msg + 1;
+ if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
+ msg[1] == SIMPLE_QUEUE_TAG)
+ tag = msg[2];
+ dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n"
+ HOSTNO, target_mask, lun, tag);
+ }
+#endif
+
+ hostdata->connected = tmp;
+ dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
+ HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
+}
+
+
+/*
+ * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
+ *
+ * Purpose : abort a command
+ *
+ * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
+ * host byte of the result field to, if zero DID_ABORTED is
+ * used.
+ *
+ * Returns : SUCCESS - success, FAILED on failure.
+ *
+ * XXX - there is no way to abort the command that is currently
+ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+ * called where the loop started in NCR5380_main().
+ */
+
+static
+int NCR5380_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ SETUP_HOSTDATA(instance);
+ struct scsi_cmnd *tmp, **prev;
+ unsigned long flags;
+
+ scmd_printk(KERN_NOTICE, cmd, "aborting command\n");
+
+ NCR5380_print_status(instance);
+
+ local_irq_save(flags);
+
+ dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+ NCR5380_read(BUS_AND_STATUS_REG),
+ NCR5380_read(STATUS_REG));
+
+#if 1
+ /*
+ * Case 1 : If the command is the currently executing command,
+ * we'll set the aborted flag and return control so that
+ * information transfer routine can exit cleanly.
+ */
+
+ if (hostdata->connected == cmd) {
+
+ dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
+ /*
+ * We should perform BSY checking, and make sure we haven't slipped
+ * into BUS FREE.
+ */
+
+ /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
+ /*
+ * Since we can't change phases until we've completed the current
+ * handshake, we have to source or sink a byte of data if the current
+ * phase is not MSGOUT.
+ */
+
+ /*
+ * Return control to the executing NCR drive so we can clear the
+ * aborted flag and get back into our main loop.
+ */
+
+ if (do_abort(instance) == 0) {
+ hostdata->aborted = 1;
+ hostdata->connected = NULL;
+ cmd->result = DID_ABORT << 16;
+#ifdef SUPPORT_TAGS
+ cmd_free_tag(cmd);
+#else
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+#endif
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+ cmd->scsi_done(cmd);
+ return SUCCESS;
+ } else {
+ local_irq_restore(flags);
+ printk("scsi%d: abort of connected command failed!\n", HOSTNO);
+ return FAILED;
+ }
+ }
+#endif
+
+ /*
+ * Case 2 : If the command hasn't been issued yet, we simply remove it
+ * from the issue queue.
+ */
+ for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue),
+ tmp = (struct scsi_cmnd *)hostdata->issue_queue;
+ tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
+ if (cmd == tmp) {
+ REMOVE(5, *prev, tmp, NEXT(tmp));
+ (*prev) = NEXT(tmp);
+ SET_NEXT(tmp, NULL);
+ tmp->result = DID_ABORT << 16;
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+ dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
+ HOSTNO);
+ /* Tagged queuing note: no tag to free here, hasn't been assigned
+ * yet... */
+ tmp->scsi_done(tmp);
+ return SUCCESS;
+ }
+ }
+
+ /*
+ * Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected) {
+ local_irq_restore(flags);
+ dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+ return FAILED;
+ }
+
+ /*
+ * Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message.
+ *
+ * This case is especially ugly. In order to reestablish the nexus, we
+ * need to call NCR5380_select(). The easiest way to implement this
+ * function was to abort if the bus was busy, and let the interrupt
+ * handler triggered on the SEL for reselect take care of lost arbitrations
+ * where necessary, meaning interrupts need to be enabled.
+ *
+ * When interrupts are enabled, the queues may change - so we
+ * can't remove it from the disconnected queue before selecting it
+ * because that could cause a failure in hashing the nexus if that
+ * device reselected.
+ *
+ * Since the queues may change, we can't use the pointers from when we
+ * first locate it.
+ *
+ * So, we must first locate the command, and if NCR5380_select()
+ * succeeds, then issue the abort, relocate the command and remove
+ * it from the disconnected queue.
+ */
+
+ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
+ tmp = NEXT(tmp)) {
+ if (cmd == tmp) {
+ local_irq_restore(flags);
+ dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
+
+ if (NCR5380_select(instance, cmd))
+ return FAILED;
+
+ dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
+
+ do_abort(instance);
+
+ local_irq_save(flags);
+ for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue),
+ tmp = (struct scsi_cmnd *)hostdata->disconnected_queue;
+ tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
+ if (cmd == tmp) {
+ REMOVE(5, *prev, tmp, NEXT(tmp));
+ *prev = NEXT(tmp);
+ SET_NEXT(tmp, NULL);
+ tmp->result = DID_ABORT << 16;
+ /* We must unlock the tag/LUN immediately here, since the
+ * target goes to BUS FREE and doesn't send us another
+ * message (COMMAND_COMPLETE or the like)
+ */
+#ifdef SUPPORT_TAGS
+ cmd_free_tag(tmp);
+#else
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+#endif
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+ tmp->scsi_done(tmp);
+ return SUCCESS;
+ }
+ }
+ }
+ }
+
+ /* Maybe it is sufficient just to release the ST-DMA lock... (if
+ * possible at all) At least, we should check if the lock could be
+ * released after the abort, in case it is kept due to some bug.
+ */
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+
+ /*
+ * Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+ printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
+
+ return FAILED;
+}
+
+
+/*
+ * Function : int NCR5380_reset (struct scsi_cmnd *cmd)
+ *
+ * Purpose : reset the SCSI bus.
+ *
+ * Returns : SUCCESS or FAILURE
+ *
+ */
+
+static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ int i;
+ unsigned long flags;
+
+ NCR5380_print_status(instance);
+
+ /* get in phase */
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG)));
+ /* assert RST */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
+ udelay(40);
+ /* reset NCR registers */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ /* ++roman: reset interrupt condition! otherwise no interrupts don't get
+ * through anymore ... */
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ /* After the reset, there are no more connected or disconnected commands
+ * and no busy units; so clear the low-level status here to avoid
+ * conflicts when the mid-level code tries to wake up the affected
+ * commands!
+ */
+
+ if (hostdata->issue_queue)
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+ if (hostdata->connected)
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
+ if (hostdata->disconnected_queue)
+ dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+
+ local_irq_save(flags);
+ hostdata->issue_queue = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_queue = NULL;
+#ifdef SUPPORT_TAGS
+ free_all_tags(hostdata);
+#endif
+ for (i = 0; i < 8; ++i)
+ hostdata->busy[i] = 0;
+#ifdef REAL_DMA
+ hostdata->dma_len = 0;
+#endif
+
+ maybe_release_dma_irq(instance);
+ local_irq_restore(flags);
+
+ return SUCCESS;
+}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
new file mode 100644
index 000000000..5ede3daa9
--- /dev/null
+++ b/drivers/scsi/atari_scsi.c
@@ -0,0 +1,1023 @@
+/*
+ * atari_scsi.c -- Device dependent functions for the Atari generic SCSI port
+ *
+ * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ *
+ * Loosely based on the work of Robert De Vries' team and added:
+ * - working real DMA
+ * - Falcon support (untested yet!) ++bjoern fixed and now it works
+ * - lots of extensions and bug fixes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+
+/**************************************************************************/
+/* */
+/* Notes for Falcon SCSI: */
+/* ---------------------- */
+/* */
+/* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */
+/* several device drivers, locking and unlocking the access to this */
+/* chip is required. But locking is not possible from an interrupt, */
+/* since it puts the process to sleep if the lock is not available. */
+/* This prevents "late" locking of the DMA chip, i.e. locking it just */
+/* before using it, since in case of disconnection-reconnection */
+/* commands, the DMA is started from the reselection interrupt. */
+/* */
+/* Two possible schemes for ST-DMA-locking would be: */
+/* 1) The lock is taken for each command separately and disconnecting */
+/* is forbidden (i.e. can_queue = 1). */
+/* 2) The DMA chip is locked when the first command comes in and */
+/* released when the last command is finished and all queues are */
+/* empty. */
+/* The first alternative would result in bad performance, since the */
+/* interleaving of commands would not be used. The second is unfair to */
+/* other drivers using the ST-DMA, because the queues will seldom be */
+/* totally empty if there is a lot of disk traffic. */
+/* */
+/* For this reasons I decided to employ a more elaborate scheme: */
+/* - First, we give up the lock every time we can (for fairness), this */
+/* means every time a command finishes and there are no other commands */
+/* on the disconnected queue. */
+/* - If there are others waiting to lock the DMA chip, we stop */
+/* issuing commands, i.e. moving them onto the issue queue. */
+/* Because of that, the disconnected queue will run empty in a */
+/* while. Instead we go to sleep on a 'fairness_queue'. */
+/* - If the lock is released, all processes waiting on the fairness */
+/* queue will be woken. The first of them tries to re-lock the DMA, */
+/* the others wait for the first to finish this task. After that, */
+/* they can all run on and do their commands... */
+/* This sounds complicated (and it is it :-(), but it seems to be a */
+/* good compromise between fairness and performance: As long as no one */
+/* else wants to work with the ST-DMA chip, SCSI can go along as */
+/* usual. If now someone else comes, this behaviour is changed to a */
+/* "fairness mode": just already initiated commands are finished and */
+/* then the lock is released. The other one waiting will probably win */
+/* the race for locking the DMA, since it was waiting for longer. And */
+/* after it has finished, SCSI can go ahead again. Finally: I hope I */
+/* have not produced any deadlock possibilities! */
+/* */
+/**************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/nvram.h>
+#include <linux/bitops.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_stdma.h>
+#include <asm/atari_stram.h>
+#include <asm/io.h>
+
+#include <scsi/scsi_host.h>
+
+/* Definitions for the core NCR5380 driver. */
+
+#define REAL_DMA
+#define SUPPORT_TAGS
+#define MAX_TAGS 32
+#define DMA_MIN_SIZE 32
+
+#define NCR5380_implementation_fields /* none */
+
+#define NCR5380_read(reg) atari_scsi_reg_read(reg)
+#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
+
+#define NCR5380_queue_command atari_scsi_queue_command
+#define NCR5380_abort atari_scsi_abort
+#define NCR5380_show_info atari_scsi_show_info
+#define NCR5380_info atari_scsi_info
+
+#define NCR5380_dma_read_setup(instance, data, count) \
+ atari_scsi_dma_setup(instance, data, count, 0)
+#define NCR5380_dma_write_setup(instance, data, count) \
+ atari_scsi_dma_setup(instance, data, count, 1)
+#define NCR5380_dma_residual(instance) \
+ atari_scsi_dma_residual(instance)
+#define NCR5380_dma_xfer_len(instance, cmd, phase) \
+ atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+
+#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
+#define NCR5380_release_dma_irq(instance) falcon_release_lock()
+
+#include "NCR5380.h"
+
+
+#define IS_A_TT() ATARIHW_PRESENT(TT_SCSI)
+
+#define SCSI_DMA_WRITE_P(elt,val) \
+ do { \
+ unsigned long v = val; \
+ tt_scsi_dma.elt##_lo = v & 0xff; \
+ v >>= 8; \
+ tt_scsi_dma.elt##_lmd = v & 0xff; \
+ v >>= 8; \
+ tt_scsi_dma.elt##_hmd = v & 0xff; \
+ v >>= 8; \
+ tt_scsi_dma.elt##_hi = v & 0xff; \
+ } while(0)
+
+#define SCSI_DMA_READ_P(elt) \
+ (((((((unsigned long)tt_scsi_dma.elt##_hi << 8) | \
+ (unsigned long)tt_scsi_dma.elt##_hmd) << 8) | \
+ (unsigned long)tt_scsi_dma.elt##_lmd) << 8) | \
+ (unsigned long)tt_scsi_dma.elt##_lo)
+
+
+static inline void SCSI_DMA_SETADR(unsigned long adr)
+{
+ st_dma.dma_lo = (unsigned char)adr;
+ MFPDELAY();
+ adr >>= 8;
+ st_dma.dma_md = (unsigned char)adr;
+ MFPDELAY();
+ adr >>= 8;
+ st_dma.dma_hi = (unsigned char)adr;
+ MFPDELAY();
+}
+
+static inline unsigned long SCSI_DMA_GETADR(void)
+{
+ unsigned long adr;
+ adr = st_dma.dma_lo;
+ MFPDELAY();
+ adr |= (st_dma.dma_md & 0xff) << 8;
+ MFPDELAY();
+ adr |= (st_dma.dma_hi & 0xff) << 16;
+ MFPDELAY();
+ return adr;
+}
+
+#define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \
+ (atari_scsi_host->hostdata))->dma_len)
+
+/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms,
+ * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more
+ * need ten times the standard value... */
+#ifndef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
+#define AFTER_RESET_DELAY (HZ/2)
+#else
+#define AFTER_RESET_DELAY (5*HZ/2)
+#endif
+
+#ifdef REAL_DMA
+static void atari_scsi_fetch_restbytes(void);
+#endif
+
+static struct Scsi_Host *atari_scsi_host;
+static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
+static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
+
+#ifdef REAL_DMA
+static unsigned long atari_dma_residual, atari_dma_startaddr;
+static short atari_dma_active;
+/* pointer to the dribble buffer */
+static char *atari_dma_buffer;
+/* precalculated physical address of the dribble buffer */
+static unsigned long atari_dma_phys_buffer;
+/* != 0 tells the Falcon int handler to copy data from the dribble buffer */
+static char *atari_dma_orig_addr;
+/* size of the dribble buffer; 4k seems enough, since the Falcon cannot use
+ * scatter-gather anyway, so most transfers are 1024 byte only. In the rare
+ * cases where requests to physical contiguous buffers have been merged, this
+ * request is <= 4k (one page). So I don't think we have to split transfers
+ * just due to this buffer size...
+ */
+#define STRAM_BUFFER_SIZE (4096)
+/* mask for address bits that can't be used with the ST-DMA */
+static unsigned long atari_dma_stram_mask;
+#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0)
+#endif
+
+static int setup_can_queue = -1;
+module_param(setup_can_queue, int, 0);
+static int setup_cmd_per_lun = -1;
+module_param(setup_cmd_per_lun, int, 0);
+static int setup_sg_tablesize = -1;
+module_param(setup_sg_tablesize, int, 0);
+#ifdef SUPPORT_TAGS
+static int setup_use_tagged_queuing = -1;
+module_param(setup_use_tagged_queuing, int, 0);
+#endif
+static int setup_hostid = -1;
+module_param(setup_hostid, int, 0);
+
+
+#if defined(REAL_DMA)
+
+static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
+{
+ int i;
+ unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr;
+
+ if (dma_stat & 0x01) {
+
+ /* A bus error happens when DMA-ing from the last page of a
+ * physical memory chunk (DMA prefetch!), but that doesn't hurt.
+ * Check for this case:
+ */
+
+ for (i = 0; i < m68k_num_memory; ++i) {
+ end_addr = m68k_memory[i].addr + m68k_memory[i].size;
+ if (end_addr <= addr && addr <= end_addr + 4)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+#if 0
+/* Dead code... wasn't called anyway :-) and causes some trouble, because at
+ * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has
+ * to clear the DMA int pending bit before it allows other level 6 interrupts.
+ */
+static void scsi_dma_buserr(int irq, void *dummy)
+{
+ unsigned char dma_stat = tt_scsi_dma.dma_ctrl;
+
+ /* Don't do anything if a NCR interrupt is pending. Probably it's just
+ * masked... */
+ if (atari_irq_pending(IRQ_TT_MFP_SCSI))
+ return;
+
+ printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n",
+ SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt));
+ if (dma_stat & 0x80) {
+ if (!scsi_dma_is_ignored_buserr(dma_stat))
+ printk("SCSI DMA bus error -- bad DMA programming!\n");
+ } else {
+ /* Under normal circumstances we never should get to this point,
+ * since both interrupts are triggered simultaneously and the 5380
+ * int has higher priority. When this irq is handled, that DMA
+ * interrupt is cleared. So a warning message is printed here.
+ */
+ printk("SCSI DMA intr ?? -- this shouldn't happen!\n");
+ }
+}
+#endif
+
+#endif
+
+
+static irqreturn_t scsi_tt_intr(int irq, void *dummy)
+{
+#ifdef REAL_DMA
+ int dma_stat;
+
+ dma_stat = tt_scsi_dma.dma_ctrl;
+
+ dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",
+ atari_scsi_host->host_no, dma_stat & 0xff);
+
+ /* Look if it was the DMA that has interrupted: First possibility
+ * is that a bus error occurred...
+ */
+ if (dma_stat & 0x80) {
+ if (!scsi_dma_is_ignored_buserr(dma_stat)) {
+ printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n",
+ SCSI_DMA_READ_P(dma_addr));
+ printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!");
+ }
+ }
+
+ /* If the DMA is active but not finished, we have the case
+ * that some other 5380 interrupt occurred within the DMA transfer.
+ * This means we have residual bytes, if the desired end address
+ * is not yet reached. Maybe we have to fetch some bytes from the
+ * rest data register, too. The residual must be calculated from
+ * the address pointer, not the counter register, because only the
+ * addr reg counts bytes not yet written and pending in the rest
+ * data reg!
+ */
+ if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
+ atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
+
+ dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
+ atari_dma_residual);
+
+ if ((signed int)atari_dma_residual < 0)
+ atari_dma_residual = 0;
+ if ((dma_stat & 1) == 0) {
+ /*
+ * After read operations, we maybe have to
+ * transport some rest bytes
+ */
+ atari_scsi_fetch_restbytes();
+ } else {
+ /*
+ * There seems to be a nasty bug in some SCSI-DMA/NCR
+ * combinations: If a target disconnects while a write
+ * operation is going on, the address register of the
+ * DMA may be a few bytes farer than it actually read.
+ * This is probably due to DMA prefetching and a delay
+ * between DMA and NCR. Experiments showed that the
+ * dma_addr is 9 bytes to high, but this could vary.
+ * The problem is, that the residual is thus calculated
+ * wrong and the next transfer will start behind where
+ * it should. So we round up the residual to the next
+ * multiple of a sector size, if it isn't already a
+ * multiple and the originally expected transfer size
+ * was. The latter condition is there to ensure that
+ * the correction is taken only for "real" data
+ * transfers and not for, e.g., the parameters of some
+ * other command. These shouldn't disconnect anyway.
+ */
+ if (atari_dma_residual & 0x1ff) {
+ dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "
+ "difference %ld bytes\n",
+ 512 - (atari_dma_residual & 0x1ff));
+ atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff;
+ }
+ }
+ tt_scsi_dma.dma_ctrl = 0;
+ }
+
+ /* If the DMA is finished, fetch the rest bytes and turn it off */
+ if (dma_stat & 0x40) {
+ atari_dma_residual = 0;
+ if ((dma_stat & 1) == 0)
+ atari_scsi_fetch_restbytes();
+ tt_scsi_dma.dma_ctrl = 0;
+ }
+
+#endif /* REAL_DMA */
+
+ NCR5380_intr(irq, dummy);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
+{
+#ifdef REAL_DMA
+ int dma_stat;
+
+ /* Turn off DMA and select sector counter register before
+ * accessing the status register (Atari recommendation!)
+ */
+ st_dma.dma_mode_status = 0x90;
+ dma_stat = st_dma.dma_mode_status;
+
+ /* Bit 0 indicates some error in the DMA process... don't know
+ * what happened exactly (no further docu).
+ */
+ if (!(dma_stat & 0x01)) {
+ /* DMA error */
+ printk(KERN_CRIT "SCSI DMA error near 0x%08lx!\n", SCSI_DMA_GETADR());
+ }
+
+ /* If the DMA was active, but now bit 1 is not clear, it is some
+ * other 5380 interrupt that finishes the DMA transfer. We have to
+ * calculate the number of residual bytes and give a warning if
+ * bytes are stuck in the ST-DMA fifo (there's no way to reach them!)
+ */
+ if (atari_dma_active && (dma_stat & 0x02)) {
+ unsigned long transferred;
+
+ transferred = SCSI_DMA_GETADR() - atari_dma_startaddr;
+ /* The ST-DMA address is incremented in 2-byte steps, but the
+ * data are written only in 16-byte chunks. If the number of
+ * transferred bytes is not divisible by 16, the remainder is
+ * lost somewhere in outer space.
+ */
+ if (transferred & 15)
+ printk(KERN_ERR "SCSI DMA error: %ld bytes lost in "
+ "ST-DMA fifo\n", transferred & 15);
+
+ atari_dma_residual = HOSTDATA_DMALEN - transferred;
+ dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
+ atari_dma_residual);
+ } else
+ atari_dma_residual = 0;
+ atari_dma_active = 0;
+
+ if (atari_dma_orig_addr) {
+ /* If the dribble buffer was used on a read operation, copy the DMA-ed
+ * data to the original destination address.
+ */
+ memcpy(atari_dma_orig_addr, phys_to_virt(atari_dma_startaddr),
+ HOSTDATA_DMALEN - atari_dma_residual);
+ atari_dma_orig_addr = NULL;
+ }
+
+#endif /* REAL_DMA */
+
+ NCR5380_intr(irq, dummy);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef REAL_DMA
+static void atari_scsi_fetch_restbytes(void)
+{
+ int nr;
+ char *src, *dst;
+ unsigned long phys_dst;
+
+ /* fetch rest bytes in the DMA register */
+ phys_dst = SCSI_DMA_READ_P(dma_addr);
+ nr = phys_dst & 3;
+ if (nr) {
+ /* there are 'nr' bytes left for the last long address
+ before the DMA pointer */
+ phys_dst ^= nr;
+ dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
+ nr, phys_dst);
+ /* The content of the DMA pointer is a physical address! */
+ dst = phys_to_virt(phys_dst);
+ dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);
+ for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
+ *dst++ = *src++;
+ }
+}
+#endif /* REAL_DMA */
+
+
+/* This function releases the lock on the DMA chip if there is no
+ * connected command and the disconnected queue is empty.
+ */
+
+static void falcon_release_lock(void)
+{
+ if (IS_A_TT())
+ return;
+
+ if (stdma_is_locked_by(scsi_falcon_intr))
+ stdma_release();
+}
+
+/* This function manages the locking of the ST-DMA.
+ * If the DMA isn't locked already for SCSI, it tries to lock it by
+ * calling stdma_lock(). But if the DMA is locked by the SCSI code and
+ * there are other drivers waiting for the chip, we do not issue the
+ * command immediately but tell the SCSI mid-layer to defer.
+ */
+
+static int falcon_get_lock(struct Scsi_Host *instance)
+{
+ if (IS_A_TT())
+ return 1;
+
+ if (in_interrupt())
+ return stdma_try_lock(scsi_falcon_intr, instance);
+
+ stdma_lock(scsi_falcon_intr, instance);
+ return 1;
+}
+
+#ifndef MODULE
+static int __init atari_scsi_setup(char *str)
+{
+ /* Format of atascsi parameter is:
+ * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
+ * Defaults depend on TT or Falcon, determined at run time.
+ * Negative values mean don't change.
+ */
+ int ints[6];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] < 1) {
+ printk("atari_scsi_setup: no arguments!\n");
+ return 0;
+ }
+ if (ints[0] >= 1)
+ setup_can_queue = ints[1];
+ if (ints[0] >= 2)
+ setup_cmd_per_lun = ints[2];
+ if (ints[0] >= 3)
+ setup_sg_tablesize = ints[3];
+ if (ints[0] >= 4)
+ setup_hostid = ints[4];
+#ifdef SUPPORT_TAGS
+ if (ints[0] >= 5)
+ setup_use_tagged_queuing = ints[5];
+#endif
+
+ return 1;
+}
+
+__setup("atascsi=", atari_scsi_setup);
+#endif /* !MODULE */
+
+
+#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
+static void __init atari_scsi_reset_boot(void)
+{
+ unsigned long end;
+
+ /*
+ * Do a SCSI reset to clean up the bus during initialization. No messing
+ * with the queues, interrupts, or locks necessary here.
+ */
+
+ printk("Atari SCSI: resetting the SCSI bus...");
+
+ /* get in phase */
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG)));
+
+ /* assert RST */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
+ /* The min. reset hold time is 25us, so 40us should be enough */
+ udelay(50);
+ /* reset RST and interrupt */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ end = jiffies + AFTER_RESET_DELAY;
+ while (time_before(jiffies, end))
+ barrier();
+
+ printk(" done\n");
+}
+#endif
+
+#if defined(REAL_DMA)
+
+static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+ void *data, unsigned long count,
+ int dir)
+{
+ unsigned long addr = virt_to_phys(data);
+
+ dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
+ "dir = %d\n", instance->host_no, data, addr, count, dir);
+
+ if (!IS_A_TT() && !STRAM_ADDR(addr)) {
+ /* If we have a non-DMAable address on a Falcon, use the dribble
+ * buffer; 'orig_addr' != 0 in the read case tells the interrupt
+ * handler to copy data from the dribble buffer to the originally
+ * wanted address.
+ */
+ if (dir)
+ memcpy(atari_dma_buffer, data, count);
+ else
+ atari_dma_orig_addr = data;
+ addr = atari_dma_phys_buffer;
+ }
+
+ atari_dma_startaddr = addr; /* Needed for calculating residual later. */
+
+ /* Cache cleanup stuff: On writes, push any dirty cache out before sending
+ * it to the peripheral. (Must be done before DMA setup, since at least
+ * the ST-DMA begins to fill internal buffers right after setup. For
+ * reads, invalidate any cache, may be altered after DMA without CPU
+ * knowledge.
+ *
+ * ++roman: For the Medusa, there's no need at all for that cache stuff,
+ * because the hardware does bus snooping (fine!).
+ */
+ dma_cache_maintenance(addr, count, dir);
+
+ if (count == 0)
+ printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n");
+
+ if (IS_A_TT()) {
+ tt_scsi_dma.dma_ctrl = dir;
+ SCSI_DMA_WRITE_P(dma_addr, addr);
+ SCSI_DMA_WRITE_P(dma_cnt, count);
+ tt_scsi_dma.dma_ctrl = dir | 2;
+ } else { /* ! IS_A_TT */
+
+ /* set address */
+ SCSI_DMA_SETADR(addr);
+
+ /* toggle direction bit to clear FIFO and set DMA direction */
+ dir <<= 8;
+ st_dma.dma_mode_status = 0x90 | dir;
+ st_dma.dma_mode_status = 0x90 | (dir ^ 0x100);
+ st_dma.dma_mode_status = 0x90 | dir;
+ udelay(40);
+ /* On writes, round up the transfer length to the next multiple of 512
+ * (see also comment at atari_dma_xfer_len()). */
+ st_dma.fdc_acces_seccount = (count + (dir ? 511 : 0)) >> 9;
+ udelay(40);
+ st_dma.dma_mode_status = 0x10 | dir;
+ udelay(40);
+ /* need not restore value of dir, only boolean value is tested */
+ atari_dma_active = 1;
+ }
+
+ return count;
+}
+
+
+static long atari_scsi_dma_residual(struct Scsi_Host *instance)
+{
+ return atari_dma_residual;
+}
+
+
+#define CMD_SURELY_BLOCK_MODE 0
+#define CMD_SURELY_BYTE_MODE 1
+#define CMD_MODE_UNKNOWN 2
+
+static int falcon_classify_cmd(struct scsi_cmnd *cmd)
+{
+ unsigned char opcode = cmd->cmnd[0];
+
+ if (opcode == READ_DEFECT_DATA || opcode == READ_LONG ||
+ opcode == READ_BUFFER)
+ return CMD_SURELY_BYTE_MODE;
+ else if (opcode == READ_6 || opcode == READ_10 ||
+ opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE ||
+ opcode == RECOVER_BUFFERED_DATA) {
+ /* In case of a sequential-access target (tape), special care is
+ * needed here: The transfer is block-mode only if the 'fixed' bit is
+ * set! */
+ if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1))
+ return CMD_SURELY_BYTE_MODE;
+ else
+ return CMD_SURELY_BLOCK_MODE;
+ } else
+ return CMD_MODE_UNKNOWN;
+}
+
+
+/* This function calculates the number of bytes that can be transferred via
+ * DMA. On the TT, this is arbitrary, but on the Falcon we have to use the
+ * ST-DMA chip. There are only multiples of 512 bytes possible and max.
+ * 255*512 bytes :-( This means also, that defining READ_OVERRUNS is not
+ * possible on the Falcon, since that would require to program the DMA for
+ * n*512 - atari_read_overrun bytes. But it seems that the Falcon doesn't have
+ * the overrun problem, so this question is academic :-)
+ */
+
+static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
+ struct scsi_cmnd *cmd, int write_flag)
+{
+ unsigned long possible_len, limit;
+
+ if (IS_A_TT())
+ /* TT SCSI DMA can transfer arbitrary #bytes */
+ return wanted_len;
+
+ /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max.
+ * 255*512 bytes, but this should be enough)
+ *
+ * ++roman: Aaargl! Another Falcon-SCSI problem... There are some commands
+ * that return a number of bytes which cannot be known beforehand. In this
+ * case, the given transfer length is an "allocation length". Now it
+ * can happen that this allocation length is a multiple of 512 bytes and
+ * the DMA is used. But if not n*512 bytes really arrive, some input data
+ * will be lost in the ST-DMA's FIFO :-( Thus, we have to distinguish
+ * between commands that do block transfers and those that do byte
+ * transfers. But this isn't easy... there are lots of vendor specific
+ * commands, and the user can issue any command via the
+ * SCSI_IOCTL_SEND_COMMAND.
+ *
+ * The solution: We classify SCSI commands in 1) surely block-mode cmd.s,
+ * 2) surely byte-mode cmd.s and 3) cmd.s with unknown mode. In case 1)
+ * and 3), the thing to do is obvious: allow any number of blocks via DMA
+ * or none. In case 2), we apply some heuristic: Byte mode is assumed if
+ * the transfer (allocation) length is < 1024, hoping that no cmd. not
+ * explicitly known as byte mode have such big allocation lengths...
+ * BTW, all the discussion above applies only to reads. DMA writes are
+ * unproblematic anyways, since the targets aborts the transfer after
+ * receiving a sufficient number of bytes.
+ *
+ * Another point: If the transfer is from/to an non-ST-RAM address, we
+ * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
+ */
+
+ if (write_flag) {
+ /* Write operation can always use the DMA, but the transfer size must
+ * be rounded up to the next multiple of 512 (atari_dma_setup() does
+ * this).
+ */
+ possible_len = wanted_len;
+ } else {
+ /* Read operations: if the wanted transfer length is not a multiple of
+ * 512, we cannot use DMA, since the ST-DMA cannot split transfers
+ * (no interrupt on DMA finished!)
+ */
+ if (wanted_len & 0x1ff)
+ possible_len = 0;
+ else {
+ /* Now classify the command (see above) and decide whether it is
+ * allowed to do DMA at all */
+ switch (falcon_classify_cmd(cmd)) {
+ case CMD_SURELY_BLOCK_MODE:
+ possible_len = wanted_len;
+ break;
+ case CMD_SURELY_BYTE_MODE:
+ possible_len = 0; /* DMA prohibited */
+ break;
+ case CMD_MODE_UNKNOWN:
+ default:
+ /* For unknown commands assume block transfers if the transfer
+ * size/allocation length is >= 1024 */
+ possible_len = (wanted_len < 1024) ? 0 : wanted_len;
+ break;
+ }
+ }
+ }
+
+ /* Last step: apply the hard limit on DMA transfers */
+ limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
+ STRAM_BUFFER_SIZE : 255*512;
+ if (possible_len > limit)
+ possible_len = limit;
+
+ if (possible_len != wanted_len)
+ dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
+ "instead of %ld\n", possible_len, wanted_len);
+
+ return possible_len;
+}
+
+
+#endif /* REAL_DMA */
+
+
+/* NCR5380 register access functions
+ *
+ * There are separate functions for TT and Falcon, because the access
+ * methods are quite different. The calling macros NCR5380_read and
+ * NCR5380_write call these functions via function pointers.
+ */
+
+static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
+{
+ return tt_scsi_regp[reg * 2];
+}
+
+static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
+{
+ tt_scsi_regp[reg * 2] = value;
+}
+
+static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
+{
+ dma_wd.dma_mode_status= (u_short)(0x88 + reg);
+ return (u_char)dma_wd.fdc_acces_seccount;
+}
+
+static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
+{
+ dma_wd.dma_mode_status = (u_short)(0x88 + reg);
+ dma_wd.fdc_acces_seccount = (u_short)value;
+}
+
+
+#include "atari_NCR5380.c"
+
+static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
+{
+ int rv;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+#ifdef REAL_DMA
+ /* Abort a maybe active DMA transfer */
+ if (IS_A_TT()) {
+ tt_scsi_dma.dma_ctrl = 0;
+ } else {
+ st_dma.dma_mode_status = 0x90;
+ atari_dma_active = 0;
+ atari_dma_orig_addr = NULL;
+ }
+#endif
+
+ rv = NCR5380_bus_reset(cmd);
+
+ /* The 5380 raises its IRQ line while _RST is active but the ST DMA
+ * "lock" has been released so this interrupt may end up handled by
+ * floppy or IDE driver (if one of them holds the lock). The NCR5380
+ * interrupt flag has been cleared already.
+ */
+
+ local_irq_restore(flags);
+
+ return rv;
+}
+
+#define DRV_MODULE_NAME "atari_scsi"
+#define PFX DRV_MODULE_NAME ": "
+
+static struct scsi_host_template atari_scsi_template = {
+ .module = THIS_MODULE,
+ .proc_name = DRV_MODULE_NAME,
+ .show_info = atari_scsi_show_info,
+ .name = "Atari native SCSI",
+ .info = atari_scsi_info,
+ .queuecommand = atari_scsi_queue_command,
+ .eh_abort_handler = atari_scsi_abort,
+ .eh_bus_reset_handler = atari_scsi_bus_reset,
+ .this_id = 7,
+ .use_clustering = DISABLE_CLUSTERING
+};
+
+static int __init atari_scsi_probe(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance;
+ int error;
+ struct resource *irq;
+ int host_flags = 0;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq)
+ return -ENODEV;
+
+ if (ATARIHW_PRESENT(TT_SCSI)) {
+ atari_scsi_reg_read = atari_scsi_tt_reg_read;
+ atari_scsi_reg_write = atari_scsi_tt_reg_write;
+ } else {
+ atari_scsi_reg_read = atari_scsi_falcon_reg_read;
+ atari_scsi_reg_write = atari_scsi_falcon_reg_write;
+ }
+
+ /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary.
+ * Higher values should work, too; try it!
+ * (But cmd_per_lun costs memory!)
+ *
+ * But there seems to be a bug somewhere that requires CAN_QUEUE to be
+ * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since
+ * changed CMD_PER_LUN...
+ *
+ * Note: The Falcon currently uses 8/1 setting due to unsolved problems
+ * with cmd_per_lun != 1
+ */
+ if (ATARIHW_PRESENT(TT_SCSI)) {
+ atari_scsi_template.can_queue = 16;
+ atari_scsi_template.cmd_per_lun = 8;
+ atari_scsi_template.sg_tablesize = SG_ALL;
+ } else {
+ atari_scsi_template.can_queue = 8;
+ atari_scsi_template.cmd_per_lun = 1;
+ atari_scsi_template.sg_tablesize = SG_NONE;
+ }
+
+ if (setup_can_queue > 0)
+ atari_scsi_template.can_queue = setup_can_queue;
+
+ if (setup_cmd_per_lun > 0)
+ atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
+
+ /* Leave sg_tablesize at 0 on a Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ atari_scsi_template.sg_tablesize = setup_sg_tablesize;
+
+ if (setup_hostid >= 0) {
+ atari_scsi_template.this_id = setup_hostid & 7;
+ } else {
+ /* Test if a host id is set in the NVRam */
+ if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
+ unsigned char b = nvram_read_byte(14);
+
+ /* Arbitration enabled? (for TOS)
+ * If yes, use configured host ID
+ */
+ if (b & 0x80)
+ atari_scsi_template.this_id = b & 7;
+ }
+ }
+
+
+#ifdef REAL_DMA
+ /* If running on a Falcon and if there's TT-Ram (i.e., more than one
+ * memory block, since there's always ST-Ram in a Falcon), then
+ * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers
+ * from/to alternative Ram.
+ */
+ if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) &&
+ m68k_num_memory > 1) {
+ atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
+ if (!atari_dma_buffer) {
+ pr_err(PFX "can't allocate ST-RAM double buffer\n");
+ return -ENOMEM;
+ }
+ atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
+ atari_dma_orig_addr = 0;
+ }
+#endif
+
+ instance = scsi_host_alloc(&atari_scsi_template,
+ sizeof(struct NCR5380_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_alloc;
+ }
+ atari_scsi_host = instance;
+
+#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
+ atari_scsi_reset_boot();
+#endif
+
+ instance->irq = irq->start;
+
+ host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP;
+
+#ifdef SUPPORT_TAGS
+ host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
+#endif
+
+ NCR5380_init(instance, host_flags);
+
+ if (IS_A_TT()) {
+ error = request_irq(instance->irq, scsi_tt_intr, 0,
+ "NCR5380", instance);
+ if (error) {
+ pr_err(PFX "request irq %d failed, aborting\n",
+ instance->irq);
+ goto fail_irq;
+ }
+ tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
+#ifdef REAL_DMA
+ tt_scsi_dma.dma_ctrl = 0;
+ atari_dma_residual = 0;
+
+ /* While the read overruns (described by Drew Eckhardt in
+ * NCR5380.c) never happened on TTs, they do in fact on the
+ * Medusa (This was the cause why SCSI didn't work right for
+ * so long there.) Since handling the overruns slows down
+ * a bit, I turned the #ifdef's into a runtime condition.
+ *
+ * In principle it should be sufficient to do max. 1 byte with
+ * PIO, but there is another problem on the Medusa with the DMA
+ * rest data register. So read_overruns is currently set
+ * to 4 to avoid having transfers that aren't a multiple of 4.
+ * If the rest data bug is fixed, this can be lowered to 1.
+ */
+ if (MACH_IS_MEDUSA) {
+ struct NCR5380_hostdata *hostdata =
+ shost_priv(instance);
+
+ hostdata->read_overruns = 4;
+ }
+#endif
+ } else {
+ /* Nothing to do for the interrupt: the ST-DMA is initialized
+ * already.
+ */
+#ifdef REAL_DMA
+ atari_dma_residual = 0;
+ atari_dma_active = 0;
+ atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
+ : 0xff000000);
+#endif
+ }
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ platform_set_drvdata(pdev, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ if (IS_A_TT())
+ free_irq(instance->irq, instance);
+fail_irq:
+ NCR5380_exit(instance);
+ scsi_host_put(instance);
+fail_alloc:
+ if (atari_dma_buffer)
+ atari_stram_free(atari_dma_buffer);
+ return error;
+}
+
+static int __exit atari_scsi_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance = platform_get_drvdata(pdev);
+
+ scsi_remove_host(instance);
+ if (IS_A_TT())
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
+ scsi_host_put(instance);
+ if (atari_dma_buffer)
+ atari_stram_free(atari_dma_buffer);
+ return 0;
+}
+
+static struct platform_driver atari_scsi_driver = {
+ .remove = __exit_p(atari_scsi_remove),
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+
+module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe);
+
+MODULE_ALIAS("platform:" DRV_MODULE_NAME);
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
new file mode 100644
index 000000000..0836433e3
--- /dev/null
+++ b/drivers/scsi/atp870u.c
@@ -0,0 +1,3897 @@
+/*
+ * Copyright (C) 1997 Wu Ching Chen
+ * 2.1.x update (C) 1998 Krzysztof G. Baranowski
+ * 2.5.x update (C) 2002 Red Hat
+ * 2.6.x update (C) 2004 Red Hat
+ *
+ * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
+ *
+ * Wu Ching Chen : NULL pointer fixes 2000/06/02
+ * support atp876 chip
+ * enable 32 bit fifo transfer
+ * support cdrom & remove device run ultra speed
+ * fix disconnect bug 2000/12/21
+ * support atp880 chip lvd u160 2001/05/15
+ * fix prd table bug 2001/09/12 (7.1)
+ *
+ * atp885 support add by ACARD Hao Ping Lian 2005/01/05
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "atp870u.h"
+
+static struct scsi_host_template atp870u_template;
+static void send_s870(struct atp_unit *dev,unsigned char c);
+static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c);
+static void tscam_885(void);
+
+static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
+{
+ unsigned long flags;
+ unsigned short int tmpcip, id;
+ unsigned char i, j, c, target_id, lun,cmdp;
+ unsigned char *prd;
+ struct scsi_cmnd *workreq;
+ unsigned int workport, tmport, tmport1;
+ unsigned long adrcnt, k;
+#ifdef ED_DBGP
+ unsigned long l;
+#endif
+ int errstus;
+ struct Scsi_Host *host = dev_id;
+ struct atp_unit *dev = (struct atp_unit *)&host->hostdata;
+
+ for (c = 0; c < 2; c++) {
+ tmport = dev->ioport[c] + 0x1f;
+ j = inb(tmport);
+ if ((j & 0x80) != 0)
+ {
+ goto ch_sel;
+ }
+ dev->in_int[c] = 0;
+ }
+ return IRQ_NONE;
+ch_sel:
+#ifdef ED_DBGP
+ printk("atp870u_intr_handle enter\n");
+#endif
+ dev->in_int[c] = 1;
+ cmdp = inb(dev->ioport[c] + 0x10);
+ workport = dev->ioport[c];
+ if (dev->working[c] != 0) {
+ if (dev->dev_id == ATP885_DEVID) {
+ tmport1 = workport + 0x16;
+ if ((inb(tmport1) & 0x80) == 0)
+ outb((inb(tmport1) | 0x80), tmport1);
+ }
+ tmpcip = dev->pciport[c];
+ if ((inb(tmpcip) & 0x08) != 0)
+ {
+ tmpcip += 0x2;
+ for (k=0; k < 1000; k++) {
+ if ((inb(tmpcip) & 0x08) == 0) {
+ goto stop_dma;
+ }
+ if ((inb(tmpcip) & 0x01) == 0) {
+ goto stop_dma;
+ }
+ }
+ }
+stop_dma:
+ tmpcip = dev->pciport[c];
+ outb(0x00, tmpcip);
+ tmport -= 0x08;
+
+ i = inb(tmport);
+
+ if (dev->dev_id == ATP885_DEVID) {
+ tmpcip += 2;
+ outb(0x06, tmpcip);
+ tmpcip -= 2;
+ }
+
+ tmport -= 0x02;
+ target_id = inb(tmport);
+ tmport += 0x02;
+
+ /*
+ * Remap wide devices onto id numbers
+ */
+
+ if ((target_id & 0x40) != 0) {
+ target_id = (target_id & 0x07) | 0x08;
+ } else {
+ target_id &= 0x07;
+ }
+
+ if ((j & 0x40) != 0) {
+ if (dev->last_cmd[c] == 0xff) {
+ dev->last_cmd[c] = target_id;
+ }
+ dev->last_cmd[c] |= 0x40;
+ }
+ if (dev->dev_id == ATP885_DEVID)
+ dev->r1f[c][target_id] |= j;
+#ifdef ED_DBGP
+ printk("atp870u_intr_handle status = %x\n",i);
+#endif
+ if (i == 0x85) {
+ if ((dev->last_cmd[c] & 0xf0) != 0x40) {
+ dev->last_cmd[c] = 0xff;
+ }
+ if (dev->dev_id == ATP885_DEVID) {
+ tmport -= 0x05;
+ adrcnt = 0;
+ ((unsigned char *) &adrcnt)[2] = inb(tmport++);
+ ((unsigned char *) &adrcnt)[1] = inb(tmport++);
+ ((unsigned char *) &adrcnt)[0] = inb(tmport);
+ if (dev->id[c][target_id].last_len != adrcnt)
+ {
+ k = dev->id[c][target_id].last_len;
+ k -= adrcnt;
+ dev->id[c][target_id].tran_len = k;
+ dev->id[c][target_id].last_len = adrcnt;
+ }
+#ifdef ED_DBGP
+ printk("tmport = %x dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",tmport,dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len);
+#endif
+ }
+
+ /*
+ * Flip wide
+ */
+ if (dev->wide_id[c] != 0) {
+ tmport = workport + 0x1b;
+ outb(0x01, tmport);
+ while ((inb(tmport) & 0x01) != 0x01) {
+ outb(0x01, tmport);
+ }
+ }
+ /*
+ * Issue more commands
+ */
+ spin_lock_irqsave(dev->host->host_lock, flags);
+ if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) &&
+ (dev->in_snd[c] == 0)) {
+#ifdef ED_DBGP
+ printk("Call sent_s870\n");
+#endif
+ send_s870(dev,c);
+ }
+ spin_unlock_irqrestore(dev->host->host_lock, flags);
+ /*
+ * Done
+ */
+ dev->in_int[c] = 0;
+#ifdef ED_DBGP
+ printk("Status 0x85 return\n");
+#endif
+ goto handled;
+ }
+
+ if (i == 0x40) {
+ dev->last_cmd[c] |= 0x40;
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+
+ if (i == 0x21) {
+ if ((dev->last_cmd[c] & 0xf0) != 0x40) {
+ dev->last_cmd[c] = 0xff;
+ }
+ tmport -= 0x05;
+ adrcnt = 0;
+ ((unsigned char *) &adrcnt)[2] = inb(tmport++);
+ ((unsigned char *) &adrcnt)[1] = inb(tmport++);
+ ((unsigned char *) &adrcnt)[0] = inb(tmport);
+ k = dev->id[c][target_id].last_len;
+ k -= adrcnt;
+ dev->id[c][target_id].tran_len = k;
+ dev->id[c][target_id].last_len = adrcnt;
+ tmport -= 0x04;
+ outb(0x41, tmport);
+ tmport += 0x08;
+ outb(0x08, tmport);
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+
+ if (dev->dev_id == ATP885_DEVID) {
+ if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) {
+ if ((i == 0x4c) || (i == 0x8c))
+ i=0x48;
+ else
+ i=0x49;
+ }
+
+ }
+ if ((i == 0x80) || (i == 0x8f)) {
+#ifdef ED_DBGP
+ printk(KERN_DEBUG "Device reselect\n");
+#endif
+ lun = 0;
+ tmport -= 0x07;
+ if (cmdp == 0x44 || i==0x80) {
+ tmport += 0x0d;
+ lun = inb(tmport) & 0x07;
+ } else {
+ if ((dev->last_cmd[c] & 0xf0) != 0x40) {
+ dev->last_cmd[c] = 0xff;
+ }
+ if (cmdp == 0x41) {
+#ifdef ED_DBGP
+ printk("cmdp = 0x41\n");
+#endif
+ tmport += 0x02;
+ adrcnt = 0;
+ ((unsigned char *) &adrcnt)[2] = inb(tmport++);
+ ((unsigned char *) &adrcnt)[1] = inb(tmport++);
+ ((unsigned char *) &adrcnt)[0] = inb(tmport);
+ k = dev->id[c][target_id].last_len;
+ k -= adrcnt;
+ dev->id[c][target_id].tran_len = k;
+ dev->id[c][target_id].last_len = adrcnt;
+ tmport += 0x04;
+ outb(0x08, tmport);
+ dev->in_int[c] = 0;
+ goto handled;
+ } else {
+#ifdef ED_DBGP
+ printk("cmdp != 0x41\n");
+#endif
+ outb(0x46, tmport);
+ dev->id[c][target_id].dirct = 0x00;
+ tmport += 0x02;
+ outb(0x00, tmport++);
+ outb(0x00, tmport++);
+ outb(0x00, tmport++);
+ tmport += 0x03;
+ outb(0x08, tmport);
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+ }
+ if (dev->last_cmd[c] != 0xff) {
+ dev->last_cmd[c] |= 0x40;
+ }
+ if (dev->dev_id == ATP885_DEVID) {
+ j = inb(dev->baseport + 0x29) & 0xfe;
+ outb(j, dev->baseport + 0x29);
+ tmport = workport + 0x16;
+ } else {
+ tmport = workport + 0x10;
+ outb(0x45, tmport);
+ tmport += 0x06;
+ }
+
+ target_id = inb(tmport);
+ /*
+ * Remap wide identifiers
+ */
+ if ((target_id & 0x10) != 0) {
+ target_id = (target_id & 0x07) | 0x08;
+ } else {
+ target_id &= 0x07;
+ }
+ if (dev->dev_id == ATP885_DEVID) {
+ tmport = workport + 0x10;
+ outb(0x45, tmport);
+ }
+ workreq = dev->id[c][target_id].curr_req;
+#ifdef ED_DBGP
+ scmd_printk(KERN_DEBUG, workreq, "CDB");
+ for (l = 0; l < workreq->cmd_len; l++)
+ printk(KERN_DEBUG " %x",workreq->cmnd[l]);
+ printk("\n");
+#endif
+
+ tmport = workport + 0x0f;
+ outb(lun, tmport);
+ tmport += 0x02;
+ outb(dev->id[c][target_id].devsp, tmport++);
+ adrcnt = dev->id[c][target_id].tran_len;
+ k = dev->id[c][target_id].last_len;
+
+ outb(((unsigned char *) &k)[2], tmport++);
+ outb(((unsigned char *) &k)[1], tmport++);
+ outb(((unsigned char *) &k)[0], tmport++);
+#ifdef ED_DBGP
+ printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, inb(tmport-1), inb(tmport-2), inb(tmport-3));
+#endif
+ /* Remap wide */
+ j = target_id;
+ if (target_id > 7) {
+ j = (j & 0x07) | 0x40;
+ }
+ /* Add direction */
+ j |= dev->id[c][target_id].dirct;
+ outb(j, tmport++);
+ outb(0x80,tmport);
+
+ /* enable 32 bit fifo transfer */
+ if (dev->dev_id == ATP885_DEVID) {
+ tmpcip = dev->pciport[c] + 1;
+ i=inb(tmpcip) & 0xf3;
+ //j=workreq->cmnd[0];
+ if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ i |= 0x0c;
+ }
+ outb(i,tmpcip);
+ } else if ((dev->dev_id == ATP880_DEVID1) ||
+ (dev->dev_id == ATP880_DEVID2) ) {
+ tmport = workport - 0x05;
+ if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ outb((unsigned char) ((inb(tmport) & 0x3f) | 0xc0), tmport);
+ } else {
+ outb((unsigned char) (inb(tmport) & 0x3f), tmport);
+ }
+ } else {
+ tmport = workport + 0x3a;
+ if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ outb((unsigned char) ((inb(tmport) & 0xf3) | 0x08), tmport);
+ } else {
+ outb((unsigned char) (inb(tmport) & 0xf3), tmport);
+ }
+ }
+ tmport = workport + 0x1b;
+ j = 0;
+ id = 1;
+ id = id << target_id;
+ /*
+ * Is this a wide device
+ */
+ if ((id & dev->wide_id[c]) != 0) {
+ j |= 0x01;
+ }
+ outb(j, tmport);
+ while ((inb(tmport) & 0x01) != j) {
+ outb(j,tmport);
+ }
+ if (dev->id[c][target_id].last_len == 0) {
+ tmport = workport + 0x18;
+ outb(0x08, tmport);
+ dev->in_int[c] = 0;
+#ifdef ED_DBGP
+ printk("dev->id[c][target_id].last_len = 0\n");
+#endif
+ goto handled;
+ }
+#ifdef ED_DBGP
+ printk("target_id = %d adrcnt = %d\n",target_id,adrcnt);
+#endif
+ prd = dev->id[c][target_id].prd_pos;
+ while (adrcnt != 0) {
+ id = ((unsigned short int *)prd)[2];
+ if (id == 0) {
+ k = 0x10000;
+ } else {
+ k = id;
+ }
+ if (k > adrcnt) {
+ ((unsigned short int *)prd)[2] = (unsigned short int)
+ (k - adrcnt);
+ ((unsigned long *)prd)[0] += adrcnt;
+ adrcnt = 0;
+ dev->id[c][target_id].prd_pos = prd;
+ } else {
+ adrcnt -= k;
+ dev->id[c][target_id].prdaddr += 0x08;
+ prd += 0x08;
+ if (adrcnt == 0) {
+ dev->id[c][target_id].prd_pos = prd;
+ }
+ }
+ }
+ tmpcip = dev->pciport[c] + 0x04;
+ outl(dev->id[c][target_id].prdaddr, tmpcip);
+#ifdef ED_DBGP
+ printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr);
+#endif
+ if (dev->dev_id == ATP885_DEVID) {
+ tmpcip -= 0x04;
+ } else {
+ tmpcip -= 0x02;
+ outb(0x06, tmpcip);
+ outb(0x00, tmpcip);
+ tmpcip -= 0x02;
+ }
+ tmport = workport + 0x18;
+ /*
+ * Check transfer direction
+ */
+ if (dev->id[c][target_id].dirct != 0) {
+ outb(0x08, tmport);
+ outb(0x01, tmpcip);
+ dev->in_int[c] = 0;
+#ifdef ED_DBGP
+ printk("status 0x80 return dirct != 0\n");
+#endif
+ goto handled;
+ }
+ outb(0x08, tmport);
+ outb(0x09, tmpcip);
+ dev->in_int[c] = 0;
+#ifdef ED_DBGP
+ printk("status 0x80 return dirct = 0\n");
+#endif
+ goto handled;
+ }
+
+ /*
+ * Current scsi request on this target
+ */
+
+ workreq = dev->id[c][target_id].curr_req;
+
+ if (i == 0x42) {
+ if ((dev->last_cmd[c] & 0xf0) != 0x40)
+ {
+ dev->last_cmd[c] = 0xff;
+ }
+ errstus = 0x02;
+ workreq->result = errstus;
+ goto go_42;
+ }
+ if (i == 0x16) {
+ if ((dev->last_cmd[c] & 0xf0) != 0x40) {
+ dev->last_cmd[c] = 0xff;
+ }
+ errstus = 0;
+ tmport -= 0x08;
+ errstus = inb(tmport);
+ if (((dev->r1f[c][target_id] & 0x10) != 0)&&(dev->dev_id==ATP885_DEVID)) {
+ printk(KERN_WARNING "AEC67162 CRC ERROR !\n");
+ errstus = 0x02;
+ }
+ workreq->result = errstus;
+go_42:
+ if (dev->dev_id == ATP885_DEVID) {
+ j = inb(dev->baseport + 0x29) | 0x01;
+ outb(j, dev->baseport + 0x29);
+ }
+ /*
+ * Complete the command
+ */
+ scsi_dma_unmap(workreq);
+
+ spin_lock_irqsave(dev->host->host_lock, flags);
+ (*workreq->scsi_done) (workreq);
+#ifdef ED_DBGP
+ printk("workreq->scsi_done\n");
+#endif
+ /*
+ * Clear it off the queue
+ */
+ dev->id[c][target_id].curr_req = NULL;
+ dev->working[c]--;
+ spin_unlock_irqrestore(dev->host->host_lock, flags);
+ /*
+ * Take it back wide
+ */
+ if (dev->wide_id[c] != 0) {
+ tmport = workport + 0x1b;
+ outb(0x01, tmport);
+ while ((inb(tmport) & 0x01) != 0x01) {
+ outb(0x01, tmport);
+ }
+ }
+ /*
+ * If there is stuff to send and nothing going then send it
+ */
+ spin_lock_irqsave(dev->host->host_lock, flags);
+ if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) &&
+ (dev->in_snd[c] == 0)) {
+#ifdef ED_DBGP
+ printk("Call sent_s870(scsi_done)\n");
+#endif
+ send_s870(dev,c);
+ }
+ spin_unlock_irqrestore(dev->host->host_lock, flags);
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+ if ((dev->last_cmd[c] & 0xf0) != 0x40) {
+ dev->last_cmd[c] = 0xff;
+ }
+ if (i == 0x4f) {
+ i = 0x89;
+ }
+ i &= 0x0f;
+ if (i == 0x09) {
+ tmpcip += 4;
+ outl(dev->id[c][target_id].prdaddr, tmpcip);
+ tmpcip = tmpcip - 2;
+ outb(0x06, tmpcip);
+ outb(0x00, tmpcip);
+ tmpcip = tmpcip - 2;
+ tmport = workport + 0x10;
+ outb(0x41, tmport);
+ if (dev->dev_id == ATP885_DEVID) {
+ tmport += 2;
+ k = dev->id[c][target_id].last_len;
+ outb((unsigned char) (((unsigned char *) (&k))[2]), tmport++);
+ outb((unsigned char) (((unsigned char *) (&k))[1]), tmport++);
+ outb((unsigned char) (((unsigned char *) (&k))[0]), tmport);
+ dev->id[c][target_id].dirct = 0x00;
+ tmport += 0x04;
+ } else {
+ dev->id[c][target_id].dirct = 0x00;
+ tmport += 0x08;
+ }
+ outb(0x08, tmport);
+ outb(0x09, tmpcip);
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+ if (i == 0x08) {
+ tmpcip += 4;
+ outl(dev->id[c][target_id].prdaddr, tmpcip);
+ tmpcip = tmpcip - 2;
+ outb(0x06, tmpcip);
+ outb(0x00, tmpcip);
+ tmpcip = tmpcip - 2;
+ tmport = workport + 0x10;
+ outb(0x41, tmport);
+ if (dev->dev_id == ATP885_DEVID) {
+ tmport += 2;
+ k = dev->id[c][target_id].last_len;
+ outb((unsigned char) (((unsigned char *) (&k))[2]), tmport++);
+ outb((unsigned char) (((unsigned char *) (&k))[1]), tmport++);
+ outb((unsigned char) (((unsigned char *) (&k))[0]), tmport++);
+ } else {
+ tmport += 5;
+ }
+ outb((unsigned char) (inb(tmport) | 0x20), tmport);
+ dev->id[c][target_id].dirct = 0x20;
+ tmport += 0x03;
+ outb(0x08, tmport);
+ outb(0x01, tmpcip);
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+ tmport -= 0x07;
+ if (i == 0x0a) {
+ outb(0x30, tmport);
+ } else {
+ outb(0x46, tmport);
+ }
+ dev->id[c][target_id].dirct = 0x00;
+ tmport += 0x02;
+ outb(0x00, tmport++);
+ outb(0x00, tmport++);
+ outb(0x00, tmport++);
+ tmport += 0x03;
+ outb(0x08, tmport);
+ dev->in_int[c] = 0;
+ goto handled;
+ } else {
+// tmport = workport + 0x17;
+// inb(tmport);
+// dev->working[c] = 0;
+ dev->in_int[c] = 0;
+ goto handled;
+ }
+
+handled:
+#ifdef ED_DBGP
+ printk("atp870u_intr_handle exit\n");
+#endif
+ return IRQ_HANDLED;
+}
+/**
+ * atp870u_queuecommand - Queue SCSI command
+ * @req_p: request block
+ * @done: completion function
+ *
+ * Queue a command to the ATP queue. Called with the host lock held.
+ */
+static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
+ void (*done) (struct scsi_cmnd *))
+{
+ unsigned char c;
+ unsigned int tmport,m;
+ struct atp_unit *dev;
+ struct Scsi_Host *host;
+
+ c = scmd_channel(req_p);
+ req_p->sense_buffer[0]=0;
+ scsi_set_resid(req_p, 0);
+ if (scmd_channel(req_p) > 1) {
+ req_p->result = 0x00040000;
+ done(req_p);
+#ifdef ED_DBGP
+ printk("atp870u_queuecommand : req_p->device->channel > 1\n");
+#endif
+ return 0;
+ }
+
+ host = req_p->device->host;
+ dev = (struct atp_unit *)&host->hostdata;
+
+
+
+ m = 1;
+ m = m << scmd_id(req_p);
+
+ /*
+ * Fake a timeout for missing targets
+ */
+
+ if ((m & dev->active_id[c]) == 0) {
+ req_p->result = 0x00040000;
+ done(req_p);
+ return 0;
+ }
+
+ if (done) {
+ req_p->scsi_done = done;
+ } else {
+#ifdef ED_DBGP
+ printk( "atp870u_queuecommand: done can't be NULL\n");
+#endif
+ req_p->result = 0;
+ done(req_p);
+ return 0;
+ }
+
+ /*
+ * Count new command
+ */
+ dev->quend[c]++;
+ if (dev->quend[c] >= qcnt) {
+ dev->quend[c] = 0;
+ }
+
+ /*
+ * Check queue state
+ */
+ if (dev->quhd[c] == dev->quend[c]) {
+ if (dev->quend[c] == 0) {
+ dev->quend[c] = qcnt;
+ }
+#ifdef ED_DBGP
+ printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n");
+#endif
+ dev->quend[c]--;
+ req_p->result = 0x00020000;
+ done(req_p);
+ return 0;
+ }
+ dev->quereq[c][dev->quend[c]] = req_p;
+ tmport = dev->ioport[c] + 0x1c;
+#ifdef ED_DBGP
+ printk("dev->ioport[c] = %x inb(tmport) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],inb(tmport),c,dev->in_int[c],c,dev->in_snd[c]);
+#endif
+ if ((inb(tmport) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) {
+#ifdef ED_DBGP
+ printk("Call sent_s870(atp870u_queuecommand)\n");
+#endif
+ send_s870(dev,c);
+ }
+#ifdef ED_DBGP
+ printk("atp870u_queuecommand : exit\n");
+#endif
+ return 0;
+}
+
+static DEF_SCSI_QCMD(atp870u_queuecommand)
+
+/**
+ * send_s870 - send a command to the controller
+ * @host: host
+ *
+ * On entry there is work queued to be done. We move some of that work to the
+ * controller itself.
+ *
+ * Caller holds the host lock.
+ */
+static void send_s870(struct atp_unit *dev,unsigned char c)
+{
+ unsigned int tmport;
+ struct scsi_cmnd *workreq;
+ unsigned int i;//,k;
+ unsigned char j, target_id;
+ unsigned char *prd;
+ unsigned short int tmpcip, w;
+ unsigned long l, bttl = 0;
+ unsigned int workport;
+ unsigned long sg_count;
+
+ if (dev->in_snd[c] != 0) {
+#ifdef ED_DBGP
+ printk("cmnd in_snd\n");
+#endif
+ return;
+ }
+#ifdef ED_DBGP
+ printk("Sent_s870 enter\n");
+#endif
+ dev->in_snd[c] = 1;
+ if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) {
+ dev->last_cmd[c] &= 0x0f;
+ workreq = dev->id[c][dev->last_cmd[c]].curr_req;
+ if (workreq != NULL) { /* check NULL pointer */
+ goto cmd_subp;
+ }
+ dev->last_cmd[c] = 0xff;
+ if (dev->quhd[c] == dev->quend[c]) {
+ dev->in_snd[c] = 0;
+ return ;
+ }
+ }
+ if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) {
+ dev->in_snd[c] = 0;
+ return ;
+ }
+ dev->working[c]++;
+ j = dev->quhd[c];
+ dev->quhd[c]++;
+ if (dev->quhd[c] >= qcnt) {
+ dev->quhd[c] = 0;
+ }
+ workreq = dev->quereq[c][dev->quhd[c]];
+ if (dev->id[c][scmd_id(workreq)].curr_req == NULL) {
+ dev->id[c][scmd_id(workreq)].curr_req = workreq;
+ dev->last_cmd[c] = scmd_id(workreq);
+ goto cmd_subp;
+ }
+ dev->quhd[c] = j;
+ dev->working[c]--;
+ dev->in_snd[c] = 0;
+ return;
+cmd_subp:
+ workport = dev->ioport[c];
+ tmport = workport + 0x1f;
+ if ((inb(tmport) & 0xb0) != 0) {
+ goto abortsnd;
+ }
+ tmport = workport + 0x1c;
+ if (inb(tmport) == 0) {
+ goto oktosend;
+ }
+abortsnd:
+#ifdef ED_DBGP
+ printk("Abort to Send\n");
+#endif
+ dev->last_cmd[c] |= 0x40;
+ dev->in_snd[c] = 0;
+ return;
+oktosend:
+#ifdef ED_DBGP
+ printk("OK to Send\n");
+ scmd_printk(KERN_DEBUG, workreq, "CDB");
+ for(i=0;i<workreq->cmd_len;i++) {
+ printk(" %x",workreq->cmnd[i]);
+ }
+ printk("\n");
+#endif
+ l = scsi_bufflen(workreq);
+
+ if (dev->dev_id == ATP885_DEVID) {
+ j = inb(dev->baseport + 0x29) & 0xfe;
+ outb(j, dev->baseport + 0x29);
+ dev->r1f[c][scmd_id(workreq)] = 0;
+ }
+
+ if (workreq->cmnd[0] == READ_CAPACITY) {
+ if (l > 8)
+ l = 8;
+ }
+ if (workreq->cmnd[0] == 0x00) {
+ l = 0;
+ }
+
+ tmport = workport + 0x1b;
+ j = 0;
+ target_id = scmd_id(workreq);
+
+ /*
+ * Wide ?
+ */
+ w = 1;
+ w = w << target_id;
+ if ((w & dev->wide_id[c]) != 0) {
+ j |= 0x01;
+ }
+ outb(j, tmport);
+ while ((inb(tmport) & 0x01) != j) {
+ outb(j,tmport);
+#ifdef ED_DBGP
+ printk("send_s870 while loop 1\n");
+#endif
+ }
+ /*
+ * Write the command
+ */
+
+ tmport = workport;
+ outb(workreq->cmd_len, tmport++);
+ outb(0x2c, tmport++);
+ if (dev->dev_id == ATP885_DEVID) {
+ outb(0x7f, tmport++);
+ } else {
+ outb(0xcf, tmport++);
+ }
+ for (i = 0; i < workreq->cmd_len; i++) {
+ outb(workreq->cmnd[i], tmport++);
+ }
+ tmport = workport + 0x0f;
+ outb(workreq->device->lun, tmport);
+ tmport += 0x02;
+ /*
+ * Write the target
+ */
+ outb(dev->id[c][target_id].devsp, tmport++);
+#ifdef ED_DBGP
+ printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
+#endif
+
+ sg_count = scsi_dma_map(workreq);
+ /*
+ * Write transfer size
+ */
+ outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++);
+ outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++);
+ outb((unsigned char) (((unsigned char *) (&l))[0]), tmport++);
+ j = target_id;
+ dev->id[c][j].last_len = l;
+ dev->id[c][j].tran_len = 0;
+#ifdef ED_DBGP
+ printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len);
+#endif
+ /*
+ * Flip the wide bits
+ */
+ if ((j & 0x08) != 0) {
+ j = (j & 0x07) | 0x40;
+ }
+ /*
+ * Check transfer direction
+ */
+ if (workreq->sc_data_direction == DMA_TO_DEVICE) {
+ outb((unsigned char) (j | 0x20), tmport++);
+ } else {
+ outb(j, tmport++);
+ }
+ outb((unsigned char) (inb(tmport) | 0x80), tmport);
+ outb(0x80, tmport);
+ tmport = workport + 0x1c;
+ dev->id[c][target_id].dirct = 0;
+ if (l == 0) {
+ if (inb(tmport) == 0) {
+ tmport = workport + 0x18;
+#ifdef ED_DBGP
+ printk("change SCSI_CMD_REG 0x08\n");
+#endif
+ outb(0x08, tmport);
+ } else {
+ dev->last_cmd[c] |= 0x40;
+ }
+ dev->in_snd[c] = 0;
+ return;
+ }
+ tmpcip = dev->pciport[c];
+ prd = dev->id[c][target_id].prd_table;
+ dev->id[c][target_id].prd_pos = prd;
+
+ /*
+ * Now write the request list. Either as scatter/gather or as
+ * a linear chain.
+ */
+
+ if (l) {
+ struct scatterlist *sgpnt;
+ i = 0;
+ scsi_for_each_sg(workreq, sgpnt, sg_count, j) {
+ bttl = sg_dma_address(sgpnt);
+ l=sg_dma_len(sgpnt);
+#ifdef ED_DBGP
+ printk("1. bttl %x, l %x\n",bttl, l);
+#endif
+ while (l > 0x10000) {
+ (((u16 *) (prd))[i + 3]) = 0x0000;
+ (((u16 *) (prd))[i + 2]) = 0x0000;
+ (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
+ l -= 0x10000;
+ bttl += 0x10000;
+ i += 0x04;
+ }
+ (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
+ (((u16 *) (prd))[i + 2]) = cpu_to_le16(l);
+ (((u16 *) (prd))[i + 3]) = 0;
+ i += 0x04;
+ }
+ (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000);
+#ifdef ED_DBGP
+ printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
+ printk("2. bttl %x, l %x\n",bttl, l);
+#endif
+ }
+ tmpcip += 4;
+#ifdef ED_DBGP
+ printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id);
+#endif
+ dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
+ outl(dev->id[c][target_id].prdaddr, tmpcip);
+ tmpcip = tmpcip - 2;
+ outb(0x06, tmpcip);
+ outb(0x00, tmpcip);
+ if (dev->dev_id == ATP885_DEVID) {
+ tmpcip--;
+ j=inb(tmpcip) & 0xf3;
+ if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) ||
+ (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ j |= 0x0c;
+ }
+ outb(j,tmpcip);
+ tmpcip--;
+ } else if ((dev->dev_id == ATP880_DEVID1) ||
+ (dev->dev_id == ATP880_DEVID2)) {
+ tmpcip =tmpcip -2;
+ tmport = workport - 0x05;
+ if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ outb((unsigned char) ((inb(tmport) & 0x3f) | 0xc0), tmport);
+ } else {
+ outb((unsigned char) (inb(tmport) & 0x3f), tmport);
+ }
+ } else {
+ tmpcip =tmpcip -2;
+ tmport = workport + 0x3a;
+ if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ outb((inb(tmport) & 0xf3) | 0x08, tmport);
+ } else {
+ outb(inb(tmport) & 0xf3, tmport);
+ }
+ }
+ tmport = workport + 0x1c;
+
+ if(workreq->sc_data_direction == DMA_TO_DEVICE) {
+ dev->id[c][target_id].dirct = 0x20;
+ if (inb(tmport) == 0) {
+ tmport = workport + 0x18;
+ outb(0x08, tmport);
+ outb(0x01, tmpcip);
+#ifdef ED_DBGP
+ printk( "start DMA(to target)\n");
+#endif
+ } else {
+ dev->last_cmd[c] |= 0x40;
+ }
+ dev->in_snd[c] = 0;
+ return;
+ }
+ if (inb(tmport) == 0) {
+ tmport = workport + 0x18;
+ outb(0x08, tmport);
+ outb(0x09, tmpcip);
+#ifdef ED_DBGP
+ printk( "start DMA(to host)\n");
+#endif
+ } else {
+ dev->last_cmd[c] |= 0x40;
+ }
+ dev->in_snd[c] = 0;
+ return;
+
+}
+
+static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val)
+{
+ unsigned int tmport;
+ unsigned short int i, k;
+ unsigned char j;
+
+ tmport = dev->ioport[0] + 0x1c;
+ outw(*val, tmport);
+FUN_D7:
+ for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */
+ k = inw(tmport);
+ j = (unsigned char) (k >> 8);
+ if ((k & 0x8000) != 0) { /* DB7 all release? */
+ goto FUN_D7;
+ }
+ }
+ *val |= 0x4000; /* assert DB6 */
+ outw(*val, tmport);
+ *val &= 0xdfff; /* assert DB5 */
+ outw(*val, tmport);
+FUN_D5:
+ for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */
+ if ((inw(tmport) & 0x2000) != 0) { /* DB5 all release? */
+ goto FUN_D5;
+ }
+ }
+ *val |= 0x8000; /* no DB4-0, assert DB7 */
+ *val &= 0xe0ff;
+ outw(*val, tmport);
+ *val &= 0xbfff; /* release DB6 */
+ outw(*val, tmport);
+FUN_D6:
+ for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */
+ if ((inw(tmport) & 0x4000) != 0) { /* DB6 all release? */
+ goto FUN_D6;
+ }
+ }
+
+ return j;
+}
+
+static void tscam(struct Scsi_Host *host)
+{
+
+ unsigned int tmport;
+ unsigned char i, j, k;
+ unsigned long n;
+ unsigned short int m, assignid_map, val;
+ unsigned char mbuf[33], quintet[2];
+ struct atp_unit *dev = (struct atp_unit *)&host->hostdata;
+ static unsigned char g2q_tab[8] = {
+ 0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27
+ };
+
+/* I can't believe we need this before we've even done anything. Remove it
+ * and see if anyone bitches.
+ for (i = 0; i < 0x10; i++) {
+ udelay(0xffff);
+ }
+ */
+
+ tmport = dev->ioport[0] + 1;
+ outb(0x08, tmport++);
+ outb(0x7f, tmport);
+ tmport = dev->ioport[0] + 0x11;
+ outb(0x20, tmport);
+
+ if ((dev->scam_on & 0x40) == 0) {
+ return;
+ }
+ m = 1;
+ m <<= dev->host_id[0];
+ j = 16;
+ if (dev->chip_ver < 4) {
+ m |= 0xff00;
+ j = 8;
+ }
+ assignid_map = m;
+ tmport = dev->ioport[0] + 0x02;
+ outb(0x02, tmport++); /* 2*2=4ms,3EH 2/32*3E=3.9ms */
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+
+ for (i = 0; i < j; i++) {
+ m = 1;
+ m = m << i;
+ if ((m & assignid_map) != 0) {
+ continue;
+ }
+ tmport = dev->ioport[0] + 0x0f;
+ outb(0, tmport++);
+ tmport += 0x02;
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ if (i > 7) {
+ k = (i & 0x07) | 0x40;
+ } else {
+ k = i;
+ }
+ outb(k, tmport++);
+ tmport = dev->ioport[0] + 0x1b;
+ if (dev->chip_ver == 4) {
+ outb(0x01, tmport);
+ } else {
+ outb(0x00, tmport);
+ }
+wait_rdyok:
+ tmport = dev->ioport[0] + 0x18;
+ outb(0x09, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ k = inb(tmport);
+ if (k != 0x16) {
+ if ((k == 0x85) || (k == 0x42)) {
+ continue;
+ }
+ tmport = dev->ioport[0] + 0x10;
+ outb(0x41, tmport);
+ goto wait_rdyok;
+ }
+ assignid_map |= m;
+
+ }
+ tmport = dev->ioport[0] + 0x02;
+ outb(0x7f, tmport);
+ tmport = dev->ioport[0] + 0x1b;
+ outb(0x02, tmport);
+
+ outb(0, 0x80);
+
+ val = 0x0080; /* bsy */
+ tmport = dev->ioport[0] + 0x1c;
+ outw(val, tmport);
+ val |= 0x0040; /* sel */
+ outw(val, tmport);
+ val |= 0x0004; /* msg */
+ outw(val, tmport);
+ inb(0x80); /* 2 deskew delay(45ns*2=90ns) */
+ val &= 0x007f; /* no bsy */
+ outw(val, tmport);
+ mdelay(128);
+ val &= 0x00fb; /* after 1ms no msg */
+ outw(val, tmport);
+wait_nomsg:
+ if ((inb(tmport) & 0x04) != 0) {
+ goto wait_nomsg;
+ }
+ outb(1, 0x80);
+ udelay(100);
+ for (n = 0; n < 0x30000; n++) {
+ if ((inb(tmport) & 0x80) != 0) { /* bsy ? */
+ goto wait_io;
+ }
+ }
+ goto TCM_SYNC;
+wait_io:
+ for (n = 0; n < 0x30000; n++) {
+ if ((inb(tmport) & 0x81) == 0x0081) {
+ goto wait_io1;
+ }
+ }
+ goto TCM_SYNC;
+wait_io1:
+ inb(0x80);
+ val |= 0x8003; /* io,cd,db7 */
+ outw(val, tmport);
+ inb(0x80);
+ val &= 0x00bf; /* no sel */
+ outw(val, tmport);
+ outb(2, 0x80);
+TCM_SYNC:
+ /*
+ * The funny division into multiple delays is to accomodate
+ * arches like ARM where udelay() multiplies its argument by
+ * a large number to initialize a loop counter. To avoid
+ * overflow, the maximum supported udelay is 2000 microseconds.
+ *
+ * XXX it would be more polite to find a way to use msleep()
+ */
+ mdelay(2);
+ udelay(48);
+ if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
+ outw(0, tmport--);
+ outb(0, tmport);
+ tmport = dev->ioport[0] + 0x15;
+ outb(0, tmport);
+ tmport += 0x03;
+ outb(0x09, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0)
+ cpu_relax();
+ tmport -= 0x08;
+ inb(tmport);
+ return;
+ }
+ val &= 0x00ff; /* synchronization */
+ val |= 0x3f00;
+ fun_scam(dev, &val);
+ outb(3, 0x80);
+ val &= 0x00ff; /* isolation */
+ val |= 0x2000;
+ fun_scam(dev, &val);
+ outb(4, 0x80);
+ i = 8;
+ j = 0;
+TCM_ID:
+ if ((inw(tmport) & 0x2000) == 0) {
+ goto TCM_ID;
+ }
+ outb(5, 0x80);
+ val &= 0x00ff; /* get ID_STRING */
+ val |= 0x2000;
+ k = fun_scam(dev, &val);
+ if ((k & 0x03) == 0) {
+ goto TCM_5;
+ }
+ mbuf[j] <<= 0x01;
+ mbuf[j] &= 0xfe;
+ if ((k & 0x02) != 0) {
+ mbuf[j] |= 0x01;
+ }
+ i--;
+ if (i > 0) {
+ goto TCM_ID;
+ }
+ j++;
+ i = 8;
+ goto TCM_ID;
+
+TCM_5: /* isolation complete.. */
+/* mbuf[32]=0;
+ printk(" \n%x %x %x %s\n ",assignid_map,mbuf[0],mbuf[1],&mbuf[2]); */
+ i = 15;
+ j = mbuf[0];
+ if ((j & 0x20) != 0) { /* bit5=1:ID up to 7 */
+ i = 7;
+ }
+ if ((j & 0x06) == 0) { /* IDvalid? */
+ goto G2Q5;
+ }
+ k = mbuf[1];
+small_id:
+ m = 1;
+ m <<= k;
+ if ((m & assignid_map) == 0) {
+ goto G2Q_QUIN;
+ }
+ if (k > 0) {
+ k--;
+ goto small_id;
+ }
+G2Q5: /* srch from max acceptable ID# */
+ k = i; /* max acceptable ID# */
+G2Q_LP:
+ m = 1;
+ m <<= k;
+ if ((m & assignid_map) == 0) {
+ goto G2Q_QUIN;
+ }
+ if (k > 0) {
+ k--;
+ goto G2Q_LP;
+ }
+G2Q_QUIN: /* k=binID#, */
+ assignid_map |= m;
+ if (k < 8) {
+ quintet[0] = 0x38; /* 1st dft ID<8 */
+ } else {
+ quintet[0] = 0x31; /* 1st ID>=8 */
+ }
+ k &= 0x07;
+ quintet[1] = g2q_tab[k];
+
+ val &= 0x00ff; /* AssignID 1stQuintet,AH=001xxxxx */
+ m = quintet[0] << 8;
+ val |= m;
+ fun_scam(dev, &val);
+ val &= 0x00ff; /* AssignID 2ndQuintet,AH=001xxxxx */
+ m = quintet[1] << 8;
+ val |= m;
+ fun_scam(dev, &val);
+
+ goto TCM_SYNC;
+
+}
+
+static void is870(struct atp_unit *dev, unsigned int wkport)
+{
+ unsigned int tmport;
+ unsigned char i, j, k, rmb, n;
+ unsigned short int m;
+ static unsigned char mbuf[512];
+ static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 };
+ static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 };
+ static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
+ static unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0c, 0x0e };
+ static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 };
+ static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 };
+
+ tmport = wkport + 0x3a;
+ outb((unsigned char) (inb(tmport) | 0x10), tmport);
+
+ for (i = 0; i < 16; i++) {
+ if ((dev->chip_ver != 4) && (i > 7)) {
+ break;
+ }
+ m = 1;
+ m = m << i;
+ if ((m & dev->active_id[0]) != 0) {
+ continue;
+ }
+ if (i == dev->host_id[0]) {
+ printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[0]);
+ continue;
+ }
+ tmport = wkport + 0x1b;
+ if (dev->chip_ver == 4) {
+ outb(0x01, tmport);
+ } else {
+ outb(0x00, tmport);
+ }
+ tmport = wkport + 1;
+ outb(0x08, tmport++);
+ outb(0x7f, tmport++);
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ j = i;
+ if ((j & 0x08) != 0) {
+ j = (j & 0x07) | 0x40;
+ }
+ outb(j, tmport);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+ dev->active_id[0] |= m;
+
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+ tmport = wkport + 0x04;
+ outb(0x00, tmport);
+
+phase_cmd:
+ tmport = wkport + 0x18;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ tmport = wkport + 0x10;
+ outb(0x41, tmport);
+ goto phase_cmd;
+ }
+sel_ok:
+ tmport = wkport + 3;
+ outb(inqd[0], tmport++);
+ outb(inqd[1], tmport++);
+ outb(inqd[2], tmport++);
+ outb(inqd[3], tmport++);
+ outb(inqd[4], tmport++);
+ outb(inqd[5], tmport);
+ tmport += 0x07;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(inqd[6], tmport++);
+ outb(inqd[7], tmport++);
+ tmport += 0x03;
+ outb(inqd[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+ tmport = wkport + 0x1b;
+ if (dev->chip_ver == 4)
+ outb(0x00, tmport);
+
+ tmport = wkport + 0x18;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ j = 0;
+rd_inq_data:
+ k = inb(tmport);
+ if ((k & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[j++] = inb(tmport);
+ tmport += 0x06;
+ goto rd_inq_data;
+ }
+ if ((k & 0x80) == 0) {
+ goto rd_inq_data;
+ }
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j == 0x16) {
+ goto inq_ok;
+ }
+ tmport = wkport + 0x10;
+ outb(0x46, tmport);
+ tmport += 0x02;
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ tmport += 0x03;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x16) {
+ goto sel_ok;
+ }
+inq_ok:
+ mbuf[36] = 0;
+ printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]);
+ dev->id[0][i].devtype = mbuf[0];
+ rmb = mbuf[1];
+ n = mbuf[7];
+ if (dev->chip_ver != 4) {
+ goto not_wide;
+ }
+ if ((mbuf[7] & 0x60) == 0) {
+ goto not_wide;
+ }
+ if ((dev->global_map[0] & 0x20) == 0) {
+ goto not_wide;
+ }
+ tmport = wkport + 0x1b;
+ outb(0x01, tmport);
+ tmport = wkport + 3;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+try_wide:
+ j = 0;
+ tmport = wkport + 0x14;
+ outb(0x05, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(wide[j++], tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto try_wide;
+ }
+ continue;
+widep_out:
+ tmport = wkport + 0x18;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(0, tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto widep_out;
+ }
+ continue;
+widep_in:
+ tmport = wkport + 0x14;
+ outb(0xff, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+widep_in1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto widep_in1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto widep_in1;
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto widep_out;
+ }
+ continue;
+widep_cmd:
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+ tmport = wkport + 0x14;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ if (j == 0x4e) {
+ goto widep_out;
+ }
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ goto not_wide;
+ }
+ if (mbuf[1] != 0x02) {
+ goto not_wide;
+ }
+ if (mbuf[2] != 0x03) {
+ goto not_wide;
+ }
+ if (mbuf[3] != 0x01) {
+ goto not_wide;
+ }
+ m = 1;
+ m = m << i;
+ dev->wide_id[0] |= m;
+not_wide:
+ if ((dev->id[0][i].devtype == 0x00) || (dev->id[0][i].devtype == 0x07) || ((dev->id[0][i].devtype == 0x05) && ((n & 0x10) != 0))) {
+ goto set_sync;
+ }
+ continue;
+set_sync:
+ tmport = wkport + 0x1b;
+ j = 0;
+ if ((m & dev->wide_id[0]) != 0) {
+ j |= 0x01;
+ }
+ outb(j, tmport);
+ tmport = wkport + 3;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+try_sync:
+ j = 0;
+ tmport = wkport + 0x14;
+ outb(0x06, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ if ((m & dev->wide_id[0]) != 0) {
+ outb(synw[j++], tmport);
+ } else {
+ if ((m & dev->ultra_map[0]) != 0) {
+ outb(synu[j++], tmport);
+ } else {
+ outb(synn[j++], tmport);
+ }
+ }
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto try_sync;
+ }
+ continue;
+phase_outs:
+ tmport = wkport + 0x18;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00) {
+ if ((inb(tmport) & 0x01) != 0x00) {
+ tmport -= 0x06;
+ outb(0x00, tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j == 0x85) {
+ goto tar_dcons;
+ }
+ j &= 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto phase_outs;
+ }
+ continue;
+phase_ins:
+ tmport = wkport + 0x14;
+ outb(0xff, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+phase_ins1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0x00) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto phase_ins1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto phase_ins1;
+ }
+ tmport -= 0x08;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport);
+ if (j == 0x85) {
+ goto tar_dcons;
+ }
+ j &= 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto phase_outs;
+ }
+ continue;
+phase_cmds:
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+tar_dcons:
+ tmport = wkport + 0x14;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ continue;
+ }
+ if (mbuf[1] != 0x03) {
+ continue;
+ }
+ if (mbuf[4] == 0x00) {
+ continue;
+ }
+ if (mbuf[3] > 0x64) {
+ continue;
+ }
+ if (mbuf[4] > 0x0c) {
+ mbuf[4] = 0x0c;
+ }
+ dev->id[0][i].devsp = mbuf[4];
+ if ((mbuf[3] < 0x0d) && (rmb == 0)) {
+ j = 0xa0;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x1a) {
+ j = 0x20;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x33) {
+ j = 0x40;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x4c) {
+ j = 0x50;
+ goto set_syn_ok;
+ }
+ j = 0x60;
+set_syn_ok:
+ dev->id[0][i].devsp = (dev->id[0][i].devsp & 0x0f) | j;
+ }
+ tmport = wkport + 0x3a;
+ outb((unsigned char) (inb(tmport) & 0xef), tmport);
+}
+
+static void is880(struct atp_unit *dev, unsigned int wkport)
+{
+ unsigned int tmport;
+ unsigned char i, j, k, rmb, n, lvdmode;
+ unsigned short int m;
+ static unsigned char mbuf[512];
+ static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 };
+ static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 };
+ static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
+ unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e };
+ static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
+ unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e };
+ static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 };
+ static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 };
+
+ lvdmode = inb(wkport + 0x3f) & 0x40;
+
+ for (i = 0; i < 16; i++) {
+ m = 1;
+ m = m << i;
+ if ((m & dev->active_id[0]) != 0) {
+ continue;
+ }
+ if (i == dev->host_id[0]) {
+ printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[0]);
+ continue;
+ }
+ tmport = wkport + 0x5b;
+ outb(0x01, tmport);
+ tmport = wkport + 0x41;
+ outb(0x08, tmport++);
+ outb(0x7f, tmport++);
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ j = i;
+ if ((j & 0x08) != 0) {
+ j = (j & 0x07) | 0x40;
+ }
+ outb(j, tmport);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+ dev->active_id[0] |= m;
+
+ tmport = wkport + 0x50;
+ outb(0x30, tmport);
+ tmport = wkport + 0x54;
+ outb(0x00, tmport);
+
+phase_cmd:
+ tmport = wkport + 0x58;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ tmport = wkport + 0x50;
+ outb(0x41, tmport);
+ goto phase_cmd;
+ }
+sel_ok:
+ tmport = wkport + 0x43;
+ outb(inqd[0], tmport++);
+ outb(inqd[1], tmport++);
+ outb(inqd[2], tmport++);
+ outb(inqd[3], tmport++);
+ outb(inqd[4], tmport++);
+ outb(inqd[5], tmport);
+ tmport += 0x07;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(inqd[6], tmport++);
+ outb(inqd[7], tmport++);
+ tmport += 0x03;
+ outb(inqd[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+ tmport = wkport + 0x5b;
+ outb(0x00, tmport);
+ tmport = wkport + 0x58;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ j = 0;
+rd_inq_data:
+ k = inb(tmport);
+ if ((k & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[j++] = inb(tmport);
+ tmport += 0x06;
+ goto rd_inq_data;
+ }
+ if ((k & 0x80) == 0) {
+ goto rd_inq_data;
+ }
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j == 0x16) {
+ goto inq_ok;
+ }
+ tmport = wkport + 0x50;
+ outb(0x46, tmport);
+ tmport += 0x02;
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ tmport += 0x03;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x16)
+ goto sel_ok;
+
+inq_ok:
+ mbuf[36] = 0;
+ printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]);
+ dev->id[0][i].devtype = mbuf[0];
+ rmb = mbuf[1];
+ n = mbuf[7];
+ if ((mbuf[7] & 0x60) == 0) {
+ goto not_wide;
+ }
+ if ((i < 8) && ((dev->global_map[0] & 0x20) == 0)) {
+ goto not_wide;
+ }
+ if (lvdmode == 0) {
+ goto chg_wide;
+ }
+ if (dev->sp[0][i] != 0x04) // force u2
+ {
+ goto chg_wide;
+ }
+
+ tmport = wkport + 0x5b;
+ outb(0x01, tmport);
+ tmport = wkport + 0x43;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+try_u3:
+ j = 0;
+ tmport = wkport + 0x54;
+ outb(0x09, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(u3[j++], tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto u3p_in;
+ }
+ if (j == 0x0a) {
+ goto u3p_cmd;
+ }
+ if (j == 0x0e) {
+ goto try_u3;
+ }
+ continue;
+u3p_out:
+ tmport = wkport + 0x58;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(0, tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto u3p_in;
+ }
+ if (j == 0x0a) {
+ goto u3p_cmd;
+ }
+ if (j == 0x0e) {
+ goto u3p_out;
+ }
+ continue;
+u3p_in:
+ tmport = wkport + 0x54;
+ outb(0x09, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+u3p_in1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto u3p_in1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto u3p_in1;
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto u3p_in;
+ }
+ if (j == 0x0a) {
+ goto u3p_cmd;
+ }
+ if (j == 0x0e) {
+ goto u3p_out;
+ }
+ continue;
+u3p_cmd:
+ tmport = wkport + 0x50;
+ outb(0x30, tmport);
+ tmport = wkport + 0x54;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ if (j == 0x4e) {
+ goto u3p_out;
+ }
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ goto chg_wide;
+ }
+ if (mbuf[1] != 0x06) {
+ goto chg_wide;
+ }
+ if (mbuf[2] != 0x04) {
+ goto chg_wide;
+ }
+ if (mbuf[3] == 0x09) {
+ m = 1;
+ m = m << i;
+ dev->wide_id[0] |= m;
+ dev->id[0][i].devsp = 0xce;
+ continue;
+ }
+chg_wide:
+ tmport = wkport + 0x5b;
+ outb(0x01, tmport);
+ tmport = wkport + 0x43;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if (inb(tmport) != 0x11 && inb(tmport) != 0x8e)
+ continue;
+
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+try_wide:
+ j = 0;
+ tmport = wkport + 0x54;
+ outb(0x05, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(wide[j++], tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto try_wide;
+ }
+ continue;
+widep_out:
+ tmport = wkport + 0x58;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(0, tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto widep_out;
+ }
+ continue;
+widep_in:
+ tmport = wkport + 0x54;
+ outb(0xff, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+widep_in1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto widep_in1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto widep_in1;
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto widep_out;
+ }
+ continue;
+widep_cmd:
+ tmport = wkport + 0x50;
+ outb(0x30, tmport);
+ tmport = wkport + 0x54;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ if (j == 0x4e) {
+ goto widep_out;
+ }
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ goto not_wide;
+ }
+ if (mbuf[1] != 0x02) {
+ goto not_wide;
+ }
+ if (mbuf[2] != 0x03) {
+ goto not_wide;
+ }
+ if (mbuf[3] != 0x01) {
+ goto not_wide;
+ }
+ m = 1;
+ m = m << i;
+ dev->wide_id[0] |= m;
+not_wide:
+ if ((dev->id[0][i].devtype == 0x00) || (dev->id[0][i].devtype == 0x07) || ((dev->id[0][i].devtype == 0x05) && ((n & 0x10) != 0))) {
+ m = 1;
+ m = m << i;
+ if ((dev->async[0] & m) != 0) {
+ goto set_sync;
+ }
+ }
+ continue;
+set_sync:
+ if (dev->sp[0][i] == 0x02) {
+ synu[4] = 0x0c;
+ synuw[4] = 0x0c;
+ } else {
+ if (dev->sp[0][i] >= 0x03) {
+ synu[4] = 0x0a;
+ synuw[4] = 0x0a;
+ }
+ }
+ tmport = wkport + 0x5b;
+ j = 0;
+ if ((m & dev->wide_id[0]) != 0) {
+ j |= 0x01;
+ }
+ outb(j, tmport);
+ tmport = wkport + 0x43;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[0][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) {
+ continue;
+ }
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+
+try_sync:
+ j = 0;
+ tmport = wkport + 0x54;
+ outb(0x06, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ if ((m & dev->wide_id[0]) != 0) {
+ if ((m & dev->ultra_map[0]) != 0) {
+ outb(synuw[j++], tmport);
+ } else {
+ outb(synw[j++], tmport);
+ }
+ } else {
+ if ((m & dev->ultra_map[0]) != 0) {
+ outb(synu[j++], tmport);
+ } else {
+ outb(synn[j++], tmport);
+ }
+ }
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto try_sync;
+ }
+ continue;
+phase_outs:
+ tmport = wkport + 0x58;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00) {
+ if ((inb(tmport) & 0x01) != 0x00) {
+ tmport -= 0x06;
+ outb(0x00, tmport);
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j == 0x85) {
+ goto tar_dcons;
+ }
+ j &= 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto phase_outs;
+ }
+ continue;
+phase_ins:
+ tmport = wkport + 0x54;
+ outb(0x06, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+phase_ins1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0x00) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto phase_ins1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto phase_ins1;
+ }
+ tmport -= 0x08;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ j = inb(tmport);
+ if (j == 0x85) {
+ goto tar_dcons;
+ }
+ j &= 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto phase_outs;
+ }
+ continue;
+phase_cmds:
+ tmport = wkport + 0x50;
+ outb(0x30, tmport);
+tar_dcons:
+ tmport = wkport + 0x54;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ continue;
+ }
+ if (mbuf[1] != 0x03) {
+ continue;
+ }
+ if (mbuf[4] == 0x00) {
+ continue;
+ }
+ if (mbuf[3] > 0x64) {
+ continue;
+ }
+ if (mbuf[4] > 0x0e) {
+ mbuf[4] = 0x0e;
+ }
+ dev->id[0][i].devsp = mbuf[4];
+ if (mbuf[3] < 0x0c) {
+ j = 0xb0;
+ goto set_syn_ok;
+ }
+ if ((mbuf[3] < 0x0d) && (rmb == 0)) {
+ j = 0xa0;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x1a) {
+ j = 0x20;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x33) {
+ j = 0x40;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x4c) {
+ j = 0x50;
+ goto set_syn_ok;
+ }
+ j = 0x60;
+set_syn_ok:
+ dev->id[0][i].devsp = (dev->id[0][i].devsp & 0x0f) | j;
+ }
+}
+
+static void atp870u_free_tables(struct Scsi_Host *host)
+{
+ struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata;
+ int j, k;
+ for (j=0; j < 2; j++) {
+ for (k = 0; k < 16; k++) {
+ if (!atp_dev->id[j][k].prd_table)
+ continue;
+ pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+ atp_dev->id[j][k].prd_table = NULL;
+ }
+ }
+}
+
+static int atp870u_init_tables(struct Scsi_Host *host)
+{
+ struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata;
+ int c,k;
+ for(c=0;c < 2;c++) {
+ for(k=0;k<16;k++) {
+ atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
+ if (!atp_dev->id[c][k].prd_table) {
+ printk("atp870u_init_tables fail\n");
+ atp870u_free_tables(host);
+ return -ENOMEM;
+ }
+ atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
+ atp_dev->id[c][k].devsp=0x20;
+ atp_dev->id[c][k].devtype = 0x7f;
+ atp_dev->id[c][k].curr_req = NULL;
+ }
+
+ atp_dev->active_id[c] = 0;
+ atp_dev->wide_id[c] = 0;
+ atp_dev->host_id[c] = 0x07;
+ atp_dev->quhd[c] = 0;
+ atp_dev->quend[c] = 0;
+ atp_dev->last_cmd[c] = 0xff;
+ atp_dev->in_snd[c] = 0;
+ atp_dev->in_int[c] = 0;
+
+ for (k = 0; k < qcnt; k++) {
+ atp_dev->quereq[c][k] = NULL;
+ }
+ for (k = 0; k < 16; k++) {
+ atp_dev->id[c][k].curr_req = NULL;
+ atp_dev->sp[c][k] = 0x04;
+ }
+ }
+ return 0;
+}
+
+/* return non-zero on detection */
+static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned char k, m, c;
+ unsigned long flags;
+ unsigned int base_io, tmport, error,n;
+ unsigned char host_id;
+ struct Scsi_Host *shpnt = NULL;
+ struct atp_unit *atpdev, *p;
+ unsigned char setupdata[2][16];
+ int count = 0;
+
+ atpdev = kzalloc(sizeof(*atpdev), GFP_KERNEL);
+ if (!atpdev)
+ return -ENOMEM;
+
+ if (pci_enable_device(pdev))
+ goto err_eio;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_INFO "atp870u: use 32bit DMA mask.\n");
+ } else {
+ printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
+ goto err_eio;
+ }
+
+ /*
+ * It's probably easier to weed out some revisions like
+ * this than via the PCI device table
+ */
+ if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
+ atpdev->chip_ver = pdev->revision;
+ if (atpdev->chip_ver < 2)
+ goto err_eio;
+ }
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_ARTOP_AEC7612UW:
+ case PCI_DEVICE_ID_ARTOP_AEC7612SUW:
+ case ATP880_DEVID1:
+ case ATP880_DEVID2:
+ case ATP885_DEVID:
+ atpdev->chip_ver = 0x04;
+ default:
+ break;
+ }
+ base_io = pci_resource_start(pdev, 0);
+ base_io &= 0xfffffff8;
+
+ if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
+ atpdev->chip_ver = pdev->revision;
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
+
+ host_id = inb(base_io + 0x39);
+ host_id >>= 0x04;
+
+ printk(KERN_INFO " ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: %d"
+ " IO:%x, IRQ:%d.\n", count, base_io, pdev->irq);
+ atpdev->ioport[0] = base_io + 0x40;
+ atpdev->pciport[0] = base_io + 0x28;
+ atpdev->dev_id = ent->device;
+ atpdev->host_id[0] = host_id;
+
+ tmport = base_io + 0x22;
+ atpdev->scam_on = inb(tmport);
+ tmport += 0x13;
+ atpdev->global_map[0] = inb(tmport);
+ tmport += 0x07;
+ atpdev->ultra_map[0] = inw(tmport);
+
+ n = 0x3f09;
+next_fblk_880:
+ if (n >= 0x4000)
+ goto flash_ok_880;
+
+ m = 0;
+ outw(n, base_io + 0x34);
+ n += 0x0002;
+ if (inb(base_io + 0x30) == 0xff)
+ goto flash_ok_880;
+
+ atpdev->sp[0][m++] = inb(base_io + 0x30);
+ atpdev->sp[0][m++] = inb(base_io + 0x31);
+ atpdev->sp[0][m++] = inb(base_io + 0x32);
+ atpdev->sp[0][m++] = inb(base_io + 0x33);
+ outw(n, base_io + 0x34);
+ n += 0x0002;
+ atpdev->sp[0][m++] = inb(base_io + 0x30);
+ atpdev->sp[0][m++] = inb(base_io + 0x31);
+ atpdev->sp[0][m++] = inb(base_io + 0x32);
+ atpdev->sp[0][m++] = inb(base_io + 0x33);
+ outw(n, base_io + 0x34);
+ n += 0x0002;
+ atpdev->sp[0][m++] = inb(base_io + 0x30);
+ atpdev->sp[0][m++] = inb(base_io + 0x31);
+ atpdev->sp[0][m++] = inb(base_io + 0x32);
+ atpdev->sp[0][m++] = inb(base_io + 0x33);
+ outw(n, base_io + 0x34);
+ n += 0x0002;
+ atpdev->sp[0][m++] = inb(base_io + 0x30);
+ atpdev->sp[0][m++] = inb(base_io + 0x31);
+ atpdev->sp[0][m++] = inb(base_io + 0x32);
+ atpdev->sp[0][m++] = inb(base_io + 0x33);
+ n += 0x0018;
+ goto next_fblk_880;
+flash_ok_880:
+ outw(0, base_io + 0x34);
+ atpdev->ultra_map[0] = 0;
+ atpdev->async[0] = 0;
+ for (k = 0; k < 16; k++) {
+ n = 1;
+ n = n << k;
+ if (atpdev->sp[0][k] > 1) {
+ atpdev->ultra_map[0] |= n;
+ } else {
+ if (atpdev->sp[0][k] == 0)
+ atpdev->async[0] |= n;
+ }
+ }
+ atpdev->async[0] = ~(atpdev->async[0]);
+ outb(atpdev->global_map[0], base_io + 0x35);
+
+ shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
+ if (!shpnt)
+ goto err_nomem;
+
+ p = (struct atp_unit *)&shpnt->hostdata;
+
+ atpdev->host = shpnt;
+ atpdev->pdev = pdev;
+ pci_set_drvdata(pdev, p);
+ memcpy(p, atpdev, sizeof(*atpdev));
+ if (atp870u_init_tables(shpnt) < 0) {
+ printk(KERN_ERR "Unable to allocate tables for Acard controller\n");
+ goto unregister;
+ }
+
+ if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp880i", shpnt)) {
+ printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq);
+ goto free_tables;
+ }
+
+ spin_lock_irqsave(shpnt->host_lock, flags);
+ tmport = base_io + 0x38;
+ k = inb(tmport) & 0x80;
+ outb(k, tmport);
+ tmport += 0x03;
+ outb(0x20, tmport);
+ mdelay(32);
+ outb(0, tmport);
+ mdelay(32);
+ tmport = base_io + 0x5b;
+ inb(tmport);
+ tmport -= 0x04;
+ inb(tmport);
+ tmport = base_io + 0x40;
+ outb((host_id | 0x08), tmport);
+ tmport += 0x18;
+ outb(0, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0)
+ mdelay(1);
+ tmport -= 0x08;
+ inb(tmport);
+ tmport = base_io + 0x41;
+ outb(8, tmport++);
+ outb(0x7f, tmport);
+ tmport = base_io + 0x51;
+ outb(0x20, tmport);
+
+ tscam(shpnt);
+ is880(p, base_io);
+ tmport = base_io + 0x38;
+ outb(0xb0, tmport);
+ shpnt->max_id = 16;
+ shpnt->this_id = host_id;
+ shpnt->unique_id = base_io;
+ shpnt->io_port = base_io;
+ shpnt->n_io_port = 0x60; /* Number of bytes of I/O space used */
+ shpnt->irq = pdev->irq;
+ } else if (ent->device == ATP885_DEVID) {
+ printk(KERN_INFO " ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%x, IRQ:%d.\n"
+ , base_io, pdev->irq);
+
+ atpdev->pdev = pdev;
+ atpdev->dev_id = ent->device;
+ atpdev->baseport = base_io;
+ atpdev->ioport[0] = base_io + 0x80;
+ atpdev->ioport[1] = base_io + 0xc0;
+ atpdev->pciport[0] = base_io + 0x40;
+ atpdev->pciport[1] = base_io + 0x50;
+
+ shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
+ if (!shpnt)
+ goto err_nomem;
+
+ p = (struct atp_unit *)&shpnt->hostdata;
+
+ atpdev->host = shpnt;
+ atpdev->pdev = pdev;
+ pci_set_drvdata(pdev, p);
+ memcpy(p, atpdev, sizeof(struct atp_unit));
+ if (atp870u_init_tables(shpnt) < 0)
+ goto unregister;
+
+#ifdef ED_DBGP
+ printk("request_irq() shpnt %p hostdata %p\n", shpnt, p);
+#endif
+ if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt)) {
+ printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n");
+ goto free_tables;
+ }
+
+ spin_lock_irqsave(shpnt->host_lock, flags);
+
+ c=inb(base_io + 0x29);
+ outb((c | 0x04),base_io + 0x29);
+
+ n=0x1f80;
+next_fblk_885:
+ if (n >= 0x2000) {
+ goto flash_ok_885;
+ }
+ outw(n,base_io + 0x3c);
+ if (inl(base_io + 0x38) == 0xffffffff) {
+ goto flash_ok_885;
+ }
+ for (m=0; m < 2; m++) {
+ p->global_map[m]= 0;
+ for (k=0; k < 4; k++) {
+ outw(n++,base_io + 0x3c);
+ ((unsigned long *)&setupdata[m][0])[k]=inl(base_io + 0x38);
+ }
+ for (k=0; k < 4; k++) {
+ outw(n++,base_io + 0x3c);
+ ((unsigned long *)&p->sp[m][0])[k]=inl(base_io + 0x38);
+ }
+ n += 8;
+ }
+ goto next_fblk_885;
+flash_ok_885:
+#ifdef ED_DBGP
+ printk( "Flash Read OK\n");
+#endif
+ c=inb(base_io + 0x29);
+ outb((c & 0xfb),base_io + 0x29);
+ for (c=0;c < 2;c++) {
+ p->ultra_map[c]=0;
+ p->async[c] = 0;
+ for (k=0; k < 16; k++) {
+ n=1;
+ n = n << k;
+ if (p->sp[c][k] > 1) {
+ p->ultra_map[c] |= n;
+ } else {
+ if (p->sp[c][k] == 0) {
+ p->async[c] |= n;
+ }
+ }
+ }
+ p->async[c] = ~(p->async[c]);
+
+ if (p->global_map[c] == 0) {
+ k=setupdata[c][1];
+ if ((k & 0x40) != 0)
+ p->global_map[c] |= 0x20;
+ k &= 0x07;
+ p->global_map[c] |= k;
+ if ((setupdata[c][2] & 0x04) != 0)
+ p->global_map[c] |= 0x08;
+ p->host_id[c] = setupdata[c][0] & 0x07;
+ }
+ }
+
+ k = inb(base_io + 0x28) & 0x8f;
+ k |= 0x10;
+ outb(k, base_io + 0x28);
+ outb(0x80, base_io + 0x41);
+ outb(0x80, base_io + 0x51);
+ mdelay(100);
+ outb(0, base_io + 0x41);
+ outb(0, base_io + 0x51);
+ mdelay(1000);
+ inb(base_io + 0x9b);
+ inb(base_io + 0x97);
+ inb(base_io + 0xdb);
+ inb(base_io + 0xd7);
+ tmport = base_io + 0x80;
+ k=p->host_id[0];
+ if (k > 7)
+ k = (k & 0x07) | 0x40;
+ k |= 0x08;
+ outb(k, tmport);
+ tmport += 0x18;
+ outb(0, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0)
+ cpu_relax();
+
+ tmport -= 0x08;
+ inb(tmport);
+ tmport = base_io + 0x81;
+ outb(8, tmport++);
+ outb(0x7f, tmport);
+ tmport = base_io + 0x91;
+ outb(0x20, tmport);
+
+ tmport = base_io + 0xc0;
+ k=p->host_id[1];
+ if (k > 7)
+ k = (k & 0x07) | 0x40;
+ k |= 0x08;
+ outb(k, tmport);
+ tmport += 0x18;
+ outb(0, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0)
+ cpu_relax();
+
+ tmport -= 0x08;
+ inb(tmport);
+ tmport = base_io + 0xc1;
+ outb(8, tmport++);
+ outb(0x7f, tmport);
+ tmport = base_io + 0xd1;
+ outb(0x20, tmport);
+
+ tscam_885();
+ printk(KERN_INFO " Scanning Channel A SCSI Device ...\n");
+ is885(p, base_io + 0x80, 0);
+ printk(KERN_INFO " Scanning Channel B SCSI Device ...\n");
+ is885(p, base_io + 0xc0, 1);
+
+ k = inb(base_io + 0x28) & 0xcf;
+ k |= 0xc0;
+ outb(k, base_io + 0x28);
+ k = inb(base_io + 0x1f) | 0x80;
+ outb(k, base_io + 0x1f);
+ k = inb(base_io + 0x29) | 0x01;
+ outb(k, base_io + 0x29);
+#ifdef ED_DBGP
+ //printk("atp885: atp_host[0] 0x%p\n", atp_host[0]);
+#endif
+ shpnt->max_id = 16;
+ shpnt->max_lun = (p->global_map[0] & 0x07) + 1;
+ shpnt->max_channel = 1;
+ shpnt->this_id = p->host_id[0];
+ shpnt->unique_id = base_io;
+ shpnt->io_port = base_io;
+ shpnt->n_io_port = 0xff; /* Number of bytes of I/O space used */
+ shpnt->irq = pdev->irq;
+
+ } else {
+ error = pci_read_config_byte(pdev, 0x49, &host_id);
+
+ printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: %d "
+ "IO:%x, IRQ:%d.\n", count, base_io, pdev->irq);
+
+ atpdev->ioport[0] = base_io;
+ atpdev->pciport[0] = base_io + 0x20;
+ atpdev->dev_id = ent->device;
+ host_id &= 0x07;
+ atpdev->host_id[0] = host_id;
+ tmport = base_io + 0x22;
+ atpdev->scam_on = inb(tmport);
+ tmport += 0x0b;
+ atpdev->global_map[0] = inb(tmport++);
+ atpdev->ultra_map[0] = inw(tmport);
+
+ if (atpdev->ultra_map[0] == 0) {
+ atpdev->scam_on = 0x00;
+ atpdev->global_map[0] = 0x20;
+ atpdev->ultra_map[0] = 0xffff;
+ }
+
+ shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
+ if (!shpnt)
+ goto err_nomem;
+
+ p = (struct atp_unit *)&shpnt->hostdata;
+
+ atpdev->host = shpnt;
+ atpdev->pdev = pdev;
+ pci_set_drvdata(pdev, p);
+ memcpy(p, atpdev, sizeof(*atpdev));
+ if (atp870u_init_tables(shpnt) < 0)
+ goto unregister;
+
+ if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870i", shpnt)) {
+ printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq);
+ goto free_tables;
+ }
+
+ spin_lock_irqsave(shpnt->host_lock, flags);
+ if (atpdev->chip_ver > 0x07) { /* check if atp876 chip then enable terminator */
+ tmport = base_io + 0x3e;
+ outb(0x00, tmport);
+ }
+
+ tmport = base_io + 0x3a;
+ k = (inb(tmport) & 0xf3) | 0x10;
+ outb(k, tmport);
+ outb((k & 0xdf), tmport);
+ mdelay(32);
+ outb(k, tmport);
+ mdelay(32);
+ tmport = base_io;
+ outb((host_id | 0x08), tmport);
+ tmport += 0x18;
+ outb(0, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0)
+ mdelay(1);
+
+ tmport -= 0x08;
+ inb(tmport);
+ tmport = base_io + 1;
+ outb(8, tmport++);
+ outb(0x7f, tmport);
+ tmport = base_io + 0x11;
+ outb(0x20, tmport);
+
+ tscam(shpnt);
+ is870(p, base_io);
+ tmport = base_io + 0x3a;
+ outb((inb(tmport) & 0xef), tmport);
+ tmport++;
+ outb((inb(tmport) | 0x20), tmport);
+ if (atpdev->chip_ver == 4)
+ shpnt->max_id = 16;
+ else
+ shpnt->max_id = 8;
+ shpnt->this_id = host_id;
+ shpnt->unique_id = base_io;
+ shpnt->io_port = base_io;
+ shpnt->n_io_port = 0x40; /* Number of bytes of I/O space used */
+ shpnt->irq = pdev->irq;
+ }
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
+ if(ent->device==ATP885_DEVID) {
+ if(!request_region(base_io, 0xff, "atp870u")) /* Register the IO ports that we use */
+ goto request_io_fail;
+ } else if((ent->device==ATP880_DEVID1)||(ent->device==ATP880_DEVID2)) {
+ if(!request_region(base_io, 0x60, "atp870u")) /* Register the IO ports that we use */
+ goto request_io_fail;
+ } else {
+ if(!request_region(base_io, 0x40, "atp870u")) /* Register the IO ports that we use */
+ goto request_io_fail;
+ }
+ count++;
+ if (scsi_add_host(shpnt, &pdev->dev))
+ goto scsi_add_fail;
+ scsi_scan_host(shpnt);
+#ifdef ED_DBGP
+ printk("atp870u_prob : exit\n");
+#endif
+ return 0;
+
+scsi_add_fail:
+ printk("atp870u_prob:scsi_add_fail\n");
+ if(ent->device==ATP885_DEVID) {
+ release_region(base_io, 0xff);
+ } else if((ent->device==ATP880_DEVID1)||(ent->device==ATP880_DEVID2)) {
+ release_region(base_io, 0x60);
+ } else {
+ release_region(base_io, 0x40);
+ }
+request_io_fail:
+ printk("atp870u_prob:request_io_fail\n");
+ free_irq(pdev->irq, shpnt);
+free_tables:
+ printk("atp870u_prob:free_table\n");
+ atp870u_free_tables(shpnt);
+unregister:
+ printk("atp870u_prob:unregister\n");
+ scsi_host_put(shpnt);
+ return -1;
+err_eio:
+ kfree(atpdev);
+ return -EIO;
+err_nomem:
+ kfree(atpdev);
+ return -ENOMEM;
+}
+
+/* The abort command does not leave the device in a clean state where
+ it is available to be used again. Until this gets worked out, we will
+ leave it commented out. */
+
+static int atp870u_abort(struct scsi_cmnd * SCpnt)
+{
+ unsigned char j, k, c;
+ struct scsi_cmnd *workrequ;
+ unsigned int tmport;
+ struct atp_unit *dev;
+ struct Scsi_Host *host;
+ host = SCpnt->device->host;
+
+ dev = (struct atp_unit *)&host->hostdata;
+ c = scmd_channel(SCpnt);
+ printk(" atp870u: abort Channel = %x \n", c);
+ printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]);
+ printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]);
+ tmport = dev->ioport[c];
+ for (j = 0; j < 0x18; j++) {
+ printk(" r%2x=%2x", j, inb(tmport++));
+ }
+ tmport += 0x04;
+ printk(" r1c=%2x", inb(tmport));
+ tmport += 0x03;
+ printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd[c]);
+ tmport= dev->pciport[c];
+ printk(" d00=%2x", inb(tmport));
+ tmport += 0x02;
+ printk(" d02=%2x", inb(tmport));
+ for(j=0;j<16;j++) {
+ if (dev->id[c][j].curr_req != NULL) {
+ workrequ = dev->id[c][j].curr_req;
+ printk("\n que cdb= ");
+ for (k=0; k < workrequ->cmd_len; k++) {
+ printk(" %2x ",workrequ->cmnd[k]);
+ }
+ printk(" last_lenu= %x ",(unsigned int)dev->id[c][j].last_len);
+ }
+ }
+ return SUCCESS;
+}
+
+static const char *atp870u_info(struct Scsi_Host *notused)
+{
+ static char buffer[128];
+
+ strcpy(buffer, "ACARD AEC-6710/6712/67160 PCI Ultra/W/LVD SCSI-3 Adapter Driver V2.6+ac ");
+
+ return buffer;
+}
+
+static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
+{
+ seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n"
+ "Adapter Configuration:\n");
+ seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port);
+ seq_printf(m, " IRQ: %d\n", HBAptr->irq);
+ return 0;
+}
+
+
+static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev,
+ sector_t capacity, int *ip)
+{
+ int heads, sectors, cylinders;
+
+ heads = 64;
+ sectors = 32;
+ cylinders = (unsigned long)capacity / (heads * sectors);
+ if (cylinders > 1024) {
+ heads = 255;
+ sectors = 63;
+ cylinders = (unsigned long)capacity / (heads * sectors);
+ }
+ ip[0] = heads;
+ ip[1] = sectors;
+ ip[2] = cylinders;
+
+ return 0;
+}
+
+static void atp870u_remove (struct pci_dev *pdev)
+{
+ struct atp_unit *devext = pci_get_drvdata(pdev);
+ struct Scsi_Host *pshost = devext->host;
+
+
+ scsi_remove_host(pshost);
+ printk(KERN_INFO "free_irq : %d\n",pshost->irq);
+ free_irq(pshost->irq, pshost);
+ release_region(pshost->io_port, pshost->n_io_port);
+ printk(KERN_INFO "atp870u_free_tables : %p\n",pshost);
+ atp870u_free_tables(pshost);
+ printk(KERN_INFO "scsi_host_put : %p\n",pshost);
+ scsi_host_put(pshost);
+}
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template atp870u_template = {
+ .module = THIS_MODULE,
+ .name = "atp870u" /* name */,
+ .proc_name = "atp870u",
+ .show_info = atp870u_show_info,
+ .info = atp870u_info /* info */,
+ .queuecommand = atp870u_queuecommand /* queuecommand */,
+ .eh_abort_handler = atp870u_abort /* abort */,
+ .bios_param = atp870u_biosparam /* biosparm */,
+ .can_queue = qcnt /* can_queue */,
+ .this_id = 7 /* SCSI ID */,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
+ .cmd_per_lun = ATP870U_CMDLUN /* commands per lun */,
+ .use_clustering = ENABLE_CLUSTERING,
+ .max_sectors = ATP870U_MAX_SECTORS,
+};
+
+static struct pci_device_id atp870u_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612S) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612D) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612SUW) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_8060) },
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, atp870u_id_table);
+
+static struct pci_driver atp870u_driver = {
+ .id_table = atp870u_id_table,
+ .name = "atp870u",
+ .probe = atp870u_probe,
+ .remove = atp870u_remove,
+};
+
+static int __init atp870u_init(void)
+{
+#ifdef ED_DBGP
+ printk("atp870u_init: Entry\n");
+#endif
+ return pci_register_driver(&atp870u_driver);
+}
+
+static void __exit atp870u_exit(void)
+{
+#ifdef ED_DBGP
+ printk("atp870u_exit: Entry\n");
+#endif
+ pci_unregister_driver(&atp870u_driver);
+}
+
+static void tscam_885(void)
+{
+ unsigned char i;
+
+ for (i = 0; i < 0x2; i++) {
+ mdelay(300);
+ }
+ return;
+}
+
+
+
+static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c)
+{
+ unsigned int tmport;
+ unsigned char i, j, k, rmb, n, lvdmode;
+ unsigned short int m;
+ static unsigned char mbuf[512];
+ static unsigned char satn[9] = {0, 0, 0, 0, 0, 0, 0, 6, 6};
+ static unsigned char inqd[9] = {0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6};
+ static unsigned char synn[6] = {0x80, 1, 3, 1, 0x19, 0x0e};
+ unsigned char synu[6] = {0x80, 1, 3, 1, 0x0a, 0x0e};
+ static unsigned char synw[6] = {0x80, 1, 3, 1, 0x19, 0x0e};
+ unsigned char synuw[6] = {0x80, 1, 3, 1, 0x0a, 0x0e};
+ static unsigned char wide[6] = {0x80, 1, 2, 3, 1, 0};
+ static unsigned char u3[9] = { 0x80,1,6,4,0x09,00,0x0e,0x01,0x02 };
+
+ lvdmode=inb(wkport + 0x1b) >> 7;
+
+ for (i = 0; i < 16; i++) {
+ m = 1;
+ m = m << i;
+ if ((m & dev->active_id[c]) != 0) {
+ continue;
+ }
+ if (i == dev->host_id[c]) {
+ printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]);
+ continue;
+ }
+ tmport = wkport + 0x1b;
+ outb(0x01, tmport);
+ tmport = wkport + 0x01;
+ outb(0x08, tmport++);
+ outb(0x7f, tmport++);
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[c][i].devsp, tmport++);
+
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ j = i;
+ if ((j & 0x08) != 0) {
+ j = (j & 0x07) | 0x40;
+ }
+ outb(j, tmport);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) {
+ continue;
+ }
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+ dev->active_id[c] |= m;
+
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+ tmport = wkport + 0x14;
+ outb(0x00, tmport);
+
+phase_cmd:
+ tmport = wkport + 0x18;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ tmport = wkport + 0x10;
+ outb(0x41, tmport);
+ goto phase_cmd;
+ }
+sel_ok:
+ tmport = wkport + 0x03;
+ outb(inqd[0], tmport++);
+ outb(inqd[1], tmport++);
+ outb(inqd[2], tmport++);
+ outb(inqd[3], tmport++);
+ outb(inqd[4], tmport++);
+ outb(inqd[5], tmport);
+ tmport += 0x07;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[c][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(inqd[6], tmport++);
+ outb(inqd[7], tmport++);
+ tmport += 0x03;
+ outb(inqd[8], tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) {
+ continue;
+ }
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+ tmport = wkport + 0x1b;
+ outb(0x00, tmport);
+ tmport = wkport + 0x18;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ j = 0;
+rd_inq_data:
+ k = inb(tmport);
+ if ((k & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[j++] = inb(tmport);
+ tmport += 0x06;
+ goto rd_inq_data;
+ }
+ if ((k & 0x80) == 0) {
+ goto rd_inq_data;
+ }
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j == 0x16) {
+ goto inq_ok;
+ }
+ tmport = wkport + 0x10;
+ outb(0x46, tmport);
+ tmport += 0x02;
+ outb(0, tmport++);
+ outb(0, tmport++);
+ outb(0, tmport++);
+ tmport += 0x03;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ if (inb(tmport) != 0x16) {
+ goto sel_ok;
+ }
+inq_ok:
+ mbuf[36] = 0;
+ printk( KERN_INFO" ID: %2d %s\n", i, &mbuf[8]);
+ dev->id[c][i].devtype = mbuf[0];
+ rmb = mbuf[1];
+ n = mbuf[7];
+ if ((mbuf[7] & 0x60) == 0) {
+ goto not_wide;
+ }
+ if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) {
+ goto not_wide;
+ }
+ if (lvdmode == 0) {
+ goto chg_wide;
+ }
+ if (dev->sp[c][i] != 0x04) { // force u2
+ goto chg_wide;
+ }
+
+ tmport = wkport + 0x1b;
+ outb(0x01, tmport);
+ tmport = wkport + 0x03;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[c][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) {
+ continue;
+ }
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+try_u3:
+ j = 0;
+ tmport = wkport + 0x14;
+ outb(0x09, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(u3[j++], tmport);
+ tmport += 0x06;
+ }
+ cpu_relax();
+ }
+ tmport -= 0x08;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto u3p_in;
+ }
+ if (j == 0x0a) {
+ goto u3p_cmd;
+ }
+ if (j == 0x0e) {
+ goto try_u3;
+ }
+ continue;
+u3p_out:
+ tmport = wkport + 0x18;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(0, tmport);
+ tmport += 0x06;
+ }
+ cpu_relax();
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto u3p_in;
+ }
+ if (j == 0x0a) {
+ goto u3p_cmd;
+ }
+ if (j == 0x0e) {
+ goto u3p_out;
+ }
+ continue;
+u3p_in:
+ tmport = wkport + 0x14;
+ outb(0x09, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+u3p_in1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto u3p_in1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto u3p_in1;
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto u3p_in;
+ }
+ if (j == 0x0a) {
+ goto u3p_cmd;
+ }
+ if (j == 0x0e) {
+ goto u3p_out;
+ }
+ continue;
+u3p_cmd:
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+ tmport = wkport + 0x14;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00);
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ if (j == 0x4e) {
+ goto u3p_out;
+ }
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ goto chg_wide;
+ }
+ if (mbuf[1] != 0x06) {
+ goto chg_wide;
+ }
+ if (mbuf[2] != 0x04) {
+ goto chg_wide;
+ }
+ if (mbuf[3] == 0x09) {
+ m = 1;
+ m = m << i;
+ dev->wide_id[c] |= m;
+ dev->id[c][i].devsp = 0xce;
+#ifdef ED_DBGP
+ printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
+#endif
+ continue;
+ }
+chg_wide:
+ tmport = wkport + 0x1b;
+ outb(0x01, tmport);
+ tmport = wkport + 0x03;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[c][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) {
+ continue;
+ }
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+try_wide:
+ j = 0;
+ tmport = wkport + 0x14;
+ outb(0x05, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(wide[j++], tmport);
+ tmport += 0x06;
+ }
+ cpu_relax();
+ }
+ tmport -= 0x08;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto try_wide;
+ }
+ continue;
+widep_out:
+ tmport = wkport + 0x18;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ outb(0, tmport);
+ tmport += 0x06;
+ }
+ cpu_relax();
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto widep_out;
+ }
+ continue;
+widep_in:
+ tmport = wkport + 0x14;
+ outb(0xff, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+widep_in1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto widep_in1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto widep_in1;
+ }
+ tmport -= 0x08;
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto widep_in;
+ }
+ if (j == 0x0a) {
+ goto widep_cmd;
+ }
+ if (j == 0x0e) {
+ goto widep_out;
+ }
+ continue;
+widep_cmd:
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+ tmport = wkport + 0x14;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ if (j == 0x4e) {
+ goto widep_out;
+ }
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ goto not_wide;
+ }
+ if (mbuf[1] != 0x02) {
+ goto not_wide;
+ }
+ if (mbuf[2] != 0x03) {
+ goto not_wide;
+ }
+ if (mbuf[3] != 0x01) {
+ goto not_wide;
+ }
+ m = 1;
+ m = m << i;
+ dev->wide_id[c] |= m;
+not_wide:
+ if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) ||
+ ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) {
+ m = 1;
+ m = m << i;
+ if ((dev->async[c] & m) != 0) {
+ goto set_sync;
+ }
+ }
+ continue;
+set_sync:
+ if (dev->sp[c][i] == 0x02) {
+ synu[4]=0x0c;
+ synuw[4]=0x0c;
+ } else {
+ if (dev->sp[c][i] >= 0x03) {
+ synu[4]=0x0a;
+ synuw[4]=0x0a;
+ }
+ }
+ tmport = wkport + 0x1b;
+ j = 0;
+ if ((m & dev->wide_id[c]) != 0) {
+ j |= 0x01;
+ }
+ outb(j, tmport);
+ tmport = wkport + 0x03;
+ outb(satn[0], tmport++);
+ outb(satn[1], tmport++);
+ outb(satn[2], tmport++);
+ outb(satn[3], tmport++);
+ outb(satn[4], tmport++);
+ outb(satn[5], tmport++);
+ tmport += 0x06;
+ outb(0, tmport);
+ tmport += 0x02;
+ outb(dev->id[c][i].devsp, tmport++);
+ outb(0, tmport++);
+ outb(satn[6], tmport++);
+ outb(satn[7], tmport++);
+ tmport += 0x03;
+ outb(satn[8], tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) {
+ continue;
+ }
+ while (inb(tmport) != 0x8e)
+ cpu_relax();
+try_sync:
+ j = 0;
+ tmport = wkport + 0x14;
+ outb(0x06, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+
+ while ((inb(tmport) & 0x80) == 0) {
+ if ((inb(tmport) & 0x01) != 0) {
+ tmport -= 0x06;
+ if ((m & dev->wide_id[c]) != 0) {
+ if ((m & dev->ultra_map[c]) != 0) {
+ outb(synuw[j++], tmport);
+ } else {
+ outb(synw[j++], tmport);
+ }
+ } else {
+ if ((m & dev->ultra_map[c]) != 0) {
+ outb(synu[j++], tmport);
+ } else {
+ outb(synn[j++], tmport);
+ }
+ }
+ tmport += 0x06;
+ }
+ }
+ tmport -= 0x08;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ j = inb(tmport) & 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto try_sync;
+ }
+ continue;
+phase_outs:
+ tmport = wkport + 0x18;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00) {
+ if ((inb(tmport) & 0x01) != 0x00) {
+ tmport -= 0x06;
+ outb(0x00, tmport);
+ tmport += 0x06;
+ }
+ cpu_relax();
+ }
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j == 0x85) {
+ goto tar_dcons;
+ }
+ j &= 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto phase_outs;
+ }
+ continue;
+phase_ins:
+ tmport = wkport + 0x14;
+ outb(0x06, tmport);
+ tmport += 0x04;
+ outb(0x20, tmport);
+ tmport += 0x07;
+ k = 0;
+phase_ins1:
+ j = inb(tmport);
+ if ((j & 0x01) != 0x00) {
+ tmport -= 0x06;
+ mbuf[k++] = inb(tmport);
+ tmport += 0x06;
+ goto phase_ins1;
+ }
+ if ((j & 0x80) == 0x00) {
+ goto phase_ins1;
+ }
+ tmport -= 0x08;
+ while ((inb(tmport) & 0x80) == 0x00);
+ j = inb(tmport);
+ if (j == 0x85) {
+ goto tar_dcons;
+ }
+ j &= 0x0f;
+ if (j == 0x0f) {
+ goto phase_ins;
+ }
+ if (j == 0x0a) {
+ goto phase_cmds;
+ }
+ if (j == 0x0e) {
+ goto phase_outs;
+ }
+ continue;
+phase_cmds:
+ tmport = wkport + 0x10;
+ outb(0x30, tmport);
+tar_dcons:
+ tmport = wkport + 0x14;
+ outb(0x00, tmport);
+ tmport += 0x04;
+ outb(0x08, tmport);
+ tmport += 0x07;
+ while ((inb(tmport) & 0x80) == 0x00)
+ cpu_relax();
+ tmport -= 0x08;
+ j = inb(tmport);
+ if (j != 0x16) {
+ continue;
+ }
+ if (mbuf[0] != 0x01) {
+ continue;
+ }
+ if (mbuf[1] != 0x03) {
+ continue;
+ }
+ if (mbuf[4] == 0x00) {
+ continue;
+ }
+ if (mbuf[3] > 0x64) {
+ continue;
+ }
+ if (mbuf[4] > 0x0e) {
+ mbuf[4] = 0x0e;
+ }
+ dev->id[c][i].devsp = mbuf[4];
+ if (mbuf[3] < 0x0c){
+ j = 0xb0;
+ goto set_syn_ok;
+ }
+ if ((mbuf[3] < 0x0d) && (rmb == 0)) {
+ j = 0xa0;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x1a) {
+ j = 0x20;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x33) {
+ j = 0x40;
+ goto set_syn_ok;
+ }
+ if (mbuf[3] < 0x4c) {
+ j = 0x50;
+ goto set_syn_ok;
+ }
+ j = 0x60;
+ set_syn_ok:
+ dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j;
+#ifdef ED_DBGP
+ printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
+#endif
+ }
+ tmport = wkport + 0x16;
+ outb(0x80, tmport);
+}
+
+module_init(atp870u_init);
+module_exit(atp870u_exit);
+
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
new file mode 100644
index 000000000..62bae64a0
--- /dev/null
+++ b/drivers/scsi/atp870u.h
@@ -0,0 +1,67 @@
+#ifndef _ATP870U_H
+#define _ATP870U_H
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* I/O Port */
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define qcnt 32
+#define ATP870U_SCATTER 128
+#define ATP870U_CMDLUN 1
+
+#define MAX_ADAPTER 8
+#define MAX_SCSI_ID 16
+#define ATP870U_MAX_SECTORS 128
+
+#define ATP885_DEVID 0x808A
+#define ATP880_DEVID1 0x8080
+#define ATP880_DEVID2 0x8081
+
+//#define ED_DBGP
+
+struct atp_unit
+{
+ unsigned long baseport;
+ unsigned long ioport[2];
+ unsigned long pciport[2];
+ unsigned long irq;
+ unsigned char last_cmd[2];
+ unsigned char in_snd[2];
+ unsigned char in_int[2];
+ unsigned char quhd[2];
+ unsigned char quend[2];
+ unsigned char global_map[2];
+ unsigned char chip_ver;
+ unsigned char scam_on;
+ unsigned char host_id[2];
+ unsigned int working[2];
+ unsigned short wide_id[2];
+ unsigned short active_id[2];
+ unsigned short ultra_map[2];
+ unsigned short async[2];
+ unsigned short dev_id;
+ unsigned char sp[2][16];
+ unsigned char r1f[2][16];
+ struct scsi_cmnd *quereq[2][qcnt];
+ struct atp_id
+ {
+ unsigned char dirct;
+ unsigned char devsp;
+ unsigned char devtype;
+ unsigned long tran_len;
+ unsigned long last_len;
+ unsigned char *prd_pos;
+ unsigned char *prd_table; /* Kernel address of PRD table */
+ dma_addr_t prd_bus; /* Bus address of PRD */
+ dma_addr_t prdaddr; /* Dynamically updated in driver */
+ struct scsi_cmnd *curr_req;
+ } id[2][16];
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+ unsigned int unit;
+};
+
+#endif
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
new file mode 100644
index 000000000..ceaca32e7
--- /dev/null
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -0,0 +1,9 @@
+config BE2ISCSI
+ tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
+ depends on PCI && SCSI && NET
+ select SCSI_ISCSI_ATTRS
+ select ISCSI_BOOT_SYSFS
+
+ help
+ This driver implements the iSCSI functionality for ServerEngines'
+ 10Gbps Storage adapter - BladeEngine 2.
diff --git a/drivers/scsi/be2iscsi/Makefile b/drivers/scsi/be2iscsi/Makefile
new file mode 100644
index 000000000..c11f443e3
--- /dev/null
+++ b/drivers/scsi/be2iscsi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile to build the iSCSI driver for ServerEngine's BladeEngine.
+#
+#
+
+obj-$(CONFIG_BE2ISCSI) += be2iscsi.o
+
+be2iscsi-y := be_iscsi.o be_main.o be_mgmt.o be_cmds.o
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
new file mode 100644
index 000000000..32070099c
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be.h
@@ -0,0 +1,211 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BEISCSI_H
+#define BEISCSI_H
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/blk-iopoll.h>
+#define FW_VER_LEN 32
+#define MCC_Q_LEN 128
+#define MCC_CQ_LEN 256
+#define MAX_MCC_CMD 16
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+#define BE_GEN4 4
+struct be_dma_mem {
+ void *va;
+ dma_addr_t dma;
+ u32 size;
+};
+
+struct be_queue_info {
+ struct be_dma_mem dma_mem;
+ u16 len;
+ u16 entry_size; /* Size of an element in the queue */
+ u16 id;
+ u16 tail, head;
+ bool created;
+ atomic_t used; /* Number of valid elements in the queue */
+};
+
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ WARN_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
+{
+ return q->dma_mem.va + wrb_num * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+/*ISCSI */
+
+struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ bool enable;
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 prev_eqd; /* in usecs */
+ u32 et_eqd; /* configured val when aic is off */
+ ulong jiffs;
+ u64 eq_prev; /* Used to calculate eqe */
+};
+
+struct be_eq_obj {
+ bool todo_mcc_cq;
+ bool todo_cq;
+ u32 cq_count;
+ struct be_queue_info q;
+ struct beiscsi_hba *phba;
+ struct be_queue_info *cq;
+ struct work_struct work_cqs; /* Work Item */
+ struct blk_iopoll iopoll;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+};
+
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING 0x01
+#define MCC_TAG_STATE_TIMEOUT 0x02
+ uint8_t tag_state;
+ struct be_dma_mem tag_mem_state;
+};
+
+struct be_ctrl_info {
+ u8 __iomem *csr;
+ u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* PCI config space */
+ struct pci_dev *pdev;
+
+ /* Mbox used for cmd request/response */
+ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct be_dma_mem mbox_mem;
+ /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
+ * is stored for freeing purpose */
+ struct be_dma_mem mbox_mem_alloced;
+
+ /* MCC Rings */
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
+ unsigned int mcc_tag[MAX_MCC_CMD];
+ unsigned int mcc_numtag[MAX_MCC_CMD + 1];
+ unsigned short mcc_alloc_index;
+ unsigned short mcc_free_index;
+ unsigned int mcc_tag_available;
+
+ struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
+};
+
+#include "be_cmds.h"
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+#define mcc_timeout 120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY 250
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
+ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
+
+/* Returns bit offset within a DWORD of a bitfield */
+#define AMAP_BIT_OFFSET(_struct, field) \
+ (((size_t)&(((_struct *)0)->field))%32)
+
+/* Returns the bit mask of the field that is NOT shifted into location. */
+static inline u32 amap_mask(u32 bitsize)
+{
+ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
+}
+
+static inline void amap_set(void *ptr, u32 dw_offset, u32 mask,
+ u32 offset, u32 value)
+{
+ u32 *dw = (u32 *) ptr + dw_offset;
+ *dw &= ~(mask << offset);
+ *dw |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS(_struct, field, ptr, val) \
+ amap_set(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field), \
+ val)
+
+static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+ u32 *dw = ptr;
+ return mask & (*(dw + dw_offset) >> offset);
+}
+
+#define AMAP_GET_BITS(_struct, field, ptr) \
+ amap_get(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field))
+
+#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
+#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
+static inline void swap_dws(void *wrb, int len)
+{
+#ifdef __BIG_ENDIAN
+ u32 *dw = wrb;
+ WARN_ON(len % 4);
+ do {
+ *dw = cpu_to_le32(*dw);
+ dw++;
+ len -= 4;
+ } while (len);
+#endif /* __BIG_ENDIAN */
+}
+#endif /* BEISCSI_H */
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
new file mode 100644
index 000000000..447cf7ce6
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -0,0 +1,1443 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <scsi/iscsi_proto.h>
+
+#include "be_main.h"
+#include "be.h"
+#include "be_mgmt.h"
+
+int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
+{
+ u32 sreset;
+ u8 *pci_reset_offset = 0;
+ u8 *pci_online0_offset = 0;
+ u8 *pci_online1_offset = 0;
+ u32 pconline0 = 0;
+ u32 pconline1 = 0;
+ u32 i;
+
+ pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
+ pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
+ pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
+ sreset = readl((void *)pci_reset_offset);
+ sreset |= BE2_SET_RESET;
+ writel(sreset, (void *)pci_reset_offset);
+
+ i = 0;
+ while (sreset & BE2_SET_RESET) {
+ if (i > 64)
+ break;
+ msleep(100);
+ sreset = readl((void *)pci_reset_offset);
+ i++;
+ }
+
+ if (sreset & BE2_SET_RESET) {
+ printk(KERN_ERR DRV_NAME
+ " Soft Reset did not deassert\n");
+ return -EIO;
+ }
+ pconline1 = BE2_MPU_IRAM_ONLINE;
+ writel(pconline0, (void *)pci_online0_offset);
+ writel(pconline1, (void *)pci_online1_offset);
+
+ sreset |= BE2_SET_RESET;
+ writel(sreset, (void *)pci_reset_offset);
+
+ i = 0;
+ while (sreset & BE2_SET_RESET) {
+ if (i > 64)
+ break;
+ msleep(1);
+ sreset = readl((void *)pci_reset_offset);
+ i++;
+ }
+ if (sreset & BE2_SET_RESET) {
+ printk(KERN_ERR DRV_NAME
+ " MPU Online Soft Reset did not deassert\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int be_chk_reset_complete(struct beiscsi_hba *phba)
+{
+ unsigned int num_loop;
+ u8 *mpu_sem = 0;
+ u32 status;
+
+ num_loop = 1000;
+ mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
+ msleep(5000);
+
+ while (num_loop) {
+ status = readl((void *)mpu_sem);
+
+ if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
+ break;
+ msleep(60);
+ num_loop--;
+ }
+
+ if ((status & 0x80000000) || (!num_loop)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : Failed in be_chk_reset_complete"
+ "status = 0x%x\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void be_mcc_notify(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ u32 val = 0;
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
+}
+
+unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
+{
+ unsigned int tag = 0;
+
+ if (phba->ctrl.mcc_tag_available) {
+ tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
+ phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
+ phba->ctrl.mcc_numtag[tag] = 0;
+ }
+ if (tag) {
+ phba->ctrl.mcc_tag_available--;
+ if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
+ phba->ctrl.mcc_alloc_index = 0;
+ else
+ phba->ctrl.mcc_alloc_index++;
+ }
+ return tag;
+}
+
+/*
+ * beiscsi_mccq_compl()- Wait for completion of MBX
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
+ *
+ * Waits for MBX completion with the passed TAG.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem)
+{
+ int rc = 0;
+ uint32_t mcc_tag_response;
+ uint16_t status = 0, addl_status = 0, wrb_num = 0;
+ struct be_mcc_wrb *temp_wrb;
+ struct be_cmd_req_hdr *mbx_hdr;
+ struct be_cmd_resp_hdr *mbx_resp_hdr;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ if (beiscsi_error(phba)) {
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EPERM;
+ }
+
+ /* Set MBX Tag state to Active */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* wait for the mccq completion */
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
+
+ if (rc <= 0) {
+ struct be_dma_mem *tag_mem;
+ /* Set MBX Tag state to timeout */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* Store resource addr to be freed later */
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+ if (mbx_cmd_mem) {
+ tag_mem->size = mbx_cmd_mem->size;
+ tag_mem->va = mbx_cmd_mem->va;
+ tag_mem->dma = mbx_cmd_mem->dma;
+ } else
+ tag_mem->size = 0;
+
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Completion timed out\n");
+ return -EBUSY;
+ } else {
+ rc = 0;
+ /* Set MBX Tag state to completed */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+ spin_unlock(&phba->ctrl.mbox_lock);
+ }
+
+ mcc_tag_response = phba->ctrl.mcc_numtag[tag];
+ status = (mcc_tag_response & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+ CQE_STATUS_ADDL_SHIFT);
+
+ if (mbx_cmd_mem) {
+ mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
+ } else {
+ wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+ CQE_STATUS_WRB_SHIFT;
+ temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+ mbx_hdr = embedded_payload(temp_wrb);
+
+ if (wrb)
+ *wrb = temp_wrb;
+ }
+
+ if (status || addl_status) {
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Failed for "
+ "Subsys : %d Opcode : %d with "
+ "Status : %d and Extd_Status : %d\n",
+ mbx_hdr->subsystem,
+ mbx_hdr->opcode,
+ status, addl_status);
+
+ if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+ mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : Insufficient Buffer Error "
+ "Resp_Len : %d Actual_Resp_Len : %d\n",
+ mbx_resp_hdr->response_length,
+ mbx_resp_hdr->actual_resp_len);
+
+ rc = -EAGAIN;
+ goto release_mcc_tag;
+ }
+ rc = -EIO;
+ }
+
+release_mcc_tag:
+ /* Release the MCC entry */
+ free_mcc_tag(&phba->ctrl, tag);
+
+ return rc;
+}
+
+void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+ spin_lock(&ctrl->mbox_lock);
+ tag = tag & 0x000000FF;
+ ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+ if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+ ctrl->mcc_free_index = 0;
+ else
+ ctrl->mcc_free_index++;
+ ctrl->mcc_tag_available++;
+ spin_unlock(&ctrl->mbox_lock);
+}
+
+bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+static bool is_iscsi_evt(u32 trailer)
+{
+ return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_ISCSI;
+}
+
+static int iscsi_evt_type(u32 trailer)
+{
+ return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ ASYNC_TRAILER_EVENT_TYPE_MASK;
+}
+
+static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else
+ return false;
+}
+
+static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
+{
+ compl->flags = 0;
+}
+
+/*
+ * be_mcc_compl_process()- Check the MBX comapletion status
+ * @ctrl: Function specific MBX data structure
+ * @compl: Completion status of MBX Command
+ *
+ * Check for the MBX completion status when BMBX method used
+ *
+ * return
+ * Success: Zero
+ * Failure: Non-Zero
+ **/
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
+{
+ u16 compl_status, extd_status;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+ struct be_cmd_resp_hdr *resp_hdr;
+
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ if (compl_status != MCC_STATUS_SUCCESS) {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : error in cmd completion: "
+ "Subsystem : %d Opcode : %d "
+ "status(compl/extd)=%d/%d\n",
+ hdr->subsystem, hdr->opcode,
+ compl_status, extd_status);
+
+ if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+ resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+ if (resp_hdr->response_length)
+ return 0;
+ }
+ return -EBUSY;
+ }
+ return 0;
+}
+
+int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
+{
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ u16 compl_status, extd_status;
+ unsigned short tag;
+
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ /* The ctrl.mcc_numtag[tag] is filled with
+ * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
+ * [7:0] = compl_status
+ */
+ tag = (compl->tag0 & 0x000000FF);
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+
+ ctrl->mcc_numtag[tag] = 0x80000000;
+ ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
+ ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
+ ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
+
+ if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+ struct be_dma_mem *tag_mem;
+ tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Completion for timeout Command "
+ "from FW\n");
+ /* Check if memory needs to be freed */
+ if (tag_mem->size)
+ pci_free_consistent(ctrl->pdev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+
+ /* Change tag state */
+ spin_lock(&phba->ctrl.mbox_lock);
+ ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* Free MCC Tag */
+ free_mcc_tag(ctrl, tag);
+ }
+
+ return 0;
+}
+
+static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
+ struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on adapter state appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint32_t iscsi_err_flag;
+
+ if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+ iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+ else
+ iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+ struct be_async_event_link_state *evt)
+{
+ if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+ ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+ (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+ phba->state = BE_ADAPTER_LINK_DOWN;
+
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BC_%d : Link Down on Port %d\n",
+ evt->physical_port);
+
+ iscsi_host_for_each_session(phba->shost,
+ be2iscsi_fail_session);
+ } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+ ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+ (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
+ phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BC_%d : Link UP on Port %d\n",
+ evt->physical_port);
+ }
+}
+
+int beiscsi_process_mcc(struct beiscsi_hba *phba)
+{
+ struct be_mcc_compl *compl;
+ int num = 0, status = 0;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ spin_lock_bh(&phba->ctrl.mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(phba))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ if (is_link_state_evt(compl->flags))
+ /* Interpret compl as a async link evt */
+ beiscsi_async_link_state_process(phba,
+ (struct be_async_event_link_state *) compl);
+ else if (is_iscsi_evt(compl->flags)) {
+ switch (iscsi_evt_type(compl->flags)) {
+ case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
+ case ASYNC_EVENT_NEW_ISCSI_CONN:
+ case ASYNC_EVENT_NEW_TCP_CONN:
+ phba->state |= BE_ADAPTER_CHECK_BOOT;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG |
+ BEISCSI_LOG_MBOX,
+ "BC_%d : Async iscsi Event,"
+ " flags handled = 0x%08x\n",
+ compl->flags);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG |
+ BEISCSI_LOG_MBOX,
+ "BC_%d : Unsupported Async"
+ " Event, flags = 0x%08x\n",
+ compl->flags);
+ }
+ } else
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG |
+ BEISCSI_LOG_MBOX,
+ "BC_%d : Unsupported Async Event, flags"
+ " = 0x%08x\n", compl->flags);
+
+ } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+ status = be_mcc_compl_process(ctrl, compl);
+ atomic_dec(&phba->ctrl.mcc_obj.q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+
+ if (num)
+ hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
+
+ spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
+ return status;
+}
+
+/*
+ * be_mcc_wait_compl()- Wait for MBX completion
+ * @phba: driver private structure
+ *
+ * Wait till no more pending mcc requests are present
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ *
+ **/
+static int be_mcc_wait_compl(struct beiscsi_hba *phba)
+{
+ int i, status;
+ for (i = 0; i < mcc_timeout; i++) {
+ if (beiscsi_error(phba))
+ return -EIO;
+
+ status = beiscsi_process_mcc(phba);
+ if (status)
+ return status;
+
+ if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
+ * be_mcc_notify_wait()- Notify and wait for Compl
+ * @phba: driver private structure
+ *
+ * Notify MCC requests and wait for completion
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+int be_mcc_notify_wait(struct beiscsi_hba *phba)
+{
+ be_mcc_notify(phba);
+ return be_mcc_wait_compl(phba);
+}
+
+/*
+ * be_mbox_db_ready_wait()- Check ready status
+ * @ctrl: Function specific MBX data structure
+ *
+ * Check for the ready status of FW to send BMBX
+ * commands to adapter.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
+{
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
+ void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ unsigned long timeout;
+ bool read_flag = false;
+ int ret = 0, i;
+ u32 ready;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
+ timeout = jiffies + (HZ * 110);
+
+ do {
+ for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+ ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ if (ready) {
+ read_flag = true;
+ break;
+ }
+ mdelay(1);
+ }
+
+ if (!read_flag) {
+ wait_event_timeout(rdybit_check_q,
+ (read_flag != true),
+ HZ * 5);
+ }
+ } while ((time_before(jiffies, timeout)) && !read_flag);
+
+ if (!read_flag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+/*
+ * be_mbox_notify: Notify adapter of new BMBX command
+ * @ctrl: Function specific MBX data structure
+ *
+ * Ring doorbell to inform adapter of a BMBX command
+ * to process
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+int be_mbox_notify(struct be_ctrl_info *ctrl)
+{
+ int status;
+ u32 val = 0;
+ void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
+ struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
+ struct be_mcc_mailbox *mbox = mbox_mem->va;
+ struct be_mcc_compl *compl = &mbox->compl;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status)
+ return status;
+
+ val &= ~MPU_MAILBOX_DB_RDY_MASK;
+ val |= MPU_MAILBOX_DB_HI_MASK;
+ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status)
+ return status;
+
+ val = 0;
+ val &= ~MPU_MAILBOX_DB_RDY_MASK;
+ val &= ~MPU_MAILBOX_DB_HI_MASK;
+ val |= (u32) (mbox_mem->dma >> 4) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status)
+ return status;
+
+ if (be_mcc_compl_is_new(compl)) {
+ status = be_mcc_compl_process(ctrl, &mbox->compl);
+ be_mcc_compl_use(compl);
+ if (status) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : After be_mcc_compl_process\n");
+
+ return status;
+ }
+ } else {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : Invalid Mailbox Completion\n");
+
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
+ * Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
+ */
+static int be_mbox_notify_wait(struct beiscsi_hba *phba)
+{
+ int status;
+ u32 val = 0;
+ void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
+ struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
+ struct be_mcc_mailbox *mbox = mbox_mem->va;
+ struct be_mcc_compl *compl = &mbox->compl;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status)
+ return status;
+
+ val |= MPU_MAILBOX_DB_HI_MASK;
+ /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
+ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+ iowrite32(val, db);
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status != 0)
+ return status;
+
+ val = 0;
+ /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
+ val |= (u32)(mbox_mem->dma >> 4) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status != 0)
+ return status;
+
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(compl)) {
+ status = be_mcc_compl_process(ctrl, &mbox->compl);
+ be_mcc_compl_use(compl);
+ if (status)
+ return status;
+ } else {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : invalid mailbox completion\n");
+
+ return -EBUSY;
+ }
+ return 0;
+}
+
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+ bool embedded, u8 sge_cnt)
+{
+ if (embedded)
+ wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+ else
+ wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
+ MCC_WRB_SGE_CNT_SHIFT;
+ wrb->payload_length = payload_len;
+ be_dws_cpu_to_le(wrb, 8);
+}
+
+void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len)
+{
+ req_hdr->opcode = opcode;
+ req_hdr->subsystem = subsystem;
+ req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
+}
+
+static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
+ struct be_dma_mem *mem)
+{
+ int i, buf_pages;
+ u64 dma = (u64) mem->dma;
+
+ buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
+ for (i = 0; i < buf_pages; i++) {
+ pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
+ pages[i].hi = cpu_to_le32(upper_32_bits(dma));
+ dma += PAGE_SIZE_4K;
+ }
+}
+
+static u32 eq_delay_to_mult(u32 usec_delay)
+{
+#define MAX_INTR_RATE 651042
+ const u32 round = 10;
+ u32 multiplier;
+
+ if (usec_delay == 0)
+ multiplier = 0;
+ else {
+ u32 interrupt_rate = 1000000 / usec_delay;
+ if (interrupt_rate == 0)
+ multiplier = 1023;
+ else {
+ multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
+ multiplier /= interrupt_rate;
+ multiplier = (multiplier + round / 2) / round;
+ multiplier = min(multiplier, (u32) 1023);
+ }
+ }
+ return multiplier;
+}
+
+struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
+{
+ return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+}
+
+struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ struct be_mcc_wrb *wrb;
+
+ WARN_ON(atomic_read(&mccq->used) >= mccq->len);
+ wrb = queue_head_node(mccq);
+ memset(wrb, 0, sizeof(*wrb));
+ wrb->tag0 = (mccq->head & 0x000000FF) << 16;
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ return wrb;
+}
+
+
+int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *eq, int eq_delay)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_eq_create *req = embedded_payload(wrb);
+ struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &eq->dma_mem;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_eq_context, func, req->context,
+ PCI_FUNC(ctrl->pdev->devfn));
+ AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
+ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
+ AMAP_SET_BITS(struct amap_eq_context, count, req->context,
+ __ilog2_u32(eq->len / 256));
+ AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
+ eq_delay_to_mult(eq_delay));
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ eq->id = le16_to_cpu(resp->eq_id);
+ eq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int status;
+ u8 *endian_check;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ endian_check = (u8 *) wrb;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0x12;
+ *endian_check++ = 0x34;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0x56;
+ *endian_check++ = 0x78;
+ *endian_check++ = 0xFF;
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : be_cmd_fw_initialize Failed\n");
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+/**
+ * be_cmd_fw_uninit()- Uinitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int status;
+ u8 *endian_check;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ endian_check = (u8 *) wrb;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xAA;
+ *endian_check++ = 0xBB;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xCC;
+ *endian_check++ = 0xDD;
+ *endian_check = 0xFF;
+
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : be_cmd_fw_uninit Failed\n");
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay, int coalesce_wm)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_cq_create *req = embedded_payload(wrb);
+ struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ struct be_dma_mem *q_mem = &cq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (is_chip_be2_be3r(phba)) {
+ AMAP_SET_BITS(struct amap_cq_context, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
+ PCI_FUNC(ctrl->pdev->devfn));
+ } else {
+ req->hdr.version = MBX_CMD_VER2;
+ req->page_size = 1;
+ AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
+ }
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ cq->id = le16_to_cpu(resp->cq_id);
+ cq->created = true;
+ } else
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
+ status);
+
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
+int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ struct be_ctrl_info *ctrl;
+ void *ctxt;
+ int status;
+
+ spin_lock(&phba->ctrl.mbox_lock);
+ ctrl = &phba->ctrl;
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
+ PCI_FUNC(phba->pcidev->devfn));
+ AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(phba);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ return status;
+}
+
+int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
+ int queue_type)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ u8 subsys = 0, opcode = 0;
+ int status;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BC_%d : In beiscsi_cmd_q_destroy "
+ "queue_type : %d\n", queue_type);
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ switch (queue_type) {
+ case QTYPE_EQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_EQ_DESTROY;
+ break;
+ case QTYPE_CQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_CQ_DESTROY;
+ break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
+ case QTYPE_WRBQ:
+ subsys = CMD_SUBSYSTEM_ISCSI;
+ opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
+ break;
+ case QTYPE_DPDUQ:
+ subsys = CMD_SUBSYSTEM_ISCSI;
+ opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
+ break;
+ case QTYPE_SGL:
+ subsys = CMD_SUBSYSTEM_ISCSI;
+ opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
+ break;
+ default:
+ spin_unlock(&ctrl->mbox_lock);
+ BUG();
+ return -ENXIO;
+ }
+ be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
+ if (queue_type != QTYPE_SGL)
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mbox_notify(ctrl);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @lenght: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: Bind to which ULP
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
+ * on this queue by the FW
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ *
+ **/
+int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq,
+ struct be_queue_info *dq, int length,
+ int entry_size, uint8_t is_header,
+ uint8_t ulp_num)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_defq_create_req *req = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &dq->dma_mem;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ if (phba->fw_config.dual_ulp_aware) {
+ req->ulp_num = ulp_num;
+ req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+ req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+ }
+
+ if (is_chip_be2_be3r(phba)) {
+ AMAP_SET_BITS(struct amap_be_default_pdu_context,
+ rx_pdid, ctxt, 0);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context,
+ rx_pdid_valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context,
+ pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+ AMAP_SET_BITS(struct amap_be_default_pdu_context,
+ ring_size, ctxt,
+ be_encoded_q_len(length /
+ sizeof(struct phys_addr)));
+ AMAP_SET_BITS(struct amap_be_default_pdu_context,
+ default_buffer_size, ctxt, entry_size);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context,
+ cq_id_recv, ctxt, cq->id);
+ } else {
+ AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+ rx_pdid, ctxt, 0);
+ AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+ rx_pdid_valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+ ring_size, ctxt,
+ be_encoded_q_len(length /
+ sizeof(struct phys_addr)));
+ AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+ default_buffer_size, ctxt, entry_size);
+ AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+ cq_id_recv, ctxt, cq->id);
+ }
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_ring *defq_ring;
+ struct be_defq_create_resp *resp = embedded_payload(wrb);
+
+ dq->id = le16_to_cpu(resp->id);
+ dq->created = true;
+ if (is_header)
+ defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+ else
+ defq_ring = &phba->phwi_ctrlr->
+ default_pdu_data[ulp_num];
+
+ defq_ring->id = dq->id;
+
+ if (!phba->fw_config.dual_ulp_aware) {
+ defq_ring->ulp_num = BEISCSI_ULP0;
+ defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+ } else {
+ defq_ring->ulp_num = resp->ulp_num;
+ defq_ring->doorbell_offset = resp->doorbell_offset;
+ }
+ }
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem,
+ struct be_queue_info *wrbq,
+ struct hwi_wrb_context *pwrb_context,
+ uint8_t ulp_num)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_wrbq_create_req *req = embedded_payload(wrb);
+ struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ if (phba->fw_config.dual_ulp_aware) {
+ req->ulp_num = ulp_num;
+ req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+ req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+ }
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ wrbq->id = le16_to_cpu(resp->cid);
+ wrbq->created = true;
+
+ pwrb_context->cid = wrbq->id;
+ if (!phba->fw_config.dual_ulp_aware) {
+ pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+ pwrb_context->ulp_num = BEISCSI_ULP0;
+ } else {
+ pwrb_context->ulp_num = resp->ulp_num;
+ pwrb_context->doorbell_offset = resp->doorbell_offset;
+ }
+ }
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_template_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
+ sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_remove_template_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+ sizeof(*req));
+
+ req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+ status = be_mbox_notify(ctrl);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem,
+ u32 page_offset, u32 num_pages)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int status;
+ unsigned int curr_pages;
+ u32 internal_page_offset = 0;
+ u32 temp_num_pages = num_pages;
+
+ if (num_pages == 0xff)
+ num_pages = 1;
+
+ spin_lock(&ctrl->mbox_lock);
+ do {
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
+ sizeof(*req));
+ curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
+ pages);
+ req->num_pages = min(num_pages, curr_pages);
+ req->page_offset = page_offset;
+ be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
+ q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
+ internal_page_offset += req->num_pages;
+ page_offset += req->num_pages;
+ num_pages -= req->num_pages;
+
+ if (temp_num_pages == 0xff)
+ req->num_pages = temp_num_pages;
+
+ status = be_mbox_notify(ctrl);
+ if (status) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : FW CMD to map iscsi frags failed.\n");
+
+ goto error;
+ }
+ } while (num_pages > 0);
+error:
+ spin_unlock(&ctrl->mbox_lock);
+ if (status != 0)
+ beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+ return status;
+}
+
+int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+ status = be_mbox_notify_wait(phba);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+/**
+ * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
+ * @phba: device priv structure instance
+ * @vlan_tag: TAG to be set
+ *
+ * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
+ *
+ * returns
+ * TAG for the MBX Cmd
+ * **/
+int be_cmd_set_vlan(struct beiscsi_hba *phba,
+ uint16_t vlan_tag)
+{
+ unsigned int tag = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_set_vlan_req *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
+ sizeof(*req));
+
+ req->interface_hndl = phba->interface_handle;
+ req->vlan_priority = vlan_tag;
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return tag;
+}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
new file mode 100644
index 000000000..f11d325fe
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -0,0 +1,1359 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BEISCSI_CMDS_H
+#define BEISCSI_CMDS_H
+
+/**
+ * The driver sends configuration and managements command requests to the
+ * firmware in the BE. These requests are communicated to the processor
+ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
+ * WRB inside a MAILBOX.
+ * The commands are serviced by the ARM processor in the OneConnect's MPU.
+ */
+struct be_sge {
+ __le32 pa_lo;
+ __le32 pa_hi;
+ __le32 len;
+};
+
+#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
+#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
+struct be_mcc_wrb {
+ u32 embedded; /* dword 0 */
+ u32 payload_length; /* dword 1 */
+ u32 tag0; /* dword 2 */
+ u32 tag1; /* dword 3 */
+ u32 rsvd; /* dword 4 */
+ union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE 220
+ u8 embedded_payload[236]; /* used by embedded cmds */
+ struct be_sge sgl[19]; /* used by non-embedded cmds */
+ } payload;
+};
+
+#define CQE_FLAGS_VALID_MASK (1 << 31)
+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
+
+/* Completion Status */
+#define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
+
+#define CQE_STATUS_COMPL_MASK 0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK 0xFFFF
+#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
+#define CQE_STATUS_ADDL_MASK 0xFF00
+#define CQE_STATUS_MASK 0xFF
+#define CQE_STATUS_ADDL_SHIFT 0x08
+#define CQE_STATUS_WRB_MASK 0xFF0000
+#define CQE_STATUS_WRB_SHIFT 16
+#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
+#define BEISCSI_FW_MBX_TIMEOUT 100
+
+/* MBOX Command VER */
+#define MBX_CMD_VER1 0x01
+#define MBX_CMD_VER2 0x02
+
+struct be_mcc_compl {
+ u32 status; /* dword 0 */
+ u32 tag0; /* dword 1 */
+ u32 tag1; /* dword 2 */
+ u32 flags; /* dword 3 */
+};
+
+/********* Mailbox door bell *************/
+/**
+ * Used for driver communication with the FW.
+ * The software must write this register twice to post any command. First,
+ * it writes the register with hi=1 and the upper bits of the physical address
+ * for the MAILBOX structure. Software must poll the ready bit until this
+ * is acknowledged. Then, sotware writes the register with hi=0 with the lower
+ * bits in the address. It must poll the ready bit until the command is
+ * complete. Upon completion, the MAILBOX will contain a valid completion
+ * queue entry.
+ */
+#define MPU_MAILBOX_DB_OFFSET 0x160
+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
+#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
+
+/********** MPU semphore ******************/
+#define MPU_EP_SEMAPHORE_OFFSET 0xac
+#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK 0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0xFFFF /* bits 0 - 15 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
+
+/* MPU semphore POST stage values */
+#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
+
+/**
+ * When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl is interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_CODE_ISCSI 0x4
+
+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF
+#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4
+#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5
+#define ASYNC_EVENT_NEW_TCP_CONN 0x7
+
+struct be_async_event_trailer {
+ u32 code;
+};
+
+enum {
+ ASYNC_EVENT_LINK_DOWN = 0x0,
+ ASYNC_EVENT_LINK_UP = 0x1,
+ ASYNC_EVENT_LOGICAL = 0x2
+};
+
+/**
+ * When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE 0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
+struct be_mcc_mailbox {
+ struct be_mcc_wrb wrb;
+ struct be_mcc_compl compl;
+};
+
+/* Type of subsystems supported by FW */
+#define CMD_SUBSYSTEM_COMMON 0x1
+#define CMD_SUBSYSTEM_ISCSI 0x2
+#define CMD_SUBSYSTEM_ETH 0x3
+#define CMD_SUBSYSTEM_ISCSI_INI 0x6
+#define CMD_COMMON_TCP_UPLOAD 0x1
+
+/**
+ * List of common opcodes subsystem CMD_SUBSYSTEM_COMMON
+ * These opcodes are unique for each subsystem defined above
+ */
+#define OPCODE_COMMON_CQ_CREATE 12
+#define OPCODE_COMMON_EQ_CREATE 13
+#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
+#define OPCODE_COMMON_GET_FW_VERSION 35
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_FIRMWARE_CONFIG 42
+#define OPCODE_COMMON_MCC_DESTROY 53
+#define OPCODE_COMMON_CQ_DESTROY 54
+#define OPCODE_COMMON_EQ_DESTROY 55
+#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
+#define OPCODE_COMMON_FUNCTION_RESET 61
+
+/**
+ * LIST of opcodes that are common between Initiator and Target
+ * used by CMD_SUBSYSTEM_ISCSI
+ * These opcodes are unique for each subsystem defined above
+ */
+#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2
+#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
+#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
+#define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14
+#define OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR 17
+#define OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR 18
+#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21
+#define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22
+#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23
+#define OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID 24
+#define OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO 25
+#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
+#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
+#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
+#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66
+#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
+
+struct be_cmd_req_hdr {
+ u8 opcode; /* dword 0 */
+ u8 subsystem; /* dword 0 */
+ u8 port_number; /* dword 0 */
+ u8 domain; /* dword 0 */
+ u32 timeout; /* dword 1 */
+ u32 request_length; /* dword 2 */
+ u8 version; /* dword 3 */
+ u8 rsvd0[3]; /* dword 3 */
+};
+
+struct be_cmd_resp_hdr {
+ u32 info; /* dword 0 */
+ u32 status; /* dword 1 */
+ u32 response_length; /* dword 2 */
+ u32 actual_resp_len; /* dword 3 */
+};
+
+struct phys_addr {
+ u32 lo;
+ u32 hi;
+};
+
+struct virt_addr {
+ u32 lo;
+ u32 hi;
+};
+/**************************
+ * BE Command definitions *
+ **************************/
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte - used to calculate offset/shift/mask of each field
+ */
+struct amap_eq_context {
+ u8 cidx[13]; /* dword 0 */
+ u8 rsvd0[3]; /* dword 0 */
+ u8 epidx[13]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 rsvd1; /* dword 0 */
+ u8 size; /* dword 0 */
+ u8 pidx[13]; /* dword 1 */
+ u8 rsvd2[3]; /* dword 1 */
+ u8 pd[10]; /* dword 1 */
+ u8 count[3]; /* dword 1 */
+ u8 solevent; /* dword 1 */
+ u8 stalled; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 rsvd3[4]; /* dword 2 */
+ u8 func[8]; /* dword 2 */
+ u8 rsvd4; /* dword 2 */
+ u8 delaymult[10]; /* dword 2 */
+ u8 rsvd5[2]; /* dword 2 */
+ u8 phase[2]; /* dword 2 */
+ u8 nodelay; /* dword 2 */
+ u8 rsvd6[4]; /* dword 2 */
+ u8 rsvd7[32]; /* dword 3 */
+} __packed;
+
+struct be_cmd_req_eq_create {
+ struct be_cmd_req_hdr hdr; /* dw[4] */
+ u16 num_pages; /* sword */
+ u16 rsvd0; /* sword */
+ u8 context[sizeof(struct amap_eq_context) / 8]; /* dw[4] */
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eq_create {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 eq_id; /* sword */
+ u16 rsvd0; /* sword */
+} __packed;
+
+struct be_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+} __packed;
+
+struct mgmt_chap_format {
+ u32 flags;
+ u8 intr_chap_name[256];
+ u8 intr_secret[16];
+ u8 target_chap_name[256];
+ u8 target_secret[16];
+ u16 intr_chap_name_length;
+ u16 intr_secret_length;
+ u16 target_chap_name_length;
+ u16 target_secret_length;
+} __packed;
+
+struct mgmt_auth_method_format {
+ u8 auth_method_type;
+ u8 padding[3];
+ struct mgmt_chap_format chap;
+} __packed;
+
+struct mgmt_conn_login_options {
+ u8 flags;
+ u8 header_digest;
+ u8 data_digest;
+ u8 rsvd0;
+ u32 max_recv_datasegment_len_ini;
+ u32 max_recv_datasegment_len_tgt;
+ u32 tcp_mss;
+ u32 tcp_window_size;
+ struct mgmt_auth_method_format auth_data;
+} __packed;
+
+struct ip_addr_format {
+ u16 size_of_structure;
+ u8 reserved;
+ u8 ip_type;
+ u8 addr[16];
+ u32 rsvd0;
+} __packed;
+
+struct mgmt_conn_info {
+ u32 connection_handle;
+ u32 connection_status;
+ u16 src_port;
+ u16 dest_port;
+ u16 dest_port_redirected;
+ u16 cid;
+ u32 estimated_throughput;
+ struct ip_addr_format src_ipaddr;
+ struct ip_addr_format dest_ipaddr;
+ struct ip_addr_format dest_ipaddr_redirected;
+ struct mgmt_conn_login_options negotiated_login_options;
+} __packed;
+
+struct mgmt_session_login_options {
+ u8 flags;
+ u8 error_recovery_level;
+ u16 rsvd0;
+ u32 first_burst_length;
+ u32 max_burst_length;
+ u16 max_connections;
+ u16 max_outstanding_r2t;
+ u16 default_time2wait;
+ u16 default_time2retain;
+} __packed;
+
+struct mgmt_session_info {
+ u32 session_handle;
+ u32 status;
+ u8 isid[6];
+ u16 tsih;
+ u32 session_flags;
+ u16 conn_count;
+ u16 pad;
+ u8 target_name[224];
+ u8 initiator_iscsiname[224];
+ struct mgmt_session_login_options negotiated_login_options;
+ struct mgmt_conn_info conn_list[1];
+} __packed;
+
+struct be_cmd_get_session_req {
+ struct be_cmd_req_hdr hdr;
+ u32 session_handle;
+} __packed;
+
+struct be_cmd_get_session_resp {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_session_info session_info;
+} __packed;
+
+struct mac_addr {
+ u16 size_of_structure;
+ u8 addr[ETH_ALEN];
+} __packed;
+
+struct be_cmd_get_boot_target_req {
+ struct be_cmd_req_hdr hdr;
+} __packed;
+
+struct be_cmd_get_boot_target_resp {
+ struct be_cmd_resp_hdr hdr;
+ u32 boot_session_count;
+ int boot_session_handle;
+};
+
+struct be_cmd_reopen_session_req {
+ struct be_cmd_req_hdr hdr;
+#define BE_REOPEN_ALL_SESSIONS 0x00
+#define BE_REOPEN_BOOT_SESSIONS 0x01
+#define BE_REOPEN_A_SESSION 0x02
+ u16 reopen_type;
+ u16 rsvd;
+ u32 session_handle;
+} __packed;
+
+struct be_cmd_reopen_session_resp {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd;
+ u32 session_handle;
+} __packed;
+
+
+struct be_cmd_mac_query_req {
+ struct be_cmd_req_hdr hdr;
+ u8 type;
+ u8 permanent;
+ u16 if_id;
+} __packed;
+
+struct be_cmd_get_mac_resp {
+ struct be_cmd_resp_hdr hdr;
+ struct mac_addr mac;
+};
+
+struct be_ip_addr_subnet_format {
+ u16 size_of_structure;
+ u8 ip_type;
+ u8 ipv6_prefix_length;
+ u8 addr[16];
+ u8 subnet_mask[16];
+ u32 rsvd0;
+} __packed;
+
+struct be_cmd_get_if_info_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 ip_type;
+} __packed;
+
+struct be_cmd_get_if_info_resp {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 vlan_priority;
+ u32 ip_addr_count;
+ u32 dhcp_state;
+ struct be_ip_addr_subnet_format ip_addr;
+} __packed;
+
+struct be_ip_addr_record {
+ u32 action;
+ u32 interface_hndl;
+ struct be_ip_addr_subnet_format ip_addr;
+ u32 status;
+} __packed;
+
+struct be_ip_addr_record_params {
+ u32 record_entry_count;
+ struct be_ip_addr_record ip_record;
+} __packed;
+
+struct be_cmd_set_ip_addr_req {
+ struct be_cmd_req_hdr hdr;
+ struct be_ip_addr_record_params ip_params;
+} __packed;
+
+
+struct be_cmd_set_dhcp_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 ip_type;
+ u32 flags;
+ u32 retry_count;
+} __packed;
+
+struct be_cmd_rel_dhcp_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 ip_type;
+} __packed;
+
+struct be_cmd_set_def_gateway_req {
+ struct be_cmd_req_hdr hdr;
+ u32 action;
+ struct ip_addr_format ip_addr;
+} __packed;
+
+struct be_cmd_get_def_gateway_req {
+ struct be_cmd_req_hdr hdr;
+ u32 ip_type;
+} __packed;
+
+struct be_cmd_get_def_gateway_resp {
+ struct be_cmd_req_hdr hdr;
+ struct ip_addr_format ip_addr;
+} __packed;
+
+#define BEISCSI_VLAN_DISABLE 0xFFFF
+struct be_cmd_set_vlan_req {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_hndl;
+ u32 vlan_priority;
+} __packed;
+/******************** Create CQ ***************************/
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte - used to calculate offset/shift/mask of each field
+ */
+struct amap_cq_context {
+ u8 cidx[11]; /* dword 0 */
+ u8 rsvd0; /* dword 0 */
+ u8 coalescwm[2]; /* dword 0 */
+ u8 nodelay; /* dword 0 */
+ u8 epidx[11]; /* dword 0 */
+ u8 rsvd1; /* dword 0 */
+ u8 count[2]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 solevent; /* dword 0 */
+ u8 eventable; /* dword 0 */
+ u8 pidx[11]; /* dword 1 */
+ u8 rsvd2; /* dword 1 */
+ u8 pd[10]; /* dword 1 */
+ u8 eqid[8]; /* dword 1 */
+ u8 stalled; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 rsvd3[4]; /* dword 2 */
+ u8 func[8]; /* dword 2 */
+ u8 rsvd4[20]; /* dword 2 */
+ u8 rsvd5[32]; /* dword 3 */
+} __packed;
+
+struct amap_cq_context_v2 {
+ u8 rsvd0[12]; /* dword 0 */
+ u8 coalescwm[2]; /* dword 0 */
+ u8 nodelay; /* dword 0 */
+ u8 rsvd1[12]; /* dword 0 */
+ u8 count[2]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 rsvd2; /* dword 0 */
+ u8 eventable; /* dword 0 */
+ u8 eqid[16]; /* dword 1 */
+ u8 rsvd3[15]; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 cqecount[16];/* dword 2 */
+ u8 rsvd4[16]; /* dword 2 */
+ u8 rsvd5[32]; /* dword 3 */
+};
+
+struct be_cmd_req_cq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 page_size;
+ u8 rsvd0;
+ u8 context[sizeof(struct amap_cq_context) / 8];
+ struct phys_addr pages[4];
+} __packed;
+
+struct be_cmd_resp_cq_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cq_id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create MCCQ ***************************/
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte - used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_context {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_mcc_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Q Destroy ***************************/
+/* Type of Queue to be destroyed */
+enum {
+ QTYPE_EQ = 1,
+ QTYPE_CQ,
+ QTYPE_MCCQ,
+ QTYPE_WRBQ,
+ QTYPE_DPDUQ,
+ QTYPE_SGL
+};
+
+struct be_cmd_req_q_destroy {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 bypass_flush; /* valid only for rx q destroy */
+} __packed;
+
+struct macaddr {
+ u8 byte[ETH_ALEN];
+};
+
+struct be_cmd_req_mcast_mac_config {
+ struct be_cmd_req_hdr hdr;
+ u16 num_mac;
+ u8 promiscuous;
+ u8 interface_id;
+ struct macaddr mac[32];
+} __packed;
+
+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
+{
+ return wrb->payload.embedded_payload;
+}
+
+static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
+{
+ return &wrb->payload.sgl[0];
+}
+
+/******************** Modify EQ Delay *******************/
+struct be_cmd_req_modify_eq_delay {
+ struct be_cmd_req_hdr hdr;
+ __le32 num_eq;
+ struct {
+ __le32 eq_id;
+ __le32 phase;
+ __le32 delay_multiplier;
+ } delay[MAX_CPUS];
+} __packed;
+
+/******************** Get MAC ADDR *******************/
+
+#define ETH_ALEN 6
+
+struct be_cmd_get_nic_conf_req {
+ struct be_cmd_req_hdr hdr;
+ u32 nic_port_count;
+ u32 speed;
+ u32 max_speed;
+ u32 link_state;
+ u32 max_frame_size;
+ u16 size_of_structure;
+ u8 mac_address[ETH_ALEN];
+ u32 rsvd[23];
+};
+
+struct be_cmd_get_nic_conf_resp {
+ struct be_cmd_resp_hdr hdr;
+ u32 nic_port_count;
+ u32 speed;
+ u32 max_speed;
+ u32 link_state;
+ u32 max_frame_size;
+ u16 size_of_structure;
+ u8 mac_address[6];
+ u32 rsvd[23];
+};
+
+#define BEISCSI_ALIAS_LEN 32
+
+struct be_cmd_hba_name {
+ struct be_cmd_req_hdr hdr;
+ u16 flags;
+ u16 rsvd0;
+ u8 initiator_name[ISCSI_NAME_LEN];
+ u8 initiator_alias[BEISCSI_ALIAS_LEN];
+} __packed;
+
+struct be_cmd_ntwk_link_status_req {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+} __packed;
+
+/*** Port Speed Values ***/
+#define BE2ISCSI_LINK_SPEED_ZERO 0x00
+#define BE2ISCSI_LINK_SPEED_10MBPS 0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS 0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS 0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS 0x04
+struct be_cmd_ntwk_link_status_resp {
+ struct be_cmd_resp_hdr hdr;
+ u8 phys_port;
+ u8 mac_duplex;
+ u8 mac_speed;
+ u8 mac_fault;
+ u8 mgmt_mac_duplex;
+ u8 mgmt_mac_speed;
+ u16 qos_link_speed;
+ u32 logical_link_speed;
+} __packed;
+
+int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *eq, int eq_delay);
+
+int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay,
+ int num_cqe_dma_coalesce);
+
+int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
+ int type);
+int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+
+int be_poll_mcc(struct be_ctrl_info *ctrl);
+int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba);
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
+
+void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+ int num);
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem);
+/*ISCSI Functuions */
+int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
+
+struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
+struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
+int be_mcc_notify_wait(struct beiscsi_hba *phba);
+void be_mcc_notify(struct beiscsi_hba *phba);
+unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
+void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+ struct be_async_event_link_state *evt);
+int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl);
+
+int be_mbox_notify(struct be_ctrl_info *ctrl);
+
+int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq,
+ struct be_queue_info *dq, int length,
+ int entry_size, uint8_t is_header,
+ uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
+
+int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem, u32 page_offset,
+ u32 num_pages);
+
+int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
+
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
+ struct be_queue_info *wrbq,
+ struct hwi_wrb_context *pwrb_context,
+ uint8_t ulp_num);
+
+bool is_link_state_evt(u32 trailer);
+
+/* Configuration Functions */
+int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
+struct be_default_pdu_context {
+ u32 dw[4];
+} __packed;
+
+struct amap_be_default_pdu_context {
+ u8 dbuf_cindex[13]; /* dword 0 */
+ u8 rsvd0[3]; /* dword 0 */
+ u8 ring_size[4]; /* dword 0 */
+ u8 ring_state[4]; /* dword 0 */
+ u8 rsvd1[8]; /* dword 0 */
+ u8 dbuf_pindex[13]; /* dword 1 */
+ u8 rsvd2; /* dword 1 */
+ u8 pci_func_id[8]; /* dword 1 */
+ u8 rx_pdid[9]; /* dword 1 */
+ u8 rx_pdid_valid; /* dword 1 */
+ u8 default_buffer_size[16]; /* dword 2 */
+ u8 cq_id_recv[10]; /* dword 2 */
+ u8 rx_pdid_not_valid; /* dword 2 */
+ u8 rsvd3[5]; /* dword 2 */
+ u8 rsvd4[32]; /* dword 3 */
+} __packed;
+
+struct amap_default_pdu_context_ext {
+ u8 rsvd0[16]; /* dword 0 */
+ u8 ring_size[4]; /* dword 0 */
+ u8 rsvd1[12]; /* dword 0 */
+ u8 rsvd2[22]; /* dword 1 */
+ u8 rx_pdid[9]; /* dword 1 */
+ u8 rx_pdid_valid; /* dword 1 */
+ u8 default_buffer_size[16]; /* dword 2 */
+ u8 cq_id_recv[16]; /* dword 2 */
+ u8 rsvd3[32]; /* dword 3 */
+} __packed;
+
+struct be_defq_create_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 ulp_num;
+#define BEISCSI_DUAL_ULP_AWARE_BIT 0 /* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT 1 /* Byte 3 - Bit 1 */
+ u8 dua_feature;
+ struct be_default_pdu_context context;
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_defq_create_resp {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u8 rsvd0;
+ u8 ulp_num;
+ u32 doorbell_offset;
+ u16 register_set;
+ u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI 0x1
+ u16 type;
+ struct phys_addr scratch_pa;
+ struct virt_addr scratch_va;
+ struct virt_addr pages_va;
+ struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 type;
+ u16 rsvd0;
+} __packed;
+
+struct be_post_sgl_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 page_offset;
+ u32 rsvd0;
+ struct phys_addr pages[26];
+ u32 rsvd1;
+} __packed;
+
+struct be_wrbq_create_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 ulp_num;
+ u8 dua_feature;
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_wrbq_create_resp {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 cid;
+ u8 rsvd0;
+ u8 ulp_num;
+ u32 doorbell_offset;
+ u16 register_set;
+ u16 doorbell_format;
+} __packed;
+
+#define SOL_CID_MASK 0x0000FFC0
+#define SOL_CODE_MASK 0x0000003F
+#define SOL_WRB_INDEX_MASK 0x00FF0000
+#define SOL_CMD_WND_MASK 0xFF000000
+#define SOL_RES_CNT_MASK 0x7FFFFFFF
+#define SOL_EXP_CMD_SN_MASK 0xFFFFFFFF
+#define SOL_HW_STS_MASK 0x000000FF
+#define SOL_STS_MASK 0x0000FF00
+#define SOL_RESP_MASK 0x00FF0000
+#define SOL_FLAGS_MASK 0x7F000000
+#define SOL_S_MASK 0x80000000
+
+struct sol_cqe {
+ u32 dw[4];
+};
+
+struct amap_sol_cqe {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 i_resp[8]; /* dword 0 */
+ u8 i_flags[7]; /* dword 0 */
+ u8 s; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 cid[10]; /* dword 2 */
+ u8 wrb_index[8]; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+#define SOL_ICD_INDEX_MASK 0x0003FFC0
+struct amap_sol_cqe_ring {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 i_resp[8]; /* dword 0 */
+ u8 i_flags[7]; /* dword 0 */
+ u8 s; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 icd_index[12]; /* dword 2 */
+ u8 rsvd[6]; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct amap_sol_cqe_v2 {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 wrb_index[16]; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 cmd_cmpl; /* dword 2 */
+ u8 rsvd0; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 cid[13]; /* dword 2 */
+ u8 u; /* dword 2 */
+ u8 o; /* dword 2 */
+ u8 s; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct common_sol_cqe {
+ u32 exp_cmdsn;
+ u32 res_cnt;
+ u16 wrb_index;
+ u16 cid;
+ u8 hw_sts;
+ u8 cmd_wnd;
+ u8 res_flag; /* the s feild of structure */
+ u8 i_resp; /* for skh if cmd_complete is set then i_sts is response */
+ u8 i_flags; /* for skh or the u and o feilds */
+ u8 i_sts; /* for skh if cmd_complete is not-set then i_sts is status */
+};
+
+/*** iSCSI ack/driver message completions ***/
+struct amap_it_dmsg_cqe {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 cid[10]; /* DWORD 2 */
+ u8 wrb_idx[8]; /* DWORD 2 */
+ u8 rsvd0[8]; /* DWORD 2*/
+ u8 rsvd1[31]; /* DWORD 3*/
+ u8 valid; /* DWORD 3 */
+} __packed;
+
+struct amap_it_dmsg_cqe_v2 {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 rsvd0[10]; /* DWORD 2 */
+ u8 wrb_idx[16]; /* DWORD 2 */
+ u8 rsvd1[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd2[2]; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
+
+
+/**
+ * Post WRB Queue Doorbell Register used by the host Storage
+ * stack to notify the
+ * controller of a posted Work Request Block
+ */
+#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */
+#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */
+
+#define DB_DEF_PDU_WRB_INDEX_SHIFT 16
+#define DB_DEF_PDU_NUM_POSTED_SHIFT 24
+
+struct fragnum_bits_for_sgl_cra_in {
+ struct be_cmd_req_hdr hdr;
+ u32 num_bits;
+} __packed;
+
+struct iscsi_cleanup_req {
+ struct be_cmd_req_hdr hdr;
+ u16 chute;
+ u8 hdr_ring_id;
+ u8 data_ring_id;
+
+} __packed;
+
+struct eq_delay {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+} __packed;
+
+struct be_eq_delay_params_in {
+ struct be_cmd_req_hdr hdr;
+ u32 num_eq;
+ struct eq_delay delay[8];
+} __packed;
+
+struct tcp_connect_and_offload_in {
+ struct be_cmd_req_hdr hdr;
+ struct ip_addr_format ip_address;
+ u16 tcp_port;
+ u16 cid;
+ u16 cq_id;
+ u16 defq_id;
+ struct phys_addr dataout_template_pa;
+ u16 hdr_ring_id;
+ u16 data_ring_id;
+ u8 do_offload;
+ u8 rsvd0[3];
+} __packed;
+
+struct tcp_connect_and_offload_in_v1 {
+ struct be_cmd_req_hdr hdr;
+ struct ip_addr_format ip_address;
+ u16 tcp_port;
+ u16 cid;
+ u16 cq_id;
+ u16 defq_id;
+ struct phys_addr dataout_template_pa;
+ u16 hdr_ring_id;
+ u16 data_ring_id;
+ u8 do_offload;
+ u8 ifd_state;
+ u8 rsvd0[2];
+ u16 tcp_window_size;
+ u8 tcp_window_scale_count;
+ u8 rsvd1;
+ u32 tcp_mss:24;
+ u8 rsvd2;
+} __packed;
+
+struct tcp_connect_and_offload_out {
+ struct be_cmd_resp_hdr hdr;
+ u32 connection_handle;
+ u16 cid;
+ u16 rsvd0;
+
+} __packed;
+
+struct be_mcc_wrb_context {
+ struct MCC_WRB *wrb;
+ int *users_final_status;
+} __packed;
+
+#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */
+#define DB_DEF_PDU_REARM_SHIFT 14
+#define DB_DEF_PDU_EVENT_SHIFT 15
+#define DB_DEF_PDU_CQPROC_SHIFT 16
+
+struct dmsg_cqe {
+ u32 dw[4];
+} __packed;
+
+struct tcp_upload_params_in {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 upload_type;
+ u32 reset_seq;
+} __packed;
+
+struct tcp_upload_params_out {
+ u32 dw[32];
+} __packed;
+
+union tcp_upload_params {
+ struct tcp_upload_params_in request;
+ struct tcp_upload_params_out response;
+} __packed;
+
+struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE 0x10
+ u32 ulp_mode;
+ u32 etx_base;
+ u32 etx_count;
+ u32 sq_base;
+ u32 sq_count;
+ u32 rq_base;
+ u32 rq_count;
+ u32 dq_base;
+ u32 dq_count;
+ u32 lro_base;
+ u32 lro_count;
+ u32 icd_base;
+ u32 icd_count;
+};
+
+struct be_ulp_chain_icd {
+ u32 chain_base;
+ u32 chain_count;
+};
+
+struct be_fw_cfg {
+ struct be_cmd_req_hdr hdr;
+ u32 be_config_number;
+ u32 asic_revision;
+ u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE 0x10
+#define BEISCSI_FUNC_DUA_MODE 0x800
+ u32 function_mode;
+ struct be_ulp_fw_cfg ulp[2];
+ u32 function_caps;
+ u32 cqid_base;
+ u32 cqid_count;
+ u32 eqid_base;
+ u32 eqid_count;
+ struct be_ulp_chain_icd chain_icd[2];
+} __packed;
+
+struct be_cmd_get_all_if_id_req {
+ struct be_cmd_req_hdr hdr;
+ u32 if_count;
+ u32 if_hndl_list[1];
+} __packed;
+
+#define ISCSI_OPCODE_SCSI_DATA_OUT 5
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_ISCSI_CLEANUP 59
+#define OPCODE_COMMON_TCP_UPLOAD 56
+#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
+#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
+#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
+#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
+#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
+#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
+#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
+#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
+#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
+#define OPCODE_COMMON_WRITE_FLASH 96
+#define OPCODE_COMMON_READ_FLASH 97
+
+/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
+#define CMD_ISCSI_COMMAND_INVALIDATE 1
+#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
+#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
+
+#define INI_WR_CMD 1 /* Initiator write command */
+#define INI_TMF_CMD 2 /* Initiator TMF command */
+#define INI_NOPOUT_CMD 3 /* Initiator; Send a NOP-OUT */
+#define INI_RD_CMD 5 /* Initiator requesting to send
+ * a read command
+ */
+#define TGT_CTX_UPDT_CMD 7 /* Target context update */
+#define TGT_STS_CMD 8 /* Target R2T and other BHS
+ * where only the status number
+ * need to be updated
+ */
+#define TGT_DATAIN_CMD 9 /* Target Data-Ins in response
+ * to read command
+ */
+#define TGT_SOS_PDU 10 /* Target:standalone status
+ * response
+ */
+#define TGT_DM_CMD 11 /* Indicates that the bhs
+ * preparedby
+ * driver should not be touched
+ */
+/* --- CMD_CHUTE_TYPE --- */
+#define CMD_CONNECTION_CHUTE_0 1
+#define CMD_CONNECTION_CHUTE_1 2
+#define CMD_CONNECTION_CHUTE_2 3
+
+#define EQ_MAJOR_CODE_COMPLETION 0
+
+#define CMD_ISCSI_SESSION_DEL_CFG_FROM_FLASH 0
+#define CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH 1
+
+/* --- CONNECTION_UPLOAD_PARAMS --- */
+/* These parameters are used to define the type of upload desired. */
+#define CONNECTION_UPLOAD_GRACEFUL 1 /* Graceful upload */
+#define CONNECTION_UPLOAD_ABORT_RESET 2 /* Abortive upload with
+ * reset
+ */
+#define CONNECTION_UPLOAD_ABORT 3 /* Abortive upload without
+ * reset
+ */
+#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset,
+ * sequence number by driver */
+
+/* Returns the number of items in the field array. */
+#define BE_NUMBER_OF_FIELD(_type_, _field_) \
+ (FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
+
+/**
+ * Different types of iSCSI completions to host driver for both initiator
+ * and taget mode
+ * of operation.
+ */
+#define SOL_CMD_COMPLETE 1 /* Solicited command completed
+ * normally
+ */
+#define SOL_CMD_KILLED_DATA_DIGEST_ERR 2 /* Solicited command got
+ * invalidated internally due
+ * to Data Digest error
+ */
+#define CXN_KILLED_PDU_SIZE_EXCEEDS_DSL 3 /* Connection got invalidated
+ * internally
+ * due to a received PDU
+ * size > DSL
+ */
+#define CXN_KILLED_BURST_LEN_MISMATCH 4 /* Connection got invalidated
+ * internally due ti received
+ * PDU sequence size >
+ * FBL/MBL.
+ */
+#define CXN_KILLED_AHS_RCVD 5 /* Connection got invalidated
+ * internally due to a received
+ * PDU Hdr that has
+ * AHS */
+#define CXN_KILLED_HDR_DIGEST_ERR 6 /* Connection got invalidated
+ * internally due to Hdr Digest
+ * error
+ */
+#define CXN_KILLED_UNKNOWN_HDR 7 /* Connection got invalidated
+ * internally
+ * due to a bad opcode in the
+ * pdu hdr
+ */
+#define CXN_KILLED_STALE_ITT_TTT_RCVD 8 /* Connection got invalidated
+ * internally due to a received
+ * ITT/TTT that does not belong
+ * to this Connection
+ */
+#define CXN_KILLED_INVALID_ITT_TTT_RCVD 9 /* Connection got invalidated
+ * internally due to received
+ * ITT/TTT value > Max
+ * Supported ITTs/TTTs
+ */
+#define CXN_KILLED_RST_RCVD 10 /* Connection got invalidated
+ * internally due to an
+ * incoming TCP RST
+ */
+#define CXN_KILLED_TIMED_OUT 11 /* Connection got invalidated
+ * internally due to timeout on
+ * tcp segment 12 retransmit
+ * attempts failed
+ */
+#define CXN_KILLED_RST_SENT 12 /* Connection got invalidated
+ * internally due to TCP RST
+ * sent by the Tx side
+ */
+#define CXN_KILLED_FIN_RCVD 13 /* Connection got invalidated
+ * internally due to an
+ * incoming TCP FIN.
+ */
+#define CXN_KILLED_BAD_UNSOL_PDU_RCVD 14 /* Connection got invalidated
+ * internally due to bad
+ * unsolicited PDU Unsolicited
+ * PDUs are PDUs with
+ * ITT=0xffffffff
+ */
+#define CXN_KILLED_BAD_WRB_INDEX_ERROR 15 /* Connection got invalidated
+ * internally due to bad WRB
+ * index.
+ */
+#define CXN_KILLED_OVER_RUN_RESIDUAL 16 /* Command got invalidated
+ * internally due to received
+ * command has residual
+ * over run bytes.
+ */
+#define CXN_KILLED_UNDER_RUN_RESIDUAL 17 /* Command got invalidated
+ * internally due to received
+ * command has residual under
+ * run bytes.
+ */
+#define CMD_KILLED_INVALID_STATSN_RCVD 18 /* Command got invalidated
+ * internally due to a received
+ * PDU has an invalid StatusSN
+ */
+#define CMD_KILLED_INVALID_R2T_RCVD 19 /* Command got invalidated
+ * internally due to a received
+ * an R2T with some invalid
+ * fields in it
+ */
+#define CMD_CXN_KILLED_LUN_INVALID 20 /* Command got invalidated
+ * internally due to received
+ * PDU has an invalid LUN.
+ */
+#define CMD_CXN_KILLED_ICD_INVALID 21 /* Command got invalidated
+ * internally due to the
+ * corresponding ICD not in a
+ * valid state
+ */
+#define CMD_CXN_KILLED_ITT_INVALID 22 /* Command got invalidated due
+ * to received PDU has an
+ * invalid ITT.
+ */
+#define CMD_CXN_KILLED_SEQ_OUTOFORDER 23 /* Command got invalidated due
+ * to received sequence buffer
+ * offset is out of order.
+ */
+#define CMD_CXN_KILLED_INVALID_DATASN_RCVD 24 /* Command got invalidated
+ * internally due to a
+ * received PDU has an invalid
+ * DataSN
+ */
+#define CXN_INVALIDATE_NOTIFY 25 /* Connection invalidation
+ * completion notify.
+ */
+#define CXN_INVALIDATE_INDEX_NOTIFY 26 /* Connection invalidation
+ * completion
+ * with data PDU index.
+ */
+#define CMD_INVALIDATED_NOTIFY 27 /* Command invalidation
+ * completionnotifify.
+ */
+#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/
+#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/
+#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest
+ * error notify.
+ */
+#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based
+ * notification.
+ */
+#define CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN 32 /* Connection got invalidated
+ * internally due to command
+ * and data are not on same
+ * connection.
+ */
+#define SOL_CMD_KILLED_DIF_ERR 33 /* Solicited command got
+ * invalidated internally due
+ * to DIF error
+ */
+#define CXN_KILLED_SYN_RCVD 34 /* Connection got invalidated
+ * internally due to incoming
+ * TCP SYN
+ */
+#define CXN_KILLED_IMM_DATA_RCVD 35 /* Connection got invalidated
+ * internally due to an
+ * incoming Unsolicited PDU
+ * that has immediate data on
+ * the cxn
+ */
+
+int beiscsi_pci_soft_reset(struct beiscsi_hba *phba);
+int be_chk_reset_complete(struct beiscsi_hba *phba);
+
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+ bool embedded, u8 sge_cnt);
+
+void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len);
+
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
+#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
new file mode 100644
index 000000000..2f0700796
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -0,0 +1,1472 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_netlink.h>
+#include <net/netlink.h>
+#include <scsi/scsi.h>
+
+#include "be_iscsi.h"
+
+extern struct iscsi_transport beiscsi_iscsi_transport;
+
+/**
+ * beiscsi_session_create - creates a new iscsi session
+ * @cmds_max: max commands supported
+ * @qdepth: max queue depth supported
+ * @initial_cmdsn: initial iscsi CMDSN
+ */
+struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
+ u16 cmds_max,
+ u16 qdepth,
+ u32 initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_cls_session *cls_session;
+ struct beiscsi_hba *phba;
+ struct iscsi_session *sess;
+ struct beiscsi_session *beiscsi_sess;
+ struct beiscsi_io_task *io_task;
+
+
+ if (!ep) {
+ printk(KERN_ERR
+ "beiscsi_session_create: invalid ep\n");
+ return NULL;
+ }
+ beiscsi_ep = ep->dd_data;
+ phba = beiscsi_ep->phba;
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : PCI_ERROR Recovery\n");
+ return NULL;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_session_create\n");
+ }
+
+ if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Cannot handle %d cmds."
+ "Max cmds per session supported is %d. Using %d."
+ "\n", cmds_max,
+ beiscsi_ep->phba->params.wrbs_per_cxn,
+ beiscsi_ep->phba->params.wrbs_per_cxn);
+
+ cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
+ }
+
+ shost = phba->shost;
+ cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
+ shost, cmds_max,
+ sizeof(*beiscsi_sess),
+ sizeof(*io_task),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session)
+ return NULL;
+ sess = cls_session->dd_data;
+ beiscsi_sess = sess->dd_data;
+ beiscsi_sess->bhs_pool = pci_pool_create("beiscsi_bhs_pool",
+ phba->pcidev,
+ sizeof(struct be_cmd_bhs),
+ 64, 0);
+ if (!beiscsi_sess->bhs_pool)
+ goto destroy_sess;
+
+ return cls_session;
+destroy_sess:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+/**
+ * beiscsi_session_destroy - destroys iscsi session
+ * @cls_session: pointer to iscsi cls session
+ *
+ * Destroys iSCSI session instance and releases
+ * resources allocated for it.
+ */
+void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess = cls_session->dd_data;
+ struct beiscsi_session *beiscsi_sess = sess->dd_data;
+
+ printk(KERN_INFO "In beiscsi_session_destroy\n");
+ pci_pool_destroy(beiscsi_sess->bhs_pool);
+ iscsi_session_teardown(cls_session);
+}
+
+/**
+ * beiscsi_conn_create - create an instance of iscsi connection
+ * @cls_session: ptr to iscsi_cls_session
+ * @cid: iscsi cid
+ */
+struct iscsi_cls_conn *
+beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
+{
+ struct beiscsi_hba *phba;
+ struct Scsi_Host *shost;
+ struct iscsi_cls_conn *cls_conn;
+ struct beiscsi_conn *beiscsi_conn;
+ struct iscsi_conn *conn;
+ struct iscsi_session *sess;
+ struct beiscsi_session *beiscsi_sess;
+
+ shost = iscsi_session_to_shost(cls_session);
+ phba = iscsi_host_priv(shost);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_create ,cid"
+ "from iscsi layer=%d\n", cid);
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
+ if (!cls_conn)
+ return NULL;
+
+ conn = cls_conn->dd_data;
+ beiscsi_conn = conn->dd_data;
+ beiscsi_conn->ep = NULL;
+ beiscsi_conn->phba = phba;
+ beiscsi_conn->conn = conn;
+ sess = cls_session->dd_data;
+ beiscsi_sess = sess->dd_data;
+ beiscsi_conn->beiscsi_sess = beiscsi_sess;
+ return cls_conn;
+}
+
+/**
+ * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
+ * @beiscsi_conn: The pointer to beiscsi_conn structure
+ * @phba: The phba instance
+ * @cid: The cid to free
+ */
+static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
+ struct beiscsi_conn *beiscsi_conn,
+ unsigned int cid)
+{
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+ if (phba->conn_table[cri_index]) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Connection table already occupied. Detected clash\n");
+
+ return -EINVAL;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
+ cri_index, beiscsi_conn);
+
+ phba->conn_table[cri_index] = beiscsi_conn;
+ }
+ return 0;
+}
+
+/**
+ * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
+ * @cls_session: pointer to iscsi cls session
+ * @cls_conn: pointer to iscsi cls conn
+ * @transport_fd: EP handle(64 bit)
+ *
+ * This function binds the TCP Conn with iSCSI Connection and Session.
+ */
+int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_endpoint *ep;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ beiscsi_ep = ep->dd_data;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ if (beiscsi_ep->phba != phba) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
+ beiscsi_ep->phba, phba);
+
+ return -EEXIST;
+ }
+
+ pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+ beiscsi_ep->ep_cid)];
+
+ beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
+ beiscsi_conn->ep = beiscsi_ep;
+ beiscsi_ep->conn = beiscsi_conn;
+ beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
+ beiscsi_conn, conn, beiscsi_ep->ep_cid);
+
+ return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
+}
+
+static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+{
+ if (phba->ipv4_iface)
+ return 0;
+
+ phba->ipv4_iface = iscsi_create_iface(phba->shost,
+ &beiscsi_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV4,
+ 0, 0);
+ if (!phba->ipv4_iface) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Could not "
+ "create default IPv4 address.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+{
+ if (phba->ipv6_iface)
+ return 0;
+
+ phba->ipv6_iface = iscsi_create_iface(phba->shost,
+ &beiscsi_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV6,
+ 0, 0);
+ if (!phba->ipv6_iface) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Could not "
+ "create default IPv6 address.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+{
+ struct be_cmd_get_if_info_resp *if_info;
+
+ if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
+ beiscsi_create_ipv4_iface(phba);
+ kfree(if_info);
+ }
+
+ if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
+ beiscsi_create_ipv6_iface(phba);
+ kfree(if_info);
+ }
+}
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+{
+ if (phba->ipv6_iface)
+ iscsi_destroy_iface(phba->ipv6_iface);
+ if (phba->ipv4_iface)
+ iscsi_destroy_iface(phba->ipv4_iface);
+}
+
+static int
+beiscsi_set_static_ip(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct iscsi_iface_param_info *iface_ip = NULL;
+ struct iscsi_iface_param_info *iface_subnet = NULL;
+ struct nlattr *nla;
+ int ret;
+
+
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+ if (nla)
+ iface_ip = nla_data(nla);
+
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+ if (nla)
+ iface_subnet = nla_data(nla);
+ break;
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ iface_ip = iface_param;
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+ if (nla)
+ iface_subnet = nla_data(nla);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ iface_subnet = iface_param;
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+ if (nla)
+ iface_ip = nla_data(nla);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Unsupported param %d\n",
+ iface_param->param);
+ }
+
+ if (!iface_ip || !iface_subnet) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : IP and Subnet Mask required\n");
+ return -EINVAL;
+ }
+
+ ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
+ ISCSI_BOOTPROTO_STATIC);
+
+ return ret;
+}
+
+/**
+ * beiscsi_set_vlan_tag()- Set the VLAN TAG
+ * @shost: Scsi Host for the driver instance
+ * @iface_param: Interface paramters
+ *
+ * Set the VLAN TAG for the adapter or disable
+ * the VLAN config
+ *
+ * returns
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+static int
+beiscsi_set_vlan_tag(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int ret = 0;
+
+ /* Get the Interface Handle */
+ if (mgmt_get_all_if_id(phba)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Interface Handle Failed\n");
+ return -EIO;
+ }
+
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
+ ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = mgmt_set_vlan(phba,
+ *((uint16_t *)iface_param->value));
+ break;
+ default:
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : Unknown Param Type : %d\n",
+ iface_param->param);
+ return -ENOSYS;
+ }
+ return ret;
+}
+
+
+static int
+beiscsi_set_ipv4(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int ret = 0;
+
+ /* Check the param */
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV4_GW:
+ ret = mgmt_set_gateway(phba, iface_param);
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+ ret = mgmt_set_ip(phba, iface_param,
+ NULL, ISCSI_BOOTPROTO_DHCP);
+ else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+ ret = beiscsi_set_static_ip(shost, iface_param,
+ data, dt_len);
+ else
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Invalid BOOTPROTO: %d\n",
+ iface_param->value[0]);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+ ret = beiscsi_create_ipv4_iface(phba);
+ else
+ iscsi_destroy_iface(phba->ipv4_iface);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ ret = beiscsi_set_static_ip(shost, iface_param,
+ data, dt_len);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = beiscsi_set_vlan_tag(shost, iface_param);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Param %d not supported\n",
+ iface_param->param);
+ }
+
+ return ret;
+}
+
+static int
+beiscsi_set_ipv6(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int ret = 0;
+
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+ ret = beiscsi_create_ipv6_iface(phba);
+ else {
+ iscsi_destroy_iface(phba->ipv6_iface);
+ ret = 0;
+ }
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ ret = mgmt_set_ip(phba, iface_param, NULL,
+ ISCSI_BOOTPROTO_STATIC);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Param %d not supported\n",
+ iface_param->param);
+ }
+
+ return ret;
+}
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+ void *data, uint32_t dt_len)
+{
+ struct iscsi_iface_param_info *iface_param = NULL;
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct nlattr *attrib;
+ uint32_t rm_len = dt_len;
+ int ret = 0 ;
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ }
+
+ nla_for_each_attr(attrib, data, dt_len, rm_len) {
+ iface_param = nla_data(attrib);
+
+ if (iface_param->param_type != ISCSI_NET_PARAM)
+ continue;
+
+ /*
+ * BE2ISCSI only supports 1 interface
+ */
+ if (iface_param->iface_num) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Invalid iface_num %d."
+ "Only iface_num 0 is supported.\n",
+ iface_param->iface_num);
+
+ return -EINVAL;
+ }
+
+ switch (iface_param->iface_type) {
+ case ISCSI_IFACE_TYPE_IPV4:
+ ret = beiscsi_set_ipv4(shost, iface_param,
+ data, dt_len);
+ break;
+ case ISCSI_IFACE_TYPE_IPV6:
+ ret = beiscsi_set_ipv6(shost, iface_param,
+ data, dt_len);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Invalid iface type :%d passed\n",
+ iface_param->iface_type);
+ break;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
+ struct iscsi_iface *iface, int param,
+ char *buf)
+{
+ struct be_cmd_get_if_info_resp *if_info;
+ int len, ip_type = BE2_IPV4;
+
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ ip_type = BE2_IPV6;
+
+ len = mgmt_get_if_info(phba, ip_type, &if_info);
+ if (len)
+ return len;
+
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ if (!if_info->dhcp_state)
+ len = sprintf(buf, "static\n");
+ else
+ len = sprintf(buf, "dhcp\n");
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ len = sprintf(buf, "%s\n",
+ (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
+ ? "Disabled\n" : "Enabled\n");
+ break;
+ case ISCSI_NET_PARAM_VLAN_ID:
+ if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
+ len = -EINVAL;
+ else
+ len = sprintf(buf, "%d\n",
+ (if_info->vlan_priority &
+ ISCSI_MAX_VLAN_ID));
+ break;
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
+ len = -EINVAL;
+ else
+ len = sprintf(buf, "%d\n",
+ ((if_info->vlan_priority >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY));
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ kfree(if_info);
+ return len;
+}
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf)
+{
+ struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct be_cmd_get_def_gateway_resp gateway;
+ int len = -ENOSYS;
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ }
+
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_ID:
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ len = be2iscsi_get_if_param(phba, iface, param, buf);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ len = sprintf(buf, "enabled\n");
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ memset(&gateway, 0, sizeof(gateway));
+ len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+ if (!len)
+ len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
+ break;
+ default:
+ len = -ENOSYS;
+ }
+
+ return len;
+}
+
+/**
+ * beiscsi_ep_get_param - get the iscsi parameter
+ * @ep: pointer to iscsi ep
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iscsi parameter
+ */
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+ int len = 0;
+
+ beiscsi_log(beiscsi_ep->phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_ep_get_param,"
+ " param= %d\n", param);
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (beiscsi_ep->ip_type == BE2_IPV4)
+ len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return len;
+}
+
+int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct beiscsi_hba *phba = NULL;
+ int ret;
+
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_set_param,"
+ " param= %d\n", param);
+
+ ret = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (ret)
+ return ret;
+ /*
+ * If userspace tried to set the value to higher than we can
+ * support override here.
+ */
+ switch (param) {
+ case ISCSI_PARAM_FIRST_BURST:
+ if (session->first_burst > 8192)
+ session->first_burst = 8192;
+ break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ if (conn->max_recv_dlength > 65536)
+ conn->max_recv_dlength = 65536;
+ break;
+ case ISCSI_PARAM_MAX_BURST:
+ if (session->max_burst > 262144)
+ session->max_burst = 262144;
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ if (conn->max_xmit_dlength > 65536)
+ conn->max_xmit_dlength = 65536;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * beiscsi_get_initname - Read Initiator Name from flash
+ * @buf: buffer bointer
+ * @phba: The device priv structure instance
+ *
+ * returns number of bytes
+ */
+static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
+{
+ int rc;
+ unsigned int tag;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_hba_name *resp;
+
+ tag = be_cmd_get_initname(phba);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Initiator Name Failed\n");
+
+ return -EBUSY;
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : Initiator Name MBX Failed\n");
+ return rc;
+ }
+
+ resp = embedded_payload(wrb);
+ rc = sprintf(buf, "%s\n", resp->initiator_name);
+ return rc;
+}
+
+/**
+ * beiscsi_get_port_state - Get the Port State
+ * @shost : pointer to scsi_host structure
+ *
+ */
+static void beiscsi_get_port_state(struct Scsi_Host *shost)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+
+ ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
+ ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
+}
+
+/**
+ * beiscsi_get_port_speed - Get the Port Speed from Adapter
+ * @shost : pointer to scsi_host structure
+ *
+ * returns Success/Failure
+ */
+static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+{
+ int rc;
+ unsigned int tag;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_ntwk_link_status_resp *resp;
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+
+ tag = be_cmd_get_port_speed(phba);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Port Speed Failed\n");
+
+ return -EBUSY;
+ }
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : Port Speed MBX Failed\n");
+ return rc;
+ }
+ resp = embedded_payload(wrb);
+
+ switch (resp->mac_speed) {
+ case BE2ISCSI_LINK_SPEED_10MBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_100MBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_1GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_10GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
+ break;
+ default:
+ ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
+ }
+ return 0;
+}
+
+/**
+ * beiscsi_get_host_param - get the iscsi parameter
+ * @shost: pointer to scsi_host structure
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns host parameter
+ */
+int beiscsi_get_host_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ int status = 0;
+
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_get_host_param,"
+ " param = %d\n", param);
+ }
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ status = beiscsi_get_macaddr(buf, phba);
+ if (status < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : beiscsi_get_macaddr Failed\n");
+ return status;
+ }
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ status = beiscsi_get_initname(buf, phba);
+ if (status < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Retreiving Initiator Name Failed\n");
+ return status;
+ }
+ break;
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ beiscsi_get_port_state(shost);
+ status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+ break;
+ case ISCSI_HOST_PARAM_PORT_SPEED:
+ status = beiscsi_get_port_speed(shost);
+ if (status) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Retreiving Port Speed Failed\n");
+ return status;
+ }
+ status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return status;
+}
+
+int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
+{
+ struct be_cmd_get_nic_conf_resp resp;
+ int rc;
+
+ if (phba->mac_addr_set)
+ return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
+
+ memset(&resp, 0, sizeof(resp));
+ rc = mgmt_get_nic_conf(phba, &resp);
+ if (rc)
+ return rc;
+
+ phba->mac_addr_set = true;
+ memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
+ return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
+}
+
+/**
+ * beiscsi_conn_get_stats - get the iscsi stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi_stats structure
+ *
+ * returns iscsi stats
+ */
+void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_hba *phba = NULL;
+
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_get_stats\n");
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ stats->custom_length = 1;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+}
+
+/**
+ * beiscsi_set_params_for_offld - get the parameters for offload
+ * @beiscsi_conn: pointer to beiscsi_conn
+ * @params: pointer to offload_params structure
+ */
+static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_offload_params *params)
+{
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_burst_length,
+ params, session->max_burst);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length, params,
+ conn->max_xmit_dlength);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, first_burst_length,
+ params, session->first_burst);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params,
+ session->erl);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, dde, params,
+ conn->datadgst_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, hde, params,
+ conn->hdrdgst_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, ir2t, params,
+ session->initial_r2t_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
+ session->imm_data_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ data_seq_inorder, params,
+ session->dataseq_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder, params,
+ session->pdu_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_r2t, params,
+ session->max_r2t);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
+ (conn->exp_statsn - 1));
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ max_recv_data_segment_length, params,
+ conn->max_recv_dlength);
+
+}
+
+/**
+ * beiscsi_conn_start - offload of session to chip
+ * @cls_conn: pointer to beiscsi_conn
+ */
+int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct beiscsi_offload_params params;
+ struct beiscsi_hba *phba;
+
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ } else {
+ beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start\n");
+ }
+
+ memset(&params, 0, sizeof(struct beiscsi_offload_params));
+ beiscsi_ep = beiscsi_conn->ep;
+ if (!beiscsi_ep)
+ beiscsi_log(beiscsi_conn->phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start , no beiscsi_ep\n");
+
+ beiscsi_conn->login_in_progress = 0;
+ beiscsi_set_params_for_offld(beiscsi_conn, &params);
+ beiscsi_offload_connection(beiscsi_conn, &params);
+ iscsi_conn_start(cls_conn);
+ return 0;
+}
+
+/**
+ * beiscsi_get_cid - Allocate a cid
+ * @phba: The phba instance
+ */
+static int beiscsi_get_cid(struct beiscsi_hba *phba)
+{
+ unsigned short cid = 0xFFFF, cid_from_ulp;
+ struct ulp_cid_info *cid_info = NULL;
+ uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+
+ /* Find the ULP which has more CID available */
+ cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
+ BEISCSI_ULP0_AVLBL_CID(phba) : 0;
+ cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
+ BEISCSI_ULP1_AVLBL_CID(phba) : 0;
+ cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
+ BEISCSI_ULP0 : BEISCSI_ULP1;
+
+ if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
+ cid_info = phba->cid_array_info[cid_from_ulp];
+ if (!cid_info->avlbl_cids)
+ return cid;
+
+ cid = cid_info->cid_array[cid_info->cid_alloc++];
+
+ if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
+ phba, cid_from_ulp))
+ cid_info->cid_alloc = 0;
+
+ cid_info->avlbl_cids--;
+ }
+ return cid;
+}
+
+/**
+ * beiscsi_put_cid - Free the cid
+ * @phba: The phba for which the cid is being freed
+ * @cid: The cid to free
+ */
+static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
+{
+ uint16_t cid_post_ulp;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+ struct ulp_cid_info *cid_info = NULL;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+ cid_post_ulp = pwrb_context->ulp_num;
+
+ cid_info = phba->cid_array_info[cid_post_ulp];
+ cid_info->avlbl_cids++;
+
+ cid_info->cid_array[cid_info->cid_free++] = cid;
+ if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
+ cid_info->cid_free = 0;
+}
+
+/**
+ * beiscsi_free_ep - free endpoint
+ * @ep: pointer to iscsi endpoint structure
+ */
+static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
+{
+ struct beiscsi_hba *phba = beiscsi_ep->phba;
+ struct beiscsi_conn *beiscsi_conn;
+
+ beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+ beiscsi_ep->phba = NULL;
+ phba->ep_array[BE_GET_CRI_FROM_CID
+ (beiscsi_ep->ep_cid)] = NULL;
+
+ /**
+ * Check if any connection resource allocated by driver
+ * is to be freed.This case occurs when target redirection
+ * or connection retry is done.
+ **/
+ if (!beiscsi_ep->conn)
+ return;
+
+ beiscsi_conn = beiscsi_ep->conn;
+ if (beiscsi_conn->login_in_progress) {
+ beiscsi_free_mgmt_task_handles(beiscsi_conn,
+ beiscsi_conn->task);
+ beiscsi_conn->login_in_progress = 0;
+ }
+}
+
+/**
+ * beiscsi_open_conn - Ask FW to open a TCP connection
+ * @ep: endpoint to be used
+ * @src_addr: The source IP address
+ * @dst_addr: The Destination IP address
+ *
+ * Asks the FW to open a TCP connection
+ */
+static int beiscsi_open_conn(struct iscsi_endpoint *ep,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
+{
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+ struct beiscsi_hba *phba = beiscsi_ep->phba;
+ struct tcp_connect_and_offload_out *ptcpcnct_out;
+ struct be_dma_mem nonemb_cmd;
+ unsigned int tag, req_memsize;
+ int ret = -ENOMEM;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_open_conn\n");
+
+ beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
+ if (beiscsi_ep->ep_cid == 0xFFFF) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : No free cid available\n");
+ return ret;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
+ beiscsi_ep->ep_cid);
+
+ phba->ep_array[BE_GET_CRI_FROM_CID
+ (beiscsi_ep->ep_cid)] = ep;
+
+ beiscsi_ep->cid_vld = 0;
+
+ if (is_chip_be2_be3r(phba))
+ req_memsize = sizeof(struct tcp_connect_and_offload_in);
+ else
+ req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
+
+ nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ req_memsize,
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Failed to allocate memory for"
+ " mgmt_open_connection\n");
+
+ beiscsi_free_ep(beiscsi_ep);
+ return -ENOMEM;
+ }
+ nonemb_cmd.size = req_memsize;
+ memset(nonemb_cmd.va, 0, nonemb_cmd.size);
+ tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
+ if (tag <= 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Failed for cid=%d\n",
+ beiscsi_ep->ep_cid);
+
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ beiscsi_free_ep(beiscsi_ep);
+ return -EAGAIN;
+ }
+
+ ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : mgmt_open_connection Failed");
+
+ if (ret != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
+ beiscsi_free_ep(beiscsi_ep);
+ return ret;
+ }
+
+ ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
+ beiscsi_ep->cid_vld = 1;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Success\n");
+
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return 0;
+}
+
+/**
+ * beiscsi_ep_connect - Ask chip to create TCP Conn
+ * @scsi_host: Pointer to scsi_host structure
+ * @dst_addr: The IP address of Target
+ * @non_blocking: blocking or non-blocking call
+ *
+ * This routines first asks chip to create a connection and then allocates an EP
+ */
+struct iscsi_endpoint *
+beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct beiscsi_hba *phba;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_endpoint *ep;
+ int ret;
+
+ if (shost)
+ phba = iscsi_host_priv(shost);
+ else {
+ ret = -ENXIO;
+ printk(KERN_ERR
+ "beiscsi_ep_connect shost is NULL\n");
+ return ERR_PTR(ret);
+ }
+
+ if (beiscsi_error(phba)) {
+ ret = -EIO;
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : The FW state Not Stable!!!\n");
+ return ERR_PTR(ret);
+ }
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ ret = -EBUSY;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return ERR_PTR(ret);
+ } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
+ ret = -EBUSY;
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : The Adapter Port state is Down!!!\n");
+ return ERR_PTR(ret);
+ }
+
+ ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint));
+ if (!ep) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->phba = phba;
+ beiscsi_ep->openiscsi_ep = ep;
+ ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Failed in beiscsi_open_conn\n");
+ goto free_ep;
+ }
+
+ return ep;
+
+free_ep:
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(ret);
+}
+
+/**
+ * beiscsi_ep_poll - Poll to see if connection is established
+ * @ep: endpoint to be used
+ * @timeout_ms: timeout specified in millisecs
+ *
+ * Poll to see if TCP connection established
+ */
+int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+
+ beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_ep_poll\n");
+
+ if (beiscsi_ep->cid_vld == 1)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * beiscsi_flush_cq()- Flush the CQ created.
+ * @phba: ptr device priv structure.
+ *
+ * Before the connection resource are freed flush
+ * all the CQ enteries
+ **/
+static void beiscsi_flush_cq(struct beiscsi_hba *phba)
+{
+ uint16_t i;
+ struct be_eq_obj *pbe_eq;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ beiscsi_process_cq(pbe_eq);
+ blk_iopoll_enable(&pbe_eq->iopoll);
+ }
+}
+
+/**
+ * beiscsi_close_conn - Upload the connection
+ * @ep: The iscsi endpoint
+ * @flag: The type of connection closure
+ */
+static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
+{
+ int ret = 0;
+ unsigned int tag;
+ struct beiscsi_hba *phba = beiscsi_ep->phba;
+
+ tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
+ if (!tag) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : upload failed for cid 0x%x\n",
+ beiscsi_ep->ep_cid);
+
+ ret = -EAGAIN;
+ }
+
+ ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+
+ /* Flush the CQ entries */
+ beiscsi_flush_cq(phba);
+
+ return ret;
+}
+
+/**
+ * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
+ * @phba: The phba instance
+ * @cid: The cid to free
+ */
+static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
+ unsigned int cid)
+{
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+ if (phba->conn_table[cri_index])
+ phba->conn_table[cri_index] = NULL;
+ else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : Connection table Not occupied.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * beiscsi_ep_disconnect - Tears down the TCP connection
+ * @ep: endpoint to be used
+ *
+ * Tears down the TCP connection
+ */
+void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct beiscsi_hba *phba;
+ unsigned int tag;
+ uint8_t mgmt_invalidate_flag, tcp_upload_flag;
+ unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
+
+ beiscsi_ep = ep->dd_data;
+ phba = beiscsi_ep->phba;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
+ beiscsi_ep->ep_cid);
+
+ if (beiscsi_ep->conn) {
+ beiscsi_conn = beiscsi_ep->conn;
+ iscsi_suspend_queue(beiscsi_conn->conn);
+ mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
+ tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
+ } else {
+ mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
+ tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
+ }
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : PCI_ERROR Recovery\n");
+ goto free_ep;
+ }
+
+ tag = mgmt_invalidate_connection(phba, beiscsi_ep,
+ beiscsi_ep->ep_cid,
+ mgmt_invalidate_flag,
+ savecfg_flag);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
+ beiscsi_ep->ep_cid);
+ }
+
+ beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
+free_ep:
+ msleep(BEISCSI_LOGOUT_SYNC_DELAY);
+ beiscsi_free_ep(beiscsi_ep);
+ beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
+ iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
+}
+
+umode_t be2iscsi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_NET_PARAM:
+ switch (param) {
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ case ISCSI_NET_PARAM_IPV4_GW:
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ case ISCSI_NET_PARAM_VLAN_ID:
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ case ISCSI_HOST_PARAM_PORT_SPEED:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
new file mode 100644
index 000000000..0c84e1c07
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -0,0 +1,91 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef _BE_ISCSI_
+#define _BE_ISCSI_
+
+#include "be_main.h"
+#include "be_mgmt.h"
+
+#define BE2_IPV4 0x1
+#define BE2_IPV6 0x10
+#define BE2_DHCP_V4 0x05
+
+#define NON_BLOCKING 0x0
+#define BLOCKING 0x1
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf);
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+ void *data, uint32_t count);
+
+umode_t be2iscsi_attr_is_visible(int param_type, int param);
+
+void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_offload_params *params);
+
+void beiscsi_offload_iscsi(struct beiscsi_hba *phba, struct iscsi_conn *conn,
+ struct beiscsi_conn *beiscsi_conn,
+ unsigned int fw_handle);
+
+struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max,
+ uint16_t qdepth,
+ uint32_t initial_cmdsn);
+
+void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
+
+struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
+ *cls_session, uint32_t cid);
+
+int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading);
+
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
+
+int beiscsi_get_host_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+
+int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba);
+
+int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+
+int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn);
+
+struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking);
+
+int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
+
+void beiscsi_ep_disconnect(struct iscsi_endpoint *ep);
+
+void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats);
+
+#endif
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
new file mode 100644
index 000000000..1f74760ce
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -0,0 +1,5828 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/semaphore.h>
+#include <linux/iscsi_boot_sysfs.h>
+#include <linux/module.h>
+#include <linux/bsg-lib.h>
+
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include "be_main.h"
+#include "be_iscsi.h"
+#include "be_mgmt.h"
+#include "be_cmds.h"
+
+static unsigned int be_iopoll_budget = 10;
+static unsigned int be_max_phys_size = 64;
+static unsigned int enable_msix = 1;
+
+MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
+MODULE_VERSION(BUILD_STR);
+MODULE_AUTHOR("Avago Technologies");
+MODULE_LICENSE("GPL");
+module_param(be_iopoll_budget, int, 0);
+module_param(enable_msix, int, 0);
+module_param(be_max_phys_size, uint, S_IRUGO);
+MODULE_PARM_DESC(be_max_phys_size,
+ "Maximum Size (In Kilobytes) of physically contiguous "
+ "memory that can be allocated. Range is 16 - 128");
+
+#define beiscsi_disp_param(_name)\
+ssize_t \
+beiscsi_##_name##_disp(struct device *dev,\
+ struct device_attribute *attrib, char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct beiscsi_hba *phba = iscsi_host_priv(shost); \
+ uint32_t param_val = 0; \
+ param_val = phba->attr_##_name;\
+ return snprintf(buf, PAGE_SIZE, "%d\n",\
+ phba->attr_##_name);\
+}
+
+#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
+int \
+beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
+{\
+ if (val >= _minval && val <= _maxval) {\
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
+ "BA_%d : beiscsi_"#_name" updated "\
+ "from 0x%x ==> 0x%x\n",\
+ phba->attr_##_name, val); \
+ phba->attr_##_name = val;\
+ return 0;\
+ } \
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
+ "BA_%d beiscsi_"#_name" attribute "\
+ "cannot be updated to 0x%x, "\
+ "range allowed is ["#_minval" - "#_maxval"]\n", val);\
+ return -EINVAL;\
+}
+
+#define beiscsi_store_param(_name) \
+ssize_t \
+beiscsi_##_name##_store(struct device *dev,\
+ struct device_attribute *attr, const char *buf,\
+ size_t count) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);\
+ uint32_t param_val = 0;\
+ if (!isdigit(buf[0]))\
+ return -EINVAL;\
+ if (sscanf(buf, "%i", &param_val) != 1)\
+ return -EINVAL;\
+ if (beiscsi_##_name##_change(phba, param_val) == 0) \
+ return strlen(buf);\
+ else \
+ return -EINVAL;\
+}
+
+#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
+int \
+beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
+{ \
+ if (val >= _minval && val <= _maxval) {\
+ phba->attr_##_name = val;\
+ return 0;\
+ } \
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
+ "BA_%d beiscsi_"#_name" attribute " \
+ "cannot be updated to 0x%x, "\
+ "range allowed is ["#_minval" - "#_maxval"]\n", val);\
+ phba->attr_##_name = _defval;\
+ return -EINVAL;\
+}
+
+#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
+static uint beiscsi_##_name = _defval;\
+module_param(beiscsi_##_name, uint, S_IRUGO);\
+MODULE_PARM_DESC(beiscsi_##_name, _descp);\
+beiscsi_disp_param(_name)\
+beiscsi_change_param(_name, _minval, _maxval, _defval)\
+beiscsi_store_param(_name)\
+beiscsi_init_param(_name, _minval, _maxval, _defval)\
+DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
+ beiscsi_##_name##_disp, beiscsi_##_name##_store)
+
+/*
+ * When new log level added update the
+ * the MAX allowed value for log_enable
+ */
+BEISCSI_RW_ATTR(log_enable, 0x00,
+ 0xFF, 0x00, "Enable logging Bit Mask\n"
+ "\t\t\t\tInitialization Events : 0x01\n"
+ "\t\t\t\tMailbox Events : 0x02\n"
+ "\t\t\t\tMiscellaneous Events : 0x04\n"
+ "\t\t\t\tError Handling : 0x08\n"
+ "\t\t\t\tIO Path Events : 0x10\n"
+ "\t\t\t\tConfiguration Path : 0x20\n"
+ "\t\t\t\tiSCSI Protocol : 0x40\n");
+
+DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+ beiscsi_active_session_disp, NULL);
+DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+ beiscsi_free_session_disp, NULL);
+struct device_attribute *beiscsi_attrs[] = {
+ &dev_attr_beiscsi_log_enable,
+ &dev_attr_beiscsi_drvr_ver,
+ &dev_attr_beiscsi_adapter_family,
+ &dev_attr_beiscsi_fw_ver,
+ &dev_attr_beiscsi_active_session_count,
+ &dev_attr_beiscsi_free_session_count,
+ &dev_attr_beiscsi_phys_port,
+ NULL,
+};
+
+static char const *cqe_desc[] = {
+ "RESERVED_DESC",
+ "SOL_CMD_COMPLETE",
+ "SOL_CMD_KILLED_DATA_DIGEST_ERR",
+ "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
+ "CXN_KILLED_BURST_LEN_MISMATCH",
+ "CXN_KILLED_AHS_RCVD",
+ "CXN_KILLED_HDR_DIGEST_ERR",
+ "CXN_KILLED_UNKNOWN_HDR",
+ "CXN_KILLED_STALE_ITT_TTT_RCVD",
+ "CXN_KILLED_INVALID_ITT_TTT_RCVD",
+ "CXN_KILLED_RST_RCVD",
+ "CXN_KILLED_TIMED_OUT",
+ "CXN_KILLED_RST_SENT",
+ "CXN_KILLED_FIN_RCVD",
+ "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
+ "CXN_KILLED_BAD_WRB_INDEX_ERROR",
+ "CXN_KILLED_OVER_RUN_RESIDUAL",
+ "CXN_KILLED_UNDER_RUN_RESIDUAL",
+ "CMD_KILLED_INVALID_STATSN_RCVD",
+ "CMD_KILLED_INVALID_R2T_RCVD",
+ "CMD_CXN_KILLED_LUN_INVALID",
+ "CMD_CXN_KILLED_ICD_INVALID",
+ "CMD_CXN_KILLED_ITT_INVALID",
+ "CMD_CXN_KILLED_SEQ_OUTOFORDER",
+ "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
+ "CXN_INVALIDATE_NOTIFY",
+ "CXN_INVALIDATE_INDEX_NOTIFY",
+ "CMD_INVALIDATED_NOTIFY",
+ "UNSOL_HDR_NOTIFY",
+ "UNSOL_DATA_NOTIFY",
+ "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
+ "DRIVERMSG_NOTIFY",
+ "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
+ "SOL_CMD_KILLED_DIF_ERR",
+ "CXN_KILLED_SYN_RCVD",
+ "CXN_KILLED_IMM_DATA_RCVD"
+};
+
+static int beiscsi_slave_configure(struct scsi_device *sdev)
+{
+ blk_queue_max_segment_size(sdev->request_queue, 65536);
+ return 0;
+}
+
+static int beiscsi_eh_abort(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
+ struct beiscsi_io_task *aborted_io_task;
+ struct iscsi_conn *conn;
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_hba *phba;
+ struct iscsi_session *session;
+ struct invalidate_command_table *inv_tbl;
+ struct be_dma_mem nonemb_cmd;
+ unsigned int cid, tag, num_invalidate;
+ int rc;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ if (!aborted_task || !aborted_task->sc) {
+ /* we raced */
+ spin_unlock_bh(&session->frwd_lock);
+ return SUCCESS;
+ }
+
+ aborted_io_task = aborted_task->dd_data;
+ if (!aborted_io_task->scsi_cmnd) {
+ /* raced or invalid command */
+ spin_unlock_bh(&session->frwd_lock);
+ return SUCCESS;
+ }
+ spin_unlock_bh(&session->frwd_lock);
+ /* Invalidate WRB Posted for this Task */
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ aborted_io_task->pwrb_handle->pwrb,
+ 1);
+
+ conn = aborted_task->conn;
+ beiscsi_conn = conn->dd_data;
+ phba = beiscsi_conn->phba;
+
+ /* invalidate iocb */
+ cid = beiscsi_conn->beiscsi_conn_cid;
+ inv_tbl = phba->inv_tbl;
+ memset(inv_tbl, 0x0, sizeof(*inv_tbl));
+ inv_tbl->cid = cid;
+ inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
+ num_invalidate = 1;
+ nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ sizeof(struct invalidate_commands_params_in),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+ "BM_%d : Failed to allocate memory for"
+ "mgmt_invalidate_icds\n");
+ return FAILED;
+ }
+ nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
+
+ tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
+ cid, &nonemb_cmd);
+ if (!tag) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
+ "BM_%d : mgmt_invalidate_icds could not be"
+ "submitted\n");
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
+ return FAILED;
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
+ return iscsi_eh_abort(sc);
+}
+
+static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
+{
+ struct iscsi_task *abrt_task;
+ struct beiscsi_io_task *abrt_io_task;
+ struct iscsi_conn *conn;
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_hba *phba;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+ struct invalidate_command_table *inv_tbl;
+ struct be_dma_mem nonemb_cmd;
+ unsigned int cid, tag, i, num_invalidate;
+ int rc;
+
+ /* invalidate iocbs */
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+ spin_lock_bh(&session->frwd_lock);
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
+ spin_unlock_bh(&session->frwd_lock);
+ return FAILED;
+ }
+ conn = session->leadconn;
+ beiscsi_conn = conn->dd_data;
+ phba = beiscsi_conn->phba;
+ cid = beiscsi_conn->beiscsi_conn_cid;
+ inv_tbl = phba->inv_tbl;
+ memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
+ num_invalidate = 0;
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ abrt_task = conn->session->cmds[i];
+ abrt_io_task = abrt_task->dd_data;
+ if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+ continue;
+
+ if (sc->device->lun != abrt_task->sc->device->lun)
+ continue;
+
+ /* Invalidate WRB Posted for this Task */
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ abrt_io_task->pwrb_handle->pwrb,
+ 1);
+
+ inv_tbl->cid = cid;
+ inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
+ num_invalidate++;
+ inv_tbl++;
+ }
+ spin_unlock_bh(&session->frwd_lock);
+ inv_tbl = phba->inv_tbl;
+
+ nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ sizeof(struct invalidate_commands_params_in),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+ "BM_%d : Failed to allocate memory for"
+ "mgmt_invalidate_icds\n");
+ return FAILED;
+ }
+ nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
+ memset(nonemb_cmd.va, 0, nonemb_cmd.size);
+ tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
+ cid, &nonemb_cmd);
+ if (!tag) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
+ "BM_%d : mgmt_invalidate_icds could not be"
+ " submitted\n");
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return FAILED;
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return iscsi_eh_device_reset(sc);
+}
+
+static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ struct mgmt_session_info *boot_sess = &phba->boot_sess;
+ struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = sprintf(buf, "%.*s\n",
+ (int)strlen(boot_sess->target_name),
+ (char *)&boot_sess->target_name);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (boot_conn->dest_ipaddr.ip_type == 0x1)
+ rc = sprintf(buf, "%pI4\n",
+ (char *)&boot_conn->dest_ipaddr.addr);
+ else
+ rc = sprintf(str, "%pI6\n",
+ (char *)&boot_conn->dest_ipaddr.addr);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = sprintf(str, "%d\n", boot_conn->dest_port);
+ break;
+
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ target_chap_name_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.target_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ target_secret_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.target_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ intr_chap_name_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.intr_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ intr_secret_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.intr_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = sprintf(str, "2\n");
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = sprintf(str, "0\n");
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = sprintf(str, "2\n");
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = sprintf(str, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = beiscsi_get_macaddr(str, phba);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
+{
+ umode_t rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+{
+ umode_t rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
+{
+ umode_t rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+/*------------------- PCI Driver operations and data ----------------- */
+static const struct pci_device_id beiscsi_pci_id_table[] = {
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
+ { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
+
+
+static struct scsi_host_template beiscsi_sht = {
+ .module = THIS_MODULE,
+ .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
+ .proc_name = DRV_NAME,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = scsi_change_queue_depth,
+ .slave_configure = beiscsi_slave_configure,
+ .target_alloc = iscsi_target_alloc,
+ .eh_abort_handler = beiscsi_eh_abort,
+ .eh_device_reset_handler = beiscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_session_reset,
+ .shost_attrs = beiscsi_attrs,
+ .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
+ .can_queue = BE2_IO_DEPTH,
+ .this_id = -1,
+ .max_sectors = BEISCSI_MAX_SECTORS,
+ .cmd_per_lun = BEISCSI_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
+ .track_queue_depth = 1,
+};
+
+static struct scsi_transport_template *beiscsi_scsi_transport;
+
+static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
+{
+ struct beiscsi_hba *phba;
+ struct Scsi_Host *shost;
+
+ shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
+ if (!shost) {
+ dev_err(&pcidev->dev,
+ "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
+ return NULL;
+ }
+ shost->max_id = BE2_MAX_SESSIONS;
+ shost->max_channel = 0;
+ shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+ shost->max_lun = BEISCSI_NUM_MAX_LUN;
+ shost->transportt = beiscsi_scsi_transport;
+ phba = iscsi_host_priv(shost);
+ memset(phba, 0, sizeof(*phba));
+ phba->shost = shost;
+ phba->pcidev = pci_dev_get(pcidev);
+ pci_set_drvdata(pcidev, phba);
+ phba->interface_handle = 0xFFFFFFFF;
+
+ return phba;
+}
+
+static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
+{
+ if (phba->csr_va) {
+ iounmap(phba->csr_va);
+ phba->csr_va = NULL;
+ }
+ if (phba->db_va) {
+ iounmap(phba->db_va);
+ phba->db_va = NULL;
+ }
+ if (phba->pci_va) {
+ iounmap(phba->pci_va);
+ phba->pci_va = NULL;
+ }
+}
+
+static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
+ struct pci_dev *pcidev)
+{
+ u8 __iomem *addr;
+ int pcicfg_reg;
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+ pci_resource_len(pcidev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ phba->ctrl.csr = addr;
+ phba->csr_va = addr;
+ phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+ if (addr == NULL)
+ goto pci_map_err;
+ phba->ctrl.db = addr;
+ phba->db_va = addr;
+ phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
+
+ if (phba->generation == BE_GEN2)
+ pcicfg_reg = 1;
+ else
+ pcicfg_reg = 0;
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+ pci_resource_len(pcidev, pcicfg_reg));
+
+ if (addr == NULL)
+ goto pci_map_err;
+ phba->ctrl.pcicfg = addr;
+ phba->pci_va = addr;
+ phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
+ return 0;
+
+pci_map_err:
+ beiscsi_unmap_pci_function(phba);
+ return -ENOMEM;
+}
+
+static int beiscsi_enable_pci(struct pci_dev *pcidev)
+{
+ int ret;
+
+ ret = pci_enable_device(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev,
+ "beiscsi_enable_pci - enable device failed\n");
+ return ret;
+ }
+
+ pci_set_master(pcidev);
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+ pci_disable_device(pcidev);
+ return ret;
+ } else {
+ ret = pci_set_consistent_dma_mask(pcidev,
+ DMA_BIT_MASK(32));
+ }
+ } else {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+ pci_disable_device(pcidev);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
+ struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
+ int status = 0;
+
+ ctrl->pdev = pdev;
+ status = beiscsi_map_pci_bars(phba, pdev);
+ if (status)
+ return status;
+ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+ mbox_mem_alloc->va = pci_alloc_consistent(pdev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma);
+ if (!mbox_mem_alloc->va) {
+ beiscsi_unmap_pci_function(phba);
+ return -ENOMEM;
+ }
+
+ mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
+ mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
+ mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
+ memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
+ spin_lock_init(&ctrl->mbox_lock);
+ spin_lock_init(&phba->ctrl.mcc_lock);
+ spin_lock_init(&phba->ctrl.mcc_cq_lock);
+
+ return status;
+}
+
+/**
+ * beiscsi_get_params()- Set the config paramters
+ * @phba: ptr device priv structure
+ **/
+static void beiscsi_get_params(struct beiscsi_hba *phba)
+{
+ uint32_t total_cid_count = 0;
+ uint32_t total_icd_count = 0;
+ uint8_t ulp_num = 0;
+
+ total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+ BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ uint32_t align_mask = 0;
+ uint32_t icd_post_per_page = 0;
+ uint32_t icd_count_unavailable = 0;
+ uint32_t icd_start = 0, icd_count = 0;
+ uint32_t icd_start_align = 0, icd_count_align = 0;
+
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+ icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+
+ /* Get ICD count that can be posted on each page */
+ icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
+ sizeof(struct iscsi_sge)));
+ align_mask = (icd_post_per_page - 1);
+
+ /* Check if icd_start is aligned ICD per page posting */
+ if (icd_start % icd_post_per_page) {
+ icd_start_align = ((icd_start +
+ icd_post_per_page) &
+ ~(align_mask));
+ phba->fw_config.
+ iscsi_icd_start[ulp_num] =
+ icd_start_align;
+ }
+
+ icd_count_align = (icd_count & ~align_mask);
+
+ /* ICD discarded in the process of alignment */
+ if (icd_start_align)
+ icd_count_unavailable = ((icd_start_align -
+ icd_start) +
+ (icd_count -
+ icd_count_align));
+
+ /* Updated ICD count available */
+ phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
+ icd_count_unavailable);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Aligned ICD values\n"
+ "\t ICD Start : %d\n"
+ "\t ICD Count : %d\n"
+ "\t ICD Discarded : %d\n",
+ phba->fw_config.
+ iscsi_icd_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ icd_count_unavailable);
+ break;
+ }
+ }
+
+ total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+ phba->params.ios_per_ctrl = (total_icd_count -
+ (total_cid_count +
+ BE2_TMFS + BE2_NOPOUT_REQ));
+ phba->params.cxns_per_ctrl = total_cid_count;
+ phba->params.asyncpdus_per_ctrl = total_cid_count;
+ phba->params.icds_per_ctrl = total_icd_count;
+ phba->params.num_sge_per_io = BE2_SGE;
+ phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
+ phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
+ phba->params.eq_timer = 64;
+ phba->params.num_eq_entries = 1024;
+ phba->params.num_cq_entries = 1024;
+ phba->params.wrbs_per_cxn = 256;
+}
+
+static void hwi_ring_eq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int clr_interrupt,
+ unsigned int num_processed,
+ unsigned char rearm, unsigned char event)
+{
+ u32 val = 0;
+
+ if (rearm)
+ val |= 1 << DB_EQ_REARM_SHIFT;
+ if (clr_interrupt)
+ val |= 1 << DB_EQ_CLR_SHIFT;
+ if (event)
+ val |= 1 << DB_EQ_EVNT_SHIFT;
+
+ val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+ /* Setting lower order EQ_ID Bits */
+ val |= (id & DB_EQ_RING_ID_LOW_MASK);
+
+ /* Setting Higher order EQ_ID Bits */
+ val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
+ DB_EQ_RING_ID_HIGH_MASK)
+ << DB_EQ_HIGH_SET_SHIFT);
+
+ iowrite32(val, phba->db_va + DB_EQ_OFFSET);
+}
+
+/**
+ * be_isr_mcc - The isr routine of the driver.
+ * @irq: Not used
+ * @dev_id: Pointer to host adapter structure
+ */
+static irqreturn_t be_isr_mcc(int irq, void *dev_id)
+{
+ struct beiscsi_hba *phba;
+ struct be_eq_entry *eqe = NULL;
+ struct be_queue_info *eq;
+ struct be_queue_info *mcc;
+ unsigned int num_eq_processed;
+ struct be_eq_obj *pbe_eq;
+ unsigned long flags;
+
+ pbe_eq = dev_id;
+ eq = &pbe_eq->q;
+ phba = pbe_eq->phba;
+ mcc = &phba->ctrl.mcc_obj.cq;
+ eqe = queue_tail_node(eq);
+
+ num_eq_processed = 0;
+
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (((eqe->dw[offsetof(struct amap_eq_entry,
+ resource_id) / 32] &
+ EQE_RESID_MASK) >> 16) == mcc->id) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_mcc_cq = true;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ }
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
+ }
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
+ if (num_eq_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * be_isr_msix - The isr routine of the driver.
+ * @irq: Not used
+ * @dev_id: Pointer to host adapter structure
+ */
+static irqreturn_t be_isr_msix(int irq, void *dev_id)
+{
+ struct beiscsi_hba *phba;
+ struct be_eq_entry *eqe = NULL;
+ struct be_queue_info *eq;
+ struct be_queue_info *cq;
+ unsigned int num_eq_processed;
+ struct be_eq_obj *pbe_eq;
+
+ pbe_eq = dev_id;
+ eq = &pbe_eq->q;
+ cq = pbe_eq->cq;
+ eqe = queue_tail_node(eq);
+
+ phba = pbe_eq->phba;
+ num_eq_processed = 0;
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
+
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
+ }
+
+ if (num_eq_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * be_isr - The isr routine of the driver.
+ * @irq: Not used
+ * @dev_id: Pointer to host adapter structure
+ */
+static irqreturn_t be_isr(int irq, void *dev_id)
+{
+ struct beiscsi_hba *phba;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_eq_entry *eqe = NULL;
+ struct be_queue_info *eq;
+ struct be_queue_info *mcc;
+ unsigned long flags, index;
+ unsigned int num_mcceq_processed, num_ioeq_processed;
+ struct be_ctrl_info *ctrl;
+ struct be_eq_obj *pbe_eq;
+ int isr;
+
+ phba = dev_id;
+ ctrl = &phba->ctrl;
+ isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
+ (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
+ if (!isr)
+ return IRQ_NONE;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ pbe_eq = &phwi_context->be_eq[0];
+
+ eq = &phwi_context->be_eq[0].q;
+ mcc = &phba->ctrl.mcc_obj.cq;
+ index = 0;
+ eqe = queue_tail_node(eq);
+
+ num_ioeq_processed = 0;
+ num_mcceq_processed = 0;
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (((eqe->dw[offsetof(struct amap_eq_entry,
+ resource_id) / 32] &
+ EQE_RESID_MASK) >> 16) == mcc->id) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_mcc_cq = true;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ num_mcceq_processed++;
+ } else {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
+ num_ioeq_processed++;
+ }
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ }
+ if (num_ioeq_processed || num_mcceq_processed) {
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
+
+ if ((num_mcceq_processed) && (!num_ioeq_processed))
+ hwi_ring_eq_db(phba, eq->id, 0,
+ (num_ioeq_processed +
+ num_mcceq_processed) , 1, 1);
+ else
+ hwi_ring_eq_db(phba, eq->id, 0,
+ (num_ioeq_processed +
+ num_mcceq_processed), 0, 1);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+static int beiscsi_init_irqs(struct beiscsi_hba *phba)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ int ret, msix_vec, i, j;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ if (phba->msix_enabled) {
+ for (i = 0; i < phba->num_cpus; i++) {
+ phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
+ GFP_KERNEL);
+ if (!phba->msi_name[i]) {
+ ret = -ENOMEM;
+ goto free_msix_irqs;
+ }
+
+ sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
+ phba->shost->host_no, i);
+ msix_vec = phba->msix_entries[i].vector;
+ ret = request_irq(msix_vec, be_isr_msix, 0,
+ phba->msi_name[i],
+ &phwi_context->be_eq[i]);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_init_irqs-Failed to"
+ "register msix for i = %d\n",
+ i);
+ kfree(phba->msi_name[i]);
+ goto free_msix_irqs;
+ }
+ }
+ phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
+ if (!phba->msi_name[i]) {
+ ret = -ENOMEM;
+ goto free_msix_irqs;
+ }
+ sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
+ phba->shost->host_no);
+ msix_vec = phba->msix_entries[i].vector;
+ ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
+ &phwi_context->be_eq[i]);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
+ "BM_%d : beiscsi_init_irqs-"
+ "Failed to register beiscsi_msix_mcc\n");
+ kfree(phba->msi_name[i]);
+ goto free_msix_irqs;
+ }
+
+ } else {
+ ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
+ "beiscsi", phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_init_irqs-"
+ "Failed to register irq\\n");
+ return ret;
+ }
+ }
+ return 0;
+free_msix_irqs:
+ for (j = i - 1; j >= 0; j--) {
+ kfree(phba->msi_name[j]);
+ msix_vec = phba->msix_entries[j].vector;
+ free_irq(msix_vec, &phwi_context->be_eq[j]);
+ }
+ return ret;
+}
+
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int num_processed,
+ unsigned char rearm, unsigned char event)
+{
+ u32 val = 0;
+
+ if (rearm)
+ val |= 1 << DB_CQ_REARM_SHIFT;
+
+ val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+
+ /* Setting lower order CQ_ID Bits */
+ val |= (id & DB_CQ_RING_ID_LOW_MASK);
+
+ /* Setting Higher order CQ_ID Bits */
+ val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
+ DB_CQ_RING_ID_HIGH_MASK)
+ << DB_CQ_HIGH_SET_SHIFT);
+
+ iowrite32(val, phba->db_va + DB_CQ_OFFSET);
+}
+
+static unsigned int
+beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct pdu_base *ppdu,
+ unsigned long pdu_len,
+ void *pbuffer, unsigned long buf_len)
+{
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task *task;
+ struct beiscsi_io_task *io_task;
+ struct iscsi_hdr *login_hdr;
+
+ switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
+ PDUBASE_OPCODE_MASK) {
+ case ISCSI_OP_NOOP_IN:
+ pbuffer = NULL;
+ buf_len = 0;
+ break;
+ case ISCSI_OP_ASYNC_EVENT:
+ break;
+ case ISCSI_OP_REJECT:
+ WARN_ON(!pbuffer);
+ WARN_ON(!(buf_len == 48));
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : In ISCSI_OP_REJECT\n");
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ task = conn->login_task;
+ io_task = task->dd_data;
+ login_hdr = (struct iscsi_hdr *)ppdu;
+ login_hdr->itt = io_task->libiscsi_itt;
+ break;
+ default:
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Unrecognized opcode 0x%x in async msg\n",
+ (ppdu->
+ dw[offsetof(struct amap_pdu_base, opcode) / 32]
+ & PDUBASE_OPCODE_MASK));
+ return 1;
+ }
+
+ spin_lock_bh(&session->back_lock);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
+ spin_unlock_bh(&session->back_lock);
+ return 0;
+}
+
+static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
+{
+ struct sgl_handle *psgl_handle;
+
+ if (phba->io_sgl_hndl_avbl) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : In alloc_io_sgl_handle,"
+ " io_sgl_alloc_index=%d\n",
+ phba->io_sgl_alloc_index);
+
+ psgl_handle = phba->io_sgl_hndl_base[phba->
+ io_sgl_alloc_index];
+ phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
+ phba->io_sgl_hndl_avbl--;
+ if (phba->io_sgl_alloc_index == (phba->params.
+ ios_per_ctrl - 1))
+ phba->io_sgl_alloc_index = 0;
+ else
+ phba->io_sgl_alloc_index++;
+ } else
+ psgl_handle = NULL;
+ return psgl_handle;
+}
+
+static void
+free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
+{
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : In free_,io_sgl_free_index=%d\n",
+ phba->io_sgl_free_index);
+
+ if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
+ /*
+ * this can happen if clean_task is called on a task that
+ * failed in xmit_task or alloc_pdu.
+ */
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
+ "value there=%p\n", phba->io_sgl_free_index,
+ phba->io_sgl_hndl_base
+ [phba->io_sgl_free_index]);
+ return;
+ }
+ phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
+ phba->io_sgl_hndl_avbl++;
+ if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
+ phba->io_sgl_free_index = 0;
+ else
+ phba->io_sgl_free_index++;
+}
+
+/**
+ * alloc_wrb_handle - To allocate a wrb handle
+ * @phba: The hba pointer
+ * @cid: The cid to use for allocation
+ *
+ * This happens under session_lock until submission to chip
+ */
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
+{
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+ if (pwrb_context->wrb_handles_available >= 2) {
+ pwrb_handle = pwrb_context->pwrb_handle_base[
+ pwrb_context->alloc_index];
+ pwrb_context->wrb_handles_available--;
+ if (pwrb_context->alloc_index ==
+ (phba->params.wrbs_per_cxn - 1))
+ pwrb_context->alloc_index = 0;
+ else
+ pwrb_context->alloc_index++;
+ pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
+ pwrb_context->alloc_index];
+ pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
+ } else
+ pwrb_handle = NULL;
+ return pwrb_handle;
+}
+
+/**
+ * free_wrb_handle - To free the wrb handle back to pool
+ * @phba: The hba pointer
+ * @pwrb_context: The context to free from
+ * @pwrb_handle: The wrb_handle to free
+ *
+ * This happens under session_lock until submission to chip
+ */
+static void
+free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
+ struct wrb_handle *pwrb_handle)
+{
+ pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
+ pwrb_context->wrb_handles_available++;
+ if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
+ pwrb_context->free_index = 0;
+ else
+ pwrb_context->free_index++;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
+ "wrb_handles_available=%d\n",
+ pwrb_handle, pwrb_context->free_index,
+ pwrb_context->wrb_handles_available);
+}
+
+static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
+{
+ struct sgl_handle *psgl_handle;
+
+ if (phba->eh_sgl_hndl_avbl) {
+ psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
+ phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
+ phba->eh_sgl_alloc_index,
+ phba->eh_sgl_alloc_index);
+
+ phba->eh_sgl_hndl_avbl--;
+ if (phba->eh_sgl_alloc_index ==
+ (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
+ 1))
+ phba->eh_sgl_alloc_index = 0;
+ else
+ phba->eh_sgl_alloc_index++;
+ } else
+ psgl_handle = NULL;
+ return psgl_handle;
+}
+
+void
+free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
+{
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BM_%d : In free_mgmt_sgl_handle,"
+ "eh_sgl_free_index=%d\n",
+ phba->eh_sgl_free_index);
+
+ if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
+ /*
+ * this can happen if clean_task is called on a task that
+ * failed in xmit_task or alloc_pdu.
+ */
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BM_%d : Double Free in eh SGL ,"
+ "eh_sgl_free_index=%d\n",
+ phba->eh_sgl_free_index);
+ return;
+ }
+ phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
+ phba->eh_sgl_hndl_avbl++;
+ if (phba->eh_sgl_free_index ==
+ (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
+ phba->eh_sgl_free_index = 0;
+ else
+ phba->eh_sgl_free_index++;
+}
+
+static void
+be_complete_io(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct be_status_bhs *sts_bhs =
+ (struct be_status_bhs *)io_task->cmd_bhs;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ unsigned char *sense;
+ u32 resid = 0, exp_cmdsn, max_cmdsn;
+ u8 rsp, status, flags;
+
+ exp_cmdsn = csol_cqe->exp_cmdsn;
+ max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+ rsp = csol_cqe->i_resp;
+ status = csol_cqe->i_sts;
+ flags = csol_cqe->i_flags;
+ resid = csol_cqe->res_cnt;
+
+ if (!task->sc) {
+ if (io_task->scsi_cmnd) {
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
+
+ return;
+ }
+ task->sc->result = (DID_OK << 16) | status;
+ if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
+ task->sc->result = DID_ERROR << 16;
+ goto unmap;
+ }
+
+ /* bidi not initially supported */
+ if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
+ if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
+ task->sc->result = DID_ERROR << 16;
+
+ if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ scsi_set_resid(task->sc, resid);
+ if (!status && (scsi_bufflen(task->sc) - resid <
+ task->sc->underflow))
+ task->sc->result = DID_ERROR << 16;
+ }
+ }
+
+ if (status == SAM_STAT_CHECK_CONDITION) {
+ u16 sense_len;
+ unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
+ sense = sts_bhs->sense_info + sizeof(unsigned short);
+ sense_len = be16_to_cpu(*slen);
+ memcpy(task->sc->sense_buffer, sense,
+ min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
+ }
+
+ if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
+ conn->rxdata_octets += resid;
+unmap:
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
+}
+
+static void
+be_complete_logout(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
+{
+ struct iscsi_logout_rsp *hdr;
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+
+ hdr = (struct iscsi_logout_rsp *)task->hdr;
+ hdr->opcode = ISCSI_OP_LOGOUT_RSP;
+ hdr->t2wait = 5;
+ hdr->t2retain = 0;
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+ hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
+ hdr->dlength[0] = 0;
+ hdr->dlength[1] = 0;
+ hdr->dlength[2] = 0;
+ hdr->hlength = 0;
+ hdr->itt = io_task->libiscsi_itt;
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+}
+
+static void
+be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
+{
+ struct iscsi_tm_rsp *hdr;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct beiscsi_io_task *io_task = task->dd_data;
+
+ hdr = (struct iscsi_tm_rsp *)task->hdr;
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+ hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
+ hdr->itt = io_task->libiscsi_itt;
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+}
+
+static void
+hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba, struct sol_cqe *psol)
+{
+ struct hwi_wrb_context *pwrb_context;
+ struct wrb_handle *pwrb_handle = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct iscsi_task *task;
+ struct beiscsi_io_task *io_task;
+ uint16_t wrb_index, cid, cri_index;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ if (is_chip_be2_be3r(phba)) {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ cid, psol);
+ } else {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ cid, psol);
+ }
+
+ cri_index = BE_GET_CRI_FROM_CID(cid);
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
+ task = pwrb_handle->pio_handle;
+
+ io_task = task->dd_data;
+ memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
+ iscsi_put_task(task);
+}
+
+static void
+be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
+{
+ struct iscsi_nopin *hdr;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct beiscsi_io_task *io_task = task->dd_data;
+
+ hdr = (struct iscsi_nopin *)task->hdr;
+ hdr->flags = csol_cqe->i_flags;
+ hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+ hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->itt = io_task->libiscsi_itt;
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+}
+
+static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
+ struct sol_cqe *psol,
+ struct common_sol_cqe *csol_cqe)
+{
+ if (is_chip_be2_be3r(phba)) {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_res_cnt, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_cmd_wnd, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ hw_sts, psol);
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_resp, psol);
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_sts, psol);
+ csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_flags, psol);
+ } else {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_res_cnt, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ hw_sts, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_cmd_wnd, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cmd_cmpl, psol))
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ else
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ u, psol))
+ csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
+
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ o, psol))
+ csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ }
+}
+
+
+static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba, struct sol_cqe *psol)
+{
+ struct hwi_wrb_context *pwrb_context;
+ struct wrb_handle *pwrb_handle;
+ struct iscsi_wrb *pwrb = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct iscsi_task *task;
+ unsigned int type;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+ struct common_sol_cqe csol_cqe = {0};
+ uint16_t cri_index = 0;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+
+ /* Copy the elements to a common structure */
+ adapter_get_sol_cqe(phba, psol, &csol_cqe);
+
+ cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[
+ csol_cqe.wrb_index];
+
+ task = pwrb_handle->pio_handle;
+ pwrb = pwrb_handle->pwrb;
+ type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
+
+ spin_lock_bh(&session->back_lock);
+ switch (type) {
+ case HWH_TYPE_IO:
+ case HWH_TYPE_IO_RD:
+ if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
+ ISCSI_OP_NOOP_OUT)
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
+ else
+ be_complete_io(beiscsi_conn, task, &csol_cqe);
+ break;
+
+ case HWH_TYPE_LOGOUT:
+ if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+ be_complete_logout(beiscsi_conn, task, &csol_cqe);
+ else
+ be_complete_tmf(beiscsi_conn, task, &csol_cqe);
+ break;
+
+ case HWH_TYPE_LOGIN:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
+ " hwi_complete_cmd- Solicited path\n");
+ break;
+
+ case HWH_TYPE_NOP:
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
+ break;
+
+ default:
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : In hwi_complete_cmd, unknown type = %d"
+ "wrb_index 0x%x CID 0x%x\n", type,
+ csol_cqe.wrb_index,
+ csol_cqe.cid);
+ break;
+ }
+
+ spin_unlock_bh(&session->back_lock);
+}
+
+static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
+ *pasync_ctx, unsigned int is_header,
+ unsigned int host_write_ptr)
+{
+ if (is_header)
+ return &pasync_ctx->async_entry[host_write_ptr].
+ header_busy_list;
+ else
+ return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
+}
+
+static struct async_pdu_handle *
+hwi_get_async_handle(struct beiscsi_hba *phba,
+ struct beiscsi_conn *beiscsi_conn,
+ struct hwi_async_pdu_context *pasync_ctx,
+ struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
+{
+ struct be_bus_address phys_addr;
+ struct list_head *pbusy_list;
+ struct async_pdu_handle *pasync_handle = NULL;
+ unsigned char is_header = 0;
+ unsigned int index, dpl;
+
+ if (is_chip_be2_be3r(phba)) {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ index, pdpdu_cqe);
+ } else {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ index, pdpdu_cqe);
+ }
+
+ phys_addr.u.a32.address_lo =
+ (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_lo) / 32] - dpl);
+ phys_addr.u.a32.address_hi =
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_hi) / 32];
+
+ phys_addr.u.a64.address =
+ *((unsigned long long *)(&phys_addr.u.a64.address));
+
+ switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
+ & PDUCQE_CODE_MASK) {
+ case UNSOL_HDR_NOTIFY:
+ is_header = 1;
+
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
+ break;
+ case UNSOL_DATA_NOTIFY:
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
+ break;
+ default:
+ pbusy_list = NULL;
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Unexpected code=%d\n",
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ code) / 32] & PDUCQE_CODE_MASK);
+ return NULL;
+ }
+
+ WARN_ON(list_empty(pbusy_list));
+ list_for_each_entry(pasync_handle, pbusy_list, link) {
+ if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
+ break;
+ }
+
+ WARN_ON(!pasync_handle);
+
+ pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
+ pasync_handle->is_header = is_header;
+ pasync_handle->buffer_len = dpl;
+ *pcq_index = index;
+
+ return pasync_handle;
+}
+
+static unsigned int
+hwi_update_async_writables(struct beiscsi_hba *phba,
+ struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int is_header, unsigned int cq_index)
+{
+ struct list_head *pbusy_list;
+ struct async_pdu_handle *pasync_handle;
+ unsigned int num_entries, writables = 0;
+ unsigned int *pep_read_ptr, *pwritables;
+
+ num_entries = pasync_ctx->num_entries;
+ if (is_header) {
+ pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
+ pwritables = &pasync_ctx->async_header.writables;
+ } else {
+ pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
+ pwritables = &pasync_ctx->async_data.writables;
+ }
+
+ while ((*pep_read_ptr) != cq_index) {
+ (*pep_read_ptr)++;
+ *pep_read_ptr = (*pep_read_ptr) % num_entries;
+
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
+ *pep_read_ptr);
+ if (writables == 0)
+ WARN_ON(list_empty(pbusy_list));
+
+ if (!list_empty(pbusy_list)) {
+ pasync_handle = list_entry(pbusy_list->next,
+ struct async_pdu_handle,
+ link);
+ WARN_ON(!pasync_handle);
+ pasync_handle->consumed = 1;
+ }
+
+ writables++;
+ }
+
+ if (!writables) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Duplicate notification received - index 0x%x!!\n",
+ cq_index);
+ WARN_ON(1);
+ }
+
+ *pwritables = *pwritables + writables;
+ return 0;
+}
+
+static void hwi_free_async_msg(struct beiscsi_hba *phba,
+ struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int cri)
+{
+ struct async_pdu_handle *pasync_handle, *tmp_handle;
+ struct list_head *plist;
+
+ plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+ list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
+ list_del(&pasync_handle->link);
+
+ if (pasync_handle->is_header) {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_header.free_list);
+ pasync_ctx->async_header.free_entries++;
+ } else {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_data.free_list);
+ pasync_ctx->async_data.free_entries++;
+ }
+ }
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
+ pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
+ pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+}
+
+static struct phys_addr *
+hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int is_header, unsigned int host_write_ptr)
+{
+ struct phys_addr *pasync_sge = NULL;
+
+ if (is_header)
+ pasync_sge = pasync_ctx->async_header.ring_base;
+ else
+ pasync_sge = pasync_ctx->async_data.ring_base;
+
+ return pasync_sge + host_write_ptr;
+}
+
+static void hwi_post_async_buffers(struct beiscsi_hba *phba,
+ unsigned int is_header, uint8_t ulp_num)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle;
+ struct list_head *pfree_link, *pbusy_list;
+ struct phys_addr *pasync_sge;
+ unsigned int ring_id, num_entries;
+ unsigned int host_write_num, doorbell_offset;
+ unsigned int writables;
+ unsigned int i = 0;
+ u32 doorbell = 0;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
+ num_entries = pasync_ctx->num_entries;
+
+ if (is_header) {
+ writables = min(pasync_ctx->async_header.writables,
+ pasync_ctx->async_header.free_entries);
+ pfree_link = pasync_ctx->async_header.free_list.next;
+ host_write_num = pasync_ctx->async_header.host_write_ptr;
+ ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
+ doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
+ doorbell_offset;
+ } else {
+ writables = min(pasync_ctx->async_data.writables,
+ pasync_ctx->async_data.free_entries);
+ pfree_link = pasync_ctx->async_data.free_list.next;
+ host_write_num = pasync_ctx->async_data.host_write_ptr;
+ ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
+ doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
+ doorbell_offset;
+ }
+
+ writables = (writables / 8) * 8;
+ if (writables) {
+ for (i = 0; i < writables; i++) {
+ pbusy_list =
+ hwi_get_async_busy_list(pasync_ctx, is_header,
+ host_write_num);
+ pasync_handle =
+ list_entry(pfree_link, struct async_pdu_handle,
+ link);
+ WARN_ON(!pasync_handle);
+ pasync_handle->consumed = 0;
+
+ pfree_link = pfree_link->next;
+
+ pasync_sge = hwi_get_ring_address(pasync_ctx,
+ is_header, host_write_num);
+
+ pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
+ pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
+
+ list_move(&pasync_handle->link, pbusy_list);
+
+ host_write_num++;
+ host_write_num = host_write_num % num_entries;
+ }
+
+ if (is_header) {
+ pasync_ctx->async_header.host_write_ptr =
+ host_write_num;
+ pasync_ctx->async_header.free_entries -= writables;
+ pasync_ctx->async_header.writables -= writables;
+ pasync_ctx->async_header.busy_entries += writables;
+ } else {
+ pasync_ctx->async_data.host_write_ptr = host_write_num;
+ pasync_ctx->async_data.free_entries -= writables;
+ pasync_ctx->async_data.writables -= writables;
+ pasync_ctx->async_data.busy_entries += writables;
+ }
+
+ doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
+ doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
+ doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
+ doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
+ << DB_DEF_PDU_CQPROC_SHIFT;
+
+ iowrite32(doorbell, phba->db_va + doorbell_offset);
+ }
+}
+
+static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
+ struct beiscsi_conn *beiscsi_conn,
+ struct i_t_dpdu_cqe *pdpdu_cqe)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle = NULL;
+ unsigned int cq_index = -1;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
+
+ pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
+ pdpdu_cqe, &cq_index);
+ BUG_ON(pasync_handle->is_header != 0);
+ if (pasync_handle->consumed == 0)
+ hwi_update_async_writables(phba, pasync_ctx,
+ pasync_handle->is_header, cq_index);
+
+ hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
+ hwi_post_async_buffers(phba, pasync_handle->is_header,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
+}
+
+static unsigned int
+hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
+{
+ struct list_head *plist;
+ struct async_pdu_handle *pasync_handle;
+ void *phdr = NULL;
+ unsigned int hdr_len = 0, buf_len = 0;
+ unsigned int status, index = 0, offset = 0;
+ void *pfirst_buffer = NULL;
+ unsigned int num_buf = 0;
+
+ plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+
+ list_for_each_entry(pasync_handle, plist, link) {
+ if (index == 0) {
+ phdr = pasync_handle->pbuffer;
+ hdr_len = pasync_handle->buffer_len;
+ } else {
+ buf_len = pasync_handle->buffer_len;
+ if (!num_buf) {
+ pfirst_buffer = pasync_handle->pbuffer;
+ num_buf++;
+ }
+ memcpy(pfirst_buffer + offset,
+ pasync_handle->pbuffer, buf_len);
+ offset += buf_len;
+ }
+ index++;
+ }
+
+ status = beiscsi_process_async_pdu(beiscsi_conn, phba,
+ phdr, hdr_len, pfirst_buffer,
+ offset);
+
+ hwi_free_async_msg(phba, pasync_ctx, cri);
+ return 0;
+}
+
+static unsigned int
+hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct async_pdu_handle *pasync_handle)
+{
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct hwi_controller *phwi_ctrlr;
+ unsigned int bytes_needed = 0, status = 0;
+ unsigned short cri = pasync_handle->cri;
+ struct pdu_base *ppdu;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ BE_GET_CRI_FROM_CID(beiscsi_conn->
+ beiscsi_conn_cid)));
+
+ list_del(&pasync_handle->link);
+ if (pasync_handle->is_header) {
+ pasync_ctx->async_header.busy_entries--;
+ if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
+ hwi_free_async_msg(phba, pasync_ctx, cri);
+ BUG();
+ }
+
+ pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+ pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
+ pasync_ctx->async_entry[cri].wait_queue.hdr_len =
+ (unsigned short)pasync_handle->buffer_len;
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_entry[cri].wait_queue.list);
+
+ ppdu = pasync_handle->pbuffer;
+ bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
+ data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
+ 0xFFFF0000) | ((be16_to_cpu((ppdu->
+ dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
+ & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
+
+ if (status == 0) {
+ pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
+ bytes_needed;
+
+ if (bytes_needed == 0)
+ status = hwi_fwd_async_msg(beiscsi_conn, phba,
+ pasync_ctx, cri);
+ }
+ } else {
+ pasync_ctx->async_data.busy_entries--;
+ if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_entry[cri].wait_queue.
+ list);
+ pasync_ctx->async_entry[cri].wait_queue.
+ bytes_received +=
+ (unsigned short)pasync_handle->buffer_len;
+
+ if (pasync_ctx->async_entry[cri].wait_queue.
+ bytes_received >=
+ pasync_ctx->async_entry[cri].wait_queue.
+ bytes_needed)
+ status = hwi_fwd_async_msg(beiscsi_conn, phba,
+ pasync_ctx, cri);
+ }
+ }
+ return status;
+}
+
+static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct i_t_dpdu_cqe *pdpdu_cqe)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle = NULL;
+ unsigned int cq_index = -1;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
+
+ pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
+ pdpdu_cqe, &cq_index);
+
+ if (pasync_handle->consumed == 0)
+ hwi_update_async_writables(phba, pasync_ctx,
+ pasync_handle->is_header, cq_index);
+
+ hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
+ hwi_post_async_buffers(phba, pasync_handle->is_header,
+ BEISCSI_GET_ULP_FROM_CRI(
+ phwi_ctrlr, cri_index));
+}
+
+static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *mcc_cq;
+ struct be_mcc_compl *mcc_compl;
+ unsigned int num_processed = 0;
+
+ mcc_cq = &phba->ctrl.mcc_obj.cq;
+ mcc_compl = queue_tail_node(mcc_cq);
+ mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
+ while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
+
+ if (num_processed >= 32) {
+ hwi_ring_cq_db(phba, mcc_cq->id,
+ num_processed, 0, 0);
+ num_processed = 0;
+ }
+ if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ if (is_link_state_evt(mcc_compl->flags))
+ /* Interpret compl as a async link evt */
+ beiscsi_async_link_state_process(phba,
+ (struct be_async_event_link_state *) mcc_compl);
+ else
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
+ "BM_%d : Unsupported Async Event, flags"
+ " = 0x%08x\n",
+ mcc_compl->flags);
+ } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+ be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
+ atomic_dec(&phba->ctrl.mcc_obj.q.used);
+ }
+
+ mcc_compl->flags = 0;
+ queue_tail_inc(mcc_cq);
+ mcc_compl = queue_tail_node(mcc_cq);
+ mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
+ num_processed++;
+ }
+
+ if (num_processed > 0)
+ hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
+
+}
+
+/**
+ * beiscsi_process_cq()- Process the Completion Queue
+ * @pbe_eq: Event Q on which the Completion has come
+ *
+ * return
+ * Number of Completion Entries processed.
+ **/
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
+{
+ struct be_queue_info *cq;
+ struct sol_cqe *sol;
+ struct dmsg_cqe *dmsg;
+ unsigned int num_processed = 0;
+ unsigned int tot_nump = 0;
+ unsigned short code = 0, cid = 0;
+ uint16_t cri_index = 0;
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_endpoint *ep;
+ struct beiscsi_hba *phba;
+
+ cq = pbe_eq->cq;
+ sol = queue_tail_node(cq);
+ phba = pbe_eq->phba;
+
+ while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
+ CQE_VALID_MASK) {
+ be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
+
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK);
+
+ /* Get the CID */
+ if (is_chip_be2_be3r(phba)) {
+ cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+ } else {
+ if ((code == DRIVERMSG_NOTIFY) ||
+ (code == UNSOL_HDR_NOTIFY) ||
+ (code == UNSOL_DATA_NOTIFY))
+ cid = AMAP_GET_BITS(
+ struct amap_i_t_dpdu_cqe_v2,
+ cid, sol);
+ else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, sol);
+ }
+
+ cri_index = BE_GET_CRI_FROM_CID(cid);
+ ep = phba->ep_array[cri_index];
+
+ if (ep == NULL) {
+ /* connection has already been freed
+ * just move on to next one
+ */
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : proc cqe of disconn ep: cid %d\n",
+ cid);
+ goto proc_next_cqe;
+ }
+
+ beiscsi_ep = ep->dd_data;
+ beiscsi_conn = beiscsi_ep->conn;
+
+ if (num_processed >= 32) {
+ hwi_ring_cq_db(phba, cq->id,
+ num_processed, 0, 0);
+ tot_nump += num_processed;
+ num_processed = 0;
+ }
+
+ switch (code) {
+ case SOL_CMD_COMPLETE:
+ hwi_complete_cmd(beiscsi_conn, phba, sol);
+ break;
+ case DRIVERMSG_NOTIFY:
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+
+ dmsg = (struct dmsg_cqe *)sol;
+ hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
+ break;
+ case UNSOL_HDR_NOTIFY:
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+
+ spin_lock_bh(&phba->async_pdu_lock);
+ hwi_process_default_pdu_ring(beiscsi_conn, phba,
+ (struct i_t_dpdu_cqe *)sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
+ break;
+ case UNSOL_DATA_NOTIFY:
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+
+ spin_lock_bh(&phba->async_pdu_lock);
+ hwi_process_default_pdu_ring(beiscsi_conn, phba,
+ (struct i_t_dpdu_cqe *)sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
+ break;
+ case CXN_INVALIDATE_INDEX_NOTIFY:
+ case CMD_INVALIDATED_NOTIFY:
+ case CXN_INVALIDATE_NOTIFY:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Ignoring %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+ break;
+ case SOL_CMD_KILLED_DATA_DIGEST_ERR:
+ case CMD_KILLED_INVALID_STATSN_RCVD:
+ case CMD_KILLED_INVALID_R2T_RCVD:
+ case CMD_CXN_KILLED_LUN_INVALID:
+ case CMD_CXN_KILLED_ICD_INVALID:
+ case CMD_CXN_KILLED_ITT_INVALID:
+ case CMD_CXN_KILLED_SEQ_OUTOFORDER:
+ case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+ break;
+ case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
+ cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
+ hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
+ (struct i_t_dpdu_cqe *) sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
+ break;
+ case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
+ case CXN_KILLED_BURST_LEN_MISMATCH:
+ case CXN_KILLED_AHS_RCVD:
+ case CXN_KILLED_HDR_DIGEST_ERR:
+ case CXN_KILLED_UNKNOWN_HDR:
+ case CXN_KILLED_STALE_ITT_TTT_RCVD:
+ case CXN_KILLED_INVALID_ITT_TTT_RCVD:
+ case CXN_KILLED_TIMED_OUT:
+ case CXN_KILLED_FIN_RCVD:
+ case CXN_KILLED_RST_SENT:
+ case CXN_KILLED_RST_RCVD:
+ case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
+ case CXN_KILLED_BAD_WRB_INDEX_ERROR:
+ case CXN_KILLED_OVER_RUN_RESIDUAL:
+ case CXN_KILLED_UNDER_RUN_RESIDUAL:
+ case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Event %s[%d] received on CID : %d\n",
+ cqe_desc[code], code, cid);
+ if (beiscsi_conn)
+ iscsi_conn_failure(beiscsi_conn->conn,
+ ISCSI_ERR_CONN_FAILED);
+ break;
+ default:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Invalid CQE Event Received Code : %d"
+ "CID 0x%x...\n",
+ code, cid);
+ break;
+ }
+
+proc_next_cqe:
+ AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
+ queue_tail_inc(cq);
+ sol = queue_tail_node(cq);
+ num_processed++;
+ }
+
+ if (num_processed > 0) {
+ tot_nump += num_processed;
+ hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
+ }
+ return tot_nump;
+}
+
+void beiscsi_process_all_cqs(struct work_struct *work)
+{
+ unsigned long flags;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct beiscsi_hba *phba;
+ struct be_eq_obj *pbe_eq =
+ container_of(work, struct be_eq_obj, work_cqs);
+
+ phba = pbe_eq->phba;
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ if (pbe_eq->todo_mcc_cq) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_mcc_cq = false;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ beiscsi_process_mcc_isr(phba);
+ }
+
+ if (pbe_eq->todo_cq) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_cq = false;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ beiscsi_process_cq(pbe_eq);
+ }
+
+ /* rearm EQ for further interrupts */
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+}
+
+static int be_iopoll(struct blk_iopoll *iop, int budget)
+{
+ unsigned int ret;
+ struct beiscsi_hba *phba;
+ struct be_eq_obj *pbe_eq;
+
+ pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
+ ret = beiscsi_process_cq(pbe_eq);
+ pbe_eq->cq_count += ret;
+ if (ret < budget) {
+ phba = pbe_eq->phba;
+ blk_iopoll_complete(iop);
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : rearm pbe_eq->q.id =%d\n",
+ pbe_eq->q.id);
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+ }
+ return ret;
+}
+
+static void
+hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
+ unsigned int num_sg, struct beiscsi_io_task *io_task)
+{
+ struct iscsi_sge *psgl;
+ unsigned int sg_len, index;
+ unsigned int sge_len = 0;
+ unsigned long long addr;
+ struct scatterlist *l_sg;
+ unsigned int offset;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ l_sg = sg;
+ for (index = 0; (index < num_sg) && (index < 2); index++,
+ sg = sg_next(sg)) {
+ if (index == 0) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_len, pwrb,
+ sg_len);
+ sge_len = sg_len;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
+ pwrb, sge_len);
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_len, pwrb,
+ sg_len);
+ }
+ }
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+ memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+
+ if (num_sg == 1) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ } else if (num_sg == 2) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ }
+
+ sg = l_sg;
+ psgl++;
+ psgl++;
+ offset = 0;
+ for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+ offset += sg_len;
+ }
+ psgl--;
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
+
+static void
+hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
+ unsigned int num_sg, struct beiscsi_io_task *io_task)
+{
+ struct iscsi_sge *psgl;
+ unsigned int sg_len, index;
+ unsigned int sge_len = 0;
+ unsigned long long addr;
+ struct scatterlist *l_sg;
+ unsigned int offset;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ l_sg = sg;
+ for (index = 0; (index < num_sg) && (index < 2); index++,
+ sg = sg_next(sg)) {
+ if (index == 0) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
+ ((u32)(addr & 0xFFFFFFFF)));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
+ ((u32)(addr >> 32)));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
+ sg_len);
+ sge_len = sg_len;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
+ pwrb, sge_len);
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
+ ((u32)(addr & 0xFFFFFFFF)));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
+ ((u32)(addr >> 32)));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
+ sg_len);
+ }
+ }
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+ memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+
+ if (num_sg == 1) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+ 0);
+ } else if (num_sg == 2) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+ 0);
+ }
+ sg = l_sg;
+ psgl++;
+ psgl++;
+ offset = 0;
+ for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ (addr & 0xFFFFFFFF));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ (addr >> 32));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+ offset += sg_len;
+ }
+ psgl--;
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
+
+/**
+ * hwi_write_buffer()- Populate the WRB with task info
+ * @pwrb: ptr to the WRB entry
+ * @task: iscsi task which is to be executed
+ **/
+static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
+{
+ struct iscsi_sge *psgl;
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct beiscsi_conn *beiscsi_conn = io_task->conn;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ uint8_t dsp_value = 0;
+
+ io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ if (task->data) {
+
+ /* Check for the data_count */
+ dsp_value = (task->data_count) ? 1 : 0;
+
+ if (is_chip_be2_be3r(phba))
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+ pwrb, dsp_value);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+ pwrb, dsp_value);
+
+ /* Map addr only if there is data_count */
+ if (dsp_value) {
+ io_task->mtask_addr = pci_map_single(phba->pcidev,
+ task->data,
+ task->data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_data_count = task->data_count;
+ } else
+ io_task->mtask_addr = 0;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
+ lower_32_bits(io_task->mtask_addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
+ upper_32_bits(io_task->mtask_addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
+ task->data_count);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
+ io_task->mtask_addr = 0;
+ }
+
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+ if (task->data) {
+ psgl++;
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+
+ psgl++;
+ if (task->data) {
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ lower_32_bits(io_task->mtask_addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ upper_32_bits(io_task->mtask_addr));
+ }
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
+ }
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
+
+/**
+ * beiscsi_find_mem_req()- Find mem needed
+ * @phba: ptr to HBA struct
+ **/
+static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
+{
+ uint8_t mem_descr_index, ulp_num;
+ unsigned int num_cq_pages, num_async_pdu_buf_pages;
+ unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
+ unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
+
+ num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
+ sizeof(struct sol_cqe));
+
+ phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
+
+ phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
+ BE_ISCSI_PDU_HEADER_SIZE;
+ phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
+ sizeof(struct hwi_context_memory);
+
+
+ phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
+ * (phba->params.wrbs_per_cxn)
+ * phba->params.cxns_per_ctrl;
+ wrb_sz_per_cxn = sizeof(struct wrb_handle) *
+ (phba->params.wrbs_per_cxn);
+ phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
+ phba->params.cxns_per_ctrl);
+
+ phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
+ phba->params.icds_per_ctrl;
+ phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
+ phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ num_async_pdu_buf_sgl_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ sizeof(struct phys_addr));
+
+ num_async_pdu_buf_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ phba->params.defpdu_hdr_sz);
+
+ num_async_pdu_data_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ phba->params.defpdu_data_sz);
+
+ num_async_pdu_data_sgl_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ sizeof(struct phys_addr));
+
+ mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_buf_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_data_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_buf_sgl_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_data_sgl_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct async_pdu_handle);
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct async_pdu_handle);
+
+ mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ sizeof(struct hwi_async_pdu_context) +
+ (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct hwi_async_entry));
+ }
+ }
+}
+
+static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
+{
+ dma_addr_t bus_add;
+ struct hwi_controller *phwi_ctrlr;
+ struct be_mem_descriptor *mem_descr;
+ struct mem_array *mem_arr, *mem_arr_orig;
+ unsigned int i, j, alloc_size, curr_alloc_size;
+
+ phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
+ if (!phba->phwi_ctrlr)
+ return -ENOMEM;
+
+ /* Allocate memory for wrb_context */
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
+ phba->params.cxns_per_ctrl,
+ GFP_KERNEL);
+ if (!phwi_ctrlr->wrb_context)
+ return -ENOMEM;
+
+ phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
+ GFP_KERNEL);
+ if (!phba->init_mem) {
+ kfree(phwi_ctrlr->wrb_context);
+ kfree(phba->phwi_ctrlr);
+ return -ENOMEM;
+ }
+
+ mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
+ GFP_KERNEL);
+ if (!mem_arr_orig) {
+ kfree(phba->init_mem);
+ kfree(phwi_ctrlr->wrb_context);
+ kfree(phba->phwi_ctrlr);
+ return -ENOMEM;
+ }
+
+ mem_descr = phba->init_mem;
+ for (i = 0; i < SE_MEM_MAX; i++) {
+ if (!phba->mem_req[i]) {
+ mem_descr->mem_array = NULL;
+ mem_descr++;
+ continue;
+ }
+
+ j = 0;
+ mem_arr = mem_arr_orig;
+ alloc_size = phba->mem_req[i];
+ memset(mem_arr, 0, sizeof(struct mem_array) *
+ BEISCSI_MAX_FRAGS_INIT);
+ curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
+ do {
+ mem_arr->virtual_address = pci_alloc_consistent(
+ phba->pcidev,
+ curr_alloc_size,
+ &bus_add);
+ if (!mem_arr->virtual_address) {
+ if (curr_alloc_size <= BE_MIN_MEM_SIZE)
+ goto free_mem;
+ if (curr_alloc_size -
+ rounddown_pow_of_two(curr_alloc_size))
+ curr_alloc_size = rounddown_pow_of_two
+ (curr_alloc_size);
+ else
+ curr_alloc_size = curr_alloc_size / 2;
+ } else {
+ mem_arr->bus_address.u.
+ a64.address = (__u64) bus_add;
+ mem_arr->size = curr_alloc_size;
+ alloc_size -= curr_alloc_size;
+ curr_alloc_size = min(be_max_phys_size *
+ 1024, alloc_size);
+ j++;
+ mem_arr++;
+ }
+ } while (alloc_size);
+ mem_descr->num_elements = j;
+ mem_descr->size_in_bytes = phba->mem_req[i];
+ mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
+ GFP_KERNEL);
+ if (!mem_descr->mem_array)
+ goto free_mem;
+
+ memcpy(mem_descr->mem_array, mem_arr_orig,
+ sizeof(struct mem_array) * j);
+ mem_descr++;
+ }
+ kfree(mem_arr_orig);
+ return 0;
+free_mem:
+ mem_descr->num_elements = j;
+ while ((i) || (j)) {
+ for (j = mem_descr->num_elements; j > 0; j--) {
+ pci_free_consistent(phba->pcidev,
+ mem_descr->mem_array[j - 1].size,
+ mem_descr->mem_array[j - 1].
+ virtual_address,
+ (unsigned long)mem_descr->
+ mem_array[j - 1].
+ bus_address.u.a64.address);
+ }
+ if (i) {
+ i--;
+ kfree(mem_descr->mem_array);
+ mem_descr--;
+ }
+ }
+ kfree(mem_arr_orig);
+ kfree(phba->init_mem);
+ kfree(phba->phwi_ctrlr->wrb_context);
+ kfree(phba->phwi_ctrlr);
+ return -ENOMEM;
+}
+
+static int beiscsi_get_memory(struct beiscsi_hba *phba)
+{
+ beiscsi_find_mem_req(phba);
+ return beiscsi_alloc_mem(phba);
+}
+
+static void iscsi_init_global_templates(struct beiscsi_hba *phba)
+{
+ struct pdu_data_out *pdata_out;
+ struct pdu_nop_out *pnop_out;
+ struct be_mem_descriptor *mem_descr;
+
+ mem_descr = phba->init_mem;
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+ pdata_out =
+ (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
+ memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
+
+ AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
+ IIOC_SCSI_DATA);
+
+ pnop_out =
+ (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
+ virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
+
+ memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
+ AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
+ AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
+ AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
+}
+
+static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+ struct hwi_context_memory *phwi_ctxt;
+ struct wrb_handle *pwrb_handle = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int num_cxn_wrbh = 0;
+ unsigned int num_cxn_wrb = 0, j, idx = 0, index;
+
+ mem_descr_wrbh = phba->init_mem;
+ mem_descr_wrbh += HWI_MEM_WRBH;
+
+ mem_descr_wrb = phba->init_mem;
+ mem_descr_wrb += HWI_MEM_WRB;
+ phwi_ctrlr = phba->phwi_ctrlr;
+
+ /* Allocate memory for WRBQ */
+ phwi_ctxt = phwi_ctrlr->phwi_ctxt;
+ phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
+ phba->params.cxns_per_ctrl,
+ GFP_KERNEL);
+ if (!phwi_ctxt->be_wrbq) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : WRBQ Mem Alloc Failed\n");
+ return -ENOMEM;
+ }
+
+ for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
+ pwrb_context = &phwi_ctrlr->wrb_context[index];
+ pwrb_context->pwrb_handle_base =
+ kzalloc(sizeof(struct wrb_handle *) *
+ phba->params.wrbs_per_cxn, GFP_KERNEL);
+ if (!pwrb_context->pwrb_handle_base) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
+ goto init_wrb_hndl_failed;
+ }
+ pwrb_context->pwrb_handle_basestd =
+ kzalloc(sizeof(struct wrb_handle *) *
+ phba->params.wrbs_per_cxn, GFP_KERNEL);
+ if (!pwrb_context->pwrb_handle_basestd) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
+ goto init_wrb_hndl_failed;
+ }
+ if (!num_cxn_wrbh) {
+ pwrb_handle =
+ mem_descr_wrbh->mem_array[idx].virtual_address;
+ num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
+ ((sizeof(struct wrb_handle)) *
+ phba->params.wrbs_per_cxn));
+ idx++;
+ }
+ pwrb_context->alloc_index = 0;
+ pwrb_context->wrb_handles_available = 0;
+ pwrb_context->free_index = 0;
+
+ if (num_cxn_wrbh) {
+ for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
+ pwrb_context->pwrb_handle_base[j] = pwrb_handle;
+ pwrb_context->pwrb_handle_basestd[j] =
+ pwrb_handle;
+ pwrb_context->wrb_handles_available++;
+ pwrb_handle->wrb_index = j;
+ pwrb_handle++;
+ }
+ num_cxn_wrbh--;
+ }
+ }
+ idx = 0;
+ for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
+ pwrb_context = &phwi_ctrlr->wrb_context[index];
+ if (!num_cxn_wrb) {
+ pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
+ num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+ ((sizeof(struct iscsi_wrb) *
+ phba->params.wrbs_per_cxn));
+ idx++;
+ }
+
+ if (num_cxn_wrb) {
+ for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
+ pwrb_handle = pwrb_context->pwrb_handle_base[j];
+ pwrb_handle->pwrb = pwrb;
+ pwrb++;
+ }
+ num_cxn_wrb--;
+ }
+ }
+ return 0;
+init_wrb_hndl_failed:
+ for (j = index; j > 0; j--) {
+ pwrb_context = &phwi_ctrlr->wrb_context[j];
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
+ return -ENOMEM;
+}
+
+static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+{
+ uint8_t ulp_num;
+ struct hwi_controller *phwi_ctrlr;
+ struct hba_parameters *p = &phba->params;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_header_h, *pasync_data_h;
+ unsigned int index, idx, num_per_mem, num_async_data;
+ struct be_mem_descriptor *mem_descr;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
+ (struct hwi_async_pdu_context *)
+ mem_descr->mem_array[0].virtual_address;
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+ pasync_ctx->async_entry =
+ (struct hwi_async_entry *)
+ ((long unsigned int)pasync_ctx +
+ sizeof(struct hwi_async_pdu_context));
+
+ pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+ ulp_num);
+ pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.va_base =
+ mem_descr->mem_array[0].virtual_address;
+
+ pasync_ctx->async_header.pa_base.u.a64.address =
+ mem_descr->mem_array[0].
+ bus_address.u.a64.address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.ring_base =
+ mem_descr->mem_array[0].virtual_address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_header.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_data.ring_base =
+ mem_descr->mem_array[0].virtual_address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (!mem_descr->mem_array[0].virtual_address)
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_data.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_data.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+
+ pasync_header_h =
+ (struct async_pdu_handle *)
+ pasync_ctx->async_header.handle_base;
+ pasync_data_h =
+ (struct async_pdu_handle *)
+ pasync_ctx->async_data.handle_base;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ idx = 0;
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[idx].virtual_address;
+ pasync_ctx->async_data.pa_base.u.a64.address =
+ mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+
+ num_async_data = ((mem_descr->mem_array[idx].size) /
+ phba->params.defpdu_data_sz);
+ num_per_mem = 0;
+
+ for (index = 0; index < BEISCSI_GET_CID_COUNT
+ (phba, ulp_num); index++) {
+ pasync_header_h->cri = -1;
+ pasync_header_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_header_h->link);
+ pasync_header_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->
+ async_header.va_base) +
+ (p->defpdu_hdr_sz * index));
+
+ pasync_header_h->pa.u.a64.address =
+ pasync_ctx->async_header.pa_base.u.a64.
+ address + (p->defpdu_hdr_sz * index);
+
+ list_add_tail(&pasync_header_h->link,
+ &pasync_ctx->async_header.
+ free_list);
+ pasync_header_h++;
+ pasync_ctx->async_header.free_entries++;
+ pasync_ctx->async_header.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ wait_queue.list);
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ header_busy_list);
+ pasync_data_h->cri = -1;
+ pasync_data_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_data_h->link);
+
+ if (!num_async_data) {
+ num_per_mem = 0;
+ idx++;
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[idx].
+ virtual_address;
+ pasync_ctx->async_data.pa_base.u.
+ a64.address =
+ mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+ num_async_data =
+ ((mem_descr->mem_array[idx].
+ size) /
+ phba->params.defpdu_data_sz);
+ }
+ pasync_data_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->async_data.va_base) +
+ (p->defpdu_data_sz * num_per_mem));
+
+ pasync_data_h->pa.u.a64.address =
+ pasync_ctx->async_data.pa_base.u.a64.
+ address + (p->defpdu_data_sz *
+ num_per_mem);
+ num_per_mem++;
+ num_async_data--;
+
+ list_add_tail(&pasync_data_h->link,
+ &pasync_ctx->async_data.
+ free_list);
+ pasync_data_h++;
+ pasync_ctx->async_data.free_entries++;
+ pasync_ctx->async_data.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ data_busy_list);
+ }
+
+ pasync_ctx->async_header.host_write_ptr = 0;
+ pasync_ctx->async_header.ep_read_ptr = -1;
+ pasync_ctx->async_data.host_write_ptr = 0;
+ pasync_ctx->async_data.ep_read_ptr = -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+be_sgl_create_contiguous(void *virtual_address,
+ u64 physical_address, u32 length,
+ struct be_dma_mem *sgl)
+{
+ WARN_ON(!virtual_address);
+ WARN_ON(!physical_address);
+ WARN_ON(!length > 0);
+ WARN_ON(!sgl);
+
+ sgl->va = virtual_address;
+ sgl->dma = (unsigned long)physical_address;
+ sgl->size = length;
+
+ return 0;
+}
+
+static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
+{
+ memset(sgl, 0, sizeof(*sgl));
+}
+
+static void
+hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
+ struct mem_array *pmem, struct be_dma_mem *sgl)
+{
+ if (sgl->va)
+ be_sgl_destroy_contiguous(sgl);
+
+ be_sgl_create_contiguous(pmem->virtual_address,
+ pmem->bus_address.u.a64.address,
+ pmem->size, sgl);
+}
+
+static void
+hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
+ struct mem_array *pmem, struct be_dma_mem *sgl)
+{
+ if (sgl->va)
+ be_sgl_destroy_contiguous(sgl);
+
+ be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
+ pmem->bus_address.u.a64.address,
+ pmem->size, sgl);
+}
+
+static int be_fill_queue(struct be_queue_info *q,
+ u16 len, u16 entry_size, void *vaddress)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ mem->size = len * entry_size;
+ mem->va = vaddress;
+ if (!mem->va)
+ return -ENOMEM;
+ memset(mem->va, 0, mem->size);
+ return 0;
+}
+
+static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context)
+{
+ unsigned int i, num_eq_pages;
+ int ret = 0, eq_for_mcc;
+ struct be_queue_info *eq;
+ struct be_dma_mem *mem;
+ void *eq_vaddress;
+ dma_addr_t paddr;
+
+ num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
+ sizeof(struct be_eq_entry));
+
+ if (phba->msix_enabled)
+ eq_for_mcc = 1;
+ else
+ eq_for_mcc = 0;
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+ eq = &phwi_context->be_eq[i].q;
+ mem = &eq->dma_mem;
+ phwi_context->be_eq[i].phba = phba;
+ eq_vaddress = pci_alloc_consistent(phba->pcidev,
+ num_eq_pages * PAGE_SIZE,
+ &paddr);
+ if (!eq_vaddress)
+ goto create_eq_error;
+
+ mem->va = eq_vaddress;
+ ret = be_fill_queue(eq, phba->params.num_eq_entries,
+ sizeof(struct be_eq_entry), eq_vaddress);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed for EQ\n");
+ goto create_eq_error;
+ }
+
+ mem->dma = paddr;
+ ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+ phwi_context->cur_eqd);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_cmd_eq_create"
+ "Failed for EQ\n");
+ goto create_eq_error;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : eqid = %d\n",
+ phwi_context->be_eq[i].q.id);
+ }
+ return 0;
+create_eq_error:
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+ eq = &phwi_context->be_eq[i].q;
+ mem = &eq->dma_mem;
+ if (mem->va)
+ pci_free_consistent(phba->pcidev, num_eq_pages
+ * PAGE_SIZE,
+ mem->va, mem->dma);
+ }
+ return ret;
+}
+
+static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context)
+{
+ unsigned int i, num_cq_pages;
+ int ret = 0;
+ struct be_queue_info *cq, *eq;
+ struct be_dma_mem *mem;
+ struct be_eq_obj *pbe_eq;
+ void *cq_vaddress;
+ dma_addr_t paddr;
+
+ num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
+ sizeof(struct sol_cqe));
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ cq = &phwi_context->be_cq[i];
+ eq = &phwi_context->be_eq[i].q;
+ pbe_eq = &phwi_context->be_eq[i];
+ pbe_eq->cq = cq;
+ pbe_eq->phba = phba;
+ mem = &cq->dma_mem;
+ cq_vaddress = pci_alloc_consistent(phba->pcidev,
+ num_cq_pages * PAGE_SIZE,
+ &paddr);
+ if (!cq_vaddress)
+ goto create_cq_error;
+ ret = be_fill_queue(cq, phba->params.num_cq_entries,
+ sizeof(struct sol_cqe), cq_vaddress);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed "
+ "for ISCSI CQ\n");
+ goto create_cq_error;
+ }
+
+ mem->dma = paddr;
+ ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+ false, 0);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_cmd_eq_create"
+ "Failed for ISCSI CQ\n");
+ goto create_cq_error;
+ }
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi cq_id is %d for eq_id %d\n"
+ "iSCSI CQ CREATED\n", cq->id, eq->id);
+ }
+ return 0;
+
+create_cq_error:
+ for (i = 0; i < phba->num_cpus; i++) {
+ cq = &phwi_context->be_cq[i];
+ mem = &cq->dma_mem;
+ if (mem->va)
+ pci_free_consistent(phba->pcidev, num_cq_pages
+ * PAGE_SIZE,
+ mem->va, mem->dma);
+ }
+ return ret;
+
+}
+
+static int
+beiscsi_create_def_hdr(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context,
+ struct hwi_controller *phwi_ctrlr,
+ unsigned int def_pdu_ring_sz, uint8_t ulp_num)
+{
+ unsigned int idx;
+ int ret;
+ struct be_queue_info *dq, *cq;
+ struct be_dma_mem *mem;
+ struct be_mem_descriptor *mem_descr;
+ void *dq_vaddress;
+
+ idx = 0;
+ dq = &phwi_context->be_def_hdrq[ulp_num];
+ cq = &phwi_context->be_cq[0];
+ mem = &dq->dma_mem;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ dq_vaddress = mem_descr->mem_array[idx].virtual_address;
+ ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
+ sizeof(struct phys_addr),
+ sizeof(struct phys_addr), dq_vaddress);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
+ ulp_num);
+
+ return ret;
+ }
+ mem->dma = (unsigned long)mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+ ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
+ def_pdu_ring_sz,
+ phba->params.defpdu_hdr_sz,
+ BEISCSI_DEFQ_HDR, ulp_num);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
+ ulp_num);
+
+ return ret;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
+ ulp_num,
+ phwi_context->be_def_hdrq[ulp_num].id);
+ hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
+ return 0;
+}
+
+static int
+beiscsi_create_def_data(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context,
+ struct hwi_controller *phwi_ctrlr,
+ unsigned int def_pdu_ring_sz, uint8_t ulp_num)
+{
+ unsigned int idx;
+ int ret;
+ struct be_queue_info *dataq, *cq;
+ struct be_dma_mem *mem;
+ struct be_mem_descriptor *mem_descr;
+ void *dq_vaddress;
+
+ idx = 0;
+ dataq = &phwi_context->be_def_dataq[ulp_num];
+ cq = &phwi_context->be_cq[0];
+ mem = &dataq->dma_mem;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ dq_vaddress = mem_descr->mem_array[idx].virtual_address;
+ ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
+ sizeof(struct phys_addr),
+ sizeof(struct phys_addr), dq_vaddress);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : be_fill_queue Failed for DEF PDU "
+ "DATA on ULP : %d\n",
+ ulp_num);
+
+ return ret;
+ }
+ mem->dma = (unsigned long)mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+ ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
+ def_pdu_ring_sz,
+ phba->params.defpdu_data_sz,
+ BEISCSI_DEFQ_DATA, ulp_num);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d be_cmd_create_default_pdu_queue"
+ " Failed for DEF PDU DATA on ULP : %d\n",
+ ulp_num);
+ return ret;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi def data id on ULP : %d is %d\n",
+ ulp_num,
+ phwi_context->be_def_dataq[ulp_num].id);
+
+ hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : DEFAULT PDU DATA RING CREATED"
+ "on ULP : %d\n", ulp_num);
+
+ return 0;
+}
+
+
+static int
+beiscsi_post_template_hdr(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ struct mem_array *pm_arr;
+ struct be_dma_mem sgl;
+ int status, ulp_num;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ pm_arr = mem_descr->mem_array;
+
+ hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+ status = be_cmd_iscsi_post_template_hdr(
+ &phba->ctrl, &sgl);
+
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Post Template HDR Failed for"
+ "ULP_%d\n", ulp_num);
+ return status;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Template HDR Pages Posted for"
+ "ULP_%d\n", ulp_num);
+ }
+ }
+ return 0;
+}
+
+static int
+beiscsi_post_pages(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ struct mem_array *pm_arr;
+ unsigned int page_offset, i;
+ struct be_dma_mem sgl;
+ int status, ulp_num = 0;
+
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_SGE;
+ pm_arr = mem_descr->mem_array;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+
+ page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
+ phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
+ for (i = 0; i < mem_descr->num_elements; i++) {
+ hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+ status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
+ page_offset,
+ (pm_arr->size / PAGE_SIZE));
+ page_offset += pm_arr->size / PAGE_SIZE;
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : post sgl failed.\n");
+ return status;
+ }
+ pm_arr++;
+ }
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : POSTED PAGES\n");
+ return 0;
+}
+
+static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+ if (mem->va) {
+ pci_free_consistent(phba->pcidev, mem->size,
+ mem->va, mem->dma);
+ mem->va = NULL;
+ }
+}
+
+static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
+ u16 len, u16 entry_size)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ mem->size = len * entry_size;
+ mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+ if (!mem->va)
+ return -ENOMEM;
+ return 0;
+}
+
+static int
+beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context,
+ struct hwi_controller *phwi_ctrlr)
+{
+ unsigned int wrb_mem_index, offset, size, num_wrb_rings;
+ u64 pa_addr_lo;
+ unsigned int idx, num, i, ulp_num;
+ struct mem_array *pwrb_arr;
+ void *wrb_vaddr;
+ struct be_dma_mem sgl;
+ struct be_mem_descriptor *mem_descr;
+ struct hwi_wrb_context *pwrb_context;
+ int status;
+ uint8_t ulp_count = 0, ulp_base_num = 0;
+ uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
+
+ idx = 0;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_WRB;
+ pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
+ GFP_KERNEL);
+ if (!pwrb_arr) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Memory alloc failed in create wrb ring.\n");
+ return -ENOMEM;
+ }
+ wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
+ pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
+ num_wrb_rings = mem_descr->mem_array[idx].size /
+ (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
+
+ for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
+ if (num_wrb_rings) {
+ pwrb_arr[num].virtual_address = wrb_vaddr;
+ pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
+ pwrb_arr[num].size = phba->params.wrbs_per_cxn *
+ sizeof(struct iscsi_wrb);
+ wrb_vaddr += pwrb_arr[num].size;
+ pa_addr_lo += pwrb_arr[num].size;
+ num_wrb_rings--;
+ } else {
+ idx++;
+ wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
+ pa_addr_lo = mem_descr->mem_array[idx].\
+ bus_address.u.a64.address;
+ num_wrb_rings = mem_descr->mem_array[idx].size /
+ (phba->params.wrbs_per_cxn *
+ sizeof(struct iscsi_wrb));
+ pwrb_arr[num].virtual_address = wrb_vaddr;
+ pwrb_arr[num].bus_address.u.a64.address\
+ = pa_addr_lo;
+ pwrb_arr[num].size = phba->params.wrbs_per_cxn *
+ sizeof(struct iscsi_wrb);
+ wrb_vaddr += pwrb_arr[num].size;
+ pa_addr_lo += pwrb_arr[num].size;
+ num_wrb_rings--;
+ }
+ }
+
+ /* Get the ULP Count */
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ ulp_count++;
+ ulp_base_num = ulp_num;
+ cid_count_ulp[ulp_num] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num);
+ }
+
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ wrb_mem_index = 0;
+ offset = 0;
+ size = 0;
+
+ if (ulp_count > 1) {
+ ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
+
+ if (!cid_count_ulp[ulp_base_num])
+ ulp_base_num = (ulp_base_num + 1) %
+ BEISCSI_ULP_COUNT;
+
+ cid_count_ulp[ulp_base_num]--;
+ }
+
+
+ hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
+ status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
+ &phwi_context->be_wrbq[i],
+ &phwi_ctrlr->wrb_context[i],
+ ulp_base_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : wrbq create failed.");
+ kfree(pwrb_arr);
+ return status;
+ }
+ pwrb_context = &phwi_ctrlr->wrb_context[i];
+ BE_SET_CID_TO_CRI(i, pwrb_context->cid);
+ }
+ kfree(pwrb_arr);
+ return 0;
+}
+
+static void free_wrb_handles(struct beiscsi_hba *phba)
+{
+ unsigned int index;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
+ pwrb_context = &phwi_ctrlr->wrb_context[index];
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
+}
+
+static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ q = &phba->ctrl.mcc_obj.q;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+ be_queue_free(phba, q);
+
+ q = &phba->ctrl.mcc_obj.cq;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ be_queue_free(phba, q);
+}
+
+static void hwi_cleanup(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct hwi_async_pdu_context *pasync_ctx;
+ int i, eq_for_mcc, ulp_num;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ be_cmd_iscsi_remove_template_hdr(ctrl);
+
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ q = &phwi_context->be_wrbq[i];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
+ }
+ kfree(phwi_context->be_wrbq);
+ free_wrb_handles(phba);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ q = &phwi_context->be_def_hdrq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ q = &phwi_context->be_def_dataq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ }
+ }
+
+ beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+
+ for (i = 0; i < (phba->num_cpus); i++) {
+ q = &phwi_context->be_cq[i];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ }
+
+ be_mcc_queues_destroy(phba);
+ if (phba->msix_enabled)
+ eq_for_mcc = 1;
+ else
+ eq_for_mcc = 0;
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+ q = &phwi_context->be_eq[i].q;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+ }
+ be_cmd_fw_uninit(ctrl);
+}
+
+static int be_mcc_queues_create(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context)
+{
+ struct be_queue_info *q, *cq;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ /* Alloc MCC compl queue */
+ cq = &phba->ctrl.mcc_obj.cq;
+ if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_compl)))
+ goto err;
+ /* Ask BE to create MCC compl queue; */
+ if (phba->msix_enabled) {
+ if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
+ [phba->num_cpus].q, false, true, 0))
+ goto mcc_cq_free;
+ } else {
+ if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
+ false, true, 0))
+ goto mcc_cq_free;
+ }
+
+ /* Alloc MCC queue */
+ q = &phba->ctrl.mcc_obj.q;
+ if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (beiscsi_cmd_mccq_create(phba, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(phba, q);
+mcc_cq_destroy:
+ beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(phba, cq);
+err:
+ return -ENOMEM;
+}
+
+/**
+ * find_num_cpus()- Get the CPU online count
+ * @phba: ptr to priv structure
+ *
+ * CPU count is used for creating EQ.
+ **/
+static void find_num_cpus(struct beiscsi_hba *phba)
+{
+ int num_cpus = 0;
+
+ num_cpus = num_online_cpus();
+
+ switch (phba->generation) {
+ case BE_GEN2:
+ case BE_GEN3:
+ phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
+ BEISCSI_MAX_NUM_CPUS : num_cpus;
+ break;
+ case BE_GEN4:
+ /*
+ * If eqid_count == 1 fall back to
+ * INTX mechanism
+ **/
+ if (phba->fw_config.eqid_count == 1) {
+ enable_msix = 0;
+ phba->num_cpus = 1;
+ return;
+ }
+
+ phba->num_cpus =
+ (num_cpus > (phba->fw_config.eqid_count - 1)) ?
+ (phba->fw_config.eqid_count - 1) : num_cpus;
+ break;
+ default:
+ phba->num_cpus = 1;
+ }
+}
+
+static int hwi_init_port(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ unsigned int def_pdu_ring_sz;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ int status, ulp_num;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ phwi_context->max_eqd = 128;
+ phwi_context->min_eqd = 0;
+ phwi_context->cur_eqd = 0;
+ be_cmd_fw_initialize(&phba->ctrl);
+
+ status = beiscsi_create_eqs(phba, phwi_context);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EQ not created\n");
+ goto error;
+ }
+
+ status = be_mcc_queues_create(phba, phwi_context);
+ if (status != 0)
+ goto error;
+
+ status = mgmt_check_supported_fw(ctrl, phba);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Unsupported fw version\n");
+ goto error;
+ }
+
+ status = beiscsi_create_cqs(phba, phwi_context);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : CQ not created\n");
+ goto error;
+ }
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ def_pdu_ring_sz =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct phys_addr);
+
+ status = beiscsi_create_def_hdr(phba, phwi_context,
+ phwi_ctrlr,
+ def_pdu_ring_sz,
+ ulp_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Header not created for ULP : %d\n",
+ ulp_num);
+ goto error;
+ }
+
+ status = beiscsi_create_def_data(phba, phwi_context,
+ phwi_ctrlr,
+ def_pdu_ring_sz,
+ ulp_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Data not created for ULP : %d\n",
+ ulp_num);
+ goto error;
+ }
+ }
+ }
+
+ status = beiscsi_post_pages(phba);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Post SGL Pages Failed\n");
+ goto error;
+ }
+
+ status = beiscsi_post_template_hdr(phba);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Template HDR Posting for CXN Failed\n");
+ }
+
+ status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : WRB Rings not created\n");
+ goto error;
+ }
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ uint16_t async_arr_idx = 0;
+
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ uint16_t cri = 0;
+ struct hwi_async_pdu_context *pasync_ctx;
+
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
+ phwi_ctrlr, ulp_num);
+ for (cri = 0; cri <
+ phba->params.cxns_per_ctrl; cri++) {
+ if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
+ (phwi_ctrlr, cri))
+ pasync_ctx->cid_to_async_cri_map[
+ phwi_ctrlr->wrb_context[cri].cid] =
+ async_arr_idx++;
+ }
+ }
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_port success\n");
+ return 0;
+
+error:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_port failed");
+ hwi_cleanup(phba);
+ return status;
+}
+
+static int hwi_init_controller(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
+ phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
+ init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
+ phwi_ctrlr->phwi_ctxt);
+ } else {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
+ "than one element.Failing to load\n");
+ return -ENOMEM;
+ }
+
+ iscsi_init_global_templates(phba);
+ if (beiscsi_init_wrb_handle(phba))
+ return -ENOMEM;
+
+ if (hwi_init_async_pdu_ctx(phba)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx failed\n");
+ return -ENOMEM;
+ }
+
+ if (hwi_init_port(phba) != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_controller failed\n");
+
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void beiscsi_free_mem(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ int i, j;
+
+ mem_descr = phba->init_mem;
+ i = 0;
+ j = 0;
+ for (i = 0; i < SE_MEM_MAX; i++) {
+ for (j = mem_descr->num_elements; j > 0; j--) {
+ pci_free_consistent(phba->pcidev,
+ mem_descr->mem_array[j - 1].size,
+ mem_descr->mem_array[j - 1].virtual_address,
+ (unsigned long)mem_descr->mem_array[j - 1].
+ bus_address.u.a64.address);
+ }
+
+ kfree(mem_descr->mem_array);
+ mem_descr++;
+ }
+ kfree(phba->init_mem);
+ kfree(phba->phwi_ctrlr->wrb_context);
+ kfree(phba->phwi_ctrlr);
+}
+
+static int beiscsi_init_controller(struct beiscsi_hba *phba)
+{
+ int ret = -ENOMEM;
+
+ ret = beiscsi_get_memory(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe -"
+ "Failed in beiscsi_alloc_memory\n");
+ return ret;
+ }
+
+ ret = hwi_init_controller(phba);
+ if (ret)
+ goto free_init;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Return success from beiscsi_init_controller");
+
+ return 0;
+
+free_init:
+ beiscsi_free_mem(phba);
+ return ret;
+}
+
+static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
+ struct sgl_handle *psgl_handle;
+ struct iscsi_sge *pfrag;
+ unsigned int arr_index, i, idx;
+ unsigned int ulp_icd_start, ulp_num = 0;
+
+ phba->io_sgl_hndl_avbl = 0;
+ phba->eh_sgl_hndl_avbl = 0;
+
+ mem_descr_sglh = phba->init_mem;
+ mem_descr_sglh += HWI_MEM_SGLH;
+ if (1 == mem_descr_sglh->num_elements) {
+ phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
+ phba->params.ios_per_ctrl,
+ GFP_KERNEL);
+ if (!phba->io_sgl_hndl_base) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
+ return -ENOMEM;
+ }
+ phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
+ (phba->params.icds_per_ctrl -
+ phba->params.ios_per_ctrl),
+ GFP_KERNEL);
+ if (!phba->eh_sgl_hndl_base) {
+ kfree(phba->io_sgl_hndl_base);
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Mem Alloc Failed. Failing to load\n");
+ return -ENOMEM;
+ }
+ } else {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : HWI_MEM_SGLH is more than one element."
+ "Failing to load\n");
+ return -ENOMEM;
+ }
+
+ arr_index = 0;
+ idx = 0;
+ while (idx < mem_descr_sglh->num_elements) {
+ psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
+
+ for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
+ sizeof(struct sgl_handle)); i++) {
+ if (arr_index < phba->params.ios_per_ctrl) {
+ phba->io_sgl_hndl_base[arr_index] = psgl_handle;
+ phba->io_sgl_hndl_avbl++;
+ arr_index++;
+ } else {
+ phba->eh_sgl_hndl_base[arr_index -
+ phba->params.ios_per_ctrl] =
+ psgl_handle;
+ arr_index++;
+ phba->eh_sgl_hndl_avbl++;
+ }
+ psgl_handle++;
+ }
+ idx++;
+ }
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phba->io_sgl_hndl_avbl=%d"
+ "phba->eh_sgl_hndl_avbl=%d\n",
+ phba->io_sgl_hndl_avbl,
+ phba->eh_sgl_hndl_avbl);
+
+ mem_descr_sg = phba->init_mem;
+ mem_descr_sg += HWI_MEM_SGE;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "\n BM_%d : mem_descr_sg->num_elements=%d\n",
+ mem_descr_sg->num_elements);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+
+ ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+
+ arr_index = 0;
+ idx = 0;
+ while (idx < mem_descr_sg->num_elements) {
+ pfrag = mem_descr_sg->mem_array[idx].virtual_address;
+
+ for (i = 0;
+ i < (mem_descr_sg->mem_array[idx].size) /
+ (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
+ i++) {
+ if (arr_index < phba->params.ios_per_ctrl)
+ psgl_handle = phba->io_sgl_hndl_base[arr_index];
+ else
+ psgl_handle = phba->eh_sgl_hndl_base[arr_index -
+ phba->params.ios_per_ctrl];
+ psgl_handle->pfrag = pfrag;
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
+ pfrag += phba->params.num_sge_per_io;
+ psgl_handle->sgl_index = ulp_icd_start + arr_index++;
+ }
+ idx++;
+ }
+ phba->io_sgl_free_index = 0;
+ phba->io_sgl_alloc_index = 0;
+ phba->eh_sgl_free_index = 0;
+ phba->eh_sgl_alloc_index = 0;
+ return 0;
+}
+
+static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
+{
+ int ret;
+ uint16_t i, ulp_num;
+ struct ulp_cid_info *ptr_cid_info = NULL;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
+ GFP_KERNEL);
+
+ if (!ptr_cid_info) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory"
+ "for ULP_CID_INFO for ULP : %d\n",
+ ulp_num);
+ ret = -ENOMEM;
+ goto free_memory;
+
+ }
+
+ /* Allocate memory for CID array */
+ ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
+ BEISCSI_GET_CID_COUNT(phba,
+ ulp_num), GFP_KERNEL);
+ if (!ptr_cid_info->cid_array) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory"
+ "for CID_ARRAY for ULP : %d\n",
+ ulp_num);
+ kfree(ptr_cid_info);
+ ptr_cid_info = NULL;
+ ret = -ENOMEM;
+
+ goto free_memory;
+ }
+ ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
+ phba, ulp_num);
+
+ /* Save the cid_info_array ptr */
+ phba->cid_array_info[ulp_num] = ptr_cid_info;
+ }
+ }
+ phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
+ phba->params.cxns_per_ctrl, GFP_KERNEL);
+ if (!phba->ep_array) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory in "
+ "hba_setup_cid_tbls\n");
+ ret = -ENOMEM;
+
+ goto free_memory;
+ }
+
+ phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
+ phba->params.cxns_per_ctrl, GFP_KERNEL);
+ if (!phba->conn_table) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory in"
+ "hba_setup_cid_tbls\n");
+
+ kfree(phba->ep_array);
+ phba->ep_array = NULL;
+ ret = -ENOMEM;
+
+ goto free_memory;
+ }
+
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
+
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+ ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
+ phba->phwi_ctrlr->wrb_context[i].cid;
+
+ }
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ ptr_cid_info->cid_alloc = 0;
+ ptr_cid_info->cid_free = 0;
+ }
+ }
+ return 0;
+
+free_memory:
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ if (ptr_cid_info) {
+ kfree(ptr_cid_info->cid_array);
+ kfree(ptr_cid_info);
+ phba->cid_array_info[ulp_num] = NULL;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void hwi_enable_intr(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_queue_info *eq;
+ u8 __iomem *addr;
+ u32 reg, i;
+ u32 enabled;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
+ reg = ioread32(addr);
+
+ enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (!enabled) {
+ reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : reg =x%08x addr=%p\n", reg, addr);
+ iowrite32(reg, addr);
+ }
+
+ if (!phba->msix_enabled) {
+ eq = &phwi_context->be_eq[0].q;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : eq->id=%d\n", eq->id);
+
+ hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+ } else {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ eq = &phwi_context->be_eq[i].q;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : eq->id=%d\n", eq->id);
+ hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+ }
+ }
+}
+
+static void hwi_disable_intr(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
+ u32 reg = ioread32(addr);
+
+ u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (enabled) {
+ reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ iowrite32(reg, addr);
+ } else
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : In hwi_disable_intr, Already Disabled\n");
+}
+
+/**
+ * beiscsi_get_boot_info()- Get the boot session info
+ * @phba: The device priv structure instance
+ *
+ * Get the boot target info and store in driver priv structure
+ *
+ * return values
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
+{
+ struct be_cmd_get_session_resp *session_resp;
+ struct be_dma_mem nonemb_cmd;
+ unsigned int tag;
+ unsigned int s_handle;
+ int ret = -ENOMEM;
+
+ /* Get the session handle of the boot target */
+ ret = be_mgmt_get_boot_shandle(phba, &s_handle);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : No boot session\n");
+ return ret;
+ }
+ nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+ sizeof(*session_resp),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : Failed to allocate memory for"
+ "beiscsi_get_session_info\n");
+
+ return -ENOMEM;
+ }
+
+ tag = mgmt_get_session_info(phba, s_handle,
+ &nonemb_cmd);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : beiscsi_get_session_info"
+ " Failed\n");
+
+ goto boot_freemem;
+ }
+
+ ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BM_%d : beiscsi_get_session_info Failed");
+
+ if (ret != -EBUSY)
+ goto boot_freemem;
+ else
+ return ret;
+ }
+
+ session_resp = nonemb_cmd.va ;
+
+ memcpy(&phba->boot_sess, &session_resp->session_info,
+ sizeof(struct mgmt_session_info));
+ ret = 0;
+
+boot_freemem:
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return ret;
+}
+
+static void beiscsi_boot_release(void *data)
+{
+ struct beiscsi_hba *phba = data;
+
+ scsi_host_put(phba->shost);
+}
+
+static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ /* it has been created previously */
+ if (phba->boot_kset)
+ return 0;
+
+ /* get boot info using mgmt cmd */
+ if (beiscsi_get_boot_info(phba))
+ /* Try to see if we can carry on without this */
+ return 0;
+
+ phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+ if (!phba->boot_kset)
+ return -ENOMEM;
+
+ /* get a ref because the show function will ref the phba */
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+ boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_tgt_info,
+ beiscsi_tgt_get_attr_visibility,
+ beiscsi_boot_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+ boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_ini_info,
+ beiscsi_ini_get_attr_visibility,
+ beiscsi_boot_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+ boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_eth_info,
+ beiscsi_eth_get_attr_visibility,
+ beiscsi_boot_release);
+ if (!boot_kobj)
+ goto put_shost;
+ return 0;
+
+put_shost:
+ scsi_host_put(phba->shost);
+free_kset:
+ iscsi_boot_destroy_kset(phba->boot_kset);
+ return -ENOMEM;
+}
+
+static int beiscsi_init_port(struct beiscsi_hba *phba)
+{
+ int ret;
+
+ ret = beiscsi_init_controller(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe - Failed in"
+ "beiscsi_init_controller\n");
+ return ret;
+ }
+ ret = beiscsi_init_sgl_handle(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe - Failed in"
+ "beiscsi_init_sgl_handle\n");
+ goto do_cleanup_ctrlr;
+ }
+
+ if (hba_setup_cid_tbls(phba)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed in hba_setup_cid_tbls\n");
+ kfree(phba->io_sgl_hndl_base);
+ kfree(phba->eh_sgl_hndl_base);
+ goto do_cleanup_ctrlr;
+ }
+
+ return ret;
+
+do_cleanup_ctrlr:
+ hwi_cleanup(phba);
+ return ret;
+}
+
+static void hwi_purge_eq(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_queue_info *eq;
+ struct be_eq_entry *eqe = NULL;
+ int i, eq_msix;
+ unsigned int num_processed;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ if (phba->msix_enabled)
+ eq_msix = 1;
+ else
+ eq_msix = 0;
+
+ for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
+ eq = &phwi_context->be_eq[i].q;
+ eqe = queue_tail_node(eq);
+ num_processed = 0;
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_processed++;
+ }
+
+ if (num_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
+ }
+}
+
+static void beiscsi_clean_port(struct beiscsi_hba *phba)
+{
+ int mgmt_status, ulp_num;
+ struct ulp_cid_info *ptr_cid_info = NULL;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
+ if (mgmt_status)
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : mgmt_epfw_cleanup FAILED"
+ " for ULP_%d\n", ulp_num);
+ }
+ }
+
+ hwi_purge_eq(phba);
+ hwi_cleanup(phba);
+ kfree(phba->io_sgl_hndl_base);
+ kfree(phba->eh_sgl_hndl_base);
+ kfree(phba->ep_array);
+ kfree(phba->conn_table);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ if (ptr_cid_info) {
+ kfree(ptr_cid_info->cid_array);
+ kfree(ptr_cid_info);
+ phba->cid_array_info[ulp_num] = NULL;
+ }
+ }
+ }
+
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources binded to CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task)
+{
+ struct beiscsi_io_task *io_task;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+ io_task = task->dd_data;
+
+ if (io_task->pwrb_handle) {
+ memset(io_task->pwrb_handle->pwrb, 0,
+ sizeof(struct iscsi_wrb));
+ free_wrb_handle(phba, pwrb_context,
+ io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ }
+
+ if (io_task->psgl_handle) {
+ spin_lock_bh(&phba->mgmt_sgl_lock);
+ free_mgmt_sgl_handle(phba,
+ io_task->psgl_handle);
+ io_task->psgl_handle = NULL;
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
+ }
+
+ if (io_task->mtask_addr)
+ pci_unmap_single(phba->pcidev,
+ io_task->mtask_addr,
+ io_task->mtask_data_count,
+ PCI_DMA_TODEVICE);
+}
+
+/**
+ * beiscsi_cleanup_task()- Free driver resources of the task
+ * @task: ptr to the iscsi task
+ *
+ **/
+static void beiscsi_cleanup_task(struct iscsi_task *task)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+ if (io_task->cmd_bhs) {
+ pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+ io_task->bhs_pa.u.a64.address);
+ io_task->cmd_bhs = NULL;
+ }
+
+ if (task->sc) {
+ if (io_task->pwrb_handle) {
+ free_wrb_handle(phba, pwrb_context,
+ io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ }
+
+ if (io_task->psgl_handle) {
+ spin_lock(&phba->io_sgl_lock);
+ free_io_sgl_handle(phba, io_task->psgl_handle);
+ spin_unlock(&phba->io_sgl_lock);
+ io_task->psgl_handle = NULL;
+ }
+
+ if (io_task->scsi_cmnd) {
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
+ } else {
+ if (!beiscsi_conn->login_in_progress)
+ beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
+ }
+}
+
+void
+beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_offload_params *params)
+{
+ struct wrb_handle *pwrb_handle;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_task *task = beiscsi_conn->task;
+ struct iscsi_session *session = task->conn->session;
+ u32 doorbell = 0;
+
+ /*
+ * We can always use 0 here because it is reserved by libiscsi for
+ * login/startup related tasks.
+ */
+ beiscsi_conn->login_in_progress = 0;
+ spin_lock_bh(&session->back_lock);
+ beiscsi_cleanup_task(task);
+ spin_unlock_bh(&session->back_lock);
+
+ pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
+
+ /* Check for the adapter family */
+ if (is_chip_be2_be3r(phba))
+ beiscsi_offload_cxn_v0(params, pwrb_handle,
+ phba->init_mem);
+ else
+ beiscsi_offload_cxn_v2(params, pwrb_handle);
+
+ be_dws_le_to_cpu(pwrb_handle->pwrb,
+ sizeof(struct iscsi_target_context_update_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+ << DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
+}
+
+static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
+ int *index, int *age)
+{
+ *index = (int)itt;
+ if (age)
+ *age = conn->session->age;
+}
+
+/**
+ * beiscsi_alloc_pdu - allocates pdu and related resources
+ * @task: libiscsi task
+ * @opcode: opcode of pdu for task
+ *
+ * This is called with the session lock held. It will allocate
+ * the wrb and sgl if needed for the command. And it will prep
+ * the pdu's itt. beiscsi_parse_pdu will later translate
+ * the pdu itt to the libiscsi task itt.
+ */
+static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ itt_t itt;
+ uint16_t cri_index = 0;
+ struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+ dma_addr_t paddr;
+
+ io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
+ GFP_ATOMIC, &paddr);
+ if (!io_task->cmd_bhs)
+ return -ENOMEM;
+ io_task->bhs_pa.u.a64.address = paddr;
+ io_task->libiscsi_itt = (itt_t)task->itt;
+ io_task->conn = beiscsi_conn;
+
+ task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
+ task->hdr_max = sizeof(struct be_cmd_bhs);
+ io_task->psgl_handle = NULL;
+ io_task->pwrb_handle = NULL;
+
+ if (task->sc) {
+ spin_lock(&phba->io_sgl_lock);
+ io_task->psgl_handle = alloc_io_sgl_handle(phba);
+ spin_unlock(&phba->io_sgl_lock);
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of IO_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
+ goto free_hndls;
+ }
+ io_task->pwrb_handle = alloc_wrb_handle(phba,
+ beiscsi_conn->beiscsi_conn_cid);
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
+ goto free_io_hndls;
+ }
+ } else {
+ io_task->scsi_cmnd = NULL;
+ if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+ beiscsi_conn->task = task;
+ if (!beiscsi_conn->login_in_progress) {
+ spin_lock(&phba->mgmt_sgl_lock);
+ io_task->psgl_handle = (struct sgl_handle *)
+ alloc_mgmt_sgl_handle(phba);
+ spin_unlock(&phba->mgmt_sgl_lock);
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
+ goto free_hndls;
+ }
+
+ beiscsi_conn->login_in_progress = 1;
+ beiscsi_conn->plogin_sgl_handle =
+ io_task->psgl_handle;
+ io_task->pwrb_handle =
+ alloc_wrb_handle(phba,
+ beiscsi_conn->beiscsi_conn_cid);
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
+ goto free_mgmt_hndls;
+ }
+ beiscsi_conn->plogin_wrb_handle =
+ io_task->pwrb_handle;
+
+ } else {
+ io_task->psgl_handle =
+ beiscsi_conn->plogin_sgl_handle;
+ io_task->pwrb_handle =
+ beiscsi_conn->plogin_wrb_handle;
+ }
+ } else {
+ spin_lock(&phba->mgmt_sgl_lock);
+ io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
+ spin_unlock(&phba->mgmt_sgl_lock);
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
+ goto free_hndls;
+ }
+ io_task->pwrb_handle =
+ alloc_wrb_handle(phba,
+ beiscsi_conn->beiscsi_conn_cid);
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
+ goto free_mgmt_hndls;
+ }
+
+ }
+ }
+ itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
+ wrb_index << 16) | (unsigned int)
+ (io_task->psgl_handle->sgl_index));
+ io_task->pwrb_handle->pio_handle = task;
+
+ io_task->cmd_bhs->iscsi_hdr.itt = itt;
+ return 0;
+
+free_io_hndls:
+ spin_lock(&phba->io_sgl_lock);
+ free_io_sgl_handle(phba, io_task->psgl_handle);
+ spin_unlock(&phba->io_sgl_lock);
+ goto free_hndls;
+free_mgmt_hndls:
+ spin_lock(&phba->mgmt_sgl_lock);
+ free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+ io_task->psgl_handle = NULL;
+ spin_unlock(&phba->mgmt_sgl_lock);
+free_hndls:
+ phwi_ctrlr = phba->phwi_ctrlr;
+ cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+ if (io_task->pwrb_handle)
+ free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+ io_task->bhs_pa.u.a64.address);
+ io_task->cmd_bhs = NULL;
+ return -ENOMEM;
+}
+int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+ unsigned int num_sg, unsigned int xferlen,
+ unsigned int writedir)
+{
+
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+
+ io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
+ io_task->bhs_len = sizeof(struct be_cmd_bhs);
+
+ if (writedir) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
+ }
+
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
+ type, pwrb);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
+ cpu_to_be16(*(unsigned short *)
+ &io_task->cmd_bhs->iscsi_hdr.lun));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) <<
+ DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
+ return 0;
+}
+
+static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
+ unsigned int num_sg, unsigned int xferlen,
+ unsigned int writedir)
+{
+
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+ io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
+ io_task->bhs_len = sizeof(struct be_cmd_bhs);
+
+ if (writedir) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
+ }
+
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
+ type, pwrb);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
+ cpu_to_be16(*(unsigned short *)
+ &io_task->cmd_bhs->iscsi_hdr.lun));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ hwi_write_sgl(pwrb, sg, num_sg, io_task);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
+ return 0;
+}
+
+static int beiscsi_mtask(struct iscsi_task *task)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+ unsigned int cid;
+ unsigned int pwrb_typeoffset = 0;
+
+ cid = beiscsi_conn->beiscsi_conn_cid;
+ pwrb = io_task->pwrb_handle->pwrb;
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ if (is_chip_be2_be3r(phba)) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
+ }
+
+
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ if (is_chip_be2_be3r(phba))
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 1);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 1);
+ } else {
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
+ if (is_chip_be2_be3r(phba))
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 0);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 0);
+ }
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_TEXT:
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_LOGOUT:
+ ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
+ hwi_write_buffer(pwrb, task);
+ break;
+
+ default:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : opcode =%d Not supported\n",
+ task->hdr->opcode & ISCSI_OPCODE_MASK);
+
+ return -EINVAL;
+ }
+
+ /* Set the task type */
+ io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
+ AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
+ AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
+
+ doorbell |= cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
+ return 0;
+}
+
+static int beiscsi_task_xmit(struct iscsi_task *task)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct beiscsi_hba *phba = NULL;
+ struct scatterlist *sg;
+ int num_sg;
+ unsigned int writedir = 0, xferlen = 0;
+
+ phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+
+ if (!sc)
+ return beiscsi_mtask(task);
+
+ io_task->scsi_cmnd = sc;
+ num_sg = scsi_dma_map(sc);
+ if (num_sg < 0) {
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_hba *phba = NULL;
+
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
+ "BM_%d : scsi_dma_map Failed "
+ "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
+ be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
+ io_task->libiscsi_itt, scsi_bufflen(sc));
+
+ return num_sg;
+ }
+ xferlen = scsi_bufflen(sc);
+ sg = scsi_sglist(sc);
+ if (sc->sc_data_direction == DMA_TO_DEVICE)
+ writedir = 1;
+ else
+ writedir = 0;
+
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
+}
+
+/**
+ * beiscsi_bsg_request - handle bsg request from ISCSI transport
+ * @job: job to handle
+ */
+static int beiscsi_bsg_request(struct bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct beiscsi_hba *phba;
+ struct iscsi_bsg_request *bsg_req = job->request;
+ int rc = -EINVAL;
+ unsigned int tag;
+ struct be_dma_mem nonemb_cmd;
+ struct be_cmd_resp_hdr *resp;
+ struct iscsi_bsg_reply *bsg_reply = job->reply;
+ unsigned short status, extd_status;
+
+ shost = iscsi_job_to_shost(job);
+ phba = iscsi_host_priv(shost);
+
+ switch (bsg_req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
+ nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ job->request_payload.payload_len,
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : Failed to allocate memory for "
+ "beiscsi_bsg_request\n");
+ return -ENOMEM;
+ }
+ tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
+ &nonemb_cmd);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : MBX Tag Allocation Failed\n");
+
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return -EAGAIN;
+ }
+
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ free_mcc_tag(&phba->ctrl, tag);
+ resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ nonemb_cmd.va, (resp->response_length
+ + sizeof(*resp)));
+ bsg_reply->reply_payload_rcv_len = resp->response_length;
+ bsg_reply->result = status;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ if (status || extd_status) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : MBX Cmd Failed"
+ " status = %d extd_status = %d\n",
+ status, extd_status);
+
+ return -EIO;
+ } else {
+ rc = 0;
+ }
+ break;
+
+ default:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BM_%d : Unsupported bsg command: 0x%x\n",
+ bsg_req->msgcode);
+ break;
+ }
+
+ return rc;
+}
+
+void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+{
+ /* Set the logging parameter */
+ beiscsi_log_enable_init(phba, beiscsi_log_enable);
+}
+
+/*
+ * beiscsi_quiesce()- Cleanup Driver resources
+ * @phba: Instance Priv structure
+ * @unload_state:i Clean or EEH unload state
+ *
+ * Free the OS and HW resources held by the driver
+ **/
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+ uint32_t unload_state)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_eq_obj *pbe_eq;
+ unsigned int i, msix_vec;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ hwi_disable_intr(phba);
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ msix_vec = phba->msix_entries[i].vector;
+ synchronize_irq(msix_vec);
+ free_irq(msix_vec, &phwi_context->be_eq[i]);
+ kfree(phba->msi_name[i]);
+ }
+ } else
+ if (phba->pcidev->irq) {
+ synchronize_irq(phba->pcidev->irq);
+ free_irq(phba->pcidev->irq, phba);
+ }
+ pci_disable_msix(phba->pcidev);
+ cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
+
+ if (unload_state == BEISCSI_CLEAN_UNLOAD) {
+ destroy_workqueue(phba->wq);
+ beiscsi_clean_port(phba);
+ beiscsi_free_mem(phba);
+
+ beiscsi_unmap_pci_function(phba);
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+ } else {
+ hwi_purge_eq(phba);
+ hwi_cleanup(phba);
+ }
+
+}
+
+static void beiscsi_remove(struct pci_dev *pcidev)
+{
+
+ struct beiscsi_hba *phba = NULL;
+
+ phba = pci_get_drvdata(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
+ return;
+ }
+
+ beiscsi_destroy_def_ifaces(phba);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
+ iscsi_boot_destroy_kset(phba->boot_kset);
+ iscsi_host_remove(phba->shost);
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
+ pci_disable_pcie_error_reporting(pcidev);
+ pci_set_drvdata(pcidev, NULL);
+ pci_disable_device(pcidev);
+}
+
+static void beiscsi_shutdown(struct pci_dev *pcidev)
+{
+
+ struct beiscsi_hba *phba = NULL;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
+ return;
+ }
+
+ phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+ iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
+ pci_disable_device(pcidev);
+}
+
+static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+{
+ int i, status;
+
+ for (i = 0; i <= phba->num_cpus; i++)
+ phba->msix_entries[i].entry = i;
+
+ status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
+ phba->num_cpus + 1, phba->num_cpus + 1);
+ if (status > 0)
+ phba->msix_enabled = true;
+
+ return;
+}
+
+static void be_eqd_update(struct beiscsi_hba *phba)
+{
+ struct be_set_eqd set_eqd[MAX_CPUS];
+ struct be_aic_obj *aic;
+ struct be_eq_obj *pbe_eq;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ int eqd, i, num = 0;
+ ulong now;
+ u32 pps, delta;
+ unsigned int tag;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ for (i = 0; i <= phba->num_cpus; i++) {
+ aic = &phba->aic_obj[i];
+ pbe_eq = &phwi_context->be_eq[i];
+ now = jiffies;
+ if (!aic->jiffs || time_before(now, aic->jiffs) ||
+ pbe_eq->cq_count < aic->eq_prev) {
+ aic->jiffs = now;
+ aic->eq_prev = pbe_eq->cq_count;
+ continue;
+ }
+ delta = jiffies_to_msecs(now - aic->jiffs);
+ pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
+ eqd = (pps / 1500) << 2;
+
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, phwi_context->max_eqd);
+ eqd = max_t(u32, eqd, phwi_context->min_eqd);
+
+ aic->jiffs = now;
+ aic->eq_prev = pbe_eq->cq_count;
+
+ if (eqd != aic->prev_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = pbe_eq->q.id;
+ aic->prev_eqd = eqd;
+ num++;
+ }
+ }
+ if (num) {
+ tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
+ if (tag)
+ beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ }
+}
+
+static void be_check_boot_session(struct beiscsi_hba *phba)
+{
+ if (beiscsi_setup_boot_info(phba))
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Could not set up "
+ "iSCSI boot info on async event.\n");
+}
+
+/*
+ * beiscsi_hw_health_check()- Check adapter health
+ * @work: work item to check HW health
+ *
+ * Check if adapter in an unrecoverable state or not.
+ **/
+static void
+beiscsi_hw_health_check(struct work_struct *work)
+{
+ struct beiscsi_hba *phba =
+ container_of(work, struct beiscsi_hba,
+ beiscsi_hw_check_task.work);
+
+ be_eqd_update(phba);
+
+ if (phba->state & BE_ADAPTER_CHECK_BOOT) {
+ phba->state &= ~BE_ADAPTER_CHECK_BOOT;
+ be_check_boot_session(phba);
+ }
+
+ beiscsi_ue_detect(phba);
+
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+}
+
+
+static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct beiscsi_hba *phba = NULL;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+ phba->state |= BE_ADAPTER_PCI_ERR;
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH error detected\n");
+
+ beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+
+ if (state == pci_channel_io_perm_failure) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH : State PERM Failure");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_disable_device(pdev);
+
+ /* The error could cause the FW to trigger a flash debug dump.
+ * Resetting the card while flash dump is in progress
+ * can cause it not to recover; wait for it to finish.
+ * Wait only for first function as it is needed only once per
+ * adapter.
+ **/
+ if (pdev->devfn == 0)
+ ssleep(30);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
+{
+ struct beiscsi_hba *phba = NULL;
+ int status = 0;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset\n");
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* Wait for the CHIP Reset to complete */
+ status = be_chk_reset_complete(phba);
+ if (!status) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset Completed\n");
+ } else {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset Completion Failure\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void beiscsi_eeh_resume(struct pci_dev *pdev)
+{
+ int ret = 0, i;
+ struct be_eq_obj *pbe_eq;
+ struct beiscsi_hba *phba = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+ pci_save_state(pdev);
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed\n");
+ goto ret_err;
+ }
+
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset.\n");
+ goto ret_err;
+ }
+
+ beiscsi_get_params(phba);
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->can_queue = phba->params.ios_per_ctrl;
+ ret = hwi_init_controller(phba);
+
+ for (i = 0; i < MAX_MCC_CMD; i++) {
+ init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+ phba->ctrl.mcc_tag[i] = i + 1;
+ phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_available++;
+ }
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
+ }
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
+ ret = beiscsi_init_irqs(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_eeh_resume - "
+ "Failed to beiscsi_init_irqs\n");
+ goto ret_err;
+ }
+
+ hwi_enable_intr(phba);
+ phba->state &= ~BE_ADAPTER_PCI_ERR;
+
+ return;
+ret_err:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : AER EEH Resume Failed\n");
+}
+
+static int beiscsi_dev_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
+{
+ struct beiscsi_hba *phba = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_eq_obj *pbe_eq;
+ int ret = 0, i;
+
+ ret = beiscsi_enable_pci(pcidev);
+ if (ret < 0) {
+ dev_err(&pcidev->dev,
+ "beiscsi_dev_probe - Failed to enable pci device\n");
+ return ret;
+ }
+
+ phba = beiscsi_hba_alloc(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev,
+ "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
+ goto disable_pci;
+ }
+
+ /* Enable EEH reporting */
+ ret = pci_enable_pcie_error_reporting(pcidev);
+ if (ret)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : PCIe Error Reporting "
+ "Enabling Failed\n");
+
+ pci_save_state(pcidev);
+
+ /* Initialize Driver configuration Paramters */
+ beiscsi_hba_attrs_init(phba);
+
+ phba->fw_timeout = false;
+ phba->mac_addr_set = false;
+
+
+ switch (pcidev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ case OC_DEVICE_ID2:
+ phba->generation = BE_GEN2;
+ phba->iotask_fn = beiscsi_iotask;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID3:
+ phba->generation = BE_GEN3;
+ phba->iotask_fn = beiscsi_iotask;
+ break;
+ case OC_SKH_ID1:
+ phba->generation = BE_GEN4;
+ phba->iotask_fn = beiscsi_iotask_v2;
+ break;
+ default:
+ phba->generation = 0;
+ }
+
+ ret = be_ctrl_init(phba, pcidev);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed in be_ctrl_init\n");
+ goto hba_free;
+ }
+
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed\n");
+ goto hba_free;
+ }
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset.\n");
+ goto hba_free;
+ }
+
+ spin_lock_init(&phba->io_sgl_lock);
+ spin_lock_init(&phba->mgmt_sgl_lock);
+ spin_lock_init(&phba->isr_lock);
+ spin_lock_init(&phba->async_pdu_lock);
+ ret = mgmt_get_fw_config(&phba->ctrl, phba);
+ if (ret != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Error getting fw config\n");
+ goto free_port;
+ }
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : num_cpus = %d\n",
+ phba->num_cpus);
+
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
+ beiscsi_get_params(phba);
+ phba->shost->can_queue = phba->params.ios_per_ctrl;
+ ret = beiscsi_init_port(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed in beiscsi_init_port\n");
+ goto free_port;
+ }
+
+ for (i = 0; i < MAX_MCC_CMD; i++) {
+ init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+ phba->ctrl.mcc_tag[i] = i + 1;
+ phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_available++;
+ memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
+ sizeof(struct be_dma_mem));
+ }
+
+ phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
+
+ snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
+ phba->shost->host_no);
+ phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
+ if (!phba->wq) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed to allocate work queue\n");
+ goto free_twq;
+ }
+
+ INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
+ beiscsi_hw_health_check);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
+ }
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
+ ret = beiscsi_init_irqs(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_dev_probe-"
+ "Failed to beiscsi_init_irqs\n");
+ goto free_blkenbld;
+ }
+ hwi_enable_intr(phba);
+
+ if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+ goto free_blkenbld;
+
+ if (beiscsi_setup_boot_info(phba))
+ /*
+ * log error but continue, because we may not be using
+ * iscsi boot.
+ */
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Could not set up "
+ "iSCSI boot info.\n");
+
+ beiscsi_create_def_ifaces(phba);
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
+ return 0;
+
+free_blkenbld:
+ destroy_workqueue(phba->wq);
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
+free_twq:
+ beiscsi_clean_port(phba);
+ beiscsi_free_mem(phba);
+free_port:
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+ beiscsi_unmap_pci_function(phba);
+hba_free:
+ if (phba->msix_enabled)
+ pci_disable_msix(phba->pcidev);
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
+ pci_set_drvdata(pcidev, NULL);
+disable_pci:
+ pci_disable_device(pcidev);
+ return ret;
+}
+
+static struct pci_error_handlers beiscsi_eeh_handlers = {
+ .error_detected = beiscsi_eeh_err_detected,
+ .slot_reset = beiscsi_eeh_reset,
+ .resume = beiscsi_eeh_resume,
+};
+
+struct iscsi_transport beiscsi_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
+ CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
+ .create_session = beiscsi_session_create,
+ .destroy_session = beiscsi_session_destroy,
+ .create_conn = beiscsi_conn_create,
+ .bind_conn = beiscsi_conn_bind,
+ .destroy_conn = iscsi_conn_teardown,
+ .attr_is_visible = be2iscsi_attr_is_visible,
+ .set_iface_param = be2iscsi_iface_set_param,
+ .get_iface_param = be2iscsi_iface_get_param,
+ .set_param = beiscsi_set_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = beiscsi_get_host_param,
+ .start_conn = beiscsi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .send_pdu = iscsi_conn_send_pdu,
+ .xmit_task = beiscsi_task_xmit,
+ .cleanup_task = beiscsi_cleanup_task,
+ .alloc_pdu = beiscsi_alloc_pdu,
+ .parse_pdu_itt = beiscsi_parse_pdu,
+ .get_stats = beiscsi_conn_get_stats,
+ .get_ep_param = beiscsi_ep_get_param,
+ .ep_connect = beiscsi_ep_connect,
+ .ep_poll = beiscsi_ep_poll,
+ .ep_disconnect = beiscsi_ep_disconnect,
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .bsg_request = beiscsi_bsg_request,
+};
+
+static struct pci_driver beiscsi_pci_driver = {
+ .name = DRV_NAME,
+ .probe = beiscsi_dev_probe,
+ .remove = beiscsi_remove,
+ .shutdown = beiscsi_shutdown,
+ .id_table = beiscsi_pci_id_table,
+ .err_handler = &beiscsi_eeh_handlers
+};
+
+
+static int __init beiscsi_module_init(void)
+{
+ int ret;
+
+ beiscsi_scsi_transport =
+ iscsi_register_transport(&beiscsi_iscsi_transport);
+ if (!beiscsi_scsi_transport) {
+ printk(KERN_ERR
+ "beiscsi_module_init - Unable to register beiscsi transport.\n");
+ return -ENOMEM;
+ }
+ printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
+ &beiscsi_iscsi_transport);
+
+ ret = pci_register_driver(&beiscsi_pci_driver);
+ if (ret) {
+ printk(KERN_ERR
+ "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
+ goto unregister_iscsi_transport;
+ }
+ return 0;
+
+unregister_iscsi_transport:
+ iscsi_unregister_transport(&beiscsi_iscsi_transport);
+ return ret;
+}
+
+static void __exit beiscsi_module_exit(void)
+{
+ pci_unregister_driver(&beiscsi_pci_driver);
+ iscsi_unregister_transport(&beiscsi_iscsi_transport);
+}
+
+module_init(beiscsi_module_init);
+module_exit(beiscsi_module_exit);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
new file mode 100644
index 000000000..e70ea26bb
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -0,0 +1,1080 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef _BEISCSI_MAIN_
+#define _BEISCSI_MAIN_
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/aer.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#define DRV_NAME "be2iscsi"
+#define BUILD_STR "10.4.114.0"
+#define BE_NAME "Avago Technologies OneConnect" \
+ "Open-iSCSI Driver version" BUILD_STR
+#define DRV_DESC BE_NAME " " "Driver"
+
+#define BE_VENDOR_ID 0x19A2
+#define ELX_VENDOR_ID 0x10DF
+/* DEVICE ID's for BE2 */
+#define BE_DEVICE_ID1 0x212
+#define OC_DEVICE_ID1 0x702
+#define OC_DEVICE_ID2 0x703
+
+/* DEVICE ID's for BE3 */
+#define BE_DEVICE_ID2 0x222
+#define OC_DEVICE_ID3 0x712
+
+/* DEVICE ID for SKH */
+#define OC_SKH_ID1 0x722
+
+#define BE2_IO_DEPTH 1024
+#define BE2_MAX_SESSIONS 256
+#define BE2_CMDS_PER_CXN 128
+#define BE2_TMFS 16
+#define BE2_NOPOUT_REQ 16
+#define BE2_SGE 32
+#define BE2_DEFPDU_HDR_SZ 64
+#define BE2_DEFPDU_DATA_SZ 8192
+
+#define MAX_CPUS 64
+#define BEISCSI_MAX_NUM_CPUS 7
+
+#define BEISCSI_VER_STRLEN 32
+
+#define BEISCSI_SGLIST_ELEMENTS 30
+
+#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
+#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
+#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
+
+#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
+#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
+#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
+#define BEISCSI_MAX_FRAGS_INIT 192
+#define BE_NUM_MSIX_ENTRIES 1
+
+#define MPU_EP_CONTROL 0
+#define MPU_EP_SEMAPHORE 0xac
+#define BE2_SOFT_RESET 0x5c
+#define BE2_PCI_ONLINE0 0xb0
+#define BE2_PCI_ONLINE1 0xb4
+#define BE2_SET_RESET 0x80
+#define BE2_MPU_IRAM_ONLINE 0x00000080
+
+#define BE_SENSE_INFO_SIZE 258
+#define BE_ISCSI_PDU_HEADER_SIZE 64
+#define BE_MIN_MEM_SIZE 16384
+#define MAX_CMD_SZ 65536
+#define IIOC_SCSI_DATA 0x05 /* Write Operation */
+
+#define INVALID_SESS_HANDLE 0xFFFFFFFF
+
+/**
+ * Adapter States
+ **/
+#define BE_ADAPTER_LINK_UP 0x001
+#define BE_ADAPTER_LINK_DOWN 0x002
+#define BE_ADAPTER_PCI_ERR 0x004
+#define BE_ADAPTER_STATE_SHUTDOWN 0x008
+#define BE_ADAPTER_CHECK_BOOT 0x010
+
+
+#define BEISCSI_CLEAN_UNLOAD 0x01
+#define BEISCSI_EEH_UNLOAD 0x02
+/**
+ * hardware needs the async PDU buffers to be posted in multiples of 8
+ * So have atleast 8 of them by default
+ */
+
+#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \
+ (phwi->phwi_ctxt->pasync_ctx[ulp_num])
+
+/********* Memory BAR register ************/
+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
+/**
+ * Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
+ * Disable" may still globally block interrupts in addition to individual
+ * interrupt masks; a mechanism for the device driver to block all interrupts
+ * atomically without having to arbitrate for the PCI Interrupt Disable bit
+ * with the OS.
+ */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
+
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
+/**
+ * Macros for reading/writing a protection domain or CSR registers
+ * in BladeEngine.
+ */
+
+#define DB_TXULP0_OFFSET 0x40
+#define DB_RXULP0_OFFSET 0xA0
+/********* Event Q door bell *************/
+#define DB_EQ_OFFSET DB_CQ_OFFSET
+#define DB_EQ_RING_ID_LOW_MASK 0x1FF /* bits 0 - 8 */
+/* Clear the interrupt for this eq */
+#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
+/* Must be 1 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Higher Order EQ_ID bit */
+#define DB_EQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */
+#define DB_EQ_HIGH_SET_SHIFT 11
+#define DB_EQ_HIGH_FEILD_SHIFT 9
+/* Number of event entries processed */
+#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+
+/********* Compl Q door bell *************/
+#define DB_CQ_OFFSET 0x120
+#define DB_CQ_RING_ID_LOW_MASK 0x3FF /* bits 0 - 9 */
+/* Higher Order CQ_ID bit */
+#define DB_CQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */
+#define DB_CQ_HIGH_SET_SHIFT 11
+#define DB_CQ_HIGH_FEILD_SHIFT 10
+
+/* Number of event entries processed */
+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
+
+#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
+#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id)
+#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)
+
+#define PAGES_REQUIRED(x) \
+ ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
+
+#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
+
+#define MEM_DESCR_OFFSET 8
+#define BEISCSI_DEFQ_HDR 1
+#define BEISCSI_DEFQ_DATA 0
+enum be_mem_enum {
+ HWI_MEM_ADDN_CONTEXT,
+ HWI_MEM_WRB,
+ HWI_MEM_WRBH,
+ HWI_MEM_SGLH,
+ HWI_MEM_SGE,
+ HWI_MEM_TEMPLATE_HDR_ULP0,
+ HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */
+ HWI_MEM_ASYNC_DATA_BUF_ULP0,
+ HWI_MEM_ASYNC_HEADER_RING_ULP0,
+ HWI_MEM_ASYNC_DATA_RING_ULP0,
+ HWI_MEM_ASYNC_HEADER_HANDLE_ULP0,
+ HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */
+ HWI_MEM_ASYNC_PDU_CONTEXT_ULP0,
+ HWI_MEM_TEMPLATE_HDR_ULP1,
+ HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */
+ HWI_MEM_ASYNC_DATA_BUF_ULP1,
+ HWI_MEM_ASYNC_HEADER_RING_ULP1,
+ HWI_MEM_ASYNC_DATA_RING_ULP1,
+ HWI_MEM_ASYNC_HEADER_HANDLE_ULP1,
+ HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */
+ HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,
+ ISCSI_MEM_GLOBAL_HEADER,
+ SE_MEM_MAX
+};
+
+struct be_bus_address32 {
+ unsigned int address_lo;
+ unsigned int address_hi;
+};
+
+struct be_bus_address64 {
+ unsigned long long address;
+};
+
+struct be_bus_address {
+ union {
+ struct be_bus_address32 a32;
+ struct be_bus_address64 a64;
+ } u;
+};
+
+struct mem_array {
+ struct be_bus_address bus_address; /* Bus address of location */
+ void *virtual_address; /* virtual address to the location */
+ unsigned int size; /* Size required by memory block */
+};
+
+struct be_mem_descriptor {
+ unsigned int index; /* Index of this memory parameter */
+ unsigned int category; /* type indicates cached/non-cached */
+ unsigned int num_elements; /* number of elements in this
+ * descriptor
+ */
+ unsigned int alignment_mask; /* Alignment mask for this block */
+ unsigned int size_in_bytes; /* Size required by memory block */
+ struct mem_array *mem_array;
+};
+
+struct sgl_handle {
+ unsigned int sgl_index;
+ unsigned int type;
+ unsigned int cid;
+ struct iscsi_task *task;
+ struct iscsi_sge *pfrag;
+};
+
+struct hba_parameters {
+ unsigned int ios_per_ctrl;
+ unsigned int cxns_per_ctrl;
+ unsigned int asyncpdus_per_ctrl;
+ unsigned int icds_per_ctrl;
+ unsigned int num_sge_per_io;
+ unsigned int defpdu_hdr_sz;
+ unsigned int defpdu_data_sz;
+ unsigned int num_cq_entries;
+ unsigned int num_eq_entries;
+ unsigned int wrbs_per_cxn;
+ unsigned int crashmode;
+ unsigned int hba_num;
+
+ unsigned int mgmt_ws_sz;
+ unsigned int hwi_ws_sz;
+
+ unsigned int eto;
+ unsigned int ldto;
+
+ unsigned int dbg_flags;
+ unsigned int num_cxn;
+
+ unsigned int eq_timer;
+ /**
+ * These are calculated from other params. They're here
+ * for debug purposes
+ */
+ unsigned int num_mcc_pages;
+ unsigned int num_mcc_cq_pages;
+ unsigned int num_cq_pages;
+ unsigned int num_eq_pages;
+
+ unsigned int num_async_pdu_buf_pages;
+ unsigned int num_async_pdu_buf_sgl_pages;
+ unsigned int num_async_pdu_buf_cq_pages;
+
+ unsigned int num_async_pdu_hdr_pages;
+ unsigned int num_async_pdu_hdr_sgl_pages;
+ unsigned int num_async_pdu_hdr_cq_pages;
+
+ unsigned int num_sge;
+};
+
+struct invalidate_command_table {
+ unsigned short icd;
+ unsigned short cid;
+} __packed;
+
+#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
+ (phwi_ctrlr->wrb_context[cri].ulp_num)
+struct hwi_wrb_context {
+ struct list_head wrb_handle_list;
+ struct list_head wrb_handle_drvr_list;
+ struct wrb_handle **pwrb_handle_base;
+ struct wrb_handle **pwrb_handle_basestd;
+ struct iscsi_wrb *plast_wrb;
+ unsigned short alloc_index;
+ unsigned short free_index;
+ unsigned short wrb_handles_available;
+ unsigned short cid;
+ uint8_t ulp_num; /* ULP to which CID binded */
+ uint16_t register_set;
+ uint16_t doorbell_format;
+ uint32_t doorbell_offset;
+};
+
+struct ulp_cid_info {
+ unsigned short *cid_array;
+ unsigned short avlbl_cids;
+ unsigned short cid_alloc;
+ unsigned short cid_free;
+};
+
+#include "be.h"
+#define chip_be2(phba) (phba->generation == BE_GEN2)
+#define chip_be3_r(phba) (phba->generation == BE_GEN3)
+#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
+
+#define BEISCSI_ULP0 0
+#define BEISCSI_ULP1 1
+#define BEISCSI_ULP_COUNT 2
+#define BEISCSI_ULP0_LOADED 0x01
+#define BEISCSI_ULP1_LOADED 0x02
+
+#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \
+ (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids)
+#define BEISCSI_ULP0_AVLBL_CID(phba) \
+ BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0)
+#define BEISCSI_ULP1_AVLBL_CID(phba) \
+ BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1)
+
+struct beiscsi_hba {
+ struct hba_parameters params;
+ struct hwi_controller *phwi_ctrlr;
+ unsigned int mem_req[SE_MEM_MAX];
+ /* PCI BAR mapped addresses */
+ u8 __iomem *csr_va; /* CSR */
+ u8 __iomem *db_va; /* Door Bell */
+ u8 __iomem *pci_va; /* PCI Config */
+ struct be_bus_address csr_pa; /* CSR */
+ struct be_bus_address db_pa; /* CSR */
+ struct be_bus_address pci_pa; /* CSR */
+ /* PCI representation of our HBA */
+ struct pci_dev *pcidev;
+ unsigned short asic_revision;
+ unsigned int num_cpus;
+ unsigned int nxt_cqid;
+ struct msix_entry msix_entries[MAX_CPUS];
+ char *msi_name[MAX_CPUS];
+ bool msix_enabled;
+ struct be_mem_descriptor *init_mem;
+
+ unsigned short io_sgl_alloc_index;
+ unsigned short io_sgl_free_index;
+ unsigned short io_sgl_hndl_avbl;
+ struct sgl_handle **io_sgl_hndl_base;
+ struct sgl_handle **sgl_hndl_array;
+
+ unsigned short eh_sgl_alloc_index;
+ unsigned short eh_sgl_free_index;
+ unsigned short eh_sgl_hndl_avbl;
+ struct sgl_handle **eh_sgl_hndl_base;
+ spinlock_t io_sgl_lock;
+ spinlock_t mgmt_sgl_lock;
+ spinlock_t isr_lock;
+ spinlock_t async_pdu_lock;
+ unsigned int age;
+ struct list_head hba_queue;
+#define BE_MAX_SESSION 2048
+#define BE_SET_CID_TO_CRI(cri_index, cid) \
+ (phba->cid_to_cri_map[cid] = cri_index)
+#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
+ unsigned short cid_to_cri_map[BE_MAX_SESSION];
+ struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
+ struct iscsi_endpoint **ep_array;
+ struct beiscsi_conn **conn_table;
+ struct iscsi_boot_kset *boot_kset;
+ struct Scsi_Host *shost;
+ struct iscsi_iface *ipv4_iface;
+ struct iscsi_iface *ipv6_iface;
+ struct {
+ /**
+ * group together since they are used most frequently
+ * for cid to cri conversion
+ */
+ unsigned int phys_port;
+ unsigned int eqid_count;
+ unsigned int cqid_count;
+ unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
+#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \
+ (phba->fw_config.iscsi_cid_count[ulp_num])
+ unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT];
+
+ unsigned short iscsi_features;
+ uint16_t dual_ulp_aware;
+ unsigned long ulp_supported;
+ } fw_config;
+
+ unsigned int state;
+ bool fw_timeout;
+ bool ue_detected;
+ struct delayed_work beiscsi_hw_check_task;
+
+ bool mac_addr_set;
+ u8 mac_address[ETH_ALEN];
+ char fw_ver_str[BEISCSI_VER_STRLEN];
+ char wq_name[20];
+ struct workqueue_struct *wq; /* The actuak work queue */
+ struct be_ctrl_info ctrl;
+ unsigned int generation;
+ unsigned int interface_handle;
+ struct mgmt_session_info boot_sess;
+ struct invalidate_command_table inv_tbl[128];
+
+ struct be_aic_obj aic_obj[MAX_CPUS];
+ unsigned int attr_log_enable;
+ int (*iotask_fn)(struct iscsi_task *,
+ struct scatterlist *sg,
+ uint32_t num_sg, uint32_t xferlen,
+ uint32_t writedir);
+};
+
+struct beiscsi_session {
+ struct pci_pool *bhs_pool;
+};
+
+/**
+ * struct beiscsi_conn - iscsi connection structure
+ */
+struct beiscsi_conn {
+ struct iscsi_conn *conn;
+ struct beiscsi_hba *phba;
+ u32 exp_statsn;
+ u32 doorbell_offset;
+ u32 beiscsi_conn_cid;
+ struct beiscsi_endpoint *ep;
+ unsigned short login_in_progress;
+ struct wrb_handle *plogin_wrb_handle;
+ struct sgl_handle *plogin_sgl_handle;
+ struct beiscsi_session *beiscsi_sess;
+ struct iscsi_task *task;
+};
+
+/* This structure is used by the chip */
+struct pdu_data_out {
+ u32 dw[12];
+};
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_pdu_data_out {
+ u8 opcode[6]; /* opcode */
+ u8 rsvd0[2]; /* should be 0 */
+ u8 rsvd1[7];
+ u8 final_bit; /* F bit */
+ u8 rsvd2[16];
+ u8 ahs_length[8]; /* no AHS */
+ u8 data_len_hi[8];
+ u8 data_len_lo[16]; /* DataSegmentLength */
+ u8 lun[64];
+ u8 itt[32]; /* ITT; initiator task tag */
+ u8 ttt[32]; /* TTT; valid for R2T or 0xffffffff */
+ u8 rsvd3[32];
+ u8 exp_stat_sn[32];
+ u8 rsvd4[32];
+ u8 data_sn[32];
+ u8 buffer_offset[32];
+ u8 rsvd5[32];
+};
+
+struct be_cmd_bhs {
+ struct iscsi_scsi_req iscsi_hdr;
+ unsigned char pad1[16];
+ struct pdu_data_out iscsi_data_pdu;
+ unsigned char pad2[BE_SENSE_INFO_SIZE -
+ sizeof(struct pdu_data_out)];
+};
+
+struct beiscsi_io_task {
+ struct wrb_handle *pwrb_handle;
+ struct sgl_handle *psgl_handle;
+ struct beiscsi_conn *conn;
+ struct scsi_cmnd *scsi_cmnd;
+ unsigned int cmd_sn;
+ unsigned int flags;
+ unsigned short cid;
+ unsigned short header_len;
+ itt_t libiscsi_itt;
+ struct be_cmd_bhs *cmd_bhs;
+ struct be_bus_address bhs_pa;
+ unsigned short bhs_len;
+ dma_addr_t mtask_addr;
+ uint32_t mtask_data_count;
+ uint8_t wrb_type;
+};
+
+struct be_nonio_bhs {
+ struct iscsi_hdr iscsi_hdr;
+ unsigned char pad1[16];
+ struct pdu_data_out iscsi_data_pdu;
+ unsigned char pad2[BE_SENSE_INFO_SIZE -
+ sizeof(struct pdu_data_out)];
+};
+
+struct be_status_bhs {
+ struct iscsi_scsi_req iscsi_hdr;
+ unsigned char pad1[16];
+ /**
+ * The plus 2 below is to hold the sense info length that gets
+ * DMA'ed by RxULP
+ */
+ unsigned char sense_info[BE_SENSE_INFO_SIZE];
+};
+
+struct iscsi_sge {
+ u32 dw[4];
+};
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_iscsi_sge {
+ u8 addr_hi[32];
+ u8 addr_lo[32];
+ u8 sge_offset[22]; /* DWORD 2 */
+ u8 rsvd0[9]; /* DWORD 2 */
+ u8 last_sge; /* DWORD 2 */
+ u8 len[17]; /* DWORD 3 */
+ u8 rsvd1[15]; /* DWORD 3 */
+};
+
+struct beiscsi_offload_params {
+ u32 dw[6];
+};
+
+#define OFFLD_PARAMS_ERL 0x00000003
+#define OFFLD_PARAMS_DDE 0x00000004
+#define OFFLD_PARAMS_HDE 0x00000008
+#define OFFLD_PARAMS_IR2T 0x00000010
+#define OFFLD_PARAMS_IMD 0x00000020
+#define OFFLD_PARAMS_DATA_SEQ_INORDER 0x00000040
+#define OFFLD_PARAMS_PDU_SEQ_INORDER 0x00000080
+#define OFFLD_PARAMS_MAX_R2T 0x00FFFF00
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_beiscsi_offload_params {
+ u8 max_burst_length[32];
+ u8 max_send_data_segment_length[32];
+ u8 first_burst_length[32];
+ u8 erl[2];
+ u8 dde[1];
+ u8 hde[1];
+ u8 ir2t[1];
+ u8 imd[1];
+ u8 data_seq_inorder[1];
+ u8 pdu_seq_inorder[1];
+ u8 max_r2t[16];
+ u8 pad[8];
+ u8 exp_statsn[32];
+ u8 max_recv_data_segment_length[32];
+};
+
+/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba, struct sol_cqe *psol);*/
+
+struct async_pdu_handle {
+ struct list_head link;
+ struct be_bus_address pa;
+ void *pbuffer;
+ unsigned int consumed;
+ unsigned char index;
+ unsigned char is_header;
+ unsigned short cri;
+ unsigned long buffer_len;
+};
+
+struct hwi_async_entry {
+ struct {
+ unsigned char hdr_received;
+ unsigned char hdr_len;
+ unsigned short bytes_received;
+ unsigned int bytes_needed;
+ struct list_head list;
+ } wait_queue;
+
+ struct list_head header_busy_list;
+ struct list_head data_busy_list;
+};
+
+struct hwi_async_pdu_context {
+ struct {
+ struct be_bus_address pa_base;
+ void *va_base;
+ void *ring_base;
+ struct async_pdu_handle *handle_base;
+
+ unsigned int host_write_ptr;
+ unsigned int ep_read_ptr;
+ unsigned int writables;
+
+ unsigned int free_entries;
+ unsigned int busy_entries;
+
+ struct list_head free_list;
+ } async_header;
+
+ struct {
+ struct be_bus_address pa_base;
+ void *va_base;
+ void *ring_base;
+ struct async_pdu_handle *handle_base;
+
+ unsigned int host_write_ptr;
+ unsigned int ep_read_ptr;
+ unsigned int writables;
+
+ unsigned int free_entries;
+ unsigned int busy_entries;
+ struct list_head free_list;
+ } async_data;
+
+ unsigned int buffer_size;
+ unsigned int num_entries;
+#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
+ unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
+ /**
+ * This is a varying size list! Do not add anything
+ * after this entry!!
+ */
+ struct hwi_async_entry *async_entry;
+};
+
+#define PDUCQE_CODE_MASK 0x0000003F
+#define PDUCQE_DPL_MASK 0xFFFF0000
+#define PDUCQE_INDEX_MASK 0x0000FFFF
+
+struct i_t_dpdu_cqe {
+ u32 dw[4];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_i_t_dpdu_cqe {
+ u8 db_addr_hi[32];
+ u8 db_addr_lo[32];
+ u8 code[6];
+ u8 cid[10];
+ u8 dpl[16];
+ u8 index[16];
+ u8 num_cons[10];
+ u8 rsvd0[4];
+ u8 final;
+ u8 valid;
+} __packed;
+
+struct amap_i_t_dpdu_cqe_v2 {
+ u8 db_addr_hi[32]; /* DWORD 0 */
+ u8 db_addr_lo[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 num_cons; /* DWORD 2*/
+ u8 rsvd0[8]; /* DWORD 2 */
+ u8 dpl[17]; /* DWORD 2 */
+ u8 index[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd1; /* DWORD 3 */
+ u8 final; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
+
+#define CQE_VALID_MASK 0x80000000
+#define CQE_CODE_MASK 0x0000003F
+#define CQE_CID_MASK 0x0000FFC0
+
+#define EQE_VALID_MASK 0x00000001
+#define EQE_MAJORCODE_MASK 0x0000000E
+#define EQE_RESID_MASK 0xFFFF0000
+
+struct be_eq_entry {
+ u32 dw[1];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_eq_entry {
+ u8 valid; /* DWORD 0 */
+ u8 major_code[3]; /* DWORD 0 */
+ u8 minor_code[12]; /* DWORD 0 */
+ u8 resource_id[16]; /* DWORD 0 */
+
+} __packed;
+
+struct cq_db {
+ u32 dw[1];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_cq_db {
+ u8 qid[10];
+ u8 event[1];
+ u8 rsvd0[5];
+ u8 num_popped[13];
+ u8 rearm[1];
+ u8 rsvd1[2];
+} __packed;
+
+void beiscsi_process_eq(struct beiscsi_hba *phba);
+
+struct iscsi_wrb {
+ u32 dw[16];
+} __packed;
+
+#define WRB_TYPE_MASK 0xF0000000
+#define SKH_WRB_TYPE_OFFSET 27
+#define BE_WRB_TYPE_OFFSET 28
+
+#define ADAPTER_SET_WRB_TYPE(pwrb, wrb_type, type_offset) \
+ (pwrb->dw[0] |= (wrb_type << type_offset))
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_iscsi_wrb {
+ u8 lun[14]; /* DWORD 0 */
+ u8 lt; /* DWORD 0 */
+ u8 invld; /* DWORD 0 */
+ u8 wrb_idx[8]; /* DWORD 0 */
+ u8 dsp; /* DWORD 0 */
+ u8 dmsg; /* DWORD 0 */
+ u8 undr_run; /* DWORD 0 */
+ u8 over_run; /* DWORD 0 */
+ u8 type[4]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 r2t_exp_dtl[24]; /* DWORD 1 */
+ u8 sgl_icd_idx[12]; /* DWORD 2 */
+ u8 rsvd0[20]; /* DWORD 2 */
+ u8 exp_data_sn[32]; /* DWORD 3 */
+ u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
+ u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
+ u8 cmdsn_itt[32]; /* DWORD 6 */
+ u8 dif_ref_tag[32]; /* DWORD 7 */
+ u8 sge0_addr_hi[32]; /* DWORD 8 */
+ u8 sge0_addr_lo[32]; /* DWORD 9 */
+ u8 sge0_offset[22]; /* DWORD 10 */
+ u8 pbs; /* DWORD 10 */
+ u8 dif_mode[2]; /* DWORD 10 */
+ u8 rsvd1[6]; /* DWORD 10 */
+ u8 sge0_last; /* DWORD 10 */
+ u8 sge0_len[17]; /* DWORD 11 */
+ u8 dif_meta_tag[14]; /* DWORD 11 */
+ u8 sge0_in_ddr; /* DWORD 11 */
+ u8 sge1_addr_hi[32]; /* DWORD 12 */
+ u8 sge1_addr_lo[32]; /* DWORD 13 */
+ u8 sge1_r2t_offset[22]; /* DWORD 14 */
+ u8 rsvd2[9]; /* DWORD 14 */
+ u8 sge1_last; /* DWORD 14 */
+ u8 sge1_len[17]; /* DWORD 15 */
+ u8 ref_sgl_icd_idx[12]; /* DWORD 15 */
+ u8 rsvd3[2]; /* DWORD 15 */
+ u8 sge1_in_ddr; /* DWORD 15 */
+
+} __packed;
+
+struct amap_iscsi_wrb_v2 {
+ u8 r2t_exp_dtl[25]; /* DWORD 0 */
+ u8 rsvd0[2]; /* DWORD 0*/
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 lun[16]; /* DWORD 1 */
+ u8 sgl_idx[16]; /* DWORD 2 */
+ u8 ref_sgl_icd_idx[16]; /* DWORD 2 */
+ u8 exp_data_sn[32]; /* DWORD 3 */
+ u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
+ u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
+ u8 cq_id[16]; /* DWORD 6 */
+ u8 rsvd1[16]; /* DWORD 6 */
+ u8 cmdsn_itt[32]; /* DWORD 7 */
+ u8 sge0_addr_hi[32]; /* DWORD 8 */
+ u8 sge0_addr_lo[32]; /* DWORD 9 */
+ u8 sge0_offset[24]; /* DWORD 10 */
+ u8 rsvd2[7]; /* DWORD 10 */
+ u8 sge0_last; /* DWORD 10 */
+ u8 sge0_len[17]; /* DWORD 11 */
+ u8 rsvd3[7]; /* DWORD 11 */
+ u8 diff_enbl; /* DWORD 11 */
+ u8 u_run; /* DWORD 11 */
+ u8 o_run; /* DWORD 11 */
+ u8 invalid; /* DWORD 11 */
+ u8 dsp; /* DWORD 11 */
+ u8 dmsg; /* DWORD 11 */
+ u8 rsvd4; /* DWORD 11 */
+ u8 lt; /* DWORD 11 */
+ u8 sge1_addr_hi[32]; /* DWORD 12 */
+ u8 sge1_addr_lo[32]; /* DWORD 13 */
+ u8 sge1_r2t_offset[24]; /* DWORD 14 */
+ u8 rsvd5[7]; /* DWORD 14 */
+ u8 sge1_last; /* DWORD 14 */
+ u8 sge1_len[17]; /* DWORD 15 */
+ u8 rsvd6[15]; /* DWORD 15 */
+} __packed;
+
+
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
+void
+free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
+
+void beiscsi_process_all_cqs(struct work_struct *work);
+void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task);
+
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int num_processed,
+ unsigned char rearm, unsigned char event);
+
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
+
+static inline bool beiscsi_error(struct beiscsi_hba *phba)
+{
+ return phba->ue_detected || phba->fw_timeout;
+}
+
+struct pdu_nop_out {
+ u32 dw[12];
+};
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_pdu_nop_out {
+ u8 opcode[6]; /* opcode 0x00 */
+ u8 i_bit; /* I Bit */
+ u8 x_bit; /* reserved; should be 0 */
+ u8 fp_bit_filler1[7];
+ u8 f_bit; /* always 1 */
+ u8 reserved1[16];
+ u8 ahs_length[8]; /* no AHS */
+ u8 data_len_hi[8];
+ u8 data_len_lo[16]; /* DataSegmentLength */
+ u8 lun[64];
+ u8 itt[32]; /* initiator id for ping or 0xffffffff */
+ u8 ttt[32]; /* target id for ping or 0xffffffff */
+ u8 cmd_sn[32];
+ u8 exp_stat_sn[32];
+ u8 reserved5[128];
+};
+
+#define PDUBASE_OPCODE_MASK 0x0000003F
+#define PDUBASE_DATALENHI_MASK 0x0000FF00
+#define PDUBASE_DATALENLO_MASK 0xFFFF0000
+
+struct pdu_base {
+ u32 dw[16];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_pdu_base {
+ u8 opcode[6];
+ u8 i_bit; /* immediate bit */
+ u8 x_bit; /* reserved, always 0 */
+ u8 reserved1[24]; /* opcode-specific fields */
+ u8 ahs_length[8]; /* length units is 4 byte words */
+ u8 data_len_hi[8];
+ u8 data_len_lo[16]; /* DatasegmentLength */
+ u8 lun[64]; /* lun or opcode-specific fields */
+ u8 itt[32]; /* initiator task tag */
+ u8 reserved4[224];
+};
+
+struct iscsi_target_context_update_wrb {
+ u32 dw[16];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+#define BE_TGT_CTX_UPDT_CMD 0x07
+struct amap_iscsi_target_context_update_wrb {
+ u8 lun[14]; /* DWORD 0 */
+ u8 lt; /* DWORD 0 */
+ u8 invld; /* DWORD 0 */
+ u8 wrb_idx[8]; /* DWORD 0 */
+ u8 dsp; /* DWORD 0 */
+ u8 dmsg; /* DWORD 0 */
+ u8 undr_run; /* DWORD 0 */
+ u8 over_run; /* DWORD 0 */
+ u8 type[4]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 max_burst_length[19]; /* DWORD 1 */
+ u8 rsvd0[5]; /* DWORD 1 */
+ u8 rsvd1[15]; /* DWORD 2 */
+ u8 max_send_data_segment_length[17]; /* DWORD 2 */
+ u8 first_burst_length[14]; /* DWORD 3 */
+ u8 rsvd2[2]; /* DWORD 3 */
+ u8 tx_wrbindex_drv_msg[8]; /* DWORD 3 */
+ u8 rsvd3[5]; /* DWORD 3 */
+ u8 session_state[3]; /* DWORD 3 */
+ u8 rsvd4[16]; /* DWORD 4 */
+ u8 tx_jumbo; /* DWORD 4 */
+ u8 hde; /* DWORD 4 */
+ u8 dde; /* DWORD 4 */
+ u8 erl[2]; /* DWORD 4 */
+ u8 domain_id[5]; /* DWORD 4 */
+ u8 mode; /* DWORD 4 */
+ u8 imd; /* DWORD 4 */
+ u8 ir2t; /* DWORD 4 */
+ u8 notpredblq[2]; /* DWORD 4 */
+ u8 compltonack; /* DWORD 4 */
+ u8 stat_sn[32]; /* DWORD 5 */
+ u8 pad_buffer_addr_hi[32]; /* DWORD 6 */
+ u8 pad_buffer_addr_lo[32]; /* DWORD 7 */
+ u8 pad_addr_hi[32]; /* DWORD 8 */
+ u8 pad_addr_lo[32]; /* DWORD 9 */
+ u8 rsvd5[32]; /* DWORD 10 */
+ u8 rsvd6[32]; /* DWORD 11 */
+ u8 rsvd7[32]; /* DWORD 12 */
+ u8 rsvd8[32]; /* DWORD 13 */
+ u8 rsvd9[32]; /* DWORD 14 */
+ u8 rsvd10[32]; /* DWORD 15 */
+
+} __packed;
+
+#define BEISCSI_MAX_RECV_DATASEG_LEN (64 * 1024)
+#define BEISCSI_MAX_CXNS 1
+struct amap_iscsi_target_context_update_wrb_v2 {
+ u8 max_burst_length[24]; /* DWORD 0 */
+ u8 rsvd0[3]; /* DWORD 0 */
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 rsvd1[16]; /* DWORD 1 */
+ u8 max_send_data_segment_length[24]; /* DWORD 2 */
+ u8 rsvd2[8]; /* DWORD 2 */
+ u8 first_burst_length[24]; /* DWORD 3 */
+ u8 rsvd3[8]; /* DOWRD 3 */
+ u8 max_r2t[16]; /* DWORD 4 */
+ u8 rsvd4; /* DWORD 4 */
+ u8 hde; /* DWORD 4 */
+ u8 dde; /* DWORD 4 */
+ u8 erl[2]; /* DWORD 4 */
+ u8 rsvd5[6]; /* DWORD 4 */
+ u8 imd; /* DWORD 4 */
+ u8 ir2t; /* DWORD 4 */
+ u8 rsvd6[3]; /* DWORD 4 */
+ u8 stat_sn[32]; /* DWORD 5 */
+ u8 rsvd7[32]; /* DWORD 6 */
+ u8 rsvd8[32]; /* DWORD 7 */
+ u8 max_recv_dataseg_len[24]; /* DWORD 8 */
+ u8 rsvd9[8]; /* DWORD 8 */
+ u8 rsvd10[32]; /* DWORD 9 */
+ u8 rsvd11[32]; /* DWORD 10 */
+ u8 max_cxns[16]; /* DWORD 11 */
+ u8 rsvd12[11]; /* DWORD 11*/
+ u8 invld; /* DWORD 11 */
+ u8 rsvd13;/* DWORD 11*/
+ u8 dmsg; /* DWORD 11 */
+ u8 data_seq_inorder; /* DWORD 11 */
+ u8 pdu_seq_inorder; /* DWORD 11 */
+ u8 rsvd14[32]; /*DWORD 12 */
+ u8 rsvd15[32]; /* DWORD 13 */
+ u8 rsvd16[32]; /* DWORD 14 */
+ u8 rsvd17[32]; /* DWORD 15 */
+} __packed;
+
+
+struct be_ring {
+ u32 pages; /* queue size in pages */
+ u32 id; /* queue id assigned by beklib */
+ u32 num; /* number of elements in queue */
+ u32 cidx; /* consumer index */
+ u32 pidx; /* producer index -- not used by most rings */
+ u32 item_size; /* size in bytes of one object */
+ u8 ulp_num; /* ULP to which CID binded */
+ u16 register_set;
+ u16 doorbell_format;
+ u32 doorbell_offset;
+
+ void *va; /* The virtual address of the ring. This
+ * should be last to allow 32 & 64 bit debugger
+ * extensions to work.
+ */
+};
+
+struct hwi_controller {
+ struct list_head io_sgl_list;
+ struct list_head eh_sgl_list;
+ struct sgl_handle *psgl_handle_base;
+ unsigned int wrb_mem_index;
+
+ struct hwi_wrb_context *wrb_context;
+ struct mcc_wrb *pmcc_wrb_base;
+ struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
+ struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
+ struct hwi_context_memory *phwi_ctxt;
+};
+
+enum hwh_type_enum {
+ HWH_TYPE_IO = 1,
+ HWH_TYPE_LOGOUT = 2,
+ HWH_TYPE_TMF = 3,
+ HWH_TYPE_NOP = 4,
+ HWH_TYPE_IO_RD = 5,
+ HWH_TYPE_LOGIN = 11,
+ HWH_TYPE_INVALID = 0xFFFFFFFF
+};
+
+struct wrb_handle {
+ enum hwh_type_enum type;
+ unsigned short wrb_index;
+ unsigned short nxt_wrb_index;
+
+ struct iscsi_task *pio_handle;
+ struct iscsi_wrb *pwrb;
+};
+
+struct hwi_context_memory {
+ /* Adaptive interrupt coalescing (AIC) info */
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+ struct be_eq_obj be_eq[MAX_CPUS];
+ struct be_queue_info be_cq[MAX_CPUS - 1];
+
+ struct be_queue_info *be_wrbq;
+ struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
+ struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
+ struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
+};
+
+/* Logging related definitions */
+#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */
+#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */
+#define BEISCSI_LOG_MISC 0x0004 /* Miscllaneous Events */
+#define BEISCSI_LOG_EH 0x0008 /* Error Handler */
+#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
+#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
+#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */
+
+#define beiscsi_log(phba, level, mask, fmt, arg...) \
+do { \
+ uint32_t log_value = phba->attr_log_enable; \
+ if (((mask) & log_value) || (level[1] <= '3')) \
+ shost_printk(level, phba->shost, \
+ fmt, __LINE__, ##arg); \
+} while (0)
+
+#endif
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
new file mode 100644
index 000000000..c2c4d6975
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -0,0 +1,1709 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include "be_mgmt.h"
+#include "be_iscsi.h"
+#include "be_main.h"
+
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
+
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+/*
+ * beiscsi_ue_detec()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+void beiscsi_ue_detect(struct beiscsi_hba *phba)
+{
+ uint32_t ue_hi = 0, ue_lo = 0;
+ uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+ uint8_t i = 0;
+
+ if (phba->ue_detected)
+ return;
+
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_LOW, &ue_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_LOW,
+ &ue_mask_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_HIGH,
+ &ue_hi);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_HI,
+ &ue_mask_hi);
+
+ ue_lo = (ue_lo & ~ue_mask_lo);
+ ue_hi = (ue_hi & ~ue_mask_hi);
+
+
+ if (ue_lo || ue_hi) {
+ phba->ue_detected = true;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : Error detected on the adapter\n");
+ }
+
+ if (ue_lo) {
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_LOW %s bit set\n",
+ desc_ue_status_low[i]);
+ }
+ }
+
+ if (ue_hi) {
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_HIGH %s bit set\n",
+ desc_ue_status_hi[i]);
+ }
+ }
+}
+
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *set_eqd, int num)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_modify_eq_delay *req;
+ unsigned int tag = 0;
+ int i;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->delay[i].phase = 0;
+ req->delay[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+/**
+ * mgmt_reopen_session()- Reopen a session based on reopen_type
+ * @phba: Device priv structure instance
+ * @reopen_type: Type of reopen_session FW should do.
+ * @sess_handle: Session Handle of the session to be re-opened
+ *
+ * return
+ * the TAG used for MBOX Command
+ *
+ **/
+unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
+ unsigned int reopen_type,
+ unsigned int sess_handle)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_reopen_session_req *req;
+ unsigned int tag = 0;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : In bescsi_get_boot_target\n");
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
+ sizeof(struct be_cmd_reopen_session_resp));
+
+ /* set the reopen_type,sess_handle */
+ req->reopen_type = reopen_type;
+ req->session_handle = sess_handle;
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_boot_target_req *req;
+ unsigned int tag = 0;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : In bescsi_get_boot_target\n");
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
+ sizeof(struct be_cmd_get_boot_target_resp));
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+ u32 boot_session_handle,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag = 0;
+ struct be_cmd_get_session_req *req;
+ struct be_cmd_get_session_resp *resp;
+ struct be_sge *sge;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : In beiscsi_get_session_info\n");
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ nonemb_cmd->size = sizeof(*resp);
+ req = nonemb_cmd->va;
+ memset(req, 0, sizeof(*req));
+ wrb = wrb_from_mccq(phba);
+ sge = nonembedded_sgl(wrb);
+ wrb->tag0 |= tag;
+
+
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
+ sizeof(*resp));
+ req->session_handle = boot_session_handle;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+/**
+ * mgmt_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_fw_cfg *req = embedded_payload(wrb);
+ int status = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ uint8_t ulp_num = 0;
+ struct be_fw_cfg *pfw_cfg;
+ pfw_cfg = req;
+
+ if (!is_chip_be2_be3r(phba)) {
+ phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+ phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_INIT,
+ "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+ phba->fw_config.eqid_count,
+ phba->fw_config.cqid_count);
+ }
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (pfw_cfg->ulp[ulp_num].ulp_mode &
+ BEISCSI_ULP_ISCSI_INI_MODE)
+ set_bit(ulp_num,
+ &phba->fw_config.ulp_supported);
+
+ phba->fw_config.phys_port = pfw_cfg->phys_port;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ phba->fw_config.iscsi_cid_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_base;
+ phba->fw_config.iscsi_cid_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_count;
+
+ phba->fw_config.iscsi_icd_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_base;
+ phba->fw_config.iscsi_icd_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_count;
+
+ phba->fw_config.iscsi_chain_start[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_base;
+ phba->fw_config.iscsi_chain_count[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_count;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Function loaded on ULP : %d\n"
+ "\tiscsi_cid_count : %d\n"
+ "\tiscsi_cid_start : %d\n"
+ "\t iscsi_icd_count : %d\n"
+ "\t iscsi_icd_start : %d\n",
+ ulp_num,
+ phba->fw_config.
+ iscsi_cid_count[ulp_num],
+ phba->fw_config.
+ iscsi_cid_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ phba->fw_config.
+ iscsi_icd_start[ulp_num]);
+ }
+ }
+
+ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+ BEISCSI_FUNC_DUA_MODE);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : DUA Mode : 0x%x\n",
+ phba->fw_config.dual_ulp_aware);
+
+ } else {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in mgmt_get_fw_config\n");
+ status = -EINVAL;
+ }
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ struct be_dma_mem nonemb_cmd;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_mgmt_controller_attributes *req;
+ struct be_sge *sge = nonembedded_sgl(wrb);
+ int status = 0;
+
+ nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ sizeof(struct be_mgmt_controller_attributes),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed to allocate memory for "
+ "mgmt_check_supported_fw\n");
+ return -ENOMEM;
+ }
+ nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
+ req = nonemb_cmd.va;
+ memset(req, 0, sizeof(*req));
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd.size);
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Firmware Version of CMD : %s\n"
+ "Firmware Version is : %s\n"
+ "Developer Build, not performing version check...\n",
+ resp->params.hba_attribs
+ .flashrom_version_string,
+ resp->params.hba_attribs.
+ firmware_version_string);
+
+ phba->fw_config.iscsi_features =
+ resp->params.hba_attribs.iscsi_features;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phba->fw_config.iscsi_features = %d\n",
+ phba->fw_config.iscsi_features);
+ memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+ firmware_version_string, BEISCSI_VER_STRLEN);
+ } else
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in mgmt_check_supported_fw\n");
+ spin_unlock(&ctrl->mbox_lock);
+ if (nonemb_cmd.va)
+ pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
+ return status;
+}
+
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba,
+ struct bsg_job *job,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_cmd_resp_hdr *resp;
+ struct be_mcc_wrb *wrb;
+ struct be_sge *mcc_sge;
+ unsigned int tag = 0;
+ struct iscsi_bsg_request *bsg_req = job->request;
+ struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
+ unsigned short region, sector_size, sector, offset;
+
+ nonemb_cmd->size = job->request_payload.payload_len;
+ memset(nonemb_cmd->va, 0, nonemb_cmd->size);
+ resp = nonemb_cmd->va;
+ region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+ sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4];
+ req->region = region;
+ req->sector = sector;
+ req->offset = offset;
+ spin_lock(&ctrl->mbox_lock);
+
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+ case BEISCSI_WRITE_FLASH:
+ offset = sector * sector_size + offset;
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_WRITE_FLASH, sizeof(*req));
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ nonemb_cmd->va + offset, job->request_len);
+ break;
+ case BEISCSI_READ_FLASH:
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_READ_FLASH, sizeof(*req));
+ break;
+ default:
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Unsupported cmd = 0x%x\n\n",
+ bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return -ENOSYS;
+ }
+
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ mcc_sge = nonembedded_sgl(wrb);
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
+ job->request_payload.sg_cnt);
+ mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
+ wrb->tag0 |= tag;
+
+ be_mcc_notify(phba);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+/**
+ * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp_num: ULP number.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+ struct iscsi_cleanup_req *req = embedded_payload(wrb);
+ int status = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
+
+ req->chute = (1 << ulp_num);
+ req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
+ req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
+
+ status = be_mcc_notify_wait(phba);
+ if (status)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BG_%d : mgmt_epfw_cleanup , FAILED\n");
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
+ struct invalidate_command_table *inv_tbl,
+ unsigned int num_invalidate, unsigned int cid,
+ struct be_dma_mem *nonemb_cmd)
+
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_sge *sge;
+ struct invalidate_commands_params_in *req;
+ unsigned int i, tag = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ req = nonemb_cmd->va;
+ memset(req, 0, sizeof(*req));
+ wrb = wrb_from_mccq(phba);
+ sge = nonembedded_sgl(wrb);
+ wrb->tag0 |= tag;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
+ sizeof(*req));
+ req->ref_handle = 0;
+ req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
+ for (i = 0; i < num_invalidate; i++) {
+ req->table[i].icd = inv_tbl->icd;
+ req->table[i].cid = inv_tbl->cid;
+ req->icd_count++;
+ inv_tbl++;
+ }
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep,
+ unsigned short cid,
+ unsigned short issue_reset,
+ unsigned short savecfg_flag)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct iscsi_invalidate_connection_params_in *req;
+ unsigned int tag = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+ wrb = wrb_from_mccq(phba);
+ wrb->tag0 |= tag;
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
+ sizeof(*req));
+ req->session_handle = beiscsi_ep->fw_handle;
+ req->cid = cid;
+ if (issue_reset)
+ req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
+ else
+ req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
+ req->save_cfg = savecfg_flag;
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
+ unsigned short cid, unsigned int upload_flag)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct tcp_upload_params_in *req;
+ unsigned int tag = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
+ OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
+ req->id = (unsigned short)cid;
+ req->upload_type = (unsigned char)upload_flag;
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+/**
+ * mgmt_open_connection()- Establish a TCP CXN
+ * @dst_addr: Destination Address
+ * @beiscsi_ep: ptr to device endpoint struct
+ * @nonemb_cmd: ptr to memory allocated for command
+ *
+ * return
+ * Success: Tag number of the MBX Command issued
+ * Failure: Error code
+ **/
+int mgmt_open_connection(struct beiscsi_hba *phba,
+ struct sockaddr *dst_addr,
+ struct beiscsi_endpoint *beiscsi_ep,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
+ struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct tcp_connect_and_offload_in_v1 *req;
+ unsigned short def_hdr_id;
+ unsigned short def_data_id;
+ struct phys_addr template_address = { 0, 0 };
+ struct phys_addr *ptemplate_address;
+ unsigned int tag = 0;
+ unsigned int i, ulp_num;
+ unsigned short cid = beiscsi_ep->ep_cid;
+ struct be_sge *sge;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num;
+
+ def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num);
+ def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);
+
+ ptemplate_address = &template_address;
+ ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+ wrb = wrb_from_mccq(phba);
+ sge = nonembedded_sgl(wrb);
+
+ req = nonemb_cmd->va;
+ memset(req, 0, sizeof(*req));
+ wrb->tag0 |= tag;
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
+ nonemb_cmd->size);
+ if (dst_addr->sa_family == PF_INET) {
+ __be32 s_addr = daddr_in->sin_addr.s_addr;
+ req->ip_address.ip_type = BE2_IPV4;
+ req->ip_address.addr[0] = s_addr & 0x000000ff;
+ req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
+ req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
+ req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24;
+ req->tcp_port = ntohs(daddr_in->sin_port);
+ beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
+ beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
+ beiscsi_ep->ip_type = BE2_IPV4;
+ } else if (dst_addr->sa_family == PF_INET6) {
+ req->ip_address.ip_type = BE2_IPV6;
+ memcpy(&req->ip_address.addr,
+ &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
+ req->tcp_port = ntohs(daddr_in6->sin6_port);
+ beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
+ memcpy(&beiscsi_ep->dst6_addr,
+ &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
+ beiscsi_ep->ip_type = BE2_IPV6;
+ } else{
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BG_%d : unknown addr family %d\n",
+ dst_addr->sa_family);
+ spin_unlock(&ctrl->mbox_lock);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -EINVAL;
+
+ }
+ req->cid = cid;
+ i = phba->nxt_cqid++;
+ if (phba->nxt_cqid == phba->num_cpus)
+ phba->nxt_cqid = 0;
+ req->cq_id = phwi_context->be_cq[i].id;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BG_%d : i=%d cq_id=%d\n", i, req->cq_id);
+ req->defq_id = def_hdr_id;
+ req->hdr_ring_id = def_hdr_id;
+ req->data_ring_id = def_data_id;
+ req->do_offload = 1;
+ req->dataout_template_pa.lo = ptemplate_address->lo;
+ req->dataout_template_pa.hi = ptemplate_address->hi;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ if (!is_chip_be2_be3r(phba)) {
+ req->hdr.version = MBX_CMD_VER1;
+ req->tcp_window_size = 0;
+ req->tcp_window_scale_count = 2;
+ }
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
+ struct be_cmd_get_all_if_id_req *pbe_allid = req;
+ int status = 0;
+
+ memset(wrb, 0, sizeof(*wrb));
+
+ spin_lock(&ctrl->mbox_lock);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+ sizeof(*req));
+ status = be_mbox_notify(ctrl);
+ if (!status)
+ phba->interface_handle = pbe_allid->if_hndl_list[0];
+ else {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed in mgmt_get_all_if_id\n");
+ }
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+/*
+ * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
+ * @phba: Driver priv structure
+ * @nonemb_cmd: Address of the MBX command issued
+ * @resp_buf: Buffer to copy the MBX cmd response
+ * @resp_buf_len: respone lenght to be copied
+ *
+ **/
+static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *nonemb_cmd, void *resp_buf,
+ int resp_buf_len)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_sge *sge;
+ unsigned int tag;
+ int rc = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ rc = -ENOMEM;
+ goto free_cmd;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ wrb->tag0 |= tag;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+
+ if (resp_buf)
+ memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
+ if (rc) {
+ /* Check if the MBX Cmd needs to be re-issued */
+ if (rc == -EAGAIN)
+ return rc;
+
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+
+ if (rc != -EBUSY)
+ goto free_cmd;
+ else
+ return rc;
+ }
+free_cmd:
+ pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+ nonemb_cmd->va, nonemb_cmd->dma);
+ return rc;
+}
+
+static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
+ int iscsi_cmd, int size)
+{
+ cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+ if (!cmd->va) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to allocate memory for if info\n");
+ return -ENOMEM;
+ }
+ cmd->size = size;
+ be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+ return 0;
+}
+
+static int
+mgmt_static_ip_modify(struct beiscsi_hba *phba,
+ struct be_cmd_get_if_info_resp *if_info,
+ struct iscsi_iface_param_info *ip_param,
+ struct iscsi_iface_param_info *subnet_param,
+ uint32_t ip_action)
+{
+ struct be_cmd_set_ip_addr_req *req;
+ struct be_dma_mem nonemb_cmd;
+ uint32_t ip_type;
+ int rc;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
+ if (rc)
+ return rc;
+
+ ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+ BE2_IPV6 : BE2_IPV4 ;
+
+ req = nonemb_cmd.va;
+ req->ip_params.record_entry_count = 1;
+ req->ip_params.ip_record.action = ip_action;
+ req->ip_params.ip_record.interface_hndl =
+ phba->interface_handle;
+ req->ip_params.ip_record.ip_addr.size_of_structure =
+ sizeof(struct be_ip_addr_subnet_format);
+ req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+
+ if (ip_action == IP_ACTION_ADD) {
+ memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+ sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ if (subnet_param)
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+ subnet_param->value,
+ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ } else {
+ memcpy(req->ip_params.ip_record.ip_addr.addr,
+ if_info->ip_addr.addr,
+ sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+ if_info->ip_addr.subnet_mask,
+ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ if (rc < 0)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Modify existing IP Address\n");
+ return rc;
+}
+
+static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+ uint32_t gtway_action, uint32_t param_len)
+{
+ struct be_cmd_set_def_gateway_req *req;
+ struct be_dma_mem nonemb_cmd;
+ int rt_val;
+
+
+ rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
+ sizeof(*req));
+ if (rt_val)
+ return rt_val;
+
+ req = nonemb_cmd.va;
+ req->action = gtway_action;
+ req->ip_addr.ip_type = BE2_IPV4;
+
+ memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
+
+int mgmt_set_ip(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *ip_param,
+ struct iscsi_iface_param_info *subnet_param,
+ uint32_t boot_proto)
+{
+ struct be_cmd_get_def_gateway_resp gtway_addr_set;
+ struct be_cmd_get_if_info_resp *if_info;
+ struct be_cmd_set_dhcp_req *dhcpreq;
+ struct be_cmd_rel_dhcp_req *reldhcp;
+ struct be_dma_mem nonemb_cmd;
+ uint8_t *gtway_addr;
+ uint32_t ip_type;
+ int rc;
+
+ if (mgmt_get_all_if_id(phba))
+ return -EIO;
+
+ ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+ BE2_IPV6 : BE2_IPV4 ;
+
+ rc = mgmt_get_if_info(phba, ip_type, &if_info);
+ if (rc)
+ return rc;
+
+ if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+ if (if_info->dhcp_state) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : DHCP Already Enabled\n");
+ goto exit;
+ }
+ /* The ip_param->len is 1 in DHCP case. Setting
+ proper IP len as this it is used while
+ freeing the Static IP.
+ */
+ ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+ IP_V6_LEN : IP_V4_LEN;
+
+ } else {
+ if (if_info->dhcp_state) {
+
+ memset(if_info, 0, sizeof(*if_info));
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+ sizeof(*reldhcp));
+
+ if (rc)
+ goto exit;
+
+ reldhcp = nonemb_cmd.va;
+ reldhcp->interface_hndl = phba->interface_handle;
+ reldhcp->ip_type = ip_type;
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ if (rc < 0) {
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Delete existing dhcp\n");
+ goto exit;
+ }
+ }
+ }
+
+ /* Delete the Static IP Set */
+ if (if_info->ip_addr.addr[0]) {
+ rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
+ IP_ACTION_DEL);
+ if (rc)
+ goto exit;
+ }
+
+ /* Delete the Gateway settings if mode change is to DHCP */
+ if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+ memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+ rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+ if (rc) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Get Gateway Addr\n");
+ goto exit;
+ }
+
+ if (gtway_addr_set.ip_addr.addr[0]) {
+ gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+ rc = mgmt_modify_gateway(phba, gtway_addr,
+ IP_ACTION_DEL, IP_V4_LEN);
+
+ if (rc) {
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to clear Gateway Addr Set\n");
+ goto exit;
+ }
+ }
+ }
+
+ /* Set Adapter to DHCP/Static Mode */
+ if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+ sizeof(*dhcpreq));
+ if (rc)
+ goto exit;
+
+ dhcpreq = nonemb_cmd.va;
+ dhcpreq->flags = BLOCKING;
+ dhcpreq->retry_count = 1;
+ dhcpreq->interface_hndl = phba->interface_handle;
+ dhcpreq->ip_type = BE2_DHCP_V4;
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ } else {
+ rc = mgmt_static_ip_modify(phba, if_info, ip_param,
+ subnet_param, IP_ACTION_ADD);
+ }
+
+exit:
+ kfree(if_info);
+ return rc;
+}
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *gateway_param)
+{
+ struct be_cmd_get_def_gateway_resp gtway_addr_set;
+ uint8_t *gtway_addr;
+ int rt_val;
+
+ memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+ rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+ if (rt_val) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Get Gateway Addr\n");
+ return rt_val;
+ }
+
+ if (gtway_addr_set.ip_addr.addr[0]) {
+ gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+ rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
+ gateway_param->len);
+ if (rt_val) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to clear Gateway Addr Set\n");
+ return rt_val;
+ }
+ }
+
+ gtway_addr = (uint8_t *)&gateway_param->value;
+ rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
+ gateway_param->len);
+
+ if (rt_val)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Set Gateway Addr\n");
+
+ return rt_val;
+}
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_def_gateway_resp *gateway)
+{
+ struct be_cmd_get_def_gateway_req *req;
+ struct be_dma_mem nonemb_cmd;
+ int rc;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+ sizeof(*gateway));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ req->ip_type = ip_type;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
+ sizeof(*gateway));
+}
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_if_info_resp **if_info)
+{
+ struct be_cmd_get_if_info_req *req;
+ struct be_dma_mem nonemb_cmd;
+ uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
+ int rc;
+
+ if (mgmt_get_all_if_id(phba))
+ return -EIO;
+
+ do {
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+ ioctl_size);
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ req->interface_hndl = phba->interface_handle;
+ req->ip_type = ip_type;
+
+ /* Allocate memory for if_info */
+ *if_info = kzalloc(ioctl_size, GFP_KERNEL);
+ if (!*if_info) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : Memory Allocation Failure\n");
+
+ /* Free the DMA memory for the IOCTL issuing */
+ pci_free_consistent(phba->ctrl.pdev,
+ nonemb_cmd.size,
+ nonemb_cmd.va,
+ nonemb_cmd.dma);
+ return -ENOMEM;
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
+ ioctl_size);
+
+ /* Check if the error is because of Insufficent_Buffer */
+ if (rc == -EAGAIN) {
+
+ /* Get the new memory size */
+ ioctl_size = ((struct be_cmd_resp_hdr *)
+ nonemb_cmd.va)->actual_resp_len;
+ ioctl_size += sizeof(struct be_cmd_req_hdr);
+
+ /* Free the previous allocated DMA memory */
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va,
+ nonemb_cmd.dma);
+
+ /* Free the virtual memory */
+ kfree(*if_info);
+ } else
+ break;
+ } while (true);
+ return rc;
+}
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+ struct be_cmd_get_nic_conf_resp *nic)
+{
+ struct be_dma_mem nonemb_cmd;
+ int rc;
+
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+ sizeof(*nic));
+ if (rc)
+ return rc;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic));
+}
+
+
+
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
+{
+ unsigned int tag = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_hba_name *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
+ sizeof(*req));
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
+{
+ unsigned int tag = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_ntwk_link_status_req *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req));
+
+ be_mcc_notify(phba);
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+/**
+ * be_mgmt_get_boot_shandle()- Get the session handle
+ * @phba: device priv structure instance
+ * @s_handle: session handle returned for boot session.
+ *
+ * Get the boot target session handle. In case of
+ * crashdump mode driver has to issue and MBX Cmd
+ * for FW to login to boot target
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ *
+ **/
+int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
+ unsigned int *s_handle)
+{
+ struct be_cmd_get_boot_target_resp *boot_resp;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag;
+ uint8_t boot_retry = 3;
+ int rc;
+
+ do {
+ /* Get the Boot Target Session Handle and Count*/
+ tag = mgmt_get_boot_target(phba);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BG_%d : Getting Boot Target Info Failed\n");
+ return -EAGAIN;
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : MBX CMD get_boot_target Failed\n");
+ return -EBUSY;
+ }
+
+ boot_resp = embedded_payload(wrb);
+
+ /* Check if the there are any Boot targets configured */
+ if (!boot_resp->boot_session_count) {
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d ;No boot targets configured\n");
+ return -ENXIO;
+ }
+
+ /* FW returns the session handle of the boot session */
+ if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
+ *s_handle = boot_resp->boot_session_handle;
+ return 0;
+ }
+
+ /* Issue MBX Cmd to FW to login to the boot target */
+ tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
+ INVALID_SESS_HANDLE);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : mgmt_reopen_session Failed\n");
+ return -EAGAIN;
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : mgmt_reopen_session Failed");
+ return rc;
+ }
+ } while (--boot_retry);
+
+ /* Couldn't log into the boot target */
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : Login to Boot Target Failed\n");
+ return -ENXIO;
+}
+
+/**
+ * mgmt_set_vlan()- Issue and wait for CMD completion
+ * @phba: device private structure instance
+ * @vlan_tag: VLAN tag
+ *
+ * Issue the MBX Cmd and wait for the completion of the
+ * command.
+ *
+ * returns
+ * Success: 0
+ * Failure: Non-Xero Value
+ **/
+int mgmt_set_vlan(struct beiscsi_hba *phba,
+ uint16_t vlan_tag)
+{
+ int rc;
+ unsigned int tag;
+
+ tag = be_cmd_set_vlan(phba, vlan_tag);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+ "BG_%d : VLAN Setting Failed\n");
+ return -EBUSY;
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+ "BS_%d : VLAN MBX Cmd Failed\n");
+ return rc;
+ }
+ return rc;
+}
+
+/**
+ * beiscsi_drvr_ver_disp()- Display the driver Name and Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text driver name and version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
+}
+
+/**
+ * beiscsi_fw_ver_disp()- Display Firmware Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Firmware version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
+}
+
+/**
+ * beiscsi_active_session_disp()- Display Sessions Active
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
+ total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ (total_cids - avlbl_cids));
+ } else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
+ }
+
+ return len;
+}
+
+/**
+ * beiscsi_free_session_disp()- Display Avaliable Session
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint16_t ulp_num, len = 0;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
+ }
+
+ return len;
+}
+
+/**
+ * beiscsi_adap_family_disp()- Display adapter family.
+ * @dev: ptr to device to get priv structure
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text driver name and version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ uint16_t dev_id = 0;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ dev_id = phba->pcidev->device;
+ switch (dev_id) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ case OC_DEVICE_ID2:
+ return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n");
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID3:
+ return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
+ break;
+ case OC_SKH_ID1:
+ return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
+ break;
+ default:
+ return snprintf(buf, PAGE_SIZE,
+ "Unknown Adapter Family: 0x%x\n", dev_id);
+ break;
+ }
+}
+
+/**
+ * beiscsi_phys_port()- Display Physical Port Identifier
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text port identifier
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+ phba->fw_config.phys_port);
+}
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ first_burst_length,
+ pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ session_state, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
+ pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
+ pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
+ 0);
+
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_hi, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_lo, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+}
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ first_burst_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_recv_dataseg_len, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_recv_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_cxns, pwrb, BEISCSI_MAX_CXNS);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ data_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ data_seq_inorder) / 32] &
+ OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ pdu_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder) / 32] &
+ OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_r2t) / 32] &
+ OFFLD_PARAMS_MAX_R2T) >> 8);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
+}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
new file mode 100644
index 000000000..9356b9a86
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -0,0 +1,341 @@
+/**
+ * Copyright (C) 2005 - 2015 Avago Technologies
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ *
+ * Contact Information:
+ * linux-drivers@avagotech.com
+ *
+ * Avago Technologies
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef _BEISCSI_MGMT_
+#define _BEISCSI_MGMT_
+
+#include <scsi/scsi_bsg_iscsi.h>
+#include "be_iscsi.h"
+#include "be_main.h"
+
+#define IP_ACTION_ADD 0x01
+#define IP_ACTION_DEL 0x02
+
+#define IP_V6_LEN 16
+#define IP_V4_LEN 4
+
+/* UE Status and Mask register */
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_MASK_LOW 0xA8
+#define PCICFG_UE_STATUS_MASK_HI 0xAC
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_sge {
+ u8 pa_lo[32]; /* dword 0 */
+ u8 pa_hi[32]; /* dword 1 */
+ u8 length[32]; /* DWORD 2 */
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_wrb_payload {
+ union {
+ struct amap_mcc_sge sgl[19];
+ u8 embedded[59 * 32]; /* DWORDS 57 to 115 */
+ } u;
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_wrb {
+ u8 embedded; /* DWORD 0 */
+ u8 rsvd0[2]; /* DWORD 0 */
+ u8 sge_count[5]; /* DWORD 0 */
+ u8 rsvd1[16]; /* DWORD 0 */
+ u8 special[8]; /* DWORD 0 */
+ u8 payload_length[32];
+ u8 tag[64]; /* DWORD 2 */
+ u8 rsvd2[32]; /* DWORD 4 */
+ struct amap_mcc_wrb_payload payload;
+};
+
+struct mcc_sge {
+ u32 pa_lo; /* dword 0 */
+ u32 pa_hi; /* dword 1 */
+ u32 length; /* DWORD 2 */
+} __packed;
+
+struct mcc_wrb_payload {
+ union {
+ struct mcc_sge sgl[19];
+ u32 embedded[59]; /* DWORDS 57 to 115 */
+ } u;
+} __packed;
+
+#define MCC_WRB_EMBEDDED_MASK 0x00000001
+
+struct mcc_wrb {
+ u32 dw[0]; /* DWORD 0 */
+ u32 payload_length;
+ u32 tag[2]; /* DWORD 2 */
+ u32 rsvd2[1]; /* DWORD 4 */
+ struct mcc_wrb_payload payload;
+};
+
+int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
+int mgmt_open_connection(struct beiscsi_hba *phba,
+ struct sockaddr *dst_addr,
+ struct beiscsi_endpoint *beiscsi_ep,
+ struct be_dma_mem *nonemb_cmd);
+
+unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
+ unsigned short cid,
+ unsigned int upload_flag);
+unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
+ struct invalidate_command_table *inv_tbl,
+ unsigned int num_invalidate, unsigned int cid,
+ struct be_dma_mem *nonemb_cmd);
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba,
+ struct bsg_job *job,
+ struct be_dma_mem *nonemb_cmd);
+
+#define BEISCSI_NO_RST_ISSUE 0
+struct iscsi_invalidate_connection_params_in {
+ struct be_cmd_req_hdr hdr;
+ unsigned int session_handle;
+ unsigned short cid;
+ unsigned short unused;
+ unsigned short cleanup_type;
+ unsigned short save_cfg;
+} __packed;
+
+struct iscsi_invalidate_connection_params_out {
+ unsigned int session_handle;
+ unsigned short cid;
+ unsigned short unused;
+} __packed;
+
+union iscsi_invalidate_connection_params {
+ struct iscsi_invalidate_connection_params_in request;
+ struct iscsi_invalidate_connection_params_out response;
+} __packed;
+
+struct invalidate_commands_params_in {
+ struct be_cmd_req_hdr hdr;
+ unsigned int ref_handle;
+ unsigned int icd_count;
+ struct invalidate_command_table table[128];
+ unsigned short cleanup_type;
+ unsigned short unused;
+} __packed;
+
+struct invalidate_commands_params_out {
+ unsigned int ref_handle;
+ unsigned int icd_count;
+ unsigned int icd_status[128];
+} __packed;
+
+union invalidate_commands_params {
+ struct invalidate_commands_params_in request;
+ struct invalidate_commands_params_out response;
+} __packed;
+
+struct mgmt_hba_attributes {
+ u8 flashrom_version_string[BEISCSI_VER_STRLEN];
+ u8 manufacturer_name[BEISCSI_VER_STRLEN];
+ u32 supported_modes;
+ u8 seeprom_version_lo;
+ u8 seeprom_version_hi;
+ u8 rsvd0[2];
+ u32 fw_cmd_data_struct_version;
+ u32 ep_fw_data_struct_version;
+ u8 ncsi_version_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[BEISCSI_VER_STRLEN];
+ u8 controller_description[64];
+ u8 controller_serial_number[BEISCSI_VER_STRLEN];
+ u8 ip_version_string[BEISCSI_VER_STRLEN];
+ u8 firmware_version_string[BEISCSI_VER_STRLEN];
+ u8 bios_version_string[BEISCSI_VER_STRLEN];
+ u8 redboot_version_string[BEISCSI_VER_STRLEN];
+ u8 driver_version_string[BEISCSI_VER_STRLEN];
+ u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u8 iscsi_features;
+ u8 asic_generation;
+ u8 future_u8[2];
+ u32 future_u32[3];
+} __packed;
+
+struct mgmt_controller_attributes {
+ struct mgmt_hba_attributes hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u8 netfilters;
+ u8 rsvd0[3];
+ u32 future_u32[4];
+} __packed;
+
+struct be_mgmt_controller_attributes {
+ struct be_cmd_req_hdr hdr;
+ struct mgmt_controller_attributes params;
+} __packed;
+
+struct be_mgmt_controller_attributes_resp {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attributes params;
+} __packed;
+
+struct be_bsg_vendor_cmd {
+ struct be_cmd_req_hdr hdr;
+ unsigned short region;
+ unsigned short offset;
+ unsigned short sector;
+} __packed;
+
+/* configuration management */
+
+#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws)
+
+/* MGMT CMD flags */
+
+#define MGMT_CMDH_FREE (1<<0)
+
+/* --- MGMT_ERROR_CODES --- */
+/* Error Codes returned in the status field of the CMD response header */
+#define MGMT_STATUS_SUCCESS 0 /* The CMD completed without errors */
+#define MGMT_STATUS_FAILED 1 /* Error status in the Status field of */
+ /* the CMD_RESPONSE_HEADER */
+
+#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
+ pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+ bus_address.u.a32.address_lo; \
+ pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+ bus_address.u.a32.address_hi; \
+}
+
+#define BEISCSI_WRITE_FLASH 0
+#define BEISCSI_READ_FLASH 1
+
+struct beiscsi_endpoint {
+ struct beiscsi_hba *phba;
+ struct beiscsi_sess *sess;
+ struct beiscsi_conn *conn;
+ struct iscsi_endpoint *openiscsi_ep;
+ unsigned short ip_type;
+ char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
+ unsigned long dst_addr;
+ unsigned short ep_cid;
+ unsigned int fw_handle;
+ u16 dst_tcpport;
+ u16 cid_vld;
+};
+
+int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba);
+
+unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep,
+ unsigned short cid,
+ unsigned short issue_reset,
+ unsigned short savecfg_flag);
+
+int mgmt_set_ip(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *ip_param,
+ struct iscsi_iface_param_info *subnet_param,
+ uint32_t boot_proto);
+
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+
+unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
+ unsigned int reopen_type,
+ unsigned sess_handle);
+
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+ u32 boot_session_handle,
+ struct be_dma_mem *nonemb_cmd);
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+ struct be_cmd_get_nic_conf_resp *mac);
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_if_info_resp **if_info);
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_def_gateway_resp *gateway);
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+ struct iscsi_iface_param_info *gateway_param);
+
+int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
+ unsigned int *s_handle);
+
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
+
+int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
+ssize_t beiscsi_drvr_ver_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_fw_ver_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_active_session_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_adap_family_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+
+ssize_t beiscsi_free_session_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_phys_port_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr);
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle);
+void beiscsi_ue_detect(struct beiscsi_hba *phba);
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *, int num);
+
+#endif
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
new file mode 100644
index 000000000..475cf925d
--- /dev/null
+++ b/drivers/scsi/bfa/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
+
+bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
+bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
+bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
+bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
new file mode 100644
index 000000000..4ad7e368b
--- /dev/null
+++ b/drivers/scsi/bfa/bfa.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_H__
+#define __BFA_H__
+
+#include "bfad_drv.h"
+#include "bfa_cs.h"
+#include "bfa_plog.h"
+#include "bfa_defs_svc.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+
+struct bfa_s;
+
+typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
+
+/*
+ * Interrupt message handlers
+ */
+void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
+
+/*
+ * Request and response queue related defines
+ */
+#define BFA_REQQ_NELEMS_MIN (4)
+#define BFA_RSPQ_NELEMS_MIN (4)
+
+#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
+#define bfa_reqq_ci(__bfa, __reqq) \
+ (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
+
+#define bfa_reqq_full(__bfa, __reqq) \
+ (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
+ ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
+ bfa_reqq_ci(__bfa, __reqq))
+
+#define bfa_reqq_next(__bfa, __reqq) \
+ (bfa_reqq_full(__bfa, __reqq) ? NULL : \
+ ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+ + bfa_reqq_pi((__bfa), (__reqq)))))
+
+#define bfa_reqq_produce(__bfa, __reqq, __mh) do { \
+ (__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\
+ (__bfa)->iocfc.req_cq_pi[__reqq]++; \
+ (__bfa)->iocfc.req_cq_pi[__reqq] &= \
+ ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
+ writel((__bfa)->iocfc.req_cq_pi[__reqq], \
+ (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
+ mmiowb(); \
+ } while (0)
+
+#define bfa_rspq_pi(__bfa, __rspq) \
+ (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
+
+#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
+#define bfa_rspq_elem(__bfa, __rspq, __ci) \
+ (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
+
+#define CQ_INCR(__index, __size) do { \
+ (__index)++; \
+ (__index) &= ((__size) - 1); \
+} while (0)
+
+/*
+ * Circular queue usage assignments
+ */
+enum {
+ BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
+ BFA_REQQ_FCXP = 0, /* all FCXP messages */
+ BFA_REQQ_LPS = 0, /* all lport service msgs */
+ BFA_REQQ_PORT = 0, /* all port messages */
+ BFA_REQQ_FLASH = 0, /* for flash module */
+ BFA_REQQ_DIAG = 0, /* for diag module */
+ BFA_REQQ_RPORT = 0, /* all port messages */
+ BFA_REQQ_SBOOT = 0, /* all san boot messages */
+ BFA_REQQ_QOS_LO = 1, /* all low priority IO */
+ BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
+ BFA_REQQ_QOS_HI = 3, /* all high priority IO */
+};
+
+static inline void
+bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
+ void *cbarg)
+{
+ wqe->qresume = qresume;
+ wqe->cbarg = cbarg;
+}
+
+#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
+
+/*
+ * static inline void
+ * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
+ */
+#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
+ \
+ struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
+ \
+ WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS)); \
+ WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg)); \
+ \
+ list_add_tail(&(__wqe)->qe, waitq); \
+ } while (0)
+
+#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
+
+#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
+ (__hcb_qe)->cbfn = (__cbfn); \
+ (__hcb_qe)->cbarg = (__cbarg); \
+ (__hcb_qe)->pre_rmv = BFA_FALSE; \
+ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
+ } while (0)
+
+#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
+
+#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
+ (__hcb_qe)->cbfn = (__cbfn); \
+ (__hcb_qe)->cbarg = (__cbarg); \
+ if (!(__hcb_qe)->once) { \
+ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
+ (__hcb_qe)->once = BFA_TRUE; \
+ } \
+ } while (0)
+
+#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
+ (__hcb_qe)->fw_status = (__status); \
+ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
+} while (0)
+
+#define bfa_cb_queue_done(__hcb_qe) do { \
+ (__hcb_qe)->once = BFA_FALSE; \
+ } while (0)
+
+
+/*
+ * PCI devices supported by the current BFA
+ */
+struct bfa_pciid_s {
+ u16 device_id;
+ u16 vendor_id;
+};
+
+extern char bfa_version[];
+
+struct bfa_iocfc_regs_s {
+ void __iomem *intr_status;
+ void __iomem *intr_mask;
+ void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS];
+ void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS];
+ void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_ci[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_pi[BFI_IOC_MAX_CQS];
+ void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS];
+};
+
+/*
+ * MSIX vector handlers
+ */
+#define BFA_MSIX_MAX_VECTORS 22
+typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
+struct bfa_msix_s {
+ int nvecs;
+ bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
+};
+
+/*
+ * Chip specific interfaces
+ */
+struct bfa_hwif_s {
+ void (*hw_reginit)(struct bfa_s *bfa);
+ void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
+ void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
+ void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
+ void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
+ void (*hw_msix_queue_install)(struct bfa_s *bfa);
+ void (*hw_msix_uninstall)(struct bfa_s *bfa);
+ void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
+ void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
+ u32 *nvecs, u32 *maxvec);
+ void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
+ u32 *end);
+ int cpe_vec_q0;
+ int rme_vec_q0;
+};
+typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
+
+struct bfa_faa_cbfn_s {
+ bfa_cb_iocfc_t faa_cbfn;
+ void *faa_cbarg;
+};
+
+#define BFA_FAA_ENABLED 1
+#define BFA_FAA_DISABLED 2
+
+/*
+ * FAA attributes
+ */
+struct bfa_faa_attr_s {
+ wwn_t faa;
+ u8 faa_state;
+ u8 pwwn_source;
+ u8 rsvd[6];
+};
+
+struct bfa_faa_args_s {
+ struct bfa_faa_attr_s *faa_attr;
+ struct bfa_faa_cbfn_s faa_cb;
+ u8 faa_state;
+ bfa_boolean_t busy;
+};
+
+struct bfa_iocfc_s {
+ bfa_fsm_t fsm;
+ struct bfa_s *bfa;
+ struct bfa_iocfc_cfg_s cfg;
+ u32 req_cq_pi[BFI_IOC_MAX_CQS];
+ u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
+ u8 hw_qid[BFI_IOC_MAX_CQS];
+ struct bfa_cb_qe_s init_hcb_qe;
+ struct bfa_cb_qe_s stop_hcb_qe;
+ struct bfa_cb_qe_s dis_hcb_qe;
+ struct bfa_cb_qe_s en_hcb_qe;
+ struct bfa_cb_qe_s stats_hcb_qe;
+ bfa_boolean_t submod_enabled;
+ bfa_boolean_t cb_reqd; /* Driver call back reqd */
+ bfa_status_t op_status; /* Status of bfa iocfc op */
+
+ struct bfa_dma_s cfg_info;
+ struct bfi_iocfc_cfg_s *cfginfo;
+ struct bfa_dma_s cfgrsp_dma;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp;
+ struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
+ struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
+ struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
+ struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
+ struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
+ struct bfa_hwif_s hwif;
+ bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
+ void *updateq_cbarg; /* bios callback arg */
+ u32 intr_mask;
+ struct bfa_faa_args_s faa_args;
+ struct bfa_mem_dma_s ioc_dma;
+ struct bfa_mem_dma_s iocfc_dma;
+ struct bfa_mem_dma_s reqq_dma[BFI_IOC_MAX_CQS];
+ struct bfa_mem_dma_s rspq_dma[BFI_IOC_MAX_CQS];
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_MEM_IOC_DMA(_bfa) (&((_bfa)->iocfc.ioc_dma))
+#define BFA_MEM_IOCFC_DMA(_bfa) (&((_bfa)->iocfc.iocfc_dma))
+#define BFA_MEM_REQQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.reqq_dma[(_qno)]))
+#define BFA_MEM_RSPQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.rspq_dma[(_qno)]))
+#define BFA_MEM_IOCFC_KVA(_bfa) (&((_bfa)->iocfc.kva_seg))
+
+#define bfa_fn_lpu(__bfa) \
+ bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
+#define bfa_msix_init(__bfa, __nvecs) \
+ ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
+#define bfa_msix_ctrl_install(__bfa) \
+ ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa) \
+ ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
+#define bfa_msix_uninstall(__bfa) \
+ ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
+#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
+ ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
+#define bfa_isr_reqq_ack(__bfa, __queue) do { \
+ if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
+ (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
+} while (0)
+#define bfa_isr_mode_set(__bfa, __msix) do { \
+ if ((__bfa)->iocfc.hwif.hw_isr_mode_set) \
+ (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix); \
+} while (0)
+#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
+ ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
+ __nvecs, __maxvec))
+#define bfa_msix_get_rme_range(__bfa, __start, __end) \
+ ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
+#define bfa_msix(__bfa, __vec) \
+ ((__bfa)->msix.handler[__vec](__bfa, __vec))
+
+/*
+ * FC specific IOC functions.
+ */
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa);
+void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
+ struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev);
+void bfa_iocfc_init(struct bfa_s *bfa);
+void bfa_iocfc_start(struct bfa_s *bfa);
+void bfa_iocfc_stop(struct bfa_s *bfa);
+void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
+bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
+void bfa_iocfc_reset_queues(struct bfa_s *bfa);
+
+void bfa_msix_all(struct bfa_s *bfa, int vec);
+void bfa_msix_reqq(struct bfa_s *bfa, int vec);
+void bfa_msix_rspq(struct bfa_s *bfa, int vec);
+void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
+
+void bfa_hwcb_reginit(struct bfa_s *bfa);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
+ u32 *maxvec);
+void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
+ u32 *end);
+void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct2_reginit(struct bfa_s *bfa);
+void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
+void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
+ u32 *maxvec);
+void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
+ u32 *end);
+void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
+int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
+ struct bfi_pbc_vport_s *pbc_vport);
+
+
+/*
+ *----------------------------------------------------------------------
+ * BFA public interfaces
+ *----------------------------------------------------------------------
+ */
+#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
+#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
+ bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
+#define bfa_ioc_clear_stats(__bfa) \
+ bfa_ioc_clr_stats(&(__bfa)->ioc)
+#define bfa_get_nports(__bfa) \
+ bfa_ioc_get_nports(&(__bfa)->ioc)
+#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
+ bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
+#define bfa_get_adapter_model(__bfa, __model) \
+ bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
+#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
+ bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
+#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
+ bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
+#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
+ bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
+#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
+ bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
+#define bfa_get_ioc_state(__bfa) \
+ bfa_ioc_get_state(&(__bfa)->ioc)
+#define bfa_get_type(__bfa) \
+ bfa_ioc_get_type(&(__bfa)->ioc)
+#define bfa_get_mac(__bfa) \
+ bfa_ioc_get_mac(&(__bfa)->ioc)
+#define bfa_get_mfg_mac(__bfa) \
+ bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
+#define bfa_get_fw_clock_res(__bfa) \
+ ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
+
+/*
+ * lun mask macros return NULL when min cfg is enabled and there is
+ * no memory allocated for lunmask.
+ */
+#define bfa_get_lun_mask(__bfa) \
+ ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
+ (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
+
+#define bfa_get_lun_mask_list(_bfa) \
+ ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
+ (bfa_get_lun_mask(_bfa)->lun_list)
+
+#define bfa_get_lun_mask_status(_bfa) \
+ (((&(_bfa)->modules.dconf_mod)->min_cfg) \
+ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
+
+void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
+void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
+void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
+void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa);
+void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_pcidev_s *pcidev);
+void bfa_detach(struct bfa_s *bfa);
+void bfa_cb_init(void *bfad, bfa_status_t status);
+void bfa_cb_updateq(void *bfad, bfa_status_t status);
+
+bfa_boolean_t bfa_intx(struct bfa_s *bfa);
+void bfa_isr_enable(struct bfa_s *bfa);
+void bfa_isr_disable(struct bfa_s *bfa);
+
+void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
+void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
+void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
+
+typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
+void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
+
+
+bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
+ struct bfa_iocfc_intr_attr_s *attr);
+
+void bfa_iocfc_enable(struct bfa_s *bfa);
+void bfa_iocfc_disable(struct bfa_s *bfa);
+#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
+ bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
+
+struct bfa_cb_pending_q_s {
+ struct bfa_cb_qe_s hcb_qe;
+ void *data; /* Driver buffer */
+};
+
+/* Common macros to operate on pending stats/attr apis */
+#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
+ (__qe)->hcb_qe.cbfn = (__cbfn); \
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
+ (__qe)->data = (__data); \
+} while (0)
+
+#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
new file mode 100644
index 000000000..e3f67b097
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -0,0 +1,2000 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+#include "bfi_reg.h"
+
+BFA_TRC_FILE(HAL, CORE);
+
+/*
+ * BFA module list terminated by NULL
+ */
+static struct bfa_module_s *hal_mods[] = {
+ &hal_mod_fcdiag,
+ &hal_mod_sgpg,
+ &hal_mod_fcport,
+ &hal_mod_fcxp,
+ &hal_mod_lps,
+ &hal_mod_uf,
+ &hal_mod_rport,
+ &hal_mod_fcp,
+ &hal_mod_dconf,
+ NULL
+};
+
+/*
+ * Message handlers for various modules.
+ */
+static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
+ bfa_isr_unhandled, /* NONE */
+ bfa_isr_unhandled, /* BFI_MC_IOC */
+ bfa_fcdiag_intr, /* BFI_MC_DIAG */
+ bfa_isr_unhandled, /* BFI_MC_FLASH */
+ bfa_isr_unhandled, /* BFI_MC_CEE */
+ bfa_fcport_isr, /* BFI_MC_FCPORT */
+ bfa_isr_unhandled, /* BFI_MC_IOCFC */
+ bfa_isr_unhandled, /* BFI_MC_LL */
+ bfa_uf_isr, /* BFI_MC_UF */
+ bfa_fcxp_isr, /* BFI_MC_FCXP */
+ bfa_lps_isr, /* BFI_MC_LPS */
+ bfa_rport_isr, /* BFI_MC_RPORT */
+ bfa_itn_isr, /* BFI_MC_ITN */
+ bfa_isr_unhandled, /* BFI_MC_IOIM_READ */
+ bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */
+ bfa_isr_unhandled, /* BFI_MC_IOIM_IO */
+ bfa_ioim_isr, /* BFI_MC_IOIM */
+ bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */
+ bfa_tskim_isr, /* BFI_MC_TSKIM */
+ bfa_isr_unhandled, /* BFI_MC_SBOOT */
+ bfa_isr_unhandled, /* BFI_MC_IPFC */
+ bfa_isr_unhandled, /* BFI_MC_PORT */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+ bfa_isr_unhandled, /* --------- */
+};
+/*
+ * Message handlers for mailbox command classes
+ */
+static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
+ NULL,
+ NULL, /* BFI_MC_IOC */
+ NULL, /* BFI_MC_DIAG */
+ NULL, /* BFI_MC_FLASH */
+ NULL, /* BFI_MC_CEE */
+ NULL, /* BFI_MC_PORT */
+ bfa_iocfc_isr, /* BFI_MC_IOCFC */
+ NULL,
+};
+
+
+
+static void
+bfa_com_port_attach(struct bfa_s *bfa)
+{
+ struct bfa_port_s *port = &bfa->modules.port;
+ struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
+
+ bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
+ bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
+}
+
+/*
+ * ablk module attach
+ */
+static void
+bfa_com_ablk_attach(struct bfa_s *bfa)
+{
+ struct bfa_ablk_s *ablk = &bfa->modules.ablk;
+ struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+
+ bfa_ablk_attach(ablk, &bfa->ioc);
+ bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
+}
+
+static void
+bfa_com_cee_attach(struct bfa_s *bfa)
+{
+ struct bfa_cee_s *cee = &bfa->modules.cee;
+ struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+
+ cee->trcmod = bfa->trcmod;
+ bfa_cee_attach(cee, &bfa->ioc, bfa);
+ bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
+}
+
+static void
+bfa_com_sfp_attach(struct bfa_s *bfa)
+{
+ struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
+ struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+
+ bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
+ bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
+}
+
+static void
+bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+ struct bfa_flash_s *flash = BFA_FLASH(bfa);
+ struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+
+ bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+ bfa_flash_memclaim(flash, flash_dma->kva_curp,
+ flash_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_diag_attach(struct bfa_s *bfa)
+{
+ struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
+ struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+
+ bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
+ bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
+}
+
+static void
+bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+ struct bfa_phy_s *phy = BFA_PHY(bfa);
+ struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
+
+ bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+ bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+ struct bfa_fru_s *fru = BFA_FRU(bfa);
+ struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
+
+ bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+ bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
+}
+
+/*
+ * BFA IOC FC related definitions
+ */
+
+/*
+ * IOC local definitions
+ */
+#define BFA_IOCFC_TOV 5000 /* msecs */
+
+enum {
+ BFA_IOCFC_ACT_NONE = 0,
+ BFA_IOCFC_ACT_INIT = 1,
+ BFA_IOCFC_ACT_STOP = 2,
+ BFA_IOCFC_ACT_DISABLE = 3,
+ BFA_IOCFC_ACT_ENABLE = 4,
+};
+
+#define DEF_CFG_NUM_FABRICS 1
+#define DEF_CFG_NUM_LPORTS 256
+#define DEF_CFG_NUM_CQS 4
+#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
+#define DEF_CFG_NUM_TSKIM_REQS 128
+#define DEF_CFG_NUM_FCXP_REQS 64
+#define DEF_CFG_NUM_UF_BUFS 64
+#define DEF_CFG_NUM_RPORTS 1024
+#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
+#define DEF_CFG_NUM_TINS 256
+
+#define DEF_CFG_NUM_SGPGS 2048
+#define DEF_CFG_NUM_REQQ_ELEMS 256
+#define DEF_CFG_NUM_RSPQ_ELEMS 64
+#define DEF_CFG_NUM_SBOOT_TGTS 16
+#define DEF_CFG_NUM_SBOOT_LUNS 16
+
+/*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+ struct bfa_iocfc_s, enum iocfc_event);
+
+/*
+ * forward declaration for IOC FC functions
+ */
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
+static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
+static void bfa_iocfc_disable_cbfn(void *bfa_arg);
+static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
+static void bfa_iocfc_reset_cbfn(void *bfa_arg);
+static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_INIT:
+ case IOCFC_E_ENABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+ break;
+
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_DCONF_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+ break;
+
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_CFG_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+ break;
+
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_START:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+ break;
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_fcport_init(iocfc->bfa);
+ bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_DCONF_DONE:
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_DISABLED:
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+ bfa_iocfc_stop_cb, iocfc->bfa);
+ break;
+
+ case IOCFC_E_IOC_ENABLED:
+ case IOCFC_E_DCONF_DONE:
+ case IOCFC_E_CFG_DONE:
+ break;
+
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+ break;
+
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_CFG_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_DISABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ case IOCFC_E_DCONF_DONE:
+ case IOCFC_E_CFG_DONE:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+ bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_ENABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+ break;
+ case IOCFC_E_IOC_DISABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+ bfa_iocfc_disable_cb, iocfc->bfa);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+/*
+ * BFA Interrupt handling functions
+ */
+static void
+bfa_reqq_resume(struct bfa_s *bfa, int qid)
+{
+ struct list_head *waitq, *qe, *qen;
+ struct bfa_reqq_wait_s *wqe;
+
+ waitq = bfa_reqq(bfa, qid);
+ list_for_each_safe(qe, qen, waitq) {
+ /*
+ * Callback only as long as there is room in request queue
+ */
+ if (bfa_reqq_full(bfa, qid))
+ break;
+
+ list_del(qe);
+ wqe = (struct bfa_reqq_wait_s *) qe;
+ wqe->qresume(wqe->cbarg);
+ }
+}
+
+bfa_boolean_t
+bfa_isr_rspq(struct bfa_s *bfa, int qid)
+{
+ struct bfi_msg_s *m;
+ u32 pi, ci;
+ struct list_head *waitq;
+ bfa_boolean_t ret;
+
+ ci = bfa_rspq_ci(bfa, qid);
+ pi = bfa_rspq_pi(bfa, qid);
+
+ ret = (ci != pi);
+
+ while (ci != pi) {
+ m = bfa_rspq_elem(bfa, qid, ci);
+ WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
+
+ bfa_isrs[m->mhdr.msg_class] (bfa, m);
+ CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
+ }
+
+ /*
+ * acknowledge RME completions and update CI
+ */
+ bfa_isr_rspq_ack(bfa, qid, ci);
+
+ /*
+ * Resume any pending requests in the corresponding reqq.
+ */
+ waitq = bfa_reqq(bfa, qid);
+ if (!list_empty(waitq))
+ bfa_reqq_resume(bfa, qid);
+
+ return ret;
+}
+
+static inline void
+bfa_isr_reqq(struct bfa_s *bfa, int qid)
+{
+ struct list_head *waitq;
+
+ bfa_isr_reqq_ack(bfa, qid);
+
+ /*
+ * Resume any pending requests in the corresponding reqq.
+ */
+ waitq = bfa_reqq(bfa, qid);
+ if (!list_empty(waitq))
+ bfa_reqq_resume(bfa, qid);
+}
+
+void
+bfa_msix_all(struct bfa_s *bfa, int vec)
+{
+ u32 intr, qintr;
+ int queue;
+
+ intr = readl(bfa->iocfc.bfa_regs.intr_status);
+ if (!intr)
+ return;
+
+ /*
+ * RME completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_RME_MASK;
+ if (qintr && bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_rspq(bfa, queue);
+ }
+
+ intr &= ~qintr;
+ if (!intr)
+ return;
+
+ /*
+ * CPE completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_CPE_MASK;
+ if (qintr && bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_reqq(bfa, queue);
+ }
+ intr &= ~qintr;
+ if (!intr)
+ return;
+
+ bfa_msix_lpu_err(bfa, intr);
+}
+
+bfa_boolean_t
+bfa_intx(struct bfa_s *bfa)
+{
+ u32 intr, qintr;
+ int queue;
+ bfa_boolean_t rspq_comp = BFA_FALSE;
+
+ intr = readl(bfa->iocfc.bfa_regs.intr_status);
+
+ qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
+ if (qintr)
+ writel(qintr, bfa->iocfc.bfa_regs.intr_status);
+
+ /*
+ * Unconditional RME completion queue interrupt
+ */
+ if (bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ if (bfa_isr_rspq(bfa, queue))
+ rspq_comp = BFA_TRUE;
+ }
+
+ if (!intr)
+ return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
+
+ /*
+ * CPE completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_CPE_MASK;
+ if (qintr && bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_reqq(bfa, queue);
+ }
+ intr &= ~qintr;
+ if (!intr)
+ return BFA_TRUE;
+
+ if (bfa->intr_enabled)
+ bfa_msix_lpu_err(bfa, intr);
+
+ return BFA_TRUE;
+}
+
+void
+bfa_isr_enable(struct bfa_s *bfa)
+{
+ u32 umsk;
+ int port_id = bfa_ioc_portid(&bfa->ioc);
+
+ bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
+ bfa_trc(bfa, port_id);
+
+ bfa_msix_ctrl_install(bfa);
+
+ if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+ umsk = __HFN_INT_ERR_MASK_CT2;
+ umsk |= port_id == 0 ?
+ __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
+ } else {
+ umsk = __HFN_INT_ERR_MASK;
+ umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
+ }
+
+ writel(umsk, bfa->iocfc.bfa_regs.intr_status);
+ writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
+ bfa->iocfc.intr_mask = ~umsk;
+ bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
+
+ /*
+ * Set the flag indicating successful enabling of interrupts
+ */
+ bfa->intr_enabled = BFA_TRUE;
+}
+
+void
+bfa_isr_disable(struct bfa_s *bfa)
+{
+ bfa->intr_enabled = BFA_FALSE;
+ bfa_isr_mode_set(bfa, BFA_FALSE);
+ writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
+ bfa_msix_uninstall(bfa);
+}
+
+void
+bfa_msix_reqq(struct bfa_s *bfa, int vec)
+{
+ bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
+}
+
+void
+bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ bfa_trc(bfa, m->mhdr.msg_class);
+ bfa_trc(bfa, m->mhdr.msg_id);
+ bfa_trc(bfa, m->mhdr.mtag.i2htok);
+ WARN_ON(1);
+ bfa_trc_stop(bfa->trcmod);
+}
+
+void
+bfa_msix_rspq(struct bfa_s *bfa, int vec)
+{
+ bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
+}
+
+void
+bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
+{
+ u32 intr, curr_value;
+ bfa_boolean_t lpu_isr, halt_isr, pss_isr;
+
+ intr = readl(bfa->iocfc.bfa_regs.intr_status);
+
+ if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+ halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
+ pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
+ lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
+ __HFN_INT_MBOX_LPU1_CT2);
+ intr &= __HFN_INT_ERR_MASK_CT2;
+ } else {
+ halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
+ (intr & __HFN_INT_LL_HALT) : 0;
+ pss_isr = intr & __HFN_INT_ERR_PSS;
+ lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
+ intr &= __HFN_INT_ERR_MASK;
+ }
+
+ if (lpu_isr)
+ bfa_ioc_mbox_isr(&bfa->ioc);
+
+ if (intr) {
+ if (halt_isr) {
+ /*
+ * If LL_HALT bit is set then FW Init Halt LL Port
+ * Register needs to be cleared as well so Interrupt
+ * Status Register will be cleared.
+ */
+ curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
+ curr_value &= ~__FW_INIT_HALT_P;
+ writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
+ }
+
+ if (pss_isr) {
+ /*
+ * ERR_PSS bit needs to be cleared as well in case
+ * interrups are shared so driver's interrupt handler is
+ * still called even though it is already masked out.
+ */
+ curr_value = readl(
+ bfa->ioc.ioc_regs.pss_err_status_reg);
+ writel(curr_value,
+ bfa->ioc.ioc_regs.pss_err_status_reg);
+ }
+
+ writel(intr, bfa->iocfc.bfa_regs.intr_status);
+ bfa_ioc_error_isr(&bfa->ioc);
+ }
+}
+
+/*
+ * BFA IOC FC related functions
+ */
+
+/*
+ * BFA IOC private functions
+ */
+
+/*
+ * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
+ */
+static void
+bfa_iocfc_send_cfg(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfg_req_s cfg_req;
+ struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
+ struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
+ int i;
+
+ WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
+ bfa_trc(bfa, cfg->fwcfg.num_cqs);
+
+ bfa_iocfc_reset_queues(bfa);
+
+ /*
+ * initialize IOC configuration info
+ */
+ cfg_info->single_msix_vec = 0;
+ if (bfa->msix.nvecs == 1)
+ cfg_info->single_msix_vec = 1;
+ cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
+ cfg_info->num_cqs = cfg->fwcfg.num_cqs;
+ cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
+ cfg->fwcfg.num_ioim_reqs));
+ cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
+
+ bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
+ /*
+ * dma map REQ and RSP circular queues and shadow pointers
+ */
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
+ iocfc->req_cq_ba[i].pa);
+ bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
+ iocfc->req_cq_shadow_ci[i].pa);
+ cfg_info->req_cq_elems[i] =
+ cpu_to_be16(cfg->drvcfg.num_reqq_elems);
+
+ bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
+ iocfc->rsp_cq_ba[i].pa);
+ bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
+ iocfc->rsp_cq_shadow_pi[i].pa);
+ cfg_info->rsp_cq_elems[i] =
+ cpu_to_be16(cfg->drvcfg.num_rspq_elems);
+ }
+
+ /*
+ * Enable interrupt coalescing if it is driver init path
+ * and not ioc disable/enable path.
+ */
+ if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
+ cfg_info->intr_attr.coalesce = BFA_TRUE;
+
+ /*
+ * dma map IOC configuration itself
+ */
+ bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
+ bfa_fn_lpu(bfa));
+ bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
+
+ bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
+ sizeof(struct bfi_iocfc_cfg_req_s));
+}
+
+static void
+bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ bfa->bfad = bfad;
+ iocfc->bfa = bfa;
+ iocfc->cfg = *cfg;
+
+ /*
+ * Initialize chip specific handlers.
+ */
+ if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
+ iocfc->hwif.hw_reginit = bfa_hwct_reginit;
+ iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
+ iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
+ iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
+ iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+ iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
+ iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
+ iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
+ iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
+ iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
+ iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
+ iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
+ } else {
+ iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
+ iocfc->hwif.hw_reqq_ack = NULL;
+ iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+ iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
+ iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+ iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
+ iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
+ iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
+ iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
+ iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
+ iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
+ bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+ iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
+ bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+ }
+
+ if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
+ iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
+ iocfc->hwif.hw_isr_mode_set = NULL;
+ iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
+ }
+
+ iocfc->hwif.hw_reginit(bfa);
+ bfa->msix.nvecs = 0;
+}
+
+static void
+bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
+{
+ u8 *dm_kva = NULL;
+ u64 dm_pa = 0;
+ int i, per_reqq_sz, per_rspq_sz;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+ struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+ struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
+
+ /* First allocate dma memory for IOC */
+ bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
+ bfa_mem_dma_phys(ioc_dma));
+
+ /* Claim DMA-able memory for the request/response queues */
+ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+ per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
+ iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
+ iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
+ memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
+
+ rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
+ iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
+ iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
+ memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
+ }
+
+ /* Claim IOCFC dma memory - for shadow CI/PI */
+ dm_kva = bfa_mem_dma_virt(iocfc_dma);
+ dm_pa = bfa_mem_dma_phys(iocfc_dma);
+
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ iocfc->req_cq_shadow_ci[i].kva = dm_kva;
+ iocfc->req_cq_shadow_ci[i].pa = dm_pa;
+ dm_kva += BFA_CACHELINE_SZ;
+ dm_pa += BFA_CACHELINE_SZ;
+
+ iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
+ iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
+ dm_kva += BFA_CACHELINE_SZ;
+ dm_pa += BFA_CACHELINE_SZ;
+ }
+
+ /* Claim IOCFC dma memory - for the config info page */
+ bfa->iocfc.cfg_info.kva = dm_kva;
+ bfa->iocfc.cfg_info.pa = dm_pa;
+ bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
+ dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+
+ /* Claim IOCFC dma memory - for the config response */
+ bfa->iocfc.cfgrsp_dma.kva = dm_kva;
+ bfa->iocfc.cfgrsp_dma.pa = dm_pa;
+ bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
+ dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+
+ /* Claim IOCFC kva memory */
+ bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+ bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
+}
+
+/*
+ * Start BFA submodules.
+ */
+static void
+bfa_iocfc_start_submod(struct bfa_s *bfa)
+{
+ int i;
+
+ bfa->queue_process = BFA_TRUE;
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+ bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->start(bfa);
+
+ bfa->iocfc.submod_enabled = BFA_TRUE;
+}
+
+/*
+ * Disable BFA submodules.
+ */
+static void
+bfa_iocfc_disable_submod(struct bfa_s *bfa)
+{
+ int i;
+
+ if (bfa->iocfc.submod_enabled == BFA_FALSE)
+ return;
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->iocdisable(bfa);
+
+ bfa->iocfc.submod_enabled = BFA_FALSE;
+}
+
+static void
+bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ if (complete)
+ bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
+}
+
+static void
+bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfad_s *bfad = bfa->bfad;
+
+ if (compl)
+ complete(&bfad->comp);
+}
+
+static void
+bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfad_s *bfad = bfa->bfad;
+
+ if (compl)
+ complete(&bfad->enable_comp);
+}
+
+static void
+bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfad_s *bfad = bfa->bfad;
+
+ if (compl)
+ complete(&bfad->disable_comp);
+}
+
+/**
+ * configure queue registers from firmware response
+ */
+static void
+bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
+{
+ int i;
+ struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+ bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
+ r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
+ r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
+ r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
+ r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
+ r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
+ r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
+ }
+}
+
+static void
+bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
+
+ bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
+ bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
+ bfa_rport_res_recfg(bfa, fwcfg->num_rports);
+ bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
+ fwcfg->num_ioim_reqs);
+ bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
+}
+
+/*
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_iocfc_cfgrsp(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+ struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
+
+ fwcfg->num_cqs = fwcfg->num_cqs;
+ fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
+ fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
+ fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
+ fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
+ fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
+ fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
+
+ /*
+ * configure queue register offsets as learnt from firmware
+ */
+ bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
+
+ /*
+ * Re-configure resources as learnt from Firmware
+ */
+ bfa_iocfc_res_recfg(bfa, fwcfg);
+
+ /*
+ * Install MSIX queue handlers
+ */
+ bfa_msix_queue_install(bfa);
+
+ if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+ bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+ bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+ bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+ }
+}
+
+void
+bfa_iocfc_reset_queues(struct bfa_s *bfa)
+{
+ int q;
+
+ for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
+ bfa_reqq_ci(bfa, q) = 0;
+ bfa_reqq_pi(bfa, q) = 0;
+ bfa_rspq_ci(bfa, q) = 0;
+ bfa_rspq_pi(bfa, q) = 0;
+ }
+}
+
+/*
+ * Process FAA pwwn msg from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+ cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+ bfa->ioc.attr->pwwn = msg->pwwn;
+ bfa->ioc.attr->nwwn = msg->nwwn;
+ bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
+/* Fabric Assigned Address specific functions */
+
+/*
+ * Check whether IOC is ready before sending command down
+ */
+static bfa_status_t
+bfa_faa_validate_request(struct bfa_s *bfa)
+{
+ enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+ u32 card_type = bfa->ioc.attr->card_type;
+
+ if (bfa_ioc_is_operational(&bfa->ioc)) {
+ if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
+ return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+ } else {
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+ bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+ struct bfi_faa_query_s faa_attr_req;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ bfa_status_t status;
+
+ status = bfa_faa_validate_request(bfa);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ if (iocfc->faa_args.busy == BFA_TRUE)
+ return BFA_STATUS_DEVBUSY;
+
+ iocfc->faa_args.faa_attr = attr;
+ iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+ iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+ iocfc->faa_args.busy = BFA_TRUE;
+ memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
+ bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
+ BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
+
+ bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
+ sizeof(struct bfi_faa_query_s));
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * FAA query response
+ */
+static void
+bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
+ bfi_faa_query_rsp_t *rsp)
+{
+ void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+
+ if (iocfc->faa_args.faa_attr) {
+ iocfc->faa_args.faa_attr->faa = rsp->faa;
+ iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
+ iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
+ }
+
+ WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+ iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
+ iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ * IOC enable request is complete
+ */
+static void
+bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ if (status == BFA_STATUS_OK)
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+ else
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
+}
+
+/*
+ * IOC disable request is complete
+ */
+static void
+bfa_iocfc_disable_cbfn(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ bfa->queue_process = BFA_FALSE;
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
+}
+
+/*
+ * Notify sub-modules of hardware failure.
+ */
+static void
+bfa_iocfc_hbfail_cbfn(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ bfa->queue_process = BFA_FALSE;
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
+}
+
+/*
+ * Actions on chip-reset completion.
+ */
+static void
+bfa_iocfc_reset_cbfn(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ bfa_iocfc_reset_queues(bfa);
+ bfa_isr_enable(bfa);
+}
+
+/*
+ * Query IOC memory requirement information.
+ */
+void
+bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+ int q, per_reqq_sz, per_rspq_sz;
+ struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+ struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+ struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
+ u32 dm_len = 0;
+
+ /* dma memory setup for IOC */
+ bfa_mem_dma_setup(meminfo, ioc_dma,
+ BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
+
+ /* dma memory setup for REQ/RSP queues */
+ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+ per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+
+ for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
+ bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
+ per_reqq_sz);
+ bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
+ per_rspq_sz);
+ }
+
+ /* IOCFC dma memory - calculate Shadow CI/PI size */
+ for (q = 0; q < cfg->fwcfg.num_cqs; q++)
+ dm_len += (2 * BFA_CACHELINE_SZ);
+
+ /* IOCFC dma memory - calculate config info / rsp size */
+ dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+ dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+
+ /* dma memory setup for IOCFC */
+ bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
+
+ /* kva memory setup for IOCFC */
+ bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
+}
+
+/*
+ * Query IOC memory requirement information.
+ */
+void
+bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ int i;
+ struct bfa_ioc_s *ioc = &bfa->ioc;
+
+ bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
+ bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
+ bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
+ bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
+
+ ioc->trcmod = bfa->trcmod;
+ bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
+
+ bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
+ bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
+
+ bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
+ bfa_iocfc_mem_claim(bfa, cfg);
+ INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
+
+ INIT_LIST_HEAD(&bfa->comp_q);
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+ INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+ bfa->iocfc.cb_reqd = BFA_FALSE;
+ bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa->iocfc.submod_enabled = BFA_FALSE;
+
+ bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
+}
+
+/*
+ * Query IOC memory requirement information.
+ */
+void
+bfa_iocfc_init(struct bfa_s *bfa)
+{
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
+}
+
+/*
+ * IOC start called from bfa_start(). Called to start IOC operations
+ * at driver instantiation for this instance.
+ */
+void
+bfa_iocfc_start(struct bfa_s *bfa)
+{
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
+}
+
+/*
+ * IOC stop called from bfa_stop(). Called only when driver is unloaded
+ * for this instance.
+ */
+void
+bfa_iocfc_stop(struct bfa_s *bfa)
+{
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
+}
+
+void
+bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
+{
+ struct bfa_s *bfa = bfaarg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ union bfi_iocfc_i2h_msg_u *msg;
+
+ msg = (union bfi_iocfc_i2h_msg_u *) m;
+ bfa_trc(bfa, msg->mh.msg_id);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOCFC_I2H_CFG_REPLY:
+ bfa_iocfc_cfgrsp(bfa);
+ break;
+ case BFI_IOCFC_I2H_UPDATEQ_RSP:
+ iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
+ break;
+ case BFI_IOCFC_I2H_ADDR_MSG:
+ bfa_iocfc_process_faa_addr(bfa,
+ (struct bfi_faa_addr_msg_s *)msg);
+ break;
+ case BFI_IOCFC_I2H_FAA_QUERY_RSP:
+ bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+void
+bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
+
+ attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
+ be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
+ be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
+
+ attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
+ be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
+ be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
+
+ attr->config = iocfc->cfg;
+}
+
+bfa_status_t
+bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_set_intr_req_s *m;
+
+ iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
+ iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
+ iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
+
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_OK;
+
+ m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
+ if (!m)
+ return BFA_STATUS_DEVBUSY;
+
+ bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
+ bfa_fn_lpu(bfa));
+ m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
+ m->delay = iocfc->cfginfo->intr_attr.delay;
+ m->latency = iocfc->cfginfo->intr_attr.latency;
+
+ bfa_trc(bfa, attr->delay);
+ bfa_trc(bfa, attr->latency);
+
+ bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
+ bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
+}
+/*
+ * Enable IOC after it is disabled.
+ */
+void
+bfa_iocfc_enable(struct bfa_s *bfa)
+{
+ bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
+ "IOC Enable");
+ bfa->iocfc.cb_reqd = BFA_TRUE;
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
+}
+
+void
+bfa_iocfc_disable(struct bfa_s *bfa)
+{
+ bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
+ "IOC Disable");
+
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
+}
+
+bfa_boolean_t
+bfa_iocfc_is_operational(struct bfa_s *bfa)
+{
+ return bfa_ioc_is_operational(&bfa->ioc) &&
+ bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
+}
+
+/*
+ * Return boot target port wwns -- read from boot information in flash.
+ */
+void
+bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+ int i;
+
+ if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
+ bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
+ *nwwns = cfgrsp->pbc_cfg.nbluns;
+ for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
+ wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
+
+ return;
+ }
+
+ *nwwns = cfgrsp->bootwwns.nwwns;
+ memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
+}
+
+int
+bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
+ return cfgrsp->pbc_cfg.nvports;
+}
+
+
+/*
+ * Use this function query the memory requirement of the BFA library.
+ * This function needs to be called before bfa_attach() to get the
+ * memory required of the BFA layer for a given driver configuration.
+ *
+ * This call will fail, if the cap is out of range compared to pre-defined
+ * values within the BFA library
+ *
+ * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
+ * its configuration in this structure.
+ * The default values for struct bfa_iocfc_cfg_s can be
+ * fetched using bfa_cfg_get_default() API.
+ *
+ * If cap's boundary check fails, the library will use
+ * the default bfa_cap_t values (and log a warning msg).
+ *
+ * @param[out] meminfo - pointer to bfa_meminfo_t. This content
+ * indicates the memory type (see bfa_mem_type_t) and
+ * amount of memory required.
+ *
+ * Driver should allocate the memory, populate the
+ * starting address for each block and provide the same
+ * structure as input parameter to bfa_attach() call.
+ *
+ * @param[in] bfa - pointer to the bfa structure, used while fetching the
+ * dma, kva memory information of the bfa sub-modules.
+ *
+ * @return void
+ *
+ * Special Considerations: @note
+ */
+void
+bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+ int i;
+ struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
+ struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+ struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+ struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+ struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+ struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+ struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
+ struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
+
+ WARN_ON((cfg == NULL) || (meminfo == NULL));
+
+ memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
+
+ /* Initialize the DMA & KVA meminfo queues */
+ INIT_LIST_HEAD(&meminfo->dma_info.qe);
+ INIT_LIST_HEAD(&meminfo->kva_info.qe);
+
+ bfa_iocfc_meminfo(cfg, meminfo, bfa);
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->meminfo(cfg, meminfo, bfa);
+
+ /* dma info setup */
+ bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
+ bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
+ bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
+ bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
+ bfa_mem_dma_setup(meminfo, flash_dma,
+ bfa_flash_meminfo(cfg->drvcfg.min_cfg));
+ bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
+ bfa_mem_dma_setup(meminfo, phy_dma,
+ bfa_phy_meminfo(cfg->drvcfg.min_cfg));
+ bfa_mem_dma_setup(meminfo, fru_dma,
+ bfa_fru_meminfo(cfg->drvcfg.min_cfg));
+}
+
+/*
+ * Use this function to do attach the driver instance with the BFA
+ * library. This function will not trigger any HW initialization
+ * process (which will be done in bfa_init() call)
+ *
+ * This call will fail, if the cap is out of range compared to
+ * pre-defined values within the BFA library
+ *
+ * @param[out] bfa Pointer to bfa_t.
+ * @param[in] bfad Opaque handle back to the driver's IOC structure
+ * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
+ * that was used in bfa_cfg_get_meminfo().
+ * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
+ * use the bfa_cfg_get_meminfo() call to
+ * find the memory blocks required, allocate the
+ * required memory and provide the starting addresses.
+ * @param[in] pcidev pointer to struct bfa_pcidev_s
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ *
+ * @note
+ *
+ */
+void
+bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+ int i;
+ struct bfa_mem_dma_s *dma_info, *dma_elem;
+ struct bfa_mem_kva_s *kva_info, *kva_elem;
+ struct list_head *dm_qe, *km_qe;
+
+ bfa->fcs = BFA_FALSE;
+
+ WARN_ON((cfg == NULL) || (meminfo == NULL));
+
+ /* Initialize memory pointers for iterative allocation */
+ dma_info = &meminfo->dma_info;
+ dma_info->kva_curp = dma_info->kva;
+ dma_info->dma_curp = dma_info->dma;
+
+ kva_info = &meminfo->kva_info;
+ kva_info->kva_curp = kva_info->kva;
+
+ list_for_each(dm_qe, &dma_info->qe) {
+ dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+ dma_elem->kva_curp = dma_elem->kva;
+ dma_elem->dma_curp = dma_elem->dma;
+ }
+
+ list_for_each(km_qe, &kva_info->qe) {
+ kva_elem = (struct bfa_mem_kva_s *) km_qe;
+ kva_elem->kva_curp = kva_elem->kva;
+ }
+
+ bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
+
+ bfa_com_port_attach(bfa);
+ bfa_com_ablk_attach(bfa);
+ bfa_com_cee_attach(bfa);
+ bfa_com_sfp_attach(bfa);
+ bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
+ bfa_com_diag_attach(bfa);
+ bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
+ bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
+}
+
+/*
+ * Use this function to delete a BFA IOC. IOC should be stopped (by
+ * calling bfa_stop()) before this function call.
+ *
+ * @param[in] bfa - pointer to bfa_t.
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ *
+ * @note
+ */
+void
+bfa_detach(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->detach(bfa);
+ bfa_ioc_detach(&bfa->ioc);
+}
+
+void
+bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
+{
+ INIT_LIST_HEAD(comp_q);
+ list_splice_tail_init(&bfa->comp_q, comp_q);
+}
+
+void
+bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
+{
+ struct list_head *qe;
+ struct list_head *qen;
+ struct bfa_cb_qe_s *hcb_qe;
+ bfa_cb_cbfn_status_t cbfn;
+
+ list_for_each_safe(qe, qen, comp_q) {
+ hcb_qe = (struct bfa_cb_qe_s *) qe;
+ if (hcb_qe->pre_rmv) {
+ /* qe is invalid after return, dequeue before cbfn() */
+ list_del(qe);
+ cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+ cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+ } else
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+ }
+}
+
+void
+bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
+{
+ struct list_head *qe;
+ struct bfa_cb_qe_s *hcb_qe;
+
+ while (!list_empty(comp_q)) {
+ bfa_q_deq(comp_q, &qe);
+ hcb_qe = (struct bfa_cb_qe_s *) qe;
+ WARN_ON(hcb_qe->pre_rmv);
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
+ }
+}
+
+/*
+ * Return the list of PCI vendor/device id lists supported by this
+ * BFA instance.
+ */
+void
+bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
+{
+ static struct bfa_pciid_s __pciids[] = {
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
+ };
+
+ *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *pciids = __pciids;
+}
+
+/*
+ * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
+ * into BFA layer). The OS driver can then turn back and overwrite entries that
+ * have been configured by the user.
+ *
+ * @param[in] cfg - pointer to bfa_ioc_cfg_t
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ * note
+ */
+void
+bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
+{
+ cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
+ cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
+ cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
+ cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
+ cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
+ cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
+ cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
+ cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
+ cfg->fwcfg.num_fwtio_reqs = 0;
+
+ cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
+ cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
+ cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
+ cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
+ cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
+ cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
+ cfg->drvcfg.ioc_recover = BFA_FALSE;
+ cfg->drvcfg.delay_comp = BFA_FALSE;
+
+}
+
+void
+bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
+{
+ bfa_cfg_get_default(cfg);
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+ cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
+ cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
+ cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
+ cfg->fwcfg.num_rports = BFA_RPORT_MIN;
+ cfg->fwcfg.num_fwtio_reqs = 0;
+
+ cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+ cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
+ cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
+ cfg->drvcfg.min_cfg = BFA_TRUE;
+}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
new file mode 100644
index 000000000..91a8aa394
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfa_cs.h BFA common services
+ */
+
+#ifndef __BFA_CS_H__
+#define __BFA_CS_H__
+
+#include "bfad_drv.h"
+
+/*
+ * BFA TRC
+ */
+
+#ifndef BFA_TRC_MAX
+#define BFA_TRC_MAX (4 * 1024)
+#endif
+
+#define BFA_TRC_TS(_trcm) \
+ ({ \
+ struct timeval tv; \
+ \
+ do_gettimeofday(&tv); \
+ (tv.tv_sec*1000000+tv.tv_usec); \
+ })
+
+#ifndef BFA_TRC_TS
+#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
+#endif
+
+struct bfa_trc_s {
+#ifdef __BIG_ENDIAN
+ u16 fileno;
+ u16 line;
+#else
+ u16 line;
+ u16 fileno;
+#endif
+ u32 timestamp;
+ union {
+ struct {
+ u32 rsvd;
+ u32 u32;
+ } u32;
+ u64 u64;
+ } data;
+};
+
+struct bfa_trc_mod_s {
+ u32 head;
+ u32 tail;
+ u32 ntrc;
+ u32 stopped;
+ u32 ticks;
+ u32 rsvd[3];
+ struct bfa_trc_s trc[BFA_TRC_MAX];
+};
+
+enum {
+ BFA_TRC_HAL = 1, /* BFA modules */
+ BFA_TRC_FCS = 2, /* BFA FCS modules */
+ BFA_TRC_LDRV = 3, /* Linux driver modules */
+ BFA_TRC_CNA = 4, /* Common modules */
+};
+#define BFA_TRC_MOD_SH 10
+#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
+
+/*
+ * Define a new tracing file (module). Module should match one defined above.
+ */
+#define BFA_TRC_FILE(__mod, __submod) \
+ static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
+ BFA_TRC_MOD(__mod))
+
+
+#define bfa_trc32(_trcp, _data) \
+ __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
+#define bfa_trc(_trcp, _data) \
+ __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
+
+static inline void
+bfa_trc_init(struct bfa_trc_mod_s *trcm)
+{
+ trcm->head = trcm->tail = trcm->stopped = 0;
+ trcm->ntrc = BFA_TRC_MAX;
+}
+
+static inline void
+bfa_trc_stop(struct bfa_trc_mod_s *trcm)
+{
+ trcm->stopped = 1;
+}
+
+static inline void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
+{
+ int tail = trcm->tail;
+ struct bfa_trc_s *trc = &trcm->trc[tail];
+
+ if (trcm->stopped)
+ return;
+
+ trc->fileno = (u16) fileno;
+ trc->line = (u16) line;
+ trc->data.u64 = data;
+ trc->timestamp = BFA_TRC_TS(trcm);
+
+ trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+ if (trcm->tail == trcm->head)
+ trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+}
+
+
+static inline void
+__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
+{
+ int tail = trcm->tail;
+ struct bfa_trc_s *trc = &trcm->trc[tail];
+
+ if (trcm->stopped)
+ return;
+
+ trc->fileno = (u16) fileno;
+ trc->line = (u16) line;
+ trc->data.u32.u32 = data;
+ trc->timestamp = BFA_TRC_TS(trcm);
+
+ trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+ if (trcm->tail == trcm->head)
+ trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+}
+
+#define bfa_sm_fault(__mod, __event) do { \
+ bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
+ printk(KERN_ERR "Assertion failure: %s:%d: %d", \
+ __FILE__, __LINE__, (__event)); \
+} while (0)
+
+/* BFA queue definitions */
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) { \
+ bfa_q_next(_qe) = (struct list_head *) NULL; \
+ bfa_q_prev(_qe) = (struct list_head *) NULL; \
+}
+
+/*
+ * bfa_q_deq - dequeue an element from head of the queue
+ */
+#define bfa_q_deq(_q, _qe) do { \
+ if (!list_empty(_q)) { \
+ (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
+ bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
+ } else { \
+ *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
+ } \
+} while (0)
+
+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
+ bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+ } else { \
+ *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
+ } \
+}
+
+static inline int
+bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
+{
+ struct list_head *tqe;
+
+ tqe = bfa_q_next(q);
+ while (tqe != q) {
+ if (tqe == qe)
+ return 1;
+ tqe = bfa_q_next(tqe);
+ if (tqe == NULL)
+ break;
+ }
+ return 0;
+}
+
+#define bfa_q_is_on_q(_q, _qe) \
+ bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
+
+/*
+ * @ BFA state machine interfaces
+ */
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/*
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - object type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm) ((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+
+/*
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table_s {
+ bfa_sm_t sm; /* state machine function */
+ int state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+
+/*
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/*
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - object type, eg. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
+ static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
+} while (0)
+
+#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state) \
+ ((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+/*
+ * @ Generic wait counter.
+ */
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc_s {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc_s *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc_s *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/*
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/*
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc_s *wc)
+{
+ bfa_wc_down(wc);
+}
+
+static inline void
+wwn2str(char *wwn_str, u64 wwn)
+{
+ union {
+ u64 wwn;
+ u8 byte[8];
+ } w;
+
+ w.wwn = wwn;
+ sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
+ w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
+ w.byte[6], w.byte[7]);
+}
+
+static inline void
+fcid2str(char *fcid_str, u32 fcid)
+{
+ union {
+ u32 fcid;
+ u8 byte[4];
+ } f;
+
+ f.fcid = fcid;
+ sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
+}
+
+#define bfa_swap_3b(_x) \
+ ((((_x) & 0xff) << 16) | \
+ ((_x) & 0x00ff00) | \
+ (((_x) & 0xff0000) >> 16))
+
+#ifndef __BIG_ENDIAN
+#define bfa_hton3b(_x) bfa_swap_3b(_x)
+#else
+#define bfa_hton3b(_x) (_x)
+#endif
+
+#define bfa_ntoh3b(_x) bfa_hton3b(_x)
+
+#endif /* __BFA_CS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
new file mode 100644
index 000000000..877b86dd2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -0,0 +1,1287 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "bfa_fc.h"
+#include "bfad_drv.h"
+
+#define BFA_MFG_SERIALNUM_SIZE 11
+#define STRSZ(_n) (((_n) + 4) & ~3)
+
+/*
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
+ BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */
+ BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */
+ BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */
+ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */
+ BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */
+ BFA_MFG_TYPE_PROWLER_F = 1560, /* Prowler FC only cards */
+ BFA_MFG_TYPE_PROWLER_N = 1410, /* Prowler NIC only cards */
+ BFA_MFG_TYPE_PROWLER_C = 1710, /* Prowler CNA only cards */
+ BFA_MFG_TYPE_PROWLER_D = 1860, /* Prowler Dual cards */
+ BFA_MFG_TYPE_CHINOOK = 1867, /* Chinook cards */
+ BFA_MFG_TYPE_CHINOOK2 = 1869, /*!< Chinook2 cards */
+ BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
+};
+
+#pragma pack(1)
+
+/*
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE || \
+ (type) == BFA_MFG_TYPE_ASTRA || \
+ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+ (type) == BFA_MFG_TYPE_LIGHTNING || \
+ (type) == BFA_MFG_TYPE_CHINOOK || \
+ (type) == BFA_MFG_TYPE_CHINOOK2))
+
+/*
+ * Check if the card having old wwn/mac handling
+ */
+#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE))
+
+#define bfa_mfg_increment_wwn_mac(m, i) \
+do { \
+ u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
+ (u32)(m)[2]; \
+ t += (i); \
+ (m)[0] = (t >> 16) & 0xFF; \
+ (m)[1] = (t >> 8) & 0xFF; \
+ (m)[2] = t & 0xFF; \
+} while (0)
+
+/*
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN 512
+
+/*
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /* vendor IBM */
+ BFA_MFG_VPD_HP = 2, /* vendor HP */
+ BFA_MFG_VPD_DELL = 3, /* vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
+};
+
+/*
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd_s {
+ u8 version; /* vpd data version */
+ u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
+ u8 chksum; /* u8 checksum */
+ u8 vendor; /* vendor */
+ u8 len; /* vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
+};
+
+#pragma pack()
+
+/*
+ * Status return values
+ */
+enum bfa_status {
+ BFA_STATUS_OK = 0, /* Success */
+ BFA_STATUS_FAILED = 1, /* Operation failed */
+ BFA_STATUS_EINVAL = 2, /* Invalid params Check input
+ * parameters */
+ BFA_STATUS_ENOMEM = 3, /* Out of resources */
+ BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists,
+ * contact support */
+ BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
+ BFA_STATUS_BADFLASH = 9, /* Flash is bad */
+ BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */
+ BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */
+ BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */
+ BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
+ BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */
+ BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */
+ BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
+ BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
+ BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
+ BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */
+ BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */
+ BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
+ BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
+ BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
+ BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
+ BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
+ BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
+ BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
+ BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
+ BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
+ * contact support */
+ BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
+ BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */
+ BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational */
+ BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */
+ BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
+ BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
+ BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
+ BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
+ BFA_STATUS_ERROR_TRL_ENABLED = 87, /* TRL is enabled */
+ BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */
+ BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
+ BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */
+ BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
+ BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
+ BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
+ BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
+ * configuration */
+ BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
+ BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
+ BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
+ BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
+ * this adapter */
+ BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on
+ * the adapter */
+ BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
+ BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
+ BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
+ BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
+ BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
+ BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
+ BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
+ BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
+ BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
+ BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
+ BFA_STATUS_BBCR_FC_ONLY = 201, /*!< BBCredit Recovery is supported for *
+ * FC mode only */
+ BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
+ BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
+ BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */
+ BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported
+ * on mezz cards */
+ BFA_STATUS_INVALID_BW = 233, /* Invalid bandwidth value */
+ BFA_STATUS_QOS_BW_INVALID = 234, /* Invalid QOS bandwidth
+ * configuration */
+ BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */
+ BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */
+ BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */
+ BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */
+ BFA_STATUS_DPORT_NO_SFP = 243, /* SFP is not present.\n D-port will be
+ * enabled but it will be operational
+ * only after inserting a valid SFP. */
+ BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */
+ BFA_STATUS_DPORT_ENOSYS = 254, /* Switch has no D_Port functionality */
+ BFA_STATUS_DPORT_CANT_PERF = 255, /* Switch port is not D_Port capable
+ * or D_Port is disabled */
+ BFA_STATUS_DPORT_LOGICALERR = 256, /* Switch D_Port fail */
+ BFA_STATUS_DPORT_SWBUSY = 257, /* Switch port busy */
+ BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT = 258, /*!< BB credit recovery is
+ * supported at max port speed alone */
+ BFA_STATUS_ERROR_BBCR_ENABLED = 259, /*!< BB credit recovery
+ * is enabled */
+ BFA_STATUS_INVALID_BBSCN = 260, /*!< Invalid BBSCN value.
+ * Valid range is [1-15] */
+ BFA_STATUS_DDPORT_ERR = 261, /* Dynamic D_Port mode is active.\n To
+ * exit dynamic mode, disable D_Port on
+ * the remote port */
+ BFA_STATUS_DPORT_SFPWRAP_ERR = 262, /* Clear e/o_wrap fail, check or
+ * replace SFP */
+ BFA_STATUS_BBCR_CFG_NO_CHANGE = 265, /*!< BBCR is operational.
+ * Disable BBCR and try this operation again. */
+ BFA_STATUS_DPORT_SW_NOTREADY = 268, /* Remote port is not ready to
+ * start dport test. Check remote
+ * port status. */
+ BFA_STATUS_DPORT_INV_SFP = 271, /* Invalid SFP for D-PORT mode. */
+ BFA_STATUS_DPORT_CMD_NOTSUPP = 273, /* Dport is not supported by
+ * remote port */
+ BFA_STATUS_MAX_VAL /* Unknown error code */
+};
+#define bfa_status_t enum bfa_status
+
+enum bfa_eproto_status {
+ BFA_EPROTO_BAD_ACCEPT = 0,
+ BFA_EPROTO_UNKNOWN_RSP = 1
+};
+#define bfa_eproto_status_t enum bfa_eproto_status
+
+enum bfa_boolean {
+ BFA_FALSE = 0,
+ BFA_TRUE = 1
+};
+#define bfa_boolean_t enum bfa_boolean
+
+#define BFA_STRING_32 32
+#define BFA_VERSION_LEN 64
+
+/*
+ * ---------------------- adapter definitions ------------
+ */
+
+/*
+ * BFA adapter level attributes.
+ */
+enum {
+ BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+ /*
+ *!< adapter serial num length
+ */
+ BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */
+ BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */
+ BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */
+ BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */
+ BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */
+ BFA_ADAPTER_UUID_LEN = 16, /* adapter uuid length */
+};
+
+struct bfa_adapter_attr_s {
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 card_type;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ wwn_t pwwn;
+ char node_symname[FC_SYMNAME_MAX];
+ char hw_ver[BFA_VERSION_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+ char os_type[BFA_ADAPTER_OS_TYPE_LEN];
+ struct bfa_mfg_vpd_s vpd;
+ struct mac_s mac;
+
+ u8 nports;
+ u8 max_speed;
+ u8 prototype;
+ char asic_rev;
+
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 cna_capable;
+
+ u8 is_mezz;
+ u8 trunk_capable;
+ u8 mfg_day; /* manufacturing day */
+ u8 mfg_month; /* manufacturing month */
+ u16 mfg_year; /* manufacturing year */
+ u16 rsvd;
+ u8 uuid[BFA_ADAPTER_UUID_LEN];
+};
+
+/*
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+ BFA_IOC_DRIVER_LEN = 16,
+ BFA_IOC_CHIP_REV_LEN = 8,
+};
+
+/*
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr_s {
+ char driver[BFA_IOC_DRIVER_LEN]; /* driver name */
+ char driver_ver[BFA_VERSION_LEN]; /* driver version */
+ char fw_ver[BFA_VERSION_LEN]; /* firmware version */
+ char bios_ver[BFA_VERSION_LEN]; /* bios version */
+ char efi_ver[BFA_VERSION_LEN]; /* EFI version */
+ char ob_ver[BFA_VERSION_LEN]; /* openboot version */
+};
+
+/*
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr_s {
+ u16 vendor_id; /* PCI vendor ID */
+ u16 device_id; /* PCI device ID */
+ u16 ssid; /* subsystem ID */
+ u16 ssvid; /* subsystem vendor ID */
+ u32 pcifn; /* PCI device function */
+ u32 rsvd; /* padding */
+ char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
+};
+
+/*
+ * IOC states
+ */
+enum bfa_ioc_state {
+ BFA_IOC_UNINIT = 1, /* IOC is in uninit state */
+ BFA_IOC_RESET = 2, /* IOC is in reset state */
+ BFA_IOC_SEMWAIT = 3, /* Waiting for IOC h/w semaphore */
+ BFA_IOC_HWINIT = 4, /* IOC h/w is being initialized */
+ BFA_IOC_GETATTR = 5, /* IOC is being configured */
+ BFA_IOC_OPERATIONAL = 6, /* IOC is operational */
+ BFA_IOC_INITFAIL = 7, /* IOC hardware failure */
+ BFA_IOC_FAIL = 8, /* IOC heart-beat failure */
+ BFA_IOC_DISABLING = 9, /* IOC is being disabled */
+ BFA_IOC_DISABLED = 10, /* IOC is disabled */
+ BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */
+ BFA_IOC_ENABLING = 12, /* IOC is being enabled */
+ BFA_IOC_HWFAIL = 13, /* PCI mapping doesn't exist */
+ BFA_IOC_ACQ_ADDR = 14, /* Acquiring addr from fabric */
+};
+
+/*
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats_s {
+ u32 enable_reqs;
+ u32 disable_reqs;
+ u32 get_attr_reqs;
+ u32 dbg_sync;
+ u32 dbg_dump;
+ u32 unknown_reqs;
+};
+
+/*
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats_s {
+ u32 ioc_isrs;
+ u32 ioc_enables;
+ u32 ioc_disables;
+ u32 ioc_hbfails;
+ u32 ioc_boots;
+ u32 stats_tmos;
+ u32 hb_count;
+ u32 disable_reqs;
+ u32 enable_reqs;
+ u32 disable_replies;
+ u32 enable_replies;
+ u32 rsvd;
+};
+
+/*
+ * IOC statistics
+ */
+struct bfa_ioc_stats_s {
+ struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */
+ struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */
+};
+
+enum bfa_ioc_type_e {
+ BFA_IOC_TYPE_FC = 1,
+ BFA_IOC_TYPE_FCoE = 2,
+ BFA_IOC_TYPE_LL = 3,
+};
+
+/*
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr_s {
+ enum bfa_ioc_type_e ioc_type;
+ enum bfa_ioc_state state; /* IOC state */
+ struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */
+ struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
+ struct bfa_ioc_pci_attr_s pci_attr;
+ u8 port_id; /* port number */
+ u8 port_mode; /* bfa_mode_s */
+ u8 cap_bm; /* capability */
+ u8 port_mode_cfg; /* bfa_mode_s */
+ u8 def_fn; /* 1 if default fn */
+ u8 rsvd[3]; /* 64bit align */
+};
+
+/*
+ * AEN related definitions
+ */
+enum bfa_aen_category {
+ BFA_AEN_CAT_ADAPTER = 1,
+ BFA_AEN_CAT_PORT = 2,
+ BFA_AEN_CAT_LPORT = 3,
+ BFA_AEN_CAT_RPORT = 4,
+ BFA_AEN_CAT_ITNIM = 5,
+ BFA_AEN_CAT_AUDIT = 8,
+ BFA_AEN_CAT_IOC = 9,
+};
+
+/* BFA adapter level events */
+enum bfa_adapter_aen_event {
+ BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
+ BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
+};
+
+struct bfa_adapter_aen_data_s {
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 nports; /* Number of NPorts */
+ wwn_t pwwn; /* WWN of one of its physical port */
+};
+
+/* BFA physical port Level events */
+enum bfa_port_aen_event {
+ BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
+ BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
+ BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
+ BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
+ BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
+ BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
+ BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
+ BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
+ BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
+ BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
+ BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
+ BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
+ BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
+ BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
+ BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
+};
+
+enum bfa_port_aen_sfp_pom {
+ BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
+ BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
+ BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
+ BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
+};
+
+struct bfa_port_aen_data_s {
+ wwn_t pwwn; /* WWN of the physical port */
+ wwn_t fwwn; /* WWN of the fabric port */
+ u32 phy_port_num; /* For SFP related events */
+ u16 ioc_type;
+ u16 level; /* Only transitions will be informed */
+ mac_t mac; /* MAC address of the ethernet port */
+ u16 rsvd;
+};
+
+/* BFA AEN logical port events */
+enum bfa_lport_aen_event {
+ BFA_LPORT_AEN_NEW = 1, /* LPort created event */
+ BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
+ BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
+ BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
+ BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
+ BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
+ BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
+ BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
+ BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
+ BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
+ BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
+ BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
+};
+
+struct bfa_lport_aen_data_s {
+ u16 vf_id; /* vf_id of this logical port */
+ u16 roles; /* Logical port mode,IM/TM/IP etc */
+ u32 rsvd;
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of this logical port */
+};
+
+/* BFA ITNIM events */
+enum bfa_itnim_aen_event {
+ BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
+ BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
+ BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
+};
+
+struct bfa_itnim_aen_data_s {
+ u16 vf_id; /* vf_id of the IT nexus */
+ u16 rsvd[3];
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of logical port */
+ wwn_t rpwwn; /* WWN of remote(target) port */
+};
+
+/* BFA audit events */
+enum bfa_audit_aen_event {
+ BFA_AUDIT_AEN_AUTH_ENABLE = 1,
+ BFA_AUDIT_AEN_AUTH_DISABLE = 2,
+ BFA_AUDIT_AEN_FLASH_ERASE = 3,
+ BFA_AUDIT_AEN_FLASH_UPDATE = 4,
+};
+
+struct bfa_audit_aen_data_s {
+ wwn_t pwwn;
+ int partition_inst;
+ int partition_type;
+};
+
+/* BFA IOC level events */
+enum bfa_ioc_aen_event {
+ BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
+ BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
+ BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
+ BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
+ BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
+ BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
+ BFA_IOC_AEN_INVALID_VENDOR = 7,
+ BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
+ BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
+};
+
+struct bfa_ioc_aen_data_s {
+ wwn_t pwwn;
+ u16 ioc_type;
+ mac_t mac;
+};
+
+/*
+ * ---------------------- mfg definitions ------------
+ */
+
+/*
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
+
+#define BFA_MFG_PARTNUM_SIZE 14
+#define BFA_MFG_SUPPLIER_ID_SIZE 10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+/*
+ * Initial capability definition
+ */
+#define BFA_MFG_IC_FC 0x01
+#define BFA_MFG_IC_ETH 0x02
+
+/*
+ * Adapter capability mask definition
+ */
+#define BFA_CM_HBA 0x01
+#define BFA_CM_CNA 0x02
+#define BFA_CM_NIC 0x04
+#define BFA_CM_FC16G 0x08
+#define BFA_CM_SRIOV 0x10
+#define BFA_CM_MEZZ 0x20
+
+#pragma pack(1)
+
+/*
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block_s {
+ u8 version; /*!< manufacturing block version */
+ u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+ u16 mfgsize; /*!< mfg block size */
+ u16 u16_chksum; /*!< old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /*!< manufacturing day */
+ u8 mfg_month; /*!< manufacturing month */
+ u16 mfg_year; /*!< manufacturing year */
+ wwn_t mfg_wwn; /*!< wwn base for this adapter */
+ u8 num_wwn; /*!< number of wwns assigned */
+ u8 mfg_speeds; /*!< speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /*!< base mac address */
+ u8 num_mac; /*!< number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /*!< card type */
+ char cap_nic; /*!< capability nic */
+ char cap_cna; /*!< capability cna */
+ char cap_hba; /*!< capability hba */
+ char cap_fc16g; /*!< capability fc 16g */
+ char cap_sriov; /*!< capability sriov */
+ char cap_mezz; /*!< capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /*!< number of ports */
+ char media[8]; /*!< xfi/xaui */
+ char initial_mode[8]; /*!< initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+};
+
+#pragma pack()
+
+/*
+ * ---------------------- pci definitions ------------
+ */
+
+/*
+ * PCI device and vendor ID information
+ */
+enum {
+ BFA_PCI_VENDOR_ID_BROCADE = 0x1657,
+ BFA_PCI_DEVICE_ID_FC_8G2P = 0x13,
+ BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
+ BFA_PCI_DEVICE_ID_CT = 0x14,
+ BFA_PCI_DEVICE_ID_CT_FC = 0x21,
+ BFA_PCI_DEVICE_ID_CT2 = 0x22,
+ BFA_PCI_DEVICE_ID_CT2_QUAD = 0x23,
+};
+
+#define bfa_asic_id_cb(__d) \
+ ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P || \
+ (__d) == BFA_PCI_DEVICE_ID_FC_8G1P)
+#define bfa_asic_id_ct(__d) \
+ ((__d) == BFA_PCI_DEVICE_ID_CT || \
+ (__d) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_ct2(__d) \
+ ((__d) == BFA_PCI_DEVICE_ID_CT2 || \
+ (__d) == BFA_PCI_DEVICE_ID_CT2_QUAD)
+#define bfa_asic_id_ctc(__d) \
+ (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
+
+/*
+ * PCI sub-system device and vendor ID information
+ */
+enum {
+ BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
+ BFA_PCI_CT2_SSID_FCoE = 0x22,
+ BFA_PCI_CT2_SSID_ETH = 0x23,
+ BFA_PCI_CT2_SSID_FC = 0x24,
+};
+
+/*
+ * Maximum number of device address ranges mapped through different BAR(s)
+ */
+#define BFA_PCI_ACCESS_RANGES 1
+
+/*
+ * Port speed settings. Each specific speed is a bit field. Use multiple
+ * bits to specify speeds to be selected for auto-negotiation.
+ */
+enum bfa_port_speed {
+ BFA_PORT_SPEED_UNKNOWN = 0,
+ BFA_PORT_SPEED_1GBPS = 1,
+ BFA_PORT_SPEED_2GBPS = 2,
+ BFA_PORT_SPEED_4GBPS = 4,
+ BFA_PORT_SPEED_8GBPS = 8,
+ BFA_PORT_SPEED_10GBPS = 10,
+ BFA_PORT_SPEED_16GBPS = 16,
+ BFA_PORT_SPEED_AUTO = 0xf,
+};
+#define bfa_port_speed_t enum bfa_port_speed
+
+enum {
+ BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
+ BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */
+};
+
+#define BOOT_CFG_REV1 1
+#define BOOT_CFG_VLAN 1
+
+/*
+ * Boot options setting. Boot options setting determines from where
+ * to get the boot lun information
+ */
+enum bfa_boot_bootopt {
+ BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */
+ BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */
+ BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */
+ BFA_BOOT_PBC = 3, /* Boot from pbc configured blun */
+};
+
+#pragma pack(1)
+/*
+ * Boot lun information.
+ */
+struct bfa_boot_bootlun_s {
+ wwn_t pwwn; /* port wwn of target */
+ struct scsi_lun lun; /* 64-bit lun */
+};
+#pragma pack()
+
+/*
+ * BOOT boot configuraton
+ */
+struct bfa_boot_cfg_s {
+ u8 version;
+ u8 rsvd1;
+ u16 chksum;
+ u8 enable; /* enable/disable SAN boot */
+ u8 speed; /* boot speed settings */
+ u8 topology; /* boot topology setting */
+ u8 bootopt; /* bfa_boot_bootopt_t */
+ u32 nbluns; /* number of boot luns */
+ u32 rsvd2;
+ struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
+ struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
+};
+
+struct bfa_boot_pbc_s {
+ u8 enable; /* enable/disable SAN boot */
+ u8 speed; /* boot speed settings */
+ u8 topology; /* boot topology setting */
+ u8 rsvd1;
+ u32 nbluns; /* number of boot luns */
+ struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
+};
+
+struct bfa_ethboot_cfg_s {
+ u8 version;
+ u8 rsvd1;
+ u16 chksum;
+ u8 enable; /* enable/disable Eth/PXE boot */
+ u8 rsvd2;
+ u16 vlan;
+};
+
+/*
+ * ASIC block configuration related structures
+ */
+#define BFA_ABLK_MAX_PORTS 2
+#define BFA_ABLK_MAX_PFS 16
+#define BFA_ABLK_MAX 2
+
+#pragma pack(1)
+enum bfa_mode_s {
+ BFA_MODE_HBA = 1,
+ BFA_MODE_CNA = 2,
+ BFA_MODE_NIC = 3
+};
+
+struct bfa_adapter_cfg_mode_s {
+ u16 max_pf;
+ u16 max_vf;
+ enum bfa_mode_s mode;
+};
+
+struct bfa_ablk_cfg_pf_s {
+ u16 pers;
+ u8 port_id;
+ u8 optrom;
+ u8 valid;
+ u8 sriov;
+ u8 max_vfs;
+ u8 rsvd[1];
+ u16 num_qpairs;
+ u16 num_vectors;
+ u16 bw_min;
+ u16 bw_max;
+};
+
+struct bfa_ablk_cfg_port_s {
+ u8 mode;
+ u8 type;
+ u8 max_pfs;
+ u8 rsvd[5];
+};
+
+struct bfa_ablk_cfg_inst_s {
+ u8 nports;
+ u8 max_pfs;
+ u8 rsvd[6];
+ struct bfa_ablk_cfg_pf_s pf_cfg[BFA_ABLK_MAX_PFS];
+ struct bfa_ablk_cfg_port_s port_cfg[BFA_ABLK_MAX_PORTS];
+};
+
+struct bfa_ablk_cfg_s {
+ struct bfa_ablk_cfg_inst_s inst[BFA_ABLK_MAX];
+};
+
+
+/*
+ * SFP module specific
+ */
+#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
+
+/* SFP state change notification event */
+#define BFA_SFP_SCN_REMOVED 0
+#define BFA_SFP_SCN_INSERTED 1
+#define BFA_SFP_SCN_POM 2
+#define BFA_SFP_SCN_FAILED 3
+#define BFA_SFP_SCN_UNSUPPORT 4
+#define BFA_SFP_SCN_VALID 5
+
+enum bfa_defs_sfp_media_e {
+ BFA_SFP_MEDIA_UNKNOWN = 0x00,
+ BFA_SFP_MEDIA_CU = 0x01,
+ BFA_SFP_MEDIA_LW = 0x02,
+ BFA_SFP_MEDIA_SW = 0x03,
+ BFA_SFP_MEDIA_EL = 0x04,
+ BFA_SFP_MEDIA_UNSUPPORT = 0x05,
+};
+
+/*
+ * values for xmtr_tech above
+ */
+enum {
+ SFP_XMTR_TECH_CU = (1 << 0), /* copper FC-BaseT */
+ SFP_XMTR_TECH_CP = (1 << 1), /* copper passive */
+ SFP_XMTR_TECH_CA = (1 << 2), /* copper active */
+ SFP_XMTR_TECH_LL = (1 << 3), /* longwave laser */
+ SFP_XMTR_TECH_SL = (1 << 4), /* shortwave laser w/ OFC */
+ SFP_XMTR_TECH_SN = (1 << 5), /* shortwave laser w/o OFC */
+ SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */
+ SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */
+ SFP_XMTR_TECH_LC = (1 << 8), /* longwave laser */
+ SFP_XMTR_TECH_SA = (1 << 9)
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Basic ID field total 64 bytes
+ */
+struct sfp_srlid_base_s {
+ u8 id; /* 00: Identifier */
+ u8 extid; /* 01: Extended Identifier */
+ u8 connector; /* 02: Connector */
+ u8 xcvr[8]; /* 03-10: Transceiver */
+ u8 encoding; /* 11: Encoding */
+ u8 br_norm; /* 12: BR, Nominal */
+ u8 rate_id; /* 13: Rate Identifier */
+ u8 len_km; /* 14: Length single mode km */
+ u8 len_100m; /* 15: Length single mode 100m */
+ u8 len_om2; /* 16: Length om2 fiber 10m */
+ u8 len_om1; /* 17: Length om1 fiber 10m */
+ u8 len_cu; /* 18: Length copper 1m */
+ u8 len_om3; /* 19: Length om3 fiber 10m */
+ u8 vendor_name[16];/* 20-35 */
+ u8 unalloc1;
+ u8 vendor_oui[3]; /* 37-39 */
+ u8 vendor_pn[16]; /* 40-55 */
+ u8 vendor_rev[4]; /* 56-59 */
+ u8 wavelen[2]; /* 60-61 */
+ u8 unalloc2;
+ u8 cc_base; /* 63: check code for base id field */
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Extended id field total 32 bytes
+ */
+struct sfp_srlid_ext_s {
+ u8 options[2];
+ u8 br_max;
+ u8 br_min;
+ u8 vendor_sn[16];
+ u8 date_code[8];
+ u8 diag_mon_type; /* 92: Diagnostic Monitoring type */
+ u8 en_options;
+ u8 sff_8472;
+ u8 cc_ext;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status base field total 96 bytes
+ */
+struct sfp_diag_base_s {
+ /*
+ * Alarm and warning Thresholds 40 bytes
+ */
+ u8 temp_high_alarm[2]; /* 00-01 */
+ u8 temp_low_alarm[2]; /* 02-03 */
+ u8 temp_high_warning[2]; /* 04-05 */
+ u8 temp_low_warning[2]; /* 06-07 */
+
+ u8 volt_high_alarm[2]; /* 08-09 */
+ u8 volt_low_alarm[2]; /* 10-11 */
+ u8 volt_high_warning[2]; /* 12-13 */
+ u8 volt_low_warning[2]; /* 14-15 */
+
+ u8 bias_high_alarm[2]; /* 16-17 */
+ u8 bias_low_alarm[2]; /* 18-19 */
+ u8 bias_high_warning[2]; /* 20-21 */
+ u8 bias_low_warning[2]; /* 22-23 */
+
+ u8 tx_pwr_high_alarm[2]; /* 24-25 */
+ u8 tx_pwr_low_alarm[2]; /* 26-27 */
+ u8 tx_pwr_high_warning[2]; /* 28-29 */
+ u8 tx_pwr_low_warning[2]; /* 30-31 */
+
+ u8 rx_pwr_high_alarm[2]; /* 32-33 */
+ u8 rx_pwr_low_alarm[2]; /* 34-35 */
+ u8 rx_pwr_high_warning[2]; /* 36-37 */
+ u8 rx_pwr_low_warning[2]; /* 38-39 */
+
+ u8 unallocate_1[16];
+
+ /*
+ * ext_cal_const[36]
+ */
+ u8 rx_pwr[20];
+ u8 tx_i[4];
+ u8 tx_pwr[4];
+ u8 temp[4];
+ u8 volt[4];
+ u8 unallocate_2[3];
+ u8 cc_dmi;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status extended field total 24 bytes
+ */
+struct sfp_diag_ext_s {
+ u8 diag[SFP_DIAGMON_SIZE];
+ u8 unalloc1[4];
+ u8 status_ctl;
+ u8 rsvd;
+ u8 alarm_flags[2];
+ u8 unalloc2[2];
+ u8 warning_flags[2];
+ u8 ext_status_ctl[2];
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * General Use Fields: User Writable Table - Features's Control Registers
+ * Total 32 bytes
+ */
+struct sfp_usr_eeprom_s {
+ u8 rsvd1[2]; /* 128-129 */
+ u8 ewrap; /* 130 */
+ u8 rsvd2[2]; /* */
+ u8 owrap; /* 133 */
+ u8 rsvd3[2]; /* */
+ u8 prbs; /* 136: PRBS 7 generator */
+ u8 rsvd4[2]; /* */
+ u8 tx_eqz_16; /* 139: TX Equalizer (16xFC) */
+ u8 tx_eqz_8; /* 140: TX Equalizer (8xFC) */
+ u8 rsvd5[2]; /* */
+ u8 rx_emp_16; /* 143: RX Emphasis (16xFC) */
+ u8 rx_emp_8; /* 144: RX Emphasis (8xFC) */
+ u8 rsvd6[2]; /* */
+ u8 tx_eye_adj; /* 147: TX eye Threshold Adjust */
+ u8 rsvd7[3]; /* */
+ u8 tx_eye_qctl; /* 151: TX eye Quality Control */
+ u8 tx_eye_qres; /* 152: TX eye Quality Result */
+ u8 rsvd8[2]; /* */
+ u8 poh[3]; /* 155-157: Power On Hours */
+ u8 rsvd9[2]; /* */
+};
+
+struct sfp_mem_s {
+ struct sfp_srlid_base_s srlid_base;
+ struct sfp_srlid_ext_s srlid_ext;
+ struct sfp_diag_base_s diag_base;
+ struct sfp_diag_ext_s diag_ext;
+ struct sfp_usr_eeprom_s usr_eeprom;
+};
+
+/*
+ * transceiver codes (SFF-8472 Rev 10.2 Table 3.5)
+ */
+union sfp_xcvr_e10g_code_u {
+ u8 b;
+ struct {
+#ifdef __BIG_ENDIAN
+ u8 e10g_unall:1; /* 10G Ethernet compliance */
+ u8 e10g_lrm:1;
+ u8 e10g_lr:1;
+ u8 e10g_sr:1;
+ u8 ib_sx:1; /* Infiniband compliance */
+ u8 ib_lx:1;
+ u8 ib_cu_a:1;
+ u8 ib_cu_p:1;
+#else
+ u8 ib_cu_p:1;
+ u8 ib_cu_a:1;
+ u8 ib_lx:1;
+ u8 ib_sx:1; /* Infiniband compliance */
+ u8 e10g_sr:1;
+ u8 e10g_lr:1;
+ u8 e10g_lrm:1;
+ u8 e10g_unall:1; /* 10G Ethernet compliance */
+#endif
+ } r;
+};
+
+union sfp_xcvr_so1_code_u {
+ u8 b;
+ struct {
+ u8 escon:2; /* ESCON compliance code */
+ u8 oc192_reach:1; /* SONET compliance code */
+ u8 so_reach:2;
+ u8 oc48_reach:3;
+ } r;
+};
+
+union sfp_xcvr_so2_code_u {
+ u8 b;
+ struct {
+ u8 reserved:1;
+ u8 oc12_reach:3; /* OC12 reach */
+ u8 reserved1:1;
+ u8 oc3_reach:3; /* OC3 reach */
+ } r;
+};
+
+union sfp_xcvr_eth_code_u {
+ u8 b;
+ struct {
+ u8 base_px:1;
+ u8 base_bx10:1;
+ u8 e100base_fx:1;
+ u8 e100base_lx:1;
+ u8 e1000base_t:1;
+ u8 e1000base_cx:1;
+ u8 e1000base_lx:1;
+ u8 e1000base_sx:1;
+ } r;
+};
+
+struct sfp_xcvr_fc1_code_s {
+ u8 link_len:5; /* FC link length */
+ u8 xmtr_tech2:3;
+ u8 xmtr_tech1:7; /* FC transmitter technology */
+ u8 reserved1:1;
+};
+
+union sfp_xcvr_fc2_code_u {
+ u8 b;
+ struct {
+ u8 tw_media:1; /* twin axial pair (tw) */
+ u8 tp_media:1; /* shielded twisted pair (sp) */
+ u8 mi_media:1; /* miniature coax (mi) */
+ u8 tv_media:1; /* video coax (tv) */
+ u8 m6_media:1; /* multimode, 62.5m (m6) */
+ u8 m5_media:1; /* multimode, 50m (m5) */
+ u8 reserved:1;
+ u8 sm_media:1; /* single mode (sm) */
+ } r;
+};
+
+union sfp_xcvr_fc3_code_u {
+ u8 b;
+ struct {
+#ifdef __BIG_ENDIAN
+ u8 rsv4:1;
+ u8 mb800:1; /* 800 Mbytes/sec */
+ u8 mb1600:1; /* 1600 Mbytes/sec */
+ u8 mb400:1; /* 400 Mbytes/sec */
+ u8 rsv2:1;
+ u8 mb200:1; /* 200 Mbytes/sec */
+ u8 rsv1:1;
+ u8 mb100:1; /* 100 Mbytes/sec */
+#else
+ u8 mb100:1; /* 100 Mbytes/sec */
+ u8 rsv1:1;
+ u8 mb200:1; /* 200 Mbytes/sec */
+ u8 rsv2:1;
+ u8 mb400:1; /* 400 Mbytes/sec */
+ u8 mb1600:1; /* 1600 Mbytes/sec */
+ u8 mb800:1; /* 800 Mbytes/sec */
+ u8 rsv4:1;
+#endif
+ } r;
+};
+
+struct sfp_xcvr_s {
+ union sfp_xcvr_e10g_code_u e10g;
+ union sfp_xcvr_so1_code_u so1;
+ union sfp_xcvr_so2_code_u so2;
+ union sfp_xcvr_eth_code_u eth;
+ struct sfp_xcvr_fc1_code_s fc1;
+ union sfp_xcvr_fc2_code_u fc2;
+ union sfp_xcvr_fc3_code_u fc3;
+};
+
+/*
+ * Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
+#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
+
+enum bfa_flash_part_type {
+ BFA_FLASH_PART_OPTROM = 1, /* option rom partition */
+ BFA_FLASH_PART_FWIMG = 2, /* firmware image partition */
+ BFA_FLASH_PART_FWCFG = 3, /* firmware tuneable config */
+ BFA_FLASH_PART_DRV = 4, /* IOC driver config */
+ BFA_FLASH_PART_BOOT = 5, /* boot config */
+ BFA_FLASH_PART_ASIC = 6, /* asic bootstrap configuration */
+ BFA_FLASH_PART_MFG = 7, /* manufacturing block partition */
+ BFA_FLASH_PART_OPTROM2 = 8, /* 2nd option rom partition */
+ BFA_FLASH_PART_VPD = 9, /* vpd data of OEM info */
+ BFA_FLASH_PART_PBC = 10, /* pre-boot config */
+ BFA_FLASH_PART_BOOTOVL = 11, /* boot overlay partition */
+ BFA_FLASH_PART_LOG = 12, /* firmware log partition */
+ BFA_FLASH_PART_PXECFG = 13, /* pxe boot config partition */
+ BFA_FLASH_PART_PXEOVL = 14, /* pxe boot overlay partition */
+ BFA_FLASH_PART_PORTCFG = 15, /* port cfg partition */
+ BFA_FLASH_PART_ASICBK = 16, /* asic backup partition */
+};
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr_s {
+ u32 part_type; /* partition type */
+ u32 part_instance; /* partition instance */
+ u32 part_off; /* partition offset */
+ u32 part_size; /* partition size */
+ u32 part_len; /* partition content length */
+ u32 part_status; /* partition status */
+ char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr_s {
+ u32 status; /* flash overall status */
+ u32 npart; /* num of partitions */
+ struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX];
+};
+
+/*
+ * DIAG module specific
+ */
+#define LB_PATTERN_DEFAULT 0xB5B5B5B5
+#define QTEST_CNT_DEFAULT 10
+#define QTEST_PAT_DEFAULT LB_PATTERN_DEFAULT
+#define DPORT_ENABLE_LOOPCNT_DEFAULT (1024 * 1024)
+
+struct bfa_diag_memtest_s {
+ u8 algo;
+ u8 rsvd[7];
+};
+
+struct bfa_diag_memtest_result {
+ u32 status;
+ u32 addr;
+ u32 exp; /* expect value read from reg */
+ u32 act; /* actually value read */
+ u32 err_status; /* error status reg */
+ u32 err_status1; /* extra error info reg */
+ u32 err_addr; /* error address reg */
+ u8 algo;
+ u8 rsv[3];
+};
+
+struct bfa_diag_loopback_result_s {
+ u32 numtxmfrm; /* no. of transmit frame */
+ u32 numosffrm; /* no. of outstanding frame */
+ u32 numrcvfrm; /* no. of received good frame */
+ u32 badfrminf; /* mis-match info */
+ u32 badfrmnum; /* mis-match fram number */
+ u8 status; /* loopback test result */
+ u8 rsvd[3];
+};
+
+enum bfa_diag_dport_test_status {
+ DPORT_TEST_ST_IDLE = 0, /* the test has not started yet. */
+ DPORT_TEST_ST_FINAL = 1, /* the test done successfully */
+ DPORT_TEST_ST_SKIP = 2, /* the test skipped */
+ DPORT_TEST_ST_FAIL = 3, /* the test failed */
+ DPORT_TEST_ST_INPRG = 4, /* the testing is in progress */
+ DPORT_TEST_ST_RESPONDER = 5, /* test triggered from remote port */
+ DPORT_TEST_ST_STOPPED = 6, /* the test stopped by user. */
+ DPORT_TEST_ST_MAX
+};
+
+enum bfa_diag_dport_test_type {
+ DPORT_TEST_ELOOP = 0,
+ DPORT_TEST_OLOOP = 1,
+ DPORT_TEST_ROLOOP = 2,
+ DPORT_TEST_LINK = 3,
+ DPORT_TEST_MAX
+};
+
+enum bfa_diag_dport_test_opmode {
+ BFA_DPORT_OPMODE_AUTO = 0,
+ BFA_DPORT_OPMODE_MANU = 1,
+};
+
+struct bfa_diag_dport_subtest_result_s {
+ u8 status; /* bfa_diag_dport_test_status */
+ u8 rsvd[7]; /* 64bit align */
+ u64 start_time; /* timestamp */
+};
+
+struct bfa_diag_dport_result_s {
+ wwn_t rp_pwwn; /* switch port wwn */
+ wwn_t rp_nwwn; /* switch node wwn */
+ u64 start_time; /* user/sw start time */
+ u64 end_time; /* timestamp */
+ u8 status; /* bfa_diag_dport_test_status */
+ u8 mode; /* bfa_diag_dport_test_opmode */
+ u8 rsvd; /* 64bit align */
+ u8 speed; /* link speed for buf_reqd */
+ u16 buffer_required;
+ u16 frmsz; /* frame size for buf_reqd */
+ u32 lpcnt; /* Frame count */
+ u32 pat; /* Pattern */
+ u32 roundtrip_latency; /* in nano sec */
+ u32 est_cable_distance; /* in meter */
+ struct bfa_diag_dport_subtest_result_s subtest[DPORT_TEST_MAX];
+};
+
+struct bfa_diag_ledtest_s {
+ u32 cmd; /* bfa_led_op_t */
+ u32 color; /* bfa_led_color_t */
+ u16 freq; /* no. of blinks every 10 secs */
+ u8 led; /* bitmap of LEDs to be tested */
+ u8 rsvd[5];
+};
+
+struct bfa_diag_loopback_s {
+ u32 loopcnt;
+ u32 pattern;
+ u8 lb_mode; /* bfa_port_opmode_t */
+ u8 speed; /* bfa_port_speed_t */
+ u8 rsvd[2];
+};
+
+/*
+ * PHY module specific
+ */
+enum bfa_phy_status_e {
+ BFA_PHY_STATUS_GOOD = 0, /* phy is good */
+ BFA_PHY_STATUS_NOT_PRESENT = 1, /* phy does not exist */
+ BFA_PHY_STATUS_BAD = 2, /* phy is bad */
+};
+
+/*
+ * phy attributes for phy query
+ */
+struct bfa_phy_attr_s {
+ u32 status; /* phy present/absent status */
+ u32 length; /* firmware length */
+ u32 fw_ver; /* firmware version */
+ u32 an_status; /* AN status */
+ u32 pma_pmd_status; /* PMA/PMD link status */
+ u32 pma_pmd_signal; /* PMA/PMD signal detect */
+ u32 pcs_status; /* PCS link status */
+};
+
+/*
+ * phy stats
+ */
+struct bfa_phy_stats_s {
+ u32 status; /* phy stats status */
+ u32 link_breaks; /* Num of link breaks after linkup */
+ u32 pma_pmd_fault; /* NPMA/PMD fault */
+ u32 pcs_fault; /* PCS fault */
+ u32 speed_neg; /* Num of speed negotiation */
+ u32 tx_eq_training; /* Num of TX EQ training */
+ u32 tx_eq_timeout; /* Num of TX EQ timeout */
+ u32 crc_error; /* Num of CRC errors */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
new file mode 100644
index 000000000..06f0a163c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_FCS_H__
+#define __BFA_DEFS_FCS_H__
+
+#include "bfa_fc.h"
+#include "bfa_defs_svc.h"
+
+/*
+ * VF states
+ */
+enum bfa_vf_state {
+ BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
+ BFA_VF_LINK_DOWN = 1, /* link is down */
+ BFA_VF_FLOGI = 2, /* flogi is in progress */
+ BFA_VF_AUTH = 3, /* authentication in progress */
+ BFA_VF_NOFABRIC = 4, /* fabric is not present */
+ BFA_VF_ONLINE = 5, /* login to fabric is complete */
+ BFA_VF_EVFP = 6, /* EVFP is in progress */
+ BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
+};
+
+/*
+ * VF statistics
+ */
+struct bfa_vf_stats_s {
+ u32 flogi_sent; /* Num FLOGIs sent */
+ u32 flogi_rsp_err; /* FLOGI response errors */
+ u32 flogi_acc_err; /* FLOGI accept errors */
+ u32 flogi_accepts; /* FLOGI accepts received */
+ u32 flogi_rejects; /* FLOGI rejects received */
+ u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
+ u32 flogi_alloc_wait; /* Allocation waits prior to sending FLOGI */
+ u32 flogi_rcvd; /* FLOGIs received */
+ u32 flogi_rejected; /* Incoming FLOGIs rejected */
+ u32 fabric_onlines; /* Internal fabric online notification sent
+ * to other modules */
+ u32 fabric_offlines; /* Internal fabric offline notification sent
+ * to other modules */
+ u32 resvd; /* padding for 64 bit alignment */
+};
+
+/*
+ * VF attributes returned in queries
+ */
+struct bfa_vf_attr_s {
+ enum bfa_vf_state state; /* VF state */
+ u32 rsvd;
+ wwn_t fabric_name; /* fabric name */
+};
+
+#define BFA_FCS_MAX_LPORTS 256
+#define BFA_FCS_FABRIC_IPADDR_SZ 16
+
+/*
+ * symbolic names for base port/virtual port
+ */
+#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
+struct bfa_lport_symname_s {
+ char symname[BFA_SYMNAME_MAXLEN];
+};
+
+/*
+* Roles of FCS port:
+ * - FCP IM and FCP TM roles cannot be enabled together for a FCS port
+ * - Create multiple ports if both IM and TM functions required.
+ * - Atleast one role must be specified.
+ */
+enum bfa_lport_role {
+ BFA_LPORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */
+ BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
+};
+
+/*
+ * FCS port configuration.
+ */
+struct bfa_lport_cfg_s {
+ wwn_t pwwn; /* port wwn */
+ wwn_t nwwn; /* node wwn */
+ struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
+ struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */
+ enum bfa_lport_role roles; /* FCS port roles */
+ u32 rsvd;
+ bfa_boolean_t preboot_vp; /* vport created from PBC */
+ u8 tag[16]; /* opaque tag from application */
+ u8 padding[4];
+};
+
+/*
+ * FCS port states
+ */
+enum bfa_lport_state {
+ BFA_LPORT_UNINIT = 0, /* PORT is not yet initialized */
+ BFA_LPORT_FDISC = 1, /* FDISC is in progress */
+ BFA_LPORT_ONLINE = 2, /* login to fabric is complete */
+ BFA_LPORT_OFFLINE = 3, /* No login to fabric */
+};
+
+/*
+ * FCS port type.
+ */
+enum bfa_lport_type {
+ BFA_LPORT_TYPE_PHYSICAL = 0,
+ BFA_LPORT_TYPE_VIRTUAL,
+};
+
+/*
+ * FCS port offline reason.
+ */
+enum bfa_lport_offline_reason {
+ BFA_LPORT_OFFLINE_UNKNOWN = 0,
+ BFA_LPORT_OFFLINE_LINKDOWN,
+ BFA_LPORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the
+ * fabric */
+ BFA_LPORT_OFFLINE_FAB_NORESOURCES,
+ BFA_LPORT_OFFLINE_FAB_LOGOUT,
+};
+
+/*
+ * FCS lport info.
+ */
+struct bfa_lport_info_s {
+ u8 port_type; /* bfa_lport_type_t : physical or
+ * virtual */
+ u8 port_state; /* one of bfa_lport_state values */
+ u8 offline_reason; /* one of bfa_lport_offline_reason_t
+ * values */
+ wwn_t port_wwn;
+ wwn_t node_wwn;
+
+ /*
+ * following 4 feilds are valid for Physical Ports only
+ */
+ u32 max_vports_supp; /* Max supported vports */
+ u32 num_vports_inuse; /* Num of in use vports */
+ u32 max_rports_supp; /* Max supported rports */
+ u32 num_rports_inuse; /* Num of doscovered rports */
+
+};
+
+/*
+ * FCS port statistics
+ */
+struct bfa_lport_stats_s {
+ u32 ns_plogi_sent;
+ u32 ns_plogi_rsp_err;
+ u32 ns_plogi_acc_err;
+ u32 ns_plogi_accepts;
+ u32 ns_rejects; /* NS command rejects */
+ u32 ns_plogi_unknown_rsp;
+ u32 ns_plogi_alloc_wait;
+
+ u32 ns_retries; /* NS command retries */
+ u32 ns_timeouts; /* NS command timeouts */
+
+ u32 ns_rspnid_sent;
+ u32 ns_rspnid_accepts;
+ u32 ns_rspnid_rsp_err;
+ u32 ns_rspnid_rejects;
+ u32 ns_rspnid_alloc_wait;
+
+ u32 ns_rftid_sent;
+ u32 ns_rftid_accepts;
+ u32 ns_rftid_rsp_err;
+ u32 ns_rftid_rejects;
+ u32 ns_rftid_alloc_wait;
+
+ u32 ns_rffid_sent;
+ u32 ns_rffid_accepts;
+ u32 ns_rffid_rsp_err;
+ u32 ns_rffid_rejects;
+ u32 ns_rffid_alloc_wait;
+
+ u32 ns_gidft_sent;
+ u32 ns_gidft_accepts;
+ u32 ns_gidft_rsp_err;
+ u32 ns_gidft_rejects;
+ u32 ns_gidft_unknown_rsp;
+ u32 ns_gidft_alloc_wait;
+
+ u32 ns_rnnid_sent;
+ u32 ns_rnnid_accepts;
+ u32 ns_rnnid_rsp_err;
+ u32 ns_rnnid_rejects;
+ u32 ns_rnnid_alloc_wait;
+
+ u32 ns_rsnn_nn_sent;
+ u32 ns_rsnn_nn_accepts;
+ u32 ns_rsnn_nn_rsp_err;
+ u32 ns_rsnn_nn_rejects;
+ u32 ns_rsnn_nn_alloc_wait;
+
+ /*
+ * Mgmt Server stats
+ */
+ u32 ms_retries; /* MS command retries */
+ u32 ms_timeouts; /* MS command timeouts */
+ u32 ms_plogi_sent;
+ u32 ms_plogi_rsp_err;
+ u32 ms_plogi_acc_err;
+ u32 ms_plogi_accepts;
+ u32 ms_rejects; /* MS command rejects */
+ u32 ms_plogi_unknown_rsp;
+ u32 ms_plogi_alloc_wait;
+
+ u32 num_rscn; /* Num of RSCN received */
+ u32 num_portid_rscn;/* Num portid format RSCN
+ * received */
+
+ u32 uf_recvs; /* Unsolicited recv frames */
+ u32 uf_recv_drops; /* Dropped received frames */
+
+ u32 plogi_rcvd; /* Received plogi */
+ u32 prli_rcvd; /* Received prli */
+ u32 adisc_rcvd; /* Received adisc */
+ u32 prlo_rcvd; /* Received prlo */
+ u32 logo_rcvd; /* Received logo */
+ u32 rpsc_rcvd; /* Received rpsc */
+ u32 un_handled_els_rcvd; /* Received unhandled ELS */
+ u32 rport_plogi_timeouts; /* Rport plogi retry timeout count */
+ u32 rport_del_max_plogi_retry; /* Deleted rport
+ * (max retry of plogi) */
+};
+
+/*
+ * BFA port attribute returned in queries
+ */
+struct bfa_lport_attr_s {
+ enum bfa_lport_state state; /* port state */
+ u32 pid; /* port ID */
+ struct bfa_lport_cfg_s port_cfg; /* port configuration */
+ enum bfa_port_type port_type; /* current topology */
+ u32 loopback; /* cable is externally looped back */
+ wwn_t fabric_name; /* attached switch's nwwn */
+ u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
+ * fabric's ip addr */
+ mac_t fpma_mac; /* Lport's FPMA Mac address */
+ u16 authfail; /* auth failed state */
+};
+
+
+/*
+ * VPORT states
+ */
+enum bfa_vport_state {
+ BFA_FCS_VPORT_UNINIT = 0,
+ BFA_FCS_VPORT_CREATED = 1,
+ BFA_FCS_VPORT_OFFLINE = 1,
+ BFA_FCS_VPORT_FDISC_SEND = 2,
+ BFA_FCS_VPORT_FDISC = 3,
+ BFA_FCS_VPORT_FDISC_RETRY = 4,
+ BFA_FCS_VPORT_FDISC_RSP_WAIT = 5,
+ BFA_FCS_VPORT_ONLINE = 6,
+ BFA_FCS_VPORT_DELETING = 7,
+ BFA_FCS_VPORT_CLEANUP = 8,
+ BFA_FCS_VPORT_LOGO_SEND = 9,
+ BFA_FCS_VPORT_LOGO = 10,
+ BFA_FCS_VPORT_ERROR = 11,
+ BFA_FCS_VPORT_MAX_STATE,
+};
+
+/*
+ * vport statistics
+ */
+struct bfa_vport_stats_s {
+ struct bfa_lport_stats_s port_stats; /* base class (port) stats */
+ /*
+ * TODO - remove
+ */
+
+ u32 fdisc_sent; /* num fdisc sent */
+ u32 fdisc_accepts; /* fdisc accepts */
+ u32 fdisc_retries; /* fdisc retries */
+ u32 fdisc_timeouts; /* fdisc timeouts */
+ u32 fdisc_rsp_err; /* fdisc response error */
+ u32 fdisc_acc_bad; /* bad fdisc accepts */
+ u32 fdisc_rejects; /* fdisc rejects */
+ u32 fdisc_unknown_rsp;
+ /*
+ *!< fdisc rsp unknown error
+ */
+ u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */
+
+ u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */
+ u32 logo_sent; /* logo sent */
+ u32 logo_accepts; /* logo accepts */
+ u32 logo_rejects; /* logo rejects */
+ u32 logo_rsp_err; /* logo rsp errors */
+ u32 logo_unknown_rsp;
+ /* logo rsp unknown errors */
+
+ u32 fab_no_npiv; /* fabric does not support npiv */
+
+ u32 fab_offline; /* offline events from fab SM */
+ u32 fab_online; /* online events from fab SM */
+ u32 fab_cleanup; /* cleanup request from fab SM */
+ u32 rsvd;
+};
+
+/*
+ * BFA vport attribute returned in queries
+ */
+struct bfa_vport_attr_s {
+ struct bfa_lport_attr_s port_attr; /* base class (port) attributes */
+ enum bfa_vport_state vport_state; /* vport state */
+ u32 rsvd;
+};
+
+/*
+ * FCS remote port states
+ */
+enum bfa_rport_state {
+ BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */
+ BFA_RPORT_OFFLINE = 1, /* rport is offline */
+ BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */
+ BFA_RPORT_ONLINE = 3, /* login to rport is complete */
+ BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */
+ BFA_RPORT_NSQUERY = 5, /* nameserver query */
+ BFA_RPORT_ADISC = 6, /* ADISC authentication */
+ BFA_RPORT_LOGO = 7, /* logging out with rport */
+ BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */
+ BFA_RPORT_NSDISC = 9, /* re-discover rport */
+};
+
+/*
+ * Rport Scsi Function : Initiator/Target.
+ */
+enum bfa_rport_function {
+ BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */
+ BFA_RPORT_TARGET = 0x02, /* SCSI Target */
+};
+
+/*
+ * port/node symbolic names for rport
+ */
+#define BFA_RPORT_SYMNAME_MAXLEN 255
+struct bfa_rport_symname_s {
+ char symname[BFA_RPORT_SYMNAME_MAXLEN];
+};
+
+/*
+ * FCS remote port statistics
+ */
+struct bfa_rport_stats_s {
+ u32 offlines; /* remote port offline count */
+ u32 onlines; /* remote port online count */
+ u32 rscns; /* RSCN affecting rport */
+ u32 plogis; /* plogis sent */
+ u32 plogi_accs; /* plogi accepts */
+ u32 plogi_timeouts; /* plogi timeouts */
+ u32 plogi_rejects; /* rcvd plogi rejects */
+ u32 plogi_failed; /* local failure */
+ u32 plogi_rcvd; /* plogis rcvd */
+ u32 prli_rcvd; /* inbound PRLIs */
+ u32 adisc_rcvd; /* ADISCs received */
+ u32 adisc_rejects; /* recvd ADISC rejects */
+ u32 adisc_sent; /* ADISC requests sent */
+ u32 adisc_accs; /* ADISC accepted by rport */
+ u32 adisc_failed; /* ADISC failed (no response) */
+ u32 adisc_rejected; /* ADISC rejected by us */
+ u32 logos; /* logos sent */
+ u32 logo_accs; /* LOGO accepts from rport */
+ u32 logo_failed; /* LOGO failures */
+ u32 logo_rejected; /* LOGO rejects from rport */
+ u32 logo_rcvd; /* LOGO from remote port */
+
+ u32 rpsc_rcvd; /* RPSC received */
+ u32 rpsc_rejects; /* recvd RPSC rejects */
+ u32 rpsc_sent; /* RPSC requests sent */
+ u32 rpsc_accs; /* RPSC accepted by rport */
+ u32 rpsc_failed; /* RPSC failed (no response) */
+ u32 rpsc_rejected; /* RPSC rejected by us */
+
+ u32 rjt_insuff_res; /* LS RJT with insuff resources */
+ struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
+};
+
+/*
+ * FCS remote port attributes returned in queries
+ */
+struct bfa_rport_attr_s {
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ enum fc_cos cos_supported; /* supported class of services */
+ u32 pid; /* port ID */
+ u32 df_sz; /* Max payload size */
+ enum bfa_rport_state state; /* Rport State machine state */
+ enum fc_cos fc_cos; /* FC classes of services */
+ bfa_boolean_t cisc; /* CISC capable device */
+ struct bfa_rport_symname_s symname; /* Symbolic Name */
+ enum bfa_rport_function scsi_function; /* Initiator/Target */
+ struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */
+ enum bfa_port_speed curr_speed; /* operating speed got from
+ * RPSC ELS. UNKNOWN, if RPSC
+ * is not supported */
+ bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */
+ enum bfa_port_speed assigned_speed; /* Speed assigned by the user.
+ * will be used if RPSC is not
+ * supported by the rport */
+};
+
+struct bfa_rport_remote_link_stats_s {
+ u32 lfc; /* Link Failure Count */
+ u32 lsyc; /* Loss of Synchronization Count */
+ u32 lsic; /* Loss of Signal Count */
+ u32 pspec; /* Primitive Sequence Protocol Error Count */
+ u32 itwc; /* Invalid Transmission Word Count */
+ u32 icc; /* Invalid CRC Count */
+};
+
+struct bfa_rport_qualifier_s {
+ wwn_t pwwn; /* Port WWN */
+ u32 pid; /* port ID */
+ u32 rsvd;
+};
+
+#define BFA_MAX_IO_INDEX 7
+#define BFA_NO_IO_INDEX 9
+
+/*
+ * FCS itnim states
+ */
+enum bfa_itnim_state {
+ BFA_ITNIM_OFFLINE = 0, /* offline */
+ BFA_ITNIM_PRLI_SEND = 1, /* prli send */
+ BFA_ITNIM_PRLI_SENT = 2, /* prli sent */
+ BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */
+ BFA_ITNIM_HCB_ONLINE = 4, /* online callback */
+ BFA_ITNIM_ONLINE = 5, /* online */
+ BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */
+ BFA_ITNIM_INITIATIOR = 7, /* initiator */
+};
+
+/*
+ * FCS remote port statistics
+ */
+struct bfa_itnim_stats_s {
+ u32 onlines; /* num rport online */
+ u32 offlines; /* num rport offline */
+ u32 prli_sent; /* num prli sent out */
+ u32 fcxp_alloc_wait;/* num fcxp alloc waits */
+ u32 prli_rsp_err; /* num prli rsp errors */
+ u32 prli_rsp_acc; /* num prli rsp accepts */
+ u32 initiator; /* rport is an initiator */
+ u32 prli_rsp_parse_err; /* prli rsp parsing errors */
+ u32 prli_rsp_rjt; /* num prli rsp rejects */
+ u32 timeout; /* num timeouts detected */
+ u32 sler; /* num sler notification from BFA */
+ u32 rsvd; /* padding for 64 bit alignment */
+};
+
+/*
+ * FCS itnim attributes returned in queries
+ */
+struct bfa_itnim_attr_s {
+ enum bfa_itnim_state state; /* FCS itnim state */
+ u8 retry; /* data retransmision support */
+ u8 task_retry_id; /* task retry ident support */
+ u8 rec_support; /* REC supported */
+ u8 conf_comp; /* confirmed completion supp */
+};
+
+#endif /* __BFA_DEFS_FCS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
new file mode 100644
index 000000000..638f441ff
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -0,0 +1,1462 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_SVC_H__
+#define __BFA_DEFS_SVC_H__
+
+#include "bfa_defs.h"
+#include "bfa_fc.h"
+#include "bfi.h"
+
+#define BFA_IOCFC_INTR_DELAY 1125
+#define BFA_IOCFC_INTR_LATENCY 225
+#define BFA_IOCFCOE_INTR_DELAY 25
+#define BFA_IOCFCOE_INTR_LATENCY 5
+
+/*
+ * Interrupt coalescing configuration.
+ */
+#pragma pack(1)
+struct bfa_iocfc_intr_attr_s {
+ u8 coalesce; /* enable/disable coalescing */
+ u8 rsvd[3];
+ __be16 latency; /* latency in microseconds */
+ __be16 delay; /* delay in microseconds */
+};
+
+/*
+ * IOC firmware configuraton
+ */
+struct bfa_iocfc_fwcfg_s {
+ u16 num_fabrics; /* number of fabrics */
+ u16 num_lports; /* number of local lports */
+ u16 num_rports; /* number of remote ports */
+ u16 num_ioim_reqs; /* number of IO reqs */
+ u16 num_tskim_reqs; /* task management requests */
+ u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
+ u16 num_fcxp_reqs; /* unassisted FC exchanges */
+ u16 num_uf_bufs; /* unsolicited recv buffers */
+ u8 num_cqs;
+ u8 fw_tick_res; /* FW clock resolution in ms */
+ u8 rsvd[6];
+};
+#pragma pack()
+
+struct bfa_iocfc_drvcfg_s {
+ u16 num_reqq_elems; /* number of req queue elements */
+ u16 num_rspq_elems; /* number of rsp queue elements */
+ u16 num_sgpgs; /* number of total SG pages */
+ u16 num_sboot_tgts; /* number of SAN boot targets */
+ u16 num_sboot_luns; /* number of SAN boot luns */
+ u16 ioc_recover; /* IOC recovery mode */
+ u16 min_cfg; /* minimum configuration */
+ u16 path_tov; /* device path timeout */
+ u16 num_tio_reqs; /* number of TM IO reqs */
+ u8 port_mode;
+ u8 rsvd_a;
+ bfa_boolean_t delay_comp; /* delay completion of failed
+ * inflight IOs */
+ u16 num_ttsk_reqs; /* TM task management requests */
+ u32 rsvd;
+};
+
+/*
+ * IOC configuration
+ */
+struct bfa_iocfc_cfg_s {
+ struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
+ struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_ioim_stats_s {
+ u32 host_abort; /* IO aborted by host driver*/
+ u32 host_cleanup; /* IO clean up by host driver */
+
+ u32 fw_io_timeout; /* IOs timedout */
+ u32 fw_frm_parse; /* frame parsed by f/w */
+ u32 fw_frm_data; /* fcp_data frame parsed by f/w */
+ u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
+ u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
+ u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
+ u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
+ u32 fw_frm_unknown; /* unknown parsed by f/w */
+ u32 fw_data_dma; /* f/w DMA'ed the data frame */
+ u32 fw_frm_drop; /* f/w drop the frame */
+
+ u32 rec_timeout; /* FW rec timed out */
+ u32 error_rec; /* FW sending rec on
+ * an error condition*/
+ u32 wait_for_si; /* FW wait for SI */
+ u32 rec_rsp_inval; /* REC rsp invalid */
+ u32 rec_rsp_xchg_comp; /* REC rsp xchg complete */
+ u32 rec_rsp_rd_si_ownd; /* REC rsp read si owned */
+
+ u32 seqr_io_abort; /* target does not know cmd so abort */
+ u32 seqr_io_retry; /* SEQR failed so retry IO */
+
+ u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
+ u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
+ u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */
+
+ u32 fcp_data_lost; /* fcp data lost */
+
+ u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
+ u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
+ u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
+
+ u32 io_abort_timeout; /* ABTS timedout */
+ u32 sler_initiated; /* SLER initiated */
+
+ u32 unexp_fcp_rsp; /* fcp response in wrong state */
+
+ u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
+ u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
+ u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
+ u32 fcp_rsp_resid_inval; /* invalid residue */
+ u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
+ u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
+ u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
+ u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
+ u32 fcp_conf_req; /* FCP conf requested */
+
+ u32 tgt_aborted_io; /* target initiated abort */
+
+ u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
+ u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
+ u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
+ u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
+ u32 ioh_hit_class2_event; /* IOH hit class2 */
+ u32 ioh_miss_other_event; /* IOH miss other */
+ u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
+ u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
+ * bytes xfered */
+ u32 ioh_seq_len_err_event; /* IOH seq len error */
+ u32 ioh_data_oor_event; /* Data out of range */
+ u32 ioh_ro_ooo_event; /* Relative offset out of range */
+ u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
+ u32 ioh_unexp_frame_event; /* unexpected frame received
+ * count */
+ u32 ioh_err_int; /* IOH error int during data-phase
+ * for scsi write */
+};
+
+struct bfa_fw_tio_stats_s {
+ u32 tio_conf_proc; /* TIO CONF processed */
+ u32 tio_conf_drop; /* TIO CONF dropped */
+ u32 tio_cleanup_req; /* TIO cleanup requested */
+ u32 tio_cleanup_comp; /* TIO cleanup completed */
+ u32 tio_abort_rsp; /* TIO abort response */
+ u32 tio_abort_rsp_comp; /* TIO abort rsp completed */
+ u32 tio_abts_req; /* TIO ABTS requested */
+ u32 tio_abts_ack; /* TIO ABTS ack-ed */
+ u32 tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
+ u32 tio_abts_tmo; /* TIO ABTS timeout */
+ u32 tio_snsdata_dma; /* TIO sense data DMA */
+ u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+ u32 tio_rxwchan_avail; /* TIO RX wait channel available */
+ u32 tio_hit_bls; /* TIO IOH BLS event */
+ u32 tio_uf_recv; /* TIO received UF */
+ u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
+ u32 tio_wr_invalid_sm; /* TIO write reqst in wrong state machine */
+
+ u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
+ u32 ds_rxwchan_avail; /* DS RX wait channel available */
+ u32 ds_unaligned_rd; /* DS unaligned read */
+ u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+ * machine */
+ u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+ * machine */
+ u32 ds_flush_req; /* DS flush requested */
+ u32 ds_flush_comp; /* DS flush completed */
+ u32 ds_xfrdy_exp; /* DS XFER_RDY expired */
+ u32 ds_seq_cnt_err; /* DS seq cnt error */
+ u32 ds_seq_len_err; /* DS seq len error */
+ u32 ds_data_oor; /* DS data out of order */
+ u32 ds_hit_bls; /* DS hit BLS */
+ u32 ds_edtov_timer_exp; /* DS edtov expired */
+ u32 ds_cpu_owned; /* DS cpu owned */
+ u32 ds_hit_class2; /* DS hit class2 */
+ u32 ds_length_err; /* DS length error */
+ u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */
+ u32 ds_rectov_timer_exp;/* DS rectov expired */
+ u32 ds_unexp_fr_err; /* DS unexp frame error */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+ struct bfa_fw_ioim_stats_s ioim_stats;
+ struct bfa_fw_tio_stats_s tio_stats;
+};
+
+/*
+ * IOC port firmware stats
+ */
+
+struct bfa_fw_port_fpg_stats_s {
+ u32 intr_evt;
+ u32 intr;
+ u32 intr_excess;
+ u32 intr_cause0;
+ u32 intr_other;
+ u32 intr_other_ign;
+ u32 sig_lost;
+ u32 sig_regained;
+ u32 sync_lost;
+ u32 sync_to;
+ u32 sync_regained;
+ u32 div2_overflow;
+ u32 div2_underflow;
+ u32 efifo_overflow;
+ u32 efifo_underflow;
+ u32 idle_rx;
+ u32 lrr_rx;
+ u32 lr_rx;
+ u32 ols_rx;
+ u32 nos_rx;
+ u32 lip_rx;
+ u32 arbf0_rx;
+ u32 arb_rx;
+ u32 mrk_rx;
+ u32 const_mrk_rx;
+ u32 prim_unknown;
+};
+
+
+struct bfa_fw_port_lksm_stats_s {
+ u32 hwsm_success; /* hwsm state machine success */
+ u32 hwsm_fails; /* hwsm fails */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_fails; /* swsm fails */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 busybufs; /* link init failed due to busybuf */
+ u32 buf_waits; /* bufwait state entries */
+ u32 link_fails; /* link failures */
+ u32 psp_errors; /* primitive sequence protocol errors */
+ u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
+ u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
+ u32 lr_tx; /* No. of times LR tx started */
+ u32 lrr_tx; /* No. of times LRR tx started */
+ u32 ols_tx; /* No. of times OLS tx started */
+ u32 nos_tx; /* No. of times NOS tx started */
+ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
+ u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
+};
+
+struct bfa_fw_port_snsm_stats_s {
+ u32 hwsm_success; /* Successful hwsm terminations */
+ u32 hwsm_fails; /* hwsm fail count */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 error_resets; /* error resets initiated by upsm */
+ u32 sync_lost; /* Sync loss count */
+ u32 sig_lost; /* Signal loss count */
+ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
+ u32 adapt_success; /* SNSM adaptation success */
+ u32 adapt_fails; /* SNSM adaptation failures */
+ u32 adapt_ign_fails; /* SNSM adaptation failures ignored */
+};
+
+struct bfa_fw_port_physm_stats_s {
+ u32 module_inserts; /* Module insert count */
+ u32 module_xtracts; /* Module extracts count */
+ u32 module_invalids; /* Invalid module inserted count */
+ u32 module_read_ign; /* Module validation status ignored */
+ u32 laser_faults; /* Laser fault count */
+ u32 rsvd;
+};
+
+struct bfa_fw_fip_stats_s {
+ u32 vlan_req; /* vlan discovery requests */
+ u32 vlan_notify; /* vlan notifications */
+ u32 vlan_err; /* vlan response error */
+ u32 vlan_timeouts; /* vlan disvoery timeouts */
+ u32 vlan_invalids; /* invalid vlan in discovery advert. */
+ u32 disc_req; /* Discovery solicit requests */
+ u32 disc_rsp; /* Discovery solicit response */
+ u32 disc_err; /* Discovery advt. parse errors */
+ u32 disc_unsol; /* Discovery unsolicited */
+ u32 disc_timeouts; /* Discovery timeouts */
+ u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
+ u32 linksvc_unsupp; /* Unsupported link service req */
+ u32 linksvc_err; /* Parse error in link service req */
+ u32 logo_req; /* FIP logos received */
+ u32 clrvlink_req; /* Clear virtual link req */
+ u32 op_unsupp; /* Unsupported FIP operation */
+ u32 untagged; /* Untagged frames (ignored) */
+ u32 invalid_version; /* Invalid FIP version */
+};
+
+struct bfa_fw_lps_stats_s {
+ u32 mac_invalids; /* Invalid mac assigned */
+ u32 rsvd;
+};
+
+struct bfa_fw_fcoe_stats_s {
+ u32 cee_linkups; /* CEE link up count */
+ u32 cee_linkdns; /* CEE link down count */
+ u32 fip_linkups; /* FIP link up count */
+ u32 fip_linkdns; /* FIP link up count */
+ u32 fip_fails; /* FIP fail count */
+ u32 mac_invalids; /* Invalid mac assigned */
+};
+
+/*
+ * IOC firmware FCoE port stats
+ */
+struct bfa_fw_fcoe_port_stats_s {
+ struct bfa_fw_fcoe_stats_s fcoe_stats;
+ struct bfa_fw_fip_stats_s fip_stats;
+};
+
+/**
+ * @brief LPSM statistics
+ */
+struct bfa_fw_lpsm_stats_s {
+ u32 cls_rx; /* LPSM cls_rx */
+ u32 cls_tx; /* LPSM cls_tx */
+ u32 arbf0_rx; /* LPSM abrf0 rcvd */
+ u32 arbf0_tx; /* LPSM abrf0 xmit */
+ u32 init_rx; /* LPSM loop init start */
+ u32 unexp_hwst; /* LPSM unknown hw state */
+ u32 unexp_frame; /* LPSM unknown_frame */
+ u32 unexp_prim; /* LPSM unexpected primitive */
+ u32 prev_alpa_unavail; /* LPSM prev alpa unavailable */
+ u32 alpa_unavail; /* LPSM alpa not available */
+ u32 lip_rx; /* LPSM lip rcvd */
+ u32 lip_f7f7_rx; /* LPSM lip f7f7 rcvd */
+ u32 lip_f8_rx; /* LPSM lip f8 rcvd */
+ u32 lip_f8f7_rx; /* LPSM lip f8f7 rcvd */
+ u32 lip_other_rx; /* LPSM lip other rcvd */
+ u32 lip_tx; /* LPSM lip xmit */
+ u32 retry_tov; /* LPSM retry TOV */
+ u32 lip_tov; /* LPSM LIP wait TOV */
+ u32 idle_tov; /* LPSM idle wait TOV */
+ u32 arbf0_tov; /* LPSM arbfo wait TOV */
+ u32 stop_loop_tov; /* LPSM stop loop wait TOV */
+ u32 lixa_tov; /* LPSM lisa wait TOV */
+ u32 lixx_tov; /* LPSM lilp/lirp wait TOV */
+ u32 cls_tov; /* LPSM cls wait TOV */
+ u32 sler; /* LPSM SLER recvd */
+ u32 failed; /* LPSM failed */
+ u32 success; /* LPSM online */
+};
+
+/*
+ * IOC firmware FC uport stats
+ */
+struct bfa_fw_fc_uport_stats_s {
+ struct bfa_fw_port_snsm_stats_s snsm_stats;
+ struct bfa_fw_port_lksm_stats_s lksm_stats;
+ struct bfa_fw_lpsm_stats_s lpsm_stats;
+};
+
+/*
+ * IOC firmware FC port stats
+ */
+union bfa_fw_fc_port_stats_s {
+ struct bfa_fw_fc_uport_stats_s fc_stats;
+ struct bfa_fw_fcoe_port_stats_s fcoe_stats;
+};
+
+/*
+ * IOC firmware port stats
+ */
+struct bfa_fw_port_stats_s {
+ struct bfa_fw_port_fpg_stats_s fpg_stats;
+ struct bfa_fw_port_physm_stats_s physm_stats;
+ union bfa_fw_fc_port_stats_s fc_port;
+};
+
+/*
+ * fcxchg module statistics
+ */
+struct bfa_fw_fcxchg_stats_s {
+ u32 ua_tag_inv;
+ u32 ua_state_inv;
+};
+
+/*
+ * Trunk statistics
+ */
+struct bfa_fw_trunk_stats_s {
+ u32 emt_recvd; /* Trunk EMT received */
+ u32 emt_accepted; /* Trunk EMT Accepted */
+ u32 emt_rejected; /* Trunk EMT rejected */
+ u32 etp_recvd; /* Trunk ETP received */
+ u32 etp_accepted; /* Trunk ETP Accepted */
+ u32 etp_rejected; /* Trunk ETP rejected */
+ u32 lr_recvd; /* Trunk LR received */
+ u32 rsvd; /* padding for 64 bit alignment */
+};
+
+struct bfa_fw_aport_stats_s {
+ u32 flogi_sent; /* Flogi sent */
+ u32 flogi_acc_recvd; /* Flogi Acc received */
+ u32 flogi_rjt_recvd; /* Flogi rejects received */
+ u32 flogi_retries; /* Flogi retries */
+
+ u32 elp_recvd; /* ELP received */
+ u32 elp_accepted; /* ELP Accepted */
+ u32 elp_rejected; /* ELP rejected */
+ u32 elp_dropped; /* ELP dropped */
+
+ u32 bbcr_lr_count; /*!< BBCR Link Resets */
+ u32 frame_lost_intrs; /*!< BBCR Frame loss intrs */
+ u32 rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */
+
+ u32 rsvd;
+};
+
+/*
+ * IOCFC firmware stats
+ */
+struct bfa_fw_iocfc_stats_s {
+ u32 cfg_reqs; /* cfg request */
+ u32 updq_reqs; /* update queue request */
+ u32 ic_reqs; /* interrupt coalesce reqs */
+ u32 unknown_reqs;
+ u32 set_intr_reqs; /* set interrupt reqs */
+};
+
+/*
+ * IOC attributes returned in queries
+ */
+struct bfa_iocfc_attr_s {
+ struct bfa_iocfc_cfg_s config; /* IOCFC config */
+ struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
+};
+
+/*
+ * Eth_sndrcv mod stats
+ */
+struct bfa_fw_eth_sndrcv_stats_s {
+ u32 crc_err;
+ u32 rsvd; /* 64bit align */
+};
+
+/*
+ * CT MAC mod stats
+ */
+struct bfa_fw_mac_mod_stats_s {
+ u32 mac_on; /* MAC got turned-on */
+ u32 link_up; /* link-up */
+ u32 signal_off; /* lost signal */
+ u32 dfe_on; /* DFE on */
+ u32 mac_reset; /* # of MAC reset to bring lnk up */
+ u32 pcs_reset; /* # of PCS reset to bring lnk up */
+ u32 loopback; /* MAC got into serdes loopback */
+ u32 lb_mac_reset;
+ /* # of MAC reset to bring link up in loopback */
+ u32 lb_pcs_reset;
+ /* # of PCS reset to bring link up in loopback */
+ u32 rsvd; /* 64bit align */
+};
+
+/*
+ * CT MOD stats
+ */
+struct bfa_fw_ct_mod_stats_s {
+ u32 rxa_rds_undrun; /* RxA RDS underrun */
+ u32 rad_bpc_ovfl; /* RAD BPC overflow */
+ u32 rad_rlb_bpc_ovfl; /* RAD RLB BPC overflow */
+ u32 bpc_fcs_err; /* BPC FCS_ERR */
+ u32 txa_tso_hdr; /* TxA TSO header too long */
+ u32 rsvd; /* 64bit align */
+};
+
+/*
+ * RDS mod stats
+ */
+struct bfa_fw_rds_stats_s {
+ u32 no_fid_drop_err; /* RDS no fid drop error */
+ u32 rsvd; /* 64bit align */
+};
+
+/*
+ * IOC firmware stats
+ */
+struct bfa_fw_stats_s {
+ struct bfa_fw_ioc_stats_s ioc_stats;
+ struct bfa_fw_iocfc_stats_s iocfc_stats;
+ struct bfa_fw_io_stats_s io_stats;
+ struct bfa_fw_port_stats_s port_stats;
+ struct bfa_fw_fcxchg_stats_s fcxchg_stats;
+ struct bfa_fw_lps_stats_s lps_stats;
+ struct bfa_fw_trunk_stats_s trunk_stats;
+ struct bfa_fw_aport_stats_s aport_stats;
+ struct bfa_fw_mac_mod_stats_s macmod_stats;
+ struct bfa_fw_ct_mod_stats_s ctmod_stats;
+ struct bfa_fw_eth_sndrcv_stats_s ethsndrcv_stats;
+ struct bfa_fw_rds_stats_s rds_stats;
+};
+
+#define BFA_IOCFC_PATHTOV_MAX 60
+#define BFA_IOCFC_QDEPTH_MAX 2000
+
+/*
+ * QoS states
+ */
+enum bfa_qos_state {
+ BFA_QOS_DISABLED = 0, /* QoS is disabled */
+ BFA_QOS_ONLINE = 1, /* QoS is online */
+ BFA_QOS_OFFLINE = 2, /* QoS is offline */
+};
+
+/*
+ * QoS Priority levels.
+ */
+enum bfa_qos_priority {
+ BFA_QOS_UNKNOWN = 0,
+ BFA_QOS_HIGH = 1, /* QoS Priority Level High */
+ BFA_QOS_MED = 2, /* QoS Priority Level Medium */
+ BFA_QOS_LOW = 3, /* QoS Priority Level Low */
+};
+
+/*
+ * QoS bandwidth allocation for each priority level
+ */
+enum bfa_qos_bw_alloc {
+ BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */
+ BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */
+ BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
+};
+#pragma pack(1)
+
+struct bfa_qos_bw_s {
+ u8 qos_bw_set;
+ u8 high;
+ u8 med;
+ u8 low;
+};
+
+/*
+ * QoS attribute returned in QoS Query
+ */
+struct bfa_qos_attr_s {
+ u8 state; /* QoS current state */
+ u8 rsvd1[3];
+ u32 total_bb_cr; /* Total BB Credits */
+ struct bfa_qos_bw_s qos_bw; /* QOS bw cfg */
+ struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */
+};
+
+enum bfa_bbcr_state {
+ BFA_BBCR_DISABLED, /*!< BBCR is disable */
+ BFA_BBCR_ONLINE, /*!< BBCR is online */
+ BFA_BBCR_OFFLINE, /*!< BBCR is offline */
+};
+
+enum bfa_bbcr_err_reason {
+ BFA_BBCR_ERR_REASON_NONE, /*!< Unknown */
+ BFA_BBCR_ERR_REASON_SPEED_UNSUP, /*!< Port speed < max sup_speed */
+ BFA_BBCR_ERR_REASON_PEER_UNSUP, /*!< BBCR is disable on peer port */
+ BFA_BBCR_ERR_REASON_NON_BRCD_SW, /*!< Connected to non BRCD switch */
+ BFA_BBCR_ERR_REASON_FLOGI_RJT, /*!< Login rejected by the switch */
+};
+
+struct bfa_bbcr_attr_s {
+ u8 state;
+ u8 peer_bb_scn;
+ u8 reason;
+ u8 rsvd;
+};
+
+/*
+ * These fields should be displayed only from the CLI.
+ * There will be a separate BFAL API (get_qos_vc_attr ?)
+ * to retrieve this.
+ *
+ */
+#define BFA_QOS_MAX_VC 16
+
+struct bfa_qos_vc_info_s {
+ u8 vc_credit;
+ u8 borrow_credit;
+ u8 priority;
+ u8 resvd;
+};
+
+struct bfa_qos_vc_attr_s {
+ u16 total_vc_count; /* Total VC Count */
+ u16 shared_credit;
+ u32 elp_opmode_flags;
+ struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
+ * total_vc_count */
+};
+
+/*
+ * QoS statistics
+ */
+struct bfa_qos_stats_s {
+ u32 flogi_sent; /* QoS Flogi sent */
+ u32 flogi_acc_recvd; /* QoS Flogi Acc received */
+ u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
+ u32 flogi_retries; /* QoS Flogi retries */
+
+ u32 elp_recvd; /* QoS ELP received */
+ u32 elp_accepted; /* QoS ELP Accepted */
+ u32 elp_rejected; /* QoS ELP rejected */
+ u32 elp_dropped; /* QoS ELP dropped */
+
+ u32 qos_rscn_recvd; /* QoS RSCN received */
+ u32 rsvd; /* padding for 64 bit alignment */
+};
+
+/*
+ * FCoE statistics
+ */
+struct bfa_fcoe_stats_s {
+ u64 secs_reset; /* Seconds since stats reset */
+ u64 cee_linkups; /* CEE link up */
+ u64 cee_linkdns; /* CEE link down */
+ u64 fip_linkups; /* FIP link up */
+ u64 fip_linkdns; /* FIP link down */
+ u64 fip_fails; /* FIP failures */
+ u64 mac_invalids; /* Invalid mac assignments */
+ u64 vlan_req; /* Vlan requests */
+ u64 vlan_notify; /* Vlan notifications */
+ u64 vlan_err; /* Vlan notification errors */
+ u64 vlan_timeouts; /* Vlan request timeouts */
+ u64 vlan_invalids; /* Vlan invalids */
+ u64 disc_req; /* Discovery requests */
+ u64 disc_rsp; /* Discovery responses */
+ u64 disc_err; /* Discovery error frames */
+ u64 disc_unsol; /* Discovery unsolicited */
+ u64 disc_timeouts; /* Discovery timeouts */
+ u64 disc_fcf_unavail; /* Discovery FCF not avail */
+ u64 linksvc_unsupp; /* FIP link service req unsupp */
+ u64 linksvc_err; /* FIP link service req errors */
+ u64 logo_req; /* FIP logos received */
+ u64 clrvlink_req; /* Clear virtual link requests */
+ u64 op_unsupp; /* FIP operation unsupp. */
+ u64 untagged; /* FIP untagged frames */
+ u64 txf_ucast; /* Tx FCoE unicast frames */
+ u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
+ u64 txf_ucast_octets; /* Tx FCoE unicast octets */
+ u64 txf_mcast; /* Tx FCoE multicast frames */
+ u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
+ u64 txf_mcast_octets; /* Tx FCoE multicast octets */
+ u64 txf_bcast; /* Tx FCoE broadcast frames */
+ u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
+ u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
+ u64 txf_timeout; /* Tx timeouts */
+ u64 txf_parity_errors; /* Transmit parity err */
+ u64 txf_fid_parity_errors; /* Transmit FID parity err */
+ u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
+ u64 rxf_ucast; /* Rx FCoE unicast frames */
+ u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
+ u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
+ u64 rxf_mcast; /* Rx FCoE multicast frames */
+ u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
+ u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
+ u64 rxf_bcast; /* Rx FCoE broadcast frames */
+ u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
+};
+
+/*
+ * QoS or FCoE stats (fcport stats excluding physical FC port stats)
+ */
+union bfa_fcport_stats_u {
+ struct bfa_qos_stats_s fcqos;
+ struct bfa_fcoe_stats_s fcoe;
+};
+#pragma pack()
+
+struct bfa_fcpim_del_itn_stats_s {
+ u32 del_itn_iocomp_aborted; /* Aborted IO requests */
+ u32 del_itn_iocomp_timedout; /* IO timeouts */
+ u32 del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery */
+ u32 del_itn_iocom_res_free; /* Delayed freeing of IO resources */
+ u32 del_itn_iocom_hostabrts; /* Host IO abort requests */
+ u32 del_itn_total_ios; /* Total IO count */
+ u32 del_io_iocdowns; /* IO cleaned-up due to IOC down */
+ u32 del_tm_iocdowns; /* TM cleaned-up due to IOC down */
+};
+
+struct bfa_itnim_iostats_s {
+
+ u32 total_ios; /* Total IO Requests */
+ u32 input_reqs; /* Data in-bound requests */
+ u32 output_reqs; /* Data out-bound requests */
+ u32 io_comps; /* Total IO Completions */
+ u32 wr_throughput; /* Write data transferred in bytes */
+ u32 rd_throughput; /* Read data transferred in bytes */
+
+ u32 iocomp_ok; /* Slowpath IO completions */
+ u32 iocomp_underrun; /* IO underrun */
+ u32 iocomp_overrun; /* IO overrun */
+ u32 qwait; /* IO Request-Q wait */
+ u32 qresumes; /* IO Request-Q wait done */
+ u32 no_iotags; /* No free IO tag */
+ u32 iocomp_timedout; /* IO timeouts */
+ u32 iocom_nexus_abort; /* IO failure due to target offline */
+ u32 iocom_proto_err; /* IO protocol errors */
+ u32 iocom_dif_err; /* IO SBC-3 protection errors */
+
+ u32 iocom_sqer_needed; /* fcp-2 error recovery failed */
+ u32 iocom_res_free; /* Delayed freeing of IO tag */
+
+
+ u32 io_aborts; /* Host IO abort requests */
+ u32 iocom_hostabrts; /* Host IO abort completions */
+ u32 io_cleanups; /* IO clean-up requests */
+ u32 path_tov_expired; /* IO path tov expired */
+ u32 iocomp_aborted; /* IO abort completions */
+ u32 io_iocdowns; /* IO cleaned-up due to IOC down */
+ u32 iocom_utags; /* IO comp with unknown tags */
+
+ u32 io_tmaborts; /* Abort request due to TM command */
+ u32 tm_io_comps; /* Abort completion due to TM command */
+
+ u32 creates; /* IT Nexus create requests */
+ u32 fw_create; /* IT Nexus FW create requests */
+ u32 create_comps; /* IT Nexus FW create completions */
+ u32 onlines; /* IT Nexus onlines */
+ u32 offlines; /* IT Nexus offlines */
+ u32 fw_delete; /* IT Nexus FW delete requests */
+ u32 delete_comps; /* IT Nexus FW delete completions */
+ u32 deletes; /* IT Nexus delete requests */
+ u32 sler_events; /* SLER events */
+ u32 ioc_disabled; /* Num IOC disables */
+ u32 cleanup_comps; /* IT Nexus cleanup completions */
+
+ u32 tm_cmnds; /* TM Requests */
+ u32 tm_fw_rsps; /* TM Completions */
+ u32 tm_success; /* TM initiated IO cleanup success */
+ u32 tm_failures; /* TM initiated IO cleanup failure */
+ u32 no_tskims; /* No free TM tag */
+ u32 tm_qwait; /* TM Request-Q wait */
+ u32 tm_qresumes; /* TM Request-Q wait done */
+
+ u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
+ u32 tm_cleanups; /* TM cleanup requests */
+ u32 tm_cleanup_comps; /* TM cleanup completions */
+ u32 rsvd[6];
+};
+
+/* Modify char* port_stt[] in bfal_port.c if a new state was added */
+enum bfa_port_states {
+ BFA_PORT_ST_UNINIT = 1,
+ BFA_PORT_ST_ENABLING_QWAIT = 2,
+ BFA_PORT_ST_ENABLING = 3,
+ BFA_PORT_ST_LINKDOWN = 4,
+ BFA_PORT_ST_LINKUP = 5,
+ BFA_PORT_ST_DISABLING_QWAIT = 6,
+ BFA_PORT_ST_DISABLING = 7,
+ BFA_PORT_ST_DISABLED = 8,
+ BFA_PORT_ST_STOPPED = 9,
+ BFA_PORT_ST_IOCDOWN = 10,
+ BFA_PORT_ST_IOCDIS = 11,
+ BFA_PORT_ST_FWMISMATCH = 12,
+ BFA_PORT_ST_PREBOOT_DISABLED = 13,
+ BFA_PORT_ST_TOGGLING_QWAIT = 14,
+ BFA_PORT_ST_FAA_MISCONFIG = 15,
+ BFA_PORT_ST_DPORT = 16,
+ BFA_PORT_ST_DDPORT = 17,
+ BFA_PORT_ST_MAX_STATE,
+};
+
+/*
+ * Port operational type (in sync with SNIA port type).
+ */
+enum bfa_port_type {
+ BFA_PORT_TYPE_UNKNOWN = 1, /* port type is unknown */
+ BFA_PORT_TYPE_NPORT = 5, /* P2P with switched fabric */
+ BFA_PORT_TYPE_NLPORT = 6, /* public loop */
+ BFA_PORT_TYPE_LPORT = 20, /* private loop */
+ BFA_PORT_TYPE_P2P = 21, /* P2P with no switched fabric */
+ BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */
+};
+
+/*
+ * Port topology setting. A port's topology and fabric login status
+ * determine its operational type.
+ */
+enum bfa_port_topology {
+ BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */
+ BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */
+ BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
+ BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */
+ BFA_PORT_TOPOLOGY_AUTO = 4, /* auto topology selection */
+ BFA_PORT_TOPOLOGY_P2P = 5, /* P2P only */
+};
+
+/*
+ * Physical port loopback types.
+ */
+enum bfa_port_opmode {
+ BFA_PORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */
+ BFA_PORT_OPMODE_LB_INT = 0x01, /* internal loop back */
+ BFA_PORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */
+ BFA_PORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */
+ BFA_PORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */
+ BFA_PORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */
+};
+
+#define BFA_PORT_OPMODE_LB_HARD(_mode) \
+ ((_mode == BFA_PORT_OPMODE_LB_INT) || \
+ (_mode == BFA_PORT_OPMODE_LB_SLW) || \
+ (_mode == BFA_PORT_OPMODE_LB_EXT))
+
+/*
+ * Port link state
+ */
+enum bfa_port_linkstate {
+ BFA_PORT_LINKUP = 1, /* Physical port/Trunk link up */
+ BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */
+};
+
+/*
+ * Port link state reason code
+ */
+enum bfa_port_linkstate_rsn {
+ BFA_PORT_LINKSTATE_RSN_NONE = 0,
+ BFA_PORT_LINKSTATE_RSN_DISABLED = 1,
+ BFA_PORT_LINKSTATE_RSN_RX_NOS = 2,
+ BFA_PORT_LINKSTATE_RSN_RX_OLS = 3,
+ BFA_PORT_LINKSTATE_RSN_RX_LIP = 4,
+ BFA_PORT_LINKSTATE_RSN_RX_LIPF7 = 5,
+ BFA_PORT_LINKSTATE_RSN_SFP_REMOVED = 6,
+ BFA_PORT_LINKSTATE_RSN_PORT_FAULT = 7,
+ BFA_PORT_LINKSTATE_RSN_RX_LOS = 8,
+ BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
+ BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
+ BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11,
+ BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG = 12,
+
+
+
+ /* CEE related reason codes/errors */
+ CEE_LLDP_INFO_AGED_OUT = 20,
+ CEE_LLDP_SHUTDOWN_TLV_RCVD = 21,
+ CEE_PEER_NOT_ADVERTISE_DCBX = 22,
+ CEE_PEER_NOT_ADVERTISE_PG = 23,
+ CEE_PEER_NOT_ADVERTISE_PFC = 24,
+ CEE_PEER_NOT_ADVERTISE_FCOE = 25,
+ CEE_PG_NOT_COMPATIBLE = 26,
+ CEE_PFC_NOT_COMPATIBLE = 27,
+ CEE_FCOE_NOT_COMPATIBLE = 28,
+ CEE_BAD_PG_RCVD = 29,
+ CEE_BAD_BW_RCVD = 30,
+ CEE_BAD_PFC_RCVD = 31,
+ CEE_BAD_APP_PRI_RCVD = 32,
+ CEE_FCOE_PRI_PFC_OFF = 33,
+ CEE_DUP_CONTROL_TLV_RCVD = 34,
+ CEE_DUP_FEAT_TLV_RCVD = 35,
+ CEE_APPLY_NEW_CFG = 36, /* reason, not error */
+ CEE_PROTOCOL_INIT = 37, /* reason, not error */
+ CEE_PHY_LINK_DOWN = 38,
+ CEE_LLS_FCOE_ABSENT = 39,
+ CEE_LLS_FCOE_DOWN = 40,
+ CEE_ISCSI_NOT_COMPATIBLE = 41,
+ CEE_ISCSI_PRI_PFC_OFF = 42,
+ CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
+};
+
+#define MAX_LUN_MASK_CFG 16
+
+/*
+ * Initially flash content may be fff. On making LUN mask enable and disable
+ * state chnage. when report lun command is being processed it goes from
+ * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
+ * BFA_LUN_MASK_ACTIVE.
+ */
+enum bfa_ioim_lun_mask_state_s {
+ BFA_IOIM_LUN_MASK_INACTIVE = 0,
+ BFA_IOIM_LUN_MASK_ACTIVE = 1,
+ BFA_IOIM_LUN_MASK_FETCHED = 2,
+};
+
+enum bfa_lunmask_state_s {
+ BFA_LUNMASK_DISABLED = 0x00,
+ BFA_LUNMASK_ENABLED = 0x01,
+ BFA_LUNMASK_MINCFG = 0x02,
+ BFA_LUNMASK_UNINITIALIZED = 0xff,
+};
+
+/**
+ * FEC states
+ */
+enum bfa_fec_state_s {
+ BFA_FEC_ONLINE = 1, /*!< FEC is online */
+ BFA_FEC_OFFLINE = 2, /*!< FEC is offline */
+ BFA_FEC_OFFLINE_NOT_16G = 3, /*!< FEC is offline (speed not 16Gig) */
+};
+
+#pragma pack(1)
+/*
+ * LUN mask configuration
+ */
+struct bfa_lun_mask_s {
+ wwn_t lp_wwn;
+ wwn_t rp_wwn;
+ struct scsi_lun lun;
+ u8 ua;
+ u8 rsvd[3];
+ u16 rp_tag;
+ u8 lp_tag;
+ u8 state;
+};
+
+#define MAX_LUN_MASK_CFG 16
+struct bfa_lunmask_cfg_s {
+ u32 status;
+ u32 rsvd;
+ struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
+};
+
+struct bfa_throttle_cfg_s {
+ u16 is_valid;
+ u16 value;
+ u32 rsvd;
+};
+
+struct bfa_defs_fcpim_throttle_s {
+ u16 max_value;
+ u16 cur_value;
+ u16 cfg_value;
+ u16 rsvd;
+};
+
+#define BFA_BB_SCN_DEF 3
+#define BFA_BB_SCN_MAX 0x0F
+
+/*
+ * Physical port configuration
+ */
+struct bfa_port_cfg_s {
+ u8 topology; /* bfa_port_topology */
+ u8 speed; /* enum bfa_port_speed */
+ u8 trunked; /* trunked or not */
+ u8 qos_enabled; /* qos enabled or not */
+ u8 cfg_hardalpa; /* is hard alpa configured */
+ u8 hardalpa; /* configured hard alpa */
+ __be16 maxfrsize; /* maximum frame size */
+ u8 rx_bbcredit; /* receive buffer credits */
+ u8 tx_bbcredit; /* transmit buffer credits */
+ u8 ratelimit; /* ratelimit enabled or not */
+ u8 trl_def_speed; /* ratelimit default speed */
+ u8 bb_cr_enabled; /*!< Config state of BB_SCN */
+ u8 bb_scn; /*!< BB_SCN value for FLOGI Exchg */
+ u8 faa_state; /* FAA enabled/disabled */
+ u8 rsvd1;
+ u16 path_tov; /* device path timeout */
+ u16 q_depth; /* SCSI Queue depth */
+ struct bfa_qos_bw_s qos_bw; /* QOS bandwidth */
+};
+#pragma pack()
+
+/*
+ * Port attribute values.
+ */
+struct bfa_port_attr_s {
+ /*
+ * Static fields
+ */
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ wwn_t factorynwwn; /* factory node wwn */
+ wwn_t factorypwwn; /* factory port wwn */
+ enum fc_cos cos_supported; /* supported class of
+ * services */
+ u32 rsvd;
+ struct fc_symname_s port_symname; /* port symbolic name */
+ enum bfa_port_speed speed_supported; /* supported speeds */
+ bfa_boolean_t pbind_enabled;
+
+ /*
+ * Configured values
+ */
+ struct bfa_port_cfg_s pport_cfg; /* pport cfg */
+
+ /*
+ * Dynamic field - info from BFA
+ */
+ enum bfa_port_states port_state; /* current port state */
+ enum bfa_port_speed speed; /* current speed */
+ enum bfa_port_topology topology; /* current topology */
+ bfa_boolean_t beacon; /* current beacon status */
+ bfa_boolean_t link_e2e_beacon; /* link beacon is on */
+ bfa_boolean_t bbsc_op_status; /* fc credit recovery oper
+ * state */
+ enum bfa_fec_state_s fec_state; /*!< current FEC state */
+
+ /*
+ * Dynamic field - info from FCS
+ */
+ u32 pid; /* port ID */
+ enum bfa_port_type port_type; /* current topology */
+ u32 loopback; /* external loopback */
+ u32 authfail; /* auth fail state */
+
+ /* FCoE specific */
+ u16 fcoe_vlan;
+ u8 rsvd1[2];
+};
+
+/*
+ * Port FCP mappings.
+ */
+struct bfa_port_fcpmap_s {
+ char osdevname[256];
+ u32 bus;
+ u32 target;
+ u32 oslun;
+ u32 fcid;
+ wwn_t nwwn;
+ wwn_t pwwn;
+ u64 fcplun;
+ char luid[256];
+};
+
+/*
+ * Port RNID info.
+ */
+struct bfa_port_rnid_s {
+ wwn_t wwn;
+ u32 unittype;
+ u32 portid;
+ u32 attached_nodes_num;
+ u16 ip_version;
+ u16 udp_port;
+ u8 ipaddr[16];
+ u16 rsvd;
+ u16 topologydiscoveryflags;
+};
+
+#pragma pack(1)
+struct bfa_fcport_fcf_s {
+ wwn_t name; /* FCF name */
+ wwn_t fabric_name; /* Fabric Name */
+ u8 fipenabled; /* FIP enabled or not */
+ u8 fipfailed; /* FIP failed or not */
+ u8 resv[2];
+ u8 pri; /* FCF priority */
+ u8 version; /* FIP version used */
+ u8 available; /* Available for login */
+ u8 fka_disabled; /* FKA is disabled */
+ u8 maxsz_verified; /* FCoE max size verified */
+ u8 fc_map[3]; /* FC map */
+ __be16 vlan; /* FCoE vlan tag/priority */
+ u32 fka_adv_per; /* FIP ka advert. period */
+ mac_t mac; /* FCF mac */
+};
+
+/*
+ * Trunk states for BCU/BFAL
+ */
+enum bfa_trunk_state {
+ BFA_TRUNK_DISABLED = 0, /* Trunk is not configured */
+ BFA_TRUNK_ONLINE = 1, /* Trunk is online */
+ BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */
+};
+
+/*
+ * VC attributes for trunked link
+ */
+struct bfa_trunk_vc_attr_s {
+ u32 bb_credit;
+ u32 elp_opmode_flags;
+ u32 req_credit;
+ u16 vc_credits[8];
+};
+
+struct bfa_fcport_loop_info_s {
+ u8 myalpa; /* alpa claimed */
+ u8 alpabm_val; /* alpa bitmap valid or not (1 or 0) */
+ u8 resvd[6];
+ struct fc_alpabm_s alpabm; /* alpa bitmap */
+};
+
+/*
+ * Link state information
+ */
+struct bfa_port_link_s {
+ u8 linkstate; /* Link state bfa_port_linkstate */
+ u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */
+ u8 topology; /* P2P/LOOP bfa_port_topology */
+ u8 speed; /* Link speed (1/2/4/8 G) */
+ u32 linkstate_opt; /* Linkstate optional data (debug) */
+ u8 trunked; /* Trunked or not (1 or 0) */
+ u8 fec_state; /*!< State of FEC */
+ u8 resvd[6];
+ struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
+ union {
+ struct bfa_fcport_loop_info_s loop_info;
+ struct bfa_bbcr_attr_s bbcr_attr;
+ union {
+ struct bfa_qos_vc_attr_s qos_vc_attr;
+ /* VC info from ELP */
+ struct bfa_trunk_vc_attr_s trunk_vc_attr;
+ struct bfa_fcport_fcf_s fcf;
+ /* FCF information (for FCoE) */
+ } vc_fcf;
+ } attr;
+};
+#pragma pack()
+
+enum bfa_trunk_link_fctl {
+ BFA_TRUNK_LINK_FCTL_NORMAL,
+ BFA_TRUNK_LINK_FCTL_VC,
+ BFA_TRUNK_LINK_FCTL_VC_QOS,
+};
+
+enum bfa_trunk_link_state {
+ BFA_TRUNK_LINK_STATE_UP = 1, /* link part of trunk */
+ BFA_TRUNK_LINK_STATE_DN_LINKDN = 2, /* physical link down */
+ BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3, /* trunk group different */
+ BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4, /* speed mismatch */
+ BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5, /* remote port not trunked */
+};
+
+#define BFA_TRUNK_MAX_PORTS 2
+struct bfa_trunk_link_attr_s {
+ wwn_t trunk_wwn;
+ enum bfa_trunk_link_fctl fctl;
+ enum bfa_trunk_link_state link_state;
+ enum bfa_port_speed speed;
+ u32 deskew;
+};
+
+struct bfa_trunk_attr_s {
+ enum bfa_trunk_state state;
+ enum bfa_port_speed speed;
+ u32 port_id;
+ u32 rsvd;
+ struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS];
+};
+
+struct bfa_rport_hal_stats_s {
+ u32 sm_un_cr; /* uninit: create events */
+ u32 sm_un_unexp; /* uninit: exception events */
+ u32 sm_cr_on; /* created: online events */
+ u32 sm_cr_del; /* created: delete events */
+ u32 sm_cr_hwf; /* created: IOC down */
+ u32 sm_cr_unexp; /* created: exception events */
+ u32 sm_fwc_rsp; /* fw create: f/w responses */
+ u32 sm_fwc_del; /* fw create: delete events */
+ u32 sm_fwc_off; /* fw create: offline events */
+ u32 sm_fwc_hwf; /* fw create: IOC down */
+ u32 sm_fwc_unexp; /* fw create: exception events*/
+ u32 sm_on_off; /* online: offline events */
+ u32 sm_on_del; /* online: delete events */
+ u32 sm_on_hwf; /* online: IOC down events */
+ u32 sm_on_unexp; /* online: exception events */
+ u32 sm_fwd_rsp; /* fw delete: fw responses */
+ u32 sm_fwd_del; /* fw delete: delete events */
+ u32 sm_fwd_hwf; /* fw delete: IOC down events */
+ u32 sm_fwd_unexp; /* fw delete: exception events*/
+ u32 sm_off_del; /* offline: delete events */
+ u32 sm_off_on; /* offline: online events */
+ u32 sm_off_hwf; /* offline: IOC down events */
+ u32 sm_off_unexp; /* offline: exception events */
+ u32 sm_del_fwrsp; /* delete: fw responses */
+ u32 sm_del_hwf; /* delete: IOC down events */
+ u32 sm_del_unexp; /* delete: exception events */
+ u32 sm_delp_fwrsp; /* delete pend: fw responses */
+ u32 sm_delp_hwf; /* delete pend: IOC downs */
+ u32 sm_delp_unexp; /* delete pend: exceptions */
+ u32 sm_offp_fwrsp; /* off-pending: fw responses */
+ u32 sm_offp_del; /* off-pending: deletes */
+ u32 sm_offp_hwf; /* off-pending: IOC downs */
+ u32 sm_offp_unexp; /* off-pending: exceptions */
+ u32 sm_iocd_off; /* IOC down: offline events */
+ u32 sm_iocd_del; /* IOC down: delete events */
+ u32 sm_iocd_on; /* IOC down: online events */
+ u32 sm_iocd_unexp; /* IOC down: exceptions */
+ u32 rsvd;
+};
+#pragma pack(1)
+/*
+ * Rport's QoS attributes
+ */
+struct bfa_rport_qos_attr_s {
+ u8 qos_priority; /* rport's QoS priority */
+ u8 rsvd[3];
+ u32 qos_flow_id; /* QoS flow Id */
+};
+#pragma pack()
+
+#define BFA_IOBUCKET_MAX 14
+
+struct bfa_itnim_latency_s {
+ u32 min[BFA_IOBUCKET_MAX];
+ u32 max[BFA_IOBUCKET_MAX];
+ u32 count[BFA_IOBUCKET_MAX];
+ u32 avg[BFA_IOBUCKET_MAX];
+};
+
+struct bfa_itnim_ioprofile_s {
+ u32 clock_res_mul;
+ u32 clock_res_div;
+ u32 index;
+ u32 io_profile_start_time; /* IO profile start time */
+ u32 iocomps[BFA_IOBUCKET_MAX]; /* IO completed */
+ struct bfa_itnim_latency_s io_latency;
+};
+
+/*
+ * vHBA port attribute values.
+ */
+struct bfa_vhba_attr_s {
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ u32 pid; /* port ID */
+ bfa_boolean_t io_profile; /* get it from fcpim mod */
+ bfa_boolean_t plog_enabled; /* portlog is enabled */
+ u16 path_tov;
+ u8 rsvd[2];
+};
+
+/*
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats_s {
+ u64 secs_reset; /* Seconds since stats is reset */
+ u64 tx_frames; /* Tx frames */
+ u64 tx_words; /* Tx words */
+ u64 tx_lip; /* Tx LIP */
+ u64 tx_lip_f7f7; /* Tx LIP_F7F7 */
+ u64 tx_lip_f8f7; /* Tx LIP_F8F7 */
+ u64 tx_arbf0; /* Tx ARB F0 */
+ u64 tx_nos; /* Tx NOS */
+ u64 tx_ols; /* Tx OLS */
+ u64 tx_lr; /* Tx LR */
+ u64 tx_lrr; /* Tx LRR */
+ u64 rx_frames; /* Rx frames */
+ u64 rx_words; /* Rx words */
+ u64 lip_count; /* Rx LIP */
+ u64 rx_lip_f7f7; /* Rx LIP_F7F7 */
+ u64 rx_lip_f8f7; /* Rx LIP_F8F7 */
+ u64 rx_arbf0; /* Rx ARB F0 */
+ u64 nos_count; /* Rx NOS */
+ u64 ols_count; /* Rx OLS */
+ u64 lr_count; /* Rx LR */
+ u64 lrr_count; /* Rx LRR */
+ u64 invalid_crcs; /* Rx CRC err frames */
+ u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
+ u64 undersized_frm; /* Rx undersized frames */
+ u64 oversized_frm; /* Rx oversized frames */
+ u64 bad_eof_frm; /* Rx frames with bad EOF */
+ u64 error_frames; /* Errored frames */
+ u64 dropped_frames; /* Dropped frames */
+ u64 link_failures; /* Link Failure (LF) count */
+ u64 loss_of_syncs; /* Loss of sync count */
+ u64 loss_of_signals; /* Loss of signal count */
+ u64 primseq_errs; /* Primitive sequence protocol err. */
+ u64 bad_os_count; /* Invalid ordered sets */
+ u64 err_enc_out; /* Encoding err nonframe_8b10b */
+ u64 err_enc; /* Encoding err frame_8b10b */
+ u64 bbcr_frames_lost; /*!< BBCR Frames Lost */
+ u64 bbcr_rrdys_lost; /*!< BBCR RRDYs Lost */
+ u64 bbcr_link_resets; /*!< BBCR Link Resets */
+ u64 bbcr_frame_lost_intrs; /*!< BBCR Frame loss intrs */
+ u64 bbcr_rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */
+ u64 loop_timeouts; /* Loop timeouts */
+};
+
+/*
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats_s {
+ u64 secs_reset; /* Seconds since stats is reset */
+ u64 frame_64; /* Frames 64 bytes */
+ u64 frame_65_127; /* Frames 65-127 bytes */
+ u64 frame_128_255; /* Frames 128-255 bytes */
+ u64 frame_256_511; /* Frames 256-511 bytes */
+ u64 frame_512_1023; /* Frames 512-1023 bytes */
+ u64 frame_1024_1518; /* Frames 1024-1518 bytes */
+ u64 frame_1519_1522; /* Frames 1519-1522 bytes */
+ u64 tx_bytes; /* Tx bytes */
+ u64 tx_packets; /* Tx packets */
+ u64 tx_mcast_packets; /* Tx multicast packets */
+ u64 tx_bcast_packets; /* Tx broadcast packets */
+ u64 tx_control_frame; /* Tx control frame */
+ u64 tx_drop; /* Tx drops */
+ u64 tx_jabber; /* Tx jabber */
+ u64 tx_fcs_error; /* Tx FCS errors */
+ u64 tx_fragments; /* Tx fragments */
+ u64 rx_bytes; /* Rx bytes */
+ u64 rx_packets; /* Rx packets */
+ u64 rx_mcast_packets; /* Rx multicast packets */
+ u64 rx_bcast_packets; /* Rx broadcast packets */
+ u64 rx_control_frames; /* Rx control frames */
+ u64 rx_unknown_opcode; /* Rx unknown opcode */
+ u64 rx_drop; /* Rx drops */
+ u64 rx_jabber; /* Rx jabber */
+ u64 rx_fcs_error; /* Rx FCS errors */
+ u64 rx_alignment_error; /* Rx alignment errors */
+ u64 rx_frame_length_error; /* Rx frame len errors */
+ u64 rx_code_error; /* Rx code errors */
+ u64 rx_fragments; /* Rx fragments */
+ u64 rx_pause; /* Rx pause */
+ u64 rx_zero_pause; /* Rx zero pause */
+ u64 tx_pause; /* Tx pause */
+ u64 tx_zero_pause; /* Tx zero pause */
+ u64 rx_fcoe_pause; /* Rx FCoE pause */
+ u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
+ u64 tx_fcoe_pause; /* Tx FCoE pause */
+ u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
+ u64 rx_iscsi_pause; /* Rx iSCSI pause */
+ u64 rx_iscsi_zero_pause; /* Rx iSCSI zero pause */
+ u64 tx_iscsi_pause; /* Tx iSCSI pause */
+ u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */
+};
+
+/*
+ * Port statistics.
+ */
+union bfa_port_stats_u {
+ struct bfa_port_fc_stats_s fc;
+ struct bfa_port_eth_stats_s eth;
+};
+
+struct bfa_port_cfg_mode_s {
+ u16 max_pf;
+ u16 max_vf;
+ enum bfa_mode_s mode;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY (8)
+#define BFA_CEE_DCBX_MAX_PGID (8)
+
+struct bfa_cee_lldp_str_s {
+ u8 sub_type;
+ u8 len;
+ u8 rsvd[2];
+ u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+struct bfa_cee_lldp_cfg_s {
+ struct bfa_cee_lldp_str_s chassis_id;
+ struct bfa_cee_lldp_str_s port_id;
+ struct bfa_cee_lldp_str_s port_desc;
+ struct bfa_cee_lldp_str_s sys_name;
+ struct bfa_cee_lldp_str_s sys_desc;
+ struct bfa_cee_lldp_str_s mgmt_addr;
+ u16 time_to_live;
+ u16 enabled_system_cap;
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg_s {
+ u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+ u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+ u8 pfc_primap; /* bitmap of priorties with PFC enabled */
+ u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */
+ u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+ u8 dcbx_version; /* operating version:CEE or preCEE */
+ u8 lls_fcoe; /* FCoE Logical Link Status */
+ u8 lls_lan; /* LAN Logical Link Status */
+ u8 rsvd[2];
+};
+
+/* CEE Query */
+struct bfa_cee_attr_s {
+ u8 cee_status;
+ u8 error_reason;
+ struct bfa_cee_lldp_cfg_s lldp_remote;
+ struct bfa_cee_dcbx_cfg_s dcbx_remote;
+ mac_t src_mac;
+ u8 link_speed;
+ u8 nw_priority;
+ u8 filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats_s {
+ u32 lldp_tx_frames; /* LLDP Tx Frames */
+ u32 lldp_rx_frames; /* LLDP Rx Frames */
+ u32 lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */
+ u32 lldp_rx_frames_new; /* LLDP Rx Frames new */
+ u32 lldp_tlvs_unrecognized; /* LLDP Rx unrecog. TLVs */
+ u32 lldp_rx_shutdown_tlvs; /* LLDP Rx shutdown TLVs */
+ u32 lldp_info_aged_out; /* LLDP remote info aged */
+ u32 dcbx_phylink_ups; /* DCBX phy link ups */
+ u32 dcbx_phylink_downs; /* DCBX phy link downs */
+ u32 dcbx_rx_tlvs; /* DCBX Rx TLVs */
+ u32 dcbx_rx_tlvs_invalid; /* DCBX Rx TLVs invalid */
+ u32 dcbx_control_tlv_error; /* DCBX control TLV errors */
+ u32 dcbx_feature_tlv_error; /* DCBX feature TLV errors */
+ u32 dcbx_cee_cfg_new; /* DCBX new CEE cfg rcvd */
+ u32 cee_status_down; /* DCB status down */
+ u32 cee_status_up; /* DCB status up */
+ u32 cee_hw_cfg_changed; /* DCB hw cfg changed */
+ u32 cee_rx_invalid_cfg; /* DCB invalid cfg */
+};
+
+#pragma pack()
+
+/*
+ * AEN related definitions
+ */
+#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
+ | BFA_PCI_VENDOR_ID_BROCADE)
+
+/* BFA remote port events */
+enum bfa_rport_aen_event {
+ BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
+ BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
+ BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
+ BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
+ BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
+};
+
+struct bfa_rport_aen_data_s {
+ u16 vf_id; /* vf_id of this logical port */
+ u16 rsvd[3];
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of this logical port */
+ wwn_t rpwwn; /* WWN of this remote port */
+ union {
+ struct bfa_rport_qos_attr_s qos;
+ } priv;
+};
+
+union bfa_aen_data_u {
+ struct bfa_adapter_aen_data_s adapter;
+ struct bfa_port_aen_data_s port;
+ struct bfa_lport_aen_data_s lport;
+ struct bfa_rport_aen_data_s rport;
+ struct bfa_itnim_aen_data_s itnim;
+ struct bfa_audit_aen_data_s audit;
+ struct bfa_ioc_aen_data_s ioc;
+};
+
+#define BFA_AEN_MAX_ENTRY 512
+
+struct bfa_aen_entry_s {
+ struct list_head qe;
+ enum bfa_aen_category aen_category;
+ u32 aen_type;
+ union bfa_aen_data_u aen_data;
+ struct timeval aen_tv;
+ u32 seq_num;
+ u32 bfad_num;
+};
+
+#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
new file mode 100644
index 000000000..64069a0a3
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -0,0 +1,1628 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FC_H__
+#define __BFA_FC_H__
+
+#include "bfad_drv.h"
+
+typedef u64 wwn_t;
+
+#define WWN_NULL (0)
+#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
+#define FC_ALPA_MAX 128
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN (6)
+struct mac_s { u8 mac[MAC_ADDRLEN]; };
+#define mac_t struct mac_s
+
+/*
+ * generic SCSI cdb definition
+ */
+#define SCSI_MAX_CDBLEN 16
+struct scsi_cdb_s {
+ u8 scsi_cdb[SCSI_MAX_CDBLEN];
+};
+
+/* ------------------------------------------------------------
+ * SCSI status byte values
+ * ------------------------------------------------------------
+ */
+#define SCSI_STATUS_GOOD 0x00
+#define SCSI_STATUS_CHECK_CONDITION 0x02
+#define SCSI_STATUS_CONDITION_MET 0x04
+#define SCSI_STATUS_BUSY 0x08
+#define SCSI_STATUS_INTERMEDIATE 0x10
+#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
+#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
+#define SCSI_STATUS_COMMAND_TERMINATED 0x22
+#define SCSI_STATUS_QUEUE_FULL 0x28
+#define SCSI_STATUS_ACA_ACTIVE 0x30
+
+#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
+
+/*
+ * Fibre Channel Header Structure (FCHS) definition
+ */
+struct fchs_s {
+#ifdef __BIG_ENDIAN
+ u32 routing:4; /* routing bits */
+ u32 cat_info:4; /* category info */
+#else
+ u32 cat_info:4; /* category info */
+ u32 routing:4; /* routing bits */
+#endif
+ u32 d_id:24; /* destination identifier */
+
+ u32 cs_ctl:8; /* class specific control */
+ u32 s_id:24; /* source identifier */
+
+ u32 type:8; /* data structure type */
+ u32 f_ctl:24; /* initial frame control */
+
+ u8 seq_id; /* sequence identifier */
+ u8 df_ctl; /* data field control */
+ u16 seq_cnt; /* sequence count */
+
+ __be16 ox_id; /* originator exchange ID */
+ u16 rx_id; /* responder exchange ID */
+
+ u32 ro; /* relative offset */
+};
+
+/*
+ * routing bit definitions
+ */
+enum {
+ FC_RTG_FC4_DEV_DATA = 0x0, /* FC-4 Device Data */
+ FC_RTG_EXT_LINK = 0x2, /* Extended Link Data */
+ FC_RTG_FC4_LINK_DATA = 0x3, /* FC-4 Link Data */
+ FC_RTG_VIDEO_DATA = 0x4, /* Video Data */
+ FC_RTG_EXT_HDR = 0x5, /* VFT, IFR or Encapsuled */
+ FC_RTG_BASIC_LINK = 0x8, /* Basic Link data */
+ FC_RTG_LINK_CTRL = 0xC, /* Link Control */
+};
+
+/*
+ * information category for extended link data and FC-4 Link Data
+ */
+enum {
+ FC_CAT_LD_REQUEST = 0x2, /* Request */
+ FC_CAT_LD_REPLY = 0x3, /* Reply */
+ FC_CAT_LD_DIAG = 0xF, /* for DIAG use only */
+};
+
+/*
+ * information category for extended headers (VFT, IFR or encapsulation)
+ */
+enum {
+ FC_CAT_VFT_HDR = 0x0, /* Virtual fabric tagging header */
+ FC_CAT_IFR_HDR = 0x1, /* Inter-Fabric routing header */
+ FC_CAT_ENC_HDR = 0x2, /* Encapsulation header */
+};
+
+/*
+ * information category for FC-4 device data
+ */
+enum {
+ FC_CAT_UNCATEG_INFO = 0x0, /* Uncategorized information */
+ FC_CAT_SOLICIT_DATA = 0x1, /* Solicited Data */
+ FC_CAT_UNSOLICIT_CTRL = 0x2, /* Unsolicited Control */
+ FC_CAT_SOLICIT_CTRL = 0x3, /* Solicited Control */
+ FC_CAT_UNSOLICIT_DATA = 0x4, /* Unsolicited Data */
+ FC_CAT_DATA_DESC = 0x5, /* Data Descriptor */
+ FC_CAT_UNSOLICIT_CMD = 0x6, /* Unsolicited Command */
+ FC_CAT_CMD_STATUS = 0x7, /* Command Status */
+};
+
+/*
+ * Type Field Definitions. FC-PH Section 18.5 pg. 165
+ */
+enum {
+ FC_TYPE_BLS = 0x0, /* Basic Link Service */
+ FC_TYPE_ELS = 0x1, /* Extended Link Service */
+ FC_TYPE_IP = 0x5, /* IP */
+ FC_TYPE_FCP = 0x8, /* SCSI-FCP */
+ FC_TYPE_GPP = 0x9, /* SCSI_GPP */
+ FC_TYPE_SERVICES = 0x20, /* Fibre Channel Services */
+ FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */
+ FC_TYPE_FC_AL = 0x23, /* FC-AL */
+ FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */
+ FC_TYPE_FC_SPINFAB = 0xEE, /* SPINFAB */
+ FC_TYPE_FC_DIAG = 0xEF, /* DIAG */
+ FC_TYPE_MAX = 256, /* 256 FC-4 types */
+};
+
+/*
+ * Frame Control Definitions. FC-PH Table-45. pg. 168
+ */
+enum {
+ FCTL_EC_ORIG = 0x000000, /* exchange originator */
+ FCTL_EC_RESP = 0x800000, /* exchange responder */
+ FCTL_SEQ_INI = 0x000000, /* sequence initiator */
+ FCTL_SEQ_REC = 0x400000, /* sequence recipient */
+ FCTL_FS_EXCH = 0x200000, /* first sequence of xchg */
+ FCTL_LS_EXCH = 0x100000, /* last sequence of xchg */
+ FCTL_END_SEQ = 0x080000, /* last frame of sequence */
+ FCTL_SI_XFER = 0x010000, /* seq initiative transfer */
+ FCTL_RO_PRESENT = 0x000008, /* relative offset present */
+ FCTL_FILLBYTE_MASK = 0x000003 /* , fill byte mask */
+};
+
+/*
+ * Fabric Well Known Addresses
+ */
+enum {
+ FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0,
+ FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00,
+ FC_ALIAS_SERVER = 0xFFFFF8,
+ FC_MGMT_SERVER = 0xFFFFFA,
+ FC_TIME_SERVER = 0xFFFFFB,
+ FC_NAME_SERVER = 0xFFFFFC,
+ FC_FABRIC_CONTROLLER = 0xFFFFFD,
+ FC_FABRIC_PORT = 0xFFFFFE,
+ FC_BROADCAST_SERVER = 0xFFFFFF
+};
+
+/*
+ * domain/area/port defines
+ */
+#define FC_DOMAIN_MASK 0xFF0000
+#define FC_DOMAIN_SHIFT 16
+#define FC_AREA_MASK 0x00FF00
+#define FC_AREA_SHIFT 8
+#define FC_PORT_MASK 0x0000FF
+#define FC_PORT_SHIFT 0
+
+#define FC_GET_DOMAIN(p) (((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT)
+#define FC_GET_AREA(p) (((p) & FC_AREA_MASK) >> FC_AREA_SHIFT)
+#define FC_GET_PORT(p) (((p) & FC_PORT_MASK) >> FC_PORT_SHIFT)
+
+#define FC_DOMAIN_CTRLR(p) (FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p)))
+
+enum {
+ FC_RXID_ANY = 0xFFFFU,
+};
+
+/*
+ * generic ELS command
+ */
+struct fc_els_cmd_s {
+ u32 els_code:8; /* ELS Command Code */
+ u32 reserved:24;
+};
+
+/*
+ * ELS Command Codes. FC-PH Table-75. pg. 223
+ */
+enum {
+ FC_ELS_LS_RJT = 0x1, /* Link Service Reject. */
+ FC_ELS_ACC = 0x02, /* Accept */
+ FC_ELS_PLOGI = 0x03, /* N_Port Login. */
+ FC_ELS_FLOGI = 0x04, /* F_Port Login. */
+ FC_ELS_LOGO = 0x05, /* Logout. */
+ FC_ELS_ABTX = 0x06, /* Abort Exchange */
+ FC_ELS_RES = 0x08, /* Read Exchange status */
+ FC_ELS_RSS = 0x09, /* Read sequence status block */
+ FC_ELS_RSI = 0x0A, /* Request Sequence Initiative */
+ FC_ELS_ESTC = 0x0C, /* Estimate Credit. */
+ FC_ELS_RTV = 0x0E, /* Read Timeout Value. */
+ FC_ELS_RLS = 0x0F, /* Read Link Status. */
+ FC_ELS_ECHO = 0x10, /* Echo */
+ FC_ELS_TEST = 0x11, /* Test */
+ FC_ELS_RRQ = 0x12, /* Reinstate Recovery Qualifier. */
+ FC_ELS_REC = 0x13, /* Add this for TAPE support in FCR */
+ FC_ELS_PRLI = 0x20, /* Process Login */
+ FC_ELS_PRLO = 0x21, /* Process Logout. */
+ FC_ELS_SCN = 0x22, /* State Change Notification. */
+ FC_ELS_TPRLO = 0x24, /* Third Party Process Logout. */
+ FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */
+ FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */
+ FC_ELS_ADISC = 0x52, /* Discover Address. */
+ FC_ELS_FARP_REQ = 0x54, /* FARP Request. */
+ FC_ELS_FARP_REP = 0x55, /* FARP Reply. */
+ FC_ELS_FAN = 0x60, /* Fabric Address Notification */
+ FC_ELS_RSCN = 0x61, /* Reg State Change Notification */
+ FC_ELS_SCR = 0x62, /* State Change Registration. */
+ FC_ELS_RTIN = 0x77, /* Mangement server request */
+ FC_ELS_RNID = 0x78, /* Mangement server request */
+ FC_ELS_RLIR = 0x79, /* Registered Link Incident Record */
+
+ FC_ELS_RPSC = 0x7D, /* Report Port Speed Capabilities */
+ FC_ELS_QSA = 0x7E, /* Query Security Attributes. Ref FC-SP */
+ FC_ELS_E2E_LBEACON = 0x81,
+ /* End-to-End Link Beacon */
+ FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */
+ FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref
+ *FC-SP */
+};
+
+/*
+ * Version numbers for FC-PH standards,
+ * used in login to indicate what port
+ * supports. See FC-PH-X table 158.
+ */
+enum {
+ FC_PH_VER_4_3 = 0x09,
+ FC_PH_VER_PH_3 = 0x20,
+};
+
+/*
+ * PDU size defines
+ */
+enum {
+ FC_MIN_PDUSZ = 512,
+ FC_MAX_PDUSZ = 2112,
+};
+
+/*
+ * N_Port PLOGI Common Service Parameters.
+ * FC-PH-x. Figure-76. pg. 308.
+ */
+struct fc_plogi_csp_s {
+ u8 verhi; /* FC-PH high version */
+ u8 verlo; /* FC-PH low version */
+ __be16 bbcred; /* BB_Credit */
+
+#ifdef __BIG_ENDIAN
+ u8 ciro:1, /* continuously increasing RO */
+ rro:1, /* random relative offset */
+ npiv_supp:1, /* NPIV supported */
+ port_type:1, /* N_Port/F_port */
+ altbbcred:1, /* alternate BB_Credit */
+ resolution:1, /* ms/ns ED_TOV resolution */
+ vvl_info:1, /* VVL Info included */
+ reserved1:1;
+
+ u8 hg_supp:1,
+ query_dbc:1,
+ security:1,
+ sync_cap:1,
+ r_t_tov:1,
+ dh_dup_supp:1,
+ cisc:1, /* continuously increasing seq count */
+ payload:1;
+#else
+ u8 reserved2:2,
+ resolution:1, /* ms/ns ED_TOV resolution */
+ altbbcred:1, /* alternate BB_Credit */
+ port_type:1, /* N_Port/F_port */
+ npiv_supp:1, /* NPIV supported */
+ rro:1, /* random relative offset */
+ ciro:1; /* continuously increasing RO */
+
+ u8 payload:1,
+ cisc:1, /* continuously increasing seq count */
+ dh_dup_supp:1,
+ r_t_tov:1,
+ sync_cap:1,
+ security:1,
+ query_dbc:1,
+ hg_supp:1;
+#endif
+ __be16 rxsz; /* receive data_field size */
+ __be16 conseq;
+ __be16 ro_bitmap;
+ __be32 e_d_tov;
+};
+
+/*
+ * N_Port PLOGI Class Specific Parameters.
+ * FC-PH-x. Figure 78. pg. 318.
+ */
+struct fc_plogi_clp_s {
+#ifdef __BIG_ENDIAN
+ u32 class_valid:1;
+ u32 intermix:1; /* class intermix supported if set =1.
+ * valid only for class1. Reserved for
+ * class2 & class3 */
+ u32 reserved1:2;
+ u32 sequential:1;
+ u32 reserved2:3;
+#else
+ u32 reserved2:3;
+ u32 sequential:1;
+ u32 reserved1:2;
+ u32 intermix:1; /* class intermix supported if set =1.
+ * valid only for class1. Reserved for
+ * class2 & class3 */
+ u32 class_valid:1;
+#endif
+ u32 reserved3:24;
+
+ u32 reserved4:16;
+ u32 rxsz:16; /* Receive data_field size */
+
+ u32 reserved5:8;
+ u32 conseq:8;
+ u32 e2e_credit:16; /* end to end credit */
+
+ u32 reserved7:8;
+ u32 ospx:8;
+ u32 reserved8:16;
+};
+
+/* ASCII value for each character in string "BRCD" */
+#define FLOGI_VVL_BRCD 0x42524344
+
+/*
+ * PLOGI els command and reply payload
+ */
+struct fc_logi_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ struct fc_plogi_csp_s csp; /* common service params */
+ wwn_t port_name;
+ wwn_t node_name;
+ struct fc_plogi_clp_s class1; /* class 1 service parameters */
+ struct fc_plogi_clp_s class2; /* class 2 service parameters */
+ struct fc_plogi_clp_s class3; /* class 3 service parameters */
+ struct fc_plogi_clp_s class4; /* class 4 service parameters */
+ u8 vvl[16]; /* vendor version level */
+};
+
+/*
+ * LOGO els command payload
+ */
+struct fc_logo_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 res1:8;
+ u32 nport_id:24; /* N_Port identifier of source */
+ wwn_t orig_port_name; /* Port name of the LOGO originator */
+};
+
+/*
+ * ADISC els command payload
+ */
+struct fc_adisc_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 res1:8;
+ u32 orig_HA:24; /* originator hard address */
+ wwn_t orig_port_name; /* originator port name */
+ wwn_t orig_node_name; /* originator node name */
+ u32 res2:8;
+ u32 nport_id:24; /* originator NPortID */
+};
+
+/*
+ * Exchange status block
+ */
+struct fc_exch_status_blk_s {
+ u32 oxid:16;
+ u32 rxid:16;
+ u32 res1:8;
+ u32 orig_np:24; /* originator NPortID */
+ u32 res2:8;
+ u32 resp_np:24; /* responder NPortID */
+ u32 es_bits;
+ u32 res3;
+ /*
+ * un modified section of the fields
+ */
+};
+
+/*
+ * RES els command payload
+ */
+struct fc_res_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 res1:8;
+ u32 nport_id:24; /* N_Port identifier of source */
+ u32 oxid:16;
+ u32 rxid:16;
+ u8 assoc_hdr[32];
+};
+
+/*
+ * RES els accept payload
+ */
+struct fc_res_acc_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */
+};
+
+/*
+ * REC els command payload
+ */
+struct fc_rec_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 res1:8;
+ u32 nport_id:24; /* N_Port identifier of source */
+ u32 oxid:16;
+ u32 rxid:16;
+};
+
+#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */
+#define FC_REC_ESB_SI 0x40000000 /* SI is owned */
+#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */
+#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */
+#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */
+#define FC_REC_ESB_ERRP_MSK 0x03000000
+#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */
+#define FC_REC_ESB_RXID_INV 0x00400000 /* invalid RXID */
+#define FC_REC_ESB_PRIO_INUSE 0x00200000
+
+/*
+ * REC els accept payload
+ */
+struct fc_rec_acc_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 oxid:16;
+ u32 rxid:16;
+ u32 res1:8;
+ u32 orig_id:24; /* N_Port id of exchange originator */
+ u32 res2:8;
+ u32 resp_id:24; /* N_Port id of exchange responder */
+ u32 count; /* data transfer count */
+ u32 e_stat; /* exchange status */
+};
+
+/*
+ * RSI els payload
+ */
+struct fc_rsi_s {
+ struct fc_els_cmd_s els_cmd;
+ u32 res1:8;
+ u32 orig_sid:24;
+ u32 oxid:16;
+ u32 rxid:16;
+};
+
+/*
+ * structure for PRLI paramater pages, both request & response
+ * see FC-PH-X table 113 & 115 for explanation also FCP table 8
+ */
+struct fc_prli_params_s {
+ u32 reserved:16;
+#ifdef __BIG_ENDIAN
+ u32 reserved1:5;
+ u32 rec_support:1;
+ u32 task_retry_id:1;
+ u32 retry:1;
+
+ u32 confirm:1;
+ u32 doverlay:1;
+ u32 initiator:1;
+ u32 target:1;
+ u32 cdmix:1;
+ u32 drmix:1;
+ u32 rxrdisab:1;
+ u32 wxrdisab:1;
+#else
+ u32 retry:1;
+ u32 task_retry_id:1;
+ u32 rec_support:1;
+ u32 reserved1:5;
+
+ u32 wxrdisab:1;
+ u32 rxrdisab:1;
+ u32 drmix:1;
+ u32 cdmix:1;
+ u32 target:1;
+ u32 initiator:1;
+ u32 doverlay:1;
+ u32 confirm:1;
+#endif
+};
+
+/*
+ * valid values for rspcode in PRLI ACC payload
+ */
+enum {
+ FC_PRLI_ACC_XQTD = 0x1, /* request executed */
+ FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */
+};
+
+struct fc_prli_params_page_s {
+ u32 type:8;
+ u32 codext:8;
+#ifdef __BIG_ENDIAN
+ u32 origprocasv:1;
+ u32 rsppav:1;
+ u32 imagepair:1;
+ u32 reserved1:1;
+ u32 rspcode:4;
+#else
+ u32 rspcode:4;
+ u32 reserved1:1;
+ u32 imagepair:1;
+ u32 rsppav:1;
+ u32 origprocasv:1;
+#endif
+ u32 reserved2:8;
+
+ u32 origprocas;
+ u32 rspprocas;
+ struct fc_prli_params_s servparams;
+};
+
+/*
+ * PRLI request and accept payload, FC-PH-X tables 112 & 114
+ */
+struct fc_prli_s {
+ u32 command:8;
+ u32 pglen:8;
+ u32 pagebytes:16;
+ struct fc_prli_params_page_s parampage;
+};
+
+/*
+ * PRLO logout params page
+ */
+struct fc_prlo_params_page_s {
+ u32 type:8;
+ u32 type_ext:8;
+#ifdef __BIG_ENDIAN
+ u32 opa_valid:1; /* originator process associator valid */
+ u32 rpa_valid:1; /* responder process associator valid */
+ u32 res1:14;
+#else
+ u32 res1:14;
+ u32 rpa_valid:1; /* responder process associator valid */
+ u32 opa_valid:1; /* originator process associator valid */
+#endif
+ u32 orig_process_assc;
+ u32 resp_process_assc;
+
+ u32 res2;
+};
+
+/*
+ * PRLO els command payload
+ */
+struct fc_prlo_s {
+ u32 command:8;
+ u32 page_len:8;
+ u32 payload_len:16;
+ struct fc_prlo_params_page_s prlo_params[1];
+};
+
+/*
+ * PRLO Logout response parameter page
+ */
+struct fc_prlo_acc_params_page_s {
+ u32 type:8;
+ u32 type_ext:8;
+
+#ifdef __BIG_ENDIAN
+ u32 opa_valid:1; /* originator process associator valid */
+ u32 rpa_valid:1; /* responder process associator valid */
+ u32 res1:14;
+#else
+ u32 res1:14;
+ u32 rpa_valid:1; /* responder process associator valid */
+ u32 opa_valid:1; /* originator process associator valid */
+#endif
+ u32 orig_process_assc;
+ u32 resp_process_assc;
+
+ u32 fc4type_csp;
+};
+
+/*
+ * PRLO els command ACC payload
+ */
+struct fc_prlo_acc_s {
+ u32 command:8;
+ u32 page_len:8;
+ u32 payload_len:16;
+ struct fc_prlo_acc_params_page_s prlo_acc_params[1];
+};
+
+/*
+ * SCR els command payload
+ */
+enum {
+ FC_SCR_REG_FUNC_FABRIC_DETECTED = 0x01,
+ FC_SCR_REG_FUNC_N_PORT_DETECTED = 0x02,
+ FC_SCR_REG_FUNC_FULL = 0x03,
+ FC_SCR_REG_FUNC_CLEAR_REG = 0xFF,
+};
+
+/* SCR VU registrations */
+enum {
+ FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
+};
+
+struct fc_scr_s {
+ u32 command:8;
+ u32 res:24;
+ u32 vu_reg_func:8; /* Vendor Unique Registrations */
+ u32 res1:16;
+ u32 reg_func:8;
+};
+
+/*
+ * Information category for Basic link data
+ */
+enum {
+ FC_CAT_NOP = 0x0,
+ FC_CAT_ABTS = 0x1,
+ FC_CAT_RMC = 0x2,
+ FC_CAT_BA_ACC = 0x4,
+ FC_CAT_BA_RJT = 0x5,
+ FC_CAT_PRMT = 0x6,
+};
+
+/*
+ * LS_RJT els reply payload
+ */
+struct fc_ls_rjt_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 res1:8;
+ u32 reason_code:8; /* Reason code for reject */
+ u32 reason_code_expl:8; /* Reason code explanation */
+ u32 vendor_unique:8; /* Vendor specific */
+};
+
+/*
+ * LS_RJT reason codes
+ */
+enum {
+ FC_LS_RJT_RSN_INV_CMD_CODE = 0x01,
+ FC_LS_RJT_RSN_LOGICAL_ERROR = 0x03,
+ FC_LS_RJT_RSN_LOGICAL_BUSY = 0x05,
+ FC_LS_RJT_RSN_PROTOCOL_ERROR = 0x07,
+ FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD = 0x09,
+ FC_LS_RJT_RSN_CMD_NOT_SUPP = 0x0B,
+};
+
+/*
+ * LS_RJT reason code explanation
+ */
+enum {
+ FC_LS_RJT_EXP_NO_ADDL_INFO = 0x00,
+ FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS = 0x01,
+ FC_LS_RJT_EXP_SPARMS_ERR_INI_CTL = 0x03,
+ FC_LS_RJT_EXP_SPARMS_ERR_REC_CTL = 0x05,
+ FC_LS_RJT_EXP_SPARMS_ERR_RXSZ = 0x07,
+ FC_LS_RJT_EXP_SPARMS_ERR_CONSEQ = 0x09,
+ FC_LS_RJT_EXP_SPARMS_ERR_CREDIT = 0x0B,
+ FC_LS_RJT_EXP_INV_PORT_NAME = 0x0D,
+ FC_LS_RJT_EXP_INV_NODE_FABRIC_NAME = 0x0E,
+ FC_LS_RJT_EXP_INV_CSP = 0x0F,
+ FC_LS_RJT_EXP_INV_ASSOC_HDR = 0x11,
+ FC_LS_RJT_EXP_ASSOC_HDR_REQD = 0x13,
+ FC_LS_RJT_EXP_INV_ORIG_S_ID = 0x15,
+ FC_LS_RJT_EXP_INV_OXID_RXID_COMB = 0x17,
+ FC_LS_RJT_EXP_CMD_ALREADY_IN_PROG = 0x19,
+ FC_LS_RJT_EXP_LOGIN_REQUIRED = 0x1E,
+ FC_LS_RJT_EXP_INVALID_NPORT_ID = 0x1F,
+ FC_LS_RJT_EXP_INSUFF_RES = 0x29,
+ FC_LS_RJT_EXP_CMD_NOT_SUPP = 0x2C,
+ FC_LS_RJT_EXP_INV_PAYLOAD_LEN = 0x2D,
+};
+
+/*
+ * RRQ els command payload
+ */
+struct fc_rrq_s {
+ struct fc_els_cmd_s els_cmd; /* ELS command code */
+ u32 res1:8;
+ u32 s_id:24; /* exchange originator S_ID */
+
+ u32 ox_id:16; /* originator exchange ID */
+ u32 rx_id:16; /* responder exchange ID */
+
+ u32 res2[8]; /* optional association header */
+};
+
+/*
+ * ABTS BA_ACC reply payload
+ */
+struct fc_ba_acc_s {
+ u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */
+ u32 seq_id:8; /* invalid for Abort Exchange */
+ u32 res2:16;
+ u32 ox_id:16; /* OX_ID from ABTS frame */
+ u32 rx_id:16; /* RX_ID from ABTS frame */
+ u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */
+ u32 high_seq_cnt:16; /* set to 0xFFFF for Abort Exchange */
+};
+
+/*
+ * ABTS BA_RJT reject payload
+ */
+struct fc_ba_rjt_s {
+ u32 res1:8; /* Reserved */
+ u32 reason_code:8; /* reason code for reject */
+ u32 reason_expl:8; /* reason code explanation */
+ u32 vendor_unique:8; /* vendor unique reason code,set to 0 */
+};
+
+/*
+ * TPRLO logout parameter page
+ */
+struct fc_tprlo_params_page_s {
+ u32 type:8;
+ u32 type_ext:8;
+
+#ifdef __BIG_ENDIAN
+ u32 opa_valid:1;
+ u32 rpa_valid:1;
+ u32 tpo_nport_valid:1;
+ u32 global_process_logout:1;
+ u32 res1:12;
+#else
+ u32 res1:12;
+ u32 global_process_logout:1;
+ u32 tpo_nport_valid:1;
+ u32 rpa_valid:1;
+ u32 opa_valid:1;
+#endif
+
+ u32 orig_process_assc;
+ u32 resp_process_assc;
+
+ u32 res2:8;
+ u32 tpo_nport_id;
+};
+
+/*
+ * TPRLO ELS command payload
+ */
+struct fc_tprlo_s {
+ u32 command:8;
+ u32 page_len:8;
+ u32 payload_len:16;
+
+ struct fc_tprlo_params_page_s tprlo_params[1];
+};
+
+enum fc_tprlo_type {
+ FC_GLOBAL_LOGO = 1,
+ FC_TPR_LOGO
+};
+
+/*
+ * TPRLO els command ACC payload
+ */
+struct fc_tprlo_acc_s {
+ u32 command:8;
+ u32 page_len:8;
+ u32 payload_len:16;
+ struct fc_prlo_acc_params_page_s tprlo_acc_params[1];
+};
+
+/*
+ * RSCN els command req payload
+ */
+#define FC_RSCN_PGLEN 0x4
+
+enum fc_rscn_format {
+ FC_RSCN_FORMAT_PORTID = 0x0,
+ FC_RSCN_FORMAT_AREA = 0x1,
+ FC_RSCN_FORMAT_DOMAIN = 0x2,
+ FC_RSCN_FORMAT_FABRIC = 0x3,
+};
+
+struct fc_rscn_event_s {
+ u32 format:2;
+ u32 qualifier:4;
+ u32 resvd:2;
+ u32 portid:24;
+};
+
+struct fc_rscn_pl_s {
+ u8 command;
+ u8 pagelen;
+ __be16 payldlen;
+ struct fc_rscn_event_s event[1];
+};
+
+/*
+ * ECHO els command req payload
+ */
+struct fc_echo_s {
+ struct fc_els_cmd_s els_cmd;
+};
+
+/*
+ * RNID els command
+ */
+#define RNID_NODEID_DATA_FORMAT_COMMON 0x00
+#define RNID_NODEID_DATA_FORMAT_FCP3 0x08
+#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF
+
+#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001
+#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002
+#define RNID_ASSOCIATED_TYPE_HUB 0x00000003
+#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004
+#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005
+#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009
+#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A
+#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B
+#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E
+#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011
+#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002
+#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003
+#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF
+
+/*
+ * RNID els command payload
+ */
+struct fc_rnid_cmd_s {
+ struct fc_els_cmd_s els_cmd;
+ u32 node_id_data_format:8;
+ u32 reserved:24;
+};
+
+/*
+ * RNID els response payload
+ */
+
+struct fc_rnid_common_id_data_s {
+ wwn_t port_name;
+ wwn_t node_name;
+};
+
+struct fc_rnid_general_topology_data_s {
+ u32 vendor_unique[4];
+ __be32 asso_type;
+ u32 phy_port_num;
+ __be32 num_attached_nodes;
+ u32 node_mgmt:8;
+ u32 ip_version:8;
+ u32 udp_tcp_port_num:16;
+ u32 ip_address[4];
+ u32 reserved:16;
+ u32 vendor_specific:16;
+};
+
+struct fc_rnid_acc_s {
+ struct fc_els_cmd_s els_cmd;
+ u32 node_id_data_format:8;
+ u32 common_id_data_length:8;
+ u32 reserved:8;
+ u32 specific_id_data_length:8;
+ struct fc_rnid_common_id_data_s common_id_data;
+ struct fc_rnid_general_topology_data_s gen_topology_data;
+};
+
+#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001
+#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002
+#define RNID_ASSOCIATED_TYPE_HUB 0x00000003
+#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004
+#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005
+#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009
+#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A
+#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B
+#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E
+#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011
+#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002
+#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003
+#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF
+
+enum fc_rpsc_speed_cap {
+ RPSC_SPEED_CAP_1G = 0x8000,
+ RPSC_SPEED_CAP_2G = 0x4000,
+ RPSC_SPEED_CAP_4G = 0x2000,
+ RPSC_SPEED_CAP_10G = 0x1000,
+ RPSC_SPEED_CAP_8G = 0x0800,
+ RPSC_SPEED_CAP_16G = 0x0400,
+
+ RPSC_SPEED_CAP_UNKNOWN = 0x0001,
+};
+
+enum fc_rpsc_op_speed {
+ RPSC_OP_SPEED_1G = 0x8000,
+ RPSC_OP_SPEED_2G = 0x4000,
+ RPSC_OP_SPEED_4G = 0x2000,
+ RPSC_OP_SPEED_10G = 0x1000,
+ RPSC_OP_SPEED_8G = 0x0800,
+ RPSC_OP_SPEED_16G = 0x0400,
+
+ RPSC_OP_SPEED_NOT_EST = 0x0001, /* speed not established */
+};
+
+struct fc_rpsc_speed_info_s {
+ __be16 port_speed_cap; /* see enum fc_rpsc_speed_cap */
+ __be16 port_op_speed; /* see enum fc_rpsc_op_speed */
+};
+
+/*
+ * If RPSC request is sent to the Domain Controller, the request is for
+ * all the ports within that domain.
+ */
+struct fc_rpsc_cmd_s {
+ struct fc_els_cmd_s els_cmd;
+};
+
+/*
+ * RPSC Acc
+ */
+struct fc_rpsc_acc_s {
+ u32 command:8;
+ u32 rsvd:8;
+ u32 num_entries:16;
+
+ struct fc_rpsc_speed_info_s speed_info[1];
+};
+
+/*
+ * If RPSC2 request is sent to the Domain Controller,
+ */
+#define FC_BRCD_TOKEN 0x42524344
+
+struct fc_rpsc2_cmd_s {
+ struct fc_els_cmd_s els_cmd;
+ __be32 token;
+ u16 resvd;
+ __be16 num_pids; /* Number of pids in the request */
+ struct {
+ u32 rsvd1:8;
+ u32 pid:24; /* port identifier */
+ } pid_list[1];
+};
+
+enum fc_rpsc2_port_type {
+ RPSC2_PORT_TYPE_UNKNOWN = 0,
+ RPSC2_PORT_TYPE_NPORT = 1,
+ RPSC2_PORT_TYPE_NLPORT = 2,
+ RPSC2_PORT_TYPE_NPIV_PORT = 0x5f,
+ RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f,
+};
+
+/*
+ * RPSC2 portInfo entry structure
+ */
+struct fc_rpsc2_port_info_s {
+ __be32 pid; /* PID */
+ u16 resvd1;
+ __be16 index; /* port number / index */
+ u8 resvd2;
+ u8 type; /* port type N/NL/... */
+ __be16 speed; /* port Operating Speed */
+};
+
+/*
+ * RPSC2 Accept payload
+ */
+struct fc_rpsc2_acc_s {
+ u8 els_cmd;
+ u8 resvd;
+ __be16 num_pids; /* Number of pids in the request */
+ struct fc_rpsc2_port_info_s port_info[1]; /* port information */
+};
+
+/*
+ * bit fields so that multiple classes can be specified
+ */
+enum fc_cos {
+ FC_CLASS_2 = 0x04,
+ FC_CLASS_3 = 0x08,
+ FC_CLASS_2_3 = 0x0C,
+};
+
+/*
+ * symbolic name
+ */
+struct fc_symname_s {
+ u8 symname[FC_SYMNAME_MAX];
+};
+
+struct fc_alpabm_s {
+ u8 alpa_bm[FC_ALPA_MAX / 8];
+};
+
+/*
+ * protocol default timeout values
+ */
+#define FC_ED_TOV 2
+#define FC_REC_TOV (FC_ED_TOV + 1)
+#define FC_RA_TOV 10
+#define FC_ELS_TOV (2 * FC_RA_TOV)
+#define FC_FCCT_TOV (3 * FC_RA_TOV)
+
+/*
+ * virtual fabric related defines
+ */
+#define FC_VF_ID_NULL 0 /* must not be used as VF_ID */
+#define FC_VF_ID_MIN 1
+#define FC_VF_ID_MAX 0xEFF
+#define FC_VF_ID_CTL 0xFEF /* control VF_ID */
+
+/*
+ * Virtual Fabric Tagging header format
+ * @caution This is defined only in BIG ENDIAN format.
+ */
+struct fc_vft_s {
+ u32 r_ctl:8;
+ u32 ver:2;
+ u32 type:4;
+ u32 res_a:2;
+ u32 priority:3;
+ u32 vf_id:12;
+ u32 res_b:1;
+ u32 hopct:8;
+ u32 res_c:24;
+};
+
+/*
+ * FCP_CMND definitions
+ */
+#define FCP_CMND_CDB_LEN 16
+#define FCP_CMND_LUN_LEN 8
+
+struct fcp_cmnd_s {
+ struct scsi_lun lun; /* 64-bit LU number */
+ u8 crn; /* command reference number */
+#ifdef __BIG_ENDIAN
+ u8 resvd:1,
+ priority:4, /* FCP-3: SAM-3 priority */
+ taskattr:3; /* scsi task attribute */
+#else
+ u8 taskattr:3, /* scsi task attribute */
+ priority:4, /* FCP-3: SAM-3 priority */
+ resvd:1;
+#endif
+ u8 tm_flags; /* task management flags */
+#ifdef __BIG_ENDIAN
+ u8 addl_cdb_len:6, /* additional CDB length words */
+ iodir:2; /* read/write FCP_DATA IUs */
+#else
+ u8 iodir:2, /* read/write FCP_DATA IUs */
+ addl_cdb_len:6; /* additional CDB length */
+#endif
+ struct scsi_cdb_s cdb;
+
+ __be32 fcp_dl; /* bytes to be transferred */
+};
+
+#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
+#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
+
+/*
+ * struct fcp_cmnd_s .iodir field values
+ */
+enum fcp_iodir {
+ FCP_IODIR_NONE = 0,
+ FCP_IODIR_WRITE = 1,
+ FCP_IODIR_READ = 2,
+ FCP_IODIR_RW = 3,
+};
+
+/*
+ * Task management flags field - only one bit shall be set
+ */
+enum fcp_tm_cmnd {
+ FCP_TM_ABORT_TASK_SET = BIT(1),
+ FCP_TM_CLEAR_TASK_SET = BIT(2),
+ FCP_TM_LUN_RESET = BIT(4),
+ FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */
+ FCP_TM_CLEAR_ACA = BIT(6),
+};
+
+/*
+ * FCP_RSP residue flags
+ */
+enum fcp_residue {
+ FCP_NO_RESIDUE = 0, /* no residue */
+ FCP_RESID_OVER = 1, /* more data left that was not sent */
+ FCP_RESID_UNDER = 2, /* less data than requested */
+};
+
+struct fcp_rspinfo_s {
+ u32 res0:24;
+ u32 rsp_code:8; /* response code (as above) */
+ u32 res1;
+};
+
+struct fcp_resp_s {
+ u32 reserved[2]; /* 2 words reserved */
+ u16 reserved2;
+#ifdef __BIG_ENDIAN
+ u8 reserved3:3;
+ u8 fcp_conf_req:1; /* FCP_CONF is requested */
+ u8 resid_flags:2; /* underflow/overflow */
+ u8 sns_len_valid:1; /* sense len is valid */
+ u8 rsp_len_valid:1; /* response len is valid */
+#else
+ u8 rsp_len_valid:1; /* response len is valid */
+ u8 sns_len_valid:1; /* sense len is valid */
+ u8 resid_flags:2; /* underflow/overflow */
+ u8 fcp_conf_req:1; /* FCP_CONF is requested */
+ u8 reserved3:3;
+#endif
+ u8 scsi_status; /* one byte SCSI status */
+ u32 residue; /* residual data bytes */
+ u32 sns_len; /* length od sense info */
+ u32 rsp_len; /* length of response info */
+};
+
+#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \
+ (__fcprsp)->sns_len : 0)
+#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \
+ (__fcprsp)->rsp_len : 0)
+#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1))
+#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \
+ fcp_rsplen(__fcprsp))
+/*
+ * CT
+ */
+struct ct_hdr_s {
+ u32 rev_id:8; /* Revision of the CT */
+ u32 in_id:24; /* Initiator Id */
+ u32 gs_type:8; /* Generic service Type */
+ u32 gs_sub_type:8; /* Generic service sub type */
+ u32 options:8; /* options */
+ u32 rsvrd:8; /* reserved */
+ u32 cmd_rsp_code:16;/* ct command/response code */
+ u32 max_res_size:16;/* maximum/residual size */
+ u32 frag_id:8; /* fragment ID */
+ u32 reason_code:8; /* reason code */
+ u32 exp_code:8; /* explanation code */
+ u32 vendor_unq:8; /* vendor unique */
+};
+
+/*
+ * defines for the Revision
+ */
+enum {
+ CT_GS3_REVISION = 0x01,
+};
+
+/*
+ * defines for gs_type
+ */
+enum {
+ CT_GSTYPE_KEYSERVICE = 0xF7,
+ CT_GSTYPE_ALIASSERVICE = 0xF8,
+ CT_GSTYPE_MGMTSERVICE = 0xFA,
+ CT_GSTYPE_TIMESERVICE = 0xFB,
+ CT_GSTYPE_DIRSERVICE = 0xFC,
+};
+
+/*
+ * defines for gs_sub_type for gs type directory service
+ */
+enum {
+ CT_GSSUBTYPE_NAMESERVER = 0x02,
+};
+
+/*
+ * defines for gs_sub_type for gs type management service
+ */
+enum {
+ CT_GSSUBTYPE_CFGSERVER = 0x01,
+ CT_GSSUBTYPE_UNZONED_NS = 0x02,
+ CT_GSSUBTYPE_ZONESERVER = 0x03,
+ CT_GSSUBTYPE_LOCKSERVER = 0x04,
+ CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */
+};
+
+/*
+ * defines for CT response code field
+ */
+enum {
+ CT_RSP_REJECT = 0x8001,
+ CT_RSP_ACCEPT = 0x8002,
+};
+
+/*
+ * defintions for CT reason code
+ */
+enum {
+ CT_RSN_INV_CMD = 0x01,
+ CT_RSN_INV_VER = 0x02,
+ CT_RSN_LOGIC_ERR = 0x03,
+ CT_RSN_INV_SIZE = 0x04,
+ CT_RSN_LOGICAL_BUSY = 0x05,
+ CT_RSN_PROTO_ERR = 0x07,
+ CT_RSN_UNABLE_TO_PERF = 0x09,
+ CT_RSN_NOT_SUPP = 0x0B,
+ CT_RSN_SERVER_NOT_AVBL = 0x0D,
+ CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
+ CT_RSN_VENDOR_SPECIFIC = 0xFF,
+
+};
+
+/*
+ * definitions for explanations code for Name server
+ */
+enum {
+ CT_NS_EXP_NOADDITIONAL = 0x00,
+ CT_NS_EXP_ID_NOT_REG = 0x01,
+ CT_NS_EXP_PN_NOT_REG = 0x02,
+ CT_NS_EXP_NN_NOT_REG = 0x03,
+ CT_NS_EXP_CS_NOT_REG = 0x04,
+ CT_NS_EXP_IPN_NOT_REG = 0x05,
+ CT_NS_EXP_IPA_NOT_REG = 0x06,
+ CT_NS_EXP_FT_NOT_REG = 0x07,
+ CT_NS_EXP_SPN_NOT_REG = 0x08,
+ CT_NS_EXP_SNN_NOT_REG = 0x09,
+ CT_NS_EXP_PT_NOT_REG = 0x0A,
+ CT_NS_EXP_IPP_NOT_REG = 0x0B,
+ CT_NS_EXP_FPN_NOT_REG = 0x0C,
+ CT_NS_EXP_HA_NOT_REG = 0x0D,
+ CT_NS_EXP_FD_NOT_REG = 0x0E,
+ CT_NS_EXP_FF_NOT_REG = 0x0F,
+ CT_NS_EXP_ACCESSDENIED = 0x10,
+ CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
+ CT_NS_EXP_DATABASEEMPTY = 0x12,
+ CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13,
+ CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14,
+ CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15,
+ CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16
+};
+
+/*
+ * defintions for the explanation code for all servers
+ */
+enum {
+ CT_EXP_AUTH_EXCEPTION = 0xF1,
+ CT_EXP_DB_FULL = 0xF2,
+ CT_EXP_DB_EMPTY = 0xF3,
+ CT_EXP_PROCESSING_REQ = 0xF4,
+ CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5,
+ CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6
+};
+
+/*
+ * Command codes for Name server
+ */
+enum {
+ GS_GID_PN = 0x0121, /* Get Id on port name */
+ GS_GPN_ID = 0x0112, /* Get port name on ID */
+ GS_GNN_ID = 0x0113, /* Get node name on ID */
+ GS_GID_FT = 0x0171, /* Get Id on FC4 type */
+ GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
+ GS_RFT_ID = 0x0217, /* Register fc4type on ID */
+ GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
+ GS_RSNN_NN = 0x0239, /* Register symbolic NN on NN */
+ GS_RPN_ID = 0x0212, /* Register port name */
+ GS_RNN_ID = 0x0213, /* Register node name */
+ GS_RCS_ID = 0x0214, /* Register class of service */
+ GS_RPT_ID = 0x021A, /* Register port type */
+ GS_GA_NXT = 0x0100, /* Get all next */
+ GS_RFF_ID = 0x021F, /* Register FC4 Feature */
+};
+
+struct fcgs_id_req_s {
+ u32 rsvd:8;
+ u32 dap:24; /* port identifier */
+};
+#define fcgs_gpnid_req_t struct fcgs_id_req_s
+#define fcgs_gnnid_req_t struct fcgs_id_req_s
+#define fcgs_gspnid_req_t struct fcgs_id_req_s
+
+struct fcgs_gidpn_req_s {
+ wwn_t port_name; /* port wwn */
+};
+
+struct fcgs_gidpn_resp_s {
+ u32 rsvd:8;
+ u32 dap:24; /* port identifier */
+};
+
+/*
+ * RFT_ID
+ */
+struct fcgs_rftid_req_s {
+ u32 rsvd:8;
+ u32 dap:24; /* port identifier */
+ __be32 fc4_type[8]; /* fc4 types */
+};
+
+/*
+ * RFF_ID : Register FC4 features.
+ */
+#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02
+#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01
+
+struct fcgs_rffid_req_s {
+ u32 rsvd:8;
+ u32 dap:24; /* port identifier */
+ u32 rsvd1:16;
+ u32 fc4ftr_bits:8; /* fc4 feature bits */
+ u32 fc4_type:8; /* corresponding FC4 Type */
+};
+
+/*
+ * GID_FT Request
+ */
+struct fcgs_gidft_req_s {
+ u8 reserved;
+ u8 domain_id; /* domain, 0 - all fabric */
+ u8 area_id; /* area, 0 - whole domain */
+ u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
+};
+
+/*
+ * GID_FT Response
+ */
+struct fcgs_gidft_resp_s {
+ u8 last:1; /* last port identifier flag */
+ u8 reserved:7;
+ u32 pid:24; /* port identifier */
+};
+
+/*
+ * RSPN_ID
+ */
+struct fcgs_rspnid_req_s {
+ u32 rsvd:8;
+ u32 dap:24; /* port identifier */
+ u8 spn_len; /* symbolic port name length */
+ u8 spn[256]; /* symbolic port name */
+};
+
+/*
+ * RSNN_NN
+ */
+struct fcgs_rsnn_nn_req_s {
+ wwn_t node_name; /* Node name */
+ u8 snn_len; /* symbolic node name length */
+ u8 snn[256]; /* symbolic node name */
+};
+
+/*
+ * RPN_ID
+ */
+struct fcgs_rpnid_req_s {
+ u32 rsvd:8;
+ u32 port_id:24;
+ wwn_t port_name;
+};
+
+/*
+ * RNN_ID
+ */
+struct fcgs_rnnid_req_s {
+ u32 rsvd:8;
+ u32 port_id:24;
+ wwn_t node_name;
+};
+
+/*
+ * RCS_ID
+ */
+struct fcgs_rcsid_req_s {
+ u32 rsvd:8;
+ u32 port_id:24;
+ u32 cos;
+};
+
+/*
+ * RPT_ID
+ */
+struct fcgs_rptid_req_s {
+ u32 rsvd:8;
+ u32 port_id:24;
+ u32 port_type:8;
+ u32 rsvd1:24;
+};
+
+/*
+ * GA_NXT Request
+ */
+struct fcgs_ganxt_req_s {
+ u32 rsvd:8;
+ u32 port_id:24;
+};
+
+/*
+ * GA_NXT Response
+ */
+struct fcgs_ganxt_rsp_s {
+ u32 port_type:8; /* Port Type */
+ u32 port_id:24; /* Port Identifier */
+ wwn_t port_name; /* Port Name */
+ u8 spn_len; /* Length of Symbolic Port Name */
+ char spn[255]; /* Symbolic Port Name */
+ wwn_t node_name; /* Node Name */
+ u8 snn_len; /* Length of Symbolic Node Name */
+ char snn[255]; /* Symbolic Node Name */
+ u8 ipa[8]; /* Initial Process Associator */
+ u8 ip[16]; /* IP Address */
+ u32 cos; /* Class of Service */
+ u32 fc4types[8]; /* FC-4 TYPEs */
+ wwn_t fabric_port_name; /* Fabric Port Name */
+ u32 rsvd:8; /* Reserved */
+ u32 hard_addr:24; /* Hard Address */
+};
+
+/*
+ * Command codes for Fabric Configuration Server
+ */
+enum {
+ GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */
+ GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */
+ GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */
+ GS_FC_PING_CMD = 0x0401, /* GS FC Ping */
+};
+
+/*
+ * GMAL Command ( Get ( interconnect Element) Management Address List)
+ * To retrieve the IP Address of a Switch.
+ */
+#define CT_GMAL_RESP_PREFIX_TELNET "telnet://"
+#define CT_GMAL_RESP_PREFIX_HTTP "http://"
+
+/* GMAL/GFN request */
+struct fcgs_req_s {
+ wwn_t wwn; /* PWWN/NWWN */
+};
+
+#define fcgs_gmal_req_t struct fcgs_req_s
+#define fcgs_gfn_req_t struct fcgs_req_s
+
+/* Accept Response to GMAL */
+struct fcgs_gmal_resp_s {
+ __be32 ms_len; /* Num of entries */
+ u8 ms_ma[256];
+};
+
+struct fcgs_gmal_entry_s {
+ u8 len;
+ u8 prefix[7]; /* like "http://" */
+ u8 ip_addr[248];
+};
+
+/*
+ * FDMI Command Codes
+ */
+#define FDMI_GRHL 0x0100
+#define FDMI_GHAT 0x0101
+#define FDMI_GRPL 0x0102
+#define FDMI_GPAT 0x0110
+#define FDMI_RHBA 0x0200
+#define FDMI_RHAT 0x0201
+#define FDMI_RPRT 0x0210
+#define FDMI_RPA 0x0211
+#define FDMI_DHBA 0x0300
+#define FDMI_DPRT 0x0310
+
+/*
+ * FDMI reason codes
+ */
+#define FDMI_NO_ADDITIONAL_EXP 0x00
+#define FDMI_HBA_ALREADY_REG 0x10
+#define FDMI_HBA_ATTRIB_NOT_REG 0x11
+#define FDMI_HBA_ATTRIB_MULTIPLE 0x12
+#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13
+#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14
+#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15
+#define FDMI_PORT_HBA_NOT_IN_LIST 0x16
+#define FDMI_PORT_ATTRIB_NOT_REG 0x20
+#define FDMI_PORT_NOT_REG 0x21
+#define FDMI_PORT_ATTRIB_MULTIPLE 0x22
+#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23
+#define FDMI_PORT_ALREADY_REGISTEREED 0x24
+
+/*
+ * FDMI Transmission Speed Mask values
+ */
+#define FDMI_TRANS_SPEED_1G 0x00000001
+#define FDMI_TRANS_SPEED_2G 0x00000002
+#define FDMI_TRANS_SPEED_10G 0x00000004
+#define FDMI_TRANS_SPEED_4G 0x00000008
+#define FDMI_TRANS_SPEED_8G 0x00000010
+#define FDMI_TRANS_SPEED_16G 0x00000020
+#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000
+
+/*
+ * FDMI HBA attribute types
+ */
+enum fdmi_hba_attribute_type {
+ FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */
+ FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */
+ FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */
+ FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */
+ FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */
+ FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */
+ FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */
+ FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */
+ FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */
+ FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */
+ FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */
+ FDMI_HBA_ATTRIB_NODE_SYM_NAME, /* 0x000C */
+ FDMI_HBA_ATTRIB_VENDOR_INFO, /* 0x000D */
+ FDMI_HBA_ATTRIB_NUM_PORTS, /* 0x000E */
+ FDMI_HBA_ATTRIB_FABRIC_NAME, /* 0x000F */
+ FDMI_HBA_ATTRIB_BIOS_VER, /* 0x0010 */
+ FDMI_HBA_ATTRIB_VENDOR_ID = 0x00E0,
+
+ FDMI_HBA_ATTRIB_MAX_TYPE
+};
+
+/*
+ * FDMI Port attribute types
+ */
+enum fdmi_port_attribute_type {
+ FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */
+ FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */
+ FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */
+ FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */
+ FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */
+ FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */
+ FDMI_PORT_ATTRIB_NODE_NAME, /* 0x0007 */
+ FDMI_PORT_ATTRIB_PORT_NAME, /* 0x0008 */
+ FDMI_PORT_ATTRIB_PORT_SYM_NAME, /* 0x0009 */
+ FDMI_PORT_ATTRIB_PORT_TYPE, /* 0x000A */
+ FDMI_PORT_ATTRIB_SUPP_COS, /* 0x000B */
+ FDMI_PORT_ATTRIB_PORT_FAB_NAME, /* 0x000C */
+ FDMI_PORT_ATTRIB_PORT_FC4_TYPE, /* 0x000D */
+ FDMI_PORT_ATTRIB_PORT_STATE = 0x101, /* 0x0101 */
+ FDMI_PORT_ATTRIB_PORT_NUM_RPRT = 0x102, /* 0x0102 */
+
+ FDMI_PORT_ATTR_MAX_TYPE
+};
+
+/*
+ * FDMI attribute
+ */
+struct fdmi_attr_s {
+ __be16 type;
+ __be16 len;
+ u8 value[1];
+};
+
+/*
+ * HBA Attribute Block
+ */
+struct fdmi_hba_attr_s {
+ __be32 attr_count; /* # of attributes */
+ struct fdmi_attr_s hba_attr; /* n attributes */
+};
+
+/*
+ * Registered Port List
+ */
+struct fdmi_port_list_s {
+ __be32 num_ports; /* number Of Port Entries */
+ wwn_t port_entry; /* one or more */
+};
+
+/*
+ * Port Attribute Block
+ */
+struct fdmi_port_attr_s {
+ __be32 attr_count; /* # of attributes */
+ struct fdmi_attr_s port_attr; /* n attributes */
+};
+
+/*
+ * FDMI Register HBA Attributes
+ */
+struct fdmi_rhba_s {
+ wwn_t hba_id; /* HBA Identifier */
+ struct fdmi_port_list_s port_list; /* Registered Port List */
+ struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */
+};
+
+/*
+ * FDMI Register Port
+ */
+struct fdmi_rprt_s {
+ wwn_t hba_id; /* HBA Identifier */
+ wwn_t port_name; /* Port wwn */
+ struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
+};
+
+/*
+ * FDMI Register Port Attributes
+ */
+struct fdmi_rpa_s {
+ wwn_t port_name; /* port wwn */
+ struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */
+};
+
+#pragma pack()
+
+#endif /* __BFA_FC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
new file mode 100644
index 000000000..dce787f6c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * fcbuild.c - FC link service frame building and parsing routines
+ */
+
+#include "bfad_drv.h"
+#include "bfa_fcbuild.h"
+
+/*
+ * static build functions
+ */
+static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ __be16 ox_id);
+static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ __be16 ox_id);
+static struct fchs_s fc_els_req_tmpl;
+static struct fchs_s fc_els_rsp_tmpl;
+static struct fchs_s fc_bls_req_tmpl;
+static struct fchs_s fc_bls_rsp_tmpl;
+static struct fc_ba_acc_s ba_acc_tmpl;
+static struct fc_logi_s plogi_tmpl;
+static struct fc_prli_s prli_tmpl;
+static struct fc_rrq_s rrq_tmpl;
+static struct fchs_s fcp_fchs_tmpl;
+
+void
+fcbuild_init(void)
+{
+ /*
+ * fc_els_req_tmpl
+ */
+ fc_els_req_tmpl.routing = FC_RTG_EXT_LINK;
+ fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
+ fc_els_req_tmpl.type = FC_TYPE_ELS;
+ fc_els_req_tmpl.f_ctl =
+ bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+ FCTL_SI_XFER);
+ fc_els_req_tmpl.rx_id = FC_RXID_ANY;
+
+ /*
+ * fc_els_rsp_tmpl
+ */
+ fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK;
+ fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
+ fc_els_rsp_tmpl.type = FC_TYPE_ELS;
+ fc_els_rsp_tmpl.f_ctl =
+ bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+ FCTL_END_SEQ | FCTL_SI_XFER);
+ fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;
+
+ /*
+ * fc_bls_req_tmpl
+ */
+ fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
+ fc_bls_req_tmpl.type = FC_TYPE_BLS;
+ fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
+ fc_bls_req_tmpl.rx_id = FC_RXID_ANY;
+
+ /*
+ * fc_bls_rsp_tmpl
+ */
+ fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK;
+ fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
+ fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
+ fc_bls_rsp_tmpl.f_ctl =
+ bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+ FCTL_END_SEQ | FCTL_SI_XFER);
+ fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;
+
+ /*
+ * ba_acc_tmpl
+ */
+ ba_acc_tmpl.seq_id_valid = 0;
+ ba_acc_tmpl.low_seq_cnt = 0;
+ ba_acc_tmpl.high_seq_cnt = 0xFFFF;
+
+ /*
+ * plogi_tmpl
+ */
+ plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
+ plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
+ plogi_tmpl.csp.ciro = 0x1;
+ plogi_tmpl.csp.cisc = 0x0;
+ plogi_tmpl.csp.altbbcred = 0x0;
+ plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
+ plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
+ plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);
+
+ plogi_tmpl.class3.class_valid = 1;
+ plogi_tmpl.class3.sequential = 1;
+ plogi_tmpl.class3.conseq = 0xFF;
+ plogi_tmpl.class3.ospx = 1;
+
+ /*
+ * prli_tmpl
+ */
+ prli_tmpl.command = FC_ELS_PRLI;
+ prli_tmpl.pglen = 0x10;
+ prli_tmpl.pagebytes = cpu_to_be16(0x0014);
+ prli_tmpl.parampage.type = FC_TYPE_FCP;
+ prli_tmpl.parampage.imagepair = 1;
+ prli_tmpl.parampage.servparams.rxrdisab = 1;
+
+ /*
+ * rrq_tmpl
+ */
+ rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
+
+ /*
+ * fcp_struct fchs_s mpl
+ */
+ fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
+ fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
+ fcp_fchs_tmpl.type = FC_TYPE_FCP;
+ fcp_fchs_tmpl.f_ctl =
+ bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
+ fcp_fchs_tmpl.seq_id = 1;
+ fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
+}
+
+static void
+fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
+{
+ memset(fchs, 0, sizeof(struct fchs_s));
+
+ fchs->routing = FC_RTG_FC4_DEV_DATA;
+ fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
+ fchs->type = FC_TYPE_SERVICES;
+ fchs->f_ctl =
+ bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+ FCTL_SI_XFER);
+ fchs->rx_id = FC_RXID_ANY;
+ fchs->d_id = (d_id);
+ fchs->s_id = (s_id);
+ fchs->ox_id = cpu_to_be16(ox_id);
+
+ /*
+ * @todo no need to set ox_id for request
+ * no need to set rx_id for response
+ */
+}
+
+static void
+fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+ memset(fchs, 0, sizeof(struct fchs_s));
+
+ fchs->routing = FC_RTG_FC4_DEV_DATA;
+ fchs->cat_info = FC_CAT_SOLICIT_CTRL;
+ fchs->type = FC_TYPE_SERVICES;
+ fchs->f_ctl =
+ bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+ FCTL_END_SEQ | FCTL_SI_XFER);
+ fchs->d_id = d_id;
+ fchs->s_id = s_id;
+ fchs->ox_id = ox_id;
+}
+
+void
+fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
+{
+ memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
+ fchs->d_id = (d_id);
+ fchs->s_id = (s_id);
+ fchs->ox_id = cpu_to_be16(ox_id);
+}
+
+static void
+fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
+{
+ memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
+ fchs->d_id = d_id;
+ fchs->s_id = s_id;
+ fchs->ox_id = ox_id;
+}
+
+enum fc_parse_status
+fc_els_rsp_parse(struct fchs_s *fchs, int len)
+{
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+ struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
+
+ len = len;
+
+ switch (els_cmd->els_code) {
+ case FC_ELS_LS_RJT:
+ if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
+ return FC_PARSE_BUSY;
+ else
+ return FC_PARSE_FAILURE;
+
+ case FC_ELS_ACC:
+ return FC_PARSE_OK;
+ }
+ return FC_PARSE_OK;
+}
+
+static void
+fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
+{
+ memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
+ fchs->d_id = d_id;
+ fchs->s_id = s_id;
+ fchs->ox_id = ox_id;
+}
+
+static u16
+fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+ __be16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size, u16 bb_cr, u8 els_code)
+{
+ struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
+
+ memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+ /* For FC AL bb_cr is 0 and altbbcred is 1 */
+ if (!bb_cr)
+ plogi->csp.altbbcred = 1;
+
+ plogi->els_cmd.els_code = els_code;
+ if (els_code == FC_ELS_PLOGI)
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+ else
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
+ plogi->csp.bbcred = cpu_to_be16(bb_cr);
+
+ memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
+ memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
+
+ return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
+ u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
+ u8 set_npiv, u8 set_auth, u16 local_bb_credits)
+{
+ u32 d_id = bfa_hton3b(FC_FABRIC_PORT);
+ __be32 *vvl_info;
+
+ memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+ flogi->els_cmd.els_code = FC_ELS_FLOGI;
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+ flogi->port_name = port_name;
+ flogi->node_name = node_name;
+
+ /*
+ * Set the NPIV Capability Bit ( word 1, bit 31) of Common
+ * Service Parameters.
+ */
+ flogi->csp.ciro = set_npiv;
+
+ /* set AUTH capability */
+ flogi->csp.security = set_auth;
+
+ flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
+
+ /* Set brcd token in VVL */
+ vvl_info = (u32 *)&flogi->vvl[0];
+
+ /* set the flag to indicate the presence of VVL */
+ flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */
+ vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD);
+
+ return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
+ __be16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size, u16 local_bb_credits, u8 bb_scn)
+{
+ u32 d_id = 0;
+ u16 bbscn_rxsz = (bb_scn << 12) | pdu_size;
+
+ memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ flogi->els_cmd.els_code = FC_ELS_ACC;
+ flogi->class3.rxsz = cpu_to_be16(pdu_size);
+ flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */
+ flogi->port_name = port_name;
+ flogi->node_name = node_name;
+
+ flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
+
+ return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
+ u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
+{
+ u32 d_id = bfa_hton3b(FC_FABRIC_PORT);
+
+ memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+ flogi->els_cmd.els_code = FC_ELS_FDISC;
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+ flogi->port_name = port_name;
+ flogi->node_name = node_name;
+
+ return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+ u16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size, u16 bb_cr)
+{
+ return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
+ node_name, pdu_size, bb_cr, FC_ELS_PLOGI);
+}
+
+u16
+fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+ u16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size, u16 bb_cr)
+{
+ return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
+ node_name, pdu_size, bb_cr, FC_ELS_ACC);
+}
+
+enum fc_parse_status
+fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
+{
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+ struct fc_logi_s *plogi;
+ struct fc_ls_rjt_s *ls_rjt;
+
+ switch (els_cmd->els_code) {
+ case FC_ELS_LS_RJT:
+ ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1);
+ if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
+ return FC_PARSE_BUSY;
+ else
+ return FC_PARSE_FAILURE;
+ case FC_ELS_ACC:
+ plogi = (struct fc_logi_s *) (fchs + 1);
+ if (len < sizeof(struct fc_logi_s))
+ return FC_PARSE_FAILURE;
+
+ if (!wwn_is_equal(plogi->port_name, port_name))
+ return FC_PARSE_FAILURE;
+
+ if (!plogi->class3.class_valid)
+ return FC_PARSE_FAILURE;
+
+ if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+ default:
+ return FC_PARSE_FAILURE;
+ }
+}
+
+enum fc_parse_status
+fc_plogi_parse(struct fchs_s *fchs)
+{
+ struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
+
+ if (plogi->class3.class_valid != 1)
+ return FC_PARSE_FAILURE;
+
+ if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
+ || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
+ || (plogi->class3.rxsz == 0))
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+ u16 ox_id)
+{
+ struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
+
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+ memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+
+ prli->command = FC_ELS_PRLI;
+ prli->parampage.servparams.initiator = 1;
+ prli->parampage.servparams.retry = 1;
+ prli->parampage.servparams.rec_support = 1;
+ prli->parampage.servparams.task_retry_id = 0;
+ prli->parampage.servparams.confirm = 1;
+
+ return sizeof(struct fc_prli_s);
+}
+
+u16
+fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+ __be16 ox_id, enum bfa_lport_role role)
+{
+ struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
+
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+ memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+
+ prli->command = FC_ELS_ACC;
+
+ prli->parampage.servparams.initiator = 1;
+
+ prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
+
+ return sizeof(struct fc_prli_s);
+}
+
+enum fc_parse_status
+fc_prli_rsp_parse(struct fc_prli_s *prli, int len)
+{
+ if (len < sizeof(struct fc_prli_s))
+ return FC_PARSE_FAILURE;
+
+ if (prli->command != FC_ELS_ACC)
+ return FC_PARSE_FAILURE;
+
+ if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD)
+ && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG))
+ return FC_PARSE_FAILURE;
+
+ if (prli->parampage.servparams.target != 1)
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+enum fc_parse_status
+fc_prli_parse(struct fc_prli_s *prli)
+{
+ if (prli->parampage.type != FC_TYPE_FCP)
+ return FC_PARSE_FAILURE;
+
+ if (!prli->parampage.imagepair)
+ return FC_PARSE_FAILURE;
+
+ if (!prli->parampage.servparams.initiator)
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
+ u16 ox_id, wwn_t port_name)
+{
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ memset(logo, '\0', sizeof(struct fc_logo_s));
+ logo->els_cmd.els_code = FC_ELS_LOGO;
+ logo->nport_id = (s_id);
+ logo->orig_port_name = port_name;
+
+ return sizeof(struct fc_logo_s);
+}
+
+static u16
+fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
+ u32 s_id, __be16 ox_id, wwn_t port_name,
+ wwn_t node_name, u8 els_code)
+{
+ memset(adisc, '\0', sizeof(struct fc_adisc_s));
+
+ adisc->els_cmd.els_code = els_code;
+
+ if (els_code == FC_ELS_ADISC)
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+ else
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ adisc->orig_HA = 0;
+ adisc->orig_port_name = port_name;
+ adisc->orig_node_name = node_name;
+ adisc->nport_id = (s_id);
+
+ return sizeof(struct fc_adisc_s);
+}
+
+u16
+fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
+ u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name)
+{
+ return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
+ node_name, FC_ELS_ADISC);
+}
+
+u16
+fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
+ u32 s_id, __be16 ox_id, wwn_t port_name,
+ wwn_t node_name)
+{
+ return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
+ node_name, FC_ELS_ACC);
+}
+
+enum fc_parse_status
+fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
+ wwn_t node_name)
+{
+
+ if (len < sizeof(struct fc_adisc_s))
+ return FC_PARSE_FAILURE;
+
+ if (adisc->els_cmd.els_code != FC_ELS_ACC)
+ return FC_PARSE_FAILURE;
+
+ if (!wwn_is_equal(adisc->orig_port_name, port_name))
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+enum fc_parse_status
+fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name,
+ wwn_t port_name)
+{
+ struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
+
+ if (adisc->els_cmd.els_code != FC_ELS_ACC)
+ return FC_PARSE_FAILURE;
+
+ if ((adisc->nport_id == (host_dap))
+ && wwn_is_equal(adisc->orig_port_name, port_name)
+ && wwn_is_equal(adisc->orig_node_name, node_name))
+ return FC_PARSE_OK;
+
+ return FC_PARSE_FAILURE;
+}
+
+enum fc_parse_status
+fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
+{
+ struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
+
+ if (pdisc->class3.class_valid != 1)
+ return FC_PARSE_FAILURE;
+
+ if ((be16_to_cpu(pdisc->class3.rxsz) <
+ (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
+ || (pdisc->class3.rxsz == 0))
+ return FC_PARSE_FAILURE;
+
+ if (!wwn_is_equal(pdisc->port_name, port_name))
+ return FC_PARSE_FAILURE;
+
+ if (!wwn_is_equal(pdisc->node_name, node_name))
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+ memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
+ fchs->cat_info = FC_CAT_ABTS;
+ fchs->d_id = (d_id);
+ fchs->s_id = (s_id);
+ fchs->ox_id = cpu_to_be16(ox_id);
+
+ return sizeof(struct fchs_s);
+}
+
+enum fc_parse_status
+fc_abts_rsp_parse(struct fchs_s *fchs, int len)
+{
+ if ((fchs->cat_info == FC_CAT_BA_ACC)
+ || (fchs->cat_info == FC_CAT_BA_RJT))
+ return FC_PARSE_OK;
+
+ return FC_PARSE_FAILURE;
+}
+
+u16
+fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
+ u16 ox_id, u16 rrq_oxid)
+{
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ /*
+ * build rrq payload
+ */
+ memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
+ rrq->s_id = (s_id);
+ rrq->ox_id = cpu_to_be16(rrq_oxid);
+ rrq->rx_id = FC_RXID_ANY;
+
+ return sizeof(struct fc_rrq_s);
+}
+
+u16
+fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+ __be16 ox_id)
+{
+ struct fc_els_cmd_s *acc = pld;
+
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ memset(acc, 0, sizeof(struct fc_els_cmd_s));
+ acc->els_code = FC_ELS_ACC;
+
+ return sizeof(struct fc_els_cmd_s);
+}
+
+u16
+fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
+ u32 s_id, __be16 ox_id, u8 reason_code,
+ u8 reason_code_expl)
+{
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+ memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
+
+ ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
+ ls_rjt->reason_code = reason_code;
+ ls_rjt->reason_code_expl = reason_code_expl;
+ ls_rjt->vendor_unique = 0x00;
+
+ return sizeof(struct fc_ls_rjt_s);
+}
+
+u16
+fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
+ u32 s_id, __be16 ox_id, u16 rx_id)
+{
+ fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
+
+ memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
+
+ fchs->rx_id = rx_id;
+
+ ba_acc->ox_id = fchs->ox_id;
+ ba_acc->rx_id = fchs->rx_id;
+
+ return sizeof(struct fc_ba_acc_s);
+}
+
+u16
+fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
+ u32 s_id, __be16 ox_id)
+{
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+ memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
+ els_cmd->els_code = FC_ELS_ACC;
+
+ return sizeof(struct fc_els_cmd_s);
+}
+
+int
+fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
+{
+ int num_pages = 0;
+ struct fc_prlo_s *prlo;
+ struct fc_tprlo_s *tprlo;
+
+ if (els_code == FC_ELS_PRLO) {
+ prlo = (struct fc_prlo_s *) (fc_frame + 1);
+ num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
+ } else {
+ tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
+ num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
+ }
+ return num_pages;
+}
+
+u16
+fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
+ u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
+{
+ int page;
+
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ memset(tprlo_acc, 0, (num_pages * 16) + 4);
+ tprlo_acc->command = FC_ELS_ACC;
+
+ tprlo_acc->page_len = 0x10;
+ tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+ for (page = 0; page < num_pages; page++) {
+ tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
+ tprlo_acc->tprlo_acc_params[page].rpa_valid = 0;
+ tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
+ tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
+ tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
+ }
+ return be16_to_cpu(tprlo_acc->payload_len);
+}
+
+u16
+fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
+ u32 s_id, __be16 ox_id, int num_pages)
+{
+ int page;
+
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ memset(prlo_acc, 0, (num_pages * 16) + 4);
+ prlo_acc->command = FC_ELS_ACC;
+ prlo_acc->page_len = 0x10;
+ prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+ for (page = 0; page < num_pages; page++) {
+ prlo_acc->prlo_acc_params[page].opa_valid = 0;
+ prlo_acc->prlo_acc_params[page].rpa_valid = 0;
+ prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
+ prlo_acc->prlo_acc_params[page].orig_process_assc = 0;
+ prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
+ }
+
+ return be16_to_cpu(prlo_acc->payload_len);
+}
+
+u16
+fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
+ u32 s_id, u16 ox_id, u32 data_format)
+{
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
+
+ rnid->els_cmd.els_code = FC_ELS_RNID;
+ rnid->node_id_data_format = data_format;
+
+ return sizeof(struct fc_rnid_cmd_s);
+}
+
+u16
+fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
+ u32 s_id, __be16 ox_id, u32 data_format,
+ struct fc_rnid_common_id_data_s *common_id_data,
+ struct fc_rnid_general_topology_data_s *gen_topo_data)
+{
+ memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
+
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ rnid_acc->els_cmd.els_code = FC_ELS_ACC;
+ rnid_acc->node_id_data_format = data_format;
+ rnid_acc->common_id_data_length =
+ sizeof(struct fc_rnid_common_id_data_s);
+ rnid_acc->common_id_data = *common_id_data;
+
+ if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
+ rnid_acc->specific_id_data_length =
+ sizeof(struct fc_rnid_general_topology_data_s);
+ rnid_acc->gen_topology_data = *gen_topo_data;
+ return sizeof(struct fc_rnid_acc_s);
+ } else {
+ return sizeof(struct fc_rnid_acc_s) -
+ sizeof(struct fc_rnid_general_topology_data_s);
+ }
+
+}
+
+u16
+fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
+ u32 s_id, u16 ox_id)
+{
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
+
+ rpsc->els_cmd.els_code = FC_ELS_RPSC;
+ return sizeof(struct fc_rpsc_cmd_s);
+}
+
+u16
+fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
+ u32 s_id, u32 *pid_list, u16 npids)
+{
+ u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id));
+ int i = 0;
+
+ fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0);
+
+ memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
+
+ rpsc2->els_cmd.els_code = FC_ELS_RPSC;
+ rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
+ rpsc2->num_pids = cpu_to_be16(npids);
+ for (i = 0; i < npids; i++)
+ rpsc2->pid_list[i].pid = pid_list[i];
+
+ return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32)));
+}
+
+u16
+fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
+ u32 d_id, u32 s_id, __be16 ox_id,
+ struct fc_rpsc_speed_info_s *oper_speed)
+{
+ memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
+
+ fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+ rpsc_acc->command = FC_ELS_ACC;
+ rpsc_acc->num_entries = cpu_to_be16(1);
+
+ rpsc_acc->speed_info[0].port_speed_cap =
+ cpu_to_be16(oper_speed->port_speed_cap);
+
+ rpsc_acc->speed_info[0].port_op_speed =
+ cpu_to_be16(oper_speed->port_op_speed);
+
+ return sizeof(struct fc_rpsc_acc_s);
+}
+
+u16
+fc_logo_rsp_parse(struct fchs_s *fchs, int len)
+{
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ len = len;
+ if (els_cmd->els_code != FC_ELS_ACC)
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+ wwn_t port_name, wwn_t node_name, u16 pdu_size)
+{
+ struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
+
+ memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+ pdisc->els_cmd.els_code = FC_ELS_PDISC;
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
+ pdisc->port_name = port_name;
+ pdisc->node_name = node_name;
+
+ return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
+{
+ struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
+
+ if (len < sizeof(struct fc_logi_s))
+ return FC_PARSE_LEN_INVAL;
+
+ if (pdisc->els_cmd.els_code != FC_ELS_ACC)
+ return FC_PARSE_ACC_INVAL;
+
+ if (!wwn_is_equal(pdisc->port_name, port_name))
+ return FC_PARSE_PWWN_NOT_EQUAL;
+
+ if (!pdisc->class3.class_valid)
+ return FC_PARSE_NWWN_NOT_EQUAL;
+
+ if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
+ return FC_PARSE_RXSZ_INVAL;
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+ int num_pages)
+{
+ struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
+ int page;
+
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+ memset(prlo, 0, (num_pages * 16) + 4);
+ prlo->command = FC_ELS_PRLO;
+ prlo->page_len = 0x10;
+ prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+ for (page = 0; page < num_pages; page++) {
+ prlo->prlo_params[page].type = FC_TYPE_FCP;
+ prlo->prlo_params[page].opa_valid = 0;
+ prlo->prlo_params[page].rpa_valid = 0;
+ prlo->prlo_params[page].orig_process_assc = 0;
+ prlo->prlo_params[page].resp_process_assc = 0;
+ }
+
+ return be16_to_cpu(prlo->payload_len);
+}
+
+u16
+fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
+{
+ struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
+ int num_pages = 0;
+ int page = 0;
+
+ len = len;
+
+ if (prlo->command != FC_ELS_ACC)
+ return FC_PARSE_FAILURE;
+
+ num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
+
+ for (page = 0; page < num_pages; page++) {
+ if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
+ return FC_PARSE_FAILURE;
+
+ if (prlo->prlo_acc_params[page].opa_valid != 0)
+ return FC_PARSE_FAILURE;
+
+ if (prlo->prlo_acc_params[page].rpa_valid != 0)
+ return FC_PARSE_FAILURE;
+
+ if (prlo->prlo_acc_params[page].orig_process_assc != 0)
+ return FC_PARSE_FAILURE;
+
+ if (prlo->prlo_acc_params[page].resp_process_assc != 0)
+ return FC_PARSE_FAILURE;
+ }
+ return FC_PARSE_OK;
+
+}
+
+u16
+fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+ int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
+{
+ struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
+ int page;
+
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+ memset(tprlo, 0, (num_pages * 16) + 4);
+ tprlo->command = FC_ELS_TPRLO;
+ tprlo->page_len = 0x10;
+ tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+ for (page = 0; page < num_pages; page++) {
+ tprlo->tprlo_params[page].type = FC_TYPE_FCP;
+ tprlo->tprlo_params[page].opa_valid = 0;
+ tprlo->tprlo_params[page].rpa_valid = 0;
+ tprlo->tprlo_params[page].orig_process_assc = 0;
+ tprlo->tprlo_params[page].resp_process_assc = 0;
+ if (tprlo_type == FC_GLOBAL_LOGO) {
+ tprlo->tprlo_params[page].global_process_logout = 1;
+ } else if (tprlo_type == FC_TPR_LOGO) {
+ tprlo->tprlo_params[page].tpo_nport_valid = 1;
+ tprlo->tprlo_params[page].tpo_nport_id = (tpr_id);
+ }
+ }
+
+ return be16_to_cpu(tprlo->payload_len);
+}
+
+u16
+fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
+{
+ struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
+ int num_pages = 0;
+ int page = 0;
+
+ len = len;
+
+ if (tprlo->command != FC_ELS_ACC)
+ return FC_PARSE_ACC_INVAL;
+
+ num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
+
+ for (page = 0; page < num_pages; page++) {
+ if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
+ return FC_PARSE_NOT_FCP;
+ if (tprlo->tprlo_acc_params[page].opa_valid != 0)
+ return FC_PARSE_OPAFLAG_INVAL;
+ if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
+ return FC_PARSE_RPAFLAG_INVAL;
+ if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
+ return FC_PARSE_OPA_INVAL;
+ if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
+ return FC_PARSE_RPA_INVAL;
+ }
+ return FC_PARSE_OK;
+}
+
+enum fc_parse_status
+fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
+{
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ len = len;
+ if (els_cmd->els_code != FC_ELS_ACC)
+ return FC_PARSE_FAILURE;
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
+ u32 reason_code, u32 reason_expl)
+{
+ struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
+
+ fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
+
+ fchs->cat_info = FC_CAT_BA_RJT;
+ ba_rjt->reason_code = reason_code;
+ ba_rjt->reason_expl = reason_expl;
+ return sizeof(struct fc_ba_rjt_s);
+}
+
+static void
+fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
+{
+ memset(cthdr, 0, sizeof(struct ct_hdr_s));
+ cthdr->rev_id = CT_GS3_REVISION;
+ cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
+ cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
+ cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
+}
+
+static void
+fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
+{
+ memset(cthdr, 0, sizeof(struct ct_hdr_s));
+ cthdr->rev_id = CT_GS3_REVISION;
+ cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
+ cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
+ cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
+}
+
+static void
+fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
+ u8 sub_type)
+{
+ memset(cthdr, 0, sizeof(struct ct_hdr_s));
+ cthdr->rev_id = CT_GS3_REVISION;
+ cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
+ cthdr->gs_sub_type = sub_type;
+ cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
+}
+
+u16
+fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ wwn_t port_name)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
+
+ memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
+ gidpn->port_name = port_name;
+ return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ u32 port_id)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
+
+ memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
+ gpnid->dap = port_id;
+ return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ u32 port_id)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
+
+ memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
+ gnnid->dap = port_id;
+ return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
+{
+ if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
+ if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
+ return FC_PARSE_BUSY;
+ else
+ return FC_PARSE_FAILURE;
+ }
+
+ return FC_PARSE_OK;
+}
+
+u16
+fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+ u32 d_id, u32 s_id, u16 ox_id, u8 reason_code,
+ u8 reason_code_expl)
+{
+ fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id);
+
+ cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT);
+ cthdr->rev_id = CT_GS3_REVISION;
+
+ cthdr->reason_code = reason_code;
+ cthdr->exp_code = reason_code_expl;
+ return sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
+ u8 set_br_reg, u32 s_id, u16 ox_id)
+{
+ u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
+
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+ memset(scr, 0, sizeof(struct fc_scr_s));
+ scr->command = FC_ELS_SCR;
+ scr->reg_func = FC_SCR_REG_FUNC_FULL;
+ if (set_br_reg)
+ scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE;
+
+ return sizeof(struct fc_scr_s);
+}
+
+u16
+fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
+ u32 s_id, u16 ox_id)
+{
+ u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
+ u16 payldlen;
+
+ fc_els_req_build(fchs, d_id, s_id, ox_id);
+ rscn->command = FC_ELS_RSCN;
+ rscn->pagelen = sizeof(rscn->event[0]);
+
+ payldlen = sizeof(u32) + rscn->pagelen;
+ rscn->payldlen = cpu_to_be16(payldlen);
+
+ rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
+ rscn->event[0].portid = s_id;
+
+ return sizeof(struct fc_rscn_pl_s);
+}
+
+u16
+fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ enum bfa_lport_role roles)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
+ u32 type_value, d_id = bfa_hton3b(FC_NAME_SERVER);
+ u8 index;
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
+
+ memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+
+ rftid->dap = s_id;
+
+ /* By default, FCP FC4 Type is registered */
+ index = FC_TYPE_FCP >> 5;
+ type_value = 1 << (FC_TYPE_FCP % 32);
+ rftid->fc4_type[index] = cpu_to_be32(type_value);
+
+ return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ u8 *fc4_bitmap, u32 bitmap_size)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
+
+ memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+
+ rftid->dap = s_id;
+ memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
+ (bitmap_size < 32 ? bitmap_size : 32));
+
+ return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ u8 fc4_type, u8 fc4_ftrs)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
+
+ memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
+
+ rffid->dap = s_id;
+ rffid->fc4ftr_bits = fc4_ftrs;
+ rffid->fc4_type = fc4_type;
+
+ return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ u8 *name)
+{
+
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rspnid_req_s *rspnid =
+ (struct fcgs_rspnid_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
+
+ memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
+
+ rspnid->dap = s_id;
+ rspnid->spn_len = (u8) strlen((char *)name);
+ strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+
+ return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ wwn_t node_name, u8 *name)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rsnn_nn_req_s *rsnn_nn =
+ (struct fcgs_rsnn_nn_req_s *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN);
+
+ memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
+
+ rsnn_nn->node_name = node_name;
+ rsnn_nn->snn_len = (u8) strlen((char *)name);
+ strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+
+ return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
+{
+
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+
+ fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
+
+ memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
+ gidft->fc4_type = fc4_type;
+ gidft->domain_id = 0;
+ gidft->area_id = 0;
+
+ return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+ wwn_t port_name)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
+
+ memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
+ rpnid->port_id = port_id;
+ rpnid->port_name = port_name;
+
+ return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+ wwn_t node_name)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
+
+ memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
+ rnnid->port_id = port_id;
+ rnnid->node_name = node_name;
+
+ return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+ u32 cos)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rcsid_req_s *rcsid =
+ (struct fcgs_rcsid_req_s *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
+
+ memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
+ rcsid->port_id = port_id;
+ rcsid->cos = cos;
+
+ return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+ u8 port_type)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
+
+ memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
+ rptid->port_id = port_id;
+ rptid->port_type = port_type;
+
+ return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_NAME_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
+
+ memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
+ ganxt->port_id = port_id;
+
+ return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
+}
+
+/*
+ * Builds fc hdr and ct hdr for FDMI requests.
+ */
+u16
+fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u16 cmd_code)
+{
+
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ u32 d_id = bfa_hton3b(FC_MGMT_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
+
+ return sizeof(struct ct_hdr_s);
+}
+
+/*
+ * Given a FC4 Type, this function returns a fc4 type bitmask
+ */
+void
+fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
+{
+ u8 index;
+ __be32 *ptr = (__be32 *) bit_mask;
+ u32 type_value;
+
+ /*
+ * @todo : Check for bitmask size
+ */
+
+ index = fc4_type >> 5;
+ type_value = 1 << (fc4_type % 32);
+ ptr[index] = cpu_to_be32(type_value);
+
+}
+
+/*
+ * GMAL Request
+ */
+u16
+fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_MGMT_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
+ CT_GSSUBTYPE_CFGSERVER);
+
+ memset(gmal, 0, sizeof(fcgs_gmal_req_t));
+ gmal->wwn = wwn;
+
+ return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
+}
+
+/*
+ * GFN (Get Fabric Name) Request
+ */
+u16
+fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
+{
+ struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+ fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
+ u32 d_id = bfa_hton3b(FC_MGMT_SERVER);
+
+ fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+ fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
+ CT_GSSUBTYPE_CFGSERVER);
+
+ memset(gfn, 0, sizeof(fcgs_gfn_req_t));
+ gfn->wwn = wwn;
+
+ return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
+}
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
new file mode 100644
index 000000000..03c753d1e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * fcbuild.h - FC link service frame building and parsing routines
+ */
+
+#ifndef __FCBUILD_H__
+#define __FCBUILD_H__
+
+#include "bfad_drv.h"
+#include "bfa_fc.h"
+#include "bfa_defs_fcs.h"
+
+/*
+ * Utility Macros/functions
+ */
+
+#define wwn_is_equal(_wwn1, _wwn2) \
+ (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
+
+#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
+
+/*
+ * Given the fc response length, this routine will return
+ * the length of the actual payload bytes following the CT header.
+ *
+ * Assumes the input response length does not include the crc, eof, etc.
+ */
+static inline u32
+fc_get_ctresp_pyld_len(u32 resp_len)
+{
+ return resp_len - sizeof(struct ct_hdr_s);
+}
+
+/*
+ * Convert bfa speed to rpsc speed value.
+ */
+static inline enum bfa_port_speed
+fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
+{
+ switch (speed) {
+
+ case RPSC_OP_SPEED_1G:
+ return BFA_PORT_SPEED_1GBPS;
+
+ case RPSC_OP_SPEED_2G:
+ return BFA_PORT_SPEED_2GBPS;
+
+ case RPSC_OP_SPEED_4G:
+ return BFA_PORT_SPEED_4GBPS;
+
+ case RPSC_OP_SPEED_8G:
+ return BFA_PORT_SPEED_8GBPS;
+
+ case RPSC_OP_SPEED_16G:
+ return BFA_PORT_SPEED_16GBPS;
+
+ case RPSC_OP_SPEED_10G:
+ return BFA_PORT_SPEED_10GBPS;
+
+ default:
+ return BFA_PORT_SPEED_UNKNOWN;
+ }
+}
+
+/*
+ * Convert RPSC speed to bfa speed value.
+ */
+static inline enum fc_rpsc_op_speed
+fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
+{
+ switch (op_speed) {
+
+ case BFA_PORT_SPEED_1GBPS:
+ return RPSC_OP_SPEED_1G;
+
+ case BFA_PORT_SPEED_2GBPS:
+ return RPSC_OP_SPEED_2G;
+
+ case BFA_PORT_SPEED_4GBPS:
+ return RPSC_OP_SPEED_4G;
+
+ case BFA_PORT_SPEED_8GBPS:
+ return RPSC_OP_SPEED_8G;
+
+ case BFA_PORT_SPEED_16GBPS:
+ return RPSC_OP_SPEED_16G;
+
+ case BFA_PORT_SPEED_10GBPS:
+ return RPSC_OP_SPEED_10G;
+
+ default:
+ return RPSC_OP_SPEED_NOT_EST;
+ }
+}
+
+enum fc_parse_status {
+ FC_PARSE_OK = 0,
+ FC_PARSE_FAILURE = 1,
+ FC_PARSE_BUSY = 2,
+ FC_PARSE_LEN_INVAL,
+ FC_PARSE_ACC_INVAL,
+ FC_PARSE_PWWN_NOT_EQUAL,
+ FC_PARSE_NWWN_NOT_EQUAL,
+ FC_PARSE_RXSZ_INVAL,
+ FC_PARSE_NOT_FCP,
+ FC_PARSE_OPAFLAG_INVAL,
+ FC_PARSE_RPAFLAG_INVAL,
+ FC_PARSE_OPA_INVAL,
+ FC_PARSE_RPA_INVAL,
+
+};
+
+struct fc_templates_s {
+ struct fchs_s fc_els_req;
+ struct fchs_s fc_bls_req;
+ struct fc_logi_s plogi;
+ struct fc_rrq_s rrq;
+};
+
+void fcbuild_init(void);
+
+u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
+ u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size, u8 set_npiv, u8 set_auth,
+ u16 local_bb_credits);
+
+u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
+ u16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size);
+
+u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
+ u32 s_id, __be16 ox_id,
+ wwn_t port_name, wwn_t node_name,
+ u16 pdu_size,
+ u16 local_bb_credits, u8 bb_scn);
+
+u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
+ u32 s_id, u16 ox_id, wwn_t port_name,
+ wwn_t node_name, u16 pdu_size, u16 bb_cr);
+
+enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
+
+u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
+ u16 ox_id);
+
+enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
+
+u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
+ u32 s_id, u16 ox_id, u16 rrq_oxid);
+enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
+
+u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+ u16 ox_id, u8 *name);
+u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id,
+ wwn_t node_name, u8 *name);
+
+u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+ u16 ox_id, enum bfa_lport_role role);
+
+u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u16 ox_id, u8 *fc4_bitmap,
+ u32 bitmap_size);
+
+u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
+
+u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u16 ox_id, wwn_t port_name);
+
+u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+ u16 ox_id, u32 port_id);
+
+u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+ u32 d_id, u32 s_id, u16 ox_id,
+ u8 reason_code, u8 reason_code_expl);
+
+u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
+ u8 set_br_reg, u32 s_id, u16 ox_id);
+
+u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+ u32 s_id, u16 ox_id,
+ wwn_t port_name, wwn_t node_name,
+ u16 pdu_size, u16 bb_cr);
+
+u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
+ u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
+ wwn_t node_name);
+
+enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
+ u32 host_dap, wwn_t node_name, wwn_t port_name);
+
+enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
+ wwn_t port_name, wwn_t node_name);
+
+u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
+ u32 d_id, u32 s_id, __be16 ox_id,
+ wwn_t port_name, wwn_t node_name);
+u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
+ u32 d_id, u32 s_id, __be16 ox_id,
+ u8 reason_code, u8 reason_code_expl);
+u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
+ u32 d_id, u32 s_id, __be16 ox_id);
+u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
+ u32 s_id, u16 ox_id);
+
+enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
+
+u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+ u32 s_id, __be16 ox_id,
+ enum bfa_lport_role role);
+
+u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
+ u32 d_id, u32 s_id, u16 ox_id,
+ u32 data_format);
+
+u16 fc_rnid_acc_build(struct fchs_s *fchs,
+ struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
+ __be16 ox_id, u32 data_format,
+ struct fc_rnid_common_id_data_s *common_id_data,
+ struct fc_rnid_general_topology_data_s *gen_topo_data);
+
+u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
+ u32 d_id, u32 s_id, u32 *pid_list, u16 npids);
+u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
+ u32 d_id, u32 s_id, u16 ox_id);
+u16 fc_rpsc_acc_build(struct fchs_s *fchs,
+ struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
+ __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
+u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
+ u8 fc4_type);
+
+u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u32 port_id, wwn_t port_name);
+
+u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u32 port_id, wwn_t node_name);
+
+u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u32 port_id, u32 cos);
+
+u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u32 port_id, u8 port_type);
+
+u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u32 port_id);
+
+u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
+ u32 s_id, u16 ox_id, wwn_t port_name);
+
+u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+ u32 s_id, __be16 ox_id);
+
+u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+ u16 cmd_code);
+u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
+u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
+
+void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
+
+void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ __be16 ox_id);
+
+enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
+
+enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
+ wwn_t port_name);
+
+enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
+
+enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
+ wwn_t port_name);
+
+u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
+ u32 s_id, __be16 ox_id, u16 rx_id);
+
+int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
+
+u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
+ u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
+
+u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
+ u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
+
+u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ u16 ox_id, wwn_t port_name, wwn_t node_name,
+ u16 pdu_size);
+
+u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
+
+u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ u16 ox_id, int num_pages);
+
+u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
+ u32 tpr_id);
+
+u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+ __be16 ox_id, u32 reason_code, u32 reason_expl);
+
+u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+ u32 port_id);
+
+u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
+
+u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
+ u16 ox_id);
+#endif
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
new file mode 100644
index 000000000..d7385d1d9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -0,0 +1,3936 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+
+BFA_TRC_FILE(HAL, FCPIM);
+
+/*
+ * BFA ITNIM Related definitions
+ */
+static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+static void bfa_ioim_lm_init(struct bfa_s *bfa);
+
+#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
+ (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
+
+#define bfa_fcpim_additn(__itnim) \
+ list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
+#define bfa_fcpim_delitn(__itnim) do { \
+ WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
+ bfa_itnim_update_del_itn_stats(__itnim); \
+ list_del(&(__itnim)->qe); \
+ WARN_ON(!list_empty(&(__itnim)->io_q)); \
+ WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
+ WARN_ON(!list_empty(&(__itnim)->pending_q)); \
+} while (0)
+
+#define bfa_itnim_online_cb(__itnim) do { \
+ if ((__itnim)->bfa->fcs) \
+ bfa_cb_itnim_online((__itnim)->ditn); \
+ else { \
+ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
+ __bfa_cb_itnim_online, (__itnim)); \
+ } \
+} while (0)
+
+#define bfa_itnim_offline_cb(__itnim) do { \
+ if ((__itnim)->bfa->fcs) \
+ bfa_cb_itnim_offline((__itnim)->ditn); \
+ else { \
+ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
+ __bfa_cb_itnim_offline, (__itnim)); \
+ } \
+} while (0)
+
+#define bfa_itnim_sler_cb(__itnim) do { \
+ if ((__itnim)->bfa->fcs) \
+ bfa_cb_itnim_sler((__itnim)->ditn); \
+ else { \
+ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
+ __bfa_cb_itnim_sler, (__itnim)); \
+ } \
+} while (0)
+
+enum bfa_ioim_lm_ua_status {
+ BFA_IOIM_LM_UA_RESET = 0,
+ BFA_IOIM_LM_UA_SET = 1,
+};
+
+/*
+ * itnim state machine event
+ */
+enum bfa_itnim_event {
+ BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
+ BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
+ BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
+ BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
+ BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
+ BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
+ BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
+ BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
+ BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
+};
+
+/*
+ * BFA IOIM related definitions
+ */
+#define bfa_ioim_move_to_comp_q(__ioim) do { \
+ list_del(&(__ioim)->qe); \
+ list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
+} while (0)
+
+
+#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
+ if ((__fcpim)->profile_comp) \
+ (__fcpim)->profile_comp(__ioim); \
+} while (0)
+
+#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
+ if ((__fcpim)->profile_start) \
+ (__fcpim)->profile_start(__ioim); \
+} while (0)
+
+/*
+ * IO state machine events
+ */
+enum bfa_ioim_event {
+ BFA_IOIM_SM_START = 1, /* io start request from host */
+ BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
+ BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
+ BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
+ BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
+ BFA_IOIM_SM_FREE = 6, /* io resource is freed */
+ BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
+ BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
+ BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
+ BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
+ BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
+ BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
+ BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
+ BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
+ BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
+ BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
+ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
+ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
+};
+
+
+/*
+ * BFA TSKIM related definitions
+ */
+
+/*
+ * task management completion handling
+ */
+#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
+ bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
+ bfa_tskim_notify_comp(__tskim); \
+} while (0)
+
+#define bfa_tskim_notify_comp(__tskim) do { \
+ if ((__tskim)->notify) \
+ bfa_itnim_tskdone((__tskim)->itnim); \
+} while (0)
+
+
+enum bfa_tskim_event {
+ BFA_TSKIM_SM_START = 1, /* TM command start */
+ BFA_TSKIM_SM_DONE = 2, /* TM completion */
+ BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
+ BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
+ BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
+ BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
+ BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
+ BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
+ BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
+};
+
+/*
+ * forward declaration for BFA ITNIM functions
+ */
+static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
+static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
+static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
+static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
+static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
+static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
+static void bfa_itnim_iotov(void *itnim_arg);
+static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
+static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
+static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
+
+/*
+ * forward declaration of ITNIM state machine
+ */
+static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event);
+
+/*
+ * forward declaration for BFA IOIM functions
+ */
+static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
+static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
+static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+
+/*
+ * forward declaration of BFA IO state machine
+ */
+static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+/*
+ * forward declaration for BFA TSKIM functions
+ */
+static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
+static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
+ struct scsi_lun lun);
+static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
+static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
+static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
+static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
+static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
+static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
+
+/*
+ * forward declaration of BFA TSKIM state machine
+ */
+static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event);
+/*
+ * BFA FCP Initiator Mode module
+ */
+
+/*
+ * Compute and return memory needed by FCP(im) module.
+ */
+static void
+bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
+{
+ bfa_itnim_meminfo(cfg, km_len);
+
+ /*
+ * IO memory
+ */
+ *km_len += cfg->fwcfg.num_ioim_reqs *
+ (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
+
+ /*
+ * task management command memory
+ */
+ if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
+ cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
+ *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
+}
+
+
+static void
+bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
+ struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+ struct bfa_s *bfa = fcp->bfa;
+
+ bfa_trc(bfa, cfg->drvcfg.path_tov);
+ bfa_trc(bfa, cfg->fwcfg.num_rports);
+ bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
+ bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
+
+ fcpim->fcp = fcp;
+ fcpim->bfa = bfa;
+ fcpim->num_itnims = cfg->fwcfg.num_rports;
+ fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
+ fcpim->path_tov = cfg->drvcfg.path_tov;
+ fcpim->delay_comp = cfg->drvcfg.delay_comp;
+ fcpim->profile_comp = NULL;
+ fcpim->profile_start = NULL;
+
+ bfa_itnim_attach(fcpim);
+ bfa_tskim_attach(fcpim);
+ bfa_ioim_attach(fcpim);
+}
+
+static void
+bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
+{
+ struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+ struct bfa_itnim_s *itnim;
+ struct list_head *qe, *qen;
+
+ /* Enqueue unused ioim resources to free_q */
+ list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
+
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_iocdisable(itnim);
+ }
+}
+
+void
+bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+
+ fcpim->path_tov = path_tov * 1000;
+ if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
+ fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
+}
+
+u16
+bfa_fcpim_path_tov_get(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+
+ return fcpim->path_tov / 1000;
+}
+
+#define bfa_fcpim_add_iostats(__l, __r, __stats) \
+ (__l->__stats += __r->__stats)
+
+void
+bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
+ struct bfa_itnim_iostats_s *rstats)
+{
+ bfa_fcpim_add_iostats(lstats, rstats, total_ios);
+ bfa_fcpim_add_iostats(lstats, rstats, qresumes);
+ bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
+ bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
+ bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
+ bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
+ bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
+ bfa_fcpim_add_iostats(lstats, rstats, onlines);
+ bfa_fcpim_add_iostats(lstats, rstats, offlines);
+ bfa_fcpim_add_iostats(lstats, rstats, creates);
+ bfa_fcpim_add_iostats(lstats, rstats, deletes);
+ bfa_fcpim_add_iostats(lstats, rstats, create_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, sler_events);
+ bfa_fcpim_add_iostats(lstats, rstats, fw_create);
+ bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
+ bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
+ bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_success);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, io_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
+ bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
+ bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
+ bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+}
+
+bfa_status_t
+bfa_fcpim_port_iostats(struct bfa_s *bfa,
+ struct bfa_itnim_iostats_s *stats, u8 lp_tag)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+
+ /* accumulate IO stats from itnim */
+ memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ if (itnim->rport->rport_info.lp_tag != lp_tag)
+ continue;
+ bfa_fcpim_add_stats(stats, &(itnim->stats));
+ }
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+ struct bfa_itnim_latency_s *io_lat =
+ &(ioim->itnim->ioprofile.io_latency);
+ u32 val, idx;
+
+ val = (u32)(jiffies - ioim->start_time);
+ idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
+ bfa_itnim_ioprofile_update(ioim->itnim, idx);
+
+ io_lat->count[idx]++;
+ io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
+ io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
+ io_lat->avg[idx] += val;
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+ ioim->start_time = jiffies;
+}
+
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+{
+ struct bfa_itnim_s *itnim;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe, *qen;
+
+ /* accumulate IO stats from itnim */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ fcpim->io_profile = BFA_TRUE;
+ fcpim->io_profile_start_time = time;
+ fcpim->profile_comp = bfa_ioim_profile_comp;
+ fcpim->profile_start = bfa_ioim_profile_start;
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ fcpim->io_profile = BFA_FALSE;
+ fcpim->io_profile_start_time = 0;
+ fcpim->profile_comp = NULL;
+ fcpim->profile_start = NULL;
+ return BFA_STATUS_OK;
+}
+
+u16
+bfa_fcpim_qdepth_get(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+
+ return fcpim->q_depth;
+}
+
+/*
+ * BFA ITNIM module state machine functions
+ */
+
+/*
+ * Beginning/unallocated state - no events expected.
+ */
+static void
+bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_CREATE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_created);
+ itnim->is_online = BFA_FALSE;
+ bfa_fcpim_additn(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Beginning state, only online event expected.
+ */
+static void
+bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_ONLINE:
+ if (bfa_itnim_send_fwcreate(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Waiting for itnim create response from firmware.
+ */
+static void
+bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_FWRSP:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_online);
+ itnim->is_online = BFA_TRUE;
+ bfa_itnim_iotov_online(itnim);
+ bfa_itnim_online_cb(itnim);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
+ break;
+
+ case BFA_ITNIM_SM_OFFLINE:
+ if (bfa_itnim_send_fwdelete(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+static void
+bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_QRESUME:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+ bfa_itnim_send_fwcreate(itnim);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_reqq_wcancel(&itnim->reqq_wait);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ case BFA_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
+ bfa_reqq_wcancel(&itnim->reqq_wait);
+ bfa_itnim_offline_cb(itnim);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ bfa_reqq_wcancel(&itnim->reqq_wait);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Waiting for itnim create response from firmware, a delete is pending.
+ */
+static void
+bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_FWRSP:
+ if (bfa_itnim_send_fwdelete(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Online state - normal parking state.
+ */
+static void
+bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
+ itnim->is_online = BFA_FALSE;
+ bfa_itnim_iotov_start(itnim);
+ bfa_itnim_cleanup(itnim);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+ itnim->is_online = BFA_FALSE;
+ bfa_itnim_cleanup(itnim);
+ break;
+
+ case BFA_ITNIM_SM_SLER:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
+ itnim->is_online = BFA_FALSE;
+ bfa_itnim_iotov_start(itnim);
+ bfa_itnim_sler_cb(itnim);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ itnim->is_online = BFA_FALSE;
+ bfa_itnim_iotov_start(itnim);
+ bfa_itnim_iocdisable_cleanup(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Second level error recovery need.
+ */
+static void
+bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
+ bfa_itnim_cleanup(itnim);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+ bfa_itnim_cleanup(itnim);
+ bfa_itnim_iotov_delete(itnim);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ bfa_itnim_iocdisable_cleanup(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Going offline. Waiting for active IO cleanup.
+ */
+static void
+bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_CLEANUP:
+ if (bfa_itnim_send_fwdelete(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+ bfa_itnim_iotov_delete(itnim);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ bfa_itnim_iocdisable_cleanup(itnim);
+ bfa_itnim_offline_cb(itnim);
+ break;
+
+ case BFA_ITNIM_SM_SLER:
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Deleting itnim. Waiting for active IO cleanup.
+ */
+static void
+bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_CLEANUP:
+ if (bfa_itnim_send_fwdelete(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ bfa_itnim_iocdisable_cleanup(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
+ */
+static void
+bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_FWRSP:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
+ bfa_itnim_offline_cb(itnim);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ bfa_itnim_offline_cb(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+static void
+bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_QRESUME:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+ bfa_itnim_send_fwdelete(itnim);
+ break;
+
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ bfa_reqq_wcancel(&itnim->reqq_wait);
+ bfa_itnim_offline_cb(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Offline state.
+ */
+static void
+bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_itnim_iotov_delete(itnim);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ case BFA_ITNIM_SM_ONLINE:
+ if (bfa_itnim_send_fwcreate(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+static void
+bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_itnim_iotov_delete(itnim);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ case BFA_ITNIM_SM_OFFLINE:
+ bfa_itnim_offline_cb(itnim);
+ break;
+
+ case BFA_ITNIM_SM_ONLINE:
+ if (bfa_itnim_send_fwcreate(itnim))
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+ else
+ bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Itnim is deleted, waiting for firmware response to delete.
+ */
+static void
+bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_FWRSP:
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+static void
+bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
+ enum bfa_itnim_event event)
+{
+ bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+ bfa_trc(itnim->bfa, event);
+
+ switch (event) {
+ case BFA_ITNIM_SM_QRESUME:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+ bfa_itnim_send_fwdelete(itnim);
+ break;
+
+ case BFA_ITNIM_SM_HWFAIL:
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ bfa_reqq_wcancel(&itnim->reqq_wait);
+ bfa_fcpim_delitn(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->bfa, event);
+ }
+}
+
+/*
+ * Initiate cleanup of all IOs on an IOC failure.
+ */
+static void
+bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
+{
+ struct bfa_tskim_s *tskim;
+ struct bfa_ioim_s *ioim;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &itnim->tsk_q) {
+ tskim = (struct bfa_tskim_s *) qe;
+ bfa_tskim_iocdisable(tskim);
+ }
+
+ list_for_each_safe(qe, qen, &itnim->io_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ bfa_ioim_iocdisable(ioim);
+ }
+
+ /*
+ * For IO request in pending queue, we pretend an early timeout.
+ */
+ list_for_each_safe(qe, qen, &itnim->pending_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ bfa_ioim_tov(ioim);
+ }
+
+ list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ bfa_ioim_iocdisable(ioim);
+ }
+}
+
+/*
+ * IO cleanup completion
+ */
+static void
+bfa_itnim_cleanp_comp(void *itnim_cbarg)
+{
+ struct bfa_itnim_s *itnim = itnim_cbarg;
+
+ bfa_stats(itnim, cleanup_comps);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
+}
+
+/*
+ * Initiate cleanup of all IOs.
+ */
+static void
+bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
+{
+ struct bfa_ioim_s *ioim;
+ struct bfa_tskim_s *tskim;
+ struct list_head *qe, *qen;
+
+ bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
+
+ list_for_each_safe(qe, qen, &itnim->io_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+
+ /*
+ * Move IO to a cleanup queue from active queue so that a later
+ * TM will not pickup this IO.
+ */
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
+
+ bfa_wc_up(&itnim->wc);
+ bfa_ioim_cleanup(ioim);
+ }
+
+ list_for_each_safe(qe, qen, &itnim->tsk_q) {
+ tskim = (struct bfa_tskim_s *) qe;
+ bfa_wc_up(&itnim->wc);
+ bfa_tskim_cleanup(tskim);
+ }
+
+ bfa_wc_wait(&itnim->wc);
+}
+
+static void
+__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_itnim_s *itnim = cbarg;
+
+ if (complete)
+ bfa_cb_itnim_online(itnim->ditn);
+}
+
+static void
+__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_itnim_s *itnim = cbarg;
+
+ if (complete)
+ bfa_cb_itnim_offline(itnim->ditn);
+}
+
+static void
+__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_itnim_s *itnim = cbarg;
+
+ if (complete)
+ bfa_cb_itnim_sler(itnim->ditn);
+}
+
+/*
+ * Call to resume any I/O requests waiting for room in request queue.
+ */
+static void
+bfa_itnim_qresume(void *cbarg)
+{
+ struct bfa_itnim_s *itnim = cbarg;
+
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
+}
+
+/*
+ * bfa_itnim_public
+ */
+
+void
+bfa_itnim_iodone(struct bfa_itnim_s *itnim)
+{
+ bfa_wc_down(&itnim->wc);
+}
+
+void
+bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
+{
+ bfa_wc_down(&itnim->wc);
+}
+
+void
+bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
+{
+ /*
+ * ITN memory
+ */
+ *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
+}
+
+void
+bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
+{
+ struct bfa_s *bfa = fcpim->bfa;
+ struct bfa_fcp_mod_s *fcp = fcpim->fcp;
+ struct bfa_itnim_s *itnim;
+ int i, j;
+
+ INIT_LIST_HEAD(&fcpim->itnim_q);
+
+ itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
+ fcpim->itnim_arr = itnim;
+
+ for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
+ memset(itnim, 0, sizeof(struct bfa_itnim_s));
+ itnim->bfa = bfa;
+ itnim->fcpim = fcpim;
+ itnim->reqq = BFA_REQQ_QOS_LO;
+ itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
+ itnim->iotov_active = BFA_FALSE;
+ bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
+
+ INIT_LIST_HEAD(&itnim->io_q);
+ INIT_LIST_HEAD(&itnim->io_cleanup_q);
+ INIT_LIST_HEAD(&itnim->pending_q);
+ INIT_LIST_HEAD(&itnim->tsk_q);
+ INIT_LIST_HEAD(&itnim->delay_comp_q);
+ for (j = 0; j < BFA_IOBUCKET_MAX; j++)
+ itnim->ioprofile.io_latency.min[j] = ~0;
+ bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+ }
+
+ bfa_mem_kva_curp(fcp) = (u8 *) itnim;
+}
+
+void
+bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
+{
+ bfa_stats(itnim, ioc_disabled);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
+}
+
+static bfa_boolean_t
+bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
+{
+ struct bfi_itn_create_req_s *m;
+
+ itnim->msg_no++;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(itnim->bfa, itnim->reqq);
+ if (!m) {
+ bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
+ bfa_fn_lpu(itnim->bfa));
+ m->fw_handle = itnim->rport->fw_handle;
+ m->class = FC_CLASS_3;
+ m->seq_rec = itnim->seq_rec;
+ m->msg_no = itnim->msg_no;
+ bfa_stats(itnim, fw_create);
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
+{
+ struct bfi_itn_delete_req_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(itnim->bfa, itnim->reqq);
+ if (!m) {
+ bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
+ bfa_fn_lpu(itnim->bfa));
+ m->fw_handle = itnim->rport->fw_handle;
+ bfa_stats(itnim, fw_delete);
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
+ return BFA_TRUE;
+}
+
+/*
+ * Cleanup all pending failed inflight requests.
+ */
+static void
+bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
+{
+ struct bfa_ioim_s *ioim;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
+ ioim = (struct bfa_ioim_s *)qe;
+ bfa_ioim_delayed_comp(ioim, iotov);
+ }
+}
+
+/*
+ * Start all pending IO requests.
+ */
+static void
+bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
+{
+ struct bfa_ioim_s *ioim;
+
+ bfa_itnim_iotov_stop(itnim);
+
+ /*
+ * Abort all inflight IO requests in the queue
+ */
+ bfa_itnim_delayed_comp(itnim, BFA_FALSE);
+
+ /*
+ * Start all pending IO requests.
+ */
+ while (!list_empty(&itnim->pending_q)) {
+ bfa_q_deq(&itnim->pending_q, &ioim);
+ list_add_tail(&ioim->qe, &itnim->io_q);
+ bfa_ioim_start(ioim);
+ }
+}
+
+/*
+ * Fail all pending IO requests
+ */
+static void
+bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
+{
+ struct bfa_ioim_s *ioim;
+
+ /*
+ * Fail all inflight IO requests in the queue
+ */
+ bfa_itnim_delayed_comp(itnim, BFA_TRUE);
+
+ /*
+ * Fail any pending IO requests.
+ */
+ while (!list_empty(&itnim->pending_q)) {
+ bfa_q_deq(&itnim->pending_q, &ioim);
+ list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+ bfa_ioim_tov(ioim);
+ }
+}
+
+/*
+ * IO TOV timer callback. Fail any pending IO requests.
+ */
+static void
+bfa_itnim_iotov(void *itnim_arg)
+{
+ struct bfa_itnim_s *itnim = itnim_arg;
+
+ itnim->iotov_active = BFA_FALSE;
+
+ bfa_cb_itnim_tov_begin(itnim->ditn);
+ bfa_itnim_iotov_cleanup(itnim);
+ bfa_cb_itnim_tov(itnim->ditn);
+}
+
+/*
+ * Start IO TOV timer for failing back pending IO requests in offline state.
+ */
+static void
+bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
+{
+ if (itnim->fcpim->path_tov > 0) {
+
+ itnim->iotov_active = BFA_TRUE;
+ WARN_ON(!bfa_itnim_hold_io(itnim));
+ bfa_timer_start(itnim->bfa, &itnim->timer,
+ bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
+ }
+}
+
+/*
+ * Stop IO TOV timer.
+ */
+static void
+bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
+{
+ if (itnim->iotov_active) {
+ itnim->iotov_active = BFA_FALSE;
+ bfa_timer_stop(&itnim->timer);
+ }
+}
+
+/*
+ * Stop IO TOV timer.
+ */
+static void
+bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
+{
+ bfa_boolean_t pathtov_active = BFA_FALSE;
+
+ if (itnim->iotov_active)
+ pathtov_active = BFA_TRUE;
+
+ bfa_itnim_iotov_stop(itnim);
+ if (pathtov_active)
+ bfa_cb_itnim_tov_begin(itnim->ditn);
+ bfa_itnim_iotov_cleanup(itnim);
+ if (pathtov_active)
+ bfa_cb_itnim_tov(itnim->ditn);
+}
+
+static void
+bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+ fcpim->del_itn_stats.del_itn_iocomp_aborted +=
+ itnim->stats.iocomp_aborted;
+ fcpim->del_itn_stats.del_itn_iocomp_timedout +=
+ itnim->stats.iocomp_timedout;
+ fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
+ itnim->stats.iocom_sqer_needed;
+ fcpim->del_itn_stats.del_itn_iocom_res_free +=
+ itnim->stats.iocom_res_free;
+ fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
+ itnim->stats.iocom_hostabrts;
+ fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
+ fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
+ fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
+}
+
+/*
+ * bfa_itnim_public
+ */
+
+/*
+ * Itnim interrupt processing.
+ */
+void
+bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ union bfi_itn_i2h_msg_u msg;
+ struct bfa_itnim_s *itnim;
+
+ bfa_trc(bfa, m->mhdr.msg_id);
+
+ msg.msg = m;
+
+ switch (m->mhdr.msg_id) {
+ case BFI_ITN_I2H_CREATE_RSP:
+ itnim = BFA_ITNIM_FROM_TAG(fcpim,
+ msg.create_rsp->bfa_handle);
+ WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
+ bfa_stats(itnim, create_comps);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
+ break;
+
+ case BFI_ITN_I2H_DELETE_RSP:
+ itnim = BFA_ITNIM_FROM_TAG(fcpim,
+ msg.delete_rsp->bfa_handle);
+ WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+ bfa_stats(itnim, delete_comps);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
+ break;
+
+ case BFI_ITN_I2H_SLER_EVENT:
+ itnim = BFA_ITNIM_FROM_TAG(fcpim,
+ msg.sler_event->bfa_handle);
+ bfa_stats(itnim, sler_events);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
+ break;
+
+ default:
+ bfa_trc(bfa, m->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * bfa_itnim_api
+ */
+
+struct bfa_itnim_s *
+bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfa_itnim_s *itnim;
+
+ bfa_itn_create(bfa, rport, bfa_itnim_isr);
+
+ itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
+ WARN_ON(itnim->rport != rport);
+
+ itnim->ditn = ditn;
+
+ bfa_stats(itnim, creates);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
+
+ return itnim;
+}
+
+void
+bfa_itnim_delete(struct bfa_itnim_s *itnim)
+{
+ bfa_stats(itnim, deletes);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
+}
+
+void
+bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
+{
+ itnim->seq_rec = seq_rec;
+ bfa_stats(itnim, onlines);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
+}
+
+void
+bfa_itnim_offline(struct bfa_itnim_s *itnim)
+{
+ bfa_stats(itnim, offlines);
+ bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
+}
+
+/*
+ * Return true if itnim is considered offline for holding off IO request.
+ * IO is not held if itnim is being deleted.
+ */
+bfa_boolean_t
+bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
+{
+ return itnim->fcpim->path_tov && itnim->iotov_active &&
+ (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
+ bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
+ bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
+ bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
+ bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
+ bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
+}
+
+#define bfa_io_lat_clock_res_div HZ
+#define bfa_io_lat_clock_res_mul 1000
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+ struct bfa_itnim_ioprofile_s *ioprofile)
+{
+ struct bfa_fcpim_s *fcpim;
+
+ if (!itnim)
+ return BFA_STATUS_NO_FCPIM_NEXUS;
+
+ fcpim = BFA_FCPIM(itnim->bfa);
+
+ if (!fcpim->io_profile)
+ return BFA_STATUS_IOPROFILE_OFF;
+
+ itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ itnim->ioprofile.io_profile_start_time =
+ bfa_io_profile_start_time(itnim->bfa);
+ itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+ itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+ *ioprofile = itnim->ioprofile;
+
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
+{
+ int j;
+
+ if (!itnim)
+ return;
+
+ memset(&itnim->stats, 0, sizeof(itnim->stats));
+ memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
+ for (j = 0; j < BFA_IOBUCKET_MAX; j++)
+ itnim->ioprofile.io_latency.min[j] = ~0;
+}
+
+/*
+ * BFA IO module state machine functions
+ */
+
+/*
+ * IO is not started (unallocated).
+ */
+static void
+bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ switch (event) {
+ case BFA_IOIM_SM_START:
+ if (!bfa_itnim_is_online(ioim->itnim)) {
+ if (!bfa_itnim_hold_io(ioim->itnim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe,
+ &ioim->fcpim->ioim_comp_q);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_pathtov, ioim);
+ } else {
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe,
+ &ioim->itnim->pending_q);
+ }
+ break;
+ }
+
+ if (ioim->nsges > BFI_SGE_INLINE) {
+ if (!bfa_ioim_sgpg_alloc(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
+ return;
+ }
+ }
+
+ if (!bfa_ioim_send_ioreq(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+ break;
+ }
+
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ break;
+
+ case BFA_IOIM_SM_IOTOV:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_pathtov, ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /*
+ * IO in pending queue can get abort requests. Complete abort
+ * requests immediately.
+ */
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_abort, ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is waiting for SG pages.
+ */
+static void
+bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_SGALLOCED:
+ if (!bfa_ioim_send_ioreq(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+ break;
+ }
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is active.
+ */
+static void
+bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ switch (event) {
+ case BFA_IOIM_SM_COMP_GOOD:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_good_comp, ioim);
+ break;
+
+ case BFA_IOIM_SM_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ ioim->iosp->abort_explicit = BFA_TRUE;
+ ioim->io_cbfn = __bfa_cb_ioim_abort;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
+ bfa_stats(ioim->itnim, qwait);
+ bfa_reqq_wait(ioim->bfa, ioim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ ioim->iosp->abort_explicit = BFA_FALSE;
+ ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ bfa_stats(ioim->itnim, qwait);
+ bfa_reqq_wait(ioim->bfa, ioim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_SQRETRY:
+ if (bfa_ioim_maxretry_reached(ioim)) {
+ /* max retry reached, free IO */
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_failed, ioim);
+ break;
+ }
+ /* waiting for IO tag resource free */
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is retried with new tag.
+ */
+static void
+bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ switch (event) {
+ case BFA_IOIM_SM_FREE:
+ /* abts and rrq done. Now retry the IO with new tag */
+ bfa_ioim_update_iotag(ioim);
+ if (!bfa_ioim_send_ioreq(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+ break;
+ }
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ ioim->iosp->abort_explicit = BFA_FALSE;
+ ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ bfa_stats(ioim->itnim, qwait);
+ bfa_reqq_wait(ioim->bfa, ioim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_failed, ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /* in this state IO abort is done.
+ * Waiting for IO tag resource free.
+ */
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is being aborted, waiting for completion from firmware.
+ */
+static void
+bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ case BFA_IOIM_SM_DONE:
+ case BFA_IOIM_SM_FREE:
+ break;
+
+ case BFA_IOIM_SM_ABORT_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_COMP_UTAG:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
+ ioim->iosp->abort_explicit = BFA_FALSE;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ bfa_stats(ioim->itnim, qwait);
+ bfa_reqq_wait(ioim->bfa, ioim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is being cleaned up (implicit abort), waiting for completion from
+ * firmware.
+ */
+static void
+bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ case BFA_IOIM_SM_DONE:
+ case BFA_IOIM_SM_FREE:
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /*
+ * IO is already being aborted implicitly
+ */
+ ioim->io_cbfn = __bfa_cb_ioim_abort;
+ break;
+
+ case BFA_IOIM_SM_ABORT_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_COMP_UTAG:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ /*
+ * IO can be in cleanup state already due to TM command.
+ * 2nd cleanup request comes from ITN offline event.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is waiting for room in request CQ
+ */
+static void
+bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_QRESUME:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ bfa_ioim_send_ioreq(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * Active IO is being aborted, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_QRESUME:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+ bfa_ioim_send_abort(ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
+ ioim->iosp->abort_explicit = BFA_FALSE;
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ break;
+
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * Active IO is being cleaned up, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_QRESUME:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ bfa_ioim_send_abort(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /*
+ * IO is already being cleaned up implicitly
+ */
+ ioim->io_cbfn = __bfa_cb_ioim_abort;
+ break;
+
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO bfa callback is pending.
+ */
+static void
+bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ switch (event) {
+ case BFA_IOIM_SM_HCB:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+ bfa_ioim_free(ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO bfa callback is pending. IO resource cannot be freed.
+ */
+static void
+bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_HCB:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
+ break;
+
+ case BFA_IOIM_SM_FREE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * IO is completed, waiting resource free from firmware.
+ */
+static void
+bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_FREE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+ bfa_ioim_free(ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ break;
+
+ default:
+ bfa_sm_fault(ioim->bfa, event);
+ }
+}
+
+/*
+ * This is called from bfa_fcpim_start after the bfa_init() with flash read
+ * is complete by driver. now invalidate the stale content of lun mask
+ * like unit attention, rp tag and lp tag.
+ */
+static void
+bfa_ioim_lm_init(struct bfa_s *bfa)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ int i;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
+ lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+ lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+ }
+}
+
+static void
+__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
+}
+
+static void
+__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ struct bfi_ioim_rsp_s *m;
+ u8 *snsinfo = NULL;
+ u8 sns_len = 0;
+ s32 residue = 0;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+ if (m->io_status == BFI_IOIM_STS_OK) {
+ /*
+ * setup sense information, if present
+ */
+ if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
+ m->sns_len) {
+ sns_len = m->sns_len;
+ snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+ ioim->iotag);
+ }
+
+ /*
+ * setup residue value correctly for normal completions
+ */
+ if (m->resid_flags == FCP_RESID_UNDER) {
+ residue = be32_to_cpu(m->residue);
+ bfa_stats(ioim->itnim, iocomp_underrun);
+ }
+ if (m->resid_flags == FCP_RESID_OVER) {
+ residue = be32_to_cpu(m->residue);
+ residue = -residue;
+ bfa_stats(ioim->itnim, iocomp_overrun);
+ }
+ }
+
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
+ m->scsi_status, sns_len, snsinfo, residue);
+}
+
+void
+bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
+ u16 rp_tag, u8 lp_tag)
+{
+ struct bfa_lun_mask_s *lun_list;
+ u8 i;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return;
+
+ lun_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+ if ((lun_list[i].lp_wwn == lp_wwn) &&
+ (lun_list[i].rp_wwn == rp_wwn)) {
+ lun_list[i].rp_tag = rp_tag;
+ lun_list[i].lp_tag = lp_tag;
+ }
+ }
+ }
+}
+
+/*
+ * set UA for all active luns in LM DB
+ */
+static void
+bfa_ioim_lm_set_ua(struct bfa_s *bfa)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ int i;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
+{
+ struct bfa_lunmask_cfg_s *lun_mask;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ if (bfa_get_lun_mask_status(bfa) == update)
+ return BFA_STATUS_NO_CHANGE;
+
+ lun_mask = bfa_get_lun_mask(bfa);
+ lun_mask->status = update;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
+ bfa_ioim_lm_set_ua(bfa);
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
+{
+ int i;
+ struct bfa_lun_mask_s *lunm_list;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+ if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
+ bfa_rport_unset_lunmask(bfa,
+ BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
+ }
+ }
+
+ memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
+{
+ struct bfa_lunmask_cfg_s *lun_mask;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ lun_mask = bfa_get_lun_mask(bfa);
+ memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+ wwn_t rpwwn, struct scsi_lun lun)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ struct bfa_rport_s *rp = NULL;
+ int i, free_index = MAX_LUN_MASK_CFG + 1;
+ struct bfa_fcs_lport_s *port = NULL;
+ struct bfa_fcs_rport_s *rp_fcs;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+ vf_id, *pwwn);
+ if (port) {
+ *pwwn = port->port_cfg.pwwn;
+ rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
+ }
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ /* if entry exists */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ free_index = i;
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn) &&
+ (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun)))
+ return BFA_STATUS_ENTRY_EXISTS;
+ }
+
+ if (free_index > MAX_LUN_MASK_CFG)
+ return BFA_STATUS_MAX_ENTRY_REACHED;
+
+ if (rp) {
+ lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
+ rp->rport_info.local_pid);
+ lunm_list[free_index].rp_tag = rp->rport_tag;
+ } else {
+ lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
+ lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
+ }
+
+ lunm_list[free_index].lp_wwn = *pwwn;
+ lunm_list[free_index].rp_wwn = rpwwn;
+ lunm_list[free_index].lun = lun;
+ lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
+
+ /* set for all luns in this rp */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn))
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+ wwn_t rpwwn, struct scsi_lun lun)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ struct bfa_rport_s *rp = NULL;
+ struct bfa_fcs_lport_s *port = NULL;
+ struct bfa_fcs_rport_s *rp_fcs;
+ int i;
+
+ /* in min cfg lunm_list could be NULL but no commands should run. */
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ bfa_trc(bfa, *pwwn);
+ bfa_trc(bfa, rpwwn);
+ bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
+
+ if (*pwwn == 0) {
+ port = bfa_fcs_lookup_port(
+ &((struct bfad_s *)bfa->bfad)->bfa_fcs,
+ vf_id, *pwwn);
+ if (port) {
+ *pwwn = port->port_cfg.pwwn;
+ rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
+ }
+ }
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn) &&
+ (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun))) {
+ lunm_list[i].lp_wwn = 0;
+ lunm_list[i].rp_wwn = 0;
+ int_to_scsilun(0, &lunm_list[i].lun);
+ lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
+ if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
+ lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+ lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+ }
+ return bfa_dconf_update(bfa);
+ }
+ }
+
+ /* set for all luns in this rp */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn))
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+
+ return BFA_STATUS_ENTRY_NOT_EXISTS;
+}
+
+static void
+__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
+ 0, 0, NULL, 0);
+}
+
+static void
+__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+
+ bfa_stats(ioim->itnim, path_tov_expired);
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
+ 0, 0, NULL, 0);
+}
+
+static void
+__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
+}
+
+static void
+bfa_ioim_sgpg_alloced(void *cbarg)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+
+ ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
+ list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
+ ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
+}
+
+/*
+ * Send I/O request to firmware.
+ */
+static bfa_boolean_t
+bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
+{
+ struct bfa_itnim_s *itnim = ioim->itnim;
+ struct bfi_ioim_req_s *m;
+ static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
+ struct bfi_sge_s *sge, *sgpge;
+ u32 pgdlen = 0;
+ u32 fcp_dl;
+ u64 addr;
+ struct scatterlist *sg;
+ struct bfa_sgpg_s *sgpg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
+ u32 i, sge_id, pgcumsz;
+ enum dma_data_direction dmadir;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(ioim->bfa, ioim->reqq);
+ if (!m) {
+ bfa_stats(ioim->itnim, qwait);
+ bfa_reqq_wait(ioim->bfa, ioim->reqq,
+ &ioim->iosp->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ /*
+ * build i/o request message next
+ */
+ m->io_tag = cpu_to_be16(ioim->iotag);
+ m->rport_hdl = ioim->itnim->rport->fw_handle;
+ m->io_timeout = 0;
+
+ sge = &m->sges[0];
+ sgpg = ioim->sgpg;
+ sge_id = 0;
+ sgpge = NULL;
+ pgcumsz = 0;
+ scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
+ if (i == 0) {
+ /* build inline IO SG element */
+ addr = bfa_sgaddr_le(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ pgdlen = sg_dma_len(sg);
+ sge->sg_len = pgdlen;
+ sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
+ BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
+ bfa_sge_to_be(sge);
+ sge++;
+ } else {
+ if (sge_id == 0)
+ sgpge = sgpg->sgpg->sges;
+
+ addr = bfa_sgaddr_le(sg_dma_address(sg));
+ sgpge->sga = *(union bfi_addr_u *) &addr;
+ sgpge->sg_len = sg_dma_len(sg);
+ pgcumsz += sgpge->sg_len;
+
+ /* set flags */
+ if (i < (ioim->nsges - 1) &&
+ sge_id < (BFI_SGPG_DATA_SGES - 1))
+ sgpge->flags = BFI_SGE_DATA;
+ else if (i < (ioim->nsges - 1))
+ sgpge->flags = BFI_SGE_DATA_CPL;
+ else
+ sgpge->flags = BFI_SGE_DATA_LAST;
+
+ bfa_sge_to_le(sgpge);
+
+ sgpge++;
+ if (i == (ioim->nsges - 1)) {
+ sgpge->flags = BFI_SGE_PGDLEN;
+ sgpge->sga.a32.addr_lo = 0;
+ sgpge->sga.a32.addr_hi = 0;
+ sgpge->sg_len = pgcumsz;
+ bfa_sge_to_le(sgpge);
+ } else if (++sge_id == BFI_SGPG_DATA_SGES) {
+ sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
+ sgpge->flags = BFI_SGE_LINK;
+ sgpge->sga = sgpg->sgpg_pa;
+ sgpge->sg_len = pgcumsz;
+ bfa_sge_to_le(sgpge);
+ sge_id = 0;
+ pgcumsz = 0;
+ }
+ }
+ }
+
+ if (ioim->nsges > BFI_SGE_INLINE) {
+ sge->sga = ioim->sgpg->sgpg_pa;
+ } else {
+ sge->sga.a32.addr_lo = 0;
+ sge->sga.a32.addr_hi = 0;
+ }
+ sge->sg_len = pgdlen;
+ sge->flags = BFI_SGE_PGDLEN;
+ bfa_sge_to_be(sge);
+
+ /*
+ * set up I/O command parameters
+ */
+ m->cmnd = cmnd_z0;
+ int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
+ dmadir = cmnd->sc_data_direction;
+ if (dmadir == DMA_TO_DEVICE)
+ m->cmnd.iodir = FCP_IODIR_WRITE;
+ else if (dmadir == DMA_FROM_DEVICE)
+ m->cmnd.iodir = FCP_IODIR_READ;
+ else
+ m->cmnd.iodir = FCP_IODIR_NONE;
+
+ m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
+ fcp_dl = scsi_bufflen(cmnd);
+ m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
+
+ /*
+ * set up I/O message header
+ */
+ switch (m->cmnd.iodir) {
+ case FCP_IODIR_READ:
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
+ bfa_stats(itnim, input_reqs);
+ ioim->itnim->stats.rd_throughput += fcp_dl;
+ break;
+ case FCP_IODIR_WRITE:
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
+ bfa_stats(itnim, output_reqs);
+ ioim->itnim->stats.wr_throughput += fcp_dl;
+ break;
+ case FCP_IODIR_RW:
+ bfa_stats(itnim, input_reqs);
+ bfa_stats(itnim, output_reqs);
+ default:
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
+ }
+ if (itnim->seq_rec ||
+ (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
+ return BFA_TRUE;
+}
+
+/*
+ * Setup any additional SG pages needed.Inline SG element is setup
+ * at queuing time.
+ */
+static bfa_boolean_t
+bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
+{
+ u16 nsgpgs;
+
+ WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
+
+ /*
+ * allocate SG pages needed
+ */
+ nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
+ if (!nsgpgs)
+ return BFA_TRUE;
+
+ if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
+ != BFA_STATUS_OK) {
+ bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
+ return BFA_FALSE;
+ }
+
+ ioim->nsgpgs = nsgpgs;
+ ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
+
+ return BFA_TRUE;
+}
+
+/*
+ * Send I/O abort request to firmware.
+ */
+static bfa_boolean_t
+bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
+{
+ struct bfi_ioim_abort_req_s *m;
+ enum bfi_ioim_h2i msgop;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(ioim->bfa, ioim->reqq);
+ if (!m)
+ return BFA_FALSE;
+
+ /*
+ * build i/o request message next
+ */
+ if (ioim->iosp->abort_explicit)
+ msgop = BFI_IOIM_H2I_IOABORT_REQ;
+ else
+ msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
+
+ bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
+ m->io_tag = cpu_to_be16(ioim->iotag);
+ m->abort_tag = ++ioim->abort_tag;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
+ return BFA_TRUE;
+}
+
+/*
+ * Call to resume any I/O requests waiting for room in request queue.
+ */
+static void
+bfa_ioim_qresume(void *cbarg)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+
+ bfa_stats(ioim->itnim, qresumes);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
+}
+
+
+static void
+bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
+{
+ /*
+ * Move IO from itnim queue to fcpim global queue since itnim will be
+ * freed.
+ */
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+
+ if (!ioim->iosp->tskim) {
+ if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
+ bfa_cb_dequeue(&ioim->hcb_qe);
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
+ }
+ bfa_itnim_iodone(ioim->itnim);
+ } else
+ bfa_wc_down(&ioim->iosp->tskim->wc);
+}
+
+static bfa_boolean_t
+bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
+{
+ if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
+ (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
+ (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
+ (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
+ (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
+ (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
+ (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
+ return BFA_FALSE;
+
+ return BFA_TRUE;
+}
+
+void
+bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
+{
+ /*
+ * If path tov timer expired, failback with PATHTOV status - these
+ * IO requests are not normally retried by IO stack.
+ *
+ * Otherwise device cameback online and fail it with normal failed
+ * status so that IO stack retries these failed IO requests.
+ */
+ if (iotov)
+ ioim->io_cbfn = __bfa_cb_ioim_pathtov;
+ else {
+ ioim->io_cbfn = __bfa_cb_ioim_failed;
+ bfa_stats(ioim->itnim, iocom_nexus_abort);
+ }
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+
+ /*
+ * Move IO to fcpim global queue since itnim will be
+ * freed.
+ */
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+}
+
+
+/*
+ * Memory allocation and initialization.
+ */
+void
+bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
+{
+ struct bfa_ioim_s *ioim;
+ struct bfa_fcp_mod_s *fcp = fcpim->fcp;
+ struct bfa_ioim_sp_s *iosp;
+ u16 i;
+
+ /*
+ * claim memory first
+ */
+ ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
+ fcpim->ioim_arr = ioim;
+ bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
+
+ iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
+ fcpim->ioim_sp_arr = iosp;
+ bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
+
+ /*
+ * Initialize ioim free queues
+ */
+ INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
+ INIT_LIST_HEAD(&fcpim->ioim_comp_q);
+
+ for (i = 0; i < fcpim->fcp->num_ioim_reqs;
+ i++, ioim++, iosp++) {
+ /*
+ * initialize IOIM
+ */
+ memset(ioim, 0, sizeof(struct bfa_ioim_s));
+ ioim->iotag = i;
+ ioim->bfa = fcpim->bfa;
+ ioim->fcpim = fcpim;
+ ioim->iosp = iosp;
+ INIT_LIST_HEAD(&ioim->sgpg_q);
+ bfa_reqq_winit(&ioim->iosp->reqq_wait,
+ bfa_ioim_qresume, ioim);
+ bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
+ bfa_ioim_sgpg_alloced, ioim);
+ bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+ }
+}
+
+void
+bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
+ struct bfa_ioim_s *ioim;
+ u16 iotag;
+ enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
+
+ iotag = be16_to_cpu(rsp->io_tag);
+
+ ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
+ WARN_ON(ioim->iotag != iotag);
+
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, rsp->io_status);
+ bfa_trc(ioim->bfa, rsp->reuse_io_tag);
+
+ if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
+ ioim->iosp->comp_rspmsg = *m;
+
+ switch (rsp->io_status) {
+ case BFI_IOIM_STS_OK:
+ bfa_stats(ioim->itnim, iocomp_ok);
+ if (rsp->reuse_io_tag == 0)
+ evt = BFA_IOIM_SM_DONE;
+ else
+ evt = BFA_IOIM_SM_COMP;
+ break;
+
+ case BFI_IOIM_STS_TIMEDOUT:
+ bfa_stats(ioim->itnim, iocomp_timedout);
+ case BFI_IOIM_STS_ABORTED:
+ rsp->io_status = BFI_IOIM_STS_ABORTED;
+ bfa_stats(ioim->itnim, iocomp_aborted);
+ if (rsp->reuse_io_tag == 0)
+ evt = BFA_IOIM_SM_DONE;
+ else
+ evt = BFA_IOIM_SM_COMP;
+ break;
+
+ case BFI_IOIM_STS_PROTO_ERR:
+ bfa_stats(ioim->itnim, iocom_proto_err);
+ WARN_ON(!rsp->reuse_io_tag);
+ evt = BFA_IOIM_SM_COMP;
+ break;
+
+ case BFI_IOIM_STS_SQER_NEEDED:
+ bfa_stats(ioim->itnim, iocom_sqer_needed);
+ WARN_ON(rsp->reuse_io_tag != 0);
+ evt = BFA_IOIM_SM_SQRETRY;
+ break;
+
+ case BFI_IOIM_STS_RES_FREE:
+ bfa_stats(ioim->itnim, iocom_res_free);
+ evt = BFA_IOIM_SM_FREE;
+ break;
+
+ case BFI_IOIM_STS_HOST_ABORTED:
+ bfa_stats(ioim->itnim, iocom_hostabrts);
+ if (rsp->abort_tag != ioim->abort_tag) {
+ bfa_trc(ioim->bfa, rsp->abort_tag);
+ bfa_trc(ioim->bfa, ioim->abort_tag);
+ return;
+ }
+
+ if (rsp->reuse_io_tag)
+ evt = BFA_IOIM_SM_ABORT_COMP;
+ else
+ evt = BFA_IOIM_SM_ABORT_DONE;
+ break;
+
+ case BFI_IOIM_STS_UTAG:
+ bfa_stats(ioim->itnim, iocom_utags);
+ evt = BFA_IOIM_SM_COMP_UTAG;
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+
+ bfa_sm_send_event(ioim, evt);
+}
+
+void
+bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
+ struct bfa_ioim_s *ioim;
+ u16 iotag;
+
+ iotag = be16_to_cpu(rsp->io_tag);
+
+ ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
+ WARN_ON(ioim->iotag != iotag);
+
+ bfa_ioim_cb_profile_comp(fcpim, ioim);
+
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+}
+
+/*
+ * Called by itnim to clean up IO while going offline.
+ */
+void
+bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_stats(ioim->itnim, io_cleanups);
+
+ ioim->iosp->tskim = NULL;
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
+}
+
+void
+bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_stats(ioim->itnim, io_tmaborts);
+
+ ioim->iosp->tskim = tskim;
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
+}
+
+/*
+ * IOC failure handling.
+ */
+void
+bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_stats(ioim->itnim, io_iocdowns);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
+}
+
+/*
+ * IO offline TOV popped. Fail the pending IO.
+ */
+void
+bfa_ioim_tov(struct bfa_ioim_s *ioim)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
+}
+
+
+/*
+ * Allocate IOIM resource for initiator mode I/O request.
+ */
+struct bfa_ioim_s *
+bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
+ struct bfa_itnim_s *itnim, u16 nsges)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfa_ioim_s *ioim;
+ struct bfa_iotag_s *iotag = NULL;
+
+ /*
+ * alocate IOIM resource
+ */
+ bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
+ if (!iotag) {
+ bfa_stats(itnim, no_iotags);
+ return NULL;
+ }
+
+ ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
+
+ ioim->dio = dio;
+ ioim->itnim = itnim;
+ ioim->nsges = nsges;
+ ioim->nsgpgs = 0;
+
+ bfa_stats(itnim, total_ios);
+ fcpim->ios_active++;
+
+ list_add_tail(&ioim->qe, &itnim->io_q);
+
+ return ioim;
+}
+
+void
+bfa_ioim_free(struct bfa_ioim_s *ioim)
+{
+ struct bfa_fcpim_s *fcpim = ioim->fcpim;
+ struct bfa_iotag_s *iotag;
+
+ if (ioim->nsgpgs > 0)
+ bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
+
+ bfa_stats(ioim->itnim, io_comps);
+ fcpim->ios_active--;
+
+ ioim->iotag &= BFA_IOIM_IOTAG_MASK;
+
+ WARN_ON(!(ioim->iotag <
+ (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
+ iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
+
+ if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
+ list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
+ else
+ list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
+
+ list_del(&ioim->qe);
+}
+
+void
+bfa_ioim_start(struct bfa_ioim_s *ioim)
+{
+ bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
+
+ /*
+ * Obtain the queue over which this request has to be issued
+ */
+ ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
+ BFA_FALSE : bfa_itnim_get_reqq(ioim);
+
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
+}
+
+/*
+ * Driver I/O abort request.
+ */
+bfa_status_t
+bfa_ioim_abort(struct bfa_ioim_s *ioim)
+{
+
+ bfa_trc(ioim->bfa, ioim->iotag);
+
+ if (!bfa_ioim_is_abortable(ioim))
+ return BFA_STATUS_FAILED;
+
+ bfa_stats(ioim->itnim, io_aborts);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * BFA TSKIM state machine functions
+ */
+
+/*
+ * Task management command beginning state.
+ */
+static void
+bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_START:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_active);
+ bfa_tskim_gather_ios(tskim);
+
+ /*
+ * If device is offline, do not send TM on wire. Just cleanup
+ * any pending IO requests and complete TM request.
+ */
+ if (!bfa_itnim_is_online(tskim->itnim)) {
+ bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+ tskim->tsk_status = BFI_TSKIM_STS_OK;
+ bfa_tskim_cleanup_ios(tskim);
+ return;
+ }
+
+ if (!bfa_tskim_send(tskim)) {
+ bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
+ bfa_stats(tskim->itnim, tm_qwait);
+ bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
+ &tskim->reqq_wait);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+/*
+ * TM command is active, awaiting completion from firmware to
+ * cleanup IO requests in TM scope.
+ */
+static void
+bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_DONE:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+ bfa_tskim_cleanup_ios(tskim);
+ break;
+
+ case BFA_TSKIM_SM_CLEANUP:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
+ if (!bfa_tskim_send_abort(tskim)) {
+ bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
+ bfa_stats(tskim->itnim, tm_qwait);
+ bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
+ &tskim->reqq_wait);
+ }
+ break;
+
+ case BFA_TSKIM_SM_HWFAIL:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+ bfa_tskim_iocdisable_ios(tskim);
+ bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+/*
+ * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
+ * completion event from firmware.
+ */
+static void
+bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_DONE:
+ /*
+ * Ignore and wait for ABORT completion from firmware.
+ */
+ break;
+
+ case BFA_TSKIM_SM_UTAG:
+ case BFA_TSKIM_SM_CLEANUP_DONE:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+ bfa_tskim_cleanup_ios(tskim);
+ break;
+
+ case BFA_TSKIM_SM_HWFAIL:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+ bfa_tskim_iocdisable_ios(tskim);
+ bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+static void
+bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_IOS_DONE:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+ bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
+ break;
+
+ case BFA_TSKIM_SM_CLEANUP:
+ /*
+ * Ignore, TM command completed on wire.
+ * Notify TM conmpletion on IO cleanup completion.
+ */
+ break;
+
+ case BFA_TSKIM_SM_HWFAIL:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+ bfa_tskim_iocdisable_ios(tskim);
+ bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+/*
+ * Task management command is waiting for room in request CQ
+ */
+static void
+bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_QRESUME:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_active);
+ bfa_tskim_send(tskim);
+ break;
+
+ case BFA_TSKIM_SM_CLEANUP:
+ /*
+ * No need to send TM on wire since ITN is offline.
+ */
+ bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+ bfa_reqq_wcancel(&tskim->reqq_wait);
+ bfa_tskim_cleanup_ios(tskim);
+ break;
+
+ case BFA_TSKIM_SM_HWFAIL:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+ bfa_reqq_wcancel(&tskim->reqq_wait);
+ bfa_tskim_iocdisable_ios(tskim);
+ bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+/*
+ * Task management command is active, awaiting for room in request CQ
+ * to send clean up request.
+ */
+static void
+bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
+ enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_DONE:
+ bfa_reqq_wcancel(&tskim->reqq_wait);
+ /*
+ * Fall through !!!
+ */
+ case BFA_TSKIM_SM_QRESUME:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
+ bfa_tskim_send_abort(tskim);
+ break;
+
+ case BFA_TSKIM_SM_HWFAIL:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+ bfa_reqq_wcancel(&tskim->reqq_wait);
+ bfa_tskim_iocdisable_ios(tskim);
+ bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+/*
+ * BFA callback is pending
+ */
+static void
+bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+ bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+ switch (event) {
+ case BFA_TSKIM_SM_HCB:
+ bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
+ bfa_tskim_free(tskim);
+ break;
+
+ case BFA_TSKIM_SM_CLEANUP:
+ bfa_tskim_notify_comp(tskim);
+ break;
+
+ case BFA_TSKIM_SM_HWFAIL:
+ break;
+
+ default:
+ bfa_sm_fault(tskim->bfa, event);
+ }
+}
+
+static void
+__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_tskim_s *tskim = cbarg;
+
+ if (!complete) {
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
+ return;
+ }
+
+ bfa_stats(tskim->itnim, tm_success);
+ bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
+}
+
+static void
+__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_tskim_s *tskim = cbarg;
+
+ if (!complete) {
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
+ return;
+ }
+
+ bfa_stats(tskim->itnim, tm_failures);
+ bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
+ BFI_TSKIM_STS_FAILED);
+}
+
+static bfa_boolean_t
+bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
+{
+ switch (tskim->tm_cmnd) {
+ case FCP_TM_TARGET_RESET:
+ return BFA_TRUE;
+
+ case FCP_TM_ABORT_TASK_SET:
+ case FCP_TM_CLEAR_TASK_SET:
+ case FCP_TM_LUN_RESET:
+ case FCP_TM_CLEAR_ACA:
+ return !memcmp(&tskim->lun, &lun, sizeof(lun));
+
+ default:
+ WARN_ON(1);
+ }
+
+ return BFA_FALSE;
+}
+
+/*
+ * Gather affected IO requests and task management commands.
+ */
+static void
+bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
+{
+ struct bfa_itnim_s *itnim = tskim->itnim;
+ struct bfa_ioim_s *ioim;
+ struct list_head *qe, *qen;
+ struct scsi_cmnd *cmnd;
+ struct scsi_lun scsilun;
+
+ INIT_LIST_HEAD(&tskim->io_q);
+
+ /*
+ * Gather any active IO requests first.
+ */
+ list_for_each_safe(qe, qen, &itnim->io_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ cmnd = (struct scsi_cmnd *) ioim->dio;
+ int_to_scsilun(cmnd->device->lun, &scsilun);
+ if (bfa_tskim_match_scope(tskim, scsilun)) {
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &tskim->io_q);
+ }
+ }
+
+ /*
+ * Failback any pending IO requests immediately.
+ */
+ list_for_each_safe(qe, qen, &itnim->pending_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ cmnd = (struct scsi_cmnd *) ioim->dio;
+ int_to_scsilun(cmnd->device->lun, &scsilun);
+ if (bfa_tskim_match_scope(tskim, scsilun)) {
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+ bfa_ioim_tov(ioim);
+ }
+ }
+}
+
+/*
+ * IO cleanup completion
+ */
+static void
+bfa_tskim_cleanp_comp(void *tskim_cbarg)
+{
+ struct bfa_tskim_s *tskim = tskim_cbarg;
+
+ bfa_stats(tskim->itnim, tm_io_comps);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
+}
+
+/*
+ * Gather affected IO requests and task management commands.
+ */
+static void
+bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
+{
+ struct bfa_ioim_s *ioim;
+ struct list_head *qe, *qen;
+
+ bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
+
+ list_for_each_safe(qe, qen, &tskim->io_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ bfa_wc_up(&tskim->wc);
+ bfa_ioim_cleanup_tm(ioim, tskim);
+ }
+
+ bfa_wc_wait(&tskim->wc);
+}
+
+/*
+ * Send task management request to firmware.
+ */
+static bfa_boolean_t
+bfa_tskim_send(struct bfa_tskim_s *tskim)
+{
+ struct bfa_itnim_s *itnim = tskim->itnim;
+ struct bfi_tskim_req_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(tskim->bfa, itnim->reqq);
+ if (!m)
+ return BFA_FALSE;
+
+ /*
+ * build i/o request message next
+ */
+ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
+ bfa_fn_lpu(tskim->bfa));
+
+ m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
+ m->itn_fhdl = tskim->itnim->rport->fw_handle;
+ m->t_secs = tskim->tsecs;
+ m->lun = tskim->lun;
+ m->tm_flags = tskim->tm_cmnd;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
+ return BFA_TRUE;
+}
+
+/*
+ * Send abort request to cleanup an active TM to firmware.
+ */
+static bfa_boolean_t
+bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
+{
+ struct bfa_itnim_s *itnim = tskim->itnim;
+ struct bfi_tskim_abortreq_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(tskim->bfa, itnim->reqq);
+ if (!m)
+ return BFA_FALSE;
+
+ /*
+ * build i/o request message next
+ */
+ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
+ bfa_fn_lpu(tskim->bfa));
+
+ m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
+ return BFA_TRUE;
+}
+
+/*
+ * Call to resume task management cmnd waiting for room in request queue.
+ */
+static void
+bfa_tskim_qresume(void *cbarg)
+{
+ struct bfa_tskim_s *tskim = cbarg;
+
+ bfa_stats(tskim->itnim, tm_qresumes);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
+}
+
+/*
+ * Cleanup IOs associated with a task mangement command on IOC failures.
+ */
+static void
+bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
+{
+ struct bfa_ioim_s *ioim;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &tskim->io_q) {
+ ioim = (struct bfa_ioim_s *) qe;
+ bfa_ioim_iocdisable(ioim);
+ }
+}
+
+/*
+ * Notification on completions from related ioim.
+ */
+void
+bfa_tskim_iodone(struct bfa_tskim_s *tskim)
+{
+ bfa_wc_down(&tskim->wc);
+}
+
+/*
+ * Handle IOC h/w failure notification from itnim.
+ */
+void
+bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
+{
+ tskim->notify = BFA_FALSE;
+ bfa_stats(tskim->itnim, tm_iocdowns);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
+}
+
+/*
+ * Cleanup TM command and associated IOs as part of ITNIM offline.
+ */
+void
+bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
+{
+ tskim->notify = BFA_TRUE;
+ bfa_stats(tskim->itnim, tm_cleanups);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
+}
+
+/*
+ * Memory allocation and initialization.
+ */
+void
+bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
+{
+ struct bfa_tskim_s *tskim;
+ struct bfa_fcp_mod_s *fcp = fcpim->fcp;
+ u16 i;
+
+ INIT_LIST_HEAD(&fcpim->tskim_free_q);
+ INIT_LIST_HEAD(&fcpim->tskim_unused_q);
+
+ tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
+ fcpim->tskim_arr = tskim;
+
+ for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
+ /*
+ * initialize TSKIM
+ */
+ memset(tskim, 0, sizeof(struct bfa_tskim_s));
+ tskim->tsk_tag = i;
+ tskim->bfa = fcpim->bfa;
+ tskim->fcpim = fcpim;
+ tskim->notify = BFA_FALSE;
+ bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
+ tskim);
+ bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
+
+ list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
+ }
+
+ bfa_mem_kva_curp(fcp) = (u8 *) tskim;
+}
+
+void
+bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
+ struct bfa_tskim_s *tskim;
+ u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
+
+ tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
+ WARN_ON(tskim->tsk_tag != tsk_tag);
+
+ tskim->tsk_status = rsp->tsk_status;
+
+ /*
+ * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
+ * requests. All other statuses are for normal completions.
+ */
+ if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
+ bfa_stats(tskim->itnim, tm_cleanup_comps);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
+ } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
+ } else {
+ bfa_stats(tskim->itnim, tm_fw_rsps);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
+ }
+}
+
+
+struct bfa_tskim_s *
+bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfa_tskim_s *tskim;
+
+ bfa_q_deq(&fcpim->tskim_free_q, &tskim);
+
+ if (tskim)
+ tskim->dtsk = dtsk;
+
+ return tskim;
+}
+
+void
+bfa_tskim_free(struct bfa_tskim_s *tskim)
+{
+ WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
+ list_del(&tskim->qe);
+ list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
+}
+
+/*
+ * Start a task management command.
+ *
+ * @param[in] tskim BFA task management command instance
+ * @param[in] itnim i-t nexus for the task management command
+ * @param[in] lun lun, if applicable
+ * @param[in] tm_cmnd Task management command code.
+ * @param[in] t_secs Timeout in seconds
+ *
+ * @return None.
+ */
+void
+bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
+ struct scsi_lun lun,
+ enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
+{
+ tskim->itnim = itnim;
+ tskim->lun = lun;
+ tskim->tm_cmnd = tm_cmnd;
+ tskim->tsecs = tsecs;
+ tskim->notify = BFA_FALSE;
+ bfa_stats(itnim, tm_cmnds);
+
+ list_add_tail(&tskim->qe, &itnim->tsk_q);
+ bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
+}
+
+void
+bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
+ bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
+ list_add_tail(qe, &fcpim->tskim_unused_q);
+ }
+}
+
+/* BFA FCP module - parent module for fcpim */
+
+BFA_MODULE(fcp);
+
+static void
+bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_ios, num_io_req;
+ u32 km_len = 0;
+
+ /*
+ * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
+ * So if the values are non zero, adjust them appropriately.
+ */
+ if (cfg->fwcfg.num_ioim_reqs &&
+ cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+ else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+
+ if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
+ cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+
+ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+ if (num_io_req > BFA_IO_MAX) {
+ if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
+ cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
+ cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
+ } else if (cfg->fwcfg.num_fwtio_reqs)
+ cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+ else
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+ }
+
+ bfa_fcpim_meminfo(cfg, &km_len);
+
+ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+ km_len += num_io_req * sizeof(struct bfa_iotag_s);
+ km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
+
+ /* dma memory */
+ nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+ per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
+
+ bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+ if (num_io_req >= per_seg_ios) {
+ num_io_req -= per_seg_ios;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_ios * BFI_IOIM_SNSLEN);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_io_req * BFI_IOIM_SNSLEN);
+ }
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, fcp_kva, km_len);
+}
+
+static void
+bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 idx, nsegs, num_io_req;
+
+ fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+ fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+ fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
+ fcp->num_itns = cfg->fwcfg.num_rports;
+ fcp->bfa = bfa;
+
+ /*
+ * Setup the pool of snsbase addr's, that is passed to fw as
+ * part of bfi_iocfc_cfg_s.
+ */
+ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+ nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+
+ bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+
+ if (!bfa_mem_dma_virt(seg_ptr))
+ break;
+
+ fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
+ fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
+ bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
+ }
+
+ fcp->throttle_update_required = 1;
+ bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
+
+ bfa_iotag_attach(fcp);
+
+ fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
+ bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
+ (fcp->num_itns * sizeof(struct bfa_itn_s));
+ memset(fcp->itn_arr, 0,
+ (fcp->num_itns * sizeof(struct bfa_itn_s)));
+}
+
+static void
+bfa_fcp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_start(struct bfa_s *bfa)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+ /*
+ * bfa_init() with flash read is complete. now invalidate the stale
+ * content of lun mask like unit attention, rp tag and lp tag.
+ */
+ bfa_ioim_lm_init(fcp->bfa);
+}
+
+static void
+bfa_fcp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+ bfa_fcpim_iocdisable(fcp);
+}
+
+void
+bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
+{
+ struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ /* Update io throttle value only once during driver load time */
+ if (!mod->throttle_update_required)
+ return;
+
+ for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
+ bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
+ list_add_tail(qe, &mod->iotag_unused_q);
+ }
+
+ if (mod->num_ioim_reqs != num_ioim_fw) {
+ bfa_trc(bfa, mod->num_ioim_reqs);
+ bfa_trc(bfa, num_ioim_fw);
+ }
+
+ mod->max_ioim_reqs = max_ioim_fw;
+ mod->num_ioim_reqs = num_ioim_fw;
+ mod->throttle_update_required = 0;
+}
+
+void
+bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ struct bfa_itn_s *itn;
+
+ itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
+ itn->isr = isr;
+}
+
+/*
+ * Itn interrupt processing.
+ */
+void
+bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ union bfi_itn_i2h_msg_u msg;
+ struct bfa_itn_s *itn;
+
+ msg.msg = m;
+ itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
+
+ if (itn->isr)
+ itn->isr(bfa, m);
+ else
+ WARN_ON(1);
+}
+
+void
+bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
+{
+ struct bfa_iotag_s *iotag;
+ u16 num_io_req, i;
+
+ iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
+ fcp->iotag_arr = iotag;
+
+ INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
+ INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
+ INIT_LIST_HEAD(&fcp->iotag_unused_q);
+
+ num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
+ for (i = 0; i < num_io_req; i++, iotag++) {
+ memset(iotag, 0, sizeof(struct bfa_iotag_s));
+ iotag->tag = i;
+ if (i < fcp->num_ioim_reqs)
+ list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
+ else
+ list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
+ }
+
+ bfa_mem_kva_curp(fcp) = (u8 *) iotag;
+}
+
+
+/**
+ * To send config req, first try to use throttle value from flash
+ * If 0, then use driver parameter
+ * We need to use min(flash_val, drv_val) because
+ * memory allocation was done based on this cfg'd value
+ */
+u16
+bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
+{
+ u16 tmp;
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+ /*
+ * If throttle value from flash is already in effect after driver is
+ * loaded then until next load, always return current value instead
+ * of actual flash value
+ */
+ if (!fcp->throttle_update_required)
+ return (u16)fcp->num_ioim_reqs;
+
+ tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
+ if (!tmp || (tmp > drv_cfg_param))
+ tmp = drv_cfg_param;
+
+ return tmp;
+}
+
+bfa_status_t
+bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
+{
+ if (!bfa_dconf_get_min_cfg(bfa)) {
+ BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
+ BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
+ return BFA_STATUS_OK;
+ }
+
+ return BFA_STATUS_FAILED;
+}
+
+u16
+bfa_fcpim_read_throttle(struct bfa_s *bfa)
+{
+ struct bfa_throttle_cfg_s *throttle_cfg =
+ &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
+
+ return ((!bfa_dconf_get_min_cfg(bfa)) ?
+ ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
+}
+
+bfa_status_t
+bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
+{
+ /* in min cfg no commands should run. */
+ if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
+ (!bfa_dconf_read_data_valid(bfa)))
+ return BFA_STATUS_FAILED;
+
+ bfa_fcpim_write_throttle(bfa, value);
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct bfa_defs_fcpim_throttle_s throttle;
+
+ if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
+ (!bfa_dconf_read_data_valid(bfa)))
+ return BFA_STATUS_FAILED;
+
+ memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
+
+ throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
+ throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
+ if (!throttle.cfg_value)
+ throttle.cfg_value = throttle.cur_value;
+ throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
+ memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
new file mode 100644
index 000000000..e693af6e5
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCPIM_H__
+#define __BFA_FCPIM_H__
+
+#include "bfa.h"
+#include "bfa_svc.h"
+#include "bfi_ms.h"
+#include "bfa_defs_svc.h"
+#include "bfa_cs.h"
+
+/* FCP module related definitions */
+#define BFA_IO_MAX BFI_IO_MAX
+#define BFA_FWTIO_MAX 2000
+
+struct bfa_fcp_mod_s;
+struct bfa_iotag_s {
+ struct list_head qe; /* queue element */
+ u16 tag; /* FW IO tag */
+};
+
+struct bfa_itn_s {
+ bfa_isr_func_t isr;
+};
+
+void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
+void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw);
+
+#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod)
+#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
+#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \
+ (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
+#define BFA_ITN_FROM_TAG(_fcp, _tag) \
+ ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
+#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
+ bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK), \
+ BFI_IOIM_SNSLEN)
+
+
+#define BFA_ITNIM_MIN 32
+#define BFA_ITNIM_MAX 1024
+
+#define BFA_IOIM_MIN 8
+#define BFA_IOIM_MAX 2000
+
+#define BFA_TSKIM_MIN 4
+#define BFA_TSKIM_MAX 512
+#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
+#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
+
+
+#define bfa_itnim_ioprofile_update(__itnim, __index) \
+ (__itnim->ioprofile.iocomps[__index]++)
+
+#define BFA_IOIM_RETRY_TAG_OFFSET 11
+#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */
+#define BFA_IOIM_RETRY_MAX 7
+
+/* Buckets are are 512 bytes to 2MB */
+static inline u32
+bfa_ioim_get_index(u32 n) {
+ int pos = 0;
+ if (n >= (1UL)<<22)
+ return BFA_IOBUCKET_MAX - 1;
+ n >>= 8;
+ if (n >= (1UL)<<16) {
+ n >>= 16;
+ pos += 16;
+ }
+ if (n >= 1 << 8) {
+ n >>= 8;
+ pos += 8;
+ }
+ if (n >= 1 << 4) {
+ n >>= 4;
+ pos += 4;
+ }
+ if (n >= 1 << 2) {
+ n >>= 2;
+ pos += 2;
+ }
+ if (n >= 1 << 1)
+ pos += 1;
+
+ return (n == 0) ? (0) : pos;
+}
+
+/*
+ * forward declarations
+ */
+struct bfa_ioim_s;
+struct bfa_tskim_s;
+struct bfad_ioim_s;
+struct bfad_tskim_s;
+
+typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+
+struct bfa_fcpim_s {
+ struct bfa_s *bfa;
+ struct bfa_fcp_mod_s *fcp;
+ struct bfa_itnim_s *itnim_arr;
+ struct bfa_ioim_s *ioim_arr;
+ struct bfa_ioim_sp_s *ioim_sp_arr;
+ struct bfa_tskim_s *tskim_arr;
+ int num_itnims;
+ int num_tskim_reqs;
+ u32 path_tov;
+ u16 q_depth;
+ u8 reqq; /* Request queue to be used */
+ struct list_head itnim_q; /* queue of active itnim */
+ struct list_head ioim_resfree_q; /* IOs waiting for f/w */
+ struct list_head ioim_comp_q; /* IO global comp Q */
+ struct list_head tskim_free_q;
+ struct list_head tskim_unused_q; /* Unused tskim Q */
+ u32 ios_active; /* current active IOs */
+ u32 delay_comp;
+ struct bfa_fcpim_del_itn_stats_s del_itn_stats;
+ bfa_boolean_t ioredirect;
+ bfa_boolean_t io_profile;
+ u32 io_profile_start_time;
+ bfa_fcpim_profile_t profile_comp;
+ bfa_fcpim_profile_t profile_start;
+};
+
+/* Max FCP dma segs required */
+#define BFA_FCP_DMA_SEGS BFI_IOIM_SNSBUF_SEGS
+
+struct bfa_fcp_mod_s {
+ struct bfa_s *bfa;
+ struct list_head iotag_ioim_free_q; /* free IO resources */
+ struct list_head iotag_tio_free_q; /* free IO resources */
+ struct list_head iotag_unused_q; /* unused IO resources*/
+ struct bfa_iotag_s *iotag_arr;
+ struct bfa_itn_s *itn_arr;
+ int max_ioim_reqs;
+ int num_ioim_reqs;
+ int num_fwtio_reqs;
+ int num_itns;
+ struct bfa_dma_s snsbase[BFA_FCP_DMA_SEGS];
+ struct bfa_fcpim_s fcpim;
+ struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
+ int throttle_update_required;
+};
+
+/*
+ * BFA IO (initiator mode)
+ */
+struct bfa_ioim_s {
+ struct list_head qe; /* queue elememt */
+ bfa_sm_t sm; /* BFA ioim state machine */
+ struct bfa_s *bfa; /* BFA module */
+ struct bfa_fcpim_s *fcpim; /* parent fcpim module */
+ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
+ struct bfad_ioim_s *dio; /* driver IO handle */
+ u16 iotag; /* FWI IO tag */
+ u16 abort_tag; /* unqiue abort request tag */
+ u16 nsges; /* number of SG elements */
+ u16 nsgpgs; /* number of SG pages */
+ struct bfa_sgpg_s *sgpg; /* first SG page */
+ struct list_head sgpg_q; /* allocated SG pages */
+ struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
+ bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
+ struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
+ u8 reqq; /* Request queue for I/O */
+ u8 mode; /* IO is passthrough or not */
+ u64 start_time; /* IO's Profile start val */
+};
+
+struct bfa_ioim_sp_s {
+ struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
+ struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_boolean_t abort_explicit; /* aborted by OS */
+ struct bfa_tskim_s *tskim; /* Relevant TM cmd */
+};
+
+/*
+ * BFA Task management command (initiator mode)
+ */
+struct bfa_tskim_s {
+ struct list_head qe;
+ bfa_sm_t sm;
+ struct bfa_s *bfa; /* BFA module */
+ struct bfa_fcpim_s *fcpim; /* parent fcpim module */
+ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
+ struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
+ bfa_boolean_t notify; /* notify itnim on TM comp */
+ struct scsi_lun lun; /* lun if applicable */
+ enum fcp_tm_cmnd tm_cmnd; /* task management command */
+ u16 tsk_tag; /* FWI IO tag */
+ u8 tsecs; /* timeout in seconds */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ struct list_head io_q; /* queue of affected IOs */
+ struct bfa_wc_s wc; /* waiting counter */
+ struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
+ enum bfi_tskim_status tsk_status; /* TM status */
+};
+
+/*
+ * BFA i-t-n (initiator mode)
+ */
+struct bfa_itnim_s {
+ struct list_head qe; /* queue element */
+ bfa_sm_t sm; /* i-t-n im BFA state machine */
+ struct bfa_s *bfa; /* bfa instance */
+ struct bfa_rport_s *rport; /* bfa rport */
+ void *ditn; /* driver i-t-n structure */
+ struct bfi_mhdr_s mhdr; /* pre-built mhdr */
+ u8 msg_no; /* itnim/rport firmware handle */
+ u8 reqq; /* CQ for requests */
+ struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
+ struct list_head pending_q; /* queue of pending IO requests */
+ struct list_head io_q; /* queue of active IO requests */
+ struct list_head io_cleanup_q; /* IO being cleaned up */
+ struct list_head tsk_q; /* queue of active TM commands */
+ struct list_head delay_comp_q; /* queue of failed inflight cmds */
+ bfa_boolean_t seq_rec; /* SQER supported */
+ bfa_boolean_t is_online; /* itnim is ONLINE for IO */
+ bfa_boolean_t iotov_active; /* IO TOV timer is active */
+ struct bfa_wc_s wc; /* waiting counter */
+ struct bfa_timer_s timer; /* pending IO TOV */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ struct bfa_fcpim_s *fcpim; /* fcpim module */
+ struct bfa_itnim_iostats_s stats;
+ struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
+#define BFA_FCPIM(_hal) (&(_hal)->modules.fcp_mod.fcpim)
+#define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK)
+#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
+ (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
+#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
+ (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
+
+#define bfa_io_profile_start_time(_bfa) \
+ ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time)
+#define bfa_fcpim_get_io_profile(_bfa) \
+ ((_bfa)->modules.fcp_mod.fcpim.io_profile)
+#define bfa_ioim_update_iotag(__ioim) do { \
+ uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \
+ k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \
+ (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
+} while (0)
+
+static inline bfa_boolean_t
+bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
+{
+ uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;
+ if (k < BFA_IOIM_RETRY_MAX)
+ return BFA_FALSE;
+ return BFA_TRUE;
+}
+
+/*
+ * function prototypes
+ */
+void bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
+void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
+ struct bfi_msg_s *msg);
+void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
+void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
+ struct bfa_tskim_s *tskim);
+void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
+void bfa_ioim_tov(struct bfa_ioim_s *ioim);
+
+void bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
+void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
+void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
+void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
+
+void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
+void bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
+void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
+void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
+void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
+bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
+
+/*
+ * bfa fcpim module API functions
+ */
+void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
+u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
+u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
+ struct bfa_itnim_iostats_s *stats, u8 lp_tag);
+void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
+ struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
+
+#define bfa_fcpim_ioredirect_enabled(__bfa) \
+ (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
+
+#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
+{ \
+ struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa); \
+ __fcpim->reqq++; \
+ __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
+ *(__qid) = __fcpim->reqq; \
+}
+
+#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \
+ *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
+/*
+ * bfa itnim API functions
+ */
+struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
+ struct bfa_rport_s *rport, void *itnim);
+void bfa_itnim_delete(struct bfa_itnim_s *itnim);
+void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec);
+void bfa_itnim_offline(struct bfa_itnim_s *itnim);
+void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
+bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+ struct bfa_itnim_ioprofile_s *ioprofile);
+
+#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
+
+/*
+ * BFA completion callback for bfa_itnim_online().
+ */
+void bfa_cb_itnim_online(void *itnim);
+
+/*
+ * BFA completion callback for bfa_itnim_offline().
+ */
+void bfa_cb_itnim_offline(void *itnim);
+void bfa_cb_itnim_tov_begin(void *itnim);
+void bfa_cb_itnim_tov(void *itnim);
+
+/*
+ * BFA notification to FCS/driver for second level error recovery.
+ * Atleast one I/O request has timedout and target is unresponsive to
+ * repeated abort requests. Second level error recovery should be initiated
+ * by starting implicit logout and recovery procedures.
+ */
+void bfa_cb_itnim_sler(void *itnim);
+
+/*
+ * bfa ioim API functions
+ */
+struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
+ struct bfad_ioim_s *dio,
+ struct bfa_itnim_s *itnim,
+ u16 nsgles);
+
+void bfa_ioim_free(struct bfa_ioim_s *ioim);
+void bfa_ioim_start(struct bfa_ioim_s *ioim);
+bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim);
+void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
+ bfa_boolean_t iotov);
+/*
+ * I/O completion notification.
+ *
+ * @param[in] dio driver IO structure
+ * @param[in] io_status IO completion status
+ * @param[in] scsi_status SCSI status returned by target
+ * @param[in] sns_len SCSI sense length, 0 if none
+ * @param[in] sns_info SCSI sense data, if any
+ * @param[in] residue Residual length
+ *
+ * @return None
+ */
+void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
+ enum bfi_ioim_status io_status,
+ u8 scsi_status, int sns_len,
+ u8 *sns_info, s32 residue);
+
+/*
+ * I/O good completion notification.
+ */
+void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
+
+/*
+ * I/O abort completion notification
+ */
+void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
+
+/*
+ * bfa tskim API functions
+ */
+struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
+ struct bfad_tskim_s *dtsk);
+void bfa_tskim_free(struct bfa_tskim_s *tskim);
+void bfa_tskim_start(struct bfa_tskim_s *tskim,
+ struct bfa_itnim_s *itnim, struct scsi_lun lun,
+ enum fcp_tm_cmnd tm, u8 t_secs);
+void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+ enum bfi_tskim_status tsk_status);
+
+void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
+ wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
+bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
+bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
+bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+u16 bfa_fcpim_read_throttle(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value);
+bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value);
+bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf);
+u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param);
+
+#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
new file mode 100644
index 000000000..0f1945595
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -0,0 +1,1712 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfa_fcs.c BFA FCS main
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+
+BFA_TRC_FILE(FCS, FCS);
+
+/*
+ * FCS sub-modules
+ */
+struct bfa_fcs_mod_s {
+ void (*attach) (struct bfa_fcs_s *fcs);
+ void (*modinit) (struct bfa_fcs_s *fcs);
+ void (*modexit) (struct bfa_fcs_s *fcs);
+};
+
+#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
+
+static struct bfa_fcs_mod_s fcs_modules[] = {
+ { bfa_fcs_port_attach, NULL, NULL },
+ { bfa_fcs_uf_attach, NULL, NULL },
+ { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
+ bfa_fcs_fabric_modexit },
+};
+
+/*
+ * fcs_api BFA FCS API
+ */
+
+static void
+bfa_fcs_exit_comp(void *fcs_cbarg)
+{
+ struct bfa_fcs_s *fcs = fcs_cbarg;
+ struct bfad_s *bfad = fcs->bfad;
+
+ complete(&bfad->comp);
+}
+
+
+
+/*
+ * fcs_api BFA FCS API
+ */
+
+/*
+ * fcs attach -- called once to initialize data structures at driver attach time
+ */
+void
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+ bfa_boolean_t min_cfg)
+{
+ int i;
+ struct bfa_fcs_mod_s *mod;
+
+ fcs->bfa = bfa;
+ fcs->bfad = bfad;
+ fcs->min_cfg = min_cfg;
+ fcs->num_rport_logins = 0;
+
+ bfa->fcs = BFA_TRUE;
+ fcbuild_init();
+
+ for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
+ mod = &fcs_modules[i];
+ if (mod->attach)
+ mod->attach(fcs);
+ }
+}
+
+/*
+ * fcs initialization, called once after bfa initialization is complete
+ */
+void
+bfa_fcs_init(struct bfa_fcs_s *fcs)
+{
+ int i;
+ struct bfa_fcs_mod_s *mod;
+
+ for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
+ mod = &fcs_modules[i];
+ if (mod->modinit)
+ mod->modinit(fcs);
+ }
+}
+
+/*
+ * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
+ * with values learned during bfa_init firmware GETATTR REQ.
+ */
+void
+bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+ struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+ struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc;
+
+ port_cfg->nwwn = ioc->attr->nwwn;
+ port_cfg->pwwn = ioc->attr->pwwn;
+}
+
+/*
+ * Stop FCS operations.
+ */
+void
+bfa_fcs_stop(struct bfa_fcs_s *fcs)
+{
+ bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+ bfa_wc_up(&fcs->wc);
+ bfa_fcs_fabric_modstop(fcs);
+ bfa_wc_wait(&fcs->wc);
+}
+
+/*
+ * fcs pbc vport initialization
+ */
+void
+bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs)
+{
+ int i, npbc_vports;
+ struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
+
+ /* Initialize pbc vports */
+ if (!fcs->min_cfg) {
+ npbc_vports =
+ bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
+ for (i = 0; i < npbc_vports; i++)
+ bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
+ }
+}
+
+/*
+ * brief
+ * FCS driver details initialization.
+ *
+ * param[in] fcs FCS instance
+ * param[in] driver_info Driver Details
+ *
+ * return None
+ */
+void
+bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
+ struct bfa_fcs_driver_info_s *driver_info)
+{
+
+ fcs->driver_info = *driver_info;
+
+ bfa_fcs_fabric_psymb_init(&fcs->fabric);
+ bfa_fcs_fabric_nsymb_init(&fcs->fabric);
+}
+
+/*
+ * brief
+ * FCS instance cleanup and exit.
+ *
+ * param[in] fcs FCS instance
+ * return None
+ */
+void
+bfa_fcs_exit(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_mod_s *mod;
+ int nmods, i;
+
+ bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+
+ nmods = ARRAY_SIZE(fcs_modules);
+
+ for (i = 0; i < nmods; i++) {
+
+ mod = &fcs_modules[i];
+ if (mod->modexit) {
+ bfa_wc_up(&fcs->wc);
+ mod->modexit(fcs);
+ }
+ }
+
+ bfa_wc_wait(&fcs->wc);
+}
+
+
+/*
+ * Fabric module implementation.
+ */
+
+#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */
+#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
+
+#define bfa_fcs_fabric_set_opertype(__fabric) do { \
+ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
+ == BFA_PORT_TOPOLOGY_P2P) { \
+ if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \
+ (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \
+ else \
+ (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \
+ } else \
+ (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \
+} while (0)
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_delay(void *cbarg);
+static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_delete_comp(void *cbarg);
+static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_stop_comp(void *cbarg);
+static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
+ struct fchs_s *fchs, u16 len);
+static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
+ struct fchs_s *fchs, u16 len);
+static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
+ struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rspfchs);
+
+static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+/*
+ * Beginning state before fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_CREATE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+ bfa_fcs_fabric_init(fabric);
+ bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_UP:
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Beginning state before fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ struct bfa_s *bfa = fabric->fcs->bfa;
+
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_START:
+ if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ break;
+ }
+ if (bfa_fcport_get_topology(bfa) ==
+ BFA_PORT_TOPOLOGY_LOOP) {
+ fabric->fab_type = BFA_FCS_FABRIC_LOOP;
+ fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
+ fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
+ bfa_sm_set_state(fabric,
+ bfa_fcs_fabric_sm_online);
+ bfa_fcs_fabric_set_opertype(fabric);
+ bfa_fcs_lport_online(&fabric->bport);
+ } else {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+ bfa_fcs_fabric_login(fabric);
+ }
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_UP:
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Link is down, awaiting LINK UP event from port. This is also the
+ * first state at fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ struct bfa_s *bfa = fabric->fcs->bfa;
+
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_LINK_UP:
+ if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+ bfa_fcs_fabric_login(fabric);
+ break;
+ }
+ fabric->fab_type = BFA_FCS_FABRIC_LOOP;
+ fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
+ fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+ bfa_fcs_fabric_set_opertype(fabric);
+ bfa_fcs_lport_online(&fabric->bport);
+ break;
+
+ case BFA_FCS_FABRIC_SM_RETRY_OP:
+ case BFA_FCS_FABRIC_SM_LOOPBACK:
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_STOP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+ bfa_fcs_fabric_stop(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * FLOGI is in progress, awaiting FLOGI reply.
+ */
+static void
+bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_CONT_OP:
+
+ bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+ fabric->bb_credit);
+ fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
+
+ if (fabric->auth_reqd && fabric->is_auth) {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
+ bfa_trc(fabric->fcs, event);
+ } else {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+ bfa_fcs_fabric_notify_online(fabric);
+ }
+ break;
+
+ case BFA_FCS_FABRIC_SM_RETRY_OP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
+ bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
+ bfa_fcs_fabric_delay, fabric,
+ BFA_FCS_FABRIC_RETRY_DELAY);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LOOPBACK:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_fabric_set_opertype(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_NO_FABRIC:
+ fabric->fab_type = BFA_FCS_FABRIC_N2N;
+ bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+ fabric->bb_credit);
+ bfa_fcs_fabric_notify_online(fabric);
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+
+static void
+bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_DELAYED:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+ bfa_fcs_fabric_login(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ bfa_timer_stop(&fabric->delay_timer);
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_timer_stop(&fabric->delay_timer);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Authentication is in progress, awaiting authentication results.
+ */
+static void
+bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_AUTH_FAILED:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ break;
+
+ case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+ bfa_fcs_fabric_notify_online(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_PERF_EVFP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Authentication failed
+ */
+void
+bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ bfa_fcs_fabric_notify_offline(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Port is in loopback mode.
+ */
+void
+bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ bfa_fcs_fabric_notify_offline(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * There is no attached fabric - private loop or NPort-to-NPort topology.
+ */
+static void
+bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_fabric_notify_offline(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_NO_FABRIC:
+ bfa_trc(fabric->fcs, fabric->bb_credit);
+ bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+ fabric->bb_credit);
+ break;
+
+ case BFA_FCS_FABRIC_SM_RETRY_OP:
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Fabric is online - normal operating state.
+ */
+void
+bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ struct bfa_s *bfa = fabric->fcs->bfa;
+
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+ if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
+ bfa_fcs_lport_offline(&fabric->bport);
+ } else {
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_fabric_notify_offline(fabric);
+ }
+ break;
+
+ case BFA_FCS_FABRIC_SM_DELETE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_STOP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping);
+ bfa_fcs_fabric_stop(fabric);
+ break;
+
+ case BFA_FCS_FABRIC_SM_AUTH_FAILED:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+ break;
+
+ case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Exchanging virtual fabric parameters.
+ */
+static void
+bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_CONT_OP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
+ break;
+
+ case BFA_FCS_FABRIC_SM_ISOLATE:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * EVFP exchange complete and VFT tagging is enabled.
+ */
+static void
+bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+}
+
+/*
+ * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
+ */
+static void
+bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
+ char pwwn_ptr[BFA_STRING_32];
+
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+ wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);
+
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Port is isolated due to VF_ID mismatch. "
+ "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
+ pwwn_ptr, fabric->fcs->port_vfid,
+ fabric->event_arg.swp_vfid);
+}
+
+/*
+ * Fabric is being deleted, awaiting vport delete completions.
+ */
+static void
+bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_DELCOMP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+ bfa_wc_down(&fabric->fcs->wc);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_UP:
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ bfa_fcs_fabric_notify_offline(fabric);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Fabric is being stopped, awaiting vport stop completions.
+ */
+static void
+bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ struct bfa_s *bfa = fabric->fcs->bfa;
+
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_STOPCOMP:
+ if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+ } else {
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+ bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
+ }
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_UP:
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+ else
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * Fabric is being stopped, cleanup without FLOGO
+ */
+static void
+bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_FABRIC_SM_STOPCOMP:
+ case BFA_FCS_FABRIC_SM_LOGOCOMP:
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+ bfa_wc_down(&(fabric->fcs)->wc);
+ break;
+
+ case BFA_FCS_FABRIC_SM_LINK_DOWN:
+ /*
+ * Ignore - can get this event if we get notified about IOC down
+ * before the fabric completion callbk is done.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(fabric->fcs, event);
+ }
+}
+
+/*
+ * fcs_fabric_private fabric private functions
+ */
+
+static void
+bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+
+ port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
+ port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
+ port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
+}
+
+/*
+ * Port Symbolic Name Creation for base port.
+ */
+void
+bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+ struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+ bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
+ /* Model name/number */
+ strncpy((char *)&port_cfg->sym_name, model,
+ BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
+ strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Driver Version */
+ strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
+ BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
+ strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Host machine name */
+ strncat((char *)&port_cfg->sym_name,
+ (char *)driver_info->host_machine_name,
+ BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
+ strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /*
+ * Host OS Info :
+ * If OS Patch Info is not there, do not truncate any bytes from the
+ * OS name string and instead copy the entire OS info string (64 bytes).
+ */
+ if (driver_info->host_os_patch[0] == '\0') {
+ strncat((char *)&port_cfg->sym_name,
+ (char *)driver_info->host_os_name,
+ BFA_FCS_OS_STR_LEN);
+ strncat((char *)&port_cfg->sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ } else {
+ strncat((char *)&port_cfg->sym_name,
+ (char *)driver_info->host_os_name,
+ BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
+ strncat((char *)&port_cfg->sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Append host OS Patch Info */
+ strncat((char *)&port_cfg->sym_name,
+ (char *)driver_info->host_os_patch,
+ BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ }
+
+ /* null terminate */
+ port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
+}
+
+/*
+ * Node Symbolic Name Creation for base port and all vports
+ */
+void
+bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+ struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+ bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
+ /* Model name/number */
+ strncpy((char *)&port_cfg->node_sym_name, model,
+ BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
+ strncat((char *)&port_cfg->node_sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Driver Version */
+ strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
+ BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
+ strncat((char *)&port_cfg->node_sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* Host machine name */
+ strncat((char *)&port_cfg->node_sym_name,
+ (char *)driver_info->host_machine_name,
+ BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
+ strncat((char *)&port_cfg->node_sym_name,
+ BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+ /* null terminate */
+ port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
+}
+
+/*
+ * bfa lps login completion callback
+ */
+void
+bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
+{
+ struct bfa_fcs_fabric_s *fabric = uarg;
+
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_trc(fabric->fcs, status);
+
+ switch (status) {
+ case BFA_STATUS_OK:
+ fabric->stats.flogi_accepts++;
+ break;
+
+ case BFA_STATUS_INVALID_MAC:
+ /* Only for CNA */
+ fabric->stats.flogi_acc_err++;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+
+ return;
+
+ case BFA_STATUS_EPROTOCOL:
+ switch (fabric->lps->ext_status) {
+ case BFA_EPROTO_BAD_ACCEPT:
+ fabric->stats.flogi_acc_err++;
+ break;
+
+ case BFA_EPROTO_UNKNOWN_RSP:
+ fabric->stats.flogi_unknown_rsp++;
+ break;
+
+ default:
+ break;
+ }
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+
+ return;
+
+ case BFA_STATUS_FABRIC_RJT:
+ fabric->stats.flogi_rejects++;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+ return;
+
+ default:
+ fabric->stats.flogi_rsp_err++;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+ return;
+ }
+
+ fabric->bb_credit = fabric->lps->pr_bbcred;
+ bfa_trc(fabric->fcs, fabric->bb_credit);
+
+ if (!(fabric->lps->brcd_switch))
+ fabric->fabric_name = fabric->lps->pr_nwwn;
+
+ /*
+ * Check port type. It should be 1 = F-port.
+ */
+ if (fabric->lps->fport) {
+ fabric->bport.pid = fabric->lps->lp_pid;
+ fabric->is_npiv = fabric->lps->npiv_en;
+ fabric->is_auth = fabric->lps->auth_req;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
+ } else {
+ /*
+ * Nport-2-Nport direct attached
+ */
+ fabric->bport.port_topo.pn2n.rem_port_wwn =
+ fabric->lps->pr_pwwn;
+ fabric->fab_type = BFA_FCS_FABRIC_N2N;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
+ }
+
+ bfa_trc(fabric->fcs, fabric->bport.pid);
+ bfa_trc(fabric->fcs, fabric->is_npiv);
+ bfa_trc(fabric->fcs, fabric->is_auth);
+}
+/*
+ * Allocate and send FLOGI.
+ */
+static void
+bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_s *bfa = fabric->fcs->bfa;
+ struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
+ u8 alpa = 0;
+
+
+ bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
+ pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
+
+ fabric->stats.flogi_sent++;
+}
+
+static void
+bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe, *qen;
+
+ bfa_trc(fabric->fcs, fabric->fabric_name);
+
+ bfa_fcs_fabric_set_opertype(fabric);
+ fabric->stats.fabric_onlines++;
+
+ /*
+ * notify online event to base and then virtual ports
+ */
+ bfa_fcs_lport_online(&fabric->bport);
+
+ list_for_each_safe(qe, qen, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ bfa_fcs_vport_online(vport);
+ }
+}
+
+static void
+bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe, *qen;
+
+ bfa_trc(fabric->fcs, fabric->fabric_name);
+ fabric->stats.fabric_offlines++;
+
+ /*
+ * notify offline event first to vports and then base port.
+ */
+ list_for_each_safe(qe, qen, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ bfa_fcs_vport_offline(vport);
+ }
+
+ bfa_fcs_lport_offline(&fabric->bport);
+
+ fabric->fabric_name = 0;
+ fabric->fabric_ip_addr[0] = 0;
+}
+
+static void
+bfa_fcs_fabric_delay(void *cbarg)
+{
+ struct bfa_fcs_fabric_s *fabric = cbarg;
+
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
+}
+
+/*
+ * Stop all vports and wait for vport stop completions.
+ */
+static void
+bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe, *qen;
+
+ bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
+
+ list_for_each_safe(qe, qen, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ bfa_wc_up(&fabric->stop_wc);
+ bfa_fcs_vport_fcs_stop(vport);
+ }
+
+ bfa_wc_up(&fabric->stop_wc);
+ bfa_fcs_lport_stop(&fabric->bport);
+ bfa_wc_wait(&fabric->stop_wc);
+}
+
+/*
+ * Delete all vports and wait for vport delete completions.
+ */
+static void
+bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ bfa_fcs_vport_fcs_delete(vport);
+ }
+
+ bfa_fcs_lport_delete(&fabric->bport);
+ bfa_wc_wait(&fabric->wc);
+}
+
+static void
+bfa_fcs_fabric_delete_comp(void *cbarg)
+{
+ struct bfa_fcs_fabric_s *fabric = cbarg;
+
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
+}
+
+static void
+bfa_fcs_fabric_stop_comp(void *cbarg)
+{
+ struct bfa_fcs_fabric_s *fabric = cbarg;
+
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP);
+}
+
+/*
+ * fcs_fabric_public fabric public functions
+ */
+
+/*
+ * Attach time initialization.
+ */
+void
+bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric;
+
+ fabric = &fcs->fabric;
+ memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+
+ /*
+ * Initialize base fabric.
+ */
+ fabric->fcs = fcs;
+ INIT_LIST_HEAD(&fabric->vport_q);
+ INIT_LIST_HEAD(&fabric->vf_q);
+ fabric->lps = bfa_lps_alloc(fcs->bfa);
+ WARN_ON(!fabric->lps);
+
+ /*
+ * Initialize fabric delete completion handler. Fabric deletion is
+ * complete when the last vport delete is complete.
+ */
+ bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
+ bfa_wc_up(&fabric->wc); /* For the base port */
+
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+ bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
+}
+
+void
+bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+{
+ bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
+ bfa_trc(fcs, 0);
+}
+
+/*
+ * Module cleanup
+ */
+void
+bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric;
+
+ bfa_trc(fcs, 0);
+
+ /*
+ * Cleanup base fabric.
+ */
+ fabric = &fcs->fabric;
+ bfa_lps_delete(fabric->lps);
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
+}
+
+/*
+ * Fabric module stop -- stop FCS actions
+ */
+void
+bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric;
+
+ bfa_trc(fcs, 0);
+ fabric = &fcs->fabric;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP);
+}
+
+/*
+ * Fabric module start -- kick starts FCS actions
+ */
+void
+bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric;
+
+ bfa_trc(fcs, 0);
+ fabric = &fcs->fabric;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
+}
+
+
+/*
+ * Link up notification from BFA physical port module.
+ */
+void
+bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
+}
+
+/*
+ * Link down notification from BFA physical port module.
+ */
+void
+bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
+{
+ bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
+}
+
+/*
+ * A child vport is being created in the fabric.
+ *
+ * Call from vport module at vport creation. A list of base port and vports
+ * belonging to a fabric is maintained to propagate link events.
+ *
+ * param[in] fabric - Fabric instance. This can be a base fabric or vf.
+ * param[in] vport - Vport being created.
+ *
+ * @return None (always succeeds)
+ */
+void
+bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
+ struct bfa_fcs_vport_s *vport)
+{
+ /*
+ * - add vport to fabric's vport_q
+ */
+ bfa_trc(fabric->fcs, fabric->vf_id);
+
+ list_add_tail(&vport->qe, &fabric->vport_q);
+ fabric->num_vports++;
+ bfa_wc_up(&fabric->wc);
+}
+
+/*
+ * A child vport is being deleted from fabric.
+ *
+ * Vport is being deleted.
+ */
+void
+bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
+ struct bfa_fcs_vport_s *vport)
+{
+ list_del(&vport->qe);
+ fabric->num_vports--;
+ bfa_wc_down(&fabric->wc);
+}
+
+
+/*
+ * Lookup for a vport within a fabric given its pwwn
+ */
+struct bfa_fcs_vport_s *
+bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe;
+
+ list_for_each(qe, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn)
+ return vport;
+ }
+
+ return NULL;
+}
+
+
+/*
+ * Get OUI of the attached switch.
+ *
+ * Note : Use of this function should be avoided as much as possible.
+ * This function should be used only if there is any requirement
+* to check for FOS version below 6.3.
+ * To check if the attached fabric is a brocade fabric, use
+ * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
+ * or above only.
+ */
+
+u16
+bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
+{
+ wwn_t fab_nwwn;
+ u8 *tmp;
+ u16 oui;
+
+ fab_nwwn = fabric->lps->pr_nwwn;
+
+ tmp = (u8 *)&fab_nwwn;
+ oui = (tmp[3] << 8) | tmp[4];
+
+ return oui;
+}
+/*
+ * Unsolicited frame receive handling.
+ */
+void
+bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
+ u16 len)
+{
+ u32 pid = fchs->d_id;
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe;
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+ struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;
+
+ bfa_trc(fabric->fcs, len);
+ bfa_trc(fabric->fcs, pid);
+
+ /*
+ * Look for our own FLOGI frames being looped back. This means an
+ * external loopback cable is in place. Our own FLOGI frames are
+ * sometimes looped back when switch port gets temporarily bypassed.
+ */
+ if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
+ (els_cmd->els_code == FC_ELS_FLOGI) &&
+ (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
+ return;
+ }
+
+ /*
+ * FLOGI/EVFP exchanges should be consumed by base fabric.
+ */
+ if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
+ bfa_trc(fabric->fcs, pid);
+ bfa_fcs_fabric_process_uf(fabric, fchs, len);
+ return;
+ }
+
+ if (fabric->bport.pid == pid) {
+ /*
+ * All authentication frames should be routed to auth
+ */
+ bfa_trc(fabric->fcs, els_cmd->els_code);
+ if (els_cmd->els_code == FC_ELS_AUTH) {
+ bfa_trc(fabric->fcs, els_cmd->els_code);
+ return;
+ }
+
+ bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
+ bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+ return;
+ }
+
+ /*
+ * look for a matching local port ID
+ */
+ list_for_each(qe, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ if (vport->lport.pid == pid) {
+ bfa_fcs_lport_uf_recv(&vport->lport, fchs, len);
+ return;
+ }
+ }
+
+ if (!bfa_fcs_fabric_is_switched(fabric))
+ bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+
+ bfa_trc(fabric->fcs, fchs->type);
+}
+
+/*
+ * Unsolicited frames to be processed by fabric.
+ */
+static void
+bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
+ u16 len)
+{
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ bfa_trc(fabric->fcs, els_cmd->els_code);
+
+ switch (els_cmd->els_code) {
+ case FC_ELS_FLOGI:
+ bfa_fcs_fabric_process_flogi(fabric, fchs, len);
+ break;
+
+ default:
+ /*
+ * need to generate a LS_RJT
+ */
+ break;
+ }
+}
+
+/*
+ * Process incoming FLOGI
+ */
+static void
+bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
+ struct fchs_s *fchs, u16 len)
+{
+ struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
+ struct bfa_fcs_lport_s *bport = &fabric->bport;
+
+ bfa_trc(fabric->fcs, fchs->s_id);
+
+ fabric->stats.flogi_rcvd++;
+ /*
+ * Check port type. It should be 0 = n-port.
+ */
+ if (flogi->csp.port_type) {
+ /*
+ * @todo: may need to send a LS_RJT
+ */
+ bfa_trc(fabric->fcs, flogi->port_name);
+ fabric->stats.flogi_rejected++;
+ return;
+ }
+
+ fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
+ bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
+ bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
+
+ /*
+ * Send a Flogi Acc
+ */
+ bfa_fcs_fabric_send_flogi_acc(fabric);
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
+}
+
+static void
+bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
+ struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
+ struct bfa_s *bfa = fabric->fcs->bfa;
+ struct bfa_fcxp_s *fcxp;
+ u16 reqlen;
+ struct fchs_s fchs;
+
+ fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE);
+ /*
+ * Do not expect this failure -- expect remote node to retry
+ */
+ if (!fcxp)
+ return;
+
+ reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_hton3b(FC_FABRIC_PORT),
+ n2n_port->reply_oxid, pcfg->pwwn,
+ pcfg->nwwn,
+ bfa_fcport_get_maxfrsize(bfa),
+ bfa_fcport_get_rx_bbcredit(bfa), 0);
+
+ bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
+ BFA_FALSE, FC_CLASS_3,
+ reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Flogi Acc completion callback.
+ */
+static void
+bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rspfchs)
+{
+ struct bfa_fcs_fabric_s *fabric = cbarg;
+
+ bfa_trc(fabric->fcs, status);
+}
+
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_port_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
+ aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_PORT, event);
+}
+
+/*
+ *
+ * @param[in] fabric - fabric
+ * @param[in] wwn_t - new fabric name
+ *
+ * @return - none
+ */
+void
+bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
+ wwn_t fabric_name)
+{
+ struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
+ char pwwn_ptr[BFA_STRING_32];
+ char fwwn_ptr[BFA_STRING_32];
+
+ bfa_trc(fabric->fcs, fabric_name);
+
+ if (fabric->fabric_name == 0) {
+ /*
+ * With BRCD switches, we don't get Fabric Name in FLOGI.
+ * Don't generate a fabric name change event in this case.
+ */
+ fabric->fabric_name = fabric_name;
+ } else {
+ fabric->fabric_name = fabric_name;
+ wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
+ wwn2str(fwwn_ptr,
+ bfa_fcs_lport_get_fabric_name(&fabric->bport));
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+ "Base port WWN = %s Fabric WWN = %s\n",
+ pwwn_ptr, fwwn_ptr);
+ bfa_fcs_fabric_aen_post(&fabric->bport,
+ BFA_PORT_AEN_FABRIC_NAME_CHANGE);
+ }
+}
+
+void
+bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
+{
+ struct bfa_fcs_fabric_s *fabric = uarg;
+ bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP);
+}
+
+/*
+ * Returns FCS vf structure for a given vf_id.
+ *
+ * param[in] vf_id - VF_ID
+ *
+ * return
+ * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
+ */
+bfa_fcs_vf_t *
+bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
+{
+ bfa_trc(fcs, vf_id);
+ if (vf_id == FC_VF_ID_NULL)
+ return &fcs->fabric;
+
+ return NULL;
+}
+
+/*
+ * Return the list of local logical ports present in the given VF.
+ *
+ * @param[in] vf vf for which logical ports are returned
+ * @param[out] lpwwn returned logical port wwn list
+ * @param[in,out] nlports in:size of lpwwn list;
+ * out:total elements present,
+ * actual elements returned is limited by the size
+ */
+void
+bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
+{
+ struct list_head *qe;
+ struct bfa_fcs_vport_s *vport;
+ int i = 0;
+ struct bfa_fcs_s *fcs;
+
+ if (vf == NULL || lpwwn == NULL || *nlports == 0)
+ return;
+
+ fcs = vf->fcs;
+
+ bfa_trc(fcs, vf->vf_id);
+ bfa_trc(fcs, (uint32_t) *nlports);
+
+ lpwwn[i++] = vf->bport.port_cfg.pwwn;
+
+ list_for_each(qe, &vf->vport_q) {
+ if (i >= *nlports)
+ break;
+
+ vport = (struct bfa_fcs_vport_s *) qe;
+ lpwwn[i++] = vport->lport.port_cfg.pwwn;
+ }
+
+ bfa_trc(fcs, i);
+ *nlports = i;
+}
+
+/*
+ * BFA FCS PPORT ( physical port)
+ */
+static void
+bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
+{
+ struct bfa_fcs_s *fcs = cbarg;
+
+ bfa_trc(fcs, event);
+
+ switch (event) {
+ case BFA_PORT_LINKUP:
+ bfa_fcs_fabric_link_up(&fcs->fabric);
+ break;
+
+ case BFA_PORT_LINKDOWN:
+ bfa_fcs_fabric_link_down(&fcs->fabric);
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+}
+
+void
+bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
+{
+ bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
+}
+
+/*
+ * BFA FCS UF ( Unsolicited Frames)
+ */
+
+/*
+ * BFA callback for unsolicited frame receive handler.
+ *
+ * @param[in] cbarg callback arg for receive handler
+ * @param[in] uf unsolicited frame descriptor
+ *
+ * @return None
+ */
+static void
+bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
+{
+ struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
+ struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
+ u16 len = bfa_uf_get_frmlen(uf);
+ struct fc_vft_s *vft;
+ struct bfa_fcs_fabric_s *fabric;
+
+ /*
+ * check for VFT header
+ */
+ if (fchs->routing == FC_RTG_EXT_HDR &&
+ fchs->cat_info == FC_CAT_VFT_HDR) {
+ bfa_stats(fcs, uf.tagged);
+ vft = bfa_uf_get_frmbuf(uf);
+ if (fcs->port_vfid == vft->vf_id)
+ fabric = &fcs->fabric;
+ else
+ fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
+
+ /*
+ * drop frame if vfid is unknown
+ */
+ if (!fabric) {
+ WARN_ON(1);
+ bfa_stats(fcs, uf.vfid_unknown);
+ bfa_uf_free(uf);
+ return;
+ }
+
+ /*
+ * skip vft header
+ */
+ fchs = (struct fchs_s *) (vft + 1);
+ len -= sizeof(struct fc_vft_s);
+
+ bfa_trc(fcs, vft->vf_id);
+ } else {
+ bfa_stats(fcs, uf.untagged);
+ fabric = &fcs->fabric;
+ }
+
+ bfa_trc(fcs, ((u32 *) fchs)[0]);
+ bfa_trc(fcs, ((u32 *) fchs)[1]);
+ bfa_trc(fcs, ((u32 *) fchs)[2]);
+ bfa_trc(fcs, ((u32 *) fchs)[3]);
+ bfa_trc(fcs, ((u32 *) fchs)[4]);
+ bfa_trc(fcs, ((u32 *) fchs)[5]);
+ bfa_trc(fcs, len);
+
+ bfa_fcs_fabric_uf_recv(fabric, fchs, len);
+ bfa_uf_free(uf);
+}
+
+void
+bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
+{
+ bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+}
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
new file mode 100644
index 000000000..42bcb9704
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -0,0 +1,883 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCS_H__
+#define __BFA_FCS_H__
+
+#include "bfa_cs.h"
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+#include "bfa_modules.h"
+#include "bfa_fc.h"
+
+#define BFA_FCS_OS_STR_LEN 64
+
+/*
+ * lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+ BFA_LPS_SM_LOGIN = 1, /* login request from user */
+ BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
+ BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
+ BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
+ BFA_LPS_SM_DELETE = 5, /* lps delete from user */
+ BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
+ BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
+ BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */
+};
+
+
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+ BFA_TRC_FCS_FCS = 1,
+ BFA_TRC_FCS_PORT = 2,
+ BFA_TRC_FCS_RPORT = 3,
+ BFA_TRC_FCS_FCPIM = 4,
+};
+
+
+struct bfa_fcs_s;
+
+#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
+
+#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
+#define N2N_LOCAL_PID 0x010000
+#define N2N_REMOTE_PID 0x020000
+#define BFA_FCS_RETRY_TIMEOUT 2000
+#define BFA_FCS_MAX_NS_RETRIES 5
+#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
+#define BFA_FCS_MAX_RPORT_LOGINS 1024
+
+struct bfa_fcs_lport_ns_s {
+ bfa_sm_t sm; /* state machine */
+ struct bfa_timer_s timer;
+ struct bfa_fcs_lport_s *port; /* parent port */
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_fcxp_wqe_s fcxp_wqe;
+ u8 num_rnnid_retries;
+ u8 num_rsnn_nn_retries;
+};
+
+
+struct bfa_fcs_lport_scn_s {
+ bfa_sm_t sm; /* state machine */
+ struct bfa_timer_s timer;
+ struct bfa_fcs_lport_s *port; /* parent port */
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_fcxp_wqe_s fcxp_wqe;
+};
+
+
+struct bfa_fcs_lport_fdmi_s {
+ bfa_sm_t sm; /* state machine */
+ struct bfa_timer_s timer;
+ struct bfa_fcs_lport_ms_s *ms; /* parent ms */
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_fcxp_wqe_s fcxp_wqe;
+ u8 retry_cnt; /* retry count */
+ u8 rsvd[3];
+};
+
+
+struct bfa_fcs_lport_ms_s {
+ bfa_sm_t sm; /* state machine */
+ struct bfa_timer_s timer;
+ struct bfa_fcs_lport_s *port; /* parent port */
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_fcxp_wqe_s fcxp_wqe;
+ struct bfa_fcs_lport_fdmi_s fdmi; /* FDMI component of MS */
+ u8 retry_cnt; /* retry count */
+ u8 rsvd[3];
+};
+
+
+struct bfa_fcs_lport_fab_s {
+ struct bfa_fcs_lport_ns_s ns; /* NS component of port */
+ struct bfa_fcs_lport_scn_s scn; /* scn component of port */
+ struct bfa_fcs_lport_ms_s ms; /* MS component of port */
+};
+
+#define MAX_ALPA_COUNT 127
+
+struct bfa_fcs_lport_loop_s {
+ u8 num_alpa; /* Num of ALPA entries in the map */
+ u8 alpabm_valid; /* alpa bitmap valid or not (1 or 0) */
+ u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional Map */
+ struct bfa_fcs_lport_s *port; /* parent port */
+};
+
+struct bfa_fcs_lport_n2n_s {
+ u32 rsvd;
+ __be16 reply_oxid; /* ox_id from the req flogi to be
+ *used in flogi acc */
+ wwn_t rem_port_wwn; /* Attached port's wwn */
+};
+
+
+union bfa_fcs_lport_topo_u {
+ struct bfa_fcs_lport_fab_s pfab;
+ struct bfa_fcs_lport_loop_s ploop;
+ struct bfa_fcs_lport_n2n_s pn2n;
+};
+
+
+struct bfa_fcs_lport_s {
+ struct list_head qe; /* used by port/vport */
+ bfa_sm_t sm; /* state machine */
+ struct bfa_fcs_fabric_s *fabric; /* parent fabric */
+ struct bfa_lport_cfg_s port_cfg; /* port configuration */
+ struct bfa_timer_s link_timer; /* timer for link offline */
+ u32 pid:24; /* FC address */
+ u8 lp_tag; /* lport tag */
+ u16 num_rports; /* Num of r-ports */
+ struct list_head rport_q; /* queue of discovered r-ports */
+ struct bfa_fcs_s *fcs; /* FCS instance */
+ union bfa_fcs_lport_topo_u port_topo; /* fabric/loop/n2n details */
+ struct bfad_port_s *bfad_port; /* driver peer instance */
+ struct bfa_fcs_vport_s *vport; /* NULL for base ports */
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_fcxp_wqe_s fcxp_wqe;
+ struct bfa_lport_stats_s stats;
+ struct bfa_wc_s wc; /* waiting counter for events */
+};
+#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
+#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
+#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
+#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
+#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
+#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
+ (port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
+
+/*
+ * forward declaration
+ */
+struct bfad_vf_s;
+
+enum bfa_fcs_fabric_type {
+ BFA_FCS_FABRIC_UNKNOWN = 0,
+ BFA_FCS_FABRIC_SWITCHED = 1,
+ BFA_FCS_FABRIC_N2N = 2,
+ BFA_FCS_FABRIC_LOOP = 3,
+};
+
+
+struct bfa_fcs_fabric_s {
+ struct list_head qe; /* queue element */
+ bfa_sm_t sm; /* state machine */
+ struct bfa_fcs_s *fcs; /* FCS instance */
+ struct bfa_fcs_lport_s bport; /* base logical port */
+ enum bfa_fcs_fabric_type fab_type; /* fabric type */
+ enum bfa_port_type oper_type; /* current link topology */
+ u8 is_vf; /* is virtual fabric? */
+ u8 is_npiv; /* is NPIV supported ? */
+ u8 is_auth; /* is Security/Auth supported ? */
+ u16 bb_credit; /* BB credit from fabric */
+ u16 vf_id; /* virtual fabric ID */
+ u16 num_vports; /* num vports */
+ u16 rsvd;
+ struct list_head vport_q; /* queue of virtual ports */
+ struct list_head vf_q; /* queue of virtual fabrics */
+ struct bfad_vf_s *vf_drv; /* driver vf structure */
+ struct bfa_timer_s link_timer; /* Link Failure timer. Vport */
+ wwn_t fabric_name; /* attached fabric name */
+ bfa_boolean_t auth_reqd; /* authentication required */
+ struct bfa_timer_s delay_timer; /* delay timer */
+ union {
+ u16 swp_vfid;/* switch port VF id */
+ } event_arg;
+ struct bfa_wc_s wc; /* wait counter for delete */
+ struct bfa_vf_stats_s stats; /* fabric/vf stats */
+ struct bfa_lps_s *lps; /* lport login services */
+ u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
+ /* attached fabric's ip addr */
+ struct bfa_wc_s stop_wc; /* wait counter for stop */
+};
+
+#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
+#define bfa_fcs_fabric_is_switched(__f) \
+ ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
+
+/*
+ * The design calls for a single implementation of base fabric and vf.
+ */
+#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
+
+struct bfa_vf_event_s {
+ u32 undefined;
+};
+
+struct bfa_fcs_s;
+struct bfa_fcs_fabric_s;
+
+/*
+ * @todo : need to move to a global config file.
+ * Maximum Rports supported per port (physical/logical).
+ */
+#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
+
+#define bfa_fcs_lport_t struct bfa_fcs_lport_s
+
+/*
+ * Symbolic Name related defines
+ * Total bytes 255.
+ * Physical Port's symbolic name 128 bytes.
+ * For Vports, Vport's symbolic name is appended to the Physical port's
+ * Symbolic Name.
+ *
+ * Physical Port's symbolic name Format : (Total 128 bytes)
+ * Adapter Model number/name : 16 bytes
+ * Driver Version : 10 bytes
+ * Host Machine Name : 30 bytes
+ * Host OS Info : 44 bytes
+ * Host OS PATCH Info : 16 bytes
+ * ( remaining 12 bytes reserved to be used for separator)
+ */
+#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | "
+
+#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 16
+#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10
+#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30
+#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 44
+#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
+
+/*
+ * Get FC port ID for a logical port.
+ */
+#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
+#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
+#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
+#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
+#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name)
+#define bfa_fcs_lport_is_initiator(_lport) \
+ ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
+#define bfa_fcs_lport_get_nrports(_lport) \
+ ((_lport) ? (_lport)->num_rports : 0)
+
+static inline struct bfad_port_s *
+bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
+{
+ return port->bfad_port;
+}
+
+#define bfa_fcs_lport_get_opertype(_lport) ((_lport)->fabric->oper_type)
+#define bfa_fcs_lport_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
+#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
+ ((_lport)->fabric->fabric_ip_addr)
+
+/*
+ * bfa fcs port public functions
+ */
+
+bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
+struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
+void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+ struct bfa_rport_qualifier_s rport[], int *nrports);
+wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
+ int index, int nrports, bfa_boolean_t bwwn);
+
+struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
+ u16 vf_id, wwn_t lpwwn);
+
+void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);
+void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
+ struct bfa_lport_info_s *port_info);
+void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
+ struct bfa_lport_attr_s *port_attr);
+void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
+ struct bfa_lport_stats_s *port_stats);
+void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port);
+enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed(
+ struct bfa_fcs_lport_s *port);
+
+/* MS FCS routines */
+void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port);
+
+/* FDMI FCS routines */
+void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs,
+ u16 len);
+void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
+ u16 vf_id, struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
+ struct bfa_lport_cfg_s *port_cfg);
+void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
+ struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid(
+ struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
+ struct bfa_fcs_lport_s *port, wwn_t pwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
+ struct bfa_fcs_lport_s *port, wwn_t nwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier(
+ struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid);
+void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
+ struct bfa_fcs_rport_s *rport);
+void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
+ struct bfa_fcs_rport_s *rport);
+void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
+void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
+void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
+void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
+void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
+void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport);
+void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_frame, u32 len);
+void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
+
+struct bfa_fcs_vport_s {
+ struct list_head qe; /* queue elem */
+ bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_t lport; /* logical port */
+ struct bfa_timer_s timer;
+ struct bfad_vport_s *vport_drv; /* Driver private */
+ struct bfa_vport_stats_s vport_stats; /* vport statistics */
+ struct bfa_lps_s *lps; /* Lport login service*/
+ int fdisc_retries;
+};
+
+#define bfa_fcs_vport_get_port(vport) \
+ ((struct bfa_fcs_lport_s *)(&vport->port))
+
+/*
+ * bfa fcs vport public functions
+ */
+bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
+ struct bfa_fcs_s *fcs, u16 vf_id,
+ struct bfa_lport_cfg_s *port_cfg,
+ struct bfad_vport_s *vport_drv);
+bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
+ struct bfa_fcs_s *fcs, u16 vf_id,
+ struct bfa_lport_cfg_s *port_cfg,
+ struct bfad_vport_s *vport_drv);
+bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
+ struct bfa_vport_attr_s *vport_attr);
+struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
+ u16 vf_id, wwn_t vpwwn);
+void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
+
+#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
+#define BFA_FCS_RPORT_MAX_RETRIES (5)
+
+/*
+ * forward declarations
+ */
+struct bfad_rport_s;
+
+struct bfa_fcs_itnim_s;
+struct bfa_fcs_tin_s;
+struct bfa_fcs_iprp_s;
+
+/* Rport Features (RPF) */
+struct bfa_fcs_rpf_s {
+ bfa_sm_t sm; /* state machine */
+ struct bfa_fcs_rport_s *rport; /* parent rport */
+ struct bfa_timer_s timer; /* general purpose timer */
+ struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
+ struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
+ int rpsc_retries; /* max RPSC retry attempts */
+ enum bfa_port_speed rpsc_speed;
+ /* Current Speed from RPSC. O if RPSC fails */
+ enum bfa_port_speed assigned_speed;
+ /*
+ * Speed assigned by the user. will be used if RPSC is
+ * not supported by the rport.
+ */
+};
+
+struct bfa_fcs_rport_s {
+ struct list_head qe; /* used by port/vport */
+ struct bfa_fcs_lport_s *port; /* parent FCS port */
+ struct bfa_fcs_s *fcs; /* fcs instance */
+ struct bfad_rport_s *rp_drv; /* driver peer instance */
+ u32 pid; /* port ID of rport */
+ u32 old_pid; /* PID before rport goes offline */
+ u16 maxfrsize; /* maximum frame size */
+ __be16 reply_oxid; /* OX_ID of inbound requests */
+ enum fc_cos fc_cos; /* FC classes of service supp */
+ bfa_boolean_t cisc; /* CISC capable device */
+ bfa_boolean_t prlo; /* processing prlo or LOGO */
+ bfa_boolean_t plogi_pending; /* Rx Plogi Pending */
+ wwn_t pwwn; /* port wwn of rport */
+ wwn_t nwwn; /* node wwn of rport */
+ struct bfa_rport_symname_s psym_name; /* port symbolic name */
+ bfa_sm_t sm; /* state machine */
+ struct bfa_timer_s timer; /* general purpose timer */
+ struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
+ struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
+ struct bfa_fcs_iprp_s *iprp; /* IP/FC role */
+ struct bfa_rport_s *bfa_rport; /* BFA Rport */
+ struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
+ int plogi_retries; /* max plogi retry attempts */
+ int ns_retries; /* max NS query retry attempts */
+ struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
+ struct bfa_rport_stats_s stats; /* rport stats */
+ enum bfa_rport_function scsi_function; /* Initiator/Target */
+ struct bfa_fcs_rpf_s rpf; /* Rport features module */
+ bfa_boolean_t scn_online; /* SCN online flag */
+};
+
+static inline struct bfa_rport_s *
+bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
+{
+ return rport->bfa_rport;
+}
+
+/*
+ * bfa fcs rport API functions
+ */
+void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+ struct bfa_rport_attr_s *attr);
+struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
+ wwn_t rpwwn);
+struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
+ struct bfa_fcs_lport_s *port, wwn_t rnwwn);
+void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
+void bfa_fcs_rport_set_max_logins(u32 max_logins);
+void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *fchs, u16 len);
+void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
+
+struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
+ u32 pid);
+void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+ struct fc_logi_s *plogi_rsp);
+void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs,
+ struct fc_logi_s *plogi);
+void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
+ struct fc_logi_s *plogi);
+void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id);
+
+void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
+int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
+struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(
+ struct bfa_fcs_lport_s *port, wwn_t wwn);
+void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
+
+/*
+ * forward declarations
+ */
+struct bfad_itnim_s;
+
+struct bfa_fcs_itnim_s {
+ bfa_sm_t sm; /* state machine */
+ struct bfa_fcs_rport_s *rport; /* parent remote rport */
+ struct bfad_itnim_s *itnim_drv; /* driver peer instance */
+ struct bfa_fcs_s *fcs; /* fcs instance */
+ struct bfa_timer_s timer; /* timer functions */
+ struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
+ u32 prli_retries; /* max prli retry attempts */
+ bfa_boolean_t seq_rec; /* seq recovery support */
+ bfa_boolean_t rec_support; /* REC supported */
+ bfa_boolean_t conf_comp; /* FCP_CONF support */
+ bfa_boolean_t task_retry_id; /* task retry id supp */
+ struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */
+ struct bfa_fcxp_s *fcxp; /* FCXP in use */
+ struct bfa_itnim_stats_s stats; /* itn statistics */
+};
+#define bfa_fcs_fcxp_alloc(__fcs, __req) \
+ bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0, \
+ NULL, NULL, NULL, NULL, __req)
+#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
+ __alloc_cbarg, __req) \
+ bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
+ __alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req)
+
+static inline struct bfad_port_s *
+bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->port->bfad_port;
+}
+
+
+static inline struct bfa_fcs_lport_s *
+bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->port;
+}
+
+
+static inline wwn_t
+bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->nwwn;
+}
+
+
+static inline wwn_t
+bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->pwwn;
+}
+
+
+static inline u32
+bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->pid;
+}
+
+
+static inline u32
+bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->maxfrsize;
+}
+
+
+static inline enum fc_cos
+bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->rport->fc_cos;
+}
+
+
+static inline struct bfad_itnim_s *
+bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->itnim_drv;
+}
+
+
+static inline struct bfa_itnim_s *
+bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
+{
+ return itnim->bfa_itnim;
+}
+
+/*
+ * bfa fcs FCP Initiator mode API functions
+ */
+void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
+ struct bfa_itnim_attr_s *attr);
+void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
+ struct bfa_itnim_stats_s *stats);
+struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port,
+ wwn_t rpwwn);
+bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+ struct bfa_itnim_attr_s *attr);
+bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+ struct bfa_itnim_stats_s *stats);
+bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
+ wwn_t rpwwn);
+struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim);
+bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
+ struct fchs_s *fchs, u16 len);
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_4G (FDMI_TRANS_SPEED_1G | \
+ FDMI_TRANS_SPEED_2G | \
+ FDMI_TRANS_SPEED_4G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_8G (FDMI_TRANS_SPEED_1G | \
+ FDMI_TRANS_SPEED_2G | \
+ FDMI_TRANS_SPEED_4G | \
+ FDMI_TRANS_SPEED_8G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_16G (FDMI_TRANS_SPEED_2G | \
+ FDMI_TRANS_SPEED_4G | \
+ FDMI_TRANS_SPEED_8G | \
+ FDMI_TRANS_SPEED_16G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_10G FDMI_TRANS_SPEED_10G
+
+#define BFA_FCS_FDMI_VENDOR_INFO_LEN 8
+#define BFA_FCS_FDMI_FC4_TYPE_LEN 32
+
+/*
+ * HBA Attribute Block : BFA internal representation. Note : Some variable
+ * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based
+ * on this the size has been reduced to 16 bytes from the standard's 64 bytes.
+ */
+struct bfa_fcs_fdmi_hba_attr_s {
+ wwn_t node_name;
+ u8 manufacturer[64];
+ u8 serial_num[64];
+ u8 model[16];
+ u8 model_desc[128];
+ u8 hw_version[8];
+ u8 driver_version[BFA_VERSION_LEN];
+ u8 option_rom_ver[BFA_VERSION_LEN];
+ u8 fw_version[BFA_VERSION_LEN];
+ u8 os_name[256];
+ __be32 max_ct_pyld;
+ struct bfa_lport_symname_s node_sym_name;
+ u8 vendor_info[BFA_FCS_FDMI_VENDOR_INFO_LEN];
+ __be32 num_ports;
+ wwn_t fabric_name;
+ u8 bios_ver[BFA_VERSION_LEN];
+};
+
+/*
+ * Port Attribute Block
+ */
+struct bfa_fcs_fdmi_port_attr_s {
+ u8 supp_fc4_types[BFA_FCS_FDMI_FC4_TYPE_LEN];
+ __be32 supp_speed; /* supported speed */
+ __be32 curr_speed; /* current Speed */
+ __be32 max_frm_size; /* max frame size */
+ u8 os_device_name[256]; /* OS device Name */
+ u8 host_name[256]; /* host name */
+ wwn_t port_name;
+ wwn_t node_name;
+ struct bfa_lport_symname_s port_sym_name;
+ __be32 port_type;
+ enum fc_cos scos;
+ wwn_t port_fabric_name;
+ u8 port_act_fc4_type[BFA_FCS_FDMI_FC4_TYPE_LEN];
+ __be32 port_state;
+ __be32 num_ports;
+};
+
+struct bfa_fcs_stats_s {
+ struct {
+ u32 untagged; /* untagged receive frames */
+ u32 tagged; /* tagged receive frames */
+ u32 vfid_unknown; /* VF id is unknown */
+ } uf;
+};
+
+struct bfa_fcs_driver_info_s {
+ u8 version[BFA_VERSION_LEN]; /* Driver Version */
+ u8 host_machine_name[BFA_FCS_OS_STR_LEN];
+ u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
+ u8 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */
+ u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
+};
+
+struct bfa_fcs_s {
+ struct bfa_s *bfa; /* corresponding BFA bfa instance */
+ struct bfad_s *bfad; /* corresponding BDA driver instance */
+ struct bfa_trc_mod_s *trcmod; /* tracing module */
+ bfa_boolean_t vf_enabled; /* VF mode is enabled */
+ bfa_boolean_t fdmi_enabled; /* FDMI is enabled */
+ bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
+ u16 port_vfid; /* port default VF ID */
+ struct bfa_fcs_driver_info_s driver_info;
+ struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
+ struct bfa_fcs_stats_s stats; /* FCS statistics */
+ struct bfa_wc_s wc; /* waiting counter */
+ int fcs_aen_seq;
+ u32 num_rport_logins;
+};
+
+/*
+ * fcs_fabric_sm fabric state machine functions
+ */
+
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+ BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
+ BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
+ BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
+ BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
+ BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
+ BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
+ BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
+ BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
+ BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
+ BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
+ BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
+ BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
+ BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
+ BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
+ BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
+ BFA_FCS_FABRIC_SM_START = 16, /* from driver */
+ BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
+ BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
+ BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
+};
+
+/*
+ * fcs_rport_sm FCS rport state machine events
+ */
+
+enum rport_event {
+ RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
+ RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
+ RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
+ RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
+ RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
+ RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
+ RPSM_EVENT_DELETE = 7, /* RPORT delete request */
+ RPSM_EVENT_FAB_SCN = 8, /* state change notification */
+ RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
+ RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
+ RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
+ RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
+ RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
+ RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
+ RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
+ RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
+ RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
+ RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
+ RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
+ RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
+ RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
+};
+
+/*
+ * fcs_itnim_sm FCS itnim state machine events
+ */
+enum bfa_fcs_itnim_event {
+ BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
+ BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
+ BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
+ BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
+ BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
+ BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
+ BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
+ BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
+ BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
+ BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
+ BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
+};
+
+/*
+ * bfa fcs API functions
+ */
+void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
+ struct bfad_s *bfad,
+ bfa_boolean_t min_cfg);
+void bfa_fcs_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
+void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
+ struct bfa_fcs_driver_info_s *driver_info);
+void bfa_fcs_exit(struct bfa_fcs_s *fcs);
+void bfa_fcs_stop(struct bfa_fcs_s *fcs);
+
+/*
+ * bfa fcs vf public functions
+ */
+bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
+void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
+
+/*
+ * fabric protected interface functions
+ */
+void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
+ struct bfa_fcs_vport_s *vport);
+void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
+ struct bfa_fcs_vport_s *vport);
+struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
+ struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
+void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
+ struct fchs_s *fchs, u16 len);
+void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
+ wwn_t fabric_name);
+u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+ enum bfa_fcs_fabric_event event);
+
+/*
+ * BFA FCS callback interfaces
+ */
+
+/*
+ * fcb Main fcs callbacks
+ */
+
+struct bfad_port_s;
+struct bfad_vf_s;
+struct bfad_vport_s;
+struct bfad_rport_s;
+
+/*
+ * lport callbacks
+ */
+struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
+ struct bfa_fcs_lport_s *port,
+ enum bfa_lport_role roles,
+ struct bfad_vf_s *vf_drv,
+ struct bfad_vport_s *vp_drv);
+
+/*
+ * vport callbacks
+ */
+void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
+
+/*
+ * rport callbacks
+ */
+bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
+ struct bfa_fcs_rport_s **rport,
+ struct bfad_rport_s **rport_drv);
+
+/*
+ * itnim callbacks
+ */
+void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+ struct bfad_itnim_s **itnim_drv);
+void bfa_fcb_itnim_free(struct bfad_s *bfad,
+ struct bfad_itnim_s *itnim_drv);
+void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
+void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
+
+#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
new file mode 100644
index 000000000..6dc7926a3
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * fcpim.c - FCP initiator mode i-t nexus state machine
+ */
+
+#include "bfad_drv.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+#include "bfad_im.h"
+
+BFA_TRC_FILE(FCS, FCPIM);
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_itnim_timeout(void *arg);
+static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
+static void bfa_fcs_itnim_send_prli(void *itnim_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_itnim_prli_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_itnim_aen_event event);
+
+static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event);
+
+static struct bfa_sm_table_s itnim_sm_table[] = {
+ {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
+ {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
+ {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
+ {BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY},
+ {BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE},
+ {BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE},
+ {BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE},
+ {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
+};
+
+/*
+ * fcs_itnim_sm FCS itnim state machine
+ */
+
+static void
+bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_FCS_ONLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
+ itnim->prli_retries = 0;
+ bfa_fcs_itnim_send_prli(itnim, NULL);
+ break;
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_INITIATOR:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+
+}
+
+static void
+bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_FRMSENT:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
+ break;
+
+ case BFA_FCS_ITNIM_SM_INITIATOR:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+ bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_RSP_OK:
+ if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+ else
+ bfa_sm_set_state(itnim,
+ bfa_fcs_itnim_sm_hal_rport_online);
+
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_RSP_ERROR:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry);
+ bfa_timer_start(itnim->fcs->bfa, &itnim->timer,
+ bfa_fcs_itnim_timeout, itnim,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ break;
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcxp_discard(itnim->fcxp);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_INITIATOR:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+ bfa_fcxp_discard(itnim->fcxp);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcxp_discard(itnim->fcxp);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_HAL_ONLINE:
+ if (!itnim->bfa_itnim)
+ itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa,
+ itnim->rport->bfa_rport, itnim);
+
+ if (itnim->bfa_itnim) {
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
+ bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
+ } else {
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE);
+ }
+
+ break;
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_TIMEOUT:
+ if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+ itnim->prli_retries++;
+ bfa_trc(itnim->fcs, itnim->prli_retries);
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
+ bfa_fcs_itnim_send_prli(itnim, NULL);
+ } else {
+ /* invoke target offline */
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
+ }
+ break;
+
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_timer_stop(&itnim->timer);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_INITIATOR:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+ bfa_timer_stop(&itnim->timer);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_timer_stop(&itnim->timer);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+ char rpwwn_buf[BFA_STRING_32];
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_HCB_ONLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
+ bfa_fcb_itnim_online(itnim->itnim_drv);
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
+ wwn2str(rpwwn_buf, itnim->rport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Target (WWN = %s) is online for initiator (WWN = %s)\n",
+ rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
+ bfa_itnim_offline(itnim->bfa_itnim);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+ char rpwwn_buf[BFA_STRING_32];
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
+ bfa_fcb_itnim_offline(itnim->itnim_drv);
+ bfa_itnim_offline(itnim->bfa_itnim);
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
+ wwn2str(rpwwn_buf, itnim->rport->pwwn);
+ if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "Target (WWN = %s) connectivity lost for "
+ "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+ } else {
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
+ rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+ }
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+/*
+ * This state is set when a discovered rport is also in intiator mode.
+ * This ITN is marked as no_op and is not active and will not be truned into
+ * online state.
+ */
+static void
+bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_fcs_itnim_event event)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_trc(itnim->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_ITNIM_SM_OFFLINE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+ break;
+
+ /*
+ * fcs_online is expected here for well known initiator ports
+ */
+ case BFA_FCS_ITNIM_SM_FCS_ONLINE:
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+ break;
+
+ case BFA_FCS_ITNIM_SM_RSP_ERROR:
+ case BFA_FCS_ITNIM_SM_INITIATOR:
+ break;
+
+ case BFA_FCS_ITNIM_SM_DELETE:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ bfa_fcs_itnim_free(itnim);
+ break;
+
+ default:
+ bfa_sm_fault(itnim->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_itnim_aen_event event)
+{
+ struct bfa_fcs_rport_s *rport = itnim->rport;
+ struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ /* Don't post events for well known addresses */
+ if (BFA_FCS_PID_IS_WKA(rport->pid))
+ return;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
+ aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(itnim->fcs));
+ aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+ aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_ITNIM, event);
+}
+
+static void
+bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
+ struct bfa_fcs_rport_s *rport = itnim->rport;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ int len;
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ itnim->stats.fcxp_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
+ bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
+ return;
+ }
+ itnim->fcxp = fcxp;
+
+ len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);
+
+ bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs,
+ bfa_fcs_itnim_prli_response, (void *)itnim,
+ FC_MAX_PDUSZ, FC_ELS_TOV);
+
+ itnim->stats.prli_sent++;
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
+}
+
+static void
+bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
+ struct fc_els_cmd_s *els_cmd;
+ struct fc_prli_s *prli_resp;
+ struct fc_ls_rjt_s *ls_rjt;
+ struct fc_prli_params_s *sparams;
+
+ bfa_trc(itnim->fcs, req_status);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ itnim->stats.prli_rsp_err++;
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
+ return;
+ }
+
+ els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ if (els_cmd->els_code == FC_ELS_ACC) {
+ prli_resp = (struct fc_prli_s *) els_cmd;
+
+ if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) {
+ bfa_trc(itnim->fcs, rsp_len);
+ /*
+ * Check if this r-port is also in Initiator mode.
+ * If so, we need to set this ITN as a no-op.
+ */
+ if (prli_resp->parampage.servparams.initiator) {
+ bfa_trc(itnim->fcs, prli_resp->parampage.type);
+ itnim->rport->scsi_function =
+ BFA_RPORT_INITIATOR;
+ itnim->stats.prli_rsp_acc++;
+ itnim->stats.initiator++;
+ bfa_sm_send_event(itnim,
+ BFA_FCS_ITNIM_SM_RSP_OK);
+ return;
+ }
+
+ itnim->stats.prli_rsp_parse_err++;
+ return;
+ }
+ itnim->rport->scsi_function = BFA_RPORT_TARGET;
+
+ sparams = &prli_resp->parampage.servparams;
+ itnim->seq_rec = sparams->retry;
+ itnim->rec_support = sparams->rec_support;
+ itnim->task_retry_id = sparams->task_retry_id;
+ itnim->conf_comp = sparams->confirm;
+
+ itnim->stats.prli_rsp_acc++;
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
+ } else {
+ ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ bfa_trc(itnim->fcs, ls_rjt->reason_code);
+ bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);
+
+ itnim->stats.prli_rsp_rjt++;
+ if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP);
+ return;
+ }
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
+ }
+}
+
+static void
+bfa_fcs_itnim_timeout(void *arg)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;
+
+ itnim->stats.timeout++;
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
+}
+
+static void
+bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
+{
+ if (itnim->bfa_itnim) {
+ bfa_itnim_delete(itnim->bfa_itnim);
+ itnim->bfa_itnim = NULL;
+ }
+
+ bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
+}
+
+
+
+/*
+ * itnim_public FCS ITNIM public interfaces
+ */
+
+/*
+ * Called by rport when a new rport is created.
+ *
+ * @param[in] rport - remote port.
+ */
+struct bfa_fcs_itnim_s *
+bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfa_fcs_itnim_s *itnim;
+ struct bfad_itnim_s *itnim_drv;
+
+ /*
+ * call bfad to allocate the itnim
+ */
+ bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
+ if (itnim == NULL) {
+ bfa_trc(port->fcs, rport->pwwn);
+ return NULL;
+ }
+
+ /*
+ * Initialize itnim
+ */
+ itnim->rport = rport;
+ itnim->fcs = rport->fcs;
+ itnim->itnim_drv = itnim_drv;
+
+ itnim->bfa_itnim = NULL;
+ itnim->seq_rec = BFA_FALSE;
+ itnim->rec_support = BFA_FALSE;
+ itnim->conf_comp = BFA_FALSE;
+ itnim->task_retry_id = BFA_FALSE;
+
+ /*
+ * Set State machine
+ */
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+
+ return itnim;
+}
+
+/*
+ * Called by rport to delete the instance of FCPIM.
+ *
+ * @param[in] rport - remote port.
+ */
+void
+bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pid);
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
+}
+
+/*
+ * Notification from rport that PLOGI is complete to initiate FC-4 session.
+ */
+void
+bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
+{
+ itnim->stats.onlines++;
+
+ if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid))
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
+}
+
+/*
+ * Called by rport to handle a remote device offline.
+ */
+void
+bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
+{
+ itnim->stats.offlines++;
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
+}
+
+/*
+ * Called by rport when remote port is known to be an initiator from
+ * PRLI received.
+ */
+void
+bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pid);
+ itnim->stats.initiator++;
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
+}
+
+/*
+ * Called by rport to check if the itnim is online.
+ */
+bfa_status_t
+bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
+{
+ bfa_trc(itnim->fcs, itnim->rport->pid);
+ switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
+ case BFA_ITNIM_ONLINE:
+ case BFA_ITNIM_INITIATIOR:
+ return BFA_STATUS_OK;
+
+ default:
+ return BFA_STATUS_NO_FCPIM_NEXUS;
+ }
+}
+
+/*
+ * BFA completion callback for bfa_itnim_online().
+ */
+void
+bfa_cb_itnim_online(void *cbarg)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
+}
+
+/*
+ * BFA completion callback for bfa_itnim_offline().
+ */
+void
+bfa_cb_itnim_offline(void *cb_arg)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
+}
+
+/*
+ * Mark the beginning of PATH TOV handling. IO completion callbacks
+ * are still pending.
+ */
+void
+bfa_cb_itnim_tov_begin(void *cb_arg)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+}
+
+/*
+ * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
+ */
+void
+bfa_cb_itnim_tov(void *cb_arg)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+ struct bfad_itnim_s *itnim_drv = itnim->itnim_drv;
+
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ itnim_drv->state = ITNIM_STATE_TIMEOUT;
+}
+
+/*
+ * BFA notification to FCS/driver for second level error recovery.
+ *
+ * Atleast one I/O request has timedout and target is unresponsive to
+ * repeated abort requests. Second level error recovery should be initiated
+ * by starting implicit logout and recovery procedures.
+ */
+void
+bfa_cb_itnim_sler(void *cb_arg)
+{
+ struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+
+ itnim->stats.sler++;
+ bfa_trc(itnim->fcs, itnim->rport->pwwn);
+ bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
+}
+
+struct bfa_fcs_itnim_s *
+bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+ rport = bfa_fcs_rport_lookup(port, rpwwn);
+
+ if (!rport)
+ return NULL;
+
+ WARN_ON(rport->itnim == NULL);
+ return rport->itnim;
+}
+
+bfa_status_t
+bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+ struct bfa_itnim_attr_s *attr)
+{
+ struct bfa_fcs_itnim_s *itnim = NULL;
+
+ itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+
+ if (itnim == NULL)
+ return BFA_STATUS_NO_FCPIM_NEXUS;
+
+ attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
+ attr->retry = itnim->seq_rec;
+ attr->rec_support = itnim->rec_support;
+ attr->conf_comp = itnim->conf_comp;
+ attr->task_retry_id = itnim->task_retry_id;
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+ struct bfa_itnim_stats_s *stats)
+{
+ struct bfa_fcs_itnim_s *itnim = NULL;
+
+ WARN_ON(port == NULL);
+
+ itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+
+ if (itnim == NULL)
+ return BFA_STATUS_NO_FCPIM_NEXUS;
+
+ memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+ struct bfa_fcs_itnim_s *itnim = NULL;
+
+ WARN_ON(port == NULL);
+
+ itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+
+ if (itnim == NULL)
+ return BFA_STATUS_NO_FCPIM_NEXUS;
+
+ memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
+ struct fchs_s *fchs, u16 len)
+{
+ struct fc_els_cmd_s *els_cmd;
+
+ bfa_trc(itnim->fcs, fchs->type);
+
+ if (fchs->type != FC_TYPE_ELS)
+ return;
+
+ els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ bfa_trc(itnim->fcs, els_cmd->els_code);
+
+ switch (els_cmd->els_code) {
+ case FC_ELS_PRLO:
+ bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id);
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
new file mode 100644
index 000000000..ff75ef891
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -0,0 +1,6988 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+#include "bfa_fc.h"
+
+BFA_TRC_FILE(FCS, PORT);
+
+/*
+ * ALPA to LIXA bitmap mapping
+ *
+ * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
+ * is for L_bit (login required) and is filled as ALPA 0x00 here.
+ */
+static const u8 loop_alpa_map[] = {
+ 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */
+ 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */
+ 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */
+
+ 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */
+ 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */
+ 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */
+ 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */
+
+ 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */
+ 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */
+ 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */
+ 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */
+
+ 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */
+ 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */
+ 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */
+ 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */
+};
+
+static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs, u8 reason_code,
+ u8 reason_code_expl);
+static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs, struct fc_logi_s *plogi);
+static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs,
+ struct fc_echo_s *echo, u16 len);
+static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs,
+ struct fc_rnid_cmd_s *rnid, u16 len);
+static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
+ struct fc_rnid_general_topology_data_s *gen_topo_data);
+
+static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port);
+
+static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
+
+static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port);
+static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port);
+
+static struct {
+ void (*init) (struct bfa_fcs_lport_s *port);
+ void (*online) (struct bfa_fcs_lport_s *port);
+ void (*offline) (struct bfa_fcs_lport_s *port);
+} __port_action[] = {
+ {
+ bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
+ bfa_fcs_lport_unknown_offline}, {
+ bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
+ bfa_fcs_lport_fab_offline}, {
+ bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
+ bfa_fcs_lport_n2n_offline}, {
+ bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
+ bfa_fcs_lport_loop_offline},
+ };
+
+/*
+ * fcs_port_sm FCS logical port state machine
+ */
+
+enum bfa_fcs_lport_event {
+ BFA_FCS_PORT_SM_CREATE = 1,
+ BFA_FCS_PORT_SM_ONLINE = 2,
+ BFA_FCS_PORT_SM_OFFLINE = 3,
+ BFA_FCS_PORT_SM_DELETE = 4,
+ BFA_FCS_PORT_SM_DELRPORT = 5,
+ BFA_FCS_PORT_SM_STOP = 6,
+};
+
+static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
+static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
+static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
+static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
+static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
+static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
+
+static void
+bfa_fcs_lport_sm_uninit(
+ struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_CREATE:
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_ONLINE:
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
+ bfa_fcs_lport_online_actions(port);
+ break;
+
+ case BFA_FCS_PORT_SM_DELETE:
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+ bfa_fcs_lport_deleted(port);
+ break;
+
+ case BFA_FCS_PORT_SM_STOP:
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
+ break;
+
+ case BFA_FCS_PORT_SM_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_sm_online(
+ struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe, *qen;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_OFFLINE:
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_offline);
+ bfa_fcs_lport_offline_actions(port);
+ break;
+
+ case BFA_FCS_PORT_SM_STOP:
+ __port_action[port->fabric->fab_type].offline(port);
+
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ break;
+
+ case BFA_FCS_PORT_SM_DELETE:
+
+ __port_action[port->fabric->fab_type].offline(port);
+
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+ bfa_fcs_lport_deleted(port);
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ break;
+
+ case BFA_FCS_PORT_SM_DELRPORT:
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_sm_offline(
+ struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe, *qen;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_ONLINE:
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
+ bfa_fcs_lport_online_actions(port);
+ break;
+
+ case BFA_FCS_PORT_SM_STOP:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ break;
+
+ case BFA_FCS_PORT_SM_DELETE:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+ bfa_fcs_lport_deleted(port);
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ break;
+
+ case BFA_FCS_PORT_SM_DELRPORT:
+ case BFA_FCS_PORT_SM_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_DELRPORT:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ else
+ bfa_wc_down(&(port->fabric->stop_wc));
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_sm_deleting(
+ struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_DELRPORT:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+ bfa_fcs_lport_deleted(port);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+/*
+ * fcs_port_pvt
+ */
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_lport_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+ aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(port->fcs));
+ aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_LPORT, event);
+}
+
+/*
+ * Send a LS reject
+ */
+static void
+bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+ u8 reason_code, u8 reason_code_expl)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len;
+
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, reason_code, reason_code_expl);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Send a FCCT Reject
+ */
+static void
+bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len;
+ struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
+ struct ct_hdr_s *ct_hdr;
+
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
+ ct_hdr->gs_type = rx_cthdr->gs_type;
+ ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
+
+ len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
+ bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, reason_code, reason_code_expl);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Process incoming plogi from a remote port.
+ */
+static void
+bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs, struct fc_logi_s *plogi)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ /*
+ * If min cfg mode is enabled, drop any incoming PLOGIs
+ */
+ if (__fcs_min_cfg(port->fcs)) {
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ return;
+ }
+
+ if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) {
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ /*
+ * send a LS reject
+ */
+ bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
+ FC_LS_RJT_RSN_PROTOCOL_ERROR,
+ FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
+ return;
+ }
+
+ /*
+ * Direct Attach P2P mode : verify address assigned by the r-port.
+ */
+ if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
+ (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
+ (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
+ if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
+ /* Address assigned to us cannot be a WKA */
+ bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
+ FC_LS_RJT_RSN_PROTOCOL_ERROR,
+ FC_LS_RJT_EXP_INVALID_NPORT_ID);
+ return;
+ }
+ port->pid = rx_fchs->d_id;
+ bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
+ }
+
+ /*
+ * First, check if we know the device by pwwn.
+ */
+ rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
+ if (rport) {
+ /*
+ * Direct Attach P2P mode : handle address assigned by r-port.
+ */
+ if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
+ (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
+ (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
+ port->pid = rx_fchs->d_id;
+ bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
+ rport->pid = rx_fchs->s_id;
+ }
+ bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
+ return;
+ }
+
+ /*
+ * Next, lookup rport by PID.
+ */
+ rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
+ if (!rport) {
+ /*
+ * Inbound PLOGI from a new device.
+ */
+ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
+ return;
+ }
+
+ /*
+ * Rport is known only by PID.
+ */
+ if (rport->pwwn) {
+ /*
+ * This is a different device with the same pid. Old device
+ * disappeared. Send implicit LOGO to old device.
+ */
+ WARN_ON(rport->pwwn == plogi->port_name);
+ bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
+
+ /*
+ * Inbound PLOGI from a new device (with old PID).
+ */
+ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
+ return;
+ }
+
+ /*
+ * PLOGI crossing each other.
+ */
+ WARN_ON(rport->pwwn != WWN_NULL);
+ bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
+}
+
+/*
+ * Process incoming ECHO.
+ * Since it does not require a login, it is processed here.
+ */
+static void
+bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+ struct fc_echo_s *echo, u16 rx_len)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len, pyld_len;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id);
+
+ /*
+ * Copy the payload (if any) from the echo frame
+ */
+ pyld_len = rx_len - sizeof(struct fchs_s);
+ bfa_trc(port->fcs, rx_len);
+ bfa_trc(port->fcs, pyld_len);
+
+ if (pyld_len > len)
+ memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
+ sizeof(struct fc_echo_s), (echo + 1),
+ (pyld_len - sizeof(struct fc_echo_s)));
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Process incoming RNID.
+ * Since it does not require a login, it is processed here.
+ */
+static void
+bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+ struct fc_rnid_cmd_s *rnid, u16 rx_len)
+{
+ struct fc_rnid_common_id_data_s common_id_data;
+ struct fc_rnid_general_topology_data_s gen_topo_data;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ u16 len;
+ u32 data_format;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_len);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ /*
+ * Check Node Indentification Data Format
+ * We only support General Topology Discovery Format.
+ * For any other requested Data Formats, we return Common Node Id Data
+ * only, as per FC-LS.
+ */
+ bfa_trc(port->fcs, rnid->node_id_data_format);
+ if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
+ data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY;
+ /*
+ * Get General topology data for this port
+ */
+ bfa_fs_port_get_gen_topo_data(port, &gen_topo_data);
+ } else {
+ data_format = RNID_NODEID_DATA_FORMAT_COMMON;
+ }
+
+ /*
+ * Copy the Node Id Info
+ */
+ common_id_data.port_name = bfa_fcs_lport_get_pwwn(port);
+ common_id_data.node_name = bfa_fcs_lport_get_nwwn(port);
+
+ len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, data_format, &common_id_data,
+ &gen_topo_data);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Fill out General Topolpgy Discovery Data for RNID ELS.
+ */
+static void
+bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
+ struct fc_rnid_general_topology_data_s *gen_topo_data)
+{
+ memset(gen_topo_data, 0,
+ sizeof(struct fc_rnid_general_topology_data_s));
+
+ gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
+ gen_topo_data->phy_port_num = 0; /* @todo */
+ gen_topo_data->num_attached_nodes = cpu_to_be32(1);
+}
+
+static void
+bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+
+ bfa_trc(port->fcs, port->fabric->oper_type);
+
+ __port_action[port->fabric->fab_type].init(port);
+ __port_action[port->fabric->fab_type].online(port);
+
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+ "Logical port online: WWN = %s Role = %s\n",
+ lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
+
+ bfad->bfad_flags |= BFAD_PORT_ONLINE;
+}
+
+static void
+bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
+{
+ struct list_head *qe, *qen;
+ struct bfa_fcs_rport_s *rport;
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+
+ bfa_trc(port->fcs, port->fabric->oper_type);
+
+ __port_action[port->fabric->fab_type].offline(port);
+
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+ if (bfa_sm_cmp_state(port->fabric,
+ bfa_fcs_fabric_sm_online) == BFA_TRUE) {
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+ "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
+ lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+ } else {
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+ "Logical port taken offline: WWN = %s Role = %s\n",
+ lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+ }
+
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
+ }
+}
+
+static void
+bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
+{
+ WARN_ON(1);
+}
+
+static void
+bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
+{
+ WARN_ON(1);
+}
+
+static void
+bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
+{
+ WARN_ON(1);
+}
+
+static void
+bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ int len;
+
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, 0);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+static void
+bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Logical port deleted: WWN = %s Role = %s\n",
+ lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
+
+ /* Base port will be deleted by the OS driver */
+ if (port->vport)
+ bfa_fcs_vport_delete_comp(port->vport);
+ else
+ bfa_wc_down(&port->fabric->wc);
+}
+
+
+/*
+ * Unsolicited frame receive handling.
+ */
+void
+bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
+ struct fchs_s *fchs, u16 len)
+{
+ u32 pid = fchs->s_id;
+ struct bfa_fcs_rport_s *rport = NULL;
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ bfa_stats(lport, uf_recvs);
+ bfa_trc(lport->fcs, fchs->type);
+
+ if (!bfa_fcs_lport_is_online(lport)) {
+ /*
+ * In direct attach topology, it is possible to get a PLOGI
+ * before the lport is online due to port feature
+ * (QoS/Trunk/FEC/CR), so send a rjt
+ */
+ if ((fchs->type == FC_TYPE_ELS) &&
+ (els_cmd->els_code == FC_ELS_PLOGI)) {
+ bfa_fcs_lport_send_ls_rjt(lport, fchs,
+ FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
+ FC_LS_RJT_EXP_NO_ADDL_INFO);
+ bfa_stats(lport, plogi_rcvd);
+ } else
+ bfa_stats(lport, uf_recv_drops);
+
+ return;
+ }
+
+ /*
+ * First, handle ELSs that donot require a login.
+ */
+ /*
+ * Handle PLOGI first
+ */
+ if ((fchs->type == FC_TYPE_ELS) &&
+ (els_cmd->els_code == FC_ELS_PLOGI)) {
+ bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
+ return;
+ }
+
+ /*
+ * Handle ECHO separately.
+ */
+ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
+ bfa_fcs_lport_echo(lport, fchs,
+ (struct fc_echo_s *)els_cmd, len);
+ return;
+ }
+
+ /*
+ * Handle RNID separately.
+ */
+ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
+ bfa_fcs_lport_rnid(lport, fchs,
+ (struct fc_rnid_cmd_s *) els_cmd, len);
+ return;
+ }
+
+ if (fchs->type == FC_TYPE_BLS) {
+ if ((fchs->routing == FC_RTG_BASIC_LINK) &&
+ (fchs->cat_info == FC_CAT_ABTS))
+ bfa_fcs_lport_abts_acc(lport, fchs);
+ return;
+ }
+
+ if (fchs->type == FC_TYPE_SERVICES) {
+ /*
+ * Unhandled FC-GS frames. Send a FC-CT Reject
+ */
+ bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
+ CT_NS_EXP_NOADDITIONAL);
+ return;
+ }
+
+ /*
+ * look for a matching remote port ID
+ */
+ rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
+ if (rport) {
+ bfa_trc(rport->fcs, fchs->s_id);
+ bfa_trc(rport->fcs, fchs->d_id);
+ bfa_trc(rport->fcs, fchs->type);
+
+ bfa_fcs_rport_uf_recv(rport, fchs, len);
+ return;
+ }
+
+ /*
+ * Only handles ELS frames for now.
+ */
+ if (fchs->type != FC_TYPE_ELS) {
+ bfa_trc(lport->fcs, fchs->s_id);
+ bfa_trc(lport->fcs, fchs->d_id);
+ /* ignore type FC_TYPE_FC_FSS */
+ if (fchs->type != FC_TYPE_FC_FSS)
+ bfa_sm_fault(lport->fcs, fchs->type);
+ return;
+ }
+
+ bfa_trc(lport->fcs, els_cmd->els_code);
+ if (els_cmd->els_code == FC_ELS_RSCN) {
+ bfa_fcs_lport_scn_process_rscn(lport, fchs, len);
+ return;
+ }
+
+ if (els_cmd->els_code == FC_ELS_LOGO) {
+ /*
+ * @todo Handle LOGO frames received.
+ */
+ return;
+ }
+
+ if (els_cmd->els_code == FC_ELS_PRLI) {
+ /*
+ * @todo Handle PRLI frames received.
+ */
+ return;
+ }
+
+ /*
+ * Unhandled ELS frames. Send a LS_RJT.
+ */
+ bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
+ FC_LS_RJT_EXP_NO_ADDL_INFO);
+
+}
+
+/*
+ * PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (rport->pid == pid)
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pid);
+ return NULL;
+}
+
+/*
+ * OLD_PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (rport->old_pid == pid)
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pid);
+ return NULL;
+}
+
+/*
+ * PWWN based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (wwn_is_equal(rport->pwwn, pwwn))
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pwwn);
+ return NULL;
+}
+
+/*
+ * NWWN based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (wwn_is_equal(rport->nwwn, nwwn))
+ return rport;
+ }
+
+ bfa_trc(port->fcs, nwwn);
+ return NULL;
+}
+
+/*
+ * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port,
+ wwn_t pwwn, u32 pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid)
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pwwn);
+ return NULL;
+}
+
+/*
+ * Called by rport module when new rports are discovered.
+ */
+void
+bfa_fcs_lport_add_rport(
+ struct bfa_fcs_lport_s *port,
+ struct bfa_fcs_rport_s *rport)
+{
+ list_add_tail(&rport->qe, &port->rport_q);
+ port->num_rports++;
+}
+
+/*
+ * Called by rport module to when rports are deleted.
+ */
+void
+bfa_fcs_lport_del_rport(
+ struct bfa_fcs_lport_s *port,
+ struct bfa_fcs_rport_s *rport)
+{
+ WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
+ list_del(&rport->qe);
+ port->num_rports--;
+
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
+}
+
+/*
+ * Called by fabric for base port when fabric login is complete.
+ * Called by vport for virtual ports when FDISC is complete.
+ */
+void
+bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
+}
+
+/*
+ * Called by fabric for base port when fabric goes offline.
+ * Called by vport for virtual ports when virtual port becomes offline.
+ */
+void
+bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
+}
+
+/*
+ * Called by fabric for base port and by vport for virtual ports
+ * when target mode driver is unloaded.
+ */
+void
+bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP);
+}
+
+/*
+ * Called by fabric to delete base lport and associated resources.
+ *
+ * Called by vport to delete lport and associated resources. Should call
+ * bfa_fcs_vport_delete_comp() for vports on completion.
+ */
+void
+bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
+}
+
+/*
+ * Return TRUE if port is online, else return FALSE
+ */
+bfa_boolean_t
+bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
+{
+ return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
+}
+
+/*
+ * Attach time initialization of logical ports.
+ */
+void
+bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
+ u16 vf_id, struct bfa_fcs_vport_s *vport)
+{
+ lport->fcs = fcs;
+ lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
+ lport->vport = vport;
+ lport->lp_tag = (vport) ? vport->lps->bfa_tag :
+ lport->fabric->lps->bfa_tag;
+
+ INIT_LIST_HEAD(&lport->rport_q);
+ lport->num_rports = 0;
+}
+
+/*
+ * Logical port initialization of base or virtual port.
+ * Called by fabric for base port or by vport for virtual ports.
+ */
+
+void
+bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
+ struct bfa_lport_cfg_s *port_cfg)
+{
+ struct bfa_fcs_vport_s *vport = lport->vport;
+ struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+
+ lport->port_cfg = *port_cfg;
+
+ lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
+ lport->port_cfg.roles,
+ lport->fabric->vf_drv,
+ vport ? vport->vport_drv : NULL);
+
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "New logical port created: WWN = %s Role = %s\n",
+ lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
+
+ bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
+ bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
+}
+
+void
+bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port,
+ char *symname)
+{
+ strcpy(port->port_cfg.sym_name.symname, symname);
+
+ if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+ bfa_fcs_lport_ns_util_send_rspn_id(
+ BFA_FCS_GET_NS_FROM_PORT(port), NULL);
+}
+
+/*
+ * fcs_lport_api
+ */
+
+void
+bfa_fcs_lport_get_attr(
+ struct bfa_fcs_lport_s *port,
+ struct bfa_lport_attr_s *port_attr)
+{
+ if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+ port_attr->pid = port->pid;
+ else
+ port_attr->pid = 0;
+
+ port_attr->port_cfg = port->port_cfg;
+
+ if (port->fabric) {
+ port_attr->port_type = port->fabric->oper_type;
+ port_attr->loopback = bfa_sm_cmp_state(port->fabric,
+ bfa_fcs_fabric_sm_loopback);
+ port_attr->authfail =
+ bfa_sm_cmp_state(port->fabric,
+ bfa_fcs_fabric_sm_auth_failed);
+ port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port);
+ memcpy(port_attr->fabric_ip_addr,
+ bfa_fcs_lport_get_fabric_ipaddr(port),
+ BFA_FCS_FABRIC_IPADDR_SZ);
+
+ if (port->vport != NULL) {
+ port_attr->port_type = BFA_PORT_TYPE_VPORT;
+ port_attr->fpma_mac =
+ port->vport->lps->lp_mac;
+ } else {
+ port_attr->fpma_mac =
+ port->fabric->lps->lp_mac;
+ }
+ } else {
+ port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
+ port_attr->state = BFA_LPORT_UNINIT;
+ }
+}
+
+/*
+ * bfa_fcs_lport_fab port fab functions
+ */
+
+/*
+ * Called by port to initialize fabric services of the base port.
+ */
+static void
+bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
+{
+ bfa_fcs_lport_ns_init(port);
+ bfa_fcs_lport_scn_init(port);
+ bfa_fcs_lport_ms_init(port);
+}
+
+/*
+ * Called by port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
+{
+ bfa_fcs_lport_ns_online(port);
+ bfa_fcs_lport_fab_scn_online(port);
+}
+
+/*
+ * Called by port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
+{
+ bfa_fcs_lport_ns_offline(port);
+ bfa_fcs_lport_scn_offline(port);
+ bfa_fcs_lport_ms_offline(port);
+}
+
+/*
+ * bfa_fcs_lport_n2n functions
+ */
+
+/*
+ * Called by fcs/port to initialize N2N topology.
+ */
+static void
+bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
+{
+}
+
+/*
+ * Called by fcs/port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
+ struct bfa_lport_cfg_s *pcfg = &port->port_cfg;
+ struct bfa_fcs_rport_s *rport;
+
+ bfa_trc(port->fcs, pcfg->pwwn);
+
+ /*
+ * If our PWWN is > than that of the r-port, we have to initiate PLOGI
+ * and assign an Address. if not, we need to wait for its PLOGI.
+ *
+ * If our PWWN is < than that of the remote port, it will send a PLOGI
+ * with the PIDs assigned. The rport state machine take care of this
+ * incoming PLOGI.
+ */
+ if (memcmp
+ ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
+ sizeof(wwn_t)) > 0) {
+ port->pid = N2N_LOCAL_PID;
+ bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
+ /*
+ * First, check if we know the device by pwwn.
+ */
+ rport = bfa_fcs_lport_get_rport_by_pwwn(port,
+ n2n_port->rem_port_wwn);
+ if (rport) {
+ bfa_trc(port->fcs, rport->pid);
+ bfa_trc(port->fcs, rport->pwwn);
+ rport->pid = N2N_REMOTE_PID;
+ bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
+ return;
+ }
+
+ /*
+ * In n2n there can be only one rport. Delete the old one
+ * whose pid should be zero, because it is offline.
+ */
+ if (port->num_rports > 0) {
+ rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
+ WARN_ON(rport == NULL);
+ if (rport) {
+ bfa_trc(port->fcs, rport->pwwn);
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ bfa_fcs_rport_create(port, N2N_REMOTE_PID);
+ }
+}
+
+/*
+ * Called by fcs/port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
+
+ bfa_trc(port->fcs, port->pid);
+ port->pid = 0;
+ n2n_port->rem_port_wwn = 0;
+ n2n_port->reply_oxid = 0;
+}
+
+void
+bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
+{
+ int i = 0, j = 0, bit = 0, alpa_bit = 0;
+ u8 k = 0;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa);
+
+ port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid;
+ port->pid = fcport->myalpa;
+ port->pid = bfa_hton3b(port->pid);
+
+ for (i = 0; i < (FC_ALPA_MAX / 8); i++) {
+ for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) {
+ bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]);
+ bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j)));
+ if (bit) {
+ port->port_topo.ploop.alpa_pos_map[k] =
+ loop_alpa_map[(i * 8) + alpa_bit];
+ k++;
+ bfa_trc(port->fcs->bfa, k);
+ bfa_trc(port->fcs->bfa,
+ port->port_topo.ploop.alpa_pos_map[k]);
+ }
+ }
+ }
+ port->port_topo.ploop.num_alpa = k;
+}
+
+/*
+ * Called by fcs/port to initialize Loop topology.
+ */
+static void
+bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port)
+{
+}
+
+/*
+ * Called by fcs/port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port)
+{
+ u8 num_alpa = 0, alpabm_valid = 0;
+ struct bfa_fcs_rport_s *rport;
+ u8 *alpa_map = NULL;
+ int i = 0;
+ u32 pid;
+
+ bfa_fcport_get_loop_attr(port);
+
+ num_alpa = port->port_topo.ploop.num_alpa;
+ alpabm_valid = port->port_topo.ploop.alpabm_valid;
+ alpa_map = port->port_topo.ploop.alpa_pos_map;
+
+ bfa_trc(port->fcs->bfa, port->pid);
+ bfa_trc(port->fcs->bfa, num_alpa);
+ if (alpabm_valid == 1) {
+ for (i = 0; i < num_alpa; i++) {
+ bfa_trc(port->fcs->bfa, alpa_map[i]);
+ if (alpa_map[i] != bfa_hton3b(port->pid)) {
+ pid = alpa_map[i];
+ bfa_trc(port->fcs->bfa, pid);
+ rport = bfa_fcs_lport_get_rport_by_pid(port,
+ bfa_hton3b(pid));
+ if (!rport)
+ rport = bfa_fcs_rport_create(port,
+ bfa_hton3b(pid));
+ }
+ }
+ } else {
+ for (i = 0; i < MAX_ALPA_COUNT; i++) {
+ if (alpa_map[i] != port->pid) {
+ pid = loop_alpa_map[i];
+ bfa_trc(port->fcs->bfa, pid);
+ rport = bfa_fcs_lport_get_rport_by_pid(port,
+ bfa_hton3b(pid));
+ if (!rport)
+ rport = bfa_fcs_rport_create(port,
+ bfa_hton3b(pid));
+ }
+ }
+ }
+}
+
+/*
+ * Called by fcs/port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port)
+{
+}
+
+#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_fdmi_timeout(void *arg);
+static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+ u8 *pyld);
+static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+ u8 *pyld);
+static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+ u8 *pyld);
+static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *
+ fdmi, u8 *pyld);
+static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+ struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
+static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+ struct bfa_fcs_fdmi_port_attr_s *port_attr);
+u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
+
+/*
+ * fcs_fdmi_sm FCS FDMI state machine
+ */
+
+/*
+ * FDMI State Machine events
+ */
+enum port_fdmi_event {
+ FDMISM_EVENT_PORT_ONLINE = 1,
+ FDMISM_EVENT_PORT_OFFLINE = 2,
+ FDMISM_EVENT_RSP_OK = 4,
+ FDMISM_EVENT_RSP_ERROR = 5,
+ FDMISM_EVENT_TIMEOUT = 6,
+ FDMISM_EVENT_RHBA_SENT = 7,
+ FDMISM_EVENT_RPRT_SENT = 8,
+ FDMISM_EVENT_RPA_SENT = 9,
+};
+
+static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_sending_rhba(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_rhba_retry(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_sending_rprt(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_rprt_retry(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_sending_rpa(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_rpa_retry(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+static void bfa_fcs_lport_fdmi_sm_disabled(
+ struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+/*
+ * Start in offline state - awaiting MS to send start.
+ */
+static void
+bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ fdmi->retry_cnt = 0;
+
+ switch (event) {
+ case FDMISM_EVENT_PORT_ONLINE:
+ if (port->vport) {
+ /*
+ * For Vports, register a new port.
+ */
+ bfa_sm_set_state(fdmi,
+ bfa_fcs_lport_fdmi_sm_sending_rprt);
+ bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
+ } else {
+ /*
+ * For a base port, we should first register the HBA
+ * attribute. The HBA attribute also contains the base
+ * port registration.
+ */
+ bfa_sm_set_state(fdmi,
+ bfa_fcs_lport_fdmi_sm_sending_rhba);
+ bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
+ }
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_RHBA_SENT:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
+ &fdmi->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_RSP_ERROR:
+ /*
+ * if max retries have not been reached, start timer for a
+ * delayed retry
+ */
+ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
+ bfa_sm_set_state(fdmi,
+ bfa_fcs_lport_fdmi_sm_rhba_retry);
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
+ &fdmi->timer,
+ bfa_fcs_lport_fdmi_timeout, fdmi,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ /*
+ * set state to offline
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ }
+ break;
+
+ case FDMISM_EVENT_RSP_OK:
+ /*
+ * Initiate Register Port Attributes
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
+ fdmi->retry_cnt = 0;
+ bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_fcxp_discard(fdmi->fcxp);
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba);
+ bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ bfa_timer_stop(&fdmi->timer);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+/*
+* RPRT : Register Port
+ */
+static void
+bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_RPRT_SENT:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
+ &fdmi->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_RSP_ERROR:
+ /*
+ * if max retries have not been reached, start timer for a
+ * delayed retry
+ */
+ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
+ bfa_sm_set_state(fdmi,
+ bfa_fcs_lport_fdmi_sm_rprt_retry);
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
+ &fdmi->timer,
+ bfa_fcs_lport_fdmi_timeout, fdmi,
+ BFA_FCS_RETRY_TIMEOUT);
+
+ } else {
+ /*
+ * set state to offline
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ fdmi->retry_cnt = 0;
+ }
+ break;
+
+ case FDMISM_EVENT_RSP_OK:
+ fdmi->retry_cnt = 0;
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_fcxp_discard(fdmi->fcxp);
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt);
+ bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ bfa_timer_stop(&fdmi->timer);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+/*
+ * Register Port Attributes
+ */
+static void
+bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_RPA_SENT:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
+ &fdmi->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_RSP_ERROR:
+ /*
+ * if max retries have not been reached, start timer for a
+ * delayed retry
+ */
+ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry);
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
+ &fdmi->timer,
+ bfa_fcs_lport_fdmi_timeout, fdmi,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ /*
+ * set state to offline
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ fdmi->retry_cnt = 0;
+ }
+ break;
+
+ case FDMISM_EVENT_RSP_OK:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
+ fdmi->retry_cnt = 0;
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_fcxp_discard(fdmi->fcxp);
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
+ bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
+ break;
+
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ bfa_timer_stop(&fdmi->timer);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case FDMISM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+/*
+ * FDMI is disabled state.
+ */
+static void
+bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ /* No op State. It can only be enabled at Driver Init. */
+}
+
+/*
+* RHBA : Register HBA Attributes.
+ */
+static void
+bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct fchs_s fchs;
+ int len, attr_len;
+ struct bfa_fcxp_s *fcxp;
+ u8 *pyld;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
+ bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE);
+ return;
+ }
+ fdmi->fcxp = fcxp;
+
+ pyld = bfa_fcxp_get_reqbuf(fcxp);
+ memset(pyld, 0, FC_MAX_PDUSZ);
+
+ len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
+ FDMI_RHBA);
+
+ attr_len =
+ bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
+ (u8 *) ((struct ct_hdr_s *) pyld
+ + 1));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, (len + attr_len), &fchs,
+ bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
+}
+
+static u16
+bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct bfa_fcs_fdmi_hba_attr_s hba_attr;
+ struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+ struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
+ struct fdmi_attr_s *attr;
+ u8 *curr_ptr;
+ u16 len, count;
+ u16 templen;
+
+ /*
+ * get hba attributes
+ */
+ bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
+
+ rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
+ rhba->port_list.num_ports = cpu_to_be32(1);
+ rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
+
+ len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
+
+ count = 0;
+ len += sizeof(rhba->hba_attr_blk.attr_count);
+
+ /*
+ * fill out the invididual entries of the HBA attrib Block
+ */
+ curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
+
+ /*
+ * Node Name
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
+ templen = sizeof(wwn_t);
+ memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * Manufacturer
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
+ templen = (u16) strlen(fcs_hba_attr->manufacturer);
+ memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * Serial Number
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
+ templen = (u16) strlen(fcs_hba_attr->serial_num);
+ memcpy(attr->value, fcs_hba_attr->serial_num, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * Model
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
+ templen = (u16) strlen(fcs_hba_attr->model);
+ memcpy(attr->value, fcs_hba_attr->model, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * Model Desc
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
+ templen = (u16) strlen(fcs_hba_attr->model_desc);
+ memcpy(attr->value, fcs_hba_attr->model_desc, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * H/W Version
+ */
+ if (fcs_hba_attr->hw_version[0] != '\0') {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
+ templen = (u16) strlen(fcs_hba_attr->hw_version);
+ memcpy(attr->value, fcs_hba_attr->hw_version, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+
+ /*
+ * Driver Version
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
+ templen = (u16) strlen(fcs_hba_attr->driver_version);
+ memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * Option Rom Version
+ */
+ if (fcs_hba_attr->option_rom_ver[0] != '\0') {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
+ templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
+ memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
+ templen = (u16) strlen(fcs_hba_attr->fw_version);
+ memcpy(attr->value, fcs_hba_attr->fw_version, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * OS Name
+ */
+ if (fcs_hba_attr->os_name[0] != '\0') {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
+ templen = (u16) strlen(fcs_hba_attr->os_name);
+ memcpy(attr->value, fcs_hba_attr->os_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+
+ /*
+ * MAX_CT_PAYLOAD
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
+ templen = sizeof(fcs_hba_attr->max_ct_pyld);
+ memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ /*
+ * Send extended attributes ( FOS 7.1 support )
+ */
+ if (fdmi->retry_cnt == 0) {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODE_SYM_NAME);
+ templen = sizeof(fcs_hba_attr->node_sym_name);
+ memcpy(attr->value, &fcs_hba_attr->node_sym_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_VENDOR_ID);
+ templen = sizeof(fcs_hba_attr->vendor_info);
+ memcpy(attr->value, &fcs_hba_attr->vendor_info, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NUM_PORTS);
+ templen = sizeof(fcs_hba_attr->num_ports);
+ memcpy(attr->value, &fcs_hba_attr->num_ports, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FABRIC_NAME);
+ templen = sizeof(fcs_hba_attr->fabric_name);
+ memcpy(attr->value, &fcs_hba_attr->fabric_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_BIOS_VER);
+ templen = sizeof(fcs_hba_attr->bios_ver);
+ memcpy(attr->value, &fcs_hba_attr->bios_ver, templen);
+ templen = fc_roundup(attr->len, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ count++;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+
+ /*
+ * Update size of payload
+ */
+ len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
+
+ rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
+ return len;
+}
+
+static void
+bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi =
+ (struct bfa_fcs_lport_fdmi_s *) cbarg;
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
+ return;
+ }
+
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+}
+
+/*
+* RPRT : Register Port
+ */
+static void
+bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct fchs_s fchs;
+ u16 len, attr_len;
+ struct bfa_fcxp_s *fcxp;
+ u8 *pyld;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
+ bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE);
+ return;
+ }
+ fdmi->fcxp = fcxp;
+
+ pyld = bfa_fcxp_get_reqbuf(fcxp);
+ memset(pyld, 0, FC_MAX_PDUSZ);
+
+ len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
+ FDMI_RPRT);
+
+ attr_len =
+ bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi,
+ (u8 *) ((struct ct_hdr_s *) pyld
+ + 1));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len + attr_len, &fchs,
+ bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
+}
+
+/*
+ * This routine builds Port Attribute Block that used in RPA, RPRT commands.
+ */
+static u16
+bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
+ u8 *pyld)
+{
+ struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
+ struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
+ struct fdmi_attr_s *attr;
+ u8 *curr_ptr;
+ u16 len;
+ u8 count = 0;
+ u16 templen;
+
+ /*
+ * get port attributes
+ */
+ bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
+
+ len = sizeof(port_attrib->attr_count);
+
+ /*
+ * fill out the invididual entries
+ */
+ curr_ptr = (u8 *) &port_attrib->port_attr;
+
+ /*
+ * FC4 Types
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
+ templen = sizeof(fcs_port_attr.supp_fc4_types);
+ memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len =
+ cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * Supported Speed
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
+ templen = sizeof(fcs_port_attr.supp_speed);
+ memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len =
+ cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * current Port Speed
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
+ templen = sizeof(fcs_port_attr.curr_speed);
+ memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * max frame size
+ */
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
+ templen = sizeof(fcs_port_attr.max_frm_size);
+ memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ /*
+ * OS Device Name
+ */
+ if (fcs_port_attr.os_device_name[0] != '\0') {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
+ templen = (u16) strlen(fcs_port_attr.os_device_name);
+ memcpy(attr->value, fcs_port_attr.os_device_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+ /*
+ * Host Name
+ */
+ if (fcs_port_attr.host_name[0] != '\0') {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
+ templen = (u16) strlen(fcs_port_attr.host_name);
+ memcpy(attr->value, fcs_port_attr.host_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+
+ if (fdmi->retry_cnt == 0) {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_NODE_NAME);
+ templen = sizeof(fcs_port_attr.node_name);
+ memcpy(attr->value, &fcs_port_attr.node_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NAME);
+ templen = sizeof(fcs_port_attr.port_name);
+ memcpy(attr->value, &fcs_port_attr.port_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(attr->len) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ if (fcs_port_attr.port_sym_name.symname[0] != '\0') {
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type =
+ cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SYM_NAME);
+ templen = sizeof(fcs_port_attr.port_sym_name);
+ memcpy(attr->value,
+ &fcs_port_attr.port_sym_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) +
+ sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen +
+ sizeof(attr->type) + sizeof(templen));
+ }
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_TYPE);
+ templen = sizeof(fcs_port_attr.port_type);
+ memcpy(attr->value, &fcs_port_attr.port_type, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_COS);
+ templen = sizeof(fcs_port_attr.scos);
+ memcpy(attr->value, &fcs_port_attr.scos, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FAB_NAME);
+ templen = sizeof(fcs_port_attr.port_fabric_name);
+ memcpy(attr->value, &fcs_port_attr.port_fabric_name, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FC4_TYPE);
+ templen = sizeof(fcs_port_attr.port_act_fc4_type);
+ memcpy(attr->value, fcs_port_attr.port_act_fc4_type,
+ templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_STATE);
+ templen = sizeof(fcs_port_attr.port_state);
+ memcpy(attr->value, &fcs_port_attr.port_state, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+
+ attr = (struct fdmi_attr_s *) curr_ptr;
+ attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NUM_RPRT);
+ templen = sizeof(fcs_port_attr.num_ports);
+ memcpy(attr->value, &fcs_port_attr.num_ports, templen);
+ templen = fc_roundup(templen, sizeof(u32));
+ curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+ len += templen;
+ ++count;
+ attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+ sizeof(templen));
+ }
+
+ /*
+ * Update size of payload
+ */
+ port_attrib->attr_count = cpu_to_be32(count);
+ len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
+ return len;
+}
+
+static u16
+bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
+ u16 len;
+
+ rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs));
+ rprt->port_name = bfa_fcs_lport_get_pwwn(port);
+
+ len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
+ (u8 *) &rprt->port_attr_blk);
+
+ len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
+
+ return len;
+}
+
+static void
+bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi =
+ (struct bfa_fcs_lport_fdmi_s *) cbarg;
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
+ return;
+ }
+
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+}
+
+/*
+* RPA : Register Port Attributes.
+ */
+static void
+bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct fchs_s fchs;
+ u16 len, attr_len;
+ struct bfa_fcxp_s *fcxp;
+ u8 *pyld;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
+ bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE);
+ return;
+ }
+ fdmi->fcxp = fcxp;
+
+ pyld = bfa_fcxp_get_reqbuf(fcxp);
+ memset(pyld, 0, FC_MAX_PDUSZ);
+
+ len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
+ FDMI_RPA);
+
+ attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
+ (u8 *) ((struct ct_hdr_s *) pyld + 1));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len + attr_len, &fchs,
+ bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
+}
+
+static u16
+bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
+ u16 len;
+
+ rpa->port_name = bfa_fcs_lport_get_pwwn(port);
+
+ len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
+ (u8 *) &rpa->port_attr_blk);
+
+ len += sizeof(rpa->port_name);
+
+ return len;
+}
+
+static void
+bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi =
+ (struct bfa_fcs_lport_fdmi_s *) cbarg;
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
+ return;
+ }
+
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+}
+
+static void
+bfa_fcs_lport_fdmi_timeout(void *arg)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg;
+
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
+}
+
+static void
+bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+ struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
+ struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
+
+ memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
+
+ bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
+ hba_attr->manufacturer);
+ bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
+ hba_attr->serial_num);
+ bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
+ hba_attr->model);
+ bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
+ hba_attr->model_desc);
+ bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc,
+ hba_attr->hw_version);
+ bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
+ hba_attr->option_rom_ver);
+ bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
+ hba_attr->fw_version);
+
+ strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ sizeof(hba_attr->driver_version));
+
+ strncpy(hba_attr->os_name, driver_info->host_os_name,
+ sizeof(hba_attr->os_name));
+
+ /*
+ * If there is a patch level, append it
+ * to the os name along with a separator
+ */
+ if (driver_info->host_os_patch[0] != '\0') {
+ strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strncat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(driver_info->host_os_patch));
+ }
+
+ /* Retrieve the max frame size from the port attr */
+ bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
+ hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
+
+ strncpy(hba_attr->node_sym_name.symname,
+ port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
+ strcpy(hba_attr->vendor_info, "BROCADE");
+ hba_attr->num_ports =
+ cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
+ hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
+ strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+
+}
+
+static void
+bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+ struct bfa_fcs_fdmi_port_attr_s *port_attr)
+{
+ struct bfa_fcs_lport_s *port = fdmi->ms->port;
+ struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
+ struct bfa_port_attr_s pport_attr;
+ struct bfa_lport_attr_s lport_attr;
+
+ memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
+
+ /*
+ * get pport attributes from hal
+ */
+ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
+
+ /*
+ * get FC4 type Bitmask
+ */
+ fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
+
+ /*
+ * Supported Speeds
+ */
+ switch (pport_attr.speed_supported) {
+ case BFA_PORT_SPEED_16GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
+ break;
+
+ case BFA_PORT_SPEED_10GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
+ break;
+
+ case BFA_PORT_SPEED_8GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
+ break;
+
+ case BFA_PORT_SPEED_4GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, pport_attr.speed_supported);
+ }
+
+ /*
+ * Current Speed
+ */
+ port_attr->curr_speed = cpu_to_be32(
+ bfa_fcs_fdmi_convert_speed(pport_attr.speed));
+
+ /*
+ * Max PDU Size.
+ */
+ port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize);
+
+ /*
+ * OS device Name
+ */
+ strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ sizeof(port_attr->os_device_name));
+
+ /*
+ * Host name
+ */
+ strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ sizeof(port_attr->host_name));
+
+ port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
+ port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
+
+ strncpy(port_attr->port_sym_name.symname,
+ (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ bfa_fcs_lport_get_attr(port, &lport_attr);
+ port_attr->port_type = cpu_to_be32(lport_attr.port_type);
+ port_attr->scos = pport_attr.cos_supported;
+ port_attr->port_fabric_name = port->fabric->lps->pr_nwwn;
+ fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->port_act_fc4_type);
+ port_attr->port_state = cpu_to_be32(pport_attr.port_state);
+ port_attr->num_ports = cpu_to_be32(port->num_rports);
+}
+
+/*
+ * Convert BFA speed to FDMI format.
+ */
+u32
+bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
+{
+ u32 ret;
+
+ switch (pport_speed) {
+ case BFA_PORT_SPEED_1GBPS:
+ case BFA_PORT_SPEED_2GBPS:
+ ret = pport_speed;
+ break;
+
+ case BFA_PORT_SPEED_4GBPS:
+ ret = FDMI_TRANS_SPEED_4G;
+ break;
+
+ case BFA_PORT_SPEED_8GBPS:
+ ret = FDMI_TRANS_SPEED_8G;
+ break;
+
+ case BFA_PORT_SPEED_10GBPS:
+ ret = FDMI_TRANS_SPEED_10G;
+ break;
+
+ case BFA_PORT_SPEED_16GBPS:
+ ret = FDMI_TRANS_SPEED_16G;
+ break;
+
+ default:
+ ret = FDMI_TRANS_SPEED_UNKNOWN;
+ }
+ return ret;
+}
+
+void
+bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
+
+ fdmi->ms = ms;
+ if (ms->port->fcs->fdmi_enabled)
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+ else
+ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled);
+}
+
+void
+bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
+
+ fdmi->ms = ms;
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
+}
+
+void
+bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms)
+{
+ struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
+
+ fdmi->ms = ms;
+ bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
+}
+
+#define BFA_FCS_MS_CMD_MAX_RETRIES 2
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ms_timeout(void *arg);
+static void bfa_fcs_lport_ms_plogi_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+
+static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ms_gmal_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+/*
+ * fcs_ms_sm FCS MS state machine
+ */
+
+/*
+ * MS State Machine events
+ */
+enum port_ms_event {
+ MSSM_EVENT_PORT_ONLINE = 1,
+ MSSM_EVENT_PORT_OFFLINE = 2,
+ MSSM_EVENT_RSP_OK = 3,
+ MSSM_EVENT_RSP_ERROR = 4,
+ MSSM_EVENT_TIMEOUT = 5,
+ MSSM_EVENT_FCXP_SENT = 6,
+ MSSM_EVENT_PORT_FABRIC_RSCN = 7
+};
+
+static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event);
+/*
+ * Start in offline state - awaiting NS to send start.
+ */
+static void
+bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_PORT_ONLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
+ bfa_fcs_lport_ms_send_plogi(ms, NULL);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+ &ms->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry);
+ ms->port->stats.ms_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+ &ms->timer, bfa_fcs_lport_ms_timeout, ms,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case MSSM_EVENT_RSP_OK:
+ /*
+ * since plogi is done, now invoke MS related sub-modules
+ */
+ bfa_fcs_lport_fdmi_online(ms);
+
+ /*
+ * if this is a Vport, go to online state.
+ */
+ if (ms->port->vport) {
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
+ break;
+ }
+
+ /*
+ * For a base port we need to get the
+ * switch's IP address.
+ */
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
+ bfa_fcs_lport_ms_send_gmal(ms, NULL);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_fcxp_discard(ms->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
+ bfa_fcs_lport_ms_send_plogi(ms, NULL);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_timer_stop(&ms->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ break;
+
+ case MSSM_EVENT_PORT_FABRIC_RSCN:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+ ms->retry_cnt = 0;
+ bfa_fcs_lport_ms_send_gfn(ms, NULL);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+ &ms->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry);
+ ms->port->stats.ms_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+ &ms->timer, bfa_fcs_lport_ms_timeout, ms,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+ bfa_fcs_lport_ms_send_gfn(ms, NULL);
+ ms->retry_cnt = 0;
+ }
+ break;
+
+ case MSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+ bfa_fcs_lport_ms_send_gfn(ms, NULL);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_fcxp_discard(ms->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
+ bfa_fcs_lport_ms_send_gmal(ms, NULL);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_timer_stop(&ms->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+/*
+ * ms_pvt MS local functions
+ */
+
+static void
+bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
+ bfa_fcs_lport_t *port = ms->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->pid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
+ bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE);
+ return;
+ }
+ ms->fcxp = fcxp;
+
+ len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port),
+ port->fabric->lps->pr_nwwn);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ms_gmal_response, (void *)ms,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
+ bfa_fcs_lport_t *port = ms->port;
+ struct ct_hdr_s *cthdr = NULL;
+ struct fcgs_gmal_resp_s *gmal_resp;
+ struct fcgs_gmal_entry_s *gmal_entry;
+ u32 num_entries;
+ u8 *rsp_str;
+
+ bfa_trc(port->fcs, req_status);
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
+
+ num_entries = be32_to_cpu(gmal_resp->ms_len);
+ if (num_entries == 0) {
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ return;
+ }
+ /*
+ * The response could contain multiple Entries.
+ * Entries for SNMP interface, etc.
+ * We look for the entry with a telnet prefix.
+ * First "http://" entry refers to IP addr
+ */
+
+ gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma;
+ while (num_entries > 0) {
+ if (strncmp(gmal_entry->prefix,
+ CT_GMAL_RESP_PREFIX_HTTP,
+ sizeof(gmal_entry->prefix)) == 0) {
+
+ /*
+ * if the IP address is terminating with a '/',
+ * remove it.
+ * Byte 0 consists of the length of the string.
+ */
+ rsp_str = &(gmal_entry->prefix[0]);
+ if (rsp_str[gmal_entry->len-1] == '/')
+ rsp_str[gmal_entry->len-1] = 0;
+
+ /* copy IP Address to fabric */
+ strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ gmal_entry->ip_addr,
+ BFA_FCS_FABRIC_IPADDR_SZ);
+ break;
+ } else {
+ --num_entries;
+ ++gmal_entry;
+ }
+ }
+
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+}
+
+static void
+bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+ &ms->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry);
+ ms->port->stats.ms_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+ &ms->timer, bfa_fcs_lport_ms_timeout, ms,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
+ ms->retry_cnt = 0;
+ }
+ break;
+
+ case MSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_fcxp_discard(ms->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
+ enum port_ms_event event)
+{
+ bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+ bfa_trc(ms->port->fcs, event);
+
+ switch (event) {
+ case MSSM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+ bfa_fcs_lport_ms_send_gfn(ms, NULL);
+ break;
+
+ case MSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+ bfa_timer_stop(&ms->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ms->port->fcs, event);
+ }
+}
+/*
+ * ms_pvt MS local functions
+ */
+
+static void
+bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
+ bfa_fcs_lport_t *port = ms->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->pid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
+ bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE);
+ return;
+ }
+ ms->fcxp = fcxp;
+
+ len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port),
+ port->fabric->lps->pr_nwwn);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ms_gfn_response, (void *)ms,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
+ bfa_fcs_lport_t *port = ms->port;
+ struct ct_hdr_s *cthdr = NULL;
+ wwn_t *gfn_resp;
+
+ bfa_trc(port->fcs, req_status);
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ gfn_resp = (wwn_t *)(cthdr + 1);
+ /* check if it has actually changed */
+ if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port),
+ gfn_resp, sizeof(wwn_t)) != 0)) {
+ bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
+ }
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * ms_pvt MS local functions
+ */
+
+static void
+bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
+ struct bfa_fcs_lport_s *port = ms->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->pid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ms_plogi_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
+ bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE);
+ return;
+ }
+ ms->fcxp = fcxp;
+
+ len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_hton3b(FC_MGMT_SERVER),
+ bfa_fcs_lport_get_fcid(port), 0,
+ port->port_cfg.pwwn, port->port_cfg.nwwn,
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ms_plogi_response, (void *)ms,
+ FC_MAX_PDUSZ, FC_ELS_TOV);
+
+ port->stats.ms_plogi_sent++;
+ bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ms->port;
+ struct fc_els_cmd_s *els_cmd;
+ struct fc_ls_rjt_s *ls_rjt;
+
+ bfa_trc(port->fcs, req_status);
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ port->stats.ms_plogi_rsp_err++;
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ switch (els_cmd->els_code) {
+
+ case FC_ELS_ACC:
+ if (rsp_len < sizeof(struct fc_logi_s)) {
+ bfa_trc(port->fcs, rsp_len);
+ port->stats.ms_plogi_acc_err++;
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ break;
+ }
+ port->stats.ms_plogi_accepts++;
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
+ break;
+
+ case FC_ELS_LS_RJT:
+ ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ bfa_trc(port->fcs, ls_rjt->reason_code);
+ bfa_trc(port->fcs, ls_rjt->reason_code_expl);
+
+ port->stats.ms_rejects++;
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ break;
+
+ default:
+ port->stats.ms_plogi_unknown_rsp++;
+ bfa_trc(port->fcs, els_cmd->els_code);
+ bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+ }
+}
+
+static void
+bfa_fcs_lport_ms_timeout(void *arg)
+{
+ struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg;
+
+ ms->port->stats.ms_timeouts++;
+ bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
+}
+
+
+void
+bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+ ms->port = port;
+ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+
+ /*
+ * Invoke init routines of sub modules.
+ */
+ bfa_fcs_lport_fdmi_init(ms);
+}
+
+void
+bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+ ms->port = port;
+ bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
+ bfa_fcs_lport_fdmi_offline(ms);
+}
+
+void
+bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+ ms->port = port;
+ bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
+}
+void
+bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+ /* todo. Handle this only when in Online state */
+ if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online))
+ bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
+}
+
+/*
+ * @page ns_sm_info VPORT NS State Machine
+ *
+ * @section ns_sm_interactions VPORT NS State Machine Interactions
+ *
+ * @section ns_sm VPORT NS State Machine
+ * img ns_sm.jpg
+ */
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_ns_timeout(void *arg);
+static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_ns_process_gidft_pids(
+ struct bfa_fcs_lport_s *port,
+ u32 *pid_buf, u32 n_pids);
+
+static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
+/*
+ * fcs_ns_sm FCS nameserver interface state machine
+ */
+
+/*
+ * VPort NS State Machine events
+ */
+enum vport_ns_event {
+ NSSM_EVENT_PORT_ONLINE = 1,
+ NSSM_EVENT_PORT_OFFLINE = 2,
+ NSSM_EVENT_PLOGI_SENT = 3,
+ NSSM_EVENT_RSP_OK = 4,
+ NSSM_EVENT_RSP_ERROR = 5,
+ NSSM_EVENT_TIMEOUT = 6,
+ NSSM_EVENT_NS_QUERY = 7,
+ NSSM_EVENT_RSPNID_SENT = 8,
+ NSSM_EVENT_RFTID_SENT = 9,
+ NSSM_EVENT_RFFID_SENT = 10,
+ NSSM_EVENT_GIDFT_SENT = 11,
+ NSSM_EVENT_RNNID_SENT = 12,
+ NSSM_EVENT_RSNN_NN_SENT = 13,
+};
+
+static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rspn_id(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rft_id(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rff_id(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_gid_ft(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rnn_id(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_sending_rsnn_nn(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+static void bfa_fcs_lport_ns_sm_rsnn_nn_retry(
+ struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event);
+/*
+ * Start in offline state - awaiting linkup
+ */
+static void
+bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_PORT_ONLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
+ bfa_fcs_lport_ns_send_plogi(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_PLOGI_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry);
+ ns->port->stats.ns_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+ ns->num_rnnid_retries = 0;
+ bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_discard(ns->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
+ bfa_fcs_lport_ns_send_plogi(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RNNID_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+ ns->num_rnnid_retries = 0;
+ ns->num_rsnn_nn_retries = 0;
+ bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) {
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry);
+ ns->port->stats.ns_retries++;
+ ns->num_rnnid_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_sm_set_state(ns,
+ bfa_fcs_lport_ns_sm_sending_rspn_id);
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ }
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_fcxp_discard(ns->fcxp);
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+ bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSNN_NN_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
+ ns->num_rsnn_nn_retries = 0;
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) {
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry);
+ ns->port->stats.ns_retries++;
+ ns->num_rsnn_nn_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout,
+ ns, BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_sm_set_state(ns,
+ bfa_fcs_lport_ns_sm_sending_rspn_id);
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ }
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_discard(ns->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+ bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSPNID_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry);
+ ns->port->stats.ns_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
+ bfa_fcs_lport_ns_send_rft_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_fcxp_discard(ns->fcxp);
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ /*
+ * Retry Timer Expired. Re-send
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
+ bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RFTID_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+ /* Now move to register FC4 Features */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
+ bfa_fcs_lport_ns_send_rff_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry);
+ ns->port->stats.ns_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_discard(ns->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
+ bfa_fcs_lport_ns_send_rft_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RFFID_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+
+ /*
+ * If min cfg mode is enabled, we donot initiate rport
+ * discovery with the fabric. Instead, we will retrieve the
+ * boot targets from HAL/FW.
+ */
+ if (__fcs_min_cfg(ns->port->fcs)) {
+ bfa_fcs_lport_ns_boot_target_disc(ns->port);
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
+ return;
+ }
+
+ /*
+ * If the port role is Initiator Mode issue NS query.
+ * If it is Target Mode, skip this and go to online.
+ */
+ if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
+ bfa_sm_set_state(ns,
+ bfa_fcs_lport_ns_sm_sending_gid_ft);
+ bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
+ }
+ /*
+ * kick off mgmt srvr state machine
+ */
+ bfa_fcs_lport_ms_online(ns->port);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ /*
+ * Start timer for a delayed retry
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry);
+ ns->port->stats.ns_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_discard(ns->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
+ bfa_fcs_lport_ns_send_rff_id(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+static void
+bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_GIDFT_SENT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_RSP_OK:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
+ break;
+
+ case NSSM_EVENT_RSP_ERROR:
+ /*
+ * TBD: for certain reject codes, we don't need to retry
+ */
+ /*
+ * Start timer for a delayed retry
+ */
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry);
+ ns->port->stats.ns_retries++;
+ bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+ &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_fcxp_discard(ns->fcxp);
+ break;
+
+ case NSSM_EVENT_NS_QUERY:
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft);
+ bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
+ break;
+
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ bfa_timer_stop(&ns->timer);
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
+ enum vport_ns_event event)
+{
+ bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+ bfa_trc(ns->port->fcs, event);
+
+ switch (event) {
+ case NSSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+ break;
+
+ case NSSM_EVENT_NS_QUERY:
+ /*
+ * If the port role is Initiator Mode issue NS query.
+ * If it is Target Mode, skip this and go to online.
+ */
+ if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
+ bfa_sm_set_state(ns,
+ bfa_fcs_lport_ns_sm_sending_gid_ft);
+ bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
+ };
+ break;
+
+ default:
+ bfa_sm_fault(ns->port->fcs, event);
+ }
+}
+
+
+
+/*
+ * ns_pvt Nameserver local functions
+ */
+
+static void
+bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->pid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_plogi_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_hton3b(FC_NAME_SERVER),
+ bfa_fcs_lport_get_fcid(port), 0,
+ port->port_cfg.pwwn, port->port_cfg.nwwn,
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_plogi_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_ELS_TOV);
+ port->stats.ns_plogi_sent++;
+
+ bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ /* struct fc_logi_s *plogi_resp; */
+ struct fc_els_cmd_s *els_cmd;
+ struct fc_ls_rjt_s *ls_rjt;
+
+ bfa_trc(port->fcs, req_status);
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_plogi_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ switch (els_cmd->els_code) {
+
+ case FC_ELS_ACC:
+ if (rsp_len < sizeof(struct fc_logi_s)) {
+ bfa_trc(port->fcs, rsp_len);
+ port->stats.ns_plogi_acc_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ break;
+ }
+ port->stats.ns_plogi_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ break;
+
+ case FC_ELS_LS_RJT:
+ ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ bfa_trc(port->fcs, ls_rjt->reason_code);
+ bfa_trc(port->fcs, ls_rjt->reason_code_expl);
+
+ port->stats.ns_rejects++;
+
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ break;
+
+ default:
+ port->stats.ns_plogi_unknown_rsp++;
+ bfa_trc(port->fcs, els_cmd->els_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ }
+}
+
+/*
+ * Register node name for port_id
+ */
+static void
+bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rnnid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE);
+ return;
+ }
+
+ ns->fcxp = fcxp;
+
+ len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port),
+ bfa_fcs_lport_get_fcid(port),
+ bfa_fcs_lport_get_nwwn(port));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rnn_id_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rnnid_sent++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rnnid_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rnnid_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rnnid_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register the symbolic node name for a given node name.
+ */
+static void
+bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+ u8 *nsymbl;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rsnn_nn_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name(
+ bfa_fcs_get_base_port(port->fcs)));
+
+ len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port),
+ bfa_fcs_lport_get_nwwn(port), nsymbl);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rsnn_nn_sent++;
+
+ bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rsnn_nn_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rsnn_nn_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rsnn_nn_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register the symbolic port name.
+ */
+static void
+bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+ u8 symbl[256];
+ u8 *psymbl = &symbl[0];
+
+ memset(symbl, 0, sizeof(symbl));
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rspnid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ /*
+ * for V-Port, form a Port Symbolic Name
+ */
+ if (port->vport) {
+ /*
+ * For Vports, we append the vport's port symbolic name
+ * to that of the base port.
+ */
+
+ strncpy((char *)psymbl,
+ (char *) &
+ (bfa_fcs_lport_get_psym_name
+ (bfa_fcs_get_base_port(port->fcs))),
+ strlen((char *) &
+ bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
+ (port->fcs))));
+
+ /* Ensure we have a null terminating string. */
+ ((char *)psymbl)[strlen((char *) &
+ bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
+ (port->fcs)))] = 0;
+ strncat((char *)psymbl,
+ (char *) &(bfa_fcs_lport_get_psym_name(port)),
+ strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ } else {
+ psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
+ }
+
+ len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0, psymbl);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rspn_id_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rspnid_sent++;
+
+ bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rspnid_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rspnid_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rspnid_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register FC4-Types
+ */
+static void
+bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rftid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rft_id_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rftid_sent++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rftid_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rftid_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rftid_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register FC4-Features : Should be done after RFT_ID
+ */
+static void
+bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+ u8 fc4_ftrs = 0;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_rffid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
+ fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
+
+ len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0,
+ FC_TYPE_FCP, fc4_ftrs);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_rff_id_response, (void *)ns,
+ FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ port->stats.ns_rffid_sent++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_rffid_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ port->stats.ns_rffid_accepts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ return;
+ }
+
+ port->stats.ns_rffid_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+
+ if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
+ /* if this command is not supported, we don't retry */
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ } else
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+/*
+ * Query Fabric for FC4-Types Devices.
+ *
+* TBD : Need to use a local (FCS private) response buffer, since the response
+ * can be larger than 2K.
+ */
+static void
+bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->pid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ port->stats.ns_gidft_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE);
+ return;
+ }
+ ns->fcxp = fcxp;
+
+ /*
+ * This query is only initiated for FCP initiator mode.
+ */
+ len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ ns->port->pid, FC_TYPE_FCP);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_ns_gid_ft_response, (void *)ns,
+ bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV);
+
+ port->stats.ns_gidft_sent++;
+
+ bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct ct_hdr_s *cthdr = NULL;
+ u32 n_pids;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ port->stats.ns_gidft_rsp_err++;
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ if (resid_len != 0) {
+ /*
+ * TBD : we will need to allocate a larger buffer & retry the
+ * command
+ */
+ bfa_trc(port->fcs, rsp_len);
+ bfa_trc(port->fcs, resid_len);
+ return;
+ }
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ switch (cthdr->cmd_rsp_code) {
+
+ case CT_RSP_ACCEPT:
+
+ port->stats.ns_gidft_accepts++;
+ n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
+ bfa_trc(port->fcs, n_pids);
+ bfa_fcs_lport_ns_process_gidft_pids(port,
+ (u32 *) (cthdr + 1),
+ n_pids);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ break;
+
+ case CT_RSP_REJECT:
+
+ /*
+ * Check the reason code & explanation.
+ * There may not have been any FC4 devices in the fabric
+ */
+ port->stats.ns_gidft_rejects++;
+ bfa_trc(port->fcs, cthdr->reason_code);
+ bfa_trc(port->fcs, cthdr->exp_code);
+
+ if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
+ && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {
+
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+ } else {
+ /*
+ * for all other errors, retry
+ */
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ }
+ break;
+
+ default:
+ port->stats.ns_gidft_unknown_rsp++;
+ bfa_trc(port->fcs, cthdr->cmd_rsp_code);
+ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+ }
+}
+
+/*
+ * This routine will be called by bfa_timer on timer timeouts.
+ *
+ * param[in] port - pointer to bfa_fcs_lport_t.
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+static void
+bfa_fcs_lport_ns_timeout(void *arg)
+{
+ struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg;
+
+ ns->port->stats.ns_timeouts++;
+ bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
+}
+
+/*
+ * Process the PID list in GID_FT response
+ */
+static void
+bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
+ u32 n_pids)
+{
+ struct fcgs_gidft_resp_s *gidft_entry;
+ struct bfa_fcs_rport_s *rport;
+ u32 ii;
+ struct bfa_fcs_fabric_s *fabric = port->fabric;
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe;
+ u8 found = 0;
+
+ for (ii = 0; ii < n_pids; ii++) {
+ gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
+
+ if (gidft_entry->pid == port->pid)
+ continue;
+
+ /*
+ * Ignore PID if it is of base port
+ * (Avoid vports discovering base port as remote port)
+ */
+ if (gidft_entry->pid == fabric->bport.pid)
+ continue;
+
+ /*
+ * Ignore PID if it is of vport created on the same base port
+ * (Avoid vport discovering every other vport created on the
+ * same port as remote port)
+ */
+ list_for_each(qe, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ if (vport->lport.pid == gidft_entry->pid)
+ found = 1;
+ }
+
+ if (found) {
+ found = 0;
+ continue;
+ }
+
+ /*
+ * Check if this rport already exists
+ */
+ rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
+ if (rport == NULL) {
+ /*
+ * this is a new device. create rport
+ */
+ rport = bfa_fcs_rport_create(port, gidft_entry->pid);
+ } else {
+ /*
+ * this rport already exists
+ */
+ bfa_fcs_rport_scn(rport);
+ }
+
+ bfa_trc(port->fcs, gidft_entry->pid);
+
+ /*
+ * if the last entry bit is set, bail out.
+ */
+ if (gidft_entry->last)
+ return;
+ }
+}
+
+/*
+ * fcs_ns_public FCS nameserver public interfaces
+ */
+
+/*
+ * Functions called by port/fab.
+ * These will send relevant Events to the ns state machine.
+ */
+void
+bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+ ns->port = port;
+ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+}
+
+void
+bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+ ns->port = port;
+ bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
+}
+
+void
+bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+ ns->port = port;
+ bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
+}
+
+void
+bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+ bfa_trc(port->fcs, port->pid);
+ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online))
+ bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
+}
+
+static void
+bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
+{
+
+ struct bfa_fcs_rport_s *rport;
+ u8 nwwns;
+ wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
+ int ii;
+
+ bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
+
+ for (ii = 0 ; ii < nwwns; ++ii) {
+ rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
+ WARN_ON(!rport);
+ }
+}
+
+void
+bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_ns_s *ns = cbarg;
+ struct bfa_fcs_lport_s *port = ns->port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ u8 symbl[256];
+ u8 *psymbl = &symbl[0];
+ int len;
+
+ /* Avoid sending RSPN in the following states. */
+ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) ||
+ bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry))
+ return;
+
+ memset(symbl, 0, sizeof(symbl));
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp) {
+ port->stats.ns_rspnid_alloc_wait++;
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+ bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE);
+ return;
+ }
+
+ ns->fcxp = fcxp;
+
+ if (port->vport) {
+ /*
+ * For Vports, we append the vport's port symbolic name
+ * to that of the base port.
+ */
+ strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ (bfa_fcs_get_base_port(port->fcs))),
+ strlen((char *)&bfa_fcs_lport_get_psym_name(
+ bfa_fcs_get_base_port(port->fcs))));
+
+ /* Ensure we have a null terminating string. */
+ ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
+ bfa_fcs_get_base_port(port->fcs)))] = 0;
+
+ strncat((char *)psymbl,
+ (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ }
+
+ len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0, psymbl);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+
+ port->stats.ns_rspnid_sent++;
+}
+
+/*
+ * FCS SCN
+ */
+
+#define FC_QOS_RSCN_EVENT 0x0c
+#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_lport_scn_scr_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs);
+static void bfa_fcs_lport_scn_timeout(void *arg);
+
+/*
+ * fcs_scm_sm FCS SCN state machine
+ */
+
+/*
+ * VPort SCN State Machine events
+ */
+enum port_scn_event {
+ SCNSM_EVENT_PORT_ONLINE = 1,
+ SCNSM_EVENT_PORT_OFFLINE = 2,
+ SCNSM_EVENT_RSP_OK = 3,
+ SCNSM_EVENT_RSP_ERROR = 4,
+ SCNSM_EVENT_TIMEOUT = 5,
+ SCNSM_EVENT_SCR_SENT = 6,
+};
+
+static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event);
+static void bfa_fcs_lport_scn_sm_sending_scr(
+ struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event);
+static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event);
+static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event);
+static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event);
+
+/*
+ * Starting state - awaiting link up.
+ */
+static void
+bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event)
+{
+ switch (event) {
+ case SCNSM_EVENT_PORT_ONLINE:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
+ bfa_fcs_lport_scn_send_scr(scn, NULL);
+ break;
+
+ case SCNSM_EVENT_PORT_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(scn->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event)
+{
+ switch (event) {
+ case SCNSM_EVENT_SCR_SENT:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr);
+ break;
+
+ case SCNSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+ bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
+ break;
+
+ default:
+ bfa_sm_fault(scn->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event)
+{
+ struct bfa_fcs_lport_s *port = scn->port;
+
+ switch (event) {
+ case SCNSM_EVENT_RSP_OK:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online);
+ break;
+
+ case SCNSM_EVENT_RSP_ERROR:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry);
+ bfa_timer_start(port->fcs->bfa, &scn->timer,
+ bfa_fcs_lport_scn_timeout, scn,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case SCNSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+ bfa_fcxp_discard(scn->fcxp);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event)
+{
+ switch (event) {
+ case SCNSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
+ bfa_fcs_lport_scn_send_scr(scn, NULL);
+ break;
+
+ case SCNSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+ bfa_timer_stop(&scn->timer);
+ break;
+
+ default:
+ bfa_sm_fault(scn->port->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
+ enum port_scn_event event)
+{
+ switch (event) {
+ case SCNSM_EVENT_PORT_OFFLINE:
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+ break;
+
+ default:
+ bfa_sm_fault(scn->port->fcs, event);
+ }
+}
+
+
+
+/*
+ * fcs_scn_private FCS SCN private functions
+ */
+
+/*
+ * This routine will be called to send a SCR command.
+ */
+static void
+bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_lport_scn_s *scn = scn_cbarg;
+ struct bfa_fcs_lport_s *port = scn->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(port->fcs, port->pid);
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
+ bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE);
+ return;
+ }
+ scn->fcxp = fcxp;
+
+ /* Handle VU registrations for Base port only */
+ if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
+ len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ port->fabric->lps->brcd_switch,
+ port->pid, 0);
+ } else {
+ len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ BFA_FALSE,
+ port->pid, 0);
+ }
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs,
+ bfa_fcs_lport_scn_scr_response,
+ (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+ bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
+}
+
+static void
+bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+ void *cbarg, bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg;
+ struct bfa_fcs_lport_s *port = scn->port;
+ struct fc_els_cmd_s *els_cmd;
+ struct fc_ls_rjt_s *ls_rjt;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(port->fcs, req_status);
+ bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
+ return;
+ }
+
+ els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ switch (els_cmd->els_code) {
+
+ case FC_ELS_ACC:
+ bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
+ break;
+
+ case FC_ELS_LS_RJT:
+
+ ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ bfa_trc(port->fcs, ls_rjt->reason_code);
+ bfa_trc(port->fcs, ls_rjt->reason_code_expl);
+
+ bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
+ break;
+
+ default:
+ bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
+ }
+}
+
+/*
+ * Send a LS Accept
+ */
+static void
+bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * This routine will be called by bfa_timer on timer timeouts.
+ *
+ * param[in] vport - pointer to bfa_fcs_lport_t.
+ * param[out] vport_status - pointer to return vport status in
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+static void
+bfa_fcs_lport_scn_timeout(void *arg)
+{
+ struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg;
+
+ bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
+}
+
+
+
+/*
+ * fcs_scn_public FCS state change notification public interfaces
+ */
+
+/*
+ * Functions called by port/fab
+ */
+void
+bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
+
+ scn->port = port;
+ bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+}
+
+void
+bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
+
+ scn->port = port;
+ bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
+}
+
+void
+bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port)
+{
+ struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
+
+ scn->port = port;
+ bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
+}
+
+static void
+bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct bfa_fcs_fabric_s *fabric = port->fabric;
+ struct bfa_fcs_vport_s *vport;
+ struct list_head *qe;
+
+ bfa_trc(port->fcs, rpid);
+
+ /*
+ * Ignore PID if it is of base port or of vports created on the
+ * same base port. It is to avoid vports discovering base port or
+ * other vports created on same base port as remote port
+ */
+ if (rpid == fabric->bport.pid)
+ return;
+
+ list_for_each(qe, &fabric->vport_q) {
+ vport = (struct bfa_fcs_vport_s *) qe;
+ if (vport->lport.pid == rpid)
+ return;
+ }
+ /*
+ * If this is an unknown device, then it just came online.
+ * Otherwise let rport handle the RSCN event.
+ */
+ rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
+ if (!rport)
+ rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
+
+ if (rport == NULL) {
+ /*
+ * If min cfg mode is enabled, we donot need to
+ * discover any new rports.
+ */
+ if (!__fcs_min_cfg(port->fcs))
+ rport = bfa_fcs_rport_create(port, rpid);
+ } else
+ bfa_fcs_rport_scn(rport);
+}
+
+/*
+ * rscn format based PID comparison
+ */
+#define __fc_pid_match(__c0, __c1, __fmt) \
+ (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \
+ (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \
+ ((__c0)[0] == (__c1)[0])) || \
+ (((__fmt) == FC_RSCN_FORMAT_AREA) && \
+ ((__c0)[0] == (__c1)[0]) && \
+ ((__c0)[1] == (__c1)[1])))
+
+static void
+bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port,
+ enum fc_rscn_format format,
+ u32 rscn_pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe, *qe_next;
+ u8 *c0, *c1;
+
+ bfa_trc(port->fcs, format);
+ bfa_trc(port->fcs, rscn_pid);
+
+ c0 = (u8 *) &rscn_pid;
+
+ list_for_each_safe(qe, qe_next, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ c1 = (u8 *) &rport->pid;
+ if (__fc_pid_match(c0, c1, format))
+ bfa_fcs_rport_scn(rport);
+ }
+}
+
+
+void
+bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
+ struct fchs_s *fchs, u32 len)
+{
+ struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
+ int num_entries;
+ u32 rscn_pid;
+ bfa_boolean_t nsquery = BFA_FALSE, found;
+ int i = 0, j;
+
+ num_entries =
+ (be16_to_cpu(rscn->payldlen) -
+ sizeof(u32)) / sizeof(rscn->event[0]);
+
+ bfa_trc(port->fcs, num_entries);
+
+ port->stats.num_rscn++;
+
+ bfa_fcs_lport_scn_send_ls_acc(port, fchs);
+
+ for (i = 0; i < num_entries; i++) {
+ rscn_pid = rscn->event[i].portid;
+
+ bfa_trc(port->fcs, rscn->event[i].format);
+ bfa_trc(port->fcs, rscn_pid);
+
+ /* check for duplicate entries in the list */
+ found = BFA_FALSE;
+ for (j = 0; j < i; j++) {
+ if (rscn->event[j].portid == rscn_pid) {
+ found = BFA_TRUE;
+ break;
+ }
+ }
+
+ /* if found in down the list, pid has been already processed */
+ if (found) {
+ bfa_trc(port->fcs, rscn_pid);
+ continue;
+ }
+
+ switch (rscn->event[i].format) {
+ case FC_RSCN_FORMAT_PORTID:
+ if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
+ /*
+ * Ignore this event.
+ * f/w would have processed it
+ */
+ bfa_trc(port->fcs, rscn_pid);
+ } else {
+ port->stats.num_portid_rscn++;
+ bfa_fcs_lport_scn_portid_rscn(port, rscn_pid);
+ }
+ break;
+
+ case FC_RSCN_FORMAT_FABRIC:
+ if (rscn->event[i].qualifier ==
+ FC_FABRIC_NAME_RSCN_EVENT) {
+ bfa_fcs_lport_ms_fabric_rscn(port);
+ break;
+ }
+ /* !!!!!!!!! Fall Through !!!!!!!!!!!!! */
+
+ case FC_RSCN_FORMAT_AREA:
+ case FC_RSCN_FORMAT_DOMAIN:
+ nsquery = BFA_TRUE;
+ bfa_fcs_lport_scn_multiport_rscn(port,
+ rscn->event[i].format,
+ rscn_pid);
+ break;
+
+
+ default:
+ WARN_ON(1);
+ nsquery = BFA_TRUE;
+ }
+ }
+
+ /*
+ * If any of area, domain or fabric RSCN is received, do a fresh
+ * discovery to find new devices.
+ */
+ if (nsquery)
+ bfa_fcs_lport_ns_query(port);
+}
+
+/*
+ * BFA FCS port
+ */
+/*
+ * fcs_port_api BFA FCS port API
+ */
+struct bfa_fcs_lport_s *
+bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
+{
+ return &fcs->fabric.bport;
+}
+
+wwn_t
+bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
+ int nrports, bfa_boolean_t bwwn)
+{
+ struct list_head *qh, *qe;
+ struct bfa_fcs_rport_s *rport = NULL;
+ int i;
+ struct bfa_fcs_s *fcs;
+
+ if (port == NULL || nrports == 0)
+ return (wwn_t) 0;
+
+ fcs = port->fcs;
+ bfa_trc(fcs, (u32) nrports);
+
+ i = 0;
+ qh = &port->rport_q;
+ qe = bfa_q_first(qh);
+
+ while ((qe != qh) && (i < nrports)) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
+ qe = bfa_q_next(qe);
+ bfa_trc(fcs, (u32) rport->pwwn);
+ bfa_trc(fcs, rport->pid);
+ bfa_trc(fcs, i);
+ continue;
+ }
+
+ if (bwwn) {
+ if (!memcmp(&wwn, &rport->pwwn, 8))
+ break;
+ } else {
+ if (i == index)
+ break;
+ }
+
+ i++;
+ qe = bfa_q_next(qe);
+ }
+
+ bfa_trc(fcs, i);
+ if (rport)
+ return rport->pwwn;
+ else
+ return (wwn_t) 0;
+}
+
+void
+bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+ struct bfa_rport_qualifier_s rports[], int *nrports)
+{
+ struct list_head *qh, *qe;
+ struct bfa_fcs_rport_s *rport = NULL;
+ int i;
+ struct bfa_fcs_s *fcs;
+
+ if (port == NULL || rports == NULL || *nrports == 0)
+ return;
+
+ fcs = port->fcs;
+ bfa_trc(fcs, (u32) *nrports);
+
+ i = 0;
+ qh = &port->rport_q;
+ qe = bfa_q_first(qh);
+
+ while ((qe != qh) && (i < *nrports)) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
+ qe = bfa_q_next(qe);
+ bfa_trc(fcs, (u32) rport->pwwn);
+ bfa_trc(fcs, rport->pid);
+ bfa_trc(fcs, i);
+ continue;
+ }
+
+ if (!rport->pwwn && !rport->pid) {
+ qe = bfa_q_next(qe);
+ continue;
+ }
+
+ rports[i].pwwn = rport->pwwn;
+ rports[i].pid = rport->pid;
+
+ i++;
+ qe = bfa_q_next(qe);
+ }
+
+ bfa_trc(fcs, i);
+ *nrports = i;
+}
+
+/*
+ * Iterate's through all the rport's in the given port to
+ * determine the maximum operating speed.
+ *
+ * !!!! To be used in TRL Functionality only !!!!
+ */
+bfa_port_speed_t
+bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
+{
+ struct list_head *qh, *qe;
+ struct bfa_fcs_rport_s *rport = NULL;
+ struct bfa_fcs_s *fcs;
+ bfa_port_speed_t max_speed = 0;
+ struct bfa_port_attr_s port_attr;
+ bfa_port_speed_t port_speed, rport_speed;
+ bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
+
+
+ if (port == NULL)
+ return 0;
+
+ fcs = port->fcs;
+
+ /* Get Physical port's current speed */
+ bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
+ port_speed = port_attr.speed;
+ bfa_trc(fcs, port_speed);
+
+ qh = &port->rport_q;
+ qe = bfa_q_first(qh);
+
+ while (qe != qh) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
+ (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
+ (rport->scsi_function != BFA_RPORT_TARGET)) {
+ qe = bfa_q_next(qe);
+ continue;
+ }
+
+ rport_speed = rport->rpf.rpsc_speed;
+ if ((trl_enabled) && (rport_speed ==
+ BFA_PORT_SPEED_UNKNOWN)) {
+ /* Use default ratelim speed setting */
+ rport_speed =
+ bfa_fcport_get_ratelim_speed(port->fcs->bfa);
+ }
+
+ if (rport_speed > max_speed)
+ max_speed = rport_speed;
+
+ qe = bfa_q_next(qe);
+ }
+
+ if (max_speed > port_speed)
+ max_speed = port_speed;
+
+ bfa_trc(fcs, max_speed);
+ return max_speed;
+}
+
+struct bfa_fcs_lport_s *
+bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
+{
+ struct bfa_fcs_vport_s *vport;
+ bfa_fcs_vf_t *vf;
+
+ WARN_ON(fcs == NULL);
+
+ vf = bfa_fcs_vf_lookup(fcs, vf_id);
+ if (vf == NULL) {
+ bfa_trc(fcs, vf_id);
+ return NULL;
+ }
+
+ if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
+ return &vf->bport;
+
+ vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
+ if (vport)
+ return &vport->lport;
+
+ return NULL;
+}
+
+/*
+ * API corresponding to NPIV_VPORT_GETINFO.
+ */
+void
+bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
+ struct bfa_lport_info_s *port_info)
+{
+
+ bfa_trc(port->fcs, port->fabric->fabric_name);
+
+ if (port->vport == NULL) {
+ /*
+ * This is a Physical port
+ */
+ port_info->port_type = BFA_LPORT_TYPE_PHYSICAL;
+
+ /*
+ * @todo : need to fix the state & reason
+ */
+ port_info->port_state = 0;
+ port_info->offline_reason = 0;
+
+ port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
+ port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
+
+ port_info->max_vports_supp =
+ bfa_lps_get_max_vport(port->fcs->bfa);
+ port_info->num_vports_inuse =
+ port->fabric->num_vports;
+ port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
+ port_info->num_rports_inuse = port->num_rports;
+ } else {
+ /*
+ * This is a virtual port
+ */
+ port_info->port_type = BFA_LPORT_TYPE_VIRTUAL;
+
+ /*
+ * @todo : need to fix the state & reason
+ */
+ port_info->port_state = 0;
+ port_info->offline_reason = 0;
+
+ port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
+ port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
+ }
+}
+
+void
+bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
+ struct bfa_lport_stats_s *port_stats)
+{
+ *port_stats = fcs_port->stats;
+}
+
+void
+bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
+{
+ memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
+}
+
+/*
+ * Let new loop map create missing rports
+ */
+void
+bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port)
+{
+ bfa_fcs_lport_loop_online(port);
+}
+
+/*
+ * FCS virtual port state machine
+ */
+
+#define __vport_fcs(__vp) ((__vp)->lport.fcs)
+#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn)
+#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn)
+#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa)
+#define __vport_fcid(__vp) ((__vp)->lport.pid)
+#define __vport_fabric(__vp) ((__vp)->lport.fabric)
+#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id)
+
+#define BFA_FCS_VPORT_MAX_RETRIES 5
+/*
+ * Forward declarations
+ */
+static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
+static void bfa_fcs_vport_timeout(void *vport_arg);
+static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
+static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
+
+/*
+ * fcs_vport_sm FCS virtual port state machine
+ */
+
+/*
+ * VPort State Machine events
+ */
+enum bfa_fcs_vport_event {
+ BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
+ BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
+ BFA_FCS_VPORT_SM_START = 3, /* vport start request */
+ BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
+ BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
+ BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
+ BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
+ BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
+ BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
+ BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
+ BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
+ BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
+ BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
+ BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
+ BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
+};
+
+static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+
+static struct bfa_sm_table_s vport_sm_table[] = {
+ {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
+ {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
+ {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
+ {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
+ {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
+ {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
+ {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
+ {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
+ {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
+ {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
+ {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
+};
+
+/*
+ * Beginning state.
+ */
+static void
+bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_CREATE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+ bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Created state - a start event is required to start up the state machine.
+ */
+static void
+bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_START:
+ if (bfa_sm_cmp_state(__vport_fabric(vport),
+ bfa_fcs_fabric_sm_online)
+ && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
+ bfa_fcs_vport_do_fdisc(vport);
+ } else {
+ /*
+ * Fabric is offline or not NPIV capable, stay in
+ * offline state.
+ */
+ vport->vport_stats.fab_no_npiv++;
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+ }
+ break;
+
+ case BFA_FCS_VPORT_SM_DELETE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_ONLINE:
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ /*
+ * Ignore ONLINE/OFFLINE events from fabric
+ * till vport is started.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Offline state - awaiting ONLINE event from fabric SM.
+ */
+static void
+bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELETE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_ONLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
+ vport->fdisc_retries = 0;
+ bfa_fcs_vport_do_fdisc(vport);
+ break;
+
+ case BFA_FCS_VPORT_SM_STOP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ /*
+ * This can happen if the vport couldn't be initialzied
+ * due the fact that the npiv was not enabled on the switch.
+ * In that case we will put the vport in offline state.
+ * However, the link can go down and cause the this event to
+ * be sent when we are already offline. Ignore it.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+
+/*
+ * FDISC is sent and awaiting reply from fabric.
+ */
+static void
+bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELETE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ break;
+
+ case BFA_FCS_VPORT_SM_RSP_OK:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
+ bfa_fcs_lport_online(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_RSP_ERROR:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
+ bfa_timer_start(__vport_bfa(vport), &vport->timer,
+ bfa_fcs_vport_timeout, vport,
+ BFA_FCS_RETRY_TIMEOUT);
+ break;
+
+ case BFA_FCS_VPORT_SM_RSP_FAILED:
+ case BFA_FCS_VPORT_SM_FABRIC_MAX:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+ break;
+
+ case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * FDISC attempt failed - a timer is active to retry FDISC.
+ */
+static void
+bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELETE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_timer_stop(&vport->timer);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+ bfa_timer_stop(&vport->timer);
+ break;
+
+ case BFA_FCS_VPORT_SM_TIMEOUT:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
+ vport->vport_stats.fdisc_retries++;
+ vport->fdisc_retries++;
+ bfa_fcs_vport_do_fdisc(vport);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * FDISC is in progress and we got a vport delete request -
+ * this is a wait state while we wait for fdisc response and
+ * we will transition to the appropriate state - on rsp status.
+ */
+static void
+bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_RSP_OK:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_DELETE:
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ case BFA_FCS_VPORT_SM_RSP_ERROR:
+ case BFA_FCS_VPORT_SM_RSP_FAILED:
+ case BFA_FCS_VPORT_SM_FABRIC_MAX:
+ case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Vport is online (FDISC is complete).
+ */
+static void
+bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELETE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_STOP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
+ bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_lport_offline(&vport->lport);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Vport is being stopped - awaiting lport stop completion to send
+ * LOGO to fabric.
+ */
+static void
+bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_STOPCOMP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
+ bfa_fcs_vport_do_logo(vport);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Vport is being deleted - awaiting lport delete completion to send
+ * LOGO to fabric.
+ */
+static void
+bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELETE:
+ break;
+
+ case BFA_FCS_VPORT_SM_DELCOMP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
+ bfa_fcs_vport_do_logo(vport);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Error State.
+ * This state will be set when the Vport Creation fails due
+ * to errors like Dup WWN. In this state only operation allowed
+ * is a Vport Delete.
+ */
+static void
+bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELETE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ default:
+ bfa_trc(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * Lport cleanup is in progress since vport is being deleted. Fabric is
+ * offline, so no LOGO is needed to complete vport deletion.
+ */
+static void
+bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_DELCOMP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
+ bfa_fcs_vport_free(vport);
+ break;
+
+ case BFA_FCS_VPORT_SM_STOPCOMP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+ break;
+
+ case BFA_FCS_VPORT_SM_DELETE:
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
+ * is done.
+ */
+static void
+bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ /*
+ * !!! fall through !!!
+ */
+
+ case BFA_FCS_VPORT_SM_RSP_OK:
+ case BFA_FCS_VPORT_SM_RSP_ERROR:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
+ * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
+ * is done.
+ */
+static void
+bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ /*
+ * !!! fall through !!!
+ */
+
+ case BFA_FCS_VPORT_SM_RSP_OK:
+ case BFA_FCS_VPORT_SM_RSP_ERROR:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
+ bfa_fcs_vport_free(vport);
+ break;
+
+ case BFA_FCS_VPORT_SM_DELETE:
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+
+
+/*
+ * fcs_vport_private FCS virtual port private functions
+ */
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_lport_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+ aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(port->fcs));
+ aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_LPORT, event);
+}
+
+/*
+ * This routine will be called to send a FDISC command.
+ */
+static void
+bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
+{
+ bfa_lps_fdisc(vport->lps, vport,
+ bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
+ __vport_pwwn(vport), __vport_nwwn(vport));
+ vport->vport_stats.fdisc_sent++;
+}
+
+static void
+bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
+{
+ u8 lsrjt_rsn = vport->lps->lsrjt_rsn;
+ u8 lsrjt_expl = vport->lps->lsrjt_expl;
+
+ bfa_trc(__vport_fcs(vport), lsrjt_rsn);
+ bfa_trc(__vport_fcs(vport), lsrjt_expl);
+
+ /* For certain reason codes, we don't want to retry. */
+ switch (vport->lps->lsrjt_expl) {
+ case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
+ case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
+ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+ else {
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_DUP_WWN);
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
+ }
+ break;
+
+ case FC_LS_RJT_EXP_INSUFF_RES:
+ /*
+ * This means max logins per port/switch setting on the
+ * switch was exceeded.
+ */
+ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+ else {
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_FABRIC_MAX);
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_FABRIC_MAX);
+ }
+ break;
+
+ default:
+ if (vport->fdisc_retries == 0)
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_UNKNOWN);
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+ }
+}
+
+/*
+ * Called to send a logout to the fabric. Used when a V-Port is
+ * deleted/stopped.
+ */
+static void
+bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+
+ vport->vport_stats.logo_sent++;
+ bfa_lps_fdisclogo(vport->lps);
+}
+
+
+/*
+ * This routine will be called by bfa_timer on timer timeouts.
+ *
+ * param[in] vport - pointer to bfa_fcs_vport_t.
+ * param[out] vport_status - pointer to return vport status in
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+static void
+bfa_fcs_vport_timeout(void *vport_arg)
+{
+ struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg;
+
+ vport->vport_stats.fdisc_timeouts++;
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
+}
+
+static void
+bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
+{
+ struct bfad_vport_s *vport_drv =
+ (struct bfad_vport_s *)vport->vport_drv;
+
+ bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
+ bfa_lps_delete(vport->lps);
+
+ if (vport_drv->comp_del) {
+ complete(vport_drv->comp_del);
+ return;
+ }
+
+ /*
+ * We queue the vport delete work to the IM work_q from here.
+ * The memory for the bfad_vport_s is freed from the FC function
+ * template vport_delete entry point.
+ */
+ bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port);
+}
+
+/*
+ * fcs_vport_public FCS virtual port public interfaces
+ */
+
+/*
+ * Online notification from fabric SM.
+ */
+void
+bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
+{
+ vport->vport_stats.fab_online++;
+ if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+ else
+ vport->vport_stats.fab_no_npiv++;
+}
+
+/*
+ * Offline notification from fabric SM.
+ */
+void
+bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
+{
+ vport->vport_stats.fab_offline++;
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
+}
+
+/*
+ * Cleanup notification from fabric SM on link timer expiry.
+ */
+void
+bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
+{
+ vport->vport_stats.fab_cleanup++;
+}
+
+/*
+ * Stop notification from fabric SM. To be invoked from within FCS.
+ */
+void
+bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
+}
+
+/*
+ * delete notification from fabric SM. To be invoked from within FCS.
+ */
+void
+bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
+}
+
+/*
+ * Stop completion callback from associated lport
+ */
+void
+bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
+}
+
+/*
+ * Delete completion callback from associated lport
+ */
+void
+bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
+}
+
+
+
+/*
+ * fcs_vport_api Virtual port API
+ */
+
+/*
+ * Use this function to instantiate a new FCS vport object. This
+ * function will not trigger any HW initialization process (which will be
+ * done in vport_start() call)
+ *
+ * param[in] vport - pointer to bfa_fcs_vport_t. This space
+ * needs to be allocated by the driver.
+ * param[in] fcs - FCS instance
+ * param[in] vport_cfg - vport configuration
+ * param[in] vf_id - VF_ID if vport is created within a VF.
+ * FC_VF_ID_NULL to specify base fabric.
+ * param[in] vport_drv - Opaque handle back to the driver's vport
+ * structure
+ *
+ * retval BFA_STATUS_OK - on success.
+ * retval BFA_STATUS_FAILED - on failure.
+ */
+bfa_status_t
+bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
+ u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
+ struct bfad_vport_s *vport_drv)
+{
+ if (vport_cfg->pwwn == 0)
+ return BFA_STATUS_INVALID_WWN;
+
+ if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
+ return BFA_STATUS_VPORT_WWN_BP;
+
+ if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
+ return BFA_STATUS_VPORT_EXISTS;
+
+ if (fcs->fabric.num_vports ==
+ bfa_lps_get_max_vport(fcs->bfa))
+ return BFA_STATUS_VPORT_MAX;
+
+ vport->lps = bfa_lps_alloc(fcs->bfa);
+ if (!vport->lps)
+ return BFA_STATUS_VPORT_MAX;
+
+ vport->vport_drv = vport_drv;
+ vport_cfg->preboot_vp = BFA_FALSE;
+
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
+ bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
+ bfa_fcs_lport_init(&vport->lport, vport_cfg);
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Use this function to instantiate a new FCS PBC vport object. This
+ * function will not trigger any HW initialization process (which will be
+ * done in vport_start() call)
+ *
+ * param[in] vport - pointer to bfa_fcs_vport_t. This space
+ * needs to be allocated by the driver.
+ * param[in] fcs - FCS instance
+ * param[in] vport_cfg - vport configuration
+ * param[in] vf_id - VF_ID if vport is created within a VF.
+ * FC_VF_ID_NULL to specify base fabric.
+ * param[in] vport_drv - Opaque handle back to the driver's vport
+ * structure
+ *
+ * retval BFA_STATUS_OK - on success.
+ * retval BFA_STATUS_FAILED - on failure.
+ */
+bfa_status_t
+bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
+ u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
+ struct bfad_vport_s *vport_drv)
+{
+ bfa_status_t rc;
+
+ rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
+ vport->lport.port_cfg.preboot_vp = BFA_TRUE;
+
+ return rc;
+}
+
+/*
+ * Use this function to findout if this is a pbc vport or not.
+ *
+ * @param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ * @returns None
+ */
+bfa_boolean_t
+bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
+{
+
+ if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE))
+ return BFA_TRUE;
+ else
+ return BFA_FALSE;
+
+}
+
+/*
+ * Use this function initialize the vport.
+ *
+ * @param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ * @returns None
+ */
+bfa_status_t
+bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Use this function quiese the vport object. This function will return
+ * immediately, when the vport is actually stopped, the
+ * bfa_drv_vport_stop_cb() will be called.
+ *
+ * param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ * return None
+ */
+bfa_status_t
+bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Use this function to delete a vport object. Fabric object should
+ * be stopped before this function call.
+ *
+ * !!!!!!! Donot invoke this from within FCS !!!!!!!
+ *
+ * param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ * return None
+ */
+bfa_status_t
+bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
+{
+
+ if (vport->lport.port_cfg.preboot_vp)
+ return BFA_STATUS_PBC;
+
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Use this function to get vport's current status info.
+ *
+ * param[in] vport pointer to bfa_fcs_vport_t.
+ * param[out] attr pointer to return vport attributes
+ *
+ * return None
+ */
+void
+bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
+ struct bfa_vport_attr_s *attr)
+{
+ if (vport == NULL || attr == NULL)
+ return;
+
+ memset(attr, 0, sizeof(struct bfa_vport_attr_s));
+
+ bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
+ attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
+}
+
+
+/*
+ * Lookup a virtual port. Excludes base port from lookup.
+ */
+struct bfa_fcs_vport_s *
+bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
+{
+ struct bfa_fcs_vport_s *vport;
+ struct bfa_fcs_fabric_s *fabric;
+
+ bfa_trc(fcs, vf_id);
+ bfa_trc(fcs, vpwwn);
+
+ fabric = bfa_fcs_vf_lookup(fcs, vf_id);
+ if (!fabric) {
+ bfa_trc(fcs, vf_id);
+ return NULL;
+ }
+
+ vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
+ return vport;
+}
+
+/*
+ * FDISC Response
+ */
+void
+bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
+{
+ struct bfa_fcs_vport_s *vport = uarg;
+
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), status);
+
+ switch (status) {
+ case BFA_STATUS_OK:
+ /*
+ * Initialize the V-Port fields
+ */
+ __vport_fcid(vport) = vport->lps->lp_pid;
+ vport->vport_stats.fdisc_accepts++;
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
+ break;
+
+ case BFA_STATUS_INVALID_MAC:
+ /* Only for CNA */
+ vport->vport_stats.fdisc_acc_bad++;
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+
+ break;
+
+ case BFA_STATUS_EPROTOCOL:
+ switch (vport->lps->ext_status) {
+ case BFA_EPROTO_BAD_ACCEPT:
+ vport->vport_stats.fdisc_acc_bad++;
+ break;
+
+ case BFA_EPROTO_UNKNOWN_RSP:
+ vport->vport_stats.fdisc_unknown_rsp++;
+ break;
+
+ default:
+ break;
+ }
+
+ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+ else
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+
+ break;
+
+ case BFA_STATUS_ETIMER:
+ vport->vport_stats.fdisc_timeouts++;
+ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+ else
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+ break;
+
+ case BFA_STATUS_FABRIC_RJT:
+ vport->vport_stats.fdisc_rejects++;
+ bfa_fcs_vport_fdisc_rejected(vport);
+ break;
+
+ default:
+ vport->vport_stats.fdisc_rsp_err++;
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+ }
+}
+
+/*
+ * LOGO response
+ */
+void
+bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
+{
+ struct bfa_fcs_vport_s *vport = uarg;
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
+}
+
+/*
+ * Received clear virtual link
+ */
+void
+bfa_cb_lps_cvl_event(void *bfad, void *uarg)
+{
+ struct bfa_fcs_vport_s *vport = uarg;
+
+ /* Send an Offline followed by an ONLINE */
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
new file mode 100644
index 000000000..2035b0d64
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -0,0 +1,3464 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * rport.c Remote port implementation.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+
+BFA_TRC_FILE(FCS, RPORT);
+
+static u32
+bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
+ /* In millisecs */
+/*
+ * bfa_fcs_rport_max_logins is max count of bfa_fcs_rports
+ * whereas DEF_CFG_NUM_RPORTS is max count of bfa_rports
+ */
+static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS;
+
+/*
+ * forward declarations
+ */
+static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
+ struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
+static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
+ struct fc_logi_s *plogi);
+static void bfa_fcs_rport_timeout(void *arg);
+static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_rport_plogi_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_rport_adisc_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_rport_gidpn_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_rport_gpnid_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_rport_send_logo(void *rport_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg);
+static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *rx_fchs, u16 len);
+static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *rx_fchs, u8 reason_code,
+ u8 reason_code_expl);
+static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *rx_fchs, u16 len);
+static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
+static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport);
+
+static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_adisc_online_sending(
+ struct bfa_fcs_rport_s *rport, enum rport_event event);
+static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s
+ *rport, enum rport_event event);
+static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event);
+
+static struct bfa_sm_table_s rport_sm_table[] = {
+ {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
+ {BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
+ {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
+ {BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
+ {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
+ {BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
+ {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
+ {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
+ {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
+ {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
+ {BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC},
+ {BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC},
+ {BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC},
+ {BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC},
+ {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
+ {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
+ {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
+ {BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE},
+ {BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV},
+ {BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO},
+ {BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO},
+ {BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE},
+ {BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC},
+ {BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC},
+ {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
+};
+
+/*
+ * Beginning state.
+ */
+static void
+bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_PLOGI_SEND:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_fcs_rport_hal_online(rport);
+ break;
+
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_ADDRESS_DISC:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * PLOGI is being sent.
+ */
+static void
+bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_FAB_SCN:
+ /* query the NS */
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
+ BFA_PORT_TOPOLOGY_LOOP));
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * PLOGI is being sent.
+ */
+static void
+bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_PLOGI_COMP:
+ case RPSM_EVENT_FAB_SCN:
+ /*
+ * Ignore, SCN is possibly online notification.
+ */
+ break;
+
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_HCB_OFFLINE:
+ /*
+ * Ignore BFA callback, on a PLOGI receive we call bfa offline.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * PLOGI is sent.
+ */
+static void
+bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_LOGO_RCVD:
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_stop(&rport->timer);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_FAB_SCN:
+ bfa_timer_stop(&rport->timer);
+ WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
+ BFA_PORT_TOPOLOGY_LOOP));
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_stop(&rport->timer);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * PLOGI is sent.
+ */
+static void
+bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_ACCEPTED:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
+ /*
+ * !! fall through !!
+ */
+ case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+
+ bfa_fcxp_discard(rport->fcxp);
+ /*
+ * !! fall through !!
+ */
+ case RPSM_EVENT_FAILED:
+ if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+ rport->plogi_retries++;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ BFA_FCS_RETRY_TIMEOUT);
+ } else {
+ bfa_stats(rport->port, rport_del_max_plogi_retry);
+ rport->old_pid = rport->pid;
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ }
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ break;
+
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_PLOGI_RETRY:
+ rport->plogi_retries = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ (FC_RA_TOV * 1000));
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_FAB_SCN:
+ bfa_fcxp_discard(rport->fcxp);
+ WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
+ BFA_PORT_TOPOLOGY_LOOP));
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * PLOGI is done. Await bfa_fcs_itnim to ascertain the scsi function
+ */
+static void
+bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FC4_FCS_ONLINE:
+ if (rport->scsi_function == BFA_RPORT_INITIATOR) {
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_online(rport);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+ break;
+ }
+
+ if (!rport->bfa_rport)
+ rport->bfa_rport =
+ bfa_rport_create(rport->fcs->bfa, rport);
+
+ if (rport->bfa_rport) {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_fcs_rport_hal_online(rport);
+ } else {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ }
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ rport->plogi_pending = BFA_TRUE;
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ break;
+ }
+}
+
+/*
+ * PLOGI is complete. Awaiting BFA rport online callback. FC-4s
+ * are offline.
+ */
+static void
+bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_HCB_ONLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+ bfa_fcs_rport_hal_online_action(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ break;
+
+ case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_LOGO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ rport->plogi_pending = BFA_TRUE;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_fcs_offline_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is ONLINE. FC-4s active.
+ */
+static void
+bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FAB_SCN:
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsquery_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_adisc_online_sending);
+ bfa_fcs_rport_send_adisc(rport, NULL);
+ }
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ case RPSM_EVENT_PLOGI_COMP:
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * An SCN event is received in ONLINE state. NS query is being sent
+ * prior to ADISC authentication with rport. FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ /*
+ * ignore SCN, wait for response to query itself
+ */
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * An SCN event is received in ONLINE state. NS query is sent to rport.
+ * FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_ACCEPTED:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending);
+ bfa_fcs_rport_send_adisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_FAILED:
+ rport->ns_retries++;
+ if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsquery_sending);
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_hal_offline_action(rport);
+ }
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_LOGO_IMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * An SCN event is received in ONLINE state. ADISC is being sent for
+ * authenticating with rport. FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * An SCN event is received in ONLINE state. ADISC is to rport.
+ * FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_ACCEPTED:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ /*
+ * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
+ * At least go offline when a PLOGI is received.
+ */
+ bfa_fcxp_discard(rport->fcxp);
+ /*
+ * !!! fall through !!!
+ */
+
+ case RPSM_EVENT_FAILED:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ /*
+ * already processing RSCN
+ */
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_hal_offline_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * ADISC is being sent for authenticating with rport
+ * Already did offline actions.
+ */
+static void
+bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa,
+ &rport->fcxp_wqe);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * ADISC to rport
+ * Already did offline actions
+ */
+static void
+bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_ACCEPTED:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+ bfa_fcs_rport_hal_online(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_FAILED:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
+ */
+static void
+bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FC4_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
+ bfa_fcs_rport_hal_offline(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ if (rport->pid && (rport->prlo == BFA_TRUE))
+ bfa_fcs_rport_send_prlo_acc(rport);
+ if (rport->pid && (rport->prlo == BFA_FALSE))
+ bfa_fcs_rport_send_logo_acc(rport);
+
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_HCB_ONLINE:
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * LOGO needs to be sent to rport. Awaiting FC-4 offline completion
+ * callback.
+ */
+static void
+bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FC4_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
+ bfa_fcs_rport_hal_offline(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
+ case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
+ break;
+
+ case RPSM_EVENT_HCB_ONLINE:
+ case RPSM_EVENT_DELETE:
+ /* Rport is being deleted */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is going offline. Awaiting FC-4 offline completion callback.
+ */
+static void
+bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FC4_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
+ bfa_fcs_rport_hal_offline(rport);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ break;
+ case RPSM_EVENT_LOGO_RCVD:
+ /*
+ * Rport is going offline. Just ack the logo
+ */
+ bfa_fcs_rport_send_logo_acc(rport);
+ break;
+
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_fcs_rport_send_prlo_acc(rport);
+ break;
+
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_HCB_ONLINE:
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ /*
+ * rport is already going offline.
+ * SCN - ignore and wait till transitioning to offline state
+ */
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
+ * callback.
+ */
+static void
+bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_HCB_OFFLINE:
+ if (bfa_fcs_lport_is_online(rport->port) &&
+ (rport->plogi_pending)) {
+ rport->plogi_pending = BFA_FALSE;
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+ }
+ /*
+ * !! fall through !!
+ */
+
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ if (!bfa_fcs_lport_is_online(rport->port)) {
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+ }
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+ BFA_PORT_TOPOLOGY_LOOP) {
+ if (rport->scn_online) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_adisc_offline_sending);
+ bfa_fcs_rport_send_adisc(rport, NULL);
+ } else {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_offline);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ }
+ } else {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ }
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_LOGO_IMP:
+ /*
+ * Ignore, already offline.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
+ * callback to send LOGO accept.
+ */
+static void
+bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_HCB_OFFLINE:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ if (rport->pid && (rport->prlo == BFA_TRUE))
+ bfa_fcs_rport_send_prlo_acc(rport);
+ if (rport->pid && (rport->prlo == BFA_FALSE))
+ bfa_fcs_rport_send_logo_acc(rport);
+ /*
+ * If the lport is online and if the rport is not a well
+ * known address port,
+ * we try to re-discover the r-port.
+ */
+ if (bfa_fcs_lport_is_online(rport->port) &&
+ (!BFA_FCS_PID_IS_WKA(rport->pid))) {
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else {
+ /* For N2N Direct Attach, try to re-login */
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ }
+ } else {
+ /*
+ * if it is not a well known address, reset the
+ * pid to 0.
+ */
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ }
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+ if (rport->pid && (rport->prlo == BFA_TRUE))
+ bfa_fcs_rport_send_prlo_acc(rport);
+ if (rport->pid && (rport->prlo == BFA_FALSE))
+ bfa_fcs_rport_send_logo_acc(rport);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ /*
+ * Ignore - already processing a LOGO.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is being deleted. FC-4s are offline.
+ * Awaiting BFA rport offline
+ * callback to send LOGO.
+ */
+static void
+bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_HCB_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending);
+ bfa_fcs_rport_send_logo(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
+ case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is being deleted. FC-4s are offline. LOGO is being sent.
+ */
+static void
+bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ /* Once LOGO is sent, we donot wait for the response */
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ case RPSM_EVENT_SCN_OFFLINE:
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
+ case RPSM_EVENT_PRLO_RCVD:
+ if (rport->prlo == BFA_TRUE)
+ bfa_fcs_rport_send_prlo_acc(rport);
+
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport is offline. FC-4s are offline. BFA rport is offline.
+ * Timer active to delete stale rport.
+ */
+static void
+bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ bfa_timer_stop(&rport->timer);
+ WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
+ BFA_PORT_TOPOLOGY_LOOP));
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_SCN_OFFLINE:
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ case RPSM_EVENT_SCN_ONLINE:
+ bfa_timer_stop(&rport->timer);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ break;
+
+ case RPSM_EVENT_PLOGI_SEND:
+ bfa_timer_stop(&rport->timer);
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport address has changed. Nameserver discovery request is being sent.
+ */
+static void
+bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_LOGO_RCVD:
+ case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_PLOGI_SEND:
+ break;
+
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ rport->ns_retries = 0; /* reset the retry count */
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Nameserver discovery failed. Waiting for timeout to retry.
+ */
+static void
+bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_TIMEOUT:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_FAB_SCN:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+ bfa_timer_stop(&rport->timer);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_stop(&rport->timer);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ bfa_fcs_rport_send_logo_acc(rport);
+ break;
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_fcs_rport_send_prlo_acc(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_timer_stop(&rport->timer);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport address has changed. Nameserver discovery request is sent.
+ */
+static void
+bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_ACCEPTED:
+ case RPSM_EVENT_ADDRESS_CHANGE:
+ if (rport->pid) {
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ } else {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ }
+ break;
+
+ case RPSM_EVENT_FAILED:
+ rport->ns_retries++;
+ if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else {
+ rport->old_pid = rport->pid;
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ };
+ break;
+
+ case RPSM_EVENT_DELETE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_RCVD:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+
+ case RPSM_EVENT_LOGO_IMP:
+ rport->pid = 0;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_timer_start(rport->fcs->bfa, &rport->timer,
+ bfa_fcs_rport_timeout, rport,
+ bfa_fcs_rport_del_timeout);
+ break;
+
+
+ case RPSM_EVENT_PRLO_RCVD:
+ bfa_fcs_rport_send_prlo_acc(rport);
+ break;
+ case RPSM_EVENT_FAB_SCN:
+ /*
+ * ignore, wait for NS query response
+ */
+ break;
+
+ case RPSM_EVENT_LOGO_RCVD:
+ /*
+ * Not logged-in yet. Accept LOGO.
+ */
+ bfa_fcs_rport_send_logo_acc(rport);
+ break;
+
+ case RPSM_EVENT_PLOGI_COMP:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_fcs_rport_fcs_online_action(rport);
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * Rport needs to be deleted
+ * waiting for ITNIM clean up to finish
+ */
+static void
+bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_FC4_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+ bfa_fcs_rport_hal_offline(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ case RPSM_EVENT_PLOGI_RCVD:
+ /* Ignore these events */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ break;
+ }
+}
+
+/*
+ * RPort needs to be deleted
+ * waiting for BFA/FW to finish current processing
+ */
+static void
+bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+ enum rport_event event)
+{
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPSM_EVENT_HCB_OFFLINE:
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+ bfa_fcs_rport_free(rport);
+ break;
+
+ case RPSM_EVENT_DELETE:
+ case RPSM_EVENT_LOGO_IMP:
+ case RPSM_EVENT_PLOGI_RCVD:
+ /* Ignore these events */
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+/*
+ * fcs_rport_private FCS RPORT provate functions
+ */
+
+static void
+bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_rport_s *rport = rport_cbarg;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+ bfa_fcs_rport_send_plogi, rport, BFA_TRUE);
+ return;
+ }
+ rport->fcxp = fcxp;
+
+ len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+ bfa_fcs_lport_get_fcid(port), 0,
+ port->port_cfg.pwwn, port->port_cfg.nwwn,
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
+ (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+ rport->stats.plogis++;
+ bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct fc_logi_s *plogi_rsp;
+ struct fc_ls_rjt_s *ls_rjt;
+ struct bfa_fcs_rport_s *twin;
+ struct list_head *qe;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+
+ /*
+ * Sanity Checks
+ */
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(rport->fcs, req_status);
+ rport->stats.plogi_failed++;
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ return;
+ }
+
+ plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ /*
+ * Check for failure first.
+ */
+ if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
+ ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+ bfa_trc(rport->fcs, ls_rjt->reason_code);
+ bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
+
+ if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) &&
+ (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) {
+ rport->stats.rjt_insuff_res++;
+ bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY);
+ return;
+ }
+
+ rport->stats.plogi_rejects++;
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ return;
+ }
+
+ /*
+ * PLOGI is complete. Make sure this device is not one of the known
+ * device with a new FC port address.
+ */
+ list_for_each(qe, &rport->port->rport_q) {
+ twin = (struct bfa_fcs_rport_s *) qe;
+ if (twin == rport)
+ continue;
+ if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) {
+ bfa_trc(rport->fcs, twin->pid);
+ bfa_trc(rport->fcs, rport->pid);
+
+ /* Update plogi stats in twin */
+ twin->stats.plogis += rport->stats.plogis;
+ twin->stats.plogi_rejects +=
+ rport->stats.plogi_rejects;
+ twin->stats.plogi_timeouts +=
+ rport->stats.plogi_timeouts;
+ twin->stats.plogi_failed +=
+ rport->stats.plogi_failed;
+ twin->stats.plogi_rcvd += rport->stats.plogi_rcvd;
+ twin->stats.plogi_accs++;
+
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+
+ bfa_fcs_rport_update(twin, plogi_rsp);
+ twin->pid = rsp_fchs->s_id;
+ bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP);
+ return;
+ }
+ }
+
+ /*
+ * Normal login path -- no evil twins.
+ */
+ rport->stats.plogi_accs++;
+ bfa_fcs_rport_update(rport, plogi_rsp);
+ bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+}
+
+static void
+bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_rport_s *rport = rport_cbarg;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->reply_oxid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+ bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE);
+ return;
+ }
+ rport->fcxp = fcxp;
+
+ len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rport->pid, bfa_fcs_lport_get_fcid(port),
+ rport->reply_oxid, port->port_cfg.pwwn,
+ port->port_cfg.nwwn,
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+
+ bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_rport_s *rport = rport_cbarg;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+ bfa_fcs_rport_send_adisc, rport, BFA_TRUE);
+ return;
+ }
+ rport->fcxp = fcxp;
+
+ len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+ bfa_fcs_lport_get_fcid(port), 0,
+ port->port_cfg.pwwn, port->port_cfg.nwwn);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
+ rport, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+ rport->stats.adisc_sent++;
+ bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ void *pld = bfa_fcxp_get_rspbuf(fcxp);
+ struct fc_ls_rjt_s *ls_rjt;
+
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(rport->fcs, req_status);
+ rport->stats.adisc_failed++;
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ return;
+ }
+
+ if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn,
+ rport->nwwn) == FC_PARSE_OK) {
+ rport->stats.adisc_accs++;
+ bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+ return;
+ }
+
+ rport->stats.adisc_rejects++;
+ ls_rjt = pld;
+ bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code);
+ bfa_trc(rport->fcs, ls_rjt->reason_code);
+ bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+}
+
+static void
+bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_rport_s *rport = rport_cbarg;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ int len;
+ bfa_cb_fcxp_send_t cbfn;
+
+ bfa_trc(rport->fcs, rport->pid);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+ bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE);
+ return;
+ }
+ rport->fcxp = fcxp;
+
+ if (rport->pwwn) {
+ len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0, rport->pwwn);
+ cbfn = bfa_fcs_rport_gidpn_response;
+ } else {
+ len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ bfa_fcs_lport_get_fcid(port), 0, rport->pid);
+ cbfn = bfa_fcs_rport_gpnid_response;
+ }
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, cbfn,
+ (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+ bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct ct_hdr_s *cthdr;
+ struct fcgs_gidpn_resp_s *gidpn_rsp;
+ struct bfa_fcs_rport_s *twin;
+ struct list_head *qe;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ /* Check if the pid is the same as before. */
+ gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1);
+
+ if (gidpn_rsp->dap == rport->pid) {
+ /* Device is online */
+ bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+ } else {
+ /*
+ * Device's PID has changed. We need to cleanup
+ * and re-login. If there is another device with
+ * the the newly discovered pid, send an scn notice
+ * so that its new pid can be discovered.
+ */
+ list_for_each(qe, &rport->port->rport_q) {
+ twin = (struct bfa_fcs_rport_s *) qe;
+ if (twin == rport)
+ continue;
+ if (gidpn_rsp->dap == twin->pid) {
+ bfa_trc(rport->fcs, twin->pid);
+ bfa_trc(rport->fcs, rport->pid);
+
+ twin->pid = 0;
+ bfa_sm_send_event(twin,
+ RPSM_EVENT_ADDRESS_CHANGE);
+ }
+ }
+ rport->pid = gidpn_rsp->dap;
+ bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE);
+ }
+ return;
+ }
+
+ /*
+ * Reject Response
+ */
+ switch (cthdr->reason_code) {
+ case CT_RSN_LOGICAL_BUSY:
+ /*
+ * Need to retry
+ */
+ bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
+ break;
+
+ case CT_RSN_UNABLE_TO_PERF:
+ /*
+ * device doesn't exist : Start timer to cleanup this later.
+ */
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ break;
+
+ default:
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ break;
+ }
+}
+
+static void
+bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct ct_hdr_s *cthdr;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+
+ cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+ cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+ if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+ bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+ return;
+ }
+
+ /*
+ * Reject Response
+ */
+ switch (cthdr->reason_code) {
+ case CT_RSN_LOGICAL_BUSY:
+ /*
+ * Need to retry
+ */
+ bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
+ break;
+
+ case CT_RSN_UNABLE_TO_PERF:
+ /*
+ * device doesn't exist : Start timer to cleanup this later.
+ */
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ break;
+
+ default:
+ bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+ break;
+ }
+}
+
+/*
+ * Called to send a logout to the rport.
+ */
+static void
+bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_rport_s *rport = rport_cbarg;
+ struct bfa_fcs_lport_s *port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ u16 len;
+
+ bfa_trc(rport->fcs, rport->pid);
+
+ port = rport->port;
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+ bfa_fcs_rport_send_logo, rport, BFA_FALSE);
+ return;
+ }
+ rport->fcxp = fcxp;
+
+ len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+ bfa_fcs_lport_get_fcid(port), 0,
+ bfa_fcs_lport_get_pwwn(port));
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL,
+ rport, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+ rport->stats.logos++;
+ bfa_fcxp_discard(rport->fcxp);
+ bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+/*
+ * Send ACC for a LOGO received.
+ */
+static void
+bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
+{
+ struct bfa_fcs_rport_s *rport = rport_cbarg;
+ struct bfa_fcs_lport_s *port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ u16 len;
+
+ bfa_trc(rport->fcs, rport->pid);
+
+ port = rport->port;
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ rport->stats.logo_rcvd++;
+ len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rport->pid, bfa_fcs_lport_get_fcid(port),
+ rport->reply_oxid);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * brief
+ * This routine will be called by bfa_timer on timer timeouts.
+ *
+ * param[in] rport - pointer to bfa_fcs_lport_ns_t.
+ * param[out] rport_status - pointer to return vport status in
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+static void
+bfa_fcs_rport_timeout(void *arg)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg;
+
+ rport->stats.plogi_timeouts++;
+ bfa_stats(rport->port, rport_plogi_timeouts);
+ bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
+}
+
+static void
+bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *rx_fchs, u16 len)
+{
+ struct bfa_fcxp_s *fcxp;
+ struct fchs_s fchs;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fc_prli_s *prli;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+
+ rport->stats.prli_rcvd++;
+
+ /*
+ * We are in Initiator Mode
+ */
+ prli = (struct fc_prli_s *) (rx_fchs + 1);
+
+ if (prli->parampage.servparams.target) {
+ /*
+ * PRLI from a target ?
+ * Send the Acc.
+ * PRLI sent by us will be used to transition the IT nexus,
+ * once the response is received from the target.
+ */
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ rport->scsi_function = BFA_RPORT_TARGET;
+ } else {
+ bfa_trc(rport->fcs, prli->parampage.type);
+ rport->scsi_function = BFA_RPORT_INITIATOR;
+ bfa_fcs_itnim_is_initiator(rport->itnim);
+ }
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, port->port_cfg.roles);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+static void
+bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *rx_fchs, u16 len)
+{
+ struct bfa_fcxp_s *fcxp;
+ struct fchs_s fchs;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fc_rpsc_speed_info_s speeds;
+ struct bfa_port_attr_s pport_attr;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+
+ rport->stats.rpsc_rcvd++;
+ speeds.port_speed_cap =
+ RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G |
+ RPSC_SPEED_CAP_8G;
+
+ /*
+ * get curent speed from pport attributes from BFA
+ */
+ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
+
+ speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, &speeds);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+static void
+bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *rx_fchs, u16 len)
+{
+ struct bfa_fcxp_s *fcxp;
+ struct fchs_s fchs;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fc_adisc_s *adisc;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+
+ rport->stats.adisc_rcvd++;
+
+ adisc = (struct fc_adisc_s *) (rx_fchs + 1);
+
+ /*
+ * Accept if the itnim for this rport is online.
+ * Else reject the ADISC.
+ */
+ if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, port->port_cfg.pwwn,
+ port->port_cfg.nwwn);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+ } else {
+ rport->stats.adisc_rejected++;
+ bfa_fcs_rport_send_ls_rjt(rport, rx_fchs,
+ FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
+ FC_LS_RJT_EXP_LOGIN_REQUIRED);
+ }
+}
+
+static void
+bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfa_rport_info_s rport_info;
+
+ rport_info.pid = rport->pid;
+ rport_info.local_pid = port->pid;
+ rport_info.lp_tag = port->lp_tag;
+ rport_info.vf_id = port->fabric->vf_id;
+ rport_info.vf_en = port->fabric->is_vf;
+ rport_info.fc_class = rport->fc_cos;
+ rport_info.cisc = rport->cisc;
+ rport_info.max_frmsz = rport->maxfrsize;
+ bfa_rport_online(rport->bfa_rport, &rport_info);
+}
+
+static void
+bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport)
+{
+ if (rport->bfa_rport)
+ bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+ else
+ bfa_cb_rport_offline(rport);
+}
+
+static struct bfa_fcs_rport_s *
+bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
+{
+ struct bfa_fcs_s *fcs = port->fcs;
+ struct bfa_fcs_rport_s *rport;
+ struct bfad_rport_s *rport_drv;
+
+ /*
+ * allocate rport
+ */
+ if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) {
+ bfa_trc(fcs, rpid);
+ return NULL;
+ }
+
+ if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
+ != BFA_STATUS_OK) {
+ bfa_trc(fcs, rpid);
+ return NULL;
+ }
+
+ /*
+ * Initialize r-port
+ */
+ rport->port = port;
+ rport->fcs = fcs;
+ rport->rp_drv = rport_drv;
+ rport->pid = rpid;
+ rport->pwwn = pwwn;
+ rport->old_pid = 0;
+
+ rport->bfa_rport = NULL;
+
+ /*
+ * allocate FC-4s
+ */
+ WARN_ON(!bfa_fcs_lport_is_initiator(port));
+
+ if (bfa_fcs_lport_is_initiator(port)) {
+ rport->itnim = bfa_fcs_itnim_create(rport);
+ if (!rport->itnim) {
+ bfa_trc(fcs, rpid);
+ kfree(rport_drv);
+ return NULL;
+ }
+ }
+
+ bfa_fcs_lport_add_rport(port, rport);
+ fcs->num_rport_logins++;
+
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+
+ /* Initialize the Rport Features(RPF) Sub Module */
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_init(rport);
+
+ return rport;
+}
+
+
+static void
+bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfa_fcs_s *fcs = port->fcs;
+
+ /*
+ * - delete FC-4s
+ * - delete BFA rport
+ * - remove from queue of rports
+ */
+ rport->plogi_pending = BFA_FALSE;
+
+ if (bfa_fcs_lport_is_initiator(port)) {
+ bfa_fcs_itnim_delete(rport->itnim);
+ if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_offline(rport);
+ }
+
+ if (rport->bfa_rport) {
+ bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
+ rport->bfa_rport = NULL;
+ }
+
+ bfa_fcs_lport_del_rport(port, rport);
+ fcs->num_rport_logins--;
+ kfree(rport->rp_drv);
+}
+
+static void
+bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
+ enum bfa_rport_aen_event event,
+ struct bfa_rport_aen_data_s *data)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ if (event == BFA_RPORT_AEN_QOS_PRIO)
+ aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+ else if (event == BFA_RPORT_AEN_QOS_FLOWID)
+ aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+
+ aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
+ aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(rport->fcs));
+ aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+ aen_entry->aen_data.rport.rpwwn = rport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_RPORT, event);
+}
+
+static void
+bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport)
+{
+ if ((!rport->pid) || (!rport->pwwn)) {
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_sm_fault(rport->fcs, rport->pid);
+ }
+
+ bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE);
+}
+
+static void
+bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+ char rpwwn_buf[BFA_STRING_32];
+
+ rport->stats.onlines++;
+
+ if ((!rport->pid) || (!rport->pwwn)) {
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_sm_fault(rport->fcs, rport->pid);
+ }
+
+ if (bfa_fcs_lport_is_initiator(port)) {
+ bfa_fcs_itnim_brp_online(rport->itnim);
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_online(rport);
+ };
+
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+ wwn2str(rpwwn_buf, rport->pwwn);
+ if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
+ rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
+ }
+}
+
+static void
+bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport)
+{
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_offline(rport);
+
+ bfa_fcs_itnim_rport_offline(rport->itnim);
+}
+
+static void
+bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ char lpwwn_buf[BFA_STRING_32];
+ char rpwwn_buf[BFA_STRING_32];
+
+ if (!rport->bfa_rport) {
+ bfa_fcs_rport_fcs_offline_action(rport);
+ return;
+ }
+
+ rport->stats.offlines++;
+
+ wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+ wwn2str(rpwwn_buf, rport->pwwn);
+ if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
+ if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "Remote port (WWN = %s) connectivity lost for "
+ "logical port (WWN = %s)\n",
+ rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport,
+ BFA_RPORT_AEN_DISCONNECT, NULL);
+ } else {
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Remote port (WWN = %s) offlined by "
+ "logical port (WWN = %s)\n",
+ rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport,
+ BFA_RPORT_AEN_OFFLINE, NULL);
+ }
+ }
+
+ if (bfa_fcs_lport_is_initiator(port)) {
+ bfa_fcs_itnim_rport_offline(rport->itnim);
+ if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ bfa_fcs_rpf_rport_offline(rport);
+ }
+}
+
+/*
+ * Update rport parameters from PLOGI or PLOGI accept.
+ */
+static void
+bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
+{
+ bfa_fcs_lport_t *port = rport->port;
+
+ /*
+ * - port name
+ * - node name
+ */
+ rport->pwwn = plogi->port_name;
+ rport->nwwn = plogi->node_name;
+
+ /*
+ * - class of service
+ */
+ rport->fc_cos = 0;
+ if (plogi->class3.class_valid)
+ rport->fc_cos = FC_CLASS_3;
+
+ if (plogi->class2.class_valid)
+ rport->fc_cos |= FC_CLASS_2;
+
+ /*
+ * - CISC
+ * - MAX receive frame size
+ */
+ rport->cisc = plogi->csp.cisc;
+ if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
+ rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+ else
+ rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
+
+ bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
+ bfa_trc(port->fcs, port->fabric->bb_credit);
+ /*
+ * Direct Attach P2P mode :
+ * This is to handle a bug (233476) in IBM targets in Direct Attach
+ * Mode. Basically, in FLOGI Accept the target would have
+ * erroneously set the BB Credit to the value used in the FLOGI
+ * sent by the HBA. It uses the correct value (its own BB credit)
+ * in PLOGI.
+ */
+ if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
+ (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
+
+ bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
+ bfa_trc(port->fcs, port->fabric->bb_credit);
+
+ port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
+ bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
+ port->fabric->bb_credit);
+ }
+
+}
+
+/*
+ * Called to handle LOGO received from an existing remote port.
+ */
+static void
+bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
+{
+ rport->reply_oxid = fchs->ox_id;
+ bfa_trc(rport->fcs, rport->reply_oxid);
+
+ rport->prlo = BFA_FALSE;
+ rport->stats.logo_rcvd++;
+ bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD);
+}
+
+
+
+/*
+ * fcs_rport_public FCS rport public interfaces
+ */
+
+/*
+ * Called by bport/vport to create a remote port instance for a discovered
+ * remote device.
+ *
+ * @param[in] port - base port or vport
+ * @param[in] rpid - remote port ID
+ *
+ * @return None
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ bfa_trc(port->fcs, rpid);
+ rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid);
+ if (!rport)
+ return NULL;
+
+ bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
+ return rport;
+}
+
+/*
+ * Called to create a rport for which only the wwn is known.
+ *
+ * @param[in] port - base port
+ * @param[in] rpwwn - remote port wwn
+ *
+ * @return None
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+ bfa_trc(port->fcs, rpwwn);
+ rport = bfa_fcs_rport_alloc(port, rpwwn, 0);
+ if (!rport)
+ return NULL;
+
+ bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
+ return rport;
+}
+/*
+ * Called by bport in private loop topology to indicate that a
+ * rport has been discovered and plogi has been completed.
+ *
+ * @param[in] port - base port or vport
+ * @param[in] rpid - remote port ID
+ */
+void
+bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
+ struct fc_logi_s *plogi)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id);
+ if (!rport)
+ return;
+
+ bfa_fcs_rport_update(rport, plogi);
+
+ bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
+}
+
+/*
+ * Called by bport/vport to handle PLOGI received from a new remote port.
+ * If an existing rport does a plogi, it will be handled separately.
+ */
+void
+bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
+ struct fc_logi_s *plogi)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id);
+ if (!rport)
+ return;
+
+ bfa_fcs_rport_update(rport, plogi);
+
+ rport->reply_oxid = fchs->ox_id;
+ bfa_trc(rport->fcs, rport->reply_oxid);
+
+ rport->stats.plogi_rcvd++;
+ bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
+}
+
+/*
+ * Called by bport/vport to handle PLOGI received from an existing
+ * remote port.
+ */
+void
+bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
+ struct fc_logi_s *plogi)
+{
+ /*
+ * @todo Handle P2P and initiator-initiator.
+ */
+
+ bfa_fcs_rport_update(rport, plogi);
+
+ rport->reply_oxid = rx_fchs->ox_id;
+ bfa_trc(rport->fcs, rport->reply_oxid);
+
+ rport->pid = rx_fchs->s_id;
+ bfa_trc(rport->fcs, rport->pid);
+
+ rport->stats.plogi_rcvd++;
+ bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
+}
+
+
+/*
+ * Called by bport/vport to notify SCN for the remote port
+ */
+void
+bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
+{
+ rport->stats.rscns++;
+ bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN);
+}
+
+/*
+ * brief
+ * This routine BFA callback for bfa_rport_online() call.
+ *
+ * param[in] cb_arg - rport struct.
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+void
+bfa_cb_rport_online(void *cbarg)
+{
+
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
+}
+
+/*
+ * brief
+ * This routine BFA callback for bfa_rport_offline() call.
+ *
+ * param[in] rport -
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+void
+bfa_cb_rport_offline(void *cbarg)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
+}
+
+/*
+ * brief
+ * This routine is a static BFA callback when there is a QoS flow_id
+ * change notification
+ *
+ * param[in] rport -
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+void
+bfa_cb_rport_qos_scn_flowid(void *cbarg,
+ struct bfa_rport_qos_attr_s old_qos_attr,
+ struct bfa_rport_qos_attr_s new_qos_attr)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct bfa_rport_aen_data_s aen_data;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ aen_data.priv.qos = new_qos_attr;
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
+}
+
+void
+bfa_cb_rport_scn_online(struct bfa_s *bfa)
+{
+ struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
+ struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
+ struct bfa_fcs_rport_s *rp;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rp = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE);
+ rp->scn_online = BFA_TRUE;
+ }
+
+ if (bfa_fcs_lport_is_online(port))
+ bfa_fcs_lport_lip_scn_online(port);
+}
+
+void
+bfa_cb_rport_scn_no_dev(void *rport)
+{
+ struct bfa_fcs_rport_s *rp = rport;
+
+ bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
+ rp->scn_online = BFA_FALSE;
+}
+
+void
+bfa_cb_rport_scn_offline(struct bfa_s *bfa)
+{
+ struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
+ struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
+ struct bfa_fcs_rport_s *rp;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rp = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
+ rp->scn_online = BFA_FALSE;
+ }
+}
+
+/*
+ * brief
+ * This routine is a static BFA callback when there is a QoS priority
+ * change notification
+ *
+ * param[in] rport -
+ *
+ * return
+ * void
+ *
+ * Special Considerations:
+ *
+ * note
+ */
+void
+bfa_cb_rport_qos_scn_prio(void *cbarg,
+ struct bfa_rport_qos_attr_s old_qos_attr,
+ struct bfa_rport_qos_attr_s new_qos_attr)
+{
+ struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct bfa_rport_aen_data_s aen_data;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ aen_data.priv.qos = new_qos_attr;
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
+}
+
+/*
+ * Called to process any unsolicted frames from this remote port
+ */
+void
+bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
+ struct fchs_s *fchs, u16 len)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fc_els_cmd_s *els_cmd;
+
+ bfa_trc(rport->fcs, fchs->s_id);
+ bfa_trc(rport->fcs, fchs->d_id);
+ bfa_trc(rport->fcs, fchs->type);
+
+ if (fchs->type != FC_TYPE_ELS)
+ return;
+
+ els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ bfa_trc(rport->fcs, els_cmd->els_code);
+
+ switch (els_cmd->els_code) {
+ case FC_ELS_LOGO:
+ bfa_stats(port, plogi_rcvd);
+ bfa_fcs_rport_process_logo(rport, fchs);
+ break;
+
+ case FC_ELS_ADISC:
+ bfa_stats(port, adisc_rcvd);
+ bfa_fcs_rport_process_adisc(rport, fchs, len);
+ break;
+
+ case FC_ELS_PRLO:
+ bfa_stats(port, prlo_rcvd);
+ if (bfa_fcs_lport_is_initiator(port))
+ bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len);
+ break;
+
+ case FC_ELS_PRLI:
+ bfa_stats(port, prli_rcvd);
+ bfa_fcs_rport_process_prli(rport, fchs, len);
+ break;
+
+ case FC_ELS_RPSC:
+ bfa_stats(port, rpsc_rcvd);
+ bfa_fcs_rport_process_rpsc(rport, fchs, len);
+ break;
+
+ default:
+ bfa_stats(port, un_handled_els_rcvd);
+ bfa_fcs_rport_send_ls_rjt(rport, fchs,
+ FC_LS_RJT_RSN_CMD_NOT_SUPP,
+ FC_LS_RJT_EXP_NO_ADDL_INFO);
+ break;
+ }
+}
+
+/* send best case acc to prlo */
+static void
+bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ int len;
+
+ bfa_trc(rport->fcs, rport->pid);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+ len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rport->pid, bfa_fcs_lport_get_fcid(port),
+ rport->reply_oxid, 0);
+
+ bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id,
+ port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs,
+ NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Send a LS reject
+ */
+static void
+bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
+ u8 reason_code, u8 reason_code_expl)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ int len;
+
+ bfa_trc(rport->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE);
+ if (!fcxp)
+ return;
+
+ len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+ rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, reason_code, reason_code_expl);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Return state of rport.
+ */
+int
+bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
+{
+ return bfa_sm_to_state(rport_sm_table, rport->sm);
+}
+
+
+/*
+ * brief
+ * Called by the Driver to set rport delete/ageout timeout
+ *
+ * param[in] rport timeout value in seconds.
+ *
+ * return None
+ */
+void
+bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
+{
+ /* convert to Millisecs */
+ if (rport_tmo > 0)
+ bfa_fcs_rport_del_timeout = rport_tmo * 1000;
+}
+void
+bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
+{
+ bfa_trc(rport->fcs, rport->pid);
+
+ rport->prlo = BFA_TRUE;
+ rport->reply_oxid = ox_id;
+ bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
+}
+
+/*
+ * Called by BFAD to set the max limit on number of bfa_fcs_rport allocation
+ * which limits number of concurrent logins to remote ports
+ */
+void
+bfa_fcs_rport_set_max_logins(u32 max_logins)
+{
+ if (max_logins > 0)
+ bfa_fcs_rport_max_logins = max_logins;
+}
+
+void
+bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+ struct bfa_rport_attr_s *rport_attr)
+{
+ struct bfa_rport_qos_attr_s qos_attr;
+ struct bfa_fcs_lport_s *port = rport->port;
+ bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
+ struct bfa_port_attr_s port_attr;
+
+ bfa_fcport_get_attr(rport->fcs->bfa, &port_attr);
+
+ memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+ memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
+
+ rport_attr->pid = rport->pid;
+ rport_attr->pwwn = rport->pwwn;
+ rport_attr->nwwn = rport->nwwn;
+ rport_attr->cos_supported = rport->fc_cos;
+ rport_attr->df_sz = rport->maxfrsize;
+ rport_attr->state = bfa_fcs_rport_get_state(rport);
+ rport_attr->fc_cos = rport->fc_cos;
+ rport_attr->cisc = rport->cisc;
+ rport_attr->scsi_function = rport->scsi_function;
+ rport_attr->curr_speed = rport->rpf.rpsc_speed;
+ rport_attr->assigned_speed = rport->rpf.assigned_speed;
+
+ if (rport->bfa_rport) {
+ qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+ qos_attr.qos_flow_id =
+ cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+ }
+ rport_attr->qos_attr = qos_attr;
+
+ rport_attr->trl_enforced = BFA_FALSE;
+ if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
+ (rport->scsi_function == BFA_RPORT_TARGET)) {
+ if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
+ rport_speed =
+ bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
+
+ if ((bfa_fcs_lport_get_rport_max_speed(port) !=
+ BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed))
+ rport_attr->trl_enforced = BFA_TRUE;
+ }
+}
+
+/*
+ * Remote port implementation.
+ */
+
+/*
+ * fcs_rport_api FCS rport API.
+ */
+
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rport == NULL) {
+ /*
+ * TBD Error handling
+ */
+ }
+
+ return rport;
+}
+
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn);
+ if (rport == NULL) {
+ /*
+ * TBD Error handling
+ */
+ }
+
+ return rport;
+}
+
+/*
+ * Remote port features (RPF) implementation.
+ */
+
+#define BFA_FCS_RPF_RETRIES (3)
+#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
+
+static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
+ struct bfa_fcxp_s *fcxp_alloced);
+static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
+ struct bfa_fcxp_s *fcxp,
+ void *cbarg,
+ bfa_status_t req_status,
+ u32 rsp_len,
+ u32 resid_len,
+ struct fchs_s *rsp_fchs);
+
+static void bfa_fcs_rpf_timeout(void *arg);
+
+/*
+ * fcs_rport_ftrs_sm FCS rport state machine events
+ */
+
+enum rpf_event {
+ RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
+ RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
+ RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
+ RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
+ RPFSM_EVENT_RPSC_COMP = 5,
+ RPFSM_EVENT_RPSC_FAIL = 6,
+ RPFSM_EVENT_RPSC_ERROR = 7,
+};
+
+static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
+ enum rpf_event event);
+static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
+ enum rpf_event event);
+static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
+ enum rpf_event event);
+static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
+ enum rpf_event event);
+static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
+ enum rpf_event event);
+static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
+ enum rpf_event event);
+
+static void
+bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+ struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPFSM_EVENT_RPORT_ONLINE:
+ /* Send RPSC2 to a Brocade fabric only. */
+ if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
+ ((rport->port->fabric->lps->brcd_switch) ||
+ (bfa_fcs_fabric_get_switch_oui(fabric) ==
+ BFA_FCS_BRCD_SWITCH_OUI))) {
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
+ rpf->rpsc_retries = 0;
+ bfa_fcs_rpf_send_rpsc2(rpf, NULL);
+ }
+ break;
+
+ case RPFSM_EVENT_RPORT_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPFSM_EVENT_FCXP_SENT:
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
+ break;
+
+ case RPFSM_EVENT_RPORT_OFFLINE:
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
+ rpf->rpsc_retries = 0;
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPFSM_EVENT_RPSC_COMP:
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
+ /* Update speed info in f/w via BFA */
+ if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN)
+ bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
+ else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN)
+ bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
+ break;
+
+ case RPFSM_EVENT_RPSC_FAIL:
+ /* RPSC not supported by rport */
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
+ break;
+
+ case RPFSM_EVENT_RPSC_ERROR:
+ /* need to retry...delayed a bit. */
+ if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
+ bfa_timer_start(rport->fcs->bfa, &rpf->timer,
+ bfa_fcs_rpf_timeout, rpf,
+ BFA_FCS_RPF_RETRY_TIMEOUT);
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
+ } else {
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
+ }
+ break;
+
+ case RPFSM_EVENT_RPORT_OFFLINE:
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+ bfa_fcxp_discard(rpf->fcxp);
+ rpf->rpsc_retries = 0;
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPFSM_EVENT_TIMEOUT:
+ /* re-send the RPSC */
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
+ bfa_fcs_rpf_send_rpsc2(rpf, NULL);
+ break;
+
+ case RPFSM_EVENT_RPORT_OFFLINE:
+ bfa_timer_stop(&rpf->timer);
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+ rpf->rpsc_retries = 0;
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPFSM_EVENT_RPORT_OFFLINE:
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+ rpf->rpsc_retries = 0;
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+
+static void
+bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_trc(rport->fcs, event);
+
+ switch (event) {
+ case RPFSM_EVENT_RPORT_ONLINE:
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
+ bfa_fcs_rpf_send_rpsc2(rpf, NULL);
+ break;
+
+ case RPFSM_EVENT_RPORT_OFFLINE:
+ break;
+
+ default:
+ bfa_sm_fault(rport->fcs, event);
+ }
+}
+/*
+ * Called when Rport is created.
+ */
+void
+bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
+{
+ struct bfa_fcs_rpf_s *rpf = &rport->rpf;
+
+ bfa_trc(rport->fcs, rport->pid);
+ rpf->rport = rport;
+
+ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
+}
+
+/*
+ * Called when Rport becomes online
+ */
+void
+bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
+{
+ bfa_trc(rport->fcs, rport->pid);
+
+ if (__fcs_min_cfg(rport->port->fcs))
+ return;
+
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric))
+ bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
+}
+
+/*
+ * Called when Rport becomes offline
+ */
+void
+bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
+{
+ bfa_trc(rport->fcs, rport->pid);
+
+ if (__fcs_min_cfg(rport->port->fcs))
+ return;
+
+ rport->rpf.rpsc_speed = 0;
+ bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
+}
+
+static void
+bfa_fcs_rpf_timeout(void *arg)
+{
+ struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
+}
+
+static void
+bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+ struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct fchs_s fchs;
+ int len;
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_trc(rport->fcs, rport->pwwn);
+
+ fcxp = fcxp_alloced ? fcxp_alloced :
+ bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ if (!fcxp) {
+ bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
+ bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE);
+ return;
+ }
+ rpf->fcxp = fcxp;
+
+ len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+ bfa_fcs_lport_get_fcid(port), &rport->pid, 1);
+
+ bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+ FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
+ rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
+ rport->stats.rpsc_sent++;
+ bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
+
+}
+
+static void
+bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
+ struct bfa_fcs_rport_s *rport = rpf->rport;
+ struct fc_ls_rjt_s *ls_rjt;
+ struct fc_rpsc2_acc_s *rpsc2_acc;
+ u16 num_ents;
+
+ bfa_trc(rport->fcs, req_status);
+
+ if (req_status != BFA_STATUS_OK) {
+ bfa_trc(rport->fcs, req_status);
+ if (req_status == BFA_STATUS_ETIMER)
+ rport->stats.rpsc_failed++;
+ bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
+ return;
+ }
+
+ rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
+ if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
+ rport->stats.rpsc_accs++;
+ num_ents = be16_to_cpu(rpsc2_acc->num_pids);
+ bfa_trc(rport->fcs, num_ents);
+ if (num_ents > 0) {
+ WARN_ON(be32_to_cpu(rpsc2_acc->port_info[0].pid) !=
+ bfa_ntoh3b(rport->pid));
+ bfa_trc(rport->fcs,
+ be32_to_cpu(rpsc2_acc->port_info[0].pid));
+ bfa_trc(rport->fcs,
+ be16_to_cpu(rpsc2_acc->port_info[0].speed));
+ bfa_trc(rport->fcs,
+ be16_to_cpu(rpsc2_acc->port_info[0].index));
+ bfa_trc(rport->fcs,
+ rpsc2_acc->port_info[0].type);
+
+ if (rpsc2_acc->port_info[0].speed == 0) {
+ bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
+ return;
+ }
+
+ rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
+ be16_to_cpu(rpsc2_acc->port_info[0].speed));
+
+ bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
+ }
+ } else {
+ ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+ bfa_trc(rport->fcs, ls_rjt->reason_code);
+ bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
+ rport->stats.rpsc_rejects++;
+ if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
+ bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
+ else
+ bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
+ }
+}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
new file mode 100644
index 000000000..ea24d4c6e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+#include "bfi_reg.h"
+
+void
+bfa_hwcb_reginit(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+ int fn = bfa_ioc_pcifn(&bfa->ioc);
+
+ if (fn == 0) {
+ bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
+ } else {
+ bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
+ }
+}
+
+static void
+bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
+{
+ writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
+ bfa->iocfc.bfa_regs.intr_status);
+}
+
+/*
+ * Actions to respond RME Interrupt for Crossbow ASIC:
+ * - Write 1 to Interrupt Status register
+ * INTX - done in bfa_intx()
+ * MSIX - done in bfa_hwcb_rspq_ack_msix()
+ * - Update CI (only if new CI)
+ */
+static void
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+ bfa->iocfc.bfa_regs.intr_status);
+
+ if (bfa_rspq_ci(bfa, rspq) == ci)
+ return;
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ if (bfa_rspq_ci(bfa, rspq) == ci)
+ return;
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+void
+bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
+ u32 *num_vecs, u32 *max_vec_bit)
+{
+#define __HFN_NUMINTS 13
+ if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
+ *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
+ __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
+ __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
+ __HFN_INT_MBOX_LPU0);
+ *max_vec_bit = __HFN_INT_MBOX_LPU0;
+ } else {
+ *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
+ __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
+ __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
+ __HFN_INT_MBOX_LPU1);
+ *max_vec_bit = __HFN_INT_MBOX_LPU1;
+ }
+
+ *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+ __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+ *num_vecs = __HFN_NUMINTS;
+}
+
+/*
+ * Dummy interrupt handler for handling spurious interrupts.
+ */
+static void
+bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+/*
+ * No special setup required for crossbow -- vector assignments are implicit.
+ */
+void
+bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
+{
+ WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
+
+ bfa->msix.nvecs = nvecs;
+ bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+ int i;
+
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1) {
+ for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
+ bfa->msix.handler[i] = bfa_msix_all;
+ return;
+ }
+
+ for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
+ bfa->msix.handler[i] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
+{
+ int i;
+
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1) {
+ for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+ bfa->msix.handler[i] = bfa_msix_all;
+ return;
+ }
+
+ for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+ bfa->msix.handler[i] = bfa_msix_reqq;
+
+ for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+ bfa->msix.handler[i] = bfa_msix_rspq;
+}
+
+void
+bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+ bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
+}
+
+/*
+ * No special enable/disable -- vector assignments are implicit.
+ */
+void
+bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
+{
+ if (msix) {
+ bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+ } else {
+ bfa->iocfc.hwif.hw_reqq_ack = NULL;
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+ }
+}
+
+void
+bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
+{
+ *start = BFI_MSIX_RME_QMIN_CB;
+ *end = BFI_MSIX_RME_QMAX_CB;
+}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
new file mode 100644
index 000000000..637527f48
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+#include "bfi_reg.h"
+
+BFA_TRC_FILE(HAL, IOCFC_CT);
+
+/*
+ * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
+ */
+static void
+bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+void
+bfa_hwct_reginit(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+ int fn = bfa_ioc_pcifn(&bfa->ioc);
+
+ if (fn == 0) {
+ bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
+ } else {
+ bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
+ }
+}
+
+void
+bfa_hwct2_reginit(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+ bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS);
+ bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK);
+}
+
+void
+bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
+{
+ u32 r32;
+
+ r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+ writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+}
+
+/*
+ * Actions to respond RME Interrupt for Catapult ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Acknowledge by writing to RME Queue Control register
+ * - Update CI
+ */
+void
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ u32 r32;
+
+ r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+ writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+/*
+ * Actions to respond RME Interrupt for Catapult2 ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Update CI
+ */
+void
+bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+void
+bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
+ u32 *num_vecs, u32 *max_vec_bit)
+{
+ *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1;
+ *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1));
+ *num_vecs = BFI_MSIX_CT_MAX;
+}
+
+/*
+ * Setup MSI-X vector for catapult
+ */
+void
+bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
+{
+ WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
+ bfa_trc(bfa, nvecs);
+
+ bfa->msix.nvecs = nvecs;
+ bfa_hwct_msix_uninstall(bfa);
+}
+
+void
+bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
+{
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1)
+ bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
+ else
+ bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
+{
+ int i;
+
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1) {
+ for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
+ bfa->msix.handler[i] = bfa_msix_all;
+ return;
+ }
+
+ for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
+ bfa->msix.handler[i] = bfa_msix_reqq;
+
+ for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
+ bfa->msix.handler[i] = bfa_msix_rspq;
+}
+
+void
+bfa_hwct_msix_uninstall(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; i < BFI_MSIX_CT_MAX; i++)
+ bfa->msix.handler[i] = bfa_hwct_msix_dummy;
+}
+
+/*
+ * Enable MSI-X vectors
+ */
+void
+bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
+{
+ bfa_trc(bfa, 0);
+ bfa_ioc_isr_mode_set(&bfa->ioc, msix);
+}
+
+void
+bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
+{
+ *start = BFI_MSIX_RME_QMIN_CT;
+ *end = BFI_MSIX_RME_QMAX_CT;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
new file mode 100644
index 000000000..315d6d6dc
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -0,0 +1,7065 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+#include "bfa_defs_svc.h"
+#include "bfi.h"
+
+BFA_TRC_FILE(CNA, IOC);
+
+/*
+ * IOC local definitions
+ */
+#define BFA_IOC_TOV 3000 /* msecs */
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
+
+#define bfa_ioc_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
+ bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define bfa_hb_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
+ bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
+#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
+
+#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
+
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
+/*
+ * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_fail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
+#define bfa_ioc_sync_join(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
+
+#define bfa_ioc_mbox_cmd_pending(__ioc) \
+ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+bfa_boolean_t bfa_auto_recover = BFA_TRUE;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
+static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
+ enum bfa_ioc_event_e event);
+static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
+static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
+ struct bfi_ioc_image_hdr_s *base_fwhdr,
+ struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
+static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
+ struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *base_fwhdr);
+
+/*
+ * IOC state machine definitions/declarations
+ */
+enum ioc_event {
+ IOC_E_RESET = 1, /* IOC reset request */
+ IOC_E_ENABLE = 2, /* IOC enable request */
+ IOC_E_DISABLE = 3, /* IOC disable request */
+ IOC_E_DETACH = 4, /* driver detach cleanup */
+ IOC_E_ENABLED = 5, /* f/w enabled */
+ IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
+ IOC_E_DISABLED = 7, /* f/w disabled */
+ IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /* heartbeat failure */
+ IOC_E_HWERROR = 10, /* hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /* timeout */
+ IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
+};
+
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
+
+static struct bfa_sm_table_s ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
+ {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
+ {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+ {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
+ {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+ {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
+};
+
+/*
+ * IOCPF state machine definitions/declarations
+ */
+
+#define bfa_iocpf_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
+ bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define bfa_iocpf_poll_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
+ bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
+
+#define bfa_sem_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
+ bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
+#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
+
+/*
+ * Forward declareations for iocpf state machine
+ */
+static void bfa_iocpf_timeout(void *ioc_arg);
+static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
+
+/*
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+ IOCPF_E_ENABLE = 1, /* IOCPF enable request */
+ IOCPF_E_DISABLE = 2, /* IOCPF disable request */
+ IOCPF_E_STOP = 3, /* stop on driver detach */
+ IOCPF_E_FWREADY = 4, /* f/w initialization done */
+ IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
+ IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
+ IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
+ IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
+ IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
+ IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
+ IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
+ IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
+};
+
+/*
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+ BFA_IOCPF_RESET = 1, /* IOC is in reset state */
+ BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
+ BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
+ BFA_IOCPF_READY = 4, /* IOCPF is initialized */
+ BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
+ BFA_IOCPF_FAIL = 6, /* IOCPF failed */
+ BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
+ BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
+ BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
+ enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
+ enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
+
+static struct bfa_sm_table_s iocpf_sm_table[] = {
+ {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+ {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+ {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+ {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
+
+/*
+ * IOC State Machine
+ */
+
+/*
+ * Beginning state. IOC uninit state.
+ */
+
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_RESET:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+/*
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
+}
+
+/*
+ * IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
+}
+
+/*
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_PFFAILED:
+ /* !!! fall through !!! */
+ case IOC_E_HWERROR:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+ break;
+
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_getattr(ioc);
+}
+
+/*
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* !!! fall through !!! */
+ case IOC_E_TIMEOUT:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+ bfa_ioc_hb_monitor(ioc);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_hb_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ bfa_hb_timer_stop(ioc);
+ /* !!! fall through !!! */
+ case IOC_E_HBFAIL:
+ if (ioc->iocpf.auto_recover)
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ else
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+
+ bfa_ioc_fail_notify(ioc);
+
+ if (event != IOC_E_PFFAILED)
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
+}
+
+/*
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_DISABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ /*
+ * No state change. Will move to disabled state
+ * after iocpf sm completes failure processing and
+ * moves to disabled state.
+ */
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+ break;
+
+ case IOC_E_HWFAILED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_trc(ioc, 0);
+}
+
+/*
+ * Hardware initialization retry.
+ */
+static void
+bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ /*
+ * Initialization retry failed.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+ break;
+
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_trc(ioc, 0);
+}
+
+/*
+ * IOC failure.
+ */
+static void
+bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+ break;
+
+ case IOC_E_HWERROR:
+ case IOC_E_HWFAILED:
+ /*
+ * HB failure / HW error notification, ignore.
+ */
+ break;
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_trc(ioc, 0);
+}
+
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ break;
+
+ case IOC_E_HWERROR:
+ /* Ignore - already in hwfail state */
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * IOCPF State Machine
+ */
+
+/*
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
+{
+ iocpf->fw_mismatch_notified = BFA_FALSE;
+ iocpf->auto_recover = bfa_auto_recover;
+}
+
+/*
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ break;
+
+ case IOCPF_E_STOP:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
+{
+ struct bfi_ioc_image_hdr_s fwhdr;
+ u32 r32, fwstate, pgnum, pgoff, loff = 0;
+ int i;
+
+ /*
+ * Spin on init semaphore to serialize.
+ */
+ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ while (r32 & 0x1) {
+ udelay(20);
+ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ }
+
+ /* h/w sem init */
+ fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
+ if (fwstate == BFI_IOC_UNINIT) {
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ goto sem_get;
+ }
+
+ bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
+
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ goto sem_get;
+ }
+
+ /*
+ * Clear fwver hdr
+ */
+ pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
+ bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
+ loff += sizeof(u32);
+ }
+
+ bfa_trc(iocpf->ioc, fwstate);
+ bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
+ bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
+
+ /*
+ * Unlock the hw semaphore. Should be here only once per boot.
+ */
+ bfa_ioc_ownership_reset(iocpf->ioc);
+
+ /*
+ * unlock init semaphore.
+ */
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+
+sem_get:
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
+ bfa_ioc_sync_join(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ bfa_ioc_firmware_unlock(ioc);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_sem_timer_start(ioc);
+ }
+ } else {
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+ }
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_sem_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_sem_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * Notify enable completion callback.
+ */
+static void
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
+{
+ /*
+ * Call only the first time sm enters fwmismatch state.
+ */
+ if (iocpf->fw_mismatch_notified == BFA_FALSE)
+ bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+ iocpf->fw_mismatch_notified = BFA_TRUE;
+ bfa_iocpf_timer_start(iocpf->ioc);
+}
+
+/*
+ * Awaiting firmware version match.
+ */
+static void
+bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_TIMEOUT:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Awaiting semaphore for h/w initialzation.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ if (bfa_ioc_sync_complete(ioc)) {
+ bfa_ioc_sync_join(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_sem_timer_start(ioc);
+ }
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_sem_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
+{
+ iocpf->poll_time = 0;
+ bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
+}
+
+/*
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_FWREADY:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+ break;
+
+ case IOCPF_E_TIMEOUT:
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_ioc_sync_leave(ioc);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_iocpf_timer_start(iocpf->ioc);
+ /*
+ * Enable Interrupts before sending fw IOC ENABLE cmd.
+ */
+ iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
+ bfa_ioc_send_enable(iocpf->ioc);
+}
+
+/*
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_FWRSP_ENABLE:
+ bfa_iocpf_timer_stop(ioc);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
+ break;
+
+ case IOCPF_E_INITFAIL:
+ bfa_iocpf_timer_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOCPF_E_TIMEOUT:
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ if (event == IOCPF_E_TIMEOUT)
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_iocpf_timer_stop(ioc);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
+}
+
+static void
+bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ break;
+
+ case IOCPF_E_GETATTRFAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
+
+ case IOCPF_E_FAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_iocpf_timer_start(iocpf->ioc);
+ bfa_ioc_send_disable(iocpf->ioc);
+}
+
+/*
+ * IOC is being disabled
+ */
+static void
+bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_FWRSP_DISABLE:
+ bfa_iocpf_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FAIL:
+ bfa_iocpf_timer_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOCPF_E_TIMEOUT:
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FWRSP_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * IOC hb ack request is being removed.
+ */
+static void
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_sync_leave(ioc);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
+ case IOCPF_E_FAIL:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * IOC disable completion entry.
+ */
+static void
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_ioc_mbox_flush(iocpf->ioc);
+ bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
+}
+
+static void
+bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_ioc_debug_save_ftrc(iocpf->ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_notify_fail(ioc);
+ bfa_ioc_sync_leave(ioc);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_sem_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_sem_timer_stop(ioc);
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ case IOCPF_E_FAIL:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_trc(iocpf->ioc, 0);
+}
+
+/*
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+ /*
+ * Mark IOC as failed in hardware and stop firmware.
+ */
+ bfa_ioc_lpu_stop(iocpf->ioc);
+
+ /*
+ * Flush any queued up mailbox requests.
+ */
+ bfa_ioc_mbox_flush(iocpf->ioc);
+
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_sync_ack(ioc);
+ bfa_ioc_notify_fail(ioc);
+ if (!iocpf->auto_recover) {
+ bfa_ioc_sync_leave(ioc);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ } else {
+ if (bfa_ioc_sync_complete(ioc))
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ else {
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ }
+ }
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_sem_timer_stop(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FAIL:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+{
+ bfa_trc(iocpf->ioc, 0);
+}
+
+/*
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc_s *ioc = iocpf->ioc;
+
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/*
+ * BFA IOC private functions
+ */
+
+/*
+ * Notify common modules registered for notification.
+ */
+static void
+bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
+{
+ struct bfa_ioc_notify_s *notify;
+ struct list_head *qe;
+
+ list_for_each(qe, &ioc->notify_q) {
+ notify = (struct bfa_ioc_notify_s *)qe;
+ notify->cbfn(notify->cbarg, event);
+ }
+}
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+{
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
+bfa_boolean_t
+bfa_ioc_sem_get(void __iomem *sem_reg)
+{
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
+
+ r32 = readl(sem_reg);
+
+ while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
+ cnt++;
+ udelay(2);
+ r32 = readl(sem_reg);
+ }
+
+ if (!(r32 & 1))
+ return BFA_TRUE;
+
+ return BFA_FALSE;
+}
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+
+ /*
+ * First read to the semaphore register will return 0, subsequent reads
+ * will return 1. Semaphore is released by writing 1 to the register
+ */
+ r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+ if (r32 == ~0) {
+ WARN_ON(r32 == ~0);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+ return;
+ }
+ if (!(r32 & 1)) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
+ return;
+ }
+
+ bfa_sem_timer_start(ioc);
+}
+
+/*
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
+{
+ u32 pss_ctl;
+ int i;
+#define PSS_LMEM_INIT_TIME 10000
+
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LMEM_RESET;
+ pss_ctl |= __PSS_LMEM_INIT_EN;
+
+ /*
+ * i2c workaround 12.5khz clock
+ */
+ pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+
+ /*
+ * wait for memory initialization to be complete
+ */
+ i = 0;
+ do {
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ i++;
+ } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+ /*
+ * If memory initialization is not successful, IOC timeout will catch
+ * such failures.
+ */
+ WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
+ bfa_trc(ioc, pss_ctl);
+
+ pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
+{
+ u32 pss_ctl;
+
+ /*
+ * Take processor out of reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LPU0_RESET;
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
+{
+ u32 pss_ctl;
+
+ /*
+ * Put processors in reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+/*
+ * Get driver and firmware versions.
+ */
+void
+bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ int i;
+ u32 *fwsig = (u32 *) fwhdr;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
+ i++) {
+ fwsig[i] =
+ bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+ loff += sizeof(u32);
+ }
+}
+
+/*
+ * Returns TRUE if driver is willing to work with current smem f/w version.
+ */
+bfa_boolean_t
+bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *smem_fwhdr)
+{
+ struct bfi_ioc_image_hdr_s *drv_fwhdr;
+ enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+ /*
+ * If smem is incompatible or old, driver should not work with it.
+ */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return BFA_FALSE;
+ }
+
+ /*
+ * IF Flash has a better F/W than smem do not work with smem.
+ * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it.
+ * If Flash is old or incomp work with smem iff smem f/w == drv f/w.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
+ return BFA_FALSE;
+ } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
+ return BFA_TRUE;
+ } else {
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ BFA_TRUE : BFA_FALSE;
+ }
+}
+
+/*
+ * Return true if current running version is valid. Firmware signature and
+ * execution context (driver/bios) must match.
+ */
+static bfa_boolean_t
+bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
+{
+ struct bfi_ioc_image_hdr_s fwhdr;
+
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+
+ if (swab32(fwhdr.bootenv) != boot_env) {
+ bfa_trc(ioc, fwhdr.bootenv);
+ bfa_trc(ioc, boot_env);
+ return BFA_FALSE;
+ }
+
+ return bfa_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+static bfa_boolean_t
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
+ struct bfi_ioc_image_hdr_s *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return BFA_FALSE;
+
+ return BFA_TRUE;
+}
+
+/*
+ * Returns TRUE if major minor and maintainence are same.
+ * If patch versions are same, check for MD5 Checksum to be same.
+ */
+static bfa_boolean_t
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
+ struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return BFA_FALSE;
+
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+ }
+
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return BFA_FALSE;
+
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return BFA_TRUE;
+
+ return BFA_FALSE;
+}
+
+/*
+ * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better.
+ */
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
+ struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /*
+ * GA takes priority over internal builds of the same patch stream.
+ * At this point major minor maint and patch numbers are same.
+ */
+
+ if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ } else {
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+ }
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /*
+ * All Version Numbers are equal.
+ * Md5 check to be done as a part of compatibility check.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
+
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+bfa_status_t
+bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr_s *flash_fwhdr;
+ bfa_status_t status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+
+/*
+ * Invalidate fwver signature
+ */
+bfa_status_t
+bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
+{
+
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ enum bfi_ioc_state ioc_fwstate;
+
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+ if (!bfa_ioc_state_disabled(ioc_fwstate))
+ return BFA_STATUS_ADAPTER_ENABLED;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if (r32)
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+static void
+bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ bfa_boolean_t fwvalid;
+ u32 boot_type;
+ u32 boot_env;
+
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+
+ if (force)
+ ioc_fwstate = BFI_IOC_UNINIT;
+
+ bfa_trc(ioc, ioc_fwstate);
+
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ boot_env = BFI_FWBOOT_ENV_OS;
+
+ /*
+ * check if firmware is valid
+ */
+ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+ BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
+
+ if (!fwvalid) {
+ if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+ return;
+ }
+
+ /*
+ * If hardware initialization is in progress (initialized by other IOC),
+ * just wait for an initialization completion interrupt.
+ */
+ if (ioc_fwstate == BFI_IOC_INITING) {
+ bfa_ioc_poll_fwinit(ioc);
+ return;
+ }
+
+ /*
+ * If IOC function is disabled and firmware version is same,
+ * just re-enable IOC.
+ *
+ * If option rom, IOC must not be in operational state. With
+ * convergence, IOC will be in operational state when 2nd driver
+ * is loaded.
+ */
+ if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
+
+ /*
+ * When using MSI-X any pending firmware ready event should
+ * be flushed. Otherwise MSI-X interrupts are not delivered.
+ */
+ bfa_ioc_msgflush(ioc);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ return;
+ }
+
+ /*
+ * Initialize the h/w for any other states.
+ */
+ if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+}
+
+static void
+bfa_ioc_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+ bfa_trc(ioc, 0);
+ bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+void
+bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
+{
+ u32 *msgp = (u32 *) ioc_msg;
+ u32 i;
+
+ bfa_trc(ioc, msgp[0]);
+ bfa_trc(ioc, len);
+
+ WARN_ON(len > BFI_IOC_MSGLEN_MAX);
+
+ /*
+ * first write msg to mailbox registers
+ */
+ for (i = 0; i < len / sizeof(u32); i++)
+ writel(cpu_to_le32(msgp[i]),
+ ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+ writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ /*
+ * write 1 to mailbox CMD to trigger LPU event
+ */
+ writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+ (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_ctrl_req_s enable_req;
+ struct timeval tv;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.clscode = cpu_to_be16(ioc->clscode);
+ do_gettimeofday(&tv);
+ enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_ctrl_req_s disable_req;
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_getattr_req_s attr_req;
+
+ bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
+static void
+bfa_ioc_hb_check(void *cbarg)
+{
+ struct bfa_ioc_s *ioc = cbarg;
+ u32 hb_count;
+
+ hb_count = readl(ioc->ioc_regs.heartbeat);
+ if (ioc->hb_count == hb_count) {
+ bfa_ioc_recover(ioc);
+ return;
+ } else {
+ ioc->hb_count = hb_count;
+ }
+
+ bfa_ioc_mbox_poll(ioc);
+ bfa_hb_timer_start(ioc);
+}
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
+{
+ ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
+ bfa_hb_timer_start(ioc);
+}
+
+/*
+ * Initiate a full firmware download.
+ */
+static bfa_status_t
+bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
+ u32 boot_env)
+{
+ u32 *fwimg;
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ u32 chunkno = 0;
+ u32 i;
+ u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ bfa_status_t status;
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+ status = bfa_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ bfa_trc(ioc, fwimg_size);
+
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < fwimg_size; i++) {
+
+ if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+ }
+
+ /*
+ * write smem
+ */
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
+ fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
+
+ loff += sizeof(u32);
+
+ /*
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * Set boot type, env and device mode at the end.
+ */
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
+ asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+ ioc->port0_mode, ioc->port1_mode);
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
+ swab32(asicmode));
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
+ swab32(boot_type));
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
+ swab32(boot_env));
+ return BFA_STATUS_OK;
+}
+
+
+/*
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_attr_s *attr = ioc->attr;
+
+ attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
+ attr->card_type = be32_to_cpu(attr->card_type);
+ attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
+ ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
+ attr->mfg_year = be16_to_cpu(attr->mfg_year);
+
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/*
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ int mc;
+
+ INIT_LIST_HEAD(&mod->cmd_q);
+ for (mc = 0; mc < BFI_MC_MAX; mc++) {
+ mod->mbhdlr[mc].cbfn = NULL;
+ mod->mbhdlr[mc].cbarg = ioc->bfa;
+ }
+}
+
+/*
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd_s *cmd;
+ u32 stat;
+
+ /*
+ * If no command pending, do nothing
+ */
+ if (list_empty(&mod->cmd_q))
+ return;
+
+ /*
+ * If previous command is not yet fetched by firmware, do nothing
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat)
+ return;
+
+ /*
+ * Enqueue command to firmware.
+ */
+ bfa_q_deq(&mod->cmd_q, &cmd);
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/*
+ * Cleanup any pending requests.
+ */
+static void
+bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd_s *cmd;
+
+ while (!list_empty(&mod->cmd_q))
+ bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+/*
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] tbuf app memory to store data from smem
+ * @param[in] soff smem offset
+ * @param[in] sz size of smem in bytes
+ */
+static bfa_status_t
+bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
+{
+ u32 pgnum, loff;
+ __be32 r32;
+ int i, len;
+ u32 *buf = tbuf;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+ loff = PSS_SMEM_PGOFF(soff);
+ bfa_trc(ioc, pgnum);
+ bfa_trc(ioc, loff);
+ bfa_trc(ioc, sz);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
+ bfa_trc(ioc, 0);
+ return BFA_STATUS_FAILED;
+ }
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ len = sz/sizeof(u32);
+ bfa_trc(ioc, len);
+ for (i = 0; i < len; i++) {
+ r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+ buf[i] = swab32(r32);
+ loff += sizeof(u32);
+
+ /*
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+ /*
+ * release semaphore.
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+
+ bfa_trc(ioc, pgnum);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Clear SMEM data from host through PCI memmap
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] soff smem offset
+ * @param[in] sz size of smem in bytes
+ */
+static bfa_status_t
+bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
+{
+ int i, len;
+ u32 pgnum, loff;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+ loff = PSS_SMEM_PGOFF(soff);
+ bfa_trc(ioc, pgnum);
+ bfa_trc(ioc, loff);
+ bfa_trc(ioc, sz);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
+ bfa_trc(ioc, 0);
+ return BFA_STATUS_FAILED;
+ }
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ len = sz/sizeof(u32); /* len in words */
+ bfa_trc(ioc, len);
+ for (i = 0; i < len; i++) {
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
+ loff += sizeof(u32);
+
+ /*
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * release semaphore.
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+ bfa_trc(ioc, pgnum);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
+ /*
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+
+ bfa_ioc_debug_save_ftrc(ioc);
+
+ BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
+ "Heart Beat of IOC has failed\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
+
+}
+
+static void
+bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ /*
+ * Provide enable completion callback.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+ "Running firmware version is incompatible "
+ "with the driver version\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
+}
+
+bfa_status_t
+bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
+{
+
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ bfa_ioc_pll_init_asic(ioc);
+
+ ioc->pllinit = BFA_TRUE;
+
+ /*
+ * Initialize LMEM
+ */
+ bfa_ioc_lmem_init(ioc);
+
+ /*
+ * release semaphore.
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+bfa_status_t
+bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
+{
+ struct bfi_ioc_image_hdr_s *drv_fwhdr;
+ bfa_status_t status;
+ bfa_ioc_stats(ioc, ioc_boots);
+
+ if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+ return BFA_STATUS_FAILED;
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+ /*
+ * Work with Flash iff flash f/w is better than driver f/w.
+ * Otherwise push drivers firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
+
+ /*
+ * Initialize IOC state of all functions on a chip reset.
+ */
+ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ } else {
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
+ }
+
+ bfa_ioc_msgflush(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else {
+ WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
+ bfa_iocpf_timeout(ioc);
+ }
+ return status;
+}
+
+/*
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
+{
+ bfa_auto_recover = auto_recover;
+}
+
+
+
+bfa_boolean_t
+bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+bfa_boolean_t
+bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
+{
+ u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
+
+ return ((r32 != BFI_IOC_UNINIT) &&
+ (r32 != BFI_IOC_INITING) &&
+ (r32 != BFI_IOC_MEMTEST));
+}
+
+bfa_boolean_t
+bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
+{
+ __be32 *msgp = mbmsg;
+ u32 r32;
+ int i;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if ((r32 & 1) == 0)
+ return BFA_FALSE;
+
+ /*
+ * read the MBOX msg
+ */
+ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+ i++) {
+ r32 = readl(ioc->ioc_regs.lpu_mbox +
+ i * sizeof(u32));
+ msgp[i] = cpu_to_be32(r32);
+ }
+
+ /*
+ * turn off mailbox interrupt by clearing mailbox status
+ */
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+ readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+ return BFA_TRUE;
+}
+
+void
+bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
+{
+ union bfi_ioc_i2h_msg_u *msg;
+ struct bfa_iocpf_s *iocpf = &ioc->iocpf;
+
+ msg = (union bfi_ioc_i2h_msg_u *) m;
+
+ bfa_ioc_stats(ioc, ioc_isrs);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOC_I2H_HBEAT:
+ break;
+
+ case BFI_IOC_I2H_ENABLE_REPLY:
+ ioc->port_mode = ioc->port_mode_cfg =
+ (enum bfa_mode_s)msg->fw_event.port_mode;
+ ioc->ad_cap_bm = msg->fw_event.cap_bm;
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+ break;
+
+ case BFI_IOC_I2H_DISABLE_REPLY:
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
+ break;
+
+ case BFI_IOC_I2H_GETATTR_REPLY:
+ bfa_ioc_getattr_reply(ioc);
+ break;
+
+ default:
+ bfa_trc(ioc, msg->mh.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * IOC attach time initialization and setup.
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] bfa driver instance structure
+ */
+void
+bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
+ struct bfa_timer_mod_s *timer_mod)
+{
+ ioc->bfa = bfa;
+ ioc->cbfn = cbfn;
+ ioc->timer_mod = timer_mod;
+ ioc->fcmode = BFA_FALSE;
+ ioc->pllinit = BFA_FALSE;
+ ioc->dbg_fwsave_once = BFA_TRUE;
+ ioc->iocpf.ioc = ioc;
+
+ bfa_ioc_mbox_attach(ioc);
+ INIT_LIST_HEAD(&ioc->notify_q);
+
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(ioc, IOC_E_RESET);
+}
+
+/*
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_ioc_detach(struct bfa_ioc_s *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DETACH);
+ INIT_LIST_HEAD(&ioc->notify_q);
+}
+
+/*
+ * Setup IOC PCI properties.
+ *
+ * @param[in] pcidev PCI device information for this IOC
+ */
+void
+bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
+ enum bfi_pcifn_class clscode)
+{
+ ioc->clscode = clscode;
+ ioc->pcidev = *pcidev;
+
+ /*
+ * Initialize IOC and device personality
+ */
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+ ioc->asic_mode = BFI_ASIC_MODE_FC;
+
+ switch (pcidev->device_id) {
+ case BFA_PCI_DEVICE_ID_FC_8G1P:
+ case BFA_PCI_DEVICE_ID_FC_8G2P:
+ ioc->asic_gen = BFI_ASIC_GEN_CB;
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT:
+ ioc->asic_gen = BFI_ASIC_GEN_CT;
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT_FC:
+ ioc->asic_gen = BFI_ASIC_GEN_CT;
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT2:
+ case BFA_PCI_DEVICE_ID_CT2_QUAD:
+ ioc->asic_gen = BFI_ASIC_GEN_CT2;
+ if (clscode == BFI_PCIFN_CLASS_FC &&
+ pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+ ioc->asic_mode = BFI_ASIC_MODE_FC16;
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ } else {
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ } else {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_NIC;
+ ioc->ad_cap_bm = BFA_CM_NIC;
+ }
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+
+ /*
+ * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
+ */
+ if (ioc->asic_gen == BFI_ASIC_GEN_CB)
+ bfa_ioc_set_cb_hwif(ioc);
+ else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+ bfa_ioc_set_ct_hwif(ioc);
+ else {
+ WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+ bfa_ioc_set_ct2_hwif(ioc);
+ bfa_ioc_ct2_poweron(ioc);
+ }
+
+ bfa_ioc_map_port(ioc);
+ bfa_ioc_reg_init(ioc);
+}
+
+/*
+ * Initialize IOC dma memory
+ *
+ * @param[in] dm_kva kernel virtual address of IOC dma memory
+ * @param[in] dm_pa physical address of IOC dma memory
+ */
+void
+bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
+{
+ /*
+ * dma memory for firmware attribute
+ */
+ ioc->attr_dma.kva = dm_kva;
+ ioc->attr_dma.pa = dm_pa;
+ ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
+}
+
+void
+bfa_ioc_enable(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_enables);
+ ioc->dbg_fwsave_once = BFA_TRUE;
+
+ bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_ioc_disable(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_disables);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+void
+bfa_ioc_suspend(struct bfa_ioc_s *ioc)
+{
+ ioc->dbg_fwsave_once = BFA_TRUE;
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/*
+ * Initialize memory for saving firmware trace. Driver must initialize
+ * trace memory before call bfa_ioc_enable().
+ */
+void
+bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
+{
+ ioc->dbg_fwsave = dbg_fwsave;
+ ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
+}
+
+/*
+ * Register mailbox message handler functions
+ *
+ * @param[in] ioc IOC instance
+ * @param[in] mcfuncs message class handler functions
+ */
+void
+bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ int mc;
+
+ for (mc = 0; mc < BFI_MC_MAX; mc++)
+ mod->mbhdlr[mc].cbfn = mcfuncs[mc];
+}
+
+/*
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+
+ mod->mbhdlr[mc].cbfn = cbfn;
+ mod->mbhdlr[mc].cbarg = cbarg;
+}
+
+/*
+ * Queue a mailbox command request to firmware. Waits if mailbox is busy.
+ * Responsibility of caller to serialize
+ *
+ * @param[in] ioc IOC instance
+ * @param[i] cmd Mailbox command
+ */
+void
+bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ u32 stat;
+
+ /*
+ * If a previous command is pending, queue new command
+ */
+ if (!list_empty(&mod->cmd_q)) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /*
+ * If mailbox is busy, queue command for poll timer
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /*
+ * mailbox is free -- queue command to firmware
+ */
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/*
+ * Handle mailbox interrupts
+ */
+void
+bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ struct bfi_mbmsg_s m;
+ int mc;
+
+ if (bfa_ioc_msgget(ioc, &m)) {
+ /*
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
+
+ if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+ }
+
+ bfa_ioc_lpu_read_stat(ioc);
+
+ /*
+ * Try to send pending mailbox commands
+ */
+ bfa_ioc_mbox_poll(ioc);
+}
+
+void
+bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ ioc->stats.hb_count = ioc->hb_count;
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/*
+ * return true if IOC is disabled
+ */
+bfa_boolean_t
+bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+}
+
+/*
+ * return true if IOC firmware is different.
+ */
+bfa_boolean_t
+bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
+ bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
+ bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
+}
+
+/*
+ * Check if adapter is disabled -- both IOCs should be in a disabled
+ * state.
+ */
+bfa_boolean_t
+bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
+{
+ u32 ioc_state;
+
+ if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
+ return BFA_FALSE;
+
+ ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
+ if (!bfa_ioc_state_disabled(ioc_state))
+ return BFA_FALSE;
+
+ if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
+ ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
+ if (!bfa_ioc_state_disabled(ioc_state))
+ return BFA_FALSE;
+ }
+
+ return BFA_TRUE;
+}
+
+/*
+ * Reset IOC fwstate registers.
+ */
+void
+bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+}
+
+#define BFA_MFG_NAME "Brocade"
+void
+bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
+ struct bfa_adapter_attr_s *ad_attr)
+{
+ struct bfi_ioc_attr_s *ioc_attr;
+
+ ioc_attr = ioc->attr;
+
+ bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+ bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+ bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+ bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
+ memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ sizeof(struct bfa_mfg_vpd_s));
+
+ ad_attr->nports = bfa_ioc_get_nports(ioc);
+ ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
+
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+ /* For now, model descr uses same model string */
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
+
+ ad_attr->card_type = ioc_attr->card_type;
+ ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
+
+ if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+ ad_attr->prototype = 1;
+ else
+ ad_attr->prototype = 0;
+
+ ad_attr->pwwn = ioc->attr->pwwn;
+ ad_attr->mac = bfa_ioc_get_mac(ioc);
+
+ ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+ ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+ ad_attr->asic_rev = ioc_attr->asic_rev;
+
+ bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
+
+ ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
+ ad_attr->trunk_capable = (ad_attr->nports > 1) &&
+ !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
+ ad_attr->mfg_day = ioc_attr->mfg_day;
+ ad_attr->mfg_month = ioc_attr->mfg_month;
+ ad_attr->mfg_year = ioc_attr->mfg_year;
+ memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
+}
+
+enum bfa_ioc_type_e
+bfa_ioc_get_type(struct bfa_ioc_s *ioc)
+{
+ if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
+ return BFA_IOC_TYPE_LL;
+
+ WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
+
+ return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+ ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
+}
+
+void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
+{
+ memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ memcpy((void *)serial_num,
+ (void *)ioc->attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
+{
+ memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+ memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
+{
+ WARN_ON(!chip_rev);
+
+ memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+ chip_rev[0] = 'R';
+ chip_rev[1] = 'e';
+ chip_rev[2] = 'v';
+ chip_rev[3] = '-';
+ chip_rev[4] = ioc->attr->asic_rev;
+ chip_rev[5] = '\0';
+}
+
+void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
+{
+ memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+ memcpy(optrom_ver, ioc->attr->optrom_version,
+ BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
+{
+ memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+void
+bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
+{
+ struct bfi_ioc_attr_s *ioc_attr;
+ u8 nports = bfa_ioc_get_nports(ioc);
+
+ WARN_ON(!model);
+ memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ioc_attr = ioc->attr;
+
+ if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
+ (!bfa_mfg_is_mezz(ioc_attr->card_type)))
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
+ BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
+ else
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+ BFA_MFG_NAME, ioc_attr->card_type);
+}
+
+enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc_s *ioc)
+{
+ enum bfa_iocpf_state iocpf_st;
+ enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+
+ if (ioc_st == BFA_IOC_ENABLING ||
+ ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+
+ iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+ switch (iocpf_st) {
+ case BFA_IOCPF_SEMWAIT:
+ ioc_st = BFA_IOC_SEMWAIT;
+ break;
+
+ case BFA_IOCPF_HWINIT:
+ ioc_st = BFA_IOC_HWINIT;
+ break;
+
+ case BFA_IOCPF_FWMISMATCH:
+ ioc_st = BFA_IOC_FWMISMATCH;
+ break;
+
+ case BFA_IOCPF_FAIL:
+ ioc_st = BFA_IOC_FAIL;
+ break;
+
+ case BFA_IOCPF_INITFAIL:
+ ioc_st = BFA_IOC_INITFAIL;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return ioc_st;
+}
+
+void
+bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
+{
+ memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
+
+ ioc_attr->state = bfa_ioc_get_state(ioc);
+ ioc_attr->port_id = bfa_ioc_portid(ioc);
+ ioc_attr->port_mode = ioc->port_mode;
+ ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+ ioc_attr->cap_bm = ioc->ad_cap_bm;
+
+ ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
+
+ bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+ ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
+ ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
+ ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
+ bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
+}
+
+mac_t
+bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
+{
+ /*
+ * Check the IOC type and return the appropriate MAC
+ */
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
+ return ioc->attr->fcoe_mac;
+ else
+ return ioc->attr->mac;
+}
+
+mac_t
+bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
+{
+ mac_t m;
+
+ m = ioc->attr->mfg_mac;
+ if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+ m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+ else
+ bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+ bfa_ioc_pcifn(ioc));
+
+ return m;
+}
+
+/*
+ * Send AEN notification
+ */
+void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+ enum bfa_ioc_type_e ioc_type;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ ioc_type = bfa_ioc_get_type(ioc);
+ switch (ioc_type) {
+ case BFA_IOC_TYPE_FC:
+ aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+ break;
+ case BFA_IOC_TYPE_FCoE:
+ aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+ aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ case BFA_IOC_TYPE_LL:
+ aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ default:
+ WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
+ break;
+ }
+
+ /* Send the AEN notification */
+ aen_entry->aen_data.ioc.ioc_type = ioc_type;
+ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+ BFA_AEN_CAT_IOC, event);
+}
+
+/*
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+bfa_status_t
+bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len == 0)
+ return BFA_STATUS_ENOFSAVE;
+
+ tlen = *trclen;
+ if (tlen > ioc->dbg_fwsave_len)
+ tlen = ioc->dbg_fwsave_len;
+
+ memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ *trclen = tlen;
+ return BFA_STATUS_OK;
+}
+
+
+/*
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+bfa_status_t
+bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
+{
+ u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
+ int tlen;
+ bfa_status_t status;
+
+ bfa_trc(ioc, *trclen);
+
+ tlen = *trclen;
+ if (tlen > BFA_DBG_FWTRC_LEN)
+ tlen = BFA_DBG_FWTRC_LEN;
+
+ status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
+ *trclen = tlen;
+ return status;
+}
+
+static void
+bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
+{
+ struct bfa_mbox_cmd_s cmd;
+ struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
+
+ bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
+ bfa_ioc_portid(ioc));
+ req->clscode = cpu_to_be16(ioc->clscode);
+ bfa_ioc_mbox_queue(ioc, &cmd);
+}
+
+static void
+bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
+{
+ u32 fwsync_iter = 1000;
+
+ bfa_ioc_send_fwsync(ioc);
+
+ /*
+ * After sending a fw sync mbox command wait for it to
+ * take effect. We will not wait for a response because
+ * 1. fw_sync mbox cmd doesn't have a response.
+ * 2. Even if we implement that, interrupts might not
+ * be enabled when we call this function.
+ * So, just keep checking if any mbox cmd is pending, and
+ * after waiting for a reasonable amount of time, go ahead.
+ * It is possible that fw has crashed and the mbox command
+ * is never acknowledged.
+ */
+ while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
+ fwsync_iter--;
+}
+
+/*
+ * Dump firmware smem
+ */
+bfa_status_t
+bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
+ u32 *offset, int *buflen)
+{
+ u32 loff;
+ int dlen;
+ bfa_status_t status;
+ u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
+
+ if (*offset >= smem_len) {
+ *offset = *buflen = 0;
+ return BFA_STATUS_EINVAL;
+ }
+
+ loff = *offset;
+ dlen = *buflen;
+
+ /*
+ * First smem read, sync smem before proceeding
+ * No need to sync before reading every chunk.
+ */
+ if (loff == 0)
+ bfa_ioc_fwsync(ioc);
+
+ if ((loff + dlen) >= smem_len)
+ dlen = smem_len - loff;
+
+ status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
+
+ if (status != BFA_STATUS_OK) {
+ *offset = *buflen = 0;
+ return status;
+ }
+
+ *offset += dlen;
+
+ if (*offset >= smem_len)
+ *offset = 0;
+
+ *buflen = dlen;
+
+ return status;
+}
+
+/*
+ * Firmware statistics
+ */
+bfa_status_t
+bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
+{
+ u32 loff = BFI_IOC_FWSTATS_OFF + \
+ BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
+ int tlen;
+ bfa_status_t status;
+
+ if (ioc->stats_busy) {
+ bfa_trc(ioc, ioc->stats_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+ ioc->stats_busy = BFA_TRUE;
+
+ tlen = sizeof(struct bfa_fw_stats_s);
+ status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
+
+ ioc->stats_busy = BFA_FALSE;
+ return status;
+}
+
+bfa_status_t
+bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
+{
+ u32 loff = BFI_IOC_FWSTATS_OFF + \
+ BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
+ int tlen;
+ bfa_status_t status;
+
+ if (ioc->stats_busy) {
+ bfa_trc(ioc, ioc->stats_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+ ioc->stats_busy = BFA_TRUE;
+
+ tlen = sizeof(struct bfa_fw_stats_s);
+ status = bfa_ioc_smem_clr(ioc, loff, tlen);
+
+ ioc->stats_busy = BFA_FALSE;
+ return status;
+}
+
+/*
+ * Save firmware trace if configured.
+ */
+void
+bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_once) {
+ ioc->dbg_fwsave_once = BFA_FALSE;
+ if (ioc->dbg_fwsave_len) {
+ tlen = ioc->dbg_fwsave_len;
+ bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+ }
+ }
+}
+
+/*
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ ioc->stats.hb_count = ioc->hb_count;
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
+/*
+ * BFA IOC PF private functions
+ */
+static void
+bfa_iocpf_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+ bfa_trc(ioc, 0);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+}
+
+static void
+bfa_iocpf_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
+{
+ u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+
+ bfa_trc(ioc, fwstate);
+
+ if (fwstate == BFI_IOC_DISABLED) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ return;
+ }
+
+ if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
+ bfa_iocpf_timeout(ioc);
+ else {
+ ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+ bfa_iocpf_poll_timer_start(ioc);
+ }
+}
+
+static void
+bfa_iocpf_poll_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+ bfa_ioc_poll_fwinit(ioc);
+}
+
+/*
+ * bfa timer function
+ */
+void
+bfa_timer_beat(struct bfa_timer_mod_s *mod)
+{
+ struct list_head *qh = &mod->timer_q;
+ struct list_head *qe, *qe_next;
+ struct bfa_timer_s *elem;
+ struct list_head timedout_q;
+
+ INIT_LIST_HEAD(&timedout_q);
+
+ qe = bfa_q_next(qh);
+
+ while (qe != qh) {
+ qe_next = bfa_q_next(qe);
+
+ elem = (struct bfa_timer_s *) qe;
+ if (elem->timeout <= BFA_TIMER_FREQ) {
+ elem->timeout = 0;
+ list_del(&elem->qe);
+ list_add_tail(&elem->qe, &timedout_q);
+ } else {
+ elem->timeout -= BFA_TIMER_FREQ;
+ }
+
+ qe = qe_next; /* go to next elem */
+ }
+
+ /*
+ * Pop all the timeout entries
+ */
+ while (!list_empty(&timedout_q)) {
+ bfa_q_deq(&timedout_q, &elem);
+ elem->timercb(elem->arg);
+ }
+}
+
+/*
+ * Should be called with lock protection
+ */
+void
+bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
+ void (*timercb) (void *), void *arg, unsigned int timeout)
+{
+
+ WARN_ON(timercb == NULL);
+ WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
+
+ timer->timeout = timeout;
+ timer->timercb = timercb;
+ timer->arg = arg;
+
+ list_add_tail(&timer->qe, &mod->timer_q);
+}
+
+/*
+ * Should be called with lock protection
+ */
+void
+bfa_timer_stop(struct bfa_timer_s *timer)
+{
+ WARN_ON(list_empty(&timer->qe));
+
+ list_del(&timer->qe);
+}
+
+/*
+ * ASIC block related
+ */
+static void
+bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
+{
+ struct bfa_ablk_cfg_inst_s *cfg_inst;
+ int i, j;
+ u16 be16;
+
+ for (i = 0; i < BFA_ABLK_MAX; i++) {
+ cfg_inst = &cfg->inst[i];
+ for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
+ be16 = cfg_inst->pf_cfg[j].pers;
+ cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
+ be16 = cfg_inst->pf_cfg[j].num_qpairs;
+ cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
+ be16 = cfg_inst->pf_cfg[j].num_vectors;
+ cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
+ be16 = cfg_inst->pf_cfg[j].bw_min;
+ cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
+ be16 = cfg_inst->pf_cfg[j].bw_max;
+ cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
+ }
+ }
+}
+
+static void
+bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+ struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
+ bfa_ablk_cbfn_t cbfn;
+
+ WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
+ bfa_trc(ablk->ioc, msg->mh.msg_id);
+
+ switch (msg->mh.msg_id) {
+ case BFI_ABLK_I2H_QUERY:
+ if (rsp->status == BFA_STATUS_OK) {
+ memcpy(ablk->cfg, ablk->dma_addr.kva,
+ sizeof(struct bfa_ablk_cfg_s));
+ bfa_ablk_config_swap(ablk->cfg);
+ ablk->cfg = NULL;
+ }
+ break;
+
+ case BFI_ABLK_I2H_ADPT_CONFIG:
+ case BFI_ABLK_I2H_PORT_CONFIG:
+ /* update config port mode */
+ ablk->ioc->port_mode_cfg = rsp->port_mode;
+
+ case BFI_ABLK_I2H_PF_DELETE:
+ case BFI_ABLK_I2H_PF_UPDATE:
+ case BFI_ABLK_I2H_OPTROM_ENABLE:
+ case BFI_ABLK_I2H_OPTROM_DISABLE:
+ /* No-op */
+ break;
+
+ case BFI_ABLK_I2H_PF_CREATE:
+ *(ablk->pcifn) = rsp->pcifn;
+ ablk->pcifn = NULL;
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+
+ ablk->busy = BFA_FALSE;
+ if (ablk->cbfn) {
+ cbfn = ablk->cbfn;
+ ablk->cbfn = NULL;
+ cbfn(ablk->cbarg, rsp->status);
+ }
+}
+
+static void
+bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+
+ bfa_trc(ablk->ioc, event);
+
+ switch (event) {
+ case BFA_IOC_E_ENABLED:
+ WARN_ON(ablk->busy != BFA_FALSE);
+ break;
+
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ /* Fail any pending requests */
+ ablk->pcifn = NULL;
+ if (ablk->busy) {
+ if (ablk->cbfn)
+ ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
+ ablk->cbfn = NULL;
+ ablk->busy = BFA_FALSE;
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+u32
+bfa_ablk_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
+{
+ ablk->dma_addr.kva = dma_kva;
+ ablk->dma_addr.pa = dma_pa;
+}
+
+void
+bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
+{
+ ablk->ioc = ioc;
+
+ bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
+ bfa_q_qe_init(&ablk->ioc_notify);
+ bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
+ list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
+}
+
+bfa_status_t
+bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_query_s *m;
+
+ WARN_ON(!ablk_cfg);
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cfg = ablk_cfg;
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
+ bfa_ioc_portid(ablk->ioc));
+ bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+ u8 port, enum bfi_pcifn_class personality,
+ u16 bw_min, u16 bw_max,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_pf_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->pcifn = pcifn;
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
+ bfa_ioc_portid(ablk->ioc));
+ m->pers = cpu_to_be16((u16)personality);
+ m->bw_min = cpu_to_be16(bw_min);
+ m->bw_max = cpu_to_be16(bw_max);
+ m->port = port;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_pf_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
+ bfa_ioc_portid(ablk->ioc));
+ m->pcifn = (u8)pcifn;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
+ int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_cfg_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
+ bfa_ioc_portid(ablk->ioc));
+ m->mode = (u8)mode;
+ m->max_pf = (u8)max_pf;
+ m->max_vf = (u8)max_vf;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
+ int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_cfg_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
+ bfa_ioc_portid(ablk->ioc));
+ m->port = (u8)port;
+ m->mode = (u8)mode;
+ m->max_pf = (u8)max_pf;
+ m->max_vf = (u8)max_vf;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
+ u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_pf_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
+ bfa_ioc_portid(ablk->ioc));
+ m->pcifn = (u8)pcifn;
+ m->bw_min = cpu_to_be16(bw_min);
+ m->bw_max = cpu_to_be16(bw_max);
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_optrom_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
+ bfa_ioc_portid(ablk->ioc));
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_optrom_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
+ bfa_ioc_portid(ablk->ioc));
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * SFP module specific
+ */
+
+/* forward declarations */
+static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
+static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
+static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
+ enum bfa_port_speed portspeed);
+
+static void
+bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
+{
+ bfa_trc(sfp, sfp->lock);
+ if (sfp->cbfn)
+ sfp->cbfn(sfp->cbarg, sfp->status);
+ sfp->lock = 0;
+ sfp->cbfn = NULL;
+}
+
+static void
+bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+ bfa_trc(sfp, sfp->portspeed);
+ if (sfp->media) {
+ bfa_sfp_media_get(sfp);
+ if (sfp->state_query_cbfn)
+ sfp->state_query_cbfn(sfp->state_query_cbarg,
+ sfp->status);
+ sfp->media = NULL;
+ }
+
+ if (sfp->portspeed) {
+ sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+ if (sfp->state_query_cbfn)
+ sfp->state_query_cbfn(sfp->state_query_cbarg,
+ sfp->status);
+ sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+ }
+
+ sfp->state_query_lock = 0;
+ sfp->state_query_cbfn = NULL;
+}
+
+/*
+ * IOC event handler.
+ */
+static void
+bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_sfp_s *sfp = sfp_arg;
+
+ bfa_trc(sfp, event);
+ bfa_trc(sfp, sfp->lock);
+ bfa_trc(sfp, sfp->state_query_lock);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (sfp->lock) {
+ sfp->status = BFA_STATUS_IOC_FAILURE;
+ bfa_cb_sfp_show(sfp);
+ }
+
+ if (sfp->state_query_lock) {
+ sfp->status = BFA_STATUS_IOC_FAILURE;
+ bfa_cb_sfp_state_query(sfp);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * SFP's State Change Notification post to AEN
+ */
+static void
+bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
+{
+ struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+ enum bfa_port_aen_event aen_evt = 0;
+
+ bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
+ ((u64)rsp->event));
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
+ aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
+ aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
+
+ switch (rsp->event) {
+ case BFA_SFP_SCN_INSERTED:
+ aen_evt = BFA_PORT_AEN_SFP_INSERT;
+ break;
+ case BFA_SFP_SCN_REMOVED:
+ aen_evt = BFA_PORT_AEN_SFP_REMOVE;
+ break;
+ case BFA_SFP_SCN_FAILED:
+ aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
+ break;
+ case BFA_SFP_SCN_UNSUPPORT:
+ aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
+ break;
+ case BFA_SFP_SCN_POM:
+ aen_evt = BFA_PORT_AEN_SFP_POM;
+ aen_entry->aen_data.port.level = rsp->pomlvl;
+ break;
+ default:
+ bfa_trc(sfp, rsp->event);
+ WARN_ON(1);
+ }
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
+ BFA_AEN_CAT_PORT, aen_evt);
+}
+
+/*
+ * SFP get data send
+ */
+static void
+bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
+{
+ struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+ bfa_trc(sfp, req->memtype);
+
+ /* build host command */
+ bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
+ bfa_ioc_portid(sfp->ioc));
+
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
+}
+
+/*
+ * SFP is valid, read sfp data
+ */
+static void
+bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
+{
+ struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+ WARN_ON(sfp->lock != 0);
+ bfa_trc(sfp, sfp->state);
+
+ sfp->lock = 1;
+ sfp->memtype = memtype;
+ req->memtype = memtype;
+
+ /* Setup SG list */
+ bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
+
+ bfa_sfp_getdata_send(sfp);
+}
+
+/*
+ * SFP scn handler
+ */
+static void
+bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+ struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
+
+ switch (rsp->event) {
+ case BFA_SFP_SCN_INSERTED:
+ sfp->state = BFA_SFP_STATE_INSERTED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_REMOVED:
+ sfp->state = BFA_SFP_STATE_REMOVED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_FAILED:
+ sfp->state = BFA_SFP_STATE_FAILED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_UNSUPPORT:
+ sfp->state = BFA_SFP_STATE_UNSUPPORT;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+ break;
+ case BFA_SFP_SCN_POM:
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_VALID:
+ sfp->state = BFA_SFP_STATE_VALID;
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+ break;
+ default:
+ bfa_trc(sfp, rsp->event);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * SFP show complete
+ */
+static void
+bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+ struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
+
+ if (!sfp->lock) {
+ /*
+ * receiving response after ioc failure
+ */
+ bfa_trc(sfp, sfp->lock);
+ return;
+ }
+
+ bfa_trc(sfp, rsp->status);
+ if (rsp->status == BFA_STATUS_OK) {
+ sfp->data_valid = 1;
+ if (sfp->state == BFA_SFP_STATE_VALID)
+ sfp->status = BFA_STATUS_OK;
+ else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+ sfp->status = BFA_STATUS_SFP_UNSUPP;
+ else
+ bfa_trc(sfp, sfp->state);
+ } else {
+ sfp->data_valid = 0;
+ sfp->status = rsp->status;
+ /* sfpshow shouldn't change sfp state */
+ }
+
+ bfa_trc(sfp, sfp->memtype);
+ if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
+ bfa_trc(sfp, sfp->data_valid);
+ if (sfp->data_valid) {
+ u32 size = sizeof(struct sfp_mem_s);
+ u8 *des = (u8 *) &(sfp->sfpmem);
+ memcpy(des, sfp->dbuf_kva, size);
+ }
+ /*
+ * Queue completion callback.
+ */
+ bfa_cb_sfp_show(sfp);
+ } else
+ sfp->lock = 0;
+
+ bfa_trc(sfp, sfp->state_query_lock);
+ if (sfp->state_query_lock) {
+ sfp->state = rsp->state;
+ /* Complete callback */
+ bfa_cb_sfp_state_query(sfp);
+ }
+}
+
+/*
+ * SFP query fw sfp state
+ */
+static void
+bfa_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+ struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+ /* Should not be doing query if not in _INIT state */
+ WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
+ WARN_ON(sfp->state_query_lock != 0);
+ bfa_trc(sfp, sfp->state);
+
+ sfp->state_query_lock = 1;
+ req->memtype = 0;
+
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+}
+
+static void
+bfa_sfp_media_get(struct bfa_sfp_s *sfp)
+{
+ enum bfa_defs_sfp_media_e *media = sfp->media;
+
+ *media = BFA_SFP_MEDIA_UNKNOWN;
+
+ if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+ *media = BFA_SFP_MEDIA_UNSUPPORT;
+ else if (sfp->state == BFA_SFP_STATE_VALID) {
+ union sfp_xcvr_e10g_code_u e10g;
+ struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+ u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
+ (sfpmem->srlid_base.xcvr[5] >> 1);
+
+ e10g.b = sfpmem->srlid_base.xcvr[0];
+ bfa_trc(sfp, e10g.b);
+ bfa_trc(sfp, xmtr_tech);
+ /* check fc transmitter tech */
+ if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
+ (xmtr_tech & SFP_XMTR_TECH_CP) ||
+ (xmtr_tech & SFP_XMTR_TECH_CA))
+ *media = BFA_SFP_MEDIA_CU;
+ else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
+ (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
+ *media = BFA_SFP_MEDIA_EL;
+ else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
+ (xmtr_tech & SFP_XMTR_TECH_LC))
+ *media = BFA_SFP_MEDIA_LW;
+ else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
+ (xmtr_tech & SFP_XMTR_TECH_SN) ||
+ (xmtr_tech & SFP_XMTR_TECH_SA))
+ *media = BFA_SFP_MEDIA_SW;
+ /* Check 10G Ethernet Compilance code */
+ else if (e10g.r.e10g_sr)
+ *media = BFA_SFP_MEDIA_SW;
+ else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
+ *media = BFA_SFP_MEDIA_LW;
+ else if (e10g.r.e10g_unall)
+ *media = BFA_SFP_MEDIA_UNKNOWN;
+ else
+ bfa_trc(sfp, 0);
+ } else
+ bfa_trc(sfp, sfp->state);
+}
+
+static bfa_status_t
+bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
+{
+ struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+ struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
+ union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
+ union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
+
+ if (portspeed == BFA_PORT_SPEED_10GBPS) {
+ if (e10g.r.e10g_sr || e10g.r.e10g_lr)
+ return BFA_STATUS_OK;
+ else {
+ bfa_trc(sfp, e10g.b);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+ if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
+ ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
+ ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
+ ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
+ ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
+ return BFA_STATUS_OK;
+ else {
+ bfa_trc(sfp, portspeed);
+ bfa_trc(sfp, fc3.b);
+ bfa_trc(sfp, e10g.b);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+}
+
+/*
+ * SFP hmbox handler
+ */
+void
+bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_sfp_s *sfp = sfparg;
+
+ switch (msg->mh.msg_id) {
+ case BFI_SFP_I2H_SHOW:
+ bfa_sfp_show_comp(sfp, msg);
+ break;
+
+ case BFI_SFP_I2H_SCN:
+ bfa_sfp_scn(sfp, msg);
+ break;
+
+ default:
+ bfa_trc(sfp, msg->mh.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Return DMA memory needed by sfp module.
+ */
+u32
+bfa_sfp_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Attach virtual and physical memory for SFP.
+ */
+void
+bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod)
+{
+ sfp->dev = dev;
+ sfp->ioc = ioc;
+ sfp->trcmod = trcmod;
+
+ sfp->cbfn = NULL;
+ sfp->cbarg = NULL;
+ sfp->sfpmem = NULL;
+ sfp->lock = 0;
+ sfp->data_valid = 0;
+ sfp->state = BFA_SFP_STATE_INIT;
+ sfp->state_query_lock = 0;
+ sfp->state_query_cbfn = NULL;
+ sfp->state_query_cbarg = NULL;
+ sfp->media = NULL;
+ sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+ sfp->is_elb = BFA_FALSE;
+
+ bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
+ bfa_q_qe_init(&sfp->ioc_notify);
+ bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
+ list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
+}
+
+/*
+ * Claim Memory for SFP
+ */
+void
+bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
+{
+ sfp->dbuf_kva = dm_kva;
+ sfp->dbuf_pa = dm_pa;
+ memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
+
+ dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Show SFP eeprom content
+ *
+ * @param[in] sfp - bfa sfp module
+ *
+ * @param[out] sfpmem - sfp eeprom data
+ *
+ */
+bfa_status_t
+bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+ bfa_cb_sfp_t cbfn, void *cbarg)
+{
+
+ if (!bfa_ioc_is_operational(sfp->ioc)) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
+ if (sfp->lock) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ sfp->cbfn = cbfn;
+ sfp->cbarg = cbarg;
+ sfp->sfpmem = sfpmem;
+
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Return SFP Media type
+ *
+ * @param[in] sfp - bfa sfp module
+ *
+ * @param[out] media - port speed from user
+ *
+ */
+bfa_status_t
+bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
+ bfa_cb_sfp_t cbfn, void *cbarg)
+{
+ if (!bfa_ioc_is_operational(sfp->ioc)) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
+ sfp->media = media;
+ if (sfp->state == BFA_SFP_STATE_INIT) {
+ if (sfp->state_query_lock) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_DEVBUSY;
+ } else {
+ sfp->state_query_cbfn = cbfn;
+ sfp->state_query_cbarg = cbarg;
+ bfa_sfp_state_query(sfp);
+ return BFA_STATUS_SFP_NOT_READY;
+ }
+ }
+
+ bfa_sfp_media_get(sfp);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Check if user set port speed is allowed by the SFP
+ *
+ * @param[in] sfp - bfa sfp module
+ * @param[in] portspeed - port speed from user
+ *
+ */
+bfa_status_t
+bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
+ bfa_cb_sfp_t cbfn, void *cbarg)
+{
+ WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
+
+ if (!bfa_ioc_is_operational(sfp->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* For Mezz card, all speed is allowed */
+ if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
+ return BFA_STATUS_OK;
+
+ /* Check SFP state */
+ sfp->portspeed = portspeed;
+ if (sfp->state == BFA_SFP_STATE_INIT) {
+ if (sfp->state_query_lock) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_DEVBUSY;
+ } else {
+ sfp->state_query_cbfn = cbfn;
+ sfp->state_query_cbarg = cbarg;
+ bfa_sfp_state_query(sfp);
+ return BFA_STATUS_SFP_NOT_READY;
+ }
+ }
+
+ if (sfp->state == BFA_SFP_STATE_REMOVED ||
+ sfp->state == BFA_SFP_STATE_FAILED) {
+ bfa_trc(sfp, sfp->state);
+ return BFA_STATUS_NO_SFP_DEV;
+ }
+
+ if (sfp->state == BFA_SFP_STATE_INSERTED) {
+ bfa_trc(sfp, sfp->state);
+ return BFA_STATUS_DEVBUSY; /* sfp is reading data */
+ }
+
+ /* For eloopback, all speed is allowed */
+ if (sfp->is_elb)
+ return BFA_STATUS_OK;
+
+ return bfa_sfp_speed_valid(sfp, portspeed);
+}
+
+/*
+ * Flash module specific
+ */
+
+/*
+ * FLASH DMA buffer should be big enough to hold both MFG block and
+ * asic block(64k) at the same time and also should be 2k aligned to
+ * avoid write segement to cross sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ 2048
+#define BFA_FLASH_DMA_BUF_SZ \
+ BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
+ int inst, int type)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
+ aen_entry->aen_data.audit.partition_inst = inst;
+ aen_entry->aen_data.audit.partition_type = type;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+ BFA_AEN_CAT_AUDIT, event);
+}
+
+static void
+bfa_flash_cb(struct bfa_flash_s *flash)
+{
+ flash->op_busy = 0;
+ if (flash->cbfn)
+ flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_flash_s *flash = cbarg;
+
+ bfa_trc(flash, event);
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (flash->op_busy) {
+ flash->status = BFA_STATUS_IOC_FAILURE;
+ flash->cbfn(flash->cbarg, flash->status);
+ flash->op_busy = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Send flash attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_query_send(void *cbarg)
+{
+ struct bfa_flash_s *flash = cbarg;
+ struct bfi_flash_query_req_s *msg =
+ (struct bfi_flash_query_req_s *) flash->mb.msg;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
+ flash->dbuf_pa);
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash_s *flash)
+{
+ struct bfi_flash_write_req_s *msg =
+ (struct bfi_flash_write_req_s *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == flash->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+
+ flash->residue -= len;
+ flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+ struct bfa_flash_s *flash = cbarg;
+ struct bfi_flash_read_req_s *msg =
+ (struct bfi_flash_read_req_s *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash erase request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_erase_send(void *cbarg)
+{
+ struct bfa_flash_s *flash = cbarg;
+ struct bfi_flash_erase_req_s *msg =
+ (struct bfi_flash_erase_req_s *) flash->mb.msg;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_flash_s *flash = flasharg;
+ u32 status;
+
+ union {
+ struct bfi_flash_query_rsp_s *query;
+ struct bfi_flash_erase_rsp_s *erase;
+ struct bfi_flash_write_rsp_s *write;
+ struct bfi_flash_read_rsp_s *read;
+ struct bfi_flash_event_s *event;
+ struct bfi_mbmsg_s *msg;
+ } m;
+
+ m.msg = msg;
+ bfa_trc(flash, msg->mh.msg_id);
+
+ if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
+ /* receiving response after ioc failure */
+ bfa_trc(flash, 0x9999);
+ return;
+ }
+
+ switch (msg->mh.msg_id) {
+ case BFI_FLASH_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ bfa_trc(flash, status);
+ if (status == BFA_STATUS_OK) {
+ u32 i;
+ struct bfa_flash_attr_s *attr, *f;
+
+ attr = (struct bfa_flash_attr_s *) flash->ubuf;
+ f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
+ attr->status = be32_to_cpu(f->status);
+ attr->npart = be32_to_cpu(f->npart);
+ bfa_trc(flash, attr->status);
+ bfa_trc(flash, attr->npart);
+ for (i = 0; i < attr->npart; i++) {
+ attr->part[i].part_type =
+ be32_to_cpu(f->part[i].part_type);
+ attr->part[i].part_instance =
+ be32_to_cpu(f->part[i].part_instance);
+ attr->part[i].part_off =
+ be32_to_cpu(f->part[i].part_off);
+ attr->part[i].part_size =
+ be32_to_cpu(f->part[i].part_size);
+ attr->part[i].part_len =
+ be32_to_cpu(f->part[i].part_len);
+ attr->part[i].part_status =
+ be32_to_cpu(f->part[i].part_status);
+ }
+ }
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_ERASE_RSP:
+ status = be32_to_cpu(m.erase->status);
+ bfa_trc(flash, status);
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ bfa_trc(flash, status);
+ if (status != BFA_STATUS_OK || flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ bfa_trc(flash, flash->offset);
+ bfa_flash_write_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ bfa_trc(flash, status);
+ if (status != BFA_STATUS_OK) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ bfa_trc(flash, flash->offset);
+ bfa_trc(flash, len);
+ memcpy(flash->ubuf + flash->offset,
+ flash->dbuf_kva, len);
+ flash->residue -= len;
+ flash->offset += len;
+ if (flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_read_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_BOOT_VER_RSP:
+ break;
+ case BFI_FLASH_I2H_EVENT:
+ status = be32_to_cpu(m.event->status);
+ bfa_trc(flash, status);
+ if (status == BFA_STATUS_BAD_FWCFG)
+ bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
+ else if (status == BFA_STATUS_INVALID_VENDOR) {
+ u32 param;
+ param = be32_to_cpu(m.event->param);
+ bfa_trc(flash, param);
+ bfa_ioc_aen_post(flash->ioc,
+ BFA_IOC_AEN_INVALID_VENDOR);
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_flash_meminfo(bfa_boolean_t mincfg)
+{
+ /* min driver doesn't need flash */
+ if (mincfg)
+ return 0;
+ return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] logmod - log module
+ */
+void
+bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+ flash->ioc = ioc;
+ flash->trcmod = trcmod;
+ flash->cbfn = NULL;
+ flash->cbarg = NULL;
+ flash->op_busy = 0;
+
+ bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+ bfa_q_qe_init(&flash->ioc_notify);
+ bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+ list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+
+ /* min driver doesn't need flash */
+ if (mincfg) {
+ flash->dbuf_kva = NULL;
+ flash->dbuf_pa = 0;
+ }
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
+ bfa_boolean_t mincfg)
+{
+ if (mincfg)
+ return;
+
+ flash->dbuf_kva = dm_kva;
+ flash->dbuf_pa = dm_pa;
+ memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+ dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
+ bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->ubuf = (u8 *) attr;
+ bfa_flash_query_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Erase flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+ u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
+ bfa_trc(flash, type);
+ bfa_trc(flash, instance);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+
+ bfa_flash_erase_send(flash);
+ bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
+ instance, type);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+ u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
+ bfa_trc(flash, type);
+ bfa_trc(flash, instance);
+ bfa_trc(flash, len);
+ bfa_trc(flash, offset);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be in word (4-byte) boundary
+ * 'offset' must be in sector (16kb) boundary
+ */
+ if (!len || (len & 0x03) || (offset & 0x00003FFF))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (type == BFA_FLASH_PART_MFG)
+ return BFA_STATUS_EINVAL;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_write_send(flash);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+ u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
+ bfa_trc(flash, type);
+ bfa_trc(flash, instance);
+ bfa_trc(flash, len);
+ bfa_trc(flash, offset);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be in word (4-byte) boundary
+ * 'offset' must be in sector (16kb) boundary
+ */
+ if (!len || (len & 0x03) || (offset & 0x00003FFF))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+ bfa_flash_read_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * DIAG module specific
+ */
+
+#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
+#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
+
+/* IOC event handler */
+static void
+bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_diag_s *diag = diag_arg;
+
+ bfa_trc(diag, event);
+ bfa_trc(diag, diag->block);
+ bfa_trc(diag, diag->fwping.lock);
+ bfa_trc(diag, diag->tsensor.lock);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (diag->fwping.lock) {
+ diag->fwping.status = BFA_STATUS_IOC_FAILURE;
+ diag->fwping.cbfn(diag->fwping.cbarg,
+ diag->fwping.status);
+ diag->fwping.lock = 0;
+ }
+
+ if (diag->tsensor.lock) {
+ diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
+ diag->tsensor.cbfn(diag->tsensor.cbarg,
+ diag->tsensor.status);
+ diag->tsensor.lock = 0;
+ }
+
+ if (diag->block) {
+ if (diag->timer_active) {
+ bfa_timer_stop(&diag->timer);
+ diag->timer_active = 0;
+ }
+
+ diag->status = BFA_STATUS_IOC_FAILURE;
+ diag->cbfn(diag->cbarg, diag->status);
+ diag->block = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+bfa_diag_memtest_done(void *cbarg)
+{
+ struct bfa_diag_s *diag = cbarg;
+ struct bfa_ioc_s *ioc = diag->ioc;
+ struct bfa_diag_memtest_result *res = diag->result;
+ u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
+ u32 pgnum, pgoff, i;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
+ sizeof(u32)); i++) {
+ /* read test result from smem */
+ *((u32 *) res + i) =
+ bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+ loff += sizeof(u32);
+ }
+
+ /* Reset IOC fwstates to BFI_IOC_UNINIT */
+ bfa_ioc_reset_fwstate(ioc);
+
+ res->status = swab32(res->status);
+ bfa_trc(diag, res->status);
+
+ if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
+ diag->status = BFA_STATUS_OK;
+ else {
+ diag->status = BFA_STATUS_MEMTEST_FAILED;
+ res->addr = swab32(res->addr);
+ res->exp = swab32(res->exp);
+ res->act = swab32(res->act);
+ res->err_status = swab32(res->err_status);
+ res->err_status1 = swab32(res->err_status1);
+ res->err_addr = swab32(res->err_addr);
+ bfa_trc(diag, res->addr);
+ bfa_trc(diag, res->exp);
+ bfa_trc(diag, res->act);
+ bfa_trc(diag, res->err_status);
+ bfa_trc(diag, res->err_status1);
+ bfa_trc(diag, res->err_addr);
+ }
+ diag->timer_active = 0;
+ diag->cbfn(diag->cbarg, diag->status);
+ diag->block = 0;
+}
+
+/*
+ * Firmware ping
+ */
+
+/*
+ * Perform DMA test directly
+ */
+static void
+diag_fwping_send(struct bfa_diag_s *diag)
+{
+ struct bfi_diag_fwping_req_s *fwping_req;
+ u32 i;
+
+ bfa_trc(diag, diag->fwping.dbuf_pa);
+
+ /* fill DMA area with pattern */
+ for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
+ *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
+
+ /* Fill mbox msg */
+ fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
+
+ /* Setup SG list */
+ bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
+ diag->fwping.dbuf_pa);
+ /* Set up dma count */
+ fwping_req->count = cpu_to_be32(diag->fwping.count);
+ /* Set up data pattern */
+ fwping_req->data = diag->fwping.data;
+
+ /* build host command */
+ bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
+ bfa_ioc_portid(diag->ioc));
+
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
+}
+
+static void
+diag_fwping_comp(struct bfa_diag_s *diag,
+ struct bfi_diag_fwping_rsp_s *diag_rsp)
+{
+ u32 rsp_data = diag_rsp->data;
+ u8 rsp_dma_status = diag_rsp->dma_status;
+
+ bfa_trc(diag, rsp_data);
+ bfa_trc(diag, rsp_dma_status);
+
+ if (rsp_dma_status == BFA_STATUS_OK) {
+ u32 i, pat;
+ pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
+ diag->fwping.data;
+ /* Check mbox data */
+ if (diag->fwping.data != rsp_data) {
+ bfa_trc(diag, rsp_data);
+ diag->fwping.result->dmastatus =
+ BFA_STATUS_DATACORRUPTED;
+ diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+ diag->fwping.cbfn(diag->fwping.cbarg,
+ diag->fwping.status);
+ diag->fwping.lock = 0;
+ return;
+ }
+ /* Check dma pattern */
+ for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
+ if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
+ bfa_trc(diag, i);
+ bfa_trc(diag, pat);
+ bfa_trc(diag,
+ *((u32 *)diag->fwping.dbuf_kva + i));
+ diag->fwping.result->dmastatus =
+ BFA_STATUS_DATACORRUPTED;
+ diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+ diag->fwping.cbfn(diag->fwping.cbarg,
+ diag->fwping.status);
+ diag->fwping.lock = 0;
+ return;
+ }
+ }
+ diag->fwping.result->dmastatus = BFA_STATUS_OK;
+ diag->fwping.status = BFA_STATUS_OK;
+ diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+ diag->fwping.lock = 0;
+ } else {
+ diag->fwping.status = BFA_STATUS_HDMA_FAILED;
+ diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+ diag->fwping.lock = 0;
+ }
+}
+
+/*
+ * Temperature Sensor
+ */
+
+static void
+diag_tempsensor_send(struct bfa_diag_s *diag)
+{
+ struct bfi_diag_ts_req_s *msg;
+
+ msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
+ bfa_trc(diag, msg->temp);
+ /* build host command */
+ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
+ bfa_ioc_portid(diag->ioc));
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
+}
+
+static void
+diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
+{
+ if (!diag->tsensor.lock) {
+ /* receiving response after ioc failure */
+ bfa_trc(diag, diag->tsensor.lock);
+ return;
+ }
+
+ /*
+ * ASIC junction tempsensor is a reg read operation
+ * it will always return OK
+ */
+ diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
+ diag->tsensor.temp->ts_junc = rsp->ts_junc;
+ diag->tsensor.temp->ts_brd = rsp->ts_brd;
+
+ if (rsp->ts_brd) {
+ /* tsensor.temp->status is brd_temp status */
+ diag->tsensor.temp->status = rsp->status;
+ if (rsp->status == BFA_STATUS_OK) {
+ diag->tsensor.temp->brd_temp =
+ be16_to_cpu(rsp->brd_temp);
+ } else
+ diag->tsensor.temp->brd_temp = 0;
+ }
+
+ bfa_trc(diag, rsp->status);
+ bfa_trc(diag, rsp->ts_junc);
+ bfa_trc(diag, rsp->temp);
+ bfa_trc(diag, rsp->ts_brd);
+ bfa_trc(diag, rsp->brd_temp);
+
+ /* tsensor status is always good bcos we always have junction temp */
+ diag->tsensor.status = BFA_STATUS_OK;
+ diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
+ diag->tsensor.lock = 0;
+}
+
+/*
+ * LED Test command
+ */
+static void
+diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+ struct bfi_diag_ledtest_req_s *msg;
+
+ msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
+ /* build host command */
+ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
+ bfa_ioc_portid(diag->ioc));
+
+ /*
+ * convert the freq from N blinks per 10 sec to
+ * crossbow ontime value. We do it here because division is need
+ */
+ if (ledtest->freq)
+ ledtest->freq = 500 / ledtest->freq;
+
+ if (ledtest->freq == 0)
+ ledtest->freq = 1;
+
+ bfa_trc(diag, ledtest->freq);
+ /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
+ msg->cmd = (u8) ledtest->cmd;
+ msg->color = (u8) ledtest->color;
+ msg->portid = bfa_ioc_portid(diag->ioc);
+ msg->led = ledtest->led;
+ msg->freq = cpu_to_be16(ledtest->freq);
+
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
+}
+
+static void
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
+{
+ bfa_trc(diag, diag->ledtest.lock);
+ diag->ledtest.lock = BFA_FALSE;
+ /* no bfa_cb_queue is needed because driver is not waiting */
+}
+
+/*
+ * Port beaconing
+ */
+static void
+diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
+{
+ struct bfi_diag_portbeacon_req_s *msg;
+
+ msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
+ /* build host command */
+ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
+ bfa_ioc_portid(diag->ioc));
+ msg->beacon = beacon;
+ msg->period = cpu_to_be32(sec);
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
+}
+
+static void
+diag_portbeacon_comp(struct bfa_diag_s *diag)
+{
+ bfa_trc(diag, diag->beacon.state);
+ diag->beacon.state = BFA_FALSE;
+ if (diag->cbfn_beacon)
+ diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
+}
+
+/*
+ * Diag hmbox handler
+ */
+void
+bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_diag_s *diag = diagarg;
+
+ switch (msg->mh.msg_id) {
+ case BFI_DIAG_I2H_PORTBEACON:
+ diag_portbeacon_comp(diag);
+ break;
+ case BFI_DIAG_I2H_FWPING:
+ diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
+ break;
+ case BFI_DIAG_I2H_TEMPSENSOR:
+ diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
+ break;
+ case BFI_DIAG_I2H_LEDTEST:
+ diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
+ break;
+ default:
+ bfa_trc(diag, msg->mh.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Gen RAM Test
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] *memtest - mem test params input from upper layer,
+ * @param[in] pattern - mem test pattern
+ * @param[in] *result - mem test result
+ * @param[in] cbfn - mem test callback functioin
+ * @param[in] cbarg - callback functioin arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
+ u32 pattern, struct bfa_diag_memtest_result *result,
+ bfa_cb_diag_t cbfn, void *cbarg)
+{
+ u32 memtest_tov;
+
+ bfa_trc(diag, pattern);
+
+ if (!bfa_ioc_adapter_is_disabled(diag->ioc))
+ return BFA_STATUS_ADAPTER_ENABLED;
+
+ /* check to see if there is another destructive diag cmd running */
+ if (diag->block) {
+ bfa_trc(diag, diag->block);
+ return BFA_STATUS_DEVBUSY;
+ } else
+ diag->block = 1;
+
+ diag->result = result;
+ diag->cbfn = cbfn;
+ diag->cbarg = cbarg;
+
+ /* download memtest code and take LPU0 out of reset */
+ bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+
+ memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
+ CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
+ bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
+ bfa_diag_memtest_done, diag, memtest_tov);
+ diag->timer_active = 1;
+ return BFA_STATUS_OK;
+}
+
+/*
+ * DIAG firmware ping command
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] cnt - dma loop count for testing PCIE
+ * @param[in] data - data pattern to pass in fw
+ * @param[in] *result - pt to bfa_diag_fwping_result_t data struct
+ * @param[in] cbfn - callback function
+ * @param[in] *cbarg - callback functioin arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
+ struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
+ void *cbarg)
+{
+ bfa_trc(diag, cnt);
+ bfa_trc(diag, data);
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
+ ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
+ return BFA_STATUS_CMD_NOTSUPP;
+
+ /* check to see if there is another destructive diag cmd running */
+ if (diag->block || diag->fwping.lock) {
+ bfa_trc(diag, diag->block);
+ bfa_trc(diag, diag->fwping.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ /* Initialization */
+ diag->fwping.lock = 1;
+ diag->fwping.cbfn = cbfn;
+ diag->fwping.cbarg = cbarg;
+ diag->fwping.result = result;
+ diag->fwping.data = data;
+ diag->fwping.count = cnt;
+
+ /* Init test results */
+ diag->fwping.result->data = 0;
+ diag->fwping.result->status = BFA_STATUS_OK;
+
+ /* kick off the first ping */
+ diag_fwping_send(diag);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read Temperature Sensor
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] *result - pt to bfa_diag_temp_t data struct
+ * @param[in] cbfn - callback function
+ * @param[in] *cbarg - callback functioin arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+ struct bfa_diag_results_tempsensor_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg)
+{
+ /* check to see if there is a destructive diag cmd running */
+ if (diag->block || diag->tsensor.lock) {
+ bfa_trc(diag, diag->block);
+ bfa_trc(diag, diag->tsensor.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* Init diag mod params */
+ diag->tsensor.lock = 1;
+ diag->tsensor.temp = result;
+ diag->tsensor.cbfn = cbfn;
+ diag->tsensor.cbarg = cbarg;
+ diag->tsensor.status = BFA_STATUS_OK;
+
+ /* Send msg to fw */
+ diag_tempsensor_send(diag);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * LED Test command
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] *ledtest - pt to ledtest data structure
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+ bfa_trc(diag, ledtest->cmd);
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (diag->beacon.state)
+ return BFA_STATUS_BEACON_ON;
+
+ if (diag->ledtest.lock)
+ return BFA_STATUS_LEDTEST_OP;
+
+ /* Send msg to fw */
+ diag->ledtest.lock = BFA_TRUE;
+ diag_ledtest_send(diag, ledtest);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Port beaconing command
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] beacon - port beaconing 1:ON 0:OFF
+ * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
+ * @param[in] sec - beaconing duration in seconds
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon, uint32_t sec)
+{
+ bfa_trc(diag, beacon);
+ bfa_trc(diag, link_e2e_beacon);
+ bfa_trc(diag, sec);
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (diag->ledtest.lock)
+ return BFA_STATUS_LEDTEST_OP;
+
+ if (diag->beacon.state && beacon) /* beacon alread on */
+ return BFA_STATUS_BEACON_ON;
+
+ diag->beacon.state = beacon;
+ diag->beacon.link_e2e = link_e2e_beacon;
+ if (diag->cbfn_beacon)
+ diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
+
+ /* Send msg to fw */
+ diag_portbeacon_send(diag, beacon, sec);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Return DMA memory needed by diag module.
+ */
+u32
+bfa_diag_meminfo(void)
+{
+ return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Attach virtual and physical memory for Diag.
+ */
+void
+bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+ bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
+{
+ diag->dev = dev;
+ diag->ioc = ioc;
+ diag->trcmod = trcmod;
+
+ diag->block = 0;
+ diag->cbfn = NULL;
+ diag->cbarg = NULL;
+ diag->result = NULL;
+ diag->cbfn_beacon = cbfn_beacon;
+
+ bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
+ bfa_q_qe_init(&diag->ioc_notify);
+ bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
+ list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
+}
+
+void
+bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
+{
+ diag->fwping.dbuf_kva = dm_kva;
+ diag->fwping.dbuf_pa = dm_pa;
+ memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
+}
+
+/*
+ * PHY module specific
+ */
+#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
+#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
+
+static void
+bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
+{
+ int i, m = sz >> 2;
+
+ for (i = 0; i < m; i++)
+ obuf[i] = be32_to_cpu(ibuf[i]);
+}
+
+static bfa_boolean_t
+bfa_phy_present(struct bfa_phy_s *phy)
+{
+ return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
+}
+
+static void
+bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_phy_s *phy = cbarg;
+
+ bfa_trc(phy, event);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (phy->op_busy) {
+ phy->status = BFA_STATUS_IOC_FAILURE;
+ phy->cbfn(phy->cbarg, phy->status);
+ phy->op_busy = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Send phy attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_query_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_query_req_s *msg =
+ (struct bfi_phy_query_req_s *) phy->mb.msg;
+
+ msg->instance = phy->instance;
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_write_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_write_req_s *msg =
+ (struct bfi_phy_write_req_s *) phy->mb.msg;
+ u32 len;
+ u16 *buf, *dbuf;
+ int i, sz;
+
+ msg->instance = phy->instance;
+ msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+ len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+ phy->residue : BFA_PHY_DMA_BUF_SZ;
+ msg->length = cpu_to_be32(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == phy->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+
+ buf = (u16 *) (phy->ubuf + phy->offset);
+ dbuf = (u16 *)phy->dbuf_kva;
+ sz = len >> 1;
+ for (i = 0; i < sz; i++)
+ buf[i] = cpu_to_be16(dbuf[i]);
+
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+
+ phy->residue -= len;
+ phy->offset += len;
+}
+
+/*
+ * Send phy read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_read_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_read_req_s *msg =
+ (struct bfi_phy_read_req_s *) phy->mb.msg;
+ u32 len;
+
+ msg->instance = phy->instance;
+ msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+ len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+ phy->residue : BFA_PHY_DMA_BUF_SZ;
+ msg->length = cpu_to_be32(len);
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy stats request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_stats_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_stats_req_s *msg =
+ (struct bfi_phy_stats_req_s *) phy->mb.msg;
+
+ msg->instance = phy->instance;
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_phy_meminfo(bfa_boolean_t mincfg)
+{
+ /* min driver doesn't need phy */
+ if (mincfg)
+ return 0;
+
+ return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] logmod - log module
+ */
+void
+bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+ phy->ioc = ioc;
+ phy->trcmod = trcmod;
+ phy->cbfn = NULL;
+ phy->cbarg = NULL;
+ phy->op_busy = 0;
+
+ bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
+ bfa_q_qe_init(&phy->ioc_notify);
+ bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
+ list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
+
+ /* min driver doesn't need phy */
+ if (mincfg) {
+ phy->dbuf_kva = NULL;
+ phy->dbuf_pa = 0;
+ }
+}
+
+/*
+ * Claim memory for phy
+ *
+ * @param[in] phy - phy structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
+ bfa_boolean_t mincfg)
+{
+ if (mincfg)
+ return;
+
+ phy->dbuf_kva = dm_kva;
+ phy->dbuf_pa = dm_pa;
+ memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
+ dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+bfa_boolean_t
+bfa_phy_busy(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb;
+
+ rb = bfa_ioc_bar0(ioc);
+ return readl(rb + BFA_PHY_LOCK_STATUS);
+}
+
+/*
+ * Get phy attribute.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] attr - phy attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
+ bfa_trc(phy, instance);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->ubuf = (uint8_t *) attr;
+ bfa_phy_query_send(phy);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Get phy stats.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] stats - pointer to phy stats
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_stats_s *stats,
+ bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
+ bfa_trc(phy, instance);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->ubuf = (u8 *) stats;
+ bfa_phy_stats_send(phy);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
+ bfa_trc(phy, instance);
+ bfa_trc(phy, len);
+ bfa_trc(phy, offset);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* 'len' must be in word (4-byte) boundary */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FAILED;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->residue = len;
+ phy->offset = 0;
+ phy->addr_off = offset;
+ phy->ubuf = buf;
+
+ bfa_phy_write_send(phy);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
+ bfa_trc(phy, instance);
+ bfa_trc(phy, len);
+ bfa_trc(phy, offset);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* 'len' must be in word (4-byte) boundary */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FAILED;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->residue = len;
+ phy->offset = 0;
+ phy->addr_off = offset;
+ phy->ubuf = buf;
+ bfa_phy_read_send(phy);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Process phy response messages upon receiving interrupts.
+ *
+ * @param[in] phyarg - phy structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_phy_s *phy = phyarg;
+ u32 status;
+
+ union {
+ struct bfi_phy_query_rsp_s *query;
+ struct bfi_phy_stats_rsp_s *stats;
+ struct bfi_phy_write_rsp_s *write;
+ struct bfi_phy_read_rsp_s *read;
+ struct bfi_mbmsg_s *msg;
+ } m;
+
+ m.msg = msg;
+ bfa_trc(phy, msg->mh.msg_id);
+
+ if (!phy->op_busy) {
+ /* receiving response after ioc failure */
+ bfa_trc(phy, 0x9999);
+ return;
+ }
+
+ switch (msg->mh.msg_id) {
+ case BFI_PHY_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ bfa_trc(phy, status);
+
+ if (status == BFA_STATUS_OK) {
+ struct bfa_phy_attr_s *attr =
+ (struct bfa_phy_attr_s *) phy->ubuf;
+ bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
+ sizeof(struct bfa_phy_attr_s));
+ bfa_trc(phy, attr->status);
+ bfa_trc(phy, attr->length);
+ }
+
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ break;
+ case BFI_PHY_I2H_STATS_RSP:
+ status = be32_to_cpu(m.stats->status);
+ bfa_trc(phy, status);
+
+ if (status == BFA_STATUS_OK) {
+ struct bfa_phy_stats_s *stats =
+ (struct bfa_phy_stats_s *) phy->ubuf;
+ bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
+ sizeof(struct bfa_phy_stats_s));
+ bfa_trc(phy, stats->status);
+ }
+
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ break;
+ case BFI_PHY_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ bfa_trc(phy, status);
+
+ if (status != BFA_STATUS_OK || phy->residue == 0) {
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ } else {
+ bfa_trc(phy, phy->offset);
+ bfa_phy_write_send(phy);
+ }
+ break;
+ case BFI_PHY_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ bfa_trc(phy, status);
+
+ if (status != BFA_STATUS_OK) {
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ u16 *buf = (u16 *)(phy->ubuf + phy->offset);
+ u16 *dbuf = (u16 *)phy->dbuf_kva;
+ int i, sz = len >> 1;
+
+ bfa_trc(phy, phy->offset);
+ bfa_trc(phy, len);
+
+ for (i = 0; i < sz; i++)
+ buf[i] = be16_to_cpu(dbuf[i]);
+
+ phy->residue -= len;
+ phy->offset += len;
+
+ if (phy->residue == 0) {
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ } else
+ bfa_phy_read_send(phy);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * DCONF module specific
+ */
+
+BFA_MODULE(dconf);
+
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+ BFA_DCONF_SM_INIT = 1, /* dconf Init */
+ BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
+ BFA_DCONF_SM_WR = 3, /* binding change, map */
+ BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
+ BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
+ BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
+};
+
+/* forward declaration of DCONF state machine */
+static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+
+static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
+static void bfa_dconf_timer(void *cbarg);
+static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
+static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
+
+/*
+ * Beginning state of dconf module. Waiting for an event to start.
+ */
+static void
+bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_status_t bfa_status;
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_INIT:
+ if (dconf->min_cfg) {
+ bfa_trc(dconf->bfa, dconf->min_cfg);
+ bfa_fsm_send_event(&dconf->bfa->iocfc,
+ IOCFC_E_DCONF_DONE);
+ return;
+ }
+ bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
+ bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
+ BFA_FLASH_PART_DRV, dconf->instance,
+ dconf->dconf,
+ sizeof(struct bfa_dconf_s), 0,
+ bfa_dconf_init_cb, dconf->bfa);
+ if (bfa_status != BFA_STATUS_OK) {
+ bfa_timer_stop(&dconf->timer);
+ bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ return;
+ }
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ case BFA_DCONF_SM_IOCDISABLE:
+ case BFA_DCONF_SM_WR:
+ case BFA_DCONF_SM_FLASH_COMP:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Read flash for dconf entries and make a call back to the driver once done.
+ */
+static void
+bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ bfa_ioc_suspend(&dconf->bfa->ioc);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * DCONF Module is in ready state. Has completed the initialization.
+ */
+static void
+bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_WR:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
+ case BFA_DCONF_SM_INIT:
+ case BFA_DCONF_SM_IOCDISABLE:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * entries are dirty, write back to the flash.
+ */
+
+static void
+bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
+ bfa_dconf_flash_write(dconf);
+ break;
+ case BFA_DCONF_SM_WR:
+ bfa_timer_stop(&dconf->timer);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_stop(&dconf->timer);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+ bfa_dconf_flash_write(dconf);
+ break;
+ case BFA_DCONF_SM_FLASH_COMP:
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Sync the dconf entries to the flash.
+ */
+static void
+bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_IOCDISABLE:
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_timer_stop(&dconf->timer);
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+static void
+bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_WR:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+static void
+bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_INIT:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Compute and return memory needed by DRV_CFG module.
+ */
+static void
+bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
+
+ if (cfg->drvcfg.min_cfg)
+ bfa_mem_kva_setup(meminfo, dconf_kva,
+ sizeof(struct bfa_dconf_hdr_s));
+ else
+ bfa_mem_kva_setup(meminfo, dconf_kva,
+ sizeof(struct bfa_dconf_s));
+}
+
+static void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+ dconf->bfad = bfad;
+ dconf->bfa = bfa;
+ dconf->instance = bfa->ioc.port_id;
+ bfa_trc(bfa, dconf->instance);
+
+ dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
+ if (cfg->drvcfg.min_cfg) {
+ bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
+ dconf->min_cfg = BFA_TRUE;
+ } else {
+ dconf->min_cfg = BFA_FALSE;
+ bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
+ }
+
+ bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+}
+
+static void
+bfa_dconf_init_cb(void *arg, bfa_status_t status)
+{
+ struct bfa_s *bfa = arg;
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+ if (status == BFA_STATUS_OK) {
+ bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
+ if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
+ dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
+ if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
+ dconf->dconf->hdr.version = BFI_DCONF_VERSION;
+ }
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
+}
+
+void
+bfa_dconf_modinit(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
+}
+static void
+bfa_dconf_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_stop(struct bfa_s *bfa)
+{
+}
+
+static void bfa_dconf_timer(void *cbarg)
+{
+ struct bfa_dconf_mod_s *dconf = cbarg;
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
+}
+static void
+bfa_dconf_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
+}
+
+static void
+bfa_dconf_detach(struct bfa_s *bfa)
+{
+}
+
+static bfa_status_t
+bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
+{
+ bfa_status_t bfa_status;
+ bfa_trc(dconf->bfa, 0);
+
+ bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
+ BFA_FLASH_PART_DRV, dconf->instance,
+ dconf->dconf, sizeof(struct bfa_dconf_s), 0,
+ bfa_dconf_cbfn, dconf);
+ if (bfa_status != BFA_STATUS_OK)
+ WARN_ON(bfa_status);
+ bfa_trc(dconf->bfa, bfa_status);
+
+ return bfa_status;
+}
+
+bfa_status_t
+bfa_dconf_update(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_trc(dconf->bfa, 0);
+ if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
+ return BFA_STATUS_FAILED;
+
+ if (dconf->min_cfg) {
+ bfa_trc(dconf->bfa, dconf->min_cfg);
+ return BFA_STATUS_FAILED;
+ }
+
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_dconf_cbfn(void *arg, bfa_status_t status)
+{
+ struct bfa_dconf_mod_s *dconf = arg;
+ WARN_ON(status);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modexit(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
+}
+
+/*
+ * FRU specific functions
+ */
+
+#define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
+#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
+#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
+
+static void
+bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_fru_s *fru = cbarg;
+
+ bfa_trc(fru, event);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (fru->op_busy) {
+ fru->status = BFA_STATUS_IOC_FAILURE;
+ fru->cbfn(fru->cbarg, fru->status);
+ fru->op_busy = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Send fru write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+ struct bfa_fru_s *fru = cbarg;
+ struct bfi_fru_write_req_s *msg =
+ (struct bfi_fru_write_req_s *) fru->mb.msg;
+ u32 len;
+
+ msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+ len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+ fru->residue : BFA_FRU_DMA_BUF_SZ;
+ msg->length = cpu_to_be32(len);
+
+ /*
+ * indicate if it's the last msg of the whole write operation
+ */
+ msg->last = (len == fru->residue) ? 1 : 0;
+
+ msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
+ bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+ bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+
+ memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
+ bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+
+ fru->residue -= len;
+ fru->offset += len;
+}
+
+/*
+ * Send fru read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+ struct bfa_fru_s *fru = cbarg;
+ struct bfi_fru_read_req_s *msg =
+ (struct bfi_fru_read_req_s *) fru->mb.msg;
+ u32 len;
+
+ msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+ len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+ fru->residue : BFA_FRU_DMA_BUF_SZ;
+ msg->length = cpu_to_be32(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+ bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+ bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_fru_meminfo(bfa_boolean_t mincfg)
+{
+ /* min driver doesn't need fru */
+ if (mincfg)
+ return 0;
+
+ return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] logmod - log module
+ */
+void
+bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+ fru->ioc = ioc;
+ fru->trcmod = trcmod;
+ fru->cbfn = NULL;
+ fru->cbarg = NULL;
+ fru->op_busy = 0;
+
+ bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
+ bfa_q_qe_init(&fru->ioc_notify);
+ bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
+ list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
+
+ /* min driver doesn't need fru */
+ if (mincfg) {
+ fru->dbuf_kva = NULL;
+ fru->dbuf_pa = 0;
+ }
+}
+
+/*
+ * Claim memory for fru
+ *
+ * @param[in] fru - fru structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - frusical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
+ bfa_boolean_t mincfg)
+{
+ if (mincfg)
+ return;
+
+ fru->dbuf_kva = dm_kva;
+ fru->dbuf_pa = dm_pa;
+ memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
+ dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Update fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
+{
+ bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+ bfa_trc(fru, len);
+ bfa_trc(fru, offset);
+
+ if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
+ fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
+ return BFA_STATUS_FRU_NOT_PRESENT;
+
+ if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+ return BFA_STATUS_CMD_NOTSUPP;
+
+ if (!bfa_ioc_is_operational(fru->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (fru->op_busy) {
+ bfa_trc(fru, fru->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ fru->op_busy = 1;
+
+ fru->cbfn = cbfn;
+ fru->cbarg = cbarg;
+ fru->residue = len;
+ fru->offset = 0;
+ fru->addr_off = offset;
+ fru->ubuf = buf;
+ fru->trfr_cmpl = trfr_cmpl;
+
+ bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg)
+{
+ bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
+ bfa_trc(fru, len);
+ bfa_trc(fru, offset);
+
+ if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+ return BFA_STATUS_FRU_NOT_PRESENT;
+
+ if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
+ fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
+ return BFA_STATUS_CMD_NOTSUPP;
+
+ if (!bfa_ioc_is_operational(fru->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (fru->op_busy) {
+ bfa_trc(fru, fru->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ fru->op_busy = 1;
+
+ fru->cbfn = cbfn;
+ fru->cbarg = cbarg;
+ fru->residue = len;
+ fru->offset = 0;
+ fru->addr_off = offset;
+ fru->ubuf = buf;
+ bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Get maximum size fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[out] size - maximum size of fru vpd data
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
+{
+ if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+ return BFA_STATUS_FRU_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(fru->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
+ fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
+ *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
+ else
+ return BFA_STATUS_CMD_NOTSUPP;
+ return BFA_STATUS_OK;
+}
+/*
+ * tfru write.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg)
+{
+ bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
+ bfa_trc(fru, len);
+ bfa_trc(fru, offset);
+ bfa_trc(fru, *((u8 *) buf));
+
+ if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+ return BFA_STATUS_FRU_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(fru->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (fru->op_busy) {
+ bfa_trc(fru, fru->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ fru->op_busy = 1;
+
+ fru->cbfn = cbfn;
+ fru->cbarg = cbarg;
+ fru->residue = len;
+ fru->offset = 0;
+ fru->addr_off = offset;
+ fru->ubuf = buf;
+
+ bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * tfru read.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg)
+{
+ bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
+ bfa_trc(fru, len);
+ bfa_trc(fru, offset);
+
+ if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+ return BFA_STATUS_FRU_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(fru->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (fru->op_busy) {
+ bfa_trc(fru, fru->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ fru->op_busy = 1;
+
+ fru->cbfn = cbfn;
+ fru->cbarg = cbarg;
+ fru->residue = len;
+ fru->offset = 0;
+ fru->addr_off = offset;
+ fru->ubuf = buf;
+ bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Process fru response messages upon receiving interrupts.
+ *
+ * @param[in] fruarg - fru structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_fru_s *fru = fruarg;
+ struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
+ u32 status;
+
+ bfa_trc(fru, msg->mh.msg_id);
+
+ if (!fru->op_busy) {
+ /*
+ * receiving response after ioc failure
+ */
+ bfa_trc(fru, 0x9999);
+ return;
+ }
+
+ switch (msg->mh.msg_id) {
+ case BFI_FRUVPD_I2H_WRITE_RSP:
+ case BFI_TFRU_I2H_WRITE_RSP:
+ status = be32_to_cpu(rsp->status);
+ bfa_trc(fru, status);
+
+ if (status != BFA_STATUS_OK || fru->residue == 0) {
+ fru->status = status;
+ fru->op_busy = 0;
+ if (fru->cbfn)
+ fru->cbfn(fru->cbarg, fru->status);
+ } else {
+ bfa_trc(fru, fru->offset);
+ if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
+ bfa_fru_write_send(fru,
+ BFI_FRUVPD_H2I_WRITE_REQ);
+ else
+ bfa_fru_write_send(fru,
+ BFI_TFRU_H2I_WRITE_REQ);
+ }
+ break;
+ case BFI_FRUVPD_I2H_READ_RSP:
+ case BFI_TFRU_I2H_READ_RSP:
+ status = be32_to_cpu(rsp->status);
+ bfa_trc(fru, status);
+
+ if (status != BFA_STATUS_OK) {
+ fru->status = status;
+ fru->op_busy = 0;
+ if (fru->cbfn)
+ fru->cbfn(fru->cbarg, fru->status);
+ } else {
+ u32 len = be32_to_cpu(rsp->length);
+
+ bfa_trc(fru, fru->offset);
+ bfa_trc(fru, len);
+
+ memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
+ fru->residue -= len;
+ fru->offset += len;
+
+ if (fru->residue == 0) {
+ fru->status = status;
+ fru->op_busy = 0;
+ if (fru->cbfn)
+ fru->cbfn(fru->cbarg, fru->status);
+ } else {
+ if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
+ bfa_fru_read_send(fru,
+ BFI_FRUVPD_H2I_READ_REQ);
+ else
+ bfa_fru_read_send(fru,
+ BFI_TFRU_H2I_READ_REQ);
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * register definitions
+ */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/**
+ * @brief hardware error definition
+ */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/**
+ * @brief flash command register data structure
+ */
+union bfa_flash_cmd_reg_u {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/**
+ * @brief flash device status register data structure
+ */
+union bfa_flash_dev_status_reg_u {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/**
+ * @brief flash address register data structure
+ */
+union bfa_flash_addr_reg_u {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
+/**
+ * dg flash_raw_private Flash raw private functions
+ */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg_u cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg_u addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg_u cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/**
+ * @brief
+ * Flush FLI data fifo.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] dev_status - device status
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg_u dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /*
+ * Check the device status. It may take some time.
+ */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/**
+ * @brief
+ * Read flash status.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return 0 on success, negative error number on error.
+*/
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg_u dev_status;
+ int status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/**
+ * @brief
+ * Start flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash address offset
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ int status;
+
+ /*
+ * len must be mutiple of 4 and not exceeding fifo size
+ */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /*
+ * check status
+ */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /*
+ * check if write-in-progress bit is cleared
+ */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/**
+ * @brief
+ * Check flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return flash device status, 1 if busy, 0 if not.
+ */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+/**
+ * @brief
+ * End flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+
+ u32 i;
+
+ /*
+ * read data fifo up to 32 words
+ */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *) (buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/**
+ * @brief
+ * Perform flash raw read.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash partition address offset
+ * @param[in] buf - read data buffer
+ * @param[in] len - read data length
+ *
+ * Return status.
+ */
+
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+ return !locked;
+
+}
+
+bfa_status_t
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ mdelay(10);
+ }
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+bfa_status_t
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n;
+ int status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
new file mode 100644
index 000000000..a38aafa03
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include "bfad_drv.h"
+#include "bfa_cs.h"
+#include "bfi.h"
+
+#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
+#define BFA_DBG_FWTRC_LEN \
+ (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
+ (sizeof(struct bfa_trc_mod_s) - \
+ BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
+/*
+ * BFA timer declarations
+ */
+typedef void (*bfa_timer_cbfn_t)(void *);
+
+/*
+ * BFA timer data structure
+ */
+struct bfa_timer_s {
+ struct list_head qe;
+ bfa_timer_cbfn_t timercb;
+ void *arg;
+ int timeout; /* in millisecs */
+};
+
+/*
+ * Timer module structure
+ */
+struct bfa_timer_mod_s {
+ struct list_head timer_q;
+};
+
+#define BFA_TIMER_FREQ 200 /* specified in millisecs */
+
+void bfa_timer_beat(struct bfa_timer_mod_s *mod);
+void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
+ bfa_timer_cbfn_t timercb, void *arg,
+ unsigned int timeout);
+void bfa_timer_stop(struct bfa_timer_s *timer);
+
+/*
+ * Generic Scatter Gather Element used by driver
+ */
+struct bfa_sge_s {
+ u32 sg_len;
+ void *sg_addr;
+};
+
+#define bfa_sge_word_swap(__sge) do { \
+ ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \
+ ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \
+ ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \
+} while (0)
+
+#define bfa_swap_words(_x) ( \
+ ((u64)(_x) << 32) | ((u64)(_x) >> 32))
+
+#ifdef __BIG_ENDIAN
+#define bfa_sge_to_be(_x)
+#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x)
+#define bfa_sgaddr_le(_x) bfa_swap_words(_x)
+#else
+#define bfa_sge_to_be(_x) bfa_sge_word_swap(_x)
+#define bfa_sge_to_le(_x)
+#define bfa_sgaddr_le(_x) (_x)
+#endif
+
+/*
+ * BFA memory resources
+ */
+struct bfa_mem_dma_s {
+ struct list_head qe; /* Queue of DMA elements */
+ u32 mem_len; /* Total Length in Bytes */
+ u8 *kva; /* kernel virtual address */
+ u64 dma; /* dma address if DMA memory */
+ u8 *kva_curp; /* kva allocation cursor */
+ u64 dma_curp; /* dma allocation cursor */
+};
+#define bfa_mem_dma_t struct bfa_mem_dma_s
+
+struct bfa_mem_kva_s {
+ struct list_head qe; /* Queue of KVA elements */
+ u32 mem_len; /* Total Length in Bytes */
+ u8 *kva; /* kernel virtual address */
+ u8 *kva_curp; /* kva allocation cursor */
+};
+#define bfa_mem_kva_t struct bfa_mem_kva_s
+
+struct bfa_meminfo_s {
+ struct bfa_mem_dma_s dma_info;
+ struct bfa_mem_kva_s kva_info;
+};
+
+/* BFA memory segment setup macros */
+#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
+ ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
+ if (_seg_sz) \
+ list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
+ &(_meminfo)->dma_info.qe); \
+} while (0)
+
+#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
+ ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
+ if (_seg_sz) \
+ list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
+ &(_meminfo)->kva_info.qe); \
+} while (0)
+
+/* BFA dma memory segments iterator */
+#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
+#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i) \
+ for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr); \
+ _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
+
+#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp)
+#define bfa_mem_dma_virt(_sptr) ((_sptr)->kva_curp)
+#define bfa_mem_dma_phys(_sptr) ((_sptr)->dma_curp)
+#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len)
+
+/* Get the corresponding dma buf kva for a req - from the tag */
+#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz) \
+ (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
+ BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/* Get the corresponding dma buf pa for a req - from the tag */
+#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz) \
+ ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp + \
+ BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/*
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev_s {
+ int pci_slot;
+ u8 pci_func;
+ u16 device_id;
+ u16 ssid;
+ void __iomem *pci_bar_kva;
+};
+
+/*
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address
+ */
+struct bfa_dma_s {
+ void *kva; /* ! Kernel virtual address */
+ u64 pa; /* ! Physical address */
+};
+
+#define BFA_DMA_ALIGN_SZ 256
+#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
+
+/*
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
+#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
+
+#define bfa_dma_be_addr_set(dma_addr, pa) \
+ __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = cpu_to_be32(pa);
+ dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
+}
+
+#define bfa_alen_set(__alen, __len, __pa) \
+ __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
+{
+ alen->al_len = cpu_to_be32(len);
+ bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
+struct bfa_ioc_regs_s {
+ void __iomem *hfn_mbox_cmd;
+ void __iomem *hfn_mbox;
+ void __iomem *lpu_mbox_cmd;
+ void __iomem *lpu_mbox;
+ void __iomem *lpu_read_stat;
+ void __iomem *pss_ctl_reg;
+ void __iomem *pss_err_status_reg;
+ void __iomem *app_pll_fast_ctl_reg;
+ void __iomem *app_pll_slow_ctl_reg;
+ void __iomem *ioc_sem_reg;
+ void __iomem *ioc_usage_sem_reg;
+ void __iomem *ioc_init_sem_reg;
+ void __iomem *ioc_usage_reg;
+ void __iomem *host_page_num_fn;
+ void __iomem *heartbeat;
+ void __iomem *ioc_fwstate;
+ void __iomem *alt_ioc_fwstate;
+ void __iomem *ll_halt;
+ void __iomem *alt_ll_halt;
+ void __iomem *err_set;
+ void __iomem *ioc_fail_sync;
+ void __iomem *shirq_isr_next;
+ void __iomem *shirq_msk_next;
+ void __iomem *smem_page_start;
+ u32 smem_pg0;
+};
+
+#define bfa_mem_read(_raddr, _off) swab32(readl(((_raddr) + (_off))))
+#define bfa_mem_write(_raddr, _off, _val) \
+ writel(swab32((_val)), ((_raddr) + (_off)))
+/*
+ * IOC Mailbox structures
+ */
+struct bfa_mbox_cmd_s {
+ struct list_head qe;
+ u32 msg[BFI_IOC_MSGSZ];
+};
+
+/*
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
+struct bfa_ioc_mbox_mod_s {
+ struct list_head cmd_q; /* pending mbox queue */
+ int nmclass; /* number of handlers */
+ struct {
+ bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */
+ void *cbarg;
+ } mbhdlr[BFI_MC_MAX];
+};
+
+/*
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn_s {
+ bfa_ioc_enable_cbfn_t enable_cbfn;
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+};
+
+/*
+ * IOC event notification mechanism.
+ */
+enum bfa_ioc_event_e {
+ BFA_IOC_E_ENABLED = 1,
+ BFA_IOC_E_DISABLED = 2,
+ BFA_IOC_E_FAILED = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
+
+struct bfa_ioc_notify_s {
+ struct list_head qe;
+ bfa_ioc_notify_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/*
+ * Initialize a IOC event notification structure
+ */
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
+ (__notify)->cbfn = (__cbfn); \
+ (__notify)->cbarg = (__cbarg); \
+} while (0)
+
+struct bfa_iocpf_s {
+ bfa_fsm_t fsm;
+ struct bfa_ioc_s *ioc;
+ bfa_boolean_t fw_mismatch_notified;
+ bfa_boolean_t auto_recover;
+ u32 poll_time;
+};
+
+struct bfa_ioc_s {
+ bfa_fsm_t fsm;
+ struct bfa_s *bfa;
+ struct bfa_pcidev_s pcidev;
+ struct bfa_timer_mod_s *timer_mod;
+ struct bfa_timer_s ioc_timer;
+ struct bfa_timer_s sem_timer;
+ struct bfa_timer_s hb_timer;
+ u32 hb_count;
+ struct list_head notify_q;
+ void *dbg_fwsave;
+ int dbg_fwsave_len;
+ bfa_boolean_t dbg_fwsave_once;
+ enum bfi_pcifn_class clscode;
+ struct bfa_ioc_regs_s ioc_regs;
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_ioc_drv_stats_s stats;
+ bfa_boolean_t fcmode;
+ bfa_boolean_t pllinit;
+ bfa_boolean_t stats_busy; /* outstanding stats */
+ u8 port_id;
+ struct bfa_dma_s attr_dma;
+ struct bfi_ioc_attr_s *attr;
+ struct bfa_ioc_cbfn_s *cbfn;
+ struct bfa_ioc_mbox_mod_s mbox_mod;
+ struct bfa_ioc_hwif_s *ioc_hwif;
+ struct bfa_iocpf_s iocpf;
+ enum bfi_asic_gen asic_gen;
+ enum bfi_asic_mode asic_mode;
+ enum bfi_port_mode port0_mode;
+ enum bfi_port_mode port1_mode;
+ enum bfa_mode_s port_mode;
+ u8 ad_cap_bm; /* adapter cap bit mask */
+ u8 port_mode_cfg; /* config port mode */
+ int ioc_aen_seq;
+};
+
+struct bfa_ioc_hwif_s {
+ bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
+ bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
+ void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
+ void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
+ void (*ioc_map_port) (struct bfa_ioc_s *ioc);
+ void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
+ bfa_boolean_t msix);
+ void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
+ void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
+ void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
+ void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
+ void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
+ void (*ioc_set_fwstate) (struct bfa_ioc_s *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc_s *ioc);
+ void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
+};
+
+/*
+ * Queue element to wait for room in request queue. FIFO order is
+ * maintained when fullfilling requests.
+ */
+struct bfa_reqq_wait_s {
+ struct list_head qe;
+ void (*qresume) (void *cbarg);
+ void *cbarg;
+};
+
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/*
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+ struct list_head qe;
+ bfa_cb_cbfn_t cbfn;
+ bfa_boolean_t once;
+ bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
+ bfa_status_t fw_status; /* to access fw status in comp proc */
+ void *cbarg;
+};
+
+/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+ IOCFC_E_INIT = 1, /* IOCFC init request */
+ IOCFC_E_START = 2, /* IOCFC mod start request */
+ IOCFC_E_STOP = 3, /* IOCFC stop request */
+ IOCFC_E_ENABLE = 4, /* IOCFC enable request */
+ IOCFC_E_DISABLE = 5, /* IOCFC disable request */
+ IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
+ IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
+ IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
+ IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
+ IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
+};
+
+/*
+ * ASIC block configurtion related
+ */
+
+typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
+
+struct bfa_ablk_s {
+ struct bfa_ioc_s *ioc;
+ struct bfa_ablk_cfg_s *cfg;
+ u16 *pcifn;
+ struct bfa_dma_s dma_addr;
+ bfa_boolean_t busy;
+ struct bfa_mbox_cmd_s mb;
+ bfa_ablk_cbfn_t cbfn;
+ void *cbarg;
+ struct bfa_ioc_notify_s ioc_notify;
+ struct bfa_mem_dma_s ablk_dma;
+};
+#define BFA_MEM_ABLK_DMA(__bfa) (&((__bfa)->modules.ablk.ablk_dma))
+
+/*
+ * SFP module specific
+ */
+typedef void (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_sfp_s {
+ void *dev;
+ struct bfa_ioc_s *ioc;
+ struct bfa_trc_mod_s *trcmod;
+ struct sfp_mem_s *sfpmem;
+ bfa_cb_sfp_t cbfn;
+ void *cbarg;
+ enum bfi_sfp_mem_e memtype; /* mem access type */
+ u32 status;
+ struct bfa_mbox_cmd_s mbcmd;
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_ioc_notify_s ioc_notify;
+ enum bfa_defs_sfp_media_e *media;
+ enum bfa_port_speed portspeed;
+ bfa_cb_sfp_t state_query_cbfn;
+ void *state_query_cbarg;
+ u8 lock;
+ u8 data_valid; /* data in dbuf is valid */
+ u8 state; /* sfp state */
+ u8 state_query_lock;
+ struct bfa_mem_dma_s sfp_dma;
+ u8 is_elb; /* eloopback */
+};
+
+#define BFA_SFP_MOD(__bfa) (&(__bfa)->modules.sfp)
+#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma))
+
+u32 bfa_sfp_meminfo(void);
+
+void bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod);
+
+void bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
+
+bfa_status_t bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+ bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t bfa_sfp_media(struct bfa_sfp_s *sfp,
+ enum bfa_defs_sfp_media_e *media,
+ bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t bfa_sfp_speed(struct bfa_sfp_s *sfp,
+ enum bfa_port_speed portspeed,
+ bfa_cb_sfp_t cbfn, void *cbarg);
+
+/*
+ * Flash module specific
+ */
+typedef void (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_flash_s {
+ struct bfa_ioc_s *ioc; /* back pointer to ioc */
+ struct bfa_trc_mod_s *trcmod;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 op_busy; /* operation busy flag */
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ bfa_status_t status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_cb_flash_t cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
+ u32 addr_off; /* partition address offset */
+ struct bfa_mbox_cmd_s mb; /* mailbox */
+ struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+ struct bfa_mem_dma_s flash_dma;
+};
+
+#define BFA_FLASH(__bfa) (&(__bfa)->modules.flash)
+#define BFA_MEM_FLASH_DMA(__bfa) (&(BFA_FLASH(__bfa)->flash_dma))
+
+bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
+ struct bfa_flash_attr_s *attr,
+ bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
+ enum bfa_flash_part_type type, u8 instance,
+ bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
+ enum bfa_flash_part_type type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
+ enum bfa_flash_part_type type, u8 instance, void *buf,
+ u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
+u32 bfa_flash_meminfo(bfa_boolean_t mincfg);
+void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_flash_memclaim(struct bfa_flash_s *flash,
+ u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+bfa_status_t bfa_flash_raw_read(void __iomem *pci_bar_kva,
+ u32 offset, char *buf, u32 len);
+
+/*
+ * DIAG module specific
+ */
+
+typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
+typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon);
+
+/*
+ * Firmware ping test results
+ */
+struct bfa_diag_results_fwping {
+ u32 data; /* store the corrupted data */
+ u32 status;
+ u32 dmastatus;
+ u8 rsvd[4];
+};
+
+struct bfa_diag_qtest_result_s {
+ u32 status;
+ u16 count; /* successful queue test count */
+ u8 queue;
+ u8 rsvd; /* 64-bit align */
+};
+
+/*
+ * Firmware ping test results
+ */
+struct bfa_diag_fwping_s {
+ struct bfa_diag_results_fwping *result;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ u32 data;
+ u8 lock;
+ u8 rsv[3];
+ u32 status;
+ u32 count;
+ struct bfa_mbox_cmd_s mbcmd;
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+};
+
+/*
+ * Temperature sensor query results
+ */
+struct bfa_diag_results_tempsensor_s {
+ u32 status;
+ u16 temp; /* 10-bit A/D value */
+ u16 brd_temp; /* 9-bit board temp */
+ u8 ts_junc; /* show junction tempsensor */
+ u8 ts_brd; /* show board tempsensor */
+ u8 rsvd[6]; /* keep 8 bytes alignment */
+};
+
+struct bfa_diag_tsensor_s {
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ struct bfa_diag_results_tempsensor_s *temp;
+ u8 lock;
+ u8 rsv[3];
+ u32 status;
+ struct bfa_mbox_cmd_s mbcmd;
+};
+
+struct bfa_diag_sfpshow_s {
+ struct sfp_mem_s *sfpmem;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ u8 lock;
+ u8 static_data;
+ u8 rsv[2];
+ u32 status;
+ struct bfa_mbox_cmd_s mbcmd;
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+};
+
+struct bfa_diag_led_s {
+ struct bfa_mbox_cmd_s mbcmd;
+ bfa_boolean_t lock; /* 1: ledtest is operating */
+};
+
+struct bfa_diag_beacon_s {
+ struct bfa_mbox_cmd_s mbcmd;
+ bfa_boolean_t state; /* port beacon state */
+ bfa_boolean_t link_e2e; /* link beacon state */
+};
+
+struct bfa_diag_s {
+ void *dev;
+ struct bfa_ioc_s *ioc;
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_diag_fwping_s fwping;
+ struct bfa_diag_tsensor_s tsensor;
+ struct bfa_diag_sfpshow_s sfpshow;
+ struct bfa_diag_led_s ledtest;
+ struct bfa_diag_beacon_s beacon;
+ void *result;
+ struct bfa_timer_s timer;
+ bfa_cb_diag_beacon_t cbfn_beacon;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ u8 block;
+ u8 timer_active;
+ u8 rsvd[2];
+ u32 status;
+ struct bfa_ioc_notify_s ioc_notify;
+ struct bfa_mem_dma_s diag_dma;
+};
+
+#define BFA_DIAG_MOD(__bfa) (&(__bfa)->modules.diag_mod)
+#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
+
+u32 bfa_diag_meminfo(void);
+void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+ bfa_cb_diag_beacon_t cbfn_beacon,
+ struct bfa_trc_mod_s *trcmod);
+bfa_status_t bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
+ u32 len, u32 *buf, u32 force);
+bfa_status_t bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
+ u32 len, u32 value, u32 force);
+bfa_status_t bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+ struct bfa_diag_results_tempsensor_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
+ u32 pattern, struct bfa_diag_results_fwping *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_sfpshow(struct bfa_diag_s *diag,
+ struct sfp_mem_s *sfpmem, u8 static_data,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_memtest(struct bfa_diag_s *diag,
+ struct bfa_diag_memtest_s *memtest, u32 pattern,
+ struct bfa_diag_memtest_result *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_ledtest(struct bfa_diag_s *diag,
+ struct bfa_diag_ledtest_s *ledtest);
+bfa_status_t bfa_diag_beacon_port(struct bfa_diag_s *diag,
+ bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
+ u32 sec);
+
+/*
+ * PHY module specific
+ */
+typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_phy_s {
+ struct bfa_ioc_s *ioc; /* back pointer to ioc */
+ struct bfa_trc_mod_s *trcmod; /* trace module */
+ u8 instance; /* port instance */
+ u8 op_busy; /* operation busy flag */
+ u8 rsv[2];
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ bfa_status_t status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_cb_phy_t cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
+ u32 addr_off; /* phy address offset */
+ struct bfa_mbox_cmd_s mb; /* mailbox */
+ struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+ struct bfa_mem_dma_s phy_dma;
+};
+#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
+#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
+
+bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_attr_s *attr,
+ bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_stats_s *stats,
+ bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg);
+
+u32 bfa_phy_meminfo(bfa_boolean_t mincfg);
+void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_phy_memclaim(struct bfa_phy_s *phy,
+ u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
+
+/*
+ * FRU module specific
+ */
+typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_fru_s {
+ struct bfa_ioc_s *ioc; /* back pointer to ioc */
+ struct bfa_trc_mod_s *trcmod; /* trace module */
+ u8 op_busy; /* operation busy flag */
+ u8 rsv[3];
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ bfa_status_t status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_cb_fru_t cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
+ u32 addr_off; /* fru address offset */
+ struct bfa_mbox_cmd_s mb; /* mailbox */
+ struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+ struct bfa_mem_dma_s fru_dma;
+ u8 trfr_cmpl;
+};
+
+#define BFA_FRU(__bfa) (&(__bfa)->modules.fru)
+#define BFA_MEM_FRU_DMA(__bfa) (&(BFA_FRU(__bfa)->fru_dma))
+
+bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl);
+bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
+bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_fru_t cbfn, void *cbarg);
+u32 bfa_fru_meminfo(bfa_boolean_t mincfg);
+void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_fru_memclaim(struct bfa_fru_s *fru,
+ u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);
+
+/*
+ * Driver Config( dconf) specific
+ */
+#define BFI_DCONF_SIGNATURE 0xabcdabcd
+#define BFI_DCONF_VERSION 1
+
+#pragma pack(1)
+struct bfa_dconf_hdr_s {
+ u32 signature;
+ u32 version;
+};
+
+struct bfa_dconf_s {
+ struct bfa_dconf_hdr_s hdr;
+ struct bfa_lunmask_cfg_s lun_mask;
+ struct bfa_throttle_cfg_s throttle_cfg;
+};
+#pragma pack()
+
+struct bfa_dconf_mod_s {
+ bfa_sm_t sm;
+ u8 instance;
+ bfa_boolean_t read_data_valid;
+ bfa_boolean_t min_cfg;
+ struct bfa_timer_s timer;
+ struct bfa_s *bfa;
+ void *bfad;
+ void *trcmod;
+ struct bfa_dconf_s *dconf;
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_DCONF_MOD(__bfa) \
+ (&(__bfa)->modules.dconf_mod)
+#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
+#define bfa_dconf_read_data_valid(__bfa) \
+ (BFA_DCONF_MOD(__bfa)->read_data_valid)
+#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
+#define bfa_dconf_get_min_cfg(__bfa) \
+ (BFA_DCONF_MOD(__bfa)->min_cfg)
+
+void bfa_dconf_modinit(struct bfa_s *bfa);
+void bfa_dconf_modexit(struct bfa_s *bfa);
+bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
+
+/*
+ * IOC specfic macros
+ */
+#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
+#define bfa_ioc_is_cna(__ioc) \
+ ((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) || \
+ (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+ (((__stats)->drv_stats) = (__ioc)->stats)
+#define bfa_ioc_clr_stats(__ioc) \
+ memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
+#define bfa_ioc_speed_sup(__ioc) \
+ ((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS : \
+ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
+#define bfa_ioc_get_nports(__ioc) \
+ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
+ ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \
+ ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
+
+/*
+ * IOC mailbox interface
+ */
+void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
+void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
+ bfa_ioc_mbox_mcfunc_t *mcfuncs);
+void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
+void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
+bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
+void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+
+/*
+ * IOC interfaces
+ */
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ (__ioc)->asic_mode))
+
+bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
+ if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
+} while (0)
+#define bfa_ioc_ownership_reset(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_get_fcmode(__ioc) ((__ioc)->fcmode)
+#define bfa_ioc_lpu_read_stat(__ioc) do { \
+ if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
+ ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
+} while (0)
+
+void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
+
+void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
+ struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
+void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
+void bfa_ioc_detach(struct bfa_ioc_s *ioc);
+void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
+void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
+ enum bfi_pcifn_class clscode);
+void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_ioc_enable(struct bfa_ioc_s *ioc);
+void bfa_ioc_disable(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
+
+bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
+ u32 boot_env);
+void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
+void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
+enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
+void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
+void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
+void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
+void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
+void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
+ char *manufacturer);
+void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
+enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
+
+void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
+void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
+ struct bfa_adapter_attr_s *ad_attr);
+void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
+bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
+ int *trclen);
+bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
+ int *trclen);
+bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
+ u32 *offset, int *buflen);
+bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
+void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *fwhdr);
+bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *fwhdr);
+void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
+bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
+bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
+void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
+
+/*
+ * asic block configuration related APIs
+ */
+u32 bfa_ablk_meminfo(void);
+void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
+void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
+ struct bfa_ablk_cfg_s *ablk_cfg,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
+ enum bfa_mode_s mode, int max_pf, int max_vf,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
+ enum bfa_mode_s mode, int max_pf, int max_vf,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+ u8 port, enum bfi_pcifn_class personality,
+ u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
+ u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+
+bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+ u32 *fwimg);
+/*
+ * bfa mfg wwn API functions
+ */
+mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
+mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
+
+/*
+ * F/W Image Size & Chunk
+ */
+extern u32 bfi_image_cb_size;
+extern u32 bfi_image_ct_size;
+extern u32 bfi_image_ct2_size;
+extern u32 *bfi_image_cb;
+extern u32 *bfi_image_ct;
+extern u32 *bfi_image_ct2;
+
+static inline u32 *
+bfi_image_cb_get_chunk(u32 off)
+{
+ return (u32 *)(bfi_image_cb + off);
+}
+
+static inline u32 *
+bfi_image_ct_get_chunk(u32 off)
+{
+ return (u32 *)(bfi_image_ct + off);
+}
+
+static inline u32 *
+bfi_image_ct2_get_chunk(u32 off)
+{
+ return (u32 *)(bfi_image_ct2 + off);
+}
+
+static inline u32*
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
+{
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CB:
+ return bfi_image_cb_get_chunk(off);
+ break;
+ case BFI_ASIC_GEN_CT:
+ return bfi_image_ct_get_chunk(off);
+ break;
+ case BFI_ASIC_GEN_CT2:
+ return bfi_image_ct2_get_chunk(off);
+ break;
+ default:
+ return NULL;
+ }
+}
+
+static inline u32
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
+{
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CB:
+ return bfi_image_cb_size;
+ break;
+ case BFI_ASIC_GEN_CT:
+ return bfi_image_ct_size;
+ break;
+ case BFI_ASIC_GEN_CT2:
+ return bfi_image_ct2_size;
+ break;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * CNA TRCMOD declaration
+ */
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+ BFA_TRC_CNA_PORT = 1,
+ BFA_TRC_CNA_IOC = 2,
+ BFA_TRC_CNA_IOC_CB = 3,
+ BFA_TRC_CNA_IOC_CT = 4,
+};
+
+#endif /* __BFA_IOC_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
new file mode 100644
index 000000000..453c2f5b5
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+BFA_TRC_FILE(CNA, IOC_CB);
+
+#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH))
+
+/*
+ * forward declarations
+ */
+static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
+static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_cur_ioc_fwstate(
+ struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_alt_ioc_fwstate(
+ struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
+
+static struct bfa_ioc_hwif_s hwif_cb;
+
+/*
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
+{
+ hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init;
+ hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock;
+ hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock;
+ hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
+ hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
+ hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
+ hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
+ hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+ hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
+ hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
+ hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
+ hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
+ hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
+ hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate;
+ hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate;
+ hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate;
+ hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate;
+
+ ioc->ioc_hwif = &hwif_cb;
+}
+
+/*
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
+{
+ enum bfi_ioc_state alt_fwstate, cur_fwstate;
+ struct bfi_ioc_image_hdr_s fwhdr;
+
+ cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
+ bfa_trc(ioc, cur_fwstate);
+ alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
+ bfa_trc(ioc, alt_fwstate);
+
+ /*
+ * Uninit implies this is the only driver as of now.
+ */
+ if (cur_fwstate == BFI_IOC_UNINIT)
+ return BFA_TRUE;
+ /*
+ * Check if another driver with a different firmware is active
+ */
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) &&
+ alt_fwstate != BFI_IOC_DISABLED) {
+ bfa_trc(ioc, alt_fwstate);
+ return BFA_FALSE;
+ }
+
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
+{
+ writel(~0U, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
+}
+
+/*
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
+};
+
+/*
+ * Host <-> LPU mailbox command/status registers
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
+
+ { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
+};
+
+static void
+bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
+ }
+
+ /*
+ * Host <-> LPU mailbox command/status registers
+ */
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+
+ /*
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
+
+ /*
+ * err set reg : for notification of hb failure
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/*
+ * Initialize IOC to port mapping.
+ */
+
+static void
+bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
+{
+ /*
+ * For crossbow, port id is same as pci function.
+ */
+ ioc->port_id = bfa_ioc_pcifn(ioc);
+
+ bfa_trc(ioc, ioc->port_id);
+}
+
+/*
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+ u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ /**
+ * Driver load time. If the join bit is set,
+ * it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+ if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) {
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
+{
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+ writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+ writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+
+ writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+ ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+ return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) &
+ BFA_IOC_CB_FWSTATE_MASK);
+}
+
+static void
+bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate);
+
+ writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+ ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+ return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) &
+ BFA_IOC_CB_FWSTATE_MASK);
+}
+
+static void
+bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+}
+
+static bfa_boolean_t
+bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
+{
+ u32 fwstate, alt_fwstate;
+ fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
+
+ /*
+ * At this point, this IOC is hoding the hw sem in the
+ * start path (fwcheck) OR in the disable/enable path
+ * OR to check if the other IOC has acknowledged failure.
+ *
+ * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
+ * or in MEMTEST states. In a normal scenario, this IOC
+ * can not be in OP state when this function is called.
+ *
+ * However, this IOC could still be in OP state when
+ * the OS driver is starting up, if the OptROM code has
+ * left it in that state.
+ *
+ * If we had marked this IOC's fwstate as BFI_IOC_FAIL
+ * in the failure case and now, if the fwstate is not
+ * BFI_IOC_FAIL it implies that the other PCI fn have
+ * reinitialized the ASIC or this IOC got disabled, so
+ * return TRUE.
+ */
+ if (fwstate == BFI_IOC_UNINIT ||
+ fwstate == BFI_IOC_INITING ||
+ fwstate == BFI_IOC_DISABLED ||
+ fwstate == BFI_IOC_MEMTEST ||
+ fwstate == BFI_IOC_OP)
+ return BFA_TRUE;
+ else {
+ alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
+ if (alt_fwstate == BFI_IOC_FAIL ||
+ alt_fwstate == BFI_IOC_DISABLED ||
+ alt_fwstate == BFI_IOC_UNINIT ||
+ alt_fwstate == BFI_IOC_INITING ||
+ alt_fwstate == BFI_IOC_MEMTEST)
+ return BFA_TRUE;
+ else
+ return BFA_FALSE;
+ }
+}
+
+bfa_status_t
+bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
+{
+ u32 pll_sclk, pll_fclk, join_bits;
+
+ pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
+ __APP_PLL_SCLK_P0_1(3U) |
+ __APP_PLL_SCLK_JITLMT0_1(3U) |
+ __APP_PLL_SCLK_CNTLMT0_1(3U);
+ pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
+ __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+ __APP_PLL_LCLK_JITLMT0_1(3U) |
+ __APP_PLL_LCLK_CNTLMT0_1(3U);
+ join_bits = readl(rb + BFA_IOC0_STATE_REG) &
+ BFA_IOC_CB_JOIN_MASK;
+ writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
+ join_bits = readl(rb + BFA_IOC1_STATE_REG) &
+ BFA_IOC_CB_JOIN_MASK;
+ writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+ writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+ writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
+ udelay(2);
+ writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+ writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+ writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
+ writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
new file mode 100644
index 000000000..bd53150e4
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -0,0 +1,997 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+BFA_TRC_FILE(CNA, IOC_CT);
+
+#define bfa_ioc_ct_sync_pos(__ioc) \
+ ((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH 16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+ (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
+/*
+ * forward declarations
+ */
+static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+ struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+ struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
+
+static struct bfa_ioc_hwif_s hwif_ct;
+static struct bfa_ioc_hwif_s hwif_ct2;
+
+/*
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr_s fwhdr;
+
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+ /*
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ bfa_trc(ioc, usecnt);
+ return BFA_TRUE;
+ }
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ bfa_trc(ioc, ioc_fwstate);
+
+ /*
+ * Use count cannot be non-zero and chip in uninitialized state.
+ */
+ WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
+
+ /*
+ * Check if another driver with a different firmware is active
+ */
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+ bfa_trc(ioc, usecnt);
+ return BFA_FALSE;
+ }
+
+ /*
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+ bfa_trc(ioc, usecnt);
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+ u32 usecnt;
+
+ /*
+ * decrement usage count
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+ WARN_ON(usecnt <= 0);
+
+ usecnt--;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ bfa_trc(ioc, usecnt);
+
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/*
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
+{
+ if (bfa_ioc_is_cna(ioc)) {
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+ /* Wait for halt to take effect */
+ readl(ioc->ioc_regs.ll_halt);
+ readl(ioc->ioc_regs.alt_ll_halt);
+ } else {
+ writel(~0U, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
+ }
+}
+
+/*
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/*
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } ct_p0reg[] = {
+ { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
+};
+
+/*
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } ct_p1reg[] = {
+ { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
+ ct2_reg[] = {
+ { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU0_READ_STAT},
+ { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU1_READ_STAT},
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+ ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
+
+ /*
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb;
+ int port = bfa_ioc_portid(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+ ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+ if (port == 0) {
+ ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+ ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
+
+ /*
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/*
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /*
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = readl(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+
+ bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+ bfa_trc(ioc, ioc->port_id);
+}
+
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+ ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+
+ bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+ bfa_trc(ioc, ioc->port_id);
+}
+
+/*
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = readl(rb + FNC_PERS_REG);
+ bfa_trc(ioc, r32);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /*
+ * If already in desired mode, do not change anything
+ */
+ if ((!msix && mode) || (msix && !mode))
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ bfa_trc(ioc, r32);
+
+ writel(r32, rb + FNC_PERS_REG);
+}
+
+bfa_boolean_t
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_read_stat);
+ if (r32) {
+ writel(1, ioc->ioc_regs.lpu_read_stat);
+ return BFA_TRUE;
+ }
+
+ return BFA_FALSE;
+}
+
+/*
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
+{
+
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+ writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+ bfa_ioc_ct_sync_pos(ioc);
+
+ writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+ writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
+ ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bfa_boolean_t
+bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+ uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+ uint32_t tmp_ackd;
+
+ if (sync_ackd == 0)
+ return BFA_TRUE;
+
+ /*
+ * The check below is to see whether any other PCI fn
+ * has reinitialized the ASIC (reset sync_ackd bits)
+ * and failed again while this IOC was waiting for hw
+ * semaphore (in bfa_iocpf_sm_semwait()).
+ */
+ tmp_ackd = sync_ackd;
+ if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
+ !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+ sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+ if (sync_reqd == sync_ackd) {
+ writel(bfa_ioc_ct_clear_sync_ackd(r32),
+ ioc->ioc_regs.ioc_fail_sync);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ /*
+ * If another PCI fn reinitialized and failed again while
+ * this IOC was waiting for hw sem, the sync_ackd bit for
+ * this IOC need to be set again to allow reinitialization.
+ */
+ if (tmp_ackd != sync_ackd)
+ writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+ return BFA_FALSE;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+static void
+bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
+{
+ hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+ hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+ hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
+ hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
+ hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
+ hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
+ hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
+ hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+ hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
+ hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
+ hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
+ hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
+
+ hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+ hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+ hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+ hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+ ioc->ioc_hwif = &hwif_ct;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
+
+ hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
+ hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
+ hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
+ hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
+ hwif_ct2.ioc_isr_mode_set = NULL;
+ ioc->ioc_hwif = &hwif_ct2;
+}
+
+/*
+ * Workaround for MSI-X resource allocation for catapult-2 with no asic block
+ */
+#define HOSTFN_MSIX_DEFAULT 64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
+#define __MSIX_VT_NUMVT__MK 0x003ff800
+#define __MSIX_VT_NUMVT__SH 11
+#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_ 0x000007ff
+void
+bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ if (r32 & __MSIX_VT_NUMVT__MK) {
+ writel(r32 & __MSIX_VT_OFST_,
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+ return;
+ }
+
+ writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+ HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+}
+
+bfa_status_t
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+ u32 pll_sclk, pll_fclk, r32;
+ bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
+
+ pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+ __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+ __APP_PLL_SCLK_JITLMT0_1(3U) |
+ __APP_PLL_SCLK_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+ __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+ __APP_PLL_LCLK_JITLMT0_1(3U) |
+ __APP_PLL_LCLK_CNTLMT0_1(1U);
+
+ if (fcmode) {
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
+ } else {
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
+ }
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
+ writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
+ __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
+ __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+
+ if (!fcmode) {
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+ }
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+ if (!fcmode) {
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
+ }
+
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put s_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+ r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * Ignore mode and program for the max clock (which is FC16)
+ * Firmware/NFC will do the PLL init appropiately
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * while doing PLL init dont clock gate ethernet subsystem
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+
+ r32 = readl((rb + CT2_PCIE_MISC_REG));
+ writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
+
+ /*
+ * set sclk value
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+ __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * poll for s_clk lock or delay 1ms
+ */
+ udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put l_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+ r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set LPU speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+ /*
+ * set LPU half speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set lclk for mode (set for FC16)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+ r32 |= 0x20c1731b;
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * poll for s_clk lock or delay 1ms
+ */
+ udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+
+ writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+ udelay(1000);
+ writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+ /* put port0, port1 MAC & AHB in reset */
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ rb + CT2_CSI_MAC_CONTROL_REG(0));
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ rb + CT2_CSI_MAC_CONTROL_REG(1));
+}
+
+static void
+bfa_ioc_ct2_enable_flash(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl((rb + PSS_GPIO_OUT_REG));
+ writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+ r32 = readl((rb + PSS_GPIO_OE_REG));
+ writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+}
+
+#define CT2_NFC_MAX_DELAY 1000
+#define CT2_NFC_PAUSE_MAX_DELAY 4000
+#define CT2_NFC_VER_VALID 0x147
+#define CT2_NFC_STATE_RUNNING 0x20000001
+#define BFA_IOC_PLL_POLL 1000000
+
+static bfa_boolean_t
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (r32 & __NFC_CONTROLLER_HALTED)
+ return BFA_TRUE;
+
+ return BFA_FALSE;
+}
+
+static void
+bfa_ioc_ct2_nfc_halt(void __iomem *rb)
+{
+ int i;
+
+ writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ if (bfa_ioc_ct2_nfc_halted(rb))
+ break;
+ udelay(1000);
+ }
+ WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+ u32 r32;
+ int i;
+
+ writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (!(r32 & __NFC_CONTROLLER_HALTED))
+ return;
+ udelay(1000);
+ }
+ WARN_ON(1);
+}
+
+static void
+bfa_ioc_ct2_clk_reset(void __iomem *rb)
+{
+ u32 r32;
+
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk & l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+}
+
+static void
+bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
+{
+ u32 r32, i;
+
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+ writel(r32, (rb + PSS_CTL_REG));
+
+ writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
+
+ for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+ r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
+
+ if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
+ break;
+ }
+ WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
+
+ for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+ r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
+
+ if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
+ break;
+ }
+ WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
+
+ r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+ WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+}
+
+static void
+bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
+{
+ u32 r32;
+ int i;
+
+ if (bfa_ioc_ct2_nfc_halted(rb))
+ bfa_ioc_ct2_nfc_resume(rb);
+ for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_STS_REG);
+ if (r32 == CT2_NFC_STATE_RUNNING)
+ return;
+ udelay(1000);
+ }
+
+ r32 = readl(rb + CT2_NFC_STS_REG);
+ WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
+}
+
+bfa_status_t
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+ u32 wgn, r32, nfc_ver;
+
+ wgn = readl(rb + CT2_WGN_STATUS);
+
+ if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+ /*
+ * If flash is corrupted, enable flash explicitly
+ */
+ bfa_ioc_ct2_clk_reset(rb);
+ bfa_ioc_ct2_enable_flash(rb);
+
+ bfa_ioc_ct2_mac_reset(rb);
+
+ bfa_ioc_ct2_clk_reset(rb);
+ bfa_ioc_ct2_enable_flash(rb);
+
+ } else {
+ nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+ if ((nfc_ver >= CT2_NFC_VER_VALID) &&
+ (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
+
+ bfa_ioc_ct2_wait_till_nfc_running(rb);
+
+ bfa_ioc_ct2_nfc_clk_reset(rb);
+ } else {
+ bfa_ioc_ct2_nfc_halt(rb);
+
+ bfa_ioc_ct2_clk_reset(rb);
+ bfa_ioc_ct2_mac_reset(rb);
+ bfa_ioc_ct2_clk_reset(rb);
+
+ }
+ }
+ /*
+ * The very first PCIe DMA Read done by LPU fails with a fatal error,
+ * when Address Translation Cache (ATC) has been enabled by system BIOS.
+ *
+ * Workaround:
+ * Disable Invalidated Tag Match Enable capability by setting the bit 26
+ * of CHIP_MISC_PRG to 0, by default it is set to 1.
+ */
+ r32 = readl(rb + CT2_CHIP_MISC_PRG);
+ writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));
+
+ /*
+ * Mask the interrupts and clear any
+ * pending interrupts left by BIOS/EFI
+ */
+
+ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+ writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+ /* For first time initialization, no need to clear interrupts */
+ r32 = readl(rb + HOST_SEM5_REG);
+ if (r32 & 0x1) {
+ r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ }
+ r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ }
+ }
+
+ bfa_ioc_ct2_mem_init(rb);
+
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+ return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
new file mode 100644
index 000000000..a14c784ff
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfa_modules.h BFA modules
+ */
+
+#ifndef __BFA_MODULES_H__
+#define __BFA_MODULES_H__
+
+#include "bfa_cs.h"
+#include "bfa.h"
+#include "bfa_svc.h"
+#include "bfa_fcpim.h"
+#include "bfa_port.h"
+
+struct bfa_modules_s {
+ struct bfa_fcdiag_s fcdiag; /* fcdiag module */
+ struct bfa_fcport_s fcport; /* fc port module */
+ struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
+ struct bfa_lps_mod_s lps_mod; /* fcxp module */
+ struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
+ struct bfa_rport_mod_s rport_mod; /* remote port module */
+ struct bfa_fcp_mod_s fcp_mod; /* FCP initiator module */
+ struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
+ struct bfa_port_s port; /* Physical port module */
+ struct bfa_ablk_s ablk; /* ASIC block config module */
+ struct bfa_cee_s cee; /* CEE Module */
+ struct bfa_sfp_s sfp; /* SFP module */
+ struct bfa_flash_s flash; /* flash module */
+ struct bfa_diag_s diag_mod; /* diagnostics module */
+ struct bfa_phy_s phy; /* phy module */
+ struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
+ struct bfa_fru_s fru; /* fru module */
+};
+
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+ BFA_TRC_HAL_CORE = 1,
+ BFA_TRC_HAL_FCXP = 2,
+ BFA_TRC_HAL_FCPIM = 3,
+ BFA_TRC_HAL_IOCFC_CT = 4,
+ BFA_TRC_HAL_IOCFC_CB = 5,
+};
+
+/*
+ * Macro to define a new BFA module
+ */
+#define BFA_MODULE(__mod) \
+ static void bfa_ ## __mod ## _meminfo( \
+ struct bfa_iocfc_cfg_s *cfg, \
+ struct bfa_meminfo_s *meminfo, \
+ struct bfa_s *bfa); \
+ static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
+ void *bfad, struct bfa_iocfc_cfg_s *cfg, \
+ struct bfa_pcidev_s *pcidev); \
+ static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
+ static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
+ static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \
+ static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \
+ \
+ extern struct bfa_module_s hal_mod_ ## __mod; \
+ struct bfa_module_s hal_mod_ ## __mod = { \
+ bfa_ ## __mod ## _meminfo, \
+ bfa_ ## __mod ## _attach, \
+ bfa_ ## __mod ## _detach, \
+ bfa_ ## __mod ## _start, \
+ bfa_ ## __mod ## _stop, \
+ bfa_ ## __mod ## _iocdisable, \
+ }
+
+#define BFA_CACHELINE_SZ (256)
+
+/*
+ * Structure used to interact between different BFA sub modules
+ *
+ * Each sub module needs to implement only the entry points relevant to it (and
+ * can leave entry points as NULL)
+ */
+struct bfa_module_s {
+ void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa);
+ void (*attach) (struct bfa_s *bfa, void *bfad,
+ struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev);
+ void (*detach) (struct bfa_s *bfa);
+ void (*start) (struct bfa_s *bfa);
+ void (*stop) (struct bfa_s *bfa);
+ void (*iocdisable) (struct bfa_s *bfa);
+};
+
+
+struct bfa_s {
+ void *bfad; /* BFA driver instance */
+ struct bfa_plog_s *plog; /* portlog buffer */
+ struct bfa_trc_mod_s *trcmod; /* driver tracing */
+ struct bfa_ioc_s ioc; /* IOC module */
+ struct bfa_iocfc_s iocfc; /* IOCFC module */
+ struct bfa_timer_mod_s timer_mod; /* timer module */
+ struct bfa_modules_s modules; /* BFA modules */
+ struct list_head comp_q; /* pending completions */
+ bfa_boolean_t queue_process; /* queue processing enabled */
+ struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
+ bfa_boolean_t fcs; /* FCS is attached to BFA */
+ struct bfa_msix_s msix;
+ int bfa_aen_seq;
+ bfa_boolean_t intr_enabled; /* Status of interrupts */
+};
+
+extern bfa_boolean_t bfa_auto_recover;
+extern struct bfa_module_s hal_mod_fcdiag;
+extern struct bfa_module_s hal_mod_sgpg;
+extern struct bfa_module_s hal_mod_fcport;
+extern struct bfa_module_s hal_mod_fcxp;
+extern struct bfa_module_s hal_mod_lps;
+extern struct bfa_module_s hal_mod_uf;
+extern struct bfa_module_s hal_mod_rport;
+extern struct bfa_module_s hal_mod_fcp;
+extern struct bfa_module_s hal_mod_dconf;
+
+#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
new file mode 100644
index 000000000..1c9baa683
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_PORTLOG_H__
+#define __BFA_PORTLOG_H__
+
+#include "bfa_fc.h"
+#include "bfa_defs.h"
+
+#define BFA_PL_NLOG_ENTS 256
+#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
+
+#define BFA_PL_STRING_LOG_SZ 32 /* number of chars in string log */
+#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */
+
+enum bfa_plog_log_type {
+ BFA_PL_LOG_TYPE_INVALID = 0,
+ BFA_PL_LOG_TYPE_INT = 1,
+ BFA_PL_LOG_TYPE_STRING = 2,
+};
+
+/*
+ * the (fixed size) record format for each entry in the portlog
+ */
+struct bfa_plog_rec_s {
+ u64 tv; /* timestamp */
+ u8 port; /* Source port that logged this entry */
+ u8 mid; /* module id */
+ u8 eid; /* indicates Rx, Tx, IOCTL, etc. bfa_plog_eid */
+ u8 log_type; /* string/integer log, bfa_plog_log_type_t */
+ u8 log_num_ints;
+ /*
+ * interpreted only if log_type is INT_LOG. indicates number of
+ * integers in the int_log[] (0-PL_INT_LOG_SZ).
+ */
+ u8 rsvd;
+ u16 misc; /* can be used to indicate fc frame length */
+ union {
+ char string_log[BFA_PL_STRING_LOG_SZ];
+ u32 int_log[BFA_PL_INT_LOG_SZ];
+ } log_entry;
+
+};
+
+/*
+ * the following #defines will be used by the logging entities to indicate
+ * their module id. BFAL will convert the integer value to string format
+ *
+* process to be used while changing the following #defines:
+ * - Always add new entries at the end
+ * - define corresponding string in BFAL
+ * - Do not remove any entry or rearrange the order.
+ */
+enum bfa_plog_mid {
+ BFA_PL_MID_INVALID = 0,
+ BFA_PL_MID_DEBUG = 1,
+ BFA_PL_MID_DRVR = 2,
+ BFA_PL_MID_HAL = 3,
+ BFA_PL_MID_HAL_FCXP = 4,
+ BFA_PL_MID_HAL_UF = 5,
+ BFA_PL_MID_FCS = 6,
+ BFA_PL_MID_LPS = 7,
+ BFA_PL_MID_MAX = 8
+};
+
+#define BFA_PL_MID_STRLEN 8
+struct bfa_plog_mid_strings_s {
+ char m_str[BFA_PL_MID_STRLEN];
+};
+
+/*
+ * the following #defines will be used by the logging entities to indicate
+ * their event type. BFAL will convert the integer value to string format
+ *
+* process to be used while changing the following #defines:
+ * - Always add new entries at the end
+ * - define corresponding string in BFAL
+ * - Do not remove any entry or rearrange the order.
+ */
+enum bfa_plog_eid {
+ BFA_PL_EID_INVALID = 0,
+ BFA_PL_EID_IOC_DISABLE = 1,
+ BFA_PL_EID_IOC_ENABLE = 2,
+ BFA_PL_EID_PORT_DISABLE = 3,
+ BFA_PL_EID_PORT_ENABLE = 4,
+ BFA_PL_EID_PORT_ST_CHANGE = 5,
+ BFA_PL_EID_TX = 6,
+ BFA_PL_EID_TX_ACK1 = 7,
+ BFA_PL_EID_TX_RJT = 8,
+ BFA_PL_EID_TX_BSY = 9,
+ BFA_PL_EID_RX = 10,
+ BFA_PL_EID_RX_ACK1 = 11,
+ BFA_PL_EID_RX_RJT = 12,
+ BFA_PL_EID_RX_BSY = 13,
+ BFA_PL_EID_CT_IN = 14,
+ BFA_PL_EID_CT_OUT = 15,
+ BFA_PL_EID_DRIVER_START = 16,
+ BFA_PL_EID_RSCN = 17,
+ BFA_PL_EID_DEBUG = 18,
+ BFA_PL_EID_MISC = 19,
+ BFA_PL_EID_FIP_FCF_DISC = 20,
+ BFA_PL_EID_FIP_FCF_CVL = 21,
+ BFA_PL_EID_LOGIN = 22,
+ BFA_PL_EID_LOGO = 23,
+ BFA_PL_EID_TRUNK_SCN = 24,
+ BFA_PL_EID_MAX
+};
+
+#define BFA_PL_ENAME_STRLEN 8
+struct bfa_plog_eid_strings_s {
+ char e_str[BFA_PL_ENAME_STRLEN];
+};
+
+#define BFA_PL_SIG_LEN 8
+#define BFA_PL_SIG_STR "12pl123"
+
+/*
+ * per port circular log buffer
+ */
+struct bfa_plog_s {
+ char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */
+ u8 plog_enabled;
+ u8 rsvd[7];
+ u32 ticks;
+ u16 head;
+ u16 tail;
+ struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS];
+};
+
+void bfa_plog_init(struct bfa_plog_s *plog);
+void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event, u16 misc, char *log_str);
+void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event, u16 misc,
+ u32 *intarr, u32 num_ints);
+void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr);
+void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event, u16 misc,
+ struct fchs_s *fchdr, u32 pld_w0);
+
+#endif /* __BFA_PORTLOG_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
new file mode 100644
index 000000000..8ea7697de
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -0,0 +1,880 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_defs_svc.h"
+#include "bfa_port.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+
+
+BFA_TRC_FILE(CNA, PORT);
+
+static void
+bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
+{
+ u32 *dip = (u32 *) stats;
+ __be32 t0, t1;
+ int i;
+
+ for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
+ i += 2) {
+ t0 = dip[i];
+ t1 = dip[i + 1];
+#ifdef __BIG_ENDIAN
+ dip[i] = be32_to_cpu(t0);
+ dip[i + 1] = be32_to_cpu(t1);
+#else
+ dip[i] = be32_to_cpu(t1);
+ dip[i + 1] = be32_to_cpu(t0);
+#endif
+ }
+}
+
+/*
+ * bfa_port_enable_isr()
+ *
+ *
+ * @param[in] port - Pointer to the port module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+ bfa_trc(port, status);
+ port->endis_pending = BFA_FALSE;
+ port->endis_cbfn(port->endis_cbarg, status);
+}
+
+/*
+ * bfa_port_disable_isr()
+ *
+ *
+ * @param[in] port - Pointer to the port module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+ bfa_trc(port, status);
+ port->endis_pending = BFA_FALSE;
+ port->endis_cbfn(port->endis_cbarg, status);
+}
+
+/*
+ * bfa_port_get_stats_isr()
+ *
+ *
+ * @param[in] port - Pointer to the Port module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+ port->stats_status = status;
+ port->stats_busy = BFA_FALSE;
+
+ if (status == BFA_STATUS_OK) {
+ struct timeval tv;
+
+ memcpy(port->stats, port->stats_dma.kva,
+ sizeof(union bfa_port_stats_u));
+ bfa_port_stats_swap(port, port->stats);
+
+ do_gettimeofday(&tv);
+ port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
+ }
+
+ if (port->stats_cbfn) {
+ port->stats_cbfn(port->stats_cbarg, status);
+ port->stats_cbfn = NULL;
+ }
+}
+
+/*
+ * bfa_port_clear_stats_isr()
+ *
+ *
+ * @param[in] port - Pointer to the Port module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+ struct timeval tv;
+
+ port->stats_status = status;
+ port->stats_busy = BFA_FALSE;
+
+ /*
+ * re-initialize time stamp for stats reset
+ */
+ do_gettimeofday(&tv);
+ port->stats_reset_time = tv.tv_sec;
+
+ if (port->stats_cbfn) {
+ port->stats_cbfn(port->stats_cbarg, status);
+ port->stats_cbfn = NULL;
+ }
+}
+
+/*
+ * bfa_port_isr()
+ *
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return void
+ */
+static void
+bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+ struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
+ union bfi_port_i2h_msg_u *i2hmsg;
+
+ i2hmsg = (union bfi_port_i2h_msg_u *) m;
+ bfa_trc(port, m->mh.msg_id);
+
+ switch (m->mh.msg_id) {
+ case BFI_PORT_I2H_ENABLE_RSP:
+ if (port->endis_pending == BFA_FALSE)
+ break;
+ bfa_port_enable_isr(port, i2hmsg->enable_rsp.status);
+ break;
+
+ case BFI_PORT_I2H_DISABLE_RSP:
+ if (port->endis_pending == BFA_FALSE)
+ break;
+ bfa_port_disable_isr(port, i2hmsg->disable_rsp.status);
+ break;
+
+ case BFI_PORT_I2H_GET_STATS_RSP:
+ /* Stats busy flag is still set? (may be cmd timed out) */
+ if (port->stats_busy == BFA_FALSE)
+ break;
+ bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
+ break;
+
+ case BFI_PORT_I2H_CLEAR_STATS_RSP:
+ if (port->stats_busy == BFA_FALSE)
+ break;
+ bfa_port_clear_stats_isr(port, i2hmsg->clearstats_rsp.status);
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * bfa_port_meminfo()
+ *
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_port_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_port_mem_claim()
+ *
+ *
+ * @param[in] port Port module pointer
+ * dma_kva Kernel Virtual Address of Port DMA Memory
+ * dma_pa Physical Address of Port DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
+{
+ port->stats_dma.kva = dma_kva;
+ port->stats_dma.pa = dma_pa;
+}
+
+/*
+ * bfa_port_enable()
+ *
+ * Send the Port enable request to the f/w
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
+ void *cbarg)
+{
+ struct bfi_port_generic_req_s *m;
+
+ /* If port is PBC disabled, return error */
+ if (port->pbc_disabled) {
+ bfa_trc(port, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ if (bfa_ioc_is_disabled(port->ioc)) {
+ bfa_trc(port, BFA_STATUS_IOC_DISABLED);
+ return BFA_STATUS_IOC_DISABLED;
+ }
+
+ if (!bfa_ioc_is_operational(port->ioc)) {
+ bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ /* if port is d-port enabled, return error */
+ if (port->dport_enabled) {
+ bfa_trc(port, BFA_STATUS_DPORT_ERR);
+ return BFA_STATUS_DPORT_ERR;
+ }
+
+ if (port->endis_pending) {
+ bfa_trc(port, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
+
+ port->msgtag++;
+ port->endis_cbfn = cbfn;
+ port->endis_cbarg = cbarg;
+ port->endis_pending = BFA_TRUE;
+
+ bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
+ bfa_ioc_portid(port->ioc));
+ bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_disable()
+ *
+ * Send the Port disable request to the f/w
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
+ void *cbarg)
+{
+ struct bfi_port_generic_req_s *m;
+
+ /* If port is PBC disabled, return error */
+ if (port->pbc_disabled) {
+ bfa_trc(port, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ if (bfa_ioc_is_disabled(port->ioc)) {
+ bfa_trc(port, BFA_STATUS_IOC_DISABLED);
+ return BFA_STATUS_IOC_DISABLED;
+ }
+
+ if (!bfa_ioc_is_operational(port->ioc)) {
+ bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ /* if port is d-port enabled, return error */
+ if (port->dport_enabled) {
+ bfa_trc(port, BFA_STATUS_DPORT_ERR);
+ return BFA_STATUS_DPORT_ERR;
+ }
+
+ if (port->endis_pending) {
+ bfa_trc(port, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
+
+ port->msgtag++;
+ port->endis_cbfn = cbfn;
+ port->endis_cbarg = cbarg;
+ port->endis_pending = BFA_TRUE;
+
+ bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
+ bfa_ioc_portid(port->ioc));
+ bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_get_stats()
+ *
+ * Send the request to the f/w to fetch Port statistics.
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
+ bfa_port_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_port_get_stats_req_s *m;
+
+ if (!bfa_ioc_is_operational(port->ioc)) {
+ bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (port->stats_busy) {
+ bfa_trc(port, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;
+
+ port->stats = stats;
+ port->stats_cbfn = cbfn;
+ port->stats_cbarg = cbarg;
+ port->stats_busy = BFA_TRUE;
+ bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);
+
+ bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
+ bfa_ioc_portid(port->ioc));
+ bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_clear_stats()
+ *
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
+ void *cbarg)
+{
+ struct bfi_port_generic_req_s *m;
+
+ if (!bfa_ioc_is_operational(port->ioc)) {
+ bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (port->stats_busy) {
+ bfa_trc(port, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;
+
+ port->stats_cbfn = cbfn;
+ port->stats_cbarg = cbarg;
+ port->stats_busy = BFA_TRUE;
+
+ bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
+ bfa_ioc_portid(port->ioc));
+ bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_notify()
+ *
+ * Port module IOC event handler
+ *
+ * @param[in] Pointer to the Port module data structure.
+ * @param[in] IOC event structure
+ *
+ * @return void
+ */
+void
+bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_port_s *port = (struct bfa_port_s *) arg;
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ /* Fail any pending get_stats/clear_stats requests */
+ if (port->stats_busy) {
+ if (port->stats_cbfn)
+ port->stats_cbfn(port->stats_cbarg,
+ BFA_STATUS_FAILED);
+ port->stats_cbfn = NULL;
+ port->stats_busy = BFA_FALSE;
+ }
+
+ /* Clear any enable/disable is pending */
+ if (port->endis_pending) {
+ if (port->endis_cbfn)
+ port->endis_cbfn(port->endis_cbarg,
+ BFA_STATUS_FAILED);
+ port->endis_cbfn = NULL;
+ port->endis_pending = BFA_FALSE;
+ }
+
+ /* clear D-port mode */
+ if (port->dport_enabled)
+ bfa_port_set_dportenabled(port, BFA_FALSE);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * bfa_port_attach()
+ *
+ *
+ * @param[in] port - Pointer to the Port module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ * trcmod -
+ *
+ * @return void
+ */
+void
+bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod)
+{
+ struct timeval tv;
+
+ WARN_ON(!port);
+
+ port->dev = dev;
+ port->ioc = ioc;
+ port->trcmod = trcmod;
+
+ port->stats_busy = BFA_FALSE;
+ port->endis_pending = BFA_FALSE;
+ port->stats_cbfn = NULL;
+ port->endis_cbfn = NULL;
+ port->pbc_disabled = BFA_FALSE;
+ port->dport_enabled = BFA_FALSE;
+
+ bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
+ bfa_q_qe_init(&port->ioc_notify);
+ bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
+ list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);
+
+ /*
+ * initialize time stamp for stats reset
+ */
+ do_gettimeofday(&tv);
+ port->stats_reset_time = tv.tv_sec;
+
+ bfa_trc(port, 0);
+}
+
+/*
+ * bfa_port_set_dportenabled();
+ *
+ * Port module- set pbc disabled flag
+ *
+ * @param[in] port - Pointer to the Port module data structure
+ *
+ * @return void
+ */
+void
+bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled)
+{
+ port->dport_enabled = enabled;
+}
+
+/*
+ * CEE module specific definitions
+ */
+
+/*
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;
+
+ cee->get_attr_status = status;
+ bfa_trc(cee, 0);
+ if (status == BFA_STATUS_OK) {
+ bfa_trc(cee, 0);
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr_s));
+ lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
+ lldp_cfg->enabled_system_cap =
+ be16_to_cpu(lldp_cfg->enabled_system_cap);
+ }
+ cee->get_attr_pending = BFA_FALSE;
+ if (cee->cbfn.get_attr_cbfn) {
+ bfa_trc(cee, 0);
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+ }
+}
+
+/*
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ u32 *buffer;
+ int i;
+
+ cee->get_stats_status = status;
+ bfa_trc(cee, 0);
+ if (status == BFA_STATUS_OK) {
+ bfa_trc(cee, 0);
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats_s));
+ /* swap the cee stats */
+ buffer = (u32 *)cee->stats;
+ for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
+ sizeof(u32)); i++)
+ buffer[i] = cpu_to_be32(buffer[i]);
+ }
+ cee->get_stats_pending = BFA_FALSE;
+ bfa_trc(cee, 0);
+ if (cee->cbfn.get_stats_cbfn) {
+ bfa_trc(cee, 0);
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+ }
+}
+
+/*
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = BFA_FALSE;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/*
+ * bfa_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
+ BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_cee_mem_claim()
+ *
+ * @brief Initialized CEE DMA Memory
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
+{
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
+ sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+ cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
+ sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+ cee->attr = (struct bfa_cee_attr_s *) dma_kva;
+ cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
+ sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
+}
+
+/*
+ * bfa_cee_get_attr()
+ *
+ * @brief
+ * Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req_s *cmd;
+
+ WARN_ON((cee == NULL) || (cee->ioc == NULL));
+ bfa_trc(cee, 0);
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->get_attr_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->get_attr_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_get_stats()
+ *
+ * @brief
+ * Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
+ bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req_s *cmd;
+
+ WARN_ON((cee == NULL) || (cee->ioc == NULL));
+
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->get_stats_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->get_stats_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
+ cee->stats = stats;
+ cee->cbfn.get_stats_cbfn = cbfn;
+ cee->cbfn.get_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_reset_stats()
+ *
+ * @brief Clears CEE Stats in the f/w.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_reset_stats(struct bfa_cee_s *cee,
+ bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_reset_stats_s *cmd;
+
+ WARN_ON((cee == NULL) || (cee->ioc == NULL));
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->reset_stats_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->reset_stats_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
+ cee->cbfn.reset_stats_cbfn = cbfn;
+ cee->cbfn.reset_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+ bfa_ioc_portid(cee->ioc));
+ bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_isrs()
+ *
+ * @brief Handles Mail-box interrupts for CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp_s *get_rsp;
+ struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;
+ msg = (union bfi_cee_i2h_msg_u *) m;
+ get_rsp = (struct bfi_cee_get_rsp_s *) m;
+ bfa_trc(cee, msg->mh.msg_id);
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_trc(cee, get_rsp->cmd_status);
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * bfa_cee_notify()
+ *
+ * @brief CEE module IOC event handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
+ *
+ * @return void
+ */
+
+void
+bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
+
+ bfa_trc(cee, event);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (cee->get_attr_pending == BFA_TRUE) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = BFA_FALSE;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(
+ cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->get_stats_pending == BFA_TRUE) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = BFA_FALSE;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(
+ cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->reset_stats_pending == BFA_TRUE) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = BFA_FALSE;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(
+ cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * bfa_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
+ void *dev)
+{
+ WARN_ON(cee == NULL);
+ cee->dev = dev;
+ cee->ioc = ioc;
+
+ bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_q_qe_init(&cee->ioc_notify);
+ bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+ list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
+}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
new file mode 100644
index 000000000..2fcab6bc6
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_PORT_H__
+#define __BFA_PORT_H__
+
+#include "bfa_defs_svc.h"
+#include "bfa_ioc.h"
+#include "bfa_cs.h"
+
+typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_port_s {
+ void *dev;
+ struct bfa_ioc_s *ioc;
+ struct bfa_trc_mod_s *trcmod;
+ u32 msgtag;
+ bfa_boolean_t stats_busy;
+ struct bfa_mbox_cmd_s stats_mb;
+ bfa_port_stats_cbfn_t stats_cbfn;
+ void *stats_cbarg;
+ bfa_status_t stats_status;
+ u32 stats_reset_time;
+ union bfa_port_stats_u *stats;
+ struct bfa_dma_s stats_dma;
+ bfa_boolean_t endis_pending;
+ struct bfa_mbox_cmd_s endis_mb;
+ bfa_port_endis_cbfn_t endis_cbfn;
+ void *endis_cbarg;
+ bfa_status_t endis_status;
+ struct bfa_ioc_notify_s ioc_notify;
+ bfa_boolean_t pbc_disabled;
+ bfa_boolean_t dport_enabled;
+ struct bfa_mem_dma_s port_dma;
+};
+
+#define BFA_MEM_PORT_DMA(__bfa) (&((__bfa)->modules.port.port_dma))
+
+void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod);
+void bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
+
+bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
+ union bfa_port_stats_u *stats,
+ bfa_port_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
+ bfa_port_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_enable(struct bfa_port_s *port,
+ bfa_port_endis_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_disable(struct bfa_port_s *port,
+ bfa_port_endis_cbfn_t cbfn, void *cbarg);
+u32 bfa_port_meminfo(void);
+void bfa_port_mem_claim(struct bfa_port_s *port,
+ u8 *dma_kva, u64 dma_pa);
+void bfa_port_set_dportenabled(struct bfa_port_s *port,
+ bfa_boolean_t enabled);
+
+/*
+ * CEE declaration
+ */
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_cee_cbfn_s {
+ bfa_cee_get_attr_cbfn_t get_attr_cbfn;
+ void *get_attr_cbarg;
+ bfa_cee_get_stats_cbfn_t get_stats_cbfn;
+ void *get_stats_cbarg;
+ bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
+ void *reset_stats_cbarg;
+};
+
+struct bfa_cee_s {
+ void *dev;
+ bfa_boolean_t get_attr_pending;
+ bfa_boolean_t get_stats_pending;
+ bfa_boolean_t reset_stats_pending;
+ bfa_status_t get_attr_status;
+ bfa_status_t get_stats_status;
+ bfa_status_t reset_stats_status;
+ struct bfa_cee_cbfn_s cbfn;
+ struct bfa_ioc_notify_s ioc_notify;
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_cee_attr_s *attr;
+ struct bfa_cee_stats_s *stats;
+ struct bfa_dma_s attr_dma;
+ struct bfa_dma_s stats_dma;
+ struct bfa_ioc_s *ioc;
+ struct bfa_mbox_cmd_s get_cfg_mb;
+ struct bfa_mbox_cmd_s get_stats_mb;
+ struct bfa_mbox_cmd_s reset_stats_mb;
+ struct bfa_mem_dma_s cee_dma;
+};
+
+#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma))
+
+u32 bfa_cee_meminfo(void);
+void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa);
+void bfa_cee_attach(struct bfa_cee_s *cee,
+ struct bfa_ioc_s *ioc, void *dev);
+bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee,
+ struct bfa_cee_attr_s *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee,
+ struct bfa_cee_stats_s *stats,
+ bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee,
+ bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+
+#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
new file mode 100644
index 000000000..625225f31
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -0,0 +1,7062 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_plog.h"
+#include "bfa_cs.h"
+#include "bfa_modules.h"
+
+BFA_TRC_FILE(HAL, FCXP);
+BFA_MODULE(fcdiag);
+BFA_MODULE(fcxp);
+BFA_MODULE(sgpg);
+BFA_MODULE(lps);
+BFA_MODULE(fcport);
+BFA_MODULE(rport);
+BFA_MODULE(uf);
+
+/*
+ * LPS related definitions
+ */
+#define BFA_LPS_MIN_LPORTS (1)
+#define BFA_LPS_MAX_LPORTS (256)
+
+/*
+ * Maximum Vports supported per physical port or vf.
+ */
+#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
+#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
+
+
+/*
+ * FC PORT related definitions
+ */
+/*
+ * The port is considered disabled if corresponding physical port or IOC are
+ * disabled explicitly
+ */
+#define BFA_PORT_IS_DISABLED(bfa) \
+ ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
+ (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
+
+/*
+ * BFA port state machine events
+ */
+enum bfa_fcport_sm_event {
+ BFA_FCPORT_SM_START = 1, /* start port state machine */
+ BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
+ BFA_FCPORT_SM_ENABLE = 3, /* enable port */
+ BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
+ BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
+ BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
+ BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
+ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
+ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
+ BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
+ BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
+ BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */
+ BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */
+ BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */
+};
+
+/*
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+ BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
+ BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
+ BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
+};
+
+/*
+ * RPORT related definitions
+ */
+#define bfa_rport_offline_cb(__rp) do { \
+ if ((__rp)->bfa->fcs) \
+ bfa_cb_rport_offline((__rp)->rport_drv); \
+ else { \
+ bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
+ __bfa_cb_rport_offline, (__rp)); \
+ } \
+} while (0)
+
+#define bfa_rport_online_cb(__rp) do { \
+ if ((__rp)->bfa->fcs) \
+ bfa_cb_rport_online((__rp)->rport_drv); \
+ else { \
+ bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
+ __bfa_cb_rport_online, (__rp)); \
+ } \
+} while (0)
+
+/*
+ * forward declarations FCXP related functions
+ */
+static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
+static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+ struct bfi_fcxp_send_rsp_s *fcxp_rsp);
+static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
+ struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
+static void bfa_fcxp_qresume(void *cbarg);
+static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
+ struct bfi_fcxp_send_req_s *send_req);
+
+/*
+ * forward declarations for LPS functions
+ */
+static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
+static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
+ struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev);
+static void bfa_lps_detach(struct bfa_s *bfa);
+static void bfa_lps_start(struct bfa_s *bfa);
+static void bfa_lps_stop(struct bfa_s *bfa);
+static void bfa_lps_iocdisable(struct bfa_s *bfa);
+static void bfa_lps_login_rsp(struct bfa_s *bfa,
+ struct bfi_lps_login_rsp_s *rsp);
+static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
+static void bfa_lps_logout_rsp(struct bfa_s *bfa,
+ struct bfi_lps_logout_rsp_s *rsp);
+static void bfa_lps_reqq_resume(void *lps_arg);
+static void bfa_lps_free(struct bfa_lps_s *lps);
+static void bfa_lps_send_login(struct bfa_lps_s *lps);
+static void bfa_lps_send_logout(struct bfa_lps_s *lps);
+static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
+static void bfa_lps_login_comp(struct bfa_lps_s *lps);
+static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
+static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
+
+/*
+ * forward declaration for LPS state machine
+ */
+static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
+ event);
+static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
+ enum bfa_lps_event event);
+static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
+ event);
+
+/*
+ * forward declaration for FC Port functions
+ */
+static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
+static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
+static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
+static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
+ enum bfa_port_linkstate event, bfa_boolean_t trunk);
+static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
+ enum bfa_port_linkstate event);
+static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_stats_get_timeout(void *cbarg);
+static void bfa_fcport_stats_clr_timeout(void *cbarg);
+static void bfa_trunk_iocdisable(struct bfa_s *bfa);
+
+/*
+ * forward declaration for FC PORT state machine
+ */
+static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+
+static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+
+static struct bfa_sm_table_s hal_port_sm_table[] = {
+ {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
+ {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
+ {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
+ {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
+ {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
+ {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
+ {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
+ {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
+ {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
+ {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
+ {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
+ {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
+ {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
+ {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
+ {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
+};
+
+
+/*
+ * forward declaration for RPORT related functions
+ */
+static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
+static void bfa_rport_free(struct bfa_rport_s *rport);
+static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
+static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
+static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
+static void __bfa_cb_rport_online(void *cbarg,
+ bfa_boolean_t complete);
+static void __bfa_cb_rport_offline(void *cbarg,
+ bfa_boolean_t complete);
+
+/*
+ * forward declaration for RPORT state machine
+ */
+static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_created(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_online(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
+ enum bfa_rport_event event);
+
+/*
+ * PLOG related definitions
+ */
+static int
+plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
+{
+ if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
+ (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
+ return 1;
+
+ if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
+ (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
+ return 1;
+
+ return 0;
+}
+
+static u64
+bfa_get_log_time(void)
+{
+ u64 system_time = 0;
+ struct timeval tv;
+ do_gettimeofday(&tv);
+
+ /* We are interested in seconds only. */
+ system_time = tv.tv_sec;
+ return system_time;
+}
+
+static void
+bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
+{
+ u16 tail;
+ struct bfa_plog_rec_s *pl_recp;
+
+ if (plog->plog_enabled == 0)
+ return;
+
+ if (plkd_validate_logrec(pl_rec)) {
+ WARN_ON(1);
+ return;
+ }
+
+ tail = plog->tail;
+
+ pl_recp = &(plog->plog_recs[tail]);
+
+ memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
+
+ pl_recp->tv = bfa_get_log_time();
+ BFA_PL_LOG_REC_INCR(plog->tail);
+
+ if (plog->head == plog->tail)
+ BFA_PL_LOG_REC_INCR(plog->head);
+}
+
+void
+bfa_plog_init(struct bfa_plog_s *plog)
+{
+ memset((char *)plog, 0, sizeof(struct bfa_plog_s));
+
+ memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
+ plog->head = plog->tail = 0;
+ plog->plog_enabled = 1;
+}
+
+void
+bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event,
+ u16 misc, char *log_str)
+{
+ struct bfa_plog_rec_s lp;
+
+ if (plog->plog_enabled) {
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+ lp.mid = mid;
+ lp.eid = event;
+ lp.log_type = BFA_PL_LOG_TYPE_STRING;
+ lp.misc = misc;
+ strncpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ - 1);
+ lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
+ bfa_plog_add(plog, &lp);
+ }
+}
+
+void
+bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event,
+ u16 misc, u32 *intarr, u32 num_ints)
+{
+ struct bfa_plog_rec_s lp;
+ u32 i;
+
+ if (num_ints > BFA_PL_INT_LOG_SZ)
+ num_ints = BFA_PL_INT_LOG_SZ;
+
+ if (plog->plog_enabled) {
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+ lp.mid = mid;
+ lp.eid = event;
+ lp.log_type = BFA_PL_LOG_TYPE_INT;
+ lp.misc = misc;
+
+ for (i = 0; i < num_ints; i++)
+ lp.log_entry.int_log[i] = intarr[i];
+
+ lp.log_num_ints = (u8) num_ints;
+
+ bfa_plog_add(plog, &lp);
+ }
+}
+
+void
+bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event,
+ u16 misc, struct fchs_s *fchdr)
+{
+ struct bfa_plog_rec_s lp;
+ u32 *tmp_int = (u32 *) fchdr;
+ u32 ints[BFA_PL_INT_LOG_SZ];
+
+ if (plog->plog_enabled) {
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+
+ ints[0] = tmp_int[0];
+ ints[1] = tmp_int[1];
+ ints[2] = tmp_int[4];
+
+ bfa_plog_intarr(plog, mid, event, misc, ints, 3);
+ }
+}
+
+void
+bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+ enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
+ u32 pld_w0)
+{
+ struct bfa_plog_rec_s lp;
+ u32 *tmp_int = (u32 *) fchdr;
+ u32 ints[BFA_PL_INT_LOG_SZ];
+
+ if (plog->plog_enabled) {
+ memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+
+ ints[0] = tmp_int[0];
+ ints[1] = tmp_int[1];
+ ints[2] = tmp_int[4];
+ ints[3] = pld_w0;
+
+ bfa_plog_intarr(plog, mid, event, misc, ints, 4);
+ }
+}
+
+
+/*
+ * fcxp_pvt BFA FCXP private functions
+ */
+
+static void
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
+{
+ u16 i;
+ struct bfa_fcxp_s *fcxp;
+
+ fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
+ memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+
+ INIT_LIST_HEAD(&mod->fcxp_req_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_active_q);
+ INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
+ INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
+
+ mod->fcxp_list = fcxp;
+
+ for (i = 0; i < mod->num_fcxps; i++) {
+ fcxp->fcxp_mod = mod;
+ fcxp->fcxp_tag = i;
+
+ if (i < (mod->num_fcxps / 2)) {
+ list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+ fcxp->req_rsp = BFA_TRUE;
+ } else {
+ list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+ fcxp->req_rsp = BFA_FALSE;
+ }
+
+ bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
+ fcxp->reqq_waiting = BFA_FALSE;
+
+ fcxp = fcxp + 1;
+ }
+
+ bfa_mem_kva_curp(mod) = (void *)fcxp;
+}
+
+static void
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
+ struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_fcxp;
+ u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+ u32 per_fcxp_sz;
+
+ if (num_fcxps == 0)
+ return;
+
+ if (cfg->drvcfg.min_cfg)
+ per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
+ else
+ per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
+
+ /* dma memory */
+ nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
+ per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
+
+ bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
+ if (num_fcxps >= per_seg_fcxp) {
+ num_fcxps -= per_seg_fcxp;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_fcxp * per_fcxp_sz);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_fcxps * per_fcxp_sz);
+ }
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, fcxp_kva,
+ cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
+}
+
+static void
+bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ mod->bfa = bfa;
+ mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+
+ /*
+ * Initialize FCXP request and response payload sizes.
+ */
+ mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
+ if (!cfg->drvcfg.min_cfg)
+ mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
+
+ INIT_LIST_HEAD(&mod->req_wait_q);
+ INIT_LIST_HEAD(&mod->rsp_wait_q);
+
+ claim_fcxps_mem(mod);
+}
+
+static void
+bfa_fcxp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+ struct bfa_fcxp_s *fcxp;
+ struct list_head *qe, *qen;
+
+ /* Enqueue unused fcxp resources to free_q */
+ list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
+ list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
+
+ list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
+ fcxp = (struct bfa_fcxp_s *) qe;
+ if (fcxp->caller == NULL) {
+ fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+ BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
+ bfa_fcxp_free(fcxp);
+ } else {
+ fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
+ bfa_cb_queue(bfa, &fcxp->hcb_qe,
+ __bfa_fcxp_send_cbfn, fcxp);
+ }
+ }
+}
+
+static struct bfa_fcxp_s *
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
+{
+ struct bfa_fcxp_s *fcxp;
+
+ if (req)
+ bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
+ else
+ bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
+
+ if (fcxp)
+ list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
+
+ return fcxp;
+}
+
+static void
+bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
+ struct bfa_s *bfa,
+ u8 *use_ibuf,
+ u32 *nr_sgles,
+ bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
+ bfa_fcxp_get_sglen_t *r_sglen_cbfn,
+ struct list_head *r_sgpg_q,
+ int n_sgles,
+ bfa_fcxp_get_sgaddr_t sga_cbfn,
+ bfa_fcxp_get_sglen_t sglen_cbfn)
+{
+
+ WARN_ON(bfa == NULL);
+
+ bfa_trc(bfa, fcxp->fcxp_tag);
+
+ if (n_sgles == 0) {
+ *use_ibuf = 1;
+ } else {
+ WARN_ON(*sga_cbfn == NULL);
+ WARN_ON(*sglen_cbfn == NULL);
+
+ *use_ibuf = 0;
+ *r_sga_cbfn = sga_cbfn;
+ *r_sglen_cbfn = sglen_cbfn;
+
+ *nr_sgles = n_sgles;
+
+ /*
+ * alloc required sgpgs
+ */
+ if (n_sgles > BFI_SGE_INLINE)
+ WARN_ON(1);
+ }
+
+}
+
+static void
+bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
+ void *caller, struct bfa_s *bfa, int nreq_sgles,
+ int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+ bfa_fcxp_get_sglen_t req_sglen_cbfn,
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+{
+
+ WARN_ON(bfa == NULL);
+
+ bfa_trc(bfa, fcxp->fcxp_tag);
+
+ fcxp->caller = caller;
+
+ bfa_fcxp_init_reqrsp(fcxp, bfa,
+ &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
+ &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
+ nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
+
+ bfa_fcxp_init_reqrsp(fcxp, bfa,
+ &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
+ &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
+ nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
+
+}
+
+static void
+bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+ struct bfa_fcxp_wqe_s *wqe;
+
+ if (fcxp->req_rsp)
+ bfa_q_deq(&mod->req_wait_q, &wqe);
+ else
+ bfa_q_deq(&mod->rsp_wait_q, &wqe);
+
+ if (wqe) {
+ bfa_trc(mod->bfa, fcxp->fcxp_tag);
+
+ bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
+ wqe->nrsp_sgles, wqe->req_sga_cbfn,
+ wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
+ wqe->rsp_sglen_cbfn);
+
+ wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
+ return;
+ }
+
+ WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
+ list_del(&fcxp->qe);
+
+ if (fcxp->req_rsp)
+ list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+ else
+ list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+}
+
+static void
+bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+ /* discarded fcxp completion */
+}
+
+static void
+__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcxp_s *fcxp = cbarg;
+
+ if (complete) {
+ fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+ fcxp->rsp_status, fcxp->rsp_len,
+ fcxp->residue_len, &fcxp->rsp_fchs);
+ } else {
+ bfa_fcxp_free(fcxp);
+ }
+}
+
+static void
+hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+ struct bfa_fcxp_s *fcxp;
+ u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
+
+ bfa_trc(bfa, fcxp_tag);
+
+ fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
+
+ /*
+ * @todo f/w should not set residue to non-0 when everything
+ * is received.
+ */
+ if (fcxp_rsp->req_status == BFA_STATUS_OK)
+ fcxp_rsp->residue_len = 0;
+ else
+ fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
+
+ fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
+
+ WARN_ON(fcxp->send_cbfn == NULL);
+
+ hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
+
+ if (fcxp->send_cbfn != NULL) {
+ bfa_trc(mod->bfa, (NULL == fcxp->caller));
+ if (fcxp->caller == NULL) {
+ fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+ fcxp_rsp->req_status, fcxp_rsp->rsp_len,
+ fcxp_rsp->residue_len, &fcxp_rsp->fchs);
+ /*
+ * fcxp automatically freed on return from the callback
+ */
+ bfa_fcxp_free(fcxp);
+ } else {
+ fcxp->rsp_status = fcxp_rsp->req_status;
+ fcxp->rsp_len = fcxp_rsp->rsp_len;
+ fcxp->residue_len = fcxp_rsp->residue_len;
+ fcxp->rsp_fchs = fcxp_rsp->fchs;
+
+ bfa_cb_queue(bfa, &fcxp->hcb_qe,
+ __bfa_fcxp_send_cbfn, fcxp);
+ }
+ } else {
+ bfa_trc(bfa, (NULL == fcxp->send_cbfn));
+ }
+}
+
+static void
+hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
+ struct fchs_s *fchs)
+{
+ /*
+ * TODO: TX ox_id
+ */
+ if (reqlen > 0) {
+ if (fcxp->use_ireqbuf) {
+ u32 pld_w0 =
+ *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
+
+ bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_TX,
+ reqlen + sizeof(struct fchs_s), fchs,
+ pld_w0);
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_TX,
+ reqlen + sizeof(struct fchs_s),
+ fchs);
+ }
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
+ reqlen + sizeof(struct fchs_s), fchs);
+ }
+}
+
+static void
+hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+ struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+ if (fcxp_rsp->rsp_len > 0) {
+ if (fcxp->use_irspbuf) {
+ u32 pld_w0 =
+ *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
+
+ bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_RX,
+ (u16) fcxp_rsp->rsp_len,
+ &fcxp_rsp->fchs, pld_w0);
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_RX,
+ (u16) fcxp_rsp->rsp_len,
+ &fcxp_rsp->fchs);
+ }
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
+ (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
+ }
+}
+
+/*
+ * Handler to resume sending fcxp when space in available in cpe queue.
+ */
+static void
+bfa_fcxp_qresume(void *cbarg)
+{
+ struct bfa_fcxp_s *fcxp = cbarg;
+ struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
+ struct bfi_fcxp_send_req_s *send_req;
+
+ fcxp->reqq_waiting = BFA_FALSE;
+ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+ bfa_fcxp_queue(fcxp, send_req);
+}
+
+/*
+ * Queue fcxp send request to foimrware.
+ */
+static void
+bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
+{
+ struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
+ struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
+ struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
+ struct bfa_rport_s *rport = reqi->bfa_rport;
+
+ bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
+ bfa_fn_lpu(bfa));
+
+ send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
+ if (rport) {
+ send_req->rport_fw_hndl = rport->fw_handle;
+ send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
+ if (send_req->max_frmsz == 0)
+ send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
+ } else {
+ send_req->rport_fw_hndl = 0;
+ send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
+ }
+
+ send_req->vf_id = cpu_to_be16(reqi->vf_id);
+ send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
+ send_req->class = reqi->class;
+ send_req->rsp_timeout = rspi->rsp_timeout;
+ send_req->cts = reqi->cts;
+ send_req->fchs = reqi->fchs;
+
+ send_req->req_len = cpu_to_be32(reqi->req_tot_len);
+ send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
+
+ /*
+ * setup req sgles
+ */
+ if (fcxp->use_ireqbuf == 1) {
+ bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+ BFA_FCXP_REQ_PLD_PA(fcxp));
+ } else {
+ if (fcxp->nreq_sgles > 0) {
+ WARN_ON(fcxp->nreq_sgles != 1);
+ bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+ fcxp->req_sga_cbfn(fcxp->caller, 0));
+ } else {
+ WARN_ON(reqi->req_tot_len != 0);
+ bfa_alen_set(&send_req->rsp_alen, 0, 0);
+ }
+ }
+
+ /*
+ * setup rsp sgles
+ */
+ if (fcxp->use_irspbuf == 1) {
+ WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
+
+ bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+ BFA_FCXP_RSP_PLD_PA(fcxp));
+ } else {
+ if (fcxp->nrsp_sgles > 0) {
+ WARN_ON(fcxp->nrsp_sgles != 1);
+ bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+ fcxp->rsp_sga_cbfn(fcxp->caller, 0));
+
+ } else {
+ WARN_ON(rspi->rsp_maxlen != 0);
+ bfa_alen_set(&send_req->rsp_alen, 0, 0);
+ }
+ }
+
+ hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
+
+ bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
+
+ bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
+ bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
+}
+
+/*
+ * Allocate an FCXP instance to send a response or to send a request
+ * that has a response. Request/response buffers are allocated by caller.
+ *
+ * @param[in] bfa BFA bfa instance
+ * @param[in] nreq_sgles Number of SG elements required for request
+ * buffer. 0, if fcxp internal buffers are used.
+ * Use bfa_fcxp_get_reqbuf() to get the
+ * internal req buffer.
+ * @param[in] req_sgles SG elements describing request buffer. Will be
+ * copied in by BFA and hence can be freed on
+ * return from this function.
+ * @param[in] get_req_sga function ptr to be called to get a request SG
+ * Address (given the sge index).
+ * @param[in] get_req_sglen function ptr to be called to get a request SG
+ * len (given the sge index).
+ * @param[in] get_rsp_sga function ptr to be called to get a response SG
+ * Address (given the sge index).
+ * @param[in] get_rsp_sglen function ptr to be called to get a response SG
+ * len (given the sge index).
+ * @param[in] req Allocated FCXP is used to send req or rsp?
+ * request - BFA_TRUE, response - BFA_FALSE
+ *
+ * @return FCXP instance. NULL on failure.
+ */
+struct bfa_fcxp_s *
+bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+ int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+ bfa_fcxp_get_sglen_t req_sglen_cbfn,
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
+{
+ struct bfa_fcxp_s *fcxp = NULL;
+
+ WARN_ON(bfa == NULL);
+
+ fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
+ if (fcxp == NULL)
+ return NULL;
+
+ bfa_trc(bfa, fcxp->fcxp_tag);
+
+ bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
+ req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
+
+ return fcxp;
+}
+
+/*
+ * Get the internal request buffer pointer
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return pointer to the internal request buffer
+ */
+void *
+bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+ void *reqbuf;
+
+ WARN_ON(fcxp->use_ireqbuf != 1);
+ reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+ mod->req_pld_sz + mod->rsp_pld_sz);
+ return reqbuf;
+}
+
+u32
+bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+ return mod->req_pld_sz;
+}
+
+/*
+ * Get the internal response buffer pointer
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return pointer to the internal request buffer
+ */
+void *
+bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+ void *fcxp_buf;
+
+ WARN_ON(fcxp->use_irspbuf != 1);
+
+ fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+ mod->req_pld_sz + mod->rsp_pld_sz);
+
+ /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
+ return ((u8 *) fcxp_buf) + mod->req_pld_sz;
+}
+
+/*
+ * Free the BFA FCXP
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return void
+ */
+void
+bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+ WARN_ON(fcxp == NULL);
+ bfa_trc(mod->bfa, fcxp->fcxp_tag);
+ bfa_fcxp_put(fcxp);
+}
+
+/*
+ * Send a FCXP request
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
+ * @param[in] vf_id virtual Fabric ID
+ * @param[in] lp_tag lport tag
+ * @param[in] cts use Continuous sequence
+ * @param[in] cos fc Class of Service
+ * @param[in] reqlen request length, does not include FCHS length
+ * @param[in] fchs fc Header Pointer. The header content will be copied
+ * in by BFA.
+ *
+ * @param[in] cbfn call back function to be called on receiving
+ * the response
+ * @param[in] cbarg arg for cbfn
+ * @param[in] rsp_timeout
+ * response timeout
+ *
+ * @return bfa_status_t
+ */
+void
+bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+ u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
+ u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
+ void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
+{
+ struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
+ struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
+ struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
+ struct bfi_fcxp_send_req_s *send_req;
+
+ bfa_trc(bfa, fcxp->fcxp_tag);
+
+ /*
+ * setup request/response info
+ */
+ reqi->bfa_rport = rport;
+ reqi->vf_id = vf_id;
+ reqi->lp_tag = lp_tag;
+ reqi->class = cos;
+ rspi->rsp_timeout = rsp_timeout;
+ reqi->cts = cts;
+ reqi->fchs = *fchs;
+ reqi->req_tot_len = reqlen;
+ rspi->rsp_maxlen = rsp_maxlen;
+ fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
+ fcxp->send_cbarg = cbarg;
+
+ /*
+ * If no room in CPE queue, wait for space in request queue
+ */
+ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+ if (!send_req) {
+ bfa_trc(bfa, fcxp->fcxp_tag);
+ fcxp->reqq_waiting = BFA_TRUE;
+ bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
+ return;
+ }
+
+ bfa_fcxp_queue(fcxp, send_req);
+}
+
+/*
+ * Abort a BFA FCXP
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return void
+ */
+bfa_status_t
+bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
+{
+ bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
+ WARN_ON(1);
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+ bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
+ void *caller, int nreq_sgles,
+ int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+ bfa_fcxp_get_sglen_t req_sglen_cbfn,
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ if (req)
+ WARN_ON(!list_empty(&mod->fcxp_req_free_q));
+ else
+ WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
+
+ wqe->alloc_cbfn = alloc_cbfn;
+ wqe->alloc_cbarg = alloc_cbarg;
+ wqe->caller = caller;
+ wqe->bfa = bfa;
+ wqe->nreq_sgles = nreq_sgles;
+ wqe->nrsp_sgles = nrsp_sgles;
+ wqe->req_sga_cbfn = req_sga_cbfn;
+ wqe->req_sglen_cbfn = req_sglen_cbfn;
+ wqe->rsp_sga_cbfn = rsp_sga_cbfn;
+ wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
+
+ if (req)
+ list_add_tail(&wqe->qe, &mod->req_wait_q);
+ else
+ list_add_tail(&wqe->qe, &mod->rsp_wait_q);
+}
+
+void
+bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
+ !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
+ list_del(&wqe->qe);
+}
+
+void
+bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
+{
+ /*
+ * If waiting for room in request queue, cancel reqq wait
+ * and free fcxp.
+ */
+ if (fcxp->reqq_waiting) {
+ fcxp->reqq_waiting = BFA_FALSE;
+ bfa_reqq_wcancel(&fcxp->reqq_wqe);
+ bfa_fcxp_free(fcxp);
+ return;
+ }
+
+ fcxp->send_cbfn = bfa_fcxp_null_comp;
+}
+
+void
+bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ switch (msg->mhdr.msg_id) {
+ case BFI_FCXP_I2H_SEND_RSP:
+ hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
+ break;
+
+ default:
+ bfa_trc(bfa, msg->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+u32
+bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ return mod->rsp_pld_sz;
+}
+
+void
+bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
+ if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
+ bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_req_unused_q);
+ } else {
+ bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_rsp_unused_q);
+ }
+ }
+}
+
+/*
+ * BFA LPS state machine functions
+ */
+
+/*
+ * Init state -- no login
+ */
+static void
+bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_LOGIN:
+ if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+ bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
+ bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+ } else {
+ bfa_sm_set_state(lps, bfa_lps_sm_login);
+ bfa_lps_send_login(lps);
+ }
+
+ if (lps->fdisc)
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FDISC Request");
+ else
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FLOGI Request");
+ break;
+
+ case BFA_LPS_SM_LOGOUT:
+ bfa_lps_logout_comp(lps);
+ break;
+
+ case BFA_LPS_SM_DELETE:
+ bfa_lps_free(lps);
+ break;
+
+ case BFA_LPS_SM_RX_CVL:
+ case BFA_LPS_SM_OFFLINE:
+ break;
+
+ case BFA_LPS_SM_FWRSP:
+ /*
+ * Could happen when fabric detects loopback and discards
+ * the lps request. Fw will eventually sent out the timeout
+ * Just ignore
+ */
+ break;
+ case BFA_LPS_SM_SET_N2N_PID:
+ /*
+ * When topology is set to loop, bfa_lps_set_n2n_pid() sends
+ * this event. Ignore this event.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+/*
+ * login is in progress -- awaiting response from firmware
+ */
+static void
+bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_FWRSP:
+ if (lps->status == BFA_STATUS_OK) {
+ bfa_sm_set_state(lps, bfa_lps_sm_online);
+ if (lps->fdisc)
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FDISC Accept");
+ else
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
+ /* If N2N, send the assigned PID to FW */
+ bfa_trc(lps->bfa, lps->fport);
+ bfa_trc(lps->bfa, lps->lp_pid);
+
+ if (!lps->fport && lps->lp_pid)
+ bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
+ } else {
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ if (lps->fdisc)
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0,
+ "FDISC Fail (RJT or timeout)");
+ else
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0,
+ "FLOGI Fail (RJT or timeout)");
+ }
+ bfa_lps_login_comp(lps);
+ break;
+
+ case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ break;
+
+ case BFA_LPS_SM_SET_N2N_PID:
+ bfa_trc(lps->bfa, lps->fport);
+ bfa_trc(lps->bfa, lps->lp_pid);
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+/*
+ * login pending - awaiting space in request queue
+ */
+static void
+bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_RESUME:
+ bfa_sm_set_state(lps, bfa_lps_sm_login);
+ bfa_lps_send_login(lps);
+ break;
+
+ case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ bfa_reqq_wcancel(&lps->wqe);
+ break;
+
+ case BFA_LPS_SM_RX_CVL:
+ /*
+ * Login was not even sent out; so when getting out
+ * of this state, it will appear like a login retry
+ * after Clear virtual link
+ */
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+/*
+ * login complete
+ */
+static void
+bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_LOGOUT:
+ if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+ bfa_sm_set_state(lps, bfa_lps_sm_logowait);
+ bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+ } else {
+ bfa_sm_set_state(lps, bfa_lps_sm_logout);
+ bfa_lps_send_logout(lps);
+ }
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGO, 0, "Logout");
+ break;
+
+ case BFA_LPS_SM_RX_CVL:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+
+ /* Let the vport module know about this event */
+ bfa_lps_cvl_event(lps);
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
+ break;
+
+ case BFA_LPS_SM_SET_N2N_PID:
+ if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+ bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
+ bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+ } else
+ bfa_lps_send_set_n2n_pid(lps);
+ break;
+
+ case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+/*
+ * login complete
+ */
+static void
+bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_RESUME:
+ bfa_sm_set_state(lps, bfa_lps_sm_online);
+ bfa_lps_send_set_n2n_pid(lps);
+ break;
+
+ case BFA_LPS_SM_LOGOUT:
+ bfa_sm_set_state(lps, bfa_lps_sm_logowait);
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGO, 0, "Logout");
+ break;
+
+ case BFA_LPS_SM_RX_CVL:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ bfa_reqq_wcancel(&lps->wqe);
+
+ /* Let the vport module know about this event */
+ bfa_lps_cvl_event(lps);
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
+ break;
+
+ case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ bfa_reqq_wcancel(&lps->wqe);
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+/*
+ * logout in progress - awaiting firmware response
+ */
+static void
+bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_FWRSP:
+ case BFA_LPS_SM_OFFLINE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ bfa_lps_logout_comp(lps);
+ break;
+
+ case BFA_LPS_SM_DELETE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+/*
+ * logout pending -- awaiting space in request queue
+ */
+static void
+bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, event);
+
+ switch (event) {
+ case BFA_LPS_SM_RESUME:
+ bfa_sm_set_state(lps, bfa_lps_sm_logout);
+ bfa_lps_send_logout(lps);
+ break;
+
+ case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ bfa_reqq_wcancel(&lps->wqe);
+ break;
+
+ default:
+ bfa_sm_fault(lps->bfa, event);
+ }
+}
+
+
+
+/*
+ * lps_pvt BFA LPS private functions
+ */
+
+/*
+ * return memory requirement
+ */
+static void
+bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
+
+ if (cfg->drvcfg.min_cfg)
+ bfa_mem_kva_setup(minfo, lps_kva,
+ sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
+ else
+ bfa_mem_kva_setup(minfo, lps_kva,
+ sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
+}
+
+/*
+ * bfa module attach at initialization time
+ */
+static void
+bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+ int i;
+
+ mod->num_lps = BFA_LPS_MAX_LPORTS;
+ if (cfg->drvcfg.min_cfg)
+ mod->num_lps = BFA_LPS_MIN_LPORTS;
+ else
+ mod->num_lps = BFA_LPS_MAX_LPORTS;
+ mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
+
+ bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
+
+ INIT_LIST_HEAD(&mod->lps_free_q);
+ INIT_LIST_HEAD(&mod->lps_active_q);
+ INIT_LIST_HEAD(&mod->lps_login_q);
+
+ for (i = 0; i < mod->num_lps; i++, lps++) {
+ lps->bfa = bfa;
+ lps->bfa_tag = (u8) i;
+ lps->reqq = BFA_REQQ_LPS;
+ bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
+ list_add_tail(&lps->qe, &mod->lps_free_q);
+ }
+}
+
+static void
+bfa_lps_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_lps_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_lps_stop(struct bfa_s *bfa)
+{
+}
+
+/*
+ * IOC in disabled state -- consider all lps offline
+ */
+static void
+bfa_lps_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &mod->lps_active_q) {
+ lps = (struct bfa_lps_s *) qe;
+ bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+ }
+ list_for_each_safe(qe, qen, &mod->lps_login_q) {
+ lps = (struct bfa_lps_s *) qe;
+ bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+ }
+ list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
+}
+
+/*
+ * Firmware login response
+ */
+static void
+bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+
+ WARN_ON(rsp->bfa_tag >= mod->num_lps);
+ lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
+
+ lps->status = rsp->status;
+ switch (rsp->status) {
+ case BFA_STATUS_OK:
+ lps->fw_tag = rsp->fw_tag;
+ lps->fport = rsp->f_port;
+ if (lps->fport)
+ lps->lp_pid = rsp->lp_pid;
+ lps->npiv_en = rsp->npiv_en;
+ lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
+ lps->pr_pwwn = rsp->port_name;
+ lps->pr_nwwn = rsp->node_name;
+ lps->auth_req = rsp->auth_req;
+ lps->lp_mac = rsp->lp_mac;
+ lps->brcd_switch = rsp->brcd_switch;
+ lps->fcf_mac = rsp->fcf_mac;
+
+ break;
+
+ case BFA_STATUS_FABRIC_RJT:
+ lps->lsrjt_rsn = rsp->lsrjt_rsn;
+ lps->lsrjt_expl = rsp->lsrjt_expl;
+
+ break;
+
+ case BFA_STATUS_EPROTOCOL:
+ lps->ext_status = rsp->ext_status;
+
+ break;
+
+ case BFA_STATUS_VPORT_MAX:
+ if (rsp->ext_status)
+ bfa_lps_no_res(lps, rsp->ext_status);
+ break;
+
+ default:
+ /* Nothing to do with other status */
+ break;
+ }
+
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_active_q);
+ bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+}
+
+static void
+bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
+{
+ struct bfa_s *bfa = first_lps->bfa;
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct list_head *qe, *qe_next;
+ struct bfa_lps_s *lps;
+
+ bfa_trc(bfa, count);
+
+ qe = bfa_q_next(first_lps);
+
+ while (count && qe) {
+ qe_next = bfa_q_next(qe);
+ lps = (struct bfa_lps_s *)qe;
+ bfa_trc(bfa, lps->bfa_tag);
+ lps->status = first_lps->status;
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_active_q);
+ bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+ qe = qe_next;
+ count--;
+ }
+}
+
+/*
+ * Firmware logout response
+ */
+static void
+bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+
+ WARN_ON(rsp->bfa_tag >= mod->num_lps);
+ lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
+
+ bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+}
+
+/*
+ * Firmware received a Clear virtual link request (for FCoE)
+ */
+static void
+bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+
+ lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
+
+ bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
+}
+
+/*
+ * Space is available in request queue, resume queueing request to firmware.
+ */
+static void
+bfa_lps_reqq_resume(void *lps_arg)
+{
+ struct bfa_lps_s *lps = lps_arg;
+
+ bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
+}
+
+/*
+ * lps is freed -- triggered by vport delete
+ */
+static void
+bfa_lps_free(struct bfa_lps_s *lps)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
+
+ lps->lp_pid = 0;
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_free_q);
+}
+
+/*
+ * send login request to firmware
+ */
+static void
+bfa_lps_send_login(struct bfa_lps_s *lps)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
+ struct bfi_lps_login_req_s *m;
+
+ m = bfa_reqq_next(lps->bfa, lps->reqq);
+ WARN_ON(!m);
+
+ bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
+ bfa_fn_lpu(lps->bfa));
+
+ m->bfa_tag = lps->bfa_tag;
+ m->alpa = lps->alpa;
+ m->pdu_size = cpu_to_be16(lps->pdusz);
+ m->pwwn = lps->pwwn;
+ m->nwwn = lps->nwwn;
+ m->fdisc = lps->fdisc;
+ m->auth_en = lps->auth_en;
+
+ bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_login_q);
+}
+
+/*
+ * send logout request to firmware
+ */
+static void
+bfa_lps_send_logout(struct bfa_lps_s *lps)
+{
+ struct bfi_lps_logout_req_s *m;
+
+ m = bfa_reqq_next(lps->bfa, lps->reqq);
+ WARN_ON(!m);
+
+ bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
+ bfa_fn_lpu(lps->bfa));
+
+ m->fw_tag = lps->fw_tag;
+ m->port_name = lps->pwwn;
+ bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+}
+
+/*
+ * send n2n pid set request to firmware
+ */
+static void
+bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
+{
+ struct bfi_lps_n2n_pid_req_s *m;
+
+ m = bfa_reqq_next(lps->bfa, lps->reqq);
+ WARN_ON(!m);
+
+ bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
+ bfa_fn_lpu(lps->bfa));
+
+ m->fw_tag = lps->fw_tag;
+ m->lp_pid = lps->lp_pid;
+ bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+}
+
+/*
+ * Indirect login completion handler for non-fcs
+ */
+static void
+bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
+{
+ struct bfa_lps_s *lps = arg;
+
+ if (!complete)
+ return;
+
+ if (lps->fdisc)
+ bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
+ else
+ bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
+}
+
+/*
+ * Login completion handler -- direct call for fcs, queue for others
+ */
+static void
+bfa_lps_login_comp(struct bfa_lps_s *lps)
+{
+ if (!lps->bfa->fcs) {
+ bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
+ lps);
+ return;
+ }
+
+ if (lps->fdisc)
+ bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
+ else
+ bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
+}
+
+/*
+ * Indirect logout completion handler for non-fcs
+ */
+static void
+bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
+{
+ struct bfa_lps_s *lps = arg;
+
+ if (!complete)
+ return;
+
+ if (lps->fdisc)
+ bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+ else
+ bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
+}
+
+/*
+ * Logout completion handler -- direct call for fcs, queue for others
+ */
+static void
+bfa_lps_logout_comp(struct bfa_lps_s *lps)
+{
+ if (!lps->bfa->fcs) {
+ bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
+ lps);
+ return;
+ }
+ if (lps->fdisc)
+ bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+}
+
+/*
+ * Clear virtual link completion handler for non-fcs
+ */
+static void
+bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
+{
+ struct bfa_lps_s *lps = arg;
+
+ if (!complete)
+ return;
+
+ /* Clear virtual link to base port will result in link down */
+ if (lps->fdisc)
+ bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+/*
+ * Received Clear virtual link event --direct call for fcs,
+ * queue for others
+ */
+static void
+bfa_lps_cvl_event(struct bfa_lps_s *lps)
+{
+ if (!lps->bfa->fcs) {
+ bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
+ lps);
+ return;
+ }
+
+ /* Clear virtual link to base port will result in link down */
+ if (lps->fdisc)
+ bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+
+
+/*
+ * lps_public BFA LPS public functions
+ */
+
+u32
+bfa_lps_get_max_vport(struct bfa_s *bfa)
+{
+ if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
+ return BFA_LPS_MAX_VPORTS_SUPP_CT;
+ else
+ return BFA_LPS_MAX_VPORTS_SUPP_CB;
+}
+
+/*
+ * Allocate a lport srvice tag.
+ */
+struct bfa_lps_s *
+bfa_lps_alloc(struct bfa_s *bfa)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps = NULL;
+
+ bfa_q_deq(&mod->lps_free_q, &lps);
+
+ if (lps == NULL)
+ return NULL;
+
+ list_add_tail(&lps->qe, &mod->lps_active_q);
+
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+ return lps;
+}
+
+/*
+ * Free lport service tag. This can be called anytime after an alloc.
+ * No need to wait for any pending login/logout completions.
+ */
+void
+bfa_lps_delete(struct bfa_lps_s *lps)
+{
+ bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
+}
+
+/*
+ * Initiate a lport login.
+ */
+void
+bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
+ wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
+{
+ lps->uarg = uarg;
+ lps->alpa = alpa;
+ lps->pdusz = pdusz;
+ lps->pwwn = pwwn;
+ lps->nwwn = nwwn;
+ lps->fdisc = BFA_FALSE;
+ lps->auth_en = auth_en;
+ bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
+}
+
+/*
+ * Initiate a lport fdisc login.
+ */
+void
+bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
+ wwn_t nwwn)
+{
+ lps->uarg = uarg;
+ lps->alpa = 0;
+ lps->pdusz = pdusz;
+ lps->pwwn = pwwn;
+ lps->nwwn = nwwn;
+ lps->fdisc = BFA_TRUE;
+ lps->auth_en = BFA_FALSE;
+ bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
+}
+
+
+/*
+ * Initiate a lport FDSIC logout.
+ */
+void
+bfa_lps_fdisclogo(struct bfa_lps_s *lps)
+{
+ bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
+}
+
+u8
+bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+
+ return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
+}
+
+/*
+ * Return lport services tag given the pid
+ */
+u8
+bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+ int i;
+
+ for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
+ if (lps->lp_pid == pid)
+ return lps->bfa_tag;
+ }
+
+ /* Return base port tag anyway */
+ return 0;
+}
+
+
+/*
+ * return port id assigned to the base lport
+ */
+u32
+bfa_lps_get_base_pid(struct bfa_s *bfa)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+
+ return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
+}
+
+/*
+ * Set PID in case of n2n (which is assigned during PLOGI)
+ */
+void
+bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
+{
+ bfa_trc(lps->bfa, lps->bfa_tag);
+ bfa_trc(lps->bfa, n2n_pid);
+
+ lps->lp_pid = n2n_pid;
+ bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
+}
+
+/*
+ * LPS firmware message class handler.
+ */
+void
+bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ union bfi_lps_i2h_msg_u msg;
+
+ bfa_trc(bfa, m->mhdr.msg_id);
+ msg.msg = m;
+
+ switch (m->mhdr.msg_id) {
+ case BFI_LPS_I2H_LOGIN_RSP:
+ bfa_lps_login_rsp(bfa, msg.login_rsp);
+ break;
+
+ case BFI_LPS_I2H_LOGOUT_RSP:
+ bfa_lps_logout_rsp(bfa, msg.logout_rsp);
+ break;
+
+ case BFI_LPS_I2H_CVL_EVENT:
+ bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
+ break;
+
+ default:
+ bfa_trc(bfa, m->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+static void
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
+ aen_entry->aen_data.port.pwwn = fcport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
+ BFA_AEN_CAT_PORT, event);
+}
+
+/*
+ * FC PORT state machine functions
+ */
+static void
+bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_START:
+ /*
+ * Start event after IOC is configured and BFA is started.
+ */
+ fcport->use_flash_cfg = BFA_TRUE;
+
+ if (bfa_fcport_send_enable(fcport)) {
+ bfa_trc(fcport->bfa, BFA_TRUE);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ } else {
+ bfa_trc(fcport->bfa, BFA_FALSE);
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_enabling_qwait);
+ }
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ /*
+ * Port is persistently configured to be in enabled state. Do
+ * not change state. Port enabling is done when START event is
+ * received.
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ /*
+ * If a port is persistently configured to be disabled, the
+ * first event will a port disable request.
+ */
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ char pwwn_buf[BFA_STRING_32];
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_QRESUME:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ bfa_fcport_send_enable(fcport);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ /*
+ * Already enable is in progress.
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ /*
+ * Just send disable request to firmware when room becomes
+ * available in request queue.
+ */
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
+ /*
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ char pwwn_buf[BFA_STRING_32];
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_FWRSP:
+ case BFA_FCPORT_SM_LINKDOWN:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+ break;
+
+ case BFA_FCPORT_SM_LINKUP:
+ bfa_fcport_update_linkinfo(fcport);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+
+ WARN_ON(!fcport->event_cbfn);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ /*
+ * Already being enabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_disabling_qwait);
+
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+ char pwwn_buf[BFA_STRING_32];
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_LINKUP:
+ bfa_fcport_update_linkinfo(fcport);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+ WARN_ON(!fcport->event_cbfn);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
+ if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
+
+ bfa_trc(fcport->bfa,
+ pevent->link_state.attr.vc_fcf.fcf.fipenabled);
+ bfa_trc(fcport->bfa,
+ pevent->link_state.attr.vc_fcf.fcf.fipfailed);
+
+ if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_FIP_FCF_DISC, 0,
+ "FIP FCF Discovery Failed");
+ else
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_FIP_FCF_DISC, 0,
+ "FIP FCF Discovered");
+ }
+
+ bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port online: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
+
+ /* If QoS is enabled and it is not online, send AEN */
+ if (fcport->cfg.qos_enabled &&
+ fcport->qos_attr.state != BFA_QOS_ONLINE)
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
+ break;
+
+ case BFA_FCPORT_SM_LINKDOWN:
+ /*
+ * Possible to get link down event.
+ */
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ /*
+ * Already enabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_disabling_qwait);
+
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ char pwwn_buf[BFA_STRING_32];
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_ENABLE:
+ /*
+ * Already enabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_disabling_qwait);
+
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port offline: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_FCPORT_SM_LINKDOWN:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port offline: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "Base port (WWN = %s) "
+ "lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ bfa_fcport_reset_linkinfo(fcport);
+ wwn2str(pwwn_buf, fcport->pwwn);
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port offline: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "Base port (WWN = %s) "
+ "lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ wwn2str(pwwn_buf, fcport->pwwn);
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port offline: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "Base port (WWN = %s) "
+ "lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
+ break;
+
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_QRESUME:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ bfa_fcport_send_disable(fcport);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ /*
+ * Already being disabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
+ /*
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ break;
+
+ case BFA_FCPORT_SM_FAA_MISCONFIG:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_QRESUME:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ bfa_fcport_send_disable(fcport);
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_enabling_qwait);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+ break;
+
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
+ /*
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ char pwwn_buf[BFA_STRING_32];
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_FWRSP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ /*
+ * Already being disabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_enabling_qwait);
+
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port enabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
+ /*
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ char pwwn_buf[BFA_STRING_32];
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_START:
+ /*
+ * Ignore start event for a port that is disabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_enabling_qwait);
+
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+ wwn2str(pwwn_buf, fcport->pwwn);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "Base port enabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ /*
+ * Already disabled.
+ */
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ break;
+
+ case BFA_FCPORT_SM_DPORTENABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
+ break;
+
+ case BFA_FCPORT_SM_DDPORTENABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_START:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_enabling_qwait);
+ break;
+
+ default:
+ /*
+ * Ignore all other events.
+ */
+ ;
+ }
+}
+
+/*
+ * Port is enabled. IOC is down/failed.
+ */
+static void
+bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_START:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ else
+ bfa_sm_set_state(fcport,
+ bfa_fcport_sm_enabling_qwait);
+ break;
+
+ default:
+ /*
+ * Ignore all events.
+ */
+ ;
+ }
+}
+
+/*
+ * Port is disabled. IOC is down/failed.
+ */
+static void
+bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_START:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ break;
+
+ case BFA_FCPORT_SM_ENABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ default:
+ /*
+ * Ignore all events.
+ */
+ ;
+ }
+}
+
+static void
+bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_DPORTENABLE:
+ case BFA_FCPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_START:
+ /*
+ * Ignore event for a port that is dport
+ */
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ break;
+
+ case BFA_FCPORT_SM_DPORTDISABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_DDPORTDISABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ break;
+
+ case BFA_FCPORT_SM_DPORTENABLE:
+ case BFA_FCPORT_SM_DPORTDISABLE:
+ case BFA_FCPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_START:
+ /**
+ * Ignore event for a port that is ddport
+ */
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+static void
+bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
+{
+ bfa_trc(fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_SM_DPORTENABLE:
+ case BFA_FCPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_START:
+ /*
+ * Ignore event for a port as there is FAA misconfig
+ */
+ break;
+
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ else
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ break;
+
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is down
+ */
+static void
+bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKUP:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+ bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is waiting for down notification
+ */
+static void
+bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKUP:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is waiting for down notification and there is a pending up
+ */
+static void
+bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+ bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is up
+ */
+static void
+bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+ bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is waiting for up notification
+ */
+static void
+bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is waiting for up notification and there is a pending down
+ */
+static void
+bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKUP:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+ bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/*
+ * Link state is waiting for up notification and there are pending down and up
+ */
+static void
+bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+ bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+static void
+__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcport_ln_s *ln = cbarg;
+
+ if (complete)
+ ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
+ else
+ bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
+}
+
+/*
+ * Send SCN notification to upper layers.
+ * trunk - false if caller is fcport to ignore fcport event in trunked mode
+ */
+static void
+bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
+ bfa_boolean_t trunk)
+{
+ if (fcport->cfg.trunked && !trunk)
+ return;
+
+ switch (event) {
+ case BFA_PORT_LINKUP:
+ bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
+ break;
+ case BFA_PORT_LINKDOWN:
+ bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void
+bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
+{
+ struct bfa_fcport_s *fcport = ln->fcport;
+
+ if (fcport->bfa->fcs) {
+ fcport->event_cbfn(fcport->event_cbarg, event);
+ bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
+ } else {
+ ln->ln_event = event;
+ bfa_cb_queue(fcport->bfa, &ln->ln_qe,
+ __bfa_cb_fcport_event, ln);
+ }
+}
+
+#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
+ BFA_CACHELINE_SZ))
+
+static void
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
+
+ bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
+}
+
+static void
+bfa_fcport_qresume(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = cbarg;
+
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
+}
+
+static void
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
+{
+ struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
+
+ fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
+ fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
+ fcport->stats = (union bfa_fcport_stats_u *)
+ bfa_mem_dma_virt(fcport_dma);
+}
+
+/*
+ * Memory initialization.
+ */
+static void
+bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
+ struct bfa_fcport_ln_s *ln = &fcport->ln;
+ struct timeval tv;
+
+ fcport->bfa = bfa;
+ ln->fcport = fcport;
+
+ bfa_fcport_mem_claim(fcport);
+
+ bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+
+ /*
+ * initialize time stamp for stats reset
+ */
+ do_gettimeofday(&tv);
+ fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_dma_ready = BFA_FALSE;
+
+ /*
+ * initialize and set default configuration
+ */
+ port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
+ port_cfg->speed = BFA_PORT_SPEED_AUTO;
+ port_cfg->trunked = BFA_FALSE;
+ port_cfg->maxfrsize = 0;
+
+ port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+ port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
+ port_cfg->qos_bw.med = BFA_QOS_BW_MED;
+ port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
+
+ fcport->fec_state = BFA_FEC_OFFLINE;
+
+ INIT_LIST_HEAD(&fcport->stats_pending_q);
+ INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+
+ bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
+}
+
+static void
+bfa_fcport_detach(struct bfa_s *bfa)
+{
+}
+
+/*
+ * Called when IOC is ready.
+ */
+static void
+bfa_fcport_start(struct bfa_s *bfa)
+{
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
+}
+
+/*
+ * Called before IOC is stopped.
+ */
+static void
+bfa_fcport_stop(struct bfa_s *bfa)
+{
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
+ bfa_trunk_iocdisable(bfa);
+}
+
+/*
+ * Called when IOC failure is detected.
+ */
+static void
+bfa_fcport_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
+ bfa_trunk_iocdisable(bfa);
+}
+
+/*
+ * Update loop info in fcport for SCN online
+ */
+static void
+bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
+ struct bfa_fcport_loop_info_s *loop_info)
+{
+ fcport->myalpa = loop_info->myalpa;
+ fcport->alpabm_valid =
+ loop_info->alpabm_val;
+ memcpy(fcport->alpabm.alpa_bm,
+ loop_info->alpabm.alpa_bm,
+ sizeof(struct fc_alpabm_s));
+}
+
+static void
+bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
+{
+ struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+ fcport->speed = pevent->link_state.speed;
+ fcport->topology = pevent->link_state.topology;
+
+ if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
+ bfa_fcport_update_loop_info(fcport,
+ &pevent->link_state.attr.loop_info);
+ return;
+ }
+
+ /* QoS Details */
+ fcport->qos_attr = pevent->link_state.qos_attr;
+ fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
+
+ if (fcport->cfg.bb_cr_enabled)
+ fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;
+
+ fcport->fec_state = pevent->link_state.fec_state;
+
+ /*
+ * update trunk state if applicable
+ */
+ if (!fcport->cfg.trunked)
+ trunk->attr.state = BFA_TRUNK_DISABLED;
+
+ /* update FCoE specific */
+ fcport->fcoe_vlan =
+ be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
+
+ bfa_trc(fcport->bfa, fcport->speed);
+ bfa_trc(fcport->bfa, fcport->topology);
+}
+
+static void
+bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
+{
+ fcport->speed = BFA_PORT_SPEED_UNKNOWN;
+ fcport->topology = BFA_PORT_TOPOLOGY_NONE;
+ fcport->fec_state = BFA_FEC_OFFLINE;
+}
+
+/*
+ * Send port enable message to firmware.
+ */
+static bfa_boolean_t
+bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
+{
+ struct bfi_fcport_enable_req_s *m;
+
+ /*
+ * Increment message tag before queue check, so that responses to old
+ * requests are discarded.
+ */
+ fcport->msgtag++;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+ if (!m) {
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
+ bfa_fn_lpu(fcport->bfa));
+ m->nwwn = fcport->nwwn;
+ m->pwwn = fcport->pwwn;
+ m->port_cfg = fcport->cfg;
+ m->msgtag = fcport->msgtag;
+ m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
+ m->use_flash_cfg = fcport->use_flash_cfg;
+ bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
+ bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
+ bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
+ return BFA_TRUE;
+}
+
+/*
+ * Send port disable message to firmware.
+ */
+static bfa_boolean_t
+bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
+{
+ struct bfi_fcport_req_s *m;
+
+ /*
+ * Increment message tag before queue check, so that responses to old
+ * requests are discarded.
+ */
+ fcport->msgtag++;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+ if (!m) {
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
+ bfa_fn_lpu(fcport->bfa));
+ m->msgtag = fcport->msgtag;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
+
+ return BFA_TRUE;
+}
+
+static void
+bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
+{
+ fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
+ fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
+
+ bfa_trc(fcport->bfa, fcport->pwwn);
+ bfa_trc(fcport->bfa, fcport->nwwn);
+}
+
+static void
+bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
+ struct bfa_qos_stats_s *s)
+{
+ u32 *dip = (u32 *) d;
+ __be32 *sip = (__be32 *) s;
+ int i;
+
+ /* Now swap the 32 bit fields */
+ for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
+ dip[i] = be32_to_cpu(sip[i]);
+}
+
+static void
+bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
+ struct bfa_fcoe_stats_s *s)
+{
+ u32 *dip = (u32 *) d;
+ __be32 *sip = (__be32 *) s;
+ int i;
+
+ for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
+ i = i + 2) {
+#ifdef __BIG_ENDIAN
+ dip[i] = be32_to_cpu(sip[i]);
+ dip[i + 1] = be32_to_cpu(sip[i + 1]);
+#else
+ dip[i] = be32_to_cpu(sip[i + 1]);
+ dip[i + 1] = be32_to_cpu(sip[i]);
+#endif
+ }
+}
+
+static void
+__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
+ struct bfa_cb_pending_q_s *cb;
+ struct list_head *qe, *qen;
+ union bfa_fcport_stats_u *ret;
+
+ if (complete) {
+ struct timeval tv;
+ if (fcport->stats_status == BFA_STATUS_OK)
+ do_gettimeofday(&tv);
+
+ list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
+ bfa_q_deq(&fcport->stats_pending_q, &qe);
+ cb = (struct bfa_cb_pending_q_s *)qe;
+ if (fcport->stats_status == BFA_STATUS_OK) {
+ ret = (union bfa_fcport_stats_u *)cb->data;
+ /* Swap FC QoS or FCoE stats */
+ if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+ bfa_fcport_qos_stats_swap(&ret->fcqos,
+ &fcport->stats->fcqos);
+ else {
+ bfa_fcport_fcoe_stats_swap(&ret->fcoe,
+ &fcport->stats->fcoe);
+ ret->fcoe.secs_reset =
+ tv.tv_sec - fcport->stats_reset_time;
+ }
+ }
+ bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+ fcport->stats_status);
+ }
+ fcport->stats_status = BFA_STATUS_OK;
+ } else {
+ INIT_LIST_HEAD(&fcport->stats_pending_q);
+ fcport->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_fcport_stats_get_timeout(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+ bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+ if (fcport->stats_qfull) {
+ bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+ fcport->stats_qfull = BFA_FALSE;
+ }
+
+ fcport->stats_status = BFA_STATUS_ETIMER;
+ __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
+}
+
+static void
+bfa_fcport_send_stats_get(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfi_fcport_req_s *msg;
+
+ msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ fcport->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&fcport->stats_reqq_wait,
+ bfa_fcport_send_stats_get, fcport);
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->stats_reqq_wait);
+ return;
+ }
+ fcport->stats_qfull = BFA_FALSE;
+
+ memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+ bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
+ bfa_fn_lpu(fcport->bfa));
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
+}
+
+static void
+__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfa_cb_pending_q_s *cb;
+ struct list_head *qe, *qen;
+
+ if (complete) {
+ struct timeval tv;
+
+ /*
+ * re-initialize time stamp for stats reset
+ */
+ do_gettimeofday(&tv);
+ fcport->stats_reset_time = tv.tv_sec;
+ list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
+ bfa_q_deq(&fcport->statsclr_pending_q, &qe);
+ cb = (struct bfa_cb_pending_q_s *)qe;
+ bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+ fcport->stats_status);
+ }
+ fcport->stats_status = BFA_STATUS_OK;
+ } else {
+ INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+ fcport->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_fcport_stats_clr_timeout(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+ bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+ if (fcport->stats_qfull) {
+ bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+ fcport->stats_qfull = BFA_FALSE;
+ }
+
+ fcport->stats_status = BFA_STATUS_ETIMER;
+ __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
+}
+
+static void
+bfa_fcport_send_stats_clear(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfi_fcport_req_s *msg;
+
+ msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ fcport->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&fcport->stats_reqq_wait,
+ bfa_fcport_send_stats_clear, fcport);
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->stats_reqq_wait);
+ return;
+ }
+ fcport->stats_qfull = BFA_FALSE;
+
+ memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+ bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
+ bfa_fn_lpu(fcport->bfa));
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
+}
+
+/*
+ * Handle trunk SCN event from firmware.
+ */
+static void
+bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
+{
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ struct bfi_fcport_trunk_link_s *tlink;
+ struct bfa_trunk_link_attr_s *lattr;
+ enum bfa_trunk_state state_prev;
+ int i;
+ int link_bm = 0;
+
+ bfa_trc(fcport->bfa, fcport->cfg.trunked);
+ WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
+ scn->trunk_state != BFA_TRUNK_OFFLINE);
+
+ bfa_trc(fcport->bfa, trunk->attr.state);
+ bfa_trc(fcport->bfa, scn->trunk_state);
+ bfa_trc(fcport->bfa, scn->trunk_speed);
+
+ /*
+ * Save off new state for trunk attribute query
+ */
+ state_prev = trunk->attr.state;
+ if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
+ trunk->attr.state = scn->trunk_state;
+ trunk->attr.speed = scn->trunk_speed;
+ for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
+ lattr = &trunk->attr.link_attr[i];
+ tlink = &scn->tlink[i];
+
+ lattr->link_state = tlink->state;
+ lattr->trunk_wwn = tlink->trunk_wwn;
+ lattr->fctl = tlink->fctl;
+ lattr->speed = tlink->speed;
+ lattr->deskew = be32_to_cpu(tlink->deskew);
+
+ if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
+ fcport->speed = tlink->speed;
+ fcport->topology = BFA_PORT_TOPOLOGY_P2P;
+ link_bm |= 1 << i;
+ }
+
+ bfa_trc(fcport->bfa, lattr->link_state);
+ bfa_trc(fcport->bfa, lattr->trunk_wwn);
+ bfa_trc(fcport->bfa, lattr->fctl);
+ bfa_trc(fcport->bfa, lattr->speed);
+ bfa_trc(fcport->bfa, lattr->deskew);
+ }
+
+ switch (link_bm) {
+ case 3:
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
+ break;
+ case 2:
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
+ break;
+ case 1:
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
+ break;
+ default:
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
+ }
+
+ /*
+ * Notify upper layers if trunk state changed.
+ */
+ if ((state_prev != trunk->attr.state) ||
+ (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
+ bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
+ BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
+ }
+}
+
+static void
+bfa_trunk_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ int i = 0;
+
+ /*
+ * In trunked mode, notify upper layers that link is down
+ */
+ if (fcport->cfg.trunked) {
+ if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
+ bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
+
+ fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
+ fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
+ for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
+ fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
+ fcport->trunk.attr.link_attr[i].fctl =
+ BFA_TRUNK_LINK_FCTL_NORMAL;
+ fcport->trunk.attr.link_attr[i].link_state =
+ BFA_TRUNK_LINK_STATE_DN_LINKDN;
+ fcport->trunk.attr.link_attr[i].speed =
+ BFA_PORT_SPEED_UNKNOWN;
+ fcport->trunk.attr.link_attr[i].deskew = 0;
+ }
+ }
+}
+
+/*
+ * Called to initialize port attributes
+ */
+void
+bfa_fcport_init(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ /*
+ * Initialize port attributes from IOC hardware data.
+ */
+ bfa_fcport_set_wwns(fcport);
+ if (fcport->cfg.maxfrsize == 0)
+ fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
+ fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
+ fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ bfa->modules.port.pbc_disabled = BFA_TRUE;
+
+ WARN_ON(!fcport->cfg.maxfrsize);
+ WARN_ON(!fcport->cfg.rx_bbcredit);
+ WARN_ON(!fcport->speed_sup);
+}
+
+/*
+ * Firmware message handler.
+ */
+void
+bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ union bfi_fcport_i2h_msg_u i2hmsg;
+
+ i2hmsg.msg = msg;
+ fcport->event_arg.i2hmsg = i2hmsg;
+
+ bfa_trc(bfa, msg->mhdr.msg_id);
+ bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
+
+ switch (msg->mhdr.msg_id) {
+ case BFI_FCPORT_I2H_ENABLE_RSP:
+ if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
+
+ fcport->stats_dma_ready = BFA_TRUE;
+ if (fcport->use_flash_cfg) {
+ fcport->cfg = i2hmsg.penable_rsp->port_cfg;
+ fcport->cfg.maxfrsize =
+ cpu_to_be16(fcport->cfg.maxfrsize);
+ fcport->cfg.path_tov =
+ cpu_to_be16(fcport->cfg.path_tov);
+ fcport->cfg.q_depth =
+ cpu_to_be16(fcport->cfg.q_depth);
+
+ if (fcport->cfg.trunked)
+ fcport->trunk.attr.state =
+ BFA_TRUNK_OFFLINE;
+ else
+ fcport->trunk.attr.state =
+ BFA_TRUNK_DISABLED;
+ fcport->qos_attr.qos_bw =
+ i2hmsg.penable_rsp->port_cfg.qos_bw;
+ fcport->use_flash_cfg = BFA_FALSE;
+ }
+
+ if (fcport->cfg.qos_enabled)
+ fcport->qos_attr.state = BFA_QOS_OFFLINE;
+ else
+ fcport->qos_attr.state = BFA_QOS_DISABLED;
+
+ fcport->qos_attr.qos_bw_op =
+ i2hmsg.penable_rsp->port_cfg.qos_bw;
+
+ if (fcport->cfg.bb_cr_enabled)
+ fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
+ else
+ fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
+
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+ }
+ break;
+
+ case BFI_FCPORT_I2H_DISABLE_RSP:
+ if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+ break;
+
+ case BFI_FCPORT_I2H_EVENT:
+ if (fcport->cfg.bb_cr_enabled)
+ fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
+ else
+ fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
+
+ if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
+ else {
+ if (i2hmsg.event->link_state.linkstate_rsn ==
+ BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
+ bfa_sm_send_event(fcport,
+ BFA_FCPORT_SM_FAA_MISCONFIG);
+ else
+ bfa_sm_send_event(fcport,
+ BFA_FCPORT_SM_LINKDOWN);
+ }
+ fcport->qos_attr.qos_bw_op =
+ i2hmsg.event->link_state.qos_attr.qos_bw_op;
+ break;
+
+ case BFI_FCPORT_I2H_TRUNK_SCN:
+ bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
+ break;
+
+ case BFI_FCPORT_I2H_STATS_GET_RSP:
+ /*
+ * check for timer pop before processing the rsp
+ */
+ if (list_empty(&fcport->stats_pending_q) ||
+ (fcport->stats_status == BFA_STATUS_ETIMER))
+ break;
+
+ bfa_timer_stop(&fcport->timer);
+ fcport->stats_status = i2hmsg.pstatsget_rsp->status;
+ __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
+ break;
+
+ case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
+ /*
+ * check for timer pop before processing the rsp
+ */
+ if (list_empty(&fcport->statsclr_pending_q) ||
+ (fcport->stats_status == BFA_STATUS_ETIMER))
+ break;
+
+ bfa_timer_stop(&fcport->timer);
+ fcport->stats_status = BFA_STATUS_OK;
+ __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
+ break;
+
+ case BFI_FCPORT_I2H_ENABLE_AEN:
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
+ break;
+
+ case BFI_FCPORT_I2H_DISABLE_AEN:
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/*
+ * Registered callback for port events.
+ */
+void
+bfa_fcport_event_register(struct bfa_s *bfa,
+ void (*cbfn) (void *cbarg,
+ enum bfa_port_linkstate event),
+ void *cbarg)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ fcport->event_cbfn = cbfn;
+ fcport->event_cbarg = cbarg;
+}
+
+bfa_status_t
+bfa_fcport_enable(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ return BFA_STATUS_PBC;
+
+ if (bfa_ioc_is_disabled(&bfa->ioc))
+ return BFA_STATUS_IOC_DISABLED;
+
+ if (fcport->diag_busy)
+ return BFA_STATUS_DIAG_BUSY;
+
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_disable(struct bfa_s *bfa)
+{
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ return BFA_STATUS_PBC;
+
+ if (bfa_ioc_is_disabled(&bfa->ioc))
+ return BFA_STATUS_IOC_DISABLED;
+
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
+ return BFA_STATUS_OK;
+}
+
+/* If PBC is disabled on port, return error */
+bfa_status_t
+bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
+ bfa_trc(bfa, fcport->pwwn);
+ return BFA_STATUS_PBC;
+ }
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Configure port speed.
+ */
+bfa_status_t
+bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, speed);
+
+ if (fcport->cfg.trunked == BFA_TRUE)
+ return BFA_STATUS_TRUNK_ENABLED;
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (speed == BFA_PORT_SPEED_16GBPS))
+ return BFA_STATUS_UNSUPP_SPEED;
+ if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
+ bfa_trc(bfa, fcport->speed_sup);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+
+ /* Port speed entered needs to be checked */
+ if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+ /* For CT2, 1G is not supported */
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+ return BFA_STATUS_UNSUPP_SPEED;
+
+ /* Already checked for Auto Speed and Max Speed supp */
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO))
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS)
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+
+ fcport->cfg.speed = speed;
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Get current speed.
+ */
+enum bfa_port_speed
+bfa_fcport_get_speed(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->speed;
+}
+
+/*
+ * Configure port topology.
+ */
+bfa_status_t
+bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, topology);
+ bfa_trc(bfa, fcport->cfg.topology);
+
+ switch (topology) {
+ case BFA_PORT_TOPOLOGY_P2P:
+ break;
+
+ case BFA_PORT_TOPOLOGY_LOOP:
+ if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
+ (fcport->qos_attr.state != BFA_QOS_DISABLED))
+ return BFA_STATUS_ERROR_QOS_ENABLED;
+ if (fcport->cfg.ratelimit != BFA_FALSE)
+ return BFA_STATUS_ERROR_TRL_ENABLED;
+ if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
+ (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
+ return BFA_STATUS_ERROR_TRUNK_ENABLED;
+ if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
+ (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
+ return BFA_STATUS_UNSUPP_SPEED;
+ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
+ return BFA_STATUS_LOOP_UNSUPP_MEZZ;
+ if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
+ return BFA_STATUS_DPORT_ERR;
+ if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
+ return BFA_STATUS_DPORT_ERR;
+ break;
+
+ case BFA_PORT_TOPOLOGY_AUTO:
+ break;
+
+ default:
+ return BFA_STATUS_EINVAL;
+ }
+
+ fcport->cfg.topology = topology;
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Get current topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_topology(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->topology;
+}
+
+/**
+ * Get config topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.topology;
+}
+
+bfa_status_t
+bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, alpa);
+ bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+ bfa_trc(bfa, fcport->cfg.hardalpa);
+
+ fcport->cfg.cfg_hardalpa = BFA_TRUE;
+ fcport->cfg.hardalpa = alpa;
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+ bfa_trc(bfa, fcport->cfg.hardalpa);
+
+ fcport->cfg.cfg_hardalpa = BFA_FALSE;
+ return BFA_STATUS_OK;
+}
+
+bfa_boolean_t
+bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ *alpa = fcport->cfg.hardalpa;
+ return fcport->cfg.cfg_hardalpa;
+}
+
+u8
+bfa_fcport_get_myalpa(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->myalpa;
+}
+
+bfa_status_t
+bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, maxfrsize);
+ bfa_trc(bfa, fcport->cfg.maxfrsize);
+
+ /* with in range */
+ if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
+ return BFA_STATUS_INVLD_DFSZ;
+
+ /* power of 2, if not the max frame size of 2112 */
+ if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
+ return BFA_STATUS_INVLD_DFSZ;
+
+ fcport->cfg.maxfrsize = maxfrsize;
+ return BFA_STATUS_OK;
+}
+
+u16
+bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.maxfrsize;
+}
+
+u8
+bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
+{
+ if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
+ return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
+
+ else
+ return 0;
+}
+
+void
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
+}
+
+/*
+ * Get port attributes.
+ */
+
+wwn_t
+bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ if (node)
+ return fcport->nwwn;
+ else
+ return fcport->pwwn;
+}
+
+void
+bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ memset(attr, 0, sizeof(struct bfa_port_attr_s));
+
+ attr->nwwn = fcport->nwwn;
+ attr->pwwn = fcport->pwwn;
+
+ attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
+ attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
+
+ memcpy(&attr->pport_cfg, &fcport->cfg,
+ sizeof(struct bfa_port_cfg_s));
+ /* speed attributes */
+ attr->pport_cfg.speed = fcport->cfg.speed;
+ attr->speed_supported = fcport->speed_sup;
+ attr->speed = fcport->speed;
+ attr->cos_supported = FC_CLASS_3;
+
+ /* topology attributes */
+ attr->pport_cfg.topology = fcport->cfg.topology;
+ attr->topology = fcport->topology;
+ attr->pport_cfg.trunked = fcport->cfg.trunked;
+
+ /* beacon attributes */
+ attr->beacon = fcport->beacon;
+ attr->link_e2e_beacon = fcport->link_e2e_beacon;
+
+ attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
+ attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
+ attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
+
+ attr->fec_state = fcport->fec_state;
+
+ /* PBC Disabled State */
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
+ else {
+ if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
+ attr->port_state = BFA_PORT_ST_IOCDIS;
+ else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
+ attr->port_state = BFA_PORT_ST_FWMISMATCH;
+ }
+
+ /* FCoE vlan */
+ attr->fcoe_vlan = fcport->fcoe_vlan;
+}
+
+#define BFA_FCPORT_STATS_TOV 1000
+
+/*
+ * Fetch port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ if (!bfa_iocfc_is_operational(bfa) ||
+ !fcport->stats_dma_ready)
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (!list_empty(&fcport->statsclr_pending_q))
+ return BFA_STATUS_DEVBUSY;
+
+ if (list_empty(&fcport->stats_pending_q)) {
+ list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+ bfa_fcport_send_stats_get(fcport);
+ bfa_timer_start(bfa, &fcport->timer,
+ bfa_fcport_stats_get_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ } else
+ list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Reset port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ if (!bfa_iocfc_is_operational(bfa) ||
+ !fcport->stats_dma_ready)
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (!list_empty(&fcport->stats_pending_q))
+ return BFA_STATUS_DEVBUSY;
+
+ if (list_empty(&fcport->statsclr_pending_q)) {
+ list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+ bfa_fcport_send_stats_clear(fcport);
+ bfa_timer_start(bfa, &fcport->timer,
+ bfa_fcport_stats_clr_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ } else
+ list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Fetch port attributes.
+ */
+bfa_boolean_t
+bfa_fcport_is_disabled(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ BFA_PORT_ST_DISABLED;
+
+}
+
+bfa_boolean_t
+bfa_fcport_is_dport(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ BFA_PORT_ST_DPORT);
+}
+
+bfa_boolean_t
+bfa_fcport_is_ddport(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ BFA_PORT_ST_DDPORT);
+}
+
+bfa_status_t
+bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+
+ bfa_trc(bfa, ioc_type);
+
+ if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
+ return BFA_STATUS_QOS_BW_INVALID;
+
+ if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
+ return BFA_STATUS_QOS_BW_INVALID;
+
+ if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
+ (qos_bw->low > qos_bw->high))
+ return BFA_STATUS_QOS_BW_INVALID;
+
+ if ((ioc_type == BFA_IOC_TYPE_FC) &&
+ (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
+ fcport->cfg.qos_bw = *qos_bw;
+
+ return BFA_STATUS_OK;
+}
+
+bfa_boolean_t
+bfa_fcport_is_ratelim(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
+
+}
+
+/*
+ * Enable/Disable FAA feature in port config
+ */
+void
+bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, state);
+ fcport->cfg.faa_state = state;
+}
+
+/*
+ * Get default minimum ratelim speed
+ */
+enum bfa_port_speed
+bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, fcport->cfg.trl_def_speed);
+ return fcport->cfg.trl_def_speed;
+
+}
+
+void
+bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon)
+{
+ struct bfa_s *bfa = dev;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, beacon);
+ bfa_trc(bfa, link_e2e_beacon);
+ bfa_trc(bfa, fcport->beacon);
+ bfa_trc(bfa, fcport->link_e2e_beacon);
+
+ fcport->beacon = beacon;
+ fcport->link_e2e_beacon = link_e2e_beacon;
+}
+
+bfa_boolean_t
+bfa_fcport_is_linkup(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return (!fcport->cfg.trunked &&
+ bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
+ (fcport->cfg.trunked &&
+ fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
+}
+
+bfa_boolean_t
+bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.qos_enabled;
+}
+
+bfa_boolean_t
+bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.trunked;
+}
+
+bfa_status_t
+bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, on_off);
+
+ if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
+ return BFA_STATUS_BBCR_FC_ONLY;
+
+ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
+ (bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
+ return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+
+ if (on_off) {
+ if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
+ return BFA_STATUS_TOPOLOGY_LOOP;
+
+ if (fcport->cfg.qos_enabled)
+ return BFA_STATUS_ERROR_QOS_ENABLED;
+
+ if (fcport->cfg.trunked)
+ return BFA_STATUS_TRUNK_ENABLED;
+
+ if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
+ (fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
+ return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;
+
+ if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
+ return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+
+ if (fcport->cfg.bb_cr_enabled) {
+ if (bb_scn != fcport->cfg.bb_scn)
+ return BFA_STATUS_BBCR_CFG_NO_CHANGE;
+ else
+ return BFA_STATUS_NO_CHANGE;
+ }
+
+ if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
+ bb_scn = BFA_BB_SCN_DEF;
+
+ fcport->cfg.bb_cr_enabled = on_off;
+ fcport->cfg.bb_scn = bb_scn;
+ } else {
+ if (!fcport->cfg.bb_cr_enabled)
+ return BFA_STATUS_NO_CHANGE;
+
+ fcport->cfg.bb_cr_enabled = on_off;
+ fcport->cfg.bb_scn = 0;
+ }
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
+ struct bfa_bbcr_attr_s *bbcr_attr)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
+ return BFA_STATUS_BBCR_FC_ONLY;
+
+ if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
+ return BFA_STATUS_TOPOLOGY_LOOP;
+
+ *bbcr_attr = fcport->bbcr_attr;
+
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_fcport_dportenable(struct bfa_s *bfa)
+{
+ /*
+ * Assume caller check for port is in disable state
+ */
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
+ bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
+}
+
+void
+bfa_fcport_dportdisable(struct bfa_s *bfa)
+{
+ /*
+ * Assume caller check for port is in disable state
+ */
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
+ bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
+}
+
+void
+bfa_fcport_ddportenable(struct bfa_s *bfa)
+{
+ /*
+ * Assume caller check for port is in disable state
+ */
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
+}
+
+void
+bfa_fcport_ddportdisable(struct bfa_s *bfa)
+{
+ /*
+ * Assume caller check for port is in disable state
+ */
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
+}
+
+/*
+ * Rport State machine functions
+ */
+/*
+ * Beginning state, only online event expected.
+ */
+static void
+bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_CREATE:
+ bfa_stats(rp, sm_un_cr);
+ bfa_sm_set_state(rp, bfa_rport_sm_created);
+ break;
+
+ default:
+ bfa_stats(rp, sm_un_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+static void
+bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_ONLINE:
+ bfa_stats(rp, sm_cr_on);
+ if (bfa_rport_send_fwcreate(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_cr_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_rport_free(rp);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_cr_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ break;
+
+ default:
+ bfa_stats(rp, sm_cr_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Waiting for rport create response from firmware.
+ */
+static void
+bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_FWRSP:
+ bfa_stats(rp, sm_fwc_rsp);
+ bfa_sm_set_state(rp, bfa_rport_sm_online);
+ bfa_rport_online_cb(rp);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_fwc_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
+ break;
+
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_stats(rp, sm_fwc_off);
+ bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_fwc_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ break;
+
+ default:
+ bfa_stats(rp, sm_fwc_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Request queue is full, awaiting queue resume to send create request.
+ */
+static void
+bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_QRESUME:
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+ bfa_rport_send_fwcreate(rp);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_fwc_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_reqq_wcancel(&rp->reqq_wait);
+ bfa_rport_free(rp);
+ break;
+
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_stats(rp, sm_fwc_off);
+ bfa_sm_set_state(rp, bfa_rport_sm_offline);
+ bfa_reqq_wcancel(&rp->reqq_wait);
+ bfa_rport_offline_cb(rp);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_fwc_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ bfa_reqq_wcancel(&rp->reqq_wait);
+ break;
+
+ default:
+ bfa_stats(rp, sm_fwc_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Online state - normal parking state.
+ */
+static void
+bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ struct bfi_rport_qos_scn_s *qos_scn;
+
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_stats(rp, sm_on_off);
+ if (bfa_rport_send_fwdelete(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_on_del);
+ if (bfa_rport_send_fwdelete(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_on_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ break;
+
+ case BFA_RPORT_SM_SET_SPEED:
+ bfa_rport_send_fwspeed(rp);
+ break;
+
+ case BFA_RPORT_SM_QOS_SCN:
+ qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
+ rp->qos_attr = qos_scn->new_qos_attr;
+ bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
+ bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
+ bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
+ bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
+
+ qos_scn->old_qos_attr.qos_flow_id =
+ be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
+ qos_scn->new_qos_attr.qos_flow_id =
+ be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
+
+ if (qos_scn->old_qos_attr.qos_flow_id !=
+ qos_scn->new_qos_attr.qos_flow_id)
+ bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
+ qos_scn->old_qos_attr,
+ qos_scn->new_qos_attr);
+ if (qos_scn->old_qos_attr.qos_priority !=
+ qos_scn->new_qos_attr.qos_priority)
+ bfa_cb_rport_qos_scn_prio(rp->rport_drv,
+ qos_scn->old_qos_attr,
+ qos_scn->new_qos_attr);
+ break;
+
+ default:
+ bfa_stats(rp, sm_on_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Firmware rport is being deleted - awaiting f/w response.
+ */
+static void
+bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_FWRSP:
+ bfa_stats(rp, sm_fwd_rsp);
+ bfa_sm_set_state(rp, bfa_rport_sm_offline);
+ bfa_rport_offline_cb(rp);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_fwd_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_fwd_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ bfa_rport_offline_cb(rp);
+ break;
+
+ default:
+ bfa_stats(rp, sm_fwd_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+static void
+bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_QRESUME:
+ bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+ bfa_rport_send_fwdelete(rp);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_fwd_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_fwd_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ bfa_reqq_wcancel(&rp->reqq_wait);
+ bfa_rport_offline_cb(rp);
+ break;
+
+ default:
+ bfa_stats(rp, sm_fwd_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Offline state.
+ */
+static void
+bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_off_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_rport_free(rp);
+ break;
+
+ case BFA_RPORT_SM_ONLINE:
+ bfa_stats(rp, sm_off_on);
+ if (bfa_rport_send_fwcreate(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_off_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ break;
+
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_rport_offline_cb(rp);
+ break;
+
+ default:
+ bfa_stats(rp, sm_off_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Rport is deleted, waiting for firmware response to delete.
+ */
+static void
+bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_FWRSP:
+ bfa_stats(rp, sm_del_fwrsp);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_rport_free(rp);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_del_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_rport_free(rp);
+ break;
+
+ default:
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+static void
+bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_QRESUME:
+ bfa_stats(rp, sm_del_fwrsp);
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+ bfa_rport_send_fwdelete(rp);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_del_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_reqq_wcancel(&rp->reqq_wait);
+ bfa_rport_free(rp);
+ break;
+
+ default:
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Waiting for rport create response from firmware. A delete is pending.
+ */
+static void
+bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
+ enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_FWRSP:
+ bfa_stats(rp, sm_delp_fwrsp);
+ if (bfa_rport_send_fwdelete(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_delp_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_rport_free(rp);
+ break;
+
+ default:
+ bfa_stats(rp, sm_delp_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * Waiting for rport create response from firmware. Rport offline is pending.
+ */
+static void
+bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
+ enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_FWRSP:
+ bfa_stats(rp, sm_offp_fwrsp);
+ if (bfa_rport_send_fwdelete(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_offp_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ bfa_stats(rp, sm_offp_hwf);
+ bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+ bfa_rport_offline_cb(rp);
+ break;
+
+ default:
+ bfa_stats(rp, sm_offp_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+/*
+ * IOC h/w failed.
+ */
+static void
+bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+ bfa_trc(rp->bfa, rp->rport_tag);
+ bfa_trc(rp->bfa, event);
+
+ switch (event) {
+ case BFA_RPORT_SM_OFFLINE:
+ bfa_stats(rp, sm_iocd_off);
+ bfa_rport_offline_cb(rp);
+ break;
+
+ case BFA_RPORT_SM_DELETE:
+ bfa_stats(rp, sm_iocd_del);
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+ bfa_rport_free(rp);
+ break;
+
+ case BFA_RPORT_SM_ONLINE:
+ bfa_stats(rp, sm_iocd_on);
+ if (bfa_rport_send_fwcreate(rp))
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+ else
+ bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+ break;
+
+ case BFA_RPORT_SM_HWFAIL:
+ break;
+
+ default:
+ bfa_stats(rp, sm_iocd_unexp);
+ bfa_sm_fault(rp->bfa, event);
+ }
+}
+
+
+
+/*
+ * bfa_rport_private BFA rport private functions
+ */
+
+static void
+__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_rport_s *rp = cbarg;
+
+ if (complete)
+ bfa_cb_rport_online(rp->rport_drv);
+}
+
+static void
+__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_rport_s *rp = cbarg;
+
+ if (complete)
+ bfa_cb_rport_offline(rp->rport_drv);
+}
+
+static void
+bfa_rport_qresume(void *cbarg)
+{
+ struct bfa_rport_s *rp = cbarg;
+
+ bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
+}
+
+static void
+bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
+
+ if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
+ cfg->fwcfg.num_rports = BFA_RPORT_MIN;
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, rport_kva,
+ cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
+}
+
+static void
+bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+ struct bfa_rport_s *rp;
+ u16 i;
+
+ INIT_LIST_HEAD(&mod->rp_free_q);
+ INIT_LIST_HEAD(&mod->rp_active_q);
+ INIT_LIST_HEAD(&mod->rp_unused_q);
+
+ rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
+ mod->rps_list = rp;
+ mod->num_rports = cfg->fwcfg.num_rports;
+
+ WARN_ON(!mod->num_rports ||
+ (mod->num_rports & (mod->num_rports - 1)));
+
+ for (i = 0; i < mod->num_rports; i++, rp++) {
+ memset(rp, 0, sizeof(struct bfa_rport_s));
+ rp->bfa = bfa;
+ rp->rport_tag = i;
+ bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+
+ /*
+ * - is unused
+ */
+ if (i)
+ list_add_tail(&rp->qe, &mod->rp_free_q);
+
+ bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
+ }
+
+ /*
+ * consume memory
+ */
+ bfa_mem_kva_curp(mod) = (u8 *) rp;
+}
+
+static void
+bfa_rport_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_rport_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_rport_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_rport_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+ struct bfa_rport_s *rport;
+ struct list_head *qe, *qen;
+
+ /* Enqueue unused rport resources to free_q */
+ list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
+
+ list_for_each_safe(qe, qen, &mod->rp_active_q) {
+ rport = (struct bfa_rport_s *) qe;
+ bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
+ }
+}
+
+static struct bfa_rport_s *
+bfa_rport_alloc(struct bfa_rport_mod_s *mod)
+{
+ struct bfa_rport_s *rport;
+
+ bfa_q_deq(&mod->rp_free_q, &rport);
+ if (rport)
+ list_add_tail(&rport->qe, &mod->rp_active_q);
+
+ return rport;
+}
+
+static void
+bfa_rport_free(struct bfa_rport_s *rport)
+{
+ struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
+
+ WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
+ list_del(&rport->qe);
+ list_add_tail(&rport->qe, &mod->rp_free_q);
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
+{
+ struct bfi_rport_create_req_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+ if (!m) {
+ bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
+ bfa_fn_lpu(rp->bfa));
+ m->bfa_handle = rp->rport_tag;
+ m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
+ m->pid = rp->rport_info.pid;
+ m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
+ m->local_pid = rp->rport_info.local_pid;
+ m->fc_class = rp->rport_info.fc_class;
+ m->vf_en = rp->rport_info.vf_en;
+ m->vf_id = rp->rport_info.vf_id;
+ m->cisc = rp->rport_info.cisc;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
+{
+ struct bfi_rport_delete_req_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+ if (!m) {
+ bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
+ bfa_fn_lpu(rp->bfa));
+ m->fw_handle = rp->fw_handle;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
+ return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
+{
+ struct bfa_rport_speed_req_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+ if (!m) {
+ bfa_trc(rp->bfa, rp->rport_info.speed);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
+ bfa_fn_lpu(rp->bfa));
+ m->fw_handle = rp->fw_handle;
+ m->speed = (u8)rp->rport_info.speed;
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
+ return BFA_TRUE;
+}
+
+
+
+/*
+ * bfa_rport_public
+ */
+
+/*
+ * Rport interrupt processing.
+ */
+void
+bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ union bfi_rport_i2h_msg_u msg;
+ struct bfa_rport_s *rp;
+
+ bfa_trc(bfa, m->mhdr.msg_id);
+
+ msg.msg = m;
+
+ switch (m->mhdr.msg_id) {
+ case BFI_RPORT_I2H_CREATE_RSP:
+ rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
+ rp->fw_handle = msg.create_rsp->fw_handle;
+ rp->qos_attr = msg.create_rsp->qos_attr;
+ bfa_rport_set_lunmask(bfa, rp);
+ WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
+ bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
+ break;
+
+ case BFI_RPORT_I2H_DELETE_RSP:
+ rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
+ WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+ bfa_rport_unset_lunmask(bfa, rp);
+ bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
+ break;
+
+ case BFI_RPORT_I2H_QOS_SCN:
+ rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
+ rp->event_arg.fw_msg = msg.qos_scn_evt;
+ bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
+ break;
+
+ case BFI_RPORT_I2H_LIP_SCN_ONLINE:
+ bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
+ &msg.lip_scn->loop_info);
+ bfa_cb_rport_scn_online(bfa);
+ break;
+
+ case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
+ bfa_cb_rport_scn_offline(bfa);
+ break;
+
+ case BFI_RPORT_I2H_NO_DEV:
+ rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
+ bfa_cb_rport_scn_no_dev(rp->rport_drv);
+ break;
+
+ default:
+ bfa_trc(bfa, m->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+void
+bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
+{
+ struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
+ bfa_q_deq_tail(&mod->rp_free_q, &qe);
+ list_add_tail(qe, &mod->rp_unused_q);
+ }
+}
+
+/*
+ * bfa_rport_api
+ */
+
+struct bfa_rport_s *
+bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
+{
+ struct bfa_rport_s *rp;
+
+ rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
+
+ if (rp == NULL)
+ return NULL;
+
+ rp->bfa = bfa;
+ rp->rport_drv = rport_drv;
+ memset(&rp->stats, 0, sizeof(rp->stats));
+
+ WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
+ bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
+
+ return rp;
+}
+
+void
+bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
+{
+ WARN_ON(rport_info->max_frmsz == 0);
+
+ /*
+ * Some JBODs are seen to be not setting PDU size correctly in PLOGI
+ * responses. Default to minimum size.
+ */
+ if (rport_info->max_frmsz == 0) {
+ bfa_trc(rport->bfa, rport->rport_tag);
+ rport_info->max_frmsz = FC_MIN_PDUSZ;
+ }
+
+ rport->rport_info = *rport_info;
+ bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
+}
+
+void
+bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
+{
+ WARN_ON(speed == 0);
+ WARN_ON(speed == BFA_PORT_SPEED_AUTO);
+
+ if (rport) {
+ rport->rport_info.speed = speed;
+ bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+ }
+}
+
+/* Set Rport LUN Mask */
+void
+bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+ struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
+ wwn_t lp_wwn, rp_wwn;
+ u8 lp_tag = (u8)rp->rport_info.lp_tag;
+
+ rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+ lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+ BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+ rp->lun_mask = BFA_TRUE;
+ bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
+}
+
+/* Unset Rport LUN mask */
+void
+bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+ struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
+ wwn_t lp_wwn, rp_wwn;
+
+ rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+ lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+ BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+ rp->lun_mask = BFA_FALSE;
+ bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
+ BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
+}
+
+/*
+ * SGPG related functions
+ */
+
+/*
+ * Compute and return memory needed by FCP(im) module.
+ */
+static void
+bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
+ struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_sgpg, num_sgpg;
+ u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
+
+ if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
+ cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+ else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
+ cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
+
+ num_sgpg = cfg->drvcfg.num_sgpgs;
+
+ nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+ per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
+
+ bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
+ if (num_sgpg >= per_seg_sgpg) {
+ num_sgpg -= per_seg_sgpg;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_sgpg * sgpg_sz);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_sgpg * sgpg_sz);
+ }
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, sgpg_kva,
+ cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
+}
+
+static void
+bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+ struct bfa_sgpg_s *hsgpg;
+ struct bfi_sgpg_s *sgpg;
+ u64 align_len;
+ struct bfa_mem_dma_s *seg_ptr;
+ u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
+ u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
+
+ union {
+ u64 pa;
+ union bfi_addr_u addr;
+ } sgpg_pa, sgpg_pa_tmp;
+
+ INIT_LIST_HEAD(&mod->sgpg_q);
+ INIT_LIST_HEAD(&mod->sgpg_wait_q);
+
+ bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
+
+ mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
+
+ num_sgpg = cfg->drvcfg.num_sgpgs;
+ nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+
+ /* dma/kva mem claim */
+ hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
+
+ bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
+
+ if (!bfa_mem_dma_virt(seg_ptr))
+ break;
+
+ align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
+ bfa_mem_dma_phys(seg_ptr);
+
+ sgpg = (struct bfi_sgpg_s *)
+ (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
+ sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
+ WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
+
+ per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
+
+ for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
+ memset(hsgpg, 0, sizeof(*hsgpg));
+ memset(sgpg, 0, sizeof(*sgpg));
+
+ hsgpg->sgpg = sgpg;
+ sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
+ hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
+ list_add_tail(&hsgpg->qe, &mod->sgpg_q);
+
+ sgpg++;
+ hsgpg++;
+ sgpg_pa.pa += sgpg_sz;
+ }
+ }
+
+ bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
+}
+
+static void
+bfa_sgpg_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_sgpg_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_sgpg_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_sgpg_iocdisable(struct bfa_s *bfa)
+{
+}
+
+bfa_status_t
+bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
+{
+ struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+ struct bfa_sgpg_s *hsgpg;
+ int i;
+
+ if (mod->free_sgpgs < nsgpgs)
+ return BFA_STATUS_ENOMEM;
+
+ for (i = 0; i < nsgpgs; i++) {
+ bfa_q_deq(&mod->sgpg_q, &hsgpg);
+ WARN_ON(!hsgpg);
+ list_add_tail(&hsgpg->qe, sgpg_q);
+ }
+
+ mod->free_sgpgs -= nsgpgs;
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
+{
+ struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+ struct bfa_sgpg_wqe_s *wqe;
+
+ mod->free_sgpgs += nsgpg;
+ WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
+
+ list_splice_tail_init(sgpg_q, &mod->sgpg_q);
+
+ if (list_empty(&mod->sgpg_wait_q))
+ return;
+
+ /*
+ * satisfy as many waiting requests as possible
+ */
+ do {
+ wqe = bfa_q_first(&mod->sgpg_wait_q);
+ if (mod->free_sgpgs < wqe->nsgpg)
+ nsgpg = mod->free_sgpgs;
+ else
+ nsgpg = wqe->nsgpg;
+ bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
+ wqe->nsgpg -= nsgpg;
+ if (wqe->nsgpg == 0) {
+ list_del(&wqe->qe);
+ wqe->cbfn(wqe->cbarg);
+ }
+ } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
+}
+
+void
+bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
+{
+ struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+
+ WARN_ON(nsgpg <= 0);
+ WARN_ON(nsgpg <= mod->free_sgpgs);
+
+ wqe->nsgpg_total = wqe->nsgpg = nsgpg;
+
+ /*
+ * allocate any left to this one first
+ */
+ if (mod->free_sgpgs) {
+ /*
+ * no one else is waiting for SGPG
+ */
+ WARN_ON(!list_empty(&mod->sgpg_wait_q));
+ list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
+ wqe->nsgpg -= mod->free_sgpgs;
+ mod->free_sgpgs = 0;
+ }
+
+ list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
+}
+
+void
+bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
+{
+ struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+
+ WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
+ list_del(&wqe->qe);
+
+ if (wqe->nsgpg_total != wqe->nsgpg)
+ bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
+ wqe->nsgpg_total - wqe->nsgpg);
+}
+
+void
+bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
+ void *cbarg)
+{
+ INIT_LIST_HEAD(&wqe->sgpg_q);
+ wqe->cbfn = cbfn;
+ wqe->cbarg = cbarg;
+}
+
+/*
+ * UF related functions
+ */
+/*
+ *****************************************************************************
+ * Internal functions
+ *****************************************************************************
+ */
+static void
+__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_uf_s *uf = cbarg;
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
+
+ if (complete)
+ ufm->ufrecv(ufm->cbarg, uf);
+}
+
+static void
+claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
+{
+ struct bfi_uf_buf_post_s *uf_bp_msg;
+ u16 i;
+ u16 buf_len;
+
+ ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
+ uf_bp_msg = ufm->uf_buf_posts;
+
+ for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
+ i++, uf_bp_msg++) {
+ memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
+
+ uf_bp_msg->buf_tag = i;
+ buf_len = sizeof(struct bfa_uf_buf_s);
+ uf_bp_msg->buf_len = cpu_to_be16(buf_len);
+ bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
+ bfa_fn_lpu(ufm->bfa));
+ bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
+ }
+
+ /*
+ * advance pointer beyond consumed memory
+ */
+ bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
+}
+
+static void
+claim_ufs(struct bfa_uf_mod_s *ufm)
+{
+ u16 i;
+ struct bfa_uf_s *uf;
+
+ /*
+ * Claim block of memory for UF list
+ */
+ ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
+
+ /*
+ * Initialize UFs and queue it in UF free queue
+ */
+ for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
+ memset(uf, 0, sizeof(struct bfa_uf_s));
+ uf->bfa = ufm->bfa;
+ uf->uf_tag = i;
+ uf->pb_len = BFA_PER_UF_DMA_SZ;
+ uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
+ uf->buf_pa = ufm_pbs_pa(ufm, i);
+ list_add_tail(&uf->qe, &ufm->uf_free_q);
+ }
+
+ /*
+ * advance memory pointer
+ */
+ bfa_mem_kva_curp(ufm) = (u8 *) uf;
+}
+
+static void
+uf_mem_claim(struct bfa_uf_mod_s *ufm)
+{
+ claim_ufs(ufm);
+ claim_uf_post_msgs(ufm);
+}
+
+static void
+bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+ struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
+ u32 num_ufs = cfg->fwcfg.num_uf_bufs;
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_uf = 0;
+
+ nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
+ per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
+
+ bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
+ if (num_ufs >= per_seg_uf) {
+ num_ufs -= per_seg_uf;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_uf * BFA_PER_UF_DMA_SZ);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_ufs * BFA_PER_UF_DMA_SZ);
+ }
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
+ (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
+}
+
+static void
+bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+ ufm->bfa = bfa;
+ ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
+ INIT_LIST_HEAD(&ufm->uf_free_q);
+ INIT_LIST_HEAD(&ufm->uf_posted_q);
+ INIT_LIST_HEAD(&ufm->uf_unused_q);
+
+ uf_mem_claim(ufm);
+}
+
+static void
+bfa_uf_detach(struct bfa_s *bfa)
+{
+}
+
+static struct bfa_uf_s *
+bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
+{
+ struct bfa_uf_s *uf;
+
+ bfa_q_deq(&uf_mod->uf_free_q, &uf);
+ return uf;
+}
+
+static void
+bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
+{
+ list_add_tail(&uf->qe, &uf_mod->uf_free_q);
+}
+
+static bfa_status_t
+bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
+{
+ struct bfi_uf_buf_post_s *uf_post_msg;
+
+ uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
+ if (!uf_post_msg)
+ return BFA_STATUS_FAILED;
+
+ memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
+ sizeof(struct bfi_uf_buf_post_s));
+ bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
+
+ bfa_trc(ufm->bfa, uf->uf_tag);
+
+ list_add_tail(&uf->qe, &ufm->uf_posted_q);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
+{
+ struct bfa_uf_s *uf;
+
+ while ((uf = bfa_uf_get(uf_mod)) != NULL) {
+ if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
+ break;
+ }
+}
+
+static void
+uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
+{
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+ u16 uf_tag = m->buf_tag;
+ struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
+ struct bfa_uf_buf_s *uf_buf;
+ uint8_t *buf;
+ struct fchs_s *fchs;
+
+ uf_buf = (struct bfa_uf_buf_s *)
+ bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
+ buf = &uf_buf->d[0];
+
+ m->frm_len = be16_to_cpu(m->frm_len);
+ m->xfr_len = be16_to_cpu(m->xfr_len);
+
+ fchs = (struct fchs_s *)uf_buf;
+
+ list_del(&uf->qe); /* dequeue from posted queue */
+
+ uf->data_ptr = buf;
+ uf->data_len = m->xfr_len;
+
+ WARN_ON(uf->data_len < sizeof(struct fchs_s));
+
+ if (uf->data_len == sizeof(struct fchs_s)) {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
+ uf->data_len, (struct fchs_s *)buf);
+ } else {
+ u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
+ bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
+ BFA_PL_EID_RX, uf->data_len,
+ (struct fchs_s *)buf, pld_w0);
+ }
+
+ if (bfa->fcs)
+ __bfa_cb_uf_recv(uf, BFA_TRUE);
+ else
+ bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
+}
+
+static void
+bfa_uf_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_uf_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+ struct bfa_uf_s *uf;
+ struct list_head *qe, *qen;
+
+ /* Enqueue unused uf resources to free_q */
+ list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
+
+ list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
+ uf = (struct bfa_uf_s *) qe;
+ list_del(&uf->qe);
+ bfa_uf_put(ufm, uf);
+ }
+}
+
+static void
+bfa_uf_start(struct bfa_s *bfa)
+{
+ bfa_uf_post_all(BFA_UF_MOD(bfa));
+}
+
+/*
+ * Register handler for all unsolicted receive frames.
+ *
+ * @param[in] bfa BFA instance
+ * @param[in] ufrecv receive handler function
+ * @param[in] cbarg receive handler arg
+ */
+void
+bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
+{
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+ ufm->ufrecv = ufrecv;
+ ufm->cbarg = cbarg;
+}
+
+/*
+ * Free an unsolicited frame back to BFA.
+ *
+ * @param[in] uf unsolicited frame to be freed
+ *
+ * @return None
+ */
+void
+bfa_uf_free(struct bfa_uf_s *uf)
+{
+ bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
+ bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
+}
+
+
+
+/*
+ * uf_pub BFA uf module public functions
+ */
+void
+bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ bfa_trc(bfa, msg->mhdr.msg_id);
+
+ switch (msg->mhdr.msg_id) {
+ case BFI_UF_I2H_FRM_RCVD:
+ uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
+ break;
+
+ default:
+ bfa_trc(bfa, msg->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+void
+bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
+{
+ struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
+ bfa_q_deq_tail(&mod->uf_free_q, &qe);
+ list_add_tail(qe, &mod->uf_unused_q);
+ }
+}
+
+/*
+ * Dport forward declaration
+ */
+
+enum bfa_dport_test_state_e {
+ BFA_DPORT_ST_DISABLED = 0, /*!< dport is disabled */
+ BFA_DPORT_ST_INP = 1, /*!< test in progress */
+ BFA_DPORT_ST_COMP = 2, /*!< test complete successfully */
+ BFA_DPORT_ST_NO_SFP = 3, /*!< sfp is not present */
+ BFA_DPORT_ST_NOTSTART = 4, /*!< test not start dport is enabled */
+};
+
+/*
+ * BFA DPORT state machine events
+ */
+enum bfa_dport_sm_event {
+ BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
+ BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
+ BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
+ BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
+ BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
+ BFA_DPORT_SM_START = 6, /* re-start dport test */
+ BFA_DPORT_SM_REQFAIL = 7, /* request failure */
+ BFA_DPORT_SM_SCN = 8, /* state change notify frm fw */
+};
+
+static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event);
+static void bfa_dport_qresume(void *cbarg);
+static void bfa_dport_req_comp(struct bfa_dport_s *dport,
+ struct bfi_diag_dport_rsp_s *msg);
+static void bfa_dport_scn(struct bfa_dport_s *dport,
+ struct bfi_diag_dport_scn_s *msg);
+
+/*
+ * BFA fcdiag module
+ */
+#define BFA_DIAG_QTEST_TOV 1000 /* msec */
+
+/*
+ * Set port status to busy
+ */
+static void
+bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
+
+ if (fcdiag->lb.lock)
+ fcport->diag_busy = BFA_TRUE;
+ else
+ fcport->diag_busy = BFA_FALSE;
+}
+
+static void
+bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ fcdiag->bfa = bfa;
+ fcdiag->trcmod = bfa->trcmod;
+ /* The common DIAG attach bfa_diag_attach() will do all memory claim */
+ dport->bfa = bfa;
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
+ dport->cbfn = NULL;
+ dport->cbarg = NULL;
+ dport->test_state = BFA_DPORT_ST_DISABLED;
+ memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
+}
+
+static void
+bfa_fcdiag_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ bfa_trc(fcdiag, fcdiag->lb.lock);
+ if (fcdiag->lb.lock) {
+ fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
+ fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+ fcdiag->lb.lock = 0;
+ bfa_fcdiag_set_busy_status(fcdiag);
+ }
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
+}
+
+static void
+bfa_fcdiag_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_queuetest_timeout(void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = cbarg;
+ struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+
+ bfa_trc(fcdiag, fcdiag->qtest.all);
+ bfa_trc(fcdiag, fcdiag->qtest.count);
+
+ fcdiag->qtest.timer_active = 0;
+
+ res->status = BFA_STATUS_ETIMER;
+ res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+ if (fcdiag->qtest.all)
+ res->queue = fcdiag->qtest.all;
+
+ bfa_trc(fcdiag, BFA_STATUS_ETIMER);
+ fcdiag->qtest.status = BFA_STATUS_ETIMER;
+ fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+ fcdiag->qtest.lock = 0;
+}
+
+static bfa_status_t
+bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
+{
+ u32 i;
+ struct bfi_diag_qtest_req_s *req;
+
+ req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
+ if (!req)
+ return BFA_STATUS_DEVBUSY;
+
+ /* build host command */
+ bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
+ bfa_fn_lpu(fcdiag->bfa));
+
+ for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
+ req->data[i] = QTEST_PAT_DEFAULT;
+
+ bfa_trc(fcdiag, fcdiag->qtest.queue);
+ /* ring door bell */
+ bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
+ bfi_diag_qtest_rsp_t *rsp)
+{
+ struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+ bfa_status_t status = BFA_STATUS_OK;
+ int i;
+
+ /* Check timer, should still be active */
+ if (!fcdiag->qtest.timer_active) {
+ bfa_trc(fcdiag, fcdiag->qtest.timer_active);
+ return;
+ }
+
+ /* update count */
+ fcdiag->qtest.count--;
+
+ /* Check result */
+ for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
+ if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
+ res->status = BFA_STATUS_DATACORRUPTED;
+ break;
+ }
+ }
+
+ if (res->status == BFA_STATUS_OK) {
+ if (fcdiag->qtest.count > 0) {
+ status = bfa_fcdiag_queuetest_send(fcdiag);
+ if (status == BFA_STATUS_OK)
+ return;
+ else
+ res->status = status;
+ } else if (fcdiag->qtest.all > 0 &&
+ fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
+ fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+ fcdiag->qtest.queue++;
+ status = bfa_fcdiag_queuetest_send(fcdiag);
+ if (status == BFA_STATUS_OK)
+ return;
+ else
+ res->status = status;
+ }
+ }
+
+ /* Stop timer when we comp all queue */
+ if (fcdiag->qtest.timer_active) {
+ bfa_timer_stop(&fcdiag->qtest.timer);
+ fcdiag->qtest.timer_active = 0;
+ }
+ res->queue = fcdiag->qtest.queue;
+ res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+ bfa_trc(fcdiag, res->count);
+ bfa_trc(fcdiag, res->status);
+ fcdiag->qtest.status = res->status;
+ fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+ fcdiag->qtest.lock = 0;
+}
+
+static void
+bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
+ struct bfi_diag_lb_rsp_s *rsp)
+{
+ struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
+
+ res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
+ res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
+ res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
+ res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
+ res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
+ res->status = rsp->res.status;
+ fcdiag->lb.status = rsp->res.status;
+ bfa_trc(fcdiag, fcdiag->lb.status);
+ fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+ fcdiag->lb.lock = 0;
+ bfa_fcdiag_set_busy_status(fcdiag);
+}
+
+static bfa_status_t
+bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
+ struct bfa_diag_loopback_s *loopback)
+{
+ struct bfi_diag_lb_req_s *lb_req;
+
+ lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
+ if (!lb_req)
+ return BFA_STATUS_DEVBUSY;
+
+ /* build host command */
+ bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
+ bfa_fn_lpu(fcdiag->bfa));
+
+ lb_req->lb_mode = loopback->lb_mode;
+ lb_req->speed = loopback->speed;
+ lb_req->loopcnt = loopback->loopcnt;
+ lb_req->pattern = loopback->pattern;
+
+ /* ring door bell */
+ bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
+
+ bfa_trc(fcdiag, loopback->lb_mode);
+ bfa_trc(fcdiag, loopback->speed);
+ bfa_trc(fcdiag, loopback->loopcnt);
+ bfa_trc(fcdiag, loopback->pattern);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * cpe/rme intr handler
+ */
+void
+bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+ switch (msg->mhdr.msg_id) {
+ case BFI_DIAG_I2H_LOOPBACK:
+ bfa_fcdiag_loopback_comp(fcdiag,
+ (struct bfi_diag_lb_rsp_s *) msg);
+ break;
+ case BFI_DIAG_I2H_QTEST:
+ bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
+ break;
+ case BFI_DIAG_I2H_DPORT:
+ bfa_dport_req_comp(&fcdiag->dport,
+ (struct bfi_diag_dport_rsp_s *)msg);
+ break;
+ case BFI_DIAG_I2H_DPORT_SCN:
+ bfa_dport_scn(&fcdiag->dport,
+ (struct bfi_diag_dport_scn_s *)msg);
+ break;
+ default:
+ bfa_trc(fcdiag, msg->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Loopback test
+ *
+ * @param[in] *bfa - bfa data struct
+ * @param[in] opmode - port operation mode
+ * @param[in] speed - port speed
+ * @param[in] lpcnt - loop count
+ * @param[in] pat - pattern to build packet
+ * @param[in] *result - pt to bfa_diag_loopback_result_t data struct
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback functioin arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
+ enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+ struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
+ void *cbarg)
+{
+ struct bfa_diag_loopback_s loopback;
+ struct bfa_port_attr_s attr;
+ bfa_status_t status;
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* if port is PBC disabled, return error */
+ if (bfa_fcport_is_pbcdisabled(bfa)) {
+ bfa_trc(fcdiag, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
+ bfa_trc(fcdiag, opmode);
+ return BFA_STATUS_PORT_NOT_DISABLED;
+ }
+
+ /*
+ * Check if input speed is supported by the port mode
+ */
+ if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO)) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ bfa_fcport_get_attr(bfa, &attr);
+ bfa_trc(fcdiag, attr.speed_supported);
+ if (speed > attr.speed_supported)
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+
+ /*
+ * For CT2, 1G is not supported
+ */
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+
+ /* For Mezz card, port speed entered needs to be checked */
+ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
+ if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO))
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS)
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+ /* check to see if fcport is dport */
+ if (bfa_fcport_is_dport(bfa)) {
+ bfa_trc(fcdiag, fcdiag->lb.lock);
+ return BFA_STATUS_DPORT_ENABLED;
+ }
+ /* check to see if there is another destructive diag cmd running */
+ if (fcdiag->lb.lock) {
+ bfa_trc(fcdiag, fcdiag->lb.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ fcdiag->lb.lock = 1;
+ loopback.lb_mode = opmode;
+ loopback.speed = speed;
+ loopback.loopcnt = lpcnt;
+ loopback.pattern = pat;
+ fcdiag->lb.result = result;
+ fcdiag->lb.cbfn = cbfn;
+ fcdiag->lb.cbarg = cbarg;
+ memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
+ bfa_fcdiag_set_busy_status(fcdiag);
+
+ /* Send msg to fw */
+ status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
+ return status;
+}
+
+/*
+ * DIAG queue test command
+ *
+ * @param[in] *bfa - bfa data struct
+ * @param[in] force - 1: don't do ioc op checking
+ * @param[in] queue - queue no. to test
+ * @param[in] *result - pt to bfa_diag_qtest_result_t data struct
+ * @param[in] cbfn - callback function
+ * @param[in] *cbarg - callback functioin arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
+ struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
+ void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ bfa_status_t status;
+ bfa_trc(fcdiag, force);
+ bfa_trc(fcdiag, queue);
+
+ if (!force && !bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* check to see if there is another destructive diag cmd running */
+ if (fcdiag->qtest.lock) {
+ bfa_trc(fcdiag, fcdiag->qtest.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ /* Initialization */
+ fcdiag->qtest.lock = 1;
+ fcdiag->qtest.cbfn = cbfn;
+ fcdiag->qtest.cbarg = cbarg;
+ fcdiag->qtest.result = result;
+ fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+
+ /* Init test results */
+ fcdiag->qtest.result->status = BFA_STATUS_OK;
+ fcdiag->qtest.result->count = 0;
+
+ /* send */
+ if (queue < BFI_IOC_MAX_CQS) {
+ fcdiag->qtest.result->queue = (u8)queue;
+ fcdiag->qtest.queue = (u8)queue;
+ fcdiag->qtest.all = 0;
+ } else {
+ fcdiag->qtest.result->queue = 0;
+ fcdiag->qtest.queue = 0;
+ fcdiag->qtest.all = 1;
+ }
+ status = bfa_fcdiag_queuetest_send(fcdiag);
+
+ /* Start a timer */
+ if (status == BFA_STATUS_OK) {
+ bfa_timer_start(bfa, &fcdiag->qtest.timer,
+ bfa_fcdiag_queuetest_timeout, fcdiag,
+ BFA_DIAG_QTEST_TOV);
+ fcdiag->qtest.timer_active = 1;
+ }
+ return status;
+}
+
+/*
+ * DIAG PLB is running
+ *
+ * @param[in] *bfa - bfa data struct
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
+}
+
+/*
+ * D-port
+ */
+#define bfa_dport_result_start(__dport, __mode) do { \
+ (__dport)->result.start_time = bfa_get_log_time(); \
+ (__dport)->result.status = DPORT_TEST_ST_INPRG; \
+ (__dport)->result.mode = (__mode); \
+ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
+ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
+ (__dport)->result.lpcnt = (__dport)->lpcnt; \
+} while (0)
+
+static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
+ enum bfi_dport_req req);
+static void
+bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
+{
+ if (dport->cbfn != NULL) {
+ dport->cbfn(dport->cbarg, bfa_status);
+ dport->cbfn = NULL;
+ dport->cbarg = NULL;
+ }
+}
+
+static void
+bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_ENABLE:
+ bfa_fcport_dportenable(dport->bfa);
+ if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
+ bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+ else
+ bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
+ break;
+
+ case BFA_DPORT_SM_DISABLE:
+ /* Already disabled */
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ /* ignore */
+ break;
+
+ case BFA_DPORT_SM_SCN:
+ if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) {
+ bfa_fcport_ddportenable(dport->bfa);
+ dport->dynamic = BFA_TRUE;
+ dport->test_state = BFA_DPORT_ST_NOTSTART;
+ bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+ } else {
+ bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+ WARN_ON(1);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_QRESUME:
+ bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+ bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_reqq_wcancel(&dport->reqq_wait);
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_FWRSP:
+ memset(&dport->result, 0,
+ sizeof(struct bfa_diag_dport_result_s));
+ if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
+ dport->test_state = BFA_DPORT_ST_NO_SFP;
+ } else {
+ dport->test_state = BFA_DPORT_ST_INP;
+ bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
+ }
+ bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+ break;
+
+ case BFA_DPORT_SM_REQFAIL:
+ dport->test_state = BFA_DPORT_ST_DISABLED;
+ bfa_fcport_dportdisable(dport->bfa);
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_START:
+ if (bfa_dport_send_req(dport, BFI_DPORT_START))
+ bfa_sm_set_state(dport, bfa_dport_sm_starting);
+ else
+ bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
+ break;
+
+ case BFA_DPORT_SM_DISABLE:
+ bfa_fcport_dportdisable(dport->bfa);
+ if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
+ bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+ else
+ bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ break;
+
+ case BFA_DPORT_SM_SCN:
+ switch (dport->i2hmsg.scn.state) {
+ case BFI_DPORT_SCN_TESTCOMP:
+ dport->test_state = BFA_DPORT_ST_COMP;
+ break;
+
+ case BFI_DPORT_SCN_TESTSTART:
+ dport->test_state = BFA_DPORT_ST_INP;
+ break;
+
+ case BFI_DPORT_SCN_TESTSKIP:
+ case BFI_DPORT_SCN_SUBTESTSTART:
+ /* no state change */
+ break;
+
+ case BFI_DPORT_SCN_SFP_REMOVED:
+ dport->test_state = BFA_DPORT_ST_NO_SFP;
+ break;
+
+ case BFI_DPORT_SCN_DDPORT_DISABLE:
+ bfa_fcport_ddportdisable(dport->bfa);
+
+ if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
+ bfa_sm_set_state(dport,
+ bfa_dport_sm_dynamic_disabling);
+ else
+ bfa_sm_set_state(dport,
+ bfa_dport_sm_dynamic_disabling_qwait);
+ break;
+
+ case BFI_DPORT_SCN_FCPORT_DISABLE:
+ bfa_fcport_ddportdisable(dport->bfa);
+
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ dport->dynamic = BFA_FALSE;
+ break;
+
+ default:
+ bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+ bfa_sm_fault(dport->bfa, event);
+ }
+ break;
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_QRESUME:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+ bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_reqq_wcancel(&dport->reqq_wait);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+ break;
+
+ case BFA_DPORT_SM_SCN:
+ /* ignore */
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_FWRSP:
+ dport->test_state = BFA_DPORT_ST_DISABLED;
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+ break;
+
+ case BFA_DPORT_SM_SCN:
+ /* no state change */
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_QRESUME:
+ bfa_sm_set_state(dport, bfa_dport_sm_starting);
+ bfa_dport_send_req(dport, BFI_DPORT_START);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_reqq_wcancel(&dport->reqq_wait);
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_FWRSP:
+ memset(&dport->result, 0,
+ sizeof(struct bfa_diag_dport_result_s));
+ if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
+ dport->test_state = BFA_DPORT_ST_NO_SFP;
+ } else {
+ dport->test_state = BFA_DPORT_ST_INP;
+ bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
+ }
+ /* fall thru */
+
+ case BFA_DPORT_SM_REQFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_SCN:
+ switch (dport->i2hmsg.scn.state) {
+ case BFI_DPORT_SCN_DDPORT_DISABLED:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ dport->dynamic = BFA_FALSE;
+ bfa_fcport_enable(dport->bfa);
+ break;
+
+ default:
+ bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+ bfa_sm_fault(dport->bfa, event);
+
+ }
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static void
+bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
+ enum bfa_dport_sm_event event)
+{
+ bfa_trc(dport->bfa, event);
+
+ switch (event) {
+ case BFA_DPORT_SM_QRESUME:
+ bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
+ bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
+ break;
+
+ case BFA_DPORT_SM_HWFAIL:
+ bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+ bfa_reqq_wcancel(&dport->reqq_wait);
+ bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+ break;
+
+ case BFA_DPORT_SM_SCN:
+ /* ignore */
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, event);
+ }
+}
+
+static bfa_boolean_t
+bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
+{
+ struct bfi_diag_dport_req_s *m;
+
+ /*
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
+ if (!m) {
+ bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
+ bfa_fn_lpu(dport->bfa));
+ m->req = req;
+ if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
+ m->lpcnt = cpu_to_be32(dport->lpcnt);
+ m->payload = cpu_to_be32(dport->payload);
+ }
+
+ /*
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
+
+ return BFA_TRUE;
+}
+
+static void
+bfa_dport_qresume(void *cbarg)
+{
+ struct bfa_dport_s *dport = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
+}
+
+static void
+bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
+{
+ msg->status = cpu_to_be32(msg->status);
+ dport->i2hmsg.rsp.status = msg->status;
+ dport->rp_pwwn = msg->pwwn;
+ dport->rp_nwwn = msg->nwwn;
+
+ if ((msg->status == BFA_STATUS_OK) ||
+ (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
+ bfa_trc(dport->bfa, msg->status);
+ bfa_trc(dport->bfa, dport->rp_pwwn);
+ bfa_trc(dport->bfa, dport->rp_nwwn);
+ bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
+
+ } else {
+ bfa_trc(dport->bfa, msg->status);
+ bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
+ }
+ bfa_cb_fcdiag_dport(dport, msg->status);
+}
+
+static bfa_boolean_t
+bfa_dport_is_sending_req(struct bfa_dport_s *dport)
+{
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_starting) ||
+ bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
+ return BFA_TRUE;
+ } else {
+ return BFA_FALSE;
+ }
+}
+
+static void
+bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
+{
+ int i;
+ uint8_t subtesttype;
+
+ bfa_trc(dport->bfa, msg->state);
+ dport->i2hmsg.scn.state = msg->state;
+
+ switch (dport->i2hmsg.scn.state) {
+ case BFI_DPORT_SCN_TESTCOMP:
+ dport->result.end_time = bfa_get_log_time();
+ bfa_trc(dport->bfa, dport->result.end_time);
+
+ dport->result.status = msg->info.testcomp.status;
+ bfa_trc(dport->bfa, dport->result.status);
+
+ dport->result.roundtrip_latency =
+ cpu_to_be32(msg->info.testcomp.latency);
+ dport->result.est_cable_distance =
+ cpu_to_be32(msg->info.testcomp.distance);
+ dport->result.buffer_required =
+ be16_to_cpu(msg->info.testcomp.numbuffer);
+
+ dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
+ dport->result.speed = msg->info.testcomp.speed;
+
+ bfa_trc(dport->bfa, dport->result.roundtrip_latency);
+ bfa_trc(dport->bfa, dport->result.est_cable_distance);
+ bfa_trc(dport->bfa, dport->result.buffer_required);
+ bfa_trc(dport->bfa, dport->result.frmsz);
+ bfa_trc(dport->bfa, dport->result.speed);
+
+ for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
+ dport->result.subtest[i].status =
+ msg->info.testcomp.subtest_status[i];
+ bfa_trc(dport->bfa, dport->result.subtest[i].status);
+ }
+ break;
+
+ case BFI_DPORT_SCN_TESTSKIP:
+ case BFI_DPORT_SCN_DDPORT_ENABLE:
+ memset(&dport->result, 0,
+ sizeof(struct bfa_diag_dport_result_s));
+ break;
+
+ case BFI_DPORT_SCN_TESTSTART:
+ memset(&dport->result, 0,
+ sizeof(struct bfa_diag_dport_result_s));
+ dport->rp_pwwn = msg->info.teststart.pwwn;
+ dport->rp_nwwn = msg->info.teststart.nwwn;
+ dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
+ bfa_dport_result_start(dport, msg->info.teststart.mode);
+ break;
+
+ case BFI_DPORT_SCN_SUBTESTSTART:
+ subtesttype = msg->info.teststart.type;
+ dport->result.subtest[subtesttype].start_time =
+ bfa_get_log_time();
+ dport->result.subtest[subtesttype].status =
+ DPORT_TEST_ST_INPRG;
+
+ bfa_trc(dport->bfa, subtesttype);
+ bfa_trc(dport->bfa,
+ dport->result.subtest[subtesttype].start_time);
+ break;
+
+ case BFI_DPORT_SCN_SFP_REMOVED:
+ case BFI_DPORT_SCN_DDPORT_DISABLED:
+ case BFI_DPORT_SCN_DDPORT_DISABLE:
+ case BFI_DPORT_SCN_FCPORT_DISABLE:
+ dport->result.status = DPORT_TEST_ST_IDLE;
+ break;
+
+ default:
+ bfa_sm_fault(dport->bfa, msg->state);
+ }
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
+}
+
+/*
+ * Dport enable
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+bfa_status_t
+bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+ bfa_cb_diag_t cbfn, void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ /*
+ * Dport is not support in MEZZ card
+ */
+ if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
+ bfa_trc(dport->bfa, BFA_STATUS_PBC);
+ return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+ }
+
+ /*
+ * Dport is supported in CT2 or above
+ */
+ if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
+ bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
+ return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+ }
+
+ /*
+ * Check to see if IOC is down
+ */
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* if port is PBC disabled, return error */
+ if (bfa_fcport_is_pbcdisabled(bfa)) {
+ bfa_trc(dport->bfa, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ /*
+ * Check if port mode is FC port
+ */
+ if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
+ bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
+ return BFA_STATUS_CMD_NOTSUPP_CNA;
+ }
+
+ /*
+ * Check if port is in LOOP mode
+ */
+ if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
+ (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_TOPOLOGY_LOOP;
+ }
+
+ /*
+ * Check if port is TRUNK mode
+ */
+ if (bfa_fcport_is_trunk_enabled(bfa)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_ERROR_TRUNK_ENABLED;
+ }
+
+ /*
+ * Check if diag loopback is running
+ */
+ if (bfa_fcdiag_lb_is_running(bfa)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DIAG_BUSY;
+ }
+
+ /*
+ * Check to see if port is disable or in dport state
+ */
+ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+ (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_PORT_NOT_DISABLED;
+ }
+
+ /*
+ * Check if dport is in dynamic mode
+ */
+ if (dport->dynamic)
+ return BFA_STATUS_DDPORT_ERR;
+
+ /*
+ * Check if dport is busy
+ */
+ if (bfa_dport_is_sending_req(dport))
+ return BFA_STATUS_DEVBUSY;
+
+ /*
+ * Check if dport is already enabled
+ */
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DPORT_ENABLED;
+ }
+
+ bfa_trc(dport->bfa, lpcnt);
+ bfa_trc(dport->bfa, pat);
+ dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
+ dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
+ dport->cbfn = cbfn;
+ dport->cbarg = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Dport disable
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+bfa_status_t
+bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ if (bfa_ioc_is_disabled(&bfa->ioc))
+ return BFA_STATUS_IOC_DISABLED;
+
+ /* if port is PBC disabled, return error */
+ if (bfa_fcport_is_pbcdisabled(bfa)) {
+ bfa_trc(dport->bfa, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ /*
+ * Check if dport is in dynamic mode
+ */
+ if (dport->dynamic) {
+ return BFA_STATUS_DDPORT_ERR;
+ }
+
+ /*
+ * Check to see if port is disable or in dport state
+ */
+ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+ (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_PORT_NOT_DISABLED;
+ }
+
+ /*
+ * Check if dport is busy
+ */
+ if (bfa_dport_is_sending_req(dport))
+ return BFA_STATUS_DEVBUSY;
+
+ /*
+ * Check if dport is already disabled
+ */
+ if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DPORT_DISABLED;
+ }
+
+ dport->cbfn = cbfn;
+ dport->cbarg = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Dport start -- restart dport test
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+bfa_status_t
+bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+ bfa_cb_diag_t cbfn, void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ /*
+ * Check to see if IOC is down
+ */
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * Check if dport is in dynamic mode
+ */
+ if (dport->dynamic)
+ return BFA_STATUS_DDPORT_ERR;
+
+ /*
+ * Check if dport is busy
+ */
+ if (bfa_dport_is_sending_req(dport))
+ return BFA_STATUS_DEVBUSY;
+
+ /*
+ * Check if dport is in enabled state.
+ * Test can only be restart when previous test has completed
+ */
+ if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DPORT_DISABLED;
+
+ } else {
+ if (dport->test_state == BFA_DPORT_ST_NO_SFP)
+ return BFA_STATUS_DPORT_INV_SFP;
+
+ if (dport->test_state == BFA_DPORT_ST_INP)
+ return BFA_STATUS_DEVBUSY;
+
+ WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
+ }
+
+ bfa_trc(dport->bfa, lpcnt);
+ bfa_trc(dport->bfa, pat);
+
+ dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
+ dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
+
+ dport->cbfn = cbfn;
+ dport->cbarg = cbarg;
+
+ bfa_sm_send_event(dport, BFA_DPORT_SM_START);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Dport show -- return dport test result
+ *
+ * @param[in] *bfa - bfa data struct
+ */
+bfa_status_t
+bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ struct bfa_dport_s *dport = &fcdiag->dport;
+
+ /*
+ * Check to see if IOC is down
+ */
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * Check if dport is busy
+ */
+ if (bfa_dport_is_sending_req(dport))
+ return BFA_STATUS_DEVBUSY;
+
+ /*
+ * Check if dport is in enabled state.
+ */
+ if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+ bfa_trc(dport->bfa, 0);
+ return BFA_STATUS_DPORT_DISABLED;
+
+ }
+
+ /*
+ * Check if there is SFP
+ */
+ if (dport->test_state == BFA_DPORT_ST_NO_SFP)
+ return BFA_STATUS_DPORT_INV_SFP;
+
+ memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
new file mode 100644
index 000000000..ef0736599
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -0,0 +1,763 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_SVC_H__
+#define __BFA_SVC_H__
+
+#include "bfa_cs.h"
+#include "bfi_ms.h"
+
+
+/*
+ * Scatter-gather DMA related defines
+ */
+#define BFA_SGPG_MIN (16)
+#define BFA_SGPG_MAX (8192)
+
+/*
+ * Alignment macro for SG page allocation
+ */
+#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
+ & ~(sizeof(struct bfi_sgpg_s) - 1))
+
+struct bfa_sgpg_wqe_s {
+ struct list_head qe; /* queue sg page element */
+ int nsgpg; /* pages to be allocated */
+ int nsgpg_total; /* total pages required */
+ void (*cbfn) (void *cbarg); /* callback function */
+ void *cbarg; /* callback arg */
+ struct list_head sgpg_q; /* queue of alloced sgpgs */
+};
+
+struct bfa_sgpg_s {
+ struct list_head qe; /* queue sg page element */
+ struct bfi_sgpg_s *sgpg; /* va of SG page */
+ union bfi_addr_u sgpg_pa; /* pa of SG page */
+};
+
+/*
+ * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
+ * SG pages required.
+ */
+#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
+
+/* Max SGPG dma segs required */
+#define BFA_SGPG_DMA_SEGS \
+ BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
+
+struct bfa_sgpg_mod_s {
+ struct bfa_s *bfa;
+ int num_sgpgs; /* number of SG pages */
+ int free_sgpgs; /* number of free SG pages */
+ struct list_head sgpg_q; /* queue of free SG pages */
+ struct list_head sgpg_wait_q; /* wait queue for SG pages */
+ struct bfa_mem_dma_s dma_seg[BFA_SGPG_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
+};
+#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
+#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
+
+bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
+ int nsgpgs);
+void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs);
+void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
+ void (*cbfn) (void *cbarg), void *cbarg);
+void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
+void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
+
+
+/*
+ * FCXP related defines
+ */
+#define BFA_FCXP_MIN (1)
+#define BFA_FCXP_MAX (256)
+#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
+#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
+
+/* Max FCXP dma segs required */
+#define BFA_FCXP_DMA_SEGS \
+ BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX, \
+ (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
+
+struct bfa_fcxp_mod_s {
+ struct bfa_s *bfa; /* backpointer to BFA */
+ struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
+ u16 num_fcxps; /* max num FCXP requests */
+ struct list_head fcxp_req_free_q; /* free FCXPs used for sending req */
+ struct list_head fcxp_rsp_free_q; /* free FCXPs used for sending req */
+ struct list_head fcxp_active_q; /* active FCXPs */
+ struct list_head req_wait_q; /* wait queue for free req_fcxp */
+ struct list_head rsp_wait_q; /* wait queue for free rsp_fcxp */
+ struct list_head fcxp_req_unused_q; /* unused req_fcxps */
+ struct list_head fcxp_rsp_unused_q; /* unused rsp_fcxps */
+ u32 req_pld_sz;
+ u32 rsp_pld_sz;
+ struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
+#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
+#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
+
+typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
+ void *cb_arg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs);
+
+typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
+typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
+typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
+ void *cbarg, enum bfa_status req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs);
+typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
+
+
+
+/*
+ * Information needed for a FCXP request
+ */
+struct bfa_fcxp_req_info_s {
+ struct bfa_rport_s *bfa_rport;
+ /* Pointer to the bfa rport that was
+ * returned from bfa_rport_create().
+ * This could be left NULL for WKA or
+ * for FCXP interactions before the
+ * rport nexus is established
+ */
+ struct fchs_s fchs; /* request FC header structure */
+ u8 cts; /* continuous sequence */
+ u8 class; /* FC class for the request/response */
+ u16 max_frmsz; /* max send frame size */
+ u16 vf_id; /* vsan tag if applicable */
+ u8 lp_tag; /* lport tag */
+ u32 req_tot_len; /* request payload total length */
+};
+
+struct bfa_fcxp_rsp_info_s {
+ struct fchs_s rsp_fchs;
+ /* Response frame's FC header will
+ * be sent back in this field */
+ u8 rsp_timeout;
+ /* timeout in seconds, 0-no response */
+ u8 rsvd2[3];
+ u32 rsp_maxlen; /* max response length expected */
+};
+
+struct bfa_fcxp_s {
+ struct list_head qe; /* fcxp queue element */
+ bfa_sm_t sm; /* state machine */
+ void *caller; /* driver or fcs */
+ struct bfa_fcxp_mod_s *fcxp_mod;
+ /* back pointer to fcxp mod */
+ u16 fcxp_tag; /* internal tag */
+ struct bfa_fcxp_req_info_s req_info;
+ /* request info */
+ struct bfa_fcxp_rsp_info_s rsp_info;
+ /* response info */
+ u8 use_ireqbuf; /* use internal req buf */
+ u8 use_irspbuf; /* use internal rsp buf */
+ u32 nreq_sgles; /* num request SGLEs */
+ u32 nrsp_sgles; /* num response SGLEs */
+ struct list_head req_sgpg_q; /* SG pages for request buf */
+ struct list_head req_sgpg_wqe; /* wait queue for req SG page */
+ struct list_head rsp_sgpg_q; /* SG pages for response buf */
+ struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
+
+ bfa_fcxp_get_sgaddr_t req_sga_cbfn;
+ /* SG elem addr user function */
+ bfa_fcxp_get_sglen_t req_sglen_cbfn;
+ /* SG elem len user function */
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
+ /* SG elem addr user function */
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
+ /* SG elem len user function */
+ bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
+ void *send_cbarg; /* callback arg */
+ struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
+ /* req SG elems */
+ struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
+ /* rsp SG elems */
+ u8 rsp_status; /* comp: rsp status */
+ u32 rsp_len; /* comp: actual response len */
+ u32 residue_len; /* comp: residual rsp length */
+ struct fchs_s rsp_fchs; /* comp: response fchs */
+ struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
+ struct bfa_reqq_wait_s reqq_wqe;
+ bfa_boolean_t reqq_waiting;
+ bfa_boolean_t req_rsp; /* Used to track req/rsp fcxp */
+};
+
+struct bfa_fcxp_wqe_s {
+ struct list_head qe;
+ bfa_fcxp_alloc_cbfn_t alloc_cbfn;
+ void *alloc_cbarg;
+ void *caller;
+ struct bfa_s *bfa;
+ int nreq_sgles;
+ int nrsp_sgles;
+ bfa_fcxp_get_sgaddr_t req_sga_cbfn;
+ bfa_fcxp_get_sglen_t req_sglen_cbfn;
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
+};
+
+#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
+#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
+#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
+
+#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
+ bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
+ (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
+
+/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
+#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
+ (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
+ (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
+ (_fcxp)->fcxp_mod->req_pld_sz)
+
+void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+
+/*
+ * RPORT related defines
+ */
+enum bfa_rport_event {
+ BFA_RPORT_SM_CREATE = 1, /* rport create event */
+ BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
+ BFA_RPORT_SM_ONLINE = 3, /* rport is online */
+ BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
+ BFA_RPORT_SM_FWRSP = 5, /* firmware response */
+ BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
+ BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
+ BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
+ BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
+};
+
+#define BFA_RPORT_MIN 4
+
+struct bfa_rport_mod_s {
+ struct bfa_rport_s *rps_list; /* list of rports */
+ struct list_head rp_free_q; /* free bfa_rports */
+ struct list_head rp_active_q; /* free bfa_rports */
+ struct list_head rp_unused_q; /* unused bfa rports */
+ u16 num_rports; /* number of rports */
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
+#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
+
+/*
+ * Convert rport tag to RPORT
+ */
+#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
+ (BFA_RPORT_MOD(__bfa)->rps_list + \
+ ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
+
+/*
+ * protected functions
+ */
+void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw);
+
+/*
+ * BFA rport information.
+ */
+struct bfa_rport_info_s {
+ u16 max_frmsz; /* max rcv pdu size */
+ u32 pid:24, /* remote port ID */
+ lp_tag:8; /* tag */
+ u32 local_pid:24, /* local port ID */
+ cisc:8; /* CIRO supported */
+ u8 fc_class; /* supported FC classes. enum fc_cos */
+ u8 vf_en; /* virtual fabric enable */
+ u16 vf_id; /* virtual fabric ID */
+ enum bfa_port_speed speed; /* Rport's current speed */
+};
+
+/*
+ * BFA rport data structure
+ */
+struct bfa_rport_s {
+ struct list_head qe; /* queue element */
+ bfa_sm_t sm; /* state machine */
+ struct bfa_s *bfa; /* backpointer to BFA */
+ void *rport_drv; /* fcs/driver rport object */
+ u16 fw_handle; /* firmware rport handle */
+ u16 rport_tag; /* BFA rport tag */
+ u8 lun_mask; /* LUN mask flag */
+ struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
+ struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
+ struct bfa_rport_qos_attr_s qos_attr;
+ union a {
+ bfa_status_t status; /* f/w status */
+ void *fw_msg; /* QoS scn event */
+ } event_arg;
+};
+#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
+
+
+/*
+ * UF - unsolicited receive related defines
+ */
+
+#define BFA_UF_MIN (4)
+#define BFA_UF_MAX (256)
+
+struct bfa_uf_s {
+ struct list_head qe; /* queue element */
+ struct bfa_s *bfa; /* bfa instance */
+ u16 uf_tag; /* identifying tag fw msgs */
+ u16 vf_id;
+ u16 src_rport_handle;
+ u16 rsvd;
+ u8 *data_ptr;
+ u16 data_len; /* actual receive length */
+ u16 pb_len; /* posted buffer length */
+ void *buf_kva; /* buffer virtual address */
+ u64 buf_pa; /* buffer physical address */
+ struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
+ struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
+};
+
+/*
+ * Callback prototype for unsolicited frame receive handler.
+ *
+ * @param[in] cbarg callback arg for receive handler
+ * @param[in] uf unsolicited frame descriptor
+ *
+ * @return None
+ */
+typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
+
+#define BFA_UF_BUFSZ (2 * 1024 + 256)
+
+struct bfa_uf_buf_s {
+ u8 d[BFA_UF_BUFSZ];
+};
+
+#define BFA_PER_UF_DMA_SZ \
+ (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
+
+/* Max UF dma segs required */
+#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
+
+struct bfa_uf_mod_s {
+ struct bfa_s *bfa; /* back pointer to BFA */
+ struct bfa_uf_s *uf_list; /* array of UFs */
+ u16 num_ufs; /* num unsolicited rx frames */
+ struct list_head uf_free_q; /* free UFs */
+ struct list_head uf_posted_q; /* UFs posted to IOC */
+ struct list_head uf_unused_q; /* unused UF's */
+ struct bfi_uf_buf_post_s *uf_buf_posts;
+ /* pre-built UF post msgs */
+ bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
+ void *cbarg; /* uf receive handler arg */
+ struct bfa_mem_dma_s dma_seg[BFA_UF_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
+#define BFA_MEM_UF_KVA(__bfa) (&(BFA_UF_MOD(__bfa)->kva_seg))
+
+#define ufm_pbs_pa(_ufmod, _uftag) \
+ bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
+
+void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
+
+/*
+ * LPS - bfa lport login/logout service interface
+ */
+struct bfa_lps_s {
+ struct list_head qe; /* queue element */
+ struct bfa_s *bfa; /* parent bfa instance */
+ bfa_sm_t sm; /* finite state machine */
+ u8 bfa_tag; /* lport tag */
+ u8 fw_tag; /* lport fw tag */
+ u8 reqq; /* lport request queue */
+ u8 alpa; /* ALPA for loop topologies */
+ u32 lp_pid; /* lport port ID */
+ bfa_boolean_t fdisc; /* snd FDISC instead of FLOGI */
+ bfa_boolean_t auth_en; /* enable authentication */
+ bfa_boolean_t auth_req; /* authentication required */
+ bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
+ bfa_boolean_t fport; /* attached peer is F_PORT */
+ bfa_boolean_t brcd_switch; /* attached peer is brcd sw */
+ bfa_status_t status; /* login status */
+ u16 pdusz; /* max receive PDU size */
+ u16 pr_bbcred; /* BB_CREDIT from peer */
+ u8 lsrjt_rsn; /* LSRJT reason */
+ u8 lsrjt_expl; /* LSRJT explanation */
+ u8 lun_mask; /* LUN mask flag */
+ wwn_t pwwn; /* port wwn of lport */
+ wwn_t nwwn; /* node wwn of lport */
+ wwn_t pr_pwwn; /* port wwn of lport peer */
+ wwn_t pr_nwwn; /* node wwn of lport peer */
+ mac_t lp_mac; /* fpma/spma MAC for lport */
+ mac_t fcf_mac; /* FCF MAC of lport */
+ struct bfa_reqq_wait_s wqe; /* request wait queue element */
+ void *uarg; /* user callback arg */
+ struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
+ struct bfi_lps_login_rsp_s *loginrsp;
+ bfa_eproto_status_t ext_status;
+};
+
+struct bfa_lps_mod_s {
+ struct list_head lps_free_q;
+ struct list_head lps_active_q;
+ struct list_head lps_login_q;
+ struct bfa_lps_s *lps_arr;
+ int num_lps;
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
+#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
+#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg))
+
+/*
+ * external functions
+ */
+void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+
+/*
+ * FCPORT related defines
+ */
+
+#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
+
+/*
+ * Link notification data structure
+ */
+struct bfa_fcport_ln_s {
+ struct bfa_fcport_s *fcport;
+ bfa_sm_t sm;
+ struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
+ enum bfa_port_linkstate ln_event; /* ln event for callback */
+};
+
+struct bfa_fcport_trunk_s {
+ struct bfa_trunk_attr_s attr;
+};
+
+/*
+ * BFA FC port data structure
+ */
+struct bfa_fcport_s {
+ struct bfa_s *bfa; /* parent BFA instance */
+ bfa_sm_t sm; /* port state machine */
+ wwn_t nwwn; /* node wwn of physical port */
+ wwn_t pwwn; /* port wwn of physical oprt */
+ enum bfa_port_speed speed_sup;
+ /* supported speeds */
+ enum bfa_port_speed speed; /* current speed */
+ enum bfa_port_topology topology; /* current topology */
+ u8 rsvd[3];
+ u8 myalpa; /* my ALPA in LOOP topology */
+ u8 alpabm_valid; /* alpa bitmap valid or not */
+ struct fc_alpabm_s alpabm; /* alpa bitmap */
+ struct bfa_port_cfg_s cfg; /* current port configuration */
+ bfa_boolean_t use_flash_cfg; /* get port cfg from flash */
+ struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
+ struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
+ struct bfa_reqq_wait_s reqq_wait;
+ /* to wait for room in reqq */
+ struct bfa_reqq_wait_s svcreq_wait;
+ /* to wait for room in reqq */
+ struct bfa_reqq_wait_s stats_reqq_wait;
+ /* to wait for room in reqq (stats) */
+ void *event_cbarg;
+ void (*event_cbfn) (void *cbarg,
+ enum bfa_port_linkstate event);
+ union {
+ union bfi_fcport_i2h_msg_u i2hmsg;
+ } event_arg;
+ void *bfad; /* BFA driver handle */
+ struct bfa_fcport_ln_s ln; /* Link Notification */
+ struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
+ struct bfa_timer_s timer; /* timer */
+ u32 msgtag; /* fimrware msg tag for reply */
+ u8 *stats_kva;
+ u64 stats_pa;
+ union bfa_fcport_stats_u *stats;
+ bfa_status_t stats_status; /* stats/statsclr status */
+ struct list_head stats_pending_q;
+ struct list_head statsclr_pending_q;
+ bfa_boolean_t stats_qfull;
+ u32 stats_reset_time; /* stats reset time stamp */
+ bfa_boolean_t diag_busy; /* diag busy status */
+ bfa_boolean_t beacon; /* port beacon status */
+ bfa_boolean_t link_e2e_beacon; /* link beacon status */
+ struct bfa_fcport_trunk_s trunk;
+ u16 fcoe_vlan;
+ struct bfa_mem_dma_s fcport_dma;
+ bfa_boolean_t stats_dma_ready;
+ struct bfa_bbcr_attr_s bbcr_attr;
+ enum bfa_fec_state_s fec_state;
+};
+
+#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
+#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
+
+/*
+ * protected functions
+ */
+void bfa_fcport_init(struct bfa_s *bfa);
+void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+/*
+ * bfa fcport API functions
+ */
+bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
+ enum bfa_port_speed speed);
+enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
+ enum bfa_port_topology topo);
+enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
+bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
+u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
+u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
+u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
+void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
+void bfa_fcport_event_register(struct bfa_s *bfa,
+ void (*event_cbfn) (void *cbarg,
+ enum bfa_port_linkstate event), void *event_cbarg);
+bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_ddport(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa,
+ struct bfa_qos_bw_s *qos_bw);
+enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
+
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
+void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon);
+bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
+ struct bfa_cb_pending_q_s *cb);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
+ struct bfa_cb_pending_q_s *cb);
+bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
+void bfa_fcport_dportenable(struct bfa_s *bfa);
+void bfa_fcport_dportdisable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
+bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa,
+ bfa_boolean_t on_off, u8 bb_scn);
+bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
+ struct bfa_bbcr_attr_s *bbcr_attr);
+
+/*
+ * bfa rport API functions
+ */
+struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
+void bfa_rport_online(struct bfa_rport_s *rport,
+ struct bfa_rport_info_s *rport_info);
+void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
+void bfa_cb_rport_online(void *rport);
+void bfa_cb_rport_offline(void *rport);
+void bfa_cb_rport_qos_scn_flowid(void *rport,
+ struct bfa_rport_qos_attr_s old_qos_attr,
+ struct bfa_rport_qos_attr_s new_qos_attr);
+void bfa_cb_rport_scn_online(struct bfa_s *bfa);
+void bfa_cb_rport_scn_offline(struct bfa_s *bfa);
+void bfa_cb_rport_scn_no_dev(void *rp);
+void bfa_cb_rport_qos_scn_prio(void *rport,
+ struct bfa_rport_qos_attr_s old_qos_attr,
+ struct bfa_rport_qos_attr_s new_qos_attr);
+
+/*
+ * Rport LUN masking related
+ */
+#define BFA_RPORT_TAG_INVALID 0xffff
+#define BFA_LP_TAG_INVALID 0xff
+void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+
+/*
+ * bfa fcxp API functions
+ */
+struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
+ int nreq_sgles, int nrsp_sgles,
+ bfa_fcxp_get_sgaddr_t get_req_sga,
+ bfa_fcxp_get_sglen_t get_req_sglen,
+ bfa_fcxp_get_sgaddr_t get_rsp_sga,
+ bfa_fcxp_get_sglen_t get_rsp_sglen,
+ bfa_boolean_t req);
+void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+ bfa_fcxp_alloc_cbfn_t alloc_cbfn,
+ void *cbarg, void *bfad_fcxp,
+ int nreq_sgles, int nrsp_sgles,
+ bfa_fcxp_get_sgaddr_t get_req_sga,
+ bfa_fcxp_get_sglen_t get_req_sglen,
+ bfa_fcxp_get_sgaddr_t get_rsp_sga,
+ bfa_fcxp_get_sglen_t get_rsp_sglen,
+ bfa_boolean_t req);
+void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
+ struct bfa_fcxp_wqe_s *wqe);
+void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
+
+void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
+void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
+
+void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
+
+void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+ u16 vf_id, u8 lp_tag,
+ bfa_boolean_t cts, enum fc_cos cos,
+ u32 reqlen, struct fchs_s *fchs,
+ bfa_cb_fcxp_send_t cbfn,
+ void *cbarg,
+ u32 rsp_maxlen, u8 rsp_timeout);
+bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
+u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
+u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
+void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw);
+
+static inline void *
+bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
+{
+ return uf->data_ptr;
+}
+
+static inline u16
+bfa_uf_get_frmlen(struct bfa_uf_s *uf)
+{
+ return uf->data_len;
+}
+
+/*
+ * bfa uf API functions
+ */
+void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
+ void *cbarg);
+void bfa_uf_free(struct bfa_uf_s *uf);
+
+/*
+ * bfa lport service api
+ */
+
+u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
+struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
+void bfa_lps_delete(struct bfa_lps_s *lps);
+void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
+ u16 pdusz, wwn_t pwwn, wwn_t nwwn,
+ bfa_boolean_t auth_en);
+void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
+ wwn_t pwwn, wwn_t nwwn);
+void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
+void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
+u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
+u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
+u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
+void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
+
+/* FAA specific APIs */
+bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+ bfa_cb_iocfc_t cbfn, void *cbarg);
+
+/*
+ * FC DIAG data structure
+ */
+struct bfa_fcdiag_qtest_s {
+ struct bfa_diag_qtest_result_s *result;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ struct bfa_timer_s timer;
+ u32 status;
+ u32 count;
+ u8 lock;
+ u8 queue;
+ u8 all;
+ u8 timer_active;
+};
+
+struct bfa_fcdiag_lb_s {
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ void *result;
+ bfa_boolean_t lock;
+ u32 status;
+};
+
+struct bfa_dport_s {
+ struct bfa_s *bfa; /* Back pointer to BFA */
+ bfa_sm_t sm; /* finite state machine */
+ struct bfa_reqq_wait_s reqq_wait;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ union bfi_diag_dport_msg_u i2hmsg;
+ u8 test_state; /* enum dport_test_state */
+ u8 dynamic; /* boolean_t */
+ u8 rsvd[2];
+ u32 lpcnt;
+ u32 payload; /* user defined payload pattern */
+ wwn_t rp_pwwn;
+ wwn_t rp_nwwn;
+ struct bfa_diag_dport_result_s result;
+};
+
+struct bfa_fcdiag_s {
+ struct bfa_s *bfa; /* Back pointer to BFA */
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_fcdiag_lb_s lb;
+ struct bfa_fcdiag_qtest_s qtest;
+ struct bfa_dport_s dport;
+};
+
+#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag)
+
+void bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+bfa_status_t bfa_fcdiag_loopback(struct bfa_s *bfa,
+ enum bfa_port_opmode opmode,
+ enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+ struct bfa_diag_loopback_result_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
+ u32 queue, struct bfa_diag_qtest_result_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
+bfa_status_t bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
+ void *cbarg);
+bfa_status_t bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_dport_show(struct bfa_s *bfa,
+ struct bfa_diag_dport_result_s *result);
+
+#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
new file mode 100644
index 000000000..f95adcadb
--- /dev/null
+++ b/drivers/scsi/bfa/bfad.c
@@ -0,0 +1,1820 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfad.c Linux driver PCI interface module.
+ */
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <asm/uaccess.h>
+#include <asm/fcntl.h>
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_defs.h"
+#include "bfa.h"
+
+BFA_TRC_FILE(LDRV, BFAD);
+DEFINE_MUTEX(bfad_mutex);
+LIST_HEAD(bfad_list);
+
+static int bfad_inst;
+static int num_sgpgs_parm;
+int supported_fc4s;
+char *host_name, *os_name, *os_patch;
+int num_rports, num_ios, num_tms;
+int num_fcxps, num_ufbufs;
+int reqq_size, rspq_size, num_sgpgs;
+int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
+int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
+int bfa_io_max_sge = BFAD_IO_MAX_SGE;
+int bfa_log_level = 3; /* WARNING log level */
+int ioc_auto_recover = BFA_TRUE;
+int bfa_linkup_delay = -1;
+int fdmi_enable = BFA_TRUE;
+int pcie_max_read_reqsz;
+int bfa_debugfs_enable = 1;
+int msix_disable_cb = 0, msix_disable_ct = 0;
+int max_xfer_size = BFAD_MAX_SECTORS >> 1;
+int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+
+/* Firmware releated */
+u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
+u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
+
+#define BFAD_FW_FILE_CB "/*(DEBLOBBED)*/"
+#define BFAD_FW_FILE_CT "/*(DEBLOBBED)*/"
+#define BFAD_FW_FILE_CT2 "/*(DEBLOBBED)*/"
+
+static u32 *bfad_load_fwimg(struct pci_dev *pdev);
+static void bfad_free_fwimg(void);
+static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name);
+
+static const char *msix_name_ct[] = {
+ "ctrl",
+ "cpe0", "cpe1", "cpe2", "cpe3",
+ "rme0", "rme1", "rme2", "rme3" };
+
+static const char *msix_name_cb[] = {
+ "cpe0", "cpe1", "cpe2", "cpe3",
+ "rme0", "rme1", "rme2", "rme3",
+ "eemc", "elpu0", "elpu1", "epss", "mlpu" };
+
+/*(DEBLOBBED)*/
+
+module_param(os_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
+module_param(os_patch, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
+module_param(host_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
+module_param(num_rports, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
+ "(physical/logical), default=1024");
+module_param(num_ios, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
+module_param(num_tms, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
+module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
+module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
+ "buffers, default=64");
+module_param(reqq_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
+ "default=256");
+module_param(rspq_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
+ "default=64");
+module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
+module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
+ "Range[>0]");
+module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
+module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
+module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
+ "Range[Critical:1|Error:2|Warning:3|Info:4]");
+module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
+ "Range[off:0|on:1]");
+module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
+ "boot port. Otherwise 10 secs in RHEL4 & 0 for "
+ "[RHEL5, SLES10, ESX40] Range[>0]");
+module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
+ "for Brocade-415/425/815/825 cards, default=0, "
+ " Range[false:0|true:1]");
+module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
+ "if possible for Brocade-1010/1020/804/1007/902/1741 "
+ "cards, default=0, Range[false:0|true:1]");
+module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
+ "Range[false:0|true:1]");
+module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
+ "(use system setting), Range[128|256|512|1024|2048|4096]");
+module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
+ " Range[false:0|true:1]");
+module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
+ " Range[64k|128k|256k|512k|1024k|2048k]");
+module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
+
+static void
+bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
+
+/*
+ * Beginning state for the driver instance, awaiting the pci_probe event
+ */
+static void
+bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_CREATE:
+ bfa_sm_set_state(bfad, bfad_sm_created);
+ bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
+ "%s", "bfad_worker");
+ if (IS_ERR(bfad->bfad_tsk)) {
+ printk(KERN_INFO "bfad[%d]: Kernel thread "
+ "creation failed!\n", bfad->inst_no);
+ bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
+ }
+ bfa_sm_send_event(bfad, BFAD_E_INIT);
+ break;
+
+ case BFAD_E_STOP:
+ /* Ignore stop; already in uninit */
+ break;
+
+ default:
+ bfa_sm_fault(bfad, event);
+ }
+}
+
+/*
+ * Driver Instance is created, awaiting event INIT to initialize the bfad
+ */
+static void
+bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ unsigned long flags;
+ bfa_status_t ret;
+
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_INIT:
+ bfa_sm_set_state(bfad, bfad_sm_initializing);
+
+ init_completion(&bfad->comp);
+
+ /* Enable Interrupt and wait bfa_init completion */
+ if (bfad_setup_intr(bfad)) {
+ printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
+ bfad->inst_no);
+ bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+ break;
+ }
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_iocfc_init(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* Set up interrupt handler for each vectors */
+ if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+ bfad_install_msix_handler(bfad)) {
+ printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
+ __func__, bfad->inst_no);
+ }
+
+ bfad_init_timer(bfad);
+
+ wait_for_completion(&bfad->comp);
+
+ if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
+ } else {
+ printk(KERN_WARNING
+ "bfa %s: bfa init failed\n",
+ bfad->pci_name);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_init(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+ if (ret != BFA_STATUS_OK) {
+ init_completion(&bfad->comp);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->pport.flags |= BFAD_PORT_DELETE;
+ bfa_fcs_exit(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(&bfad->comp);
+
+ bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+ break;
+ }
+ bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
+ bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);
+ }
+
+ break;
+
+ case BFAD_E_KTHREAD_CREATE_FAILED:
+ bfa_sm_set_state(bfad, bfad_sm_uninit);
+ break;
+
+ default:
+ bfa_sm_fault(bfad, event);
+ }
+}
+
+static void
+bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ int retval;
+ unsigned long flags;
+
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_INIT_SUCCESS:
+ kthread_stop(bfad->bfad_tsk);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_tsk = NULL;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ retval = bfad_start_ops(bfad);
+ if (retval != BFA_STATUS_OK) {
+ bfa_sm_set_state(bfad, bfad_sm_failed);
+ break;
+ }
+ bfa_sm_set_state(bfad, bfad_sm_operational);
+ break;
+
+ case BFAD_E_INIT_FAILED:
+ bfa_sm_set_state(bfad, bfad_sm_uninit);
+ kthread_stop(bfad->bfad_tsk);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_tsk = NULL;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ break;
+
+ case BFAD_E_HAL_INIT_FAILED:
+ bfa_sm_set_state(bfad, bfad_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(bfad, event);
+ }
+}
+
+static void
+bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ int retval;
+
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_INIT_SUCCESS:
+ retval = bfad_start_ops(bfad);
+ if (retval != BFA_STATUS_OK)
+ break;
+ bfa_sm_set_state(bfad, bfad_sm_operational);
+ break;
+
+ case BFAD_E_STOP:
+ bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
+ bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
+ break;
+
+ case BFAD_E_EXIT_COMP:
+ bfa_sm_set_state(bfad, bfad_sm_uninit);
+ bfad_remove_intr(bfad);
+ del_timer_sync(&bfad->hal_tmo);
+ break;
+
+ default:
+ bfa_sm_fault(bfad, event);
+ }
+}
+
+static void
+bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_STOP:
+ bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
+ bfad_fcs_stop(bfad);
+ break;
+
+ default:
+ bfa_sm_fault(bfad, event);
+ }
+}
+
+static void
+bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_FCS_EXIT_COMP:
+ bfa_sm_set_state(bfad, bfad_sm_stopping);
+ bfad_stop(bfad);
+ break;
+
+ default:
+ bfa_sm_fault(bfad, event);
+ }
+}
+
+static void
+bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+ bfa_trc(bfad, event);
+
+ switch (event) {
+ case BFAD_E_EXIT_COMP:
+ bfa_sm_set_state(bfad, bfad_sm_uninit);
+ bfad_remove_intr(bfad);
+ del_timer_sync(&bfad->hal_tmo);
+ bfad_im_probe_undo(bfad);
+ bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
+ bfad_uncfg_pport(bfad);
+ break;
+
+ default:
+ bfa_sm_fault(bfad, event);
+ break;
+ }
+}
+
+/*
+ * BFA callbacks
+ */
+void
+bfad_hcb_comp(void *arg, bfa_status_t status)
+{
+ struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
+
+ fcomp->status = status;
+ complete(&fcomp->comp);
+}
+
+/*
+ * bfa_init callback
+ */
+void
+bfa_cb_init(void *drv, bfa_status_t init_status)
+{
+ struct bfad_s *bfad = drv;
+
+ if (init_status == BFA_STATUS_OK) {
+ bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
+
+ /*
+ * If BFAD_HAL_INIT_FAIL flag is set:
+ * Wake up the kernel thread to start
+ * the bfad operations after HAL init done
+ */
+ if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
+ bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
+ wake_up_process(bfad->bfad_tsk);
+ }
+ }
+
+ complete(&bfad->comp);
+}
+
+/*
+ * BFA_FCS callbacks
+ */
+struct bfad_port_s *
+bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
+ enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
+ struct bfad_vport_s *vp_drv)
+{
+ bfa_status_t rc;
+ struct bfad_port_s *port_drv;
+
+ if (!vp_drv && !vf_drv) {
+ port_drv = &bfad->pport;
+ port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
+ } else if (!vp_drv && vf_drv) {
+ port_drv = &vf_drv->base_port;
+ port_drv->pvb_type = BFAD_PORT_VF_BASE;
+ } else if (vp_drv && !vf_drv) {
+ port_drv = &vp_drv->drv_port;
+ port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
+ } else {
+ port_drv = &vp_drv->drv_port;
+ port_drv->pvb_type = BFAD_PORT_VF_VPORT;
+ }
+
+ port_drv->fcs_port = port;
+ port_drv->roles = roles;
+
+ if (roles & BFA_LPORT_ROLE_FCP_IM) {
+ rc = bfad_im_port_new(bfad, port_drv);
+ if (rc != BFA_STATUS_OK) {
+ bfad_im_port_delete(bfad, port_drv);
+ port_drv = NULL;
+ }
+ }
+
+ return port_drv;
+}
+
+/*
+ * FCS RPORT alloc callback, after successful PLOGI by FCS
+ */
+bfa_status_t
+bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
+ struct bfad_rport_s **rport_drv)
+{
+ bfa_status_t rc = BFA_STATUS_OK;
+
+ *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
+ if (*rport_drv == NULL) {
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
+ }
+
+ *rport = &(*rport_drv)->fcs_rport;
+
+ext:
+ return rc;
+}
+
+/*
+ * FCS PBC VPORT Create
+ */
+void
+bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
+{
+
+ struct bfa_lport_cfg_s port_cfg = {0};
+ struct bfad_vport_s *vport;
+ int rc;
+
+ vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
+ if (!vport) {
+ bfa_trc(bfad, 0);
+ return;
+ }
+
+ vport->drv_port.bfad = bfad;
+ port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
+ port_cfg.pwwn = pbc_vport.vp_pwwn;
+ port_cfg.nwwn = pbc_vport.vp_nwwn;
+ port_cfg.preboot_vp = BFA_TRUE;
+
+ rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
+ &port_cfg, vport);
+
+ if (rc != BFA_STATUS_OK) {
+ bfa_trc(bfad, 0);
+ return;
+ }
+
+ list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
+}
+
+void
+bfad_hal_mem_release(struct bfad_s *bfad)
+{
+ struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
+ struct bfa_mem_dma_s *dma_info, *dma_elem;
+ struct bfa_mem_kva_s *kva_info, *kva_elem;
+ struct list_head *dm_qe, *km_qe;
+
+ dma_info = &hal_meminfo->dma_info;
+ kva_info = &hal_meminfo->kva_info;
+
+ /* Iterate through the KVA meminfo queue */
+ list_for_each(km_qe, &kva_info->qe) {
+ kva_elem = (struct bfa_mem_kva_s *) km_qe;
+ vfree(kva_elem->kva);
+ }
+
+ /* Iterate through the DMA meminfo queue */
+ list_for_each(dm_qe, &dma_info->qe) {
+ dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+ dma_free_coherent(&bfad->pcidev->dev,
+ dma_elem->mem_len, dma_elem->kva,
+ (dma_addr_t) dma_elem->dma);
+ }
+
+ memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
+}
+
+void
+bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
+{
+ if (num_rports > 0)
+ bfa_cfg->fwcfg.num_rports = num_rports;
+ if (num_ios > 0)
+ bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
+ if (num_tms > 0)
+ bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
+ if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
+ bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
+ if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
+ bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
+ if (reqq_size > 0)
+ bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
+ if (rspq_size > 0)
+ bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
+ if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
+ bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
+
+ /*
+ * populate the hal values back to the driver for sysfs use.
+ * otherwise, the default values will be shown as 0 in sysfs
+ */
+ num_rports = bfa_cfg->fwcfg.num_rports;
+ num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
+ num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
+ num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
+ num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
+ reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
+ rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
+ num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
+}
+
+bfa_status_t
+bfad_hal_mem_alloc(struct bfad_s *bfad)
+{
+ struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
+ struct bfa_mem_dma_s *dma_info, *dma_elem;
+ struct bfa_mem_kva_s *kva_info, *kva_elem;
+ struct list_head *dm_qe, *km_qe;
+ bfa_status_t rc = BFA_STATUS_OK;
+ dma_addr_t phys_addr;
+
+ bfa_cfg_get_default(&bfad->ioc_cfg);
+ bfad_update_hal_cfg(&bfad->ioc_cfg);
+ bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
+ bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
+
+ dma_info = &hal_meminfo->dma_info;
+ kva_info = &hal_meminfo->kva_info;
+
+ /* Iterate through the KVA meminfo queue */
+ list_for_each(km_qe, &kva_info->qe) {
+ kva_elem = (struct bfa_mem_kva_s *) km_qe;
+ kva_elem->kva = vmalloc(kva_elem->mem_len);
+ if (kva_elem->kva == NULL) {
+ bfad_hal_mem_release(bfad);
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
+ }
+ memset(kva_elem->kva, 0, kva_elem->mem_len);
+ }
+
+ /* Iterate through the DMA meminfo queue */
+ list_for_each(dm_qe, &dma_info->qe) {
+ dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+ dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
+ dma_elem->mem_len,
+ &phys_addr, GFP_KERNEL);
+ if (dma_elem->kva == NULL) {
+ bfad_hal_mem_release(bfad);
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
+ }
+ dma_elem->dma = phys_addr;
+ memset(dma_elem->kva, 0, dma_elem->mem_len);
+ }
+ext:
+ return rc;
+}
+
+/*
+ * Create a vport under a vf.
+ */
+bfa_status_t
+bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
+ struct bfa_lport_cfg_s *port_cfg, struct device *dev)
+{
+ struct bfad_vport_s *vport;
+ int rc = BFA_STATUS_OK;
+ unsigned long flags;
+ struct completion fcomp;
+
+ vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
+ if (!vport) {
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
+ }
+
+ vport->drv_port.bfad = bfad;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
+ port_cfg, vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (rc != BFA_STATUS_OK)
+ goto ext_free_vport;
+
+ if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
+ rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
+ dev);
+ if (rc != BFA_STATUS_OK)
+ goto ext_free_fcs_vport;
+ }
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_vport_start(&vport->fcs_vport);
+ list_add_tail(&vport->list_entry, &bfad->vport_list);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return BFA_STATUS_OK;
+
+ext_free_fcs_vport:
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ vport->comp_del = &fcomp;
+ init_completion(vport->comp_del);
+ bfa_fcs_vport_delete(&vport->fcs_vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(vport->comp_del);
+ext_free_vport:
+ kfree(vport);
+ext:
+ return rc;
+}
+
+void
+bfad_bfa_tmo(unsigned long data)
+{
+ struct bfad_s *bfad = (struct bfad_s *) data;
+ unsigned long flags;
+ struct list_head doneq;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ bfa_timer_beat(&bfad->bfa.timer_mod);
+
+ bfa_comp_deq(&bfad->bfa, &doneq);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (!list_empty(&doneq)) {
+ bfa_comp_process(&bfad->bfa, &doneq);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_comp_free(&bfad->bfa, &doneq);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ mod_timer(&bfad->hal_tmo,
+ jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
+}
+
+void
+bfad_init_timer(struct bfad_s *bfad)
+{
+ init_timer(&bfad->hal_tmo);
+ bfad->hal_tmo.function = bfad_bfa_tmo;
+ bfad->hal_tmo.data = (unsigned long)bfad;
+
+ mod_timer(&bfad->hal_tmo,
+ jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
+}
+
+int
+bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
+{
+ int rc = -ENODEV;
+
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
+ goto out;
+ }
+
+ if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
+ goto out_disable_device;
+
+ pci_set_master(pdev);
+
+
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
+ goto out_release_region;
+ }
+ }
+
+ /* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
+ pci_enable_pcie_error_reporting(pdev);
+
+ bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
+
+ if (bfad->pci_bar0_kva == NULL) {
+ printk(KERN_ERR "Fail to map bar0\n");
+ goto out_release_region;
+ }
+
+ bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
+ bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
+ bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
+ bfad->hal_pcidev.device_id = pdev->device;
+ bfad->hal_pcidev.ssid = pdev->subsystem_device;
+ bfad->pci_name = pci_name(pdev);
+
+ bfad->pci_attr.vendor_id = pdev->vendor;
+ bfad->pci_attr.device_id = pdev->device;
+ bfad->pci_attr.ssid = pdev->subsystem_device;
+ bfad->pci_attr.ssvid = pdev->subsystem_vendor;
+ bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
+
+ bfad->pcidev = pdev;
+
+ /* Adjust PCIe Maximum Read Request Size */
+ if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+ if (pcie_max_read_reqsz >= 128 &&
+ pcie_max_read_reqsz <= 4096 &&
+ is_power_of_2(pcie_max_read_reqsz)) {
+ int max_rq = pcie_get_readrq(pdev);
+ printk(KERN_WARNING "BFA[%s]: "
+ "pcie_max_read_request_size is %d, "
+ "reset to %d\n", bfad->pci_name, max_rq,
+ pcie_max_read_reqsz);
+ pcie_set_readrq(pdev, pcie_max_read_reqsz);
+ } else {
+ printk(KERN_WARNING "BFA[%s]: invalid "
+ "pcie_max_read_request_size %d ignored\n",
+ bfad->pci_name, pcie_max_read_reqsz);
+ }
+ }
+
+ pci_save_state(pdev);
+
+ return 0;
+
+out_release_region:
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return rc;
+}
+
+void
+bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
+{
+ pci_iounmap(pdev, bfad->pci_bar0_kva);
+ pci_iounmap(pdev, bfad->pci_bar2_kva);
+ pci_release_regions(pdev);
+ /* Disable PCIE Advanced Error Recovery (AER) */
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+bfa_status_t
+bfad_drv_init(struct bfad_s *bfad)
+{
+ bfa_status_t rc;
+ unsigned long flags;
+
+ bfad->cfg_data.rport_del_timeout = rport_del_timeout;
+ bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
+ bfad->cfg_data.io_max_sge = bfa_io_max_sge;
+ bfad->cfg_data.binding_method = FCP_PWWN_BINDING;
+
+ rc = bfad_hal_mem_alloc(bfad);
+ if (rc != BFA_STATUS_OK) {
+ printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
+ bfad->inst_no);
+ printk(KERN_WARNING
+ "Not enough memory to attach all Brocade HBA ports, %s",
+ "System may need more memory.\n");
+ return BFA_STATUS_FAILED;
+ }
+
+ bfad->bfa.trcmod = bfad->trcmod;
+ bfad->bfa.plog = &bfad->plog_buf;
+ bfa_plog_init(&bfad->plog_buf);
+ bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
+ 0, "Driver Attach");
+
+ bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
+ &bfad->hal_pcidev);
+
+ /* FCS INIT */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfa_fcs.trcmod = bfad->trcmod;
+ bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+ bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
+
+ return BFA_STATUS_OK;
+}
+
+void
+bfad_drv_uninit(struct bfad_s *bfad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfa_iocfc_stop(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ del_timer_sync(&bfad->hal_tmo);
+ bfa_isr_disable(&bfad->bfa);
+ bfa_detach(&bfad->bfa);
+ bfad_remove_intr(bfad);
+ bfad_hal_mem_release(bfad);
+
+ bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
+}
+
+void
+bfad_drv_start(struct bfad_s *bfad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_iocfc_start(&bfad->bfa);
+ bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
+ bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
+ bfad->bfad_flags |= BFAD_HAL_START_DONE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (bfad->im)
+ flush_workqueue(bfad->im->drv_workq);
+}
+
+void
+bfad_fcs_stop(struct bfad_s *bfad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfad->pport.flags |= BFAD_PORT_DELETE;
+ bfa_fcs_exit(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
+}
+
+void
+bfad_stop(struct bfad_s *bfad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfa_iocfc_stop(&bfad->bfa);
+ bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
+}
+
+bfa_status_t
+bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
+{
+ int rc = BFA_STATUS_OK;
+
+ /* Allocate scsi_host for the physical port */
+ if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
+ (role & BFA_LPORT_ROLE_FCP_IM)) {
+ if (bfad->pport.im_port == NULL) {
+ rc = BFA_STATUS_FAILED;
+ goto out;
+ }
+
+ rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
+ &bfad->pcidev->dev);
+ if (rc != BFA_STATUS_OK)
+ goto out;
+
+ bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
+ }
+
+ bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
+
+out:
+ return rc;
+}
+
+void
+bfad_uncfg_pport(struct bfad_s *bfad)
+{
+ if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
+ (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
+ bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
+ bfad_im_port_clean(bfad->pport.im_port);
+ kfree(bfad->pport.im_port);
+ bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
+ }
+
+ bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
+}
+
+bfa_status_t
+bfad_start_ops(struct bfad_s *bfad) {
+
+ int retval;
+ unsigned long flags;
+ struct bfad_vport_s *vport, *vport_new;
+ struct bfa_fcs_driver_info_s driver_info;
+
+ /* Limit min/max. xfer size to [64k-32MB] */
+ if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
+ max_xfer_size = BFAD_MIN_SECTORS >> 1;
+ if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
+ max_xfer_size = BFAD_MAX_SECTORS >> 1;
+
+ /* Fill the driver_info info to fcs*/
+ memset(&driver_info, 0, sizeof(driver_info));
+ strncpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version) - 1);
+ if (host_name)
+ strncpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name) - 1);
+ if (os_name)
+ strncpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name) - 1);
+ if (os_patch)
+ strncpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch) - 1);
+
+ strncpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name) - 1);
+
+ /* FCS driver info init */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
+
+ if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
+ bfa_fcs_update_cfg(&bfad->bfa_fcs);
+ else
+ bfa_fcs_init(&bfad->bfa_fcs);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) {
+ retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+ if (retval != BFA_STATUS_OK)
+ return BFA_STATUS_FAILED;
+ }
+
+ /* Setup fc host fixed attribute if the lk supports */
+ bfad_fc_host_init(bfad->pport.im_port);
+
+ /* BFAD level FC4 IM specific resource allocation */
+ retval = bfad_im_probe(bfad);
+ if (retval != BFA_STATUS_OK) {
+ printk(KERN_WARNING "bfad_im_probe failed\n");
+ if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
+ bfa_sm_set_state(bfad, bfad_sm_failed);
+ return BFA_STATUS_FAILED;
+ } else
+ bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
+
+ bfad_drv_start(bfad);
+
+ /* Complete pbc vport create */
+ list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
+ list_entry) {
+ struct fc_vport_identifiers vid;
+ struct fc_vport *fc_vport;
+ char pwwn_buf[BFA_STRING_32];
+
+ memset(&vid, 0, sizeof(vid));
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+ vid.disable = false;
+ vid.node_name = wwn_to_u64((u8 *)
+ (&((vport->fcs_vport).lport.port_cfg.nwwn)));
+ vid.port_name = wwn_to_u64((u8 *)
+ (&((vport->fcs_vport).lport.port_cfg.pwwn)));
+ fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
+ if (!fc_vport) {
+ wwn2str(pwwn_buf, vid.port_name);
+ printk(KERN_WARNING "bfad%d: failed to create pbc vport"
+ " %s\n", bfad->inst_no, pwwn_buf);
+ }
+ list_del(&vport->list_entry);
+ kfree(vport);
+ }
+
+ /*
+ * If bfa_linkup_delay is set to -1 default; try to retrive the
+ * value using the bfad_get_linkup_delay(); else use the
+ * passed in module param value as the bfa_linkup_delay.
+ */
+ if (bfa_linkup_delay < 0) {
+ bfa_linkup_delay = bfad_get_linkup_delay(bfad);
+ bfad_rport_online_wait(bfad);
+ bfa_linkup_delay = -1;
+ } else
+ bfad_rport_online_wait(bfad);
+
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
+
+ return BFA_STATUS_OK;
+}
+
+int
+bfad_worker(void *ptr)
+{
+ struct bfad_s *bfad = ptr;
+ unsigned long flags;
+
+ if (kthread_should_stop())
+ return 0;
+
+ /* Send event BFAD_E_INIT_SUCCESS */
+ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_tsk = NULL;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+/*
+ * BFA driver interrupt functions
+ */
+irqreturn_t
+bfad_intx(int irq, void *dev_id)
+{
+ struct bfad_s *bfad = dev_id;
+ struct list_head doneq;
+ unsigned long flags;
+ bfa_boolean_t rc;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_intx(&bfad->bfa);
+ if (!rc) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return IRQ_NONE;
+ }
+
+ bfa_comp_deq(&bfad->bfa, &doneq);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (!list_empty(&doneq)) {
+ bfa_comp_process(&bfad->bfa, &doneq);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_comp_free(&bfad->bfa, &doneq);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ return IRQ_HANDLED;
+
+}
+
+static irqreturn_t
+bfad_msix(int irq, void *dev_id)
+{
+ struct bfad_msix_s *vec = dev_id;
+ struct bfad_s *bfad = vec->bfad;
+ struct list_head doneq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ bfa_msix(&bfad->bfa, vec->msix.entry);
+ bfa_comp_deq(&bfad->bfa, &doneq);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (!list_empty(&doneq)) {
+ bfa_comp_process(&bfad->bfa, &doneq);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_comp_free(&bfad->bfa, &doneq);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initialize the MSIX entry table.
+ */
+static void
+bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
+ int mask, int max_bit)
+{
+ int i;
+ int match = 0x00000001;
+
+ for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
+ if (mask & match) {
+ bfad->msix_tab[bfad->nvec].msix.entry = i;
+ bfad->msix_tab[bfad->nvec].bfad = bfad;
+ msix_entries[bfad->nvec].entry = i;
+ bfad->nvec++;
+ }
+
+ match <<= 1;
+ }
+
+}
+
+int
+bfad_install_msix_handler(struct bfad_s *bfad)
+{
+ int i, error = 0;
+
+ for (i = 0; i < bfad->nvec; i++) {
+ sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
+ bfad->pci_name,
+ ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
+ msix_name_cb[i] : msix_name_ct[i]));
+
+ error = request_irq(bfad->msix_tab[i].msix.vector,
+ (irq_handler_t) bfad_msix, 0,
+ bfad->msix_tab[i].name, &bfad->msix_tab[i]);
+ bfa_trc(bfad, i);
+ bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
+ if (error) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ free_irq(bfad->msix_tab[j].msix.vector,
+ &bfad->msix_tab[j]);
+
+ bfad->bfad_flags &= ~BFAD_MSIX_ON;
+ pci_disable_msix(bfad->pcidev);
+
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Setup MSIX based interrupt.
+ */
+int
+bfad_setup_intr(struct bfad_s *bfad)
+{
+ int error;
+ u32 mask = 0, i, num_bit = 0, max_bit = 0;
+ struct msix_entry msix_entries[MAX_MSIX_ENTRY];
+ struct pci_dev *pdev = bfad->pcidev;
+ u16 reg;
+
+ /* Call BFA to get the msix map for this PCI function. */
+ bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
+
+ /* Set up the msix entry table */
+ bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
+
+ if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
+ (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
+
+ error = pci_enable_msix_exact(bfad->pcidev,
+ msix_entries, bfad->nvec);
+ /* In CT1 & CT2, try to allocate just one vector */
+ if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) {
+ printk(KERN_WARNING "bfa %s: trying one msix "
+ "vector failed to allocate %d[%d]\n",
+ bfad->pci_name, bfad->nvec, error);
+ bfad->nvec = 1;
+ error = pci_enable_msix_exact(bfad->pcidev,
+ msix_entries, 1);
+ }
+
+ if (error) {
+ printk(KERN_WARNING "bfad%d: "
+ "pci_enable_msix_exact failed (%d), "
+ "use line based.\n",
+ bfad->inst_no, error);
+ goto line_based;
+ }
+
+ /* Disable INTX in MSI-X mode */
+ pci_read_config_word(pdev, PCI_COMMAND, &reg);
+
+ if (!(reg & PCI_COMMAND_INTX_DISABLE))
+ pci_write_config_word(pdev, PCI_COMMAND,
+ reg | PCI_COMMAND_INTX_DISABLE);
+
+ /* Save the vectors */
+ for (i = 0; i < bfad->nvec; i++) {
+ bfa_trc(bfad, msix_entries[i].vector);
+ bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
+ }
+
+ bfa_msix_init(&bfad->bfa, bfad->nvec);
+
+ bfad->bfad_flags |= BFAD_MSIX_ON;
+
+ return 0;
+ }
+
+line_based:
+ error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
+ BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad);
+ if (error)
+ return error;
+
+ bfad->bfad_flags |= BFAD_INTX_ON;
+
+ return 0;
+}
+
+void
+bfad_remove_intr(struct bfad_s *bfad)
+{
+ int i;
+
+ if (bfad->bfad_flags & BFAD_MSIX_ON) {
+ for (i = 0; i < bfad->nvec; i++)
+ free_irq(bfad->msix_tab[i].msix.vector,
+ &bfad->msix_tab[i]);
+
+ pci_disable_msix(bfad->pcidev);
+ bfad->bfad_flags &= ~BFAD_MSIX_ON;
+ } else if (bfad->bfad_flags & BFAD_INTX_ON) {
+ free_irq(bfad->pcidev->irq, bfad);
+ }
+}
+
+/*
+ * PCI probe entry.
+ */
+int
+bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct bfad_s *bfad;
+ int error = -ENODEV, retval, i;
+
+ /* For single port cards - only claim function 0 */
+ if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
+ (PCI_FUNC(pdev->devfn) != 0))
+ return -ENODEV;
+
+ bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
+ if (!bfad) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
+ if (!bfad->trcmod) {
+ printk(KERN_WARNING "Error alloc trace buffer!\n");
+ error = -ENOMEM;
+ goto out_alloc_trace_failure;
+ }
+
+ /* TRACE INIT */
+ bfa_trc_init(bfad->trcmod);
+ bfa_trc(bfad, bfad_inst);
+
+ /* AEN INIT */
+ INIT_LIST_HEAD(&bfad->free_aen_q);
+ INIT_LIST_HEAD(&bfad->active_aen_q);
+ for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+ list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
+ if (!(bfad_load_fwimg(pdev))) {
+ kfree(bfad->trcmod);
+ goto out_alloc_trace_failure;
+ }
+
+ retval = bfad_pci_init(pdev, bfad);
+ if (retval) {
+ printk(KERN_WARNING "bfad_pci_init failure!\n");
+ error = retval;
+ goto out_pci_init_failure;
+ }
+
+ mutex_lock(&bfad_mutex);
+ bfad->inst_no = bfad_inst++;
+ list_add_tail(&bfad->list_entry, &bfad_list);
+ mutex_unlock(&bfad_mutex);
+
+ /* Initializing the state machine: State set to uninit */
+ bfa_sm_set_state(bfad, bfad_sm_uninit);
+
+ spin_lock_init(&bfad->bfad_lock);
+ spin_lock_init(&bfad->bfad_aen_spinlock);
+
+ pci_set_drvdata(pdev, bfad);
+
+ bfad->ref_count = 0;
+ bfad->pport.bfad = bfad;
+ INIT_LIST_HEAD(&bfad->pbc_vport_list);
+ INIT_LIST_HEAD(&bfad->vport_list);
+
+ /* Setup the debugfs node for this bfad */
+ if (bfa_debugfs_enable)
+ bfad_debugfs_init(&bfad->pport);
+
+ retval = bfad_drv_init(bfad);
+ if (retval != BFA_STATUS_OK)
+ goto out_drv_init_failure;
+
+ bfa_sm_send_event(bfad, BFAD_E_CREATE);
+
+ if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
+ goto out_bfad_sm_failure;
+
+ return 0;
+
+out_bfad_sm_failure:
+ bfad_hal_mem_release(bfad);
+out_drv_init_failure:
+ /* Remove the debugfs node for this bfad */
+ kfree(bfad->regdata);
+ bfad_debugfs_exit(&bfad->pport);
+ mutex_lock(&bfad_mutex);
+ bfad_inst--;
+ list_del(&bfad->list_entry);
+ mutex_unlock(&bfad_mutex);
+ bfad_pci_uninit(pdev, bfad);
+out_pci_init_failure:
+ kfree(bfad->trcmod);
+out_alloc_trace_failure:
+ kfree(bfad);
+out:
+ return error;
+}
+
+/*
+ * PCI remove entry.
+ */
+void
+bfad_pci_remove(struct pci_dev *pdev)
+{
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ bfa_trc(bfad, bfad->inst_no);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfad->bfad_tsk != NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ kthread_stop(bfad->bfad_tsk);
+ } else {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ /* Send Event BFAD_E_STOP */
+ bfa_sm_send_event(bfad, BFAD_E_STOP);
+
+ /* Driver detach and dealloc mem */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_detach(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfad_hal_mem_release(bfad);
+
+ /* Remove the debugfs node for this bfad */
+ kfree(bfad->regdata);
+ bfad_debugfs_exit(&bfad->pport);
+
+ /* Cleaning the BFAD instance */
+ mutex_lock(&bfad_mutex);
+ bfad_inst--;
+ list_del(&bfad->list_entry);
+ mutex_unlock(&bfad_mutex);
+ bfad_pci_uninit(pdev, bfad);
+
+ kfree(bfad->trcmod);
+ kfree(bfad);
+}
+
+/*
+ * PCI Error Recovery entry, error detected.
+ */
+static pci_ers_result_t
+bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+ unsigned long flags;
+ pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
+
+ dev_printk(KERN_ERR, &pdev->dev,
+ "error detected state: %d - flags: 0x%x\n",
+ state, bfad->bfad_flags);
+
+ switch (state) {
+ case pci_channel_io_normal: /* non-fatal error */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+ /* Suspend/fail all bfa operations */
+ bfa_ioc_suspend(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ del_timer_sync(&bfad->hal_tmo);
+ ret = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
+ case pci_channel_io_frozen: /* fatal error */
+ init_completion(&bfad->comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags |= BFAD_EEH_BUSY;
+ /* Suspend/fail all bfa operations */
+ bfa_ioc_suspend(&bfad->bfa.ioc);
+ bfa_fcs_stop(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ bfad_remove_intr(bfad);
+ del_timer_sync(&bfad->hal_tmo);
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ case pci_channel_io_perm_failure: /* PCI Card is DEAD */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags |= BFAD_EEH_BUSY |
+ BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* If the error_detected handler is called with the reason
+ * pci_channel_io_perm_failure - it will subsequently call
+ * pci_remove() entry point to remove the pci device from the
+ * system - So defer the cleanup to pci_remove(); cleaning up
+ * here causes inconsistent state during pci_remove().
+ */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return ret;
+}
+
+int
+restart_bfa(struct bfad_s *bfad)
+{
+ unsigned long flags;
+ struct pci_dev *pdev = bfad->pcidev;
+
+ bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
+ &bfad->meminfo, &bfad->hal_pcidev);
+
+ /* Enable Interrupt and wait bfa_init completion */
+ if (bfad_setup_intr(bfad)) {
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "%s: bfad_setup_intr failed\n", bfad->pci_name);
+ bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+ return -1;
+ }
+
+ init_completion(&bfad->comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_iocfc_init(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* Set up interrupt handler for each vectors */
+ if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+ bfad_install_msix_handler(bfad))
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "%s: install_msix failed.\n", bfad->pci_name);
+
+ bfad_init_timer(bfad);
+ wait_for_completion(&bfad->comp);
+ bfad_drv_start(bfad);
+
+ return 0;
+}
+
+/*
+ * PCI Error Recovery entry, re-initialize the chip.
+ */
+static pci_ers_result_t
+bfad_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+ u8 byte;
+
+ dev_printk(KERN_ERR, &pdev->dev,
+ "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
+
+ if (pci_enable_device(pdev)) {
+ dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
+ "PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_restore_state(pdev);
+
+ /*
+ * Read some byte (e.g. DMA max. payload size which can't
+ * be 0xff any time) to make sure - we did not hit another PCI error
+ * in the middle of recovery. If we did, then declare permanent failure.
+ */
+ pci_read_config_byte(pdev, 0x68, &byte);
+ if (byte == 0xff) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "slot_reset failed ... got another PCI error !\n");
+ goto out_disable_device;
+ }
+
+ pci_save_state(pdev);
+ pci_set_master(pdev);
+
+ if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
+ if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+ goto out_disable_device;
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (restart_bfa(bfad) == -1)
+ goto out_disable_device;
+
+ pci_enable_pcie_error_reporting(pdev);
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+out_disable_device:
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static pci_ers_result_t
+bfad_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ unsigned long flags;
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+ dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
+
+ /* Fetch FW diagnostic information */
+ bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
+
+ /* Cancel all pending IOs */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfa_fcs_stop(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
+ bfad_remove_intr(bfad);
+ del_timer_sync(&bfad->hal_tmo);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static void
+bfad_pci_resume(struct pci_dev *pdev)
+{
+ unsigned long flags;
+ struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+ dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
+
+ /* wait until the link is online */
+ bfad_rport_online_wait(bfad);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+struct pci_device_id bfad_id_table[] = {
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_FC_8G2P,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_FC_8G1P,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_CT,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_SERIAL_FIBER << 8),
+ .class_mask = ~0,
+ },
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_CT_FC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_SERIAL_FIBER << 8),
+ .class_mask = ~0,
+ },
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_CT2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_SERIAL_FIBER << 8),
+ .class_mask = ~0,
+ },
+
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_CT2_QUAD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_SERIAL_FIBER << 8),
+ .class_mask = ~0,
+ },
+ {0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, bfad_id_table);
+
+/*
+ * PCI error recovery handlers.
+ */
+static struct pci_error_handlers bfad_err_handler = {
+ .error_detected = bfad_pci_error_detected,
+ .slot_reset = bfad_pci_slot_reset,
+ .mmio_enabled = bfad_pci_mmio_enabled,
+ .resume = bfad_pci_resume,
+};
+
+static struct pci_driver bfad_pci_driver = {
+ .name = BFAD_DRIVER_NAME,
+ .id_table = bfad_id_table,
+ .probe = bfad_pci_probe,
+ .remove = bfad_pci_remove,
+ .err_handler = &bfad_err_handler,
+};
+
+/*
+ * Driver module init.
+ */
+static int __init
+bfad_init(void)
+{
+ int error = 0;
+
+ printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
+ BFAD_DRIVER_VERSION);
+
+ if (num_sgpgs > 0)
+ num_sgpgs_parm = num_sgpgs;
+
+ error = bfad_im_module_init();
+ if (error) {
+ error = -ENOMEM;
+ printk(KERN_WARNING "bfad_im_module_init failure\n");
+ goto ext;
+ }
+
+ if (strcmp(FCPI_NAME, " fcpim") == 0)
+ supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
+
+ bfa_auto_recover = ioc_auto_recover;
+ bfa_fcs_rport_set_del_timeout(rport_del_timeout);
+ bfa_fcs_rport_set_max_logins(max_rport_logins);
+
+ error = pci_register_driver(&bfad_pci_driver);
+ if (error) {
+ printk(KERN_WARNING "pci_register_driver failure\n");
+ goto ext;
+ }
+
+ return 0;
+
+ext:
+ bfad_im_module_exit();
+ return error;
+}
+
+/*
+ * Driver module exit.
+ */
+static void __exit
+bfad_exit(void)
+{
+ pci_unregister_driver(&bfad_pci_driver);
+ bfad_im_module_exit();
+ bfad_free_fwimg();
+}
+
+/* Firmware handling */
+static void
+bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name)
+{
+ const struct firmware *fw;
+
+ if (reject_firmware(&fw, fw_name, &pdev->dev)) {
+ printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
+ *bfi_image = NULL;
+ goto out;
+ }
+
+ *bfi_image = vmalloc(fw->size);
+ if (NULL == *bfi_image) {
+ printk(KERN_ALERT "Fail to allocate buffer for fw image "
+ "size=%x!\n", (u32) fw->size);
+ goto out;
+ }
+
+ memcpy(*bfi_image, fw->data, fw->size);
+ *bfi_image_size = fw->size/sizeof(u32);
+out:
+ release_firmware(fw);
+}
+
+static u32 *
+bfad_load_fwimg(struct pci_dev *pdev)
+{
+ if (bfa_asic_id_ct2(pdev->device)) {
+ if (bfi_image_ct2_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_ct2,
+ &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+ return bfi_image_ct2;
+ } else if (bfa_asic_id_ct(pdev->device)) {
+ if (bfi_image_ct_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_ct,
+ &bfi_image_ct_size, BFAD_FW_FILE_CT);
+ return bfi_image_ct;
+ } else if (bfa_asic_id_cb(pdev->device)) {
+ if (bfi_image_cb_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_cb,
+ &bfi_image_cb_size, BFAD_FW_FILE_CB);
+ return bfi_image_cb;
+ }
+
+ return NULL;
+}
+
+static void
+bfad_free_fwimg(void)
+{
+ if (bfi_image_ct2_size && bfi_image_ct2)
+ vfree(bfi_image_ct2);
+ if (bfi_image_ct_size && bfi_image_ct)
+ vfree(bfi_image_ct);
+ if (bfi_image_cb_size && bfi_image_cb)
+ vfree(bfi_image_cb);
+}
+
+module_init(bfad_init);
+module_exit(bfad_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
+MODULE_AUTHOR("Brocade Communications Systems, Inc.");
+MODULE_VERSION(BFAD_DRIVER_VERSION);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
new file mode 100644
index 000000000..40be670a1
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -0,0 +1,999 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfa_attr.c Linux driver configuration interface module.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+
+/*
+ * FC transport template entry, get SCSI target port ID.
+ */
+static void
+bfad_im_get_starget_port_id(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct bfad_im_port_s *im_port;
+ struct bfad_s *bfad;
+ struct bfad_itnim_s *itnim = NULL;
+ u32 fc_id = -1;
+ unsigned long flags;
+
+ shost = dev_to_shost(starget->dev.parent);
+ im_port = (struct bfad_im_port_s *) shost->hostdata[0];
+ bfad = im_port->bfad;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ itnim = bfad_get_itnim(im_port, starget->id);
+ if (itnim)
+ fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
+
+ fc_starget_port_id(starget) = fc_id;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * FC transport template entry, get SCSI target nwwn.
+ */
+static void
+bfad_im_get_starget_node_name(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct bfad_im_port_s *im_port;
+ struct bfad_s *bfad;
+ struct bfad_itnim_s *itnim = NULL;
+ u64 node_name = 0;
+ unsigned long flags;
+
+ shost = dev_to_shost(starget->dev.parent);
+ im_port = (struct bfad_im_port_s *) shost->hostdata[0];
+ bfad = im_port->bfad;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ itnim = bfad_get_itnim(im_port, starget->id);
+ if (itnim)
+ node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
+
+ fc_starget_node_name(starget) = cpu_to_be64(node_name);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * FC transport template entry, get SCSI target pwwn.
+ */
+static void
+bfad_im_get_starget_port_name(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct bfad_im_port_s *im_port;
+ struct bfad_s *bfad;
+ struct bfad_itnim_s *itnim = NULL;
+ u64 port_name = 0;
+ unsigned long flags;
+
+ shost = dev_to_shost(starget->dev.parent);
+ im_port = (struct bfad_im_port_s *) shost->hostdata[0];
+ bfad = im_port->bfad;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ itnim = bfad_get_itnim(im_port, starget->id);
+ if (itnim)
+ port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
+
+ fc_starget_port_name(starget) = cpu_to_be64(port_name);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * FC transport template entry, get SCSI host port ID.
+ */
+static void
+bfad_im_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_port_s *port = im_port->port;
+
+ fc_host_port_id(shost) =
+ bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
+}
+
+/*
+ * FC transport template entry, get SCSI host port type.
+ */
+static void
+bfad_im_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_lport_attr_s port_attr;
+
+ bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+
+ switch (port_attr.port_type) {
+ case BFA_PORT_TYPE_NPORT:
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case BFA_PORT_TYPE_NLPORT:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ break;
+ case BFA_PORT_TYPE_P2P:
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ break;
+ case BFA_PORT_TYPE_LPORT:
+ fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+ break;
+ default:
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+ break;
+ }
+}
+
+/*
+ * FC transport template entry, get SCSI host port state.
+ */
+static void
+bfad_im_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_port_attr_s attr;
+
+ bfa_fcport_get_attr(&bfad->bfa, &attr);
+
+ switch (attr.port_state) {
+ case BFA_PORT_ST_LINKDOWN:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case BFA_PORT_ST_LINKUP:
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ case BFA_PORT_ST_DISABLED:
+ case BFA_PORT_ST_STOPPED:
+ case BFA_PORT_ST_IOCDOWN:
+ case BFA_PORT_ST_IOCDIS:
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ break;
+ case BFA_PORT_ST_UNINIT:
+ case BFA_PORT_ST_ENABLING_QWAIT:
+ case BFA_PORT_ST_ENABLING:
+ case BFA_PORT_ST_DISABLING_QWAIT:
+ case BFA_PORT_ST_DISABLING:
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
+}
+
+/*
+ * FC transport template entry, get SCSI host active fc4s.
+ */
+static void
+bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_port_s *port = im_port->port;
+
+ memset(fc_host_active_fc4s(shost), 0,
+ sizeof(fc_host_active_fc4s(shost)));
+
+ if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+ fc_host_active_fc4s(shost)[2] = 1;
+
+ fc_host_active_fc4s(shost)[7] = 1;
+}
+
+/*
+ * FC transport template entry, get SCSI host link speed.
+ */
+static void
+bfad_im_get_host_speed(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_port_attr_s attr;
+
+ bfa_fcport_get_attr(&bfad->bfa, &attr);
+ switch (attr.speed) {
+ case BFA_PORT_SPEED_10GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case BFA_PORT_SPEED_16GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
+ case BFA_PORT_SPEED_8GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
+ case BFA_PORT_SPEED_4GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case BFA_PORT_SPEED_2GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case BFA_PORT_SPEED_1GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+}
+
+/*
+ * FC transport template entry, get SCSI host port type.
+ */
+static void
+bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_port_s *port = im_port->port;
+ wwn_t fabric_nwwn = 0;
+
+ fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
+
+ fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
+
+}
+
+/*
+ * FC transport template entry, get BFAD statistics.
+ */
+static struct fc_host_statistics *
+bfad_im_get_stats(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_hal_comp fcomp;
+ union bfa_port_stats_u *fcstats;
+ struct fc_host_statistics *hstats;
+ bfa_status_t rc;
+ unsigned long flags;
+
+ fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
+ if (fcstats == NULL)
+ return NULL;
+
+ hstats = &bfad->link_stats;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memset(hstats, 0, sizeof(struct fc_host_statistics));
+ rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
+ fcstats, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (rc != BFA_STATUS_OK)
+ return NULL;
+
+ wait_for_completion(&fcomp.comp);
+
+ /* Fill the fc_host_statistics structure */
+ hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
+ hstats->tx_frames = fcstats->fc.tx_frames;
+ hstats->tx_words = fcstats->fc.tx_words;
+ hstats->rx_frames = fcstats->fc.rx_frames;
+ hstats->rx_words = fcstats->fc.rx_words;
+ hstats->lip_count = fcstats->fc.lip_count;
+ hstats->nos_count = fcstats->fc.nos_count;
+ hstats->error_frames = fcstats->fc.error_frames;
+ hstats->dumped_frames = fcstats->fc.dropped_frames;
+ hstats->link_failure_count = fcstats->fc.link_failures;
+ hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
+ hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
+ hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
+ hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
+
+ kfree(fcstats);
+ return hstats;
+}
+
+/*
+ * FC transport template entry, reset BFAD statistics.
+ */
+static void
+bfad_im_reset_stats(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ bfa_status_t rc;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
+ &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (rc != BFA_STATUS_OK)
+ return;
+
+ wait_for_completion(&fcomp.comp);
+
+ return;
+}
+
+/*
+ * FC transport template entry, set rport loss timeout.
+ * Update dev_loss_tmo based on the value pushed down by the stack
+ * In case it is lesser than path_tov of driver, set it to path_tov + 1
+ * to ensure that the driver times out before the application
+ */
+static void
+bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+ struct bfad_itnim_data_s *itnim_data = rport->dd_data;
+ struct bfad_itnim_s *itnim = itnim_data->itnim;
+ struct bfad_s *bfad = itnim->im->bfad;
+ uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
+
+ rport->dev_loss_tmo = timeout;
+ if (timeout < path_tov)
+ rport->dev_loss_tmo = path_tov + 1;
+}
+
+static int
+bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ char *vname = fc_vport->symbolic_name;
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_lport_cfg_s port_cfg;
+ struct bfad_vport_s *vp;
+ int status = 0, rc;
+ unsigned long flags;
+
+ memset(&port_cfg, 0, sizeof(port_cfg));
+ u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn);
+ u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
+ if (strlen(vname) > 0)
+ strcpy((char *)&port_cfg.sym_name, vname);
+ port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
+ if (port_cfg.pwwn ==
+ vp->fcs_vport.lport.port_cfg.pwwn) {
+ port_cfg.preboot_vp =
+ vp->fcs_vport.lport.port_cfg.preboot_vp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
+ if (rc == BFA_STATUS_OK) {
+ struct bfad_vport_s *vport;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
+ port_cfg.pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ if (disable) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_vport_stop(fcs_vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ }
+
+ vport = fcs_vport->vport_drv;
+ vshost = vport->drv_port.im_port->shost;
+ fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
+ fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+ fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(vshost), 0,
+ sizeof(fc_host_supported_fc4s(vshost)));
+
+ /* For FCP type 0x08 */
+ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+ fc_host_supported_fc4s(vshost)[2] = 1;
+
+ /* For fibre channel services type 0x20 */
+ fc_host_supported_fc4s(vshost)[7] = 1;
+
+ fc_host_supported_speeds(vshost) =
+ bfad_im_supported_speeds(&bfad->bfa);
+ fc_host_maxframe_size(vshost) =
+ bfa_fcport_get_maxfrsize(&bfad->bfa);
+
+ fc_vport->dd_data = vport;
+ vport->drv_port.im_port->fc_vport = fc_vport;
+ } else if (rc == BFA_STATUS_INVALID_WWN)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_EXISTS)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_MAX)
+ return VPCERR_NO_FABRIC_SUPP;
+ else if (rc == BFA_STATUS_VPORT_WWN_BP)
+ return VPCERR_BAD_WWN;
+ else
+ return FC_VPORT_FAILED;
+
+ return status;
+}
+
+int
+bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ uint32_t status;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ status = bfa_port_disable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (status != BFA_STATUS_OK)
+ return -EIO;
+
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.status != BFA_STATUS_OK)
+ return -EIO;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ status = bfa_port_enable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (status != BFA_STATUS_OK)
+ return -EIO;
+
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.status != BFA_STATUS_OK)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+bfad_im_vport_delete(struct fc_vport *fc_vport)
+{
+ struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) vport->drv_port.im_port;
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_port_s *port;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ int rc;
+ unsigned long flags;
+ struct completion fcomp;
+
+ if (im_port->flags & BFAD_PORT_DELETE) {
+ bfad_scsi_host_free(bfad, im_port);
+ list_del(&vport->list_entry);
+ kfree(vport);
+ return 0;
+ }
+
+ port = im_port->port;
+
+ vshost = vport->drv_port.im_port->shost;
+ u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ vport->drv_port.flags |= BFAD_PORT_DELETE;
+
+ vport->comp_del = &fcomp;
+ init_completion(vport->comp_del);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_fcs_vport_delete(&vport->fcs_vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (rc == BFA_STATUS_PBC) {
+ vport->drv_port.flags &= ~BFAD_PORT_DELETE;
+ vport->comp_del = NULL;
+ return -1;
+ }
+
+ wait_for_completion(vport->comp_del);
+
+ bfad_scsi_host_free(bfad, im_port);
+ list_del(&vport->list_entry);
+ kfree(vport);
+
+ return 0;
+}
+
+static int
+bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct bfad_vport_s *vport;
+ struct bfad_s *bfad;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ unsigned long flags;
+
+ vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ bfad = vport->drv_port.bfad;
+ vshost = vport->drv_port.im_port->shost;
+ u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else {
+ bfa_fcs_vport_start(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ }
+
+ return 0;
+}
+
+void
+bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
+{
+ struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *)vport->drv_port.im_port;
+ struct bfad_s *bfad = im_port->bfad;
+ struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
+ char *sym_name = fc_vport->symbolic_name;
+ struct bfa_fcs_vport_s *fcs_vport;
+ wwn_t pwwn;
+ unsigned long flags;
+
+ u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (strlen(sym_name) > 0)
+ bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+struct fc_function_template bfad_im_fc_function_template = {
+
+ /* Target dynamic attributes */
+ .get_starget_port_id = bfad_im_get_starget_port_id,
+ .show_starget_port_id = 1,
+ .get_starget_node_name = bfad_im_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = bfad_im_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ /* Host dynamic attribute */
+ .get_host_port_id = bfad_im_get_host_port_id,
+ .show_host_port_id = 1,
+
+ /* Host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* More host dynamic attributes */
+ .show_host_port_type = 1,
+ .get_host_port_type = bfad_im_get_host_port_type,
+ .show_host_port_state = 1,
+ .get_host_port_state = bfad_im_get_host_port_state,
+ .show_host_active_fc4s = 1,
+ .get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+ .show_host_speed = 1,
+ .get_host_speed = bfad_im_get_host_speed,
+ .show_host_fabric_name = 1,
+ .get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+ .show_host_symbolic_name = 1,
+
+ /* Statistics */
+ .get_fc_host_stats = bfad_im_get_stats,
+ .reset_fc_host_stats = bfad_im_reset_stats,
+
+ /* Allocation length for host specific data */
+ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+ /* Remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+ .issue_fc_host_lip = bfad_im_issue_fc_host_lip,
+ .vport_create = bfad_im_vport_create,
+ .vport_delete = bfad_im_vport_delete,
+ .vport_disable = bfad_im_vport_disable,
+ .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
+ .bsg_request = bfad_im_bsg_request,
+ .bsg_timeout = bfad_im_bsg_timeout,
+};
+
+struct fc_function_template bfad_im_vport_fc_function_template = {
+
+ /* Target dynamic attributes */
+ .get_starget_port_id = bfad_im_get_starget_port_id,
+ .show_starget_port_id = 1,
+ .get_starget_node_name = bfad_im_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = bfad_im_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ /* Host dynamic attribute */
+ .get_host_port_id = bfad_im_get_host_port_id,
+ .show_host_port_id = 1,
+
+ /* Host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* More host dynamic attributes */
+ .show_host_port_type = 1,
+ .get_host_port_type = bfad_im_get_host_port_type,
+ .show_host_port_state = 1,
+ .get_host_port_state = bfad_im_get_host_port_state,
+ .show_host_active_fc4s = 1,
+ .get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+ .show_host_speed = 1,
+ .get_host_speed = bfad_im_get_host_speed,
+ .show_host_fabric_name = 1,
+ .get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+ .show_host_symbolic_name = 1,
+
+ /* Statistics */
+ .get_fc_host_stats = bfad_im_get_stats,
+ .reset_fc_host_stats = bfad_im_reset_stats,
+
+ /* Allocation length for host specific data */
+ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+ /* Remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+};
+
+/*
+ * Scsi_Host_attrs SCSI host attributes
+ */
+static ssize_t
+bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+
+ bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
+ return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+}
+
+static ssize_t
+bfad_im_model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+
+ bfa_get_adapter_model(&bfad->bfa, model);
+ return snprintf(buf, PAGE_SIZE, "%s\n", model);
+}
+
+static ssize_t
+bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ int nports = 0;
+
+ bfa_get_adapter_model(&bfad->bfa, model);
+ nports = bfa_get_nports(&bfad->bfa);
+ if (!strcmp(model, "Brocade-425"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 4Gbps PCIe dual port FC HBA");
+ else if (!strcmp(model, "Brocade-825"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 8Gbps PCIe dual port FC HBA");
+ else if (!strcmp(model, "Brocade-42B"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 4Gbps PCIe dual port FC HBA for HP");
+ else if (!strcmp(model, "Brocade-82B"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 8Gbps PCIe dual port FC HBA for HP");
+ else if (!strcmp(model, "Brocade-1010"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps single port CNA");
+ else if (!strcmp(model, "Brocade-1020"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps dual port CNA");
+ else if (!strcmp(model, "Brocade-1007"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps CNA for IBM Blade Center");
+ else if (!strcmp(model, "Brocade-415"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 4Gbps PCIe single port FC HBA");
+ else if (!strcmp(model, "Brocade-815"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 8Gbps PCIe single port FC HBA");
+ else if (!strcmp(model, "Brocade-41B"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 4Gbps PCIe single port FC HBA for HP");
+ else if (!strcmp(model, "Brocade-81B"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 8Gbps PCIe single port FC HBA for HP");
+ else if (!strcmp(model, "Brocade-804"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 8Gbps FC HBA for HP Bladesystem C-class");
+ else if (!strcmp(model, "Brocade-1741"))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps CNA for Dell M-Series Blade Servers");
+ else if (strstr(model, "Brocade-1860")) {
+ if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps single port CNA");
+ else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe single port FC HBA");
+ else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps dual port CNA");
+ else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe dual port FC HBA");
+ } else if (!strcmp(model, "Brocade-1867")) {
+ if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe single port FC HBA for IBM");
+ else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe dual port FC HBA for IBM");
+ } else
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Invalid Model");
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+}
+
+static ssize_t
+bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_port_s *port = im_port->port;
+ u64 nwwn;
+
+ nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+}
+
+static ssize_t
+bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_lport_attr_s port_attr;
+ char symname[BFA_SYMNAME_MAXLEN];
+
+ bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+ strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ BFA_SYMNAME_MAXLEN);
+ return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+}
+
+static ssize_t
+bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ char hw_ver[BFA_VERSION_LEN];
+
+ bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
+ return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+}
+
+static ssize_t
+bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+}
+
+static ssize_t
+bfad_im_optionrom_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ char optrom_ver[BFA_VERSION_LEN];
+
+ bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
+ return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+}
+
+static ssize_t
+bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ char fw_ver[BFA_VERSION_LEN];
+
+ bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
+ return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+}
+
+static ssize_t
+bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ bfa_get_nports(&bfad->bfa));
+}
+
+static ssize_t
+bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+}
+
+static ssize_t
+bfad_im_num_of_discovered_ports_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_port_s *port = im_port->port;
+ struct bfad_s *bfad = im_port->bfad;
+ int nrports = 2048;
+ struct bfa_rport_qualifier_s *rports = NULL;
+ unsigned long flags;
+
+ rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
+ GFP_ATOMIC);
+ if (rports == NULL)
+ return snprintf(buf, PAGE_SIZE, "Failed\n");
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ kfree(rports);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+}
+
+static DEVICE_ATTR(serial_number, S_IRUGO,
+ bfad_im_serial_num_show, NULL);
+static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL);
+static DEVICE_ATTR(model_description, S_IRUGO,
+ bfad_im_model_desc_show, NULL);
+static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL);
+static DEVICE_ATTR(symbolic_name, S_IRUGO,
+ bfad_im_symbolic_name_show, NULL);
+static DEVICE_ATTR(hardware_version, S_IRUGO,
+ bfad_im_hw_version_show, NULL);
+static DEVICE_ATTR(driver_version, S_IRUGO,
+ bfad_im_drv_version_show, NULL);
+static DEVICE_ATTR(option_rom_version, S_IRUGO,
+ bfad_im_optionrom_version_show, NULL);
+static DEVICE_ATTR(firmware_version, S_IRUGO,
+ bfad_im_fw_version_show, NULL);
+static DEVICE_ATTR(number_of_ports, S_IRUGO,
+ bfad_im_num_of_ports_show, NULL);
+static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
+static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
+ bfad_im_num_of_discovered_ports_show, NULL);
+
+struct device_attribute *bfad_im_host_attrs[] = {
+ &dev_attr_serial_number,
+ &dev_attr_model,
+ &dev_attr_model_description,
+ &dev_attr_node_name,
+ &dev_attr_symbolic_name,
+ &dev_attr_hardware_version,
+ &dev_attr_driver_version,
+ &dev_attr_option_rom_version,
+ &dev_attr_firmware_version,
+ &dev_attr_number_of_ports,
+ &dev_attr_driver_name,
+ &dev_attr_number_of_discovered_ports,
+ NULL,
+};
+
+struct device_attribute *bfad_im_vport_attrs[] = {
+ &dev_attr_serial_number,
+ &dev_attr_model,
+ &dev_attr_model_description,
+ &dev_attr_node_name,
+ &dev_attr_symbolic_name,
+ &dev_attr_hardware_version,
+ &dev_attr_driver_version,
+ &dev_attr_option_rom_version,
+ &dev_attr_firmware_version,
+ &dev_attr_number_of_ports,
+ &dev_attr_driver_name,
+ &dev_attr_number_of_discovered_ports,
+ NULL,
+};
+
+
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644
index 000000000..023b9d42a
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -0,0 +1,3593 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfad_bsg.h"
+
+BFA_TRC_FILE(LDRV, BSG);
+
+int
+bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ /* If IOC is not in disabled state - return */
+ if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+ }
+
+ init_completion(&bfad->enable_comp);
+ bfa_iocfc_enable(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->enable_comp);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+ }
+
+ if (bfad->disable_active) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return -EBUSY;
+ }
+
+ bfad->disable_active = BFA_TRUE;
+ init_completion(&bfad->disable_comp);
+ bfa_iocfc_disable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(&bfad->disable_comp);
+ bfad->disable_active = BFA_FALSE;
+ iocmd->status = BFA_STATUS_OK;
+
+ return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
+{
+ int i;
+ struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
+ struct bfad_im_port_s *im_port;
+ struct bfa_port_attr_s pattr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &pattr);
+ iocmd->nwwn = pattr.nwwn;
+ iocmd->pwwn = pattr.pwwn;
+ iocmd->ioc_type = bfa_get_type(&bfad->bfa);
+ iocmd->mac = bfa_get_mac(&bfad->bfa);
+ iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
+ bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
+ iocmd->factorynwwn = pattr.factorynwwn;
+ iocmd->factorypwwn = pattr.factorypwwn;
+ iocmd->bfad_num = bfad->inst_no;
+ im_port = bfad->pport.im_port;
+ iocmd->host = im_port->shost->host_no;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ strcpy(iocmd->name, bfad->adapter_name);
+ strcpy(iocmd->port_name, bfad->port_name);
+ strcpy(iocmd->hwpath, bfad->pci_name);
+
+ /* set adapter hw path */
+ strcpy(iocmd->adapter_hwpath, bfad->pci_name);
+ for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
+ ;
+ for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
+ ;
+ iocmd->adapter_hwpath[i] = '\0';
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* fill in driver attr info */
+ strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
+ strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
+ strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
+ iocmd->ioc_attr.adapter_attr.fw_ver);
+ strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
+ iocmd->ioc_attr.adapter_attr.optrom_ver);
+
+ /* copy chip rev info first otherwise it will be overwritten */
+ memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
+ sizeof(bfad->pci_attr.chip_rev));
+ memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
+ sizeof(struct bfa_ioc_pci_attr_s));
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
+
+ bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_ioc_fwstats_s *iocmd =
+ (struct bfa_bsg_ioc_fwstats_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_ioc_fwstats_s),
+ sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+out:
+ bfa_trc(bfad, 0x6666);
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_IOC_RESET_STATS) {
+ bfa_ioc_clear_stats(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+ if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+ strcpy(bfad->adapter_name, iocmd->name);
+ else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+ strcpy(bfad->port_name, iocmd->name);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
+
+ iocmd->status = BFA_STATUS_OK;
+ bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+static int
+bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
+ struct bfa_lport_attr_s port_attr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
+ bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
+ iocmd->attr.pid = port_attr.pid;
+ else
+ iocmd->attr.pid = 0;
+
+ iocmd->attr.port_type = port_attr.port_type;
+ iocmd->attr.loopback = port_attr.loopback;
+ iocmd->attr.authfail = port_attr.authfail;
+ strncpy(iocmd->attr.port_symname.symname,
+ port_attr.port_cfg.sym_name.symname,
+ sizeof(port_attr.port_cfg.sym_name.symname));
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_port_stats_s),
+ sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
+ iocmd_bufptr, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_PORT_CFG_TOPO)
+ cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+ cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+ cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+ cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+ (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_bbcr_enable_s *iocmd =
+ (struct bfa_bsg_bbcr_enable_s *)pcmd;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (cmd == IOCMD_PORT_BBCR_ENABLE)
+ rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
+ else if (cmd == IOCMD_PORT_BBCR_DISABLE)
+ rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
+ else {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = rc;
+ return 0;
+}
+
+int
+bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
+{
+ struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status =
+ bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+
+static int
+bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_stats_s *iocmd =
+ (struct bfa_bsg_lport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_clear_stats(fcs_port);
+ /* clear IO stats from all active itnims */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+ continue;
+ bfa_itnim_clear_stats(itnim);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_iostats_s *iocmd =
+ (struct bfa_bsg_lport_iostats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
+ fcs_port->lp_tag);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_lport_get_rports_s *iocmd =
+ (struct bfa_bsg_lport_get_rports_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ unsigned long flags;
+ void *iocmd_bufptr;
+
+ if (iocmd->nrports == 0)
+ return -EINVAL;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_lport_get_rports_s),
+ sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
+ != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd +
+ sizeof(struct bfa_bsg_lport_get_rports_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, 0);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_rport_quals(fcs_port,
+ (struct bfa_rport_qualifier_s *)iocmd_bufptr,
+ &iocmd->nrports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ if (iocmd->pid)
+ fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
+ iocmd->rpwwn, iocmd->pid);
+ else
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+static int
+bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_scsi_addr_s *iocmd =
+ (struct bfa_bsg_rport_scsi_addr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *fcs_itnim;
+ struct bfad_itnim_s *drv_itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_itnim == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ drv_itnim = fcs_itnim->itnim_drv;
+
+ if (drv_itnim && drv_itnim->im_port)
+ iocmd->host = drv_itnim->im_port->shost->host_no;
+ else {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ iocmd->target = drv_itnim->scsi_tgt_id;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->bus = 0;
+ iocmd->lun = 0;
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_stats_s *iocmd =
+ (struct bfa_bsg_rport_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
+ sizeof(struct bfa_rport_stats_s));
+ if (bfa_fcs_rport_get_halrport(fcs_rport)) {
+ memcpy((void *)&iocmd->stats.hal_stats,
+ (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+ sizeof(struct bfa_rport_hal_stats_s));
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ struct bfa_rport_s *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+ rport = bfa_fcs_rport_get_halrport(fcs_rport);
+ if (rport)
+ memset(&rport->stats, 0, sizeof(rport->stats));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_set_speed_s *iocmd =
+ (struct bfa_bsg_rport_set_speed_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ fcs_rport->rpf.assigned_speed = iocmd->speed;
+ /* Set this speed in f/w only if the RPSC speed is not available */
+ if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+ if (fcs_rport->bfa_rport)
+ bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_stats_s *iocmd =
+ (struct bfa_bsg_vport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+ sizeof(struct bfa_vport_stats_s));
+ memcpy((void *)&iocmd->vport_stats.port_stats,
+ (void *)&fcs_vport->lport.stats,
+ sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+ memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+static int
+bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_fabric_get_lports_s *iocmd =
+ (struct bfa_bsg_fabric_get_lports_s *)cmd;
+ bfa_fcs_vf_t *fcs_vf;
+ uint32_t nports = iocmd->nports;
+ unsigned long flags;
+ void *iocmd_bufptr;
+
+ if (nports == 0) {
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_fabric_get_lports_s),
+ sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd +
+ sizeof(struct bfa_bsg_fabric_get_lports_s);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->nports = nports;
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
+{
+ struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else {
+ if (cmd == IOCMD_RATELIM_ENABLE)
+ fcport->cfg.ratelimit = BFA_TRUE;
+ else if (cmd == IOCMD_RATELIM_DISABLE)
+ fcport->cfg.ratelimit = BFA_FALSE;
+
+ if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+ fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+ iocmd->status = BFA_STATUS_OK;
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Auto and speeds greater than the supported speed, are invalid */
+ if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+ (iocmd->speed > fcport->speed_sup)) {
+ iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+ }
+
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else {
+ fcport->cfg.trl_def_speed = iocmd->speed;
+ iocmd->status = BFA_STATUS_OK;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstats_s *iocmd =
+ (struct bfa_bsg_fcpim_modstats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ /* accumulate IO stats from itnim */
+ memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+ (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ memset(&fcpim->del_itn_stats, 0,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
+ (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else
+ iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
+ iocmd->rpwwn, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_iostats_s *iocmd =
+ (struct bfa_bsg_itnim_iostats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port) {
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ bfa_trc(bfad, 0);
+ } else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ if (bfa_fcs_itnim_get_halitn(itnim))
+ memcpy((void *)&iocmd->iostats, (void *)
+ &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+ sizeof(struct bfa_itnim_iostats_s));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+ bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_itnstats_s *iocmd =
+ (struct bfa_bsg_itnim_itnstats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port) {
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ bfa_trc(bfad, 0);
+ } else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
+ &iocmd->itnstats);
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_enable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_disable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
+ &iocmd->pcifn_cfg,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
+ &iocmd->pcifn_id, iocmd->port,
+ iocmd->pcifn_class, iocmd->bw_min,
+ iocmd->bw_max, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
+ iocmd->pcifn_id,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
+ iocmd->pcifn_id, iocmd->bw_min,
+ iocmd->bw_max, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ bfa_trc(bfad, iocmd->status);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_adapter_cfg_mode_s *iocmd =
+ (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
+ iocmd->cfg.mode, iocmd->cfg.max_pf,
+ iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_mode_s *iocmd =
+ (struct bfa_bsg_port_cfg_mode_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
+ iocmd->instance, iocmd->cfg.mode,
+ iocmd->cfg.max_pf, iocmd->cfg.max_vf,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
+ iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
+ bfad_hcb_comp, &fcomp);
+ else
+ iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ iocmd->status = BFA_STATUS_OK;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_cee_attr_s *iocmd =
+ (struct bfa_bsg_cee_attr_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp cee_comp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_cee_attr_s),
+ sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
+
+ cee_comp.status = 0;
+ init_completion(&cee_comp.comp);
+ mutex_lock(&bfad_mutex);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
+ bfad_hcb_comp, &cee_comp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ mutex_unlock(&bfad_mutex);
+ bfa_trc(bfad, 0x5555);
+ goto out;
+ }
+ wait_for_completion(&cee_comp.comp);
+ mutex_unlock(&bfad_mutex);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_cee_stats_s *iocmd =
+ (struct bfa_bsg_cee_stats_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp cee_comp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_cee_stats_s),
+ sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
+
+ cee_comp.status = 0;
+ init_completion(&cee_comp.comp);
+ mutex_lock(&bfad_mutex);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
+ bfad_hcb_comp, &cee_comp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ mutex_unlock(&bfad_mutex);
+ bfa_trc(bfad, 0x5555);
+ goto out;
+ }
+ wait_for_completion(&cee_comp.comp);
+ mutex_unlock(&bfad_mutex);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ bfa_trc(bfad, 0x5555);
+ return 0;
+}
+
+int
+bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_flash_attr_s *iocmd =
+ (struct bfa_bsg_flash_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+ iocmd->instance, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_flash_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ iocmd->type, iocmd->instance, iocmd_bufptr,
+ iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_flash_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_get_temp_s *iocmd =
+ (struct bfa_bsg_diag_get_temp_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_memtest_s *iocmd =
+ (struct bfa_bsg_diag_memtest_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->memtest, iocmd->pat,
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_loopback_s *iocmd =
+ (struct bfa_bsg_diag_loopback_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
+ iocmd->speed, iocmd->lpcnt, iocmd->pat,
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_fwping_s *iocmd =
+ (struct bfa_bsg_diag_fwping_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
+ iocmd->pattern, &iocmd->result,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ bfa_trc(bfad, 0x77771);
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
+ iocmd->queue, &iocmd->result,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_show_s *iocmd =
+ (struct bfa_bsg_sfp_show_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ bfa_trc(bfad, iocmd->status);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->ledtest);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_beacon_s *iocmd =
+ (struct bfa_bsg_diag_beacon_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
+ iocmd->beacon, iocmd->link_e2e_beacon,
+ iocmd->second);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_lb_stat_s *iocmd =
+ (struct bfa_bsg_diag_lb_stat_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+
+ return 0;
+}
+
+int
+bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
+{
+ struct bfa_bsg_dport_enable_s *iocmd =
+ (struct bfa_bsg_dport_enable_s *)pcmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
+ iocmd->pat, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ bfa_trc(bfad, iocmd->status);
+ else {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+ return 0;
+}
+
+int
+bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ bfa_trc(bfad, iocmd->status);
+ else {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+ return 0;
+}
+
+int
+bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
+{
+ struct bfa_bsg_dport_enable_s *iocmd =
+ (struct bfa_bsg_dport_enable_s *)pcmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
+ iocmd->pat, bfad_hcb_comp,
+ &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ } else {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
+{
+ struct bfa_bsg_diag_dport_show_s *iocmd =
+ (struct bfa_bsg_diag_dport_show_s *)pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+
+int
+bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_phy_attr_s *iocmd =
+ (struct bfa_bsg_phy_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
+ &iocmd->attr, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_phy_stats_s *iocmd =
+ (struct bfa_bsg_phy_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
+ &iocmd->stats, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_phy_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+ 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vhba_attr_s *iocmd =
+ (struct bfa_bsg_vhba_attr_s *)cmd;
+ struct bfa_vhba_attr_s *attr = &iocmd->attr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ attr->pwwn = bfad->bfa.ioc.attr->pwwn;
+ attr->nwwn = bfad->bfa.ioc.attr->nwwn;
+ attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
+ attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
+ attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_phy_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+ 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+
+ if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
+ bfa_trc(bfad, sizeof(struct bfa_plog_s));
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd->status = BFA_STATUS_OK;
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
+out:
+ return 0;
+}
+
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+ u32 offset;
+
+ if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+ BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+ !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+ !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+ bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ offset = iocmd->offset;
+ iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+ &offset, &iocmd->bufsz);
+ iocmd->offset = offset;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
+ bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+ else if (v_cmd == IOCMD_DEBUG_START_DTRC)
+ bfa_trc_init(bfad->trcmod);
+ else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
+ bfa_trc_stop(bfad->trcmod);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+ if (iocmd->ctl == BFA_TRUE)
+ bfad->plog_buf.plog_enabled = 1;
+ else
+ bfad->plog_buf.plog_enabled = 0;
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_profile_s *iocmd =
+ (struct bfa_bsg_fcpim_profile_s *)cmd;
+ struct timeval tv;
+ unsigned long flags;
+
+ do_gettimeofday(&tv);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+ iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_ioprofile_s *iocmd =
+ (struct bfa_bsg_itnim_ioprofile_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else
+ iocmd->status = bfa_itnim_get_ioprofile(
+ bfa_fcs_itnim_get_halitn(itnim),
+ &iocmd->ioprofile);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+ struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+ pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+ pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+ memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (bfa_fcport_is_dport(&bfad->bfa)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return BFA_STATUS_DPORT_ERR;
+ }
+
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else {
+ if (v_cmd == IOCMD_TRUNK_ENABLE) {
+ trunk->attr.state = BFA_TRUNK_OFFLINE;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_TRUE;
+ } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+ trunk->attr.state = BFA_TRUNK_DISABLED;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_FALSE;
+ }
+
+ if (!bfa_fcport_is_disabled(&bfad->bfa))
+ bfa_fcport_enable(&bfad->bfa);
+
+ iocmd->status = BFA_STATUS_OK;
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else {
+ memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+ sizeof(struct bfa_trunk_attr_s));
+ iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else {
+ if (v_cmd == IOCMD_QOS_ENABLE)
+ fcport->cfg.qos_enabled = BFA_TRUE;
+ else if (v_cmd == IOCMD_QOS_DISABLE) {
+ fcport->cfg.qos_enabled = BFA_FALSE;
+ fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
+ fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
+ fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else {
+ iocmd->attr.state = fcport->qos_attr.state;
+ iocmd->attr.total_bb_cr =
+ be32_to_cpu(fcport->qos_attr.total_bb_cr);
+ iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
+ iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
+ iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
+ iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
+ iocmd->status = BFA_STATUS_OK;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_vc_attr_s *iocmd =
+ (struct bfa_bsg_qos_vc_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+ unsigned long flags;
+ u32 i = 0;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+ iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
+ iocmd->attr.elp_opmode_flags =
+ be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+ /* Individual VC info */
+ while (i < iocmd->attr.total_vc_count) {
+ iocmd->attr.vc_info[i].vc_credit =
+ bfa_vc_attr->vc_info[i].vc_credit;
+ iocmd->attr.vc_info[i].borrow_credit =
+ bfa_vc_attr->vc_info[i].borrow_credit;
+ iocmd->attr.vc_info[i].priority =
+ bfa_vc_attr->vc_info[i].priority;
+ i++;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+ (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+ iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+ else
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_stats_s *iocmd =
+ (struct bfa_bsg_vf_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+ sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_reset_stats_s *iocmd =
+ (struct bfa_bsg_vf_reset_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+ struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+ struct bfad_vport_s *vport = NULL;
+
+ /* Set the scsi device LUN SCAN flags for base port */
+ bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+ /* Set the scsi device LUN SCAN flags for the vports */
+ list_for_each_entry(vport, &bfad->vport_list, list_entry)
+ bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+ /* Set the LUN Scanning mode to be Sequential scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+ /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+ struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+ iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+ &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+ iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+ iocmd->vf_id, &iocmd->pwwn,
+ iocmd->rpwwn, iocmd->lun);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_throttle_s *iocmd =
+ (struct bfa_bsg_fcpim_throttle_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
+ (void *)&iocmd->throttle);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_throttle_s *iocmd =
+ (struct bfa_bsg_fcpim_throttle_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
+ iocmd->throttle.cfg_value);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_tfru_s *iocmd =
+ (struct bfa_bsg_tfru_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
+ &iocmd->data, iocmd->len, iocmd->offset,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status == BFA_STATUS_OK) {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_tfru_s *iocmd =
+ (struct bfa_bsg_tfru_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
+ &iocmd->data, iocmd->len, iocmd->offset,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status == BFA_STATUS_OK) {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fruvpd_s *iocmd =
+ (struct bfa_bsg_fruvpd_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
+ &iocmd->data, iocmd->len, iocmd->offset,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status == BFA_STATUS_OK) {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fruvpd_s *iocmd =
+ (struct bfa_bsg_fruvpd_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
+ &iocmd->data, iocmd->len, iocmd->offset,
+ bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status == BFA_STATUS_OK) {
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fruvpd_max_size_s *iocmd =
+ (struct bfa_bsg_fruvpd_max_size_s *)cmd;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
+ &iocmd->max_size);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
+ unsigned int payload_len)
+{
+ int rc = -EINVAL;
+
+ switch (cmd) {
+ case IOCMD_IOC_ENABLE:
+ rc = bfad_iocmd_ioc_enable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_DISABLE:
+ rc = bfad_iocmd_ioc_disable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_INFO:
+ rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_ATTR:
+ rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_STATS:
+ rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_FWSTATS:
+ rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_IOC_RESET_STATS:
+ case IOCMD_IOC_RESET_FWSTATS:
+ rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOC_SET_ADAPTER_NAME:
+ case IOCMD_IOC_SET_PORT_NAME:
+ rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOCFC_GET_ATTR:
+ rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_IOCFC_SET_INTR:
+ rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
+ break;
+ case IOCMD_PORT_ENABLE:
+ rc = bfad_iocmd_port_enable(bfad, iocmd);
+ break;
+ case IOCMD_PORT_DISABLE:
+ rc = bfad_iocmd_port_disable(bfad, iocmd);
+ break;
+ case IOCMD_PORT_GET_ATTR:
+ rc = bfad_iocmd_port_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_PORT_GET_STATS:
+ rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_PORT_RESET_STATS:
+ rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_TOPO:
+ case IOCMD_PORT_CFG_SPEED:
+ case IOCMD_PORT_CFG_ALPA:
+ case IOCMD_PORT_CLR_ALPA:
+ rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+ break;
+ case IOCMD_PORT_CFG_MAXFRSZ:
+ rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+ break;
+ case IOCMD_PORT_BBCR_ENABLE:
+ case IOCMD_PORT_BBCR_DISABLE:
+ rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
+ break;
+ case IOCMD_PORT_BBCR_GET_ATTR:
+ rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_ATTR:
+ rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_STATS:
+ rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_RESET_STATS:
+ rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_IOSTATS:
+ rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_RPORTS:
+ rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_RPORT_GET_ATTR:
+ rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_GET_ADDR:
+ rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_GET_STATS:
+ rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_RESET_STATS:
+ rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_SET_SPEED:
+ rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_ATTR:
+ rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_STATS:
+ rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_RESET_STATS:
+ rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FABRIC_GET_LPORTS:
+ rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_RATELIM_ENABLE:
+ case IOCMD_RATELIM_DISABLE:
+ rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+ break;
+ case IOCMD_RATELIM_DEF_SPEED:
+ rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FCPIM_FAILOVER:
+ rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_MODSTATS:
+ rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_MODSTATSCLR:
+ rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_DEL_ITN_STATS:
+ rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_ATTR:
+ rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_IOSTATS:
+ rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_RESET_STATS:
+ rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_ITNSTATS:
+ rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_ENABLE:
+ rc = bfad_iocmd_fcport_enable(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_DISABLE:
+ rc = bfad_iocmd_fcport_disable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_PCIFN_CFG:
+ rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
+ break;
+ case IOCMD_IOC_FW_SIG_INV:
+ rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_CREATE:
+ rc = bfad_iocmd_pcifn_create(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_DELETE:
+ rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_BW:
+ rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
+ break;
+ case IOCMD_ADAPTER_CFG_MODE:
+ rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_MODE:
+ rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_ENABLE_OPTROM:
+ case IOCMD_FLASH_DISABLE_OPTROM:
+ rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FAA_QUERY:
+ rc = bfad_iocmd_faa_query(bfad, iocmd);
+ break;
+ case IOCMD_CEE_GET_ATTR:
+ rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_CEE_GET_STATS:
+ rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_CEE_RESET_STATS:
+ rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_SFP_MEDIA:
+ rc = bfad_iocmd_sfp_media(bfad, iocmd);
+ break;
+ case IOCMD_SFP_SPEED:
+ rc = bfad_iocmd_sfp_speed(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_GET_ATTR:
+ rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_ERASE_PART:
+ rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_UPDATE_PART:
+ rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_FLASH_READ_PART:
+ rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DIAG_TEMP:
+ rc = bfad_iocmd_diag_temp(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_MEMTEST:
+ rc = bfad_iocmd_diag_memtest(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LOOPBACK:
+ rc = bfad_iocmd_diag_loopback(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_FWPING:
+ rc = bfad_iocmd_diag_fwping(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_QUEUETEST:
+ rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_SFP:
+ rc = bfad_iocmd_diag_sfp(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LED:
+ rc = bfad_iocmd_diag_led(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_BEACON_LPORT:
+ rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LB_STAT:
+ rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_DPORT_ENABLE:
+ rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_DPORT_DISABLE:
+ rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_DPORT_SHOW:
+ rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_DPORT_START:
+ rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
+ break;
+ case IOCMD_PHY_GET_ATTR:
+ rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_PHY_GET_STATS:
+ rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_PHY_UPDATE_FW:
+ rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_PHY_READ_FW:
+ rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_VHBA_QUERY:
+ rc = bfad_iocmd_vhba_query(bfad, iocmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG:
+ rc = bfad_iocmd_porglog_get(bfad, iocmd);
+ break;
+ case IOCMD_DEBUG_FW_CORE:
+ rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DEBUG_FW_STATE_CLR:
+ case IOCMD_DEBUG_PORTLOG_CLR:
+ case IOCMD_DEBUG_START_DTRC:
+ case IOCMD_DEBUG_STOP_DTRC:
+ rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG_CTL:
+ rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_PROFILE_ON:
+ case IOCMD_FCPIM_PROFILE_OFF:
+ rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+ break;
+ case IOCMD_ITNIM_GET_IOPROFILE:
+ rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_GET_STATS:
+ rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_RESET_STATS:
+ rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_CFG:
+ rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_QUERY:
+ rc = bfad_iocmd_boot_query(bfad, iocmd);
+ break;
+ case IOCMD_PREBOOT_QUERY:
+ rc = bfad_iocmd_preboot_query(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_CFG:
+ rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_QUERY:
+ rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+ break;
+ case IOCMD_TRUNK_ENABLE:
+ case IOCMD_TRUNK_DISABLE:
+ rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+ break;
+ case IOCMD_TRUNK_GET_ATTR:
+ rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_ENABLE:
+ case IOCMD_QOS_DISABLE:
+ rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+ break;
+ case IOCMD_QOS_GET_ATTR:
+ rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_VC_ATTR:
+ rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_STATS:
+ rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_QOS_RESET_STATS:
+ rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_QOS_SET_BW:
+ rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
+ break;
+ case IOCMD_VF_GET_STATS:
+ rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_RESET_STATS:
+ rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ENABLE:
+ case IOCMD_FCPIM_LUNMASK_DISABLE:
+ case IOCMD_FCPIM_LUNMASK_CLEAR:
+ rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_QUERY:
+ rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ADD:
+ case IOCMD_FCPIM_LUNMASK_DELETE:
+ rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+ break;
+ case IOCMD_FCPIM_THROTTLE_QUERY:
+ rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_THROTTLE_SET:
+ rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
+ break;
+ /* TFRU */
+ case IOCMD_TFRU_READ:
+ rc = bfad_iocmd_tfru_read(bfad, iocmd);
+ break;
+ case IOCMD_TFRU_WRITE:
+ rc = bfad_iocmd_tfru_write(bfad, iocmd);
+ break;
+ /* FRU */
+ case IOCMD_FRUVPD_READ:
+ rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
+ break;
+ case IOCMD_FRUVPD_UPDATE:
+ rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
+ break;
+ case IOCMD_FRUVPD_GET_MAX_SIZE:
+ rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static int
+bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+{
+ uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct request_queue *request_q = job->req->q;
+ void *payload_kbuf;
+ int rc = -EINVAL;
+
+ /*
+ * Set the BSG device request_queue size to 256 to support
+ * payloads larger than 512*1024K bytes.
+ */
+ blk_queue_max_segments(request_q, 256);
+
+ /* Allocate a temp buffer to hold the passed in user space command */
+ payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+ if (!payload_kbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, payload_kbuf,
+ job->request_payload.payload_len);
+
+ /* Invoke IOCMD handler - to handle all the vendor command requests */
+ rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
+ job->request_payload.payload_len);
+ if (rc != BFA_STATUS_OK)
+ goto error;
+
+ /* Copy the response data to the job->reply_payload sg_list */
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ payload_kbuf,
+ job->reply_payload.payload_len);
+
+ /* free the command buffer */
+ kfree(payload_kbuf);
+
+ /* Fill the BSG job reply data */
+ job->reply_len = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ job->reply->result = rc;
+
+ job->job_done(job);
+ return rc;
+error:
+ /* free the command buffer */
+ kfree(payload_kbuf);
+out:
+ job->reply->result = rc;
+ job->reply_len = sizeof(uint32_t);
+ job->reply->reply_payload_rcv_len = 0;
+ return rc;
+}
+
+/* FC passthru call backs */
+u64
+bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+ u64 addr;
+
+ sge = drv_fcxp->req_sge + sgeid;
+ addr = (u64)(size_t) sge->sg_addr;
+ return addr;
+}
+
+u32
+bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+
+ sge = drv_fcxp->req_sge + sgeid;
+ return sge->sg_len;
+}
+
+u64
+bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+ u64 addr;
+
+ sge = drv_fcxp->rsp_sge + sgeid;
+ addr = (u64)(size_t) sge->sg_addr;
+ return addr;
+}
+
+u32
+bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+
+ sge = drv_fcxp->rsp_sge + sgeid;
+ return sge->sg_len;
+}
+
+void
+bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+ drv_fcxp->req_status = req_status;
+ drv_fcxp->rsp_len = rsp_len;
+
+ /* bfa_fcxp will be automatically freed by BFA */
+ drv_fcxp->bfa_fcxp = NULL;
+ complete(&drv_fcxp->comp);
+}
+
+struct bfad_buf_info *
+bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
+ uint32_t payload_len, uint32_t *num_sgles)
+{
+ struct bfad_buf_info *buf_base, *buf_info;
+ struct bfa_sge_s *sg_table;
+ int sge_num = 1;
+
+ buf_base = kzalloc((sizeof(struct bfad_buf_info) +
+ sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
+ if (!buf_base)
+ return NULL;
+
+ sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
+ (sizeof(struct bfad_buf_info) * sge_num));
+
+ /* Allocate dma coherent memory */
+ buf_info = buf_base;
+ buf_info->size = payload_len;
+ buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
+ buf_info->size, &buf_info->phys,
+ GFP_KERNEL);
+ if (!buf_info->virt)
+ goto out_free_mem;
+
+ /* copy the linear bsg buffer to buf_info */
+ memcpy(buf_info->virt, payload_kbuf, buf_info->size);
+
+ /*
+ * Setup SG table
+ */
+ sg_table->sg_len = buf_info->size;
+ sg_table->sg_addr = (void *)(size_t) buf_info->phys;
+
+ *num_sgles = sge_num;
+
+ return buf_base;
+
+out_free_mem:
+ kfree(buf_base);
+ return NULL;
+}
+
+void
+bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
+ uint32_t num_sgles)
+{
+ int i;
+ struct bfad_buf_info *buf_info = buf_base;
+
+ if (buf_base) {
+ for (i = 0; i < num_sgles; buf_info++, i++) {
+ if (buf_info->virt != NULL)
+ dma_free_coherent(&bfad->pcidev->dev,
+ buf_info->size, buf_info->virt,
+ buf_info->phys);
+ }
+ kfree(buf_base);
+ }
+}
+
+int
+bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+ bfa_bsg_fcpt_t *bsg_fcpt)
+{
+ struct bfa_fcxp_s *hal_fcxp;
+ struct bfad_s *bfad = drv_fcxp->port->bfad;
+ unsigned long flags;
+ uint8_t lp_tag;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Allocate bfa_fcxp structure */
+ hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
+ drv_fcxp->num_req_sgles,
+ drv_fcxp->num_rsp_sgles,
+ bfad_fcxp_get_req_sgaddr_cb,
+ bfad_fcxp_get_req_sglen_cb,
+ bfad_fcxp_get_rsp_sgaddr_cb,
+ bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
+ if (!hal_fcxp) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return BFA_STATUS_ENOMEM;
+ }
+
+ drv_fcxp->bfa_fcxp = hal_fcxp;
+
+ lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
+
+ bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
+ bsg_fcpt->cts, bsg_fcpt->cos,
+ job->request_payload.payload_len,
+ &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
+ job->reply_payload.payload_len, bsg_fcpt->tsecs);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return BFA_STATUS_OK;
+}
+
+int
+bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+{
+ struct bfa_bsg_data *bsg_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ bfa_bsg_fcpt_t *bsg_fcpt;
+ struct bfad_fcxp *drv_fcxp;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ uint32_t command_type = job->request->msgcode;
+ unsigned long flags;
+ struct bfad_buf_info *rsp_buf_info;
+ void *req_kbuf = NULL, *rsp_kbuf = NULL;
+ int rc = -EINVAL;
+
+ job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* Get the payload passed in from userspace */
+ bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
+ sizeof(struct fc_bsg_request));
+ if (bsg_data == NULL)
+ goto out;
+
+ /*
+ * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
+ * buffer of size bsg_data->payload_len
+ */
+ bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
+ if (!bsg_fcpt) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user((uint8_t *)bsg_fcpt,
+ (void *)(unsigned long)bsg_data->payload,
+ bsg_data->payload_len)) {
+ kfree(bsg_fcpt);
+ rc = -EIO;
+ goto out;
+ }
+
+ drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
+ if (drv_fcxp == NULL) {
+ kfree(bsg_fcpt);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
+ bsg_fcpt->lpwwn);
+ if (fcs_port == NULL) {
+ bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ /* Check if the port is online before sending FC Passthru cmd */
+ if (!bfa_fcs_lport_is_online(fcs_port)) {
+ bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ drv_fcxp->port = fcs_port->bfad_port;
+
+ if (drv_fcxp->port->bfad == 0)
+ drv_fcxp->port->bfad = bfad;
+
+ /* Fetch the bfa_rport - if nexus needed */
+ if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
+ command_type == FC_BSG_HST_CT) {
+ /* BSG HST commands: no nexus needed */
+ drv_fcxp->bfa_rport = NULL;
+
+ } else if (command_type == FC_BSG_RPT_ELS ||
+ command_type == FC_BSG_RPT_CT) {
+ /* BSG RPT commands: nexus needed */
+ fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
+ bsg_fcpt->dpwwn);
+ if (fcs_rport == NULL) {
+ bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
+
+ } else { /* Unknown BSG msgcode; return -EINVAL */
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* allocate memory for req / rsp buffers */
+ req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+ if (!req_kbuf) {
+ printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
+ if (!rsp_kbuf) {
+ printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ /* map req sg - copy the sg_list passed in to the linear buffer */
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, req_kbuf,
+ job->request_payload.payload_len);
+
+ drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
+ job->request_payload.payload_len,
+ &drv_fcxp->num_req_sgles);
+ if (!drv_fcxp->reqbuf_info) {
+ printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ drv_fcxp->req_sge = (struct bfa_sge_s *)
+ (((uint8_t *)drv_fcxp->reqbuf_info) +
+ (sizeof(struct bfad_buf_info) *
+ drv_fcxp->num_req_sgles));
+
+ /* map rsp sg */
+ drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
+ job->reply_payload.payload_len,
+ &drv_fcxp->num_rsp_sgles);
+ if (!drv_fcxp->rspbuf_info) {
+ printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
+ drv_fcxp->rsp_sge = (struct bfa_sge_s *)
+ (((uint8_t *)drv_fcxp->rspbuf_info) +
+ (sizeof(struct bfad_buf_info) *
+ drv_fcxp->num_rsp_sgles));
+
+ /* fcxp send */
+ init_completion(&drv_fcxp->comp);
+ rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
+ if (rc == BFA_STATUS_OK) {
+ wait_for_completion(&drv_fcxp->comp);
+ bsg_fcpt->status = drv_fcxp->req_status;
+ } else {
+ bsg_fcpt->status = rc;
+ goto out_free_mem;
+ }
+
+ /* fill the job->reply data */
+ if (drv_fcxp->req_status == BFA_STATUS_OK) {
+ job->reply_len = drv_fcxp->rsp_len;
+ job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ } else {
+ job->reply->reply_payload_rcv_len =
+ sizeof(struct fc_bsg_ctels_reply);
+ job->reply_len = sizeof(uint32_t);
+ job->reply->reply_data.ctels_reply.status =
+ FC_CTELS_STATUS_REJECT;
+ }
+
+ /* Copy the response data to the reply_payload sg list */
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ (uint8_t *)rsp_buf_info->virt,
+ job->reply_payload.payload_len);
+
+out_free_mem:
+ bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
+ drv_fcxp->num_rsp_sgles);
+ bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
+ drv_fcxp->num_req_sgles);
+ kfree(req_kbuf);
+ kfree(rsp_kbuf);
+
+ /* Need a copy to user op */
+ if (copy_to_user((void *)(unsigned long)bsg_data->payload,
+ (void *)bsg_fcpt, bsg_data->payload_len))
+ rc = -EIO;
+
+ kfree(bsg_fcpt);
+ kfree(drv_fcxp);
+out:
+ job->reply->result = rc;
+
+ if (rc == BFA_STATUS_OK)
+ job->job_done(job);
+
+ return rc;
+}
+
+int
+bfad_im_bsg_request(struct fc_bsg_job *job)
+{
+ uint32_t rc = BFA_STATUS_OK;
+
+ switch (job->request->msgcode) {
+ case FC_BSG_HST_VENDOR:
+ /* Process BSG HST Vendor requests */
+ rc = bfad_im_bsg_vendor_request(job);
+ break;
+ case FC_BSG_HST_ELS_NOLOGIN:
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_CT:
+ case FC_BSG_RPT_CT:
+ /* Process BSG ELS/CT commands */
+ rc = bfad_im_bsg_els_ct_request(job);
+ break;
+ default:
+ job->reply->result = rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ break;
+ }
+
+ return rc;
+}
+
+int
+bfad_im_bsg_timeout(struct fc_bsg_job *job)
+{
+ /* Don't complete the BSG job request - return -EAGAIN
+ * to reset bsg job timeout : for ELS/CT pass thru we
+ * already have timer to track the request.
+ */
+ return -EAGAIN;
+}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644
index 000000000..90abef691
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -0,0 +1,836 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef BFAD_BSG_H
+#define BFAD_BSG_H
+
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+
+/* Definitions of vendor unique structures and command codes passed in
+ * using FC_BSG_HST_VENDOR message code.
+ */
+enum {
+ IOCMD_IOC_ENABLE = 0x1,
+ IOCMD_IOC_DISABLE,
+ IOCMD_IOC_GET_ATTR,
+ IOCMD_IOC_GET_INFO,
+ IOCMD_IOC_GET_STATS,
+ IOCMD_IOC_GET_FWSTATS,
+ IOCMD_IOC_RESET_STATS,
+ IOCMD_IOC_RESET_FWSTATS,
+ IOCMD_IOC_SET_ADAPTER_NAME,
+ IOCMD_IOC_SET_PORT_NAME,
+ IOCMD_IOC_FW_SIG_INV,
+ IOCMD_IOCFC_GET_ATTR,
+ IOCMD_IOCFC_SET_INTR,
+ IOCMD_PORT_ENABLE,
+ IOCMD_PORT_DISABLE,
+ IOCMD_PORT_GET_ATTR,
+ IOCMD_PORT_GET_STATS,
+ IOCMD_PORT_RESET_STATS,
+ IOCMD_PORT_CFG_TOPO,
+ IOCMD_PORT_CFG_SPEED,
+ IOCMD_PORT_CFG_ALPA,
+ IOCMD_PORT_CFG_MAXFRSZ,
+ IOCMD_PORT_CLR_ALPA,
+ IOCMD_PORT_BBCR_ENABLE,
+ IOCMD_PORT_BBCR_DISABLE,
+ IOCMD_PORT_BBCR_GET_ATTR,
+ IOCMD_LPORT_GET_ATTR,
+ IOCMD_LPORT_GET_RPORTS,
+ IOCMD_LPORT_GET_STATS,
+ IOCMD_LPORT_RESET_STATS,
+ IOCMD_LPORT_GET_IOSTATS,
+ IOCMD_RPORT_GET_ATTR,
+ IOCMD_RPORT_GET_ADDR,
+ IOCMD_RPORT_GET_STATS,
+ IOCMD_RPORT_RESET_STATS,
+ IOCMD_RPORT_SET_SPEED,
+ IOCMD_VPORT_GET_ATTR,
+ IOCMD_VPORT_GET_STATS,
+ IOCMD_VPORT_RESET_STATS,
+ IOCMD_FABRIC_GET_LPORTS,
+ IOCMD_RATELIM_ENABLE,
+ IOCMD_RATELIM_DISABLE,
+ IOCMD_RATELIM_DEF_SPEED,
+ IOCMD_FCPIM_FAILOVER,
+ IOCMD_FCPIM_MODSTATS,
+ IOCMD_FCPIM_MODSTATSCLR,
+ IOCMD_FCPIM_DEL_ITN_STATS,
+ IOCMD_ITNIM_GET_ATTR,
+ IOCMD_ITNIM_GET_IOSTATS,
+ IOCMD_ITNIM_RESET_STATS,
+ IOCMD_ITNIM_GET_ITNSTATS,
+ IOCMD_IOC_PCIFN_CFG,
+ IOCMD_FCPORT_ENABLE,
+ IOCMD_FCPORT_DISABLE,
+ IOCMD_PCIFN_CREATE,
+ IOCMD_PCIFN_DELETE,
+ IOCMD_PCIFN_BW,
+ IOCMD_ADAPTER_CFG_MODE,
+ IOCMD_PORT_CFG_MODE,
+ IOCMD_FLASH_ENABLE_OPTROM,
+ IOCMD_FLASH_DISABLE_OPTROM,
+ IOCMD_FAA_QUERY,
+ IOCMD_CEE_GET_ATTR,
+ IOCMD_CEE_GET_STATS,
+ IOCMD_CEE_RESET_STATS,
+ IOCMD_SFP_MEDIA,
+ IOCMD_SFP_SPEED,
+ IOCMD_FLASH_GET_ATTR,
+ IOCMD_FLASH_ERASE_PART,
+ IOCMD_FLASH_UPDATE_PART,
+ IOCMD_FLASH_READ_PART,
+ IOCMD_DIAG_TEMP,
+ IOCMD_DIAG_MEMTEST,
+ IOCMD_DIAG_LOOPBACK,
+ IOCMD_DIAG_FWPING,
+ IOCMD_DIAG_QUEUETEST,
+ IOCMD_DIAG_SFP,
+ IOCMD_DIAG_LED,
+ IOCMD_DIAG_BEACON_LPORT,
+ IOCMD_DIAG_LB_STAT,
+ IOCMD_PHY_GET_ATTR,
+ IOCMD_PHY_GET_STATS,
+ IOCMD_PHY_UPDATE_FW,
+ IOCMD_PHY_READ_FW,
+ IOCMD_VHBA_QUERY,
+ IOCMD_DEBUG_PORTLOG,
+ IOCMD_DEBUG_FW_CORE,
+ IOCMD_DEBUG_FW_STATE_CLR,
+ IOCMD_DEBUG_PORTLOG_CLR,
+ IOCMD_DEBUG_START_DTRC,
+ IOCMD_DEBUG_STOP_DTRC,
+ IOCMD_DEBUG_PORTLOG_CTL,
+ IOCMD_FCPIM_PROFILE_ON,
+ IOCMD_FCPIM_PROFILE_OFF,
+ IOCMD_ITNIM_GET_IOPROFILE,
+ IOCMD_FCPORT_GET_STATS,
+ IOCMD_FCPORT_RESET_STATS,
+ IOCMD_BOOT_CFG,
+ IOCMD_BOOT_QUERY,
+ IOCMD_PREBOOT_QUERY,
+ IOCMD_ETHBOOT_CFG,
+ IOCMD_ETHBOOT_QUERY,
+ IOCMD_TRUNK_ENABLE,
+ IOCMD_TRUNK_DISABLE,
+ IOCMD_TRUNK_GET_ATTR,
+ IOCMD_QOS_ENABLE,
+ IOCMD_QOS_DISABLE,
+ IOCMD_QOS_GET_ATTR,
+ IOCMD_QOS_GET_VC_ATTR,
+ IOCMD_QOS_GET_STATS,
+ IOCMD_QOS_RESET_STATS,
+ IOCMD_VF_GET_STATS,
+ IOCMD_VF_RESET_STATS,
+ IOCMD_FCPIM_LUNMASK_ENABLE,
+ IOCMD_FCPIM_LUNMASK_DISABLE,
+ IOCMD_FCPIM_LUNMASK_CLEAR,
+ IOCMD_FCPIM_LUNMASK_QUERY,
+ IOCMD_FCPIM_LUNMASK_ADD,
+ IOCMD_FCPIM_LUNMASK_DELETE,
+ IOCMD_DIAG_DPORT_ENABLE,
+ IOCMD_DIAG_DPORT_DISABLE,
+ IOCMD_QOS_SET_BW,
+ IOCMD_FCPIM_THROTTLE_QUERY,
+ IOCMD_FCPIM_THROTTLE_SET,
+ IOCMD_TFRU_READ,
+ IOCMD_TFRU_WRITE,
+ IOCMD_FRUVPD_READ,
+ IOCMD_FRUVPD_UPDATE,
+ IOCMD_FRUVPD_GET_MAX_SIZE,
+ IOCMD_DIAG_DPORT_SHOW,
+ IOCMD_DIAG_DPORT_START,
+};
+
+struct bfa_bsg_gen_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_portlogctl_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t ctl;
+ int inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
+struct bfa_bsg_ioc_info_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char serialnum[64];
+ char hwpath[BFA_STRING_32];
+ char adapter_hwpath[BFA_STRING_32];
+ char guid[BFA_ADAPTER_SYM_NAME_LEN*2];
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+ char port_name[BFA_ADAPTER_SYM_NAME_LEN];
+ char eth_name[BFA_ADAPTER_SYM_NAME_LEN];
+ wwn_t pwwn;
+ wwn_t nwwn;
+ wwn_t factorypwwn;
+ wwn_t factorynwwn;
+ mac_t mac;
+ mac_t factory_mac; /* Factory mac address */
+ mac_t current_mac; /* Currently assigned mac address */
+ enum bfa_ioc_type_e ioc_type;
+ u16 pvid; /* Port vlan id */
+ u16 rsvd1;
+ u32 host;
+ u32 bandwidth; /* For PF support */
+ u32 rsvd2;
+};
+
+struct bfa_bsg_ioc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ioc_attr_s ioc_attr;
+};
+
+struct bfa_bsg_ioc_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ioc_stats_s ioc_stats;
+};
+
+struct bfa_bsg_ioc_fwstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_iocfc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_iocfc_attr_s iocfc_attr;
+};
+
+struct bfa_bsg_iocfc_intr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_iocfc_intr_attr_s attr;
+};
+
+struct bfa_bsg_port_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_port_attr_s attr;
+};
+
+struct bfa_bsg_port_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 param;
+ u32 rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 maxfrsize;
+};
+
+struct bfa_bsg_port_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_lport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_lport_attr_s port_attr;
+};
+
+struct bfa_bsg_lport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_lport_stats_s port_stats;
+};
+
+struct bfa_bsg_lport_iostats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_lport_get_rports_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ u64 rbuf_ptr;
+ u32 nrports;
+ u32 rsvd;
+};
+
+struct bfa_bsg_rport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ u32 pid;
+ u32 rsvd;
+ struct bfa_rport_attr_s attr;
+};
+
+struct bfa_bsg_rport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct bfa_rport_stats_s stats;
+};
+
+struct bfa_bsg_rport_scsi_addr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ u32 host;
+ u32 bus;
+ u32 target;
+ u32 lun;
+};
+
+struct bfa_bsg_rport_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ enum bfa_port_speed speed;
+ u32 rsvd;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+};
+
+struct bfa_bsg_fabric_get_lports_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ u64 buf_ptr;
+ u32 nports;
+ u32 rsvd;
+};
+
+struct bfa_bsg_trl_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 param;
+};
+
+struct bfa_bsg_fcpim_modstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_itnim_iostats_s modstats;
+};
+
+struct bfa_bsg_fcpim_del_itn_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_fcpim_del_itn_stats_s modstats;
+};
+
+struct bfa_bsg_fcpim_modstatsclr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+};
+
+struct bfa_bsg_itnim_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_attr_s attr;
+};
+
+struct bfa_bsg_itnim_iostats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_itnim_itnstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_stats_s itnstats;
+};
+
+struct bfa_bsg_pcifn_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ablk_cfg_s pcifn_cfg;
+};
+
+struct bfa_bsg_pcifn_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 pcifn_id;
+ u16 bw_min;
+ u16 bw_max;
+ u8 port;
+ enum bfi_pcifn_class pcifn_class;
+ u8 rsvd[1];
+};
+
+struct bfa_bsg_adapter_cfg_mode_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_adapter_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_port_cfg_mode_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_port_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_bbcr_enable_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u8 bb_scn;
+ u8 rsvd;
+};
+
+struct bfa_bsg_bbcr_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_bbcr_attr_s attr;
+};
+
+struct bfa_bsg_faa_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_faa_attr_s faa_attr;
+};
+
+struct bfa_bsg_cee_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_cee_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_sfp_media_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_defs_sfp_media_e media;
+};
+
+struct bfa_bsg_sfp_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_flash_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_flash_attr_s attr;
+};
+
+struct bfa_bsg_flash_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u8 instance;
+ u8 rsvd;
+ enum bfa_flash_part_type type;
+ int bufsz;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_diag_get_temp_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_results_tempsensor_s result;
+};
+
+struct bfa_bsg_diag_memtest_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd[3];
+ u32 pat;
+ struct bfa_diag_memtest_result result;
+ struct bfa_diag_memtest_s memtest;
+};
+
+struct bfa_bsg_diag_loopback_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_opmode opmode;
+ enum bfa_port_speed speed;
+ u32 lpcnt;
+ u32 pat;
+ struct bfa_diag_loopback_result_s result;
+};
+
+struct bfa_bsg_diag_dport_show_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_dport_result_s result;
+};
+
+struct bfa_bsg_dport_enable_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u16 lpcnt;
+ u16 pat;
+};
+
+struct bfa_bsg_diag_fwping_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 cnt;
+ u32 pattern;
+ struct bfa_diag_results_fwping result;
+};
+
+struct bfa_bsg_diag_qtest_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 force;
+ u32 queue;
+ struct bfa_diag_qtest_result_s result;
+};
+
+struct bfa_bsg_sfp_show_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct sfp_mem_s sfp;
+};
+
+struct bfa_bsg_diag_led_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_ledtest_s ledtest;
+};
+
+struct bfa_bsg_diag_beacon_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t beacon;
+ bfa_boolean_t link_e2e_beacon;
+ u32 second;
+};
+
+struct bfa_bsg_diag_lb_stat_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_phy_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_phy_attr_s attr;
+};
+
+struct bfa_bsg_phy_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ u64 bufsz;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_debug_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 bufsz;
+ int inst_no;
+ u64 buf_ptr;
+ u64 offset;
+};
+
+struct bfa_bsg_phy_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_phy_stats_s stats;
+};
+
+struct bfa_bsg_vhba_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 pcifn_id;
+ struct bfa_vhba_attr_s attr;
+};
+
+struct bfa_bsg_boot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_cfg_s cfg;
+};
+
+struct bfa_bsg_preboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_pbc_s cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ethboot_cfg_s cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_attr_s attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_qos_bw_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_bw_s qos_bw;
+};
+
+struct bfa_bsg_vf_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ struct bfa_vf_stats_s stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct scsi_lun lun;
+};
+
+struct bfa_bsg_fcpim_throttle_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ struct bfa_defs_fcpim_throttle_s throttle;
+};
+
+#define BFA_TFRU_DATA_SIZE 64
+#define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000
+
+struct bfa_bsg_tfru_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 offset;
+ u32 len;
+ u8 data[BFA_TFRU_DATA_SIZE];
+};
+
+struct bfa_bsg_fruvpd_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd1;
+ u32 offset;
+ u32 len;
+ u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE];
+ u8 trfr_cmpl;
+ u8 rsvd2[3];
+};
+
+struct bfa_bsg_fruvpd_max_size_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 max_size;
+};
+
+struct bfa_bsg_fcpt_s {
+ bfa_status_t status;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t dpwwn;
+ u32 tsecs;
+ int cts;
+ enum fc_cos cos;
+ struct fchs_s fchs;
+};
+#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
+
+#pragma pack(1)
+struct bfa_bsg_data {
+ int payload_len;
+ u64 payload;
+};
+#pragma pack()
+
+#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \
+ (((__payload_len) != ((__hdrsz) + (__bufsz))) ? \
+ BFA_STATUS_FAILED : BFA_STATUS_OK)
+
+#endif /* BFAD_BSG_H */
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
new file mode 100644
index 000000000..74a307c0a
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+
+/*
+ * BFA debufs interface
+ *
+ * To access the interface, debugfs file system should be mounted
+ * if not already mounted using:
+ * mount -t debugfs none /sys/kernel/debug
+ *
+ * BFA Hierarchy:
+ * - bfa/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa
+ *
+ * Debugging service available per pci_dev:
+ * fwtrc: To collect current firmware trace.
+ * drvtrc: To collect current driver trace
+ * fwsave: To collect last saved fw trace as a result of firmware crash.
+ * regwr: To write one word to chip register
+ * regrd: To read one or more words from chip register.
+ */
+
+struct bfad_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buffer_len;
+};
+
+static int
+bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file)
+{
+ struct bfad_port_s *port = inode->i_private;
+ struct bfad_s *bfad = port->bfad;
+ struct bfad_debug_info *debug;
+
+ debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->debug_buffer = (void *) bfad->trcmod;
+ debug->buffer_len = sizeof(struct bfa_trc_mod_s);
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+static int
+bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+ struct bfad_port_s *port = inode->i_private;
+ struct bfad_s *bfad = port->bfad;
+ struct bfad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
+
+ fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
+ bfad->inst_no);
+ return -ENOMEM;
+ }
+
+ memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (rc != BFA_STATUS_OK) {
+ vfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n",
+ bfad->inst_no);
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+ struct bfad_port_s *port = inode->i_private;
+ struct bfad_s *bfad = port->bfad;
+ struct bfad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
+
+ fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
+ bfad->inst_no);
+ return -ENOMEM;
+ }
+
+ memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (rc != BFA_STATUS_OK) {
+ vfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n",
+ bfad->inst_no);
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bfad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+ struct bfad_debug_info *reg_debug;
+
+ reg_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+ if (!reg_debug)
+ return -ENOMEM;
+
+ reg_debug->i_private = inode->i_private;
+
+ file->private_data = reg_debug;
+
+ return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+ struct bfad_debug_info *debug = file->private_data;
+ return fixed_size_llseek(file, offset, orig,
+ debug->buffer_len);
+}
+
+static ssize_t
+bfad_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bfad_debug_info *debug = file->private_data;
+
+ if (!debug || !debug->debug_buffer)
+ return 0;
+
+ return simple_read_from_buffer(buf, nbytes, pos,
+ debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ (0x40000)
+#define BFA_REG_CB_ADDRSZ (0x20000)
+#define BFA_REG_ADDRSZ(__ioc) \
+ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
+ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+static bfa_status_t
+bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
+{
+ u8 area;
+
+ /* check [16:15] */
+ area = (offset >> 15) & 0x7;
+ if (area == 0) {
+ /* PCIe core register */
+ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else if (area == 0x1) {
+ /* CB 32 KB memory page */
+ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else {
+ /* CB register space 64KB */
+ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc))
+ return BFA_STATUS_EINVAL;
+ }
+ return BFA_STATUS_OK;
+}
+
+static ssize_t
+bfad_debugfs_read_regrd(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bfad_debug_info *regrd_debug = file->private_data;
+ struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
+ struct bfad_s *bfad = port->bfad;
+ ssize_t rc;
+
+ if (!bfad->regdata)
+ return 0;
+
+ rc = simple_read_from_buffer(buf, nbytes, pos,
+ bfad->regdata, bfad->reglen);
+
+ if ((*pos + nbytes) >= bfad->reglen) {
+ kfree(bfad->regdata);
+ bfad->regdata = NULL;
+ bfad->reglen = 0;
+ }
+
+ return rc;
+}
+
+static ssize_t
+bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bfad_debug_info *regrd_debug = file->private_data;
+ struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
+ struct bfad_s *bfad = port->bfad;
+ struct bfa_s *bfa = &bfad->bfa;
+ struct bfa_ioc_s *ioc = &bfa->ioc;
+ int addr, len, rc, i;
+ u32 *regbuf;
+ void __iomem *rb, *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+ if (rc < 2) {
+ printk(KERN_INFO
+ "bfad[%d]: %s failed to read user buf\n",
+ bfad->inst_no, __func__);
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+
+ kfree(kern_buf);
+ kfree(bfad->regdata);
+ bfad->regdata = NULL;
+ bfad->reglen = 0;
+
+ bfad->regdata = kzalloc(len << 2, GFP_KERNEL);
+ if (!bfad->regdata) {
+ printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n",
+ bfad->inst_no);
+ return -ENOMEM;
+ }
+
+ bfad->reglen = len << 2;
+ rb = bfa_ioc_bar0(ioc);
+ addr &= BFA_REG_ADDRMSK(ioc);
+
+ /* offset and len sanity check */
+ rc = bfad_reg_offset_check(bfa, addr, len);
+ if (rc) {
+ printk(KERN_INFO "bfad[%d]: Failed reg offset check\n",
+ bfad->inst_no);
+ kfree(bfad->regdata);
+ bfad->regdata = NULL;
+ bfad->reglen = 0;
+ return -EINVAL;
+ }
+
+ reg_addr = rb + addr;
+ regbuf = (u32 *)bfad->regdata;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ for (i = 0; i < len; i++) {
+ *regbuf = readl(reg_addr);
+ regbuf++;
+ reg_addr += sizeof(u32);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return nbytes;
+}
+
+static ssize_t
+bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bfad_debug_info *debug = file->private_data;
+ struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private;
+ struct bfad_s *bfad = port->bfad;
+ struct bfa_s *bfa = &bfad->bfa;
+ struct bfa_ioc_s *ioc = &bfa->ioc;
+ int addr, val, rc;
+ void __iomem *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+ if (rc < 2) {
+ printk(KERN_INFO
+ "bfad[%d]: %s failed to read user buf\n",
+ bfad->inst_no, __func__);
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ kfree(kern_buf);
+
+ addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+ /* offset and len sanity check */
+ rc = bfad_reg_offset_check(bfa, addr, 1);
+ if (rc) {
+ printk(KERN_INFO
+ "bfad[%d]: Failed reg offset check\n",
+ bfad->inst_no);
+ return -EINVAL;
+ }
+
+ reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ writel(val, reg_addr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return nbytes;
+}
+
+static int
+bfad_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct bfad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static int
+bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
+{
+ struct bfad_debug_info *fw_debug = file->private_data;
+
+ if (!fw_debug)
+ return 0;
+
+ if (fw_debug->debug_buffer)
+ vfree(fw_debug->debug_buffer);
+
+ file->private_data = NULL;
+ kfree(fw_debug);
+ return 0;
+}
+
+static const struct file_operations bfad_debugfs_op_drvtrc = {
+ .owner = THIS_MODULE,
+ .open = bfad_debugfs_open_drvtrc,
+ .llseek = bfad_debugfs_lseek,
+ .read = bfad_debugfs_read,
+ .release = bfad_debugfs_release,
+};
+
+static const struct file_operations bfad_debugfs_op_fwtrc = {
+ .owner = THIS_MODULE,
+ .open = bfad_debugfs_open_fwtrc,
+ .llseek = bfad_debugfs_lseek,
+ .read = bfad_debugfs_read,
+ .release = bfad_debugfs_release_fwtrc,
+};
+
+static const struct file_operations bfad_debugfs_op_fwsave = {
+ .owner = THIS_MODULE,
+ .open = bfad_debugfs_open_fwsave,
+ .llseek = bfad_debugfs_lseek,
+ .read = bfad_debugfs_read,
+ .release = bfad_debugfs_release_fwtrc,
+};
+
+static const struct file_operations bfad_debugfs_op_regrd = {
+ .owner = THIS_MODULE,
+ .open = bfad_debugfs_open_reg,
+ .llseek = bfad_debugfs_lseek,
+ .read = bfad_debugfs_read_regrd,
+ .write = bfad_debugfs_write_regrd,
+ .release = bfad_debugfs_release,
+};
+
+static const struct file_operations bfad_debugfs_op_regwr = {
+ .owner = THIS_MODULE,
+ .open = bfad_debugfs_open_reg,
+ .llseek = bfad_debugfs_lseek,
+ .write = bfad_debugfs_write_regwr,
+ .release = bfad_debugfs_release,
+};
+
+struct bfad_debugfs_entry {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+};
+
+static const struct bfad_debugfs_entry bfad_debugfs_files[] = {
+ { "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, },
+ { "fwtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc, },
+ { "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, },
+ { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd, },
+ { "regwr", S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr, },
+};
+
+static struct dentry *bfa_debugfs_root;
+static atomic_t bfa_debugfs_port_count;
+
+inline void
+bfad_debugfs_init(struct bfad_port_s *port)
+{
+ struct bfad_s *bfad = port->bfad;
+ const struct bfad_debugfs_entry *file;
+ char name[64];
+ int i;
+
+ if (!bfa_debugfs_enable)
+ return;
+
+ /* Setup the BFA debugfs root directory*/
+ if (!bfa_debugfs_root) {
+ bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
+ atomic_set(&bfa_debugfs_port_count, 0);
+ if (!bfa_debugfs_root) {
+ printk(KERN_WARNING
+ "BFA debugfs root dir creation failed\n");
+ goto err;
+ }
+ }
+
+ /* Setup the pci_dev debugfs directory for the port */
+ snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name);
+ if (!port->port_debugfs_root) {
+ port->port_debugfs_root =
+ debugfs_create_dir(name, bfa_debugfs_root);
+ if (!port->port_debugfs_root) {
+ printk(KERN_WARNING
+ "bfa %s: debugfs root creation failed\n",
+ bfad->pci_name);
+ goto err;
+ }
+
+ atomic_inc(&bfa_debugfs_port_count);
+
+ for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
+ file = &bfad_debugfs_files[i];
+ bfad->bfad_dentry_files[i] =
+ debugfs_create_file(file->name,
+ file->mode,
+ port->port_debugfs_root,
+ port,
+ file->fops);
+ if (!bfad->bfad_dentry_files[i]) {
+ printk(KERN_WARNING
+ "bfa %s: debugfs %s creation failed\n",
+ bfad->pci_name, file->name);
+ goto err;
+ }
+ }
+ }
+
+err:
+ return;
+}
+
+inline void
+bfad_debugfs_exit(struct bfad_port_s *port)
+{
+ struct bfad_s *bfad = port->bfad;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
+ if (bfad->bfad_dentry_files[i]) {
+ debugfs_remove(bfad->bfad_dentry_files[i]);
+ bfad->bfad_dentry_files[i] = NULL;
+ }
+ }
+
+ /* Remove the pci_dev debugfs directory for the port */
+ if (port->port_debugfs_root) {
+ debugfs_remove(port->port_debugfs_root);
+ port->port_debugfs_root = NULL;
+ atomic_dec(&bfa_debugfs_port_count);
+ }
+
+ /* Remove the BFA debugfs root directory */
+ if (atomic_read(&bfa_debugfs_port_count) == 0) {
+ debugfs_remove(bfa_debugfs_root);
+ bfa_debugfs_root = NULL;
+ }
+}
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
new file mode 100644
index 000000000..8b97877d4
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * Contains base driver definitions.
+ */
+
+/*
+ * bfa_drv.h Linux driver data structures.
+ */
+
+#ifndef __BFAD_DRV_H__
+#define __BFAD_DRV_H__
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/aer.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
+
+#include "bfa_modules.h"
+#include "bfa_fcs.h"
+#include "bfa_defs_fcs.h"
+
+#include "bfa_plog.h"
+#include "bfa_cs.h"
+
+#define BFAD_DRIVER_NAME "bfa"
+#ifdef BFA_DRIVER_VERSION
+#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
+#else
+#define BFAD_DRIVER_VERSION "3.2.23.0"
+#endif
+
+#define BFAD_PROTO_NAME FCPI_NAME
+#define BFAD_IRQ_FLAGS IRQF_SHARED
+
+#ifndef FC_PORTSPEED_8GBIT
+#define FC_PORTSPEED_8GBIT 0x10
+#endif
+
+/*
+ * BFAD flags
+ */
+#define BFAD_MSIX_ON 0x00000001
+#define BFAD_HAL_INIT_DONE 0x00000002
+#define BFAD_DRV_INIT_DONE 0x00000004
+#define BFAD_CFG_PPORT_DONE 0x00000008
+#define BFAD_HAL_START_DONE 0x00000010
+#define BFAD_PORT_ONLINE 0x00000020
+#define BFAD_RPORT_ONLINE 0x00000040
+#define BFAD_FCS_INIT_DONE 0x00000080
+#define BFAD_HAL_INIT_FAIL 0x00000100
+#define BFAD_FC4_PROBE_DONE 0x00000200
+#define BFAD_PORT_DELETE 0x00000001
+#define BFAD_INTX_ON 0x00000400
+#define BFAD_EEH_BUSY 0x00000800
+#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE 0x00001000
+/*
+ * BFAD related definition
+ */
+#define SCSI_SCAN_DELAY HZ
+#define BFAD_STOP_TIMEOUT 30
+#define BFAD_SUSPEND_TIMEOUT BFAD_STOP_TIMEOUT
+
+/*
+ * BFAD configuration parameter default values
+ */
+#define BFAD_LUN_QUEUE_DEPTH 32
+#define BFAD_IO_MAX_SGE SG_ALL
+#define BFAD_MIN_SECTORS 128 /* 64k */
+#define BFAD_MAX_SECTORS 0xFFFF /* 32 MB */
+
+#define bfad_isr_t irq_handler_t
+
+#define MAX_MSIX_ENTRY 22
+
+struct bfad_msix_s {
+ struct bfad_s *bfad;
+ struct msix_entry msix;
+ char name[32];
+};
+
+/*
+ * Only append to the enums defined here to avoid any versioning
+ * needed between trace utility and driver version
+ */
+enum {
+ BFA_TRC_LDRV_BFAD = 1,
+ BFA_TRC_LDRV_IM = 2,
+ BFA_TRC_LDRV_BSG = 3,
+};
+
+enum bfad_port_pvb_type {
+ BFAD_PORT_PHYS_BASE = 0,
+ BFAD_PORT_PHYS_VPORT = 1,
+ BFAD_PORT_VF_BASE = 2,
+ BFAD_PORT_VF_VPORT = 3,
+};
+
+/*
+ * PORT data structure
+ */
+struct bfad_port_s {
+ struct list_head list_entry;
+ struct bfad_s *bfad;
+ struct bfa_fcs_lport_s *fcs_port;
+ u32 roles;
+ s32 flags;
+ u32 supported_fc4s;
+ enum bfad_port_pvb_type pvb_type;
+ struct bfad_im_port_s *im_port; /* IM specific data */
+ /* port debugfs specific data */
+ struct dentry *port_debugfs_root;
+};
+
+/*
+ * VPORT data structure
+ */
+struct bfad_vport_s {
+ struct bfad_port_s drv_port;
+ struct bfa_fcs_vport_s fcs_vport;
+ struct completion *comp_del;
+ struct list_head list_entry;
+};
+
+/*
+ * VF data structure
+ */
+struct bfad_vf_s {
+ bfa_fcs_vf_t fcs_vf;
+ struct bfad_port_s base_port; /* base port for vf */
+ struct bfad_s *bfad;
+};
+
+struct bfad_cfg_param_s {
+ u32 rport_del_timeout;
+ u32 ioc_queue_depth;
+ u32 lun_queue_depth;
+ u32 io_max_sge;
+ u32 binding_method;
+};
+
+union bfad_tmp_buf {
+ /* From struct bfa_adapter_attr_s */
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+
+ /* From struct bfa_ioc_pci_attr_s */
+ u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
+
+ wwn_t wwn[BFA_FCS_MAX_LPORTS];
+};
+
+/*
+ * BFAD (PCI function) data structure
+ */
+struct bfad_s {
+ bfa_sm_t sm; /* state machine */
+ struct list_head list_entry;
+ struct bfa_s bfa;
+ struct bfa_fcs_s bfa_fcs;
+ struct pci_dev *pcidev;
+ const char *pci_name;
+ struct bfa_pcidev_s hal_pcidev;
+ struct bfa_ioc_pci_attr_s pci_attr;
+ void __iomem *pci_bar0_kva;
+ void __iomem *pci_bar2_kva;
+ struct completion comp;
+ struct completion suspend;
+ struct completion enable_comp;
+ struct completion disable_comp;
+ bfa_boolean_t disable_active;
+ struct bfad_port_s pport; /* physical port of the BFAD */
+ struct bfa_meminfo_s meminfo;
+ struct bfa_iocfc_cfg_s ioc_cfg;
+ u32 inst_no; /* BFAD instance number */
+ u32 bfad_flags;
+ spinlock_t bfad_lock;
+ struct task_struct *bfad_tsk;
+ struct bfad_cfg_param_s cfg_data;
+ struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
+ int nvec;
+ char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
+ char port_name[BFA_ADAPTER_SYM_NAME_LEN];
+ struct timer_list hal_tmo;
+ unsigned long hs_start;
+ struct bfad_im_s *im; /* IM specific data */
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_plog_s plog_buf;
+ int ref_count;
+ union bfad_tmp_buf tmp_buf;
+ struct fc_host_statistics link_stats;
+ struct list_head pbc_vport_list;
+ /* debugfs specific data */
+ char *regdata;
+ u32 reglen;
+ struct dentry *bfad_dentry_files[5];
+ struct list_head free_aen_q;
+ struct list_head active_aen_q;
+ struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
+ spinlock_t bfad_aen_spinlock;
+ struct list_head vport_list;
+};
+
+/* BFAD state machine events */
+enum bfad_sm_event {
+ BFAD_E_CREATE = 1,
+ BFAD_E_KTHREAD_CREATE_FAILED = 2,
+ BFAD_E_INIT = 3,
+ BFAD_E_INIT_SUCCESS = 4,
+ BFAD_E_HAL_INIT_FAILED = 5,
+ BFAD_E_INIT_FAILED = 6,
+ BFAD_E_FCS_EXIT_COMP = 7,
+ BFAD_E_EXIT_COMP = 8,
+ BFAD_E_STOP = 9
+};
+
+/*
+ * RPORT data structure
+ */
+struct bfad_rport_s {
+ struct bfa_fcs_rport_s fcs_rport;
+};
+
+struct bfad_buf_info {
+ void *virt;
+ dma_addr_t phys;
+ u32 size;
+};
+
+struct bfad_fcxp {
+ struct bfad_port_s *port;
+ struct bfa_rport_s *bfa_rport;
+ bfa_status_t req_status;
+ u16 tag;
+ u16 rsp_len;
+ u16 rsp_maxlen;
+ u8 use_ireqbuf;
+ u8 use_irspbuf;
+ u32 num_req_sgles;
+ u32 num_rsp_sgles;
+ struct fchs_s fchs;
+ void *reqbuf_info;
+ void *rspbuf_info;
+ struct bfa_sge_s *req_sge;
+ struct bfa_sge_s *rsp_sge;
+ fcxp_send_cb_t send_cbfn;
+ void *send_cbarg;
+ void *bfa_fcxp;
+ struct completion comp;
+};
+
+struct bfad_hal_comp {
+ bfa_status_t status;
+ struct completion comp;
+};
+
+#define BFA_LOG(level, bfad, mask, fmt, arg...) \
+do { \
+ if (((mask) == 4) || (level[1] <= '4')) \
+ dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
+} while (0)
+
+bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
+ struct bfa_lport_cfg_s *port_cfg,
+ struct device *dev);
+bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
+ struct bfa_lport_cfg_s *port_cfg);
+bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
+bfa_status_t bfad_drv_init(struct bfad_s *bfad);
+bfa_status_t bfad_start_ops(struct bfad_s *bfad);
+void bfad_drv_start(struct bfad_s *bfad);
+void bfad_uncfg_pport(struct bfad_s *bfad);
+void bfad_stop(struct bfad_s *bfad);
+void bfad_fcs_stop(struct bfad_s *bfad);
+void bfad_remove_intr(struct bfad_s *bfad);
+void bfad_hal_mem_release(struct bfad_s *bfad);
+void bfad_hcb_comp(void *arg, bfa_status_t status);
+
+int bfad_setup_intr(struct bfad_s *bfad);
+void bfad_remove_intr(struct bfad_s *bfad);
+void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
+bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
+void bfad_bfa_tmo(unsigned long data);
+void bfad_init_timer(struct bfad_s *bfad);
+int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
+void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
+void bfad_drv_uninit(struct bfad_s *bfad);
+int bfad_worker(void *ptr);
+void bfad_debugfs_init(struct bfad_port_s *port);
+void bfad_debugfs_exit(struct bfad_port_s *port);
+
+void bfad_pci_remove(struct pci_dev *pdev);
+int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
+void bfad_rport_online_wait(struct bfad_s *bfad);
+int bfad_get_linkup_delay(struct bfad_s *bfad);
+int bfad_install_msix_handler(struct bfad_s *bfad);
+
+extern struct idr bfad_im_port_index;
+extern struct pci_device_id bfad_id_table[];
+extern struct list_head bfad_list;
+extern char *os_name;
+extern char *os_patch;
+extern char *host_name;
+extern int num_rports;
+extern int num_ios;
+extern int num_tms;
+extern int num_fcxps;
+extern int num_ufbufs;
+extern int reqq_size;
+extern int rspq_size;
+extern int num_sgpgs;
+extern int rport_del_timeout;
+extern int bfa_lun_queue_depth;
+extern int bfa_io_max_sge;
+extern int bfa_log_level;
+extern int ioc_auto_recover;
+extern int bfa_linkup_delay;
+extern int msix_disable_cb;
+extern int msix_disable_ct;
+extern int fdmi_enable;
+extern int supported_fc4s;
+extern int pcie_max_read_reqsz;
+extern int max_xfer_size;
+extern int bfa_debugfs_enable;
+extern struct mutex bfad_mutex;
+
+#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
new file mode 100644
index 000000000..7223b0006
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -0,0 +1,1320 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfad_im.c Linux driver IM module.
+ */
+
+#include <linux/export.h>
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+
+BFA_TRC_FILE(LDRV, IM);
+
+DEFINE_IDR(bfad_im_port_index);
+struct scsi_transport_template *bfad_im_scsi_transport_template;
+struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
+static void bfad_im_itnim_work_handler(struct work_struct *work);
+static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
+static int bfad_im_slave_alloc(struct scsi_device *sdev);
+static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
+ struct bfad_itnim_s *itnim);
+
+void
+bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
+ enum bfi_ioim_status io_status, u8 scsi_status,
+ int sns_len, u8 *sns_info, s32 residue)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ struct bfad_s *bfad = drv;
+ struct bfad_itnim_data_s *itnim_data;
+ struct bfad_itnim_s *itnim;
+ u8 host_status = DID_OK;
+
+ switch (io_status) {
+ case BFI_IOIM_STS_OK:
+ bfa_trc(bfad, scsi_status);
+ scsi_set_resid(cmnd, 0);
+
+ if (sns_len > 0) {
+ bfa_trc(bfad, sns_len);
+ if (sns_len > SCSI_SENSE_BUFFERSIZE)
+ sns_len = SCSI_SENSE_BUFFERSIZE;
+ memcpy(cmnd->sense_buffer, sns_info, sns_len);
+ }
+
+ if (residue > 0) {
+ bfa_trc(bfad, residue);
+ scsi_set_resid(cmnd, residue);
+ if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
+ (scsi_bufflen(cmnd) - residue) <
+ cmnd->underflow) {
+ bfa_trc(bfad, 0);
+ host_status = DID_ERROR;
+ }
+ }
+ cmnd->result = ScsiResult(host_status, scsi_status);
+
+ break;
+
+ case BFI_IOIM_STS_TIMEDOUT:
+ host_status = DID_TIME_OUT;
+ cmnd->result = ScsiResult(host_status, 0);
+ break;
+ case BFI_IOIM_STS_PATHTOV:
+ host_status = DID_TRANSPORT_DISRUPTED;
+ cmnd->result = ScsiResult(host_status, 0);
+ break;
+ default:
+ host_status = DID_ERROR;
+ cmnd->result = ScsiResult(host_status, 0);
+ }
+
+ /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
+ if (cmnd->device->host != NULL)
+ scsi_dma_unmap(cmnd);
+
+ cmnd->host_scribble = NULL;
+ bfa_trc(bfad, cmnd->result);
+
+ itnim_data = cmnd->device->hostdata;
+ if (itnim_data) {
+ itnim = itnim_data->itnim;
+ if (!cmnd->result && itnim &&
+ (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
+ /* Queue depth adjustment for good status completion */
+ bfad_ramp_up_qdepth(itnim, cmnd->device);
+ } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
+ /* qfull handling */
+ bfad_handle_qfull(itnim, cmnd->device);
+ }
+ }
+
+ cmnd->scsi_done(cmnd);
+}
+
+void
+bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ struct bfad_itnim_data_s *itnim_data;
+ struct bfad_itnim_s *itnim;
+
+ cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
+
+ /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
+ if (cmnd->device->host != NULL)
+ scsi_dma_unmap(cmnd);
+
+ cmnd->host_scribble = NULL;
+
+ /* Queue depth adjustment */
+ if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
+ itnim_data = cmnd->device->hostdata;
+ if (itnim_data) {
+ itnim = itnim_data->itnim;
+ if (itnim)
+ bfad_ramp_up_qdepth(itnim, cmnd->device);
+ }
+ }
+
+ cmnd->scsi_done(cmnd);
+}
+
+void
+bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ struct bfad_s *bfad = drv;
+
+ cmnd->result = ScsiResult(DID_ERROR, 0);
+
+ /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
+ if (cmnd->device->host != NULL)
+ scsi_dma_unmap(cmnd);
+
+ bfa_trc(bfad, cmnd->result);
+ cmnd->host_scribble = NULL;
+}
+
+void
+bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+ enum bfi_tskim_status tsk_status)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
+ wait_queue_head_t *wq;
+
+ cmnd->SCp.Status |= tsk_status << 1;
+ set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
+ wq = (wait_queue_head_t *) cmnd->SCp.ptr;
+ cmnd->SCp.ptr = NULL;
+
+ if (wq)
+ wake_up(wq);
+}
+
+/*
+ * Scsi_Host_template SCSI host template
+ */
+/*
+ * Scsi_Host template entry, returns BFAD PCI info.
+ */
+static const char *
+bfad_im_info(struct Scsi_Host *shost)
+{
+ static char bfa_buf[256];
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+
+ memset(bfa_buf, 0, sizeof(bfa_buf));
+ snprintf(bfa_buf, sizeof(bfa_buf),
+ "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s",
+ bfad->pci_name, BFAD_DRIVER_VERSION);
+
+ return bfa_buf;
+}
+
+/*
+ * Scsi_Host template entry, aborts the specified SCSI command.
+ *
+ * Returns: SUCCESS or FAILED.
+ */
+static int
+bfad_im_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_ioim_s *hal_io;
+ unsigned long flags;
+ u32 timeout;
+ int rc = FAILED;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
+ if (!hal_io) {
+ /* IO has been completed, return success */
+ rc = SUCCESS;
+ goto out;
+ }
+ if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
+ rc = FAILED;
+ goto out;
+ }
+
+ bfa_trc(bfad, hal_io->iotag);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "scsi%d: abort cmnd %p iotag %x\n",
+ im_port->shost->host_no, cmnd, hal_io->iotag);
+ (void) bfa_ioim_abort(hal_io);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* Need to wait until the command get aborted */
+ timeout = 10;
+ while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+ if (timeout < 4 * HZ)
+ timeout *= 2;
+ }
+
+ cmnd->scsi_done(cmnd);
+ bfa_trc(bfad, hal_io->iotag);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "scsi%d: complete abort 0x%p iotag 0x%x\n",
+ im_port->shost->host_no, cmnd, hal_io->iotag);
+ return SUCCESS;
+out:
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return rc;
+}
+
+static bfa_status_t
+bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
+ struct bfad_itnim_s *itnim)
+{
+ struct bfa_tskim_s *tskim;
+ struct bfa_itnim_s *bfa_itnim;
+ bfa_status_t rc = BFA_STATUS_OK;
+ struct scsi_lun scsilun;
+
+ tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
+ if (!tskim) {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "target reset, fail to allocate tskim\n");
+ rc = BFA_STATUS_FAILED;
+ goto out;
+ }
+
+ /*
+ * Set host_scribble to NULL to avoid aborting a task command if
+ * happens.
+ */
+ cmnd->host_scribble = NULL;
+ cmnd->SCp.Status = 0;
+ bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
+ memset(&scsilun, 0, sizeof(scsilun));
+ bfa_tskim_start(tskim, bfa_itnim, scsilun,
+ FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
+out:
+ return rc;
+}
+
+/*
+ * Scsi_Host template entry, resets a LUN and abort its all commands.
+ *
+ * Returns: SUCCESS or FAILED.
+ *
+ */
+static int
+bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_tskim_s *tskim;
+ struct bfad_itnim_s *itnim;
+ struct bfa_itnim_s *bfa_itnim;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ int rc = SUCCESS;
+ unsigned long flags;
+ enum bfi_tskim_status task_status;
+ struct scsi_lun scsilun;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ itnim = itnim_data->itnim;
+ if (!itnim) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ rc = FAILED;
+ goto out;
+ }
+
+ tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
+ if (!tskim) {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "LUN reset, fail to allocate tskim");
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ rc = FAILED;
+ goto out;
+ }
+
+ /*
+ * Set host_scribble to NULL to avoid aborting a task command
+ * if happens.
+ */
+ cmnd->host_scribble = NULL;
+ cmnd->SCp.ptr = (char *)&wq;
+ cmnd->SCp.Status = 0;
+ bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
+ int_to_scsilun(cmnd->device->lun, &scsilun);
+ bfa_tskim_start(tskim, bfa_itnim, scsilun,
+ FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_event(wq, test_bit(IO_DONE_BIT,
+ (unsigned long *)&cmnd->SCp.Status));
+
+ task_status = cmnd->SCp.Status >> 1;
+ if (task_status != BFI_TSKIM_STS_OK) {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "LUN reset failure, status: %d\n", task_status);
+ rc = FAILED;
+ }
+
+out:
+ return rc;
+}
+
+/*
+ * Scsi_Host template entry, resets the bus and abort all commands.
+ */
+static int
+bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_itnim_s *itnim;
+ unsigned long flags;
+ u32 i, rc, err_cnt = 0;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ enum bfi_tskim_status task_status;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ for (i = 0; i < MAX_FCP_TARGET; i++) {
+ itnim = bfad_get_itnim(im_port, i);
+ if (itnim) {
+ cmnd->SCp.ptr = (char *)&wq;
+ rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
+ if (rc != BFA_STATUS_OK) {
+ err_cnt++;
+ continue;
+ }
+
+ /* wait target reset to complete */
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_event(wq, test_bit(IO_DONE_BIT,
+ (unsigned long *)&cmnd->SCp.Status));
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ task_status = cmnd->SCp.Status >> 1;
+ if (task_status != BFI_TSKIM_STS_OK) {
+ BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+ "target reset failure,"
+ " status: %d\n", task_status);
+ err_cnt++;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (err_cnt)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+/*
+ * Scsi_Host template entry slave_destroy.
+ */
+static void
+bfad_im_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+ return;
+}
+
+/*
+ * BFA FCS itnim callbacks
+ */
+
+/*
+ * BFA FCS itnim alloc callback, after successful PRLI
+ * Context: Interrupt
+ */
+void
+bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+ struct bfad_itnim_s **itnim_drv)
+{
+ *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
+ if (*itnim_drv == NULL)
+ return;
+
+ (*itnim_drv)->im = bfad->im;
+ *itnim = &(*itnim_drv)->fcs_itnim;
+ (*itnim_drv)->state = ITNIM_STATE_NONE;
+
+ /*
+ * Initiaze the itnim_work
+ */
+ INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
+ bfad->bfad_flags |= BFAD_RPORT_ONLINE;
+}
+
+/*
+ * BFA FCS itnim free callback.
+ * Context: Interrupt. bfad_lock is held
+ */
+void
+bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
+{
+ struct bfad_port_s *port;
+ wwn_t wwpn;
+ u32 fcid;
+ char wwpn_str[32], fcid_str[16];
+ struct bfad_im_s *im = itnim_drv->im;
+
+ /* online to free state transtion should not happen */
+ WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);
+
+ itnim_drv->queue_work = 1;
+ /* offline request is not yet done, use the same request to free */
+ if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
+ itnim_drv->queue_work = 0;
+
+ itnim_drv->state = ITNIM_STATE_FREE;
+ port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
+ itnim_drv->im_port = port->im_port;
+ wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
+ fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
+ wwn2str(wwpn_str, wwpn);
+ fcid2str(fcid_str, fcid);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
+ port->im_port->shost->host_no,
+ fcid_str, wwpn_str);
+
+ /* ITNIM processing */
+ if (itnim_drv->queue_work)
+ queue_work(im->drv_workq, &itnim_drv->itnim_work);
+}
+
+/*
+ * BFA FCS itnim online callback.
+ * Context: Interrupt. bfad_lock is held
+ */
+void
+bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
+{
+ struct bfad_port_s *port;
+ struct bfad_im_s *im = itnim_drv->im;
+
+ itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
+ port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
+ itnim_drv->state = ITNIM_STATE_ONLINE;
+ itnim_drv->queue_work = 1;
+ itnim_drv->im_port = port->im_port;
+
+ /* ITNIM processing */
+ if (itnim_drv->queue_work)
+ queue_work(im->drv_workq, &itnim_drv->itnim_work);
+}
+
+/*
+ * BFA FCS itnim offline callback.
+ * Context: Interrupt. bfad_lock is held
+ */
+void
+bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
+{
+ struct bfad_port_s *port;
+ struct bfad_s *bfad;
+ struct bfad_im_s *im = itnim_drv->im;
+
+ port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
+ bfad = port->bfad;
+ if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
+ (port->flags & BFAD_PORT_DELETE)) {
+ itnim_drv->state = ITNIM_STATE_OFFLINE;
+ return;
+ }
+ itnim_drv->im_port = port->im_port;
+ itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
+ itnim_drv->queue_work = 1;
+
+ /* ITNIM processing */
+ if (itnim_drv->queue_work)
+ queue_work(im->drv_workq, &itnim_drv->itnim_work);
+}
+
+/*
+ * Allocate a Scsi_Host for a port.
+ */
+int
+bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
+ struct device *dev)
+{
+ int error = 1;
+
+ mutex_lock(&bfad_mutex);
+ error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
+ if (error < 0) {
+ mutex_unlock(&bfad_mutex);
+ printk(KERN_WARNING "idr_alloc failure\n");
+ goto out;
+ }
+ im_port->idr_id = error;
+ mutex_unlock(&bfad_mutex);
+
+ im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
+ if (!im_port->shost) {
+ error = 1;
+ goto out_free_idr;
+ }
+
+ im_port->shost->hostdata[0] = (unsigned long)im_port;
+ im_port->shost->unique_id = im_port->idr_id;
+ im_port->shost->this_id = -1;
+ im_port->shost->max_id = MAX_FCP_TARGET;
+ im_port->shost->max_lun = MAX_FCP_LUN;
+ im_port->shost->max_cmd_len = 16;
+ im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
+ if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+ im_port->shost->transportt = bfad_im_scsi_transport_template;
+ else
+ im_port->shost->transportt =
+ bfad_im_scsi_vport_transport_template;
+
+ error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
+ if (error) {
+ printk(KERN_WARNING "scsi_add_host failure %d\n", error);
+ goto out_fc_rel;
+ }
+
+ return 0;
+
+out_fc_rel:
+ scsi_host_put(im_port->shost);
+ im_port->shost = NULL;
+out_free_idr:
+ mutex_lock(&bfad_mutex);
+ idr_remove(&bfad_im_port_index, im_port->idr_id);
+ mutex_unlock(&bfad_mutex);
+out:
+ return error;
+}
+
+void
+bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+{
+ bfa_trc(bfad, bfad->inst_no);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
+ im_port->shost->host_no);
+
+ fc_remove_host(im_port->shost);
+
+ scsi_remove_host(im_port->shost);
+ scsi_host_put(im_port->shost);
+
+ mutex_lock(&bfad_mutex);
+ idr_remove(&bfad_im_port_index, im_port->idr_id);
+ mutex_unlock(&bfad_mutex);
+}
+
+static void
+bfad_im_port_delete_handler(struct work_struct *work)
+{
+ struct bfad_im_port_s *im_port =
+ container_of(work, struct bfad_im_port_s, port_delete_work);
+
+ if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
+ im_port->flags |= BFAD_PORT_DELETE;
+ fc_vport_terminate(im_port->fc_vport);
+ }
+}
+
+bfa_status_t
+bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
+{
+ int rc = BFA_STATUS_OK;
+ struct bfad_im_port_s *im_port;
+
+ im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
+ if (im_port == NULL) {
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
+ }
+ port->im_port = im_port;
+ im_port->port = port;
+ im_port->bfad = bfad;
+
+ INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
+ INIT_LIST_HEAD(&im_port->itnim_mapped_list);
+ INIT_LIST_HEAD(&im_port->binding_list);
+
+ext:
+ return rc;
+}
+
+void
+bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
+{
+ struct bfad_im_port_s *im_port = port->im_port;
+
+ queue_work(bfad->im->drv_workq,
+ &im_port->port_delete_work);
+}
+
+void
+bfad_im_port_clean(struct bfad_im_port_s *im_port)
+{
+ struct bfad_fcp_binding *bp, *bp_new;
+ unsigned long flags;
+ struct bfad_s *bfad = im_port->bfad;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
+ list_entry) {
+ list_del(&bp->list_entry);
+ kfree(bp);
+ }
+
+ /* the itnim_mapped_list must be empty at this time */
+ WARN_ON(!list_empty(&im_port->itnim_mapped_list));
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+static void bfad_aen_im_notify_handler(struct work_struct *work)
+{
+ struct bfad_im_s *im =
+ container_of(work, struct bfad_im_s, aen_im_notify_work);
+ struct bfa_aen_entry_s *aen_entry;
+ struct bfad_s *bfad = im->bfad;
+ struct Scsi_Host *shost = bfad->pport.im_port->shost;
+ void *event_data;
+ unsigned long flags;
+
+ while (!list_empty(&bfad->active_aen_q)) {
+ spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+ bfa_q_deq(&bfad->active_aen_q, &aen_entry);
+ spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+ event_data = (char *)aen_entry + sizeof(struct list_head);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(struct bfa_aen_entry_s) -
+ sizeof(struct list_head),
+ (char *)event_data, BFAD_NL_VENDOR_ID);
+ spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+ list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
+ spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+ }
+}
+
+bfa_status_t
+bfad_im_probe(struct bfad_s *bfad)
+{
+ struct bfad_im_s *im;
+
+ im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
+ if (im == NULL)
+ return BFA_STATUS_ENOMEM;
+
+ bfad->im = im;
+ im->bfad = bfad;
+
+ if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
+ kfree(im);
+ return BFA_STATUS_FAILED;
+ }
+
+ INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
+ return BFA_STATUS_OK;
+}
+
+void
+bfad_im_probe_undo(struct bfad_s *bfad)
+{
+ if (bfad->im) {
+ bfad_destroy_workq(bfad->im);
+ kfree(bfad->im);
+ bfad->im = NULL;
+ }
+}
+
+struct Scsi_Host *
+bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
+{
+ struct scsi_host_template *sht;
+
+ if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+ sht = &bfad_im_scsi_host_template;
+ else
+ sht = &bfad_im_vport_template;
+
+ if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
+ sht->max_sectors = max_xfer_size << 1;
+
+ sht->sg_tablesize = bfad->cfg_data.io_max_sge;
+
+ return scsi_host_alloc(sht, sizeof(unsigned long));
+}
+
+void
+bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+{
+ if (!(im_port->flags & BFAD_PORT_DELETE))
+ flush_workqueue(bfad->im->drv_workq);
+ bfad_im_scsi_host_free(im_port->bfad, im_port);
+ bfad_im_port_clean(im_port);
+ kfree(im_port);
+}
+
+void
+bfad_destroy_workq(struct bfad_im_s *im)
+{
+ if (im && im->drv_workq) {
+ flush_workqueue(im->drv_workq);
+ destroy_workqueue(im->drv_workq);
+ im->drv_workq = NULL;
+ }
+}
+
+bfa_status_t
+bfad_thread_workq(struct bfad_s *bfad)
+{
+ struct bfad_im_s *im = bfad->im;
+
+ bfa_trc(bfad, 0);
+ snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
+ bfad->inst_no);
+ im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
+ if (!im->drv_workq)
+ return BFA_STATUS_FAILED;
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Scsi_Host template entry.
+ *
+ * Description:
+ * OS entry point to adjust the queue_depths on a per-device basis.
+ * Called once per device during the bus scan.
+ * Return non-zero if fails.
+ */
+static int
+bfad_im_slave_configure(struct scsi_device *sdev)
+{
+ scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
+ return 0;
+}
+
+struct scsi_host_template bfad_im_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = BFAD_DRIVER_NAME,
+ .info = bfad_im_info,
+ .queuecommand = bfad_im_queuecommand,
+ .eh_abort_handler = bfad_im_abort_handler,
+ .eh_device_reset_handler = bfad_im_reset_lun_handler,
+ .eh_bus_reset_handler = bfad_im_reset_bus_handler,
+
+ .slave_alloc = bfad_im_slave_alloc,
+ .slave_configure = bfad_im_slave_configure,
+ .slave_destroy = bfad_im_slave_destroy,
+
+ .this_id = -1,
+ .sg_tablesize = BFAD_IO_MAX_SGE,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = bfad_im_host_attrs,
+ .max_sectors = BFAD_MAX_SECTORS,
+ .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
+ .use_blk_tags = 1,
+};
+
+struct scsi_host_template bfad_im_vport_template = {
+ .module = THIS_MODULE,
+ .name = BFAD_DRIVER_NAME,
+ .info = bfad_im_info,
+ .queuecommand = bfad_im_queuecommand,
+ .eh_abort_handler = bfad_im_abort_handler,
+ .eh_device_reset_handler = bfad_im_reset_lun_handler,
+ .eh_bus_reset_handler = bfad_im_reset_bus_handler,
+
+ .slave_alloc = bfad_im_slave_alloc,
+ .slave_configure = bfad_im_slave_configure,
+ .slave_destroy = bfad_im_slave_destroy,
+
+ .this_id = -1,
+ .sg_tablesize = BFAD_IO_MAX_SGE,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = bfad_im_vport_attrs,
+ .max_sectors = BFAD_MAX_SECTORS,
+ .use_blk_tags = 1,
+};
+
+bfa_status_t
+bfad_im_module_init(void)
+{
+ bfad_im_scsi_transport_template =
+ fc_attach_transport(&bfad_im_fc_function_template);
+ if (!bfad_im_scsi_transport_template)
+ return BFA_STATUS_ENOMEM;
+
+ bfad_im_scsi_vport_transport_template =
+ fc_attach_transport(&bfad_im_vport_fc_function_template);
+ if (!bfad_im_scsi_vport_transport_template) {
+ fc_release_transport(bfad_im_scsi_transport_template);
+ return BFA_STATUS_ENOMEM;
+ }
+
+ return BFA_STATUS_OK;
+}
+
+void
+bfad_im_module_exit(void)
+{
+ if (bfad_im_scsi_transport_template)
+ fc_release_transport(bfad_im_scsi_transport_template);
+
+ if (bfad_im_scsi_vport_transport_template)
+ fc_release_transport(bfad_im_scsi_vport_transport_template);
+}
+
+void
+bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+{
+ struct scsi_device *tmp_sdev;
+
+ if (((jiffies - itnim->last_ramp_up_time) >
+ BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
+ ((jiffies - itnim->last_queue_full_time) >
+ BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
+ if (tmp_sdev->id != sdev->id)
+ continue;
+ scsi_change_queue_depth(tmp_sdev,
+ tmp_sdev->queue_depth + 1);
+
+ itnim->last_ramp_up_time = jiffies;
+ }
+ }
+ }
+}
+
+void
+bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+{
+ struct scsi_device *tmp_sdev;
+
+ itnim->last_queue_full_time = jiffies;
+
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (tmp_sdev->id != sdev->id)
+ continue;
+ scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
+ }
+}
+
+struct bfad_itnim_s *
+bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
+{
+ struct bfad_itnim_s *itnim = NULL;
+
+ /* Search the mapped list for this target ID */
+ list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
+ if (id == itnim->scsi_tgt_id)
+ return itnim;
+ }
+
+ return NULL;
+}
+
+/*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Has the logic to query the LUN Mask database to check if this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+ struct fc_rport *rport)
+{
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+ struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+ int i = 0, ret = -ENXIO;
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+ scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+ lun_list[i].rp_tag == bfa_rport->rport_tag &&
+ lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+ ret = BFA_STATUS_OK;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Scsi_Host template entry slave_alloc
+ */
+static int
+bfad_im_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct bfad_itnim_data_s *itnim_data;
+ struct bfa_s *bfa;
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ itnim_data = (struct bfad_itnim_data_s *) rport->dd_data;
+ bfa = itnim_data->itnim->bfa_itnim->bfa;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+ /*
+ * We should not mask LUN 0 - since this will translate
+ * to no LUN / TARGET for SCSI ml resulting no scan.
+ */
+ if (sdev->lun == 0) {
+ sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+ BLIST_SPARSELUN;
+ goto done;
+ }
+
+ /*
+ * Query LUN Mask configuration - to expose this LUN
+ * to the SCSI mid-layer or to mask it.
+ */
+ if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+ BFA_STATUS_OK)
+ return -ENXIO;
+ }
+done:
+ sdev->hostdata = rport->dd_data;
+
+ return 0;
+}
+
+u32
+bfad_im_supported_speeds(struct bfa_s *bfa)
+{
+ struct bfa_ioc_attr_s *ioc_attr;
+ u32 supported_speed = 0;
+
+ ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
+ if (!ioc_attr)
+ return 0;
+
+ bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
+ if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
+ supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
+ else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+ if (ioc_attr->adapter_attr.is_mezz) {
+ supported_speed |= FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT |
+ FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ } else {
+ supported_speed |= FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT |
+ FC_PORTSPEED_2GBIT;
+ }
+ } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
+ supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
+ FC_PORTSPEED_1GBIT;
+ } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
+ supported_speed |= FC_PORTSPEED_10GBIT;
+ }
+ kfree(ioc_attr);
+ return supported_speed;
+}
+
+void
+bfad_fc_host_init(struct bfad_im_port_s *im_port)
+{
+ struct Scsi_Host *host = im_port->shost;
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_port_s *port = im_port->port;
+ char symname[BFA_SYMNAME_MAXLEN];
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+ fc_host_node_name(host) =
+ cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
+ fc_host_port_name(host) =
+ cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
+ fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
+
+ fc_host_supported_classes(host) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(host), 0,
+ sizeof(fc_host_supported_fc4s(host)));
+ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+ /* For FCP type 0x08 */
+ fc_host_supported_fc4s(host)[2] = 1;
+ /* For fibre channel services type 0x20 */
+ fc_host_supported_fc4s(host)[7] = 1;
+
+ strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
+ BFA_SYMNAME_MAXLEN);
+ sprintf(fc_host_symbolic_name(host), "%s", symname);
+
+ fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
+ fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
+}
+
+static void
+bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
+{
+ struct fc_rport_identifiers rport_ids;
+ struct fc_rport *fc_rport;
+ struct bfad_itnim_data_s *itnim_data;
+
+ rport_ids.node_name =
+ cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
+ rport_ids.port_name =
+ cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+ rport_ids.port_id =
+ bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
+ rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ itnim->fc_rport = fc_rport =
+ fc_remote_port_add(im_port->shost, 0, &rport_ids);
+
+ if (!fc_rport)
+ return;
+
+ fc_rport->maxframe_size =
+ bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
+ fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);
+
+ itnim_data = fc_rport->dd_data;
+ itnim_data->itnim = itnim;
+
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
+ fc_remote_port_rolechg(fc_rport, rport_ids.roles);
+
+ if ((fc_rport->scsi_target_id != -1)
+ && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
+ itnim->scsi_tgt_id = fc_rport->scsi_target_id;
+
+ itnim->channel = fc_rport->channel;
+
+ return;
+}
+
+/*
+ * Work queue handler using FC transport service
+* Context: kernel
+ */
+static void
+bfad_im_itnim_work_handler(struct work_struct *work)
+{
+ struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
+ itnim_work);
+ struct bfad_im_s *im = itnim->im;
+ struct bfad_s *bfad = im->bfad;
+ struct bfad_im_port_s *im_port;
+ unsigned long flags;
+ struct fc_rport *fc_rport;
+ wwn_t wwpn;
+ u32 fcid;
+ char wwpn_str[32], fcid_str[16];
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ im_port = itnim->im_port;
+ bfa_trc(bfad, itnim->state);
+ switch (itnim->state) {
+ case ITNIM_STATE_ONLINE:
+ if (!itnim->fc_rport) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfad_im_fc_rport_add(im_port, itnim);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
+ fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
+ wwn2str(wwpn_str, wwpn);
+ fcid2str(fcid_str, fcid);
+ list_add_tail(&itnim->list_entry,
+ &im_port->itnim_mapped_list);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "ITNIM ONLINE Target: %d:0:%d "
+ "FCID: %s WWPN: %s\n",
+ im_port->shost->host_no,
+ itnim->scsi_tgt_id,
+ fcid_str, wwpn_str);
+ } else {
+ printk(KERN_WARNING
+ "%s: itnim %llx is already in online state\n",
+ __func__,
+ bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+ }
+
+ break;
+ case ITNIM_STATE_OFFLINE_PENDING:
+ itnim->state = ITNIM_STATE_OFFLINE;
+ if (itnim->fc_rport) {
+ fc_rport = itnim->fc_rport;
+ ((struct bfad_itnim_data_s *)
+ fc_rport->dd_data)->itnim = NULL;
+ itnim->fc_rport = NULL;
+ if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ fc_rport->dev_loss_tmo =
+ bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
+ fc_remote_port_delete(fc_rport);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ }
+ wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
+ fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
+ wwn2str(wwpn_str, wwpn);
+ fcid2str(fcid_str, fcid);
+ list_del(&itnim->list_entry);
+ BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+ "ITNIM OFFLINE Target: %d:0:%d "
+ "FCID: %s WWPN: %s\n",
+ im_port->shost->host_no,
+ itnim->scsi_tgt_id,
+ fcid_str, wwpn_str);
+ }
+ break;
+ case ITNIM_STATE_FREE:
+ if (itnim->fc_rport) {
+ fc_rport = itnim->fc_rport;
+ ((struct bfad_itnim_data_s *)
+ fc_rport->dd_data)->itnim = NULL;
+ itnim->fc_rport = NULL;
+ if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ fc_rport->dev_loss_tmo =
+ bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
+ fc_remote_port_delete(fc_rport);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ }
+ list_del(&itnim->list_entry);
+ }
+
+ kfree(itnim);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * Scsi_Host template entry, queue a SCSI command to the BFAD.
+ */
+static int
+bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
+ struct bfad_itnim_s *itnim;
+ struct bfa_ioim_s *hal_io;
+ unsigned long flags;
+ int rc;
+ int sg_cnt = 0;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+ rc = fc_remote_port_chkready(rport);
+ if (rc) {
+ cmnd->result = rc;
+ done(cmnd);
+ return 0;
+ }
+
+ if (bfad->bfad_flags & BFAD_EEH_BUSY) {
+ if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
+ cmnd->result = DID_NO_CONNECT << 16;
+ else
+ cmnd->result = DID_REQUEUE << 16;
+ done(cmnd);
+ return 0;
+ }
+
+ sg_cnt = scsi_dma_map(cmnd);
+ if (sg_cnt < 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ cmnd->scsi_done = done;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
+ printk(KERN_WARNING
+ "bfad%d, queuecommand %p %x failed, BFA stopped\n",
+ bfad->inst_no, cmnd, cmnd->cmnd[0]);
+ cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ goto out_fail_cmd;
+ }
+
+
+ itnim = itnim_data->itnim;
+ if (!itnim) {
+ cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
+ goto out_fail_cmd;
+ }
+
+ hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
+ itnim->bfa_itnim, sg_cnt);
+ if (!hal_io) {
+ printk(KERN_WARNING "hal_io failure\n");
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ scsi_dma_unmap(cmnd);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ cmnd->host_scribble = (char *)hal_io;
+ bfa_ioim_start(hal_io);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+
+out_fail_cmd:
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ scsi_dma_unmap(cmnd);
+ if (done)
+ done(cmnd);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(bfad_im_queuecommand)
+
+void
+bfad_rport_online_wait(struct bfad_s *bfad)
+{
+ int i;
+ int rport_delay = 10;
+
+ for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
+ && i < bfa_linkup_delay; i++) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
+ if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
+ rport_delay = rport_delay < bfa_linkup_delay ?
+ rport_delay : bfa_linkup_delay;
+ for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
+ && i < rport_delay; i++) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
+ if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(rport_delay * HZ);
+ }
+ }
+}
+
+int
+bfad_get_linkup_delay(struct bfad_s *bfad)
+{
+ u8 nwwns = 0;
+ wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
+ int linkup_delay;
+
+ /*
+ * Querying for the boot target port wwns
+ * -- read from boot information in flash.
+ * If nwwns > 0 => boot over SAN and set linkup_delay = 30
+ * else => local boot machine set linkup_delay = 0
+ */
+
+ bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
+
+ if (nwwns > 0)
+ /* If Boot over SAN set linkup_delay = 30sec */
+ linkup_delay = 30;
+ else
+ /* If local boot; no linkup_delay */
+ linkup_delay = 0;
+
+ return linkup_delay;
+}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
new file mode 100644
index 000000000..f6c1023e5
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFAD_IM_H__
+#define __BFAD_IM_H__
+
+#include "bfa_fcs.h"
+
+#define FCPI_NAME " fcpim"
+
+#ifndef KOBJ_NAME_LEN
+#define KOBJ_NAME_LEN 20
+#endif
+
+bfa_status_t bfad_im_module_init(void);
+void bfad_im_module_exit(void);
+bfa_status_t bfad_im_probe(struct bfad_s *bfad);
+void bfad_im_probe_undo(struct bfad_s *bfad);
+bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
+void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
+void bfad_im_port_clean(struct bfad_im_port_s *im_port);
+int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
+ struct bfad_im_port_s *im_port, struct device *dev);
+void bfad_im_scsi_host_free(struct bfad_s *bfad,
+ struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
+
+#define MAX_FCP_TARGET 1024
+#define MAX_FCP_LUN 16384
+#define BFAD_TARGET_RESET_TMO 60
+#define BFAD_LUN_RESET_TMO 60
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
+#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
+
+/*
+ * itnim flags
+ */
+#define IO_DONE_BIT 0
+
+struct bfad_itnim_data_s {
+ struct bfad_itnim_s *itnim;
+};
+
+struct bfad_im_port_s {
+ struct bfad_s *bfad;
+ struct bfad_port_s *port;
+ struct work_struct port_delete_work;
+ int idr_id;
+ u16 cur_scsi_id;
+ u16 flags;
+ struct list_head binding_list;
+ struct Scsi_Host *shost;
+ struct list_head itnim_mapped_list;
+ struct fc_vport *fc_vport;
+};
+
+enum bfad_itnim_state {
+ ITNIM_STATE_NONE,
+ ITNIM_STATE_ONLINE,
+ ITNIM_STATE_OFFLINE_PENDING,
+ ITNIM_STATE_OFFLINE,
+ ITNIM_STATE_TIMEOUT,
+ ITNIM_STATE_FREE,
+};
+
+/*
+ * Per itnim data structure
+ */
+struct bfad_itnim_s {
+ struct list_head list_entry;
+ struct bfa_fcs_itnim_s fcs_itnim;
+ struct work_struct itnim_work;
+ u32 flags;
+ enum bfad_itnim_state state;
+ struct bfad_im_s *im;
+ struct bfad_im_port_s *im_port;
+ struct bfad_rport_s *drv_rport;
+ struct fc_rport *fc_rport;
+ struct bfa_itnim_s *bfa_itnim;
+ u16 scsi_tgt_id;
+ u16 channel;
+ u16 queue_work;
+ unsigned long last_ramp_up_time;
+ unsigned long last_queue_full_time;
+};
+
+enum bfad_binding_type {
+ FCP_PWWN_BINDING = 0x1,
+ FCP_NWWN_BINDING = 0x2,
+ FCP_FCID_BINDING = 0x3,
+};
+
+struct bfad_fcp_binding {
+ struct list_head list_entry;
+ enum bfad_binding_type binding_type;
+ u16 scsi_target_id;
+ u32 fc_id;
+ wwn_t nwwn;
+ wwn_t pwwn;
+};
+
+struct bfad_im_s {
+ struct bfad_s *bfad;
+ struct workqueue_struct *drv_workq;
+ char drv_workq_name[KOBJ_NAME_LEN];
+ struct work_struct aen_im_notify_work;
+};
+
+#define bfad_get_aen_entry(_drv, _entry) do { \
+ unsigned long _flags; \
+ spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
+ bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
+ if (_entry) \
+ list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
+ spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
+} while (0)
+
+/* post fc_host vendor event */
+#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
+ do_gettimeofday(&(_entry)->aen_tv); \
+ (_entry)->bfad_num = (_drv)->inst_no; \
+ (_entry)->seq_num = (_cnt); \
+ (_entry)->aen_category = (_cat); \
+ (_entry)->aen_type = (_evt); \
+ if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
+ queue_work((_drv)->im->drv_workq, \
+ &(_drv)->im->aen_im_notify_work); \
+} while (0)
+
+struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
+ struct bfad_s *);
+bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
+void bfad_destroy_workq(struct bfad_im_s *im);
+void bfad_fc_host_init(struct bfad_im_port_s *im_port);
+void bfad_scsi_host_free(struct bfad_s *bfad,
+ struct bfad_im_port_s *im_port);
+void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim,
+ struct scsi_device *sdev);
+void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
+struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id);
+
+extern struct scsi_host_template bfad_im_scsi_host_template;
+extern struct scsi_host_template bfad_im_vport_template;
+extern struct fc_function_template bfad_im_fc_function_template;
+extern struct fc_function_template bfad_im_vport_fc_function_template;
+extern struct scsi_transport_template *bfad_im_scsi_transport_template;
+extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
+
+extern struct device_attribute *bfad_im_host_attrs[];
+extern struct device_attribute *bfad_im_vport_attrs[];
+
+irqreturn_t bfad_intx(int irq, void *dev_id);
+
+int bfad_im_bsg_request(struct fc_bsg_job *job);
+int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterate's over all the ITNIM's part of the im_port & set's the
+ * sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \
+ struct scsi_device *__sdev = NULL; \
+ struct bfad_itnim_s *__itnim = NULL; \
+ u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \
+ list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+ list_entry) { \
+ __sdev = scsi_device_lookup((__im_port)->shost, \
+ __itnim->channel, \
+ __itnim->scsi_tgt_id, 0); \
+ if (__sdev) { \
+ if ((__lunmask_cfg) == BFA_TRUE) \
+ __sdev->sdev_bflags |= scan_flags; \
+ else \
+ __sdev->sdev_bflags &= ~scan_flags; \
+ scsi_device_put(__sdev); \
+ } \
+ } \
+} while (0)
+
+#endif
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
new file mode 100644
index 000000000..9ef91f907
--- /dev/null
+++ b/drivers/scsi/bfa/bfi.h
@@ -0,0 +1,1324 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+#include "bfa_defs_svc.h"
+
+#pragma pack(1)
+
+/* Per dma segment max size */
+#define BFI_MEM_DMA_SEG_SZ (131072)
+
+/* Get number of dma segments required */
+#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz) \
+ ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) & \
+ ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
+
+/* Get num dma reqs - that fit in a segment */
+#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
+
+/* Get segment num from tag */
+#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
+
+/* Get dma req offset in a segment */
+#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz) \
+ ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
+
+/*
+ * BFI FW image type
+ */
+#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
+
+/*
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr_s {
+ u8 msg_class; /* @ref bfi_mclass_t */
+ u8 msg_id; /* msg opcode with in the class */
+ union {
+ struct {
+ u8 qid;
+ u8 fn_lpu; /* msg destination */
+ } h2i;
+ u16 i2htok; /* token in msgs to host */
+ } mtag;
+};
+
+#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.i2htok = (_i2htok); \
+} while (0)
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE 128
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
+
+/*
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+#define BFI_SGE_INLINE 1
+#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
+
+/*
+ * SG Flags
+ */
+enum {
+ BFI_SGE_DATA = 0, /* data address, not last */
+ BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */
+ BFI_SGE_DATA_LAST = 3, /* data address, last */
+ BFI_SGE_LINK = 2, /* link address */
+ BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
+};
+
+/*
+ * DMA addresses
+ */
+union bfi_addr_u {
+ struct {
+ __be32 addr_lo;
+ __be32 addr_hi;
+ } a32;
+};
+
+/*
+ * Scatter Gather Element used for fast-path IO requests
+ */
+struct bfi_sge_s {
+#ifdef __BIG_ENDIAN
+ u32 flags:2,
+ rsvd:2,
+ sg_len:28;
+#else
+ u32 sg_len:28,
+ rsvd:2,
+ flags:2;
+#endif
+ union bfi_addr_u sga;
+};
+
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen_s {
+ union bfi_addr_u al_addr; /* DMA addr of buffer */
+ u32 al_len; /* length of buffer */
+};
+
+/*
+ * Scatter Gather Page
+ */
+#define BFI_SGPG_DATA_SGES 7
+#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
+#define BFI_SGPG_RSVD_WD_LEN 8
+struct bfi_sgpg_s {
+ struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
+ u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
+};
+
+/* FCP module definitions */
+#define BFI_IO_MAX (2000)
+#define BFI_IOIM_SNSLEN (256)
+#define BFI_IOIM_SNSBUF_SEGS \
+ BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
+
+/*
+ * Large Message structure - 128 Bytes size Msgs
+ */
+#define BFI_LMSG_SZ 128
+#define BFI_LMSG_PL_WSZ \
+ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
+
+struct bfi_msg_s {
+ struct bfi_mhdr_s mhdr;
+ u32 pl[BFI_LMSG_PL_WSZ];
+};
+
+/*
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ 7
+struct bfi_mbmsg_s {
+ struct bfi_mhdr_s mh;
+ u32 pl[BFI_MBMSG_SZ];
+};
+
+/*
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+ BFI_PCIFN_CLASS_FC = 0x0c04,
+ BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
+/*
+ * Message Classes
+ */
+enum bfi_mclass {
+ BFI_MC_IOC = 1, /* IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /* Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /* Flash message class */
+ BFI_MC_CEE = 4, /* CEE */
+ BFI_MC_FCPORT = 5, /* FC port */
+ BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
+ BFI_MC_ABLK = 7, /* ASIC block configuration */
+ BFI_MC_UF = 8, /* Unsolicited frame receive */
+ BFI_MC_FCXP = 9, /* FC Transport */
+ BFI_MC_LPS = 10, /* lport fc login services */
+ BFI_MC_RPORT = 11, /* Remote port */
+ BFI_MC_ITN = 12, /* I-T nexus (Initiator mode) */
+ BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
+ BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
+ BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
+ BFI_MC_IOIM = 16, /* IO (Initiator mode) */
+ BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
+ BFI_MC_TSKIM = 18, /* Initiator Task management */
+ BFI_MC_PORT = 21, /* Physical port */
+ BFI_MC_SFP = 22, /* SFP module */
+ BFI_MC_PHY = 25, /* External PHY message class */
+ BFI_MC_FRU = 34,
+ BFI_MC_MAX = 35
+};
+
+#define BFI_IOC_MAX_CQS 4
+#define BFI_IOC_MAX_CQS_ASIC 8
+#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
+
+/*
+ *----------------------------------------------------------------------
+ * IOC
+ *----------------------------------------------------------------------
+ */
+
+/*
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+ BFI_ASIC_GEN_CB = 1, /* crossbow 8G FC */
+ BFI_ASIC_GEN_CT = 2, /* catapult 8G FC or 10G CNA */
+ BFI_ASIC_GEN_CT2 = 3, /* catapult-2 16G FC or 10G CNA */
+};
+
+enum bfi_asic_mode {
+ BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */
+ BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */
+ BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */
+ BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */
+};
+
+enum bfi_ioc_h2i_msgs {
+ BFI_IOC_H2I_ENABLE_REQ = 1,
+ BFI_IOC_H2I_DISABLE_REQ = 2,
+ BFI_IOC_H2I_GETATTR_REQ = 3,
+ BFI_IOC_H2I_DBG_SYNC = 4,
+ BFI_IOC_H2I_DBG_DUMP = 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+ BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
+ BFI_IOC_I2H_ACQ_ADDR_REPLY = BFA_I2HM(5),
+};
+
+/*
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req_s {
+ struct bfi_mhdr_s mh;
+ union bfi_addr_u attr_addr;
+};
+
+#define BFI_IOC_ATTR_UUID_SZ 16
+struct bfi_ioc_attr_s {
+ wwn_t mfg_pwwn; /* Mfg port wwn */
+ wwn_t mfg_nwwn; /* Mfg node wwn */
+ mac_t mfg_mac; /* Mfg mac */
+ u8 port_mode; /* bfi_port_mode */
+ u8 rsvd_a;
+ wwn_t pwwn;
+ wwn_t nwwn;
+ mac_t mac; /* PBC or Mfg mac */
+ u16 rsvd_b;
+ mac_t fcoe_mac;
+ u16 rsvd_c;
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 rx_bbcredit; /* receive buffer credits */
+ u32 adapter_prop; /* adapter properties */
+ u16 maxfrsize; /* max receive frame size */
+ char asic_rev;
+ u8 rsvd_d;
+ char fw_version[BFA_VERSION_LEN];
+ char optrom_version[BFA_VERSION_LEN];
+ struct bfa_mfg_vpd_s vpd;
+ u32 card_type; /* card type */
+ u8 mfg_day; /* manufacturing day */
+ u8 mfg_month; /* manufacturing month */
+ u16 mfg_year; /* manufacturing year */
+ u8 uuid[BFI_IOC_ATTR_UUID_SZ]; /*!< chinook uuid */
+};
+
+/*
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 status; /* cfg reply status */
+ u8 rsvd[3];
+};
+
+/*
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB (0x40)
+#define BFI_IOC_SMEM_PG0_CT (0x180)
+
+/*
+ * Firmware statistic offset
+ */
+#define BFI_IOC_FWSTATS_OFF (0x6B40)
+#define BFI_IOC_FWSTATS_SZ (4096)
+
+/*
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF (0x4b00)
+#define BFI_IOC_TRC_ENTS 256
+
+#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFA_IOC_FW_INV_SIGN (0xdeaddead)
+#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver_s {
+#ifdef __BIG_ENDIAN
+ uint8_t patch;
+ uint8_t maint;
+ uint8_t minor;
+ uint8_t major;
+ uint8_t rsvd[2];
+ uint8_t build;
+ uint8_t phase;
+#else
+ uint8_t major;
+ uint8_t minor;
+ uint8_t maint;
+ uint8_t patch;
+ uint8_t phase;
+ uint8_t build;
+ uint8_t rsvd[2];
+#endif
+};
+
+struct bfi_ioc_image_hdr_s {
+ u32 signature; /* constant signature */
+ u8 asic_gen; /* asic generation */
+ u8 asic_mode;
+ u8 port0_mode; /* device mode for port 0 */
+ u8 port1_mode; /* device mode for port 1 */
+ u32 exec; /* exec vector */
+ u32 bootenv; /* fimware boot env */
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver_s fwver;
+ u32 md5sum[BFI_IOC_MD5SUM_SZ];
+};
+
+enum bfi_ioc_img_ver_cmp_e {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
+#define BFI_FWBOOT_DEVMODE_OFF 4
+#define BFI_FWBOOT_TYPE_OFF 8
+#define BFI_FWBOOT_ENV_OFF 12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+ (((u32)(__asic_gen)) << 24 | \
+ ((u32)(__asic_mode)) << 16 | \
+ ((u32)(__p0_mode)) << 8 | \
+ ((u32)(__p1_mode)))
+
+enum bfi_fwboot_type {
+ BFI_FWBOOT_TYPE_NORMAL = 0,
+ BFI_FWBOOT_TYPE_FLASH = 1,
+ BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
+#define BFI_FWBOOT_TYPE_NORMAL 0
+#define BFI_FWBOOT_TYPE_MEMTEST 2
+#define BFI_FWBOOT_ENV_OS 0
+
+enum bfi_port_mode {
+ BFI_PORT_MODE_FC = 1,
+ BFI_PORT_MODE_ETH = 2,
+};
+
+struct bfi_ioc_hbeat_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u32 hb_count; /* current heart beat count */
+};
+
+/*
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+ BFI_IOC_UNINIT = 0, /* not initialized */
+ BFI_IOC_INITING = 1, /* h/w is being initialized */
+ BFI_IOC_HWINIT = 2, /* h/w is initialized */
+ BFI_IOC_CFG = 3, /* IOC configuration in progress */
+ BFI_IOC_OP = 4, /* IOC is operational */
+ BFI_IOC_DISABLING = 5, /* IOC is being disabled */
+ BFI_IOC_DISABLED = 6, /* IOC is disabled */
+ BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
+ BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
+ BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
+};
+
+#define BFA_IOC_CB_JOIN_SH 16
+#define BFA_IOC_CB_FWSTATE_MASK 0x0000ffff
+#define BFA_IOC_CB_JOIN_MASK 0xffff0000
+
+#define BFI_IOC_ENDIAN_SIG 0x12345678
+
+enum {
+ BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */
+ BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */
+ BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */
+ BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */
+ BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */
+ BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */
+ BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */
+ BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */
+ BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */
+ BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
+ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
+ BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val) \
+ ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
+ ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
+ BFI_ADAPTER_UNSUPP))
+
+/*
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req_s {
+ struct bfi_mhdr_s mh;
+ u16 clscode;
+ u16 rsvd;
+ u32 tv_sec;
+};
+#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
+#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
+
+/*
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 status; /* enable/disable status */
+ u8 port_mode; /* bfa_mode_s */
+ u8 cap_bm; /* capability bit mask */
+ u8 rsvd;
+};
+#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
+#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
+
+#define BFI_IOC_MSGSZ 8
+/*
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_ioc_ctrl_req_s enable_req;
+ struct bfi_ioc_ctrl_req_s disable_req;
+ struct bfi_ioc_getattr_req_s getattr_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/*
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_ioc_ctrl_reply_s fw_event;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+
+/*
+ *----------------------------------------------------------------------
+ * PBC
+ *----------------------------------------------------------------------
+ */
+
+#define BFI_PBC_MAX_BLUNS 8
+#define BFI_PBC_MAX_VPORTS 16
+#define BFI_PBC_PORT_DISABLED 2
+
+/*
+ * PBC boot lun configuration
+ */
+struct bfi_pbc_blun_s {
+ wwn_t tgt_pwwn;
+ struct scsi_lun tgt_lun;
+};
+
+/*
+ * PBC virtual port configuration
+ */
+struct bfi_pbc_vport_s {
+ wwn_t vp_pwwn;
+ wwn_t vp_nwwn;
+};
+
+/*
+ * BFI pre-boot configuration information
+ */
+struct bfi_pbc_s {
+ u8 port_enabled;
+ u8 boot_enabled;
+ u8 nbluns;
+ u8 nvports;
+ u8 port_speed;
+ u8 rsvd_a;
+ u16 hss;
+ wwn_t pbc_pwwn;
+ wwn_t pbc_nwwn;
+ struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
+ struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
+};
+
+/*
+ *----------------------------------------------------------------------
+ * MSGQ
+ *----------------------------------------------------------------------
+ */
+#define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
+#define BFI_MSGQ_EMPTY(_q) (_q->pi == _q->ci)
+#define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
+#define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
+
+/* q_depth must be power of 2 */
+#define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
+
+enum bfi_msgq_h2i_msgs_e {
+ BFI_MSGQ_H2I_INIT_REQ = 1,
+ BFI_MSGQ_H2I_DOORBELL = 2,
+ BFI_MSGQ_H2I_SHUTDOWN = 3,
+};
+
+enum bfi_msgq_i2h_msgs_e {
+ BFI_MSGQ_I2H_INIT_RSP = 1,
+ BFI_MSGQ_I2H_DOORBELL = 2,
+};
+
+
+/* Messages(commands/responsed/AENS will have the following header */
+struct bfi_msgq_mhdr_s {
+ u8 msg_class;
+ u8 msg_id;
+ u16 msg_token;
+ u16 num_entries;
+ u8 enet_id;
+ u8 rsvd[1];
+};
+
+#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_mid); \
+ (_mh).msg_token = (_tok); \
+ (_mh).enet_id = (_enet_id); \
+} while (0)
+
+/*
+ * Mailbox for messaging interface
+ *
+*/
+#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */
+#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */
+#define BFI_MSGQ_MSG_SIZE_MAX (2048) /* TBD */
+
+struct bfi_msgq_s {
+ union bfi_addr_u addr;
+ u16 q_depth; /* Total num of entries in the queue */
+ u8 rsvd[2];
+};
+
+/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
+struct bfi_msgq_cfg_req_s {
+ struct bfi_mhdr_s mh;
+ struct bfi_msgq_s cmdq;
+ struct bfi_msgq_s rspq;
+};
+
+/* BFI_ENET_MSGQ_CFG_RSP */
+struct bfi_msgq_cfg_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+
+/* BFI_MSGQ_H2I_DOORBELL */
+struct bfi_msgq_h2i_db_s {
+ struct bfi_mhdr_s mh;
+ u16 cmdq_pi;
+ u16 rspq_ci;
+};
+
+/* BFI_MSGQ_I2H_DOORBELL */
+struct bfi_msgq_i2h_db_s {
+ struct bfi_mhdr_s mh;
+ u16 rspq_pi;
+ u16 cmdq_ci;
+};
+
+#pragma pack()
+
+/* BFI port specific */
+#pragma pack(1)
+
+enum bfi_port_h2i {
+ BFI_PORT_H2I_ENABLE_REQ = (1),
+ BFI_PORT_H2I_DISABLE_REQ = (2),
+ BFI_PORT_H2I_GET_STATS_REQ = (3),
+ BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
+};
+
+enum bfi_port_i2h {
+ BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
+ BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
+};
+
+/*
+ * Generic REQ type
+ */
+struct bfi_port_generic_req_s {
+ struct bfi_mhdr_s mh; /* msg header */
+ u32 msgtag; /* msgtag for reply */
+ u32 rsvd;
+};
+
+/*
+ * Generic RSP type
+ */
+struct bfi_port_generic_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 status; /* port enable status */
+ u8 rsvd[3];
+ u32 msgtag; /* msgtag for reply */
+};
+
+/*
+ * BFI_PORT_H2I_GET_STATS_REQ
+ */
+struct bfi_port_get_stats_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ union bfi_addr_u dma_addr;
+};
+
+union bfi_port_h2i_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_port_generic_req_s enable_req;
+ struct bfi_port_generic_req_s disable_req;
+ struct bfi_port_get_stats_req_s getstats_req;
+ struct bfi_port_generic_req_s clearstats_req;
+};
+
+union bfi_port_i2h_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_port_generic_rsp_s enable_rsp;
+ struct bfi_port_generic_rsp_s disable_rsp;
+ struct bfi_port_generic_rsp_s getstats_rsp;
+ struct bfi_port_generic_rsp_s clearstats_rsp;
+};
+
+/*
+ *----------------------------------------------------------------------
+ * ABLK
+ *----------------------------------------------------------------------
+ */
+enum bfi_ablk_h2i_msgs_e {
+ BFI_ABLK_H2I_QUERY = 1,
+ BFI_ABLK_H2I_ADPT_CONFIG = 2,
+ BFI_ABLK_H2I_PORT_CONFIG = 3,
+ BFI_ABLK_H2I_PF_CREATE = 4,
+ BFI_ABLK_H2I_PF_DELETE = 5,
+ BFI_ABLK_H2I_PF_UPDATE = 6,
+ BFI_ABLK_H2I_OPTROM_ENABLE = 7,
+ BFI_ABLK_H2I_OPTROM_DISABLE = 8,
+};
+
+enum bfi_ablk_i2h_msgs_e {
+ BFI_ABLK_I2H_QUERY = BFA_I2HM(BFI_ABLK_H2I_QUERY),
+ BFI_ABLK_I2H_ADPT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG),
+ BFI_ABLK_I2H_PORT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG),
+ BFI_ABLK_I2H_PF_CREATE = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE),
+ BFI_ABLK_I2H_PF_DELETE = BFA_I2HM(BFI_ABLK_H2I_PF_DELETE),
+ BFI_ABLK_I2H_PF_UPDATE = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE),
+ BFI_ABLK_I2H_OPTROM_ENABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE),
+ BFI_ABLK_I2H_OPTROM_DISABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE),
+};
+
+/* BFI_ABLK_H2I_QUERY */
+struct bfi_ablk_h2i_query_s {
+ struct bfi_mhdr_s mh;
+ union bfi_addr_u addr;
+};
+
+/* BFI_ABL_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */
+struct bfi_ablk_h2i_cfg_req_s {
+ struct bfi_mhdr_s mh;
+ u8 mode;
+ u8 port;
+ u8 max_pf;
+ u8 max_vf;
+};
+
+/*
+ * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE,
+ */
+struct bfi_ablk_h2i_pf_req_s {
+ struct bfi_mhdr_s mh;
+ u8 pcifn;
+ u8 port;
+ u16 pers;
+ u16 bw_min; /* percent BW @ max speed */
+ u16 bw_max; /* percent BW @ max speed */
+};
+
+/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
+struct bfi_ablk_h2i_optrom_s {
+ struct bfi_mhdr_s mh;
+};
+
+/*
+ * BFI_ABLK_I2H_QUERY
+ * BFI_ABLK_I2H_PORT_CONFIG
+ * BFI_ABLK_I2H_PF_CREATE
+ * BFI_ABLK_I2H_PF_DELETE
+ * BFI_ABLK_I2H_PF_UPDATE
+ * BFI_ABLK_I2H_OPTROM_ENABLE
+ * BFI_ABLK_I2H_OPTROM_DISABLE
+ */
+struct bfi_ablk_i2h_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 status;
+ u8 pcifn;
+ u8 port_mode;
+};
+
+
+/*
+ * CEE module specific messages
+ */
+
+/* Mailbox commands from host to firmware */
+enum bfi_cee_h2i_msgs_e {
+ BFI_CEE_H2I_GET_CFG_REQ = 1,
+ BFI_CEE_H2I_RESET_STATS = 2,
+ BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+enum bfi_cee_i2h_msgs_e {
+ BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+ BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+ BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/*
+ * H2I command structure for resetting the stats
+ */
+struct bfi_cee_reset_stats_s {
+ struct bfi_mhdr_s mh;
+};
+
+/*
+ * Get configuration command from host
+ */
+struct bfi_cee_get_req_s {
+ struct bfi_mhdr_s mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_get_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_stats_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* Mailbox message structures from firmware to host */
+union bfi_cee_i2h_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_cee_get_rsp_s get_rsp;
+ struct bfi_cee_stats_rsp_s stats_rsp;
+};
+
+/*
+ * SFP related
+ */
+
+enum bfi_sfp_h2i_e {
+ BFI_SFP_H2I_SHOW = 1,
+ BFI_SFP_H2I_SCN = 2,
+};
+
+enum bfi_sfp_i2h_e {
+ BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW),
+ BFI_SFP_I2H_SCN = BFA_I2HM(BFI_SFP_H2I_SCN),
+};
+
+/*
+ * SFP state change notification
+ */
+struct bfi_sfp_scn_s {
+ struct bfi_mhdr_s mhr; /* host msg header */
+ u8 event;
+ u8 sfpid;
+ u8 pomlvl; /* pom level: normal/warning/alarm */
+ u8 is_elb; /* e-loopback */
+};
+
+/*
+ * SFP state
+ */
+enum bfa_sfp_stat_e {
+ BFA_SFP_STATE_INIT = 0, /* SFP state is uninit */
+ BFA_SFP_STATE_REMOVED = 1, /* SFP is removed */
+ BFA_SFP_STATE_INSERTED = 2, /* SFP is inserted */
+ BFA_SFP_STATE_VALID = 3, /* SFP is valid */
+ BFA_SFP_STATE_UNSUPPORT = 4, /* SFP is unsupport */
+ BFA_SFP_STATE_FAILED = 5, /* SFP i2c read fail */
+};
+
+/*
+ * SFP memory access type
+ */
+enum bfi_sfp_mem_e {
+ BFI_SFP_MEM_ALL = 0x1, /* access all data field */
+ BFI_SFP_MEM_DIAGEXT = 0x2, /* access diag ext data field only */
+};
+
+struct bfi_sfp_req_s {
+ struct bfi_mhdr_s mh;
+ u8 memtype;
+ u8 rsvd[3];
+ struct bfi_alen_s alen;
+};
+
+struct bfi_sfp_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 status;
+ u8 state;
+ u8 rsvd[2];
+};
+
+/*
+ * FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+ BFI_FLASH_H2I_QUERY_REQ = 1,
+ BFI_FLASH_H2I_ERASE_REQ = 2,
+ BFI_FLASH_H2I_WRITE_REQ = 3,
+ BFI_FLASH_H2I_READ_REQ = 4,
+ BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+ BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+ BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+ BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+ BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ struct bfi_alen_s alen;
+};
+
+/*
+ * Flash erase request
+ */
+struct bfi_flash_erase_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ struct bfi_alen_s alen;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash erase response
+ */
+struct bfi_flash_erase_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+};
+
+/*
+ * Flash event notification
+ */
+struct bfi_flash_event_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ bfa_status_t status;
+ u32 param;
+};
+
+/*
+ *----------------------------------------------------------------------
+ * DIAG
+ *----------------------------------------------------------------------
+ */
+enum bfi_diag_h2i {
+ BFI_DIAG_H2I_PORTBEACON = 1,
+ BFI_DIAG_H2I_LOOPBACK = 2,
+ BFI_DIAG_H2I_FWPING = 3,
+ BFI_DIAG_H2I_TEMPSENSOR = 4,
+ BFI_DIAG_H2I_LEDTEST = 5,
+ BFI_DIAG_H2I_QTEST = 6,
+ BFI_DIAG_H2I_DPORT = 7,
+};
+
+enum bfi_diag_i2h {
+ BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON),
+ BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK),
+ BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING),
+ BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
+ BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
+ BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST),
+ BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT),
+ BFI_DIAG_I2H_DPORT_SCN = BFA_I2HM(8),
+};
+
+#define BFI_DIAG_MAX_SGES 2
+#define BFI_DIAG_DMA_BUF_SZ (2 * 1024)
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
+
+struct bfi_diag_lb_req_s {
+ struct bfi_mhdr_s mh;
+ u32 loopcnt;
+ u32 pattern;
+ u8 lb_mode; /*!< bfa_port_opmode_t */
+ u8 speed; /*!< bfa_port_speed_t */
+ u8 rsvd[2];
+};
+
+struct bfi_diag_lb_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ struct bfa_diag_loopback_result_s res; /* 16 bytes */
+};
+
+struct bfi_diag_fwping_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ struct bfi_alen_s alen; /* 12 bytes */
+ u32 data; /* user input data pattern */
+ u32 count; /* user input dma count */
+ u8 qtag; /* track CPE vc */
+ u8 rsv[3];
+};
+
+struct bfi_diag_fwping_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u32 data; /* user input data pattern */
+ u8 qtag; /* track CPE vc */
+ u8 dma_status; /* dma status */
+ u8 rsv[2];
+};
+
+/*
+ * Temperature Sensor
+ */
+struct bfi_diag_ts_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u16 temp; /* 10-bit A/D value */
+ u16 brd_temp; /* 9-bit board temp */
+ u8 status;
+ u8 ts_junc; /* show junction tempsensor */
+ u8 ts_brd; /* show board tempsensor */
+ u8 rsv;
+};
+#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s
+
+struct bfi_diag_ledtest_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u8 cmd;
+ u8 color;
+ u8 portid;
+ u8 led; /* bitmap of LEDs to be tested */
+ u16 freq; /* no. of blinks every 10 secs */
+ u8 rsv[2];
+};
+
+/* notify host led operation is done */
+struct bfi_diag_ledtest_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+};
+
+struct bfi_diag_portbeacon_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u32 period; /* beaconing period */
+ u8 beacon; /* 1: beacon on */
+ u8 rsvd[3];
+};
+
+/* notify host the beacon is off */
+struct bfi_diag_portbeacon_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+};
+
+struct bfi_diag_qtest_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u32 data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */
+};
+#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
+
+/*
+ * D-port test
+ */
+enum bfi_dport_req {
+ BFI_DPORT_DISABLE = 0, /* disable dport request */
+ BFI_DPORT_ENABLE = 1, /* enable dport request */
+ BFI_DPORT_START = 2, /* start dport request */
+ BFI_DPORT_SHOW = 3, /* show dport request */
+ BFI_DPORT_DYN_DISABLE = 4, /* disable dynamic dport request */
+};
+
+enum bfi_dport_scn {
+ BFI_DPORT_SCN_TESTSTART = 1,
+ BFI_DPORT_SCN_TESTCOMP = 2,
+ BFI_DPORT_SCN_SFP_REMOVED = 3,
+ BFI_DPORT_SCN_DDPORT_ENABLE = 4,
+ BFI_DPORT_SCN_DDPORT_DISABLE = 5,
+ BFI_DPORT_SCN_FCPORT_DISABLE = 6,
+ BFI_DPORT_SCN_SUBTESTSTART = 7,
+ BFI_DPORT_SCN_TESTSKIP = 8,
+ BFI_DPORT_SCN_DDPORT_DISABLED = 9,
+};
+
+struct bfi_diag_dport_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u8 req; /* request 1: enable 0: disable */
+ u8 rsvd[3];
+ u32 lpcnt;
+ u32 payload;
+};
+
+struct bfi_diag_dport_rsp_s {
+ struct bfi_mhdr_s mh; /* header 4 bytes */
+ bfa_status_t status; /* reply status */
+ wwn_t pwwn; /* switch port wwn. 8 bytes */
+ wwn_t nwwn; /* switch node wwn. 8 bytes */
+};
+
+struct bfi_diag_dport_scn_teststart_s {
+ wwn_t pwwn; /* switch port wwn. 8 bytes */
+ wwn_t nwwn; /* switch node wwn. 8 bytes */
+ u8 type; /* bfa_diag_dport_test_type_e */
+ u8 mode; /* bfa_diag_dport_test_opmode */
+ u8 rsvd[2];
+ u32 numfrm; /* from switch uint in 1M */
+};
+
+struct bfi_diag_dport_scn_testcomp_s {
+ u8 status; /* bfa_diag_dport_test_status_e */
+ u8 speed; /* bfa_port_speed_t */
+ u16 numbuffer; /* from switch */
+ u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */
+ u32 latency; /* from switch */
+ u32 distance; /* from swtich unit in meters */
+ /* Buffers required to saturate the link */
+ u16 frm_sz; /* from switch for buf_reqd */
+ u8 rsvd[2];
+};
+
+struct bfi_diag_dport_scn_s { /* max size == RDS_RMESZ */
+ struct bfi_mhdr_s mh; /* header 4 bytes */
+ u8 state; /* new state */
+ u8 rsvd[3];
+ union {
+ struct bfi_diag_dport_scn_teststart_s teststart;
+ struct bfi_diag_dport_scn_testcomp_s testcomp;
+ } info;
+};
+
+union bfi_diag_dport_msg_u {
+ struct bfi_diag_dport_req_s req;
+ struct bfi_diag_dport_rsp_s rsp;
+ struct bfi_diag_dport_scn_s scn;
+};
+
+/*
+ * PHY module specific
+ */
+enum bfi_phy_h2i_msgs_e {
+ BFI_PHY_H2I_QUERY_REQ = 1,
+ BFI_PHY_H2I_STATS_REQ = 2,
+ BFI_PHY_H2I_WRITE_REQ = 3,
+ BFI_PHY_H2I_READ_REQ = 4,
+};
+
+enum bfi_phy_i2h_msgs_e {
+ BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2),
+ BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_PHY_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * External PHY query request
+ */
+struct bfi_phy_query_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 rsv[3];
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY stats request
+ */
+struct bfi_phy_stats_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 rsv[3];
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY write request
+ */
+struct bfi_phy_write_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY read request
+ */
+struct bfi_phy_read_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY query response
+ */
+struct bfi_phy_query_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * External PHY stats response
+ */
+struct bfi_phy_stats_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * External PHY read response
+ */
+struct bfi_phy_read_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+ u32 length;
+};
+
+/*
+ * External PHY write response
+ */
+struct bfi_phy_write_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+ u32 length;
+};
+
+enum bfi_fru_h2i_msgs {
+ BFI_FRUVPD_H2I_WRITE_REQ = 1,
+ BFI_FRUVPD_H2I_READ_REQ = 2,
+ BFI_TFRU_H2I_WRITE_REQ = 3,
+ BFI_TFRU_H2I_READ_REQ = 4,
+};
+
+enum bfi_fru_i2h_msgs {
+ BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1),
+ BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2),
+ BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * FRU write request
+ */
+struct bfi_fru_write_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 last;
+ u8 rsv_1[3];
+ u8 trfr_cmpl;
+ u8 rsv_2[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * FRU read request
+ */
+struct bfi_fru_read_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * FRU response
+ */
+struct bfi_fru_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+ u32 length;
+};
+#pragma pack()
+
+#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
new file mode 100644
index 000000000..1a3fe5ad5
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -0,0 +1,878 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFI_MS_H__
+#define __BFI_MS_H__
+
+#include "bfi.h"
+#include "bfa_fc.h"
+#include "bfa_defs_svc.h"
+
+#pragma pack(1)
+
+enum bfi_iocfc_h2i_msgs {
+ BFI_IOCFC_H2I_CFG_REQ = 1,
+ BFI_IOCFC_H2I_SET_INTR_REQ = 2,
+ BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
+ BFI_IOCFC_H2I_FAA_QUERY_REQ = 4,
+ BFI_IOCFC_H2I_ADDR_REQ = 5,
+};
+
+enum bfi_iocfc_i2h_msgs {
+ BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
+ BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
+ BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4),
+ BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5),
+};
+
+struct bfi_iocfc_cfg_s {
+ u8 num_cqs; /* Number of CQs to be used */
+ u8 sense_buf_len; /* SCSI sense length */
+ u16 rsvd_1;
+ u32 endian_sig; /* endian signature of host */
+ u8 rsvd_2;
+ u8 single_msix_vec;
+ u8 rsvd[2];
+ __be16 num_ioim_reqs;
+ __be16 num_fwtio_reqs;
+
+
+ /*
+ * Request and response circular queue base addresses, size and
+ * shadow index pointers.
+ */
+ union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS];
+ union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS];
+ __be16 req_cq_elems[BFI_IOC_MAX_CQS];
+ union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS];
+ union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS];
+ __be16 rsp_cq_elems[BFI_IOC_MAX_CQS];
+
+ union bfi_addr_u stats_addr; /* DMA-able address for stats */
+ union bfi_addr_u cfgrsp_addr; /* config response dma address */
+ union bfi_addr_u ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
+ /* IO sense buf base addr segments */
+ struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
+};
+
+/*
+ * Boot target wwn information for this port. This contains either the stored
+ * or discovered boot target port wwns for the port.
+ */
+struct bfi_iocfc_bootwwns {
+ wwn_t wwn[BFA_BOOT_BOOTLUN_MAX];
+ u8 nwwns;
+ u8 rsvd[7];
+};
+
+/**
+ * Queue configuration response from firmware
+ */
+struct bfi_iocfc_qreg_s {
+ u32 cpe_q_ci_off[BFI_IOC_MAX_CQS];
+ u32 cpe_q_pi_off[BFI_IOC_MAX_CQS];
+ u32 cpe_qctl_off[BFI_IOC_MAX_CQS];
+ u32 rme_q_ci_off[BFI_IOC_MAX_CQS];
+ u32 rme_q_pi_off[BFI_IOC_MAX_CQS];
+ u32 rme_qctl_off[BFI_IOC_MAX_CQS];
+ u8 hw_qid[BFI_IOC_MAX_CQS];
+};
+
+struct bfi_iocfc_cfgrsp_s {
+ struct bfa_iocfc_fwcfg_s fwcfg;
+ struct bfa_iocfc_intr_attr_s intr_attr;
+ struct bfi_iocfc_bootwwns bootwwns;
+ struct bfi_pbc_s pbc_cfg;
+ struct bfi_iocfc_qreg_s qreg;
+};
+
+/*
+ * BFI_IOCFC_H2I_CFG_REQ message
+ */
+struct bfi_iocfc_cfg_req_s {
+ struct bfi_mhdr_s mh;
+ union bfi_addr_u ioc_cfg_dma_addr;
+};
+
+
+/*
+ * BFI_IOCFC_I2H_CFG_REPLY message
+ */
+struct bfi_iocfc_cfg_reply_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 cfg_success; /* cfg reply status */
+ u8 lpu_bm; /* LPUs assigned for this IOC */
+ u8 rsvd[2];
+};
+
+
+/*
+ * BFI_IOCFC_H2I_SET_INTR_REQ message
+ */
+struct bfi_iocfc_set_intr_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 coalesce; /* enable intr coalescing */
+ u8 rsvd[3];
+ __be16 delay; /* delay timer 0..1125us */
+ __be16 latency; /* latency timer 0..225us */
+};
+
+
+/*
+ * BFI_IOCFC_H2I_UPDATEQ_REQ message
+ */
+struct bfi_iocfc_updateq_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u32 reqq_ba; /* reqq base addr */
+ u32 rspq_ba; /* rspq base addr */
+ u32 reqq_sci; /* reqq shadow ci */
+ u32 rspq_spi; /* rspq shadow pi */
+};
+
+
+/*
+ * BFI_IOCFC_I2H_UPDATEQ_RSP message
+ */
+struct bfi_iocfc_updateq_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 status; /* updateq status */
+ u8 rsvd[3];
+};
+
+
+/*
+ * H2I Messages
+ */
+union bfi_iocfc_h2i_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_iocfc_cfg_req_s cfg_req;
+ struct bfi_iocfc_updateq_req_s updateq_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+
+/*
+ * I2H Messages
+ */
+union bfi_iocfc_i2h_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_iocfc_cfg_reply_s cfg_reply;
+ struct bfi_iocfc_updateq_rsp_s updateq_rsp;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_ENABLE_REQ BFI_IOCFC_H2I_FAA_DISABLE_REQ message
+ */
+struct bfi_faa_en_dis_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+};
+
+struct bfi_faa_addr_msg_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 rsvd[4];
+ wwn_t pwwn; /* Fabric acquired PWWN */
+ wwn_t nwwn; /* Fabric acquired PWWN */
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_QUERY_REQ message
+ */
+struct bfi_faa_query_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 faa_status; /* FAA status */
+ u8 addr_source; /* PWWN source */
+ u8 rsvd[2];
+ wwn_t faa; /* Fabric acquired PWWN */
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message
+ */
+struct bfi_faa_en_dis_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 status; /* updateq status */
+ u8 rsvd[3];
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_QUERY_RSP message
+ */
+#define bfi_faa_query_rsp_t struct bfi_faa_query_s
+
+enum bfi_fcport_h2i {
+ BFI_FCPORT_H2I_ENABLE_REQ = (1),
+ BFI_FCPORT_H2I_DISABLE_REQ = (2),
+ BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
+ BFI_FCPORT_H2I_STATS_GET_REQ = (4),
+ BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
+};
+
+
+enum bfi_fcport_i2h {
+ BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
+ BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
+ BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
+ BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
+ BFI_FCPORT_I2H_TRUNK_SCN = BFA_I2HM(7),
+ BFI_FCPORT_I2H_ENABLE_AEN = BFA_I2HM(8),
+ BFI_FCPORT_I2H_DISABLE_AEN = BFA_I2HM(9),
+};
+
+
+/*
+ * Generic REQ type
+ */
+struct bfi_fcport_req_s {
+ struct bfi_mhdr_s mh; /* msg header */
+ u32 msgtag; /* msgtag for reply */
+};
+
+/*
+ * Generic RSP type
+ */
+struct bfi_fcport_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 status; /* port enable status */
+ u8 rsvd[3];
+ struct bfa_port_cfg_s port_cfg;/* port configuration */
+ u32 msgtag; /* msgtag for reply */
+};
+
+/*
+ * BFI_FCPORT_H2I_ENABLE_REQ
+ */
+struct bfi_fcport_enable_req_s {
+ struct bfi_mhdr_s mh; /* msg header */
+ u32 rsvd1;
+ wwn_t nwwn; /* node wwn of physical port */
+ wwn_t pwwn; /* port wwn of physical port */
+ struct bfa_port_cfg_s port_cfg; /* port configuration */
+ union bfi_addr_u stats_dma_addr; /* DMA address for stats */
+ u32 msgtag; /* msgtag for reply */
+ u8 use_flash_cfg; /* get prot cfg from flash */
+ u8 rsvd2[3];
+};
+
+/*
+ * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
+ */
+struct bfi_fcport_set_svc_params_req_s {
+ struct bfi_mhdr_s mh; /* msg header */
+ __be16 tx_bbcredit; /* Tx credits */
+ u8 rsvd[2];
+};
+
+/*
+ * BFI_FCPORT_I2H_EVENT
+ */
+struct bfi_fcport_event_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ struct bfa_port_link_s link_state;
+};
+
+/*
+ * BFI_FCPORT_I2H_TRUNK_SCN
+ */
+struct bfi_fcport_trunk_link_s {
+ wwn_t trunk_wwn;
+ u8 fctl; /* bfa_trunk_link_fctl_t */
+ u8 state; /* bfa_trunk_link_state_t */
+ u8 speed; /* bfa_port_speed_t */
+ u8 rsvd;
+ __be32 deskew;
+};
+
+#define BFI_FCPORT_MAX_LINKS 2
+struct bfi_fcport_trunk_scn_s {
+ struct bfi_mhdr_s mh;
+ u8 trunk_state; /* bfa_trunk_state_t */
+ u8 trunk_speed; /* bfa_port_speed_t */
+ u8 rsvd_a[2];
+ struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
+};
+
+/*
+ * fcport H2I message
+ */
+union bfi_fcport_h2i_msg_u {
+ struct bfi_mhdr_s *mhdr;
+ struct bfi_fcport_enable_req_s *penable;
+ struct bfi_fcport_req_s *pdisable;
+ struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
+ struct bfi_fcport_req_s *pstatsget;
+ struct bfi_fcport_req_s *pstatsclear;
+};
+
+/*
+ * fcport I2H message
+ */
+union bfi_fcport_i2h_msg_u {
+ struct bfi_msg_s *msg;
+ struct bfi_fcport_rsp_s *penable_rsp;
+ struct bfi_fcport_rsp_s *pdisable_rsp;
+ struct bfi_fcport_rsp_s *psetsvcparams_rsp;
+ struct bfi_fcport_rsp_s *pstatsget_rsp;
+ struct bfi_fcport_rsp_s *pstatsclear_rsp;
+ struct bfi_fcport_event_s *event;
+ struct bfi_fcport_trunk_scn_s *trunk_scn;
+};
+
+enum bfi_fcxp_h2i {
+ BFI_FCXP_H2I_SEND_REQ = 1,
+};
+
+enum bfi_fcxp_i2h {
+ BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
+};
+
+#define BFA_FCXP_MAX_SGES 2
+
+/*
+ * FCXP send request structure
+ */
+struct bfi_fcxp_send_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 fcxp_tag; /* driver request tag */
+ __be16 max_frmsz; /* max send frame size */
+ __be16 vf_id; /* vsan tag if applicable */
+ u16 rport_fw_hndl; /* FW Handle for the remote port */
+ u8 class; /* FC class used for req/rsp */
+ u8 rsp_timeout; /* timeout in secs, 0-no response */
+ u8 cts; /* continue sequence */
+ u8 lp_fwtag; /* lport tag */
+ struct fchs_s fchs; /* request FC header structure */
+ __be32 req_len; /* request payload length */
+ __be32 rsp_maxlen; /* max response length expected */
+ struct bfi_alen_s req_alen; /* request buffer */
+ struct bfi_alen_s rsp_alen; /* response buffer */
+};
+
+/*
+ * FCXP send response structure
+ */
+struct bfi_fcxp_send_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 fcxp_tag; /* send request tag */
+ u8 req_status; /* request status */
+ u8 rsvd;
+ __be32 rsp_len; /* actual response length */
+ __be32 residue_len; /* residual response length */
+ struct fchs_s fchs; /* response FC header structure */
+};
+
+enum bfi_uf_h2i {
+ BFI_UF_H2I_BUF_POST = 1,
+};
+
+enum bfi_uf_i2h {
+ BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
+};
+
+#define BFA_UF_MAX_SGES 2
+
+struct bfi_uf_buf_post_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u16 buf_tag; /* buffer tag */
+ __be16 buf_len; /* total buffer length */
+ struct bfi_alen_s alen; /* buffer address/len pair */
+};
+
+struct bfi_uf_frm_rcvd_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u16 buf_tag; /* buffer tag */
+ u16 rsvd;
+ u16 frm_len; /* received frame length */
+ u16 xfr_len; /* tranferred length */
+};
+
+enum bfi_lps_h2i_msgs {
+ BFI_LPS_H2I_LOGIN_REQ = 1,
+ BFI_LPS_H2I_LOGOUT_REQ = 2,
+ BFI_LPS_H2I_N2N_PID_REQ = 3,
+};
+
+enum bfi_lps_i2h_msgs {
+ BFI_LPS_I2H_LOGIN_RSP = BFA_I2HM(1),
+ BFI_LPS_I2H_LOGOUT_RSP = BFA_I2HM(2),
+ BFI_LPS_I2H_CVL_EVENT = BFA_I2HM(3),
+};
+
+struct bfi_lps_login_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 bfa_tag;
+ u8 alpa;
+ __be16 pdu_size;
+ wwn_t pwwn;
+ wwn_t nwwn;
+ u8 fdisc;
+ u8 auth_en;
+ u8 lps_role;
+ u8 bb_scn;
+ u32 vvl_flag;
+};
+
+struct bfi_lps_login_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 fw_tag;
+ u8 status;
+ u8 lsrjt_rsn;
+ u8 lsrjt_expl;
+ wwn_t port_name;
+ wwn_t node_name;
+ __be16 bb_credit;
+ u8 f_port;
+ u8 npiv_en;
+ u32 lp_pid:24;
+ u32 auth_req:8;
+ mac_t lp_mac;
+ mac_t fcf_mac;
+ u8 ext_status;
+ u8 brcd_switch; /* attached peer is brcd switch */
+ u8 bfa_tag;
+ u8 rsvd;
+};
+
+struct bfi_lps_logout_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 fw_tag;
+ u8 rsvd[3];
+ wwn_t port_name;
+};
+
+struct bfi_lps_logout_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 bfa_tag;
+ u8 status;
+ u8 rsvd[2];
+};
+
+struct bfi_lps_cvl_event_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 bfa_tag;
+ u8 rsvd[3];
+};
+
+struct bfi_lps_n2n_pid_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 fw_tag;
+ u32 lp_pid:24;
+};
+
+union bfi_lps_h2i_msg_u {
+ struct bfi_mhdr_s *msg;
+ struct bfi_lps_login_req_s *login_req;
+ struct bfi_lps_logout_req_s *logout_req;
+ struct bfi_lps_n2n_pid_req_s *n2n_pid_req;
+};
+
+union bfi_lps_i2h_msg_u {
+ struct bfi_msg_s *msg;
+ struct bfi_lps_login_rsp_s *login_rsp;
+ struct bfi_lps_logout_rsp_s *logout_rsp;
+ struct bfi_lps_cvl_event_s *cvl_event;
+};
+
+enum bfi_rport_h2i_msgs {
+ BFI_RPORT_H2I_CREATE_REQ = 1,
+ BFI_RPORT_H2I_DELETE_REQ = 2,
+ BFI_RPORT_H2I_SET_SPEED_REQ = 3,
+};
+
+enum bfi_rport_i2h_msgs {
+ BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
+ BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
+ BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),
+ BFI_RPORT_I2H_LIP_SCN_ONLINE = BFA_I2HM(4),
+ BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5),
+ BFI_RPORT_I2H_NO_DEV = BFA_I2HM(6),
+};
+
+struct bfi_rport_create_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 bfa_handle; /* host rport handle */
+ __be16 max_frmsz; /* max rcv pdu size */
+ u32 pid:24, /* remote port ID */
+ lp_fwtag:8; /* local port tag */
+ u32 local_pid:24, /* local port ID */
+ cisc:8;
+ u8 fc_class; /* supported FC classes */
+ u8 vf_en; /* virtual fabric enable */
+ u16 vf_id; /* virtual fabric ID */
+};
+
+struct bfi_rport_create_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 status; /* rport creation status */
+ u8 rsvd[3];
+ u16 bfa_handle; /* host rport handle */
+ u16 fw_handle; /* firmware rport handle */
+ struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */
+};
+
+struct bfa_rport_speed_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 fw_handle; /* firmware rport handle */
+ u8 speed; /* rport's speed via RPSC */
+ u8 rsvd;
+};
+
+struct bfi_rport_delete_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 fw_handle; /* firmware rport handle */
+ u16 rsvd;
+};
+
+struct bfi_rport_delete_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 bfa_handle; /* host rport handle */
+ u8 status; /* rport deletion status */
+ u8 rsvd;
+};
+
+struct bfi_rport_qos_scn_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 bfa_handle; /* host rport handle */
+ u16 rsvd;
+ struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */
+ struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */
+};
+
+struct bfi_rport_lip_scn_s {
+ struct bfi_mhdr_s mh; /*!< common msg header */
+ u16 bfa_handle; /*!< host rport handle */
+ u8 status; /*!< scn online status */
+ u8 rsvd;
+ struct bfa_fcport_loop_info_s loop_info;
+};
+
+union bfi_rport_h2i_msg_u {
+ struct bfi_msg_s *msg;
+ struct bfi_rport_create_req_s *create_req;
+ struct bfi_rport_delete_req_s *delete_req;
+ struct bfi_rport_speed_req_s *speed_req;
+};
+
+union bfi_rport_i2h_msg_u {
+ struct bfi_msg_s *msg;
+ struct bfi_rport_create_rsp_s *create_rsp;
+ struct bfi_rport_delete_rsp_s *delete_rsp;
+ struct bfi_rport_qos_scn_s *qos_scn_evt;
+ struct bfi_rport_lip_scn_s *lip_scn;
+};
+
+/*
+ * Initiator mode I-T nexus interface defines.
+ */
+
+enum bfi_itn_h2i {
+ BFI_ITN_H2I_CREATE_REQ = 1, /* i-t nexus creation */
+ BFI_ITN_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
+};
+
+enum bfi_itn_i2h {
+ BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1),
+ BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2),
+ BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3),
+};
+
+struct bfi_itn_create_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 fw_handle; /* f/w handle for itnim */
+ u8 class; /* FC class for IO */
+ u8 seq_rec; /* sequence recovery support */
+ u8 msg_no; /* seq id of the msg */
+ u8 role;
+};
+
+struct bfi_itn_create_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 bfa_handle; /* bfa handle for itnim */
+ u8 status; /* fcp request status */
+ u8 seq_id; /* seq id of the msg */
+};
+
+struct bfi_itn_delete_req_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 fw_handle; /* f/w itnim handle */
+ u8 seq_id; /* seq id of the msg */
+ u8 rsvd;
+};
+
+struct bfi_itn_delete_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 bfa_handle; /* bfa handle for itnim */
+ u8 status; /* fcp request status */
+ u8 seq_id; /* seq id of the msg */
+};
+
+struct bfi_itn_sler_event_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u16 bfa_handle; /* bfa handle for itnim */
+ u16 rsvd;
+};
+
+union bfi_itn_h2i_msg_u {
+ struct bfi_itn_create_req_s *create_req;
+ struct bfi_itn_delete_req_s *delete_req;
+ struct bfi_msg_s *msg;
+};
+
+union bfi_itn_i2h_msg_u {
+ struct bfi_itn_create_rsp_s *create_rsp;
+ struct bfi_itn_delete_rsp_s *delete_rsp;
+ struct bfi_itn_sler_event_s *sler_event;
+ struct bfi_msg_s *msg;
+};
+
+/*
+ * Initiator mode IO interface defines.
+ */
+
+enum bfi_ioim_h2i {
+ BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */
+ BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */
+};
+
+enum bfi_ioim_i2h {
+ BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */
+ BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */
+};
+
+/*
+ * IO command DIF info
+ */
+struct bfi_ioim_dif_s {
+ u32 dif_info[4];
+};
+
+/*
+ * FCP IO messages overview
+ *
+ * @note
+ * - Max CDB length supported is 64 bytes.
+ * - SCSI Linked commands and SCSI bi-directional Commands not
+ * supported.
+ *
+ */
+struct bfi_ioim_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 io_tag; /* I/O tag */
+ u16 rport_hdl; /* itnim/rport firmware handle */
+ struct fcp_cmnd_s cmnd; /* IO request info */
+
+ /*
+ * SG elements array within the IO request must be double word
+ * aligned. This aligment is required to optimize SGM setup for the IO.
+ */
+ struct bfi_sge_s sges[BFI_SGE_INLINE_MAX];
+ u8 io_timeout;
+ u8 dif_en;
+ u8 rsvd_a[2];
+ struct bfi_ioim_dif_s dif;
+};
+
+/*
+ * This table shows various IO status codes from firmware and their
+ * meaning. Host driver can use these status codes to further process
+ * IO completions.
+ *
+ * BFI_IOIM_STS_OK : IO completed with error free SCSI &
+ * transport status.
+ * io-tag can be reused.
+ *
+ * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error.
+ * - io-tag can be reused.
+ *
+ * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to
+ * host request.
+ * - io-tag cannot be reused yet.
+ *
+ * BFI_IOIM_STS_ABORTED : IO was aborted successfully
+ * internally by f/w.
+ * - io-tag cannot be reused yet.
+ *
+ * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening
+ * in the firmware and
+ * - io-tag cannot be reused yet.
+ *
+ * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO
+ * with sequence level error
+ * logic and hence host needs to retry
+ * this IO with a different IO tag
+ * - io-tag cannot be used yet.
+ *
+ * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host
+ * is required because 2 consecutive ABTS
+ * timedout and host needs logout and
+ * re-login with the target
+ * - io-tag cannot be used yet.
+ *
+ * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good,
+ * but the data tranferred is less than
+ * the fcp data length in the command.
+ * ex. SCSI INQUIRY where transferred
+ * data length and residue count in FCP
+ * response accounts for total fcp-dl
+ * - io-tag can be reused.
+ *
+ * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good,
+ * but the data transerred is more than
+ * fcp data length in the command. ex.
+ * TAPE IOs where blocks can of unequal
+ * lengths.
+ * - io-tag can be reused.
+ *
+ * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag
+ * during abort process
+ * - io-tag can be reused.
+ *
+ * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error.
+ * ex target sent more data than
+ * requested, or there was data frame
+ * loss and other reasons
+ * - io-tag cannot be used yet.
+ *
+ * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF
+ * CRC err or Ref Tag err or App tag err.
+ * - io-tag can be reused.
+ *
+ * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task
+ * Management command from the host
+ * - io-tag can be reused.
+ *
+ * BFI_IOIM_STS_UTAG : Firmware does not know about this
+ * io_tag.
+ * - io-tag can be reused.
+ */
+enum bfi_ioim_status {
+ BFI_IOIM_STS_OK = 0,
+ BFI_IOIM_STS_HOST_ABORTED = 1,
+ BFI_IOIM_STS_ABORTED = 2,
+ BFI_IOIM_STS_TIMEDOUT = 3,
+ BFI_IOIM_STS_RES_FREE = 4,
+ BFI_IOIM_STS_SQER_NEEDED = 5,
+ BFI_IOIM_STS_PROTO_ERR = 6,
+ BFI_IOIM_STS_UTAG = 7,
+ BFI_IOIM_STS_PATHTOV = 8,
+};
+
+/*
+ * I/O response message
+ */
+struct bfi_ioim_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ __be16 io_tag; /* completed IO tag */
+ u16 bfa_rport_hndl; /* releated rport handle */
+ u8 io_status; /* IO completion status */
+ u8 reuse_io_tag; /* IO tag can be reused */
+ u16 abort_tag; /* host abort request tag */
+ u8 scsi_status; /* scsi status from target */
+ u8 sns_len; /* scsi sense length */
+ u8 resid_flags; /* IO residue flags */
+ u8 rsvd_a;
+ __be32 residue; /* IO residual length in bytes */
+ u32 rsvd_b[3];
+};
+
+struct bfi_ioim_abort_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 io_tag; /* I/O tag */
+ u16 abort_tag; /* unique request tag */
+};
+
+/*
+ * Initiator mode task management command interface defines.
+ */
+
+enum bfi_tskim_h2i {
+ BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */
+ BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */
+};
+
+enum bfi_tskim_i2h {
+ BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
+};
+
+struct bfi_tskim_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 tsk_tag; /* task management tag */
+ u16 itn_fhdl; /* itn firmware handle */
+ struct scsi_lun lun; /* LU number */
+ u8 tm_flags; /* see enum fcp_tm_cmnd */
+ u8 t_secs; /* Timeout value in seconds */
+ u8 rsvd[2];
+};
+
+struct bfi_tskim_abortreq_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 tsk_tag; /* task management tag */
+ u16 rsvd;
+};
+
+enum bfi_tskim_status {
+ /*
+ * Following are FCP-4 spec defined status codes,
+ * **DO NOT CHANGE THEM **
+ */
+ BFI_TSKIM_STS_OK = 0,
+ BFI_TSKIM_STS_NOT_SUPP = 4,
+ BFI_TSKIM_STS_FAILED = 5,
+
+ /*
+ * Defined by BFA
+ */
+ BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
+ BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
+ BFI_TSKIM_STS_UTAG = 12, /* unknown tag for request */
+};
+
+struct bfi_tskim_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ __be16 tsk_tag; /* task mgmt cmnd tag */
+ u8 tsk_status; /* @ref bfi_tskim_status */
+ u8 rsvd;
+};
+
+#pragma pack()
+
+/*
+ * Crossbow PCI MSI-X vector defines
+ */
+enum {
+ BFI_MSIX_CPE_QMIN_CB = 0,
+ BFI_MSIX_CPE_QMAX_CB = 7,
+ BFI_MSIX_RME_QMIN_CB = 8,
+ BFI_MSIX_RME_QMAX_CB = 15,
+ BFI_MSIX_CB_MAX = 22,
+};
+
+/*
+ * Catapult FC PCI MSI-X vector defines
+ */
+enum {
+ BFI_MSIX_LPU_ERR_CT = 0,
+ BFI_MSIX_CPE_QMIN_CT = 1,
+ BFI_MSIX_CPE_QMAX_CT = 4,
+ BFI_MSIX_RME_QMIN_CT = 5,
+ BFI_MSIX_RME_QMAX_CT = 8,
+ BFI_MSIX_CT_MAX = 9,
+};
+
+#endif /* __BFI_MS_H__ */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644
index 000000000..99133bcf5
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
+#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
+#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
+#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
+#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
+#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
+#define HOSTFN2_INT_MSK 0x00014304 /* ct */
+#define HOSTFN3_INT_MSK 0x00014404 /* ct */
+
+#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
+#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
+#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
+#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
+
+#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
+#define __P_LCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH 17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH 12
+#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF 0x00000800
+#define __APP_PLL_LCLK_HDIV 0x00000400
+#define __APP_PLL_LCLK_P0_1_MK 0x00000300
+#define __APP_PLL_LCLK_P0_1_SH 8
+#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH 5
+#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500 0x00000010
+#define __APP_PLL_LCLK_ENARST 0x00000008
+#define __APP_PLL_LCLK_BYPASS 0x00000004
+#define __APP_PLL_LCLK_LRESETN 0x00000002
+#define __APP_PLL_LCLK_ENABLE 0x00000001
+#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH 17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH 12
+#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF 0x00000800
+#define __APP_PLL_SCLK_HDIV 0x00000400
+#define __APP_PLL_SCLK_P0_1_MK 0x00000300
+#define __APP_PLL_SCLK_P0_1_SH 8
+#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH 5
+#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500 0x00000010
+#define __APP_PLL_SCLK_ENARST 0x00000008
+#define __APP_PLL_SCLK_BYPASS 0x00000004
+#define __APP_PLL_SCLK_LRESETN 0x00000002
+#define __APP_PLL_SCLK_ENABLE 0x00000001
+#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
+#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
+#define __ENABLE_MAC_1 0x00200000 /* ct */
+#define __ENABLE_MAC_0 0x00100000 /* ct */
+
+#define HOST_SEM0_REG 0x00014230 /* cb/ct */
+#define HOST_SEM1_REG 0x00014234 /* cb/ct */
+#define HOST_SEM2_REG 0x00014238 /* cb/ct */
+#define HOST_SEM3_REG 0x0001423c /* cb/ct */
+#define HOST_SEM4_REG 0x00014610 /* cb/ct */
+#define HOST_SEM5_REG 0x00014614 /* cb/ct */
+#define HOST_SEM6_REG 0x00014618 /* cb/ct */
+#define HOST_SEM7_REG 0x0001461c /* cb/ct */
+#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
+#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
+#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
+#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
+#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
+#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
+#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
+#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
+
+#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
+#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
+#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
+#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
+#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
+#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
+#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
+#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
+#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
+#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
+#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
+#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
+#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
+#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
+#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
+#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
+
+#define PSS_CTL_REG 0x00018800 /* cb/ct */
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
+#define ERR_SET_REG 0x00018818 /* cb/ct */
+#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
+#define __PSS_GPIO_OUT_REG 0x00000fff
+#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
+#define __PSS_GPIO_OE_REG 0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
+#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
+#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
+#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
+#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
+#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
+
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
+
+#define MBIST_CTL_REG 0x00014220 /* ct */
+#define __EDRAM_BISTR_START 0x00000004
+#define MBIST_STAT_REG 0x00014224 /* ct */
+#define ETH_MAC_SER_REG 0x00014288 /* ct */
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define FNC_PERS_REG 0x00014604 /* ct */
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE 0x0001460c /* ct */
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac /* ct */
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc /* ct */
+#define PMM_1T_RESET_REG_P0 0x0002381c /* ct */
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c /* ct */
+
+/**
+ * Catapult-2 specific defines
+ */
+#define CT2_PCI_CPQ_BASE 0x00030000
+#define CT2_PCI_APP_BASE 0x00030100
+#define CT2_PCI_ETH_BASE 0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_ 0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH 19
+#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK 0x00060000
+#define __FC_LL_PORT_MAP__SH 17
+#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_ 0x00010000
+#define __PF_VF_CFG_RDY_ 0x00008000
+#define __PF_VF_ENABLE_ 0x00004000
+#define __PF_DRIVER_ACTIVE_ 0x00002000
+#define __PF_PME_SEND_ENABLE_ 0x00001000
+#define __PF_EXROM_OFFSET__MK 0x00000ff0
+#define __PF_EXROM_OFFSET__SH 4
+#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_ 0x00000008
+#define __PF_INTX_PIN_ 0x00000007
+#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK 0xff000000
+#define __PF_NUM_QUEUES1__SH 24
+#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH 16
+#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH 8
+#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_ 0x000000ff
+#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Catapult-2 CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG 0x000148f0
+#define CT2_HOST_SEM1_REG 0x000148f4
+#define CT2_HOST_SEM2_REG 0x000148f8
+#define CT2_HOST_SEM3_REG 0x000148fc
+#define CT2_HOST_SEM4_REG 0x00014900
+#define CT2_HOST_SEM5_REG 0x00014904
+#define CT2_HOST_SEM6_REG 0x00014908
+#define CT2_HOST_SEM7_REG 0x0001490c
+#define CT2_HOST_SEM0_INFO_REG 0x000148b0
+#define CT2_HOST_SEM1_INFO_REG 0x000148b4
+#define CT2_HOST_SEM2_INFO_REG 0x000148b8
+#define CT2_HOST_SEM3_INFO_REG 0x000148bc
+#define CT2_HOST_SEM4_INFO_REG 0x000148c0
+#define CT2_HOST_SEM5_INFO_REG 0x000148c4
+#define CT2_HOST_SEM6_INFO_REG 0x000148c8
+#define CT2_HOST_SEM7_INFO_REG 0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
+#define __APP_LPUCLK_HALFSPEED 0x40000000
+#define __APP_PLL_LCLK_LOAD 0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH 21
+#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+ __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB 0x00000800
+#define __APP_PLL_LCLK_ENOUTS 0x00000400
+#define __APP_PLL_LCLK_RATE 0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
+#define __APP_PLL_SCLK_LOAD 0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH 20
+#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_SCLK_FBCNT_NORM = 6,
+ __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB 0x00000800
+#define __APP_PLL_SCLK_ENOUTS 0x00000400
+#define __APP_PLL_SCLK_RATE 0x00000010
+#define CT2_PCIE_MISC_REG 0x00014804
+#define __ETH_CLK_ENABLE_PORT1 0x00000010
+#define CT2_CHIP_MISC_PRG 0x000148a4
+#define __ETH_CLK_ENABLE_PORT0 0x00004000
+#define __APP_LPU_SPEED 0x00000002
+#define CT2_MBIST_STAT_REG 0x00014818
+#define CT2_MBIST_CTL_REG 0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
+#define __PMM_1T_PNDB_P 0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
+#define CT2_WGN_STATUS 0x00014990
+#define __A2T_AHB_LOAD 0x00000800
+#define __WGN_READY 0x00000400
+#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_STS_REG 0x00027410
+#define CT2_NFC_CSR_CLR_REG 0x00027420
+#define CT2_NFC_CSR_SET_REG 0x00027424
+#define __HALT_NFC_CONTROLLER 0x00000002
+#define __NFC_CONTROLLER_HALTED 0x00001000
+#define CT2_RSC_GPR15_REG 0x0002765c
+#define CT2_CSI_FW_CTL_REG 0x00027080
+#define CT2_CSI_FW_CTL_SET_REG 0x00027088
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
+
+#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
+#define __CSI_MAC_RESET 0x00000010
+#define __CSI_MAC_AHB_RESET 0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n) \
+ (CT2_CSI_MAC0_CONTROL_REG + \
+ (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
+
+#define CT2_NFC_FLASH_STS_REG 0x00014834
+#define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS 0x00000020
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+#define __HFN_INT_ERR_MASK \
+ (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+ __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
+
+/*
+ * Host interrupt status defines for catapult-2
+ */
+#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
+#define __HFN_INT_ERR_PSS_CT2 0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2 0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+ (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+ __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+ __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+ __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * asic memory map.
+ */
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
+
+#endif /* __BFI_REG_H__ */
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
new file mode 100644
index 000000000..fe2106c91
--- /dev/null
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -0,0 +1,1003 @@
+/* 57xx_hsi_bnx2fc.h: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef __57XX_FCOE_HSI_LINUX_LE__
+#define __57XX_FCOE_HSI_LINUX_LE__
+
+/*
+ * common data for all protocols
+ */
+struct b577xx_doorbell_hdr {
+ u8 header;
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 zero_fill2;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 zero_fill2;
+#endif
+};
+
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+
+/*
+ * ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_info {
+ __le16 aborted_task_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+ u8 r_ctl;
+ u8 rsrv[3];
+ __le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+ __le32 buf_addr_hi;
+ __le32 buf_addr_lo;
+ __le16 buf_len;
+ __le16 rsrv0;
+ __le16 flags;
+ __le16 rsrv1;
+};
+
+
+/*
+ * FCoE cached sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+ struct regpair cur_buf_addr;
+ __le16 cur_buf_rem;
+ __le16 second_buf_rem;
+ struct regpair second_buf_addr;
+};
+
+
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+ __le16 cleaned_task_id;
+ __le16 rolled_tx_seq_cnt;
+ __le32 rolled_tx_data_offset;
+};
+
+
+/*
+ * Fcp RSP flags $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+/*
+ * Fcp RSP payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ __le32 fcp_resid;
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ __le16 retry_delay_timer;
+ __le32 fcp_rsp_len;
+ __le32 fcp_sns_len;
+};
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+ u8 s_id[3];
+ u8 cs_ctl;
+ u8 d_id[3];
+ u8 r_ctl;
+ __le16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+ u8 f_ctl[3];
+ u8 type;
+ __le32 parameters;
+ __le16 rx_id;
+ __le16 ox_id;
+};
+
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 mp_payload_len;
+ __le32 rsrv;
+};
+
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+ struct fcoe_mp_rsp_union mp_rsp;
+ __le32 opaque[8];
+};
+
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+ __le32 rsrv0[6];
+ struct fcoe_abts_info ctx;
+};
+
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+ __le32 rsrv0[6];
+ struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+ __le32 data_offset;
+ __le16 seq_cnt;
+ __le16 rsrv0;
+};
+
+/*
+ * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+ __le32 rsrv0[6];
+ struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+ __le16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+ struct fcoe_mul_sges_ctx mul_sgl;
+ struct regpair rsrv0;
+};
+
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+ __le32 opaque[8];
+};
+
+
+
+
+
+/*
+ * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+ __le32 burst_len;
+ __le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 reserved0[2];
+};
+
+
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kcqe_params {
+ __le32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kcqe {
+ __le32 fcoe_conn_id;
+ __le32 completion_status;
+ __le32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+ __le16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+};
+
+
+
+/*
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_header {
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init1 {
+ __le16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+ __le32 task_list_pbl_addr_lo;
+ __le32 task_list_pbl_addr_hi;
+ __le32 dummy_buffer_addr_lo;
+ __le32 dummy_buffer_addr_hi;
+ __le16 sq_num_wqes;
+ __le16 rq_num_wqes;
+ __le16 rq_buffer_log_size;
+ __le16 cq_num_wqes;
+ __le16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init2 {
+ u8 hsi_major_version;
+ u8 hsi_minor_version;
+ struct fcoe_kwqe_header hdr;
+ __le32 hash_tbl_pbl_addr_lo;
+ __le32 hash_tbl_pbl_addr_hi;
+ __le32 t2_hash_tbl_addr_lo;
+ __le32 t2_hash_tbl_addr_hi;
+ __le32 t2_ptr_hash_tbl_addr_lo;
+ __le32 t2_ptr_hash_tbl_addr_hi;
+ __le32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init3 {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 error_bit_map_lo;
+ __le32 error_bit_map_hi;
+ u8 perf_config;
+ u8 reserved21[3];
+ __le32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload1 {
+ __le16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+ __le32 sq_addr_lo;
+ __le32 sq_addr_hi;
+ __le32 rq_pbl_addr_lo;
+ __le32 rq_pbl_addr_hi;
+ __le32 rq_first_pbe_addr_lo;
+ __le32 rq_first_pbe_addr_hi;
+ __le16 rq_prod;
+ __le16 reserved0;
+};
+
+/*
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload2 {
+ __le16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+ __le32 cq_addr_lo;
+ __le32 cq_addr_hi;
+ __le32 xferq_addr_lo;
+ __le32 xferq_addr_hi;
+ __le32 conn_db_addr_lo;
+ __le32 conn_db_addr_hi;
+ __le32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload3 {
+ __le16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ __le32 reserved;
+ __le32 confq_first_pbe_addr_lo;
+ __le32 confq_first_pbe_addr_hi;
+ __le16 tx_total_conc_seqs;
+ __le16 rx_max_fc_pay_len;
+ __le16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+};
+
+/*
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload4 {
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u8 dst_mac_addr_hi[2];
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ __le32 lcq_addr_lo;
+ __le32 lcq_addr_hi;
+ __le32 confq_pbl_base_addr_lo;
+ __le32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_enable_disable {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ u8 dst_mac_addr_hi[2];
+ __le16 reserved1;
+ u8 s_id[3];
+ u8 vlan_flag;
+ u8 d_id[3];
+ u8 reserved3;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_destroy {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoe destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_destroy {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 reserved1[7];
+};
+
+/*
+ * FCoe statistics request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_stat {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 stat_params_addr_lo;
+ __le32 stat_params_addr_hi;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+ struct fcoe_cached_sge_ctx cached_sge;
+ struct fcoe_ext_mul_sges_ctx sgl;
+ __le32 opaque[5];
+};
+
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0[3];
+};
+
+
+/*
+ * Fcoe stat context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * Fcoe rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ __le16 seq_cnt;
+ __le32 low_exp_ro;
+ __le32 high_exp_ro;
+};
+
+
+/*
+ * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+ struct fcoe_read_flow_info read_info;
+ union fcoe_comp_flow_info comp_info;
+ __le32 opaque[8];
+};
+
+
+
+/*
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_sqe {
+ __le16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0;
+};
+
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+ struct fcoe_fc_frame tx_frame;
+ struct fcoe_fcp_cmd_payload fcp_cmd;
+ struct fcoe_ext_cleanup_info cleanup;
+ struct fcoe_ext_abts_info abts;
+ struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+ __le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+ u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+ u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+ __le16 rsrv3;
+ __le32 verify_tx_seq;
+};
+
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+ union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+ struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+ __le32 data_2_trns;
+ __le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+ __le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+ __le16 rx_id;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+ struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+ struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+ struct fcoe_rx_seq_ctx rx_seq_ctx;
+ union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_task_ctx_entry {
+ struct fcoe_tce_tx_only txwr_only;
+ struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+ struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+ struct fcoe_tce_rx_only rxwr_only;
+};
+
+
+
+
+
+
+
+
+
+
+/*
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_xfrqe {
+ __le16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * fcoe rx doorbell message sent to the chip $$KEEP_ENDIANNESS$$
+ */
+struct b577xx_fcoe_rx_doorbell {
+ struct b577xx_doorbell_hdr hdr;
+ u8 params;
+#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0)
+#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0
+#define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5)
+#define B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5
+ __le16 doorbell_cq_cons;
+};
+
+
+/*
+ * FCoE CONFQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_confqe {
+ __le16 ox_id;
+ __le16 rx_id;
+ __le32 param;
+};
+
+
+/*
+ * FCoE conection data base
+ */
+struct fcoe_conn_db {
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 rsrv0;
+#endif
+ u32 rsrv1;
+ struct regpair cq_arm;
+};
+
+
+/*
+ * FCoE CQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cqe {
+ __le16 wqe;
+#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
+#define FCOE_CQE_CQE_INFO_SHIFT 0
+#define FCOE_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_partial_err_report_entry {
+ __le32 err_warn_bitmap_lo;
+ __le32 err_warn_bitmap_hi;
+ __le32 tx_buf_off;
+ __le32 rx_buf_off;
+};
+
+/*
+ * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_err_report_entry {
+ struct fcoe_partial_err_report_entry data;
+ struct fcoe_fc_hdr fc_hdr;
+};
+
+
+/*
+ * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_hash_table_entry {
+ u8 s_id_0;
+ u8 s_id_1;
+ u8 s_id_2;
+ u8 d_id_0;
+ u8 d_id_1;
+ u8 d_id_2;
+ __le16 dst_mac_addr_hi;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_lo;
+ __le16 src_mac_addr_hi;
+ __le16 vlan_id;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ u8 vlan_flag;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 field_id;
+#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
+#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
+#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
+#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
+};
+
+
+/*
+ * FCoE LCQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_lcqe {
+ __le32 wqe;
+#define FCOE_LCQE_TASK_ID (0xFFFF<<0)
+#define FCOE_LCQE_TASK_ID_SHIFT 0
+#define FCOE_LCQE_LCQE_TYPE (0xFF<<16)
+#define FCOE_LCQE_LCQE_TYPE_SHIFT 16
+#define FCOE_LCQE_RESERVED (0xFF<<24)
+#define FCOE_LCQE_RESERVED_SHIFT 24
+};
+
+
+
+/*
+ * FCoE pending work request CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_pend_wq_cqe {
+ __le16 wqe;
+#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
+#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
+#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_stat_params_section0 {
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_stat_params_section1 {
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tx_stat_params {
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_t2_hash_table_entry {
+ struct fcoe_hash_table_entry data;
+ struct regpair next;
+ struct regpair reserved0[3];
+};
+
+
+
+/*
+ * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_unsolicited_cqe {
+ __le16 wqe;
+#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
+#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
+#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
+#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
new file mode 100644
index 000000000..097882882
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -0,0 +1,13 @@
+config SCSI_BNX2X_FCOE
+ tristate "QLogic NetXtreme II FCoE support"
+ depends on PCI
+ depends on (IPV6 || IPV6=n)
+ depends on LIBFC
+ depends on LIBFCOE
+ select NETDEVICES
+ select ETHERNET
+ select NET_VENDOR_BROADCOM
+ select CNIC
+ ---help---
+ This driver supports FCoE offload for the QLogic NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
new file mode 100644
index 000000000..141149e8c
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
+
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \
+ bnx2fc_debug.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
new file mode 100644
index 000000000..1346e052e
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -0,0 +1,594 @@
+/* bnx2fc.h: QLogic NetXtreme II Linux FCoE offload driver.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_encode.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/fc_frame.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "57xx_hsi_bnx2fc.h"
+#include "../../net/ethernet/broadcom/cnic_if.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
+#include "bnx2fc_constants.h"
+
+#define BNX2FC_NAME "bnx2fc"
+#define BNX2FC_VERSION "2.4.2"
+
+#define PFX "bnx2fc: "
+
+#define BCM_CHIP_LEN 16
+
+#define BNX2X_DOORBELL_PCI_BAR 2
+
+#define BNX2FC_MAX_BD_LEN 0xffff
+#define BNX2FC_BD_SPLIT_SZ 0x8000
+#define BNX2FC_MAX_BDS_PER_CMD 256
+
+#define BNX2FC_SQ_WQES_MAX 256
+
+#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
+#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
+#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
+
+#define BNX2FC_RQ_WQES_MAX 16
+#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
+
+#define BNX2FC_NUM_MAX_SESS 1024
+#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+
+#define BNX2FC_MAX_NPIV 256
+
+#define BNX2FC_MIN_PAYLOAD 256
+#define BNX2FC_MAX_PAYLOAD 2048
+#define BNX2FC_MFS \
+ (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header))
+#define BNX2FC_MINI_JUMBO_MTU 2500
+
+
+#define BNX2FC_RQ_BUF_SZ 256
+#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
+
+#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
+#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
+#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
+#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
+#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
+#define BNX2X_DB_SHIFT 3
+
+#define BNX2FC_TASK_SIZE 128
+#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
+
+#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
+#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
+
+#define BNX2FC_MAX_SEQS 255
+#define BNX2FC_MAX_RETRY_CNT 3
+#define BNX2FC_MAX_RPORT_RETRY_CNT 255
+
+#define BNX2FC_READ (1 << 1)
+#define BNX2FC_WRITE (1 << 0)
+
+#define BNX2FC_MIN_XID 0
+#define FCOE_MAX_NUM_XIDS 0x2000
+#define FCOE_MAX_XID_OFFSET (FCOE_MAX_NUM_XIDS - 1)
+#define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
+#define BNX2FC_MAX_LUN 0xFFFF
+#define BNX2FC_MAX_FCP_TGT 256
+#define BNX2FC_MAX_CMD_LEN 16
+
+#define BNX2FC_TM_TIMEOUT 60 /* secs */
+#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
+
+#define BNX2FC_WAIT_CNT 1200
+#define BNX2FC_FW_TIMEOUT (3 * HZ)
+#define PORT_MAX 2
+
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+
+/* FC FCP Status */
+#define FC_GOOD 0
+
+#define BNX2FC_RNID_HBA 0x7
+
+#define SRR_RETRY_COUNT 5
+#define REC_RETRY_COUNT 1
+#define BNX2FC_NUM_ERR_BITS 63
+
+#define BNX2FC_RELOGIN_WAIT_TIME 200
+#define BNX2FC_RELOGIN_WAIT_CNT 10
+
+#define BNX2FC_STATS(hba, stat, cnt) \
+ do { \
+ u32 val; \
+ \
+ val = fw_stats->stat.cnt; \
+ if (hba->prev_stats.stat.cnt <= val) \
+ val -= hba->prev_stats.stat.cnt; \
+ else \
+ val += (0xfffffff - hba->prev_stats.stat.cnt); \
+ hba->bfw_stats.cnt += val; \
+ } while (0)
+
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
+
+extern struct workqueue_struct *bnx2fc_wq;
+
+struct bnx2fc_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t fp_work_lock;
+};
+
+struct bnx2fc_fw_stats {
+ u64 fc_crc_cnt;
+ u64 fcoe_tx_pkt_cnt;
+ u64 fcoe_rx_pkt_cnt;
+ u64 fcoe_tx_byte_cnt;
+ u64 fcoe_rx_byte_cnt;
+};
+
+struct bnx2fc_hba {
+ struct list_head list;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *phys_dev;
+ unsigned long reg_with_cnic;
+ #define BNX2FC_CNIC_REGISTERED 1
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ spinlock_t hba_lock;
+ struct mutex hba_mutex;
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_READY 3
+ unsigned long flags;
+ #define BNX2FC_FLAG_FW_INIT_DONE 0
+ #define BNX2FC_FLAG_DESTROY_CMPL 1
+ u32 next_conn_id;
+
+ /* xid resources */
+ u16 max_xid;
+ u32 max_tasks;
+ u32 max_outstanding_cmds;
+ u32 elstm_xids;
+
+ struct fcoe_task_ctx_entry **task_ctx;
+ dma_addr_t *task_ctx_dma;
+ struct regpair *task_ctx_bd_tbl;
+ dma_addr_t task_ctx_bd_dma;
+
+ int hash_tbl_segment_count;
+ void **hash_tbl_segments;
+ void *hash_tbl_pbl;
+ dma_addr_t hash_tbl_pbl_dma;
+ struct fcoe_t2_hash_table_entry *t2_hash_tbl;
+ dma_addr_t t2_hash_tbl_dma;
+ char *t2_hash_tbl_ptr;
+ dma_addr_t t2_hash_tbl_ptr_dma;
+
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport **tgt_ofld_list;
+
+ /* statistics */
+ struct bnx2fc_fw_stats bfw_stats;
+ struct fcoe_statistics_params prev_stats;
+ struct fcoe_statistics_params *stats_buffer;
+ dma_addr_t stats_buf_dma;
+ struct completion stat_req_done;
+ struct fcoe_capabilities fcoe_cap;
+
+ /*destroy handling */
+ struct timer_list destroy_timer;
+ wait_queue_head_t destroy_wait;
+
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
+ int num_ofld_sess;
+ struct list_head vports;
+
+ char chip_num[BCM_CHIP_LEN];
+};
+
+struct bnx2fc_interface {
+ struct list_head list;
+ unsigned long if_flags;
+ #define BNX2FC_CTLR_INIT_DONE 0
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ u8 vlan_enabled;
+ int vlan_id;
+ bool enabled;
+};
+
+#define bnx2fc_from_ctlr(x) \
+ ((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x) \
+ ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
+
+struct bnx2fc_lport {
+ struct list_head list;
+ struct fc_lport *lport;
+};
+
+struct bnx2fc_cmd_mgr {
+ struct bnx2fc_hba *hba;
+ u16 next_idx;
+ struct list_head *free_list;
+ spinlock_t *free_list_lock;
+ struct io_bdt **io_bdt_pool;
+ struct bnx2fc_cmd **cmds;
+};
+
+struct bnx2fc_rport {
+ struct fcoe_port *port;
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+ u32 io_timeout;
+ u32 fcoe_conn_id;
+ u32 context_id;
+ u32 sid;
+ int dev_type;
+
+ unsigned long flags;
+#define BNX2FC_FLAG_SESSION_READY 0x1
+#define BNX2FC_FLAG_OFFLOADED 0x2
+#define BNX2FC_FLAG_DISABLED 0x3
+#define BNX2FC_FLAG_DESTROYED 0x4
+#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
+#define BNX2FC_FLAG_EXPL_LOGO 0x8
+#define BNX2FC_FLAG_DISABLE_FAILED 0x9
+#define BNX2FC_FLAG_ENABLED 0xa
+
+ u8 src_addr[ETH_ALEN];
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+ atomic_t free_sqes;
+
+ struct b577xx_doorbell_set_prod sq_db;
+ struct b577xx_fcoe_rx_doorbell rx_db;
+
+ struct fcoe_sqe *sq;
+ dma_addr_t sq_dma;
+ u16 sq_prod_idx;
+ u8 sq_curr_toggle_bit;
+ u32 sq_mem_size;
+
+ struct fcoe_cqe *cq;
+ dma_addr_t cq_dma;
+ u16 cq_cons_idx;
+ u8 cq_curr_toggle_bit;
+ u32 cq_mem_size;
+
+ void *rq;
+ dma_addr_t rq_dma;
+ u32 rq_prod_idx;
+ u32 rq_cons_idx;
+ u32 rq_mem_size;
+
+ void *rq_pbl;
+ dma_addr_t rq_pbl_dma;
+ u32 rq_pbl_size;
+
+ struct fcoe_xfrqe *xferq;
+ dma_addr_t xferq_dma;
+ u32 xferq_mem_size;
+
+ struct fcoe_confqe *confq;
+ dma_addr_t confq_dma;
+ u32 confq_mem_size;
+
+ void *confq_pbl;
+ dma_addr_t confq_pbl_dma;
+ u32 confq_pbl_size;
+
+ struct fcoe_conn_db *conn_db;
+ dma_addr_t conn_db_dma;
+ u32 conn_db_mem_size;
+
+ struct fcoe_sqe *lcq;
+ dma_addr_t lcq_dma;
+ u32 lcq_mem_size;
+
+ void *ofld_req[4];
+ dma_addr_t ofld_req_dma[4];
+ void *enbl_req;
+ dma_addr_t enbl_req_dma;
+
+ spinlock_t tgt_lock;
+ spinlock_t cq_lock;
+ atomic_t num_active_ios;
+ u32 flush_in_prog;
+ unsigned long timestamp;
+ unsigned long retry_delay_timestamp;
+ struct list_head free_task_list;
+ struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
+ struct list_head active_cmd_queue;
+ struct list_head els_queue;
+ struct list_head io_retire_queue;
+ struct list_head active_tm_queue;
+
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+
+ struct timer_list upld_timer;
+ wait_queue_head_t upld_wait;
+};
+
+struct bnx2fc_mp_req {
+ u8 tm_flags;
+
+ u32 req_len;
+ void *req_buf;
+ dma_addr_t req_buf_dma;
+ struct fcoe_bd_ctx *mp_req_bd;
+ dma_addr_t mp_req_bd_dma;
+ struct fc_frame_header req_fc_hdr;
+
+ u32 resp_len;
+ void *resp_buf;
+ dma_addr_t resp_buf_dma;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ dma_addr_t mp_resp_bd_dma;
+ struct fc_frame_header resp_fc_hdr;
+};
+
+struct bnx2fc_els_cb_arg {
+ struct bnx2fc_cmd *aborted_io_req;
+ struct bnx2fc_cmd *io_req;
+ u16 l2_oxid;
+ u32 offset;
+ enum fc_rctl r_ctl;
+};
+
+/* bnx2fc command structure */
+struct bnx2fc_cmd {
+ struct list_head link;
+ u8 on_active_queue;
+ u8 on_tmf_queue;
+ u8 cmd_type;
+#define BNX2FC_SCSI_CMD 1
+#define BNX2FC_TASK_MGMT_CMD 2
+#define BNX2FC_ABTS 3
+#define BNX2FC_ELS 4
+#define BNX2FC_CLEANUP 5
+#define BNX2FC_SEQ_CLEANUP 6
+ u8 io_req_flags;
+ struct kref refcount;
+ struct fcoe_port *port;
+ struct bnx2fc_rport *tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct bnx2fc_mp_req mp_req;
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct delayed_work timeout_work; /* timer for ULP timeouts */
+ struct completion tm_done;
+ int wait_for_comp;
+ u16 xid;
+ struct fcoe_err_report_entry err_entry;
+ struct fcoe_task_ctx_entry *task;
+ struct io_bdt *bd_tbl;
+ struct fcp_rsp *rsp;
+ size_t data_xfer_len;
+ unsigned long req_flags;
+#define BNX2FC_FLAG_ISSUE_RRQ 0x1
+#define BNX2FC_FLAG_ISSUE_ABTS 0x2
+#define BNX2FC_FLAG_ABTS_DONE 0x3
+#define BNX2FC_FLAG_TM_COMPL 0x4
+#define BNX2FC_FLAG_TM_TIMEOUT 0x5
+#define BNX2FC_FLAG_IO_CLEANUP 0x6
+#define BNX2FC_FLAG_RETIRE_OXID 0x7
+#define BNX2FC_FLAG_EH_ABORT 0x8
+#define BNX2FC_FLAG_IO_COMPL 0x9
+#define BNX2FC_FLAG_ELS_DONE 0xa
+#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+#define BNX2FC_FLAG_CMD_LOST 0xc
+#define BNX2FC_FLAG_SRR_SENT 0xd
+ u8 rec_retry;
+ u8 srr_retry;
+ u32 srr_offset;
+ u8 srr_rctl;
+ u32 fcp_resid;
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+ u8 cdb_status; /* SCSI IO status */
+ u8 fcp_status; /* FCP IO status */
+ u8 fcp_rsp_code;
+ u8 scsi_comp_flags;
+};
+
+struct io_bdt {
+ struct bnx2fc_cmd *io_req;
+ struct fcoe_bd_ctx *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+struct bnx2fc_work {
+ struct list_head list;
+ struct bnx2fc_rport *tgt;
+ u16 wqe;
+};
+struct bnx2fc_unsol_els {
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct bnx2fc_hba *hba;
+ struct work_struct unsol_els_work;
+};
+
+
+
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
+void bnx2fc_cmd_release(struct kref *ref);
+int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe);
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba);
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec);
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid);
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset);
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rport,
+ enum fc_rport_event event);
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd);
+
+
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout);
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id);
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid);
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state);
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl);
+
+
+#include "bnx2fc_debug.h"
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
new file mode 100644
index 000000000..e147cc7ee
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -0,0 +1,287 @@
+/* bnx2fc_constants.h: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef __BNX2FC_CONSTANTS_H_
+#define __BNX2FC_CONSTANTS_H_
+
+/**
+ * This file defines HSI constants for the FCoE flows
+ */
+
+/* Current FCoE HSI version number composed of two fields (16 bit) */
+/* Implies on a change broken previous HSI */
+#define FCOE_HSI_MAJOR_VERSION (2)
+/* Implies on a change which does not broken previous HSI */
+#define FCOE_HSI_MINOR_VERSION (1)
+
+/* KWQ/KCQ FCoE layer code */
+#define FCOE_KWQE_LAYER_CODE (7)
+
+/* KWQ (kernel work queue) request op codes */
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+/* KCQ (kernel completion queue) response op codes */
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
+#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6)
+#define FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81)
+
+/* CQE type */
+#define FCOE_PENDING_CQE_TYPE 0
+#define FCOE_UNSOLIC_CQE_TYPE 1
+
+/* Unsolicited CQE type */
+#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
+#define FCOE_ERROR_DETECTION_CQE_TYPE 1
+#define FCOE_WARNING_DETECTION_CQE_TYPE 2
+
+/* E_D_TOV timer resolution in ms */
+#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20)
+
+/* E_D_TOV timer resolution for SDM (4 micro) */
+#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \
+ (FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* REC timer resolution in ms */
+#define FCOE_REC_TIMER_RESOLUTION_MS (20)
+
+/* REC timer resolution for SDM (4 micro) */
+#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */
+#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \
+ (2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS)
+
+/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */
+#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \
+ (3000 / FCOE_REC_TIMER_RESOLUTION_MS)
+
+#define FCOE_NUM_OF_TIMER_TASKS (8 * 1024)
+
+#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8)
+
+/* Task context constants */
+/******** Remove FCP_CMD write tce sleep ***********************/
+/* In case timer services are required then shall be updated by Xstorm after
+ * start processing the task. In case no timer facilities are required then the
+ * driver would initialize the state to this value
+ *
+#define FCOE_TASK_TX_STATE_NORMAL 0
+ * After driver has initialize the task in case timer services required *
+#define FCOE_TASK_TX_STATE_INIT 1
+******** Remove FCP_CMD write tce sleep ***********************/
+/* After driver has initialize the task in case timer services required */
+#define FCOE_TASK_TX_STATE_INIT 0
+/* In case timer services are required then shall be updated by Xstorm after
+ * start processing the task. In case no timer facilities are required then the
+ * driver would initialize the state to this value
+ */
+#define FCOE_TASK_TX_STATE_NORMAL 1
+/* Task is under abort procedure. Updated in order to stop processing of
+ * pending WQEs on this task
+ */
+#define FCOE_TASK_TX_STATE_ABORT 2
+/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
+#define FCOE_TASK_TX_STATE_ERROR 3
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_TX_STATE_WARNING 4
+/* For completed unsolicited task */
+#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
+/* For exchange cleanup request task */
+#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
+/* For sequence cleanup request task */
+#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
+/* For completion the ABTS task. */
+#define FCOE_TASK_TX_STATE_ABTS_TX 8
+
+#define FCOE_TASK_RX_STATE_NORMAL 0
+#define FCOE_TASK_RX_STATE_COMPLETED 1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define FCOE_TASK_RX_STATE_INTER_COMP 2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_RX_STATE_WARNING 3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define FCOE_TASK_RX_STATE_ERROR 4
+/* FW only: First visit at rx-path, part of the abts round trip */
+#define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5
+/* FW only: Second visit at rx-path, after ABTS frame transmitted */
+#define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6
+/* Special completion indication in case of task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
+/* FW only: First visit at rx-path, part of the cleanup round trip */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8
+/* FW only: Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9
+/* Not in used: Special completion indication (in task requested the exchange
+ * cleanup) in case cleaned task is in non-valid.
+ */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10
+/* Special completion indication (in task requested the sequence cleanup) in
+ * case cleaned task was already returned to normal.
+ */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11
+
+
+#define FCOE_TASK_TYPE_WRITE 0
+#define FCOE_TASK_TYPE_READ 1
+#define FCOE_TASK_TYPE_MIDPATH 2
+#define FCOE_TASK_TYPE_UNSOLICITED 3
+#define FCOE_TASK_TYPE_ABTS 4
+#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
+#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
+
+#define FCOE_TASK_DEV_TYPE_DISK 0
+#define FCOE_TASK_DEV_TYPE_TAPE 1
+
+#define FCOE_TASK_CLASS_TYPE_3 0
+#define FCOE_TASK_CLASS_TYPE_2 1
+
+/* FCoE/FC packet fields */
+#define FCOE_ETH_TYPE 0x8906
+
+/* FCoE maximum elements in hash table */
+#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8
+
+/* FCoE half of the elements in hash table */
+#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \
+ (FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2)
+
+/* FcoE number of cached T2 entries */
+#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4)
+
+/* FCoE maximum elements in hash table */
+#define FCOE_HASH_TBL_CHUNK_SIZE 16384
+
+/* Everest FCoE connection type */
+#define B577XX_FCOE_CONNECTION_TYPE 4
+
+/* FCoE number of rows (in log). This number derives
+ * from the maximum connections supported which is 2048.
+ * TBA: Need a different constant for E2
+ */
+#define FCOE_MAX_NUM_SESSIONS_LOG 11
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
+
+/* Error codes for Error Reporting in slow path flows */
+#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0
+#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1
+
+/* Error codes for Error Reporting in fast path flows
+ * XFER error codes
+ */
+#define FCOE_ERROR_CODE_XFER_OOO_RO 0
+#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
+#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
+#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
+#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
+#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
+#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
+#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
+#define FCOE_ERROR_CODE_XFER_FCTL 8
+
+/* FCP RSP error codes */
+#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
+#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
+#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
+#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
+#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
+#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
+#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
+#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
+
+/* FCP DATA error codes */
+#define FCOE_ERROR_CODE_DATA_OOO_RO 20
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
+#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
+#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
+#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
+#define FCOE_ERROR_CODE_DATA_FCTL 28
+
+/* Middle path error codes */
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29
+#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
+#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
+#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
+#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
+#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
+
+/* ABTS error codes */
+#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
+#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
+#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
+
+/* Common error codes */
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
+#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
+#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
+#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
+#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
+#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
+#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
+#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
+
+/* Unsolicited Rx error codes */
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
+#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
+
+#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
+#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
+#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
+
+/* Timer error codes */
+#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
+#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
+
+
+#endif /* BNX2FC_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c
new file mode 100644
index 000000000..d055df01f
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c
@@ -0,0 +1,83 @@
+/* bnx2fc_debug.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include "bnx2fc.h"
+
+void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (likely(!(bnx2fc_debug_level & LOG_IO)))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (io_req && io_req->port && io_req->port->lport &&
+ io_req->port->lport->host)
+ shost_printk(KERN_INFO, io_req->port->lport->host,
+ PFX "xid:0x%x %pV",
+ io_req->xid, &vaf);
+ else
+ pr_info("NULL %pV", &vaf);
+
+ va_end(args);
+}
+
+void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (likely(!(bnx2fc_debug_level & LOG_TGT)))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host &&
+ tgt->rport)
+ shost_printk(KERN_INFO, tgt->port->lport->host,
+ PFX "port:%x %pV",
+ tgt->rport->port_id, &vaf);
+ else
+ pr_info("NULL %pV", &vaf);
+
+ va_end(args);
+}
+
+void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (likely(!(bnx2fc_debug_level & LOG_HBA)))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (lport && lport->host)
+ shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf);
+ else
+ pr_info("NULL %pV", &vaf);
+
+ va_end(args);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
new file mode 100644
index 000000000..2b9006774
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -0,0 +1,46 @@
+/* bnx2fc_debug.h: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef __BNX2FC_DEBUG__
+#define __BNX2FC_DEBUG__
+
+/* Log level bit mask */
+#define LOG_IO 0x01 /* scsi cmd error, cleanup */
+#define LOG_TGT 0x02 /* Session setup, cleanup, etc' */
+#define LOG_HBA 0x04 /* lport events, link, mtu, etc' */
+#define LOG_ELS 0x08 /* ELS logs */
+#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/
+#define LOG_ALL 0xff /* LOG all messages */
+
+extern unsigned int bnx2fc_debug_level;
+
+#define BNX2FC_ELS_DBG(fmt, ...) \
+do { \
+ if (unlikely(bnx2fc_debug_level & LOG_ELS)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define BNX2FC_MISC_DBG(fmt, ...) \
+do { \
+ if (unlikely(bnx2fc_debug_level & LOG_MISC)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+__printf(2, 3)
+void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
+__printf(2, 3)
+void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...);
+__printf(2, 3)
+void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...);
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
new file mode 100644
index 000000000..ef355c13c
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -0,0 +1,918 @@
+/*
+ * bnx2fc_els.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * This file contains helper routines that handle ELS requests
+ * and responses.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
+
+static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req;
+ struct bnx2fc_cmd *rrq_req;
+ int rc = 0;
+
+ BUG_ON(!cb_arg);
+ rrq_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BUG_ON(!orig_io_req);
+ BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
+ orig_io_req->xid, rrq_req->xid);
+
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
+ /*
+ * els req is timed out. cleanup the IO with FW and
+ * drop the completion. Remove from active_cmd_queue.
+ */
+ BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
+ rrq_req->xid);
+
+ if (rrq_req->on_active_queue) {
+ list_del_init(&rrq_req->link);
+ rrq_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(rrq_req);
+ BUG_ON(rc);
+ }
+ }
+ kfree(cb_arg);
+}
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
+{
+
+ struct fc_els_rrq rrq;
+ struct bnx2fc_rport *tgt = aborted_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ unsigned long start = jiffies;
+ int rc;
+
+ BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
+ aborted_io_req->xid);
+ memset(&rrq, 0, sizeof(rrq));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
+ rc = -ENOMEM;
+ goto rrq_err;
+ }
+
+ cb_arg->aborted_io_req = aborted_io_req;
+
+ rrq.rrq_cmd = ELS_RRQ;
+ hton24(rrq.rrq_s_id, sid);
+ rrq.rrq_ox_id = htons(aborted_io_req->xid);
+ rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
+
+retry_rrq:
+ rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
+ bnx2fc_rrq_compl, cb_arg,
+ r_a_tov);
+ if (rc == -ENOMEM) {
+ if (time_after(jiffies, start + (10 * HZ))) {
+ BNX2FC_ELS_DBG("rrq Failed\n");
+ rc = FAILED;
+ goto rrq_err;
+ }
+ msleep(20);
+ goto retry_rrq;
+ }
+rrq_err:
+ if (rc) {
+ BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
+ aborted_io_req->xid);
+ kfree(cb_arg);
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ }
+ return rc;
+}
+
+static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u16 l2_oxid;
+ int frame_len;
+ int rc = 0;
+
+ l2_oxid = cb_arg->l2_oxid;
+ BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
+
+ els_req = cb_arg->io_req;
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
+ /*
+ * els req is timed out. cleanup the IO with FW and
+ * drop the completion. libfc will handle the els timeout
+ */
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(els_req);
+ BUG_ON(rc);
+ }
+ goto free_arg;
+ }
+
+ tgt = els_req->tgt;
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "Unable to alloc mp buf\n");
+ goto free_arg;
+ }
+ hdr_len = sizeof(*fc_hdr);
+ if (hdr_len + resp_len > PAGE_SIZE) {
+ printk(KERN_ERR PFX "l2_els_compl: resp len is "
+ "beyond page size\n");
+ goto free_buf;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+ frame_len = hdr_len + resp_len;
+
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
+
+free_buf:
+ kfree(buf);
+free_arg:
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ /* adisc is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ /* logo is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_rls *rls;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ rls = fc_frame_payload_get(fp, sizeof(*rls));
+ /* rls is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr, *fh;
+ struct bnx2fc_cmd *srr_req;
+ struct bnx2fc_cmd *orig_io_req;
+ struct fc_frame *fp;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u8 opcode;
+ int rc = 0;
+
+ orig_io_req = cb_arg->aborted_io_req;
+ srr_req = cb_arg->io_req;
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
+ /* SRR timedout */
+ BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ rc = bnx2fc_initiate_abts(srr_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(srr_req);
+ }
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+ test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
+ orig_io_req->xid, orig_io_req->req_flags);
+ goto srr_compl_done;
+ }
+ orig_io_req->srr_retry++;
+ if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req,
+ orig_io_req->srr_offset,
+ orig_io_req->srr_rctl);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto srr_compl_done;
+ }
+
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ goto srr_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
+ test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
+ orig_io_req->xid, orig_io_req->req_flags);
+ goto srr_compl_done;
+ }
+ mp_req = &(srr_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ hdr_len = sizeof(*fc_hdr);
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
+ goto srr_compl_done;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+
+ fp = fc_frame_alloc(NULL, resp_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ goto free_buf;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, hdr_len + resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ switch (opcode) {
+ case ELS_LS_ACC:
+ BNX2FC_IO_DBG(srr_req, "SRR success\n");
+ break;
+ case ELS_LS_RJT:
+ BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ break;
+ default:
+ BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
+ opcode);
+ break;
+ }
+ fc_frame_free(fp);
+free_buf:
+ kfree(buf);
+srr_compl_done:
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req, *new_io_req;
+ struct bnx2fc_cmd *rec_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr, *fh;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_els_rec_acc *acc;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_err_report_entry *err_entry;
+ struct scsi_cmnd *sc_cmd;
+ enum fc_rctl r_ctl;
+ unsigned char *buf;
+ void *resp_buf;
+ struct fc_frame *fp;
+ u8 opcode;
+ u32 offset;
+ u32 e_stat;
+ u32 resp_len, hdr_len;
+ int rc = 0;
+ bool send_seq_clnp = false;
+ bool abort_io = false;
+
+ BNX2FC_MISC_DBG("Entered rec_compl callback\n");
+ rec_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
+ tgt = orig_io_req->tgt;
+
+ /* Handle REC timeout case */
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "timed out, abort "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ /* els req is timed out. send abts for els */
+ rc = bnx2fc_initiate_abts(rec_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(rec_req);
+ }
+ orig_io_req->rec_retry++;
+ /* REC timedout. send ABTS to the orig IO req */
+ if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_rec(orig_io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto rec_compl_done;
+ }
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ goto rec_compl_done;
+ }
+
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "completed"
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "abts in prog "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+
+ mp_req = &(rec_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ acc = resp_buf = mp_req->resp_buf;
+
+ hdr_len = sizeof(*fc_hdr);
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
+ goto rec_compl_done;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+
+ fp = fc_frame_alloc(NULL, resp_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ goto free_buf;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, hdr_len + resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if ((rjt->er_reason == ELS_RJT_LOGIC ||
+ rjt->er_reason == ELS_RJT_UNAB) &&
+ rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
+ new_io_req = bnx2fc_cmd_alloc(tgt);
+ if (!new_io_req)
+ goto abort_io;
+ new_io_req->sc_cmd = orig_io_req->sc_cmd;
+ /* cleanup orig_io_req that is with the FW */
+ set_bit(BNX2FC_FLAG_CMD_LOST,
+ &orig_io_req->req_flags);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ /* Post a new IO req with the same sc_cmd */
+ BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
+ rc = bnx2fc_post_io_req(tgt, new_io_req);
+ if (!rc)
+ goto free_frame;
+ BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
+ }
+abort_io:
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ /* REVISIT: Check if the exchange is already aborted */
+ offset = ntohl(acc->reca_fc4value);
+ e_stat = ntohl(acc->reca_e_stat);
+ if (e_stat & ESB_ST_SEQ_INIT) {
+ BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
+ goto free_frame;
+ }
+ BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
+ e_stat, offset);
+ /* Seq initiative is with us */
+ err_entry = (struct fcoe_err_report_entry *)
+ &orig_io_req->err_entry;
+ sc_cmd = orig_io_req->sc_cmd;
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ /* SCSI WRITE command */
+ if (offset == orig_io_req->data_xfer_len) {
+ BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
+ /* FCP_RSP lost */
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ /* start transmitting from offset */
+ BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
+ send_seq_clnp = true;
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+ offset, r_ctl))
+ abort_io = true;
+ /* XFER_RDY */
+ }
+ } else {
+ /* SCSI READ command */
+ if (err_entry->data.rx_buf_off ==
+ orig_io_req->data_xfer_len) {
+ /* FCP_RSP lost */
+ BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ /* request retransmission from this offset */
+ send_seq_clnp = true;
+ offset = err_entry->data.rx_buf_off;
+ BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
+ /* FCP_DATA lost */
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+ offset, r_ctl))
+ abort_io = true;
+ }
+ }
+ if (abort_io) {
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
+ " failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ } else if (!send_seq_clnp) {
+ BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc) {
+ BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
+ " IO will abort\n");
+ }
+ }
+ }
+free_frame:
+ fc_frame_free(fp);
+free_buf:
+ kfree(buf);
+rec_compl_done:
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
+{
+ struct fc_els_rec rec;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
+ memset(&rec, 0, sizeof(rec));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
+ rc = -ENOMEM;
+ goto rec_err;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ rec.rec_cmd = ELS_REC;
+ hton24(rec.rec_s_id, sid);
+ rec.rec_ox_id = htons(orig_io_req->xid);
+ rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+
+ rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
+ bnx2fc_rec_compl, cb_arg,
+ r_a_tov);
+rec_err:
+ if (rc) {
+ BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ kfree(cb_arg);
+ }
+ return rc;
+}
+
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
+{
+ struct fcp_srr srr;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
+ memset(&srr, 0, sizeof(srr));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
+ rc = -ENOMEM;
+ goto srr_err;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ srr.srr_op = ELS_SRR;
+ srr.srr_ox_id = htons(orig_io_req->xid);
+ srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+ srr.srr_rel_off = htonl(offset);
+ srr.srr_r_ctl = r_ctl;
+ orig_io_req->srr_offset = offset;
+ orig_io_req->srr_rctl = r_ctl;
+
+ rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
+ bnx2fc_srr_compl, cb_arg,
+ r_a_tov);
+srr_err:
+ if (rc) {
+ BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ kfree(cb_arg);
+ } else
+ set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
+
+ return rc;
+}
+
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int rc = 0;
+ int task_idx, index;
+ u32 did, sid;
+ u16 xid;
+
+ rc = fc_remote_port_chkready(rport);
+ if (rc) {
+ printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
+ (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
+ printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
+ if (!els_req) {
+ rc = -ENOMEM;
+ goto els_err;
+ }
+
+ els_req->sc_cmd = NULL;
+ els_req->port = port;
+ els_req->tgt = tgt;
+ els_req->cb_func = cb_func;
+ cb_arg->io_req = els_req;
+ els_req->cb_arg = cb_arg;
+
+ mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
+ rc = bnx2fc_init_mp_req(els_req);
+ if (rc == FAILED) {
+ printk(KERN_ERR PFX "ELS MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -ENOMEM;
+ goto els_err;
+ } else {
+ /* rc SUCCESS */
+ rc = 0;
+ }
+
+ /* Set the data_xfer_len to the size of ELS payload */
+ mp_req->req_len = data_len;
+ els_req->data_xfer_len = mp_req->req_len;
+
+ /* Fill ELS Payload */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+ memcpy(mp_req->req_buf, data, data_len);
+ } else {
+ printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ goto els_err;
+
+ /* Fill FC header */
+ fc_hdr = &(mp_req->req_fc_hdr);
+
+ did = tgt->rport->port_id;
+ sid = tgt->sid;
+
+ if (op == ELS_SRR)
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ |
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+ else
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ |
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ /* Obtain exchange id */
+ xid = els_req->xid;
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(els_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_els.. session not ready\n");
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EINVAL;
+ }
+
+ if (timer_msec)
+ bnx2fc_cmd_timer_set(els_req, timer_msec);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ els_req->on_active_queue = 1;
+ list_add_tail(&els_req->link, &tgt->els_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+els_err:
+ return rc;
+}
+
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ u64 *hdr;
+ u64 *temp_hdr;
+
+ BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
+ "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &els_req->req_flags)) {
+ BNX2FC_ELS_DBG("Timer context finished processing this "
+ "els - 0x%x\n", els_req->xid);
+ /* This IO doesn't receive cleanup completion */
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ return;
+ }
+
+ /* Cancel the timeout_work, as we received the response */
+ if (cancel_delayed_work(&els_req->timeout_work))
+ kref_put(&els_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ }
+
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ mp_req->resp_len =
+ task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
+
+ /* Parse ELS response */
+ if ((els_req->cb_func) && (els_req->cb_arg)) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+}
+
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ u8 *mac;
+ u8 op;
+
+ if (IS_ERR(fp))
+ goto done;
+
+ mac = fr_cb(fp)->granted_mac;
+ if (is_zero_ether_addr(mac)) {
+ op = fc_frame_payload_op(fp);
+ if (lport->vport) {
+ if (op == ELS_LS_RJT) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
+ fc_vport_terminate(lport->vport);
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ fcoe_ctlr_recv_flogi(fip, lport, fp);
+ }
+ if (!is_zero_ether_addr(mac))
+ fip->update_mac(lport, mac);
+done:
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ static u8 zero_mac[ETH_ALEN] = { 0 };
+
+ if (!IS_ERR(fp))
+ fip->update_mac(lport, zero_mac);
+ fc_lport_logo_resp(seq, fp, lport);
+}
+
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (op) {
+ case ELS_FLOGI:
+ case ELS_FDISC:
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
+ fip, timeout);
+ case ELS_LOGO:
+ /* only hook onto fabric logouts, not port logouts */
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ break;
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
+ fip, timeout);
+ }
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
new file mode 100644
index 000000000..98d06d151
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -0,0 +1,2826 @@
+/* bnx2fc_fcoe.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that interacts with libfc, libfcoe,
+ * cnic modules to create FCoE instances, send/receive non-offloaded
+ * FIP/FCoE packets, listen to link events etc.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static struct list_head adapter_list;
+static struct list_head if_list;
+static u32 adapter_count;
+static DEFINE_MUTEX(bnx2fc_dev_lock);
+DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+#define DRV_MODULE_NAME "bnx2fc"
+#define DRV_MODULE_VERSION BNX2FC_VERSION
+#define DRV_MODULE_RELDATE "Dec 11, 2013"
+
+
+static char version[] =
+ "QLogic NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
+MODULE_DESCRIPTION("QLogic NetXtreme II BCM57710 FCoE Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNX2FC_MAX_QUEUE_DEPTH 256
+#define BNX2FC_MIN_QUEUE_DEPTH 32
+#define FCOE_WORD_TO_BYTE 4
+
+static struct scsi_transport_template *bnx2fc_transport_template;
+static struct scsi_transport_template *bnx2fc_vport_xport_template;
+
+struct workqueue_struct *bnx2fc_wq;
+
+/* bnx2fc structure needs only one instance of the fcoe_percpu_s structure.
+ * Here the io threads are per cpu but the l2 thread is just one
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
+
+static struct cnic_ulp_ops bnx2fc_cnic_cb;
+static struct libfc_function_template bnx2fc_libfc_fcn_templ;
+static struct scsi_host_template bnx2fc_shost_template;
+static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
+static struct fc_function_template bnx2fc_vport_xport_function;
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
+static int bnx2fc_destroy(struct net_device *net_device);
+static int bnx2fc_enable(struct net_device *netdev);
+static int bnx2fc_disable(struct net_device *netdev);
+
+/* fcoe_syfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+static void bnx2fc_recv_frame(struct sk_buff *skb);
+
+static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
+static int bnx2fc_lport_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+ struct device *parent, int npiv);
+static void bnx2fc_destroy_work(struct work_struct *work);
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+ *phys_dev);
+static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface);
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport);
+static void bnx2fc_stop(struct bnx2fc_interface *interface);
+static int __init bnx2fc_mod_init(void);
+static void __exit bnx2fc_mod_exit(void);
+
+unsigned int bnx2fc_debug_level;
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2fc_cpu_notifier = {
+ .notifier_call = bnx2fc_cpu_callback,
+};
+
+static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
+{
+ return ((struct bnx2fc_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
+}
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
+static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
+{
+ struct fcoe_percpu_s *bg;
+ struct fcoe_rcv_info *fr;
+ struct sk_buff_head *list;
+ struct sk_buff *skb, *next;
+ struct sk_buff *head;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ list = &bg->fcoe_rx_list;
+ head = list->next;
+ for (skb = head; skb != (struct sk_buff *)list;
+ skb = next) {
+ next = skb->next;
+ fr = fcoe_dev_from_skb(skb);
+ if (fr->fr_dev == lp) {
+ __skb_unlink(skb, list);
+ kfree_skb(skb);
+ }
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+}
+
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ int rc;
+ spin_lock(&bnx2fc_global_lock);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+ spin_unlock(&bnx2fc_global_lock);
+
+ return rc;
+}
+
+static void bnx2fc_abort_io(struct fc_lport *lport)
+{
+ /*
+ * This function is no-op for bnx2fc, but we do
+ * not want to leave it as NULL either, as libfc
+ * can call the default function which is
+ * fc_fcp_abort_io.
+ */
+}
+
+static void bnx2fc_cleanup(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct bnx2fc_rport *tgt;
+ int i;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&hba->hba_mutex);
+ spin_lock_bh(&hba->hba_lock);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if (tgt) {
+ /* Cleanup IOs belonging to requested vport */
+ if (tgt->port == port) {
+ spin_unlock_bh(&hba->hba_lock);
+ BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
+ bnx2fc_flush_active_ios(tgt);
+ spin_lock_bh(&hba->hba_lock);
+ }
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ mutex_unlock(&hba->hba_mutex);
+}
+
+static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
+ struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_frame_header *fh;
+ int rc = 0;
+
+ fh = fc_frame_header_get(fp);
+ BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
+ "r_ctl = 0x%x\n", rdata->ids.port_id,
+ ntohs(fh->fh_ox_id), fh->fh_r_ctl);
+ if ((fh->fh_type == FC_TYPE_ELS) &&
+ (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_ADISC:
+ rc = bnx2fc_send_adisc(tgt, fp);
+ break;
+ case ELS_LOGO:
+ rc = bnx2fc_send_logo(tgt, fp);
+ break;
+ case ELS_RLS:
+ rc = bnx2fc_send_rls(tgt, fp);
+ break;
+ default:
+ break;
+ }
+ } else if ((fh->fh_type == FC_TYPE_BLS) &&
+ (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
+ BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
+ else {
+ BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
+ "rctl 0x%x thru non-offload path\n",
+ fh->fh_type, fh->fh_r_ctl);
+ return -ENODEV;
+ }
+ if (rc)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+/**
+ * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
+ *
+ * @lport: the associated local port
+ * @fp: the fc_frame to be transmitted
+ */
+static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_frame_header *fh;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct fcoe_hdr *hp;
+ struct bnx2fc_rport *tgt;
+ struct fc_stats *stats;
+ u8 sof, eof;
+ u32 crc;
+ unsigned int hlen, tlen, elen;
+ int wlen, rc = 0;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
+ hba = interface->hba;
+
+ fh = fc_frame_header_get(fp);
+
+ skb = fp_skb(fp);
+ if (!lport->link_up) {
+ BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (!ctlr->sel_fcf) {
+ BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (fcoe_ctlr_els_send(ctlr, lport, skb))
+ return 0;
+ }
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ /*
+ * Snoop the frame header to check if the frame is for
+ * an offloaded session
+ */
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both hba mutex and hba lock. Atleast hba mutex or
+ * hba lock needs to be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+ if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ /* This frame is for offloaded session */
+ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
+ "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
+ spin_unlock_bh(&hba->hba_lock);
+ rc = bnx2fc_xmit_l2_frame(tgt, fp);
+ if (rc != -ENODEV) {
+ kfree_skb(skb);
+ return rc;
+ }
+ } else {
+ spin_unlock_bh(&hba->hba_lock);
+ }
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+
+ /* copy port crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp);
+ cp = NULL;
+ }
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+ skb->dev = interface->netdev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (ctlr->map_dest)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lport->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+
+ /*update tx stats */
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(lport, skb);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
+ *
+ * @skb: the receive socket buffer
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * This function receives the packet and builds FC frame and passes it up
+ */
+static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ struct fc_frame_header *fh;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+ struct sk_buff *tmp_skb;
+ unsigned short oxid;
+
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fcoe_packet_type);
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
+
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
+ goto err;
+ }
+
+ tmp_skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!tmp_skb)
+ goto err;
+
+ skb = tmp_skb;
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lport;
+
+ bg = &bnx2fc_global;
+ spin_lock(&bg->fcoe_rx_list.lock);
+
+ __skb_queue_tail(&bg->fcoe_rx_list, skb);
+ if (bg->fcoe_rx_list.qlen == 1)
+ wake_up_process(bg->thread);
+
+ spin_unlock(&bg->fcoe_rx_list.lock);
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int bnx2fc_l2_rcv_thread(void *arg)
+{
+ struct fcoe_percpu_s *bg = arg;
+ struct sk_buff *skb;
+
+ set_user_nice(current, MIN_NICE);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ bnx2fc_recv_frame(skb);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ }
+ __set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+
+static void bnx2fc_recv_frame(struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fc_stats *stats;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ struct fc_lport *vn_port;
+ struct fcoe_port *port;
+ u8 *mac = NULL;
+ u8 *dest_mac = NULL;
+ struct fcoe_hdr *hp;
+
+ fr = fcoe_dev_from_skb(skb);
+ lport = fr->fr_dev;
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ERR PFX "Invalid lport struct\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+ mac = eth_hdr(skb)->h_source;
+ dest_mac = eth_hdr(skb)->h_dest;
+
+ /* Pull the header */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ kfree_skb(skb);
+ return;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (vn_port) {
+ port = lport_priv(vn_port);
+ if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
+ BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
+ kfree_skb(skb);
+ return;
+ }
+ }
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ /* Drop FCP data. We dont this in L2 path */
+ kfree_skb(skb);
+ return;
+ }
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fh->fh_type == FC_TYPE_ELS) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LOGO:
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ /* drop non-FIP LOGO */
+ kfree_skb(skb);
+ return;
+ }
+ break;
+ }
+ }
+
+ if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
+ /* Drop incoming ABTS */
+ kfree_skb(skb);
+ return;
+ }
+
+ stats = per_cpu_ptr(lport->stats, smp_processor_id());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING PFX "dropping frame with "
+ "CRC error\n");
+ stats->InvalidCRCCount++;
+ kfree_skb(skb);
+ return;
+ }
+ fc_exch_recv(lport, fp);
+}
+
+/**
+ * bnx2fc_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg: ptr to bnx2fc_percpu_info structure
+ */
+int bnx2fc_percpu_io_thread(void *arg)
+{
+ struct bnx2fc_percpu_s *p = arg;
+ struct bnx2fc_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, MIN_NICE);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ spin_lock_bh(&p->fp_work_lock);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->fp_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_lock_bh(&p->fp_work_lock);
+ }
+ __set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&p->fp_work_lock);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *bnx2fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct fcoe_statistics_params *fw_stats;
+ int rc = 0;
+
+ fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
+ if (!fw_stats)
+ return NULL;
+
+ bnx2fc_stats = fc_get_host_stats(shost);
+
+ init_completion(&hba->stat_req_done);
+ if (bnx2fc_send_stat_req(hba))
+ return bnx2fc_stats;
+ rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+ if (!rc) {
+ BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+ return bnx2fc_stats;
+ }
+ BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+ bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
+ bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
+ bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
+ bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
+ bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
+
+ bnx2fc_stats->dumped_frames = 0;
+ bnx2fc_stats->lip_count = 0;
+ bnx2fc_stats->nos_count = 0;
+ bnx2fc_stats->loss_of_sync_count = 0;
+ bnx2fc_stats->loss_of_signal_count = 0;
+ bnx2fc_stats->prim_seq_protocol_err_count = 0;
+
+ memcpy(&hba->prev_stats, hba->stats_buffer,
+ sizeof(struct fcoe_statistics_params));
+ return bnx2fc_stats;
+}
+
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct Scsi_Host *shost = lport->host;
+ int rc = 0;
+
+ shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
+ shost->max_lun = BNX2FC_MAX_LUN;
+ shost->max_id = BNX2FC_MAX_FCP_TGT;
+ shost->max_channel = 0;
+ if (lport->vport)
+ shost->transportt = bnx2fc_vport_xport_template;
+ else
+ shost->transportt = bnx2fc_transport_template;
+
+ /* Add the new host to SCSI-ml */
+ rc = scsi_add_host(lport->host, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on scsi_add_host\n");
+ return rc;
+ }
+ if (!lport->vport)
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "%s (QLogic %s) v%s over %s",
+ BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
+ interface->netdev->name);
+
+ return 0;
+}
+
+static int bnx2fc_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct net_device *dev = hba->phys_dev;
+ int rc = 0;
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else {
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * bnx2fc_get_link_state - get network link state
+ *
+ * @hba: adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_port *port;
+ u64 wwnn, wwpn;
+
+ port = lport_priv(lport);
+ interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
+ hba = interface->hba;
+
+ /* require support for get_pauseparam ethtool op. */
+ if (!hba->phys_dev->ethtool_ops ||
+ !hba->phys_dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (fc_set_mfs(lport, BNX2FC_MFS))
+ return -EINVAL;
+
+ skb_queue_head_init(&port->fcoe_pending_queue);
+ port->fcoe_pending_queue_active = 0;
+ setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+
+ fcoe_link_speed_update(lport);
+
+ if (!lport->vport) {
+ if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
+ 1, 0);
+ BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
+ fc_set_wwnn(lport, wwnn);
+
+ if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
+ 2, 0);
+
+ BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
+ fc_set_wwpn(lport, wwpn);
+ }
+
+ return 0;
+}
+
+static void bnx2fc_destroy_timer(unsigned long data)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+
+ printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
+ "Destroy compl not received!!\n");
+ set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
+ wake_up_interruptible(&hba->destroy_wait);
+}
+
+/**
+ * bnx2fc_indicate_netevent - Generic netdev event handler
+ *
+ * @context: adapter structure pointer
+ * @event: event type
+ * @vlan_id: vlan id - associated vlan id with this event
+ *
+ * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and
+ * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans.
+ */
+static void bnx2fc_indicate_netevent(void *context, unsigned long event,
+ u16 vlan_id)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fcoe_ctlr_device *cdev;
+ struct fc_lport *lport;
+ struct fc_lport *vport;
+ struct bnx2fc_interface *interface, *tmp;
+ struct fcoe_ctlr *ctlr;
+ int wait_for_upload = 0;
+ u32 link_possible = 1;
+
+ if (vlan_id != 0 && event != NETDEV_UNREGISTER)
+ return;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ printk(KERN_ERR "indicate_netevent: "\
+ "hba is not UP!!\n");
+ break;
+
+ case NETDEV_DOWN:
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_CHANGE:
+ break;
+
+ case NETDEV_UNREGISTER:
+ if (!vlan_id)
+ return;
+ mutex_lock(&bnx2fc_dev_lock);
+ list_for_each_entry_safe(interface, tmp, &if_list, list) {
+ if (interface->hba == hba &&
+ interface->vlan_id == (vlan_id & VLAN_VID_MASK))
+ __bnx2fc_destroy(interface);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Ensure ALL destroy work has been completed before return */
+ flush_workqueue(bnx2fc_wq);
+ return;
+
+ default:
+ printk(KERN_ERR PFX "Unknown netevent %ld", event);
+ return;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+ list_for_each_entry(interface, &if_list, list) {
+
+ if (interface->hba != hba)
+ continue;
+
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
+ BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
+ interface->netdev->name, event);
+
+ fcoe_link_speed_update(lport);
+
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ /* Reset max recv frame size to default */
+ fc_set_mfs(lport, BNX2FC_MFS);
+ /*
+ * ctlr link up will only be handled during
+ * enable to avoid sending discovery
+ * solicitation on a stale vlan
+ */
+ if (interface->enabled)
+ fcoe_ctlr_link_up(ctlr);
+ };
+ } else if (fcoe_ctlr_link_down(ctlr)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+ };
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ if (wait_for_upload) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_MISC_DBG("indicate_netevent "
+ "num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 1;
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 0;
+
+ if (signal_pending(current))
+ flush_signals(current);
+ }
+}
+
+static int bnx2fc_libfc_config(struct fc_lport *lport)
+{
+
+ /* Set the function pointers set by bnx2fc driver */
+ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
+ sizeof(struct libfc_function_template));
+ fc_elsct_init(lport);
+ fc_exch_init(lport);
+ fc_rport_init(lport);
+ fc_disc_init(lport);
+ fc_disc_config(lport, lport);
+ return 0;
+}
+
+static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
+{
+ int fcoe_min_xid, fcoe_max_xid;
+
+ fcoe_min_xid = hba->max_xid + 1;
+ if (nr_cpu_ids <= 2)
+ fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
+ else
+ fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
+ if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid,
+ fcoe_max_xid, NULL)) {
+ printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bnx2fc_lport_config(struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = BNX2FC_MAX_RETRY_CNT;
+ lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ lport->does_npiv = 1;
+
+ memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
+ lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
+
+ /* alloc stats structure */
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish fc_lport configuration */
+ fc_lport_config(lport);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_fip_recv - handle a received FIP frame.
+ *
+ * @skb: the received skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @ dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fip_packet_type);
+ ctlr = bnx2fc_to_ctlr(interface);
+ fcoe_ctlr_recv(ctlr, skb);
+ return 0;
+}
+
+/**
+ * bnx2fc_update_src_mac - Update Ethernet MAC filters.
+ *
+ * @fip: FCoE controller.
+ * @old: Unicast MAC address to delete if the MAC is non-zero.
+ * @new: Unicast MAC address to add.
+ *
+ * Remove any previously-set unicast MAC filter.
+ * Add secondary FCoE MAC address filter for our OUI.
+ */
+static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct fcoe_port *port = lport_priv(lport);
+
+ memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * bnx2fc_get_src_mac - return the ethernet source address for an lport
+ *
+ * @lport: libfc port
+ */
+static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
+{
+ struct fcoe_port *port;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ return port->data_src_addr;
+}
+
+/**
+ * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
+ *
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = bnx2fc_from_ctlr(fip)->netdev;
+ dev_queue_xmit(skb);
+}
+
+static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(n_port);
+ struct bnx2fc_interface *interface = port->priv;
+ struct net_device *netdev = interface->netdev;
+ struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR PFX "Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
+
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
+ printk(KERN_ERR PFX "vn ports cannot be created on"
+ "this interface\n");
+ return -EIO;
+ }
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+ vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+
+ if (!vn_port) {
+ printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
+ netdev->name);
+ return -EIO;
+ }
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_lport_init(vn_port);
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+ return 0;
+}
+
+static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport)
+{
+ struct bnx2fc_lport *blport, *tmp;
+
+ spin_lock_bh(&hba->hba_lock);
+ list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
+ if (blport->lport == lport) {
+ list_del(&blport->list);
+ kfree(blport);
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+}
+
+static int bnx2fc_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+ struct fcoe_port *port = lport_priv(vn_port);
+ struct bnx2fc_interface *interface = port->priv;
+ struct fc_lport *v_port;
+ bool found = false;
+
+ mutex_lock(&n_port->lp_mutex);
+ list_for_each_entry(v_port, &n_port->vports, list)
+ if (v_port->vport == vport) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ mutex_unlock(&n_port->lp_mutex);
+ return -ENOENT;
+ }
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+ bnx2fc_free_vport(interface->hba, port->lport);
+ bnx2fc_port_shutdown(port->lport);
+ bnx2fc_interface_put(interface);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+}
+
+static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+ return 0;
+}
+
+
+static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
+{
+ struct net_device *netdev = interface->netdev;
+ struct net_device *physdev = interface->hba->phys_dev;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct netdev_hw_addr *ha;
+ int sel_san_mac = 0;
+
+ /* setup Source MAC Address */
+ rcu_read_lock();
+ for_each_dev_addr(physdev, ha) {
+ BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
+ ha->type);
+ printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
+ ha->addr[1], ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
+
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(ha->addr))) {
+ memcpy(ctlr->ctl_src_addr, ha->addr,
+ ETH_ALEN);
+ sel_san_mac = 1;
+ BNX2FC_MISC_DBG("Found SAN MAC\n");
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sel_san_mac)
+ return -ENODEV;
+
+ interface->fip_packet_type.func = bnx2fc_fip_recv;
+ interface->fip_packet_type.type = htons(ETH_P_FIP);
+ interface->fip_packet_type.dev = netdev;
+ dev_add_pack(&interface->fip_packet_type);
+
+ interface->fcoe_packet_type.func = bnx2fc_rcv;
+ interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ interface->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&interface->fcoe_packet_type);
+
+ return 0;
+}
+
+static int bnx2fc_attach_transport(void)
+{
+ bnx2fc_transport_template =
+ fc_attach_transport(&bnx2fc_transport_function);
+
+ if (bnx2fc_transport_template == NULL) {
+ printk(KERN_ERR PFX "Failed to attach FC transport\n");
+ return -ENODEV;
+ }
+
+ bnx2fc_vport_xport_template =
+ fc_attach_transport(&bnx2fc_vport_xport_function);
+ if (bnx2fc_vport_xport_template == NULL) {
+ printk(KERN_ERR PFX
+ "Failed to attach FC transport for vport\n");
+ fc_release_transport(bnx2fc_transport_template);
+ bnx2fc_transport_template = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+static void bnx2fc_release_transport(void)
+{
+ fc_release_transport(bnx2fc_transport_template);
+ fc_release_transport(bnx2fc_vport_xport_template);
+ bnx2fc_transport_template = NULL;
+ bnx2fc_vport_xport_template = NULL;
+}
+
+static void bnx2fc_interface_release(struct kref *kref)
+{
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ struct net_device *netdev;
+
+ interface = container_of(kref, struct bnx2fc_interface, kref);
+ BNX2FC_MISC_DBG("Interface is being released\n");
+
+ ctlr = bnx2fc_to_ctlr(interface);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ netdev = interface->netdev;
+
+ /* tear-down FIP controller */
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
+ fcoe_ctlr_destroy(ctlr);
+
+ fcoe_ctlr_device_delete(ctlr_dev);
+
+ dev_put(netdev);
+ module_put(THIS_MODULE);
+}
+
+static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
+{
+ kref_get(&interface->kref);
+}
+
+static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
+{
+ kref_put(&interface->kref, bnx2fc_interface_release);
+}
+static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
+{
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ kfree(hba->tgt_ofld_list);
+ bnx2fc_unbind_pcidev(hba);
+ kfree(hba);
+}
+
+/**
+ * bnx2fc_hba_create - create a new bnx2fc hba
+ *
+ * @cnic: pointer to cnic device
+ *
+ * Creates a new FCoE hba on the given device.
+ *
+ */
+static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
+{
+ struct bnx2fc_hba *hba;
+ struct fcoe_capabilities *fcoe_cap;
+ int rc;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR PFX "Unable to allocate hba structure\n");
+ return NULL;
+ }
+ spin_lock_init(&hba->hba_lock);
+ mutex_init(&hba->hba_mutex);
+
+ hba->cnic = cnic;
+
+ hba->max_tasks = cnic->max_fcoe_exchanges;
+ hba->elstm_xids = (hba->max_tasks / 2);
+ hba->max_outstanding_cmds = hba->elstm_xids;
+ hba->max_xid = (hba->max_tasks - 1);
+
+ rc = bnx2fc_bind_pcidev(hba);
+ if (rc) {
+ printk(KERN_ERR PFX "create_adapter: bind error\n");
+ goto bind_err;
+ }
+ hba->phys_dev = cnic->netdev;
+ hba->next_conn_id = 0;
+
+ hba->tgt_ofld_list =
+ kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
+ GFP_KERNEL);
+ if (!hba->tgt_ofld_list) {
+ printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
+ goto tgtofld_err;
+ }
+
+ hba->num_ofld_sess = 0;
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+ goto cmgr_err;
+ }
+ fcoe_cap = &hba->fcoe_cap;
+
+ fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
+ FCOE_IOS_PER_CONNECTION_SHIFT;
+ fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
+ FCOE_LOGINS_PER_PORT_SHIFT;
+ fcoe_cap->capability2 = hba->max_outstanding_cmds <<
+ FCOE_NUMBER_OF_EXCHANGES_SHIFT;
+ fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
+ FCOE_NPIV_WWN_PER_PORT_SHIFT;
+ fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
+ FCOE_TARGETS_SUPPORTED_SHIFT;
+ fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
+ FCOE_OUTSTANDING_COMMANDS_SHIFT;
+ fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ init_waitqueue_head(&hba->destroy_wait);
+ INIT_LIST_HEAD(&hba->vports);
+
+ return hba;
+
+cmgr_err:
+ kfree(hba->tgt_ofld_list);
+tgtofld_err:
+ bnx2fc_unbind_pcidev(hba);
+bind_err:
+ kfree(hba);
+ return NULL;
+}
+
+struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
+ struct net_device *netdev,
+ enum fip_state fip_mode)
+{
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ int size;
+ int rc = 0;
+
+ size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
+ printk(KERN_ERR PFX "Unable to allocate interface structure\n");
+ return NULL;
+ }
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ ctlr->cdev = ctlr_dev;
+ interface = fcoe_ctlr_priv(ctlr);
+ dev_hold(netdev);
+ kref_init(&interface->kref);
+ interface->hba = hba;
+ interface->netdev = netdev;
+
+ /* Initialize FIP */
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = bnx2fc_fip_send;
+ ctlr->update_mac = bnx2fc_update_src_mac;
+ ctlr->get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
+
+ rc = bnx2fc_interface_setup(interface);
+ if (!rc)
+ return interface;
+
+ fcoe_ctlr_destroy(ctlr);
+ dev_put(netdev);
+ fcoe_ctlr_device_delete(ctlr_dev);
+ return NULL;
+}
+
+/**
+ * bnx2fc_if_create - Create FCoE instance on a given interface
+ *
+ * @interface: FCoE interface to create a local port on
+ * @parent: Device pointer to be the parent in sysfs for the SCSI host
+ * @npiv: Indicates if the port is vport or not
+ *
+ * Creates a fc_lport instance and a Scsi_Host instance and configure them.
+ *
+ * Returns: Allocated fc_lport or an error pointer
+ */
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+ struct device *parent, int npiv)
+{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport, *n_port;
+ struct fcoe_port *port;
+ struct Scsi_Host *shost;
+ struct fc_vport *vport = dev_to_vport(parent);
+ struct bnx2fc_lport *blport;
+ struct bnx2fc_hba *hba = interface->hba;
+ int rc = 0;
+
+ blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
+ if (!blport) {
+ BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
+ return NULL;
+ }
+
+ /* Allocate Scsi_Host structure */
+ bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
+ if (!npiv)
+ lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
+ else
+ lport = libfc_vport_create(vport, sizeof(*port));
+
+ if (!lport) {
+ printk(KERN_ERR PFX "could not allocate scsi host structure\n");
+ goto free_blport;
+ }
+ shost = lport->host;
+ port = lport_priv(lport);
+ port->lport = lport;
+ port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
+ INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+ if (rc)
+ goto lp_config_err;
+
+ if (npiv) {
+ printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
+ vport->node_name, vport->port_name);
+ fc_set_wwnn(lport, vport->node_name);
+ fc_set_wwpn(lport, vport->port_name);
+ }
+ /* Configure netdev and networking properties of the lport */
+ rc = bnx2fc_net_config(lport, interface->netdev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
+ goto lp_config_err;
+ }
+
+ rc = bnx2fc_shost_config(lport, parent);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ interface->netdev->name);
+ goto lp_config_err;
+ }
+
+ /* Initialize the libfc library */
+ rc = bnx2fc_libfc_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ goto shost_err;
+ }
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+
+ /* Allocate exchange manager */
+ if (!npiv)
+ rc = bnx2fc_em_config(lport, hba);
+ else {
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ rc = fc_exch_mgr_list_clone(n_port, lport);
+ }
+
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
+ goto shost_err;
+ }
+
+ bnx2fc_interface_get(interface);
+
+ spin_lock_bh(&hba->hba_lock);
+ blport->lport = lport;
+ list_add_tail(&blport->list, &hba->vports);
+ spin_unlock_bh(&hba->hba_lock);
+
+ return lport;
+
+shost_err:
+ scsi_remove_host(shost);
+lp_config_err:
+ scsi_host_put(lport->host);
+free_blport:
+ kfree(blport);
+ return NULL;
+}
+
+static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
+{
+ /* Dont listen for Ethernet packets anymore */
+ __dev_remove_pack(&interface->fcoe_packet_type);
+ __dev_remove_pack(&interface->fip_packet_type);
+ synchronize_net();
+}
+
+static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
+{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = interface->hba;
+
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ bnx2fc_net_cleanup(interface);
+
+ bnx2fc_free_vport(hba, lport);
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+
+ /* Free queued packets for the receive thread */
+ bnx2fc_clean_rx_queue(lport);
+
+ /* Detach from scsi-ml */
+ fc_remove_host(lport->host);
+ scsi_remove_host(lport->host);
+
+ /*
+ * Note that only the physical lport will have the exchange manager.
+ * for vports, this function is NOP
+ */
+ fc_exch_mgr_free(lport);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(lport);
+
+ /* Release Scsi_Host */
+ scsi_host_put(lport->host);
+}
+
+static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
+{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
+ struct fcoe_port *port = lport_priv(lport);
+
+ bnx2fc_interface_cleanup(interface);
+ bnx2fc_stop(interface);
+ list_del(&interface->list);
+ bnx2fc_interface_put(interface);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+}
+
+/**
+ * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
+ *
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_destroy(struct net_device *netdev)
+{
+ struct bnx2fc_interface *interface = NULL;
+ struct workqueue_struct *timer_work_queue;
+ struct fcoe_ctlr *ctlr;
+ int rc = 0;
+
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+
+ interface = bnx2fc_interface_lookup(netdev);
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
+ goto netdev_err;
+ }
+
+ timer_work_queue = interface->timer_work_queue;
+ __bnx2fc_destroy(interface);
+ destroy_workqueue(timer_work_queue);
+
+netdev_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+static void bnx2fc_destroy_work(struct work_struct *work)
+{
+ struct fcoe_port *port;
+ struct fc_lport *lport;
+
+ port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+
+ BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+
+ bnx2fc_if_destroy(lport);
+}
+
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ bnx2fc_free_fw_resc(hba);
+ bnx2fc_free_task_ctx(hba);
+}
+
+/**
+ * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated
+ * pci structure
+ *
+ * @hba: Adapter instance
+ */
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ if (bnx2fc_setup_task_ctx(hba))
+ goto mem_err;
+
+ if (bnx2fc_setup_fw_resc(hba))
+ goto mem_err;
+
+ return 0;
+mem_err:
+ bnx2fc_unbind_adapter_devices(hba);
+ return -ENOMEM;
+}
+
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
+{
+ struct cnic_dev *cnic;
+ struct pci_dev *pdev;
+
+ if (!hba->cnic) {
+ printk(KERN_ERR PFX "cnic is NULL\n");
+ return -ENODEV;
+ }
+ cnic = hba->cnic;
+ pdev = hba->pcidev = cnic->pcidev;
+ if (!hba->pcidev)
+ return -ENODEV;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_NX2_57710:
+ strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+ break;
+ case PCI_DEVICE_ID_NX2_57711:
+ strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+ break;
+ case PCI_DEVICE_ID_NX2_57712:
+ case PCI_DEVICE_ID_NX2_57712_MF:
+ case PCI_DEVICE_ID_NX2_57712_VF:
+ strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+ break;
+ case PCI_DEVICE_ID_NX2_57800:
+ case PCI_DEVICE_ID_NX2_57800_MF:
+ case PCI_DEVICE_ID_NX2_57800_VF:
+ strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+ break;
+ case PCI_DEVICE_ID_NX2_57810:
+ case PCI_DEVICE_ID_NX2_57810_MF:
+ case PCI_DEVICE_ID_NX2_57810_VF:
+ strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+ break;
+ case PCI_DEVICE_ID_NX2_57840:
+ case PCI_DEVICE_ID_NX2_57840_MF:
+ case PCI_DEVICE_ID_NX2_57840_VF:
+ case PCI_DEVICE_ID_NX2_57840_2_20:
+ case PCI_DEVICE_ID_NX2_57840_4_10:
+ strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+ break;
+ default:
+ pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
+ break;
+ }
+ pci_dev_get(hba->pcidev);
+ return 0;
+}
+
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
+{
+ if (hba->pcidev) {
+ hba->chip_num[0] = '\0';
+ pci_dev_put(hba->pcidev);
+ }
+ hba->pcidev = NULL;
+}
+
+/**
+ * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
+ *
+ * @handle: transport handle pointing to adapter struture
+ */
+static int bnx2fc_ulp_get_stats(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct cnic_dev *cnic;
+ struct fcoe_stats_info *stats_addr;
+
+ if (!hba)
+ return -EINVAL;
+
+ cnic = hba->cnic;
+ stats_addr = &cnic->stats_addr->fcoe_stat;
+ if (!stats_addr)
+ return -EINVAL;
+
+ strncpy(stats_addr->version, BNX2FC_VERSION,
+ sizeof(stats_addr->version));
+ stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
+ stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
+
+ return 0;
+}
+
+
+/**
+ * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * This function maps adapter structure to pcidev structure and initiates
+ * firmware handshake to enable/initialize on-chip FCoE components.
+ * This bnx2fc - cnic interface api callback is used after following
+ * conditions are met -
+ * a) underlying network interface is up (marked by event NETDEV_UP
+ * from netdev
+ * b) bnx2fc adatper structure is registered.
+ */
+static void bnx2fc_ulp_start(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ struct fc_lport *lport;
+
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
+ bnx2fc_fw_init(hba);
+
+ BNX2FC_MISC_DBG("bnx2fc started.\n");
+
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->hba == hba) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
+ /* Kick off Fabric discovery*/
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(interface);
+ }
+ }
+
+ mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport)
+{
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ fc_fabric_logoff(lport);
+ fc_lport_destroy(lport);
+}
+
+static void bnx2fc_stop(struct bnx2fc_interface *interface)
+{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport;
+ struct fc_lport *vport;
+
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
+ return;
+
+ lport = ctlr->lp;
+ bnx2fc_port_shutdown(lport);
+
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(lport);
+}
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
+{
+#define BNX2FC_INIT_POLL_TIME (1000 / HZ)
+ int rc = -1;
+ int i = HZ;
+
+ rc = bnx2fc_bind_adapter_devices(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnx2fc_send_fw_fcoe_init_msg(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
+ goto err_unbind;
+ }
+
+ /*
+ * Wait until the adapter init message is complete, and adapter
+ * state is UP.
+ */
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ msleep(BNX2FC_INIT_POLL_TIME);
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
+ printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
+ "Ignoring...\n",
+ hba->cnic->netdev->name);
+ rc = -1;
+ goto err_unbind;
+ }
+
+
+ set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
+ return 0;
+
+err_unbind:
+ bnx2fc_unbind_adapter_devices(hba);
+err_out:
+ return rc;
+}
+
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
+{
+ if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
+ if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
+ init_timer(&hba->destroy_timer);
+ hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
+ jiffies;
+ hba->destroy_timer.function = bnx2fc_destroy_timer;
+ hba->destroy_timer.data = (unsigned long)hba;
+ add_timer(&hba->destroy_timer);
+ wait_event_interruptible(hba->destroy_wait,
+ test_bit(BNX2FC_FLAG_DESTROY_CMPL,
+ &hba->flags));
+ clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
+ /* This should never happen */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&hba->destroy_timer);
+ }
+ bnx2fc_unbind_adapter_devices(hba);
+ }
+}
+
+/**
+ * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * Driver checks if adapter is already in shutdown mode, if not start
+ * the shutdown process.
+ */
+static void bnx2fc_ulp_stop(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct bnx2fc_interface *interface;
+
+ printk(KERN_ERR "ULP_STOP\n");
+
+ mutex_lock(&bnx2fc_dev_lock);
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
+ goto exit;
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->hba == hba)
+ bnx2fc_stop(interface);
+ }
+ BUG_ON(hba->num_ofld_sess != 0);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN,
+ &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+
+ bnx2fc_fw_destroy(hba);
+exit:
+ mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
+{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport;
+ int wait_cnt = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* Kick off FIP/FLOGI */
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
+ printk(KERN_ERR PFX "Init not done yet\n");
+ return;
+ }
+
+ lport = ctlr->lp;
+ BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
+
+ if (!bnx2fc_link_ok(lport) && interface->enabled) {
+ BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
+ fcoe_ctlr_link_up(ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
+ }
+
+ /* wait for the FCF to be selected before issuing FLOGI */
+ while (!ctlr->sel_fcf) {
+ msleep(250);
+ /* give up after 3 secs */
+ if (++wait_cnt > 12)
+ break;
+ }
+
+ /* Reset max receive frame size to default */
+ if (fc_set_mfs(lport, BNX2FC_MFS))
+ return;
+
+ fc_lport_init(lport);
+ fc_fabric_login(lport);
+}
+
+
+/**
+ * bnx2fc_ulp_init - Initialize an adapter instance
+ *
+ * @dev : cnic device handle
+ * Called from cnic_register_driver() context to initialize all
+ * enumerated cnic devices. This routine allocates adapter structure
+ * and other device specific resources.
+ */
+static void bnx2fc_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+ int rc = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* bnx2fc works only when bnx2x is loaded */
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
+ (dev->max_fcoe_conn == 0)) {
+ printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
+ " flags: %lx fcoe_conn: %d\n",
+ dev->netdev->name, dev->flags, dev->max_fcoe_conn);
+ return;
+ }
+
+ hba = bnx2fc_hba_create(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "hba initialization failed\n");
+ return;
+ }
+
+ /* Add HBA to the adapter list */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_add_tail(&hba->list, &adapter_list);
+ adapter_count++;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ dev->fcoe_cap = &hba->fcoe_cap;
+ clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+ rc = dev->register_device(dev, CNIC_ULP_FCOE,
+ (void *) hba);
+ if (rc)
+ printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
+ else
+ set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
+static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
+{
+ struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+ if (interface->enabled == true) {
+ if (!ctlr->lp) {
+ pr_err(PFX "__bnx2fc_disable: lport not found\n");
+ return -ENODEV;
+ } else {
+ interface->enabled = false;
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Deperecated: Use bnx2fc_enabled()
+ */
+static int bnx2fc_disable(struct net_device *netdev)
+{
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ int rc = 0;
+
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+
+ interface = bnx2fc_interface_lookup(netdev);
+ ctlr = bnx2fc_to_ctlr(interface);
+
+ if (!interface) {
+ rc = -ENODEV;
+ pr_err(PFX "bnx2fc_disable: interface not found\n");
+ } else {
+ rc = __bnx2fc_disable(ctlr);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
+{
+ struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+ if (interface->enabled == false) {
+ if (!ctlr->lp) {
+ pr_err(PFX "__bnx2fc_enable: lport not found\n");
+ return -ENODEV;
+ } else if (!bnx2fc_link_ok(ctlr->lp)) {
+ fcoe_ctlr_link_up(ctlr);
+ interface->enabled = true;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Deprecated: Use bnx2fc_enabled()
+ */
+static int bnx2fc_enable(struct net_device *netdev)
+{
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ int rc = 0;
+
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+
+ interface = bnx2fc_interface_lookup(netdev);
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface) {
+ rc = -ENODEV;
+ pr_err(PFX "bnx2fc_enable: interface not found\n");
+ } else {
+ rc = __bnx2fc_enable(ctlr);
+ }
+
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return __bnx2fc_enable(ctlr);
+ case FCOE_CTLR_DISABLED:
+ return __bnx2fc_disable(ctlr);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+enum bnx2fc_create_link_state {
+ BNX2FC_CREATE_LINK_DOWN,
+ BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
+ *
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_syfs's ctlr_create file.
+ *
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
+ *
+ * Returns: 0 for success
+ */
+static int _bnx2fc_create(struct net_device *netdev,
+ enum fip_state fip_mode,
+ enum bnx2fc_create_link_state link_state)
+{
+ struct fcoe_ctlr_device *cdev;
+ struct fcoe_ctlr *ctlr;
+ struct bnx2fc_interface *interface;
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev = netdev;
+ struct fc_lport *lport;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+ int vlan_id = 0;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
+ if (fip_mode != FIP_MODE_FABRIC) {
+ printk(KERN_ERR "fip mode not FABRIC\n");
+ return -EIO;
+ }
+
+ rtnl_lock();
+
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto mod_err;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+
+ /* obtain interface and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
+ goto netdev_err;
+ }
+
+ if (bnx2fc_interface_lookup(netdev)) {
+ rc = -EEXIST;
+ goto netdev_err;
+ }
+
+ interface = bnx2fc_interface_create(hba, netdev, fip_mode);
+ if (!interface) {
+ printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
+ rc = -ENOMEM;
+ goto ifput_err;
+ }
+
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ vlan_id = vlan_dev_vlan_id(netdev);
+ interface->vlan_enabled = 1;
+ }
+
+ ctlr = bnx2fc_to_ctlr(interface);
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ interface->vlan_id = vlan_id;
+
+ interface->timer_work_queue =
+ create_singlethread_workqueue("bnx2fc_timer_wq");
+ if (!interface->timer_work_queue) {
+ printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
+ rc = -EINVAL;
+ goto ifput_err;
+ }
+
+ lport = bnx2fc_if_create(interface, &cdev->dev, 0);
+ if (!lport) {
+ printk(KERN_ERR PFX "Failed to create interface (%s)\n",
+ netdev->name);
+ rc = -EINVAL;
+ goto if_create_err;
+ }
+
+ /* Add interface to if_list */
+ list_add_tail(&interface->list, &if_list);
+
+ lport->boot_time = jiffies;
+
+ /* Make this master N_port */
+ ctlr->lp = lport;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ cdev->enabled = FCOE_CTLR_ENABLED;
+ else
+ cdev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP &&
+ !bnx2fc_link_ok(lport)) {
+ fcoe_ctlr_link_up(ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
+ }
+
+ BNX2FC_HBA_DBG(lport, "create: START DISC\n");
+ bnx2fc_start_disc(interface);
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ interface->enabled = true;
+
+ /*
+ * Release from kref_init in bnx2fc_interface_setup, on success
+ * lport should be holding a reference taken in bnx2fc_if_create
+ */
+ bnx2fc_interface_put(interface);
+ /* put netdev that was held while calling dev_get_by_name */
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return 0;
+
+if_create_err:
+ destroy_workqueue(interface->timer_work_queue);
+ifput_err:
+ bnx2fc_net_cleanup(interface);
+ bnx2fc_interface_put(interface);
+ goto mod_err;
+netdev_err:
+ module_put(THIS_MODULE);
+mod_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. The allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating in with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+ return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+ BNX2FC_CREATE_LINK_DOWN);
+}
+
+/**
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
+ *
+ * @cnic: Pointer to cnic device instance
+ *
+ **/
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_entry(hba, &adapter_list, list) {
+ if (hba->cnic == cnic)
+ return hba;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+ *netdev)
+{
+ struct bnx2fc_interface *interface;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->netdev == netdev)
+ return interface;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
+ *phys_dev)
+{
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_entry(hba, &adapter_list, list) {
+ if (hba->phys_dev == phys_dev)
+ return hba;
+ }
+ printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
+ return NULL;
+}
+
+/**
+ * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev cnic device handle
+ */
+static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface, *tmp;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+ hba = bnx2fc_find_hba_for_cnic(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n",
+ dev);
+ mutex_unlock(&bnx2fc_dev_lock);
+ return;
+ }
+
+ list_del_init(&hba->list);
+ adapter_count--;
+
+ list_for_each_entry_safe(interface, tmp, &if_list, list)
+ /* destroy not called yet, move to quiesced list */
+ if (interface->hba == hba)
+ __bnx2fc_destroy(interface);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Ensure ALL destroy work has been completed before return */
+ flush_workqueue(bnx2fc_wq);
+
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_hba_destroy(hba);
+}
+
+/**
+ * bnx2fc_fcoe_reset - Resets the fcoe
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ fc_lport_reset(lport);
+ return 0;
+}
+
+
+static bool bnx2fc_match(struct net_device *netdev)
+{
+ struct net_device *phys_dev = netdev;
+
+ mutex_lock(&bnx2fc_dev_lock);
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+
+ if (bnx2fc_hba_lookup(phys_dev)) {
+ mutex_unlock(&bnx2fc_dev_lock);
+ return true;
+ }
+
+ mutex_unlock(&bnx2fc_dev_lock);
+ return false;
+}
+
+
+static struct fcoe_transport bnx2fc_transport = {
+ .name = {"bnx2fc"},
+ .attached = false,
+ .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .alloc = bnx2fc_ctlr_alloc,
+ .match = bnx2fc_match,
+ .create = bnx2fc_create,
+ .destroy = bnx2fc_destroy,
+ .enable = bnx2fc_enable,
+ .disable = bnx2fc_disable,
+};
+
+/**
+ * bnx2fc_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2fc_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2fc_percpu, cpu);
+
+ thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
+ (void *)p, cpu_to_node(cpu),
+ "bnx2fc_thread/%d", cpu);
+ /* bind thread to the cpu */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2fc_work *work, *tmp;
+
+ BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&p->fp_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+
+ /* Free all work in the list */
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->fp_work_lock);
+
+ if (thread)
+ kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ int rc = 0;
+ unsigned int cpu = 0;
+ struct bnx2fc_percpu_s *p;
+
+ printk(KERN_INFO PFX "%s", version);
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&bnx2fc_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&adapter_list);
+ INIT_LIST_HEAD(&if_list);
+ mutex_init(&bnx2fc_dev_lock);
+ adapter_count = 0;
+
+ /* Attach FC transport template */
+ rc = bnx2fc_attach_transport();
+ if (rc)
+ goto detach_ft;
+
+ bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ if (!bnx2fc_wq) {
+ rc = -ENOMEM;
+ goto release_bt;
+ }
+
+ bg = &bnx2fc_global;
+ skb_queue_head_init(&bg->fcoe_rx_list);
+ l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
+ if (IS_ERR(l2_thread)) {
+ rc = PTR_ERR(l2_thread);
+ goto free_wq;
+ }
+ wake_up_process(l2_thread);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ bg->thread = l2_thread;
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->fp_work_lock);
+ }
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_create(cpu);
+ }
+
+ /* Initialize per CPU interrupt thread */
+ __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cpu_notifier_register_done();
+
+ cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(bnx2fc_wq);
+release_bt:
+ bnx2fc_release_transport();
+detach_ft:
+ fcoe_transport_detach(&bnx2fc_transport);
+out:
+ return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+ LIST_HEAD(to_be_deleted);
+ struct bnx2fc_hba *hba, *next;
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ struct sk_buff *skb;
+ unsigned int cpu = 0;
+
+ /*
+ * NOTE: Since cnic calls register_driver routine rtnl_lock,
+ * it will have higher precedence than bnx2fc_dev_lock.
+ * unregister_device() cannot be called with bnx2fc_dev_lock
+ * held.
+ */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_splice(&adapter_list, &to_be_deleted);
+ INIT_LIST_HEAD(&adapter_list);
+ adapter_count = 0;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Unregister with cnic */
+ list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
+ list_del_init(&hba->list);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
+ hba);
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
+ &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic,
+ CNIC_ULP_FCOE);
+ bnx2fc_hba_destroy(hba);
+ }
+ cnic_unregister_driver(CNIC_ULP_FCOE);
+
+ /* Destroy global thread */
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ l2_thread = bg->thread;
+ bg->thread = NULL;
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ if (l2_thread)
+ kthread_stop(l2_thread);
+
+ cpu_notifier_register_begin();
+
+ /* Destroy per cpu threads */
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_destroy(cpu);
+ }
+
+ __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cpu_notifier_register_done();
+
+ destroy_workqueue(bnx2fc_wq);
+ /*
+ * detach from scsi transport
+ * must happen after all destroys are done
+ */
+ bnx2fc_release_transport();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&bnx2fc_transport);
+}
+
+module_init(bnx2fc_mod_init);
+module_exit(bnx2fc_mod_exit);
+
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+ .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
+static struct fc_function_template bnx2fc_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = bnx2fc_get_host_stats,
+
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .vport_create = bnx2fc_vport_create,
+ .vport_delete = bnx2fc_vport_destroy,
+ .vport_disable = bnx2fc_vport_disable,
+ .bsg_request = fc_lport_bsg_request,
+};
+
+static struct fc_function_template bnx2fc_vport_xport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+ .terminate_rport_io = fc_rport_terminate_io,
+ .bsg_request = fc_lport_bsg_request,
+};
+
+/**
+ * scsi_host_template structure used while registering with SCSI-ml
+ */
+static struct scsi_host_template bnx2fc_shost_template = {
+ .module = THIS_MODULE,
+ .name = "QLogic Offload FCoE Initiator",
+ .queuecommand = bnx2fc_queuecommand,
+ .eh_abort_handler = bnx2fc_eh_abort, /* abts */
+ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
+ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
+ .max_sectors = 1024,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+ .frame_send = bnx2fc_xmit,
+ .elsct_send = bnx2fc_elsct_send,
+ .fcp_abort_io = bnx2fc_abort_io,
+ .fcp_cleanup = bnx2fc_cleanup,
+ .get_lesb = fcoe_get_lesb,
+ .rport_event_callback = bnx2fc_rport_event_handler,
+};
+
+/**
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ * structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+ .owner = THIS_MODULE,
+ .cnic_init = bnx2fc_ulp_init,
+ .cnic_exit = bnx2fc_ulp_exit,
+ .cnic_start = bnx2fc_ulp_start,
+ .cnic_stop = bnx2fc_ulp_stop,
+ .indicate_kcqes = bnx2fc_indicate_kcqe,
+ .indicate_netevent = bnx2fc_indicate_netevent,
+ .cnic_get_stats = bnx2fc_ulp_get_stats,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000..c6688d72a
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,2195 @@
+/* bnx2fc_hwi.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that low level functions that interact
+ * with 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe);
+
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_stat stat_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+ stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+ stat_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+ stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba: adapter structure pointer
+ *
+ * Send down FCoE firmware init KWQEs which initiates the initial handshake
+ * with the f/w.
+ *
+ */
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_init1 fcoe_init1;
+ struct fcoe_kwqe_init2 fcoe_init2;
+ struct fcoe_kwqe_init3 fcoe_init3;
+ struct kwqe *kwqe_arr[3];
+ int num_kwqes = 3;
+ int rc = 0;
+
+ if (!hba->cnic) {
+ printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
+ return -ENODEV;
+ }
+
+ /* fill init1 KWQE */
+ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
+ fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
+ fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init1.num_tasks = hba->max_tasks;
+ fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
+ fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
+ fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
+ fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
+ fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
+ fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
+ fcoe_init1.task_list_pbl_addr_hi =
+ (u32) ((u64) hba->task_ctx_bd_dma >> 32);
+ fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
+
+ fcoe_init1.flags = (PAGE_SHIFT <<
+ FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
+
+ fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
+
+ /* fill init2 KWQE */
+ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
+ fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
+ fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
+ fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
+
+
+ fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
+ fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
+ ((u64) hba->hash_tbl_pbl_dma >> 32);
+
+ fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
+ fcoe_init2.t2_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_dma >> 32);
+
+ fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
+ fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
+
+ fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
+
+ /* fill init3 KWQE */
+ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
+ fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
+ fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ fcoe_init3.error_bit_map_lo = 0xffffffff;
+ fcoe_init3.error_bit_map_hi = 0xffffffff;
+
+ /*
+ * enable both cached connection and cached tasks
+ * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
+ */
+ fcoe_init3.perf_config = 3;
+
+ kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
+ kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
+ kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_destroy fcoe_destroy;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = -1;
+
+ /* fill destroy KWQE */
+ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
+ fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
+ fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct bnx2fc_hba *hba = interface->hba;
+ struct kwqe *kwqe_arr[4];
+ struct fcoe_kwqe_conn_offload1 ofld_req1;
+ struct fcoe_kwqe_conn_offload2 ofld_req2;
+ struct fcoe_kwqe_conn_offload3 ofld_req3;
+ struct fcoe_kwqe_conn_offload4 ofld_req4;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 4;
+ u32 port_id;
+ int rc = 0;
+ u16 conn_id;
+
+ /* Initialize offload request 1 structure */
+ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
+
+ ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+
+ conn_id = (u16)tgt->fcoe_conn_id;
+ ofld_req1.fcoe_conn_id = conn_id;
+
+
+ ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
+ ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
+
+ ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
+ ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
+
+ ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
+ ofld_req1.rq_first_pbe_addr_hi =
+ (u32)((u64) tgt->rq_dma >> 32);
+
+ ofld_req1.rq_prod = 0x8000;
+
+ /* Initialize offload request 2 structure */
+ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
+
+ ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
+
+ ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
+ ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
+
+ ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
+ ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
+
+ ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
+ ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
+
+ /* Initialize offload request 3 structure */
+ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
+
+ ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
+ ofld_req3.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req3.vlan_tag = interface->vlan_id <<
+ FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
+ ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id == 0) {
+ BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Store s_id of the initiator for further reference. This will
+ * be used during disable/destroy during linkdown processing as
+ * when the lport is reset, the port_id also is reset to 0
+ */
+ tgt->sid = port_id;
+ ofld_req3.s_id[0] = (port_id & 0x000000FF);
+ ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ ofld_req3.d_id[0] = (port_id & 0x000000FF);
+ ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ ofld_req3.tx_total_conc_seqs = rdata->max_seq;
+
+ ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
+ ofld_req3.rx_max_fc_pay_len = lport->mfs;
+
+ ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_open_seqs_exch_c3 = 1;
+
+ ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
+ ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
+
+ /* set mul_n_port_ids supported flag to 0, until it is supported */
+ ofld_req3.flags = 0;
+ /*
+ ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
+ */
+ /* Info from PLOGI response */
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
+
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+
+ /*
+ * Info from PRLI response, this info is used for sequence level error
+ * recovery support
+ */
+ if (tgt->dev_type == TYPE_TAPE) {
+ ofld_req3.flags |= 1 <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
+ ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
+ }
+
+ /* vlan flag */
+ ofld_req3.flags |= (interface->vlan_enabled <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
+
+ /* C2_VALID and ACK flags are not set as they are not supported */
+
+
+ /* Initialize offload request 4 structure */
+ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
+ ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
+ ofld_req4.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
+
+
+ ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
+ /* local mac */
+ ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
+ ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
+ ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
+ ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
+ ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
+ ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ /* fcf mac */
+ ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
+
+ ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
+ ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
+
+ ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
+ ofld_req4.confq_pbl_base_addr_hi =
+ (u32)((u64) tgt->confq_pbl_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ kwqe_arr[2] = (struct kwqe *) &ofld_req3;
+ kwqe_arr[3] = (struct kwqe *) &ofld_req4;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct kwqe *kwqe_arr[2];
+ struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct bnx2fc_hba *hba = interface->hba;
+ struct fcoe_kwqe_conn_enable_disable enbl_req;
+ struct fc_lport *lport = port->lport;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&enbl_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
+ enbl_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
+ /* local mac */
+ enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
+ enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
+ enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
+ enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
+ enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
+ memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
+
+ enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id != tgt->sid) {
+ printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
+ "sid = 0x%x\n", port_id, tgt->sid);
+ port_id = tgt->sid;
+ }
+ enbl_req.s_id[0] = (port_id & 0x000000FF);
+ enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ enbl_req.d_id[0] = (port_id & 0x000000FF);
+ enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ enbl_req.vlan_tag = interface->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ enbl_req.vlan_flag = interface->vlan_enabled;
+ enbl_req.context_id = tgt->context_id;
+ enbl_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &enbl_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_disable_req - initiates FCoE Session disable
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct bnx2fc_hba *hba = interface->hba;
+ struct fcoe_kwqe_conn_enable_disable disable_req;
+ struct kwqe *kwqe_arr[2];
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&disable_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
+ disable_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
+ disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
+ disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
+ disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
+ disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
+ disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
+
+ disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
+
+ port_id = tgt->sid;
+ disable_req.s_id[0] = (port_id & 0x000000FF);
+ disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+
+ port_id = rport->port_id;
+ disable_req.d_id[0] = (port_id & 0x000000FF);
+ disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ disable_req.context_id = tgt->context_id;
+ disable_req.conn_id = tgt->fcoe_conn_id;
+ disable_req.vlan_tag = interface->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ disable_req.vlan_tag |=
+ 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ disable_req.vlan_flag = interface->vlan_enabled;
+
+ kwqe_arr[0] = (struct kwqe *) &disable_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ struct fcoe_kwqe_conn_destroy destroy_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
+ destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
+ destroy_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ destroy_req.context_id = tgt->context_id;
+ destroy_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &destroy_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
+{
+ struct bnx2fc_lport *blport;
+
+ spin_lock_bh(&hba->hba_lock);
+ list_for_each_entry(blport, &hba->vports, list) {
+ if (blport->lport == lport) {
+ spin_unlock_bh(&hba->hba_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ return false;
+
+}
+
+
+static void bnx2fc_unsol_els_work(struct work_struct *work)
+{
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_lport *lport;
+ struct bnx2fc_hba *hba;
+ struct fc_frame *fp;
+
+ unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
+ lport = unsol_els->lport;
+ fp = unsol_els->fp;
+ hba = unsol_els->hba;
+ if (is_valid_lport(hba, lport))
+ fc_exch_recv(lport, fp);
+ kfree(unsol_els);
+}
+
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid)
+{
+ struct fcoe_port *port = tgt->port;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_frame_header *fh;
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+ u32 payload_len;
+ u32 crc;
+ u8 op;
+
+
+ unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
+ if (!unsol_els) {
+ BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
+ l2_oxid, frame_len);
+
+ payload_len = frame_len - sizeof(struct fc_frame_header);
+
+ fp = fc_frame_alloc(lport, payload_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ kfree(unsol_els);
+ return;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, frame_len);
+
+ if (l2_oxid != FC_XID_UNKNOWN)
+ fh->fh_ox_id = htons(l2_oxid);
+
+ skb = fp_skb(fp);
+
+ if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
+ (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
+
+ if (fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ if ((op == ELS_TEST) || (op == ELS_ESTC) ||
+ (op == ELS_FAN) || (op == ELS_CSU)) {
+ /*
+ * No need to reply for these
+ * ELS requests
+ */
+ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
+ kfree_skb(skb);
+ kfree(unsol_els);
+ return;
+ }
+ }
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+ unsol_els->lport = lport;
+ unsol_els->hba = interface->hba;
+ unsol_els->fp = fp;
+ INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
+ queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
+ } else {
+ BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
+ kfree_skb(skb);
+ kfree(unsol_els);
+ }
+}
+
+static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ u8 num_rq;
+ struct fcoe_err_report_entry *err_entry;
+ unsigned char *rq_data;
+ unsigned char *buf = NULL, *buf1;
+ int i;
+ u16 xid;
+ u32 frame_len, len;
+ struct bnx2fc_cmd *io_req = NULL;
+ struct fcoe_task_ctx_entry *task, *task_page;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ int task_idx, index;
+ int rc = 0;
+ u64 err_warn_bit_map;
+ u8 err_warn = 0xff;
+
+
+ BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
+ switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
+ case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
+ frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
+ FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
+
+ num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+
+ spin_lock_bh(&tgt->tgt_lock);
+ rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (rq_data) {
+ buf = rq_data;
+ } else {
+ buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
+ GFP_ATOMIC);
+
+ if (!buf1) {
+ BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
+ break;
+ }
+
+ for (i = 0; i < num_rq; i++) {
+ spin_lock_bh(&tgt->tgt_lock);
+ rq_data = (unsigned char *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ spin_unlock_bh(&tgt->tgt_lock);
+ len = BNX2FC_RQ_BUF_SZ;
+ memcpy(buf1, rq_data, len);
+ buf1 += len;
+ }
+ }
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
+ FC_XID_UNKNOWN);
+
+ if (buf != rq_data)
+ kfree(buf);
+ spin_lock_bh(&tgt->tgt_lock);
+ bnx2fc_return_rqe(tgt, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ case FCOE_ERROR_DETECTION_CQE_TYPE:
+ /*
+ * In case of error reporting CQE a single RQ entry
+ * is consumed.
+ */
+ spin_lock_bh(&tgt->tgt_lock);
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = err_entry->fc_hdr.ox_id;
+ BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
+ err_entry->data.err_warn_bitmap_hi,
+ err_entry->data.err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
+ err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
+
+
+ if (xid > hba->max_xid) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
+ xid);
+ goto ret_err_rqe;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req)
+ goto ret_err_rqe;
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ goto ret_err_rqe;
+ }
+
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
+ "progress.. ignore unsol err\n");
+ goto ret_err_rqe;
+ }
+
+ err_warn_bit_map = (u64)
+ ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
+ (u64)err_entry->data.err_warn_bitmap_lo;
+ for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
+ if (err_warn_bit_map & (u64)((u64)1 << i)) {
+ err_warn = i;
+ break;
+ }
+ }
+
+ /*
+ * If ABTS is already in progress, and FW error is
+ * received after that, do not cancel the timeout_work
+ * and let the error recovery continue by explicitly
+ * logging out the target, when the ABTS eventually
+ * times out.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+ "in ABTS processing\n", xid);
+ goto ret_err_rqe;
+ }
+ BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
+ if (tgt->dev_type != TYPE_TAPE)
+ goto skip_rec;
+ switch (err_warn) {
+ case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
+ case FCOE_ERROR_CODE_DATA_OOO_RO:
+ case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
+ case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
+ case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
+ case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
+ BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
+ xid);
+ memcpy(&io_req->err_entry, err_entry,
+ sizeof(struct fcoe_err_report_entry));
+ if (!test_bit(BNX2FC_FLAG_SRR_SENT,
+ &io_req->req_flags)) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_rec(io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ goto skip_rec;
+ } else
+ printk(KERN_ERR PFX "SRR in progress\n");
+ goto ret_err_rqe;
+ break;
+ default:
+ break;
+ }
+
+skip_rec:
+ set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ printk(KERN_ERR PFX "err_warn: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ io_req->xid);
+ bnx2fc_initiate_cleanup(io_req);
+ }
+ret_err_rqe:
+ bnx2fc_return_rqe(tgt, 1);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ case FCOE_WARNING_DETECTION_CQE_TYPE:
+ /*
+ *In case of warning reporting CQE a single RQ entry
+ * is consumes.
+ */
+ spin_lock_bh(&tgt->tgt_lock);
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+ BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+ err_entry->data.err_warn_bitmap_hi,
+ err_entry->data.err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+ err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
+
+ if (xid > hba->max_xid) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
+ goto ret_warn_rqe;
+ }
+
+ err_warn_bit_map = (u64)
+ ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
+ (u64)err_entry->data.err_warn_bitmap_lo;
+ for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
+ if (err_warn_bit_map & (u64) (1 << i)) {
+ err_warn = i;
+ break;
+ }
+ }
+ BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req)
+ goto ret_warn_rqe;
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ goto ret_warn_rqe;
+ }
+
+ memcpy(&io_req->err_entry, err_entry,
+ sizeof(struct fcoe_err_report_entry));
+
+ if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
+ /* REC_TOV is not a warning code */
+ BUG_ON(1);
+ else
+ BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
+ret_warn_rqe:
+ bnx2fc_return_rqe(tgt, 1);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+ break;
+ }
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct bnx2fc_cmd *io_req;
+ int task_idx, index;
+ u16 xid;
+ u8 cmd_type;
+ u8 rx_state = 0;
+ u8 num_rq;
+
+ spin_lock_bh(&tgt->tgt_lock);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= hba->max_tasks) {
+ printk(KERN_ERR PFX "ERROR:xid out of range\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+ if (io_req == NULL) {
+ printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+
+ /* Timestamp IO completion time */
+ cmd_type = io_req->cmd_type;
+
+ rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
+
+ /* Process other IO completion types */
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+
+ if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state - %d\n",
+ rx_state);
+ break;
+
+ case BNX2FC_TASK_MGMT_CMD:
+ BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+ bnx2fc_process_tm_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_ABTS:
+ /*
+ * ABTS request received by firmware. ABTS response
+ * will be delivered to the task belonging to the IO
+ * that was aborted
+ */
+ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ case BNX2FC_ELS:
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
+ bnx2fc_process_els_compl(io_req, task, num_rq);
+ else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state = %d\n",
+ rx_state);
+ break;
+
+ case BNX2FC_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ case BNX2FC_SEQ_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
+ io_req->xid);
+ bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
+ break;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
+ u32 msg;
+
+ wmb();
+ rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT);
+ msg = *((u32 *)rx_db);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+ mmiowb();
+
+}
+
+struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct bnx2fc_work *work;
+ work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
+ if (!work)
+ return NULL;
+
+ INIT_LIST_HEAD(&work->list);
+ work->tgt = tgt;
+ work->wqe = wqe;
+ return work;
+}
+
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_cqe *cq;
+ u32 cq_cons;
+ struct fcoe_cqe *cqe;
+ u32 num_free_sqes = 0;
+ u32 num_cqes = 0;
+ u16 wqe;
+
+ /*
+ * cq_lock is a low contention lock used to protect
+ * the CQ data structure from being freed up during
+ * the upload operation
+ */
+ spin_lock_bh(&tgt->cq_lock);
+
+ if (!tgt->cq) {
+ printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+ }
+ cq = tgt->cq;
+ cq_cons = tgt->cq_cons_idx;
+ cqe = &cq[cq_cons];
+
+ while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+ (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+
+ /* new entry on the cq */
+ if (wqe & FCOE_CQE_CQE_TYPE) {
+ /* Unsolicited event notification */
+ bnx2fc_process_unsol_compl(tgt, wqe);
+ } else {
+ /* Pending work request completion */
+ struct bnx2fc_work *work = NULL;
+ struct bnx2fc_percpu_s *fps = NULL;
+ unsigned int cpu = wqe % num_possible_cpus();
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (unlikely(!fps->iothread))
+ goto unlock;
+
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work)
+ list_add_tail(&work->list,
+ &fps->work_list);
+unlock:
+ spin_unlock_bh(&fps->fp_work_lock);
+
+ /* Pending work request completion */
+ if (fps->iothread && work)
+ wake_up_process(fps->iothread);
+ else
+ bnx2fc_process_cq_compl(tgt, wqe);
+ num_free_sqes++;
+ }
+ cqe++;
+ tgt->cq_cons_idx++;
+ num_cqes++;
+
+ if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+ tgt->cq_cons_idx = 0;
+ cqe = cq;
+ tgt->cq_curr_toggle_bit =
+ 1 - tgt->cq_curr_toggle_bit;
+ }
+ }
+ if (num_cqes) {
+ /* Arm CQ only if doorbell is mapped */
+ if (tgt->ctx_base)
+ bnx2fc_arm_cq(tgt);
+ atomic_add(num_free_sqes, &tgt->free_sqes);
+ }
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+}
+
+/**
+ * bnx2fc_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
+ *
+ * Fast path event notification handler
+ */
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe)
+{
+ u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
+ struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
+
+ if (!tgt) {
+ printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
+ return;
+ }
+
+ bnx2fc_process_new_cqes(tgt);
+}
+
+/**
+ * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session offload completion, enable the session if offload is
+ * successful.
+ */
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ struct fcoe_port *port;
+ struct bnx2fc_interface *interface;
+ u32 conn_id;
+ u32 context_id;
+
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
+ return;
+ }
+ BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+ port = tgt->port;
+ interface = tgt->port->priv;
+ if (hba != interface->hba) {
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ goto ofld_cmpl_err;
+ }
+ /*
+ * cnic has allocated a context_id for this session; use this
+ * while enabling the session.
+ */
+ tgt->context_id = context_id;
+ if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status ==
+ FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
+ printk(KERN_ERR PFX "unable to allocate FCoE context "
+ "resources\n");
+ set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
+ }
+ } else {
+ /* FW offload request successfully completed */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ }
+ofld_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+/**
+ * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session enable completion, mark the rport as ready
+ */
+
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_interface *interface;
+ u32 conn_id;
+ u32 context_id;
+
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+
+ /*
+ * context_id should be the same for this target during offload
+ * and enable
+ */
+ if (tgt->context_id != context_id) {
+ printk(KERN_ERR PFX "context id mis-match\n");
+ return;
+ }
+ interface = tgt->port->priv;
+ if (hba != interface->hba) {
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ goto enbl_cmpl_err;
+ }
+ if (!ofld_kcqe->completion_status)
+ /* enable successful - rport ready for issuing IOs */
+ set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
+
+enbl_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *disable_kcqe)
+{
+
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = disable_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
+
+ if (disable_kcqe->completion_status) {
+ printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
+ disable_kcqe->completion_status);
+ set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ } else {
+ /* disable successful */
+ BNX2FC_TGT_DBG(tgt, "disable successful\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = destroy_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+ if (destroy_kcqe->completion_status) {
+ printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
+ destroy_kcqe->completion_status);
+ return;
+ } else {
+ /* destroy successful */
+ BNX2FC_TGT_DBG(tgt, "upload successful\n");
+ clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+ switch (err_code) {
+ case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+ printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+ printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+ printk(KERN_ERR PFX "init_failure due to NIC error\n");
+ break;
+ case FCOE_KCQE_COMPLETION_STATUS_ERROR:
+ printk(KERN_ERR PFX "init failure due to compl status err\n");
+ break;
+ case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
+ printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
+ break;
+ default:
+ printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+ }
+}
+
+/**
+ * bnx2fc_indicae_kcqe - process KCQE
+ *
+ * @hba: adapter structure pointer
+ * @kcqe: kcqe pointer
+ * @num_cqe: Number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ int i = 0;
+ struct fcoe_kcqe *kcqe = NULL;
+
+ while (i < num_cqe) {
+ kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+ switch (kcqe->op_code) {
+ case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+ bnx2fc_fastpath_notification(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+ bnx2fc_process_ofld_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_ENABLE_CONN:
+ bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_INIT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+ bnx2fc_init_failure(hba,
+ kcqe->completion_status);
+ } else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2fc_get_link_state(hba);
+ printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+ (u8)hba->pcidev->bus->number);
+ }
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+ printk(KERN_ERR PFX "DESTROY failed\n");
+ } else {
+ printk(KERN_ERR PFX "DESTROY success\n");
+ }
+ set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
+ wake_up_interruptible(&hba->destroy_wait);
+ break;
+
+ case FCOE_KCQE_OPCODE_DISABLE_CONN:
+ bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_CONN:
+ bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_STAT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+ printk(KERN_ERR PFX "STAT failed\n");
+ complete(&hba->stat_req_done);
+ break;
+
+ case FCOE_KCQE_OPCODE_FCOE_ERROR:
+ /* fall thru */
+ default:
+ printk(KERN_ERR PFX "unknown opcode 0x%x\n",
+ kcqe->op_code);
+ }
+ }
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+ struct fcoe_sqe *sqe;
+
+ sqe = &tgt->sq[tgt->sq_prod_idx];
+
+ /* Fill SQ WQE */
+ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
+ sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
+
+ /* Advance SQ Prod Idx */
+ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
+ tgt->sq_prod_idx = 0;
+ tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
+ }
+}
+
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
+ u32 msg;
+
+ wmb();
+ sq_db->prod = tgt->sq_prod_idx |
+ (tgt->sq_curr_toggle_bit << 15);
+ msg = *((u32 *)sq_db);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+ mmiowb();
+
+}
+
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
+{
+ u32 context_id = tgt->context_id;
+ struct fcoe_port *port = tgt->port;
+ u32 reg_off;
+ resource_size_t reg_base;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+
+ reg_base = pci_resource_start(hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
+ tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!tgt->ctx_base)
+ return -ENOMEM;
+ return 0;
+}
+
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
+
+ if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
+ return NULL;
+
+ tgt->rq_cons_idx += num_items;
+
+ if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
+ tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
+
+ return buf;
+}
+
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ /* return the rq buffer */
+ u32 next_prod_idx = tgt->rq_prod_idx + num_items;
+ if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
+ /* Wrap around RQ */
+ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
+ }
+ tgt->rq_prod_idx = next_prod_idx;
+ tgt->conn_db->rq_prod = tgt->rq_prod_idx;
+}
+
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset)
+{
+ struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
+ struct fcoe_task_ctx_entry *orig_task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
+ u8 orig_task_type;
+ u16 orig_xid = orig_io_req->xid;
+ u32 context_id = tgt->context_id;
+ u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
+ u32 orig_offset = offset;
+ int bd_count;
+ int orig_task_idx, index;
+ int i;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ orig_task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ orig_task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags =
+ FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
+
+ bd_count = orig_io_req->bd_tbl->bd_valid;
+
+ /* obtain the appropriate bd entry from relative offset */
+ for (i = 0; i < bd_count; i++) {
+ if (offset < bd[i].buf_len)
+ break;
+ offset -= bd[i].buf_len;
+ }
+ phys_addr += (i * sizeof(struct fcoe_bd_ctx));
+
+ if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)phys_addr;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)phys_addr >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_count;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
+ offset; /* adjusted offset */
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
+ } else {
+ orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
+ index = orig_xid % BNX2FC_TASKS_PER_PAGE;
+
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[orig_task_idx];
+ orig_task = &(task_page[index]);
+
+ /* Multiple SGEs were used for this IO */
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
+ sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
+ sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
+ sgl->mul_sgl.cur_sge_idx = i;
+
+ memset(&task->rxwr_only.rx_seq_ctx, 0,
+ sizeof(struct fcoe_rx_seq_ctx));
+ task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
+ task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
+ }
+}
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid)
+{
+ u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u32 context_id = tgt->context_id;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Tx Write Rx Read */
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ if (tgt->dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+ /* Tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags =
+ FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+
+ /* Rx Read Tx Write */
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+ task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+ FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+}
+
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ u8 task_type = 0;
+ u64 *hdr;
+ u64 temp_hdr[3];
+ u32 context_id;
+
+
+ /* Obtain task_type */
+ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
+ (io_req->cmd_type == BNX2FC_ELS)) {
+ task_type = FCOE_TASK_TYPE_MIDPATH;
+ } else if (io_req->cmd_type == BNX2FC_ABTS) {
+ task_type = FCOE_TASK_TYPE_ABTS;
+ }
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
+ io_req->cmd_type, task_type);
+
+ /* Tx only */
+ if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+ (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)mp_req->mp_req_bd_dma;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_req_bd_dma >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
+ }
+
+ /* Tx Write Rx Read */
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ if (tgt->dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+
+ /* tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+
+ /* Rx Write Tx Read */
+ task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+ /* rx flags */
+ task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+ FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+ context_id = tgt->context_id;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
+ fc_hdr->fh_rx_id = htons(0xffff);
+ task->rxwr_txrd.var_ctx.rx_id = 0xffff;
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
+ }
+
+ /* Fill FC Header into middle path buffer */
+ hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
+ memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
+ sgl->mul_sgl.sgl_size = 1;
+ }
+}
+
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ u8 task_type;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct io_bdt *bd_tbl = io_req->bd_tbl;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fcoe_cached_sge_ctx *cached_sge;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ int dev_type = tgt->dev_type;
+ u64 *fcp_cmnd;
+ u64 tmp_fcp_cmnd[4];
+ u32 context_id;
+ int cnt, i;
+ int bd_count;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx only */
+ bd_count = bd_tbl->bd_valid;
+ cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
+ if (task_type == FCOE_TASK_TYPE_WRITE) {
+ if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
+ cached_sge->cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
+ cached_sge->cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
+ cached_sge->cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+ } else {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_tbl->bd_valid;
+ }
+ }
+
+ /*Tx Write Rx Read */
+ /* Init state to NORMAL */
+ task->txwr_rxrd.const_ctx.init_flags |= task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ if (dev_type == TYPE_TAPE) {
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ io_req->rec_retry = 0;
+ io_req->rec_retry = 0;
+ } else
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ /* tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+
+ /* Set initial seq counter */
+ task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
+
+ /* Fill FCP_CMND IU */
+ fcp_cmnd = (u64 *)
+ task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+ /* swap fcp_cmnd */
+ cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
+
+ for (i = 0; i < cnt; i++) {
+ *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ fcp_cmnd++;
+ }
+
+ /* Rx Write Tx Read */
+ task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+ context_id = tgt->context_id;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ /* rx flags */
+ /* Set state to "waiting for the first packet" */
+ task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+ FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+ task->rxwr_txrd.var_ctx.rx_id = 0xffff;
+
+ /* Rx Only */
+ if (task_type != FCOE_TASK_TYPE_READ)
+ return;
+
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+ bd_count = bd_tbl->bd_valid;
+
+ if (dev_type == TYPE_DISK) {
+ if (bd_count == 1) {
+
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+ cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+ cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+ } else if (bd_count == 2) {
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+ cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+ cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+
+ fcoe_bd_tbl++;
+ cached_sge->second_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ cached_sge->second_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+ } else {
+
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
+ }
+ } else {
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
+ }
+}
+
+/**
+ * bnx2fc_setup_task_ctx - allocate and map task context
+ *
+ * @hba: pointer to adapter structure
+ *
+ * allocate memory for task context, and associated BD table to be used
+ * by firmware
+ *
+ */
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
+{
+ int rc = 0;
+ struct regpair *task_ctx_bdt;
+ dma_addr_t addr;
+ int task_ctx_arr_sz;
+ int i;
+
+ /*
+ * Allocate task context bd table. A page size of bd table
+ * can map 256 buffers. Each buffer contains 32 task context
+ * entries. Hence the limit with one page is 8192 task context
+ * entries.
+ */
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
+ if (!hba->task_ctx_bd_tbl) {
+ printk(KERN_ERR PFX "unable to allocate task context BDT\n");
+ rc = -1;
+ goto out;
+ }
+ memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
+
+ /*
+ * Allocate task_ctx which is an array of pointers pointing to
+ * a page containing 32 task contexts
+ */
+ task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
+ hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
+ GFP_KERNEL);
+ if (!hba->task_ctx) {
+ printk(KERN_ERR PFX "unable to allocate task context array\n");
+ rc = -1;
+ goto out1;
+ }
+
+ /*
+ * Allocate task_ctx_dma which is an array of dma addresses
+ */
+ hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
+ sizeof(dma_addr_t)), GFP_KERNEL);
+ if (!hba->task_ctx_dma) {
+ printk(KERN_ERR PFX "unable to alloc context mapping array\n");
+ rc = -1;
+ goto out2;
+ }
+
+ task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
+ for (i = 0; i < task_ctx_arr_sz; i++) {
+
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
+ if (!hba->task_ctx[i]) {
+ printk(KERN_ERR PFX "unable to alloc task context\n");
+ rc = -1;
+ goto out3;
+ }
+ memset(hba->task_ctx[i], 0, PAGE_SIZE);
+ addr = (u64)hba->task_ctx_dma[i];
+ task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
+ task_ctx_bdt->lo = cpu_to_le32((u32)addr);
+ task_ctx_bdt++;
+ }
+ return 0;
+
+out3:
+ for (i = 0; i < task_ctx_arr_sz; i++) {
+ if (hba->task_ctx[i]) {
+
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i], hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+out2:
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+out1:
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+out:
+ return rc;
+}
+
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
+{
+ int task_ctx_arr_sz;
+ int i;
+
+ if (hba->task_ctx_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl,
+ hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+ }
+
+ task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
+ if (hba->task_ctx) {
+ for (i = 0; i < task_ctx_arr_sz; i++) {
+ if (hba->task_ctx[i]) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i],
+ hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+}
+
+static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int segment_count;
+ u32 *pbl;
+
+ if (hba->hash_tbl_segments) {
+
+ pbl = hba->hash_tbl_pbl;
+ if (pbl) {
+ segment_count = hba->hash_tbl_segment_count;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
+
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+ }
+ }
+
+ kfree(hba->hash_tbl_segments);
+ hba->hash_tbl_segments = NULL;
+ }
+
+ if (hba->hash_tbl_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->hash_tbl_pbl,
+ hba->hash_tbl_pbl_dma);
+ hba->hash_tbl_pbl = NULL;
+ }
+}
+
+static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int hash_table_size;
+ int segment_count;
+ int segment_array_size;
+ int dma_segment_array_size;
+ dma_addr_t *dma_segment_array;
+ u32 *pbl;
+
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
+ segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
+ hba->hash_tbl_segment_count = segment_count;
+
+ segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
+ hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
+ if (!hba->hash_tbl_segments) {
+ printk(KERN_ERR PFX "hash table pointers alloc failed\n");
+ return -ENOMEM;
+ }
+ dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
+ dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+ if (!dma_segment_array) {
+ printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+ goto cleanup_ht;
+ }
+
+ for (i = 0; i < segment_count; ++i) {
+ hba->hash_tbl_segments[i] =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
+ if (!hba->hash_tbl_segments[i]) {
+ printk(KERN_ERR PFX "hash segment alloc failed\n");
+ goto cleanup_dma;
+ }
+ memset(hba->hash_tbl_segments[i], 0,
+ BNX2FC_HASH_TBL_CHUNK_SIZE);
+ }
+
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
+ if (!hba->hash_tbl_pbl) {
+ printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+ goto cleanup_dma;
+ }
+ memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ u64 paddr = dma_segment_array[i];
+ *pbl = cpu_to_le32((u32) paddr);
+ ++pbl;
+ *pbl = cpu_to_le32((u32) (paddr >> 32));
+ ++pbl;
+ }
+ pbl = hba->hash_tbl_pbl;
+ i = 0;
+ while (*pbl && *(pbl + 1)) {
+ u32 lo;
+ u32 hi;
+ lo = *pbl;
+ ++pbl;
+ hi = *pbl;
+ ++pbl;
+ ++i;
+ }
+ kfree(dma_segment_array);
+ return 0;
+
+cleanup_dma:
+ for (i = 0; i < segment_count; ++i) {
+ if (hba->hash_tbl_segments[i])
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ }
+
+ kfree(dma_segment_array);
+
+cleanup_ht:
+ kfree(hba->hash_tbl_segments);
+ hba->hash_tbl_segments = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
+ *
+ * @hba: Pointer to adapter structure
+ *
+ */
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
+{
+ u64 addr;
+ u32 mem_size;
+ int i;
+
+ if (bnx2fc_allocate_hash_table(hba))
+ return -ENOMEM;
+
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl_ptr) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
+
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl, 0x00, mem_size);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ addr = (unsigned long) hba->t2_hash_tbl_dma +
+ ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
+ hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
+ hba->t2_hash_tbl[i].next.hi = addr >> 32;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE, &hba->dummy_buf_dma,
+ GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
+ if (!hba->stats_buffer) {
+ printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+ return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+ u32 mem_size;
+
+ if (hba->stats_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->stats_buffer, hba->stats_buf_dma);
+ hba->stats_buffer = NULL;
+ }
+
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+
+ if (hba->t2_hash_tbl_ptr) {
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl_ptr,
+ hba->t2_hash_tbl_ptr_dma);
+ hba->t2_hash_tbl_ptr = NULL;
+ }
+
+ if (hba->t2_hash_tbl) {
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+ hba->t2_hash_tbl = NULL;
+ }
+ bnx2fc_free_hash_table(hba);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000..9ecca8504
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,2081 @@
+/* bnx2fc_io.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+#define RESERVE_FREE_LIST_INDEX num_possible_cpus()
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec)
+{
+ struct bnx2fc_interface *interface = io_req->port->priv;
+
+ if (queue_delayed_work(interface->timer_work_queue,
+ &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+ struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+ timeout_work.work);
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+ u8 cmd_type = io_req->cmd_type;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ int logo_issued;
+ int rc;
+
+ BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
+ "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+ clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ /*
+ * ideally we should hold the io_req until RRQ complets,
+ * and release io_req from timeout hold.
+ */
+ spin_unlock_bh(&tgt->tgt_lock);
+ bnx2fc_send_rrq(io_req);
+ return;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+ goto done;
+ }
+
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+ complete(&io_req->tm_done);
+ } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /* Handle internally generated ABTS timeout */
+ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /* Hanlde IO timeout */
+ BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed before "
+ " timer expiry\n");
+ goto done;
+ }
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == SUCCESS)
+ goto done;
+ /*
+ * Explicitly logo the target if
+ * abts initiation fails
+ */
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ } else {
+ BNX2FC_IO_DBG(io_req, "IO already in "
+ "ABTS processing\n");
+ }
+ }
+ break;
+ case BNX2FC_ELS:
+
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicitly logo"
+ "(els)\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /*
+ * Handle ELS timeout.
+ * tgt_lock is used to sync compl path and timeout
+ * path. If els compl path is processing this IO, we
+ * have nothing to do here, just release the timer hold
+ */
+ BNX2FC_IO_DBG(io_req, "ELS timed out\n");
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &io_req->req_flags))
+ goto done;
+
+ /* Indicate the cb_func that this ELS is timed out */
+ set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+ }
+ break;
+ default:
+ printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
+ cmd_type);
+ break;
+ }
+
+done:
+ /* release the cmd that was held when timer was set */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
+{
+ /* Called with host lock held */
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ /*
+ * active_cmd_queue may have other command types as well,
+ * and during flush operation, we want to error back only
+ * scsi commands.
+ */
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ return;
+
+ BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
+ /* Do not call scsi done for this IO */
+ return;
+ }
+
+ bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
+ if (!sc_cmd) {
+ printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
+ "IO(0x%x) already cleaned up\n",
+ io_req->xid);
+ return;
+ }
+ sc_cmd->result = err_code << 16;
+
+ BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
+ sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
+ sc_cmd->allowed);
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+}
+
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
+{
+ struct bnx2fc_cmd_mgr *cmgr;
+ struct io_bdt *bdt_info;
+ struct bnx2fc_cmd *io_req;
+ size_t len;
+ u32 mem_size;
+ u16 xid;
+ int i;
+ int num_ios, num_pri_ios;
+ size_t bd_tbl_sz;
+ int arr_sz = num_possible_cpus() + 1;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = hba->max_xid;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+ printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
+ and max_xid 0x%x\n", min_xid, max_xid);
+ return NULL;
+ }
+ BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
+
+ num_ios = max_xid - min_xid + 1;
+ len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
+ len += sizeof(struct bnx2fc_cmd_mgr);
+
+ cmgr = kzalloc(len, GFP_KERNEL);
+ if (!cmgr) {
+ printk(KERN_ERR PFX "failed to alloc cmgr\n");
+ return NULL;
+ }
+
+ cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
+ arr_sz, GFP_KERNEL);
+ if (!cmgr->free_list) {
+ printk(KERN_ERR PFX "failed to alloc free_list\n");
+ goto mem_err;
+ }
+
+ cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
+ arr_sz, GFP_KERNEL);
+ if (!cmgr->free_list_lock) {
+ printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ kfree(cmgr->free_list);
+ cmgr->free_list = NULL;
+ goto mem_err;
+ }
+
+ cmgr->hba = hba;
+ cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
+ for (i = 0; i < arr_sz; i++) {
+ INIT_LIST_HEAD(&cmgr->free_list[i]);
+ spin_lock_init(&cmgr->free_list_lock[i]);
+ }
+
+ /*
+ * Pre-allocated pool of bnx2fc_cmds.
+ * Last entry in the free list array is the free list
+ * of slow path requests.
+ */
+ xid = BNX2FC_MIN_XID;
+ num_pri_ios = num_ios - hba->elstm_xids;
+ for (i = 0; i < num_ios; i++) {
+ io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
+
+ if (!io_req) {
+ printk(KERN_ERR PFX "failed to alloc io_req\n");
+ goto mem_err;
+ }
+
+ INIT_LIST_HEAD(&io_req->link);
+ INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
+
+ io_req->xid = xid++;
+ if (i < num_pri_ios)
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[io_req->xid %
+ num_possible_cpus()]);
+ else
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[num_possible_cpus()]);
+ io_req++;
+ }
+
+ /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+ mem_size = num_ios * sizeof(struct io_bdt *);
+ cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool) {
+ printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+ goto mem_err;
+ }
+
+ mem_size = sizeof(struct io_bdt);
+ for (i = 0; i < num_ios; i++) {
+ cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool[i]) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "io_bdt_pool[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ /* Allocate an map fcoe_bdt_ctx structures */
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ bd_tbl_sz,
+ &bdt_info->bd_tbl_dma,
+ GFP_KERNEL);
+ if (!bdt_info->bd_tbl) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "bdt_tbl[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ return cmgr;
+
+mem_err:
+ bnx2fc_cmd_mgr_free(cmgr);
+ return NULL;
+}
+
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
+{
+ struct io_bdt *bdt_info;
+ struct bnx2fc_hba *hba = cmgr->hba;
+ size_t bd_tbl_sz;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = hba->max_xid;
+ int num_ios;
+ int i;
+
+ num_ios = max_xid - min_xid + 1;
+
+ /* Free fcoe_bdt_ctx structures */
+ if (!cmgr->io_bdt_pool)
+ goto free_cmd_pool;
+
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ if (bdt_info->bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
+ bdt_info->bd_tbl,
+ bdt_info->bd_tbl_dma);
+ bdt_info->bd_tbl = NULL;
+ }
+ }
+
+ /* Destroy io_bdt pool */
+ for (i = 0; i < num_ios; i++) {
+ kfree(cmgr->io_bdt_pool[i]);
+ cmgr->io_bdt_pool[i] = NULL;
+ }
+
+ kfree(cmgr->io_bdt_pool);
+ cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+ kfree(cmgr->free_list_lock);
+
+ /* Destroy cmd pool */
+ if (!cmgr->free_list)
+ goto free_cmgr;
+
+ for (i = 0; i < num_possible_cpus() + 1; i++) {
+ struct bnx2fc_cmd *tmp, *io_req;
+
+ list_for_each_entry_safe(io_req, tmp,
+ &cmgr->free_list[i], link) {
+ list_del(&io_req->link);
+ kfree(io_req);
+ }
+ }
+ kfree(cmgr->free_list);
+free_cmgr:
+ /* Free command manager itself */
+ kfree(cmgr);
+}
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ int index = RESERVE_FREE_LIST_INDEX;
+ u32 free_sqes;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = tgt->max_sqes;
+ switch (type) {
+ case BNX2FC_TASK_MGMT_CMD:
+ max_sqes = BNX2FC_TM_MAX_SQES;
+ break;
+ case BNX2FC_ELS:
+ max_sqes = BNX2FC_ELS_MAX_SQES;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ free_sqes = atomic_read(&tgt->free_sqes);
+ if ((list_empty(&(cmd_mgr->free_list[index]))) ||
+ (tgt->num_active_ios.counter >= max_sqes) ||
+ (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
+ BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
+ "ios(%d):sqes(%d)\n",
+ tgt->num_active_ios.counter, tgt->max_sqes);
+ if (list_empty(&(cmd_mgr->free_list[index])))
+ printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[index].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ atomic_dec(&tgt->free_sqes);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+ io_req->cmd_type = type;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 free_sqes;
+ u32 max_sqes;
+ u16 xid;
+ int index = get_cpu();
+
+ max_sqes = BNX2FC_SCSI_MAX_SQES;
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ free_sqes = atomic_read(&tgt->free_sqes);
+ if ((list_empty(&cmd_mgr->free_list[index])) ||
+ (tgt->num_active_ios.counter >= max_sqes) ||
+ (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+ put_cpu();
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[index].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ atomic_dec(&tgt->free_sqes);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+ put_cpu();
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+void bnx2fc_cmd_release(struct kref *ref)
+{
+ struct bnx2fc_cmd *io_req = container_of(ref,
+ struct bnx2fc_cmd, refcount);
+ struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+ int index;
+
+ if (io_req->cmd_type == BNX2FC_SCSI_CMD)
+ index = io_req->xid % num_possible_cpus();
+ else
+ index = RESERVE_FREE_LIST_INDEX;
+
+
+ spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ bnx2fc_free_mp_resc(io_req);
+ cmd_mgr->cmds[io_req->xid] = NULL;
+ /* Delete IO from retire queue */
+ list_del_init(&io_req->link);
+ /* Add it to the free list */
+ list_add(&io_req->link,
+ &cmd_mgr->free_list[index]);
+ atomic_dec(&io_req->tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
+
+}
+
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ size_t sz = sizeof(struct fcoe_bd_ctx);
+
+ /* clear tm flags */
+ mp_req->tm_flags = 0;
+ if (mp_req->mp_req_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_req_bd,
+ mp_req->mp_req_bd_dma);
+ mp_req->mp_req_bd = NULL;
+ }
+ if (mp_req->mp_resp_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_resp_bd,
+ mp_req->mp_resp_bd_dma);
+ mp_req->mp_resp_bd = NULL;
+ }
+ if (mp_req->req_buf) {
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ mp_req->req_buf,
+ mp_req->req_buf_dma);
+ mp_req->req_buf = NULL;
+ }
+ if (mp_req->resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ mp_req->resp_buf,
+ mp_req->resp_buf_dma);
+ mp_req->resp_buf = NULL;
+ }
+}
+
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fcoe_bd_ctx *mp_req_bd;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ dma_addr_t addr;
+ size_t sz;
+
+ mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+ memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ mp_req->req_len = sizeof(struct fcp_cmnd);
+ io_req->data_xfer_len = mp_req->req_len;
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ &mp_req->req_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->req_buf) {
+ printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ &mp_req->resp_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->resp_buf) {
+ printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
+
+ /* Allocate and map mp_req_bd and mp_resp_bd */
+ sz = sizeof(struct fcoe_bd_ctx);
+ mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_req_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_req_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_resp_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_resp_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ /* Fill bd table */
+ addr = mp_req->req_buf_dma;
+ mp_req_bd = mp_req->mp_req_bd;
+ mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_req_bd->buf_len = CNIC_PAGE_SIZE;
+ mp_req_bd->flags = 0;
+
+ /*
+ * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd
+ * entry in the bd table
+ */
+ mp_resp_bd = mp_req->mp_resp_bd;
+ addr = mp_req->resp_buf_dma;
+ mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
+ mp_resp_bd->flags = 0;
+
+ return SUCCESS;
+}
+
+static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+ struct fcoe_port *port;
+ struct bnx2fc_interface *interface;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ struct bnx2fc_mp_req *tm_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct Scsi_Host *host = sc_cmd->device->host;
+ struct fc_frame_header *fc_hdr;
+ struct fcp_cmnd *fcp_cmnd;
+ int task_idx, index;
+ int rc = SUCCESS;
+ u16 xid;
+ u32 sid, did;
+ unsigned long start = jiffies;
+
+ lport = shost_priv(host);
+ rport = starget_to_rport(scsi_target(sc_cmd->device));
+ port = lport_priv(lport);
+ interface = port->priv;
+
+ if (rport == NULL) {
+ printk(KERN_ERR PFX "device_reset: rport is NULL\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ rp = rport->dd_data;
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "device_reset: link is not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+retry_tmf:
+ io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
+ if (!io_req) {
+ if (time_after(jiffies, start + HZ)) {
+ printk(KERN_ERR PFX "tmf: Failed TMF");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ msleep(20);
+ goto retry_tmf;
+ }
+ /* Initialize rest of io_req fields */
+ io_req->sc_cmd = sc_cmd;
+ io_req->port = port;
+ io_req->tgt = tgt;
+
+ tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+
+ rc = bnx2fc_init_mp_req(io_req);
+ if (rc == FAILED) {
+ printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ goto tmf_err;
+ }
+
+ /* Set TM flags */
+ io_req->io_req_flags = 0;
+ tm_req->tm_flags = tm_flags;
+
+ /* Fill FCP_CMND */
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+ fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+ memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
+ fcp_cmnd->fc_dl = 0;
+
+ /* Fill FC header */
+ fc_hdr = &(tm_req->req_fc_hdr);
+ sid = tgt->sid;
+ did = rport->port_id;
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ /* Obtain exchange id */
+ xid = io_req->xid;
+
+ BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(io_req, task);
+
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ /* Obtain free SQ entry */
+ spin_lock_bh(&tgt->tgt_lock);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_tm_queue */
+ io_req->on_tmf_queue = 1;
+ list_add_tail(&io_req->link, &tgt->active_tm_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_TM_TIMEOUT * HZ);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ io_req->wait_for_comp = 0;
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
+ set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+ if (io_req->on_tmf_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ }
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!rc)
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!rc) {
+ BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
+ rc = FAILED;
+ } else {
+ BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
+ rc = SUCCESS;
+ }
+tmf_err:
+ return rc;
+}
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct bnx2fc_interface *interface;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *abts_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fc_frame_header *fc_hdr;
+ struct bnx2fc_mp_req *abts_req;
+ int task_idx, index;
+ u32 sid, did;
+ u16 xid;
+ int rc = SUCCESS;
+ u32 r_a_tov = rdata->r_a_tov;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+ port = io_req->port;
+ interface = port->priv;
+ lport = port->lport;
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (rport == NULL) {
+ printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+ if (!abts_io_req) {
+ printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ abts_io_req->sc_cmd = NULL;
+ abts_io_req->port = port;
+ abts_io_req->tgt = tgt;
+ abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+ abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+ memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ /* Fill FC header */
+ fc_hdr = &(abts_req->req_fc_hdr);
+
+ /* Obtain oxid and rxid for the original exchange to be aborted */
+ fc_hdr->fh_ox_id = htons(io_req->xid);
+ fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
+
+ sid = tgt->sid;
+ did = rport->port_id;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+ FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ xid = abts_io_req->xid;
+ BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(abts_io_req, task);
+
+ /*
+ * ABTS task is a temporary task that will be cleaned up
+ * irrespective of ABTS response. We need to start the timer
+ * for the original exchange, as the CQE is posted for the original
+ * IO request.
+ *
+ * Timer for ABTS is started only when it is originated by a
+ * TM request. For the ABTS issued as part of ULP timeout,
+ * scsi-ml maintains the timers.
+ */
+
+ /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+ bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+ return rc;
+}
+
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct bnx2fc_interface *interface;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *seq_clnp_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ int task_idx, index;
+ u16 xid;
+ int rc = 0;
+
+ BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
+ orig_io_req->xid);
+ kref_get(&orig_io_req->refcount);
+
+ port = orig_io_req->port;
+ interface = port->priv;
+ lport = port->lport;
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
+ rc = -ENOMEM;
+ goto cleanup_err;
+ }
+
+ seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
+ if (!seq_clnp_req) {
+ printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ rc = -ENOMEM;
+ kfree(cb_arg);
+ goto cleanup_err;
+ }
+ /* Initialize rest of io_req fields */
+ seq_clnp_req->sc_cmd = NULL;
+ seq_clnp_req->port = port;
+ seq_clnp_req->tgt = tgt;
+ seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = seq_clnp_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ cb_arg->aborted_io_req = orig_io_req;
+ cb_arg->io_req = seq_clnp_req;
+ cb_arg->r_ctl = r_ctl;
+ cb_arg->offset = offset;
+ seq_clnp_req->cb_arg = cb_arg;
+
+ printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
+ bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+cleanup_err:
+ return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_interface *interface;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *cleanup_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int task_idx, index;
+ u16 xid, orig_xid;
+ int rc = 0;
+
+ /* ASSUMPTION: called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+ port = io_req->port;
+ interface = port->priv;
+ lport = port->lport;
+
+ cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+ if (!cleanup_io_req) {
+ printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ rc = -1;
+ goto cleanup_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ cleanup_io_req->sc_cmd = NULL;
+ cleanup_io_req->port = port;
+ cleanup_io_req->tgt = tgt;
+ cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = cleanup_io_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ orig_xid = io_req->xid;
+
+ BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
+ bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+ return rc;
+}
+
+/**
+ * bnx2fc_eh_target_reset: Reset a target
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ int logo_issued;
+ int rc = SUCCESS;
+ int wait_cnt = 0;
+
+ BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+ logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ io_req->wait_for_comp = 0;
+ /*
+ * release the reference taken in eh_abort to allow the
+ * target to re-login after flushing IOs
+ */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ if (!logo_issued) {
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ do {
+ msleep(BNX2FC_RELOGIN_WAIT_TIME);
+ if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+ rc = FAILED;
+ break;
+ }
+ } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+ }
+ spin_lock_bh(&tgt->tgt_lock);
+ return rc;
+}
+/**
+ * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
+ * SCSI command
+ *
+ * @sc_cmd: SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_cmd *io_req;
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt;
+ int rc = FAILED;
+
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ lport = shost_priv(sc_cmd->device->host);
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ printk(KERN_ERR PFX "eh_abort: link not ready\n");
+ return rc;
+ }
+
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ if (!io_req) {
+ /* Command might have just completed */
+ printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+
+ /* Hold IO request across abort processing */
+ kref_get(&io_req->refcount);
+
+ BUG_ON(tgt != io_req->tgt);
+
+ /* Remove the io_req from the active_q. */
+ /*
+ * Task Mgmt functions (LUN RESET & TGT RESET) will not
+ * issue an ABTS on this particular IO req, as the
+ * io_req is no longer in the active_q.
+ */
+ if (tgt->flush_in_prog) {
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
+ "flush in progress\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ if (io_req->on_active_queue == 0) {
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
+ "not on active_q\n", io_req->xid);
+ /*
+ * This condition can happen only due to the FW bug,
+ * where we do not receive cleanup response from
+ * the FW. Handle this case gracefully by erroring
+ * back the IO request to SCSI-ml
+ */
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ /*
+ * Only eh_abort processing will remove the IO from
+ * active_cmd_q before processing the request. this is
+ * done to avoid race conditions between IOs aborted
+ * as part of task management completion and eh_abort
+ * processing
+ */
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+
+ init_completion(&io_req->tm_done);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
+ "already in abts processing\n", io_req->xid);
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ rc = bnx2fc_expl_logo(lport, io_req);
+ /* This only occurs when an task abort was requested while ABTS
+ is in progress. Setting the IO_CLEANUP flag will skip the
+ RRQ process in the case when the fw generated SCSI_CMD cmpl
+ was a result from the ABTS request rather than the CLEANUP
+ request */
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ goto out;
+ }
+
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ io_req->wait_for_comp = 1;
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == FAILED) {
+ bnx2fc_initiate_cleanup(io_req);
+ spin_unlock_bh(&tgt->tgt_lock);
+ wait_for_completion(&io_req->tm_done);
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ goto done;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+ rc = SUCCESS;
+ } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+ /* Let the scsi-ml try to recover this command */
+ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
+ io_req->xid);
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
+ } else {
+ /*
+ * We come here even when there was a race condition
+ * between timeout and abts completion, and abts
+ * completion happens just in time.
+ */
+ BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+ rc = SUCCESS;
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+done:
+ /* release the reference taken in eh_abort */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+}
+
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state)
+{
+ struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
+ struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
+ u32 offset = cb_arg->offset;
+ enum fc_rctl r_ctl = cb_arg->r_ctl;
+ int rc = 0;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+
+ BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
+ "cmd_type = %d\n",
+ seq_clnp_req->xid, seq_clnp_req->cmd_type);
+
+ if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
+ printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
+ seq_clnp_req->xid);
+ goto free_cb_arg;
+ }
+
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
+ " IO will abort\n");
+ seq_clnp_req->cb_arg = NULL;
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+free_cb_arg:
+ kfree(cb_arg);
+ return;
+}
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ u32 r_ctl;
+ u32 r_a_tov = FC_DEF_R_A_TOV;
+ u8 issue_rrq = 0;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+
+ BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->xid,
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+ " this io\n");
+ return;
+ }
+
+ /* Do not issue RRQ as this IO is already cleanedup */
+ if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags))
+ goto io_compl;
+
+ /*
+ * For ABTS issued due to SCSI eh_abort_handler, timeout
+ * values are maintained by scsi-ml itself. Cancel timeout
+ * in case ABTS issued as part of task management function
+ * or due to FW error.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
+
+ switch (r_ctl) {
+ case FC_RCTL_BA_ACC:
+ /*
+ * Dont release this cmd yet. It will be relesed
+ * after we get RRQ response
+ */
+ BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+ issue_rrq = 1;
+ break;
+
+ case FC_RCTL_BA_RJT:
+ BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+ break;
+ default:
+ printk(KERN_ERR PFX "Unknown ABTS response\n");
+ break;
+ }
+
+ if (issue_rrq) {
+ BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+ set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+ set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+ if (io_req->wait_for_comp) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ } else {
+ /*
+ * We end up here when ABTS is issued as
+ * in asynchronous context, i.e., as part
+ * of task management completion, or
+ * when FW error is received or when the
+ * ABTS is issued when the IO is timed
+ * out.
+ */
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ }
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_cmd *cmd, *tmp;
+ u64 tm_lun = sc_cmd->device->lun;
+ u64 lun;
+ int rc = 0;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+ /*
+ * Walk thru the active_ios queue and ABORT the IO
+ * that matches with the LUN that was reset
+ */
+ list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
+ BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+ lun = cmd->sc_cmd->device->lun;
+ if (lun == tm_lun) {
+ /* Initiate ABTS on this cmd */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release);
+ /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+ /* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+ } else
+ printk(KERN_ERR PFX "lun_rst: abts already in"
+ " progress for this IO 0x%x\n",
+ cmd->xid);
+ }
+ }
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_cmd *cmd, *tmp;
+ int rc = 0;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+ /*
+ * Walk thru the active_ios queue and ABORT the IO
+ * that matches with the LUN that was reset
+ */
+ list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
+ BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+ /* Initiate ABTS */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+ /* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+
+ } else
+ printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+ " for this IO 0x%x\n", cmd->xid);
+ }
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *tm_req;
+ struct fc_frame_header *fc_hdr;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ u64 *hdr;
+ u64 *temp_hdr;
+ void *rsp_buf;
+
+ /* Called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+ if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+ else {
+ /* TM has already timed out and we got
+ * delayed completion. Ignore completion
+ * processing.
+ */
+ return;
+ }
+
+ tm_req = &(io_req->mp_req);
+ fc_hdr = &(tm_req->resp_fc_hdr);
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ tm_req->resp_len =
+ task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
+
+ rsp_buf = tm_req->resp_buf;
+
+ if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ bnx2fc_parse_fcp_rsp(io_req,
+ (struct fcoe_fcp_rsp_payload *)
+ rsp_buf, num_rq);
+ if (io_req->fcp_rsp_code == 0) {
+ /* TM successful */
+ if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
+ bnx2fc_lun_reset_cmpl(io_req);
+ else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
+ bnx2fc_tgt_reset_cmpl(io_req);
+ }
+ } else {
+ printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
+ fc_hdr->fh_r_ctl);
+ }
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
+ return;
+ }
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+
+ default:
+ BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+
+ sc_cmd = io_req->sc_cmd;
+ io_req->sc_cmd = NULL;
+
+ /* check if the io_req exists in tgt's tmf_q */
+ if (io_req->on_tmf_queue) {
+
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ } else {
+
+ printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
+ return;
+ }
+
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp) {
+ BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
+ complete(&io_req->tm_done);
+ }
+}
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+ while (sg_len) {
+ if (sg_len >= BNX2FC_BD_SPLIT_SZ)
+ frag_size = BNX2FC_BD_SPLIT_SZ;
+ else
+ frag_size = sg_len;
+ bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
+ bd[bd_index + sg_frags].buf_len = (u16)frag_size;
+ bd[bd_index + sg_frags].flags = 0;
+
+ addr += (u64) frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+
+}
+
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int sg_count = 0;
+ int bd_count = 0;
+ int sg_frags;
+ unsigned int sg_len;
+ u64 addr;
+ int i;
+
+ /*
+ * Use dma_map_sg directly to ensure we're using the correct
+ * dev struct off of pcidev.
+ */
+ sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ if (sg_len > BNX2FC_MAX_BD_LEN) {
+ sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
+ bd_count);
+ } else {
+
+ sg_frags = 1;
+ bd[bd_count].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buf_addr_hi = addr >> 32;
+ bd[bd_count].buf_len = (u16)sg_len;
+ bd[bd_count].flags = 0;
+ }
+ bd_count += sg_frags;
+ byte_count += sg_len;
+ }
+ if (byte_count != scsi_bufflen(sc))
+ printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
+ "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
+ io_req->xid);
+ return bd_count;
+}
+
+static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int bd_count;
+
+ if (scsi_sg_count(sc)) {
+ bd_count = bnx2fc_map_sg(io_req);
+ if (bd_count == 0)
+ return -ENOMEM;
+ } else {
+ bd_count = 0;
+ bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+ bd[0].buf_len = bd[0].flags = 0;
+ }
+ io_req->bd_tbl->bd_valid = bd_count;
+
+ return 0;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+
+ /*
+ * Use dma_unmap_sg directly to ensure we're using the correct
+ * dev struct off of pcidev.
+ */
+ if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
+ dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+ io_req->bd_tbl->bd_valid = 0;
+ }
+}
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+ int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
+
+ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_flags = io_req->io_req_flags;
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+ u32 rq_buff_len = 0;
+ int i;
+ unsigned char *rq_data;
+ unsigned char *dummy;
+ int fcp_sns_len = 0;
+ int fcp_rsp_len = 0;
+
+ io_req->fcp_status = FC_GOOD;
+ io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+ io_req->scsi_comp_flags = rsp_flags;
+ CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+ fcp_rsp->scsi_status_code;
+
+ /* Fetch fcp_rsp_info and fcp_sns_info if available */
+ if (num_rq) {
+
+ /*
+ * We do not anticipate num_rq >1, as the linux defined
+ * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO
+ * 256 bytes of single rq buffer is good enough to hold this.
+ */
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+ fcp_rsp_len = rq_buff_len
+ = fcp_rsp->fcp_rsp_len;
+ }
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+ fcp_sns_len = fcp_rsp->fcp_sns_len;
+ rq_buff_len += fcp_rsp->fcp_sns_len;
+ }
+
+ io_req->fcp_rsp_len = fcp_rsp_len;
+ io_req->fcp_sns_len = fcp_sns_len;
+
+ if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+ /* Invalid sense sense length. */
+ printk(KERN_ERR PFX "invalid sns length %d\n",
+ rq_buff_len);
+ /* reset rq_buff_len */
+ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
+ }
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ /* fetch fcp_rsp_code */
+ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+ /* Only for task management function */
+ io_req->fcp_rsp_code = rq_data[3];
+ printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+ io_req->fcp_rsp_code);
+ }
+
+ /* fetch sense data */
+ rq_data += fcp_rsp_len;
+
+ if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+ printk(KERN_ERR PFX "Truncating sense buffer\n");
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+ }
+}
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host: The Scsi_Host the command was issued to
+ * @sc_cmd: struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ int rc = 0;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ /*
+ * Session is not offloaded yet. Let SCSI-ml retry
+ * the command.
+ */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ if (tgt->retry_delay_timestamp) {
+ if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+ tgt->retry_delay_timestamp = 0;
+ } else {
+ /* If retry_delay timer is active, flow off the ML */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ }
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ io_req = bnx2fc_cmd_alloc(tgt);
+ if (!io_req) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd_tgtlock;
+ }
+ io_req->sc_cmd = sc_cmd;
+
+ if (bnx2fc_post_io_req(tgt, io_req)) {
+ printk(KERN_ERR PFX "Unable to post io_req\n");
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd_tgtlock;
+ }
+
+exit_qcmd_tgtlock:
+ spin_unlock_bh(&tgt->tgt_lock);
+exit_qcmd:
+ return rc;
+}
+
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ struct fcoe_fcp_rsp_payload *fcp_rsp;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct Scsi_Host *host;
+
+
+ /* scsi_cmd_cmpl is called with tgt lock held */
+
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ /* we will not receive ABTS response for this IO */
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing "
+ "this scsi cmd\n");
+ }
+
+ /* Cancel the timeout_work, as we received IO completion */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ sc_cmd = io_req->sc_cmd;
+ if (sc_cmd == NULL) {
+ printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
+ return;
+ }
+
+ /* Fetch fcp_rsp from task context and perform cmd completion */
+ fcp_rsp = (struct fcoe_fcp_rsp_payload *)
+ &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
+
+ /* parse fcp_rsp and obtain sense data from RQ if available */
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+
+ host = sc_cmd->device->host;
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ return;
+ }
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ } else {
+ /* This should not happen, but could have been pulled
+ * by bnx2fc_flush_active_ios(), or during a race
+ * between command abort and (late) completion.
+ */
+ BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
+ if (io_req->wait_for_comp)
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ }
+
+ bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
+
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
+ " fcp_resid = 0x%x\n",
+ io_req->cdb_status, io_req->fcp_resid);
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+ if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+ io_req->cdb_status == SAM_STAT_BUSY) {
+ /* Set the jiffies + retry_delay_timer * 100ms
+ for the rport/tgt */
+ tgt->retry_delay_timestamp = jiffies +
+ fcp_rsp->retry_delay_timer * HZ / 10;
+ }
+
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+ default:
+ printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct fc_lport *lport = port->lport;
+ struct fc_stats *stats;
+ int task_idx, index;
+ u16 xid;
+
+ /* bnx2fc_post_io_req() is called with the tgt_lock held */
+
+ /* Initialize rest of io_req fields */
+ io_req->cmd_type = BNX2FC_SCSI_CMD;
+ io_req->port = port;
+ io_req->tgt = tgt;
+ io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_req->io_req_flags = BNX2FC_READ;
+ stats->InputRequests++;
+ stats->InputBytes += io_req->data_xfer_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_req->io_req_flags = BNX2FC_WRITE;
+ stats->OutputRequests++;
+ stats->OutputBytes += io_req->data_xfer_len;
+ } else {
+ io_req->io_req_flags = 0;
+ stats->ControlRequests++;
+ }
+ put_cpu();
+
+ xid = io_req->xid;
+
+ /* Build buffer descriptor list for firmware from sg list */
+ if (bnx2fc_build_bd_list_from_sg(io_req)) {
+ printk(KERN_ERR PFX "BD list creation failed\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ return -EAGAIN;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_task(io_req, task);
+
+ if (tgt->flush_in_prog) {
+ printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ return -EAGAIN;
+ }
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "Session not ready...post_io\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ return -EAGAIN;
+ }
+
+ /* Time IO req */
+ if (tgt->io_timeout)
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_cmd_queue */
+
+ io_req->on_active_queue = 1;
+ /* move io_req from pending_queue to active_queue */
+ list_add_tail(&io_req->link, &tgt->active_cmd_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
new file mode 100644
index 000000000..c66c70841
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -0,0 +1,910 @@
+/* bnx2fc_tgt.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+ /* fake upload completion */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+ /* NOTE: This function should never be called, as
+ * offload should never timeout
+ */
+ /*
+ * If the timer has expired, this session is dead
+ * Clear offloaded flag and logout of this device.
+ * Since OFFLOADED flag is cleared, this case
+ * will be considered as offload error and the
+ * port will be logged off, and conn_id, session
+ * resources are freed up in bnx2fc_offload_session
+ */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->ofld_timer);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ int rval;
+ int i = 0;
+
+ /* Initialize bnx2fc_rport */
+ /* NOTE: tgt is already bzero'd */
+ rval = bnx2fc_init_tgt(tgt, port, rdata);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate conn id for "
+ "port_id (%6x)\n", rport->port_id);
+ goto tgt_init_err;
+ }
+
+ /* Allocate session resources */
+ rval = bnx2fc_alloc_session_resc(hba, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate resources\n");
+ goto ofld_err;
+ }
+
+ /*
+ * Initialize FCoE session offload process.
+ * Upon completion of offload process add
+ * rport to list of rports
+ */
+retry_ofld:
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_ofld_req(port, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "ofld_req failed\n");
+ goto ofld_err;
+ }
+
+ /*
+ * wait for the session is offloaded and enabled. 3 Secs
+ * should be ample time for this process to complete.
+ */
+ bnx2fc_ofld_wait(tgt);
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
+ &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
+ "retry ofld..%d\n", i++);
+ msleep_interruptible(1000);
+ if (i > 3) {
+ i = 0;
+ goto ofld_err;
+ }
+ goto retry_ofld;
+ }
+ goto ofld_err;
+ }
+ if (bnx2fc_map_doorbell(tgt)) {
+ printk(KERN_ERR PFX "map doorbell failed - no mem\n");
+ goto ofld_err;
+ }
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_enable_req(port, tgt);
+ if (rval) {
+ pr_err(PFX "enable session failed\n");
+ goto ofld_err;
+ }
+ bnx2fc_ofld_wait(tgt);
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
+ goto ofld_err;
+ return;
+
+ofld_err:
+ /* couldn't offload the session. log off from this rport */
+ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ /* Free session resources */
+ bnx2fc_free_session_resc(hba, tgt);
+tgt_init_err:
+ if (tgt->fcoe_conn_id != -1)
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ lport->tt.rport_logoff(rdata);
+}
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_cmd *io_req;
+ struct bnx2fc_cmd *tmp;
+ int rc;
+ int i = 0;
+ BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
+ tgt->num_active_ios.counter);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 1;
+
+ list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
+ i++;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "cleaned up\n");
+ complete(&io_req->tm_done);
+ }
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ }
+
+ set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+
+ /* Do not issue cleanup when disable request failed */
+ if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
+ bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
+ else {
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+ }
+
+ list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
+ i++;
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
+
+ list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
+ i++;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+
+ BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+
+ /* Do not issue cleanup when disable request failed */
+ if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
+ bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
+ else {
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+ }
+
+ list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
+ i++;
+ list_del_init(&io_req->link);
+
+ BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "in retire_q\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+
+ clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+
+ BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
+ i = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+ /* wait for active_ios to go to 0 */
+ while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
+ msleep(25);
+ if (tgt->num_active_ios.counter != 0)
+ printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
+ " active_ios = %d\n",
+ tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->upld_timer);
+}
+
+static void bnx2fc_upload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+
+ BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+ tgt->num_active_ios.counter);
+
+ /*
+ * Called with hba->hba_mutex held.
+ * This is a blocking call
+ */
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_disable_req(port, tgt);
+
+ /*
+ * wait for upload to complete. 3 Secs
+ * should be sufficient time for this process to complete.
+ */
+ BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+ bnx2fc_upld_wait(tgt);
+
+ /*
+ * traverse thru the active_q and tmf_q and cleanup
+ * IOs in these lists
+ */
+ BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+ tgt->flags);
+ bnx2fc_flush_active_ios(tgt);
+
+ /* Issue destroy KWQE */
+ if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_destroy_req(hba, tgt);
+
+ /* wait for destroy to complete */
+ bnx2fc_upld_wait(tgt);
+
+ if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+ printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+ BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+ tgt->flags);
+
+ } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
+ printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
+ " not sent to FW\n");
+ } else {
+ printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+ " not sent to FW\n");
+ }
+
+ /* Free session resources */
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+}
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata)
+{
+
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
+ struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
+
+ tgt->rport = rport;
+ tgt->rdata = rdata;
+ tgt->port = port;
+
+ if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+ BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+ tgt->fcoe_conn_id = -1;
+ return -1;
+ }
+
+ tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+ if (tgt->fcoe_conn_id == -1)
+ return -1;
+
+ BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+ tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+ tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+ tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+ atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);
+
+ /* Initialize the toggle bit */
+ tgt->sq_curr_toggle_bit = 1;
+ tgt->cq_curr_toggle_bit = 1;
+ tgt->sq_prod_idx = 0;
+ tgt->cq_cons_idx = 0;
+ tgt->rq_prod_idx = 0x8000;
+ tgt->rq_cons_idx = 0;
+ atomic_set(&tgt->num_active_ios, 0);
+ tgt->retry_delay_timestamp = 0;
+
+ if (rdata->flags & FC_RP_FLAGS_RETRY &&
+ rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+ !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
+ tgt->dev_type = TYPE_TAPE;
+ tgt->io_timeout = 0; /* use default ULP timeout */
+ } else {
+ tgt->dev_type = TYPE_DISK;
+ tgt->io_timeout = BNX2FC_IO_TIMEOUT;
+ }
+
+ /* initialize sq doorbell */
+ sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+ sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ /* initialize rx doorbell */
+ rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
+ (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
+ (B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
+ rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
+ (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);
+
+ spin_lock_init(&tgt->tgt_lock);
+ spin_lock_init(&tgt->cq_lock);
+
+ /* Initialize active_cmd_queue list */
+ INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+ /* Initialize IO retire queue */
+ INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+ INIT_LIST_HEAD(&tgt->els_queue);
+
+ /* Initialize active_tm_queue list */
+ INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+ init_waitqueue_head(&tgt->ofld_wait);
+ init_waitqueue_head(&tgt->upld_wait);
+
+ return 0;
+}
+
+/**
+ * This event_callback is called after successful completion of libfc
+ * initiated target login. bnx2fc can proceed with initiating the session
+ * establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct fc_rport *rport = rdata->rport;
+ struct fc_rport_libfc_priv *rp;
+ struct bnx2fc_rport *tgt;
+ u32 port_id;
+
+ BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+ event, rdata->ids.port_id);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (!rport) {
+ printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
+ break;
+ }
+
+ rp = rport->dd_data;
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ /*
+ * bnx2fc_rport structure doesn't exist for
+ * directory server.
+ * We should not come here, as lport will
+ * take care of fabric login
+ */
+ printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
+ rdata->ids.port_id);
+ break;
+ }
+
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ BNX2FC_HBA_DBG(lport, "not FCP type target."
+ " not offloading\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+ " not offloading\n");
+ break;
+ }
+
+ /*
+ * Offlaod process is protected with hba mutex.
+ * Use the same mutex_lock for upload process too
+ */
+ mutex_lock(&hba->hba_mutex);
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ /* This can happen when ADISC finds the same target */
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+ mutex_unlock(&hba->hba_mutex);
+ return;
+ }
+
+ /*
+ * Offload the session. This is a blocking call, and will
+ * wait until the session is offloaded.
+ */
+ bnx2fc_offload_session(port, tgt, rdata);
+
+ BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+ /* Session is offloaded and enabled. */
+ BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+ /* This counter is protected with hba mutex */
+ hba->num_ofld_sess++;
+
+ set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ } else {
+ /*
+ * Offload or enable would have failed.
+ * In offload/enable completion path, the
+ * rport would have already been removed
+ */
+ BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+ "offloaded flag not set\n");
+ }
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ port_id = rdata->ids.port_id;
+ if (port_id == FC_FID_DIR_SERV)
+ break;
+
+ if (!rport) {
+ printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
+ port_id);
+ break;
+ }
+ rp = rport->dd_data;
+ mutex_lock(&hba->hba_mutex);
+ /*
+ * Perform session upload. Note that rdata->peers is already
+ * removed from disc->rports list before we get this event.
+ */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ }
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+ bnx2fc_upload_session(port, tgt);
+ hba->num_ofld_sess--;
+ BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ /*
+ * Try to wake up the linkdown wait thread. If num_ofld_sess
+ * is 0, the waiting therad wakes up
+ */
+ if ((hba->wait_for_link_down) &&
+ (hba->num_ofld_sess == 0)) {
+ wake_up_interruptible(&hba->shutdown_wait);
+ }
+ if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+ printk(KERN_ERR PFX "Relogin to the tgt\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_login(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ mutex_unlock(&hba->hba_mutex);
+
+ break;
+
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port: fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id)
+{
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct bnx2fc_rport *tgt;
+ struct fc_rport_priv *rdata;
+ int i;
+
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if ((tgt) && (tgt->port == port)) {
+ rdata = tgt->rdata;
+ if (rdata->ids.port_id == port_id) {
+ if (rdata->rp_state != RPORT_ST_DELETE) {
+ BNX2FC_TGT_DBG(tgt, "rport "
+ "obtained\n");
+ return tgt;
+ } else {
+ BNX2FC_TGT_DBG(tgt, "rport 0x%x "
+ "is in DELETED state\n",
+ rdata->ids.port_id);
+ return NULL;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba: pointer to adapter structure
+ * @tgt: pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ u32 conn_id, next;
+
+ /* called with hba mutex held */
+
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both hba mutex and hba lock. Atleast hba mutex or
+ * hba lock needs to be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ next = hba->next_conn_id;
+ conn_id = hba->next_conn_id++;
+ if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
+ hba->next_conn_id = 0;
+
+ while (hba->tgt_ofld_list[conn_id] != NULL) {
+ conn_id++;
+ if (conn_id == BNX2FC_NUM_MAX_SESS)
+ conn_id = 0;
+
+ if (conn_id == next) {
+ /* No free conn_ids are available */
+ spin_unlock_bh(&hba->hba_lock);
+ return -1;
+ }
+ }
+ hba->tgt_ofld_list[conn_id] = tgt;
+ tgt->fcoe_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+ return conn_id;
+}
+
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
+{
+ /* called with hba mutex held */
+ spin_lock_bh(&hba->hba_lock);
+ hba->tgt_ofld_list[conn_id] = NULL;
+ spin_unlock_bh(&hba->hba_lock);
+}
+
+/**
+ *bnx2fc_alloc_session_resc - Allocate qp resources for the session
+ *
+ */
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ dma_addr_t page;
+ int num_pages;
+ u32 *pbl;
+
+ /* Allocate and map SQ */
+ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
+ if (!tgt->sq) {
+ printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
+ tgt->sq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->sq, 0, tgt->sq_mem_size);
+
+ /* Allocate and map CQ */
+ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
+ if (!tgt->cq) {
+ printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
+ tgt->cq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->cq, 0, tgt->cq_mem_size);
+
+ /* Allocate and map RQ and RQ PBL */
+ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
+ if (!tgt->rq) {
+ printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
+ tgt->rq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->rq, 0, tgt->rq_mem_size);
+
+ tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
+ if (!tgt->rq_pbl) {
+ printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
+ tgt->rq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
+ num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
+ page = tgt->rq_dma;
+ pbl = (u32 *)tgt->rq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += CNIC_PAGE_SIZE;
+ }
+
+ /* Allocate and map XFERQ */
+ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ &tgt->xferq_dma, GFP_KERNEL);
+ if (!tgt->xferq) {
+ printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
+ tgt->xferq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->xferq, 0, tgt->xferq_mem_size);
+
+ /* Allocate and map CONFQ & CONFQ PBL */
+ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ &tgt->confq_dma, GFP_KERNEL);
+ if (!tgt->confq) {
+ printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
+ tgt->confq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->confq, 0, tgt->confq_mem_size);
+
+ tgt->confq_pbl_size =
+ (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ tgt->confq_pbl_size =
+ (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
+ if (!tgt->confq_pbl) {
+ printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
+ tgt->confq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+ num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
+ page = tgt->confq_dma;
+ pbl = (u32 *)tgt->confq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += CNIC_PAGE_SIZE;
+ }
+
+ /* Allocate and map ConnDB */
+ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
+ if (!tgt->conn_db) {
+ printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
+ tgt->conn_db_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+
+ /* Allocate and map LCQ */
+ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
+
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
+
+ if (!tgt->lcq) {
+ printk(KERN_ERR PFX "unable to allocate lcq %d\n",
+ tgt->lcq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+ tgt->conn_db->rq_prod = 0x8000;
+
+ return 0;
+
+mem_alloc_failure:
+ return -ENOMEM;
+}
+
+/**
+ * bnx2i_free_session_resc - free qp resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ void __iomem *ctx_base_ptr;
+
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+ spin_lock_bh(&tgt->cq_lock);
+ ctx_base_ptr = tgt->ctx_base;
+ tgt->ctx_base = NULL;
+
+ /* Free LCQ */
+ if (tgt->lcq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ tgt->lcq, tgt->lcq_dma);
+ tgt->lcq = NULL;
+ }
+ /* Free connDB */
+ if (tgt->conn_db) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+ tgt->conn_db, tgt->conn_db_dma);
+ tgt->conn_db = NULL;
+ }
+ /* Free confq and confq pbl */
+ if (tgt->confq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+ tgt->confq_pbl, tgt->confq_pbl_dma);
+ tgt->confq_pbl = NULL;
+ }
+ if (tgt->confq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ tgt->confq, tgt->confq_dma);
+ tgt->confq = NULL;
+ }
+ /* Free XFERQ */
+ if (tgt->xferq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ tgt->xferq, tgt->xferq_dma);
+ tgt->xferq = NULL;
+ }
+ /* Free RQ PBL and RQ */
+ if (tgt->rq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ tgt->rq_pbl, tgt->rq_pbl_dma);
+ tgt->rq_pbl = NULL;
+ }
+ if (tgt->rq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ tgt->rq, tgt->rq_dma);
+ tgt->rq = NULL;
+ }
+ /* Free CQ */
+ if (tgt->cq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ tgt->cq, tgt->cq_dma);
+ tgt->cq = NULL;
+ }
+ /* Free SQ */
+ if (tgt->sq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ tgt->sq, tgt->sq_dma);
+ tgt->sq = NULL;
+ }
+ spin_unlock_bh(&tgt->cq_lock);
+
+ if (ctx_base_ptr)
+ iounmap(ctx_base_ptr);
+}
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 000000000..917534109
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,161 @@
+/* 57xx_iscsi_constants.h: QLogic NetXtreme II iSCSI HSI
+ *
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/**
+* This file defines HSI constants for the iSCSI flows
+*/
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ (0)
+#define ISCSI_TASK_TYPE_WRITE (1)
+#define ISCSI_TASK_TYPE_MPATH (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
+#define ISCSI_KWQE_OPCODE_INIT1 (4)
+#define ISCSI_KWQE_OPCODE_INIT2 (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
+#define ISCSI_KCQE_OPCODE_INIT (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80)
+#define ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR (0x81)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE (16)
+#define ISCSI_RQ_DB_SIZE (64)
+#define ISCSI_CQ_DB_SIZE (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256 (0)
+#define ISCSI_PAGE_SIZE_512 (1)
+#define ISCSI_PAGE_SIZE_1K (2)
+#define ISCSI_PAGE_SIZE_2K (3)
+#define ISCSI_PAGE_SIZE_4K (4)
+#define ISCSI_PAGE_SIZE_8K (5)
+#define ISCSI_PAGE_SIZE_16K (6)
+#define ISCSI_PAGE_SIZE_32K (7)
+#define ISCSI_PAGE_SIZE_64K (8)
+#define ISCSI_PAGE_SIZE_128K (9)
+#define ISCSI_PAGE_SIZE_256K (10)
+#define ISCSI_PAGE_SIZE_512K (11)
+#define ISCSI_PAGE_SIZE_1M (12)
+#define ISCSI_PAGE_SIZE_2M (13)
+#define ISCSI_PAGE_SIZE_4M (14)
+#define ISCSI_PAGE_SIZE_8M (15)
+
+/* Iscsi PDU related defines */
+#define ISCSI_HEADER_SIZE (48)
+#define ISCSI_DIGEST_SHIFT (2)
+#define ISCSI_DIGEST_SIZE (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE 3
+
+#endif /*__57XX_ISCSI_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 000000000..19b3a97db
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1526 @@
+/* 57xx_iscsi_hsi.h: QLogic NetXtreme II iSCSI HSI.
+ *
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__
+#define __57XX_ISCSI_HSI_LINUX_LE__
+
+/*
+ * iSCSI Async CQE
+ */
+struct bnx2i_async_msg {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved5;
+ u8 err_code;
+ u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved4;
+ u8 err_code;
+ u16 reserved5;
+#endif
+ u32 reserved6;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u8 async_event;
+ u8 async_vcode;
+ u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 param1;
+ u8 async_vcode;
+ u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 param2;
+ u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 param3;
+ u16 param2;
+#endif
+ u32 reserved7[3];
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+ u32 buffer_addr_hi;
+ u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buffer_length;
+ u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+ u16 reserved3;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct bnx2i_cleanup_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+ u16 reserved3;
+#endif
+ u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved6;
+ u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved5;
+ u8 reserved6;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct bnx2i_cleanup_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 status;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 status;
+ u8 op_code;
+#endif
+ u32 reserved1[3];
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+ u16 reserved6;
+ u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+ u16 reserved6;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct bnx2i_cmd_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+ u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ud_buffer_offset;
+ u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sd_buffer_offset;
+ u16 ud_buffer_offset;
+#endif
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+ u32 total_data_transfer_length;
+ u32 cmd_sn;
+ u32 reserved3;
+ u32 cdb[4];
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 sd_start_bd_index;
+ u8 ud_start_bd_index;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 ud_start_bd_index;
+ u8 sd_start_bd_index;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * task statistics for write response
+ */
+struct bnx2i_write_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+ u16 num_r2ts;
+ u16 num_data_outs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_data_outs;
+ u16 num_r2ts;
+#endif
+};
+
+/*
+ * task statistics for read response
+ */
+struct bnx2i_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+ u16 reserved;
+ u16 num_data_ins;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_data_ins;
+ u16 reserved;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union bnx2i_cmd_resp_task_stat {
+ struct bnx2i_write_resp_task_stat write_stat;
+ struct bnx2i_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct bnx2i_cmd_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+ u8 response;
+ u8 status;
+#elif defined(__LITTLE_ENDIAN)
+ u8 status;
+ u8 response;
+ u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved2;
+ u32 residual_count;
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5[5];
+ union bnx2i_cmd_resp_task_stat task_stat;
+ u32 reserved6;
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct bnx2i_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+ u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hdr_opaque1;
+ u8 op_attr;
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+ u16 reserved0;
+#endif
+ u32 hdr_opaque3[4];
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 reserved3;
+ u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+ u8 reserved3;
+ u16 reserved4;
+#endif
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved6;
+ u8 reserved5;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved5;
+ u8 reserved6;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct bnx2i_fw_response {
+ u32 hdr_dword1[2];
+ u32 hdr_exp_cmd_sn;
+ u32 hdr_max_cmd_sn;
+ u32 hdr_ttt;
+ u32 hdr_res_cnt;
+ u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+ u32 stat_sn;
+ u32 hdr_dword2[2];
+ u32 hdr_dword3[2];
+ u32 task_stat;
+ u32 reserved0;
+ u32 hdr_itt;
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+ u32 iscsi_conn_id;
+ u32 completion_status;
+ u32 iscsi_conn_context_id;
+ union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u8 reserved0;
+ u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_cqs;
+ u8 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 num_ccells_per_conn;
+ u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks_per_conn;
+ u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_wqes_per_page;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cq_log_wqes_per_page;
+ u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
+ u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_num_wqes;
+ u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
+ u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_pages;
+ u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_pages;
+ u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rq_buffer_size;
+ u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_num_wqes;
+ u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+ u16 max_cq_sqn;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 error_bit_map[2];
+ u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 iscsi_conn_id;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 sq_page_table_addr_lo;
+ u32 sq_page_table_addr_hi;
+ u32 cq_page_table_addr_lo;
+ u32 cq_page_table_addr_hi;
+ u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+ u32 hi;
+ u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 rq_page_table_addr_lo;
+ u32 rq_page_table_addr_hi;
+ struct iscsi_pte sq_first_pte;
+ struct iscsi_pte cq_first_pte;
+ u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 reserved1;
+ struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 session_error_recovery_level;
+ u8 max_outstanding_r2ts;
+ u8 reserved2;
+ u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
+ u8 reserved2;
+ u8 max_outstanding_r2ts;
+ u8 session_error_recovery_level;
+#endif
+ u32 context_id;
+ u32 max_send_pdu_length;
+ u32 max_recv_pdu_length;
+ u32 first_burst_length;
+ u32 max_burst_length;
+ u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+ struct iscsi_kwqe_init1 init1;
+ struct iscsi_kwqe_init2 init2;
+ struct iscsi_kwqe_conn_offload1 conn_offload1;
+ struct iscsi_kwqe_conn_offload2 conn_offload2;
+ struct iscsi_kwqe_conn_update conn_update;
+ struct iscsi_kwqe_conn_destroy conn_destroy;
+};
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct bnx2i_login_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+ u8 version_max;
+ u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+ u8 version_min;
+ u8 version_max;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+ u16 isid_hi;
+ u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tsih;
+ u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cid;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 cid;
+#endif
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 reserved4;
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 reserved8;
+ u8 reserved7;
+ u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+ u8 reserved7;
+ u16 reserved8;
+#endif
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved10;
+ u8 reserved9;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved9;
+ u8 reserved10;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct bnx2i_login_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+ u8 version_max;
+ u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+ u8 version_active;
+ u8 version_max;
+ u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u8 err_code;
+ u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2;
+ u8 err_code;
+ u16 reserved3;
+#endif
+ u32 stat_sn;
+ u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+ u16 isid_hi;
+ u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tsih;
+ u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 status_class;
+ u8 status_detail;
+ u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved4;
+ u8 status_detail;
+ u8 status_class;
+#endif
+ u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+ u16 reserved6;
+ u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+ u16 reserved6;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct bnx2i_logout_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cid;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 cid;
+#endif
+ u32 cmd_sn;
+ u32 reserved4[5];
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved6;
+ u8 reserved5;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved5;
+ u8 reserved6;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct bnx2i_logout_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u8 response;
+ u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved0;
+ u8 response;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved5;
+ u8 err_code;
+ u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved4;
+ u8 err_code;
+ u16 reserved5;
+#endif
+ u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+ u16 time_to_wait;
+ u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+ u16 time_to_retain;
+ u16 time_to_wait;
+#endif
+ u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+ u16 reserved8;
+ u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+ u16 reserved8;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct bnx2i_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 ttt;
+ u32 reserved2;
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5;
+ u32 lun[2];
+ u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct bnx2i_nop_out_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+ u32 ttt;
+ u32 cmd_sn;
+ u32 reserved3[2];
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u8 reserved6;
+ u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+ u8 reserved6;
+ u16 reserved7;
+#endif
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved9;
+ u8 reserved8;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved8;
+ u8 reserved9;
+ u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Reject CQE
+ */
+struct bnx2i_reject_msg {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u8 reason;
+ u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved0;
+ u8 reason;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5[8];
+ u32 cq_req_sn;
+};
+
+/*
+ * bnx2i iSCSI TMF SQ WQE
+ */
+struct bnx2i_tmf_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+ u16 reserved1;
+#endif
+ u32 ref_itt;
+ u32 cmd_sn;
+ u32 reserved2;
+ u32 ref_cmd_sn;
+ u32 reserved3[3];
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved5;
+ u8 reserved4;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved4;
+ u8 reserved5;
+ u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct bnx2i_text_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+ u16 reserved3;
+#endif
+ u32 ttt;
+ u32 cmd_sn;
+ u32 reserved4[2];
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved7;
+ u8 reserved6;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved6;
+ u8 reserved7;
+ u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+ struct bnx2i_cmd_request cmd;
+ struct bnx2i_tmf_request tmf;
+ struct bnx2i_nop_out_request nop_out;
+ struct bnx2i_login_request login_req;
+ struct bnx2i_text_request text;
+ struct bnx2i_logout_request logout_req;
+ struct bnx2i_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct bnx2i_tmf_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u8 response;
+ u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved0;
+ u8 response;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved5;
+ u8 err_code;
+ u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved4;
+ u8 err_code;
+ u16 reserved5;
+#endif
+ u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct bnx2i_text_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 ttt;
+ u32 reserved2;
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5;
+ u32 lun[2];
+ u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+ struct bnx2i_cmd_response cmd;
+ struct bnx2i_tmf_response tmf;
+ struct bnx2i_login_response login_resp;
+ struct bnx2i_text_response text;
+ struct bnx2i_logout_response logout_resp;
+ struct bnx2i_cleanup_response cleanup;
+ struct bnx2i_reject_msg reject;
+ struct bnx2i_async_msg async;
+ struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 000000000..ba30ff86d
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,13 @@
+config SCSI_BNX2_ISCSI
+ tristate "QLogic NetXtreme II iSCSI support"
+ depends on NET
+ depends on PCI
+ depends on (IPV6 || IPV6=n)
+ select SCSI_ISCSI_ATTRS
+ select NETDEVICES
+ select ETHERNET
+ select NET_VENDOR_BROADCOM
+ select CNIC
+ ---help---
+ This driver supports iSCSI offload for the QLogic NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 000000000..b5802bd2e
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 000000000..ed7f3228e
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,882 @@
+/* bnx2i.h: QLogic NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/ethernet/broadcom/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
+
+#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
+
+#define BNX2I_MAX_ADAPTERS 8
+
+#define ISCSI_MAX_CONNS_PER_HBA 128
+#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS 128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+
+#define ISCSI_MAX_BDS_PER_CMD 32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
+
+#define BNX2X_DB_SHIFT 3
+
+/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH 65535
+#define BD_SPLIT_SIZE 32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
+#define BNX2I_SQ_WQES_MIN 16
+#define BNX2I_570X_SQ_WQES_MAX 128
+#define BNX2I_5770X_SQ_WQES_MAX 512
+#define BNX2I_570X_SQ_WQES_DEFAULT 128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 128
+
+#define BNX2I_570X_CQ_WQES_MAX 128
+#define BNX2I_5770X_CQ_WQES_MAX 512
+
+#define BNX2I_RQ_WQES_MIN 16
+#define BNX2I_RQ_WQES_MAX 32
+#define BNX2I_RQ_WQES_DEFAULT 16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN 16
+#define BNX2I_CCELLS_MAX 96
+#define BNX2I_CCELLS_DEFAULT 64
+
+#define ITT_INVALID_SIGNATURE 0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT 100
+
+#define BNX2I_CONN_CTX_BUF_SIZE 16384
+
+#define BNX2I_SQ_WQE_SIZE 64
+#define BNX2I_RQ_WQE_SIZE 256
+#define BNX2I_CQE_SIZE 64
+
+#define MB_KERNEL_CTX_SHIFT 8
+#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT 7
+#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET 0x10000
+#define MAX_CID_CNT 0x4000
+
+#define BNX2I_570X_PAGE_SIZE_DEFAULT 4096
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2 0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR 2
+#define BNX2X_MAX_CQS 8
+
+#define CNIC_ARM_CQE 1
+#define CNIC_ARM_CQE_FP 2
+#define CNIC_DISARM_CQE 0
+
+#define REG_RD(__hba, offset) \
+ readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val) \
+ writel(val, __hba->regview + offset)
+
+#ifdef CONFIG_32BIT
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ spin_lock_bh(&__hba->stat_lock); \
+ dst->field##_lo = __hba->stats.field##_lo; \
+ dst->field##_hi = __hba->stats.field##_hi; \
+ spin_unlock_bh(&__hba->stat_lock); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ if (spin_trylock(&__hba->stat_lock)) { \
+ if (__hba->stats.field##_lo + len < \
+ __hba->stats.field##_lo) \
+ __hba->stats.field##_hi++; \
+ __hba->stats.field##_lo += len; \
+ spin_unlock(&__hba->stat_lock); \
+ } \
+ } while (0)
+
+#else
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ u64 val, *out; \
+ \
+ val = __hba->bnx2i_stats.field; \
+ out = (u64 *)&__hba->stats.field##_lo; \
+ *out = cpu_to_le64(val); \
+ out = (u64 *)&dst->field##_lo; \
+ *out = cpu_to_le64(val); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ __hba->bnx2i_stats.field += len; \
+ } while (0)
+#endif
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf: driver buffer used to stage payload associated with
+ * the login request
+ * @req_dma_addr: dma address for iscsi login request payload buffer
+ * @req_buf_size: actual login request payload length
+ * @req_wr_ptr: pointer into login request buffer when next data is
+ * to be written
+ * @resp_hdr: iscsi header where iscsi login response header is to
+ * be recreated
+ * @resp_buf: buffer to stage login response payload
+ * @resp_dma_addr: login response payload buffer dma address
+ * @resp_buf_size: login response paylod length
+ * @resp_wr_ptr: pointer into login response buffer when next data is
+ * to be written
+ * @req_bd_tbl: iscsi login request payload BD table
+ * @req_bd_dma: login request BD table dma address
+ * @resp_bd_tbl: iscsi login response payload BD table
+ * @resp_bd_dma: login request BD table dma address
+ *
+ * following structure defines buffer info for generic pdus such as iSCSI Login,
+ * Logout and NOP
+ */
+struct generic_pdu_resc {
+ char *req_buf;
+ dma_addr_t req_dma_addr;
+ u32 req_buf_size;
+ char *req_wr_ptr;
+ struct iscsi_hdr resp_hdr;
+ char *resp_buf;
+ dma_addr_t resp_dma_addr;
+ u32 resp_buf_size;
+ char *resp_wr_ptr;
+ char *req_bd_tbl;
+ dma_addr_t req_bd_dma;
+ char *resp_bd_tbl;
+ dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link: list head to link elements
+ * @max_ptrs: maximun pointers that can be stored in this page
+ * @num_valid: number of pointer valid in this page
+ * @page: base addess for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+ struct list_head link;
+ u32 max_ptrs;
+ u32 num_valid;
+ void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer destricptor table
+ *
+ * @bd_tbl: BD table's virtual address
+ * @bd_tbl_dma: BD table's dma address
+ * @bd_valid: num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+ struct iscsi_bd *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+
+/**
+ * bnx2i_cmd - iscsi command structure
+ *
+ * @hdr: iSCSI header
+ * @conn: iscsi_conn pointer
+ * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg: SG list
+ * @io_tbl: buffer descriptor (BD) table
+ * @bd_tbl_dma: buffer descriptor (BD) table's dma address
+ * @req: bnx2i specific command request struct
+ */
+struct bnx2i_cmd {
+ struct iscsi_hdr hdr;
+ struct bnx2i_conn *conn;
+ struct scsi_cmnd *scsi_cmd;
+ struct scatterlist *sg;
+ struct io_bdt io_tbl;
+ dma_addr_t bd_tbl_dma;
+ struct bnx2i_cmd_request req;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn: pointer to iscsi cls conn
+ * @hba: adapter structure pointer
+ * @iscsi_conn_cid: iscsi conn id
+ * @fw_cid: firmware iscsi context id
+ * @ep: endpoint structure pointer
+ * @gen_pdu: login/nopout/logout pdu resources
+ * @violation_notified: bit mask used to track iscsi error/warning messages
+ * already printed out
+ * @work_cnt: keeps track of the number of outstanding work
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+ struct iscsi_cls_conn *cls_conn;
+ struct bnx2i_hba *hba;
+ struct completion cmd_cleanup_cmpl;
+
+ u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED 0x5AFF
+ u32 fw_cid;
+
+ struct timer_list poll_timer;
+ /*
+ * Queue Pair (QP) related structure elements.
+ */
+ struct bnx2i_endpoint *ep;
+
+ /*
+ * Buffer for login negotiation process
+ */
+ struct generic_pdu_resc gen_pdu;
+ u64 violation_notified;
+
+ atomic_t work_cnt;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base: queue base memory
+ * @cid_que: queue memory pointer
+ * @cid_q_prod_idx: produce index
+ * @cid_q_cons_idx: consumer index
+ * @cid_q_max_idx: max index. used to detect wrap around condition
+ * @cid_free_cnt: queue size
+ * @conn_cid_tbl: iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+ void *cid_que_base;
+ u32 *cid_que;
+ u32 cid_q_prod_idx;
+ u32 cid_q_cons_idx;
+ u32 cid_q_max_idx;
+ u32 cid_free_cnt;
+ struct bnx2i_conn **conn_cid_tbl;
+};
+
+
+struct bnx2i_stats_info {
+ u64 rx_pdus;
+ u64 rx_bytes;
+ u64 tx_pdus;
+ u64 tx_bytes;
+};
+
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link: list head to link elements
+ * @cnic: pointer to cnic device
+ * @pcidev: pointer to pci dev
+ * @netdev: pointer to netdev structure
+ * @regview: mapped PCI register space
+ * @age: age, incremented by every recovery
+ * @cnic_dev_type: cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access: mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic: indicates whether the device is register with CNIC
+ * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported: Ethernet MTU supported
+ * @shost: scsi host pointer
+ * @max_sqes: SQ size
+ * @max_rqes: RQ size
+ * @max_cqes: CQ size
+ * @num_ccell: number of command cells per connection
+ * @ofld_conns_active: active connection list
+ * @eh_wait: wait queue for the endpoint to shutdown
+ * @max_active_conns: max offload connections supported by this device
+ * @cid_que: iscsi cid queue
+ * @ep_rdwr_lock: read / write lock to synchronize various ep lists
+ * @ep_ofld_list: connection list for pending offload completion
+ * @ep_active_list: connection list for active offload endpoints
+ * @ep_destroy_list: connection list for pending offload completion
+ * @mp_bd_tbl: BD table to be used with middle path requests
+ * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
+ * @lock: lock to synchonize access to hba structure
+ * @hba_shutdown_tmo: Timeout value to shutdown each connection
+ * @conn_teardown_tmo: Timeout value to tear down each connection
+ * @conn_ctx_destroy_tmo: Timeout value to destroy context of each connection
+ * @pci_did: PCI device ID
+ * @pci_vid: PCI vendor ID
+ * @pci_sdid: PCI subsystem device ID
+ * @pci_svid: PCI subsystem vendor ID
+ * @pci_func: PCI function number in system pci tree
+ * @pci_devno: PCI device number in system pci tree
+ * @num_wqe_sent: statistic counter, total wqe's sent
+ * @num_cqe_rcvd: statistic counter, total cqe's received
+ * @num_intr_claimed: statistic counter, total interrupts claimed
+ * @link_changed_count: statistic counter, num of link change notifications
+ * received
+ * @ipaddr_changed_count: statistic counter, num times IP address changed while
+ * at least one connection is offloaded
+ * @num_sess_opened: statistic counter, total num sessions opened
+ * @num_conn_opened: statistic counter, total num conns opened on this hba
+ * @ctx_ccell_tasks: captures number of ccells and tasks supported by
+ * currently offloaded connection, used to decode
+ * context memory
+ * @stat_lock: spin lock used by the statistic collector (32 bit)
+ * @stats: local iSCSI statistic collection place holder
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+ struct list_head link;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ void __iomem *regview;
+ resource_size_t reg_base;
+
+ u32 age;
+ unsigned long cnic_dev_type;
+ #define BNX2I_NX2_DEV_5706 0x0
+ #define BNX2I_NX2_DEV_5708 0x1
+ #define BNX2I_NX2_DEV_5709 0x2
+ #define BNX2I_NX2_DEV_57710 0x3
+ u32 mail_queue_access;
+ #define BNX2I_MQ_KERNEL_MODE 0x0
+ #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
+ #define BNX2I_MQ_BIN_MODE 0x2
+ unsigned long reg_with_cnic;
+ #define BNX2I_CNIC_REGISTERED 1
+
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_INIT_FAILED 31
+ unsigned int mtu_supported;
+ #define BNX2I_MAX_MTU_SUPPORTED 9000
+
+ struct Scsi_Host *shost;
+
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+ u32 num_ccell;
+
+ int ofld_conns_active;
+ wait_queue_head_t eh_wait;
+
+ int max_active_conns;
+ struct iscsi_cid_queue cid_que;
+
+ rwlock_t ep_rdwr_lock;
+ struct list_head ep_ofld_list;
+ struct list_head ep_active_list;
+ struct list_head ep_destroy_list;
+
+ /*
+ * BD table to be used with MP (Middle Path requests.
+ */
+ char *mp_bd_tbl;
+ dma_addr_t mp_bd_dma;
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ spinlock_t lock; /* protects hba structure access */
+ struct mutex net_dev_lock;/* sync net device access */
+
+ int hba_shutdown_tmo;
+ int conn_teardown_tmo;
+ int conn_ctx_destroy_tmo;
+ /*
+ * PCI related info.
+ */
+ u16 pci_did;
+ u16 pci_vid;
+ u16 pci_sdid;
+ u16 pci_svid;
+ u16 pci_func;
+ u16 pci_devno;
+
+ /*
+ * Following are a bunch of statistics useful during development
+ * and later stage for score boarding.
+ */
+ u32 num_wqe_sent;
+ u32 num_cqe_rcvd;
+ u32 num_intr_claimed;
+ u32 link_changed_count;
+ u32 ipaddr_changed_count;
+ u32 num_sess_opened;
+ u32 num_conn_opened;
+ unsigned int ctx_ccell_tasks;
+
+#ifdef CONFIG_32BIT
+ spinlock_t stat_lock;
+#endif
+ struct bnx2i_stats_info bnx2i_stats;
+ struct iscsi_stats_info stats;
+};
+
+
+/*******************************************************************************
+ * QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct sqe {
+ u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct rqe {
+ u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct cqe {
+ u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
+enum {
+#if defined(__LITTLE_ENDIAN)
+ CNIC_EVENT_COAL_INDEX = 0x0,
+ CNIC_SEND_DOORBELL = 0x4,
+ CNIC_EVENT_CQ_ARM = 0x7,
+ CNIC_RECV_DOORBELL = 0x8
+#elif defined(__BIG_ENDIAN)
+ CNIC_EVENT_COAL_INDEX = 0x2,
+ CNIC_SEND_DOORBELL = 0x6,
+ CNIC_EVENT_CQ_ARM = 0x4,
+ CNIC_RECV_DOORBELL = 0xa
+#endif
+};
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+ /* CQ producer, updated by Ustorm */
+ u16 ustrom_prod;
+ /* CQ pending completion counter */
+ u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+ struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+ /* CQ pending completion ITT array */
+ u16 itt[BNX2X_MAX_CQS];
+ /* Cstorm CQ sequence to notify array, updated by driver */;
+ u16 sqn[BNX2X_MAX_CQS];
+ u32 reserved[4] /* 16 byte allignment */;
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+ u16 prod_idx;
+ u8 reserved0[62]; /* Pad structure size to 64 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+ u8 header;
+ /* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+ /* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+ /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+ /* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+struct bnx2i_5771x_dbell {
+ struct bnx2i_5771x_dbell_hdr dbell;
+ u8 pad[3];
+
+};
+
+/**
+ * struct qp_info - QP (share queue region) atrributes structure
+ *
+ * @ctx_base: ioremapped pci register base to access doorbell register
+ * pertaining to this offloaded connection
+ * @sq_virt: virtual address of send queue (SQ) region
+ * @sq_phys: DMA address of SQ memory region
+ * @sq_mem_size: SQ size
+ * @sq_prod_qe: SQ producer entry pointer
+ * @sq_cons_qe: SQ consumer entry pointer
+ * @sq_first_qe: virtual address of first entry in SQ
+ * @sq_last_qe: virtual address of last entry in SQ
+ * @sq_prod_idx: SQ producer index
+ * @sq_cons_idx: SQ consumer index
+ * @sqe_left: number sq entry left
+ * @sq_pgtbl_virt: page table describing buffer consituting SQ region
+ * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size: SQ page table size
+ * @cq_virt: virtual address of completion queue (CQ) region
+ * @cq_phys: DMA address of RQ memory region
+ * @cq_mem_size: CQ size
+ * @cq_prod_qe: CQ producer entry pointer
+ * @cq_cons_qe: CQ consumer entry pointer
+ * @cq_first_qe: virtual address of first entry in CQ
+ * @cq_last_qe: virtual address of last entry in CQ
+ * @cq_prod_idx: CQ producer index
+ * @cq_cons_idx: CQ consumer index
+ * @cqe_left: number cq entry left
+ * @cqe_size: size of each CQ entry
+ * @cqe_exp_seq_sn: next expected CQE sequence number
+ * @cq_pgtbl_virt: page table describing buffer consituting CQ region
+ * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size: CQ page table size
+ * @rq_virt: virtual address of receive queue (RQ) region
+ * @rq_phys: DMA address of RQ memory region
+ * @rq_mem_size: RQ size
+ * @rq_prod_qe: RQ producer entry pointer
+ * @rq_cons_qe: RQ consumer entry pointer
+ * @rq_first_qe: virtual address of first entry in RQ
+ * @rq_last_qe: virtual address of last entry in RQ
+ * @rq_prod_idx: RQ producer index
+ * @rq_cons_idx: RQ consumer index
+ * @rqe_left: number rq entry left
+ * @rq_pgtbl_virt: page table describing buffer consituting RQ region
+ * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size: RQ page table size
+ *
+ * queue pair (QP) is a per connection shared data structure which is used
+ * to send work requests (SQ), receive completion notifications (CQ)
+ * and receive asynchoronous / scsi sense info (RQ). 'qp_info' structure
+ * below holds queue memory, consumer/producer indexes and page table
+ * information
+ */
+struct qp_info {
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+
+#define BNX2I_570x_QUE_DB_SIZE 0
+#define BNX2I_5771x_QUE_DB_SIZE 16
+ struct sqe *sq_virt;
+ dma_addr_t sq_phys;
+ u32 sq_mem_size;
+
+ struct sqe *sq_prod_qe;
+ struct sqe *sq_cons_qe;
+ struct sqe *sq_first_qe;
+ struct sqe *sq_last_qe;
+ u16 sq_prod_idx;
+ u16 sq_cons_idx;
+ u32 sqe_left;
+
+ void *sq_pgtbl_virt;
+ dma_addr_t sq_pgtbl_phys;
+ u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
+
+ struct cqe *cq_virt;
+ dma_addr_t cq_phys;
+ u32 cq_mem_size;
+
+ struct cqe *cq_prod_qe;
+ struct cqe *cq_cons_qe;
+ struct cqe *cq_first_qe;
+ struct cqe *cq_last_qe;
+ u16 cq_prod_idx;
+ u16 cq_cons_idx;
+ u32 cqe_left;
+ u32 cqe_size;
+ u32 cqe_exp_seq_sn;
+
+ void *cq_pgtbl_virt;
+ dma_addr_t cq_pgtbl_phys;
+ u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
+
+ struct rqe *rq_virt;
+ dma_addr_t rq_phys;
+ u32 rq_mem_size;
+
+ struct rqe *rq_prod_qe;
+ struct rqe *rq_cons_qe;
+ struct rqe *rq_first_qe;
+ struct rqe *rq_last_qe;
+ u16 rq_prod_idx;
+ u16 rq_cons_idx;
+ u32 rqe_left;
+
+ void *rq_pgtbl_virt;
+ dma_addr_t rq_pgtbl_phys;
+ u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
+};
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+ u32 fw_cid;
+ u32 drv_iscsi_cid;
+ u16 pg_cid;
+ u16 rsvd;
+};
+
+
+enum {
+ EP_STATE_IDLE = 0x0,
+ EP_STATE_PG_OFLD_START = 0x1,
+ EP_STATE_PG_OFLD_COMPL = 0x2,
+ EP_STATE_OFLD_START = 0x4,
+ EP_STATE_OFLD_COMPL = 0x8,
+ EP_STATE_CONNECT_START = 0x10,
+ EP_STATE_CONNECT_COMPL = 0x20,
+ EP_STATE_ULP_UPDATE_START = 0x40,
+ EP_STATE_ULP_UPDATE_COMPL = 0x80,
+ EP_STATE_DISCONN_START = 0x100,
+ EP_STATE_DISCONN_COMPL = 0x200,
+ EP_STATE_CLEANUP_START = 0x400,
+ EP_STATE_CLEANUP_CMPL = 0x800,
+ EP_STATE_TCP_FIN_RCVD = 0x1000,
+ EP_STATE_TCP_RST_RCVD = 0x2000,
+ EP_STATE_LOGOUT_SENT = 0x4000,
+ EP_STATE_LOGOUT_RESP_RCVD = 0x8000,
+ EP_STATE_PG_OFLD_FAILED = 0x1000000,
+ EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
+ EP_STATE_CLEANUP_FAILED = 0x4000000,
+ EP_STATE_OFLD_FAILED = 0x8000000,
+ EP_STATE_CONNECT_FAILED = 0x10000000,
+ EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
+ EP_STATE_OFLD_FAILED_CID_BUSY = 0x80000000,
+};
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link: list head to link elements
+ * @hba: adapter to which this connection belongs
+ * @conn: iscsi connection this EP is linked to
+ * @cls_ep: associated iSCSI endpoint pointer
+ * @cm_sk: cnic sock struct
+ * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
+ * after HBA reset is completed by bnx2i/cnic/bnx2
+ * modules
+ * @state: tracks offload connection state machine
+ * @timestamp: tracks the start time when the ep begins to connect
+ * @num_active_cmds: tracks the number of outstanding commands for this ep
+ * @ec_shift: the amount of shift as part of the event coal calc
+ * @qp: QP information
+ * @ids: contains chip allocated *context id* & driver assigned
+ * *iscsi cid*
+ * @ofld_timer: offload timer to detect timeout
+ * @ofld_wait: wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+ struct list_head link;
+ struct bnx2i_hba *hba;
+ struct bnx2i_conn *conn;
+ struct iscsi_endpoint *cls_ep;
+ struct cnic_sock *cm_sk;
+ u32 hba_age;
+ u32 state;
+ unsigned long timestamp;
+ atomic_t num_active_cmds;
+ u32 ec_shift;
+
+ struct qp_info qp;
+ struct ep_handles ids;
+ #define ep_iscsi_cid ids.drv_iscsi_cid
+ #define ep_cid ids.fw_cid
+ #define ep_pg_cid ids.pg_cid
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+};
+
+
+struct bnx2i_work {
+ struct list_head list;
+ struct iscsi_session *session;
+ struct bnx2i_conn *bnx2i_conn;
+ struct cqe cqe;
+};
+
+struct bnx2i_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t p_work_lock;
+};
+
+
+/* Global variables */
+extern unsigned int error_mask1, error_mask2;
+extern u64 iscsi_error_mask;
+extern unsigned int en_tcp_dack;
+extern unsigned int event_coal_div;
+extern unsigned int event_coal_min;
+
+extern struct scsi_transport_template *bnx2i_scsi_xport_template;
+extern struct iscsi_transport bnx2i_iscsi_transport;
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+extern struct device_attribute *bnx2i_dev_attributes[];
+
+
+
+/*
+ * Function Prototypes
+ */
+extern void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev);
+
+extern void bnx2i_ulp_init(struct cnic_dev *dev);
+extern void bnx2i_ulp_exit(struct cnic_dev *dev);
+extern void bnx2i_start(void *handle);
+extern void bnx2i_stop(void *handle);
+extern int bnx2i_get_stats(void *handle);
+
+extern struct bnx2i_hba *get_adapter_list_head(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+ u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
+
+void bnx2i_drop_session(struct iscsi_cls_session *session);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+ struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask,
+ char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+ struct bnx2i_cmd *cmd);
+extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern int bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
+ struct bnx2i_hba *hba, u32 iscsi_cid);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+ struct bnx2i_hba *hba, u32 iscsi_cid);
+
+extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+extern int bnx2i_percpu_io_thread(void *arg);
+extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe);
+#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 000000000..fb072cc5e
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2771 @@
+/* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+
+#include <linux/gfp.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
+/**
+ * bnx2i_get_cid_num - get cid from ep
+ * @ep: endpoint pointer
+ *
+ * Only applicable to 57710 family of devices
+ */
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+ u32 cid;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ cid = ep->ep_cid;
+ else
+ cid = GET_CID_NUM(ep->ep_cid);
+ return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
+ * @hba: Adapter for which adjustments is to be made
+ *
+ * Only applicable to 57710 family of devices
+ */
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+ u32 num_elements_per_pg;
+
+ if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+ test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+ test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+ if (!is_power_of_2(hba->max_sqes))
+ hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
+
+ if (!is_power_of_2(hba->max_rqes))
+ hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
+ }
+
+ /* Adjust each queue size if the user selection does not
+ * yield integral num of page buffers
+ */
+ /* adjust SQ */
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ if (hba->max_sqes < num_elements_per_pg)
+ hba->max_sqes = num_elements_per_pg;
+ else if (hba->max_sqes % num_elements_per_pg)
+ hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+ ~(num_elements_per_pg - 1);
+
+ /* adjust CQ */
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
+ if (hba->max_cqes < num_elements_per_pg)
+ hba->max_cqes = num_elements_per_pg;
+ else if (hba->max_cqes % num_elements_per_pg)
+ hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+ ~(num_elements_per_pg - 1);
+
+ /* adjust RQ */
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+ if (hba->max_rqes < num_elements_per_pg)
+ hba->max_rqes = num_elements_per_pg;
+ else if (hba->max_rqes % num_elements_per_pg)
+ hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+ ~(num_elements_per_pg - 1);
+}
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba: adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ * @hba: adapter instance pointer
+ * @error_code: error classification
+ *
+ * Puts out an error log when driver is unable to offload iscsi connection
+ * due to license restrictions
+ */
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+ if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+ /* iSCSI offload not supported on this device */
+ printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+ hba->netdev->name);
+ if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+ /* iSCSI offload not supported on this LOM device */
+ printk(KERN_ERR "bnx2i: LOM is not enable to "
+ "offload iSCSI connections, dev=%s\n",
+ hba->netdev->name);
+ set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ * @ep: endpoint (transport identifier) structure
+ * @action: action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt
+ * the driver. EQ event is generated CQ index is hit or at least 1 CQ is
+ * outstanding and on chip timer expires
+ */
+int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+ struct bnx2i_5771x_cq_db *cq_db;
+ u16 cq_index;
+ u16 next_index = 0;
+ u32 num_active_cmds;
+
+ /* Coalesce CQ entries only on 10G devices */
+ if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ return 0;
+
+ /* Do not update CQ DB multiple times before firmware writes
+ * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
+ * interrupts and other unwanted results
+ */
+ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+
+ if (action != CNIC_ARM_CQE_FP)
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return 0;
+
+ if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
+ num_active_cmds = atomic_read(&ep->num_active_cmds);
+ if (num_active_cmds <= event_coal_min)
+ next_index = 1;
+ else {
+ next_index = num_active_cmds >> ep->ec_shift;
+ if (next_index > num_active_cmds - event_coal_min)
+ next_index = num_active_cmds - event_coal_min;
+ }
+ if (!next_index)
+ next_index = 1;
+ cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
+ if (cq_index > ep->qp.cqe_size * 2)
+ cq_index -= ep->qp.cqe_size * 2;
+ if (!cq_index)
+ cq_index = 1;
+
+ cq_db->sqn[0] = cq_index;
+ }
+ return next_index;
+}
+
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ * @conn: iscsi connection on which RQ event occurred
+ * @ptr: driver buffer to which RQ buffer contents is to
+ * be copied
+ * @len: length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ * driver buffer. RQ is used to DMA unsolicitated iscsi pdu's and
+ * scsi sense info
+ */
+void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
+{
+ if (!bnx2i_conn->ep->qp.rqe_left)
+ return;
+
+ bnx2i_conn->ep->qp.rqe_left--;
+ memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
+ if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
+ bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
+ bnx2i_conn->ep->qp.rq_cons_idx = 0;
+ } else {
+ bnx2i_conn->ep->qp.rq_cons_qe++;
+ bnx2i_conn->ep->qp.rq_cons_idx++;
+ }
+}
+
+
+static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+ struct bnx2i_5771x_dbell dbell;
+ u32 msg;
+
+ memset(&dbell, 0, sizeof(dbell));
+ dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+ msg = *((u32 *)&dbell);
+ /* TODO : get doorbell register mapping */
+ writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
+ * @conn: iscsi connection on which event to post
+ * @count: number of RQ buffer being posted to chip
+ *
+ * No need to ring hardware doorbell for 57710 family of devices
+ */
+void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
+{
+ struct bnx2i_5771x_sq_rq_db *rq_db;
+ u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
+ struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+ ep->qp.rqe_left += count;
+ ep->qp.rq_prod_idx &= 0x7FFF;
+ ep->qp.rq_prod_idx += count;
+
+ if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
+ ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
+ if (!hi_bit)
+ ep->qp.rq_prod_idx |= 0x8000;
+ } else
+ ep->qp.rq_prod_idx |= hi_bit;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
+ rq_db->prod_idx = ep->qp.rq_prod_idx;
+ /* no need to ring hardware doorbell for 57710 */
+ } else {
+ writew(ep->qp.rq_prod_idx,
+ ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+ }
+ mmiowb();
+}
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ * @conn: iscsi connection to which new SQ entries belong
+ * @count: number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ * of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ * doorbell register
+ */
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
+{
+ struct bnx2i_5771x_sq_rq_db *sq_db;
+ struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+ atomic_inc(&ep->num_active_cmds);
+ wmb(); /* flush SQ WQE memory before the doorbell is rung */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
+ sq_db->prod_idx = ep->qp.sq_prod_idx;
+ bnx2i_ring_577xx_doorbell(bnx2i_conn);
+ } else
+ writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+
+ mmiowb(); /* flush posted PCI writes */
+}
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ * @conn: iscsi connection to which new SQ entries belong
+ * @count: number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ */
+static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
+ int count)
+{
+ int tmp_cnt;
+
+ if (count == 1) {
+ if (bnx2i_conn->ep->qp.sq_prod_qe ==
+ bnx2i_conn->ep->qp.sq_last_qe)
+ bnx2i_conn->ep->qp.sq_prod_qe =
+ bnx2i_conn->ep->qp.sq_first_qe;
+ else
+ bnx2i_conn->ep->qp.sq_prod_qe++;
+ } else {
+ if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
+ bnx2i_conn->ep->qp.sq_last_qe)
+ bnx2i_conn->ep->qp.sq_prod_qe += count;
+ else {
+ tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
+ bnx2i_conn->ep->qp.sq_prod_qe;
+ bnx2i_conn->ep->qp.sq_prod_qe =
+ &bnx2i_conn->ep->qp.sq_first_qe[count -
+ (tmp_cnt + 1)];
+ }
+ }
+ bnx2i_conn->ep->qp.sq_prod_idx += count;
+ /* Ring the doorbell */
+ bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *task)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_login_request *login_wqe;
+ struct iscsi_login_req *login_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+ login_wqe = (struct bnx2i_login_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+
+ login_wqe->op_code = login_hdr->opcode;
+ login_wqe->op_attr = login_hdr->flags;
+ login_wqe->version_max = login_hdr->max_version;
+ login_wqe->version_min = login_hdr->min_version;
+ login_wqe->data_length = ntoh24(login_hdr->dlength);
+ login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+ login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+ login_wqe->tsih = login_hdr->tsih;
+ login_wqe->itt = task->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
+ login_wqe->cid = login_hdr->cid;
+
+ login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;
+
+ login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ login_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ login_wqe->resp_buffer = dword;
+ login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ login_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ login_wqe->num_bds = 1;
+ login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ * @conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_tm *tmfabort_hdr;
+ struct scsi_cmnd *ref_sc;
+ struct iscsi_task *ctask;
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_tmf_request *tmfabort_wqe;
+ u32 dword;
+ u32 scsi_lun[2];
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
+ tmfabort_wqe = (struct bnx2i_tmf_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+
+ tmfabort_wqe->op_code = tmfabort_hdr->opcode;
+ tmfabort_wqe->op_attr = tmfabort_hdr->flags;
+
+ tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
+ tmfabort_wqe->reserved2 = 0;
+ tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
+
+ switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ case ISCSI_TM_FUNC_TASK_REASSIGN:
+ ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
+ if (!ctask || !ctask->sc)
+ /*
+ * the iscsi layer must have completed the cmd while
+ * was starting up.
+ *
+ * Note: In the case of a SCSI cmd timeout, the task's
+ * sc is still active; hence ctask->sc != 0
+ * In this case, the task must be aborted
+ */
+ return 0;
+
+ ref_sc = ctask->sc;
+ if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
+ dword = (ISCSI_TASK_TYPE_WRITE <<
+ ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ else
+ dword = (ISCSI_TASK_TYPE_READ <<
+ ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ tmfabort_wqe->ref_itt = (dword |
+ (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
+ break;
+ default:
+ tmfabort_wqe->ref_itt = RESERVED_ITT;
+ }
+ memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun));
+ tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+ tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
+ tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
+
+ tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+ tmfabort_wqe->bd_list_addr_hi = (u32)
+ ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+ tmfabort_wqe->num_bds = 1;
+ tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
+ * @conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ *
+ * prepare and post an iSCSI Text request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_text_request *text_wqe;
+ struct iscsi_text *text_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ text_hdr = (struct iscsi_text *)mtask->hdr;
+ text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
+
+ memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
+
+ text_wqe->op_code = text_hdr->opcode;
+ text_wqe->op_attr = text_hdr->flags;
+ text_wqe->data_length = ntoh24(text_hdr->dlength);
+ text_wqe->itt = mtask->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
+ text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
+
+ text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+
+ text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ text_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ text_wqe->resp_buffer = dword;
+ text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ text_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ text_wqe->num_bds = 1;
+ text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
+ struct bnx2i_cmd *cmd)
+{
+ struct bnx2i_cmd_request *scsi_cmd_wqe;
+
+ scsi_cmd_wqe = (struct bnx2i_cmd_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+ memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
+ scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ * @datap: payload buffer pointer
+ * @data_len: payload data length
+ * @unsol: indicated whether nopout pdu is unsolicited pdu or
+ * in response to target's NOPIN w/ TTT != FFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol)
+{
+ struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_nop_out_request *nopout_wqe;
+ struct iscsi_nopout *nopout_hdr;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+
+ memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
+
+ nopout_wqe->op_code = nopout_hdr->opcode;
+ nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+ memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ u32 tmp = nopout_wqe->lun[0];
+ /* 57710 requires LUN field to be swapped */
+ nopout_wqe->lun[0] = nopout_wqe->lun[1];
+ nopout_wqe->lun[1] = tmp;
+ }
+
+ nopout_wqe->itt = ((u16)task->itt |
+ (ISCSI_TASK_TYPE_MPATH <<
+ ISCSI_TMF_REQUEST_TYPE_SHIFT));
+ nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
+ nopout_wqe->flags = 0;
+ if (!unsol)
+ nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+ else if (nopout_hdr->itt == RESERVED_ITT)
+ nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+ nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ nopout_wqe->data_length = data_len;
+ if (data_len) {
+ /* handle payload data, not required in first release */
+ printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+ } else {
+ nopout_wqe->bd_list_addr_lo = (u32)
+ bnx2i_conn->hba->mp_bd_dma;
+ nopout_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+ nopout_wqe->num_bds = 1;
+ }
+ nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *task)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_logout_request *logout_wqe;
+ struct iscsi_logout *logout_hdr;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+ logout_hdr = (struct iscsi_logout *)task->hdr;
+
+ logout_wqe = (struct bnx2i_logout_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+ memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
+
+ logout_wqe->op_code = logout_hdr->opcode;
+ logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ logout_wqe->op_attr =
+ logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+ logout_wqe->itt = ((u16)task->itt |
+ (ISCSI_TASK_TYPE_MPATH <<
+ ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+ logout_wqe->data_length = 0;
+ logout_wqe->cid = 0;
+
+ logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+ logout_wqe->bd_list_addr_hi = (u32)
+ ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+ logout_wqe->num_bds = 1;
+ logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT;
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware
+ * @conn: iscsi connection which requires iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
+{
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+ struct kwqe *kwqe_arr[2];
+ struct iscsi_kwqe_conn_update *update_wqe;
+ struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+ update_wqe = &conn_update_kwqe;
+
+ update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+ update_wqe->hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ /* 5771x requires conn context id to be passed as is */
+ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
+ update_wqe->context_id = bnx2i_conn->ep->ep_cid;
+ else
+ update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
+ update_wqe->conn_flags = 0;
+ if (conn->hdrdgst_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+ if (conn->datadgst_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+ if (conn->session->initial_r2t_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+ if (conn->session->imm_data_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+ update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
+ update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
+ update_wqe->first_burst_length = conn->session->first_burst;
+ update_wqe->max_burst_length = conn->session->max_burst;
+ update_wqe->exp_stat_sn = conn->exp_statsn;
+ update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
+ update_wqe->session_error_recovery_level = conn->session->erl;
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "bnx2i: conn update - MBL 0x%x FBL 0x%x"
+ "MRDSL_I 0x%x MRDSL_T 0x%x \n",
+ update_wqe->max_burst_length,
+ update_wqe->first_burst_length,
+ update_wqe->max_recv_pdu_length,
+ update_wqe->max_send_pdu_length);
+
+ kwqe_arr[0] = (struct kwqe *) update_wqe;
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware
+ * @data: endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+ if (ep->state == EP_STATE_OFLD_START) {
+ printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+ ep->state = EP_STATE_OFLD_FAILED;
+ } else if (ep->state == EP_STATE_DISCONN_START) {
+ printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
+ ep->state = EP_STATE_DISCONN_TIMEDOUT;
+ } else if (ep->state == EP_STATE_CLEANUP_START) {
+ printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
+ ep->state = EP_STATE_CLEANUP_FAILED;
+ }
+
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+static int bnx2i_power_of2(u32 val)
+{
+ u32 power = 0;
+ if (val & (val - 1))
+ return power;
+ val--;
+ while (val) {
+ val = val >> 1;
+ power++;
+ }
+ return power;
+}
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ * @hba: adapter structure pointer
+ * @cmd: driver command structure which is requesting
+ * a WQE to sent to chip for further processing
+ *
+ * prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+ struct bnx2i_cleanup_request *cmd_cleanup;
+
+ cmd_cleanup =
+ (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
+ memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
+
+ cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+ cmd_cleanup->itt = cmd->req.itt;
+ cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
+ * iscsi connection context clean-up process
+ */
+int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ struct kwqe *kwqe_arr[2];
+ struct iscsi_kwqe_conn_destroy conn_cleanup;
+ int rc = -EINVAL;
+
+ memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+ conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+ conn_cleanup.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+ /* 5771x requires conn context id to be passed as is */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ conn_cleanup.context_id = ep->ep_cid;
+ else
+ conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+ conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+ kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+
+ return rc;
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ struct kwqe *kwqe_arr[2];
+ struct iscsi_kwqe_conn_offload1 ofld_req1;
+ struct iscsi_kwqe_conn_offload2 ofld_req2;
+ dma_addr_t dma_addr;
+ int num_kwqes = 2;
+ u32 *ptbl;
+ int rc = -EINVAL;
+
+ ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+ dma_addr = ep->qp.sq_pgtbl_phys;
+ ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ dma_addr = ep->qp.cq_pgtbl_phys;
+ ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ dma_addr = ep->qp.rq_pgtbl_phys;
+ ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+ ofld_req2.sq_first_pte.hi = *ptbl++;
+ ofld_req2.sq_first_pte.lo = *ptbl;
+
+ ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+ ofld_req2.cq_first_pte.hi = *ptbl++;
+ ofld_req2.cq_first_pte.lo = *ptbl;
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ ofld_req2.num_additional_wqes = 0;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ struct kwqe *kwqe_arr[5];
+ struct iscsi_kwqe_conn_offload1 ofld_req1;
+ struct iscsi_kwqe_conn_offload2 ofld_req2;
+ struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+ dma_addr_t dma_addr;
+ int num_kwqes = 2;
+ u32 *ptbl;
+ int rc = -EINVAL;
+
+ ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+ dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+ ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+ ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+ ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+ ofld_req2.sq_first_pte.hi = *ptbl++;
+ ofld_req2.sq_first_pte.lo = *ptbl;
+
+ ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+ ofld_req2.cq_first_pte.hi = *ptbl++;
+ ofld_req2.cq_first_pte.lo = *ptbl;
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+ ofld_req2.num_additional_wqes = 1;
+ memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+ ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+ ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+ ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+ kwqe_arr[2] = (struct kwqe *) ofld_req3;
+ /* need if we decide to go with multiple KCQE's per conn */
+ num_kwqes += 1;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ int rc;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ rc = bnx2i_5771x_send_conn_ofld_req(hba, ep);
+ else
+ rc = bnx2i_570x_send_conn_ofld_req(hba, ep);
+
+ return rc;
+}
+
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ * @ep: endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
+ * 64-bit address in big endian format. Whereas 10G/sec (57710) requires
+ * PT in little endian format
+ */
+static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+ int num_pages;
+ u32 *ptbl;
+ dma_addr_t page;
+ int cnic_dev_10g;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ cnic_dev_10g = 1;
+ else
+ cnic_dev_10g = 0;
+
+ /* SQ page table */
+ memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+ num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
+ page = ep->qp.sq_phys;
+
+ if (cnic_dev_10g)
+ ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+ else
+ ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+ while (num_pages--) {
+ if (cnic_dev_10g) {
+ /* PTE is written in little endian format for 57710 */
+ *ptbl = (u32) page;
+ ptbl++;
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ page += CNIC_PAGE_SIZE;
+ } else {
+ /* PTE is written in big endian format for
+ * 5706/5708/5709 devices */
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ *ptbl = (u32) page;
+ ptbl++;
+ page += CNIC_PAGE_SIZE;
+ }
+ }
+
+ /* RQ page table */
+ memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+ num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
+ page = ep->qp.rq_phys;
+
+ if (cnic_dev_10g)
+ ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+ else
+ ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+ while (num_pages--) {
+ if (cnic_dev_10g) {
+ /* PTE is written in little endian format for 57710 */
+ *ptbl = (u32) page;
+ ptbl++;
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ page += CNIC_PAGE_SIZE;
+ } else {
+ /* PTE is written in big endian format for
+ * 5706/5708/5709 devices */
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ *ptbl = (u32) page;
+ ptbl++;
+ page += CNIC_PAGE_SIZE;
+ }
+ }
+
+ /* CQ page table */
+ memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+ num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
+ page = ep->qp.cq_phys;
+
+ if (cnic_dev_10g)
+ ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+ else
+ ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+ while (num_pages--) {
+ if (cnic_dev_10g) {
+ /* PTE is written in little endian format for 57710 */
+ *ptbl = (u32) page;
+ ptbl++;
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ page += CNIC_PAGE_SIZE;
+ } else {
+ /* PTE is written in big endian format for
+ * 5706/5708/5709 devices */
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ *ptbl = (u32) page;
+ ptbl++;
+ page += CNIC_PAGE_SIZE;
+ }
+ }
+}
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP.
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
+ * memory for SQ/RQ/CQ and page tables. EP structure elements such
+ * as producer/consumer indexes/pointers, queue sizes and page table
+ * contents are setup
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ struct bnx2i_5771x_cq_db *cq_db;
+
+ ep->hba = hba;
+ ep->conn = NULL;
+ ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+ /* Allocate page table memory for SQ which is page aligned */
+ ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+ ep->qp.sq_mem_size =
+ (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+ ep->qp.sq_pgtbl_size =
+ (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ ep->qp.sq_pgtbl_size =
+ (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+
+ ep->qp.sq_pgtbl_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+ &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
+ if (!ep->qp.sq_pgtbl_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+ ep->qp.sq_pgtbl_size);
+ goto mem_alloc_err;
+ }
+
+ /* Allocate memory area for actual SQ element */
+ ep->qp.sq_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
+ if (!ep->qp.sq_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+ ep->qp.sq_mem_size);
+ goto mem_alloc_err;
+ }
+
+ memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+ ep->qp.sq_first_qe = ep->qp.sq_virt;
+ ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+ ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+ ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
+ ep->qp.sq_prod_idx = 0;
+ ep->qp.sq_cons_idx = 0;
+ ep->qp.sqe_left = hba->max_sqes;
+
+ /* Allocate page table memory for CQ which is page aligned */
+ ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+ ep->qp.cq_mem_size =
+ (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+ ep->qp.cq_pgtbl_size =
+ (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ ep->qp.cq_pgtbl_size =
+ (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+
+ ep->qp.cq_pgtbl_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+ &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
+ if (!ep->qp.cq_pgtbl_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+ ep->qp.cq_pgtbl_size);
+ goto mem_alloc_err;
+ }
+
+ /* Allocate memory area for actual CQ element */
+ ep->qp.cq_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
+ if (!ep->qp.cq_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+ ep->qp.cq_mem_size);
+ goto mem_alloc_err;
+ }
+ memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+ ep->qp.cq_first_qe = ep->qp.cq_virt;
+ ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+ ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+ ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
+ ep->qp.cq_prod_idx = 0;
+ ep->qp.cq_cons_idx = 0;
+ ep->qp.cqe_left = hba->max_cqes;
+ ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+ ep->qp.cqe_size = hba->max_cqes;
+
+ /* Invalidate all EQ CQE index, req only for 57710 */
+ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+ memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+ /* Allocate page table memory for RQ which is page aligned */
+ ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+ ep->qp.rq_mem_size =
+ (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+ ep->qp.rq_pgtbl_size =
+ (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ ep->qp.rq_pgtbl_size =
+ (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
+
+ ep->qp.rq_pgtbl_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+ &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
+ if (!ep->qp.rq_pgtbl_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+ ep->qp.rq_pgtbl_size);
+ goto mem_alloc_err;
+ }
+
+ /* Allocate memory area for actual RQ element */
+ ep->qp.rq_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+ &ep->qp.rq_phys, GFP_KERNEL);
+ if (!ep->qp.rq_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+ ep->qp.rq_mem_size);
+ goto mem_alloc_err;
+ }
+
+ ep->qp.rq_first_qe = ep->qp.rq_virt;
+ ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+ ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+ ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
+ ep->qp.rq_prod_idx = 0x8000;
+ ep->qp.rq_cons_idx = 0;
+ ep->qp.rqe_left = hba->max_rqes;
+
+ setup_qp_page_tables(ep);
+
+ return 0;
+
+mem_alloc_err:
+ bnx2i_free_qp_resc(hba, ep);
+ return -ENOMEM;
+}
+
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ if (ep->qp.ctx_base) {
+ iounmap(ep->qp.ctx_base);
+ ep->qp.ctx_base = NULL;
+ }
+ /* Free SQ mem */
+ if (ep->qp.sq_pgtbl_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+ ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
+ ep->qp.sq_pgtbl_virt = NULL;
+ ep->qp.sq_pgtbl_phys = 0;
+ }
+ if (ep->qp.sq_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ ep->qp.sq_virt, ep->qp.sq_phys);
+ ep->qp.sq_virt = NULL;
+ ep->qp.sq_phys = 0;
+ }
+
+ /* Free RQ mem */
+ if (ep->qp.rq_pgtbl_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+ ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
+ ep->qp.rq_pgtbl_virt = NULL;
+ ep->qp.rq_pgtbl_phys = 0;
+ }
+ if (ep->qp.rq_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+ ep->qp.rq_virt, ep->qp.rq_phys);
+ ep->qp.rq_virt = NULL;
+ ep->qp.rq_phys = 0;
+ }
+
+ /* Free CQ mem */
+ if (ep->qp.cq_pgtbl_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+ ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
+ ep->qp.cq_pgtbl_virt = NULL;
+ ep->qp.cq_pgtbl_phys = 0;
+ }
+ if (ep->qp.cq_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ ep->qp.cq_virt, ep->qp.cq_phys);
+ ep->qp.cq_virt = NULL;
+ ep->qp.cq_phys = 0;
+ }
+}
+
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ * @hba: adapter structure pointer
+ *
+ * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w
+ * This results in iSCSi support validation and on-chip context manager
+ * initialization. Firmware completes this handshake with a CQE carrying
+ * the result of iscsi support validation. Parameter carried by
+ * iscsi init request determines the number of offloaded connection and
+ * tolerance level for iscsi protocol violation this hba/chip can support
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+ struct kwqe *kwqe_arr[3];
+ struct iscsi_kwqe_init1 iscsi_init;
+ struct iscsi_kwqe_init2 iscsi_init2;
+ int rc = 0;
+ u64 mask64;
+
+ memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1));
+ memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2));
+
+ bnx2i_adjust_qp_size(hba);
+
+ iscsi_init.flags =
+ (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+ if (en_tcp_dack)
+ iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+ iscsi_init.reserved0 = 0;
+ iscsi_init.num_cqs = 1;
+ iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+ iscsi_init.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ iscsi_init.dummy_buffer_addr_hi =
+ (u32) ((u64) hba->dummy_buf_dma >> 32);
+
+ hba->num_ccell = hba->max_sqes >> 1;
+ hba->ctx_ccell_tasks =
+ ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+ iscsi_init.num_ccells_per_conn = hba->num_ccell;
+ iscsi_init.num_tasks_per_conn = hba->max_sqes;
+ iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ iscsi_init.sq_num_wqes = hba->max_sqes;
+ iscsi_init.cq_log_wqes_per_page =
+ (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
+ iscsi_init.cq_num_wqes = hba->max_cqes;
+ iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
+ iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
+ iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+ iscsi_init.rq_num_wqes = hba->max_rqes;
+
+
+ iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+ iscsi_init2.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+ iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
+ mask64 = 0x0ULL;
+ mask64 |= (
+ /* CISCO MDS */
+ (1UL <<
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+ /* HP MSA1510i */
+ (1UL <<
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+ /* EMC */
+ (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
+ if (error_mask1) {
+ iscsi_init2.error_bit_map[0] = error_mask1;
+ mask64 ^= (u32)(mask64);
+ mask64 |= error_mask1;
+ } else
+ iscsi_init2.error_bit_map[0] = (u32) mask64;
+
+ if (error_mask2) {
+ iscsi_init2.error_bit_map[1] = error_mask2;
+ mask64 &= 0xffffffff;
+ mask64 |= ((u64)error_mask2 << 32);
+ } else
+ iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+ iscsi_error_mask = mask64;
+
+ kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+ kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+ return rc;
+}
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
+ * @session: iscsi session
+ * @bnx2i_conn: bnx2i connection
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+ struct bnx2i_cmd_response *resp_cqe;
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct iscsi_task *task;
+ struct iscsi_scsi_rsp *hdr;
+ u32 datalen = 0;
+
+ resp_cqe = (struct bnx2i_cmd_response *)cqe;
+ spin_lock_bh(&session->back_lock);
+ task = iscsi_itt_to_task(conn,
+ resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+ if (!task)
+ goto fail;
+
+ bnx2i_cmd = task->dd_data;
+
+ if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+ conn->datain_pdus_cnt +=
+ resp_cqe->task_stat.read_stat.num_data_ins;
+ conn->rxdata_octets +=
+ bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.read_stat.num_data_ins);
+ ADD_STATS_64(hba, rx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
+ } else {
+ conn->dataout_pdus_cnt +=
+ resp_cqe->task_stat.write_stat.num_data_outs;
+ conn->r2t_pdus_cnt +=
+ resp_cqe->task_stat.write_stat.num_r2ts;
+ conn->txdata_octets +=
+ bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, tx_pdus,
+ resp_cqe->task_stat.write_stat.num_data_outs);
+ ADD_STATS_64(hba, tx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.write_stat.num_r2ts);
+ }
+ bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
+
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
+ resp_cqe = (struct bnx2i_cmd_response *)cqe;
+ hdr->opcode = resp_cqe->op_code;
+ hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
+ hdr->response = resp_cqe->response;
+ hdr->cmd_status = resp_cqe->status;
+ hdr->flags = resp_cqe->response_flags;
+ hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
+
+ if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
+ goto done;
+
+ if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+ datalen = resp_cqe->data_length;
+ if (datalen < 2)
+ goto done;
+
+ if (datalen > BNX2I_RQ_WQE_SIZE) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "sense data len %d > RQ sz\n",
+ datalen);
+ datalen = BNX2I_RQ_WQE_SIZE;
+ } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "sense data len %d > conn data\n",
+ datalen);
+ datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ }
+
+ bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
+ bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
+ }
+
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, datalen);
+fail:
+ spin_unlock_bh(&session->back_lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_login_response *login;
+ struct iscsi_login_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ login = (struct bnx2i_login_response *) cqe;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(conn,
+ login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = login->op_code;
+ resp_hdr->flags = login->response_flags;
+ resp_hdr->max_version = login->version_max;
+ resp_hdr->active_version = login->version_active;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, login->data_length);
+ memcpy(resp_hdr->isid, &login->isid_lo, 6);
+ resp_hdr->tsih = cpu_to_be16(login->tsih);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->statsn = cpu_to_be32(login->stat_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
+ resp_hdr->status_class = login->status_class;
+ resp_hdr->status_detail = login->status_detail;
+ pld_len = login->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr =
+ bnx2i_conn->gen_pdu.resp_buf + pld_len;
+
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
+ if (pad_len) {
+ int i = 0;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->back_lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_process_text_resp - this function handles iscsi text response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Text Response CQE& complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_text_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_text_response *text;
+ struct iscsi_text_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ text = (struct bnx2i_text_response *) cqe;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = text->op_code;
+ resp_hdr->flags = text->response_flags;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, text->data_length);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->ttt = cpu_to_be32(text->ttt);
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
+ pld_len = text->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
+ pld_len;
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
+ if (pad_len) {
+ int i = 0;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr -
+ bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->back_lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_tmf_response *tmf_cqe;
+ struct iscsi_tm_rsp *resp_hdr;
+
+ tmf_cqe = (struct bnx2i_tmf_response *)cqe;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(conn,
+ tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = tmf_cqe->op_code;
+ resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->response = tmf_cqe->response;
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+ spin_unlock(&session->back_lock);
+ return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_logout_response *logout;
+ struct iscsi_logout_rsp *resp_hdr;
+
+ logout = (struct bnx2i_logout_response *) cqe;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(conn,
+ logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = logout->op_code;
+ resp_hdr->flags = logout->response;
+ resp_hdr->hlength = 0;
+
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
+
+ resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
+ resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+ bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
+done:
+ spin_unlock(&session->back_lock);
+ return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees IIT and command structures
+ */
+static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_nop_in_msg *nop_in;
+ struct iscsi_task *task;
+
+ nop_in = (struct bnx2i_nop_in_msg *)cqe;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(conn,
+ nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+ if (task)
+ __iscsi_put_task(task);
+ spin_unlock(&session->back_lock);
+}
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
+ * @conn: iscsi connection
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ * payload data length is '0'. This function makes corresponding
+ * adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
+{
+ char dummy_rq_data[2];
+ bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
+ bnx2i_put_rq_buf(bnx2i_conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_nop_in_msg *nop_in;
+ struct iscsi_nopin *hdr;
+ int tgt_async_nop = 0;
+
+ nop_in = (struct bnx2i_nop_in_msg *)cqe;
+
+ spin_lock(&session->back_lock);
+ hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = nop_in->op_code;
+ hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
+ hdr->ttt = cpu_to_be32(nop_in->ttt);
+
+ if (nop_in->itt == (u16) RESERVED_ITT) {
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+ hdr->itt = RESERVED_ITT;
+ tgt_async_nop = 1;
+ goto done;
+ }
+
+ /* this is a response to one of our nop-outs */
+ task = iscsi_itt_to_task(conn,
+ (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX));
+ if (task) {
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = task->hdr->itt;
+ hdr->ttt = cpu_to_be32(nop_in->ttt);
+ memcpy(&hdr->lun, nop_in->lun, 8);
+ }
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+ spin_unlock(&session->back_lock);
+
+ return tgt_async_nop;
+}
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct bnx2i_async_msg *async_cqe;
+ struct iscsi_async *resp_hdr;
+ u8 async_event;
+
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+ async_cqe = (struct bnx2i_async_msg *)cqe;
+ async_event = async_cqe->async_event;
+
+ if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+ iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+ "async: scsi events not supported\n");
+ return;
+ }
+
+ spin_lock(&session->back_lock);
+ resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = async_cqe->op_code;
+ resp_hdr->flags = 0x80;
+
+ memcpy(&resp_hdr->lun, async_cqe->lun, 8);
+ resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
+
+ resp_hdr->async_event = async_cqe->async_event;
+ resp_hdr->async_vcode = async_cqe->async_vcode;
+
+ resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
+ resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
+ resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
+
+ __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
+ (struct iscsi_hdr *)resp_hdr, NULL, 0);
+ spin_unlock(&session->back_lock);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_reject_msg *reject;
+ struct iscsi_reject *hdr;
+
+ reject = (struct bnx2i_reject_msg *) cqe;
+ if (reject->data_length) {
+ bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
+ bnx2i_put_rq_buf(bnx2i_conn, 1);
+ } else
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+ spin_lock(&session->back_lock);
+ hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = reject->op_code;
+ hdr->reason = reject->reason;
+ hton24(hdr->dlength, reject->data_length);
+ hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
+ hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
+ reject->data_length);
+ spin_unlock(&session->back_lock);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct bnx2i_cleanup_response *cmd_clean_rsp;
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+
+ cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(conn,
+ cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+ if (!task)
+ printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
+ cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+ spin_unlock(&session->back_lock);
+ complete(&bnx2i_conn->cmd_cleanup_cmpl);
+}
+
+
+/**
+ * bnx2i_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg: ptr to bnx2i_percpu_info structure
+ */
+int bnx2i_percpu_io_thread(void *arg)
+{
+ struct bnx2i_percpu_s *p = arg;
+ struct bnx2i_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, MIN_NICE);
+
+ while (!kthread_should_stop()) {
+ spin_lock_bh(&p->p_work_lock);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->p_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ /* work allocated in the bh, freed here */
+ bnx2i_process_scsi_cmd_resp(work->session,
+ work->bnx2i_conn,
+ &work->cqe);
+ atomic_dec(&work->bnx2i_conn->work_cnt);
+ kfree(work);
+ }
+ spin_lock_bh(&p->p_work_lock);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&p->p_work_lock);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+
+/**
+ * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread
+ * @bnx2i_conn: bnx2i connection
+ *
+ * this function is called by generic KCQ handler to queue all pending cmd
+ * completion CQEs
+ *
+ * The implementation is to queue the cmd response based on the
+ * last recorded command for the given connection. The
+ * cpu_id gets recorded upon task_xmit. No out-of-order completion!
+ */
+static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct bnx2i_nop_in_msg *cqe)
+{
+ struct bnx2i_work *bnx2i_work = NULL;
+ struct bnx2i_percpu_s *p = NULL;
+ struct iscsi_task *task;
+ struct scsi_cmnd *sc;
+ int rc = 0;
+ int cpu;
+
+ spin_lock(&session->back_lock);
+ task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
+ cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+ if (!task || !task->sc) {
+ spin_unlock(&session->back_lock);
+ return -EINVAL;
+ }
+ sc = task->sc;
+
+ if (!blk_rq_cpu_valid(sc->request))
+ cpu = smp_processor_id();
+ else
+ cpu = sc->request->cpu;
+
+ spin_unlock(&session->back_lock);
+
+ p = &per_cpu(bnx2i_percpu, cpu);
+ spin_lock(&p->p_work_lock);
+ if (unlikely(!p->iothread)) {
+ rc = -EINVAL;
+ goto err;
+ }
+ /* Alloc and copy to the cqe */
+ bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC);
+ if (bnx2i_work) {
+ INIT_LIST_HEAD(&bnx2i_work->list);
+ bnx2i_work->session = session;
+ bnx2i_work->bnx2i_conn = bnx2i_conn;
+ memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe));
+ list_add_tail(&bnx2i_work->list, &p->work_list);
+ atomic_inc(&bnx2i_conn->work_cnt);
+ wake_up_process(p->iothread);
+ spin_unlock(&p->p_work_lock);
+ goto done;
+ } else
+ rc = -ENOMEM;
+err:
+ spin_unlock(&p->p_work_lock);
+ bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe);
+done:
+ return rc;
+}
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQE's
+ * @bnx2i_conn: bnx2i connection
+ *
+ * this function is called by generic KCQ handler to process all pending CQE's
+ */
+static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+ struct qp_info *qp;
+ struct bnx2i_nop_in_msg *nopin;
+ int tgt_async_msg;
+ int cqe_cnt = 0;
+
+ if (bnx2i_conn->ep == NULL)
+ return 0;
+
+ qp = &bnx2i_conn->ep->qp;
+
+ if (!qp->cq_virt) {
+ printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
+ hba->netdev->name);
+ goto out;
+ }
+ while (1) {
+ nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
+ if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+ break;
+
+ if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (nopin->op_code == ISCSI_OP_NOOP_IN &&
+ nopin->itt == (u16) RESERVED_ITT) {
+ printk(KERN_ALERT "bnx2i: Unsolicited "
+ "NOP-In detected for suspended "
+ "connection dev=%s!\n",
+ hba->netdev->name);
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+ goto cqe_out;
+ }
+ break;
+ }
+ tgt_async_msg = 0;
+
+ switch (nopin->op_code) {
+ case ISCSI_OP_SCSI_CMD_RSP:
+ case ISCSI_OP_SCSI_DATA_IN:
+ /* Run the kthread engine only for data cmds
+ All other cmds will be completed in this bh! */
+ bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
+ goto done;
+ case ISCSI_OP_LOGIN_RSP:
+ bnx2i_process_login_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ bnx2i_process_tmf_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_TEXT_RSP:
+ bnx2i_process_text_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_LOGOUT_RSP:
+ bnx2i_process_logout_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_NOOP_IN:
+ if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
+ qp->cq_cons_qe))
+ tgt_async_msg = 1;
+ break;
+ case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
+ bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_ASYNC_EVENT:
+ bnx2i_process_async_mesg(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ tgt_async_msg = 1;
+ break;
+ case ISCSI_OP_REJECT:
+ bnx2i_process_reject_mesg(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OPCODE_CLEANUP_RESPONSE:
+ bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ default:
+ printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+ nopin->op_code);
+ }
+
+ ADD_STATS_64(hba, rx_pdus, 1);
+ ADD_STATS_64(hba, rx_bytes, nopin->data_length);
+done:
+ if (!tgt_async_msg) {
+ if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
+ printk(KERN_ALERT "bnx2i (%s): no active cmd! "
+ "op 0x%x\n",
+ hba->netdev->name,
+ nopin->op_code);
+ else
+ atomic_dec(&bnx2i_conn->ep->num_active_cmds);
+ }
+cqe_out:
+ /* clear out in production version only, till beta keep opcode
+ * field intact, will be helpful in debugging (context dump)
+ * nopin->op_code = 0;
+ */
+ cqe_cnt++;
+ qp->cqe_exp_seq_sn++;
+ if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+ qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+ if (qp->cq_cons_qe == qp->cq_last_qe) {
+ qp->cq_cons_qe = qp->cq_first_qe;
+ qp->cq_cons_idx = 0;
+ } else {
+ qp->cq_cons_qe++;
+ qp->cq_cons_idx++;
+ }
+ }
+out:
+ return cqe_cnt;
+}
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ * of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *new_cqe_kcqe)
+{
+ struct bnx2i_conn *bnx2i_conn;
+ u32 iscsi_cid;
+ int nxt_idx;
+
+ iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+ if (!bnx2i_conn) {
+ printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+ return;
+ }
+ if (!bnx2i_conn->ep) {
+ printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+ return;
+ }
+
+ bnx2i_process_new_cqes(bnx2i_conn);
+ nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep,
+ CNIC_ARM_CQE_FP);
+ if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn))
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ * @hba: adapter structure pointer
+ * @update_kcqe: kcqe pointer
+ *
+ * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *update_kcqe)
+{
+ struct bnx2i_conn *conn;
+ u32 iscsi_cid;
+
+ iscsi_cid = update_kcqe->iscsi_conn_id;
+ conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+ if (!conn) {
+ printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+ return;
+ }
+ if (!conn->ep) {
+ printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+ return;
+ }
+
+ if (update_kcqe->completion_status) {
+ printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+ conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+ } else
+ conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+ wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ * @hba: adapter structure pointer
+ * @bnx2i_conn: iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn)
+{
+ iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ *
+ * @hba: adapter structure pointer
+ * @tcp_err: tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *tcp_err)
+{
+ struct bnx2i_conn *bnx2i_conn;
+ u32 iscsi_cid;
+
+ iscsi_cid = tcp_err->iscsi_conn_id;
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+ if (!bnx2i_conn) {
+ printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+ return;
+ }
+
+ printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+ iscsi_cid, tcp_err->completion_status);
+ bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ * @hba: adapter structure pointer
+ * @iscsi_err: iscsi error kcqe pointer
+ *
+ * handles iscsi error notifications from the FW. Firmware based in initial
+ * handshake classifies iscsi protocol / TCP rfc violation into either
+ * warning or error indications. If indication is of "Error" type, driver
+ * will initiate session recovery for that connection/session. For
+ * "Warning" type indication, driver will put out a system log message
+ * (there will be only one message for each type for the life of the
+ * session, this is to avoid un-necessarily overloading the system)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *iscsi_err)
+{
+ struct bnx2i_conn *bnx2i_conn;
+ u32 iscsi_cid;
+ char warn_notice[] = "iscsi_warning";
+ char error_notice[] = "iscsi_error";
+ char additional_notice[64];
+ char *message;
+ int need_recovery;
+ u64 err_mask64;
+
+ iscsi_cid = iscsi_err->iscsi_conn_id;
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+ if (!bnx2i_conn) {
+ printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+ return;
+ }
+
+ err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
+ if (err_mask64 & iscsi_error_mask) {
+ need_recovery = 0;
+ message = warn_notice;
+ } else {
+ need_recovery = 1;
+ message = error_notice;
+ }
+
+ switch (iscsi_err->completion_status) {
+ case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+ strcpy(additional_notice, "hdr digest err");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+ strcpy(additional_notice, "data digest err");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+ strcpy(additional_notice, "wrong opcode rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+ strcpy(additional_notice, "AHS len > 0 rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+ strcpy(additional_notice, "invalid ITT rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+ strcpy(additional_notice, "wrong StatSN rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+ strcpy(additional_notice, "wrong DataSN rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
+ strcpy(additional_notice, "pend R2T violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+ strcpy(additional_notice, "ERL0, UO");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+ strcpy(additional_notice, "ERL0, U1");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+ strcpy(additional_notice, "ERL0, U2");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+ strcpy(additional_notice, "ERL0, U3");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+ strcpy(additional_notice, "ERL0, U4");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+ strcpy(additional_notice, "ERL0, U5");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+ strcpy(additional_notice, "ERL0, U6");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+ strcpy(additional_notice, "invalid resi len");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+ strcpy(additional_notice, "MRDSL violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+ strcpy(additional_notice, "F-bit not set");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+ strcpy(additional_notice, "invalid TTT");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+ strcpy(additional_notice, "invalid DataSN");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+ strcpy(additional_notice, "burst len violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+ strcpy(additional_notice, "buf offset violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+ strcpy(additional_notice, "invalid LUN field");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+ strcpy(additional_notice, "invalid R2TSN field");
+ break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+ case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+ strcpy(additional_notice, "invalid cmd len1");
+ break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+ case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+ strcpy(additional_notice, "invalid cmd len2");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+ strcpy(additional_notice,
+ "pend r2t exceeds MaxOutstandingR2T value");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+ strcpy(additional_notice, "TTT is rsvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+ strcpy(additional_notice, "MBL violation");
+ break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+ case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+ strcpy(additional_notice, "data seg len != 0");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+ strcpy(additional_notice, "reject pdu len error");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+ strcpy(additional_notice, "async pdu len error");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+ strcpy(additional_notice, "nopin pdu len error");
+ break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+ case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+ strcpy(additional_notice, "pend r2t in cleanup");
+ break;
+
+ case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+ strcpy(additional_notice, "IP fragments rcvd");
+ break;
+ case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+ strcpy(additional_notice, "IP options error");
+ break;
+ case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+ strcpy(additional_notice, "urgent flag error");
+ break;
+ default:
+ printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+ iscsi_err->completion_status);
+ }
+
+ if (need_recovery) {
+ iscsi_conn_printk(KERN_ALERT,
+ bnx2i_conn->cls_conn->dd_data,
+ "bnx2i: %s - %s\n",
+ message, additional_notice);
+
+ iscsi_conn_printk(KERN_ALERT,
+ bnx2i_conn->cls_conn->dd_data,
+ "conn_err - hostno %d conn %p, "
+ "iscsi_cid %x cid %x\n",
+ bnx2i_conn->hba->shost->host_no,
+ bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
+ bnx2i_conn->ep->ep_cid);
+ bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+ } else
+ if (!test_and_set_bit(iscsi_err->completion_status,
+ (void *) &bnx2i_conn->violation_notified))
+ iscsi_conn_printk(KERN_ALERT,
+ bnx2i_conn->cls_conn->dd_data,
+ "bnx2i: %s - %s\n",
+ message, additional_notice);
+}
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba: adapter structure pointer
+ * @conn_destroy: conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *conn_destroy)
+{
+ struct bnx2i_endpoint *ep;
+
+ ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+ if (!ep) {
+ printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+ "offload request, unexpected complection\n");
+ return;
+ }
+
+ if (hba != ep->hba) {
+ printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ return;
+ }
+
+ if (conn_destroy->completion_status) {
+ printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+ ep->state = EP_STATE_CLEANUP_FAILED;
+ } else
+ ep->state = EP_STATE_CLEANUP_CMPL;
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ * woken-up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *ofld_kcqe)
+{
+ u32 cid_addr;
+ struct bnx2i_endpoint *ep;
+ u32 cid_num;
+
+ ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+ if (!ep) {
+ printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+ return;
+ }
+
+ if (hba != ep->hba) {
+ printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ return;
+ }
+
+ if (ofld_kcqe->completion_status) {
+ ep->state = EP_STATE_OFLD_FAILED;
+ if (ofld_kcqe->completion_status ==
+ ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+ printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable "
+ "to allocate iSCSI context resources\n",
+ hba->netdev->name);
+ else if (ofld_kcqe->completion_status ==
+ ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE)
+ printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
+ "opcode\n", hba->netdev->name);
+ else if (ofld_kcqe->completion_status ==
+ ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
+ /* error status code valid only for 5771x chipset */
+ ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
+ else
+ printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
+ "error code %d\n", hba->netdev->name,
+ ofld_kcqe->completion_status);
+ } else {
+ ep->state = EP_STATE_OFLD_COMPL;
+ cid_addr = ofld_kcqe->iscsi_conn_context_id;
+ cid_num = bnx2i_get_cid_num(ep);
+ ep->ep_cid = cid_addr;
+ ep->qp.ctx_base = NULL;
+ }
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - process iscsi conn update completion KCQE
+ * @hba: adapter structure pointer
+ * @update_kcqe: kcqe pointer
+ *
+ * Generic KCQ event handler/dispatcher
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+ u32 num_cqe)
+{
+ struct bnx2i_hba *hba = context;
+ int i = 0;
+ struct iscsi_kcqe *ikcqe = NULL;
+
+ while (i < num_cqe) {
+ ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+ if (ikcqe->op_code ==
+ ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+ bnx2i_fastpath_notification(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+ bnx2i_process_ofld_cmpl(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+ bnx2i_process_update_conn_cmpl(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+ if (ikcqe->completion_status !=
+ ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+ bnx2i_iscsi_license_error(hba, ikcqe->\
+ completion_status);
+ else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2i_get_link_state(hba);
+ printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+ "ISCSI_INIT passed\n",
+ (u8)hba->pcidev->bus->number,
+ hba->pci_devno,
+ (u8)hba->pci_func);
+
+
+ }
+ } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+ bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+ bnx2i_process_iscsi_error(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+ bnx2i_process_tcp_error(hba, ikcqe);
+ else
+ printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+ ikcqe->op_code);
+ }
+}
+
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ * @context: adapter structure pointer
+ * @event: event type
+ * @vlan_id: vlans id - associated vlan id with this event
+ *
+ * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
+ * NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event,
+ u16 vlan_id)
+{
+ struct bnx2i_hba *hba = context;
+
+ /* Ignore all netevent coming from vlans */
+ if (vlan_id != 0)
+ return;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ bnx2i_send_fw_iscsi_init_msg(hba);
+ break;
+ case NETDEV_DOWN:
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ break;
+ case NETDEV_GOING_DOWN:
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ iscsi_host_for_each_session(hba->shost,
+ bnx2i_drop_session);
+ break;
+ case NETDEV_CHANGE:
+ bnx2i_get_link_state(hba);
+ break;
+ default:
+ ;
+ }
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+ ep->state = EP_STATE_CONNECT_FAILED;
+ else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+ ep->state = EP_STATE_CONNECT_COMPL;
+ else
+ ep->state = EP_STATE_CONNECT_FAILED;
+
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate completion of option-2 graceful TCP connect shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate completion of option-2 abortive TCP connect termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ * @hba: adapter structure pointer
+ * @update_kcqe: kcqe pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ * async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_TCP_FIN_RCVD;
+ if (ep->conn)
+ bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ * @hba: adapter structure pointer
+ * @update_kcqe: kcqe pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+ u32 old_state = ep->state;
+
+ ep->state = EP_STATE_TCP_RST_RCVD;
+ if (old_state == EP_STATE_DISCONN_START)
+ wake_up_interruptible(&ep->ofld_wait);
+ else
+ if (ep->conn)
+ bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
+static int bnx2i_send_nl_mesg(void *context, u32 msg_type,
+ char *buf, u16 buflen)
+{
+ struct bnx2i_hba *hba = context;
+ int rc;
+
+ if (!hba)
+ return -ENODEV;
+
+ rc = iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
+ msg_type, buf, buflen);
+ if (rc)
+ printk(KERN_ALERT "bnx2i: private nl message send error\n");
+
+ return rc;
+}
+
+
+/**
+ * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
+ * carrying callback function pointers
+ *
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+ .cnic_init = bnx2i_ulp_init,
+ .cnic_exit = bnx2i_ulp_exit,
+ .cnic_start = bnx2i_start,
+ .cnic_stop = bnx2i_stop,
+ .indicate_kcqes = bnx2i_indicate_kcqe,
+ .indicate_netevent = bnx2i_indicate_netevent,
+ .cm_connect_complete = bnx2i_cm_connect_cmpl,
+ .cm_close_complete = bnx2i_cm_close_cmpl,
+ .cm_abort_complete = bnx2i_cm_abort_cmpl,
+ .cm_remote_close = bnx2i_cm_remote_close,
+ .cm_remote_abort = bnx2i_cm_remote_abort,
+ .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+ .cnic_get_stats = bnx2i_get_stats,
+ .owner = THIS_MODULE
+};
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ * @ep: bnx2i endpoint
+ *
+ * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these
+ * register in BAR #0. Whereas in 57710 these register are accessed by
+ * mapping BAR #1
+ */
+int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+ u32 cid_num;
+ u32 reg_off;
+ u32 first_l4l5;
+ u32 ctx_sz;
+ u32 config2;
+ resource_size_t reg_base;
+
+ cid_num = bnx2i_get_cid_num(ep);
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ reg_base = pci_resource_start(ep->hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
+ ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ goto arm_cq;
+ }
+
+ if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+ (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+ config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+ first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+ ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+ if (ctx_sz)
+ reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+ + BNX2I_570X_PAGE_SIZE_DEFAULT *
+ (((cid_num - first_l4l5) / ctx_sz) + 256);
+ else
+ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+ } else
+ /* 5709 device in normal node and 5706/5708 devices */
+ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+ ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
+ MB_KERNEL_CTX_SIZE);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
+
+arm_cq:
+ bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 000000000..c8b410c24
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,604 @@
+/* bnx2i.c: QLogic NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
+static u32 adapter_count;
+
+#define DRV_MODULE_NAME "bnx2i"
+#define DRV_MODULE_VERSION "2.7.10.1"
+#define DRV_MODULE_RELDATE "Jul 16, 2014"
+
+static char version[] =
+ "QLogic NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
+ "Eddie Wai <eddie.wai@broadcom.com>");
+
+MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/57710/57711/57712"
+ "/57800/57810/57840 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static DEFINE_MUTEX(bnx2i_dev_lock);
+
+unsigned int event_coal_min = 24;
+module_param(event_coal_min, int, 0664);
+MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
+
+unsigned int event_coal_div = 2;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, uint, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, uint, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
+
+DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
+static int bnx2i_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2i_cpu_notifier = {
+ .notifier_call = bnx2i_cpu_callback,
+};
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ * @hba: Adapter structure pointer
+ * @cnic: Corresponding cnic device
+ *
+ * This function identifies the NX2 device type and sets appropriate
+ * queue mailbox register access method, 5709 requires driver to
+ * access MBOX regs using *bin* mode
+ */
+void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev)
+{
+ hba->cnic_dev_type = 0;
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5706S) {
+ set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5708S) {
+ set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5709S) {
+ set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+ hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+ }
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+ } else {
+ printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
+ hba->pci_did);
+ }
+}
+
+
+/**
+ * get_adapter_list_head - returns head of adapter list
+ */
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+ struct bnx2i_hba *hba = NULL;
+ struct bnx2i_hba *tmp_hba;
+
+ if (!adapter_count)
+ goto hba_not_found;
+
+ mutex_lock(&bnx2i_dev_lock);
+ list_for_each_entry(tmp_hba, &adapter_list, link) {
+ if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
+ hba = tmp_hba;
+ break;
+ }
+ }
+ mutex_unlock(&bnx2i_dev_lock);
+hba_not_found:
+ return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ * @cnic: pointer to cnic device instance
+ *
+ */
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct bnx2i_hba *hba, *temp;
+
+ mutex_lock(&bnx2i_dev_lock);
+ list_for_each_entry_safe(hba, temp, &adapter_list, link) {
+ if (hba->cnic == cnic) {
+ mutex_unlock(&bnx2i_dev_lock);
+ return hba;
+ }
+ }
+ mutex_unlock(&bnx2i_dev_lock);
+ return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ * @handle: transparent handle pointing to adapter structure
+ *
+ * This function maps adapter structure to pcidev structure and initiates
+ * firmware handshake to enable/initialize on chip iscsi components
+ * This bnx2i - cnic interface api callback is issued after following
+ * 2 conditions are met -
+ * a) underlying network interface is up (marked by event 'NETDEV_UP'
+ * from netdev
+ * b) bnx2i adapter instance is registered
+ */
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME (1000 / HZ)
+ struct bnx2i_hba *hba = handle;
+ int i = HZ;
+
+ /* On some bnx2x devices, it is possible that iSCSI is no
+ * longer supported after firmware is downloaded. In that
+ * case, the iscsi_init_msg will return failure.
+ */
+
+ bnx2i_send_fw_iscsi_init_msg(hba);
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) &&
+ !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--)
+ msleep(BNX2I_INIT_POLL_TIME);
+}
+
+
+/**
+ * bnx2i_chip_cleanup - local routine to handle chip cleanup
+ * @hba: Adapter instance to register
+ *
+ * Driver checks if adapter still has any active connections before
+ * executing the cleanup process
+ */
+static void bnx2i_chip_cleanup(struct bnx2i_hba *hba)
+{
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct list_head *pos, *tmp;
+
+ if (hba->ofld_conns_active) {
+ /* Stage to force the disconnection
+ * This is the case where the daemon is either slow or
+ * not present
+ */
+ printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active "
+ "connections\n", hba->netdev->name,
+ hba->ofld_conns_active);
+ mutex_lock(&hba->net_dev_lock);
+ list_for_each_safe(pos, tmp, &hba->ep_active_list) {
+ bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link);
+ /* Clean up the chip only */
+ bnx2i_hw_ep_disconnect(bnx2i_ep);
+ bnx2i_ep->cm_sk = NULL;
+ }
+ mutex_unlock(&hba->net_dev_lock);
+ }
+}
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ * @handle: transparent handle pointing to adapter structure
+ *
+ * driver checks if adapter is already in shutdown mode, if not start
+ * the shutdown process
+ */
+void bnx2i_stop(void *handle)
+{
+ struct bnx2i_hba *hba = handle;
+ int conns_active;
+ int wait_delay = 1 * HZ;
+
+ /* check if cleanup happened in GOING_DOWN context */
+ if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN,
+ &hba->adapter_state)) {
+ iscsi_host_for_each_session(hba->shost,
+ bnx2i_drop_session);
+ wait_delay = hba->hba_shutdown_tmo;
+ }
+ /* Wait for inflight offload connection tasks to complete before
+ * proceeding. Forcefully terminate all connection recovery in
+ * progress at the earliest, either in bind(), send_pdu(LOGIN),
+ * or conn_start()
+ */
+ wait_event_interruptible_timeout(hba->eh_wait,
+ (list_empty(&hba->ep_ofld_list) &&
+ list_empty(&hba->ep_destroy_list)),
+ 2 * HZ);
+ /* Wait for all endpoints to be torn down, Chip will be reset once
+ * control returns to network driver. So it is required to cleanup and
+ * release all connection resources before returning from this routine.
+ */
+ while (hba->ofld_conns_active) {
+ conns_active = hba->ofld_conns_active;
+ wait_event_interruptible_timeout(hba->eh_wait,
+ (hba->ofld_conns_active != conns_active),
+ wait_delay);
+ if (hba->ofld_conns_active == conns_active)
+ break;
+ }
+ bnx2i_chip_cleanup(hba);
+
+ /* This flag should be cleared last so that ep_disconnect() gracefully
+ * cleans up connection context
+ */
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and allocate memory resources
+ * @hba: bnx2i adapter instance
+ * @cnic: cnic device handle
+ *
+ * Global resource lock is held during critical sections below. This routine is
+ * called from either cnic_register_driver() or device hot plug context and
+ * and does majority of device specific initialization
+ */
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+ int rc;
+
+ mutex_lock(&bnx2i_dev_lock);
+ if (!cnic->max_iscsi_conn) {
+ printk(KERN_ALERT "bnx2i: dev %s does not support "
+ "iSCSI\n", hba->netdev->name);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ hba->cnic = cnic;
+ rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+ if (!rc) {
+ hba->age++;
+ set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ list_add_tail(&hba->link, &adapter_list);
+ adapter_count++;
+ } else if (rc == -EBUSY) /* duplicate registration */
+ printk(KERN_ALERT "bnx2i, duplicate registration"
+ "hba=%p, cnic=%p\n", hba, cnic);
+ else if (rc == -EAGAIN)
+ printk(KERN_ERR "bnx2i, driver not registered\n");
+ else if (rc == -EINVAL)
+ printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI);
+ else
+ printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
+
+out:
+ mutex_unlock(&bnx2i_dev_lock);
+
+ return rc;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ * @dev: cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ * cnic devices. This routine allocate adapter structure and other
+ * device specific resources.
+ */
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2i_hba *hba;
+
+ /* Allocate a HBA structure for this device */
+ hba = bnx2i_alloc_hba(dev);
+ if (!hba) {
+ printk(KERN_ERR "bnx2i init: hba initialization failed\n");
+ return;
+ }
+
+ /* Get PCI related information and update hba struct members */
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ if (bnx2i_init_one(hba, dev)) {
+ printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+ bnx2i_free_hba(hba);
+ }
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ * @dev: cnic device handle
+ *
+ */
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2i_hba *hba;
+
+ hba = bnx2i_find_hba_for_cnic(dev);
+ if (!hba) {
+ printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+ "found, dev 0x%p\n", dev);
+ return;
+ }
+ mutex_lock(&bnx2i_dev_lock);
+ list_del_init(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ }
+ mutex_unlock(&bnx2i_dev_lock);
+
+ bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_get_stats - Retrieve various statistic from iSCSI offload
+ * @handle: bnx2i_hba
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * retrieve various iSCSI offload related statistics.
+ */
+int bnx2i_get_stats(void *handle)
+{
+ struct bnx2i_hba *hba = handle;
+ struct iscsi_stats_info *stats;
+
+ if (!hba)
+ return -EINVAL;
+
+ stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
+
+ if (!stats)
+ return -ENOMEM;
+
+ strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
+ memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
+
+ stats->max_frame_size = hba->netdev->mtu;
+ stats->txq_size = hba->max_sqes;
+ stats->rxq_size = hba->max_cqes;
+
+ stats->txq_avg_depth = 0;
+ stats->rxq_avg_depth = 0;
+
+ GET_STATS_64(hba, stats, rx_pdus);
+ GET_STATS_64(hba, stats, rx_bytes);
+
+ GET_STATS_64(hba, stats, tx_pdus);
+ GET_STATS_64(hba, stats, tx_bytes);
+
+ return 0;
+}
+
+
+/**
+ * bnx2i_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2i_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2i_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2i_percpu, cpu);
+
+ thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
+ cpu_to_node(cpu),
+ "bnx2i_thread/%d", cpu);
+ /* bind thread to the cpu */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+
+static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2i_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2i_work *work, *tmp;
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2i_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ /* Free all work in the list */
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ bnx2i_process_scsi_cmd_resp(work->session,
+ work->bnx2i_conn, &work->cqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->p_work_lock);
+ if (thread)
+ kthread_stop(thread);
+}
+
+
+/**
+ * bnx2i_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for iSCSI
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2i_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
+ cpu);
+ bnx2i_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2i_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * initialize any driver wide global data structures such as endpoint pool,
+ * tcp port manager/queue, sysfs. finally driver will register itself
+ * with the cnic module
+ */
+static int __init bnx2i_mod_init(void)
+{
+ int err;
+ unsigned cpu = 0;
+ struct bnx2i_percpu_s *p;
+
+ printk(KERN_INFO "%s", version);
+
+ if (sq_size && !is_power_of_2(sq_size))
+ sq_size = roundup_pow_of_two(sq_size);
+
+ mutex_init(&bnx2i_dev_lock);
+
+ bnx2i_scsi_xport_template =
+ iscsi_register_transport(&bnx2i_iscsi_transport);
+ if (!bnx2i_scsi_xport_template) {
+ printk(KERN_ERR "Could not register bnx2i transport.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+ if (err) {
+ printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
+ goto unreg_xport;
+ }
+
+ /* Create percpu kernel threads to handle iSCSI I/O completions */
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2i_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->p_work_lock);
+ p->iothread = NULL;
+ }
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu)
+ bnx2i_percpu_thread_create(cpu);
+
+ /* Initialize per CPU interrupt thread */
+ __register_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+ cpu_notifier_register_done();
+
+ return 0;
+
+unreg_xport:
+ iscsi_unregister_transport(&bnx2i_iscsi_transport);
+out:
+ return err;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * Global resource lock and host adapter lock is held during critical sections
+ * in this function. Driver will browse through the adapter list, cleans-up
+ * each instance, unregisters iscsi transport name and finally driver will
+ * unregister itself with the cnic module
+ */
+static void __exit bnx2i_mod_exit(void)
+{
+ struct bnx2i_hba *hba;
+ unsigned cpu = 0;
+
+ mutex_lock(&bnx2i_dev_lock);
+ while (!list_empty(&adapter_list)) {
+ hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
+ list_del(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ bnx2i_chip_cleanup(hba);
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ }
+
+ bnx2i_free_hba(hba);
+ }
+ mutex_unlock(&bnx2i_dev_lock);
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu)
+ bnx2i_percpu_thread_destroy(cpu);
+
+ __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+ cpu_notifier_register_done();
+
+ iscsi_unregister_transport(&bnx2i_iscsi_transport);
+ cnic_unregister_driver(CNIC_ULP_ISCSI);
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 000000000..e53078d03
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2305 @@
+/*
+ * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+
+#include <linux/slab.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+ int retval = 0;
+
+ if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+ test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+ test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+ retval = -EPERM;
+ return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd: iscsi cmd struct pointer
+ * @buf_off: absolute buffer offset
+ * @start_bd_off: u32 pointer to return the offset within the BD
+ * indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx: index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+ u32 *start_bd_off, u32 *start_bd_idx)
+{
+ struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+ u32 cur_offset = 0;
+ u32 cur_bd_idx = 0;
+
+ if (buf_off) {
+ while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+ cur_offset += bd_tbl->buffer_length;
+ cur_bd_idx++;
+ bd_tbl++;
+ }
+ }
+
+ *start_bd_off = buf_off - cur_offset;
+ *start_bd_idx = cur_bd_idx;
+}
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up BD various information
+ * @task: transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq which includes BD start
+ * index & BD buf off. his function takes into account iscsi parameter such
+ * as immediate data and unsolicited data is support on this connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+ struct bnx2i_cmd *cmd = task->dd_data;
+ u32 start_bd_offset;
+ u32 start_bd_idx;
+ u32 buffer_offset = 0;
+ u32 cmd_len = cmd->req.total_data_transfer_length;
+
+ /* if ImmediateData is turned off & IntialR2T is turned on,
+ * there will be no immediate or unsolicited data, just return.
+ */
+ if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
+ return;
+
+ /* Immediate data */
+ buffer_offset += task->imm_count;
+ if (task->imm_count == cmd_len)
+ return;
+
+ if (iscsi_task_has_unsol_data(task)) {
+ bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+ &start_bd_offset, &start_bd_idx);
+ cmd->req.ud_buffer_offset = start_bd_offset;
+ cmd->req.ud_start_bd_index = start_bd_idx;
+ buffer_offset += task->unsol_r2t.data_length;
+ }
+
+ if (buffer_offset != cmd_len) {
+ bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+ &start_bd_offset, &start_bd_idx);
+ if ((start_bd_offset > task->conn->session->first_burst) ||
+ (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+ int i = 0;
+
+ iscsi_conn_printk(KERN_ALERT, task->conn,
+ "bnx2i- error, buf offset 0x%x "
+ "bd_valid %d use_sg %d\n",
+ buffer_offset, cmd->io_tbl.bd_valid,
+ scsi_sg_count(cmd->scsi_cmd));
+ for (i = 0; i < cmd->io_tbl.bd_valid; i++)
+ iscsi_conn_printk(KERN_ALERT, task->conn,
+ "bnx2i err, bd[%d]: len %x\n",
+ i, cmd->io_tbl.bd_tbl[i].\
+ buffer_length);
+ }
+ cmd->req.sd_buffer_offset = start_bd_offset;
+ cmd->req.sd_start_bd_index = start_bd_idx;
+ }
+}
+
+
+
+/**
+ * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
+ * @hba: adapter instance
+ * @cmd: iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+ struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int bd_count = 0;
+ int sg_count;
+ int sg_len;
+ u64 addr;
+ int i;
+
+ BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
+
+ sg_count = scsi_dma_map(sc);
+
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buffer_addr_hi = addr >> 32;
+ bd[bd_count].buffer_length = sg_len;
+ bd[bd_count].flags = 0;
+ if (bd_count == 0)
+ bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+ byte_count += sg_len;
+ bd_count++;
+ }
+
+ if (bd_count)
+ bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+ BUG_ON(byte_count != scsi_bufflen(sc));
+ return bd_count;
+}
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ * @cmd: iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+ int bd_count;
+
+ bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
+ if (!bd_count) {
+ struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+
+ bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+ bd[0].buffer_length = bd[0].flags = 0;
+ }
+ cmd->io_tbl.bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ * @cmd: iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (cmd->io_tbl.bd_valid && sc) {
+ scsi_dma_unmap(sc);
+ cmd->io_tbl.bd_valid = 0;
+ }
+}
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+ memset(&cmd->req, 0x00, sizeof(cmd->req));
+ cmd->req.op_code = 0xFF;
+ cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
+ cmd->req.bd_list_addr_hi =
+ (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
+
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ * @hba: pointer to adapter instance
+ * @conn: pointer to iscsi connection
+ * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * update iscsi cid table entry with connection pointer. This enables
+ * driver to quickly get hold of connection structure pointer in
+ * completion/interrupt thread using iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn,
+ u32 iscsi_cid)
+{
+ if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+ iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+ "conn bind - entry #%d not free\n", iscsi_cid);
+ return -EBUSY;
+ }
+
+ hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
+ return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+ u16 iscsi_cid)
+{
+ if (!hba->cid_que.conn_cid_tbl) {
+ printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+ return NULL;
+
+ } else if (iscsi_cid >= hba->max_active_conns) {
+ printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+ return NULL;
+ }
+ return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool
+ * @hba: pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+ int idx;
+
+ if (!hba->cid_que.cid_free_cnt)
+ return -1;
+
+ idx = hba->cid_que.cid_q_cons_idx;
+ hba->cid_que.cid_q_cons_idx++;
+ if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+ hba->cid_que.cid_q_cons_idx = 0;
+
+ hba->cid_que.cid_free_cnt--;
+ return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns tcp port to free list
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+ int idx;
+
+ if (iscsi_cid == (u16) -1)
+ return;
+
+ hba->cid_que.cid_free_cnt++;
+
+ idx = hba->cid_que.cid_q_prod_idx;
+ hba->cid_que.cid_que[idx] = iscsi_cid;
+ hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+ hba->cid_que.cid_q_prod_idx++;
+ if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+ hba->cid_que.cid_q_prod_idx = 0;
+}
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ * @hba: pointer to adapter instance
+ *
+ * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
+ * and initialize table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+ int mem_size;
+ int i;
+
+ mem_size = hba->max_active_conns * sizeof(u32);
+ mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+ if (!hba->cid_que.cid_que_base)
+ return -ENOMEM;
+
+ mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+ mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+ if (!hba->cid_que.conn_cid_tbl) {
+ kfree(hba->cid_que.cid_que_base);
+ hba->cid_que.cid_que_base = NULL;
+ return -ENOMEM;
+ }
+
+ hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+ hba->cid_que.cid_q_prod_idx = 0;
+ hba->cid_que.cid_q_cons_idx = 0;
+ hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+ hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+ for (i = 0; i < hba->max_active_conns; i++) {
+ hba->cid_que.cid_que[i] = i;
+ hba->cid_que.conn_cid_tbl[i] = NULL;
+ }
+ return 0;
+}
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ * @hba: pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+ kfree(hba->cid_que.cid_que_base);
+ hba->cid_que.cid_que_base = NULL;
+
+ kfree(hba->cid_que.conn_cid_tbl);
+ hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ * @hba: pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from global pool and
+ * a tcp port to be used for this connection. Global resource lock,
+ * 'bnx2i_resc_lock' is held while accessing shared global data structures
+ */
+static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+ struct iscsi_endpoint *ep;
+ struct bnx2i_endpoint *bnx2i_ep;
+ u32 ec_div;
+
+ ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
+ if (!ep) {
+ printk(KERN_ERR "bnx2i: Could not allocate ep\n");
+ return NULL;
+ }
+
+ bnx2i_ep = ep->dd_data;
+ bnx2i_ep->cls_ep = ep;
+ INIT_LIST_HEAD(&bnx2i_ep->link);
+ bnx2i_ep->state = EP_STATE_IDLE;
+ bnx2i_ep->ep_iscsi_cid = (u16) -1;
+ bnx2i_ep->hba = hba;
+ bnx2i_ep->hba_age = hba->age;
+
+ ec_div = event_coal_div;
+ while (ec_div >>= 1)
+ bnx2i_ep->ec_shift += 1;
+
+ hba->ofld_conns_active++;
+ init_waitqueue_head(&bnx2i_ep->ofld_wait);
+ return ep;
+}
+
+
+/**
+ * bnx2i_free_ep - free endpoint
+ * @ep: pointer to iscsi endpoint structure
+ */
+static void bnx2i_free_ep(struct iscsi_endpoint *ep)
+{
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnx2i_resc_lock, flags);
+ bnx2i_ep->state = EP_STATE_IDLE;
+ bnx2i_ep->hba->ofld_conns_active--;
+
+ if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
+ bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+
+ if (bnx2i_ep->conn) {
+ bnx2i_ep->conn->ep = NULL;
+ bnx2i_ep->conn = NULL;
+ }
+
+ bnx2i_ep->hba = NULL;
+ spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+ iscsi_destroy_endpoint(ep);
+}
+
+
+/**
+ * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ * @cmd: iscsi command structure
+ */
+static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
+ struct bnx2i_cmd *cmd)
+{
+ struct io_bdt *io = &cmd->io_tbl;
+ struct iscsi_bd *bd;
+
+ io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
+ &io->bd_tbl_dma, GFP_KERNEL);
+ if (!io->bd_tbl) {
+ iscsi_session_printk(KERN_ERR, session, "Could not "
+ "allocate bdt.\n");
+ return -ENOMEM;
+ }
+ io->bd_valid = 0;
+ return 0;
+}
+
+/**
+ * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ * @cmd: iscsi command structure
+ */
+static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct bnx2i_cmd *cmd = task->dd_data;
+
+ if (cmd->io_tbl.bd_tbl)
+ dma_free_coherent(&hba->pcidev->dev,
+ ISCSI_MAX_BDS_PER_CMD *
+ sizeof(struct iscsi_bd),
+ cmd->io_tbl.bd_tbl,
+ cmd->io_tbl.bd_tbl_dma);
+ }
+
+}
+
+
+/**
+ * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ */
+static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct bnx2i_cmd *cmd = task->dd_data;
+
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ if (bnx2i_alloc_bdt(hba, session, cmd))
+ goto free_bdts;
+ }
+
+ return 0;
+
+free_bdts:
+ bnx2i_destroy_cmd_pool(hba, session);
+ return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocate BD table resources
+ * @hba: pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+ int rc = 0;
+ struct iscsi_bd *mp_bdt;
+ u64 addr;
+
+ hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ &hba->mp_bd_dma, GFP_KERNEL);
+ if (!hba->mp_bd_tbl) {
+ printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+ rc = -1;
+ goto out;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ CNIC_PAGE_SIZE,
+ &hba->dummy_buf_dma, GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ hba->mp_bd_tbl, hba->mp_bd_dma);
+ hba->mp_bd_tbl = NULL;
+ rc = -1;
+ goto out;
+ }
+
+ mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+ addr = (unsigned long) hba->dummy_buf_dma;
+ mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+ mp_bdt->buffer_addr_hi = addr >> 32;
+ mp_bdt->buffer_length = CNIC_PAGE_SIZE;
+ mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+ ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+ return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - releases ITT back to free pool
+ * @hba: pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+ if (hba->mp_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ hba->mp_bd_tbl, hba->mp_bd_dma);
+ hba->mp_bd_tbl = NULL;
+ }
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+ return;
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ *
+ * This notifies iscsid that there is a error, so it can initiate
+ * recovery.
+ *
+ * This relies on caller using the iscsi class iterator so the object
+ * is refcounted and does not disapper from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_add_tail(&ep->link, &hba->ep_destroy_list);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+ return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - add an entry to EP destroy list
+ *
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_del_init(&ep->link);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+
+ return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_add_tail(&ep->link, &hba->ep_ofld_list);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+ return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_del_init(&ep->link);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2i_endpoint *ep;
+
+ read_lock_bh(&hba->ep_rdwr_lock);
+ list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+ ep = (struct bnx2i_endpoint *)list;
+
+ if (ep->ep_iscsi_cid == iscsi_cid)
+ break;
+ ep = NULL;
+ }
+ read_unlock_bh(&hba->ep_rdwr_lock);
+
+ if (!ep)
+ printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+ return ep;
+}
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2i_endpoint *ep;
+
+ read_lock_bh(&hba->ep_rdwr_lock);
+ list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+ ep = (struct bnx2i_endpoint *)list;
+
+ if (ep->ep_iscsi_cid == iscsi_cid)
+ break;
+ ep = NULL;
+ }
+ read_unlock_bh(&hba->ep_rdwr_lock);
+
+ if (!ep)
+ printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+ return ep;
+}
+
+/**
+ * bnx2i_ep_active_list_add - add an entry to ep active list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * current active conn queue manager
+ */
+static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_add_tail(&ep->link, &hba->ep_active_list);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+}
+
+
+/**
+ * bnx2i_ep_active_list_del - deletes an entry to ep active list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * current active conn queue manager
+ */
+static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_del_init(&ep->link);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+}
+
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba: pointer to adapter instance
+ * @shost: scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * the device can handle. Each device 5708/5709/57710 has different
+ * capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+ struct Scsi_Host *shost)
+{
+ if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+ else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+ else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+ else
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic: cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ * support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+ struct Scsi_Host *shost;
+ struct bnx2i_hba *hba;
+
+ shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+ if (!shost)
+ return NULL;
+ shost->dma_boundary = cnic->pcidev->dma_mask;
+ shost->transportt = bnx2i_scsi_xport_template;
+ shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+ shost->max_channel = 0;
+ shost->max_lun = 512;
+ shost->max_cmd_len = 16;
+
+ hba = iscsi_host_priv(shost);
+ hba->shost = shost;
+ hba->netdev = cnic->netdev;
+ /* Get PCI related information and update hba struct members */
+ hba->pcidev = cnic->pcidev;
+ pci_dev_get(hba->pcidev);
+ hba->pci_did = hba->pcidev->device;
+ hba->pci_vid = hba->pcidev->vendor;
+ hba->pci_sdid = hba->pcidev->subsystem_device;
+ hba->pci_svid = hba->pcidev->subsystem_vendor;
+ hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+ hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+
+ bnx2i_identify_device(hba, cnic);
+ bnx2i_setup_host_queue_size(hba, shost);
+
+ hba->reg_base = pci_resource_start(hba->pcidev, 0);
+ if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+ hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
+ if (!hba->regview)
+ goto ioreg_map_err;
+ } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ hba->regview = pci_iomap(hba->pcidev, 0, 4096);
+ if (!hba->regview)
+ goto ioreg_map_err;
+ }
+
+ if (bnx2i_setup_mp_bdt(hba))
+ goto mp_bdt_mem_err;
+
+ INIT_LIST_HEAD(&hba->ep_ofld_list);
+ INIT_LIST_HEAD(&hba->ep_active_list);
+ INIT_LIST_HEAD(&hba->ep_destroy_list);
+ rwlock_init(&hba->ep_rdwr_lock);
+
+ hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+ /* different values for 5708/5709/57710 */
+ hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+ if (bnx2i_setup_free_cid_que(hba))
+ goto cid_que_err;
+
+ /* SQ/RQ/CQ size can be changed via sysfx interface */
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+ hba->max_sqes = sq_size;
+ else
+ hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+ } else { /* 5706/5708/5709 */
+ if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+ hba->max_sqes = sq_size;
+ else
+ hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+ }
+
+ hba->max_rqes = rq_size;
+ hba->max_cqes = hba->max_sqes + rq_size;
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+ hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+ } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+ hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+ hba->num_ccell = hba->max_sqes / 2;
+
+ spin_lock_init(&hba->lock);
+ mutex_init(&hba->net_dev_lock);
+ init_waitqueue_head(&hba->eh_wait);
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ hba->hba_shutdown_tmo = 30 * HZ;
+ hba->conn_teardown_tmo = 20 * HZ;
+ hba->conn_ctx_destroy_tmo = 6 * HZ;
+ } else { /* 5706/5708/5709 */
+ hba->hba_shutdown_tmo = 20 * HZ;
+ hba->conn_teardown_tmo = 10 * HZ;
+ hba->conn_ctx_destroy_tmo = 2 * HZ;
+ }
+
+#ifdef CONFIG_32BIT
+ spin_lock_init(&hba->stat_lock);
+#endif
+ memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
+
+ if (iscsi_host_add(shost, &hba->pcidev->dev))
+ goto free_dump_mem;
+ return hba;
+
+free_dump_mem:
+ bnx2i_release_free_cid_que(hba);
+cid_que_err:
+ bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+ if (hba->regview) {
+ pci_iounmap(hba->pcidev, hba->regview);
+ hba->regview = NULL;
+ }
+ioreg_map_err:
+ pci_dev_put(hba->pcidev);
+ scsi_host_put(shost);
+ return NULL;
+}
+
+/**
+ * bnx2i_free_hba- releases hba structure and resources held by the adapter
+ * @hba: pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+ struct Scsi_Host *shost = hba->shost;
+
+ iscsi_host_remove(shost);
+ INIT_LIST_HEAD(&hba->ep_ofld_list);
+ INIT_LIST_HEAD(&hba->ep_active_list);
+ INIT_LIST_HEAD(&hba->ep_destroy_list);
+ pci_dev_put(hba->pcidev);
+
+ if (hba->regview) {
+ pci_iounmap(hba->pcidev, hba->regview);
+ hba->regview = NULL;
+ }
+ bnx2i_free_mp_bdt(hba);
+ bnx2i_release_free_cid_que(hba);
+ iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory is freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn)
+{
+ if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ bnx2i_conn->gen_pdu.resp_bd_tbl,
+ bnx2i_conn->gen_pdu.resp_bd_dma);
+ bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+ }
+
+ if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ bnx2i_conn->gen_pdu.req_bd_tbl,
+ bnx2i_conn->gen_pdu.req_bd_dma);
+ bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+ }
+
+ if (bnx2i_conn->gen_pdu.resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_dma_addr);
+ bnx2i_conn->gen_pdu.resp_buf = NULL;
+ }
+
+ if (bnx2i_conn->gen_pdu.req_buf) {
+ dma_free_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.req_buf,
+ bnx2i_conn->gen_pdu.req_dma_addr);
+ bnx2i_conn->gen_pdu.req_buf = NULL;
+ }
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Mgmt task DNA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn)
+{
+ /* Allocate memory for login request/response buffers */
+ bnx2i_conn->gen_pdu.req_buf =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &bnx2i_conn->gen_pdu.req_dma_addr,
+ GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.req_buf == NULL)
+ goto login_req_buf_failure;
+
+ bnx2i_conn->gen_pdu.req_buf_size = 0;
+ bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+ bnx2i_conn->gen_pdu.resp_buf =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &bnx2i_conn->gen_pdu.resp_dma_addr,
+ GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+ goto login_resp_buf_failure;
+
+ bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+ bnx2i_conn->gen_pdu.req_bd_tbl =
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+ goto login_req_bd_tbl_failure;
+
+ bnx2i_conn->gen_pdu.resp_bd_tbl =
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ &bnx2i_conn->gen_pdu.resp_bd_dma,
+ GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+ goto login_resp_bd_tbl_failure;
+
+ return 0;
+
+login_resp_bd_tbl_failure:
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
+ bnx2i_conn->gen_pdu.req_bd_tbl,
+ bnx2i_conn->gen_pdu.req_bd_dma);
+ bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+ dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_dma_addr);
+ bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+ dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.req_buf,
+ bnx2i_conn->gen_pdu.req_dma_addr);
+ bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+ iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+ "login resource alloc failed!!\n");
+ return -ENOMEM;
+
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Allocates buffers and BD tables before shipping requests to cnic
+ * for PDUs prepared by 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+ struct iscsi_bd *bd_tbl;
+
+ bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
+ bd_tbl->buffer_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+ bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+ bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+ bnx2i_conn->gen_pdu.req_buf;
+ bd_tbl->reserved0 = 0;
+ bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+ ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+ bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
+ bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+ bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bd_tbl->reserved0 = 0;
+ bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+ ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task: transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
+ * Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+ struct bnx2i_cmd *cmd = task->dd_data;
+ struct bnx2i_conn *bnx2i_conn = cmd->conn;
+ int rc = 0;
+ char *buf;
+ int data_len;
+
+ bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ bnx2i_send_iscsi_login(bnx2i_conn, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ data_len = bnx2i_conn->gen_pdu.req_buf_size;
+ buf = bnx2i_conn->gen_pdu.req_buf;
+ if (data_len)
+ rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+ buf, data_len, 1);
+ else
+ rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+ NULL, 0, 1);
+ break;
+ case ISCSI_OP_LOGOUT:
+ rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+ break;
+ case ISCSI_OP_TEXT:
+ rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+ "send_gen: unsupported op 0x%x\n",
+ task->hdr->opcode);
+ }
+ return rc;
+}
+
+
+/**********************************************************************
+ * SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc: SCSI-ML command pointer
+ * @cmd: iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+ u32 dword;
+ int lpcnt;
+ u8 *srcp;
+ u32 *dstp;
+ u32 scsi_lun[2];
+
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+ cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+ cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
+ lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+ srcp = (u8 *) sc->cmnd;
+ dstp = (u32 *) cmd->req.cdb;
+ while (lpcnt--) {
+ memcpy(&dword, (const void *) srcp, 4);
+ *dstp = cpu_to_be32(dword);
+ srcp += 4;
+ dstp++;
+ }
+ if (sc->cmd_len & 0x3) {
+ dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+ *dstp = cpu_to_be32(dword);
+ }
+}
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+ /*
+ * mgmt task or cmd was never sent to us to transmit.
+ */
+ if (!task->sc || task->state == ISCSI_TASK_PENDING)
+ return;
+ /*
+ * need to clean-up task context to claim dma buffers
+ */
+ if (task->state == ISCSI_TASK_ABRT_TMF) {
+ bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+ spin_unlock_bh(&conn->session->back_lock);
+ wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+ msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+ spin_lock_bh(&conn->session->back_lock);
+ }
+ bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn: transport layer conn structure pointer
+ * @task: transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+ struct bnx2i_cmd *cmd = task->dd_data;
+
+ memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+ bnx2i_setup_cmd_wqe_template(cmd);
+ bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+
+ /* Tx PDU/data length count */
+ ADD_STATS_64(hba, tx_pdus, 1);
+ ADD_STATS_64(hba, tx_bytes, task->data_count);
+
+ if (task->data_count) {
+ memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+ task->data_count);
+ bnx2i_conn->gen_pdu.req_wr_ptr =
+ bnx2i_conn->gen_pdu.req_buf + task->data_count;
+ }
+ cmd->conn = conn->dd_data;
+ cmd->scsi_cmd = NULL;
+ return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task: transport layer command structure pointer
+ *
+ * maps SG buffers and send request to chip/firmware in the form of SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct bnx2i_cmd *cmd = task->dd_data;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+
+ if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
+ hba->max_sqes)
+ return -ENOMEM;
+
+ /*
+ * If there is no scsi_cmnd this must be a mgmt task
+ */
+ if (!sc)
+ return bnx2i_mtask_xmit(conn, task);
+
+ bnx2i_setup_cmd_wqe_template(cmd);
+ cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+ cmd->conn = bnx2i_conn;
+ cmd->scsi_cmd = sc;
+ cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+ cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+ bnx2i_iscsi_map_sg_list(cmd);
+ bnx2i_cpy_scsi_cdb(sc, cmd);
+
+ cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+ cmd->req.itt = task->itt |
+ (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ bnx2i_setup_write_cmd_bd_info(task);
+ } else {
+ if (scsi_bufflen(sc))
+ cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+ cmd->req.itt = task->itt |
+ (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ }
+
+ cmd->req.num_bds = cmd->io_tbl.bd_valid;
+ if (!cmd->io_tbl.bd_valid) {
+ cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+ cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+ cmd->req.num_bds = 1;
+ }
+
+ bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+ return 0;
+}
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @cmds_max: max commands supported
+ * @qdepth: scsi queue depth to support
+ * @initial_cmdsn: initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct bnx2i_hba *hba;
+ struct bnx2i_endpoint *bnx2i_ep;
+
+ if (!ep) {
+ printk(KERN_ERR "bnx2i: missing ep.\n");
+ return NULL;
+ }
+
+ bnx2i_ep = ep->dd_data;
+ shost = bnx2i_ep->hba->shost;
+ hba = iscsi_host_priv(shost);
+ if (bnx2i_adapter_ready(hba))
+ return NULL;
+
+ /*
+ * user can override hw limit as long as it is within
+ * the min/max.
+ */
+ if (cmds_max > hba->max_sqes)
+ cmds_max = hba->max_sqes;
+ else if (cmds_max < BNX2I_SQ_WQES_MIN)
+ cmds_max = BNX2I_SQ_WQES_MIN;
+
+ cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+ cmds_max, 0, sizeof(struct bnx2i_cmd),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session)
+ return NULL;
+
+ if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+ goto session_teardown;
+ return cls_session;
+
+session_teardown:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session: pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ * all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+ bnx2i_destroy_cmd_pool(hba, session);
+ iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session: pointer to iscsi cls session
+ * @cid: iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ struct bnx2i_conn *bnx2i_conn;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+ cid);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+
+ bnx2i_conn = conn->dd_data;
+ bnx2i_conn->cls_conn = cls_conn;
+ bnx2i_conn->hba = hba;
+
+ atomic_set(&bnx2i_conn->work_cnt, 0);
+
+ /* 'ep' ptr will be assigned in bind() call */
+ bnx2i_conn->ep = NULL;
+ init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+ if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_new: login resc alloc failed!!\n");
+ goto free_conn;
+ }
+
+ return cls_conn;
+
+free_conn:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session: pointer to iscsi cls session
+ * @cls_conn: pointer to iscsi cls conn
+ * @transport_fd: 64-bit EP handle
+ * @is_leading: leading connection on this session?
+ *
+ * Binds together iSCSI session instance, iSCSI connection instance
+ * and the TCP connection. This routine returns error code if
+ * TCP connection does not belong on the device iSCSI sess/conn
+ * is bound
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct iscsi_endpoint *ep;
+ int ret_code;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+ /*
+ * Forcefully terminate all in progress connection recovery at the
+ * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
+ */
+ if (bnx2i_adapter_ready(hba))
+ return -EIO;
+
+ bnx2i_ep = ep->dd_data;
+ if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+ /* Peer disconnect via' FIN or RST */
+ return -EINVAL;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ if (bnx2i_ep->hba != hba) {
+ /* Error - TCP connection does not belong to this device
+ */
+ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+ "conn bind, ep=0x%p (%s) does not",
+ bnx2i_ep, bnx2i_ep->hba->netdev->name);
+ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+ "belong to hba (%s)\n",
+ hba->netdev->name);
+ return -EEXIST;
+ }
+ bnx2i_ep->conn = bnx2i_conn;
+ bnx2i_conn->ep = bnx2i_ep;
+ bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+ bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+
+ ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+ bnx2i_ep->ep_iscsi_cid);
+
+ /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+ * driver needs to explicitly replenish RQ index during setup.
+ */
+ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+ bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+ return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ * this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct Scsi_Host *shost;
+ struct bnx2i_hba *hba;
+ struct bnx2i_work *work, *tmp;
+ unsigned cpu = 0;
+ struct bnx2i_percpu_s *p;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ hba = iscsi_host_priv(shost);
+
+ bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+
+ if (atomic_read(&bnx2i_conn->work_cnt)) {
+ for_each_online_cpu(cpu) {
+ p = &per_cpu(bnx2i_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ list_for_each_entry_safe(work, tmp,
+ &p->work_list, list) {
+ if (work->session == conn->session &&
+ work->bnx2i_conn == bnx2i_conn) {
+ list_del_init(&work->list);
+ kfree(work);
+ if (!atomic_dec_and_test(
+ &bnx2i_conn->work_cnt))
+ break;
+ }
+ }
+ spin_unlock_bh(&p->p_work_lock);
+ }
+ }
+
+ iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_ep_get_param - return iscsi ep parameter to caller
+ * @ep: pointer to iscsi endpoint
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iSCSI ep parameters
+ */
+static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ struct bnx2i_hba *hba = bnx2i_ep->hba;
+ int len = -ENOTCONN;
+
+ if (!hba)
+ return -ENOTCONN;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
+ mutex_unlock(&hba->net_dev_lock);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
+ mutex_unlock(&hba->net_dev_lock);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost: scsi host pointer
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ int len = 0;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "%s\n", hba->netdev->name);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS: {
+ struct list_head *active_list = &hba->ep_active_list;
+
+ read_lock_bh(&hba->ep_rdwr_lock);
+ if (!list_empty(&hba->ep_active_list)) {
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct cnic_sock *csk;
+
+ bnx2i_ep = list_first_entry(active_list,
+ struct bnx2i_endpoint,
+ link);
+ csk = bnx2i_ep->cm_sk;
+ if (test_bit(SK_F_IPV6, &csk->flags))
+ len = sprintf(buf, "%pI6\n", csk->src_ip);
+ else
+ len = sprintf(buf, "%pI4\n", csk->src_ip);
+ }
+ read_unlock_bh(&hba->ep_rdwr_lock);
+ break;
+ }
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+ bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+ bnx2i_update_iscsi_conn(conn);
+
+ /*
+ * this should normally not sleep for a long time so it should
+ * not disrupt the caller.
+ */
+ bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+ bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+ add_timer(&bnx2i_conn->ep->ofld_timer);
+ /* update iSCSI context for this conn, wait for CNIC to complete */
+ wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+ bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+ iscsi_conn_start(cls_conn);
+ return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+ stats->custom_length = 1;
+}
+
+
+/**
+ * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
+ * @dst_addr: target IP address
+ *
+ * check if route resolves to BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+ struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+ struct bnx2i_hba *hba;
+ struct cnic_dev *cnic = NULL;
+
+ hba = get_adapter_list_head();
+ if (hba && hba->cnic)
+ cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+ if (!cnic) {
+ printk(KERN_ALERT "bnx2i: no route,"
+ "can't connect using cnic\n");
+ goto no_nx2_route;
+ }
+ hba = bnx2i_find_hba_for_cnic(cnic);
+ if (!hba)
+ goto no_nx2_route;
+
+ if (bnx2i_adapter_ready(hba)) {
+ printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+ goto no_nx2_route;
+ }
+ if (hba->netdev->mtu > hba->mtu_supported) {
+ printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+ hba->netdev->name, hba->netdev->mtu);
+ printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+ hba->mtu_supported);
+ goto no_nx2_route;
+ }
+ return hba;
+no_nx2_route:
+ return NULL;
+}
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba: pointer to adapter instance
+ * @ep: endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
+ hba->cnic->cm_destroy(ep->cm_sk);
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+ ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+ if (ep->conn && ep->conn->cls_conn &&
+ ep->conn->cls_conn->dd_data) {
+ struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
+
+ /* Must suspend all rx queue activity for this ep */
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ }
+ /* CONN_DISCONNECT timeout may or may not be an issue depending
+ * on what transcribed in TCP layer, different targets behave
+ * differently
+ */
+ printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
+ "please submit GRC Dump, NW/PCIe trace, "
+ "driver msgs to developers for analysis\n",
+ hba->netdev->name);
+ }
+
+ ep->state = EP_STATE_CLEANUP_START;
+ init_timer(&ep->ofld_timer);
+ ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
+ ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ ep->ofld_timer.data = (unsigned long) ep;
+ add_timer(&ep->ofld_timer);
+
+ bnx2i_ep_destroy_list_add(hba, ep);
+
+ /* destroy iSCSI context, wait for it to complete */
+ if (bnx2i_send_conn_destroy(hba, ep))
+ ep->state = EP_STATE_CLEANUP_CMPL;
+
+ wait_event_interruptible(ep->ofld_wait,
+ (ep->state != EP_STATE_CLEANUP_START));
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&ep->ofld_timer);
+
+ bnx2i_ep_destroy_list_del(hba, ep);
+
+ if (ep->state != EP_STATE_CLEANUP_CMPL)
+ /* should never happen */
+ printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+ return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost: scsi host
+ * @dst_addr: target IP address
+ * @non_blocking: blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking Option-2 i/f
+ * with l5_core and the CNIC. This is a multi-step process of resolving
+ * route to target, create a iscsi connection context, handshaking with
+ * CNIC module to create/initialize the socket struct and finally
+ * sending down option-2 request to complete TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ u32 iscsi_cid = BNX2I_CID_RESERVED;
+ struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+ struct sockaddr_in6 *desti6;
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct bnx2i_hba *hba;
+ struct cnic_dev *cnic;
+ struct cnic_sockaddr saddr;
+ struct iscsi_endpoint *ep;
+ int rc = 0;
+
+ if (shost) {
+ /* driver is given scsi host to work with */
+ hba = iscsi_host_priv(shost);
+ } else
+ /*
+ * check if the given destination can be reached through
+ * a iscsi capable NetXtreme2 device
+ */
+ hba = bnx2i_check_route(dst_addr);
+
+ if (!hba) {
+ rc = -EINVAL;
+ goto nohba;
+ }
+ mutex_lock(&hba->net_dev_lock);
+
+ if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
+ rc = -EPERM;
+ goto check_busy;
+ }
+ cnic = hba->cnic;
+ ep = bnx2i_alloc_ep(hba);
+ if (!ep) {
+ rc = -ENOMEM;
+ goto check_busy;
+ }
+ bnx2i_ep = ep->dd_data;
+
+ atomic_set(&bnx2i_ep->num_active_cmds, 0);
+ iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+ if (iscsi_cid == -1) {
+ printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
+ "iscsi cid\n", hba->netdev->name);
+ rc = -ENOMEM;
+ bnx2i_free_ep(ep);
+ goto check_busy;
+ }
+ bnx2i_ep->hba_age = hba->age;
+
+ rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+ if (rc != 0) {
+ printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
+ "\n", hba->netdev->name);
+ rc = -ENOMEM;
+ goto qp_resc_err;
+ }
+
+ bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+ bnx2i_ep->state = EP_STATE_OFLD_START;
+ bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
+ init_timer(&bnx2i_ep->ofld_timer);
+ bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+ bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+ add_timer(&bnx2i_ep->ofld_timer);
+
+ if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
+ if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
+ printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
+ hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
+ rc = -EBUSY;
+ } else
+ rc = -ENOSPC;
+ printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
+ "\n", hba->netdev->name);
+ bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+ goto conn_failed;
+ }
+
+ /* Wait for CNIC hardware to setup conn context and return 'cid' */
+ wait_event_interruptible(bnx2i_ep->ofld_wait,
+ bnx2i_ep->state != EP_STATE_OFLD_START);
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&bnx2i_ep->ofld_timer);
+
+ bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+ if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+ if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
+ printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
+ hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
+ rc = -EBUSY;
+ } else
+ rc = -ENOSPC;
+ goto conn_failed;
+ }
+
+ rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+ iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+ if (rc) {
+ rc = -EINVAL;
+ /* Need to terminate and cleanup the connection */
+ goto release_ep;
+ }
+
+ bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+ bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+ clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+ memset(&saddr, 0, sizeof(saddr));
+ if (dst_addr->sa_family == AF_INET) {
+ desti = (struct sockaddr_in *) dst_addr;
+ saddr.remote.v4 = *desti;
+ saddr.local.v4.sin_family = desti->sin_family;
+ } else if (dst_addr->sa_family == AF_INET6) {
+ desti6 = (struct sockaddr_in6 *) dst_addr;
+ saddr.remote.v6 = *desti6;
+ saddr.local.v6.sin6_family = desti6->sin6_family;
+ }
+
+ bnx2i_ep->timestamp = jiffies;
+ bnx2i_ep->state = EP_STATE_CONNECT_START;
+ if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ rc = -EINVAL;
+ goto conn_failed;
+ } else
+ rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+ if (rc)
+ goto release_ep;
+
+ bnx2i_ep_active_list_add(hba, bnx2i_ep);
+
+ if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+ goto del_active_ep;
+
+ mutex_unlock(&hba->net_dev_lock);
+ return ep;
+
+del_active_ep:
+ bnx2i_ep_active_list_del(hba, bnx2i_ep);
+release_ep:
+ if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+ mutex_unlock(&hba->net_dev_lock);
+ return ERR_PTR(rc);
+ }
+conn_failed:
+ bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+ bnx2i_free_ep(ep);
+check_busy:
+ mutex_unlock(&hba->net_dev_lock);
+nohba:
+ return ERR_PTR(rc);
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishement
+ * @ep: TCP connection (endpoint) handle
+ * @timeout_ms: timeout value in milli secs
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct bnx2i_endpoint *bnx2i_ep;
+ int rc = 0;
+
+ bnx2i_ep = ep->dd_data;
+ if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+ (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+ (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+ return -1;
+ if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+ return 1;
+
+ rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+ ((bnx2i_ep->state ==
+ EP_STATE_OFLD_FAILED) ||
+ (bnx2i_ep->state ==
+ EP_STATE_CONNECT_FAILED) ||
+ (bnx2i_ep->state ==
+ EP_STATE_CONNECT_COMPL)),
+ msecs_to_jiffies(timeout_ms));
+ if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
+ rc = -1;
+
+ if (rc > 0)
+ return 1;
+ else if (!rc)
+ return 0; /* timeout */
+ else
+ return rc;
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check EP state transition
+ * @ep: endpoint pointer
+ *
+ * check if underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+ int ret;
+ int cnic_dev_10g = 0;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+ cnic_dev_10g = 1;
+
+ switch (bnx2i_ep->state) {
+ case EP_STATE_CLEANUP_FAILED:
+ case EP_STATE_OFLD_FAILED:
+ case EP_STATE_DISCONN_TIMEDOUT:
+ ret = 0;
+ break;
+ case EP_STATE_CONNECT_START:
+ case EP_STATE_CONNECT_FAILED:
+ case EP_STATE_CONNECT_COMPL:
+ case EP_STATE_ULP_UPDATE_START:
+ case EP_STATE_ULP_UPDATE_COMPL:
+ case EP_STATE_TCP_FIN_RCVD:
+ case EP_STATE_LOGOUT_SENT:
+ case EP_STATE_LOGOUT_RESP_RCVD:
+ case EP_STATE_ULP_UPDATE_FAILED:
+ ret = 1;
+ break;
+ case EP_STATE_TCP_RST_RCVD:
+ if (cnic_dev_10g)
+ ret = 0;
+ else
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+
+/*
+ * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
+ * @ep: TCP connection (bnx2i endpoint) handle
+ *
+ * executes TCP connection teardown process
+ */
+int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
+{
+ struct bnx2i_hba *hba = bnx2i_ep->hba;
+ struct cnic_dev *cnic;
+ struct iscsi_session *session = NULL;
+ struct iscsi_conn *conn = NULL;
+ int ret = 0;
+ int close = 0;
+ int close_ret = 0;
+
+ if (!hba)
+ return 0;
+
+ cnic = hba->cnic;
+ if (!cnic)
+ return 0;
+
+ if (bnx2i_ep->state == EP_STATE_IDLE ||
+ bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
+ return 0;
+
+ if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+ goto destroy_conn;
+
+ if (bnx2i_ep->conn) {
+ conn = bnx2i_ep->conn->cls_conn->dd_data;
+ session = conn->session;
+ }
+
+ init_timer(&bnx2i_ep->ofld_timer);
+ bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
+ bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+ add_timer(&bnx2i_ep->ofld_timer);
+
+ if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+ goto out;
+
+ if (session) {
+ spin_lock_bh(&session->frwd_lock);
+ if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
+ if (session->state == ISCSI_STATE_LOGGING_OUT) {
+ if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
+ /* Logout sent, but no resp */
+ printk(KERN_ALERT "bnx2i (%s): WARNING"
+ " logout response was not "
+ "received!\n",
+ bnx2i_ep->hba->netdev->name);
+ } else if (bnx2i_ep->state ==
+ EP_STATE_LOGOUT_RESP_RCVD)
+ close = 1;
+ }
+ } else
+ close = 1;
+
+ spin_unlock_bh(&session->frwd_lock);
+ }
+
+ bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+ if (close)
+ close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
+ else
+ close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
+
+ if (close_ret)
+ printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
+ bnx2i_ep->hba->netdev->name, close, close_ret);
+ else
+ /* wait for option-2 conn teardown */
+ wait_event_interruptible(bnx2i_ep->ofld_wait,
+ bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+ bnx2i_ep_active_list_del(hba, bnx2i_ep);
+ if (bnx2i_tear_down_conn(hba, bnx2i_ep))
+ return -EINVAL;
+out:
+ bnx2i_ep->state = EP_STATE_IDLE;
+ return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep: TCP connection (iscsi endpoint) handle
+ *
+ * executes TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct bnx2i_conn *bnx2i_conn = NULL;
+ struct iscsi_conn *conn = NULL;
+ struct bnx2i_hba *hba;
+
+ bnx2i_ep = ep->dd_data;
+
+ /* driver should not attempt connection cleanup until TCP_CONNECT
+ * completes either successfully or fails. Timeout is 9-secs, so
+ * wait for it to complete
+ */
+ while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+ !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+ msleep(250);
+
+ if (bnx2i_ep->conn) {
+ bnx2i_conn = bnx2i_ep->conn;
+ conn = bnx2i_conn->cls_conn->dd_data;
+ iscsi_suspend_queue(conn);
+ }
+ hba = bnx2i_ep->hba;
+
+ mutex_lock(&hba->net_dev_lock);
+
+ if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
+ goto out;
+
+ if (bnx2i_ep->state == EP_STATE_IDLE)
+ goto free_resc;
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+ (bnx2i_ep->hba_age != hba->age)) {
+ bnx2i_ep_active_list_del(hba, bnx2i_ep);
+ goto free_resc;
+ }
+
+ /* Do all chip cleanup here */
+ if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
+ mutex_unlock(&hba->net_dev_lock);
+ return;
+ }
+free_resc:
+ bnx2i_free_qp_resc(hba, bnx2i_ep);
+
+ if (bnx2i_conn)
+ bnx2i_conn->ep = NULL;
+
+ bnx2i_free_ep(ep);
+out:
+ mutex_unlock(&hba->net_dev_lock);
+
+ wake_up_interruptible(&hba->eh_wait);
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @buf: pointer to buffer containing iscsi path message
+ *
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ char *buf = (char *) params;
+ u16 len = sizeof(*params);
+
+ /* handled by cnic driver */
+ hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+ len);
+
+ return 0;
+}
+
+static umode_t bnx2i_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_BOOT_ROOT:
+ case ISCSI_PARAM_BOOT_NIC:
+ case ISCSI_PARAM_BOOT_TARGET:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template
+ * used while registering with the scsi host and iSCSI transport module.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+ .module = THIS_MODULE,
+ .name = "QLogic Offload iSCSI Initiator",
+ .proc_name = "bnx2i",
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .change_queue_depth = scsi_change_queue_depth,
+ .target_alloc = iscsi_target_alloc,
+ .can_queue = 2048,
+ .max_sectors = 127,
+ .cmd_per_lun = 128,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
+ .shost_attrs = bnx2i_dev_attributes,
+ .track_queue_depth = 1,
+};
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = "bnx2i",
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
+ CAP_MULTI_R2T | CAP_DATADGST |
+ CAP_DATA_PATH_OFFLOAD |
+ CAP_TEXT_NEGO,
+ .create_session = bnx2i_session_create,
+ .destroy_session = bnx2i_session_destroy,
+ .create_conn = bnx2i_conn_create,
+ .bind_conn = bnx2i_conn_bind,
+ .destroy_conn = bnx2i_conn_destroy,
+ .attr_is_visible = bnx2i_attr_is_visible,
+ .set_param = iscsi_set_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = bnx2i_host_get_param,
+ .start_conn = bnx2i_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .send_pdu = iscsi_conn_send_pdu,
+ .xmit_task = bnx2i_task_xmit,
+ .get_stats = bnx2i_conn_get_stats,
+ /* TCP connect - disconnect - option-2 interface calls */
+ .get_ep_param = bnx2i_ep_get_param,
+ .ep_connect = bnx2i_ep_connect,
+ .ep_poll = bnx2i_ep_poll,
+ .ep_disconnect = bnx2i_ep_disconnect,
+ .set_path = bnx2i_nl_set_path,
+ /* Error recovery timeout call */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .cleanup_task = bnx2i_cleanup_task,
+};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 000000000..6d56fd60c
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,145 @@
+/* bnx2i_sysfs.c: QLogic NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev: device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - return(s currently configured send queue (SQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ *
+ * Returns current SQ size parameter, this paramater determines the number
+ * outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+ return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ * @count: parameter buffer size
+ *
+ * Interface for user to change shared queue size allocated for each conn
+ * Must be within SQ limits and a power of 2. For the latter this is needed
+ * because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+ u32 val;
+ int max_sq_size;
+
+ if (hba->ofld_conns_active)
+ goto skip_config;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+ else
+ max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+ if (sscanf(buf, " 0x%x ", &val) > 0) {
+ if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+ (is_power_of_2(val)))
+ hba->max_sqes = val;
+ }
+
+ return count;
+
+skip_config:
+ printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+ return 0;
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+ return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_get_link_state - set command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ * @count: parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 val;
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+ if (hba->ofld_conns_active)
+ goto skip_config;
+
+ if (sscanf(buf, " 0x%x ", &val) > 0) {
+ if ((val >= BNX2I_CCELLS_MIN) &&
+ (val <= BNX2I_CCELLS_MAX)) {
+ hba->num_ccell = val;
+ }
+ }
+
+ return count;
+
+skip_config:
+ printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+ return 0;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+ bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+ bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
+struct device_attribute *bnx2i_dev_attributes[] = {
+ &dev_attr_sq_size,
+ &dev_attr_num_ccell,
+ NULL
+};
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
new file mode 100644
index 000000000..0f846ae2f
--- /dev/null
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -0,0 +1,138 @@
+/*
+ * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
+ *
+ * Based on work by Alan Hourihane and Kars de Jong
+ *
+ * Rewritten to use 53c700.c by Richard Hirst <richard@sleepie.demon.co.uk>
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/bvme6000hw.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Richard Hirst <richard@sleepie.demon.co.uk>");
+MODULE_DESCRIPTION("BVME6000 NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template bvme6000_scsi_driver_template = {
+ .name = "BVME6000 NCR53c710 SCSI",
+ .proc_name = "BVME6000",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *bvme6000_scsi_device;
+
+static int
+bvme6000_probe(struct platform_device *dev)
+{
+ struct Scsi_Host *host;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!MACH_IS_BVME6000)
+ goto out;
+
+ hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (!hostdata) {
+ printk(KERN_ERR "bvme6000-scsi: "
+ "Failed to allocate host data\n");
+ goto out;
+ }
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)BVME_NCR53C710_BASE;
+ hostdata->clock = 40; /* XXX - depends on the CPU clock! */
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ /* and register the chip */
+ host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata,
+ &dev->dev);
+ if (!host) {
+ printk(KERN_ERR "bvme6000-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+ host->base = BVME_NCR53C710_BASE;
+ host->this_id = 7;
+ host->irq = BVME_IRQ_SCSI;
+ if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi",
+ host)) {
+ printk(KERN_ERR "bvme6000-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ platform_set_drvdata(dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static int
+bvme6000_device_remove(struct platform_device *dev)
+{
+ struct Scsi_Host *host = platform_get_drvdata(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+
+ return 0;
+}
+
+static struct platform_driver bvme6000_scsi_driver = {
+ .driver = {
+ .name = "bvme6000-scsi",
+ },
+ .probe = bvme6000_probe,
+ .remove = bvme6000_device_remove,
+};
+
+static int __init bvme6000_scsi_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&bvme6000_scsi_driver);
+ if (err)
+ return err;
+
+ bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(bvme6000_scsi_device)) {
+ platform_driver_unregister(&bvme6000_scsi_driver);
+ return PTR_ERR(bvme6000_scsi_device);
+ }
+
+ return 0;
+}
+
+static void __exit bvme6000_scsi_exit(void)
+{
+ platform_device_unregister(bvme6000_scsi_device);
+ platform_driver_unregister(&bvme6000_scsi_driver);
+}
+
+module_init(bvme6000_scsi_init);
+module_exit(bvme6000_scsi_exit);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
new file mode 100644
index 000000000..dad959fcf
--- /dev/null
+++ b/drivers/scsi/ch.c
@@ -0,0 +1,1031 @@
+/*
+ * SCSI Media Changer device driver for Linux 2.6
+ *
+ * (c) 1996-2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ */
+
+#define VERSION "0.25"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/compat.h>
+#include <linux/chio.h> /* here are all the ioctls */
+#include <linux/mutex.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
+
+#define CH_DT_MAX 16
+#define CH_TYPES 8
+#define CH_MAX_DEVS 128
+
+MODULE_DESCRIPTION("device driver for scsi media changer devices");
+MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
+
+static DEFINE_MUTEX(ch_mutex);
+static int init = 1;
+module_param(init, int, 0444);
+MODULE_PARM_DESC(init, \
+ "initialize element status on driver load (default: on)");
+
+static int timeout_move = 300;
+module_param(timeout_move, int, 0644);
+MODULE_PARM_DESC(timeout_move,"timeout for move commands "
+ "(default: 300 seconds)");
+
+static int timeout_init = 3600;
+module_param(timeout_init, int, 0644);
+MODULE_PARM_DESC(timeout_init,"timeout for INITIALIZE ELEMENT STATUS "
+ "(default: 3600 seconds)");
+
+static int verbose = 1;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose,"be verbose (default: on)");
+
+static int debug = 0;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
+ "detailed sense codes on scsi errors (default: off)");
+
+static int dt_id[CH_DT_MAX] = { [ 0 ... (CH_DT_MAX-1) ] = -1 };
+static int dt_lun[CH_DT_MAX];
+module_param_array(dt_id, int, NULL, 0444);
+module_param_array(dt_lun, int, NULL, 0444);
+
+/* tell the driver about vendor-specific slots */
+static int vendor_firsts[CH_TYPES-4];
+static int vendor_counts[CH_TYPES-4];
+module_param_array(vendor_firsts, int, NULL, 0444);
+module_param_array(vendor_counts, int, NULL, 0444);
+
+static const char * vendor_labels[CH_TYPES-4] = {
+ "v0", "v1", "v2", "v3"
+};
+// module_param_string_array(vendor_labels, NULL, 0444);
+
+#define ch_printk(prefix, ch, fmt, a...) \
+ sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a)
+
+#define DPRINTK(fmt, arg...) \
+do { \
+ if (debug) \
+ ch_printk(KERN_DEBUG, ch, fmt, ##arg); \
+} while (0)
+#define VPRINTK(level, fmt, arg...) \
+do { \
+ if (verbose) \
+ ch_printk(level, ch, fmt, ##arg); \
+} while (0)
+
+/* ------------------------------------------------------------------- */
+
+#define MAX_RETRIES 1
+
+static struct class * ch_sysfs_class;
+
+typedef struct {
+ struct list_head list;
+ int minor;
+ char name[8];
+ struct scsi_device *device;
+ struct scsi_device **dt; /* ptrs to data transfer elements */
+ u_int firsts[CH_TYPES];
+ u_int counts[CH_TYPES];
+ u_int unit_attention;
+ u_int voltags;
+ struct mutex lock;
+} scsi_changer;
+
+static DEFINE_IDR(ch_index_idr);
+static DEFINE_SPINLOCK(ch_index_lock);
+
+static const struct {
+ unsigned char sense;
+ unsigned char asc;
+ unsigned char ascq;
+ int errno;
+} ch_err[] = {
+/* Just filled in what looks right. Hav'nt checked any standard paper for
+ these errno assignments, so they may be wrong... */
+ {
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x21,
+ .ascq = 0x01,
+ .errno = EBADSLT, /* Invalid element address */
+ },{
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x28,
+ .ascq = 0x01,
+ .errno = EBADE, /* Import or export element accessed */
+ },{
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x3B,
+ .ascq = 0x0D,
+ .errno = EXFULL, /* Medium destination element full */
+ },{
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x3B,
+ .ascq = 0x0E,
+ .errno = EBADE, /* Medium source element empty */
+ },{
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x20,
+ .ascq = 0x00,
+ .errno = EBADRQC, /* Invalid command operation code */
+ },{
+ /* end of list */
+ }
+};
+
+/* ------------------------------------------------------------------- */
+
+static int ch_find_errno(struct scsi_sense_hdr *sshdr)
+{
+ int i,errno = 0;
+
+ /* Check to see if additional sense information is available */
+ if (scsi_sense_valid(sshdr) &&
+ sshdr->asc != 0) {
+ for (i = 0; ch_err[i].errno != 0; i++) {
+ if (ch_err[i].sense == sshdr->sense_key &&
+ ch_err[i].asc == sshdr->asc &&
+ ch_err[i].ascq == sshdr->ascq) {
+ errno = -ch_err[i].errno;
+ break;
+ }
+ }
+ }
+ if (errno == 0)
+ errno = -EIO;
+ return errno;
+}
+
+static int
+ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
+ void *buffer, unsigned buflength,
+ enum dma_data_direction direction)
+{
+ int errno, retries = 0, timeout, result;
+ struct scsi_sense_hdr sshdr;
+
+ timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
+ ? timeout_init : timeout_move;
+
+ retry:
+ errno = 0;
+ result = scsi_execute_req(ch->device, cmd, direction, buffer,
+ buflength, &sshdr, timeout * HZ,
+ MAX_RETRIES, NULL);
+
+ if (driver_byte(result) & DRIVER_SENSE) {
+ if (debug)
+ scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
+ errno = ch_find_errno(&sshdr);
+
+ switch(sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ ch->unit_attention = 1;
+ if (retries++ < 3)
+ goto retry;
+ break;
+ }
+ }
+ return errno;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int
+ch_elem_to_typecode(scsi_changer *ch, u_int elem)
+{
+ int i;
+
+ for (i = 0; i < CH_TYPES; i++) {
+ if (elem >= ch->firsts[i] &&
+ elem < ch->firsts[i] +
+ ch->counts[i])
+ return i+1;
+ }
+ return 0;
+}
+
+static int
+ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
+{
+ u_char cmd[12];
+ u_char *buffer;
+ int result;
+
+ buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ if(!buffer)
+ return -ENOMEM;
+
+ retry:
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = READ_ELEMENT_STATUS;
+ cmd[1] = ((ch->device->lun & 0x7) << 5) |
+ (ch->voltags ? 0x10 : 0) |
+ ch_elem_to_typecode(ch,elem);
+ cmd[2] = (elem >> 8) & 0xff;
+ cmd[3] = elem & 0xff;
+ cmd[5] = 1;
+ cmd[9] = 255;
+ if (0 == (result = ch_do_scsi(ch, cmd, 12,
+ buffer, 256, DMA_FROM_DEVICE))) {
+ if (((buffer[16] << 8) | buffer[17]) != elem) {
+ DPRINTK("asked for element 0x%02x, got 0x%02x\n",
+ elem,(buffer[16] << 8) | buffer[17]);
+ kfree(buffer);
+ return -EIO;
+ }
+ memcpy(data,buffer+16,16);
+ } else {
+ if (ch->voltags) {
+ ch->voltags = 0;
+ VPRINTK(KERN_INFO, "device has no volume tag support\n");
+ goto retry;
+ }
+ DPRINTK("READ ELEMENT STATUS for element 0x%x failed\n",elem);
+ }
+ kfree(buffer);
+ return result;
+}
+
+static int
+ch_init_elem(scsi_changer *ch)
+{
+ int err;
+ u_char cmd[6];
+
+ VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n");
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = INITIALIZE_ELEMENT_STATUS;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
+ err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE);
+ VPRINTK(KERN_INFO, "... finished\n");
+ return err;
+}
+
+static int
+ch_readconfig(scsi_changer *ch)
+{
+ u_char cmd[10], data[16];
+ u_char *buffer;
+ int result,id,lun,i;
+ u_int elem;
+
+ buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
+ cmd[2] = 0x1d;
+ cmd[4] = 255;
+ result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
+ if (0 != result) {
+ cmd[1] |= (1<<3);
+ result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
+ }
+ if (0 == result) {
+ ch->firsts[CHET_MT] =
+ (buffer[buffer[3]+ 6] << 8) | buffer[buffer[3]+ 7];
+ ch->counts[CHET_MT] =
+ (buffer[buffer[3]+ 8] << 8) | buffer[buffer[3]+ 9];
+ ch->firsts[CHET_ST] =
+ (buffer[buffer[3]+10] << 8) | buffer[buffer[3]+11];
+ ch->counts[CHET_ST] =
+ (buffer[buffer[3]+12] << 8) | buffer[buffer[3]+13];
+ ch->firsts[CHET_IE] =
+ (buffer[buffer[3]+14] << 8) | buffer[buffer[3]+15];
+ ch->counts[CHET_IE] =
+ (buffer[buffer[3]+16] << 8) | buffer[buffer[3]+17];
+ ch->firsts[CHET_DT] =
+ (buffer[buffer[3]+18] << 8) | buffer[buffer[3]+19];
+ ch->counts[CHET_DT] =
+ (buffer[buffer[3]+20] << 8) | buffer[buffer[3]+21];
+ VPRINTK(KERN_INFO, "type #1 (mt): 0x%x+%d [medium transport]\n",
+ ch->firsts[CHET_MT],
+ ch->counts[CHET_MT]);
+ VPRINTK(KERN_INFO, "type #2 (st): 0x%x+%d [storage]\n",
+ ch->firsts[CHET_ST],
+ ch->counts[CHET_ST]);
+ VPRINTK(KERN_INFO, "type #3 (ie): 0x%x+%d [import/export]\n",
+ ch->firsts[CHET_IE],
+ ch->counts[CHET_IE]);
+ VPRINTK(KERN_INFO, "type #4 (dt): 0x%x+%d [data transfer]\n",
+ ch->firsts[CHET_DT],
+ ch->counts[CHET_DT]);
+ } else {
+ VPRINTK(KERN_INFO, "reading element address assignment page failed!\n");
+ }
+
+ /* vendor specific element types */
+ for (i = 0; i < 4; i++) {
+ if (0 == vendor_counts[i])
+ continue;
+ if (NULL == vendor_labels[i])
+ continue;
+ ch->firsts[CHET_V1+i] = vendor_firsts[i];
+ ch->counts[CHET_V1+i] = vendor_counts[i];
+ VPRINTK(KERN_INFO, "type #%d (v%d): 0x%x+%d [%s, vendor specific]\n",
+ i+5,i+1,vendor_firsts[i],vendor_counts[i],
+ vendor_labels[i]);
+ }
+
+ /* look up the devices of the data transfer elements */
+ ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt),
+ GFP_KERNEL);
+
+ if (!ch->dt) {
+ kfree(buffer);
+ return -ENOMEM;
+ }
+
+ for (elem = 0; elem < ch->counts[CHET_DT]; elem++) {
+ id = -1;
+ lun = 0;
+ if (elem < CH_DT_MAX && -1 != dt_id[elem]) {
+ id = dt_id[elem];
+ lun = dt_lun[elem];
+ VPRINTK(KERN_INFO, "dt 0x%x: [insmod option] ",
+ elem+ch->firsts[CHET_DT]);
+ } else if (0 != ch_read_element_status
+ (ch,elem+ch->firsts[CHET_DT],data)) {
+ VPRINTK(KERN_INFO, "dt 0x%x: READ ELEMENT STATUS failed\n",
+ elem+ch->firsts[CHET_DT]);
+ } else {
+ VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]);
+ if (data[6] & 0x80) {
+ VPRINTK(KERN_CONT, "not this SCSI bus\n");
+ ch->dt[elem] = NULL;
+ } else if (0 == (data[6] & 0x30)) {
+ VPRINTK(KERN_CONT, "ID/LUN unknown\n");
+ ch->dt[elem] = NULL;
+ } else {
+ id = ch->device->id;
+ lun = 0;
+ if (data[6] & 0x20) id = data[7];
+ if (data[6] & 0x10) lun = data[6] & 7;
+ }
+ }
+ if (-1 != id) {
+ VPRINTK(KERN_CONT, "ID %i, LUN %i, ",id,lun);
+ ch->dt[elem] =
+ scsi_device_lookup(ch->device->host,
+ ch->device->channel,
+ id,lun);
+ if (!ch->dt[elem]) {
+ /* should not happen */
+ VPRINTK(KERN_CONT, "Huh? device not found!\n");
+ } else {
+ VPRINTK(KERN_CONT, "name: %8.8s %16.16s %4.4s\n",
+ ch->dt[elem]->vendor,
+ ch->dt[elem]->model,
+ ch->dt[elem]->rev);
+ }
+ }
+ }
+ ch->voltags = 1;
+ kfree(buffer);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int
+ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
+{
+ u_char cmd[10];
+
+ DPRINTK("position: 0x%x\n",elem);
+ if (0 == trans)
+ trans = ch->firsts[CHET_MT];
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = POSITION_TO_ELEMENT;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
+ cmd[2] = (trans >> 8) & 0xff;
+ cmd[3] = trans & 0xff;
+ cmd[4] = (elem >> 8) & 0xff;
+ cmd[5] = elem & 0xff;
+ cmd[8] = rotate ? 1 : 0;
+ return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE);
+}
+
+static int
+ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
+{
+ u_char cmd[12];
+
+ DPRINTK("move: 0x%x => 0x%x\n",src,dest);
+ if (0 == trans)
+ trans = ch->firsts[CHET_MT];
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = MOVE_MEDIUM;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
+ cmd[2] = (trans >> 8) & 0xff;
+ cmd[3] = trans & 0xff;
+ cmd[4] = (src >> 8) & 0xff;
+ cmd[5] = src & 0xff;
+ cmd[6] = (dest >> 8) & 0xff;
+ cmd[7] = dest & 0xff;
+ cmd[10] = rotate ? 1 : 0;
+ return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE);
+}
+
+static int
+ch_exchange(scsi_changer *ch, u_int trans, u_int src,
+ u_int dest1, u_int dest2, int rotate1, int rotate2)
+{
+ u_char cmd[12];
+
+ DPRINTK("exchange: 0x%x => 0x%x => 0x%x\n",
+ src,dest1,dest2);
+ if (0 == trans)
+ trans = ch->firsts[CHET_MT];
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = EXCHANGE_MEDIUM;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
+ cmd[2] = (trans >> 8) & 0xff;
+ cmd[3] = trans & 0xff;
+ cmd[4] = (src >> 8) & 0xff;
+ cmd[5] = src & 0xff;
+ cmd[6] = (dest1 >> 8) & 0xff;
+ cmd[7] = dest1 & 0xff;
+ cmd[8] = (dest2 >> 8) & 0xff;
+ cmd[9] = dest2 & 0xff;
+ cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
+
+ return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE);
+}
+
+static void
+ch_check_voltag(char *tag)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ /* restrict to ascii */
+ if (tag[i] >= 0x7f || tag[i] < 0x20)
+ tag[i] = ' ';
+ /* don't allow search wildcards */
+ if (tag[i] == '?' ||
+ tag[i] == '*')
+ tag[i] = ' ';
+ }
+}
+
+static int
+ch_set_voltag(scsi_changer *ch, u_int elem,
+ int alternate, int clear, u_char *tag)
+{
+ u_char cmd[12];
+ u_char *buffer;
+ int result;
+
+ buffer = kzalloc(512, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ DPRINTK("%s %s voltag: 0x%x => \"%s\"\n",
+ clear ? "clear" : "set",
+ alternate ? "alternate" : "primary",
+ elem, tag);
+ memset(cmd,0,sizeof(cmd));
+ cmd[0] = SEND_VOLUME_TAG;
+ cmd[1] = ((ch->device->lun & 0x7) << 5) |
+ ch_elem_to_typecode(ch,elem);
+ cmd[2] = (elem >> 8) & 0xff;
+ cmd[3] = elem & 0xff;
+ cmd[5] = clear
+ ? (alternate ? 0x0d : 0x0c)
+ : (alternate ? 0x0b : 0x0a);
+
+ cmd[9] = 255;
+
+ memcpy(buffer,tag,32);
+ ch_check_voltag(buffer);
+
+ result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE);
+ kfree(buffer);
+ return result;
+}
+
+static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
+{
+ int retval = 0;
+ u_char data[16];
+ unsigned int i;
+
+ mutex_lock(&ch->lock);
+ for (i = 0; i < ch->counts[type]; i++) {
+ if (0 != ch_read_element_status
+ (ch, ch->firsts[type]+i,data)) {
+ retval = -EIO;
+ break;
+ }
+ put_user(data[2], dest+i);
+ if (data[2] & CESTATUS_EXCEPT)
+ VPRINTK(KERN_INFO, "element 0x%x: asc=0x%x, ascq=0x%x\n",
+ ch->firsts[type]+i,
+ (int)data[4],(int)data[5]);
+ retval = ch_read_element_status
+ (ch, ch->firsts[type]+i,data);
+ if (0 != retval)
+ break;
+ }
+ mutex_unlock(&ch->lock);
+ return retval;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int
+ch_release(struct inode *inode, struct file *file)
+{
+ scsi_changer *ch = file->private_data;
+
+ scsi_device_put(ch->device);
+ file->private_data = NULL;
+ return 0;
+}
+
+static int
+ch_open(struct inode *inode, struct file *file)
+{
+ scsi_changer *ch;
+ int minor = iminor(inode);
+
+ mutex_lock(&ch_mutex);
+ spin_lock(&ch_index_lock);
+ ch = idr_find(&ch_index_idr, minor);
+
+ if (NULL == ch || scsi_device_get(ch->device)) {
+ spin_unlock(&ch_index_lock);
+ mutex_unlock(&ch_mutex);
+ return -ENXIO;
+ }
+ spin_unlock(&ch_index_lock);
+
+ file->private_data = ch;
+ mutex_unlock(&ch_mutex);
+ return 0;
+}
+
+static int
+ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
+{
+ if (type >= CH_TYPES || unit >= ch->counts[type])
+ return -1;
+ return 0;
+}
+
+static long ch_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ scsi_changer *ch = file->private_data;
+ int retval;
+ void __user *argp = (void __user *)arg;
+
+ retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
+ file->f_flags & O_NDELAY);
+ if (retval)
+ return retval;
+
+ switch (cmd) {
+ case CHIOGPARAMS:
+ {
+ struct changer_params params;
+
+ params.cp_curpicker = 0;
+ params.cp_npickers = ch->counts[CHET_MT];
+ params.cp_nslots = ch->counts[CHET_ST];
+ params.cp_nportals = ch->counts[CHET_IE];
+ params.cp_ndrives = ch->counts[CHET_DT];
+
+ if (copy_to_user(argp, &params, sizeof(params)))
+ return -EFAULT;
+ return 0;
+ }
+ case CHIOGVPARAMS:
+ {
+ struct changer_vendor_params vparams;
+
+ memset(&vparams,0,sizeof(vparams));
+ if (ch->counts[CHET_V1]) {
+ vparams.cvp_n1 = ch->counts[CHET_V1];
+ strncpy(vparams.cvp_label1,vendor_labels[0],16);
+ }
+ if (ch->counts[CHET_V2]) {
+ vparams.cvp_n2 = ch->counts[CHET_V2];
+ strncpy(vparams.cvp_label2,vendor_labels[1],16);
+ }
+ if (ch->counts[CHET_V3]) {
+ vparams.cvp_n3 = ch->counts[CHET_V3];
+ strncpy(vparams.cvp_label3,vendor_labels[2],16);
+ }
+ if (ch->counts[CHET_V4]) {
+ vparams.cvp_n4 = ch->counts[CHET_V4];
+ strncpy(vparams.cvp_label4,vendor_labels[3],16);
+ }
+ if (copy_to_user(argp, &vparams, sizeof(vparams)))
+ return -EFAULT;
+ return 0;
+ }
+
+ case CHIOPOSITION:
+ {
+ struct changer_position pos;
+
+ if (copy_from_user(&pos, argp, sizeof (pos)))
+ return -EFAULT;
+
+ if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) {
+ DPRINTK("CHIOPOSITION: invalid parameter\n");
+ return -EBADSLT;
+ }
+ mutex_lock(&ch->lock);
+ retval = ch_position(ch,0,
+ ch->firsts[pos.cp_type] + pos.cp_unit,
+ pos.cp_flags & CP_INVERT);
+ mutex_unlock(&ch->lock);
+ return retval;
+ }
+
+ case CHIOMOVE:
+ {
+ struct changer_move mv;
+
+ if (copy_from_user(&mv, argp, sizeof (mv)))
+ return -EFAULT;
+
+ if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) ||
+ 0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit )) {
+ DPRINTK("CHIOMOVE: invalid parameter\n");
+ return -EBADSLT;
+ }
+
+ mutex_lock(&ch->lock);
+ retval = ch_move(ch,0,
+ ch->firsts[mv.cm_fromtype] + mv.cm_fromunit,
+ ch->firsts[mv.cm_totype] + mv.cm_tounit,
+ mv.cm_flags & CM_INVERT);
+ mutex_unlock(&ch->lock);
+ return retval;
+ }
+
+ case CHIOEXCHANGE:
+ {
+ struct changer_exchange mv;
+
+ if (copy_from_user(&mv, argp, sizeof (mv)))
+ return -EFAULT;
+
+ if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) ||
+ 0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) ||
+ 0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) {
+ DPRINTK("CHIOEXCHANGE: invalid parameter\n");
+ return -EBADSLT;
+ }
+
+ mutex_lock(&ch->lock);
+ retval = ch_exchange
+ (ch,0,
+ ch->firsts[mv.ce_srctype] + mv.ce_srcunit,
+ ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit,
+ ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit,
+ mv.ce_flags & CE_INVERT1, mv.ce_flags & CE_INVERT2);
+ mutex_unlock(&ch->lock);
+ return retval;
+ }
+
+ case CHIOGSTATUS:
+ {
+ struct changer_element_status ces;
+
+ if (copy_from_user(&ces, argp, sizeof (ces)))
+ return -EFAULT;
+ if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
+ return -EINVAL;
+
+ return ch_gstatus(ch, ces.ces_type, ces.ces_data);
+ }
+
+ case CHIOGELEM:
+ {
+ struct changer_get_element cge;
+ u_char ch_cmd[12];
+ u_char *buffer;
+ unsigned int elem;
+ int result,i;
+
+ if (copy_from_user(&cge, argp, sizeof (cge)))
+ return -EFAULT;
+
+ if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
+ return -EINVAL;
+ elem = ch->firsts[cge.cge_type] + cge.cge_unit;
+
+ buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+ mutex_lock(&ch->lock);
+
+ voltag_retry:
+ memset(ch_cmd, 0, sizeof(ch_cmd));
+ ch_cmd[0] = READ_ELEMENT_STATUS;
+ ch_cmd[1] = ((ch->device->lun & 0x7) << 5) |
+ (ch->voltags ? 0x10 : 0) |
+ ch_elem_to_typecode(ch,elem);
+ ch_cmd[2] = (elem >> 8) & 0xff;
+ ch_cmd[3] = elem & 0xff;
+ ch_cmd[5] = 1;
+ ch_cmd[9] = 255;
+
+ result = ch_do_scsi(ch, ch_cmd, 12,
+ buffer, 256, DMA_FROM_DEVICE);
+ if (!result) {
+ cge.cge_status = buffer[18];
+ cge.cge_flags = 0;
+ if (buffer[18] & CESTATUS_EXCEPT) {
+ cge.cge_errno = EIO;
+ }
+ if (buffer[25] & 0x80) {
+ cge.cge_flags |= CGE_SRC;
+ if (buffer[25] & 0x40)
+ cge.cge_flags |= CGE_INVERT;
+ elem = (buffer[26]<<8) | buffer[27];
+ for (i = 0; i < 4; i++) {
+ if (elem >= ch->firsts[i] &&
+ elem < ch->firsts[i] + ch->counts[i]) {
+ cge.cge_srctype = i;
+ cge.cge_srcunit = elem-ch->firsts[i];
+ }
+ }
+ }
+ if ((buffer[22] & 0x30) == 0x30) {
+ cge.cge_flags |= CGE_IDLUN;
+ cge.cge_id = buffer[23];
+ cge.cge_lun = buffer[22] & 7;
+ }
+ if (buffer[9] & 0x80) {
+ cge.cge_flags |= CGE_PVOLTAG;
+ memcpy(cge.cge_pvoltag,buffer+28,36);
+ }
+ if (buffer[9] & 0x40) {
+ cge.cge_flags |= CGE_AVOLTAG;
+ memcpy(cge.cge_avoltag,buffer+64,36);
+ }
+ } else if (ch->voltags) {
+ ch->voltags = 0;
+ VPRINTK(KERN_INFO, "device has no volume tag support\n");
+ goto voltag_retry;
+ }
+ kfree(buffer);
+ mutex_unlock(&ch->lock);
+
+ if (copy_to_user(argp, &cge, sizeof (cge)))
+ return -EFAULT;
+ return result;
+ }
+
+ case CHIOINITELEM:
+ {
+ mutex_lock(&ch->lock);
+ retval = ch_init_elem(ch);
+ mutex_unlock(&ch->lock);
+ return retval;
+ }
+
+ case CHIOSVOLTAG:
+ {
+ struct changer_set_voltag csv;
+ int elem;
+
+ if (copy_from_user(&csv, argp, sizeof(csv)))
+ return -EFAULT;
+
+ if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) {
+ DPRINTK("CHIOSVOLTAG: invalid parameter\n");
+ return -EBADSLT;
+ }
+ elem = ch->firsts[csv.csv_type] + csv.csv_unit;
+ mutex_lock(&ch->lock);
+ retval = ch_set_voltag(ch, elem,
+ csv.csv_flags & CSV_AVOLTAG,
+ csv.csv_flags & CSV_CLEARTAG,
+ csv.csv_voltag);
+ mutex_unlock(&ch->lock);
+ return retval;
+ }
+
+ default:
+ return scsi_ioctl(ch->device, cmd, argp);
+
+ }
+}
+
+#ifdef CONFIG_COMPAT
+
+struct changer_element_status32 {
+ int ces_type;
+ compat_uptr_t ces_data;
+};
+#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32)
+
+static long ch_ioctl_compat(struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+ scsi_changer *ch = file->private_data;
+
+ switch (cmd) {
+ case CHIOGPARAMS:
+ case CHIOGVPARAMS:
+ case CHIOPOSITION:
+ case CHIOMOVE:
+ case CHIOEXCHANGE:
+ case CHIOGELEM:
+ case CHIOINITELEM:
+ case CHIOSVOLTAG:
+ /* compatible */
+ return ch_ioctl(file, cmd, arg);
+ case CHIOGSTATUS32:
+ {
+ struct changer_element_status32 ces32;
+ unsigned char __user *data;
+
+ if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
+ return -EFAULT;
+ if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
+ return -EINVAL;
+
+ data = compat_ptr(ces32.ces_data);
+ return ch_gstatus(ch, ces32.ces_type, data);
+ }
+ default:
+ // return scsi_ioctl_compat(ch->device, cmd, (void*)arg);
+ return -ENOIOCTLCMD;
+
+ }
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static int ch_probe(struct device *dev)
+{
+ struct scsi_device *sd = to_scsi_device(dev);
+ struct device *class_dev;
+ int ret;
+ scsi_changer *ch;
+
+ if (sd->type != TYPE_MEDIUM_CHANGER)
+ return -ENODEV;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (NULL == ch)
+ return -ENOMEM;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&ch_index_lock);
+ ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
+ spin_unlock(&ch_index_lock);
+ idr_preload_end();
+
+ if (ret < 0) {
+ if (ret == -ENOSPC)
+ ret = -ENODEV;
+ goto free_ch;
+ }
+
+ ch->minor = ret;
+ sprintf(ch->name,"ch%d",ch->minor);
+
+ class_dev = device_create(ch_sysfs_class, dev,
+ MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
+ "s%s", ch->name);
+ if (IS_ERR(class_dev)) {
+ sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
+ ch->minor);
+ ret = PTR_ERR(class_dev);
+ goto remove_idr;
+ }
+
+ mutex_init(&ch->lock);
+ ch->device = sd;
+ ch_readconfig(ch);
+ if (init)
+ ch_init_elem(ch);
+
+ dev_set_drvdata(dev, ch);
+ sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
+
+ return 0;
+remove_idr:
+ idr_remove(&ch_index_idr, ch->minor);
+free_ch:
+ kfree(ch);
+ return ret;
+}
+
+static int ch_remove(struct device *dev)
+{
+ scsi_changer *ch = dev_get_drvdata(dev);
+
+ spin_lock(&ch_index_lock);
+ idr_remove(&ch_index_idr, ch->minor);
+ spin_unlock(&ch_index_lock);
+
+ device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ kfree(ch->dt);
+ kfree(ch);
+ return 0;
+}
+
+static struct scsi_driver ch_template = {
+ .gendrv = {
+ .name = "ch",
+ .owner = THIS_MODULE,
+ .probe = ch_probe,
+ .remove = ch_remove,
+ },
+};
+
+static const struct file_operations changer_fops = {
+ .owner = THIS_MODULE,
+ .open = ch_open,
+ .release = ch_release,
+ .unlocked_ioctl = ch_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ch_ioctl_compat,
+#endif
+ .llseek = noop_llseek,
+};
+
+static int __init init_ch_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
+ ch_sysfs_class = class_create(THIS_MODULE, "scsi_changer");
+ if (IS_ERR(ch_sysfs_class)) {
+ rc = PTR_ERR(ch_sysfs_class);
+ return rc;
+ }
+ rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops);
+ if (rc < 0) {
+ printk("Unable to get major %d for SCSI-Changer\n",
+ SCSI_CHANGER_MAJOR);
+ goto fail1;
+ }
+ rc = scsi_register_driver(&ch_template.gendrv);
+ if (rc < 0)
+ goto fail2;
+ return 0;
+
+ fail2:
+ unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
+ fail1:
+ class_destroy(ch_sysfs_class);
+ return rc;
+}
+
+static void __exit exit_ch_module(void)
+{
+ scsi_unregister_driver(&ch_template.gendrv);
+ unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
+ class_destroy(ch_sysfs_class);
+ idr_destroy(&ch_index_idr);
+}
+
+module_init(init_ch_module);
+module_exit(exit_ch_module);
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
new file mode 100644
index 000000000..fa09d4be2
--- /dev/null
+++ b/drivers/scsi/constants.c
@@ -0,0 +1,1274 @@
+/*
+ * ASCII values for a number of symbolic constants, printing functions,
+ * etc.
+ * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
+ * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
+ * by D. Gilbert and aeb (20020609)
+ * Updated to SPC-4 T10/1713-D Rev 36g, D. Gilbert 20130701
+ */
+
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
+
+/* Commands with service actions that change the command name */
+#define THIRD_PARTY_COPY_OUT 0x83
+#define THIRD_PARTY_COPY_IN 0x84
+
+struct sa_name_list {
+ int opcode;
+ const struct value_name_pair *arr;
+ int arr_sz;
+};
+
+struct value_name_pair {
+ int value;
+ const char * name;
+};
+
+static const char * cdb_byte0_names[] = {
+/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
+/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
+ "Reassign Blocks",
+/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
+/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
+/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
+ "Reserve(6)",
+/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
+/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
+/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
+/* 20-22 */ NULL, NULL, NULL,
+/* 23-28 */ "Read Format Capacities", "Set Window",
+ "Read Capacity(10)", NULL, NULL, "Read(10)",
+/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
+ "Read updated block",
+/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
+/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
+ "Read Defect Data(10)",
+/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
+ "Read Buffer",
+/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
+/* 40-41 */ "Change Definition", "Write Same(10)",
+/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP",
+ "Read density support", "Play audio(10)", "Get configuration",
+ "Play audio msf", "Sanitize/Play audio track/index",
+/* 49-4f */ "Play track relative(10)", "Get event status notification",
+ "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
+ NULL,
+/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
+ "Reserve track", "Send OPC info", "Mode Select(10)",
+/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
+ "Mode Sense(10)", "Close track/session",
+/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
+ "Persistent reserve out",
+/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB",
+ "Variable length",
+/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)",
+ "Third party copy out", "Third party copy in",
+/* 85-89 */ "ATA command pass through(16)", "Access control in",
+ "Access control out", "Read(16)", "Compare and Write",
+/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes",
+ "Write and verify(16)", "Verify(16)",
+/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
+ "Lock/unlock cache(16)", "Write same(16)", NULL,
+/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
+/* 9a-9f */ NULL, NULL, NULL, "Service action bidirectional",
+ "Service action in(16)", "Service action out(16)",
+/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
+ "Security protocol in", "Maintenance in", "Maintenance out",
+ "Move medium/play audio(12)",
+/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
+ "Play track relative(12)",
+/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
+ "Read DVD structure", "Write and verify(12)",
+/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
+/* b2-b4 */ "Search data low(12)", "Set limits(12)",
+ "Read element status attached",
+/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming",
+/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
+/* ba-bc */ "Redundancy group (in), Scan",
+ "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
+/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
+ "Volume set (out), Send DVD structure",
+};
+
+static const struct value_name_pair maint_in_arr[] = {
+ {0x5, "Report identifying information"},
+ {0xa, "Report target port groups"},
+ {0xb, "Report aliases"},
+ {0xc, "Report supported operation codes"},
+ {0xd, "Report supported task management functions"},
+ {0xe, "Report priority"},
+ {0xf, "Report timestamp"},
+ {0x10, "Management protocol in"},
+};
+#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
+
+static const struct value_name_pair maint_out_arr[] = {
+ {0x6, "Set identifying information"},
+ {0xa, "Set target port groups"},
+ {0xb, "Change aliases"},
+ {0xc, "Remove I_T nexus"},
+ {0xe, "Set priority"},
+ {0xf, "Set timestamp"},
+ {0x10, "Management protocol out"},
+};
+#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
+
+static const struct value_name_pair serv_in12_arr[] = {
+ {0x1, "Read media serial number"},
+};
+#define SERV_IN12_SZ ARRAY_SIZE(serv_in12_arr)
+
+static const struct value_name_pair serv_out12_arr[] = {
+ {-1, "dummy entry"},
+};
+#define SERV_OUT12_SZ ARRAY_SIZE(serv_out12_arr)
+
+static const struct value_name_pair serv_bidi_arr[] = {
+ {-1, "dummy entry"},
+};
+#define SERV_BIDI_SZ ARRAY_SIZE(serv_bidi_arr)
+
+static const struct value_name_pair serv_in16_arr[] = {
+ {0x10, "Read capacity(16)"},
+ {0x11, "Read long(16)"},
+ {0x12, "Get LBA status"},
+ {0x13, "Report referrals"},
+};
+#define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr)
+
+static const struct value_name_pair serv_out16_arr[] = {
+ {0x11, "Write long(16)"},
+ {0x1f, "Notify data transfer device(16)"},
+};
+#define SERV_OUT16_SZ ARRAY_SIZE(serv_out16_arr)
+
+static const struct value_name_pair pr_in_arr[] = {
+ {0x0, "Persistent reserve in, read keys"},
+ {0x1, "Persistent reserve in, read reservation"},
+ {0x2, "Persistent reserve in, report capabilities"},
+ {0x3, "Persistent reserve in, read full status"},
+};
+#define PR_IN_SZ ARRAY_SIZE(pr_in_arr)
+
+static const struct value_name_pair pr_out_arr[] = {
+ {0x0, "Persistent reserve out, register"},
+ {0x1, "Persistent reserve out, reserve"},
+ {0x2, "Persistent reserve out, release"},
+ {0x3, "Persistent reserve out, clear"},
+ {0x4, "Persistent reserve out, preempt"},
+ {0x5, "Persistent reserve out, preempt and abort"},
+ {0x6, "Persistent reserve out, register and ignore existing key"},
+ {0x7, "Persistent reserve out, register and move"},
+};
+#define PR_OUT_SZ ARRAY_SIZE(pr_out_arr)
+
+/* SPC-4 rev 34 renamed the Extended Copy opcode to Third Party Copy Out.
+ LID1 (List Identifier length: 1 byte) is the Extended Copy found in SPC-2
+ and SPC-3 */
+static const struct value_name_pair tpc_out_arr[] = {
+ {0x0, "Extended copy(LID1)"},
+ {0x1, "Extended copy(LID4)"},
+ {0x10, "Populate token"},
+ {0x11, "Write using token"},
+ {0x1c, "Copy operation abort"},
+};
+#define TPC_OUT_SZ ARRAY_SIZE(tpc_out_arr)
+
+static const struct value_name_pair tpc_in_arr[] = {
+ {0x0, "Receive copy status(LID1)"},
+ {0x1, "Receive copy data(LID1)"},
+ {0x3, "Receive copy operating parameters"},
+ {0x4, "Receive copy failure details(LID1)"},
+ {0x5, "Receive copy status(LID4)"},
+ {0x6, "Receive copy data(LID4)"},
+ {0x7, "Receive ROD token information"},
+ {0x8, "Report all ROD tokens"},
+};
+#define TPC_IN_SZ ARRAY_SIZE(tpc_in_arr)
+
+
+static const struct value_name_pair variable_length_arr[] = {
+ {0x1, "Rebuild(32)"},
+ {0x2, "Regenerate(32)"},
+ {0x3, "Xdread(32)"},
+ {0x4, "Xdwrite(32)"},
+ {0x5, "Xdwrite extended(32)"},
+ {0x6, "Xpwrite(32)"},
+ {0x7, "Xdwriteread(32)"},
+ {0x8, "Xdwrite extended(64)"},
+ {0x9, "Read(32)"},
+ {0xa, "Verify(32)"},
+ {0xb, "Write(32)"},
+ {0xc, "Write an verify(32)"},
+ {0xd, "Write same(32)"},
+ {0x8801, "Format OSD"},
+ {0x8802, "Create (osd)"},
+ {0x8803, "List (osd)"},
+ {0x8805, "Read (osd)"},
+ {0x8806, "Write (osd)"},
+ {0x8807, "Append (osd)"},
+ {0x8808, "Flush (osd)"},
+ {0x880a, "Remove (osd)"},
+ {0x880b, "Create partition (osd)"},
+ {0x880c, "Remove partition (osd)"},
+ {0x880e, "Get attributes (osd)"},
+ {0x880f, "Set attributes (osd)"},
+ {0x8812, "Create and write (osd)"},
+ {0x8815, "Create collection (osd)"},
+ {0x8816, "Remove collection (osd)"},
+ {0x8817, "List collection (osd)"},
+ {0x8818, "Set key (osd)"},
+ {0x8819, "Set master key (osd)"},
+ {0x881a, "Flush collection (osd)"},
+ {0x881b, "Flush partition (osd)"},
+ {0x881c, "Flush OSD"},
+ {0x8f7e, "Perform SCSI command (osd)"},
+ {0x8f7f, "Perform task management function (osd)"},
+};
+#define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr)
+
+static struct sa_name_list sa_names_arr[] = {
+ {VARIABLE_LENGTH_CMD, variable_length_arr, VARIABLE_LENGTH_SZ},
+ {MAINTENANCE_IN, maint_in_arr, MAINT_IN_SZ},
+ {MAINTENANCE_OUT, maint_out_arr, MAINT_OUT_SZ},
+ {PERSISTENT_RESERVE_IN, pr_in_arr, PR_IN_SZ},
+ {PERSISTENT_RESERVE_OUT, pr_out_arr, PR_OUT_SZ},
+ {SERVICE_ACTION_IN_12, serv_in12_arr, SERV_IN12_SZ},
+ {SERVICE_ACTION_OUT_12, serv_out12_arr, SERV_OUT12_SZ},
+ {SERVICE_ACTION_BIDIRECTIONAL, serv_bidi_arr, SERV_BIDI_SZ},
+ {SERVICE_ACTION_IN_16, serv_in16_arr, SERV_IN16_SZ},
+ {SERVICE_ACTION_OUT_16, serv_out16_arr, SERV_OUT16_SZ},
+ {THIRD_PARTY_COPY_IN, tpc_in_arr, TPC_IN_SZ},
+ {THIRD_PARTY_COPY_OUT, tpc_out_arr, TPC_OUT_SZ},
+ {0, NULL, 0},
+};
+
+bool scsi_opcode_sa_name(int opcode, int service_action,
+ const char **cdb_name, const char **sa_name)
+{
+ struct sa_name_list *sa_name_ptr;
+ const struct value_name_pair *arr = NULL;
+ int arr_sz, k;
+
+ *cdb_name = NULL;
+ if (opcode >= VENDOR_SPECIFIC_CDB)
+ return false;
+
+ if (opcode < ARRAY_SIZE(cdb_byte0_names))
+ *cdb_name = cdb_byte0_names[opcode];
+
+ for (sa_name_ptr = sa_names_arr; sa_name_ptr->arr; ++sa_name_ptr) {
+ if (sa_name_ptr->opcode == opcode) {
+ arr = sa_name_ptr->arr;
+ arr_sz = sa_name_ptr->arr_sz;
+ break;
+ }
+ }
+ if (!arr)
+ return false;
+
+ for (k = 0; k < arr_sz; ++k, ++arr) {
+ if (service_action == arr->value)
+ break;
+ }
+ if (k < arr_sz)
+ *sa_name = arr->name;
+
+ return true;
+}
+
+struct error_info {
+ unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */
+ const char * text;
+};
+
+/*
+ * The canonical list of T10 Additional Sense Codes is available at:
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ */
+
+static const struct error_info additional[] =
+{
+ {0x0000, "No additional sense information"},
+ {0x0001, "Filemark detected"},
+ {0x0002, "End-of-partition/medium detected"},
+ {0x0003, "Setmark detected"},
+ {0x0004, "Beginning-of-partition/medium detected"},
+ {0x0005, "End-of-data detected"},
+ {0x0006, "I/O process terminated"},
+ {0x0007, "Programmable early warning detected"},
+ {0x0011, "Audio play operation in progress"},
+ {0x0012, "Audio play operation paused"},
+ {0x0013, "Audio play operation successfully completed"},
+ {0x0014, "Audio play operation stopped due to error"},
+ {0x0015, "No current audio status to return"},
+ {0x0016, "Operation in progress"},
+ {0x0017, "Cleaning requested"},
+ {0x0018, "Erase operation in progress"},
+ {0x0019, "Locate operation in progress"},
+ {0x001A, "Rewind operation in progress"},
+ {0x001B, "Set capacity operation in progress"},
+ {0x001C, "Verify operation in progress"},
+ {0x001D, "ATA pass through information available"},
+ {0x001E, "Conflicting SA creation request"},
+ {0x001F, "Logical unit transitioning to another power condition"},
+ {0x0020, "Extended copy information available"},
+ {0x0021, "Atomic command aborted due to ACA"},
+
+ {0x0100, "No index/sector signal"},
+
+ {0x0200, "No seek complete"},
+
+ {0x0300, "Peripheral device write fault"},
+ {0x0301, "No write current"},
+ {0x0302, "Excessive write errors"},
+
+ {0x0400, "Logical unit not ready, cause not reportable"},
+ {0x0401, "Logical unit is in process of becoming ready"},
+ {0x0402, "Logical unit not ready, initializing command required"},
+ {0x0403, "Logical unit not ready, manual intervention required"},
+ {0x0404, "Logical unit not ready, format in progress"},
+ {0x0405, "Logical unit not ready, rebuild in progress"},
+ {0x0406, "Logical unit not ready, recalculation in progress"},
+ {0x0407, "Logical unit not ready, operation in progress"},
+ {0x0408, "Logical unit not ready, long write in progress"},
+ {0x0409, "Logical unit not ready, self-test in progress"},
+ {0x040A, "Logical unit not accessible, asymmetric access state "
+ "transition"},
+ {0x040B, "Logical unit not accessible, target port in standby state"},
+ {0x040C, "Logical unit not accessible, target port in unavailable "
+ "state"},
+ {0x040D, "Logical unit not ready, structure check required"},
+ {0x040E, "Logical unit not ready, security session in progress"},
+ {0x0410, "Logical unit not ready, auxiliary memory not accessible"},
+ {0x0411, "Logical unit not ready, notify (enable spinup) required"},
+ {0x0412, "Logical unit not ready, offline"},
+ {0x0413, "Logical unit not ready, SA creation in progress"},
+ {0x0414, "Logical unit not ready, space allocation in progress"},
+ {0x0415, "Logical unit not ready, robotics disabled"},
+ {0x0416, "Logical unit not ready, configuration required"},
+ {0x0417, "Logical unit not ready, calibration required"},
+ {0x0418, "Logical unit not ready, a door is open"},
+ {0x0419, "Logical unit not ready, operating in sequential mode"},
+ {0x041A, "Logical unit not ready, start stop unit command in "
+ "progress"},
+ {0x041B, "Logical unit not ready, sanitize in progress"},
+ {0x041C, "Logical unit not ready, additional power use not yet "
+ "granted"},
+ {0x041D, "Logical unit not ready, configuration in progress"},
+ {0x041E, "Logical unit not ready, microcode activation required"},
+ {0x041F, "Logical unit not ready, microcode download required"},
+ {0x0420, "Logical unit not ready, logical unit reset required"},
+ {0x0421, "Logical unit not ready, hard reset required"},
+ {0x0422, "Logical unit not ready, power cycle required"},
+
+ {0x0500, "Logical unit does not respond to selection"},
+
+ {0x0600, "No reference position found"},
+
+ {0x0700, "Multiple peripheral devices selected"},
+
+ {0x0800, "Logical unit communication failure"},
+ {0x0801, "Logical unit communication time-out"},
+ {0x0802, "Logical unit communication parity error"},
+ {0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"},
+ {0x0804, "Unreachable copy target"},
+
+ {0x0900, "Track following error"},
+ {0x0901, "Tracking servo failure"},
+ {0x0902, "Focus servo failure"},
+ {0x0903, "Spindle servo failure"},
+ {0x0904, "Head select fault"},
+ {0x0905, "Vibration induced tracking error"},
+
+ {0x0A00, "Error log overflow"},
+
+ {0x0B00, "Warning"},
+ {0x0B01, "Warning - specified temperature exceeded"},
+ {0x0B02, "Warning - enclosure degraded"},
+ {0x0B03, "Warning - background self-test failed"},
+ {0x0B04, "Warning - background pre-scan detected medium error"},
+ {0x0B05, "Warning - background medium scan detected medium error"},
+ {0x0B06, "Warning - non-volatile cache now volatile"},
+ {0x0B07, "Warning - degraded power to non-volatile cache"},
+ {0x0B08, "Warning - power loss expected"},
+ {0x0B09, "Warning - device statistics notification active"},
+
+ {0x0C00, "Write error"},
+ {0x0C01, "Write error - recovered with auto reallocation"},
+ {0x0C02, "Write error - auto reallocation failed"},
+ {0x0C03, "Write error - recommend reassignment"},
+ {0x0C04, "Compression check miscompare error"},
+ {0x0C05, "Data expansion occurred during compression"},
+ {0x0C06, "Block not compressible"},
+ {0x0C07, "Write error - recovery needed"},
+ {0x0C08, "Write error - recovery failed"},
+ {0x0C09, "Write error - loss of streaming"},
+ {0x0C0A, "Write error - padding blocks added"},
+ {0x0C0B, "Auxiliary memory write error"},
+ {0x0C0C, "Write error - unexpected unsolicited data"},
+ {0x0C0D, "Write error - not enough unsolicited data"},
+ {0x0C0E, "Multiple write errors"},
+ {0x0C0F, "Defects in error window"},
+ {0x0C10, "Incomplete multiple atomic write operations"},
+
+ {0x0D00, "Error detected by third party temporary initiator"},
+ {0x0D01, "Third party device failure"},
+ {0x0D02, "Copy target device not reachable"},
+ {0x0D03, "Incorrect copy target device type"},
+ {0x0D04, "Copy target device data underrun"},
+ {0x0D05, "Copy target device data overrun"},
+
+ {0x0E00, "Invalid information unit"},
+ {0x0E01, "Information unit too short"},
+ {0x0E02, "Information unit too long"},
+ {0x0E03, "Invalid field in command information unit"},
+
+ {0x1000, "Id CRC or ECC error"},
+ {0x1001, "Logical block guard check failed"},
+ {0x1002, "Logical block application tag check failed"},
+ {0x1003, "Logical block reference tag check failed"},
+ {0x1004, "Logical block protection error on recover buffered data"},
+ {0x1005, "Logical block protection method error"},
+
+ {0x1100, "Unrecovered read error"},
+ {0x1101, "Read retries exhausted"},
+ {0x1102, "Error too long to correct"},
+ {0x1103, "Multiple read errors"},
+ {0x1104, "Unrecovered read error - auto reallocate failed"},
+ {0x1105, "L-EC uncorrectable error"},
+ {0x1106, "CIRC unrecovered error"},
+ {0x1107, "Data re-synchronization error"},
+ {0x1108, "Incomplete block read"},
+ {0x1109, "No gap found"},
+ {0x110A, "Miscorrected error"},
+ {0x110B, "Unrecovered read error - recommend reassignment"},
+ {0x110C, "Unrecovered read error - recommend rewrite the data"},
+ {0x110D, "De-compression CRC error"},
+ {0x110E, "Cannot decompress using declared algorithm"},
+ {0x110F, "Error reading UPC/EAN number"},
+ {0x1110, "Error reading ISRC number"},
+ {0x1111, "Read error - loss of streaming"},
+ {0x1112, "Auxiliary memory read error"},
+ {0x1113, "Read error - failed retransmission request"},
+ {0x1114, "Read error - lba marked bad by application client"},
+ {0x1115, "Write after sanitize required"},
+
+ {0x1200, "Address mark not found for id field"},
+
+ {0x1300, "Address mark not found for data field"},
+
+ {0x1400, "Recorded entity not found"},
+ {0x1401, "Record not found"},
+ {0x1402, "Filemark or setmark not found"},
+ {0x1403, "End-of-data not found"},
+ {0x1404, "Block sequence error"},
+ {0x1405, "Record not found - recommend reassignment"},
+ {0x1406, "Record not found - data auto-reallocated"},
+ {0x1407, "Locate operation failure"},
+
+ {0x1500, "Random positioning error"},
+ {0x1501, "Mechanical positioning error"},
+ {0x1502, "Positioning error detected by read of medium"},
+
+ {0x1600, "Data synchronization mark error"},
+ {0x1601, "Data sync error - data rewritten"},
+ {0x1602, "Data sync error - recommend rewrite"},
+ {0x1603, "Data sync error - data auto-reallocated"},
+ {0x1604, "Data sync error - recommend reassignment"},
+
+ {0x1700, "Recovered data with no error correction applied"},
+ {0x1701, "Recovered data with retries"},
+ {0x1702, "Recovered data with positive head offset"},
+ {0x1703, "Recovered data with negative head offset"},
+ {0x1704, "Recovered data with retries and/or circ applied"},
+ {0x1705, "Recovered data using previous sector id"},
+ {0x1706, "Recovered data without ECC - data auto-reallocated"},
+ {0x1707, "Recovered data without ECC - recommend reassignment"},
+ {0x1708, "Recovered data without ECC - recommend rewrite"},
+ {0x1709, "Recovered data without ECC - data rewritten"},
+
+ {0x1800, "Recovered data with error correction applied"},
+ {0x1801, "Recovered data with error corr. & retries applied"},
+ {0x1802, "Recovered data - data auto-reallocated"},
+ {0x1803, "Recovered data with CIRC"},
+ {0x1804, "Recovered data with L-EC"},
+ {0x1805, "Recovered data - recommend reassignment"},
+ {0x1806, "Recovered data - recommend rewrite"},
+ {0x1807, "Recovered data with ECC - data rewritten"},
+ {0x1808, "Recovered data with linking"},
+
+ {0x1900, "Defect list error"},
+ {0x1901, "Defect list not available"},
+ {0x1902, "Defect list error in primary list"},
+ {0x1903, "Defect list error in grown list"},
+
+ {0x1A00, "Parameter list length error"},
+
+ {0x1B00, "Synchronous data transfer error"},
+
+ {0x1C00, "Defect list not found"},
+ {0x1C01, "Primary defect list not found"},
+ {0x1C02, "Grown defect list not found"},
+
+ {0x1D00, "Miscompare during verify operation"},
+ {0x1D01, "Miscompare verify of unmapped LBA"},
+
+ {0x1E00, "Recovered id with ECC correction"},
+
+ {0x1F00, "Partial defect list transfer"},
+
+ {0x2000, "Invalid command operation code"},
+ {0x2001, "Access denied - initiator pending-enrolled"},
+ {0x2002, "Access denied - no access rights"},
+ {0x2003, "Access denied - invalid mgmt id key"},
+ {0x2004, "Illegal command while in write capable state"},
+ {0x2005, "Obsolete"},
+ {0x2006, "Illegal command while in explicit address mode"},
+ {0x2007, "Illegal command while in implicit address mode"},
+ {0x2008, "Access denied - enrollment conflict"},
+ {0x2009, "Access denied - invalid LU identifier"},
+ {0x200A, "Access denied - invalid proxy token"},
+ {0x200B, "Access denied - ACL LUN conflict"},
+ {0x200C, "Illegal command when not in append-only mode"},
+
+ {0x2100, "Logical block address out of range"},
+ {0x2101, "Invalid element address"},
+ {0x2102, "Invalid address for write"},
+ {0x2103, "Invalid write crossing layer jump"},
+ {0x2104, "Unaligned write command"},
+ {0x2105, "Write boundary violation"},
+ {0x2106, "Attempt to read invalid data"},
+ {0x2107, "Read boundary violation"},
+
+ {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
+
+ {0x2300, "Invalid token operation, cause not reportable"},
+ {0x2301, "Invalid token operation, unsupported token type"},
+ {0x2302, "Invalid token operation, remote token usage not supported"},
+ {0x2303, "Invalid token operation, remote rod token creation not "
+ "supported"},
+ {0x2304, "Invalid token operation, token unknown"},
+ {0x2305, "Invalid token operation, token corrupt"},
+ {0x2306, "Invalid token operation, token revoked"},
+ {0x2307, "Invalid token operation, token expired"},
+ {0x2308, "Invalid token operation, token cancelled"},
+ {0x2309, "Invalid token operation, token deleted"},
+ {0x230A, "Invalid token operation, invalid token length"},
+
+ {0x2400, "Invalid field in cdb"},
+ {0x2401, "CDB decryption error"},
+ {0x2402, "Obsolete"},
+ {0x2403, "Obsolete"},
+ {0x2404, "Security audit value frozen"},
+ {0x2405, "Security working key frozen"},
+ {0x2406, "Nonce not unique"},
+ {0x2407, "Nonce timestamp out of range"},
+ {0x2408, "Invalid XCDB"},
+
+ {0x2500, "Logical unit not supported"},
+
+ {0x2600, "Invalid field in parameter list"},
+ {0x2601, "Parameter not supported"},
+ {0x2602, "Parameter value invalid"},
+ {0x2603, "Threshold parameters not supported"},
+ {0x2604, "Invalid release of persistent reservation"},
+ {0x2605, "Data decryption error"},
+ {0x2606, "Too many target descriptors"},
+ {0x2607, "Unsupported target descriptor type code"},
+ {0x2608, "Too many segment descriptors"},
+ {0x2609, "Unsupported segment descriptor type code"},
+ {0x260A, "Unexpected inexact segment"},
+ {0x260B, "Inline data length exceeded"},
+ {0x260C, "Invalid operation for copy source or destination"},
+ {0x260D, "Copy segment granularity violation"},
+ {0x260E, "Invalid parameter while port is enabled"},
+ {0x260F, "Invalid data-out buffer integrity check value"},
+ {0x2610, "Data decryption key fail limit reached"},
+ {0x2611, "Incomplete key-associated data set"},
+ {0x2612, "Vendor specific key reference not found"},
+
+ {0x2700, "Write protected"},
+ {0x2701, "Hardware write protected"},
+ {0x2702, "Logical unit software write protected"},
+ {0x2703, "Associated write protect"},
+ {0x2704, "Persistent write protect"},
+ {0x2705, "Permanent write protect"},
+ {0x2706, "Conditional write protect"},
+ {0x2707, "Space allocation failed write protect"},
+ {0x2708, "Zone is read only"},
+
+ {0x2800, "Not ready to ready change, medium may have changed"},
+ {0x2801, "Import or export element accessed"},
+ {0x2802, "Format-layer may have changed"},
+ {0x2803, "Import/export element accessed, medium changed"},
+
+ {0x2900, "Power on, reset, or bus device reset occurred"},
+ {0x2901, "Power on occurred"},
+ {0x2902, "Scsi bus reset occurred"},
+ {0x2903, "Bus device reset function occurred"},
+ {0x2904, "Device internal reset"},
+ {0x2905, "Transceiver mode changed to single-ended"},
+ {0x2906, "Transceiver mode changed to lvd"},
+ {0x2907, "I_T nexus loss occurred"},
+
+ {0x2A00, "Parameters changed"},
+ {0x2A01, "Mode parameters changed"},
+ {0x2A02, "Log parameters changed"},
+ {0x2A03, "Reservations preempted"},
+ {0x2A04, "Reservations released"},
+ {0x2A05, "Registrations preempted"},
+ {0x2A06, "Asymmetric access state changed"},
+ {0x2A07, "Implicit asymmetric access state transition failed"},
+ {0x2A08, "Priority changed"},
+ {0x2A09, "Capacity data has changed"},
+ {0x2A0A, "Error history I_T nexus cleared"},
+ {0x2A0B, "Error history snapshot released"},
+ {0x2A0C, "Error recovery attributes have changed"},
+ {0x2A0D, "Data encryption capabilities changed"},
+ {0x2A10, "Timestamp changed"},
+ {0x2A11, "Data encryption parameters changed by another i_t nexus"},
+ {0x2A12, "Data encryption parameters changed by vendor specific "
+ "event"},
+ {0x2A13, "Data encryption key instance counter has changed"},
+ {0x2A14, "SA creation capabilities data has changed"},
+ {0x2A15, "Medium removal prevention preempted"},
+
+ {0x2B00, "Copy cannot execute since host cannot disconnect"},
+
+ {0x2C00, "Command sequence error"},
+ {0x2C01, "Too many windows specified"},
+ {0x2C02, "Invalid combination of windows specified"},
+ {0x2C03, "Current program area is not empty"},
+ {0x2C04, "Current program area is empty"},
+ {0x2C05, "Illegal power condition request"},
+ {0x2C06, "Persistent prevent conflict"},
+ {0x2C07, "Previous busy status"},
+ {0x2C08, "Previous task set full status"},
+ {0x2C09, "Previous reservation conflict status"},
+ {0x2C0A, "Partition or collection contains user objects"},
+ {0x2C0B, "Not reserved"},
+ {0x2C0C, "Orwrite generation does not match"},
+ {0x2C0D, "Reset write pointer not allowed"},
+ {0x2C0E, "Zone is offline"},
+
+ {0x2D00, "Overwrite error on update in place"},
+
+ {0x2E00, "Insufficient time for operation"},
+ {0x2E01, "Command timeout before processing"},
+ {0x2E02, "Command timeout during processing"},
+ {0x2E03, "Command timeout during processing due to error recovery"},
+
+ {0x2F00, "Commands cleared by another initiator"},
+ {0x2F01, "Commands cleared by power loss notification"},
+ {0x2F02, "Commands cleared by device server"},
+ {0x2F03, "Some commands cleared by queuing layer event"},
+
+ {0x3000, "Incompatible medium installed"},
+ {0x3001, "Cannot read medium - unknown format"},
+ {0x3002, "Cannot read medium - incompatible format"},
+ {0x3003, "Cleaning cartridge installed"},
+ {0x3004, "Cannot write medium - unknown format"},
+ {0x3005, "Cannot write medium - incompatible format"},
+ {0x3006, "Cannot format medium - incompatible medium"},
+ {0x3007, "Cleaning failure"},
+ {0x3008, "Cannot write - application code mismatch"},
+ {0x3009, "Current session not fixated for append"},
+ {0x300A, "Cleaning request rejected"},
+ {0x300C, "WORM medium - overwrite attempted"},
+ {0x300D, "WORM medium - integrity check"},
+ {0x3010, "Medium not formatted"},
+ {0x3011, "Incompatible volume type"},
+ {0x3012, "Incompatible volume qualifier"},
+ {0x3013, "Cleaning volume expired"},
+
+ {0x3100, "Medium format corrupted"},
+ {0x3101, "Format command failed"},
+ {0x3102, "Zoned formatting failed due to spare linking"},
+ {0x3103, "Sanitize command failed"},
+
+ {0x3200, "No defect spare location available"},
+ {0x3201, "Defect list update failure"},
+
+ {0x3300, "Tape length error"},
+
+ {0x3400, "Enclosure failure"},
+
+ {0x3500, "Enclosure services failure"},
+ {0x3501, "Unsupported enclosure function"},
+ {0x3502, "Enclosure services unavailable"},
+ {0x3503, "Enclosure services transfer failure"},
+ {0x3504, "Enclosure services transfer refused"},
+ {0x3505, "Enclosure services checksum error"},
+
+ {0x3600, "Ribbon, ink, or toner failure"},
+
+ {0x3700, "Rounded parameter"},
+
+ {0x3800, "Event status notification"},
+ {0x3802, "Esn - power management class event"},
+ {0x3804, "Esn - media class event"},
+ {0x3806, "Esn - device busy class event"},
+ {0x3807, "Thin Provisioning soft threshold reached"},
+
+ {0x3900, "Saving parameters not supported"},
+
+ {0x3A00, "Medium not present"},
+ {0x3A01, "Medium not present - tray closed"},
+ {0x3A02, "Medium not present - tray open"},
+ {0x3A03, "Medium not present - loadable"},
+ {0x3A04, "Medium not present - medium auxiliary memory accessible"},
+
+ {0x3B00, "Sequential positioning error"},
+ {0x3B01, "Tape position error at beginning-of-medium"},
+ {0x3B02, "Tape position error at end-of-medium"},
+ {0x3B03, "Tape or electronic vertical forms unit not ready"},
+ {0x3B04, "Slew failure"},
+ {0x3B05, "Paper jam"},
+ {0x3B06, "Failed to sense top-of-form"},
+ {0x3B07, "Failed to sense bottom-of-form"},
+ {0x3B08, "Reposition error"},
+ {0x3B09, "Read past end of medium"},
+ {0x3B0A, "Read past beginning of medium"},
+ {0x3B0B, "Position past end of medium"},
+ {0x3B0C, "Position past beginning of medium"},
+ {0x3B0D, "Medium destination element full"},
+ {0x3B0E, "Medium source element empty"},
+ {0x3B0F, "End of medium reached"},
+ {0x3B11, "Medium magazine not accessible"},
+ {0x3B12, "Medium magazine removed"},
+ {0x3B13, "Medium magazine inserted"},
+ {0x3B14, "Medium magazine locked"},
+ {0x3B15, "Medium magazine unlocked"},
+ {0x3B16, "Mechanical positioning or changer error"},
+ {0x3B17, "Read past end of user object"},
+ {0x3B18, "Element disabled"},
+ {0x3B19, "Element enabled"},
+ {0x3B1A, "Data transfer device removed"},
+ {0x3B1B, "Data transfer device inserted"},
+ {0x3B1C, "Too many logical objects on partition to support "
+ "operation"},
+
+ {0x3D00, "Invalid bits in identify message"},
+
+ {0x3E00, "Logical unit has not self-configured yet"},
+ {0x3E01, "Logical unit failure"},
+ {0x3E02, "Timeout on logical unit"},
+ {0x3E03, "Logical unit failed self-test"},
+ {0x3E04, "Logical unit unable to update self-test log"},
+
+ {0x3F00, "Target operating conditions have changed"},
+ {0x3F01, "Microcode has been changed"},
+ {0x3F02, "Changed operating definition"},
+ {0x3F03, "Inquiry data has changed"},
+ {0x3F04, "Component device attached"},
+ {0x3F05, "Device identifier changed"},
+ {0x3F06, "Redundancy group created or modified"},
+ {0x3F07, "Redundancy group deleted"},
+ {0x3F08, "Spare created or modified"},
+ {0x3F09, "Spare deleted"},
+ {0x3F0A, "Volume set created or modified"},
+ {0x3F0B, "Volume set deleted"},
+ {0x3F0C, "Volume set deassigned"},
+ {0x3F0D, "Volume set reassigned"},
+ {0x3F0E, "Reported luns data has changed"},
+ {0x3F0F, "Echo buffer overwritten"},
+ {0x3F10, "Medium loadable"},
+ {0x3F11, "Medium auxiliary memory accessible"},
+ {0x3F12, "iSCSI IP address added"},
+ {0x3F13, "iSCSI IP address removed"},
+ {0x3F14, "iSCSI IP address changed"},
+ {0x3F15, "Inspect referrals sense descriptors"},
+ {0x3F16, "Microcode has been changed without reset"},
+/*
+ * {0x40NN, "Ram failure"},
+ * {0x40NN, "Diagnostic failure on component nn"},
+ * {0x41NN, "Data path failure"},
+ * {0x42NN, "Power-on or self-test failure"},
+ */
+ {0x4300, "Message error"},
+
+ {0x4400, "Internal target failure"},
+ {0x4401, "Persistent reservation information lost"},
+ {0x4471, "ATA device failed set features"},
+
+ {0x4500, "Select or reselect failure"},
+
+ {0x4600, "Unsuccessful soft reset"},
+
+ {0x4700, "Scsi parity error"},
+ {0x4701, "Data phase CRC error detected"},
+ {0x4702, "Scsi parity error detected during st data phase"},
+ {0x4703, "Information unit iuCRC error detected"},
+ {0x4704, "Asynchronous information protection error detected"},
+ {0x4705, "Protocol service CRC error"},
+ {0x4706, "Phy test function in progress"},
+ {0x477f, "Some commands cleared by iSCSI Protocol event"},
+
+ {0x4800, "Initiator detected error message received"},
+
+ {0x4900, "Invalid message error"},
+
+ {0x4A00, "Command phase error"},
+
+ {0x4B00, "Data phase error"},
+ {0x4B01, "Invalid target port transfer tag received"},
+ {0x4B02, "Too much write data"},
+ {0x4B03, "Ack/nak timeout"},
+ {0x4B04, "Nak received"},
+ {0x4B05, "Data offset error"},
+ {0x4B06, "Initiator response timeout"},
+ {0x4B07, "Connection lost"},
+ {0x4B08, "Data-in buffer overflow - data buffer size"},
+ {0x4B09, "Data-in buffer overflow - data buffer descriptor area"},
+ {0x4B0A, "Data-in buffer error"},
+ {0x4B0B, "Data-out buffer overflow - data buffer size"},
+ {0x4B0C, "Data-out buffer overflow - data buffer descriptor area"},
+ {0x4B0D, "Data-out buffer error"},
+ {0x4B0E, "PCIe fabric error"},
+ {0x4B0F, "PCIe completion timeout"},
+ {0x4B10, "PCIe completer abort"},
+ {0x4B11, "PCIe poisoned tlp received"},
+ {0x4B12, "PCIe eCRC check failed"},
+ {0x4B13, "PCIe unsupported request"},
+ {0x4B14, "PCIe acs violation"},
+ {0x4B15, "PCIe tlp prefix blocked"},
+
+ {0x4C00, "Logical unit failed self-configuration"},
+/*
+ * {0x4DNN, "Tagged overlapped commands (nn = queue tag)"},
+ */
+ {0x4E00, "Overlapped commands attempted"},
+
+ {0x5000, "Write append error"},
+ {0x5001, "Write append position error"},
+ {0x5002, "Position error related to timing"},
+
+ {0x5100, "Erase failure"},
+ {0x5101, "Erase failure - incomplete erase operation detected"},
+
+ {0x5200, "Cartridge fault"},
+
+ {0x5300, "Media load or eject failed"},
+ {0x5301, "Unload tape failure"},
+ {0x5302, "Medium removal prevented"},
+ {0x5303, "Medium removal prevented by data transfer element"},
+ {0x5304, "Medium thread or unthread failure"},
+ {0x5305, "Volume identifier invalid"},
+ {0x5306, "Volume identifier missing"},
+ {0x5307, "Duplicate volume identifier"},
+ {0x5308, "Element status unknown"},
+ {0x5309, "Data transfer device error - load failed"},
+ {0x530a, "Data transfer device error - unload failed"},
+ {0x530b, "Data transfer device error - unload missing"},
+ {0x530c, "Data transfer device error - eject failed"},
+ {0x530d, "Data transfer device error - library communication failed"},
+
+ {0x5400, "Scsi to host system interface failure"},
+
+ {0x5500, "System resource failure"},
+ {0x5501, "System buffer full"},
+ {0x5502, "Insufficient reservation resources"},
+ {0x5503, "Insufficient resources"},
+ {0x5504, "Insufficient registration resources"},
+ {0x5505, "Insufficient access control resources"},
+ {0x5506, "Auxiliary memory out of space"},
+ {0x5507, "Quota error"},
+ {0x5508, "Maximum number of supplemental decryption keys exceeded"},
+ {0x5509, "Medium auxiliary memory not accessible"},
+ {0x550A, "Data currently unavailable"},
+ {0x550B, "Insufficient power for operation"},
+ {0x550C, "Insufficient resources to create rod"},
+ {0x550D, "Insufficient resources to create rod token"},
+ {0x550E, "Insufficient zone resources"},
+
+ {0x5700, "Unable to recover table-of-contents"},
+
+ {0x5800, "Generation does not exist"},
+
+ {0x5900, "Updated block read"},
+
+ {0x5A00, "Operator request or state change input"},
+ {0x5A01, "Operator medium removal request"},
+ {0x5A02, "Operator selected write protect"},
+ {0x5A03, "Operator selected write permit"},
+
+ {0x5B00, "Log exception"},
+ {0x5B01, "Threshold condition met"},
+ {0x5B02, "Log counter at maximum"},
+ {0x5B03, "Log list codes exhausted"},
+
+ {0x5C00, "Rpl status change"},
+ {0x5C01, "Spindles synchronized"},
+ {0x5C02, "Spindles not synchronized"},
+
+ {0x5D00, "Failure prediction threshold exceeded"},
+ {0x5D01, "Media failure prediction threshold exceeded"},
+ {0x5D02, "Logical unit failure prediction threshold exceeded"},
+ {0x5D03, "Spare area exhaustion prediction threshold exceeded"},
+ {0x5D10, "Hardware impending failure general hard drive failure"},
+ {0x5D11, "Hardware impending failure drive error rate too high"},
+ {0x5D12, "Hardware impending failure data error rate too high"},
+ {0x5D13, "Hardware impending failure seek error rate too high"},
+ {0x5D14, "Hardware impending failure too many block reassigns"},
+ {0x5D15, "Hardware impending failure access times too high"},
+ {0x5D16, "Hardware impending failure start unit times too high"},
+ {0x5D17, "Hardware impending failure channel parametrics"},
+ {0x5D18, "Hardware impending failure controller detected"},
+ {0x5D19, "Hardware impending failure throughput performance"},
+ {0x5D1A, "Hardware impending failure seek time performance"},
+ {0x5D1B, "Hardware impending failure spin-up retry count"},
+ {0x5D1C, "Hardware impending failure drive calibration retry count"},
+ {0x5D20, "Controller impending failure general hard drive failure"},
+ {0x5D21, "Controller impending failure drive error rate too high"},
+ {0x5D22, "Controller impending failure data error rate too high"},
+ {0x5D23, "Controller impending failure seek error rate too high"},
+ {0x5D24, "Controller impending failure too many block reassigns"},
+ {0x5D25, "Controller impending failure access times too high"},
+ {0x5D26, "Controller impending failure start unit times too high"},
+ {0x5D27, "Controller impending failure channel parametrics"},
+ {0x5D28, "Controller impending failure controller detected"},
+ {0x5D29, "Controller impending failure throughput performance"},
+ {0x5D2A, "Controller impending failure seek time performance"},
+ {0x5D2B, "Controller impending failure spin-up retry count"},
+ {0x5D2C, "Controller impending failure drive calibration retry count"},
+ {0x5D30, "Data channel impending failure general hard drive failure"},
+ {0x5D31, "Data channel impending failure drive error rate too high"},
+ {0x5D32, "Data channel impending failure data error rate too high"},
+ {0x5D33, "Data channel impending failure seek error rate too high"},
+ {0x5D34, "Data channel impending failure too many block reassigns"},
+ {0x5D35, "Data channel impending failure access times too high"},
+ {0x5D36, "Data channel impending failure start unit times too high"},
+ {0x5D37, "Data channel impending failure channel parametrics"},
+ {0x5D38, "Data channel impending failure controller detected"},
+ {0x5D39, "Data channel impending failure throughput performance"},
+ {0x5D3A, "Data channel impending failure seek time performance"},
+ {0x5D3B, "Data channel impending failure spin-up retry count"},
+ {0x5D3C, "Data channel impending failure drive calibration retry "
+ "count"},
+ {0x5D40, "Servo impending failure general hard drive failure"},
+ {0x5D41, "Servo impending failure drive error rate too high"},
+ {0x5D42, "Servo impending failure data error rate too high"},
+ {0x5D43, "Servo impending failure seek error rate too high"},
+ {0x5D44, "Servo impending failure too many block reassigns"},
+ {0x5D45, "Servo impending failure access times too high"},
+ {0x5D46, "Servo impending failure start unit times too high"},
+ {0x5D47, "Servo impending failure channel parametrics"},
+ {0x5D48, "Servo impending failure controller detected"},
+ {0x5D49, "Servo impending failure throughput performance"},
+ {0x5D4A, "Servo impending failure seek time performance"},
+ {0x5D4B, "Servo impending failure spin-up retry count"},
+ {0x5D4C, "Servo impending failure drive calibration retry count"},
+ {0x5D50, "Spindle impending failure general hard drive failure"},
+ {0x5D51, "Spindle impending failure drive error rate too high"},
+ {0x5D52, "Spindle impending failure data error rate too high"},
+ {0x5D53, "Spindle impending failure seek error rate too high"},
+ {0x5D54, "Spindle impending failure too many block reassigns"},
+ {0x5D55, "Spindle impending failure access times too high"},
+ {0x5D56, "Spindle impending failure start unit times too high"},
+ {0x5D57, "Spindle impending failure channel parametrics"},
+ {0x5D58, "Spindle impending failure controller detected"},
+ {0x5D59, "Spindle impending failure throughput performance"},
+ {0x5D5A, "Spindle impending failure seek time performance"},
+ {0x5D5B, "Spindle impending failure spin-up retry count"},
+ {0x5D5C, "Spindle impending failure drive calibration retry count"},
+ {0x5D60, "Firmware impending failure general hard drive failure"},
+ {0x5D61, "Firmware impending failure drive error rate too high"},
+ {0x5D62, "Firmware impending failure data error rate too high"},
+ {0x5D63, "Firmware impending failure seek error rate too high"},
+ {0x5D64, "Firmware impending failure too many block reassigns"},
+ {0x5D65, "Firmware impending failure access times too high"},
+ {0x5D66, "Firmware impending failure start unit times too high"},
+ {0x5D67, "Firmware impending failure channel parametrics"},
+ {0x5D68, "Firmware impending failure controller detected"},
+ {0x5D69, "Firmware impending failure throughput performance"},
+ {0x5D6A, "Firmware impending failure seek time performance"},
+ {0x5D6B, "Firmware impending failure spin-up retry count"},
+ {0x5D6C, "Firmware impending failure drive calibration retry count"},
+ {0x5DFF, "Failure prediction threshold exceeded (false)"},
+
+ {0x5E00, "Low power condition on"},
+ {0x5E01, "Idle condition activated by timer"},
+ {0x5E02, "Standby condition activated by timer"},
+ {0x5E03, "Idle condition activated by command"},
+ {0x5E04, "Standby condition activated by command"},
+ {0x5E05, "Idle_b condition activated by timer"},
+ {0x5E06, "Idle_b condition activated by command"},
+ {0x5E07, "Idle_c condition activated by timer"},
+ {0x5E08, "Idle_c condition activated by command"},
+ {0x5E09, "Standby_y condition activated by timer"},
+ {0x5E0A, "Standby_y condition activated by command"},
+ {0x5E41, "Power state change to active"},
+ {0x5E42, "Power state change to idle"},
+ {0x5E43, "Power state change to standby"},
+ {0x5E45, "Power state change to sleep"},
+ {0x5E47, "Power state change to device control"},
+
+ {0x6000, "Lamp failure"},
+
+ {0x6100, "Video acquisition error"},
+ {0x6101, "Unable to acquire video"},
+ {0x6102, "Out of focus"},
+
+ {0x6200, "Scan head positioning error"},
+
+ {0x6300, "End of user area encountered on this track"},
+ {0x6301, "Packet does not fit in available space"},
+
+ {0x6400, "Illegal mode for this track"},
+ {0x6401, "Invalid packet size"},
+
+ {0x6500, "Voltage fault"},
+
+ {0x6600, "Automatic document feeder cover up"},
+ {0x6601, "Automatic document feeder lift up"},
+ {0x6602, "Document jam in automatic document feeder"},
+ {0x6603, "Document miss feed automatic in document feeder"},
+
+ {0x6700, "Configuration failure"},
+ {0x6701, "Configuration of incapable logical units failed"},
+ {0x6702, "Add logical unit failed"},
+ {0x6703, "Modification of logical unit failed"},
+ {0x6704, "Exchange of logical unit failed"},
+ {0x6705, "Remove of logical unit failed"},
+ {0x6706, "Attachment of logical unit failed"},
+ {0x6707, "Creation of logical unit failed"},
+ {0x6708, "Assign failure occurred"},
+ {0x6709, "Multiply assigned logical unit"},
+ {0x670A, "Set target port groups command failed"},
+ {0x670B, "ATA device feature not enabled"},
+
+ {0x6800, "Logical unit not configured"},
+ {0x6801, "Subsidiary logical unit not configured"},
+
+ {0x6900, "Data loss on logical unit"},
+ {0x6901, "Multiple logical unit failures"},
+ {0x6902, "Parity/data mismatch"},
+
+ {0x6A00, "Informational, refer to log"},
+
+ {0x6B00, "State change has occurred"},
+ {0x6B01, "Redundancy level got better"},
+ {0x6B02, "Redundancy level got worse"},
+
+ {0x6C00, "Rebuild failure occurred"},
+
+ {0x6D00, "Recalculate failure occurred"},
+
+ {0x6E00, "Command to logical unit failed"},
+
+ {0x6F00, "Copy protection key exchange failure - authentication "
+ "failure"},
+ {0x6F01, "Copy protection key exchange failure - key not present"},
+ {0x6F02, "Copy protection key exchange failure - key not established"},
+ {0x6F03, "Read of scrambled sector without authentication"},
+ {0x6F04, "Media region code is mismatched to logical unit region"},
+ {0x6F05, "Drive region must be permanent/region reset count error"},
+ {0x6F06, "Insufficient block count for binding nonce recording"},
+ {0x6F07, "Conflict in binding nonce recording"},
+/*
+ * {0x70NN, "Decompression exception short algorithm id of nn"},
+ */
+ {0x7100, "Decompression exception long algorithm id"},
+
+ {0x7200, "Session fixation error"},
+ {0x7201, "Session fixation error writing lead-in"},
+ {0x7202, "Session fixation error writing lead-out"},
+ {0x7203, "Session fixation error - incomplete track in session"},
+ {0x7204, "Empty or partially written reserved track"},
+ {0x7205, "No more track reservations allowed"},
+ {0x7206, "RMZ extension is not allowed"},
+ {0x7207, "No more test zone extensions are allowed"},
+
+ {0x7300, "Cd control error"},
+ {0x7301, "Power calibration area almost full"},
+ {0x7302, "Power calibration area is full"},
+ {0x7303, "Power calibration area error"},
+ {0x7304, "Program memory area update failure"},
+ {0x7305, "Program memory area is full"},
+ {0x7306, "RMA/PMA is almost full"},
+ {0x7310, "Current power calibration area almost full"},
+ {0x7311, "Current power calibration area is full"},
+ {0x7317, "RDZ is full"},
+
+ {0x7400, "Security error"},
+ {0x7401, "Unable to decrypt data"},
+ {0x7402, "Unencrypted data encountered while decrypting"},
+ {0x7403, "Incorrect data encryption key"},
+ {0x7404, "Cryptographic integrity validation failed"},
+ {0x7405, "Error decrypting data"},
+ {0x7406, "Unknown signature verification key"},
+ {0x7407, "Encryption parameters not useable"},
+ {0x7408, "Digital signature validation failure"},
+ {0x7409, "Encryption mode mismatch on read"},
+ {0x740A, "Encrypted block not raw read enabled"},
+ {0x740B, "Incorrect Encryption parameters"},
+ {0x740C, "Unable to decrypt parameter list"},
+ {0x740D, "Encryption algorithm disabled"},
+ {0x7410, "SA creation parameter value invalid"},
+ {0x7411, "SA creation parameter value rejected"},
+ {0x7412, "Invalid SA usage"},
+ {0x7421, "Data Encryption configuration prevented"},
+ {0x7430, "SA creation parameter not supported"},
+ {0x7440, "Authentication failed"},
+ {0x7461, "External data encryption key manager access error"},
+ {0x7462, "External data encryption key manager error"},
+ {0x7463, "External data encryption key not found"},
+ {0x7464, "External data encryption request not authorized"},
+ {0x746E, "External data encryption control timeout"},
+ {0x746F, "External data encryption control error"},
+ {0x7471, "Logical unit access not authorized"},
+ {0x7479, "Security conflict in translated device"},
+
+ {0, NULL}
+};
+
+struct error_info2 {
+ unsigned char code1, code2_min, code2_max;
+ const char * str;
+ const char * fmt;
+};
+
+static const struct error_info2 additional2[] =
+{
+ {0x40, 0x00, 0x7f, "Ram failure", ""},
+ {0x40, 0x80, 0xff, "Diagnostic failure on component", ""},
+ {0x41, 0x00, 0xff, "Data path failure", ""},
+ {0x42, 0x00, 0xff, "Power-on or self-test failure", ""},
+ {0x4D, 0x00, 0xff, "Tagged overlapped commands", "task tag "},
+ {0x70, 0x00, 0xff, "Decompression exception", "short algorithm id of "},
+ {0, 0, 0, NULL, NULL}
+};
+
+/* description of the sense key values */
+static const char * const snstext[] = {
+ "No Sense", /* 0: There is no sense information */
+ "Recovered Error", /* 1: The last command completed successfully
+ but used error correction */
+ "Not Ready", /* 2: The addressed target is not ready */
+ "Medium Error", /* 3: Data error detected on the medium */
+ "Hardware Error", /* 4: Controller or device failure */
+ "Illegal Request", /* 5: Error in request */
+ "Unit Attention", /* 6: Removable medium was changed, or
+ the target has been reset, or ... */
+ "Data Protect", /* 7: Access to the data is blocked */
+ "Blank Check", /* 8: Reached unexpected written or unwritten
+ region of the medium */
+ "Vendor Specific(9)",
+ "Copy Aborted", /* A: COPY or COMPARE was aborted */
+ "Aborted Command", /* B: The target aborted the command */
+ "Equal", /* C: A SEARCH DATA command found data equal,
+ reserved in SPC-4 rev 36 */
+ "Volume Overflow", /* D: Medium full with still data to be written */
+ "Miscompare", /* E: Source data and data on the medium
+ do not agree */
+ "Completed", /* F: command completed sense data reported,
+ may occur for successful command */
+};
+
+/* Get sense key string or NULL if not available */
+const char *
+scsi_sense_key_string(unsigned char key) {
+ if (key <= 0xE)
+ return snstext[key];
+ return NULL;
+}
+EXPORT_SYMBOL(scsi_sense_key_string);
+
+/*
+ * Get additional sense code string or NULL if not available.
+ * This string may contain a "%x" and should be printed with ascq as arg.
+ */
+const char *
+scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
+{
+ int i;
+ unsigned short code = ((asc << 8) | ascq);
+
+ *fmt = NULL;
+ for (i = 0; additional[i].text; i++)
+ if (additional[i].code12 == code)
+ return additional[i].text;
+ for (i = 0; additional2[i].fmt; i++) {
+ if (additional2[i].code1 == asc &&
+ ascq >= additional2[i].code2_min &&
+ ascq <= additional2[i].code2_max) {
+ *fmt = additional2[i].fmt;
+ return additional2[i].str;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(scsi_extd_sense_format);
+
+static const char * const hostbyte_table[]={
+"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
+"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
+"DID_NEXUS_FAILURE" };
+
+static const char * const driverbyte_table[]={
+"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
+"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
+
+const char *scsi_hostbyte_string(int result)
+{
+ const char *hb_string = NULL;
+ int hb = host_byte(result);
+
+ if (hb < ARRAY_SIZE(hostbyte_table))
+ hb_string = hostbyte_table[hb];
+ return hb_string;
+}
+EXPORT_SYMBOL(scsi_hostbyte_string);
+
+const char *scsi_driverbyte_string(int result)
+{
+ const char *db_string = NULL;
+ int db = driver_byte(result);
+
+ if (db < ARRAY_SIZE(driverbyte_table))
+ db_string = driverbyte_table[db];
+ return db_string;
+}
+EXPORT_SYMBOL(scsi_driverbyte_string);
+
+#define scsi_mlreturn_name(result) { result, #result }
+static const struct value_name_pair scsi_mlreturn_arr[] = {
+ scsi_mlreturn_name(NEEDS_RETRY),
+ scsi_mlreturn_name(SUCCESS),
+ scsi_mlreturn_name(FAILED),
+ scsi_mlreturn_name(QUEUED),
+ scsi_mlreturn_name(SOFT_ERROR),
+ scsi_mlreturn_name(ADD_TO_MLQUEUE),
+ scsi_mlreturn_name(TIMEOUT_ERROR),
+ scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED),
+ scsi_mlreturn_name(FAST_IO_FAIL)
+};
+
+const char *scsi_mlreturn_string(int result)
+{
+ const struct value_name_pair *arr = scsi_mlreturn_arr;
+ int k;
+
+ for (k = 0; k < ARRAY_SIZE(scsi_mlreturn_arr); ++k, ++arr) {
+ if (result == arr->value)
+ return arr->name;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(scsi_mlreturn_string);
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig
new file mode 100644
index 000000000..7c7e50859
--- /dev/null
+++ b/drivers/scsi/csiostor/Kconfig
@@ -0,0 +1,19 @@
+config SCSI_CHELSIO_FCOE
+ tristate "Chelsio Communications FCoE support"
+ depends on PCI && SCSI
+ depends on SCSI_FC_ATTRS
+ select FW_LOADER
+ help
+ This driver supports FCoE Offload functionality over
+ Chelsio T4-based 10Gb Converged Network Adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called csiostor.
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
new file mode 100644
index 000000000..3681a3fbd
--- /dev/null
+++ b/drivers/scsi/csiostor/Makefile
@@ -0,0 +1,12 @@
+#
+## Chelsio FCoE driver
+#
+##
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
+
+csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
+ csio_hw.o csio_hw_t5.o csio_isr.o \
+ csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
new file mode 100644
index 000000000..2d1c4ebd4
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -0,0 +1,796 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_init.h"
+
+static void
+csio_vport_set_state(struct csio_lnode *ln);
+
+/*
+ * csio_reg_rnode - Register a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_add() to register this remote port with FC transport.
+ * If remote port is Initiator OR Target OR both, change the role appropriately.
+ *
+ */
+void
+csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct csio_service_parms *sp;
+
+ ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));
+ ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));
+ ids.port_id = rn->nport_id;
+ ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
+ rport = rn->rport;
+ CSIO_ASSERT(rport != NULL);
+ goto update_role;
+ }
+
+ rn->rport = fc_remote_port_add(shost, 0, &ids);
+ if (!rn->rport) {
+ csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
+ rn->nport_id);
+ return;
+ }
+
+ ln->num_reg_rnodes++;
+ rport = rn->rport;
+ spin_lock_irq(shost->host_lock);
+ *((struct csio_rnode **)rport->dd_data) = rn;
+ spin_unlock_irq(shost->host_lock);
+
+ sp = &rn->rn_sparm;
+ rport->maxframe_size = ntohs(sp->csp.sp_bb_data);
+ if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
+ rport->supported_classes = FC_COS_CLASS3;
+ else
+ rport->supported_classes = FC_COS_UNSPECIFIED;
+update_role:
+ if (rn->role & CSIO_RNFR_INITIATOR)
+ ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (rn->role & CSIO_RNFR_TARGET)
+ ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
+ fc_remote_port_rolechg(rport, ids.roles);
+
+ rn->scsi_id = rport->scsi_target_id;
+
+ csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
+ rn->nport_id, ids.roles);
+}
+
+/*
+ * csio_unreg_rnode - Unregister a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_delete() to unregister this remote port with FC
+ * transport.
+ *
+ */
+void
+csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct fc_rport *rport = rn->rport;
+
+ rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
+ fc_remote_port_delete(rport);
+ ln->num_reg_rnodes--;
+
+ csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
+}
+
+/*
+ * csio_lnode_async_event - Async events from local port.
+ * @ln: lnode representing local port.
+ *
+ * Async events from local node that FC transport/SCSI ML
+ * should be made aware of (Eg: RSCN).
+ */
+void
+csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
+{
+ switch (fc_evt) {
+ case CSIO_LN_FC_RSCN:
+ /* Get payload of rscn from ln */
+ /* For each RSCN entry */
+ /*
+ * fc_host_post_event(shost,
+ * fc_get_event_number(),
+ * FCH_EVT_RSCN,
+ * rscn_entry);
+ */
+ break;
+ case CSIO_LN_FC_LINKUP:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_LINKDOWN:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_ATTRIB_UPDATE:
+ csio_fchost_attr_init(ln);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * csio_fchost_attr_init - Initialize FC transport attributes
+ * @ln: Lnode.
+ *
+ */
+void
+csio_fchost_attr_init(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
+ fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
+
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(shost) =
+ (csio_lnode_to_hw(ln))->fres_info.max_vnps;
+ fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
+ FC_PORTSPEED_1GBIT;
+
+ fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data);
+ memset(fc_host_supported_fc4s(shost), 0,
+ sizeof(fc_host_supported_fc4s(shost)));
+ fc_host_supported_fc4s(shost)[7] = 1;
+
+ memset(fc_host_active_fc4s(shost), 0,
+ sizeof(fc_host_active_fc4s(shost)));
+ fc_host_active_fc4s(shost)[7] = 1;
+}
+
+/*
+ * csio_get_host_port_id - sysfs entries for nport_id is
+ * populated/cached from this function
+ */
+static void
+csio_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ fc_host_port_id(shost) = ln->nport_id;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_port_type - Return FC local port type.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ if (csio_is_npiv_ln(ln))
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_port_state - Return FC local port state.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ char state[16];
+
+ spin_lock_irq(&hw->lock);
+
+ csio_lnode_state_to_str(ln, state);
+ if (!strcmp(state, "READY"))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else if (!strcmp(state, "OFFLINE"))
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_speed - Return link speed to FC transport.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_speed(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ switch (hw->pport[ln->portid].link_speed) {
+ case FW_PORT_CAP_SPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case FW_PORT_CAP_SPEED_10G:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_fabric_name - Return fabric name
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_rnode *rn = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
+ if (rn)
+ fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
+ else
+ fc_host_fabric_name(shost) = 0;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_speed - Return FC transport statistics.
+ * @ln: Lnode.
+ *
+ */
+static struct fc_host_statistics *
+csio_get_stats(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct fc_host_statistics *fhs = &ln->fch_stats;
+ struct fw_fcoe_port_stats fcoe_port_stats;
+ uint64_t seconds;
+
+ memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
+ csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
+
+ fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_frames));
+ fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_frames));
+ fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames);
+ fhs->fcp_input_requests += ln->stats.n_input_requests;
+ fhs->fcp_output_requests += ln->stats.n_output_requests;
+ fhs->fcp_control_requests += ln->stats.n_control_requests;
+ fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20;
+ fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20;
+ fhs->link_failure_count = ln->stats.n_link_down;
+ /* Reset stats for the device */
+ seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
+ do_div(seconds, 1000);
+ fhs->seconds_since_last_reset = seconds;
+
+ return fhs;
+}
+
+/*
+ * csio_set_rport_loss_tmo - Set the rport dev loss timeout
+ * @rport: fc rport.
+ * @timeout: new value for dev loss tmo.
+ *
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ */
+static void
+csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+static void
+csio_vport_set_state(struct csio_lnode *ln)
+{
+ struct fc_vport *fc_vport = ln->fc_vport;
+ struct csio_lnode *pln = ln->pln;
+ char state[16];
+
+ /* Set fc vport state based on phyiscal lnode */
+ csio_lnode_state_to_str(pln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+
+ if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
+ fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
+ return;
+ }
+
+ /* Set fc vport state based on virtual lnode */
+ csio_lnode_state_to_str(ln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+static int
+csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to alloc vport */
+ /* Allocate Mbox request */
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+ ln->fcf_flowid = pln->fcf_flowid;
+ ln->portid = pln->portid;
+
+ csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ pln->fcf_flowid, pln->vnp_flowid, 0,
+ csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ /* FW is expected to complete vnp cmd in immediate mode
+ * without much delay.
+ * Otherwise, there will be increase in IO latency since HW
+ * lock is held till completion of vnp mbox cmd.
+ */
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
+ ntohl(rsp->gen_wwn_to_vnpi));
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
+ csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
+ ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
+ ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
+ ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
+ csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
+ ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
+ ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
+ ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to free vport */
+ /* Allocate Mbox request */
+
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+
+ csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid, ln->vnp_flowid,
+ NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ }
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct csio_lnode *pln = shost_priv(shost);
+ struct csio_lnode *ln = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(pln);
+ uint8_t wwn[8];
+ int ret = -1;
+
+ ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
+ if (!ln)
+ goto error;
+
+ if (fc_vport->node_name != 0) {
+ u64_to_wwn(fc_vport->node_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwnn\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwnn(ln), wwn, 8);
+ }
+
+ if (fc_vport->port_name != 0) {
+ u64_to_wwn(fc_vport->port_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwpn\n");
+ goto error;
+ }
+
+ if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. wwpn already exists\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwpn(ln), wwn, 8);
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+
+ if (csio_fcoe_alloc_vnp(hw, ln))
+ goto error;
+
+ *(struct csio_lnode **)fc_vport->dd_data = ln;
+ ln->fc_vport = fc_vport;
+ if (!fc_vport->node_name)
+ fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
+ if (!fc_vport->port_name)
+ fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
+ csio_fchost_attr_init(ln);
+ return 0;
+error:
+ if (ln)
+ csio_shost_exit(ln);
+
+ return ret;
+}
+
+static int
+csio_vport_delete(struct fc_vport *fc_vport)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ int rmv;
+
+ spin_lock_irq(&hw->lock);
+ rmv = csio_is_hw_removing(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (rmv) {
+ csio_shost_exit(ln);
+ return 0;
+ }
+
+ /* Quiesce ios and send remove event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_close(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ if (fc_vport->vport_state != FC_VPORT_DISABLED)
+ csio_fcoe_free_vnp(hw, ln);
+
+ csio_shost_exit(ln);
+ return 0;
+}
+
+static int
+csio_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* disable vport */
+ if (disable) {
+ /* Quiesce ios and send stop event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_stop(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ csio_fcoe_free_vnp(hw, ln);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ csio_ln_err(ln, "vport disabled\n");
+ return 0;
+ } else {
+ /* enable vport */
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ if (csio_fcoe_alloc_vnp(hw, ln)) {
+ csio_ln_err(ln, "vport enabled failed.\n");
+ return -1;
+ }
+ csio_ln_err(ln, "vport enabled\n");
+ return 0;
+ }
+}
+
+static void
+csio_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct csio_rnode *rn;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rn = *((struct csio_rnode **)rport->dd_data);
+ ln = csio_rnode_to_lnode(rn);
+ hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+
+ /* return if driver is being removed or same rnode comes back online */
+ if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
+ goto out;
+
+ csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
+ rn, rn->nport_id, csio_rn_flowid(rn));
+
+ CSIO_INC_STATS(ln, n_dev_loss_tmo);
+
+ /*
+ * enqueue devloss event to event worker thread to serialize all
+ * rnode events.
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out;
+ }
+
+ if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+
+out:
+ spin_unlock_irq(&hw->lock);
+}
+
+/* FC transport functions template - Physical port */
+struct fc_function_template csio_fc_transport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .show_host_active_fc4s = 1,
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+ .dd_fcvport_size = sizeof(struct csio_lnode *),
+
+ .vport_create = csio_vport_create,
+ .vport_disable = csio_vport_disable,
+ .vport_delete = csio_vport_delete,
+};
+
+/* FC transport functions template - Virtual port */
+struct fc_function_template csio_fc_transport_vport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+
+};
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
new file mode 100644
index 000000000..c38017b4a
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_DEFS_H__
+#define __CSIO_DEFS_H__
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+
+#define CSIO_INVALID_IDX 0xFFFFFFFF
+#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
+#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
+#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
+#define CSIO_DID_MASK 0xFFFFFF
+#define CSIO_WORD_TO_BYTE 4
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline int
+csio_list_deleted(struct list_head *list)
+{
+ return ((list->next == list) && (list->prev == list));
+}
+
+#define csio_list_next(elem) (((struct list_head *)(elem))->next)
+#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
+
+/* State machine */
+typedef void (*csio_sm_state_t)(void *, uint32_t);
+
+struct csio_sm {
+ struct list_head sm_list;
+ csio_sm_state_t sm_state;
+};
+
+static inline void
+csio_set_state(void *smp, void *state)
+{
+ ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+}
+
+static inline void
+csio_init_state(struct csio_sm *smp, void *state)
+{
+ csio_set_state(smp, state);
+}
+
+static inline void
+csio_post_event(void *smp, uint32_t evt)
+{
+ ((struct csio_sm *)smp)->sm_state(smp, evt);
+}
+
+static inline csio_sm_state_t
+csio_get_state(void *smp)
+{
+ return ((struct csio_sm *)smp)->sm_state;
+}
+
+static inline bool
+csio_match_state(void *smp, void *state)
+{
+ return (csio_get_state(smp) == (csio_sm_state_t)state);
+}
+
+#define CSIO_ASSERT(cond) BUG_ON(!(cond))
+
+#ifdef __CSIO_DEBUG__
+#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
+#else
+#define CSIO_DB_ASSERT(__c)
+#endif
+
+#endif /* ifndef __CSIO_DEFS_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
new file mode 100644
index 000000000..9ebc18d98
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -0,0 +1,3970 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_dbg_level = 0xFEFF;
+unsigned int csio_port_mask = 0xf;
+
+/* Default FW event queue entries. */
+static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;
+
+/* Default MSI param level */
+int csio_msi = 2;
+
+/* FCoE function instances */
+static int dev_num;
+
+/* FCoE Adapter types & its description */
+static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
+ {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
+ {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
+ {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
+ {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
+ {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
+ {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
+ {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
+ {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
+ {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
+ {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
+ {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
+ {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
+ {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
+ {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
+ {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
+ {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
+ {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
+ {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
+ {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
+ {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
+ {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
+};
+
+static void csio_mgmtm_cleanup(struct csio_mgmtm *);
+static void csio_hw_mbm_cleanup(struct csio_hw *);
+
+/* State machine forward declarations */
+static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
+
+static void csio_hw_initialize(struct csio_hw *hw);
+static void csio_evtq_stop(struct csio_hw *hw);
+static void csio_evtq_start(struct csio_hw *hw);
+
+int csio_is_hw_ready(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_ready);
+}
+
+int csio_is_hw_removing(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_removing);
+}
+
+
+/*
+ * csio_hw_wait_op_done_val - wait until an operation is completed
+ * @hw: the HW module
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int
+csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
+ int polarity, int attempts, int delay, uint32_t *valp)
+{
+ uint32_t val;
+ while (1) {
+ val = csio_rd_reg32(hw, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+/*
+ * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @hw: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void
+csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
+ val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
+ csio_wr_reg32(hw, val, TP_PIO_DATA_A);
+}
+
+void
+csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
+ uint32_t value)
+{
+ uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
+
+ csio_wr_reg32(hw, val | value, reg);
+ /* Flush */
+ csio_rd_reg32(hw, reg);
+
+}
+
+static int
+csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
+{
+ return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
+ addr, len, buf, 0);
+}
+
+/*
+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
+ */
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
+#define VPD_INFO_FLD_HDR_SIZE 3
+
+/*
+ * csio_hw_seeprom_read - read a serial EEPROM location
+ * @hw: hw to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+static int
+csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
+{
+ uint16_t val = 0;
+ int attempts = EEPROM_MAX_RD_POLL;
+ uint32_t base = hw->params.pci.vpd_cap_addr;
+
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
+
+ do {
+ udelay(10);
+ pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*(__le32 *)data);
+
+ return 0;
+}
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t4_vpd_hdr {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[ID_LEN];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+};
+
+/*
+ * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
+ * the VPD
+ * @v: Pointer to buffered vpd data structure
+ * @kw: The keyword to search for
+ *
+ * Returns the value of the information field keyword or
+ * -EINVAL otherwise.
+ */
+static int
+csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
+{
+ int32_t i;
+ int32_t offset , len;
+ const uint8_t *buf = &v->id_tag;
+ const uint8_t *vpdr_len = &v->vpdr_tag;
+ offset = sizeof(struct t4_vpd_hdr);
+ len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
+
+ if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
+ return -EINVAL;
+
+ for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
+ if (memcmp(buf + i , kw, 2) == 0) {
+ i += VPD_INFO_FLD_HDR_SIZE;
+ return i;
+ }
+
+ i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+ }
+
+ return -EINVAL;
+}
+
+static int
+csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
+{
+ *pos = pci_find_capability(pdev, cap);
+ if (*pos)
+ return 0;
+
+ return -1;
+}
+
+/*
+ * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
+ * @hw: HW module
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int
+csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
+{
+ int i, ret, ec, sn, addr;
+ uint8_t *vpd, csum;
+ const struct t4_vpd_hdr *v;
+ /* To get around compilation warning from strstrip */
+ char *s;
+
+ if (csio_is_valid_vpd(hw))
+ return 0;
+
+ ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
+ &hw->params.pci.vpd_cap_addr);
+ if (ret)
+ return -EINVAL;
+
+ vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
+ if (vpd == NULL)
+ return -ENOMEM;
+
+ /*
+ * Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
+ ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
+ addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ for (i = 0; i < VPD_LEN; i += 4) {
+ ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
+ if (ret) {
+ kfree(vpd);
+ return ret;
+ }
+ }
+
+ /* Reset the VPD flag! */
+ hw->flags &= (~CSIO_HWF_VPD_VALID);
+
+ v = (const struct t4_vpd_hdr *)vpd;
+
+#define FIND_VPD_KW(var, name) do { \
+ var = csio_hw_get_vpd_keyword_val(v, name); \
+ if (var < 0) { \
+ csio_err(hw, "missing VPD keyword " name "\n"); \
+ kfree(vpd); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
+
+ if (csum) {
+ csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
+ kfree(vpd);
+ return -EINVAL;
+ }
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+#undef FIND_VPD_KW
+
+ memcpy(p->id, v->id_data, ID_LEN);
+ s = strstrip(p->id);
+ memcpy(p->ec, vpd + ec, EC_LEN);
+ s = strstrip(p->ec);
+ i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ s = strstrip(p->sn);
+
+ csio_valid_vpd_copied(hw);
+
+ kfree(vpd);
+ return 0;
+}
+
+/*
+ * csio_hw_sf1_read - read data from the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
+ int32_t lock, uint32_t *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
+ return -EBUSY;
+
+ csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
+ BYTECNT_V(byte_cnt - 1), SF_OP_A);
+ ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+ 10, NULL);
+ if (!ret)
+ *valp = csio_rd_reg32(hw, SF_DATA_A);
+ return ret;
+}
+
+/*
+ * csio_hw_sf1_write - write data to the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
+ int32_t lock, uint32_t val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
+ return -EBUSY;
+
+ csio_wr_reg32(hw, val, SF_DATA_A);
+ csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
+ OP_V(1) | SF_LOCK_V(lock), SF_OP_A);
+
+ return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+ 10, NULL);
+}
+
+/*
+ * csio_hw_flash_wait_op - wait for a flash operation to complete
+ * @hw: the HW module
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int
+csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
+{
+ int ret;
+ uint32_t status;
+
+ while (1) {
+ ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
+ if (ret != 0)
+ return ret;
+
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/*
+ * csio_hw_read_flash - read words from serial flash
+ * @hw: the HW module
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianess.
+ */
+static int
+csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
+ uint32_t *data, int32_t byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = (__force __u32) htonl(*data);
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_write_flash - write up to a page of data to the serial flash
+ * @hw: the hw
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int
+csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
+ uint32_t n, const uint8_t *data)
+{
+ int ret = -EINVAL;
+ uint32_t buf[64];
+ uint32_t i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto unlock;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
+ if (ret != 0)
+ goto unlock;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = csio_hw_flash_wait_op(hw, 8, 1);
+ if (ret)
+ goto unlock;
+
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
+ csio_err(hw,
+ "failed to correctly write the flash page at %#x\n",
+ addr);
+ return -EINVAL;
+ }
+
+ return 0;
+
+unlock:
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
+ return ret;
+}
+
+/*
+ * csio_hw_flash_erase_sectors - erase a range of flash sectors
+ * @hw: the HW module
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int
+csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_sf1_write(hw, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8));
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_flash_wait_op(hw, 14, 500);
+ if (ret != 0)
+ goto out;
+
+ start++;
+ }
+out:
+ if (ret)
+ csio_err(hw, "erase of flash sector %d failed, error %d\n",
+ start, ret);
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
+ return 0;
+}
+
+static void
+csio_hw_print_fw_version(struct csio_hw *hw, char *str)
+{
+ csio_info(hw, "%s: %u.%u.%u.%u\n", str,
+ FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_G(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_G(hw->fwrev));
+}
+
+/*
+ * csio_hw_get_fw_version - read the firmware version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int
+csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
+{
+ return csio_hw_read_flash(hw, FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_get_tp_version - read the TP microcode version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int
+csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
+{
+ return csio_hw_read_flash(hw, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_fw_dload - download firmware.
+ * @hw: HW module
+ * @fw_data: firmware image to write.
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+static int
+csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
+{
+ uint32_t csum;
+ int32_t addr;
+ int ret;
+ uint32_t i;
+ uint8_t first_page[SF_PAGE_SIZE];
+ const __be32 *p = (const __be32 *)fw_data;
+ struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
+ uint32_t sf_sec_size;
+
+ if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
+ csio_err(hw, "Serial Flash data invalid\n");
+ return -EINVAL;
+ }
+
+ if (!size) {
+ csio_err(hw, "FW image has no data\n");
+ return -EINVAL;
+ }
+
+ if (size & 511) {
+ csio_err(hw, "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+
+ if (ntohs(hdr->len512) * 512 != size) {
+ csio_err(hw, "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+
+ if (size > FLASH_FW_MAX_SIZE) {
+ csio_err(hw, "FW image too large, max is %u bytes\n",
+ FLASH_FW_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
+ return -EINVAL;
+ }
+
+ sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+
+ csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
+ FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);
+
+ ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
+ FLASH_FW_START_SEC + i - 1);
+ if (ret) {
+ csio_err(hw, "Flash Erase failed\n");
+ goto out;
+ }
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
+ FW_IMG_START, FW_IMG_START + size);
+
+ addr = FLASH_FW_START;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = csio_hw_write_flash(hw,
+ FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver),
+ (const uint8_t *)&hdr->fw_ver);
+
+out:
+ if (ret)
+ csio_err(hw, "firmware download failed, error %d\n", ret);
+ return ret;
+}
+
+static int
+csio_hw_get_flash_params(struct csio_hw *hw)
+{
+ int ret;
+ uint32_t info = 0;
+
+ ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
+ if (ret != 0)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ hw->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ hw->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ hw->params.sf_size = 1 << info;
+
+ return 0;
+}
+
+/*****************************************************************************/
+/* HW State machine assists */
+/*****************************************************************************/
+
+static int
+csio_hw_dev_ready(struct csio_hw *hw)
+{
+ uint32_t reg;
+ int cnt = 6;
+
+ while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
+ (--cnt != 0))
+ mdelay(100);
+
+ if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
+ (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
+ csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
+ return -EIO;
+ }
+
+ hw->pfn = SOURCEPF_G(reg);
+
+ return 0;
+}
+
+/*
+ * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
+ * @hw: HW module
+ * @state: Device state
+ *
+ * FW_HELLO_CMD has to be polled for completion.
+ */
+static int
+csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
+{
+ struct csio_mb *mbp;
+ int rv = 0;
+ enum fw_retval retval;
+ uint8_t mpfn;
+ char state_str[16];
+ int retries = FW_CMD_HELLO_RETRIES;
+
+ memset(state_str, 0, sizeof(state_str));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ rv = -ENOMEM;
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto out;
+ }
+
+retry:
+ csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
+ hw->pfn, CSIO_MASTER_MAY, NULL);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv) {
+ csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
+ goto out_free_mb;
+ }
+
+ csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
+ rv = -EINVAL;
+ goto out_free_mb;
+ }
+
+ /* Firmware has designated us to be master */
+ if (hw->pfn == mpfn) {
+ hw->flags |= CSIO_HWF_MASTER;
+ } else if (*state == CSIO_DEV_STATE_UNINIT) {
+ /*
+ * If we're not the Master PF then we need to wait around for
+ * the Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable
+ * PF and there is no current Master PF; a Master PF may show up
+ * momentarily and we wouldn't want to fail pointlessly. (This
+ * can happen when an OS loads lots of different drivers rapidly
+ * at the same time). In this case, the Master PF returned by
+ * the firmware will be PCIE_FW_MASTER_MASK so the test below
+ * will work ...
+ */
+
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ uint32_t pcie_fw;
+
+ spin_unlock_irq(&hw->lock);
+ msleep(50);
+ spin_lock_irq(&hw->lock);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialialized are indicated
+ * by the firmware keep waiting till we exaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
+ if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ rv = -ETIMEDOUT;
+ break;
+ }
+ continue;
+ }
+
+ /*
+ * We either have an Error or Initialized condition
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & PCIE_FW_ERR_F) {
+ *state = CSIO_DEV_STATE_ERR;
+ rv = -ETIMEDOUT;
+ } else if (pcie_fw & PCIE_FW_INIT_F)
+ *state = CSIO_DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+ * there's not a valid Master PF, grab its identity
+ * for our caller.
+ */
+ if (mpfn == PCIE_FW_MASTER_M &&
+ (pcie_fw & PCIE_FW_MASTER_VLD_F))
+ mpfn = PCIE_FW_MASTER_G(pcie_fw);
+ break;
+ }
+ hw->flags &= ~CSIO_HWF_MASTER;
+ }
+
+ switch (*state) {
+ case CSIO_DEV_STATE_UNINIT:
+ strcpy(state_str, "Initializing");
+ break;
+ case CSIO_DEV_STATE_INIT:
+ strcpy(state_str, "Initialized");
+ break;
+ case CSIO_DEV_STATE_ERR:
+ strcpy(state_str, "Error");
+ break;
+ default:
+ strcpy(state_str, "Unknown");
+ break;
+ }
+
+ if (hw->pfn == mpfn)
+ csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
+ hw->pfn, state_str);
+ else
+ csio_info(hw,
+ "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
+ hw->pfn, mpfn, state_str);
+
+out_free_mb:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return rv;
+}
+
+/*
+ * csio_do_bye - Perform the BYE FW Mailbox command and process response.
+ * @hw: HW module
+ *
+ */
+static int
+csio_do_bye(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of BYE command failed\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_do_reset- Perform the device reset.
+ * @hw: HW module
+ * @fw_rst: FW reset
+ *
+ * If fw_rst is set, issues FW reset mbox cmd otherwise
+ * does PIO reset.
+ * Performs reset of the function.
+ */
+static int
+csio_do_reset(struct csio_hw *hw, bool fw_rst)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ if (!fw_rst) {
+ /* PIO reset */
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
+ mdelay(2000);
+ return 0;
+ }
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE_F | PIORST_F, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed.n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
+ uint16_t caps;
+
+ caps = ntohs(rsp->fcoecaps);
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
+ csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
+ return -EINVAL;
+ }
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
+ csio_err(hw, "No FCoE Control Offload capability\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * PCIE_FW_MASTER_MASK).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+static int
+csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
+{
+ enum fw_retval retval = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= PCIE_FW_MASTER_M) {
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (retval == 0 || force) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+ csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
+ PCIE_FW_HALT_F);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return retval ? -EINVAL : 0;
+}
+
+/*
+ * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
+ * @hw: the HW module
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by csio_hw_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+static int
+csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= PCIE_FW_MASTER_M) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
+ msleep(100);
+ if (csio_do_reset(hw, true) == 0)
+ return 0;
+ }
+
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
+ msleep(2000);
+ } else {
+ int ms;
+
+ csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
+ return 0;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @fw_data: the firmware image to write
+ * @size: image size
+ * @force: force upgrade even if firmware doesn't cooperate
+ *
+ * Perform all of the steps necessary for upgrading an adapter's
+ * firmware image. Normally this requires the cooperation of the
+ * existing firmware in order to halt all existing activities
+ * but if an invalid mailbox token is passed in we skip that step
+ * (though we'll still put the adapter microprocessor into RESET in
+ * that case).
+ *
+ * On successful return the new firmware will have been loaded and
+ * the adapter will have been fully RESET losing all previous setup
+ * state. On unsuccessful return the adapter may be completely hosed ...
+ * positive errno indicates that the adapter is ~probably~ intact, a
+ * negative errno indicates that things are looking bad ...
+ */
+static int
+csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
+ const u8 *fw_data, uint32_t size, int32_t force)
+{
+ const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+ int reset, ret;
+
+ ret = csio_hw_fw_halt(hw, mbox, force);
+ if (ret != 0 && !force)
+ return ret;
+
+ ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Older versions of the firmware don't understand the new
+ * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+ * restart. So for newly loaded older firmware we'll have to do the
+ * RESET for it so it starts up on a clean slate. We can tell if
+ * the newly loaded firmware will handle this right by checking
+ * its header flags to see if it advertises the capability.
+ */
+ reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ return csio_hw_fw_restart(hw, mbox, reset);
+}
+
+/*
+ * csio_get_device_params - Get device parameters.
+ * @hw: HW module
+ *
+ */
+static int
+csio_get_device_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 param[6];
+ int i, j = 0;
+
+ /* Initialize portids to -1 */
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ hw->pport[i].portid = -1;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get port vec information. */
+ param[0] = FW_PARAM_DEV(PORTVEC);
+
+ /* Get Core clock. */
+ param[1] = FW_PARAM_DEV(CCLK);
+
+ /* Get EQ id start and end. */
+ param[2] = FW_PARAM_PFVF(EQ_START);
+ param[3] = FW_PARAM_PFVF(EQ_END);
+
+ /* Get IQ id start and end. */
+ param[4] = FW_PARAM_PFVF(IQFLINT_START);
+ param[5] = FW_PARAM_PFVF(IQFLINT_END);
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(param), param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(param), param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* cache the information. */
+ hw->port_vec = param[0];
+ hw->vpd.cclk = param[1];
+ wrm->fw_eq_start = param[2];
+ wrm->fw_iq_start = param[4];
+
+ /* Using FW configured max iqs & eqs */
+ if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
+ !csio_is_hw_master(hw)) {
+ hw->cfg_niq = param[5] - param[4] + 1;
+ hw->cfg_neq = param[3] - param[2] + 1;
+ csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
+ hw->cfg_niq, hw->cfg_neq);
+ }
+
+ hw->port_vec &= csio_port_mask;
+
+ hw->num_pports = hweight32(hw->port_vec);
+
+ csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
+ hw->port_vec, hw->num_pports);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ while ((hw->port_vec & (1 << j)) == 0)
+ j++;
+ hw->pport[i].portid = j++;
+ csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
+ }
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+
+/*
+ * csio_config_device_caps - Get and set device capabilities.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_device_caps(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv = -EINVAL;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
+ goto out;
+ }
+
+ /* Validate device capabilities */
+ rv = csio_hw_validate_caps(hw, mbp);
+ if (rv != 0)
+ goto out;
+
+ /* Don't config device capabilities if already configured */
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ rv = 0;
+ goto out;
+ }
+
+ /* Write back desired device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
+ false, true, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+/*
+ * csio_enable_ports - Bring up all available ports.
+ * @hw: HW module.
+ *
+ */
+static int
+csio_enable_ports(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_pports; i++) {
+ portid = hw->pport[i].portid;
+
+ /* Read PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+ false, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_port_rsp(hw, mbp, &retval,
+ &hw->pport[i].pcap);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Write back PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
+ (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ } /* For all ports */
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_get_fcoe_resinfo - Read fcoe fw resource info.
+ * @hw: HW module
+ * Issued with lock held.
+ */
+static int
+csio_get_fcoe_resinfo(struct csio_hw *hw)
+{
+ struct csio_fcoe_res_info *res_info = &hw->fres_info;
+ struct fw_fcoe_res_info_cmd *rsp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FW resource information */
+ csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+ retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ res_info->e_d_tov = ntohs(rsp->e_d_tov);
+ res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
+ res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
+ res_info->r_r_tov = ntohs(rsp->r_r_tov);
+ res_info->max_xchgs = ntohl(rsp->max_xchgs);
+ res_info->max_ssns = ntohl(rsp->max_ssns);
+ res_info->used_xchgs = ntohl(rsp->used_xchgs);
+ res_info->used_ssns = ntohl(rsp->used_ssns);
+ res_info->max_fcfs = ntohl(rsp->max_fcfs);
+ res_info->max_vnps = ntohl(rsp->max_vnps);
+ res_info->used_fcfs = ntohl(rsp->used_fcfs);
+ res_info->used_vnps = ntohl(rsp->used_vnps);
+
+ csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
+ res_info->max_xchgs);
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 _param[1];
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * Find out whether we're dealing with a version of
+ * the firmware which has configuration file support.
+ */
+ _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(_param), _param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(_param), _param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ *param = _param[0];
+
+ return 0;
+}
+
+static int
+csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
+{
+ int ret = 0;
+ const struct firmware *cf;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev;
+ unsigned int mtype = 0, maddr = 0;
+ uint32_t *cfg_data;
+ int value_to_add = 0;
+
+ if (reject_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
+ csio_err(hw, "could not find config file %s, err: %d\n",
+ FW_CFG_NAME_T5, ret);
+ return -ENOENT;
+ }
+
+ if (cf->size%4 != 0)
+ value_to_add = 4 - (cf->size % 4);
+
+ cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
+ if (cfg_data == NULL) {
+ ret = -ENOMEM;
+ goto leave;
+ }
+
+ memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+ ret = -EINVAL;
+ goto leave;
+ }
+
+ mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
+
+ ret = csio_memory_write(hw, mtype, maddr,
+ cf->size + value_to_add, cfg_data);
+
+ if ((ret == 0) && (value_to_add != 0)) {
+ union {
+ u32 word;
+ char buf[4];
+ } last;
+ size_t size = cf->size & ~0x3;
+ int i;
+
+ last.word = cfg_data[size >> 2];
+ for (i = value_to_add; i < 4; i++)
+ last.buf[i] = 0;
+ ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
+ }
+ if (ret == 0) {
+ csio_info(hw, "config file upgraded to %s\n",
+ FW_CFG_NAME_T5);
+ snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
+ }
+
+leave:
+ kfree(cfg_data);
+ release_firmware(cf);
+ return ret;
+}
+
+/*
+ * HW initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration -- either using the configuration
+ * file stored in flash on the adapter or using a filesystem-local file
+ * if available.
+ *
+ * If we don't have configuration file support in the firmware, then we'll
+ * have to set things up the old fashioned way with hard-coded register
+ * writes and firmware commands ...
+ */
+
+/*
+ * Attempt to initialize the HW via a Firmware Configuration File.
+ */
+static int
+csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+{
+ struct csio_mb *mbp = NULL;
+ struct fw_caps_config_cmd *caps_cmd;
+ unsigned int mtype, maddr;
+ int rv = -EINVAL;
+ uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
+ char path[64];
+ char *config_name = NULL;
+
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto bye;
+ }
+
+ /*
+ * If we have a configuration file in host ,
+ * then use that. Otherwise, use the configuration file stored
+ * in the HW flash ...
+ */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_config(hw, fw_cfg_param, path);
+ spin_lock_irq(&hw->lock);
+ if (rv != 0) {
+ /*
+ * config file was not found. Use default
+ * config file from flash.
+ */
+ config_name = "On FLASH";
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
+ } else {
+ config_name = path;
+ mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
+ }
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+ /*
+ * Tell the firmware to process the indicated Configuration File.
+ * If there are no errors and the caller has provided return value
+ * pointers for the [fini] section version, checksum and computed
+ * checksum, pass those back to the caller.
+ */
+ caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ caps_cmd->cfvalid_to_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
+ FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ rv = -EINVAL;
+ goto bye;
+ }
+
+ rv = csio_mb_fw_retval(mbp);
+ /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the
+ * firmware. A very few early versions of the firmware didn't
+ * have one embedded but we can ignore those.
+ */
+ if (rv == ENOENT) {
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ rv = -EINVAL;
+ goto bye;
+ }
+
+ rv = csio_mb_fw_retval(mbp);
+ config_name = "Firmware Default";
+ }
+ if (rv != FW_SUCCESS)
+ goto bye;
+
+ finiver = ntohl(caps_cmd->finiver);
+ finicsum = ntohl(caps_cmd->finicsum);
+ cfcsum = ntohl(caps_cmd->cfcsum);
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ rv = -EINVAL;
+ goto bye;
+ }
+
+ rv = csio_mb_fw_retval(mbp);
+ if (rv != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto bye;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ if (finicsum != cfcsum) {
+ csio_warn(hw,
+ "Config File checksum mismatch: csum=%#x, computed=%#x\n",
+ finicsum, cfcsum);
+ }
+
+ /* Validate device capabilities */
+ rv = csio_hw_validate_caps(hw, mbp);
+ if (rv != 0)
+ goto bye;
+ /*
+ * Note that we're operating with parameters
+ * not supplied by the driver, rather than from hard-wired
+ * initialization constants buried in the driver.
+ */
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto bye;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+ csio_info(hw, "Successfully configure using Firmware "
+ "Configuration File %s, version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ...
+ */
+bye:
+ if (mbp)
+ mempool_free(mbp, hw->mb_mempool);
+ hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
+ csio_warn(hw, "Configuration file error %d\n", rv);
+ return rv;
+}
+
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
+ */
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
+{
+
+ /* short circuit if it's the exact same firmware version */
+ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+ return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+ if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+ SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+ return 1;
+#undef SAME_INTF
+
+ return 0;
+}
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
+ int k, int c)
+{
+ const char *reason;
+
+ if (!card_fw_usable) {
+ reason = "incompatible or unusable";
+ goto install;
+ }
+
+ if (k > c) {
+ reason = "older than the version supported with this driver";
+ goto install;
+ }
+
+ return 0;
+
+install:
+ csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
+ "installing firmware %u.%u.%u.%u on card.\n",
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+
+ return 1;
+}
+
+static struct fw_info fw_info_array[] = {
+ {
+ .chip = CHELSIO_T5,
+ .fs_name = FW_CFG_NAME_T5,
+ .fw_mod_name = FW_FNAME_T5,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T5,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+ .intfver_nic = FW_INTFVER(T5, NIC),
+ .intfver_vnic = FW_INTFVER(T5, VNIC),
+ .intfver_ri = FW_INTFVER(T5, RI),
+ .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T5, FCOE),
+ },
+ }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+ if (fw_info_array[i].chip == chip)
+ return &fw_info_array[i];
+ }
+ return NULL;
+}
+
+static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum csio_dev_state state,
+ int *reset)
+{
+ int ret, card_fw_usable, fs_fw_usable;
+ const struct fw_hdr *fs_fw;
+ const struct fw_hdr *drv_fw;
+
+ drv_fw = &fw_info->fw_hdr;
+
+ /* Read the header of the firmware on the card */
+ ret = csio_hw_read_flash(hw, FLASH_FW_START,
+ sizeof(*card_fw) / sizeof(uint32_t),
+ (uint32_t *)card_fw, 1);
+ if (ret == 0) {
+ card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+ } else {
+ csio_err(hw,
+ "Unable to read card's firmware header: %d\n", ret);
+ card_fw_usable = 0;
+ }
+
+ if (fw_data != NULL) {
+ fs_fw = (const void *)fw_data;
+ fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+ } else {
+ fs_fw = NULL;
+ fs_fw_usable = 0;
+ }
+
+ if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+ (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+ /* Common case: the firmware on the card is an exact match and
+ * the filesystem one is an exact match too, or the filesystem
+ * one is absent/incompatible.
+ */
+ } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
+ csio_should_install_fs_fw(hw, card_fw_usable,
+ be32_to_cpu(fs_fw->fw_ver),
+ be32_to_cpu(card_fw->fw_ver))) {
+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
+ fw_size, 0);
+ if (ret != 0) {
+ csio_err(hw,
+ "failed to install firmware: %d\n", ret);
+ goto bye;
+ }
+
+ /* Installed successfully, update the cached header too. */
+ memcpy(card_fw, fs_fw, sizeof(*card_fw));
+ card_fw_usable = 1;
+ *reset = 0; /* already reset as part of load_fw */
+ }
+
+ if (!card_fw_usable) {
+ uint32_t d, c, k;
+
+ d = be32_to_cpu(drv_fw->fw_ver);
+ c = be32_to_cpu(card_fw->fw_ver);
+ k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+ csio_err(hw, "Cannot find a usable firmware: "
+ "chip state %d, "
+ "driver compiled with %d.%d.%d.%d, "
+ "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+ state,
+ FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+ FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+ ret = EINVAL;
+ goto bye;
+ }
+
+ /* We're using whatever's on the card and it's known to be good. */
+ hw->fwrev = be32_to_cpu(card_fw->fw_ver);
+ hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+ return ret;
+}
+
+/*
+ * Returns -EINVAL if attempts to flash the firmware failed
+ * else returns 0,
+ * if flashing was not attempted because the card had the
+ * latest firmware ECANCELED is returned
+ */
+static int
+csio_hw_flash_fw(struct csio_hw *hw, int *reset)
+{
+ int ret = -ECANCELED;
+ const struct firmware *fw;
+ struct fw_info *fw_info;
+ struct fw_hdr *card_fw;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev ;
+ const u8 *fw_data = NULL;
+ unsigned int fw_size = 0;
+
+ /* This is the firmware whose headers the driver was compiled
+ * against
+ */
+ fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
+ if (fw_info == NULL) {
+ csio_err(hw,
+ "unable to get firmware info for chip %d.\n",
+ CHELSIO_CHIP_VERSION(hw->chip_id));
+ return -EINVAL;
+ }
+
+ if (reject_firmware(&fw, FW_FNAME_T5, dev) < 0) {
+ csio_err(hw, "could not find firmware image %s, err: %d\n",
+ FW_FNAME_T5, ret);
+ } else {
+ fw_data = fw->data;
+ fw_size = fw->size;
+ }
+
+ /* allocate memory to read the header of the firmware on the
+ * card
+ */
+ card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+
+ /* upgrade FW logic */
+ ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+ hw->fw_state, reset);
+
+ /* Cleaning up */
+ if (fw != NULL)
+ release_firmware(fw);
+ kfree(card_fw);
+ return ret;
+}
+
+/*
+ * csio_hw_configure - Configure HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_configure(struct csio_hw *hw)
+{
+ int reset = 1;
+ int rv;
+ u32 param[1];
+
+ rv = csio_hw_dev_ready(hw);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* HW version */
+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
+
+ /* Needed for FW download */
+ rv = csio_hw_get_flash_params(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Set PCIe completion timeout to 4 seconds */
+ if (pci_is_pcie(hw->pdev))
+ pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
+
+ hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
+
+ rv = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (rv != 0)
+ goto out;
+
+ csio_hw_print_fw_version(hw, "Firmware revision");
+
+ rv = csio_do_hello(hw, &hw->fw_state);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Read vpd */
+ rv = csio_hw_get_vpd_params(hw, &hw->vpd);
+ if (rv != 0)
+ goto out;
+
+ csio_hw_get_fw_version(hw, &hw->fwrev);
+ csio_hw_get_tp_version(hw, &hw->tp_vers);
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+
+ /* Do firmware update */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_fw(hw, &reset);
+ spin_lock_irq(&hw->lock);
+
+ if (rv != 0)
+ goto out;
+
+ /* If the firmware doesn't support Configuration Files,
+ * return an error.
+ */
+ rv = csio_hw_check_fwconfig(hw, param);
+ if (rv != 0) {
+ csio_info(hw, "Firmware doesn't support "
+ "Firmware Configuration files\n");
+ goto out;
+ }
+
+ /* The firmware provides us with a memory buffer where we can
+ * load a Configuration File from the host if we want to
+ * override the Configuration File in flash.
+ */
+ rv = csio_hw_use_fwconfig(hw, reset, param);
+ if (rv == -ENOENT) {
+ csio_info(hw, "Could not initialize "
+ "adapter, error%d\n", rv);
+ goto out;
+ }
+ if (rv != 0) {
+ csio_info(hw, "Could not initialize "
+ "adapter, error%d\n", rv);
+ goto out;
+ }
+
+ } else {
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Get device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+ goto out;
+ }
+ } /* if not master */
+
+out:
+ return;
+}
+
+/*
+ * csio_hw_initialize - Initialize HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_initialize(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv;
+ int i;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ goto out;
+
+ csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
+ goto free_and_out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
+ retval);
+ goto free_and_out;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ rv = csio_get_fcoe_resinfo(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
+ goto out;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ rv = csio_config_queues(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv != 0) {
+ csio_err(hw, "Config of queues failed!: %d\n", rv);
+ goto out;
+ }
+
+ for (i = 0; i < hw->num_pports; i++)
+ hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_enable_ports(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to enable ports: %d\n", rv);
+ goto out;
+ }
+ }
+
+ csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
+ return;
+
+free_and_out:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return;
+}
+
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)
+
+/*
+ * csio_hw_intr_enable - Enable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Enable interrupts in HW registers.
+ */
+static void
+csio_hw_intr_enable(struct csio_hw *hw)
+{
+ uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
+ uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
+
+ /*
+ * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
+ * by FW, so do nothing for INTX.
+ */
+ if (hw->intr_mode == CSIO_IM_MSIX)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+ AIVEC_V(AIVEC_M), vec);
+ else if (hw->intr_mode == CSIO_IM_MSI)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+ AIVEC_V(AIVEC_M), 0);
+
+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
+
+ /* Turn on MB interrupts - this will internally flush PIO as well */
+ csio_mb_intr_enable(hw);
+
+ /* These are common registers - only a master can modify them */
+ if (csio_is_hw_master(hw)) {
+ /*
+ * Disable the Serial FLASH interrupt, if enabled!
+ */
+ pl &= (~SF_F);
+ csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
+
+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
+ EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
+ ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+ ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+ ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+ ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
+ SGE_INT_ENABLE3_A);
+ csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
+ }
+
+ hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
+
+}
+
+/*
+ * csio_hw_intr_disable - Disable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Turn off Mailbox and PCI_PF_CFG interrupts.
+ */
+void
+csio_hw_intr_disable(struct csio_hw *hw)
+{
+ uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+
+ if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
+ return;
+
+ hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
+
+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
+ if (csio_is_hw_master(hw))
+ csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
+
+ /* Turn off MB interrupts */
+ csio_mb_intr_disable(hw);
+
+}
+
+void
+csio_hw_fatal_err(struct csio_hw *hw)
+{
+ csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
+ csio_hw_intr_disable(hw);
+
+ /* Do not reset HW, we may need FW state for debugging */
+ csio_fatal(hw, "HW Fatal error encountered!\n");
+}
+
+/*****************************************************************************/
+/* START: HW SM */
+/*****************************************************************************/
+/*
+ * csio_hws_uninit - Uninit state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_CFG:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_configuring - Configuring state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT:
+ csio_set_state(&hw->sm, csio_hws_initializing);
+ csio_hw_initialize(hw);
+ break;
+
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_initializing - Initialiazing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+
+ /* Enable interrupts */
+ csio_hw_intr_enable(hw);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_ready - Ready state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ /* Remember the event */
+ hw->evtflag = evt;
+
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ case CSIO_HWE_FW_DLOAD:
+ case CSIO_HWE_SUSPEND:
+ case CSIO_HWE_PCI_REMOVE:
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_quiescing);
+ /* cleanup all outstanding cmds */
+ if (evt == CSIO_HWE_HBA_RESET ||
+ evt == CSIO_HWE_PCIERR_DETECTED)
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
+ else
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
+
+ csio_hw_intr_disable(hw);
+ csio_hw_mbm_cleanup(hw);
+ csio_evtq_stop(hw);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
+ csio_evtq_flush(hw);
+ csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
+ csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiescing - Quiescing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_QUIESCED:
+ switch (hw->evtflag) {
+ case CSIO_HWE_FW_DLOAD:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Download firmware */
+ /* Fall through */
+
+ case CSIO_HWE_HBA_RESET:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Start reset of the HBA */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
+ csio_wr_destroy_queues(hw, false);
+ csio_do_reset(hw, false);
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_set_state(&hw->sm, csio_hws_removing);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
+ csio_wr_destroy_queues(hw, true);
+ /* Now send the bye command */
+ csio_do_bye(hw);
+ break;
+
+ case CSIO_HWE_SUSPEND:
+ csio_set_state(&hw->sm, csio_hws_quiesced);
+ break;
+
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_pcierr);
+ csio_wr_destroy_queues(hw, false);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiesced - Quiesced state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_RESUME:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_resetting - HW Resetting state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET_DONE:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_removing - PCI Hotplug removing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ if (!csio_is_hw_master(hw))
+ break;
+ /*
+ * The BYE should have alerady been issued, so we cant
+ * use the mailbox interface. Hence we use the PL_RST
+ * register directly.
+ */
+ csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
+ csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
+ mdelay(2000);
+ break;
+
+ /* Should never receive any new events */
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+}
+
+/*
+ * csio_hws_pcierr - PCI Error state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_PCIERR_SLOT_RESET:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: HW SM */
+/*****************************************************************************/
+
+/*
+ * csio_handle_intr_status - table driven interrupt handler
+ * @hw: HW instance
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occured. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+int
+csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = csio_rd_reg32(hw, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ csio_fatal(hw, "Fatal %s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ } else if (acts->msg)
+ csio_info(hw, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ csio_wr_reg32(hw, status, reg);
+ return fatal;
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void csio_tp_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void csio_sge_intr_handler(struct csio_hw *hw)
+{
+ uint64_t v;
+
+ static struct intr_info sge_intr_info[] = {
+ { ERR_CPL_EXCEED_IQE_SIZE_F,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { ERR_INVALID_CIDX_INC_F,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { ERR_ING_CTXT_PRIO_F,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { ERR_EGR_CTXT_PRIO_F,
+ "SGE too many priority egress contexts", -1, 0 },
+ { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
+ if (v) {
+ csio_fatal(hw, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
+ SGE_INT_CAUSE1_A);
+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
+ }
+
+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
+
+ if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
+ v != 0)
+ csio_hw_fatal_err(hw);
+}
+
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+ OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+ IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
+/*
+ * CIM interrupt handler.
+ */
+static void csio_cim_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cim_intr_info[] = {
+ { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info cim_upintr_info[] = {
+ { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT_F, "CIM illegal write", -1, 1 },
+ { ILLRDINT_F, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
+ cim_intr_info) +
+ csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
+ cim_upintr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void csio_ulprx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void csio_ulptx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void csio_pmtx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
+ { 0xffffff0, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
+ 1 },
+ { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+ { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void csio_pmrx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
+ { 0x3ffff0, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
+ 1 },
+ { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+ { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void csio_cplsw_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cplsw_intr_info[] = {
+ { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void csio_le_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info le_intr_info[] = {
+ { LIPMISS_F, "LE LIP miss", -1, 0 },
+ { LIP0_F, "LE 0 LIP error", -1, 0 },
+ { PARITYERR_F, "LE parity error", -1, 1 },
+ { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { REQQPARERR_F, "LE request queue parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void csio_mps_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_tx_intr_info[] = {
+ { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+ -1, 1 },
+ { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+ -1, 1 },
+ { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+ { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR_F, "MPS Tx framing error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_trc_intr_info[] = {
+ { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+ -1, 1 },
+ { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_cls_intr_info[] = {
+ { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
+ mps_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
+ mps_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
+ mps_trc_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+ mps_stat_sram_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+ mps_stat_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+ mps_stat_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
+ mps_cls_intr_info);
+
+ csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
+ csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+ ECC_UE_INT_CAUSE_F)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
+ } else {
+ addr = MC_INT_CAUSE_A;
+ cnt_addr = MC_ECC_STATUS_A;
+ }
+
+ v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
+ if (v & PERR_INT_CAUSE_F)
+ csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
+ if (v & ECC_CE_INT_CAUSE_F) {
+ uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
+
+ csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
+ csio_warn(hw, "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & ECC_UE_INT_CAUSE_F)
+ csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
+
+ csio_wr_reg32(hw, v, addr);
+ if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void csio_ma_intr_handler(struct csio_hw *hw)
+{
+ uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
+
+ if (status & MEM_PERR_INT_CAUSE_F)
+ csio_fatal(hw, "MA parity error, parity status %#x\n",
+ csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
+ if (status & MEM_WRAP_INT_CAUSE_F) {
+ v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
+ csio_fatal(hw,
+ "MA address wrap-around error by client %u to address %#x\n",
+ MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
+ }
+ csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void csio_smb_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info smb_intr_info[] = {
+ { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void csio_ncsi_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ncsi_intr_info[] = {
+ { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
+{
+ uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
+
+ v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
+ if (!v)
+ return;
+
+ if (v & TXFIFO_PRTY_ERR_F)
+ csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
+ if (v & RXFIFO_PRTY_ERR_F)
+ csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
+ csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void csio_pl_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pl_intr_info[] = {
+ { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+ { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_hw_slow_intr_handler - control path interrupt handler
+ * @hw: HW module
+ *
+ * Interrupt handler for non-data global interrupt events, e.g., errors.
+ * The designation 'slow' is because it involves register reads, while
+ * data interrupts typically don't involve any MMIOs.
+ */
+int
+csio_hw_slow_intr_handler(struct csio_hw *hw)
+{
+ uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
+
+ if (!(cause & CSIO_GLBL_INTR_MASK)) {
+ CSIO_INC_STATS(hw, n_plint_unexp);
+ return 0;
+ }
+
+ csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
+
+ CSIO_INC_STATS(hw, n_plint_cnt);
+
+ if (cause & CIM_F)
+ csio_cim_intr_handler(hw);
+
+ if (cause & MPS_F)
+ csio_mps_intr_handler(hw);
+
+ if (cause & NCSI_F)
+ csio_ncsi_intr_handler(hw);
+
+ if (cause & PL_F)
+ csio_pl_intr_handler(hw);
+
+ if (cause & SMB_F)
+ csio_smb_intr_handler(hw);
+
+ if (cause & XGMAC0_F)
+ csio_xgmac_intr_handler(hw, 0);
+
+ if (cause & XGMAC1_F)
+ csio_xgmac_intr_handler(hw, 1);
+
+ if (cause & XGMAC_KR0_F)
+ csio_xgmac_intr_handler(hw, 2);
+
+ if (cause & XGMAC_KR1_F)
+ csio_xgmac_intr_handler(hw, 3);
+
+ if (cause & PCIE_F)
+ hw->chip_ops->chip_pcie_intr_handler(hw);
+
+ if (cause & MC_F)
+ csio_mem_intr_handler(hw, MEM_MC);
+
+ if (cause & EDC0_F)
+ csio_mem_intr_handler(hw, MEM_EDC0);
+
+ if (cause & EDC1_F)
+ csio_mem_intr_handler(hw, MEM_EDC1);
+
+ if (cause & LE_F)
+ csio_le_intr_handler(hw);
+
+ if (cause & TP_F)
+ csio_tp_intr_handler(hw);
+
+ if (cause & MA_F)
+ csio_ma_intr_handler(hw);
+
+ if (cause & PM_TX_F)
+ csio_pmtx_intr_handler(hw);
+
+ if (cause & PM_RX_F)
+ csio_pmrx_intr_handler(hw);
+
+ if (cause & ULP_RX_F)
+ csio_ulprx_intr_handler(hw);
+
+ if (cause & CPL_SWITCH_F)
+ csio_cplsw_intr_handler(hw);
+
+ if (cause & SGE_F)
+ csio_sge_intr_handler(hw);
+
+ if (cause & ULP_TX_F)
+ csio_ulptx_intr_handler(hw);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
+ csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
+
+ return 1;
+}
+
+/*****************************************************************************
+ * HW <--> mailbox interfacing routines.
+ ****************************************************************************/
+/*
+ * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
+ *
+ * @data: Private data pointer.
+ *
+ * Called from worker thread context.
+ */
+static void
+csio_mberr_worker(void *data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mbm *mbm = &hw->mbm;
+ LIST_HEAD(cbfn_q);
+ struct csio_mb *mbp_next;
+ int rv;
+
+ del_timer_sync(&mbm->timer);
+
+ spin_lock_irq(&hw->lock);
+ if (list_empty(&mbm->cbfn_q)) {
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+
+ /* Try to start waiting mailboxes */
+ if (!list_empty(&mbm->req_q)) {
+ mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
+ list_del_init(&mbp_next->list);
+
+ rv = csio_mb_issue(hw, mbp_next);
+ if (rv != 0)
+ list_add_tail(&mbp_next->list, &mbm->req_q);
+ else
+ CSIO_DEC_STATS(mbm, n_activeq);
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Now callback completions */
+ csio_mb_completions(hw, &cbfn_q);
+}
+
+/*
+ * csio_hw_mb_timer - Top-level Mailbox timeout handler.
+ *
+ * @data: private data pointer
+ *
+ **/
+static void
+csio_hw_mb_timer(uintptr_t data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mb *mbp = NULL;
+
+ spin_lock_irq(&hw->lock);
+ mbp = csio_mb_tmo_handler(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Call back the function for the timed-out Mailbox */
+ if (mbp)
+ mbp->mb_cbfn(hw, mbp);
+
+}
+
+/*
+ * csio_hw_mbm_cleanup - Cleanup Mailbox module.
+ * @hw: HW module
+ *
+ * Called with lock held, should exit with lock held.
+ * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
+ * into a local queue. Drops lock and calls the completions. Holds
+ * lock and returns.
+ */
+static void
+csio_hw_mbm_cleanup(struct csio_hw *hw)
+{
+ LIST_HEAD(cbfn_q);
+
+ csio_mb_cancel_all(hw, &cbfn_q);
+
+ spin_unlock_irq(&hw->lock);
+ csio_mb_completions(hw, &cbfn_q);
+ spin_lock_irq(&hw->lock);
+}
+
+/*****************************************************************************
+ * Event handling
+ ****************************************************************************/
+int
+csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ return -EINVAL;
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ return -ENOMEM;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+ memcpy((void *)evt_entry->data, evt_msg, len);
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+
+ return 0;
+}
+
+static int
+csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len, bool msg_sg)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+ struct csio_fl_dma_buf *fl_sg;
+ uint32_t off = 0;
+ unsigned long flags;
+ int n, ret = 0;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+
+ /* If Payload in SG list*/
+ if (msg_sg) {
+ fl_sg = (struct csio_fl_dma_buf *) evt_msg;
+ for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
+ memcpy((void *)((uintptr_t)evt_entry->data + off),
+ fl_sg->flbufs[n].vaddr,
+ fl_sg->flbufs[n].len);
+ off += fl_sg->flbufs[n].len;
+ }
+ } else
+ memcpy((void *)evt_entry->data, evt_msg, len);
+
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+out:
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return ret;
+}
+
+static void
+csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
+{
+ if (evt_entry) {
+ spin_lock_irq(&hw->lock);
+ list_del_init(&evt_entry->list);
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_DEC_STATS(hw, n_evt_activeq);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ spin_unlock_irq(&hw->lock);
+ }
+}
+
+void
+csio_evtq_flush(struct csio_hw *hw)
+{
+ uint32_t count;
+ count = 30;
+ while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
+}
+
+static void
+csio_evtq_stop(struct csio_hw *hw)
+{
+ hw->flags |= CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_start(struct csio_hw *hw)
+{
+ hw->flags &= ~CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_cleanup(struct csio_hw *hw)
+{
+ struct list_head *evt_entry, *next_entry;
+
+ /* Release outstanding events from activeq to freeq*/
+ if (!list_empty(&hw->evt_active_q))
+ list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
+
+ hw->stats.n_evt_activeq = 0;
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+
+ /* Freeup event entry */
+ list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
+ kfree(evt_entry);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->stats.n_evt_freeq = 0;
+}
+
+
+static void
+csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ __u8 op;
+ void *msg = NULL;
+ uint32_t msg_len = 0;
+ bool msg_sg = 0;
+
+ op = ((struct rss_header *) wr)->opcode;
+ if (op == CPL_FW6_PLD) {
+ CSIO_INC_STATS(hw, n_cpl_fw6_pld);
+ if (!flb || !flb->totlen) {
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ msg = (void *) flb;
+ msg_len = flb->totlen;
+ msg_sg = 1;
+ } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
+
+ CSIO_INC_STATS(hw, n_cpl_fw6_msg);
+ /* skip RSS header */
+ msg = (void *)((uintptr_t)wr + sizeof(__be64));
+ msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
+ sizeof(struct cpl_fw4_msg);
+ } else {
+ csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ /*
+ * Enqueue event to EventQ. Events processing happens
+ * in Event worker thread context
+ */
+ if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
+ (uint16_t)msg_len, msg_sg))
+ CSIO_INC_STATS(hw, n_evt_drop);
+}
+
+void
+csio_evtq_worker(struct work_struct *work)
+{
+ struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
+ struct list_head *evt_entry, *next_entry;
+ LIST_HEAD(evt_q);
+ struct csio_evt_msg *evt_msg;
+ struct cpl_fw6_msg *msg;
+ struct csio_rnode *rn;
+ int rv = 0;
+ uint8_t evtq_stop = 0;
+
+ csio_dbg(hw, "event worker thread active evts#%d\n",
+ hw->stats.n_evt_activeq);
+
+ spin_lock_irq(&hw->lock);
+ while (!list_empty(&hw->evt_active_q)) {
+ list_splice_tail_init(&hw->evt_active_q, &evt_q);
+ spin_unlock_irq(&hw->lock);
+
+ list_for_each_safe(evt_entry, next_entry, &evt_q) {
+ evt_msg = (struct csio_evt_msg *) evt_entry;
+
+ /* Drop events if queue is STOPPED */
+ spin_lock_irq(&hw->lock);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ evtq_stop = 1;
+ spin_unlock_irq(&hw->lock);
+ if (evtq_stop) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto free_evt;
+ }
+
+ switch (evt_msg->type) {
+ case CSIO_EVT_FW:
+ msg = (struct cpl_fw6_msg *)(evt_msg->data);
+
+ if ((msg->opcode == CPL_FW6_MSG ||
+ msg->opcode == CPL_FW4_MSG) &&
+ !msg->type) {
+ rv = csio_mb_fwevt_handler(hw,
+ msg->data);
+ if (!rv)
+ break;
+ /* Handle any remaining fw events */
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else if (msg->opcode == CPL_FW6_PLD) {
+
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else {
+ csio_warn(hw,
+ "Unhandled FW msg op %x type %x\n",
+ msg->opcode, msg->type);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+ break;
+
+ case CSIO_EVT_MBX:
+ csio_mberr_worker(hw);
+ break;
+
+ case CSIO_EVT_DEV_LOSS:
+ memcpy(&rn, evt_msg->data, sizeof(rn));
+ csio_rnode_devloss_handler(rn);
+ break;
+
+ default:
+ csio_warn(hw, "Unhandled event %x on evtq\n",
+ evt_msg->type);
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+free_evt:
+ csio_free_evt(hw, evt_msg);
+ }
+
+ spin_lock_irq(&hw->lock);
+ }
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+}
+
+int
+csio_fwevtq_handler(struct csio_hw *hw)
+{
+ int rv;
+
+ if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+
+ rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
+ csio_process_fwevtq_entry, NULL);
+ return rv;
+}
+
+/****************************************************************************
+ * Entry points
+ ****************************************************************************/
+
+/* Management module */
+/*
+ * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q.
+ * mgmt - mgmt module
+ * @io_req - io request
+ *
+ * Return - 0:if given IO Req exists in active Q.
+ * -EINVAL :if lookup fails.
+ */
+int
+csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
+{
+ struct list_head *tmp;
+
+ /* Lookup ioreq in the ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ if (io_req == (struct csio_ioreq *)tmp)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
+
+/*
+ * csio_mgmts_tmo_handler - MGMT IO Timeout handler.
+ * @data - Event data.
+ *
+ * Return - none.
+ */
+static void
+csio_mgmt_tmo_handler(uintptr_t data)
+{
+ struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
+ struct list_head *tmp;
+ struct csio_ioreq *io_req;
+
+ csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
+
+ spin_lock_irq(&mgmtm->hw->lock);
+
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
+
+ if (!io_req->tmo) {
+ /* Dequeue the request from retry Q. */
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ } else {
+ CSIO_DB_ASSERT(0);
+ }
+ }
+ }
+
+ /* If retry queue is not empty, re-arm timer */
+ if (!list_empty(&mgmtm->active_q))
+ mod_timer(&mgmtm->mgmt_timer,
+ jiffies + msecs_to_jiffies(ECM_MIN_TMO));
+ spin_unlock_irq(&mgmtm->hw->lock);
+}
+
+static void
+csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
+{
+ struct csio_hw *hw = mgmtm->hw;
+ struct csio_ioreq *io_req;
+ struct list_head *tmp;
+ uint32_t count;
+
+ count = 30;
+ /* Wait for all outstanding req to complete gracefully */
+ while ((!list_empty(&mgmtm->active_q)) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* release outstanding req from ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ }
+ }
+}
+
+/*
+ * csio_mgmt_init - Mgmt module init entry point
+ * @mgmtsm - mgmt module
+ * @hw - HW module
+ *
+ * Initialize mgmt timer, resource wait queue, active queue,
+ * completion q. Allocate Egress and Ingress
+ * WR queues and save off the queue index returned by the WR
+ * module for future use. Allocate and save off mgmt reqs in the
+ * mgmt_req_freelist for future use. Make sure their SM is initialized
+ * to uninit state.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
+{
+ struct timer_list *timer = &mgmtm->mgmt_timer;
+
+ init_timer(timer);
+ timer->function = csio_mgmt_tmo_handler;
+ timer->data = (unsigned long)mgmtm;
+
+ INIT_LIST_HEAD(&mgmtm->active_q);
+ INIT_LIST_HEAD(&mgmtm->cbfn_q);
+
+ mgmtm->hw = hw;
+ /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
+
+ return 0;
+}
+
+/*
+ * csio_mgmtm_exit - MGMT module exit entry point
+ * @mgmtsm - mgmt module
+ *
+ * This function called during MGMT module uninit.
+ * Stop timers, free ioreqs allocated.
+ * Returns: None
+ *
+ */
+static void
+csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
+{
+ del_timer_sync(&mgmtm->mgmt_timer);
+}
+
+
+/**
+ * csio_hw_start - Kicks off the HW State machine
+ * @hw: Pointer to HW module.
+ *
+ * It is assumed that the initialization is a synchronous operation.
+ * So when we return afer posting the event, the HW SM should be in
+ * the ready state, if there were no errors during init.
+ */
+int
+csio_hw_start(struct csio_hw *hw)
+{
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_CFG);
+ spin_unlock_irq(&hw->lock);
+
+ if (csio_is_hw_ready(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+int
+csio_hw_stop(struct csio_hw *hw)
+{
+ csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
+
+ if (csio_is_hw_removing(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/* Max reset retries */
+#define CSIO_MAX_RESET_RETRIES 3
+
+/**
+ * csio_hw_reset - Reset the hardware
+ * @hw: HW module.
+ *
+ * Caller should hold lock across this function.
+ */
+int
+csio_hw_reset(struct csio_hw *hw)
+{
+ if (!csio_is_hw_master(hw))
+ return -EPERM;
+
+ if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
+ csio_dbg(hw, "Max hw reset attempts reached..");
+ return -EINVAL;
+ }
+
+ hw->rst_retries++;
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
+
+ if (csio_is_hw_ready(hw)) {
+ hw->rst_retries = 0;
+ hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+/*
+ * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
+ * @hw: HW module.
+ */
+static void
+csio_hw_get_device_id(struct csio_hw *hw)
+{
+ /* Is the adapter device id cached already ?*/
+ if (csio_is_dev_id_cached(hw))
+ return;
+
+ /* Get the PCI vendor & device id */
+ pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
+ &hw->params.pci.vendor_id);
+ pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
+ &hw->params.pci.device_id);
+
+ csio_dev_id_cached(hw);
+ hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
+
+} /* csio_hw_get_device_id */
+
+/*
+ * csio_hw_set_description - Set the model, description of the hw.
+ * @hw: HW module.
+ * @ven_id: PCI Vendor ID
+ * @dev_id: PCI Device ID
+ */
+static void
+csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
+{
+ uint32_t adap_type, prot_type;
+
+ if (ven_id == CSIO_VENDOR_ID) {
+ prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
+ adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
+
+ if (prot_type == CSIO_T5_FCOE_ASIC) {
+ memcpy(hw->hw_ver,
+ csio_t5_fcoe_adapters[adap_type].model_no, 16);
+ memcpy(hw->model_desc,
+ csio_t5_fcoe_adapters[adap_type].description,
+ 32);
+ } else {
+ char tempName[32] = "Chelsio FCoE Controller";
+ memcpy(hw->model_desc, tempName, 32);
+ }
+ }
+} /* csio_hw_set_description */
+
+/**
+ * csio_hw_init - Initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ * Initialize the members of the HW module.
+ */
+int
+csio_hw_init(struct csio_hw *hw)
+{
+ int rv = -EINVAL;
+ uint32_t i;
+ uint16_t ven_id, dev_id;
+ struct csio_evt_msg *evt_entry;
+
+ INIT_LIST_HEAD(&hw->sm.sm_list);
+ csio_init_state(&hw->sm, csio_hws_uninit);
+ spin_lock_init(&hw->lock);
+ INIT_LIST_HEAD(&hw->sln_head);
+
+ /* Get the PCI vendor & device id */
+ csio_hw_get_device_id(hw);
+
+ strcpy(hw->name, CSIO_HW_NAME);
+
+ /* Initialize the HW chip ops T5 specific ops */
+ hw->chip_ops = &t5_ops;
+
+ /* Set the model & its description */
+
+ ven_id = hw->params.pci.vendor_id;
+ dev_id = hw->params.pci.device_id;
+
+ csio_hw_set_description(hw, ven_id, dev_id);
+
+ /* Initialize default log level */
+ hw->params.log_level = (uint32_t) csio_dbg_level;
+
+ csio_set_fwevt_intr_idx(hw, -1);
+ csio_set_nondata_intr_idx(hw, -1);
+
+ /* Init all the modules: Mailbox, WorkRequest and Transport */
+ if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
+ goto err;
+
+ rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
+ if (rv)
+ goto err_mbm_exit;
+
+ rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
+ if (rv)
+ goto err_wrm_exit;
+
+ rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
+ if (rv)
+ goto err_scsim_exit;
+ /* Pre-allocate evtq and initialize them */
+ INIT_LIST_HEAD(&hw->evt_active_q);
+ INIT_LIST_HEAD(&hw->evt_free_q);
+ for (i = 0; i < csio_evtq_sz; i++) {
+
+ evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
+ if (!evt_entry) {
+ csio_err(hw, "Failed to initialize eventq");
+ goto err_evtq_cleanup;
+ }
+
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->dev_num = dev_num;
+ dev_num++;
+
+ return 0;
+
+err_evtq_cleanup:
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+err_scsim_exit:
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+err_wrm_exit:
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+err_mbm_exit:
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+err:
+ return rv;
+}
+
+/**
+ * csio_hw_exit - Un-initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ */
+void
+csio_hw_exit(struct csio_hw *hw)
+{
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+}
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
new file mode 100644
index 000000000..029bef82c
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -0,0 +1,603 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_H__
+#define __CSIO_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/compiler.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "t4_hw.h"
+#include "csio_hw_chip.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_scsi.h"
+#include "csio_defs.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+
+/*
+ * An error value used by host. Should not clash with FW defined return values.
+ */
+#define FW_HOSTERROR 255
+
+#define CSIO_HW_NAME "Chelsio FCoE Adapter"
+#define CSIO_MAX_PFN 8
+#define CSIO_MAX_PPORTS 4
+
+#define CSIO_MAX_LUN 0xFFFF
+#define CSIO_MAX_QUEUE 2048
+#define CSIO_MAX_CMD_PER_LUN 32
+#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
+#define CSIO_MAX_SECTOR_SIZE 128
+
+/* Interrupts */
+#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
+ * (Forward intr iq + fw iq) */
+#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */
+#define CSIO_MAX_SCSI_CPU 128
+#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)
+#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)
+
+/* Queues */
+enum {
+ CSIO_INTR_WRSIZE = 128,
+ CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE),
+ CSIO_FWEVT_WRSIZE = 128,
+ CSIO_FWEVT_IQLEN = 128,
+ CSIO_FWEVT_FLBUFS = 64,
+ CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),
+ CSIO_HW_NIQ = 1,
+ CSIO_HW_NFLQ = 1,
+ CSIO_HW_NEQ = 1,
+ CSIO_HW_NINTXQ = 1,
+};
+
+struct csio_msix_entries {
+ unsigned short vector; /* Assigned MSI-X vector */
+ void *dev_id; /* Priv object associated w/ this msix*/
+ char desc[24]; /* Description of this vector */
+};
+
+struct csio_scsi_qset {
+ int iq_idx; /* Ingress index */
+ int eq_idx; /* Egress index */
+ uint32_t intr_idx; /* MSIX Vector index */
+};
+
+struct csio_scsi_cpu_info {
+ int16_t max_cpus;
+};
+
+extern int csio_dbg_level;
+extern unsigned int csio_port_mask;
+extern int csio_msi;
+
+#define CSIO_VENDOR_ID 0x1425
+#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
+#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
+
+#define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
+ EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
+ PM_TX_F | PM_RX_F | ULP_RX_F | \
+ CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
+
+/*
+ * Hard parameters used to initialize the card in the absence of a
+ * configuration file.
+ */
+enum {
+ /* General */
+ CSIO_SGE_DBFIFO_INT_THRESH = 10,
+
+ CSIO_SGE_RX_DMA_OFFSET = 2,
+
+ CSIO_SGE_FLBUF_SIZE1 = 65536,
+ CSIO_SGE_FLBUF_SIZE2 = 1536,
+ CSIO_SGE_FLBUF_SIZE3 = 9024,
+ CSIO_SGE_FLBUF_SIZE4 = 9216,
+ CSIO_SGE_FLBUF_SIZE5 = 2048,
+ CSIO_SGE_FLBUF_SIZE6 = 128,
+ CSIO_SGE_FLBUF_SIZE7 = 8192,
+ CSIO_SGE_FLBUF_SIZE8 = 16384,
+
+ CSIO_SGE_TIMER_VAL_0 = 5,
+ CSIO_SGE_TIMER_VAL_1 = 10,
+ CSIO_SGE_TIMER_VAL_2 = 20,
+ CSIO_SGE_TIMER_VAL_3 = 50,
+ CSIO_SGE_TIMER_VAL_4 = 100,
+ CSIO_SGE_TIMER_VAL_5 = 200,
+
+ CSIO_SGE_INT_CNT_VAL_0 = 1,
+ CSIO_SGE_INT_CNT_VAL_1 = 4,
+ CSIO_SGE_INT_CNT_VAL_2 = 8,
+ CSIO_SGE_INT_CNT_VAL_3 = 16,
+};
+
+/* Slowpath events */
+enum csio_evt {
+ CSIO_EVT_FW = 0, /* FW event */
+ CSIO_EVT_MBX, /* MBX event */
+ CSIO_EVT_SCN, /* State change notification */
+ CSIO_EVT_DEV_LOSS, /* Device loss event */
+ CSIO_EVT_MAX, /* Max supported event */
+};
+
+#define CSIO_EVT_MSG_SIZE 512
+#define CSIO_EVTQ_SIZE 512
+
+/* Event msg */
+struct csio_evt_msg {
+ struct list_head list; /* evt queue*/
+ enum csio_evt type;
+ uint8_t data[CSIO_EVT_MSG_SIZE];
+};
+
+enum {
+ SERNUM_LEN = 16, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+};
+
+enum {
+ SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
+};
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+};
+
+/* Management module */
+enum {
+ CSIO_MGMT_EQ_WRSIZE = 512,
+ CSIO_MGMT_IQ_WRSIZE = 128,
+ CSIO_MGMT_EQLEN = 64,
+ CSIO_MGMT_IQLEN = 64,
+};
+
+#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
+#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
+
+/* mgmt module stats */
+struct csio_mgmtm_stats {
+ uint32_t n_abort_req; /* Total abort request */
+ uint32_t n_abort_rsp; /* Total abort response */
+ uint32_t n_close_req; /* Total close request */
+ uint32_t n_close_rsp; /* Total close response */
+ uint32_t n_err; /* Total Errors */
+ uint32_t n_drop; /* Total request dropped */
+ uint32_t n_active; /* Count of active_q */
+ uint32_t n_cbfn; /* Count of cbfn_q */
+};
+
+/* MGMT module */
+struct csio_mgmtm {
+ struct csio_hw *hw; /* Pointer to HW moduel */
+ int eq_idx; /* Egress queue index */
+ int iq_idx; /* Ingress queue index */
+ int msi_vec; /* MSI vector */
+ struct list_head active_q; /* Outstanding ELS/CT */
+ struct list_head abort_q; /* Outstanding abort req */
+ struct list_head cbfn_q; /* Completion queue */
+ struct list_head mgmt_req_freelist; /* Free poll of reqs */
+ /* ELSCT request freelist*/
+ struct timer_list mgmt_timer; /* MGMT timer */
+ struct csio_mgmtm_stats stats; /* ELS/CT stats */
+};
+
+struct csio_adap_desc {
+ char model_no[16];
+ char description[32];
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ int vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/* User configurable hw parameters */
+struct csio_hw_params {
+ uint32_t sf_size; /* serial flash
+ * size in bytes
+ */
+ uint32_t sf_nsec; /* # of flash sectors */
+ struct pci_params pci;
+ uint32_t log_level; /* Module-level for
+ * debug log.
+ */
+};
+
+struct csio_vpd {
+ uint32_t cclk;
+ uint8_t ec[EC_LEN + 1];
+ uint8_t sn[SERNUM_LEN + 1];
+ uint8_t id[ID_LEN + 1];
+};
+
+struct csio_pport {
+ uint16_t pcap;
+ uint8_t portid;
+ uint8_t link_status;
+ uint16_t link_speed;
+ uint8_t mac[6];
+ uint8_t mod_type;
+ uint8_t rsvd1;
+ uint8_t rsvd2;
+ uint8_t rsvd3;
+};
+
+/* fcoe resource information */
+struct csio_fcoe_res_info {
+ uint16_t e_d_tov;
+ uint16_t r_a_tov_seq;
+ uint16_t r_a_tov_els;
+ uint16_t r_r_tov;
+ uint32_t max_xchgs;
+ uint32_t max_ssns;
+ uint32_t used_xchgs;
+ uint32_t used_ssns;
+ uint32_t max_fcfs;
+ uint32_t max_vnps;
+ uint32_t used_fcfs;
+ uint32_t used_vnps;
+};
+
+/* HW State machine Events */
+enum csio_hw_ev {
+ CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
+ CSIO_HWE_INIT, /* Config done, start Init */
+ CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */
+ CSIO_HWE_FATAL, /* Fatal error during initialization */
+ CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detetced */
+ CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recoviery */
+ CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */
+ CSIO_HWE_QUIESCED, /* HBA quiesced */
+ CSIO_HWE_HBA_RESET, /* HBA reset requested */
+ CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
+ CSIO_HWE_FW_DLOAD, /* FW download requested */
+ CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */
+ CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */
+ CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */
+ CSIO_HWE_MAX, /* Max HW event */
+};
+
+/* hw stats */
+struct csio_hw_stats {
+ uint32_t n_evt_activeq; /* Number of event in active Q */
+ uint32_t n_evt_freeq; /* Number of event in free Q */
+ uint32_t n_evt_drop; /* Number of event droped */
+ uint32_t n_evt_unexp; /* Number of unexpected events */
+ uint32_t n_pcich_offline;/* Number of pci channel offline */
+ uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */
+ uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/
+ uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/
+ uint32_t n_cpl_unexp; /* Number of unexpected cpl */
+ uint32_t n_mbint_unexp; /* Number of unexpected mbox */
+ /* interrupt */
+ uint32_t n_plint_unexp; /* Number of unexpected PL */
+ /* interrupt */
+ uint32_t n_plint_cnt; /* Number of PL interrupt */
+ uint32_t n_int_stray; /* Number of stray interrupt */
+ uint32_t n_err; /* Number of hw errors */
+ uint32_t n_err_fatal; /* Number of fatal errors */
+ uint32_t n_err_nomem; /* Number of memory alloc failure */
+ uint32_t n_err_io; /* Number of IO failure */
+ enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */
+ uint64_t n_reset_start; /* Start time after the reset */
+ uint32_t rsvd1;
+};
+
+/* Defines for hw->flags */
+#define CSIO_HWF_MASTER 0x00000001 /* This is the Master
+ * function for the
+ * card.
+ */
+#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt
+ * enable bit set?
+ */
+#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */
+#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been
+ * allocated memory.
+ */
+#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been
+ * allocated in FW.
+ */
+#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */
+#define CSIO_HWF_DEVID_CACHED 0X00000040 /* PCI vendor & device
+ * id cached */
+#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing
+ * FW events
+ */
+#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config
+ * params
+ */
+#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
+ * enabled?
+ */
+
+#define csio_is_hw_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
+#define csio_is_host_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)
+#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)
+#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID)
+#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)
+#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)
+#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)
+
+/* Defines for intr_mode */
+enum csio_intr_mode {
+ CSIO_IM_NONE = 0,
+ CSIO_IM_INTX = 1,
+ CSIO_IM_MSI = 2,
+ CSIO_IM_MSIX = 3,
+};
+
+/* Master HW structure: One per function */
+struct csio_hw {
+ struct csio_sm sm; /* State machine: should
+ * be the 1st member.
+ */
+ spinlock_t lock; /* Lock for hw */
+
+ struct csio_scsim scsim; /* SCSI module*/
+ struct csio_wrm wrm; /* Work request module*/
+ struct pci_dev *pdev; /* PCI device */
+
+ void __iomem *regstart; /* Virtual address of
+ * register map
+ */
+ /* SCSI queue sets */
+ uint32_t num_sqsets; /* Number of SCSI
+ * queue sets */
+ uint32_t num_scsi_msix_cpus; /* Number of CPUs that
+ * will be used
+ * for ingress
+ * processing.
+ */
+
+ struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];
+ struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];
+
+ uint32_t evtflag; /* Event flag */
+ uint32_t flags; /* HW flags */
+
+ struct csio_mgmtm mgmtm; /* management module */
+ struct csio_mbm mbm; /* Mailbox module */
+
+ /* Lnodes */
+ uint32_t num_lns; /* Number of lnodes */
+ struct csio_lnode *rln; /* Root lnode */
+ struct list_head sln_head; /* Sibling node list
+ * list
+ */
+ int intr_iq_idx; /* Forward interrupt
+ * queue.
+ */
+ int fwevt_iq_idx; /* FW evt queue */
+ struct work_struct evtq_work; /* Worker thread for
+ * HW events.
+ */
+ struct list_head evt_free_q; /* freelist of evt
+ * elements
+ */
+ struct list_head evt_active_q; /* active evt queue*/
+
+ /* board related info */
+ char name[32];
+ char hw_ver[16];
+ char model_desc[32];
+ char drv_version[32];
+ char fwrev_str[32];
+ uint32_t optrom_ver;
+ uint32_t fwrev;
+ uint32_t tp_vers;
+ char chip_ver;
+ uint16_t chip_id; /* Tells T4/T5 chip */
+ enum csio_dev_state fw_state;
+ struct csio_vpd vpd;
+
+ uint8_t pfn; /* Physical Function
+ * number
+ */
+ uint32_t port_vec; /* Port vector */
+ uint8_t num_pports; /* Number of physical
+ * ports.
+ */
+ uint8_t rst_retries; /* Reset retries */
+ uint8_t cur_evt; /* current s/m evt */
+ uint8_t prev_evt; /* Previous s/m evt */
+ uint32_t dev_num; /* device number */
+ struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */
+ struct csio_hw_params params; /* Hw parameters */
+
+ struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */
+ mempool_t *mb_mempool; /* Mailbox memory pool*/
+ mempool_t *rnode_mempool; /* rnode memory pool */
+
+ /* Interrupt */
+ enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */
+ uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt
+ * index
+ */
+ uint32_t nondata_intr_idx; /* nondata MSIX/intr
+ * idx
+ */
+
+ uint8_t cfg_neq; /* FW configured no of
+ * egress queues
+ */
+ uint8_t cfg_niq; /* FW configured no of
+ * iq queues.
+ */
+
+ struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
+ struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific
+ * Operations
+ */
+
+ /* MSIX vectors */
+ struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
+
+ struct dentry *debugfs_root; /* Debug FS */
+ struct csio_hw_stats stats; /* Hw statistics */
+};
+
+/* Register access macros */
+#define csio_reg(_b, _r) ((_b) + (_r))
+
+#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))
+
+#define csio_wr_reg8(_h, _v, _r) writeb((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg16(_h, _v, _r) writew((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg32(_h, _v, _r) writel((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg64(_h, _v, _r) writeq((_v), \
+ csio_reg((_h)->regstart, (_r)))
+
+void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);
+
+/* Core clocks <==> uSecs */
+static inline uint32_t
+csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest uS */
+ return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;
+}
+
+static inline uint32_t
+csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
+{
+ return (us * hw->vpd.cclk) / 1000;
+}
+
+/* Easy access macros */
+#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))
+#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))
+#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))
+#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))
+
+#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)
+#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))
+#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))
+
+#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))
+#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)
+#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))
+#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx)
+
+/* Printing/logging */
+#define CSIO_DEVID(__dev) ((__dev)->dev_num)
+#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)
+#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)
+
+#define csio_info(__hw, __fmt, ...) \
+ dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_fatal(__hw, __fmt, ...) \
+ dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_err(__hw, __fmt, ...) \
+ dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_warn(__hw, __fmt, ...) \
+ dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#ifdef __CSIO_DEBUG__
+#define csio_dbg(__hw, __fmt, ...) \
+ csio_info((__hw), __fmt, ##__VA_ARGS__);
+#else
+#define csio_dbg(__hw, __fmt, ...)
+#endif
+
+int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int,
+ int, int, uint32_t *);
+void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int,
+ unsigned int, unsigned int);
+int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
+void csio_hw_intr_disable(struct csio_hw *);
+int csio_hw_slow_intr_handler(struct csio_hw *);
+int csio_handle_intr_status(struct csio_hw *, unsigned int,
+ const struct intr_info *);
+
+int csio_hw_start(struct csio_hw *);
+int csio_hw_stop(struct csio_hw *);
+int csio_hw_reset(struct csio_hw *);
+int csio_is_hw_ready(struct csio_hw *);
+int csio_is_hw_removing(struct csio_hw *);
+
+int csio_fwevtq_handler(struct csio_hw *);
+void csio_evtq_worker(struct work_struct *);
+int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t);
+void csio_evtq_flush(struct csio_hw *hw);
+
+int csio_request_irqs(struct csio_hw *);
+void csio_intr_enable(struct csio_hw *);
+void csio_intr_disable(struct csio_hw *, bool);
+void csio_hw_fatal_err(struct csio_hw *);
+
+struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
+int csio_config_queues(struct csio_hw *);
+
+int csio_hw_init(struct csio_hw *);
+void csio_hw_exit(struct csio_hw *);
+#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
new file mode 100644
index 000000000..6b284c117
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_CHIP_H__
+#define __CSIO_HW_CHIP_H__
+
+#include "csio_defs.h"
+
+/* Define MACRO values */
+#define CSIO_HW_T5 0x5000
+#define CSIO_T5_FCOE_ASIC 0x5600
+#define CSIO_HW_CHIP_MASK 0xF000
+
+#define T5_REGMAP_SIZE (332 * 1024)
+#define FW_FNAME_T5 "/*(DEBLOBBED)*/"
+#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA 0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
+
+static inline int csio_is_t5(uint16_t chip)
+{
+ return (chip == CSIO_HW_T5);
+}
+
+/* Define MACRO DEFINITIONS */
+#define CSIO_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
+
+#include "t4fw_api.h"
+#include "t4fw_version.h"
+
+#define FW_VERSION(chip) ( \
+ FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+ u8 chip;
+ char *fs_name;
+ char *fw_mod_name;
+ struct fw_hdr fw_hdr;
+};
+
+/* Declare ENUMS */
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
+enum {
+ MEMWIN_APERTURE = 2048,
+ MEMWIN_BASE = 0x1b800,
+};
+
+/* Slow path handlers */
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/* T4/T5 Chip specific ops */
+struct csio_hw;
+struct csio_hw_chip_ops {
+ int (*chip_set_mem_win)(struct csio_hw *, uint32_t);
+ void (*chip_pcie_intr_handler)(struct csio_hw *);
+ uint32_t (*chip_flash_cfg_addr)(struct csio_hw *);
+ int (*chip_mc_read)(struct csio_hw *, int, uint32_t,
+ __be32 *, uint64_t *);
+ int (*chip_edc_read)(struct csio_hw *, int, uint32_t,
+ __be32 *, uint64_t *);
+ int (*chip_memory_rw)(struct csio_hw *, u32, int, u32,
+ u32, uint32_t *, int);
+ void (*chip_dfs_create_ext_mem)(struct csio_hw *);
+};
+
+extern struct csio_hw_chip_ops t5_ops;
+
+#endif /* #ifndef __CSIO_HW_CHIP_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
new file mode 100644
index 000000000..3267f4f62
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -0,0 +1,398 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "csio_hw.h"
+#include "csio_init.h"
+
+static int
+csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
+{
+ u32 mem_win_base;
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ mem_win_base = MEMWIN_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+
+ return 0;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_t5_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP_F, "RXNP array parity error", -1, 1 },
+ { RPCP_F, "RXPC array parity error", -1, 1 },
+ { RCIP_F, "RXCIF array parity error", -1, 1 },
+ { RCCP_F, "Rx completions control array parity error", -1, 1 },
+ { RFTP_F, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP_F, "TXPC array parity error", -1, 1 },
+ { TNPP_F, "TXNP array parity error", -1, 1 },
+ { TFTP_F, "TXFT array parity error", -1, 1 },
+ { TCAP_F, "TXCA array parity error", -1, 1 },
+ { TCIP_F, "TXCIF array parity error", -1, 1 },
+ { RCAP_F, "RXCA array parity error", -1, 1 },
+ { OTDD_F, "outbound request TLP discarded", -1, 1 },
+ { RDPE_F, "Rx data parity error", -1, 1 },
+ { TDUE_F, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ static struct intr_info pcie_intr_info[] = {
+ { MSTGRPPERR_F, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR_F, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR_F, "PCI FID parity error", -1, 1 },
+ { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
+ -1, 1 },
+ { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR_F, "Outbound read error", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_t5_flash_cfg_addr - return the address of the flash configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_t5_flash_cfg_addr(struct csio_hw *hw)
+{
+ return FLASH_CFG_START;
+}
+
+/*
+ * csio_t5_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: index to the register
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @parity is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+ uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
+ uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
+
+ mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
+ mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+ mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+ mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+ mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
+
+ if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
+ mc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_t5_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @parity is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+ uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
+ uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
+
+/*
+ * These macro are missing in t4_regs.h file.
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
+ edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
+ edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+
+ if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
+ edc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boudaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the callers
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+ u32 len, uint32_t *buf, int dir)
+{
+ u32 pos, start, offset, memoffset;
+ u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
+ */
+ edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR_A));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+ mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+ mem_base = PCIEOFST_G(mem_reg) << 10;
+
+ start = addr & ~(mem_aperture-1);
+ offset = addr - start;
+ win_pf = PFNUM_V(hw->pfn);
+
+ csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
+ mem_reg, mem_aperture);
+ csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
+ mem_base, memoffset);
+ csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n",
+ start, offset, win_pf);
+ csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
+ mtype, addr, len);
+
+ for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
+ /*
+ * Move PCI-E Memory Window to our current transfer
+ * position. Read it back to ensure that changes propagate
+ * before we attempt to use the new value.
+ */
+ csio_wr_reg32(hw, pos | win_pf,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
+
+ while (offset < mem_aperture && len > 0) {
+ if (dir)
+ *buf++ = csio_rd_reg32(hw, mem_base + offset);
+ else
+ csio_wr_reg32(hw, *buf++, mem_base + offset);
+
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+ }
+ }
+ return 0;
+}
+
+/*
+ * csio_t5_dfs_create_ext_mem - setup debugfs for MC0 or MC1 to read the values
+ * @hw: the csio_hw
+ *
+ * This function creates files in the debugfs with external memory region
+ * MC0 & MC1.
+ */
+static void
+csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
+{
+ u32 size;
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+
+ if (i & EXT_MEM_ENABLE_F) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
+ csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_G(size));
+ }
+ if (i & EXT_MEM1_ENABLE_F) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR_A);
+ csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_G(size));
+ }
+}
+
+/* T5 adapter specific function */
+struct csio_hw_chip_ops t5_ops = {
+ .chip_set_mem_win = csio_t5_set_mem_win,
+ .chip_pcie_intr_handler = csio_t5_pcie_intr_handler,
+ .chip_flash_cfg_addr = csio_t5_flash_cfg_addr,
+ .chip_mc_read = csio_t5_mc_read,
+ .chip_edc_read = csio_t5_edc_read,
+ .chip_memory_rw = csio_t5_memory_rw,
+ .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem,
+};
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
new file mode 100644
index 000000000..e46a333c6
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -0,0 +1,1255 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "csio_init.h"
+#include "csio_defs.h"
+
+#define CSIO_MIN_MEMPOOL_SZ 64
+
+static struct dentry *csio_debugfs_root;
+
+static struct scsi_transport_template *csio_fcoe_transport;
+static struct scsi_transport_template *csio_fcoe_transport_vport;
+
+/*
+ * debugfs support
+ */
+static ssize_t
+csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file_inode(file)->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct csio_hw *hw = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
+ data, NULL);
+ else
+ ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
+ data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations csio_mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = csio_mem_read,
+ .llseek = default_llseek,
+};
+
+void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root,
+ (void *)hw + idx, &csio_mem_debugfs_fops,
+ size_mb << 20);
+}
+
+static int csio_setup_debugfs(struct csio_hw *hw)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(hw->debugfs_root))
+ return -1;
+
+ i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+ if (i & EDRAM0_ENABLE_F)
+ csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE_F)
+ csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
+
+ hw->chip_ops->chip_dfs_create_ext_mem(hw);
+ return 0;
+}
+
+/*
+ * csio_dfs_create - Creates and sets up per-hw debugfs.
+ *
+ */
+static int
+csio_dfs_create(struct csio_hw *hw)
+{
+ if (csio_debugfs_root) {
+ hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
+ csio_debugfs_root);
+ csio_setup_debugfs(hw);
+ }
+
+ return 0;
+}
+
+/*
+ * csio_dfs_destroy - Destroys per-hw debugfs.
+ */
+static int
+csio_dfs_destroy(struct csio_hw *hw)
+{
+ if (hw->debugfs_root)
+ debugfs_remove_recursive(hw->debugfs_root);
+
+ return 0;
+}
+
+/*
+ * csio_dfs_init - Debug filesystem initialization for the module.
+ *
+ */
+static int
+csio_dfs_init(void)
+{
+ csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!csio_debugfs_root)
+ pr_warn("Could not create debugfs entry, continuing\n");
+
+ return 0;
+}
+
+/*
+ * csio_dfs_exit - debugfs cleanup for the module.
+ */
+static void
+csio_dfs_exit(void)
+{
+ debugfs_remove(csio_debugfs_root);
+}
+
+/*
+ * csio_pci_init - PCI initialization.
+ * @pdev: PCI device.
+ * @bars: Bitmask of bars to be requested.
+ *
+ * Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ */
+static int
+csio_pci_init(struct pci_dev *pdev, int *bars)
+{
+ int rv = -ENODEV;
+
+ *bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_enable_device_mem(pdev))
+ goto err;
+
+ if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
+ goto err_disable_device;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_selected_regions(pdev, *bars);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return rv;
+
+}
+
+/*
+ * csio_pci_exit - PCI unitialization.
+ * @pdev: PCI device.
+ * @bars: Bars to be released.
+ *
+ */
+static void
+csio_pci_exit(struct pci_dev *pdev, int *bars)
+{
+ pci_release_selected_regions(pdev, *bars);
+ pci_disable_device(pdev);
+}
+
+/*
+ * csio_hw_init_workers - Initialize the HW module's worker threads.
+ * @hw: HW module.
+ *
+ */
+static void
+csio_hw_init_workers(struct csio_hw *hw)
+{
+ INIT_WORK(&hw->evtq_work, csio_evtq_worker);
+}
+
+static void
+csio_hw_exit_workers(struct csio_hw *hw)
+{
+ cancel_work_sync(&hw->evtq_work);
+ flush_scheduled_work();
+}
+
+static int
+csio_create_queues(struct csio_hw *hw)
+{
+ int i, j;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
+ return 0;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
+ 0, hw->pport[0].portid, false, NULL);
+ if (rv != 0) {
+ csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
+ return rv;
+ }
+ }
+
+ /* FW event queue */
+ rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
+ csio_get_fwevt_intr_idx(hw),
+ hw->pport[0].portid, true, NULL);
+ if (rv != 0) {
+ csio_err(hw, "FW event IQ config failed!: %d\n", rv);
+ return rv;
+ }
+
+ /* Create mgmt queue */
+ rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
+ mgmtm->iq_idx, hw->pport[0].portid, NULL);
+
+ if (rv != 0) {
+ csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
+ goto err;
+ }
+
+ /* Create SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < info->max_cpus; j++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+
+ rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
+ sqset->intr_idx, i, false, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module IQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
+ sqset->iq_idx, i, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module EQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
+ return 0;
+err:
+ csio_wr_destroy_queues(hw, true);
+ return -EINVAL;
+}
+
+/*
+ * csio_config_queues - Configure the DMA queues.
+ * @hw: HW module.
+ *
+ * Allocates memory for queues are registers them with FW.
+ */
+int
+csio_config_queues(struct csio_hw *hw)
+{
+ int i, j, idx, k = 0;
+ int rv;
+ struct csio_scsi_qset *sqset;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_scsi_qset *orig;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
+ return csio_create_queues(hw);
+
+ /* Calculate number of SCSI queues for MSIX we would like */
+ hw->num_scsi_msix_cpus = num_online_cpus();
+ hw->num_sqsets = num_online_cpus() * hw->num_pports;
+
+ if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
+ hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
+ hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
+ }
+
+ /* Initialize max_cpus, may get reduced during msix allocations */
+ for (i = 0; i < hw->num_pports; i++)
+ hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;
+
+ csio_dbg(hw, "nsqsets:%d scpus:%d\n",
+ hw->num_sqsets, hw->num_scsi_msix_cpus);
+
+ csio_intr_enable(hw);
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+
+ /* Allocate Forward interrupt iq. */
+ hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
+ CSIO_INTR_WRSIZE, CSIO_INGRESS,
+ (void *)hw, 0, 0, NULL);
+ if (hw->intr_iq_idx == -1) {
+ csio_err(hw,
+ "Forward interrupt queue creation failed\n");
+ goto intr_disable;
+ }
+ }
+
+ /* Allocate the FW evt queue */
+ hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
+ CSIO_FWEVT_WRSIZE,
+ CSIO_INGRESS, (void *)hw,
+ CSIO_FWEVT_FLBUFS, 0,
+ csio_fwevt_intx_handler);
+ if (hw->fwevt_iq_idx == -1) {
+ csio_err(hw, "FW evt queue creation failed\n");
+ goto intr_disable;
+ }
+
+ /* Allocate the mgmt queue */
+ mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
+ CSIO_MGMT_EQ_WRSIZE,
+ CSIO_EGRESS, (void *)hw, 0, 0, NULL);
+ if (mgmtm->eq_idx == -1) {
+ csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
+ goto intr_disable;
+ }
+
+ /* Use FW IQ for MGMT req completion */
+ mgmtm->iq_idx = hw->fwevt_iq_idx;
+
+ /* Allocate SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ sqset = &hw->sqset[i][j];
+
+ if (j >= info->max_cpus) {
+ k = j % info->max_cpus;
+ orig = &hw->sqset[i][k];
+ sqset->eq_idx = orig->eq_idx;
+ sqset->iq_idx = orig->iq_idx;
+ continue;
+ }
+
+ idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
+ CSIO_EGRESS, (void *)hw, 0, 0,
+ NULL);
+ if (idx == -1) {
+ csio_err(hw, "EQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+
+ sqset->eq_idx = idx;
+
+ idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
+ CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
+ (void *)hw, 0, 0,
+ csio_scsi_intx_handler);
+ if (idx == -1) {
+ csio_err(hw, "IQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+ sqset->iq_idx = idx;
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;
+
+ rv = csio_create_queues(hw);
+ if (rv != 0)
+ goto intr_disable;
+
+ /*
+ * Now request IRQs for the vectors. In the event of a failure,
+ * cleanup is handled internally by this function.
+ */
+ rv = csio_request_irqs(hw);
+ if (rv != 0)
+ return -EINVAL;
+
+ return 0;
+
+intr_disable:
+ csio_intr_disable(hw, false);
+
+ return -EINVAL;
+}
+
+static int
+csio_resource_alloc(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv = -ENOMEM;
+
+ wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
+ CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
+
+ hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_mb));
+ if (!hw->mb_mempool)
+ goto err;
+
+ hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_rnode));
+ if (!hw->rnode_mempool)
+ goto err_free_mb_mempool;
+
+ hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
+ CSIO_SCSI_RSP_LEN, 8, 0);
+ if (!hw->scsi_pci_pool)
+ goto err_free_rn_pool;
+
+ return 0;
+
+err_free_rn_pool:
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+err_free_mb_mempool:
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+err:
+ return rv;
+}
+
+static void
+csio_resource_free(struct csio_hw *hw)
+{
+ pci_pool_destroy(hw->scsi_pci_pool);
+ hw->scsi_pci_pool = NULL;
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+}
+
+/*
+ * csio_hw_alloc - Allocate and initialize the HW module.
+ * @pdev: PCI device.
+ *
+ * Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ */
+static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
+{
+ struct csio_hw *hw;
+
+ hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
+ if (!hw)
+ goto err;
+
+ hw->pdev = pdev;
+ strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
+
+ /* memory pool/DMA pool allocation */
+ if (csio_resource_alloc(hw))
+ goto err_free_hw;
+
+ /* Get the start address of registers from BAR 0 */
+ hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->regstart) {
+ csio_err(hw, "Could not map BAR 0, regstart = %p\n",
+ hw->regstart);
+ goto err_resource_free;
+ }
+
+ csio_hw_init_workers(hw);
+
+ if (csio_hw_init(hw))
+ goto err_unmap_bar;
+
+ csio_dfs_create(hw);
+
+ csio_dbg(hw, "hw:%p\n", hw);
+
+ return hw;
+
+err_unmap_bar:
+ csio_hw_exit_workers(hw);
+ iounmap(hw->regstart);
+err_resource_free:
+ csio_resource_free(hw);
+err_free_hw:
+ kfree(hw);
+err:
+ return NULL;
+}
+
+/*
+ * csio_hw_free - Uninitialize and free the HW module.
+ * @hw: The HW module
+ *
+ * Disable interrupts, uninit the HW module, free resources, free hw.
+ */
+static void
+csio_hw_free(struct csio_hw *hw)
+{
+ csio_intr_disable(hw, true);
+ csio_hw_exit_workers(hw);
+ csio_hw_exit(hw);
+ iounmap(hw->regstart);
+ csio_dfs_destroy(hw);
+ csio_resource_free(hw);
+ kfree(hw);
+}
+
+/**
+ * csio_shost_init - Create and initialize the lnode module.
+ * @hw: The HW module.
+ * @dev: The device associated with this invocation.
+ * @probe: Called from probe context or not?
+ * @os_pln: Parent lnode if any.
+ *
+ * Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes lnode module and registers with SCSI ML
+ * via scsi_host_add. This function is shared between physical and
+ * virtual node ports.
+ */
+struct csio_lnode *
+csio_shost_init(struct csio_hw *hw, struct device *dev,
+ bool probe, struct csio_lnode *pln)
+{
+ struct Scsi_Host *shost = NULL;
+ struct csio_lnode *ln;
+
+ csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
+ csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
+
+ /*
+ * hw->pdev is the physical port's PCI dev structure,
+ * which will be different from the NPIV dev structure.
+ */
+ if (dev == &hw->pdev->dev)
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_template,
+ sizeof(struct csio_lnode));
+ else
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_vport_template,
+ sizeof(struct csio_lnode));
+
+ if (!shost)
+ goto err;
+
+ ln = shost_priv(shost);
+ memset(ln, 0, sizeof(struct csio_lnode));
+
+ /* Link common lnode to this lnode */
+ ln->dev_num = (shost->host_no << 16);
+
+ shost->can_queue = CSIO_MAX_QUEUE;
+ shost->this_id = -1;
+ shost->unique_id = shost->host_no;
+ shost->max_cmd_len = 16; /* Max CDB length supported */
+ shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
+ hw->fres_info.max_ssns);
+ shost->max_lun = CSIO_MAX_LUN;
+ if (dev == &hw->pdev->dev)
+ shost->transportt = csio_fcoe_transport;
+ else
+ shost->transportt = csio_fcoe_transport_vport;
+
+ /* root lnode */
+ if (!hw->rln)
+ hw->rln = ln;
+
+ /* Other initialization here: Common, Transport specific */
+ if (csio_lnode_init(ln, hw, pln))
+ goto err_shost_put;
+
+ if (scsi_add_host(shost, dev))
+ goto err_lnode_exit;
+
+ return ln;
+
+err_lnode_exit:
+ csio_lnode_exit(ln);
+err_shost_put:
+ scsi_host_put(shost);
+err:
+ return NULL;
+}
+
+/**
+ * csio_shost_exit - De-instantiate the shost.
+ * @ln: The lnode module corresponding to the shost.
+ *
+ */
+void
+csio_shost_exit(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* Inform transport */
+ fc_remove_host(shost);
+
+ /* Inform SCSI ML */
+ scsi_remove_host(shost);
+
+ /* Flush all the events, so that any rnode removal events
+ * already queued are all handled, before we remove the lnode.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_evtq_flush(hw);
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_exit(ln);
+ scsi_host_put(shost);
+}
+
+struct csio_lnode *
+csio_lnode_alloc(struct csio_hw *hw)
+{
+ return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
+}
+
+void
+csio_lnodes_block_request(struct csio_hw *hw)
+{
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_request(struct csio_hw *hw)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_exit(struct csio_hw *hw, bool npiv)
+{
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
+ return;
+ }
+
+ /* Get all child lnodes(NPIV ports) */
+ spin_lock_irq(&hw->lock);
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete NPIV lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ fc_vport_terminate(ln->fc_vport);
+ }
+
+ /* Delete only npiv lnodes */
+ if (npiv)
+ goto free_lnodes;
+
+ cur_cnt = 0;
+ /* Get all physical lnodes */
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete physical lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
+ csio_shost_exit(lnode_list[ii]);
+ }
+
+free_lnodes:
+ kfree(lnode_list);
+}
+
+/*
+ * csio_lnode_init_post: Set lnode attributes after starting HW.
+ * @ln: lnode.
+ *
+ */
+static void
+csio_lnode_init_post(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ csio_fchost_attr_init(ln);
+
+ scsi_scan_host(shost);
+}
+
+/*
+ * csio_probe_one - Instantiate this function.
+ * @pdev: PCI device
+ * @id: Device ID
+ *
+ * This is the .probe() callback of the driver. This function:
+ * - Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ * - Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ * - Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initialized lnode module and registers with SCSI ML
+ * via scsi_host_add.
+ * - Enables interrupts, and starts the chip by kicking off the
+ * HW state machine.
+ * - Once hardware is ready, initiated scan of the host via
+ * scsi_scan_host.
+ */
+static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int rv;
+ int bars;
+ int i;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ /* probe only T5 cards */
+ if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
+ return -ENODEV;
+
+ rv = csio_pci_init(pdev, &bars);
+ if (rv)
+ goto err;
+
+ hw = csio_hw_alloc(pdev);
+ if (!hw) {
+ rv = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ pci_set_drvdata(pdev, hw);
+
+ if (csio_hw_start(hw) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to start FW, continuing in debug mode.\n");
+ return 0;
+ }
+
+ sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_G(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_G(hw->fwrev));
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_lnode_exit;
+
+ return 0;
+
+err_lnode_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+err_pci_exit:
+ csio_pci_exit(pdev, &bars);
+err:
+ dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
+ return rv;
+}
+
+/*
+ * csio_remove_one - Remove one instance of the driver at this PCI function.
+ * @pdev: PCI device
+ *
+ * Used during hotplug operation.
+ */
+static void csio_remove_one(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+ /* Stops lnode, Rnode s/m
+ * Quiesce IOs.
+ * All sessions with remote ports are unregistered.
+ */
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ csio_pci_exit(pdev, &bars);
+}
+
+/*
+ * csio_pci_error_detected - PCI error was detected
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+ /* Post PCI error detected evt to HW s/m
+ * HW s/m handles this evt by quiescing IOs, unregisters rports
+ * and finally takes the device to offline.
+ */
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_intr_disable(hw, true);
+ pci_disable_device(pdev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * csio_pci_slot_reset - PCI slot has been reset.
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int ready;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ /* Bring HW s/m to ready state.
+ * but don't resume IOs.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
+ ready = csio_is_hw_ready(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (ready) {
+ return PCI_ERS_RESULT_RECOVERED;
+ } else {
+ dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+/*
+ * csio_pci_resume - Resume normal operations
+ * @pdev: PCI device
+ *
+ */
+static void
+csio_pci_resume(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ struct csio_lnode *ln;
+ int rv = 0;
+ int i;
+
+ /* Bring the LINK UP and Resume IO */
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_resume_exit;
+
+ return;
+
+err_resume_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
+}
+
+static struct pci_error_handlers csio_err_handler = {
+ .error_detected = csio_pci_error_detected,
+ .slot_reset = csio_pci_slot_reset,
+ .resume = csio_pci_resume,
+};
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct pci_device_id csio_pci_tbl[] = {
+/* Define for FCoE uses PF6 */
+#define CH_PCI_DEVICE_ID_FUNCTION 0x6
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+
+#include "t4_pci_id_tbl.h"
+
+static struct pci_driver csio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ .id_table = csio_pci_tbl,
+ .probe = csio_probe_one,
+ .remove = csio_remove_one,
+ .err_handler = &csio_err_handler,
+};
+
+/*
+ * csio_init - Chelsio storage driver initialization function.
+ *
+ */
+static int __init
+csio_init(void)
+{
+ int rv = -ENOMEM;
+
+ pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
+
+ csio_dfs_init();
+
+ csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
+ if (!csio_fcoe_transport)
+ goto err;
+
+ csio_fcoe_transport_vport =
+ fc_attach_transport(&csio_fc_transport_vport_funcs);
+ if (!csio_fcoe_transport_vport)
+ goto err_vport;
+
+ rv = pci_register_driver(&csio_pci_driver);
+ if (rv)
+ goto err_pci;
+
+ return 0;
+
+err_pci:
+ fc_release_transport(csio_fcoe_transport_vport);
+err_vport:
+ fc_release_transport(csio_fcoe_transport);
+err:
+ csio_dfs_exit();
+ return rv;
+}
+
+/*
+ * csio_exit - Chelsio storage driver uninitialization .
+ *
+ * Function that gets called in the unload path.
+ */
+static void __exit
+csio_exit(void)
+{
+ pci_unregister_driver(&csio_pci_driver);
+ csio_dfs_exit();
+ fc_release_transport(csio_fcoe_transport_vport);
+ fc_release_transport(csio_fcoe_transport);
+}
+
+module_init(csio_init);
+module_exit(csio_exit);
+MODULE_AUTHOR(CSIO_DRV_AUTHOR);
+MODULE_DESCRIPTION(CSIO_DRV_DESC);
+MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
+MODULE_VERSION(CSIO_DRV_VERSION);
+/*(DEBLOBBED)*/
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
new file mode 100644
index 000000000..5cc5d317a
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -0,0 +1,137 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_INIT_H__
+#define __CSIO_INIT_H__
+
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_scsi.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_hw.h"
+
+#define CSIO_DRV_AUTHOR "Chelsio Communications"
+#define CSIO_DRV_LICENSE "Dual BSD/GPL"
+#define CSIO_DRV_DESC "Chelsio FCoE driver"
+#define CSIO_DRV_VERSION "1.0.0"
+
+extern struct fc_function_template csio_fc_transport_funcs;
+extern struct fc_function_template csio_fc_transport_vport_funcs;
+
+void csio_fchost_attr_init(struct csio_lnode *);
+
+/* INTx handlers */
+void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+/* Common os lnode APIs */
+void csio_lnodes_block_request(struct csio_hw *);
+void csio_lnodes_unblock_request(struct csio_hw *);
+void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
+void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
+
+struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
+ struct csio_lnode *);
+void csio_shost_exit(struct csio_lnode *);
+void csio_lnodes_exit(struct csio_hw *, bool);
+
+/* DebugFS helper routines */
+void csio_add_debugfs_mem(struct csio_hw *, const char *,
+ unsigned int, unsigned int);
+
+static inline struct Scsi_Host *
+csio_ln_to_shost(struct csio_lnode *ln)
+{
+ return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
+}
+
+/* SCSI -- locking version of get/put ioreqs */
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
+{
+ struct csio_ioreq *ioreq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ ioreq = csio_get_scsi_ioreq(scsim);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+
+ return ioreq;
+}
+
+static inline void
+csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct csio_ioreq *ioreq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq(scsim, ioreq);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_put_scsi_ddp_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+#endif /* ifndef __CSIO_INIT_H__ */
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
new file mode 100644
index 000000000..2fb71c6c3
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -0,0 +1,618 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+
+#include "csio_init.h"
+#include "csio_hw.h"
+
+static irqreturn_t
+csio_nondata_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ int rv;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_hw_slow_intr_handler(hw);
+ rv = csio_mb_isr_handler(hw);
+
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_handler - Common FW event handler routine.
+ * @hw: HW module.
+ *
+ * This is the ISR for FW events. It is shared b/w MSIX
+ * and INTx handlers.
+ */
+static void
+csio_fwevt_handler(struct csio_hw *hw)
+{
+ int rv;
+ unsigned long flags;
+
+ rv = csio_fwevtq_handler(hw);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+} /* csio_fwevt_handler */
+
+/*
+ * csio_fwevt_isr() - FW events MSIX ISR
+ * @irq:
+ * @dev_id:
+ *
+ * Process WRs on the FW event queue.
+ *
+ */
+static irqreturn_t
+csio_fwevt_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_fwevt_handler(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_isr() - INTx wrapper for handling FW events.
+ * @irq:
+ * @dev_id:
+ */
+void
+csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ csio_fwevt_handler(hw);
+} /* csio_fwevt_intx_handler */
+
+/*
+ * csio_process_scsi_cmpl - Process a SCSI WR completion.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ *
+ */
+static void
+csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *cbfn_q)
+{
+ struct csio_ioreq *ioreq;
+ uint8_t *scsiwr;
+ uint8_t subop;
+ void *cmnd;
+ unsigned long flags;
+
+ ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
+ if (likely(ioreq)) {
+ if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
+ subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
+ ((struct fw_scsi_abrt_cls_wr *)
+ scsiwr)->sub_opcode_to_chk_all_io);
+
+ csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
+ subop ? "Close" : "Abort",
+ ioreq, ioreq->wr_status);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (subop)
+ csio_scsi_closed(ioreq,
+ (struct list_head *)cbfn_q);
+ else
+ csio_scsi_aborted(ioreq,
+ (struct list_head *)cbfn_q);
+ /*
+ * We call scsi_done for I/Os that driver thinks aborts
+ * have timed out. If there is a race caused by FW
+ * completing abort at the exact same time that the
+ * driver has deteced the abort timeout, the following
+ * check prevents calling of scsi_done twice for the
+ * same command: once from the eh_abort_handler, another
+ * from csio_scsi_isr_handler(). This also avoids the
+ * need to check if csio_scsi_cmnd(req) is NULL in the
+ * fast path.
+ */
+ cmnd = csio_scsi_cmnd(ioreq);
+ if (unlikely(cmnd == NULL))
+ list_del_init(&ioreq->sm.sm_list);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (unlikely(cmnd == NULL))
+ csio_put_scsi_ioreq_lock(hw,
+ csio_hw_to_scsim(hw), ioreq);
+ } else {
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ }
+}
+
+/*
+ * csio_scsi_isr_handler() - Common SCSI ISR handler.
+ * @iq: Ingress queue pointer.
+ *
+ * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
+ * by calling csio_wr_process_iq_idx. If there are completions on the
+ * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
+ * Once done, add these completions onto the freelist.
+ * This routine is shared b/w MSIX and INTx.
+ */
+static inline irqreturn_t
+csio_scsi_isr_handler(struct csio_q *iq)
+{
+ struct csio_hw *hw = (struct csio_hw *)iq->owner;
+ LIST_HEAD(cbfn_q);
+ struct list_head *tmp;
+ struct csio_scsim *scm;
+ struct csio_ioreq *ioreq;
+ int isr_completions = 0;
+
+ scm = csio_hw_to_scsim(hw);
+
+ if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
+ &cbfn_q) != 0))
+ return IRQ_NONE;
+
+ /* Call back the completion routines */
+ list_for_each(tmp, &cbfn_q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ isr_completions++;
+ ioreq->io_cbfn(hw, ioreq);
+ /* Release ddp buffer if used for this req */
+ if (unlikely(ioreq->dcopy))
+ csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
+ ioreq->nsge);
+ }
+
+ if (isr_completions) {
+ /* Return the ioreqs back to ioreq->freelist */
+ csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
+ isr_completions);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_isr() - SCSI MSIX handler
+ * @irq:
+ * @dev_id:
+ *
+ * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+static irqreturn_t
+csio_scsi_isr(int irq, void *dev_id)
+{
+ struct csio_q *iq = (struct csio_q *) dev_id;
+ struct csio_hw *hw;
+
+ if (unlikely(!iq))
+ return IRQ_NONE;
+
+ hw = (struct csio_hw *)iq->owner;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_scsi_isr_handler(iq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_intx_handler() - SCSI INTx handler
+ * @irq:
+ * @dev_id:
+ *
+ * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+void
+csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ struct csio_q *iq = priv;
+
+ csio_scsi_isr_handler(iq);
+
+} /* csio_scsi_intx_handler */
+
+/*
+ * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
+ * @irq:
+ * @dev_id:
+ *
+ *
+ */
+static irqreturn_t
+csio_fcoe_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ struct csio_q *intx_q = NULL;
+ int rv;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ /* Disable the interrupt for this PCI function. */
+ if (hw->intr_mode == CSIO_IM_INTX)
+ csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
+
+ /*
+ * The read in the following function will flush the
+ * above write.
+ */
+ if (csio_hw_slow_intr_handler(hw))
+ ret = IRQ_HANDLED;
+
+ /* Get the INTx Forward interrupt IQ. */
+ intx_q = csio_get_q(hw, hw->intr_iq_idx);
+
+ CSIO_DB_ASSERT(intx_q);
+
+ /* IQ handler is not possible for intx_q, hence pass in NULL */
+ if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
+ ret = IRQ_HANDLED;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ rv = csio_mb_isr_handler(hw);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ return ret;
+}
+
+static void
+csio_add_msix_desc(struct csio_hw *hw)
+{
+ int i;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ int k = CSIO_EXTRA_VECS;
+ int len = sizeof(entryp->desc) - 1;
+ int cnt = hw->num_sqsets + k;
+
+ /* Non-data vector */
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+
+ entryp++;
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+ entryp++;
+
+ /* Name SCSI vecs */
+ for (i = k; i < cnt; i++, entryp++) {
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
+ CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
+ }
+}
+
+int
+csio_request_irqs(struct csio_hw *hw)
+{
+ int rv, i, j, k = 0;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
+ (hw->intr_mode == CSIO_IM_MSI) ?
+ 0 : IRQF_SHARED,
+ KBUILD_MODNAME, hw);
+ if (rv) {
+ if (hw->intr_mode == CSIO_IM_MSI)
+ pci_disable_msi(hw->pdev);
+ csio_err(hw, "Failed to allocate interrupt line.\n");
+ return -EINVAL;
+ }
+
+ goto out;
+ }
+
+ /* Add the MSIX vector descriptions */
+ csio_add_msix_desc(hw);
+
+ rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ /* Allocate IRQs for SCSI */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ for (j = 0; j < info->max_cpus; j++, k++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+ struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
+
+ rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
+ entryp[k].desc, q);
+ if (rv) {
+ csio_err(hw,
+ "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k].dev_id = (void *)q;
+
+ } /* for all scsi cpus */
+ } /* for all ports */
+
+out:
+ hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
+
+ return 0;
+
+err:
+ for (i = 0; i < k; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ pci_disable_msix(hw->pdev);
+
+ return -EINVAL;
+}
+
+static void
+csio_disable_msix(struct csio_hw *hw, bool free)
+{
+ int i;
+ struct csio_msix_entries *entryp;
+ int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
+
+ if (free) {
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ }
+ pci_disable_msix(hw->pdev);
+}
+
+/* Reduce per-port max possible CPUs */
+static void
+csio_reduce_sqsets(struct csio_hw *hw, int cnt)
+{
+ int i;
+ struct csio_scsi_cpu_info *info;
+
+ while (cnt < hw->num_sqsets) {
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ if (info->max_cpus > 1) {
+ info->max_cpus--;
+ hw->num_sqsets--;
+ if (hw->num_sqsets <= cnt)
+ break;
+ }
+ }
+ }
+
+ csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
+}
+
+static int
+csio_enable_msix(struct csio_hw *hw)
+{
+ int i, j, k, n, min, cnt;
+ struct csio_msix_entries *entryp;
+ struct msix_entry *entries;
+ int extra = CSIO_EXTRA_VECS;
+ struct csio_scsi_cpu_info *info;
+
+ min = hw->num_pports + extra;
+ cnt = hw->num_sqsets + extra;
+
+ /* Max vectors required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
+ cnt = min_t(uint8_t, hw->cfg_niq, cnt);
+
+ entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++)
+ entries[i].entry = (uint16_t)i;
+
+ csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
+
+ cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
+ if (cnt < 0) {
+ kfree(entries);
+ return cnt;
+ }
+
+ if (cnt < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
+ csio_reduce_sqsets(hw, cnt - extra);
+ }
+
+ /* Save off vectors */
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ entryp->vector = entries[i].vector;
+ }
+
+ /* Distribute vectors */
+ k = 0;
+ csio_set_nondata_intr_idx(hw, entries[k].entry);
+ csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
+ csio_set_fwevt_intr_idx(hw, entries[k++].entry);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ n = (j % info->max_cpus) + k;
+ hw->sqset[i][j].intr_idx = entries[n].entry;
+ }
+
+ k += info->max_cpus;
+ }
+
+ kfree(entries);
+ return 0;
+}
+
+void
+csio_intr_enable(struct csio_hw *hw)
+{
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+
+ /* Try MSIX, then MSI or fall back to INTx */
+ if ((csio_msi == 2) && !csio_enable_msix(hw))
+ hw->intr_mode = CSIO_IM_MSIX;
+ else {
+ /* Max iqs required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
+ !csio_is_hw_master(hw)) {
+ int extra = CSIO_EXTRA_MSI_IQS;
+
+ if (hw->cfg_niq < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n",
+ hw->cfg_niq - extra);
+ csio_reduce_sqsets(hw, hw->cfg_niq - extra);
+ }
+ }
+
+ if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
+ hw->intr_mode = CSIO_IM_MSI;
+ else
+ hw->intr_mode = CSIO_IM_INTX;
+ }
+
+ csio_dbg(hw, "Using %s interrupt mode.\n",
+ (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
+ ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
+}
+
+void
+csio_intr_disable(struct csio_hw *hw, bool free)
+{
+ csio_hw_intr_disable(hw);
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_MSIX:
+ csio_disable_msix(hw, free);
+ break;
+ case CSIO_IM_MSI:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ pci_disable_msi(hw->pdev);
+ break;
+ case CSIO_IM_INTX:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ break;
+ default:
+ break;
+ }
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
new file mode 100644
index 000000000..c00b2ff72
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -0,0 +1,2135 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ms.h>
+
+#include "csio_hw.h"
+#include "csio_mb.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_fcoe_rnodes = 1024;
+int csio_fdmi_enable = 1;
+
+#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)
+
+/* Lnode SM declarations */
+static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
+
+static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
+
+/* LN event mapping */
+static enum csio_ln_ev fwevt_to_lnevt[] = {
+ CSIO_LNE_NONE, /* None */
+ CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RCVD */
+ CSIO_LNE_NONE, /* PLOGO_RCVD */
+ CSIO_LNE_NONE, /* PRLI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RCVD */
+ CSIO_LNE_NONE, /* PRLO_RCVD */
+ CSIO_LNE_NONE, /* NPORT_ID_CHGD */
+ CSIO_LNE_LOGO, /* FLOGO_RCVD */
+ CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */
+ CSIO_LNE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_LNE_NONE, /* PRLI_TMO */
+ CSIO_LNE_NONE, /* ADISC_TMO */
+ CSIO_LNE_NONE, /* RSCN_DEV_LOST */
+ CSIO_LNE_NONE, /* SCR_ACC_RCVD */
+ CSIO_LNE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* LOGO_SNT */
+ CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_LNE_NONE : \
+ fwevt_to_lnevt[_evt])
+
+#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)
+#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)
+#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)
+#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
+
+/*
+ * csio_ln_match_by_portid - lookup lnode using given portid.
+ * @hw: HW module
+ * @portid: port-id.
+ *
+ * If found, returns lnode matching given portid otherwise returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln = hw->rln;
+ struct list_head *tmp;
+
+ /* Match siblings lnode with portid */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid == portid)
+ return ln;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
+ * @hw - HW module
+ * @vnpi - vnp index.
+ * Returns - If found, returns lnode matching given vnp id
+ * otherwise returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (sln->vnp_flowid == vnp_id)
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (cln->vnp_flowid == vnp_id)
+ return cln;
+ }
+ }
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+}
+
+/**
+ * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
+ * @hw: HW module.
+ * @wwpn: WWPN.
+ *
+ * If found, returns lnode matching given wwpn, returns NULL otherwise.
+ */
+struct csio_lnode *
+csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
+ return cln;
+ }
+ }
+ return NULL;
+}
+
+/* FDMI */
+static void
+csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
+{
+ struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
+ cmd->ct_rev = FC_CT_REV;
+ cmd->ct_fs_type = type;
+ cmd->ct_fs_subtype = sub_type;
+ cmd->ct_cmd = htons(op);
+}
+
+static int
+csio_hostname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
+ return 0;
+ return -1;
+}
+
+static int
+csio_osname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version) > 0)
+ return 0;
+
+ return -1;
+}
+
+static inline void
+csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+{
+ struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+ ae->type = htons(type);
+ len += 4; /* includes attribute type and length */
+ len = (len + 3) & ~3; /* should be multiple of 4 bytes */
+ ae->len = htons(len);
+ memcpy(ae->value, val, len);
+ *ptr += len;
+}
+
+/*
+ * csio_ln_fdmi_done - FDMI registeration completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ struct csio_lnode *ln = fdmi_req->lnode;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+}
+
+/*
+ * csio_ln_fdmi_rhba_cbfn - RHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ __be32 val;
+ __be16 mfs;
+ uint32_t numattrs = 0;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fs_fdmi_attrs *attrib_blk;
+ struct fc_fdmi_port_name *port_name;
+ uint8_t buf[64];
+ uint8_t *fc4_type;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+
+ /* Prepare CT hdr for RPA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);
+
+ /* Prepare RPA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ port_name = (struct fc_fdmi_port_name *)pld;
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*port_name);
+
+ /* Start appending Port attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ fc4_type = &buf[0];
+ memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ fc4_type[2] = 1;
+ fc4_type[7] = 1;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
+ fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ numattrs++;
+ val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ numattrs++;
+
+ if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
+ val = htonl(FC_PORTSPEED_1GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
+ val = htonl(FC_PORTSPEED_10GBIT);
+ else
+ val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ numattrs++;
+
+ mfs = ln->ln_sparm.csp.sp_bb_data;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ numattrs++;
+
+ strcpy(buf, "csiostor");
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+
+ if (!csio_hostname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+ attrib_blk->numattrs = htonl(numattrs);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+
+ /* Submit FDMI RPA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dprt_cbfn - DPRT completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ uint32_t numattrs = 0;
+ __be32 maxpayload = htonl(65536);
+ struct fc_fdmi_hba_identifier *hbaid;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fc_fdmi_rpl *reg_pl;
+ struct fs_fdmi_attrs *attrib_blk;
+ uint8_t buf[64];
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Prepare CT hdr for RHBA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
+ len = FC_CT_HDR_LEN;
+
+ /* Prepare RHBA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ hbaid = (struct fc_fdmi_hba_identifier *)pld;
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifer */
+ pld += sizeof(*hbaid);
+
+ /* Register one port per hba */
+ reg_pl = (struct fc_fdmi_rpl *)pld;
+ reg_pl->numport = htonl(1);
+ memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*reg_pl);
+
+ /* Start appending HBA attributes hba */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ numattrs++;
+
+ memset(buf, 0, sizeof(buf));
+
+ strcpy(buf, "Chelsio Communications");
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
+ (uint16_t)sizeof(hw->vpd.id));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ numattrs++;
+
+ if (!csio_osname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ (uint8_t *)&maxpayload,
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+ numattrs++;
+ attrib_blk->numattrs = htonl(numattrs);
+
+ /* Submit FDMI RHBA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dhba_cbfn - DHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ struct csio_lnode *ln = fdmi_req->lnode;
+ void *cmd;
+ struct fc_fdmi_port_name *port_name;
+ uint32_t len;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Send FDMI cmd to de-register any Port attributes if registered
+ * before
+ */
+
+ /* Prepare FDMI DPRT cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
+ len = FC_CT_HDR_LEN;
+ port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ len += sizeof(*port_name);
+
+ /* Submit FDMI request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/**
+ * csio_ln_fdmi_start - Start an FDMI request.
+ * @ln: lnode
+ * @context: session context
+ *
+ * Issued with lock held.
+ */
+int
+csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
+{
+ struct csio_ioreq *fdmi_req;
+ struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
+ void *cmd;
+ struct fc_fdmi_hba_identifier *hbaid;
+ uint32_t len;
+
+ if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
+ return -EPROTONOSUPPORT;
+
+ if (!csio_is_rnode_ready(fdmi_rn))
+ CSIO_INC_STATS(ln, n_fdmi_err);
+
+ /* Send FDMI cmd to de-register any HBA attributes if registered
+ * before
+ */
+
+ fdmi_req = ln->mgmt_req;
+ fdmi_req->lnode = ln;
+ fdmi_req->rnode = fdmi_rn;
+
+ /* Prepare FDMI DHBA cmd */
+ cmd = fdmi_req->dma_buf.vaddr;
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
+ len = FC_CT_HDR_LEN;
+
+ hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
+ len += sizeof(*hbaid);
+
+ /* Submit FDMI request */
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
+ }
+
+ return 0;
+}
+
+/*
+ * csio_ln_vnp_read_cbfn - vnp read completion handler.
+ * @hw: HW lnode
+ * @cbfn: Completion handler.
+ *
+ * Reads vnp response and updates ln parameters.
+ */
+static void
+csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
+ struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *clsp;
+ enum fw_retval retval;
+ __be32 nport_id;
+
+ retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+
+ memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
+ memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
+ ln->nport_id = ntohl(nport_id);
+ ln->nport_id = ln->nport_id >> 8;
+
+ /* Update WWNs */
+ /*
+ * This may look like a duplication of what csio_fcoe_enable_link()
+ * does, but is absolutely necessary if the vnpi changes between
+ * a FCOE LINK UP and FCOE LINK DOWN.
+ */
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ /* Copy common sparam */
+ csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
+ ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
+ ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
+ ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
+ ln->ln_sparm.csp.sp_features = csp->sp_features;
+ ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
+ ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
+ ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;
+
+ /* Copy word 0 & word 1 of class sparam */
+ clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
+ ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
+ ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
+ ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
+ ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ /* Send an event to update local attribs */
+ csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
+}
+
+/*
+ * csio_ln_vnp_read - Read vnp params.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_vnp_read(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ /* Allocate Mbox request */
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Prepare VNP Command */
+ csio_fcoe_vnp_read_init_mb(ln, mbp,
+ CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid,
+ ln->vnp_flowid,
+ cbfn);
+
+ /* Issue MBOX cmd */
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_fcoe_enable_link - Enable fcoe link.
+ * @ln: lnode
+ * @enable: enable/disable
+ * Issued with lock held.
+ * Issues mbox cmd to bring up FCOE link on port associated with given ln.
+ */
+static int
+csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ uint8_t sub_op;
+ struct fw_fcoe_link_cmd *lcmd;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ portid = ln->portid;
+ sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;
+
+ csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
+ sub_op ? "UP" : "DOWN", portid);
+
+ csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ portid, sub_op, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw,
+ "FCOE LINK %s cmd on port[%d] failed with "
+ "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (!enable)
+ goto out;
+
+ lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;
+
+ memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);
+
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ if (hw->pport[i].portid == portid)
+ memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);
+
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
+
+/*
+ * csio_ln_read_fcf_cbfn - Read fcf parameters
+ * @ln: lnode
+ *
+ * read fcf response and Update ln fcf information.
+ */
+static void
+csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
+ struct csio_fcf_info *fcf_info;
+ struct fw_fcoe_fcf_cmd *rsp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+ enum fw_retval retval;
+
+ retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ fcf_info = ln->fcfinfo;
+ fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
+ ntohs(rsp->priority_pkd));
+ fcf_info->vf_id = ntohs(rsp->vf_id);
+ fcf_info->vlan_id = rsp->vlan_id;
+ fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
+ fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
+ fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
+ fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
+ fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
+ fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
+ fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
+ memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
+ memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
+ memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
+ memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
+ memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+}
+
+/*
+ * csio_ln_read_fcf_entry - Read fcf entry.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_read_fcf_entry(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FCF information */
+ csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->portid, ln->fcf_flowid, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE FCF cmd\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_handle_link_up - Logical Linkup event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none.
+ *
+ * This event is received from FW, when virtual link is established between
+ * Physical port[ENode] and FCF. If its new vnpi, then local node object is
+ * created on this FCF and set to [ONLINE] state.
+ * Lnode waits for FW_RDEV_CMD event to be received indicating that
+ * Fabric login is completed and lnode moves to [READY] state.
+ *
+ * This called with hw lock held
+ */
+static void
+csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_lnode *ln = NULL;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ /* Pick lnode based on portid */
+ ln = csio_ln_lookup_by_portid(hw, portid);
+ if (!ln) {
+ csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
+ portid);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+
+ /* Check if lnode has valid vnp flowid */
+ if (ln->vnp_flowid != CSIO_INVALID_IDX) {
+ /* New VN-Port */
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_alloc(hw);
+ spin_lock_irq(&hw->lock);
+ if (!ln) {
+ csio_err(hw,
+ "failed to allocate fcoe lnode"
+ "for port:%d vnpi:x%x\n",
+ portid, vnpi);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+ ln->portid = portid;
+ }
+ ln->vnp_flowid = vnpi;
+ ln->dev_num &= ~0xFFFF;
+ ln->dev_num |= vnpi;
+ }
+
+ /*Initialize fcfi */
+ ln->fcf_flowid = fcfi;
+
+ csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
+
+ CSIO_INC_STATS(ln, n_link_up);
+
+ /* Send LINKUP event to SM */
+ csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
+}
+
+/*
+ * csio_post_event_rns
+ * @ln - FCOE lnode
+ * @evt - Given rnode event
+ * Returns - none
+ *
+ * Posts given rnode event to all FCOE rnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This called with hw lock held
+ */
+static void
+csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_post_event(&rn->sm, evt);
+ }
+}
+
+/*
+ * csio_cleanup_rns
+ * @ln - FCOE lnode
+ * Returns - none
+ *
+ * Frees all FCOE rnodes connected with given Lnode.
+ *
+ * This called with hw lock held
+ */
+static void
+csio_cleanup_rns(struct csio_lnode *ln)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next_rn;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_put_rnode(ln, rn);
+ }
+
+}
+
+/*
+ * csio_post_event_lns
+ * @ln - FCOE lnode
+ * @evt - Given lnode event
+ * Returns - none
+ *
+ * Posts given lnode event to all FCOE lnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This called with hw lock held
+ */
+static void
+csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct list_head *tmp;
+ struct csio_lnode *cln, *sln;
+
+ /* If NPIV lnode, send evt only to that and return */
+ if (csio_is_npiv_ln(ln)) {
+ csio_post_event(&ln->sm, evt);
+ return;
+ }
+
+ sln = ln;
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp;
+ csio_post_event(&cln->sm, evt);
+ }
+
+ /* Send evt to parent lnode */
+ csio_post_event(&ln->sm, evt);
+}
+
+/*
+ * csio_ln_down - Lcoal nport is down
+ * @ln - FCOE Lnode
+ * Returns - none
+ *
+ * Sends LINK_DOWN events to Lnode and its associated NPIVs lnodes.
+ *
+ * This called with hw lock held
+ */
+static void
+csio_ln_down(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
+}
+
+/*
+ * csio_handle_link_down - Logical Linkdown event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none
+ *
+ * This event is received from FW, when virtual link goes down between
+ * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on
+ * this vnpi[VN-Port] will be de-instantiated.
+ *
+ * This called with hw lock held
+ */
+static void
+csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_fcf_info *fp;
+ struct csio_lnode *ln;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (ln) {
+ fp = ln->fcfinfo;
+ CSIO_INC_STATS(ln, n_link_down);
+
+ /*Warn if linkdown received if lnode is not in ready state */
+ if (!csio_is_lnode_ready(ln)) {
+ csio_ln_warn(ln,
+ "warn: FCOE link is already in offline "
+ "Ignoring Fcoe linkdown event on portid %d\n",
+ portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* Verify portid */
+ if (fp->portid != portid) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid port %d\n", portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* verify fcfi */
+ if (ln->fcf_flowid != fcfi) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid fcfi x%x\n", fcfi);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
+
+ /* Send LINK_DOWN event to lnode s/m */
+ csio_ln_down(ln);
+
+ return;
+ } else {
+ csio_warn(hw,
+ "warn: FCOE linkdown recv with invalid vnpi x%x\n",
+ vnpi);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+}
+
+/*
+ * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
+ * @ln: Lnode module
+ *
+ * Returns True if FCOE lnode is in ready state.
+ */
+int
+csio_is_lnode_ready(struct csio_lnode *ln)
+{
+ return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+}
+
+/*****************************************************************************/
+/* START: Lnode SM */
+/*****************************************************************************/
+/*
+ * csio_lns_uninit - The request in uninit state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "uninit" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_online - The request in online state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "online" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_ln_warn(ln,
+ "warn: FCOE link is up already "
+ "Ignoring linkup on port:%d\n", ln->portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_set_state(&ln->sm, csio_lns_ready);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
+ spin_lock_irq(&hw->lock);
+
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ /* Fall through */
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_ready - The request in ready state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "ready" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[ready].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ /* Host need to issue aborts in case if FW has not returned
+ * WRs with status "ABORTED"
+ */
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ case CSIO_LNE_LOGO:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_offline - The request in offline state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "offline" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ case CSIO_LNE_DOWN_LINK:
+ case CSIO_LNE_LOGO:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[offline].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[offline]\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*****************************************************************************/
+/* END: Lnode SM */
+/*****************************************************************************/
+
+static void
+csio_free_fcfinfo(struct kref *kref)
+{
+ struct csio_fcf_info *fcfinfo = container_of(kref,
+ struct csio_fcf_info, kref);
+ kfree(fcfinfo);
+}
+
+/* Helper routines for attributes */
+/*
+ * csio_lnode_state_to_str - Get current state of FCOE lnode.
+ * @ln - lnode
+ * @str - state of lnode.
+ *
+ */
+void
+csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+{
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ strcpy(str, "UNINIT");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ strcpy(str, "READY");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ strcpy(str, "OFFLINE");
+ return;
+ }
+ strcpy(str, "UNKNOWN");
+} /* csio_lnode_state_to_str */
+
+
+int
+csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
+ struct fw_fcoe_port_stats *port_stats)
+{
+ struct csio_mb *mbp;
+ struct fw_fcoe_port_cmd_params portparams;
+ enum fw_retval retval;
+ int idx;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
+ return -EINVAL;
+ }
+ portparams.portid = portid;
+
+ for (idx = 1; idx <= 3; idx++) {
+ portparams.idx = (idx-1)*6 + 1;
+ portparams.nstats = 6;
+ if (idx == 3)
+ portparams.nstats = 4;
+ csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ &portparams, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FCoE port params failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_mb_process_portparams_rsp(hw, mbp, &retval,
+ &portparams, port_stats);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
+
+/*
+ * csio_ln_mgmt_wr_handler -Mgmt Work Request handler.
+ * @wr - WR.
+ * @len - WR len.
+ * This handler is invoked when an outstanding mgmt WR is completed.
+ * Its invoked in the context of FW event worker thread for every
+ * mgmt event received.
+ * Return - none.
+ */
+
+static void
+csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
+{
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_ioreq *io_req = NULL;
+ struct fw_fcoe_els_ct_wr *wr_cmd;
+
+
+ wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
+
+ if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
+ csio_err(mgmtm->hw,
+ "Invalid ELS CT WR length recvd, len:%x\n", len);
+ mgmtm->stats.n_err++;
+ return;
+ }
+
+ io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
+ io_req->wr_status = csio_wr_status(wr_cmd);
+
+ /* lookup ioreq exists in our active Q */
+ spin_lock_irq(&hw->lock);
+ if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
+ csio_err(mgmtm->hw,
+ "Error- Invalid IO handle recv in WR. handle: %p\n",
+ io_req);
+ mgmtm->stats.n_err++;
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ mgmtm = csio_hw_to_mgmtm(hw);
+
+ /* Dequeue from active queue */
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ spin_unlock_irq(&hw->lock);
+
+ /* io_req will be freed by completion handler */
+ if (io_req->io_cbfn)
+ io_req->io_cbfn(hw, io_req);
+}
+
+/**
+ * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
+ * @hw: HW module
+ * @cpl_op: CPL opcode
+ * @cmd: FW cmd/WR.
+ *
+ * Process received FCoE cmd/WR event from FW.
+ */
+void
+csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
+{
+ struct csio_lnode *ln;
+ struct csio_rnode *rn;
+ uint8_t portid, opcode = *(uint8_t *)cmd;
+ struct fw_fcoe_link_cmd *lcmd;
+ struct fw_wr_hdr *wr;
+ struct fw_rdev_wr *rdev_wr;
+ enum fw_fcoe_link_status lstatus;
+ uint32_t fcfi, rdev_flowid, vnpi;
+ enum csio_ln_ev evt;
+
+ if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
+
+ lcmd = (struct fw_fcoe_link_cmd *)cmd;
+ lstatus = lcmd->lstatus;
+ portid = FW_FCOE_LINK_CMD_PORTID_GET(
+ ntohl(lcmd->op_to_portid));
+ fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
+ vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
+
+ if (lstatus == FCOE_LINKUP) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_up(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+ /* HW un lock here */
+
+ } else if (lstatus == FCOE_LINKDOWN) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_down(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+ /* HW un lock here */
+ } else {
+ csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
+ lcmd->lstatus);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_PLD) {
+ wr = (struct fw_wr_hdr *) (cmd + 4);
+ if (FW_WR_OP_G(be32_to_cpu(wr->hi))
+ == FW_RDEV_WR) {
+
+ rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
+
+ rdev_flowid = FW_RDEV_WR_FLOWID_GET(
+ ntohl(rdev_wr->alloc_to_len16));
+ vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
+ ntohl(rdev_wr->flags_to_assoc_flowid));
+
+ csio_dbg(hw,
+ "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
+ "vnpi:0x%x\n", rdev_flowid,
+ rdev_wr->event_cause, vnpi);
+
+ if (rdev_wr->protocol != PROT_FCOE) {
+ csio_err(hw,
+ "FW_RDEV_WR: invalid proto:x%x "
+ "received with flowid:x%x\n",
+ rdev_wr->protocol,
+ rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ return;
+ }
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ csio_err(hw,
+ "FW_DEV_WR: invalid vnpi:x%x received "
+ "with flowid:x%x\n", vnpi, rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ rn = csio_confirm_rnode(ln, rdev_flowid,
+ &rdev_wr->u.fcoe_rdev);
+ if (!rn) {
+ csio_ln_dbg(ln,
+ "Failed to confirm rnode "
+ "for flowid:x%x\n", rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ /* save previous event for debugging */
+ ln->prev_evt = ln->cur_evt;
+ ln->cur_evt = rdev_wr->event_cause;
+ CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
+
+ /* Translate all the fabric events to lnode SM events */
+ evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
+ if (evt) {
+ csio_ln_dbg(ln,
+ "Posting event to lnode event:%d "
+ "cause:%d flowid:x%x\n", evt,
+ rdev_wr->event_cause, rdev_flowid);
+ csio_post_event(&ln->sm, evt);
+ }
+
+ /* Handover event to rn SM here. */
+ csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
+out_pld:
+ spin_unlock_irq(&hw->lock);
+ return;
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_G(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_MSG) {
+ wr = (struct fw_wr_hdr *) (cmd);
+ if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
+ csio_ln_mgmt_wr_handler(hw, wr,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_G(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else {
+ csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+}
+
+/**
+ * csio_lnode_start - Kickstart lnode discovery.
+ * @ln: lnode
+ *
+ * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
+ */
+int
+csio_lnode_start(struct csio_lnode *ln)
+{
+ int rv = 0;
+ if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ rv = csio_fcoe_enable_link(ln, 1);
+ ln->flags |= CSIO_LNF_LINK_ENABLE;
+ }
+
+ return rv;
+}
+
+/**
+ * csio_lnode_stop - Stop the lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by HW module to stop lnode and its associated NPIV
+ * lnodes.
+ */
+void
+csio_lnode_stop(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
+ if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ csio_fcoe_enable_link(ln, 0);
+ ln->flags &= ~CSIO_LNF_LINK_ENABLE;
+ }
+ csio_ln_dbg(ln, "stopping ln :%p\n", ln);
+}
+
+/**
+ * csio_lnode_close - Close an lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by HW module to close an lnode and its
+ * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
+ * set to uninitialized state.
+ */
+void
+csio_lnode_close(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_CLOSE);
+ if (csio_is_phys_ln(ln))
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+
+ csio_ln_dbg(ln, "closed ln :%p\n", ln);
+}
+
+/*
+ * csio_ln_prep_ecwr - Prepare ELS/CT WR.
+ * @io_req - IO request.
+ * @wr_len - WR len
+ * @immd_len - WR immediate data
+ * @sub_op - Sub opcode
+ * @sid - source portid.
+ * @did - destination portid
+ * @flow_id - flowid
+ * @fw_wr - ELS/CT WR to be prepared.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
+ uint32_t immd_len, uint8_t sub_op, uint32_t sid,
+ uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
+{
+ struct fw_fcoe_els_ct_wr *wr;
+ __be32 port_id;
+
+ wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
+ FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
+
+ wr_len = DIV_ROUND_UP(wr_len, 16);
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
+ FW_WR_LEN16_V(wr_len));
+ wr->els_ct_type = sub_op;
+ wr->ctl_pri = 0;
+ wr->cp_en_class = 0;
+ wr->cookie = io_req->fw_handle;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(
+ io_req->lnode->hwp, io_req->iq_idx));
+ wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
+ wr->tmo_val = (uint8_t) io_req->tmo;
+ port_id = htonl(sid);
+ memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
+ port_id = htonl(did);
+ memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
+ wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
+ return 0;
+}
+
+/*
+ * csio_ln_mgmt_submit_wr - Post elsct work request.
+ * @mgmtm - mgmtm
+ * @io_req - io request.
+ * @sub_op - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ * Prepares ELSCT Work request and sents it to FW.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
+ uint8_t sub_op, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_wr_pair wrp;
+ struct csio_lnode *ln = io_req->lnode;
+ struct csio_rnode *rn = io_req->rnode;
+ struct csio_hw *hw = mgmtm->hw;
+ uint8_t fw_wr[64];
+ struct ulptx_sgl dsgl;
+ uint32_t wr_size = 0;
+ uint8_t im_len = 0;
+ uint32_t wr_off = 0;
+
+ int ret = 0;
+
+ /* Calculate WR Size for this ELS REQ */
+ wr_size = sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Send as immediate data if pld < 256 */
+ if (pld_len < 256) {
+ wr_size += ALIGN(pld_len, 8);
+ im_len = (uint8_t)pld_len;
+ } else
+ wr_size += sizeof(struct ulptx_sgl);
+
+ /* Roundup WR size in units of 16 bytes */
+ wr_size = ALIGN(wr_size, 16);
+
+ /* Get WR to send ELS REQ */
+ ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
+ if (ret != 0) {
+ csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
+ io_req, ret);
+ return ret;
+ }
+
+ /* Prepare Generic WR used by all ELS/CT cmd */
+ csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
+ ln->nport_id, rn->nport_id,
+ csio_rn_flowid(rn),
+ &fw_wr[0]);
+
+ /* Copy ELS/CT WR CMD */
+ csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ wr_off += sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Copy payload to Immediate section of WR */
+ if (im_len)
+ csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
+ else {
+ /* Program DSGL to dma payload */
+ dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_MORE_F | ULPTX_NSGE_V(1));
+ dsgl.len0 = cpu_to_be32(pld_len);
+ dsgl.addr0 = cpu_to_be64(pld->paddr);
+ csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
+ sizeof(struct ulptx_sgl));
+ }
+
+ /* Issue work request to xmit ELS/CT req to FW */
+ csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
+ return ret;
+}
+
+/*
+ * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
+ * @io_req - IO Request
+ * @io_cbfn - Completion handler.
+ * @req_type - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ *
+ *
+ * This API used submit managment ELS/CT request.
+ * This called with hw lock held
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+
+ io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
+ io_req->fw_handle = (uintptr_t) (io_req);
+ io_req->eq_idx = mgmtm->eq_idx;
+ io_req->iq_idx = mgmtm->iq_idx;
+
+ rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
+ if (rv == 0) {
+ list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
+ mgmtm->stats.n_active++;
+ }
+ return rv;
+}
+
+/*
+ * csio_ln_fdmi_init - FDMI Init entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_init(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_dma_buf *dma_buf;
+
+ /* Allocate MGMT request required for FDMI */
+ ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ln->mgmt_req) {
+ csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Allocate Dma buffers for FDMI response Payload */
+ dma_buf = &ln->mgmt_req->dma_buf;
+ dma_buf->len = 2048;
+ dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
+ kfree(ln->mgmt_req);
+ ln->mgmt_req = NULL;
+ return -ENOMEM;
+ }
+
+ ln->flags |= CSIO_LNF_FDMI_ENABLE;
+ return 0;
+}
+
+/*
+ * csio_ln_fdmi_exit - FDMI exit entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_exit(struct csio_lnode *ln)
+{
+ struct csio_dma_buf *dma_buf;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (!ln->mgmt_req)
+ return 0;
+
+ dma_buf = &ln->mgmt_req->dma_buf;
+ if (dma_buf->vaddr)
+ pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ln->mgmt_req);
+ return 0;
+}
+
+int
+csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
+ unsigned long time, unsigned long max_scan_ticks,
+ unsigned long delta_scan_ticks)
+{
+ int rv = 0;
+
+ if (time >= max_scan_ticks)
+ return 1;
+
+ if (!ln->tgt_scan_tick)
+ ln->tgt_scan_tick = ticks;
+
+ if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {
+ if (!ln->last_scan_ntgts)
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ else {
+ if (ln->last_scan_ntgts == ln->n_scsi_tgts)
+ return 1;
+
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ }
+ ln->tgt_scan_tick = ticks;
+ }
+ return rv;
+}
+
+/*
+ * csio_notify_lnodes:
+ * @hw: HW module
+ * @note: Notification
+ *
+ * Called from the HW SM to fan out notifications to the
+ * Lnode SM. Since the HW SM is entered with lock held,
+ * there is no need to hold locks here.
+ *
+ */
+void
+csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying all nodes of event %d\n", note);
+
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+
+ switch (note) {
+ case CSIO_LN_NOTIFY_HWREADY:
+ csio_lnode_start(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWRESET:
+ case CSIO_LN_NOTIFY_HWREMOVE:
+ csio_lnode_close(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWSTOP:
+ csio_lnode_stop(ln);
+ break;
+
+ default:
+ break;
+
+ }
+ }
+}
+
+/*
+ * csio_disable_lnodes:
+ * @hw: HW module
+ * @portid:port id
+ * @disable: disable/enable flag.
+ * If disable=1, disables all lnode hosted on given physical port.
+ * otherwise enables all the lnodes on given phsysical port.
+ * This routine need to called with hw lock held.
+ */
+void
+csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
+
+ /* Traverse sibling lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid != portid)
+ continue;
+
+ if (disable)
+ csio_lnode_stop(ln);
+ else
+ csio_lnode_start(ln);
+ }
+}
+
+/*
+ * csio_ln_init - Initialize an lnode.
+ * @ln: lnode
+ *
+ */
+static int
+csio_ln_init(struct csio_lnode *ln)
+{
+ int rv = -EINVAL;
+ struct csio_lnode *rln, *pln;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_init_state(&ln->sm, csio_lns_uninit);
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+ ln->fcf_flowid = CSIO_INVALID_IDX;
+
+ if (csio_is_root_ln(ln)) {
+
+ /* This is the lnode used during initialization */
+
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF record\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&ln->fcf_lsthead);
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+
+ } else { /* Either a non-root physical or a virtual lnode */
+
+ /*
+ * THe rest is common for non-root physical and NPIV lnodes.
+ * Just get references to all other modules
+ */
+ rln = csio_root_lnode(ln);
+
+ if (csio_is_npiv_ln(ln)) {
+ /* NPIV */
+ pln = csio_parent_lnode(ln);
+ kref_get(&pln->fcfinfo->kref);
+ ln->fcfinfo = pln->fcfinfo;
+ } else {
+ /* Another non-root physical lnode (FCF) */
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
+ GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF info\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+ }
+
+ } /* if (!csio_is_root_ln(ln)) */
+
+ return 0;
+err:
+ return rv;
+}
+
+static void
+csio_ln_exit(struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+
+ csio_cleanup_rns(ln);
+ if (csio_is_npiv_ln(ln)) {
+ pln = csio_parent_lnode(ln);
+ kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
+ } else {
+ kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
+ if (csio_fdmi_enable)
+ csio_ln_fdmi_exit(ln);
+ }
+ ln->fcfinfo = NULL;
+}
+
+/**
+ * csio_lnode_init - Initialize the members of an lnode.
+ * @ln: lnode
+ *
+ */
+int
+csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
+ struct csio_lnode *pln)
+{
+ int rv = -EINVAL;
+
+ /* Link this lnode to hw */
+ csio_lnode_to_hw(ln) = hw;
+
+ /* Link child to parent if child lnode */
+ if (pln)
+ ln->pln = pln;
+ else
+ ln->pln = NULL;
+
+ /* Initialize scsi_tgt and timers to zero */
+ ln->n_scsi_tgts = 0;
+ ln->last_scan_ntgts = 0;
+ ln->tgt_scan_tick = 0;
+
+ /* Initialize rnode list */
+ INIT_LIST_HEAD(&ln->rnhead);
+ INIT_LIST_HEAD(&ln->cln_head);
+
+ /* Initialize log level for debug */
+ ln->params.log_level = hw->params.log_level;
+
+ if (csio_ln_init(ln))
+ goto err;
+
+ /* Add lnode to list of sibling or children lnodes */
+ spin_lock_irq(&hw->lock);
+ list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
+ if (pln)
+ pln->num_vports++;
+ spin_unlock_irq(&hw->lock);
+
+ hw->num_lns++;
+
+ return 0;
+err:
+ csio_lnode_to_hw(ln) = NULL;
+ return rv;
+}
+
+/**
+ * csio_lnode_exit - De-instantiate an lnode.
+ * @ln: lnode
+ *
+ */
+void
+csio_lnode_exit(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_ln_exit(ln);
+
+ /* Remove this lnode from hw->sln_head */
+ spin_lock_irq(&hw->lock);
+
+ list_del_init(&ln->sm.sm_list);
+
+ /* If it is children lnode, decrement the
+ * counter in its parent lnode
+ */
+ if (ln->pln)
+ ln->pln->num_vports--;
+
+ /* Update root lnode pointer */
+ if (list_empty(&hw->sln_head))
+ hw->rln = NULL;
+ else
+ hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
+
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_to_hw(ln) = NULL;
+ hw->num_lns--;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
new file mode 100644
index 000000000..372a67d12
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -0,0 +1,255 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_LNODE_H__
+#define __CSIO_LNODE_H__
+
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+
+
+#include "csio_defs.h"
+#include "csio_hw.h"
+
+#define CSIO_FCOE_MAX_NPIV 128
+#define CSIO_FCOE_MAX_RNODES 2048
+
+/* FDMI port attribute unknown speed */
+#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
+
+extern int csio_fcoe_rnodes;
+extern int csio_fdmi_enable;
+
+/* State machine evets */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+
+struct csio_fcf_info {
+ struct list_head list;
+ uint8_t priority;
+ uint8_t mac[6];
+ uint8_t name_id[8];
+ uint8_t fabric[8];
+ uint16_t vf_id;
+ uint8_t vlan_id;
+ uint16_t max_fcoe_size;
+ uint8_t fc_map[3];
+ uint32_t fka_adv;
+ uint32_t fcfi;
+ uint8_t get_next:1;
+ uint8_t link_aff:1;
+ uint8_t fpma:1;
+ uint8_t spma:1;
+ uint8_t login:1;
+ uint8_t portid;
+ uint8_t spma_mac[6];
+ struct kref kref;
+};
+
+/* Defines for flags */
+#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */
+#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
+#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
+#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
+
+/* Transport events */
+enum csio_ln_fc_evt {
+ CSIO_LN_FC_LINKUP = 1,
+ CSIO_LN_FC_LINKDOWN,
+ CSIO_LN_FC_RSCN,
+ CSIO_LN_FC_ATTRIB_UPDATE,
+};
+
+/* Lnode stats */
+struct csio_lnode_stats {
+ uint32_t n_link_up; /* Link down */
+ uint32_t n_link_down; /* Link up */
+ uint32_t n_err; /* error */
+ uint32_t n_err_nomem; /* memory not available */
+ uint32_t n_inval_parm; /* Invalid parameters */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_rnode_match; /* matched rnode */
+ uint32_t n_dev_loss_tmo; /* Device loss timeout */
+ uint32_t n_fdmi_err; /* fdmi err */
+ uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
+ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_rnode_alloc; /* rnode allocated */
+ uint32_t n_rnode_free; /* rnode freed */
+ uint32_t n_rnode_nomem; /* rnode alloc failure */
+ uint32_t n_input_requests; /* Input Requests */
+ uint32_t n_output_requests; /* Output Requests */
+ uint32_t n_control_requests; /* Control Requests */
+ uint32_t n_input_bytes; /* Input Bytes */
+ uint32_t n_output_bytes; /* Output Bytes */
+ uint32_t rsvd1;
+};
+
+/* Common Lnode params */
+struct csio_lnode_params {
+ uint32_t ra_tov;
+ uint32_t fcfi;
+ uint32_t log_level; /* Module level for debugging */
+};
+
+struct csio_service_parms {
+ struct fc_els_csp csp; /* Common service parms */
+ uint8_t wwpn[8]; /* WWPN */
+ uint8_t wwnn[8]; /* WWNN */
+ struct fc_els_cssp clsp[4]; /* Class service params */
+ uint8_t vvl[16]; /* Vendor version level */
+};
+
+/* Lnode */
+struct csio_lnode {
+ struct csio_sm sm; /* State machine + sibling
+ * lnode list.
+ */
+ struct csio_hw *hwp; /* Pointer to the HW module */
+ uint8_t portid; /* Port ID */
+ uint8_t rsvd1;
+ uint16_t rsvd2;
+ uint32_t dev_num; /* Device number */
+ uint32_t flags; /* Flags */
+ struct list_head fcf_lsthead; /* FCF entries */
+ struct csio_fcf_info *fcfinfo; /* FCF in use */
+ struct csio_ioreq *mgmt_req; /* MGMT request */
+
+ /* FCoE identifiers */
+ uint8_t mac[6];
+ uint32_t nport_id;
+ struct csio_service_parms ln_sparm; /* Service parms */
+
+ /* Firmware identifiers */
+ uint32_t fcf_flowid; /*fcf flowid */
+ uint32_t vnp_flowid;
+ uint16_t ssn_cnt; /* Registered Session */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+
+ /* Children */
+ struct list_head cln_head; /* Head of the children lnode
+ * list.
+ */
+ uint32_t num_vports; /* Total NPIV/children LNodes*/
+ struct csio_lnode *pln; /* Parent lnode of child
+ * lnodes.
+ */
+ struct list_head cmpl_q; /* Pending I/Os on this lnode */
+
+ /* Remote node information */
+ struct list_head rnhead; /* Head of rnode list */
+ uint32_t num_reg_rnodes; /* Number of rnodes registered
+ * with the host.
+ */
+ uint32_t n_scsi_tgts; /* Number of scsi targets
+ * found
+ */
+ uint32_t last_scan_ntgts;/* Number of scsi targets
+ * found per last scan.
+ */
+ uint32_t tgt_scan_tick; /* timer started after
+ * new tgt found
+ */
+ /* FC transport data */
+ struct fc_vport *fc_vport;
+ struct fc_host_statistics fch_stats;
+
+ struct csio_lnode_stats stats; /* Common lnode stats */
+ struct csio_lnode_params params; /* Common lnode params */
+};
+
+#define csio_lnode_to_hw(ln) ((ln)->hwp)
+#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
+#define csio_parent_lnode(ln) ((ln)->pln)
+#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
+#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
+#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
+
+#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
+#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
+#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
+
+
+#define csio_ln_dbg(_ln, _fmt, ...) \
+ csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_err(_ln, _fmt, ...) \
+ csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_warn(_ln, _fmt, ...) \
+ csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+/* HW->Lnode notifications */
+enum csio_ln_notify {
+ CSIO_LN_NOTIFY_HWREADY = 1,
+ CSIO_LN_NOTIFY_HWSTOP,
+ CSIO_LN_NOTIFY_HWREMOVE,
+ CSIO_LN_NOTIFY_HWRESET,
+};
+
+void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
+int csio_is_lnode_ready(struct csio_lnode *);
+void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
+struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
+int csio_get_phy_port_stats(struct csio_hw *, uint8_t ,
+ struct fw_fcoe_port_stats *);
+int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
+void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
+void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
+int csio_ln_fdmi_start(struct csio_lnode *, void *);
+int csio_lnode_start(struct csio_lnode *);
+void csio_lnode_stop(struct csio_lnode *);
+void csio_lnode_close(struct csio_lnode *);
+int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
+ struct csio_lnode *);
+void csio_lnode_exit(struct csio_lnode *);
+
+#endif /* ifndef __CSIO_LNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
new file mode 100644
index 000000000..9451787ca
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -0,0 +1,1676 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_mb.h"
+#include "csio_wr.h"
+
+#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
+
+/* MB Command/Response Helpers */
+/*
+ * csio_mb_fw_retval - FW return value from a mailbox response.
+ * @mbp: Mailbox structure
+ *
+ */
+enum fw_retval
+csio_mb_fw_retval(struct csio_mb *mbp)
+{
+ struct fw_cmd_hdr *hdr;
+
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ return FW_CMD_RETVAL_G(ntohl(hdr->lo));
+}
+
+/*
+ * csio_mb_hello - FW HELLO command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @m_mbox: Master mailbox number, if any.
+ * @a_mbox: Mailbox number for asycn notifications.
+ * @master: Device mastership.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->err_to_clearinit = htonl(
+ FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ?
+ m_mbox : FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) |
+ FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT_F);
+
+}
+
+/*
+ * csio_mb_process_hello_rsp - FW HELLO response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @state: State that the function is in.
+ * @mpfn: Master pfn
+ *
+ */
+void
+csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, enum csio_dev_state *state,
+ uint8_t *mpfn)
+{
+ struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
+ uint32_t value;
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS) {
+ hw->fwrev = ntohl(rsp->fwrev);
+
+ value = ntohl(rsp->err_to_clearinit);
+ *mpfn = FW_HELLO_CMD_MBMASTER_G(value);
+
+ if (value & FW_HELLO_CMD_INIT_F)
+ *state = CSIO_DEV_STATE_INIT;
+ else if (value & FW_HELLO_CMD_ERR_F)
+ *state = CSIO_DEV_STATE_ERR;
+ else
+ *state = CSIO_DEV_STATE_UNINIT;
+ }
+}
+
+/*
+ * csio_mb_bye - FW BYE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_reset - FW RESET command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @reset: Type of reset.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reset, int halt,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->val = htonl(reset);
+ cmdp->halt_pkd = htonl(halt);
+
+}
+
+/*
+ * csio_mb_params - FW PARAMS command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @pf: PF number.
+ * @vf: VF number.
+ * @nparams: Number of parameters
+ * @params: Parameter mnemonic array.
+ * @val: Parameter value array.
+ * @wr: Write/Read PARAMS.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int nparams,
+ const u32 *params, u32 *val, bool wr,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ uint32_t i;
+ uint32_t temp_params = 0, temp_val = 0;
+ struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
+ __be32 *p = &cmdp->param[0].mnem;
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ /* Write Params */
+ if (wr) {
+ while (nparams--) {
+ temp_params = *params++;
+ temp_val = *val++;
+
+ *p++ = htonl(temp_params);
+ *p++ = htonl(temp_val);
+ }
+ } else {
+ for (i = 0; i < nparams; i++, p += 2) {
+ temp_params = *params++;
+ *p = htonl(temp_params);
+ }
+ }
+
+}
+
+/*
+ * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @nparams: Number of parameters
+ * @val: Parameter value array.
+ *
+ */
+void
+csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, unsigned int nparams,
+ u32 *val)
+{
+ struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
+ uint32_t i;
+ __be32 *p = &rsp->param[0].val;
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS)
+ for (i = 0; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+}
+
+/*
+ * csio_mb_ldst - FW LDST command
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: timeout
+ * @reg: register
+ *
+ */
+void
+csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
+{
+ struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
+
+ /*
+ * Construct and send the Firmware LDST Command to retrieve the
+ * specified PCI-E Configuration Space register.
+ */
+ ldst_cmd->op_to_addrspace =
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
+ ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
+ ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+ ldst_cmd->u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
+ ldst_cmd->u.pcie.r = (uint8_t)reg;
+}
+
+/*
+ *
+ * csio_mb_caps_config - FW Read/Write Capabilities command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @wr: Write if 1, Read if 0
+ * @init: Turn on initiator mode.
+ * @tgt: Turn on target mode.
+ * @cofld: If 1, Control Offload for FCoE
+ * @cbfn: Callback, if any.
+ *
+ * This helper assumes that cmdp has MB payload from a previous CAPS
+ * read command.
+ */
+void
+csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ bool wr, bool init, bool tgt, bool cofld,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_caps_config_cmd *cmdp =
+ (struct fw_caps_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F));
+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ /* Read config */
+ if (!wr)
+ return;
+
+ /* Write config */
+ cmdp->fcoecaps = 0;
+
+ if (cofld)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
+ if (init)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
+ if (tgt)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
+}
+
+#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\
+ FW_PORT_CAP_ANEG)
+
+/*
+ * csio_mb_port- FW PORT command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: COmmand timeout
+ * @portid: Port ID to get/set info
+ * @wr: Write/Read PORT information.
+ * @fc: Flow control
+ * @caps: Port capabilites to set.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
+ unsigned int lfc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) |
+ FW_PORT_CMD_PORTID_V(portid));
+ if (!wr) {
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ return;
+ }
+
+ /* Set port */
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ if (fc & PAUSE_RX)
+ lfc |= FW_PORT_CAP_FC_RX;
+ if (fc & PAUSE_TX)
+ lfc |= FW_PORT_CAP_FC_TX;
+
+ if (!(caps & FW_PORT_CAP_ANEG))
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
+ else
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
+ lfc | mdi);
+}
+
+/*
+ * csio_mb_process_read_port_rsp - FW PORT command response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @caps: port capabilities
+ *
+ */
+void
+csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, uint16_t *caps)
+{
+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
+
+ if (*retval == FW_SUCCESS)
+ *caps = ntohs(rsp->u.info.pcap);
+}
+
+/*
+ * csio_mb_initialize - FW INITIALIZE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: COmmand timeout
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_iq_alloc - Initializes the mailbox to allocate an
+ * Ingress DMA queue in the firmware.
+ *
+ * @hw: The hw structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
+
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ cmdp->type_to_iqandstindex = htonl(
+ FW_IQ_CMD_VIID_V(iq_params->viid) |
+ FW_IQ_CMD_TYPE_V(iq_params->type) |
+ FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch));
+
+ cmdp->fl0size = htons(iq_params->fl0size);
+ cmdp->fl0size = htons(iq_params->fl1size);
+
+} /* csio_mb_iq_alloc */
+
+/*
+ * csio_mb_iq_write - Initializes the mailbox for writing into an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascased with iq-alloc request.
+ * @iq_params: Ingress queue params needed for writing.
+ * @cbfn: The call-back function
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this IQ write request can be cascaded with a previous
+ * IQ alloc request, and we dont want to over-write the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ uint32_t iq_start_stop = (iq_params->iq_start) ?
+ FW_IQ_CMD_IQSTART_F :
+ FW_IQ_CMD_IQSTOP_F;
+
+ /*
+ * If this IQ write is cascaded with IQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(iq_start_stop |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->iqid |= htons(iq_params->iqid);
+ cmdp->fl0id |= htons(iq_params->fl0id);
+ cmdp->fl1id |= htons(iq_params->fl1id);
+ cmdp->type_to_iqandstindex |= htonl(
+ FW_IQ_CMD_IQANDST_V(iq_params->iqandst) |
+ FW_IQ_CMD_IQANUS_V(iq_params->iqanus) |
+ FW_IQ_CMD_IQANUD_V(iq_params->iqanud) |
+ FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex));
+ cmdp->iqdroprss_to_iqesize |= htons(
+ FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) |
+ FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) |
+ FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) |
+ FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) |
+ FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) |
+ FW_IQ_CMD_IQESIZE_V(iq_params->iqesize));
+
+ cmdp->iqsize |= htons(iq_params->iqsize);
+ cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
+
+ if (iq_params->type == 0) {
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)|
+ FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen));
+ }
+
+ if (iq_params->fl0size && iq_params->fl0addr &&
+ (iq_params->fl0id != 0xFFFF)) {
+
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
+ FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
+ FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
+ cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
+ FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) |
+ FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) |
+ FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) |
+ FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) |
+ FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh));
+ cmdp->fl0size |= htons(iq_params->fl0size);
+ cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
+ }
+} /* csio_mb_iq_write */
+
+/*
+ * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation & writing.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
+ csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
+} /* csio_mb_iq_alloc_write */
+
+/*
+ * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
+ * of ingress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @iq_params: Ingress queue parameters, after allocation and write.
+ *
+ */
+void
+csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *ret_val,
+ struct csio_iq_params *iq_params)
+{
+ struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
+ if (*ret_val == FW_SUCCESS) {
+ iq_params->physiqid = ntohs(rsp->physiqid);
+ iq_params->iqid = ntohs(rsp->iqid);
+ iq_params->fl0id = ntohs(rsp->fl0id);
+ iq_params->fl1id = ntohs(rsp->fl1id);
+ } else {
+ iq_params->physiqid = iq_params->iqid =
+ iq_params->fl0id = iq_params->fl1id = 0;
+ }
+} /* csio_mb_iq_alloc_write_rsp */
+
+/*
+ * csio_mb_iq_free - Initializes the mailbox for freeing a
+ * specified Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Parameters of ingress queue, that is to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type));
+
+ cmdp->iqid = htons(iq_params->iqid);
+ cmdp->fl0id = htons(iq_params->fl0id);
+ cmdp->fl1id = htons(iq_params->fl1id);
+
+} /* csio_mb_iq_free */
+
+/*
+ * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
+ * an offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_mb_eq_ofld_alloc */
+
+/*
+ * csio_mb_eq_ofld_write - Initializes the mailbox for writing
+ * an alloacted offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascased with Eq-alloc request.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this EQ write request can be cascaded with a previous
+ * EQ alloc request, and we dont want to over-write the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
+ FW_EQ_OFLD_CMD_EQSTART_F :
+ FW_EQ_OFLD_CMD_EQSTOP_F;
+
+ /*
+ * If this EQ write is cascaded with EQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(eq_start_stop |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
+
+ cmdp->fetchszm_to_iqid |= htonl(
+ FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) |
+ FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) |
+ FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid));
+
+ cmdp->dcaen_to_eqsize |= htonl(
+ FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) |
+ FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) |
+ FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) |
+ FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize));
+
+ cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
+
+} /* csio_mb_eq_ofld_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocation
+ * writing into an Engress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
+ void *priv, uint32_t mb_tmo,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
+ csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
+ eq_ofld_params, cbfn);
+} /* csio_mb_eq_ofld_alloc_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
+ * & write egress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp, enum fw_retval *ret_val,
+ struct csio_eq_params *eq_ofld_params)
+{
+ struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
+
+ if (*ret_val == FW_SUCCESS) {
+ eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G(
+ ntohl(rsp->eqid_pkd));
+ eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G(
+ ntohl(rsp->physeqid_pkd));
+ } else
+ eq_ofld_params->eqid = 0;
+
+} /* csio_mb_eq_ofld_alloc_write_rsp */
+
+/*
+ * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
+ * specified Engress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data area.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
+
+} /* csio_mb_eq_ofld_free */
+
+/*
+ * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
+ * condition.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call back function.
+ *
+ *
+ */
+void
+csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
+ uint8_t cos, bool link_status, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_link_cmd *cmdp =
+ (struct fw_fcoe_link_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_portid = htonl((
+ FW_CMD_OP_V(FW_FCOE_LINK_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_FCOE_LINK_CMD_PORTID(port_id)));
+ cmdp->sub_opcode_fcfi = htonl(
+ FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
+ FW_FCOE_LINK_CMD_FCFI(fcfi));
+ cmdp->lstatus = link_status;
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_write_fcoe_link_cond_init_mb */
+
+/*
+ * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
+ * resource information(FW_GET_RES_INFO_CMD).
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_res_info_cmd *cmdp =
+ (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+
+ cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F));
+
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_res_info_init_mb */
+
+/*
+ * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
+ * in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: vnpi
+ * @iqid: iqid
+ * @vnport_wwnn: vnport WWNN
+ * @vnport_wwpn: vnport WWPN
+ * @cbfn: The call-back function.
+ *
+ *
+ */
+void
+csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
+ uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_FCOE_VNP_CMD_FCFI(fcfi)));
+
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+
+ cmdp->iqid = htons(iqid);
+
+ if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
+ cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
+
+ if (vnport_wwnn)
+ memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
+ if (vnport_wwpn)
+ memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
+
+} /* csio_fcoe_vnp_alloc_init_mb */
+
+/*
+ * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: vnpi
+ * @cbfn: The call-back handler.
+ */
+void
+csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
+ * alloacted VNP in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF flow id
+ * @vnpi: VNP flow id
+ * @cbfn: The call-back function.
+ * Return: None
+ */
+void
+csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
+ * FCF records.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcf_params: FC-Forwarder parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_fcf_cmd *cmdp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_FCOE_FCF_CMD_FCFI(fcfi));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_fcf_init_mb */
+
+void
+csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *,
+ struct csio_mb *))
+{
+ struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+ mbp->mb_size = 64;
+
+ cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16));
+
+ cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
+ FW_FCOE_STATS_CMD_PORT(portparams->portid);
+
+ cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
+ FW_FCOE_STATS_CMD_PORT_VALID;
+
+} /* csio_fcoe_read_portparams_init_mb */
+
+void
+csio_mb_process_portparams_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats)
+{
+ struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+ struct fw_fcoe_port_stats stats;
+ uint8_t *src;
+ uint8_t *dst;
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16));
+
+ memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
+
+ if (*retval == FW_SUCCESS) {
+ dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
+ src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
+ memcpy(dst, src, (portparams->nstats * 8));
+ if (portparams->idx == 1) {
+ /* Get the first 6 flits from the Mailbox */
+ portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
+ portstats->tx_bcast_frames = stats.tx_bcast_frames;
+ portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
+ portstats->tx_mcast_frames = stats.tx_mcast_frames;
+ portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
+ portstats->tx_ucast_frames = stats.tx_ucast_frames;
+ }
+ if (portparams->idx == 7) {
+ /* Get the second 6 flits from the Mailbox */
+ portstats->tx_drop_frames = stats.tx_drop_frames;
+ portstats->tx_offload_bytes = stats.tx_offload_bytes;
+ portstats->tx_offload_frames = stats.tx_offload_frames;
+#if 0
+ portstats->rx_pf_bytes = stats.rx_pf_bytes;
+ portstats->rx_pf_frames = stats.rx_pf_frames;
+#endif
+ portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
+ portstats->rx_bcast_frames = stats.rx_bcast_frames;
+ portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
+ }
+ if (portparams->idx == 13) {
+ /* Get the last 4 flits from the Mailbox */
+ portstats->rx_mcast_frames = stats.rx_mcast_frames;
+ portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
+ portstats->rx_ucast_frames = stats.rx_ucast_frames;
+ portstats->rx_err_frames = stats.rx_err_frames;
+ }
+ }
+}
+
+/* Entry points/APIs for MB module */
+/*
+ * csio_mb_intr_enable - Enable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
+ */
+void
+csio_mb_intr_enable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+}
+
+/*
+ * csio_mb_intr_disable - Disable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Disable bit in HostInterruptEnable CIM register.
+ */
+void
+csio_mb_intr_disable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
+ MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+}
+
+static void
+csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
+{
+ struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
+
+ if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) {
+ csio_info(hw, "FW print message:\n");
+ csio_info(hw, "\tdebug->dprtstridx = %d\n",
+ ntohs(dbg->u.prt.dprtstridx));
+ csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam0));
+ csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam1));
+ csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam2));
+ csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam3));
+ } else {
+ /* This is a FW assertion */
+ csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ dbg->u.assert.filename_0_7,
+ ntohl(dbg->u.assert.line),
+ ntohl(dbg->u.assert.x),
+ ntohl(dbg->u.assert.y));
+ }
+}
+
+static void
+csio_mb_debug_cmd_handler(struct csio_hw *hw)
+{
+ int i;
+ __be64 cmd[CSIO_MB_MAX_REGS];
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
+ int size = sizeof(struct fw_debug_cmd);
+
+ /* Copy mailbox data */
+ for (i = 0; i < size; i += 8)
+ cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
+
+ csio_mb_dump_fw_dbg(hw, cmd);
+
+ /* Notify FW of mailbox by setting owner as UP */
+ csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+ MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
+
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+}
+
+/*
+ * csio_mb_issue - generic routine for issuing Mailbox commands.
+ * @hw: The HW structure
+ * @mbp: Mailbox command to issue
+ *
+ * Caller should hold hw lock across this call.
+ */
+int
+csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ uint32_t owner, ctl;
+ int i;
+ uint32_t ii;
+ __be64 *cmd = mbp->mb;
+ __be64 hdr;
+ struct csio_mbm *mbm = &hw->mbm;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
+ int size = mbp->mb_size;
+ int rv = -EINVAL;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /* Determine mode */
+ if (mbp->mb_cbfn == NULL) {
+ /* Need to issue/get results in the same context */
+ if (mbp->tmo < CSIO_MB_POLL_FREQ) {
+ csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
+ goto error_out;
+ }
+ } else if (!csio_is_host_intr_enabled(hw) ||
+ !csio_is_hw_intr_enabled(hw)) {
+ csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
+ *((uint8_t *)mbp->mb));
+ goto error_out;
+ }
+
+ if (mbm->mcurrent != NULL) {
+ /* Queue mbox cmd, if another mbox cmd is active */
+ if (mbp->mb_cbfn == NULL) {
+ rv = -EBUSY;
+ csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb));
+
+ goto error_out;
+ } else {
+ list_add_tail(&mbp->list, &mbm->req_q);
+ CSIO_INC_STATS(mbm, n_activeq);
+
+ return 0;
+ }
+ }
+
+ /* Now get ownership of mailbox */
+ owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
+
+ if (!csio_mb_is_host_owner(owner)) {
+
+ for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
+ owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
+ /*
+ * Mailbox unavailable. In immediate mode, fail the command.
+ * In other modes, enqueue the request.
+ */
+ if (!csio_mb_is_host_owner(owner)) {
+ if (mbp->mb_cbfn == NULL) {
+ rv = owner ? -EBUSY : -ETIMEDOUT;
+
+ csio_dbg(hw,
+ "Couldnt own Mailbox %x op:0x%x "
+ "owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb), owner);
+ goto error_out;
+ } else {
+ if (mbm->mcurrent == NULL) {
+ csio_err(hw,
+ "Couldnt own Mailbox %x "
+ "op:0x%x owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb),
+ owner);
+ csio_err(hw,
+ "No outstanding driver"
+ " mailbox as well\n");
+ goto error_out;
+ }
+ }
+ }
+ }
+
+ /* Mailbox is available, copy mailbox data into it */
+ for (i = 0; i < size; i += 8) {
+ csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
+ cmd++;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ /* Start completion timers in non-immediate modes and notify FW */
+ if (mbp->mb_cbfn != NULL) {
+ mbm->mcurrent = mbp;
+ mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
+ csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+ MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
+ } else
+ csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ /* Flush posted writes */
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+
+ CSIO_INC_STATS(mbm, n_req);
+
+ if (mbp->mb_cbfn)
+ return 0;
+
+ /* Poll for completion in immediate mode */
+ cmd = mbp->mb;
+
+ for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
+ mdelay(CSIO_MB_POLL_FREQ);
+
+ /* Check for response */
+ ctl = csio_rd_reg32(hw, ctl_reg);
+ if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
+
+ if (!(ctl & MBMSGVALID_F)) {
+ csio_wr_reg32(hw, 0, ctl_reg);
+ continue;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ continue;
+ }
+
+ /* Copy response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+ csio_wr_reg32(hw, 0, ctl_reg);
+
+ if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
+ CSIO_INC_STATS(mbm, n_err);
+
+ CSIO_INC_STATS(mbm, n_rsp);
+ return 0;
+ }
+ }
+
+ CSIO_INC_STATS(mbm, n_tmo);
+
+ csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
+ hw->pfn, *((uint8_t *)cmd));
+
+ return -ETIMEDOUT;
+
+error_out:
+ CSIO_INC_STATS(mbm, n_err);
+ return rv;
+}
+
+/*
+ * csio_mb_completions - Completion handler for Mailbox commands
+ * @hw: The HW structure
+ * @cbfn_q: Completion queue.
+ *
+ */
+void
+csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ enum fw_retval rv;
+
+ while (!list_empty(cbfn_q)) {
+ mbp = list_first_entry(cbfn_q, struct csio_mb, list);
+ list_del_init(&mbp->list);
+
+ rv = csio_mb_fw_retval(mbp);
+ if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
+ CSIO_INC_STATS(mbm, n_err);
+ else if (rv != FW_HOSTERROR)
+ CSIO_INC_STATS(mbm, n_rsp);
+
+ if (mbp->mb_cbfn)
+ mbp->mb_cbfn(hw, mbp);
+ }
+}
+
+static void
+csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
+{
+ static char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
+ };
+
+ struct csio_pport *port = &hw->pport[port_id];
+
+ if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
+ csio_info(hw, "Port:%d - port module unplugged\n", port_id);
+ else if (port->mod_type < ARRAY_SIZE(mod_str))
+ csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
+ mod_str[port->mod_type]);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ csio_info(hw,
+ "Port:%d - unsupported optical port module "
+ "inserted\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ csio_info(hw,
+ "Port:%d - unknown port module inserted, forcing "
+ "TWINAX\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ csio_info(hw, "Port:%d - transceiver module error\n", port_id);
+ else
+ csio_info(hw, "Port:%d - unknown module type %d inserted\n",
+ port_id, port->mod_type);
+}
+
+int
+csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
+{
+ uint8_t opcode = *(uint8_t *)cmd;
+ struct fw_port_cmd *pcmd;
+ uint8_t port_id;
+ uint32_t link_status;
+ uint16_t action;
+ uint8_t mod_type;
+
+ if (opcode == FW_PORT_CMD) {
+ pcmd = (struct fw_port_cmd *)cmd;
+ port_id = FW_PORT_CMD_PORTID_G(
+ ntohl(pcmd->op_to_portid));
+ action = FW_PORT_CMD_ACTION_G(
+ ntohl(pcmd->action_to_len16));
+ if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+ csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
+ action);
+ return -EINVAL;
+ }
+
+ link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
+ mod_type = FW_PORT_CMD_MODTYPE_G(link_status);
+
+ hw->pport[port_id].link_status =
+ FW_PORT_CMD_LSTATUS_G(link_status);
+ hw->pport[port_id].link_speed =
+ FW_PORT_CMD_LSPEED_G(link_status);
+
+ csio_info(hw, "Port:%x - LINK %s\n", port_id,
+ FW_PORT_CMD_LSTATUS_G(link_status) ? "UP" : "DOWN");
+
+ if (mod_type != hw->pport[port_id].mod_type) {
+ hw->pport[port_id].mod_type = mod_type;
+ csio_mb_portmod_changed(hw, port_id);
+ }
+ } else if (opcode == FW_DEBUG_CMD) {
+ csio_mb_dump_fw_dbg(hw, cmd);
+ } else {
+ csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_mb_isr_handler - Handle mailboxes related interrupts.
+ * @hw: The HW structure
+ *
+ * Called from the ISR to handle Mailbox related interrupts.
+ * HW Lock should be held across this call.
+ */
+int
+csio_mb_isr_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ __be64 *cmd;
+ uint32_t ctl, cim_cause, pl_cause;
+ int i;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
+ int size;
+ __be64 hdr;
+ struct fw_cmd_hdr *fw_hdr;
+
+ pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
+ cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
+
+ if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
+ CSIO_INC_STATS(hw, n_mbint_unexp);
+ return -EINVAL;
+ }
+
+ /*
+ * The cause registers below HAVE to be cleared in the SAME
+ * order as below: The low level cause register followed by
+ * the upper level cause register. In other words, CIM-cause
+ * first followed by PL-Cause next.
+ */
+ csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
+ csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
+
+ ctl = csio_rd_reg32(hw, ctl_reg);
+
+ if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ if (!(ctl & MBMSGVALID_F)) {
+ csio_warn(hw,
+ "Stray mailbox interrupt recvd,"
+ " mailbox data not valid\n");
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+ return -EINVAL;
+ }
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ return -EINVAL;
+#if 0
+ case FW_ERROR_CMD:
+ case FW_INITIALIZE_CMD: /* When we are not master */
+#endif
+ }
+
+ CSIO_ASSERT(mbp != NULL);
+
+ cmd = mbp->mb;
+ size = mbp->mb_size;
+ /* Get response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+
+ mbm->mcurrent = NULL;
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, &mbm->cbfn_q);
+ CSIO_INC_STATS(mbm, n_cbfnq);
+
+ /*
+ * Enqueue event to EventQ. Events processing happens
+ * in Event worker thread context
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
+ CSIO_INC_STATS(hw, n_evt_drop);
+
+ return 0;
+
+ } else {
+ /*
+ * We can get here if mailbox MSIX vector is shared,
+ * or in INTx case. Or a stray interrupt.
+ */
+ csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+}
+
+/*
+ * csio_mb_tmo_handler - Timeout handler
+ * @hw: The HW structure
+ *
+ */
+struct csio_mb *
+csio_mb_tmo_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /*
+ * Could be a race b/w the completion handler and the timer
+ * and the completion handler won that race.
+ */
+ if (mbp == NULL) {
+ CSIO_DB_ASSERT(0);
+ return NULL;
+ }
+
+ fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
+ FW_CMD_OP_G(ntohl(fw_hdr->hi)));
+
+ mbm->mcurrent = NULL;
+ CSIO_INC_STATS(mbm, n_tmo);
+ fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT));
+
+ return mbp;
+}
+
+/*
+ * csio_mb_cancel_all - Cancel all waiting commands.
+ * @hw: The HW structure
+ * @cbfn_q: The callback queue.
+ *
+ * Caller should hold hw lock across this call.
+ */
+void
+csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ struct fw_cmd_hdr *hdr;
+ struct list_head *tmp;
+
+ if (mbm->mcurrent) {
+ mbp = mbm->mcurrent;
+
+ /* Stop mailbox completion timer */
+ del_timer_sync(&mbm->timer);
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, cbfn_q);
+ mbm->mcurrent = NULL;
+ }
+
+ if (!list_empty(&mbm->req_q)) {
+ list_splice_tail_init(&mbm->req_q, cbfn_q);
+ mbm->stats.n_activeq = 0;
+ }
+
+ if (!list_empty(&mbm->cbfn_q)) {
+ list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+ }
+
+ if (list_empty(cbfn_q))
+ return;
+
+ list_for_each(tmp, cbfn_q) {
+ mbp = (struct csio_mb *)tmp;
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
+ hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
+
+ CSIO_INC_STATS(mbm, n_cancel);
+ hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR));
+ }
+}
+
+/*
+ * csio_mbm_init - Initialize Mailbox module
+ * @mbm: Mailbox module
+ * @hw: The HW structure
+ * @timer: Timing function for interrupting mailboxes
+ *
+ * Initialize timer and the request/response queues.
+ */
+int
+csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
+ void (*timer_fn)(uintptr_t))
+{
+ struct timer_list *timer = &mbm->timer;
+
+ init_timer(timer);
+ timer->function = timer_fn;
+ timer->data = (unsigned long)hw;
+
+ INIT_LIST_HEAD(&mbm->req_q);
+ INIT_LIST_HEAD(&mbm->cbfn_q);
+ csio_set_mb_intr_idx(mbm, -1);
+
+ return 0;
+}
+
+/*
+ * csio_mbm_exit - Uninitialize mailbox module
+ * @mbm: Mailbox module
+ *
+ * Stop timer.
+ */
+void
+csio_mbm_exit(struct csio_mbm *mbm)
+{
+ del_timer_sync(&mbm->timer);
+
+ CSIO_DB_ASSERT(mbm->mcurrent == NULL);
+ CSIO_DB_ASSERT(list_empty(&mbm->req_q));
+ CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
+}
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
new file mode 100644
index 000000000..1bc82d0bc
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -0,0 +1,267 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_MB_H__
+#define __CSIO_MB_H__
+
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+#include "csio_defs.h"
+
+#define CSIO_STATS_OFFSET (2)
+#define CSIO_NUM_STATS_PER_MB (6)
+
+struct fw_fcoe_port_cmd_params {
+ uint8_t portid;
+ uint8_t idx;
+ uint8_t nstats;
+};
+
+#define CSIO_DUMP_MB(__hw, __num, __mb) \
+ csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
+ (unsigned long long)csio_rd_reg64(__hw, __mb), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 56))
+
+#define CSIO_MB_MAX_REGS 8
+#define CSIO_MAX_MB_SIZE 64
+#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
+#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
+
+/* Device master in HELLO command */
+enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
+
+enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
+
+enum csio_dev_state {
+ CSIO_DEV_STATE_UNINIT,
+ CSIO_DEV_STATE_INIT,
+ CSIO_DEV_STATE_ERR
+};
+
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
+do { \
+ if (__clear) \
+ memset((__cp), 0, \
+ CSIO_MB_MAX_REGS * sizeof(__be64)); \
+ INIT_LIST_HEAD(&(__mbp)->list); \
+ (__mbp)->tmo = (__tmo); \
+ (__mbp)->priv = (void *)(__priv); \
+ (__mbp)->mb_cbfn = (__fn); \
+ (__mbp)->mb_size = sizeof(*(__cp)); \
+} while (0)
+
+struct csio_mbm_stats {
+ uint32_t n_req; /* number of mbox req */
+ uint32_t n_rsp; /* number of mbox rsp */
+ uint32_t n_activeq; /* number of mbox req active Q */
+ uint32_t n_cbfnq; /* number of mbox req cbfn Q */
+ uint32_t n_tmo; /* number of mbox timeout */
+ uint32_t n_cancel; /* number of mbox cancel */
+ uint32_t n_err; /* number of mbox error */
+};
+
+/* Driver version of Mailbox */
+struct csio_mb {
+ struct list_head list; /* for req/resp */
+ /* queue in driver */
+ __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
+ int mb_size; /* Size of this
+ * mailbox.
+ */
+ uint32_t tmo; /* Timeout */
+ struct completion cmplobj; /* MB Completion
+ * object
+ */
+ void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
+ /* Callback fn */
+ void *priv; /* Owner private ptr */
+};
+
+struct csio_mbm {
+ uint32_t a_mbox; /* Async mbox num */
+ uint32_t intr_idx; /* Interrupt index */
+ struct timer_list timer; /* Mbox timer */
+ struct list_head req_q; /* Mbox request queue */
+ struct list_head cbfn_q; /* Mbox completion q */
+ struct csio_mb *mcurrent; /* Current mailbox */
+ uint32_t req_q_cnt; /* Outstanding mbox
+ * cmds
+ */
+ struct csio_mbm_stats stats; /* Statistics */
+};
+
+#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
+#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
+
+struct csio_iq_params;
+struct csio_eq_params;
+
+enum fw_retval csio_mb_fw_retval(struct csio_mb *);
+
+/* MB helpers */
+void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint32_t, uint32_t, enum csio_dev_master,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, enum csio_dev_state *,
+ uint8_t *);
+
+void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
+ unsigned int, unsigned int, const u32 *, u32 *, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, unsigned int , u32 *);
+
+void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reg);
+
+void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
+ bool, bool, bool, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint8_t, bool, uint32_t, uint16_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, uint16_t *);
+
+void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_iq_params *);
+
+void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_eq_params *);
+
+void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t , struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
+ uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t , uint16_t,
+ uint8_t [8], uint8_t [8],
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t , uint32_t, uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t, uint32_t,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
+ struct csio_mb *mbp, uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats);
+
+/* MB module functions */
+int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
+ void (*)(uintptr_t));
+void csio_mbm_exit(struct csio_mbm *);
+void csio_mb_intr_enable(struct csio_hw *);
+void csio_mb_intr_disable(struct csio_hw *);
+
+int csio_mb_issue(struct csio_hw *, struct csio_mb *);
+void csio_mb_completions(struct csio_hw *, struct list_head *);
+int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
+int csio_mb_isr_handler(struct csio_hw *);
+struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
+void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
+
+#endif /* ifndef __CSIO_MB_H__ */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
new file mode 100644
index 000000000..e9c3b045f
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -0,0 +1,921 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
+static void csio_rnode_exit(struct csio_rnode *);
+
+/* Static machine forward declarations */
+static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
+
+/* RNF event mapping */
+static enum csio_rn_ev fwevt_to_rnevt[] = {
+ CSIO_RNFE_NONE, /* None */
+ CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
+ CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
+ CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
+ CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
+ CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
+ CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
+ CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_RNFE_NONE, /* PRLI_TMO */
+ CSIO_RNFE_NONE, /* ADISC_TMO */
+ CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
+ CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
+ CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* LOGO_SNT */
+ CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_RNFE_NONE : \
+ fwevt_to_rnevt[_evt])
+int
+csio_is_rnode_ready(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_ready);
+}
+
+static int
+csio_is_rnode_uninit(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_uninit);
+}
+
+static int
+csio_is_rnode_wka(uint8_t rport_type)
+{
+ if ((rport_type == FLOGI_VFPORT) ||
+ (rport_type == FDISC_VFPORT) ||
+ (rport_type == NS_VNPORT) ||
+ (rport_type == FDMI_VNPORT))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * csio_rn_lookup - Finds the rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flowid.
+ *
+ * Does the rnode lookup on the given lnode and flowid.If no matching entry
+ * found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->flowid == flowid)
+ return rn;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
+ * @ln: lnode
+ * @wwpn: wwpn
+ *
+ * Does the rnode lookup on the given lnode and wwpn. If no matching entry
+ * found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
+ return rn;
+ }
+
+ return NULL;
+}
+
+/**
+ * csio_rnode_lookup_portid - Finds the rnode with the given portid
+ * @ln: lnode
+ * @portid: port id
+ *
+ * Lookup the rnode list for a given portid. If no matching entry
+ * found, NULL is returned.
+ */
+struct csio_rnode *
+csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->nport_id == portid)
+ return rn;
+ }
+
+ return NULL;
+}
+
+static int
+csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
+ uint32_t *vnp_flowid)
+{
+ struct csio_rnode *rnhead;
+ struct list_head *tmp, *tmp1;
+ struct csio_rnode *rn;
+ struct csio_lnode *ln_tmp;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ list_for_each(tmp1, &hw->sln_head) {
+ ln_tmp = (struct csio_lnode *) tmp1;
+ if (ln_tmp == ln)
+ continue;
+
+ rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+
+ rn = (struct csio_rnode *) tmp;
+ if (csio_is_rnode_ready(rn)) {
+ if (rn->flowid == rdev_flowid) {
+ *vnp_flowid = csio_ln_flowid(ln_tmp);
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct csio_rnode *
+csio_alloc_rnode(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
+ if (!rn)
+ goto err;
+
+ memset(rn, 0, sizeof(struct csio_rnode));
+ if (csio_rnode_init(rn, ln))
+ goto err_free;
+
+ CSIO_INC_STATS(ln, n_rnode_alloc);
+
+ return rn;
+
+err_free:
+ mempool_free(rn, hw->rnode_mempool);
+err:
+ CSIO_INC_STATS(ln, n_rnode_nomem);
+ return NULL;
+}
+
+static void
+csio_free_rnode(struct csio_rnode *rn)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
+
+ csio_rnode_exit(rn);
+ CSIO_INC_STATS(rn->lnp, n_rnode_free);
+ mempool_free(rn, hw->rnode_mempool);
+}
+
+/*
+ * csio_get_rnode - Gets rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flow id.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching
+ * rnode found, then new rnode with given npid is allocated and returned.
+ */
+static struct csio_rnode *
+csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rn;
+
+ rn = csio_rn_lookup(ln, flowid);
+ if (!rn) {
+ rn = csio_alloc_rnode(ln);
+ if (!rn)
+ return NULL;
+
+ rn->flowid = flowid;
+ }
+
+ return rn;
+}
+
+/*
+ * csio_put_rnode - Frees the given rnode
+ * @ln - lnode
+ * @flowid - flow id.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching
+ * rnode found, then new rnode with given npid is allocated and returned.
+ */
+void
+csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
+{
+ CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
+ csio_free_rnode(rn);
+}
+
+/*
+ * csio_confirm_rnode - confirms rnode based on wwpn.
+ * @ln: lnode
+ * @rdev_flowid: remote device flowid
+ * @rdevp: remote device params
+ * This routines searches other rnode in list having same wwpn of new rnode.
+ * If there is a match, then matched rnode is returned and otherwise new rnode
+ * is returned.
+ * returns rnode.
+ */
+struct csio_rnode *
+csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t rport_type;
+ struct csio_rnode *rn, *match_rn;
+ uint32_t vnp_flowid = 0;
+ __be32 *port_id;
+
+ port_id = (__be32 *)&rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+
+ /* Drop rdev event for cntrl port */
+ if (rport_type == FAB_CTLR_VNPORT) {
+ csio_ln_dbg(ln,
+ "Unhandled rport_type:%d recv in rdev evt "
+ "ssni:x%x\n", rport_type, rdev_flowid);
+ return NULL;
+ }
+
+ /* Lookup on flowid */
+ rn = csio_rn_lookup(ln, rdev_flowid);
+ if (!rn) {
+
+ /* Drop events with duplicate flowid */
+ if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
+ csio_ln_warn(ln,
+ "ssni:%x already active on vnpi:%x",
+ rdev_flowid, vnp_flowid);
+ return NULL;
+ }
+
+ /* Lookup on wwpn for NPORTs */
+ rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (!rn)
+ goto alloc_rnode;
+
+ } else {
+ /* Lookup well-known ports with nport id */
+ if (csio_is_rnode_wka(rport_type)) {
+ match_rn = csio_rnode_lookup_portid(ln,
+ ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
+ if (match_rn == NULL) {
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /*
+ * Now compare the wwpn to confirm that
+ * same port relogged in. If so update the matched rn.
+ * Else, go ahead and alloc a new rnode.
+ */
+ if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
+ if (rn == match_rn)
+ goto found_rnode;
+ csio_ln_dbg(ln,
+ "nport_id:x%x and wwpn:%llx"
+ " match for ssni:x%x\n",
+ rn->nport_id,
+ wwn_to_u64(rdevp->wwpn),
+ rdev_flowid);
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already"
+ "active ssni:x%x\n",
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+
+ /* Update rn */
+ goto found_rnode;
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /* wwpn match */
+ if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
+ goto found_rnode;
+
+ /* Search for rnode that have same wwpn */
+ match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (match_rn != NULL) {
+ csio_ln_dbg(ln,
+ "ssni:x%x changed for rport name(wwpn):%llx "
+ "did:x%x\n", rdev_flowid,
+ wwn_to_u64(rdevp->wwpn),
+ match_rn->nport_id);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+ } else {
+ csio_ln_dbg(ln,
+ "rnode wwpn mismatch found ssni:x%x "
+ "name(wwpn):%llx\n",
+ rdev_flowid,
+ wwn_to_u64(csio_rn_wwpn(rn)));
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already active "
+ "wwpn:%llx ssni:x%x\n",
+ wwn_to_u64(csio_rn_wwpn(rn)),
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+ }
+
+found_rnode:
+ csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* Update flowid */
+ csio_rn_flowid(rn) = rdev_flowid;
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ CSIO_INC_STATS(ln, n_rnode_match);
+ return rn;
+
+alloc_rnode:
+ rn = csio_get_rnode(ln, rdev_flowid);
+ if (!rn)
+ return NULL;
+
+ csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ return rn;
+}
+
+/*
+ * csio_rn_verify_rparams - verify rparams.
+ * @ln: lnode
+ * @rn: rnode
+ * @rdevp: remote device params
+ * returns success if rparams are verified.
+ */
+static int
+csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t null[8];
+ uint8_t rport_type;
+ uint8_t fc_class;
+ __be32 *did;
+
+ did = (__be32 *) &rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+ switch (rport_type) {
+ case FLOGI_VFPORT:
+ rn->role = CSIO_RNFR_FABRIC;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ /* NPIV support */
+ if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
+ ln->flags |= CSIO_LNF_NPIVSUPP;
+
+ break;
+
+ case NS_VNPORT:
+ rn->role = CSIO_RNFR_NS;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ break;
+
+ case REG_FC4_VNPORT:
+ case REG_VNPORT:
+ rn->role = CSIO_RNFR_NPORT;
+ if (rdevp->event_cause == PRLI_ACC_RCVD ||
+ rdevp->event_cause == PRLI_RCVD) {
+ if (FW_RDEV_WR_TASK_RETRY_ID_GET(
+ rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
+
+ if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_RETRY;
+
+ if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
+
+ if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_TARGET;
+
+ if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_INITIATOR;
+ }
+
+ break;
+
+ case FDMI_VNPORT:
+ case FAB_CTLR_VNPORT:
+ rn->role = 0;
+ break;
+
+ default:
+ csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
+ csio_rn_flowid(rn), rport_type);
+ return -EINVAL;
+ }
+
+ /* validate wwpn/wwnn for Name server/remote port */
+ if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
+ memset(null, 0, 8);
+ if (!memcmp(rdevp->wwnn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwnn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ if (!memcmp(rdevp->wwpn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwpn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ }
+
+ /* Copy wwnn, wwpn and nport id */
+ rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
+ memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
+ memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
+ rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
+ fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
+ rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
+
+ return 0;
+}
+
+static void
+__csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_unlock_irq(&hw->lock);
+ csio_reg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ if (rn->role & CSIO_RNFR_TARGET)
+ ln->n_scsi_tgts++;
+
+ if (rn->nport_id == FC_FID_MGMT_SERV)
+ csio_ln_fdmi_start(ln, (void *) rn);
+}
+
+static void
+__csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ LIST_HEAD(tmp_q);
+ int cmpl = 0;
+
+ if (!list_empty(&rn->host_cmpl_q)) {
+ csio_dbg(hw, "Returning completion queue I/Os\n");
+ list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
+ cmpl = 1;
+ }
+
+ if (rn->role & CSIO_RNFR_TARGET) {
+ ln->n_scsi_tgts--;
+ ln->last_scan_ntgts--;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ csio_unreg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ /* Cleanup I/Os that were waiting for rnode to unregister */
+ if (cmpl)
+ csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
+
+}
+
+/*****************************************************************************/
+/* START: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rns_uninit -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ }
+ break;
+ case CSIO_RNFE_LOGO_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_ready -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[ready]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_PRLI_DONE:
+ case CSIO_RNFE_PRLI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret)
+ __csio_reg_rnode(rn);
+ else
+ CSIO_INC_STATS(rn, n_err_inval);
+
+ break;
+ case CSIO_RNFE_DOWN:
+ csio_set_state(&rn->sm, csio_rns_offline);
+ __csio_unreg_rnode(rn);
+
+ /* FW expected to internally aborted outstanding SCSI WRs
+ * and return all SCSI WRs to host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_LOGO_RECV:
+ csio_set_state(&rn->sm, csio_rns_offline);
+
+ __csio_unreg_rnode(rn);
+
+ /* FW expected to internally aborted outstanding SCSI WRs
+ * and return all SCSI WRs to host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /*
+ * Each rnode receives CLOSE event when driver is removed or
+ * device is reset
+ * Note: All outstanding IOs on remote port need to returned
+ * to uppper layer with appropriate error before sending
+ * CLOSE event
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ __csio_unreg_rnode(rn);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ __csio_unreg_rnode(rn);
+
+ /*
+ * FW expected to internally aborted outstanding SCSI WRs
+ * and return all SCSI WRs to host with status "ABORTED".
+ */
+
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_offline -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_DOWN:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives CLOSE event when driver is removed or
+ * device is reset
+ * Note: All outstanding IOs on remote port need to returned
+ * to uppper layer with appropriate error before sending
+ * CLOSE event
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_disappeared -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives CLOSE event when driver is removed or
+ * device is reset.
+ * Note: All outstanding IOs on remote port need to returned
+ * to uppper layer with appropriate error before sending
+ * CLOSE event
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_DOWN:
+ case CSIO_RNFE_NAME_MISSING:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did x%x"
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did x%x"
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rnode_devloss_handler - Device loss event handler
+ * @rn: rnode
+ *
+ * Post event to close rnode SM and free rnode.
+ */
+void
+csio_rnode_devloss_handler(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+
+ /* ignore if same rnode came back as online */
+ if (csio_is_rnode_ready(rn))
+ return;
+
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/**
+ * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
+ * @rn: rnode
+ *
+ */
+void
+csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ enum csio_rn_ev evt;
+
+ evt = CSIO_FWE_TO_RNFE(fwevt);
+ if (!evt) {
+ csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
+ csio_rn_flowid(rn), fwevt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ return;
+ }
+ CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
+
+ /* Track previous & current events for debugging */
+ rn->prev_evt = rn->cur_evt;
+ rn->cur_evt = fwevt;
+
+ /* Post event to rnode SM */
+ csio_post_event(&rn->sm, evt);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/*
+ * csio_rnode_init - Initialize rnode.
+ * @rn: RNode
+ * @ln: Associated lnode
+ *
+ * Caller is responsible for holding the lock. The lock is required
+ * to be held for inserting the rnode in ln->rnhead list.
+ */
+static int
+csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
+{
+ csio_rnode_to_lnode(rn) = ln;
+ csio_init_state(&rn->sm, csio_rns_uninit);
+ INIT_LIST_HEAD(&rn->host_cmpl_q);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+
+ /* Add rnode to list of lnodes->rnhead */
+ list_add_tail(&rn->sm.sm_list, &ln->rnhead);
+
+ return 0;
+}
+
+static void
+csio_rnode_exit(struct csio_rnode *rn)
+{
+ list_del_init(&rn->sm.sm_list);
+ CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
+}
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
new file mode 100644
index 000000000..433434221
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -0,0 +1,141 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_RNODE_H__
+#define __CSIO_RNODE_H__
+
+#include "csio_defs.h"
+
+/* State machine evets */
+enum csio_rn_ev {
+ CSIO_RNFE_NONE = (uint32_t)0, /* None */
+ CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
+ * complete.
+ */
+ CSIO_RNFE_PRLI_DONE, /* PRLI completed */
+ CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
+ CSIO_RNFE_PRLI_RECV, /* Received PLOGI */
+ CSIO_RNFE_LOGO_RECV, /* Received LOGO */
+ CSIO_RNFE_PRLO_RECV, /* Received PRLO */
+ CSIO_RNFE_DOWN, /* Rnode is down */
+ CSIO_RNFE_CLOSE, /* Close rnode */
+ CSIO_RNFE_NAME_MISSING, /* Rnode name missing
+ * in name server.
+ */
+ CSIO_RNFE_MAX_EVENT,
+};
+
+/* rnode stats */
+struct csio_rnode_stats {
+ uint32_t n_err; /* error */
+ uint32_t n_err_inval; /* invalid parameter */
+ uint32_t n_err_nomem; /* error nomem */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* unexpected event */
+ uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
+ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_lun_rst; /* Number of resets of
+ * of LUNs under this
+ * target
+ */
+ uint32_t n_lun_rst_fail; /* Number of LUN reset
+ * failures.
+ */
+ uint32_t n_tgt_rst; /* Number of target resets */
+ uint32_t n_tgt_rst_fail; /* Number of target reset
+ * failures.
+ */
+};
+
+/* Defines for rnode role */
+#define CSIO_RNFR_INITIATOR 0x1
+#define CSIO_RNFR_TARGET 0x2
+#define CSIO_RNFR_FABRIC 0x4
+#define CSIO_RNFR_NS 0x8
+#define CSIO_RNFR_NPORT 0x10
+
+struct csio_rnode {
+ struct csio_sm sm; /* State machine -
+ * should be the
+ * 1st member
+ */
+ struct csio_lnode *lnp; /* Pointer to owning
+ * Lnode */
+ uint32_t flowid; /* Firmware ID */
+ struct list_head host_cmpl_q; /* SCSI IOs
+ * pending to completed
+ * to Mid-layer.
+ */
+ /* FC identifiers for remote node */
+ uint32_t nport_id;
+ uint16_t fcp_flags; /* FCP Flags */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+ uint32_t role; /* Fabric/Target/
+ * Initiator/NS
+ */
+ struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
+ struct csio_service_parms rn_sparm;
+
+ /* FC transport attributes */
+ struct fc_rport *rport; /* FC transport rport */
+ uint32_t supp_classes; /* Supported FC classes */
+ uint32_t maxframe_size; /* Max Frame size */
+ uint32_t scsi_id; /* Transport given SCSI id */
+
+ struct csio_rnode_stats stats; /* Common rnode stats */
+};
+
+#define csio_rn_flowid(rn) ((rn)->flowid)
+#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
+#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
+#define csio_rnode_to_lnode(rn) ((rn)->lnp)
+
+int csio_is_rnode_ready(struct csio_rnode *rn);
+void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
+
+struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
+struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
+ uint32_t, struct fcoe_rdev_entry *);
+
+void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
+
+void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
+
+void csio_reg_rnode(struct csio_rnode *);
+void csio_unreg_rnode(struct csio_rnode *);
+
+void csio_rnode_devloss_handler(struct csio_rnode *);
+
+#endif /* ifndef __CSIO_RNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
new file mode 100644
index 000000000..2c4562d82
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -0,0 +1,2529 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <asm/page.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_scsi.h"
+#include "csio_init.h"
+
+int csio_scsi_eqsize = 65536;
+int csio_scsi_iqlen = 128;
+int csio_scsi_ioreqs = 2048;
+uint32_t csio_max_scan_tmo;
+uint32_t csio_delta_scan_tmo = 5;
+int csio_lun_qdepth = 32;
+
+static int csio_ddp_descs = 128;
+
+static int csio_do_abrt_cls(struct csio_hw *,
+ struct csio_ioreq *, bool);
+
+static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
+
+/*
+ * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
+ * @ioreq: The I/O request
+ * @sld: Level information
+ *
+ * Should be called with lock held.
+ *
+ */
+static bool
+csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
+{
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
+
+ switch (sld->level) {
+ case CSIO_LEV_LUN:
+ if (scmnd == NULL)
+ return false;
+
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode) &&
+ ((uint64_t)scmnd->device->lun == sld->oslun));
+
+ case CSIO_LEV_RNODE:
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode));
+ case CSIO_LEV_LNODE:
+ return (ioreq->lnode == sld->lnode);
+ case CSIO_LEV_ALL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * csio_scsi_gather_active_ios - Gather active I/Os based on level
+ * @scm: SCSI module
+ * @sld: Level information
+ * @dest: The queue where these I/Os have to be gathered.
+ *
+ * Should be called with lock held.
+ */
+static void
+csio_scsi_gather_active_ios(struct csio_scsim *scm,
+ struct csio_scsi_level_data *sld,
+ struct list_head *dest)
+{
+ struct list_head *tmp, *next;
+
+ if (list_empty(&scm->active_q))
+ return;
+
+ /* Just splice the entire active_q into dest */
+ if (sld->level == CSIO_LEV_ALL) {
+ list_splice_tail_init(&scm->active_q, dest);
+ return;
+ }
+
+ list_for_each_safe(tmp, next, &scm->active_q) {
+ if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
+ list_del_init(tmp);
+ list_add_tail(tmp, dest);
+ }
+ }
+}
+
+static inline bool
+csio_scsi_itnexus_loss_error(uint16_t error)
+{
+ switch (error) {
+ case FW_ERR_LINK_DOWN:
+ case FW_RDEV_NOT_READY:
+ case FW_ERR_RDEV_LOST:
+ case FW_ERR_RDEV_LOGO:
+ case FW_ERR_RDEV_IMPL_LOGO:
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ *
+ * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
+ */
+static inline void
+csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
+{
+ struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ /* Check for Task Management */
+ if (likely(scmnd->SCp.Message == 0)) {
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = 0;
+ fcp_cmnd->fc_cmdref = 0;
+
+ memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+ fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
+
+ if (req->nsge)
+ if (req->datadir == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
+ else
+ fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
+ else
+ fcp_cmnd->fc_flags = 0;
+ } else {
+ memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ }
+}
+
+/*
+ * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
+ FW_SCSI_CMD_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ wr->r3 = 0;
+ memset(&wr->r5, 0, 8);
+
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r6 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r4_lo[0] = 0;
+ wr->u.fcoe.r4_lo[1] = 0;
+
+ /* Frame a FCP command */
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
+ sizeof(struct fw_scsi_cmd_wr)));
+}
+
+#define CSIO_SCSI_CMD_WR_SZ(_imm) \
+ (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \
+ ALIGN((_imm), 16)) /* Immed data */
+
+#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \
+ (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
+
+/*
+ * csio_scsi_cmd - Create a SCSI CMD WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the ingress queue and initializes it with SCSI CMD WR.
+ *
+ */
+static inline void
+csio_scsi_cmd(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (unlikely(req->drv_status != 0))
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_cmd_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*
+ * csio_scsi_init_ulptx_dsgl - Fill in a ULP_TX_SC_DSGL
+ * @hw: HW module
+ * @req: IO request
+ * @sgl: ULP TX SGL pointer.
+ *
+ */
+static inline void
+csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
+ struct ulptx_sgl *sgl)
+{
+ struct ulptx_sge_pair *sge_pair = NULL;
+ struct scatterlist *sgel;
+ uint32_t i = 0;
+ uint32_t xfer_len;
+ struct list_head *tmp;
+ struct csio_dma_buf *dma_buf;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+ ULPTX_NSGE_V(req->nsge));
+ /* Now add the data SGLs */
+ if (likely(!req->dcopy)) {
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
+ sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ continue;
+ }
+ if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[1] = cpu_to_be32(
+ sg_dma_len(sgel));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[0] = cpu_to_be32(
+ sg_dma_len(sgel));
+ }
+ }
+ } else {
+ /* Program sg elements with driver's DDP buffer */
+ xfer_len = scsi_bufflen(scmnd);
+ list_for_each(tmp, &req->gen_list) {
+ dma_buf = (struct csio_dma_buf *)tmp;
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(dma_buf->paddr);
+ sgl->len0 = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ } else if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[1] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[0] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ }
+ xfer_len -= min(xfer_len, dma_buf->len);
+ i++;
+ }
+ }
+}
+
+/*
+ * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL
+ *
+ * Wrapper for populating fw_scsi_read_wr.
+ */
+static inline void
+csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
+ FW_SCSI_READ_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/*
+ * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL
+ *
+ * Wrapper for populating fw_scsi_write_wr.
+ */
+static inline void
+csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
+ FW_SCSI_WRITE_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
+#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \
+do { \
+ (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \
+ ALIGN((imm), 16) + /* Immed data */ \
+ sizeof(struct ulptx_sgl); /* ulptx_sgl */ \
+ \
+ if (unlikely((req)->nsge > 1)) \
+ (sz) += (sizeof(struct ulptx_sge_pair) * \
+ (ALIGN(((req)->nsge - 1), 2) / 2)); \
+ /* Data SGE */ \
+} while (0)
+
+/*
+ * csio_scsi_read - Create a SCSI READ WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the ingress queue and initializes it with
+ * SCSI READ WR.
+ *
+ */
+static inline void
+csio_scsi_read(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_read_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_read_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_scsi_write - Create a SCSI WRITE WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the ingress queue and initializes it with
+ * SCSI WRITE WR.
+ *
+ */
+static inline void
+csio_scsi_write(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_write_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_write_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_setup_ddp - Setup DDP buffers for Read request.
+ * @req: IO req structure.
+ *
+ * Checks SGLs/Data buffers are virtually contiguous required for DDP.
+ * If contiguous,driver posts SGLs in the WR otherwise post internal
+ * buffers for such request for DDP.
+ */
+static inline void
+csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
+{
+#ifdef __CSIO_DEBUG__
+ struct csio_hw *hw = req->lnode->hwp;
+#endif
+ struct scatterlist *sgel = NULL;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+ uint64_t sg_addr = 0;
+ uint32_t ddp_pagesz = 4096;
+ uint32_t buf_off;
+ struct csio_dma_buf *dma_buf = NULL;
+ uint32_t alloc_len = 0;
+ uint32_t xfer_len = 0;
+ uint32_t sg_len = 0;
+ uint32_t i;
+
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ sg_addr = sg_dma_address(sgel);
+ sg_len = sg_dma_len(sgel);
+
+ buf_off = sg_addr & (ddp_pagesz - 1);
+
+ /* Except 1st buffer,all buffer addr have to be Page aligned */
+ if (i != 0 && buf_off) {
+ csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
+ sg_addr, sg_len);
+ goto unaligned;
+ }
+
+ /* Except last buffer,all buffer must end on page boundary */
+ if ((i != (req->nsge - 1)) &&
+ ((buf_off + sg_len) & (ddp_pagesz - 1))) {
+ csio_dbg(hw,
+ "SGL addr not ending on page boundary"
+ "(%llx:%d)\n", sg_addr, sg_len);
+ goto unaligned;
+ }
+ }
+
+ /* SGL's are virtually contiguous. HW will DDP to SGLs */
+ req->dcopy = 0;
+ csio_scsi_read(req);
+
+ return;
+
+unaligned:
+ CSIO_INC_STATS(scsim, n_unaligned);
+ /*
+ * For unaligned SGLs, driver will allocate internal DDP buffer.
+ * Once command is completed data from DDP buffer copied to SGLs
+ */
+ req->dcopy = 1;
+
+ /* Use gen_list to store the DDP buffers */
+ INIT_LIST_HEAD(&req->gen_list);
+ xfer_len = scsi_bufflen(scmnd);
+
+ i = 0;
+ /* Allocate ddp buffers for this request */
+ while (alloc_len < xfer_len) {
+ dma_buf = csio_get_scsi_ddp(scsim);
+ if (dma_buf == NULL || i > scsim->max_sge) {
+ req->drv_status = -EBUSY;
+ break;
+ }
+ alloc_len += dma_buf->len;
+ /* Added to IO req */
+ list_add_tail(&dma_buf->list, &req->gen_list);
+ i++;
+ }
+
+ if (!req->drv_status) {
+ /* set number of ddp bufs used */
+ req->nsge = i;
+ csio_scsi_read(req);
+ return;
+ }
+
+ /* release dma descs */
+ if (i > 0)
+ csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
+}
+
+/*
+ * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR
+ * @abort: abort OR close
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
+ bool abort)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ /* 0 for CHK_ALL_IO tells FW to look up t_cookie */
+ wr->sub_opcode_to_chk_all_io =
+ (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
+ FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
+ wr->r3[0] = 0;
+ wr->r3[1] = 0;
+ wr->r3[2] = 0;
+ wr->r3[3] = 0;
+ /* Since we re-use the same ioreq for abort as well */
+ wr->t_cookie = (uintptr_t) req;
+}
+
+static inline void
+csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (req->drv_status != 0)
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*****************************************************************************/
+/* START: SCSI SM */
+/*****************************************************************************/
+static void
+csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_START_IO:
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE) {
+ req->dcopy = 0;
+ csio_scsi_write(req);
+ } else
+ csio_setup_ddp(scsim, req);
+ } else {
+ csio_scsi_cmd(req);
+ }
+
+ if (likely(req->drv_status == 0)) {
+ /* change state and enqueue on active_q */
+ csio_set_state(&req->sm, csio_scsis_io_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_active);
+
+ return;
+ }
+ break;
+
+ case CSIO_SCSIE_START_TM:
+ csio_scsi_cmd(req);
+ if (req->drv_status == 0) {
+ /*
+ * NOTE: We collect the affected I/Os prior to issuing
+ * LUN reset, and not after it. This is to prevent
+ * aborting I/Os that get issued after the LUN reset,
+ * but prior to LUN reset completion (in the event that
+ * the host stack has not blocked I/Os to a LUN that is
+ * being reset.
+ */
+ csio_set_state(&req->sm, csio_scsis_tm_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_tm_active);
+ }
+ return;
+
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * NOTE:
+ * We could get here due to :
+ * - a window in the cleanup path of the SCSI module
+ * (csio_scsi_abort_io()). Please see NOTE in this function.
+ * - a window in the time we tried to issue an abort/close
+ * of a request to FW, and the FW completed the request
+ * itself.
+ * Print a message for now, and return INVAL either way.
+ */
+ req->drv_status = -EINVAL;
+ csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn;
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ /*
+ * In MSIX mode, with multiple queues, the SCSI compeltions
+ * could reach us sooner than the FW events sent to indicate
+ * I-T nexus loss (link down, remote device logo etc). We
+ * dont want to be returning such I/Os to the upper layer
+ * immediately, since we wouldnt have reported the I-T nexus
+ * loss itself. This forces us to serialize such completions
+ * with the reporting of the I-T nexus loss. Therefore, we
+ * internally queue up such up such completions in the rnode.
+ * The reporting of I-T nexus loss to the upper layer is then
+ * followed by the returning of I/Os in this internal queue.
+ * Having another state alongwith another queue helps us take
+ * actions for events such as ABORT received while we are
+ * in this rnode queue.
+ */
+ if (unlikely(req->wr_status != FW_SUCCESS)) {
+ rn = req->rnode;
+ /*
+ * FW says remote device is lost, but rnode
+ * doesnt reflect it.
+ */
+ if (csio_scsi_itnexus_loss_error(req->wr_status) &&
+ csio_is_rnode_ready(rn)) {
+ csio_set_state(&req->sm,
+ csio_scsis_shost_cmpl_await);
+ list_add_tail(&req->sm.sm_list,
+ &rn->host_cmpl_q);
+ }
+ }
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_tm_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_tm_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in aborting st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the ABORTED event that
+ * the original I/O was returned to driver by FW.
+ * We dont really care if the I/O was returned with success by
+ * FW (because the ABORT and completion of the I/O crossed each
+ * other), or any other return value. Once we are in aborting
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ CSIO_INC_STATS(scm, n_abrt_dups);
+ break;
+
+ case CSIO_SCSIE_ABORTED:
+
+ csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
+ req, req->wr_status, req->drv_status);
+ /*
+ * Check if original I/O WR completed before the Abort
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_warn(hw,
+ "Abort completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * There are the following possible scenarios:
+ * 1. The abort completed successfully, FW returned FW_SUCCESS.
+ * 2. The completion of an I/O and the receipt of
+ * abort for that I/O by the FW crossed each other.
+ * The FW returned FW_EINVAL. The original I/O would have
+ * returned with FW_SUCCESS or any other SCSI error.
+ * 3. The FW couldnt sent the abort out on the wire, as there
+ * was an I-T nexus loss (link down, remote device logged
+ * out etc). FW sent back an appropriate IT nexus loss status
+ * for the abort.
+ * 4. FW sent an abort, but abort timed out (remote device
+ * didnt respond). FW replied back with
+ * FW_SCSI_ABORT_TIMEDOUT.
+ * 5. FW couldnt genuinely abort the request for some reason,
+ * and sent us an error.
+ *
+ * The first 3 scenarios are treated as succesful abort
+ * operations by the host, while the last 2 are failed attempts
+ * to abort. Manipulate the return value of the request
+ * appropriately, so that host can convey these results
+ * back to the upper layer.
+ */
+ if ((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL) ||
+ csio_scsi_itnexus_loss_error(req->wr_status))
+ req->wr_status = FW_SCSI_ABORT_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * We can receive this event from the module
+ * cleanup paths, if the FW forgot to reply to the ABORT WR
+ * and left this ioreq in this state. For now, just ignore
+ * the event. The CLOSE event is sent to this state, as
+ * the LINK may have already gone down.
+ */
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in closing st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the CLOSED event that
+ * the original I/O was returned to driver by FW.
+ * We dont really care if the I/O was returned with success by
+ * FW (because the CLOSE and completion of the I/O crossed each
+ * other), or any other return value. Once we are in aborting
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_CLOSED:
+ /*
+ * Check if original I/O WR completed before the Close
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_fatal(hw,
+ "Close completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * Either close succeeded, or we issued close to FW at the
+ * same time FW compelted it to us. Either way, the I/O
+ * is closed.
+ */
+ CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL));
+ req->wr_status = FW_SCSI_CLOSE_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ switch (evt) {
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * Just succeed the abort request, and hope that
+ * the remote device unregister path will cleanup
+ * this I/O to the upper layer within a sane
+ * amount of time.
+ */
+ /*
+ * A close can come in during a LINK DOWN. The FW would have
+ * returned us the I/O back, but not the remote device lost
+ * FW event. In this interval, if the I/O times out at the upper
+ * layer, a close can come in. Take the same action as abort:
+ * return success, and hope that the remote device unregister
+ * path will cleanup this I/O. If the FW still doesnt send
+ * the msg, the close times out, and the upper layer resorts
+ * to the next level of error recovery.
+ */
+ req->drv_status = 0;
+ break;
+ case CSIO_SCSIE_DRVCLEANUP:
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+ default:
+ csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
+ evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+/*
+ * csio_scsi_cmpl_handler - WR completion handler for SCSI.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @priv: Private object
+ * @scsiwr: Pointer to SCSI WR.
+ *
+ * This is the WR completion handler called per completion from the
+ * ISR. It is called with lock held. It walks past the RSS and CPL message
+ * header where the actual WR is present.
+ * It then gets the status, WR handle (ioreq pointer) and the len of
+ * the WR, based on WR opcode. Only on a non-good status is the entire
+ * WR copied into the WR cache (ioreq->fw_wr).
+ * The ioreq corresponding to the WR is returned to the caller.
+ * NOTE: The SCSI queue doesnt allocate a freelist today, hence
+ * no freelist buffer is expected.
+ */
+struct csio_ioreq *
+csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
+{
+ struct csio_ioreq *ioreq = NULL;
+ struct cpl_fw6_msg *cpl;
+ uint8_t *tempwr;
+ uint8_t status;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ /* skip RSS header */
+ cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
+
+ if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
+ csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
+ cpl->opcode);
+ CSIO_INC_STATS(scm, n_inval_cplop);
+ return NULL;
+ }
+
+ tempwr = (uint8_t *)(cpl->data);
+ status = csio_wr_status(tempwr);
+ *scsiwr = tempwr;
+
+ if (likely((*tempwr == FW_SCSI_READ_WR) ||
+ (*tempwr == FW_SCSI_WRITE_WR) ||
+ (*tempwr == FW_SCSI_CMD_WR))) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_read_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+
+ return ioreq;
+ }
+
+ if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+ return ioreq;
+ }
+
+ csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
+ CSIO_INC_STATS(scm, n_inval_scsiop);
+ return NULL;
+}
+
+/*
+ * csio_scsi_cleanup_io_q - Cleanup the given queue.
+ * @scm: SCSI module.
+ * @q: Queue to be cleaned up.
+ *
+ * Called with lock held. Has to exit with lock held.
+ */
+void
+csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_ioreq *ioreq;
+ struct list_head *tmp, *next;
+ struct scsi_cmnd *scmnd;
+
+ /* Call back the completion routines of the active_q */
+ list_for_each_safe(tmp, next, q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ scmnd = csio_scsi_cmnd(ioreq);
+ spin_unlock_irq(&hw->lock);
+
+ /*
+ * Upper layers may have cleared this command, hence this
+ * check to avoid accessing stale references.
+ */
+ if (scmnd != NULL)
+ ioreq->io_cbfn(hw, ioreq);
+
+ spin_lock_irq(&scm->freelist_lock);
+ csio_put_scsi_ioreq(scm, ioreq);
+ spin_unlock_irq(&scm->freelist_lock);
+
+ spin_lock_irq(&hw->lock);
+ }
+}
+
+#define CSIO_SCSI_ABORT_Q_POLL_MS 2000
+
+static void
+csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
+{
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_hw *hw = ln->hwp;
+ int ready = 0;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int rv;
+
+ if (csio_scsi_cmnd(ioreq) != scmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ if (rv != 0) {
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+ }
+}
+
+/*
+ * csio_scsi_abort_io_q - Abort all I/Os on given queue
+ * @scm: SCSI module.
+ * @q: Queue to abort.
+ * @tmo: Timeout in ms
+ *
+ * Attempt to abort all I/Os on given queue, and wait for a max
+ * of tmo milliseconds for them to complete. Returns success
+ * if all I/Os are aborted. Else returns -ETIMEDOUT.
+ * Should be entered with lock held. Exits with lock held.
+ * NOTE:
+ * Lock has to be held across the loop that aborts I/Os, since dropping the lock
+ * in between can cause the list to be corrupted. As a result, the caller
+ * of this function has to ensure that the number of I/os to be aborted
+ * is finite enough to not cause lock-held-for-too-long issues.
+ */
+static int
+csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
+{
+ struct csio_hw *hw = scm->hw;
+ struct list_head *tmp, *next;
+ int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
+ struct scsi_cmnd *scmnd;
+
+ if (list_empty(q))
+ return 0;
+
+ csio_dbg(hw, "Aborting SCSI I/Os\n");
+
+ /* Now abort/close I/Os in the queue passed */
+ list_for_each_safe(tmp, next, q) {
+ scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
+ csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
+ }
+
+ /* Wait till all active I/Os are completed/aborted/closed */
+ while (!list_empty(q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all aborts completed */
+ if (list_empty(q))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
+ * @scm: SCSI module.
+ * @abort: abort required.
+ * Called with lock held, should exit with lock held.
+ * Can sleep when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
+{
+ struct csio_hw *hw = scm->hw;
+ int rv = 0;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ /* No I/Os pending */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Wait until all active I/Os are completed */
+ while (!list_empty(&scm->active_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Else abort */
+ if (abort) {
+ rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
+ if (rv == 0)
+ return rv;
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ }
+
+ csio_scsi_cleanup_io_q(scm, &scm->active_q);
+
+ CSIO_DB_ASSERT(list_empty(&scm->active_q));
+
+ return rv;
+}
+
+/*
+ * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
+ * @scm: SCSI module.
+ * @lnode: lnode
+ *
+ * Called with lock held, should exit with lock held.
+ * Can sleep (with dropped lock) when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_scsi_level_data sld;
+ int rv;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
+
+ sld.level = CSIO_LEV_LNODE;
+ sld.lnode = ln;
+ INIT_LIST_HEAD(&ln->cmpl_q);
+ csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
+
+ /* No I/Os pending on this lnode */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ /* Wait until all active I/Os on this lnode are completed */
+ while (!list_empty(&ln->cmpl_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
+
+ /* I/Os are pending, abort them */
+ rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
+ if (rv != 0) {
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
+ }
+
+ CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
+
+ return rv;
+}
+
+static ssize_t
+csio_show_hw_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (csio_is_hw_ready(hw))
+ return snprintf(buf, PAGE_SIZE, "ready\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "not ready\n");
+}
+
+/* Device reset */
+static ssize_t
+csio_device_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (*buf != '1')
+ return -EINVAL;
+
+ /* Delete NPIV lnodes */
+ csio_lnodes_exit(hw, 1);
+
+ /* Block upper IOs */
+ csio_lnodes_block_request(hw);
+
+ spin_lock_irq(&hw->lock);
+ csio_hw_reset(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_request(hw);
+ return count;
+}
+
+/* disable port */
+static ssize_t
+csio_disable_port(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ bool disable;
+
+ if (*buf == '1' || *buf == '0')
+ disable = (*buf == '1') ? true : false;
+ else
+ return -EINVAL;
+
+ /* Block upper IOs */
+ csio_lnodes_block_by_port(hw, ln->portid);
+
+ spin_lock_irq(&hw->lock);
+ csio_disable_lnodes(hw, ln->portid, disable);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_by_port(hw, ln->portid);
+ return count;
+}
+
+/* Show debug level */
+static ssize_t
+csio_show_dbg_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+}
+
+/* Store debug level */
+static ssize_t
+csio_store_dbg_level(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ uint32_t dbg_level = 0;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &dbg_level))
+ return -EINVAL;
+
+ ln->params.log_level = dbg_level;
+ hw->params.log_level = dbg_level;
+
+ return 0;
+}
+
+static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
+static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
+static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
+static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
+ csio_store_dbg_level);
+
+static struct device_attribute *csio_fcoe_lport_attrs[] = {
+ &dev_attr_hw_state,
+ &dev_attr_device_reset,
+ &dev_attr_disable_port,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static ssize_t
+csio_show_num_reg_rnodes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+}
+
+static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
+
+static struct device_attribute *csio_fcoe_vport_attrs[] = {
+ &dev_attr_num_reg_rnodes,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static inline uint32_t
+csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct scatterlist *sg;
+ uint32_t bytes_left;
+ uint32_t bytes_copy;
+ uint32_t buf_off = 0;
+ uint32_t start_off = 0;
+ uint32_t sg_off = 0;
+ void *sg_addr;
+ void *buf_addr;
+ struct csio_dma_buf *dma_buf;
+
+ bytes_left = scsi_bufflen(scmnd);
+ sg = scsi_sglist(scmnd);
+ dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
+
+ /* Copy data from driver buffer to SGs of SCSI CMD */
+ while (bytes_left > 0 && sg && dma_buf) {
+ if (buf_off >= dma_buf->len) {
+ buf_off = 0;
+ dma_buf = (struct csio_dma_buf *)
+ csio_list_next(dma_buf);
+ continue;
+ }
+
+ if (start_off >= sg->length) {
+ start_off -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+
+ buf_addr = dma_buf->vaddr + buf_off;
+ sg_off = sg->offset + start_off;
+ bytes_copy = min((dma_buf->len - buf_off),
+ sg->length - start_off);
+ bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
+ bytes_copy);
+
+ sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
+ if (!sg_addr) {
+ csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
+ sg, req);
+ break;
+ }
+
+ csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
+ sg_addr, sg_off, buf_addr, bytes_copy);
+ memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
+ kunmap_atomic(sg_addr);
+
+ start_off += bytes_copy;
+ buf_off += bytes_copy;
+ bytes_left -= bytes_copy;
+ }
+
+ if (bytes_left > 0)
+ return DID_ERROR;
+ else
+ return DID_OK;
+}
+
+/*
+ * csio_scsi_err_handler - SCSI error handler.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static inline void
+csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags, scsi_status = 0;
+ uint32_t host_status = DID_OK;
+ uint32_t rsp_len = 0, sns_len = 0;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+
+ switch (req->wr_status) {
+ case FW_HOSTERROR:
+ if (unlikely(!csio_is_hw_ready(hw)))
+ return;
+
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_hosterror);
+
+ break;
+ case FW_SCSI_RSP_ERR:
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+ flags = fcp_resp->resp.fr_flags;
+ scsi_status = fcp_resp->resp.fr_status;
+
+ if (flags & FCP_RSP_LEN_VAL) {
+ rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
+ if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
+ (rsp_info->rsp_code != FCP_TMF_CMPL)) {
+ host_status = DID_ERROR;
+ goto out;
+ }
+ }
+
+ if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
+ sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
+ if (sns_len > SCSI_SENSE_BUFFERSIZE)
+ sns_len = SCSI_SENSE_BUFFERSIZE;
+
+ memcpy(cmnd->sense_buffer,
+ &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
+ CSIO_INC_STATS(scm, n_autosense);
+ }
+
+ scsi_set_resid(cmnd, 0);
+
+ /* Under run */
+ if (flags & FCP_RESID_UNDER) {
+ scsi_set_resid(cmnd,
+ be32_to_cpu(fcp_resp->ext.fr_resid));
+
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (scsi_status == SAM_STAT_GOOD) &&
+ ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
+ < cmnd->underflow))
+ host_status = DID_ERROR;
+ } else if (flags & FCP_RESID_OVER)
+ host_status = DID_ERROR;
+
+ CSIO_INC_STATS(scm, n_rsperror);
+ break;
+
+ case FW_SCSI_OVER_FLOW_ERR:
+ csio_warn(hw,
+ "Over-flow error,cmnd:0x%x expected len:0x%x"
+ " resid:0x%x\n", cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_ovflerror);
+ break;
+
+ case FW_SCSI_UNDER_FLOW_ERR:
+ csio_warn(hw,
+ "Under-flow error,cmnd:0x%x expected"
+ " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
+ cmnd->cmnd[0], scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->device->lun,
+ rn->flowid);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_unflerror);
+ break;
+
+ case FW_SCSI_ABORT_REQUESTED:
+ case FW_SCSI_ABORTED:
+ case FW_SCSI_CLOSE_REQUESTED:
+ csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
+ cmnd->cmnd[0],
+ (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
+ "closed" : "aborted");
+ /*
+ * csio_eh_abort_handler checks this value to
+ * succeed or fail the abort request.
+ */
+ host_status = DID_REQUEUE;
+ if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
+ CSIO_INC_STATS(scm, n_closed);
+ else
+ CSIO_INC_STATS(scm, n_aborted);
+ break;
+
+ case FW_SCSI_ABORT_TIMEDOUT:
+ /* FW timed out the abort itself */
+ csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
+ req, cmnd, req->wr_status);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_abrt_timedout);
+ break;
+
+ case FW_RDEV_NOT_READY:
+ /*
+ * In firmware, a RDEV can get into this state
+ * temporarily, before moving into dissapeared/lost
+ * state. So, the driver should complete the request equivalent
+ * to device-disappeared!
+ */
+ CSIO_INC_STATS(scm, n_rdev_nr_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOST:
+ CSIO_INC_STATS(scm, n_rdev_lost_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOGO:
+ CSIO_INC_STATS(scm, n_rdev_logo_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_IMPL_LOGO:
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_LINK_DOWN:
+ CSIO_INC_STATS(scm, n_link_down_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_FCOE_NO_XCHG:
+ CSIO_INC_STATS(scm, n_no_xchg_error);
+ host_status = DID_ERROR;
+ break;
+
+ default:
+ csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
+ req->wr_status, req, cmnd);
+ CSIO_DB_ASSERT(0);
+
+ CSIO_INC_STATS(scm, n_unknown_error);
+ host_status = DID_ERROR;
+ break;
+ }
+
+out:
+ if (req->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+
+ /* Wake up waiting threads */
+ csio_scsi_cmnd(req) = NULL;
+ complete_all(&req->cmplobj);
+}
+
+/*
+ * csio_scsi_cbfn - SCSI callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static void
+csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ uint8_t scsi_status = SAM_STAT_GOOD;
+ uint32_t host_status = DID_OK;
+
+ if (likely(req->wr_status == FW_SUCCESS)) {
+ if (req->nsge > 0) {
+ scsi_dma_unmap(cmnd);
+ if (req->dcopy)
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+ csio_scsi_cmnd(req) = NULL;
+ CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
+ } else {
+ /* Error handling */
+ csio_scsi_err_handler(hw, req);
+ }
+}
+
+/**
+ * csio_queuecommand - Entry point to kickstart an I/O request.
+ * @host: The scsi_host pointer.
+ * @cmnd: The I/O request from ML.
+ *
+ * This routine does the following:
+ * - Checks for HW and Rnode module readiness.
+ * - Gets a free ioreq structure (which is already initialized
+ * to uninit during its allocation).
+ * - Maps SG elements.
+ * - Initializes ioreq members.
+ * - Kicks off the SCSI state machine for this IO.
+ * - Returns busy status on error.
+ */
+static int
+csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ unsigned long flags;
+ int nsge = 0;
+ int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
+ int retval;
+ int cpu;
+ struct csio_scsi_qset *sqset;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+ if (!blk_rq_cpu_valid(cmnd->request))
+ cpu = smp_processor_id();
+ else
+ cpu = cmnd->request->cpu;
+
+ sqset = &hw->sqset[ln->portid][cpu];
+
+ nr = fc_remote_port_chkready(rport);
+ if (nr) {
+ cmnd->result = nr;
+ CSIO_INC_STATS(scsim, n_rn_nr_error);
+ goto err_done;
+ }
+
+ if (unlikely(!csio_is_hw_ready(hw))) {
+ cmnd->result = (DID_REQUEUE << 16);
+ CSIO_INC_STATS(scsim, n_hw_nr_error);
+ goto err_done;
+ }
+
+ /* Get req->nsge, if there are SG elements to be mapped */
+ nsge = scsi_dma_map(cmnd);
+ if (unlikely(nsge < 0)) {
+ CSIO_INC_STATS(scsim, n_dmamap_error);
+ goto err;
+ }
+
+ /* Do we support so many mappings? */
+ if (unlikely(nsge > scsim->max_sge)) {
+ csio_warn(hw,
+ "More SGEs than can be supported."
+ " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
+ CSIO_INC_STATS(scsim, n_unsupp_sge_error);
+ goto err_dma_unmap;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+ if (!ioreq) {
+ csio_err(hw, "Out of I/O request elements. Active #:%d\n",
+ scsim->stats.n_active);
+ CSIO_INC_STATS(scsim, n_no_req_error);
+ goto err_dma_unmap;
+ }
+
+ ioreq->nsge = nsge;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+ ioreq->wr_status = 0;
+ ioreq->drv_status = 0;
+ csio_scsi_cmnd(ioreq) = (void *)cmnd;
+ ioreq->tmo = 0;
+ ioreq->datadir = cmnd->sc_data_direction;
+
+ if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ CSIO_INC_STATS(ln, n_output_requests);
+ ln->stats.n_output_bytes += scsi_bufflen(cmnd);
+ } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
+ CSIO_INC_STATS(ln, n_input_requests);
+ ln->stats.n_input_bytes += scsi_bufflen(cmnd);
+ } else
+ CSIO_INC_STATS(ln, n_control_requests);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_scsi_cbfn;
+
+ /* Needed during abort */
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Message = 0;
+
+ /* Kick off SCSI IO SM on the ioreq */
+ spin_lock_irqsave(&hw->lock, flags);
+ retval = csio_scsi_start_io(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ ioreq, retval);
+ CSIO_INC_STATS(scsim, n_busy_error);
+ goto err_put_req;
+ }
+
+ return 0;
+
+err_put_req:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+err_dma_unmap:
+ if (nsge > 0)
+ scsi_dma_unmap(cmnd);
+err:
+ return rv;
+
+err_done:
+ cmnd->scsi_done(cmnd);
+ return 0;
+}
+
+static int
+csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
+{
+ int rv;
+ int cpu = smp_processor_id();
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
+
+ ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
+ /*
+ * Use current processor queue for posting the abort/close, but retain
+ * the ingress queue ID of the original I/O being aborted/closed - we
+ * need the abort/close completion to be received on the same queue
+ * as the original I/O.
+ */
+ ioreq->eq_idx = sqset->eq_idx;
+
+ if (abort == SCSI_ABORT)
+ rv = csio_scsi_abort(ioreq);
+ else
+ rv = csio_scsi_close(ioreq);
+
+ return rv;
+}
+
+static int
+csio_eh_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int ready = 0, ret;
+ unsigned long tmo = 0;
+ int rv;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ ioreq = (struct csio_ioreq *)cmnd->host_scribble;
+ if (!ioreq)
+ return SUCCESS;
+
+ if (!rn)
+ return FAILED;
+
+ csio_dbg(hw,
+ "Request to abort ioreq:%p cmd:%p cdb:%08llx"
+ " ssni:0x%x lun:%llu iq:0x%x\n",
+ ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
+ cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
+
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return SUCCESS;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+ tmo = CSIO_SCSI_ABRT_TMO_MS;
+
+ spin_lock_irq(&hw->lock);
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ spin_unlock_irq(&hw->lock);
+
+ if (rv != 0) {
+ if (rv == -EINVAL) {
+ /* Return success, if abort/close request issued on
+ * already completed IO
+ */
+ return SUCCESS;
+ }
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+
+ goto inval_scmnd;
+ }
+
+ /* Wait for completion */
+ init_completion(&ioreq->cmplobj);
+ wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
+
+ /* FW didnt respond to abort within our timeout */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+
+ csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
+ CSIO_INC_STATS(scsim, n_abrt_timedout);
+
+inval_scmnd:
+ if (ioreq->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_cmnd(ioreq) = NULL;
+ spin_unlock_irq(&hw->lock);
+
+ cmnd->result = (DID_ERROR << 16);
+ cmnd->scsi_done(cmnd);
+
+ return FAILED;
+ }
+
+ /* FW successfully aborted the request */
+ if (host_byte(cmnd->result) == DID_REQUEUE) {
+ csio_info(hw,
+ "Aborted SCSI command to (%d:%llu) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return SUCCESS;
+ } else {
+ csio_info(hw,
+ "Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return FAILED;
+ }
+}
+
+/*
+ * csio_tm_cbfn - TM callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ * Cache the result in 'cmnd', since ioreq will be freed soon
+ * after we return from here, and the waiting thread shouldnt trust
+ * the ioreq contents.
+ */
+static void
+csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags = 0;
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+
+ csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
+ req, req->wr_status);
+
+ /* Cache FW return status */
+ cmnd->SCp.Status = req->wr_status;
+
+ /* Special handling based on FCP response */
+
+ /*
+ * FW returns us this error, if flags were set. FCP4 says
+ * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
+ * So if a target were to set this bit, we expect that the
+ * rsp_code is set to FCP_TMF_CMPL for a successful TM
+ * completion. Any other rsp_code means TM operation failed.
+ * If a target were to just ignore setting flags, we treat
+ * the TM operation as success, and FW returns FW_SUCCESS.
+ */
+ if (req->wr_status == FW_SCSI_RSP_ERR) {
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+
+ flags = fcp_resp->resp.fr_flags;
+
+ /* Modify return status if flags indicate success */
+ if (flags & FCP_RSP_LEN_VAL)
+ if (rsp_info->rsp_code == FCP_TMF_CMPL)
+ cmnd->SCp.Status = FW_SUCCESS;
+
+ csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
+ }
+
+ /* Wake up the TM handler thread */
+ csio_scsi_cmnd(req) = NULL;
+}
+
+static int
+csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ struct csio_scsi_qset *sqset;
+ unsigned long flags;
+ int retval;
+ int count, ret;
+ LIST_HEAD(local_q);
+ struct csio_scsi_level_data sld;
+
+ if (!rn)
+ goto fail;
+
+ csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
+ cmnd->device->lun, rn->flowid, rn->scsi_id);
+
+ if (!csio_is_lnode_ready(ln)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " local node vnpi:0x%x (LUN:%llu)\n",
+ ln->vnp_flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Lnode is ready, now wait on rport node readiness */
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ /*
+ * If we have blocked in the previous call, at this point, either the
+ * remote node has come back online, or device loss timer has fired
+ * and the remote node is destroyed. Allow the LUN reset only for
+ * the former case, since LUN reset is a TMF I/O on the wire, and we
+ * need a valid session to issue it.
+ */
+ if (fc_remote_port_chkready(rn->rport)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " remote node ssni:0x%x (LUN:%llu)\n",
+ rn->flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+
+ if (!ioreq) {
+ csio_err(hw, "Out of IO request elements. Active # :%d\n",
+ scsim->stats.n_active);
+ goto fail;
+ }
+
+ sqset = &hw->sqset[ln->portid][smp_processor_id()];
+ ioreq->nsge = 0;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+
+ csio_scsi_cmnd(ioreq) = cmnd;
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Status = 0;
+
+ cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
+
+ /*
+ * FW times the LUN reset for ioreq->tmo, so we got to wait a little
+ * longer (10s for now) than that to allow FW to return the timed
+ * out command.
+ */
+ count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_tm_cbfn;
+
+ /* Save of the ioreq info for later use */
+ sld.level = CSIO_LEV_LUN;
+ sld.lnode = ioreq->lnode;
+ sld.rnode = ioreq->rnode;
+ sld.oslun = cmnd->device->lun;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ /* Kick off TM SM on the ioreq */
+ retval = csio_scsi_start_tm(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
+ ioreq, retval);
+ goto fail_ret_ioreq;
+ }
+
+ csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
+ count * (CSIO_SCSI_TM_POLL_MS / 1000));
+ /* Wait for completion */
+ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
+ && count--)
+ msleep(CSIO_SCSI_TM_POLL_MS);
+
+ /* LUN reset timed-out */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+ csio_err(hw, "LUN reset (%d:%llu) timed out\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ spin_unlock_irq(&hw->lock);
+
+ goto fail_ret_ioreq;
+ }
+
+ /* LUN reset returned, check cached status */
+ if (cmnd->SCp.Status != FW_SUCCESS) {
+ csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
+ cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ goto fail;
+ }
+
+ /* LUN reset succeeded, Start aborting affected I/Os */
+ /*
+ * Since the host guarantees during LUN reset that there
+ * will not be any more I/Os to that LUN, until the LUN reset
+ * completes, we gather pending I/Os after the LUN reset.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_scsi_gather_active_ios(scsim, &sld, &local_q);
+
+ retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
+ spin_unlock_irq(&hw->lock);
+
+ /* Aborts may have timed out */
+ if (retval != 0) {
+ csio_err(hw,
+ "Attempt to abort I/Os during LUN reset of %llu"
+ " returned %d\n", cmnd->device->lun, retval);
+ /* Return I/Os back to active_q */
+ spin_lock_irq(&hw->lock);
+ list_splice_tail_init(&local_q, &scsim->active_q);
+ spin_unlock_irq(&hw->lock);
+ goto fail;
+ }
+
+ CSIO_INC_STATS(rn, n_lun_rst);
+
+ csio_info(hw, "LUN reset occurred (%d:%llu)\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ return SUCCESS;
+
+fail_ret_ioreq:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+fail:
+ CSIO_INC_STATS(rn, n_lun_rst_fail);
+ return FAILED;
+}
+
+static int
+csio_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
+
+ return 0;
+}
+
+static int
+csio_slave_configure(struct scsi_device *sdev)
+{
+ scsi_change_queue_depth(sdev, csio_lun_qdepth);
+ return 0;
+}
+
+static void
+csio_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+}
+
+static int
+csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ int rv = 1;
+
+ spin_lock_irq(shost->host_lock);
+ if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
+ goto out;
+
+ rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
+ csio_delta_scan_tmo * HZ);
+out:
+ spin_unlock_irq(shost->host_lock);
+
+ return rv;
+}
+
+struct scsi_host_template csio_fcoe_shost_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_lport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+ .use_blk_tags = 1,
+};
+
+struct scsi_host_template csio_fcoe_shost_vport_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_vport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+ .use_blk_tags = 1,
+};
+
+/*
+ * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ * @buf_size: buffer size
+ * @num_buf : Number of buffers.
+ *
+ * This routine allocates DMA buffers required for SCSI Data xfer, if
+ * each SGL buffer for a SCSI Read request posted by SCSI midlayer are
+ * not virtually contiguous.
+ */
+static int
+csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
+ int buf_size, int num_buf)
+{
+ int n = 0;
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc = NULL;
+ uint32_t unit_size = 0;
+
+ if (!num_buf)
+ return 0;
+
+ if (!buf_size)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&scm->ddp_freelist);
+
+ /* Align buf size to page size */
+ buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
+ /* Initialize dma descriptors */
+ for (n = 0; n < num_buf; n++) {
+ /* Set unit size to request size */
+ unit_size = buf_size;
+ ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
+ if (!ddp_desc) {
+ csio_err(hw,
+ "Failed to allocate ddp descriptors,"
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ddp);
+ goto no_mem;
+ }
+
+ /* Allocate Dma buffers for DDP */
+ ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
+ &ddp_desc->paddr);
+ if (!ddp_desc->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer (ddp) allocation"
+ " failed!\n");
+ kfree(ddp_desc);
+ goto no_mem;
+ }
+
+ ddp_desc->len = unit_size;
+
+ /* Added it to scsi ddp freelist */
+ list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+ }
+
+ return 0;
+no_mem:
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+
+ return -ENOMEM;
+}
+
+/*
+ * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ *
+ * This routine frees ddp buffers.
+ */
+static void
+csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc;
+
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+}
+
+/**
+ * csio_scsim_init - Initialize SCSI Module
+ * @scm: SCSI Module
+ * @hw: HW module
+ *
+ */
+int
+csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ int i;
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ INIT_LIST_HEAD(&scm->active_q);
+ scm->hw = hw;
+
+ scm->proto_cmd_len = sizeof(struct fcp_cmnd);
+ scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
+ scm->max_sge = CSIO_SCSI_MAX_SGE;
+
+ spin_lock_init(&scm->freelist_lock);
+
+ /* Pre-allocate ioreqs and initialize them */
+ INIT_LIST_HEAD(&scm->ioreq_freelist);
+ for (i = 0; i < csio_scsi_ioreqs; i++) {
+
+ ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ioreq) {
+ csio_err(hw,
+ "I/O request element allocation failed, "
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ioreq);
+
+ goto free_ioreq;
+ }
+
+ /* Allocate Dma buffers for Response Payload */
+ dma_buf = &ioreq->dma_buf;
+ dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer allocation"
+ " failed!\n");
+ kfree(ioreq);
+ goto free_ioreq;
+ }
+
+ dma_buf->len = scm->proto_rsp_len;
+
+ /* Set state to uninit */
+ csio_init_state(&ioreq->sm, csio_scsis_uninit);
+ INIT_LIST_HEAD(&ioreq->gen_list);
+ init_completion(&ioreq->cmplobj);
+
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+ }
+
+ if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
+ goto free_ioreq;
+
+ return 0;
+
+free_ioreq:
+ /*
+ * Free up existing allocations, since an error
+ * from here means we are returning for good
+ */
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * csio_scsim_exit: Uninitialize SCSI Module
+ * @scm: SCSI Module
+ *
+ */
+void
+csio_scsim_exit(struct csio_scsim *scm)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ csio_scsi_free_ddp_bufs(scm, scm->hw);
+}
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h
new file mode 100644
index 000000000..2257c3dcf
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.h
@@ -0,0 +1,342 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_SCSI_H__
+#define __CSIO_SCSI_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/completion.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "csio_defs.h"
+#include "csio_wr.h"
+
+extern struct scsi_host_template csio_fcoe_shost_template;
+extern struct scsi_host_template csio_fcoe_shost_vport_template;
+
+extern int csio_scsi_eqsize;
+extern int csio_scsi_iqlen;
+extern int csio_scsi_ioreqs;
+extern uint32_t csio_max_scan_tmo;
+extern uint32_t csio_delta_scan_tmo;
+extern int csio_lun_qdepth;
+
+/*
+ **************************** NOTE *******************************
+ * How do we calculate MAX FCoE SCSI SGEs? Here is the math:
+ * Max Egress WR size = 512 bytes
+ * One SCSI egress WR has the following fixed no of bytes:
+ * 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
+ * + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
+ * ------
+ * 80
+ * ------
+ * That leaves us with 512 - 96 = 432 bytes for data SGE. Using
+ * struct ulptx_sgl header for the SGE consumes:
+ * - 4 bytes for cmnd_sge.
+ * - 12 bytes for the first SGL.
+ * That leaves us with 416 bytes for the remaining SGE pairs. Which is
+ * is 416 / 24 (size(struct ulptx_sge_pair)) = 17 SGE pairs,
+ * or 34 SGEs. Adding the first SGE fetches us 35 SGEs.
+ */
+#define CSIO_SCSI_MAX_SGE 35
+#define CSIO_SCSI_ABRT_TMO_MS 60000
+#define CSIO_SCSI_LUNRST_TMO_MS 60000
+#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
+ * all TM timeouts.
+ */
+#define CSIO_SCSI_IQ_WRSZ 128
+#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
+
+#define CSIO_MAX_SNS_LEN 128
+#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
+
+/* Reference to scsi_cmnd */
+#define csio_scsi_cmnd(req) ((req)->scratch1)
+
+struct csio_scsi_stats {
+ uint64_t n_tot_success; /* Total number of good I/Os */
+ uint32_t n_rn_nr_error; /* No. of remote-node-not-
+ * ready errors
+ */
+ uint32_t n_hw_nr_error; /* No. of hw-module-not-
+ * ready errors
+ */
+ uint32_t n_dmamap_error; /* No. of DMA map erros */
+ uint32_t n_unsupp_sge_error; /* No. of too-many-SGes
+ * errors.
+ */
+ uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */
+ uint32_t n_busy_error; /* No. of -EBUSY errors */
+ uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
+ uint32_t n_rsperror; /* No. of response errors */
+ uint32_t n_autosense; /* No. of auto sense replies */
+ uint32_t n_ovflerror; /* No. of overflow errors */
+ uint32_t n_unflerror; /* No. of underflow errors */
+ uint32_t n_rdev_nr_error;/* No. of rdev not
+ * ready errors
+ */
+ uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
+ uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
+ uint32_t n_link_down_error;/* No. of link down errors */
+ uint32_t n_no_xchg_error; /* No. no exchange error */
+ uint32_t n_unknown_error;/* No. of unhandled errors */
+ uint32_t n_aborted; /* No. of aborted I/Os */
+ uint32_t n_abrt_timedout; /* No. of abort timedouts */
+ uint32_t n_abrt_fail; /* No. of abort failures */
+ uint32_t n_abrt_dups; /* No. of duplicate aborts */
+ uint32_t n_abrt_race_comp; /* No. of aborts that raced
+ * with completions.
+ */
+ uint32_t n_abrt_busy_error;/* No. of abort failures
+ * due to -EBUSY.
+ */
+ uint32_t n_closed; /* No. of closed I/Os */
+ uint32_t n_cls_busy_error; /* No. of close failures
+ * due to -EBUSY.
+ */
+ uint32_t n_active; /* No. of IOs in active_q */
+ uint32_t n_tm_active; /* No. of TMs in active_q */
+ uint32_t n_wcbfn; /* No. of I/Os in worker
+ * cbfn q
+ */
+ uint32_t n_free_ioreq; /* No. of freelist entries */
+ uint32_t n_free_ddp; /* No. of DDP freelist */
+ uint32_t n_unaligned; /* No. of Unaligned SGls */
+ uint32_t n_inval_cplop; /* No. invalid CPL op's in IQ */
+ uint32_t n_inval_scsiop; /* No. invalid scsi op's in IQ*/
+};
+
+struct csio_scsim {
+ struct csio_hw *hw; /* Pointer to HW moduel */
+ uint8_t max_sge; /* Max SGE */
+ uint8_t proto_cmd_len; /* Proto specific SCSI
+ * cmd length
+ */
+ uint16_t proto_rsp_len; /* Proto specific SCSI
+ * response length
+ */
+ spinlock_t freelist_lock; /* Lock for ioreq freelist */
+ struct list_head active_q; /* Outstanding SCSI I/Os */
+ struct list_head ioreq_freelist; /* Free list of ioreq's */
+ struct list_head ddp_freelist; /* DDP descriptor freelist */
+ struct csio_scsi_stats stats; /* This module's statistics */
+};
+
+/* State machine defines */
+enum csio_scsi_ev {
+ CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
+ CSIO_SCSIE_START_TM, /* Start a TM IO */
+ CSIO_SCSIE_COMPLETED, /* IO Completed */
+ CSIO_SCSIE_ABORT, /* Abort IO */
+ CSIO_SCSIE_ABORTED, /* IO Aborted */
+ CSIO_SCSIE_CLOSE, /* Close exchange */
+ CSIO_SCSIE_CLOSED, /* Exchange closed */
+ CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
+ * cleanup this I/O.
+ */
+};
+
+enum csio_scsi_lev {
+ CSIO_LEV_ALL = 1,
+ CSIO_LEV_LNODE,
+ CSIO_LEV_RNODE,
+ CSIO_LEV_LUN,
+};
+
+struct csio_scsi_level_data {
+ enum csio_scsi_lev level;
+ struct csio_rnode *rnode;
+ struct csio_lnode *lnode;
+ uint64_t oslun;
+};
+
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq(struct csio_scsim *scm)
+{
+ struct csio_sm *req;
+
+ if (likely(!list_empty(&scm->ioreq_freelist))) {
+ req = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&req->sm_list);
+ CSIO_DEC_STATS(scm, n_free_ioreq);
+ return (struct csio_ioreq *)req;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
+{
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+}
+
+static inline void
+csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_init(reqlist, &scm->ioreq_freelist);
+ scm->stats.n_free_ioreq += n;
+}
+
+static inline struct csio_dma_buf *
+csio_get_scsi_ddp(struct csio_scsim *scm)
+{
+ struct csio_dma_buf *ddp;
+
+ if (likely(!list_empty(&scm->ddp_freelist))) {
+ ddp = list_first_entry(&scm->ddp_freelist,
+ struct csio_dma_buf, list);
+ list_del_init(&ddp->list);
+ CSIO_DEC_STATS(scm, n_free_ddp);
+ return ddp;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
+{
+ list_add_tail(&ddp->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+}
+
+static inline void
+csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_tail_init(reqlist, &scm->ddp_freelist);
+ scm->stats.n_free_ddp += n;
+}
+
+static inline void
+csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
+ if (csio_list_deleted(&ioreq->sm.sm_list))
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
+}
+
+/*
+ * csio_scsi_start_io - Kick starts the IO SM.
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_start_io(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_start_tm - Kicks off the Task management IO SM.
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_start_tm(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_abort - Abort an IO request
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_abort(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_close - Close an IO request
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_close(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
+ return ioreq->drv_status;
+}
+
+void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
+int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
+int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
+ struct csio_lnode *);
+struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *,
+ void *, uint8_t **);
+int csio_scsi_qconfig(struct csio_hw *);
+int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
+void csio_scsim_exit(struct csio_scsim *);
+
+#endif /* __CSIO_SCSI_H__ */
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
new file mode 100644
index 000000000..e8f18174f
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -0,0 +1,1645 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <linux/cache.h>
+
+#include "csio_hw.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_defs.h"
+
+int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */
+static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */
+
+int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
+static int csio_sge_timer_reg = 1;
+
+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
+
+static void
+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
+{
+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
+ reg * sizeof(uint32_t));
+}
+
+/* Free list buffer size */
+static inline uint32_t
+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
+{
+ return sge->sge_fl_buf_size[buf->paddr & 0xF];
+}
+
+/* Size of the egress queue status page */
+static inline uint32_t
+csio_wr_qstat_pgsz(struct csio_hw *hw)
+{
+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
+}
+
+/* Ring freelist doorbell */
+static inline void
+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
+{
+ /*
+ * Ring the doorbell only when we have atleast CSIO_QCREDIT_SZ
+ * number of bytes in the freelist queue. This translates to atleast
+ * 8 freelist buffer pointers (since each pointer is 8 bytes).
+ */
+ if (flq->inc_idx >= 8) {
+ csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
+ PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
+ MYPF_REG(SGE_PF_KDOORBELL_A));
+ flq->inc_idx &= 7;
+ }
+}
+
+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
+static void
+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
+{
+ csio_wr_reg32(hw, CIDXINC_V(0) |
+ INGRESSQID_V(iqid) |
+ TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
+ MYPF_REG(SGE_PF_GTS_A));
+}
+
+/*
+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ * Fill up freelist buffer entries with buffers of size specified
+ * in the size register.
+ *
+ */
+static int
+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ __be64 *d = (__be64 *)(flq->vstart);
+ struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
+ uint64_t paddr;
+ int sreg = flq->un.fl.sreg;
+ int n = flq->credits;
+
+ while (n--) {
+ buf->len = sge->sge_fl_buf_size[sreg];
+ buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
+ &buf->paddr);
+ if (!buf->vaddr) {
+ csio_err(hw, "Could only fill %d buffers!\n", n + 1);
+ return -ENOMEM;
+ }
+
+ paddr = buf->paddr | (sreg & 0xF);
+
+ *d++ = cpu_to_be64(paddr);
+ buf++;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_wr_update_fl -
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ *
+ */
+static inline void
+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
+{
+
+ flq->inc_idx += n;
+ flq->pidx += n;
+ if (unlikely(flq->pidx >= flq->credits))
+ flq->pidx -= (uint16_t)flq->credits;
+
+ CSIO_INC_STATS(flq, n_flq_refill);
+}
+
+/*
+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.
+ * @hw: HW module
+ * @qsize: Size of the queue in bytes
+ * @wrsize: Since of WR in this queue, if fixed.
+ * @type: Type of queue (Ingress/Egress/Freelist)
+ * @owner: Module that owns this queue.
+ * @nflb: Number of freelist buffers for FL.
+ * @sreg: What is the FL buffer size register?
+ * @iq_int_handler: Ingress queue handler in INTx mode.
+ *
+ * This function allocates and sets up a queue for the caller
+ * of size qsize, aligned at the required boundary. This is subject to
+ * be free entries being available in the queue array. If one is found,
+ * it is initialized with the allocated queue, marked as being used (owner),
+ * and a handle returned to the caller in form of the queue's index
+ * into the q_arr array.
+ * If user has indicated a freelist (by specifying nflb > 0), create
+ * another queue (with its own index into q_arr) for the freelist. Allocate
+ * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
+ * idx in the ingress queue's flq.idx. This is how a Freelist is associated
+ * with its owning ingress queue.
+ */
+int
+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
+ uint16_t type, void *owner, uint32_t nflb, int sreg,
+ iq_handler_t iq_intx_handler)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q, *flq;
+ int free_idx = wrm->free_qidx;
+ int ret_idx = free_idx;
+ uint32_t qsz;
+ int flq_idx;
+
+ if (free_idx >= wrm->num_q) {
+ csio_err(hw, "No more free queues.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case CSIO_EGRESS:
+ qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
+ break;
+ case CSIO_INGRESS:
+ switch (wrsize) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ csio_err(hw, "Invalid Ingress queue WR size:%d\n",
+ wrsize);
+ return -1;
+ }
+
+ /*
+ * Number of elements must be a multiple of 16
+ * So this includes status page size
+ */
+ qsz = ALIGN(qsize/wrsize, 16) * wrsize;
+
+ break;
+ case CSIO_FREELIST:
+ qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
+ break;
+ default:
+ csio_err(hw, "Invalid queue type: 0x%x\n", type);
+ return -1;
+ }
+
+ q = wrm->q_arr[free_idx];
+
+ q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
+ if (!q->vstart) {
+ csio_err(hw,
+ "Failed to allocate DMA memory for "
+ "queue at id: %d size: %d\n", free_idx, qsize);
+ return -1;
+ }
+
+ q->type = type;
+ q->owner = owner;
+ q->pidx = q->cidx = q->inc_idx = 0;
+ q->size = qsz;
+ q->wr_sz = wrsize; /* If using fixed size WRs */
+
+ wrm->free_qidx++;
+
+ if (type == CSIO_INGRESS) {
+ /* Since queue area is set to zero */
+ q->un.iq.genbit = 1;
+
+ /*
+ * Ingress queue status page size is always the size of
+ * the ingress queue entry.
+ */
+ q->credits = (qsz - q->wr_sz) / q->wr_sz;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - q->wr_sz);
+
+ /* Allocate memory for FL if requested */
+ if (nflb > 0) {
+ flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
+ sizeof(__be64), CSIO_FREELIST,
+ owner, 0, sreg, NULL);
+ if (flq_idx == -1) {
+ csio_err(hw,
+ "Failed to allocate FL queue"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ /* Associate the new FL with the Ingress quue */
+ q->un.iq.flq_idx = flq_idx;
+
+ flq = wrm->q_arr[q->un.iq.flq_idx];
+ flq->un.fl.bufs = kzalloc(flq->credits *
+ sizeof(struct csio_dma_buf),
+ GFP_KERNEL);
+ if (!flq->un.fl.bufs) {
+ csio_err(hw,
+ "Failed to allocate FL queue bufs"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ flq->un.fl.packen = 0;
+ flq->un.fl.offset = 0;
+ flq->un.fl.sreg = sreg;
+
+ /* Fill up the free list buffers */
+ if (csio_wr_fill_fl(hw, flq))
+ return -1;
+
+ /*
+ * Make sure in a FLQ, atleast 1 credit (8 FL buffers)
+ * remains unpopulated,otherwise HW thinks
+ * FLQ is empty.
+ */
+ flq->pidx = flq->inc_idx = flq->credits - 8;
+ } else {
+ q->un.iq.flq_idx = -1;
+ }
+
+ /* Associate the IQ INTx handler. */
+ q->un.iq.iq_intx_handler = iq_intx_handler;
+
+ csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
+
+ } else if (type == CSIO_EGRESS) {
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
+ } else { /* Freelist */
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
+ }
+
+ return ret_idx;
+}
+
+/*
+ * csio_wr_iq_create_rsp - Response handler for IQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that got created.
+ *
+ * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
+ */
+static int
+csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ struct csio_iq_params iqp;
+ enum fw_retval retval;
+ uint32_t iq_id;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_iqid(hw, iq_idx) = iqp.iqid;
+ csio_q_physiqid(hw, iq_idx) = iqp.physiqid;
+ csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;
+ csio_q_inc_idx(hw, iq_idx) = 0;
+
+ /* Actual iq-id. */
+ iq_id = iqp.iqid - hw->wrm.fw_iq_start;
+
+ /* Set the iq-id to iq map table. */
+ if (iq_id >= CSIO_MAX_IQ) {
+ csio_err(hw,
+ "Exceeding MAX_IQ(%d) supported!"
+ " iqid:%d rel_iqid:%d FW iq_start:%d\n",
+ CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_q_set_intr_map(hw, iq_idx, iq_id);
+
+ /*
+ * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
+ * ingress context of this queue. This will block interrupts to
+ * this queue until the next GTS write. Therefore, we do a
+ * 0-cidx increment GTS write for this queue just to clear the
+ * interrupt_sent bit. This will re-enable interrupts to this
+ * queue.
+ */
+ csio_wr_sge_intr_enable(hw, iqp.physiqid);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ csio_q_flid(hw, flq_idx) = iqp.fl0id;
+ csio_q_cidx(hw, flq_idx) = 0;
+ csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+ csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+
+ /* Now update SGE about the buffers allocated during init */
+ csio_wr_ring_fldb(hw, flq);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_iq_create - Configure an Ingress queue with FW.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index in the WR module.
+ * @vec: MSIX vector.
+ * @portid: PCIE Channel to be associated with this queue.
+ * @async: Is this a FW asynchronous message handling queue?
+ * @cbfn: Completion callback.
+ *
+ * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
+ * with alloc/write bits set.
+ */
+int
+csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
+ uint32_t vec, uint8_t portid, bool async,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+ csio_q_portid(hw, iq_idx) = portid;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "IQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_INTX:
+ case CSIO_IM_MSI:
+ /* For interrupt forwarding queue only */
+ if (hw->intr_iq_idx == iq_idx)
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ else
+ iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
+ iqp.iqandstindex =
+ csio_q_physiqid(hw, hw->intr_iq_idx);
+ break;
+ case CSIO_IM_MSIX:
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ iqp.iqandstindex = (uint16_t)vec;
+ break;
+ case CSIO_IM_NONE:
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Pass in the ingress queue cmd parameters */
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iq_start = 1;
+ iqp.viid = 0;
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+ iqp.iqasynch = async;
+ if (csio_intr_coalesce_cnt)
+ iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
+ else
+ iqp.iqanus = X_UPDATESCHEDULING_TIMER;
+ iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
+ iqp.iqpciech = portid;
+ iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;
+
+ switch (csio_q_wr_sz(hw, iq_idx)) {
+ case 16:
+ iqp.iqesize = 0; break;
+ case 32:
+ iqp.iqesize = 1; break;
+ case 64:
+ iqp.iqesize = 2; break;
+ case 128:
+ iqp.iqesize = 3; break;
+ }
+
+ iqp.iqsize = csio_q_size(hw, iq_idx) /
+ csio_q_wr_sz(hw, iq_idx);
+ iqp.iqaddr = csio_q_pstart(hw, iq_idx);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ iqp.fl0paden = 1;
+ iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
+ iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
+ iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
+ iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
+ iqp.fl0addr = csio_q_pstart(hw, flq_idx);
+ }
+
+ csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of IQ cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_create_rsp - Response handler for EQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that got created.
+ *
+ * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
+ */
+static int
+csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ struct csio_eq_params eqp;
+ enum fw_retval retval;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
+ csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
+ csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
+ csio_q_inc_idx(hw, eq_idx) = 0;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_eq_create - Configure an Egress queue with FW.
+ * @hw: HW module.
+ * @priv: Private data.
+ * @eq_idx: Egress queue index in the WR module.
+ * @iq_idx: Associated ingress queue index.
+ * @cbfn: Completion callback.
+ *
+ * This API configures a offload egress queue with FW by issuing a
+ * FW_EQ_OFLD_CMD (with alloc + write ) mailbox.
+ */
+int
+csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
+ int iq_idx, uint8_t portid,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "EQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqstart = 1;
+ eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
+ eqp.iqid = csio_q_iqid(hw, iq_idx);
+ eqp.fbmin = X_FETCHBURSTMIN_64B;
+ eqp.fbmax = X_FETCHBURSTMAX_512B;
+ eqp.cidxfthresh = 0;
+ eqp.pciechn = portid;
+ eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
+ eqp.eqaddr = csio_q_pstart(hw, eq_idx);
+
+ csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+ &eqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that was freed.
+ *
+ * Handle FW_IQ_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_iq_destroy - Free an ingress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an ingress queue by issuing the FW_IQ_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iqid = csio_q_iqid(hw, iq_idx);
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1)
+ iqp.fl0id = csio_q_flid(hw, flq_idx);
+ else
+ iqp.fl0id = 0xFFFF;
+
+ iqp.fl1id = 0xFFFF;
+
+ csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that was freed.
+ *
+ * Handle FW_OFLD_EQ_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_eq_destroy - Free an Egress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @eq_idx: Egress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqid = csio_q_eqid(hw, eq_idx);
+
+ csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
+ * @hw: HW module
+ * @qidx: Egress queue index
+ *
+ * Cleanup the Egress queue status page.
+ */
+static void
+csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
+{
+ struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+
+ memset(stp, 0, sizeof(*stp));
+}
+
+/*
+ * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
+ * @hw: HW module
+ * @qidx: Ingress queue index
+ *
+ * Cleanup the footer entries in the given ingress queue,
+ * set to 1 the internal copy of genbit.
+ */
+static void
+csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *wr;
+ struct csio_iqwr_footer *ftr;
+ uint32_t i = 0;
+
+ /* set to 1 since we are just about zero out genbit */
+ q->un.iq.genbit = 1;
+
+ for (i = 0; i < q->credits; i++) {
+ /* Get the WR */
+ wr = (void *)((uintptr_t)q->vstart +
+ (i * q->wr_sz));
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ /* Zero out footer */
+ memset(ftr, 0, sizeof(*ftr));
+ }
+}
+
+int
+csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
+{
+ int i, flq_idx;
+ struct csio_q *q;
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv;
+
+ for (i = 0; i < wrm->free_qidx; i++) {
+ q = wrm->q_arr[i];
+
+ switch (q->type) {
+ case CSIO_EGRESS:
+ if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_eq_stpg(hw, i);
+ if (!cmd) {
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ }
+ case CSIO_INGRESS:
+ if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_iq_ftr(hw, i);
+ if (!cmd) {
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) =
+ CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
+ }
+ default:
+ break;
+ }
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;
+
+ return 0;
+}
+
+/*
+ * csio_wr_get - Get requested size of WR entry/entries from queue.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @size: Cumulative size of Work request(s).
+ * @wrp: Work request pair.
+ *
+ * If requested credits are available, return the start address of the
+ * work request in the work request pair. Set pidx accordingly and
+ * return.
+ *
+ * NOTE about WR pair:
+ * ==================
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/size to be passed back to the caller -
+ * hence Work request pair format.
+ */
+int
+csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
+ struct csio_wr_pair *wrp)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *cwr = (void *)((uintptr_t)(q->vstart) +
+ (q->pidx * CSIO_QCREDIT_SZ));
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+ uint16_t cidx = q->cidx = ntohs(stp->cidx);
+ uint16_t pidx = q->pidx;
+ uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
+ int req_credits = req_sz / CSIO_QCREDIT_SZ;
+ int credits;
+
+ CSIO_DB_ASSERT(q->owner != NULL);
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+ CSIO_DB_ASSERT(cidx <= q->credits);
+
+ /* Calculate credits */
+ if (pidx > cidx) {
+ credits = q->credits - (pidx - cidx) - 1;
+ } else if (cidx > pidx) {
+ credits = cidx - pidx - 1;
+ } else {
+ /* cidx == pidx, empty queue */
+ credits = q->credits;
+ CSIO_INC_STATS(q, n_qempty);
+ }
+
+ /*
+ * Check if we have enough credits.
+ * credits = 1 implies queue is full.
+ */
+ if (!credits || (req_credits > credits)) {
+ CSIO_INC_STATS(q, n_qfull);
+ return -EBUSY;
+ }
+
+ /*
+ * If we are here, we have enough credits to satisfy the
+ * request. Check if we are near the end of q, and if WR spills over.
+ * If it does, use the first addr/size to cover the queue until
+ * the end. Fit the remainder portion of the request at the top
+ * of queue and return it in the second addr/len. Set pidx
+ * accordingly.
+ */
+ if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
+ wrp->addr1 = cwr;
+ wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
+ wrp->addr2 = q->vstart;
+ wrp->size2 = req_sz - wrp->size1;
+ q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
+ CSIO_QCREDIT_SZ);
+ CSIO_INC_STATS(q, n_qwrap);
+ CSIO_INC_STATS(q, n_eq_wr_split);
+ } else {
+ wrp->addr1 = cwr;
+ wrp->size1 = req_sz;
+ wrp->addr2 = NULL;
+ wrp->size2 = 0;
+ q->pidx += (uint16_t)req_credits;
+
+ /* We are the end of queue, roll back pidx to top of queue */
+ if (unlikely(q->pidx == q->credits)) {
+ q->pidx = 0;
+ CSIO_INC_STATS(q, n_qwrap);
+ }
+ }
+
+ q->inc_idx = (uint16_t)req_credits;
+
+ CSIO_INC_STATS(q, n_tot_reqs);
+
+ return 0;
+}
+
+/*
+ * csio_wr_copy_to_wrp - Copies given data into WR.
+ * @data_buf - Data buffer
+ * @wrp - Work request pair.
+ * @wr_off - Work request offset.
+ * @data_len - Data length.
+ *
+ * Copies the given data in Work Request. Work request pair(wrp) specifies
+ * address information of Work request.
+ * Returns: none
+ */
+void
+csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
+ uint32_t wr_off, uint32_t data_len)
+{
+ uint32_t nbytes;
+
+ /* Number of space available in buffer addr1 of WRP */
+ nbytes = ((wrp->size1 - wr_off) >= data_len) ?
+ data_len : (wrp->size1 - wr_off);
+
+ memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
+ data_len -= nbytes;
+
+ /* Write the remaining data from the begining of circular buffer */
+ if (data_len) {
+ CSIO_DB_ASSERT(data_len <= wrp->size2);
+ CSIO_DB_ASSERT(wrp->addr2 != NULL);
+ memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
+ }
+}
+
+/*
+ * csio_wr_issue - Notify chip of Work request.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @prio: 0: Low priority, 1: High priority
+ *
+ * Rings the SGE Doorbell by writing the current producer index of the passed
+ * in queue into the register.
+ *
+ */
+int
+csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+
+ wmb();
+ /* Ring SGE Doorbell writing q->pidx into it */
+ csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
+ PIDX_T5_V(q->inc_idx) | DBTYPE_F,
+ MYPF_REG(SGE_PF_KDOORBELL_A));
+ q->inc_idx = 0;
+
+ return 0;
+}
+
+static inline uint32_t
+csio_wr_avail_qcredits(struct csio_q *q)
+{
+ if (q->pidx > q->cidx)
+ return q->pidx - q->cidx;
+ else if (q->cidx > q->pidx)
+ return q->credits - (q->cidx - q->pidx);
+ else
+ return 0; /* cidx == pidx, empty queue */
+}
+
+/*
+ * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
+ * @hw: HW module.
+ * @flq: The freelist queue.
+ *
+ * Invalidate the driver's version of a freelist buffer entry,
+ * without freeing the associated the DMA memory. The entry
+ * to be invalidated is picked up from the current Free list
+ * queue cidx.
+ *
+ */
+static inline void
+csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
+{
+ flq->cidx++;
+ if (flq->cidx == flq->credits) {
+ flq->cidx = 0;
+ CSIO_INC_STATS(flq, n_qwrap);
+ }
+}
+
+/*
+ * csio_wr_process_fl - Process a freelist completion.
+ * @hw: HW module.
+ * @q: The ingress queue attached to the Freelist.
+ * @wr: The freelist completion WR in the ingress queue.
+ * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
+ * @iq_handler: Caller's handler for this completion.
+ * @priv: Private pointer of caller
+ *
+ */
+static inline void
+csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
+ void *wr, uint32_t len_to_qid,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ struct csio_fl_dma_buf flb;
+ struct csio_dma_buf *buf, *fbuf;
+ uint32_t bufsz, len, lastlen = 0;
+ int n;
+ struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
+
+ CSIO_DB_ASSERT(flq != NULL);
+
+ len = len_to_qid;
+
+ if (len & IQWRF_NEWBUF) {
+ if (flq->un.fl.offset > 0) {
+ csio_wr_inval_flq_buf(hw, flq);
+ flq->un.fl.offset = 0;
+ }
+ len = IQWRF_LEN_GET(len);
+ }
+
+ CSIO_DB_ASSERT(len != 0);
+
+ flb.totlen = len;
+
+ /* Consume all freelist buffers used for len bytes */
+ for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
+ buf = &flq->un.fl.bufs[flq->cidx];
+ bufsz = csio_wr_fl_bufsz(sge, buf);
+
+ fbuf->paddr = buf->paddr;
+ fbuf->vaddr = buf->vaddr;
+
+ flb.offset = flq->un.fl.offset;
+ lastlen = min(bufsz, len);
+ fbuf->len = lastlen;
+
+ len -= lastlen;
+ if (!len)
+ break;
+ csio_wr_inval_flq_buf(hw, flq);
+ }
+
+ flb.defer_free = flq->un.fl.packen ? 0 : 1;
+
+ iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
+ &flb, priv);
+
+ if (flq->un.fl.packen)
+ flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
+ else
+ csio_wr_inval_flq_buf(hw, flq);
+
+}
+
+/*
+ * csio_is_new_iqwr - Is this a new Ingress queue entry ?
+ * @q: Ingress quueue.
+ * @ftr: Ingress queue WR SGE footer.
+ *
+ * The entry is new if our generation bit matches the corresponding
+ * bit in the footer of the current WR.
+ */
+static inline bool
+csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
+{
+ return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
+}
+
+/*
+ * csio_wr_process_iq - Process elements in Ingress queue.
+ * @hw: HW pointer
+ * @qidx: Index of queue
+ * @iq_handler: Handler for this queue
+ * @priv: Caller's private pointer
+ *
+ * This routine walks through every entry of the ingress queue, calling
+ * the provided iq_handler with the entry, until the generation bit
+ * flips.
+ */
+int
+csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
+ struct csio_iqwr_footer *ftr;
+ uint32_t wr_type, fw_qid, qid;
+ struct csio_q *q_completed;
+ struct csio_q *flq = csio_iq_has_fl(q) ?
+ wrm->q_arr[q->un.iq.flq_idx] : NULL;
+ int rv = 0;
+
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+
+ /*
+ * When q wrapped around last time, driver should have inverted
+ * ic.genbit as well.
+ */
+ while (csio_is_new_iqwr(q, ftr)) {
+
+ CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
+ (uintptr_t)q->vwrap);
+ rmb();
+ wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
+
+ switch (wr_type) {
+ case X_RSPD_TYPE_CPL:
+ /* Subtract footer from WR len */
+ iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
+ break;
+ case X_RSPD_TYPE_FLBUF:
+ csio_wr_process_fl(hw, q, wr,
+ ntohl(ftr->pldbuflen_qid),
+ iq_handler, priv);
+ break;
+ case X_RSPD_TYPE_INTR:
+ fw_qid = ntohl(ftr->pldbuflen_qid);
+ qid = fw_qid - wrm->fw_iq_start;
+ q_completed = hw->wrm.intr_map[qid];
+
+ if (unlikely(qid ==
+ csio_q_physiqid(hw, hw->intr_iq_idx))) {
+ /*
+ * We are already in the Forward Interrupt
+ * Interrupt Queue Service! Do-not service
+ * again!
+ *
+ */
+ } else {
+ CSIO_DB_ASSERT(q_completed);
+ CSIO_DB_ASSERT(
+ q_completed->un.iq.iq_intx_handler);
+
+ /* Call the queue handler. */
+ q_completed->un.iq.iq_intx_handler(hw, NULL,
+ 0, NULL, (void *)q_completed);
+ }
+ break;
+ default:
+ csio_warn(hw, "Unknown resp type 0x%x received\n",
+ wr_type);
+ CSIO_INC_STATS(q, n_rsp_unknown);
+ break;
+ }
+
+ /*
+ * Ingress *always* has fixed size WR entries. Therefore,
+ * there should always be complete WRs towards the end of
+ * queue.
+ */
+ if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
+
+ /* Roll over to start of queue */
+ q->cidx = 0;
+ wr = q->vstart;
+
+ /* Toggle genbit */
+ q->un.iq.genbit ^= 0x1;
+
+ CSIO_INC_STATS(q, n_qwrap);
+ } else {
+ q->cidx++;
+ wr = (void *)((uintptr_t)(q->vstart) +
+ (q->cidx * q->wr_sz));
+ }
+
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ q->inc_idx++;
+
+ } /* while (q->un.iq.genbit == hdr->genbit) */
+
+ /*
+ * We need to re-arm SGE interrupts in case we got a stray interrupt,
+ * especially in msix mode. With INTx, this may be a common occurence.
+ */
+ if (unlikely(!q->inc_idx)) {
+ CSIO_INC_STATS(q, n_stray_comp);
+ rv = -EINVAL;
+ goto restart;
+ }
+
+ /* Replenish free list buffers if pending falls below low water mark */
+ if (flq) {
+ uint32_t avail = csio_wr_avail_qcredits(flq);
+ if (avail <= 16) {
+ /* Make sure in FLQ, atleast 1 credit (8 FL buffers)
+ * remains unpopulated otherwise HW thinks
+ * FLQ is empty.
+ */
+ csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
+ csio_wr_ring_fldb(hw, flq);
+ }
+ }
+
+restart:
+ /* Now inform SGE about our incremental index value */
+ csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) |
+ INGRESSQID_V(q->un.iq.physiqid) |
+ TIMERREG_V(csio_sge_timer_reg),
+ MYPF_REG(SGE_PF_GTS_A));
+ q->stats.n_tot_rsps += q->inc_idx;
+
+ q->inc_idx = 0;
+
+ return rv;
+}
+
+int
+csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *iq = wrm->q_arr[qidx];
+
+ return csio_wr_process_iq(hw, iq, iq_handler, priv);
+}
+
+static int
+csio_closest_timer(struct csio_sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int
+csio_closest_thresh(struct csio_sge *s, int cnt)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = cnt - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static void
+csio_wr_fixup_host_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t clsz = L1_CACHE_BYTES;
+ uint32_t s_hps = PAGE_SHIFT - 10;
+ uint32_t ingpad = 0;
+ uint32_t stat_len = clsz > 64 ? 128 : 64;
+
+ csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
+ HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
+ HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
+ HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
+ SGE_HOST_PAGE_SIZE_A);
+
+ sge->csio_fl_align = clsz < 32 ? 32 : clsz;
+ ingpad = ilog2(sge->csio_fl_align) - 5;
+
+ csio_set_reg_field(hw, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+ EGRSTATUSPAGESIZE_F,
+ INGPADBOUNDARY_V(ingpad) |
+ EGRSTATUSPAGESIZE_V(stat_len != 64));
+
+ /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
+ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
+
+ /*
+ * If using hard params, the following will get set correctly
+ * in csio_wr_set_sge().
+ */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE2_A);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE3_A);
+ }
+
+ csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
+
+ /* default value of rx_dma_offset of the NIC driver */
+ csio_set_reg_field(hw, SGE_CONTROL_A,
+ PKTSHIFT_V(PKTSHIFT_M),
+ PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));
+
+ csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
+ CSUM_HAS_PSEUDO_HDR_F, 0);
+}
+
+static void
+csio_init_intr_coalesce_parms(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+
+ csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
+ if (csio_intr_coalesce_cnt) {
+ csio_sge_thresh_reg = 0;
+ csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
+ return;
+ }
+
+ csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
+}
+
+/*
+ * csio_wr_get_sge - Get SGE register values.
+ * @hw: HW module.
+ *
+ * Used by non-master functions and by master-functions relying on config file.
+ */
+static void
+csio_wr_get_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t ingpad;
+ int i;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
+
+ ingpad = INGPADBOUNDARY_G(sge->sge_control);
+
+ switch (ingpad) {
+ case X_INGPCIEBOUNDARY_32B:
+ sge->csio_fl_align = 32; break;
+ case X_INGPCIEBOUNDARY_64B:
+ sge->csio_fl_align = 64; break;
+ case X_INGPCIEBOUNDARY_128B:
+ sge->csio_fl_align = 128; break;
+ case X_INGPCIEBOUNDARY_256B:
+ sge->csio_fl_align = 256; break;
+ case X_INGPCIEBOUNDARY_512B:
+ sge->csio_fl_align = 512; break;
+ case X_INGPCIEBOUNDARY_1024B:
+ sge->csio_fl_align = 1024; break;
+ case X_INGPCIEBOUNDARY_2048B:
+ sge->csio_fl_align = 2048; break;
+ case X_INGPCIEBOUNDARY_4096B:
+ sge->csio_fl_align = 4096; break;
+ }
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
+ timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
+ timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);
+
+ sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE0_G(timer_value_0_and_1));
+ sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE1_G(timer_value_0_and_1));
+ sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE2_G(timer_value_2_and_3));
+ sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE3_G(timer_value_2_and_3));
+ sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE4_G(timer_value_4_and_5));
+ sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE5_G(timer_value_4_and_5));
+
+ ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
+ sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+ sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+ sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+ sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+/*
+ * csio_wr_set_sge - Initialize SGE registers
+ * @hw: HW module.
+ *
+ * Used by Master function to initialize SGE registers in the absence
+ * of a config file.
+ */
+static void
+csio_wr_set_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ int i;
+
+ /*
+ * Set up our basic SGE mode to deliver CPL messages to our Ingress
+ * Queue and Packet Date to the Free List.
+ */
+ csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
+
+ /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
+
+ /*
+ * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
+ * and generate an interrupt when this occurs so we can recover.
+ */
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
+ LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+ LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
+ HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+ HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
+
+ csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
+ ENABLE_DROP_F);
+
+ /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
+
+ CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
+ csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
+ csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
+ CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
+ CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
+ CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
+ CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
+ CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ /* Initialize interrupt coalescing attributes */
+ sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
+ sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
+ sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
+ sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
+ sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
+ sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
+
+ sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
+ sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
+ sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
+ sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
+
+ csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
+ THRESHOLD_1_V(sge->counter_val[1]) |
+ THRESHOLD_2_V(sge->counter_val[2]) |
+ THRESHOLD_3_V(sge->counter_val[3]),
+ SGE_INGRESS_RX_THRESHOLD_A);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+ TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+ SGE_TIMER_VALUE_0_AND_1_A);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+ TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+ SGE_TIMER_VALUE_2_AND_3_A);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+ TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+ SGE_TIMER_VALUE_4_AND_5_A);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+void
+csio_wr_sge_init(struct csio_hw *hw)
+{
+ /*
+ * If we are master and chip is not initialized:
+ * - If we plan to use the config file, we need to fixup some
+ * host specific registers, and read the rest of the SGE
+ * configuration.
+ * - If we dont plan to use the config file, we need to initialize
+ * SGE entirely, including fixing the host specific registers.
+ * If we are master and chip is initialized, just read and work off of
+ * the already initialized SGE values.
+ * If we arent the master, we are only allowed to read and work off of
+ * the already initialized SGE values.
+ *
+ * Therefore, before calling this function, we assume that the master-
+ * ship of the card, state and whether to use config file or not, have
+ * already been decided.
+ */
+ if (csio_is_hw_master(hw)) {
+ if (hw->fw_state != CSIO_DEV_STATE_INIT)
+ csio_wr_fixup_host_params(hw);
+
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
+ csio_wr_get_sge(hw);
+ else
+ csio_wr_set_sge(hw);
+ } else
+ csio_wr_get_sge(hw);
+}
+
+/*
+ * csio_wrm_init - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW pointer
+ *
+ * Allocates memory for an array of queue pointers starting at q_arr.
+ */
+int
+csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+
+ if (!wrm->num_q) {
+ csio_err(hw, "Num queues is not set\n");
+ return -EINVAL;
+ }
+
+ wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
+ if (!wrm->q_arr)
+ goto err;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
+ if (!wrm->q_arr[i]) {
+ while (--i >= 0)
+ kfree(wrm->q_arr[i]);
+ goto err_free_arr;
+ }
+ }
+ wrm->free_qidx = 0;
+
+ return 0;
+
+err_free_arr:
+ kfree(wrm->q_arr);
+err:
+ return -ENOMEM;
+}
+
+/*
+ * csio_wrm_exit - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW module
+ *
+ * Uninitialize WR module. Free q_arr and pointers in it.
+ * We have the additional job of freeing the DMA memory associated
+ * with the queues.
+ */
+void
+csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+ uint32_t j;
+ struct csio_q *q;
+ struct csio_dma_buf *buf;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ q = wrm->q_arr[i];
+
+ if (wrm->free_qidx && (i < wrm->free_qidx)) {
+ if (q->type == CSIO_FREELIST) {
+ if (!q->un.fl.bufs)
+ continue;
+ for (j = 0; j < q->credits; j++) {
+ buf = &q->un.fl.bufs[j];
+ if (!buf->vaddr)
+ continue;
+ pci_free_consistent(hw->pdev, buf->len,
+ buf->vaddr,
+ buf->paddr);
+ }
+ kfree(q->un.fl.bufs);
+ }
+ pci_free_consistent(hw->pdev, q->size,
+ q->vstart, q->pstart);
+ }
+ kfree(q);
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
+
+ kfree(wrm->q_arr);
+}
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h
new file mode 100644
index 000000000..0c0dd9a65
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.h
@@ -0,0 +1,512 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_WR_H__
+#define __CSIO_WR_H__
+
+#include <linux/cache.h>
+
+#include "csio_defs.h"
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+
+/*
+ * SGE register field values.
+ */
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPCIEBOUNDARY_64B 1
+#define X_INGPCIEBOUNDARY_128B 2
+#define X_INGPCIEBOUNDARY_256B 3
+#define X_INGPCIEBOUNDARY_512B 4
+#define X_INGPCIEBOUNDARY_1024B 5
+#define X_INGPCIEBOUNDARY_2048B 6
+#define X_INGPCIEBOUNDARY_4096B 7
+
+/* GTS register */
+#define X_TIMERREG_COUNTER0 0
+#define X_TIMERREG_COUNTER1 1
+#define X_TIMERREG_COUNTER2 2
+#define X_TIMERREG_COUNTER3 3
+#define X_TIMERREG_COUNTER4 4
+#define X_TIMERREG_COUNTER5 5
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_16B 0
+#define X_FETCHBURSTMIN_32B 1
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+
+#define X_FETCHBURSTMAX_64B 0
+#define X_FETCHBURSTMAX_128B 1
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+#define X_HOSTFCMODE_INGRESS_QUEUE 1
+#define X_HOSTFCMODE_STATUS_PAGE 2
+#define X_HOSTFCMODE_BOTH 3
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATESCHEDULING_TIMER 0
+#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
+
+#define X_UPDATEDELIVERY_NONE 0
+#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+#define X_UPDATEDELIVERY_BOTH 3
+
+#define X_INTERRUPTDESTINATION_PCIE 0
+#define X_INTERRUPTDESTINATION_IQ 1
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+#define X_RSPD_TYPE_INTR 2
+
+/* WR status is at the same position as retval in a CMD header */
+#define csio_wr_status(_wr) \
+ (FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
+
+struct csio_hw;
+
+extern int csio_intr_coalesce_cnt;
+extern int csio_intr_coalesce_time;
+
+/* Ingress queue params */
+struct csio_iq_params {
+
+ uint8_t iq_start:1;
+ uint8_t iq_stop:1;
+ uint8_t pfn:3;
+
+ uint8_t vfn;
+
+ uint16_t physiqid;
+ uint16_t iqid;
+
+ uint16_t fl0id;
+ uint16_t fl1id;
+
+ uint8_t viid;
+
+ uint8_t type;
+ uint8_t iqasynch;
+ uint8_t reserved4;
+
+ uint8_t iqandst;
+ uint8_t iqanus;
+ uint8_t iqanud;
+
+ uint16_t iqandstindex;
+
+ uint8_t iqdroprss;
+ uint8_t iqpciech;
+ uint8_t iqdcaen;
+
+ uint8_t iqdcacpu;
+ uint8_t iqintcntthresh;
+ uint8_t iqo;
+
+ uint8_t iqcprio;
+ uint8_t iqesize;
+
+ uint16_t iqsize;
+
+ uint64_t iqaddr;
+
+ uint8_t iqflintiqhsen;
+ uint8_t reserved5;
+ uint8_t iqflintcongen;
+ uint8_t iqflintcngchmap;
+
+ uint32_t reserved6;
+
+ uint8_t fl0hostfcmode;
+ uint8_t fl0cprio;
+ uint8_t fl0paden;
+ uint8_t fl0packen;
+ uint8_t fl0congen;
+ uint8_t fl0dcaen;
+
+ uint8_t fl0dcacpu;
+ uint8_t fl0fbmin;
+
+ uint8_t fl0fbmax;
+ uint8_t fl0cidxfthresho;
+ uint8_t fl0cidxfthresh;
+
+ uint16_t fl0size;
+
+ uint64_t fl0addr;
+
+ uint64_t reserved7;
+
+ uint8_t fl1hostfcmode;
+ uint8_t fl1cprio;
+ uint8_t fl1paden;
+ uint8_t fl1packen;
+ uint8_t fl1congen;
+ uint8_t fl1dcaen;
+
+ uint8_t fl1dcacpu;
+ uint8_t fl1fbmin;
+
+ uint8_t fl1fbmax;
+ uint8_t fl1cidxfthresho;
+ uint8_t fl1cidxfthresh;
+
+ uint16_t fl1size;
+
+ uint64_t fl1addr;
+};
+
+/* Egress queue params */
+struct csio_eq_params {
+
+ uint8_t pfn;
+ uint8_t vfn;
+
+ uint8_t eqstart:1;
+ uint8_t eqstop:1;
+
+ uint16_t physeqid;
+ uint32_t eqid;
+
+ uint8_t hostfcmode:2;
+ uint8_t cprio:1;
+ uint8_t pciechn:3;
+
+ uint16_t iqid;
+
+ uint8_t dcaen:1;
+ uint8_t dcacpu:5;
+
+ uint8_t fbmin:3;
+ uint8_t fbmax:3;
+
+ uint8_t cidxfthresho:1;
+ uint8_t cidxfthresh:3;
+
+ uint16_t eqsize;
+
+ uint64_t eqaddr;
+};
+
+struct csio_dma_buf {
+ struct list_head list;
+ void *vaddr; /* Virtual address */
+ dma_addr_t paddr; /* Physical address */
+ uint32_t len; /* Buffer size */
+};
+
+/* Generic I/O request structure */
+struct csio_ioreq {
+ struct csio_sm sm; /* SM, List
+ * should be the first member
+ */
+ int iq_idx; /* Ingress queue index */
+ int eq_idx; /* Egress queue index */
+ uint32_t nsge; /* Number of SG elements */
+ uint32_t tmo; /* Driver timeout */
+ uint32_t datadir; /* Data direction */
+ struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */
+ uint16_t wr_status; /* WR completion status */
+ int16_t drv_status; /* Driver internal status */
+ struct csio_lnode *lnode; /* Owner lnode */
+ struct csio_rnode *rnode; /* Src/destination rnode */
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
+ /* completion callback */
+ void *scratch1; /* Scratch area 1.
+ */
+ void *scratch2; /* Scratch area 2. */
+ struct list_head gen_list; /* Any list associated with
+ * this ioreq.
+ */
+ uint64_t fw_handle; /* Unique handle passed
+ * to FW
+ */
+ uint8_t dcopy; /* Data copy required */
+ uint8_t reserved1;
+ uint16_t reserved2;
+ struct completion cmplobj; /* ioreq completion object */
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Egress status page for egress cidx updates
+ */
+struct csio_qstatus_page {
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+
+enum {
+ CSIO_MAX_FLBUF_PER_IQWR = 4,
+ CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments
+ * in bytes
+ */
+ CSIO_MAX_QID = 0xFFFF,
+ CSIO_MAX_IQ = 128,
+
+ CSIO_SGE_NTIMERS = 6,
+ CSIO_SGE_NCOUNTERS = 4,
+ CSIO_SGE_FL_SIZE_REGS = 16,
+};
+
+/* Defines for type */
+enum {
+ CSIO_EGRESS = 1,
+ CSIO_INGRESS = 2,
+ CSIO_FREELIST = 3,
+};
+
+/*
+ * Structure for footer (last 2 flits) of Ingress Queue Entry.
+ */
+struct csio_iqwr_footer {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define IQWRF_NEWBUF (1 << 31)
+#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
+#define IQWRF_GEN_SHIFT 7
+#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
+
+
+/*
+ * WR pair:
+ * ========
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/len to be passed back to the caller -
+ * hence the Work request pair structure.
+ */
+struct csio_wr_pair {
+ void *addr1;
+ uint32_t size1;
+ void *addr2;
+ uint32_t size2;
+};
+
+/*
+ * The following structure is used by ingress processing to return the
+ * free list buffers to consumers.
+ */
+struct csio_fl_dma_buf {
+ struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];
+ /* Freelist DMA buffers */
+ int offset; /* Offset within the
+ * first FL buf.
+ */
+ uint32_t totlen; /* Total length */
+ uint8_t defer_free; /* Free of buffer can
+ * deferred
+ */
+};
+
+/* Data-types */
+typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+struct csio_iq {
+ uint16_t iqid; /* Queue ID */
+ uint16_t physiqid; /* Physical Queue ID */
+ uint16_t genbit; /* Generation bit,
+ * initially set to 1
+ */
+ int flq_idx; /* Freelist queue index */
+ iq_handler_t iq_intx_handler; /* IQ INTx handler routine */
+};
+
+struct csio_eq {
+ uint16_t eqid; /* Qid */
+ uint16_t physeqid; /* Physical Queue ID */
+ uint8_t wrap[512]; /* Temp area for q-wrap around*/
+};
+
+struct csio_fl {
+ uint16_t flid; /* Qid */
+ uint16_t packen; /* Packing enabled? */
+ int offset; /* Offset within FL buf */
+ int sreg; /* Size register */
+ struct csio_dma_buf *bufs; /* Free list buffer ptr array
+ * indexed using flq->cidx/pidx
+ */
+};
+
+struct csio_qstats {
+ uint32_t n_tot_reqs; /* Total no. of Requests */
+ uint32_t n_tot_rsps; /* Total no. of responses */
+ uint32_t n_qwrap; /* Queue wraps */
+ uint32_t n_eq_wr_split; /* Number of split EQ WRs */
+ uint32_t n_qentry; /* Queue entry */
+ uint32_t n_qempty; /* Queue empty */
+ uint32_t n_qfull; /* Queue fulls */
+ uint32_t n_rsp_unknown; /* Unknown response type */
+ uint32_t n_stray_comp; /* Stray completion intr */
+ uint32_t n_flq_refill; /* Number of FL refills */
+};
+
+/* Queue metadata */
+struct csio_q {
+ uint16_t type; /* Type: Ingress/Egress/FL */
+ uint16_t pidx; /* producer index */
+ uint16_t cidx; /* consumer index */
+ uint16_t inc_idx; /* Incremental index */
+ uint32_t wr_sz; /* Size of all WRs in this q
+ * if fixed
+ */
+ void *vstart; /* Base virtual address
+ * of queue
+ */
+ void *vwrap; /* Virtual end address to
+ * wrap around at
+ */
+ uint32_t credits; /* Size of queue in credits */
+ void *owner; /* Owner */
+ union { /* Queue contexts */
+ struct csio_iq iq;
+ struct csio_eq eq;
+ struct csio_fl fl;
+ } un;
+
+ dma_addr_t pstart; /* Base physical address of
+ * queue
+ */
+ uint32_t portid; /* PCIE Channel */
+ uint32_t size; /* Size of queue in bytes */
+ struct csio_qstats stats; /* Statistics */
+} ____cacheline_aligned_in_smp;
+
+struct csio_sge {
+ uint32_t csio_fl_align; /* Calculated and cached
+ * for fast path
+ */
+ uint32_t sge_control; /* padding, boundaries,
+ * lengths, etc.
+ */
+ uint32_t sge_host_page_size; /* Host page size */
+ uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
+ /* free list buffer sizes */
+ uint16_t timer_val[CSIO_SGE_NTIMERS];
+ uint8_t counter_val[CSIO_SGE_NCOUNTERS];
+};
+
+/* Work request module */
+struct csio_wrm {
+ int num_q; /* Number of queues */
+ struct csio_q **q_arr; /* Array of queue pointers
+ * allocated dynamically
+ * based on configured values
+ */
+ uint32_t fw_iq_start; /* Start ID of IQ for this fn*/
+ uint32_t fw_eq_start; /* Start ID of EQ for this fn*/
+ struct csio_q *intr_map[CSIO_MAX_IQ];
+ /* IQ-id to IQ map table. */
+ int free_qidx; /* queue idx of free queue */
+ struct csio_sge sge; /* SGE params */
+};
+
+#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
+#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
+#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
+#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
+#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
+#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
+#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
+#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
+#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
+#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
+#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
+#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
+#define csio_q_physiqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
+#define csio_q_iq_flq_idx(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
+#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
+#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
+
+#define csio_q_physeqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
+#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
+
+#define csio_q_iq_to_flid(__hw, __iq_idx) \
+ csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
+#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
+ (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
+#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
+
+struct csio_mb;
+
+int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
+ uint16_t, void *, uint32_t, int, iq_handler_t);
+int csio_wr_iq_create(struct csio_hw *, void *, int,
+ uint32_t, uint8_t, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_destroy_queues(struct csio_hw *, bool cmd);
+
+
+int csio_wr_get(struct csio_hw *, int, uint32_t,
+ struct csio_wr_pair *);
+void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
+int csio_wr_issue(struct csio_hw *, int, bool);
+int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+int csio_wr_process_iq_idx(struct csio_hw *, int,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+
+void csio_wr_sge_init(struct csio_hw *);
+int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
+void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
+
+#endif /* ifndef __CSIO_WR_H__ */
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h
new file mode 100644
index 000000000..097e52c0f
--- /dev/null
+++ b/drivers/scsi/csiostor/t4fw_api_stor.h
@@ -0,0 +1,539 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_API_STOR_H_
+#define _T4FW_API_STOR_H_
+
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_fcoe_link_sub_op {
+ FCOE_LINK_DOWN = 0x0,
+ FCOE_LINK_UP = 0x1,
+ FCOE_LINK_COND = 0x2,
+};
+
+enum fw_fcoe_link_status {
+ FCOE_LINKDOWN = 0x0,
+ FCOE_LINKUP = 0x1,
+};
+
+enum fw_ofld_prot {
+ PROT_FCOE = 0x1,
+ PROT_ISCSI = 0x2,
+};
+
+enum rport_type_fcoe {
+ FLOGI_VFPORT = 0x1, /* 0xfffffe */
+ FDISC_VFPORT = 0x2, /* 0xfffffe */
+ NS_VNPORT = 0x3, /* 0xfffffc */
+ REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */
+ REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */
+ FDMI_VNPORT = 0x6, /* 0xfffffa */
+ FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */
+};
+
+enum event_cause_fcoe {
+ PLOGI_ACC_RCVD = 0x01,
+ PLOGI_RJT_RCVD = 0x02,
+ PLOGI_RCVD = 0x03,
+ PLOGO_RCVD = 0x04,
+ PRLI_ACC_RCVD = 0x05,
+ PRLI_RJT_RCVD = 0x06,
+ PRLI_RCVD = 0x07,
+ PRLO_RCVD = 0x08,
+ NPORT_ID_CHGD = 0x09,
+ FLOGO_RCVD = 0x0a,
+ CLR_VIRT_LNK_RCVD = 0x0b,
+ FLOGI_ACC_RCVD = 0x0c,
+ FLOGI_RJT_RCVD = 0x0d,
+ FDISC_ACC_RCVD = 0x0e,
+ FDISC_RJT_RCVD = 0x0f,
+ FLOGI_TMO_MAX_RETRY = 0x10,
+ IMPL_LOGO_ADISC_ACC = 0x11,
+ IMPL_LOGO_ADISC_RJT = 0x12,
+ IMPL_LOGO_ADISC_CNFLT = 0x13,
+ PRLI_TMO = 0x14,
+ ADISC_TMO = 0x15,
+ RSCN_DEV_LOST = 0x16,
+ SCR_ACC_RCVD = 0x17,
+ ADISC_RJT_RCVD = 0x18,
+ LOGO_SNT = 0x19,
+ PROTO_ERR_IMPL_LOGO = 0x1a,
+};
+
+enum fcoe_cmn_type {
+ FCOE_ELS,
+ FCOE_CT,
+ FCOE_SCSI_CMD,
+ FCOE_UNSOL_ELS,
+};
+
+enum fw_wr_stor_opcodes {
+ FW_RDEV_WR = 0x38,
+ FW_FCOE_ELS_CT_WR = 0x30,
+ FW_SCSI_WRITE_WR = 0x31,
+ FW_SCSI_READ_WR = 0x32,
+ FW_SCSI_CMD_WR = 0x33,
+ FW_SCSI_ABRT_CLS_WR = 0x34,
+};
+
+struct fw_rdev_wr {
+ __be32 op_to_immdlen;
+ __be32 alloc_to_len16;
+ __be64 cookie;
+ u8 protocol;
+ u8 event_cause;
+ u8 cur_state;
+ u8 prev_state;
+ __be32 flags_to_assoc_flowid;
+ union rdev_entry {
+ struct fcoe_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 rjt_reason;
+ u8 cur_login_st;
+ u8 prev_login_st;
+ __be16 rcv_fr_sz;
+ u8 rd_xfer_rdy_to_rport_type;
+ u8 vft_to_qos;
+ u8 org_proc_assoc_to_acc_rsp_code;
+ u8 enh_disc_to_tgt;
+ u8 wwnn[8];
+ u8 wwpn[8];
+ __be16 iqid;
+ u8 fc_oui[3];
+ u8 r_id[3];
+ } fcoe_rdev;
+ struct iscsi_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 r3;
+ __be16 iscsi_opts;
+ __be16 tcp_opts;
+ __be16 ip_opts;
+ __be16 max_rcv_len;
+ __be16 max_snd_len;
+ __be16 first_brst_len;
+ __be16 max_brst_len;
+ __be16 r4;
+ __be16 def_time2wait;
+ __be16 def_time2ret;
+ __be16 nop_out_intrvl;
+ __be16 non_scsi_to;
+ __be16 isid;
+ __be16 tsid;
+ __be16 port;
+ __be16 tpgt;
+ u8 r5[6];
+ __be16 iqid;
+ } iscsi_rdev;
+ } u;
+};
+
+#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff)
+#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f)
+#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1)
+#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3)
+#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1)
+#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1)
+#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1)
+#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1)
+#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1)
+
+struct fw_fcoe_els_ct_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 els_ct_type;
+ u8 ctl_pri;
+ u8 cp_en_class;
+ __be16 xfer_cnt;
+ u8 fl_to_sp;
+ u8 l_id[3];
+ u8 r5;
+ u8 r_id[3];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24)
+#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0)
+
+struct fw_scsi_write_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_write_priv {
+ struct fcoe_write_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_write_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_read_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_read_priv {
+ struct fcoe_read_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_read_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_cmd_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 r3;
+ union fw_scsi_cmd_priv {
+ struct fcoe_cmd_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r4_lo[2];
+ } fcoe;
+ struct iscsi_cmd_priv {
+ u8 r4[4];
+ } iscsi;
+ } u;
+ u8 r5[8];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0)
+
+#define SCSI_ABORT 0
+#define SCSI_CLOSE 1
+
+struct fw_scsi_abrt_cls_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 sub_opcode_to_chk_all_io;
+ u8 r3[4];
+ u64 t_cookie;
+};
+
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2)
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f)
+#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0)
+
+enum fw_cmd_stor_opcodes {
+ FW_FCOE_RES_INFO_CMD = 0x31,
+ FW_FCOE_LINK_CMD = 0x32,
+ FW_FCOE_VNP_CMD = 0x33,
+ FW_FCOE_SPARAMS_CMD = 0x35,
+ FW_FCOE_STATS_CMD = 0x37,
+ FW_FCOE_FCF_CMD = 0x38,
+};
+
+struct fw_fcoe_res_info_cmd {
+ __be32 op_to_read;
+ __be32 retval_len16;
+ __be16 e_d_tov;
+ __be16 r_a_tov_seq;
+ __be16 r_a_tov_els;
+ __be16 r_r_tov;
+ __be32 max_xchgs;
+ __be32 max_ssns;
+ __be32 used_xchgs;
+ __be32 used_ssns;
+ __be32 max_fcfs;
+ __be32 max_vnps;
+ __be32 used_fcfs;
+ __be32 used_vnps;
+};
+
+struct fw_fcoe_link_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ __be32 sub_opcode_fcfi;
+ u8 r3;
+ u8 lstatus;
+ __be16 flags;
+ u8 r4;
+ u8 set_vlan;
+ __be16 vlan_id;
+ __be32 vnpi_pkd;
+ __be16 r6;
+ u8 phy_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+};
+
+#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U)
+#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff)
+#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_vnp_cmd {
+ __be32 op_to_fcfi;
+ __be32 alloc_to_len16;
+ __be32 gen_wwn_to_vnpi;
+ __be32 vf_id;
+ __be16 iqid;
+ u8 vnport_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 clsp_word_0_1[8];
+};
+
+#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_ALLOC (1U << 31)
+#define FW_FCOE_VNP_CMD_FREE (1U << 30)
+#define FW_FCOE_VNP_CMD_MODIFY (1U << 29)
+#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22)
+#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20)
+#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_sparams_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ u8 r3[7];
+ u8 cos;
+ u8 lport_wwnn[8];
+ u8 lport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 cls_srv_parms[16];
+};
+
+#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0)
+
+struct fw_fcoe_stats_cmd {
+ __be32 op_to_flowid;
+ __be32 free_to_len16;
+ union fw_fcoe_stats {
+ struct fw_fcoe_stats_ctl {
+ u8 nstats_port;
+ u8 port_valid_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_fcoe_port_stats {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } port_stats;
+ struct fw_fcoe_fcf_stats {
+ __be32 fip_tx_bytes;
+ __be32 fip_tx_fr;
+ __be64 fcf_ka;
+ __be64 mcast_adv_rcvd;
+ __be16 ucast_adv_rcvd;
+ __be16 sol_sent;
+ __be16 vlan_req;
+ __be16 vlan_rpl;
+ __be16 clr_vlink;
+ __be16 link_down;
+ __be16 link_up;
+ __be16 logo;
+ __be16 flogi_req;
+ __be16 flogi_rpl;
+ __be16 fdisc_req;
+ __be16 fdisc_rpl;
+ __be16 fka_prd_chg;
+ __be16 fc_map_chg;
+ __be16 vfid_chg;
+ u8 no_fka_req;
+ u8 no_vnp;
+ } fcf_stats;
+ struct fw_fcoe_pcb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 vnp_ka;
+ __be32 unsol_els_rcvd;
+ __be64 unsol_cmd_rcvd;
+ __be16 implicit_logo;
+ __be16 flogi_inv_sparm;
+ __be16 fdisc_inv_sparm;
+ __be16 flogi_rjt;
+ __be16 fdisc_rjt;
+ __be16 no_ssn;
+ __be16 mac_flt_fail;
+ __be16 inv_fr_rcvd;
+ } pcb_stats;
+ struct fw_fcoe_scb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 host_abrt_req;
+ __be32 adap_auto_abrt;
+ __be32 adap_abrt_rsp;
+ __be32 host_ios_req;
+ __be16 ssn_offl_ios;
+ __be16 ssn_not_rdy_ios;
+ u8 rx_data_ddp_err;
+ u8 ddp_flt_set_err;
+ __be16 rx_data_fr_err;
+ u8 bad_st_abrt_req;
+ u8 no_io_abrt_req;
+ u8 abort_tmo;
+ u8 abort_tmo_2;
+ __be32 abort_req;
+ u8 no_ppod_res_tmo;
+ u8 bp_tmo;
+ u8 adap_auto_cls;
+ u8 no_io_cls_req;
+ __be32 host_cls_req;
+ __be64 unsol_cmd_rcvd;
+ __be32 plogi_req_rcvd;
+ __be32 prli_req_rcvd;
+ __be16 logo_req_rcvd;
+ __be16 prlo_req_rcvd;
+ __be16 plogi_rjt_rcvd;
+ __be16 prli_rjt_rcvd;
+ __be32 adisc_req_rcvd;
+ __be32 rscn_rcvd;
+ __be32 rrq_req_rcvd;
+ __be32 unsol_els_rcvd;
+ u8 adisc_rjt_rcvd;
+ u8 scr_rjt;
+ u8 ct_rjt;
+ u8 inval_bls_rcvd;
+ __be32 ba_rjt_rcvd;
+ } scb_stats;
+ } u;
+};
+
+#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_FREE (1U << 30)
+#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7)
+#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_fcoe_fcf_cmd {
+ __be32 op_to_fcfi;
+ __be32 retval_len16;
+ __be16 priority_pkd;
+ u8 mac[6];
+ u8 name_id[8];
+ u8 fabric[8];
+ __be16 vf_id;
+ __be16 max_fcoe_size;
+ u8 vlan_id;
+ u8 fc_map[3];
+ __be32 fka_adv;
+ __be32 r6;
+ u8 r7_hi;
+ u8 fpma_to_portid;
+ u8 spma_mac[6];
+ __be64 r8;
+};
+
+#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1)
+#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1)
+#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1)
+#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#endif /* _T4FW_API_STOR_H_ */
diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig
new file mode 100644
index 000000000..17eb5d522
--- /dev/null
+++ b/drivers/scsi/cxgbi/Kconfig
@@ -0,0 +1,2 @@
+source "drivers/scsi/cxgbi/cxgb3i/Kconfig"
+source "drivers/scsi/cxgbi/cxgb4i/Kconfig"
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile
new file mode 100644
index 000000000..86007e344
--- /dev/null
+++ b/drivers/scsi/cxgbi/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/
+obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild
new file mode 100644
index 000000000..6f095e28a
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild
@@ -0,0 +1,3 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3
+
+obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
new file mode 100644
index 000000000..e4603985d
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -0,0 +1,10 @@
+config SCSI_CXGB3_ISCSI
+ tristate "Chelsio T3 iSCSI support"
+ depends on PCI && INET && (IPV6 || IPV6=n)
+ select NETDEVICES
+ select ETHERNET
+ select NET_VENDOR_CHELSIO
+ select CHELSIO_T3
+ select SCSI_ISCSI_ATTRS
+ ---help---
+ This driver supports iSCSI offload for the Chelsio T3 devices.
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
new file mode 100644
index 000000000..3db4c6397
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -0,0 +1,1414 @@
+/*
+ * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ *
+ * Written by: Dimitris Michailidis (dm@chelsio.com)
+ * Karen Xie (kxie@chelsio.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <scsi/scsi_host.h>
+
+#include "common.h"
+#include "t3_cpl.h"
+#include "t3cdev.h"
+#include "cxgb3_defs.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_offload.h"
+#include "firmware_exports.h"
+#include "cxgb3i.h"
+
+static unsigned int dbg_level;
+#include "../libcxgbi.h"
+
+#define DRV_MODULE_NAME "cxgb3i"
+#define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver"
+#define DRV_MODULE_VERSION "2.0.0"
+#define DRV_MODULE_RELDATE "Jun. 2010"
+
+static char version[] =
+ DRV_MODULE_DESC " " DRV_MODULE_NAME
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Chelsio Communications, Inc.");
+MODULE_DESCRIPTION(DRV_MODULE_DESC);
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(dbg_level, uint, 0644);
+MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");
+
+static int cxgb3i_rcv_win = 256 * 1024;
+module_param(cxgb3i_rcv_win, int, 0644);
+MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int cxgb3i_snd_win = 128 * 1024;
+module_param(cxgb3i_snd_win, int, 0644);
+MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");
+
+static int cxgb3i_rx_credit_thres = 10 * 1024;
+module_param(cxgb3i_rx_credit_thres, int, 0644);
+MODULE_PARM_DESC(rx_credit_thres,
+ "RX credits return threshold in bytes (default=10KB)");
+
+static unsigned int cxgb3i_max_connect = 8 * 1024;
+module_param(cxgb3i_max_connect, uint, 0644);
+MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8092)");
+
+static unsigned int cxgb3i_sport_base = 20000;
+module_param(cxgb3i_sport_base, uint, 0644);
+MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
+
+static void cxgb3i_dev_open(struct t3cdev *);
+static void cxgb3i_dev_close(struct t3cdev *);
+static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);
+
+static struct cxgb3_client t3_client = {
+ .name = DRV_MODULE_NAME,
+ .handlers = cxgb3i_cpl_handlers,
+ .add = cxgb3i_dev_open,
+ .remove = cxgb3i_dev_close,
+ .event_handler = cxgb3i_dev_event_handler,
+};
+
+static struct scsi_host_template cxgb3i_host_template = {
+ .module = THIS_MODULE,
+ .name = DRV_MODULE_NAME,
+ .proc_name = DRV_MODULE_NAME,
+ .can_queue = CXGB3I_SCSI_HOST_QDEPTH,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = scsi_change_queue_depth,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .target_alloc = iscsi_target_alloc,
+ .use_clustering = DISABLE_CLUSTERING,
+ .this_id = -1,
+ .track_queue_depth = 1,
+};
+
+static struct iscsi_transport cxgb3i_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = DRV_MODULE_NAME,
+ /* owner and name should be set already */
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
+ | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
+ .attr_is_visible = cxgbi_attr_is_visible,
+ .get_host_param = cxgbi_get_host_param,
+ .set_host_param = cxgbi_set_host_param,
+ /* session management */
+ .create_session = cxgbi_create_session,
+ .destroy_session = cxgbi_destroy_session,
+ .get_session_param = iscsi_session_get_param,
+ /* connection management */
+ .create_conn = cxgbi_create_conn,
+ .bind_conn = cxgbi_bind_conn,
+ .destroy_conn = iscsi_tcp_conn_teardown,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .get_conn_param = iscsi_conn_get_param,
+ .set_param = cxgbi_set_conn_param,
+ .get_stats = cxgbi_get_conn_stats,
+ /* pdu xmit req from user space */
+ .send_pdu = iscsi_conn_send_pdu,
+ /* task */
+ .init_task = iscsi_tcp_task_init,
+ .xmit_task = iscsi_tcp_task_xmit,
+ .cleanup_task = cxgbi_cleanup_task,
+ /* pdu */
+ .alloc_pdu = cxgbi_conn_alloc_pdu,
+ .init_pdu = cxgbi_conn_init_pdu,
+ .xmit_pdu = cxgbi_conn_xmit_pdu,
+ .parse_pdu_itt = cxgbi_parse_pdu_itt,
+ /* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
+ .ep_connect = cxgbi_ep_connect,
+ .ep_poll = cxgbi_ep_poll,
+ .ep_disconnect = cxgbi_ep_disconnect,
+ /* Error recovery timeout call */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+};
+
+static struct scsi_transport_template *cxgb3i_stt;
+
+/*
+ * CPL (Chelsio Protocol Language) defines a message passing interface between
+ * the host driver and Chelsio asic.
+ * The section below implments CPLs that related to iscsi tcp connection
+ * open/close/abort and data send/receive.
+ */
+
+static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
+
+static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
+ const struct l2t_entry *e)
+{
+ unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
+ struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;
+
+ skb->priority = CPL_PRIORITY_SETUP;
+
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+
+ req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
+ V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
+ V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
+ req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
+ V_RCV_BUFSIZ(cxgb3i_rcv_win>>10));
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
+ csk, csk->state, csk->flags, csk->atid,
+ &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->mss_idx, e->idx, e->smt_idx);
+
+ l2t_send(csk->cdev->lldev, skb, csk->l2t);
+}
+
+static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+{
+ cxgbi_sock_act_open_req_arp_failure(NULL, skb);
+}
+
+/*
+ * CPL connection close request: host ->
+ *
+ * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
+ * the write queue (i.e., after any unsent txt data).
+ */
+static void send_close_req(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb = csk->cpl_close;
+ struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
+ unsigned int tid = csk->tid;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ csk->cpl_close = NULL;
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
+ req->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ req->rsvd = htonl(csk->write_seq);
+
+ cxgbi_sock_skb_entail(csk, skb);
+ if (csk->state >= CTP_ESTABLISHED)
+ push_tx_frames(csk, 1);
+}
+
+/*
+ * CPL connection abort request: host ->
+ *
+ * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
+ * for the same connection and also that we do not try to send a message
+ * after the connection has closed.
+ */
+static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
+{
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "t3dev 0x%p, tid %u, skb 0x%p.\n",
+ tdev, GET_TID(req), skb);
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgb3_ofld_send(tdev, skb);
+}
+
+static void send_abort_req(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb = csk->cpl_abort_req;
+ struct cpl_abort_req *req;
+
+ if (unlikely(csk->state == CTP_ABORTING || !skb))
+ return;
+ cxgbi_sock_set_state(csk, CTP_ABORTING);
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
+ /* Purge the send queue so we don't send anything after an abort. */
+ cxgbi_sock_purge_write_queue(csk);
+
+ csk->cpl_abort_req = NULL;
+ req = (struct cpl_abort_req *)skb->head;
+ skb->priority = CPL_PRIORITY_DATA;
+ set_arp_failure_handler(skb, abort_arp_failure);
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
+ req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
+ req->rsvd0 = htonl(csk->snd_nxt);
+ req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
+ req->cmd = CPL_ABORT_SEND_RST;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
+ req->rsvd1);
+
+ l2t_send(csk->cdev->lldev, skb, csk->l2t);
+}
+
+/*
+ * CPL connection abort reply: host ->
+ *
+ * Send an ABORT_RPL message in response of the ABORT_REQ received.
+ */
+static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
+{
+ struct sk_buff *skb = csk->cpl_abort_rpl;
+ struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, status %d.\n",
+ csk, csk->state, csk->flags, csk->tid, rst_status);
+
+ csk->cpl_abort_rpl = NULL;
+ skb->priority = CPL_PRIORITY_DATA;
+ rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
+ rpl->cmd = rst_status;
+ cxgb3_ofld_send(csk->cdev->lldev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host ->
+ * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
+ * credits sent.
+ */
+static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
+{
+ struct sk_buff *skb;
+ struct cpl_rx_data_ack *req;
+ u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
+ csk, csk->state, csk->flags, csk->tid, credits, dack);
+
+ skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
+ if (!skb) {
+ pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
+ return 0;
+ }
+ req = (struct cpl_rx_data_ack *)skb->head;
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
+ req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
+ V_RX_CREDITS(credits));
+ skb->priority = CPL_PRIORITY_ACK;
+ cxgb3_ofld_send(csk->cdev->lldev, skb);
+ return credits;
+}
+
+/*
+ * CPL connection tx data: host ->
+ *
+ * Send iscsi PDU via TX_DATA CPL message. Returns the number of
+ * credits sent.
+ * Each TX_DATA consumes work request credit (wrs), so we need to keep track of
+ * how many we've used so far and how many are pending (i.e., yet ack'ed by T3).
+ */
+
+static unsigned int wrlen __read_mostly;
+static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
+
+static void init_wr_tab(unsigned int wr_len)
+{
+ int i;
+
+ if (skb_wrs[1]) /* already initialized */
+ return;
+ for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
+ int sgl_len = (3 * i) / 2 + (i & 1);
+
+ sgl_len += 3;
+ skb_wrs[i] = (sgl_len <= wr_len
+ ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
+ }
+ wrlen = wr_len * 8;
+}
+
+static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
+ int len, int req_completion)
+{
+ struct tx_data_wr *req;
+ struct l2t_entry *l2t = csk->l2t;
+
+ skb_reset_transport_header(skb);
+ req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
+ (req_completion ? F_WR_COMPL : 0));
+ req->wr_lo = htonl(V_WR_TID(csk->tid));
+ /* len includes the length of any HW ULP additions */
+ req->len = htonl(len);
+ /* V_TX_ULP_SUBMODE sets both the mode and submode */
+ req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
+ V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
+ req->sndseq = htonl(csk->snd_nxt);
+ req->param = htonl(V_TX_PORT(l2t->smt_idx));
+
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
+ V_TX_CPU_IDX(csk->rss_qid));
+ /* sendbuffer is in units of 32KB. */
+ req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+}
+
+/**
+ * push_tx_frames -- start transmit
+ * @c3cn: the offloaded connection
+ * @req_completion: request wr_ack or not
+ *
+ * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
+ * connection's send queue and sends them on to T3. Must be called with the
+ * connection's lock held. Returns the amount of send buffer space that was
+ * freed as a result of sending queued data to T3.
+ */
+
+static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
+{
+ int total_size = 0;
+ struct sk_buff *skb;
+
+ if (unlikely(csk->state < CTP_ESTABLISHED ||
+ csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ return 0;
+ }
+
+ while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
+ int len = skb->len; /* length before skb_push */
+ int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
+ int wrs_needed = skb_wrs[frags];
+
+ if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
+ wrs_needed = 1;
+
+ WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
+
+ if (csk->wr_cred < wrs_needed) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
+ csk, skb->len, skb->data_len, frags,
+ wrs_needed, csk->wr_cred);
+ break;
+ }
+
+ __skb_unlink(skb, &csk->write_queue);
+ skb->priority = CPL_PRIORITY_DATA;
+ skb->csum = wrs_needed; /* remember this until the WR_ACK */
+ csk->wr_cred -= wrs_needed;
+ csk->wr_una_cred += wrs_needed;
+ cxgbi_sock_enqueue_wr(csk, skb);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
+ "left %u, unack %u.\n",
+ csk, skb->len, skb->data_len, frags, skb->csum,
+ csk->wr_cred, csk->wr_una_cred);
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
+ if ((req_completion &&
+ csk->wr_una_cred == wrs_needed) ||
+ csk->wr_una_cred >= csk->wr_max_cred / 2) {
+ req_completion = 1;
+ csk->wr_una_cred = 0;
+ }
+ len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
+ make_tx_data_wr(csk, skb, len, req_completion);
+ csk->snd_nxt += len;
+ cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
+ }
+ total_size += skb->truesize;
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
+ csk, csk->tid, skb);
+ set_arp_failure_handler(skb, arp_failure_skb_discard);
+ l2t_send(csk->cdev->lldev, skb, csk->l2t);
+ }
+ return total_size;
+}
+
+/*
+ * Process a CPL_ACT_ESTABLISH message: -> host
+ * Updates connection state from an active establish CPL message. Runs with
+ * the connection lock held.
+ */
+
+static inline void free_atid(struct cxgbi_sock *csk)
+{
+ if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
+ cxgb3_free_atid(csk->cdev->lldev, csk->atid);
+ cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
+ cxgbi_sock_put(csk);
+ }
+}
+
+static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+ struct cxgbi_sock *csk = ctx;
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int tid = GET_TID(req);
+ unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
+ atid, atid, csk, csk->state, csk->flags, rcv_isn);
+
+ cxgbi_sock_get(csk);
+ cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
+ csk->tid = tid;
+ cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);
+
+ free_atid(csk);
+
+ csk->rss_qid = G_QNUM(ntohs(skb->csum));
+
+ spin_lock_bh(&csk->lock);
+ if (csk->retry_timer.function) {
+ del_timer(&csk->retry_timer);
+ csk->retry_timer.function = NULL;
+ }
+
+ if (unlikely(csk->state != CTP_ACTIVE_OPEN))
+ pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
+ if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
+ csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);
+
+ cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+
+ if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
+ /* upper layer has requested closing */
+ send_abort_req(csk);
+ else {
+ if (skb_queue_len(&csk->write_queue))
+ push_tx_frames(csk, 1);
+ cxgbi_conn_tx_open(csk);
+ }
+
+ spin_unlock_bh(&csk->lock);
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process a CPL_ACT_OPEN_RPL message: -> host
+ * Handle active open failures.
+ */
+static int act_open_rpl_status_to_errno(int status)
+{
+ switch (status) {
+ case CPL_ERR_CONN_RESET:
+ return -ECONNREFUSED;
+ case CPL_ERR_ARP_MISS:
+ return -EHOSTUNREACH;
+ case CPL_ERR_CONN_TIMEDOUT:
+ return -ETIMEDOUT;
+ case CPL_ERR_TCAM_FULL:
+ return -ENOMEM;
+ case CPL_ERR_CONN_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EIO;
+ }
+}
+
+static void act_open_retry_timer(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+ skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
+ if (!skb)
+ cxgbi_sock_fail_act_open(csk, -ENOMEM);
+ else {
+ skb->sk = (struct sock *)csk;
+ set_arp_failure_handler(skb, act_open_arp_failure);
+ send_act_open_req(csk, skb, csk->l2t);
+ }
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+}
+
+static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+{
+ struct cxgbi_sock *csk = ctx;
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+
+ pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
+ csk, csk->state, csk->flags, csk->atid, rpl->status,
+ &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
+ &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
+
+ if (rpl->status != CPL_ERR_TCAM_FULL &&
+ rpl->status != CPL_ERR_CONN_EXIST &&
+ rpl->status != CPL_ERR_ARP_MISS)
+ cxgb3_queue_tid_release(tdev, GET_TID(rpl));
+
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+ if (rpl->status == CPL_ERR_CONN_EXIST &&
+ csk->retry_timer.function != act_open_retry_timer) {
+ csk->retry_timer.function = act_open_retry_timer;
+ mod_timer(&csk->retry_timer, jiffies + HZ / 2);
+ } else
+ cxgbi_sock_fail_act_open(csk,
+ act_open_rpl_status_to_errno(rpl->status));
+
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process PEER_CLOSE CPL messages: -> host
+ * Handle peer FIN.
+ */
+static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct cxgbi_sock *csk = ctx;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ cxgbi_sock_rcv_peer_close(csk);
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process CLOSE_CONN_RPL CPL message: -> host
+ * Process a peer ACK to our FIN.
+ */
+static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
+ void *ctx)
+{
+ struct cxgbi_sock *csk = ctx;
+ struct cpl_close_con_rpl *rpl = cplhdr(skb);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
+ csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));
+
+ cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process ABORT_REQ_RSS CPL message: -> host
+ * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
+ * request except that we need to reply to it.
+ */
+
+static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
+ int *need_rst)
+{
+ switch (abort_reason) {
+ case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_CONN_RESET:
+ return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
+ case CPL_ERR_XMIT_TIMEDOUT:
+ case CPL_ERR_PERSIST_TIMEDOUT:
+ case CPL_ERR_FINWAIT2_TIMEDOUT:
+ case CPL_ERR_KEEPALIVE_TIMEDOUT:
+ return -ETIMEDOUT;
+ default:
+ return -EIO;
+ }
+}
+
+static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ const struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cxgbi_sock *csk = ctx;
+ int rst_status = CPL_ABORT_NO_RST;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
+ goto done;
+ }
+
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
+ cxgbi_sock_set_state(csk, CTP_ABORTING);
+ goto out;
+ }
+
+ cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
+ send_abort_rpl(csk, rst_status);
+
+ if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
+ csk->err = abort_status_to_errno(csk, req->status, &rst_status);
+ cxgbi_sock_closed(csk);
+ }
+
+out:
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+done:
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process ABORT_RPL_RSS CPL message: -> host
+ * Process abort replies. We only process these messages if we anticipate
+ * them as the coordination between SW and HW in this area is somewhat lacking
+ * and sometimes we get ABORT_RPLs after we are done with the connection that
+ * originated the ABORT_REQ.
+ */
+static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ struct cxgbi_sock *csk = ctx;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
+ rpl->status, csk, csk ? csk->state : 0,
+ csk ? csk->flags : 0UL);
+ /*
+ * Ignore replies to post-close aborts indicating that the abort was
+ * requested too late. These connections are terminated when we get
+ * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
+ * arrives the TID is either no longer used or it has been recycled.
+ */
+ if (rpl->status == CPL_ERR_ABORT_FAILED)
+ goto rel_skb;
+ /*
+ * Sometimes we've already closed the connection, e.g., a post-close
+ * abort races with ABORT_REQ_RSS, the latter frees the connection
+ * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
+ * but FW turns the ABORT_REQ into a regular one and so we get
+ * ABORT_RPL_RSS with status 0 and no connection.
+ */
+ if (csk)
+ cxgbi_sock_rcv_abort_rpl(csk);
+rel_skb:
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process RX_ISCSI_HDR CPL message: -> host
+ * Handle received PDUs, the payload could be DDP'ed. If not, the payload
+ * follow after the bhs.
+ */
+static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
+{
+ struct cxgbi_sock *csk = ctx;
+ struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
+ struct cpl_iscsi_hdr_norss data_cpl;
+ struct cpl_rx_data_ddp_norss ddp_cpl;
+ unsigned int hdr_len, data_len, status;
+ unsigned int len;
+ int err;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
+ csk, csk->state, csk->flags, csk->tid, skb, skb->len);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
+ cxgbi_skcb_flags(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
+
+ len = hdr_len = ntohs(hdr_cpl->len);
+ /* msg coalesce is off or not enough data received */
+ if (skb->len <= hdr_len) {
+ pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
+ csk->cdev->ports[csk->port_id]->name, csk->tid,
+ skb->len, hdr_len);
+ goto abort_conn;
+ }
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);
+
+ err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
+ sizeof(ddp_cpl));
+ if (err < 0) {
+ pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
+ csk->cdev->ports[csk->port_id]->name, csk->tid,
+ skb->len, sizeof(ddp_cpl), err);
+ goto abort_conn;
+ }
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+ cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
+ cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
+ status = ntohl(ddp_cpl.ddp_status);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
+ csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
+
+ if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+ if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+ if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+
+ if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
+ err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
+ if (err < 0) {
+ pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
+ csk->cdev->ports[csk->port_id]->name,
+ csk->tid, sizeof(data_cpl), skb->len, err);
+ goto abort_conn;
+ }
+ data_len = ntohs(data_cpl.len);
+ log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
+ "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
+ skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
+ len += sizeof(data_cpl) + data_len;
+ } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+
+ csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
+ __pskb_trim(skb, len);
+ __skb_queue_tail(&csk->receive_queue, skb);
+ cxgbi_conn_pdu_ready(csk);
+
+ spin_unlock_bh(&csk->lock);
+ return 0;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process TX_DATA_ACK CPL messages: -> host
+ * Process an acknowledgment of WR completion. Advance snd_una and send the
+ * next batch of work requests from the write queue.
+ */
+static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct cxgbi_sock *csk = ctx;
+ struct cpl_wr_ack *hdr = cplhdr(skb);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
+ csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));
+
+ cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * for each connection, pre-allocate skbs needed for close/abort requests. So
+ * that we can service the request right away.
+ */
+static int alloc_cpls(struct cxgbi_sock *csk)
+{
+ csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
+ GFP_KERNEL);
+ if (!csk->cpl_close)
+ return -ENOMEM;
+ csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
+ GFP_KERNEL);
+ if (!csk->cpl_abort_req)
+ goto free_cpl_skbs;
+
+ csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
+ GFP_KERNEL);
+ if (!csk->cpl_abort_rpl)
+ goto free_cpl_skbs;
+
+ return 0;
+
+free_cpl_skbs:
+ cxgbi_sock_free_cpl_skbs(csk);
+ return -ENOMEM;
+}
+
+/**
+ * release_offload_resources - release offload resource
+ * @c3cn: the offloaded iscsi tcp connection.
+ * Release resources held by an offload connection (TID, L2T entry, etc.)
+ */
+static void l2t_put(struct cxgbi_sock *csk)
+{
+ struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
+
+ if (csk->l2t) {
+ l2t_release(t3dev, csk->l2t);
+ csk->l2t = NULL;
+ cxgbi_sock_put(csk);
+ }
+}
+
+static void release_offload_resources(struct cxgbi_sock *csk)
+{
+ struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ csk->rss_qid = 0;
+ cxgbi_sock_free_cpl_skbs(csk);
+
+ if (csk->wr_cred != csk->wr_max_cred) {
+ cxgbi_sock_purge_wr_queue(csk);
+ cxgbi_sock_reset_wr_list(csk);
+ }
+ l2t_put(csk);
+ if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
+ free_atid(csk);
+ else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
+ cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
+ cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
+ cxgbi_sock_put(csk);
+ }
+ csk->dst = NULL;
+ csk->cdev = NULL;
+}
+
+static void update_address(struct cxgbi_hba *chba)
+{
+ if (chba->ipv4addr) {
+ if (chba->vdev &&
+ chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
+ cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
+ cxgb3i_set_private_ipv4addr(chba->ndev, 0);
+ pr_info("%s set %pI4.\n",
+ chba->vdev->name, &chba->ipv4addr);
+ } else if (chba->ipv4addr !=
+ cxgb3i_get_private_ipv4addr(chba->ndev)) {
+ cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
+ pr_info("%s set %pI4.\n",
+ chba->ndev->name, &chba->ipv4addr);
+ }
+ } else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
+ if (chba->vdev)
+ cxgb3i_set_private_ipv4addr(chba->vdev, 0);
+ cxgb3i_set_private_ipv4addr(chba->ndev, 0);
+ }
+}
+
+static int init_act_open(struct cxgbi_sock *csk)
+{
+ struct dst_entry *dst = csk->dst;
+ struct cxgbi_device *cdev = csk->cdev;
+ struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
+ struct sk_buff *skb = NULL;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
+
+ update_address(chba);
+ if (chba->ipv4addr)
+ csk->saddr.sin_addr.s_addr = chba->ipv4addr;
+
+ csk->rss_qid = 0;
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev,
+ &csk->daddr.sin_addr.s_addr);
+ if (!csk->l2t) {
+ pr_err("NO l2t available.\n");
+ return -EINVAL;
+ }
+ cxgbi_sock_get(csk);
+
+ csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
+ if (csk->atid < 0) {
+ pr_err("NO atid available.\n");
+ goto rel_resource;
+ }
+ cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
+ cxgbi_sock_get(csk);
+
+ skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
+ if (!skb)
+ goto rel_resource;
+ skb->sk = (struct sock *)csk;
+ set_arp_failure_handler(skb, act_open_arp_failure);
+
+ csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
+ csk->wr_una_cred = 0;
+ csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
+ cxgbi_sock_reset_wr_list(csk);
+ csk->err = 0;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
+ csk, csk->state, csk->flags,
+ &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
+ &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
+
+ cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
+ send_act_open_req(csk, skb, csk->l2t);
+ return 0;
+
+rel_resource:
+ if (skb)
+ __kfree_skb(skb);
+ return -EINVAL;
+}
+
+cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = do_act_establish,
+ [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
+ [CPL_PEER_CLOSE] = do_peer_close,
+ [CPL_ABORT_REQ_RSS] = do_abort_req,
+ [CPL_ABORT_RPL_RSS] = do_abort_rpl,
+ [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
+ [CPL_TX_DMA_ACK] = do_wr_ack,
+ [CPL_ISCSI_HDR] = do_iscsi_hdr,
+};
+
+/**
+ * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
+ * @cdev: cxgbi adapter
+ */
+int cxgb3i_ofld_init(struct cxgbi_device *cdev)
+{
+ struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
+ struct adap_ports port;
+ struct ofld_page_info rx_page_info;
+ unsigned int wr_len;
+ int rc;
+
+ if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
+ t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
+ t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
+ pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
+ return -EINVAL;
+ }
+
+ if (cxgb3i_max_connect > CXGBI_MAX_CONN)
+ cxgb3i_max_connect = CXGBI_MAX_CONN;
+
+ rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
+ cxgb3i_max_connect);
+ if (rc < 0)
+ return rc;
+
+ init_wr_tab(wr_len);
+ cdev->csk_release_offload_resources = release_offload_resources;
+ cdev->csk_push_tx_frames = push_tx_frames;
+ cdev->csk_send_abort_req = send_abort_req;
+ cdev->csk_send_close_req = send_close_req;
+ cdev->csk_send_rx_credits = send_rx_credits;
+ cdev->csk_alloc_cpls = alloc_cpls;
+ cdev->csk_init_act_open = init_act_open;
+
+ pr_info("cdev 0x%p, offload up, added.\n", cdev);
+ return 0;
+}
+
+/*
+ * functions to program the pagepod in h/w
+ */
+static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
+{
+ struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
+
+ memset(req, 0, sizeof(*req));
+
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
+ req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
+ V_ULPTX_CMD(ULP_MEM_WRITE));
+ req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
+ V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
+}
+
+static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
+ unsigned int idx, unsigned int npods,
+ struct cxgbi_gather_list *gl)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+ int i;
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
+ csk, idx, npods, gl);
+
+ for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
+
+ if (!skb)
+ return -ENOMEM;
+
+ ulp_mem_io_set_hdr(skb, pm_addr);
+ cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
+ sizeof(struct ulp_mem_io)),
+ hdr, gl, i * PPOD_PAGES_MAX);
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(cdev->lldev, skb);
+ }
+ return 0;
+}
+
+static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
+ unsigned int idx, unsigned int npods)
+{
+ struct cxgbi_device *cdev = chba->cdev;
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+ int i;
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
+ cdev, idx, npods, tag);
+
+ for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
+
+ if (!skb) {
+ pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
+ tag, idx, i, npods);
+ continue;
+ }
+ ulp_mem_io_set_hdr(skb, pm_addr);
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(cdev->lldev, skb);
+ }
+}
+
+static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
+ unsigned int tid, int pg_idx, bool reply)
+{
+ struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
+ GFP_KERNEL);
+ struct cpl_set_tcb_field *req;
+ u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp submode and page size */
+ req = (struct cpl_set_tcb_field *)skb->head;
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->cpu_idx = 0;
+ req->word = htons(31);
+ req->mask = cpu_to_be64(0xF0000000);
+ req->val = cpu_to_be64(val << 28);
+ skb->priority = CPL_PRIORITY_CONTROL;
+
+ cxgb3_ofld_send(csk->cdev->lldev, skb);
+ return 0;
+}
+
+/**
+ * cxgb3i_setup_conn_digest - setup conn. digest setting
+ * @csk: cxgb tcp socket
+ * @tid: connection id
+ * @hcrc: header digest enabled
+ * @dcrc: data digest enabled
+ * @reply: request reply from h/w
+ * set up the iscsi digest settings for a connection identified by tid
+ */
+static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
+ int hcrc, int dcrc, int reply)
+{
+ struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
+ GFP_KERNEL);
+ struct cpl_set_tcb_field *req;
+ u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp submode and page size */
+ req = (struct cpl_set_tcb_field *)skb->head;
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->cpu_idx = 0;
+ req->word = htons(31);
+ req->mask = cpu_to_be64(0x0F000000);
+ req->val = cpu_to_be64(val << 24);
+ skb->priority = CPL_PRIORITY_CONTROL;
+
+ cxgb3_ofld_send(csk->cdev->lldev, skb);
+ return 0;
+}
+
+/**
+ * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource
+ * @cdev: cxgb3i adapter
+ * release all the resource held by the ddp pagepod manager for a given
+ * adapter if needed
+ */
+
+static void t3_ddp_cleanup(struct cxgbi_device *cdev)
+{
+ struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
+
+ if (cxgbi_ddp_cleanup(cdev)) {
+ pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
+ tdev->ulp_iscsi = NULL;
+ }
+}
+
+/**
+ * ddp_init - initialize the cxgb3 adapter's ddp resource
+ * @cdev: cxgb3i adapter
+ * initialize the ddp pagepod manager for a given adapter
+ */
+static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
+{
+ struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
+ struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
+ struct ulp_iscsi_info uinfo;
+ unsigned int pgsz_factor[4];
+ int i, err;
+
+ if (ddp) {
+ kref_get(&ddp->refcnt);
+ pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
+ tdev, tdev->ulp_iscsi);
+ cdev->ddp = ddp;
+ return -EALREADY;
+ }
+
+ err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
+ if (err < 0) {
+ pr_err("%s, failed to get iscsi param err=%d.\n",
+ tdev->name, err);
+ return err;
+ }
+
+ err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
+ uinfo.max_txsz, uinfo.max_rxsz);
+ if (err < 0)
+ return err;
+
+ ddp = cdev->ddp;
+
+ uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
+ cxgbi_ddp_page_size_factor(pgsz_factor);
+ for (i = 0; i < 4; i++)
+ uinfo.pgsz_factor[i] = pgsz_factor[i];
+ uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
+
+ err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
+ if (err < 0) {
+ pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
+ tdev->name, err);
+ cxgbi_ddp_cleanup(cdev);
+ return err;
+ }
+ tdev->ulp_iscsi = ddp;
+
+ cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
+ cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
+ cdev->csk_ddp_set = ddp_set_map;
+ cdev->csk_ddp_clear = ddp_clear_map;
+
+ pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
+ "%u/%u.\n",
+ tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
+ ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
+ ddp->max_rxsz, uinfo.max_rxsz);
+ return 0;
+}
+
+static void cxgb3i_dev_close(struct t3cdev *t3dev)
+{
+ struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
+
+ if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
+ pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
+ return;
+ }
+
+ cxgbi_device_unregister(cdev);
+}
+
+/**
+ * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
+ * @t3dev: t3cdev adapter
+ */
+static void cxgb3i_dev_open(struct t3cdev *t3dev)
+{
+ struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
+ struct adapter *adapter = tdev2adap(t3dev);
+ int i, err;
+
+ if (cdev) {
+ pr_info("0x%p, updating.\n", cdev);
+ return;
+ }
+
+ cdev = cxgbi_device_register(0, adapter->params.nports);
+ if (!cdev) {
+ pr_warn("device 0x%p register failed.\n", t3dev);
+ return;
+ }
+
+ cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
+ cdev->lldev = t3dev;
+ cdev->pdev = adapter->pdev;
+ cdev->ports = adapter->port;
+ cdev->nports = adapter->params.nports;
+ cdev->mtus = adapter->params.mtus;
+ cdev->nmtus = NMTUS;
+ cdev->snd_win = cxgb3i_snd_win;
+ cdev->rcv_win = cxgb3i_rcv_win;
+ cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
+ cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
+ cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
+ cdev->dev_ddp_cleanup = t3_ddp_cleanup;
+ cdev->itp = &cxgb3i_iscsi_transport;
+
+ err = cxgb3i_ddp_init(cdev);
+ if (err) {
+ pr_info("0x%p ddp init failed\n", cdev);
+ goto err_out;
+ }
+
+ err = cxgb3i_ofld_init(cdev);
+ if (err) {
+ pr_info("0x%p offload init failed\n", cdev);
+ goto err_out;
+ }
+
+ err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
+ &cxgb3i_host_template, cxgb3i_stt);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < cdev->nports; i++)
+ cdev->hbas[i]->ipv4addr =
+ cxgb3i_get_private_ipv4addr(cdev->ports[i]);
+
+ pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
+ cdev, cdev ? cdev->flags : 0, t3dev, err);
+ return;
+
+err_out:
+ cxgbi_device_unregister(cdev);
+}
+
+static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
+{
+ struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
+
+ log_debug(1 << CXGBI_DBG_TOE,
+ "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
+ t3dev, cdev, event, port);
+ if (!cdev)
+ return;
+
+ switch (event) {
+ case OFFLOAD_STATUS_DOWN:
+ cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
+ break;
+ case OFFLOAD_STATUS_UP:
+ cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
+ break;
+ }
+}
+
+/**
+ * cxgb3i_init_module - module init entry point
+ *
+ * initialize any driver wide global data structures and register itself
+ * with the cxgb3 module
+ */
+static int __init cxgb3i_init_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "%s", version);
+
+ rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
+ if (rc < 0)
+ return rc;
+
+ cxgb3_register_client(&t3_client);
+ return 0;
+}
+
+/**
+ * cxgb3i_exit_module - module cleanup/exit entry point
+ *
+ * go through the driver hba list and for each hba, release any resource held.
+ * and unregisters iscsi transport and the cxgb3 module
+ */
+static void __exit cxgb3i_exit_module(void)
+{
+ cxgb3_unregister_client(&t3_client);
+ cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
+ cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
+}
+
+module_init(cxgb3i_init_module);
+module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000..20593fd69
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -0,0 +1,62 @@
+/*
+ * cxgb3i.h: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_H__
+#define __CXGB3I_H__
+
+#define CXGB3I_SCSI_HOST_QDEPTH 1024
+#define CXGB3I_MAX_LUN 512
+#define ISCSI_PDU_NONPAYLOAD_MAX \
+ (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
+
+/*for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define CXGB3I_TX_HEADER_LEN \
+ (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
+
+extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
+
+static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
+{
+ return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
+}
+
+static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
+ unsigned int addr)
+{
+ struct port_info *pi = (struct port_info *)netdev_priv(ndev);
+
+ pi->iscsic.flags = addr ? 1 : 0;
+ pi->iscsi_ipv4addr = addr;
+ if (addr)
+ memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
+}
+
+struct cpl_iscsi_hdr_norss {
+ union opcode_tid ot;
+ u16 pdu_len_ddp;
+ u16 len;
+ u32 seq;
+ u16 urg;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_rx_data_ddp_norss {
+ union opcode_tid ot;
+ u16 urg;
+ u16 len;
+ u32 seq;
+ u32 nxt_seq;
+ u32 ulp_crc;
+ u32 ddp_status;
+};
+#endif
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild
new file mode 100644
index 000000000..8290cdaa4
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild
@@ -0,0 +1,3 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
new file mode 100644
index 000000000..8c4e42303
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -0,0 +1,10 @@
+config SCSI_CXGB4_ISCSI
+ tristate "Chelsio T4 iSCSI support"
+ depends on PCI && INET && (IPV6 || IPV6=n)
+ select NETDEVICES
+ select ETHERNET
+ select NET_VENDOR_CHELSIO
+ select CHELSIO_T4
+ select SCSI_ISCSI_ATTRS
+ ---help---
+ This driver supports iSCSI offload for the Chelsio T4 devices.
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
new file mode 100644
index 000000000..dd00e5fe4
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -0,0 +1,1894 @@
+/*
+ * cxgb4i.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ * Rakesh Ranjan (rranjan@chelsio.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <scsi/scsi_host.h>
+#include <net/tcp.h>
+#include <net/dst.h>
+#include <linux/netdevice.h>
+#include <net/addrconf.h>
+
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "t4fw_api.h"
+#include "l2t.h"
+#include "cxgb4i.h"
+#include "clip_tbl.h"
+
+static unsigned int dbg_level;
+
+#include "../libcxgbi.h"
+
+#define DRV_MODULE_NAME "cxgb4i"
+#define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver"
+#define DRV_MODULE_VERSION "0.9.4"
+
+static char version[] =
+ DRV_MODULE_DESC " " DRV_MODULE_NAME
+ " v" DRV_MODULE_VERSION "\n";
+
+MODULE_AUTHOR("Chelsio Communications, Inc.");
+MODULE_DESCRIPTION(DRV_MODULE_DESC);
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(dbg_level, uint, 0644);
+MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
+
+static int cxgb4i_rcv_win = 256 * 1024;
+module_param(cxgb4i_rcv_win, int, 0644);
+MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
+
+static int cxgb4i_snd_win = 128 * 1024;
+module_param(cxgb4i_snd_win, int, 0644);
+MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
+
+static int cxgb4i_rx_credit_thres = 10 * 1024;
+module_param(cxgb4i_rx_credit_thres, int, 0644);
+MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
+ "RX credits return threshold in bytes (default=10KB)");
+
+static unsigned int cxgb4i_max_connect = (8 * 1024);
+module_param(cxgb4i_max_connect, uint, 0644);
+MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
+
+static unsigned short cxgb4i_sport_base = 20000;
+module_param(cxgb4i_sport_base, ushort, 0644);
+MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
+
+typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
+
+static void *t4_uld_add(const struct cxgb4_lld_info *);
+static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
+static int t4_uld_state_change(void *, enum cxgb4_state state);
+static inline int send_tx_flowc_wr(struct cxgbi_sock *);
+
+static const struct cxgb4_uld_info cxgb4i_uld_info = {
+ .name = DRV_MODULE_NAME,
+ .add = t4_uld_add,
+ .rx_handler = t4_uld_rx_handler,
+ .state_change = t4_uld_state_change,
+};
+
+static struct scsi_host_template cxgb4i_host_template = {
+ .module = THIS_MODULE,
+ .name = DRV_MODULE_NAME,
+ .proc_name = DRV_MODULE_NAME,
+ .can_queue = CXGB4I_SCSI_HOST_QDEPTH,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = scsi_change_queue_depth,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .target_alloc = iscsi_target_alloc,
+ .use_clustering = DISABLE_CLUSTERING,
+ .this_id = -1,
+ .track_queue_depth = 1,
+};
+
+static struct iscsi_transport cxgb4i_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = DRV_MODULE_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
+ CAP_DATADGST | CAP_DIGEST_OFFLOAD |
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
+ .attr_is_visible = cxgbi_attr_is_visible,
+ .get_host_param = cxgbi_get_host_param,
+ .set_host_param = cxgbi_set_host_param,
+ /* session management */
+ .create_session = cxgbi_create_session,
+ .destroy_session = cxgbi_destroy_session,
+ .get_session_param = iscsi_session_get_param,
+ /* connection management */
+ .create_conn = cxgbi_create_conn,
+ .bind_conn = cxgbi_bind_conn,
+ .destroy_conn = iscsi_tcp_conn_teardown,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .get_conn_param = iscsi_conn_get_param,
+ .set_param = cxgbi_set_conn_param,
+ .get_stats = cxgbi_get_conn_stats,
+ /* pdu xmit req from user space */
+ .send_pdu = iscsi_conn_send_pdu,
+ /* task */
+ .init_task = iscsi_tcp_task_init,
+ .xmit_task = iscsi_tcp_task_xmit,
+ .cleanup_task = cxgbi_cleanup_task,
+ /* pdu */
+ .alloc_pdu = cxgbi_conn_alloc_pdu,
+ .init_pdu = cxgbi_conn_init_pdu,
+ .xmit_pdu = cxgbi_conn_xmit_pdu,
+ .parse_pdu_itt = cxgbi_parse_pdu_itt,
+ /* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
+ .ep_connect = cxgbi_ep_connect,
+ .ep_poll = cxgbi_ep_poll,
+ .ep_disconnect = cxgbi_ep_disconnect,
+ /* Error recovery timeout call */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+};
+
+static struct scsi_transport_template *cxgb4i_stt;
+
+/*
+ * CPL (Chelsio Protocol Language) defines a message passing interface between
+ * the host driver and Chelsio asic.
+ * The section below implments CPLs that related to iscsi tcp connection
+ * open/close/abort and data send/receive.
+ */
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define RCV_BUFSIZ_MASK 0x3FFU
+#define MAX_IMM_TX_PKT_LEN 128
+
+static int push_tx_frames(struct cxgbi_sock *, int);
+
+/*
+ * is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static inline bool is_ofld_imm(const struct sk_buff *skb)
+{
+ int len = skb->len;
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+ len += sizeof(struct fw_ofld_tx_data_wr);
+
+ return len <= MAX_IMM_TX_PKT_LEN;
+}
+
+static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+ int t4 = is_t4(lldi->adapter_type);
+ int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
+ unsigned long long opt0;
+ unsigned int opt2;
+ unsigned int qid_atid = ((unsigned int)csk->atid) |
+ (((unsigned int)csk->rss_qid) << 14);
+
+ opt0 = KEEP_ALIVE_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(csk->mss_idx) |
+ L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F |
+ (RX_FC_DISABLE_F) |
+ RSS_QUEUE_V(csk->rss_qid);
+
+ if (is_t4(lldi->adapter_type)) {
+ struct cpl_act_open_req *req =
+ (struct cpl_act_open_req *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t));
+ opt2 |= RX_FC_VALID_F;
+ req->opt2 = cpu_to_be32(opt2);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+ csk, &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->atid, csk->rss_qid);
+ } else {
+ struct cpl_t5_act_open_req *req =
+ (struct cpl_t5_act_open_req *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+ opt2 |= 1 << 31;
+ req->opt2 = cpu_to_be32(opt2);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+ csk, &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->atid, csk->rss_qid);
+ }
+
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+ pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
+ (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+ csk->state, csk->flags, csk->atid, csk->rss_qid);
+
+ cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+ int t4 = is_t4(lldi->adapter_type);
+ int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
+ unsigned long long opt0;
+ unsigned int opt2;
+ unsigned int qid_atid = ((unsigned int)csk->atid) |
+ (((unsigned int)csk->rss_qid) << 14);
+
+ opt0 = KEEP_ALIVE_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(csk->mss_idx) |
+ L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F |
+ RX_FC_DISABLE_F |
+ RSS_QUEUE_V(csk->rss_qid);
+
+ if (t4) {
+ struct cpl_act_open_req6 *req =
+ (struct cpl_act_open_req6 *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_atid));
+ req->local_port = csk->saddr6.sin6_port;
+ req->peer_port = csk->daddr6.sin6_port;
+
+ req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+ req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+ 8);
+ req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+ req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+ 8);
+
+ req->opt0 = cpu_to_be64(opt0);
+
+ opt2 |= RX_FC_VALID_F;
+ req->opt2 = cpu_to_be32(opt2);
+
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t));
+ } else {
+ struct cpl_t5_act_open_req6 *req =
+ (struct cpl_t5_act_open_req6 *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_atid));
+ req->local_port = csk->saddr6.sin6_port;
+ req->peer_port = csk->daddr6.sin6_port;
+ req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+ req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+ 8);
+ req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+ req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+ 8);
+ req->opt0 = cpu_to_be64(opt0);
+
+ opt2 |= T5_OPT_2_VALID_F;
+ req->opt2 = cpu_to_be32(opt2);
+
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+ }
+
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+ pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
+ t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+ &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
+ &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
+ csk->rss_qid);
+
+ cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+}
+#endif
+
+static void send_close_req(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb = csk->cpl_close;
+ struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
+ unsigned int tid = csk->tid;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx, tid %u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ csk->cpl_close = NULL;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ req->rsvd = 0;
+
+ cxgbi_sock_skb_entail(csk, skb);
+ if (csk->state >= CTP_ESTABLISHED)
+ push_tx_frames(csk, 1);
+}
+
+static void abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
+ struct cpl_abort_req *req;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ req = (struct cpl_abort_req *)skb->data;
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+}
+
+static void send_abort_req(struct cxgbi_sock *csk)
+{
+ struct cpl_abort_req *req;
+ struct sk_buff *skb = csk->cpl_abort_req;
+
+ if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
+ return;
+
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ send_tx_flowc_wr(csk);
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
+ cxgbi_sock_set_state(csk, CTP_ABORTING);
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
+ cxgbi_sock_purge_write_queue(csk);
+
+ csk->cpl_abort_req = NULL;
+ req = (struct cpl_abort_req *)skb->head;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+ req->cmd = CPL_ABORT_SEND_RST;
+ t4_set_arp_err_handler(skb, csk, abort_arp_failure);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
+ req->rsvd0 = htonl(csk->snd_nxt);
+ req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
+ req->rsvd1);
+
+ cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+}
+
+static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
+{
+ struct sk_buff *skb = csk->cpl_abort_rpl;
+ struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, status %d.\n",
+ csk, csk->state, csk->flags, csk->tid, rst_status);
+
+ csk->cpl_abort_rpl = NULL;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+ INIT_TP_WR(rpl, csk->tid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
+ rpl->cmd = rst_status;
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+}
+
+/*
+ * CPL connection rx data ack: host ->
+ * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
+ * credits sent.
+ */
+static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
+{
+ struct sk_buff *skb;
+ struct cpl_rx_data_ack *req;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
+ csk, csk->state, csk->flags, csk->tid, credits);
+
+ skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
+ if (!skb) {
+ pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
+ return 0;
+ }
+ req = (struct cpl_rx_data_ack *)skb->head;
+
+ set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ csk->tid));
+ req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
+ | RX_FORCE_ACK_F);
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+ return credits;
+}
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/*
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+ flits = skb_transport_offset(skb) / 8;
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits + sgl_len(cnt);
+}
+
+#define FLOWC_WR_NPARAMS_MIN 9
+static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
+{
+ int nparams, flowclen16, flowclen;
+
+ nparams = FLOWC_WR_NPARAMS_MIN;
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+ /*
+ * Return the number of 16-byte credits used by the FlowC request.
+ * Pass back the nparams and actual FlowC length if requested.
+ */
+ if (nparamsp)
+ *nparamsp = nparams;
+ if (flowclenp)
+ *flowclenp = flowclen;
+
+ return flowclen16;
+}
+
+static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb;
+ struct fw_flowc_wr *flowc;
+ int nparams, flowclen16, flowclen;
+
+ flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
+ skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
+ flowc = (struct fw_flowc_wr *)skb->head;
+ flowc->op_to_nparams =
+ htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
+ flowc->flowid_len16 =
+ htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = htonl(csk->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = htonl(csk->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = htonl(csk->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = htonl(csk->snd_nxt);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = htonl(csk->rcv_nxt);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = htonl(csk->advmss);
+ flowc->mnemval[8].mnemonic = 0;
+ flowc->mnemval[8].val = 0;
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[8].val = 16384;
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
+ csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
+ csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
+ csk->advmss);
+
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+
+ return flowclen16;
+}
+
+static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
+ int dlen, int len, u32 credits, int compl)
+{
+ struct fw_ofld_tx_data_wr *req;
+ unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
+ unsigned int wr_ulp_mode = 0, val;
+ bool imm = is_ofld_imm(skb);
+
+ req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
+
+ if (imm) {
+ req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(dlen));
+ req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+ } else {
+ req->op_to_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(0));
+ req->flowid_len16 =
+ cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+ }
+ if (submode)
+ wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+ val = skb_peek(&csk->write_queue) ? 0 : 1;
+ req->tunnel_to_proxy = htonl(wr_ulp_mode |
+ FW_OFLD_TX_DATA_WR_SHOVE_V(val));
+ req->plen = htonl(len);
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+}
+
+static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
+{
+ int total_size = 0;
+ struct sk_buff *skb;
+
+ if (unlikely(csk->state < CTP_ESTABLISHED ||
+ csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
+ 1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ return 0;
+ }
+
+ while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
+ int dlen = skb->len;
+ int len = skb->len;
+ unsigned int credits_needed;
+ int flowclen16 = 0;
+
+ skb_reset_transport_header(skb);
+ if (is_ofld_imm(skb))
+ credits_needed = DIV_ROUND_UP(dlen, 16);
+ else
+ credits_needed = DIV_ROUND_UP(
+ 8 * calc_tx_flits_ofld(skb),
+ 16);
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+ credits_needed += DIV_ROUND_UP(
+ sizeof(struct fw_ofld_tx_data_wr),
+ 16);
+
+ /*
+ * Assumes the initial credits is large enough to support
+ * fw_flowc_wr plus largest possible first payload
+ */
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ flowclen16 = send_tx_flowc_wr(csk);
+ csk->wr_cred -= flowclen16;
+ csk->wr_una_cred += flowclen16;
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
+ if (csk->wr_cred < credits_needed) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p, skb %u/%u, wr %d < %u.\n",
+ csk, skb->len, skb->data_len,
+ credits_needed, csk->wr_cred);
+ break;
+ }
+ __skb_unlink(skb, &csk->write_queue);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+ skb->csum = credits_needed + flowclen16;
+ csk->wr_cred -= credits_needed;
+ csk->wr_una_cred += credits_needed;
+ cxgbi_sock_enqueue_wr(csk, skb);
+
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
+ csk, skb->len, skb->data_len, credits_needed,
+ csk->wr_cred, csk->wr_una_cred);
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
+ len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
+ make_tx_data_wr(csk, skb, dlen, len, credits_needed,
+ req_completion);
+ csk->snd_nxt += len;
+ cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
+ }
+ total_size += skb->truesize;
+ t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
+ csk, csk->state, csk->flags, csk->tid, skb, len);
+
+ cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+ }
+ return total_size;
+}
+
+static inline void free_atid(struct cxgbi_sock *csk)
+{
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+
+ if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
+ cxgb4_free_atid(lldi->tids, csk->atid);
+ cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
+ cxgbi_sock_put(csk);
+ }
+}
+
+static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
+ unsigned short tcp_opt = ntohs(req->tcp_opt);
+ unsigned int tid = GET_TID(req);
+ unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+ csk = lookup_atid(t, atid);
+ if (unlikely(!csk)) {
+ pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
+ goto rel_skb;
+ }
+
+ if (csk->atid != atid) {
+ pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
+ atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
+ goto rel_skb;
+ }
+
+ pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
+ (&csk->saddr), (&csk->daddr),
+ atid, tid, csk, csk->state, csk->flags, rcv_isn);
+
+ module_put(THIS_MODULE);
+
+ cxgbi_sock_get(csk);
+ csk->tid = tid;
+ cxgb4_insert_tid(lldi->tids, csk, tid);
+ cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
+
+ free_atid(csk);
+
+ spin_lock_bh(&csk->lock);
+ if (unlikely(csk->state != CTP_ACTIVE_OPEN))
+ pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->retry_timer.function) {
+ del_timer(&csk->retry_timer);
+ csk->retry_timer.function = NULL;
+ }
+
+ csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
+ /*
+ * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
+ * pass through opt0.
+ */
+ if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
+ csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
+
+ csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+ if (TCPOPT_TSTAMP_G(tcp_opt))
+ csk->advmss -= 12;
+ if (csk->advmss < 128)
+ csk->advmss = 128;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, mss_idx %u, advmss %u.\n",
+ csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
+
+ cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+
+ if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
+ send_abort_req(csk);
+ else {
+ if (skb_queue_len(&csk->write_queue))
+ push_tx_frames(csk, 0);
+ cxgbi_conn_tx_open(csk);
+ }
+ spin_unlock_bh(&csk->lock);
+
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static int act_open_rpl_status_to_errno(int status)
+{
+ switch (status) {
+ case CPL_ERR_CONN_RESET:
+ return -ECONNREFUSED;
+ case CPL_ERR_ARP_MISS:
+ return -EHOSTUNREACH;
+ case CPL_ERR_CONN_TIMEDOUT:
+ return -ETIMEDOUT;
+ case CPL_ERR_TCAM_FULL:
+ return -ENOMEM;
+ case CPL_ERR_CONN_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EIO;
+ }
+}
+
+static void csk_act_open_retry_timer(unsigned long data)
+{
+ struct sk_buff *skb = NULL;
+ struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+ void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
+ struct l2t_entry *);
+ int t4 = is_t4(lldi->adapter_type), size, size6;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ if (t4) {
+ size = sizeof(struct cpl_act_open_req);
+ size6 = sizeof(struct cpl_act_open_req6);
+ } else {
+ size = sizeof(struct cpl_t5_act_open_req);
+ size6 = sizeof(struct cpl_t5_act_open_req6);
+ }
+
+ if (csk->csk_family == AF_INET) {
+ send_act_open_func = send_act_open_req;
+ skb = alloc_wr(size, 0, GFP_ATOMIC);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ send_act_open_func = send_act_open_req6;
+ skb = alloc_wr(size6, 0, GFP_ATOMIC);
+#endif
+ }
+
+ if (!skb)
+ cxgbi_sock_fail_act_open(csk, -ENOMEM);
+ else {
+ skb->sk = (struct sock *)csk;
+ t4_set_arp_err_handler(skb, csk,
+ cxgbi_sock_act_open_req_arp_failure);
+ send_act_open_func(csk, skb, csk->l2t);
+ }
+
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+
+}
+
+static inline bool is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
+static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int atid =
+ TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+ unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_atid(t, atid);
+ if (unlikely(!csk)) {
+ pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
+ goto rel_skb;
+ }
+
+ pr_info_ipaddr("tid %u/%u, status %u.\n"
+ "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
+ atid, tid, status, csk, csk->state, csk->flags);
+
+ if (is_neg_adv(status))
+ goto rel_skb;
+
+ module_put(THIS_MODULE);
+
+ if (status && status != CPL_ERR_TCAM_FULL &&
+ status != CPL_ERR_CONN_EXIST &&
+ status != CPL_ERR_ARP_MISS)
+ cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));
+
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ if (status == CPL_ERR_CONN_EXIST &&
+ csk->retry_timer.function != csk_act_open_retry_timer) {
+ csk->retry_timer.function = csk_act_open_retry_timer;
+ mod_timer(&csk->retry_timer, jiffies + HZ / 2);
+ } else
+ cxgbi_sock_fail_act_open(csk,
+ act_open_rpl_status_to_errno(status));
+
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
+ unsigned int tid = GET_TID(req);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+ (&csk->saddr), (&csk->daddr),
+ csk, csk->state, csk->flags, csk->tid);
+ cxgbi_sock_rcv_peer_close(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+ (&csk->saddr), (&csk->daddr),
+ csk, csk->state, csk->flags, csk->tid);
+ cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
+ int *need_rst)
+{
+ switch (abort_reason) {
+ case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_CONN_RESET:
+ return csk->state > CTP_ESTABLISHED ?
+ -EPIPE : -ECONNRESET;
+ case CPL_ERR_XMIT_TIMEDOUT:
+ case CPL_ERR_PERSIST_TIMEDOUT:
+ case CPL_ERR_FINWAIT2_TIMEDOUT:
+ case CPL_ERR_KEEPALIVE_TIMEDOUT:
+ return -ETIMEDOUT;
+ default:
+ return -EIO;
+ }
+}
+
+static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
+ unsigned int tid = GET_TID(req);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ int rst_status = CPL_ABORT_NO_RST;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+ (&csk->saddr), (&csk->daddr),
+ csk, csk->state, csk->flags, csk->tid, req->status);
+
+ if (is_neg_adv(req->status))
+ goto rel_skb;
+
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
+
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ send_tx_flowc_wr(csk);
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
+ cxgbi_sock_set_state(csk, CTP_ABORTING);
+
+ send_abort_rpl(csk, rst_status);
+
+ if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
+ csk->err = abort_status_to_errno(csk, req->status, &rst_status);
+ cxgbi_sock_closed(csk);
+ }
+
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (!csk)
+ goto rel_skb;
+
+ if (csk)
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+ (&csk->saddr), (&csk->daddr), csk,
+ csk->state, csk->flags, csk->tid, rpl->status);
+
+ if (rpl->status == CPL_ERR_ABORT_FAILED)
+ goto rel_skb;
+
+ cxgbi_sock_rcv_abort_rpl(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (!csk) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ } else {
+ /* not expecting this, reset the connection. */
+ pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
+ spin_lock_bh(&csk->lock);
+ send_abort_req(csk);
+ spin_unlock_bh(&csk->lock);
+ }
+ __kfree_skb(skb);
+}
+
+static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+ unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, skb, skb->len,
+ pdu_len_ddp);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
+ cxgbi_skcb_flags(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*cpl));
+ __pskb_trim(skb, ntohs(cpl->len));
+
+ if (!csk->skb_ulp_lhdr) {
+ unsigned char *bhs;
+ unsigned int hlen, dlen, plen;
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
+ csk, csk->state, csk->flags, csk->tid, skb);
+ csk->skb_ulp_lhdr = skb;
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+
+ if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
+ pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
+ csk->tid, cxgbi_skcb_tcp_seq(skb),
+ csk->rcv_nxt);
+ goto abort_conn;
+ }
+
+ bhs = skb->data;
+ hlen = ntohs(cpl->len);
+ dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
+
+ plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
+ if (is_t4(lldi->adapter_type))
+ plen -= 40;
+
+ if ((hlen + dlen) != plen) {
+ pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
+ "mismatch %u != %u + %u, seq 0x%x.\n",
+ csk->tid, plen, hlen, dlen,
+ cxgbi_skcb_tcp_seq(skb));
+ goto abort_conn;
+ }
+
+ cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
+ if (dlen)
+ cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
+ csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
+ csk, skb, *bhs, hlen, dlen,
+ ntohl(*((unsigned int *)(bhs + 16))),
+ ntohl(*((unsigned int *)(bhs + 24))));
+
+ } else {
+ struct sk_buff *lskb = csk->skb_ulp_lhdr;
+
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+ csk, csk->state, csk->flags, skb, lskb);
+ }
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ spin_unlock_bh(&csk->lock);
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void do_rx_data_ddp(struct cxgbi_device *cdev,
+ struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct sk_buff *lskb;
+ struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ unsigned int status = ntohl(rpl->ddpvld);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
+ csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ if (!csk->skb_ulp_lhdr) {
+ pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
+ goto abort_conn;
+ }
+
+ lskb = csk->skb_ulp_lhdr;
+ csk->skb_ulp_lhdr = NULL;
+
+ cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
+
+ if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
+ pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
+ csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
+
+ if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+ csk, lskb, status, cxgbi_skcb_flags(lskb));
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
+ }
+ if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+ csk, lskb, status, cxgbi_skcb_flags(lskb));
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
+ }
+ if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+ csk, lskb, status);
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
+ }
+ if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+ !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+ csk, lskb, status);
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
+ }
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
+ csk, lskb, cxgbi_skcb_flags(lskb));
+
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
+ cxgbi_conn_pdu_ready(csk);
+ spin_unlock_bh(&csk->lock);
+ goto rel_skb;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk))
+ pr_err("can't find connection for tid %u.\n", tid);
+ else {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
+ rpl->seq_vld);
+ }
+ __kfree_skb(skb);
+}
+
+static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct cxgbi_sock *csk;
+
+ csk = lookup_tid(t, tid);
+ if (!csk)
+ pr_err("can't find conn. for tid %u.\n", tid);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, rpl->status);
+
+ if (rpl->status != CPL_ERR_NONE)
+ pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
+ csk, tid, rpl->status);
+
+ __kfree_skb(skb);
+}
+
+static int alloc_cpls(struct cxgbi_sock *csk)
+{
+ csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
+ 0, GFP_KERNEL);
+ if (!csk->cpl_close)
+ return -ENOMEM;
+
+ csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
+ 0, GFP_KERNEL);
+ if (!csk->cpl_abort_req)
+ goto free_cpls;
+
+ csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
+ 0, GFP_KERNEL);
+ if (!csk->cpl_abort_rpl)
+ goto free_cpls;
+ return 0;
+
+free_cpls:
+ cxgbi_sock_free_cpl_skbs(csk);
+ return -ENOMEM;
+}
+
+static inline void l2t_put(struct cxgbi_sock *csk)
+{
+ if (csk->l2t) {
+ cxgb4_l2t_release(csk->l2t);
+ csk->l2t = NULL;
+ cxgbi_sock_put(csk);
+ }
+}
+
+static void release_offload_resources(struct cxgbi_sock *csk)
+{
+ struct cxgb4_lld_info *lldi;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct net_device *ndev = csk->cdev->ports[csk->port_id];
+#endif
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ cxgbi_sock_free_cpl_skbs(csk);
+ if (csk->wr_cred != csk->wr_max_cred) {
+ cxgbi_sock_purge_wr_queue(csk);
+ cxgbi_sock_reset_wr_list(csk);
+ }
+
+ l2t_put(csk);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (csk->csk_family == AF_INET6)
+ cxgb4_clip_release(ndev,
+ (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+
+ if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
+ free_atid(csk);
+ else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
+ lldi = cxgbi_cdev_priv(csk->cdev);
+ cxgb4_remove_tid(lldi->tids, 0, csk->tid);
+ cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
+ cxgbi_sock_put(csk);
+ }
+ csk->dst = NULL;
+ csk->cdev = NULL;
+}
+
+static int init_act_open(struct cxgbi_sock *csk)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ struct sk_buff *skb = NULL;
+ struct neighbour *n = NULL;
+ void *daddr;
+ unsigned int step;
+ unsigned int size, size6;
+ int t4 = is_t4(lldi->adapter_type);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->csk_family == AF_INET)
+ daddr = &csk->daddr.sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (csk->csk_family == AF_INET6)
+ daddr = &csk->daddr6.sin6_addr;
+#endif
+ else {
+ pr_err("address family 0x%x not supported\n", csk->csk_family);
+ goto rel_resource;
+ }
+
+ n = dst_neigh_lookup(csk->dst, daddr);
+
+ if (!n) {
+ pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+ goto rel_resource;
+ }
+
+ csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
+ if (csk->atid < 0) {
+ pr_err("%s, NO atid available.\n", ndev->name);
+ return -EINVAL;
+ }
+ cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
+ cxgbi_sock_get(csk);
+
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
+ if (!csk->l2t) {
+ pr_err("%s, cannot alloc l2t.\n", ndev->name);
+ goto rel_resource_without_clip;
+ }
+ cxgbi_sock_get(csk);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (csk->csk_family == AF_INET6)
+ cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+
+ if (t4) {
+ size = sizeof(struct cpl_act_open_req);
+ size6 = sizeof(struct cpl_act_open_req6);
+ } else {
+ size = sizeof(struct cpl_t5_act_open_req);
+ size6 = sizeof(struct cpl_t5_act_open_req6);
+ }
+
+ if (csk->csk_family == AF_INET)
+ skb = alloc_wr(size, 0, GFP_NOIO);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ skb = alloc_wr(size6, 0, GFP_NOIO);
+#endif
+
+ if (!skb)
+ goto rel_resource;
+ skb->sk = (struct sock *)csk;
+ t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
+
+ if (!csk->mtu)
+ csk->mtu = dst_mtu(csk->dst);
+ cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ /* SMT two entries per row */
+ csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+ step = lldi->ntxq / lldi->nchan;
+ csk->txq_idx = cxgb4_port_idx(ndev) * step;
+ step = lldi->nrxq / lldi->nchan;
+ csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
+ csk->wr_cred = lldi->wr_cred -
+ DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+ csk->wr_max_cred = csk->wr_cred;
+ csk->wr_una_cred = 0;
+ cxgbi_sock_reset_wr_list(csk);
+ csk->err = 0;
+
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
+ (&csk->saddr), (&csk->daddr), csk, csk->state,
+ csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
+ csk->mtu, csk->mss_idx, csk->smac_idx);
+
+ /* must wait for either a act_open_rpl or act_open_establish */
+ try_module_get(THIS_MODULE);
+ cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
+ if (csk->csk_family == AF_INET)
+ send_act_open_req(csk, skb, csk->l2t);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ send_act_open_req6(csk, skb, csk->l2t);
+#endif
+ neigh_release(n);
+
+ return 0;
+
+rel_resource:
+#if IS_ENABLED(CONFIG_IPV6)
+ if (csk->csk_family == AF_INET6)
+ cxgb4_clip_release(ndev,
+ (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+rel_resource_without_clip:
+ if (n)
+ neigh_release(n);
+ if (skb)
+ __kfree_skb(skb);
+ return -EINVAL;
+}
+
+cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = do_act_establish,
+ [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
+ [CPL_PEER_CLOSE] = do_peer_close,
+ [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
+ [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
+ [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
+ [CPL_FW4_ACK] = do_fw4_ack,
+ [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
+ [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+ [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
+ [CPL_RX_DATA_DDP] = do_rx_data_ddp,
+ [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_DATA] = do_rx_data,
+};
+
+int cxgb4i_ofld_init(struct cxgbi_device *cdev)
+{
+ int rc;
+
+ if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
+ cxgb4i_max_connect = CXGB4I_MAX_CONN;
+
+ rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
+ cxgb4i_max_connect);
+ if (rc < 0)
+ return rc;
+
+ cdev->csk_release_offload_resources = release_offload_resources;
+ cdev->csk_push_tx_frames = push_tx_frames;
+ cdev->csk_send_abort_req = send_abort_req;
+ cdev->csk_send_close_req = send_close_req;
+ cdev->csk_send_rx_credits = send_rx_credits;
+ cdev->csk_alloc_cpls = alloc_cpls;
+ cdev->csk_init_act_open = init_act_open;
+
+ pr_info("cdev 0x%p, offload up, added.\n", cdev);
+ return 0;
+}
+
+/*
+ * functions to program the pagepod in h/w
+ */
+#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
+static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
+ struct ulp_mem_io *req,
+ unsigned int wr_len, unsigned int dlen,
+ unsigned int pm_addr)
+{
+ struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
+
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+ if (is_t4(lldi->adapter_type))
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ (ULP_MEMIO_ORDER_F));
+ else
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ (T5_ULP_MEMIO_IMM_F));
+ req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+ req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
+ req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(dlen);
+}
+
+static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
+ struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
+ unsigned int npods,
+ struct cxgbi_gather_list *gl,
+ unsigned int gl_pidx)
+{
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ struct cxgbi_pagepod *ppod;
+ unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
+ unsigned int dlen = PPOD_SIZE * npods;
+ unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+ sizeof(struct ulptx_idata) + dlen, 16);
+ unsigned int i;
+
+ skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
+ if (!skb) {
+ pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
+ cdev, idx, npods);
+ return -ENOMEM;
+ }
+ req = (struct ulp_mem_io *)skb->head;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
+ idata = (struct ulptx_idata *)(req + 1);
+ ppod = (struct cxgbi_pagepod *)(idata + 1);
+
+ for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
+ if (!hdr && !gl)
+ cxgbi_ddp_ppod_clear(ppod);
+ else
+ cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
+ }
+
+ cxgb4_ofld_send(cdev->ports[port_id], skb);
+ return 0;
+}
+
+static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
+ unsigned int idx, unsigned int npods,
+ struct cxgbi_gather_list *gl)
+{
+ unsigned int i, cnt;
+ int err = 0;
+
+ for (i = 0; i < npods; i += cnt, idx += cnt) {
+ cnt = npods - i;
+ if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+ cnt = ULPMEM_IDATA_MAX_NPPODS;
+ err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
+ idx, cnt, gl, 4 * i);
+ if (err < 0)
+ break;
+ }
+ return err;
+}
+
+static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
+ unsigned int idx, unsigned int npods)
+{
+ unsigned int i, cnt;
+ int err;
+
+ for (i = 0; i < npods; i += cnt, idx += cnt) {
+ cnt = npods - i;
+ if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+ cnt = ULPMEM_IDATA_MAX_NPPODS;
+ err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
+ idx, cnt, NULL, 0);
+ if (err < 0)
+ break;
+ }
+}
+
+static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
+ int pg_idx, bool reply)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+
+ if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
+ return 0;
+
+ skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp page size */
+ req = (struct cpl_set_tcb_field *)skb->head;
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 8);
+ req->val = cpu_to_be64(pg_idx << 8);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
+
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+ return 0;
+}
+
+static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
+ int hcrc, int dcrc, int reply)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+
+ if (!hcrc && !dcrc)
+ return 0;
+
+ skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ csk->hcrc_len = (hcrc ? 4 : 0);
+ csk->dcrc_len = (dcrc ? 4 : 0);
+ /* set up ulp submode */
+ req = (struct cpl_set_tcb_field *)skb->head;
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 4);
+ req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
+ (dcrc ? ULP_CRC_DATA : 0)) << 4);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
+
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+ return 0;
+}
+
+static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+ unsigned int tagmask, pgsz_factor[4];
+ int err;
+
+ if (ddp) {
+ kref_get(&ddp->refcnt);
+ pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
+ cdev, cdev->ddp);
+ return -EALREADY;
+ }
+
+ err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
+ lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
+ lldi->iscsi_iolen, lldi->iscsi_iolen);
+ if (err < 0)
+ return err;
+
+ ddp = cdev->ddp;
+
+ tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
+ cxgbi_ddp_page_size_factor(pgsz_factor);
+ cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
+
+ cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
+ cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
+ cdev->csk_ddp_set = ddp_set_map;
+ cdev->csk_ddp_clear = ddp_clear_map;
+
+ pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
+ cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
+ cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
+ pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
+ " %u/%u.\n",
+ cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
+ ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
+ ddp->max_rxsz, lldi->iscsi_iolen);
+ pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
+ cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
+ ddp->max_rxsz);
+ return 0;
+}
+
+static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
+{
+ struct cxgbi_device *cdev;
+ struct port_info *pi;
+ int i, rc;
+
+ cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
+ if (!cdev) {
+ pr_info("t4 device 0x%p, register failed.\n", lldi);
+ return NULL;
+ }
+ pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
+ cdev, lldi->adapter_type, lldi->nports,
+ lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
+ lldi->nrxq, lldi->wr_cred);
+ for (i = 0; i < lldi->nrxq; i++)
+ log_debug(1 << CXGBI_DBG_DEV,
+ "t4 0x%p, rxq id #%d: %u.\n",
+ cdev, i, lldi->rxq_ids[i]);
+
+ memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
+ cdev->flags = CXGBI_FLAG_DEV_T4;
+ cdev->pdev = lldi->pdev;
+ cdev->ports = lldi->ports;
+ cdev->nports = lldi->nports;
+ cdev->mtus = lldi->mtus;
+ cdev->nmtus = NMTUS;
+ cdev->snd_win = cxgb4i_snd_win;
+ cdev->rcv_win = cxgb4i_rcv_win;
+ cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+ cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
+ cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
+ cdev->itp = &cxgb4i_iscsi_transport;
+
+ cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
+ << FW_VIID_PFN_S;
+ pr_info("cdev 0x%p,%s, pfvf %u.\n",
+ cdev, lldi->ports[0]->name, cdev->pfvf);
+
+ rc = cxgb4i_ddp_init(cdev);
+ if (rc) {
+ pr_info("t4 0x%p ddp init failed.\n", cdev);
+ goto err_out;
+ }
+ rc = cxgb4i_ofld_init(cdev);
+ if (rc) {
+ pr_info("t4 0x%p ofld init failed.\n", cdev);
+ goto err_out;
+ }
+
+ rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
+ &cxgb4i_host_template, cxgb4i_stt);
+ if (rc)
+ goto err_out;
+
+ for (i = 0; i < cdev->nports; i++) {
+ pi = netdev_priv(lldi->ports[i]);
+ cdev->hbas[i]->port_id = pi->port_id;
+ }
+ return cdev;
+
+err_out:
+ cxgbi_device_unregister(cdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+#define RX_PULL_LEN 128
+static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl)
+{
+ const struct cpl_act_establish *rpl;
+ struct sk_buff *skb;
+ unsigned int opc;
+ struct cxgbi_device *cdev = handle;
+
+ if (pgl == NULL) {
+ unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
+
+ skb = alloc_wr(len, 0, GFP_ATOMIC);
+ if (!skb)
+ goto nomem;
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else {
+ if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
+ pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
+ pgl->va, be64_to_cpu(*rsp),
+ be64_to_cpu(*(u64 *)pgl->va),
+ pgl->tot_len);
+ return 0;
+ }
+ skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = (struct cpl_act_establish *)skb->data;
+ opc = rpl->ot.opcode;
+ log_debug(1 << CXGBI_DBG_TOE,
+ "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
+ cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
+ if (cxgb4i_cplhandlers[opc])
+ cxgb4i_cplhandlers[opc](cdev, skb);
+ else {
+ pr_err("No handler for opcode 0x%x.\n", opc);
+ __kfree_skb(skb);
+ }
+ return 0;
+nomem:
+ log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
+ return 1;
+}
+
+static int t4_uld_state_change(void *handle, enum cxgb4_state state)
+{
+ struct cxgbi_device *cdev = handle;
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+ pr_info("cdev 0x%p, UP.\n", cdev);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ pr_info("cdev 0x%p, RECOVERY.\n", cdev);
+ /* close all connections */
+ break;
+ case CXGB4_STATE_DOWN:
+ pr_info("cdev 0x%p, DOWN.\n", cdev);
+ break;
+ case CXGB4_STATE_DETACH:
+ pr_info("cdev 0x%p, DETACH.\n", cdev);
+ cxgbi_device_unregister(cdev);
+ break;
+ default:
+ pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
+ break;
+ }
+ return 0;
+}
+
+static int __init cxgb4i_init_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "%s", version);
+
+ rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
+ if (rc < 0)
+ return rc;
+ cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+
+ return 0;
+}
+
+static void __exit cxgb4i_exit_module(void)
+{
+ cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
+ cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
+ cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
+}
+
+module_init(cxgb4i_init_module);
+module_exit(cxgb4i_exit_module);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
new file mode 100644
index 000000000..1096026ba
--- /dev/null
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -0,0 +1,43 @@
+/*
+ * cxgb4i.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ * Written by: Rakesh Ranjan (rranjan@chelsio.com)
+ */
+
+#ifndef __CXGB4I_H__
+#define __CXGB4I_H__
+
+#define CXGB4I_SCSI_HOST_QDEPTH 1024
+#define CXGB4I_MAX_CONN 16384
+#define CXGB4I_MAX_TARGET CXGB4I_MAX_CONN
+#define CXGB4I_MAX_LUN 0x1000
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define CXGB4I_TX_HEADER_LEN \
+ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+struct cpl_rx_data_ddp {
+ union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ };
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+#endif /* __CXGB4I_H__ */
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
new file mode 100644
index 000000000..eb58afcfb
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -0,0 +1,2931 @@
+/*
+ * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ * Written by: Rakesh Ranjan (rranjan@chelsio.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/pci.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/if_vlan.h>
+#include <linux/inet.h>
+#include <net/dst.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include <linux/inetdevice.h> /* ip_dev_find */
+#include <linux/module.h>
+#include <net/tcp.h>
+
+static unsigned int dbg_level;
+
+#include "libcxgbi.h"
+
+#define DRV_MODULE_NAME "libcxgbi"
+#define DRV_MODULE_DESC "Chelsio iSCSI driver library"
+#define DRV_MODULE_VERSION "0.9.0"
+#define DRV_MODULE_RELDATE "Jun. 2010"
+
+MODULE_AUTHOR("Chelsio Communications, Inc.");
+MODULE_DESCRIPTION(DRV_MODULE_DESC);
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(dbg_level, uint, 0644);
+MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
+
+
+/*
+ * cxgbi device management
+ * maintains a list of the cxgbi devices
+ */
+static LIST_HEAD(cdev_list);
+static DEFINE_MUTEX(cdev_mutex);
+
+static LIST_HEAD(cdev_rcu_list);
+static DEFINE_SPINLOCK(cdev_rcu_lock);
+
+int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
+ unsigned int max_conn)
+{
+ struct cxgbi_ports_map *pmap = &cdev->pmap;
+
+ pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
+ sizeof(struct cxgbi_sock *),
+ GFP_KERNEL);
+ if (!pmap->port_csk) {
+ pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
+ return -ENOMEM;
+ }
+
+ pmap->max_connect = max_conn;
+ pmap->sport_base = base;
+ spin_lock_init(&pmap->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
+
+void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
+{
+ struct cxgbi_ports_map *pmap = &cdev->pmap;
+ struct cxgbi_sock *csk;
+ int i;
+
+ for (i = 0; i < pmap->max_connect; i++) {
+ if (pmap->port_csk[i]) {
+ csk = pmap->port_csk[i];
+ pmap->port_csk[i] = NULL;
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, cdev 0x%p, offload down.\n",
+ csk, cdev);
+ spin_lock_bh(&csk->lock);
+ cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
+ cxgbi_sock_closed(csk);
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
+
+static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
+{
+ log_debug(1 << CXGBI_DBG_DEV,
+ "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
+ cxgbi_hbas_remove(cdev);
+ cxgbi_device_portmap_cleanup(cdev);
+ if (cdev->dev_ddp_cleanup)
+ cdev->dev_ddp_cleanup(cdev);
+ else
+ cxgbi_ddp_cleanup(cdev);
+ if (cdev->ddp)
+ cxgbi_ddp_cleanup(cdev);
+ if (cdev->pmap.max_connect)
+ cxgbi_free_big_mem(cdev->pmap.port_csk);
+ kfree(cdev);
+}
+
+struct cxgbi_device *cxgbi_device_register(unsigned int extra,
+ unsigned int nports)
+{
+ struct cxgbi_device *cdev;
+
+ cdev = kzalloc(sizeof(*cdev) + extra + nports *
+ (sizeof(struct cxgbi_hba *) +
+ sizeof(struct net_device *)),
+ GFP_KERNEL);
+ if (!cdev) {
+ pr_warn("nport %d, OOM.\n", nports);
+ return NULL;
+ }
+ cdev->ports = (struct net_device **)(cdev + 1);
+ cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
+ sizeof(struct net_device *));
+ if (extra)
+ cdev->dd_data = ((char *)cdev->hbas) +
+ nports * sizeof(struct cxgbi_hba *);
+ spin_lock_init(&cdev->pmap.lock);
+
+ mutex_lock(&cdev_mutex);
+ list_add_tail(&cdev->list_head, &cdev_list);
+ mutex_unlock(&cdev_mutex);
+
+ spin_lock(&cdev_rcu_lock);
+ list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
+ spin_unlock(&cdev_rcu_lock);
+
+ log_debug(1 << CXGBI_DBG_DEV,
+ "cdev 0x%p, p# %u.\n", cdev, nports);
+ return cdev;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_register);
+
+void cxgbi_device_unregister(struct cxgbi_device *cdev)
+{
+ log_debug(1 << CXGBI_DBG_DEV,
+ "cdev 0x%p, p# %u,%s.\n",
+ cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
+
+ mutex_lock(&cdev_mutex);
+ list_del(&cdev->list_head);
+ mutex_unlock(&cdev_mutex);
+
+ spin_lock(&cdev_rcu_lock);
+ list_del_rcu(&cdev->rcu_node);
+ spin_unlock(&cdev_rcu_lock);
+ synchronize_rcu();
+
+ cxgbi_device_destroy(cdev);
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
+
+void cxgbi_device_unregister_all(unsigned int flag)
+{
+ struct cxgbi_device *cdev, *tmp;
+
+ mutex_lock(&cdev_mutex);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
+ if ((cdev->flags & flag) == flag) {
+ mutex_unlock(&cdev_mutex);
+ cxgbi_device_unregister(cdev);
+ mutex_lock(&cdev_mutex);
+ }
+ }
+ mutex_unlock(&cdev_mutex);
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
+
+struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
+{
+ struct cxgbi_device *cdev, *tmp;
+
+ mutex_lock(&cdev_mutex);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
+ if (cdev->lldev == lldev) {
+ mutex_unlock(&cdev_mutex);
+ return cdev;
+ }
+ }
+ mutex_unlock(&cdev_mutex);
+
+ log_debug(1 << CXGBI_DBG_DEV,
+ "lldev 0x%p, NO match found.\n", lldev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
+
+struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
+ int *port)
+{
+ struct net_device *vdev = NULL;
+ struct cxgbi_device *cdev, *tmp;
+ int i;
+
+ if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ vdev = ndev;
+ ndev = vlan_dev_real_dev(ndev);
+ log_debug(1 << CXGBI_DBG_DEV,
+ "vlan dev %s -> %s.\n", vdev->name, ndev->name);
+ }
+
+ mutex_lock(&cdev_mutex);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
+ for (i = 0; i < cdev->nports; i++) {
+ if (ndev == cdev->ports[i]) {
+ cdev->hbas[i]->vdev = vdev;
+ mutex_unlock(&cdev_mutex);
+ if (port)
+ *port = i;
+ return cdev;
+ }
+ }
+ }
+ mutex_unlock(&cdev_mutex);
+ log_debug(1 << CXGBI_DBG_DEV,
+ "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
+
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
+ int *port)
+{
+ struct net_device *vdev = NULL;
+ struct cxgbi_device *cdev;
+ int i;
+
+ if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ vdev = ndev;
+ ndev = vlan_dev_real_dev(ndev);
+ pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
+ for (i = 0; i < cdev->nports; i++) {
+ if (ndev == cdev->ports[i]) {
+ cdev->hbas[i]->vdev = vdev;
+ rcu_read_unlock();
+ if (port)
+ *port = i;
+ return cdev;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ log_debug(1 << CXGBI_DBG_DEV,
+ "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
+ int *port)
+{
+ struct net_device *vdev = NULL;
+ struct cxgbi_device *cdev, *tmp;
+ int i;
+
+ if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+ vdev = ndev;
+ ndev = vlan_dev_real_dev(ndev);
+ pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+ }
+
+ mutex_lock(&cdev_mutex);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
+ for (i = 0; i < cdev->nports; i++) {
+ if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
+ MAX_ADDR_LEN)) {
+ cdev->hbas[i]->vdev = vdev;
+ mutex_unlock(&cdev_mutex);
+ if (port)
+ *port = i;
+ return cdev;
+ }
+ }
+ }
+ mutex_unlock(&cdev_mutex);
+ log_debug(1 << CXGBI_DBG_DEV,
+ "ndev 0x%p, %s, NO match mac found.\n",
+ ndev, ndev->name);
+ return NULL;
+}
+#endif
+
+void cxgbi_hbas_remove(struct cxgbi_device *cdev)
+{
+ int i;
+ struct cxgbi_hba *chba;
+
+ log_debug(1 << CXGBI_DBG_DEV,
+ "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
+
+ for (i = 0; i < cdev->nports; i++) {
+ chba = cdev->hbas[i];
+ if (chba) {
+ cdev->hbas[i] = NULL;
+ iscsi_host_remove(chba->shost);
+ pci_dev_put(cdev->pdev);
+ iscsi_host_free(chba->shost);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
+
+int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
+ unsigned int max_id, struct scsi_host_template *sht,
+ struct scsi_transport_template *stt)
+{
+ struct cxgbi_hba *chba;
+ struct Scsi_Host *shost;
+ int i, err;
+
+ log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
+
+ for (i = 0; i < cdev->nports; i++) {
+ shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
+ if (!shost) {
+ pr_info("0x%p, p%d, %s, host alloc failed.\n",
+ cdev, i, cdev->ports[i]->name);
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ shost->transportt = stt;
+ shost->max_lun = max_lun;
+ shost->max_id = max_id;
+ shost->max_channel = 0;
+ shost->max_cmd_len = 16;
+
+ chba = iscsi_host_priv(shost);
+ chba->cdev = cdev;
+ chba->ndev = cdev->ports[i];
+ chba->shost = shost;
+
+ log_debug(1 << CXGBI_DBG_DEV,
+ "cdev 0x%p, p#%d %s: chba 0x%p.\n",
+ cdev, i, cdev->ports[i]->name, chba);
+
+ pci_dev_get(cdev->pdev);
+ err = iscsi_host_add(shost, &cdev->pdev->dev);
+ if (err) {
+ pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
+ cdev, i, cdev->ports[i]->name);
+ pci_dev_put(cdev->pdev);
+ scsi_host_put(shost);
+ goto err_out;
+ }
+
+ cdev->hbas[i] = chba;
+ }
+
+ return 0;
+
+err_out:
+ cxgbi_hbas_remove(cdev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
+
+/*
+ * iSCSI offload
+ *
+ * - source port management
+ * To find a free source port in the port allocation map we use a very simple
+ * rotor scheme to look for the next free port.
+ *
+ * If a source port has been specified make sure that it doesn't collide with
+ * our normal source port allocation map. If it's outside the range of our
+ * allocation/deallocation scheme just let them use it.
+ *
+ * If the source port is outside our allocation range, the caller is
+ * responsible for keeping track of their port usage.
+ */
+
+static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
+ unsigned char port_id)
+{
+ struct cxgbi_ports_map *pmap = &cdev->pmap;
+ unsigned int i;
+ unsigned int used;
+
+ if (!pmap->max_connect || !pmap->used)
+ return NULL;
+
+ spin_lock_bh(&pmap->lock);
+ used = pmap->used;
+ for (i = 0; used && i < pmap->max_connect; i++) {
+ struct cxgbi_sock *csk = pmap->port_csk[i];
+
+ if (csk) {
+ if (csk->port_id == port_id) {
+ spin_unlock_bh(&pmap->lock);
+ return csk;
+ }
+ used--;
+ }
+ }
+ spin_unlock_bh(&pmap->lock);
+
+ return NULL;
+}
+
+static int sock_get_port(struct cxgbi_sock *csk)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgbi_ports_map *pmap = &cdev->pmap;
+ unsigned int start;
+ int idx;
+ __be16 *port;
+
+ if (!pmap->max_connect) {
+ pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
+ cdev, csk->port_id, cdev->ports[csk->port_id]->name);
+ return -EADDRNOTAVAIL;
+ }
+
+ if (csk->csk_family == AF_INET)
+ port = &csk->saddr.sin_port;
+ else /* ipv6 */
+ port = &csk->saddr6.sin6_port;
+
+ if (*port) {
+ pr_err("source port NON-ZERO %u.\n",
+ ntohs(*port));
+ return -EADDRINUSE;
+ }
+
+ spin_lock_bh(&pmap->lock);
+ if (pmap->used >= pmap->max_connect) {
+ spin_unlock_bh(&pmap->lock);
+ pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
+ cdev, csk->port_id, cdev->ports[csk->port_id]->name);
+ return -EADDRNOTAVAIL;
+ }
+
+ start = idx = pmap->next;
+ do {
+ if (++idx >= pmap->max_connect)
+ idx = 0;
+ if (!pmap->port_csk[idx]) {
+ pmap->used++;
+ *port = htons(pmap->sport_base + idx);
+ pmap->next = idx;
+ pmap->port_csk[idx] = csk;
+ spin_unlock_bh(&pmap->lock);
+ cxgbi_sock_get(csk);
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "cdev 0x%p, p#%u %s, p %u, %u.\n",
+ cdev, csk->port_id,
+ cdev->ports[csk->port_id]->name,
+ pmap->sport_base + idx, pmap->next);
+ return 0;
+ }
+ } while (idx != start);
+ spin_unlock_bh(&pmap->lock);
+
+ /* should not happen */
+ pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
+ cdev, csk->port_id, cdev->ports[csk->port_id]->name,
+ pmap->next);
+ return -EADDRNOTAVAIL;
+}
+
+static void sock_put_port(struct cxgbi_sock *csk)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgbi_ports_map *pmap = &cdev->pmap;
+ __be16 *port;
+
+ if (csk->csk_family == AF_INET)
+ port = &csk->saddr.sin_port;
+ else /* ipv6 */
+ port = &csk->saddr6.sin6_port;
+
+ if (*port) {
+ int idx = ntohs(*port) - pmap->sport_base;
+
+ *port = 0;
+ if (idx < 0 || idx >= pmap->max_connect) {
+ pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
+ cdev, csk->port_id,
+ cdev->ports[csk->port_id]->name,
+ ntohs(*port));
+ return;
+ }
+
+ spin_lock_bh(&pmap->lock);
+ pmap->port_csk[idx] = NULL;
+ pmap->used--;
+ spin_unlock_bh(&pmap->lock);
+
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "cdev 0x%p, p#%u %s, release %u.\n",
+ cdev, csk->port_id, cdev->ports[csk->port_id]->name,
+ pmap->sport_base + idx);
+
+ cxgbi_sock_put(csk);
+ }
+}
+
+/*
+ * iscsi tcp connection
+ */
+void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
+{
+ if (csk->cpl_close) {
+ kfree_skb(csk->cpl_close);
+ csk->cpl_close = NULL;
+ }
+ if (csk->cpl_abort_req) {
+ kfree_skb(csk->cpl_abort_req);
+ csk->cpl_abort_req = NULL;
+ }
+ if (csk->cpl_abort_rpl) {
+ kfree_skb(csk->cpl_abort_rpl);
+ csk->cpl_abort_rpl = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
+
+static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
+{
+ struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);
+
+ if (!csk) {
+ pr_info("alloc csk %zu failed.\n", sizeof(*csk));
+ return NULL;
+ }
+
+ if (cdev->csk_alloc_cpls(csk) < 0) {
+ pr_info("csk 0x%p, alloc cpls failed.\n", csk);
+ kfree(csk);
+ return NULL;
+ }
+
+ spin_lock_init(&csk->lock);
+ kref_init(&csk->refcnt);
+ skb_queue_head_init(&csk->receive_queue);
+ skb_queue_head_init(&csk->write_queue);
+ setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
+ rwlock_init(&csk->callback_lock);
+ csk->cdev = cdev;
+ csk->flags = 0;
+ cxgbi_sock_set_state(csk, CTP_CLOSED);
+
+ log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);
+
+ return csk;
+}
+
+static struct rtable *find_route_ipv4(struct flowi4 *fl4,
+ __be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport, u8 tos)
+{
+ struct rtable *rt;
+
+ rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
+ dport, sport, IPPROTO_TCP, tos, 0);
+ if (IS_ERR(rt))
+ return NULL;
+
+ return rt;
+}
+
+static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
+{
+ struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
+ struct dst_entry *dst;
+ struct net_device *ndev;
+ struct cxgbi_device *cdev;
+ struct rtable *rt = NULL;
+ struct neighbour *n;
+ struct flowi4 fl4;
+ struct cxgbi_sock *csk = NULL;
+ unsigned int mtu = 0;
+ int port = 0xFFFF;
+ int err = 0;
+
+ rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
+ if (!rt) {
+ pr_info("no route to ipv4 0x%x, port %u.\n",
+ be32_to_cpu(daddr->sin_addr.s_addr),
+ be16_to_cpu(daddr->sin_port));
+ err = -ENETUNREACH;
+ goto err_out;
+ }
+ dst = &rt->dst;
+ n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
+ if (!n) {
+ err = -ENODEV;
+ goto rel_rt;
+ }
+ ndev = n->dev;
+
+ if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+ pr_info("multi-cast route %pI4, port %u, dev %s.\n",
+ &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
+ ndev->name);
+ err = -ENETUNREACH;
+ goto rel_neigh;
+ }
+
+ if (ndev->flags & IFF_LOOPBACK) {
+ ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+ mtu = ndev->mtu;
+ pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
+ n->dev->name, ndev->name, mtu);
+ }
+
+ cdev = cxgbi_device_find_by_netdev(ndev, &port);
+ if (!cdev) {
+ pr_info("dst %pI4, %s, NOT cxgbi device.\n",
+ &daddr->sin_addr.s_addr, ndev->name);
+ err = -ENETUNREACH;
+ goto rel_neigh;
+ }
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
+ &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
+ port, ndev->name, cdev);
+
+ csk = cxgbi_sock_create(cdev);
+ if (!csk) {
+ err = -ENOMEM;
+ goto rel_neigh;
+ }
+ csk->cdev = cdev;
+ csk->port_id = port;
+ csk->mtu = mtu;
+ csk->dst = dst;
+
+ csk->csk_family = AF_INET;
+ csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
+ csk->daddr.sin_port = daddr->sin_port;
+ csk->daddr.sin_family = daddr->sin_family;
+ csk->saddr.sin_family = daddr->sin_family;
+ csk->saddr.sin_addr.s_addr = fl4.saddr;
+ neigh_release(n);
+
+ return csk;
+
+rel_neigh:
+ neigh_release(n);
+
+rel_rt:
+ ip_rt_put(rt);
+ if (csk)
+ cxgbi_sock_closed(csk);
+err_out:
+ return ERR_PTR(err);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
+{
+ struct flowi6 fl;
+
+ if (saddr)
+ memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
+ if (daddr)
+ memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
+ return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+}
+
+static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
+{
+ struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
+ struct dst_entry *dst;
+ struct net_device *ndev;
+ struct cxgbi_device *cdev;
+ struct rt6_info *rt = NULL;
+ struct neighbour *n;
+ struct in6_addr pref_saddr;
+ struct cxgbi_sock *csk = NULL;
+ unsigned int mtu = 0;
+ int port = 0xFFFF;
+ int err = 0;
+
+ rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
+
+ if (!rt) {
+ pr_info("no route to ipv6 %pI6 port %u\n",
+ daddr6->sin6_addr.s6_addr,
+ be16_to_cpu(daddr6->sin6_port));
+ err = -ENETUNREACH;
+ goto err_out;
+ }
+
+ dst = &rt->dst;
+
+ n = dst_neigh_lookup(dst, &daddr6->sin6_addr);
+
+ if (!n) {
+ pr_info("%pI6, port %u, dst no neighbour.\n",
+ daddr6->sin6_addr.s6_addr,
+ be16_to_cpu(daddr6->sin6_port));
+ err = -ENETUNREACH;
+ goto rel_rt;
+ }
+ ndev = n->dev;
+
+ if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+ pr_info("multi-cast route %pI6 port %u, dev %s.\n",
+ daddr6->sin6_addr.s6_addr,
+ ntohs(daddr6->sin6_port), ndev->name);
+ err = -ENETUNREACH;
+ goto rel_rt;
+ }
+
+ cdev = cxgbi_device_find_by_netdev(ndev, &port);
+ if (!cdev)
+ cdev = cxgbi_device_find_by_mac(ndev, &port);
+ if (!cdev) {
+ pr_info("dst %pI6 %s, NOT cxgbi device.\n",
+ daddr6->sin6_addr.s6_addr, ndev->name);
+ err = -ENETUNREACH;
+ goto rel_rt;
+ }
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
+ daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
+ ndev->name, cdev);
+
+ csk = cxgbi_sock_create(cdev);
+ if (!csk) {
+ err = -ENOMEM;
+ goto rel_rt;
+ }
+ csk->cdev = cdev;
+ csk->port_id = port;
+ csk->mtu = mtu;
+ csk->dst = dst;
+
+ if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
+ struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
+
+ err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
+ &daddr6->sin6_addr, 0, &pref_saddr);
+ if (err) {
+ pr_info("failed to get source address to reach %pI6\n",
+ &daddr6->sin6_addr);
+ goto rel_rt;
+ }
+ } else {
+ pref_saddr = rt->rt6i_prefsrc.addr;
+ }
+
+ csk->csk_family = AF_INET6;
+ csk->daddr6.sin6_addr = daddr6->sin6_addr;
+ csk->daddr6.sin6_port = daddr6->sin6_port;
+ csk->daddr6.sin6_family = daddr6->sin6_family;
+ csk->saddr6.sin6_family = daddr6->sin6_family;
+ csk->saddr6.sin6_addr = pref_saddr;
+
+ neigh_release(n);
+ return csk;
+
+rel_rt:
+ if (n)
+ neigh_release(n);
+
+ ip6_rt_put(rt);
+ if (csk)
+ cxgbi_sock_closed(csk);
+err_out:
+ return ERR_PTR(err);
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
+ unsigned int opt)
+{
+ csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
+ dst_confirm(csk->dst);
+ smp_mb();
+ cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_established);
+
+static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
+{
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
+ csk, csk->state, csk->flags, csk->user_data);
+
+ if (csk->state != CTP_ESTABLISHED) {
+ read_lock_bh(&csk->callback_lock);
+ if (csk->user_data)
+ iscsi_conn_failure(csk->user_data,
+ ISCSI_ERR_TCP_CONN_CLOSE);
+ read_unlock_bh(&csk->callback_lock);
+ }
+}
+
+void cxgbi_sock_closed(struct cxgbi_sock *csk)
+{
+ log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, (csk)->state, (csk)->flags, (csk)->tid);
+ cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
+ if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
+ return;
+ if (csk->saddr.sin_port)
+ sock_put_port(csk);
+ if (csk->dst)
+ dst_release(csk->dst);
+ csk->cdev->csk_release_offload_resources(csk);
+ cxgbi_sock_set_state(csk, CTP_CLOSED);
+ cxgbi_inform_iscsi_conn_closing(csk);
+ cxgbi_sock_put(csk);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
+
+static void need_active_close(struct cxgbi_sock *csk)
+{
+ int data_lost;
+ int close_req = 0;
+
+ log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, (csk)->state, (csk)->flags, (csk)->tid);
+ spin_lock_bh(&csk->lock);
+ dst_confirm(csk->dst);
+ data_lost = skb_queue_len(&csk->receive_queue);
+ __skb_queue_purge(&csk->receive_queue);
+
+ if (csk->state == CTP_ACTIVE_OPEN)
+ cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
+ else if (csk->state == CTP_ESTABLISHED) {
+ close_req = 1;
+ cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
+ } else if (csk->state == CTP_PASSIVE_CLOSE) {
+ close_req = 1;
+ cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
+ }
+
+ if (close_req) {
+ if (data_lost)
+ csk->cdev->csk_send_abort_req(csk);
+ else
+ csk->cdev->csk_send_close_req(csk);
+ }
+
+ spin_unlock_bh(&csk->lock);
+}
+
+void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
+{
+ pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
+ csk, csk->state, csk->flags,
+ &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
+ &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
+ errno);
+
+ cxgbi_sock_set_state(csk, CTP_CONNECTING);
+ csk->err = errno;
+ cxgbi_sock_closed(csk);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
+
+void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+
+ log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, (csk)->state, (csk)->flags, (csk)->tid);
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+ if (csk->state == CTP_ACTIVE_OPEN)
+ cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+ __kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
+
+void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
+{
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
+ if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
+ cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
+ if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
+ pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ cxgbi_sock_closed(csk);
+ }
+
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
+
+void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
+{
+ log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, (csk)->state, (csk)->flags, (csk)->tid);
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
+ goto done;
+
+ switch (csk->state) {
+ case CTP_ESTABLISHED:
+ cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
+ break;
+ case CTP_ACTIVE_CLOSE:
+ cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
+ break;
+ case CTP_CLOSE_WAIT_1:
+ cxgbi_sock_closed(csk);
+ break;
+ case CTP_ABORTING:
+ break;
+ default:
+ pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ }
+ cxgbi_inform_iscsi_conn_closing(csk);
+done:
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
+
+void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
+{
+ log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
+ csk, (csk)->state, (csk)->flags, (csk)->tid);
+ cxgbi_sock_get(csk);
+ spin_lock_bh(&csk->lock);
+
+ csk->snd_una = snd_nxt - 1;
+ if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
+ goto done;
+
+ switch (csk->state) {
+ case CTP_ACTIVE_CLOSE:
+ cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
+ break;
+ case CTP_CLOSE_WAIT_1:
+ case CTP_CLOSE_WAIT_2:
+ cxgbi_sock_closed(csk);
+ break;
+ case CTP_ABORTING:
+ break;
+ default:
+ pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ }
+done:
+ spin_unlock_bh(&csk->lock);
+ cxgbi_sock_put(csk);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
+
+void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
+ unsigned int snd_una, int seq_chk)
+{
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
+ csk, csk->state, csk->flags, csk->tid, credits,
+ csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
+
+ spin_lock_bh(&csk->lock);
+
+ csk->wr_cred += credits;
+ if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
+ csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
+ while (credits) {
+ struct sk_buff *p = cxgbi_sock_peek_wr(csk);
+
+ if (unlikely(!p)) {
+ pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
+ csk, csk->state, csk->flags, csk->tid, credits,
+ csk->wr_cred, csk->wr_una_cred);
+ break;
+ }
+
+ if (unlikely(credits < p->csum)) {
+ pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
+ csk, csk->state, csk->flags, csk->tid,
+ credits, csk->wr_cred, csk->wr_una_cred,
+ p->csum);
+ p->csum -= credits;
+ break;
+ } else {
+ cxgbi_sock_dequeue_wr(csk);
+ credits -= p->csum;
+ kfree_skb(p);
+ }
+ }
+
+ cxgbi_sock_check_wr_invariants(csk);
+
+ if (seq_chk) {
+ if (unlikely(before(snd_una, csk->snd_una))) {
+ pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
+ csk, csk->state, csk->flags, csk->tid, snd_una,
+ csk->snd_una);
+ goto done;
+ }
+
+ if (csk->snd_una != snd_una) {
+ csk->snd_una = snd_una;
+ dst_confirm(csk->dst);
+ }
+ }
+
+ if (skb_queue_len(&csk->write_queue)) {
+ if (csk->cdev->csk_push_tx_frames(csk, 0))
+ cxgbi_conn_tx_open(csk);
+ } else
+ cxgbi_conn_tx_open(csk);
+done:
+ spin_unlock_bh(&csk->lock);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
+
+static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
+ unsigned short mtu)
+{
+ int i = 0;
+
+ while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
+ ++i;
+
+ return i;
+}
+
+unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
+{
+ unsigned int idx;
+ struct dst_entry *dst = csk->dst;
+
+ csk->advmss = dst_metric_advmss(dst);
+
+ if (csk->advmss > pmtu - 40)
+ csk->advmss = pmtu - 40;
+ if (csk->advmss < csk->cdev->mtus[0] - 40)
+ csk->advmss = csk->cdev->mtus[0] - 40;
+ idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
+
+ return idx;
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
+
+void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+ cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
+ __skb_queue_tail(&csk->write_queue, skb);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
+
+void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
+
+void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
+{
+ int pending = cxgbi_sock_count_pending_wrs(csk);
+
+ if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
+ pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
+ csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
+}
+EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
+
+static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct sk_buff *next;
+ int err, copied = 0;
+
+ spin_lock_bh(&csk->lock);
+
+ if (csk->state != CTP_ESTABLISHED) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ err = -EAGAIN;
+ goto out_err;
+ }
+
+ if (csk->err) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->err);
+ err = -EPIPE;
+ goto out_err;
+ }
+
+ if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->write_seq,
+ csk->snd_una, cdev->snd_win);
+ err = -ENOBUFS;
+ goto out_err;
+ }
+
+ while (skb) {
+ int frags = skb_shinfo(skb)->nr_frags +
+ (skb->len != skb->data_len);
+
+ if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
+ pr_err("csk 0x%p, skb head %u < %u.\n",
+ csk, skb_headroom(skb), cdev->skb_tx_rsvd);
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (frags >= SKB_WR_LIST_SIZE) {
+ pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
+ csk, skb_shinfo(skb)->nr_frags, skb->len,
+ skb->data_len, (uint)(SKB_WR_LIST_SIZE));
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ next = skb->next;
+ skb->next = NULL;
+ cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
+ cxgbi_sock_skb_entail(csk, skb);
+ copied += skb->len;
+ csk->write_seq += skb->len +
+ cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
+ skb = next;
+ }
+done:
+ if (likely(skb_queue_len(&csk->write_queue)))
+ cdev->csk_push_tx_frames(csk, 1);
+ spin_unlock_bh(&csk->lock);
+ return copied;
+
+out_err:
+ if (copied == 0 && err == -EPIPE)
+ copied = csk->err ? csk->err : -EPIPE;
+ else
+ copied = err;
+ goto done;
+}
+
+/*
+ * Direct Data Placement -
+ * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
+ * final destination host-memory buffers based on the Initiator Task Tag (ITT)
+ * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
+ * The host memory address is programmed into h/w in the format of pagepod
+ * entries.
+ * The location of the pagepod entry is encoded into ddp tag which is used as
+ * the base for ITT/TTT.
+ */
+
+static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
+static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
+static unsigned char page_idx = DDP_PGIDX_MAX;
+
+static unsigned char sw_tag_idx_bits;
+static unsigned char sw_tag_age_bits;
+
+/*
+ * Direct-Data Placement page size adjustment
+ */
+static int ddp_adjust_page_table(void)
+{
+ int i;
+ unsigned int base_order, order;
+
+ if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
+ pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
+ PAGE_SIZE, 1UL << ddp_page_shift[0]);
+ return -EINVAL;
+ }
+
+ base_order = get_order(1UL << ddp_page_shift[0]);
+ order = get_order(1UL << PAGE_SHIFT);
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ /* first is the kernel page size, then just doubling */
+ ddp_page_order[i] = order - base_order + i;
+ ddp_page_shift[i] = PAGE_SHIFT + i;
+ }
+ return 0;
+}
+
+static int ddp_find_page_index(unsigned long pgsz)
+{
+ int i;
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ if (pgsz == (1UL << ddp_page_shift[i]))
+ return i;
+ }
+ pr_info("ddp page size %lu not supported.\n", pgsz);
+ return DDP_PGIDX_MAX;
+}
+
+static void ddp_setup_host_page_size(void)
+{
+ if (page_idx == DDP_PGIDX_MAX) {
+ page_idx = ddp_find_page_index(PAGE_SIZE);
+
+ if (page_idx == DDP_PGIDX_MAX) {
+ pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
+ if (ddp_adjust_page_table() < 0) {
+ pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
+ return;
+ }
+ page_idx = ddp_find_page_index(PAGE_SIZE);
+ }
+ pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
+ }
+}
+
+void cxgbi_ddp_page_size_factor(int *pgsz_factor)
+{
+ int i;
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++)
+ pgsz_factor[i] = ddp_page_order[i];
+}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);
+
+/*
+ * DDP setup & teardown
+ */
+
+void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
+ struct cxgbi_pagepod_hdr *hdr,
+ struct cxgbi_gather_list *gl, unsigned int gidx)
+{
+ int i;
+
+ memcpy(ppod, hdr, sizeof(*hdr));
+ for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
+ ppod->addr[i] = gidx < gl->nelem ?
+ cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);
+
+void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
+{
+ memset(ppod, 0, sizeof(*ppod));
+}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
+
+static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
+ unsigned int start, unsigned int max,
+ unsigned int count,
+ struct cxgbi_gather_list *gl)
+{
+ unsigned int i, j, k;
+
+ /* not enough entries */
+ if ((max - start) < count) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "NOT enough entries %u+%u < %u.\n", start, count, max);
+ return -EBUSY;
+ }
+
+ max -= count;
+ spin_lock(&ddp->map_lock);
+ for (i = start; i < max;) {
+ for (j = 0, k = i; j < count; j++, k++) {
+ if (ddp->gl_map[k])
+ break;
+ }
+ if (j == count) {
+ for (j = 0, k = i; j < count; j++, k++)
+ ddp->gl_map[k] = gl;
+ spin_unlock(&ddp->map_lock);
+ return i;
+ }
+ i += j + 1;
+ }
+ spin_unlock(&ddp->map_lock);
+ log_debug(1 << CXGBI_DBG_DDP,
+ "NO suitable entries %u available.\n", count);
+ return -EBUSY;
+}
+
+static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
+ int start, int count)
+{
+ spin_lock(&ddp->map_lock);
+ memset(&ddp->gl_map[start], 0,
+ count * sizeof(struct cxgbi_gather_list *));
+ spin_unlock(&ddp->map_lock);
+}
+
+static inline void ddp_gl_unmap(struct pci_dev *pdev,
+ struct cxgbi_gather_list *gl)
+{
+ int i;
+
+ for (i = 0; i < gl->nelem; i++)
+ dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+}
+
+static inline int ddp_gl_map(struct pci_dev *pdev,
+ struct cxgbi_gather_list *gl)
+{
+ int i;
+
+ for (i = 0; i < gl->nelem; i++) {
+ gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
+ PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "page %d 0x%p, 0x%p dma mapping err.\n",
+ i, gl->pages[i], pdev);
+ goto unmap;
+ }
+ }
+ return i;
+unmap:
+ if (i) {
+ unsigned int nelem = gl->nelem;
+
+ gl->nelem = i;
+ ddp_gl_unmap(pdev, gl);
+ gl->nelem = nelem;
+ }
+ return -EINVAL;
+}
+
+static void ddp_release_gl(struct cxgbi_gather_list *gl,
+ struct pci_dev *pdev)
+{
+ ddp_gl_unmap(pdev, gl);
+ kfree(gl);
+}
+
+static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
+ struct scatterlist *sgl,
+ unsigned int sgcnt,
+ struct pci_dev *pdev,
+ gfp_t gfp)
+{
+ struct cxgbi_gather_list *gl;
+ struct scatterlist *sg = sgl;
+ struct page *sgpage = sg_page(sg);
+ unsigned int sglen = sg->length;
+ unsigned int sgoffset = sg->offset;
+ unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
+ int i = 1, j = 0;
+
+ if (xferlen < DDP_THRESHOLD) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "xfer %u < threshold %u, no ddp.\n",
+ xferlen, DDP_THRESHOLD);
+ return NULL;
+ }
+
+ gl = kzalloc(sizeof(struct cxgbi_gather_list) +
+ npages * (sizeof(dma_addr_t) +
+ sizeof(struct page *)), gfp);
+ if (!gl) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "xfer %u, %u pages, OOM.\n", xferlen, npages);
+ return NULL;
+ }
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);
+
+ gl->pages = (struct page **)&gl->phys_addr[npages];
+ gl->nelem = npages;
+ gl->length = xferlen;
+ gl->offset = sgoffset;
+ gl->pages[0] = sgpage;
+
+ for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
+ i++, sg = sg_next(sg)) {
+ struct page *page = sg_page(sg);
+
+ if (sgpage == page && sg->offset == sgoffset + sglen)
+ sglen += sg->length;
+ else {
+ /* make sure the sgl is fit for ddp:
+ * each has the same page size, and
+ * all of the middle pages are used completely
+ */
+ if ((j && sgoffset) || ((i != sgcnt - 1) &&
+ ((sglen + sgoffset) & ~PAGE_MASK))) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "page %d/%u, %u + %u.\n",
+ i, sgcnt, sgoffset, sglen);
+ goto error_out;
+ }
+
+ j++;
+ if (j == gl->nelem || sg->offset) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "page %d/%u, offset %u.\n",
+ j, gl->nelem, sg->offset);
+ goto error_out;
+ }
+ gl->pages[j] = page;
+ sglen = sg->length;
+ sgoffset = sg->offset;
+ sgpage = page;
+ }
+ }
+ gl->nelem = ++j;
+
+ if (ddp_gl_map(pdev, gl) < 0)
+ goto error_out;
+
+ return gl;
+
+error_out:
+ kfree(gl);
+ return NULL;
+}
+
+static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
+{
+ struct cxgbi_device *cdev = chba->cdev;
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+ u32 idx;
+
+ idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
+ if (idx < ddp->nppods) {
+ struct cxgbi_gather_list *gl = ddp->gl_map[idx];
+ unsigned int npods;
+
+ if (!gl || !gl->nelem) {
+ pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
+ tag, idx, gl, gl ? gl->nelem : 0);
+ return;
+ }
+ npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ log_debug(1 << CXGBI_DBG_DDP,
+ "tag 0x%x, release idx %u, npods %u.\n",
+ tag, idx, npods);
+ cdev->csk_ddp_clear(chba, tag, idx, npods);
+ ddp_unmark_entries(ddp, idx, npods);
+ ddp_release_gl(gl, ddp->pdev);
+ } else
+ pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
+}
+
+static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
+ u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
+ gfp_t gfp)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+ struct cxgbi_tag_format *tformat = &cdev->tag_format;
+ struct cxgbi_pagepod_hdr hdr;
+ unsigned int npods;
+ int idx = -1;
+ int err = -ENOMEM;
+ u32 tag;
+
+ npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ if (ddp->idx_last == ddp->nppods)
+ idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
+ npods, gl);
+ else {
+ idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
+ ddp->nppods, npods,
+ gl);
+ if (idx < 0 && ddp->idx_last >= npods) {
+ idx = ddp_find_unused_entries(ddp, 0,
+ min(ddp->idx_last + npods, ddp->nppods),
+ npods, gl);
+ }
+ }
+ if (idx < 0) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "xferlen %u, gl %u, npods %u NO DDP.\n",
+ gl->length, gl->nelem, npods);
+ return idx;
+ }
+
+ tag = cxgbi_ddp_tag_base(tformat, sw_tag);
+ tag |= idx << PPOD_IDX_SHIFT;
+
+ hdr.rsvd = 0;
+ hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+ hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
+ hdr.max_offset = htonl(gl->length);
+ hdr.page_offset = htonl(gl->offset);
+
+ err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
+ if (err < 0)
+ goto unmark_entries;
+
+ ddp->idx_last = idx;
+ log_debug(1 << CXGBI_DBG_DDP,
+ "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
+ gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
+ npods);
+ *tagp = tag;
+ return 0;
+
+unmark_entries:
+ ddp_unmark_entries(ddp, idx, npods);
+ return err;
+}
+
+int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
+ unsigned int sw_tag, unsigned int xferlen,
+ struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct cxgbi_tag_format *tformat = &cdev->tag_format;
+ struct cxgbi_gather_list *gl;
+ int err;
+
+ if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
+ xferlen < DDP_THRESHOLD) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
+ return -EINVAL;
+ }
+
+ if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "sw_tag 0x%x NOT usable.\n", sw_tag);
+ return -EINVAL;
+ }
+
+ gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
+ if (!gl)
+ return -ENOMEM;
+
+ err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
+ if (err < 0)
+ ddp_release_gl(gl, cdev->pdev);
+
+ return err;
+}
+
+static void ddp_destroy(struct kref *kref)
+{
+ struct cxgbi_ddp_info *ddp = container_of(kref,
+ struct cxgbi_ddp_info,
+ refcnt);
+ struct cxgbi_device *cdev = ddp->cdev;
+ int i = 0;
+
+ pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);
+
+ while (i < ddp->nppods) {
+ struct cxgbi_gather_list *gl = ddp->gl_map[i];
+
+ if (gl) {
+ int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
+ >> PPOD_PAGES_SHIFT;
+ pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
+ kfree(gl);
+ i += npods;
+ } else
+ i++;
+ }
+ cxgbi_free_big_mem(ddp);
+}
+
+int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
+{
+ struct cxgbi_ddp_info *ddp = cdev->ddp;
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
+ cdev->ddp = NULL;
+ if (ddp)
+ return kref_put(&ddp->refcnt, ddp_destroy);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);
+
+int cxgbi_ddp_init(struct cxgbi_device *cdev,
+ unsigned int llimit, unsigned int ulimit,
+ unsigned int max_txsz, unsigned int max_rxsz)
+{
+ struct cxgbi_ddp_info *ddp;
+ unsigned int ppmax, bits;
+
+ ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
+ bits = __ilog2_u32(ppmax) + 1;
+ if (bits > PPOD_IDX_MAX_SIZE)
+ bits = PPOD_IDX_MAX_SIZE;
+ ppmax = (1 << (bits - 1)) - 1;
+
+ ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
+ ppmax * (sizeof(struct cxgbi_gather_list *) +
+ sizeof(struct sk_buff *)),
+ GFP_KERNEL);
+ if (!ddp) {
+ pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
+ return -ENOMEM;
+ }
+ ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
+ cdev->ddp = ddp;
+
+ spin_lock_init(&ddp->map_lock);
+ kref_init(&ddp->refcnt);
+
+ ddp->cdev = cdev;
+ ddp->pdev = cdev->pdev;
+ ddp->llimit = llimit;
+ ddp->ulimit = ulimit;
+ ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
+ ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
+ ddp->nppods = ppmax;
+ ddp->idx_last = ppmax;
+ ddp->idx_bits = bits;
+ ddp->idx_mask = (1 << bits) - 1;
+ ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
+
+ cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
+ cdev->tag_format.rsvd_bits = ddp->idx_bits;
+ cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
+ cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;
+
+ pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
+ cdev->ports[0]->name, cdev->tag_format.sw_bits,
+ cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
+ cdev->tag_format.rsvd_mask);
+
+ cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+ cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "%s max payload size: %u/%u, %u/%u.\n",
+ cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
+ cdev->rx_max_size, ddp->max_rxsz);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
+
+/*
+ * APIs interacting with open-iscsi libraries
+ */
+
+static unsigned char padding[4];
+
+static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_hba *chba = cconn->chba;
+ struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
+ u32 tag = ntohl((__force u32)hdr_itt);
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
+ if (sc &&
+ (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+ cxgbi_is_ddp_tag(tformat, tag))
+ ddp_tag_release(chba, tag);
+}
+
+static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *sess = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_hba *chba = cconn->chba;
+ struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
+ u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
+ u32 tag = 0;
+ int err = -EINVAL;
+
+ if (sc &&
+ (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
+ err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
+ scsi_in(sc)->length,
+ scsi_in(sc)->table.sgl,
+ scsi_in(sc)->table.nents,
+ GFP_ATOMIC);
+ if (err < 0)
+ log_debug(1 << CXGBI_DBG_DDP,
+ "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
+ cconn->cep->csk, task, scsi_in(sc)->length,
+ scsi_in(sc)->table.nents);
+ }
+
+ if (err < 0)
+ tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
+ /* the itt need to sent in big-endian order */
+ *hdr_itt = (__force itt_t)htonl(tag);
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
+ chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
+ return 0;
+}
+
+void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_device *cdev = cconn->chba->cdev;
+ u32 tag = ntohl((__force u32) itt);
+ u32 sw_bits;
+
+ sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
+ if (idx)
+ *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
+ if (age)
+ *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
+
+ log_debug(1 << CXGBI_DBG_DDP,
+ "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
+ cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
+ age ? *age : 0xFF);
+}
+EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
+
+void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
+{
+ struct iscsi_conn *conn = csk->user_data;
+
+ if (conn) {
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "csk 0x%p, cid %d.\n", csk, conn->id);
+ iscsi_conn_queue_work(conn);
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
+
+/*
+ * pdu receive, interact with libiscsi_tcp
+ */
+static inline int read_pdu_skb(struct iscsi_conn *conn,
+ struct sk_buff *skb,
+ unsigned int offset,
+ int offloaded)
+{
+ int status = 0;
+ int bytes_read;
+
+ bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
+ switch (status) {
+ case ISCSI_TCP_CONN_ERR:
+ pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
+ skb, offset, offloaded);
+ return -EIO;
+ case ISCSI_TCP_SUSPENDED:
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
+ skb, offset, offloaded, bytes_read);
+ /* no transfer - just have caller flush queue */
+ return bytes_read;
+ case ISCSI_TCP_SKB_DONE:
+ pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
+ skb, offset, offloaded);
+ /*
+ * pdus should always fit in the skb and we should get
+ * segment done notifcation.
+ */
+ iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
+ return -EFAULT;
+ case ISCSI_TCP_SEGMENT_DONE:
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
+ skb, offset, offloaded, bytes_read);
+ return bytes_read;
+ default:
+ pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
+ skb, offset, offloaded, status);
+ return -EINVAL;
+ }
+}
+
+static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
+ conn, skb, skb->len, cxgbi_skcb_flags(skb));
+
+ if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
+ pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
+ iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
+ return -EIO;
+ }
+
+ if (conn->hdrdgst_en &&
+ cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
+ pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
+ iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
+ return -EIO;
+ }
+
+ return read_pdu_skb(conn, skb, 0, 0);
+}
+
+static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
+ struct sk_buff *skb, unsigned int offset)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ bool offloaded = 0;
+ int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
+
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
+ conn, skb, skb->len, cxgbi_skcb_flags(skb));
+
+ if (conn->datadgst_en &&
+ cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
+ pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
+ conn, lskb, cxgbi_skcb_flags(lskb));
+ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ return -EIO;
+ }
+
+ if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
+ return 0;
+
+ /* coalesced, add header digest length */
+ if (lskb == skb && conn->hdrdgst_en)
+ offset += ISCSI_DIGEST_SIZE;
+
+ if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
+ offloaded = 1;
+
+ if (opcode == ISCSI_OP_SCSI_DATA_IN)
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
+ skb, opcode, ntohl(tcp_conn->in.hdr->itt),
+ tcp_conn->in.datalen, offloaded ? "is" : "not");
+
+ return read_pdu_skb(conn, skb, offset, offloaded);
+}
+
+static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ int must_send;
+ u32 credits;
+
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
+ csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
+ csk->rcv_wup, cdev->rx_credit_thres,
+ cdev->rcv_win);
+
+ if (csk->state != CTP_ESTABLISHED)
+ return;
+
+ credits = csk->copied_seq - csk->rcv_wup;
+ if (unlikely(!credits))
+ return;
+ if (unlikely(cdev->rx_credit_thres == 0))
+ return;
+
+ must_send = credits + 16384 >= cdev->rcv_win;
+ if (must_send || credits >= cdev->rx_credit_thres)
+ csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
+}
+
+void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct iscsi_conn *conn = csk->user_data;
+ struct sk_buff *skb;
+ unsigned int read = 0;
+ int err = 0;
+
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, conn 0x%p.\n", csk, conn);
+
+ if (unlikely(!conn || conn->suspend_rx)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ csk, conn, conn ? conn->id : 0xFF,
+ conn ? conn->suspend_rx : 0xFF);
+ return;
+ }
+
+ while (!err) {
+ skb = skb_peek(&csk->receive_queue);
+ if (!skb ||
+ !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
+ if (skb)
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "skb 0x%p, NOT ready 0x%lx.\n",
+ skb, cxgbi_skcb_flags(skb));
+ break;
+ }
+ __skb_unlink(skb, &csk->receive_queue);
+
+ read += cxgbi_skcb_rx_pdulen(skb);
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
+ csk, skb, skb->len, cxgbi_skcb_flags(skb),
+ cxgbi_skcb_rx_pdulen(skb));
+
+ if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
+ err = skb_read_pdu_bhs(conn, skb);
+ if (err < 0) {
+ pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
+ "f 0x%lx, plen %u.\n",
+ csk, skb, skb->len,
+ cxgbi_skcb_flags(skb),
+ cxgbi_skcb_rx_pdulen(skb));
+ goto skb_done;
+ }
+ err = skb_read_pdu_data(conn, skb, skb,
+ err + cdev->skb_rx_extra);
+ if (err < 0)
+ pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
+ "f 0x%lx, plen %u.\n",
+ csk, skb, skb->len,
+ cxgbi_skcb_flags(skb),
+ cxgbi_skcb_rx_pdulen(skb));
+ } else {
+ err = skb_read_pdu_bhs(conn, skb);
+ if (err < 0) {
+ pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
+ "f 0x%lx, plen %u.\n",
+ csk, skb, skb->len,
+ cxgbi_skcb_flags(skb),
+ cxgbi_skcb_rx_pdulen(skb));
+ goto skb_done;
+ }
+
+ if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+ struct sk_buff *dskb;
+
+ dskb = skb_peek(&csk->receive_queue);
+ if (!dskb) {
+ pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
+ " plen %u, NO data.\n",
+ csk, skb, skb->len,
+ cxgbi_skcb_flags(skb),
+ cxgbi_skcb_rx_pdulen(skb));
+ err = -EIO;
+ goto skb_done;
+ }
+ __skb_unlink(dskb, &csk->receive_queue);
+
+ err = skb_read_pdu_data(conn, skb, dskb, 0);
+ if (err < 0)
+ pr_err("data, csk 0x%p, skb 0x%p,%u, "
+ "f 0x%lx, plen %u, dskb 0x%p,"
+ "%u.\n",
+ csk, skb, skb->len,
+ cxgbi_skcb_flags(skb),
+ cxgbi_skcb_rx_pdulen(skb),
+ dskb, dskb->len);
+ __kfree_skb(dskb);
+ } else
+ err = skb_read_pdu_data(conn, skb, skb, 0);
+ }
+skb_done:
+ __kfree_skb(skb);
+
+ if (err < 0)
+ break;
+ }
+
+ log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
+ if (read) {
+ csk->copied_seq += read;
+ csk_return_rx_credits(csk, read);
+ conn->rxdata_octets += read;
+ }
+
+ if (err < 0) {
+ pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
+ csk, conn, err, read);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
+
+static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
+ unsigned int offset, unsigned int *off,
+ struct scatterlist **sgp)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, sgcnt, i) {
+ if (offset < sg->length) {
+ *off = offset;
+ *sgp = sg;
+ return 0;
+ }
+ offset -= sg->length;
+ }
+ return -EFAULT;
+}
+
+static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+ unsigned int dlen, struct page_frag *frags,
+ int frag_max)
+{
+ unsigned int datalen = dlen;
+ unsigned int sglen = sg->length - sgoffset;
+ struct page *page = sg_page(sg);
+ int i;
+
+ i = 0;
+ do {
+ unsigned int copy;
+
+ if (!sglen) {
+ sg = sg_next(sg);
+ if (!sg) {
+ pr_warn("sg %d NULL, len %u/%u.\n",
+ i, datalen, dlen);
+ return -EINVAL;
+ }
+ sgoffset = 0;
+ sglen = sg->length;
+ page = sg_page(sg);
+
+ }
+ copy = min(datalen, sglen);
+ if (i && page == frags[i - 1].page &&
+ sgoffset + sg->offset ==
+ frags[i - 1].offset + frags[i - 1].size) {
+ frags[i - 1].size += copy;
+ } else {
+ if (i >= frag_max) {
+ pr_warn("too many pages %u, dlen %u.\n",
+ frag_max, dlen);
+ return -EINVAL;
+ }
+
+ frags[i].page = page;
+ frags[i].offset = sg->offset + sgoffset;
+ frags[i].size = copy;
+ i++;
+ }
+ datalen -= copy;
+ sgoffset += copy;
+ sglen -= copy;
+ } while (datalen);
+
+ return i;
+}
+
+int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
+{
+ struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_device *cdev = cconn->chba->cdev;
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct scsi_cmnd *sc = task->sc;
+ int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
+
+ tcp_task->dd_data = tdata;
+ task->hdr = NULL;
+
+ if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
+ (opcode == ISCSI_OP_SCSI_DATA_OUT ||
+ (opcode == ISCSI_OP_SCSI_CMD &&
+ (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+ /* data could goes into skb head */
+ headroom += min_t(unsigned int,
+ SKB_MAX_HEAD(cdev->skb_tx_rsvd),
+ conn->max_xmit_dlength);
+
+ tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
+ if (!tdata->skb) {
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
+ task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+ task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
+
+ /* data_out uses scsi_cmd's itt */
+ if (opcode != ISCSI_OP_SCSI_DATA_OUT)
+ task_reserve_itt(task, &task->hdr->itt);
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
+ task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
+ conn->max_xmit_dlength, ntohl(task->hdr->itt));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
+
+static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
+{
+ if (hcrc || dcrc) {
+ u8 submode = 0;
+
+ if (hcrc)
+ submode |= 1;
+ if (dcrc)
+ submode |= 2;
+ cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ } else
+ cxgbi_skcb_ulp_mode(skb) = 0;
+}
+
+int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
+ unsigned int count)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct sk_buff *skb = tdata->skb;
+ unsigned int datalen = count;
+ int i, padlen = iscsi_padding(count);
+ struct page *pg;
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
+ task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
+ ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);
+
+ skb_put(skb, task->hdr_len);
+ tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
+ if (!count)
+ return 0;
+
+ if (task->sc) {
+ struct scsi_data_buffer *sdb = scsi_out(task->sc);
+ struct scatterlist *sg = NULL;
+ int err;
+
+ tdata->offset = offset;
+ tdata->count = count;
+ err = sgl_seek_offset(
+ sdb->table.sgl, sdb->table.nents,
+ tdata->offset, &tdata->sgoffset, &sg);
+ if (err < 0) {
+ pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
+ sdb->table.nents, tdata->offset, sdb->length);
+ return err;
+ }
+ err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
+ tdata->frags, MAX_PDU_FRAGS);
+ if (err < 0) {
+ pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
+ sdb->table.nents, tdata->offset, tdata->count);
+ return err;
+ }
+ tdata->nr_frags = err;
+
+ if (tdata->nr_frags > MAX_SKB_FRAGS ||
+ (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+ char *dst = skb->data + task->hdr_len;
+ struct page_frag *frag = tdata->frags;
+
+ /* data fits in the skb's headroom */
+ for (i = 0; i < tdata->nr_frags; i++, frag++) {
+ char *src = kmap_atomic(frag->page);
+
+ memcpy(dst, src+frag->offset, frag->size);
+ dst += frag->size;
+ kunmap_atomic(src);
+ }
+ if (padlen) {
+ memset(dst, 0, padlen);
+ padlen = 0;
+ }
+ skb_put(skb, count + padlen);
+ } else {
+ /* data fit into frag_list */
+ for (i = 0; i < tdata->nr_frags; i++) {
+ __skb_fill_page_desc(skb, i,
+ tdata->frags[i].page,
+ tdata->frags[i].offset,
+ tdata->frags[i].size);
+ skb_frag_ref(skb, i);
+ }
+ skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+ skb->len += count;
+ skb->data_len += count;
+ skb->truesize += count;
+ }
+
+ } else {
+ pg = virt_to_page(task->data);
+
+ get_page(pg);
+ skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
+ count);
+ skb->len += count;
+ skb->data_len += count;
+ skb->truesize += count;
+ }
+
+ if (padlen) {
+ i = skb_shinfo(skb)->nr_frags;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ virt_to_page(padding), offset_in_page(padding),
+ padlen);
+
+ skb->data_len += padlen;
+ skb->truesize += padlen;
+ skb->len += padlen;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
+
+int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
+{
+ struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct sk_buff *skb = tdata->skb;
+ unsigned int datalen;
+ int err;
+
+ if (!skb) {
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "task 0x%p, skb NULL.\n", task);
+ return 0;
+ }
+
+ datalen = skb->data_len;
+ tdata->skb = NULL;
+ err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
+ if (err > 0) {
+ int pdulen = err;
+
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+ task, task->sc, skb, skb->len, skb->data_len, err);
+
+ if (task->conn->hdrdgst_en)
+ pdulen += ISCSI_DIGEST_SIZE;
+
+ if (datalen && task->conn->datadgst_en)
+ pdulen += ISCSI_DIGEST_SIZE;
+
+ task->conn->txdata_octets += pdulen;
+ return 0;
+ }
+
+ if (err == -EAGAIN || err == -ENOBUFS) {
+ log_debug(1 << CXGBI_DBG_PDU_TX,
+ "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
+ task, skb, skb->len, skb->data_len, err);
+ /* reset skb to send when we are called again */
+ tdata->skb = skb;
+ return err;
+ }
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
+ task->itt, skb, skb->len, skb->data_len, err);
+
+ kfree_skb(skb);
+
+ iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
+ iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
+
+void cxgbi_cleanup_task(struct iscsi_task *task)
+{
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "task 0x%p, skb 0x%p, itt 0x%x.\n",
+ task, tdata->skb, task->hdr_itt);
+
+ /* never reached the xmit task callout */
+ if (tdata->skb)
+ __kfree_skb(tdata->skb);
+ memset(tdata, 0, sizeof(*tdata));
+
+ task_release_itt(task, task->hdr_itt);
+ iscsi_tcp_cleanup_task(task);
+}
+EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
+
+void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ stats->custom_length = 1;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+}
+EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
+
+static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_device *cdev = cconn->chba->cdev;
+ unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
+ unsigned int max_def = 512 * MAX_SKB_FRAGS;
+ unsigned int max = max(max_def, headroom);
+
+ max = min(cconn->chba->cdev->tx_max_size, max);
+ if (conn->max_xmit_dlength)
+ conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
+ else
+ conn->max_xmit_dlength = max;
+ cxgbi_align_pdu_size(conn->max_xmit_dlength);
+
+ return 0;
+}
+
+static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ unsigned int max = cconn->chba->cdev->rx_max_size;
+
+ cxgbi_align_pdu_size(max);
+
+ if (conn->max_recv_dlength) {
+ if (conn->max_recv_dlength > max) {
+ pr_err("MaxRecvDataSegmentLength %u > %u.\n",
+ conn->max_recv_dlength, max);
+ return -EINVAL;
+ }
+ conn->max_recv_dlength = min(conn->max_recv_dlength, max);
+ cxgbi_align_pdu_size(conn->max_recv_dlength);
+ } else
+ conn->max_recv_dlength = max;
+
+ return 0;
+}
+
+int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ int err;
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "cls_conn 0x%p, param %d, buf(%d) %s.\n",
+ cls_conn, param, buflen, buf);
+
+ switch (param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err && conn->hdrdgst_en)
+ err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en, 0);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err && conn->datadgst_en)
+ err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en, 0);
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ return iscsi_tcp_set_max_r2t(conn, buf);
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err)
+ err = cxgbi_conn_max_recv_dlength(conn);
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err)
+ err = cxgbi_conn_max_xmit_dlength(conn);
+ break;
+ default:
+ return iscsi_set_param(cls_conn, param, buf, buflen);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
+
+static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
+{
+ int len;
+
+ cxgbi_sock_get(csk);
+ len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
+ cxgbi_sock_put(csk);
+
+ return len;
+}
+
+static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
+{
+ int len;
+
+ cxgbi_sock_get(csk);
+ if (csk->csk_family == AF_INET)
+ len = sprintf(buf, "%pI4",
+ &csk->daddr.sin_addr.s_addr);
+ else
+ len = sprintf(buf, "%pI6",
+ &csk->daddr6.sin6_addr);
+
+ cxgbi_sock_put(csk);
+
+ return len;
+}
+
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf)
+{
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_sock *csk;
+ int len;
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "cls_conn 0x%p, param %d.\n", ep, param);
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!cep)
+ return -ENOTCONN;
+
+ csk = cep->csk;
+ if (!csk)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &csk->daddr, param, buf);
+ default:
+ return -ENOSYS;
+ }
+ return len;
+}
+EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
+
+struct iscsi_cls_conn *
+cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
+{
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct cxgbi_conn *cconn;
+
+ cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
+ if (!cls_conn)
+ return NULL;
+
+ conn = cls_conn->dd_data;
+ tcp_conn = conn->dd_data;
+ cconn = tcp_conn->dd_data;
+ cconn->iconn = conn;
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
+ cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
+
+ return cls_conn;
+}
+EXPORT_SYMBOL_GPL(cxgbi_create_conn);
+
+int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_eph, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct iscsi_endpoint *ep;
+ struct cxgbi_endpoint *cep;
+ struct cxgbi_sock *csk;
+ int err;
+
+ ep = iscsi_lookup_endpoint(transport_eph);
+ if (!ep)
+ return -EINVAL;
+
+ /* setup ddp pagesize */
+ cep = ep->dd_data;
+ csk = cep->csk;
+ err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
+ if (err < 0)
+ return err;
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+ return -EINVAL;
+
+ /* calculate the tag idx bits needed for this conn based on cmds_max */
+ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
+
+ write_lock_bh(&csk->callback_lock);
+ csk->user_data = conn;
+ cconn->chba = cep->chba;
+ cconn->cep = cep;
+ cep->cconn = cconn;
+ write_unlock_bh(&csk->callback_lock);
+
+ cxgbi_conn_max_xmit_dlength(conn);
+ cxgbi_conn_max_recv_dlength(conn);
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
+ cls_session, cls_conn, ep, cconn, csk);
+ /* init recv engine */
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
+
+struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
+ u16 cmds_max, u16 qdepth,
+ u32 initial_cmdsn)
+{
+ struct cxgbi_endpoint *cep;
+ struct cxgbi_hba *chba;
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+
+ if (!ep) {
+ pr_err("missing endpoint.\n");
+ return NULL;
+ }
+
+ cep = ep->dd_data;
+ chba = cep->chba;
+ shost = chba->shost;
+
+ BUG_ON(chba != iscsi_host_priv(shost));
+
+ cls_session = iscsi_session_setup(chba->cdev->itp, shost,
+ cmds_max, 0,
+ sizeof(struct iscsi_tcp_task) +
+ sizeof(struct cxgbi_task_data),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session)
+ return NULL;
+
+ session = cls_session->dd_data;
+ if (iscsi_tcp_r2tpool_alloc(session))
+ goto remove_session;
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
+ return cls_session;
+
+remove_session:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_create_session);
+
+void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
+{
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "cls sess 0x%p.\n", cls_session);
+
+ iscsi_tcp_r2tpool_free(cls_session->dd_data);
+ iscsi_session_teardown(cls_session);
+}
+EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
+
+int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+{
+ struct cxgbi_hba *chba = iscsi_host_priv(shost);
+
+ if (!chba->ndev) {
+ shost_printk(KERN_ERR, shost, "Could not get host param. "
+ "netdev for host not set.\n");
+ return -ENODEV;
+ }
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
+ shost, chba, chba->ndev->name, param, buflen, buf);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ {
+ __be32 addr = in_aton(buf);
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
+ cxgbi_set_iscsi_ipv4(chba, addr);
+ return 0;
+ }
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ return 0;
+ default:
+ return iscsi_host_set_param(shost, param, buf, buflen);
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
+
+int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+{
+ struct cxgbi_hba *chba = iscsi_host_priv(shost);
+ int len = 0;
+
+ if (!chba->ndev) {
+ shost_printk(KERN_ERR, shost, "Could not get host param. "
+ "netdev for host not set.\n");
+ return -ENODEV;
+ }
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "shost 0x%p, hba 0x%p,%s, param %d.\n",
+ shost, chba, chba->ndev->name, param);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "%s\n", chba->ndev->name);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ {
+ struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
+ chba->port_id);
+ if (csk) {
+ len = sprintf(buf, "%pIS",
+ (struct sockaddr *)&csk->saddr);
+ }
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "hba %s, addr %s.\n", chba->ndev->name, buf);
+ break;
+ }
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
+
+struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct iscsi_endpoint *ep;
+ struct cxgbi_endpoint *cep;
+ struct cxgbi_hba *hba = NULL;
+ struct cxgbi_sock *csk;
+ int err = -EINVAL;
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
+ "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
+ shost, non_blocking, dst_addr);
+
+ if (shost) {
+ hba = iscsi_host_priv(shost);
+ if (!hba) {
+ pr_info("shost 0x%p, priv NULL.\n", shost);
+ goto err_out;
+ }
+ }
+
+ if (dst_addr->sa_family == AF_INET) {
+ csk = cxgbi_check_route(dst_addr);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (dst_addr->sa_family == AF_INET6) {
+ csk = cxgbi_check_route6(dst_addr);
+#endif
+ } else {
+ pr_info("address family 0x%x NOT supported.\n",
+ dst_addr->sa_family);
+ err = -EAFNOSUPPORT;
+ return (struct iscsi_endpoint *)ERR_PTR(err);
+ }
+
+ if (IS_ERR(csk))
+ return (struct iscsi_endpoint *)csk;
+ cxgbi_sock_get(csk);
+
+ if (!hba)
+ hba = csk->cdev->hbas[csk->port_id];
+ else if (hba != csk->cdev->hbas[csk->port_id]) {
+ pr_info("Could not connect through requested host %u"
+ "hba 0x%p != 0x%p (%u).\n",
+ shost->host_no, hba,
+ csk->cdev->hbas[csk->port_id], csk->port_id);
+ err = -ENOSPC;
+ goto release_conn;
+ }
+
+ err = sock_get_port(csk);
+ if (err)
+ goto release_conn;
+
+ cxgbi_sock_set_state(csk, CTP_CONNECTING);
+ err = csk->cdev->csk_init_act_open(csk);
+ if (err)
+ goto release_conn;
+
+ if (cxgbi_sock_is_closing(csk)) {
+ err = -ENOSPC;
+ pr_info("csk 0x%p is closing.\n", csk);
+ goto release_conn;
+ }
+
+ ep = iscsi_create_endpoint(sizeof(*cep));
+ if (!ep) {
+ err = -ENOMEM;
+ pr_info("iscsi alloc ep, OOM.\n");
+ goto release_conn;
+ }
+
+ cep = ep->dd_data;
+ cep->csk = csk;
+ cep->chba = hba;
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
+ "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
+ ep, cep, csk, hba, hba->ndev->name);
+ return ep;
+
+release_conn:
+ cxgbi_sock_put(csk);
+ cxgbi_sock_closed(csk);
+err_out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
+
+int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_sock *csk = cep->csk;
+
+ if (!cxgbi_sock_is_established(csk))
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
+
+void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_conn *cconn = cep->cconn;
+ struct cxgbi_sock *csk = cep->csk;
+
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
+ "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
+ ep, cep, cconn, csk, csk->state, csk->flags);
+
+ if (cconn && cconn->iconn) {
+ iscsi_suspend_tx(cconn->iconn);
+ write_lock_bh(&csk->callback_lock);
+ cep->csk->user_data = NULL;
+ cconn->cep = NULL;
+ write_unlock_bh(&csk->callback_lock);
+ }
+ iscsi_destroy_endpoint(ep);
+
+ if (likely(csk->state >= CTP_ESTABLISHED))
+ need_active_close(csk);
+ else
+ cxgbi_sock_closed(csk);
+
+ cxgbi_sock_put(csk);
+}
+EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
+
+int cxgbi_iscsi_init(struct iscsi_transport *itp,
+ struct scsi_transport_template **stt)
+{
+ *stt = iscsi_register_transport(itp);
+ if (*stt == NULL) {
+ pr_err("unable to register %s transport 0x%p.\n",
+ itp->name, itp);
+ return -ENODEV;
+ }
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "%s, registered iscsi transport 0x%p.\n",
+ itp->name, stt);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
+
+void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
+ struct scsi_transport_template **stt)
+{
+ if (*stt) {
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "de-register transport 0x%p, %s, stt 0x%p.\n",
+ itp, itp->name, *stt);
+ *stt = NULL;
+ iscsi_unregister_transport(itp);
+ }
+}
+EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
+
+umode_t cxgbi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
+
+static int __init libcxgbi_init_module(void)
+{
+ sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
+ sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
+
+ pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
+ ISCSI_ITT_MASK, sw_tag_idx_bits,
+ ISCSI_AGE_MASK, sw_tag_age_bits);
+
+ ddp_setup_host_page_size();
+ return 0;
+}
+
+static void __exit libcxgbi_exit_module(void)
+{
+ cxgbi_device_unregister_all(0xFF);
+ return;
+}
+
+module_init(libcxgbi_init_module);
+module_exit(libcxgbi_exit_module);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
new file mode 100644
index 000000000..aba1af720
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -0,0 +1,758 @@
+/*
+ * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ * Written by: Rakesh Ranjan (rranjan@chelsio.com)
+ */
+
+#ifndef __LIBCXGBI_H__
+#define __LIBCXGBI_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_device.h>
+#include <scsi/libiscsi_tcp.h>
+
+enum cxgbi_dbg_flag {
+ CXGBI_DBG_ISCSI,
+ CXGBI_DBG_DDP,
+ CXGBI_DBG_TOE,
+ CXGBI_DBG_SOCK,
+
+ CXGBI_DBG_PDU_TX,
+ CXGBI_DBG_PDU_RX,
+ CXGBI_DBG_DEV,
+};
+
+#define log_debug(level, fmt, ...) \
+ do { \
+ if (dbg_level & (level)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define pr_info_ipaddr(fmt_trail, \
+ addr1, addr2, args_trail...) \
+do { \
+ if (!((1 << CXGBI_DBG_SOCK) & dbg_level)) \
+ break; \
+ pr_info("%pISpc - %pISpc, " fmt_trail, \
+ addr1, addr2, args_trail); \
+} while (0)
+
+/* max. connections per adapter */
+#define CXGBI_MAX_CONN 16384
+
+/* always allocate rooms for AHS */
+#define SKB_TX_ISCSI_PDU_HEADER_MAX \
+ (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
+
+#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/
+
+/*
+ * align pdu size to multiple of 512 for better performance
+ */
+#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)
+
+#define ULP2_MODE_ISCSI 2
+
+#define ULP2_MAX_PKT_SIZE 16224
+#define ULP2_MAX_PDU_PAYLOAD \
+ (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
+
+/*
+ * For iscsi connections HW may inserts digest bytes into the pdu. Those digest
+ * bytes are not sent by the host but are part of the TCP payload and therefore
+ * consume TCP sequence space.
+ */
+static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };
+static inline unsigned int cxgbi_ulp_extra_len(int submode)
+{
+ return ulp2_extra_len[submode & 3];
+}
+
+/*
+ * struct pagepod_hdr, pagepod - pagepod format
+ */
+
+#define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
+#define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
+#define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
+#define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
+
+struct cxgbi_pagepod_hdr {
+ u32 vld_tid;
+ u32 pgsz_tag_clr;
+ u32 max_offset;
+ u32 page_offset;
+ u64 rsvd;
+};
+
+#define PPOD_PAGES_MAX 4
+struct cxgbi_pagepod {
+ struct cxgbi_pagepod_hdr hdr;
+ u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+struct cxgbi_tag_format {
+ unsigned char sw_bits;
+ unsigned char rsvd_bits;
+ unsigned char rsvd_shift;
+ unsigned char filler[1];
+ u32 rsvd_mask;
+};
+
+struct cxgbi_gather_list {
+ unsigned int tag;
+ unsigned int length;
+ unsigned int offset;
+ unsigned int nelem;
+ struct page **pages;
+ dma_addr_t phys_addr[0];
+};
+
+struct cxgbi_ddp_info {
+ struct kref refcnt;
+ struct cxgbi_device *cdev;
+ struct pci_dev *pdev;
+ unsigned int max_txsz;
+ unsigned int max_rxsz;
+ unsigned int llimit;
+ unsigned int ulimit;
+ unsigned int nppods;
+ unsigned int idx_last;
+ unsigned char idx_bits;
+ unsigned char filler[3];
+ unsigned int idx_mask;
+ unsigned int rsvd_tag_mask;
+ spinlock_t map_lock;
+ struct cxgbi_gather_list **gl_map;
+};
+
+#define DDP_PGIDX_MAX 4
+#define DDP_THRESHOLD 2048
+
+#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
+
+#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
+#define PPOD_SIZE_SHIFT 6
+
+#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
+#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT 0
+#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_IDX_SHIFT 6
+#define PPOD_IDX_MAX_SIZE 24
+
+#define PPOD_TID_SHIFT 0
+#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
+
+#define PPOD_TAG_SHIFT 6
+#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_VALID_SHIFT 24
+#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG PPOD_VALID(1U)
+
+/*
+ * sge_opaque_hdr -
+ * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
+ * and for which we must reserve space.
+ */
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+struct cxgbi_sock {
+ struct cxgbi_device *cdev;
+
+ int tid;
+ int atid;
+ unsigned long flags;
+ unsigned int mtu;
+ unsigned short rss_qid;
+ unsigned short txq_idx;
+ unsigned short advmss;
+ unsigned int tx_chan;
+ unsigned int rx_chan;
+ unsigned int mss_idx;
+ unsigned int smac_idx;
+ unsigned char port_id;
+ int wr_max_cred;
+ int wr_cred;
+ int wr_una_cred;
+ unsigned char hcrc_len;
+ unsigned char dcrc_len;
+
+ void *l2t;
+ struct sk_buff *wr_pending_head;
+ struct sk_buff *wr_pending_tail;
+ struct sk_buff *cpl_close;
+ struct sk_buff *cpl_abort_req;
+ struct sk_buff *cpl_abort_rpl;
+ struct sk_buff *skb_ulp_lhdr;
+ spinlock_t lock;
+ struct kref refcnt;
+ unsigned int state;
+ unsigned int csk_family;
+ union {
+ struct sockaddr_in saddr;
+ struct sockaddr_in6 saddr6;
+ };
+ union {
+ struct sockaddr_in daddr;
+ struct sockaddr_in6 daddr6;
+ };
+ struct dst_entry *dst;
+ struct sk_buff_head receive_queue;
+ struct sk_buff_head write_queue;
+ struct timer_list retry_timer;
+ int err;
+ rwlock_t callback_lock;
+ void *user_data;
+
+ u32 rcv_nxt;
+ u32 copied_seq;
+ u32 rcv_wup;
+ u32 snd_nxt;
+ u32 snd_una;
+ u32 write_seq;
+};
+
+/*
+ * connection states
+ */
+enum cxgbi_sock_states{
+ CTP_CLOSED,
+ CTP_CONNECTING,
+ CTP_ACTIVE_OPEN,
+ CTP_ESTABLISHED,
+ CTP_ACTIVE_CLOSE,
+ CTP_PASSIVE_CLOSE,
+ CTP_CLOSE_WAIT_1,
+ CTP_CLOSE_WAIT_2,
+ CTP_ABORTING,
+};
+
+/*
+ * Connection flags -- many to track some close related events.
+ */
+enum cxgbi_sock_flags {
+ CTPF_ABORT_RPL_RCVD, /*received one ABORT_RPL_RSS message */
+ CTPF_ABORT_REQ_RCVD, /*received one ABORT_REQ_RSS message */
+ CTPF_ABORT_RPL_PENDING, /* expecting an abort reply */
+ CTPF_TX_DATA_SENT, /* already sent a TX_DATA WR */
+ CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */
+ CTPF_HAS_ATID, /* reserved atid */
+ CTPF_HAS_TID, /* reserved hw tid */
+ CTPF_OFFLOAD_DOWN, /* offload function off */
+};
+
+struct cxgbi_skb_rx_cb {
+ __u32 ddigest;
+ __u32 pdulen;
+};
+
+struct cxgbi_skb_tx_cb {
+ void *l2t;
+ struct sk_buff *wr_next;
+};
+
+enum cxgbi_skcb_flags {
+ SKCBF_TX_NEED_HDR, /* packet needs a header */
+ SKCBF_RX_COALESCED, /* received whole pdu */
+ SKCBF_RX_HDR, /* received pdu header */
+ SKCBF_RX_DATA, /* received pdu payload */
+ SKCBF_RX_STATUS, /* received ddp status */
+ SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
+ SKCBF_RX_HCRC_ERR, /* header digest error */
+ SKCBF_RX_DCRC_ERR, /* data digest error */
+ SKCBF_RX_PAD_ERR, /* padding byte error */
+};
+
+struct cxgbi_skb_cb {
+ unsigned char ulp_mode;
+ unsigned long flags;
+ unsigned int seq;
+ union {
+ struct cxgbi_skb_rx_cb rx;
+ struct cxgbi_skb_tx_cb tx;
+ };
+};
+
+#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
+#define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags)
+#define cxgbi_skcb_ulp_mode(skb) (CXGBI_SKB_CB(skb)->ulp_mode)
+#define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq)
+#define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest)
+#define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen)
+#define cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next)
+
+static inline void cxgbi_skcb_set_flag(struct sk_buff *skb,
+ enum cxgbi_skcb_flags flag)
+{
+ __set_bit(flag, &(cxgbi_skcb_flags(skb)));
+}
+
+static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
+ enum cxgbi_skcb_flags flag)
+{
+ __clear_bit(flag, &(cxgbi_skcb_flags(skb)));
+}
+
+static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
+ enum cxgbi_skcb_flags flag)
+{
+ return test_bit(flag, &(cxgbi_skcb_flags(skb)));
+}
+
+static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
+ enum cxgbi_sock_flags flag)
+{
+ __set_bit(flag, &csk->flags);
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx, bit %d.\n",
+ csk, csk->state, csk->flags, flag);
+}
+
+static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
+ enum cxgbi_sock_flags flag)
+{
+ __clear_bit(flag, &csk->flags);
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx, bit %d.\n",
+ csk, csk->state, csk->flags, flag);
+}
+
+static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
+ enum cxgbi_sock_flags flag)
+{
+ if (csk == NULL)
+ return 0;
+ return test_bit(flag, &csk->flags);
+}
+
+static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
+{
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx, state -> %u.\n",
+ csk, csk->state, csk->flags, state);
+ csk->state = state;
+}
+
+static inline void cxgbi_sock_free(struct kref *kref)
+{
+ struct cxgbi_sock *csk = container_of(kref,
+ struct cxgbi_sock,
+ refcnt);
+ if (csk) {
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "free csk 0x%p, state %u, flags 0x%lx\n",
+ csk, csk->state, csk->flags);
+ kfree(csk);
+ }
+}
+
+static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
+{
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "%s, put csk 0x%p, ref %u-1.\n",
+ fn, csk, atomic_read(&csk->refcnt.refcount));
+ kref_put(&csk->refcnt, cxgbi_sock_free);
+}
+#define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk)
+
+static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
+{
+ log_debug(1 << CXGBI_DBG_SOCK,
+ "%s, get csk 0x%p, ref %u+1.\n",
+ fn, csk, atomic_read(&csk->refcnt.refcount));
+ kref_get(&csk->refcnt);
+}
+#define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk)
+
+static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
+{
+ return csk->state >= CTP_ACTIVE_CLOSE;
+}
+
+static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
+{
+ return csk->state == CTP_ESTABLISHED;
+}
+
+static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&csk->write_queue)))
+ __kfree_skb(skb);
+}
+
+static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
+{
+ unsigned int wscale = 0;
+
+ while (wscale < 14 && (65535 << wscale) < win)
+ wscale++;
+ return wscale;
+}
+
+static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
+{
+ struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);
+
+ if (skb) {
+ __skb_put(skb, wrlen);
+ memset(skb->head, 0, wrlen + dlen);
+ } else
+ pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
+ return skb;
+}
+
+
+/*
+ * The number of WRs needed for an skb depends on the number of fragments
+ * in the skb and whether it has any payload in its main body. This maps the
+ * length of the gather list represented by an skb into the # of necessary WRs.
+ * The extra two fragments are for iscsi bhs and payload padding.
+ */
+#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
+
+static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk)
+{
+ csk->wr_pending_head = csk->wr_pending_tail = NULL;
+}
+
+static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
+ struct sk_buff *skb)
+{
+ cxgbi_skcb_tx_wr_next(skb) = NULL;
+ /*
+ * We want to take an extra reference since both us and the driver
+ * need to free the packet before it's really freed. We know there's
+ * just one user currently so we use atomic_set rather than skb_get
+ * to avoid the atomic op.
+ */
+ atomic_set(&skb->users, 2);
+
+ if (!csk->wr_pending_head)
+ csk->wr_pending_head = skb;
+ else
+ cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
+ csk->wr_pending_tail = skb;
+}
+
+static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk)
+{
+ int n = 0;
+ const struct sk_buff *skb = csk->wr_pending_head;
+
+ while (skb) {
+ n += skb->csum;
+ skb = cxgbi_skcb_tx_wr_next(skb);
+ }
+ return n;
+}
+
+static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
+{
+ return csk->wr_pending_head;
+}
+
+static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk)
+{
+ struct sk_buff *skb = csk->wr_pending_head;
+
+ if (likely(skb)) {
+ csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb);
+ cxgbi_skcb_tx_wr_next(skb) = NULL;
+ }
+ return skb;
+}
+
+void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *);
+void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *);
+void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *);
+void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int);
+void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *);
+void cxgbi_sock_closed(struct cxgbi_sock *);
+void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int);
+void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *);
+void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *);
+void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32);
+void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int,
+ int);
+unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int);
+void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *);
+
+struct cxgbi_hba {
+ struct net_device *ndev;
+ struct net_device *vdev; /* vlan dev */
+ struct Scsi_Host *shost;
+ struct cxgbi_device *cdev;
+ __be32 ipv4addr;
+ unsigned char port_id;
+};
+
+struct cxgbi_ports_map {
+ unsigned int max_connect;
+ unsigned int used;
+ unsigned short sport_base;
+ spinlock_t lock;
+ unsigned int next;
+ struct cxgbi_sock **port_csk;
+};
+
+#define CXGBI_FLAG_DEV_T3 0x1
+#define CXGBI_FLAG_DEV_T4 0x2
+#define CXGBI_FLAG_ADAPTER_RESET 0x4
+#define CXGBI_FLAG_IPV4_SET 0x10
+struct cxgbi_device {
+ struct list_head list_head;
+ struct list_head rcu_node;
+ unsigned int flags;
+ struct net_device **ports;
+ void *lldev;
+ struct cxgbi_hba **hbas;
+ const unsigned short *mtus;
+ unsigned char nmtus;
+ unsigned char nports;
+ struct pci_dev *pdev;
+ struct dentry *debugfs_root;
+ struct iscsi_transport *itp;
+
+ unsigned int pfvf;
+ unsigned int snd_win;
+ unsigned int rcv_win;
+ unsigned int rx_credit_thres;
+ unsigned int skb_tx_rsvd;
+ unsigned int skb_rx_extra; /* for msg coalesced mode */
+ unsigned int tx_max_size;
+ unsigned int rx_max_size;
+ struct cxgbi_ports_map pmap;
+ struct cxgbi_tag_format tag_format;
+ struct cxgbi_ddp_info *ddp;
+
+ void (*dev_ddp_cleanup)(struct cxgbi_device *);
+ int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
+ unsigned int, unsigned int,
+ struct cxgbi_gather_list *);
+ void (*csk_ddp_clear)(struct cxgbi_hba *,
+ unsigned int, unsigned int, unsigned int);
+ int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
+ unsigned int, int, int, int);
+ int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
+ unsigned int, int, bool);
+
+ void (*csk_release_offload_resources)(struct cxgbi_sock *);
+ int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
+ u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
+ int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
+ void (*csk_send_abort_req)(struct cxgbi_sock *);
+ void (*csk_send_close_req)(struct cxgbi_sock *);
+ int (*csk_alloc_cpls)(struct cxgbi_sock *);
+ int (*csk_init_act_open)(struct cxgbi_sock *);
+
+ void *dd_data;
+};
+#define cxgbi_cdev_priv(cdev) ((cdev)->dd_data)
+
+struct cxgbi_conn {
+ struct cxgbi_endpoint *cep;
+ struct iscsi_conn *iconn;
+ struct cxgbi_hba *chba;
+ u32 task_idx_bits;
+};
+
+struct cxgbi_endpoint {
+ struct cxgbi_conn *cconn;
+ struct cxgbi_hba *chba;
+ struct cxgbi_sock *csk;
+};
+
+#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
+struct cxgbi_task_data {
+ unsigned short nr_frags;
+ struct page_frag frags[MAX_PDU_FRAGS];
+ struct sk_buff *skb;
+ unsigned int offset;
+ unsigned int count;
+ unsigned int sgoffset;
+};
+#define iscsi_task_cxgbi_data(task) \
+ ((task)->dd_data + sizeof(struct iscsi_tcp_task))
+
+static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag)
+{
+ return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
+}
+
+static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat,
+ u32 sw_tag)
+{
+ sw_tag >>= (32 - tformat->rsvd_bits);
+ return !sw_tag;
+}
+
+static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat,
+ u32 sw_tag)
+{
+ unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+ u32 mask = (1 << shift) - 1;
+
+ if (sw_tag && (sw_tag & ~mask)) {
+ u32 v1 = sw_tag & ((1 << shift) - 1);
+ u32 v2 = (sw_tag >> (shift - 1)) << shift;
+
+ return v2 | v1 | 1 << shift;
+ }
+
+ return sw_tag | 1 << shift;
+}
+
+static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat,
+ u32 sw_tag)
+{
+ u32 mask = (1 << tformat->rsvd_shift) - 1;
+
+ if (sw_tag && (sw_tag & ~mask)) {
+ u32 v1 = sw_tag & mask;
+ u32 v2 = sw_tag >> tformat->rsvd_shift;
+
+ v2 <<= tformat->rsvd_bits + tformat->rsvd_shift;
+
+ return v2 | v1;
+ }
+
+ return sw_tag;
+}
+
+static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat,
+ u32 tag)
+{
+ if (cxgbi_is_ddp_tag(tformat, tag))
+ return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
+
+ return 0;
+}
+
+static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
+ u32 tag)
+{
+ unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+ u32 v1, v2;
+
+ if (cxgbi_is_ddp_tag(tformat, tag)) {
+ v1 = tag & ((1 << tformat->rsvd_shift) - 1);
+ v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
+ } else {
+ u32 mask = (1 << shift) - 1;
+ tag &= ~(1 << shift);
+ v1 = tag & mask;
+ v2 = (tag >> 1) & ~mask;
+ }
+ return v1 | v2;
+}
+
+static inline void *cxgbi_alloc_big_mem(unsigned int size,
+ gfp_t gfp)
+{
+ void *p = kzalloc(size, gfp | __GFP_NOWARN);
+
+ if (!p)
+ p = vzalloc(size);
+
+ return p;
+}
+
+static inline void cxgbi_free_big_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
+{
+ if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
+ chba->ipv4addr = ipaddr;
+ else
+ pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
+ chba->ndev->name);
+}
+
+struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
+void cxgbi_device_unregister(struct cxgbi_device *);
+void cxgbi_device_unregister_all(unsigned int flag);
+struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
+struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
+ int *);
+int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
+ struct scsi_host_template *,
+ struct scsi_transport_template *);
+void cxgbi_hbas_remove(struct cxgbi_device *);
+
+int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
+ unsigned int max_conn);
+void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev);
+
+void cxgbi_conn_tx_open(struct cxgbi_sock *);
+void cxgbi_conn_pdu_ready(struct cxgbi_sock *);
+int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
+int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int);
+int cxgbi_conn_xmit_pdu(struct iscsi_task *);
+
+void cxgbi_cleanup_task(struct iscsi_task *task);
+
+umode_t cxgbi_attr_is_visible(int param_type, int param);
+void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
+int cxgbi_set_conn_param(struct iscsi_cls_conn *,
+ enum iscsi_param, char *, int);
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
+struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
+int cxgbi_bind_conn(struct iscsi_cls_session *,
+ struct iscsi_cls_conn *, u64, int);
+void cxgbi_destroy_session(struct iscsi_cls_session *);
+struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
+ u16, u16, u32);
+int cxgbi_set_host_param(struct Scsi_Host *,
+ enum iscsi_host_param, char *, int);
+int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
+struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
+ struct sockaddr *, int);
+int cxgbi_ep_poll(struct iscsi_endpoint *, int);
+void cxgbi_ep_disconnect(struct iscsi_endpoint *);
+
+int cxgbi_iscsi_init(struct iscsi_transport *,
+ struct scsi_transport_template **);
+void cxgbi_iscsi_cleanup(struct iscsi_transport *,
+ struct scsi_transport_template **);
+void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *);
+int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
+ unsigned int, unsigned int);
+int cxgbi_ddp_cleanup(struct cxgbi_device *);
+void cxgbi_ddp_page_size_factor(int *);
+void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *);
+void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *,
+ struct cxgbi_gather_list *, unsigned int);
+#endif /*__LIBCXGBI_H__*/
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
new file mode 100644
index 000000000..5ee7f44cf
--- /dev/null
+++ b/drivers/scsi/dc395x.c
@@ -0,0 +1,4900 @@
+/*
+ * dc395x.c
+ *
+ * Device Driver for Tekram DC395(U/UW/F), DC315(U)
+ * PCI SCSI Bus Master Host Adapter
+ * (SCSI chip set used Tekram ASIC TRM-S1040)
+ *
+ * Authors:
+ * C.L. Huang <ching@tekram.com.tw>
+ * Erich Chen <erich@tekram.com.tw>
+ * (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
+ *
+ * Kurt Garloff <garloff@suse.de>
+ * (C) 1999-2000 Kurt Garloff
+ *
+ * Oliver Neukum <oliver@neukum.name>
+ * Ali Akcaagac <aliakc@web.de>
+ * Jamie Lenehan <lenehan@twibble.org>
+ * (C) 2003
+ *
+ * License: GNU GPL
+ *
+ *************************************************************************
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsicam.h> /* needed for scsicam_bios_param */
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "dc395x.h"
+
+#define DC395X_NAME "dc395x"
+#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
+#define DC395X_VERSION "v2.05, 2004/03/08"
+
+/*---------------------------------------------------------------------------
+ Features
+ ---------------------------------------------------------------------------*/
+/*
+ * Set to disable parts of the driver
+ */
+/*#define DC395x_NO_DISCONNECT*/
+/*#define DC395x_NO_TAGQ*/
+/*#define DC395x_NO_SYNC*/
+/*#define DC395x_NO_WIDE*/
+
+/*---------------------------------------------------------------------------
+ Debugging
+ ---------------------------------------------------------------------------*/
+/*
+ * Types of debugging that can be enabled and disabled
+ */
+#define DBG_KG 0x0001
+#define DBG_0 0x0002
+#define DBG_1 0x0004
+#define DBG_SG 0x0020
+#define DBG_FIFO 0x0040
+#define DBG_PIO 0x0080
+
+
+/*
+ * Set set of things to output debugging for.
+ * Undefine to remove all debugging
+ */
+/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
+/*#define DEBUG_MASK DBG_0*/
+
+
+/*
+ * Output a kernel mesage at the specified level and append the
+ * driver name and a ": " to the start of the message
+ */
+#define dprintkl(level, format, arg...) \
+ printk(level DC395X_NAME ": " format , ## arg)
+
+
+#ifdef DEBUG_MASK
+/*
+ * print a debug message - this is formated with KERN_DEBUG, then the
+ * driver name followed by a ": " and then the message is output.
+ * This also checks that the specified debug level is enabled before
+ * outputing the message
+ */
+#define dprintkdbg(type, format, arg...) \
+ do { \
+ if ((type) & (DEBUG_MASK)) \
+ dprintkl(KERN_DEBUG , format , ## arg); \
+ } while (0)
+
+/*
+ * Check if the specified type of debugging is enabled
+ */
+#define debug_enabled(type) ((DEBUG_MASK) & (type))
+
+#else
+/*
+ * No debugging. Do nothing
+ */
+#define dprintkdbg(type, format, arg...) \
+ do {} while (0)
+#define debug_enabled(type) (0)
+
+#endif
+
+
+#ifndef PCI_VENDOR_ID_TEKRAM
+#define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
+#endif
+#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
+#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391 /* Device ID */
+#endif
+
+
+#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
+#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
+
+#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
+#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
+#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
+#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
+#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
+#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
+
+/* cmd->result */
+#define RES_TARGET 0x000000FF /* Target State */
+#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
+#define RES_ENDMSG 0x0000FF00 /* End Message */
+#define RES_DID 0x00FF0000 /* DID_ codes */
+#define RES_DRV 0xFF000000 /* DRIVER_ codes */
+
+#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
+#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
+
+#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
+#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
+#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
+#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
+#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
+
+#define TAG_NONE 255
+
+/*
+ * srb->segement_x is the hw sg list. It is always allocated as a
+ * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
+ * cross a page boundy.
+ */
+#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
+
+
+struct SGentry {
+ u32 address; /* bus! address */
+ u32 length;
+};
+
+/* The SEEPROM structure for TRM_S1040 */
+struct NVRamTarget {
+ u8 cfg0; /* Target configuration byte 0 */
+ u8 period; /* Target period */
+ u8 cfg2; /* Target configuration byte 2 */
+ u8 cfg3; /* Target configuration byte 3 */
+};
+
+struct NvRamType {
+ u8 sub_vendor_id[2]; /* 0,1 Sub Vendor ID */
+ u8 sub_sys_id[2]; /* 2,3 Sub System ID */
+ u8 sub_class; /* 4 Sub Class */
+ u8 vendor_id[2]; /* 5,6 Vendor ID */
+ u8 device_id[2]; /* 7,8 Device ID */
+ u8 reserved; /* 9 Reserved */
+ struct NVRamTarget target[DC395x_MAX_SCSI_ID];
+ /** 10,11,12,13
+ ** 14,15,16,17
+ ** ....
+ ** ....
+ ** 70,71,72,73
+ */
+ u8 scsi_id; /* 74 Host Adapter SCSI ID */
+ u8 channel_cfg; /* 75 Channel configuration */
+ u8 delay_time; /* 76 Power on delay time */
+ u8 max_tag; /* 77 Maximum tags */
+ u8 reserved0; /* 78 */
+ u8 boot_target; /* 79 */
+ u8 boot_lun; /* 80 */
+ u8 reserved1; /* 81 */
+ u16 reserved2[22]; /* 82,..125 */
+ u16 cksum; /* 126,127 */
+};
+
+struct ScsiReqBlk {
+ struct list_head list; /* next/prev ptrs for srb lists */
+ struct DeviceCtlBlk *dcb;
+ struct scsi_cmnd *cmd;
+
+ struct SGentry *segment_x; /* Linear array of hw sg entries (up to 64 entries) */
+ dma_addr_t sg_bus_addr; /* Bus address of sg list (ie, of segment_x) */
+
+ u8 sg_count; /* No of HW sg entries for this request */
+ u8 sg_index; /* Index of HW sg entry for this request */
+ size_t total_xfer_length; /* Total number of bytes remaining to be transferred */
+ size_t request_length; /* Total number of bytes in this request */
+ /*
+ * The sense buffer handling function, request_sense, uses
+ * the first hw sg entry (segment_x[0]) and the transfer
+ * length (total_xfer_length). While doing this it stores the
+ * original values into the last sg hw list
+ * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
+ * total_xfer_length in xferred. These values are restored in
+ * pci_unmap_srb_sense. This is the only place xferred is used.
+ */
+ size_t xferred; /* Saved copy of total_xfer_length */
+
+ u16 state;
+
+ u8 msgin_buf[6];
+ u8 msgout_buf[6];
+
+ u8 adapter_status;
+ u8 target_status;
+ u8 msg_count;
+ u8 end_message;
+
+ u8 tag_number;
+ u8 status;
+ u8 retry_count;
+ u8 flag;
+
+ u8 scsi_phase;
+};
+
+struct DeviceCtlBlk {
+ struct list_head list; /* next/prev ptrs for the dcb list */
+ struct AdapterCtlBlk *acb;
+ struct list_head srb_going_list; /* head of going srb list */
+ struct list_head srb_waiting_list; /* head of waiting srb list */
+
+ struct ScsiReqBlk *active_srb;
+ u32 tag_mask;
+
+ u16 max_command;
+
+ u8 target_id; /* SCSI Target ID (SCSI Only) */
+ u8 target_lun; /* SCSI Log. Unit (SCSI Only) */
+ u8 identify_msg;
+ u8 dev_mode;
+
+ u8 inquiry7; /* To store Inquiry flags */
+ u8 sync_mode; /* 0:async mode */
+ u8 min_nego_period; /* for nego. */
+ u8 sync_period; /* for reg. */
+
+ u8 sync_offset; /* for reg. and nego.(low nibble) */
+ u8 flag;
+ u8 dev_type;
+ u8 init_tcq_flag;
+};
+
+struct AdapterCtlBlk {
+ struct Scsi_Host *scsi_host;
+
+ unsigned long io_port_base;
+ unsigned long io_port_len;
+
+ struct list_head dcb_list; /* head of going dcb list */
+ struct DeviceCtlBlk *dcb_run_robin;
+ struct DeviceCtlBlk *active_dcb;
+
+ struct list_head srb_free_list; /* head of free srb list */
+ struct ScsiReqBlk *tmp_srb;
+ struct timer_list waiting_timer;
+ struct timer_list selto_timer;
+
+ unsigned long last_reset;
+
+ u16 srb_count;
+
+ u8 sel_timeout;
+
+ unsigned int irq_level;
+ u8 tag_max_num;
+ u8 acb_flag;
+ u8 gmode2;
+
+ u8 config;
+ u8 lun_chk;
+ u8 scan_devices;
+ u8 hostid_bit;
+
+ u8 dcb_map[DC395x_MAX_SCSI_ID];
+ struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
+
+ struct pci_dev *dev;
+
+ u8 msg_len;
+
+ struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
+ struct ScsiReqBlk srb;
+
+ struct NvRamType eeprom; /* eeprom settings for this adapter */
+};
+
+
+/*---------------------------------------------------------------------------
+ Forward declarations
+ ---------------------------------------------------------------------------*/
+static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status);
+static void set_basic_config(struct AdapterCtlBlk *acb);
+static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb);
+static void reset_scsi_bus(struct AdapterCtlBlk *acb);
+static void data_io_transfer(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb, u16 io_dir);
+static void disconnect(struct AdapterCtlBlk *acb);
+static void reselect(struct AdapterCtlBlk *acb);
+static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb);
+static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb);
+static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb);
+static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
+ struct scsi_cmnd *cmd, u8 force);
+static void scsi_reset_detect(struct AdapterCtlBlk *acb);
+static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
+static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb);
+static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb);
+static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb);
+static void set_xfer_rate(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb);
+static void waiting_timeout(unsigned long ptr);
+
+
+/*---------------------------------------------------------------------------
+ Static Data
+ ---------------------------------------------------------------------------*/
+static u16 current_sync_offset = 0;
+
+static void *dc395x_scsi_phase0[] = {
+ data_out_phase0,/* phase:0 */
+ data_in_phase0, /* phase:1 */
+ command_phase0, /* phase:2 */
+ status_phase0, /* phase:3 */
+ nop0, /* phase:4 PH_BUS_FREE .. initial phase */
+ nop0, /* phase:5 PH_BUS_FREE .. initial phase */
+ msgout_phase0, /* phase:6 */
+ msgin_phase0, /* phase:7 */
+};
+
+static void *dc395x_scsi_phase1[] = {
+ data_out_phase1,/* phase:0 */
+ data_in_phase1, /* phase:1 */
+ command_phase1, /* phase:2 */
+ status_phase1, /* phase:3 */
+ nop1, /* phase:4 PH_BUS_FREE .. initial phase */
+ nop1, /* phase:5 PH_BUS_FREE .. initial phase */
+ msgout_phase1, /* phase:6 */
+ msgin_phase1, /* phase:7 */
+};
+
+/*
+ *Fast20: 000 50ns, 20.0 MHz
+ * 001 75ns, 13.3 MHz
+ * 010 100ns, 10.0 MHz
+ * 011 125ns, 8.0 MHz
+ * 100 150ns, 6.6 MHz
+ * 101 175ns, 5.7 MHz
+ * 110 200ns, 5.0 MHz
+ * 111 250ns, 4.0 MHz
+ *
+ *Fast40(LVDS): 000 25ns, 40.0 MHz
+ * 001 50ns, 20.0 MHz
+ * 010 75ns, 13.3 MHz
+ * 011 100ns, 10.0 MHz
+ * 100 125ns, 8.0 MHz
+ * 101 150ns, 6.6 MHz
+ * 110 175ns, 5.7 MHz
+ * 111 200ns, 5.0 MHz
+ */
+/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
+
+/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
+static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
+static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
+
+
+/*---------------------------------------------------------------------------
+ Configuration
+ ---------------------------------------------------------------------------*/
+/*
+ * Module/boot parameters currently effect *all* instances of the
+ * card in the system.
+ */
+
+/*
+ * Command line parameters are stored in a structure below.
+ * These are the index's into the structure for the various
+ * command line options.
+ */
+#define CFG_ADAPTER_ID 0
+#define CFG_MAX_SPEED 1
+#define CFG_DEV_MODE 2
+#define CFG_ADAPTER_MODE 3
+#define CFG_TAGS 4
+#define CFG_RESET_DELAY 5
+
+#define CFG_NUM 6 /* number of configuration items */
+
+
+/*
+ * Value used to indicate that a command line override
+ * hasn't been used to modify the value.
+ */
+#define CFG_PARAM_UNSET -1
+
+
+/*
+ * Hold command line parameters.
+ */
+struct ParameterData {
+ int value; /* value of this setting */
+ int min; /* minimum value */
+ int max; /* maximum value */
+ int def; /* default value */
+ int safe; /* safe value */
+};
+static struct ParameterData cfg_data[] = {
+ { /* adapter id */
+ CFG_PARAM_UNSET,
+ 0,
+ 15,
+ 7,
+ 7
+ },
+ { /* max speed */
+ CFG_PARAM_UNSET,
+ 0,
+ 7,
+ 1, /* 13.3Mhz */
+ 4, /* 6.7Hmz */
+ },
+ { /* dev mode */
+ CFG_PARAM_UNSET,
+ 0,
+ 0x3f,
+ NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
+ NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
+ NTC_DO_SEND_START,
+ NTC_DO_PARITY_CHK | NTC_DO_SEND_START
+ },
+ { /* adapter mode */
+ CFG_PARAM_UNSET,
+ 0,
+ 0x2f,
+ NAC_SCANLUN |
+ NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
+ /*| NAC_ACTIVE_NEG*/,
+ NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
+ },
+ { /* tags */
+ CFG_PARAM_UNSET,
+ 0,
+ 5,
+ 3, /* 16 tags (??) */
+ 2,
+ },
+ { /* reset delay */
+ CFG_PARAM_UNSET,
+ 0,
+ 180,
+ 1, /* 1 second */
+ 10, /* 10 seconds */
+ }
+};
+
+
+/*
+ * Safe settings. If set to zero the BIOS/default values with
+ * command line overrides will be used. If set to 1 then safe and
+ * slow settings will be used.
+ */
+static bool use_safe_settings = 0;
+module_param_named(safe, use_safe_settings, bool, 0);
+MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
+
+
+module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
+MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
+
+module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
+MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
+
+module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
+MODULE_PARM_DESC(dev_mode, "Device mode.");
+
+module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
+MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
+
+module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
+MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
+
+module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
+MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
+
+
+/**
+ * set_safe_settings - if the use_safe_settings option is set then
+ * set all values to the safe and slow values.
+ **/
+static void set_safe_settings(void)
+{
+ if (use_safe_settings)
+ {
+ int i;
+
+ dprintkl(KERN_INFO, "Using safe settings.\n");
+ for (i = 0; i < CFG_NUM; i++)
+ {
+ cfg_data[i].value = cfg_data[i].safe;
+ }
+ }
+}
+
+
+/**
+ * fix_settings - reset any boot parameters which are out of range
+ * back to the default values.
+ **/
+static void fix_settings(void)
+{
+ int i;
+
+ dprintkdbg(DBG_1,
+ "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
+ "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
+ cfg_data[CFG_ADAPTER_ID].value,
+ cfg_data[CFG_MAX_SPEED].value,
+ cfg_data[CFG_DEV_MODE].value,
+ cfg_data[CFG_ADAPTER_MODE].value,
+ cfg_data[CFG_TAGS].value,
+ cfg_data[CFG_RESET_DELAY].value);
+ for (i = 0; i < CFG_NUM; i++)
+ {
+ if (cfg_data[i].value < cfg_data[i].min
+ || cfg_data[i].value > cfg_data[i].max)
+ cfg_data[i].value = cfg_data[i].def;
+ }
+}
+
+
+
+/*
+ * Mapping from the eeprom delay index value (index into this array)
+ * to the number of actual seconds that the delay should be for.
+ */
+static char eeprom_index_to_delay_map[] =
+ { 1, 3, 5, 10, 16, 30, 60, 120 };
+
+
+/**
+ * eeprom_index_to_delay - Take the eeprom delay setting and convert it
+ * into a number of seconds.
+ *
+ * @eeprom: The eeprom structure in which we find the delay index to map.
+ **/
+static void eeprom_index_to_delay(struct NvRamType *eeprom)
+{
+ eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
+}
+
+
+/**
+ * delay_to_eeprom_index - Take a delay in seconds and return the
+ * closest eeprom index which will delay for at least that amount of
+ * seconds.
+ *
+ * @delay: The delay, in seconds, to find the eeprom index for.
+ **/
+static int delay_to_eeprom_index(int delay)
+{
+ u8 idx = 0;
+ while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
+ idx++;
+ return idx;
+}
+
+
+/**
+ * eeprom_override - Override the eeprom settings, in the provided
+ * eeprom structure, with values that have been set on the command
+ * line.
+ *
+ * @eeprom: The eeprom data to override with command line options.
+ **/
+static void eeprom_override(struct NvRamType *eeprom)
+{
+ u8 id;
+
+ /* Adapter Settings */
+ if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
+ eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
+
+ if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
+ eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
+
+ if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
+ eeprom->delay_time = delay_to_eeprom_index(
+ cfg_data[CFG_RESET_DELAY].value);
+
+ if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
+ eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
+
+ /* Device Settings */
+ for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
+ if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
+ eeprom->target[id].cfg0 =
+ (u8)cfg_data[CFG_DEV_MODE].value;
+
+ if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
+ eeprom->target[id].period =
+ (u8)cfg_data[CFG_MAX_SPEED].value;
+
+ }
+}
+
+
+/*---------------------------------------------------------------------------
+ ---------------------------------------------------------------------------*/
+
+static unsigned int list_size(struct list_head *head)
+{
+ unsigned int count = 0;
+ struct list_head *pos;
+ list_for_each(pos, head)
+ count++;
+ return count;
+}
+
+
+static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
+ struct DeviceCtlBlk *pos)
+{
+ int use_next = 0;
+ struct DeviceCtlBlk* next = NULL;
+ struct DeviceCtlBlk* i;
+
+ if (list_empty(head))
+ return NULL;
+
+ /* find supplied dcb and then select the next one */
+ list_for_each_entry(i, head, list)
+ if (use_next) {
+ next = i;
+ break;
+ } else if (i == pos) {
+ use_next = 1;
+ }
+ /* if no next one take the head one (ie, wraparound) */
+ if (!next)
+ list_for_each_entry(i, head, list) {
+ next = i;
+ break;
+ }
+
+ return next;
+}
+
+
+static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
+{
+ if (srb->tag_number < 255) {
+ dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
+ srb->tag_number = 255;
+ }
+}
+
+
+/* Find cmd in SRB list */
+static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
+ struct list_head *head)
+{
+ struct ScsiReqBlk *i;
+ list_for_each_entry(i, head, list)
+ if (i->cmd == cmd)
+ return i;
+ return NULL;
+}
+
+
+static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
+{
+ struct list_head *head = &acb->srb_free_list;
+ struct ScsiReqBlk *srb = NULL;
+
+ if (!list_empty(head)) {
+ srb = list_entry(head->next, struct ScsiReqBlk, list);
+ list_del(head->next);
+ dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
+ }
+ return srb;
+}
+
+
+static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
+{
+ dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
+}
+
+
+static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+ list_add(&srb->list, &dcb->srb_waiting_list);
+}
+
+
+static void srb_waiting_append(struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
+}
+
+
+static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
+{
+ dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+ list_add_tail(&srb->list, &dcb->srb_going_list);
+}
+
+
+static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
+{
+ struct ScsiReqBlk *i;
+ struct ScsiReqBlk *tmp;
+ dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+
+ list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
+ if (i == srb) {
+ list_del(&srb->list);
+ break;
+ }
+}
+
+
+static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ struct ScsiReqBlk *i;
+ struct ScsiReqBlk *tmp;
+ dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+
+ list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
+ if (i == srb) {
+ list_del(&srb->list);
+ break;
+ }
+}
+
+
+static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ dprintkdbg(DBG_0,
+ "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
+}
+
+
+static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ dprintkdbg(DBG_0,
+ "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
+ srb->cmd, dcb->target_id, dcb->target_lun, srb);
+ list_move(&srb->list, &dcb->srb_going_list);
+}
+
+
+/* Sets the timer to wake us up */
+static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
+{
+ if (timer_pending(&acb->waiting_timer))
+ return;
+ init_timer(&acb->waiting_timer);
+ acb->waiting_timer.function = waiting_timeout;
+ acb->waiting_timer.data = (unsigned long) acb;
+ if (time_before(jiffies + to, acb->last_reset - HZ / 2))
+ acb->waiting_timer.expires =
+ acb->last_reset - HZ / 2 + 1;
+ else
+ acb->waiting_timer.expires = jiffies + to + 1;
+ add_timer(&acb->waiting_timer);
+}
+
+
+/* Send the next command from the waiting list to the bus */
+static void waiting_process_next(struct AdapterCtlBlk *acb)
+{
+ struct DeviceCtlBlk *start = NULL;
+ struct DeviceCtlBlk *pos;
+ struct DeviceCtlBlk *dcb;
+ struct ScsiReqBlk *srb;
+ struct list_head *dcb_list_head = &acb->dcb_list;
+
+ if (acb->active_dcb
+ || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
+ return;
+
+ if (timer_pending(&acb->waiting_timer))
+ del_timer(&acb->waiting_timer);
+
+ if (list_empty(dcb_list_head))
+ return;
+
+ /*
+ * Find the starting dcb. Need to find it again in the list
+ * since the list may have changed since we set the ptr to it
+ */
+ list_for_each_entry(dcb, dcb_list_head, list)
+ if (dcb == acb->dcb_run_robin) {
+ start = dcb;
+ break;
+ }
+ if (!start) {
+ /* This can happen! */
+ start = list_entry(dcb_list_head->next, typeof(*start), list);
+ acb->dcb_run_robin = start;
+ }
+
+
+ /*
+ * Loop over the dcb, but we start somewhere (potentially) in
+ * the middle of the loop so we need to manully do this.
+ */
+ pos = start;
+ do {
+ struct list_head *waiting_list_head = &pos->srb_waiting_list;
+
+ /* Make sure, the next another device gets scheduled ... */
+ acb->dcb_run_robin = dcb_get_next(dcb_list_head,
+ acb->dcb_run_robin);
+
+ if (list_empty(waiting_list_head) ||
+ pos->max_command <= list_size(&pos->srb_going_list)) {
+ /* move to next dcb */
+ pos = dcb_get_next(dcb_list_head, pos);
+ } else {
+ srb = list_entry(waiting_list_head->next,
+ struct ScsiReqBlk, list);
+
+ /* Try to send to the bus */
+ if (!start_scsi(acb, pos, srb))
+ srb_waiting_to_going_move(pos, srb);
+ else
+ waiting_set_timer(acb, HZ/50);
+ break;
+ }
+ } while (pos != start);
+}
+
+
+/* Wake up waiting queue */
+static void waiting_timeout(unsigned long ptr)
+{
+ unsigned long flags;
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
+ dprintkdbg(DBG_1,
+ "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
+ DC395x_LOCK_IO(acb->scsi_host, flags);
+ waiting_process_next(acb);
+ DC395x_UNLOCK_IO(acb->scsi_host, flags);
+}
+
+
+/* Get the DCB for a given ID/LUN combination */
+static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
+{
+ return acb->children[id][lun];
+}
+
+
+/* Send SCSI Request Block (srb) to adapter (acb) */
+static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+
+ if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
+ acb->active_dcb ||
+ (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
+ srb_waiting_append(dcb, srb);
+ waiting_process_next(acb);
+ return;
+ }
+
+ if (!start_scsi(acb, dcb, srb))
+ srb_going_append(dcb, srb);
+ else {
+ srb_waiting_insert(dcb, srb);
+ waiting_set_timer(acb, HZ / 50);
+ }
+}
+
+/* Prepare SRB for being sent to Device DCB w/ command *cmd */
+static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ int nseg;
+ enum dma_data_direction dir = cmd->sc_data_direction;
+ dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
+ cmd, dcb->target_id, dcb->target_lun);
+
+ srb->dcb = dcb;
+ srb->cmd = cmd;
+ srb->sg_count = 0;
+ srb->total_xfer_length = 0;
+ srb->sg_bus_addr = 0;
+ srb->sg_index = 0;
+ srb->adapter_status = 0;
+ srb->target_status = 0;
+ srb->msg_count = 0;
+ srb->status = 0;
+ srb->flag = 0;
+ srb->state = 0;
+ srb->retry_count = 0;
+ srb->tag_number = TAG_NONE;
+ srb->scsi_phase = PH_BUS_FREE; /* initial phase */
+ srb->end_message = 0;
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+
+ if (dir == PCI_DMA_NONE || !nseg) {
+ dprintkdbg(DBG_0,
+ "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
+ cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
+ srb->segment_x[0].address);
+ } else {
+ int i;
+ u32 reqlen = scsi_bufflen(cmd);
+ struct scatterlist *sg;
+ struct SGentry *sgp = srb->segment_x;
+
+ srb->sg_count = nseg;
+
+ dprintkdbg(DBG_0,
+ "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
+ reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
+ srb->sg_count);
+
+ scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
+ u32 busaddr = (u32)sg_dma_address(sg);
+ u32 seglen = (u32)sg->length;
+ sgp[i].address = busaddr;
+ sgp[i].length = seglen;
+ srb->total_xfer_length += seglen;
+ }
+ sgp += srb->sg_count - 1;
+
+ /*
+ * adjust last page if too big as it is allocated
+ * on even page boundaries
+ */
+ if (srb->total_xfer_length > reqlen) {
+ sgp->length -= (srb->total_xfer_length - reqlen);
+ srb->total_xfer_length = reqlen;
+ }
+
+ /* Fixup for WIDE padding - make sure length is even */
+ if (dcb->sync_period & WIDE_SYNC &&
+ srb->total_xfer_length % 2) {
+ srb->total_xfer_length++;
+ sgp->length++;
+ }
+
+ srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
+ srb->segment_x,
+ SEGMENTX_LEN,
+ PCI_DMA_TODEVICE);
+
+ dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
+ srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
+ }
+
+ srb->request_length = srb->total_xfer_length;
+}
+
+
+/**
+ * dc395x_queue_command - queue scsi command passed from the mid
+ * layer, invoke 'done' on completion
+ *
+ * @cmd: pointer to scsi command object
+ * @done: function pointer to be invoked on completion
+ *
+ * Returns 1 if the adapter (host) is busy, else returns 0. One
+ * reason for an adapter to be busy is that the number
+ * of outstanding queued commands is already equal to
+ * struct Scsi_Host::can_queue .
+ *
+ * Required: if struct Scsi_Host::can_queue is ever non-zero
+ * then this function is required.
+ *
+ * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
+ * and is expected to be held on return.
+ *
+ **/
+static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct DeviceCtlBlk *dcb;
+ struct ScsiReqBlk *srb;
+ struct AdapterCtlBlk *acb =
+ (struct AdapterCtlBlk *)cmd->device->host->hostdata;
+ dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
+ cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
+
+ /* Assume BAD_TARGET; will be cleared later */
+ cmd->result = DID_BAD_TARGET << 16;
+
+ /* ignore invalid targets */
+ if (cmd->device->id >= acb->scsi_host->max_id ||
+ cmd->device->lun >= acb->scsi_host->max_lun ||
+ cmd->device->lun >31) {
+ goto complete;
+ }
+
+ /* does the specified lun on the specified device exist */
+ if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
+ dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
+ cmd->device->id, (u8)cmd->device->lun);
+ goto complete;
+ }
+
+ /* do we have a DCB for the device */
+ dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
+ if (!dcb) {
+ /* should never happen */
+ dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
+ cmd->device->id, (u8)cmd->device->lun);
+ goto complete;
+ }
+
+ /* set callback and clear result in the command */
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+ srb = srb_get_free(acb);
+ if (!srb)
+ {
+ /*
+ * Return 1 since we are unable to queue this command at this
+ * point in time.
+ */
+ dprintkdbg(DBG_0, "queue_command: No free srb's\n");
+ return 1;
+ }
+
+ build_srb(cmd, dcb, srb);
+
+ if (!list_empty(&dcb->srb_waiting_list)) {
+ /* append to waiting queue */
+ srb_waiting_append(dcb, srb);
+ waiting_process_next(acb);
+ } else {
+ /* process immediately */
+ send_srb(acb, srb);
+ }
+ dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
+ return 0;
+
+complete:
+ /*
+ * Complete the command immediatey, and then return 0 to
+ * indicate that we have handled the command. This is usually
+ * done when the commad is for things like non existent
+ * devices.
+ */
+ done(cmd);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(dc395x_queue_command)
+
+/*
+ * Return the disk geometry for the given SCSI device.
+ */
+static int dc395x_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int *info)
+{
+#ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
+ int heads, sectors, cylinders;
+ struct AdapterCtlBlk *acb;
+ int size = capacity;
+
+ dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
+ acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
+ heads = 64;
+ sectors = 32;
+ cylinders = size / (heads * sectors);
+
+ if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
+ heads = 255;
+ sectors = 63;
+ cylinders = size / (heads * sectors);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return 0;
+#else
+ return scsicam_bios_param(bdev, capacity, info);
+#endif
+}
+
+
+static void dump_register_info(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
+{
+ u16 pstat;
+ struct pci_dev *dev = acb->dev;
+ pci_read_config_word(dev, PCI_STATUS, &pstat);
+ if (!dcb)
+ dcb = acb->active_dcb;
+ if (!srb && dcb)
+ srb = dcb->active_srb;
+ if (srb) {
+ if (!srb->cmd)
+ dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
+ srb, srb->cmd);
+ else
+ dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
+ "cmnd=0x%02x <%02i-%i>\n",
+ srb, srb->cmd,
+ srb->cmd->cmnd[0], srb->cmd->device->id,
+ (u8)srb->cmd->device->lun);
+ printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
+ srb->segment_x, srb->sg_count, srb->sg_index,
+ srb->total_xfer_length);
+ printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
+ srb->state, srb->status, srb->scsi_phase,
+ (acb->active_dcb) ? "" : "not");
+ }
+ dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
+ "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
+ "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
+ "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
+ DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
+ DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
+ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
+ DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
+ DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
+ DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
+ DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
+ DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
+ DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
+ DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
+ DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
+ DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
+ dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
+ "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
+ "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
+ DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
+ DC395x_read8(acb, TRM_S1040_DMA_STATUS),
+ DC395x_read8(acb, TRM_S1040_DMA_INTEN),
+ DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
+ DC395x_read32(acb, TRM_S1040_DMA_XCNT),
+ DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
+ DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
+ DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
+ dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
+ "pci{status=0x%04x}\n",
+ DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
+ DC395x_read8(acb, TRM_S1040_GEN_STATUS),
+ DC395x_read8(acb, TRM_S1040_GEN_TIMER),
+ pstat);
+}
+
+
+static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
+{
+#if debug_enabled(DBG_FIFO)
+ u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
+ u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
+ if (!(fifocnt & 0x40))
+ dprintkdbg(DBG_FIFO,
+ "clear_fifo: (%i bytes) on phase %02x in %s\n",
+ fifocnt & 0x3f, lines, txt);
+#endif
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
+}
+
+
+static void reset_dev_param(struct AdapterCtlBlk *acb)
+{
+ struct DeviceCtlBlk *dcb;
+ struct NvRamType *eeprom = &acb->eeprom;
+ dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
+
+ list_for_each_entry(dcb, &acb->dcb_list, list) {
+ u8 period_index;
+
+ dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
+ dcb->sync_period = 0;
+ dcb->sync_offset = 0;
+
+ dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
+ period_index = eeprom->target[dcb->target_id].period & 0x07;
+ dcb->min_nego_period = clock_period[period_index];
+ if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
+ || !(acb->config & HCC_WIDE_CARD))
+ dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
+ }
+}
+
+
+/*
+ * perform a hard reset on the SCSI bus
+ * @cmd - some command for this host (for fetching hooks)
+ * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
+ */
+static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct AdapterCtlBlk *acb =
+ (struct AdapterCtlBlk *)cmd->device->host->hostdata;
+ dprintkl(KERN_INFO,
+ "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
+ cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
+
+ if (timer_pending(&acb->waiting_timer))
+ del_timer(&acb->waiting_timer);
+
+ /*
+ * disable interrupt
+ */
+ DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
+ DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
+ DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
+
+ reset_scsi_bus(acb);
+ udelay(500);
+
+ /* We may be in serious trouble. Wait some seconds */
+ acb->last_reset =
+ jiffies + 3 * HZ / 2 +
+ HZ * acb->eeprom.delay_time;
+
+ /*
+ * re-enable interrupt
+ */
+ /* Clear SCSI FIFO */
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
+ clear_fifo(acb, "eh_bus_reset");
+ /* Delete pending IRQ */
+ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
+ set_basic_config(acb);
+
+ reset_dev_param(acb);
+ doing_srb_done(acb, DID_RESET, cmd, 0);
+ acb->active_dcb = NULL;
+ acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE ,RESET_DEV */
+ waiting_process_next(acb);
+
+ return SUCCESS;
+}
+
+static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = __dc395x_eh_bus_reset(cmd);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+/*
+ * abort an errant SCSI command
+ * @cmd - command to be aborted
+ * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
+ */
+static int dc395x_eh_abort(struct scsi_cmnd *cmd)
+{
+ /*
+ * Look into our command queues: If it has not been sent already,
+ * we remove it and return success. Otherwise fail.
+ */
+ struct AdapterCtlBlk *acb =
+ (struct AdapterCtlBlk *)cmd->device->host->hostdata;
+ struct DeviceCtlBlk *dcb;
+ struct ScsiReqBlk *srb;
+ dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
+ cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
+
+ dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
+ if (!dcb) {
+ dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
+ return FAILED;
+ }
+
+ srb = find_cmd(cmd, &dcb->srb_waiting_list);
+ if (srb) {
+ srb_waiting_remove(dcb, srb);
+ pci_unmap_srb_sense(acb, srb);
+ pci_unmap_srb(acb, srb);
+ free_tag(dcb, srb);
+ srb_free_insert(acb, srb);
+ dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
+ cmd->result = DID_ABORT << 16;
+ return SUCCESS;
+ }
+ srb = find_cmd(cmd, &dcb->srb_going_list);
+ if (srb) {
+ dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
+ /* XXX: Should abort the command here */
+ } else {
+ dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
+ }
+ return FAILED;
+}
+
+
+/* SDTR */
+static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ u8 *ptr = srb->msgout_buf + srb->msg_count;
+ if (srb->msg_count > 1) {
+ dprintkl(KERN_INFO,
+ "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
+ srb->msg_count, srb->msgout_buf[0],
+ srb->msgout_buf[1]);
+ return;
+ }
+ if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
+ dcb->sync_offset = 0;
+ dcb->min_nego_period = 200 >> 2;
+ } else if (dcb->sync_offset == 0)
+ dcb->sync_offset = SYNC_NEGO_OFFSET;
+
+ *ptr++ = MSG_EXTENDED; /* (01h) */
+ *ptr++ = 3; /* length */
+ *ptr++ = EXTENDED_SDTR; /* (01h) */
+ *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
+ *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
+ srb->msg_count += 5;
+ srb->state |= SRB_DO_SYNC_NEGO;
+}
+
+
+/* WDTR */
+static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
+ (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
+ u8 *ptr = srb->msgout_buf + srb->msg_count;
+ if (srb->msg_count > 1) {
+ dprintkl(KERN_INFO,
+ "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
+ srb->msg_count, srb->msgout_buf[0],
+ srb->msgout_buf[1]);
+ return;
+ }
+ *ptr++ = MSG_EXTENDED; /* (01h) */
+ *ptr++ = 2; /* length */
+ *ptr++ = EXTENDED_WDTR; /* (03h) */
+ *ptr++ = wide;
+ srb->msg_count += 4;
+ srb->state |= SRB_DO_WIDE_NEGO;
+}
+
+
+#if 0
+/* Timer to work around chip flaw: When selecting and the bus is
+ * busy, we sometimes miss a Selection timeout IRQ */
+void selection_timeout_missed(unsigned long ptr);
+/* Sets the timer to wake us up */
+static void selto_timer(struct AdapterCtlBlk *acb)
+{
+ if (timer_pending(&acb->selto_timer))
+ return;
+ acb->selto_timer.function = selection_timeout_missed;
+ acb->selto_timer.data = (unsigned long) acb;
+ if (time_before
+ (jiffies + HZ, acb->last_reset + HZ / 2))
+ acb->selto_timer.expires =
+ acb->last_reset + HZ / 2 + 1;
+ else
+ acb->selto_timer.expires = jiffies + HZ + 1;
+ add_timer(&acb->selto_timer);
+}
+
+
+void selection_timeout_missed(unsigned long ptr)
+{
+ unsigned long flags;
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
+ struct ScsiReqBlk *srb;
+ dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
+ if (!acb->active_dcb || !acb->active_dcb->active_srb) {
+ dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
+ return;
+ }
+ DC395x_LOCK_IO(acb->scsi_host, flags);
+ srb = acb->active_dcb->active_srb;
+ disconnect(acb);
+ DC395x_UNLOCK_IO(acb->scsi_host, flags);
+}
+#endif
+
+
+static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
+ struct ScsiReqBlk* srb)
+{
+ u16 s_stat2, return_code;
+ u8 s_stat, scsicommand, i, identify_message;
+ u8 *ptr;
+ dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
+ dcb->target_id, dcb->target_lun, srb);
+
+ srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
+
+ s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
+ s_stat2 = 0;
+ s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
+#if 1
+ if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
+ dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
+ s_stat, s_stat2);
+ /*
+ * Try anyway?
+ *
+ * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
+ * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
+ * (This is likely to be a bug in the hardware. Obviously, most people
+ * only have one initiator per SCSI bus.)
+ * Instead let this fail and have the timer make sure the command is
+ * tried again after a short time
+ */
+ /*selto_timer (acb); */
+ return 1;
+ }
+#endif
+ if (acb->active_dcb) {
+ dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
+ "command while another command (0x%p) is active.",
+ srb->cmd,
+ acb->active_dcb->active_srb ?
+ acb->active_dcb->active_srb->cmd : 0);
+ return 1;
+ }
+ if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
+ dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
+ return 1;
+ }
+ /* Allow starting of SCSI commands half a second before we allow the mid-level
+ * to queue them again after a reset */
+ if (time_before(jiffies, acb->last_reset - HZ / 2)) {
+ dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
+ return 1;
+ }
+
+ /* Flush FIFO */
+ clear_fifo(acb, "start_scsi");
+ DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
+ DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
+ DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
+ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
+ srb->scsi_phase = PH_BUS_FREE; /* initial phase */
+
+ identify_message = dcb->identify_msg;
+ /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
+ /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
+ if (srb->flag & AUTO_REQSENSE)
+ identify_message &= 0xBF;
+
+ if (((srb->cmd->cmnd[0] == INQUIRY)
+ || (srb->cmd->cmnd[0] == REQUEST_SENSE)
+ || (srb->flag & AUTO_REQSENSE))
+ && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
+ && !(dcb->sync_mode & WIDE_NEGO_DONE))
+ || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
+ && !(dcb->sync_mode & SYNC_NEGO_DONE)))
+ && (dcb->target_lun == 0)) {
+ srb->msgout_buf[0] = identify_message;
+ srb->msg_count = 1;
+ scsicommand = SCMD_SEL_ATNSTOP;
+ srb->state = SRB_MSGOUT;
+#ifndef SYNC_FIRST
+ if (dcb->sync_mode & WIDE_NEGO_ENABLE
+ && dcb->inquiry7 & SCSI_INQ_WBUS16) {
+ build_wdtr(acb, dcb, srb);
+ goto no_cmd;
+ }
+#endif
+ if (dcb->sync_mode & SYNC_NEGO_ENABLE
+ && dcb->inquiry7 & SCSI_INQ_SYNC) {
+ build_sdtr(acb, dcb, srb);
+ goto no_cmd;
+ }
+ if (dcb->sync_mode & WIDE_NEGO_ENABLE
+ && dcb->inquiry7 & SCSI_INQ_WBUS16) {
+ build_wdtr(acb, dcb, srb);
+ goto no_cmd;
+ }
+ srb->msg_count = 0;
+ }
+ /* Send identify message */
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
+
+ scsicommand = SCMD_SEL_ATN;
+ srb->state = SRB_START_;
+#ifndef DC395x_NO_TAGQ
+ if ((dcb->sync_mode & EN_TAG_QUEUEING)
+ && (identify_message & 0xC0)) {
+ /* Send Tag message */
+ u32 tag_mask = 1;
+ u8 tag_number = 0;
+ while (tag_mask & dcb->tag_mask
+ && tag_number < dcb->max_command) {
+ tag_mask = tag_mask << 1;
+ tag_number++;
+ }
+ if (tag_number >= dcb->max_command) {
+ dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
+ "Out of tags target=<%02i-%i>)\n",
+ srb->cmd, srb->cmd->device->id,
+ (u8)srb->cmd->device->lun);
+ srb->state = SRB_READY;
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
+ DO_HWRESELECT);
+ return 1;
+ }
+ /* Send Tag id */
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
+ dcb->tag_mask |= tag_mask;
+ srb->tag_number = tag_number;
+ scsicommand = SCMD_SEL_ATN3;
+ srb->state = SRB_START_;
+ }
+#endif
+/*polling:*/
+ /* Send CDB ..command block ......... */
+ dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
+ srb->cmd->cmnd[0], srb->tag_number);
+ if (srb->flag & AUTO_REQSENSE) {
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ } else {
+ ptr = (u8 *)srb->cmd->cmnd;
+ for (i = 0; i < srb->cmd->cmd_len; i++)
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
+ }
+ no_cmd:
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
+ DO_HWRESELECT | DO_DATALATCH);
+ if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
+ /*
+ * If start_scsi return 1:
+ * we caught an interrupt (must be reset or reselection ... )
+ * : Let's process it first!
+ */
+ dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
+ srb->cmd, dcb->target_id, dcb->target_lun);
+ srb->state = SRB_READY;
+ free_tag(dcb, srb);
+ srb->msg_count = 0;
+ return_code = 1;
+ /* This IRQ should NOT get lost, as we did not acknowledge it */
+ } else {
+ /*
+ * If start_scsi returns 0:
+ * we know that the SCSI processor is free
+ */
+ srb->scsi_phase = PH_BUS_FREE; /* initial phase */
+ dcb->active_srb = srb;
+ acb->active_dcb = dcb;
+ return_code = 0;
+ /* it's important for atn stop */
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
+ DO_DATALATCH | DO_HWRESELECT);
+ /* SCSI command */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
+ }
+ return return_code;
+}
+
+
+#define DC395x_ENABLE_MSGOUT \
+ DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
+ srb->state |= SRB_MSGOUT
+
+
+/* abort command */
+static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb)
+{
+ srb->msgout_buf[0] = ABORT;
+ srb->msg_count = 1;
+ DC395x_ENABLE_MSGOUT;
+ srb->state &= ~SRB_MSGIN;
+ srb->state |= SRB_MSGOUT;
+}
+
+
+/**
+ * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
+ * have been triggered for this card.
+ *
+ * @acb: a pointer to the adpter control block
+ * @scsi_status: the status return when we checked the card
+ **/
+static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
+ u16 scsi_status)
+{
+ struct DeviceCtlBlk *dcb;
+ struct ScsiReqBlk *srb;
+ u16 phase;
+ u8 scsi_intstatus;
+ unsigned long flags;
+ void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
+ u16 *);
+
+ DC395x_LOCK_IO(acb->scsi_host, flags);
+
+ /* This acknowledges the IRQ */
+ scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
+ if ((scsi_status & 0x2007) == 0x2002)
+ dprintkl(KERN_DEBUG,
+ "COP after COP completed? %04x\n", scsi_status);
+ if (debug_enabled(DBG_KG)) {
+ if (scsi_intstatus & INT_SELTIMEOUT)
+ dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
+ }
+ /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */
+
+ if (timer_pending(&acb->selto_timer))
+ del_timer(&acb->selto_timer);
+
+ if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
+ disconnect(acb); /* bus free interrupt */
+ goto out_unlock;
+ }
+ if (scsi_intstatus & INT_RESELECTED) {
+ reselect(acb);
+ goto out_unlock;
+ }
+ if (scsi_intstatus & INT_SELECT) {
+ dprintkl(KERN_INFO, "Host does not support target mode!\n");
+ goto out_unlock;
+ }
+ if (scsi_intstatus & INT_SCSIRESET) {
+ scsi_reset_detect(acb);
+ goto out_unlock;
+ }
+ if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
+ dcb = acb->active_dcb;
+ if (!dcb) {
+ dprintkl(KERN_DEBUG,
+ "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
+ scsi_status, scsi_intstatus);
+ goto out_unlock;
+ }
+ srb = dcb->active_srb;
+ if (dcb->flag & ABORT_DEV_) {
+ dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
+ enable_msgout_abort(acb, srb);
+ }
+
+ /* software sequential machine */
+ phase = (u16)srb->scsi_phase;
+
+ /*
+ * 62037 or 62137
+ * call dc395x_scsi_phase0[]... "phase entry"
+ * handle every phase before start transfer
+ */
+ /* data_out_phase0, phase:0 */
+ /* data_in_phase0, phase:1 */
+ /* command_phase0, phase:2 */
+ /* status_phase0, phase:3 */
+ /* nop0, phase:4 PH_BUS_FREE .. initial phase */
+ /* nop0, phase:5 PH_BUS_FREE .. initial phase */
+ /* msgout_phase0, phase:6 */
+ /* msgin_phase0, phase:7 */
+ dc395x_statev = dc395x_scsi_phase0[phase];
+ dc395x_statev(acb, srb, &scsi_status);
+
+ /*
+ * if there were any exception occurred scsi_status
+ * will be modify to bus free phase new scsi_status
+ * transfer out from ... previous dc395x_statev
+ */
+ srb->scsi_phase = scsi_status & PHASEMASK;
+ phase = (u16)scsi_status & PHASEMASK;
+
+ /*
+ * call dc395x_scsi_phase1[]... "phase entry" handle
+ * every phase to do transfer
+ */
+ /* data_out_phase1, phase:0 */
+ /* data_in_phase1, phase:1 */
+ /* command_phase1, phase:2 */
+ /* status_phase1, phase:3 */
+ /* nop1, phase:4 PH_BUS_FREE .. initial phase */
+ /* nop1, phase:5 PH_BUS_FREE .. initial phase */
+ /* msgout_phase1, phase:6 */
+ /* msgin_phase1, phase:7 */
+ dc395x_statev = dc395x_scsi_phase1[phase];
+ dc395x_statev(acb, srb, &scsi_status);
+ }
+ out_unlock:
+ DC395x_UNLOCK_IO(acb->scsi_host, flags);
+}
+
+
+static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
+{
+ struct AdapterCtlBlk *acb = dev_id;
+ u16 scsi_status;
+ u8 dma_status;
+ irqreturn_t handled = IRQ_NONE;
+
+ /*
+ * Check for pending interrupt
+ */
+ scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
+ dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
+ if (scsi_status & SCSIINTERRUPT) {
+ /* interrupt pending - let's process it! */
+ dc395x_handle_interrupt(acb, scsi_status);
+ handled = IRQ_HANDLED;
+ }
+ else if (dma_status & 0x20) {
+ /* Error from the DMA engine */
+ dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
+#if 0
+ dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
+ if (acb->active_dcb) {
+ acb->active_dcb-> flag |= ABORT_DEV_;
+ if (acb->active_dcb->active_srb)
+ enable_msgout_abort(acb, acb->active_dcb->active_srb);
+ }
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
+#else
+ dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
+ acb = NULL;
+#endif
+ handled = IRQ_HANDLED;
+ }
+
+ return handled;
+}
+
+
+static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
+ if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
+ *pscsi_status = PH_BUS_FREE; /*.. initial phase */
+
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ srb->state &= ~SRB_MSGOUT;
+}
+
+
+static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ u16 i;
+ u8 *ptr;
+ dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
+
+ clear_fifo(acb, "msgout_phase1");
+ if (!(srb->state & SRB_MSGOUT)) {
+ srb->state |= SRB_MSGOUT;
+ dprintkl(KERN_DEBUG,
+ "msgout_phase1: (0x%p) Phase unexpected\n",
+ srb->cmd); /* So what ? */
+ }
+ if (!srb->msg_count) {
+ dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
+ srb->cmd);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
+ return;
+ }
+ ptr = (u8 *)srb->msgout_buf;
+ for (i = 0; i < srb->msg_count; i++)
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
+ srb->msg_count = 0;
+ if (srb->msgout_buf[0] == MSG_ABORT)
+ srb->state = SRB_ABORT_SENT;
+
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
+}
+
+
+static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
+}
+
+
+static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ struct DeviceCtlBlk *dcb;
+ u8 *ptr;
+ u16 i;
+ dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
+
+ clear_fifo(acb, "command_phase1");
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
+ if (!(srb->flag & AUTO_REQSENSE)) {
+ ptr = (u8 *)srb->cmd->cmnd;
+ for (i = 0; i < srb->cmd->cmd_len; i++) {
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
+ ptr++;
+ }
+ } else {
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
+ dcb = acb->active_dcb;
+ /* target id */
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ }
+ srb->state |= SRB_COMMAND;
+ /* it's important for atn stop */
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
+ /* SCSI command */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
+}
+
+
+/*
+ * Verify that the remaining space in the hw sg lists is the same as
+ * the count of remaining bytes in srb->total_xfer_length
+ */
+static void sg_verify_length(struct ScsiReqBlk *srb)
+{
+ if (debug_enabled(DBG_SG)) {
+ unsigned len = 0;
+ unsigned idx = srb->sg_index;
+ struct SGentry *psge = srb->segment_x + idx;
+ for (; idx < srb->sg_count; psge++, idx++)
+ len += psge->length;
+ if (len != srb->total_xfer_length)
+ dprintkdbg(DBG_SG,
+ "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
+ srb->total_xfer_length, len);
+ }
+}
+
+
+/*
+ * Compute the next Scatter Gather list index and adjust its length
+ * and address if necessary
+ */
+static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
+{
+ u8 idx;
+ u32 xferred = srb->total_xfer_length - left; /* bytes transferred */
+ struct SGentry *psge = srb->segment_x + srb->sg_index;
+
+ dprintkdbg(DBG_0,
+ "sg_update_list: Transferred %i of %i bytes, %i remain\n",
+ xferred, srb->total_xfer_length, left);
+ if (xferred == 0) {
+ /* nothing to update since we did not transfer any data */
+ return;
+ }
+
+ sg_verify_length(srb);
+ srb->total_xfer_length = left; /* update remaining count */
+ for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
+ if (xferred >= psge->length) {
+ /* Complete SG entries done */
+ xferred -= psge->length;
+ } else {
+ /* Partial SG entry done */
+ psge->length -= xferred;
+ psge->address += xferred;
+ srb->sg_index = idx;
+ pci_dma_sync_single_for_device(srb->dcb->
+ acb->dev,
+ srb->sg_bus_addr,
+ SEGMENTX_LEN,
+ PCI_DMA_TODEVICE);
+ break;
+ }
+ psge++;
+ }
+ sg_verify_length(srb);
+}
+
+
+/*
+ * We have transferred a single byte (PIO mode?) and need to update
+ * the count of bytes remaining (total_xfer_length) and update the sg
+ * entry to either point to next byte in the current sg entry, or of
+ * already at the end to point to the start of the next sg entry
+ */
+static void sg_subtract_one(struct ScsiReqBlk *srb)
+{
+ sg_update_list(srb, srb->total_xfer_length - 1);
+}
+
+
+/*
+ * cleanup_after_transfer
+ *
+ * Makes sure, DMA and SCSI engine are empty, after the transfer has finished
+ * KG: Currently called from StatusPhase1 ()
+ * Should probably also be called from other places
+ * Best might be to call it in DataXXPhase0, if new phase will differ
+ */
+static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb)
+{
+ /*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
+ if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) { /* read */
+ if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
+ clear_fifo(acb, "cleanup/in");
+ if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
+ } else { /* write */
+ if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
+ if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
+ clear_fifo(acb, "cleanup/out");
+ }
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
+}
+
+
+/*
+ * Those no of bytes will be transferred w/ PIO through the SCSI FIFO
+ * Seems to be needed for unknown reasons; could be a hardware bug :-(
+ */
+#define DC395x_LASTPIO 4
+
+
+static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+ u16 scsi_status = *pscsi_status;
+ u32 d_left_counter = 0;
+ dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
+
+ /*
+ * KG: We need to drain the buffers before we draw any conclusions!
+ * This means telling the DMA to push the rest into SCSI, telling
+ * SCSI to push the rest to the bus.
+ * However, the device might have been the one to stop us (phase
+ * change), and the data in transit just needs to be accounted so
+ * it can be retransmitted.)
+ */
+ /*
+ * KG: Stop DMA engine pushing more data into the SCSI FIFO
+ * If we need more data, the DMA SG list will be freshly set up, anyway
+ */
+ dprintkdbg(DBG_PIO, "data_out_phase0: "
+ "DMA{fifocnt=0x%02x fifostat=0x%02x} "
+ "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
+ DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
+ srb->total_xfer_length);
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
+
+ if (!(srb->state & SRB_XFERPAD)) {
+ if (scsi_status & PARITYERROR)
+ srb->status |= PARITY_ERROR;
+
+ /*
+ * KG: Right, we can't just rely on the SCSI_COUNTER, because this
+ * is the no of bytes it got from the DMA engine not the no it
+ * transferred successfully to the device. (And the difference could
+ * be as much as the FIFO size, I guess ...)
+ */
+ if (!(scsi_status & SCSIXFERDONE)) {
+ /*
+ * when data transfer from DMA FIFO to SCSI FIFO
+ * if there was some data left in SCSI FIFO
+ */
+ d_left_counter =
+ (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
+ 0x1F);
+ if (dcb->sync_period & WIDE_SYNC)
+ d_left_counter <<= 1;
+
+ dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
+ "SCSI{fifocnt=0x%02x cnt=0x%08x} "
+ "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
+ (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
+ DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
+ DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
+ DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
+ }
+ /*
+ * calculate all the residue data that not yet tranfered
+ * SCSI transfer counter + left in SCSI FIFO data
+ *
+ * .....TRM_S1040_SCSI_COUNTER (24bits)
+ * The counter always decrement by one for every SCSI byte transfer.
+ * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
+ * The counter is SCSI FIFO offset counter (in units of bytes or! words)
+ */
+ if (srb->total_xfer_length > DC395x_LASTPIO)
+ d_left_counter +=
+ DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
+
+ /* Is this a good idea? */
+ /*clear_fifo(acb, "DOP1"); */
+ /* KG: What is this supposed to be useful for? WIDE padding stuff? */
+ if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
+ && scsi_bufflen(srb->cmd) % 2) {
+ d_left_counter = 0;
+ dprintkl(KERN_INFO,
+ "data_out_phase0: Discard 1 byte (0x%02x)\n",
+ scsi_status);
+ }
+ /*
+ * KG: Oops again. Same thinko as above: The SCSI might have been
+ * faster than the DMA engine, so that it ran out of data.
+ * In that case, we have to do just nothing!
+ * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
+ */
+ /*
+ * KG: This is nonsense: We have been WRITING data to the bus
+ * If the SCSI engine has no bytes left, how should the DMA engine?
+ */
+ if (d_left_counter == 0) {
+ srb->total_xfer_length = 0;
+ } else {
+ /*
+ * if transfer not yet complete
+ * there were some data residue in SCSI FIFO or
+ * SCSI transfer counter not empty
+ */
+ long oldxferred =
+ srb->total_xfer_length - d_left_counter;
+ const int diff =
+ (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
+ sg_update_list(srb, d_left_counter);
+ /* KG: Most ugly hack! Apparently, this works around a chip bug */
+ if ((srb->segment_x[srb->sg_index].length ==
+ diff && scsi_sg_count(srb->cmd))
+ || ((oldxferred & ~PAGE_MASK) ==
+ (PAGE_SIZE - diff))
+ ) {
+ dprintkl(KERN_INFO, "data_out_phase0: "
+ "Work around chip bug (%i)?\n", diff);
+ d_left_counter =
+ srb->total_xfer_length - diff;
+ sg_update_list(srb, d_left_counter);
+ /*srb->total_xfer_length -= diff; */
+ /*srb->virt_addr += diff; */
+ /*if (srb->cmd->use_sg) */
+ /* srb->sg_index++; */
+ }
+ }
+ }
+ if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
+ cleanup_after_transfer(acb, srb);
+ }
+}
+
+
+static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
+ clear_fifo(acb, "data_out_phase1");
+ /* do prepare before transfer when data out phase */
+ data_io_transfer(acb, srb, XFERDATAOUT);
+}
+
+static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ u16 scsi_status = *pscsi_status;
+
+ dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
+
+ /*
+ * KG: DataIn is much more tricky than DataOut. When the device is finished
+ * and switches to another phase, the SCSI engine should be finished too.
+ * But: There might still be bytes left in its FIFO to be fetched by the DMA
+ * engine and transferred to memory.
+ * We should wait for the FIFOs to be emptied by that (is there any way to
+ * enforce this?) and then stop the DMA engine, because it might think, that
+ * there are more bytes to follow. Yes, the device might disconnect prior to
+ * having all bytes transferred!
+ * Also we should make sure that all data from the DMA engine buffer's really
+ * made its way to the system memory! Some documentation on this would not
+ * seem to be a bad idea, actually.
+ */
+ if (!(srb->state & SRB_XFERPAD)) {
+ u32 d_left_counter;
+ unsigned int sc, fc;
+
+ if (scsi_status & PARITYERROR) {
+ dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
+ "Parity Error\n", srb->cmd);
+ srb->status |= PARITY_ERROR;
+ }
+ /*
+ * KG: We should wait for the DMA FIFO to be empty ...
+ * but: it would be better to wait first for the SCSI FIFO and then the
+ * the DMA FIFO to become empty? How do we know, that the device not already
+ * sent data to the FIFO in a MsgIn phase, eg.?
+ */
+ if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
+#if 0
+ int ctr = 6000000;
+ dprintkl(KERN_DEBUG,
+ "DIP0: Wait for DMA FIFO to flush ...\n");
+ /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
+ /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
+ /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
+ while (!
+ (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
+ 0x80) && --ctr);
+ if (ctr < 6000000 - 1)
+ dprintkl(KERN_DEBUG
+ "DIP0: Had to wait for DMA ...\n");
+ if (!ctr)
+ dprintkl(KERN_ERR,
+ "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
+ /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
+#endif
+ dprintkdbg(DBG_KG, "data_in_phase0: "
+ "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
+ }
+ /* Now: Check remainig data: The SCSI counters should tell us ... */
+ sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
+ fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
+ d_left_counter = sc + ((fc & 0x1f)
+ << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
+ 0));
+ dprintkdbg(DBG_KG, "data_in_phase0: "
+ "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
+ "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
+ "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
+ fc,
+ (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
+ sc,
+ fc,
+ DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
+ DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
+ srb->total_xfer_length, d_left_counter);
+#if DC395x_LASTPIO
+ /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */
+ if (d_left_counter
+ && srb->total_xfer_length <= DC395x_LASTPIO) {
+ size_t left_io = srb->total_xfer_length;
+
+ /*u32 addr = (srb->segment_x[srb->sg_index].address); */
+ /*sg_update_list (srb, d_left_counter); */
+ dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
+ "for remaining %i bytes:",
+ fc & 0x1f,
+ (srb->dcb->sync_period & WIDE_SYNC) ?
+ "words" : "bytes",
+ srb->total_xfer_length);
+ if (srb->dcb->sync_period & WIDE_SYNC)
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
+ CFG2_WIDEFIFO);
+ while (left_io) {
+ unsigned char *virt, *base = NULL;
+ unsigned long flags = 0;
+ size_t len = left_io;
+ size_t offset = srb->request_length - left_io;
+
+ local_irq_save(flags);
+ /* Assumption: it's inside one page as it's at most 4 bytes and
+ I just assume it's on a 4-byte boundary */
+ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
+ srb->sg_count, &offset, &len);
+ virt = base + offset;
+
+ left_io -= len;
+
+ while (len) {
+ u8 byte;
+ byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ *virt++ = byte;
+
+ if (debug_enabled(DBG_PIO))
+ printk(" %02x", byte);
+
+ d_left_counter--;
+ sg_subtract_one(srb);
+
+ len--;
+
+ fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
+
+ if (fc == 0x40) {
+ left_io = 0;
+ break;
+ }
+ }
+
+ WARN_ON((fc != 0x40) == !d_left_counter);
+
+ if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
+ /* Read the last byte ... */
+ if (srb->total_xfer_length > 0) {
+ u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+
+ *virt++ = byte;
+ srb->total_xfer_length--;
+ if (debug_enabled(DBG_PIO))
+ printk(" %02x", byte);
+ }
+
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
+ }
+
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
+ }
+ /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
+ /*srb->total_xfer_length = 0; */
+ if (debug_enabled(DBG_PIO))
+ printk("\n");
+ }
+#endif /* DC395x_LASTPIO */
+
+#if 0
+ /*
+ * KG: This was in DATAOUT. Does it also belong here?
+ * Nobody seems to know what counter and fifo_cnt count exactly ...
+ */
+ if (!(scsi_status & SCSIXFERDONE)) {
+ /*
+ * when data transfer from DMA FIFO to SCSI FIFO
+ * if there was some data left in SCSI FIFO
+ */
+ d_left_counter =
+ (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
+ 0x1F);
+ if (srb->dcb->sync_period & WIDE_SYNC)
+ d_left_counter <<= 1;
+ /*
+ * if WIDE scsi SCSI FIFOCNT unit is word !!!
+ * so need to *= 2
+ * KG: Seems to be correct ...
+ */
+ }
+#endif
+ /* KG: This should not be needed any more! */
+ if (d_left_counter == 0
+ || (scsi_status & SCSIXFERCNT_2_ZERO)) {
+#if 0
+ int ctr = 6000000;
+ u8 TempDMAstatus;
+ do {
+ TempDMAstatus =
+ DC395x_read8(acb, TRM_S1040_DMA_STATUS);
+ } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
+ if (!ctr)
+ dprintkl(KERN_ERR,
+ "Deadlock in DataInPhase0 waiting for DMA!!\n");
+ srb->total_xfer_length = 0;
+#endif
+ srb->total_xfer_length = d_left_counter;
+ } else { /* phase changed */
+ /*
+ * parsing the case:
+ * when a transfer not yet complete
+ * but be disconnected by target
+ * if transfer not yet complete
+ * there were some data residue in SCSI FIFO or
+ * SCSI transfer counter not empty
+ */
+ sg_update_list(srb, d_left_counter);
+ }
+ }
+ /* KG: The target may decide to disconnect: Empty FIFO before! */
+ if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
+ cleanup_after_transfer(acb, srb);
+ }
+}
+
+
+static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
+ data_io_transfer(acb, srb, XFERDATAIN);
+}
+
+
+static void data_io_transfer(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb, u16 io_dir)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+ u8 bval;
+ dprintkdbg(DBG_0,
+ "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
+ ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
+ srb->total_xfer_length, srb->sg_index, srb->sg_count);
+ if (srb == acb->tmp_srb)
+ dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
+ if (srb->sg_index >= srb->sg_count) {
+ /* can't happen? out of bounds error */
+ return;
+ }
+
+ if (srb->total_xfer_length > DC395x_LASTPIO) {
+ u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
+ /*
+ * KG: What should we do: Use SCSI Cmd 0x90/0x92?
+ * Maybe, even ABORTXFER would be appropriate
+ */
+ if (dma_status & XFERPENDING) {
+ dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
+ "Expect trouble!\n");
+ dump_register_info(acb, dcb, srb);
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
+ }
+ /* clear_fifo(acb, "IO"); */
+ /*
+ * load what physical address of Scatter/Gather list table
+ * want to be transfer
+ */
+ srb->state |= SRB_DATA_XFER;
+ DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
+ if (scsi_sg_count(srb->cmd)) { /* with S/G */
+ io_dir |= DMACMD_SG;
+ DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
+ srb->sg_bus_addr +
+ sizeof(struct SGentry) *
+ srb->sg_index);
+ /* load how many bytes in the sg list table */
+ DC395x_write32(acb, TRM_S1040_DMA_XCNT,
+ ((u32)(srb->sg_count -
+ srb->sg_index) << 3));
+ } else { /* without S/G */
+ io_dir &= ~DMACMD_SG;
+ DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
+ srb->segment_x[0].address);
+ DC395x_write32(acb, TRM_S1040_DMA_XCNT,
+ srb->segment_x[0].length);
+ }
+ /* load total transfer length (24bits) max value 16Mbyte */
+ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
+ srb->total_xfer_length);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ if (io_dir & DMACMD_DIR) { /* read */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
+ SCMD_DMA_IN);
+ DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
+ } else {
+ DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
+ SCMD_DMA_OUT);
+ }
+
+ }
+#if DC395x_LASTPIO
+ else if (srb->total_xfer_length > 0) { /* The last four bytes: Do PIO */
+ /*
+ * load what physical address of Scatter/Gather list table
+ * want to be transfer
+ */
+ srb->state |= SRB_DATA_XFER;
+ /* load total transfer length (24bits) max value 16Mbyte */
+ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
+ srb->total_xfer_length);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ if (io_dir & DMACMD_DIR) { /* read */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
+ SCMD_FIFO_IN);
+ } else { /* write */
+ int ln = srb->total_xfer_length;
+ size_t left_io = srb->total_xfer_length;
+
+ if (srb->dcb->sync_period & WIDE_SYNC)
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
+ CFG2_WIDEFIFO);
+
+ while (left_io) {
+ unsigned char *virt, *base = NULL;
+ unsigned long flags = 0;
+ size_t len = left_io;
+ size_t offset = srb->request_length - left_io;
+
+ local_irq_save(flags);
+ /* Again, max 4 bytes */
+ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
+ srb->sg_count, &offset, &len);
+ virt = base + offset;
+
+ left_io -= len;
+
+ while (len--) {
+ if (debug_enabled(DBG_PIO))
+ printk(" %02x", *virt);
+
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
+
+ sg_subtract_one(srb);
+ }
+
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
+ }
+ if (srb->dcb->sync_period & WIDE_SYNC) {
+ if (ln % 2) {
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
+ if (debug_enabled(DBG_PIO))
+ printk(" |00");
+ }
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
+ }
+ /*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
+ if (debug_enabled(DBG_PIO))
+ printk("\n");
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
+ SCMD_FIFO_OUT);
+ }
+ }
+#endif /* DC395x_LASTPIO */
+ else { /* xfer pad */
+ u8 data = 0, data2 = 0;
+ if (srb->sg_count) {
+ srb->adapter_status = H_OVER_UNDER_RUN;
+ srb->status |= OVER_RUN;
+ }
+ /*
+ * KG: despite the fact that we are using 16 bits I/O ops
+ * the SCSI FIFO is only 8 bits according to the docs
+ * (we can set bit 1 in 0x8f to serialize FIFO access ...)
+ */
+ if (dcb->sync_period & WIDE_SYNC) {
+ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
+ CFG2_WIDEFIFO);
+ if (io_dir & DMACMD_DIR) {
+ data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ } else {
+ /* Danger, Robinson: If you find KGs
+ * scattered over the wide disk, the driver
+ * or chip is to blame :-( */
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
+ }
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
+ } else {
+ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
+ /* Danger, Robinson: If you find a collection of Ks on your disk
+ * something broke :-( */
+ if (io_dir & DMACMD_DIR)
+ data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ else
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
+ }
+ srb->state |= SRB_XFERPAD;
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ /* SCSI command */
+ bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
+ }
+}
+
+
+static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
+ srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
+ srb->state = SRB_COMPLETED;
+ *pscsi_status = PH_BUS_FREE; /*.. initial phase */
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
+}
+
+
+static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
+ srb->state = SRB_STATUS;
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
+}
+
+
+/* Check if the message is complete */
+static inline u8 msgin_completed(u8 * msgbuf, u32 len)
+{
+ if (*msgbuf == EXTENDED_MESSAGE) {
+ if (len < 2)
+ return 0;
+ if (len < msgbuf[1] + 2)
+ return 0;
+ } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
+ if (len < 2)
+ return 0;
+ return 1;
+}
+
+/* reject_msg */
+static inline void msgin_reject(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb)
+{
+ srb->msgout_buf[0] = MESSAGE_REJECT;
+ srb->msg_count = 1;
+ DC395x_ENABLE_MSGOUT;
+ srb->state &= ~SRB_MSGIN;
+ srb->state |= SRB_MSGOUT;
+ dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
+ srb->msgin_buf[0],
+ srb->dcb->target_id, srb->dcb->target_lun);
+}
+
+
+static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb, u8 tag)
+{
+ struct ScsiReqBlk *srb = NULL;
+ struct ScsiReqBlk *i;
+ dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
+ srb->cmd, tag, srb);
+
+ if (!(dcb->tag_mask & (1 << tag)))
+ dprintkl(KERN_DEBUG,
+ "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
+ dcb->tag_mask, tag);
+
+ if (list_empty(&dcb->srb_going_list))
+ goto mingx0;
+ list_for_each_entry(i, &dcb->srb_going_list, list) {
+ if (i->tag_number == tag) {
+ srb = i;
+ break;
+ }
+ }
+ if (!srb)
+ goto mingx0;
+
+ dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
+ srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
+ if (dcb->flag & ABORT_DEV_) {
+ /*srb->state = SRB_ABORT_SENT; */
+ enable_msgout_abort(acb, srb);
+ }
+
+ if (!(srb->state & SRB_DISCONNECT))
+ goto mingx0;
+
+ memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
+ srb->state |= dcb->active_srb->state;
+ srb->state |= SRB_DATA_XFER;
+ dcb->active_srb = srb;
+ /* How can we make the DORS happy? */
+ return srb;
+
+ mingx0:
+ srb = acb->tmp_srb;
+ srb->state = SRB_UNEXPECT_RESEL;
+ dcb->active_srb = srb;
+ srb->msgout_buf[0] = MSG_ABORT_TAG;
+ srb->msg_count = 1;
+ DC395x_ENABLE_MSGOUT;
+ dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
+ return srb;
+}
+
+
+static inline void reprogram_regs(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb)
+{
+ DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
+ DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
+ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
+ set_xfer_rate(acb, dcb);
+}
+
+
+/* set async transfer mode */
+static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+ dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun);
+
+ dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
+ dcb->sync_mode |= SYNC_NEGO_DONE;
+ /*dcb->sync_period &= 0; */
+ dcb->sync_offset = 0;
+ dcb->min_nego_period = 200 >> 2; /* 200ns <=> 5 MHz */
+ srb->state &= ~SRB_DO_SYNC_NEGO;
+ reprogram_regs(acb, dcb);
+ if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
+ && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
+ build_wdtr(acb, dcb, srb);
+ DC395x_ENABLE_MSGOUT;
+ dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
+ }
+}
+
+
+/* set sync transfer mode */
+static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+ u8 bval;
+ int fact;
+ dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
+ "(%02i.%01i MHz) Offset %i\n",
+ dcb->target_id, srb->msgin_buf[3] << 2,
+ (250 / srb->msgin_buf[3]),
+ ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
+ srb->msgin_buf[4]);
+
+ if (srb->msgin_buf[4] > 15)
+ srb->msgin_buf[4] = 15;
+ if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
+ dcb->sync_offset = 0;
+ else if (dcb->sync_offset == 0)
+ dcb->sync_offset = srb->msgin_buf[4];
+ if (srb->msgin_buf[4] > dcb->sync_offset)
+ srb->msgin_buf[4] = dcb->sync_offset;
+ else
+ dcb->sync_offset = srb->msgin_buf[4];
+ bval = 0;
+ while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
+ || dcb->min_nego_period >
+ clock_period[bval]))
+ bval++;
+ if (srb->msgin_buf[3] < clock_period[bval])
+ dprintkl(KERN_INFO,
+ "msgin_set_sync: Increase sync nego period to %ins\n",
+ clock_period[bval] << 2);
+ srb->msgin_buf[3] = clock_period[bval];
+ dcb->sync_period &= 0xf0;
+ dcb->sync_period |= ALT_SYNC | bval;
+ dcb->min_nego_period = srb->msgin_buf[3];
+
+ if (dcb->sync_period & WIDE_SYNC)
+ fact = 500;
+ else
+ fact = 250;
+
+ dprintkl(KERN_INFO,
+ "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
+ dcb->target_id, (fact == 500) ? "Wide16" : "",
+ dcb->min_nego_period << 2, dcb->sync_offset,
+ (fact / dcb->min_nego_period),
+ ((fact % dcb->min_nego_period) * 10 +
+ dcb->min_nego_period / 2) / dcb->min_nego_period);
+
+ if (!(srb->state & SRB_DO_SYNC_NEGO)) {
+ /* Reply with corrected SDTR Message */
+ dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
+ srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
+
+ memcpy(srb->msgout_buf, srb->msgin_buf, 5);
+ srb->msg_count = 5;
+ DC395x_ENABLE_MSGOUT;
+ dcb->sync_mode |= SYNC_NEGO_DONE;
+ } else {
+ if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
+ && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
+ build_wdtr(acb, dcb, srb);
+ DC395x_ENABLE_MSGOUT;
+ dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
+ }
+ }
+ srb->state &= ~SRB_DO_SYNC_NEGO;
+ dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
+
+ reprogram_regs(acb, dcb);
+}
+
+
+static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+ dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
+
+ dcb->sync_period &= ~WIDE_SYNC;
+ dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
+ dcb->sync_mode |= WIDE_NEGO_DONE;
+ srb->state &= ~SRB_DO_WIDE_NEGO;
+ reprogram_regs(acb, dcb);
+ if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
+ && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
+ build_sdtr(acb, dcb, srb);
+ DC395x_ENABLE_MSGOUT;
+ dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
+ }
+}
+
+static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
+{
+ struct DeviceCtlBlk *dcb = srb->dcb;
+ u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
+ && acb->config & HCC_WIDE_CARD) ? 1 : 0;
+ dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
+
+ if (srb->msgin_buf[3] > wide)
+ srb->msgin_buf[3] = wide;
+ /* Completed */
+ if (!(srb->state & SRB_DO_WIDE_NEGO)) {
+ dprintkl(KERN_DEBUG,
+ "msgin_set_wide: Wide nego initiated <%02i>\n",
+ dcb->target_id);
+ memcpy(srb->msgout_buf, srb->msgin_buf, 4);
+ srb->msg_count = 4;
+ srb->state |= SRB_DO_WIDE_NEGO;
+ DC395x_ENABLE_MSGOUT;
+ }
+
+ dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
+ if (srb->msgin_buf[3] > 0)
+ dcb->sync_period |= WIDE_SYNC;
+ else
+ dcb->sync_period &= ~WIDE_SYNC;
+ srb->state &= ~SRB_DO_WIDE_NEGO;
+ /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
+ dprintkdbg(DBG_1,
+ "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
+ (8 << srb->msgin_buf[3]), dcb->target_id);
+ reprogram_regs(acb, dcb);
+ if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
+ && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
+ build_sdtr(acb, dcb, srb);
+ DC395x_ENABLE_MSGOUT;
+ dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
+ }
+}
+
+
+/*
+ * extended message codes:
+ *
+ * code description
+ *
+ * 02h Reserved
+ * 00h MODIFY DATA POINTER
+ * 01h SYNCHRONOUS DATA TRANSFER REQUEST
+ * 03h WIDE DATA TRANSFER REQUEST
+ * 04h - 7Fh Reserved
+ * 80h - FFh Vendor specific
+ */
+static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ struct DeviceCtlBlk *dcb = acb->active_dcb;
+ dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
+
+ srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
+ if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
+ /* Now eval the msg */
+ switch (srb->msgin_buf[0]) {
+ case DISCONNECT:
+ srb->state = SRB_DISCONNECT;
+ break;
+
+ case SIMPLE_QUEUE_TAG:
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ srb =
+ msgin_qtag(acb, dcb,
+ srb->msgin_buf[1]);
+ break;
+
+ case MESSAGE_REJECT:
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
+ DO_CLRATN | DO_DATALATCH);
+ /* A sync nego message was rejected ! */
+ if (srb->state & SRB_DO_SYNC_NEGO) {
+ msgin_set_async(acb, srb);
+ break;
+ }
+ /* A wide nego message was rejected ! */
+ if (srb->state & SRB_DO_WIDE_NEGO) {
+ msgin_set_nowide(acb, srb);
+ break;
+ }
+ enable_msgout_abort(acb, srb);
+ /*srb->state |= SRB_ABORT_SENT */
+ break;
+
+ case EXTENDED_MESSAGE:
+ /* SDTR */
+ if (srb->msgin_buf[1] == 3
+ && srb->msgin_buf[2] == EXTENDED_SDTR) {
+ msgin_set_sync(acb, srb);
+ break;
+ }
+ /* WDTR */
+ if (srb->msgin_buf[1] == 2
+ && srb->msgin_buf[2] == EXTENDED_WDTR
+ && srb->msgin_buf[3] <= 2) { /* sanity check ... */
+ msgin_set_wide(acb, srb);
+ break;
+ }
+ msgin_reject(acb, srb);
+ break;
+
+ case MSG_IGNOREWIDE:
+ /* Discard wide residual */
+ dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
+ break;
+
+ case COMMAND_COMPLETE:
+ /* nothing has to be done */
+ break;
+
+ case SAVE_POINTERS:
+ /*
+ * SAVE POINTER may be ignored as we have the struct
+ * ScsiReqBlk* associated with the scsi command.
+ */
+ dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
+ "SAVE POINTER rem=%i Ignore\n",
+ srb->cmd, srb->total_xfer_length);
+ break;
+
+ case RESTORE_POINTERS:
+ dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
+ break;
+
+ case ABORT:
+ dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
+ "<%02i-%i> ABORT msg\n",
+ srb->cmd, dcb->target_id,
+ dcb->target_lun);
+ dcb->flag |= ABORT_DEV_;
+ enable_msgout_abort(acb, srb);
+ break;
+
+ default:
+ /* reject unknown messages */
+ if (srb->msgin_buf[0] & IDENTIFY_BASE) {
+ dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
+ srb->msg_count = 1;
+ srb->msgout_buf[0] = dcb->identify_msg;
+ DC395x_ENABLE_MSGOUT;
+ srb->state |= SRB_MSGOUT;
+ /*break; */
+ }
+ msgin_reject(acb, srb);
+ }
+
+ /* Clear counter and MsgIn state */
+ srb->state &= ~SRB_MSGIN;
+ acb->msg_len = 0;
+ }
+ *pscsi_status = PH_BUS_FREE;
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important ... you know! */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
+}
+
+
+static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+ dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
+ clear_fifo(acb, "msgin_phase1");
+ DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
+ if (!(srb->state & SRB_MSGIN)) {
+ srb->state &= ~SRB_DISCONNECT;
+ srb->state |= SRB_MSGIN;
+ }
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ /* SCSI command */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
+}
+
+
+static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+}
+
+
+static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
+ u16 *pscsi_status)
+{
+}
+
+
+static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
+{
+ struct DeviceCtlBlk *i;
+
+ /* set all lun device's period, offset */
+ if (dcb->identify_msg & 0x07)
+ return;
+
+ if (acb->scan_devices) {
+ current_sync_offset = dcb->sync_offset;
+ return;
+ }
+
+ list_for_each_entry(i, &acb->dcb_list, list)
+ if (i->target_id == dcb->target_id) {
+ i->sync_period = dcb->sync_period;
+ i->sync_offset = dcb->sync_offset;
+ i->sync_mode = dcb->sync_mode;
+ i->min_nego_period = dcb->min_nego_period;
+ }
+}
+
+
+static void disconnect(struct AdapterCtlBlk *acb)
+{
+ struct DeviceCtlBlk *dcb = acb->active_dcb;
+ struct ScsiReqBlk *srb;
+
+ if (!dcb) {
+ dprintkl(KERN_ERR, "disconnect: No such device\n");
+ udelay(500);
+ /* Suspend queue for a while */
+ acb->last_reset =
+ jiffies + HZ / 2 +
+ HZ * acb->eeprom.delay_time;
+ clear_fifo(acb, "disconnectEx");
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
+ return;
+ }
+ srb = dcb->active_srb;
+ acb->active_dcb = NULL;
+ dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
+
+ srb->scsi_phase = PH_BUS_FREE; /* initial phase */
+ clear_fifo(acb, "disconnect");
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
+ if (srb->state & SRB_UNEXPECT_RESEL) {
+ dprintkl(KERN_ERR,
+ "disconnect: Unexpected reselection <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun);
+ srb->state = 0;
+ waiting_process_next(acb);
+ } else if (srb->state & SRB_ABORT_SENT) {
+ dcb->flag &= ~ABORT_DEV_;
+ acb->last_reset = jiffies + HZ / 2 + 1;
+ dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
+ doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
+ waiting_process_next(acb);
+ } else {
+ if ((srb->state & (SRB_START_ + SRB_MSGOUT))
+ || !(srb->
+ state & (SRB_DISCONNECT + SRB_COMPLETED))) {
+ /*
+ * Selection time out
+ * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
+ */
+ /* Unexp. Disc / Sel Timeout */
+ if (srb->state != SRB_START_
+ && srb->state != SRB_MSGOUT) {
+ srb->state = SRB_READY;
+ dprintkl(KERN_DEBUG,
+ "disconnect: (0x%p) Unexpected\n",
+ srb->cmd);
+ srb->target_status = SCSI_STAT_SEL_TIMEOUT;
+ goto disc1;
+ } else {
+ /* Normal selection timeout */
+ dprintkdbg(DBG_KG, "disconnect: (0x%p) "
+ "<%02i-%i> SelTO\n", srb->cmd,
+ dcb->target_id, dcb->target_lun);
+ if (srb->retry_count++ > DC395x_MAX_RETRIES
+ || acb->scan_devices) {
+ srb->target_status =
+ SCSI_STAT_SEL_TIMEOUT;
+ goto disc1;
+ }
+ free_tag(dcb, srb);
+ srb_going_to_waiting_move(dcb, srb);
+ dprintkdbg(DBG_KG,
+ "disconnect: (0x%p) Retry\n",
+ srb->cmd);
+ waiting_set_timer(acb, HZ / 20);
+ }
+ } else if (srb->state & SRB_DISCONNECT) {
+ u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
+ /*
+ * SRB_DISCONNECT (This is what we expect!)
+ */
+ if (bval & 0x40) {
+ dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
+ " 0x%02x: ACK set! Other controllers?\n",
+ bval);
+ /* It could come from another initiator, therefore don't do much ! */
+ } else
+ waiting_process_next(acb);
+ } else if (srb->state & SRB_COMPLETED) {
+ disc1:
+ /*
+ ** SRB_COMPLETED
+ */
+ free_tag(dcb, srb);
+ dcb->active_srb = NULL;
+ srb->state = SRB_FREE;
+ srb_done(acb, dcb, srb);
+ }
+ }
+}
+
+
+static void reselect(struct AdapterCtlBlk *acb)
+{
+ struct DeviceCtlBlk *dcb = acb->active_dcb;
+ struct ScsiReqBlk *srb = NULL;
+ u16 rsel_tar_lun_id;
+ u8 id, lun;
+ u8 arblostflag = 0;
+ dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
+
+ clear_fifo(acb, "reselect");
+ /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
+ /* Read Reselected Target ID and LUN */
+ rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
+ if (dcb) { /* Arbitration lost but Reselection win */
+ srb = dcb->active_srb;
+ if (!srb) {
+ dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
+ "but active_srb == NULL\n");
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ return;
+ }
+ /* Why the if ? */
+ if (!acb->scan_devices) {
+ dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
+ "Arb lost but Resel win rsel=%i stat=0x%04x\n",
+ srb->cmd, dcb->target_id,
+ dcb->target_lun, rsel_tar_lun_id,
+ DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
+ arblostflag = 1;
+ /*srb->state |= SRB_DISCONNECT; */
+
+ srb->state = SRB_READY;
+ free_tag(dcb, srb);
+ srb_going_to_waiting_move(dcb, srb);
+ waiting_set_timer(acb, HZ / 20);
+
+ /* return; */
+ }
+ }
+ /* Read Reselected Target Id and LUN */
+ if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
+ dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
+ "Got %i!\n", rsel_tar_lun_id);
+ id = rsel_tar_lun_id & 0xff;
+ lun = (rsel_tar_lun_id >> 8) & 7;
+ dcb = find_dcb(acb, id, lun);
+ if (!dcb) {
+ dprintkl(KERN_ERR, "reselect: From non existent device "
+ "<%02i-%i>\n", id, lun);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ return;
+ }
+ acb->active_dcb = dcb;
+
+ if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
+ dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
+ "disconnection? <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun);
+
+ if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
+ srb = acb->tmp_srb;
+ dcb->active_srb = srb;
+ } else {
+ /* There can be only one! */
+ srb = dcb->active_srb;
+ if (!srb || !(srb->state & SRB_DISCONNECT)) {
+ /*
+ * abort command
+ */
+ dprintkl(KERN_DEBUG,
+ "reselect: w/o disconnected cmds <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun);
+ srb = acb->tmp_srb;
+ srb->state = SRB_UNEXPECT_RESEL;
+ dcb->active_srb = srb;
+ enable_msgout_abort(acb, srb);
+ } else {
+ if (dcb->flag & ABORT_DEV_) {
+ /*srb->state = SRB_ABORT_SENT; */
+ enable_msgout_abort(acb, srb);
+ } else
+ srb->state = SRB_DATA_XFER;
+
+ }
+ }
+ srb->scsi_phase = PH_BUS_FREE; /* initial phase */
+
+ /* Program HA ID, target ID, period and offset */
+ dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
+ DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */
+ DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */
+ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */
+ DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); /* sync period, wide */
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ /* SCSI command */
+ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
+}
+
+
+static inline u8 tagq_blacklist(char *name)
+{
+#ifndef DC395x_NO_TAGQ
+#if 0
+ u8 i;
+ for (i = 0; i < BADDEVCNT; i++)
+ if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
+ return 1;
+#endif
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+
+static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
+{
+ /* Check for SCSI format (ANSI and Response data format) */
+ if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
+ if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
+ && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
+ /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
+ /* ((dcb->dev_type == TYPE_DISK)
+ || (dcb->dev_type == TYPE_MOD)) && */
+ !tagq_blacklist(((char *)ptr) + 8)) {
+ if (dcb->max_command == 1)
+ dcb->max_command =
+ dcb->acb->tag_max_num;
+ dcb->sync_mode |= EN_TAG_QUEUEING;
+ /*dcb->tag_mask = 0; */
+ } else
+ dcb->max_command = 1;
+ }
+}
+
+
+static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiInqData *ptr)
+{
+ u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
+ dcb->dev_type = bval1;
+ /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
+ disc_tagq_set(dcb, ptr);
+}
+
+
+/* unmap mapped pci regions from SRB */
+static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
+{
+ struct scsi_cmnd *cmd = srb->cmd;
+ enum dma_data_direction dir = cmd->sc_data_direction;
+
+ if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
+ /* unmap DC395x SG list */
+ dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
+ srb->sg_bus_addr, SEGMENTX_LEN);
+ pci_unmap_single(acb->dev, srb->sg_bus_addr,
+ SEGMENTX_LEN,
+ PCI_DMA_TODEVICE);
+ dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
+ scsi_sg_count(cmd), scsi_bufflen(cmd));
+ /* unmap the sg segments */
+ scsi_dma_unmap(cmd);
+ }
+}
+
+
+/* unmap mapped pci sense buffer from SRB */
+static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
+ struct ScsiReqBlk *srb)
+{
+ if (!(srb->flag & AUTO_REQSENSE))
+ return;
+ /* Unmap sense buffer */
+ dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
+ srb->segment_x[0].address);
+ pci_unmap_single(acb->dev, srb->segment_x[0].address,
+ srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
+ /* Restore SG stuff */
+ srb->total_xfer_length = srb->xferred;
+ srb->segment_x[0].address =
+ srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
+ srb->segment_x[0].length =
+ srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
+}
+
+
+/*
+ * Complete execution of a SCSI command
+ * Signal completion to the generic SCSI driver
+ */
+static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ u8 tempcnt, status;
+ struct scsi_cmnd *cmd = srb->cmd;
+ enum dma_data_direction dir = cmd->sc_data_direction;
+ int ckc_only = 1;
+
+ dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
+ srb->cmd->device->id, (u8)srb->cmd->device->lun);
+ dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
+ srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
+ scsi_sgtalbe(cmd));
+ status = srb->target_status;
+ if (srb->flag & AUTO_REQSENSE) {
+ dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
+ pci_unmap_srb_sense(acb, srb);
+ /*
+ ** target status..........................
+ */
+ srb->flag &= ~AUTO_REQSENSE;
+ srb->adapter_status = 0;
+ srb->target_status = CHECK_CONDITION << 1;
+ if (debug_enabled(DBG_1)) {
+ switch (cmd->sense_buffer[2] & 0x0f) {
+ case NOT_READY:
+ dprintkl(KERN_DEBUG,
+ "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
+ cmd->cmnd[0], dcb->target_id,
+ dcb->target_lun, status, acb->scan_devices);
+ break;
+ case UNIT_ATTENTION:
+ dprintkl(KERN_DEBUG,
+ "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
+ cmd->cmnd[0], dcb->target_id,
+ dcb->target_lun, status, acb->scan_devices);
+ break;
+ case ILLEGAL_REQUEST:
+ dprintkl(KERN_DEBUG,
+ "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
+ cmd->cmnd[0], dcb->target_id,
+ dcb->target_lun, status, acb->scan_devices);
+ break;
+ case MEDIUM_ERROR:
+ dprintkl(KERN_DEBUG,
+ "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
+ cmd->cmnd[0], dcb->target_id,
+ dcb->target_lun, status, acb->scan_devices);
+ break;
+ case HARDWARE_ERROR:
+ dprintkl(KERN_DEBUG,
+ "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
+ cmd->cmnd[0], dcb->target_id,
+ dcb->target_lun, status, acb->scan_devices);
+ break;
+ }
+ if (cmd->sense_buffer[7] >= 6)
+ printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
+ "(0x%08x 0x%08x)\n",
+ cmd->sense_buffer[2], cmd->sense_buffer[12],
+ cmd->sense_buffer[13],
+ *((unsigned int *)(cmd->sense_buffer + 3)),
+ *((unsigned int *)(cmd->sense_buffer + 8)));
+ else
+ printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
+ cmd->sense_buffer[2],
+ *((unsigned int *)(cmd->sense_buffer + 3)));
+ }
+
+ if (status == (CHECK_CONDITION << 1)) {
+ cmd->result = DID_BAD_TARGET << 16;
+ goto ckc_e;
+ }
+ dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
+
+ if (srb->total_xfer_length
+ && srb->total_xfer_length >= cmd->underflow)
+ cmd->result =
+ MK_RES_LNX(DRIVER_SENSE, DID_OK,
+ srb->end_message, CHECK_CONDITION);
+ /*SET_RES_DID(cmd->result,DID_OK) */
+ else
+ cmd->result =
+ MK_RES_LNX(DRIVER_SENSE, DID_OK,
+ srb->end_message, CHECK_CONDITION);
+
+ goto ckc_e;
+ }
+
+/*************************************************************/
+ if (status) {
+ /*
+ * target status..........................
+ */
+ if (status_byte(status) == CHECK_CONDITION) {
+ request_sense(acb, dcb, srb);
+ return;
+ } else if (status_byte(status) == QUEUE_FULL) {
+ tempcnt = (u8)list_size(&dcb->srb_going_list);
+ dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
+ dcb->target_id, dcb->target_lun, tempcnt);
+ if (tempcnt > 1)
+ tempcnt--;
+ dcb->max_command = tempcnt;
+ free_tag(dcb, srb);
+ srb_going_to_waiting_move(dcb, srb);
+ waiting_set_timer(acb, HZ / 20);
+ srb->adapter_status = 0;
+ srb->target_status = 0;
+ return;
+ } else if (status == SCSI_STAT_SEL_TIMEOUT) {
+ srb->adapter_status = H_SEL_TIMEOUT;
+ srb->target_status = 0;
+ cmd->result = DID_NO_CONNECT << 16;
+ } else {
+ srb->adapter_status = 0;
+ SET_RES_DID(cmd->result, DID_ERROR);
+ SET_RES_MSG(cmd->result, srb->end_message);
+ SET_RES_TARGET(cmd->result, status);
+
+ }
+ } else {
+ /*
+ ** process initiator status..........................
+ */
+ status = srb->adapter_status;
+ if (status & H_OVER_UNDER_RUN) {
+ srb->target_status = 0;
+ SET_RES_DID(cmd->result, DID_OK);
+ SET_RES_MSG(cmd->result, srb->end_message);
+ } else if (srb->status & PARITY_ERROR) {
+ SET_RES_DID(cmd->result, DID_PARITY);
+ SET_RES_MSG(cmd->result, srb->end_message);
+ } else { /* No error */
+
+ srb->adapter_status = 0;
+ srb->target_status = 0;
+ SET_RES_DID(cmd->result, DID_OK);
+ }
+ }
+
+ if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
+ pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), dir);
+
+ ckc_only = 0;
+/* Check Error Conditions */
+ ckc_e:
+
+ if (cmd->cmnd[0] == INQUIRY) {
+ unsigned char *base = NULL;
+ struct ScsiInqData *ptr;
+ unsigned long flags = 0;
+ struct scatterlist* sg = scsi_sglist(cmd);
+ size_t offset = 0, len = sizeof(struct ScsiInqData);
+
+ local_irq_save(flags);
+ base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
+ ptr = (struct ScsiInqData *)(base + offset);
+
+ if (!ckc_only && (cmd->result & RES_DID) == 0
+ && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
+ && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
+ dcb->inquiry7 = ptr->Flags;
+
+ /*if( srb->cmd->cmnd[0] == INQUIRY && */
+ /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
+ if ((cmd->result == (DID_OK << 16)
+ || status_byte(cmd->result) &
+ CHECK_CONDITION)) {
+ if (!dcb->init_tcq_flag) {
+ add_dev(acb, dcb, ptr);
+ dcb->init_tcq_flag = 1;
+ }
+ }
+
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
+ }
+
+ /* Here is the info for Doug Gilbert's sg3 ... */
+ scsi_set_resid(cmd, srb->total_xfer_length);
+ /* This may be interpreted by sb. or not ... */
+ cmd->SCp.this_residual = srb->total_xfer_length;
+ cmd->SCp.buffers_residual = 0;
+ if (debug_enabled(DBG_KG)) {
+ if (srb->total_xfer_length)
+ dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
+ "cmnd=0x%02x Missed %i bytes\n",
+ cmd, cmd->device->id, (u8)cmd->device->lun,
+ cmd->cmnd[0], srb->total_xfer_length);
+ }
+
+ srb_going_remove(dcb, srb);
+ /* Add to free list */
+ if (srb == acb->tmp_srb)
+ dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
+ else {
+ dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
+ cmd, cmd->result);
+ srb_free_insert(acb, srb);
+ }
+ pci_unmap_srb(acb, srb);
+
+ cmd->scsi_done(cmd);
+ waiting_process_next(acb);
+}
+
+
+/* abort all cmds in our queues */
+static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
+ struct scsi_cmnd *cmd, u8 force)
+{
+ struct DeviceCtlBlk *dcb;
+ dprintkl(KERN_INFO, "doing_srb_done: pids ");
+
+ list_for_each_entry(dcb, &acb->dcb_list, list) {
+ struct ScsiReqBlk *srb;
+ struct ScsiReqBlk *tmp;
+ struct scsi_cmnd *p;
+
+ list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
+ enum dma_data_direction dir;
+ int result;
+
+ p = srb->cmd;
+ dir = p->sc_data_direction;
+ result = MK_RES(0, did_flag, 0, 0);
+ printk("G:%p(%02i-%i) ", p,
+ p->device->id, (u8)p->device->lun);
+ srb_going_remove(dcb, srb);
+ free_tag(dcb, srb);
+ srb_free_insert(acb, srb);
+ p->result = result;
+ pci_unmap_srb_sense(acb, srb);
+ pci_unmap_srb(acb, srb);
+ if (force) {
+ /* For new EH, we normally don't need to give commands back,
+ * as they all complete or all time out */
+ p->scsi_done(p);
+ }
+ }
+ if (!list_empty(&dcb->srb_going_list))
+ dprintkl(KERN_DEBUG,
+ "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun);
+ if (dcb->tag_mask)
+ dprintkl(KERN_DEBUG,
+ "tag_mask for <%02i-%i> should be empty, is %08x!\n",
+ dcb->target_id, dcb->target_lun,
+ dcb->tag_mask);
+
+ /* Waiting queue */
+ list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
+ int result;
+ p = srb->cmd;
+
+ result = MK_RES(0, did_flag, 0, 0);
+ printk("W:%p<%02i-%i>", p, p->device->id,
+ (u8)p->device->lun);
+ srb_waiting_remove(dcb, srb);
+ srb_free_insert(acb, srb);
+ p->result = result;
+ pci_unmap_srb_sense(acb, srb);
+ pci_unmap_srb(acb, srb);
+ if (force) {
+ /* For new EH, we normally don't need to give commands back,
+ * as they all complete or all time out */
+ cmd->scsi_done(cmd);
+ }
+ }
+ if (!list_empty(&dcb->srb_waiting_list))
+ dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
+ list_size(&dcb->srb_waiting_list), dcb->target_id,
+ dcb->target_lun);
+ dcb->flag &= ~ABORT_DEV_;
+ }
+ printk("\n");
+}
+
+
+static void reset_scsi_bus(struct AdapterCtlBlk *acb)
+{
+ dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
+ acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
+
+ while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
+ /* nothing */;
+}
+
+
+static void set_basic_config(struct AdapterCtlBlk *acb)
+{
+ u8 bval;
+ u16 wval;
+ DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
+ if (acb->config & HCC_PARITY)
+ bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
+ else
+ bval = PHASELATCH | INITIATOR | BLOCKRST;
+
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
+
+ /* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
+ DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03); /* was 0x13: default */
+ /* program Host ID */
+ DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
+ /* set ansynchronous transfer */
+ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
+ /* Turn LED control off */
+ wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
+ DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
+ /* DMA config */
+ wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
+ wval |=
+ DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
+ DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
+ /* Clear pending interrupt status */
+ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
+ /* Enable SCSI interrupt */
+ DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
+ DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
+ /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
+ );
+}
+
+
+static void scsi_reset_detect(struct AdapterCtlBlk *acb)
+{
+ dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
+ /* delay half a second */
+ if (timer_pending(&acb->waiting_timer))
+ del_timer(&acb->waiting_timer);
+
+ DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
+ /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
+ udelay(500);
+ /* Maybe we locked up the bus? Then lets wait even longer ... */
+ acb->last_reset =
+ jiffies + 5 * HZ / 2 +
+ HZ * acb->eeprom.delay_time;
+
+ clear_fifo(acb, "scsi_reset_detect");
+ set_basic_config(acb);
+ /*1.25 */
+ /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */
+
+ if (acb->acb_flag & RESET_DEV) { /* RESET_DETECT, RESET_DONE, RESET_DEV */
+ acb->acb_flag |= RESET_DONE;
+ } else {
+ acb->acb_flag |= RESET_DETECT;
+ reset_dev_param(acb);
+ doing_srb_done(acb, DID_RESET, NULL, 1);
+ /*DC395x_RecoverSRB( acb ); */
+ acb->active_dcb = NULL;
+ acb->acb_flag = 0;
+ waiting_process_next(acb);
+ }
+}
+
+
+static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
+ struct ScsiReqBlk *srb)
+{
+ struct scsi_cmnd *cmd = srb->cmd;
+ dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
+ cmd, cmd->device->id, (u8)cmd->device->lun);
+
+ srb->flag |= AUTO_REQSENSE;
+ srb->adapter_status = 0;
+ srb->target_status = 0;
+
+ /* KG: Can this prevent crap sense data ? */
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+ /* Save some data */
+ srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
+ srb->segment_x[0].address;
+ srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
+ srb->segment_x[0].length;
+ srb->xferred = srb->total_xfer_length;
+ /* srb->segment_x : a one entry of S/G list table */
+ srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
+ srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
+ /* Map sense buffer */
+ srb->segment_x[0].address =
+ pci_map_single(acb->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
+ cmd->sense_buffer, srb->segment_x[0].address,
+ SCSI_SENSE_BUFFERSIZE);
+ srb->sg_count = 1;
+ srb->sg_index = 0;
+
+ if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */
+ dprintkl(KERN_DEBUG,
+ "request_sense: (0x%p) failed <%02i-%i>\n",
+ srb->cmd, dcb->target_id, dcb->target_lun);
+ srb_going_to_waiting_move(dcb, srb);
+ waiting_set_timer(acb, HZ / 100);
+ }
+}
+
+
+/**
+ * device_alloc - Allocate a new device instance. This create the
+ * devices instance and sets up all the data items. The adapter
+ * instance is required to obtain confiuration information for this
+ * device. This does *not* add this device to the adapters device
+ * list.
+ *
+ * @acb: The adapter to obtain configuration information from.
+ * @target: The target for the new device.
+ * @lun: The lun for the new device.
+ *
+ * Return the new device if successful or NULL on failure.
+ **/
+static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
+ u8 target, u8 lun)
+{
+ struct NvRamType *eeprom = &acb->eeprom;
+ u8 period_index = eeprom->target[target].period & 0x07;
+ struct DeviceCtlBlk *dcb;
+
+ dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
+ dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
+ if (!dcb)
+ return NULL;
+ dcb->acb = NULL;
+ INIT_LIST_HEAD(&dcb->srb_going_list);
+ INIT_LIST_HEAD(&dcb->srb_waiting_list);
+ dcb->active_srb = NULL;
+ dcb->tag_mask = 0;
+ dcb->max_command = 1;
+ dcb->target_id = target;
+ dcb->target_lun = lun;
+ dcb->dev_mode = eeprom->target[target].cfg0;
+#ifndef DC395x_NO_DISCONNECT
+ dcb->identify_msg =
+ IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
+#else
+ dcb->identify_msg = IDENTIFY(0, lun);
+#endif
+ dcb->inquiry7 = 0;
+ dcb->sync_mode = 0;
+ dcb->min_nego_period = clock_period[period_index];
+ dcb->sync_period = 0;
+ dcb->sync_offset = 0;
+ dcb->flag = 0;
+
+#ifndef DC395x_NO_WIDE
+ if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
+ && (acb->config & HCC_WIDE_CARD))
+ dcb->sync_mode |= WIDE_NEGO_ENABLE;
+#endif
+#ifndef DC395x_NO_SYNC
+ if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
+ if (!(lun) || current_sync_offset)
+ dcb->sync_mode |= SYNC_NEGO_ENABLE;
+#endif
+ if (dcb->target_lun != 0) {
+ /* Copy settings */
+ struct DeviceCtlBlk *p;
+ list_for_each_entry(p, &acb->dcb_list, list)
+ if (p->target_id == dcb->target_id)
+ break;
+ dprintkdbg(DBG_1,
+ "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun,
+ p->target_id, p->target_lun);
+ dcb->sync_mode = p->sync_mode;
+ dcb->sync_period = p->sync_period;
+ dcb->min_nego_period = p->min_nego_period;
+ dcb->sync_offset = p->sync_offset;
+ dcb->inquiry7 = p->inquiry7;
+ }
+ return dcb;
+}
+
+
+/**
+ * adapter_add_device - Adds the device instance to the adaptor instance.
+ *
+ * @acb: The adapter device to be updated
+ * @dcb: A newly created and initialised device instance to add.
+ **/
+static void adapter_add_device(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb)
+{
+ /* backpointer to adapter */
+ dcb->acb = acb;
+
+ /* set run_robin to this device if it is currently empty */
+ if (list_empty(&acb->dcb_list))
+ acb->dcb_run_robin = dcb;
+
+ /* add device to list */
+ list_add_tail(&dcb->list, &acb->dcb_list);
+
+ /* update device maps */
+ acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
+ acb->children[dcb->target_id][dcb->target_lun] = dcb;
+}
+
+
+/**
+ * adapter_remove_device - Removes the device instance from the adaptor
+ * instance. The device instance is not check in any way or freed by this.
+ * The caller is expected to take care of that. This will simply remove the
+ * device from the adapters data strcutures.
+ *
+ * @acb: The adapter device to be updated
+ * @dcb: A device that has previously been added to the adapter.
+ **/
+static void adapter_remove_device(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb)
+{
+ struct DeviceCtlBlk *i;
+ struct DeviceCtlBlk *tmp;
+ dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
+ dcb->target_id, dcb->target_lun);
+
+ /* fix up any pointers to this device that we have in the adapter */
+ if (acb->active_dcb == dcb)
+ acb->active_dcb = NULL;
+ if (acb->dcb_run_robin == dcb)
+ acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
+
+ /* unlink from list */
+ list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
+ if (dcb == i) {
+ list_del(&i->list);
+ break;
+ }
+
+ /* clear map and children */
+ acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
+ acb->children[dcb->target_id][dcb->target_lun] = NULL;
+ dcb->acb = NULL;
+}
+
+
+/**
+ * adapter_remove_and_free_device - Removes a single device from the adapter
+ * and then frees the device information.
+ *
+ * @acb: The adapter device to be updated
+ * @dcb: A device that has previously been added to the adapter.
+ */
+static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
+ struct DeviceCtlBlk *dcb)
+{
+ if (list_size(&dcb->srb_going_list) > 1) {
+ dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
+ "Won't remove because of %i active requests.\n",
+ dcb->target_id, dcb->target_lun,
+ list_size(&dcb->srb_going_list));
+ return;
+ }
+ adapter_remove_device(acb, dcb);
+ kfree(dcb);
+}
+
+
+/**
+ * adapter_remove_and_free_all_devices - Removes and frees all of the
+ * devices associated with the specified adapter.
+ *
+ * @acb: The adapter from which all devices should be removed.
+ **/
+static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
+{
+ struct DeviceCtlBlk *dcb;
+ struct DeviceCtlBlk *tmp;
+ dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
+ list_size(&acb->dcb_list));
+
+ list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
+ adapter_remove_and_free_device(acb, dcb);
+}
+
+
+/**
+ * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
+ * scsi device that we need to deal with. We allocate a new device and then
+ * insert that device into the adapters device list.
+ *
+ * @scsi_device: The new scsi device that we need to handle.
+ **/
+static int dc395x_slave_alloc(struct scsi_device *scsi_device)
+{
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
+ struct DeviceCtlBlk *dcb;
+
+ dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
+ if (!dcb)
+ return -ENOMEM;
+ adapter_add_device(acb, dcb);
+
+ return 0;
+}
+
+
+/**
+ * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
+ * device that is going away.
+ *
+ * @scsi_device: The new scsi device that we need to handle.
+ **/
+static void dc395x_slave_destroy(struct scsi_device *scsi_device)
+{
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
+ struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
+ if (dcb)
+ adapter_remove_and_free_device(acb, dcb);
+}
+
+
+
+
+/**
+ * trms1040_wait_30us: wait for 30 us
+ *
+ * Waits for 30us (using the chip by the looks of it..)
+ *
+ * @io_port: base I/O address
+ **/
+static void trms1040_wait_30us(unsigned long io_port)
+{
+ /* ScsiPortStallExecution(30); wait 30 us */
+ outb(5, io_port + TRM_S1040_GEN_TIMER);
+ while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
+ /* nothing */ ;
+}
+
+
+/**
+ * trms1040_write_cmd - write the secified command and address to
+ * chip
+ *
+ * @io_port: base I/O address
+ * @cmd: SB + op code (command) to send
+ * @addr: address to send
+ **/
+static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
+{
+ int i;
+ u8 send_data;
+
+ /* program SB + OP code */
+ for (i = 0; i < 3; i++, cmd <<= 1) {
+ send_data = NVR_SELECT;
+ if (cmd & 0x04) /* Start from bit 2 */
+ send_data |= NVR_BITOUT;
+
+ outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ outb((send_data | NVR_CLOCK),
+ io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ }
+
+ /* send address */
+ for (i = 0; i < 7; i++, addr <<= 1) {
+ send_data = NVR_SELECT;
+ if (addr & 0x40) /* Start from bit 6 */
+ send_data |= NVR_BITOUT;
+
+ outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ outb((send_data | NVR_CLOCK),
+ io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ }
+ outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+}
+
+
+/**
+ * trms1040_set_data - store a single byte in the eeprom
+ *
+ * Called from write all to write a single byte into the SSEEPROM
+ * Which is done one bit at a time.
+ *
+ * @io_port: base I/O address
+ * @addr: offset into EEPROM
+ * @byte: bytes to write
+ **/
+static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
+{
+ int i;
+ u8 send_data;
+
+ /* Send write command & address */
+ trms1040_write_cmd(io_port, 0x05, addr);
+
+ /* Write data */
+ for (i = 0; i < 8; i++, byte <<= 1) {
+ send_data = NVR_SELECT;
+ if (byte & 0x80) /* Start from bit 7 */
+ send_data |= NVR_BITOUT;
+
+ outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ }
+ outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ /* Disable chip select */
+ outb(0, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ /* Wait for write ready */
+ while (1) {
+ outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
+ break;
+ }
+
+ /* Disable chip select */
+ outb(0, io_port + TRM_S1040_GEN_NVRAM);
+}
+
+
+/**
+ * trms1040_write_all - write 128 bytes to the eeprom
+ *
+ * Write the supplied 128 bytes to the chips SEEPROM
+ *
+ * @eeprom: the data to write
+ * @io_port: the base io port
+ **/
+static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
+{
+ u8 *b_eeprom = (u8 *)eeprom;
+ u8 addr;
+
+ /* Enable SEEPROM */
+ outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
+ io_port + TRM_S1040_GEN_CONTROL);
+
+ /* write enable */
+ trms1040_write_cmd(io_port, 0x04, 0xFF);
+ outb(0, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ /* write */
+ for (addr = 0; addr < 128; addr++, b_eeprom++)
+ trms1040_set_data(io_port, addr, *b_eeprom);
+
+ /* write disable */
+ trms1040_write_cmd(io_port, 0x04, 0x00);
+ outb(0, io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+
+ /* Disable SEEPROM */
+ outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
+ io_port + TRM_S1040_GEN_CONTROL);
+}
+
+
+/**
+ * trms1040_get_data - get a single byte from the eeprom
+ *
+ * Called from read all to read a single byte into the SSEEPROM
+ * Which is done one bit at a time.
+ *
+ * @io_port: base I/O address
+ * @addr: offset into SEEPROM
+ *
+ * Returns the byte read.
+ **/
+static u8 trms1040_get_data(unsigned long io_port, u8 addr)
+{
+ int i;
+ u8 read_byte;
+ u8 result = 0;
+
+ /* Send read command & address */
+ trms1040_write_cmd(io_port, 0x06, addr);
+
+ /* read data */
+ for (i = 0; i < 8; i++) {
+ outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
+ trms1040_wait_30us(io_port);
+ outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
+
+ /* Get data bit while falling edge */
+ read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
+ result <<= 1;
+ if (read_byte & NVR_BITIN)
+ result |= 1;
+
+ trms1040_wait_30us(io_port);
+ }
+
+ /* Disable chip select */
+ outb(0, io_port + TRM_S1040_GEN_NVRAM);
+ return result;
+}
+
+
+/**
+ * trms1040_read_all - read all bytes from the eeprom
+ *
+ * Read the 128 bytes from the SEEPROM.
+ *
+ * @eeprom: where to store the data
+ * @io_port: the base io port
+ **/
+static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
+{
+ u8 *b_eeprom = (u8 *)eeprom;
+ u8 addr;
+
+ /* Enable SEEPROM */
+ outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
+ io_port + TRM_S1040_GEN_CONTROL);
+
+ /* read details */
+ for (addr = 0; addr < 128; addr++, b_eeprom++)
+ *b_eeprom = trms1040_get_data(io_port, addr);
+
+ /* Disable SEEPROM */
+ outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
+ io_port + TRM_S1040_GEN_CONTROL);
+}
+
+
+
+/**
+ * check_eeprom - get and check contents of the eeprom
+ *
+ * Read seeprom 128 bytes into the memory provider in eeprom.
+ * Checks the checksum and if it's not correct it uses a set of default
+ * values.
+ *
+ * @eeprom: caller allocated strcuture to read the eeprom data into
+ * @io_port: io port to read from
+ **/
+static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
+{
+ u16 *w_eeprom = (u16 *)eeprom;
+ u16 w_addr;
+ u16 cksum;
+ u32 d_addr;
+ u32 *d_eeprom;
+
+ trms1040_read_all(eeprom, io_port); /* read eeprom */
+
+ cksum = 0;
+ for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
+ w_addr++, w_eeprom++)
+ cksum += *w_eeprom;
+ if (cksum != 0x1234) {
+ /*
+ * Checksum is wrong.
+ * Load a set of defaults into the eeprom buffer
+ */
+ dprintkl(KERN_WARNING,
+ "EEProm checksum error: using default values and options.\n");
+ eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
+ eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
+ eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
+ eeprom->sub_sys_id[1] =
+ (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
+ eeprom->sub_class = 0x00;
+ eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
+ eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
+ eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
+ eeprom->device_id[1] =
+ (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
+ eeprom->reserved = 0x00;
+
+ for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
+ d_addr < 16; d_addr++, d_eeprom++)
+ *d_eeprom = 0x00000077; /* cfg3,cfg2,period,cfg0 */
+
+ *d_eeprom++ = 0x04000F07; /* max_tag,delay_time,channel_cfg,scsi_id */
+ *d_eeprom++ = 0x00000015; /* reserved1,boot_lun,boot_target,reserved0 */
+ for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
+ *d_eeprom = 0x00;
+
+ /* Now load defaults (maybe set by boot/module params) */
+ set_safe_settings();
+ fix_settings();
+ eeprom_override(eeprom);
+
+ eeprom->cksum = 0x00;
+ for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
+ w_addr < 63; w_addr++, w_eeprom++)
+ cksum += *w_eeprom;
+
+ *w_eeprom = 0x1234 - cksum;
+ trms1040_write_all(eeprom, io_port);
+ eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
+ } else {
+ set_safe_settings();
+ eeprom_index_to_delay(eeprom);
+ eeprom_override(eeprom);
+ }
+}
+
+
+/**
+ * print_eeprom_settings - output the eeprom settings
+ * to the kernel log so people can see what they were.
+ *
+ * @eeprom: The eeprom data strucutre to show details for.
+ **/
+static void print_eeprom_settings(struct NvRamType *eeprom)
+{
+ dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
+ eeprom->scsi_id,
+ eeprom->target[0].period,
+ clock_speed[eeprom->target[0].period] / 10,
+ clock_speed[eeprom->target[0].period] % 10,
+ eeprom->target[0].cfg0);
+ dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
+ eeprom->channel_cfg, eeprom->max_tag,
+ 1 << eeprom->max_tag, eeprom->delay_time);
+}
+
+
+/* Free SG tables */
+static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
+{
+ int i;
+ const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
+
+ for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
+ kfree(acb->srb_array[i].segment_x);
+}
+
+
+/*
+ * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
+ * should never cross a page boundary */
+static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
+{
+ const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
+ *SEGMENTX_LEN;
+ int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
+ const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
+ int srb_idx = 0;
+ unsigned i = 0;
+ struct SGentry *uninitialized_var(ptr);
+
+ for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
+ acb->srb_array[i].segment_x = NULL;
+
+ dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
+ while (pages--) {
+ ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ptr) {
+ adapter_sg_tables_free(acb);
+ return 1;
+ }
+ dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
+ PAGE_SIZE, ptr, srb_idx);
+ i = 0;
+ while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
+ acb->srb_array[srb_idx++].segment_x =
+ ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
+ }
+ if (i < srbs_per_page)
+ acb->srb.segment_x =
+ ptr + (i * DC395x_MAX_SG_LISTENTRY);
+ else
+ dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
+ return 0;
+}
+
+
+
+/**
+ * adapter_print_config - print adapter connection and termination
+ * config
+ *
+ * The io port in the adapter needs to have been set before calling
+ * this function.
+ *
+ * @acb: The adapter to print the information for.
+ **/
+static void adapter_print_config(struct AdapterCtlBlk *acb)
+{
+ u8 bval;
+
+ bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
+ dprintkl(KERN_INFO, "%sConnectors: ",
+ ((bval & WIDESCSI) ? "(Wide) " : ""));
+ if (!(bval & CON5068))
+ printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
+ if (!(bval & CON68))
+ printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
+ if (!(bval & CON50))
+ printk("int50 ");
+ if ((bval & (CON5068 | CON50 | CON68)) ==
+ 0 /*(CON5068 | CON50 | CON68) */ )
+ printk(" Oops! (All 3?) ");
+ bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
+ printk(" Termination: ");
+ if (bval & DIS_TERM)
+ printk("Disabled\n");
+ else {
+ if (bval & AUTOTERM)
+ printk("Auto ");
+ if (bval & LOW8TERM)
+ printk("Low ");
+ if (bval & UP8TERM)
+ printk("High ");
+ printk("\n");
+ }
+}
+
+
+/**
+ * adapter_init_params - Initialize the various parameters in the
+ * adapter structure. Note that the pointer to the scsi_host is set
+ * early (when this instance is created) and the io_port and irq
+ * values are set later after they have been reserved. This just gets
+ * everything set to a good starting position.
+ *
+ * The eeprom structure in the adapter needs to have been set before
+ * calling this function.
+ *
+ * @acb: The adapter to initialize.
+ **/
+static void adapter_init_params(struct AdapterCtlBlk *acb)
+{
+ struct NvRamType *eeprom = &acb->eeprom;
+ int i;
+
+ /* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
+ /* NOTE: acb->io_port_base is set at port registration time */
+ /* NOTE: acb->io_port_len is set at port registration time */
+
+ INIT_LIST_HEAD(&acb->dcb_list);
+ acb->dcb_run_robin = NULL;
+ acb->active_dcb = NULL;
+
+ INIT_LIST_HEAD(&acb->srb_free_list);
+ /* temp SRB for Q tag used or abort command used */
+ acb->tmp_srb = &acb->srb;
+ init_timer(&acb->waiting_timer);
+ init_timer(&acb->selto_timer);
+
+ acb->srb_count = DC395x_MAX_SRB_CNT;
+
+ acb->sel_timeout = DC395x_SEL_TIMEOUT; /* timeout=250ms */
+ /* NOTE: acb->irq_level is set at IRQ registration time */
+
+ acb->tag_max_num = 1 << eeprom->max_tag;
+ if (acb->tag_max_num > 30)
+ acb->tag_max_num = 30;
+
+ acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE, RESET_DEV */
+ acb->gmode2 = eeprom->channel_cfg;
+ acb->config = 0; /* NOTE: actually set in adapter_init_chip */
+
+ if (eeprom->channel_cfg & NAC_SCANLUN)
+ acb->lun_chk = 1;
+ acb->scan_devices = 1;
+
+ acb->scsi_host->this_id = eeprom->scsi_id;
+ acb->hostid_bit = (1 << acb->scsi_host->this_id);
+
+ for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
+ acb->dcb_map[i] = 0;
+
+ acb->msg_len = 0;
+
+ /* link static array of srbs into the srb free list */
+ for (i = 0; i < acb->srb_count - 1; i++)
+ srb_free_insert(acb, &acb->srb_array[i]);
+}
+
+
+/**
+ * adapter_init_host - Initialize the scsi host instance based on
+ * values that we have already stored in the adapter instance. There's
+ * some mention that a lot of these are deprecated, so we won't use
+ * them (we'll use the ones in the adapter instance) but we'll fill
+ * them in in case something else needs them.
+ *
+ * The eeprom structure, irq and io ports in the adapter need to have
+ * been set before calling this function.
+ *
+ * @host: The scsi host instance to fill in the values for.
+ **/
+static void adapter_init_scsi_host(struct Scsi_Host *host)
+{
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
+ struct NvRamType *eeprom = &acb->eeprom;
+
+ host->max_cmd_len = 24;
+ host->can_queue = DC395x_MAX_CMD_QUEUE;
+ host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
+ host->this_id = (int)eeprom->scsi_id;
+ host->io_port = acb->io_port_base;
+ host->n_io_port = acb->io_port_len;
+ host->dma_channel = -1;
+ host->unique_id = acb->io_port_base;
+ host->irq = acb->irq_level;
+ acb->last_reset = jiffies;
+
+ host->max_id = 16;
+ if (host->max_id - 1 == eeprom->scsi_id)
+ host->max_id--;
+
+ if (eeprom->channel_cfg & NAC_SCANLUN)
+ host->max_lun = 8;
+ else
+ host->max_lun = 1;
+}
+
+
+/**
+ * adapter_init_chip - Get the chip into a know state and figure out
+ * some of the settings that apply to this adapter.
+ *
+ * The io port in the adapter needs to have been set before calling
+ * this function. The config will be configured correctly on return.
+ *
+ * @acb: The adapter which we are to init.
+ **/
+static void adapter_init_chip(struct AdapterCtlBlk *acb)
+{
+ struct NvRamType *eeprom = &acb->eeprom;
+
+ /* Mask all the interrupt */
+ DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
+ DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
+
+ /* Reset SCSI module */
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
+
+ /* Reset PCI/DMA module */
+ DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
+ udelay(20);
+
+ /* program configuration 0 */
+ acb->config = HCC_AUTOTERM | HCC_PARITY;
+ if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
+ acb->config |= HCC_WIDE_CARD;
+
+ if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
+ acb->config |= HCC_SCSI_RESET;
+
+ if (acb->config & HCC_SCSI_RESET) {
+ dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
+ DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
+
+ /*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
+ /*spin_unlock_irq (&io_request_lock); */
+ udelay(500);
+
+ acb->last_reset =
+ jiffies + HZ / 2 +
+ HZ * acb->eeprom.delay_time;
+
+ /*spin_lock_irq (&io_request_lock); */
+ }
+}
+
+
+/**
+ * init_adapter - Grab the resource for the card, setup the adapter
+ * information, set the card into a known state, create the various
+ * tables etc etc. This basically gets all adapter information all up
+ * to date, initialised and gets the chip in sync with it.
+ *
+ * @host: This hosts adapter structure
+ * @io_port: The base I/O port
+ * @irq: IRQ
+ *
+ * Returns 0 if the initialization succeeds, any other value on
+ * failure.
+ **/
+static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
+ u32 io_port_len, unsigned int irq)
+{
+ if (!request_region(io_port, io_port_len, DC395X_NAME)) {
+ dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
+ goto failed;
+ }
+ /* store port base to indicate we have registered it */
+ acb->io_port_base = io_port;
+ acb->io_port_len = io_port_len;
+
+ if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
+ /* release the region we just claimed */
+ dprintkl(KERN_INFO, "Failed to register IRQ\n");
+ goto failed;
+ }
+ /* store irq to indicate we have registered it */
+ acb->irq_level = irq;
+
+ /* get eeprom configuration information and command line settings etc */
+ check_eeprom(&acb->eeprom, io_port);
+ print_eeprom_settings(&acb->eeprom);
+
+ /* setup adapter control block */
+ adapter_init_params(acb);
+
+ /* display card connectors/termination settings */
+ adapter_print_config(acb);
+
+ if (adapter_sg_tables_alloc(acb)) {
+ dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
+ goto failed;
+ }
+ adapter_init_scsi_host(acb->scsi_host);
+ adapter_init_chip(acb);
+ set_basic_config(acb);
+
+ dprintkdbg(DBG_0,
+ "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
+ "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
+ acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
+ sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
+ return 0;
+
+failed:
+ if (acb->irq_level)
+ free_irq(acb->irq_level, acb);
+ if (acb->io_port_base)
+ release_region(acb->io_port_base, acb->io_port_len);
+ adapter_sg_tables_free(acb);
+
+ return 1;
+}
+
+
+/**
+ * adapter_uninit_chip - cleanly shut down the scsi controller chip,
+ * stopping all operations and disabling interrupt generation on the
+ * card.
+ *
+ * @acb: The adapter which we are to shutdown.
+ **/
+static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
+{
+ /* disable interrupts */
+ DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
+ DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);
+
+ /* reset the scsi bus */
+ if (acb->config & HCC_SCSI_RESET)
+ reset_scsi_bus(acb);
+
+ /* clear any pending interrupt state */
+ DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
+}
+
+
+
+/**
+ * adapter_uninit - Shut down the chip and release any resources that
+ * we had allocated. Once this returns the adapter should not be used
+ * anymore.
+ *
+ * @acb: The adapter which we are to un-initialize.
+ **/
+static void adapter_uninit(struct AdapterCtlBlk *acb)
+{
+ unsigned long flags;
+ DC395x_LOCK_IO(acb->scsi_host, flags);
+
+ /* remove timers */
+ if (timer_pending(&acb->waiting_timer))
+ del_timer(&acb->waiting_timer);
+ if (timer_pending(&acb->selto_timer))
+ del_timer(&acb->selto_timer);
+
+ adapter_uninit_chip(acb);
+ adapter_remove_and_free_all_devices(acb);
+ DC395x_UNLOCK_IO(acb->scsi_host, flags);
+
+ if (acb->irq_level)
+ free_irq(acb->irq_level, acb);
+ if (acb->io_port_base)
+ release_region(acb->io_port_base, acb->io_port_len);
+
+ adapter_sg_tables_free(acb);
+}
+
+
+#undef YESNO
+#define YESNO(YN) \
+ if (YN) seq_printf(m, " Yes ");\
+ else seq_printf(m, " No ")
+
+static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
+ int spd, spd1;
+ struct DeviceCtlBlk *dcb;
+ unsigned long flags;
+ int dev;
+
+ seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
+ " Driver Version " DC395X_VERSION "\n");
+
+ DC395x_LOCK_IO(acb->scsi_host, flags);
+
+ seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
+ seq_printf(m, "DC395U/UW/F DC315/U %s\n",
+ (acb->config & HCC_WIDE_CARD) ? "Wide" : "");
+ seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
+ seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
+ seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
+
+ seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
+ seq_printf(m, "AdapterID %i\n", host->this_id);
+
+ seq_printf(m, "tag_max_num %i", acb->tag_max_num);
+ /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
+ seq_printf(m, ", FilterCfg 0x%02x",
+ DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
+ seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);
+ /*seq_printf(m, "\n"); */
+
+ seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
+ seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
+ acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
+ acb->dcb_map[6], acb->dcb_map[7]);
+ seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
+ acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
+ acb->dcb_map[14], acb->dcb_map[15]);
+
+ seq_puts(m,
+ "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
+
+ dev = 0;
+ list_for_each_entry(dcb, &acb->dcb_list, list) {
+ int nego_period;
+ seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
+ dcb->target_lun);
+ YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
+ YESNO(dcb->sync_offset);
+ YESNO(dcb->sync_period & WIDE_SYNC);
+ YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
+ YESNO(dcb->dev_mode & NTC_DO_SEND_START);
+ YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
+ nego_period = clock_period[dcb->sync_period & 0x07] << 2;
+ if (dcb->sync_offset)
+ seq_printf(m, " %03i ns ", nego_period);
+ else
+ seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));
+
+ if (dcb->sync_offset & 0x0f) {
+ spd = 1000 / (nego_period);
+ spd1 = 1000 % (nego_period);
+ spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
+ seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
+ (dcb->sync_offset & 0x0f));
+ } else
+ seq_puts(m, " ");
+
+ /* Add more info ... */
+ seq_printf(m, " %02i\n", dcb->max_command);
+ dev++;
+ }
+
+ if (timer_pending(&acb->waiting_timer))
+ seq_puts(m, "Waiting queue timer running\n");
+ else
+ seq_putc(m, '\n');
+
+ list_for_each_entry(dcb, &acb->dcb_list, list) {
+ struct ScsiReqBlk *srb;
+ if (!list_empty(&dcb->srb_waiting_list))
+ seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
+ dcb->target_id, dcb->target_lun,
+ list_size(&dcb->srb_waiting_list));
+ list_for_each_entry(srb, &dcb->srb_waiting_list, list)
+ seq_printf(m, " %p", srb->cmd);
+ if (!list_empty(&dcb->srb_going_list))
+ seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
+ dcb->target_id, dcb->target_lun,
+ list_size(&dcb->srb_going_list));
+ list_for_each_entry(srb, &dcb->srb_going_list, list)
+ seq_printf(m, " %p", srb->cmd);
+ if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
+ seq_putc(m, '\n');
+ }
+
+ if (debug_enabled(DBG_1)) {
+ seq_printf(m, "DCB list for ACB %p:\n", acb);
+ list_for_each_entry(dcb, &acb->dcb_list, list) {
+ seq_printf(m, "%p -> ", dcb);
+ }
+ seq_puts(m, "END\n");
+ }
+
+ DC395x_UNLOCK_IO(acb->scsi_host, flags);
+ return 0;
+}
+
+
+static struct scsi_host_template dc395x_driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = DC395X_NAME,
+ .show_info = dc395x_show_info,
+ .name = DC395X_BANNER " " DC395X_VERSION,
+ .queuecommand = dc395x_queue_command,
+ .bios_param = dc395x_bios_param,
+ .slave_alloc = dc395x_slave_alloc,
+ .slave_destroy = dc395x_slave_destroy,
+ .can_queue = DC395x_MAX_CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = DC395x_MAX_SG_TABLESIZE,
+ .cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
+ .eh_abort_handler = dc395x_eh_abort,
+ .eh_bus_reset_handler = dc395x_eh_bus_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+
+/**
+ * banner_display - Display banner on first instance of driver
+ * initialized.
+ **/
+static void banner_display(void)
+{
+ static int banner_done = 0;
+ if (!banner_done)
+ {
+ dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
+ banner_done = 1;
+ }
+}
+
+
+/**
+ * dc395x_init_one - Initialise a single instance of the adapter.
+ *
+ * The PCI layer will call this once for each instance of the adapter
+ * that it finds in the system. The pci_dev strcuture indicates which
+ * instance we are being called from.
+ *
+ * @dev: The PCI device to initialize.
+ * @id: Looks like a pointer to the entry in our pci device table
+ * that was actually matched by the PCI subsystem.
+ *
+ * Returns 0 on success, or an error code (-ve) on failure.
+ **/
+static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *scsi_host = NULL;
+ struct AdapterCtlBlk *acb = NULL;
+ unsigned long io_port_base;
+ unsigned int io_port_len;
+ unsigned int irq;
+
+ dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
+ banner_display();
+
+ if (pci_enable_device(dev))
+ {
+ dprintkl(KERN_INFO, "PCI Enable device failed.\n");
+ return -ENODEV;
+ }
+ io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
+ io_port_len = pci_resource_len(dev, 0);
+ irq = dev->irq;
+ dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);
+
+ /* allocate scsi host information (includes out adapter) */
+ scsi_host = scsi_host_alloc(&dc395x_driver_template,
+ sizeof(struct AdapterCtlBlk));
+ if (!scsi_host) {
+ dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
+ goto fail;
+ }
+ acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
+ acb->scsi_host = scsi_host;
+ acb->dev = dev;
+
+ /* initialise the adapter and everything we need */
+ if (adapter_init(acb, io_port_base, io_port_len, irq)) {
+ dprintkl(KERN_INFO, "adapter init failed\n");
+ goto fail;
+ }
+
+ pci_set_master(dev);
+
+ /* get the scsi mid level to scan for new devices on the bus */
+ if (scsi_add_host(scsi_host, &dev->dev)) {
+ dprintkl(KERN_ERR, "scsi_add_host failed\n");
+ goto fail;
+ }
+ pci_set_drvdata(dev, scsi_host);
+ scsi_scan_host(scsi_host);
+
+ return 0;
+
+fail:
+ if (acb != NULL)
+ adapter_uninit(acb);
+ if (scsi_host != NULL)
+ scsi_host_put(scsi_host);
+ pci_disable_device(dev);
+ return -ENODEV;
+}
+
+
+/**
+ * dc395x_remove_one - Called to remove a single instance of the
+ * adapter.
+ *
+ * @dev: The PCI device to initialize.
+ **/
+static void dc395x_remove_one(struct pci_dev *dev)
+{
+ struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
+ struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
+
+ dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
+
+ scsi_remove_host(scsi_host);
+ adapter_uninit(acb);
+ pci_disable_device(dev);
+ scsi_host_put(scsi_host);
+}
+
+
+static struct pci_device_id dc395x_pci_table[] = {
+ {
+ .vendor = PCI_VENDOR_ID_TEKRAM,
+ .device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
+
+
+static struct pci_driver dc395x_driver = {
+ .name = DC395X_NAME,
+ .id_table = dc395x_pci_table,
+ .probe = dc395x_init_one,
+ .remove = dc395x_remove_one,
+};
+
+
+/**
+ * dc395x_module_init - Module initialization function
+ *
+ * Used by both module and built-in driver to initialise this driver.
+ **/
+static int __init dc395x_module_init(void)
+{
+ return pci_register_driver(&dc395x_driver);
+}
+
+
+/**
+ * dc395x_module_exit - Module cleanup function.
+ **/
+static void __exit dc395x_module_exit(void)
+{
+ pci_unregister_driver(&dc395x_driver);
+}
+
+
+module_init(dc395x_module_init);
+module_exit(dc395x_module_exit);
+
+MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
+MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dc395x.h b/drivers/scsi/dc395x.h
new file mode 100644
index 000000000..fbf35e377
--- /dev/null
+++ b/drivers/scsi/dc395x.h
@@ -0,0 +1,648 @@
+/************************************************************************/
+/* */
+/* dc395x.h */
+/* */
+/* Device Driver for Tekram DC395(U/UW/F), DC315(U) */
+/* PCI SCSI Bus Master Host Adapter */
+/* (SCSI chip set used Tekram ASIC TRM-S1040) */
+/* */
+/************************************************************************/
+#ifndef DC395x_H
+#define DC395x_H
+
+/************************************************************************/
+/* */
+/* Initial values */
+/* */
+/************************************************************************/
+#define DC395x_MAX_CMD_QUEUE 32
+/* #define DC395x_MAX_QTAGS 32 */
+#define DC395x_MAX_QTAGS 16
+#define DC395x_MAX_SCSI_ID 16
+#define DC395x_MAX_CMD_PER_LUN DC395x_MAX_QTAGS
+#define DC395x_MAX_SG_TABLESIZE 64 /* HW limitation */
+#define DC395x_MAX_SG_LISTENTRY 64 /* Must be equal or lower to previous */
+ /* item */
+#define DC395x_MAX_SRB_CNT 63
+/* #define DC395x_MAX_CAN_QUEUE 7 * DC395x_MAX_QTAGS */
+#define DC395x_MAX_CAN_QUEUE DC395x_MAX_SRB_CNT
+#define DC395x_END_SCAN 2
+#define DC395x_SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
+#define DC395x_MAX_RETRIES 3
+
+#if 0
+#define SYNC_FIRST
+#endif
+
+#define NORM_REC_LVL 0
+
+/************************************************************************/
+/* */
+/* Various definitions */
+/* */
+/************************************************************************/
+#define BIT31 0x80000000
+#define BIT30 0x40000000
+#define BIT29 0x20000000
+#define BIT28 0x10000000
+#define BIT27 0x08000000
+#define BIT26 0x04000000
+#define BIT25 0x02000000
+#define BIT24 0x01000000
+#define BIT23 0x00800000
+#define BIT22 0x00400000
+#define BIT21 0x00200000
+#define BIT20 0x00100000
+#define BIT19 0x00080000
+#define BIT18 0x00040000
+#define BIT17 0x00020000
+#define BIT16 0x00010000
+#define BIT15 0x00008000
+#define BIT14 0x00004000
+#define BIT13 0x00002000
+#define BIT12 0x00001000
+#define BIT11 0x00000800
+#define BIT10 0x00000400
+#define BIT9 0x00000200
+#define BIT8 0x00000100
+#define BIT7 0x00000080
+#define BIT6 0x00000040
+#define BIT5 0x00000020
+#define BIT4 0x00000010
+#define BIT3 0x00000008
+#define BIT2 0x00000004
+#define BIT1 0x00000002
+#define BIT0 0x00000001
+
+/* UnitCtrlFlag */
+#define UNIT_ALLOCATED BIT0
+#define UNIT_INFO_CHANGED BIT1
+#define FORMATING_MEDIA BIT2
+#define UNIT_RETRY BIT3
+
+/* UnitFlags */
+#define DASD_SUPPORT BIT0
+#define SCSI_SUPPORT BIT1
+#define ASPI_SUPPORT BIT2
+
+/* SRBState machine definition */
+#define SRB_FREE 0x0000
+#define SRB_WAIT 0x0001
+#define SRB_READY 0x0002
+#define SRB_MSGOUT 0x0004 /* arbitration+msg_out 1st byte */
+#define SRB_MSGIN 0x0008
+#define SRB_EXTEND_MSGIN 0x0010
+#define SRB_COMMAND 0x0020
+#define SRB_START_ 0x0040 /* arbitration+msg_out+command_out */
+#define SRB_DISCONNECT 0x0080
+#define SRB_DATA_XFER 0x0100
+#define SRB_XFERPAD 0x0200
+#define SRB_STATUS 0x0400
+#define SRB_COMPLETED 0x0800
+#define SRB_ABORT_SENT 0x1000
+#define SRB_DO_SYNC_NEGO 0x2000
+#define SRB_DO_WIDE_NEGO 0x4000
+#define SRB_UNEXPECT_RESEL 0x8000
+
+/************************************************************************/
+/* */
+/* ACB Config */
+/* */
+/************************************************************************/
+#define HCC_WIDE_CARD 0x20
+#define HCC_SCSI_RESET 0x10
+#define HCC_PARITY 0x08
+#define HCC_AUTOTERM 0x04
+#define HCC_LOW8TERM 0x02
+#define HCC_UP8TERM 0x01
+
+/* ACBFlag */
+#define RESET_DEV BIT0
+#define RESET_DETECT BIT1
+#define RESET_DONE BIT2
+
+/* DCBFlag */
+#define ABORT_DEV_ BIT0
+
+/* SRBstatus */
+#define SRB_OK BIT0
+#define ABORTION BIT1
+#define OVER_RUN BIT2
+#define UNDER_RUN BIT3
+#define PARITY_ERROR BIT4
+#define SRB_ERROR BIT5
+
+/* SRBFlag */
+#define DATAOUT BIT7
+#define DATAIN BIT6
+#define RESIDUAL_VALID BIT5
+#define ENABLE_TIMER BIT4
+#define RESET_DEV0 BIT2
+#define ABORT_DEV BIT1
+#define AUTO_REQSENSE BIT0
+
+/* Adapter status */
+#define H_STATUS_GOOD 0
+#define H_SEL_TIMEOUT 0x11
+#define H_OVER_UNDER_RUN 0x12
+#define H_UNEXP_BUS_FREE 0x13
+#define H_TARGET_PHASE_F 0x14
+#define H_INVALID_CCB_OP 0x16
+#define H_LINK_CCB_BAD 0x17
+#define H_BAD_TARGET_DIR 0x18
+#define H_DUPLICATE_CCB 0x19
+#define H_BAD_CCB_OR_SG 0x1A
+#define H_ABORT 0x0FF
+
+/* SCSI BUS Status byte codes */
+#define SCSI_STAT_GOOD 0x0 /* Good status */
+#define SCSI_STAT_CHECKCOND 0x02 /* SCSI Check Condition */
+#define SCSI_STAT_CONDMET 0x04 /* Condition Met */
+#define SCSI_STAT_BUSY 0x08 /* Target busy status */
+#define SCSI_STAT_INTER 0x10 /* Intermediate status */
+#define SCSI_STAT_INTERCONDMET 0x14 /* Intermediate condition met */
+#define SCSI_STAT_RESCONFLICT 0x18 /* Reservation conflict */
+#define SCSI_STAT_CMDTERM 0x22 /* Command Terminated */
+#define SCSI_STAT_QUEUEFULL 0x28 /* Queue Full */
+#define SCSI_STAT_UNEXP_BUS_F 0xFD /* Unexpect Bus Free */
+#define SCSI_STAT_BUS_RST_DETECT 0xFE /* Scsi Bus Reset detected */
+#define SCSI_STAT_SEL_TIMEOUT 0xFF /* Selection Time out */
+
+/* Sync_Mode */
+#define SYNC_WIDE_TAG_ATNT_DISABLE 0
+#define SYNC_NEGO_ENABLE BIT0
+#define SYNC_NEGO_DONE BIT1
+#define WIDE_NEGO_ENABLE BIT2
+#define WIDE_NEGO_DONE BIT3
+#define WIDE_NEGO_STATE BIT4
+#define EN_TAG_QUEUEING BIT5
+#define EN_ATN_STOP BIT6
+
+#define SYNC_NEGO_OFFSET 15
+
+/* SCSI MSG BYTE */
+#define MSG_COMPLETE 0x00
+#define MSG_EXTENDED 0x01
+#define MSG_SAVE_PTR 0x02
+#define MSG_RESTORE_PTR 0x03
+#define MSG_DISCONNECT 0x04
+#define MSG_INITIATOR_ERROR 0x05
+#define MSG_ABORT 0x06
+#define MSG_REJECT_ 0x07
+#define MSG_NOP 0x08
+#define MSG_PARITY_ERROR 0x09
+#define MSG_LINK_CMD_COMPL 0x0A
+#define MSG_LINK_CMD_COMPL_FLG 0x0B
+#define MSG_BUS_RESET 0x0C
+#define MSG_ABORT_TAG 0x0D
+#define MSG_SIMPLE_QTAG 0x20
+#define MSG_HEAD_QTAG 0x21
+#define MSG_ORDER_QTAG 0x22
+#define MSG_IGNOREWIDE 0x23
+#define MSG_IDENTIFY 0x80
+#define MSG_HOST_ID 0xC0
+
+/* SCSI STATUS BYTE */
+#define STATUS_GOOD 0x00
+#define CHECK_CONDITION_ 0x02
+#define STATUS_BUSY 0x08
+#define STATUS_INTERMEDIATE 0x10
+#define RESERVE_CONFLICT 0x18
+
+/* cmd->result */
+#define STATUS_MASK_ 0xFF
+#define MSG_MASK 0xFF00
+#define RETURN_MASK 0xFF0000
+
+/************************************************************************/
+/* */
+/* Inquiry Data format */
+/* */
+/************************************************************************/
+struct ScsiInqData
+{ /* INQ */
+ u8 DevType; /* Periph Qualifier & Periph Dev Type */
+ u8 RMB_TypeMod; /* rem media bit & Dev Type Modifier */
+ u8 Vers; /* ISO, ECMA, & ANSI versions */
+ u8 RDF; /* AEN, TRMIOP, & response data format */
+ u8 AddLen; /* length of additional data */
+ u8 Res1; /* reserved */
+ u8 Res2; /* reserved */
+ u8 Flags; /* RelADr, Wbus32, Wbus16, Sync, etc. */
+ u8 VendorID[8]; /* Vendor Identification */
+ u8 ProductID[16]; /* Product Identification */
+ u8 ProductRev[4]; /* Product Revision */
+};
+
+ /* Inquiry byte 0 masks */
+#define SCSI_DEVTYPE 0x1F /* Peripheral Device Type */
+#define SCSI_PERIPHQUAL 0xE0 /* Peripheral Qualifier */
+ /* Inquiry byte 1 mask */
+#define SCSI_REMOVABLE_MEDIA 0x80 /* Removable Media bit (1=removable) */
+ /* Peripheral Device Type definitions */
+ /* See include/scsi/scsi.h */
+#define TYPE_NODEV SCSI_DEVTYPE /* Unknown or no device type */
+#ifndef TYPE_PRINTER /* */
+# define TYPE_PRINTER 0x02 /* Printer device */
+#endif /* */
+#ifndef TYPE_COMM /* */
+# define TYPE_COMM 0x09 /* Communications device */
+#endif
+
+/************************************************************************/
+/* */
+/* Inquiry flag definitions (Inq data byte 7) */
+/* */
+/************************************************************************/
+#define SCSI_INQ_RELADR 0x80 /* device supports relative addressing */
+#define SCSI_INQ_WBUS32 0x40 /* device supports 32 bit data xfers */
+#define SCSI_INQ_WBUS16 0x20 /* device supports 16 bit data xfers */
+#define SCSI_INQ_SYNC 0x10 /* device supports synchronous xfer */
+#define SCSI_INQ_LINKED 0x08 /* device supports linked commands */
+#define SCSI_INQ_CMDQUEUE 0x02 /* device supports command queueing */
+#define SCSI_INQ_SFTRE 0x01 /* device supports soft resets */
+
+#define ENABLE_CE 1
+#define DISABLE_CE 0
+#define EEPROM_READ 0x80
+
+/************************************************************************/
+/* */
+/* The PCI configuration register offset for TRM_S1040 */
+/* */
+/************************************************************************/
+#define TRM_S1040_ID 0x00 /* Vendor and Device ID */
+#define TRM_S1040_COMMAND 0x04 /* PCI command register */
+#define TRM_S1040_IOBASE 0x10 /* I/O Space base address */
+#define TRM_S1040_ROMBASE 0x30 /* Expansion ROM Base Address */
+#define TRM_S1040_INTLINE 0x3C /* Interrupt line */
+
+/************************************************************************/
+/* */
+/* The SCSI register offset for TRM_S1040 */
+/* */
+/************************************************************************/
+#define TRM_S1040_SCSI_STATUS 0x80 /* SCSI Status (R) */
+#define COMMANDPHASEDONE 0x2000 /* SCSI command phase done */
+#define SCSIXFERDONE 0x0800 /* SCSI SCSI transfer done */
+#define SCSIXFERCNT_2_ZERO 0x0100 /* SCSI SCSI transfer count to zero */
+#define SCSIINTERRUPT 0x0080 /* SCSI interrupt pending */
+#define COMMANDABORT 0x0040 /* SCSI command abort */
+#define SEQUENCERACTIVE 0x0020 /* SCSI sequencer active */
+#define PHASEMISMATCH 0x0010 /* SCSI phase mismatch */
+#define PARITYERROR 0x0008 /* SCSI parity error */
+
+#define PHASEMASK 0x0007 /* Phase MSG/CD/IO */
+#define PH_DATA_OUT 0x00 /* Data out phase */
+#define PH_DATA_IN 0x01 /* Data in phase */
+#define PH_COMMAND 0x02 /* Command phase */
+#define PH_STATUS 0x03 /* Status phase */
+#define PH_BUS_FREE 0x05 /* Invalid phase used as bus free */
+#define PH_MSG_OUT 0x06 /* Message out phase */
+#define PH_MSG_IN 0x07 /* Message in phase */
+
+#define TRM_S1040_SCSI_CONTROL 0x80 /* SCSI Control (W) */
+#define DO_CLRATN 0x0400 /* Clear ATN */
+#define DO_SETATN 0x0200 /* Set ATN */
+#define DO_CMDABORT 0x0100 /* Abort SCSI command */
+#define DO_RSTMODULE 0x0010 /* Reset SCSI chip */
+#define DO_RSTSCSI 0x0008 /* Reset SCSI bus */
+#define DO_CLRFIFO 0x0004 /* Clear SCSI transfer FIFO */
+#define DO_DATALATCH 0x0002 /* Enable SCSI bus data input (latched) */
+/* #define DO_DATALATCH 0x0000 */ /* KG: DISable SCSI bus data latch */
+#define DO_HWRESELECT 0x0001 /* Enable hardware reselection */
+
+#define TRM_S1040_SCSI_FIFOCNT 0x82 /* SCSI FIFO Counter 5bits(R) */
+#define TRM_S1040_SCSI_SIGNAL 0x83 /* SCSI low level signal (R/W) */
+
+#define TRM_S1040_SCSI_INTSTATUS 0x84 /* SCSI Interrupt Status (R) */
+#define INT_SCAM 0x80 /* SCAM selection interrupt */
+#define INT_SELECT 0x40 /* Selection interrupt */
+#define INT_SELTIMEOUT 0x20 /* Selection timeout interrupt */
+#define INT_DISCONNECT 0x10 /* Bus disconnected interrupt */
+#define INT_RESELECTED 0x08 /* Reselected interrupt */
+#define INT_SCSIRESET 0x04 /* SCSI reset detected interrupt */
+#define INT_BUSSERVICE 0x02 /* Bus service interrupt */
+#define INT_CMDDONE 0x01 /* SCSI command done interrupt */
+
+#define TRM_S1040_SCSI_OFFSET 0x84 /* SCSI Offset Count (W) */
+
+/************************************************************************/
+/* */
+/* Bit Name Definition */
+/* --------- ------------- ---------------------------- */
+/* 07-05 0 RSVD Reversed. Always 0. */
+/* 04 0 OFFSET4 Reversed for LVDS. Always 0. */
+/* 03-00 0 OFFSET[03:00] Offset number from 0 to 15 */
+/* */
+/************************************************************************/
+
+#define TRM_S1040_SCSI_SYNC 0x85 /* SCSI Synchronous Control (R/W) */
+#define LVDS_SYNC 0x20 /* Enable LVDS synchronous */
+#define WIDE_SYNC 0x10 /* Enable WIDE synchronous */
+#define ALT_SYNC 0x08 /* Enable Fast-20 alternate synchronous */
+
+/************************************************************************/
+/* */
+/* SYNCM 7 6 5 4 3 2 1 0 */
+/* Name RSVD RSVD LVDS WIDE ALTPERD PERIOD2 PERIOD1 PERIOD0 */
+/* Default 0 0 0 0 0 0 0 0 */
+/* */
+/* Bit Name Definition */
+/* --------- ------------- --------------------------- */
+/* 07-06 0 RSVD Reversed. Always read 0 */
+/* 05 0 LVDS Reversed. Always read 0 */
+/* 04 0 WIDE/WSCSI Enable wide (16-bits) SCSI */
+/* transfer. */
+/* 03 0 ALTPERD/ALTPD Alternate (Sync./Period) mode. */
+/* */
+/* @@ When this bit is set, */
+/* the synchronous period bits 2:0 */
+/* in the Synchronous Mode register */
+/* are used to transfer data */
+/* at the Fast-20 rate. */
+/* @@ When this bit is unset, */
+/* the synchronous period bits 2:0 */
+/* in the Synchronous Mode Register */
+/* are used to transfer data */
+/* at the Fast-10 rate (or Fast-40 w/ LVDS). */
+/* */
+/* 02-00 0 PERIOD[2:0]/ Synchronous SCSI Transfer Rate. */
+/* SXPD[02:00] These 3 bits specify */
+/* the Synchronous SCSI Transfer */
+/* Rate for Fast-20 and Fast-10. */
+/* These bits are also reset */
+/* by a SCSI Bus reset. */
+/* */
+/* For Fast-10 bit ALTPD = 0 and LVDS = 0 */
+/* and bit2,bit1,bit0 is defined as follows : */
+/* */
+/* 000 100ns, 10.0 MHz */
+/* 001 150ns, 6.6 MHz */
+/* 010 200ns, 5.0 MHz */
+/* 011 250ns, 4.0 MHz */
+/* 100 300ns, 3.3 MHz */
+/* 101 350ns, 2.8 MHz */
+/* 110 400ns, 2.5 MHz */
+/* 111 450ns, 2.2 MHz */
+/* */
+/* For Fast-20 bit ALTPD = 1 and LVDS = 0 */
+/* and bit2,bit1,bit0 is defined as follows : */
+/* */
+/* 000 50ns, 20.0 MHz */
+/* 001 75ns, 13.3 MHz */
+/* 010 100ns, 10.0 MHz */
+/* 011 125ns, 8.0 MHz */
+/* 100 150ns, 6.6 MHz */
+/* 101 175ns, 5.7 MHz */
+/* 110 200ns, 5.0 MHz */
+/* 111 250ns, 4.0 MHz KG: Maybe 225ns, 4.4 MHz */
+/* */
+/* For Fast-40 bit ALTPD = 0 and LVDS = 1 */
+/* and bit2,bit1,bit0 is defined as follows : */
+/* */
+/* 000 25ns, 40.0 MHz */
+/* 001 50ns, 20.0 MHz */
+/* 010 75ns, 13.3 MHz */
+/* 011 100ns, 10.0 MHz */
+/* 100 125ns, 8.0 MHz */
+/* 101 150ns, 6.6 MHz */
+/* 110 175ns, 5.7 MHz */
+/* 111 200ns, 5.0 MHz */
+/* */
+/************************************************************************/
+
+#define TRM_S1040_SCSI_TARGETID 0x86 /* SCSI Target ID (R/W) */
+#define TRM_S1040_SCSI_IDMSG 0x87 /* SCSI Identify Message (R) */
+#define TRM_S1040_SCSI_HOSTID 0x87 /* SCSI Host ID (W) */
+#define TRM_S1040_SCSI_COUNTER 0x88 /* SCSI Transfer Counter 24bits(R/W) */
+
+#define TRM_S1040_SCSI_INTEN 0x8C /* SCSI Interrupt Enable (R/W) */
+#define EN_SCAM 0x80 /* Enable SCAM selection interrupt */
+#define EN_SELECT 0x40 /* Enable selection interrupt */
+#define EN_SELTIMEOUT 0x20 /* Enable selection timeout interrupt */
+#define EN_DISCONNECT 0x10 /* Enable bus disconnected interrupt */
+#define EN_RESELECTED 0x08 /* Enable reselected interrupt */
+#define EN_SCSIRESET 0x04 /* Enable SCSI reset detected interrupt */
+#define EN_BUSSERVICE 0x02 /* Enable bus service interrupt */
+#define EN_CMDDONE 0x01 /* Enable SCSI command done interrupt */
+
+#define TRM_S1040_SCSI_CONFIG0 0x8D /* SCSI Configuration 0 (R/W) */
+#define PHASELATCH 0x40 /* Enable phase latch */
+#define INITIATOR 0x20 /* Enable initiator mode */
+#define PARITYCHECK 0x10 /* Enable parity check */
+#define BLOCKRST 0x01 /* Disable SCSI reset1 */
+
+#define TRM_S1040_SCSI_CONFIG1 0x8E /* SCSI Configuration 1 (R/W) */
+#define ACTIVE_NEGPLUS 0x10 /* Enhance active negation */
+#define FILTER_DISABLE 0x08 /* Disable SCSI data filter */
+#define FAST_FILTER 0x04 /* ? */
+#define ACTIVE_NEG 0x02 /* Enable active negation */
+
+#define TRM_S1040_SCSI_CONFIG2 0x8F /* SCSI Configuration 2 (R/W) */
+#define CFG2_WIDEFIFO 0x02 /* */
+
+#define TRM_S1040_SCSI_COMMAND 0x90 /* SCSI Command (R/W) */
+#define SCMD_COMP 0x12 /* Command complete */
+#define SCMD_SEL_ATN 0x60 /* Selection with ATN */
+#define SCMD_SEL_ATN3 0x64 /* Selection with ATN3 */
+#define SCMD_SEL_ATNSTOP 0xB8 /* Selection with ATN and Stop */
+#define SCMD_FIFO_OUT 0xC0 /* SCSI FIFO transfer out */
+#define SCMD_DMA_OUT 0xC1 /* SCSI DMA transfer out */
+#define SCMD_FIFO_IN 0xC2 /* SCSI FIFO transfer in */
+#define SCMD_DMA_IN 0xC3 /* SCSI DMA transfer in */
+#define SCMD_MSGACCEPT 0xD8 /* Message accept */
+
+/************************************************************************/
+/* */
+/* Code Command Description */
+/* ---- ---------------------------------------- */
+/* 02 Enable reselection with FIFO */
+/* 40 Select without ATN with FIFO */
+/* 60 Select with ATN with FIFO */
+/* 64 Select with ATN3 with FIFO */
+/* A0 Select with ATN and stop with FIFO */
+/* C0 Transfer information out with FIFO */
+/* C1 Transfer information out with DMA */
+/* C2 Transfer information in with FIFO */
+/* C3 Transfer information in with DMA */
+/* 12 Initiator command complete with FIFO */
+/* 50 Initiator transfer information out sequence without ATN */
+/* with FIFO */
+/* 70 Initiator transfer information out sequence with ATN */
+/* with FIFO */
+/* 74 Initiator transfer information out sequence with ATN3 */
+/* with FIFO */
+/* 52 Initiator transfer information in sequence without ATN */
+/* with FIFO */
+/* 72 Initiator transfer information in sequence with ATN */
+/* with FIFO */
+/* 76 Initiator transfer information in sequence with ATN3 */
+/* with FIFO */
+/* 90 Initiator transfer information out command complete */
+/* with FIFO */
+/* 92 Initiator transfer information in command complete */
+/* with FIFO */
+/* D2 Enable selection */
+/* 08 Reselection */
+/* 48 Disconnect command with FIFO */
+/* 88 Terminate command with FIFO */
+/* C8 Target command complete with FIFO */
+/* 18 SCAM Arbitration/ Selection */
+/* 5A Enable reselection */
+/* 98 Select without ATN with FIFO */
+/* B8 Select with ATN with FIFO */
+/* D8 Message Accepted */
+/* 58 NOP */
+/* */
+/************************************************************************/
+
+#define TRM_S1040_SCSI_TIMEOUT 0x91 /* SCSI Time Out Value (R/W) */
+#define TRM_S1040_SCSI_FIFO 0x98 /* SCSI FIFO (R/W) */
+
+#define TRM_S1040_SCSI_TCR0 0x9C /* SCSI Target Control 0 (R/W) */
+#define TCR0_WIDE_NEGO_DONE 0x8000 /* Wide nego done */
+#define TCR0_SYNC_NEGO_DONE 0x4000 /* Synchronous nego done */
+#define TCR0_ENABLE_LVDS 0x2000 /* Enable LVDS synchronous */
+#define TCR0_ENABLE_WIDE 0x1000 /* Enable WIDE synchronous */
+#define TCR0_ENABLE_ALT 0x0800 /* Enable alternate synchronous */
+#define TCR0_PERIOD_MASK 0x0700 /* Transfer rate */
+
+#define TCR0_DO_WIDE_NEGO 0x0080 /* Do wide NEGO */
+#define TCR0_DO_SYNC_NEGO 0x0040 /* Do sync NEGO */
+#define TCR0_DISCONNECT_EN 0x0020 /* Disconnection enable */
+#define TCR0_OFFSET_MASK 0x001F /* Offset number */
+
+#define TRM_S1040_SCSI_TCR1 0x9E /* SCSI Target Control 1 (R/W) */
+#define MAXTAG_MASK 0x7F00 /* Maximum tags (127) */
+#define NON_TAG_BUSY 0x0080 /* Non tag command active */
+#define ACTTAG_MASK 0x007F /* Active tags */
+
+/************************************************************************/
+/* */
+/* The DMA register offset for TRM_S1040 */
+/* */
+/************************************************************************/
+#define TRM_S1040_DMA_COMMAND 0xA0 /* DMA Command (R/W) */
+#define DMACMD_SG 0x02 /* Enable HW S/G support */
+#define DMACMD_DIR 0x01 /* 1 = read from SCSI write to Host */
+#define XFERDATAIN_SG 0x0103 /* Transfer data in w/ SG */
+#define XFERDATAOUT_SG 0x0102 /* Transfer data out w/ SG */
+#define XFERDATAIN 0x0101 /* Transfer data in w/o SG */
+#define XFERDATAOUT 0x0100 /* Transfer data out w/o SG */
+
+#define TRM_S1040_DMA_FIFOCNT 0xA1 /* DMA FIFO Counter (R) */
+
+#define TRM_S1040_DMA_CONTROL 0xA1 /* DMA Control (W) */
+#define DMARESETMODULE 0x10 /* Reset PCI/DMA module */
+#define STOPDMAXFER 0x08 /* Stop DMA transfer */
+#define ABORTXFER 0x04 /* Abort DMA transfer */
+#define CLRXFIFO 0x02 /* Clear DMA transfer FIFO */
+#define STARTDMAXFER 0x01 /* Start DMA transfer */
+
+#define TRM_S1040_DMA_FIFOSTAT 0xA2 /* DMA FIFO Status (R) */
+
+#define TRM_S1040_DMA_STATUS 0xA3 /* DMA Interrupt Status (R/W) */
+#define XFERPENDING 0x80 /* Transfer pending */
+#define SCSIBUSY 0x40 /* SCSI busy */
+#define GLOBALINT 0x20 /* DMA_INTEN bit 0-4 set */
+#define FORCEDMACOMP 0x10 /* Force DMA transfer complete */
+#define DMAXFERERROR 0x08 /* DMA transfer error */
+#define DMAXFERABORT 0x04 /* DMA transfer abort */
+#define DMAXFERCOMP 0x02 /* Bus Master XFER Complete status */
+#define SCSICOMP 0x01 /* SCSI complete interrupt */
+
+#define TRM_S1040_DMA_INTEN 0xA4 /* DMA Interrupt Enable (R/W) */
+#define EN_FORCEDMACOMP 0x10 /* Force DMA transfer complete */
+#define EN_DMAXFERERROR 0x08 /* DMA transfer error */
+#define EN_DMAXFERABORT 0x04 /* DMA transfer abort */
+#define EN_DMAXFERCOMP 0x02 /* Bus Master XFER Complete status */
+#define EN_SCSIINTR 0x01 /* Enable SCSI complete interrupt */
+
+#define TRM_S1040_DMA_CONFIG 0xA6 /* DMA Configuration (R/W) */
+#define DMA_ENHANCE 0x8000 /* Enable DMA enhance feature (SG?) */
+#define DMA_PCI_DUAL_ADDR 0x4000 /* */
+#define DMA_CFG_RES 0x2000 /* Always 1 */
+#define DMA_AUTO_CLR_FIFO 0x1000 /* DISable DMA auto clear FIFO */
+#define DMA_MEM_MULTI_READ 0x0800 /* */
+#define DMA_MEM_WRITE_INVAL 0x0400 /* Memory write and invalidate */
+#define DMA_FIFO_CTRL 0x0300 /* Control FIFO operation with DMA */
+#define DMA_FIFO_HALF_HALF 0x0200 /* Keep half filled on both read/write */
+
+#define TRM_S1040_DMA_XCNT 0xA8 /* DMA Transfer Counter (R/W), 24bits */
+#define TRM_S1040_DMA_CXCNT 0xAC /* DMA Current Transfer Counter (R) */
+#define TRM_S1040_DMA_XLOWADDR 0xB0 /* DMA Transfer Physical Low Address */
+#define TRM_S1040_DMA_XHIGHADDR 0xB4 /* DMA Transfer Physical High Address */
+
+/************************************************************************/
+/* */
+/* The general register offset for TRM_S1040 */
+/* */
+/************************************************************************/
+#define TRM_S1040_GEN_CONTROL 0xD4 /* Global Control */
+#define CTRL_LED 0x80 /* Control onboard LED */
+#define EN_EEPROM 0x10 /* Enable EEPROM programming */
+#define DIS_TERM 0x08 /* Disable onboard termination */
+#define AUTOTERM 0x04 /* Enable Auto SCSI terminator */
+#define LOW8TERM 0x02 /* Enable Lower 8 bit SCSI terminator */
+#define UP8TERM 0x01 /* Enable Upper 8 bit SCSI terminator */
+
+#define TRM_S1040_GEN_STATUS 0xD5 /* Global Status */
+#define GTIMEOUT 0x80 /* Global timer reach 0 */
+#define EXT68HIGH 0x40 /* Higher 8 bit connected externally */
+#define INT68HIGH 0x20 /* Higher 8 bit connected internally */
+#define CON5068 0x10 /* External 50/68 pin connected (low) */
+#define CON68 0x08 /* Internal 68 pin connected (low) */
+#define CON50 0x04 /* Internal 50 pin connected (low!) */
+#define WIDESCSI 0x02 /* Wide SCSI card */
+#define STATUS_LOAD_DEFAULT 0x01 /* */
+
+#define TRM_S1040_GEN_NVRAM 0xD6 /* Serial NON-VOLATILE RAM port */
+#define NVR_BITOUT 0x08 /* Serial data out */
+#define NVR_BITIN 0x04 /* Serial data in */
+#define NVR_CLOCK 0x02 /* Serial clock */
+#define NVR_SELECT 0x01 /* Serial select */
+
+#define TRM_S1040_GEN_EDATA 0xD7 /* Parallel EEPROM data port */
+#define TRM_S1040_GEN_EADDRESS 0xD8 /* Parallel EEPROM address */
+#define TRM_S1040_GEN_TIMER 0xDB /* Global timer */
+
+/************************************************************************/
+/* */
+/* NvmTarCfg0: Target configuration byte 0 :..pDCB->DevMode */
+/* */
+/************************************************************************/
+#define NTC_DO_WIDE_NEGO 0x20 /* Wide negotiate */
+#define NTC_DO_TAG_QUEUEING 0x10 /* Enable SCSI tag queuing */
+#define NTC_DO_SEND_START 0x08 /* Send start command SPINUP */
+#define NTC_DO_DISCONNECT 0x04 /* Enable SCSI disconnect */
+#define NTC_DO_SYNC_NEGO 0x02 /* Sync negotiation */
+#define NTC_DO_PARITY_CHK 0x01 /* (it should define at NAC) */
+ /* Parity check enable */
+
+/************************************************************************/
+/* */
+/* Nvram Initiater bits definition */
+/* */
+/************************************************************************/
+#if 0
+#define MORE2_DRV BIT0
+#define GREATER_1G BIT1
+#define RST_SCSI_BUS BIT2
+#define ACTIVE_NEGATION BIT3
+#define NO_SEEK BIT4
+#define LUN_CHECK BIT5
+#endif
+
+/************************************************************************/
+/* */
+/* Nvram Adapter Cfg bits definition */
+/* */
+/************************************************************************/
+#define NAC_SCANLUN 0x20 /* Include LUN as BIOS device */
+#define NAC_POWERON_SCSI_RESET 0x04 /* Power on reset enable */
+#define NAC_GREATER_1G 0x02 /* > 1G support enable */
+#define NAC_GT2DRIVES 0x01 /* Support more than 2 drives */
+/* #define NAC_DO_PARITY_CHK 0x08 */ /* Parity check enable */
+
+#endif
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
new file mode 100644
index 000000000..69abd0ad4
--- /dev/null
+++ b/drivers/scsi/device_handler/Kconfig
@@ -0,0 +1,40 @@
+#
+# SCSI Device Handler configuration
+#
+
+menuconfig SCSI_DH
+ tristate "SCSI Device Handlers"
+ depends on SCSI
+ default n
+ help
+ SCSI Device Handlers provide device specific support for
+ devices utilized in multipath configurations. Say Y here to
+ select support for specific hardware.
+
+config SCSI_DH_RDAC
+ tristate "LSI RDAC Device Handler"
+ depends on SCSI_DH
+ help
+ If you have a LSI RDAC select y. Otherwise, say N.
+
+config SCSI_DH_HP_SW
+ tristate "HP/COMPAQ MSA Device Handler"
+ depends on SCSI_DH
+ help
+ If you have a HP/COMPAQ MSA device that requires START_STOP to
+ be sent to start it and cannot upgrade the firmware then select y.
+ Otherwise, say N.
+
+config SCSI_DH_EMC
+ tristate "EMC CLARiiON Device Handler"
+ depends on SCSI_DH
+ help
+ If you have a EMC CLARiiON select y. Otherwise, say N.
+
+config SCSI_DH_ALUA
+ tristate "SPC-3 ALUA Device Handler"
+ depends on SCSI_DH
+ help
+ SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+ Access (ALUA).
+
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
new file mode 100644
index 000000000..e1d2ea083
--- /dev/null
+++ b/drivers/scsi/device_handler/Makefile
@@ -0,0 +1,8 @@
+#
+# SCSI Device Handler
+#
+obj-$(CONFIG_SCSI_DH) += scsi_dh.o
+obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
+obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
+obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
+obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
new file mode 100644
index 000000000..1efebc9ee
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -0,0 +1,621 @@
+/*
+ * SCSI device handler infrastruture.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ * Authors:
+ * Chandra Seetharaman <sekharan@us.ibm.com>
+ * Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsi/scsi_dh.h>
+#include "../scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+static struct scsi_device_handler *get_device_handler(const char *name)
+{
+ struct scsi_device_handler *tmp, *found = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (!strncmp(tmp->name, name, strlen(tmp->name))) {
+ found = tmp;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found;
+}
+
+/*
+ * device_handler_match_function - Match a device handler to a device
+ * @sdev - SCSI device to be tested
+ *
+ * Tests @sdev against the match function of all registered device_handler.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match_function(struct scsi_device *sdev)
+{
+ struct scsi_device_handler *tmp_dh, *found_dh = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
+ if (tmp_dh->match && tmp_dh->match(sdev)) {
+ found_dh = tmp_dh;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found_dh;
+}
+
+/*
+ * device_handler_match - Attach a device handler to a device
+ * @scsi_dh - The device handler to match against or NULL
+ * @sdev - SCSI device to be tested against @scsi_dh
+ *
+ * Tests @sdev against the device handler @scsi_dh or against
+ * all registered device_handler if @scsi_dh == NULL.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match(struct scsi_device_handler *scsi_dh,
+ struct scsi_device *sdev)
+{
+ struct scsi_device_handler *found_dh;
+
+ found_dh = device_handler_match_function(sdev);
+
+ if (scsi_dh && found_dh != scsi_dh)
+ found_dh = NULL;
+
+ return found_dh;
+}
+
+/*
+ * scsi_dh_handler_attach - Attach a device handler to a device
+ * @sdev - SCSI device the device handler should attach to
+ * @scsi_dh - The device handler to attach
+ */
+static int scsi_dh_handler_attach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ struct scsi_dh_data *d;
+
+ if (sdev->scsi_dh_data) {
+ if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
+ return -EBUSY;
+
+ kref_get(&sdev->scsi_dh_data->kref);
+ return 0;
+ }
+
+ if (!try_module_get(scsi_dh->module))
+ return -EINVAL;
+
+ d = scsi_dh->attach(sdev);
+ if (IS_ERR(d)) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n",
+ scsi_dh->name, PTR_ERR(d));
+ module_put(scsi_dh->module);
+ return PTR_ERR(d);
+ }
+
+ d->scsi_dh = scsi_dh;
+ kref_init(&d->kref);
+ d->sdev = sdev;
+
+ spin_lock_irq(sdev->request_queue->queue_lock);
+ sdev->scsi_dh_data = d;
+ spin_unlock_irq(sdev->request_queue->queue_lock);
+ return 0;
+}
+
+static void __detach_handler (struct kref *kref)
+{
+ struct scsi_dh_data *scsi_dh_data =
+ container_of(kref, struct scsi_dh_data, kref);
+ struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
+ struct scsi_device *sdev = scsi_dh_data->sdev;
+
+ scsi_dh->detach(sdev);
+
+ spin_lock_irq(sdev->request_queue->queue_lock);
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irq(sdev->request_queue->queue_lock);
+
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
+ module_put(scsi_dh->module);
+}
+
+/*
+ * scsi_dh_handler_detach - Detach a device handler from a device
+ * @sdev - SCSI device the device handler should be detached from
+ * @scsi_dh - Device handler to be detached
+ *
+ * Detach from a device handler. If a device handler is specified,
+ * only detach if the currently attached handler matches @scsi_dh.
+ */
+static void scsi_dh_handler_detach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ if (!sdev->scsi_dh_data)
+ return;
+
+ if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
+ return;
+
+ if (!scsi_dh)
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+
+ if (scsi_dh)
+ kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
+}
+
+/*
+ * Functions for sysfs attribute 'dh_state'
+ */
+static ssize_t
+store_dh_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_device_handler *scsi_dh;
+ int err = -EINVAL;
+
+ if (sdev->sdev_state == SDEV_CANCEL ||
+ sdev->sdev_state == SDEV_DEL)
+ return -ENODEV;
+
+ if (!sdev->scsi_dh_data) {
+ /*
+ * Attach to a device handler
+ */
+ if (!(scsi_dh = get_device_handler(buf)))
+ return err;
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+ } else {
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (!strncmp(buf, "detach", 6)) {
+ /*
+ * Detach from a device handler
+ */
+ scsi_dh_handler_detach(sdev, scsi_dh);
+ err = 0;
+ } else if (!strncmp(buf, "activate", 8)) {
+ /*
+ * Activate a device handler
+ */
+ if (scsi_dh->activate)
+ err = scsi_dh->activate(sdev, NULL, NULL);
+ else
+ err = 0;
+ }
+ }
+
+ return err<0?err:count;
+}
+
+static ssize_t
+show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->scsi_dh_data)
+ return snprintf(buf, 20, "detached\n");
+
+ return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
+}
+
+static struct device_attribute scsi_dh_state_attr =
+ __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
+ store_dh_state);
+
+/*
+ * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
+ */
+static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+ int err;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ err = device_create_file(&sdev->sdev_gendev,
+ &scsi_dh_state_attr);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
+ */
+static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ device_remove_file(&sdev->sdev_gendev,
+ &scsi_dh_state_attr);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_notifier - notifier chain callback
+ */
+static int scsi_dh_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct scsi_device *sdev;
+ int err = 0;
+ struct scsi_device_handler *devinfo = NULL;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ err = device_create_file(dev, &scsi_dh_state_attr);
+ /* don't care about err */
+ devinfo = device_handler_match(NULL, sdev);
+ if (devinfo)
+ err = scsi_dh_handler_attach(sdev, devinfo);
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ device_remove_file(dev, &scsi_dh_state_attr);
+ scsi_dh_handler_detach(sdev, NULL);
+ }
+ return err;
+}
+
+/*
+ * scsi_dh_notifier_add - Callback for scsi_register_device_handler
+ */
+static int scsi_dh_notifier_add(struct device *dev, void *data)
+{
+ struct scsi_device_handler *scsi_dh = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ if (!get_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (device_handler_match(scsi_dh, sdev))
+ scsi_dh_handler_attach(sdev, scsi_dh);
+
+ put_device(dev);
+
+ return 0;
+}
+
+/*
+ * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
+ */
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+ struct scsi_device_handler *scsi_dh = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ if (!get_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ scsi_dh_handler_detach(sdev, scsi_dh);
+
+ put_device(dev);
+
+ return 0;
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ * module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+
+ if (get_device_handler(scsi_dh->name))
+ return -EBUSY;
+
+ if (!scsi_dh->attach || !scsi_dh->detach)
+ return -EINVAL;
+
+ spin_lock(&list_lock);
+ list_add(&scsi_dh->list, &scsi_dh_list);
+ spin_unlock(&list_lock);
+
+ bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+ printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
+
+ return SCSI_DH_OK;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+/*
+ * scsi_unregister_device_handler - register a device handler personality
+ * module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+
+ if (!get_device_handler(scsi_dh->name))
+ return -ENODEV;
+
+ bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
+ scsi_dh_notifier_remove);
+
+ spin_lock(&list_lock);
+ list_del(&scsi_dh->list);
+ spin_unlock(&list_lock);
+ printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
+
+ return SCSI_DH_OK;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ * corresponding to the given request queue.
+ * Returns immediately without waiting for activation to be completed.
+ * @q - Request queue that is associated with the scsi_device to be
+ * activated.
+ * @fn - Function to be called upon completion of the activation.
+ * Function fn is called with data (below) and the error code.
+ * Function fn may be called from the same calling context. So,
+ * do not hold the lock in the caller which may be needed in fn.
+ * @data - data passed to the function fn upon completion.
+ *
+ */
+int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
+{
+ int err = 0;
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh = NULL;
+ struct device *dev = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ err = SCSI_DH_NOSYS;
+ if (fn)
+ fn(data, err);
+ return err;
+ }
+
+ if (sdev->scsi_dh_data)
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ dev = get_device(&sdev->sdev_gendev);
+ if (!scsi_dh || !dev ||
+ sdev->sdev_state == SDEV_CANCEL ||
+ sdev->sdev_state == SDEV_DEL)
+ err = SCSI_DH_NOSYS;
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ err = SCSI_DH_DEV_OFFLINED;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (err) {
+ if (fn)
+ fn(data, err);
+ goto out;
+ }
+
+ if (scsi_dh->activate)
+ err = scsi_dh->activate(sdev, fn, data);
+out:
+ put_device(dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
+
+/*
+ * scsi_dh_set_params - set the parameters for the device as per the
+ * string specified in params.
+ * @q - Request queue that is associated with the scsi_device for
+ * which the parameters to be set.
+ * @params - parameters in the following format
+ * "no_of_params\0param1\0param2\0param3\0...\0"
+ * for example, string for 2 parameters with value 10 and 21
+ * is specified as "2\010\021\0".
+ */
+int scsi_dh_set_params(struct request_queue *q, const char *params)
+{
+ int err = -SCSI_DH_NOSYS;
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (sdev && sdev->scsi_dh_data)
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
+ err = 0;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (err)
+ return err;
+ err = scsi_dh->set_params(sdev, params);
+ put_device(&sdev->sdev_gendev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_set_params);
+
+/*
+ * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
+ * the given name. FALSE(0) otherwise.
+ * @name - name of the device handler.
+ */
+int scsi_dh_handler_exist(const char *name)
+{
+ return (get_device_handler(name) != NULL);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+
+/*
+ * scsi_dh_attach - Attach device handler
+ * @q - Request queue that is associated with the scsi_device
+ * the handler should be attached to
+ * @name - name of the handler to attach
+ */
+int scsi_dh_attach(struct request_queue *q, const char *name)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh;
+ int err = 0;
+
+ scsi_dh = get_device_handler(name);
+ if (!scsi_dh)
+ return -EINVAL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ err = -ENODEV;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!err) {
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+ put_device(&sdev->sdev_gendev);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attach);
+
+/*
+ * scsi_dh_detach - Detach device handler
+ * @q - Request queue that is associated with the scsi_device
+ * the handler should be detached from
+ *
+ * This function will detach the device handler only
+ * if the sdev is not part of the internal list, ie
+ * if it has been attached manually.
+ */
+void scsi_dh_detach(struct request_queue *q)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!sdev)
+ return;
+
+ if (sdev->scsi_dh_data) {
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ scsi_dh_handler_detach(sdev, scsi_dh);
+ }
+ put_device(&sdev->sdev_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_detach);
+
+/*
+ * scsi_dh_attached_handler_name - Get attached device handler's name
+ * @q - Request queue that is associated with the scsi_device
+ * that may have a device handler attached
+ * @gfp - the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Returns name of attached handler, NULL if no handler is attached.
+ * Caller must take care to free the returned string.
+ */
+const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ const char *handler_name = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!sdev)
+ return NULL;
+
+ if (sdev->scsi_dh_data)
+ handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
+
+ put_device(&sdev->sdev_gendev);
+ return handler_name;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
+
+static struct notifier_block scsi_dh_nb = {
+ .notifier_call = scsi_dh_notifier
+};
+
+static int __init scsi_dh_init(void)
+{
+ int r;
+
+ r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
+
+ if (!r)
+ bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+ scsi_dh_sysfs_attr_add);
+
+ return r;
+}
+
+static void __exit scsi_dh_exit(void)
+{
+ bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+ scsi_dh_sysfs_attr_remove);
+ bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+}
+
+module_init(scsi_dh_init);
+module_exit(scsi_dh_exit);
+
+MODULE_DESCRIPTION("SCSI device handler");
+MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
new file mode 100644
index 000000000..854b568b9
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -0,0 +1,905 @@
+/*
+ * Generic SCSI-3 ALUA SCSI Device Handler
+ *
+ * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define ALUA_DH_NAME "alua"
+#define ALUA_DH_VER "1.3"
+
+#define TPGS_STATE_OPTIMIZED 0x0
+#define TPGS_STATE_NONOPTIMIZED 0x1
+#define TPGS_STATE_STANDBY 0x2
+#define TPGS_STATE_UNAVAILABLE 0x3
+#define TPGS_STATE_LBA_DEPENDENT 0x4
+#define TPGS_STATE_OFFLINE 0xe
+#define TPGS_STATE_TRANSITIONING 0xf
+
+#define TPGS_SUPPORT_NONE 0x00
+#define TPGS_SUPPORT_OPTIMIZED 0x01
+#define TPGS_SUPPORT_NONOPTIMIZED 0x02
+#define TPGS_SUPPORT_STANDBY 0x04
+#define TPGS_SUPPORT_UNAVAILABLE 0x08
+#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
+#define TPGS_SUPPORT_OFFLINE 0x40
+#define TPGS_SUPPORT_TRANSITION 0x80
+
+#define RTPG_FMT_MASK 0x70
+#define RTPG_FMT_EXT_HDR 0x10
+
+#define TPGS_MODE_UNINITIALIZED -1
+#define TPGS_MODE_NONE 0x0
+#define TPGS_MODE_IMPLICIT 0x1
+#define TPGS_MODE_EXPLICIT 0x2
+
+#define ALUA_INQUIRY_SIZE 36
+#define ALUA_FAILOVER_TIMEOUT 60
+#define ALUA_FAILOVER_RETRIES 5
+
+/* flags passed from user level */
+#define ALUA_OPTIMIZE_STPG 1
+
+struct alua_dh_data {
+ struct scsi_dh_data dh_data;
+ int group_id;
+ int rel_port;
+ int tpgs;
+ int state;
+ int pref;
+ unsigned flags; /* used for optimizing STPG */
+ unsigned char inq[ALUA_INQUIRY_SIZE];
+ unsigned char *buff;
+ int bufflen;
+ unsigned char transition_tmo;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int senselen;
+ struct scsi_device *sdev;
+ activate_complete callback_fn;
+ void *callback_data;
+};
+
+#define ALUA_POLICY_SWITCH_CURRENT 0
+#define ALUA_POLICY_SWITCH_ALL 1
+
+static char print_alua_state(int);
+static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
+
+static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
+{
+ return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data);
+}
+
+static int realloc_buffer(struct alua_dh_data *h, unsigned len)
+{
+ if (h->buff && h->buff != h->inq)
+ kfree(h->buff);
+
+ h->buff = kmalloc(len, GFP_NOIO);
+ if (!h->buff) {
+ h->buff = h->inq;
+ h->bufflen = ALUA_INQUIRY_SIZE;
+ return 1;
+ }
+ h->bufflen = len;
+ return 0;
+}
+
+static struct request *get_alua_req(struct scsi_device *sdev,
+ void *buffer, unsigned buflen, int rw)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+
+ rq = blk_get_request(q, rw, GFP_NOIO);
+
+ if (IS_ERR(rq)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: blk_get_request failed\n", __func__);
+ return NULL;
+ }
+ blk_rq_set_block_pc(rq);
+
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
+ blk_put_request(rq);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: blk_rq_map_kern failed\n", __func__);
+ return NULL;
+ }
+
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ rq->retries = ALUA_FAILOVER_RETRIES;
+ rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
+
+ return rq;
+}
+
+/*
+ * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
+ * @sdev: sdev the command should be sent to
+ */
+static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 1;
+ rq->cmd[2] = 0x83;
+ rq->cmd[4] = h->bufflen;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: evpd inquiry failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ */
+static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h,
+ bool rtpg_ext_hdr_req)
+{
+ struct request *rq;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = MAINTENANCE_IN;
+ if (rtpg_ext_hdr_req)
+ rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
+ else
+ rq->cmd[1] = MI_REPORT_TARGET_PGS;
+ rq->cmd[6] = (h->bufflen >> 24) & 0xff;
+ rq->cmd[7] = (h->bufflen >> 16) & 0xff;
+ rq->cmd[8] = (h->bufflen >> 8) & 0xff;
+ rq->cmd[9] = h->bufflen & 0xff;
+ rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+
+ err = blk_execute_rq(rq->q, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: rtpg failed with %x\n",
+ ALUA_DH_NAME, rq->errors);
+ h->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+/*
+ * alua_stpg - Evaluate SET TARGET GROUP STATES
+ * @sdev: the device to be evaluated
+ * @state: the new target group state
+ *
+ * Send a SET TARGET GROUP STATES command to the device.
+ * We only have to test here if we should resubmit the command;
+ * any other error is assumed as a failure.
+ */
+static void stpg_endio(struct request *req, int error)
+{
+ struct alua_dh_data *h = req->end_io_data;
+ struct scsi_sense_hdr sense_hdr;
+ unsigned err = SCSI_DH_OK;
+
+ if (host_byte(req->errors) != DID_OK ||
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
+ err = SCSI_DH_IO;
+ goto done;
+ }
+
+ if (req->sense_len > 0) {
+ err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ if (!err) {
+ err = SCSI_DH_IO;
+ goto done;
+ }
+ err = alua_check_sense(h->sdev, &sense_hdr);
+ if (err == ADD_TO_MLQUEUE) {
+ err = SCSI_DH_RETRY;
+ goto done;
+ }
+ sdev_printk(KERN_INFO, h->sdev,
+ "%s: stpg sense code: %02x/%02x/%02x\n",
+ ALUA_DH_NAME, sense_hdr.sense_key,
+ sense_hdr.asc, sense_hdr.ascq);
+ err = SCSI_DH_IO;
+ } else if (error)
+ err = SCSI_DH_IO;
+
+ if (err == SCSI_DH_OK) {
+ h->state = TPGS_STATE_OPTIMIZED;
+ sdev_printk(KERN_INFO, h->sdev,
+ "%s: port group %02x switched to state %c\n",
+ ALUA_DH_NAME, h->group_id,
+ print_alua_state(h->state));
+ }
+done:
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
+ if (h->callback_fn) {
+ h->callback_fn(h->callback_data, err);
+ h->callback_fn = h->callback_data = NULL;
+ }
+ return;
+}
+
+/*
+ * submit_stpg - Issue a SET TARGET GROUP STATES command
+ *
+ * Currently we're only setting the current target port group state
+ * to 'active/optimized' and let the array firmware figure out
+ * the states of the remaining groups.
+ */
+static unsigned submit_stpg(struct alua_dh_data *h)
+{
+ struct request *rq;
+ int stpg_len = 8;
+ struct scsi_device *sdev = h->sdev;
+
+ /* Prepare the data buffer */
+ memset(h->buff, 0, stpg_len);
+ h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
+ h->buff[6] = (h->group_id >> 8) & 0xff;
+ h->buff[7] = h->group_id & 0xff;
+
+ rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
+ if (!rq)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ /* Prepare the command. */
+ rq->cmd[0] = MAINTENANCE_OUT;
+ rq->cmd[1] = MO_SET_TARGET_PGS;
+ rq->cmd[6] = (stpg_len >> 24) & 0xff;
+ rq->cmd[7] = (stpg_len >> 16) & 0xff;
+ rq->cmd[8] = (stpg_len >> 8) & 0xff;
+ rq->cmd[9] = stpg_len & 0xff;
+ rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = h->senselen = 0;
+ rq->end_io_data = h;
+
+ blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
+ return SCSI_DH_OK;
+}
+
+/*
+ * alua_check_tpgs - Evaluate TPGS setting
+ * @sdev: device to be checked
+ *
+ * Examine the TPGS setting of the sdev to find out if ALUA
+ * is supported.
+ */
+static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int err = SCSI_DH_OK;
+
+ h->tpgs = scsi_device_tpgs(sdev);
+ switch (h->tpgs) {
+ case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: supports implicit and explicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ case TPGS_MODE_EXPLICIT:
+ sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ case TPGS_MODE_IMPLICIT:
+ sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
+ ALUA_DH_NAME);
+ break;
+ default:
+ h->tpgs = TPGS_MODE_NONE;
+ sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
+ ALUA_DH_NAME);
+ err = SCSI_DH_DEV_UNSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
+ * @sdev: device to be checked
+ *
+ * Extract the relative target port and the target port group
+ * descriptor from the list of identificators.
+ */
+static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int len;
+ unsigned err;
+ unsigned char *d;
+
+ retry:
+ err = submit_vpd_inquiry(sdev, h);
+
+ if (err != SCSI_DH_OK)
+ return err;
+
+ /* Check if vpd page exceeds initial buffer */
+ len = (h->buff[2] << 8) + h->buff[3] + 4;
+ if (len > h->bufflen) {
+ /* Resubmit with the correct length */
+ if (realloc_buffer(h, len)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: kmalloc buffer failed\n",
+ ALUA_DH_NAME);
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ goto retry;
+ }
+
+ /*
+ * Now look for the correct descriptor.
+ */
+ d = h->buff + 4;
+ while (d < h->buff + len) {
+ switch (d[1] & 0xf) {
+ case 0x4:
+ /* Relative target port */
+ h->rel_port = (d[6] << 8) + d[7];
+ break;
+ case 0x5:
+ /* Target port group */
+ h->group_id = (d[6] << 8) + d[7];
+ break;
+ default:
+ break;
+ }
+ d += d[3] + 4;
+ }
+
+ if (h->group_id == -1) {
+ /*
+ * Internal error; TPGS supported but required
+ * VPD identification descriptors not present.
+ * Disable ALUA support
+ */
+ sdev_printk(KERN_INFO, sdev,
+ "%s: No target port descriptors found\n",
+ ALUA_DH_NAME);
+ h->state = TPGS_STATE_OPTIMIZED;
+ h->tpgs = TPGS_MODE_NONE;
+ err = SCSI_DH_DEV_UNSUPP;
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x rel port %02x\n",
+ ALUA_DH_NAME, h->group_id, h->rel_port);
+ }
+
+ return err;
+}
+
+static char print_alua_state(int state)
+{
+ switch (state) {
+ case TPGS_STATE_OPTIMIZED:
+ return 'A';
+ case TPGS_STATE_NONOPTIMIZED:
+ return 'N';
+ case TPGS_STATE_STANDBY:
+ return 'S';
+ case TPGS_STATE_UNAVAILABLE:
+ return 'U';
+ case TPGS_STATE_LBA_DEPENDENT:
+ return 'L';
+ case TPGS_STATE_OFFLINE:
+ return 'O';
+ case TPGS_STATE_TRANSITIONING:
+ return 'T';
+ default:
+ return 'X';
+ }
+}
+
+static int alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
+ /*
+ * LUN Not Accessible - ALUA state transition
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
+ /*
+ * LUN Not Accessible -- Target port in standby state
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c)
+ /*
+ * LUN Not Accessible -- Target port in unavailable state
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12)
+ /*
+ * LUN Not Ready -- Offline
+ */
+ return SUCCESS;
+ if (sdev->allow_restart &&
+ sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02)
+ /*
+ * if the device is not started, we need to wake
+ * the error handler to start the motor
+ */
+ return FAILED;
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Power On, Reset, or Bus Device Reset, just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
+ /*
+ * Device internal reset
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+ /*
+ * Mode Parameters Changed
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
+ /*
+ * ALUA state changed
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07)
+ /*
+ * Implicit ALUA state transition failed
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
+ /*
+ * Inquiry data has changed
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
+ /*
+ * REPORTED_LUNS_DATA_HAS_CHANGED is reported
+ * when switching controllers on targets like
+ * Intel Multi-Flex. We can just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ break;
+ }
+
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+/*
+ * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
+ * @sdev: the device to be evaluated.
+ * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
+ *
+ * Evaluate the Target Port Group State.
+ * Returns SCSI_DH_DEV_OFFLINED if the path is
+ * found to be unusable.
+ */
+static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
+{
+ struct scsi_sense_hdr sense_hdr;
+ int len, k, off, valid_states = 0;
+ unsigned char *ucp;
+ unsigned err;
+ bool rtpg_ext_hdr_req = 1;
+ unsigned long expiry, interval = 0;
+ unsigned int tpg_desc_tbl_off;
+ unsigned char orig_transition_tmo;
+
+ if (!h->transition_tmo)
+ expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
+ else
+ expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);
+
+ retry:
+ err = submit_rtpg(sdev, h, rtpg_ext_hdr_req);
+
+ if (err == SCSI_DH_IO && h->senselen > 0) {
+ err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ if (!err)
+ return SCSI_DH_IO;
+
+ /*
+ * submit_rtpg() has failed on existing arrays
+ * when requesting extended header info, and
+ * the array doesn't support extended headers,
+ * even though it shouldn't according to T10.
+ * The retry without rtpg_ext_hdr_req set
+ * handles this.
+ */
+ if (rtpg_ext_hdr_req == 1 &&
+ sense_hdr.sense_key == ILLEGAL_REQUEST &&
+ sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+ rtpg_ext_hdr_req = 0;
+ goto retry;
+ }
+
+ err = alua_check_sense(sdev, &sense_hdr);
+ if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
+ goto retry;
+ sdev_printk(KERN_INFO, sdev,
+ "%s: rtpg sense code %02x/%02x/%02x\n",
+ ALUA_DH_NAME, sense_hdr.sense_key,
+ sense_hdr.asc, sense_hdr.ascq);
+ err = SCSI_DH_IO;
+ }
+ if (err != SCSI_DH_OK)
+ return err;
+
+ len = (h->buff[0] << 24) + (h->buff[1] << 16) +
+ (h->buff[2] << 8) + h->buff[3] + 4;
+
+ if (len > h->bufflen) {
+ /* Resubmit with the correct length */
+ if (realloc_buffer(h, len)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: kmalloc buffer failed\n",__func__);
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ goto retry;
+ }
+
+ orig_transition_tmo = h->transition_tmo;
+ if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
+ h->transition_tmo = h->buff[5];
+ else
+ h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
+
+ if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: transition timeout set to %d seconds\n",
+ ALUA_DH_NAME, h->transition_tmo);
+ expiry = jiffies + h->transition_tmo * HZ;
+ }
+
+ if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
+ tpg_desc_tbl_off = 8;
+ else
+ tpg_desc_tbl_off = 4;
+
+ for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
+ k < len;
+ k += off, ucp += off) {
+
+ if (h->group_id == (ucp[2] << 8) + ucp[3]) {
+ h->state = ucp[0] & 0x0f;
+ h->pref = ucp[0] >> 7;
+ valid_states = ucp[1];
+ }
+ off = 8 + (ucp[7] * 4);
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
+ ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
+ h->pref ? "preferred" : "non-preferred",
+ valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+
+ switch (h->state) {
+ case TPGS_STATE_TRANSITIONING:
+ if (wait_for_transition) {
+ if (time_before(jiffies, expiry)) {
+ /* State transition, retry */
+ interval += 2000;
+ msleep(interval);
+ goto retry;
+ }
+ err = SCSI_DH_RETRY;
+ } else {
+ err = SCSI_DH_OK;
+ }
+
+ /* Transitioning time exceeded, set port to standby */
+ h->state = TPGS_STATE_STANDBY;
+ break;
+ case TPGS_STATE_OFFLINE:
+ /* Path unusable */
+ err = SCSI_DH_DEV_OFFLINED;
+ break;
+ default:
+ /* Useable path if active */
+ err = SCSI_DH_OK;
+ break;
+ }
+ return err;
+}
+
+/*
+ * alua_initialize - Initialize ALUA state
+ * @sdev: the device to be initialized
+ *
+ * For the prep_fn to work correctly we have
+ * to initialize the ALUA state for the device.
+ */
+static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+ int err;
+
+ err = alua_check_tpgs(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ err = alua_vpd_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ err = alua_rtpg(sdev, h, 0);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+out:
+ return err;
+}
+/*
+ * alua_set_params - set/unset the optimize flag
+ * @sdev: device on the path to be activated
+ * params - parameters in the following format
+ * "no_of_params\0param1\0param2\0param3\0...\0"
+ * For example, to set the flag pass the following parameters
+ * from multipath.conf
+ * hardware_handler "2 alua 1"
+ */
+static int alua_set_params(struct scsi_device *sdev, const char *params)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ unsigned int optimize = 0, argc;
+ const char *p = params;
+ int result = SCSI_DH_OK;
+
+ if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
+ return -EINVAL;
+
+ while (*p++)
+ ;
+ if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
+ return -EINVAL;
+
+ if (optimize)
+ h->flags |= ALUA_OPTIMIZE_STPG;
+ else
+ h->flags &= ~ALUA_OPTIMIZE_STPG;
+
+ return result;
+}
+
+static uint optimize_stpg;
+module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
+
+/*
+ * alua_activate - activate a path
+ * @sdev: device on the path to be activated
+ *
+ * We're currently switching the port group to be activated only and
+ * let the array figure out the rest.
+ * There may be other arrays which require us to switch all port groups
+ * based on a certain policy. But until we actually encounter them it
+ * should be okay.
+ */
+static int alua_activate(struct scsi_device *sdev,
+ activate_complete fn, void *data)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ int err = SCSI_DH_OK;
+ int stpg = 0;
+
+ err = alua_rtpg(sdev, h, 1);
+ if (err != SCSI_DH_OK)
+ goto out;
+
+ if (optimize_stpg)
+ h->flags |= ALUA_OPTIMIZE_STPG;
+
+ if (h->tpgs & TPGS_MODE_EXPLICIT) {
+ switch (h->state) {
+ case TPGS_STATE_NONOPTIMIZED:
+ stpg = 1;
+ if ((h->flags & ALUA_OPTIMIZE_STPG) &&
+ (!h->pref) &&
+ (h->tpgs & TPGS_MODE_IMPLICIT))
+ stpg = 0;
+ break;
+ case TPGS_STATE_STANDBY:
+ case TPGS_STATE_UNAVAILABLE:
+ stpg = 1;
+ break;
+ case TPGS_STATE_OFFLINE:
+ err = SCSI_DH_IO;
+ break;
+ case TPGS_STATE_TRANSITIONING:
+ err = SCSI_DH_RETRY;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (stpg) {
+ h->callback_fn = fn;
+ h->callback_data = data;
+ err = submit_stpg(h);
+ if (err == SCSI_DH_OK)
+ return 0;
+ h->callback_fn = h->callback_data = NULL;
+ }
+
+out:
+ if (fn)
+ fn(data, err);
+ return 0;
+}
+
+/*
+ * alua_prep_fn - request callback
+ *
+ * Fail I/O to all paths not in state
+ * active/optimized or active/non-optimized.
+ */
+static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->state == TPGS_STATE_TRANSITIONING)
+ ret = BLKPREP_DEFER;
+ else if (h->state != TPGS_STATE_OPTIMIZED &&
+ h->state != TPGS_STATE_NONOPTIMIZED &&
+ h->state != TPGS_STATE_LBA_DEPENDENT) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static bool alua_match(struct scsi_device *sdev)
+{
+ return (scsi_device_tpgs(sdev) != 0);
+}
+
+/*
+ * alua_bus_attach - Attach device handler
+ * @sdev: device to be attached to
+ */
+static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
+{
+ struct alua_dh_data *h;
+ int err;
+
+ h = kzalloc(sizeof(*h) , GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+ h->tpgs = TPGS_MODE_UNINITIALIZED;
+ h->state = TPGS_STATE_OPTIMIZED;
+ h->group_id = -1;
+ h->rel_port = -1;
+ h->buff = h->inq;
+ h->bufflen = ALUA_INQUIRY_SIZE;
+ h->sdev = sdev;
+
+ err = alua_initialize(sdev, h);
+ if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
+ goto failed;
+
+ sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
+ return &h->dh_data;
+failed:
+ kfree(h);
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * alua_bus_detach - Detach device handler
+ * @sdev: device to be detached from
+ */
+static void alua_bus_detach(struct scsi_device *sdev)
+{
+ struct alua_dh_data *h = get_alua_data(sdev);
+
+ if (h->buff && h->inq != h->buff)
+ kfree(h->buff);
+ kfree(h);
+}
+
+static struct scsi_device_handler alua_dh = {
+ .name = ALUA_DH_NAME,
+ .module = THIS_MODULE,
+ .attach = alua_bus_attach,
+ .detach = alua_bus_detach,
+ .prep_fn = alua_prep_fn,
+ .check_sense = alua_check_sense,
+ .activate = alua_activate,
+ .set_params = alua_set_params,
+ .match = alua_match,
+};
+
+static int __init alua_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&alua_dh);
+ if (r != 0)
+ printk(KERN_ERR "%s: Failed to register scsi device handler",
+ ALUA_DH_NAME);
+ return r;
+}
+
+static void __exit alua_exit(void)
+{
+ scsi_unregister_device_handler(&alua_dh);
+}
+
+module_init(alua_init);
+module_exit(alua_exit);
+
+MODULE_DESCRIPTION("DM Multipath ALUA support");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ALUA_DH_VER);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
new file mode 100644
index 000000000..6ed1caadb
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -0,0 +1,725 @@
+/*
+ * Target driver for EMC CLARiiON AX/CX-series hardware.
+ * Based on code from Lars Marowsky-Bree <lmb@suse.de>
+ * and Ed Goggin <egoggin@emc.com>.
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+#include <scsi/scsi_device.h>
+
+#define CLARIION_NAME "emc"
+
+#define CLARIION_TRESPASS_PAGE 0x22
+#define CLARIION_BUFFER_SIZE 0xFC
+#define CLARIION_TIMEOUT (60 * HZ)
+#define CLARIION_RETRIES 3
+#define CLARIION_UNBOUND_LU -1
+#define CLARIION_SP_A 0
+#define CLARIION_SP_B 1
+
+/* Flags */
+#define CLARIION_SHORT_TRESPASS 1
+#define CLARIION_HONOR_RESERVATIONS 2
+
+/* LUN states */
+#define CLARIION_LUN_UNINITIALIZED -1
+#define CLARIION_LUN_UNBOUND 0
+#define CLARIION_LUN_BOUND 1
+#define CLARIION_LUN_OWNED 2
+
+static unsigned char long_trespass[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ CLARIION_TRESPASS_PAGE, /* Page code */
+ 0x09, /* Page length - 2 */
+ 0x01, /* Trespass code */
+ 0xff, 0xff, /* Trespass target */
+ 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
+};
+
+static unsigned char short_trespass[] = {
+ 0, 0, 0, 0,
+ CLARIION_TRESPASS_PAGE, /* Page code */
+ 0x02, /* Page length - 2 */
+ 0x01, /* Trespass code */
+ 0xff, /* Trespass target */
+};
+
+static const char * lun_state[] =
+{
+ "not bound",
+ "bound",
+ "owned",
+};
+
+struct clariion_dh_data {
+ struct scsi_dh_data dh_data;
+ /*
+ * Flags:
+ * CLARIION_SHORT_TRESPASS
+ * Use short trespass command (FC-series) or the long version
+ * (default for AX/CX CLARiiON arrays).
+ *
+ * CLARIION_HONOR_RESERVATIONS
+ * Whether or not (default) to honor SCSI reservations when
+ * initiating a switch-over.
+ */
+ unsigned flags;
+ /*
+ * I/O buffer for both MODE_SELECT and INQUIRY commands.
+ */
+ unsigned char buffer[CLARIION_BUFFER_SIZE];
+ /*
+ * SCSI sense buffer for commands -- assumes serial issuance
+ * and completion sequence of all commands for same multipath.
+ */
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ unsigned int senselen;
+ /*
+ * LUN state
+ */
+ int lun_state;
+ /*
+ * SP Port number
+ */
+ int port;
+ /*
+ * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
+ * path's mapped LUN
+ */
+ int default_sp;
+ /*
+ * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
+ * path's mapped LUN
+ */
+ int current_sp;
+};
+
+static inline struct clariion_dh_data
+ *get_clariion_data(struct scsi_device *sdev)
+{
+ return container_of(sdev->scsi_dh_data, struct clariion_dh_data,
+ dh_data);
+}
+
+/*
+ * Parse MODE_SELECT cmd reply.
+ */
+static int trespass_endio(struct scsi_device *sdev, char *sense)
+{
+ int err = SCSI_DH_IO;
+ struct scsi_sense_hdr sshdr;
+
+ if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
+ "0x%2x, 0x%2x while sending CLARiiON trespass "
+ "command.\n", CLARIION_NAME, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
+
+ if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x00)) {
+ /*
+ * Array based copy in progress -- do not send
+ * mode_select or copy will be aborted mid-stream.
+ */
+ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
+ "progress while sending CLARiiON trespass "
+ "command.\n", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x03)) {
+ /*
+ * LUN Not Ready - Manual Intervention Required
+ * indicates in-progress ucode upgrade (NDU).
+ */
+ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
+ "ucode upgrade NDU operation while sending "
+ "CLARiiON trespass command.\n", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else
+ err = SCSI_DH_DEV_FAILED;
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send MODE SELECT, no sense available\n",
+ CLARIION_NAME);
+ }
+ return err;
+}
+
+static int parse_sp_info_reply(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err = SCSI_DH_OK;
+
+ /* check for in-progress ucode upgrade (NDU) */
+ if (csdev->buffer[48] != 0) {
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
+ "ucode upgrade NDU operation while finding "
+ "current active SP.", CLARIION_NAME);
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ goto out;
+ }
+ if (csdev->buffer[4] > 2) {
+ /* Invalid buffer format */
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: invalid VPD page 0xC0 format\n",
+ CLARIION_NAME);
+ err = SCSI_DH_NOSYS;
+ goto out;
+ }
+ switch (csdev->buffer[28] & 0x0f) {
+ case 6:
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: ALUA failover mode detected\n",
+ CLARIION_NAME);
+ break;
+ case 4:
+ /* Linux failover */
+ break;
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid failover mode %d\n",
+ CLARIION_NAME, csdev->buffer[28] & 0x0f);
+ err = SCSI_DH_NOSYS;
+ goto out;
+ }
+
+ csdev->default_sp = csdev->buffer[5];
+ csdev->lun_state = csdev->buffer[4];
+ csdev->current_sp = csdev->buffer[8];
+ csdev->port = csdev->buffer[7];
+
+out:
+ return err;
+}
+
+#define emc_default_str "FC (Legacy)"
+
+static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
+{
+ unsigned char len = buffer[4] + 5;
+ char *sp_model = NULL;
+ unsigned char sp_len, serial_len;
+
+ if (len < 160) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid information section length %d\n",
+ CLARIION_NAME, len);
+ /* Check for old FC arrays */
+ if (!strncmp(buffer + 8, "DGC", 3)) {
+ /* Old FC array, not supporting extended information */
+ sp_model = emc_default_str;
+ }
+ goto out;
+ }
+
+ /*
+ * Parse extended information for SP model number
+ */
+ serial_len = buffer[160];
+ if (serial_len == 0 || serial_len + 161 > len) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid array serial number length %d\n",
+ CLARIION_NAME, serial_len);
+ goto out;
+ }
+ sp_len = buffer[99];
+ if (sp_len == 0 || serial_len + sp_len + 161 > len) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: Invalid model number length %d\n",
+ CLARIION_NAME, sp_len);
+ goto out;
+ }
+ sp_model = &buffer[serial_len + 161];
+ /* Strip whitespace at the end */
+ while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
+ sp_len--;
+
+ sp_model[sp_len] = '\0';
+
+out:
+ return sp_model;
+}
+
+/*
+ * Get block request for REQ_BLOCK_PC command issued to path. Currently
+ * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+ *
+ * Uses data and sense buffers in hardware handler context structure and
+ * assumes serial servicing of commands, both issuance and completion.
+ */
+static struct request *get_req(struct scsi_device *sdev, int cmd,
+ unsigned char *buffer)
+{
+ struct request *rq;
+ int len = 0;
+
+ rq = blk_get_request(sdev->request_queue,
+ (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
+ if (IS_ERR(rq)) {
+ sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
+ return NULL;
+ }
+
+ blk_rq_set_block_pc(rq);
+ rq->cmd_len = COMMAND_SIZE(cmd);
+ rq->cmd[0] = cmd;
+
+ switch (cmd) {
+ case MODE_SELECT:
+ len = sizeof(short_trespass);
+ rq->cmd[1] = 0x10;
+ rq->cmd[4] = len;
+ break;
+ case MODE_SELECT_10:
+ len = sizeof(long_trespass);
+ rq->cmd[1] = 0x10;
+ rq->cmd[8] = len;
+ break;
+ case INQUIRY:
+ len = CLARIION_BUFFER_SIZE;
+ rq->cmd[4] = len;
+ memset(buffer, 0, len);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ rq->timeout = CLARIION_TIMEOUT;
+ rq->retries = CLARIION_RETRIES;
+
+ if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
+ blk_put_request(rq);
+ return NULL;
+ }
+
+ return rq;
+}
+
+static int send_inquiry_cmd(struct scsi_device *sdev, int page,
+ struct clariion_dh_data *csdev)
+{
+ struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
+ int err;
+
+ if (!rq)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = csdev->senselen = 0;
+
+ rq->cmd[0] = INQUIRY;
+ if (page != 0) {
+ rq->cmd[1] = 1;
+ rq->cmd[2] = page;
+ }
+ err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ if (err == -EIO) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send %s INQUIRY: %x\n",
+ CLARIION_NAME, page?"EVPD":"standard",
+ rq->errors);
+ csdev->senselen = rq->sense_len;
+ err = SCSI_DH_IO;
+ }
+
+ blk_put_request(rq);
+
+ return err;
+}
+
+static int send_trespass_cmd(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ struct request *rq;
+ unsigned char *page22;
+ int err, len, cmd;
+
+ if (csdev->flags & CLARIION_SHORT_TRESPASS) {
+ page22 = short_trespass;
+ if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
+ /* Set Honor Reservations bit */
+ page22[6] |= 0x80;
+ len = sizeof(short_trespass);
+ cmd = MODE_SELECT;
+ } else {
+ page22 = long_trespass;
+ if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
+ /* Set Honor Reservations bit */
+ page22[10] |= 0x80;
+ len = sizeof(long_trespass);
+ cmd = MODE_SELECT_10;
+ }
+ BUG_ON((len > CLARIION_BUFFER_SIZE));
+ memcpy(csdev->buffer, page22, len);
+
+ rq = get_req(sdev, cmd, csdev->buffer);
+ if (!rq)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = csdev->senselen = 0;
+
+ err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+ if (err == -EIO) {
+ if (rq->sense_len) {
+ err = trespass_endio(sdev, csdev->sense);
+ } else {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: failed to send MODE SELECT: %x\n",
+ CLARIION_NAME, rq->errors);
+ }
+ }
+
+ blk_put_request(rq);
+
+ return err;
+}
+
+static int clariion_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
+ /*
+ * LUN Not Ready - Manual Intervention Required
+ * indicates this is a passive path.
+ *
+ * FIXME: However, if this is seen and EVPD C0
+ * indicates that this is due to a NDU in
+ * progress, we should set FAIL_PATH too.
+ * This indicates we might have to do a SCSI
+ * inquiry in the end_io path. Ugh.
+ *
+ * Can return FAILED only when we want the error
+ * recovery process to kick in.
+ */
+ return SUCCESS;
+ break;
+ case ILLEGAL_REQUEST:
+ if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
+ /*
+ * An array based copy is in progress. Do not
+ * fail the path, do not bypass to another PG,
+ * do not retry. Fail the IO immediately.
+ * (Actually this is the same conclusion as in
+ * the default handler, but lets make sure.)
+ *
+ * Can return FAILED only when we want the error
+ * recovery process to kick in.
+ */
+ return SUCCESS;
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Unit Attention Code. This is the first IO
+ * to the new path, so just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ break;
+ }
+
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct clariion_dh_data *h = get_clariion_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->lun_state != CLARIION_LUN_OWNED) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static int clariion_std_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err;
+ char *sp_model;
+
+ err = send_inquiry_cmd(sdev, 0, csdev);
+ if (err != SCSI_DH_OK && csdev->senselen) {
+ struct scsi_sense_hdr sshdr;
+
+ if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
+ "%02x/%02x/%02x\n", CLARIION_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ }
+ err = SCSI_DH_IO;
+ goto out;
+ }
+
+ sp_model = parse_sp_model(sdev, csdev->buffer);
+ if (!sp_model) {
+ err = SCSI_DH_DEV_UNSUPP;
+ goto out;
+ }
+
+ /*
+ * FC Series arrays do not support long trespass
+ */
+ if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
+ csdev->flags |= CLARIION_SHORT_TRESPASS;
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: detected Clariion %s, flags %x\n",
+ CLARIION_NAME, sp_model, csdev->flags);
+out:
+ return err;
+}
+
+static int clariion_send_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+{
+ int err, retry = CLARIION_RETRIES;
+
+retry:
+ err = send_inquiry_cmd(sdev, 0xC0, csdev);
+ if (err != SCSI_DH_OK && csdev->senselen) {
+ struct scsi_sense_hdr sshdr;
+
+ err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+ if (!err)
+ return SCSI_DH_IO;
+
+ err = clariion_check_sense(sdev, &sshdr);
+ if (retry > 0 && err == ADD_TO_MLQUEUE) {
+ retry--;
+ goto retry;
+ }
+ sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
+ "%02x/%02x/%02x\n", CLARIION_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ err = SCSI_DH_IO;
+ } else {
+ err = parse_sp_info_reply(sdev, csdev);
+ }
+ return err;
+}
+
+static int clariion_activate(struct scsi_device *sdev,
+ activate_complete fn, void *data)
+{
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ int result;
+
+ result = clariion_send_inquiry(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+ if (csdev->lun_state == CLARIION_LUN_OWNED)
+ goto done;
+
+ result = send_trespass_cmd(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+ sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
+ CLARIION_NAME,
+ csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );
+
+ /* Update status */
+ result = clariion_send_inquiry(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+done:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: at SP %c Port %d (%s, default SP %c)\n",
+ CLARIION_NAME, csdev->current_sp + 'A',
+ csdev->port, lun_state[csdev->lun_state],
+ csdev->default_sp + 'A');
+
+ if (fn)
+ fn(data, result);
+ return 0;
+}
+/*
+ * params - parameters in the following format
+ * "no_of_params\0param1\0param2\0param3\0...\0"
+ * for example, string for 2 parameters with value 10 and 21
+ * is specified as "2\010\021\0".
+ */
+static int clariion_set_params(struct scsi_device *sdev, const char *params)
+{
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ unsigned int hr = 0, st = 0, argc;
+ const char *p = params;
+ int result = SCSI_DH_OK;
+
+ if ((sscanf(params, "%u", &argc) != 1) || (argc != 2))
+ return -EINVAL;
+
+ while (*p++)
+ ;
+ if ((sscanf(p, "%u", &st) != 1) || (st > 1))
+ return -EINVAL;
+
+ while (*p++)
+ ;
+ if ((sscanf(p, "%u", &hr) != 1) || (hr > 1))
+ return -EINVAL;
+
+ if (st)
+ csdev->flags |= CLARIION_SHORT_TRESPASS;
+ else
+ csdev->flags &= ~CLARIION_SHORT_TRESPASS;
+
+ if (hr)
+ csdev->flags |= CLARIION_HONOR_RESERVATIONS;
+ else
+ csdev->flags &= ~CLARIION_HONOR_RESERVATIONS;
+
+ /*
+ * If this path is owned, we have to send a trespass command
+ * with the new parameters. If not, simply return. Next trespass
+ * command would use the parameters.
+ */
+ if (csdev->lun_state != CLARIION_LUN_OWNED)
+ goto done;
+
+ csdev->lun_state = CLARIION_LUN_UNINITIALIZED;
+ result = send_trespass_cmd(sdev, csdev);
+ if (result != SCSI_DH_OK)
+ goto done;
+
+ /* Update status */
+ result = clariion_send_inquiry(sdev, csdev);
+
+done:
+ return result;
+}
+
+static const struct {
+ char *vendor;
+ char *model;
+} clariion_dev_list[] = {
+ {"DGC", "RAID"},
+ {"DGC", "DISK"},
+ {"DGC", "VRAID"},
+ {NULL, NULL},
+};
+
+static bool clariion_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; clariion_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+ strlen(clariion_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, clariion_dev_list[i].model,
+ strlen(clariion_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
+{
+ struct clariion_dh_data *h;
+ int err;
+
+ h = kzalloc(sizeof(*h) , GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+ h->lun_state = CLARIION_LUN_UNINITIALIZED;
+ h->default_sp = CLARIION_UNBOUND_LU;
+ h->current_sp = CLARIION_UNBOUND_LU;
+
+ err = clariion_std_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ err = clariion_send_inquiry(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: connected to SP %c Port %d (%s, default SP %c)\n",
+ CLARIION_NAME, h->current_sp + 'A',
+ h->port, lun_state[h->lun_state],
+ h->default_sp + 'A');
+ return &h->dh_data;
+
+failed:
+ kfree(h);
+ return ERR_PTR(-EINVAL);
+}
+
+static void clariion_bus_detach(struct scsi_device *sdev)
+{
+ struct clariion_dh_data *h = get_clariion_data(sdev);
+
+ kfree(h);
+}
+
+static struct scsi_device_handler clariion_dh = {
+ .name = CLARIION_NAME,
+ .module = THIS_MODULE,
+ .attach = clariion_bus_attach,
+ .detach = clariion_bus_detach,
+ .check_sense = clariion_check_sense,
+ .activate = clariion_activate,
+ .prep_fn = clariion_prep_fn,
+ .set_params = clariion_set_params,
+ .match = clariion_match,
+};
+
+static int __init clariion_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&clariion_dh);
+ if (r != 0)
+ printk(KERN_ERR "%s: Failed to register scsi device handler.",
+ CLARIION_NAME);
+ return r;
+}
+
+static void __exit clariion_exit(void)
+{
+ scsi_unregister_device_handler(&clariion_dh);
+}
+
+module_init(clariion_init);
+module_exit(clariion_exit);
+
+MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
new file mode 100644
index 000000000..485d99544
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -0,0 +1,400 @@
+/*
+ * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
+ * upgraded.
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define HP_SW_NAME "hp_sw"
+
+#define HP_SW_TIMEOUT (60 * HZ)
+#define HP_SW_RETRIES 3
+
+#define HP_SW_PATH_UNINITIALIZED -1
+#define HP_SW_PATH_ACTIVE 0
+#define HP_SW_PATH_PASSIVE 1
+
+struct hp_sw_dh_data {
+ struct scsi_dh_data dh_data;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int path_state;
+ int retries;
+ int retry_cnt;
+ struct scsi_device *sdev;
+ activate_complete callback_fn;
+ void *callback_data;
+};
+
+static int hp_sw_start_stop(struct hp_sw_dh_data *);
+
+static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
+{
+ return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data);
+}
+
+/*
+ * tur_done - Handle TEST UNIT READY return status
+ * @sdev: sdev the command has been sent to
+ * @errors: blk error code
+ *
+ * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
+ */
+static int tur_done(struct scsi_device *sdev, unsigned char *sense)
+{
+ struct scsi_sense_hdr sshdr;
+ int ret;
+
+ ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!ret) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed, no sense available\n",
+ HP_SW_NAME);
+ ret = SCSI_DH_IO;
+ goto done;
+ }
+ switch (sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ ret = SCSI_DH_IMM_RETRY;
+ break;
+ case NOT_READY:
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
+ /*
+ * LUN not ready - Initialization command required
+ *
+ * This is the passive path
+ */
+ ret = SCSI_DH_DEV_OFFLINED;
+ break;
+ }
+ /* Fallthrough */
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed, sense %x/%x/%x\n",
+ HP_SW_NAME, sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ break;
+ }
+
+done:
+ return ret;
+}
+
+/*
+ * hp_sw_tur - Send TEST UNIT READY
+ * @sdev: sdev command should be sent to
+ *
+ * Use the TEST UNIT READY command to determine
+ * the path state.
+ */
+static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
+{
+ struct request *req;
+ int ret;
+
+retry:
+ req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
+ if (IS_ERR(req))
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ blk_rq_set_block_pc(req);
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
+ req->cmd[0] = TEST_UNIT_READY;
+ req->timeout = HP_SW_TIMEOUT;
+ req->sense = h->sense;
+ memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ req->sense_len = 0;
+
+ ret = blk_execute_rq(req->q, NULL, req, 1);
+ if (ret == -EIO) {
+ if (req->sense_len > 0) {
+ ret = tur_done(sdev, h->sense);
+ } else {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed with %x\n",
+ HP_SW_NAME, req->errors);
+ ret = SCSI_DH_IO;
+ }
+ } else {
+ h->path_state = HP_SW_PATH_ACTIVE;
+ ret = SCSI_DH_OK;
+ }
+ if (ret == SCSI_DH_IMM_RETRY) {
+ blk_put_request(req);
+ goto retry;
+ }
+ if (ret == SCSI_DH_DEV_OFFLINED) {
+ h->path_state = HP_SW_PATH_PASSIVE;
+ ret = SCSI_DH_OK;
+ }
+
+ blk_put_request(req);
+
+ return ret;
+}
+
+/*
+ * start_done - Handle START STOP UNIT return status
+ * @sdev: sdev the command has been sent to
+ * @errors: blk error code
+ */
+static int start_done(struct scsi_device *sdev, unsigned char *sense)
+{
+ struct scsi_sense_hdr sshdr;
+ int rc;
+
+ rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!rc) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "no sense available\n",
+ HP_SW_NAME);
+ return SCSI_DH_IO;
+ }
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ rc = SCSI_DH_RETRY;
+ break;
+ }
+ /* fall through */
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
+ HP_SW_NAME, sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ rc = SCSI_DH_IO;
+ }
+
+ return rc;
+}
+
+static void start_stop_endio(struct request *req, int error)
+{
+ struct hp_sw_dh_data *h = req->end_io_data;
+ unsigned err = SCSI_DH_OK;
+
+ if (error || host_byte(req->errors) != DID_OK ||
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
+ sdev_printk(KERN_WARNING, h->sdev,
+ "%s: sending start_stop_unit failed with %x\n",
+ HP_SW_NAME, req->errors);
+ err = SCSI_DH_IO;
+ goto done;
+ }
+
+ if (req->sense_len > 0) {
+ err = start_done(h->sdev, h->sense);
+ if (err == SCSI_DH_RETRY) {
+ err = SCSI_DH_IO;
+ if (--h->retry_cnt) {
+ blk_put_request(req);
+ err = hp_sw_start_stop(h);
+ if (err == SCSI_DH_OK)
+ return;
+ }
+ }
+ }
+done:
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
+ if (h->callback_fn) {
+ h->callback_fn(h->callback_data, err);
+ h->callback_fn = h->callback_data = NULL;
+ }
+ return;
+
+}
+
+/*
+ * hp_sw_start_stop - Send START STOP UNIT command
+ * @sdev: sdev command should be sent to
+ *
+ * Sending START STOP UNIT activates the SP.
+ */
+static int hp_sw_start_stop(struct hp_sw_dh_data *h)
+{
+ struct request *req;
+
+ req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
+ if (IS_ERR(req))
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ blk_rq_set_block_pc(req);
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ req->cmd_len = COMMAND_SIZE(START_STOP);
+ req->cmd[0] = START_STOP;
+ req->cmd[4] = 1; /* Start spin cycle */
+ req->timeout = HP_SW_TIMEOUT;
+ req->sense = h->sense;
+ memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ req->sense_len = 0;
+ req->end_io_data = h;
+
+ blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
+ return SCSI_DH_OK;
+}
+
+static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->path_state != HP_SW_PATH_ACTIVE) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+/*
+ * hp_sw_activate - Activate a path
+ * @sdev: sdev on the path to be activated
+ *
+ * The HP Active/Passive firmware is pretty simple;
+ * the passive path reports NOT READY with sense codes
+ * 0x04/0x02; a START STOP UNIT command will then
+ * activate the passive path (and deactivate the
+ * previously active one).
+ */
+static int hp_sw_activate(struct scsi_device *sdev,
+ activate_complete fn, void *data)
+{
+ int ret = SCSI_DH_OK;
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+
+ ret = hp_sw_tur(sdev, h);
+
+ if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
+ h->retry_cnt = h->retries;
+ h->callback_fn = fn;
+ h->callback_data = data;
+ ret = hp_sw_start_stop(h);
+ if (ret == SCSI_DH_OK)
+ return 0;
+ h->callback_fn = h->callback_data = NULL;
+ }
+
+ if (fn)
+ fn(data, ret);
+ return 0;
+}
+
+static const struct {
+ char *vendor;
+ char *model;
+} hp_sw_dh_data_list[] = {
+ {"COMPAQ", "MSA1000 VOLUME"},
+ {"COMPAQ", "HSV110"},
+ {"HP", "HSV100"},
+ {"DEC", "HSG80"},
+ {NULL, NULL},
+};
+
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+ strlen(hp_sw_dh_data_list[i].vendor)) &&
+ !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+ strlen(hp_sw_dh_data_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
+{
+ struct hp_sw_dh_data *h;
+ int ret;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+ h->path_state = HP_SW_PATH_UNINITIALIZED;
+ h->retries = HP_SW_RETRIES;
+ h->sdev = sdev;
+
+ ret = hp_sw_tur(sdev, h);
+ if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
+ goto failed;
+
+ sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
+ HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
+ "active":"passive");
+ return &h->dh_data;
+failed:
+ kfree(h);
+ return ERR_PTR(-EINVAL);
+}
+
+static void hp_sw_bus_detach( struct scsi_device *sdev )
+{
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+
+ kfree(h);
+}
+
+static struct scsi_device_handler hp_sw_dh = {
+ .name = HP_SW_NAME,
+ .module = THIS_MODULE,
+ .attach = hp_sw_bus_attach,
+ .detach = hp_sw_bus_detach,
+ .activate = hp_sw_activate,
+ .prep_fn = hp_sw_prep_fn,
+ .match = hp_sw_match,
+};
+
+static int __init hp_sw_init(void)
+{
+ return scsi_register_device_handler(&hp_sw_dh);
+}
+
+static void __exit hp_sw_exit(void)
+{
+ scsi_unregister_device_handler(&hp_sw_dh);
+}
+
+module_init(hp_sw_init);
+module_exit(hp_sw_exit);
+
+MODULE_DESCRIPTION("HP Active/Passive driver");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
new file mode 100644
index 000000000..b46ace3d4
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -0,0 +1,937 @@
+/*
+ * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define RDAC_NAME "rdac"
+#define RDAC_RETRY_COUNT 5
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+#define RDAC_TIMEOUT (60 * HZ)
+#define RDAC_RETRIES 3
+
+struct rdac_mode_6_hdr {
+ u8 data_len;
+ u8 medium_type;
+ u8 device_params;
+ u8 block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+ u16 data_len;
+ u8 medium_type;
+ u8 device_params;
+ u16 reserved;
+ u16 block_desc_len;
+};
+
+struct rdac_mode_common {
+ u8 controller_serial[16];
+ u8 alt_controller_serial[16];
+ u8 rdac_mode[2];
+ u8 alt_rdac_mode[2];
+ u8 quiescence_timeout;
+ u8 rdac_options;
+};
+
+struct rdac_pg_legacy {
+ struct rdac_mode_6_hdr hdr;
+ u8 page_code;
+ u8 page_len;
+ struct rdac_mode_common common;
+#define MODE6_MAX_LUN 32
+ u8 lun_table[MODE6_MAX_LUN];
+ u8 reserved2[32];
+ u8 reserved3;
+ u8 reserved4;
+};
+
+struct rdac_pg_expanded {
+ struct rdac_mode_10_hdr hdr;
+ u8 page_code;
+ u8 subpage_code;
+ u8 page_len[2];
+ struct rdac_mode_common common;
+ u8 lun_table[256];
+ u8 reserved3;
+ u8 reserved4;
+};
+
+struct c9_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC9 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "vace" */
+ u8 avte_cvp;
+ u8 path_prio;
+ u8 reserved2[38];
+};
+
+#define SUBSYS_ID_LEN 16
+#define SLOT_ID_LEN 2
+#define ARRAY_LABEL_LEN 31
+
+struct c4_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC4 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "subs" */
+ u8 subsys_id[SUBSYS_ID_LEN];
+ u8 revision[4];
+ u8 slot_id[SLOT_ID_LEN];
+ u8 reserved[2];
+};
+
+#define UNIQUE_ID_LEN 16
+struct c8_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC8 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "edid" */
+ u8 reserved2[3];
+ u8 vol_uniq_id_len;
+ u8 vol_uniq_id[16];
+ u8 vol_user_label_len;
+ u8 vol_user_label[60];
+ u8 array_uniq_id_len;
+ u8 array_unique_id[UNIQUE_ID_LEN];
+ u8 array_user_label_len;
+ u8 array_user_label[60];
+ u8 lun[8];
+};
+
+struct rdac_controller {
+ u8 array_id[UNIQUE_ID_LEN];
+ int use_ms10;
+ struct kref kref;
+ struct list_head node; /* list of all controllers */
+ union {
+ struct rdac_pg_legacy legacy;
+ struct rdac_pg_expanded expanded;
+ } mode_select;
+ u8 index;
+ u8 array_name[ARRAY_LABEL_LEN];
+ struct Scsi_Host *host;
+ spinlock_t ms_lock;
+ int ms_queued;
+ struct work_struct ms_work;
+ struct scsi_device *ms_sdev;
+ struct list_head ms_head;
+};
+
+struct c2_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC2 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "swr4" */
+ u8 sw_version[3];
+ u8 sw_date[3];
+ u8 features_enabled;
+ u8 max_lun_supported;
+ u8 partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_dh_data {
+ struct scsi_dh_data dh_data;
+ struct rdac_controller *ctlr;
+#define UNINITIALIZED_LUN (1 << 8)
+ unsigned lun;
+
+#define RDAC_MODE 0
+#define RDAC_MODE_AVT 1
+#define RDAC_MODE_IOSHIP 2
+ unsigned char mode;
+
+#define RDAC_STATE_ACTIVE 0
+#define RDAC_STATE_PASSIVE 1
+ unsigned char state;
+
+#define RDAC_LUN_UNOWNED 0
+#define RDAC_LUN_OWNED 1
+ char lun_state;
+
+#define RDAC_PREFERRED 0
+#define RDAC_NON_PREFERRED 1
+ char preferred;
+
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ union {
+ struct c2_inquiry c2;
+ struct c4_inquiry c4;
+ struct c8_inquiry c8;
+ struct c9_inquiry c9;
+ } inq;
+};
+
+static const char *mode[] = {
+ "RDAC",
+ "AVT",
+ "IOSHIP",
+};
+static const char *lun_state[] =
+{
+ "unowned",
+ "owned",
+};
+
+struct rdac_queue_data {
+ struct list_head entry;
+ struct rdac_dh_data *h;
+ activate_complete callback_fn;
+ void *callback_data;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+static struct workqueue_struct *kmpath_rdacd;
+static void send_mode_select(struct work_struct *work);
+
+/*
+ * module parameter to enable rdac debug logging.
+ * 2 bits for each type of logging, only two types defined for now
+ * Can be enhanced if required at later point
+ */
+static int rdac_logging = 1;
+module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
+ "Default is 1 - failover logging enabled, "
+ "set it to 0xF to enable all the logs");
+
+#define RDAC_LOG_FAILOVER 0
+#define RDAC_LOG_SENSE 2
+
+#define RDAC_LOG_BITS 2
+
+#define RDAC_LOG_LEVEL(SHIFT) \
+ ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))
+
+#define RDAC_LOG(SHIFT, sdev, f, arg...) \
+do { \
+ if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
+ sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
+} while (0);
+
+static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
+{
+ return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data);
+}
+
+static struct request *get_rdac_req(struct scsi_device *sdev,
+ void *buffer, unsigned buflen, int rw)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+
+ rq = blk_get_request(q, rw, GFP_NOIO);
+
+ if (IS_ERR(rq)) {
+ sdev_printk(KERN_INFO, sdev,
+ "get_rdac_req: blk_get_request failed.\n");
+ return NULL;
+ }
+ blk_rq_set_block_pc(rq);
+
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
+ blk_put_request(rq);
+ sdev_printk(KERN_INFO, sdev,
+ "get_rdac_req: blk_rq_map_kern failed.\n");
+ return NULL;
+ }
+
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ rq->retries = RDAC_RETRIES;
+ rq->timeout = RDAC_TIMEOUT;
+
+ return rq;
+}
+
+static struct request *rdac_failover_get(struct scsi_device *sdev,
+ struct rdac_dh_data *h, struct list_head *list)
+{
+ struct request *rq;
+ struct rdac_mode_common *common;
+ unsigned data_size;
+ struct rdac_queue_data *qdata;
+ u8 *lun_table;
+
+ if (h->ctlr->use_ms10) {
+ struct rdac_pg_expanded *rdac_pg;
+
+ data_size = sizeof(struct rdac_pg_expanded);
+ rdac_pg = &h->ctlr->mode_select.expanded;
+ memset(rdac_pg, 0, data_size);
+ common = &rdac_pg->common;
+ rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+ rdac_pg->subpage_code = 0x1;
+ rdac_pg->page_len[0] = 0x01;
+ rdac_pg->page_len[1] = 0x28;
+ lun_table = rdac_pg->lun_table;
+ } else {
+ struct rdac_pg_legacy *rdac_pg;
+
+ data_size = sizeof(struct rdac_pg_legacy);
+ rdac_pg = &h->ctlr->mode_select.legacy;
+ memset(rdac_pg, 0, data_size);
+ common = &rdac_pg->common;
+ rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+ rdac_pg->page_len = 0x68;
+ lun_table = rdac_pg->lun_table;
+ }
+ common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+ common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+ common->rdac_options = RDAC_FORCED_QUIESENCE;
+
+ list_for_each_entry(qdata, list, entry) {
+ lun_table[qdata->h->lun] = 0x81;
+ }
+
+ /* get request for block layer packet command */
+ rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
+ if (!rq)
+ return NULL;
+
+ /* Prepare the command. */
+ if (h->ctlr->use_ms10) {
+ rq->cmd[0] = MODE_SELECT_10;
+ rq->cmd[7] = data_size >> 8;
+ rq->cmd[8] = data_size & 0xff;
+ } else {
+ rq->cmd[0] = MODE_SELECT;
+ rq->cmd[4] = data_size;
+ }
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
+ return rq;
+}
+
+static void release_controller(struct kref *kref)
+{
+ struct rdac_controller *ctlr;
+ ctlr = container_of(kref, struct rdac_controller, kref);
+
+ list_del(&ctlr->node);
+ kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(int index, char *array_name,
+ u8 *array_id, struct scsi_device *sdev)
+{
+ struct rdac_controller *ctlr, *tmp;
+
+ list_for_each_entry(tmp, &ctlr_list, node) {
+ if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
+ (tmp->index == index) &&
+ (tmp->host == sdev->host)) {
+ kref_get(&tmp->kref);
+ return tmp;
+ }
+ }
+ ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+ if (!ctlr)
+ return NULL;
+
+ /* initialize fields of controller */
+ memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
+ ctlr->index = index;
+ ctlr->host = sdev->host;
+ memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
+
+ kref_init(&ctlr->kref);
+ ctlr->use_ms10 = -1;
+ ctlr->ms_queued = 0;
+ ctlr->ms_sdev = NULL;
+ spin_lock_init(&ctlr->ms_lock);
+ INIT_WORK(&ctlr->ms_work, send_mode_select);
+ INIT_LIST_HEAD(&ctlr->ms_head);
+ list_add(&ctlr->node, &ctlr_list);
+
+ return ctlr;
+}
+
+static int submit_inquiry(struct scsi_device *sdev, int page_code,
+ unsigned int len, struct rdac_dh_data *h)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_rdac_req(sdev, &h->inq, len, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 1;
+ rq->cmd[2] = page_code;
+ rq->cmd[4] = len;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
+ err = blk_execute_rq(q, NULL, rq, 1);
+ if (err == -EIO)
+ err = SCSI_DH_IO;
+
+ blk_put_request(rq);
+done:
+ return err;
+}
+
+static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
+ char *array_name, u8 *array_id)
+{
+ int err, i;
+ struct c8_inquiry *inqp;
+
+ err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c8;
+ if (inqp->page_code != 0xc8)
+ return SCSI_DH_NOSYS;
+ if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
+ inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
+ return SCSI_DH_NOSYS;
+ h->lun = inqp->lun[7]; /* Uses only the last byte */
+
+ for(i=0; i<ARRAY_LABEL_LEN-1; ++i)
+ *(array_name+i) = inqp->array_user_label[(2*i)+1];
+
+ *(array_name+ARRAY_LABEL_LEN-1) = '\0';
+ memset(array_id, 0, UNIQUE_ID_LEN);
+ memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
+ }
+ return err;
+}
+
+static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
+{
+ int err;
+ struct c9_inquiry *inqp;
+
+ h->state = RDAC_STATE_ACTIVE;
+ err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c9;
+ /* detect the operating mode */
+ if ((inqp->avte_cvp >> 5) & 0x1)
+ h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
+ else if (inqp->avte_cvp >> 7)
+ h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
+ else
+ h->mode = RDAC_MODE; /* LUN in RDAC mode */
+
+ /* Update ownership */
+ if (inqp->avte_cvp & 0x1)
+ h->lun_state = RDAC_LUN_OWNED;
+ else {
+ h->lun_state = RDAC_LUN_UNOWNED;
+ if (h->mode == RDAC_MODE)
+ h->state = RDAC_STATE_PASSIVE;
+ }
+
+ /* Update path prio*/
+ if (inqp->path_prio & 0x1)
+ h->preferred = RDAC_PREFERRED;
+ else
+ h->preferred = RDAC_NON_PREFERRED;
+ }
+
+ return err;
+}
+
+static int initialize_controller(struct scsi_device *sdev,
+ struct rdac_dh_data *h, char *array_name, u8 *array_id)
+{
+ int err, index;
+ struct c4_inquiry *inqp;
+
+ err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c4;
+ /* get the controller index */
+ if (inqp->slot_id[1] == 0x31)
+ index = 0;
+ else
+ index = 1;
+
+ spin_lock(&list_lock);
+ h->ctlr = get_controller(index, array_name, array_id, sdev);
+ if (!h->ctlr)
+ err = SCSI_DH_RES_TEMP_UNAVAIL;
+ spin_unlock(&list_lock);
+ }
+ return err;
+}
+
+static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
+{
+ int err;
+ struct c2_inquiry *inqp;
+
+ err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c2;
+ /*
+ * If more than MODE6_MAX_LUN luns are supported, use
+ * mode select 10
+ */
+ if (inqp->max_lun_supported >= MODE6_MAX_LUN)
+ h->ctlr->use_ms10 = 1;
+ else
+ h->ctlr->use_ms10 = 0;
+ }
+ return err;
+}
+
+static int mode_select_handle_sense(struct scsi_device *sdev,
+ unsigned char *sensebuf)
+{
+ struct scsi_sense_hdr sense_hdr;
+ int err = SCSI_DH_IO, ret;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+ if (!ret)
+ goto done;
+
+ switch (sense_hdr.sense_key) {
+ case NO_SENSE:
+ case ABORTED_COMMAND:
+ case UNIT_ATTENTION:
+ err = SCSI_DH_RETRY;
+ break;
+ case NOT_READY:
+ if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
+ /* LUN Not Ready and is in the Process of Becoming
+ * Ready
+ */
+ err = SCSI_DH_RETRY;
+ break;
+ case ILLEGAL_REQUEST:
+ if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
+ /*
+ * Command Lock contention
+ */
+ err = SCSI_DH_RETRY;
+ break;
+ default:
+ break;
+ }
+
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+ "MODE_SELECT returned with sense %02x/%02x/%02x",
+ (char *) h->ctlr->array_name, h->ctlr->index,
+ sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+
+done:
+ return err;
+}
+
+static void send_mode_select(struct work_struct *work)
+{
+ struct rdac_controller *ctlr =
+ container_of(work, struct rdac_controller, ms_work);
+ struct request *rq;
+ struct scsi_device *sdev = ctlr->ms_sdev;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct request_queue *q = sdev->request_queue;
+ int err, retry_cnt = RDAC_RETRY_COUNT;
+ struct rdac_queue_data *tmp, *qdata;
+ LIST_HEAD(list);
+
+ spin_lock(&ctlr->ms_lock);
+ list_splice_init(&ctlr->ms_head, &list);
+ ctlr->ms_queued = 0;
+ ctlr->ms_sdev = NULL;
+ spin_unlock(&ctlr->ms_lock);
+
+retry:
+ err = SCSI_DH_RES_TEMP_UNAVAIL;
+ rq = rdac_failover_get(sdev, h, &list);
+ if (!rq)
+ goto done;
+
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+ "%s MODE_SELECT command",
+ (char *) h->ctlr->array_name, h->ctlr->index,
+ (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
+
+ err = blk_execute_rq(q, NULL, rq, 1);
+ blk_put_request(rq);
+ if (err != SCSI_DH_OK) {
+ err = mode_select_handle_sense(sdev, h->sense);
+ if (err == SCSI_DH_RETRY && retry_cnt--)
+ goto retry;
+ }
+ if (err == SCSI_DH_OK) {
+ h->state = RDAC_STATE_ACTIVE;
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+ "MODE_SELECT completed",
+ (char *) h->ctlr->array_name, h->ctlr->index);
+ }
+
+done:
+ list_for_each_entry_safe(qdata, tmp, &list, entry) {
+ list_del(&qdata->entry);
+ if (err == SCSI_DH_OK)
+ qdata->h->state = RDAC_STATE_ACTIVE;
+ if (qdata->callback_fn)
+ qdata->callback_fn(qdata->callback_data, err);
+ kfree(qdata);
+ }
+ return;
+}
+
+static int queue_mode_select(struct scsi_device *sdev,
+ activate_complete fn, void *data)
+{
+ struct rdac_queue_data *qdata;
+ struct rdac_controller *ctlr;
+
+ qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
+ if (!qdata)
+ return SCSI_DH_RETRY;
+
+ qdata->h = get_rdac_data(sdev);
+ qdata->callback_fn = fn;
+ qdata->callback_data = data;
+
+ ctlr = qdata->h->ctlr;
+ spin_lock(&ctlr->ms_lock);
+ list_add_tail(&qdata->entry, &ctlr->ms_head);
+ if (!ctlr->ms_queued) {
+ ctlr->ms_queued = 1;
+ ctlr->ms_sdev = sdev;
+ queue_work(kmpath_rdacd, &ctlr->ms_work);
+ }
+ spin_unlock(&ctlr->ms_lock);
+ return SCSI_DH_OK;
+}
+
+static int rdac_activate(struct scsi_device *sdev,
+ activate_complete fn, void *data)
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int err = SCSI_DH_OK;
+ int act = 0;
+
+ err = check_ownership(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto done;
+
+ switch (h->mode) {
+ case RDAC_MODE:
+ if (h->lun_state == RDAC_LUN_UNOWNED)
+ act = 1;
+ break;
+ case RDAC_MODE_IOSHIP:
+ if ((h->lun_state == RDAC_LUN_UNOWNED) &&
+ (h->preferred == RDAC_PREFERRED))
+ act = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (act) {
+ err = queue_mode_select(sdev, fn, data);
+ if (err == SCSI_DH_OK)
+ return 0;
+ }
+done:
+ if (fn)
+ fn(data, err);
+ return 0;
+}
+
+static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->state != RDAC_STATE_ACTIVE) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static int rdac_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
+ "I/O returned with sense %02x/%02x/%02x",
+ (char *) h->ctlr->array_name, h->ctlr->index,
+ sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
+
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
+ /* LUN Not Ready - Logical Unit Not Ready and is in
+ * the process of becoming ready
+ * Just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
+ /* LUN Not Ready - Storage firmware incompatible
+ * Manual code synchonisation required.
+ *
+ * Nothing we can do here. Try to bypass the path.
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
+ /* LUN Not Ready - Quiescense in progress
+ *
+ * Just retry and wait.
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
+ /* LUN Not Ready - Quiescense in progress
+ * or has been achieved
+ * Just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ break;
+ case ILLEGAL_REQUEST:
+ if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
+ /* Invalid Request - Current Logical Unit Ownership.
+ * Controller is not the current owner of the LUN,
+ * Fail the path, so that the other path be used.
+ */
+ h->state = RDAC_STATE_PASSIVE;
+ return SUCCESS;
+ }
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Power On, Reset, or Bus Device Reset, just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
+ /*
+ * Quiescence in progress , just retry.
+ */
+ return ADD_TO_MLQUEUE;
+ break;
+ }
+ /* success just means we do not care what scsi-ml does */
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+static const struct {
+ char *vendor;
+ char *model;
+} rdac_dev_list[] = {
+ {"IBM", "1722"},
+ {"IBM", "1724"},
+ {"IBM", "1726"},
+ {"IBM", "1742"},
+ {"IBM", "1745"},
+ {"IBM", "1746"},
+ {"IBM", "1813"},
+ {"IBM", "1814"},
+ {"IBM", "1815"},
+ {"IBM", "1818"},
+ {"IBM", "3526"},
+ {"SGI", "TP9"},
+ {"SGI", "IS"},
+ {"STK", "OPENstorage D280"},
+ {"STK", "FLEXLINE 380"},
+ {"SUN", "CSM"},
+ {"SUN", "LCSM100"},
+ {"SUN", "STK6580_6780"},
+ {"SUN", "SUN_6180"},
+ {"SUN", "ArrayStorage"},
+ {"DELL", "MD3"},
+ {"NETAPP", "INF-01-00"},
+ {"LSI", "INF-01-00"},
+ {"ENGENIO", "INF-01-00"},
+ {NULL, NULL},
+};
+
+static bool rdac_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; rdac_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+ strlen(rdac_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, rdac_dev_list[i].model,
+ strlen(rdac_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
+{
+ struct rdac_dh_data *h;
+ int err;
+ char array_name[ARRAY_LABEL_LEN];
+ char array_id[UNIQUE_ID_LEN];
+
+ h = kzalloc(sizeof(*h) , GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+ h->lun = UNINITIALIZED_LUN;
+ h->state = RDAC_STATE_ACTIVE;
+
+ err = get_lun_info(sdev, h, array_name, array_id);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ err = initialize_controller(sdev, h, array_name, array_id);
+ if (err != SCSI_DH_OK)
+ goto failed;
+
+ err = check_ownership(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto clean_ctlr;
+
+ err = set_mode_select(sdev, h);
+ if (err != SCSI_DH_OK)
+ goto clean_ctlr;
+
+ sdev_printk(KERN_NOTICE, sdev,
+ "%s: LUN %d (%s) (%s)\n",
+ RDAC_NAME, h->lun, mode[(int)h->mode],
+ lun_state[(int)h->lun_state]);
+
+ return &h->dh_data;
+
+clean_ctlr:
+ spin_lock(&list_lock);
+ kref_put(&h->ctlr->kref, release_controller);
+ spin_unlock(&list_lock);
+
+failed:
+ kfree(h);
+ return ERR_PTR(-EINVAL);
+}
+
+static void rdac_bus_detach( struct scsi_device *sdev )
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ if (h->ctlr && h->ctlr->ms_queued)
+ flush_workqueue(kmpath_rdacd);
+
+ spin_lock(&list_lock);
+ if (h->ctlr)
+ kref_put(&h->ctlr->kref, release_controller);
+ spin_unlock(&list_lock);
+ kfree(h);
+}
+
+static struct scsi_device_handler rdac_dh = {
+ .name = RDAC_NAME,
+ .module = THIS_MODULE,
+ .prep_fn = rdac_prep_fn,
+ .check_sense = rdac_check_sense,
+ .attach = rdac_bus_attach,
+ .detach = rdac_bus_detach,
+ .activate = rdac_activate,
+ .match = rdac_match,
+};
+
+static int __init rdac_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&rdac_dh);
+ if (r != 0) {
+ printk(KERN_ERR "Failed to register scsi device handler.");
+ goto done;
+ }
+
+ /*
+ * Create workqueue to handle mode selects for rdac
+ */
+ kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
+ if (!kmpath_rdacd) {
+ scsi_unregister_device_handler(&rdac_dh);
+ printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+ r = -EINVAL;
+ }
+done:
+ return r;
+}
+
+static void __exit rdac_exit(void)
+{
+ destroy_workqueue(kmpath_rdacd);
+ scsi_unregister_device_handler(&rdac_dh);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_VERSION("01.00.0000.0000");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
new file mode 100644
index 000000000..3e088125a
--- /dev/null
+++ b/drivers/scsi/dmx3191d.c
@@ -0,0 +1,162 @@
+/*
+ dmx3191d.c - driver for the Domex DMX3191D SCSI card.
+ Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it>
+ Portions Copyright (C) 2004 by Christoph Hellwig <hch@lst.de>
+
+ Based on the generic NCR5380 driver by Drew Eckhardt et al.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+#include <scsi/scsi_host.h>
+
+/*
+ * Definitions for the generic 5380 driver.
+ */
+
+#define DONT_USE_INTR
+
+#define NCR5380_read(reg) inb(port + reg)
+#define NCR5380_write(reg, value) outb(value, port + reg)
+
+#define NCR5380_implementation_fields /* none */
+#define NCR5380_local_declare() unsigned int port
+#define NCR5380_setup(instance) port = instance->io_port
+
+/*
+ * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h)
+ */
+#include <linux/delay.h>
+
+#include "NCR5380.h"
+#include "NCR5380.c"
+
+#define DMX3191D_DRIVER_NAME "dmx3191d"
+#define DMX3191D_REGION_LEN 8
+
+
+static struct scsi_host_template dmx3191d_driver_template = {
+ .proc_name = DMX3191D_DRIVER_NAME,
+ .name = "Domex DMX3191D",
+ .info = NCR5380_info,
+ .queuecommand = NCR5380_queue_command,
+ .eh_abort_handler = NCR5380_abort,
+ .eh_bus_reset_handler = NCR5380_bus_reset,
+ .can_queue = 32,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+static int dmx3191d_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ unsigned long io;
+ int error = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ goto out;
+
+ io = pci_resource_start(pdev, 0);
+ if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) {
+ printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n",
+ io, io + DMX3191D_REGION_LEN);
+ goto out_disable_device;
+ }
+
+ shost = scsi_host_alloc(&dmx3191d_driver_template,
+ sizeof(struct NCR5380_hostdata));
+ if (!shost)
+ goto out_release_region;
+ shost->io_port = io;
+
+ /* This card does not seem to raise an interrupt on pdev->irq.
+ * Steam-powered SCSI controllers run without an IRQ anyway.
+ */
+ shost->irq = NO_IRQ;
+
+ NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);
+
+ pci_set_drvdata(pdev, shost);
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+ goto out_release_region;
+
+ scsi_scan_host(shost);
+ return 0;
+
+ out_release_region:
+ release_region(io, DMX3191D_REGION_LEN);
+ out_disable_device:
+ pci_disable_device(pdev);
+ out:
+ return error;
+}
+
+static void dmx3191d_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+
+ scsi_remove_host(shost);
+
+ NCR5380_exit(shost);
+
+ release_region(shost->io_port, DMX3191D_REGION_LEN);
+ pci_disable_device(pdev);
+
+ scsi_host_put(shost);
+}
+
+static struct pci_device_id dmx3191d_pci_tbl[] = {
+ {PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, dmx3191d_pci_tbl);
+
+static struct pci_driver dmx3191d_pci_driver = {
+ .name = DMX3191D_DRIVER_NAME,
+ .id_table = dmx3191d_pci_tbl,
+ .probe = dmx3191d_probe_one,
+ .remove = dmx3191d_remove_one,
+};
+
+static int __init dmx3191d_init(void)
+{
+ return pci_register_driver(&dmx3191d_pci_driver);
+}
+
+static void __exit dmx3191d_exit(void)
+{
+ pci_unregister_driver(&dmx3191d_pci_driver);
+}
+
+module_init(dmx3191d_init);
+module_exit(dmx3191d_exit);
+
+MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
+MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
new file mode 100644
index 000000000..bd9e31e16
--- /dev/null
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -0,0 +1,446 @@
+#ifndef _SCSI_I2O_H
+#define _SCSI_I2O_H
+
+/* I2O kernel space accessible structures/APIs
+ *
+ * (c) Copyright 1999, 2000 Red Hat Software
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *************************************************************************
+ *
+ * This header file defined the I2O APIs/structures for use by
+ * the I2O kernel modules.
+ *
+ */
+
+#ifdef __KERNEL__ /* This file to be included by kernel only */
+
+#include <linux/i2o-dev.h>
+
+#include <linux/notifier.h>
+#include <linux/atomic.h>
+
+
+/*
+ * Tunable parameters first
+ */
+
+/* How many different OSM's are we allowing */
+#define MAX_I2O_MODULES 64
+
+#define I2O_EVT_CAPABILITY_OTHER 0x01
+#define I2O_EVT_CAPABILITY_CHANGED 0x02
+
+#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
+
+//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
+
+#define I2O_MAX_MANAGERS 4
+
+/*
+ * I2O Interface Objects
+ */
+
+#include <linux/wait.h>
+typedef wait_queue_head_t adpt_wait_queue_head_t;
+#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
+typedef wait_queue_t adpt_wait_queue_t;
+
+/*
+ * message structures
+ */
+
+struct i2o_message
+{
+ u8 version_offset;
+ u8 flags;
+ u16 size;
+ u32 target_tid:12;
+ u32 init_tid:12;
+ u32 function:8;
+ u32 initiator_context;
+ /* List follows */
+};
+
+struct adpt_device;
+struct _adpt_hba;
+struct i2o_device
+{
+ struct i2o_device *next; /* Chain */
+ struct i2o_device *prev;
+
+ char dev_name[8]; /* linux /dev name if available */
+ i2o_lct_entry lct_data;/* Device LCT information */
+ u32 flags;
+ struct proc_dir_entry* proc_entry; /* /proc dir */
+ struct adpt_device *owner;
+ struct _adpt_hba *controller; /* Controlling IOP */
+};
+
+/*
+ * Each I2O controller has one of these objects
+ */
+
+struct i2o_controller
+{
+ char name[16];
+ int unit;
+ int type;
+ int enabled;
+
+ struct notifier_block *event_notifer; /* Events */
+ atomic_t users;
+ struct i2o_device *devices; /* I2O device chain */
+ struct i2o_controller *next; /* Controller chain */
+
+};
+
+/*
+ * I2O System table entry
+ */
+struct i2o_sys_tbl_entry
+{
+ u16 org_id;
+ u16 reserved1;
+ u32 iop_id:12;
+ u32 reserved2:20;
+ u16 seg_num:12;
+ u16 i2o_version:4;
+ u8 iop_state;
+ u8 msg_type;
+ u16 frame_size;
+ u16 reserved3;
+ u32 last_changed;
+ u32 iop_capabilities;
+ u32 inbound_low;
+ u32 inbound_high;
+};
+
+struct i2o_sys_tbl
+{
+ u8 num_entries;
+ u8 version;
+ u16 reserved1;
+ u32 change_ind;
+ u32 reserved2;
+ u32 reserved3;
+ struct i2o_sys_tbl_entry iops[0];
+};
+
+/*
+ * I2O classes / subclasses
+ */
+
+/* Class ID and Code Assignments
+ * (LCT.ClassID.Version field)
+ */
+#define I2O_CLASS_VERSION_10 0x00
+#define I2O_CLASS_VERSION_11 0x01
+
+/* Class code names
+ * (from v1.5 Table 6-1 Class Code Assignments.)
+ */
+
+#define I2O_CLASS_EXECUTIVE 0x000
+#define I2O_CLASS_DDM 0x001
+#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
+#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
+#define I2O_CLASS_LAN 0x020
+#define I2O_CLASS_WAN 0x030
+#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
+#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
+#define I2O_CLASS_SCSI_PERIPHERAL 0x051
+#define I2O_CLASS_ATE_PORT 0x060
+#define I2O_CLASS_ATE_PERIPHERAL 0x061
+#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
+#define I2O_CLASS_FLOPPY_DEVICE 0x071
+#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
+#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
+#define I2O_CLASS_PEER_TRANSPORT 0x091
+
+/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
+ */
+
+#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
+
+/* Subclasses
+ */
+
+#define I2O_SUBCLASS_i960 0x001
+#define I2O_SUBCLASS_HDM 0x020
+#define I2O_SUBCLASS_ISM 0x021
+
+/* Operation functions */
+
+#define I2O_PARAMS_FIELD_GET 0x0001
+#define I2O_PARAMS_LIST_GET 0x0002
+#define I2O_PARAMS_MORE_GET 0x0003
+#define I2O_PARAMS_SIZE_GET 0x0004
+#define I2O_PARAMS_TABLE_GET 0x0005
+#define I2O_PARAMS_FIELD_SET 0x0006
+#define I2O_PARAMS_LIST_SET 0x0007
+#define I2O_PARAMS_ROW_ADD 0x0008
+#define I2O_PARAMS_ROW_DELETE 0x0009
+#define I2O_PARAMS_TABLE_CLEAR 0x000A
+
+/*
+ * I2O serial number conventions / formats
+ * (circa v1.5)
+ */
+
+#define I2O_SNFORMAT_UNKNOWN 0
+#define I2O_SNFORMAT_BINARY 1
+#define I2O_SNFORMAT_ASCII 2
+#define I2O_SNFORMAT_UNICODE 3
+#define I2O_SNFORMAT_LAN48_MAC 4
+#define I2O_SNFORMAT_WAN 5
+
+/* Plus new in v2.0 (Yellowstone pdf doc)
+ */
+
+#define I2O_SNFORMAT_LAN64_MAC 6
+#define I2O_SNFORMAT_DDM 7
+#define I2O_SNFORMAT_IEEE_REG64 8
+#define I2O_SNFORMAT_IEEE_REG128 9
+#define I2O_SNFORMAT_UNKNOWN2 0xff
+
+/* Transaction Reply Lists (TRL) Control Word structure */
+
+#define TRL_SINGLE_FIXED_LENGTH 0x00
+#define TRL_SINGLE_VARIABLE_LENGTH 0x40
+#define TRL_MULTIPLE_FIXED_LENGTH 0x80
+
+/*
+ * Messaging API values
+ */
+
+#define I2O_CMD_ADAPTER_ASSIGN 0xB3
+#define I2O_CMD_ADAPTER_READ 0xB2
+#define I2O_CMD_ADAPTER_RELEASE 0xB5
+#define I2O_CMD_BIOS_INFO_SET 0xA5
+#define I2O_CMD_BOOT_DEVICE_SET 0xA7
+#define I2O_CMD_CONFIG_VALIDATE 0xBB
+#define I2O_CMD_CONN_SETUP 0xCA
+#define I2O_CMD_DDM_DESTROY 0xB1
+#define I2O_CMD_DDM_ENABLE 0xD5
+#define I2O_CMD_DDM_QUIESCE 0xC7
+#define I2O_CMD_DDM_RESET 0xD9
+#define I2O_CMD_DDM_SUSPEND 0xAF
+#define I2O_CMD_DEVICE_ASSIGN 0xB7
+#define I2O_CMD_DEVICE_RELEASE 0xB9
+#define I2O_CMD_HRT_GET 0xA8
+#define I2O_CMD_ADAPTER_CLEAR 0xBE
+#define I2O_CMD_ADAPTER_CONNECT 0xC9
+#define I2O_CMD_ADAPTER_RESET 0xBD
+#define I2O_CMD_LCT_NOTIFY 0xA2
+#define I2O_CMD_OUTBOUND_INIT 0xA1
+#define I2O_CMD_PATH_ENABLE 0xD3
+#define I2O_CMD_PATH_QUIESCE 0xC5
+#define I2O_CMD_PATH_RESET 0xD7
+#define I2O_CMD_STATIC_MF_CREATE 0xDD
+#define I2O_CMD_STATIC_MF_RELEASE 0xDF
+#define I2O_CMD_STATUS_GET 0xA0
+#define I2O_CMD_SW_DOWNLOAD 0xA9
+#define I2O_CMD_SW_UPLOAD 0xAB
+#define I2O_CMD_SW_REMOVE 0xAD
+#define I2O_CMD_SYS_ENABLE 0xD1
+#define I2O_CMD_SYS_MODIFY 0xC1
+#define I2O_CMD_SYS_QUIESCE 0xC3
+#define I2O_CMD_SYS_TAB_SET 0xA3
+
+#define I2O_CMD_UTIL_NOP 0x00
+#define I2O_CMD_UTIL_ABORT 0x01
+#define I2O_CMD_UTIL_CLAIM 0x09
+#define I2O_CMD_UTIL_RELEASE 0x0B
+#define I2O_CMD_UTIL_PARAMS_GET 0x06
+#define I2O_CMD_UTIL_PARAMS_SET 0x05
+#define I2O_CMD_UTIL_EVT_REGISTER 0x13
+#define I2O_CMD_UTIL_EVT_ACK 0x14
+#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
+#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
+#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
+#define I2O_CMD_UTIL_LOCK 0x17
+#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
+#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
+
+#define I2O_CMD_SCSI_EXEC 0x81
+#define I2O_CMD_SCSI_ABORT 0x83
+#define I2O_CMD_SCSI_BUSRESET 0x27
+
+#define I2O_CMD_BLOCK_READ 0x30
+#define I2O_CMD_BLOCK_WRITE 0x31
+#define I2O_CMD_BLOCK_CFLUSH 0x37
+#define I2O_CMD_BLOCK_MLOCK 0x49
+#define I2O_CMD_BLOCK_MUNLOCK 0x4B
+#define I2O_CMD_BLOCK_MMOUNT 0x41
+#define I2O_CMD_BLOCK_MEJECT 0x43
+
+#define I2O_PRIVATE_MSG 0xFF
+
+/*
+ * Init Outbound Q status
+ */
+
+#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
+#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
+#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
+#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
+
+/*
+ * I2O Get Status State values
+ */
+
+#define ADAPTER_STATE_INITIALIZING 0x01
+#define ADAPTER_STATE_RESET 0x02
+#define ADAPTER_STATE_HOLD 0x04
+#define ADAPTER_STATE_READY 0x05
+#define ADAPTER_STATE_OPERATIONAL 0x08
+#define ADAPTER_STATE_FAILED 0x10
+#define ADAPTER_STATE_FAULTED 0x11
+
+/* I2O API function return values */
+
+#define I2O_RTN_NO_ERROR 0
+#define I2O_RTN_NOT_INIT 1
+#define I2O_RTN_FREE_Q_EMPTY 2
+#define I2O_RTN_TCB_ERROR 3
+#define I2O_RTN_TRANSACTION_ERROR 4
+#define I2O_RTN_ADAPTER_ALREADY_INIT 5
+#define I2O_RTN_MALLOC_ERROR 6
+#define I2O_RTN_ADPTR_NOT_REGISTERED 7
+#define I2O_RTN_MSG_REPLY_TIMEOUT 8
+#define I2O_RTN_NO_STATUS 9
+#define I2O_RTN_NO_FIRM_VER 10
+#define I2O_RTN_NO_LINK_SPEED 11
+
+/* Reply message status defines for all messages */
+
+#define I2O_REPLY_STATUS_SUCCESS 0x00
+#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
+#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
+#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
+#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
+#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
+#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
+#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
+#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
+#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
+#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
+#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
+
+/* Status codes and Error Information for Parameter functions */
+
+#define I2O_PARAMS_STATUS_SUCCESS 0x00
+#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
+#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
+#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
+#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
+#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
+#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
+#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
+#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
+#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
+#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
+#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
+#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
+#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
+#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
+#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
+#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
+
+/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
+ * messages: Table 3-2 Detailed Status Codes.*/
+
+#define I2O_DSC_SUCCESS 0x0000
+#define I2O_DSC_BAD_KEY 0x0002
+#define I2O_DSC_TCL_ERROR 0x0003
+#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
+#define I2O_DSC_NO_SUCH_PAGE 0x0005
+#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
+#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
+#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
+#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
+#define I2O_DSC_DEVICE_LOCKED 0x000B
+#define I2O_DSC_DEVICE_RESET 0x000C
+#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
+#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
+#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
+#define I2O_DSC_INVALID_OFFSET 0x0010
+#define I2O_DSC_INVALID_PARAMETER 0x0011
+#define I2O_DSC_INVALID_REQUEST 0x0012
+#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
+#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
+#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
+#define I2O_DSC_MISSING_PARAMETER 0x0016
+#define I2O_DSC_TIMEOUT 0x0017
+#define I2O_DSC_UNKNOWN_ERROR 0x0018
+#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
+#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
+#define I2O_DSC_DEVICE_BUSY 0x001B
+#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
+
+/* Device Claim Types */
+#define I2O_CLAIM_PRIMARY 0x01000000
+#define I2O_CLAIM_MANAGEMENT 0x02000000
+#define I2O_CLAIM_AUTHORIZED 0x03000000
+#define I2O_CLAIM_SECONDARY 0x04000000
+
+/* Message header defines for VersionOffset */
+#define I2OVER15 0x0001
+#define I2OVER20 0x0002
+/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
+#define I2OVERSION I2OVER15
+#define SGL_OFFSET_0 I2OVERSION
+#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
+#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
+#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
+#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
+#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
+#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
+#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
+#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
+
+#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
+#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
+
+ /* msg header defines for MsgFlags */
+#define MSG_STATIC 0x0100
+#define MSG_64BIT_CNTXT 0x0200
+#define MSG_MULTI_TRANS 0x1000
+#define MSG_FAIL 0x2000
+#define MSG_LAST 0x4000
+#define MSG_REPLY 0x8000
+
+ /* minimum size msg */
+#define THREE_WORD_MSG_SIZE 0x00030000
+#define FOUR_WORD_MSG_SIZE 0x00040000
+#define FIVE_WORD_MSG_SIZE 0x00050000
+#define SIX_WORD_MSG_SIZE 0x00060000
+#define SEVEN_WORD_MSG_SIZE 0x00070000
+#define EIGHT_WORD_MSG_SIZE 0x00080000
+#define NINE_WORD_MSG_SIZE 0x00090000
+#define TEN_WORD_MSG_SIZE 0x000A0000
+#define I2O_MESSAGE_SIZE(x) ((x)<<16)
+
+
+/* Special TID Assignments */
+
+#define ADAPTER_TID 0
+#define HOST_TID 1
+
+#define MSG_FRAME_SIZE 128
+#define NMBR_MSG_FRAMES 128
+
+#define MSG_POOL_SIZE 16384
+
+#define I2O_POST_WAIT_OK 0
+#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _SCSI_I2O_H */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
new file mode 100644
index 000000000..f60236721
--- /dev/null
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -0,0 +1,139 @@
+/***************************************************************************
+ dpti_ioctl.h - description
+ -------------------
+ begin : Thu Sep 7 2000
+ copyright : (C) 2001 by Adaptec
+
+ See Documentation/scsi/dpti.txt for history, notes, license info
+ and credits
+ ***************************************************************************/
+
+/***************************************************************************
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ ***************************************************************************/
+
+/***************************************************************************
+ * This file is generated from osd_unix.h *
+ * *************************************************************************/
+
+#ifndef _dpti_ioctl_h
+#define _dpti_ioctl_h
+
+// IOCTL interface commands
+
+#ifndef _IOWR
+# define _IOWR(x,y,z) (((x)<<8)|y)
+#endif
+#ifndef _IOW
+# define _IOW(x,y,z) (((x)<<8)|y)
+#endif
+#ifndef _IOR
+# define _IOR(x,y,z) (((x)<<8)|y)
+#endif
+#ifndef _IO
+# define _IO(x,y) (((x)<<8)|y)
+#endif
+/* EATA PassThrough Command */
+#define EATAUSRCMD _IOWR('D',65,EATA_CP)
+/* Set Debug Level If Enabled */
+#define DPT_DEBUG _IOW('D',66,int)
+/* Get Signature Structure */
+#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S)
+#if defined __bsdi__
+#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed)
+#endif
+/* Get Number Of DPT Adapters */
+#define DPT_NUMCTRLS _IOR('D',68,int)
+/* Get Adapter Info Structure */
+#define DPT_CTRLINFO _IOR('D',69,CtrlInfo)
+/* Get Statistics If Enabled */
+#define DPT_STATINFO _IO('D',70)
+/* Clear Stats If Enabled */
+#define DPT_CLRSTAT _IO('D',71)
+/* Get System Info Structure */
+#define DPT_SYSINFO _IOR('D',72,sysInfo_S)
+/* Set Timeout Value */
+#define DPT_TIMEOUT _IO('D',73)
+/* Get config Data */
+#define DPT_CONFIG _IO('D',74)
+/* Get Blink LED Code */
+#define DPT_BLINKLED _IOR('D',75,int)
+/* Get Statistical information (if available) */
+#define DPT_STATS_INFO _IOR('D',80,STATS_DATA)
+/* Clear the statistical information */
+#define DPT_STATS_CLEAR _IO('D',81)
+/* Get Performance metrics */
+#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t)
+/* Send an I2O command */
+#define I2OUSRCMD _IO('D',76)
+/* Inform driver to re-acquire LCT information */
+#define I2ORESCANCMD _IO('D',77)
+/* Inform driver to reset adapter */
+#define I2ORESETCMD _IO('D',78)
+/* See if the target is mounted */
+#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T)
+
+
+ /* Structure Returned From Get Controller Info */
+
+typedef struct {
+ uCHAR state; /* Operational state */
+ uCHAR id; /* Host adapter SCSI id */
+ int vect; /* Interrupt vector number */
+ int base; /* Base I/O address */
+ int njobs; /* # of jobs sent to HA */
+ int qdepth; /* Controller queue depth. */
+ int wakebase; /* mpx wakeup base index. */
+ uINT SGsize; /* Scatter/Gather list size. */
+ unsigned heads; /* heads for drives on cntlr. */
+ unsigned sectors; /* sectors for drives on cntlr. */
+ uCHAR do_drive32; /* Flag for Above 16 MB Ability */
+ uCHAR BusQuiet; /* SCSI Bus Quiet Flag */
+ char idPAL[4]; /* 4 Bytes Of The ID Pal */
+ uCHAR primary; /* 1 For Primary, 0 For Secondary */
+ uCHAR eataVersion; /* EATA Version */
+ uINT cpLength; /* EATA Command Packet Length */
+ uINT spLength; /* EATA Status Packet Length */
+ uCHAR drqNum; /* DRQ Index (0,5,6,7) */
+ uCHAR flag1; /* EATA Flags 1 (Byte 9) */
+ uCHAR flag2; /* EATA Flags 2 (Byte 30) */
+} CtrlInfo;
+
+typedef struct {
+ uSHORT length; // Remaining length of this
+ uSHORT drvrHBAnum; // Relative HBA # used by the driver
+ uINT baseAddr; // Base I/O address
+ uSHORT blinkState; // Blink LED state (0=Not in blink LED)
+ uCHAR pciBusNum; // PCI Bus # (Optional)
+ uCHAR pciDeviceNum; // PCI Device # (Optional)
+ uSHORT hbaFlags; // Miscellaneous HBA flags
+ uSHORT Interrupt; // Interrupt set for this device.
+# if (defined(_DPT_ARC))
+ uINT baseLength;
+ ADAPTER_OBJECT *AdapterObject;
+ LARGE_INTEGER DmaLogicalAddress;
+ PVOID DmaVirtualAddress;
+ LARGE_INTEGER ReplyLogicalAddress;
+ PVOID ReplyVirtualAddress;
+# else
+ uINT reserved1; // Reserved for future expansion
+ uINT reserved2; // Reserved for future expansion
+ uINT reserved3; // Reserved for future expansion
+# endif
+} drvrHBAinfo_S;
+
+typedef struct TARGET_BUSY
+{
+ uLONG channel;
+ uLONG id;
+ uLONG lun;
+ uLONG isBusy;
+} TARGET_BUSY_T;
+
+#endif
+
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
new file mode 100644
index 000000000..a6644b332
--- /dev/null
+++ b/drivers/scsi/dpt/dptsig.h
@@ -0,0 +1,336 @@
+/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
+
+/*
+ * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source form, with or without modification, are
+ * permitted provided that redistributions of source code must retain the
+ * above copyright notice, this list of conditions and the following disclaimer.
+ *
+ * This software is provided `as is' by Distributed Processing Technology and
+ * any express or implied warranties, including, but not limited to, the
+ * implied warranties of merchantability and fitness for a particular purpose,
+ * are disclaimed. In no event shall Distributed Processing Technology be
+ * liable for any direct, indirect, incidental, special, exemplary or
+ * consequential damages (including, but not limited to, procurement of
+ * substitute goods or services; loss of use, data, or profits; or business
+ * interruptions) however caused and on any theory of liability, whether in
+ * contract, strict liability, or tort (including negligence or otherwise)
+ * arising in any way out of the use of this driver software, even if advised
+ * of the possibility of such damage.
+ *
+ */
+
+#ifndef __DPTSIG_H_
+#define __DPTSIG_H_
+#ifdef _SINIX_ADDON
+#include "dpt.h"
+#endif
+/* DPT SIGNATURE SPEC AND HEADER FILE */
+/* Signature Version 1 (sorry no 'A') */
+
+/* to make sure we are talking the same size under all OS's */
+typedef unsigned char sigBYTE;
+typedef unsigned short sigWORD;
+typedef unsigned int sigINT;
+
+/*
+ * use sigWORDLittleEndian for:
+ * dsCapabilities
+ * dsDeviceSupp
+ * dsAdapterSupp
+ * dsApplication
+ * use sigLONGLittleEndian for:
+ * dsOS
+ * so that the sig can be standardised to Little Endian
+ */
+#if (defined(_DPT_BIG_ENDIAN))
+# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF))
+# define sigLONGLittleEndian(x) \
+ ((((x)&0xFF)<<24) | \
+ (((x)&0xFF00)<<8) | \
+ (((x)&0xFF0000L)>>8) | \
+ (((x)&0xFF000000L)>>24))
+#else
+# define sigWORDLittleEndian(x) (x)
+# define sigLONGLittleEndian(x) (x)
+#endif
+
+/* must make sure the structure is not word or double-word aligned */
+/* --------------------------------------------------------------- */
+/* Borland will ignore the following pragma: */
+/* Word alignment is OFF by default. If in the, IDE make */
+/* sure that Options | Compiler | Code Generation | Word Alignment */
+/* is not checked. If using BCC, do not use the -a option. */
+
+#ifndef NO_PACK
+#if defined (_DPT_AIX)
+#pragma options align=packed
+#else
+#pragma pack(1)
+#endif /* aix */
+#endif
+/* For the Macintosh */
+#ifdef STRUCTALIGNMENTSUPPORTED
+#pragma options align=mac68k
+#endif
+
+
+/* Current Signature Version - sigBYTE dsSigVersion; */
+/* ------------------------------------------------------------------ */
+#define SIG_VERSION 1
+
+/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */
+/* ------------------------------------------------------------------ */
+/* What type of processor the file is meant to run on. */
+/* This will let us know whether to read sigWORDs as high/low or low/high. */
+#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */
+#define PROC_MOTOROLA 0x01 /* Motorola 68K */
+#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
+#define PROC_ALPHA 0x03 /* DEC Alpha */
+#define PROC_POWERPC 0x04 /* IBM Power PC */
+#define PROC_i960 0x05 /* Intel i960 */
+#define PROC_ULTRASPARC 0x06 /* SPARC processor */
+
+/* Specific Minimim Processor - sigBYTE dsProcessor; FLAG BITS */
+/* ------------------------------------------------------------------ */
+/* Different bit definitions dependent on processor_family */
+
+/* PROC_INTEL: */
+#define PROC_8086 0x01 /* Intel 8086 */
+#define PROC_286 0x02 /* Intel 80286 */
+#define PROC_386 0x04 /* Intel 80386 */
+#define PROC_486 0x08 /* Intel 80486 */
+#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
+#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
+#define PROC_IA64 0x40 /* Intel IA64 processor */
+
+/* PROC_i960: */
+#define PROC_960RX 0x01 /* Intel 80960RC/RD */
+#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
+
+/* PROC_MOTOROLA: */
+#define PROC_68000 0x01 /* Motorola 68000 */
+#define PROC_68010 0x02 /* Motorola 68010 */
+#define PROC_68020 0x04 /* Motorola 68020 */
+#define PROC_68030 0x08 /* Motorola 68030 */
+#define PROC_68040 0x10 /* Motorola 68040 */
+
+/* PROC_POWERPC */
+#define PROC_PPC601 0x01 /* PowerPC 601 */
+#define PROC_PPC603 0x02 /* PowerPC 603 */
+#define PROC_PPC604 0x04 /* PowerPC 604 */
+
+/* PROC_MIPS4000: */
+#define PROC_R4000 0x01 /* MIPS R4000 */
+
+/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
+/* ------------------------------------------------------------------ */
+#define FT_EXECUTABLE 0 /* Executable Program */
+#define FT_SCRIPT 1 /* Script/Batch File??? */
+#define FT_HBADRVR 2 /* HBA Driver */
+#define FT_OTHERDRVR 3 /* Other Driver */
+#define FT_IFS 4 /* Installable Filesystem Driver */
+#define FT_ENGINE 5 /* DPT Engine */
+#define FT_COMPDRVR 6 /* Compressed Driver Disk */
+#define FT_LANGUAGE 7 /* Foreign Language file */
+#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */
+#define FT_COMMMODL 9 /* Communications Module */
+#define FT_INT13 10 /* INT 13 style HBA Driver */
+#define FT_HELPFILE 11 /* Help file */
+#define FT_LOGGER 12 /* Event Logger */
+#define FT_INSTALL 13 /* An Install Program */
+#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
+#define FT_RESOURCE 15 /* Storage Manager Resource File */
+#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
+
+/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define FTF_DLL 0x01 /* Dynamic Link Library */
+#define FTF_NLM 0x02 /* Netware Loadable Module */
+#define FTF_OVERLAYS 0x04 /* Uses overlays */
+#define FTF_DEBUG 0x08 /* Debug version */
+#define FTF_TSR 0x10 /* TSR */
+#define FTF_SYS 0x20 /* DOS Loadable driver */
+#define FTF_PROTECTED 0x40 /* Runs in protected mode */
+#define FTF_APP_SPEC 0x80 /* Application Specific */
+#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */
+
+/* OEM - sigBYTE dsOEM; DISTINCT VALUES */
+/* ------------------------------------------------------------------ */
+#define OEM_DPT 0 /* DPT */
+#define OEM_ATT 1 /* ATT */
+#define OEM_NEC 2 /* NEC */
+#define OEM_ALPHA 3 /* Alphatronix */
+#define OEM_AST 4 /* AST */
+#define OEM_OLIVETTI 5 /* Olivetti */
+#define OEM_SNI 6 /* Siemens/Nixdorf */
+#define OEM_SUN 7 /* SUN Microsystems */
+
+/* Operating System - sigLONG dsOS; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define OS_DOS 0x00000001 /* PC/MS-DOS */
+#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */
+#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */
+#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */
+#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */
+#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */
+#define OS_NW286 0x00000040 /* Novell NetWare 286 */
+#define OS_NW386 0x00000080 /* Novell NetWare 386 */
+#define OS_GEN_UNIX 0x00000100 /* Generic Unix */
+#define OS_SCO_UNIX 0x00000200 /* SCO Unix */
+#define OS_ATT_UNIX 0x00000400 /* ATT Unix */
+#define OS_UNIXWARE 0x00000800 /* USL Unix */
+#define OS_INT_UNIX 0x00001000 /* Interactive Unix */
+#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */
+#define OS_QNX 0x00004000 /* QNX for Tom Moch */
+#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */
+#define OS_BANYAN 0x00010000 /* Banyan Vines */
+#define OS_OLIVETTI_UNIX 0x00020000/* Olivetti Unix */
+#define OS_MAC_OS 0x00040000 /* Mac OS */
+#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */
+#define OS_NW4x 0x00100000 /* Novell Netware 4.x */
+#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */
+#define OS_AIX_UNIX 0x00400000 /* AIX Unix */
+#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */
+#define OS_LINUX 0x01000000 /* Linux */
+#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */
+#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
+#define OS_PLAN9 0x08000000 /* ATT Plan 9 */
+#define OS_TSX 0x10000000 /* SNH TSX-32 */
+
+#define OS_OTHER 0x80000000 /* Other */
+
+/* Capabilities - sigWORD dsCapabilities; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define CAP_RAID0 0x0001 /* RAID-0 */
+#define CAP_RAID1 0x0002 /* RAID-1 */
+#define CAP_RAID3 0x0004 /* RAID-3 */
+#define CAP_RAID5 0x0008 /* RAID-5 */
+#define CAP_SPAN 0x0010 /* Spanning */
+#define CAP_PASS 0x0020 /* Provides passthrough */
+#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */
+#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */
+#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */
+#define CAP_EXTEND 0x8000 /* Extended info appears after description */
+#ifdef SNI_MIPS
+#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */
+#endif
+
+/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define DEV_DASD 0x0001 /* DASD (hard drives) */
+#define DEV_TAPE 0x0002 /* Tape drives */
+#define DEV_PRINTER 0x0004 /* Printers */
+#define DEV_PROC 0x0008 /* Processors */
+#define DEV_WORM 0x0010 /* WORM drives */
+#define DEV_CDROM 0x0020 /* CD-ROM drives */
+#define DEV_SCANNER 0x0040 /* Scanners */
+#define DEV_OPTICAL 0x0080 /* Optical Drives */
+#define DEV_JUKEBOX 0x0100 /* Jukebox */
+#define DEV_COMM 0x0200 /* Communications Devices */
+#define DEV_OTHER 0x0400 /* Other Devices */
+#define DEV_ALL 0xFFFF /* All SCSI Devices */
+
+/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define ADF_2001 0x0001 /* PM2001 */
+#define ADF_2012A 0x0002 /* PM2012A */
+#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */
+#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */
+#define ADF_SC3_ISA 0x0010 /* PM2021 */
+#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */
+#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */
+#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */
+#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */
+#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */
+#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */
+/*
+ * Combinations of products
+ */
+#define ADF_ALL_2000 (ADF_2001|ADF_2012A)
+#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA)
+#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI)
+#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI)
+#define ADF_ALL_SC5 (ADF_SC5_PCI)
+/* All EATA Cacheing Products */
+#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4)
+/* All EATA Bus Mastering Products */
+#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE)
+/* All EATA Adapter Products */
+#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER)
+#define ADF_ALL ADF_ALL_EATA
+
+/* Application - sigWORD dsApplication; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define APP_DPTMGR 0x0001 /* DPT Storage Manager */
+#define APP_ENGINE 0x0002 /* DPT Engine */
+#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */
+#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */
+#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */
+#define APP_NOVABACK 0x0020 /* NovaStor Novaback */
+#define APP_AIM 0x0040 /* Archive Information Manager */
+
+/* Requirements - sigBYTE dsRequirements; FLAG BITS */
+/* ------------------------------------------------------------------ */
+#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */
+#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */
+#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */
+#define REQ_ASPI_TRAN 0x08 /* Requires an ASPI Transport Modules */
+#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
+#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
+
+/*
+ * You may adjust dsDescription_size with an override to a value less than
+ * 50 so that the structure allocates less real space.
+ */
+#if (!defined(dsDescription_size))
+# define dsDescription_size 50
+#endif
+
+typedef struct dpt_sig {
+ char dsSignature[6]; /* ALWAYS "dPtSiG" */
+ sigBYTE dsSigVersion; /* signature version (currently 1) */
+ sigBYTE dsProcessorFamily; /* what type of processor */
+ sigBYTE dsProcessor; /* precise processor */
+ sigBYTE dsFiletype; /* type of file */
+ sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
+ sigBYTE dsOEM; /* OEM file was created for */
+ sigINT dsOS; /* which Operating systems */
+ sigWORD dsCapabilities; /* RAID levels, etc. */
+ sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
+ sigWORD dsAdapterSupp; /* DPT adapter families supported */
+ sigWORD dsApplication; /* applications file is for */
+ sigBYTE dsRequirements; /* Other driver dependencies */
+ sigBYTE dsVersion; /* 1 */
+ sigBYTE dsRevision; /* 'J' */
+ sigBYTE dsSubRevision; /* '9' ' ' if N/A */
+ sigBYTE dsMonth; /* creation month */
+ sigBYTE dsDay; /* creation day */
+ sigBYTE dsYear; /* creation year since 1980 (1993=13) */
+ /* description (NULL terminated) */
+ char dsDescription[dsDescription_size];
+} dpt_sig_S;
+/* 32 bytes minimum - with no description. Put NULL at description[0] */
+/* 81 bytes maximum - with 49 character description plus NULL. */
+
+/* This line added at Roycroft's request */
+/* Microsoft's NT compiler gets confused if you do a pack and don't */
+/* restore it. */
+
+#ifndef NO_UNPACK
+#if defined (_DPT_AIX)
+#pragma options align=reset
+#elif defined (UNPACK_FOUR)
+#pragma pack(4)
+#else
+#pragma pack()
+#endif /* aix */
+#endif
+/* For the Macintosh */
+#ifdef STRUCTALIGNMENTSUPPORTED
+#pragma options align=reset
+#endif
+
+#endif
diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h
new file mode 100644
index 000000000..de3ae5722
--- /dev/null
+++ b/drivers/scsi/dpt/osd_defs.h
@@ -0,0 +1,79 @@
+/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */
+/*
+ * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source form, with or without modification, are
+ * permitted provided that redistributions of source code must retain the
+ * above copyright notice, this list of conditions and the following disclaimer.
+ *
+ * This software is provided `as is' by Distributed Processing Technology and
+ * any express or implied warranties, including, but not limited to, the
+ * implied warranties of merchantability and fitness for a particular purpose,
+ * are disclaimed. In no event shall Distributed Processing Technology be
+ * liable for any direct, indirect, incidental, special, exemplary or
+ * consequential damages (including, but not limited to, procurement of
+ * substitute goods or services; loss of use, data, or profits; or business
+ * interruptions) however caused and on any theory of liability, whether in
+ * contract, strict liability, or tort (including negligence or otherwise)
+ * arising in any way out of the use of this driver software, even if advised
+ * of the possibility of such damage.
+ *
+ */
+
+#ifndef _OSD_DEFS_H
+#define _OSD_DEFS_H
+
+/*File - OSD_DEFS.H
+ ****************************************************************************
+ *
+ *Description:
+ *
+ * This file contains the OS dependent defines. This file is included
+ *in osd_util.h and provides the OS specific defines for that file.
+ *
+ *Copyright Distributed Processing Technology, Corp.
+ * 140 Candace Dr.
+ * Maitland, Fl. 32751 USA
+ * Phone: (407) 830-5522 Fax: (407) 260-5366
+ * All Rights Reserved
+ *
+ *Author: Doug Anderson
+ *Date: 1/31/94
+ *
+ *Editors:
+ *
+ *Remarks:
+ *
+ *
+ *****************************************************************************/
+
+
+/*Definitions - Defines & Constants ----------------------------------------- */
+
+ /* Define the operating system */
+#if (defined(__linux__))
+# define _DPT_LINUX
+#elif (defined(__bsdi__))
+# define _DPT_BSDI
+#elif (defined(__FreeBSD__))
+# define _DPT_FREE_BSD
+#else
+# define _DPT_SCO
+#endif
+
+#if defined (ZIL_CURSES)
+#define _DPT_CURSES
+#else
+#define _DPT_MOTIF
+#endif
+
+ /* Redefine 'far' to nothing - no far pointer type required in UNIX */
+#define far
+
+ /* Define the mutually exclusive semaphore type */
+#define SEMAPHORE_T unsigned int *
+ /* Define a handle to a DLL */
+#define DLL_HANDLE_T unsigned int *
+
+#endif
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
new file mode 100644
index 000000000..b2613c2ea
--- /dev/null
+++ b/drivers/scsi/dpt/osd_util.h
@@ -0,0 +1,358 @@
+/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
+
+/*
+ * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source form, with or without modification, are
+ * permitted provided that redistributions of source code must retain the
+ * above copyright notice, this list of conditions and the following disclaimer.
+ *
+ * This software is provided `as is' by Distributed Processing Technology and
+ * any express or implied warranties, including, but not limited to, the
+ * implied warranties of merchantability and fitness for a particular purpose,
+ * are disclaimed. In no event shall Distributed Processing Technology be
+ * liable for any direct, indirect, incidental, special, exemplary or
+ * consequential damages (including, but not limited to, procurement of
+ * substitute goods or services; loss of use, data, or profits; or business
+ * interruptions) however caused and on any theory of liability, whether in
+ * contract, strict liability, or tort (including negligence or otherwise)
+ * arising in any way out of the use of this driver software, even if advised
+ * of the possibility of such damage.
+ *
+ */
+
+#ifndef __OSD_UTIL_H
+#define __OSD_UTIL_H
+
+/*File - OSD_UTIL.H
+ ****************************************************************************
+ *
+ *Description:
+ *
+ * This file contains defines and function prototypes that are
+ *operating system dependent. The resources defined in this file
+ *are not specific to any particular application.
+ *
+ *Copyright Distributed Processing Technology, Corp.
+ * 140 Candace Dr.
+ * Maitland, Fl. 32751 USA
+ * Phone: (407) 830-5522 Fax: (407) 260-5366
+ * All Rights Reserved
+ *
+ *Author: Doug Anderson
+ *Date: 1/7/94
+ *
+ *Editors:
+ *
+ *Remarks:
+ *
+ *
+ *****************************************************************************/
+
+
+/*Definitions - Defines & Constants ----------------------------------------- */
+
+/*----------------------------- */
+/* Operating system selections: */
+/*----------------------------- */
+
+/*#define _DPT_MSDOS */
+/*#define _DPT_WIN_3X */
+/*#define _DPT_WIN_4X */
+/*#define _DPT_WIN_NT */
+/*#define _DPT_NETWARE */
+/*#define _DPT_OS2 */
+/*#define _DPT_SCO */
+/*#define _DPT_UNIXWARE */
+/*#define _DPT_SOLARIS */
+/*#define _DPT_NEXTSTEP */
+/*#define _DPT_BANYAN */
+
+/*-------------------------------- */
+/* Include the OS specific defines */
+/*-------------------------------- */
+
+/*#define OS_SELECTION From Above List */
+/*#define SEMAPHORE_T ??? */
+/*#define DLL_HANDLE_T ??? */
+
+#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
+# include "i386/isa/dpt_osd_defs.h"
+#else
+# include "osd_defs.h"
+#endif
+
+#ifndef DPT_UNALIGNED
+ #define DPT_UNALIGNED
+#endif
+
+#ifndef DPT_EXPORT
+ #define DPT_EXPORT
+#endif
+
+#ifndef DPT_IMPORT
+ #define DPT_IMPORT
+#endif
+
+#ifndef DPT_RUNTIME_IMPORT
+ #define DPT_RUNTIME_IMPORT DPT_IMPORT
+#endif
+
+/*--------------------- */
+/* OS dependent defines */
+/*--------------------- */
+
+#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
+ #define _DPT_16_BIT
+#else
+ #define _DPT_32_BIT
+#endif
+
+#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
+ #define _DPT_UNIX
+#endif
+
+#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
+ || defined (_DPT_OS2)
+ #define _DPT_DLL_SUPPORT
+#endif
+
+#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
+ #define _DPT_PREEMPTIVE
+#endif
+
+#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
+ #define _DPT_MULTI_THREADED
+#endif
+
+#if !defined (_DPT_MSDOS)
+ #define _DPT_MULTI_TASKING
+#endif
+
+ /* These exist for platforms that */
+ /* chunk when accessing mis-aligned */
+ /* data */
+#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
+ #if defined (_DPT_BIG_ENDIAN)
+ #if !defined (_DPT_STRICT_ALIGN)
+ #define _DPT_STRICT_ALIGN
+ #endif
+ #endif
+#endif
+
+ /* Determine if in C or C++ mode */
+#ifdef __cplusplus
+ #define _DPT_CPP
+#else
+ #define _DPT_C
+#endif
+
+/*-------------------------------------------------------------------*/
+/* Under Solaris the compiler refuses to accept code like: */
+/* { {"DPT"}, 0, NULL .... }, */
+/* and complains about the {"DPT"} part by saying "cannot use { } */
+/* to initialize char*". */
+/* */
+/* By defining these ugly macros we can get around this and also */
+/* not have to copy and #ifdef large sections of code. I know that */
+/* these macros are *really* ugly, but they should help reduce */
+/* maintenance in the long run. */
+/* */
+/*-------------------------------------------------------------------*/
+#if !defined (DPTSQO)
+ #if defined (_DPT_SOLARIS)
+ #define DPTSQO
+ #define DPTSQC
+ #else
+ #define DPTSQO {
+ #define DPTSQC }
+ #endif /* solaris */
+#endif /* DPTSQO */
+
+
+/*---------------------- */
+/* OS dependent typedefs */
+/*---------------------- */
+
+#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
+ #define BYTE unsigned char
+ #define WORD unsigned short
+#endif
+
+#ifndef _DPT_TYPEDEFS
+ #define _DPT_TYPEDEFS
+ typedef unsigned char uCHAR;
+ typedef unsigned short uSHORT;
+ typedef unsigned int uINT;
+ typedef unsigned long uLONG;
+
+ typedef union {
+ uCHAR u8[4];
+ uSHORT u16[2];
+ uLONG u32;
+ } access_U;
+#endif
+
+#if !defined (NULL)
+ #define NULL 0
+#endif
+
+
+/*Prototypes - function ----------------------------------------------------- */
+
+#ifdef __cplusplus
+ extern "C" { /* Declare all these functions as "C" functions */
+#endif
+
+/*------------------------ */
+/* Byte reversal functions */
+/*------------------------ */
+
+ /* Reverses the byte ordering of a 2 byte variable */
+#if (!defined(osdSwap2))
+ uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
+#endif // !osdSwap2
+
+ /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
+#if (!defined(osdSwap3))
+ uLONG osdSwap3(DPT_UNALIGNED uLONG *);
+#endif // !osdSwap3
+
+
+#ifdef _DPT_NETWARE
+ #include "novpass.h" /* For DPT_Bswapl() prototype */
+ /* Inline the byte swap */
+ #ifdef __cplusplus
+ inline uLONG osdSwap4(uLONG *inLong) {
+ return *inLong = DPT_Bswapl(*inLong);
+ }
+ #else
+ #define osdSwap4(inLong) DPT_Bswapl(inLong)
+ #endif // cplusplus
+#else
+ /* Reverses the byte ordering of a 4 byte variable */
+# if (!defined(osdSwap4))
+ uLONG osdSwap4(DPT_UNALIGNED uLONG *);
+# endif // !osdSwap4
+
+ /* The following functions ALWAYS swap regardless of the *
+ * presence of DPT_BIG_ENDIAN */
+
+ uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
+ uLONG trueSwap4(DPT_UNALIGNED uLONG *);
+
+#endif // netware
+
+
+/*-------------------------------------*
+ * Network order swap functions *
+ * *
+ * These functions/macros will be used *
+ * by the structure insert()/extract() *
+ * functions. *
+ *
+ * We will enclose all structure *
+ * portability modifications inside *
+ * #ifdefs. When we are ready, we *
+ * will #define DPT_PORTABLE to begin *
+ * using the modifications. *
+ *-------------------------------------*/
+uLONG netSwap4(uLONG val);
+
+#if defined (_DPT_BIG_ENDIAN)
+
+// for big-endian we need to swap
+
+#ifndef NET_SWAP_2
+#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
+#endif // NET_SWAP_2
+
+#ifndef NET_SWAP_4
+#define NET_SWAP_4(x) netSwap4((x))
+#endif // NET_SWAP_4
+
+#else
+
+// for little-endian we don't need to do anything
+
+#ifndef NET_SWAP_2
+#define NET_SWAP_2(x) (x)
+#endif // NET_SWAP_2
+
+#ifndef NET_SWAP_4
+#define NET_SWAP_4(x) (x)
+#endif // NET_SWAP_4
+
+#endif // big endian
+
+
+
+/*----------------------------------- */
+/* Run-time loadable module functions */
+/*----------------------------------- */
+
+ /* Loads the specified run-time loadable DLL */
+DLL_HANDLE_T osdLoadModule(uCHAR *);
+ /* Unloads the specified run-time loadable DLL */
+uSHORT osdUnloadModule(DLL_HANDLE_T);
+ /* Returns a pointer to a function inside a run-time loadable DLL */
+void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
+
+/*--------------------------------------- */
+/* Mutually exclusive semaphore functions */
+/*--------------------------------------- */
+
+ /* Create a named semaphore */
+SEMAPHORE_T osdCreateNamedSemaphore(char *);
+ /* Create a mutually exlusive semaphore */
+SEMAPHORE_T osdCreateSemaphore(void);
+ /* create an event semaphore */
+SEMAPHORE_T osdCreateEventSemaphore(void);
+ /* create a named event semaphore */
+SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
+
+ /* Destroy the specified mutually exclusive semaphore object */
+uSHORT osdDestroySemaphore(SEMAPHORE_T);
+ /* Request access to the specified mutually exclusive semaphore */
+uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
+ /* Release access to the specified mutually exclusive semaphore */
+uSHORT osdReleaseSemaphore(SEMAPHORE_T);
+ /* wait for a event to happen */
+uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
+ /* signal an event */
+uLONG osdSignalEventSemaphore(SEMAPHORE_T);
+ /* reset the event */
+uLONG osdResetEventSemaphore(SEMAPHORE_T);
+
+/*----------------- */
+/* Thread functions */
+/*----------------- */
+
+ /* Releases control to the task switcher in non-preemptive */
+ /* multitasking operating systems. */
+void osdSwitchThreads(void);
+
+ /* Starts a thread function */
+uLONG osdStartThread(void *,void *);
+
+/* what is my thread id */
+uLONG osdGetThreadID(void);
+
+/* wakes up the specifed thread */
+void osdWakeThread(uLONG);
+
+/* osd sleep for x milliseconds */
+void osdSleep(uLONG);
+
+#define DPT_THREAD_PRIORITY_LOWEST 0x00
+#define DPT_THREAD_PRIORITY_NORMAL 0x01
+#define DPT_THREAD_PRIORITY_HIGHEST 0x02
+
+uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
+
+#ifdef __cplusplus
+ } /* end the xtern "C" declaration */
+#endif
+
+#endif /* osd_util_h */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
new file mode 100644
index 000000000..a4aa1c31f
--- /dev/null
+++ b/drivers/scsi/dpt/sys_info.h
@@ -0,0 +1,417 @@
+/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */
+
+/*
+ * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source form, with or without modification, are
+ * permitted provided that redistributions of source code must retain the
+ * above copyright notice, this list of conditions and the following disclaimer.
+ *
+ * This software is provided `as is' by Distributed Processing Technology and
+ * any express or implied warranties, including, but not limited to, the
+ * implied warranties of merchantability and fitness for a particular purpose,
+ * are disclaimed. In no event shall Distributed Processing Technology be
+ * liable for any direct, indirect, incidental, special, exemplary or
+ * consequential damages (including, but not limited to, procurement of
+ * substitute goods or services; loss of use, data, or profits; or business
+ * interruptions) however caused and on any theory of liability, whether in
+ * contract, strict liability, or tort (including negligence or otherwise)
+ * arising in any way out of the use of this driver software, even if advised
+ * of the possibility of such damage.
+ *
+ */
+
+#ifndef __SYS_INFO_H
+#define __SYS_INFO_H
+
+/*File - SYS_INFO.H
+ ****************************************************************************
+ *
+ *Description:
+ *
+ * This file contains structure definitions for the OS dependent
+ *layer system information buffers.
+ *
+ *Copyright Distributed Processing Technology, Corp.
+ * 140 Candace Dr.
+ * Maitland, Fl. 32751 USA
+ * Phone: (407) 830-5522 Fax: (407) 260-5366
+ * All Rights Reserved
+ *
+ *Author: Don Kemper
+ *Date: 5/10/94
+ *
+ *Editors:
+ *
+ *Remarks:
+ *
+ *
+ *****************************************************************************/
+
+
+/*Include Files ------------------------------------------------------------- */
+
+#include "osd_util.h"
+
+#ifndef NO_PACK
+#if defined (_DPT_AIX)
+#pragma options align=packed
+#else
+#pragma pack(1)
+#endif /* aix */
+#endif // no unpack
+
+
+/*struct - driveParam_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the drive parameters seen during
+ *booting.
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ struct driveParam_S {
+#else
+ typedef struct {
+#endif
+
+ uSHORT cylinders; /* Up to 1024 */
+ uCHAR heads; /* Up to 255 */
+ uCHAR sectors; /* Up to 63 */
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } driveParam_S;
+#endif
+/*driveParam_S - end */
+
+
+/*struct - sysInfo_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the command system information that
+ *should be returned by every OS dependent layer.
+ *
+ *---------------------------------------------------------------------------*/
+
+/*flags - bit definitions */
+#define SI_CMOS_Valid 0x0001
+#define SI_NumDrivesValid 0x0002
+#define SI_ProcessorValid 0x0004
+#define SI_MemorySizeValid 0x0008
+#define SI_DriveParamsValid 0x0010
+#define SI_SmartROMverValid 0x0020
+#define SI_OSversionValid 0x0040
+#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */
+#define SI_BusTypeValid 0x0100
+
+#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */
+#define SI_NO_SmartROM 0x8000
+
+/*busType - definitions */
+#define SI_ISA_BUS 0x00
+#define SI_MCA_BUS 0x01
+#define SI_EISA_BUS 0x02
+#define SI_PCI_BUS 0x04
+
+#ifdef __cplusplus
+ struct sysInfo_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR drive0CMOS; /* CMOS Drive 0 Type */
+ uCHAR drive1CMOS; /* CMOS Drive 1 Type */
+ uCHAR numDrives; /* 0040:0075 contents */
+ uCHAR processorFamily; /* Same as DPTSIG's definition */
+ uCHAR processorType; /* Same as DPTSIG's definition */
+ uCHAR smartROMMajorVersion;
+ uCHAR smartROMMinorVersion; /* SmartROM version */
+ uCHAR smartROMRevision;
+ uSHORT flags; /* See bit definitions above */
+ uSHORT conventionalMemSize; /* in KB */
+ uINT extendedMemSize; /* in KB */
+ uINT osType; /* Same as DPTSIG's definition */
+ uCHAR osMajorVersion;
+ uCHAR osMinorVersion; /* The OS version */
+ uCHAR osRevision;
+#ifdef _SINIX_ADDON
+ uCHAR busType; /* See defininitions above */
+ uSHORT osSubRevision;
+ uCHAR pad[2]; /* For alignment */
+#else
+ uCHAR osSubRevision;
+ uCHAR busType; /* See defininitions above */
+ uCHAR pad[3]; /* For alignment */
+#endif
+ driveParam_S drives[16]; /* SmartROM Logical Drives */
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } sysInfo_S;
+#endif
+/*sysInfo_S - end */
+
+
+/*struct - DOS_Info_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the system information specific to a
+ *DOS workstation.
+ *
+ *---------------------------------------------------------------------------*/
+
+/*flags - bit definitions */
+#define DI_DOS_HIGH 0x01 /* DOS is loaded high */
+#define DI_DPMI_VALID 0x02 /* DPMI version is valid */
+
+#ifdef __cplusplus
+ struct DOS_Info_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR flags; /* See bit definitions above */
+ uSHORT driverLocation; /* SmartROM BIOS address */
+ uSHORT DOS_version;
+ uSHORT DPMI_version;
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } DOS_Info_S;
+#endif
+/*DOS_Info_S - end */
+
+
+/*struct - Netware_Info_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the system information specific to a
+ *Netware machine.
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ struct Netware_Info_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR driverName[13]; /* ie PM12NW31.DSK */
+ uCHAR serverName[48];
+ uCHAR netwareVersion; /* The Netware OS version */
+ uCHAR netwareSubVersion;
+ uCHAR netwareRevision;
+ uSHORT maxConnections; /* Probably 250 or 1000 */
+ uSHORT connectionsInUse;
+ uSHORT maxVolumes;
+ uCHAR unused;
+ uCHAR SFTlevel;
+ uCHAR TTSlevel;
+
+ uCHAR clibMajorVersion; /* The CLIB.NLM version */
+ uCHAR clibMinorVersion;
+ uCHAR clibRevision;
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } Netware_Info_S;
+#endif
+/*Netware_Info_S - end */
+
+
+/*struct - OS2_Info_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the system information specific to an
+ *OS/2 machine.
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ struct OS2_Info_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR something;
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } OS2_Info_S;
+#endif
+/*OS2_Info_S - end */
+
+
+/*struct - WinNT_Info_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the system information specific to a
+ *Windows NT machine.
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ struct WinNT_Info_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR something;
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } WinNT_Info_S;
+#endif
+/*WinNT_Info_S - end */
+
+
+/*struct - SCO_Info_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the system information specific to an
+ *SCO UNIX machine.
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ struct SCO_Info_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR something;
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } SCO_Info_S;
+#endif
+/*SCO_Info_S - end */
+
+
+/*struct - USL_Info_S - start
+ *===========================================================================
+ *
+ *Description:
+ *
+ * This structure defines the system information specific to a
+ *USL UNIX machine.
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ struct USL_Info_S {
+#else
+ typedef struct {
+#endif
+
+ uCHAR something;
+
+#ifdef __cplusplus
+
+//---------- Portability Additions ----------- in sp_sinfo.cpp
+#ifdef DPT_PORTABLE
+ uSHORT netInsert(dptBuffer_S *buffer);
+ uSHORT netExtract(dptBuffer_S *buffer);
+#endif // DPT PORTABLE
+//--------------------------------------------
+
+ };
+#else
+ } USL_Info_S;
+#endif
+/*USL_Info_S - end */
+
+
+ /* Restore default structure packing */
+#ifndef NO_UNPACK
+#if defined (_DPT_AIX)
+#pragma options align=reset
+#elif defined (UNPACK_FOUR)
+#pragma pack(4)
+#else
+#pragma pack()
+#endif /* aix */
+#endif // no unpack
+
+#endif // __SYS_INFO_H
+
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
new file mode 100644
index 000000000..2806cfbec
--- /dev/null
+++ b/drivers/scsi/dpt_i2o.c
@@ -0,0 +1,3612 @@
+/***************************************************************************
+ dpti.c - description
+ -------------------
+ begin : Thu Sep 7 2000
+ copyright : (C) 2000 by Adaptec
+
+ July 30, 2001 First version being submitted
+ for inclusion in the kernel. V2.4
+
+ See Documentation/scsi/dpti.txt for history, notes, license info
+ and credits
+ ***************************************************************************/
+
+/***************************************************************************
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ ***************************************************************************/
+/***************************************************************************
+ * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
+ - Support 2.6 kernel and DMA-mapping
+ - ioctl fix for raid tools
+ - use schedule_timeout in long long loop
+ **************************************************************************/
+
+/*#define DEBUG 1 */
+/*#define UARTDELAY 1 */
+
+#include <linux/module.h>
+
+MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
+MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
+
+////////////////////////////////////////////////////////////////
+
+#include <linux/ioctl.h> /* For SCSI-Passthrough */
+#include <asm/uaccess.h>
+
+#include <linux/stat.h>
+#include <linux/slab.h> /* for kmalloc() */
+#include <linux/pci.h> /* for PCI support */
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h> /* for udelay */
+#include <linux/interrupt.h>
+#include <linux/kernel.h> /* for printk */
+#include <linux/sched.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+
+#include <asm/processor.h> /* for boot_cpu_data */
+#include <asm/pgtable.h>
+#include <asm/io.h> /* for virt_to_bus, etc. */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "dpt/dptsig.h"
+#include "dpti.h"
+
+/*============================================================================
+ * Create a binary signature - this is read by dptsig
+ * Needed for our management apps
+ *============================================================================
+ */
+static DEFINE_MUTEX(adpt_mutex);
+static dpt_sig_S DPTI_sig = {
+ {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
+#ifdef __i386__
+ PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
+#elif defined(__ia64__)
+ PROC_INTEL, PROC_IA64,
+#elif defined(__sparc__)
+ PROC_ULTRASPARC, PROC_ULTRASPARC,
+#elif defined(__alpha__)
+ PROC_ALPHA, PROC_ALPHA,
+#else
+ (-1),(-1),
+#endif
+ FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
+ ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
+ DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
+};
+
+
+
+
+/*============================================================================
+ * Globals
+ *============================================================================
+ */
+
+static DEFINE_MUTEX(adpt_configuration_lock);
+
+static struct i2o_sys_tbl *sys_tbl;
+static dma_addr_t sys_tbl_pa;
+static int sys_tbl_ind;
+static int sys_tbl_len;
+
+static adpt_hba* hba_chain = NULL;
+static int hba_count = 0;
+
+static struct class *adpt_sysfs_class;
+
+static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+static const struct file_operations adpt_fops = {
+ .unlocked_ioctl = adpt_unlocked_ioctl,
+ .open = adpt_open,
+ .release = adpt_close,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_adpt_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+/* Structures and definitions for synchronous message posting.
+ * See adpt_i2o_post_wait() for description
+ * */
+struct adpt_i2o_post_wait_data
+{
+ int status;
+ u32 id;
+ adpt_wait_queue_head_t *wq;
+ struct adpt_i2o_post_wait_data *next;
+};
+
+static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
+static u32 adpt_post_wait_id = 0;
+static DEFINE_SPINLOCK(adpt_post_wait_lock);
+
+
+/*============================================================================
+ * Functions
+ *============================================================================
+ */
+
+static inline int dpt_dma64(adpt_hba *pHba)
+{
+ return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
+}
+
+static inline u32 dma_high(dma_addr_t addr)
+{
+ return upper_32_bits(addr);
+}
+
+static inline u32 dma_low(dma_addr_t addr)
+{
+ return (u32)addr;
+}
+
+static u8 adpt_read_blink_led(adpt_hba* host)
+{
+ if (host->FwDebugBLEDflag_P) {
+ if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
+ return readb(host->FwDebugBLEDvalue_P);
+ }
+ }
+ return 0;
+}
+
+/*============================================================================
+ * Scsi host template interface functions
+ *============================================================================
+ */
+
+static struct pci_device_id dptids[] = {
+ { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci,dptids);
+
+static int adpt_detect(struct scsi_host_template* sht)
+{
+ struct pci_dev *pDev = NULL;
+ adpt_hba *pHba;
+ adpt_hba *next;
+
+ PINFO("Detecting Adaptec I2O RAID controllers...\n");
+
+ /* search for all Adatpec I2O RAID cards */
+ while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
+ if(pDev->device == PCI_DPT_DEVICE_ID ||
+ pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
+ if(adpt_install_hba(sht, pDev) ){
+ PERROR("Could not Init an I2O RAID device\n");
+ PERROR("Will not try to detect others.\n");
+ return hba_count-1;
+ }
+ pci_dev_get(pDev);
+ }
+ }
+
+ /* In INIT state, Activate IOPs */
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ // Activate does get status , init outbound, and get hrt
+ if (adpt_i2o_activate_hba(pHba) < 0) {
+ adpt_i2o_delete_hba(pHba);
+ }
+ }
+
+
+ /* Active IOPs in HOLD state */
+
+rebuild_sys_tab:
+ if (hba_chain == NULL)
+ return 0;
+
+ /*
+ * If build_sys_table fails, we kill everything and bail
+ * as we can't init the IOPs w/o a system table
+ */
+ if (adpt_i2o_build_sys_table() < 0) {
+ adpt_i2o_sys_shutdown();
+ return 0;
+ }
+
+ PDEBUG("HBA's in HOLD state\n");
+
+ /* If IOP don't get online, we need to rebuild the System table */
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ if (adpt_i2o_online_hba(pHba) < 0) {
+ adpt_i2o_delete_hba(pHba);
+ goto rebuild_sys_tab;
+ }
+ }
+
+ /* Active IOPs now in OPERATIONAL state */
+ PDEBUG("HBA's in OPERATIONAL state\n");
+
+ printk("dpti: If you have a lot of devices this could take a few minutes.\n");
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
+ if (adpt_i2o_lct_get(pHba) < 0){
+ adpt_i2o_delete_hba(pHba);
+ continue;
+ }
+
+ if (adpt_i2o_parse_lct(pHba) < 0){
+ adpt_i2o_delete_hba(pHba);
+ continue;
+ }
+ adpt_inquiry(pHba);
+ }
+
+ adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
+ if (IS_ERR(adpt_sysfs_class)) {
+ printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
+ adpt_sysfs_class = NULL;
+ }
+
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ if (adpt_scsi_host_alloc(pHba, sht) < 0){
+ adpt_i2o_delete_hba(pHba);
+ continue;
+ }
+ pHba->initialized = TRUE;
+ pHba->state &= ~DPTI_STATE_RESET;
+ if (adpt_sysfs_class) {
+ struct device *dev = device_create(adpt_sysfs_class,
+ NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
+ "dpti%d", pHba->unit);
+ if (IS_ERR(dev)) {
+ printk(KERN_WARNING"dpti%d: unable to "
+ "create device in dpt_i2o class\n",
+ pHba->unit);
+ }
+ }
+ }
+
+ // Register our control device node
+ // nodes will need to be created in /dev to access this
+ // the nodes can not be created from within the driver
+ if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
+ adpt_i2o_sys_shutdown();
+ return 0;
+ }
+ return hba_count;
+}
+
+
+/*
+ * scsi_unregister will be called AFTER we return.
+ */
+static int adpt_release(struct Scsi_Host *host)
+{
+ adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
+// adpt_i2o_quiesce_hba(pHba);
+ adpt_i2o_delete_hba(pHba);
+ scsi_unregister(host);
+ return 0;
+}
+
+
+static void adpt_inquiry(adpt_hba* pHba)
+{
+ u32 msg[17];
+ u32 *mptr;
+ u32 *lenptr;
+ int direction;
+ int scsidir;
+ u32 len;
+ u32 reqlen;
+ u8* buf;
+ dma_addr_t addr;
+ u8 scb[16];
+ s32 rcode;
+
+ memset(msg, 0, sizeof(msg));
+ buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
+ if(!buf){
+ printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
+ return;
+ }
+ memset((void*)buf, 0, 36);
+
+ len = 36;
+ direction = 0x00000000;
+ scsidir =0x40000000; // DATA IN (iop<--dev)
+
+ if (dpt_dma64(pHba))
+ reqlen = 17; // SINGLE SGE, 64 bit
+ else
+ reqlen = 14; // SINGLE SGE, 32 bit
+ /* Stick the headers on */
+ msg[0] = reqlen<<16 | SGL_OFFSET_12;
+ msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
+ msg[2] = 0;
+ msg[3] = 0;
+ // Adaptec/DPT Private stuff
+ msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
+ msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
+ /* Direction, disconnect ok | sense data | simple queue , CDBLen */
+ // I2O_SCB_FLAG_ENABLE_DISCONNECT |
+ // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
+ // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
+ msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
+
+ mptr=msg+7;
+
+ memset(scb, 0, sizeof(scb));
+ // Write SCSI command into the message - always 16 byte block
+ scb[0] = INQUIRY;
+ scb[1] = 0;
+ scb[2] = 0;
+ scb[3] = 0;
+ scb[4] = 36;
+ scb[5] = 0;
+ // Don't care about the rest of scb
+
+ memcpy(mptr, scb, sizeof(scb));
+ mptr+=4;
+ lenptr=mptr++; /* Remember me - fill in when we know */
+
+ /* Now fill in the SGList and command */
+ *lenptr = len;
+ if (dpt_dma64(pHba)) {
+ *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+ *mptr++ = 1 << PAGE_SHIFT;
+ *mptr++ = 0xD0000000|direction|len;
+ *mptr++ = dma_low(addr);
+ *mptr++ = dma_high(addr);
+ } else {
+ *mptr++ = 0xD0000000|direction|len;
+ *mptr++ = addr;
+ }
+
+ // Send it on it's way
+ rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
+ if (rcode != 0) {
+ sprintf(pHba->detail, "Adaptec I2O RAID");
+ printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
+ if (rcode != -ETIME && rcode != -EINTR)
+ dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
+ } else {
+ memset(pHba->detail, 0, sizeof(pHba->detail));
+ memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
+ memcpy(&(pHba->detail[16]), " Model: ", 8);
+ memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
+ memcpy(&(pHba->detail[40]), " FW: ", 4);
+ memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
+ pHba->detail[48] = '\0'; /* precautionary */
+ dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
+ }
+ adpt_i2o_status_get(pHba);
+ return ;
+}
+
+
+static int adpt_slave_configure(struct scsi_device * device)
+{
+ struct Scsi_Host *host = device->host;
+ adpt_hba* pHba;
+
+ pHba = (adpt_hba *) host->hostdata[0];
+
+ if (host->can_queue && device->tagged_supported) {
+ scsi_change_queue_depth(device,
+ host->can_queue - 1);
+ }
+ return 0;
+}
+
+static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
+{
+ adpt_hba* pHba = NULL;
+ struct adpt_device* pDev = NULL; /* dpt per device information */
+
+ cmd->scsi_done = done;
+ /*
+ * SCSI REQUEST_SENSE commands will be executed automatically by the
+ * Host Adapter for any errors, so they should not be executed
+ * explicitly unless the Sense Data is zero indicating that no error
+ * occurred.
+ */
+
+ if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
+ cmd->result = (DID_OK << 16);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
+ pHba = (adpt_hba*)cmd->device->host->hostdata[0];
+ if (!pHba) {
+ return FAILED;
+ }
+
+ rmb();
+ if ((pHba->state) & DPTI_STATE_RESET)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ // TODO if the cmd->device if offline then I may need to issue a bus rescan
+ // followed by a get_lct to see if the device is there anymore
+ if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
+ /*
+ * First command request for this device. Set up a pointer
+ * to the device structure. This should be a TEST_UNIT_READY
+ * command from scan_scsis_single.
+ */
+ if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
+ // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
+ // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
+ cmd->result = (DID_NO_CONNECT << 16);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ cmd->device->hostdata = pDev;
+ }
+ pDev->pScsi_dev = cmd->device;
+
+ /*
+ * If we are being called from when the device is being reset,
+ * delay processing of the command until later.
+ */
+ if (pDev->state & DPTI_DEV_RESET ) {
+ return FAILED;
+ }
+ return adpt_scsi_to_i2o(pHba, cmd, pDev);
+}
+
+static DEF_SCSI_QCMD(adpt_queue)
+
+static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int geom[])
+{
+ int heads=-1;
+ int sectors=-1;
+ int cylinders=-1;
+
+ // *** First lets set the default geometry ****
+
+ // If the capacity is less than ox2000
+ if (capacity < 0x2000 ) { // floppy
+ heads = 18;
+ sectors = 2;
+ }
+ // else if between 0x2000 and 0x20000
+ else if (capacity < 0x20000) {
+ heads = 64;
+ sectors = 32;
+ }
+ // else if between 0x20000 and 0x40000
+ else if (capacity < 0x40000) {
+ heads = 65;
+ sectors = 63;
+ }
+ // else if between 0x4000 and 0x80000
+ else if (capacity < 0x80000) {
+ heads = 128;
+ sectors = 63;
+ }
+ // else if greater than 0x80000
+ else {
+ heads = 255;
+ sectors = 63;
+ }
+ cylinders = sector_div(capacity, heads * sectors);
+
+ // Special case if CDROM
+ if(sdev->type == 5) { // CDROM
+ heads = 252;
+ sectors = 63;
+ cylinders = 1111;
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ PDEBUG("adpt_bios_param: exit\n");
+ return 0;
+}
+
+
+static const char *adpt_info(struct Scsi_Host *host)
+{
+ adpt_hba* pHba;
+
+ pHba = (adpt_hba *) host->hostdata[0];
+ return (char *) (pHba->detail);
+}
+
+static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ struct adpt_device* d;
+ int id;
+ int chan;
+ adpt_hba* pHba;
+ int unit;
+
+ // Find HBA (host bus adapter) we are looking for
+ mutex_lock(&adpt_configuration_lock);
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ if (pHba->host == host) {
+ break; /* found adapter */
+ }
+ }
+ mutex_unlock(&adpt_configuration_lock);
+ if (pHba == NULL) {
+ return 0;
+ }
+ host = pHba->host;
+
+ seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
+ seq_printf(m, "%s\n", pHba->detail);
+ seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
+ pHba->host->host_no, pHba->name, host->irq);
+ seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
+ host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
+
+ seq_puts(m, "Devices:\n");
+ for(chan = 0; chan < MAX_CHANNEL; chan++) {
+ for(id = 0; id < MAX_ID; id++) {
+ d = pHba->channel[chan].device[id];
+ while(d) {
+ seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
+ seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
+
+ unit = d->pI2o_dev->lct_data.tid;
+ seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
+ unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
+ scsi_device_online(d->pScsi_dev)? "online":"offline");
+ d = d->next_lun;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
+ */
+static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
+{
+ return (u32)cmd->serial_number;
+}
+
+/*
+ * Go from a u32 'context' to a struct scsi_cmnd * .
+ * This could probably be made more efficient.
+ */
+static struct scsi_cmnd *
+ adpt_cmd_from_context(adpt_hba * pHba, u32 context)
+{
+ struct scsi_cmnd * cmd;
+ struct scsi_device * d;
+
+ if (context == 0)
+ return NULL;
+
+ spin_unlock(pHba->host->host_lock);
+ shost_for_each_device(d, pHba->host) {
+ unsigned long flags;
+ spin_lock_irqsave(&d->list_lock, flags);
+ list_for_each_entry(cmd, &d->cmd_list, list) {
+ if (((u32)cmd->serial_number == context)) {
+ spin_unlock_irqrestore(&d->list_lock, flags);
+ scsi_device_put(d);
+ spin_lock(pHba->host->host_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_irqrestore(&d->list_lock, flags);
+ }
+ spin_lock(pHba->host->host_lock);
+
+ return NULL;
+}
+
+/*
+ * Turn a pointer to ioctl reply data into an u32 'context'
+ */
+static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
+{
+#if BITS_PER_LONG == 32
+ return (u32)(unsigned long)reply;
+#else
+ ulong flags = 0;
+ u32 nr, i;
+
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+ nr = ARRAY_SIZE(pHba->ioctl_reply_context);
+ for (i = 0; i < nr; i++) {
+ if (pHba->ioctl_reply_context[i] == NULL) {
+ pHba->ioctl_reply_context[i] = reply;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ if (i >= nr) {
+ kfree (reply);
+ printk(KERN_WARNING"%s: Too many outstanding "
+ "ioctl commands\n", pHba->name);
+ return (u32)-1;
+ }
+
+ return i;
+#endif
+}
+
+/*
+ * Go from an u32 'context' to a pointer to ioctl reply data.
+ */
+static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
+{
+#if BITS_PER_LONG == 32
+ return (void *)(unsigned long)context;
+#else
+ void *p = pHba->ioctl_reply_context[context];
+ pHba->ioctl_reply_context[context] = NULL;
+
+ return p;
+#endif
+}
+
+/*===========================================================================
+ * Error Handling routines
+ *===========================================================================
+ */
+
+static int adpt_abort(struct scsi_cmnd * cmd)
+{
+ adpt_hba* pHba = NULL; /* host bus adapter structure */
+ struct adpt_device* dptdevice; /* dpt per device information */
+ u32 msg[5];
+ int rcode;
+
+ if(cmd->serial_number == 0){
+ return FAILED;
+ }
+ pHba = (adpt_hba*) cmd->device->host->hostdata[0];
+ printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
+ if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
+ printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
+ return FAILED;
+ }
+
+ memset(msg, 0, sizeof(msg));
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
+ msg[2] = 0;
+ msg[3]= 0;
+ msg[4] = adpt_cmd_to_context(cmd);
+ if (pHba->host)
+ spin_lock_irq(pHba->host->host_lock);
+ rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
+ if (pHba->host)
+ spin_unlock_irq(pHba->host->host_lock);
+ if (rcode != 0) {
+ if(rcode == -EOPNOTSUPP ){
+ printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
+ return FAILED;
+ }
+ printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
+ return FAILED;
+ }
+ printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
+ return SUCCESS;
+}
+
+
+#define I2O_DEVICE_RESET 0x27
+// This is the same for BLK and SCSI devices
+// NOTE this is wrong in the i2o.h definitions
+// This is not currently supported by our adapter but we issue it anyway
+static int adpt_device_reset(struct scsi_cmnd* cmd)
+{
+ adpt_hba* pHba;
+ u32 msg[4];
+ u32 rcode;
+ int old_state;
+ struct adpt_device* d = cmd->device->hostdata;
+
+ pHba = (void*) cmd->device->host->hostdata[0];
+ printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
+ if (!d) {
+ printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
+ return FAILED;
+ }
+ memset(msg, 0, sizeof(msg));
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
+ msg[2] = 0;
+ msg[3] = 0;
+
+ if (pHba->host)
+ spin_lock_irq(pHba->host->host_lock);
+ old_state = d->state;
+ d->state |= DPTI_DEV_RESET;
+ rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
+ d->state = old_state;
+ if (pHba->host)
+ spin_unlock_irq(pHba->host->host_lock);
+ if (rcode != 0) {
+ if(rcode == -EOPNOTSUPP ){
+ printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
+ return FAILED;
+ }
+ printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
+ return FAILED;
+ } else {
+ printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
+ return SUCCESS;
+ }
+}
+
+
+#define I2O_HBA_BUS_RESET 0x87
+// This version of bus reset is called by the eh_error handler
+static int adpt_bus_reset(struct scsi_cmnd* cmd)
+{
+ adpt_hba* pHba;
+ u32 msg[4];
+ u32 rcode;
+
+ pHba = (adpt_hba*)cmd->device->host->hostdata[0];
+ memset(msg, 0, sizeof(msg));
+ printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
+ msg[2] = 0;
+ msg[3] = 0;
+ if (pHba->host)
+ spin_lock_irq(pHba->host->host_lock);
+ rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
+ if (pHba->host)
+ spin_unlock_irq(pHba->host->host_lock);
+ if (rcode != 0) {
+ printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
+ return FAILED;
+ } else {
+ printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
+ return SUCCESS;
+ }
+}
+
+// This version of reset is called by the eh_error_handler
+static int __adpt_reset(struct scsi_cmnd* cmd)
+{
+ adpt_hba* pHba;
+ int rcode;
+ pHba = (adpt_hba*)cmd->device->host->hostdata[0];
+ printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
+ rcode = adpt_hba_reset(pHba);
+ if(rcode == 0){
+ printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
+ return SUCCESS;
+ } else {
+ printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
+ return FAILED;
+ }
+}
+
+static int adpt_reset(struct scsi_cmnd* cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = __adpt_reset(cmd);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
+static int adpt_hba_reset(adpt_hba* pHba)
+{
+ int rcode;
+
+ pHba->state |= DPTI_STATE_RESET;
+
+ // Activate does get status , init outbound, and get hrt
+ if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
+ printk(KERN_ERR "%s: Could not activate\n", pHba->name);
+ adpt_i2o_delete_hba(pHba);
+ return rcode;
+ }
+
+ if ((rcode=adpt_i2o_build_sys_table()) < 0) {
+ adpt_i2o_delete_hba(pHba);
+ return rcode;
+ }
+ PDEBUG("%s: in HOLD state\n",pHba->name);
+
+ if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
+ adpt_i2o_delete_hba(pHba);
+ return rcode;
+ }
+ PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
+
+ if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
+ adpt_i2o_delete_hba(pHba);
+ return rcode;
+ }
+
+ if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
+ adpt_i2o_delete_hba(pHba);
+ return rcode;
+ }
+ pHba->state &= ~DPTI_STATE_RESET;
+
+ adpt_fail_posted_scbs(pHba);
+ return 0; /* return success */
+}
+
+/*===========================================================================
+ *
+ *===========================================================================
+ */
+
+
+static void adpt_i2o_sys_shutdown(void)
+{
+ adpt_hba *pHba, *pNext;
+ struct adpt_i2o_post_wait_data *p1, *old;
+
+ printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
+ printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
+ /* Delete all IOPs from the controller chain */
+ /* They should have already been released by the
+ * scsi-core
+ */
+ for (pHba = hba_chain; pHba; pHba = pNext) {
+ pNext = pHba->next;
+ adpt_i2o_delete_hba(pHba);
+ }
+
+ /* Remove any timedout entries from the wait queue. */
+// spin_lock_irqsave(&adpt_post_wait_lock, flags);
+ /* Nothing should be outstanding at this point so just
+ * free them
+ */
+ for(p1 = adpt_post_wait_queue; p1;) {
+ old = p1;
+ p1 = p1->next;
+ kfree(old);
+ }
+// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
+ adpt_post_wait_queue = NULL;
+
+ printk(KERN_INFO "Adaptec I2O controllers down.\n");
+}
+
+static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
+{
+
+ adpt_hba* pHba = NULL;
+ adpt_hba* p = NULL;
+ ulong base_addr0_phys = 0;
+ ulong base_addr1_phys = 0;
+ u32 hba_map0_area_size = 0;
+ u32 hba_map1_area_size = 0;
+ void __iomem *base_addr_virt = NULL;
+ void __iomem *msg_addr_virt = NULL;
+ int dma64 = 0;
+
+ int raptorFlag = FALSE;
+
+ if(pci_enable_device(pDev)) {
+ return -EINVAL;
+ }
+
+ if (pci_request_regions(pDev, "dpt_i2o")) {
+ PERROR("dpti: adpt_config_hba: pci request region failed\n");
+ return -EINVAL;
+ }
+
+ pci_set_master(pDev);
+
+ /*
+ * See if we should enable dma64 mode.
+ */
+ if (sizeof(dma_addr_t) > 4 &&
+ pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
+ dma64 = 1;
+ }
+ if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
+ return -EINVAL;
+
+ /* adapter only supports message blocks below 4GB */
+ pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
+
+ base_addr0_phys = pci_resource_start(pDev,0);
+ hba_map0_area_size = pci_resource_len(pDev,0);
+
+ // Check if standard PCI card or single BAR Raptor
+ if(pDev->device == PCI_DPT_DEVICE_ID){
+ if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
+ // Raptor card with this device id needs 4M
+ hba_map0_area_size = 0x400000;
+ } else { // Not Raptor - it is a PCI card
+ if(hba_map0_area_size > 0x100000 ){
+ hba_map0_area_size = 0x100000;
+ }
+ }
+ } else {// Raptor split BAR config
+ // Use BAR1 in this configuration
+ base_addr1_phys = pci_resource_start(pDev,1);
+ hba_map1_area_size = pci_resource_len(pDev,1);
+ raptorFlag = TRUE;
+ }
+
+#if BITS_PER_LONG == 64
+ /*
+ * The original Adaptec 64 bit driver has this comment here:
+ * "x86_64 machines need more optimal mappings"
+ *
+ * I assume some HBAs report ridiculously large mappings
+ * and we need to limit them on platforms with IOMMUs.
+ */
+ if (raptorFlag == TRUE) {
+ if (hba_map0_area_size > 128)
+ hba_map0_area_size = 128;
+ if (hba_map1_area_size > 524288)
+ hba_map1_area_size = 524288;
+ } else {
+ if (hba_map0_area_size > 524288)
+ hba_map0_area_size = 524288;
+ }
+#endif
+
+ base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
+ if (!base_addr_virt) {
+ pci_release_regions(pDev);
+ PERROR("dpti: adpt_config_hba: io remap failed\n");
+ return -EINVAL;
+ }
+
+ if(raptorFlag == TRUE) {
+ msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
+ if (!msg_addr_virt) {
+ PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
+ iounmap(base_addr_virt);
+ pci_release_regions(pDev);
+ return -EINVAL;
+ }
+ } else {
+ msg_addr_virt = base_addr_virt;
+ }
+
+ // Allocate and zero the data structure
+ pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
+ if (!pHba) {
+ if (msg_addr_virt != base_addr_virt)
+ iounmap(msg_addr_virt);
+ iounmap(base_addr_virt);
+ pci_release_regions(pDev);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&adpt_configuration_lock);
+
+ if(hba_chain != NULL){
+ for(p = hba_chain; p->next; p = p->next);
+ p->next = pHba;
+ } else {
+ hba_chain = pHba;
+ }
+ pHba->next = NULL;
+ pHba->unit = hba_count;
+ sprintf(pHba->name, "dpti%d", hba_count);
+ hba_count++;
+
+ mutex_unlock(&adpt_configuration_lock);
+
+ pHba->pDev = pDev;
+ pHba->base_addr_phys = base_addr0_phys;
+
+ // Set up the Virtual Base Address of the I2O Device
+ pHba->base_addr_virt = base_addr_virt;
+ pHba->msg_addr_virt = msg_addr_virt;
+ pHba->irq_mask = base_addr_virt+0x30;
+ pHba->post_port = base_addr_virt+0x40;
+ pHba->reply_port = base_addr_virt+0x44;
+
+ pHba->hrt = NULL;
+ pHba->lct = NULL;
+ pHba->lct_size = 0;
+ pHba->status_block = NULL;
+ pHba->post_count = 0;
+ pHba->state = DPTI_STATE_RESET;
+ pHba->pDev = pDev;
+ pHba->devices = NULL;
+ pHba->dma64 = dma64;
+
+ // Initializing the spinlocks
+ spin_lock_init(&pHba->state_lock);
+ spin_lock_init(&adpt_post_wait_lock);
+
+ if(raptorFlag == 0){
+ printk(KERN_INFO "Adaptec I2O RAID controller"
+ " %d at %p size=%x irq=%d%s\n",
+ hba_count-1, base_addr_virt,
+ hba_map0_area_size, pDev->irq,
+ dma64 ? " (64-bit DMA)" : "");
+ } else {
+ printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
+ hba_count-1, pDev->irq,
+ dma64 ? " (64-bit DMA)" : "");
+ printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
+ printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
+ }
+
+ if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
+ printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
+ adpt_i2o_delete_hba(pHba);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static void adpt_i2o_delete_hba(adpt_hba* pHba)
+{
+ adpt_hba* p1;
+ adpt_hba* p2;
+ struct i2o_device* d;
+ struct i2o_device* next;
+ int i;
+ int j;
+ struct adpt_device* pDev;
+ struct adpt_device* pNext;
+
+
+ mutex_lock(&adpt_configuration_lock);
+ // scsi_unregister calls our adpt_release which
+ // does a quiese
+ if(pHba->host){
+ free_irq(pHba->host->irq, pHba);
+ }
+ p2 = NULL;
+ for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
+ if(p1 == pHba) {
+ if(p2) {
+ p2->next = p1->next;
+ } else {
+ hba_chain = p1->next;
+ }
+ break;
+ }
+ }
+
+ hba_count--;
+ mutex_unlock(&adpt_configuration_lock);
+
+ iounmap(pHba->base_addr_virt);
+ pci_release_regions(pHba->pDev);
+ if(pHba->msg_addr_virt != pHba->base_addr_virt){
+ iounmap(pHba->msg_addr_virt);
+ }
+ if(pHba->FwDebugBuffer_P)
+ iounmap(pHba->FwDebugBuffer_P);
+ if(pHba->hrt) {
+ dma_free_coherent(&pHba->pDev->dev,
+ pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
+ pHba->hrt, pHba->hrt_pa);
+ }
+ if(pHba->lct) {
+ dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+ pHba->lct, pHba->lct_pa);
+ }
+ if(pHba->status_block) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
+ pHba->status_block, pHba->status_block_pa);
+ }
+ if(pHba->reply_pool) {
+ dma_free_coherent(&pHba->pDev->dev,
+ pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+ pHba->reply_pool, pHba->reply_pool_pa);
+ }
+
+ for(d = pHba->devices; d ; d = next){
+ next = d->next;
+ kfree(d);
+ }
+ for(i = 0 ; i < pHba->top_scsi_channel ; i++){
+ for(j = 0; j < MAX_ID; j++){
+ if(pHba->channel[i].device[j] != NULL){
+ for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
+ pNext = pDev->next_lun;
+ kfree(pDev);
+ }
+ }
+ }
+ }
+ pci_dev_put(pHba->pDev);
+ if (adpt_sysfs_class)
+ device_destroy(adpt_sysfs_class,
+ MKDEV(DPTI_I2O_MAJOR, pHba->unit));
+ kfree(pHba);
+
+ if(hba_count <= 0){
+ unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
+ if (adpt_sysfs_class) {
+ class_destroy(adpt_sysfs_class);
+ adpt_sysfs_class = NULL;
+ }
+ }
+}
+
+static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
+{
+ struct adpt_device* d;
+
+ if(chan < 0 || chan >= MAX_CHANNEL)
+ return NULL;
+
+ if( pHba->channel[chan].device == NULL){
+ printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
+ return NULL;
+ }
+
+ d = pHba->channel[chan].device[id];
+ if(!d || d->tid == 0) {
+ return NULL;
+ }
+
+ /* If it is the only lun at that address then this should match*/
+ if(d->scsi_lun == lun){
+ return d;
+ }
+
+ /* else we need to look through all the luns */
+ for(d=d->next_lun ; d ; d = d->next_lun){
+ if(d->scsi_lun == lun){
+ return d;
+ }
+ }
+ return NULL;
+}
+
+
+static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
+{
+ // I used my own version of the WAIT_QUEUE_HEAD
+ // to handle some version differences
+ // When embedded in the kernel this could go back to the vanilla one
+ ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
+ int status = 0;
+ ulong flags = 0;
+ struct adpt_i2o_post_wait_data *p1, *p2;
+ struct adpt_i2o_post_wait_data *wait_data =
+ kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (!wait_data)
+ return -ENOMEM;
+
+ /*
+ * The spin locking is needed to keep anyone from playing
+ * with the queue pointers and id while we do the same
+ */
+ spin_lock_irqsave(&adpt_post_wait_lock, flags);
+ // TODO we need a MORE unique way of getting ids
+ // to support async LCT get
+ wait_data->next = adpt_post_wait_queue;
+ adpt_post_wait_queue = wait_data;
+ adpt_post_wait_id++;
+ adpt_post_wait_id &= 0x7fff;
+ wait_data->id = adpt_post_wait_id;
+ spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
+
+ wait_data->wq = &adpt_wq_i2o_post;
+ wait_data->status = -ETIMEDOUT;
+
+ add_wait_queue(&adpt_wq_i2o_post, &wait);
+
+ msg[2] |= 0x80000000 | ((u32)wait_data->id);
+ timeout *= HZ;
+ if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
+ set_current_state(TASK_INTERRUPTIBLE);
+ if(pHba->host)
+ spin_unlock_irq(pHba->host->host_lock);
+ if (!timeout)
+ schedule();
+ else{
+ timeout = schedule_timeout(timeout);
+ if (timeout == 0) {
+ // I/O issued, but cannot get result in
+ // specified time. Freeing resorces is
+ // dangerous.
+ status = -ETIME;
+ }
+ }
+ if(pHba->host)
+ spin_lock_irq(pHba->host->host_lock);
+ }
+ remove_wait_queue(&adpt_wq_i2o_post, &wait);
+
+ if(status == -ETIMEDOUT){
+ printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
+ // We will have to free the wait_data memory during shutdown
+ return status;
+ }
+
+ /* Remove the entry from the queue. */
+ p2 = NULL;
+ spin_lock_irqsave(&adpt_post_wait_lock, flags);
+ for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
+ if(p1 == wait_data) {
+ if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
+ status = -EOPNOTSUPP;
+ }
+ if(p2) {
+ p2->next = p1->next;
+ } else {
+ adpt_post_wait_queue = p1->next;
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
+
+ kfree(wait_data);
+
+ return status;
+}
+
+
+static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
+{
+
+ u32 m = EMPTY_QUEUE;
+ u32 __iomem *msg;
+ ulong timeout = jiffies + 30*HZ;
+ do {
+ rmb();
+ m = readl(pHba->post_port);
+ if (m != EMPTY_QUEUE) {
+ break;
+ }
+ if(time_after(jiffies,timeout)){
+ printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ } while(m == EMPTY_QUEUE);
+
+ msg = pHba->msg_addr_virt + m;
+ memcpy_toio(msg, data, len);
+ wmb();
+
+ //post message
+ writel(m, pHba->post_port);
+ wmb();
+
+ return 0;
+}
+
+
+static void adpt_i2o_post_wait_complete(u32 context, int status)
+{
+ struct adpt_i2o_post_wait_data *p1 = NULL;
+ /*
+ * We need to search through the adpt_post_wait
+ * queue to see if the given message is still
+ * outstanding. If not, it means that the IOP
+ * took longer to respond to the message than we
+ * had allowed and timer has already expired.
+ * Not much we can do about that except log
+ * it for debug purposes, increase timeout, and recompile
+ *
+ * Lock needed to keep anyone from moving queue pointers
+ * around while we're looking through them.
+ */
+
+ context &= 0x7fff;
+
+ spin_lock(&adpt_post_wait_lock);
+ for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
+ if(p1->id == context) {
+ p1->status = status;
+ spin_unlock(&adpt_post_wait_lock);
+ wake_up_interruptible(p1->wq);
+ return;
+ }
+ }
+ spin_unlock(&adpt_post_wait_lock);
+ // If this happens we lose commands that probably really completed
+ printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
+ printk(KERN_DEBUG" Tasks in wait queue:\n");
+ for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
+ printk(KERN_DEBUG" %d\n",p1->id);
+ }
+ return;
+}
+
+static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
+{
+ u32 msg[8];
+ u8* status;
+ dma_addr_t addr;
+ u32 m = EMPTY_QUEUE ;
+ ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
+
+ if(pHba->initialized == FALSE) { // First time reset should be quick
+ timeout = jiffies + (25*HZ);
+ } else {
+ adpt_i2o_quiesce_hba(pHba);
+ }
+
+ do {
+ rmb();
+ m = readl(pHba->post_port);
+ if (m != EMPTY_QUEUE) {
+ break;
+ }
+ if(time_after(jiffies,timeout)){
+ printk(KERN_WARNING"Timeout waiting for message!\n");
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ } while (m == EMPTY_QUEUE);
+
+ status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
+ if(status == NULL) {
+ adpt_send_nop(pHba, m);
+ printk(KERN_ERR"IOP reset failed - no free memory.\n");
+ return -ENOMEM;
+ }
+ memset(status,0,4);
+
+ msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]=0;
+ msg[3]=0;
+ msg[4]=0;
+ msg[5]=0;
+ msg[6]=dma_low(addr);
+ msg[7]=dma_high(addr);
+
+ memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
+ wmb();
+ writel(m, pHba->post_port);
+ wmb();
+
+ while(*status == 0){
+ if(time_after(jiffies,timeout)){
+ printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
+ /* We lose 4 bytes of "status" here, but we cannot
+ free these because controller may awake and corrupt
+ those bytes at any time */
+ /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
+ return -ETIMEDOUT;
+ }
+ rmb();
+ schedule_timeout_uninterruptible(1);
+ }
+
+ if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
+ PDEBUG("%s: Reset in progress...\n", pHba->name);
+ // Here we wait for message frame to become available
+ // indicated that reset has finished
+ do {
+ rmb();
+ m = readl(pHba->post_port);
+ if (m != EMPTY_QUEUE) {
+ break;
+ }
+ if(time_after(jiffies,timeout)){
+ printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
+ /* We lose 4 bytes of "status" here, but we
+ cannot free these because controller may
+ awake and corrupt those bytes at any time */
+ /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ } while (m == EMPTY_QUEUE);
+ // Flush the offset
+ adpt_send_nop(pHba, m);
+ }
+ adpt_i2o_status_get(pHba);
+ if(*status == 0x02 ||
+ pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
+ printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
+ pHba->name);
+ } else {
+ PDEBUG("%s: Reset completed.\n", pHba->name);
+ }
+
+ dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
+#ifdef UARTDELAY
+ // This delay is to allow someone attached to the card through the debug UART to
+ // set up the dump levels that they want before the rest of the initialization sequence
+ adpt_delay(20000);
+#endif
+ return 0;
+}
+
+
+static int adpt_i2o_parse_lct(adpt_hba* pHba)
+{
+ int i;
+ int max;
+ int tid;
+ struct i2o_device *d;
+ i2o_lct *lct = pHba->lct;
+ u8 bus_no = 0;
+ s16 scsi_id;
+ u64 scsi_lun;
+ u32 buf[10]; // larger than 7, or 8 ...
+ struct adpt_device* pDev;
+
+ if (lct == NULL) {
+ printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
+ return -1;
+ }
+
+ max = lct->table_size;
+ max -= 3;
+ max /= 9;
+
+ for(i=0;i<max;i++) {
+ if( lct->lct_entry[i].user_tid != 0xfff){
+ /*
+ * If we have hidden devices, we need to inform the upper layers about
+ * the possible maximum id reference to handle device access when
+ * an array is disassembled. This code has no other purpose but to
+ * allow us future access to devices that are currently hidden
+ * behind arrays, hotspares or have not been configured (JBOD mode).
+ */
+ if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
+ lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
+ lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
+ continue;
+ }
+ tid = lct->lct_entry[i].tid;
+ // I2O_DPT_DEVICE_INFO_GROUP_NO;
+ if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
+ continue;
+ }
+ bus_no = buf[0]>>16;
+ scsi_id = buf[1];
+ scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
+ if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
+ printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
+ continue;
+ }
+ if (scsi_id >= MAX_ID){
+ printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
+ continue;
+ }
+ if(bus_no > pHba->top_scsi_channel){
+ pHba->top_scsi_channel = bus_no;
+ }
+ if(scsi_id > pHba->top_scsi_id){
+ pHba->top_scsi_id = scsi_id;
+ }
+ if(scsi_lun > pHba->top_scsi_lun){
+ pHba->top_scsi_lun = scsi_lun;
+ }
+ continue;
+ }
+ d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
+ if(d==NULL)
+ {
+ printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
+ return -ENOMEM;
+ }
+
+ d->controller = pHba;
+ d->next = NULL;
+
+ memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
+
+ d->flags = 0;
+ tid = d->lct_data.tid;
+ adpt_i2o_report_hba_unit(pHba, d);
+ adpt_i2o_install_device(pHba, d);
+ }
+ bus_no = 0;
+ for(d = pHba->devices; d ; d = d->next) {
+ if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
+ d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
+ tid = d->lct_data.tid;
+ // TODO get the bus_no from hrt-but for now they are in order
+ //bus_no =
+ if(bus_no > pHba->top_scsi_channel){
+ pHba->top_scsi_channel = bus_no;
+ }
+ pHba->channel[bus_no].type = d->lct_data.class_id;
+ pHba->channel[bus_no].tid = tid;
+ if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
+ {
+ pHba->channel[bus_no].scsi_id = buf[1];
+ PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
+ }
+ // TODO remove - this is just until we get from hrt
+ bus_no++;
+ if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
+ printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
+ break;
+ }
+ }
+ }
+
+ // Setup adpt_device table
+ for(d = pHba->devices; d ; d = d->next) {
+ if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
+ d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
+ d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
+
+ tid = d->lct_data.tid;
+ scsi_id = -1;
+ // I2O_DPT_DEVICE_INFO_GROUP_NO;
+ if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
+ bus_no = buf[0]>>16;
+ scsi_id = buf[1];
+ scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
+ if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
+ continue;
+ }
+ if (scsi_id >= MAX_ID) {
+ continue;
+ }
+ if( pHba->channel[bus_no].device[scsi_id] == NULL){
+ pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
+ if(pDev == NULL) {
+ return -ENOMEM;
+ }
+ pHba->channel[bus_no].device[scsi_id] = pDev;
+ } else {
+ for( pDev = pHba->channel[bus_no].device[scsi_id];
+ pDev->next_lun; pDev = pDev->next_lun){
+ }
+ pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
+ if(pDev->next_lun == NULL) {
+ return -ENOMEM;
+ }
+ pDev = pDev->next_lun;
+ }
+ pDev->tid = tid;
+ pDev->scsi_channel = bus_no;
+ pDev->scsi_id = scsi_id;
+ pDev->scsi_lun = scsi_lun;
+ pDev->pI2o_dev = d;
+ d->owner = pDev;
+ pDev->type = (buf[0])&0xff;
+ pDev->flags = (buf[0]>>8)&0xff;
+ if(scsi_id > pHba->top_scsi_id){
+ pHba->top_scsi_id = scsi_id;
+ }
+ if(scsi_lun > pHba->top_scsi_lun){
+ pHba->top_scsi_lun = scsi_lun;
+ }
+ }
+ if(scsi_id == -1){
+ printk(KERN_WARNING"Could not find SCSI ID for %s\n",
+ d->lct_data.identity_tag);
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * Each I2O controller has a chain of devices on it - these match
+ * the useful parts of the LCT of the board.
+ */
+
+static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
+{
+ mutex_lock(&adpt_configuration_lock);
+ d->controller=pHba;
+ d->owner=NULL;
+ d->next=pHba->devices;
+ d->prev=NULL;
+ if (pHba->devices != NULL){
+ pHba->devices->prev=d;
+ }
+ pHba->devices=d;
+ *d->dev_name = 0;
+
+ mutex_unlock(&adpt_configuration_lock);
+ return 0;
+}
+
+static int adpt_open(struct inode *inode, struct file *file)
+{
+ int minor;
+ adpt_hba* pHba;
+
+ mutex_lock(&adpt_mutex);
+ //TODO check for root access
+ //
+ minor = iminor(inode);
+ if (minor >= hba_count) {
+ mutex_unlock(&adpt_mutex);
+ return -ENXIO;
+ }
+ mutex_lock(&adpt_configuration_lock);
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ if (pHba->unit == minor) {
+ break; /* found adapter */
+ }
+ }
+ if (pHba == NULL) {
+ mutex_unlock(&adpt_configuration_lock);
+ mutex_unlock(&adpt_mutex);
+ return -ENXIO;
+ }
+
+// if(pHba->in_use){
+ // mutex_unlock(&adpt_configuration_lock);
+// return -EBUSY;
+// }
+
+ pHba->in_use = 1;
+ mutex_unlock(&adpt_configuration_lock);
+ mutex_unlock(&adpt_mutex);
+
+ return 0;
+}
+
+static int adpt_close(struct inode *inode, struct file *file)
+{
+ int minor;
+ adpt_hba* pHba;
+
+ minor = iminor(inode);
+ if (minor >= hba_count) {
+ return -ENXIO;
+ }
+ mutex_lock(&adpt_configuration_lock);
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ if (pHba->unit == minor) {
+ break; /* found adapter */
+ }
+ }
+ mutex_unlock(&adpt_configuration_lock);
+ if (pHba == NULL) {
+ return -ENXIO;
+ }
+
+ pHba->in_use = 0;
+
+ return 0;
+}
+
+
+static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
+{
+ u32 msg[MAX_MESSAGE_SIZE];
+ u32* reply = NULL;
+ u32 size = 0;
+ u32 reply_size = 0;
+ u32 __user *user_msg = arg;
+ u32 __user * user_reply = NULL;
+ void *sg_list[pHba->sg_tablesize];
+ u32 sg_offset = 0;
+ u32 sg_count = 0;
+ int sg_index = 0;
+ u32 i = 0;
+ u32 rcode = 0;
+ void *p = NULL;
+ dma_addr_t addr;
+ ulong flags = 0;
+
+ memset(&msg, 0, MAX_MESSAGE_SIZE*4);
+ // get user msg size in u32s
+ if(get_user(size, &user_msg[0])){
+ return -EFAULT;
+ }
+ size = size>>16;
+
+ user_reply = &user_msg[size];
+ if(size > MAX_MESSAGE_SIZE){
+ return -EFAULT;
+ }
+ size *= 4; // Convert to bytes
+
+ /* Copy in the user's I2O command */
+ if(copy_from_user(msg, user_msg, size)) {
+ return -EFAULT;
+ }
+ get_user(reply_size, &user_reply[0]);
+ reply_size = reply_size>>16;
+ if(reply_size > REPLY_FRAME_SIZE){
+ reply_size = REPLY_FRAME_SIZE;
+ }
+ reply_size *= 4;
+ reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
+ if(reply == NULL) {
+ printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
+ return -ENOMEM;
+ }
+ sg_offset = (msg[0]>>4)&0xf;
+ msg[2] = 0x40000000; // IOCTL context
+ msg[3] = adpt_ioctl_to_context(pHba, reply);
+ if (msg[3] == (u32)-1)
+ return -EBUSY;
+
+ memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
+ if(sg_offset) {
+ // TODO add 64 bit API
+ struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
+ sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
+ if (sg_count > pHba->sg_tablesize){
+ printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
+ kfree (reply);
+ return -EINVAL;
+ }
+
+ for(i = 0; i < sg_count; i++) {
+ int sg_size;
+
+ if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
+ printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ sg_size = sg[i].flag_count & 0xffffff;
+ /* Allocate memory for the transfer */
+ p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
+ if(!p) {
+ printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ pHba->name,sg_size,i,sg_count);
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
+ /* Copy in the user's SG buffer if necessary */
+ if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
+ // sg_simple_element API is 32 bit
+ if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
+ printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ /* sg_simple_element API is 32 bit, but addr < 4GB */
+ sg[i].addr_bus = addr;
+ }
+ }
+
+ do {
+ /*
+ * Stop any new commands from enterring the
+ * controller while processing the ioctl
+ */
+ if (pHba->host) {
+ scsi_block_requests(pHba->host);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+ }
+ rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
+ if (rcode != 0)
+ printk("adpt_i2o_passthru: post wait failed %d %p\n",
+ rcode, reply);
+ if (pHba->host) {
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ scsi_unblock_requests(pHba->host);
+ }
+ } while (rcode == -ETIMEDOUT);
+
+ if(rcode){
+ goto cleanup;
+ }
+
+ if(sg_offset) {
+ /* Copy back the Scatter Gather buffers back to user space */
+ u32 j;
+ // TODO add 64 bit API
+ struct sg_simple_element* sg;
+ int sg_size;
+
+ // re-acquire the original message to handle correctly the sg copy operation
+ memset(&msg, 0, MAX_MESSAGE_SIZE*4);
+ // get user msg size in u32s
+ if(get_user(size, &user_msg[0])){
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ size = size>>16;
+ size *= 4;
+ if (size > MAX_MESSAGE_SIZE) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ /* Copy in the user's I2O command */
+ if (copy_from_user (msg, user_msg, size)) {
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
+
+ // TODO add 64 bit API
+ sg = (struct sg_simple_element*)(msg + sg_offset);
+ for (j = 0; j < sg_count; j++) {
+ /* Copy out the SG list to user's buffer if necessary */
+ if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
+ sg_size = sg[j].flag_count & 0xffffff;
+ // sg_simple_element API is 32 bit
+ if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
+ printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ }
+ }
+
+ /* Copy back the reply to user space */
+ if (reply_size) {
+ // we wrote our own values for context - now restore the user supplied ones
+ if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
+ printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
+ rcode = -EFAULT;
+ }
+ if(copy_to_user(user_reply, reply, reply_size)) {
+ printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
+ rcode = -EFAULT;
+ }
+ }
+
+
+cleanup:
+ if (rcode != -ETIME && rcode != -EINTR) {
+ struct sg_simple_element *sg =
+ (struct sg_simple_element*) (msg +sg_offset);
+ kfree (reply);
+ while(sg_index) {
+ if(sg_list[--sg_index]) {
+ dma_free_coherent(&pHba->pDev->dev,
+ sg[sg_index].flag_count & 0xffffff,
+ sg_list[sg_index],
+ sg[sg_index].addr_bus);
+ }
+ }
+ }
+ return rcode;
+}
+
+#if defined __ia64__
+static void adpt_ia64_info(sysInfo_S* si)
+{
+ // This is all the info we need for now
+ // We will add more info as our new
+ // managmenent utility requires it
+ si->processorType = PROC_IA64;
+}
+#endif
+
+#if defined __sparc__
+static void adpt_sparc_info(sysInfo_S* si)
+{
+ // This is all the info we need for now
+ // We will add more info as our new
+ // managmenent utility requires it
+ si->processorType = PROC_ULTRASPARC;
+}
+#endif
+#if defined __alpha__
+static void adpt_alpha_info(sysInfo_S* si)
+{
+ // This is all the info we need for now
+ // We will add more info as our new
+ // managmenent utility requires it
+ si->processorType = PROC_ALPHA;
+}
+#endif
+
+#if defined __i386__
+static void adpt_i386_info(sysInfo_S* si)
+{
+ // This is all the info we need for now
+ // We will add more info as our new
+ // managmenent utility requires it
+ switch (boot_cpu_data.x86) {
+ case CPU_386:
+ si->processorType = PROC_386;
+ break;
+ case CPU_486:
+ si->processorType = PROC_486;
+ break;
+ case CPU_586:
+ si->processorType = PROC_PENTIUM;
+ break;
+ default: // Just in case
+ si->processorType = PROC_PENTIUM;
+ break;
+ }
+}
+#endif
+
+/*
+ * This routine returns information about the system. This does not effect
+ * any logic and if the info is wrong - it doesn't matter.
+ */
+
+/* Get all the info we can not get from kernel services */
+static int adpt_system_info(void __user *buffer)
+{
+ sysInfo_S si;
+
+ memset(&si, 0, sizeof(si));
+
+ si.osType = OS_LINUX;
+ si.osMajorVersion = 0;
+ si.osMinorVersion = 0;
+ si.osRevision = 0;
+ si.busType = SI_PCI_BUS;
+ si.processorFamily = DPTI_sig.dsProcessorFamily;
+
+#if defined __i386__
+ adpt_i386_info(&si);
+#elif defined (__ia64__)
+ adpt_ia64_info(&si);
+#elif defined(__sparc__)
+ adpt_sparc_info(&si);
+#elif defined (__alpha__)
+ adpt_alpha_info(&si);
+#else
+ si.processorType = 0xff ;
+#endif
+ if (copy_to_user(buffer, &si, sizeof(si))){
+ printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+{
+ int minor;
+ int error = 0;
+ adpt_hba* pHba;
+ ulong flags = 0;
+ void __user *argp = (void __user *)arg;
+
+ minor = iminor(inode);
+ if (minor >= DPTI_MAX_HBA){
+ return -ENXIO;
+ }
+ mutex_lock(&adpt_configuration_lock);
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ if (pHba->unit == minor) {
+ break; /* found adapter */
+ }
+ }
+ mutex_unlock(&adpt_configuration_lock);
+ if(pHba == NULL){
+ return -ENXIO;
+ }
+
+ while((volatile u32) pHba->state & DPTI_STATE_RESET )
+ schedule_timeout_uninterruptible(2);
+
+ switch (cmd) {
+ // TODO: handle 3 cases
+ case DPT_SIGNATURE:
+ if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
+ return -EFAULT;
+ }
+ break;
+ case I2OUSRCMD:
+ return adpt_i2o_passthru(pHba, argp);
+
+ case DPT_CTRLINFO:{
+ drvrHBAinfo_S HbaInfo;
+
+#define FLG_OSD_PCI_VALID 0x0001
+#define FLG_OSD_DMA 0x0002
+#define FLG_OSD_I2O 0x0004
+ memset(&HbaInfo, 0, sizeof(HbaInfo));
+ HbaInfo.drvrHBAnum = pHba->unit;
+ HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
+ HbaInfo.blinkState = adpt_read_blink_led(pHba);
+ HbaInfo.pciBusNum = pHba->pDev->bus->number;
+ HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
+ HbaInfo.Interrupt = pHba->pDev->irq;
+ HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
+ if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
+ printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
+ return -EFAULT;
+ }
+ break;
+ }
+ case DPT_SYSINFO:
+ return adpt_system_info(argp);
+ case DPT_BLINKLED:{
+ u32 value;
+ value = (u32)adpt_read_blink_led(pHba);
+ if (copy_to_user(argp, &value, sizeof(value))) {
+ return -EFAULT;
+ }
+ break;
+ }
+ case I2ORESETCMD:
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+ adpt_hba_reset(pHba);
+ if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ break;
+ case I2ORESCANCMD:
+ adpt_rescan(pHba);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return error;
+}
+
+static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ struct inode *inode;
+ long ret;
+
+ inode = file_inode(file);
+
+ mutex_lock(&adpt_mutex);
+ ret = adpt_ioctl(inode, file, cmd, arg);
+ mutex_unlock(&adpt_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long compat_adpt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode;
+ long ret;
+
+ inode = file_inode(file);
+
+ mutex_lock(&adpt_mutex);
+
+ switch(cmd) {
+ case DPT_SIGNATURE:
+ case I2OUSRCMD:
+ case DPT_CTRLINFO:
+ case DPT_SYSINFO:
+ case DPT_BLINKLED:
+ case I2ORESETCMD:
+ case I2ORESCANCMD:
+ case (DPT_TARGET_BUSY & 0xFFFF):
+ case DPT_TARGET_BUSY:
+ ret = adpt_ioctl(inode, file, cmd, arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ mutex_unlock(&adpt_mutex);
+
+ return ret;
+}
+#endif
+
+static irqreturn_t adpt_isr(int irq, void *dev_id)
+{
+ struct scsi_cmnd* cmd;
+ adpt_hba* pHba = dev_id;
+ u32 m;
+ void __iomem *reply;
+ u32 status=0;
+ u32 context;
+ ulong flags = 0;
+ int handled = 0;
+
+ if (pHba == NULL){
+ printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
+ return IRQ_NONE;
+ }
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+
+ while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
+ m = readl(pHba->reply_port);
+ if(m == EMPTY_QUEUE){
+ // Try twice then give up
+ rmb();
+ m = readl(pHba->reply_port);
+ if(m == EMPTY_QUEUE){
+ // This really should not happen
+ printk(KERN_ERR"dpti: Could not get reply frame\n");
+ goto out;
+ }
+ }
+ if (pHba->reply_pool_pa <= m &&
+ m < pHba->reply_pool_pa +
+ (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
+ reply = (u8 *)pHba->reply_pool +
+ (m - pHba->reply_pool_pa);
+ } else {
+ /* Ick, we should *never* be here */
+ printk(KERN_ERR "dpti: reply frame not from pool\n");
+ reply = (u8 *)bus_to_virt(m);
+ }
+
+ if (readl(reply) & MSG_FAIL) {
+ u32 old_m = readl(reply+28);
+ void __iomem *msg;
+ u32 old_context;
+ PDEBUG("%s: Failed message\n",pHba->name);
+ if(old_m >= 0x100000){
+ printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
+ writel(m,pHba->reply_port);
+ continue;
+ }
+ // Transaction context is 0 in failed reply frame
+ msg = pHba->msg_addr_virt + old_m;
+ old_context = readl(msg+12);
+ writel(old_context, reply+12);
+ adpt_send_nop(pHba, old_m);
+ }
+ context = readl(reply+8);
+ if(context & 0x40000000){ // IOCTL
+ void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
+ if( p != NULL) {
+ memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
+ }
+ // All IOCTLs will also be post wait
+ }
+ if(context & 0x80000000){ // Post wait message
+ status = readl(reply+16);
+ if(status >> 24){
+ status &= 0xffff; /* Get detail status */
+ } else {
+ status = I2O_POST_WAIT_OK;
+ }
+ if(!(context & 0x40000000)) {
+ cmd = adpt_cmd_from_context(pHba,
+ readl(reply+12));
+ if(cmd != NULL) {
+ printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
+ }
+ }
+ adpt_i2o_post_wait_complete(context, status);
+ } else { // SCSI message
+ cmd = adpt_cmd_from_context (pHba, readl(reply+12));
+ if(cmd != NULL){
+ scsi_dma_unmap(cmd);
+ if(cmd->serial_number != 0) { // If not timedout
+ adpt_i2o_to_scsi(reply, cmd);
+ }
+ }
+ }
+ writel(m, pHba->reply_port);
+ wmb();
+ rmb();
+ }
+ handled = 1;
+out: if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
+{
+ int i;
+ u32 msg[MAX_MESSAGE_SIZE];
+ u32* mptr;
+ u32* lptr;
+ u32 *lenptr;
+ int direction;
+ int scsidir;
+ int nseg;
+ u32 len;
+ u32 reqlen;
+ s32 rcode;
+ dma_addr_t addr;
+
+ memset(msg, 0 , sizeof(msg));
+ len = scsi_bufflen(cmd);
+ direction = 0x00000000;
+
+ scsidir = 0x00000000; // DATA NO XFER
+ if(len) {
+ /*
+ * Set SCBFlags to indicate if data is being transferred
+ * in or out, or no data transfer
+ * Note: Do not have to verify index is less than 0 since
+ * cmd->cmnd[0] is an unsigned char
+ */
+ switch(cmd->sc_data_direction){
+ case DMA_FROM_DEVICE:
+ scsidir =0x40000000; // DATA IN (iop<--dev)
+ break;
+ case DMA_TO_DEVICE:
+ direction=0x04000000; // SGL OUT
+ scsidir =0x80000000; // DATA OUT (iop-->dev)
+ break;
+ case DMA_NONE:
+ break;
+ case DMA_BIDIRECTIONAL:
+ scsidir =0x40000000; // DATA IN (iop<--dev)
+ // Assume In - and continue;
+ break;
+ default:
+ printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
+ pHba->name, cmd->cmnd[0]);
+ cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ }
+ // msg[0] is set later
+ // I2O_CMD_SCSI_EXEC
+ msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
+ msg[2] = 0;
+ msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
+ // Our cards use the transaction context as the tag for queueing
+ // Adaptec/DPT Private stuff
+ msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
+ msg[5] = d->tid;
+ /* Direction, disconnect ok | sense data | simple queue , CDBLen */
+ // I2O_SCB_FLAG_ENABLE_DISCONNECT |
+ // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
+ // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
+ msg[6] = scsidir|0x20a00000|cmd->cmd_len;
+
+ mptr=msg+7;
+
+ // Write SCSI command into the message - always 16 byte block
+ memset(mptr, 0, 16);
+ memcpy(mptr, cmd->cmnd, cmd->cmd_len);
+ mptr+=4;
+ lenptr=mptr++; /* Remember me - fill in when we know */
+ if (dpt_dma64(pHba)) {
+ reqlen = 16; // SINGLE SGE
+ *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
+ *mptr++ = 1 << PAGE_SHIFT;
+ } else {
+ reqlen = 14; // SINGLE SGE
+ }
+ /* Now fill in the SGList and command */
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
+
+ len = 0;
+ scsi_for_each_sg(cmd, sg, nseg, i) {
+ lptr = mptr;
+ *mptr++ = direction|0x10000000|sg_dma_len(sg);
+ len+=sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ *mptr++ = dma_low(addr);
+ if (dpt_dma64(pHba))
+ *mptr++ = dma_high(addr);
+ /* Make this an end of list */
+ if (i == nseg - 1)
+ *lptr = direction|0xD0000000|sg_dma_len(sg);
+ }
+ reqlen = mptr - msg;
+ *lenptr = len;
+
+ if(cmd->underflow && len != cmd->underflow){
+ printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
+ len, cmd->underflow);
+ }
+ } else {
+ *lenptr = len = 0;
+ reqlen = 12;
+ }
+
+ /* Stick the headers on */
+ msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
+
+ // Send it on it's way
+ rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
+ if (rcode == 0) {
+ return 0;
+ }
+ return rcode;
+}
+
+
+static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
+{
+ struct Scsi_Host *host;
+
+ host = scsi_host_alloc(sht, sizeof(adpt_hba*));
+ if (host == NULL) {
+ printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
+ return -1;
+ }
+ host->hostdata[0] = (unsigned long)pHba;
+ pHba->host = host;
+
+ host->irq = pHba->pDev->irq;
+ /* no IO ports, so don't have to set host->io_port and
+ * host->n_io_port
+ */
+ host->io_port = 0;
+ host->n_io_port = 0;
+ /* see comments in scsi_host.h */
+ host->max_id = 16;
+ host->max_lun = 256;
+ host->max_channel = pHba->top_scsi_channel + 1;
+ host->cmd_per_lun = 1;
+ host->unique_id = (u32)sys_tbl_pa + pHba->unit;
+ host->sg_tablesize = pHba->sg_tablesize;
+ host->can_queue = pHba->post_fifo_size;
+ host->use_cmd_list = 1;
+
+ return 0;
+}
+
+
+static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
+{
+ adpt_hba* pHba;
+ u32 hba_status;
+ u32 dev_status;
+ u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
+ // I know this would look cleaner if I just read bytes
+ // but the model I have been using for all the rest of the
+ // io is in 4 byte words - so I keep that model
+ u16 detailed_status = readl(reply+16) &0xffff;
+ dev_status = (detailed_status & 0xff);
+ hba_status = detailed_status >> 8;
+
+ // calculate resid for sg
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
+
+ pHba = (adpt_hba*) cmd->device->host->hostdata[0];
+
+ cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
+
+ if(!(reply_flags & MSG_FAIL)) {
+ switch(detailed_status & I2O_SCSI_DSC_MASK) {
+ case I2O_SCSI_DSC_SUCCESS:
+ cmd->result = (DID_OK << 16);
+ // handle underflow
+ if (readl(reply+20) < cmd->underflow) {
+ cmd->result = (DID_ERROR <<16);
+ printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
+ }
+ break;
+ case I2O_SCSI_DSC_REQUEST_ABORTED:
+ cmd->result = (DID_ABORT << 16);
+ break;
+ case I2O_SCSI_DSC_PATH_INVALID:
+ case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
+ case I2O_SCSI_DSC_SELECTION_TIMEOUT:
+ case I2O_SCSI_DSC_COMMAND_TIMEOUT:
+ case I2O_SCSI_DSC_NO_ADAPTER:
+ case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
+ printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
+ pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
+ cmd->result = (DID_TIME_OUT << 16);
+ break;
+ case I2O_SCSI_DSC_ADAPTER_BUSY:
+ case I2O_SCSI_DSC_BUS_BUSY:
+ cmd->result = (DID_BUS_BUSY << 16);
+ break;
+ case I2O_SCSI_DSC_SCSI_BUS_RESET:
+ case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
+ cmd->result = (DID_RESET << 16);
+ break;
+ case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
+ printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
+ cmd->result = (DID_PARITY << 16);
+ break;
+ case I2O_SCSI_DSC_UNABLE_TO_ABORT:
+ case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
+ case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
+ case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
+ case I2O_SCSI_DSC_AUTOSENSE_FAILED:
+ case I2O_SCSI_DSC_DATA_OVERRUN:
+ case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
+ case I2O_SCSI_DSC_SEQUENCE_FAILURE:
+ case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
+ case I2O_SCSI_DSC_PROVIDE_FAILURE:
+ case I2O_SCSI_DSC_REQUEST_TERMINATED:
+ case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
+ case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
+ case I2O_SCSI_DSC_MESSAGE_RECEIVED:
+ case I2O_SCSI_DSC_INVALID_CDB:
+ case I2O_SCSI_DSC_LUN_INVALID:
+ case I2O_SCSI_DSC_SCSI_TID_INVALID:
+ case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
+ case I2O_SCSI_DSC_NO_NEXUS:
+ case I2O_SCSI_DSC_CDB_RECEIVED:
+ case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
+ case I2O_SCSI_DSC_QUEUE_FROZEN:
+ case I2O_SCSI_DSC_REQUEST_INVALID:
+ default:
+ printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
+ pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
+ hba_status, dev_status, cmd->cmnd[0]);
+ cmd->result = (DID_ERROR << 16);
+ break;
+ }
+
+ // copy over the request sense data if it was a check
+ // condition status
+ if (dev_status == SAM_STAT_CHECK_CONDITION) {
+ u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
+ // Copy over the sense data
+ memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
+ if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
+ cmd->sense_buffer[2] == DATA_PROTECT ){
+ /* This is to handle an array failed */
+ cmd->result = (DID_TIME_OUT << 16);
+ printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
+ pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
+ hba_status, dev_status, cmd->cmnd[0]);
+
+ }
+ }
+ } else {
+ /* In this condtion we could not talk to the tid
+ * the card rejected it. We should signal a retry
+ * for a limitted number of retries.
+ */
+ cmd->result = (DID_TIME_OUT << 16);
+ printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
+ pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
+ ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
+ }
+
+ cmd->result |= (dev_status);
+
+ if(cmd->scsi_done != NULL){
+ cmd->scsi_done(cmd);
+ }
+ return cmd->result;
+}
+
+
+static s32 adpt_rescan(adpt_hba* pHba)
+{
+ s32 rcode;
+ ulong flags = 0;
+
+ if(pHba->host)
+ spin_lock_irqsave(pHba->host->host_lock, flags);
+ if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
+ goto out;
+ if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
+ goto out;
+ rcode = 0;
+out: if(pHba->host)
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ return rcode;
+}
+
+
+static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
+{
+ int i;
+ int max;
+ int tid;
+ struct i2o_device *d;
+ i2o_lct *lct = pHba->lct;
+ u8 bus_no = 0;
+ s16 scsi_id;
+ u64 scsi_lun;
+ u32 buf[10]; // at least 8 u32's
+ struct adpt_device* pDev = NULL;
+ struct i2o_device* pI2o_dev = NULL;
+
+ if (lct == NULL) {
+ printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
+ return -1;
+ }
+
+ max = lct->table_size;
+ max -= 3;
+ max /= 9;
+
+ // Mark each drive as unscanned
+ for (d = pHba->devices; d; d = d->next) {
+ pDev =(struct adpt_device*) d->owner;
+ if(!pDev){
+ continue;
+ }
+ pDev->state |= DPTI_DEV_UNSCANNED;
+ }
+
+ printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
+
+ for(i=0;i<max;i++) {
+ if( lct->lct_entry[i].user_tid != 0xfff){
+ continue;
+ }
+
+ if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
+ lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
+ lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
+ tid = lct->lct_entry[i].tid;
+ if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
+ printk(KERN_ERR"%s: Could not query device\n",pHba->name);
+ continue;
+ }
+ bus_no = buf[0]>>16;
+ if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
+ printk(KERN_WARNING
+ "%s: Channel number %d out of range\n",
+ pHba->name, bus_no);
+ continue;
+ }
+
+ scsi_id = buf[1];
+ scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
+ pDev = pHba->channel[bus_no].device[scsi_id];
+ /* da lun */
+ while(pDev) {
+ if(pDev->scsi_lun == scsi_lun) {
+ break;
+ }
+ pDev = pDev->next_lun;
+ }
+ if(!pDev ) { // Something new add it
+ d = kmalloc(sizeof(struct i2o_device),
+ GFP_ATOMIC);
+ if(d==NULL)
+ {
+ printk(KERN_CRIT "Out of memory for I2O device data.\n");
+ return -ENOMEM;
+ }
+
+ d->controller = pHba;
+ d->next = NULL;
+
+ memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
+
+ d->flags = 0;
+ adpt_i2o_report_hba_unit(pHba, d);
+ adpt_i2o_install_device(pHba, d);
+
+ pDev = pHba->channel[bus_no].device[scsi_id];
+ if( pDev == NULL){
+ pDev =
+ kzalloc(sizeof(struct adpt_device),
+ GFP_ATOMIC);
+ if(pDev == NULL) {
+ return -ENOMEM;
+ }
+ pHba->channel[bus_no].device[scsi_id] = pDev;
+ } else {
+ while (pDev->next_lun) {
+ pDev = pDev->next_lun;
+ }
+ pDev = pDev->next_lun =
+ kzalloc(sizeof(struct adpt_device),
+ GFP_ATOMIC);
+ if(pDev == NULL) {
+ return -ENOMEM;
+ }
+ }
+ pDev->tid = d->lct_data.tid;
+ pDev->scsi_channel = bus_no;
+ pDev->scsi_id = scsi_id;
+ pDev->scsi_lun = scsi_lun;
+ pDev->pI2o_dev = d;
+ d->owner = pDev;
+ pDev->type = (buf[0])&0xff;
+ pDev->flags = (buf[0]>>8)&0xff;
+ // Too late, SCSI system has made up it's mind, but what the hey ...
+ if(scsi_id > pHba->top_scsi_id){
+ pHba->top_scsi_id = scsi_id;
+ }
+ if(scsi_lun > pHba->top_scsi_lun){
+ pHba->top_scsi_lun = scsi_lun;
+ }
+ continue;
+ } // end of new i2o device
+
+ // We found an old device - check it
+ while(pDev) {
+ if(pDev->scsi_lun == scsi_lun) {
+ if(!scsi_device_online(pDev->pScsi_dev)) {
+ printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
+ pHba->name,bus_no,scsi_id,scsi_lun);
+ if (pDev->pScsi_dev) {
+ scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
+ }
+ }
+ d = pDev->pI2o_dev;
+ if(d->lct_data.tid != tid) { // something changed
+ pDev->tid = tid;
+ memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
+ if (pDev->pScsi_dev) {
+ pDev->pScsi_dev->changed = TRUE;
+ pDev->pScsi_dev->removable = TRUE;
+ }
+ }
+ // Found it - mark it scanned
+ pDev->state = DPTI_DEV_ONLINE;
+ break;
+ }
+ pDev = pDev->next_lun;
+ }
+ }
+ }
+ for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
+ pDev =(struct adpt_device*) pI2o_dev->owner;
+ if(!pDev){
+ continue;
+ }
+ // Drive offline drives that previously existed but could not be found
+ // in the LCT table
+ if (pDev->state & DPTI_DEV_UNSCANNED){
+ pDev->state = DPTI_DEV_OFFLINE;
+ printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
+ if (pDev->pScsi_dev) {
+ scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
+ }
+ }
+ }
+ return 0;
+}
+
+static void adpt_fail_posted_scbs(adpt_hba* pHba)
+{
+ struct scsi_cmnd* cmd = NULL;
+ struct scsi_device* d = NULL;
+
+ shost_for_each_device(d, pHba->host) {
+ unsigned long flags;
+ spin_lock_irqsave(&d->list_lock, flags);
+ list_for_each_entry(cmd, &d->cmd_list, list) {
+ if(cmd->serial_number == 0){
+ continue;
+ }
+ cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
+ cmd->scsi_done(cmd);
+ }
+ spin_unlock_irqrestore(&d->list_lock, flags);
+ }
+}
+
+
+/*============================================================================
+ * Routines from i2o subsystem
+ *============================================================================
+ */
+
+
+
+/*
+ * Bring an I2O controller into HOLD state. See the spec.
+ */
+static int adpt_i2o_activate_hba(adpt_hba* pHba)
+{
+ int rcode;
+
+ if(pHba->initialized ) {
+ if (adpt_i2o_status_get(pHba) < 0) {
+ if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
+ printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
+ return rcode;
+ }
+ if (adpt_i2o_status_get(pHba) < 0) {
+ printk(KERN_INFO "HBA not responding.\n");
+ return -1;
+ }
+ }
+
+ if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
+ printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
+ return -1;
+ }
+
+ if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
+ pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
+ pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
+ pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
+ adpt_i2o_reset_hba(pHba);
+ if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
+ printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
+ return -1;
+ }
+ }
+ } else {
+ if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
+ printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
+ return rcode;
+ }
+
+ }
+
+ if (adpt_i2o_init_outbound_q(pHba) < 0) {
+ return -1;
+ }
+
+ /* In HOLD state */
+
+ if (adpt_i2o_hrt_get(pHba) < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Bring a controller online into OPERATIONAL state.
+ */
+
+static int adpt_i2o_online_hba(adpt_hba* pHba)
+{
+ if (adpt_i2o_systab_send(pHba) < 0) {
+ adpt_i2o_delete_hba(pHba);
+ return -1;
+ }
+ /* In READY state */
+
+ if (adpt_i2o_enable_hba(pHba) < 0) {
+ adpt_i2o_delete_hba(pHba);
+ return -1;
+ }
+
+ /* In OPERATIONAL state */
+ return 0;
+}
+
+static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
+{
+ u32 __iomem *msg;
+ ulong timeout = jiffies + 5*HZ;
+
+ while(m == EMPTY_QUEUE){
+ rmb();
+ m = readl(pHba->post_port);
+ if(m != EMPTY_QUEUE){
+ break;
+ }
+ if(time_after(jiffies,timeout)){
+ printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
+ return 2;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+ msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
+ writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
+ writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
+ writel( 0,&msg[2]);
+ wmb();
+
+ writel(m, pHba->post_port);
+ wmb();
+ return 0;
+}
+
+static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
+{
+ u8 *status;
+ dma_addr_t addr;
+ u32 __iomem *msg = NULL;
+ int i;
+ ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
+ u32 m;
+
+ do {
+ rmb();
+ m = readl(pHba->post_port);
+ if (m != EMPTY_QUEUE) {
+ break;
+ }
+
+ if(time_after(jiffies,timeout)){
+ printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ } while(m == EMPTY_QUEUE);
+
+ msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
+
+ status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
+ if (!status) {
+ adpt_send_nop(pHba, m);
+ printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
+ pHba->name);
+ return -ENOMEM;
+ }
+ memset(status, 0, 4);
+
+ writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
+ writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
+ writel(0, &msg[2]);
+ writel(0x0106, &msg[3]); /* Transaction context */
+ writel(4096, &msg[4]); /* Host page frame size */
+ writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
+ writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
+ writel((u32)addr, &msg[7]);
+
+ writel(m, pHba->post_port);
+ wmb();
+
+ // Wait for the reply status to come back
+ do {
+ if (*status) {
+ if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
+ break;
+ }
+ }
+ rmb();
+ if(time_after(jiffies,timeout)){
+ printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
+ /* We lose 4 bytes of "status" here, but we
+ cannot free these because controller may
+ awake and corrupt those bytes at any time */
+ /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ } while (1);
+
+ // If the command was successful, fill the fifo with our reply
+ // message packets
+ if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
+ dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
+ return -2;
+ }
+ dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
+
+ if(pHba->reply_pool != NULL) {
+ dma_free_coherent(&pHba->pDev->dev,
+ pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+ pHba->reply_pool, pHba->reply_pool_pa);
+ }
+
+ pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
+ pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
+ &pHba->reply_pool_pa, GFP_KERNEL);
+ if (!pHba->reply_pool) {
+ printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
+ return -ENOMEM;
+ }
+ memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
+
+ for(i = 0; i < pHba->reply_fifo_size; i++) {
+ writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
+ pHba->reply_port);
+ wmb();
+ }
+ adpt_i2o_status_get(pHba);
+ return 0;
+}
+
+
+/*
+ * I2O System Table. Contains information about
+ * all the IOPs in the system. Used to inform IOPs
+ * about each other's existence.
+ *
+ * sys_tbl_ver is the CurrentChangeIndicator that is
+ * used by IOPs to track changes.
+ */
+
+
+
+static s32 adpt_i2o_status_get(adpt_hba* pHba)
+{
+ ulong timeout;
+ u32 m;
+ u32 __iomem *msg;
+ u8 *status_block=NULL;
+
+ if(pHba->status_block == NULL) {
+ pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
+ sizeof(i2o_status_block),
+ &pHba->status_block_pa, GFP_KERNEL);
+ if(pHba->status_block == NULL) {
+ printk(KERN_ERR
+ "dpti%d: Get Status Block failed; Out of memory. \n",
+ pHba->unit);
+ return -ENOMEM;
+ }
+ }
+ memset(pHba->status_block, 0, sizeof(i2o_status_block));
+ status_block = (u8*)(pHba->status_block);
+ timeout = jiffies+TMOUT_GETSTATUS*HZ;
+ do {
+ rmb();
+ m = readl(pHba->post_port);
+ if (m != EMPTY_QUEUE) {
+ break;
+ }
+ if(time_after(jiffies,timeout)){
+ printk(KERN_ERR "%s: Timeout waiting for message !\n",
+ pHba->name);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ } while(m==EMPTY_QUEUE);
+
+
+ msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
+
+ writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
+ writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
+ writel(1, &msg[2]);
+ writel(0, &msg[3]);
+ writel(0, &msg[4]);
+ writel(0, &msg[5]);
+ writel( dma_low(pHba->status_block_pa), &msg[6]);
+ writel( dma_high(pHba->status_block_pa), &msg[7]);
+ writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
+
+ //post message
+ writel(m, pHba->post_port);
+ wmb();
+
+ while(status_block[87]!=0xff){
+ if(time_after(jiffies,timeout)){
+ printk(KERN_ERR"dpti%d: Get status timeout.\n",
+ pHba->unit);
+ return -ETIMEDOUT;
+ }
+ rmb();
+ schedule_timeout_uninterruptible(1);
+ }
+
+ // Set up our number of outbound and inbound messages
+ pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
+ if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
+ pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
+ }
+
+ pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
+ if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
+ pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
+ }
+
+ // Calculate the Scatter Gather list size
+ if (dpt_dma64(pHba)) {
+ pHba->sg_tablesize
+ = ((pHba->status_block->inbound_frame_size * 4
+ - 14 * sizeof(u32))
+ / (sizeof(struct sg_simple_element) + sizeof(u32)));
+ } else {
+ pHba->sg_tablesize
+ = ((pHba->status_block->inbound_frame_size * 4
+ - 12 * sizeof(u32))
+ / sizeof(struct sg_simple_element));
+ }
+ if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
+ pHba->sg_tablesize = SG_LIST_ELEMENTS;
+ }
+
+
+#ifdef DEBUG
+ printk("dpti%d: State = ",pHba->unit);
+ switch(pHba->status_block->iop_state) {
+ case 0x01:
+ printk("INIT\n");
+ break;
+ case 0x02:
+ printk("RESET\n");
+ break;
+ case 0x04:
+ printk("HOLD\n");
+ break;
+ case 0x05:
+ printk("READY\n");
+ break;
+ case 0x08:
+ printk("OPERATIONAL\n");
+ break;
+ case 0x10:
+ printk("FAILED\n");
+ break;
+ case 0x11:
+ printk("FAULTED\n");
+ break;
+ default:
+ printk("%x (unknown!!)\n",pHba->status_block->iop_state);
+ }
+#endif
+ return 0;
+}
+
+/*
+ * Get the IOP's Logical Configuration Table
+ */
+static int adpt_i2o_lct_get(adpt_hba* pHba)
+{
+ u32 msg[8];
+ int ret;
+ u32 buf[16];
+
+ if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
+ pHba->lct_size = pHba->status_block->expected_lct_size;
+ }
+ do {
+ if (pHba->lct == NULL) {
+ pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
+ pHba->lct_size, &pHba->lct_pa,
+ GFP_ATOMIC);
+ if(pHba->lct == NULL) {
+ printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
+ pHba->name);
+ return -ENOMEM;
+ }
+ }
+ memset(pHba->lct, 0, pHba->lct_size);
+
+ msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0;
+ msg[3] = 0;
+ msg[4] = 0xFFFFFFFF; /* All devices */
+ msg[5] = 0x00000000; /* Report now */
+ msg[6] = 0xD0000000|pHba->lct_size;
+ msg[7] = (u32)pHba->lct_pa;
+
+ if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
+ printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
+ pHba->name, ret);
+ printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
+ return ret;
+ }
+
+ if ((pHba->lct->table_size << 2) > pHba->lct_size) {
+ pHba->lct_size = pHba->lct->table_size << 2;
+ dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
+ pHba->lct, pHba->lct_pa);
+ pHba->lct = NULL;
+ }
+ } while (pHba->lct == NULL);
+
+ PDEBUG("%s: Hardware resource table read.\n", pHba->name);
+
+
+ // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
+ if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
+ pHba->FwDebugBufferSize = buf[1];
+ pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
+ pHba->FwDebugBufferSize);
+ if (pHba->FwDebugBuffer_P) {
+ pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
+ FW_DEBUG_FLAGS_OFFSET;
+ pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
+ FW_DEBUG_BLED_OFFSET;
+ pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
+ pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
+ FW_DEBUG_STR_LENGTH_OFFSET;
+ pHba->FwDebugBuffer_P += buf[2];
+ pHba->FwDebugFlags = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int adpt_i2o_build_sys_table(void)
+{
+ adpt_hba* pHba = hba_chain;
+ int count = 0;
+
+ if (sys_tbl)
+ dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
+ sys_tbl, sys_tbl_pa);
+
+ sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
+ (hba_count) * sizeof(struct i2o_sys_tbl_entry);
+
+ sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
+ sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
+ if (!sys_tbl) {
+ printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
+ return -ENOMEM;
+ }
+ memset(sys_tbl, 0, sys_tbl_len);
+
+ sys_tbl->num_entries = hba_count;
+ sys_tbl->version = I2OVERSION;
+ sys_tbl->change_ind = sys_tbl_ind++;
+
+ for(pHba = hba_chain; pHba; pHba = pHba->next) {
+ u64 addr;
+ // Get updated Status Block so we have the latest information
+ if (adpt_i2o_status_get(pHba)) {
+ sys_tbl->num_entries--;
+ continue; // try next one
+ }
+
+ sys_tbl->iops[count].org_id = pHba->status_block->org_id;
+ sys_tbl->iops[count].iop_id = pHba->unit + 2;
+ sys_tbl->iops[count].seg_num = 0;
+ sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
+ sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
+ sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
+ sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
+ sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
+ sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
+ addr = pHba->base_addr_phys + 0x40;
+ sys_tbl->iops[count].inbound_low = dma_low(addr);
+ sys_tbl->iops[count].inbound_high = dma_high(addr);
+
+ count++;
+ }
+
+#ifdef DEBUG
+{
+ u32 *table = (u32*)sys_tbl;
+ printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
+ for(count = 0; count < (sys_tbl_len >>2); count++) {
+ printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
+ count, table[count]);
+ }
+}
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Dump the information block associated with a given unit (TID)
+ */
+
+static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
+{
+ char buf[64];
+ int unit = d->lct_data.tid;
+
+ printk(KERN_INFO "TID %3.3d ", unit);
+
+ if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
+ {
+ buf[16]=0;
+ printk(" Vendor: %-12.12s", buf);
+ }
+ if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
+ {
+ buf[16]=0;
+ printk(" Device: %-12.12s", buf);
+ }
+ if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
+ {
+ buf[8]=0;
+ printk(" Rev: %-12.12s\n", buf);
+ }
+#ifdef DEBUG
+ printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
+ printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
+ printk(KERN_INFO "\tFlags: ");
+
+ if(d->lct_data.device_flags&(1<<0))
+ printk("C"); // ConfigDialog requested
+ if(d->lct_data.device_flags&(1<<1))
+ printk("U"); // Multi-user capable
+ if(!(d->lct_data.device_flags&(1<<4)))
+ printk("P"); // Peer service enabled!
+ if(!(d->lct_data.device_flags&(1<<5)))
+ printk("M"); // Mgmt service enabled!
+ printk("\n");
+#endif
+}
+
+#ifdef DEBUG
+/*
+ * Do i2o class name lookup
+ */
+static const char *adpt_i2o_get_class_name(int class)
+{
+ int idx = 16;
+ static char *i2o_class_name[] = {
+ "Executive",
+ "Device Driver Module",
+ "Block Device",
+ "Tape Device",
+ "LAN Interface",
+ "WAN Interface",
+ "Fibre Channel Port",
+ "Fibre Channel Device",
+ "SCSI Device",
+ "ATE Port",
+ "ATE Device",
+ "Floppy Controller",
+ "Floppy Device",
+ "Secondary Bus Port",
+ "Peer Transport Agent",
+ "Peer Transport",
+ "Unknown"
+ };
+
+ switch(class&0xFFF) {
+ case I2O_CLASS_EXECUTIVE:
+ idx = 0; break;
+ case I2O_CLASS_DDM:
+ idx = 1; break;
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ idx = 2; break;
+ case I2O_CLASS_SEQUENTIAL_STORAGE:
+ idx = 3; break;
+ case I2O_CLASS_LAN:
+ idx = 4; break;
+ case I2O_CLASS_WAN:
+ idx = 5; break;
+ case I2O_CLASS_FIBRE_CHANNEL_PORT:
+ idx = 6; break;
+ case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
+ idx = 7; break;
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ idx = 8; break;
+ case I2O_CLASS_ATE_PORT:
+ idx = 9; break;
+ case I2O_CLASS_ATE_PERIPHERAL:
+ idx = 10; break;
+ case I2O_CLASS_FLOPPY_CONTROLLER:
+ idx = 11; break;
+ case I2O_CLASS_FLOPPY_DEVICE:
+ idx = 12; break;
+ case I2O_CLASS_BUS_ADAPTER_PORT:
+ idx = 13; break;
+ case I2O_CLASS_PEER_TRANSPORT_AGENT:
+ idx = 14; break;
+ case I2O_CLASS_PEER_TRANSPORT:
+ idx = 15; break;
+ }
+ return i2o_class_name[idx];
+}
+#endif
+
+
+static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
+{
+ u32 msg[6];
+ int ret, size = sizeof(i2o_hrt);
+
+ do {
+ if (pHba->hrt == NULL) {
+ pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
+ size, &pHba->hrt_pa, GFP_KERNEL);
+ if (pHba->hrt == NULL) {
+ printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
+ return -ENOMEM;
+ }
+ }
+
+ msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+ msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= 0;
+ msg[3]= 0;
+ msg[4]= (0xD0000000 | size); /* Simple transaction */
+ msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
+
+ if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
+ printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
+ return ret;
+ }
+
+ if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
+ int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
+ dma_free_coherent(&pHba->pDev->dev, size,
+ pHba->hrt, pHba->hrt_pa);
+ size = newsize;
+ pHba->hrt = NULL;
+ }
+ } while(pHba->hrt == NULL);
+ return 0;
+}
+
+/*
+ * Query one scalar group value or a whole scalar group.
+ */
+static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
+ int group, int field, void *buf, int buflen)
+{
+ u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
+ u8 *opblk_va;
+ dma_addr_t opblk_pa;
+ u8 *resblk_va;
+ dma_addr_t resblk_pa;
+
+ int size;
+
+ /* 8 bytes for header */
+ resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+ sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
+ if (resblk_va == NULL) {
+ printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
+ return -ENOMEM;
+ }
+
+ opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
+ sizeof(opblk), &opblk_pa, GFP_KERNEL);
+ if (opblk_va == NULL) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
+ printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+ pHba->name);
+ return -ENOMEM;
+ }
+ if (field == -1) /* whole group */
+ opblk[4] = -1;
+
+ memcpy(opblk_va, opblk, sizeof(opblk));
+ size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
+ opblk_va, opblk_pa, sizeof(opblk),
+ resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
+ dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
+ if (size == -ETIME) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
+ printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
+ return -ETIME;
+ } else if (size == -EINTR) {
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
+ printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
+ return -EINTR;
+ }
+
+ memcpy(buf, resblk_va+8, buflen); /* cut off header */
+
+ dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
+ resblk_va, resblk_pa);
+ if (size < 0)
+ return size;
+
+ return buflen;
+}
+
+
+/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
+ *
+ * This function can be used for all UtilParamsGet/Set operations.
+ * The OperationBlock is given in opblk-buffer,
+ * and results are returned in resblk-buffer.
+ * Note that the minimum sized resblk is 8 bytes and contains
+ * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
+ */
+static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
+ void *opblk_va, dma_addr_t opblk_pa, int oplen,
+ void *resblk_va, dma_addr_t resblk_pa, int reslen)
+{
+ u32 msg[9];
+ u32 *res = (u32 *)resblk_va;
+ int wait_status;
+
+ msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
+ msg[1] = cmd << 24 | HOST_TID << 12 | tid;
+ msg[2] = 0;
+ msg[3] = 0;
+ msg[4] = 0;
+ msg[5] = 0x54000000 | oplen; /* OperationBlock */
+ msg[6] = (u32)opblk_pa;
+ msg[7] = 0xD0000000 | reslen; /* ResultBlock */
+ msg[8] = (u32)resblk_pa;
+
+ if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
+ printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
+ return wait_status; /* -DetailedStatus */
+ }
+
+ if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
+ printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
+ "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
+ pHba->name,
+ (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
+ : "PARAMS_GET",
+ res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
+ return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
+ }
+
+ return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
+}
+
+
+static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
+{
+ u32 msg[4];
+ int ret;
+
+ adpt_i2o_status_get(pHba);
+
+ /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
+
+ if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
+ (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
+ return 0;
+ }
+
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2] = 0;
+ msg[3] = 0;
+
+ if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
+ printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
+ pHba->unit, -ret);
+ } else {
+ printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
+ }
+
+ adpt_i2o_status_get(pHba);
+ return ret;
+}
+
+
+/*
+ * Enable IOP. Allows the IOP to resume external operations.
+ */
+static int adpt_i2o_enable_hba(adpt_hba* pHba)
+{
+ u32 msg[4];
+ int ret;
+
+ adpt_i2o_status_get(pHba);
+ if(!pHba->status_block){
+ return -ENOMEM;
+ }
+ /* Enable only allowed on READY state */
+ if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
+ return 0;
+
+ if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
+ return -EINVAL;
+
+ msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]= 0;
+ msg[3]= 0;
+
+ if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
+ printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
+ pHba->name, ret);
+ } else {
+ PDEBUG("%s: Enabled.\n", pHba->name);
+ }
+
+ adpt_i2o_status_get(pHba);
+ return ret;
+}
+
+
+static int adpt_i2o_systab_send(adpt_hba* pHba)
+{
+ u32 msg[12];
+ int ret;
+
+ msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
+ msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0;
+ msg[3] = 0;
+ msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
+ msg[5] = 0; /* Segment 0 */
+
+ /*
+ * Provide three SGL-elements:
+ * System table (SysTab), Private memory space declaration and
+ * Private i/o space declaration
+ */
+ msg[6] = 0x54000000 | sys_tbl_len;
+ msg[7] = (u32)sys_tbl_pa;
+ msg[8] = 0x54000000 | 0;
+ msg[9] = 0;
+ msg[10] = 0xD4000000 | 0;
+ msg[11] = 0;
+
+ if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
+ printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
+ pHba->name, ret);
+ }
+#ifdef DEBUG
+ else {
+ PINFO("%s: SysTab set.\n", pHba->name);
+ }
+#endif
+
+ return ret;
+ }
+
+
+/*============================================================================
+ *
+ *============================================================================
+ */
+
+
+#ifdef UARTDELAY
+
+static static void adpt_delay(int millisec)
+{
+ int i;
+ for (i = 0; i < millisec; i++) {
+ udelay(1000); /* delay for one millisecond */
+ }
+}
+
+#endif
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "dpt_i2o",
+ .proc_name = "dpt_i2o",
+ .show_info = adpt_show_info,
+ .info = adpt_info,
+ .queuecommand = adpt_queue,
+ .eh_abort_handler = adpt_abort,
+ .eh_device_reset_handler = adpt_device_reset,
+ .eh_bus_reset_handler = adpt_bus_reset,
+ .eh_host_reset_handler = adpt_reset,
+ .bios_param = adpt_bios_param,
+ .slave_configure = adpt_slave_configure,
+ .can_queue = MAX_TO_IOP_MESSAGES,
+ .this_id = 7,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int __init adpt_init(void)
+{
+ int error;
+ adpt_hba *pHba, *next;
+
+ printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
+
+ error = adpt_detect(&driver_template);
+ if (error < 0)
+ return error;
+ if (hba_chain == NULL)
+ return -ENODEV;
+
+ for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ error = scsi_add_host(pHba->host, &pHba->pDev->dev);
+ if (error)
+ goto fail;
+ scsi_scan_host(pHba->host);
+ }
+ return 0;
+fail:
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ scsi_remove_host(pHba->host);
+ }
+ return error;
+}
+
+static void __exit adpt_exit(void)
+{
+ adpt_hba *pHba, *next;
+
+ for (pHba = hba_chain; pHba; pHba = pHba->next)
+ scsi_remove_host(pHba->host);
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
+ adpt_release(pHba->host);
+ }
+}
+
+module_init(adpt_init);
+module_exit(adpt_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
new file mode 100644
index 000000000..1fa345ab8
--- /dev/null
+++ b/drivers/scsi/dpti.h
@@ -0,0 +1,336 @@
+/***************************************************************************
+ dpti.h - description
+ -------------------
+ begin : Thu Sep 7 2000
+ copyright : (C) 2001 by Adaptec
+
+ See Documentation/scsi/dpti.txt for history, notes, license info
+ and credits
+ ***************************************************************************/
+
+/***************************************************************************
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ ***************************************************************************/
+
+#ifndef _DPT_H
+#define _DPT_H
+
+#define MAX_TO_IOP_MESSAGES (255)
+#define MAX_FROM_IOP_MESSAGES (255)
+
+
+/*
+ * SCSI interface function Prototypes
+ */
+
+static int adpt_detect(struct scsi_host_template * sht);
+static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
+static int adpt_abort(struct scsi_cmnd * cmd);
+static int adpt_reset(struct scsi_cmnd* cmd);
+static int adpt_release(struct Scsi_Host *host);
+static int adpt_slave_configure(struct scsi_device *);
+
+static const char *adpt_info(struct Scsi_Host *pSHost);
+static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
+ sector_t, int geom[]);
+
+static int adpt_bus_reset(struct scsi_cmnd* cmd);
+static int adpt_device_reset(struct scsi_cmnd* cmd);
+
+
+/*
+ * struct scsi_host_template (see scsi/scsi_host.h)
+ */
+
+#define DPT_DRIVER_NAME "Adaptec I2O RAID"
+
+#ifndef HOSTS_C
+
+#include "dpt/sys_info.h"
+#include <linux/wait.h>
+#include "dpt/dpti_i2o.h"
+#include "dpt/dpti_ioctl.h"
+
+#define DPT_I2O_VERSION "2.4 Build 5go"
+#define DPT_VERSION 2
+#define DPT_REVISION '4'
+#define DPT_SUBREVISION '5'
+#define DPT_BETA ""
+#define DPT_MONTH 8
+#define DPT_DAY 7
+#define DPT_YEAR (2001-1980)
+
+#define DPT_DRIVER "dpt_i2o"
+#define DPTI_I2O_MAJOR (151)
+#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */
+#define DPTI_MAX_HBA (16)
+#define MAX_CHANNEL (5) // Maximum Channel # Supported
+#define MAX_ID (128) // Maximum Target ID Supported
+
+/* Sizes in 4 byte words */
+#define REPLY_FRAME_SIZE (17)
+#define MAX_MESSAGE_SIZE (128)
+#define SG_LIST_ELEMENTS (56)
+
+#define EMPTY_QUEUE 0xffffffff
+#define I2O_INTERRUPT_PENDING_B (0x08)
+
+#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID
+#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
+#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
+
+/* Debugging macro from Linux Device Drivers - Rubini */
+#undef PDEBUG
+#ifdef DEBUG
+//TODO add debug level switch
+# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
+# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
+#else
+# define PDEBUG(fmt, args...) /* not debugging: nothing */
+# define PDEBUGV(fmt, args...) /* not debugging: nothing */
+#endif
+
+#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args)
+#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args)
+#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args)
+#define PCRIT(fmt, args...) printk(KERN_CRIT fmt, ##args)
+
+#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
+
+// Command timeouts
+#define FOREVER (0)
+#define TMOUT_INQUIRY (20)
+#define TMOUT_FLUSH (360/45)
+#define TMOUT_ABORT (30)
+#define TMOUT_SCSI (300)
+#define TMOUT_IOPRESET (360)
+#define TMOUT_GETSTATUS (15)
+#define TMOUT_INITOUTBOUND (15)
+#define TMOUT_LCT (360)
+
+
+#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF
+
+#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A
+
+#define I2O_SCSI_DSC_MASK 0xFF00
+#define I2O_SCSI_DSC_SUCCESS 0x0000
+#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200
+#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300
+#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400
+#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500
+#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600
+#define I2O_SCSI_DSC_PATH_INVALID 0x0700
+#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800
+#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900
+#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00
+#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00
+#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00
+#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00
+#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00
+#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000
+#define I2O_SCSI_DSC_NO_ADAPTER 0x1100
+#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200
+#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300
+#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400
+#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500
+#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600
+#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700
+#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800
+#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300
+#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400
+#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500
+#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600
+#define I2O_SCSI_DSC_INVALID_CDB 0x3700
+#define I2O_SCSI_DSC_LUN_INVALID 0x3800
+#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900
+#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00
+#define I2O_SCSI_DSC_NO_NEXUS 0x3B00
+#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00
+#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00
+#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00
+#define I2O_SCSI_DSC_BUS_BUSY 0x3F00
+#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000
+
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed
+#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State
+#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */
+#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */
+
+
+// Device state flags
+#define DPTI_DEV_ONLINE 0x00
+#define DPTI_DEV_UNSCANNED 0x01
+#define DPTI_DEV_RESET 0x02
+#define DPTI_DEV_OFFLINE 0x04
+
+
+struct adpt_device {
+ struct adpt_device* next_lun;
+ u32 flags;
+ u32 type;
+ u32 capacity;
+ u32 block_size;
+ u8 scsi_channel;
+ u8 scsi_id;
+ u64 scsi_lun;
+ u8 state;
+ u16 tid;
+ struct i2o_device* pI2o_dev;
+ struct scsi_device *pScsi_dev;
+};
+
+struct adpt_channel {
+ struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */
+ u8 scsi_id;
+ u8 type;
+ u16 tid;
+ u32 state;
+ struct i2o_device* pI2o_dev;
+};
+
+// HBA state flags
+#define DPTI_STATE_RESET (0x01)
+
+typedef struct _adpt_hba {
+ struct _adpt_hba *next;
+ struct pci_dev *pDev;
+ struct Scsi_Host *host;
+ u32 state;
+ spinlock_t state_lock;
+ int unit;
+ int host_no; /* SCSI host number */
+ u8 initialized;
+ u8 in_use; /* is the management node open*/
+
+ char name[32];
+ char detail[55];
+
+ void __iomem *base_addr_virt;
+ void __iomem *msg_addr_virt;
+ ulong base_addr_phys;
+ void __iomem *post_port;
+ void __iomem *reply_port;
+ void __iomem *irq_mask;
+ u16 post_count;
+ u32 post_fifo_size;
+ u32 reply_fifo_size;
+ u32* reply_pool;
+ dma_addr_t reply_pool_pa;
+ u32 sg_tablesize; // Scatter/Gather List Size.
+ u8 top_scsi_channel;
+ u8 top_scsi_id;
+ u64 top_scsi_lun;
+ u8 dma64;
+
+ i2o_status_block* status_block;
+ dma_addr_t status_block_pa;
+ i2o_hrt* hrt;
+ dma_addr_t hrt_pa;
+ i2o_lct* lct;
+ dma_addr_t lct_pa;
+ uint lct_size;
+ struct i2o_device* devices;
+ struct adpt_channel channel[MAX_CHANNEL];
+ struct proc_dir_entry* proc_entry; /* /proc dir */
+
+ void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
+ u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
+ void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len
+ void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags
+ void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
+ void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
+ u32 FwDebugFlags;
+ u32 *ioctl_reply_context[4];
+} adpt_hba;
+
+struct sg_simple_element {
+ u32 flag_count;
+ u32 addr_bus;
+};
+
+/*
+ * Function Prototypes
+ */
+
+static void adpt_i2o_sys_shutdown(void);
+static int adpt_init(void);
+static int adpt_i2o_build_sys_table(void);
+static irqreturn_t adpt_isr(int irq, void *dev_id);
+
+static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
+static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
+ int group, int field, void *buf, int buflen);
+#ifdef DEBUG
+static const char *adpt_i2o_get_class_name(int class);
+#endif
+static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
+ void *opblk, dma_addr_t opblk_pa, int oplen,
+ void *resblk, dma_addr_t resblk_pa, int reslen);
+static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
+static int adpt_i2o_lct_get(adpt_hba* pHba);
+static int adpt_i2o_parse_lct(adpt_hba* pHba);
+static int adpt_i2o_activate_hba(adpt_hba* pHba);
+static int adpt_i2o_enable_hba(adpt_hba* pHba);
+static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d);
+static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len);
+static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba);
+static s32 adpt_i2o_status_get(adpt_hba* pHba);
+static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
+static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
+static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
+static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
+static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
+static s32 adpt_hba_reset(adpt_hba* pHba);
+static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
+static s32 adpt_rescan(adpt_hba* pHba);
+static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
+static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
+static void adpt_i2o_delete_hba(adpt_hba* pHba);
+static void adpt_inquiry(adpt_hba* pHba);
+static void adpt_fail_posted_scbs(adpt_hba* pHba);
+static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
+static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
+static int adpt_i2o_online_hba(adpt_hba* pHba);
+static void adpt_i2o_post_wait_complete(u32, int);
+static int adpt_i2o_systab_send(adpt_hba* pHba);
+
+static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg);
+static int adpt_open(struct inode *inode, struct file *file);
+static int adpt_close(struct inode *inode, struct file *file);
+
+
+#ifdef UARTDELAY
+static void adpt_delay(int millisec);
+#endif
+
+#define PRINT_BUFFER_SIZE 512
+
+#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
+#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print
+#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print
+#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point
+#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit
+#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions
+#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints
+#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info
+#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan
+
+#define FW_DEBUG_STR_LENGTH_OFFSET 0
+#define FW_DEBUG_FLAGS_OFFSET 4
+#define FW_DEBUG_BLED_OFFSET 8
+
+#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
+#endif /* !HOSTS_C */
+#endif /* _DPT_H */
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
new file mode 100644
index 000000000..4c74c7ba2
--- /dev/null
+++ b/drivers/scsi/dtc.c
@@ -0,0 +1,459 @@
+
+#define PSEUDO_DMA
+#define DONT_USE_INTR
+#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
+#define DMA_WORKS_RIGHT
+
+
+/*
+ * DTC 3180/3280 driver, by
+ * Ray Van Tassle rayvt@comm.mot.com
+ *
+ * taken from ...
+ * Trantor T128/T128F/T228 driver by...
+ *
+ * Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ */
+
+/*
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - dtc=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+*/
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+#if 0
+#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <scsi/scsi_host.h>
+#include "dtc.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+
+/*
+ * The DTC3180 & 3280 boards are memory mapped.
+ *
+ */
+
+/*
+ */
+/* Offset from DTC_5380_OFFSET */
+#define DTC_CONTROL_REG 0x100 /* rw */
+#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
+#define CSR_DIR_READ 0x40 /* rw direction, 1 = read 0 = write */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_5380_REG 0x80 /* ro 5380 registers can be accessed */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_5380_INTR 0x10 /* rw Enable 5380 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Host buffer not ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer ready */
+#define CSR_GATED_5380_IRQ 0x01 /* ro Last block xferred */
+#define CSR_INT_BASE (CSR_SCSI_BUFF_INTR | CSR_5380_INTR)
+
+
+#define DTC_BLK_CNT 0x101 /* rw
+ * # of 128-byte blocks to transfer */
+
+
+#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
+
+#define DTC_SWITCH_REG 0x3982 /* ro - DIP switches */
+#define DTC_RESUME_XFER 0x3982 /* wo - resume data xfer
+ * after disconnect/reconnect*/
+
+#define DTC_5380_OFFSET 0x3880 /* 8 registers here, see NCR5380.h */
+
+/*!!!! for dtc, it's a 128 byte buffer at 3900 !!! */
+#define DTC_DATA_BUF 0x3900 /* rw 128 bytes long */
+
+static struct override {
+ unsigned int address;
+ int irq;
+} overrides
+#ifdef OVERRIDE
+[] __initdata = OVERRIDE;
+#else
+[4] __initdata = {
+ { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }
+};
+#endif
+
+#define NO_OVERRIDES ARRAY_SIZE(overrides)
+
+static struct base {
+ unsigned long address;
+ int noauto;
+} bases[] __initdata = {
+ { 0xcc000, 0 },
+ { 0xc8000, 0 },
+ { 0xdc000, 0 },
+ { 0xd8000, 0 }
+};
+
+#define NO_BASES ARRAY_SIZE(bases)
+
+static const struct signature {
+ const char *string;
+ int offset;
+} signatures[] = {
+ {"DATA TECHNOLOGY CORPORATION BIOS", 0x25},
+};
+
+#define NO_SIGNATURES ARRAY_SIZE(signatures)
+
+#ifndef MODULE
+/*
+ * Function : dtc_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+static int __init dtc_setup(char *str)
+{
+ static int commandline_current = 0;
+ int i;
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ if (ints[0] != 2)
+ printk("dtc_setup: usage dtc=address,irq\n");
+ else if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+ return 1;
+}
+
+__setup("dtc=", dtc_setup);
+#endif
+
+/*
+ * Function : int dtc_detect(struct scsi_host_template * tpnt)
+ *
+ * Purpose : detects and initializes DTC 3180/3280 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+*/
+
+static int __init dtc_detect(struct scsi_host_template * tpnt)
+{
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned int addr;
+ void __iomem *base;
+ int sig, count;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ addr = 0;
+ base = NULL;
+
+ if (overrides[current_override].address) {
+ addr = overrides[current_override].address;
+ base = ioremap(addr, 0x2000);
+ if (!base)
+ addr = 0;
+ } else
+ for (; !addr && (current_base < NO_BASES); ++current_base) {
+#if (DTCDEBUG & DTCDEBUG_INIT)
+ printk(KERN_DEBUG "scsi-dtc : probing address %08x\n", bases[current_base].address);
+#endif
+ if (bases[current_base].noauto)
+ continue;
+ base = ioremap(bases[current_base].address, 0x2000);
+ if (!base)
+ continue;
+ for (sig = 0; sig < NO_SIGNATURES; ++sig) {
+ if (check_signature(base + signatures[sig].offset, signatures[sig].string, strlen(signatures[sig].string))) {
+ addr = bases[current_base].address;
+#if (DTCDEBUG & DTCDEBUG_INIT)
+ printk(KERN_DEBUG "scsi-dtc : detected board.\n");
+#endif
+ goto found;
+ }
+ }
+ iounmap(base);
+ }
+
+#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
+ printk(KERN_DEBUG "scsi-dtc : base = %08x\n", addr);
+#endif
+
+ if (!addr)
+ break;
+
+found:
+ instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
+ if (instance == NULL)
+ break;
+
+ instance->base = addr;
+ ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base;
+
+ NCR5380_init(instance, 0);
+
+ NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); /* Enable int's */
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
+
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
+
+#ifndef DONT_USE_INTR
+ /* With interrupts enabled, it will sometimes hang when doing heavy
+ * reads. So better not enable them until I finger it out. */
+ if (instance->irq != NO_IRQ)
+ if (request_irq(instance->irq, dtc_intr, 0,
+ "dtc", instance)) {
+ printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
+ }
+
+ if (instance->irq == NO_IRQ) {
+ printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+#else
+ if (instance->irq != NO_IRQ)
+ printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no);
+ instance->irq = NO_IRQ;
+#endif
+#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int dtc_biosparam(Disk * disk, struct block_device *dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+*/
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+*/
+
+static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int *ip)
+{
+ int size = capacity;
+
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+
+
+/****************************************************************
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, reads len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+*/
+
+static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ unsigned char *d = dst;
+ int i; /* For counting time spent in the poll-loop */
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ i = 0;
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
+ if (instance->irq == NO_IRQ)
+ NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
+ else
+ NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
+ NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
+ rtrc(1);
+ while (len > 0) {
+ rtrc(2);
+ while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
+ ++i;
+ rtrc(3);
+ memcpy_fromio(d, base + DTC_DATA_BUF, 128);
+ d += 128;
+ len -= 128;
+ rtrc(7);
+ /*** with int's on, it sometimes hangs after here.
+ * Looks like something makes HBNR go away. */
+ }
+ rtrc(4);
+ while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
+ ++i;
+ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
+ rtrc(0);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ if (i > hostdata->spin_max_r)
+ hostdata->spin_max_r = i;
+ return (0);
+}
+
+/****************************************************************
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+*/
+
+static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int i;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
+ /* set direction (write) */
+ if (instance->irq == NO_IRQ)
+ NCR5380_write(DTC_CONTROL_REG, 0);
+ else
+ NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
+ NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
+ for (i = 0; len > 0; ++i) {
+ rtrc(5);
+ /* Poll until the host buffer can accept data. */
+ while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
+ ++i;
+ rtrc(3);
+ memcpy_toio(base + DTC_DATA_BUF, src, 128);
+ src += 128;
+ len -= 128;
+ }
+ rtrc(4);
+ while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
+ ++i;
+ rtrc(6);
+ /* Wait until the last byte has been sent to the disk */
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ++i;
+ rtrc(7);
+ /* Check for parity error here. fixme. */
+ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
+ rtrc(0);
+ if (i > hostdata->spin_max_w)
+ hostdata->spin_max_w = i;
+ return (0);
+}
+
+MODULE_LICENSE("GPL");
+
+#include "NCR5380.c"
+
+static int dtc_release(struct Scsi_Host *shost)
+{
+ NCR5380_local_declare();
+ NCR5380_setup(shost);
+ if (shost->irq != NO_IRQ)
+ free_irq(shost->irq, shost);
+ NCR5380_exit(shost);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_unregister(shost);
+ iounmap(base);
+ return 0;
+}
+
+static struct scsi_host_template driver_template = {
+ .name = "DTC 3180/3280 ",
+ .detect = dtc_detect,
+ .release = dtc_release,
+ .proc_name = "dtc3x80",
+ .show_info = dtc_show_info,
+ .write_info = dtc_write_info,
+ .info = dtc_info,
+ .queuecommand = dtc_queue_command,
+ .eh_abort_handler = dtc_abort,
+ .eh_bus_reset_handler = dtc_bus_reset,
+ .bios_param = dtc_biosparam,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+#include "scsi_module.c"
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
new file mode 100644
index 000000000..78a2332e9
--- /dev/null
+++ b/drivers/scsi/dtc.h
@@ -0,0 +1,76 @@
+/*
+ * DTC controller, taken from T128 driver by...
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ */
+
+#ifndef DTC3280_H
+#define DTC3280_H
+
+#define DTCDEBUG 0
+#define DTCDEBUG_INIT 0x1
+#define DTCDEBUG_TRANSFER 0x2
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+#define NCR5380_implementation_fields \
+ void __iomem *base
+
+#define NCR5380_local_declare() \
+ void __iomem *base
+
+#define NCR5380_setup(instance) \
+ base = ((struct NCR5380_hostdata *)(instance)->hostdata)->base
+
+#define DTC_address(reg) (base + DTC_5380_OFFSET + reg)
+
+#define dbNCR5380_read(reg) \
+ (rval=readb(DTC_address(reg)), \
+ (((unsigned char) printk("DTC : read register %d at addr %p is: %02x\n"\
+ , (reg), DTC_address(reg), rval)), rval ) )
+
+#define dbNCR5380_write(reg, value) do { \
+ printk("DTC : write %02x to register %d at address %p\n", \
+ (value), (reg), DTC_address(reg)); \
+ writeb(value, DTC_address(reg));} while(0)
+
+
+#if !(DTCDEBUG & DTCDEBUG_TRANSFER)
+#define NCR5380_read(reg) (readb(DTC_address(reg)))
+#define NCR5380_write(reg, value) (writeb(value, DTC_address(reg)))
+#else
+#define NCR5380_read(reg) (readb(DTC_address(reg)))
+#define xNCR5380_read(reg) \
+ (((unsigned char) printk("DTC : read register %d at address %p\n"\
+ , (reg), DTC_address(reg))), readb(DTC_address(reg)))
+
+#define NCR5380_write(reg, value) do { \
+ printk("DTC : write %02x to register %d at address %p\n", \
+ (value), (reg), DTC_address(reg)); \
+ writeb(value, DTC_address(reg));} while(0)
+#endif
+
+#define NCR5380_intr dtc_intr
+#define NCR5380_queue_command dtc_queue_command
+#define NCR5380_abort dtc_abort
+#define NCR5380_bus_reset dtc_bus_reset
+#define NCR5380_info dtc_info
+#define NCR5380_show_info dtc_show_info
+#define NCR5380_write_info dtc_write_info
+
+/* 15 12 11 10
+ 1001 1100 0000 0000 */
+
+#define DTC_IRQS 0x9c00
+
+
+#endif /* DTC3280_H */
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
new file mode 100644
index 000000000..227dd2c2e
--- /dev/null
+++ b/drivers/scsi/eata.c
@@ -0,0 +1,2578 @@
+/*
+ * eata.c - Low-level driver for EATA/DMA SCSI host adapters.
+ *
+ * 03 Jun 2003 Rev. 8.10 for linux-2.5.70
+ * + Update for new IRQ API.
+ * + Use "goto" when appropriate.
+ * + Drop eata.h.
+ * + Update for new module_param API.
+ * + Module parameters can now be specified only in the
+ * same format as the kernel boot options.
+ *
+ * boot option old module param
+ * ----------- ------------------
+ * addr,... io_port=addr,...
+ * lc:[y|n] linked_comm=[1|0]
+ * mq:xx max_queue_depth=xx
+ * tm:[0|1|2] tag_mode=[0|1|2]
+ * et:[y|n] ext_tran=[1|0]
+ * rs:[y|n] rev_scan=[1|0]
+ * ip:[y|n] isa_probe=[1|0]
+ * ep:[y|n] eisa_probe=[1|0]
+ * pp:[y|n] pci_probe=[1|0]
+ *
+ * A valid example using the new parameter format is:
+ * modprobe eata "eata=0x7410,0x230,lc:y,tm:0,mq:4,ep:n"
+ *
+ * which is equivalent to the old format:
+ * modprobe eata io_port=0x7410,0x230 linked_comm=1 tag_mode=0 \
+ * max_queue_depth=4 eisa_probe=0
+ *
+ * 12 Feb 2003 Rev. 8.04 for linux 2.5.60
+ * + Release irq before calling scsi_register.
+ *
+ * 12 Nov 2002 Rev. 8.02 for linux 2.5.47
+ * + Release driver_lock before calling scsi_register.
+ *
+ * 11 Nov 2002 Rev. 8.01 for linux 2.5.47
+ * + Fixed bios_param and scsicam_bios_param calling parameters.
+ *
+ * 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4
+ * + Use new tcq and adjust_queue_depth api.
+ * + New command line option (tm:[0-2]) to choose the type of tags:
+ * 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags.
+ * Default is tm:0 (tagged commands disabled).
+ * For compatibility the "tc:" option is an alias of the "tm:"
+ * option; tc:n is equivalent to tm:0 and tc:y is equivalent to
+ * tm:1.
+ * + The tagged_comm module parameter has been removed, use tag_mode
+ * instead, equivalent to the "tm:" boot option.
+ *
+ * 10 Oct 2002 Rev. 7.70 for linux 2.5.42
+ * + Foreport from revision 6.70.
+ *
+ * 25 Jun 2002 Rev. 6.70 for linux 2.4.19
+ * + This release is the first one tested on a Big Endian platform:
+ * fixed endian-ness problem due to bitfields;
+ * fixed endian-ness problem in read_pio.
+ * + Added new options for selectively probing ISA, EISA and PCI bus:
+ *
+ * Boot option Parameter name Default according to
+ *
+ * ip:[y|n] isa_probe=[1|0] CONFIG_ISA defined
+ * ep:[y|n] eisa_probe=[1|0] CONFIG_EISA defined
+ * pp:[y|n] pci_probe=[1|0] CONFIG_PCI defined
+ *
+ * The default action is to perform probing if the corresponding
+ * bus is configured and to skip probing otherwise.
+ *
+ * + If pci_probe is in effect and a list of I/O ports is specified
+ * as parameter or boot option, pci_enable_device() is performed
+ * on all pci devices matching PCI_CLASS_STORAGE_SCSI.
+ *
+ * 21 Feb 2002 Rev. 6.52 for linux 2.4.18
+ * + Backport from rev. 7.22 (use io_request_lock).
+ *
+ * 20 Feb 2002 Rev. 7.22 for linux 2.5.5
+ * + Remove any reference to virt_to_bus().
+ * + Fix pio hang while detecting multiple HBAs.
+ * + Fixed a board detection bug: in a system with
+ * multiple ISA/EISA boards, all but the first one
+ * were erroneously detected as PCI.
+ *
+ * 01 Jan 2002 Rev. 7.20 for linux 2.5.1
+ * + Use the dynamic DMA mapping API.
+ *
+ * 19 Dec 2001 Rev. 7.02 for linux 2.5.1
+ * + Use SCpnt->sc_data_direction if set.
+ * + Use sglist.page instead of sglist.address.
+ *
+ * 11 Dec 2001 Rev. 7.00 for linux 2.5.1
+ * + Use host->host_lock instead of io_request_lock.
+ *
+ * 1 May 2001 Rev. 6.05 for linux 2.4.4
+ * + Clean up all pci related routines.
+ * + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d)
+ *
+ * 30 Jan 2001 Rev. 6.04 for linux 2.4.1
+ * + Call pci_resource_start after pci_enable_device.
+ *
+ * 25 Jan 2001 Rev. 6.03 for linux 2.4.0
+ * + "check_region" call replaced by "request_region".
+ *
+ * 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11
+ * + Return code checked when calling pci_enable_device.
+ * + Removed old scsi error handling support.
+ * + The obsolete boot option flag eh:n is silently ignored.
+ * + Removed error messages while a disk drive is powered up at
+ * boot time.
+ * + Improved boot messages: all tagged capable device are
+ * indicated as "tagged" or "soft-tagged" :
+ * - "soft-tagged" means that the driver is trying to do its
+ * own tagging (i.e. the tc:y option is in effect);
+ * - "tagged" means that the device supports tagged commands,
+ * but the driver lets the HBA be responsible for tagging
+ * support.
+ *
+ * 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18
+ * + Updated to the new __setup interface for boot command line options.
+ * + When loaded as a module, accepts the new parameter boot_options
+ * which value is a string with the same format of the kernel boot
+ * command line options. A valid example is:
+ * modprobe eata 'boot_options="0x7410,0x230,lc:y,tc:n,mq:4"'
+ *
+ * 9 Sep 1999 Rev. 5.10 for linux 2.2.12 and 2.3.17
+ * + 64bit cleanup for Linux/Alpha platform support
+ * (contribution from H.J. Lu).
+ *
+ * 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11
+ * + Removed pre-2.2 source code compatibility.
+ * + Added call to pci_set_master.
+ *
+ * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
+ * + Added command line option (rs:[y|n]) to reverse the scan order
+ * of PCI boards. The default is rs:y, which reverses the BIOS order
+ * while registering PCI boards. The default value rs:y generates
+ * the same order of all previous revisions of this driver.
+ * Pls. note that "BIOS order" might have been reversed itself
+ * after the 2.1.9x PCI modifications in the linux kernel.
+ * The rs value is ignored when the explicit list of addresses
+ * is used by the "eata=port0,port1,..." command line option.
+ * + Added command line option (et:[y|n]) to force use of extended
+ * translation (255 heads, 63 sectors) as disk geometry.
+ * The default is et:n, which uses the disk geometry returned
+ * by scsicam_bios_param. The default value et:n is compatible with
+ * all previous revisions of this driver.
+ *
+ * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
+ * Increased busy timeout from 10 msec. to 200 msec. while
+ * processing interrupts.
+ *
+ * 16 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
+ * Improved abort handling during the eh recovery process.
+ *
+ * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
+ * The driver is now fully SMP safe, including the
+ * abort and reset routines.
+ * Added command line options (eh:[y|n]) to choose between
+ * new_eh_code and the old scsi code.
+ * If linux version >= 2.1.101 the default is eh:y, while the eh
+ * option is ignored for previous releases and the old scsi code
+ * is used.
+ *
+ * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
+ * Reworked interrupt handler.
+ *
+ * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
+ * Major reliability improvement: when a batch with overlapping
+ * requests is detected, requests are queued one at a time
+ * eliminating any possible board or drive reordering.
+ *
+ * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
+ * Improved SMP support (if linux version >= 2.1.95).
+ *
+ * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
+ * Added support for new PCI code and IO-APIC remapping of irqs.
+ * Performance improvement: when sequential i/o is detected,
+ * always use direct sort instead of reverse sort.
+ *
+ * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
+ * io_port is now unsigned long.
+ *
+ * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
+ * Use new scsi error handling code (if linux version >= 2.1.88).
+ * Use new interrupt code.
+ *
+ * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
+ * Use of udelay inside the wait loops to avoid timeout
+ * problems with fast cpus.
+ * Removed check about useless calls to the interrupt service
+ * routine (reported on SMP systems only).
+ * At initialization time "sorted/unsorted" is displayed instead
+ * of "linked/unlinked" to reinforce the fact that "linking" is
+ * nothing but "elevator sorting" in the actual implementation.
+ *
+ * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
+ * Use of serial_number_at_timeout in abort and reset processing.
+ * Use of the __initfunc and __initdata macro in setup code.
+ * Minor cleanups in the list_statistics code.
+ * Increased controller busy timeout in order to better support
+ * slow SCSI devices.
+ *
+ * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
+ * When loading as a module, parameter passing is now supported
+ * both in 2.0 and in 2.1 style.
+ * Fixed data transfer direction for some SCSI opcodes.
+ * Immediate acknowledge to request sense commands.
+ * Linked commands to each disk device are now reordered by elevator
+ * sorting. Rare cases in which reordering of write requests could
+ * cause wrong results are managed.
+ * Fixed spurious timeouts caused by long simple queue tag sequences.
+ * New command line option (tm:[0-3]) to choose the type of tags:
+ * 0 -> mixed (default); 1 -> simple; 2 -> head; 3 -> ordered.
+ *
+ * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
+ * Added command line options to enable/disable linked commands
+ * (lc:[y|n]), tagged commands (tc:[y|n]) and to set the max queue
+ * depth (mq:xx). Default is "eata=lc:n,tc:n,mq:16".
+ * Improved command linking.
+ * Documented how to setup RAID-0 with DPT SmartRAID boards.
+ *
+ * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
+ * Added linked command support.
+ * Improved detection of PCI boards using ISA base addresses.
+ *
+ * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
+ * Added support for tagged commands and queue depth adjustment.
+ *
+ * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
+ * When CONFIG_PCI is defined, BIOS32 is used to include in the
+ * list of i/o ports to be probed all the PCI SCSI controllers.
+ * The list of i/o ports to be probed can be overwritten by the
+ * "eata=port0,port1,...." boot command line option.
+ * Scatter/gather lists are now allocated by a number of kmalloc
+ * calls, in order to avoid the previous size limit of 64Kb.
+ *
+ * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
+ * Added support for EATA 2.0C, PCI, multichannel and wide SCSI.
+ *
+ * 27 Sep 1996 rev. 2.12 for linux 2.1.0
+ * Portability cleanups (virtual/bus addressing, little/big endian
+ * support).
+ *
+ * 09 Jul 1996 rev. 2.11 for linux 2.0.4
+ * Number of internal retries is now limited.
+ *
+ * 16 Apr 1996 rev. 2.10 for linux 1.3.90
+ * New argument "reset_flags" to the reset routine.
+ *
+ * 6 Jul 1995 rev. 2.01 for linux 1.3.7
+ * Update required by the new /proc/scsi support.
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * Now DEBUG_RESET is disabled by default.
+ * Register a board even if it does not assert DMA protocol support
+ * (DPT SK2011B does not report correctly the dmasup bit).
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ * New list of Data Out SCSI commands.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ * All external symbols renamed to avoid possible name conflicts.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ *
+ * 24 Jan 1995 rev. 1.13 for linux 1.1.85
+ * Use optimized board configuration, with a measured performance
+ * increase in the range 10%-20% on i/o throughput.
+ *
+ * 16 Jan 1995 rev. 1.12 for linux 1.1.81
+ * Fix mscp structure comments (no functional change).
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 17 Dec 1994 rev. 1.11 for linux 1.1.74
+ * Use the scsicam_bios_param routine. This allows an easy
+ * migration path from disk partition tables created using
+ * different SCSI drivers and non optimal disk geometry.
+ *
+ * 15 Dec 1994 rev. 1.10 for linux 1.1.74
+ * Added support for ISA EATA boards (DPT PM2011, DPT PM2021).
+ * The host->block flag is set for all the detected ISA boards.
+ * The detect routine no longer enforces LEVEL triggering
+ * for EISA boards, it just prints a warning message.
+ *
+ * 30 Nov 1994 rev. 1.09 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 18 Nov 1994 rev. 1.08 for linux 1.1.64
+ * Forces sg_tablesize = 64 and can_queue = 64 if these
+ * values are not correctly detected (DPT PM2012).
+ *
+ * 14 Nov 1994 rev. 1.07 for linux 1.1.63 Final BETA release.
+ * 04 Aug 1994 rev. 1.00 for linux 1.1.39 First BETA release.
+ *
+ *
+ * This driver is based on the CAM (Common Access Method Committee)
+ * EATA (Enhanced AT Bus Attachment) rev. 2.0A, using DMA protocol.
+ *
+ * Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com)
+ *
+ * Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ */
+
+/*
+ *
+ * Here is a brief description of the DPT SCSI host adapters.
+ * All these boards provide an EATA/DMA compatible programming interface
+ * and are fully supported by this driver in any configuration, including
+ * multiple SCSI channels:
+ *
+ * PM2011B/9X - Entry Level ISA
+ * PM2021A/9X - High Performance ISA
+ * PM2012A Old EISA
+ * PM2012B Old EISA
+ * PM2022A/9X - Entry Level EISA
+ * PM2122A/9X - High Performance EISA
+ * PM2322A/9X - Extra High Performance EISA
+ * PM3021 - SmartRAID Adapter for ISA
+ * PM3222 - SmartRAID Adapter for EISA (PM3222W is 16-bit wide SCSI)
+ * PM3224 - SmartRAID Adapter for PCI (PM3224W is 16-bit wide SCSI)
+ * PM33340UW - SmartRAID Adapter for PCI ultra wide multichannel
+ *
+ * The above list is just an indication: as a matter of fact all DPT
+ * boards using the EATA/DMA protocol are supported by this driver,
+ * since they use exactely the same programming interface.
+ *
+ * The DPT PM2001 provides only the EATA/PIO interface and hence is not
+ * supported by this driver.
+ *
+ * This code has been tested with up to 3 Distributed Processing Technology
+ * PM2122A/9X (DPT SCSI BIOS v002.D1, firmware v05E.0) EISA controllers,
+ * in any combination of private and shared IRQ.
+ * PCI support has been tested using up to 2 DPT PM3224W (DPT SCSI BIOS
+ * v003.D0, firmware v07G.0).
+ *
+ * DPT SmartRAID boards support "Hardware Array" - a group of disk drives
+ * which are all members of the same RAID-0, RAID-1 or RAID-5 array implemented
+ * in host adapter hardware. Hardware Arrays are fully compatible with this
+ * driver, since they look to it as a single disk drive.
+ *
+ * WARNING: to create a RAID-0 "Hardware Array" you must select "Other Unix"
+ * as the current OS in the DPTMGR "Initial System Installation" menu.
+ * Otherwise RAID-0 is generated as an "Array Group" (i.e. software RAID-0),
+ * which is not supported by the actual SCSI subsystem.
+ * To get the "Array Group" functionality, the Linux MD driver must be used
+ * instead of the DPT "Array Group" feature.
+ *
+ * Multiple ISA, EISA and PCI boards can be configured in the same system.
+ * It is suggested to put all the EISA boards on the same IRQ level, all
+ * the PCI boards on another IRQ level, while ISA boards cannot share
+ * interrupts.
+ *
+ * If you configure multiple boards on the same IRQ, the interrupt must
+ * be _level_ triggered (not _edge_ triggered).
+ *
+ * This driver detects EATA boards by probes at fixed port addresses,
+ * so no BIOS32 or PCI BIOS support is required.
+ * The suggested way to detect a generic EATA PCI board is to force on it
+ * any unused EISA address, even if there are other controllers on the EISA
+ * bus, or even if you system has no EISA bus at all.
+ * Do not force any ISA address on EATA PCI boards.
+ *
+ * If PCI bios support is configured into the kernel, BIOS32 is used to
+ * include in the list of i/o ports to be probed all the PCI SCSI controllers.
+ *
+ * Due to a DPT BIOS "feature", it might not be possible to force an EISA
+ * address on more than a single DPT PCI board, so in this case you have to
+ * let the PCI BIOS assign the addresses.
+ *
+ * The sequence of detection probes is:
+ *
+ * - ISA 0x1F0;
+ * - PCI SCSI controllers (only if BIOS32 is available);
+ * - EISA/PCI 0x1C88 through 0xFC88 (corresponding to EISA slots 1 to 15);
+ * - ISA 0x170, 0x230, 0x330.
+ *
+ * The above list of detection probes can be totally replaced by the
+ * boot command line option: "eata=port0,port1,port2,...", where the
+ * port0, port1... arguments are ISA/EISA/PCI addresses to be probed.
+ * For example using "eata=0x7410,0x7450,0x230", the driver probes
+ * only the two PCI addresses 0x7410 and 0x7450 and the ISA address 0x230,
+ * in this order; "eata=0" totally disables this driver.
+ *
+ * After the optional list of detection probes, other possible command line
+ * options are:
+ *
+ * et:y force use of extended translation (255 heads, 63 sectors);
+ * et:n use disk geometry detected by scsicam_bios_param;
+ * rs:y reverse scan order while detecting PCI boards;
+ * rs:n use BIOS order while detecting PCI boards;
+ * lc:y enables linked commands;
+ * lc:n disables linked commands;
+ * tm:0 disables tagged commands (same as tc:n);
+ * tm:1 use simple queue tags (same as tc:y);
+ * tm:2 use ordered queue tags (same as tc:2);
+ * mq:xx set the max queue depth to the value xx (2 <= xx <= 32).
+ *
+ * The default value is: "eata=lc:n,mq:16,tm:0,et:n,rs:n".
+ * An example using the list of detection probes could be:
+ * "eata=0x7410,0x230,lc:y,tm:2,mq:4,et:n".
+ *
+ * When loading as a module, parameters can be specified as well.
+ * The above example would be (use 1 in place of y and 0 in place of n):
+ *
+ * modprobe eata io_port=0x7410,0x230 linked_comm=1 \
+ * max_queue_depth=4 ext_tran=0 tag_mode=2 \
+ * rev_scan=1
+ *
+ * ----------------------------------------------------------------------------
+ * In this implementation, linked commands are designed to work with any DISK
+ * or CD-ROM, since this linking has only the intent of clustering (time-wise)
+ * and reordering by elevator sorting commands directed to each device,
+ * without any relation with the actual SCSI protocol between the controller
+ * and the device.
+ * If Q is the queue depth reported at boot time for each device (also named
+ * cmds/lun) and Q > 2, whenever there is already an active command to the
+ * device all other commands to the same device (up to Q-1) are kept waiting
+ * in the elevator sorting queue. When the active command completes, the
+ * commands in this queue are sorted by sector address. The sort is chosen
+ * between increasing or decreasing by minimizing the seek distance between
+ * the sector of the commands just completed and the sector of the first
+ * command in the list to be sorted.
+ * Trivial math assures that the unsorted average seek distance when doing
+ * random seeks over S sectors is S/3.
+ * When (Q-1) requests are uniformly distributed over S sectors, the average
+ * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
+ * average seek distance for (Q-1) random requests over S sectors is S/Q.
+ * The elevator sorting hence divides the seek distance by a factor Q/3.
+ * The above pure geometric remarks are valid in all cases and the
+ * driver effectively reduces the seek distance by the predicted factor
+ * when there are Q concurrent read i/o operations on the device, but this
+ * does not necessarily results in a noticeable performance improvement:
+ * your mileage may vary....
+ *
+ * Note: command reordering inside a batch of queued commands could cause
+ * wrong results only if there is at least one write request and the
+ * intersection (sector-wise) of all requests is not empty.
+ * When the driver detects a batch including overlapping requests
+ * (a really rare event) strict serial (pid) order is enforced.
+ * ----------------------------------------------------------------------------
+ * The extended translation option (et:y) is useful when using large physical
+ * disks/arrays. It could also be useful when switching between Adaptec boards
+ * and DPT boards without reformatting the disk.
+ * When a boot disk is partitioned with extended translation, in order to
+ * be able to boot it with a DPT board is could be necessary to add to
+ * lilo.conf additional commands as in the following example:
+ *
+ * fix-table
+ * disk=/dev/sda bios=0x80 sectors=63 heads=128 cylindres=546
+ *
+ * where the above geometry should be replaced with the one reported at
+ * power up by the DPT controller.
+ * ----------------------------------------------------------------------------
+ *
+ * The boards are named EATA0, EATA1,... according to the detection order.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = 1 for all ISA boards.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+
+static int eata2x_detect(struct scsi_host_template *);
+static int eata2x_release(struct Scsi_Host *);
+static int eata2x_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static int eata2x_eh_abort(struct scsi_cmnd *);
+static int eata2x_eh_host_reset(struct scsi_cmnd *);
+static int eata2x_bios_param(struct scsi_device *, struct block_device *,
+ sector_t, int *);
+static int eata2x_slave_configure(struct scsi_device *);
+
+static struct scsi_host_template driver_template = {
+ .name = "EATA/DMA 2.0x rev. 8.10.00 ",
+ .detect = eata2x_detect,
+ .release = eata2x_release,
+ .queuecommand = eata2x_queuecommand,
+ .eh_abort_handler = eata2x_eh_abort,
+ .eh_host_reset_handler = eata2x_eh_host_reset,
+ .bios_param = eata2x_bios_param,
+ .slave_configure = eata2x_slave_configure,
+ .this_id = 7,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#undef FORCE_CONFIG
+
+#undef DEBUG_LINKED_COMMANDS
+#undef DEBUG_DETECT
+#undef DEBUG_PCI_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_RESET
+#undef DEBUG_GENERATE_ERRORS
+#undef DEBUG_GENERATE_ABORTS
+#undef DEBUG_GEOMETRY
+
+#define MAX_ISA 4
+#define MAX_VESA 0
+#define MAX_EISA 15
+#define MAX_PCI 16
+#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
+#define MAX_CHANNEL 4
+#define MAX_LUN 32
+#define MAX_TARGET 32
+#define MAX_MAILBOXES 64
+#define MAX_SGLIST 64
+#define MAX_LARGE_SGLIST 122
+#define MAX_INTERNAL_RETRIES 64
+#define MAX_CMD_PER_LUN 2
+#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
+
+#define SKIP ULONG_MAX
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define READY 5
+#define ABORTING 6
+#define NO_DMA 0xff
+#define MAXLOOP 10000
+#define TAG_DISABLED 0
+#define TAG_SIMPLE 1
+#define TAG_ORDERED 2
+
+#define REG_CMD 7
+#define REG_STATUS 7
+#define REG_AUX_STATUS 8
+#define REG_DATA 0
+#define REG_DATA2 1
+#define REG_SEE 6
+#define REG_LOW 2
+#define REG_LM 3
+#define REG_MID 4
+#define REG_MSB 5
+#define REGION_SIZE 9UL
+#define MAX_ISA_ADDR 0x03ff
+#define MIN_EISA_ADDR 0x1c88
+#define MAX_EISA_ADDR 0xfc88
+#define BSY_ASSERTED 0x80
+#define DRQ_ASSERTED 0x08
+#define ABSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x02
+#define READ_CONFIG_PIO 0xf0
+#define SET_CONFIG_PIO 0xf1
+#define SEND_CP_PIO 0xf2
+#define RECEIVE_SP_PIO 0xf3
+#define TRUNCATE_XFR_PIO 0xf4
+#define RESET_PIO 0xf9
+#define READ_CONFIG_DMA 0xfd
+#define SET_CONFIG_DMA 0xfe
+#define SEND_CP_DMA 0xff
+#define ASOK 0x00
+#define ASST 0x01
+
+#define YESNO(a) ((a) ? 'y' : 'n')
+#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
+
+/* "EATA", in Big Endian format */
+#define EATA_SIG_BE 0x45415441
+
+/* Number of valid bytes in the board config structure for EATA 2.0x */
+#define EATA_2_0A_SIZE 28
+#define EATA_2_0B_SIZE 30
+#define EATA_2_0C_SIZE 34
+
+/* Board info structure */
+struct eata_info {
+ u_int32_t data_len; /* Number of valid bytes after this field */
+ u_int32_t sign; /* ASCII "EATA" signature */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unchar version : 4,
+ : 4;
+ unchar haaval : 1,
+ ata : 1,
+ drqvld : 1,
+ dmasup : 1,
+ morsup : 1,
+ trnxfr : 1,
+ tarsup : 1,
+ ocsena : 1;
+#else
+ unchar : 4, /* unused low nibble */
+ version : 4; /* EATA version, should be 0x1 */
+ unchar ocsena : 1, /* Overlap Command Support Enabled */
+ tarsup : 1, /* Target Mode Supported */
+ trnxfr : 1, /* Truncate Transfer Cmd NOT Necessary */
+ morsup : 1, /* More Supported */
+ dmasup : 1, /* DMA Supported */
+ drqvld : 1, /* DRQ Index (DRQX) is valid */
+ ata : 1, /* This is an ATA device */
+ haaval : 1; /* Host Adapter Address Valid */
+#endif
+
+ ushort cp_pad_len; /* Number of pad bytes after cp_len */
+ unchar host_addr[4]; /* Host Adapter SCSI ID for channels 3, 2, 1, 0 */
+ u_int32_t cp_len; /* Number of valid bytes in cp */
+ u_int32_t sp_len; /* Number of valid bytes in sp */
+ ushort queue_size; /* Max number of cp that can be queued */
+ ushort unused;
+ ushort scatt_size; /* Max number of entries in scatter/gather table */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unchar drqx : 2,
+ second : 1,
+ irq_tr : 1,
+ irq : 4;
+ unchar sync;
+ unchar : 4,
+ res1 : 1,
+ large_sg : 1,
+ forcaddr : 1,
+ isaena : 1;
+ unchar max_chan : 3,
+ max_id : 5;
+ unchar max_lun;
+ unchar eisa : 1,
+ pci : 1,
+ idquest : 1,
+ m1 : 1,
+ : 4;
+#else
+ unchar irq : 4, /* Interrupt Request assigned to this controller */
+ irq_tr : 1, /* 0 for edge triggered, 1 for level triggered */
+ second : 1, /* 1 if this is a secondary (not primary) controller */
+ drqx : 2; /* DRQ Index (0=DMA0, 1=DMA7, 2=DMA6, 3=DMA5) */
+ unchar sync; /* 1 if scsi target id 7...0 is running sync scsi */
+
+ /* Structure extension defined in EATA 2.0B */
+ unchar isaena : 1, /* ISA i/o addressing is disabled/enabled */
+ forcaddr : 1, /* Port address has been forced */
+ large_sg : 1, /* 1 if large SG lists are supported */
+ res1 : 1,
+ : 4;
+ unchar max_id : 5, /* Max SCSI target ID number */
+ max_chan : 3; /* Max SCSI channel number on this board */
+
+ /* Structure extension defined in EATA 2.0C */
+ unchar max_lun; /* Max SCSI LUN number */
+ unchar
+ : 4,
+ m1 : 1, /* This is a PCI with an M1 chip installed */
+ idquest : 1, /* RAIDNUM returned is questionable */
+ pci : 1, /* This board is PCI */
+ eisa : 1; /* This board is EISA */
+#endif
+
+ unchar raidnum; /* Uniquely identifies this HBA in a system */
+ unchar notused;
+
+ ushort ipad[247];
+};
+
+/* Board config structure */
+struct eata_config {
+ ushort len; /* Number of bytes following this field */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unchar : 4,
+ tarena : 1,
+ mdpena : 1,
+ ocena : 1,
+ edis : 1;
+#else
+ unchar edis : 1, /* Disable EATA interface after config command */
+ ocena : 1, /* Overlapped Commands Enabled */
+ mdpena : 1, /* Transfer all Modified Data Pointer Messages */
+ tarena : 1, /* Target Mode Enabled for this controller */
+ : 4;
+#endif
+ unchar cpad[511];
+};
+
+/* Returned status packet structure */
+struct mssp {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unchar eoc : 1,
+ adapter_status : 7;
+#else
+ unchar adapter_status : 7, /* State related to current command */
+ eoc : 1; /* End Of Command (1 = command completed) */
+#endif
+ unchar target_status; /* SCSI status received after data transfer */
+ unchar unused[2];
+ u_int32_t inv_res_len; /* Number of bytes not transferred */
+ u_int32_t cpp_index; /* Index of address set in cp */
+ char mess[12];
+};
+
+struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+};
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unchar din : 1,
+ dout : 1,
+ interp : 1,
+ : 1,
+ sg : 1,
+ reqsen :1,
+ init : 1,
+ sreset : 1;
+ unchar sense_len;
+ unchar unused[3];
+ unchar : 7,
+ fwnest : 1;
+ unchar : 5,
+ hbaci : 1,
+ iat : 1,
+ phsunit : 1;
+ unchar channel : 3,
+ target : 5;
+ unchar one : 1,
+ dispri : 1,
+ luntar : 1,
+ lun : 5;
+#else
+ unchar sreset :1, /* SCSI Bus Reset Signal should be asserted */
+ init :1, /* Re-initialize controller and self test */
+ reqsen :1, /* Transfer Request Sense Data to addr using DMA */
+ sg :1, /* Use Scatter/Gather */
+ :1,
+ interp :1, /* The controller interprets cp, not the target */
+ dout :1, /* Direction of Transfer is Out (Host to Target) */
+ din :1; /* Direction of Transfer is In (Target to Host) */
+ unchar sense_len; /* Request Sense Length */
+ unchar unused[3];
+ unchar fwnest : 1, /* Send command to a component of an Array Group */
+ : 7;
+ unchar phsunit : 1, /* Send to Target Physical Unit (bypass RAID) */
+ iat : 1, /* Inhibit Address Translation */
+ hbaci : 1, /* Inhibit HBA Caching for this command */
+ : 5;
+ unchar target : 5, /* SCSI target ID */
+ channel : 3; /* SCSI channel number */
+ unchar lun : 5, /* SCSI logical unit number */
+ luntar : 1, /* This cp is for Target (not LUN) */
+ dispri : 1, /* Disconnect Privilege granted */
+ one : 1; /* 1 */
+#endif
+
+ unchar mess[3]; /* Massage to/from Target */
+ unchar cdb[12]; /* Command Descriptor Block */
+ u_int32_t data_len; /* If sg=0 Data Length, if sg=1 sglist length */
+ u_int32_t cpp_index; /* Index of address to be returned in sp */
+ u_int32_t data_address; /* If sg=0 Data Address, if sg=1 sglist address */
+ u_int32_t sp_dma_addr; /* Address where sp is DMA'ed when cp completes */
+ u_int32_t sense_addr; /* Address where Sense Data is DMA'ed on error */
+
+ /* Additional fields begin here. */
+ struct scsi_cmnd *SCpnt;
+
+ /* All the cp structure is zero filled by queuecommand except the
+ following CP_TAIL_SIZE bytes, initialized by detect */
+ dma_addr_t cp_dma_addr; /* dma handle for this cp structure */
+ struct sg_list *sglist; /* pointer to the allocated SG list */
+};
+
+#define CP_TAIL_SIZE (sizeof(struct sglist *) + sizeof(dma_addr_t))
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
+ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If 1 redo i/o on target */
+ unsigned int retries; /* Number of internal retries */
+ unsigned long last_retried_pid; /* Pid of last retried command */
+ unsigned char subversion; /* Bus type, either ISA or EISA/PCI */
+ unsigned char protocol_rev; /* EATA 2.0 rev., 'A' or 'B' or 'C' */
+ unsigned char is_pci; /* 1 is bus type is PCI */
+ struct pci_dev *pdev; /* pdev for PCI bus, NULL otherwise */
+ struct mssp *sp_cpu_addr; /* cpu addr for DMA buffer sp */
+ dma_addr_t sp_dma_addr; /* dma handle for DMA buffer sp */
+ struct mssp sp; /* Local copy of sp buffer */
+};
+
+static struct Scsi_Host *sh[MAX_BOARDS];
+static const char *driver_name = "EATA";
+static char sha[MAX_BOARDS];
+
+/* Initialize num_boards so that ihdlr can work while detect is in progress */
+static unsigned int num_boards = MAX_BOARDS;
+
+static unsigned long io_port[] = {
+
+ /* Space for MAX_INT_PARAM ports usable while loading as a module */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP,
+
+ /* First ISA */
+ 0x1f0,
+
+ /* Space for MAX_PCI ports possibly reported by PCI_BIOS */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+
+ /* MAX_EISA ports */
+ 0x1c88, 0x2c88, 0x3c88, 0x4c88, 0x5c88, 0x6c88, 0x7c88, 0x8c88,
+ 0x9c88, 0xac88, 0xbc88, 0xcc88, 0xdc88, 0xec88, 0xfc88,
+
+ /* Other (MAX_ISA - 1) ports */
+ 0x170, 0x230, 0x330,
+
+ /* End of list */
+ 0x0
+};
+
+/* Device is Big Endian */
+#define H2DEV(x) cpu_to_be32(x)
+#define DEV2H(x) be32_to_cpu(x)
+#define H2DEV16(x) cpu_to_be16(x)
+#define DEV2H16(x) be16_to_cpu(x)
+
+/* But transfer orientation from the 16 bit data register is Little Endian */
+#define REG2H(x) le16_to_cpu(x)
+
+static irqreturn_t do_interrupt_handler(int, void *);
+static void flush_dev(struct scsi_device *, unsigned long, struct hostdata *,
+ unsigned int);
+static int do_trace = 0;
+static int setup_done = 0;
+static int link_statistics;
+static int ext_tran = 0;
+static int rev_scan = 1;
+
+#if defined(CONFIG_SCSI_EATA_TAGGED_QUEUE)
+static int tag_mode = TAG_SIMPLE;
+#else
+static int tag_mode = TAG_DISABLED;
+#endif
+
+#if defined(CONFIG_SCSI_EATA_LINKED_COMMANDS)
+static int linked_comm = 1;
+#else
+static int linked_comm = 0;
+#endif
+
+#if defined(CONFIG_SCSI_EATA_MAX_TAGS)
+static int max_queue_depth = CONFIG_SCSI_EATA_MAX_TAGS;
+#else
+static int max_queue_depth = MAX_CMD_PER_LUN;
+#endif
+
+#if defined(CONFIG_ISA)
+static int isa_probe = 1;
+#else
+static int isa_probe = 0;
+#endif
+
+#if defined(CONFIG_EISA)
+static int eisa_probe = 1;
+#else
+static int eisa_probe = 0;
+#endif
+
+#if defined(CONFIG_PCI)
+static int pci_probe = 1;
+#else
+static int pci_probe = 0;
+#endif
+
+#define MAX_INT_PARAM 10
+#define MAX_BOOT_OPTIONS_SIZE 256
+static char boot_options[MAX_BOOT_OPTIONS_SIZE];
+
+#if defined(MODULE)
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+module_param_string(eata, boot_options, MAX_BOOT_OPTIONS_SIZE, 0);
+MODULE_PARM_DESC(eata, " equivalent to the \"eata=...\" kernel boot option."
+ " Example: modprobe eata \"eata=0x7410,0x230,lc:y,tm:0,mq:4,ep:n\"");
+MODULE_AUTHOR("Dario Ballabio");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EATA/DMA SCSI Driver");
+
+#endif
+
+static int eata2x_slave_configure(struct scsi_device *dev)
+{
+ int tqd, utqd;
+ char *tag_suffix, *link_suffix;
+
+ utqd = MAX_CMD_PER_LUN;
+ tqd = max_queue_depth;
+
+ if (TLDEV(dev->type) && dev->tagged_supported) {
+ if (tag_mode == TAG_SIMPLE) {
+ tag_suffix = ", simple tags";
+ } else if (tag_mode == TAG_ORDERED) {
+ tag_suffix = ", ordered tags";
+ } else {
+ tag_suffix = ", no tags";
+ }
+ scsi_change_queue_depth(dev, tqd);
+ } else if (TLDEV(dev->type) && linked_comm) {
+ scsi_change_queue_depth(dev, tqd);
+ tag_suffix = ", untagged";
+ } else {
+ scsi_change_queue_depth(dev, utqd);
+ tag_suffix = "";
+ }
+
+ if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2)
+ link_suffix = ", sorted";
+ else if (TLDEV(dev->type))
+ link_suffix = ", unsorted";
+ else
+ link_suffix = "";
+
+ sdev_printk(KERN_INFO, dev,
+ "cmds/lun %d%s%s.\n",
+ dev->queue_depth, link_suffix, tag_suffix);
+
+ return 0;
+}
+
+static int wait_on_busy(unsigned long iobase, unsigned int loop)
+{
+ while (inb(iobase + REG_AUX_STATUS) & ABSY_ASSERTED) {
+ udelay(1L);
+ if (--loop == 0)
+ return 1;
+ }
+ return 0;
+}
+
+static int do_dma(unsigned long iobase, unsigned long addr, unchar cmd)
+{
+ unsigned char *byaddr;
+ unsigned long devaddr;
+
+ if (wait_on_busy(iobase, (addr ? MAXLOOP * 100 : MAXLOOP)))
+ return 1;
+
+ if (addr) {
+ devaddr = H2DEV(addr);
+ byaddr = (unsigned char *)&devaddr;
+ outb(byaddr[3], iobase + REG_LOW);
+ outb(byaddr[2], iobase + REG_LM);
+ outb(byaddr[1], iobase + REG_MID);
+ outb(byaddr[0], iobase + REG_MSB);
+ }
+
+ outb(cmd, iobase + REG_CMD);
+ return 0;
+}
+
+static int read_pio(unsigned long iobase, ushort * start, ushort * end)
+{
+ unsigned int loop = MAXLOOP;
+ ushort *p;
+
+ for (p = start; p <= end; p++) {
+ while (!(inb(iobase + REG_STATUS) & DRQ_ASSERTED)) {
+ udelay(1L);
+ if (--loop == 0)
+ return 1;
+ }
+ loop = MAXLOOP;
+ *p = REG2H(inw(iobase));
+ }
+
+ return 0;
+}
+
+static struct pci_dev *get_pci_dev(unsigned long port_base)
+{
+#if defined(CONFIG_PCI)
+ unsigned int addr;
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_class(PCI_CLASS_STORAGE_SCSI << 8, dev))) {
+ addr = pci_resource_start(dev, 0);
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: get_pci_dev, bus %d, devfn 0x%x, addr 0x%x.\n",
+ driver_name, dev->bus->number, dev->devfn, addr);
+#endif
+
+ /* we are in so much trouble for a pci hotplug system with this driver
+ * anyway, so doing this at least lets people unload the driver and not
+ * cause memory problems, but in general this is a bad thing to do (this
+ * driver needs to be converted to the proper PCI api someday... */
+ pci_dev_put(dev);
+ if (addr + PCI_BASE_ADDRESS_0 == port_base)
+ return dev;
+ }
+#endif /* end CONFIG_PCI */
+ return NULL;
+}
+
+static void enable_pci_ports(void)
+{
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_class(PCI_CLASS_STORAGE_SCSI << 8, dev))) {
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: enable_pci_ports, bus %d, devfn 0x%x.\n",
+ driver_name, dev->bus->number, dev->devfn);
+#endif
+
+ if (pci_enable_device(dev))
+ printk
+ ("%s: warning, pci_enable_device failed, bus %d devfn 0x%x.\n",
+ driver_name, dev->bus->number, dev->devfn);
+ }
+
+#endif /* end CONFIG_PCI */
+}
+
+static int port_detect(unsigned long port_base, unsigned int j,
+ struct scsi_host_template *tpnt)
+{
+ unsigned char irq, dma_channel, subversion, i, is_pci = 0;
+ unsigned char protocol_rev;
+ struct eata_info info;
+ char *bus_type, dma_name[16];
+ struct pci_dev *pdev;
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+ struct Scsi_Host *shost;
+ struct hostdata *ha;
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ if (!request_region(port_base, REGION_SIZE, driver_name)) {
+#if defined(DEBUG_DETECT)
+ printk("%s: address 0x%03lx in use, skipping probe.\n", name,
+ port_base);
+#endif
+ goto fail;
+ }
+
+ if (do_dma(port_base, 0, READ_CONFIG_PIO)) {
+#if defined(DEBUG_DETECT)
+ printk("%s: detect, do_dma failed at 0x%03lx.\n", name,
+ port_base);
+#endif
+ goto freelock;
+ }
+
+ /* Read the info structure */
+ if (read_pio(port_base, (ushort *) & info, (ushort *) & info.ipad[0])) {
+#if defined(DEBUG_DETECT)
+ printk("%s: detect, read_pio failed at 0x%03lx.\n", name,
+ port_base);
+#endif
+ goto freelock;
+ }
+
+ info.data_len = DEV2H(info.data_len);
+ info.sign = DEV2H(info.sign);
+ info.cp_pad_len = DEV2H16(info.cp_pad_len);
+ info.cp_len = DEV2H(info.cp_len);
+ info.sp_len = DEV2H(info.sp_len);
+ info.scatt_size = DEV2H16(info.scatt_size);
+ info.queue_size = DEV2H16(info.queue_size);
+
+ /* Check the controller "EATA" signature */
+ if (info.sign != EATA_SIG_BE) {
+#if defined(DEBUG_DETECT)
+ printk("%s: signature 0x%04x discarded.\n", name, info.sign);
+#endif
+ goto freelock;
+ }
+
+ if (info.data_len < EATA_2_0A_SIZE) {
+ printk
+ ("%s: config structure size (%d bytes) too short, detaching.\n",
+ name, info.data_len);
+ goto freelock;
+ } else if (info.data_len == EATA_2_0A_SIZE)
+ protocol_rev = 'A';
+ else if (info.data_len == EATA_2_0B_SIZE)
+ protocol_rev = 'B';
+ else
+ protocol_rev = 'C';
+
+ if (protocol_rev != 'A' && info.forcaddr) {
+ printk("%s: warning, port address has been forced.\n", name);
+ bus_type = "PCI";
+ is_pci = 1;
+ subversion = ESA;
+ } else if (port_base > MAX_EISA_ADDR
+ || (protocol_rev == 'C' && info.pci)) {
+ bus_type = "PCI";
+ is_pci = 1;
+ subversion = ESA;
+ } else if (port_base >= MIN_EISA_ADDR
+ || (protocol_rev == 'C' && info.eisa)) {
+ bus_type = "EISA";
+ subversion = ESA;
+ } else if (protocol_rev == 'C' && !info.eisa && !info.pci) {
+ bus_type = "ISA";
+ subversion = ISA;
+ } else if (port_base > MAX_ISA_ADDR) {
+ bus_type = "PCI";
+ is_pci = 1;
+ subversion = ESA;
+ } else {
+ bus_type = "ISA";
+ subversion = ISA;
+ }
+
+ if (!info.haaval || info.ata) {
+ printk
+ ("%s: address 0x%03lx, unusable %s board (%d%d), detaching.\n",
+ name, port_base, bus_type, info.haaval, info.ata);
+ goto freelock;
+ }
+
+ if (info.drqvld) {
+ if (subversion == ESA)
+ printk("%s: warning, weird %s board using DMA.\n", name,
+ bus_type);
+
+ subversion = ISA;
+ dma_channel = dma_channel_table[3 - info.drqx];
+ } else {
+ if (subversion == ISA)
+ printk("%s: warning, weird %s board not using DMA.\n",
+ name, bus_type);
+
+ subversion = ESA;
+ dma_channel = NO_DMA;
+ }
+
+ if (!info.dmasup)
+ printk("%s: warning, DMA protocol support not asserted.\n",
+ name);
+
+ irq = info.irq;
+
+ if (subversion == ESA && !info.irq_tr)
+ printk
+ ("%s: warning, LEVEL triggering is suggested for IRQ %u.\n",
+ name, irq);
+
+ if (is_pci) {
+ pdev = get_pci_dev(port_base);
+ if (!pdev)
+ printk
+ ("%s: warning, failed to get pci_dev structure.\n",
+ name);
+ } else
+ pdev = NULL;
+
+ if (pdev && (irq != pdev->irq)) {
+ printk("%s: IRQ %u mapped to IO-APIC IRQ %u.\n", name, irq,
+ pdev->irq);
+ irq = pdev->irq;
+ }
+
+ /* Board detected, allocate its IRQ */
+ if (request_irq(irq, do_interrupt_handler,
+ (subversion == ESA) ? IRQF_SHARED : 0,
+ driver_name, (void *)&sha[j])) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name,
+ irq);
+ goto freelock;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ goto freeirq;
+ }
+#if defined(FORCE_CONFIG)
+ {
+ struct eata_config *cf;
+ dma_addr_t cf_dma_addr;
+
+ cf = pci_zalloc_consistent(pdev, sizeof(struct eata_config),
+ &cf_dma_addr);
+
+ if (!cf) {
+ printk
+ ("%s: config, pci_alloc_consistent failed, detaching.\n",
+ name);
+ goto freedma;
+ }
+
+ /* Set board configuration */
+ cf->len = (ushort) H2DEV16((ushort) 510);
+ cf->ocena = 1;
+
+ if (do_dma(port_base, cf_dma_addr, SET_CONFIG_DMA)) {
+ printk
+ ("%s: busy timeout sending configuration, detaching.\n",
+ name);
+ pci_free_consistent(pdev, sizeof(struct eata_config),
+ cf, cf_dma_addr);
+ goto freedma;
+ }
+
+ }
+#endif
+
+ sh[j] = shost = scsi_register(tpnt, sizeof(struct hostdata));
+ if (shost == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+ goto freedma;
+ }
+
+ shost->io_port = port_base;
+ shost->unique_id = port_base;
+ shost->n_io_port = REGION_SIZE;
+ shost->dma_channel = dma_channel;
+ shost->irq = irq;
+ shost->sg_tablesize = (ushort) info.scatt_size;
+ shost->this_id = (ushort) info.host_addr[3];
+ shost->can_queue = (ushort) info.queue_size;
+ shost->cmd_per_lun = MAX_CMD_PER_LUN;
+
+ ha = (struct hostdata *)shost->hostdata;
+
+ memset(ha, 0, sizeof(struct hostdata));
+ ha->subversion = subversion;
+ ha->protocol_rev = protocol_rev;
+ ha->is_pci = is_pci;
+ ha->pdev = pdev;
+ ha->board_number = j;
+
+ if (ha->subversion == ESA)
+ shost->unchecked_isa_dma = 0;
+ else {
+ unsigned long flags;
+ shost->unchecked_isa_dma = 1;
+
+ flags = claim_dma_lock();
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ release_dma_lock(flags);
+
+ }
+
+ strcpy(ha->board_name, name);
+
+ /* DPT PM2012 does not allow to detect sg_tablesize correctly */
+ if (shost->sg_tablesize > MAX_SGLIST || shost->sg_tablesize < 2) {
+ printk("%s: detect, wrong n. of SG lists %d, fixed.\n",
+ ha->board_name, shost->sg_tablesize);
+ shost->sg_tablesize = MAX_SGLIST;
+ }
+
+ /* DPT PM2012 does not allow to detect can_queue correctly */
+ if (shost->can_queue > MAX_MAILBOXES || shost->can_queue < 2) {
+ printk("%s: detect, wrong n. of mbox %d, fixed.\n",
+ ha->board_name, shost->can_queue);
+ shost->can_queue = MAX_MAILBOXES;
+ }
+
+ if (protocol_rev != 'A') {
+ if (info.max_chan > 0 && info.max_chan < MAX_CHANNEL)
+ shost->max_channel = info.max_chan;
+
+ if (info.max_id > 7 && info.max_id < MAX_TARGET)
+ shost->max_id = info.max_id + 1;
+
+ if (info.large_sg && shost->sg_tablesize == MAX_SGLIST)
+ shost->sg_tablesize = MAX_LARGE_SGLIST;
+ }
+
+ if (protocol_rev == 'C') {
+ if (info.max_lun > 7 && info.max_lun < MAX_LUN)
+ shost->max_lun = info.max_lun + 1;
+ }
+
+ if (dma_channel == NO_DMA)
+ sprintf(dma_name, "%s", "BMST");
+ else
+ sprintf(dma_name, "DMA %u", dma_channel);
+
+ for (i = 0; i < shost->can_queue; i++)
+ ha->cp[i].cp_dma_addr = pci_map_single(ha->pdev,
+ &ha->cp[i],
+ sizeof(struct mscp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (i = 0; i < shost->can_queue; i++) {
+ size_t sz = shost->sg_tablesize *sizeof(struct sg_list);
+ gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
+ ha->cp[i].sglist = kmalloc(sz, gfp_mask);
+ if (!ha->cp[i].sglist) {
+ printk
+ ("%s: kmalloc SGlist failed, mbox %d, detaching.\n",
+ ha->board_name, i);
+ goto release;
+ }
+ }
+
+ if (!(ha->sp_cpu_addr = pci_alloc_consistent(ha->pdev,
+ sizeof(struct mssp),
+ &ha->sp_dma_addr))) {
+ printk("%s: pci_alloc_consistent failed, detaching.\n", ha->board_name);
+ goto release;
+ }
+
+ if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
+ max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
+
+ if (max_queue_depth < MAX_CMD_PER_LUN)
+ max_queue_depth = MAX_CMD_PER_LUN;
+
+ if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE)
+ tag_mode = TAG_ORDERED;
+
+ if (j == 0) {
+ printk
+ ("EATA/DMA 2.0x: Copyright (C) 1994-2003 Dario Ballabio.\n");
+ printk
+ ("%s config options -> tm:%d, lc:%c, mq:%d, rs:%c, et:%c, "
+ "ip:%c, ep:%c, pp:%c.\n", driver_name, tag_mode,
+ YESNO(linked_comm), max_queue_depth, YESNO(rev_scan),
+ YESNO(ext_tran), YESNO(isa_probe), YESNO(eisa_probe),
+ YESNO(pci_probe));
+ }
+
+ printk("%s: 2.0%c, %s 0x%03lx, IRQ %u, %s, SG %d, MB %d.\n",
+ ha->board_name, ha->protocol_rev, bus_type,
+ (unsigned long)shost->io_port, shost->irq, dma_name,
+ shost->sg_tablesize, shost->can_queue);
+
+ if (shost->max_id > 8 || shost->max_lun > 8)
+ printk
+ ("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n",
+ ha->board_name, shost->max_id, shost->max_lun);
+
+ for (i = 0; i <= shost->max_channel; i++)
+ printk("%s: SCSI channel %u enabled, host target ID %d.\n",
+ ha->board_name, i, info.host_addr[3 - i]);
+
+#if defined(DEBUG_DETECT)
+ printk("%s: Vers. 0x%x, ocs %u, tar %u, trnxfr %u, more %u, SYNC 0x%x, "
+ "sec. %u, infol %d, cpl %d spl %d.\n", name, info.version,
+ info.ocsena, info.tarsup, info.trnxfr, info.morsup, info.sync,
+ info.second, info.data_len, info.cp_len, info.sp_len);
+
+ if (protocol_rev == 'B' || protocol_rev == 'C')
+ printk("%s: isaena %u, forcaddr %u, max_id %u, max_chan %u, "
+ "large_sg %u, res1 %u.\n", name, info.isaena,
+ info.forcaddr, info.max_id, info.max_chan, info.large_sg,
+ info.res1);
+
+ if (protocol_rev == 'C')
+ printk("%s: max_lun %u, m1 %u, idquest %u, pci %u, eisa %u, "
+ "raidnum %u.\n", name, info.max_lun, info.m1,
+ info.idquest, info.pci, info.eisa, info.raidnum);
+#endif
+
+ if (ha->pdev) {
+ pci_set_master(ha->pdev);
+ if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32)))
+ printk("%s: warning, pci_set_dma_mask failed.\n",
+ ha->board_name);
+ }
+
+ return 1;
+
+ freedma:
+ if (subversion == ISA)
+ free_dma(dma_channel);
+ freeirq:
+ free_irq(irq, &sha[j]);
+ freelock:
+ release_region(port_base, REGION_SIZE);
+ fail:
+ return 0;
+
+ release:
+ eata2x_release(shost);
+ return 0;
+}
+
+static void internal_setup(char *str, int *ints)
+{
+ int i, argc = ints[0];
+ char *cur = str, *pc;
+
+ if (argc > 0) {
+ if (argc > MAX_INT_PARAM)
+ argc = MAX_INT_PARAM;
+
+ for (i = 0; i < argc; i++)
+ io_port[i] = ints[i + 1];
+
+ io_port[i] = 0;
+ setup_done = 1;
+ }
+
+ while (cur && (pc = strchr(cur, ':'))) {
+ int val = 0, c = *++pc;
+
+ if (c == 'n' || c == 'N')
+ val = 0;
+ else if (c == 'y' || c == 'Y')
+ val = 1;
+ else
+ val = (int)simple_strtoul(pc, NULL, 0);
+
+ if (!strncmp(cur, "lc:", 3))
+ linked_comm = val;
+ else if (!strncmp(cur, "tm:", 3))
+ tag_mode = val;
+ else if (!strncmp(cur, "tc:", 3))
+ tag_mode = val;
+ else if (!strncmp(cur, "mq:", 3))
+ max_queue_depth = val;
+ else if (!strncmp(cur, "ls:", 3))
+ link_statistics = val;
+ else if (!strncmp(cur, "et:", 3))
+ ext_tran = val;
+ else if (!strncmp(cur, "rs:", 3))
+ rev_scan = val;
+ else if (!strncmp(cur, "ip:", 3))
+ isa_probe = val;
+ else if (!strncmp(cur, "ep:", 3))
+ eisa_probe = val;
+ else if (!strncmp(cur, "pp:", 3))
+ pci_probe = val;
+
+ if ((cur = strchr(cur, ',')))
+ ++cur;
+ }
+
+ return;
+}
+
+static int option_setup(char *str)
+{
+ int ints[MAX_INT_PARAM];
+ char *cur = str;
+ int i = 1;
+
+ while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
+ ints[i++] = simple_strtoul(cur, NULL, 0);
+
+ if ((cur = strchr(cur, ',')) != NULL)
+ cur++;
+ }
+
+ ints[0] = i - 1;
+ internal_setup(cur, ints);
+ return 1;
+}
+
+static void add_pci_ports(void)
+{
+#if defined(CONFIG_PCI)
+ unsigned int addr, k;
+ struct pci_dev *dev = NULL;
+
+ for (k = 0; k < MAX_PCI; k++) {
+
+ if (!(dev = pci_get_class(PCI_CLASS_STORAGE_SCSI << 8, dev)))
+ break;
+
+ if (pci_enable_device(dev)) {
+#if defined(DEBUG_PCI_DETECT)
+ printk
+ ("%s: detect, bus %d, devfn 0x%x, pci_enable_device failed.\n",
+ driver_name, dev->bus->number, dev->devfn);
+#endif
+
+ continue;
+ }
+
+ addr = pci_resource_start(dev, 0);
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: detect, seq. %d, bus %d, devfn 0x%x, addr 0x%x.\n",
+ driver_name, k, dev->bus->number, dev->devfn, addr);
+#endif
+
+ /* Order addresses according to rev_scan value */
+ io_port[MAX_INT_PARAM + (rev_scan ? (MAX_PCI - k) : (1 + k))] =
+ addr + PCI_BASE_ADDRESS_0;
+ }
+
+ pci_dev_put(dev);
+#endif /* end CONFIG_PCI */
+}
+
+static int eata2x_detect(struct scsi_host_template *tpnt)
+{
+ unsigned int j = 0, k;
+
+ tpnt->proc_name = "eata2x";
+
+ if (strlen(boot_options))
+ option_setup(boot_options);
+
+#if defined(MODULE)
+ /* io_port could have been modified when loading as a module */
+ if (io_port[0] != SKIP) {
+ setup_done = 1;
+ io_port[MAX_INT_PARAM] = 0;
+ }
+#endif
+
+ for (k = MAX_INT_PARAM; io_port[k]; k++)
+ if (io_port[k] == SKIP)
+ continue;
+ else if (io_port[k] <= MAX_ISA_ADDR) {
+ if (!isa_probe)
+ io_port[k] = SKIP;
+ } else if (io_port[k] >= MIN_EISA_ADDR
+ && io_port[k] <= MAX_EISA_ADDR) {
+ if (!eisa_probe)
+ io_port[k] = SKIP;
+ }
+
+ if (pci_probe) {
+ if (!setup_done)
+ add_pci_ports();
+ else
+ enable_pci_ports();
+ }
+
+ for (k = 0; io_port[k]; k++) {
+
+ if (io_port[k] == SKIP)
+ continue;
+
+ if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt))
+ j++;
+ }
+
+ num_boards = j;
+ return j;
+}
+
+static void map_dma(unsigned int i, struct hostdata *ha)
+{
+ unsigned int k, pci_dir;
+ int count;
+ struct scatterlist *sg;
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &ha->cp[i];
+ SCpnt = cpp->SCpnt;
+ pci_dir = SCpnt->sc_data_direction;
+
+ if (SCpnt->sense_buffer)
+ cpp->sense_addr =
+ H2DEV(pci_map_single(ha->pdev, SCpnt->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
+
+ cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ if (!scsi_sg_count(SCpnt)) {
+ cpp->data_len = 0;
+ return;
+ }
+
+ count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ pci_dir);
+ BUG_ON(!count);
+
+ scsi_for_each_sg(SCpnt, sg, count, k) {
+ cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+ cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
+ }
+
+ cpp->sg = 1;
+ cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist,
+ scsi_sg_count(SCpnt) *
+ sizeof(struct sg_list),
+ pci_dir));
+ cpp->data_len = H2DEV((scsi_sg_count(SCpnt) * sizeof(struct sg_list)));
+}
+
+static void unmap_dma(unsigned int i, struct hostdata *ha)
+{
+ unsigned int pci_dir;
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &ha->cp[i];
+ SCpnt = cpp->SCpnt;
+ pci_dir = SCpnt->sc_data_direction;
+
+ if (DEV2H(cpp->sense_addr))
+ pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
+ DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
+
+ if (scsi_sg_count(SCpnt))
+ pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ pci_dir);
+
+ if (!DEV2H(cpp->data_len))
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+
+ if (DEV2H(cpp->data_address))
+ pci_unmap_single(ha->pdev, DEV2H(cpp->data_address),
+ DEV2H(cpp->data_len), pci_dir);
+}
+
+static void sync_dma(unsigned int i, struct hostdata *ha)
+{
+ unsigned int pci_dir;
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &ha->cp[i];
+ SCpnt = cpp->SCpnt;
+ pci_dir = SCpnt->sc_data_direction;
+
+ if (DEV2H(cpp->sense_addr))
+ pci_dma_sync_single_for_cpu(ha->pdev, DEV2H(cpp->sense_addr),
+ DEV2H(cpp->sense_len),
+ PCI_DMA_FROMDEVICE);
+
+ if (scsi_sg_count(SCpnt))
+ pci_dma_sync_sg_for_cpu(ha->pdev, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt), pci_dir);
+
+ if (!DEV2H(cpp->data_len))
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+
+ if (DEV2H(cpp->data_address))
+ pci_dma_sync_single_for_cpu(ha->pdev,
+ DEV2H(cpp->data_address),
+ DEV2H(cpp->data_len), pci_dir);
+}
+
+static void scsi_to_dev_dir(unsigned int i, struct hostdata *ha)
+{
+ unsigned int k;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
+ 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
+ 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d
+ };
+
+ static const unsigned char data_none_cmds[] = {
+ 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
+ 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
+ 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00
+ };
+
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &ha->cp[i];
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
+ cpp->din = 1;
+ cpp->dout = 0;
+ return;
+ } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
+ cpp->din = 0;
+ cpp->dout = 1;
+ return;
+ } else if (SCpnt->sc_data_direction == DMA_NONE) {
+ cpp->din = 0;
+ cpp->dout = 0;
+ return;
+ }
+
+ if (SCpnt->sc_data_direction != DMA_BIDIRECTIONAL)
+ panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n",
+ ha->board_name);
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->dout = 1;
+ break;
+ }
+
+ if ((cpp->din = !cpp->dout))
+ for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
+ if (SCpnt->cmnd[0] == data_none_cmds[k]) {
+ cpp->din = 0;
+ break;
+ }
+
+}
+
+static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct Scsi_Host *shost = SCpnt->device->host;
+ struct hostdata *ha = (struct hostdata *)shost->hostdata;
+ unsigned int i, k;
+ struct mscp *cpp;
+
+ if (SCpnt->host_scribble)
+ panic("%s: qcomm, SCpnt %p already active.\n",
+ ha->board_name, SCpnt);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = ha->last_cp_used + 1;
+
+ for (k = 0; k < shost->can_queue; k++, i++) {
+ if (i >= shost->can_queue)
+ i = 0;
+ if (ha->cp_stat[i] == FREE) {
+ ha->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == shost->can_queue) {
+ printk("%s: qcomm, no free mailbox.\n", ha->board_name);
+ return 1;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &ha->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
+
+ /* Set pointer to status packet structure, Big Endian format */
+ cpp->sp_dma_addr = H2DEV(ha->sp_dma_addr);
+
+ SCpnt->scsi_done = done;
+ cpp->cpp_index = i;
+ SCpnt->host_scribble = (unsigned char *)&cpp->cpp_index;
+
+ if (do_trace)
+ scmd_printk(KERN_INFO, SCpnt,
+ "qcomm, mbox %d.\n", i);
+
+ cpp->reqsen = 1;
+ cpp->dispri = 1;
+#if 0
+ if (SCpnt->device->type == TYPE_TAPE)
+ cpp->hbaci = 1;
+#endif
+ cpp->one = 1;
+ cpp->channel = SCpnt->device->channel;
+ cpp->target = SCpnt->device->id;
+ cpp->lun = SCpnt->device->lun;
+ cpp->SCpnt = SCpnt;
+ memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ /* Use data transfer direction SCpnt->sc_data_direction */
+ scsi_to_dev_dir(i, ha);
+
+ /* Map DMA buffers and SG list */
+ map_dma(i, ha);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type)) {
+ ha->cp_stat[i] = READY;
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
+ return 0;
+ }
+
+ /* Send control packet to the board */
+ if (do_dma(shost->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) {
+ unmap_dma(i, ha);
+ SCpnt->host_scribble = NULL;
+ scmd_printk(KERN_INFO, SCpnt, "qcomm, adapter busy.\n");
+ return 1;
+ }
+
+ ha->cp_stat[i] = IN_USE;
+ return 0;
+}
+
+static DEF_SCSI_QCMD(eata2x_queuecommand)
+
+static int eata2x_eh_abort(struct scsi_cmnd *SCarg)
+{
+ struct Scsi_Host *shost = SCarg->device->host;
+ struct hostdata *ha = (struct hostdata *)shost->hostdata;
+ unsigned int i;
+
+ if (SCarg->host_scribble == NULL) {
+ scmd_printk(KERN_INFO, SCarg, "abort, cmd inactive.\n");
+ return SUCCESS;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ scmd_printk(KERN_WARNING, SCarg, "abort, mbox %d.\n", i);
+
+ if (i >= shost->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", ha->board_name);
+
+ if (wait_on_busy(shost->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", ha->board_name);
+ return FAILED;
+ }
+
+ if (ha->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", ha->board_name, i);
+ return SUCCESS;
+ }
+
+ if (ha->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", ha->board_name, i);
+
+ if (SCarg != ha->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ ha->board_name, i, SCarg, ha->cp[i].SCpnt);
+
+ if (inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n",
+ ha->board_name, i);
+
+ return FAILED;
+ }
+
+ if (ha->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", ha->board_name, i);
+ return FAILED;
+ }
+
+ if (ha->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", ha->board_name, i);
+ return SUCCESS;
+ }
+
+ if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) {
+ unmap_dma(i, ha);
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ ha->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, done.\n",
+ ha->board_name, i);
+ SCarg->scsi_done(SCarg);
+ return SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", ha->board_name, i);
+}
+
+static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg)
+{
+ unsigned int i, time, k, c, limit = 0;
+ int arg_done = 0;
+ struct scsi_cmnd *SCpnt;
+ struct Scsi_Host *shost = SCarg->device->host;
+ struct hostdata *ha = (struct hostdata *)shost->hostdata;
+
+ scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
+
+ spin_lock_irq(shost->host_lock);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, inactive.\n", ha->board_name);
+
+ if (ha->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", ha->board_name);
+ spin_unlock_irq(shost->host_lock);
+ return FAILED;
+ }
+
+ if (wait_on_busy(shost->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", ha->board_name);
+ spin_unlock_irq(shost->host_lock);
+ return FAILED;
+ }
+
+ ha->retries = 0;
+
+ for (c = 0; c <= shost->max_channel; c++)
+ for (k = 0; k < shost->max_id; k++) {
+ ha->target_redo[k][c] = 1;
+ ha->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < shost->can_queue; i++) {
+
+ if (ha->cp_stat[i] == FREE)
+ continue;
+
+ if (ha->cp_stat[i] == LOCKED) {
+ ha->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n",
+ ha->board_name, i);
+ continue;
+ }
+
+ if (!(SCpnt = ha->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", ha->board_name, i);
+
+ if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) {
+ ha->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting.\n",
+ ha->board_name, i);
+ }
+
+ else {
+ ha->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset.\n",
+ ha->board_name, i);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", ha->board_name, i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", ha->board_name, i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n",
+ ha->board_name, i);
+
+ if (SCpnt == SCarg)
+ arg_done = 1;
+ }
+
+ if (do_dma(shost->io_port, 0, RESET_PIO)) {
+ printk("%s: reset, cannot reset, timeout error.\n", ha->board_name);
+ spin_unlock_irq(shost->host_lock);
+ return FAILED;
+ }
+
+ printk("%s: reset, board reset done, enabling interrupts.\n", ha->board_name);
+
+#if defined(DEBUG_RESET)
+ do_trace = 1;
+#endif
+
+ ha->in_reset = 1;
+
+ spin_unlock_irq(shost->host_lock);
+
+ /* FIXME: use a sleep instead */
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000)
+ udelay(100L);
+
+ spin_lock_irq(shost->host_lock);
+
+ printk("%s: reset, interrupts disabled, loops %d.\n", ha->board_name, limit);
+
+ for (i = 0; i < shost->can_queue; i++) {
+
+ if (ha->cp_stat[i] == IN_RESET) {
+ SCpnt = ha->cp[i].SCpnt;
+ unmap_dma(i, ha);
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ ha->cp_stat[i] = LOCKED;
+
+ printk
+ ("%s, reset, mbox %d locked, DID_RESET, done.\n",
+ ha->board_name, i);
+ }
+
+ else if (ha->cp_stat[i] == ABORTING) {
+ SCpnt = ha->cp[i].SCpnt;
+ unmap_dma(i, ha);
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ ha->cp_stat[i] = FREE;
+
+ printk
+ ("%s, reset, mbox %d aborting, DID_RESET, done.\n",
+ ha->board_name, i);
+ }
+
+ else
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ }
+
+ ha->in_reset = 0;
+ do_trace = 0;
+
+ if (arg_done)
+ printk("%s: reset, exit, done.\n", ha->board_name);
+ else
+ printk("%s: reset, exit.\n", ha->board_name);
+
+ spin_unlock_irq(shost->host_lock);
+ return SUCCESS;
+}
+
+int eata2x_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int *dkinfo)
+{
+ unsigned int size = capacity;
+
+ if (ext_tran || (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) {
+ dkinfo[0] = 255;
+ dkinfo[1] = 63;
+ dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
+ }
+#if defined (DEBUG_GEOMETRY)
+ printk("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name,
+ dkinfo[0], dkinfo[1], dkinfo[2]);
+#endif
+
+ return 0;
+}
+
+static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
+ unsigned int rev)
+{
+ unsigned int i, j, k, y;
+ unsigned long x;
+
+ for (i = 0; i < n - 1; i++) {
+ k = i;
+
+ for (j = k + 1; j < n; j++)
+ if (rev) {
+ if (sk[j] > sk[k])
+ k = j;
+ } else {
+ if (sk[j] < sk[k])
+ k = j;
+ }
+
+ if (k != i) {
+ x = sk[k];
+ sk[k] = sk[i];
+ sk[i] = x;
+ y = da[k];
+ da[k] = da[i];
+ da[i] = y;
+ }
+ }
+
+ return;
+}
+
+static int reorder(struct hostdata *ha, unsigned long cursec,
+ unsigned int ihdlr, unsigned int il[], unsigned int n_ready)
+{
+ struct scsi_cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n;
+ unsigned int rev = 0, s = 1, r = 1;
+ unsigned int input_only = 1, overlap = 0;
+ unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
+ unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
+ unsigned long ioseek = 0;
+
+ static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
+ static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
+ static unsigned int readysorted = 0, revcount = 0;
+ static unsigned long seeksorted = 0, seeknosort = 0;
+
+ if (link_statistics && !(++flushcount % link_statistics))
+ printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"
+ " av %ldK as %ldK.\n", flushcount, batchcount,
+ inputcount, ovlcount, readycount, readysorted, sortcount,
+ revcount, seeknosort / (readycount + 1),
+ seeksorted / (readycount + 1));
+
+ if (n_ready <= 1)
+ return 0;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n];
+ cpp = &ha->cp[k];
+ SCpnt = cpp->SCpnt;
+
+ if (!cpp->din)
+ input_only = 0;
+
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
+
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
+
+ if (!n)
+ continue;
+
+ if (sl[n] < sl[n - 1])
+ s = 0;
+ if (sl[n] > sl[n - 1])
+ r = 0;
+
+ if (link_statistics) {
+ if (sl[n] > sl[n - 1])
+ seek += sl[n] - sl[n - 1];
+ else
+ seek += sl[n - 1] - sl[n];
+ }
+
+ }
+
+ if (link_statistics) {
+ if (cursec > sl[0])
+ seek += cursec - sl[0];
+ else
+ seek += sl[0] - cursec;
+ }
+
+ if (cursec > ((maxsec + minsec) / 2))
+ rev = 1;
+
+ if (ioseek > ((maxsec - minsec) / 2))
+ rev = 0;
+
+ if (!((rev && r) || (!rev && s)))
+ sort(sl, il, n_ready, rev);
+
+ if (!input_only)
+ for (n = 0; n < n_ready; n++) {
+ k = il[n];
+ cpp = &ha->cp[k];
+ SCpnt = cpp->SCpnt;
+ ll[n] = blk_rq_sectors(SCpnt->request);
+ pl[n] = SCpnt->serial_number;
+
+ if (!n)
+ continue;
+
+ if ((sl[n] == sl[n - 1])
+ || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
+ || (rev && ((sl[n] + ll[n]) > sl[n - 1])))
+ overlap = 1;
+ }
+
+ if (overlap)
+ sort(pl, il, n_ready, 0);
+
+ if (link_statistics) {
+ if (cursec > sl[0])
+ iseek = cursec - sl[0];
+ else
+ iseek = sl[0] - cursec;
+ batchcount++;
+ readycount += n_ready;
+ seeknosort += seek / 1024;
+ if (input_only)
+ inputcount++;
+ if (overlap) {
+ ovlcount++;
+ seeksorted += iseek / 1024;
+ } else
+ seeksorted += (iseek + maxsec - minsec) / 1024;
+ if (rev && !r) {
+ revcount++;
+ readysorted += n_ready;
+ }
+ if (!rev && !s) {
+ sortcount++;
+ readysorted += n_ready;
+ }
+ }
+#if defined(DEBUG_LINKED_COMMANDS)
+ if (link_statistics && (overlap || !(flushcount % link_statistics)))
+ for (n = 0; n < n_ready; n++) {
+ k = il[n];
+ cpp = &ha->cp[k];
+ SCpnt = cpp->SCpnt;
+ scmd_printk(KERN_INFO, SCpnt,
+ "%s mb %d fc %d nr %d sec %ld ns %u"
+ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
+ (ihdlr ? "ihdlr" : "qcomm"),
+ k, flushcount,
+ n_ready, blk_rq_pos(SCpnt->request),
+ blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
+ YESNO(r), YESNO(rev), YESNO(input_only),
+ YESNO(overlap), cpp->din);
+ }
+#endif
+ return overlap;
+}
+
+static void flush_dev(struct scsi_device *dev, unsigned long cursec,
+ struct hostdata *ha, unsigned int ihdlr)
+{
+ struct scsi_cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
+
+ for (k = 0; k < dev->host->can_queue; k++) {
+
+ if (ha->cp_stat[k] != READY && ha->cp_stat[k] != IN_USE)
+ continue;
+
+ cpp = &ha->cp[k];
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt->device != dev)
+ continue;
+
+ if (ha->cp_stat[k] == IN_USE)
+ return;
+
+ il[n_ready++] = k;
+ }
+
+ if (reorder(ha, cursec, ihdlr, il, n_ready))
+ n_ready = 1;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n];
+ cpp = &ha->cp[k];
+ SCpnt = cpp->SCpnt;
+
+ if (do_dma(dev->host->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) {
+ scmd_printk(KERN_INFO, SCpnt,
+ "%s, mbox %d, adapter"
+ " busy, will abort.\n",
+ (ihdlr ? "ihdlr" : "qcomm"),
+ k);
+ ha->cp_stat[k] = ABORTING;
+ continue;
+ }
+
+ ha->cp_stat[k] = IN_USE;
+ }
+}
+
+static irqreturn_t ihdlr(struct Scsi_Host *shost)
+{
+ struct scsi_cmnd *SCpnt;
+ unsigned int i, k, c, status, tstatus, reg;
+ struct mssp *spp;
+ struct mscp *cpp;
+ struct hostdata *ha = (struct hostdata *)shost->hostdata;
+ int irq = shost->irq;
+
+ /* Check if this board need to be serviced */
+ if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
+ goto none;
+
+ ha->iocount++;
+
+ if (do_trace)
+ printk("%s: ihdlr, enter, irq %d, count %d.\n", ha->board_name, irq,
+ ha->iocount);
+
+ /* Check if this board is still busy */
+ if (wait_on_busy(shost->io_port, 20 * MAXLOOP)) {
+ reg = inb(shost->io_port + REG_STATUS);
+ printk
+ ("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
+ ha->board_name, irq, reg, ha->iocount);
+ goto none;
+ }
+
+ spp = &ha->sp;
+
+ /* Make a local copy just before clearing the interrupt indication */
+ memcpy(spp, ha->sp_cpu_addr, sizeof(struct mssp));
+
+ /* Clear the completion flag and cp pointer on the dynamic copy of sp */
+ memset(ha->sp_cpu_addr, 0, sizeof(struct mssp));
+
+ /* Read the status register to clear the interrupt indication */
+ reg = inb(shost->io_port + REG_STATUS);
+
+#if defined (DEBUG_INTERRUPT)
+ {
+ unsigned char *bytesp;
+ int cnt;
+ bytesp = (unsigned char *)spp;
+ if (ha->iocount < 200) {
+ printk("sp[] =");
+ for (cnt = 0; cnt < 15; cnt++)
+ printk(" 0x%x", bytesp[cnt]);
+ printk("\n");
+ }
+ }
+#endif
+
+ /* Reject any sp with supspect data */
+ if (spp->eoc == 0 && ha->iocount > 1)
+ printk
+ ("%s: ihdlr, spp->eoc == 0, irq %d, reg 0x%x, count %d.\n",
+ ha->board_name, irq, reg, ha->iocount);
+ if (spp->cpp_index < 0 || spp->cpp_index >= shost->can_queue)
+ printk
+ ("%s: ihdlr, bad spp->cpp_index %d, irq %d, reg 0x%x, count %d.\n",
+ ha->board_name, spp->cpp_index, irq, reg, ha->iocount);
+ if (spp->eoc == 0 || spp->cpp_index < 0
+ || spp->cpp_index >= shost->can_queue)
+ goto handled;
+
+ /* Find the mailbox to be serviced on this board */
+ i = spp->cpp_index;
+
+ cpp = &(ha->cp[i]);
+
+#if defined(DEBUG_GENERATE_ABORTS)
+ if ((ha->iocount > 500) && ((ha->iocount % 500) < 3))
+ goto handled;
+#endif
+
+ if (ha->cp_stat[i] == IGNORE) {
+ ha->cp_stat[i] = FREE;
+ goto handled;
+ } else if (ha->cp_stat[i] == LOCKED) {
+ ha->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n", ha->board_name, i,
+ ha->iocount);
+ goto handled;
+ } else if (ha->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n", ha->board_name, i,
+ ha->iocount);
+ goto handled;
+ } else if (ha->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", ha->board_name, i);
+ else if (ha->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
+ ha->board_name, i, ha->cp_stat[i]);
+
+ ha->cp_stat[i] = FREE;
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt == NULL)
+ panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", ha->board_name, i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", ha->board_name,
+ i, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
+ ha->board_name, i,
+ *(unsigned int *)SCpnt->host_scribble);
+
+ sync_dma(i, ha);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type))
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
+
+ tstatus = status_byte(spp->target_status);
+
+#if defined(DEBUG_GENERATE_ERRORS)
+ if ((ha->iocount > 500) && ((ha->iocount % 200) < 2))
+ spp->adapter_status = 0x01;
+#endif
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
+ && ha->target_redo[SCpnt->device->id][SCpnt->
+ device->
+ channel])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ ha->target_redo[SCpnt->device->id][SCpnt->device->
+ channel] = 0;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
+ (!(tstatus == CHECK_CONDITION && ha->iocount <= 1000 &&
+ (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
+ printk("%s: ihdlr, target %d.%d:%d, "
+ "target_status 0x%x, sense key 0x%x.\n",
+ ha->board_name,
+ SCpnt->device->channel, SCpnt->device->id,
+ (u8)SCpnt->device->lun,
+ spp->target_status, SCpnt->sense_buffer[2]);
+
+ ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0;
+
+ if (ha->last_retried_pid == SCpnt->serial_number)
+ ha->retries = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+ case 0x02: /* Command Time Out */
+
+ if (ha->target_to[SCpnt->device->id][SCpnt->device->channel] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ ha->target_to[SCpnt->device->id][SCpnt->device->
+ channel]++;
+ }
+
+ break;
+
+ /* Perform a limited number of internal retries */
+ case 0x03: /* SCSI Bus Reset Received */
+ case 0x04: /* Initial Controller Power-up */
+
+ for (c = 0; c <= shost->max_channel; c++)
+ for (k = 0; k < shost->max_id; k++)
+ ha->target_redo[k][c] = 1;
+
+ if (SCpnt->device->type != TYPE_TAPE
+ && ha->retries < MAX_INTERNAL_RETRIES) {
+
+#if defined(DID_SOFT_ERROR)
+ status = DID_SOFT_ERROR << 16;
+#else
+ status = DID_BUS_BUSY << 16;
+#endif
+
+ ha->retries++;
+ ha->last_retried_pid = SCpnt->serial_number;
+ } else
+ status = DID_ERROR << 16;
+
+ break;
+ case 0x05: /* Unexpected Bus Phase */
+ case 0x06: /* Unexpected Bus Free */
+ case 0x07: /* Bus Parity Error */
+ case 0x08: /* SCSI Hung */
+ case 0x09: /* Unexpected Message Reject */
+ case 0x0a: /* SCSI Bus Reset Stuck */
+ case 0x0b: /* Auto Request-Sense Failed */
+ case 0x0c: /* Controller Ram Parity Error */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+
+#if defined(DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && ha->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && ha->iocount <= 1000) ||
+ do_trace || msg_byte(spp->target_status))
+#endif
+ scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"
+ " reg 0x%x, count %d.\n",
+ i, spp->adapter_status, spp->target_status,
+ reg, ha->iocount);
+
+ unmap_dma(i, ha);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+
+ if (do_trace)
+ printk("%s: ihdlr, exit, irq %d, count %d.\n", ha->board_name,
+ irq, ha->iocount);
+
+ handled:
+ return IRQ_HANDLED;
+ none:
+ return IRQ_NONE;
+}
+
+static irqreturn_t do_interrupt_handler(int dummy, void *shap)
+{
+ struct Scsi_Host *shost;
+ unsigned int j;
+ unsigned long spin_flags;
+ irqreturn_t ret;
+
+ /* Check if the interrupt must be processed by this handler */
+ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards)
+ return IRQ_NONE;
+ shost = sh[j];
+
+ spin_lock_irqsave(shost->host_lock, spin_flags);
+ ret = ihdlr(shost);
+ spin_unlock_irqrestore(shost->host_lock, spin_flags);
+ return ret;
+}
+
+static int eata2x_release(struct Scsi_Host *shost)
+{
+ struct hostdata *ha = (struct hostdata *)shost->hostdata;
+ unsigned int i;
+
+ for (i = 0; i < shost->can_queue; i++)
+ kfree((&ha->cp[i])->sglist);
+
+ for (i = 0; i < shost->can_queue; i++)
+ pci_unmap_single(ha->pdev, ha->cp[i].cp_dma_addr,
+ sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);
+
+ if (ha->sp_cpu_addr)
+ pci_free_consistent(ha->pdev, sizeof(struct mssp),
+ ha->sp_cpu_addr, ha->sp_dma_addr);
+
+ free_irq(shost->irq, &sha[ha->board_number]);
+
+ if (shost->dma_channel != NO_DMA)
+ free_dma(shost->dma_channel);
+
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_unregister(shost);
+ return 0;
+}
+
+#include "scsi_module.c"
+
+#ifndef MODULE
+__setup("eata=", option_setup);
+#endif /* end MODULE */
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
new file mode 100644
index 000000000..5016af5cf
--- /dev/null
+++ b/drivers/scsi/eata_generic.h
@@ -0,0 +1,400 @@
+/********************************************************
+* Header file for eata_dma.c and eata_pio.c *
+* Linux EATA SCSI drivers *
+* (c) 1993-96 Michael Neuffer *
+* mike@i-Connect.Net *
+* neuffer@mail.uni-mainz.de *
+*********************************************************
+* last change: 96/08/14 *
+********************************************************/
+
+
+#ifndef _EATA_GENERIC_H
+#define _EATA_GENERIC_H
+
+
+
+/*********************************************
+ * Misc. definitions *
+ *********************************************/
+
+#define R_LIMIT 0x20000
+
+#define MAXISA 4
+#define MAXEISA 16
+#define MAXPCI 16
+#define MAXIRQ 16
+#define MAXTARGET 16
+#define MAXCHANNEL 3
+
+#define IS_ISA 'I'
+#define IS_EISA 'E'
+#define IS_PCI 'P'
+
+#define BROKEN_INQUIRY 1
+
+#define BUSMASTER 0xff
+#define PIO 0xfe
+
+#define EATA_SIGNATURE 0x45415441 /* BIG ENDIAN coded "EATA" sig. */
+
+#define DPT_ID1 0x12
+#define DPT_ID2 0x14
+
+#define ATT_ID1 0x06
+#define ATT_ID2 0x94
+#define ATT_ID3 0x0
+
+#define NEC_ID1 0x38
+#define NEC_ID2 0xa3
+#define NEC_ID3 0x82
+
+
+#define EATA_CP_SIZE 44
+
+#define MAX_PCI_DEVICES 32 /* Maximum # Of Devices Per Bus */
+#define MAX_METHOD_2 16 /* Max Devices For Method 2 */
+#define MAX_PCI_BUS 16 /* Maximum # Of Busses Allowed */
+
+#define SG_SIZE 64
+#define SG_SIZE_BIG 252 /* max. 8096 elements, 64k */
+
+#define UPPER_DEVICE_QUEUE_LIMIT 64 /* The limit we have to set for the
+ * device queue to keep the broken
+ * midlevel SCSI code from producing
+ * bogus timeouts
+ */
+
+#define TYPE_DISK_QUEUE 16
+#define TYPE_TAPE_QUEUE 4
+#define TYPE_ROM_QUEUE 4
+#define TYPE_OTHER_QUEUE 2
+
+#define FREE 0
+#define OK 0
+#define NO_TIMEOUT 0
+#define USED 1
+#define TIMEOUT 2
+#define RESET 4
+#define LOCKED 8
+#define ABORTED 16
+
+#define READ 0
+#define WRITE 1
+#define OTHER 2
+
+#define HD(cmd) ((hostdata *)&(cmd->device->host->hostdata))
+#define CD(cmd) ((struct eata_ccb *)(cmd->host_scribble))
+#define SD(host) ((hostdata *)&(host->hostdata))
+
+/***********************************************
+ * EATA Command & Register definitions *
+ ***********************************************/
+#define PCI_REG_DPTconfig 0x40
+#define PCI_REG_PumpModeAddress 0x44
+#define PCI_REG_PumpModeData 0x48
+#define PCI_REG_ConfigParam1 0x50
+#define PCI_REG_ConfigParam2 0x54
+
+
+#define EATA_CMD_PIO_SETUPTEST 0xc6
+#define EATA_CMD_PIO_READ_CONFIG 0xf0
+#define EATA_CMD_PIO_SET_CONFIG 0xf1
+#define EATA_CMD_PIO_SEND_CP 0xf2
+#define EATA_CMD_PIO_RECEIVE_SP 0xf3
+#define EATA_CMD_PIO_TRUNC 0xf4
+
+#define EATA_CMD_RESET 0xf9
+#define EATA_CMD_IMMEDIATE 0xfa
+
+#define EATA_CMD_DMA_READ_CONFIG 0xfd
+#define EATA_CMD_DMA_SET_CONFIG 0xfe
+#define EATA_CMD_DMA_SEND_CP 0xff
+
+#define ECS_EMULATE_SENSE 0xd4
+
+#define EATA_GENERIC_ABORT 0x00
+#define EATA_SPECIFIC_RESET 0x01
+#define EATA_BUS_RESET 0x02
+#define EATA_SPECIFIC_ABORT 0x03
+#define EATA_QUIET_INTR 0x04
+#define EATA_COLD_BOOT_HBA 0x06 /* Only as a last resort */
+#define EATA_FORCE_IO 0x07
+
+#define HA_CTRLREG 0x206 /* control register for HBA */
+#define HA_CTRL_DISINT 0x02 /* CTRLREG: disable interrupts */
+#define HA_CTRL_RESCPU 0x04 /* CTRLREG: reset processor */
+#define HA_CTRL_8HEADS 0x08 /* CTRLREG: set for drives with*
+ * >=8 heads (WD1003 rudimentary :-) */
+
+#define HA_WCOMMAND 0x07 /* command register offset */
+#define HA_WIFC 0x06 /* immediate command offset */
+#define HA_WCODE 0x05
+#define HA_WCODE2 0x04
+#define HA_WDMAADDR 0x02 /* DMA address LSB offset */
+#define HA_RAUXSTAT 0x08 /* aux status register offset*/
+#define HA_RSTATUS 0x07 /* status register offset */
+#define HA_RDATA 0x00 /* data register (16bit) */
+#define HA_WDATA 0x00 /* data register (16bit) */
+
+#define HA_ABUSY 0x01 /* aux busy bit */
+#define HA_AIRQ 0x02 /* aux IRQ pending bit */
+#define HA_SERROR 0x01 /* pr. command ended in error*/
+#define HA_SMORE 0x02 /* more data soon to come */
+#define HA_SCORR 0x04 /* data corrected */
+#define HA_SDRQ 0x08 /* data request active */
+#define HA_SSC 0x10 /* seek complete */
+#define HA_SFAULT 0x20 /* write fault */
+#define HA_SREADY 0x40 /* drive ready */
+#define HA_SBUSY 0x80 /* drive busy */
+#define HA_SDRDY HA_SSC+HA_SREADY+HA_SDRQ
+
+/**********************************************
+ * Message definitions *
+ **********************************************/
+
+#define HA_NO_ERROR 0x00 /* No Error */
+#define HA_ERR_SEL_TO 0x01 /* Selection Timeout */
+#define HA_ERR_CMD_TO 0x02 /* Command Timeout */
+#define HA_BUS_RESET 0x03 /* SCSI Bus Reset Received */
+#define HA_INIT_POWERUP 0x04 /* Initial Controller Power-up */
+#define HA_UNX_BUSPHASE 0x05 /* Unexpected Bus Phase */
+#define HA_UNX_BUS_FREE 0x06 /* Unexpected Bus Free */
+#define HA_BUS_PARITY 0x07 /* Bus Parity Error */
+#define HA_SCSI_HUNG 0x08 /* SCSI Hung */
+#define HA_UNX_MSGRJCT 0x09 /* Unexpected Message Rejected */
+#define HA_RESET_STUCK 0x0a /* SCSI Bus Reset Stuck */
+#define HA_RSENSE_FAIL 0x0b /* Auto Request-Sense Failed */
+#define HA_PARITY_ERR 0x0c /* Controller Ram Parity Error */
+#define HA_CP_ABORT_NA 0x0d /* Abort Message sent to non-active cmd */
+#define HA_CP_ABORTED 0x0e /* Abort Message sent to active cmd */
+#define HA_CP_RESET_NA 0x0f /* Reset Message sent to non-active cmd */
+#define HA_CP_RESET 0x10 /* Reset Message sent to active cmd */
+#define HA_ECC_ERR 0x11 /* Controller Ram ECC Error */
+#define HA_PCI_PARITY 0x12 /* PCI Parity Error */
+#define HA_PCI_MABORT 0x13 /* PCI Master Abort */
+#define HA_PCI_TABORT 0x14 /* PCI Target Abort */
+#define HA_PCI_STABORT 0x15 /* PCI Signaled Target Abort */
+
+/**********************************************
+ * Other definitions *
+ **********************************************/
+
+struct reg_bit { /* reading this one will clear the interrupt */
+ __u8 error:1; /* previous command ended in an error */
+ __u8 more:1; /* more DATA coming soon, poll BSY & DRQ (PIO) */
+ __u8 corr:1; /* data read was successfully corrected with ECC*/
+ __u8 drq:1; /* data request active */
+ __u8 sc:1; /* seek complete */
+ __u8 fault:1; /* write fault */
+ __u8 ready:1; /* drive ready */
+ __u8 busy:1; /* controller busy */
+};
+
+struct reg_abit { /* reading this won't clear the interrupt */
+ __u8 abusy:1; /* auxiliary busy */
+ __u8 irq:1; /* set when drive interrupt is asserted */
+ __u8 dummy:6;
+};
+
+struct eata_register { /* EATA register set */
+ __u8 data_reg[2]; /* R, couldn't figure this one out */
+ __u8 cp_addr[4]; /* W, CP address register */
+ union {
+ __u8 command; /* W, command code: [read|set] conf, send CP*/
+ struct reg_bit status; /* R, see register_bit1 */
+ __u8 statusbyte;
+ } ovr;
+ struct reg_abit aux_stat; /* R, see register_bit2 */
+};
+
+struct get_conf { /* Read Configuration Array */
+ __u32 len; /* Should return 0x22, 0x24, etc */
+ __u32 signature; /* Signature MUST be "EATA" */
+ __u8 version2:4,
+ version:4; /* EATA Version level */
+ __u8 OCS_enabled:1, /* Overlap Command Support enabled */
+ TAR_support:1, /* SCSI Target Mode supported */
+ TRNXFR:1, /* Truncate Transfer Cmd not necessary *
+ * Only used in PIO Mode */
+ MORE_support:1, /* MORE supported (only PIO Mode) */
+ DMA_support:1, /* DMA supported Driver uses only *
+ * this mode */
+ DMA_valid:1, /* DRQ value in Byte 30 is valid */
+ ATA:1, /* ATA device connected (not supported) */
+ HAA_valid:1; /* Hostadapter Address is valid */
+
+ __u16 cppadlen; /* Number of pad bytes send after CD data *
+ * set to zero for DMA commands */
+ __u8 scsi_id[4]; /* SCSI ID of controller 2-0 Byte 0 res. *
+ * if not, zero is returned */
+ __u32 cplen; /* CP length: number of valid cp bytes */
+ __u32 splen; /* Number of bytes returned after *
+ * Receive SP command */
+ __u16 queuesiz; /* max number of queueable CPs */
+ __u16 dummy;
+ __u16 SGsiz; /* max number of SG table entries */
+ __u8 IRQ:4, /* IRQ used this HA */
+ IRQ_TR:1, /* IRQ Trigger: 0=edge, 1=level */
+ SECOND:1, /* This is a secondary controller */
+ DMA_channel:2; /* DRQ index, DRQ is 2comp of DRQX */
+ __u8 sync; /* device at ID 7 tru 0 is running in *
+ * synchronous mode, this will disappear */
+ __u8 DSBLE:1, /* ISA i/o addressing is disabled */
+ FORCADR:1, /* i/o address has been forced */
+ SG_64K:1,
+ SG_UAE:1,
+ :4;
+ __u8 MAX_ID:5, /* Max number of SCSI target IDs */
+ MAX_CHAN:3; /* Number of SCSI busses on HBA */
+ __u8 MAX_LUN; /* Max number of LUNs */
+ __u8 :3,
+ AUTOTRM:1,
+ M1_inst:1,
+ ID_qest:1, /* Raidnum ID is questionable */
+ is_PCI:1, /* HBA is PCI */
+ is_EISA:1; /* HBA is EISA */
+ __u8 RAIDNUM; /* unique HBA identifier */
+ __u8 unused[474];
+};
+
+struct eata_sg_list
+{
+ __u32 data;
+ __u32 len;
+};
+
+struct eata_ccb { /* Send Command Packet structure */
+
+ __u8 SCSI_Reset:1, /* Cause a SCSI Bus reset on the cmd */
+ HBA_Init:1, /* Cause Controller to reinitialize */
+ Auto_Req_Sen:1, /* Do Auto Request Sense on errors */
+ scatter:1, /* Data Ptr points to a SG Packet */
+ Resrvd:1, /* RFU */
+ Interpret:1, /* Interpret the SCSI cdb of own use */
+ DataOut:1, /* Data Out phase with command */
+ DataIn:1; /* Data In phase with command */
+ __u8 reqlen; /* Request Sense Length *
+ * Valid if Auto_Req_Sen=1 */
+ __u8 unused[3];
+ __u8 FWNEST:1, /* send cmd to phys RAID component */
+ unused2:7;
+ __u8 Phsunit:1, /* physical unit on mirrored pair */
+ I_AT:1, /* inhibit address translation */
+ I_HBA_C:1, /* HBA inhibit caching */
+ unused3:5;
+
+ __u8 cp_id:5, /* SCSI Device ID of target */
+ cp_channel:3; /* SCSI Channel # of HBA */
+ __u8 cp_lun:3,
+ :2,
+ cp_luntar:1, /* CP is for target ROUTINE */
+ cp_dispri:1, /* Grant disconnect privilege */
+ cp_identify:1; /* Always TRUE */
+ __u8 cp_msg1; /* Message bytes 0-3 */
+ __u8 cp_msg2;
+ __u8 cp_msg3;
+ __u8 cp_cdb[12]; /* Command Descriptor Block */
+ __u32 cp_datalen; /* Data Transfer Length *
+ * If scatter=1 len of sg package */
+ void *cp_viraddr; /* address of this ccb */
+ __u32 cp_dataDMA; /* Data Address, if scatter=1 *
+ * address of scatter packet */
+ __u32 cp_statDMA; /* address for Status Packet */
+ __u32 cp_reqDMA; /* Request Sense Address, used if *
+ * CP command ends with error */
+ /* Additional CP info begins here */
+ __u32 timestamp; /* Needed to measure command latency */
+ __u32 timeout;
+ __u8 sizeindex;
+ __u8 rw_latency;
+ __u8 retries;
+ __u8 status; /* status of this queueslot */
+ struct scsi_cmnd *cmd; /* address of cmd */
+ struct eata_sg_list *sg_list;
+};
+
+
+struct eata_sp {
+ __u8 hba_stat:7, /* HBA status */
+ EOC:1; /* True if command finished */
+ __u8 scsi_stat; /* Target SCSI status */
+ __u8 reserved[2];
+ __u32 residue_len; /* Number of bytes not transferred */
+ struct eata_ccb *ccb; /* Address set in COMMAND PACKET */
+ __u8 msg[12];
+};
+
+typedef struct hstd {
+ __u8 vendor[9];
+ __u8 name[18];
+ __u8 revision[6];
+ __u8 EATA_revision;
+ __u32 firmware_revision;
+ __u8 HBA_number;
+ __u8 bustype; /* bustype of HBA */
+ __u8 channel; /* # of avail. scsi channels */
+ __u8 state; /* state of HBA */
+ __u8 primary; /* true if primary */
+ __u8 more_support:1, /* HBA supports MORE flag */
+ immediate_support:1, /* HBA supports IMMEDIATE CMDs*/
+ broken_INQUIRY:1; /* This is an EISA HBA with *
+ * broken INQUIRY */
+ __u8 do_latency; /* Latency measurement flag */
+ __u32 reads[13];
+ __u32 writes[13];
+ __u32 reads_lat[12][4];
+ __u32 writes_lat[12][4];
+ __u32 all_lat[4];
+ __u8 resetlevel[MAXCHANNEL];
+ __u32 last_ccb; /* Last used ccb */
+ __u32 cplen; /* size of CP in words */
+ __u16 cppadlen; /* pad length of cp in words */
+ __u16 queuesize;
+ __u16 sgsize; /* # of entries in the SG list*/
+ __u16 devflags; /* bits set for detected devices */
+ __u8 hostid; /* SCSI ID of HBA */
+ __u8 moresupport; /* HBA supports MORE flag */
+ struct Scsi_Host *next;
+ struct Scsi_Host *prev;
+ struct pci_dev *pdev; /* PCI device or NULL for non PCI */
+ struct eata_sp sp; /* status packet */
+ struct eata_ccb ccb[0]; /* ccb array begins here */
+}hostdata;
+
+/* structure for max. 2 emulated drives */
+struct drive_geom_emul {
+ __u8 trans; /* translation flag 1=transl */
+ __u8 channel; /* SCSI channel number */
+ __u8 HBA; /* HBA number (prim/sec) */
+ __u8 id; /* drive id */
+ __u8 lun; /* drive lun */
+ __u32 heads; /* number of heads */
+ __u32 sectors; /* number of sectors */
+ __u32 cylinder; /* number of cylinders */
+};
+
+struct geom_emul {
+ __u8 bios_drives; /* number of emulated drives */
+ struct drive_geom_emul drv[2]; /* drive structures */
+};
+
+#endif /* _EATA_GENERIC_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
new file mode 100644
index 000000000..ca8003f0d
--- /dev/null
+++ b/drivers/scsi/eata_pio.c
@@ -0,0 +1,965 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI PIO driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all EATA-PIO boards *
+ * -only supports DASD devices *
+ * *
+ * (c)1993-96 Michael Neuffer, Alfred Arnold *
+ * neuffer@goofy.zdv.uni-mainz.de *
+ * a.arnold@kfa-juelich.de *
+ * *
+ * Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for *
+ * Linux 2.5.x and the newer locking and error handling *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ * For the avoidance of doubt the "preferred form" of this *
+ * code is one which is in an open non patent encumbered *
+ * format. Where cryptographic key signing forms part of *
+ * the process of creating an executable the information *
+ * including keys needed to generate an equivalently *
+ * functional executable are deemed to be part of the *
+ * source code are deemed to be part of the source code. *
+ * *
+ ************************************************************
+ * last change: 2002/11/02 OS: Linux 2.5.45 *
+ ************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "eata_generic.h"
+#include "eata_pio.h"
+
+
+static unsigned int ISAbases[MAXISA] = {
+ 0x1F0, 0x170, 0x330, 0x230
+};
+
+static unsigned int ISAirqs[MAXISA] = {
+ 14, 12, 15, 11
+};
+
+static unsigned char EISAbases[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+static unsigned int registered_HBAs;
+static struct Scsi_Host *last_HBA;
+static struct Scsi_Host *first_HBA;
+static unsigned char reg_IRQ[16];
+static unsigned char reg_IRQL[16];
+static unsigned long int_counter;
+static unsigned long queue_counter;
+
+static struct scsi_host_template driver_template;
+
+static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+ seq_printf(m, "EATA (Extended Attachment) PIO driver version: "
+ "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+ seq_printf(m, "queued commands: %10ld\n"
+ "processed interrupts:%10ld\n", queue_counter, int_counter);
+ seq_printf(m, "\nscsi%-2d: HBA %.10s\n",
+ shost->host_no, SD(shost)->name);
+ seq_printf(m, "Firmware revision: v%s\n",
+ SD(shost)->revision);
+ seq_puts(m, "IO: PIO\n");
+ seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base);
+ seq_printf(m, "Host Bus: %s\n",
+ (SD(shost)->bustype == 'P')?"PCI ":
+ (SD(shost)->bustype == 'E')?"EISA":"ISA ");
+ return 0;
+}
+
+static int eata_pio_release(struct Scsi_Host *sh)
+{
+ hostdata *hd = SD(sh);
+ if (sh->irq && reg_IRQ[sh->irq] == 1)
+ free_irq(sh->irq, NULL);
+ else
+ reg_IRQ[sh->irq]--;
+ if (SD(sh)->channel == 0) {
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ /* At this point the PCI reference can go */
+ if (hd->pdev)
+ pci_dev_put(hd->pdev);
+ return 1;
+}
+
+static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
+{
+ SCp->ptr += Increment;
+ if ((SCp->this_residual -= Increment) == 0) {
+ if ((--SCp->buffers_residual) == 0)
+ SCp->Status = 0;
+ else {
+ SCp->buffer++;
+ SCp->ptr = sg_virt(SCp->buffer);
+ SCp->this_residual = SCp->buffer->length;
+ }
+ }
+}
+
+static irqreturn_t eata_pio_int_handler(int irq, void *dev_id);
+
+static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *dev = dev_id;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+ ret = eata_pio_int_handler(irq, dev_id);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ return ret;
+}
+
+static irqreturn_t eata_pio_int_handler(int irq, void *dev_id)
+{
+ unsigned int eata_stat = 0xfffff;
+ struct scsi_cmnd *cmd;
+ hostdata *hd;
+ struct eata_ccb *cp;
+ unsigned long base;
+ unsigned int x, z;
+ struct Scsi_Host *sh;
+ unsigned short zwickel = 0;
+ unsigned char stat, odd;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev)
+ {
+ if (sh->irq != irq)
+ continue;
+ if (inb(sh->base + HA_RSTATUS) & HA_SBUSY)
+ continue;
+
+ int_counter++;
+ ret = IRQ_HANDLED;
+
+ hd = SD(sh);
+
+ cp = &hd->ccb[0];
+ cmd = cp->cmd;
+ base = cmd->device->host->base;
+
+ do {
+ stat = inb(base + HA_RSTATUS);
+ if (stat & HA_SDRQ) {
+ if (cp->DataIn) {
+ z = 256;
+ odd = 0;
+ while ((cmd->SCp.Status) && ((z > 0) || (odd))) {
+ if (odd) {
+ *(cmd->SCp.ptr) = zwickel >> 8;
+ IncStat(&cmd->SCp, 1);
+ odd = 0;
+ }
+ x = min_t(unsigned int, z, cmd->SCp.this_residual / 2);
+ insw(base + HA_RDATA, cmd->SCp.ptr, x);
+ z -= x;
+ IncStat(&cmd->SCp, 2 * x);
+ if ((z > 0) && (cmd->SCp.this_residual == 1)) {
+ zwickel = inw(base + HA_RDATA);
+ *(cmd->SCp.ptr) = zwickel & 0xff;
+ IncStat(&cmd->SCp, 1);
+ z--;
+ odd = 1;
+ }
+ }
+ while (z > 0) {
+ zwickel = inw(base + HA_RDATA);
+ z--;
+ }
+ } else { /* cp->DataOut */
+
+ odd = 0;
+ z = 256;
+ while ((cmd->SCp.Status) && ((z > 0) || (odd))) {
+ if (odd) {
+ zwickel += *(cmd->SCp.ptr) << 8;
+ IncStat(&cmd->SCp, 1);
+ outw(zwickel, base + HA_RDATA);
+ z--;
+ odd = 0;
+ }
+ x = min_t(unsigned int, z, cmd->SCp.this_residual / 2);
+ outsw(base + HA_RDATA, cmd->SCp.ptr, x);
+ z -= x;
+ IncStat(&cmd->SCp, 2 * x);
+ if ((z > 0) && (cmd->SCp.this_residual == 1)) {
+ zwickel = *(cmd->SCp.ptr);
+ zwickel &= 0xff;
+ IncStat(&cmd->SCp, 1);
+ odd = 1;
+ }
+ }
+ while (z > 0 || odd) {
+ outw(zwickel, base + HA_RDATA);
+ z--;
+ odd = 0;
+ }
+ }
+ }
+ }
+ while ((stat & HA_SDRQ) || ((stat & HA_SMORE) && hd->moresupport));
+
+ /* terminate handler if HBA goes busy again, i.e. transfers
+ * more data */
+
+ if (stat & HA_SBUSY)
+ break;
+
+ /* OK, this is quite stupid, but I haven't found any correct
+ * way to get HBA&SCSI status so far */
+
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) {
+ cmd->result = (DID_OK << 16);
+ hd->devflags |= (1 << cp->cp_id);
+ } else if (hd->devflags & (1 << cp->cp_id))
+ cmd->result = (DID_OK << 16) + 0x02;
+ else
+ cmd->result = (DID_NO_CONNECT << 16);
+
+ if (cp->status == LOCKED) {
+ cp->status = FREE;
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n");
+ return ret;
+ }
+#if DBG_INTR2
+ if (stat != 0x50)
+ printk(KERN_DEBUG "stat: %#.2x, result: %#.8x\n", stat, cmd->result);
+#endif
+
+ cp->status = FREE; /* now we can release the slot */
+
+ cmd->scsi_done(cmd);
+ }
+
+ return ret;
+}
+
+static inline unsigned int eata_pio_send_command(unsigned long base, unsigned char command)
+{
+ unsigned int loop = 50;
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return 1;
+
+ /* Enable interrupts for HBA. It is not the best way to do it at this
+ * place, but I hope that it doesn't interfere with the IDE driver
+ * initialization this way */
+
+ outb(HA_CTRL_8HEADS, base + HA_CTRLREG);
+
+ outb(command, base + HA_WCOMMAND);
+ return 0;
+}
+
+static int eata_pio_queue_lck(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ unsigned int x, y;
+ unsigned long base;
+
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *cp;
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->device->host;
+ base = sh->base;
+
+ /* use only slot 0, as 2001 can handle only one cmd at a time */
+
+ y = x = 0;
+
+ if (hd->ccb[y].status != FREE) {
+
+ DBG(DBG_QUEUE, printk(KERN_EMERG "can_queue %d, x %d, y %d\n", sh->can_queue, x, y));
+#if DEBUG_EATA
+ panic(KERN_EMERG "eata_pio: run out of queue slots cmdno:%ld " "intrno: %ld\n", queue_counter, int_counter);
+#else
+ panic(KERN_EMERG "eata_pio: run out of queue slots....\n");
+#endif
+ }
+
+ cp = &hd->ccb[y];
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+
+ cp->status = USED; /* claim free slot */
+
+ DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
+ "eata_pio_queue 0x%p, y %d\n", cmd, y));
+
+ cmd->scsi_done = (void *) done;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ cp->DataOut = 1; /* Output mode */
+ else
+ cp->DataIn = 0; /* Input mode */
+
+ cp->Interpret = (cmd->device->id == hd->hostid);
+ cp->cp_datalen = cpu_to_be32(scsi_bufflen(cmd));
+ cp->Auto_Req_Sen = 0;
+ cp->cp_reqDMA = 0;
+ cp->reqlen = 0;
+
+ cp->cp_id = cmd->device->id;
+ cp->cp_lun = cmd->device->lun;
+ cp->cp_dispri = 0;
+ cp->cp_identify = 1;
+ memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+
+ cp->cp_statDMA = 0;
+
+ cp->cp_viraddr = cp;
+ cp->cmd = cmd;
+ cmd->host_scribble = (char *) &hd->ccb[y];
+
+ if (!scsi_bufflen(cmd)) {
+ cmd->SCp.buffers_residual = 1;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
+ cmd->SCp.buffer = NULL;
+ } else {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd);
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+ cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes
+ * are to transfer */
+
+ if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) {
+ cmd->result = DID_BUS_BUSY << 16;
+ scmd_printk(KERN_NOTICE, cmd,
+ "eata_pio_queue pid 0x%p, HBA busy, "
+ "returning DID_BUS_BUSY, done.\n", cmd);
+ done(cmd);
+ cp->status = FREE;
+ return 0;
+ }
+ /* FIXME: timeout */
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ cpu_relax();
+ outsw(base + HA_RDATA, cp, hd->cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (x = 0; x < hd->cppadlen; x++)
+ outw(0, base + HA_RDATA);
+
+ DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
+ "Queued base %#.4lx cmd: 0x%p "
+ "slot %d irq %d\n", sh->base, cmd, y, sh->irq));
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(eata_pio_queue)
+
+static int eata_pio_abort(struct scsi_cmnd *cmd)
+{
+ unsigned int loop = 100;
+
+ DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
+ "eata_pio_abort called pid: 0x%p\n", cmd));
+
+ while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk(KERN_WARNING "eata_pio: abort, timeout error.\n");
+ return FAILED;
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ return FAILED;
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_BUSY\n"));
+ /* We want to sleep a bit more here */
+ return FAILED; /* SNOOZE */
+ }
+ if (CD(cmd)->status == RESET) {
+ printk(KERN_WARNING "eata_pio: abort, command reset error.\n");
+ return FAILED;
+ }
+ if (CD(cmd)->status == LOCKED) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio: abort, queue slot " "locked.\n"));
+ return FAILED;
+ }
+ panic("eata_pio: abort: invalid slot status\n");
+}
+
+static int eata_pio_host_reset(struct scsi_cmnd *cmd)
+{
+ unsigned int x, limit = 0;
+ unsigned char success = 0;
+ struct scsi_cmnd *sp;
+ struct Scsi_Host *host = cmd->device->host;
+
+ DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
+ "eata_pio_reset called\n"));
+
+ spin_lock_irq(host->host_lock);
+
+ if (HD(cmd)->state == RESET) {
+ printk(KERN_WARNING "eata_pio_reset: exit, already in reset.\n");
+ spin_unlock_irq(host->host_lock);
+ return FAILED;
+ }
+
+ /* force all slots to be free */
+
+ for (x = 0; x < cmd->device->host->can_queue; x++) {
+
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+ printk(KERN_WARNING "eata_pio_reset: slot %d in reset.\n", x);
+
+ if (sp == NULL)
+ panic("eata_pio_reset: slot %d, sp==NULL.\n", x);
+ }
+
+ /* hard reset the HBA */
+ outb(EATA_CMD_RESET, cmd->device->host->base + HA_WCOMMAND);
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n"));
+ HD(cmd)->state = RESET;
+
+ spin_unlock_irq(host->host_lock);
+ msleep(3000);
+ spin_lock_irq(host->host_lock);
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: interrupts disabled, " "loops %d.\n", limit));
+
+ for (x = 0; x < cmd->device->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is terminated */
+ printk(KERN_WARNING "eata_pio_reset: reset ccb %d.\n", x);
+ HD(cmd)->ccb[x].status = FREE;
+
+ sp->scsi_done(sp);
+ }
+
+ HD(cmd)->state = 0;
+
+ spin_unlock_irq(host->host_lock);
+
+ if (success) { /* hmmm... */
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, success.\n"));
+ return SUCCESS;
+ } else {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, wakeup.\n"));
+ return FAILED;
+ }
+}
+
+static char *get_pio_board_data(unsigned long base, unsigned int irq, unsigned int id, unsigned long cplen, unsigned short cppadlen)
+{
+ struct eata_ccb cp;
+ static char buff[256];
+ int z;
+
+ memset(&cp, 0, sizeof(struct eata_ccb));
+ memset(buff, 0, sizeof(buff));
+
+ cp.DataIn = 1;
+ cp.Interpret = 1; /* Interpret command */
+
+ cp.cp_datalen = cpu_to_be32(254);
+ cp.cp_dataDMA = cpu_to_be32(0);
+
+ cp.cp_id = id;
+ cp.cp_lun = 0;
+
+ cp.cp_cdb[0] = INQUIRY;
+ cp.cp_cdb[1] = 0;
+ cp.cp_cdb[2] = 0;
+ cp.cp_cdb[3] = 0;
+ cp.cp_cdb[4] = 254;
+ cp.cp_cdb[5] = 0;
+
+ if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
+ return NULL;
+
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ cpu_relax();
+
+ outsw(base + HA_RDATA, &cp, cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (z = 0; z < cppadlen; z++)
+ outw(0, base + HA_RDATA);
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ cpu_relax();
+
+ if (inb(base + HA_RSTATUS) & HA_SERROR)
+ return NULL;
+ else if (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ return NULL;
+ else {
+ insw(base + HA_RDATA, &buff, 127);
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ return buff;
+ }
+}
+
+static int get_pio_conf_PIO(unsigned long base, struct get_conf *buf)
+{
+ unsigned long loop = HZ / 2;
+ int z;
+ unsigned short *p;
+
+ if (!request_region(base, 9, "eata_pio"))
+ return 0;
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ goto fail;
+
+ DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#lx\n", base));
+ eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = 50;
+ for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ goto fail;
+
+ loop = 50;
+ *p = inw(base + HA_RDATA);
+ }
+ if (inb(base + HA_RSTATUS) & HA_SERROR) {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during "
+ "transfer for HBA at %lx\n", base));
+ goto fail;
+ }
+
+ if (cpu_to_be32(EATA_SIGNATURE) != buf->signature)
+ goto fail;
+
+ DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found "
+ "at %#4lx EATA Level: %x\n",
+ base, (unsigned int) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+
+ if (!ALLOW_DMA_BOARDS) {
+ for (z = 0; z < MAXISA; z++)
+ if (base == ISAbases[z]) {
+ buf->IRQ = ISAirqs[z];
+ break;
+ }
+ }
+
+ return 1;
+
+ fail:
+ release_region(base, 9);
+ return 0;
+}
+
+static void print_pio_config(struct get_conf *gc)
+{
+ printk("Please check values: (read config data)\n");
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", be32_to_cpu(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support);
+ printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], be16_to_cpu(gc->queuesiz), be16_to_cpu(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest);
+}
+
+static unsigned int print_selftest(unsigned int base)
+{
+ unsigned char buffer[512];
+#ifdef VERBOSE_SETUP
+ int z;
+#endif
+
+ printk("eata_pio: executing controller self test & setup...\n");
+ while (inb(base + HA_RSTATUS) & HA_SBUSY);
+ outb(EATA_CMD_PIO_SETUPTEST, base + HA_WCOMMAND);
+ do {
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ /* nothing */ ;
+ if (inb(base + HA_RSTATUS) & HA_SDRQ) {
+ insw(base + HA_RDATA, &buffer, 256);
+#ifdef VERBOSE_SETUP
+ /* no beeps please... */
+ for (z = 0; z < 511 && buffer[z]; z++)
+ if (buffer[z] != 7)
+ printk("%c", buffer[z]);
+#endif
+ }
+ } while (inb(base + HA_RSTATUS) & (HA_SBUSY | HA_SDRQ));
+
+ return (!(inb(base + HA_RSTATUS) & HA_SERROR));
+}
+
+static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev)
+{
+ unsigned long size = 0;
+ char *buff;
+ unsigned long cplen;
+ unsigned short cppadlen;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+
+ DBG(DBG_REGISTER, print_pio_config(gc));
+
+ if (gc->DMA_support) {
+ printk("HBA at %#.4lx supports DMA. Please use EATA-DMA driver.\n", base);
+ if (!ALLOW_DMA_BOARDS)
+ return 0;
+ }
+
+ if ((buff = get_pio_board_data(base, gc->IRQ, gc->scsi_id[3], cplen = (cpu_to_be32(gc->cplen) + 1) / 2, cppadlen = (cpu_to_be16(gc->cppadlen) + 1) / 2)) == NULL) {
+ printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", base);
+ return 0;
+ }
+
+ if (!print_selftest(base) && !ALLOW_DMA_BOARDS) {
+ printk("HBA at %#lx failed while performing self test & setup.\n", base);
+ return 0;
+ }
+
+ size = sizeof(hostdata) + (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz));
+
+ sh = scsi_register(&driver_template, size);
+ if (sh == NULL)
+ return 0;
+
+ if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.\n", gc->IRQ);
+ return 0;
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ]) {
+ printk("Can't support more than one HBA on this IRQ,\n" " if the IRQ is edge triggered. Sorry.\n");
+ return 0;
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+ hd = SD(sh);
+
+ memset(hd->ccb, 0, (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz)));
+ memset(hd->reads, 0, sizeof(hd->reads));
+
+ strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor));
+ strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name));
+ SD(sh)->revision[0] = buff[32];
+ SD(sh)->revision[1] = buff[33];
+ SD(sh)->revision[2] = buff[34];
+ SD(sh)->revision[3] = '.';
+ SD(sh)->revision[4] = buff[35];
+ SD(sh)->revision[5] = 0;
+
+ switch (be32_to_cpu(gc->len)) {
+ case 0x1c:
+ SD(sh)->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ SD(sh)->EATA_revision = 'b';
+ break;
+ case 0x22:
+ SD(sh)->EATA_revision = 'c';
+ break;
+ case 0x24:
+ SD(sh)->EATA_revision = 'z';
+ default:
+ SD(sh)->EATA_revision = '?';
+ }
+
+ if (be32_to_cpu(gc->len) >= 0x22) {
+ if (gc->is_PCI)
+ hd->bustype = IS_PCI;
+ else if (gc->is_EISA)
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ } else {
+ if (buff[21] == '4')
+ hd->bustype = IS_PCI;
+ else if (buff[21] == '2')
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ }
+
+ SD(sh)->cplen = cplen;
+ SD(sh)->cppadlen = cppadlen;
+ SD(sh)->hostid = gc->scsi_id[3];
+ SD(sh)->devflags = 1 << gc->scsi_id[3];
+ SD(sh)->moresupport = gc->MORE_support;
+ sh->unique_id = base;
+ sh->base = base;
+ sh->io_port = base;
+ sh->n_io_port = 9;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = PIO;
+ sh->this_id = gc->scsi_id[3];
+ sh->can_queue = 1;
+ sh->cmd_per_lun = 1;
+ sh->sg_tablesize = SG_ALL;
+
+ hd->channel = 0;
+
+ hd->pdev = pci_dev_get(pdev); /* Keep a PCI reference */
+
+ sh->max_id = 8;
+ sh->max_lun = 8;
+
+ if (gc->SECOND)
+ hd->primary = 0;
+ else
+ hd->primary = 1;
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if (hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+ return (1);
+}
+
+static void find_pio_ISA(struct get_conf *buf)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (!ISAbases[i])
+ continue;
+ if (!get_pio_conf_PIO(ISAbases[i], buf))
+ continue;
+ if (!register_pio_HBA(ISAbases[i], buf, NULL))
+ release_region(ISAbases[i], 9);
+ else
+ ISAbases[i] = 0;
+ }
+ return;
+}
+
+static void find_pio_EISA(struct get_conf *buf)
+{
+ u32 base;
+ int i;
+
+#ifdef CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i]) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#ifdef CHECKPAL
+ pal1 = inb((u16) base - 8);
+ pal2 = inb((u16) base - 7);
+ pal3 = inb((u16) base - 6);
+
+ if (((pal1 == 0x12) && (pal2 == 0x14)) || ((pal1 == 0x38) && (pal2 == 0xa3) && (pal3 == 0x82)) || ((pal1 == 0x06) && (pal2 == 0x94) && (pal3 == 0x24))) {
+ DBG(DBG_PROBE, printk(KERN_NOTICE "EISA EATA id tags found: " "%x %x %x \n", (int) pal1, (int) pal2, (int) pal3));
+#endif
+ if (get_pio_conf_PIO(base, buf)) {
+ DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
+ if (buf->IRQ) {
+ if (!register_pio_HBA(base, buf, NULL))
+ release_region(base, 9);
+ } else {
+ printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n");
+ release_region(base, 9);
+ }
+ }
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#ifdef CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+static void find_pio_PCI(struct get_conf *buf)
+{
+#ifndef CONFIG_PCI
+ printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+ struct pci_dev *dev = NULL;
+ unsigned long base, x;
+
+ while ((dev = pci_get_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) {
+ DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev)));
+ if (pci_enable_device(dev))
+ continue;
+ pci_set_master(dev);
+ base = pci_resource_flags(dev, 0);
+ if (base & IORESOURCE_MEM) {
+ printk("eata_pio: invalid base address of device %s\n", pci_name(dev));
+ continue;
+ }
+ base = pci_resource_start(dev, 0);
+ /* EISA tag there ? */
+ if ((inb(base) == 0x12) && (inb(base + 1) == 0x14))
+ continue; /* Jep, it's forced, so move on */
+ base += 0x10; /* Now, THIS is the real address */
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_pio_conf_PIO(base, buf)) {
+ if (buf->FORCADR) { /* If the address is forced */
+ release_region(base, 9);
+ continue; /* we'll find it later */
+ }
+
+ /* OK. We made it till here, so we can go now
+ * and register it. We only have to check and
+ * eventually remove it from the EISA and ISA list
+ */
+
+ if (!register_pio_HBA(base, buf, dev)) {
+ release_region(base, 9);
+ continue;
+ }
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88) {
+ x = (base >> 12) & 0x0f;
+ EISAbases[x] = 0;
+ }
+ }
+#ifdef CHECK_BLINK
+ else if (check_blink_state(base)) {
+ printk("eata_pio: HBA is in BLINK state.\n" "Consult your HBAs manual to correct this.\n");
+ }
+#endif
+ }
+ }
+#endif /* #ifndef CONFIG_PCI */
+}
+
+static int eata_pio_detect(struct scsi_host_template *tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ find_pio_PCI(&gc);
+ find_pio_EISA(&gc);
+ find_pio_ISA(&gc);
+
+ for (i = 0; i < MAXIRQ; i++)
+ if (reg_IRQ[i])
+ request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) PIO driver version: %d.%d%s\n"
+ "(c) 1993-95 Michael Neuffer, neuffer@goofy.zdv.uni-mainz.de\n" " Alfred Arnold, a.arnold@kfa-juelich.de\n" "This release only supports DASD devices (harddisks)\n", VER_MAJOR, VER_MINOR, VER_SUB);
+
+ printk("Registered HBAs:\n");
+ printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4lx %2d %d %d %c"
+ " %2d %2d %2d\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ?
+ "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ",
+ HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id,
+ SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue,
+ HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun);
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ }
+ return (registered_HBAs);
+}
+
+static struct scsi_host_template driver_template = {
+ .proc_name = "eata_pio",
+ .name = "EATA (Extended Attachment) PIO driver",
+ .show_info = eata_pio_show_info,
+ .detect = eata_pio_detect,
+ .release = eata_pio_release,
+ .queuecommand = eata_pio_queue,
+ .eh_abort_handler = eata_pio_abort,
+ .eh_host_reset_handler = eata_pio_host_reset,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+MODULE_AUTHOR("Michael Neuffer, Alfred Arnold");
+MODULE_DESCRIPTION("EATA SCSI PIO driver");
+MODULE_LICENSE("GPL");
+
+#include "scsi_module.c"
diff --git a/drivers/scsi/eata_pio.h b/drivers/scsi/eata_pio.h
new file mode 100644
index 000000000..7deeb9357
--- /dev/null
+++ b/drivers/scsi/eata_pio.h
@@ -0,0 +1,53 @@
+/********************************************************
+* Header file for eata_pio.c Linux EATA-PIO SCSI driver *
+* (c) 1993-96 Michael Neuffer *
+*********************************************************
+* last change: 2002/11/02 *
+********************************************************/
+
+
+#ifndef _EATA_PIO_H
+#define _EATA_PIO_H
+
+#define VER_MAJOR 0
+#define VER_MINOR 0
+#define VER_SUB "1b"
+
+/************************************************************************
+ * Here you can switch parts of the code on and of *
+ ************************************************************************/
+
+#define VERBOSE_SETUP /* show startup screen of 2001 */
+#define ALLOW_DMA_BOARDS 1
+
+/************************************************************************
+ * Debug options. *
+ * Enable DEBUG and whichever options you require. *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bobs special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can be
+ * be read before they vanish of the top of
+ * the screen!
+ */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort) */
+
+#if DEBUG_EATA
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
+
+#endif /* _EATA_PIO_H */
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
new file mode 100644
index 000000000..78fdbfd9b
--- /dev/null
+++ b/drivers/scsi/esas2r/Kconfig
@@ -0,0 +1,5 @@
+config SCSI_ESAS2R
+ tristate "ATTO Technology's ExpressSAS RAID adapter driver"
+ depends on PCI && SCSI
+ ---help---
+ This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
new file mode 100644
index 000000000..c77160b8c
--- /dev/null
+++ b/drivers/scsi/esas2r/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o
+
+esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
+ esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
+ esas2r_vda.o esas2r_main.o
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
new file mode 100644
index 000000000..4aca3d52c
--- /dev/null
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -0,0 +1,1254 @@
+/* linux/drivers/scsi/esas2r/atioctl.h
+ * ATTO IOCTL Handling
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "atvda.h"
+
+#ifndef ATIOCTL_H
+#define ATIOCTL_H
+
+#define EXPRESS_IOCTL_SIGNATURE "Express"
+#define EXPRESS_IOCTL_SIGNATURE_SIZE 8
+
+/* structure definitions for IOCTls */
+
+struct __packed atto_express_ioctl_header {
+ u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
+ u8 return_code;
+
+#define IOCTL_SUCCESS 0
+#define IOCTL_ERR_INVCMD 101
+#define IOCTL_INIT_FAILED 102
+#define IOCTL_NOT_IMPLEMENTED 103
+#define IOCTL_BAD_CHANNEL 104
+#define IOCTL_TARGET_OVERRUN 105
+#define IOCTL_TARGET_NOT_ENABLED 106
+#define IOCTL_BAD_FLASH_IMGTYPE 107
+#define IOCTL_OUT_OF_RESOURCES 108
+#define IOCTL_GENERAL_ERROR 109
+#define IOCTL_INVALID_PARAM 110
+
+ u8 channel;
+ u8 retries;
+ u8 pad[5];
+};
+
+/*
+ * NOTE - if channel == 0xFF, the request is
+ * handled on the adapter it came in on.
+ */
+#define MAX_NODE_NAMES 256
+
+struct __packed atto_firmware_rw_request {
+ u8 function;
+ #define FUNC_FW_DOWNLOAD 0x09
+ #define FUNC_FW_UPLOAD 0x12
+
+ u8 img_type;
+ #define FW_IMG_FW 0x01
+ #define FW_IMG_BIOS 0x02
+ #define FW_IMG_NVR 0x03
+ #define FW_IMG_RAW 0x04
+ #define FW_IMG_FM_API 0x05
+ #define FW_IMG_FS_API 0x06
+
+ u8 pad[2];
+ u32 img_offset;
+ u32 img_size;
+ u8 image[0x80000];
+};
+
+struct __packed atto_param_rw_request {
+ u16 code;
+ char data_buffer[512];
+};
+
+#define MAX_CHANNEL 256
+
+struct __packed atto_channel_list {
+ u32 num_channels;
+ u8 channel[MAX_CHANNEL];
+};
+
+struct __packed atto_channel_info {
+ u8 major_rev;
+ u8 minor_rev;
+ u8 IRQ;
+ u8 revision_id;
+ u8 pci_bus;
+ u8 pci_dev_func;
+ u8 core_rev;
+ u8 host_no;
+ u16 device_id;
+ u16 vendor_id;
+ u16 ven_dev_id;
+ u8 pad[3];
+ u32 hbaapi_rev;
+};
+
+/*
+ * CSMI control codes
+ * class independent
+ */
+#define CSMI_CC_GET_DRVR_INFO 1
+#define CSMI_CC_GET_CNTLR_CFG 2
+#define CSMI_CC_GET_CNTLR_STS 3
+#define CSMI_CC_FW_DOWNLOAD 4
+
+/* RAID class */
+#define CSMI_CC_GET_RAID_INFO 10
+#define CSMI_CC_GET_RAID_CFG 11
+
+/* HBA class */
+#define CSMI_CC_GET_PHY_INFO 20
+#define CSMI_CC_SET_PHY_INFO 21
+#define CSMI_CC_GET_LINK_ERRORS 22
+#define CSMI_CC_SMP_PASSTHRU 23
+#define CSMI_CC_SSP_PASSTHRU 24
+#define CSMI_CC_STP_PASSTHRU 25
+#define CSMI_CC_GET_SATA_SIG 26
+#define CSMI_CC_GET_SCSI_ADDR 27
+#define CSMI_CC_GET_DEV_ADDR 28
+#define CSMI_CC_TASK_MGT 29
+#define CSMI_CC_GET_CONN_INFO 30
+
+/* PHY class */
+#define CSMI_CC_PHY_CTRL 60
+
+/*
+ * CSMI status codes
+ * class independent
+ */
+#define CSMI_STS_SUCCESS 0
+#define CSMI_STS_FAILED 1
+#define CSMI_STS_BAD_CTRL_CODE 2
+#define CSMI_STS_INV_PARAM 3
+#define CSMI_STS_WRITE_ATTEMPTED 4
+
+/* RAID class */
+#define CSMI_STS_INV_RAID_SET 1000
+
+/* HBA class */
+#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS
+#define CSMI_STS_PHY_UNCHANGEABLE 2000
+#define CSMI_STS_INV_LINK_RATE 2001
+#define CSMI_STS_INV_PHY 2002
+#define CSMI_STS_INV_PHY_FOR_PORT 2003
+#define CSMI_STS_PHY_UNSELECTABLE 2004
+#define CSMI_STS_SELECT_PHY_OR_PORT 2005
+#define CSMI_STS_INV_PORT 2006
+#define CSMI_STS_PORT_UNSELECTABLE 2007
+#define CSMI_STS_CONNECTION_FAILED 2008
+#define CSMI_STS_NO_SATA_DEV 2009
+#define CSMI_STS_NO_SATA_SIGNATURE 2010
+#define CSMI_STS_SCSI_EMULATION 2011
+#define CSMI_STS_NOT_AN_END_DEV 2012
+#define CSMI_STS_NO_SCSI_ADDR 2013
+#define CSMI_STS_NO_DEV_ADDR 2014
+
+/* CSMI class independent structures */
+struct atto_csmi_get_driver_info {
+ char name[81];
+ char description[81];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 build_rev;
+ u16 release_rev;
+ u16 csmi_major_rev;
+ u16 csmi_minor_rev;
+ #define CSMI_MAJOR_REV_0_81 0
+ #define CSMI_MINOR_REV_0_81 81
+
+ #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81
+ #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81
+};
+
+struct atto_csmi_get_pci_bus_addr {
+ u8 bus_num;
+ u8 device_num;
+ u8 function_num;
+ u8 reserved;
+};
+
+struct atto_csmi_get_cntlr_cfg {
+ u32 base_io_addr;
+
+ struct {
+ u32 base_memaddr_lo;
+ u32 base_memaddr_hi;
+ };
+
+ u32 board_id;
+ u16 slot_num;
+ #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF
+
+ u8 cntlr_class;
+ #define CSMI_CNTLR_CLASS_HBA 5
+
+ u8 io_bus_type;
+ #define CSMI_BUS_TYPE_PCI 3
+ #define CSMI_BUS_TYPE_PCMCIA 4
+
+ union {
+ struct atto_csmi_get_pci_bus_addr pci_addr;
+ u8 reserved[32];
+ };
+
+ char serial_num[81];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 build_rev;
+ u16 release_rev;
+ u16 bios_major_rev;
+ u16 bios_minor_rev;
+ u16 bios_build_rev;
+ u16 bios_release_rev;
+ u32 cntlr_flags;
+ #define CSMI_CNTLRF_SAS_HBA 0x00000001
+ #define CSMI_CNTLRF_SAS_RAID 0x00000002
+ #define CSMI_CNTLRF_SATA_HBA 0x00000004
+ #define CSMI_CNTLRF_SATA_RAID 0x00000008
+ #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000
+ #define CSMI_CNTLRF_FWD_ONLINE 0x00020000
+ #define CSMI_CNTLRF_FWD_SRESET 0x00040000
+ #define CSMI_CNTLRF_FWD_HRESET 0x00080000
+ #define CSMI_CNTLRF_FWD_RROM 0x00100000
+
+ u16 rrom_major_rev;
+ u16 rrom_minor_rev;
+ u16 rrom_build_rev;
+ u16 rrom_release_rev;
+ u16 rrom_biosmajor_rev;
+ u16 rrom_biosminor_rev;
+ u16 rrom_biosbuild_rev;
+ u16 rrom_biosrelease_rev;
+ u8 reserved2[7];
+};
+
+struct atto_csmi_get_cntlr_sts {
+ u32 status;
+ #define CSMI_CNTLR_STS_GOOD 1
+ #define CSMI_CNTLR_STS_FAILED 2
+ #define CSMI_CNTLR_STS_OFFLINE 3
+ #define CSMI_CNTLR_STS_POWEROFF 4
+
+ u32 offline_reason;
+ #define CSMI_OFFLINE_NO_REASON 0
+ #define CSMI_OFFLINE_INITIALIZING 1
+ #define CSMI_OFFLINE_BUS_DEGRADED 2
+ #define CSMI_OFFLINE_BUS_FAILURE 3
+
+ u8 reserved[28];
+};
+
+struct atto_csmi_fw_download {
+ u32 buffer_len;
+ u32 download_flags;
+ #define CSMI_FWDF_VALIDATE 0x00000001
+ #define CSMI_FWDF_SOFT_RESET 0x00000002
+ #define CSMI_FWDF_HARD_RESET 0x00000004
+
+ u8 reserved[32];
+ u16 status;
+ #define CSMI_FWD_STS_SUCCESS 0
+ #define CSMI_FWD_STS_FAILED 1
+ #define CSMI_FWD_STS_USING_RROM 2
+ #define CSMI_FWD_STS_REJECT 3
+ #define CSMI_FWD_STS_DOWNREV 4
+
+ u16 severity;
+ #define CSMI_FWD_SEV_INFO 0
+ #define CSMI_FWD_SEV_WARNING 1
+ #define CSMI_FWD_SEV_ERROR 2
+ #define CSMI_FWD_SEV_FATAL 3
+
+};
+
+/* CSMI RAID class structures */
+struct atto_csmi_get_raid_info {
+ u32 num_raid_sets;
+ u32 max_drivesper_set;
+ u8 reserved[92];
+};
+
+struct atto_csmi_raid_drives {
+ char model[40];
+ char firmware[8];
+ char serial_num[40];
+ u8 sas_addr[8];
+ u8 lun[8];
+ u8 drive_sts;
+ #define CSMI_DRV_STS_OK 0
+ #define CSMI_DRV_STS_REBUILDING 1
+ #define CSMI_DRV_STS_FAILED 2
+ #define CSMI_DRV_STS_DEGRADED 3
+
+ u8 drive_usage;
+ #define CSMI_DRV_USE_NOT_USED 0
+ #define CSMI_DRV_USE_MEMBER 1
+ #define CSMI_DRV_USE_SPARE 2
+
+ u8 reserved[30]; /* spec says 22 */
+};
+
+struct atto_csmi_get_raid_cfg {
+ u32 raid_set_index;
+ u32 capacity;
+ u32 stripe_size;
+ u8 raid_type;
+ u8 status;
+ u8 information;
+ u8 drive_cnt;
+ u8 reserved[20];
+
+ struct atto_csmi_raid_drives drives[1];
+};
+
+/* CSMI HBA class structures */
+struct atto_csmi_phy_entity {
+ u8 ident_frame[0x1C];
+ u8 port_id;
+ u8 neg_link_rate;
+ u8 min_link_rate;
+ u8 max_link_rate;
+ u8 phy_change_cnt;
+ u8 auto_discover;
+ #define CSMI_DISC_NOT_SUPPORTED 0x00
+ #define CSMI_DISC_NOT_STARTED 0x01
+ #define CSMI_DISC_IN_PROGRESS 0x02
+ #define CSMI_DISC_COMPLETE 0x03
+ #define CSMI_DISC_ERROR 0x04
+
+ u8 reserved[2];
+ u8 attach_ident_frame[0x1C];
+};
+
+struct atto_csmi_get_phy_info {
+ u8 number_of_phys;
+ u8 reserved[3];
+ struct atto_csmi_phy_entity
+ phy[32];
+};
+
+struct atto_csmi_set_phy_info {
+ u8 phy_id;
+ u8 neg_link_rate;
+ #define CSMI_NEG_RATE_NEGOTIATE 0x00
+ #define CSMI_NEG_RATE_PHY_DIS 0x01
+
+ u8 prog_minlink_rate;
+ u8 prog_maxlink_rate;
+ u8 signal_class;
+ #define CSMI_SIG_CLASS_UNKNOWN 0x00
+ #define CSMI_SIG_CLASS_DIRECT 0x01
+ #define CSMI_SIG_CLASS_SERVER 0x02
+ #define CSMI_SIG_CLASS_ENCLOSURE 0x03
+
+ u8 reserved[3];
+};
+
+struct atto_csmi_get_link_errors {
+ u8 phy_id;
+ u8 reset_cnts;
+ #define CSMI_RESET_CNTS_NO 0x00
+ #define CSMI_RESET_CNTS_YES 0x01
+
+ u8 reserved[2];
+ u32 inv_dw_cnt;
+ u32 disp_err_cnt;
+ u32 loss_ofdw_sync_cnt;
+ u32 phy_reseterr_cnt;
+
+ /*
+ * The following field has been added by ATTO for ease of
+ * implementation of additional statistics. Drivers must validate
+ * the length of the IOCTL payload prior to filling them in so CSMI
+ * complaint applications function correctly.
+ */
+
+ u32 crc_err_cnt;
+};
+
+struct atto_csmi_smp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u32 req_len;
+ u8 smp_req[1020];
+ u8 conn_sts;
+ u8 reserved2[3];
+ u32 rsp_len;
+ u8 smp_rsp[1020];
+};
+
+struct atto_csmi_ssp_passthru_sts {
+ u8 conn_sts;
+ u8 reserved[3];
+ u8 data_present;
+ u8 status;
+ u16 rsp_length;
+ u8 rsp[256];
+ u32 data_bytes;
+};
+
+struct atto_csmi_ssp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u8 lun[8];
+ u8 cdb_len;
+ u8 add_cdb_len;
+ u8 reserved2[2];
+ u8 cdb[16];
+ u32 flags;
+ #define CSMI_SSPF_DD_READ 0x00000001
+ #define CSMI_SSPF_DD_WRITE 0x00000002
+ #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
+ #define CSMI_SSPF_TA_SIMPLE 0x00000000
+ #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010
+ #define CSMI_SSPF_TA_ORDERED 0x00000020
+ #define CSMI_SSPF_TA_ACA 0x00000040
+
+ u8 add_cdb[24];
+ u32 data_len;
+
+ struct atto_csmi_ssp_passthru_sts sts;
+};
+
+struct atto_csmi_stp_passthru_sts {
+ u8 conn_sts;
+ u8 reserved[3];
+ u8 sts_fis[20];
+ u32 scr[16];
+ u32 data_bytes;
+};
+
+struct atto_csmi_stp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u8 reserved2[4];
+ u8 command_fis[20];
+ u32 flags;
+ #define CSMI_STPF_DD_READ 0x00000001
+ #define CSMI_STPF_DD_WRITE 0x00000002
+ #define CSMI_STPF_DD_UNSPECIFIED 0x00000004
+ #define CSMI_STPF_PIO 0x00000010
+ #define CSMI_STPF_DMA 0x00000020
+ #define CSMI_STPF_PACKET 0x00000040
+ #define CSMI_STPF_DMA_QUEUED 0x00000080
+ #define CSMI_STPF_EXECUTE_DIAG 0x00000100
+ #define CSMI_STPF_RESET_DEVICE 0x00000200
+
+ u32 data_len;
+
+ struct atto_csmi_stp_passthru_sts sts;
+};
+
+struct atto_csmi_get_sata_sig {
+ u8 phy_id;
+ u8 reserved[3];
+ u8 reg_dth_fis[20];
+};
+
+struct atto_csmi_get_scsi_addr {
+ u8 sas_addr[8];
+ u8 sas_lun[8];
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+};
+
+struct atto_csmi_get_dev_addr {
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+ u8 sas_addr[8];
+ u8 sas_lun[8];
+};
+
+struct atto_csmi_task_mgmt {
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+ u32 flags;
+ #define CSMI_TMF_TASK_IU 0x00000001
+ #define CSMI_TMF_HARD_RST 0x00000002
+ #define CSMI_TMF_SUPPRESS_RSLT 0x00000004
+
+ u32 queue_tag;
+ u32 reserved;
+ u8 task_mgt_func;
+ u8 reserved2[7];
+ u32 information;
+ #define CSMI_TM_INFO_TEST 1
+ #define CSMI_TM_INFO_EXCEEDED 2
+ #define CSMI_TM_INFO_DEMAND 3
+ #define CSMI_TM_INFO_TRIGGER 4
+
+ struct atto_csmi_ssp_passthru_sts sts;
+
+};
+
+struct atto_csmi_get_conn_info {
+ u32 pinout;
+ #define CSMI_CON_UNKNOWN 0x00000001
+ #define CSMI_CON_SFF_8482 0x00000002
+ #define CSMI_CON_SFF_8470_LANE_1 0x00000100
+ #define CSMI_CON_SFF_8470_LANE_2 0x00000200
+ #define CSMI_CON_SFF_8470_LANE_3 0x00000400
+ #define CSMI_CON_SFF_8470_LANE_4 0x00000800
+ #define CSMI_CON_SFF_8484_LANE_1 0x00010000
+ #define CSMI_CON_SFF_8484_LANE_2 0x00020000
+ #define CSMI_CON_SFF_8484_LANE_3 0x00040000
+ #define CSMI_CON_SFF_8484_LANE_4 0x00080000
+
+ u8 connector[16];
+ u8 location;
+ #define CSMI_CON_INTERNAL 0x02
+ #define CSMI_CON_EXTERNAL 0x04
+ #define CSMI_CON_SWITCHABLE 0x08
+ #define CSMI_CON_AUTO 0x10
+
+ u8 reserved[15];
+};
+
+/* CSMI PHY class structures */
+struct atto_csmi_character {
+ u8 type_flags;
+ #define CSMI_CTF_POS_DISP 0x01
+ #define CSMI_CTF_NEG_DISP 0x02
+ #define CSMI_CTF_CTRL_CHAR 0x04
+
+ u8 value;
+};
+
+struct atto_csmi_pc_ctrl {
+ u8 type;
+ #define CSMI_PC_TYPE_UNDEFINED 0x00
+ #define CSMI_PC_TYPE_SATA 0x01
+ #define CSMI_PC_TYPE_SAS 0x02
+ u8 rate;
+ u8 reserved[6];
+ u32 vendor_unique[8];
+ u32 tx_flags;
+ #define CSMI_PC_TXF_PREEMP_DIS 0x00000001
+
+ signed char tx_amplitude;
+ signed char tx_preemphasis;
+ signed char tx_slew_rate;
+ signed char tx_reserved[13];
+ u8 tx_vendor_unique[64];
+ u32 rx_flags;
+ #define CSMI_PC_RXF_EQ_DIS 0x00000001
+
+ signed char rx_threshold;
+ signed char rx_equalization_gain;
+ signed char rx_reserved[14];
+ u8 rx_vendor_unique[64];
+ u32 pattern_flags;
+ #define CSMI_PC_PATF_FIXED 0x00000001
+ #define CSMI_PC_PATF_DIS_SCR 0x00000002
+ #define CSMI_PC_PATF_DIS_ALIGN 0x00000004
+ #define CSMI_PC_PATF_DIS_SSC 0x00000008
+
+ u8 fixed_pattern;
+ #define CSMI_PC_FP_CJPAT 0x00000001
+ #define CSMI_PC_FP_ALIGN 0x00000002
+
+ u8 user_pattern_len;
+ u8 pattern_reserved[6];
+
+ struct atto_csmi_character user_pattern_buffer[16];
+};
+
+struct atto_csmi_phy_ctrl {
+ u32 function;
+ #define CSMI_PC_FUNC_GET_SETUP 0x00000100
+
+ u8 phy_id;
+ u16 len_of_cntl;
+ u8 num_of_cntls;
+ u8 reserved[4];
+ u32 link_flags;
+ #define CSMI_PHY_ACTIVATE_CTRL 0x00000001
+ #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
+ #define CSMI_PHY_AUTO_COMWAKE 0x00000004
+
+ u8 spinup_rate;
+ u8 link_reserved[7];
+ u32 vendor_unique[8];
+
+ struct atto_csmi_pc_ctrl control[1];
+};
+
+union atto_ioctl_csmi {
+ struct atto_csmi_get_driver_info drvr_info;
+ struct atto_csmi_get_cntlr_cfg cntlr_cfg;
+ struct atto_csmi_get_cntlr_sts cntlr_sts;
+ struct atto_csmi_fw_download fw_dwnld;
+ struct atto_csmi_get_raid_info raid_info;
+ struct atto_csmi_get_raid_cfg raid_cfg;
+ struct atto_csmi_get_phy_info get_phy_info;
+ struct atto_csmi_set_phy_info set_phy_info;
+ struct atto_csmi_get_link_errors link_errs;
+ struct atto_csmi_smp_passthru smp_pass_thru;
+ struct atto_csmi_ssp_passthru ssp_pass_thru;
+ struct atto_csmi_stp_passthru stp_pass_thru;
+ struct atto_csmi_task_mgmt tsk_mgt;
+ struct atto_csmi_get_sata_sig sata_sig;
+ struct atto_csmi_get_scsi_addr scsi_addr;
+ struct atto_csmi_get_dev_addr dev_addr;
+ struct atto_csmi_get_conn_info conn_info[32];
+ struct atto_csmi_phy_ctrl phy_ctrl;
+};
+
+struct atto_csmi {
+ u32 control_code;
+ u32 status;
+ union atto_ioctl_csmi data;
+};
+
+struct atto_module_info {
+ void *adapter;
+ void *pci_dev;
+ void *scsi_host;
+ unsigned short host_no;
+ union {
+ struct {
+ u64 node_name;
+ u64 port_name;
+ };
+ u64 sas_addr;
+ };
+};
+
+#define ATTO_FUNC_GET_ADAP_INFO 0x00
+#define ATTO_VER_GET_ADAP_INFO0 0
+#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0
+
+struct __packed atto_hba_get_adapter_info {
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 ss_vendor_id;
+ u16 ss_device_id;
+ u8 class_code[3];
+ u8 rev_id;
+ u8 bus_num;
+ u8 dev_num;
+ u8 func_num;
+ u8 link_width_max;
+ u8 link_width_curr;
+ #define ATTO_GAI_PCILW_UNKNOWN 0x00
+
+ u8 link_speed_max;
+ u8 link_speed_curr;
+ #define ATTO_GAI_PCILS_UNKNOWN 0x00
+ #define ATTO_GAI_PCILS_GEN1 0x01
+ #define ATTO_GAI_PCILS_GEN2 0x02
+ #define ATTO_GAI_PCILS_GEN3 0x03
+
+ u8 interrupt_mode;
+ #define ATTO_GAI_PCIIM_UNKNOWN 0x00
+ #define ATTO_GAI_PCIIM_LEGACY 0x01
+ #define ATTO_GAI_PCIIM_MSI 0x02
+ #define ATTO_GAI_PCIIM_MSIX 0x03
+
+ u8 msi_vector_cnt;
+ u8 reserved[19];
+ } pci;
+
+ u8 adap_type;
+ #define ATTO_GAI_AT_EPCIU320 0x00
+ #define ATTO_GAI_AT_ESASRAID 0x01
+ #define ATTO_GAI_AT_ESASRAID2 0x02
+ #define ATTO_GAI_AT_ESASHBA 0x03
+ #define ATTO_GAI_AT_ESASHBA2 0x04
+ #define ATTO_GAI_AT_CELERITY 0x05
+ #define ATTO_GAI_AT_CELERITY8 0x06
+ #define ATTO_GAI_AT_FASTFRAME 0x07
+ #define ATTO_GAI_AT_ESASHBA3 0x08
+ #define ATTO_GAI_AT_CELERITY16 0x09
+ #define ATTO_GAI_AT_TLSASHBA 0x0A
+ #define ATTO_GAI_AT_ESASHBA4 0x0B
+
+ u8 adap_flags;
+ #define ATTO_GAI_AF_DEGRADED 0x01
+ #define ATTO_GAI_AF_SPT_SUPP 0x02
+ #define ATTO_GAI_AF_DEVADDR_SUPP 0x04
+ #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
+ #define ATTO_GAI_AF_TEST_SUPP 0x10
+ #define ATTO_GAI_AF_DIAG_SUPP 0x20
+ #define ATTO_GAI_AF_VIRT_SES 0x40
+ #define ATTO_GAI_AF_CONN_CTRL 0x80
+
+ u8 num_ports;
+ u8 num_phys;
+ u8 drvr_rev_major;
+ u8 drvr_rev_minor;
+ u8 drvr_revsub_minor;
+ u8 drvr_rev_build;
+ char drvr_rev_ascii[16];
+ char drvr_name[32];
+ char firmware_rev[16];
+ char flash_rev[16];
+ char model_name_short[16];
+ char model_name[32];
+ u32 num_targets;
+ u32 num_targsper_bus;
+ u32 num_lunsper_targ;
+ u8 num_busses;
+ u8 num_connectors;
+ u8 adap_flags2;
+ #define ATTO_GAI_AF2_FCOE_SUPP 0x01
+ #define ATTO_GAI_AF2_NIC_SUPP 0x02
+ #define ATTO_GAI_AF2_LOCATE_SUPP 0x04
+ #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08
+ #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10
+ #define ATTO_GAI_AF2_NPIV_SUPP 0x20
+ #define ATTO_GAI_AF2_MP_SUPP 0x40
+
+ u8 num_temp_sensors;
+ u32 num_targets_backend;
+ u32 tunnel_flags;
+ #define ATTO_GAI_TF_MEM_RW 0x00000001
+ #define ATTO_GAI_TF_TRACE 0x00000002
+ #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004
+ #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008
+ #define ATTO_GAI_TF_PHY_CTRL 0x00000010
+ #define ATTO_GAI_TF_CONN_CTRL 0x00000020
+ #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040
+
+ u8 reserved3[0x138];
+};
+
+#define ATTO_FUNC_GET_ADAP_ADDR 0x01
+#define ATTO_VER_GET_ADAP_ADDR0 0
+#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0
+
+struct __packed atto_hba_get_adapter_address {
+
+ u8 addr_type;
+ #define ATTO_GAA_AT_PORT 0x00
+ #define ATTO_GAA_AT_NODE 0x01
+ #define ATTO_GAA_AT_CURR_MAC 0x02
+ #define ATTO_GAA_AT_PERM_MAC 0x03
+ #define ATTO_GAA_AT_VNIC 0x04
+
+ u8 port_id;
+ u16 addr_len;
+ u8 address[256];
+};
+
+#define ATTO_FUNC_MEM_RW 0x02
+#define ATTO_VER_MEM_RW0 0
+#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0
+
+struct __packed atto_hba_memory_read_write {
+ u8 mem_func;
+ u8 mem_type;
+ union {
+ u8 pci_index;
+ u8 i2c_dev;
+ };
+ u8 i2c_status;
+ u32 length;
+ u64 address;
+ u8 reserved[48];
+
+};
+
+#define ATTO_FUNC_TRACE 0x03
+#define ATTO_VER_TRACE0 0
+#define ATTO_VER_TRACE1 1
+#define ATTO_VER_TRACE ATTO_VER_TRACE1
+
+struct __packed atto_hba_trace {
+ u8 trace_func;
+ #define ATTO_TRC_TF_GET_INFO 0x00
+ #define ATTO_TRC_TF_ENABLE 0x01
+ #define ATTO_TRC_TF_DISABLE 0x02
+ #define ATTO_TRC_TF_SET_MASK 0x03
+ #define ATTO_TRC_TF_UPLOAD 0x04
+ #define ATTO_TRC_TF_RESET 0x05
+
+ u8 trace_type;
+ #define ATTO_TRC_TT_DRIVER 0x00
+ #define ATTO_TRC_TT_FWCOREDUMP 0x01
+
+ u8 reserved[2];
+ u32 current_offset;
+ u32 total_length;
+ u32 trace_mask;
+ u8 reserved2[48];
+};
+
+#define ATTO_FUNC_SCSI_PASS_THRU 0x04
+#define ATTO_VER_SCSI_PASS_THRU0 0
+#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0
+
+struct __packed atto_hba_scsi_pass_thru {
+ u8 cdb[32];
+ u8 cdb_length;
+ u8 req_status;
+ #define ATTO_SPT_RS_SUCCESS 0x00
+ #define ATTO_SPT_RS_FAILED 0x01
+ #define ATTO_SPT_RS_OVERRUN 0x02
+ #define ATTO_SPT_RS_UNDERRUN 0x03
+ #define ATTO_SPT_RS_NO_DEVICE 0x04
+ #define ATTO_SPT_RS_NO_LUN 0x05
+ #define ATTO_SPT_RS_TIMEOUT 0x06
+ #define ATTO_SPT_RS_BUS_RESET 0x07
+ #define ATTO_SPT_RS_ABORTED 0x08
+ #define ATTO_SPT_RS_BUSY 0x09
+ #define ATTO_SPT_RS_DEGRADED 0x0A
+
+ u8 scsi_status;
+ u8 sense_length;
+ u32 flags;
+ #define ATTO_SPTF_DATA_IN 0x00000001
+ #define ATTO_SPTF_DATA_OUT 0x00000002
+ #define ATTO_SPTF_SIMPLE_Q 0x00000004
+ #define ATTO_SPTF_HEAD_OF_Q 0x00000008
+ #define ATTO_SPTF_ORDERED_Q 0x00000010
+
+ u32 timeout;
+ u32 target_id;
+ u8 lun[8];
+ u32 residual_length;
+ u8 sense_data[0xFC];
+ u8 reserved[0x28];
+};
+
+#define ATTO_FUNC_GET_DEV_ADDR 0x05
+#define ATTO_VER_GET_DEV_ADDR0 0
+#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0
+
+struct __packed atto_hba_get_device_address {
+ u8 addr_type;
+ #define ATTO_GDA_AT_PORT 0x00
+ #define ATTO_GDA_AT_NODE 0x01
+ #define ATTO_GDA_AT_MAC 0x02
+ #define ATTO_GDA_AT_PORTID 0x03
+ #define ATTO_GDA_AT_UNIQUE 0x04
+
+ u8 reserved;
+ u16 addr_len;
+ u32 target_id;
+ u8 address[256];
+};
+
+/* The following functions are supported by firmware but do not have any
+ * associated driver structures
+ */
+#define ATTO_FUNC_PHY_CTRL 0x06
+#define ATTO_FUNC_CONN_CTRL 0x0C
+#define ATTO_FUNC_ADAP_CTRL 0x0E
+#define ATTO_VER_ADAP_CTRL0 0
+#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0
+
+struct __packed atto_hba_adap_ctrl {
+ u8 adap_func;
+ #define ATTO_AC_AF_HARD_RST 0x00
+ #define ATTO_AC_AF_GET_STATE 0x01
+ #define ATTO_AC_AF_GET_TEMP 0x02
+
+ u8 adap_state;
+ #define ATTO_AC_AS_UNKNOWN 0x00
+ #define ATTO_AC_AS_OK 0x01
+ #define ATTO_AC_AS_RST_SCHED 0x02
+ #define ATTO_AC_AS_RST_IN_PROG 0x03
+ #define ATTO_AC_AS_RST_DISC 0x04
+ #define ATTO_AC_AS_DEGRADED 0x05
+ #define ATTO_AC_AS_DISABLED 0x06
+ #define ATTO_AC_AS_TEMP 0x07
+
+ u8 reserved[2];
+
+ union {
+ struct {
+ u8 temp_sensor;
+ u8 temp_state;
+
+ #define ATTO_AC_TS_UNSUPP 0x00
+ #define ATTO_AC_TS_UNKNOWN 0x01
+ #define ATTO_AC_TS_INIT_FAILED 0x02
+ #define ATTO_AC_TS_NORMAL 0x03
+ #define ATTO_AC_TS_OUT_OF_RANGE 0x04
+ #define ATTO_AC_TS_FAULT 0x05
+
+ signed short temp_value;
+ signed short temp_lower_lim;
+ signed short temp_upper_lim;
+ char temp_desc[32];
+ u8 reserved2[20];
+ };
+ };
+};
+
+#define ATTO_FUNC_GET_DEV_INFO 0x0F
+#define ATTO_VER_GET_DEV_INFO0 0
+#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0
+
+struct __packed atto_hba_sas_device_info {
+
+ #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16
+
+ u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
+ #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV
+ u32 exp_target_id;
+ u32 sas_port_mask;
+ u8 sas_level;
+ #define ATTO_SDI_SAS_LVL_INV 0xFF
+
+ u8 slot_num;
+ #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV
+
+ u8 dev_type;
+ #define ATTO_SDI_DT_END_DEVICE 0
+ #define ATTO_SDI_DT_EXPANDER 1
+ #define ATTO_SDI_DT_PORT_MULT 2
+
+ u8 ini_flags;
+ u8 tgt_flags;
+ u8 link_rate; /* SMP_RATE_XXX */
+ u8 loc_flags;
+ #define ATTO_SDI_LF_DIRECT 0x01
+ #define ATTO_SDI_LF_EXPANDER 0x02
+ #define ATTO_SDI_LF_PORT_MULT 0x04
+ u8 pm_port;
+ u8 reserved[0x60];
+};
+
+union atto_hba_device_info {
+ struct atto_hba_sas_device_info sas_dev_info;
+};
+
+struct __packed atto_hba_get_device_info {
+ u32 target_id;
+ u8 info_type;
+ #define ATTO_GDI_IT_UNKNOWN 0x00
+ #define ATTO_GDI_IT_SAS 0x01
+ #define ATTO_GDI_IT_FC 0x02
+ #define ATTO_GDI_IT_FCOE 0x03
+
+ u8 reserved[11];
+ union atto_hba_device_info dev_info;
+};
+
+struct atto_ioctl {
+ u8 version;
+ u8 function; /* ATTO_FUNC_XXX */
+ u8 status;
+#define ATTO_STS_SUCCESS 0x00
+#define ATTO_STS_FAILED 0x01
+#define ATTO_STS_INV_VERSION 0x02
+#define ATTO_STS_OUT_OF_RSRC 0x03
+#define ATTO_STS_INV_FUNC 0x04
+#define ATTO_STS_UNSUPPORTED 0x05
+#define ATTO_STS_INV_ADAPTER 0x06
+#define ATTO_STS_INV_DRVR_VER 0x07
+#define ATTO_STS_INV_PARAM 0x08
+#define ATTO_STS_TIMEOUT 0x09
+#define ATTO_STS_NOT_APPL 0x0A
+#define ATTO_STS_DEGRADED 0x0B
+
+ u8 flags;
+ #define HBAF_TUNNEL 0x01
+
+ u32 data_length;
+ u8 reserved2[56];
+
+ union {
+ u8 byte[1];
+ struct atto_hba_get_adapter_info get_adap_info;
+ struct atto_hba_get_adapter_address get_adap_addr;
+ struct atto_hba_scsi_pass_thru scsi_pass_thru;
+ struct atto_hba_get_device_address get_dev_addr;
+ struct atto_hba_adap_ctrl adap_ctrl;
+ struct atto_hba_get_device_info get_dev_info;
+ struct atto_hba_trace trace;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_scsi_cmd {
+
+ #define ATTO_VDA_SCSI_VER0 0
+ #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0
+
+ u8 cdb[16];
+ u32 flags;
+ u32 data_length;
+ u32 residual_length;
+ u16 target_id;
+ u8 sense_len;
+ u8 scsi_stat;
+ u8 reserved[8];
+ u8 sense_data[80];
+};
+
+struct __packed atto_ioctl_vda_flash_cmd {
+
+ #define ATTO_VDA_FLASH_VER0 0
+ #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0
+
+ u32 flash_addr;
+ u32 data_length;
+ u8 sub_func;
+ u8 reserved[15];
+
+ union {
+ struct {
+ u32 flash_size;
+ u32 page_size;
+ u8 prod_info[32];
+ } info;
+
+ struct {
+ char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+ u32 file_size;
+ } file;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_diag_cmd {
+
+ #define ATTO_VDA_DIAG_VER0 0
+ #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0
+
+ u64 local_addr;
+ u32 data_length;
+ u8 sub_func;
+ u8 flags;
+ u8 reserved[3];
+};
+
+struct __packed atto_ioctl_vda_cli_cmd {
+
+ #define ATTO_VDA_CLI_VER0 0
+ #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0
+
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_smp_cmd {
+
+ #define ATTO_VDA_SMP_VER0 0
+ #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0
+
+ u64 dest;
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_cfg_cmd {
+
+ #define ATTO_VDA_CFG_VER0 0
+ #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0
+
+ u32 data_length;
+ u8 cfg_func;
+ u8 reserved[11];
+
+ union {
+ u8 bytes[112];
+ struct atto_vda_cfg_init init;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_mgt_cmd {
+
+ #define ATTO_VDA_MGT_VER0 0
+ #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0
+
+ u8 mgt_func;
+ u8 scan_generation;
+ u16 dev_index;
+ u32 data_length;
+ u8 reserved[8];
+ union {
+ u8 bytes[112];
+ struct atto_vda_devinfo dev_info;
+ struct atto_vda_grp_info grp_info;
+ struct atto_vdapart_info part_info;
+ struct atto_vda_dh_info dh_info;
+ struct atto_vda_metrics_info metrics_info;
+ struct atto_vda_schedule_info sched_info;
+ struct atto_vda_n_vcache_info nvcache_info;
+ struct atto_vda_buzzer_info buzzer_info;
+ struct atto_vda_adapter_info adapter_info;
+ struct atto_vda_temp_info temp_info;
+ struct atto_vda_fan_info fan_info;
+ } data;
+};
+
+struct __packed atto_ioctl_vda_gsv_cmd {
+
+ #define ATTO_VDA_GSV_VER0 0
+ #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0
+
+ u8 rsp_len;
+ u8 reserved[7];
+ u8 version_info[1];
+ #define ATTO_VDA_VER_UNSUPPORTED 0xFF
+
+};
+
+struct __packed atto_ioctl_vda {
+ u8 version;
+ u8 function; /* VDA_FUNC_XXXX */
+ u8 status; /* ATTO_STS_XXX */
+ u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */
+ u32 data_length;
+ u8 reserved[8];
+
+ union {
+ struct atto_ioctl_vda_scsi_cmd scsi;
+ struct atto_ioctl_vda_flash_cmd flash;
+ struct atto_ioctl_vda_diag_cmd diag;
+ struct atto_ioctl_vda_cli_cmd cli;
+ struct atto_ioctl_vda_smp_cmd smp;
+ struct atto_ioctl_vda_cfg_cmd cfg;
+ struct atto_ioctl_vda_mgt_cmd mgt;
+ struct atto_ioctl_vda_gsv_cmd gsv;
+ u8 cmd_info[256];
+ } cmd;
+
+ union {
+ u8 data[1];
+ struct atto_vda_devinfo2 dev_info2;
+ } data;
+
+};
+
+struct __packed atto_ioctl_smp {
+ u8 version;
+ #define ATTO_SMP_VERSION0 0
+ #define ATTO_SMP_VERSION1 1
+ #define ATTO_SMP_VERSION2 2
+ #define ATTO_SMP_VERSION ATTO_SMP_VERSION2
+
+ u8 function;
+#define ATTO_SMP_FUNC_DISC_SMP 0x00
+#define ATTO_SMP_FUNC_DISC_TARG 0x01
+#define ATTO_SMP_FUNC_SEND_CMD 0x02
+#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03
+#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04
+#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05
+
+ u8 status; /* ATTO_STS_XXX */
+ u8 smp_status; /* if status == ATTO_STS_SUCCESS */
+ #define ATTO_SMP_STS_SUCCESS 0x00
+ #define ATTO_SMP_STS_FAILURE 0x01
+ #define ATTO_SMP_STS_RESCAN 0x02
+ #define ATTO_SMP_STS_NOT_FOUND 0x03
+
+ u16 target_id;
+ u8 phy_id;
+ u8 dev_index;
+ u64 smp_sas_addr;
+ u64 targ_sas_addr;
+ u32 req_length;
+ u32 rsp_length;
+ u8 flags;
+ #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */
+
+ u8 reserved[31];
+
+ union {
+ u8 byte[1];
+ u32 dword[1];
+ } data;
+
+};
+
+struct __packed atto_express_ioctl {
+ struct atto_express_ioctl_header header;
+
+ union {
+ struct atto_firmware_rw_request fwrw;
+ struct atto_param_rw_request prw;
+ struct atto_channel_list chanlist;
+ struct atto_channel_info chaninfo;
+ struct atto_ioctl ioctl_hba;
+ struct atto_module_info modinfo;
+ struct atto_ioctl_vda ioctl_vda;
+ struct atto_ioctl_smp ioctl_smp;
+ struct atto_csmi csmi;
+
+ } data;
+};
+
+/* The struct associated with the code is listed after the definition */
+#define EXPRESS_IOCTL_MIN 0x4500
+#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */
+#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */
+#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */
+#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */
+#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */
+#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */
+#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */
+#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */
+#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */
+#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */
+#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */
+#define EXPRESS_CSMI 0x450B /* CSMI */
+#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */
+#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */
+#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */
+#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */
+#define EXPRESS_IOCTL_MAX 0x450F
+
+#endif
diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h
new file mode 100644
index 000000000..5fc1f991d
--- /dev/null
+++ b/drivers/scsi/esas2r/atvda.h
@@ -0,0 +1,1319 @@
+/* linux/drivers/scsi/esas2r/atvda.h
+ * ATTO VDA interface definitions
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+#ifndef ATVDA_H
+#define ATVDA_H
+
+struct __packed atto_dev_addr {
+ u64 dev_port;
+ u64 hba_port;
+ u8 lun;
+ u8 flags;
+ #define VDA_DEVADDRF_SATA 0x01
+ #define VDA_DEVADDRF_SSD 0x02
+ u8 link_speed; /* VDALINKSPEED_xxx */
+ u8 pad[1];
+};
+
+/* dev_addr2 was added for 64-bit alignment */
+
+struct __packed atto_dev_addr2 {
+ u64 dev_port;
+ u64 hba_port;
+ u8 lun;
+ u8 flags;
+ u8 link_speed;
+ u8 pad[5];
+};
+
+struct __packed atto_vda_sge {
+ u32 length;
+ u64 address;
+};
+
+
+/* VDA request function codes */
+
+#define VDA_FUNC_SCSI 0x00
+#define VDA_FUNC_FLASH 0x01
+#define VDA_FUNC_DIAG 0x02
+#define VDA_FUNC_AE 0x03
+#define VDA_FUNC_CLI 0x04
+#define VDA_FUNC_IOCTL 0x05
+#define VDA_FUNC_CFG 0x06
+#define VDA_FUNC_MGT 0x07
+#define VDA_FUNC_GSV 0x08
+
+
+/* VDA request status values. for host driver considerations, values for
+ * SCSI requests start at zero. other requests may use these values as well. */
+
+#define RS_SUCCESS 0x00 /*! successful completion */
+#define RS_INV_FUNC 0x01 /*! invalid command function */
+#define RS_BUSY 0x02 /*! insufficient resources */
+#define RS_SEL 0x03 /*! no target at target_id */
+#define RS_NO_LUN 0x04 /*! invalid LUN */
+#define RS_TIMEOUT 0x05 /*! request timeout */
+#define RS_OVERRUN 0x06 /*! data overrun */
+#define RS_UNDERRUN 0x07 /*! data underrun */
+#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */
+#define RS_ABORTED 0x0A /*! command aborted */
+#define RS_RESID_MISM 0x0B /*! residual length incorrect */
+#define RS_TM_FAILED 0x0C /*! task management failed */
+#define RS_RESET 0x0D /*! aborted due to bus reset */
+#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */
+#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */
+#define RS_UNSUPPORTED 0x10 /*! unsupported request */
+#define RS_SEL2 0x70 /*! internal generated RS_SEL */
+#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */
+#define RS_MGT_BASE 0x80 /*! base of VDA management errors */
+#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00)
+#define RS_DEV_INVALID (RS_MGT_BASE + 0x01)
+#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02)
+#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03)
+#define RS_DEV_LOST (RS_MGT_BASE + 0x04)
+#define RS_SCAN_GEN (RS_MGT_BASE + 0x05)
+#define RS_GRP_INVALID (RS_MGT_BASE + 0x08)
+#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09)
+#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A)
+#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B)
+#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C)
+#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D)
+#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E)
+#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F)
+#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10)
+#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
+#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12)
+#define RS_CFG_SAVE (RS_MGT_BASE + 0x14)
+#define RS_PART_LAST (RS_MGT_BASE + 0x18)
+#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19)
+#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A)
+#define RS_PART_TARGET (RS_MGT_BASE + 0x1B)
+#define RS_PART_LUN (RS_MGT_BASE + 0x1C)
+#define RS_PART_DUP (RS_MGT_BASE + 0x1D)
+#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E)
+#define RS_PART_MAX (RS_MGT_BASE + 0x1F)
+#define RS_PART_CAP (RS_MGT_BASE + 0x20)
+#define RS_PART_STATE (RS_MGT_BASE + 0x21)
+#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22)
+#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23)
+#define RS_HS_ERROR (RS_MGT_BASE + 0x24)
+#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25)
+#define RS_BAD_PARAM (RS_MGT_BASE + 0x26)
+#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27)
+#define RS_FLS_BASE 0xB0 /*! base of VDA errors */
+#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00)
+#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01)
+#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02)
+#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03)
+#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04)
+#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05)
+#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06)
+#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07)
+#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08)
+#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */
+#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0)
+#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1)
+#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2)
+#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3)
+#define RS_DEGRADED 0xFB /*! degraded mode */
+#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */
+#define RS_VDA_INTERNAL 0xFD /*! catch-all */
+#define RS_PENDING 0xFE /*! pending, not started */
+#define RS_STARTED 0xFF /*! started */
+
+
+/* flash request subfunctions. these are used in both the IOCTL and the
+ * driver-firmware interface (VDA_FUNC_FLASH). */
+
+#define VDA_FLASH_BEGINW 0x00
+#define VDA_FLASH_READ 0x01
+#define VDA_FLASH_WRITE 0x02
+#define VDA_FLASH_COMMIT 0x03
+#define VDA_FLASH_CANCEL 0x04
+#define VDA_FLASH_INFO 0x05
+#define VDA_FLASH_FREAD 0x06
+#define VDA_FLASH_FWRITE 0x07
+#define VDA_FLASH_FINFO 0x08
+
+
+/* IOCTL request subfunctions. these identify the payload type for
+ * VDA_FUNC_IOCTL.
+ */
+
+#define VDA_IOCTL_HBA 0x00
+#define VDA_IOCTL_CSMI 0x01
+#define VDA_IOCTL_SMP 0x02
+
+struct __packed atto_vda_devinfo {
+ struct atto_dev_addr dev_addr;
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 revision[4];
+ u64 capacity;
+ u32 block_size;
+ u8 dev_type;
+
+ union {
+ u8 dev_status;
+ #define VDADEVSTAT_INVALID 0x00
+ #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID
+ #define VDADEVSTAT_ASSIGNED 0x01
+ #define VDADEVSTAT_SPARE 0x02
+ #define VDADEVSTAT_UNAVAIL 0x03
+ #define VDADEVSTAT_PT_MAINT 0x04
+ #define VDADEVSTAT_LCLSPARE 0x05
+ #define VDADEVSTAT_UNUSEABLE 0x06
+ #define VDADEVSTAT_AVAIL 0xFF
+
+ u8 op_ctrl;
+ #define VDA_DEV_OP_CTRL_START 0x01
+ #define VDA_DEV_OP_CTRL_HALT 0x02
+ #define VDA_DEV_OP_CTRL_RESUME 0x03
+ #define VDA_DEV_OP_CTRL_CANCEL 0x04
+ };
+
+ u8 member_state;
+ #define VDAMBRSTATE_ONLINE 0x00
+ #define VDAMBRSTATE_DEGRADED 0x01
+ #define VDAMBRSTATE_UNAVAIL 0x02
+ #define VDAMBRSTATE_FAULTED 0x03
+ #define VDAMBRSTATE_MISREAD 0x04
+ #define VDAMBRSTATE_INCOMPAT 0x05
+
+ u8 operation;
+ #define VDAOP_NONE 0x00
+ #define VDAOP_REBUILD 0x01
+ #define VDAOP_ERASE 0x02
+ #define VDAOP_PATTERN 0x03
+ #define VDAOP_CONVERSION 0x04
+ #define VDAOP_FULL_INIT 0x05
+ #define VDAOP_QUICK_INIT 0x06
+ #define VDAOP_SECT_SCAN 0x07
+ #define VDAOP_SECT_SCAN_PARITY 0x08
+ #define VDAOP_SECT_SCAN_PARITY_FIX 0x09
+ #define VDAOP_RECOV_REBUILD 0x0A
+
+ u8 op_status;
+ #define VDAOPSTAT_OK 0x00
+ #define VDAOPSTAT_FAULTED 0x01
+ #define VDAOPSTAT_HALTED 0x02
+ #define VDAOPSTAT_INT 0x03
+
+ u8 progress; /* 0 - 100% */
+ u16 ses_dev_index;
+ #define VDASESDI_INVALID 0xFFFF
+
+ u8 serial_no[32];
+
+ union {
+ u16 target_id;
+ #define VDATGTID_INVALID 0xFFFF
+
+ u16 features_mask;
+ };
+
+ u16 lun;
+ u16 features;
+ #define VDADEVFEAT_ENC_SERV 0x0001
+ #define VDADEVFEAT_IDENT 0x0002
+ #define VDADEVFEAT_DH_SUPP 0x0004
+ #define VDADEVFEAT_PHYS_ID 0x0008
+
+ u8 ses_element_id;
+ u8 link_speed;
+ #define VDALINKSPEED_UNKNOWN 0x00
+ #define VDALINKSPEED_1GB 0x01
+ #define VDALINKSPEED_1_5GB 0x02
+ #define VDALINKSPEED_2GB 0x03
+ #define VDALINKSPEED_3GB 0x04
+ #define VDALINKSPEED_4GB 0x05
+ #define VDALINKSPEED_6GB 0x06
+ #define VDALINKSPEED_8GB 0x07
+
+ u16 phys_target_id;
+ u8 reserved[2];
+};
+
+
+/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it
+ * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
+ * the entire structure is DMaed between the firmware and host buffer and
+ * the data will always be in little endian format.
+ */
+
+struct __packed atto_vda_devinfo2 {
+ struct atto_dev_addr dev_addr;
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 revision[4];
+ u64 capacity;
+ u32 block_size;
+ u8 dev_type;
+ u8 dev_status;
+ u8 member_state;
+ u8 operation;
+ u8 op_status;
+ u8 progress;
+ u16 ses_dev_index;
+ u8 serial_no[32];
+ union {
+ u16 target_id;
+ u16 features_mask;
+ };
+
+ u16 lun;
+ u16 features;
+ u8 ses_element_id;
+ u8 link_speed;
+ u16 phys_target_id;
+ u8 reserved[2];
+
+/* This is where fields specific to struct atto_vda_devinfo2 begin. Note
+ * that the structure version started at one so applications that unionize this
+ * structure with atto_vda_dev_info can differentiate them if desired.
+ */
+
+ u8 version;
+ #define VDADEVINFO_VERSION0 0x00
+ #define VDADEVINFO_VERSION1 0x01
+ #define VDADEVINFO_VERSION2 0x02
+ #define VDADEVINFO_VERSION3 0x03
+ #define VDADEVINFO_VERSION VDADEVINFO_VERSION3
+
+ u8 reserved2[3];
+
+ /* sector scanning fields */
+
+ u32 ss_curr_errors;
+ u64 ss_curr_scanned;
+ u32 ss_curr_recvrd;
+ u32 ss_scan_length;
+ u32 ss_total_errors;
+ u32 ss_total_recvrd;
+ u32 ss_num_scans;
+
+ /* grp_name was added in version 2 of this structure. */
+
+ char grp_name[15];
+ u8 reserved3[4];
+
+ /* dev_addr_list was added in version 3 of this structure. */
+
+ u8 num_dev_addr;
+ struct atto_dev_addr2 dev_addr_list[8];
+};
+
+
+struct __packed atto_vda_grp_info {
+ u8 grp_index;
+ #define VDA_MAX_RAID_GROUPS 32
+
+ char grp_name[15];
+ u64 capacity;
+ u32 block_size;
+ u32 interleave;
+ u8 type;
+ #define VDA_GRP_TYPE_RAID0 0
+ #define VDA_GRP_TYPE_RAID1 1
+ #define VDA_GRP_TYPE_RAID4 4
+ #define VDA_GRP_TYPE_RAID5 5
+ #define VDA_GRP_TYPE_RAID6 6
+ #define VDA_GRP_TYPE_RAID10 10
+ #define VDA_GRP_TYPE_RAID40 40
+ #define VDA_GRP_TYPE_RAID50 50
+ #define VDA_GRP_TYPE_RAID60 60
+ #define VDA_GRP_TYPE_DVRAID_HS 252
+ #define VDA_GRP_TYPE_DVRAID_NOHS 253
+ #define VDA_GRP_TYPE_JBOD 254
+ #define VDA_GRP_TYPE_SPARE 255
+
+ union {
+ u8 status;
+ #define VDA_GRP_STAT_INVALID 0x00
+ #define VDA_GRP_STAT_NEW 0x01
+ #define VDA_GRP_STAT_WAITING 0x02
+ #define VDA_GRP_STAT_ONLINE 0x03
+ #define VDA_GRP_STAT_DEGRADED 0x04
+ #define VDA_GRP_STAT_OFFLINE 0x05
+ #define VDA_GRP_STAT_DELETED 0x06
+ #define VDA_GRP_STAT_RECOV_BASIC 0x07
+ #define VDA_GRP_STAT_RECOV_EXTREME 0x08
+
+ u8 op_ctrl;
+ #define VDA_GRP_OP_CTRL_START 0x01
+ #define VDA_GRP_OP_CTRL_HALT 0x02
+ #define VDA_GRP_OP_CTRL_RESUME 0x03
+ #define VDA_GRP_OP_CTRL_CANCEL 0x04
+ };
+
+ u8 rebuild_state;
+ #define VDA_RBLD_NONE 0x00
+ #define VDA_RBLD_REBUILD 0x01
+ #define VDA_RBLD_ERASE 0x02
+ #define VDA_RBLD_PATTERN 0x03
+ #define VDA_RBLD_CONV 0x04
+ #define VDA_RBLD_FULL_INIT 0x05
+ #define VDA_RBLD_QUICK_INIT 0x06
+ #define VDA_RBLD_SECT_SCAN 0x07
+ #define VDA_RBLD_SECT_SCAN_PARITY 0x08
+ #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
+ #define VDA_RBLD_RECOV_REBUILD 0x0A
+ #define VDA_RBLD_RECOV_BASIC 0x0B
+ #define VDA_RBLD_RECOV_EXTREME 0x0C
+
+ u8 span_depth;
+ u8 progress;
+ u8 mirror_width;
+ u8 stripe_width;
+ u8 member_cnt;
+
+ union {
+ u16 members[32];
+ #define VDA_MEMBER_MISSING 0xFFFF
+ #define VDA_MEMBER_NEW 0xFFFE
+ u16 features_mask;
+ };
+
+ u16 features;
+ #define VDA_GRP_FEAT_HOTSWAP 0x0001
+ #define VDA_GRP_FEAT_SPDRD_MASK 0x0006
+ #define VDA_GRP_FEAT_SPDRD_DIS 0x0000
+ #define VDA_GRP_FEAT_SPDRD_ENB 0x0002
+ #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
+ #define VDA_GRP_FEAT_IDENT 0x0008
+ #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
+ #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010
+ #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
+ #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
+ #define VDA_GRP_FEAT_WRITE_CACHE 0x0040
+ #define VDA_GRP_FEAT_RBLD_RESUME 0x0080
+ #define VDA_GRP_FEAT_SECT_RESUME 0x0100
+ #define VDA_GRP_FEAT_INIT_RESUME 0x0200
+ #define VDA_GRP_FEAT_SSD 0x0400
+ #define VDA_GRP_FEAT_BOOT_DEV 0x0800
+
+ /*
+ * for backward compatibility, a prefetch value of zero means the
+ * setting is ignored/unsupported. therefore, the firmware supported
+ * 0-6 values are incremented to 1-7.
+ */
+
+ u8 prefetch;
+ u8 op_status;
+ #define VDAGRPOPSTAT_MASK 0x0F
+ #define VDAGRPOPSTAT_INVALID 0x00
+ #define VDAGRPOPSTAT_OK 0x01
+ #define VDAGRPOPSTAT_FAULTED 0x02
+ #define VDAGRPOPSTAT_HALTED 0x03
+ #define VDAGRPOPSTAT_INT 0x04
+ #define VDAGRPOPPROC_MASK 0xF0
+ #define VDAGRPOPPROC_STARTABLE 0x10
+ #define VDAGRPOPPROC_CANCELABLE 0x20
+ #define VDAGRPOPPROC_RESUMABLE 0x40
+ #define VDAGRPOPPROC_HALTABLE 0x80
+ u8 over_provision;
+ u8 reserved[3];
+
+};
+
+
+struct __packed atto_vdapart_info {
+ u8 part_no;
+ #define VDA_MAX_PARTITIONS 128
+
+ char grp_name[15];
+ u64 part_size;
+ u64 start_lba;
+ u32 block_size;
+ u16 target_id;
+ u8 LUN;
+ char serial_no[41];
+ u8 features;
+ #define VDAPI_FEAT_WRITE_CACHE 0x01
+
+ u8 reserved[7];
+};
+
+
+struct __packed atto_vda_dh_info {
+ u8 req_type;
+ #define VDADH_RQTYPE_CACHE 0x01
+ #define VDADH_RQTYPE_FETCH 0x02
+ #define VDADH_RQTYPE_SET_STAT 0x03
+ #define VDADH_RQTYPE_GET_STAT 0x04
+
+ u8 req_qual;
+ #define VDADH_RQQUAL_SMART 0x01
+ #define VDADH_RQQUAL_MEDDEF 0x02
+ #define VDADH_RQQUAL_INFOEXC 0x04
+
+ u8 num_smart_attribs;
+ u8 status;
+ #define VDADH_STAT_DISABLE 0x00
+ #define VDADH_STAT_ENABLE 0x01
+
+ u32 med_defect_cnt;
+ u32 info_exc_cnt;
+ u8 smart_status;
+ #define VDADH_SMARTSTAT_OK 0x00
+ #define VDADH_SMARTSTAT_ERR 0x01
+
+ u8 reserved[35];
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_dh_smart {
+ u8 attrib_id;
+ u8 current_val;
+ u8 worst;
+ u8 threshold;
+ u8 raw_data[6];
+ u8 raw_attrib_status;
+ #define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01
+ #define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02
+ #define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04
+ #define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08
+ #define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10
+ #define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20
+
+ u8 calc_attrib_status;
+ #define VDADHSM_CALCSTAT_UNKNOWN 0x00
+ #define VDADHSM_CALCSTAT_GOOD 0x01
+ #define VDADHSM_CALCSTAT_PREFAIL 0x02
+ #define VDADHSM_CALCSTAT_OLDAGE 0x03
+
+ u8 reserved[4];
+};
+
+
+struct __packed atto_vda_metrics_info {
+ u8 data_version;
+ #define VDAMET_VERSION0 0x00
+ #define VDAMET_VERSION VDAMET_VERSION0
+
+ u8 metrics_action;
+ #define VDAMET_METACT_NONE 0x00
+ #define VDAMET_METACT_START 0x01
+ #define VDAMET_METACT_STOP 0x02
+ #define VDAMET_METACT_RETRIEVE 0x03
+ #define VDAMET_METACT_CLEAR 0x04
+
+ u8 test_action;
+ #define VDAMET_TSTACT_NONE 0x00
+ #define VDAMET_TSTACT_STRT_INIT 0x01
+ #define VDAMET_TSTACT_STRT_READ 0x02
+ #define VDAMET_TSTACT_STRT_VERIFY 0x03
+ #define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04
+ #define VDAMET_TSTACT_STOP 0x05
+
+ u8 num_dev_indexes;
+ #define VDAMET_ALL_DEVICES 0xFF
+
+ u16 dev_indexes[32];
+ u8 reserved[12];
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_metrics_data {
+ u16 dev_index;
+ u16 length;
+ #define VDAMD_LEN_LAST 0x8000
+ #define VDAMD_LEN_MASK 0x0FFF
+
+ u32 flags;
+ #define VDAMDF_RUN 0x00000007
+ #define VDAMDF_RUN_READ 0x00000001
+ #define VDAMDF_RUN_WRITE 0x00000002
+ #define VDAMDF_RUN_ALL 0x00000004
+ #define VDAMDF_READ 0x00000010
+ #define VDAMDF_WRITE 0x00000020
+ #define VDAMDF_ALL 0x00000040
+ #define VDAMDF_DRIVETEST 0x40000000
+ #define VDAMDF_NEW 0x80000000
+
+ u64 total_read_data;
+ u64 total_write_data;
+ u64 total_read_io;
+ u64 total_write_io;
+ u64 read_start_time;
+ u64 read_stop_time;
+ u64 write_start_time;
+ u64 write_stop_time;
+ u64 read_maxio_time;
+ u64 wpvdadmetricsdatarite_maxio_time;
+ u64 read_totalio_time;
+ u64 write_totalio_time;
+ u64 read_total_errs;
+ u64 write_total_errs;
+ u64 read_recvrd_errs;
+ u64 write_recvrd_errs;
+ u64 miscompares;
+};
+
+
+struct __packed atto_vda_schedule_info {
+ u8 schedule_type;
+ #define VDASI_SCHTYPE_ONETIME 0x01
+ #define VDASI_SCHTYPE_DAILY 0x02
+ #define VDASI_SCHTYPE_WEEKLY 0x03
+
+ u8 operation;
+ #define VDASI_OP_NONE 0x00
+ #define VDASI_OP_CREATE 0x01
+ #define VDASI_OP_CANCEL 0x02
+
+ u8 hour;
+ u8 minute;
+ u8 day;
+ #define VDASI_DAY_NONE 0x00
+
+ u8 progress;
+ #define VDASI_PROG_NONE 0xFF
+
+ u8 event_type;
+ #define VDASI_EVTTYPE_SECT_SCAN 0x01
+ #define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02
+ #define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03
+
+ u8 recurrences;
+ #define VDASI_RECUR_FOREVER 0x00
+
+ u32 id;
+ #define VDASI_ID_NONE 0x00
+
+ char grp_name[15];
+ u8 reserved[85];
+};
+
+
+struct __packed atto_vda_n_vcache_info {
+ u8 super_cap_status;
+ #define VDANVCI_SUPERCAP_NOT_PRESENT 0x00
+ #define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01
+ #define VDANVCI_SUPERCAP_NOT_CHARGED 0x02
+
+ u8 nvcache_module_status;
+ #define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00
+ #define VDANVCI_NVCACHEMODULE_PRESENT 0x01
+
+ u8 protection_mode;
+ #define VDANVCI_PROTMODE_HI_PROTECT 0x00
+ #define VDANVCI_PROTMODE_HI_PERFORM 0x01
+
+ u8 reserved[109];
+};
+
+
+struct __packed atto_vda_buzzer_info {
+ u8 status;
+ #define VDABUZZI_BUZZER_OFF 0x00
+ #define VDABUZZI_BUZZER_ON 0x01
+ #define VDABUZZI_BUZZER_LAST 0x02
+
+ u8 reserved[3];
+ u32 duration;
+ #define VDABUZZI_DURATION_INDEFINITE 0xffffffff
+
+ u8 reserved2[104];
+};
+
+
+struct __packed atto_vda_adapter_info {
+ u8 version;
+ #define VDAADAPINFO_VERSION0 0x00
+ #define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0
+
+ u8 reserved;
+ signed short utc_offset;
+ u32 utc_time;
+ u32 features;
+ #define VDA_ADAP_FEAT_IDENT 0x0001
+ #define VDA_ADAP_FEAT_BUZZ_ERR 0x0002
+ #define VDA_ADAP_FEAT_UTC_TIME 0x0004
+
+ u32 valid_features;
+ char active_config[33];
+ u8 temp_count;
+ u8 fan_count;
+ u8 reserved3[61];
+};
+
+
+struct __packed atto_vda_temp_info {
+ u8 temp_index;
+ u8 max_op_temp;
+ u8 min_op_temp;
+ u8 op_temp_warn;
+ u8 temperature;
+ u8 type;
+ #define VDA_TEMP_TYPE_CPU 1
+
+ u8 reserved[106];
+};
+
+
+struct __packed atto_vda_fan_info {
+ u8 fan_index;
+ u8 status;
+ #define VDA_FAN_STAT_UNKNOWN 0
+ #define VDA_FAN_STAT_NORMAL 1
+ #define VDA_FAN_STAT_FAIL 2
+
+ u16 crit_pvdafaninfothreshold;
+ u16 warn_threshold;
+ u16 speed;
+ u8 reserved[104];
+};
+
+
+/* VDA management commands */
+
+#define VDAMGT_DEV_SCAN 0x00
+#define VDAMGT_DEV_INFO 0x01
+#define VDAMGT_DEV_CLEAN 0x02
+#define VDAMGT_DEV_IDENTIFY 0x03
+#define VDAMGT_DEV_IDENTSTOP 0x04
+#define VDAMGT_DEV_PT_INFO 0x05
+#define VDAMGT_DEV_FEATURES 0x06
+#define VDAMGT_DEV_PT_FEATURES 0x07
+#define VDAMGT_DEV_HEALTH_REQ 0x08
+#define VDAMGT_DEV_METRICS 0x09
+#define VDAMGT_DEV_INFO2 0x0A
+#define VDAMGT_DEV_OPERATION 0x0B
+#define VDAMGT_DEV_INFO2_BYADDR 0x0C
+#define VDAMGT_GRP_INFO 0x10
+#define VDAMGT_GRP_CREATE 0x11
+#define VDAMGT_GRP_DELETE 0x12
+#define VDAMGT_ADD_STORAGE 0x13
+#define VDAMGT_MEMBER_ADD 0x14
+#define VDAMGT_GRP_COMMIT 0x15
+#define VDAMGT_GRP_REBUILD 0x16
+#define VDAMGT_GRP_COMMIT_INIT 0x17
+#define VDAMGT_QUICK_RAID 0x18
+#define VDAMGT_GRP_FEATURES 0x19
+#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A
+#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B
+#define VDAMGT_GRP_OPERATION 0x1C
+#define VDAMGT_CFG_SAVE 0x20
+#define VDAMGT_LAST_ERROR 0x21
+#define VDAMGT_ADAP_INFO 0x22
+#define VDAMGT_ADAP_FEATURES 0x23
+#define VDAMGT_TEMP_INFO 0x24
+#define VDAMGT_FAN_INFO 0x25
+#define VDAMGT_PART_INFO 0x30
+#define VDAMGT_PART_MAP 0x31
+#define VDAMGT_PART_UNMAP 0x32
+#define VDAMGT_PART_AUTOMAP 0x33
+#define VDAMGT_PART_SPLIT 0x34
+#define VDAMGT_PART_MERGE 0x35
+#define VDAMGT_SPARE_LIST 0x40
+#define VDAMGT_SPARE_ADD 0x41
+#define VDAMGT_SPARE_REMOVE 0x42
+#define VDAMGT_LOCAL_SPARE_ADD 0x43
+#define VDAMGT_SCHEDULE_EVENT 0x50
+#define VDAMGT_SCHEDULE_INFO 0x51
+#define VDAMGT_NVCACHE_INFO 0x60
+#define VDAMGT_NVCACHE_SET 0x61
+#define VDAMGT_BUZZER_INFO 0x70
+#define VDAMGT_BUZZER_SET 0x71
+
+
+struct __packed atto_vda_ae_hdr {
+ u8 bylength;
+ u8 byflags;
+ #define VDAAE_HDRF_EVENT_ACK 0x01
+
+ u8 byversion;
+ #define VDAAE_HDR_VER_0 0
+
+ u8 bytype;
+ #define VDAAE_HDR_TYPE_RAID 1
+ #define VDAAE_HDR_TYPE_LU 2
+ #define VDAAE_HDR_TYPE_DISK 3
+ #define VDAAE_HDR_TYPE_RESET 4
+ #define VDAAE_HDR_TYPE_LOG_INFO 5
+ #define VDAAE_HDR_TYPE_LOG_WARN 6
+ #define VDAAE_HDR_TYPE_LOG_CRIT 7
+ #define VDAAE_HDR_TYPE_LOG_FAIL 8
+ #define VDAAE_HDR_TYPE_NVC 9
+ #define VDAAE_HDR_TYPE_TLG_INFO 10
+ #define VDAAE_HDR_TYPE_TLG_WARN 11
+ #define VDAAE_HDR_TYPE_TLG_CRIT 12
+ #define VDAAE_HDR_TYPE_PWRMGT 13
+ #define VDAAE_HDR_TYPE_MUTE 14
+ #define VDAAE_HDR_TYPE_DEV 15
+};
+
+
+struct __packed atto_vda_ae_raid {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwflags;
+ #define VDAAE_GROUP_STATE 0x00000001
+ #define VDAAE_RBLD_STATE 0x00000002
+ #define VDAAE_RBLD_PROG 0x00000004
+ #define VDAAE_MEMBER_CHG 0x00000008
+ #define VDAAE_PART_CHG 0x00000010
+ #define VDAAE_MEM_STATE_CHG 0x00000020
+
+ u8 bygroup_state;
+ #define VDAAE_RAID_INVALID 0
+ #define VDAAE_RAID_NEW 1
+ #define VDAAE_RAID_WAITING 2
+ #define VDAAE_RAID_ONLINE 3
+ #define VDAAE_RAID_DEGRADED 4
+ #define VDAAE_RAID_OFFLINE 5
+ #define VDAAE_RAID_DELETED 6
+ #define VDAAE_RAID_BASIC 7
+ #define VDAAE_RAID_EXTREME 8
+ #define VDAAE_RAID_UNKNOWN 9
+
+ u8 byrebuild_state;
+ #define VDAAE_RBLD_NONE 0
+ #define VDAAE_RBLD_REBUILD 1
+ #define VDAAE_RBLD_ERASE 2
+ #define VDAAE_RBLD_PATTERN 3
+ #define VDAAE_RBLD_CONV 4
+ #define VDAAE_RBLD_FULL_INIT 5
+ #define VDAAE_RBLD_QUICK_INIT 6
+ #define VDAAE_RBLD_SECT_SCAN 7
+ #define VDAAE_RBLD_SECT_SCAN_PARITY 8
+ #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
+ #define VDAAE_RBLD_RECOV_REBUILD 10
+ #define VDAAE_RBLD_UNKNOWN 11
+
+ u8 byrebuild_progress;
+ u8 op_status;
+ #define VDAAE_GRPOPSTAT_MASK 0x0F
+ #define VDAAE_GRPOPSTAT_INVALID 0x00
+ #define VDAAE_GRPOPSTAT_OK 0x01
+ #define VDAAE_GRPOPSTAT_FAULTED 0x02
+ #define VDAAE_GRPOPSTAT_HALTED 0x03
+ #define VDAAE_GRPOPSTAT_INT 0x04
+ #define VDAAE_GRPOPPROC_MASK 0xF0
+ #define VDAAE_GRPOPPROC_STARTABLE 0x10
+ #define VDAAE_GRPOPPROC_CANCELABLE 0x20
+ #define VDAAE_GRPOPPROC_RESUMABLE 0x40
+ #define VDAAE_GRPOPPROC_HALTABLE 0x80
+ char acname[15];
+ u8 byreserved;
+ u8 byreserved2[0x80 - 0x1C];
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun {
+ u16 wtarget_id;
+ u8 bylun;
+ u8 byreserved;
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun_raid {
+ u16 wtarget_id;
+ u8 bylun;
+ u8 byreserved;
+ u32 dwinterleave;
+ u32 dwblock_size;
+};
+
+
+struct __packed atto_vda_ae_lu {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwevent;
+ #define VDAAE_LU_DISC 0x00000001
+ #define VDAAE_LU_LOST 0x00000002
+ #define VDAAE_LU_STATE 0x00000004
+ #define VDAAE_LU_PASSTHROUGH 0x10000000
+ #define VDAAE_LU_PHYS_ID 0x20000000
+
+ u8 bystate;
+ #define VDAAE_LU_UNDEFINED 0
+ #define VDAAE_LU_NOT_PRESENT 1
+ #define VDAAE_LU_OFFLINE 2
+ #define VDAAE_LU_ONLINE 3
+ #define VDAAE_LU_DEGRADED 4
+ #define VDAAE_LU_FACTORY_DISABLED 5
+ #define VDAAE_LU_DELETED 6
+ #define VDAAE_LU_BUSSCAN 7
+ #define VDAAE_LU_UNKNOWN 8
+
+ u8 byreserved;
+ u16 wphys_target_id;
+
+ union {
+ struct atto_vda_ae_lu_tgt_lun tgtlun;
+ struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
+ } id;
+};
+
+
+struct __packed atto_vda_ae_disk {
+ struct atto_vda_ae_hdr hdr;
+};
+
+
+#define VDAAE_LOG_STRSZ 64
+
+struct __packed atto_vda_ae_log {
+ struct atto_vda_ae_hdr hdr;
+ char aclog_ascii[VDAAE_LOG_STRSZ];
+};
+
+
+#define VDAAE_TLG_STRSZ 56
+
+struct __packed atto_vda_ae_timestamp_log {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwtimestamp;
+ char aclog_ascii[VDAAE_TLG_STRSZ];
+};
+
+
+struct __packed atto_vda_ae_nvc {
+ struct atto_vda_ae_hdr hdr;
+};
+
+
+struct __packed atto_vda_ae_dev {
+ struct atto_vda_ae_hdr hdr;
+ struct atto_dev_addr devaddr;
+};
+
+
+union atto_vda_ae {
+ struct atto_vda_ae_hdr hdr;
+ struct atto_vda_ae_disk disk;
+ struct atto_vda_ae_lu lu;
+ struct atto_vda_ae_raid raid;
+ struct atto_vda_ae_log log;
+ struct atto_vda_ae_timestamp_log tslog;
+ struct atto_vda_ae_nvc nvcache;
+ struct atto_vda_ae_dev dev;
+};
+
+
+struct __packed atto_vda_date_and_time {
+ u8 flags;
+ #define VDA_DT_DAY_MASK 0x07
+ #define VDA_DT_DAY_NONE 0x00
+ #define VDA_DT_DAY_SUN 0x01
+ #define VDA_DT_DAY_MON 0x02
+ #define VDA_DT_DAY_TUE 0x03
+ #define VDA_DT_DAY_WED 0x04
+ #define VDA_DT_DAY_THU 0x05
+ #define VDA_DT_DAY_FRI 0x06
+ #define VDA_DT_DAY_SAT 0x07
+ #define VDA_DT_PM 0x40
+ #define VDA_DT_MILITARY 0x80
+
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 day;
+ u8 month;
+ u16 year;
+};
+
+#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */
+#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */
+#define SGE_LAST 0x01000000 /*! last entry */
+#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */
+#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */
+#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */
+#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */
+
+
+struct __packed atto_vda_cfg_init {
+ struct atto_vda_date_and_time date_time;
+ u32 sgl_page_size;
+ u32 vda_version;
+ u32 fw_version;
+ u32 fw_build;
+ u32 fw_release;
+ u32 epoch_time;
+ u32 ioctl_tunnel;
+ #define VDA_ITF_MEM_RW 0x00000001
+ #define VDA_ITF_TRACE 0x00000002
+ #define VDA_ITF_SCSI_PASS_THRU 0x00000004
+ #define VDA_ITF_GET_DEV_ADDR 0x00000008
+ #define VDA_ITF_PHY_CTRL 0x00000010
+ #define VDA_ITF_CONN_CTRL 0x00000020
+ #define VDA_ITF_GET_DEV_INFO 0x00000040
+
+ u32 num_targets_backend;
+ u8 reserved[0x48];
+};
+
+
+/* configuration commands */
+
+#define VDA_CFG_INIT 0x00
+#define VDA_CFG_GET_INIT 0x01
+#define VDA_CFG_GET_INIT2 0x02
+
+
+/*! physical region descriptor (PRD) aka scatter/gather entry */
+
+struct __packed atto_physical_region_description {
+ u64 address;
+ u32 ctl_len;
+ #define PRD_LEN_LIMIT 0x003FFFFF
+ #define PRD_LEN_MAX 0x003FF000
+ #define PRD_NXT_PRD_CNT 0x0000007F
+ #define PRD_CHAIN 0x01000000
+ #define PRD_DATA 0x00000000
+ #define PRD_INT_SEL 0xF0000000
+ #define PRD_INT_SEL_F0 0x00000000
+ #define PRD_INT_SEL_F1 0x40000000
+ #define PRD_INT_SEL_F2 0x80000000
+ #define PRD_INT_SEL_F3 0xc0000000
+ #define PRD_INT_SEL_SRAM 0x10000000
+ #define PRD_INT_SEL_PBSR 0x20000000
+
+};
+
+/* Request types. NOTE that ALL requests have the same layout for the first
+ * few bytes.
+ */
+struct __packed atto_vda_req_header {
+ u32 length;
+ u8 function;
+ u8 variable1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+};
+
+
+#define FCP_CDB_SIZE 16
+
+struct __packed atto_vda_scsi_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_SCSI */
+ u8 sense_len;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 flags;
+ #define FCP_CMND_LUN_MASK 0x000000FF
+ #define FCP_CMND_TA_MASK 0x00000700
+ #define FCP_CMND_TA_SIMPL_Q 0x00000000
+ #define FCP_CMND_TA_HEAD_Q 0x00000100
+ #define FCP_CMND_TA_ORDRD_Q 0x00000200
+ #define FCP_CMND_TA_ACA 0x00000400
+ #define FCP_CMND_PRI_MASK 0x00007800
+ #define FCP_CMND_TM_MASK 0x00FF0000
+ #define FCP_CMND_ATS 0x00020000
+ #define FCP_CMND_CTS 0x00040000
+ #define FCP_CMND_LRS 0x00100000
+ #define FCP_CMND_TRS 0x00200000
+ #define FCP_CMND_CLA 0x00400000
+ #define FCP_CMND_TRM 0x00800000
+ #define FCP_CMND_DATA_DIR 0x03000000
+ #define FCP_CMND_WRD 0x01000000
+ #define FCP_CMND_RDD 0x02000000
+
+ u8 cdb[FCP_CDB_SIZE];
+ union {
+ struct __packed {
+ u64 ppsense_buf;
+ u16 target_id;
+ u8 iblk_cnt_prd;
+ u8 reserved;
+ };
+
+ struct atto_physical_region_description sense_buff_prd;
+ };
+
+ union {
+ struct atto_vda_sge sge[1];
+
+ u32 abort_handle;
+ u32 dwords[245];
+ struct atto_physical_region_description prd[1];
+ } u;
+};
+
+
+struct __packed atto_vda_flash_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_FLASH */
+ u8 sub_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 flash_addr;
+ u8 checksum;
+ u8 rsvd[3];
+
+ union {
+ struct {
+ char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+ struct atto_vda_sge sge[1];
+ } file;
+
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[2];
+ } data;
+};
+
+
+struct __packed atto_vda_diag_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_DIAG */
+ u8 sub_func;
+ #define VDA_DIAG_STATUS 0x00
+ #define VDA_DIAG_RESET 0x01
+ #define VDA_DIAG_PAUSE 0x02
+ #define VDA_DIAG_RESUME 0x03
+ #define VDA_DIAG_READ 0x04
+ #define VDA_DIAG_WRITE 0x05
+
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 rsvd;
+ u64 local_addr;
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ae_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_AE */
+ u8 reserved1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[1];
+ };
+};
+
+
+struct __packed atto_vda_cli_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_CLI */
+ u8 reserved1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 cmd_rsp_len;
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ioctl_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_IOCTL */
+ u8 sub_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ struct atto_vda_sge reserved_sge;
+ struct atto_physical_region_description reserved_prde;
+ };
+
+ union {
+ struct {
+ u32 ctrl_code;
+ u16 target_id;
+ u8 lun;
+ u8 reserved;
+ } csmi;
+ };
+
+ union {
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[1];
+ };
+};
+
+
+struct __packed atto_vda_cfg_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_CFG */
+ u8 sub_func;
+ u8 rsvd1;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ u8 bytes[116];
+ struct atto_vda_cfg_init init;
+ struct atto_vda_sge sge;
+ struct atto_physical_region_description prde;
+ } data;
+};
+
+
+struct __packed atto_vda_mgmt_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_MGT */
+ u8 mgt_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u8 scan_generation;
+ u8 payld_sglst_offset;
+ u16 dev_index;
+ u32 payld_length;
+ u32 pad;
+ union {
+ struct atto_vda_sge sge[2];
+ struct atto_physical_region_description prde[2];
+ };
+ struct atto_vda_sge payld_sge[1];
+};
+
+
+union atto_vda_req {
+ struct atto_vda_scsi_req scsi;
+ struct atto_vda_flash_req flash;
+ struct atto_vda_diag_req diag;
+ struct atto_vda_ae_req ae;
+ struct atto_vda_cli_req cli;
+ struct atto_vda_ioctl_req ioctl;
+ struct atto_vda_cfg_req cfg;
+ struct atto_vda_mgmt_req mgt;
+ u8 bytes[1024];
+};
+
+/* Outbound response structures */
+
+struct __packed atto_vda_scsi_rsp {
+ u8 scsi_stat;
+ u8 sense_len;
+ u8 rsvd[2];
+ u32 residual_length;
+};
+
+struct __packed atto_vda_flash_rsp {
+ u32 file_size;
+};
+
+struct __packed atto_vda_ae_rsp {
+ u32 length;
+};
+
+struct __packed atto_vda_cli_rsp {
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_vda_ioctl_rsp {
+ union {
+ struct {
+ u32 csmi_status;
+ u16 target_id;
+ u8 lun;
+ u8 reserved;
+ } csmi;
+ };
+};
+
+struct __packed atto_vda_cfg_rsp {
+ u16 vda_version;
+ u16 fw_release;
+ u32 fw_build;
+};
+
+struct __packed atto_vda_mgmt_rsp {
+ u32 length;
+ u16 dev_index;
+ u8 scan_generation;
+};
+
+union atto_vda_func_rsp {
+ struct atto_vda_scsi_rsp scsi_rsp;
+ struct atto_vda_flash_rsp flash_rsp;
+ struct atto_vda_ae_rsp ae_rsp;
+ struct atto_vda_cli_rsp cli_rsp;
+ struct atto_vda_ioctl_rsp ioctl_rsp;
+ struct atto_vda_cfg_rsp cfg_rsp;
+ struct atto_vda_mgmt_rsp mgt_rsp;
+ u32 dwords[2];
+};
+
+struct __packed atto_vda_ob_rsp {
+ u32 handle;
+ u8 req_stat;
+ u8 rsvd[3];
+
+ union atto_vda_func_rsp
+ func_rsp;
+};
+
+struct __packed atto_vda_ae_data {
+ u8 event_data[256];
+};
+
+struct __packed atto_vda_mgmt_data {
+ union {
+ u8 bytes[112];
+ struct atto_vda_devinfo dev_info;
+ struct atto_vda_grp_info grp_info;
+ struct atto_vdapart_info part_info;
+ struct atto_vda_dh_info dev_health_info;
+ struct atto_vda_metrics_info metrics_info;
+ struct atto_vda_schedule_info sched_info;
+ struct atto_vda_n_vcache_info nvcache_info;
+ struct atto_vda_buzzer_info buzzer_info;
+ } data;
+};
+
+union atto_vda_rsp_data {
+ struct atto_vda_ae_data ae_data;
+ struct atto_vda_mgmt_data mgt_data;
+ u8 sense_data[252];
+ #define SENSE_DATA_SZ 252;
+ u8 bytes[256];
+};
+
+#endif
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
new file mode 100644
index 000000000..b6030e3ed
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -0,0 +1,1426 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+
+#include "esas2r_log.h"
+#include "atioctl.h"
+#include "atvda.h"
+
+#ifndef ESAS2R_H
+#define ESAS2R_H
+
+/* Global Variables */
+extern struct esas2r_adapter *esas2r_adapters[];
+extern u8 *esas2r_buffered_ioctl;
+extern dma_addr_t esas2r_buffered_ioctl_addr;
+extern u32 esas2r_buffered_ioctl_size;
+extern struct pci_dev *esas2r_buffered_ioctl_pcid;
+#define SGL_PG_SZ_MIN 64
+#define SGL_PG_SZ_MAX 1024
+extern int sgl_page_size;
+#define NUM_SGL_MIN 8
+#define NUM_SGL_MAX 2048
+extern int num_sg_lists;
+#define NUM_REQ_MIN 4
+#define NUM_REQ_MAX 256
+extern int num_requests;
+#define NUM_AE_MIN 2
+#define NUM_AE_MAX 8
+extern int num_ae_requests;
+extern int cmd_per_lun;
+extern int can_queue;
+extern int esas2r_max_sectors;
+extern int sg_tablesize;
+extern int interrupt_mode;
+extern int num_io_requests;
+
+/* Macro defintions */
+#define ESAS2R_MAX_ID 255
+#define MAX_ADAPTERS 32
+#define ESAS2R_DRVR_NAME "esas2r"
+#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter"
+#define ESAS2R_MAX_DEVICES 32
+#define ATTONODE_NAME "ATTONode"
+#define ESAS2R_MAJOR_REV 1
+#define ESAS2R_MINOR_REV 00
+#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
+ DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
+#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
+#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
+#define ESAS2R_DEFAULT_CMD_PER_LUN 64
+#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
+#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
+#define NUM_TO_STR(num) #num
+
+#define ESAS2R_SGL_ALIGN 16
+#define ESAS2R_LIST_ALIGN 16
+#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA
+#define ESAS2R_DATA_BUF_LEN 256
+#define ESAS2R_DEFAULT_TMO 5000
+#define ESAS2R_DISC_BUF_LEN 512
+#define ESAS2R_FWCOREDUMP_SZ 0x80000
+#define ESAS2R_NUM_PHYS 8
+#define ESAS2R_TARG_ID_INV 0xFFFF
+#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK
+#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK
+#define ESAS2R_INT_DIS_MASK 0
+#define ESAS2R_MAX_TARGETS 256
+#define ESAS2R_KOBJ_NAME_LEN 20
+
+/* u16 (WORD) component macros */
+#define LOBYTE(w) ((u8)(u16)(w))
+#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
+#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))
+
+/* u32 (DWORD) component macros */
+#define LOWORD(d) ((u16)(u32)(d))
+#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
+#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))
+
+/* macro to get the lowest nonzero bit of a value */
+#define LOBIT(x) ((x) & (0 - (x)))
+
+/* These functions are provided to access the chip's control registers.
+ * The register is specified by its byte offset from the register base
+ * for the adapter.
+ */
+#define esas2r_read_register_dword(a, reg) \
+ readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)
+
+#define esas2r_write_register_dword(a, reg, data) \
+ writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))
+
+#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)
+
+/* This function is provided to access the chip's data window. The
+ * register is specified by its byte offset from the window base
+ * for the adapter.
+ */
+#define esas2r_read_data_byte(a, reg) \
+ readb((void __iomem *)a->data_window + (reg))
+
+/* ATTO vendor and device Ids */
+#define ATTO_VENDOR_ID 0x117C
+#define ATTO_DID_INTEL_IOP348 0x002C
+#define ATTO_DID_MV_88RC9580 0x0049
+#define ATTO_DID_MV_88RC9580TS 0x0066
+#define ATTO_DID_MV_88RC9580TSE 0x0067
+#define ATTO_DID_MV_88RC9580TL 0x0068
+
+/* ATTO subsystem device Ids */
+#define ATTO_SSDID_TBT 0x4000
+#define ATTO_TSSC_3808 0x4066
+#define ATTO_TSSC_3808E 0x4067
+#define ATTO_TLSH_1068 0x4068
+#define ATTO_ESAS_R680 0x0049
+#define ATTO_ESAS_R608 0x004A
+#define ATTO_ESAS_R60F 0x004B
+#define ATTO_ESAS_R6F0 0x004C
+#define ATTO_ESAS_R644 0x004D
+#define ATTO_ESAS_R648 0x004E
+
+/*
+ * flash definitions & structures
+ * define the code types
+ */
+#define FBT_CPYR 0xAA00
+#define FBT_SETUP 0xAA02
+#define FBT_FLASH_VER 0xAA04
+
+/* offsets to various locations in flash */
+#define FLS_OFFSET_BOOT (u32)(0x00700000)
+#define FLS_OFFSET_NVR (u32)(0x007C0000)
+#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
+#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
+#define FLS_BLOCK_SIZE (u32)(0x00020000)
+#define FI_NVR_2KB 0x0800
+#define FI_NVR_8KB 0x2000
+#define FM_BUF_SZ 0x800
+
+/*
+ * marvell frey (88R9580) register definitions
+ * chip revision identifiers
+ */
+#define MVR_FREY_B2 0xB2
+
+/*
+ * memory window definitions. window 0 is the data window with definitions
+ * of MW_DATA_XXX. window 1 is the register window with definitions of
+ * MW_REG_XXX.
+ */
+#define MW_REG_WINDOW_SIZE (u32)(0x00040000)
+#define MW_REG_OFFSET_HWREG (u32)(0x00000000)
+#define MW_REG_OFFSET_PCI (u32)(0x00008000)
+#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
+#define MW_DATA_WINDOW_SIZE (u32)(0x00020000)
+#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000)
+#define MW_DATA_ADDR_SRAM (u32)(0xF4000000)
+#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000)
+
+/*
+ * the following registers are for the communication
+ * list interface (AKA message unit (MU))
+ */
+#define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
+#define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
+
+#define MU_IN_LIST_WRITE (u32)(0x00004018)
+ #define MU_ILW_TOGGLE (u32)(0x00004000)
+
+#define MU_IN_LIST_READ (u32)(0x0000401C)
+ #define MU_ILR_TOGGLE (u32)(0x00004000)
+ #define MU_ILIC_LIST (u32)(0x0000000F)
+ #define MU_ILIC_LIST_F0 (u32)(0x00000000)
+ #define MU_ILIC_DEST (u32)(0x00000F00)
+ #define MU_ILIC_DEST_DDR (u32)(0x00000200)
+#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
+
+#define MU_IN_LIST_CONFIG (u32)(0x0000402C)
+ #define MU_ILC_ENABLE (u32)(0x00000001)
+ #define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
+ #define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
+ #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
+ #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
+ #define MU_ILC_NUMBER_SHIFT 16
+
+#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
+#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
+
+#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
+#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
+
+#define MU_OUT_LIST_WRITE (u32)(0x00004068)
+ #define MU_OLW_TOGGLE (u32)(0x00004000)
+
+#define MU_OUT_LIST_COPY (u32)(0x0000406C)
+ #define MU_OLC_TOGGLE (u32)(0x00004000)
+ #define MU_OLC_WRT_PTR (u32)(0x00003FFF)
+
+#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
+ #define MU_OLIC_LIST (u32)(0x0000000F)
+ #define MU_OLIC_LIST_F0 (u32)(0x00000000)
+ #define MU_OLIC_SOURCE (u32)(0x00000F00)
+ #define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
+
+#define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
+ #define MU_OLC_ENABLE (u32)(0x00000001)
+ #define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
+ #define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
+ #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
+ #define MU_OLC_NUMBER_SHIFT 16
+
+#define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
+ #define MU_OLIS_INT (u32)(0x00000001)
+
+#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
+ #define MU_OLIS_MASK (u32)(0x00000001)
+
+/*
+ * the maximum size of the communication lists is two greater than the
+ * maximum amount of VDA requests. the extra are to prevent queue overflow.
+ */
+#define ESAS2R_MAX_NUM_REQS 256
+#define ESAS2R_NUM_EXTRA 2
+#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
+
+/*
+ * the following registers are for the CPU interface
+ */
+#define MU_CTL_STATUS_IN (u32)(0x00010108)
+ #define MU_CTL_IN_FULL_RST (u32)(0x00000020)
+#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
+ #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
+#define MU_DOORBELL_IN (u32)(0x00010460)
+ #define DRBL_RESET_BUS (u32)(0x00000002)
+ #define DRBL_PAUSE_AE (u32)(0x00000004)
+ #define DRBL_RESUME_AE (u32)(0x00000008)
+ #define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
+ #define DRBL_FLASH_REQ (u32)(0x00000020)
+ #define DRBL_FLASH_DONE (u32)(0x00000040)
+ #define DRBL_FORCE_INT (u32)(0x00000080)
+ #define DRBL_MSG_IFC_INIT (u32)(0x00000100)
+ #define DRBL_POWER_DOWN (u32)(0x00000200)
+ #define DRBL_DRV_VER_1 (u32)(0x00010000)
+ #define DRBL_DRV_VER DRBL_DRV_VER_1
+#define MU_DOORBELL_IN_ENB (u32)(0x00010464)
+#define MU_DOORBELL_OUT (u32)(0x00010480)
+ #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
+ #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
+ #define DRBL_UNDEF_INSTR (u32)(0x00200000)
+ #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
+ #define DRBL_DATA_ABORT (u32)(0x00400000)
+ #define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
+ #define DRBL_FW_RESET (u32)(0x00080000)
+ #define DRBL_FW_VER_MSK (u32)(0x00070000)
+ #define DRBL_FW_VER_0 (u32)(0x00000000)
+ #define DRBL_FW_VER_1 (u32)(0x00010000)
+ #define DRBL_FW_VER DRBL_FW_VER_1
+#define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
+ #define DRBL_ENB_MASK (u32)(0x00F803FF)
+#define MU_INT_STATUS_OUT (u32)(0x00010200)
+ #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
+ #define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
+ #define MU_INTSTAT_DRBL (u32)(0x00001000)
+ #define MU_INTSTAT_MASK (u32)(0x00001010)
+#define MU_INT_MASK_OUT (u32)(0x0001020C)
+
+/* PCI express registers accessed via window 1 */
+#define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
+ #define MVRPW1R_ENABLE (u32)(0x00000001)
+
+
+/* structures */
+
+/* inbound list dynamic source entry */
+struct esas2r_inbound_list_source_entry {
+ u64 address;
+ u32 length;
+ #define HWILSE_INTERFACE_F0 0x00000000
+ u32 reserved;
+};
+
+/* PCI data structure in expansion ROM images */
+struct __packed esas2r_boot_header {
+ char signature[4];
+ u16 vendor_id;
+ u16 device_id;
+ u16 VPD;
+ u16 struct_length;
+ u8 struct_revision;
+ u8 class_code[3];
+ u16 image_length;
+ u16 code_revision;
+ u8 code_type;
+ #define CODE_TYPE_PC 0
+ #define CODE_TYPE_OPEN 1
+ #define CODE_TYPE_EFI 3
+ u8 indicator;
+ #define INDICATOR_LAST 0x80
+ u8 reserved[2];
+};
+
+struct __packed esas2r_boot_image {
+ u16 signature;
+ u8 reserved[22];
+ u16 header_offset;
+ u16 pnp_offset;
+};
+
+struct __packed esas2r_pc_image {
+ u16 signature;
+ u8 length;
+ u8 entry_point[3];
+ u8 checksum;
+ u16 image_end;
+ u16 min_size;
+ u8 rom_flags;
+ u8 reserved[12];
+ u16 header_offset;
+ u16 pnp_offset;
+ struct esas2r_boot_header boot_image;
+};
+
+struct __packed esas2r_efi_image {
+ u16 signature;
+ u16 length;
+ u32 efi_signature;
+ #define EFI_ROM_SIG 0x00000EF1
+ u16 image_type;
+ #define EFI_IMAGE_APP 10
+ #define EFI_IMAGE_BSD 11
+ #define EFI_IMAGE_RTD 12
+ u16 machine_type;
+ #define EFI_MACHINE_IA32 0x014c
+ #define EFI_MACHINE_IA64 0x0200
+ #define EFI_MACHINE_X64 0x8664
+ #define EFI_MACHINE_EBC 0x0EBC
+ u16 compression;
+ #define EFI_UNCOMPRESSED 0x0000
+ #define EFI_COMPRESSED 0x0001
+ u8 reserved[8];
+ u16 efi_offset;
+ u16 header_offset;
+ u16 reserved2;
+ struct esas2r_boot_header boot_image;
+};
+
+struct esas2r_adapter;
+struct esas2r_sg_context;
+struct esas2r_request;
+
+typedef void (*RQCALLBK) (struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+
+struct esas2r_component_header {
+ u8 img_type;
+ #define CH_IT_FW 0x00
+ #define CH_IT_NVR 0x01
+ #define CH_IT_BIOS 0x02
+ #define CH_IT_MAC 0x03
+ #define CH_IT_CFG 0x04
+ #define CH_IT_EFI 0x05
+ u8 status;
+ #define CH_STAT_PENDING 0xff
+ #define CH_STAT_FAILED 0x00
+ #define CH_STAT_SUCCESS 0x01
+ #define CH_STAT_RETRY 0x02
+ #define CH_STAT_INVALID 0x03
+ u8 pad[2];
+ u32 version;
+ u32 length;
+ u32 image_offset;
+};
+
+#define FI_REL_VER_SZ 16
+
+struct esas2r_flash_img_v0 {
+ u8 fi_version;
+ #define FI_VERSION_0 00
+ u8 status;
+ u8 adap_typ;
+ u8 action;
+ u32 length;
+ u16 checksum;
+ u16 driver_error;
+ u16 flags;
+ u16 num_comps;
+ #define FI_NUM_COMPS_V0 5
+ u8 rel_version[FI_REL_VER_SZ];
+ struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
+ u8 scratch_buf[FM_BUF_SZ];
+};
+
+struct esas2r_flash_img {
+ u8 fi_version;
+ #define FI_VERSION_1 01
+ u8 status;
+ #define FI_STAT_SUCCESS 0x00
+ #define FI_STAT_FAILED 0x01
+ #define FI_STAT_REBOOT 0x02
+ #define FI_STAT_ADAPTYP 0x03
+ #define FI_STAT_INVALID 0x04
+ #define FI_STAT_CHKSUM 0x05
+ #define FI_STAT_LENGTH 0x06
+ #define FI_STAT_UNKNOWN 0x07
+ #define FI_STAT_IMG_VER 0x08
+ #define FI_STAT_BUSY 0x09
+ #define FI_STAT_DUAL 0x0A
+ #define FI_STAT_MISSING 0x0B
+ #define FI_STAT_UNSUPP 0x0C
+ #define FI_STAT_ERASE 0x0D
+ #define FI_STAT_FLASH 0x0E
+ #define FI_STAT_DEGRADED 0x0F
+ u8 adap_typ;
+ #define FI_AT_UNKNWN 0xFF
+ #define FI_AT_SUN_LAKE 0x0B
+ #define FI_AT_MV_9580 0x0F
+ u8 action;
+ #define FI_ACT_DOWN 0x00
+ #define FI_ACT_UP 0x01
+ #define FI_ACT_UPSZ 0x02
+ #define FI_ACT_MAX 0x02
+ #define FI_ACT_DOWN1 0x80
+ u32 length;
+ u16 checksum;
+ u16 driver_error;
+ u16 flags;
+ #define FI_FLG_NVR_DEF 0x0001
+ u16 num_comps;
+ #define FI_NUM_COMPS_V1 6
+ u8 rel_version[FI_REL_VER_SZ];
+ struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
+ u8 scratch_buf[FM_BUF_SZ];
+};
+
+/* definitions for flash script (FS) commands */
+struct esas2r_ioctlfs_command {
+ u8 command;
+ #define ESAS2R_FS_CMD_ERASE 0
+ #define ESAS2R_FS_CMD_READ 1
+ #define ESAS2R_FS_CMD_BEGINW 2
+ #define ESAS2R_FS_CMD_WRITE 3
+ #define ESAS2R_FS_CMD_COMMIT 4
+ #define ESAS2R_FS_CMD_CANCEL 5
+ u8 checksum;
+ u8 reserved[2];
+ u32 flash_addr;
+ u32 length;
+ u32 image_offset;
+};
+
+struct esas2r_ioctl_fs {
+ u8 version;
+ #define ESAS2R_FS_VER 0
+ u8 status;
+ u8 driver_error;
+ u8 adap_type;
+ #define ESAS2R_FS_AT_ESASRAID2 3
+ #define ESAS2R_FS_AT_TSSASRAID2 4
+ #define ESAS2R_FS_AT_TSSASRAID2E 5
+ #define ESAS2R_FS_AT_TLSASHBA 6
+ u8 driver_ver;
+ u8 reserved[11];
+ struct esas2r_ioctlfs_command command;
+ u8 data[1];
+};
+
+struct esas2r_sas_nvram {
+ u8 signature[4];
+ u8 version;
+ #define SASNVR_VERSION_0 0x00
+ #define SASNVR_VERSION SASNVR_VERSION_0
+ u8 checksum;
+ #define SASNVR_CKSUM_SEED 0x5A
+ u8 max_lun_for_target;
+ u8 pci_latency;
+ #define SASNVR_PCILAT_DIS 0x00
+ #define SASNVR_PCILAT_MIN 0x10
+ #define SASNVR_PCILAT_MAX 0xF8
+ u8 options1;
+ #define SASNVR1_BOOT_DRVR 0x01
+ #define SASNVR1_BOOT_SCAN 0x02
+ #define SASNVR1_DIS_PCI_MWI 0x04
+ #define SASNVR1_FORCE_ORD_Q 0x08
+ #define SASNVR1_CACHELINE_0 0x10
+ #define SASNVR1_DIS_DEVSORT 0x20
+ #define SASNVR1_PWR_MGT_EN 0x40
+ #define SASNVR1_WIDEPORT 0x80
+ u8 options2;
+ #define SASNVR2_SINGLE_BUS 0x01
+ #define SASNVR2_SLOT_BIND 0x02
+ #define SASNVR2_EXP_PROG 0x04
+ #define SASNVR2_CMDTHR_LUN 0x08
+ #define SASNVR2_HEARTBEAT 0x10
+ #define SASNVR2_INT_CONNECT 0x20
+ #define SASNVR2_SW_MUX_CTRL 0x40
+ #define SASNVR2_DISABLE_NCQ 0x80
+ u8 int_coalescing;
+ #define SASNVR_COAL_DIS 0x00
+ #define SASNVR_COAL_LOW 0x01
+ #define SASNVR_COAL_MED 0x02
+ #define SASNVR_COAL_HI 0x03
+ u8 cmd_throttle;
+ #define SASNVR_CMDTHR_NONE 0x00
+ u8 dev_wait_time;
+ u8 dev_wait_count;
+ u8 spin_up_delay;
+ #define SASNVR_SPINUP_MAX 0x14
+ u8 ssp_align_rate;
+ u8 sas_addr[8];
+ u8 phy_speed[16];
+ #define SASNVR_SPEED_AUTO 0x00
+ #define SASNVR_SPEED_1_5GB 0x01
+ #define SASNVR_SPEED_3GB 0x02
+ #define SASNVR_SPEED_6GB 0x03
+ #define SASNVR_SPEED_12GB 0x04
+ u8 phy_mux[16];
+ #define SASNVR_MUX_DISABLED 0x00
+ #define SASNVR_MUX_1_5GB 0x01
+ #define SASNVR_MUX_3GB 0x02
+ #define SASNVR_MUX_6GB 0x03
+ u8 phy_flags[16];
+ #define SASNVR_PHF_DISABLED 0x01
+ #define SASNVR_PHF_RD_ONLY 0x02
+ u8 sort_type;
+ #define SASNVR_SORT_SAS_ADDR 0x00
+ #define SASNVR_SORT_H308_CONN 0x01
+ #define SASNVR_SORT_PHY_ID 0x02
+ #define SASNVR_SORT_SLOT_ID 0x03
+ u8 dpm_reqcmd_lmt;
+ u8 dpm_stndby_time;
+ u8 dpm_active_time;
+ u8 phy_target_id[16];
+ #define SASNVR_PTI_DISABLED 0xFF
+ u8 virt_ses_mode;
+ #define SASNVR_VSMH_DISABLED 0x00
+ u8 read_write_mode;
+ #define SASNVR_RWM_DEFAULT 0x00
+ u8 link_down_to;
+ u8 reserved[0xA1];
+};
+
+typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
+
+struct esas2r_sg_context {
+ struct esas2r_adapter *adapter;
+ struct esas2r_request *first_req;
+ u32 length;
+ u8 *cur_offset;
+ PGETPHYSADDR get_phys_addr;
+ union {
+ struct {
+ struct atto_vda_sge *curr;
+ struct atto_vda_sge *last;
+ struct atto_vda_sge *limit;
+ struct atto_vda_sge *chain;
+ } a64;
+ struct {
+ struct atto_physical_region_description *curr;
+ struct atto_physical_region_description *chain;
+ u32 sgl_max_cnt;
+ u32 sge_cnt;
+ } prd;
+ } sge;
+ struct scatterlist *cur_sgel;
+ u8 *exp_offset;
+ int num_sgel;
+ int sgel_count;
+};
+
+struct esas2r_target {
+ u8 flags;
+ #define TF_PASS_THRU 0x01
+ #define TF_USED 0x02
+ u8 new_target_state;
+ u8 target_state;
+ u8 buffered_target_state;
+#define TS_NOT_PRESENT 0x00
+#define TS_PRESENT 0x05
+#define TS_LUN_CHANGE 0x06
+#define TS_INVALID 0xFF
+ u32 block_size;
+ u32 inter_block;
+ u32 inter_byte;
+ u16 virt_targ_id;
+ u16 phys_targ_id;
+ u8 identifier_len;
+ u64 sas_addr;
+ u8 identifier[60];
+ struct atto_vda_ae_lu lu_event;
+};
+
+struct esas2r_request {
+ struct list_head comp_list;
+ struct list_head req_list;
+ union atto_vda_req *vrq;
+ struct esas2r_mem_desc *vrq_md;
+ union {
+ void *data_buf;
+ union atto_vda_rsp_data *vda_rsp_data;
+ };
+ u8 *sense_buf;
+ struct list_head sg_table_head;
+ struct esas2r_mem_desc *sg_table;
+ u32 timeout;
+ #define RQ_TIMEOUT_S1 0xFFFFFFFF
+ #define RQ_TIMEOUT_S2 0xFFFFFFFE
+ #define RQ_MAX_TIMEOUT 0xFFFFFFFD
+ u16 target_id;
+ u8 req_type;
+ #define RT_INI_REQ 0x01
+ #define RT_DISC_REQ 0x02
+ u8 sense_len;
+ union atto_vda_func_rsp func_rsp;
+ RQCALLBK comp_cb;
+ RQCALLBK interrupt_cb;
+ void *interrupt_cx;
+ u8 flags;
+ #define RF_1ST_IBLK_BASE 0x04
+ #define RF_FAILURE_OK 0x08
+ u8 req_stat;
+ u16 vda_req_sz;
+ #define RQ_SIZE_DEFAULT 0
+ u64 lba;
+ RQCALLBK aux_req_cb;
+ void *aux_req_cx;
+ u32 blk_len;
+ u32 max_blk_len;
+ union {
+ struct scsi_cmnd *cmd;
+ u8 *task_management_status_ptr;
+ };
+};
+
+struct esas2r_flash_context {
+ struct esas2r_flash_img *fi;
+ RQCALLBK interrupt_cb;
+ u8 *sgc_offset;
+ u8 *scratch;
+ u32 fi_hdr_len;
+ u8 task;
+ #define FMTSK_ERASE_BOOT 0
+ #define FMTSK_WRTBIOS 1
+ #define FMTSK_READBIOS 2
+ #define FMTSK_WRTMAC 3
+ #define FMTSK_READMAC 4
+ #define FMTSK_WRTEFI 5
+ #define FMTSK_READEFI 6
+ #define FMTSK_WRTCFG 7
+ #define FMTSK_READCFG 8
+ u8 func;
+ u16 num_comps;
+ u32 cmp_len;
+ u32 flsh_addr;
+ u32 curr_len;
+ u8 comp_typ;
+ struct esas2r_sg_context sgc;
+};
+
+struct esas2r_disc_context {
+ u8 disc_evt;
+ #define DCDE_DEV_CHANGE 0x01
+ #define DCDE_DEV_SCAN 0x02
+ u8 state;
+ #define DCS_DEV_RMV 0x00
+ #define DCS_DEV_ADD 0x01
+ #define DCS_BLOCK_DEV_SCAN 0x02
+ #define DCS_RAID_GRP_INFO 0x03
+ #define DCS_PART_INFO 0x04
+ #define DCS_PT_DEV_INFO 0x05
+ #define DCS_PT_DEV_ADDR 0x06
+ #define DCS_DISC_DONE 0xFF
+ u16 flags;
+ #define DCF_DEV_CHANGE 0x0001
+ #define DCF_DEV_SCAN 0x0002
+ #define DCF_POLLED 0x8000
+ u32 interleave;
+ u32 block_size;
+ u16 dev_ix;
+ u8 part_num;
+ u8 raid_grp_ix;
+ char raid_grp_name[16];
+ struct esas2r_target *curr_targ;
+ u16 curr_virt_id;
+ u16 curr_phys_id;
+ u8 scan_gen;
+ u8 dev_addr_type;
+ u64 sas_addr;
+};
+
+struct esas2r_mem_desc {
+ struct list_head next_desc;
+ void *virt_addr;
+ u64 phys_addr;
+ void *pad;
+ void *esas2r_data;
+ u32 esas2r_param;
+ u32 size;
+};
+
+enum fw_event_type {
+ fw_event_null,
+ fw_event_lun_change,
+ fw_event_present,
+ fw_event_not_present,
+ fw_event_vda_ae
+};
+
+struct esas2r_vda_ae {
+ u32 signature;
+#define ESAS2R_VDA_EVENT_SIG 0x4154544F
+ u8 bus_number;
+ u8 devfn;
+ u8 pad[2];
+ union atto_vda_ae vda_ae;
+};
+
+struct esas2r_fw_event_work {
+ struct list_head list;
+ struct delayed_work work;
+ struct esas2r_adapter *a;
+ enum fw_event_type type;
+ u8 data[sizeof(struct esas2r_vda_ae)];
+};
+
+enum state {
+ FW_INVALID_ST,
+ FW_STATUS_ST,
+ FW_COMMAND_ST
+};
+
+struct esas2r_firmware {
+ enum state state;
+ struct esas2r_flash_img header;
+ u8 *data;
+ u64 phys;
+ int orig_len;
+ void *header_buff;
+ u64 header_buff_phys;
+};
+
+struct esas2r_adapter {
+ struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
+ struct esas2r_target *targetdb_end;
+ unsigned char *regs;
+ unsigned char *data_window;
+ long flags;
+ #define AF_PORT_CHANGE 0
+ #define AF_CHPRST_NEEDED 1
+ #define AF_CHPRST_PENDING 2
+ #define AF_CHPRST_DETECTED 3
+ #define AF_BUSRST_NEEDED 4
+ #define AF_BUSRST_PENDING 5
+ #define AF_BUSRST_DETECTED 6
+ #define AF_DISABLED 7
+ #define AF_FLASH_LOCK 8
+ #define AF_OS_RESET 9
+ #define AF_FLASHING 10
+ #define AF_POWER_MGT 11
+ #define AF_NVR_VALID 12
+ #define AF_DEGRADED_MODE 13
+ #define AF_DISC_PENDING 14
+ #define AF_TASKLET_SCHEDULED 15
+ #define AF_HEARTBEAT 16
+ #define AF_HEARTBEAT_ENB 17
+ #define AF_NOT_PRESENT 18
+ #define AF_CHPRST_STARTED 19
+ #define AF_FIRST_INIT 20
+ #define AF_POWER_DOWN 21
+ #define AF_DISC_IN_PROG 22
+ #define AF_COMM_LIST_TOGGLE 23
+ #define AF_LEGACY_SGE_MODE 24
+ #define AF_DISC_POLLED 25
+ long flags2;
+ #define AF2_SERIAL_FLASH 0
+ #define AF2_DEV_SCAN 1
+ #define AF2_DEV_CNT_OK 2
+ #define AF2_COREDUMP_AVAIL 3
+ #define AF2_COREDUMP_SAVED 4
+ #define AF2_VDA_POWER_DOWN 5
+ #define AF2_THUNDERLINK 6
+ #define AF2_THUNDERBOLT 7
+ #define AF2_INIT_DONE 8
+ #define AF2_INT_PENDING 9
+ #define AF2_TIMER_TICK 10
+ #define AF2_IRQ_CLAIMED 11
+ #define AF2_MSI_ENABLED 12
+ atomic_t disable_cnt;
+ atomic_t dis_ints_cnt;
+ u32 int_stat;
+ u32 int_mask;
+ u32 volatile *outbound_copy;
+ struct list_head avail_request;
+ spinlock_t request_lock;
+ spinlock_t sg_list_lock;
+ spinlock_t queue_lock;
+ spinlock_t mem_lock;
+ struct list_head free_sg_list_head;
+ struct esas2r_mem_desc *sg_list_mds;
+ struct list_head active_list;
+ struct list_head defer_list;
+ struct esas2r_request **req_table;
+ union {
+ u16 prev_dev_cnt;
+ u32 heartbeat_time;
+ #define ESAS2R_HEARTBEAT_TIME (3000)
+ };
+ u32 chip_uptime;
+ #define ESAS2R_CHP_UPTIME_MAX (60000)
+ #define ESAS2R_CHP_UPTIME_CNT (20000)
+ u64 uncached_phys;
+ u8 *uncached;
+ struct esas2r_sas_nvram *nvram;
+ struct esas2r_request general_req;
+ u8 init_msg;
+ #define ESAS2R_INIT_MSG_START 1
+ #define ESAS2R_INIT_MSG_INIT 2
+ #define ESAS2R_INIT_MSG_GET_INIT 3
+ #define ESAS2R_INIT_MSG_REINIT 4
+ u16 cmd_ref_no;
+ u32 fw_version;
+ u32 fw_build;
+ u32 chip_init_time;
+ #define ESAS2R_CHPRST_TIME (180000)
+ #define ESAS2R_CHPRST_WAIT_TIME (2000)
+ u32 last_tick_time;
+ u32 window_base;
+ RQBUILDSGL build_sgl;
+ struct esas2r_request *first_ae_req;
+ u32 list_size;
+ u32 last_write;
+ u32 last_read;
+ u16 max_vdareq_size;
+ u16 disc_wait_cnt;
+ struct esas2r_mem_desc inbound_list_md;
+ struct esas2r_mem_desc outbound_list_md;
+ struct esas2r_disc_context disc_ctx;
+ u8 *disc_buffer;
+ u32 disc_start_time;
+ u32 disc_wait_time;
+ u32 flash_ver;
+ char flash_rev[16];
+ char fw_rev[16];
+ char image_type[16];
+ struct esas2r_flash_context flash_context;
+ u32 num_targets_backend;
+ u32 ioctl_tunnel;
+ struct tasklet_struct tasklet;
+ struct pci_dev *pcid;
+ struct Scsi_Host *host;
+ unsigned int index;
+ char name[32];
+ struct timer_list timer;
+ struct esas2r_firmware firmware;
+ wait_queue_head_t nvram_waiter;
+ int nvram_command_done;
+ wait_queue_head_t fm_api_waiter;
+ int fm_api_command_done;
+ wait_queue_head_t vda_waiter;
+ int vda_command_done;
+ u8 *vda_buffer;
+ u64 ppvda_buffer;
+#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
+#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ)
+ wait_queue_head_t fs_api_waiter;
+ int fs_api_command_done;
+ u64 ppfs_api_buffer;
+ u8 *fs_api_buffer;
+ u32 fs_api_buffer_size;
+ wait_queue_head_t buffered_ioctl_waiter;
+ int buffered_ioctl_done;
+ int uncached_size;
+ struct workqueue_struct *fw_event_q;
+ struct list_head fw_event_list;
+ spinlock_t fw_event_lock;
+ u8 fw_events_off; /* if '1', then ignore events */
+ char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
+ /*
+ * intr_mode stores the interrupt mode currently being used by this
+ * adapter. it is based on the interrupt_mode module parameter, but
+ * can be changed based on the ability (or not) to utilize the
+ * mode requested by the parameter.
+ */
+ int intr_mode;
+#define INTR_MODE_LEGACY 0
+#define INTR_MODE_MSI 1
+#define INTR_MODE_MSIX 2
+ struct esas2r_sg_context fm_api_sgc;
+ u8 *save_offset;
+ struct list_head vrq_mds_head;
+ struct esas2r_mem_desc *vrq_mds;
+ int num_vrqs;
+ struct semaphore fm_api_semaphore;
+ struct semaphore fs_api_semaphore;
+ struct semaphore nvram_semaphore;
+ struct atto_ioctl *local_atto_ioctl;
+ u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
+ unsigned int sysfs_fw_created:1;
+ unsigned int sysfs_fs_created:1;
+ unsigned int sysfs_vda_created:1;
+ unsigned int sysfs_hw_created:1;
+ unsigned int sysfs_live_nvram_created:1;
+ unsigned int sysfs_default_nvram_created:1;
+};
+
+/*
+ * Function Declarations
+ * SCSI functions
+ */
+int esas2r_release(struct Scsi_Host *);
+const char *esas2r_info(struct Scsi_Host *);
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *data);
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
+int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl *ioctl_hba);
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+/* SCSI error handler (eh) functions */
+int esas2r_eh_abort(struct scsi_cmnd *cmd);
+int esas2r_device_reset(struct scsi_cmnd *cmd);
+int esas2r_host_reset(struct scsi_cmnd *cmd);
+int esas2r_bus_reset(struct scsi_cmnd *cmd);
+int esas2r_target_reset(struct scsi_cmnd *cmd);
+
+/* Internal functions */
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+ int index);
+int esas2r_cleanup(struct Scsi_Host *host);
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+void esas2r_adapter_tasklet(unsigned long context);
+irqreturn_t esas2r_interrupt(int irq, void *dev_id);
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
+void esas2r_kickoff_timer(struct esas2r_adapter *a);
+int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
+int esas2r_resume(struct pci_dev *pcid);
+void esas2r_fw_event_off(struct esas2r_adapter *a);
+void esas2r_fw_event_on(struct esas2r_adapter *a);
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *nvram);
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+ struct esas2r_sas_nvram *nvram);
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_reset_detected(struct esas2r_adapter *a);
+void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
+ u8 state);
+int esas2r_req_status_to_error(u8 req_stat);
+void esas2r_kill_adapter(int i);
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+ void **uncached_area);
+bool esas2r_check_adapter(struct esas2r_adapter *a);
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+ struct esas2r_request *rqaux, u8 task_mgt_func);
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
+void esas2r_adapter_interrupt(struct esas2r_adapter *a);
+void esas2r_do_deferred_processes(struct esas2r_adapter *a);
+void esas2r_reset_bus(struct esas2r_adapter *a);
+void esas2r_reset_adapter(struct esas2r_adapter *a);
+void esas2r_timer_tick(struct esas2r_adapter *a);
+const char *esas2r_get_model_name(struct esas2r_adapter *a);
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
+u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
+ u32 *delay);
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 cksum,
+ u32 addr,
+ u32 length);
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 scan_gen,
+ u16 dev_index,
+ u32 length,
+ void *data);
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u32 cmd_rsp_len);
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u8 sub_func);
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u32 length,
+ void *data);
+void esas2r_power_down(struct esas2r_adapter *a);
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_ioctl_fs *fs,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc);
+bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
+ u32 size);
+bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
+ u32 size);
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+ struct esas2r_request *rq, struct esas2r_sg_context *sgc);
+void esas2r_force_interrupt(struct esas2r_adapter *a);
+void esas2r_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_process_adapter_reset(struct esas2r_adapter *a);
+void esas2r_complete_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_dummy_complete(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+bool esas2r_read_flash_rev(struct esas2r_adapter *a);
+bool esas2r_read_image_type(struct esas2r_adapter *a);
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
+bool esas2r_nvram_validate(struct esas2r_adapter *a);
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
+bool esas2r_print_flash_rev(struct esas2r_adapter *a);
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
+bool esas2r_init_msgs(struct esas2r_adapter *a);
+bool esas2r_is_adapter_present(struct esas2r_adapter *a);
+void esas2r_nuxi_mgt_data(u8 function, void *data);
+void esas2r_nuxi_cfg_data(u8 function, void *data);
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
+void esas2r_reset_chip(struct esas2r_adapter *a);
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_polled_interrupt(struct esas2r_adapter *a);
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+ u8 status);
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+void esas2r_targ_db_initialize(struct esas2r_adapter *a);
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc);
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc,
+ u8 *ident,
+ u8 ident_len);
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+ u64 *sas_addr);
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+ void *identifier,
+ u8 ident_len);
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+ u16 virt_id);
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
+void esas2r_disc_initialize(struct esas2r_adapter *a);
+void esas2r_disc_start_waiting(struct esas2r_adapter *a);
+void esas2r_disc_check_for_work(struct esas2r_adapter *a);
+void esas2r_disc_check_complete(struct esas2r_adapter *a);
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
+bool esas2r_disc_start_port(struct esas2r_adapter *a);
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl_vda *vi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc);
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+ enum fw_event_type type,
+ void *data,
+ int data_sz);
+
+/* Inline functions */
+
+/* Allocate a chip scatter/gather list entry */
+static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+ struct list_head *sgl;
+ struct esas2r_mem_desc *result = NULL;
+
+ spin_lock_irqsave(&a->sg_list_lock, flags);
+ if (likely(!list_empty(&a->free_sg_list_head))) {
+ sgl = a->free_sg_list_head.next;
+ result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
+ list_del_init(sgl);
+ }
+ spin_unlock_irqrestore(&a->sg_list_lock, flags);
+
+ return result;
+}
+
+/* Initialize a scatter/gather context */
+static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
+ struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct atto_vda_sge *first)
+{
+ sgc->adapter = a;
+ sgc->first_req = rq;
+
+ /*
+ * set the limit pointer such that an SGE pointer above this value
+ * would be the first one to overflow the SGL.
+ */
+ sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
+ + (sizeof(union
+ atto_vda_req) /
+ 8)
+ - sizeof(struct
+ atto_vda_sge));
+ if (first) {
+ sgc->sge.a64.last =
+ sgc->sge.a64.curr = first;
+ rq->vrq->scsi.sg_list_offset = (u8)
+ ((u8 *)first -
+ (u8 *)rq->vrq);
+ } else {
+ sgc->sge.a64.last =
+ sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
+ rq->vrq->scsi.sg_list_offset =
+ (u8)offsetof(struct atto_vda_scsi_req, u.sge);
+ }
+ sgc->sge.a64.chain = NULL;
+}
+
+static inline void esas2r_rq_init_request(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ union atto_vda_req *vrq = rq->vrq;
+
+ INIT_LIST_HEAD(&rq->sg_table_head);
+ rq->data_buf = (void *)(vrq + 1);
+ rq->interrupt_cb = NULL;
+ rq->comp_cb = esas2r_complete_request_cb;
+ rq->flags = 0;
+ rq->timeout = 0;
+ rq->req_stat = RS_PENDING;
+ rq->req_type = RT_INI_REQ;
+
+ /* clear the outbound response */
+ rq->func_rsp.dwords[0] = 0;
+ rq->func_rsp.dwords[1] = 0;
+
+ /*
+ * clear the size of the VDA request. esas2r_build_sg_list() will
+ * only allow the size of the request to grow. there are some
+ * management requests that go through there twice and the second
+ * time through sets a smaller request size. if this is not modified
+ * at all we'll set it to the size of the entire VDA request.
+ */
+ rq->vda_req_sz = RQ_SIZE_DEFAULT;
+
+ /* req_table entry should be NULL at this point - if not, halt */
+
+ if (a->req_table[LOWORD(vrq->scsi.handle)])
+ esas2r_bugon();
+
+ /* fill in the table for this handle so we can get back to the
+ * request.
+ */
+ a->req_table[LOWORD(vrq->scsi.handle)] = rq;
+
+ /*
+ * add a reference number to the handle to make it unique (until it
+ * wraps of course) while preserving the least significant word
+ */
+ vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
+
+ /*
+ * the following formats a SCSI request. the caller can override as
+ * necessary. clear_vda_request can be called to clear the VDA
+ * request for another type of request.
+ */
+ vrq->scsi.function = VDA_FUNC_SCSI;
+ vrq->scsi.sense_len = SENSE_DATA_SZ;
+
+ /* clear out sg_list_offset and chain_offset */
+ vrq->scsi.sg_list_offset = 0;
+ vrq->scsi.chain_offset = 0;
+ vrq->scsi.flags = 0;
+ vrq->scsi.reserved = 0;
+
+ /* set the sense buffer to be the data payload buffer */
+ vrq->scsi.ppsense_buf
+ = cpu_to_le64(rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+}
+
+static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ if (list_empty(&rq->sg_table_head))
+ return;
+
+ spin_lock_irqsave(&a->sg_list_lock, flags);
+ list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
+ spin_unlock_irqrestore(&a->sg_list_lock, flags);
+}
+
+static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+
+{
+ esas2r_rq_free_sg_lists(rq, a);
+ a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
+ rq->data_buf = NULL;
+}
+
+static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
+{
+
+ return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
+ test_bit(AF_BUSRST_DETECTED, &a->flags) ||
+ test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+ test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+ test_bit(AF_PORT_CHANGE, &a->flags);
+
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the esas2r_sg_context. The caller must initialize
+ * struct esas2r_sg_context prior to the initial call by calling
+ * esas2r_sgc_init()
+ */
+static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
+ return true;
+
+ return (*a->build_sgl)(a, sgc);
+}
+
+static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
+{
+ if (atomic_inc_return(&a->dis_ints_cnt) == 1)
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+ ESAS2R_INT_DIS_MASK);
+}
+
+static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
+{
+ if (atomic_dec_return(&a->dis_ints_cnt) == 0)
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+ ESAS2R_INT_ENB_MASK);
+}
+
+/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
+ * or long completion times.
+ */
+static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
+{
+ /* make sure we don't schedule twice */
+ if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
+ tasklet_hi_schedule(&a->tasklet);
+}
+
+static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
+{
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ (a->nvram->options2 & SASNVR2_HEARTBEAT))
+ set_bit(AF_HEARTBEAT_ENB, &a->flags);
+ else
+ clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+}
+
+static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
+{
+ clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+ clear_bit(AF_HEARTBEAT, &a->flags);
+}
+
+/* Set the initial state for resetting the adapter on the next pass through
+ * esas2r_do_deferred.
+ */
+static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
+{
+ esas2r_disable_heartbeat(a);
+
+ set_bit(AF_CHPRST_NEEDED, &a->flags);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+}
+
+/* See if an interrupt is pending on the adapter. */
+static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
+{
+ u32 intstat;
+
+ if (a->int_mask == 0)
+ return false;
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if ((intstat & a->int_mask) == 0)
+ return false;
+
+ esas2r_disable_chip_interrupts(a);
+
+ a->int_stat = intstat;
+ a->int_mask = 0;
+
+ return true;
+}
+
+static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
+ struct esas2r_adapter *a)
+{
+ return (u16)(uintptr_t)(t - a->targetdb);
+}
+
+/* Build and start an asynchronous event request */
+static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ esas2r_build_ae_req(a, rq);
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+ esas2r_start_vda_request(a, rq);
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
+ struct list_head *comp_list)
+{
+ struct esas2r_request *rq;
+ struct list_head *element, *next;
+
+ list_for_each_safe(element, next, comp_list) {
+ rq = list_entry(element, struct esas2r_request, comp_list);
+ list_del_init(element);
+ esas2r_complete_request(a, rq);
+ }
+}
+
+/* sysfs handlers */
+extern struct bin_attribute bin_attr_fw;
+extern struct bin_attribute bin_attr_fs;
+extern struct bin_attribute bin_attr_vda;
+extern struct bin_attribute bin_attr_hw;
+extern struct bin_attribute bin_attr_live_nvram;
+extern struct bin_attribute bin_attr_default_nvram;
+
+#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
new file mode 100644
index 000000000..1c079f430
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -0,0 +1,1184 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_disc.c
+ * esas2r device discovery routines
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Miscellaneous internal discovery routines */
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+/* Internal discovery routines that process the states */
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+void esas2r_disc_initialize(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *nvr = a->nvram;
+
+ esas2r_trace_enter();
+
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+ clear_bit(AF2_DEV_SCAN, &a->flags2);
+ clear_bit(AF2_DEV_CNT_OK, &a->flags2);
+
+ a->disc_start_time = jiffies_to_msecs(jiffies);
+ a->disc_wait_time = nvr->dev_wait_time * 1000;
+ a->disc_wait_cnt = nvr->dev_wait_count;
+
+ if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
+ a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
+
+ /*
+ * If we are doing chip reset or power management processing, always
+ * wait for devices. use the NVRAM device count if it is greater than
+ * previously discovered devices.
+ */
+
+ esas2r_hdebug("starting discovery...");
+
+ a->general_req.interrupt_cx = NULL;
+
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+ test_bit(AF_POWER_MGT, &a->flags)) {
+ if (a->prev_dev_cnt == 0) {
+ /* Don't bother waiting if there is nothing to wait
+ * for.
+ */
+ a->disc_wait_time = 0;
+ } else {
+ /*
+ * Set the device wait count to what was previously
+ * found. We don't care if the user only configured
+ * a time because we know the exact count to wait for.
+ * There is no need to honor the user's wishes to
+ * always wait the full time.
+ */
+ a->disc_wait_cnt = a->prev_dev_cnt;
+
+ /*
+ * bump the minimum wait time to 15 seconds since the
+ * default is 3 (system boot or the boot driver usually
+ * buys us more time).
+ */
+ if (a->disc_wait_time < 15000)
+ a->disc_wait_time = 15000;
+ }
+ }
+
+ esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
+ esas2r_trace("disc wait time: %d", a->disc_wait_time);
+
+ if (a->disc_wait_time == 0)
+ esas2r_disc_check_complete(a);
+
+ esas2r_trace_exit();
+}
+
+void esas2r_disc_start_waiting(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (a->disc_ctx.disc_evt)
+ esas2r_disc_start_port(a);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+void esas2r_disc_check_for_work(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+
+ /* service any pending interrupts first */
+
+ esas2r_polled_interrupt(a);
+
+ /*
+ * now, interrupt processing may have queued up a discovery event. go
+ * see if we have one to start. we couldn't start it in the ISR since
+ * polled discovery would cause a deadlock.
+ */
+
+ esas2r_disc_start_waiting(a);
+
+ if (rq->interrupt_cx == NULL)
+ return;
+
+ if (rq->req_stat == RS_STARTED
+ && rq->timeout <= RQ_MAX_TIMEOUT) {
+ /* wait for the current discovery request to complete. */
+ esas2r_wait_request(a, rq);
+
+ if (rq->req_stat == RS_TIMEOUT) {
+ esas2r_disc_abort(a, rq);
+ esas2r_local_reset_adapter(a);
+ return;
+ }
+ }
+
+ if (rq->req_stat == RS_PENDING
+ || rq->req_stat == RS_STARTED)
+ return;
+
+ esas2r_disc_continue(a, rq);
+}
+
+void esas2r_disc_check_complete(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ /* check to see if we should be waiting for devices */
+ if (a->disc_wait_time) {
+ u32 currtime = jiffies_to_msecs(jiffies);
+ u32 time = currtime - a->disc_start_time;
+
+ /*
+ * Wait until the device wait time is exhausted or the device
+ * wait count is satisfied.
+ */
+ if (time < a->disc_wait_time
+ && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
+ || a->disc_wait_cnt == 0)) {
+ /* After three seconds of waiting, schedule a scan. */
+ if (time >= 3000
+ && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ }
+
+ esas2r_trace_exit();
+ return;
+ }
+
+ /*
+ * We are done waiting...we think. Adjust the wait time to
+ * consume events after the count is met.
+ */
+ if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
+ a->disc_wait_time = time + 3000;
+
+ /* If we haven't done a full scan yet, do it now. */
+ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ esas2r_trace_exit();
+ return;
+ }
+
+ /*
+ * Now, if there is still time left to consume events, continue
+ * waiting.
+ */
+ if (time < a->disc_wait_time) {
+ esas2r_trace_exit();
+ return;
+ }
+ } else {
+ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ }
+ }
+
+ /* We want to stop waiting for devices. */
+ a->disc_wait_time = 0;
+
+ if (test_bit(AF_DISC_POLLED, &a->flags) &&
+ test_bit(AF_DISC_IN_PROG, &a->flags)) {
+ /*
+ * Polled discovery is still pending so continue the active
+ * discovery until it is done. At that point, we will stop
+ * polled discovery and transition to interrupt driven
+ * discovery.
+ */
+ } else {
+ /*
+ * Done waiting for devices. Note that we get here immediately
+ * after deferred waiting completes because that is interrupt
+ * driven; i.e. There is no transition.
+ */
+ esas2r_disc_fix_curr_requests(a);
+ clear_bit(AF_DISC_PENDING, &a->flags);
+
+ /*
+ * We have deferred target state changes until now because we
+ * don't want to report any removals (due to the first arrival)
+ * until the device wait time expires.
+ */
+ set_bit(AF_PORT_CHANGE, &a->flags);
+ }
+
+ esas2r_trace_exit();
+}
+
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
+{
+ struct esas2r_disc_context *dc = &a->disc_ctx;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("disc_event: %d", disc_evt);
+
+ /* Initialize the discovery context */
+ dc->disc_evt |= disc_evt;
+
+ /*
+ * Don't start discovery before or during polled discovery. if we did,
+ * we would have a deadlock if we are in the ISR already.
+ */
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_POLLED, &a->flags))
+ esas2r_disc_start_port(a);
+
+ esas2r_trace_exit();
+}
+
+bool esas2r_disc_start_port(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+ struct esas2r_disc_context *dc = &a->disc_ctx;
+ bool ret;
+
+ esas2r_trace_enter();
+
+ if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ /* If there is a discovery waiting, process it. */
+ if (dc->disc_evt) {
+ if (test_bit(AF_DISC_POLLED, &a->flags)
+ && a->disc_wait_time == 0) {
+ /*
+ * We are doing polled discovery, but we no longer want
+ * to wait for devices. Stop polled discovery and
+ * transition to interrupt driven discovery.
+ */
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+ } else {
+ /* Discovery is complete. */
+
+ esas2r_hdebug("disc done");
+
+ set_bit(AF_PORT_CHANGE, &a->flags);
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ /* Handle the discovery context */
+ esas2r_trace("disc_evt: %d", dc->disc_evt);
+ set_bit(AF_DISC_IN_PROG, &a->flags);
+ dc->flags = 0;
+
+ if (test_bit(AF_DISC_POLLED, &a->flags))
+ dc->flags |= DCF_POLLED;
+
+ rq->interrupt_cx = dc;
+ rq->req_stat = RS_SUCCESS;
+
+ /* Decode the event code */
+ if (dc->disc_evt & DCDE_DEV_SCAN) {
+ dc->disc_evt &= ~DCDE_DEV_SCAN;
+
+ dc->flags |= DCF_DEV_SCAN;
+ dc->state = DCS_BLOCK_DEV_SCAN;
+ } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
+ dc->disc_evt &= ~DCDE_DEV_CHANGE;
+
+ dc->flags |= DCF_DEV_CHANGE;
+ dc->state = DCS_DEV_RMV;
+ }
+
+ /* Continue interrupt driven discovery */
+ if (!test_bit(AF_DISC_POLLED, &a->flags))
+ ret = esas2r_disc_continue(a, rq);
+ else
+ ret = true;
+
+ esas2r_trace_exit();
+
+ return ret;
+}
+
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+
+ /* Device discovery/removal */
+ while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
+ rslt = false;
+
+ switch (dc->state) {
+ case DCS_DEV_RMV:
+
+ rslt = esas2r_disc_dev_remove(a, rq);
+ break;
+
+ case DCS_DEV_ADD:
+
+ rslt = esas2r_disc_dev_add(a, rq);
+ break;
+
+ case DCS_BLOCK_DEV_SCAN:
+
+ rslt = esas2r_disc_block_dev_scan(a, rq);
+ break;
+
+ case DCS_RAID_GRP_INFO:
+
+ rslt = esas2r_disc_raid_grp_info(a, rq);
+ break;
+
+ case DCS_PART_INFO:
+
+ rslt = esas2r_disc_part_info(a, rq);
+ break;
+
+ case DCS_PT_DEV_INFO:
+
+ rslt = esas2r_disc_passthru_dev_info(a, rq);
+ break;
+ case DCS_PT_DEV_ADDR:
+
+ rslt = esas2r_disc_passthru_dev_addr(a, rq);
+ break;
+ case DCS_DISC_DONE:
+
+ dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
+ break;
+
+ default:
+
+ esas2r_bugon();
+ dc->state = DCS_DISC_DONE;
+ break;
+ }
+
+ if (rslt)
+ return true;
+ }
+
+ /* Discovery is done...for now. */
+ rq->interrupt_cx = NULL;
+
+ if (!test_bit(AF_DISC_PENDING, &a->flags))
+ esas2r_disc_fix_curr_requests(a);
+
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+
+ /* Start the next discovery. */
+ return esas2r_disc_start_port(a);
+}
+
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ /* Set the timeout to a minimum value. */
+ if (rq->timeout < ESAS2R_DEFAULT_TMO)
+ rq->timeout = ESAS2R_DEFAULT_TMO;
+
+ /*
+ * Override the request type to distinguish discovery requests. If we
+ * end up deferring the request, esas2r_disc_local_start_request()
+ * will be called to restart it.
+ */
+ rq->req_type = RT_DISC_REQ;
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_FLASHING, &a->flags))
+ esas2r_disc_local_start_request(a, rq);
+ else
+ list_add_tail(&rq->req_list, &a->defer_list);
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ return true;
+}
+
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_trace_enter();
+
+ list_add_tail(&rq->req_list, &a->active_list);
+
+ esas2r_start_vda_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return;
+}
+
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+
+ esas2r_trace_enter();
+
+ /* abort the current discovery */
+
+ dc->state = DCS_DISC_DONE;
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+
+ esas2r_trace_enter();
+
+ esas2r_rq_init_request(rq, a);
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_DEV_SCAN,
+ 0,
+ 0,
+ 0,
+ NULL);
+
+ rq->comp_cb = esas2r_disc_block_dev_scan_cb;
+
+ rq->timeout = 30000;
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SUCCESS)
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix = 0;
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vda_grp_info *grpinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
+
+ if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
+ dc->state = DCS_DISC_DONE;
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+ memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_GRP_INFO,
+ dc->scan_gen,
+ 0,
+ sizeof(struct atto_vda_grp_info),
+ NULL);
+
+ grpinfo->grp_index = dc->raid_grp_ix;
+
+ rq->comp_cb = esas2r_disc_raid_grp_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vda_grp_info *grpinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->raid_grp_ix = 0;
+ goto done;
+ }
+
+ if (rq->req_stat == RS_SUCCESS) {
+ grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+ if (grpinfo->status != VDA_GRP_STAT_ONLINE
+ && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
+ /* go to the next group. */
+
+ dc->raid_grp_ix++;
+ } else {
+ memcpy(&dc->raid_grp_name[0],
+ &grpinfo->grp_name[0],
+ sizeof(grpinfo->grp_name));
+
+ dc->interleave = le32_to_cpu(grpinfo->interleave);
+ dc->block_size = le32_to_cpu(grpinfo->block_size);
+
+ dc->state = DCS_PART_INFO;
+ dc->part_num = 0;
+ }
+ } else {
+ if (!(rq->req_stat == RS_GRP_INVALID)) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for RAID group info failed - "
+ "returned with %x",
+ rq->req_stat);
+ }
+
+ dc->dev_ix = 0;
+ dc->state = DCS_PT_DEV_INFO;
+ }
+
+done:
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vdapart_info *partinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("part_num: %d", dc->part_num);
+
+ if (dc->part_num >= VDA_MAX_PARTITIONS) {
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix++;
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+ memset(partinfo, 0, sizeof(struct atto_vdapart_info));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_PART_INFO,
+ dc->scan_gen,
+ 0,
+ sizeof(struct atto_vdapart_info),
+ NULL);
+
+ partinfo->part_no = dc->part_num;
+
+ memcpy(&partinfo->grp_name[0],
+ &dc->raid_grp_name[0],
+ sizeof(partinfo->grp_name));
+
+ rq->comp_cb = esas2r_disc_part_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vdapart_info *partinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->raid_grp_ix = 0;
+ dc->state = DCS_RAID_GRP_INFO;
+ } else if (rq->req_stat == RS_SUCCESS) {
+ partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+ dc->part_num = partinfo->part_no;
+
+ dc->curr_virt_id = le16_to_cpu(partinfo->target_id);
+
+ esas2r_targ_db_add_raid(a, dc);
+
+ dc->part_num++;
+ } else {
+ if (!(rq->req_stat == RS_PART_LAST)) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for RAID group partition info "
+ "failed - status:%d", rq->req_stat);
+ }
+
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix++;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vda_devinfo *devinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("dev_ix: %d", dc->dev_ix);
+
+ esas2r_rq_init_request(rq, a);
+
+ devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+ memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_DEV_PT_INFO,
+ dc->scan_gen,
+ dc->dev_ix,
+ sizeof(struct atto_vda_devinfo),
+ NULL);
+
+ rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vda_devinfo *devinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->dev_ix = 0;
+ dc->state = DCS_PT_DEV_INFO;
+ } else if (rq->req_stat == RS_SUCCESS) {
+ devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+ dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
+
+ dc->curr_virt_id = le16_to_cpu(devinfo->target_id);
+
+ if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
+ dc->curr_phys_id =
+ le16_to_cpu(devinfo->phys_target_id);
+ dc->dev_addr_type = ATTO_GDA_AT_PORT;
+ dc->state = DCS_PT_DEV_ADDR;
+
+ esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+ esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+ } else {
+ dc->dev_ix++;
+ }
+ } else {
+ if (!(rq->req_stat == RS_DEV_INVALID)) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for device information failed - "
+ "status:%d", rq->req_stat);
+ }
+
+ dc->state = DCS_DISC_DONE;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_ioctl *hi;
+ struct esas2r_sg_context sgc;
+
+ esas2r_trace_enter();
+
+ esas2r_rq_init_request(rq, a);
+
+ /* format the request. */
+
+ sgc.cur_offset = NULL;
+ sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
+ sgc.length = offsetof(struct atto_ioctl, data)
+ + sizeof(struct atto_hba_get_device_address);
+
+ esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
+
+ esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
+
+ if (!esas2r_build_sg_list(a, rq, &sgc)) {
+ esas2r_rq_destroy_request(rq, a);
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
+
+ rq->interrupt_cx = dc;
+
+ /* format the IOCTL data. */
+
+ hi = (struct atto_ioctl *)a->disc_buffer;
+
+ memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);
+
+ hi->version = ATTO_VER_GET_DEV_ADDR0;
+ hi->function = ATTO_FUNC_GET_DEV_ADDR;
+ hi->flags = HBAF_TUNNEL;
+
+ hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
+ hi->data.get_dev_addr.addr_type = dc->dev_addr_type;
+
+ /* start it up. */
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t = NULL;
+ unsigned long flags;
+ struct atto_ioctl *hi;
+ u16 addrlen;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ hi = (struct atto_ioctl *)a->disc_buffer;
+
+ if (rq->req_stat == RS_SUCCESS
+ && hi->status == ATTO_STS_SUCCESS) {
+ addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);
+
+ if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
+ if (addrlen == sizeof(u64))
+ memcpy(&dc->sas_addr,
+ &hi->data.get_dev_addr.address[0],
+ addrlen);
+ else
+ memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));
+
+ /* Get the unique identifier. */
+ dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;
+
+ goto next_dev_addr;
+ } else {
+ /* Add the pass through target. */
+ if (HIBYTE(addrlen) == 0) {
+ t = esas2r_targ_db_add_pthru(a,
+ dc,
+ &hi->data.
+ get_dev_addr.
+ address[0],
+ (u8)hi->data.
+ get_dev_addr.
+ addr_len);
+
+ if (t)
+ memcpy(&t->sas_addr, &dc->sas_addr,
+ sizeof(t->sas_addr));
+ } else {
+ /* getting the back end data failed */
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "an error occurred retrieving the "
+ "back end data (%s:%d)",
+ __func__,
+ __LINE__);
+ }
+ }
+ } else {
+ /* getting the back end data failed */
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "an error occurred retrieving the back end data - "
+ "rq->req_stat:%d hi->status:%d",
+ rq->req_stat, hi->status);
+ }
+
+ /* proceed to the next device. */
+
+ if (dc->flags & DCF_DEV_SCAN) {
+ dc->dev_ix++;
+ dc->state = DCS_PT_DEV_INFO;
+ } else if (dc->flags & DCF_DEV_CHANGE) {
+ dc->curr_targ++;
+ dc->state = DCS_DEV_ADD;
+ } else {
+ esas2r_bugon();
+ }
+
+next_dev_addr:
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = sgc->adapter;
+
+ if (sgc->length > ESAS2R_DISC_BUF_LEN)
+ esas2r_bugon();
+
+ *addr = a->uncached_phys
+ + (u64)((u8 *)a->disc_buffer - a->uncached);
+
+ return sgc->length;
+}
+
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t;
+ struct esas2r_target *t2;
+
+ esas2r_trace_enter();
+
+ /* process removals. */
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->new_target_state != TS_NOT_PRESENT)
+ continue;
+
+ t->new_target_state = TS_INVALID;
+
+ /* remove the right target! */
+
+ t2 =
+ esas2r_targ_db_find_by_virt_id(a,
+ esas2r_targ_get_id(t,
+ a));
+
+ if (t2)
+ esas2r_targ_db_remove(a, t2);
+ }
+
+ /* removals complete. process arrivals. */
+
+ dc->state = DCS_DEV_ADD;
+ dc->curr_targ = a->targetdb;
+
+ esas2r_trace_exit();
+
+ return false;
+}
+
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t = dc->curr_targ;
+
+ if (t >= a->targetdb_end) {
+ /* done processing state changes. */
+
+ dc->state = DCS_DISC_DONE;
+ } else if (t->new_target_state == TS_PRESENT) {
+ struct atto_vda_ae_lu *luevt = &t->lu_event;
+
+ esas2r_trace_enter();
+
+ /* clear this now in case more events come in. */
+
+ t->new_target_state = TS_INVALID;
+
+ /* setup the discovery context for adding this device. */
+
+ dc->curr_virt_id = esas2r_targ_get_id(t, a);
+
+ if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
+ && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
+ dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
+ dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
+ } else {
+ dc->block_size = 0;
+ dc->interleave = 0;
+ }
+
+ /* determine the device type being added. */
+
+ if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
+ if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
+ dc->state = DCS_PT_DEV_ADDR;
+ dc->dev_addr_type = ATTO_GDA_AT_PORT;
+ dc->curr_phys_id = luevt->wphys_target_id;
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "luevt->dwevent does not have the "
+ "VDAAE_LU_PHYS_ID bit set (%s:%d)",
+ __func__, __LINE__);
+ }
+ } else {
+ dc->raid_grp_name[0] = 0;
+
+ esas2r_targ_db_add_raid(a, dc);
+ }
+
+ esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+ esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+ esas2r_trace("dwevent: %d", luevt->dwevent);
+
+ esas2r_trace_exit();
+ }
+
+ if (dc->state == DCS_DEV_ADD) {
+ /* go to the next device. */
+
+ dc->curr_targ++;
+ }
+
+ return false;
+}
+
+/*
+ * When discovery is done, find all requests on defer queue and
+ * test if they need to be modified. If a target is no longer present
+ * then complete the request with RS_SEL. Otherwise, update the
+ * target_id since after a hibernate it can be a different value.
+ * VDA does not make passthrough target IDs persistent.
+ */
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+ struct esas2r_target *t;
+ struct esas2r_request *rq;
+ struct list_head *element;
+
+ /* update virt_targ_id in any outstanding esas2r_requests */
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ t = a->targetdb + rq->target_id;
+
+ if (t->target_state == TS_PRESENT)
+ rq->vrq->scsi.target_id = le16_to_cpu(
+ t->virt_targ_id);
+ else
+ rq->req_stat = RS_SEL;
+ }
+
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
new file mode 100644
index 000000000..7bd376d95
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -0,0 +1,1521 @@
+
+/*
+ * linux/drivers/scsi/esas2r/esas2r_flash.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/* local macro defs */
+#define esas2r_nvramcalc_cksum(n) \
+ (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
+ SASNVR_CKSUM_SEED))
+#define esas2r_nvramcalc_xor_cksum(n) \
+ (esas2r_calc_byte_xor_cksum((u8 *)(n), \
+ sizeof(struct esas2r_sas_nvram), 0))
+
+#define ESAS2R_FS_DRVR_VER 2
+
+static struct esas2r_sas_nvram default_sas_nvram = {
+ { 'E', 'S', 'A', 'S' }, /* signature */
+ SASNVR_VERSION, /* version */
+ 0, /* checksum */
+ 31, /* max_lun_for_target */
+ SASNVR_PCILAT_MAX, /* pci_latency */
+ SASNVR1_BOOT_DRVR, /* options1 */
+ SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */
+ | SASNVR2_SW_MUX_CTRL,
+ SASNVR_COAL_DIS, /* int_coalescing */
+ SASNVR_CMDTHR_NONE, /* cmd_throttle */
+ 3, /* dev_wait_time */
+ 1, /* dev_wait_count */
+ 0, /* spin_up_delay */
+ 0, /* ssp_align_rate */
+ { 0x50, 0x01, 0x08, 0x60, /* sas_addr */
+ 0x00, 0x00, 0x00, 0x00 },
+ { SASNVR_SPEED_AUTO }, /* phy_speed */
+ { SASNVR_MUX_DISABLED }, /* SAS multiplexing */
+ { 0 }, /* phy_flags */
+ SASNVR_SORT_SAS_ADDR, /* sort_type */
+ 3, /* dpm_reqcmd_lmt */
+ 3, /* dpm_stndby_time */
+ 0, /* dpm_active_time */
+ { 0 }, /* phy_target_id */
+ SASNVR_VSMH_DISABLED, /* virt_ses_mode */
+ SASNVR_RWM_DEFAULT, /* read_write_mode */
+ 0, /* link down timeout */
+ { 0 } /* reserved */
+};
+
+static u8 cmd_to_fls_func[] = {
+ 0xFF,
+ VDA_FLASH_READ,
+ VDA_FLASH_BEGINW,
+ VDA_FLASH_WRITE,
+ VDA_FLASH_COMMIT,
+ VDA_FLASH_CANCEL
+};
+
+static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
+{
+ u32 cksum = seed;
+ u8 *p = (u8 *)&cksum;
+
+ while (len) {
+ if (((uintptr_t)addr & 3) == 0)
+ break;
+
+ cksum = cksum ^ *addr;
+ addr++;
+ len--;
+ }
+ while (len >= sizeof(u32)) {
+ cksum = cksum ^ *(u32 *)addr;
+ addr += 4;
+ len -= 4;
+ }
+ while (len--) {
+ cksum = cksum ^ *addr;
+ addr++;
+ }
+ return p[0] ^ p[1] ^ p[2] ^ p[3];
+}
+
+static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
+{
+ u8 *p = (u8 *)addr;
+ u8 cksum = seed;
+
+ while (len--)
+ cksum = cksum + p[len];
+ return cksum;
+}
+
+/* Interrupt callback to process FM API write requests. */
+static void esas2r_fmapi_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+
+ if (rq->req_stat == RS_SUCCESS) {
+ /* Last request was successful. See what to do now. */
+ switch (vrq->sub_func) {
+ case VDA_FLASH_BEGINW:
+ if (fc->sgc.cur_offset == NULL)
+ goto commit;
+
+ vrq->sub_func = VDA_FLASH_WRITE;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_WRITE:
+commit:
+ vrq->sub_func = VDA_FLASH_COMMIT;
+ rq->req_stat = RS_PENDING;
+ rq->interrupt_cb = fc->interrupt_cb;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (rq->req_stat != RS_PENDING)
+ /*
+ * All done. call the real callback to complete the FM API
+ * request. We should only get here if a BEGINW or WRITE
+ * operation failed.
+ */
+ (*fc->interrupt_cb)(a, rq);
+}
+
+/*
+ * Build a flash request based on the flash context. The request status
+ * is filled in on an error.
+ */
+static void build_flash_msg(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_sg_context *sgc = &fc->sgc;
+ u8 cksum = 0;
+
+ /* calculate the checksum */
+ if (fc->func == VDA_FLASH_BEGINW) {
+ if (sgc->cur_offset)
+ cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
+ sgc->length,
+ 0);
+ rq->interrupt_cb = esas2r_fmapi_callback;
+ } else {
+ rq->interrupt_cb = fc->interrupt_cb;
+ }
+ esas2r_build_flash_req(a,
+ rq,
+ fc->func,
+ cksum,
+ fc->flsh_addr,
+ sgc->length);
+
+ esas2r_rq_free_sg_lists(rq, a);
+
+ /*
+ * remember the length we asked for. we have to keep track of
+ * the current amount done so we know how much to compare when
+ * doing the verification phase.
+ */
+ fc->curr_len = fc->sgc.length;
+
+ if (sgc->cur_offset) {
+ /* setup the S/G context to build the S/G table */
+ esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ rq->req_stat = RS_BUSY;
+ return;
+ }
+ } else {
+ fc->sgc.length = 0;
+ }
+
+ /* update the flsh_addr to the next one to write to */
+ fc->flsh_addr += fc->curr_len;
+}
+
+/* determine the method to process the flash request */
+static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ /*
+ * assume we have more to do. if we return with the status set to
+ * RS_PENDING, FM API tasks will continue.
+ */
+ rq->req_stat = RS_PENDING;
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ /* not suppported for now */;
+ else
+ build_flash_msg(a, rq);
+
+ return rq->req_stat == RS_PENDING;
+}
+
+/* boot image fixer uppers called before downloading the image. */
+static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
+ struct esas2r_pc_image *pi;
+ struct esas2r_boot_header *bh;
+
+ pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
+ bh =
+ (struct esas2r_boot_header *)((u8 *)pi +
+ le16_to_cpu(pi->header_offset));
+ bh->device_id = cpu_to_le16(a->pcid->device);
+
+ /* Recalculate the checksum in the PNP header if there */
+ if (pi->pnp_offset) {
+ u8 *pnp_header_bytes =
+ ((u8 *)pi + le16_to_cpu(pi->pnp_offset));
+
+ /* Identifier - dword that starts at byte 10 */
+ *((u32 *)&pnp_header_bytes[10]) =
+ cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
+ a->pcid->subsystem_device));
+
+ /* Checksum - byte 9 */
+ pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
+ 32, 0);
+ }
+
+ /* Recalculate the checksum needed by the PC */
+ pi->checksum = pi->checksum -
+ esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
+}
+
+static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
+ u32 len = ch->length;
+ u32 offset = ch->image_offset;
+ struct esas2r_efi_image *ei;
+ struct esas2r_boot_header *bh;
+
+ while (len) {
+ u32 thislen;
+
+ ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
+ bh = (struct esas2r_boot_header *)((u8 *)ei +
+ le16_to_cpu(
+ ei->header_offset));
+ bh->device_id = cpu_to_le16(a->pcid->device);
+ thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+
+ if (thislen > len)
+ break;
+
+ len -= thislen;
+ offset += thislen;
+ }
+}
+
+/* Complete a FM API request with the specified status. */
+static bool complete_fmapi_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq, u8 fi_stat)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_flash_img *fi = fc->fi;
+
+ fi->status = fi_stat;
+ fi->driver_error = rq->req_stat;
+ rq->interrupt_cb = NULL;
+ rq->req_stat = RS_SUCCESS;
+
+ if (fi_stat != FI_STAT_IMG_VER)
+ memset(fc->scratch, 0, FM_BUF_SZ);
+
+ esas2r_enable_heartbeat(a);
+ clear_bit(AF_FLASH_LOCK, &a->flags);
+ return false;
+}
+
+/* Process each phase of the flash download process. */
+static void fw_download_proc(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_flash_img *fi = fc->fi;
+ struct esas2r_component_header *ch;
+ u32 len;
+ u8 *p, *q;
+
+ /* If the previous operation failed, just return. */
+ if (rq->req_stat != RS_SUCCESS)
+ goto error;
+
+ /*
+ * If an upload just completed and the compare length is non-zero,
+ * then we just read back part of the image we just wrote. verify the
+ * section and continue reading until the entire image is verified.
+ */
+ if (fc->func == VDA_FLASH_READ
+ && fc->cmp_len) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+
+ p = fc->scratch;
+ q = (u8 *)fi /* start of the whole gob */
+ + ch->image_offset /* start of the current image */
+ + ch->length /* end of the current image */
+ - fc->cmp_len; /* where we are now */
+
+ /*
+ * NOTE - curr_len is the exact count of bytes for the read
+ * even when the end is read and its not a full buffer
+ */
+ for (len = fc->curr_len; len; len--)
+ if (*p++ != *q++)
+ goto error;
+
+ fc->cmp_len -= fc->curr_len; /* # left to compare */
+
+ /* Update fc and determine the length for the next upload */
+ if (fc->cmp_len > FM_BUF_SZ)
+ fc->sgc.length = FM_BUF_SZ;
+ else
+ fc->sgc.length = fc->cmp_len;
+
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ((u8 *)fc->scratch - (u8 *)fi);
+ }
+
+ /*
+ * This code uses a 'while' statement since the next component may
+ * have a length = zero. This can happen since some components are
+ * not required. At the end of this 'while' we set up the length
+ * for the next request and therefore sgc.length can be = 0.
+ */
+ while (fc->sgc.length == 0) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+
+ switch (fc->task) {
+ case FMTSK_ERASE_BOOT:
+ /* the BIOS image is written next */
+ ch = &fi->cmp_hdr[CH_IT_BIOS];
+ if (ch->length == 0)
+ goto no_bios;
+
+ fc->task = FMTSK_WRTBIOS;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_BIOS;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTBIOS:
+ /*
+ * The BIOS image has been written - read it and
+ * verify it
+ */
+ fc->task = FMTSK_READBIOS;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READBIOS:
+no_bios:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The MAC image is written next */
+ ch = &fi->cmp_hdr[CH_IT_MAC];
+ if (ch->length == 0)
+ goto no_mac;
+
+ fc->task = FMTSK_WRTMAC;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_MAC;
+ fc->flsh_addr = FLS_OFFSET_BOOT
+ + fi->cmp_hdr[CH_IT_BIOS].length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTMAC:
+ /* The MAC image has been written - read and verify */
+ fc->task = FMTSK_READMAC;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr -= ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READMAC:
+no_mac:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The EFI image is written next */
+ ch = &fi->cmp_hdr[CH_IT_EFI];
+ if (ch->length == 0)
+ goto no_efi;
+
+ fc->task = FMTSK_WRTEFI;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_EFI;
+ fc->flsh_addr = FLS_OFFSET_BOOT
+ + fi->cmp_hdr[CH_IT_BIOS].length
+ + fi->cmp_hdr[CH_IT_MAC].length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTEFI:
+ /* The EFI image has been written - read and verify */
+ fc->task = FMTSK_READEFI;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr -= ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READEFI:
+no_efi:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The CFG image is written next */
+ ch = &fi->cmp_hdr[CH_IT_CFG];
+
+ if (ch->length == 0)
+ goto no_cfg;
+ fc->task = FMTSK_WRTCFG;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_CFG;
+ fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTCFG:
+ /* The CFG image has been written - read and verify */
+ fc->task = FMTSK_READCFG;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READCFG:
+no_cfg:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /*
+ * The download is complete. If in degraded mode,
+ * attempt a chip reset.
+ */
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ esas2r_local_reset_adapter(a);
+
+ a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
+ esas2r_print_flash_rev(a);
+
+ /* Update the type of boot image on the card */
+ memcpy(a->image_type, fi->rel_version,
+ sizeof(fi->rel_version));
+ complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+ return;
+ }
+
+ /* If verifying, don't try reading more than what's there */
+ if (fc->func == VDA_FLASH_READ
+ && fc->sgc.length > fc->cmp_len)
+ fc->sgc.length = fc->cmp_len;
+ }
+
+ /* Build the request to perform the next action */
+ if (!load_image(a, rq)) {
+error:
+ if (fc->comp_typ < fi->num_comps) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+ ch->status = CH_STAT_FAILED;
+ }
+
+ complete_fmapi_req(a, rq, FI_STAT_FAILED);
+ }
+}
+
+/* Determine the flash image adaptyp for this adapter */
+static u8 get_fi_adap_type(struct esas2r_adapter *a)
+{
+ u8 type;
+
+ /* use the device ID to get the correct adap_typ for this HBA */
+ switch (a->pcid->device) {
+ case ATTO_DID_INTEL_IOP348:
+ type = FI_AT_SUN_LAKE;
+ break;
+
+ case ATTO_DID_MV_88RC9580:
+ case ATTO_DID_MV_88RC9580TS:
+ case ATTO_DID_MV_88RC9580TSE:
+ case ATTO_DID_MV_88RC9580TL:
+ type = FI_AT_MV_9580;
+ break;
+
+ default:
+ type = FI_AT_UNKNWN;
+ break;
+ }
+
+ return type;
+}
+
+/* Size of config + copyright + flash_ver images, 0 for failure. */
+static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
+{
+ u16 *pw = (u16 *)cfg - 1;
+ u32 sz = 0;
+ u32 len = length;
+
+ if (len == 0)
+ len = FM_BUF_SZ;
+
+ if (flash_ver)
+ *flash_ver = 0;
+
+ while (true) {
+ u16 type;
+ u16 size;
+
+ type = le16_to_cpu(*pw--);
+ size = le16_to_cpu(*pw--);
+
+ if (type != FBT_CPYR
+ && type != FBT_SETUP
+ && type != FBT_FLASH_VER)
+ break;
+
+ if (type == FBT_FLASH_VER
+ && flash_ver)
+ *flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
+
+ sz += size + (2 * sizeof(u16));
+ pw -= size / sizeof(u16);
+
+ if (sz > len - (2 * sizeof(u16)))
+ break;
+ }
+
+ /* See if we are comparing the size to the specified length */
+ if (length && sz != length)
+ return 0;
+
+ return sz;
+}
+
+/* Verify that the boot image is valid */
+static u8 chk_boot(u8 *boot_img, u32 length)
+{
+ struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
+ u16 hdroffset = le16_to_cpu(bi->header_offset);
+ struct esas2r_boot_header *bh;
+
+ if (bi->signature != le16_to_cpu(0xaa55)
+ || (long)hdroffset >
+ (long)(65536L - sizeof(struct esas2r_boot_header))
+ || (hdroffset & 3)
+ || (hdroffset < sizeof(struct esas2r_boot_image))
+ || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
+ return 0xff;
+
+ bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
+
+ if (bh->signature[0] != 'P'
+ || bh->signature[1] != 'C'
+ || bh->signature[2] != 'I'
+ || bh->signature[3] != 'R'
+ || le16_to_cpu(bh->struct_length) <
+ (u16)sizeof(struct esas2r_boot_header)
+ || bh->class_code[2] != 0x01
+ || bh->class_code[1] != 0x04
+ || bh->class_code[0] != 0x00
+ || (bh->code_type != CODE_TYPE_PC
+ && bh->code_type != CODE_TYPE_OPEN
+ && bh->code_type != CODE_TYPE_EFI))
+ return 0xff;
+
+ return bh->code_type;
+}
+
+/* The sum of all the WORDS of the image */
+static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
+{
+ struct esas2r_flash_img *fi = fc->fi;
+ u16 cksum;
+ u32 len;
+ u16 *pw;
+
+ for (len = (fi->length - fc->fi_hdr_len) / 2,
+ pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
+ cksum = 0;
+ len;
+ len--, pw++)
+ cksum = cksum + le16_to_cpu(*pw);
+
+ return cksum;
+}
+
+/*
+ * Verify the flash image structure. The following verifications will
+ * be performed:
+ * 1) verify the fi_version is correct
+ * 2) verify the checksum of the entire image.
+ * 3) validate the adap_typ, action and length fields.
+ * 4) validate each component header. check the img_type and
+ * length fields
+ * 5) validate each component image. validate signatures and
+ * local checksums
+ */
+static bool verify_fi(struct esas2r_adapter *a,
+ struct esas2r_flash_context *fc)
+{
+ struct esas2r_flash_img *fi = fc->fi;
+ u8 type;
+ bool imgerr;
+ u16 i;
+ u32 len;
+ struct esas2r_component_header *ch;
+
+ /* Verify the length - length must even since we do a word checksum */
+ len = fi->length;
+
+ if ((len & 1)
+ || len < fc->fi_hdr_len) {
+ fi->status = FI_STAT_LENGTH;
+ return false;
+ }
+
+ /* Get adapter type and verify type in flash image */
+ type = get_fi_adap_type(a);
+ if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
+ fi->status = FI_STAT_ADAPTYP;
+ return false;
+ }
+
+ /*
+ * Loop through each component and verify the img_type and length
+ * fields. Keep a running count of the sizes sooze we can verify total
+ * size to additive size.
+ */
+ imgerr = false;
+
+ for (i = 0, len = 0, ch = fi->cmp_hdr;
+ i < fi->num_comps;
+ i++, ch++) {
+ bool cmperr = false;
+
+ /*
+ * Verify that the component header has the same index as the
+ * image type. The headers must be ordered correctly
+ */
+ if (i != ch->img_type) {
+ imgerr = true;
+ ch->status = CH_STAT_INVALID;
+ continue;
+ }
+
+ switch (ch->img_type) {
+ case CH_IT_BIOS:
+ type = CODE_TYPE_PC;
+ break;
+
+ case CH_IT_MAC:
+ type = CODE_TYPE_OPEN;
+ break;
+
+ case CH_IT_EFI:
+ type = CODE_TYPE_EFI;
+ break;
+ }
+
+ switch (ch->img_type) {
+ case CH_IT_FW:
+ case CH_IT_NVR:
+ break;
+
+ case CH_IT_BIOS:
+ case CH_IT_MAC:
+ case CH_IT_EFI:
+ if (ch->length & 0x1ff)
+ cmperr = true;
+
+ /* Test if component image is present */
+ if (ch->length == 0)
+ break;
+
+ /* Image is present - verify the image */
+ if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
+ != type)
+ cmperr = true;
+
+ break;
+
+ case CH_IT_CFG:
+
+ /* Test if component image is present */
+ if (ch->length == 0) {
+ cmperr = true;
+ break;
+ }
+
+ /* Image is present - verify the image */
+ if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
+ ch->length, NULL))
+ cmperr = true;
+
+ break;
+
+ default:
+
+ fi->status = FI_STAT_UNKNOWN;
+ return false;
+ }
+
+ if (cmperr) {
+ imgerr = true;
+ ch->status = CH_STAT_INVALID;
+ } else {
+ ch->status = CH_STAT_PENDING;
+ len += ch->length;
+ }
+ }
+
+ if (imgerr) {
+ fi->status = FI_STAT_MISSING;
+ return false;
+ }
+
+ /* Compare fi->length to the sum of ch->length fields */
+ if (len != fi->length - fc->fi_hdr_len) {
+ fi->status = FI_STAT_LENGTH;
+ return false;
+ }
+
+ /* Compute the checksum - it should come out zero */
+ if (fi->checksum != calc_fi_checksum(fc)) {
+ fi->status = FI_STAT_CHKSUM;
+ return false;
+ }
+
+ return true;
+}
+
+/* Fill in the FS IOCTL response data from a completed request. */
+static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)rq->interrupt_cx;
+
+ if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+ esas2r_enable_heartbeat(a);
+
+ fs->driver_error = rq->req_stat;
+
+ if (fs->driver_error == RS_SUCCESS)
+ fs->status = ATTO_STS_SUCCESS;
+ else
+ fs->status = ATTO_STS_FAILED;
+}
+
+/* Prepare an FS IOCTL request to be sent to the firmware. */
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_ioctl_fs *fs,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
+ struct esas2r_ioctlfs_command *fsc = &fs->command;
+ u8 func = 0;
+ u32 datalen;
+
+ fs->status = ATTO_STS_FAILED;
+ fs->driver_error = RS_PENDING;
+
+ if (fs->version > ESAS2R_FS_VER) {
+ fs->status = ATTO_STS_INV_VERSION;
+ return false;
+ }
+
+ if (fsc->command >= cmdcnt) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ func = cmd_to_fls_func[fsc->command];
+ if (func == 0xFF) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
+ if ((a->pcid->device != ATTO_DID_MV_88RC9580
+ || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TS
+ || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
+ || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TL
+ || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
+ fs->status = ATTO_STS_INV_ADAPTER;
+ return false;
+ }
+
+ if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
+ fs->status = ATTO_STS_INV_DRVR_VER;
+ return false;
+ }
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ fs->status = ATTO_STS_DEGRADED;
+ return false;
+ }
+
+ rq->interrupt_cb = esas2r_complete_fs_ioctl;
+ rq->interrupt_cx = fs;
+ datalen = le32_to_cpu(fsc->length);
+ esas2r_build_flash_req(a,
+ rq,
+ func,
+ fsc->checksum,
+ le32_to_cpu(fsc->flash_addr),
+ datalen);
+
+ if (func == VDA_FLASH_WRITE
+ || func == VDA_FLASH_READ) {
+ if (datalen == 0) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
+ sgc->length = datalen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ fs->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+
+ if (func == VDA_FLASH_COMMIT)
+ esas2r_disable_heartbeat(a);
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
+{
+ u32 starttime;
+ u32 timeout;
+ u32 intstat;
+ u32 doorbell;
+
+ /* Disable chip interrupts awhile */
+ if (function == DRBL_FLASH_REQ)
+ esas2r_disable_chip_interrupts(a);
+
+ /* Issue the request to the firmware */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
+
+ /* Now wait for the firmware to process it */
+ starttime = jiffies_to_msecs(jiffies);
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+ test_bit(AF_DISC_PENDING, &a->flags))
+ timeout = 40000;
+ else
+ timeout = 5000;
+
+ while (true) {
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (intstat & MU_INTSTAT_DRBL) {
+ /* Got a doorbell interrupt. Check for the function */
+ doorbell =
+ esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ if (doorbell & function)
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+ /*
+ * Iimeout. If we were requesting flash access,
+ * indicate we are done so the firmware knows we gave
+ * up. If this was a REQ, we also need to re-enable
+ * chip interrupts.
+ */
+ if (function == DRBL_FLASH_REQ) {
+ esas2r_hdebug("flash access timeout");
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_FLASH_DONE);
+ esas2r_enable_chip_interrupts(a);
+ } else {
+ esas2r_hdebug("flash release timeout");
+ }
+
+ return false;
+ }
+ }
+
+ /* if we're done, re-enable chip interrupts */
+ if (function == DRBL_FLASH_DONE)
+ esas2r_enable_chip_interrupts(a);
+
+ return true;
+}
+
+#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
+
+bool esas2r_read_flash_block(struct esas2r_adapter *a,
+ void *to,
+ u32 from,
+ u32 size)
+{
+ u8 *end = (u8 *)to;
+
+ /* Try to acquire access to the flash */
+ if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
+ return false;
+
+ while (size) {
+ u32 len;
+ u32 offset;
+ u32 iatvr;
+
+ if (test_bit(AF2_SERIAL_FLASH, &a->flags2))
+ iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
+ else
+ iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
+
+ esas2r_map_data_window(a, iatvr);
+ offset = from & (WINDOW_SIZE - 1);
+ len = size;
+
+ if (len > WINDOW_SIZE - offset)
+ len = WINDOW_SIZE - offset;
+
+ from += len;
+ size -= len;
+
+ while (len--) {
+ *end++ = esas2r_read_data_byte(a, offset);
+ offset++;
+ }
+ }
+
+ /* Release flash access */
+ esas2r_flash_access(a, DRBL_FLASH_DONE);
+ return true;
+}
+
+bool esas2r_read_flash_rev(struct esas2r_adapter *a)
+{
+ u8 bytes[256];
+ u16 *pw;
+ u16 *pwstart;
+ u16 type;
+ u16 size;
+ u32 sz;
+
+ sz = sizeof(bytes);
+ pw = (u16 *)(bytes + sz);
+ pwstart = (u16 *)bytes + 2;
+
+ if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
+ goto invalid_rev;
+
+ while (pw >= pwstart) {
+ pw--;
+ type = le16_to_cpu(*pw);
+ pw--;
+ size = le16_to_cpu(*pw);
+ pw -= size / 2;
+
+ if (type == FBT_CPYR
+ || type == FBT_SETUP
+ || pw < pwstart)
+ continue;
+
+ if (type == FBT_FLASH_VER)
+ a->flash_ver = le32_to_cpu(*(u32 *)pw);
+
+ break;
+ }
+
+invalid_rev:
+ return esas2r_print_flash_rev(a);
+}
+
+bool esas2r_print_flash_rev(struct esas2r_adapter *a)
+{
+ u16 year = LOWORD(a->flash_ver);
+ u8 day = LOBYTE(HIWORD(a->flash_ver));
+ u8 month = HIBYTE(HIWORD(a->flash_ver));
+
+ if (day == 0
+ || month == 0
+ || day > 31
+ || month > 12
+ || year < 2006
+ || year > 9999) {
+ strcpy(a->flash_rev, "not found");
+ a->flash_ver = 0;
+ return false;
+ }
+
+ sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
+ esas2r_hdebug("flash version: %s", a->flash_rev);
+ return true;
+}
+
+/*
+ * Find the type of boot image type that is currently in the flash.
+ * The chip only has a 64 KB PCI-e expansion ROM
+ * size so only one image can be flashed at a time.
+ */
+bool esas2r_read_image_type(struct esas2r_adapter *a)
+{
+ u8 bytes[256];
+ struct esas2r_boot_image *bi;
+ struct esas2r_boot_header *bh;
+ u32 sz;
+ u32 len;
+ u32 offset;
+
+ /* Start at the base of the boot images and look for a valid image */
+ sz = sizeof(bytes);
+ len = FLS_LENGTH_BOOT;
+ offset = 0;
+
+ while (true) {
+ if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
+ offset,
+ sz))
+ goto invalid_rev;
+
+ bi = (struct esas2r_boot_image *)bytes;
+ bh = (struct esas2r_boot_header *)((u8 *)bi +
+ le16_to_cpu(
+ bi->header_offset));
+ if (bi->signature != cpu_to_le16(0xAA55))
+ goto invalid_rev;
+
+ if (bh->code_type == CODE_TYPE_PC) {
+ strcpy(a->image_type, "BIOS");
+
+ return true;
+ } else if (bh->code_type == CODE_TYPE_EFI) {
+ struct esas2r_efi_image *ei;
+
+ /*
+ * So we have an EFI image. There are several types
+ * so see which architecture we have.
+ */
+ ei = (struct esas2r_efi_image *)bytes;
+
+ switch (le16_to_cpu(ei->machine_type)) {
+ case EFI_MACHINE_IA32:
+ strcpy(a->image_type, "EFI 32-bit");
+ return true;
+
+ case EFI_MACHINE_IA64:
+ strcpy(a->image_type, "EFI itanium");
+ return true;
+
+ case EFI_MACHINE_X64:
+ strcpy(a->image_type, "EFI 64-bit");
+ return true;
+
+ case EFI_MACHINE_EBC:
+ strcpy(a->image_type, "EFI EBC");
+ return true;
+
+ default:
+ goto invalid_rev;
+ }
+ } else {
+ u32 thislen;
+
+ /* jump to the next image */
+ thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+ if (thislen == 0
+ || thislen + offset > len
+ || bh->indicator == INDICATOR_LAST)
+ break;
+
+ offset += thislen;
+ }
+ }
+
+invalid_rev:
+ strcpy(a->image_type, "no boot images");
+ return false;
+}
+
+/*
+ * Read and validate current NVRAM parameters by accessing
+ * physical NVRAM directly. if currently stored parameters are
+ * invalid, use the defaults.
+ */
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
+{
+ bool result;
+
+ if (down_interruptible(&a->nvram_semaphore))
+ return false;
+
+ if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
+ sizeof(struct esas2r_sas_nvram))) {
+ esas2r_hdebug("NVRAM read failed, using defaults");
+ return false;
+ }
+
+ result = esas2r_nvram_validate(a);
+
+ up(&a->nvram_semaphore);
+
+ return result;
+}
+
+/* Interrupt callback to process NVRAM completions. */
+static void esas2r_nvram_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ if (rq->req_stat == RS_SUCCESS) {
+ /* last request was successful. see what to do now. */
+
+ switch (vrq->sub_func) {
+ case VDA_FLASH_BEGINW:
+ vrq->sub_func = VDA_FLASH_WRITE;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_WRITE:
+ vrq->sub_func = VDA_FLASH_COMMIT;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_READ:
+ esas2r_nvram_validate(a);
+ break;
+
+ case VDA_FLASH_COMMIT:
+ default:
+ break;
+ }
+ }
+
+ if (rq->req_stat != RS_PENDING) {
+ /* update the NVRAM state */
+ if (rq->req_stat == RS_SUCCESS)
+ set_bit(AF_NVR_VALID, &a->flags);
+ else
+ clear_bit(AF_NVR_VALID, &a->flags);
+
+ esas2r_enable_heartbeat(a);
+
+ up(&a->nvram_semaphore);
+ }
+}
+
+/*
+ * Write the contents of nvram to the adapter's physical NVRAM.
+ * The cached copy of the NVRAM is also updated.
+ */
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *nvram)
+{
+ struct esas2r_sas_nvram *n = nvram;
+ u8 sas_address_bytes[8];
+ u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return false;
+
+ if (down_interruptible(&a->nvram_semaphore))
+ return false;
+
+ if (n == NULL)
+ n = a->nvram;
+
+ /* check the validity of the settings */
+ if (n->version > SASNVR_VERSION) {
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ memcpy(&sas_address_bytes[0], n->sas_addr, 8);
+
+ if (sas_address_bytes[0] != 0x50
+ || sas_address_bytes[1] != 0x01
+ || sas_address_bytes[2] != 0x08
+ || (sas_address_bytes[3] & 0xF0) != 0x60
+ || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ if (n->spin_up_delay > SASNVR_SPINUP_MAX)
+ n->spin_up_delay = SASNVR_SPINUP_MAX;
+
+ n->version = SASNVR_VERSION;
+ n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
+ memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));
+
+ /* write the NVRAM */
+ n = a->nvram;
+ esas2r_disable_heartbeat(a);
+
+ esas2r_build_flash_req(a,
+ rq,
+ VDA_FLASH_BEGINW,
+ esas2r_nvramcalc_xor_cksum(n),
+ FLS_OFFSET_NVR,
+ sizeof(struct esas2r_sas_nvram));
+
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+
+ vrq->data.sge[0].length =
+ cpu_to_le32(SGE_LAST |
+ sizeof(struct esas2r_sas_nvram));
+ vrq->data.sge[0].address = cpu_to_le64(
+ a->uncached_phys + (u64)((u8 *)n - a->uncached));
+ } else {
+ vrq->data.prde[0].ctl_len =
+ cpu_to_le32(sizeof(struct esas2r_sas_nvram));
+ vrq->data.prde[0].address = cpu_to_le64(
+ a->uncached_phys
+ + (u64)((u8 *)n - a->uncached));
+ }
+ rq->interrupt_cb = esas2r_nvram_callback;
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+/* Validate the cached NVRAM. if the NVRAM is invalid, load the defaults. */
+bool esas2r_nvram_validate(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *n = a->nvram;
+ bool rslt = false;
+
+ if (n->signature[0] != 'E'
+ || n->signature[1] != 'S'
+ || n->signature[2] != 'A'
+ || n->signature[3] != 'S') {
+ esas2r_hdebug("invalid NVRAM signature");
+ } else if (esas2r_nvramcalc_cksum(n)) {
+ esas2r_hdebug("invalid NVRAM checksum");
+ } else if (n->version > SASNVR_VERSION) {
+ esas2r_hdebug("invalid NVRAM version");
+ } else {
+ set_bit(AF_NVR_VALID, &a->flags);
+ rslt = true;
+ }
+
+ if (rslt == false) {
+ esas2r_hdebug("using defaults");
+ esas2r_nvram_set_defaults(a);
+ }
+
+ return rslt;
+}
+
+/*
+ * Set the cached NVRAM to defaults. note that this function sets the default
+ * NVRAM when it has been determined that the physical NVRAM is invalid.
+ * In this case, the SAS address is fabricated.
+ */
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *n = a->nvram;
+ u32 time = jiffies_to_msecs(jiffies);
+
+ clear_bit(AF_NVR_VALID, &a->flags);
+ *n = default_sas_nvram;
+ n->sas_addr[3] |= 0x0F;
+ n->sas_addr[4] = HIBYTE(LOWORD(time));
+ n->sas_addr[5] = LOBYTE(LOWORD(time));
+ n->sas_addr[6] = a->pcid->bus->number;
+ n->sas_addr[7] = a->pcid->devfn;
+}
+
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+ struct esas2r_sas_nvram *nvram)
+{
+ u8 sas_addr[8];
+
+ /*
+ * in case we are copying the defaults into the adapter, copy the SAS
+ * address out first.
+ */
+ memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
+ *nvram = default_sas_nvram;
+ memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
+}
+
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+ struct esas2r_request *rq, struct esas2r_sg_context *sgc)
+{
+ struct esas2r_flash_context *fc = &a->flash_context;
+ u8 j;
+ struct esas2r_component_header *ch;
+
+ if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {
+ /* flag was already set */
+ fi->status = FI_STAT_BUSY;
+ return false;
+ }
+
+ memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
+ sgc = &fc->sgc;
+ fc->fi = fi;
+ fc->sgc_offset = sgc->cur_offset;
+ rq->req_stat = RS_SUCCESS;
+ rq->interrupt_cx = fc;
+
+ switch (fi->fi_version) {
+ case FI_VERSION_1:
+ fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
+ fc->num_comps = FI_NUM_COMPS_V1;
+ fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
+ break;
+
+ default:
+ return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
+
+ switch (fi->action) {
+ case FI_ACT_DOWN: /* Download the components */
+ /* Verify the format of the flash image */
+ if (!verify_fi(a, fc))
+ return complete_fmapi_req(a, rq, fi->status);
+
+ /* Adjust the BIOS fields that are dependent on the HBA */
+ ch = &fi->cmp_hdr[CH_IT_BIOS];
+
+ if (ch->length)
+ fix_bios(a, fi);
+
+ /* Adjust the EFI fields that are dependent on the HBA */
+ ch = &fi->cmp_hdr[CH_IT_EFI];
+
+ if (ch->length)
+ fix_efi(a, fi);
+
+ /*
+ * Since the image was just modified, compute the checksum on
+ * the modified image. First update the CRC for the composite
+ * expansion ROM image.
+ */
+ fi->checksum = calc_fi_checksum(fc);
+
+ /* Disable the heartbeat */
+ esas2r_disable_heartbeat(a);
+
+ /* Now start up the download sequence */
+ fc->task = FMTSK_ERASE_BOOT;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_CFG;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->sgc.length = FLS_LENGTH_BOOT;
+ fc->sgc.cur_offset = NULL;
+
+ /* Setup the callback address */
+ fc->interrupt_cb = fw_download_proc;
+ break;
+
+ case FI_ACT_UPSZ: /* Get upload sizes */
+ fi->adap_typ = get_fi_adap_type(a);
+ fi->flags = 0;
+ fi->num_comps = fc->num_comps;
+ fi->length = fc->fi_hdr_len;
+
+ /* Report the type of boot image in the rel_version string */
+ memcpy(fi->rel_version, a->image_type,
+ sizeof(fi->rel_version));
+
+ /* Build the component headers */
+ for (j = 0, ch = fi->cmp_hdr;
+ j < fi->num_comps;
+ j++, ch++) {
+ ch->img_type = j;
+ ch->status = CH_STAT_PENDING;
+ ch->length = 0;
+ ch->version = 0xffffffff;
+ ch->image_offset = 0;
+ ch->pad[0] = 0;
+ ch->pad[1] = 0;
+ }
+
+ if (a->flash_ver != 0) {
+ fi->cmp_hdr[CH_IT_BIOS].version =
+ fi->cmp_hdr[CH_IT_MAC].version =
+ fi->cmp_hdr[CH_IT_EFI].version =
+ fi->cmp_hdr[CH_IT_CFG].version
+ = a->flash_ver;
+
+ fi->cmp_hdr[CH_IT_BIOS].status =
+ fi->cmp_hdr[CH_IT_MAC].status =
+ fi->cmp_hdr[CH_IT_EFI].status =
+ fi->cmp_hdr[CH_IT_CFG].status =
+ CH_STAT_SUCCESS;
+
+ return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+ }
+
+ /* fall through */
+
+ case FI_ACT_UP: /* Upload the components */
+ default:
+ return complete_fmapi_req(a, rq, FI_STAT_INVALID);
+ }
+
+ /*
+ * If we make it here, fc has been setup to do the first task. Call
+ * load_image to format the request, start it, and get out. The
+ * interrupt code will call the callback when the first message is
+ * complete.
+ */
+ if (!load_image(a, rq))
+ return complete_fmapi_req(a, rq, FI_STAT_FAILED);
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 000000000..78ce4d61a
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1772 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_init.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
+ struct esas2r_mem_desc *mem_desc,
+ u32 align)
+{
+ mem_desc->esas2r_param = mem_desc->size + align;
+ mem_desc->virt_addr = NULL;
+ mem_desc->phys_addr = 0;
+ mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)mem_desc->
+ esas2r_param,
+ (dma_addr_t *)&mem_desc->
+ phys_addr,
+ GFP_KERNEL);
+
+ if (mem_desc->esas2r_data == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate %lu bytes of consistent memory!",
+ (long
+ unsigned
+ int)mem_desc->esas2r_param);
+ return false;
+ }
+
+ mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
+ mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
+ memset(mem_desc->virt_addr, 0, mem_desc->size);
+ return true;
+}
+
+static void esas2r_initmem_free(struct esas2r_adapter *a,
+ struct esas2r_mem_desc *mem_desc)
+{
+ if (mem_desc->virt_addr == NULL)
+ return;
+
+ /*
+ * Careful! phys_addr and virt_addr may have been adjusted from the
+ * original allocation in order to return the desired alignment. That
+ * means we have to use the original address (in esas2r_data) and size
+ * (esas2r_param) and calculate the original physical address based on
+ * the difference between the requested and actual allocation size.
+ */
+ if (mem_desc->phys_addr) {
+ int unalign = ((u8 *)mem_desc->virt_addr) -
+ ((u8 *)mem_desc->esas2r_data);
+
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)mem_desc->esas2r_param,
+ mem_desc->esas2r_data,
+ (dma_addr_t)(mem_desc->phys_addr - unalign));
+ } else {
+ kfree(mem_desc->esas2r_data);
+ }
+
+ mem_desc->virt_addr = NULL;
+}
+
+static bool alloc_vda_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_mem_desc *memdesc = kzalloc(
+ sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+ if (memdesc == NULL) {
+ esas2r_hdebug("could not alloc mem for vda request memdesc\n");
+ return false;
+ }
+
+ memdesc->size = sizeof(union atto_vda_req) +
+ ESAS2R_DATA_BUF_LEN;
+
+ if (!esas2r_initmem_alloc(a, memdesc, 256)) {
+ esas2r_hdebug("could not alloc mem for vda request\n");
+ kfree(memdesc);
+ return false;
+ }
+
+ a->num_vrqs++;
+ list_add(&memdesc->next_desc, &a->vrq_mds_head);
+
+ rq->vrq_md = memdesc;
+ rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
+ rq->vrq->scsi.handle = a->num_vrqs;
+
+ return true;
+}
+
+static void esas2r_unmap_regions(struct esas2r_adapter *a)
+{
+ if (a->regs)
+ iounmap((void __iomem *)a->regs);
+
+ a->regs = NULL;
+
+ pci_release_region(a->pcid, 2);
+
+ if (a->data_window)
+ iounmap((void __iomem *)a->data_window);
+
+ a->data_window = NULL;
+
+ pci_release_region(a->pcid, 0);
+}
+
+static int esas2r_map_regions(struct esas2r_adapter *a)
+{
+ int error;
+
+ a->regs = NULL;
+ a->data_window = NULL;
+
+ error = pci_request_region(a->pcid, 2, a->name);
+ if (error != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "pci_request_region(2) failed, error %d",
+ error);
+
+ return error;
+ }
+
+ a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
+ pci_resource_len(a->pcid, 2));
+ if (a->regs == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "ioremap failed for regs mem region\n");
+ pci_release_region(a->pcid, 2);
+ return -EFAULT;
+ }
+
+ error = pci_request_region(a->pcid, 0, a->name);
+ if (error != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "pci_request_region(2) failed, error %d",
+ error);
+ esas2r_unmap_regions(a);
+ return error;
+ }
+
+ a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
+ 0),
+ pci_resource_len(a->pcid, 0));
+ if (a->data_window == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "ioremap failed for data_window mem region\n");
+ esas2r_unmap_regions(a);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
+{
+ int i;
+
+ /* Set up interrupt mode based on the requested value */
+ switch (intr_mode) {
+ case INTR_MODE_LEGACY:
+use_legacy_interrupts:
+ a->intr_mode = INTR_MODE_LEGACY;
+ break;
+
+ case INTR_MODE_MSI:
+ i = pci_enable_msi(a->pcid);
+ if (i != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "failed to enable MSI for adapter %d, "
+ "falling back to legacy interrupts "
+ "(err=%d)", a->index,
+ i);
+ goto use_legacy_interrupts;
+ }
+ a->intr_mode = INTR_MODE_MSI;
+ set_bit(AF2_MSI_ENABLED, &a->flags2);
+ break;
+
+
+ default:
+ esas2r_log(ESAS2R_LOG_WARN,
+ "unknown interrupt_mode %d requested, "
+ "falling back to legacy interrupt",
+ interrupt_mode);
+ goto use_legacy_interrupts;
+ }
+}
+
+static void esas2r_claim_interrupts(struct esas2r_adapter *a)
+{
+ unsigned long flags = 0;
+
+ if (a->intr_mode == INTR_MODE_LEGACY)
+ flags |= IRQF_SHARED;
+
+ esas2r_log(ESAS2R_LOG_INFO,
+ "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
+ a->pcid->irq, a, a->name, flags);
+
+ if (request_irq(a->pcid->irq,
+ (a->intr_mode ==
+ INTR_MODE_LEGACY) ? esas2r_interrupt :
+ esas2r_msi_interrupt,
+ flags,
+ a->name,
+ a)) {
+ esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
+ a->pcid->irq);
+ return;
+ }
+
+ set_bit(AF2_IRQ_CLAIMED, &a->flags2);
+ esas2r_log(ESAS2R_LOG_INFO,
+ "claimed IRQ %d flags: 0x%lx",
+ a->pcid->irq, flags);
+}
+
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+ int index)
+{
+ struct esas2r_adapter *a;
+ u64 bus_addr = 0;
+ int i;
+ void *next_uncached;
+ struct esas2r_request *first_request, *last_request;
+
+ if (index >= MAX_ADAPTERS) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "tried to init invalid adapter index %u!",
+ index);
+ return 0;
+ }
+
+ if (esas2r_adapters[index]) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "tried to init existing adapter index %u!",
+ index);
+ return 0;
+ }
+
+ a = (struct esas2r_adapter *)host->hostdata;
+ memset(a, 0, sizeof(struct esas2r_adapter));
+ a->pcid = pcid;
+ a->host = host;
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask = dma_get_required_mask
+ (&pcid->dev);
+ if (required_mask > DMA_BIT_MASK(32)
+ && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(64))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "64-bit PCI addressing enabled\n");
+ } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(32))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "32-bit PCI addressing enabled\n");
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ } else {
+ if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(32))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "32-bit PCI addressing enabled\n");
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ }
+ esas2r_adapters[index] = a;
+ sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
+ esas2r_debug("new adapter %p, name %s", a, a->name);
+ spin_lock_init(&a->request_lock);
+ spin_lock_init(&a->fw_event_lock);
+ sema_init(&a->fm_api_semaphore, 1);
+ sema_init(&a->fs_api_semaphore, 1);
+ sema_init(&a->nvram_semaphore, 1);
+
+ esas2r_fw_event_off(a);
+ snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
+ a->index);
+ a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+
+ init_waitqueue_head(&a->buffered_ioctl_waiter);
+ init_waitqueue_head(&a->nvram_waiter);
+ init_waitqueue_head(&a->fm_api_waiter);
+ init_waitqueue_head(&a->fs_api_waiter);
+ init_waitqueue_head(&a->vda_waiter);
+
+ INIT_LIST_HEAD(&a->general_req.req_list);
+ INIT_LIST_HEAD(&a->active_list);
+ INIT_LIST_HEAD(&a->defer_list);
+ INIT_LIST_HEAD(&a->free_sg_list_head);
+ INIT_LIST_HEAD(&a->avail_request);
+ INIT_LIST_HEAD(&a->vrq_mds_head);
+ INIT_LIST_HEAD(&a->fw_event_list);
+
+ first_request = (struct esas2r_request *)((u8 *)(a + 1));
+
+ for (last_request = first_request, i = 1; i < num_requests;
+ last_request++, i++) {
+ INIT_LIST_HEAD(&last_request->req_list);
+ list_add_tail(&last_request->comp_list, &a->avail_request);
+ if (!alloc_vda_req(a, last_request)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate a VDA request!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ }
+
+ esas2r_debug("requests: %p to %p (%d, %d)", first_request,
+ last_request,
+ sizeof(*first_request),
+ num_requests);
+
+ if (esas2r_map_regions(a) != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ a->index = index;
+
+ /* interrupts will be disabled until we are done with init */
+ atomic_inc(&a->dis_ints_cnt);
+ atomic_inc(&a->disable_cnt);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+ set_bit(AF_FIRST_INIT, &a->flags);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->init_msg = ESAS2R_INIT_MSG_START;
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+
+ esas2r_setup_interrupts(a, interrupt_mode);
+
+ a->uncached_size = esas2r_get_uncached_size(a);
+ a->uncached = dma_alloc_coherent(&pcid->dev,
+ (size_t)a->uncached_size,
+ (dma_addr_t *)&bus_addr,
+ GFP_KERNEL);
+ if (a->uncached == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate %d bytes of consistent memory!",
+ a->uncached_size);
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ a->uncached_phys = bus_addr;
+
+ esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
+ a->uncached_size,
+ a->uncached,
+ upper_32_bits(bus_addr),
+ lower_32_bits(bus_addr));
+ memset(a->uncached, 0, a->uncached_size);
+ next_uncached = a->uncached;
+
+ if (!esas2r_init_adapter_struct(a,
+ &next_uncached)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to initialize adapter structure (2)!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ tasklet_init(&a->tasklet,
+ esas2r_adapter_tasklet,
+ (unsigned long)a);
+
+ /*
+ * Disable chip interrupts to prevent spurious interrupts
+ * until we claim the IRQ.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_check_adapter(a);
+
+ if (!esas2r_init_adapter_hw(a, true))
+ esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
+ else
+ esas2r_debug("esas2r_init_adapter ok");
+
+ esas2r_claim_interrupts(a);
+
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
+ esas2r_enable_chip_interrupts(a);
+
+ set_bit(AF2_INIT_DONE, &a->flags2);
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags))
+ esas2r_kickoff_timer(a);
+ esas2r_debug("esas2r_init_adapter done for %p (%d)",
+ a, a->disable_cnt);
+
+ return 1;
+}
+
+static void esas2r_adapter_power_down(struct esas2r_adapter *a,
+ int power_management)
+{
+ struct esas2r_mem_desc *memdesc, *next;
+
+ if ((test_bit(AF2_INIT_DONE, &a->flags2))
+ && (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
+ if (!power_management) {
+ del_timer_sync(&a->timer);
+ tasklet_kill(&a->tasklet);
+ }
+ esas2r_power_down(a);
+
+ /*
+ * There are versions of firmware that do not handle the sync
+ * cache command correctly. Stall here to ensure that the
+ * cache is lazily flushed.
+ */
+ mdelay(500);
+ esas2r_debug("chip halted");
+ }
+
+ /* Remove sysfs binary files */
+ if (a->sysfs_fw_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
+ a->sysfs_fw_created = 0;
+ }
+
+ if (a->sysfs_fs_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
+ a->sysfs_fs_created = 0;
+ }
+
+ if (a->sysfs_vda_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
+ a->sysfs_vda_created = 0;
+ }
+
+ if (a->sysfs_hw_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
+ a->sysfs_hw_created = 0;
+ }
+
+ if (a->sysfs_live_nvram_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+ &bin_attr_live_nvram);
+ a->sysfs_live_nvram_created = 0;
+ }
+
+ if (a->sysfs_default_nvram_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+ &bin_attr_default_nvram);
+ a->sysfs_default_nvram_created = 0;
+ }
+
+ /* Clean up interrupts */
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "free_irq(%d) called", a->pcid->irq);
+
+ free_irq(a->pcid->irq, a);
+ esas2r_debug("IRQ released");
+ clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
+ }
+
+ if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
+ pci_disable_msi(a->pcid);
+ clear_bit(AF2_MSI_ENABLED, &a->flags2);
+ esas2r_debug("MSI disabled");
+ }
+
+ if (a->inbound_list_md.virt_addr)
+ esas2r_initmem_free(a, &a->inbound_list_md);
+
+ if (a->outbound_list_md.virt_addr)
+ esas2r_initmem_free(a, &a->outbound_list_md);
+
+ list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
+ next_desc) {
+ esas2r_initmem_free(a, memdesc);
+ }
+
+ /* Following frees everything allocated via alloc_vda_req */
+ list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
+ esas2r_initmem_free(a, memdesc);
+ list_del(&memdesc->next_desc);
+ kfree(memdesc);
+ }
+
+ kfree(a->first_ae_req);
+ a->first_ae_req = NULL;
+
+ kfree(a->sg_list_mds);
+ a->sg_list_mds = NULL;
+
+ kfree(a->req_table);
+ a->req_table = NULL;
+
+ if (a->regs) {
+ esas2r_unmap_regions(a);
+ a->regs = NULL;
+ a->data_window = NULL;
+ esas2r_debug("regions unmapped");
+ }
+}
+
+/* Release/free allocated resources for specified adapters. */
+void esas2r_kill_adapter(int i)
+{
+ struct esas2r_adapter *a = esas2r_adapters[i];
+
+ if (a) {
+ unsigned long flags;
+ struct workqueue_struct *wq;
+ esas2r_debug("killing adapter %p [%d] ", a, i);
+ esas2r_fw_event_off(a);
+ esas2r_adapter_power_down(a, 0);
+ if (esas2r_buffered_ioctl &&
+ (a->pcid == esas2r_buffered_ioctl_pcid)) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ esas2r_buffered_ioctl,
+ esas2r_buffered_ioctl_addr);
+ esas2r_buffered_ioctl = NULL;
+ }
+
+ if (a->vda_buffer) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)VDA_MAX_BUFFER_SIZE,
+ a->vda_buffer,
+ (dma_addr_t)a->ppvda_buffer);
+ a->vda_buffer = NULL;
+ }
+ if (a->fs_api_buffer) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ a->fs_api_buffer,
+ (dma_addr_t)a->ppfs_api_buffer);
+ a->fs_api_buffer = NULL;
+ }
+
+ kfree(a->local_atto_ioctl);
+ a->local_atto_ioctl = NULL;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ wq = a->fw_event_q;
+ a->fw_event_q = NULL;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ if (a->uncached) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->uncached_size,
+ a->uncached,
+ (dma_addr_t)a->uncached_phys);
+ a->uncached = NULL;
+ esas2r_debug("uncached area freed");
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "pci_disable_device() called. msix_enabled: %d "
+ "msi_enabled: %d irq: %d pin: %d",
+ a->pcid->msix_enabled,
+ a->pcid->msi_enabled,
+ a->pcid->irq,
+ a->pcid->pin);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "before pci_disable_device() enable_cnt: %d",
+ a->pcid->enable_cnt.counter);
+
+ pci_disable_device(a->pcid);
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "after pci_disable_device() enable_cnt: %d",
+ a->pcid->enable_cnt.counter);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "pci_set_drv_data(%p, NULL) called",
+ a->pcid);
+
+ pci_set_drvdata(a->pcid, NULL);
+ esas2r_adapters[i] = NULL;
+
+ if (test_bit(AF2_INIT_DONE, &a->flags2)) {
+ clear_bit(AF2_INIT_DONE, &a->flags2);
+
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->host->shost_gendev),
+ "scsi_remove_host() called");
+
+ scsi_remove_host(a->host);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(a->host);
+ }
+ }
+}
+
+int esas2r_cleanup(struct Scsi_Host *host)
+{
+ struct esas2r_adapter *a;
+ int index;
+
+ if (host == NULL) {
+ int i;
+
+ esas2r_debug("esas2r_cleanup everything");
+ for (i = 0; i < MAX_ADAPTERS; i++)
+ esas2r_kill_adapter(i);
+ return -1;
+ }
+
+ esas2r_debug("esas2r_cleanup called for host %p", host);
+ a = (struct esas2r_adapter *)host->hostdata;
+ index = a->index;
+ esas2r_kill_adapter(index);
+ return index;
+}
+
+int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ u32 device_state;
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+ if (!a)
+ return -ENODEV;
+
+ esas2r_adapter_power_down(a, 1);
+ device_state = pci_choose_state(pdev, state);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_save_state() called");
+ pci_save_state(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_disable_device() called");
+ pci_disable_device(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_set_power_state() called");
+ pci_set_power_state(pdev, device_state);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+ return 0;
+}
+
+int esas2r_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+ int rez;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_set_power_state(PCI_D0) "
+ "called");
+ pci_set_power_state(pdev, PCI_D0);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_enable_wake(PCI_D0, 0) "
+ "called");
+ pci_enable_wake(pdev, PCI_D0, 0);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_restore_state() called");
+ pci_restore_state(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_enable_device() called");
+ rez = pci_enable_device(pdev);
+ pci_set_master(pdev);
+
+ if (!a) {
+ rez = -ENODEV;
+ goto error_exit;
+ }
+
+ if (esas2r_map_regions(a) != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+ /* Set up interupt mode */
+ esas2r_setup_interrupts(a, a->intr_mode);
+
+ /*
+ * Disable chip interrupts to prevent spurious interrupts until we
+ * claim the IRQ.
+ */
+ esas2r_disable_chip_interrupts(a);
+ if (!esas2r_power_up(a, true)) {
+ esas2r_debug("yikes, esas2r_power_up failed");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+ esas2r_claim_interrupts(a);
+
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
+ /*
+ * Now that system interrupt(s) are claimed, we can enable
+ * chip interrupts.
+ */
+ esas2r_enable_chip_interrupts(a);
+ esas2r_kickoff_timer(a);
+ } else {
+ esas2r_debug("yikes, unable to claim IRQ");
+ esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+error_exit:
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+ rez);
+ return rez;
+}
+
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
+{
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "setting adapter to degraded mode: %s\n", error_str);
+ return false;
+}
+
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
+{
+ return sizeof(struct esas2r_sas_nvram)
+ + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+ + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+ + 8
+ + (num_sg_lists * (u16)sgl_page_size)
+ + ALIGN((num_requests + num_ae_requests + 1 +
+ ESAS2R_LIST_EXTRA) *
+ sizeof(struct esas2r_inbound_list_source_entry),
+ 8)
+ + ALIGN((num_requests + num_ae_requests + 1 +
+ ESAS2R_LIST_EXTRA) *
+ sizeof(struct atto_vda_ob_rsp), 8)
+ + 256; /* VDA request and buffer align */
+}
+
+static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
+{
+ int pcie_cap_reg;
+
+ pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+ if (pcie_cap_reg) {
+ u16 devcontrol;
+
+ pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
+ &devcontrol);
+
+ if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
+ PCI_EXP_DEVCTL_READRQ_512B) {
+ esas2r_log(ESAS2R_LOG_INFO,
+ "max read request size > 512B");
+
+ devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
+ devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
+ pci_write_config_word(a->pcid,
+ pcie_cap_reg + PCI_EXP_DEVCTL,
+ devcontrol);
+ }
+ }
+}
+
+/*
+ * Determine the organization of the uncached data area and
+ * finish initializing the adapter structure
+ */
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+ void **uncached_area)
+{
+ u32 i;
+ u8 *high;
+ struct esas2r_inbound_list_source_entry *element;
+ struct esas2r_request *rq;
+ struct esas2r_mem_desc *sgl;
+
+ spin_lock_init(&a->sg_list_lock);
+ spin_lock_init(&a->mem_lock);
+ spin_lock_init(&a->queue_lock);
+
+ a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
+
+ if (!alloc_vda_req(a, &a->general_req)) {
+ esas2r_hdebug(
+ "failed to allocate a VDA request for the general req!");
+ return false;
+ }
+
+ /* allocate requests for asynchronous events */
+ a->first_ae_req =
+ kzalloc(num_ae_requests * sizeof(struct esas2r_request),
+ GFP_KERNEL);
+
+ if (a->first_ae_req == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for asynchronous events");
+ return false;
+ }
+
+ /* allocate the S/G list memory descriptors */
+ a->sg_list_mds = kzalloc(
+ num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+ if (a->sg_list_mds == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for s/g list descriptors");
+ return false;
+ }
+
+ /* allocate the request table */
+ a->req_table =
+ kzalloc((num_requests + num_ae_requests +
+ 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
+
+ if (a->req_table == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for the request table");
+ return false;
+ }
+
+ /* initialize PCI configuration space */
+ esas2r_init_pci_cfg_space(a);
+
+ /*
+ * the thunder_stream boards all have a serial flash part that has a
+ * different base address on the AHB bus.
+ */
+ if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
+ && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
+ a->flags2 |= AF2_THUNDERBOLT;
+
+ if (test_bit(AF2_THUNDERBOLT, &a->flags2))
+ a->flags2 |= AF2_SERIAL_FLASH;
+
+ if (a->pcid->subsystem_device == ATTO_TLSH_1068)
+ a->flags2 |= AF2_THUNDERLINK;
+
+ /* Uncached Area */
+ high = (u8 *)*uncached_area;
+
+ /* initialize the scatter/gather table pages */
+
+ for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
+ sgl->size = sgl_page_size;
+
+ list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
+
+ if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
+ /* Allow the driver to load if the minimum count met. */
+ if (i < NUM_SGL_MIN)
+ return false;
+ break;
+ }
+ }
+
+ /* compute the size of the lists */
+ a->list_size = num_requests + ESAS2R_LIST_EXTRA;
+
+ /* allocate the inbound list */
+ a->inbound_list_md.size = a->list_size *
+ sizeof(struct
+ esas2r_inbound_list_source_entry);
+
+ if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
+ esas2r_hdebug("failed to allocate IB list");
+ return false;
+ }
+
+ /* allocate the outbound list */
+ a->outbound_list_md.size = a->list_size *
+ sizeof(struct atto_vda_ob_rsp);
+
+ if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
+ ESAS2R_LIST_ALIGN)) {
+ esas2r_hdebug("failed to allocate IB list");
+ return false;
+ }
+
+ /* allocate the NVRAM structure */
+ a->nvram = (struct esas2r_sas_nvram *)high;
+ high += sizeof(struct esas2r_sas_nvram);
+
+ /* allocate the discovery buffer */
+ a->disc_buffer = high;
+ high += ESAS2R_DISC_BUF_LEN;
+ high = PTR_ALIGN(high, 8);
+
+ /* allocate the outbound list copy pointer */
+ a->outbound_copy = (u32 volatile *)high;
+ high += sizeof(u32);
+
+ if (!test_bit(AF_NVR_VALID, &a->flags))
+ esas2r_nvram_set_defaults(a);
+
+ /* update the caller's uncached memory area pointer */
+ *uncached_area = (void *)high;
+
+ /* initialize the allocated memory */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ memset(a->req_table, 0,
+ (num_requests + num_ae_requests +
+ 1) * sizeof(struct esas2r_request *));
+
+ esas2r_targ_db_initialize(a);
+
+ /* prime parts of the inbound list */
+ element =
+ (struct esas2r_inbound_list_source_entry *)a->
+ inbound_list_md.
+ virt_addr;
+
+ for (i = 0; i < a->list_size; i++) {
+ element->address = 0;
+ element->reserved = 0;
+ element->length = cpu_to_le32(HWILSE_INTERFACE_F0
+ | (sizeof(union
+ atto_vda_req)
+ /
+ sizeof(u32)));
+ element++;
+ }
+
+ /* init the AE requests */
+ for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
+ i++) {
+ INIT_LIST_HEAD(&rq->req_list);
+ if (!alloc_vda_req(a, rq)) {
+ esas2r_hdebug(
+ "failed to allocate a VDA request!");
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ /* override the completion function */
+ rq->comp_cb = esas2r_ae_complete;
+ }
+ }
+
+ return true;
+}
+
+/* This code will verify that the chip is operational. */
+bool esas2r_check_adapter(struct esas2r_adapter *a)
+{
+ u32 starttime;
+ u32 doorbell;
+ u64 ppaddr;
+ u32 dw;
+
+ /*
+ * if the chip reset detected flag is set, we can bypass a bunch of
+ * stuff.
+ */
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags))
+ goto skip_chip_reset;
+
+ /*
+ * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
+ * may have left them enabled or we may be recovering from a fault.
+ */
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
+ esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
+
+ /*
+ * wait for the firmware to become ready by forcing an interrupt and
+ * waiting for a response.
+ */
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ esas2r_force_interrupt(a);
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell == 0xFFFFFFFF) {
+ /*
+ * Give the firmware up to two seconds to enable
+ * register access after a reset.
+ */
+ if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
+ return esas2r_set_degraded_mode(a,
+ "unable to access registers");
+ } else if (doorbell & DRBL_FORCE_INT) {
+ u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+ /*
+ * This driver supports version 0 and version 1 of
+ * the API
+ */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+
+ if (ver == DRBL_FW_VER_0) {
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+ } else if (ver == DRBL_FW_VER_1) {
+ clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 1024;
+ a->build_sgl = esas2r_build_sg_list_prd;
+ } else {
+ return esas2r_set_degraded_mode(a,
+ "unknown firmware version");
+ }
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
+ esas2r_hdebug("FW ready TMO");
+ esas2r_bugon();
+
+ return esas2r_set_degraded_mode(a,
+ "firmware start has timed out");
+ }
+ }
+
+ /* purge any asynchronous events since we will repost them later */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(50));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug("timeout waiting for interface down");
+ break;
+ }
+ }
+skip_chip_reset:
+ /*
+ * first things first, before we go changing any of these registers
+ * disable the communication lists.
+ */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+ dw &= ~MU_ILC_ENABLE;
+ esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+ dw &= ~MU_OLC_ENABLE;
+ esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+ /* configure the communication list addresses */
+ ppaddr = a->inbound_list_md.phys_addr;
+ esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
+ upper_32_bits(ppaddr));
+ ppaddr = a->outbound_list_md.phys_addr;
+ esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
+ upper_32_bits(ppaddr));
+ ppaddr = a->uncached_phys +
+ ((u8 *)a->outbound_copy - a->uncached);
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
+ upper_32_bits(ppaddr));
+
+ /* reset the read and write pointers */
+ *a->outbound_copy =
+ a->last_write =
+ a->last_read = a->list_size - 1;
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+ esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
+ MU_OLW_TOGGLE | a->last_write);
+
+ /* configure the interface select fields */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
+ dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
+ esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
+ (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
+ dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
+ esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
+ (dw | MU_OLIC_LIST_F0 |
+ MU_OLIC_SOURCE_DDR));
+
+ /* finish configuring the communication lists */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+ dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
+ dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
+ | (a->list_size << MU_ILC_NUMBER_SHIFT);
+ esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+ dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
+ dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
+ esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+ /*
+ * notify the firmware that we're done setting up the communication
+ * list registers. wait here until the firmware is done configuring
+ * its lists. it will signal that it is done by enabling the lists.
+ */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_INIT) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug(
+ "timeout waiting for communication list init");
+ esas2r_bugon();
+ return esas2r_set_degraded_mode(a,
+ "timeout waiting for communication list init");
+ }
+ }
+
+ /*
+ * flag whether the firmware supports the power down doorbell. we
+ * determine this by reading the inbound doorbell enable mask.
+ */
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
+ if (doorbell & DRBL_POWER_DOWN)
+ set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
+ else
+ clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
+
+ /*
+ * enable assertion of outbound queue and doorbell interrupts in the
+ * main interrupt cause register.
+ */
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
+ return true;
+}
+
+/* Process the initialization message just completed and format the next one. */
+static bool esas2r_format_init_msg(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u32 msg = a->init_msg;
+ struct atto_vda_cfg_init *ci;
+
+ a->init_msg = 0;
+
+ switch (msg) {
+ case ESAS2R_INIT_MSG_START:
+ case ESAS2R_INIT_MSG_REINIT:
+ {
+ struct timeval now;
+ do_gettimeofday(&now);
+ esas2r_hdebug("CFG init");
+ esas2r_build_cfg_req(a,
+ rq,
+ VDA_CFG_INIT,
+ 0,
+ NULL);
+ ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
+ ci->sgl_page_size = cpu_to_le32(sgl_page_size);
+ ci->epoch_time = cpu_to_le32(now.tv_sec);
+ rq->flags |= RF_FAILURE_OK;
+ a->init_msg = ESAS2R_INIT_MSG_INIT;
+ break;
+ }
+
+ case ESAS2R_INIT_MSG_INIT:
+ if (rq->req_stat == RS_SUCCESS) {
+ u32 major;
+ u32 minor;
+ u16 fw_release;
+
+ a->fw_version = le16_to_cpu(
+ rq->func_rsp.cfg_rsp.vda_version);
+ a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
+ fw_release = le16_to_cpu(
+ rq->func_rsp.cfg_rsp.fw_release);
+ major = LOBYTE(fw_release);
+ minor = HIBYTE(fw_release);
+ a->fw_version += (major << 16) + (minor << 24);
+ } else {
+ esas2r_hdebug("FAILED");
+ }
+
+ /*
+ * the 2.71 and earlier releases of R6xx firmware did not error
+ * unsupported config requests correctly.
+ */
+
+ if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
+ || (be32_to_cpu(a->fw_version) > 0x00524702)) {
+ esas2r_hdebug("CFG get init");
+ esas2r_build_cfg_req(a,
+ rq,
+ VDA_CFG_GET_INIT2,
+ sizeof(struct atto_vda_cfg_init),
+ NULL);
+
+ rq->vrq->cfg.sg_list_offset = offsetof(
+ struct atto_vda_cfg_req,
+ data.sge);
+ rq->vrq->cfg.data.prde.ctl_len =
+ cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+ rq->vrq->cfg.data.prde.address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ rq->flags |= RF_FAILURE_OK;
+ a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
+ break;
+ }
+
+ case ESAS2R_INIT_MSG_GET_INIT:
+ if (msg == ESAS2R_INIT_MSG_GET_INIT) {
+ ci = (struct atto_vda_cfg_init *)rq->data_buf;
+ if (rq->req_stat == RS_SUCCESS) {
+ a->num_targets_backend =
+ le32_to_cpu(ci->num_targets_backend);
+ a->ioctl_tunnel =
+ le32_to_cpu(ci->ioctl_tunnel);
+ } else {
+ esas2r_hdebug("FAILED");
+ }
+ }
+ /* fall through */
+
+ default:
+ rq->req_stat = RS_SUCCESS;
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Perform initialization messages via the request queue. Messages are
+ * performed with interrupts disabled.
+ */
+bool esas2r_init_msgs(struct esas2r_adapter *a)
+{
+ bool success = true;
+ struct esas2r_request *rq = &a->general_req;
+
+ esas2r_rq_init_request(rq, a);
+ rq->comp_cb = esas2r_dummy_complete;
+
+ if (a->init_msg == 0)
+ a->init_msg = ESAS2R_INIT_MSG_REINIT;
+
+ while (a->init_msg) {
+ if (esas2r_format_init_msg(a, rq)) {
+ unsigned long flags;
+ while (true) {
+ spin_lock_irqsave(&a->queue_lock, flags);
+ esas2r_start_vda_request(a, rq);
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_wait_request(a, rq);
+ if (rq->req_stat != RS_PENDING)
+ break;
+ }
+ }
+
+ if (rq->req_stat == RS_SUCCESS
+ || ((rq->flags & RF_FAILURE_OK)
+ && rq->req_stat != RS_TIMEOUT))
+ continue;
+
+ esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
+ a->init_msg, rq->req_stat, rq->flags);
+ a->init_msg = ESAS2R_INIT_MSG_START;
+ success = false;
+ break;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+ return success;
+}
+
+/* Initialize the adapter chip */
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
+{
+ bool rslt = false;
+ struct esas2r_request *rq;
+ u32 i;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ goto exit;
+
+ if (!test_bit(AF_NVR_VALID, &a->flags)) {
+ if (!esas2r_nvram_read_direct(a))
+ esas2r_log(ESAS2R_LOG_WARN,
+ "invalid/missing NVRAM parameters");
+ }
+
+ if (!esas2r_init_msgs(a)) {
+ esas2r_set_degraded_mode(a, "init messages failed");
+ goto exit;
+ }
+
+ /* The firmware is ready. */
+ clear_bit(AF_DEGRADED_MODE, &a->flags);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+
+ /* Post all the async event requests */
+ for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
+ esas2r_start_ae_request(a, rq);
+
+ if (!a->flash_rev[0])
+ esas2r_read_flash_rev(a);
+
+ if (!a->image_type[0])
+ esas2r_read_image_type(a);
+
+ if (a->fw_version == 0)
+ a->fw_rev[0] = 0;
+ else
+ sprintf(a->fw_rev, "%1d.%02d",
+ (int)LOBYTE(HIWORD(a->fw_version)),
+ (int)HIBYTE(HIWORD(a->fw_version)));
+
+ esas2r_hdebug("firmware revision: %s", a->fw_rev);
+
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags)
+ && (test_bit(AF_FIRST_INIT, &a->flags))) {
+ esas2r_enable_chip_interrupts(a);
+ return true;
+ }
+
+ /* initialize discovery */
+ esas2r_disc_initialize(a);
+
+ /*
+ * wait for the device wait time to expire here if requested. this is
+ * usually requested during initial driver load and possibly when
+ * resuming from a low power state. deferred device waiting will use
+ * interrupts. chip reset recovery always defers device waiting to
+ * avoid being in a TASKLET too long.
+ */
+ if (init_poll) {
+ u32 currtime = a->disc_start_time;
+ u32 nexttick = 100;
+ u32 deltatime;
+
+ /*
+ * Block Tasklets from getting scheduled and indicate this is
+ * polled discovery.
+ */
+ set_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ set_bit(AF_DISC_POLLED, &a->flags);
+
+ /*
+ * Temporarily bring the disable count to zero to enable
+ * deferred processing. Note that the count is already zero
+ * after the first initialization.
+ */
+ if (test_bit(AF_FIRST_INIT, &a->flags))
+ atomic_dec(&a->disable_cnt);
+
+ while (test_bit(AF_DISC_PENDING, &a->flags)) {
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ /*
+ * Determine the need for a timer tick based on the
+ * delta time between this and the last iteration of
+ * this loop. We don't use the absolute time because
+ * then we would have to worry about when nexttick
+ * wraps and currtime hasn't yet.
+ */
+ deltatime = jiffies_to_msecs(jiffies) - currtime;
+ currtime += deltatime;
+
+ /*
+ * Process any waiting discovery as long as the chip is
+ * up. If a chip reset happens during initial polling,
+ * we have to make sure the timer tick processes the
+ * doorbell indicating the firmware is ready.
+ */
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags))
+ esas2r_disc_check_for_work(a);
+
+ /* Simulate a timer tick. */
+ if (nexttick <= deltatime) {
+
+ /* Time for a timer tick */
+ nexttick += 100;
+ esas2r_timer_tick(a);
+ }
+
+ if (nexttick > deltatime)
+ nexttick -= deltatime;
+
+ /* Do any deferred processing */
+ if (esas2r_is_tasklet_pending(a))
+ esas2r_do_tasklet_tasks(a);
+
+ }
+
+ if (test_bit(AF_FIRST_INIT, &a->flags))
+ atomic_inc(&a->disable_cnt);
+
+ clear_bit(AF_DISC_POLLED, &a->flags);
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ }
+
+
+ esas2r_targ_db_report_changes(a);
+
+ /*
+ * For cases where (a) the initialization messages processing may
+ * handle an interrupt for a port event and a discovery is waiting, but
+ * we are not waiting for devices, or (b) the device wait time has been
+ * exhausted but there is still discovery pending, start any leftover
+ * discovery in interrupt driven mode.
+ */
+ esas2r_disc_start_waiting(a);
+
+ /* Enable chip interrupts */
+ a->int_mask = ESAS2R_INT_STS_MASK;
+ esas2r_enable_chip_interrupts(a);
+ esas2r_enable_heartbeat(a);
+ rslt = true;
+
+exit:
+ /*
+ * Regardless of whether initialization was successful, certain things
+ * need to get done before we exit.
+ */
+
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
+ test_bit(AF_FIRST_INIT, &a->flags)) {
+ /*
+ * Reinitialization was performed during the first
+ * initialization. Only clear the chip reset flag so the
+ * original device polling is not cancelled.
+ */
+ if (!rslt)
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ } else {
+ /* First initialization or a subsequent re-init is complete. */
+ if (!rslt) {
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ clear_bit(AF_DISC_PENDING, &a->flags);
+ }
+
+
+ /* Enable deferred processing after the first initialization. */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ clear_bit(AF_FIRST_INIT, &a->flags);
+
+ if (atomic_dec_return(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+ }
+ }
+
+ return rslt;
+}
+
+void esas2r_reset_adapter(struct esas2r_adapter *a)
+{
+ set_bit(AF_OS_RESET, &a->flags);
+ esas2r_local_reset_adapter(a);
+ esas2r_schedule_tasklet(a);
+}
+
+void esas2r_reset_chip(struct esas2r_adapter *a)
+{
+ if (!esas2r_is_adapter_present(a))
+ return;
+
+ /*
+ * Before we reset the chip, save off the VDA core dump. The VDA core
+ * dump is located in the upper 512KB of the onchip SRAM. Make sure
+ * to not overwrite a previous crash that was saved.
+ */
+ if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
+ !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
+ esas2r_read_mem_block(a,
+ a->fw_coredump_buff,
+ MW_DATA_ADDR_SRAM + 0x80000,
+ ESAS2R_FWCOREDUMP_SZ);
+
+ set_bit(AF2_COREDUMP_SAVED, &a->flags2);
+ }
+
+ clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
+
+ /* Reset the chip */
+ if (a->pcid->revision == MVR_FREY_B2)
+ esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
+ MU_CTL_IN_FULL_RST2);
+ else
+ esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
+ MU_CTL_IN_FULL_RST);
+
+
+ /* Stall a little while to let the reset condition clear */
+ mdelay(10);
+}
+
+static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
+{
+ u32 starttime;
+ u32 doorbell;
+
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_POWER_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
+ esas2r_hdebug("Timeout waiting for power down");
+ break;
+ }
+ }
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+void esas2r_power_down(struct esas2r_adapter *a)
+{
+ set_bit(AF_POWER_MGT, &a->flags);
+ set_bit(AF_POWER_DOWN, &a->flags);
+
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ u32 starttime;
+ u32 doorbell;
+
+ /*
+ * We are currently running OK and will be reinitializing later.
+ * increment the disable count to coordinate with
+ * esas2r_init_adapter. We don't have to do this in degraded
+ * mode since we never enabled interrupts in the first place.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_disable_heartbeat(a);
+
+ /* wait for any VDA activity to clear before continuing */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_MSG_IFC_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell =
+ esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug(
+ "timeout waiting for interface down");
+ break;
+ }
+ }
+
+ /*
+ * For versions of firmware that support it tell them the driver
+ * is powering down.
+ */
+ if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
+ esas2r_power_down_notify_firmware(a);
+ }
+
+ /* Suspend I/O processing. */
+ set_bit(AF_OS_RESET, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+
+ esas2r_process_adapter_reset(a);
+
+ /* Remove devices now that I/O is cleaned up. */
+ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
+ esas2r_targ_db_remove_all(a, false);
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
+{
+ bool ret;
+
+ clear_bit(AF_POWER_DOWN, &a->flags);
+ esas2r_init_pci_cfg_space(a);
+ set_bit(AF_FIRST_INIT, &a->flags);
+ atomic_inc(&a->disable_cnt);
+
+ /* reinitialize the adapter */
+ ret = esas2r_check_adapter(a);
+ if (!esas2r_init_adapter_hw(a, init_poll))
+ ret = false;
+
+ /* send the reset asynchronous event */
+ esas2r_send_reset_ae(a, true);
+
+ /* clear this flag after initialization. */
+ clear_bit(AF_POWER_MGT, &a->flags);
+ return ret;
+}
+
+bool esas2r_is_adapter_present(struct esas2r_adapter *a)
+{
+ if (test_bit(AF_NOT_PRESENT, &a->flags))
+ return false;
+
+ if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
+ set_bit(AF_NOT_PRESENT, &a->flags);
+
+ return false;
+ }
+ return true;
+}
+
+const char *esas2r_get_model_name(struct esas2r_adapter *a)
+{
+ switch (a->pcid->subsystem_device) {
+ case ATTO_ESAS_R680:
+ return "ATTO ExpressSAS R680";
+
+ case ATTO_ESAS_R608:
+ return "ATTO ExpressSAS R608";
+
+ case ATTO_ESAS_R60F:
+ return "ATTO ExpressSAS R60F";
+
+ case ATTO_ESAS_R6F0:
+ return "ATTO ExpressSAS R6F0";
+
+ case ATTO_ESAS_R644:
+ return "ATTO ExpressSAS R644";
+
+ case ATTO_ESAS_R648:
+ return "ATTO ExpressSAS R648";
+
+ case ATTO_TSSC_3808:
+ return "ATTO ThunderStream SC 3808D";
+
+ case ATTO_TSSC_3808E:
+ return "ATTO ThunderStream SC 3808E";
+
+ case ATTO_TLSH_1068:
+ return "ATTO ThunderLink SH 1068";
+ }
+
+ return "ATTO SAS Controller";
+}
+
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
+{
+ switch (a->pcid->subsystem_device) {
+ case ATTO_ESAS_R680:
+ return "R680";
+
+ case ATTO_ESAS_R608:
+ return "R608";
+
+ case ATTO_ESAS_R60F:
+ return "R60F";
+
+ case ATTO_ESAS_R6F0:
+ return "R6F0";
+
+ case ATTO_ESAS_R644:
+ return "R644";
+
+ case ATTO_ESAS_R648:
+ return "R648";
+
+ case ATTO_TSSC_3808:
+ return "SC 3808D";
+
+ case ATTO_TSSC_3808E:
+ return "SC 3808E";
+
+ case ATTO_TLSH_1068:
+ return "SH 1068";
+ }
+
+ return "unknown";
+}
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
new file mode 100644
index 000000000..f16d6bcf9
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -0,0 +1,942 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_int.c
+ * esas2r interrupt handling
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Local function prototypes */
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
+static void esas2r_process_bus_reset(struct esas2r_adapter *a);
+
+/*
+ * Poll the adapter for interrupts and service them.
+ * This function handles both legacy interrupts and MSI.
+ */
+void esas2r_polled_interrupt(struct esas2r_adapter *a)
+{
+ u32 intstat;
+ u32 doorbell;
+
+ esas2r_disable_chip_interrupts(a);
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (intstat & MU_INTSTAT_POST_OUT) {
+ /* clear the interrupt */
+
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (intstat & MU_INTSTAT_DRBL) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ esas2r_enable_chip_interrupts(a);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
+ * schedules a TASKLET to process events, whereas the MSI handler just
+ * processes interrupt events directly.
+ */
+irqreturn_t esas2r_interrupt(int irq, void *dev_id)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+
+ if (!esas2r_adapter_interrupt_pending(a))
+ return IRQ_NONE;
+
+ set_bit(AF2_INT_PENDING, &a->flags2);
+ esas2r_schedule_tasklet(a);
+
+ return IRQ_HANDLED;
+}
+
+void esas2r_adapter_interrupt(struct esas2r_adapter *a)
+{
+ u32 doorbell;
+
+ if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
+ /* clear the interrupt */
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ a->int_mask = ESAS2R_INT_STS_MASK;
+
+ esas2r_enable_chip_interrupts(a);
+
+ if (likely(atomic_read(&a->disable_cnt) == 0))
+ esas2r_do_deferred_processes(a);
+}
+
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+ u32 intstat;
+ u32 doorbell;
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (likely(intstat & MU_INTSTAT_POST_OUT)) {
+ /* clear the interrupt */
+
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (unlikely(intstat & MU_INTSTAT_DRBL)) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ /*
+ * Work around a chip bug and force a new MSI to be sent if one is
+ * still pending.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_enable_chip_interrupts(a);
+
+ if (likely(atomic_read(&a->disable_cnt) == 0))
+ esas2r_do_deferred_processes(a);
+
+ esas2r_do_tasklet_tasks(a);
+
+ return 1;
+}
+
+
+
+static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct atto_vda_ob_rsp *rsp)
+{
+
+ /*
+ * For I/O requests, only copy the response if an error
+ * occurred and setup a callback to do error processing.
+ */
+ if (unlikely(rq->req_stat != RS_SUCCESS)) {
+ memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
+
+ if (rq->req_stat == RS_ABORTED) {
+ if (rq->timeout > RQ_MAX_TIMEOUT)
+ rq->req_stat = RS_TIMEOUT;
+ } else if (rq->req_stat == RS_SCSI_ERROR) {
+ u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
+
+ esas2r_trace("scsistatus: %x", scsistatus);
+
+ /* Any of these are a good result. */
+ if (scsistatus == SAM_STAT_GOOD || scsistatus ==
+ SAM_STAT_CONDITION_MET || scsistatus ==
+ SAM_STAT_INTERMEDIATE || scsistatus ==
+ SAM_STAT_INTERMEDIATE_CONDITION_MET) {
+ rq->req_stat = RS_SUCCESS;
+ rq->func_rsp.scsi_rsp.scsi_stat =
+ SAM_STAT_GOOD;
+ }
+ }
+ }
+}
+
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
+{
+ struct atto_vda_ob_rsp *rsp;
+ u32 rspput_ptr;
+ u32 rspget_ptr;
+ struct esas2r_request *rq;
+ u32 handle;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* Get the outbound limit and pointers */
+ rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
+ rspget_ptr = a->last_read;
+
+ esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
+
+ /* If we don't have anything to process, get out */
+ if (unlikely(rspget_ptr == rspput_ptr)) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_trace_exit();
+ return;
+ }
+
+ /* Make sure the firmware is healthy */
+ if (unlikely(rspput_ptr >= a->list_size)) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_bugon();
+ esas2r_local_reset_adapter(a);
+ esas2r_trace_exit();
+ return;
+ }
+
+ do {
+ rspget_ptr++;
+
+ if (rspget_ptr >= a->list_size)
+ rspget_ptr = 0;
+
+ rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
+ + rspget_ptr;
+
+ handle = rsp->handle;
+
+ /* Verify the handle range */
+ if (unlikely(LOWORD(handle) == 0
+ || LOWORD(handle) > num_requests +
+ num_ae_requests + 1)) {
+ esas2r_bugon();
+ continue;
+ }
+
+ /* Get the request for this handle */
+ rq = a->req_table[LOWORD(handle)];
+
+ if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
+ esas2r_bugon();
+ continue;
+ }
+
+ list_del(&rq->req_list);
+
+ /* Get the completion status */
+ rq->req_stat = rsp->req_stat;
+
+ esas2r_trace("handle: %x", handle);
+ esas2r_trace("rq: %p", rq);
+ esas2r_trace("req_status: %x", rq->req_stat);
+
+ if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+ esas2r_handle_outbound_rsp_err(a, rq, rsp);
+ } else {
+ /*
+ * Copy the outbound completion struct for non-I/O
+ * requests.
+ */
+ memcpy(&rq->func_rsp, &rsp->func_rsp,
+ sizeof(rsp->func_rsp));
+ }
+
+ /* Queue the request for completion. */
+ list_add_tail(&rq->comp_list, &comp_list);
+
+ } while (rspget_ptr != rspput_ptr);
+
+ a->last_read = rspget_ptr;
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ esas2r_comp_list_drain(a, &comp_list);
+ esas2r_trace_exit();
+}
+
+/*
+ * Perform all deferred processes for the adapter. Deferred
+ * processes can only be done while the current interrupt
+ * disable_cnt for the adapter is zero.
+ */
+void esas2r_do_deferred_processes(struct esas2r_adapter *a)
+{
+ int startreqs = 2;
+ struct esas2r_request *rq;
+ unsigned long flags;
+
+ /*
+ * startreqs is used to control starting requests
+ * that are on the deferred queue
+ * = 0 - do not start any requests
+ * = 1 - can start discovery requests
+ * = 2 - can start any request
+ */
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+ test_bit(AF_FLASHING, &a->flags))
+ startreqs = 0;
+ else if (test_bit(AF_DISC_PENDING, &a->flags))
+ startreqs = 1;
+
+ atomic_inc(&a->disable_cnt);
+
+ /* Clear off the completed list to be processed later. */
+
+ if (esas2r_is_tasklet_pending(a)) {
+ esas2r_schedule_tasklet(a);
+
+ startreqs = 0;
+ }
+
+ /*
+ * If we can start requests then traverse the defer queue
+ * looking for requests to start or complete
+ */
+ if (startreqs && !list_empty(&a->defer_list)) {
+ LIST_HEAD(comp_list);
+ struct list_head *element, *next;
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ list_for_each_safe(element, next, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request,
+ req_list);
+
+ if (rq->req_stat != RS_PENDING) {
+ list_del(element);
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+ /*
+ * Process discovery and OS requests separately. We
+ * can't hold up discovery requests when discovery is
+ * pending. In general, there may be different sets of
+ * conditions for starting different types of requests.
+ */
+ else if (rq->req_type == RT_DISC_REQ) {
+ list_del(element);
+ esas2r_disc_local_start_request(a, rq);
+ } else if (startreqs == 2) {
+ list_del(element);
+ esas2r_local_start_request(a, rq);
+
+ /*
+ * Flashing could have been set by last local
+ * start
+ */
+ if (test_bit(AF_FLASHING, &a->flags))
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_comp_list_drain(a, &comp_list);
+ }
+
+ atomic_dec(&a->disable_cnt);
+}
+
+/*
+ * Process an adapter reset (or one that is about to happen)
+ * by making sure all outstanding requests are completed that
+ * haven't been already.
+ */
+void esas2r_process_adapter_reset(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+ unsigned long flags;
+ struct esas2r_disc_context *dc;
+
+ LIST_HEAD(comp_list);
+ struct list_head *element;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* abort the active discovery, if any. */
+
+ if (rq->interrupt_cx) {
+ dc = (struct esas2r_disc_context *)rq->interrupt_cx;
+
+ dc->disc_evt = 0;
+
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+ }
+
+ /*
+ * just clear the interrupt callback for now. it will be dequeued if
+ * and when we find it on the active queue and we don't want the
+ * callback called. also set the dummy completion callback in case we
+ * were doing an I/O request.
+ */
+
+ rq->interrupt_cx = NULL;
+ rq->interrupt_cb = NULL;
+
+ rq->comp_cb = esas2r_dummy_complete;
+
+ /* Reset the read and write pointers */
+
+ *a->outbound_copy =
+ a->last_write =
+ a->last_read = a->list_size - 1;
+
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+
+ /* Kill all the requests on the active list */
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->req_stat == RS_STARTED)
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_comp_list_drain(a, &comp_list);
+ esas2r_process_bus_reset(a);
+ esas2r_trace_exit();
+}
+
+static void esas2r_process_bus_reset(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq;
+ struct list_head *element;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+
+ esas2r_hdebug("reset detected");
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* kill all the requests on the deferred queue */
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ esas2r_comp_list_drain(a, &comp_list);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ clear_bit(AF_OS_RESET, &a->flags);
+
+ esas2r_trace_exit();
+}
+
+static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
+{
+
+ clear_bit(AF_CHPRST_NEEDED, &a->flags);
+ clear_bit(AF_BUSRST_NEEDED, &a->flags);
+ clear_bit(AF_BUSRST_DETECTED, &a->flags);
+ clear_bit(AF_BUSRST_PENDING, &a->flags);
+ /*
+ * Make sure we don't get attempt more than 3 resets
+ * when the uptime between resets does not exceed one
+ * minute. This will stop any situation where there is
+ * really something wrong with the hardware. The way
+ * this works is that we start with uptime ticks at 0.
+ * Each time we do a reset, we add 20 seconds worth to
+ * the count. Each time a timer tick occurs, as long
+ * as a chip reset is not pending, we decrement the
+ * tick count. If the uptime ticks ever gets to 60
+ * seconds worth, we disable the adapter from that
+ * point forward. Three strikes, you're out.
+ */
+ if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
+ ESAS2R_CHP_UPTIME_MAX)) {
+ esas2r_hdebug("*** adapter disabled ***");
+
+ /*
+ * Ok, some kind of hard failure. Make sure we
+ * exit this loop with chip interrupts
+ * permanently disabled so we don't lock up the
+ * entire system. Also flag degraded mode to
+ * prevent the heartbeat from trying to recover.
+ */
+
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+ set_bit(AF_DISABLED, &a->flags);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ clear_bit(AF_DISC_PENDING, &a->flags);
+
+ esas2r_disable_chip_interrupts(a);
+ a->int_mask = 0;
+ esas2r_process_adapter_reset(a);
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "Adapter disabled because of hardware failure");
+ } else {
+ bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
+
+ if (!alrdyrst)
+ /*
+ * Only disable interrupts if this is
+ * the first reset attempt.
+ */
+ esas2r_disable_chip_interrupts(a);
+
+ if ((test_bit(AF_POWER_MGT, &a->flags)) &&
+ !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
+ /*
+ * Don't reset the chip on the first
+ * deferred power up attempt.
+ */
+ } else {
+ esas2r_hdebug("*** resetting chip ***");
+ esas2r_reset_chip(a);
+ }
+
+ /* Kick off the reinitialization */
+ a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
+ a->chip_init_time = jiffies_to_msecs(jiffies);
+ if (!test_bit(AF_POWER_MGT, &a->flags)) {
+ esas2r_process_adapter_reset(a);
+
+ if (!alrdyrst) {
+ /* Remove devices now that I/O is cleaned up. */
+ a->prev_dev_cnt =
+ esas2r_targ_db_get_tgt_cnt(a);
+ esas2r_targ_db_remove_all(a, false);
+ }
+ }
+
+ a->int_mask = 0;
+ }
+}
+
+static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
+{
+ while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+ /*
+ * Balance the enable in esas2r_initadapter_hw.
+ * Esas2r_power_down already took care of it for power
+ * management.
+ */
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_POWER_MGT, &a->flags))
+ esas2r_disable_chip_interrupts(a);
+
+ /* Reinitialize the chip. */
+ esas2r_check_adapter(a);
+ esas2r_init_adapter_hw(a, 0);
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+ break;
+
+ if (test_bit(AF_POWER_MGT, &a->flags)) {
+ /* Recovery from power management. */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ /* Chip reset during normal power up */
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "The firmware was reset during a normal power-up sequence");
+ } else {
+ /* Deferred power up complete. */
+ clear_bit(AF_POWER_MGT, &a->flags);
+ esas2r_send_reset_ae(a, true);
+ }
+ } else {
+ /* Recovery from online chip reset. */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ /* Chip reset during driver load */
+ } else {
+ /* Chip reset after driver load */
+ esas2r_send_reset_ae(a, false);
+ }
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "Recovering from a chip reset while the chip was online");
+ }
+
+ clear_bit(AF_CHPRST_STARTED, &a->flags);
+ esas2r_enable_chip_interrupts(a);
+
+ /*
+ * Clear this flag last! this indicates that the chip has been
+ * reset already during initialization.
+ */
+ clear_bit(AF_CHPRST_DETECTED, &a->flags);
+ }
+}
+
+
+/* Perform deferred tasks when chip interrupts are disabled */
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
+{
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+ test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+ esas2r_chip_rst_needed_during_tasklet(a);
+
+ esas2r_handle_chip_rst_during_tasklet(a);
+ }
+
+ if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
+ esas2r_hdebug("hard resetting bus");
+
+ clear_bit(AF_BUSRST_NEEDED, &a->flags);
+
+ if (test_bit(AF_FLASHING, &a->flags))
+ set_bit(AF_BUSRST_DETECTED, &a->flags);
+ else
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_RESET_BUS);
+ }
+
+ if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
+ esas2r_process_bus_reset(a);
+
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "scsi_report_bus_reset() called");
+
+ scsi_report_bus_reset(a->host, 0);
+
+ clear_bit(AF_BUSRST_DETECTED, &a->flags);
+ clear_bit(AF_BUSRST_PENDING, &a->flags);
+
+ esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
+ }
+
+ if (test_bit(AF_PORT_CHANGE, &a->flags)) {
+ clear_bit(AF_PORT_CHANGE, &a->flags);
+
+ esas2r_targ_db_report_changes(a);
+ }
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
+{
+ if (!(doorbell & DRBL_FORCE_INT)) {
+ esas2r_trace_enter();
+ esas2r_trace("doorbell: %x", doorbell);
+ }
+
+ /* First clear the doorbell bits */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
+
+ if (doorbell & DRBL_RESET_BUS)
+ set_bit(AF_BUSRST_DETECTED, &a->flags);
+
+ if (doorbell & DRBL_FORCE_INT)
+ clear_bit(AF_HEARTBEAT, &a->flags);
+
+ if (doorbell & DRBL_PANIC_REASON_MASK) {
+ esas2r_hdebug("*** Firmware Panic ***");
+ esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
+ }
+
+ if (doorbell & DRBL_FW_RESET) {
+ set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
+ esas2r_local_reset_adapter(a);
+ }
+
+ if (!(doorbell & DRBL_FORCE_INT))
+ esas2r_trace_exit();
+}
+
+void esas2r_force_interrupt(struct esas2r_adapter *a)
+{
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
+ DRBL_DRV_VER);
+}
+
+
+static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
+ u16 target, u32 length)
+{
+ struct esas2r_target *t = a->targetdb + target;
+ u32 cplen = length;
+ unsigned long flags;
+
+ if (cplen > sizeof(t->lu_event))
+ cplen = sizeof(t->lu_event);
+
+ esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
+ esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ t->new_target_state = TS_INVALID;
+
+ if (ae->lu.dwevent & VDAAE_LU_LOST) {
+ t->new_target_state = TS_NOT_PRESENT;
+ } else {
+ switch (ae->lu.bystate) {
+ case VDAAE_LU_NOT_PRESENT:
+ case VDAAE_LU_OFFLINE:
+ case VDAAE_LU_DELETED:
+ case VDAAE_LU_FACTORY_DISABLED:
+ t->new_target_state = TS_NOT_PRESENT;
+ break;
+
+ case VDAAE_LU_ONLINE:
+ case VDAAE_LU_DEGRADED:
+ t->new_target_state = TS_PRESENT;
+ break;
+ }
+ }
+
+ if (t->new_target_state != TS_INVALID) {
+ memcpy(&t->lu_event, &ae->lu, cplen);
+
+ esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
+ }
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+
+
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ union atto_vda_ae *ae =
+ (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
+ u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
+ union atto_vda_ae *last =
+ (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
+ + length);
+
+ esas2r_trace_enter();
+ esas2r_trace("length: %d", length);
+
+ if (length > sizeof(struct atto_vda_ae_data)
+ || (length & 3) != 0
+ || length == 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "The AE request response length (%p) is too long: %d",
+ rq, length);
+
+ esas2r_hdebug("aereq->length (0x%x) too long", length);
+ esas2r_bugon();
+
+ last = ae;
+ }
+
+ while (ae < last) {
+ u16 target;
+
+ esas2r_trace("ae: %p", ae);
+ esas2r_trace("ae->hdr: %p", &(ae->hdr));
+
+ length = ae->hdr.bylength;
+
+ if (length > (u32)((u8 *)last - (u8 *)ae)
+ || (length & 3) != 0
+ || length == 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "the async event length is invalid (%p): %d",
+ ae, length);
+
+ esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
+ esas2r_bugon();
+
+ break;
+ }
+
+ esas2r_nuxi_ae_data(ae);
+
+ esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
+ sizeof(union atto_vda_ae));
+
+ switch (ae->hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+
+ if (ae->raid.dwflags & (VDAAE_GROUP_STATE
+ | VDAAE_RBLD_STATE
+ | VDAAE_MEMBER_CHG
+ | VDAAE_PART_CHG)) {
+ esas2r_log(ESAS2R_LOG_INFO,
+ "RAID event received - name:%s rebuild_state:%d group_state:%d",
+ ae->raid.acname,
+ ae->raid.byrebuild_state,
+ ae->raid.bygroup_state);
+ }
+
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+ esas2r_log(ESAS2R_LOG_INFO,
+ "LUN event received: event:%d target_id:%d LUN:%d state:%d",
+ ae->lu.dwevent,
+ ae->lu.id.tgtlun.wtarget_id,
+ ae->lu.id.tgtlun.bylun,
+ ae->lu.bystate);
+
+ target = ae->lu.id.tgtlun.wtarget_id;
+
+ if (target < ESAS2R_MAX_TARGETS)
+ esas2r_lun_event(a, ae, target, length);
+
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
+ break;
+
+ default:
+
+ /* Silently ignore the rest and let the apps deal with
+ * them.
+ */
+
+ break;
+ }
+
+ ae = (union atto_vda_ae *)((u8 *)ae + length);
+ }
+
+ /* Now requeue it. */
+ esas2r_start_ae_request(a, rq);
+ esas2r_trace_exit();
+}
+
+/* Send an asynchronous event for a chip reset or power management. */
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
+{
+ struct atto_vda_ae_hdr ae;
+
+ if (pwr_mgt)
+ ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
+ else
+ ae.bytype = VDAAE_HDR_TYPE_RESET;
+
+ ae.byversion = VDAAE_HDR_VER_0;
+ ae.byflags = 0;
+ ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
+
+ if (pwr_mgt)
+ esas2r_hdebug("*** sending power management AE ***");
+ else
+ esas2r_hdebug("*** sending reset AE ***");
+
+ esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
+ sizeof(union atto_vda_ae));
+}
+
+void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{}
+
+static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u8 snslen, snslen2;
+
+ snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
+
+ if (snslen > rq->sense_len)
+ snslen = rq->sense_len;
+
+ if (snslen) {
+ if (rq->sense_buf)
+ memcpy(rq->sense_buf, rq->data_buf, snslen);
+ else
+ rq->sense_buf = (u8 *)rq->data_buf;
+
+ /* See about possible sense data */
+ if (snslen2 > 0x0c) {
+ u8 *s = (u8 *)rq->data_buf;
+
+ esas2r_trace_enter();
+
+ /* Report LUNS data has changed */
+ if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
+ esas2r_trace("rq->target_id: %d",
+ rq->target_id);
+ esas2r_target_state_changed(a, rq->target_id,
+ TS_LUN_CHANGE);
+ }
+
+ esas2r_trace("add_sense_key=%x", s[0x0c]);
+ esas2r_trace("add_sense_qual=%x", s[0x0d]);
+ esas2r_trace_exit();
+ }
+ }
+
+ rq->sense_len = snslen;
+}
+
+
+void esas2r_complete_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ if (rq->vrq->scsi.function == VDA_FUNC_FLASH
+ && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+ clear_bit(AF_FLASHING, &a->flags);
+
+ /* See if we setup a callback to do special processing */
+
+ if (rq->interrupt_cb) {
+ (*rq->interrupt_cb)(a, rq);
+
+ if (rq->req_stat == RS_PENDING) {
+ esas2r_start_request(a, rq);
+ return;
+ }
+ }
+
+ if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
+ && unlikely(rq->req_stat != RS_SUCCESS)) {
+ esas2r_check_req_rsp_sense(a, rq);
+ esas2r_log_request_failure(a, rq);
+ }
+
+ (*rq->comp_cb)(a, rq);
+}
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
new file mode 100644
index 000000000..a8df916cd
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -0,0 +1,877 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_io.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ struct esas2r_target *t = NULL;
+ struct esas2r_request *startrq = rq;
+ unsigned long flags;
+
+ if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
+ test_bit(AF_POWER_DOWN, &a->flags))) {
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
+ rq->req_stat = RS_SEL2;
+ else
+ rq->req_stat = RS_DEGRADED;
+ } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+ t = a->targetdb + rq->target_id;
+
+ if (unlikely(t >= a->targetdb_end
+ || !(t->flags & TF_USED))) {
+ rq->req_stat = RS_SEL;
+ } else {
+ /* copy in the target ID. */
+ rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
+
+ /*
+ * Test if we want to report RS_SEL for missing target.
+ * Note that if AF_DISC_PENDING is set than this will
+ * go on the defer queue.
+ */
+ if (unlikely(t->target_state != TS_PRESENT &&
+ !test_bit(AF_DISC_PENDING, &a->flags)))
+ rq->req_stat = RS_SEL;
+ }
+ }
+
+ if (unlikely(rq->req_stat != RS_PENDING)) {
+ esas2r_complete_request(a, rq);
+ return;
+ }
+
+ esas2r_trace("rq=%p", rq);
+ esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ esas2r_trace("rq->target_id=%d", rq->target_id);
+ esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ if (likely(list_empty(&a->defer_list) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_FLASHING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)))
+ esas2r_local_start_request(a, startrq);
+ else
+ list_add_tail(&startrq->req_list, &a->defer_list);
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+/*
+ * Starts the specified request. all requests have RS_PENDING set when this
+ * routine is called. The caller is usually esas2r_start_request, but
+ * esas2r_do_deferred_processes will start request that are deferred.
+ *
+ * The caller must ensure that requests can be started.
+ *
+ * esas2r_start_request will defer a request if there are already requests
+ * waiting or there is a chip reset pending. once the reset condition clears,
+ * esas2r_do_deferred_processes will call this function to start the request.
+ *
+ * When a request is started, it is placed on the active list and queued to
+ * the controller.
+ */
+void esas2r_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_trace_enter();
+ esas2r_trace("rq=%p", rq);
+ esas2r_trace("rq->vrq:%p", rq->vrq);
+ esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
+
+ if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
+ && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
+ set_bit(AF_FLASHING, &a->flags);
+
+ list_add_tail(&rq->req_list, &a->active_list);
+ esas2r_start_vda_request(a, rq);
+ esas2r_trace_exit();
+ return;
+}
+
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_inbound_list_source_entry *element;
+ u32 dw;
+
+ rq->req_stat = RS_STARTED;
+ /*
+ * Calculate the inbound list entry location and the current state of
+ * toggle bit.
+ */
+ a->last_write++;
+ if (a->last_write >= a->list_size) {
+ a->last_write = 0;
+ /* update the toggle bit */
+ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+ clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+ else
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+ }
+
+ element =
+ (struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
+ virt_addr
+ + a->last_write;
+
+ /* Set the VDA request size if it was never modified */
+ if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
+ rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
+
+ element->address = cpu_to_le64(rq->vrq_md->phys_addr);
+ element->length = cpu_to_le32(rq->vda_req_sz);
+
+ /* Update the write pointer */
+ dw = a->last_write;
+
+ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+ dw |= MU_ILW_TOGGLE;
+
+ esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
+ esas2r_trace("dw:%x", dw);
+ esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
+ esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the s/g context. The caller must initialize
+ * context prior to the initial call by calling esas2r_sgc_init().
+ */
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ union atto_vda_req *vrq = rq->vrq;
+
+ while (sgc->length) {
+ u32 rem = 0;
+ u64 addr;
+ u32 len;
+
+ len = (*sgc->get_phys_addr)(sgc, &addr);
+
+ if (unlikely(len == 0))
+ return false;
+
+ /* if current length is more than what's left, stop there */
+ if (unlikely(len > sgc->length))
+ len = sgc->length;
+
+another_entry:
+ /* limit to a round number less than the maximum length */
+ if (len > SGE_LEN_MAX) {
+ /*
+ * Save the remainder of the split. Whenever we limit
+ * an entry we come back around to build entries out
+ * of the leftover. We do this to prevent multiple
+ * calls to the get_phys_addr() function for an SGE
+ * that is too large.
+ */
+ rem = len - SGE_LEN_MAX;
+ len = SGE_LEN_MAX;
+ }
+
+ /* See if we need to allocate a new SGL */
+ if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
+ u8 sgelen;
+ struct esas2r_mem_desc *sgl;
+
+ /*
+ * If no SGls are available, return failure. The
+ * caller can call us later with the current context
+ * to pick up here.
+ */
+ sgl = esas2r_alloc_sgl(a);
+
+ if (unlikely(sgl == NULL))
+ return false;
+
+ /* Calculate the length of the last SGE filled in */
+ sgelen = (u8)((u8 *)sgc->sge.a64.curr
+ - (u8 *)sgc->sge.a64.last);
+
+ /*
+ * Copy the last SGE filled in to the first entry of
+ * the new SGL to make room for the chain entry.
+ */
+ memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
+
+ /* Figure out the new curr pointer in the new segment */
+ sgc->sge.a64.curr =
+ (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
+ sgelen);
+
+ /* Set the limit pointer and build the chain entry */
+ sgc->sge.a64.limit =
+ (struct atto_vda_sge *)((u8 *)sgl->virt_addr
+ + sgl_page_size
+ - sizeof(struct
+ atto_vda_sge));
+ sgc->sge.a64.last->length = cpu_to_le32(
+ SGE_CHAIN | SGE_ADDR_64);
+ sgc->sge.a64.last->address =
+ cpu_to_le64(sgl->phys_addr);
+
+ /*
+ * Now, if there was a previous chain entry, then
+ * update it to contain the length of this segment
+ * and size of this chain. otherwise this is the
+ * first SGL, so set the chain_offset in the request.
+ */
+ if (sgc->sge.a64.chain) {
+ sgc->sge.a64.chain->length |=
+ cpu_to_le32(
+ ((u8 *)(sgc->sge.a64.
+ last + 1)
+ - (u8 *)rq->sg_table->
+ virt_addr)
+ + sizeof(struct atto_vda_sge) *
+ LOBIT(SGE_CHAIN_SZ));
+ } else {
+ vrq->scsi.chain_offset = (u8)
+ ((u8 *)sgc->
+ sge.a64.last -
+ (u8 *)vrq);
+
+ /*
+ * This is the first SGL, so set the
+ * chain_offset and the VDA request size in
+ * the request.
+ */
+ rq->vda_req_sz =
+ (vrq->scsi.chain_offset +
+ sizeof(struct atto_vda_sge) +
+ 3)
+ / sizeof(u32);
+ }
+
+ /*
+ * Remember this so when we get a new SGL filled in we
+ * can update the length of this chain entry.
+ */
+ sgc->sge.a64.chain = sgc->sge.a64.last;
+
+ /* Now link the new SGL onto the primary request. */
+ list_add(&sgl->next_desc, &rq->sg_table_head);
+ }
+
+ /* Update last one filled in */
+ sgc->sge.a64.last = sgc->sge.a64.curr;
+
+ /* Build the new SGE and update the S/G context */
+ sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
+ sgc->sge.a64.curr->address = cpu_to_le32(addr);
+ sgc->sge.a64.curr++;
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /*
+ * Check if we previously split an entry. If so we have to
+ * pick up where we left off.
+ */
+ if (rem) {
+ addr += len;
+ len = rem;
+ rem = 0;
+ goto another_entry;
+ }
+ }
+
+ /* Mark the end of the SGL */
+ sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
+
+ /*
+ * If there was a previous chain entry, update the length to indicate
+ * the length of this last segment.
+ */
+ if (sgc->sge.a64.chain) {
+ sgc->sge.a64.chain->length |= cpu_to_le32(
+ ((u8 *)(sgc->sge.a64.curr) -
+ (u8 *)rq->sg_table->virt_addr));
+ } else {
+ u16 reqsize;
+
+ /*
+ * The entire VDA request was not used so lets
+ * set the size of the VDA request to be DMA'd
+ */
+ reqsize =
+ ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
+ + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
+
+ /*
+ * Only update the request size if it is bigger than what is
+ * already there. We can come in here twice for some management
+ * commands.
+ */
+ if (reqsize > rq->vda_req_sz)
+ rq->vda_req_sz = reqsize;
+ }
+ return true;
+}
+
+
+/*
+ * Create PRD list for each I-block consumed by the command. This routine
+ * determines how much data is required from each I-block being consumed
+ * by the command. The first and last I-blocks can be partials and all of
+ * the I-blocks in between are for a full I-block of data.
+ *
+ * The interleave size is used to determine the number of bytes in the 1st
+ * I-block and the remaining I-blocks are what remeains.
+ */
+static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ u64 addr;
+ u32 len;
+ struct esas2r_mem_desc *sgl;
+ u32 numchain = 1;
+ u32 rem = 0;
+
+ while (sgc->length) {
+ /* Get the next address/length pair */
+
+ len = (*sgc->get_phys_addr)(sgc, &addr);
+
+ if (unlikely(len == 0))
+ return false;
+
+ /* If current length is more than what's left, stop there */
+
+ if (unlikely(len > sgc->length))
+ len = sgc->length;
+
+another_entry:
+ /* Limit to a round number less than the maximum length */
+
+ if (len > PRD_LEN_MAX) {
+ /*
+ * Save the remainder of the split. whenever we limit
+ * an entry we come back around to build entries out
+ * of the leftover. We do this to prevent multiple
+ * calls to the get_phys_addr() function for an SGE
+ * that is too large.
+ */
+ rem = len - PRD_LEN_MAX;
+ len = PRD_LEN_MAX;
+ }
+
+ /* See if we need to allocate a new SGL */
+ if (sgc->sge.prd.sge_cnt == 0) {
+ if (len == sgc->length) {
+ /*
+ * We only have 1 PRD entry left.
+ * It can be placed where the chain
+ * entry would have gone
+ */
+
+ /* Build the simple SGE */
+ sgc->sge.prd.curr->ctl_len = cpu_to_le32(
+ PRD_DATA | len);
+ sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+ /* Adjust length related fields */
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /* We use the reserved chain entry for data */
+ numchain = 0;
+
+ break;
+ }
+
+ if (sgc->sge.prd.chain) {
+ /*
+ * Fill # of entries of current SGL in previous
+ * chain the length of this current SGL may not
+ * full.
+ */
+
+ sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
+ sgc->sge.prd.sgl_max_cnt);
+ }
+
+ /*
+ * If no SGls are available, return failure. The
+ * caller can call us later with the current context
+ * to pick up here.
+ */
+
+ sgl = esas2r_alloc_sgl(a);
+
+ if (unlikely(sgl == NULL))
+ return false;
+
+ /*
+ * Link the new SGL onto the chain
+ * They are in reverse order
+ */
+ list_add(&sgl->next_desc, &rq->sg_table_head);
+
+ /*
+ * An SGL was just filled in and we are starting
+ * a new SGL. Prime the chain of the ending SGL with
+ * info that points to the new SGL. The length gets
+ * filled in when the new SGL is filled or ended
+ */
+
+ sgc->sge.prd.chain = sgc->sge.prd.curr;
+
+ sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
+ sgc->sge.prd.chain->address =
+ cpu_to_le64(sgl->phys_addr);
+
+ /*
+ * Start a new segment.
+ * Take one away and save for chain SGE
+ */
+
+ sgc->sge.prd.curr =
+ (struct atto_physical_region_description *)sgl
+ ->
+ virt_addr;
+ sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
+ }
+
+ sgc->sge.prd.sge_cnt--;
+ /* Build the simple SGE */
+ sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
+ sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+ /* Used another element. Point to the next one */
+
+ sgc->sge.prd.curr++;
+
+ /* Adjust length related fields */
+
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /*
+ * Check if we previously split an entry. If so we have to
+ * pick up where we left off.
+ */
+
+ if (rem) {
+ addr += len;
+ len = rem;
+ rem = 0;
+ goto another_entry;
+ }
+ }
+
+ if (!list_empty(&rq->sg_table_head)) {
+ if (sgc->sge.prd.chain) {
+ sgc->sge.prd.chain->ctl_len |=
+ cpu_to_le32(sgc->sge.prd.sgl_max_cnt
+ - sgc->sge.prd.sge_cnt
+ - numchain);
+ }
+ }
+
+ return true;
+}
+
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ u32 len = sgc->length;
+ struct esas2r_target *t = a->targetdb + rq->target_id;
+ u8 is_i_o = 0;
+ u16 reqsize;
+ struct atto_physical_region_description *curr_iblk_chn;
+ u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
+
+ /*
+ * extract LBA from command so we can determine
+ * the I-Block boundary
+ */
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && t->target_state == TS_PRESENT
+ && !(t->flags & TF_PASS_THRU)) {
+ u32 lbalo = 0;
+
+ switch (rq->vrq->scsi.cdb[0]) {
+ case READ_16:
+ case WRITE_16:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[9],
+ cdb[8]),
+ MAKEWORD(cdb[7],
+ cdb[6]));
+ is_i_o = 1;
+ break;
+ }
+
+ case READ_12:
+ case WRITE_12:
+ case READ_10:
+ case WRITE_10:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[5],
+ cdb[4]),
+ MAKEWORD(cdb[3],
+ cdb[2]));
+ is_i_o = 1;
+ break;
+ }
+
+ case READ_6:
+ case WRITE_6:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[3],
+ cdb[2]),
+ MAKEWORD(cdb[1] & 0x1F,
+ 0));
+ is_i_o = 1;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (is_i_o) {
+ u32 startlba;
+
+ rq->vrq->scsi.iblk_cnt_prd = 0;
+
+ /* Determine size of 1st I-block PRD list */
+ startlba = t->inter_block - (lbalo & (t->inter_block -
+ 1));
+ sgc->length = startlba * t->block_size;
+
+ /* Chk if the 1st iblk chain starts at base of Iblock */
+ if ((lbalo & (t->inter_block - 1)) == 0)
+ rq->flags |= RF_1ST_IBLK_BASE;
+
+ if (sgc->length > len)
+ sgc->length = len;
+ } else {
+ sgc->length = len;
+ }
+ } else {
+ sgc->length = len;
+ }
+
+ /* get our starting chain address */
+
+ curr_iblk_chn =
+ (struct atto_physical_region_description *)sgc->sge.a64.curr;
+
+ sgc->sge.prd.sgl_max_cnt = sgl_page_size /
+ sizeof(struct
+ atto_physical_region_description);
+
+ /* create all of the I-block PRD lists */
+
+ while (len) {
+ sgc->sge.prd.sge_cnt = 0;
+ sgc->sge.prd.chain = NULL;
+ sgc->sge.prd.curr = curr_iblk_chn;
+
+ /* increment to next I-Block */
+
+ len -= sgc->length;
+
+ /* go build the next I-Block PRD list */
+
+ if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
+ return false;
+
+ curr_iblk_chn++;
+
+ if (is_i_o) {
+ rq->vrq->scsi.iblk_cnt_prd++;
+
+ if (len > t->inter_byte)
+ sgc->length = t->inter_byte;
+ else
+ sgc->length = len;
+ }
+ }
+
+ /* figure out the size used of the VDA request */
+
+ reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
+ / sizeof(u32);
+
+ /*
+ * only update the request size if it is bigger than what is
+ * already there. we can come in here twice for some management
+ * commands.
+ */
+
+ if (reqsize > rq->vda_req_sz)
+ rq->vda_req_sz = reqsize;
+
+ return true;
+}
+
+static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
+{
+ u32 delta = currtime - a->chip_init_time;
+
+ if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
+ /* Wait before accessing registers */
+ } else if (delta >= ESAS2R_CHPRST_TIME) {
+ /*
+ * The last reset failed so try again. Reset
+ * processing will give up after three tries.
+ */
+ esas2r_local_reset_adapter(a);
+ } else {
+ /* We can now see if the firmware is ready */
+ u32 doorbell;
+
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
+ esas2r_force_interrupt(a);
+ } else {
+ u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+ /* Driver supports API version 0 and 1 */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ if (ver == DRBL_FW_VER_0) {
+ set_bit(AF_CHPRST_DETECTED, &a->flags);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+ } else if (ver == DRBL_FW_VER_1) {
+ set_bit(AF_CHPRST_DETECTED, &a->flags);
+ clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 1024;
+ a->build_sgl = esas2r_build_sg_list_prd;
+ } else {
+ esas2r_local_reset_adapter(a);
+ }
+ }
+ }
+}
+
+
+/* This function must be called once per timer tick */
+void esas2r_timer_tick(struct esas2r_adapter *a)
+{
+ u32 currtime = jiffies_to_msecs(jiffies);
+ u32 deltatime = currtime - a->last_tick_time;
+
+ a->last_tick_time = currtime;
+
+ /* count down the uptime */
+ if (a->chip_uptime &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)) {
+ if (deltatime >= a->chip_uptime)
+ a->chip_uptime = 0;
+ else
+ a->chip_uptime -= deltatime;
+ }
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
+ if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
+ !test_bit(AF_CHPRST_DETECTED, &a->flags))
+ esas2r_handle_pending_reset(a, currtime);
+ } else {
+ if (test_bit(AF_DISC_PENDING, &a->flags))
+ esas2r_disc_check_complete(a);
+ if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
+ if (test_bit(AF_HEARTBEAT, &a->flags)) {
+ if ((currtime - a->heartbeat_time) >=
+ ESAS2R_HEARTBEAT_TIME) {
+ clear_bit(AF_HEARTBEAT, &a->flags);
+ esas2r_hdebug("heartbeat failed");
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "heartbeat failed");
+ esas2r_bugon();
+ esas2r_local_reset_adapter(a);
+ }
+ } else {
+ set_bit(AF_HEARTBEAT, &a->flags);
+ a->heartbeat_time = currtime;
+ esas2r_force_interrupt(a);
+ }
+ }
+ }
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Send the specified task management function to the target and LUN
+ * specified in rqaux. in addition, immediately abort any commands that
+ * are queued but not sent to the device according to the rules specified
+ * by the task management function.
+ */
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+ struct esas2r_request *rqaux, u8 task_mgt_func)
+{
+ u16 targetid = rqaux->target_id;
+ u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
+ bool ret = false;
+ struct esas2r_request *rq;
+ struct list_head *next, *element;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+ esas2r_trace("rqaux:%p", rqaux);
+ esas2r_trace("task_mgt_func:%x", task_mgt_func);
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* search the defer queue looking for requests for the device */
+ list_for_each_safe(element, next, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && rq->target_id == targetid
+ && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+ || task_mgt_func == 0x20)) { /* target reset */
+ /* Found a request affected by the task management */
+ if (rq->req_stat == RS_PENDING) {
+ /*
+ * The request is pending or waiting. We can
+ * safelycomplete the request now.
+ */
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list,
+ &comp_list);
+ }
+ }
+ }
+
+ /* Send the task management request to the firmware */
+ rqaux->sense_len = 0;
+ rqaux->vrq->scsi.length = 0;
+ rqaux->target_id = targetid;
+ rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
+ memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
+ rqaux->vrq->scsi.flags |=
+ cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
+
+ if (test_bit(AF_FLASHING, &a->flags)) {
+ /* Assume success. if there are active requests, return busy */
+ rqaux->req_stat = RS_SUCCESS;
+
+ list_for_each_safe(element, next, &a->active_list) {
+ rq = list_entry(element, struct esas2r_request,
+ req_list);
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && rq->target_id == targetid
+ && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+ || task_mgt_func == 0x20)) /* target reset */
+ rqaux->req_stat = RS_BUSY;
+ }
+
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ if (!test_bit(AF_FLASHING, &a->flags))
+ esas2r_start_request(a, rqaux);
+
+ esas2r_comp_list_drain(a, &comp_list);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ esas2r_trace_exit();
+
+ return ret;
+}
+
+void esas2r_reset_bus(struct esas2r_adapter *a)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
+
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)) {
+ set_bit(AF_BUSRST_NEEDED, &a->flags);
+ set_bit(AF_BUSRST_PENDING, &a->flags);
+ set_bit(AF_OS_RESET, &a->flags);
+
+ esas2r_schedule_tasklet(a);
+ }
+}
+
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+ u8 status)
+{
+ esas2r_trace_enter();
+ esas2r_trace("rq:%p", rq);
+ list_del_init(&rq->req_list);
+ if (rq->timeout > RQ_MAX_TIMEOUT) {
+ /*
+ * The request timed out, but we could not abort it because a
+ * chip reset occurred. Return busy status.
+ */
+ rq->req_stat = RS_BUSY;
+ esas2r_trace_exit();
+ return true;
+ }
+
+ rq->req_stat = status;
+ esas2r_trace_exit();
+ return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
new file mode 100644
index 000000000..baf913047
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -0,0 +1,2114 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_ioctl.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * Buffered ioctl handlers. A buffered ioctl is one which requires that we
+ * allocate a DMA-able memory area to communicate with the firmware. In
+ * order to prevent continually allocating and freeing consistent memory,
+ * we will allocate a global buffer the first time we need it and re-use
+ * it for subsequent ioctl calls that require it.
+ */
+
+u8 *esas2r_buffered_ioctl;
+dma_addr_t esas2r_buffered_ioctl_addr;
+u32 esas2r_buffered_ioctl_size;
+struct pci_dev *esas2r_buffered_ioctl_pcid;
+
+static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
+typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
+ struct esas2r_request *,
+ struct esas2r_sg_context *,
+ void *);
+typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
+ struct esas2r_request *, void *);
+
+struct esas2r_buffered_ioctl {
+ struct esas2r_adapter *a;
+ void *ioctl;
+ u32 length;
+ u32 control_code;
+ u32 offset;
+ BUFFERED_IOCTL_CALLBACK
+ callback;
+ void *context;
+ BUFFERED_IOCTL_DONE_CALLBACK
+ done_callback;
+ void *done_context;
+
+};
+
+static void complete_fm_api_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->fm_api_command_done = 1;
+ wake_up_interruptible(&a->fm_api_waiter);
+}
+
+/* Callbacks for building scatter/gather lists for FM API requests */
+static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = sgc->cur_offset - a->save_offset;
+
+ (*addr) = a->firmware.phys + offset;
+ return a->firmware.orig_len - offset;
+}
+
+static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = sgc->cur_offset - a->save_offset;
+
+ (*addr) = a->firmware.header_buff_phys + offset;
+ return sizeof(struct esas2r_flash_img) - offset;
+}
+
+/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
+static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_request *rq;
+
+ if (down_interruptible(&a->fm_api_semaphore)) {
+ fi->status = FI_STAT_BUSY;
+ return;
+ }
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ fi->status = FI_STAT_BUSY;
+ goto free_sem;
+ }
+
+ if (fi == &a->firmware.header) {
+ a->firmware.header_buff = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)sizeof(
+ struct
+ esas2r_flash_img),
+ (dma_addr_t *)&a->
+ firmware.
+ header_buff_phys,
+ GFP_KERNEL);
+
+ if (a->firmware.header_buff == NULL) {
+ esas2r_debug("failed to allocate header buffer!");
+ fi->status = FI_STAT_BUSY;
+ goto free_req;
+ }
+
+ memcpy(a->firmware.header_buff, fi,
+ sizeof(struct esas2r_flash_img));
+ a->save_offset = a->firmware.header_buff;
+ a->fm_api_sgc.get_phys_addr =
+ (PGETPHYSADDR)get_physaddr_fm_api_header;
+ } else {
+ a->save_offset = (u8 *)fi;
+ a->fm_api_sgc.get_phys_addr =
+ (PGETPHYSADDR)get_physaddr_fm_api;
+ }
+
+ rq->comp_cb = complete_fm_api_req;
+ a->fm_api_command_done = 0;
+ a->fm_api_sgc.cur_offset = a->save_offset;
+
+ if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
+ &a->fm_api_sgc))
+ goto all_done;
+
+ /* Now wait around for it to complete. */
+ while (!a->fm_api_command_done)
+ wait_event_interruptible(a->fm_api_waiter,
+ a->fm_api_command_done);
+all_done:
+ if (fi == &a->firmware.header) {
+ memcpy(fi, a->firmware.header_buff,
+ sizeof(struct esas2r_flash_img));
+
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)sizeof(struct esas2r_flash_img),
+ a->firmware.header_buff,
+ (dma_addr_t)a->firmware.header_buff_phys);
+ }
+free_req:
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+free_sem:
+ up(&a->fm_api_semaphore);
+ return;
+
+}
+
+static void complete_nvr_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->nvram_command_done = 1;
+ wake_up_interruptible(&a->nvram_waiter);
+}
+
+/* Callback for building scatter/gather lists for buffered ioctls */
+static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
+ u64 *addr)
+{
+ int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
+
+ (*addr) = esas2r_buffered_ioctl_addr + offset;
+ return esas2r_buffered_ioctl_size - offset;
+}
+
+static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->buffered_ioctl_done = 1;
+ wake_up_interruptible(&a->buffered_ioctl_waiter);
+}
+
+static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
+{
+ struct esas2r_adapter *a = bi->a;
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ u8 result = IOCTL_SUCCESS;
+
+ if (down_interruptible(&buffered_ioctl_semaphore))
+ return IOCTL_OUT_OF_RESOURCES;
+
+ /* allocate a buffer or use the existing buffer. */
+ if (esas2r_buffered_ioctl) {
+ if (esas2r_buffered_ioctl_size < bi->length) {
+ /* free the too-small buffer and get a new one */
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ esas2r_buffered_ioctl,
+ esas2r_buffered_ioctl_addr);
+
+ goto allocate_buffer;
+ }
+ } else {
+allocate_buffer:
+ esas2r_buffered_ioctl_size = bi->length;
+ esas2r_buffered_ioctl_pcid = a->pcid;
+ esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ esas2r_buffered_ioctl_size,
+ &
+ esas2r_buffered_ioctl_addr,
+ GFP_KERNEL);
+ }
+
+ if (!esas2r_buffered_ioctl) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "could not allocate %d bytes of consistent memory "
+ "for a buffered ioctl!",
+ bi->length);
+
+ esas2r_debug("buffered ioctl alloc failure");
+ result = IOCTL_OUT_OF_RESOURCES;
+ goto exit_cleanly;
+ }
+
+ memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "could not allocate an internal request");
+
+ result = IOCTL_OUT_OF_RESOURCES;
+ esas2r_debug("buffered ioctl - no requests");
+ goto exit_cleanly;
+ }
+
+ a->buffered_ioctl_done = 0;
+ rq->comp_cb = complete_buffered_ioctl_req;
+ sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
+ sgc.length = esas2r_buffered_ioctl_size;
+
+ if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
+ /* completed immediately, no need to wait */
+ a->buffered_ioctl_done = 0;
+ goto free_andexit_cleanly;
+ }
+
+ /* now wait around for it to complete. */
+ while (!a->buffered_ioctl_done)
+ wait_event_interruptible(a->buffered_ioctl_waiter,
+ a->buffered_ioctl_done);
+
+free_andexit_cleanly:
+ if (result == IOCTL_SUCCESS && bi->done_callback)
+ (*bi->done_callback)(a, rq, bi->done_context);
+
+ esas2r_free_request(a, rq);
+
+exit_cleanly:
+ if (result == IOCTL_SUCCESS)
+ memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);
+
+ up(&buffered_ioctl_semaphore);
+ return result;
+}
+
+/* SMP ioctl support */
+static int smp_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc, void *context)
+{
+ struct atto_ioctl_smp *si =
+ (struct atto_ioctl_smp *)esas2r_buffered_ioctl;
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ si->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = si;
+ bi.length = sizeof(struct atto_ioctl_smp)
+ + le32_to_cpu(si->req_length)
+ + le32_to_cpu(si->rsp_length);
+ bi.offset = 0;
+ bi.callback = smp_ioctl_callback;
+ return handle_buffered_ioctl(&bi);
+}
+
+
+/* CSMI ioctl support */
+static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
+ rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
+
+ /* Now call the original completion callback. */
+ (*rq->aux_req_cb)(a, rq);
+}
+
+/* Tunnel a CSMI IOCTL to the back end driver for processing. */
+static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
+ union atto_ioctl_csmi *ci,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc,
+ u32 ctrl_code,
+ u16 target_id)
+{
+ struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return false;
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
+ ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
+ ioctl->csmi.target_id = cpu_to_le16(target_id);
+ ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+
+ /*
+ * Always usurp the completion callback since the interrupt callback
+ * mechanism may be used.
+ */
+ rq->aux_req_cx = ci;
+ rq->aux_req_cb = rq->comp_cb;
+ rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
+
+ if (!esas2r_build_sg_list(a, rq, sgc))
+ return false;
+
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+static bool check_lun(struct scsi_lun lun)
+{
+ bool result;
+
+ result = ((lun.scsi_lun[7] == 0) &&
+ (lun.scsi_lun[6] == 0) &&
+ (lun.scsi_lun[5] == 0) &&
+ (lun.scsi_lun[4] == 0) &&
+ (lun.scsi_lun[3] == 0) &&
+ (lun.scsi_lun[2] == 0) &&
+/* Byte 1 is intentionally skipped */
+ (lun.scsi_lun[0] == 0));
+
+ return result;
+}
+
+static int csmi_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc, void *context)
+{
+ struct atto_csmi *ci = (struct atto_csmi *)context;
+ union atto_ioctl_csmi *ioctl_csmi =
+ (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+ u8 path = 0;
+ u8 tid = 0;
+ u8 lun = 0;
+ u32 sts = CSMI_STS_SUCCESS;
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
+ struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;
+
+ path = gda->path_id;
+ tid = gda->target_id;
+ lun = gda->lun;
+ } else if (ci->control_code == CSMI_CC_TASK_MGT) {
+ struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;
+
+ path = tm->path_id;
+ tid = tm->target_id;
+ lun = tm->lun;
+ }
+
+ if (path > 0) {
+ rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
+ CSMI_STS_INV_PARAM);
+ return false;
+ }
+
+ rq->target_id = tid;
+ rq->vrq->scsi.flags |= cpu_to_le32(lun);
+
+ switch (ci->control_code) {
+ case CSMI_CC_GET_DRVR_INFO:
+ {
+ struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;
+
+ strcpy(gdi->description, esas2r_get_model_name(a));
+ gdi->csmi_major_rev = CSMI_MAJOR_REV;
+ gdi->csmi_minor_rev = CSMI_MINOR_REV;
+ break;
+ }
+
+ case CSMI_CC_GET_CNTLR_CFG:
+ {
+ struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;
+
+ gcc->base_io_addr = 0;
+ pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
+ &gcc->base_memaddr_lo);
+ pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
+ &gcc->base_memaddr_hi);
+ gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
+ a->pcid->subsystem_vendor);
+ gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
+ gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
+ gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
+ gcc->pci_addr.bus_num = a->pcid->bus->number;
+ gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
+ gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);
+
+ memset(gcc->serial_num, 0, sizeof(gcc->serial_num));
+
+ gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
+ gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
+ gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
+ gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
+ gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
+ gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
+ gcc->bios_build_rev = LOWORD(a->flash_ver);
+
+ if (test_bit(AF2_THUNDERLINK, &a->flags2))
+ gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
+ | CSMI_CNTLRF_SATA_HBA;
+ else
+ gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
+ | CSMI_CNTLRF_SATA_RAID;
+
+ gcc->rrom_major_rev = 0;
+ gcc->rrom_minor_rev = 0;
+ gcc->rrom_build_rev = 0;
+ gcc->rrom_release_rev = 0;
+ gcc->rrom_biosmajor_rev = 0;
+ gcc->rrom_biosminor_rev = 0;
+ gcc->rrom_biosbuild_rev = 0;
+ gcc->rrom_biosrelease_rev = 0;
+ break;
+ }
+
+ case CSMI_CC_GET_CNTLR_STS:
+ {
+ struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ gcs->status = CSMI_CNTLR_STS_FAILED;
+ else
+ gcs->status = CSMI_CNTLR_STS_GOOD;
+
+ gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
+ break;
+ }
+
+ case CSMI_CC_FW_DOWNLOAD:
+ case CSMI_CC_GET_RAID_INFO:
+ case CSMI_CC_GET_RAID_CFG:
+
+ sts = CSMI_STS_BAD_CTRL_CODE;
+ break;
+
+ case CSMI_CC_SMP_PASSTHRU:
+ case CSMI_CC_SSP_PASSTHRU:
+ case CSMI_CC_STP_PASSTHRU:
+ case CSMI_CC_GET_PHY_INFO:
+ case CSMI_CC_SET_PHY_INFO:
+ case CSMI_CC_GET_LINK_ERRORS:
+ case CSMI_CC_GET_SATA_SIG:
+ case CSMI_CC_GET_CONN_INFO:
+ case CSMI_CC_PHY_CTRL:
+
+ if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+ ci->control_code,
+ ESAS2R_TARG_ID_INV)) {
+ sts = CSMI_STS_FAILED;
+ break;
+ }
+
+ return true;
+
+ case CSMI_CC_GET_SCSI_ADDR:
+ {
+ struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+ struct scsi_lun lun;
+
+ memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));
+
+ if (!check_lun(lun)) {
+ sts = CSMI_STS_NO_SCSI_ADDR;
+ break;
+ }
+
+ /* make sure the device is present */
+ spin_lock_irqsave(&a->mem_lock, flags);
+ t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ if (t == NULL) {
+ sts = CSMI_STS_NO_SCSI_ADDR;
+ break;
+ }
+
+ gsa->host_index = 0xFF;
+ gsa->lun = gsa->sas_lun[1];
+ rq->target_id = esas2r_targ_get_id(t, a);
+ break;
+ }
+
+ case CSMI_CC_GET_DEV_ADDR:
+ {
+ struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;
+
+ /* make sure the target is present */
+ t = a->targetdb + rq->target_id;
+
+ if (t >= a->targetdb_end
+ || t->target_state != TS_PRESENT
+ || t->sas_addr == 0) {
+ sts = CSMI_STS_NO_DEV_ADDR;
+ break;
+ }
+
+ /* fill in the result */
+ *(u64 *)gda->sas_addr = t->sas_addr;
+ memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
+ gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+ break;
+ }
+
+ case CSMI_CC_TASK_MGT:
+
+ /* make sure the target is present */
+ t = a->targetdb + rq->target_id;
+
+ if (t >= a->targetdb_end
+ || t->target_state != TS_PRESENT
+ || !(t->flags & TF_PASS_THRU)) {
+ sts = CSMI_STS_NO_DEV_ADDR;
+ break;
+ }
+
+ if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+ ci->control_code,
+ t->phys_targ_id)) {
+ sts = CSMI_STS_FAILED;
+ break;
+ }
+
+ return true;
+
+ default:
+
+ sts = CSMI_STS_BAD_CTRL_CODE;
+ break;
+ }
+
+ rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
+
+ return false;
+}
+
+
+static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq, void *context)
+{
+ struct atto_csmi *ci = (struct atto_csmi *)context;
+ union atto_ioctl_csmi *ioctl_csmi =
+ (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+
+ switch (ci->control_code) {
+ case CSMI_CC_GET_DRVR_INFO:
+ {
+ struct atto_csmi_get_driver_info *gdi =
+ &ioctl_csmi->drvr_info;
+
+ strcpy(gdi->name, ESAS2R_VERSION_STR);
+
+ gdi->major_rev = ESAS2R_MAJOR_REV;
+ gdi->minor_rev = ESAS2R_MINOR_REV;
+ gdi->build_rev = 0;
+ gdi->release_rev = 0;
+ break;
+ }
+
+ case CSMI_CC_GET_SCSI_ADDR:
+ {
+ struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+ if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
+ CSMI_STS_SUCCESS) {
+ gsa->target_id = rq->target_id;
+ gsa->path_id = 0;
+ }
+
+ break;
+ }
+ }
+
+ ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
+}
+
+
+static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = &ci->data;
+ bi.length = sizeof(union atto_ioctl_csmi);
+ bi.offset = 0;
+ bi.callback = csmi_ioctl_callback;
+ bi.context = ci;
+ bi.done_callback = csmi_ioctl_done_callback;
+ bi.done_context = ci;
+
+ return handle_buffered_ioctl(&bi);
+}
+
+/* ATTO HBA ioctl support */
+
+/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
+static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
+ struct atto_ioctl *hi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ hi->status = ATTO_STS_OUT_OF_RSRC;
+
+ return false;
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
+ struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+ u8 sts = ATTO_SPT_RS_FAILED;
+
+ spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
+ spt->sense_length = rq->sense_len;
+ spt->residual_length =
+ le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
+
+ switch (rq->req_stat) {
+ case RS_SUCCESS:
+ case RS_SCSI_ERROR:
+ sts = ATTO_SPT_RS_SUCCESS;
+ break;
+ case RS_UNDERRUN:
+ sts = ATTO_SPT_RS_UNDERRUN;
+ break;
+ case RS_OVERRUN:
+ sts = ATTO_SPT_RS_OVERRUN;
+ break;
+ case RS_SEL:
+ case RS_SEL2:
+ sts = ATTO_SPT_RS_NO_DEVICE;
+ break;
+ case RS_NO_LUN:
+ sts = ATTO_SPT_RS_NO_LUN;
+ break;
+ case RS_TIMEOUT:
+ sts = ATTO_SPT_RS_TIMEOUT;
+ break;
+ case RS_DEGRADED:
+ sts = ATTO_SPT_RS_DEGRADED;
+ break;
+ case RS_BUSY:
+ sts = ATTO_SPT_RS_BUSY;
+ break;
+ case RS_ABORTED:
+ sts = ATTO_SPT_RS_ABORTED;
+ break;
+ case RS_RESET:
+ sts = ATTO_SPT_RS_BUS_RESET;
+ break;
+ }
+
+ spt->req_status = sts;
+
+ /* Update the target ID to the next one present. */
+ spt->target_id =
+ esas2r_targ_db_find_next_present(a, (u16)spt->target_id);
+
+ /* Done, call the completion callback. */
+ (*rq->aux_req_cb)(a, rq);
+}
+
+static int hba_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc,
+ void *context)
+{
+ struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+ hi->status = ATTO_STS_SUCCESS;
+
+ switch (hi->function) {
+ case ATTO_FUNC_GET_ADAP_INFO:
+ {
+ u8 *class_code = (u8 *)&a->pcid->class;
+
+ struct atto_hba_get_adapter_info *gai =
+ &hi->data.get_adap_info;
+ int pcie_cap_reg;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_ADAP_INFO0;
+ break;
+ }
+
+ memset(gai, 0, sizeof(*gai));
+
+ gai->pci.vendor_id = a->pcid->vendor;
+ gai->pci.device_id = a->pcid->device;
+ gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
+ gai->pci.ss_device_id = a->pcid->subsystem_device;
+ gai->pci.class_code[0] = class_code[0];
+ gai->pci.class_code[1] = class_code[1];
+ gai->pci.class_code[2] = class_code[2];
+ gai->pci.rev_id = a->pcid->revision;
+ gai->pci.bus_num = a->pcid->bus->number;
+ gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
+ gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
+
+ pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+ if (pcie_cap_reg) {
+ u16 stat;
+ u32 caps;
+
+ pci_read_config_word(a->pcid,
+ pcie_cap_reg + PCI_EXP_LNKSTA,
+ &stat);
+ pci_read_config_dword(a->pcid,
+ pcie_cap_reg + PCI_EXP_LNKCAP,
+ &caps);
+
+ gai->pci.link_speed_curr =
+ (u8)(stat & PCI_EXP_LNKSTA_CLS);
+ gai->pci.link_speed_max =
+ (u8)(caps & PCI_EXP_LNKCAP_SLS);
+ gai->pci.link_width_curr =
+ (u8)((stat & PCI_EXP_LNKSTA_NLW)
+ >> PCI_EXP_LNKSTA_NLW_SHIFT);
+ gai->pci.link_width_max =
+ (u8)((caps & PCI_EXP_LNKCAP_MLW)
+ >> 4);
+ }
+
+ gai->pci.msi_vector_cnt = 1;
+
+ if (a->pcid->msix_enabled)
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
+ else if (a->pcid->msi_enabled)
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
+ else
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;
+
+ gai->adap_type = ATTO_GAI_AT_ESASRAID2;
+
+ if (test_bit(AF2_THUNDERLINK, &a->flags2))
+ gai->adap_type = ATTO_GAI_AT_TLSASHBA;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
+
+ gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
+ ATTO_GAI_AF_DEVADDR_SUPP;
+
+ if (a->pcid->subsystem_device == ATTO_ESAS_R60F
+ || a->pcid->subsystem_device == ATTO_ESAS_R608
+ || a->pcid->subsystem_device == ATTO_ESAS_R644
+ || a->pcid->subsystem_device == ATTO_TSSC_3808E)
+ gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
+
+ gai->num_ports = ESAS2R_NUM_PHYS;
+ gai->num_phys = ESAS2R_NUM_PHYS;
+
+ strcpy(gai->firmware_rev, a->fw_rev);
+ strcpy(gai->flash_rev, a->flash_rev);
+ strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
+ strcpy(gai->model_name, esas2r_get_model_name(a));
+
+ gai->num_targets = ESAS2R_MAX_TARGETS;
+
+ gai->num_busses = 1;
+ gai->num_targsper_bus = gai->num_targets;
+ gai->num_lunsper_targ = 256;
+
+ if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
+ || a->pcid->subsystem_device == ATTO_ESAS_R60F)
+ gai->num_connectors = 4;
+ else
+ gai->num_connectors = 2;
+
+ gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
+
+ gai->num_targets_backend = a->num_targets_backend;
+
+ gai->tunnel_flags = a->ioctl_tunnel
+ & (ATTO_GAI_TF_MEM_RW
+ | ATTO_GAI_TF_TRACE
+ | ATTO_GAI_TF_SCSI_PASS_THRU
+ | ATTO_GAI_TF_GET_DEV_ADDR
+ | ATTO_GAI_TF_PHY_CTRL
+ | ATTO_GAI_TF_CONN_CTRL
+ | ATTO_GAI_TF_GET_DEV_INFO);
+ break;
+ }
+
+ case ATTO_FUNC_GET_ADAP_ADDR:
+ {
+ struct atto_hba_get_adapter_address *gaa =
+ &hi->data.get_adap_addr;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_ADAP_ADDR0;
+ } else if (gaa->addr_type == ATTO_GAA_AT_PORT
+ || gaa->addr_type == ATTO_GAA_AT_NODE) {
+ if (gaa->addr_type == ATTO_GAA_AT_PORT
+ && gaa->port_id >= ESAS2R_NUM_PHYS) {
+ hi->status = ATTO_STS_NOT_APPL;
+ } else {
+ memcpy((u64 *)gaa->address,
+ &a->nvram->sas_addr[0], sizeof(u64));
+ gaa->addr_len = sizeof(u64);
+ }
+ } else {
+ hi->status = ATTO_STS_INV_PARAM;
+ }
+
+ break;
+ }
+
+ case ATTO_FUNC_MEM_RW:
+ {
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ hi->status = ATTO_STS_UNSUPPORTED;
+
+ break;
+ }
+
+ case ATTO_FUNC_TRACE:
+ {
+ struct atto_hba_trace *trc = &hi->data.trace;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_TRACE1) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_TRACE1;
+ break;
+ }
+
+ if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
+ && hi->version >= ATTO_VER_TRACE1) {
+ if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
+ u32 len = hi->data_length;
+ u32 offset = trc->current_offset;
+ u32 total_len = ESAS2R_FWCOREDUMP_SZ;
+
+ /* Size is zero if a core dump isn't present */
+ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
+ total_len = 0;
+
+ if (len > total_len)
+ len = total_len;
+
+ if (offset >= total_len
+ || offset + len > total_len
+ || len == 0) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ memcpy(trc + 1,
+ a->fw_coredump_buff + offset,
+ len);
+
+ hi->data_length = len;
+ } else if (trc->trace_func == ATTO_TRC_TF_RESET) {
+ memset(a->fw_coredump_buff, 0,
+ ESAS2R_FWCOREDUMP_SZ);
+
+ clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
+ } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ /* Always return all the info we can. */
+ trc->trace_mask = 0;
+ trc->current_offset = 0;
+ trc->total_length = ESAS2R_FWCOREDUMP_SZ;
+
+ /* Return zero length buffer if core dump not present */
+ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
+ trc->total_length = 0;
+ } else {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ }
+
+ break;
+ }
+
+ case ATTO_FUNC_SCSI_PASS_THRU:
+ {
+ struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+ struct scsi_lun lun;
+
+ memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_SCSI_PASS_THRU0;
+ break;
+ }
+
+ if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ esas2r_sgc_init(sgc, a, rq, NULL);
+
+ sgc->length = hi->data_length;
+ sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
+ + sizeof(struct atto_hba_scsi_pass_thru);
+
+ /* Finish request initialization */
+ rq->target_id = (u16)spt->target_id;
+ rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
+ memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
+ rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
+ rq->sense_len = spt->sense_length;
+ rq->sense_buf = (u8 *)spt->sense_data;
+ /* NOTE: we ignore spt->timeout */
+
+ /*
+ * always usurp the completion callback since the interrupt
+ * callback mechanism may be used.
+ */
+
+ rq->aux_req_cx = hi;
+ rq->aux_req_cb = rq->comp_cb;
+ rq->comp_cb = scsi_passthru_comp_cb;
+
+ if (spt->flags & ATTO_SPTF_DATA_IN) {
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+ } else if (spt->flags & ATTO_SPTF_DATA_OUT) {
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+ } else {
+ if (sgc->length) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+ }
+
+ if (spt->flags & ATTO_SPTF_ORDERED_Q)
+ rq->vrq->scsi.flags |=
+ cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
+ else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
+
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ hi->status = ATTO_STS_OUT_OF_RSRC;
+ break;
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+ }
+
+ case ATTO_FUNC_GET_DEV_ADDR:
+ {
+ struct atto_hba_get_device_address *gda =
+ &hi->data.get_dev_addr;
+ struct esas2r_target *t;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_DEV_ADDR0;
+ break;
+ }
+
+ if (gda->target_id >= ESAS2R_MAX_TARGETS) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ t = a->targetdb + (u16)gda->target_id;
+
+ if (t->target_state != TS_PRESENT) {
+ hi->status = ATTO_STS_FAILED;
+ } else if (gda->addr_type == ATTO_GDA_AT_PORT) {
+ if (t->sas_addr == 0) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ } else {
+ *(u64 *)gda->address = t->sas_addr;
+
+ gda->addr_len = sizeof(u64);
+ }
+ } else if (gda->addr_type == ATTO_GDA_AT_NODE) {
+ hi->status = ATTO_STS_NOT_APPL;
+ } else {
+ hi->status = ATTO_STS_INV_PARAM;
+ }
+
+ /* update the target ID to the next one present. */
+
+ gda->target_id =
+ esas2r_targ_db_find_next_present(a,
+ (u16)gda->target_id);
+ break;
+ }
+
+ case ATTO_FUNC_PHY_CTRL:
+ case ATTO_FUNC_CONN_CTRL:
+ {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ case ATTO_FUNC_ADAP_CTRL:
+ {
+ struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_ADAP_CTRL0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_ADAP_CTRL0;
+ break;
+ }
+
+ if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
+ esas2r_reset_adapter(a);
+ } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+ ac->adap_state = ATTO_AC_AS_RST_SCHED;
+ else if (test_bit(AF_CHPRST_PENDING, &a->flags))
+ ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
+ else if (test_bit(AF_DISC_PENDING, &a->flags))
+ ac->adap_state = ATTO_AC_AS_RST_DISC;
+ else if (test_bit(AF_DISABLED, &a->flags))
+ ac->adap_state = ATTO_AC_AS_DISABLED;
+ else if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ ac->adap_state = ATTO_AC_AS_DEGRADED;
+ else
+ ac->adap_state = ATTO_AC_AS_OK;
+
+ break;
+ }
+
+ case ATTO_FUNC_GET_DEV_INFO:
+ {
+ struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
+ struct esas2r_target *t;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_DEV_INFO0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_DEV_INFO0;
+ break;
+ }
+
+ if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ t = a->targetdb + (u16)gdi->target_id;
+
+ /* update the target ID to the next one present. */
+
+ gdi->target_id =
+ esas2r_targ_db_find_next_present(a,
+ (u16)gdi->target_id);
+
+ if (t->target_state != TS_PRESENT) {
+ hi->status = ATTO_STS_FAILED;
+ break;
+ }
+
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ default:
+
+ hi->status = ATTO_STS_INV_FUNC;
+ break;
+ }
+
+ return false;
+}
+
+static void hba_ioctl_done_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq, void *context)
+{
+ struct atto_ioctl *ioctl_hba =
+ (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+ esas2r_debug("hba_ioctl_done_callback %d", a->index);
+
+ if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
+ struct atto_hba_get_adapter_info *gai =
+ &ioctl_hba->data.get_adap_info;
+
+ esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
+
+ gai->drvr_rev_major = ESAS2R_MAJOR_REV;
+ gai->drvr_rev_minor = ESAS2R_MINOR_REV;
+
+ strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
+ strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
+
+ gai->num_busses = 1;
+ gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
+ gai->num_lunsper_targ = 1;
+ }
+}
+
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl *ioctl_hba)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = ioctl_hba;
+ bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
+ bi.callback = hba_ioctl_callback;
+ bi.context = NULL;
+ bi.done_callback = hba_ioctl_done_callback;
+ bi.done_context = NULL;
+ bi.offset = 0;
+
+ return handle_buffered_ioctl(&bi);
+}
+
+
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *data)
+{
+ int result = 0;
+
+ a->nvram_command_done = 0;
+ rq->comp_cb = complete_nvr_req;
+
+ if (esas2r_nvram_write(a, rq, data)) {
+ /* now wait around for it to complete. */
+ while (!a->nvram_command_done)
+ wait_event_interruptible(a->nvram_waiter,
+ a->nvram_command_done);
+ ;
+
+ /* done, check the status. */
+ if (rq->req_stat == RS_SUCCESS)
+ result = 1;
+ }
+ return result;
+}
+
+
+/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+{
+ struct atto_express_ioctl *ioctl = NULL;
+ struct esas2r_adapter *a;
+ struct esas2r_request *rq;
+ u16 code;
+ int err;
+
+ esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
+
+ if ((arg == NULL)
+ || (cmd < EXPRESS_IOCTL_MIN)
+ || (cmd > EXPRESS_IOCTL_MAX))
+ return -ENOTSUPP;
+
+ if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler access_ok failed for cmd %d, "
+ "address %p", cmd,
+ arg);
+ return -EFAULT;
+ }
+
+ /* allocate a kernel memory buffer for the IOCTL data */
+ ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
+ if (ioctl == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler kzalloc failed for %d bytes",
+ sizeof(struct atto_express_ioctl));
+ return -ENOMEM;
+ }
+
+ err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
+ if (err != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "copy_from_user didn't copy everything (err %d, cmd %d)",
+ err,
+ cmd);
+ kfree(ioctl);
+
+ return -EFAULT;
+ }
+
+ /* verify the signature */
+
+ if (memcmp(ioctl->header.signature,
+ EXPRESS_IOCTL_SIGNATURE,
+ EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
+ esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
+ kfree(ioctl);
+
+ return -ENOTSUPP;
+ }
+
+ /* assume success */
+
+ ioctl->header.return_code = IOCTL_SUCCESS;
+ err = 0;
+
+ /*
+ * handle EXPRESS_IOCTL_GET_CHANNELS
+ * without paying attention to channel
+ */
+
+ if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
+ int i = 0, k = 0;
+
+ ioctl->data.chanlist.num_channels = 0;
+
+ while (i < MAX_ADAPTERS) {
+ if (esas2r_adapters[i]) {
+ ioctl->data.chanlist.num_channels++;
+ ioctl->data.chanlist.channel[k] = i;
+ k++;
+ }
+ i++;
+ }
+
+ goto ioctl_done;
+ }
+
+ /* get the channel */
+
+ if (ioctl->header.channel == 0xFF) {
+ a = (struct esas2r_adapter *)hostdata;
+ } else {
+ a = esas2r_adapters[ioctl->header.channel];
+ if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) {
+ ioctl->header.return_code = IOCTL_BAD_CHANNEL;
+ esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
+ kfree(ioctl);
+
+ return -ENOTSUPP;
+ }
+ }
+
+ switch (cmd) {
+ case EXPRESS_IOCTL_RW_FIRMWARE:
+
+ if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
+ err = esas2r_write_fw(a,
+ (char *)ioctl->data.fwrw.image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+
+ if (err >= 0) {
+ err = esas2r_read_fw(a,
+ (char *)ioctl->data.fwrw.
+ image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+ }
+ } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
+ err = esas2r_write_fs(a,
+ (char *)ioctl->data.fwrw.image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+
+ if (err >= 0) {
+ err = esas2r_read_fs(a,
+ (char *)ioctl->data.fwrw.
+ image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+ }
+ } else {
+ ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
+ }
+
+ break;
+
+ case EXPRESS_IOCTL_READ_PARAMS:
+
+ memcpy(ioctl->data.prw.data_buffer, a->nvram,
+ sizeof(struct esas2r_sas_nvram));
+ ioctl->data.prw.code = 1;
+ break;
+
+ case EXPRESS_IOCTL_WRITE_PARAMS:
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ kfree(ioctl);
+ esas2r_log(ESAS2R_LOG_WARN,
+ "could not allocate an internal request");
+ return -ENOMEM;
+ }
+
+ code = esas2r_write_params(a, rq,
+ (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+ ioctl->data.prw.code = code;
+
+ esas2r_free_request(a, rq);
+
+ break;
+
+ case EXPRESS_IOCTL_DEFAULT_PARAMS:
+
+ esas2r_nvram_get_defaults(a,
+ (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+ ioctl->data.prw.code = 1;
+ break;
+
+ case EXPRESS_IOCTL_CHAN_INFO:
+
+ ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
+ ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
+ ioctl->data.chaninfo.IRQ = a->pcid->irq;
+ ioctl->data.chaninfo.device_id = a->pcid->device;
+ ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
+ ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
+ ioctl->data.chaninfo.revision_id = a->pcid->revision;
+ ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
+ ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
+ ioctl->data.chaninfo.core_rev = 0;
+ ioctl->data.chaninfo.host_no = a->host->host_no;
+ ioctl->data.chaninfo.hbaapi_rev = 0;
+ break;
+
+ case EXPRESS_IOCTL_SMP:
+ ioctl->header.return_code = handle_smp_ioctl(a,
+ &ioctl->data.
+ ioctl_smp);
+ break;
+
+ case EXPRESS_CSMI:
+ ioctl->header.return_code =
+ handle_csmi_ioctl(a, &ioctl->data.csmi);
+ break;
+
+ case EXPRESS_IOCTL_HBA:
+ ioctl->header.return_code = handle_hba_ioctl(a,
+ &ioctl->data.
+ ioctl_hba);
+ break;
+
+ case EXPRESS_IOCTL_VDA:
+ err = esas2r_write_vda(a,
+ (char *)&ioctl->data.ioctl_vda,
+ 0,
+ sizeof(struct atto_ioctl_vda) +
+ ioctl->data.ioctl_vda.data_length);
+
+ if (err >= 0) {
+ err = esas2r_read_vda(a,
+ (char *)&ioctl->data.ioctl_vda,
+ 0,
+ sizeof(struct atto_ioctl_vda) +
+ ioctl->data.ioctl_vda.data_length);
+ }
+
+
+
+
+ break;
+
+ case EXPRESS_IOCTL_GET_MOD_INFO:
+
+ ioctl->data.modinfo.adapter = a;
+ ioctl->data.modinfo.pci_dev = a->pcid;
+ ioctl->data.modinfo.scsi_host = a->host;
+ ioctl->data.modinfo.host_no = a->host->host_no;
+
+ break;
+
+ default:
+ esas2r_debug("esas2r_ioctl invalid cmd %p!", cmd);
+ ioctl->header.return_code = IOCTL_ERR_INVCMD;
+ }
+
+ioctl_done:
+
+ if (err < 0) {
+ esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+ cmd);
+
+ switch (err) {
+ case -ENOMEM:
+ case -EBUSY:
+ ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
+ break;
+
+ case -ENOSYS:
+ case -EINVAL:
+ ioctl->header.return_code = IOCTL_INVALID_PARAM;
+ break;
+
+ default:
+ ioctl->header.return_code = IOCTL_GENERAL_ERROR;
+ break;
+ }
+
+ }
+
+ /* Always copy the buffer back, if only to pick up the status */
+ err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+ if (err != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler copy_to_user didn't copy "
+ "everything (err %d, cmd %d)", err,
+ cmd);
+ kfree(ioctl);
+
+ return -EFAULT;
+ }
+
+ kfree(ioctl);
+
+ return 0;
+}
+
+int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+{
+ return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
+}
+
+static void free_fw_buffers(struct esas2r_adapter *a)
+{
+ if (a->firmware.data) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->firmware.orig_len,
+ a->firmware.data,
+ (dma_addr_t)a->firmware.phys);
+
+ a->firmware.data = NULL;
+ }
+}
+
+static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
+{
+ free_fw_buffers(a);
+
+ a->firmware.orig_len = length;
+
+ a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+ (size_t)length,
+ (dma_addr_t *)&a->firmware.
+ phys,
+ GFP_KERNEL);
+
+ if (!a->firmware.data) {
+ esas2r_debug("buffer alloc failed!");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Handle a call to read firmware. */
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ esas2r_trace_enter();
+ /* if the cached header is a status, simply copy it over and return. */
+ if (a->firmware.state == FW_STATUS_ST) {
+ int size = min_t(int, count, sizeof(a->firmware.header));
+ esas2r_trace_exit();
+ memcpy(buf, &a->firmware.header, size);
+ esas2r_debug("esas2r_read_fw: STATUS size %d", size);
+ return size;
+ }
+
+ /*
+ * if the cached header is a command, do it if at
+ * offset 0, otherwise copy the pieces.
+ */
+
+ if (a->firmware.state == FW_COMMAND_ST) {
+ u32 length = a->firmware.header.length;
+ esas2r_trace_exit();
+
+ esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
+ length,
+ off);
+
+ if (off == 0) {
+ if (a->firmware.header.action == FI_ACT_UP) {
+ if (!allocate_fw_buffers(a, length))
+ return -ENOMEM;
+
+
+ /* copy header over */
+
+ memcpy(a->firmware.data,
+ &a->firmware.header,
+ sizeof(a->firmware.header));
+
+ do_fm_api(a,
+ (struct esas2r_flash_img *)a->firmware.data);
+ } else if (a->firmware.header.action == FI_ACT_UPSZ) {
+ int size =
+ min((int)count,
+ (int)sizeof(a->firmware.header));
+ do_fm_api(a, &a->firmware.header);
+ memcpy(buf, &a->firmware.header, size);
+ esas2r_debug("FI_ACT_UPSZ size %d", size);
+ return size;
+ } else {
+ esas2r_debug("invalid action %d",
+ a->firmware.header.action);
+ return -ENOSYS;
+ }
+ }
+
+ if (count + off > length)
+ count = length - off;
+
+ if (count < 0)
+ return 0;
+
+ if (!a->firmware.data) {
+ esas2r_debug(
+ "read: nonzero offset but no buffer available!");
+ return -ENOMEM;
+ }
+
+ esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
+ count,
+ length);
+
+ memcpy(buf, &a->firmware.data[off], count);
+
+ /* when done, release the buffer */
+
+ if (length <= off + count) {
+ esas2r_debug("esas2r_read_fw: freeing buffer!");
+
+ free_fw_buffers(a);
+ }
+
+ return count;
+ }
+
+ esas2r_trace_exit();
+ esas2r_debug("esas2r_read_fw: invalid firmware state %d",
+ a->firmware.state);
+
+ return -EINVAL;
+}
+
+/* Handle a call to write firmware. */
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ u32 length;
+
+ if (off == 0) {
+ struct esas2r_flash_img *header =
+ (struct esas2r_flash_img *)buf;
+
+ /* assume version 0 flash image */
+
+ int min_size = sizeof(struct esas2r_flash_img_v0);
+
+ a->firmware.state = FW_INVALID_ST;
+
+ /* validate the version field first */
+
+ if (count < 4
+ || header->fi_version > FI_VERSION_1) {
+ esas2r_debug(
+ "esas2r_write_fw: short header or invalid version");
+ return -EINVAL;
+ }
+
+ /* See if its a version 1 flash image */
+
+ if (header->fi_version == FI_VERSION_1)
+ min_size = sizeof(struct esas2r_flash_img);
+
+ /* If this is the start, the header must be full and valid. */
+ if (count < min_size) {
+ esas2r_debug("esas2r_write_fw: short header, aborting");
+ return -EINVAL;
+ }
+
+ /* Make sure the size is reasonable. */
+ length = header->length;
+
+ if (length > 1024 * 1024) {
+ esas2r_debug(
+ "esas2r_write_fw: hosed, length %d fi_version %d",
+ length, header->fi_version);
+ return -EINVAL;
+ }
+
+ /*
+ * If this is a write command, allocate memory because
+ * we have to cache everything. otherwise, just cache
+ * the header, because the read op will do the command.
+ */
+
+ if (header->action == FI_ACT_DOWN) {
+ if (!allocate_fw_buffers(a, length))
+ return -ENOMEM;
+
+ /*
+ * Store the command, so there is context on subsequent
+ * calls.
+ */
+ memcpy(&a->firmware.header,
+ buf,
+ sizeof(*header));
+ } else if (header->action == FI_ACT_UP
+ || header->action == FI_ACT_UPSZ) {
+ /* Save the command, result will be picked up on read */
+ memcpy(&a->firmware.header,
+ buf,
+ sizeof(*header));
+
+ a->firmware.state = FW_COMMAND_ST;
+
+ esas2r_debug(
+ "esas2r_write_fw: COMMAND, count %d, action %d ",
+ count, header->action);
+
+ /*
+ * Pretend we took the whole buffer,
+ * so we don't get bothered again.
+ */
+
+ return count;
+ } else {
+ esas2r_debug("esas2r_write_fw: invalid action %d ",
+ a->firmware.header.action);
+ return -ENOSYS;
+ }
+ } else {
+ length = a->firmware.header.length;
+ }
+
+ /*
+ * We only get here on a download command, regardless of offset.
+ * the chunks written by the system need to be cached, and when
+ * the final one arrives, issue the fmapi command.
+ */
+
+ if (off + count > length)
+ count = length - off;
+
+ if (count > 0) {
+ esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
+ count,
+ length);
+
+ /*
+ * On a full upload, the system tries sending the whole buffer.
+ * there's nothing to do with it, so just drop it here, before
+ * trying to copy over into unallocated memory!
+ */
+ if (a->firmware.header.action == FI_ACT_UP)
+ return count;
+
+ if (!a->firmware.data) {
+ esas2r_debug(
+ "write: nonzero offset but no buffer available!");
+ return -ENOMEM;
+ }
+
+ memcpy(&a->firmware.data[off], buf, count);
+
+ if (length == off + count) {
+ do_fm_api(a,
+ (struct esas2r_flash_img *)a->firmware.data);
+
+ /*
+ * Now copy the header result to be picked up by the
+ * next read
+ */
+ memcpy(&a->firmware.header,
+ a->firmware.data,
+ sizeof(a->firmware.header));
+
+ a->firmware.state = FW_STATUS_ST;
+
+ esas2r_debug("write completed");
+
+ /*
+ * Since the system has the data buffered, the only way
+ * this can leak is if a root user writes a program
+ * that writes a shorter buffer than it claims, and the
+ * copyin fails.
+ */
+ free_fw_buffers(a);
+ }
+ }
+
+ return count;
+}
+
+/* Callback for the completion of a VDA request. */
+static void vda_complete_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->vda_command_done = 1;
+ wake_up_interruptible(&a->vda_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
+
+ (*addr) = a->ppvda_buffer + offset;
+ return VDA_MAX_BUFFER_SIZE - offset;
+}
+
+/* Handle a call to read a VDA command. */
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ if (!a->vda_buffer)
+ return -ENOMEM;
+
+ if (off == 0) {
+ struct esas2r_request *rq;
+ struct atto_ioctl_vda *vi =
+ (struct atto_ioctl_vda *)a->vda_buffer;
+ struct esas2r_sg_context sgc;
+ bool wait_for_completion;
+
+ /*
+ * Presumeably, someone has already written to the vda_buffer,
+ * and now they are reading the node the response, so now we
+ * will actually issue the request to the chip and reply.
+ */
+
+ /* allocate a request */
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_debug("esas2r_read_vda: out of requestss");
+ return -EBUSY;
+ }
+
+ rq->comp_cb = vda_complete_req;
+
+ sgc.first_req = rq;
+ sgc.adapter = a;
+ sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
+
+ a->vda_command_done = 0;
+
+ wait_for_completion =
+ esas2r_process_vda_ioctl(a, vi, rq, &sgc);
+
+ if (wait_for_completion) {
+ /* now wait around for it to complete. */
+
+ while (!a->vda_command_done)
+ wait_event_interruptible(a->vda_waiter,
+ a->vda_command_done);
+ }
+
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+ }
+
+ if (off > VDA_MAX_BUFFER_SIZE)
+ return 0;
+
+ if (count + off > VDA_MAX_BUFFER_SIZE)
+ count = VDA_MAX_BUFFER_SIZE - off;
+
+ if (count < 0)
+ return 0;
+
+ memcpy(buf, a->vda_buffer + off, count);
+
+ return count;
+}
+
+/* Handle a call to write a VDA command. */
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ /*
+ * allocate memory for it, if not already done. once allocated,
+ * we will keep it around until the driver is unloaded.
+ */
+
+ if (!a->vda_buffer) {
+ dma_addr_t dma_addr;
+ a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ VDA_MAX_BUFFER_SIZE,
+ &dma_addr,
+ GFP_KERNEL);
+
+ a->ppvda_buffer = dma_addr;
+ }
+
+ if (!a->vda_buffer)
+ return -ENOMEM;
+
+ if (off > VDA_MAX_BUFFER_SIZE)
+ return 0;
+
+ if (count + off > VDA_MAX_BUFFER_SIZE)
+ count = VDA_MAX_BUFFER_SIZE - off;
+
+ if (count < 1)
+ return 0;
+
+ memcpy(a->vda_buffer + off, buf, count);
+
+ return count;
+}
+
+/* Callback for the completion of an FS_API request.*/
+static void fs_api_complete_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->fs_api_command_done = 1;
+
+ wake_up_interruptible(&a->fs_api_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+ u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
+
+ (*addr) = a->ppfs_api_buffer + offset;
+
+ return a->fs_api_buffer_size - offset;
+}
+
+/* Handle a call to read firmware via FS_API. */
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ if (!a->fs_api_buffer)
+ return -ENOMEM;
+
+ if (off == 0) {
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+
+ /* If another flash request is already in progress, return. */
+ if (down_interruptible(&a->fs_api_semaphore)) {
+busy:
+ fs->status = ATTO_STS_OUT_OF_RSRC;
+ return -EBUSY;
+ }
+
+ /*
+ * Presumeably, someone has already written to the
+ * fs_api_buffer, and now they are reading the node the
+ * response, so now we will actually issue the request to the
+ * chip and reply. Allocate a request
+ */
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_debug("esas2r_read_fs: out of requests");
+ up(&a->fs_api_semaphore);
+ goto busy;
+ }
+
+ rq->comp_cb = fs_api_complete_req;
+
+ /* Set up the SGCONTEXT for to build the s/g table */
+
+ sgc.cur_offset = fs->data;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
+
+ a->fs_api_command_done = 0;
+
+ if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
+ if (fs->status == ATTO_STS_OUT_OF_RSRC)
+ count = -EBUSY;
+
+ goto dont_wait;
+ }
+
+ /* Now wait around for it to complete. */
+
+ while (!a->fs_api_command_done)
+ wait_event_interruptible(a->fs_api_waiter,
+ a->fs_api_command_done);
+ ;
+dont_wait:
+ /* Free the request and keep going */
+ up(&a->fs_api_semaphore);
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+
+ /* Pick up possible error code from above */
+ if (count < 0)
+ return count;
+ }
+
+ if (off > a->fs_api_buffer_size)
+ return 0;
+
+ if (count + off > a->fs_api_buffer_size)
+ count = a->fs_api_buffer_size - off;
+
+ if (count < 0)
+ return 0;
+
+ memcpy(buf, a->fs_api_buffer + off, count);
+
+ return count;
+}
+
+/* Handle a call to write firmware via FS_API. */
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ if (off == 0) {
+ struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
+ u32 length = fs->command.length + offsetof(
+ struct esas2r_ioctl_fs,
+ data);
+
+ /*
+ * Special case, for BEGIN commands, the length field
+ * is lying to us, so just get enough for the header.
+ */
+
+ if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
+ length = offsetof(struct esas2r_ioctl_fs, data);
+
+ /*
+ * Beginning a command. We assume we'll get at least
+ * enough in the first write so we can look at the
+ * header and see how much we need to alloc.
+ */
+
+ if (count < offsetof(struct esas2r_ioctl_fs, data))
+ return -EINVAL;
+
+ /* Allocate a buffer or use the existing buffer. */
+ if (a->fs_api_buffer) {
+ if (a->fs_api_buffer_size < length) {
+ /* Free too-small buffer and get a new one */
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ a->fs_api_buffer,
+ (dma_addr_t)a->ppfs_api_buffer);
+
+ goto re_allocate_buffer;
+ }
+ } else {
+re_allocate_buffer:
+ a->fs_api_buffer_size = length;
+
+ a->fs_api_buffer = (u8 *)dma_alloc_coherent(
+ &a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ (dma_addr_t *)&a->ppfs_api_buffer,
+ GFP_KERNEL);
+ }
+ }
+
+ if (!a->fs_api_buffer)
+ return -ENOMEM;
+
+ if (off > a->fs_api_buffer_size)
+ return 0;
+
+ if (count + off > a->fs_api_buffer_size)
+ count = a->fs_api_buffer_size - off;
+
+ if (count < 1)
+ return 0;
+
+ memcpy(a->fs_api_buffer + off, buf, count);
+
+ return count;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
new file mode 100644
index 000000000..a82030aa8
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -0,0 +1,250 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * this module within the driver is tasked with providing logging functionality.
+ * the event_log_level module parameter controls the level of messages that are
+ * written to the system log. the default level of messages that are written
+ * are critical and warning messages. if other types of messages are desired,
+ * one simply needs to load the module with the correct value for the
+ * event_log_level module parameter. for example:
+ *
+ * insmod <module> event_log_level=1
+ *
+ * will load the module and only critical events will be written by this module
+ * to the system log. if critical, warning, and information-level messages are
+ * desired, the correct value for the event_log_level module parameter
+ * would be as follows:
+ *
+ * insmod <module> event_log_level=3
+ */
+
+#define EVENT_LOG_BUFF_SIZE 1024
+
+static long event_log_level = ESAS2R_LOG_DFLT;
+
+module_param(event_log_level, long, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(event_log_level,
+ "Specifies the level of events to report to the system log. Critical and warning level events are logged by default.");
+
+/* A shared buffer to use for formatting messages. */
+static char event_buffer[EVENT_LOG_BUFF_SIZE];
+
+/* A lock to protect the shared buffer used for formatting messages. */
+static DEFINE_SPINLOCK(event_buffer_lock);
+
+/**
+ * translates an esas2r-defined logging event level to a kernel logging level.
+ *
+ * @param [in] level the esas2r-defined logging event level to translate
+ *
+ * @return the corresponding kernel logging level.
+ */
+static const char *translate_esas2r_event_level_to_kernel(const long level)
+{
+ switch (level) {
+ case ESAS2R_LOG_CRIT:
+ return KERN_CRIT;
+
+ case ESAS2R_LOG_WARN:
+ return KERN_WARNING;
+
+ case ESAS2R_LOG_INFO:
+ return KERN_INFO;
+
+ case ESAS2R_LOG_DEBG:
+ case ESAS2R_LOG_TRCE:
+ default:
+ return KERN_DEBUG;
+ }
+}
+
+/**
+ * the master logging function. this function will format the message as
+ * outlined by the formatting string, the input device information and the
+ * substitution arguments and output the resulting string to the system log.
+ *
+ * @param [in] level the event log level of the message
+ * @param [in] dev the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] args the substition arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+static int esas2r_log_master(const long level,
+ const struct device *dev,
+ const char *format,
+ va_list args)
+{
+ if (level <= event_log_level) {
+ unsigned long flags = 0;
+ int retval = 0;
+ char *buffer = event_buffer;
+ size_t buflen = EVENT_LOG_BUFF_SIZE;
+ const char *fmt_nodev = "%s%s: ";
+ const char *fmt_dev = "%s%s [%s, %s, %s]";
+ const char *slevel =
+ translate_esas2r_event_level_to_kernel(level);
+
+ spin_lock_irqsave(&event_buffer_lock, flags);
+
+ if (buffer == NULL) {
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ return -1;
+ }
+
+ memset(buffer, 0, buflen);
+
+ /*
+ * format the level onto the beginning of the string and do
+ * some pointer arithmetic to move the pointer to the point
+ * where the actual message can be inserted.
+ */
+
+ if (dev == NULL) {
+ snprintf(buffer, buflen, fmt_nodev, slevel,
+ ESAS2R_DRVR_NAME);
+ } else {
+ snprintf(buffer, buflen, fmt_dev, slevel,
+ ESAS2R_DRVR_NAME,
+ (dev->driver ? dev->driver->name : "unknown"),
+ (dev->bus ? dev->bus->name : "unknown"),
+ dev_name(dev));
+ }
+
+ buffer += strlen(event_buffer);
+ buflen -= strlen(event_buffer);
+
+ retval = vsnprintf(buffer, buflen, format, args);
+ if (retval < 0) {
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ return -1;
+ }
+
+ /*
+ * Put a line break at the end of the formatted string so that
+ * we don't wind up with run-on messages.
+ */
+ printk("%s\n", event_buffer);
+
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * formats and logs a message to the system log.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] format the formating string for the message
+ * @param [in] ... the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log(const long level, const char *format, ...)
+{
+ int retval = 0;
+ va_list args;
+
+ va_start(args, format);
+
+ retval = esas2r_log_master(level, NULL, format, args);
+
+ va_end(args);
+
+ return retval;
+}
+
+/**
+ * formats and logs a message to the system log. this message will include
+ * device information.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] dev the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] ... the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_dev(const long level,
+ const struct device *dev,
+ const char *format,
+ ...)
+{
+ int retval = 0;
+ va_list args;
+
+ va_start(args, format);
+
+ retval = esas2r_log_master(level, dev, format, args);
+
+ va_end(args);
+
+ return retval;
+}
+
+/**
+ * formats and logs a message to the system log. this message will include
+ * device information.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] buf
+ * @param [in] len
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_hexdump(const long level,
+ const void *buf,
+ size_t len)
+{
+ if (level <= event_log_level) {
+ print_hex_dump(translate_esas2r_event_level_to_kernel(level),
+ "", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ }
+
+ return 1;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
new file mode 100644
index 000000000..7b6397bb5
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -0,0 +1,118 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __esas2r_log_h__
+#define __esas2r_log_h__
+
+struct device;
+
+enum {
+ ESAS2R_LOG_NONE = 0, /* no events logged */
+ ESAS2R_LOG_CRIT = 1, /* critical events */
+ ESAS2R_LOG_WARN = 2, /* warning events */
+ ESAS2R_LOG_INFO = 3, /* info events */
+ ESAS2R_LOG_DEBG = 4, /* debugging events */
+ ESAS2R_LOG_TRCE = 5, /* tracing events */
+
+#ifdef ESAS2R_TRACE
+ ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
+#else
+ ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
+#endif
+};
+
+int esas2r_log(const long level, const char *format, ...);
+int esas2r_log_dev(const long level,
+ const struct device *dev,
+ const char *format,
+ ...);
+int esas2r_log_hexdump(const long level,
+ const void *buf,
+ size_t len);
+
+/*
+ * the following macros are provided specifically for debugging and tracing
+ * messages. esas2r_debug() is provided for generic non-hardware layer
+ * debugging and tracing events. esas2r_hdebug is provided specifically for
+ * hardware layer debugging and tracing events.
+ */
+
+#ifdef ESAS2R_DEBUG
+#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#else
+#define esas2r_debug(f, args ...)
+#define esas2r_hdebug(f, args ...)
+#endif /* ESAS2R_DEBUG */
+
+/*
+ * the following macros are provided in order to trace the driver and catch
+ * some more serious bugs. be warned, enabling these macros may *severely*
+ * impact performance.
+ */
+
+#ifdef ESAS2R_TRACE
+#define esas2r_bugon() \
+ do { \
+ esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
+ " - dumping stack and stopping kernel", __func__, \
+ __LINE__); \
+ dump_stack(); \
+ BUG(); \
+ } while (0)
+
+#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
+ __func__, __FILE__, __LINE__)
+#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
+ __func__, __FILE__, __LINE__)
+#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
+ f, __func__, __FILE__, __LINE__, \
+ ## args)
+#else
+#define esas2r_bugon()
+#define esas2r_trace_enter()
+#define esas2r_trace_exit()
+#define esas2r_trace(f, args ...)
+#endif /* ESAS2R_TRACE */
+
+#endif /* __esas2r_log_h__ */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
new file mode 100644
index 000000000..31f8966b2
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -0,0 +1,1975 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_main.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
+MODULE_AUTHOR("ATTO Technology, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ESAS2R_VERSION_STR);
+
+/* global definitions */
+
+static int found_adapters;
+struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];
+
+#define ESAS2R_VDA_EVENT_PORT1 54414
+#define ESAS2R_VDA_EVENT_PORT2 54415
+#define ESAS2R_VDA_EVENT_SOCK_COUNT 2
+
+static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *host = class_to_shost(dev);
+
+ return (struct esas2r_adapter *)host->hostdata;
+}
+
+static ssize_t read_fw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_fw(a, buf, off, count);
+}
+
+static ssize_t write_fw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_write_fw(a, buf, off, count);
+}
+
+static ssize_t read_fs(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_fs(a, buf, off, count);
+}
+
+static ssize_t write_fs(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min(sizeof(struct esas2r_ioctl_fs), count);
+ int result = 0;
+
+ result = esas2r_write_fs(a, buf, off, count);
+
+ if (result < 0)
+ result = 0;
+
+ return length;
+}
+
+static ssize_t read_vda(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_vda(a, buf, off, count);
+}
+
+static ssize_t write_vda(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_write_vda(a, buf, off, count);
+}
+
+static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
+
+ memcpy(buf, a->nvram, length);
+ return length;
+}
+
+static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ struct esas2r_request *rq;
+ int result = -EFAULT;
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL)
+ return -ENOMEM;
+
+ if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
+ result = count;
+
+ esas2r_free_request(a, rq);
+
+ return result;
+}
+
+static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
+
+ return sizeof(struct esas2r_sas_nvram);
+}
+
+static ssize_t read_hw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
+
+ if (!a->local_atto_ioctl)
+ return -ENOMEM;
+
+ if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
+ return -ENOMEM;
+
+ memcpy(buf, a->local_atto_ioctl, length);
+
+ return length;
+}
+
+static ssize_t write_hw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min(sizeof(struct atto_ioctl), count);
+
+ if (!a->local_atto_ioctl) {
+ a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+ GFP_KERNEL);
+ if (a->local_atto_ioctl == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "write_hw kzalloc failed for %d bytes",
+ sizeof(struct atto_ioctl));
+ return -ENOMEM;
+ }
+ }
+
+ memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
+ memcpy(a->local_atto_ioctl, buf, length);
+
+ return length;
+}
+
+#define ESAS2R_RW_BIN_ATTR(_name) \
+ struct bin_attribute bin_attr_ ## _name = { \
+ .attr = \
+ { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
+ .size = 0, \
+ .read = read_ ## _name, \
+ .write = write_ ## _name }
+
+ESAS2R_RW_BIN_ATTR(fw);
+ESAS2R_RW_BIN_ATTR(fs);
+ESAS2R_RW_BIN_ATTR(vda);
+ESAS2R_RW_BIN_ATTR(hw);
+ESAS2R_RW_BIN_ATTR(live_nvram);
+
+struct bin_attribute bin_attr_default_nvram = {
+ .attr = { .name = "default_nvram", .mode = S_IRUGO },
+ .size = 0,
+ .read = read_default_nvram,
+ .write = NULL
+};
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .show_info = esas2r_show_info,
+ .name = ESAS2R_LONGNAME,
+ .release = esas2r_release,
+ .info = esas2r_info,
+ .ioctl = esas2r_ioctl,
+ .queuecommand = esas2r_queuecommand,
+ .eh_abort_handler = esas2r_eh_abort,
+ .eh_device_reset_handler = esas2r_device_reset,
+ .eh_bus_reset_handler = esas2r_bus_reset,
+ .eh_host_reset_handler = esas2r_host_reset,
+ .eh_target_reset_handler = esas2r_target_reset,
+ .can_queue = 128,
+ .this_id = -1,
+ .sg_tablesize = SCSI_MAX_SG_SEGMENTS,
+ .cmd_per_lun =
+ ESAS2R_DEFAULT_CMD_PER_LUN,
+ .present = 0,
+ .unchecked_isa_dma = 0,
+ .use_clustering = ENABLE_CLUSTERING,
+ .emulated = 0,
+ .proc_name = ESAS2R_DRVR_NAME,
+ .change_queue_depth = scsi_change_queue_depth,
+ .max_sectors = 0xFFFF,
+ .use_blk_tags = 1,
+};
+
+int sgl_page_size = 512;
+module_param(sgl_page_size, int, 0);
+MODULE_PARM_DESC(sgl_page_size,
+ "Scatter/gather list (SGL) page size in number of S/G "
+ "entries. If your application is doing a lot of very large "
+ "transfers, you may want to increase the SGL page size. "
+ "Default 512.");
+
+int num_sg_lists = 1024;
+module_param(num_sg_lists, int, 0);
+MODULE_PARM_DESC(num_sg_lists,
+ "Number of scatter/gather lists. Default 1024.");
+
+int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+module_param(sg_tablesize, int, 0);
+MODULE_PARM_DESC(sg_tablesize,
+ "Maximum number of entries in a scatter/gather table.");
+
+int num_requests = 256;
+module_param(num_requests, int, 0);
+MODULE_PARM_DESC(num_requests,
+ "Number of requests. Default 256.");
+
+int num_ae_requests = 4;
+module_param(num_ae_requests, int, 0);
+MODULE_PARM_DESC(num_ae_requests,
+ "Number of VDA asynchromous event requests. Default 4.");
+
+int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
+module_param(cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+ "Maximum number of commands per LUN. Default "
+ DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
+
+int can_queue = 128;
+module_param(can_queue, int, 0);
+MODULE_PARM_DESC(can_queue,
+ "Maximum number of commands per adapter. Default 128.");
+
+int esas2r_max_sectors = 0xFFFF;
+module_param(esas2r_max_sectors, int, 0);
+MODULE_PARM_DESC(esas2r_max_sectors,
+ "Maximum number of disk sectors in a single data transfer. "
+ "Default 65535 (largest possible setting).");
+
+int interrupt_mode = 1;
+module_param(interrupt_mode, int, 0);
+MODULE_PARM_DESC(interrupt_mode,
+ "Defines the interrupt mode to use. 0 for legacy"
+ ", 1 for MSI. Default is MSI (1).");
+
+static struct pci_device_id
+ esas2r_pci_table[] = {
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E,
+ 0,
+ 0, 0 },
+ { 0, 0, 0, 0,
+ 0,
+ 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
+
+static int
+esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
+
+static void
+esas2r_remove(struct pci_dev *pcid);
+
+static struct pci_driver
+ esas2r_pci_driver = {
+ .name = ESAS2R_DRVR_NAME,
+ .id_table = esas2r_pci_table,
+ .probe = esas2r_probe,
+ .remove = esas2r_remove,
+ .suspend = esas2r_suspend,
+ .resume = esas2r_resume,
+};
+
+static int esas2r_probe(struct pci_dev *pcid,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *host = NULL;
+ struct esas2r_adapter *a;
+ int err;
+
+ size_t host_alloc_size = sizeof(struct esas2r_adapter)
+ + ((num_requests) +
+ 1) * sizeof(struct esas2r_request);
+
+ esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
+ "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
+ pcid->vendor,
+ pcid->device,
+ pcid->subsystem_vendor,
+ pcid->subsystem_device);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "before pci_enable_device() "
+ "enable_cnt: %d",
+ pcid->enable_cnt.counter);
+
+ err = pci_enable_device(pcid);
+ if (err != 0) {
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
+ "pci_enable_device() FAIL (%d)",
+ err);
+ return -ENODEV;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "pci_enable_device() OK");
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "after pci_enable_device() enable_cnt: %d",
+ pcid->enable_cnt.counter);
+
+ host = scsi_host_alloc(&driver_template, host_alloc_size);
+ if (host == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
+ return -ENODEV;
+ }
+
+ memset(host->hostdata, 0, host_alloc_size);
+
+ a = (struct esas2r_adapter *)host->hostdata;
+
+ esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
+
+ /* override max LUN and max target id */
+
+ host->max_id = ESAS2R_MAX_ID + 1;
+ host->max_lun = 255;
+
+ /* we can handle 16-byte CDbs */
+
+ host->max_cmd_len = 16;
+
+ host->can_queue = can_queue;
+ host->cmd_per_lun = cmd_per_lun;
+ host->this_id = host->max_id + 1;
+ host->max_channel = 0;
+ host->unique_id = found_adapters;
+ host->sg_tablesize = sg_tablesize;
+ host->max_sectors = esas2r_max_sectors;
+
+ /* set to bus master for BIOses that don't do it for us */
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
+
+ pci_set_master(pcid);
+
+ if (!esas2r_init_adapter(host, pcid, found_adapters)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to initialize device at PCI bus %x:%x",
+ pcid->bus->number,
+ pcid->devfn);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(host);
+
+ return 0;
+
+ }
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
+ host->hostdata);
+
+ pci_set_drvdata(pcid, host);
+
+ esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
+
+ err = scsi_add_host(host, &pcid->dev);
+
+ if (err) {
+ esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
+ "scsi_add_host() FAIL");
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(host);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "pci_set_drvdata(%p, NULL) called",
+ pcid);
+
+ pci_set_drvdata(pcid, NULL);
+
+ return -ENODEV;
+ }
+
+
+ esas2r_fw_event_on(a);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_scan_host() called");
+
+ scsi_scan_host(host);
+
+ /* Add sysfs binary files */
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: fw");
+ else
+ a->sysfs_fw_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: fs");
+ else
+ a->sysfs_fs_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: vda");
+ else
+ a->sysfs_vda_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: hw");
+ else
+ a->sysfs_hw_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: live_nvram");
+ else
+ a->sysfs_live_nvram_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj,
+ &bin_attr_default_nvram))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: default_nvram");
+ else
+ a->sysfs_default_nvram_created = 1;
+
+ found_adapters++;
+
+ return 0;
+}
+
+static void esas2r_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ int index;
+
+ if (pdev == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
+ return;
+ }
+
+ host = pci_get_drvdata(pdev);
+
+ if (host == NULL) {
+ /*
+ * this can happen if pci_set_drvdata was already called
+ * to clear the host pointer. if this is the case, we
+ * are okay; this channel has already been cleaned up.
+ */
+
+ return;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "esas2r_remove(%p) called; "
+ "host:%p", pdev,
+ host);
+
+ index = esas2r_cleanup(host);
+
+ if (index < 0)
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
+ "unknown host in %s",
+ __func__);
+
+ found_adapters--;
+
+ /* if this was the last adapter, clean up the rest of the driver */
+
+ if (found_adapters == 0)
+ esas2r_cleanup(NULL);
+}
+
+static int __init esas2r_init(void)
+{
+ int i;
+
+ esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+ /* verify valid parameters */
+
+ if (can_queue < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: can_queue must be at least 1, value "
+ "forced.");
+ can_queue = 1;
+ } else if (can_queue > 2048) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: can_queue must be no larger than 2048, "
+ "value forced.");
+ can_queue = 2048;
+ }
+
+ if (cmd_per_lun < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: cmd_per_lun must be at least 1, value "
+ "forced.");
+ cmd_per_lun = 1;
+ } else if (cmd_per_lun > 2048) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: cmd_per_lun must be no larger than "
+ "2048, value forced.");
+ cmd_per_lun = 2048;
+ }
+
+ if (sg_tablesize < 32) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: sg_tablesize must be at least 32, "
+ "value forced.");
+ sg_tablesize = 32;
+ }
+
+ if (esas2r_max_sectors < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: esas2r_max_sectors must be at least "
+ "1, value forced.");
+ esas2r_max_sectors = 1;
+ } else if (esas2r_max_sectors > 0xffff) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: esas2r_max_sectors must be no larger "
+ "than 0xffff, value forced.");
+ esas2r_max_sectors = 0xffff;
+ }
+
+ sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
+
+ if (sgl_page_size < SGL_PG_SZ_MIN)
+ sgl_page_size = SGL_PG_SZ_MIN;
+ else if (sgl_page_size > SGL_PG_SZ_MAX)
+ sgl_page_size = SGL_PG_SZ_MAX;
+
+ if (num_sg_lists < NUM_SGL_MIN)
+ num_sg_lists = NUM_SGL_MIN;
+ else if (num_sg_lists > NUM_SGL_MAX)
+ num_sg_lists = NUM_SGL_MAX;
+
+ if (num_requests < NUM_REQ_MIN)
+ num_requests = NUM_REQ_MIN;
+ else if (num_requests > NUM_REQ_MAX)
+ num_requests = NUM_REQ_MAX;
+
+ if (num_ae_requests < NUM_AE_MIN)
+ num_ae_requests = NUM_AE_MIN;
+ else if (num_ae_requests > NUM_AE_MAX)
+ num_ae_requests = NUM_AE_MAX;
+
+ /* set up other globals */
+
+ for (i = 0; i < MAX_ADAPTERS; i++)
+ esas2r_adapters[i] = NULL;
+
+ /* initialize */
+
+ driver_template.module = THIS_MODULE;
+
+ if (pci_register_driver(&esas2r_pci_driver) != 0)
+ esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
+ else
+ esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
+
+ if (!found_adapters) {
+ pci_unregister_driver(&esas2r_pci_driver);
+ esas2r_cleanup(NULL);
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "driver will not be loaded because no ATTO "
+ "%s devices were found",
+ ESAS2R_DRVR_NAME);
+ return -1;
+ } else {
+ esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
+ found_adapters);
+ }
+
+ return 0;
+}
+
+/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
+static const struct file_operations esas2r_proc_fops = {
+ .compat_ioctl = esas2r_proc_ioctl,
+ .unlocked_ioctl = esas2r_proc_ioctl,
+};
+
+static struct Scsi_Host *esas2r_proc_host;
+static int esas2r_proc_major;
+
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
+ (int)cmd, (void __user *)arg);
+}
+
+static void __exit esas2r_exit(void)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+ if (esas2r_proc_major > 0) {
+ esas2r_log(ESAS2R_LOG_INFO, "unregister proc");
+
+ remove_proc_entry(ATTONODE_NAME,
+ esas2r_proc_host->hostt->proc_dir);
+ unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);
+
+ esas2r_proc_major = 0;
+ }
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");
+
+ pci_unregister_driver(&esas2r_pci_driver);
+}
+
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+
+ struct esas2r_target *t;
+ int dev_count = 0;
+
+ esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);
+
+ seq_printf(m, ESAS2R_LONGNAME "\n"
+ "Driver version: "ESAS2R_VERSION_STR "\n"
+ "Flash version: %s\n"
+ "Firmware version: %s\n"
+ "Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
+ "http://www.attotech.com\n"
+ "\n",
+ a->flash_rev,
+ a->fw_rev[0] ? a->fw_rev : "(none)");
+
+
+ seq_printf(m, "Adapter information:\n"
+ "--------------------\n"
+ "Model: %s\n"
+ "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
+ esas2r_get_model_name(a),
+ a->nvram->sas_addr[0],
+ a->nvram->sas_addr[1],
+ a->nvram->sas_addr[2],
+ a->nvram->sas_addr[3],
+ a->nvram->sas_addr[4],
+ a->nvram->sas_addr[5],
+ a->nvram->sas_addr[6],
+ a->nvram->sas_addr[7]);
+
+ seq_puts(m, "\n"
+ "Discovered devices:\n"
+ "\n"
+ " # Target ID\n"
+ "---------------\n");
+
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->buffered_target_state == TS_PRESENT) {
+ seq_printf(m, " %3d %3d\n",
+ ++dev_count,
+ (u16)(uintptr_t)(t - a->targetdb));
+ }
+
+ if (dev_count == 0)
+ seq_puts(m, "none\n");
+
+ seq_putc(m, '\n');
+ return 0;
+
+}
+
+int esas2r_release(struct Scsi_Host *sh)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+ "esas2r_release() called");
+
+ esas2r_cleanup(sh);
+ if (sh->irq)
+ free_irq(sh->irq, NULL);
+ scsi_unregister(sh);
+ return 0;
+}
+
+const char *esas2r_info(struct Scsi_Host *sh)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+ static char esas2r_info_str[512];
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+ "esas2r_info() called");
+
+ /*
+ * if we haven't done so already, register as a char driver
+ * and stick a node under "/proc/scsi/esas2r/ATTOnode"
+ */
+
+ if (esas2r_proc_major <= 0) {
+ esas2r_proc_host = sh;
+
+ esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
+ &esas2r_proc_fops);
+
+ esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
+ "register_chrdev (major %d)",
+ esas2r_proc_major);
+
+ if (esas2r_proc_major > 0) {
+ struct proc_dir_entry *pde;
+
+ pde = proc_create(ATTONODE_NAME, 0,
+ sh->hostt->proc_dir,
+ &esas2r_proc_fops);
+
+ if (!pde) {
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(sh->shost_gendev),
+ "failed to create_proc_entry");
+ esas2r_proc_major = -1;
+ }
+ }
+ }
+
+ sprintf(esas2r_info_str,
+ ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
+ " driver version: "ESAS2R_VERSION_STR " firmware version: "
+ "%s\n",
+ a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
+ a->fw_rev[0] ? a->fw_rev : "(none)");
+
+ return esas2r_info_str;
+}
+
+/* Callback for building a request scatter/gather list */
+static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ u32 len;
+
+ if (likely(sgc->cur_offset == sgc->exp_offset)) {
+ /*
+ * the normal case: caller used all bytes from previous call, so
+ * expected offset is the same as the current offset.
+ */
+
+ if (sgc->sgel_count < sgc->num_sgel) {
+ /* retrieve next segment, except for first time */
+ if (sgc->exp_offset > (u8 *)0) {
+ /* advance current segment */
+ sgc->cur_sgel = sg_next(sgc->cur_sgel);
+ ++(sgc->sgel_count);
+ }
+
+
+ len = sg_dma_len(sgc->cur_sgel);
+ (*addr) = sg_dma_address(sgc->cur_sgel);
+
+ /* save the total # bytes returned to caller so far */
+ sgc->exp_offset += len;
+
+ } else {
+ len = 0;
+ }
+ } else if (sgc->cur_offset < sgc->exp_offset) {
+ /*
+ * caller did not use all bytes from previous call. need to
+ * compute the address based on current segment.
+ */
+
+ len = sg_dma_len(sgc->cur_sgel);
+ (*addr) = sg_dma_address(sgc->cur_sgel);
+
+ sgc->exp_offset -= len;
+
+ /* calculate PA based on prev segment address and offsets */
+ *addr = *addr +
+ (sgc->cur_offset - sgc->exp_offset);
+
+ sgc->exp_offset += len;
+
+ /* re-calculate length based on offset */
+ len = lower_32_bits(
+ sgc->exp_offset - sgc->cur_offset);
+ } else { /* if ( sgc->cur_offset > sgc->exp_offset ) */
+ /*
+ * we don't expect the caller to skip ahead.
+ * cur_offset will never exceed the len we return
+ */
+ len = 0;
+ }
+
+ return len;
+}
+
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ unsigned bufflen;
+
+ /* Assume success, if it fails we will fix the result later. */
+ cmd->result = DID_OK << 16;
+
+ if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
+ rq = esas2r_alloc_request(a);
+ if (unlikely(rq == NULL)) {
+ esas2r_debug("esas2r_alloc_request failed");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ rq->cmd = cmd;
+ bufflen = scsi_bufflen(cmd);
+
+ if (likely(bufflen != 0)) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+ }
+
+ memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
+ rq->vrq->scsi.length = cpu_to_le32(bufflen);
+ rq->target_id = cmd->device->id;
+ rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+ rq->sense_buf = cmd->sense_buffer;
+ rq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ esas2r_sgc_init(&sgc, a, rq, NULL);
+
+ sgc.length = bufflen;
+ sgc.cur_offset = NULL;
+
+ sgc.cur_sgel = scsi_sglist(cmd);
+ sgc.exp_offset = NULL;
+ sgc.num_sgel = scsi_dma_map(cmd);
+ sgc.sgel_count = 0;
+
+ if (unlikely(sgc.num_sgel < 0)) {
+ esas2r_free_request(a, rq);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
+
+ if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
+ scsi_dma_unmap(cmd);
+ esas2r_free_request(a, rq);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
+ (int)cmd->device->lun);
+
+ esas2r_start_request(a, rq);
+
+ return 0;
+}
+
+static void complete_task_management_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ (*rq->task_management_status_ptr) = rq->req_stat;
+ esas2r_free_request(a, rq);
+}
+
+/**
+ * Searches the specified queue for the specified queue for the command
+ * to abort.
+ *
+ * @param [in] a
+ * @param [in] abort_request
+ * @param [in] cmd
+ * t
+ * @return 0 on failure, 1 if command was not found, 2 if command was found
+ */
+static int esas2r_check_active_queue(struct esas2r_adapter *a,
+ struct esas2r_request **abort_request,
+ struct scsi_cmnd *cmd,
+ struct list_head *queue)
+{
+ bool found = false;
+ struct esas2r_request *ar = *abort_request;
+ struct esas2r_request *rq;
+ struct list_head *element, *next;
+
+ list_for_each_safe(element, next, queue) {
+
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->cmd == cmd) {
+
+ /* Found the request. See what to do with it. */
+ if (queue == &a->active_list) {
+ /*
+ * We are searching the active queue, which
+ * means that we need to send an abort request
+ * to the firmware.
+ */
+ ar = esas2r_alloc_request(a);
+ if (ar == NULL) {
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "unable to allocate an abort request for cmd %p",
+ cmd);
+ return 0; /* Failure */
+ }
+
+ /*
+ * Task management request must be formatted
+ * with a lock held.
+ */
+ ar->sense_len = 0;
+ ar->vrq->scsi.length = 0;
+ ar->target_id = rq->target_id;
+ ar->vrq->scsi.flags |= cpu_to_le32(
+ (u8)le32_to_cpu(rq->vrq->scsi.flags));
+
+ memset(ar->vrq->scsi.cdb, 0,
+ sizeof(ar->vrq->scsi.cdb));
+
+ ar->vrq->scsi.flags |= cpu_to_le32(
+ FCP_CMND_TRM);
+ ar->vrq->scsi.u.abort_handle =
+ rq->vrq->scsi.handle;
+ } else {
+ /*
+ * The request is pending but not active on
+ * the firmware. Just free it now and we'll
+ * report the successful abort below.
+ */
+ list_del_init(&rq->req_list);
+ esas2r_free_request(a, rq);
+ }
+
+ found = true;
+ break;
+ }
+
+ }
+
+ if (!found)
+ return 1; /* Not found */
+
+ return 2; /* found */
+
+
+}
+
+int esas2r_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *abort_request = NULL;
+ unsigned long flags;
+ struct list_head *queue;
+ int result;
+
+ esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ cmd->result = DID_ABORT << 16;
+
+ scsi_set_resid(cmd, 0);
+
+ cmd->scsi_done(cmd);
+
+ return SUCCESS;
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /*
+ * Run through the defer and active queues looking for the request
+ * to abort.
+ */
+
+ queue = &a->defer_list;
+
+check_active_queue:
+
+ result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
+
+ if (!result) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ return FAILED;
+ } else if (result == 2 && (queue == &a->defer_list)) {
+ queue = &a->active_list;
+ goto check_active_queue;
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ if (abort_request) {
+ u8 task_management_status = RS_PENDING;
+
+ /*
+ * the request is already active, so we need to tell
+ * the firmware to abort it and wait for the response.
+ */
+
+ abort_request->comp_cb = complete_task_management_request;
+ abort_request->task_management_status_ptr =
+ &task_management_status;
+
+ esas2r_start_request(a, abort_request);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ while (task_management_status == RS_PENDING)
+ msleep(10);
+
+ /*
+ * Once we get here, the original request will have been
+ * completed by the firmware and the abort request will have
+ * been cleaned up. we're done!
+ */
+
+ return SUCCESS;
+ }
+
+ /*
+ * If we get here, either we found the inactive request and
+ * freed it, or we didn't find it at all. Either way, success!
+ */
+
+ cmd->result = DID_ABORT << 16;
+
+ scsi_set_resid(cmd, 0);
+
+ cmd->scsi_done(cmd);
+
+ return SUCCESS;
+}
+
+static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+ if (host_reset)
+ esas2r_reset_adapter(a);
+ else
+ esas2r_reset_bus(a);
+
+ /* above call sets the AF_OS_RESET flag. wait for it to clear. */
+
+ while (test_bit(AF_OS_RESET, &a->flags)) {
+ msleep(10);
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+ return SUCCESS;
+}
+
+int esas2r_host_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);
+
+ return esas2r_host_bus_reset(cmd, true);
+}
+
+int esas2r_bus_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);
+
+ return esas2r_host_bus_reset(cmd, false);
+}
+
+static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *rq;
+ u8 task_management_status = RS_PENDING;
+ bool completed;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+retry:
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ if (target_reset) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to allocate a request for a "
+ "target reset (%d)!",
+ cmd->device->id);
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to allocate a request for a "
+ "device reset (%d:%d)!",
+ cmd->device->id,
+ cmd->device->lun);
+ }
+
+
+ return FAILED;
+ }
+
+ rq->target_id = cmd->device->id;
+ rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+ rq->req_stat = RS_PENDING;
+
+ rq->comp_cb = complete_task_management_request;
+ rq->task_management_status_ptr = &task_management_status;
+
+ if (target_reset) {
+ esas2r_debug("issuing target reset (%p) to id %d", rq,
+ cmd->device->id);
+ completed = esas2r_send_task_mgmt(a, rq, 0x20);
+ } else {
+ esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
+ cmd->device->id, cmd->device->lun);
+ completed = esas2r_send_task_mgmt(a, rq, 0x10);
+ }
+
+ if (completed) {
+ /* Task management cmd completed right away, need to free it. */
+
+ esas2r_free_request(a, rq);
+ } else {
+ /*
+ * Wait for firmware to complete the request. Completion
+ * callback will free it.
+ */
+ while (task_management_status == RS_PENDING)
+ msleep(10);
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+ if (task_management_status == RS_BUSY) {
+ /*
+ * Busy, probably because we are flashing. Wait a bit and
+ * try again.
+ */
+ msleep(100);
+ goto retry;
+ }
+
+ return SUCCESS;
+}
+
+int esas2r_device_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
+
+ return esas2r_dev_targ_reset(cmd, false);
+
+}
+
+int esas2r_target_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
+
+ return esas2r_dev_targ_reset(cmd, true);
+}
+
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u8 reqstatus = rq->req_stat;
+
+ if (reqstatus == RS_SUCCESS)
+ return;
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ if (reqstatus == RS_SCSI_ERROR) {
+ if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
+ rq->sense_buf[2], rq->sense_buf[12],
+ rq->sense_buf[13],
+ rq->vrq->scsi.cdb[0]);
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - SCSI error CDB:%x\n",
+ rq->vrq->scsi.cdb[0]);
+ }
+ } else if ((rq->vrq->scsi.cdb[0] != INQUIRY
+ && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
+ || (reqstatus != RS_SEL
+ && reqstatus != RS_SEL2)) {
+ if ((reqstatus == RS_UNDERRUN) &&
+ (rq->vrq->scsi.cdb[0] == INQUIRY)) {
+ /* Don't log inquiry underruns */
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - cdb:%x reqstatus:%d target:%d",
+ rq->vrq->scsi.cdb[0], reqstatus,
+ rq->target_id);
+ }
+ }
+ }
+}
+
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ u32 starttime;
+ u32 timeout;
+
+ starttime = jiffies_to_msecs(jiffies);
+ timeout = rq->timeout ? rq->timeout : 5000;
+
+ while (true) {
+ esas2r_polled_interrupt(a);
+
+ if (rq->req_stat != RS_STARTED)
+ break;
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+ esas2r_hdebug("request TMO");
+ esas2r_bugon();
+
+ rq->req_stat = RS_TIMEOUT;
+
+ esas2r_local_reset_adapter(a);
+ return;
+ }
+ }
+}
+
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
+{
+ u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
+ u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
+
+ if (a->window_base != base) {
+ esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
+ base | MVRPW1R_ENABLE);
+ esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
+ a->window_base = base;
+ }
+
+ return offset;
+}
+
+/* Read a block of data from chip memory */
+bool esas2r_read_mem_block(struct esas2r_adapter *a,
+ void *to,
+ u32 from,
+ u32 size)
+{
+ u8 *end = (u8 *)to;
+
+ while (size) {
+ u32 len;
+ u32 offset;
+ u32 iatvr;
+
+ iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
+
+ esas2r_map_data_window(a, iatvr);
+
+ offset = from & (MW_DATA_WINDOW_SIZE - 1);
+ len = size;
+
+ if (len > MW_DATA_WINDOW_SIZE - offset)
+ len = MW_DATA_WINDOW_SIZE - offset;
+
+ from += len;
+ size -= len;
+
+ while (len--) {
+ *end++ = esas2r_read_data_byte(a, offset);
+ offset++;
+ }
+ }
+
+ return true;
+}
+
+void esas2r_nuxi_mgt_data(u8 function, void *data)
+{
+ struct atto_vda_grp_info *g;
+ struct atto_vda_devinfo *d;
+ struct atto_vdapart_info *p;
+ struct atto_vda_dh_info *h;
+ struct atto_vda_metrics_info *m;
+ struct atto_vda_schedule_info *s;
+ struct atto_vda_buzzer_info *b;
+ u8 i;
+
+ switch (function) {
+ case VDAMGT_BUZZER_INFO:
+ case VDAMGT_BUZZER_SET:
+
+ b = (struct atto_vda_buzzer_info *)data;
+
+ b->duration = le32_to_cpu(b->duration);
+ break;
+
+ case VDAMGT_SCHEDULE_INFO:
+ case VDAMGT_SCHEDULE_EVENT:
+
+ s = (struct atto_vda_schedule_info *)data;
+
+ s->id = le32_to_cpu(s->id);
+
+ break;
+
+ case VDAMGT_DEV_INFO:
+ case VDAMGT_DEV_CLEAN:
+ case VDAMGT_DEV_PT_INFO:
+ case VDAMGT_DEV_FEATURES:
+ case VDAMGT_DEV_PT_FEATURES:
+ case VDAMGT_DEV_OPERATION:
+
+ d = (struct atto_vda_devinfo *)data;
+
+ d->capacity = le64_to_cpu(d->capacity);
+ d->block_size = le32_to_cpu(d->block_size);
+ d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
+ d->target_id = le16_to_cpu(d->target_id);
+ d->lun = le16_to_cpu(d->lun);
+ d->features = le16_to_cpu(d->features);
+ break;
+
+ case VDAMGT_GRP_INFO:
+ case VDAMGT_GRP_CREATE:
+ case VDAMGT_GRP_DELETE:
+ case VDAMGT_ADD_STORAGE:
+ case VDAMGT_MEMBER_ADD:
+ case VDAMGT_GRP_COMMIT:
+ case VDAMGT_GRP_REBUILD:
+ case VDAMGT_GRP_COMMIT_INIT:
+ case VDAMGT_QUICK_RAID:
+ case VDAMGT_GRP_FEATURES:
+ case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
+ case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
+ case VDAMGT_SPARE_LIST:
+ case VDAMGT_SPARE_ADD:
+ case VDAMGT_SPARE_REMOVE:
+ case VDAMGT_LOCAL_SPARE_ADD:
+ case VDAMGT_GRP_OPERATION:
+
+ g = (struct atto_vda_grp_info *)data;
+
+ g->capacity = le64_to_cpu(g->capacity);
+ g->block_size = le32_to_cpu(g->block_size);
+ g->interleave = le32_to_cpu(g->interleave);
+ g->features = le16_to_cpu(g->features);
+
+ for (i = 0; i < 32; i++)
+ g->members[i] = le16_to_cpu(g->members[i]);
+
+ break;
+
+ case VDAMGT_PART_INFO:
+ case VDAMGT_PART_MAP:
+ case VDAMGT_PART_UNMAP:
+ case VDAMGT_PART_AUTOMAP:
+ case VDAMGT_PART_SPLIT:
+ case VDAMGT_PART_MERGE:
+
+ p = (struct atto_vdapart_info *)data;
+
+ p->part_size = le64_to_cpu(p->part_size);
+ p->start_lba = le32_to_cpu(p->start_lba);
+ p->block_size = le32_to_cpu(p->block_size);
+ p->target_id = le16_to_cpu(p->target_id);
+ break;
+
+ case VDAMGT_DEV_HEALTH_REQ:
+
+ h = (struct atto_vda_dh_info *)data;
+
+ h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
+ h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
+ break;
+
+ case VDAMGT_DEV_METRICS:
+
+ m = (struct atto_vda_metrics_info *)data;
+
+ for (i = 0; i < 32; i++)
+ m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+void esas2r_nuxi_cfg_data(u8 function, void *data)
+{
+ struct atto_vda_cfg_init *ci;
+
+ switch (function) {
+ case VDA_CFG_INIT:
+ case VDA_CFG_GET_INIT:
+ case VDA_CFG_GET_INIT2:
+
+ ci = (struct atto_vda_cfg_init *)data;
+
+ ci->date_time.year = le16_to_cpu(ci->date_time.year);
+ ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
+ ci->vda_version = le32_to_cpu(ci->vda_version);
+ ci->epoch_time = le32_to_cpu(ci->epoch_time);
+ ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
+ ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
+{
+ struct atto_vda_ae_raid *r = &ae->raid;
+ struct atto_vda_ae_lu *l = &ae->lu;
+
+ switch (ae->hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+
+ r->dwflags = le32_to_cpu(r->dwflags);
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+
+ l->dwevent = le32_to_cpu(l->dwevent);
+ l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
+ l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
+
+ if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
+ l->id.tgtlun_raid.dwinterleave
+ = le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
+ l->id.tgtlun_raid.dwblock_size
+ = le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
+ }
+
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ default:
+ break;
+ }
+}
+
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ esas2r_rq_destroy_request(rq, a);
+ spin_lock_irqsave(&a->request_lock, flags);
+ list_add(&rq->comp_list, &a->avail_request);
+ spin_unlock_irqrestore(&a->request_lock, flags);
+}
+
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->request_lock, flags);
+
+ if (unlikely(list_empty(&a->avail_request))) {
+ spin_unlock_irqrestore(&a->request_lock, flags);
+ return NULL;
+ }
+
+ rq = list_first_entry(&a->avail_request, struct esas2r_request,
+ comp_list);
+ list_del(&rq->comp_list);
+ spin_unlock_irqrestore(&a->request_lock, flags);
+ esas2r_rq_init_request(rq, a);
+
+ return rq;
+
+}
+
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_debug("completing request %p\n", rq);
+
+ scsi_dma_unmap(rq->cmd);
+
+ if (unlikely(rq->req_stat != RS_SUCCESS)) {
+ esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
+ rq->req_stat,
+ rq->func_rsp.scsi_rsp.scsi_stat,
+ rq->cmd);
+
+ rq->cmd->result =
+ ((esas2r_req_status_to_error(rq->req_stat) << 16)
+ | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+
+ if (rq->req_stat == RS_UNDERRUN)
+ scsi_set_resid(rq->cmd,
+ le32_to_cpu(rq->func_rsp.scsi_rsp.
+ residual_length));
+ else
+ scsi_set_resid(rq->cmd, 0);
+ }
+
+ rq->cmd->scsi_done(rq->cmd);
+
+ esas2r_free_request(a, rq);
+}
+
+/* Run tasklet to handle stuff outside of interrupt context. */
+void esas2r_adapter_tasklet(unsigned long context)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+ if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
+ clear_bit(AF2_TIMER_TICK, &a->flags2);
+ esas2r_timer_tick(a);
+ }
+
+ if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
+ clear_bit(AF2_INT_PENDING, &a->flags2);
+ esas2r_adapter_interrupt(a);
+ }
+
+ if (esas2r_is_tasklet_pending(a))
+ esas2r_do_tasklet_tasks(a);
+
+ if (esas2r_is_tasklet_pending(a)
+ || (test_bit(AF2_INT_PENDING, &a->flags2))
+ || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ esas2r_schedule_tasklet(a);
+ } else {
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ }
+}
+
+static void esas2r_timer_callback(unsigned long context);
+
+void esas2r_kickoff_timer(struct esas2r_adapter *a)
+{
+ init_timer(&a->timer);
+
+ a->timer.function = esas2r_timer_callback;
+ a->timer.data = (unsigned long)a;
+ a->timer.expires = jiffies +
+ msecs_to_jiffies(100);
+
+ add_timer(&a->timer);
+}
+
+static void esas2r_timer_callback(unsigned long context)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+ set_bit(AF2_TIMER_TICK, &a->flags2);
+
+ esas2r_schedule_tasklet(a);
+
+ esas2r_kickoff_timer(a);
+}
+
+/*
+ * Firmware events need to be handled outside of interrupt context
+ * so we schedule a delayed_work to handle them.
+ */
+
+static void
+esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
+{
+ unsigned long flags;
+ struct esas2r_adapter *a = fw_event->a;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_off(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ a->fw_events_off = 1;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_on(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ a->fw_events_off = 0;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
+{
+ int ret;
+ struct scsi_device *scsi_dev;
+
+ scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+ if (scsi_dev) {
+ esas2r_log_dev(
+ ESAS2R_LOG_WARN,
+ &(scsi_dev->
+ sdev_gendev),
+ "scsi device already exists at id %d", target_id);
+
+ scsi_device_put(scsi_dev);
+ } else {
+ esas2r_log_dev(
+ ESAS2R_LOG_INFO,
+ &(a->host->
+ shost_gendev),
+ "scsi_add_device() called for 0:%d:0",
+ target_id);
+
+ ret = scsi_add_device(a->host, 0, target_id, 0);
+ if (ret) {
+ esas2r_log_dev(
+ ESAS2R_LOG_CRIT,
+ &(a->host->
+ shost_gendev),
+ "scsi_add_device failed with %d for id %d",
+ ret, target_id);
+ }
+ }
+}
+
+static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
+{
+ struct scsi_device *scsi_dev;
+
+ scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+ if (scsi_dev) {
+ scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
+
+ esas2r_log_dev(
+ ESAS2R_LOG_INFO,
+ &(scsi_dev->
+ sdev_gendev),
+ "scsi_remove_device() called for 0:%d:0",
+ target_id);
+
+ scsi_remove_device(scsi_dev);
+
+ esas2r_log_dev(
+ ESAS2R_LOG_INFO,
+ &(scsi_dev->
+ sdev_gendev),
+ "scsi_device_put() called");
+
+ scsi_device_put(scsi_dev);
+ } else {
+ esas2r_log_dev(
+ ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "no target found at id %d",
+ target_id);
+ }
+}
+
+/*
+ * Sends a firmware asynchronous event to anyone who happens to be
+ * listening on the defined ATTO VDA event ports.
+ */
+static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
+{
+ struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
+ char *type;
+
+ switch (ae->vda_ae.hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+ type = "RAID group state change";
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+ type = "Mapped destination LU change";
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ type = "Physical disk inventory change";
+ break;
+
+ case VDAAE_HDR_TYPE_RESET:
+ type = "Firmware reset";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_INFO:
+ type = "Event Log message (INFO level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_WARN:
+ type = "Event Log message (WARN level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_CRIT:
+ type = "Event Log message (CRIT level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_FAIL:
+ type = "Event Log message (FAIL level)";
+ break;
+
+ case VDAAE_HDR_TYPE_NVC:
+ type = "NVCache change";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_INFO:
+ type = "Time stamped log message (INFO level)";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_WARN:
+ type = "Time stamped log message (WARN level)";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_CRIT:
+ type = "Time stamped log message (CRIT level)";
+ break;
+
+ case VDAAE_HDR_TYPE_PWRMGT:
+ type = "Power management";
+ break;
+
+ case VDAAE_HDR_TYPE_MUTE:
+ type = "Mute button pressed";
+ break;
+
+ case VDAAE_HDR_TYPE_DEV:
+ type = "Device attribute change";
+ break;
+
+ default:
+ type = "Unknown";
+ break;
+ }
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "An async event of type \"%s\" was received from the firmware. The event contents are:",
+ type);
+ esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
+ ae->vda_ae.hdr.bylength);
+
+}
+
+static void
+esas2r_firmware_event_work(struct work_struct *work)
+{
+ struct esas2r_fw_event_work *fw_event =
+ container_of(work, struct esas2r_fw_event_work, work.work);
+
+ struct esas2r_adapter *a = fw_event->a;
+
+ u16 target_id = *(u16 *)&fw_event->data[0];
+
+ if (a->fw_events_off)
+ goto done;
+
+ switch (fw_event->type) {
+ case fw_event_null:
+ break; /* do nothing */
+
+ case fw_event_lun_change:
+ esas2r_remove_device(a, target_id);
+ esas2r_add_device(a, target_id);
+ break;
+
+ case fw_event_present:
+ esas2r_add_device(a, target_id);
+ break;
+
+ case fw_event_not_present:
+ esas2r_remove_device(a, target_id);
+ break;
+
+ case fw_event_vda_ae:
+ esas2r_send_ae_event(fw_event);
+ break;
+ }
+
+done:
+ esas2r_free_fw_event(fw_event);
+}
+
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+ enum fw_event_type type,
+ void *data,
+ int data_sz)
+{
+ struct esas2r_fw_event_work *fw_event;
+ unsigned long flags;
+
+ fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
+ if (!fw_event) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "esas2r_queue_fw_event failed to alloc");
+ return;
+ }
+
+ if (type == fw_event_vda_ae) {
+ struct esas2r_vda_ae *ae =
+ (struct esas2r_vda_ae *)fw_event->data;
+
+ ae->signature = ESAS2R_VDA_EVENT_SIG;
+ ae->bus_number = a->pcid->bus->number;
+ ae->devfn = a->pcid->devfn;
+ memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
+ } else {
+ memcpy(fw_event->data, data, data_sz);
+ }
+
+ fw_event->type = type;
+ fw_event->a = a;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ list_add_tail(&fw_event->list, &a->fw_event_list);
+ INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
+ queue_delayed_work_on(
+ smp_processor_id(), a->fw_event_q, &fw_event->work,
+ msecs_to_jiffies(1));
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
+ u8 state)
+{
+ if (state == TS_LUN_CHANGE)
+ esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
+ sizeof(targ_id));
+ else if (state == TS_PRESENT)
+ esas2r_queue_fw_event(a, fw_event_present, &targ_id,
+ sizeof(targ_id));
+ else if (state == TS_NOT_PRESENT)
+ esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
+ sizeof(targ_id));
+}
+
+/* Translate status to a Linux SCSI mid-layer error code */
+int esas2r_req_status_to_error(u8 req_stat)
+{
+ switch (req_stat) {
+ case RS_OVERRUN:
+ case RS_UNDERRUN:
+ case RS_SUCCESS:
+ /*
+ * NOTE: SCSI mid-layer wants a good status for a SCSI error, because
+ * it will check the scsi_stat value in the completion anyway.
+ */
+ case RS_SCSI_ERROR:
+ return DID_OK;
+
+ case RS_SEL:
+ case RS_SEL2:
+ return DID_NO_CONNECT;
+
+ case RS_RESET:
+ return DID_RESET;
+
+ case RS_ABORTED:
+ return DID_ABORT;
+
+ case RS_BUSY:
+ return DID_BUS_BUSY;
+ }
+
+ /* everything else is just an error. */
+
+ return DID_ERROR;
+}
+
+module_init(esas2r_init);
+module_exit(esas2r_exit);
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
new file mode 100644
index 000000000..bf45beaad
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -0,0 +1,306 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_targdb.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_targ_db_initialize(struct esas2r_adapter *a)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ memset(t, 0, sizeof(struct esas2r_target));
+
+ t->target_state = TS_NOT_PRESENT;
+ t->buffered_target_state = TS_NOT_PRESENT;
+ t->new_target_state = TS_INVALID;
+ }
+}
+
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
+{
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->target_state != TS_PRESENT)
+ continue;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_targ_db_remove(a, t);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ if (notify) {
+ esas2r_trace("remove id:%d", esas2r_targ_get_id(t,
+ a));
+ esas2r_target_state_changed(a, esas2r_targ_get_id(t,
+ a),
+ TS_NOT_PRESENT);
+ }
+ }
+}
+
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
+{
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ if (test_bit(AF_DISC_PENDING, &a->flags)) {
+ esas2r_trace_exit();
+ return;
+ }
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ u8 state = TS_INVALID;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ if (t->buffered_target_state != t->target_state)
+ state = t->buffered_target_state = t->target_state;
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ if (state != TS_INVALID) {
+ esas2r_trace("targ_db_report_changes:%d",
+ esas2r_targ_get_id(
+ t,
+ a));
+ esas2r_trace("state:%d", state);
+
+ esas2r_target_state_changed(a,
+ esas2r_targ_get_id(t,
+ a),
+ state);
+ }
+ }
+
+ esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+ struct esas2r_disc_context *
+ dc)
+{
+ struct esas2r_target *t;
+
+ esas2r_trace_enter();
+
+ if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+ esas2r_bugon();
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ t = a->targetdb + dc->curr_virt_id;
+
+ if (t->target_state == TS_PRESENT) {
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
+ esas2r_targ_get_id(
+ t,
+ a));
+
+ if (dc->interleave == 0
+ || dc->block_size == 0) {
+ /* these are invalid values, don't create the target entry. */
+
+ esas2r_hdebug("invalid RAID group dimensions");
+
+ esas2r_trace_exit();
+
+ return NULL;
+ }
+
+ t->block_size = dc->block_size;
+ t->inter_byte = dc->interleave;
+ t->inter_block = dc->interleave / dc->block_size;
+ t->virt_targ_id = dc->curr_virt_id;
+ t->phys_targ_id = ESAS2R_TARG_ID_INV;
+
+ t->flags &= ~TF_PASS_THRU;
+ t->flags |= TF_USED;
+
+ t->identifier_len = 0;
+
+ t->target_state = TS_PRESENT;
+
+ return t;
+}
+
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc,
+ u8 *ident,
+ u8 ident_len)
+{
+ struct esas2r_target *t;
+
+ esas2r_trace_enter();
+
+ if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+ esas2r_bugon();
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ /* see if we found this device before. */
+
+ t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
+
+ if (t == NULL) {
+ t = a->targetdb + dc->curr_virt_id;
+
+ if (ident_len > sizeof(t->identifier)
+ || t->target_state == TS_PRESENT) {
+ esas2r_trace_exit();
+ return NULL;
+ }
+ }
+
+ esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
+ dc->curr_virt_id,
+ dc->curr_phys_id);
+
+ t->block_size = 0;
+ t->inter_byte = 0;
+ t->inter_block = 0;
+ t->virt_targ_id = dc->curr_virt_id;
+ t->phys_targ_id = dc->curr_phys_id;
+ t->identifier_len = ident_len;
+
+ memcpy(t->identifier, ident, ident_len);
+
+ t->flags |= TF_PASS_THRU | TF_USED;
+
+ t->target_state = TS_PRESENT;
+
+ return t;
+}
+
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
+{
+ esas2r_trace_enter();
+
+ t->target_state = TS_NOT_PRESENT;
+
+ esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
+
+ esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+ u64 *sas_addr)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->sas_addr == *sas_addr)
+ return t;
+
+ return NULL;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+ void *identifier,
+ u8 ident_len)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (ident_len == t->identifier_len
+ && memcmp(&t->identifier[0], identifier,
+ ident_len) == 0)
+ return t;
+ }
+
+ return NULL;
+}
+
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
+{
+ u16 id = target_id + 1;
+
+ while (id < ESAS2R_MAX_TARGETS) {
+ struct esas2r_target *t = a->targetdb + id;
+
+ if (t->target_state == TS_PRESENT)
+ break;
+
+ id++;
+ }
+
+ return id;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+ u16 virt_id)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->target_state != TS_PRESENT)
+ continue;
+
+ if (t->virt_targ_id == virt_id)
+ return t;
+ }
+
+ return NULL;
+}
+
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
+{
+ u16 devcnt = 0;
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->target_state == TS_PRESENT)
+ devcnt++;
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ return devcnt;
+}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
new file mode 100644
index 000000000..30028e56d
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -0,0 +1,524 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_vda.c
+ * esas2r driver VDA firmware interface functions
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+static u8 esas2r_vdaioctl_versions[] = {
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_FLASH_VER,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_CLI_VER,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_CFG_VER,
+ ATTO_VDA_MGT_VER,
+ ATTO_VDA_GSV_VER
+};
+
+static void clear_vda_request(struct esas2r_request *rq);
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+/* Prepare a VDA IOCTL request to be sent to the firmware. */
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl_vda *vi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ u32 datalen = 0;
+ struct atto_vda_sge *firstsg = NULL;
+ u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);
+
+ vi->status = ATTO_STS_SUCCESS;
+ vi->vda_status = RS_PENDING;
+
+ if (vi->function >= vercnt) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
+ vi->status = ATTO_STS_INV_VERSION;
+ return false;
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ vi->status = ATTO_STS_DEGRADED;
+ return false;
+ }
+
+ if (vi->function != VDA_FUNC_SCSI)
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = vi->function;
+ rq->interrupt_cb = esas2r_complete_vda_ioctl;
+ rq->interrupt_cx = vi;
+
+ switch (vi->function) {
+ case VDA_FUNC_FLASH:
+
+ if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
+ && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
+ && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
+ datalen = vi->data_length;
+
+ rq->vrq->flash.length = cpu_to_le32(datalen);
+ rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
+
+ memcpy(rq->vrq->flash.data.file.file_name,
+ vi->cmd.flash.data.file.file_name,
+ sizeof(vi->cmd.flash.data.file.file_name));
+
+ firstsg = rq->vrq->flash.data.file.sge;
+ break;
+
+ case VDA_FUNC_CLI:
+
+ datalen = vi->data_length;
+
+ rq->vrq->cli.cmd_rsp_len =
+ cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
+ rq->vrq->cli.length = cpu_to_le32(datalen);
+
+ firstsg = rq->vrq->cli.sge;
+ break;
+
+ case VDA_FUNC_MGT:
+ {
+ u8 *cmdcurr_offset = sgc->cur_offset
+ - offsetof(struct atto_ioctl_vda, data)
+ + offsetof(struct atto_ioctl_vda, cmd)
+ + offsetof(struct atto_ioctl_vda_mgt_cmd,
+ data);
+ /*
+ * build the data payload SGL here first since
+ * esas2r_sgc_init() will modify the S/G list offset for the
+ * management SGL (which is built below where the data SGL is
+ * usually built).
+ */
+
+ if (vi->data_length) {
+ u32 payldlen = 0;
+
+ if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
+ || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
+ rq->vrq->mgt.payld_sglst_offset =
+ (u8)offsetof(struct atto_vda_mgmt_req,
+ payld_sge);
+
+ payldlen = vi->data_length;
+ datalen = vi->cmd.mgt.data_length;
+ } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
+ || vi->cmd.mgt.mgt_func ==
+ VDAMGT_DEV_INFO2_BYADDR) {
+ datalen = vi->data_length;
+ cmdcurr_offset = sgc->cur_offset;
+ } else {
+ vi->status = ATTO_STS_INV_PARAM;
+ return false;
+ }
+
+ /* Setup the length so building the payload SGL works */
+ rq->vrq->mgt.length = cpu_to_le32(datalen);
+
+ if (payldlen) {
+ rq->vrq->mgt.payld_length =
+ cpu_to_le32(payldlen);
+
+ esas2r_sgc_init(sgc, a, rq,
+ rq->vrq->mgt.payld_sge);
+ sgc->length = payldlen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ vi->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+ } else {
+ datalen = vi->cmd.mgt.data_length;
+
+ rq->vrq->mgt.length = cpu_to_le32(datalen);
+ }
+
+ /*
+ * Now that the payload SGL is built, if any, setup to build
+ * the management SGL.
+ */
+ firstsg = rq->vrq->mgt.sge;
+ sgc->cur_offset = cmdcurr_offset;
+
+ /* Finish initializing the management request. */
+ rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
+ rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
+ rq->vrq->mgt.dev_index =
+ cpu_to_le32(vi->cmd.mgt.dev_index);
+
+ esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+ break;
+ }
+
+ case VDA_FUNC_CFG:
+
+ if (vi->data_length
+ || vi->cmd.cfg.data_length == 0) {
+ vi->status = ATTO_STS_INV_PARAM;
+ return false;
+ }
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
+ rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+ memcpy(&rq->vrq->cfg.data,
+ &vi->cmd.cfg.data,
+ vi->cmd.cfg.data_length);
+
+ esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+ &rq->vrq->cfg.data);
+ } else {
+ vi->status = ATTO_STS_INV_FUNC;
+
+ return false;
+ }
+
+ break;
+
+ case VDA_FUNC_GSV:
+
+ vi->cmd.gsv.rsp_len = vercnt;
+
+ memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
+ vercnt);
+
+ vi->vda_status = RS_SUCCESS;
+ break;
+
+ default:
+
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (datalen) {
+ esas2r_sgc_init(sgc, a, rq, firstsg);
+ sgc->length = datalen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ vi->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
+
+ vi->vda_status = rq->req_stat;
+
+ switch (vi->function) {
+ case VDA_FUNC_FLASH:
+
+ if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
+ || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
+ vi->cmd.flash.data.file.file_size =
+ le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
+
+ break;
+
+ case VDA_FUNC_MGT:
+
+ vi->cmd.mgt.scan_generation =
+ rq->func_rsp.mgt_rsp.scan_generation;
+ vi->cmd.mgt.dev_index = le16_to_cpu(
+ rq->func_rsp.mgt_rsp.dev_index);
+
+ if (vi->data_length == 0)
+ vi->cmd.mgt.data_length =
+ le32_to_cpu(rq->func_rsp.mgt_rsp.length);
+
+ esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+ break;
+
+ case VDA_FUNC_CFG:
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+ struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
+ struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+ char buf[sizeof(cfg->data.init.fw_release) + 1];
+
+ cfg->data_length =
+ cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+ cfg->data.init.vda_version =
+ le32_to_cpu(rsp->vda_version);
+ cfg->data.init.fw_build = rsp->fw_build;
+
+ snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
+ (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+ (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+
+ memcpy(&cfg->data.init.fw_release, buf,
+ sizeof(cfg->data.init.fw_release));
+
+ if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
+ cfg->data.init.fw_version =
+ cfg->data.init.fw_build;
+ else
+ cfg->data.init.fw_version =
+ cfg->data.init.fw_release;
+ } else {
+ esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+ &vi->cmd.cfg.data);
+ }
+
+ break;
+
+ case VDA_FUNC_CLI:
+
+ vi->cmd.cli.cmd_rsp_len =
+ le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
+ break;
+
+ default:
+
+ break;
+ }
+}
+
+/* Build a flash VDA request. */
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 cksum,
+ u32 addr,
+ u32 length)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_FLASH;
+
+ if (sub_func == VDA_FLASH_BEGINW
+ || sub_func == VDA_FLASH_WRITE
+ || sub_func == VDA_FLASH_READ)
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
+ data.sge);
+
+ vrq->length = cpu_to_le32(length);
+ vrq->flash_addr = cpu_to_le32(addr);
+ vrq->checksum = cksum;
+ vrq->sub_func = sub_func;
+}
+
+/* Build a VDA management request. */
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 scan_gen,
+ u16 dev_index,
+ u32 length,
+ void *data)
+{
+ struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_MGT;
+
+ vrq->mgt_func = sub_func;
+ vrq->scan_generation = scan_gen;
+ vrq->dev_index = cpu_to_le16(dev_index);
+ vrq->length = cpu_to_le32(length);
+
+ if (vrq->length) {
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+ vrq->sg_list_offset = (u8)offsetof(
+ struct atto_vda_mgmt_req, sge);
+
+ vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
+ vrq->sge[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ } else {
+ vrq->sg_list_offset = (u8)offsetof(
+ struct atto_vda_mgmt_req, prde);
+
+ vrq->prde[0].ctl_len = cpu_to_le32(length);
+ vrq->prde[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ }
+ }
+
+ if (data) {
+ esas2r_nuxi_mgt_data(sub_func, data);
+
+ memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
+ length);
+ }
+}
+
+/* Build a VDA asyncronous event (AE) request. */
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ struct atto_vda_ae_req *vrq = &rq->vrq->ae;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_AE;
+
+ vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
+
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+ vrq->sg_list_offset =
+ (u8)offsetof(struct atto_vda_ae_req, sge);
+ vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
+ vrq->sge[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ } else {
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
+ prde);
+ vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
+ vrq->prde[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ }
+}
+
+/* Build a VDA CLI request. */
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u32 cmd_rsp_len)
+{
+ struct atto_vda_cli_req *vrq = &rq->vrq->cli;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_CLI;
+
+ vrq->length = cpu_to_le32(length);
+ vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
+}
+
+/* Build a VDA IOCTL request. */
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u8 sub_func)
+{
+ struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_IOCTL;
+
+ vrq->length = cpu_to_le32(length);
+ vrq->sub_func = sub_func;
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
+}
+
+/* Build a VDA configuration request. */
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u32 length,
+ void *data)
+{
+ struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_CFG;
+
+ vrq->sub_func = sub_func;
+ vrq->length = cpu_to_le32(length);
+
+ if (data) {
+ esas2r_nuxi_cfg_data(sub_func, data);
+
+ memcpy(&vrq->data, data, length);
+ }
+}
+
+static void clear_vda_request(struct esas2r_request *rq)
+{
+ u32 handle = rq->vrq->scsi.handle;
+
+ memset(rq->vrq, 0, sizeof(*rq->vrq));
+
+ rq->vrq->scsi.handle = handle;
+
+ rq->req_stat = RS_PENDING;
+
+ /* since the data buffer is separate clear that too */
+
+ memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
+
+ /*
+ * Setup next and prev pointer in case the request is not going through
+ * esas2r_start_request().
+ */
+
+ INIT_LIST_HEAD(&rq->req_list);
+}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
new file mode 100644
index 000000000..065b25df7
--- /dev/null
+++ b/drivers/scsi/esp_scsi.c
@@ -0,0 +1,2800 @@
+/* esp_scsi.c: ESP SCSI driver.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/irqreturn.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "2.000"
+#define DRV_MODULE_RELDATE "April 19, 2007"
+
+/* SCSI bus reset settle time in seconds. */
+static int esp_bus_reset_settle = 3;
+
+static u32 esp_debug;
+#define ESP_DEBUG_INTR 0x00000001
+#define ESP_DEBUG_SCSICMD 0x00000002
+#define ESP_DEBUG_RESET 0x00000004
+#define ESP_DEBUG_MSGIN 0x00000008
+#define ESP_DEBUG_MSGOUT 0x00000010
+#define ESP_DEBUG_CMDDONE 0x00000020
+#define ESP_DEBUG_DISCONNECT 0x00000040
+#define ESP_DEBUG_DATASTART 0x00000080
+#define ESP_DEBUG_DATADONE 0x00000100
+#define ESP_DEBUG_RECONNECT 0x00000200
+#define ESP_DEBUG_AUTOSENSE 0x00000400
+#define ESP_DEBUG_EVENT 0x00000800
+#define ESP_DEBUG_COMMAND 0x00001000
+
+#define esp_log_intr(f, a...) \
+do { if (esp_debug & ESP_DEBUG_INTR) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_reset(f, a...) \
+do { if (esp_debug & ESP_DEBUG_RESET) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_msgin(f, a...) \
+do { if (esp_debug & ESP_DEBUG_MSGIN) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_msgout(f, a...) \
+do { if (esp_debug & ESP_DEBUG_MSGOUT) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_cmddone(f, a...) \
+do { if (esp_debug & ESP_DEBUG_CMDDONE) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_disconnect(f, a...) \
+do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_datastart(f, a...) \
+do { if (esp_debug & ESP_DEBUG_DATASTART) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_datadone(f, a...) \
+do { if (esp_debug & ESP_DEBUG_DATADONE) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_reconnect(f, a...) \
+do { if (esp_debug & ESP_DEBUG_RECONNECT) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_autosense(f, a...) \
+do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_event(f, a...) \
+do { if (esp_debug & ESP_DEBUG_EVENT) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_log_command(f, a...) \
+do { if (esp_debug & ESP_DEBUG_COMMAND) \
+ shost_printk(KERN_DEBUG, esp->host, f, ## a); \
+} while (0)
+
+#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
+#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
+
+static void esp_log_fill_regs(struct esp *esp,
+ struct esp_event_ent *p)
+{
+ p->sreg = esp->sreg;
+ p->seqreg = esp->seqreg;
+ p->sreg2 = esp->sreg2;
+ p->ireg = esp->ireg;
+ p->select_state = esp->select_state;
+ p->event = esp->event;
+}
+
+void scsi_esp_cmd(struct esp *esp, u8 val)
+{
+ struct esp_event_ent *p;
+ int idx = esp->esp_event_cur;
+
+ p = &esp->esp_event_log[idx];
+ p->type = ESP_EVENT_TYPE_CMD;
+ p->val = val;
+ esp_log_fill_regs(esp, p);
+
+ esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+
+ esp_log_command("cmd[%02x]\n", val);
+ esp_write8(val, ESP_CMD);
+}
+EXPORT_SYMBOL(scsi_esp_cmd);
+
+static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
+{
+ if (esp->flags & ESP_FLAG_USE_FIFO) {
+ int i;
+
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ for (i = 0; i < len; i++)
+ esp_write8(esp->command_block[i], ESP_FDATA);
+ scsi_esp_cmd(esp, cmd);
+ } else {
+ if (esp->rev == FASHME)
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ cmd |= ESP_CMD_DMA;
+ esp->ops->send_dma_cmd(esp, esp->command_block_dma,
+ len, max_len, 0, cmd);
+ }
+}
+
+static void esp_event(struct esp *esp, u8 val)
+{
+ struct esp_event_ent *p;
+ int idx = esp->esp_event_cur;
+
+ p = &esp->esp_event_log[idx];
+ p->type = ESP_EVENT_TYPE_EVENT;
+ p->val = val;
+ esp_log_fill_regs(esp, p);
+
+ esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+
+ esp->event = val;
+}
+
+static void esp_dump_cmd_log(struct esp *esp)
+{
+ int idx = esp->esp_event_cur;
+ int stop = idx;
+
+ shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
+ do {
+ struct esp_event_ent *p = &esp->esp_event_log[idx];
+
+ shost_printk(KERN_INFO, esp->host,
+ "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
+ "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
+ idx,
+ p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
+ p->val, p->sreg, p->seqreg,
+ p->sreg2, p->ireg, p->select_state, p->event);
+
+ idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+ } while (idx != stop);
+}
+
+static void esp_flush_fifo(struct esp *esp)
+{
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ if (esp->rev == ESP236) {
+ int lim = 1000;
+
+ while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
+ if (--lim == 0) {
+ shost_printk(KERN_ALERT, esp->host,
+ "ESP_FF_BYTES will not clear!\n");
+ break;
+ }
+ udelay(1);
+ }
+ }
+}
+
+static void hme_read_fifo(struct esp *esp)
+{
+ int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+ int idx = 0;
+
+ while (fcnt--) {
+ esp->fifo[idx++] = esp_read8(ESP_FDATA);
+ esp->fifo[idx++] = esp_read8(ESP_FDATA);
+ }
+ if (esp->sreg2 & ESP_STAT2_F1BYTE) {
+ esp_write8(0, ESP_FDATA);
+ esp->fifo[idx++] = esp_read8(ESP_FDATA);
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ }
+ esp->fifo_cnt = idx;
+}
+
+static void esp_set_all_config3(struct esp *esp, u8 val)
+{
+ int i;
+
+ for (i = 0; i < ESP_MAX_TARGET; i++)
+ esp->target[i].esp_config3 = val;
+}
+
+/* Reset the ESP chip, _not_ the SCSI bus. */
+static void esp_reset_esp(struct esp *esp)
+{
+ u8 family_code, version;
+
+ /* Now reset the ESP chip */
+ scsi_esp_cmd(esp, ESP_CMD_RC);
+ scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+ if (esp->rev == FAST)
+ esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
+ scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+
+ /* This is the only point at which it is reliable to read
+ * the ID-code for a fast ESP chip variants.
+ */
+ esp->max_period = ((35 * esp->ccycle) / 1000);
+ if (esp->rev == FAST) {
+ version = esp_read8(ESP_UID);
+ family_code = (version & 0xf8) >> 3;
+ if (family_code == 0x02)
+ esp->rev = FAS236;
+ else if (family_code == 0x0a)
+ esp->rev = FASHME; /* Version is usually '5'. */
+ else
+ esp->rev = FAS100A;
+ esp->min_period = ((4 * esp->ccycle) / 1000);
+ } else {
+ esp->min_period = ((5 * esp->ccycle) / 1000);
+ }
+ if (esp->rev == FAS236) {
+ /*
+ * The AM53c974 chip returns the same ID as FAS236;
+ * try to configure glitch eater.
+ */
+ u8 config4 = ESP_CONFIG4_GE1;
+ esp_write8(config4, ESP_CFG4);
+ config4 = esp_read8(ESP_CFG4);
+ if (config4 & ESP_CONFIG4_GE1) {
+ esp->rev = PCSCSI;
+ esp_write8(esp->config4, ESP_CFG4);
+ }
+ }
+ esp->max_period = (esp->max_period + 3)>>2;
+ esp->min_period = (esp->min_period + 3)>>2;
+
+ esp_write8(esp->config1, ESP_CFG1);
+ switch (esp->rev) {
+ case ESP100:
+ /* nothing to do */
+ break;
+
+ case ESP100A:
+ esp_write8(esp->config2, ESP_CFG2);
+ break;
+
+ case ESP236:
+ /* Slow 236 */
+ esp_write8(esp->config2, ESP_CFG2);
+ esp->prev_cfg3 = esp->target[0].esp_config3;
+ esp_write8(esp->prev_cfg3, ESP_CFG3);
+ break;
+
+ case FASHME:
+ esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
+ /* fallthrough... */
+
+ case FAS236:
+ case PCSCSI:
+ /* Fast 236, AM53c974 or HME */
+ esp_write8(esp->config2, ESP_CFG2);
+ if (esp->rev == FASHME) {
+ u8 cfg3 = esp->target[0].esp_config3;
+
+ cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
+ if (esp->scsi_id >= 8)
+ cfg3 |= ESP_CONFIG3_IDBIT3;
+ esp_set_all_config3(esp, cfg3);
+ } else {
+ u32 cfg3 = esp->target[0].esp_config3;
+
+ cfg3 |= ESP_CONFIG3_FCLK;
+ esp_set_all_config3(esp, cfg3);
+ }
+ esp->prev_cfg3 = esp->target[0].esp_config3;
+ esp_write8(esp->prev_cfg3, ESP_CFG3);
+ if (esp->rev == FASHME) {
+ esp->radelay = 80;
+ } else {
+ if (esp->flags & ESP_FLAG_DIFFERENTIAL)
+ esp->radelay = 0;
+ else
+ esp->radelay = 96;
+ }
+ break;
+
+ case FAS100A:
+ /* Fast 100a */
+ esp_write8(esp->config2, ESP_CFG2);
+ esp_set_all_config3(esp,
+ (esp->target[0].esp_config3 |
+ ESP_CONFIG3_FCLOCK));
+ esp->prev_cfg3 = esp->target[0].esp_config3;
+ esp_write8(esp->prev_cfg3, ESP_CFG3);
+ esp->radelay = 32;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Reload the configuration registers */
+ esp_write8(esp->cfact, ESP_CFACT);
+
+ esp->prev_stp = 0;
+ esp_write8(esp->prev_stp, ESP_STP);
+
+ esp->prev_soff = 0;
+ esp_write8(esp->prev_soff, ESP_SOFF);
+
+ esp_write8(esp->neg_defp, ESP_TIMEO);
+
+ /* Eat any bitrot in the chip */
+ esp_read8(ESP_INTRPT);
+ udelay(100);
+}
+
+static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
+{
+ struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+ struct scatterlist *sg = scsi_sglist(cmd);
+ int dir = cmd->sc_data_direction;
+ int total, i;
+
+ if (dir == DMA_NONE)
+ return;
+
+ spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
+ spriv->cur_residue = sg_dma_len(sg);
+ spriv->cur_sg = sg;
+
+ total = 0;
+ for (i = 0; i < spriv->u.num_sg; i++)
+ total += sg_dma_len(&sg[i]);
+ spriv->tot_residue = total;
+}
+
+static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
+ struct scsi_cmnd *cmd)
+{
+ struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ return ent->sense_dma +
+ (ent->sense_ptr - cmd->sense_buffer);
+ }
+
+ return sg_dma_address(p->cur_sg) +
+ (sg_dma_len(p->cur_sg) -
+ p->cur_residue);
+}
+
+static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
+ struct scsi_cmnd *cmd)
+{
+ struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ return SCSI_SENSE_BUFFERSIZE -
+ (ent->sense_ptr - cmd->sense_buffer);
+ }
+ return p->cur_residue;
+}
+
+static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
+ struct scsi_cmnd *cmd, unsigned int len)
+{
+ struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ ent->sense_ptr += len;
+ return;
+ }
+
+ p->cur_residue -= len;
+ p->tot_residue -= len;
+ if (p->cur_residue < 0 || p->tot_residue < 0) {
+ shost_printk(KERN_ERR, esp->host,
+ "Data transfer overflow.\n");
+ shost_printk(KERN_ERR, esp->host,
+ "cur_residue[%d] tot_residue[%d] len[%u]\n",
+ p->cur_residue, p->tot_residue, len);
+ p->cur_residue = 0;
+ p->tot_residue = 0;
+ }
+ if (!p->cur_residue && p->tot_residue) {
+ p->cur_sg++;
+ p->cur_residue = sg_dma_len(p->cur_sg);
+ }
+}
+
+static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
+{
+ struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+ int dir = cmd->sc_data_direction;
+
+ if (dir == DMA_NONE)
+ return;
+
+ esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+}
+
+static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ struct scsi_cmnd *cmd = ent->cmd;
+ struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ ent->saved_sense_ptr = ent->sense_ptr;
+ return;
+ }
+ ent->saved_cur_residue = spriv->cur_residue;
+ ent->saved_cur_sg = spriv->cur_sg;
+ ent->saved_tot_residue = spriv->tot_residue;
+}
+
+static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ struct scsi_cmnd *cmd = ent->cmd;
+ struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ ent->sense_ptr = ent->saved_sense_ptr;
+ return;
+ }
+ spriv->cur_residue = ent->saved_cur_residue;
+ spriv->cur_sg = ent->saved_cur_sg;
+ spriv->tot_residue = ent->saved_tot_residue;
+}
+
+static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
+{
+ if (cmd->cmd_len == 6 ||
+ cmd->cmd_len == 10 ||
+ cmd->cmd_len == 12) {
+ esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
+ } else {
+ esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ }
+}
+
+static void esp_write_tgt_config3(struct esp *esp, int tgt)
+{
+ if (esp->rev > ESP100A) {
+ u8 val = esp->target[tgt].esp_config3;
+
+ if (val != esp->prev_cfg3) {
+ esp->prev_cfg3 = val;
+ esp_write8(val, ESP_CFG3);
+ }
+ }
+}
+
+static void esp_write_tgt_sync(struct esp *esp, int tgt)
+{
+ u8 off = esp->target[tgt].esp_offset;
+ u8 per = esp->target[tgt].esp_period;
+
+ if (off != esp->prev_soff) {
+ esp->prev_soff = off;
+ esp_write8(off, ESP_SOFF);
+ }
+ if (per != esp->prev_stp) {
+ esp->prev_stp = per;
+ esp_write8(per, ESP_STP);
+ }
+}
+
+static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+ if (esp->rev == FASHME) {
+ /* Arbitrary segment boundaries, 24-bit counts. */
+ if (dma_len > (1U << 24))
+ dma_len = (1U << 24);
+ } else {
+ u32 base, end;
+
+ /* ESP chip limits other variants by 16-bits of transfer
+ * count. Actually on FAS100A and FAS236 we could get
+ * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
+ * in the ESP_CFG2 register but that causes other unwanted
+ * changes so we don't use it currently.
+ */
+ if (dma_len > (1U << 16))
+ dma_len = (1U << 16);
+
+ /* All of the DMA variants hooked up to these chips
+ * cannot handle crossing a 24-bit address boundary.
+ */
+ base = dma_addr & ((1U << 24) - 1U);
+ end = base + dma_len;
+ if (end > (1U << 24))
+ end = (1U <<24);
+ dma_len = end - base;
+ }
+ return dma_len;
+}
+
+static int esp_need_to_nego_wide(struct esp_target_data *tp)
+{
+ struct scsi_target *target = tp->starget;
+
+ return spi_width(target) != tp->nego_goal_width;
+}
+
+static int esp_need_to_nego_sync(struct esp_target_data *tp)
+{
+ struct scsi_target *target = tp->starget;
+
+ /* When offset is zero, period is "don't care". */
+ if (!spi_offset(target) && !tp->nego_goal_offset)
+ return 0;
+
+ if (spi_offset(target) == tp->nego_goal_offset &&
+ spi_period(target) == tp->nego_goal_period)
+ return 0;
+
+ return 1;
+}
+
+static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ struct esp_lun_data *lp)
+{
+ if (!ent->orig_tag[0]) {
+ /* Non-tagged, slot already taken? */
+ if (lp->non_tagged_cmd)
+ return -EBUSY;
+
+ if (lp->hold) {
+ /* We are being held by active tagged
+ * commands.
+ */
+ if (lp->num_tagged)
+ return -EBUSY;
+
+ /* Tagged commands completed, we can unplug
+ * the queue and run this untagged command.
+ */
+ lp->hold = 0;
+ } else if (lp->num_tagged) {
+ /* Plug the queue until num_tagged decreases
+ * to zero in esp_free_lun_tag.
+ */
+ lp->hold = 1;
+ return -EBUSY;
+ }
+
+ lp->non_tagged_cmd = ent;
+ return 0;
+ } else {
+ /* Tagged command, see if blocked by a
+ * non-tagged one.
+ */
+ if (lp->non_tagged_cmd || lp->hold)
+ return -EBUSY;
+ }
+
+ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
+
+ lp->tagged_cmds[ent->orig_tag[1]] = ent;
+ lp->num_tagged++;
+
+ return 0;
+}
+
+static void esp_free_lun_tag(struct esp_cmd_entry *ent,
+ struct esp_lun_data *lp)
+{
+ if (ent->orig_tag[0]) {
+ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
+ lp->tagged_cmds[ent->orig_tag[1]] = NULL;
+ lp->num_tagged--;
+ } else {
+ BUG_ON(lp->non_tagged_cmd != ent);
+ lp->non_tagged_cmd = NULL;
+ }
+}
+
+/* When a contingent allegiance conditon is created, we force feed a
+ * REQUEST_SENSE command to the device to fetch the sense data. I
+ * tried many other schemes, relying on the scsi error handling layer
+ * to send out the REQUEST_SENSE automatically, but this was difficult
+ * to get right especially in the presence of applications like smartd
+ * which use SG_IO to send out their own REQUEST_SENSE commands.
+ */
+static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ struct scsi_cmnd *cmd = ent->cmd;
+ struct scsi_device *dev = cmd->device;
+ int tgt, lun;
+ u8 *p, val;
+
+ tgt = dev->id;
+ lun = dev->lun;
+
+
+ if (!ent->sense_ptr) {
+ esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
+ tgt, lun);
+
+ ent->sense_ptr = cmd->sense_buffer;
+ ent->sense_dma = esp->ops->map_single(esp,
+ ent->sense_ptr,
+ SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
+ }
+ ent->saved_sense_ptr = ent->sense_ptr;
+
+ esp->active_cmd = ent;
+
+ p = esp->command_block;
+ esp->msg_out_len = 0;
+
+ *p++ = IDENTIFY(0, lun);
+ *p++ = REQUEST_SENSE;
+ *p++ = ((dev->scsi_level <= SCSI_2) ?
+ (lun << 5) : 0);
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = SCSI_SENSE_BUFFERSIZE;
+ *p++ = 0;
+
+ esp->select_state = ESP_SELECT_BASIC;
+
+ val = tgt;
+ if (esp->rev == FASHME)
+ val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
+ esp_write8(val, ESP_BUSID);
+
+ esp_write_tgt_sync(esp, tgt);
+ esp_write_tgt_config3(esp, tgt);
+
+ val = (p - esp->command_block);
+
+ esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
+}
+
+static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
+{
+ struct esp_cmd_entry *ent;
+
+ list_for_each_entry(ent, &esp->queued_cmds, list) {
+ struct scsi_cmnd *cmd = ent->cmd;
+ struct scsi_device *dev = cmd->device;
+ struct esp_lun_data *lp = dev->hostdata;
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ ent->tag[0] = 0;
+ ent->tag[1] = 0;
+ return ent;
+ }
+
+ if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
+ ent->tag[0] = 0;
+ ent->tag[1] = 0;
+ }
+ ent->orig_tag[0] = ent->tag[0];
+ ent->orig_tag[1] = ent->tag[1];
+
+ if (esp_alloc_lun_tag(ent, lp) < 0)
+ continue;
+
+ return ent;
+ }
+
+ return NULL;
+}
+
+static void esp_maybe_execute_command(struct esp *esp)
+{
+ struct esp_target_data *tp;
+ struct esp_lun_data *lp;
+ struct scsi_device *dev;
+ struct scsi_cmnd *cmd;
+ struct esp_cmd_entry *ent;
+ int tgt, lun, i;
+ u32 val, start_cmd;
+ u8 *p;
+
+ if (esp->active_cmd ||
+ (esp->flags & ESP_FLAG_RESETTING))
+ return;
+
+ ent = find_and_prep_issuable_command(esp);
+ if (!ent)
+ return;
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ esp_autosense(esp, ent);
+ return;
+ }
+
+ cmd = ent->cmd;
+ dev = cmd->device;
+ tgt = dev->id;
+ lun = dev->lun;
+ tp = &esp->target[tgt];
+ lp = dev->hostdata;
+
+ list_move(&ent->list, &esp->active_cmds);
+
+ esp->active_cmd = ent;
+
+ esp_map_dma(esp, cmd);
+ esp_save_pointers(esp, ent);
+
+ esp_check_command_len(esp, cmd);
+
+ p = esp->command_block;
+
+ esp->msg_out_len = 0;
+ if (tp->flags & ESP_TGT_CHECK_NEGO) {
+ /* Need to negotiate. If the target is broken
+ * go for synchronous transfers and non-wide.
+ */
+ if (tp->flags & ESP_TGT_BROKEN) {
+ tp->flags &= ~ESP_TGT_DISCONNECT;
+ tp->nego_goal_period = 0;
+ tp->nego_goal_offset = 0;
+ tp->nego_goal_width = 0;
+ tp->nego_goal_tags = 0;
+ }
+
+ /* If the settings are not changing, skip this. */
+ if (spi_width(tp->starget) == tp->nego_goal_width &&
+ spi_period(tp->starget) == tp->nego_goal_period &&
+ spi_offset(tp->starget) == tp->nego_goal_offset) {
+ tp->flags &= ~ESP_TGT_CHECK_NEGO;
+ goto build_identify;
+ }
+
+ if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
+ esp->msg_out_len =
+ spi_populate_width_msg(&esp->msg_out[0],
+ (tp->nego_goal_width ?
+ 1 : 0));
+ tp->flags |= ESP_TGT_NEGO_WIDE;
+ } else if (esp_need_to_nego_sync(tp)) {
+ esp->msg_out_len =
+ spi_populate_sync_msg(&esp->msg_out[0],
+ tp->nego_goal_period,
+ tp->nego_goal_offset);
+ tp->flags |= ESP_TGT_NEGO_SYNC;
+ } else {
+ tp->flags &= ~ESP_TGT_CHECK_NEGO;
+ }
+
+ /* Process it like a slow command. */
+ if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
+ esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ }
+
+build_identify:
+ /* If we don't have a lun-data struct yet, we're probing
+ * so do not disconnect. Also, do not disconnect unless
+ * we have a tag on this command.
+ */
+ if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
+ *p++ = IDENTIFY(1, lun);
+ else
+ *p++ = IDENTIFY(0, lun);
+
+ if (ent->tag[0] && esp->rev == ESP100) {
+ /* ESP100 lacks select w/atn3 command, use select
+ * and stop instead.
+ */
+ esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ }
+
+ if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
+ start_cmd = ESP_CMD_SELA;
+ if (ent->tag[0]) {
+ *p++ = ent->tag[0];
+ *p++ = ent->tag[1];
+
+ start_cmd = ESP_CMD_SA3;
+ }
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ *p++ = cmd->cmnd[i];
+
+ esp->select_state = ESP_SELECT_BASIC;
+ } else {
+ esp->cmd_bytes_left = cmd->cmd_len;
+ esp->cmd_bytes_ptr = &cmd->cmnd[0];
+
+ if (ent->tag[0]) {
+ for (i = esp->msg_out_len - 1;
+ i >= 0; i--)
+ esp->msg_out[i + 2] = esp->msg_out[i];
+ esp->msg_out[0] = ent->tag[0];
+ esp->msg_out[1] = ent->tag[1];
+ esp->msg_out_len += 2;
+ }
+
+ start_cmd = ESP_CMD_SELAS;
+ esp->select_state = ESP_SELECT_MSGOUT;
+ }
+ val = tgt;
+ if (esp->rev == FASHME)
+ val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
+ esp_write8(val, ESP_BUSID);
+
+ esp_write_tgt_sync(esp, tgt);
+ esp_write_tgt_config3(esp, tgt);
+
+ val = (p - esp->command_block);
+
+ if (esp_debug & ESP_DEBUG_SCSICMD) {
+ printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
+ for (i = 0; i < cmd->cmd_len; i++)
+ printk("%02x ", cmd->cmnd[i]);
+ printk("]\n");
+ }
+
+ esp_send_dma_cmd(esp, val, 16, start_cmd);
+}
+
+static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
+{
+ struct list_head *head = &esp->esp_cmd_pool;
+ struct esp_cmd_entry *ret;
+
+ if (list_empty(head)) {
+ ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
+ } else {
+ ret = list_entry(head->next, struct esp_cmd_entry, list);
+ list_del(&ret->list);
+ memset(ret, 0, sizeof(*ret));
+ }
+ return ret;
+}
+
+static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ list_add(&ent->list, &esp->esp_cmd_pool);
+}
+
+static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
+ struct scsi_cmnd *cmd, unsigned int result)
+{
+ struct scsi_device *dev = cmd->device;
+ int tgt = dev->id;
+ int lun = dev->lun;
+
+ esp->active_cmd = NULL;
+ esp_unmap_dma(esp, cmd);
+ esp_free_lun_tag(ent, dev->hostdata);
+ cmd->result = result;
+
+ if (ent->eh_done) {
+ complete(ent->eh_done);
+ ent->eh_done = NULL;
+ }
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ esp->ops->unmap_single(esp, ent->sense_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ ent->sense_ptr = NULL;
+
+ /* Restore the message/status bytes to what we actually
+ * saw originally. Also, report that we are providing
+ * the sense data.
+ */
+ cmd->result = ((DRIVER_SENSE << 24) |
+ (DID_OK << 16) |
+ (COMMAND_COMPLETE << 8) |
+ (SAM_STAT_CHECK_CONDITION << 0));
+
+ ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
+ if (esp_debug & ESP_DEBUG_AUTOSENSE) {
+ int i;
+
+ printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
+ esp->host->unique_id, tgt, lun);
+ for (i = 0; i < 18; i++)
+ printk("%02x ", cmd->sense_buffer[i]);
+ printk("]\n");
+ }
+ }
+
+ cmd->scsi_done(cmd);
+
+ list_del(&ent->list);
+ esp_put_ent(esp, ent);
+
+ esp_maybe_execute_command(esp);
+}
+
+static unsigned int compose_result(unsigned int status, unsigned int message,
+ unsigned int driver_code)
+{
+ return (status | (message << 8) | (driver_code << 16));
+}
+
+static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ struct scsi_device *dev = ent->cmd->device;
+ struct esp_lun_data *lp = dev->hostdata;
+
+ scsi_track_queue_full(dev, lp->num_tagged - 1);
+}
+
+static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct scsi_device *dev = cmd->device;
+ struct esp *esp = shost_priv(dev->host);
+ struct esp_cmd_priv *spriv;
+ struct esp_cmd_entry *ent;
+
+ ent = esp_get_ent(esp);
+ if (!ent)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ ent->cmd = cmd;
+
+ cmd->scsi_done = done;
+
+ spriv = ESP_CMD_PRIV(cmd);
+ spriv->u.dma_addr = ~(dma_addr_t)0x0;
+
+ list_add_tail(&ent->list, &esp->queued_cmds);
+
+ esp_maybe_execute_command(esp);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(esp_queuecommand)
+
+static int esp_check_gross_error(struct esp *esp)
+{
+ if (esp->sreg & ESP_STAT_SPAM) {
+ /* Gross Error, could be one of:
+ * - top of fifo overwritten
+ * - top of command register overwritten
+ * - DMA programmed with wrong direction
+ * - improper phase change
+ */
+ shost_printk(KERN_ERR, esp->host,
+ "Gross error sreg[%02x]\n", esp->sreg);
+ /* XXX Reset the chip. XXX */
+ return 1;
+ }
+ return 0;
+}
+
+static int esp_check_spur_intr(struct esp *esp)
+{
+ switch (esp->rev) {
+ case ESP100:
+ case ESP100A:
+ /* The interrupt pending bit of the status register cannot
+ * be trusted on these revisions.
+ */
+ esp->sreg &= ~ESP_STAT_INTR;
+ break;
+
+ default:
+ if (!(esp->sreg & ESP_STAT_INTR)) {
+ if (esp->ireg & ESP_INTR_SR)
+ return 1;
+
+ /* If the DMA is indicating interrupt pending and the
+ * ESP is not, the only possibility is a DMA error.
+ */
+ if (!esp->ops->dma_error(esp)) {
+ shost_printk(KERN_ERR, esp->host,
+ "Spurious irq, sreg=%02x.\n",
+ esp->sreg);
+ return -1;
+ }
+
+ shost_printk(KERN_ERR, esp->host, "DMA error\n");
+
+ /* XXX Reset the chip. XXX */
+ return -1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void esp_schedule_reset(struct esp *esp)
+{
+ esp_log_reset("esp_schedule_reset() from %pf\n",
+ __builtin_return_address(0));
+ esp->flags |= ESP_FLAG_RESETTING;
+ esp_event(esp, ESP_EVENT_RESET);
+}
+
+/* In order to avoid having to add a special half-reconnected state
+ * into the driver we just sit here and poll through the rest of
+ * the reselection process to get the tag message bytes.
+ */
+static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
+ struct esp_lun_data *lp)
+{
+ struct esp_cmd_entry *ent;
+ int i;
+
+ if (!lp->num_tagged) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect w/num_tagged==0\n");
+ return NULL;
+ }
+
+ esp_log_reconnect("reconnect tag, ");
+
+ for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
+ if (esp->ops->irq_pending(esp))
+ break;
+ }
+ if (i == ESP_QUICKIRQ_LIMIT) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect IRQ1 timeout\n");
+ return NULL;
+ }
+
+ esp->sreg = esp_read8(ESP_STATUS);
+ esp->ireg = esp_read8(ESP_INTRPT);
+
+ esp_log_reconnect("IRQ(%d:%x:%x), ",
+ i, esp->ireg, esp->sreg);
+
+ if (esp->ireg & ESP_INTR_DC) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect, got disconnect.\n");
+ return NULL;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
+ return NULL;
+ }
+
+ /* DMA in the tag bytes... */
+ esp->command_block[0] = 0xff;
+ esp->command_block[1] = 0xff;
+ esp->ops->send_dma_cmd(esp, esp->command_block_dma,
+ 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
+
+ /* ACK the message. */
+ scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+ for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
+ if (esp->ops->irq_pending(esp)) {
+ esp->sreg = esp_read8(ESP_STATUS);
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ESP_INTR_FDONE)
+ break;
+ }
+ udelay(1);
+ }
+ if (i == ESP_RESELECT_TAG_LIMIT) {
+ shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
+ return NULL;
+ }
+ esp->ops->dma_drain(esp);
+ esp->ops->dma_invalidate(esp);
+
+ esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
+ i, esp->ireg, esp->sreg,
+ esp->command_block[0],
+ esp->command_block[1]);
+
+ if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
+ esp->command_block[0] > ORDERED_QUEUE_TAG) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect, bad tag type %02x.\n",
+ esp->command_block[0]);
+ return NULL;
+ }
+
+ ent = lp->tagged_cmds[esp->command_block[1]];
+ if (!ent) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect, no entry for tag %02x.\n",
+ esp->command_block[1]);
+ return NULL;
+ }
+
+ return ent;
+}
+
+static int esp_reconnect(struct esp *esp)
+{
+ struct esp_cmd_entry *ent;
+ struct esp_target_data *tp;
+ struct esp_lun_data *lp;
+ struct scsi_device *dev;
+ int target, lun;
+
+ BUG_ON(esp->active_cmd);
+ if (esp->rev == FASHME) {
+ /* FASHME puts the target and lun numbers directly
+ * into the fifo.
+ */
+ target = esp->fifo[0];
+ lun = esp->fifo[1] & 0x7;
+ } else {
+ u8 bits = esp_read8(ESP_FDATA);
+
+ /* Older chips put the lun directly into the fifo, but
+ * the target is given as a sample of the arbitration
+ * lines on the bus at reselection time. So we should
+ * see the ID of the ESP and the one reconnecting target
+ * set in the bitmap.
+ */
+ if (!(bits & esp->scsi_id_mask))
+ goto do_reset;
+ bits &= ~esp->scsi_id_mask;
+ if (!bits || (bits & (bits - 1)))
+ goto do_reset;
+
+ target = ffs(bits) - 1;
+ lun = (esp_read8(ESP_FDATA) & 0x7);
+
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ if (esp->rev == ESP100) {
+ u8 ireg = esp_read8(ESP_INTRPT);
+ /* This chip has a bug during reselection that can
+ * cause a spurious illegal-command interrupt, which
+ * we simply ACK here. Another possibility is a bus
+ * reset so we must check for that.
+ */
+ if (ireg & ESP_INTR_SR)
+ goto do_reset;
+ }
+ scsi_esp_cmd(esp, ESP_CMD_NULL);
+ }
+
+ esp_write_tgt_sync(esp, target);
+ esp_write_tgt_config3(esp, target);
+
+ scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+ if (esp->rev == FASHME)
+ esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
+ ESP_BUSID);
+
+ tp = &esp->target[target];
+ dev = __scsi_device_lookup_by_target(tp->starget, lun);
+ if (!dev) {
+ shost_printk(KERN_ERR, esp->host,
+ "Reconnect, no lp tgt[%u] lun[%u]\n",
+ target, lun);
+ goto do_reset;
+ }
+ lp = dev->hostdata;
+
+ ent = lp->non_tagged_cmd;
+ if (!ent) {
+ ent = esp_reconnect_with_tag(esp, lp);
+ if (!ent)
+ goto do_reset;
+ }
+
+ esp->active_cmd = ent;
+
+ if (ent->flags & ESP_CMD_FLAG_ABORT) {
+ esp->msg_out[0] = ABORT_TASK_SET;
+ esp->msg_out_len = 1;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+ }
+
+ esp_event(esp, ESP_EVENT_CHECK_PHASE);
+ esp_restore_pointers(esp, ent);
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ return 1;
+
+do_reset:
+ esp_schedule_reset(esp);
+ return 0;
+}
+
+static int esp_finish_select(struct esp *esp)
+{
+ struct esp_cmd_entry *ent;
+ struct scsi_cmnd *cmd;
+ u8 orig_select_state;
+
+ orig_select_state = esp->select_state;
+
+ /* No longer selecting. */
+ esp->select_state = ESP_SELECT_NONE;
+
+ esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
+ ent = esp->active_cmd;
+ cmd = ent->cmd;
+
+ if (esp->ops->dma_error(esp)) {
+ /* If we see a DMA error during or as a result of selection,
+ * all bets are off.
+ */
+ esp_schedule_reset(esp);
+ esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
+ return 0;
+ }
+
+ esp->ops->dma_invalidate(esp);
+
+ if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
+ struct esp_target_data *tp = &esp->target[cmd->device->id];
+
+ /* Carefully back out of the selection attempt. Release
+ * resources (such as DMA mapping & TAG) and reset state (such
+ * as message out and command delivery variables).
+ */
+ if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
+ esp_unmap_dma(esp, cmd);
+ esp_free_lun_tag(ent, cmd->device->hostdata);
+ tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
+ esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
+ esp->cmd_bytes_ptr = NULL;
+ esp->cmd_bytes_left = 0;
+ } else {
+ esp->ops->unmap_single(esp, ent->sense_dma,
+ SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
+ ent->sense_ptr = NULL;
+ }
+
+ /* Now that the state is unwound properly, put back onto
+ * the issue queue. This command is no longer active.
+ */
+ list_move(&ent->list, &esp->queued_cmds);
+ esp->active_cmd = NULL;
+
+ /* Return value ignored by caller, it directly invokes
+ * esp_reconnect().
+ */
+ return 0;
+ }
+
+ if (esp->ireg == ESP_INTR_DC) {
+ struct scsi_device *dev = cmd->device;
+
+ /* Disconnect. Make sure we re-negotiate sync and
+ * wide parameters if this target starts responding
+ * again in the future.
+ */
+ esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
+
+ scsi_esp_cmd(esp, ESP_CMD_ESEL);
+ esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
+ return 1;
+ }
+
+ if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
+ /* Selection successful. On pre-FAST chips we have
+ * to do a NOP and possibly clean out the FIFO.
+ */
+ if (esp->rev <= ESP236) {
+ int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ scsi_esp_cmd(esp, ESP_CMD_NULL);
+
+ if (!fcnt &&
+ (!esp->prev_soff ||
+ ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
+ esp_flush_fifo(esp);
+ }
+
+ /* If we are doing a slow command, negotiation, etc.
+ * we'll do the right thing as we transition to the
+ * next phase.
+ */
+ esp_event(esp, ESP_EVENT_CHECK_PHASE);
+ return 0;
+ }
+
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected selection completion ireg[%x]\n", esp->ireg);
+ esp_schedule_reset(esp);
+ return 0;
+}
+
+static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
+ struct scsi_cmnd *cmd)
+{
+ int fifo_cnt, ecount, bytes_sent, flush_fifo;
+
+ fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+ if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
+ fifo_cnt <<= 1;
+
+ ecount = 0;
+ if (!(esp->sreg & ESP_STAT_TCNT)) {
+ ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
+ (((unsigned int)esp_read8(ESP_TCMED)) << 8));
+ if (esp->rev == FASHME)
+ ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
+ if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
+ ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
+ }
+
+ bytes_sent = esp->data_dma_len;
+ bytes_sent -= ecount;
+
+ /*
+ * The am53c974 has a DMA 'pecularity'. The doc states:
+ * In some odd byte conditions, one residual byte will
+ * be left in the SCSI FIFO, and the FIFO Flags will
+ * never count to '0 '. When this happens, the residual
+ * byte should be retrieved via PIO following completion
+ * of the BLAST operation.
+ */
+ if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
+ size_t count = 1;
+ size_t offset = bytes_sent;
+ u8 bval = esp_read8(ESP_FDATA);
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+ ent->sense_ptr[bytes_sent] = bval;
+ else {
+ struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+ u8 *ptr;
+
+ ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+ &offset, &count);
+ if (likely(ptr)) {
+ *(ptr + offset) = bval;
+ scsi_kunmap_atomic_sg(ptr);
+ }
+ }
+ bytes_sent += fifo_cnt;
+ ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
+ }
+ if (!(ent->flags & ESP_CMD_FLAG_WRITE))
+ bytes_sent -= fifo_cnt;
+
+ flush_fifo = 0;
+ if (!esp->prev_soff) {
+ /* Synchronous data transfer, always flush fifo. */
+ flush_fifo = 1;
+ } else {
+ if (esp->rev == ESP100) {
+ u32 fflags, phase;
+
+ /* ESP100 has a chip bug where in the synchronous data
+ * phase it can mistake a final long REQ pulse from the
+ * target as an extra data byte. Fun.
+ *
+ * To detect this case we resample the status register
+ * and fifo flags. If we're still in a data phase and
+ * we see spurious chunks in the fifo, we return error
+ * to the caller which should reset and set things up
+ * such that we only try future transfers to this
+ * target in synchronous mode.
+ */
+ esp->sreg = esp_read8(ESP_STATUS);
+ phase = esp->sreg & ESP_STAT_PMASK;
+ fflags = esp_read8(ESP_FFLAGS);
+
+ if ((phase == ESP_DOP &&
+ (fflags & ESP_FF_ONOTZERO)) ||
+ (phase == ESP_DIP &&
+ (fflags & ESP_FF_FBYTES)))
+ return -1;
+ }
+ if (!(ent->flags & ESP_CMD_FLAG_WRITE))
+ flush_fifo = 1;
+ }
+
+ if (flush_fifo)
+ esp_flush_fifo(esp);
+
+ return bytes_sent;
+}
+
+static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
+ u8 scsi_period, u8 scsi_offset,
+ u8 esp_stp, u8 esp_soff)
+{
+ spi_period(tp->starget) = scsi_period;
+ spi_offset(tp->starget) = scsi_offset;
+ spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
+
+ if (esp_soff) {
+ esp_stp &= 0x1f;
+ esp_soff |= esp->radelay;
+ if (esp->rev >= FAS236) {
+ u8 bit = ESP_CONFIG3_FSCSI;
+ if (esp->rev >= FAS100A)
+ bit = ESP_CONFIG3_FAST;
+
+ if (scsi_period < 50) {
+ if (esp->rev == FASHME)
+ esp_soff &= ~esp->radelay;
+ tp->esp_config3 |= bit;
+ } else {
+ tp->esp_config3 &= ~bit;
+ }
+ esp->prev_cfg3 = tp->esp_config3;
+ esp_write8(esp->prev_cfg3, ESP_CFG3);
+ }
+ }
+
+ tp->esp_period = esp->prev_stp = esp_stp;
+ tp->esp_offset = esp->prev_soff = esp_soff;
+
+ esp_write8(esp_soff, ESP_SOFF);
+ esp_write8(esp_stp, ESP_STP);
+
+ tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
+
+ spi_display_xfer_agreement(tp->starget);
+}
+
+static void esp_msgin_reject(struct esp *esp)
+{
+ struct esp_cmd_entry *ent = esp->active_cmd;
+ struct scsi_cmnd *cmd = ent->cmd;
+ struct esp_target_data *tp;
+ int tgt;
+
+ tgt = cmd->device->id;
+ tp = &esp->target[tgt];
+
+ if (tp->flags & ESP_TGT_NEGO_WIDE) {
+ tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
+
+ if (!esp_need_to_nego_sync(tp)) {
+ tp->flags &= ~ESP_TGT_CHECK_NEGO;
+ scsi_esp_cmd(esp, ESP_CMD_RATN);
+ } else {
+ esp->msg_out_len =
+ spi_populate_sync_msg(&esp->msg_out[0],
+ tp->nego_goal_period,
+ tp->nego_goal_offset);
+ tp->flags |= ESP_TGT_NEGO_SYNC;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+ }
+ return;
+ }
+
+ if (tp->flags & ESP_TGT_NEGO_SYNC) {
+ tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
+ tp->esp_period = 0;
+ tp->esp_offset = 0;
+ esp_setsync(esp, tp, 0, 0, 0, 0);
+ scsi_esp_cmd(esp, ESP_CMD_RATN);
+ return;
+ }
+
+ esp->msg_out[0] = ABORT_TASK_SET;
+ esp->msg_out_len = 1;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
+{
+ u8 period = esp->msg_in[3];
+ u8 offset = esp->msg_in[4];
+ u8 stp;
+
+ if (!(tp->flags & ESP_TGT_NEGO_SYNC))
+ goto do_reject;
+
+ if (offset > 15)
+ goto do_reject;
+
+ if (offset) {
+ int one_clock;
+
+ if (period > esp->max_period) {
+ period = offset = 0;
+ goto do_sdtr;
+ }
+ if (period < esp->min_period)
+ goto do_reject;
+
+ one_clock = esp->ccycle / 1000;
+ stp = DIV_ROUND_UP(period << 2, one_clock);
+ if (stp && esp->rev >= FAS236) {
+ if (stp >= 50)
+ stp--;
+ }
+ } else {
+ stp = 0;
+ }
+
+ esp_setsync(esp, tp, period, offset, stp, offset);
+ return;
+
+do_reject:
+ esp->msg_out[0] = MESSAGE_REJECT;
+ esp->msg_out_len = 1;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+ return;
+
+do_sdtr:
+ tp->nego_goal_period = period;
+ tp->nego_goal_offset = offset;
+ esp->msg_out_len =
+ spi_populate_sync_msg(&esp->msg_out[0],
+ tp->nego_goal_period,
+ tp->nego_goal_offset);
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
+{
+ int size = 8 << esp->msg_in[3];
+ u8 cfg3;
+
+ if (esp->rev != FASHME)
+ goto do_reject;
+
+ if (size != 8 && size != 16)
+ goto do_reject;
+
+ if (!(tp->flags & ESP_TGT_NEGO_WIDE))
+ goto do_reject;
+
+ cfg3 = tp->esp_config3;
+ if (size == 16) {
+ tp->flags |= ESP_TGT_WIDE;
+ cfg3 |= ESP_CONFIG3_EWIDE;
+ } else {
+ tp->flags &= ~ESP_TGT_WIDE;
+ cfg3 &= ~ESP_CONFIG3_EWIDE;
+ }
+ tp->esp_config3 = cfg3;
+ esp->prev_cfg3 = cfg3;
+ esp_write8(cfg3, ESP_CFG3);
+
+ tp->flags &= ~ESP_TGT_NEGO_WIDE;
+
+ spi_period(tp->starget) = 0;
+ spi_offset(tp->starget) = 0;
+ if (!esp_need_to_nego_sync(tp)) {
+ tp->flags &= ~ESP_TGT_CHECK_NEGO;
+ scsi_esp_cmd(esp, ESP_CMD_RATN);
+ } else {
+ esp->msg_out_len =
+ spi_populate_sync_msg(&esp->msg_out[0],
+ tp->nego_goal_period,
+ tp->nego_goal_offset);
+ tp->flags |= ESP_TGT_NEGO_SYNC;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+ }
+ return;
+
+do_reject:
+ esp->msg_out[0] = MESSAGE_REJECT;
+ esp->msg_out_len = 1;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+static void esp_msgin_extended(struct esp *esp)
+{
+ struct esp_cmd_entry *ent = esp->active_cmd;
+ struct scsi_cmnd *cmd = ent->cmd;
+ struct esp_target_data *tp;
+ int tgt = cmd->device->id;
+
+ tp = &esp->target[tgt];
+ if (esp->msg_in[2] == EXTENDED_SDTR) {
+ esp_msgin_sdtr(esp, tp);
+ return;
+ }
+ if (esp->msg_in[2] == EXTENDED_WDTR) {
+ esp_msgin_wdtr(esp, tp);
+ return;
+ }
+
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected extended msg type %x\n", esp->msg_in[2]);
+
+ esp->msg_out[0] = ABORT_TASK_SET;
+ esp->msg_out_len = 1;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+/* Analyze msgin bytes received from target so far. Return non-zero
+ * if there are more bytes needed to complete the message.
+ */
+static int esp_msgin_process(struct esp *esp)
+{
+ u8 msg0 = esp->msg_in[0];
+ int len = esp->msg_in_len;
+
+ if (msg0 & 0x80) {
+ /* Identify */
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected msgin identify\n");
+ return 0;
+ }
+
+ switch (msg0) {
+ case EXTENDED_MESSAGE:
+ if (len == 1)
+ return 1;
+ if (len < esp->msg_in[1] + 2)
+ return 1;
+ esp_msgin_extended(esp);
+ return 0;
+
+ case IGNORE_WIDE_RESIDUE: {
+ struct esp_cmd_entry *ent;
+ struct esp_cmd_priv *spriv;
+ if (len == 1)
+ return 1;
+
+ if (esp->msg_in[1] != 1)
+ goto do_reject;
+
+ ent = esp->active_cmd;
+ spriv = ESP_CMD_PRIV(ent->cmd);
+
+ if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
+ spriv->cur_sg--;
+ spriv->cur_residue = 1;
+ } else
+ spriv->cur_residue++;
+ spriv->tot_residue++;
+ return 0;
+ }
+ case NOP:
+ return 0;
+ case RESTORE_POINTERS:
+ esp_restore_pointers(esp, esp->active_cmd);
+ return 0;
+ case SAVE_POINTERS:
+ esp_save_pointers(esp, esp->active_cmd);
+ return 0;
+
+ case COMMAND_COMPLETE:
+ case DISCONNECT: {
+ struct esp_cmd_entry *ent = esp->active_cmd;
+
+ ent->message = msg0;
+ esp_event(esp, ESP_EVENT_FREE_BUS);
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ return 0;
+ }
+ case MESSAGE_REJECT:
+ esp_msgin_reject(esp);
+ return 0;
+
+ default:
+ do_reject:
+ esp->msg_out[0] = MESSAGE_REJECT;
+ esp->msg_out_len = 1;
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+ return 0;
+ }
+}
+
+static int esp_process_event(struct esp *esp)
+{
+ int write, i;
+
+again:
+ write = 0;
+ esp_log_event("process event %d phase %x\n",
+ esp->event, esp->sreg & ESP_STAT_PMASK);
+ switch (esp->event) {
+ case ESP_EVENT_CHECK_PHASE:
+ switch (esp->sreg & ESP_STAT_PMASK) {
+ case ESP_DOP:
+ esp_event(esp, ESP_EVENT_DATA_OUT);
+ break;
+ case ESP_DIP:
+ esp_event(esp, ESP_EVENT_DATA_IN);
+ break;
+ case ESP_STATP:
+ esp_flush_fifo(esp);
+ scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
+ esp_event(esp, ESP_EVENT_STATUS);
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ return 1;
+
+ case ESP_MOP:
+ esp_event(esp, ESP_EVENT_MSGOUT);
+ break;
+
+ case ESP_MIP:
+ esp_event(esp, ESP_EVENT_MSGIN);
+ break;
+
+ case ESP_CMDP:
+ esp_event(esp, ESP_EVENT_CMD_START);
+ break;
+
+ default:
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected phase, sreg=%02x\n",
+ esp->sreg);
+ esp_schedule_reset(esp);
+ return 0;
+ }
+ goto again;
+ break;
+
+ case ESP_EVENT_DATA_IN:
+ write = 1;
+ /* fallthru */
+
+ case ESP_EVENT_DATA_OUT: {
+ struct esp_cmd_entry *ent = esp->active_cmd;
+ struct scsi_cmnd *cmd = ent->cmd;
+ dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
+ unsigned int dma_len = esp_cur_dma_len(ent, cmd);
+
+ if (esp->rev == ESP100)
+ scsi_esp_cmd(esp, ESP_CMD_NULL);
+
+ if (write)
+ ent->flags |= ESP_CMD_FLAG_WRITE;
+ else
+ ent->flags &= ~ESP_CMD_FLAG_WRITE;
+
+ if (esp->ops->dma_length_limit)
+ dma_len = esp->ops->dma_length_limit(esp, dma_addr,
+ dma_len);
+ else
+ dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+
+ esp->data_dma_len = dma_len;
+
+ if (!dma_len) {
+ shost_printk(KERN_ERR, esp->host,
+ "DMA length is zero!\n");
+ shost_printk(KERN_ERR, esp->host,
+ "cur adr[%08llx] len[%08x]\n",
+ (unsigned long long)esp_cur_dma_addr(ent, cmd),
+ esp_cur_dma_len(ent, cmd));
+ esp_schedule_reset(esp);
+ return 0;
+ }
+
+ esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
+ (unsigned long long)dma_addr, dma_len, write);
+
+ esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
+ write, ESP_CMD_DMA | ESP_CMD_TI);
+ esp_event(esp, ESP_EVENT_DATA_DONE);
+ break;
+ }
+ case ESP_EVENT_DATA_DONE: {
+ struct esp_cmd_entry *ent = esp->active_cmd;
+ struct scsi_cmnd *cmd = ent->cmd;
+ int bytes_sent;
+
+ if (esp->ops->dma_error(esp)) {
+ shost_printk(KERN_INFO, esp->host,
+ "data done, DMA error, resetting\n");
+ esp_schedule_reset(esp);
+ return 0;
+ }
+
+ if (ent->flags & ESP_CMD_FLAG_WRITE) {
+ /* XXX parity errors, etc. XXX */
+
+ esp->ops->dma_drain(esp);
+ }
+ esp->ops->dma_invalidate(esp);
+
+ if (esp->ireg != ESP_INTR_BSERV) {
+ /* We should always see exactly a bus-service
+ * interrupt at the end of a successful transfer.
+ */
+ shost_printk(KERN_INFO, esp->host,
+ "data done, not BSERV, resetting\n");
+ esp_schedule_reset(esp);
+ return 0;
+ }
+
+ bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
+
+ esp_log_datadone("data done flgs[%x] sent[%d]\n",
+ ent->flags, bytes_sent);
+
+ if (bytes_sent < 0) {
+ /* XXX force sync mode for this target XXX */
+ esp_schedule_reset(esp);
+ return 0;
+ }
+
+ esp_advance_dma(esp, ent, cmd, bytes_sent);
+ esp_event(esp, ESP_EVENT_CHECK_PHASE);
+ goto again;
+ }
+
+ case ESP_EVENT_STATUS: {
+ struct esp_cmd_entry *ent = esp->active_cmd;
+
+ if (esp->ireg & ESP_INTR_FDONE) {
+ ent->status = esp_read8(ESP_FDATA);
+ ent->message = esp_read8(ESP_FDATA);
+ scsi_esp_cmd(esp, ESP_CMD_MOK);
+ } else if (esp->ireg == ESP_INTR_BSERV) {
+ ent->status = esp_read8(ESP_FDATA);
+ ent->message = 0xff;
+ esp_event(esp, ESP_EVENT_MSGIN);
+ return 0;
+ }
+
+ if (ent->message != COMMAND_COMPLETE) {
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected message %x in status\n",
+ ent->message);
+ esp_schedule_reset(esp);
+ return 0;
+ }
+
+ esp_event(esp, ESP_EVENT_FREE_BUS);
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ break;
+ }
+ case ESP_EVENT_FREE_BUS: {
+ struct esp_cmd_entry *ent = esp->active_cmd;
+ struct scsi_cmnd *cmd = ent->cmd;
+
+ if (ent->message == COMMAND_COMPLETE ||
+ ent->message == DISCONNECT)
+ scsi_esp_cmd(esp, ESP_CMD_ESEL);
+
+ if (ent->message == COMMAND_COMPLETE) {
+ esp_log_cmddone("Command done status[%x] message[%x]\n",
+ ent->status, ent->message);
+ if (ent->status == SAM_STAT_TASK_SET_FULL)
+ esp_event_queue_full(esp, ent);
+
+ if (ent->status == SAM_STAT_CHECK_CONDITION &&
+ !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
+ ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
+ esp_autosense(esp, ent);
+ } else {
+ esp_cmd_is_done(esp, ent, cmd,
+ compose_result(ent->status,
+ ent->message,
+ DID_OK));
+ }
+ } else if (ent->message == DISCONNECT) {
+ esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
+ cmd->device->id,
+ ent->tag[0], ent->tag[1]);
+
+ esp->active_cmd = NULL;
+ esp_maybe_execute_command(esp);
+ } else {
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected message %x in freebus\n",
+ ent->message);
+ esp_schedule_reset(esp);
+ return 0;
+ }
+ if (esp->active_cmd)
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ break;
+ }
+ case ESP_EVENT_MSGOUT: {
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (esp_debug & ESP_DEBUG_MSGOUT) {
+ int i;
+ printk("ESP: Sending message [ ");
+ for (i = 0; i < esp->msg_out_len; i++)
+ printk("%02x ", esp->msg_out[i]);
+ printk("]\n");
+ }
+
+ if (esp->rev == FASHME) {
+ int i;
+
+ /* Always use the fifo. */
+ for (i = 0; i < esp->msg_out_len; i++) {
+ esp_write8(esp->msg_out[i], ESP_FDATA);
+ esp_write8(0, ESP_FDATA);
+ }
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ } else {
+ if (esp->msg_out_len == 1) {
+ esp_write8(esp->msg_out[0], ESP_FDATA);
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ } else if (esp->flags & ESP_FLAG_USE_FIFO) {
+ for (i = 0; i < esp->msg_out_len; i++)
+ esp_write8(esp->msg_out[i], ESP_FDATA);
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ } else {
+ /* Use DMA. */
+ memcpy(esp->command_block,
+ esp->msg_out,
+ esp->msg_out_len);
+
+ esp->ops->send_dma_cmd(esp,
+ esp->command_block_dma,
+ esp->msg_out_len,
+ esp->msg_out_len,
+ 0,
+ ESP_CMD_DMA|ESP_CMD_TI);
+ }
+ }
+ esp_event(esp, ESP_EVENT_MSGOUT_DONE);
+ break;
+ }
+ case ESP_EVENT_MSGOUT_DONE:
+ if (esp->rev == FASHME) {
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ } else {
+ if (esp->msg_out_len > 1)
+ esp->ops->dma_invalidate(esp);
+ }
+
+ if (!(esp->ireg & ESP_INTR_DC)) {
+ if (esp->rev != FASHME)
+ scsi_esp_cmd(esp, ESP_CMD_NULL);
+ }
+ esp_event(esp, ESP_EVENT_CHECK_PHASE);
+ goto again;
+ case ESP_EVENT_MSGIN:
+ if (esp->ireg & ESP_INTR_BSERV) {
+ if (esp->rev == FASHME) {
+ if (!(esp_read8(ESP_STATUS2) &
+ ESP_STAT2_FEMPTY))
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ } else {
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+ if (esp->rev == ESP100)
+ scsi_esp_cmd(esp, ESP_CMD_NULL);
+ }
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ return 1;
+ }
+ if (esp->ireg & ESP_INTR_FDONE) {
+ u8 val;
+
+ if (esp->rev == FASHME)
+ val = esp->fifo[0];
+ else
+ val = esp_read8(ESP_FDATA);
+ esp->msg_in[esp->msg_in_len++] = val;
+
+ esp_log_msgin("Got msgin byte %x\n", val);
+
+ if (!esp_msgin_process(esp))
+ esp->msg_in_len = 0;
+
+ if (esp->rev == FASHME)
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+ if (esp->event != ESP_EVENT_FREE_BUS)
+ esp_event(esp, ESP_EVENT_CHECK_PHASE);
+ } else {
+ shost_printk(KERN_INFO, esp->host,
+ "MSGIN neither BSERV not FDON, resetting");
+ esp_schedule_reset(esp);
+ return 0;
+ }
+ break;
+ case ESP_EVENT_CMD_START:
+ memcpy(esp->command_block, esp->cmd_bytes_ptr,
+ esp->cmd_bytes_left);
+ esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
+ esp_event(esp, ESP_EVENT_CMD_DONE);
+ esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+ break;
+ case ESP_EVENT_CMD_DONE:
+ esp->ops->dma_invalidate(esp);
+ if (esp->ireg & ESP_INTR_BSERV) {
+ esp_event(esp, ESP_EVENT_CHECK_PHASE);
+ goto again;
+ }
+ esp_schedule_reset(esp);
+ return 0;
+ break;
+
+ case ESP_EVENT_RESET:
+ scsi_esp_cmd(esp, ESP_CMD_RS);
+ break;
+
+ default:
+ shost_printk(KERN_INFO, esp->host,
+ "Unexpected event %x, resetting\n", esp->event);
+ esp_schedule_reset(esp);
+ return 0;
+ break;
+ }
+ return 1;
+}
+
+static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ struct scsi_cmnd *cmd = ent->cmd;
+
+ esp_unmap_dma(esp, cmd);
+ esp_free_lun_tag(ent, cmd->device->hostdata);
+ cmd->result = DID_RESET << 16;
+
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+ esp->ops->unmap_single(esp, ent->sense_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ ent->sense_ptr = NULL;
+ }
+
+ cmd->scsi_done(cmd);
+ list_del(&ent->list);
+ esp_put_ent(esp, ent);
+}
+
+static void esp_clear_hold(struct scsi_device *dev, void *data)
+{
+ struct esp_lun_data *lp = dev->hostdata;
+
+ BUG_ON(lp->num_tagged);
+ lp->hold = 0;
+}
+
+static void esp_reset_cleanup(struct esp *esp)
+{
+ struct esp_cmd_entry *ent, *tmp;
+ int i;
+
+ list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
+ struct scsi_cmnd *cmd = ent->cmd;
+
+ list_del(&ent->list);
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ esp_put_ent(esp, ent);
+ }
+
+ list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
+ if (ent == esp->active_cmd)
+ esp->active_cmd = NULL;
+ esp_reset_cleanup_one(esp, ent);
+ }
+
+ BUG_ON(esp->active_cmd != NULL);
+
+ /* Force renegotiation of sync/wide transfers. */
+ for (i = 0; i < ESP_MAX_TARGET; i++) {
+ struct esp_target_data *tp = &esp->target[i];
+
+ tp->esp_period = 0;
+ tp->esp_offset = 0;
+ tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
+ ESP_CONFIG3_FSCSI |
+ ESP_CONFIG3_FAST);
+ tp->flags &= ~ESP_TGT_WIDE;
+ tp->flags |= ESP_TGT_CHECK_NEGO;
+
+ if (tp->starget)
+ __starget_for_each_device(tp->starget, NULL,
+ esp_clear_hold);
+ }
+ esp->flags &= ~ESP_FLAG_RESETTING;
+}
+
+/* Runs under host->lock */
+static void __esp_interrupt(struct esp *esp)
+{
+ int finish_reset, intr_done;
+ u8 phase;
+
+ /*
+ * Once INTRPT is read STATUS and SSTEP are cleared.
+ */
+ esp->sreg = esp_read8(ESP_STATUS);
+ esp->seqreg = esp_read8(ESP_SSTEP);
+ esp->ireg = esp_read8(ESP_INTRPT);
+
+ if (esp->flags & ESP_FLAG_RESETTING) {
+ finish_reset = 1;
+ } else {
+ if (esp_check_gross_error(esp))
+ return;
+
+ finish_reset = esp_check_spur_intr(esp);
+ if (finish_reset < 0)
+ return;
+ }
+
+ if (esp->ireg & ESP_INTR_SR)
+ finish_reset = 1;
+
+ if (finish_reset) {
+ esp_reset_cleanup(esp);
+ if (esp->eh_reset) {
+ complete(esp->eh_reset);
+ esp->eh_reset = NULL;
+ }
+ return;
+ }
+
+ phase = (esp->sreg & ESP_STAT_PMASK);
+ if (esp->rev == FASHME) {
+ if (((phase != ESP_DIP && phase != ESP_DOP) &&
+ esp->select_state == ESP_SELECT_NONE &&
+ esp->event != ESP_EVENT_STATUS &&
+ esp->event != ESP_EVENT_DATA_DONE) ||
+ (esp->ireg & ESP_INTR_RSEL)) {
+ esp->sreg2 = esp_read8(ESP_STATUS2);
+ if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
+ (esp->sreg2 & ESP_STAT2_F1BYTE))
+ hme_read_fifo(esp);
+ }
+ }
+
+ esp_log_intr("intr sreg[%02x] seqreg[%02x] "
+ "sreg2[%02x] ireg[%02x]\n",
+ esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
+
+ intr_done = 0;
+
+ if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
+ shost_printk(KERN_INFO, esp->host,
+ "unexpected IREG %02x\n", esp->ireg);
+ if (esp->ireg & ESP_INTR_IC)
+ esp_dump_cmd_log(esp);
+
+ esp_schedule_reset(esp);
+ } else {
+ if (!(esp->ireg & ESP_INTR_RSEL)) {
+ /* Some combination of FDONE, BSERV, DC. */
+ if (esp->select_state != ESP_SELECT_NONE)
+ intr_done = esp_finish_select(esp);
+ } else if (esp->ireg & ESP_INTR_RSEL) {
+ if (esp->active_cmd)
+ (void) esp_finish_select(esp);
+ intr_done = esp_reconnect(esp);
+ }
+ }
+ while (!intr_done)
+ intr_done = esp_process_event(esp);
+}
+
+irqreturn_t scsi_esp_intr(int irq, void *dev_id)
+{
+ struct esp *esp = dev_id;
+ unsigned long flags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(esp->host->host_lock, flags);
+ ret = IRQ_NONE;
+ if (esp->ops->irq_pending(esp)) {
+ ret = IRQ_HANDLED;
+ for (;;) {
+ int i;
+
+ __esp_interrupt(esp);
+ if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
+ break;
+ esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
+
+ for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
+ if (esp->ops->irq_pending(esp))
+ break;
+ }
+ if (i == ESP_QUICKIRQ_LIMIT)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_esp_intr);
+
+static void esp_get_revision(struct esp *esp)
+{
+ u8 val;
+
+ esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
+ if (esp->config2 == 0) {
+ esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
+ esp_write8(esp->config2, ESP_CFG2);
+
+ val = esp_read8(ESP_CFG2);
+ val &= ~ESP_CONFIG2_MAGIC;
+
+ esp->config2 = 0;
+ if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
+ /*
+ * If what we write to cfg2 does not come back,
+ * cfg2 is not implemented.
+ * Therefore this must be a plain esp100.
+ */
+ esp->rev = ESP100;
+ return;
+ }
+ }
+
+ esp_set_all_config3(esp, 5);
+ esp->prev_cfg3 = 5;
+ esp_write8(esp->config2, ESP_CFG2);
+ esp_write8(0, ESP_CFG3);
+ esp_write8(esp->prev_cfg3, ESP_CFG3);
+
+ val = esp_read8(ESP_CFG3);
+ if (val != 5) {
+ /* The cfg2 register is implemented, however
+ * cfg3 is not, must be esp100a.
+ */
+ esp->rev = ESP100A;
+ } else {
+ esp_set_all_config3(esp, 0);
+ esp->prev_cfg3 = 0;
+ esp_write8(esp->prev_cfg3, ESP_CFG3);
+
+ /* All of cfg{1,2,3} implemented, must be one of
+ * the fas variants, figure out which one.
+ */
+ if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
+ esp->rev = FAST;
+ esp->sync_defp = SYNC_DEFP_FAST;
+ } else {
+ esp->rev = ESP236;
+ }
+ }
+}
+
+static void esp_init_swstate(struct esp *esp)
+{
+ int i;
+
+ INIT_LIST_HEAD(&esp->queued_cmds);
+ INIT_LIST_HEAD(&esp->active_cmds);
+ INIT_LIST_HEAD(&esp->esp_cmd_pool);
+
+ /* Start with a clear state, domain validation (via ->slave_configure,
+ * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
+ * commands.
+ */
+ for (i = 0 ; i < ESP_MAX_TARGET; i++) {
+ esp->target[i].flags = 0;
+ esp->target[i].nego_goal_period = 0;
+ esp->target[i].nego_goal_offset = 0;
+ esp->target[i].nego_goal_width = 0;
+ esp->target[i].nego_goal_tags = 0;
+ }
+}
+
+/* This places the ESP into a known state at boot time. */
+static void esp_bootup_reset(struct esp *esp)
+{
+ u8 val;
+
+ /* Reset the DMA */
+ esp->ops->reset_dma(esp);
+
+ /* Reset the ESP */
+ esp_reset_esp(esp);
+
+ /* Reset the SCSI bus, but tell ESP not to generate an irq */
+ val = esp_read8(ESP_CFG1);
+ val |= ESP_CONFIG1_SRRDISAB;
+ esp_write8(val, ESP_CFG1);
+
+ scsi_esp_cmd(esp, ESP_CMD_RS);
+ udelay(400);
+
+ esp_write8(esp->config1, ESP_CFG1);
+
+ /* Eat any bitrot in the chip and we are done... */
+ esp_read8(ESP_INTRPT);
+}
+
+static void esp_set_clock_params(struct esp *esp)
+{
+ int fhz;
+ u8 ccf;
+
+ /* This is getting messy but it has to be done correctly or else
+ * you get weird behavior all over the place. We are trying to
+ * basically figure out three pieces of information.
+ *
+ * a) Clock Conversion Factor
+ *
+ * This is a representation of the input crystal clock frequency
+ * going into the ESP on this machine. Any operation whose timing
+ * is longer than 400ns depends on this value being correct. For
+ * example, you'll get blips for arbitration/selection during high
+ * load or with multiple targets if this is not set correctly.
+ *
+ * b) Selection Time-Out
+ *
+ * The ESP isn't very bright and will arbitrate for the bus and try
+ * to select a target forever if you let it. This value tells the
+ * ESP when it has taken too long to negotiate and that it should
+ * interrupt the CPU so we can see what happened. The value is
+ * computed as follows (from NCR/Symbios chip docs).
+ *
+ * (Time Out Period) * (Input Clock)
+ * STO = ----------------------------------
+ * (8192) * (Clock Conversion Factor)
+ *
+ * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
+ *
+ * c) Imperical constants for synchronous offset and transfer period
+ * register values
+ *
+ * This entails the smallest and largest sync period we could ever
+ * handle on this ESP.
+ */
+ fhz = esp->cfreq;
+
+ ccf = ((fhz / 1000000) + 4) / 5;
+ if (ccf == 1)
+ ccf = 2;
+
+ /* If we can't find anything reasonable, just assume 20MHZ.
+ * This is the clock frequency of the older sun4c's where I've
+ * been unable to find the clock-frequency PROM property. All
+ * other machines provide useful values it seems.
+ */
+ if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
+ fhz = 20000000;
+ ccf = 4;
+ }
+
+ esp->cfact = (ccf == 8 ? 0 : ccf);
+ esp->cfreq = fhz;
+ esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
+ esp->ctick = ESP_TICK(ccf, esp->ccycle);
+ esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
+ esp->sync_defp = SYNC_DEFP_SLOW;
+}
+
+static const char *esp_chip_names[] = {
+ "ESP100",
+ "ESP100A",
+ "ESP236",
+ "FAS236",
+ "FAS100A",
+ "FAST",
+ "FASHME",
+ "AM53C974",
+};
+
+static struct scsi_transport_template *esp_transport_template;
+
+int scsi_esp_register(struct esp *esp, struct device *dev)
+{
+ static int instance;
+ int err;
+
+ if (!esp->num_tags)
+ esp->num_tags = ESP_DEFAULT_TAGS;
+ esp->host->transportt = esp_transport_template;
+ esp->host->max_lun = ESP_MAX_LUN;
+ esp->host->cmd_per_lun = 2;
+ esp->host->unique_id = instance;
+
+ esp_set_clock_params(esp);
+
+ esp_get_revision(esp);
+
+ esp_init_swstate(esp);
+
+ esp_bootup_reset(esp);
+
+ dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+ esp->host->unique_id, esp->regs, esp->dma_regs,
+ esp->host->irq);
+ dev_printk(KERN_INFO, dev,
+ "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
+ esp->host->unique_id, esp_chip_names[esp->rev],
+ esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
+
+ /* Let the SCSI bus reset settle. */
+ ssleep(esp_bus_reset_settle);
+
+ err = scsi_add_host(esp->host, dev);
+ if (err)
+ return err;
+
+ instance++;
+
+ scsi_scan_host(esp->host);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_esp_register);
+
+void scsi_esp_unregister(struct esp *esp)
+{
+ scsi_remove_host(esp->host);
+}
+EXPORT_SYMBOL(scsi_esp_unregister);
+
+static int esp_target_alloc(struct scsi_target *starget)
+{
+ struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
+ struct esp_target_data *tp = &esp->target[starget->id];
+
+ tp->starget = starget;
+
+ return 0;
+}
+
+static void esp_target_destroy(struct scsi_target *starget)
+{
+ struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
+ struct esp_target_data *tp = &esp->target[starget->id];
+
+ tp->starget = NULL;
+}
+
+static int esp_slave_alloc(struct scsi_device *dev)
+{
+ struct esp *esp = shost_priv(dev->host);
+ struct esp_target_data *tp = &esp->target[dev->id];
+ struct esp_lun_data *lp;
+
+ lp = kzalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+ dev->hostdata = lp;
+
+ spi_min_period(tp->starget) = esp->min_period;
+ spi_max_offset(tp->starget) = 15;
+
+ if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
+ spi_max_width(tp->starget) = 1;
+ else
+ spi_max_width(tp->starget) = 0;
+
+ return 0;
+}
+
+static int esp_slave_configure(struct scsi_device *dev)
+{
+ struct esp *esp = shost_priv(dev->host);
+ struct esp_target_data *tp = &esp->target[dev->id];
+
+ if (dev->tagged_supported)
+ scsi_change_queue_depth(dev, esp->num_tags);
+
+ tp->flags |= ESP_TGT_DISCONNECT;
+
+ if (!spi_initial_dv(dev->sdev_target))
+ spi_dv_device(dev);
+
+ return 0;
+}
+
+static void esp_slave_destroy(struct scsi_device *dev)
+{
+ struct esp_lun_data *lp = dev->hostdata;
+
+ kfree(lp);
+ dev->hostdata = NULL;
+}
+
+static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ struct esp *esp = shost_priv(cmd->device->host);
+ struct esp_cmd_entry *ent, *tmp;
+ struct completion eh_done;
+ unsigned long flags;
+
+ /* XXX This helps a lot with debugging but might be a bit
+ * XXX much for the final driver.
+ */
+ spin_lock_irqsave(esp->host->host_lock, flags);
+ shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
+ cmd, cmd->cmnd[0]);
+ ent = esp->active_cmd;
+ if (ent)
+ shost_printk(KERN_ERR, esp->host,
+ "Current command [%p:%02x]\n",
+ ent->cmd, ent->cmd->cmnd[0]);
+ list_for_each_entry(ent, &esp->queued_cmds, list) {
+ shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
+ ent->cmd, ent->cmd->cmnd[0]);
+ }
+ list_for_each_entry(ent, &esp->active_cmds, list) {
+ shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
+ ent->cmd, ent->cmd->cmnd[0]);
+ }
+ esp_dump_cmd_log(esp);
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ spin_lock_irqsave(esp->host->host_lock, flags);
+
+ ent = NULL;
+ list_for_each_entry(tmp, &esp->queued_cmds, list) {
+ if (tmp->cmd == cmd) {
+ ent = tmp;
+ break;
+ }
+ }
+
+ if (ent) {
+ /* Easiest case, we didn't even issue the command
+ * yet so it is trivial to abort.
+ */
+ list_del(&ent->list);
+
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+
+ esp_put_ent(esp, ent);
+
+ goto out_success;
+ }
+
+ init_completion(&eh_done);
+
+ ent = esp->active_cmd;
+ if (ent && ent->cmd == cmd) {
+ /* Command is the currently active command on
+ * the bus. If we already have an output message
+ * pending, no dice.
+ */
+ if (esp->msg_out_len)
+ goto out_failure;
+
+ /* Send out an abort, encouraging the target to
+ * go to MSGOUT phase by asserting ATN.
+ */
+ esp->msg_out[0] = ABORT_TASK_SET;
+ esp->msg_out_len = 1;
+ ent->eh_done = &eh_done;
+
+ scsi_esp_cmd(esp, ESP_CMD_SATN);
+ } else {
+ /* The command is disconnected. This is not easy to
+ * abort. For now we fail and let the scsi error
+ * handling layer go try a scsi bus reset or host
+ * reset.
+ *
+ * What we could do is put together a scsi command
+ * solely for the purpose of sending an abort message
+ * to the target. Coming up with all the code to
+ * cook up scsi commands, special case them everywhere,
+ * etc. is for questionable gain and it would be better
+ * if the generic scsi error handling layer could do at
+ * least some of that for us.
+ *
+ * Anyways this is an area for potential future improvement
+ * in this driver.
+ */
+ goto out_failure;
+ }
+
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
+ spin_lock_irqsave(esp->host->host_lock, flags);
+ ent->eh_done = NULL;
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ return FAILED;
+ }
+
+ return SUCCESS;
+
+out_success:
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+ return SUCCESS;
+
+out_failure:
+ /* XXX This might be a good location to set ESP_TGT_BROKEN
+ * XXX since we know which target/lun in particular is
+ * XXX causing trouble.
+ */
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+ return FAILED;
+}
+
+static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct esp *esp = shost_priv(cmd->device->host);
+ struct completion eh_reset;
+ unsigned long flags;
+
+ init_completion(&eh_reset);
+
+ spin_lock_irqsave(esp->host->host_lock, flags);
+
+ esp->eh_reset = &eh_reset;
+
+ /* XXX This is too simple... We should add lots of
+ * XXX checks here so that if we find that the chip is
+ * XXX very wedged we return failure immediately so
+ * XXX that we can perform a full chip reset.
+ */
+ esp->flags |= ESP_FLAG_RESETTING;
+ scsi_esp_cmd(esp, ESP_CMD_RS);
+
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ ssleep(esp_bus_reset_settle);
+
+ if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
+ spin_lock_irqsave(esp->host->host_lock, flags);
+ esp->eh_reset = NULL;
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ return FAILED;
+ }
+
+ return SUCCESS;
+}
+
+/* All bets are off, reset the entire device. */
+static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct esp *esp = shost_priv(cmd->device->host);
+ unsigned long flags;
+
+ spin_lock_irqsave(esp->host->host_lock, flags);
+ esp_bootup_reset(esp);
+ esp_reset_cleanup(esp);
+ spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+ ssleep(esp_bus_reset_settle);
+
+ return SUCCESS;
+}
+
+static const char *esp_info(struct Scsi_Host *host)
+{
+ return "esp";
+}
+
+struct scsi_host_template scsi_esp_template = {
+ .module = THIS_MODULE,
+ .name = "esp",
+ .info = esp_info,
+ .queuecommand = esp_queuecommand,
+ .target_alloc = esp_target_alloc,
+ .target_destroy = esp_target_destroy,
+ .slave_alloc = esp_slave_alloc,
+ .slave_configure = esp_slave_configure,
+ .slave_destroy = esp_slave_destroy,
+ .eh_abort_handler = esp_eh_abort_handler,
+ .eh_bus_reset_handler = esp_eh_bus_reset_handler,
+ .eh_host_reset_handler = esp_eh_host_reset_handler,
+ .can_queue = 7,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .use_clustering = ENABLE_CLUSTERING,
+ .max_sectors = 0xffff,
+ .skip_settle_delay = 1,
+ .use_blk_tags = 1,
+};
+EXPORT_SYMBOL(scsi_esp_template);
+
+static void esp_get_signalling(struct Scsi_Host *host)
+{
+ struct esp *esp = shost_priv(host);
+ enum spi_signal_type type;
+
+ if (esp->flags & ESP_FLAG_DIFFERENTIAL)
+ type = SPI_SIGNAL_HVD;
+ else
+ type = SPI_SIGNAL_SE;
+
+ spi_signalling(host) = type;
+}
+
+static void esp_set_offset(struct scsi_target *target, int offset)
+{
+ struct Scsi_Host *host = dev_to_shost(target->dev.parent);
+ struct esp *esp = shost_priv(host);
+ struct esp_target_data *tp = &esp->target[target->id];
+
+ if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+ tp->nego_goal_offset = 0;
+ else
+ tp->nego_goal_offset = offset;
+ tp->flags |= ESP_TGT_CHECK_NEGO;
+}
+
+static void esp_set_period(struct scsi_target *target, int period)
+{
+ struct Scsi_Host *host = dev_to_shost(target->dev.parent);
+ struct esp *esp = shost_priv(host);
+ struct esp_target_data *tp = &esp->target[target->id];
+
+ tp->nego_goal_period = period;
+ tp->flags |= ESP_TGT_CHECK_NEGO;
+}
+
+static void esp_set_width(struct scsi_target *target, int width)
+{
+ struct Scsi_Host *host = dev_to_shost(target->dev.parent);
+ struct esp *esp = shost_priv(host);
+ struct esp_target_data *tp = &esp->target[target->id];
+
+ tp->nego_goal_width = (width ? 1 : 0);
+ tp->flags |= ESP_TGT_CHECK_NEGO;
+}
+
+static struct spi_function_template esp_transport_ops = {
+ .set_offset = esp_set_offset,
+ .show_offset = 1,
+ .set_period = esp_set_period,
+ .show_period = 1,
+ .set_width = esp_set_width,
+ .show_width = 1,
+ .get_signalling = esp_get_signalling,
+};
+
+static int __init esp_init(void)
+{
+ BUILD_BUG_ON(sizeof(struct scsi_pointer) <
+ sizeof(struct esp_cmd_priv));
+
+ esp_transport_template = spi_attach_transport(&esp_transport_ops);
+ if (!esp_transport_template)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit esp_exit(void)
+{
+ spi_release_transport(esp_transport_template);
+}
+
+MODULE_DESCRIPTION("ESP SCSI driver core");
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(esp_bus_reset_settle, int, 0);
+MODULE_PARM_DESC(esp_bus_reset_settle,
+ "ESP scsi bus reset delay in seconds");
+
+module_param(esp_debug, int, 0);
+MODULE_PARM_DESC(esp_debug,
+"ESP bitmapped debugging message enable value:\n"
+" 0x00000001 Log interrupt events\n"
+" 0x00000002 Log scsi commands\n"
+" 0x00000004 Log resets\n"
+" 0x00000008 Log message in events\n"
+" 0x00000010 Log message out events\n"
+" 0x00000020 Log command completion\n"
+" 0x00000040 Log disconnects\n"
+" 0x00000080 Log data start\n"
+" 0x00000100 Log data done\n"
+" 0x00000200 Log reconnects\n"
+" 0x00000400 Log auto-sense data\n"
+);
+
+module_init(esp_init);
+module_exit(esp_exit);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
new file mode 100644
index 000000000..84dcbe4a6
--- /dev/null
+++ b/drivers/scsi/esp_scsi.h
@@ -0,0 +1,583 @@
+/* esp_scsi.h: Defines and structures for the ESP driver.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifndef _ESP_SCSI_H
+#define _ESP_SCSI_H
+
+ /* Access Description Offset */
+#define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */
+#define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */
+#define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */
+#define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */
+#define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */
+#define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */
+#define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */
+#define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */
+#define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */
+#define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */
+#define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */
+#define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */
+#define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */
+#define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */
+#define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */
+#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */
+#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */
+#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */
+#define ESP_CFG4 0x0dUL /* rw Fourth cfg register 0x34 */
+#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */
+#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
+#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
+#define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */
+#define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */
+
+#define SBUS_ESP_REG_SIZE 0x40UL
+
+/* Bitfield meanings for the above registers. */
+
+/* ESP config reg 1, read-write, found on all ESP chips */
+#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
+#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
+#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
+#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
+#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
+#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
+
+/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
+#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */
+#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */
+#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
+#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */
+#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
+#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
+#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */
+#define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */
+#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */
+#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */
+#define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */
+#define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */
+#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
+
+/* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */
+#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */
+#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */
+#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */
+#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */
+#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */
+#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */
+#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */
+#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */
+#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */
+#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */
+#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */
+#define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */
+#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */
+#define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */
+#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
+#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
+
+/* ESP config register 4 read-write, found only on am53c974 chips */
+#define ESP_CONFIG4_RADE 0x04 /* Active negation */
+#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */
+#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */
+#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */
+#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */
+
+#define ESP_CONFIG_GE_12NS (0)
+#define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1)
+#define ESP_CONFIG_GE_35NS (ESP_CONFIG_GE0)
+#define ESP_CONFIG_GE_0NS (ESP_CONFIG_GE0 | ESP_CONFIG_GE1)
+
+/* ESP command register read-write */
+/* Group 1 commands: These may be sent at any point in time to the ESP
+ * chip. None of them can generate interrupts 'cept
+ * the "SCSI bus reset" command if you have not disabled
+ * SCSI reset interrupts in the config1 ESP register.
+ */
+#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
+#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
+#define ESP_CMD_RC 0x02 /* Chip reset */
+#define ESP_CMD_RS 0x03 /* SCSI bus reset */
+
+/* Group 2 commands: ESP must be an initiator and connected to a target
+ * for these commands to work.
+ */
+#define ESP_CMD_TI 0x10 /* Transfer Information */
+#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
+#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
+#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
+#define ESP_CMD_SATN 0x1a /* Set ATN */
+#define ESP_CMD_RATN 0x1b /* De-assert ATN */
+
+/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
+ * to a target as the initiator for these commands to work.
+ */
+#define ESP_CMD_SMSG 0x20 /* Send message */
+#define ESP_CMD_SSTAT 0x21 /* Send status */
+#define ESP_CMD_SDATA 0x22 /* Send data */
+#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
+#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
+#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
+#define ESP_CMD_DCNCT 0x27 /* Disconnect */
+#define ESP_CMD_RMSG 0x28 /* Receive Message */
+#define ESP_CMD_RCMD 0x29 /* Receive Command */
+#define ESP_CMD_RDATA 0x2a /* Receive Data */
+#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
+
+/* Group 4 commands: The ESP must be in the disconnected state and must
+ * not be connected to any targets as initiator for
+ * these commands to work.
+ */
+#define ESP_CMD_RSEL 0x40 /* Reselect */
+#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
+#define ESP_CMD_SELA 0x42 /* Select w/ATN */
+#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
+#define ESP_CMD_ESEL 0x44 /* Enable selection */
+#define ESP_CMD_DSEL 0x45 /* Disable selections */
+#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
+#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
+
+/* This bit enables the ESP's DMA on the SBus */
+#define ESP_CMD_DMA 0x80 /* Do DMA? */
+
+/* ESP status register read-only */
+#define ESP_STAT_PIO 0x01 /* IO phase bit */
+#define ESP_STAT_PCD 0x02 /* CD phase bit */
+#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
+#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
+#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
+#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
+#define ESP_STAT_PERR 0x20 /* Parity error */
+#define ESP_STAT_SPAM 0x40 /* Real bad error */
+/* This indicates the 'interrupt pending' condition on esp236, it is a reserved
+ * bit on other revs of the ESP.
+ */
+#define ESP_STAT_INTR 0x80 /* Interrupt */
+
+/* The status register can be masked with ESP_STAT_PMASK and compared
+ * with the following values to determine the current phase the ESP
+ * (at least thinks it) is in. For our purposes we also add our own
+ * software 'done' bit for our phase management engine.
+ */
+#define ESP_DOP (0) /* Data Out */
+#define ESP_DIP (ESP_STAT_PIO) /* Data In */
+#define ESP_CMDP (ESP_STAT_PCD) /* Command */
+#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
+#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
+#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
+
+/* HME only: status 2 register */
+#define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */
+#define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */
+#define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */
+#define ESP_STAT2_CREGA 0x08 /* The command reg is active now */
+#define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */
+#define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */
+#define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */
+#define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */
+
+/* ESP interrupt register read-only */
+#define ESP_INTR_S 0x01 /* Select w/o ATN */
+#define ESP_INTR_SATN 0x02 /* Select w/ATN */
+#define ESP_INTR_RSEL 0x04 /* Reselected */
+#define ESP_INTR_FDONE 0x08 /* Function done */
+#define ESP_INTR_BSERV 0x10 /* Bus service */
+#define ESP_INTR_DC 0x20 /* Disconnect */
+#define ESP_INTR_IC 0x40 /* Illegal command given */
+#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
+
+/* ESP sequence step register read-only */
+#define ESP_STEP_VBITS 0x07 /* Valid bits */
+#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
+#define ESP_STEP_SID 0x01 /* One msg byte sent */
+#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
+#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
+ * bytes to be lost
+ */
+#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
+
+/* Ho hum, some ESP's set the step register to this as well... */
+#define ESP_STEP_FINI5 0x05
+#define ESP_STEP_FINI6 0x06
+#define ESP_STEP_FINI7 0x07
+
+/* ESP chip-test register read-write */
+#define ESP_TEST_TARG 0x01 /* Target test mode */
+#define ESP_TEST_INI 0x02 /* Initiator test mode */
+#define ESP_TEST_TS 0x04 /* Tristate test mode */
+
+/* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_F100A 0x00 /* ESP FAS100A */
+#define ESP_UID_F236 0x02 /* ESP FAS236 */
+#define ESP_UID_REV 0x07 /* ESP revision */
+#define ESP_UID_FAM 0xf8 /* ESP family */
+
+/* ESP fifo flags register read-only */
+/* Note that the following implies a 16 byte FIFO on the ESP. */
+#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
+#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */
+#define ESP_FF_SSTEP 0xe0 /* Sequence step */
+
+/* ESP clock conversion factor register write-only */
+#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
+#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
+#define ESP_CCF_F2 0x02 /* 10MHz */
+#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
+#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
+#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
+#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
+#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
+
+/* HME only... */
+#define ESP_BUSID_RESELID 0x10
+#define ESP_BUSID_CTR32BIT 0x40
+
+#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */
+#define ESP_TIMEO_CONST 8192
+#define ESP_NEG_DEFP(mhz, cfact) \
+ ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
+#define ESP_HZ_TO_CYCLE(hertz) ((1000000000) / ((hertz) / 1000))
+#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
+
+/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
+ * input clock rates we try to do 10mb/s although I don't think a transfer can
+ * even run that fast with an ESP even with DMA2 scatter gather pipelining.
+ */
+#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
+#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
+
+struct esp_cmd_priv {
+ union {
+ dma_addr_t dma_addr;
+ int num_sg;
+ } u;
+
+ int cur_residue;
+ struct scatterlist *cur_sg;
+ int tot_residue;
+};
+#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
+
+enum esp_rev {
+ ESP100 = 0x00, /* NCR53C90 - very broken */
+ ESP100A = 0x01, /* NCR53C90A */
+ ESP236 = 0x02,
+ FAS236 = 0x03,
+ FAS100A = 0x04,
+ FAST = 0x05,
+ FASHME = 0x06,
+ PCSCSI = 0x07, /* AM53c974 */
+};
+
+struct esp_cmd_entry {
+ struct list_head list;
+
+ struct scsi_cmnd *cmd;
+
+ unsigned int saved_cur_residue;
+ struct scatterlist *saved_cur_sg;
+ unsigned int saved_tot_residue;
+
+ u8 flags;
+#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */
+#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */
+#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
+#define ESP_CMD_FLAG_RESIDUAL 0x08 /* AM53c974 BLAST residual */
+
+ u8 tag[2];
+ u8 orig_tag[2];
+
+ u8 status;
+ u8 message;
+
+ unsigned char *sense_ptr;
+ unsigned char *saved_sense_ptr;
+ dma_addr_t sense_dma;
+
+ struct completion *eh_done;
+};
+
+#define ESP_DEFAULT_TAGS 16
+
+#define ESP_MAX_TARGET 16
+#define ESP_MAX_LUN 8
+#define ESP_MAX_TAG 256
+
+struct esp_lun_data {
+ struct esp_cmd_entry *non_tagged_cmd;
+ int num_tagged;
+ int hold;
+ struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG];
+};
+
+struct esp_target_data {
+ /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which
+ * match the currently negotiated settings for this target. The SCSI
+ * protocol values are maintained in spi_{offset,period,wide}(starget).
+ */
+ u8 esp_period;
+ u8 esp_offset;
+ u8 esp_config3;
+
+ u8 flags;
+#define ESP_TGT_WIDE 0x01
+#define ESP_TGT_DISCONNECT 0x02
+#define ESP_TGT_NEGO_WIDE 0x04
+#define ESP_TGT_NEGO_SYNC 0x08
+#define ESP_TGT_CHECK_NEGO 0x40
+#define ESP_TGT_BROKEN 0x80
+
+ /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this
+ * device we will try to negotiate the following parameters.
+ */
+ u8 nego_goal_period;
+ u8 nego_goal_offset;
+ u8 nego_goal_width;
+ u8 nego_goal_tags;
+
+ struct scsi_target *starget;
+};
+
+struct esp_event_ent {
+ u8 type;
+#define ESP_EVENT_TYPE_EVENT 0x01
+#define ESP_EVENT_TYPE_CMD 0x02
+ u8 val;
+
+ u8 sreg;
+ u8 seqreg;
+ u8 sreg2;
+ u8 ireg;
+ u8 select_state;
+ u8 event;
+ u8 __pad;
+};
+
+struct esp;
+struct esp_driver_ops {
+ /* Read and write the ESP 8-bit registers. On some
+ * applications of the ESP chip the registers are at 4-byte
+ * instead of 1-byte intervals.
+ */
+ void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
+ u8 (*esp_read8)(struct esp *esp, unsigned long reg);
+
+ /* Map and unmap DMA memory. Eventually the driver will be
+ * converted to the generic DMA API as soon as SBUS is able to
+ * cope with that. At such time we can remove this.
+ */
+ dma_addr_t (*map_single)(struct esp *esp, void *buf,
+ size_t sz, int dir);
+ int (*map_sg)(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir);
+ void (*unmap_single)(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir);
+ void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir);
+
+ /* Return non-zero if there is an IRQ pending. Usually this
+ * status bit lives in the DMA controller sitting in front of
+ * the ESP. This has to be accurate or else the ESP interrupt
+ * handler will not run.
+ */
+ int (*irq_pending)(struct esp *esp);
+
+ /* Return the maximum allowable size of a DMA transfer for a
+ * given buffer.
+ */
+ u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr,
+ u32 dma_len);
+
+ /* Reset the DMA engine entirely. On return, ESP interrupts
+ * should be enabled. Often the interrupt enabling is
+ * controlled in the DMA engine.
+ */
+ void (*reset_dma)(struct esp *esp);
+
+ /* Drain any pending DMA in the DMA engine after a transfer.
+ * This is for writes to memory.
+ */
+ void (*dma_drain)(struct esp *esp);
+
+ /* Invalidate the DMA engine after a DMA transfer. */
+ void (*dma_invalidate)(struct esp *esp);
+
+ /* Setup an ESP command that will use a DMA transfer.
+ * The 'esp_count' specifies what transfer length should be
+ * programmed into the ESP transfer counter registers, whereas
+ * the 'dma_count' is the length that should be programmed into
+ * the DMA controller. Usually they are the same. If 'write'
+ * is non-zero, this transfer is a write into memory. 'cmd'
+ * holds the ESP command that should be issued by calling
+ * scsi_esp_cmd() at the appropriate time while programming
+ * the DMA hardware.
+ */
+ void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd);
+
+ /* Return non-zero if the DMA engine is reporting an error
+ * currently.
+ */
+ int (*dma_error)(struct esp *esp);
+};
+
+#define ESP_MAX_MSG_SZ 8
+#define ESP_EVENT_LOG_SZ 32
+
+#define ESP_QUICKIRQ_LIMIT 100
+#define ESP_RESELECT_TAG_LIMIT 2500
+
+struct esp {
+ void __iomem *regs;
+ void __iomem *dma_regs;
+
+ const struct esp_driver_ops *ops;
+
+ struct Scsi_Host *host;
+ void *dev;
+
+ struct esp_cmd_entry *active_cmd;
+
+ struct list_head queued_cmds;
+ struct list_head active_cmds;
+
+ u8 *command_block;
+ dma_addr_t command_block_dma;
+
+ unsigned int data_dma_len;
+
+ /* The following are used to determine the cause of an IRQ. Upon every
+ * IRQ entry we synchronize these with the hardware registers.
+ */
+ u8 sreg;
+ u8 seqreg;
+ u8 sreg2;
+ u8 ireg;
+
+ u32 prev_hme_dmacsr;
+ u8 prev_soff;
+ u8 prev_stp;
+ u8 prev_cfg3;
+ u8 num_tags;
+
+ struct list_head esp_cmd_pool;
+
+ struct esp_target_data target[ESP_MAX_TARGET];
+
+ int fifo_cnt;
+ u8 fifo[16];
+
+ struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ];
+ int esp_event_cur;
+
+ u8 msg_out[ESP_MAX_MSG_SZ];
+ int msg_out_len;
+
+ u8 msg_in[ESP_MAX_MSG_SZ];
+ int msg_in_len;
+
+ u8 bursts;
+ u8 config1;
+ u8 config2;
+ u8 config4;
+
+ u8 scsi_id;
+ u32 scsi_id_mask;
+
+ enum esp_rev rev;
+
+ u32 flags;
+#define ESP_FLAG_DIFFERENTIAL 0x00000001
+#define ESP_FLAG_RESETTING 0x00000002
+#define ESP_FLAG_DOING_SLOWCMD 0x00000004
+#define ESP_FLAG_WIDE_CAPABLE 0x00000008
+#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
+#define ESP_FLAG_DISABLE_SYNC 0x00000020
+#define ESP_FLAG_USE_FIFO 0x00000040
+
+ u8 select_state;
+#define ESP_SELECT_NONE 0x00 /* Not selecting */
+#define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */
+#define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */
+
+ /* When we are not selecting, we are expecting an event. */
+ u8 event;
+#define ESP_EVENT_NONE 0x00
+#define ESP_EVENT_CMD_START 0x01
+#define ESP_EVENT_CMD_DONE 0x02
+#define ESP_EVENT_DATA_IN 0x03
+#define ESP_EVENT_DATA_OUT 0x04
+#define ESP_EVENT_DATA_DONE 0x05
+#define ESP_EVENT_MSGIN 0x06
+#define ESP_EVENT_MSGIN_MORE 0x07
+#define ESP_EVENT_MSGIN_DONE 0x08
+#define ESP_EVENT_MSGOUT 0x09
+#define ESP_EVENT_MSGOUT_DONE 0x0a
+#define ESP_EVENT_STATUS 0x0b
+#define ESP_EVENT_FREE_BUS 0x0c
+#define ESP_EVENT_CHECK_PHASE 0x0d
+#define ESP_EVENT_RESET 0x10
+
+ /* Probed in esp_get_clock_params() */
+ u32 cfact;
+ u32 cfreq;
+ u32 ccycle;
+ u32 ctick;
+ u32 neg_defp;
+ u32 sync_defp;
+
+ /* Computed in esp_reset_esp() */
+ u32 max_period;
+ u32 min_period;
+ u32 radelay;
+
+ /* Slow command state. */
+ u8 *cmd_bytes_ptr;
+ int cmd_bytes_left;
+
+ struct completion *eh_reset;
+
+ void *dma;
+ int dmarev;
+};
+
+/* A front-end driver for the ESP chip should do the following in
+ * it's device probe routine:
+ * 1) Allocate the host and private area using scsi_host_alloc()
+ * with size 'sizeof(struct esp)'. The first argument to
+ * scsi_host_alloc() should be &scsi_esp_template.
+ * 2) Set host->max_id as appropriate.
+ * 3) Set esp->host to the scsi_host itself, and esp->dev
+ * to the device object pointer.
+ * 4) Hook up esp->ops to the front-end implementation.
+ * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE
+ * in esp->flags.
+ * 6) Map the DMA and ESP chip registers.
+ * 7) DMA map the ESP command block, store the DMA address
+ * in esp->command_block_dma.
+ * 8) Register the scsi_esp_intr() interrupt handler.
+ * 9) Probe for and provide the following chip properties:
+ * esp->scsi_id (assign to esp->host->this_id too)
+ * esp->scsi_id_mask
+ * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL
+ * esp->cfreq
+ * DMA burst bit mask in esp->bursts, if necessary
+ * 10) Perform any actions necessary before the ESP device can
+ * be programmed for the first time. On some configs, for
+ * example, the DMA engine has to be reset before ESP can
+ * be programmed.
+ * 11) If necessary, call dev_set_drvdata() as needed.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure
+ * and a device pointer if possible.
+ * 13) Check scsi_esp_register() return value, release all resources
+ * if an error was returned.
+ */
+extern struct scsi_host_template scsi_esp_template;
+extern int scsi_esp_register(struct esp *, struct device *);
+
+extern void scsi_esp_unregister(struct esp *);
+extern irqreturn_t scsi_esp_intr(int, void *);
+extern void scsi_esp_cmd(struct esp *, u8);
+
+#endif /* !(_ESP_SCSI_H) */
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
new file mode 100644
index 000000000..aed0f5db3
--- /dev/null
+++ b/drivers/scsi/fcoe/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_FCOE) += fcoe.o
+obj-$(CONFIG_LIBFCOE) += libfcoe.o
+
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
new file mode 100644
index 000000000..ec193a835
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -0,0 +1,2965 @@
+/*
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfcoe.h>
+
+#include "fcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FCoE");
+MODULE_LICENSE("GPL v2");
+
+/* Performance tuning parameters for fcoe */
+static unsigned int fcoe_ddp_min = 4096;
+module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
+ "Direct Data Placement (DDP).");
+
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
+
+static struct workqueue_struct *fcoe_wq;
+
+/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
+static DECLARE_COMPLETION(fcoe_flush_completion);
+
+/* fcoe host list */
+/* must only by accessed under the RTNL mutex */
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+
+/* Function Prototypes */
+static int fcoe_reset(struct Scsi_Host *);
+static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
+static int fcoe_rcv(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+static int fcoe_percpu_receive_thread(void *);
+static void fcoe_percpu_clean(struct fc_lport *);
+static int fcoe_link_ok(struct fc_lport *);
+
+static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
+static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
+
+static int fcoe_device_notification(struct notifier_block *, ulong, void *);
+static void fcoe_dev_setup(void);
+static void fcoe_dev_cleanup(void);
+static struct fcoe_interface
+*fcoe_hostlist_lookup_port(const struct net_device *);
+
+static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+
+static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
+static void fcoe_update_src_mac(struct fc_lport *, u8 *);
+static u8 *fcoe_get_src_mac(struct fc_lport *);
+static void fcoe_destroy_work(struct work_struct *);
+
+static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
+ unsigned int);
+static int fcoe_ddp_done(struct fc_lport *, u16);
+static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
+ unsigned int);
+static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
+
+static bool fcoe_match(struct net_device *netdev);
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
+static int fcoe_destroy(struct net_device *netdev);
+static int fcoe_enable(struct net_device *netdev);
+static int fcoe_disable(struct net_device *netdev);
+
+/* fcoe_syfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
+static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
+ u32 did, struct fc_frame *,
+ unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *, u32 timeout);
+static void fcoe_recv_frame(struct sk_buff *skb);
+
+/* notification function for packets from net device */
+static struct notifier_block fcoe_notifier = {
+ .notifier_call = fcoe_device_notification,
+};
+
+/* notification function for CPU hotplug events */
+static struct notifier_block fcoe_cpu_notifier = {
+ .notifier_call = fcoe_cpu_callback,
+};
+
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
+static struct scsi_transport_template *fcoe_nport_scsi_transport;
+static struct scsi_transport_template *fcoe_vport_scsi_transport;
+
+static int fcoe_vport_destroy(struct fc_vport *);
+static int fcoe_vport_create(struct fc_vport *, bool disabled);
+static int fcoe_vport_disable(struct fc_vport *, bool disable);
+static void fcoe_set_vport_symbolic_name(struct fc_vport *);
+static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+ .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+ .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
+
+static struct libfc_function_template fcoe_libfc_fcn_templ = {
+ .frame_send = fcoe_xmit,
+ .ddp_setup = fcoe_ddp_setup,
+ .ddp_done = fcoe_ddp_done,
+ .ddp_target = fcoe_ddp_target,
+ .elsct_send = fcoe_elsct_send,
+ .get_lesb = fcoe_get_lesb,
+ .lport_set_port_id = fcoe_set_port_id,
+};
+
+static struct fc_function_template fcoe_nport_fc_functions = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_serial_number = 1,
+ .show_host_manufacturer = 1,
+ .show_host_model = 1,
+ .show_host_model_description = 1,
+ .show_host_hardware_version = 1,
+ .show_host_driver_version = 1,
+ .show_host_firmware_version = 1,
+ .show_host_optionrom_version = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .vport_create = fcoe_vport_create,
+ .vport_delete = fcoe_vport_destroy,
+ .vport_disable = fcoe_vport_disable,
+ .set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
+
+ .bsg_request = fc_lport_bsg_request,
+};
+
+static struct fc_function_template fcoe_vport_fc_functions = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_serial_number = 1,
+ .show_host_manufacturer = 1,
+ .show_host_model = 1,
+ .show_host_model_description = 1,
+ .show_host_hardware_version = 1,
+ .show_host_driver_version = 1,
+ .show_host_firmware_version = 1,
+ .show_host_optionrom_version = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .bsg_request = fc_lport_bsg_request,
+};
+
+static struct scsi_host_template fcoe_shost_template = {
+ .module = THIS_MODULE,
+ .name = "FCoE Driver",
+ .proc_name = FCOE_NAME,
+ .queuecommand = fc_queuecommand,
+ .eh_abort_handler = fc_eh_abort,
+ .eh_device_reset_handler = fc_eh_device_reset,
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = 0xffff,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+/**
+ * fcoe_interface_setup() - Setup a FCoE interface
+ * @fcoe: The new FCoE interface
+ * @netdev: The net device that the fcoe interface is on
+ *
+ * Returns : 0 for success
+ * Locking: must be called with the RTNL mutex held
+ */
+static int fcoe_interface_setup(struct fcoe_interface *fcoe,
+ struct net_device *netdev)
+{
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ struct netdev_hw_addr *ha;
+ struct net_device *real_dev;
+ u8 flogi_maddr[ETH_ALEN];
+ const struct net_device_ops *ops;
+
+ fcoe->netdev = netdev;
+
+ /* Let LLD initialize for FCoE */
+ ops = netdev->netdev_ops;
+ if (ops->ndo_fcoe_enable) {
+ if (ops->ndo_fcoe_enable(netdev))
+ FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
+ " specific feature for LLD.\n");
+ }
+
+ /* Do not support for bonding device */
+ if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
+ FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* look for SAN MAC address, if multiple SAN MACs exist, only
+ * use the first one for SPMA */
+ real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
+ vlan_dev_real_dev(netdev) : netdev;
+ fcoe->realdev = real_dev;
+ rcu_read_lock();
+ for_each_dev_addr(real_dev, ha) {
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(ha->addr))) {
+ memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
+ fip->spma = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ /* setup Source Mac Address */
+ if (!fip->spma)
+ memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
+
+ /*
+ * Add FCoE MAC address as second unicast MAC address
+ * or enter promiscuous mode if not capable of listening
+ * for multiple unicast MACs.
+ */
+ memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+ dev_uc_add(netdev, flogi_maddr);
+ if (fip->spma)
+ dev_uc_add(netdev, fip->ctl_src_addr);
+ if (fip->mode == FIP_MODE_VN2VN) {
+ dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
+ dev_mc_add(netdev, FIP_ALL_P2P_MACS);
+ } else
+ dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
+
+ /*
+ * setup the receive function from ethernet driver
+ * on the ethertype for the given device
+ */
+ fcoe->fcoe_packet_type.func = fcoe_rcv;
+ fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ fcoe->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&fcoe->fcoe_packet_type);
+
+ fcoe->fip_packet_type.func = fcoe_fip_recv;
+ fcoe->fip_packet_type.type = htons(ETH_P_FIP);
+ fcoe->fip_packet_type.dev = netdev;
+ dev_add_pack(&fcoe->fip_packet_type);
+
+ return 0;
+}
+
+/**
+ * fcoe_interface_create() - Create a FCoE interface on a net device
+ * @netdev: The net device to create the FCoE interface on
+ * @fip_mode: The mode to use for FIP
+ *
+ * Returns: pointer to a struct fcoe_interface or NULL on error
+ */
+static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
+ enum fip_state fip_mode)
+{
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ int size;
+ int err;
+
+ if (!try_module_get(THIS_MODULE)) {
+ FCOE_NETDEV_DBG(netdev,
+ "Could not get a reference to the module\n");
+ fcoe = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
+ FCOE_DBG("Failed to add fcoe_ctlr_device\n");
+ fcoe = ERR_PTR(-ENOMEM);
+ goto out_putmod;
+ }
+
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ ctlr->cdev = ctlr_dev;
+ fcoe = fcoe_ctlr_priv(ctlr);
+
+ dev_hold(netdev);
+
+ /*
+ * Initialize FIP.
+ */
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = fcoe_fip_send;
+ ctlr->update_mac = fcoe_update_src_mac;
+ ctlr->get_src_addr = fcoe_get_src_mac;
+
+ err = fcoe_interface_setup(fcoe, netdev);
+ if (err) {
+ fcoe_ctlr_destroy(ctlr);
+ fcoe_ctlr_device_delete(ctlr_dev);
+ dev_put(netdev);
+ fcoe = ERR_PTR(err);
+ goto out_putmod;
+ }
+
+ goto out;
+
+out_putmod:
+ module_put(THIS_MODULE);
+out:
+ return fcoe;
+}
+
+/**
+ * fcoe_interface_remove() - remove FCoE interface from netdev
+ * @fcoe: The FCoE interface to be cleaned up
+ *
+ * Caller must be holding the RTNL mutex
+ */
+static void fcoe_interface_remove(struct fcoe_interface *fcoe)
+{
+ struct net_device *netdev = fcoe->netdev;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ u8 flogi_maddr[ETH_ALEN];
+ const struct net_device_ops *ops;
+
+ /*
+ * Don't listen for Ethernet packets anymore.
+ * synchronize_net() ensures that the packet handlers are not running
+ * on another CPU. dev_remove_pack() would do that, this calls the
+ * unsyncronized version __dev_remove_pack() to avoid multiple delays.
+ */
+ __dev_remove_pack(&fcoe->fcoe_packet_type);
+ __dev_remove_pack(&fcoe->fip_packet_type);
+ synchronize_net();
+
+ /* Delete secondary MAC addresses */
+ memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+ dev_uc_del(netdev, flogi_maddr);
+ if (fip->spma)
+ dev_uc_del(netdev, fip->ctl_src_addr);
+ if (fip->mode == FIP_MODE_VN2VN) {
+ dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
+ dev_mc_del(netdev, FIP_ALL_P2P_MACS);
+ } else
+ dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
+
+ /* Tell the LLD we are done w/ FCoE */
+ ops = netdev->netdev_ops;
+ if (ops->ndo_fcoe_disable) {
+ if (ops->ndo_fcoe_disable(netdev))
+ FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
+ " specific feature for LLD.\n");
+ }
+ fcoe->removed = 1;
+}
+
+
+/**
+ * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * @fcoe: The FCoE interface to be cleaned up
+ */
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+{
+ struct net_device *netdev = fcoe->netdev;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+
+ rtnl_lock();
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ rtnl_unlock();
+
+ /* Release the self-reference taken during fcoe_interface_create() */
+ /* tear-down the FCoE controller */
+ fcoe_ctlr_destroy(fip);
+ scsi_host_put(fip->lp->host);
+ dev_put(netdev);
+ module_put(THIS_MODULE);
+}
+
+/**
+ * fcoe_fip_recv() - Handler for received FIP frames
+ * @skb: The receive skb
+ * @netdev: The associated net device
+ * @ptype: The packet_type structure which was used to register this handler
+ * @orig_dev: The original net_device the the skb was received on.
+ * (in case dev is a bond)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
+
+ fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_recv(ctlr, skb);
+ return 0;
+}
+
+/**
+ * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
+ * @port: The FCoE port
+ * @skb: The FIP/FCoE packet to be sent
+ */
+static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
+{
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(port->lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(port->lport, skb);
+}
+
+/**
+ * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
+ * @fip: The FCoE controller
+ * @skb: The FIP packet to be sent
+ */
+static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = fcoe_from_ctlr(fip)->netdev;
+ fcoe_port_send(lport_priv(fip->lp), skb);
+}
+
+/**
+ * fcoe_update_src_mac() - Update the Ethernet MAC filters
+ * @lport: The local port to update the source MAC on
+ * @addr: Unicast MAC address to add
+ *
+ * Remove any previously-set unicast MAC filter.
+ * Add secondary FCoE MAC address filter for our OUI.
+ */
+static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+
+ if (!is_zero_ether_addr(port->data_src_addr))
+ dev_uc_del(fcoe->netdev, port->data_src_addr);
+ if (!is_zero_ether_addr(addr))
+ dev_uc_add(fcoe->netdev, addr);
+ memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * fcoe_get_src_mac() - return the Ethernet source address for an lport
+ * @lport: libfc lport
+ */
+static u8 *fcoe_get_src_mac(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+
+ return port->data_src_addr;
+}
+
+/**
+ * fcoe_lport_config() - Set up a local port
+ * @lport: The local port to be setup
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_lport_config(struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = 3;
+ lport->max_rport_retry_count = 3;
+ lport->e_d_tov = 2 * 1000; /* FC-FS default */
+ lport->r_a_tov = 2 * 2 * 1000;
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ lport->does_npiv = 1;
+
+ fc_lport_init_stats(lport);
+
+ /* lport fc_lport related configuration */
+ fc_lport_config(lport);
+
+ /* offload related configuration */
+ lport->crc_offload = 0;
+ lport->seq_offload = 0;
+ lport->lro_enabled = 0;
+ lport->lro_xid = 0;
+ lport->lso_max = 0;
+
+ return 0;
+}
+
+/**
+ * fcoe_netdev_features_change - Updates the lport's offload flags based
+ * on the LLD netdev's FCoE feature flags
+ */
+static void fcoe_netdev_features_change(struct fc_lport *lport,
+ struct net_device *netdev)
+{
+ mutex_lock(&lport->lp_mutex);
+
+ if (netdev->features & NETIF_F_SG)
+ lport->sg_supp = 1;
+ else
+ lport->sg_supp = 0;
+
+ if (netdev->features & NETIF_F_FCOE_CRC) {
+ lport->crc_offload = 1;
+ FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
+ } else {
+ lport->crc_offload = 0;
+ }
+
+ if (netdev->features & NETIF_F_FSO) {
+ lport->seq_offload = 1;
+ lport->lso_max = netdev->gso_max_size;
+ FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
+ lport->lso_max);
+ } else {
+ lport->seq_offload = 0;
+ lport->lso_max = 0;
+ }
+
+ if (netdev->fcoe_ddp_xid) {
+ lport->lro_enabled = 1;
+ lport->lro_xid = netdev->fcoe_ddp_xid;
+ FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
+ lport->lro_xid);
+ } else {
+ lport->lro_enabled = 0;
+ lport->lro_xid = 0;
+ }
+
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fcoe_netdev_config() - Set up net devive for SW FCoE
+ * @lport: The local port that is associated with the net device
+ * @netdev: The associated net device
+ *
+ * Must be called after fcoe_lport_config() as it will use local port mutex
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
+{
+ u32 mfs;
+ u64 wwnn, wwpn;
+ struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_port *port;
+
+ /* Setup lport private data to point to fcoe softc */
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ ctlr = fcoe_to_ctlr(fcoe);
+
+ /*
+ * Determine max frame size based on underlying device and optional
+ * user-configured limit. If the MFS is too low, fcoe_link_ok()
+ * will return 0, so do this first.
+ */
+ mfs = netdev->mtu;
+ if (netdev->features & NETIF_F_FCOE_MTU) {
+ mfs = FCOE_MTU;
+ FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
+ }
+ mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
+ if (fc_set_mfs(lport, mfs))
+ return -EINVAL;
+
+ /* offload features support */
+ fcoe_netdev_features_change(lport, netdev);
+
+ skb_queue_head_init(&port->fcoe_pending_queue);
+ port->fcoe_pending_queue_active = 0;
+ setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
+
+ fcoe_link_speed_update(lport);
+
+ if (!lport->vport) {
+ if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
+ fc_set_wwnn(lport, wwnn);
+ if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
+ 2, 0);
+ fc_set_wwpn(lport, wwpn);
+ }
+
+ return 0;
+}
+
+/**
+ * fcoe_shost_config() - Set up the SCSI host associated with a local port
+ * @lport: The local port
+ * @dev: The device associated with the SCSI host
+ *
+ * Must be called after fcoe_lport_config() and fcoe_netdev_config()
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
+{
+ int rc = 0;
+
+ /* lport scsi host config */
+ lport->host->max_lun = FCOE_MAX_LUN;
+ lport->host->max_id = FCOE_MAX_FCP_TARGET;
+ lport->host->max_channel = 0;
+ lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ if (lport->vport)
+ lport->host->transportt = fcoe_vport_scsi_transport;
+ else
+ lport->host->transportt = fcoe_nport_scsi_transport;
+
+ /* add the new host to the SCSI-ml */
+ rc = scsi_add_host(lport->host, dev);
+ if (rc) {
+ FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
+ "error on scsi_add_host\n");
+ return rc;
+ }
+
+ if (!lport->vport)
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+
+ snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
+ "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
+ fcoe_netdev(lport)->name);
+
+ return 0;
+}
+
+
+/**
+ * fcoe_fdmi_info() - Get FDMI related info from net devive for SW FCoE
+ * @lport: The local port that is associated with the net device
+ * @netdev: The associated net device
+ *
+ * Must be called after fcoe_shost_config() as it will use local port mutex
+ *
+ */
+static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+ struct net_device *realdev;
+ int rc;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ realdev = fcoe->realdev;
+
+ if (!realdev)
+ return;
+
+ /* No FDMI state m/c for NPIV ports */
+ if (lport->vport)
+ return;
+
+ if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
+ struct netdev_fcoe_hbainfo *fdmi;
+ fdmi = kzalloc(sizeof(*fdmi), GFP_KERNEL);
+ if (!fdmi)
+ return;
+
+ rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
+ fdmi);
+ if (rc) {
+ printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
+ "information from netdev.\n");
+ return;
+ }
+
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
+ "%s",
+ fdmi->serial_number);
+ snprintf(fc_host_manufacturer(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
+ "%s",
+ fdmi->manufacturer);
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE,
+ "%s",
+ fdmi->model);
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE,
+ "%s",
+ fdmi->model_description);
+ snprintf(fc_host_hardware_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi->hardware_version);
+ snprintf(fc_host_driver_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi->driver_version);
+ snprintf(fc_host_optionrom_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi->optionrom_version);
+ snprintf(fc_host_firmware_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi->firmware_version);
+
+ /* Enable FDMI lport states */
+ lport->fdmi_enabled = 1;
+ kfree(fdmi);
+ } else {
+ lport->fdmi_enabled = 0;
+ printk(KERN_INFO "fcoe: No FDMI support.\n");
+ }
+}
+
+/**
+ * fcoe_oem_match() - The match routine for the offloaded exchange manager
+ * @fp: The I/O frame
+ *
+ * This routine will be associated with an exchange manager (EM). When
+ * the libfc exchange handling code is looking for an EM to use it will
+ * call this routine and pass it the frame that it wishes to send. This
+ * routine will return True if the associated EM is to be used and False
+ * if the echange code should continue looking for an EM.
+ *
+ * The offload EM that this routine is associated with will handle any
+ * packets that are for SCSI read requests.
+ *
+ * This has been enhanced to work when FCoE stack is operating in target
+ * mode.
+ *
+ * Returns: True for read types I/O, otherwise returns false.
+ */
+static bool fcoe_oem_match(struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fcp_cmnd *fcp;
+
+ if (fc_fcp_is_read(fr_fsp(fp)) &&
+ (fr_fsp(fp)->data_len > fcoe_ddp_min))
+ return true;
+ else if ((fr_fsp(fp) == NULL) &&
+ (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+ (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
+ fcp = fc_frame_payload_get(fp, sizeof(*fcp));
+ if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+ (ntohl(fcp->fc_dl) > fcoe_ddp_min))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * fcoe_em_config() - Allocate and configure an exchange manager
+ * @lport: The local port that the new EM will be associated with
+ *
+ * Returns: 0 on success
+ */
+static inline int fcoe_em_config(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_interface *oldfcoe = NULL;
+ struct net_device *old_real_dev, *cur_real_dev;
+ u16 min_xid = FCOE_MIN_XID;
+ u16 max_xid = FCOE_MAX_XID;
+
+ /*
+ * Check if need to allocate an em instance for
+ * offload exchange ids to be shared across all VN_PORTs/lport.
+ */
+ if (!lport->lro_enabled || !lport->lro_xid ||
+ (lport->lro_xid >= max_xid)) {
+ lport->lro_xid = 0;
+ goto skip_oem;
+ }
+
+ /*
+ * Reuse existing offload em instance in case
+ * it is already allocated on real eth device
+ */
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ cur_real_dev = fcoe->netdev;
+
+ list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
+ if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
+ else
+ old_real_dev = oldfcoe->netdev;
+
+ if (cur_real_dev == old_real_dev) {
+ fcoe->oem = oldfcoe->oem;
+ break;
+ }
+ }
+
+ if (fcoe->oem) {
+ if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
+ printk(KERN_ERR "fcoe_em_config: failed to add "
+ "offload em:%p on interface:%s\n",
+ fcoe->oem, fcoe->netdev->name);
+ return -ENOMEM;
+ }
+ } else {
+ fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
+ FCOE_MIN_XID, lport->lro_xid,
+ fcoe_oem_match);
+ if (!fcoe->oem) {
+ printk(KERN_ERR "fcoe_em_config: failed to allocate "
+ "em for offload exches on interface:%s\n",
+ fcoe->netdev->name);
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Exclude offload EM xid range from next EM xid range.
+ */
+ min_xid += lport->lro_xid + 1;
+
+skip_oem:
+ if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
+ printk(KERN_ERR "fcoe_em_config: failed to "
+ "allocate em on interface %s\n", fcoe->netdev->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * fcoe_if_destroy() - Tear down a SW FCoE instance
+ * @lport: The local port to be destroyed
+ *
+ */
+static void fcoe_if_destroy(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct net_device *netdev = fcoe->netdev;
+
+ FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
+
+ /* Logout of the fabric */
+ fc_fabric_logoff(lport);
+
+ /* Cleanup the fc_lport */
+ fc_lport_destroy(lport);
+
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ rtnl_lock();
+ if (!is_zero_ether_addr(port->data_src_addr))
+ dev_uc_del(netdev, port->data_src_addr);
+ if (lport->vport)
+ synchronize_net();
+ else
+ fcoe_interface_remove(fcoe);
+ rtnl_unlock();
+
+ /* Free queued packets for the per-CPU receive threads */
+ fcoe_percpu_clean(lport);
+
+ /* Detach from the scsi-ml */
+ fc_remove_host(lport->host);
+ scsi_remove_host(lport->host);
+
+ /* Destroy lport scsi_priv */
+ fc_fcp_destroy(lport);
+
+ /* There are no more rports or I/O, free the EM */
+ fc_exch_mgr_free(lport);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(lport);
+
+ /*
+ * Release the Scsi_Host for vport but hold on to
+ * master lport until it fcoe interface fully cleaned-up.
+ */
+ if (lport->vport)
+ scsi_host_put(lport->host);
+}
+
+/**
+ * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
+ * @lport: The local port to setup DDP for
+ * @xid: The exchange ID for this DDP transfer
+ * @sgl: The scatterlist describing this transfer
+ * @sgc: The number of sg items
+ *
+ * Returns: 0 if the DDP context was not configured
+ */
+static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
+ return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
+ xid, sgl,
+ sgc);
+
+ return 0;
+}
+
+/**
+ * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
+ * @lport: The local port to setup DDP for
+ * @xid: The exchange ID for this DDP transfer
+ * @sgl: The scatterlist describing this transfer
+ * @sgc: The number of sg items
+ *
+ * Returns: 0 if the DDP context was not configured
+ */
+static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ if (netdev->netdev_ops->ndo_fcoe_ddp_target)
+ return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
+ sgl, sgc);
+
+ return 0;
+}
+
+
+/**
+ * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
+ * @lport: The local port to complete DDP on
+ * @xid: The exchange ID for this DDP transfer
+ *
+ * Returns: the length of data that have been completed by DDP
+ */
+static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
+{
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ if (netdev->netdev_ops->ndo_fcoe_ddp_done)
+ return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
+ return 0;
+}
+
+/**
+ * fcoe_if_create() - Create a FCoE instance on an interface
+ * @fcoe: The FCoE interface to create a local port on
+ * @parent: The device pointer to be the parent in sysfs for the SCSI host
+ * @npiv: Indicates if the port is a vport or not
+ *
+ * Creates a fc_lport instance and a Scsi_Host instance and configure them.
+ *
+ * Returns: The allocated fc_lport or an error pointer
+ */
+static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
+ struct device *parent, int npiv)
+{
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct net_device *netdev = fcoe->netdev;
+ struct fc_lport *lport, *n_port;
+ struct fcoe_port *port;
+ struct Scsi_Host *shost;
+ int rc;
+ /*
+ * parent is only a vport if npiv is 1,
+ * but we'll only use vport in that case so go ahead and set it
+ */
+ struct fc_vport *vport = dev_to_vport(parent);
+
+ FCOE_NETDEV_DBG(netdev, "Create Interface\n");
+
+ if (!npiv)
+ lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
+ else
+ lport = libfc_vport_create(vport, sizeof(*port));
+
+ if (!lport) {
+ FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ port = lport_priv(lport);
+ port->lport = lport;
+ port->priv = fcoe;
+ port->get_netdev = fcoe_netdev;
+ port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
+ port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
+ INIT_WORK(&port->destroy_work, fcoe_destroy_work);
+
+ /*
+ * Need to add the lport to the hostlist
+ * so we catch NETDEV_CHANGE events.
+ */
+ fcoe_hostlist_add(lport);
+
+ /* configure a fc_lport including the exchange manager */
+ rc = fcoe_lport_config(lport);
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
+ "interface\n");
+ goto out_host_put;
+ }
+
+ if (npiv) {
+ FCOE_NETDEV_DBG(netdev, "Setting vport names, "
+ "%16.16llx %16.16llx\n",
+ vport->node_name, vport->port_name);
+ fc_set_wwnn(lport, vport->node_name);
+ fc_set_wwpn(lport, vport->port_name);
+ }
+
+ /* configure lport network properties */
+ rc = fcoe_netdev_config(lport, netdev);
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
+ "interface\n");
+ goto out_lp_destroy;
+ }
+
+ /* configure lport scsi host properties */
+ rc = fcoe_shost_config(lport, parent);
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
+ "interface\n");
+ goto out_lp_destroy;
+ }
+
+ /* Initialize the library */
+ rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
+ "interface\n");
+ goto out_lp_destroy;
+ }
+
+ /* Initialized FDMI information */
+ fcoe_fdmi_info(lport, netdev);
+
+ /*
+ * fcoe_em_alloc() and fcoe_hostlist_add() both
+ * need to be atomic with respect to other changes to the
+ * hostlist since fcoe_em_alloc() looks for an existing EM
+ * instance on host list updated by fcoe_hostlist_add().
+ *
+ * This is currently handled through the fcoe_config_mutex
+ * begin held.
+ */
+ if (!npiv)
+ /* lport exch manager allocation */
+ rc = fcoe_em_config(lport);
+ else {
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ rc = fc_exch_mgr_list_clone(n_port, lport);
+ }
+
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
+ goto out_lp_destroy;
+ }
+
+ return lport;
+
+out_lp_destroy:
+ fc_exch_mgr_free(lport);
+out_host_put:
+ fcoe_hostlist_del(lport);
+ scsi_host_put(lport->host);
+out:
+ return ERR_PTR(rc);
+}
+
+/**
+ * fcoe_if_init() - Initialization routine for fcoe.ko
+ *
+ * Attaches the SW FCoE transport to the FC transport
+ *
+ * Returns: 0 on success
+ */
+static int __init fcoe_if_init(void)
+{
+ /* attach to scsi transport */
+ fcoe_nport_scsi_transport =
+ fc_attach_transport(&fcoe_nport_fc_functions);
+ fcoe_vport_scsi_transport =
+ fc_attach_transport(&fcoe_vport_fc_functions);
+
+ if (!fcoe_nport_scsi_transport) {
+ printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * fcoe_if_exit() - Tear down fcoe.ko
+ *
+ * Detaches the SW FCoE transport from the FC transport
+ *
+ * Returns: 0 on success
+ */
+static int __exit fcoe_if_exit(void)
+{
+ fc_release_transport(fcoe_nport_scsi_transport);
+ fc_release_transport(fcoe_vport_scsi_transport);
+ fcoe_nport_scsi_transport = NULL;
+ fcoe_vport_scsi_transport = NULL;
+ return 0;
+}
+
+/**
+ * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
+ * @cpu: The CPU index of the CPU to create a receive thread for
+ */
+static void fcoe_percpu_thread_create(unsigned int cpu)
+{
+ struct fcoe_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(fcoe_percpu, cpu);
+
+ thread = kthread_create_on_node(fcoe_percpu_receive_thread,
+ (void *)p, cpu_to_node(cpu),
+ "fcoethread/%d", cpu);
+
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ p->thread = thread;
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ }
+}
+
+/**
+ * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
+ * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
+ *
+ * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
+ * current CPU's Rx thread. If the thread being destroyed is bound to
+ * the CPU processing this context the skbs will be freed.
+ */
+static void fcoe_percpu_thread_destroy(unsigned int cpu)
+{
+ struct fcoe_percpu_s *p;
+ struct task_struct *thread;
+ struct page *crc_eof;
+ struct sk_buff *skb;
+#ifdef CONFIG_SMP
+ struct fcoe_percpu_s *p0;
+ unsigned targ_cpu = get_cpu();
+#endif /* CONFIG_SMP */
+
+ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+
+ /* Prevent any new skbs from being queued for this CPU. */
+ p = &per_cpu(fcoe_percpu, cpu);
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ thread = p->thread;
+ p->thread = NULL;
+ crc_eof = p->crc_eof_page;
+ p->crc_eof_page = NULL;
+ p->crc_eof_offset = 0;
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+#ifdef CONFIG_SMP
+ /*
+ * Don't bother moving the skb's if this context is running
+ * on the same CPU that is having its thread destroyed. This
+ * can easily happen when the module is removed.
+ */
+ if (cpu != targ_cpu) {
+ p0 = &per_cpu(fcoe_percpu, targ_cpu);
+ spin_lock_bh(&p0->fcoe_rx_list.lock);
+ if (p0->thread) {
+ FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
+ cpu, targ_cpu);
+
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+ __skb_queue_tail(&p0->fcoe_rx_list, skb);
+ spin_unlock_bh(&p0->fcoe_rx_list.lock);
+ } else {
+ /*
+ * The targeted CPU is not initialized and cannot accept
+ * new skbs. Unlock the targeted CPU and drop the skbs
+ * on the CPU that is going offline.
+ */
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+ spin_unlock_bh(&p0->fcoe_rx_list.lock);
+ }
+ } else {
+ /*
+ * This scenario occurs when the module is being removed
+ * and all threads are being destroyed. skbs will continue
+ * to be shifted from the CPU thread that is being removed
+ * to the CPU thread associated with the CPU that is processing
+ * the module removal. Once there is only one CPU Rx thread it
+ * will reach this case and we will drop all skbs and later
+ * stop the thread.
+ */
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ }
+ put_cpu();
+#else
+ /*
+ * This a non-SMP scenario where the singular Rx thread is
+ * being removed. Free all skbs and stop the thread.
+ */
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+#endif
+
+ if (thread)
+ kthread_stop(thread);
+
+ if (crc_eof)
+ put_page(crc_eof);
+}
+
+/**
+ * fcoe_cpu_callback() - Handler for CPU hotplug events
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int fcoe_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
+ fcoe_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
+ fcoe_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
+ * command.
+ *
+ * This routine selects next CPU based on cpumask to distribute
+ * incoming requests in round robin.
+ *
+ * Returns: int CPU number
+ */
+static inline unsigned int fcoe_select_cpu(void)
+{
+ static unsigned int selected_cpu;
+
+ selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
+ if (selected_cpu >= nr_cpu_ids)
+ selected_cpu = cpumask_first(cpu_online_mask);
+
+ return selected_cpu;
+}
+
+/**
+ * fcoe_rcv() - Receive packets from a net device
+ * @skb: The received packet
+ * @netdev: The net device that the packet was received on
+ * @ptype: The packet type context
+ * @olddev: The last device net device
+ *
+ * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
+ * FC frame and passes the frame to libfc.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ struct fc_frame_header *fh;
+ struct fcoe_percpu_s *fps;
+ struct ethhdr *eh;
+ unsigned int cpu;
+
+ fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
+ if (unlikely(!lport)) {
+ FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n");
+ goto err2;
+ }
+ if (!lport->link_up)
+ goto err2;
+
+ FCOE_NETDEV_DBG(netdev,
+ "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
+ skb->len, skb->data_len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb),
+ skb->csum, skb->dev ? skb->dev->name : "<NULL>");
+
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+
+ if (skb == NULL)
+ return NET_RX_DROP;
+
+ eh = eth_hdr(skb);
+
+ if (is_fip_mode(ctlr) &&
+ !ether_addr_equal(eh->h_source, ctlr->dest_addr)) {
+ FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
+ eh->h_source);
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
+ FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
+ eh->h_dest);
+ goto err;
+ }
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lport;
+
+ /*
+ * In case the incoming frame's exchange is originated from
+ * the initiator, then received frame's exchange id is ANDed
+ * with fc_cpu_mask bits to get the same cpu on which exchange
+ * was originated, otherwise select cpu using rx exchange id
+ * or fcoe_select_cpu().
+ */
+ if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
+ cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
+ else {
+ if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
+ cpu = fcoe_select_cpu();
+ else
+ cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
+ }
+
+ if (cpu >= nr_cpu_ids)
+ goto err;
+
+ fps = &per_cpu(fcoe_percpu, cpu);
+ spin_lock(&fps->fcoe_rx_list.lock);
+ if (unlikely(!fps->thread)) {
+ /*
+ * The targeted CPU is not ready, let's target
+ * the first CPU now. For non-SMP systems this
+ * will check the same CPU twice.
+ */
+ FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
+ "ready for incoming skb- using first online "
+ "CPU.\n");
+
+ spin_unlock(&fps->fcoe_rx_list.lock);
+ cpu = cpumask_first(cpu_online_mask);
+ fps = &per_cpu(fcoe_percpu, cpu);
+ spin_lock(&fps->fcoe_rx_list.lock);
+ if (!fps->thread) {
+ spin_unlock(&fps->fcoe_rx_list.lock);
+ goto err;
+ }
+ }
+
+ /*
+ * We now have a valid CPU that we're targeting for
+ * this skb. We also have this receive thread locked,
+ * so we're free to queue skbs into it's queue.
+ */
+
+ /*
+ * Note: We used to have a set of conditions under which we would
+ * call fcoe_recv_frame directly, rather than queuing to the rx list
+ * as it could save a few cycles, but doing so is prohibited, as
+ * fcoe_recv_frame has several paths that may sleep, which is forbidden
+ * in softirq context.
+ */
+ __skb_queue_tail(&fps->fcoe_rx_list, skb);
+ if (fps->thread->state == TASK_INTERRUPTIBLE)
+ wake_up_process(fps->thread);
+ spin_unlock(&fps->fcoe_rx_list.lock);
+
+ return NET_RX_SUCCESS;
+err:
+ per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
+ put_cpu();
+err2:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/**
+ * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ struct fcoe_percpu_s *fps;
+ int rc;
+
+ fps = &get_cpu_var(fcoe_percpu);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+ put_cpu_var(fcoe_percpu);
+
+ return rc;
+}
+
+/**
+ * fcoe_xmit() - Transmit a FCoE frame
+ * @lport: The local port that the frame is to be transmitted for
+ * @fp: The frame to be transmitted
+ *
+ * Return: 0 for success
+ */
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ int wlen;
+ u32 crc;
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_stats *stats;
+ struct fc_frame_header *fh;
+ unsigned int hlen; /* header length implies the version */
+ unsigned int tlen; /* trailer length */
+ unsigned int elen; /* eth header, may include vlan */
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ u8 sof, eof;
+ struct fcoe_hdr *hp;
+
+ WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
+
+ fh = fc_frame_header_get(fp);
+ skb = fp_skb(fp);
+ wlen = skb->len / FCOE_WORD_TO_BYTE;
+
+ if (!lport->link_up) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
+ fcoe_ctlr_els_send(ctlr, lport, skb))
+ return 0;
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ /* crc offload */
+ if (likely(lport->crc_offload)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_start = skb_headroom(skb);
+ skb->csum_offset = skb->len;
+ crc = 0;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+ }
+
+ /* copy port crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(skb_frag_page(frag))
+ + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp);
+ cp = NULL;
+ }
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = fcoe->priority;
+
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
+ fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+ /* must set skb->dev before calling vlan_put_tag */
+ skb->dev = fcoe->realdev;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_dev_vlan_id(fcoe->netdev));
+ } else
+ skb->dev = fcoe->netdev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+ if (ctlr->map_dest)
+ memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
+
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lport->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+ /* update tx stats: regardless if LLD fails */
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ fcoe_port_send(port, skb);
+ return 0;
+}
+
+/**
+ * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
+ * @skb: The completed skb (argument required by destructor)
+ */
+static void fcoe_percpu_flush_done(struct sk_buff *skb)
+{
+ complete(&fcoe_flush_completion);
+}
+
+/**
+ * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ *
+ * Return: 0 on passing filtering checks
+ */
+static inline int fcoe_filter_frames(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb = (struct sk_buff *)fp;
+ struct fc_stats *stats;
+
+ /*
+ * We only check CRC if no offload is available and if it is
+ * it's solicited data, in which case, the FCP layer would
+ * check it during the copy.
+ */
+ if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ else
+ fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
+ return 0;
+
+ fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
+ ctlr = fcoe_to_ctlr(fcoe);
+ if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
+ return -EINVAL;
+ }
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
+ le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ return 0;
+ }
+
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
+ return -EINVAL;
+}
+
+/**
+ * fcoe_recv_frame() - process a single received frame
+ * @skb: frame to process
+ */
+static void fcoe_recv_frame(struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fc_stats *stats;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ struct fcoe_port *port;
+ struct fcoe_hdr *hp;
+
+ fr = fcoe_dev_from_skb(skb);
+ lport = fr->fr_dev;
+ if (unlikely(!lport)) {
+ if (skb->destructor != fcoe_percpu_flush_done)
+ FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ FCOE_NETDEV_DBG(skb->dev,
+ "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
+ skb->len, skb->data_len,
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->csum,
+ skb->dev ? skb->dev->name : "<NULL>");
+
+ port = lport_priv(lport);
+ skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
+
+ /*
+ * Frame length checks and setting up the header pointers
+ * was done in fcoe_rcv already.
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats->ErrorFrames < 5)
+ printk(KERN_WARNING "fcoe: FCoE version "
+ "mismatch: The frame has "
+ "version %x, but the "
+ "initiator supports version "
+ "%x\n", FC_FCOE_DECAPS_VER(hp),
+ FC_FCOE_VER);
+ goto drop;
+ }
+
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+
+ /* Copy out the CRC and EOF trailer for access */
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
+ goto drop;
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len))
+ goto drop;
+
+ if (!fcoe_filter_frames(lport, fp)) {
+ put_cpu();
+ fc_exch_recv(lport, fp);
+ return;
+ }
+drop:
+ stats->ErrorFrames++;
+ put_cpu();
+ kfree_skb(skb);
+}
+
+/**
+ * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
+ * @arg: The per-CPU context
+ *
+ * Return: 0 for success
+ */
+static int fcoe_percpu_receive_thread(void *arg)
+{
+ struct fcoe_percpu_s *p = arg;
+ struct sk_buff *skb;
+ struct sk_buff_head tmp;
+
+ skb_queue_head_init(&tmp);
+
+ set_user_nice(current, MIN_NICE);
+
+retry:
+ while (!kthread_should_stop()) {
+
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+
+ if (!skb_queue_len(&tmp)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ schedule();
+ goto retry;
+ }
+
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+ while ((skb = __skb_dequeue(&tmp)) != NULL)
+ fcoe_recv_frame(skb);
+
+ }
+ return 0;
+}
+
+/**
+ * fcoe_dev_setup() - Setup the link change notification interface
+ */
+static void fcoe_dev_setup(void)
+{
+ register_dcbevent_notifier(&dcb_notifier);
+ register_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_dev_cleanup() - Cleanup the link change notification interface
+ */
+static void fcoe_dev_cleanup(void)
+{
+ unregister_dcbevent_notifier(&dcb_notifier);
+ unregister_netdevice_notifier(&fcoe_notifier);
+}
+
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ ctlr = fcoe_to_ctlr(fcoe);
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ ctlr->priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE)
+ fcoe->priority = prio;
+
+ return NOTIFY_OK;
+}
+
+/**
+ * fcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event: The type of event
+ * @ptr: The net device that the event was on
+ *
+ * This function is called by the Ethernet driver in case of link change event.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct fcoe_ctlr_device *cdev;
+ struct fc_lport *lport = NULL;
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+ struct fc_stats *stats;
+ u32 link_possible = 1;
+ u32 mfs;
+ int rc = NOTIFY_OK;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev == netdev) {
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
+ break;
+ }
+ }
+ if (!lport) {
+ rc = NOTIFY_DONE;
+ goto out;
+ }
+
+ switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_GOING_DOWN:
+ link_possible = 0;
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ break;
+ case NETDEV_CHANGEMTU:
+ if (netdev->features & NETIF_F_FCOE_MTU)
+ break;
+ mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ if (mfs >= FC_MIN_MAX_FRAME)
+ fc_set_mfs(lport, mfs);
+ break;
+ case NETDEV_REGISTER:
+ break;
+ case NETDEV_UNREGISTER:
+ list_del(&fcoe->list);
+ port = lport_priv(ctlr->lp);
+ queue_work(fcoe_wq, &port->destroy_work);
+ goto out;
+ break;
+ case NETDEV_FEAT_CHANGE:
+ fcoe_netdev_features_change(lport, netdev);
+ break;
+ default:
+ FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
+ "from netdev netlink\n", event);
+ }
+
+ fcoe_link_speed_update(lport);
+
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_possible && !fcoe_link_ok(lport)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ fcoe_ctlr_link_up(ctlr);
+ };
+ } else if (fcoe_ctlr_link_down(ctlr)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ };
+ }
+out:
+ return rc;
+}
+
+/**
+ * fcoe_disable() - Disables a FCoE interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ *
+ * Called from fcoe transport.
+ *
+ * Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
+ */
+static int fcoe_disable(struct net_device *netdev)
+{
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ rtnl_unlock();
+
+ if (fcoe) {
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ } else
+ rc = -ENODEV;
+
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_enable() - Enables a FCoE interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ *
+ * Called from fcoe transport.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_enable(struct net_device *netdev)
+{
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ rtnl_unlock();
+
+ if (!fcoe) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ ctlr = fcoe_to_ctlr(fcoe);
+
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
+
+out:
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return fcoe_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return fcoe_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+/**
+ * fcoe_destroy() - Destroy a FCoE interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_destroy(struct net_device *netdev)
+{
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ struct fc_lport *lport;
+ struct fcoe_port *port;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ if (!fcoe) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
+ port = lport_priv(lport);
+ list_del(&fcoe->list);
+ queue_work(fcoe_wq, &port->destroy_work);
+out_nodev:
+ rtnl_unlock();
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
+ * @work: Handle to the FCoE port to be destroyed
+ */
+static void fcoe_destroy_work(struct work_struct *work)
+{
+ struct fcoe_ctlr_device *cdev;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_port *port;
+ struct fcoe_interface *fcoe;
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
+
+ port = container_of(work, struct fcoe_port, destroy_work);
+ shost = port->lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+
+ mutex_lock(&fcoe_config_mutex);
+
+ fcoe = port->priv;
+ ctlr = fcoe_to_ctlr(fcoe);
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ fcoe_if_destroy(port->lport);
+ fcoe_interface_cleanup(fcoe);
+
+ mutex_unlock(&fcoe_config_mutex);
+
+ fcoe_ctlr_device_delete(cdev);
+}
+
+/**
+ * fcoe_match() - Check if the FCoE is supported on the given netdevice
+ * @netdev : The net_device object the Ethernet interface to create on
+ *
+ * Called from fcoe transport.
+ *
+ * Returns: always returns true as this is the default FCoE transport,
+ * i.e., support all netdevs.
+ */
+static bool fcoe_match(struct net_device *netdev)
+{
+ return true;
+}
+
+/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ * @
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
+ ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+ }
+#endif
+}
+
+enum fcoe_create_link_state {
+ FCOE_CREATE_LINK_DOWN,
+ FCOE_CREATE_LINK_UP,
+};
+
+/**
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
+ *
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_syfs's ctlr_create file.
+ *
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
+ */
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+ enum fcoe_create_link_state link_state)
+{
+ int rc = 0;
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+ struct fc_lport *lport;
+
+ mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
+
+ /* look for existing lport */
+ if (fcoe_hostlist_lookup(netdev)) {
+ rc = -EEXIST;
+ goto out_nodev;
+ }
+
+ fcoe = fcoe_interface_create(netdev, fip_mode);
+ if (IS_ERR(fcoe)) {
+ rc = PTR_ERR(fcoe);
+ goto out_nodev;
+ }
+
+ ctlr = fcoe_to_ctlr(fcoe);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
+ if (IS_ERR(lport)) {
+ printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
+ netdev->name);
+ rc = -EIO;
+ rtnl_unlock();
+ fcoe_interface_cleanup(fcoe);
+ mutex_unlock(&fcoe_config_mutex);
+ fcoe_ctlr_device_delete(ctlr_dev);
+ goto out;
+ }
+
+ /* Make this the "master" N_Port */
+ ctlr->lp = lport;
+
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
+ /* start FIP Discovery and FLOGI */
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+
+ /*
+ * If the fcoe_ctlr_device is to be set to DISABLED
+ * it must be done after the lport is added to the
+ * hostlist, but before the rtnl_lock is released.
+ * This is because the rtnl_lock protects the
+ * hostlist that fcoe_device_notification uses. If
+ * the FCoE Controller is intended to be created
+ * DISABLED then 'enabled' needs to be considered
+ * handling link events. 'enabled' must be set
+ * before the lport can be found in the hostlist
+ * when a link up event is received.
+ */
+ if (link_state == FCOE_CREATE_LINK_UP)
+ ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+ else
+ ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == FCOE_CREATE_LINK_UP &&
+ !fcoe_link_ok(lport)) {
+ rtnl_unlock();
+ fcoe_ctlr_link_up(ctlr);
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+ }
+
+out_nodev:
+ rtnl_unlock();
+ mutex_unlock(&fcoe_config_mutex);
+out:
+ return rc;
+}
+
+/**
+ * fcoe_create() - Create a fcoe interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. The allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating in with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int fcoe_ctlr_alloc(struct net_device *netdev)
+{
+ return _fcoe_create(netdev, FIP_MODE_FABRIC,
+ FCOE_CREATE_LINK_DOWN);
+}
+
+/**
+ * fcoe_link_ok() - Check if the link is OK for a local port
+ * @lport: The local port to check link on
+ *
+ * Returns: 0 if link is UP and OK, -1 if not
+ *
+ */
+static int fcoe_link_ok(struct fc_lport *lport)
+{
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ if (netif_oper_up(netdev))
+ return 0;
+ return -1;
+}
+
+/**
+ * fcoe_percpu_clean() - Clear all pending skbs for an local port
+ * @lport: The local port whose skbs are to be cleared
+ *
+ * Must be called with fcoe_create_mutex held to single-thread completion.
+ *
+ * This flushes the pending skbs by adding a new skb to each queue and
+ * waiting until they are all freed. This assures us that not only are
+ * there no packets that will be handled by the lport, but also that any
+ * threads already handling packet have returned.
+ */
+static void fcoe_percpu_clean(struct fc_lport *lport)
+{
+ struct fcoe_percpu_s *pp;
+ struct sk_buff *skb;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ pp = &per_cpu(fcoe_percpu, cpu);
+
+ if (!pp->thread || !cpu_online(cpu))
+ continue;
+
+ skb = dev_alloc_skb(0);
+ if (!skb)
+ continue;
+
+ skb->destructor = fcoe_percpu_flush_done;
+
+ spin_lock_bh(&pp->fcoe_rx_list.lock);
+ __skb_queue_tail(&pp->fcoe_rx_list, skb);
+ if (pp->fcoe_rx_list.qlen == 1)
+ wake_up_process(pp->thread);
+ spin_unlock_bh(&pp->fcoe_rx_list.lock);
+
+ wait_for_completion(&fcoe_flush_completion);
+ }
+}
+
+/**
+ * fcoe_reset() - Reset a local port
+ * @shost: The SCSI host associated with the local port to be reset
+ *
+ * Returns: Always 0 (return value required by FC transport template)
+ */
+static int fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+
+ if (cdev->enabled != FCOE_CTLR_DISABLED &&
+ !fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
+ return 0;
+}
+
+/**
+ * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
+ * @netdev: The net device used as a key
+ *
+ * Locking: Must be called with the RNL mutex held.
+ *
+ * Returns: NULL or the FCoE interface
+ */
+static struct fcoe_interface *
+fcoe_hostlist_lookup_port(const struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev == netdev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+/**
+ * fcoe_hostlist_lookup() - Find the local port associated with a
+ * given net device
+ * @netdev: The netdevice used as a key
+ *
+ * Locking: Must be called with the RTNL mutex held
+ *
+ * Returns: NULL or the local port
+ */
+static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
+{
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_interface *fcoe;
+
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ ctlr = fcoe_to_ctlr(fcoe);
+ return (fcoe) ? ctlr->lp : NULL;
+}
+
+/**
+ * fcoe_hostlist_add() - Add the FCoE interface identified by a local
+ * port to the hostlist
+ * @lport: The local port that identifies the FCoE interface to be added
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_hostlist_add(const struct fc_lport *lport)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+
+ fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
+ if (!fcoe) {
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ list_add_tail(&fcoe->list, &fcoe_hostlist);
+ }
+ return 0;
+}
+
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ * port to the hostlist
+ * @lport: The local port that identifies the FCoE interface to be added
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ list_del(&fcoe->list);
+ return;
+}
+
+static struct fcoe_transport fcoe_sw_transport = {
+ .name = {FCOE_TRANSPORT_DEFAULT},
+ .attached = false,
+ .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
+ .match = fcoe_match,
+ .alloc = fcoe_ctlr_alloc,
+ .create = fcoe_create,
+ .destroy = fcoe_destroy,
+ .enable = fcoe_enable,
+ .disable = fcoe_disable,
+};
+
+/**
+ * fcoe_init() - Initialize fcoe.ko
+ *
+ * Returns: 0 on success, or a negative value on failure
+ */
+static int __init fcoe_init(void)
+{
+ struct fcoe_percpu_s *p;
+ unsigned int cpu;
+ int rc = 0;
+
+ fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ if (!fcoe_wq)
+ return -ENOMEM;
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&fcoe_sw_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ return rc;
+ }
+
+ mutex_lock(&fcoe_config_mutex);
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(fcoe_percpu, cpu);
+ skb_queue_head_init(&p->fcoe_rx_list);
+ }
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu)
+ fcoe_percpu_thread_create(cpu);
+
+ /* Initialize per CPU interrupt thread */
+ rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
+ if (rc)
+ goto out_free;
+
+ cpu_notifier_register_done();
+
+ /* Setup link change notification */
+ fcoe_dev_setup();
+
+ rc = fcoe_if_init();
+ if (rc)
+ goto out_free;
+
+ mutex_unlock(&fcoe_config_mutex);
+ return 0;
+
+out_free:
+ for_each_online_cpu(cpu) {
+ fcoe_percpu_thread_destroy(cpu);
+ }
+
+ cpu_notifier_register_done();
+
+ mutex_unlock(&fcoe_config_mutex);
+ destroy_workqueue(fcoe_wq);
+ return rc;
+}
+module_init(fcoe_init);
+
+/**
+ * fcoe_exit() - Clean up fcoe.ko
+ *
+ * Returns: 0 on success or a negative value on failure
+ */
+static void __exit fcoe_exit(void)
+{
+ struct fcoe_interface *fcoe, *tmp;
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_port *port;
+ unsigned int cpu;
+
+ mutex_lock(&fcoe_config_mutex);
+
+ fcoe_dev_cleanup();
+
+ /* releases the associated fcoe hosts */
+ rtnl_lock();
+ list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
+ ctlr = fcoe_to_ctlr(fcoe);
+ port = lport_priv(ctlr->lp);
+ fcoe_hostlist_del(port->lport);
+ queue_work(fcoe_wq, &port->destroy_work);
+ }
+ rtnl_unlock();
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu)
+ fcoe_percpu_thread_destroy(cpu);
+
+ __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+
+ cpu_notifier_register_done();
+
+ mutex_unlock(&fcoe_config_mutex);
+
+ /*
+ * destroy_work's may be chained but destroy_workqueue()
+ * can take care of them. Just kill the fcoe_wq.
+ */
+ destroy_workqueue(fcoe_wq);
+
+ /*
+ * Detaching from the scsi transport must happen after all
+ * destroys are done on the fcoe_wq. destroy_workqueue will
+ * enusre the fcoe_wq is flushed.
+ */
+ fcoe_if_exit();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&fcoe_sw_transport);
+}
+module_exit(fcoe_exit);
+
+/**
+ * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
+ * @seq: active sequence in the FLOGI or FDISC exchange
+ * @fp: response frame, or error encoded in a pointer (timeout)
+ * @arg: pointer the the fcoe_ctlr structure
+ *
+ * This handles MAC address management for FCoE, then passes control on to
+ * the libfc FLOGI response handler.
+ */
+static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ u8 *mac;
+
+ if (IS_ERR(fp))
+ goto done;
+
+ mac = fr_cb(fp)->granted_mac;
+ /* pre-FIP */
+ if (is_zero_ether_addr(mac))
+ fcoe_ctlr_recv_flogi(fip, lport, fp);
+ if (!is_zero_ether_addr(mac))
+ fcoe_update_src_mac(lport, mac);
+done:
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+/**
+ * fcoe_logo_resp() - FCoE specific LOGO response handler
+ * @seq: active sequence in the LOGO exchange
+ * @fp: response frame, or error encoded in a pointer (timeout)
+ * @arg: pointer the the fcoe_ctlr structure
+ *
+ * This handles MAC address management for FCoE, then passes control on to
+ * the libfc LOGO response handler.
+ */
+static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_lport *lport = arg;
+ static u8 zero_mac[ETH_ALEN] = { 0 };
+
+ if (!IS_ERR(fp))
+ fcoe_update_src_mac(lport, zero_mac);
+ fc_lport_logo_resp(seq, fp, lport);
+}
+
+/**
+ * fcoe_elsct_send - FCoE specific ELS handler
+ *
+ * This does special case handling of FIP encapsualted ELS exchanges for FCoE,
+ * using FCoE specific response handlers and passing the FIP controller as
+ * the argument (the lport is still available from the exchange).
+ *
+ * Most of the work here is just handed off to the libfc routine.
+ */
+static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (op) {
+ case ELS_FLOGI:
+ case ELS_FDISC:
+ if (lport->point_to_multipoint)
+ break;
+ return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
+ fip, timeout);
+ case ELS_LOGO:
+ /* only hook onto fabric logouts, not port logouts */
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ break;
+ return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
+ lport, timeout);
+ }
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
+
+/**
+ * fcoe_vport_create() - create an fc_host/scsi_host for a vport
+ * @vport: fc_vport object to create a new fc_host for
+ * @disabled: start the new fc_host in a disabled state by default?
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(n_port);
+ struct fcoe_interface *fcoe = port->priv;
+ struct net_device *netdev = fcoe->netdev;
+ struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR "fcoe: Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
+
+ mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
+ vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
+ rtnl_unlock();
+ mutex_unlock(&fcoe_config_mutex);
+
+ if (IS_ERR(vn_port)) {
+ printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
+ netdev->name);
+ return -EIO;
+ }
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+ return 0;
+}
+
+/**
+ * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
+ * @vport: fc_vport object that is being destroyed
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+
+ mutex_lock(&n_port->lp_mutex);
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+
+ mutex_lock(&fcoe_config_mutex);
+ fcoe_if_destroy(vn_port);
+ mutex_unlock(&fcoe_config_mutex);
+
+ return 0;
+}
+
+/**
+ * fcoe_vport_disable() - change vport state
+ * @vport: vport to bring online/offline
+ * @disable: should the vport be disabled?
+ */
+static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+
+ return 0;
+}
+
+/**
+ * fcoe_vport_set_symbolic_name() - append vport string to symbolic name
+ * @vport: fc_vport with a new symbolic name string
+ *
+ * After generating a new symbolic name string, a new RSPN_ID request is
+ * sent to the name server. There is no response handler, so if it fails
+ * for some reason it will not be retried.
+ */
+static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
+{
+ struct fc_lport *lport = vport->dd_data;
+ struct fc_frame *fp;
+ size_t len;
+
+ snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
+ "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
+ fcoe_netdev(lport)->name, vport->symbolic_name);
+
+ if (lport->state != LPORT_ST_READY)
+ return;
+
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ fp = fc_frame_alloc(lport,
+ sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_rspn) + len);
+ if (!fp)
+ return;
+ lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
+ NULL, NULL, 3 * lport->r_a_tov);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
+/**
+ * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
+ * @lport: the local port
+ * @port_id: the port ID
+ * @fp: the received frame, if any, that caused the port_id to be set.
+ *
+ * This routine handles the case where we received a FLOGI and are
+ * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi()
+ * so it can set the non-mapped mode and gateway address.
+ *
+ * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
+ */
+static void fcoe_set_port_id(struct fc_lport *lport,
+ u32 port_id, struct fc_frame *fp)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+
+ if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
+ fcoe_ctlr_recv_flogi(ctlr, lport, fp);
+}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
new file mode 100644
index 000000000..2b53672bf
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FCOE_H_
+#define _FCOE_H_
+
+#include <linux/skbuff.h>
+#include <linux/kthread.h>
+
+#define FCOE_MAX_QUEUE_DEPTH 256
+#define FCOE_MIN_QUEUE_DEPTH 32
+
+#define FCOE_WORD_TO_BYTE 4
+
+#define FCOE_VERSION "0.1"
+#define FCOE_NAME "fcoe"
+#define FCOE_VENDOR "Open-FCoE.org"
+
+#define FCOE_MAX_LUN 0xFFFF
+#define FCOE_MAX_FCP_TARGET 256
+
+#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
+
+#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
+#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
+
+extern unsigned int fcoe_debug_logging;
+
+#define FCOE_LOGGING 0x01 /* General logging, not categorized */
+#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
+
+#define FCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(fcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define FCOE_DBG(fmt, args...) \
+ FCOE_CHECK_LOGGING(FCOE_LOGGING, \
+ pr_info("fcoe: " fmt, ##args);)
+
+#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
+ FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
+ pr_info("fcoe: %s: " fmt, \
+ netdev->name, ##args);)
+
+/**
+ * struct fcoe_interface - A FCoE interface
+ * @list: Handle for a list of FCoE interfaces
+ * @netdev: The associated net device
+ * @fcoe_packet_type: FCoE packet type
+ * @fip_packet_type: FIP packet type
+ * @oem: The offload exchange manager for all local port
+ * instances associated with this port
+ * @removed: Indicates fcoe interface removed from net device
+ * @priority: Priority for the FCoE packet (DCB)
+ * This structure is 1:1 with a net device.
+ */
+struct fcoe_interface {
+ struct list_head list;
+ struct net_device *netdev;
+ struct net_device *realdev;
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct fc_exch_mgr *oem;
+ u8 removed;
+ u8 priority;
+};
+
+#define fcoe_to_ctlr(x) \
+ (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x) \
+ ((struct fcoe_interface *)((x) + 1))
+
+/**
+ * fcoe_netdev() - Return the net device associated with a local port
+ * @lport: The local port to get the net device from
+ */
+static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
+{
+ return ((struct fcoe_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
+}
+
+#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
new file mode 100644
index 000000000..34a1b1f33
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -0,0 +1,2965 @@
+/*
+ * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
+#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
+
+static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timer_work(struct work_struct *);
+static void fcoe_ctlr_recv_work(struct work_struct *);
+static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
+
+static void fcoe_ctlr_vn_start(struct fcoe_ctlr *);
+static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *);
+static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *);
+static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *);
+
+static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
+static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
+static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
+static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
+
+static const char * const fcoe_ctlr_states[] = {
+ [FIP_ST_DISABLED] = "DISABLED",
+ [FIP_ST_LINK_WAIT] = "LINK_WAIT",
+ [FIP_ST_AUTO] = "AUTO",
+ [FIP_ST_NON_FIP] = "NON_FIP",
+ [FIP_ST_ENABLED] = "ENABLED",
+ [FIP_ST_VNMP_START] = "VNMP_START",
+ [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1",
+ [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2",
+ [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM",
+ [FIP_ST_VNMP_UP] = "VNMP_UP",
+};
+
+static const char *fcoe_ctlr_state(enum fip_state state)
+{
+ const char *cp = "unknown";
+
+ if (state < ARRAY_SIZE(fcoe_ctlr_states))
+ cp = fcoe_ctlr_states[state];
+ if (!cp)
+ cp = "unknown";
+ return cp;
+}
+
+/**
+ * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state.
+ * @fip: The FCoE controller
+ * @state: The new state
+ */
+static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state)
+{
+ if (state == fip->state)
+ return;
+ if (fip->lp)
+ LIBFCOE_FIP_DBG(fip, "state %s -> %s\n",
+ fcoe_ctlr_state(fip->state), fcoe_ctlr_state(state));
+ fip->state = state;
+}
+
+/**
+ * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid
+ * @fcf: The FCF to check
+ *
+ * Return non-zero if FCF fcoe_size has been validated.
+ */
+static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
+{
+ return (fcf->flags & FIP_FL_SOL) != 0;
+}
+
+/**
+ * fcoe_ctlr_fcf_usable() - Check if a FCF is usable
+ * @fcf: The FCF to check
+ *
+ * Return non-zero if the FCF is usable.
+ */
+static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
+{
+ u16 flags = FIP_FL_SOL | FIP_FL_AVAIL;
+
+ return (fcf->flags & flags) == flags;
+}
+
+/**
+ * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses
+ * @fip: The FCoE controller
+ */
+static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
+{
+ if (fip->mode == FIP_MODE_VN2VN)
+ hton24(fip->dest_addr, FIP_VN_FC_MAP);
+ else
+ hton24(fip->dest_addr, FIP_DEF_FC_MAP);
+ hton24(fip->dest_addr + 3, 0);
+ fip->map_dest = 1;
+}
+
+/**
+ * fcoe_ctlr_init() - Initialize the FCoE Controller instance
+ * @fip: The FCoE controller to initialize
+ */
+void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
+{
+ fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
+ fip->mode = mode;
+ INIT_LIST_HEAD(&fip->fcfs);
+ mutex_init(&fip->ctlr_mutex);
+ spin_lock_init(&fip->ctlr_lock);
+ fip->flogi_oxid = FC_XID_UNKNOWN;
+ setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+ INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
+ INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
+ skb_queue_head_init(&fip->fip_recv_list);
+}
+EXPORT_SYMBOL(fcoe_ctlr_init);
+
+/**
+ * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
+ * @new: The newly discovered FCF
+ *
+ * Called with fip->ctlr_mutex held
+ */
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_fcf_device *temp, *fcf_dev;
+ int rc = -ENOMEM;
+
+ LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+ new->fabric_name, new->fcf_mac);
+
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ goto out;
+
+ temp->fabric_name = new->fabric_name;
+ temp->switch_name = new->switch_name;
+ temp->fc_map = new->fc_map;
+ temp->vfid = new->vfid;
+ memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
+ temp->priority = new->pri;
+ temp->fka_period = new->fka_period;
+ temp->selected = 0; /* default to unselected */
+
+ /*
+ * If ctlr_dev doesn't exist then it means we're a libfcoe user
+ * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device.
+ * fnic would be an example of a driver with this behavior. In this
+ * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+ * don't want to make sysfs changes.
+ */
+
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ if (ctlr_dev) {
+ mutex_lock(&ctlr_dev->lock);
+ fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
+ if (unlikely(!fcf_dev)) {
+ rc = -ENOMEM;
+ mutex_unlock(&ctlr_dev->lock);
+ goto out;
+ }
+
+ /*
+ * The fcoe_sysfs layer can return a CONNECTED fcf that
+ * has a priv (fcf was never deleted) or a CONNECTED fcf
+ * that doesn't have a priv (fcf was deleted). However,
+ * libfcoe will always delete FCFs before trying to add
+ * them. This is ensured because both recv_adv and
+ * age_fcfs are protected by the the fcoe_ctlr's mutex.
+ * This means that we should never get a FCF with a
+ * non-NULL priv pointer.
+ */
+ BUG_ON(fcf_dev->priv);
+
+ fcf_dev->priv = new;
+ new->fcf_dev = fcf_dev;
+ mutex_unlock(&ctlr_dev->lock);
+ }
+
+ list_add(&new->list, &fip->fcfs);
+ fip->fcf_count++;
+ rc = 0;
+
+out:
+ kfree(temp);
+ return rc;
+}
+
+/**
+ * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
+ * @new: The FCF to be removed
+ *
+ * Called with fip->ctlr_mutex held
+ */
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *cdev;
+ struct fcoe_fcf_device *fcf_dev;
+
+ list_del(&new->list);
+ fip->fcf_count--;
+
+ /*
+ * If ctlr_dev doesn't exist then it means we're a libfcoe user
+ * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device
+ * or a fcoe_fcf_device.
+ *
+ * fnic would be an example of a driver with this behavior. In this
+ * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+ * but we don't want to make sysfs changes.
+ */
+ cdev = fcoe_ctlr_to_ctlr_dev(fip);
+ if (cdev) {
+ mutex_lock(&cdev->lock);
+ fcf_dev = fcoe_fcf_to_fcf_dev(new);
+ WARN_ON(!fcf_dev);
+ new->fcf_dev = NULL;
+ fcoe_fcf_device_delete(fcf_dev);
+ kfree(new);
+ mutex_unlock(&cdev->lock);
+ }
+}
+
+/**
+ * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
+ * @fip: The FCoE controller whose FCFs are to be reset
+ *
+ * Called with &fcoe_ctlr lock held.
+ */
+static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
+{
+ struct fcoe_fcf *fcf;
+ struct fcoe_fcf *next;
+
+ fip->sel_fcf = NULL;
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ fcoe_sysfs_fcf_del(fcf);
+ }
+ WARN_ON(fip->fcf_count);
+
+ fip->sel_time = 0;
+}
+
+/**
+ * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller
+ * @fip: The FCoE controller to tear down
+ *
+ * This is called by FCoE drivers before freeing the &fcoe_ctlr.
+ *
+ * The receive handler will have been deleted before this to guarantee
+ * that no more recv_work will be scheduled.
+ *
+ * The timer routine will simply return once we set FIP_ST_DISABLED.
+ * This guarantees that no further timeouts or work will be scheduled.
+ */
+void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
+{
+ cancel_work_sync(&fip->recv_work);
+ skb_queue_purge(&fip->fip_recv_list);
+
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_set_state(fip, FIP_ST_DISABLED);
+ fcoe_ctlr_reset_fcfs(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ del_timer_sync(&fip->timer);
+ cancel_work_sync(&fip->timer_work);
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy);
+
+/**
+ * fcoe_ctlr_announce() - announce new FCF selection
+ * @fip: The FCoE controller
+ *
+ * Also sets the destination MAC for FCoE and control packets
+ *
+ * Called with neither ctlr_mutex nor ctlr_lock held.
+ */
+static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+{
+ struct fcoe_fcf *sel;
+ struct fcoe_fcf *fcf;
+
+ mutex_lock(&fip->ctlr_mutex);
+ spin_lock_bh(&fip->ctlr_lock);
+
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = NULL;
+ list_for_each_entry(fcf, &fip->fcfs, list)
+ fcf->flogi_sent = 0;
+
+ spin_unlock_bh(&fip->ctlr_lock);
+ sel = fip->sel_fcf;
+
+ if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+ goto unlock;
+ if (!is_zero_ether_addr(fip->dest_addr)) {
+ printk(KERN_NOTICE "libfcoe: host%d: "
+ "FIP Fibre-Channel Forwarder MAC %pM deselected\n",
+ fip->lp->host->host_no, fip->dest_addr);
+ memset(fip->dest_addr, 0, ETH_ALEN);
+ }
+ if (sel) {
+ printk(KERN_INFO "libfcoe: host%d: FIP selected "
+ "Fibre-Channel Forwarder MAC %pM\n",
+ fip->lp->host->host_no, sel->fcf_mac);
+ memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
+ fip->map_dest = 0;
+ }
+unlock:
+ mutex_unlock(&fip->ctlr_mutex);
+}
+
+/**
+ * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port
+ * @fip: The FCoE controller to get the maximum FCoE size from
+ *
+ * Returns the maximum packet size including the FCoE header and trailer,
+ * but not including any Ethernet or VLAN headers.
+ */
+static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip)
+{
+ /*
+ * Determine the max FCoE frame size allowed, including
+ * FCoE header and trailer.
+ * Note: lp->mfs is currently the payload size, not the frame size.
+ */
+ return fip->lp->mfs + sizeof(struct fc_frame_header) +
+ sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof);
+}
+
+/**
+ * fcoe_ctlr_solicit() - Send a FIP solicitation
+ * @fip: The FCoE controller to send the solicitation on
+ * @fcf: The destination FCF (if NULL, a multicast solicitation is sent)
+ */
+static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
+{
+ struct sk_buff *skb;
+ struct fip_sol {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct {
+ struct fip_mac_desc mac;
+ struct fip_wwn_desc wwnn;
+ struct fip_size_desc size;
+ } __packed desc;
+ } __packed * sol;
+ u32 fcoe_size;
+
+ skb = dev_alloc_skb(sizeof(*sol));
+ if (!skb)
+ return;
+
+ sol = (struct fip_sol *)skb->data;
+
+ memset(sol, 0, sizeof(*sol));
+ memcpy(sol->eth.h_dest, fcf ? fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN);
+ memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+ sol->eth.h_proto = htons(ETH_P_FIP);
+
+ sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ sol->fip.fip_op = htons(FIP_OP_DISC);
+ sol->fip.fip_subcode = FIP_SC_SOL;
+ sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW);
+ sol->fip.fip_flags = htons(FIP_FL_FPMA);
+ if (fip->spma)
+ sol->fip.fip_flags |= htons(FIP_FL_SPMA);
+
+ sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+ sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW;
+ memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+ sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+ sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW;
+ put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn);
+
+ fcoe_size = fcoe_ctlr_fcoe_size(fip);
+ sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE;
+ sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW;
+ sol->desc.size.fd_size = htons(fcoe_size);
+
+ skb_put(skb, sizeof(*sol));
+ skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ fip->send(fip, skb);
+
+ if (!fcf)
+ fip->sol_time = jiffies;
+}
+
+/**
+ * fcoe_ctlr_link_up() - Start FCoE controller
+ * @fip: The FCoE controller to start
+ *
+ * Called from the LLD when the network link is ready.
+ */
+void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
+{
+ mutex_lock(&fip->ctlr_mutex);
+ if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_linkup(fip->lp);
+ } else if (fip->state == FIP_ST_LINK_WAIT) {
+ fcoe_ctlr_set_state(fip, fip->mode);
+ switch (fip->mode) {
+ default:
+ LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
+ /* fall-through */
+ case FIP_MODE_AUTO:
+ LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
+ /* fall-through */
+ case FIP_MODE_FABRIC:
+ case FIP_MODE_NON_FIP:
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_linkup(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ break;
+ case FIP_MODE_VN2VN:
+ fcoe_ctlr_vn_start(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_linkup(fip->lp);
+ break;
+ }
+ } else
+ mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_link_up);
+
+/**
+ * fcoe_ctlr_reset() - Reset a FCoE controller
+ * @fip: The FCoE controller to reset
+ */
+static void fcoe_ctlr_reset(struct fcoe_ctlr *fip)
+{
+ fcoe_ctlr_reset_fcfs(fip);
+ del_timer(&fip->timer);
+ fip->ctlr_ka_time = 0;
+ fip->port_ka_time = 0;
+ fip->sol_time = 0;
+ fip->flogi_oxid = FC_XID_UNKNOWN;
+ fcoe_ctlr_map_dest(fip);
+}
+
+/**
+ * fcoe_ctlr_link_down() - Stop a FCoE controller
+ * @fip: The FCoE controller to be stopped
+ *
+ * Returns non-zero if the link was up and now isn't.
+ *
+ * Called from the LLD when the network link is not ready.
+ * There may be multiple calls while the link is down.
+ */
+int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
+{
+ int link_dropped;
+
+ LIBFCOE_FIP_DBG(fip, "link down.\n");
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ link_dropped = fip->state != FIP_ST_LINK_WAIT;
+ fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ if (link_dropped)
+ fc_linkdown(fip->lp);
+ return link_dropped;
+}
+EXPORT_SYMBOL(fcoe_ctlr_link_down);
+
+/**
+ * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF
+ * @fip: The FCoE controller to send the FKA on
+ * @lport: libfc fc_lport to send from
+ * @ports: 0 for controller keep-alive, 1 for port keep-alive
+ * @sa: The source MAC address
+ *
+ * A controller keep-alive is sent every fka_period (typically 8 seconds).
+ * The source MAC is the native MAC address.
+ *
+ * A port keep-alive is sent every 90 seconds while logged in.
+ * The source MAC is the assigned mapped source address.
+ * The destination is the FCF's F-port.
+ */
+static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
+ struct fc_lport *lport,
+ int ports, u8 *sa)
+{
+ struct sk_buff *skb;
+ struct fip_kal {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac;
+ } __packed * kal;
+ struct fip_vn_desc *vn;
+ u32 len;
+ struct fc_lport *lp;
+ struct fcoe_fcf *fcf;
+
+ fcf = fip->sel_fcf;
+ lp = fip->lp;
+ if (!fcf || (ports && !lp->port_id))
+ return;
+
+ len = sizeof(*kal) + ports * sizeof(*vn);
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return;
+
+ kal = (struct fip_kal *)skb->data;
+ memset(kal, 0, len);
+ memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
+ memcpy(kal->eth.h_source, sa, ETH_ALEN);
+ kal->eth.h_proto = htons(ETH_P_FIP);
+
+ kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ kal->fip.fip_op = htons(FIP_OP_CTRL);
+ kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE;
+ kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
+ ports * sizeof(*vn)) / FIP_BPW);
+ kal->fip.fip_flags = htons(FIP_FL_FPMA);
+ if (fip->spma)
+ kal->fip.fip_flags |= htons(FIP_FL_SPMA);
+
+ kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
+ kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
+ memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+ if (ports) {
+ vn = (struct fip_vn_desc *)(kal + 1);
+ vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
+ vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
+ memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
+ hton24(vn->fd_fc_id, lport->port_id);
+ put_unaligned_be64(lport->wwpn, &vn->fd_wwpn);
+ }
+ skb_put(skb, len);
+ skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ fip->send(fip, skb);
+}
+
+/**
+ * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it
+ * @fip: The FCoE controller for the ELS frame
+ * @dtype: The FIP descriptor type for the frame
+ * @skb: The FCoE ELS frame including FC header but no FCoE headers
+ * @d_id: The destination port ID.
+ *
+ * Returns non-zero error code on failure.
+ *
+ * The caller must check that the length is a multiple of 4.
+ *
+ * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes).
+ * Headroom includes the FIP encapsulation description, FIP header, and
+ * Ethernet header. The tailroom is for the FIP MAC descriptor.
+ */
+static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ u8 dtype, struct sk_buff *skb, u32 d_id)
+{
+ struct fip_encaps_head {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_encaps encaps;
+ } __packed * cap;
+ struct fc_frame_header *fh;
+ struct fip_mac_desc *mac;
+ struct fcoe_fcf *fcf;
+ size_t dlen;
+ u16 fip_flags;
+ u8 op;
+
+ fh = (struct fc_frame_header *)skb->data;
+ op = *(u8 *)(fh + 1);
+ dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
+ cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
+ memset(cap, 0, sizeof(*cap));
+
+ if (lport->point_to_multipoint) {
+ if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest))
+ return -ENODEV;
+ fip_flags = 0;
+ } else {
+ fcf = fip->sel_fcf;
+ if (!fcf)
+ return -ENODEV;
+ fip_flags = fcf->flags;
+ fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA :
+ FIP_FL_FPMA;
+ if (!fip_flags)
+ return -ENODEV;
+ memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
+ }
+ memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+ cap->eth.h_proto = htons(ETH_P_FIP);
+
+ cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ cap->fip.fip_op = htons(FIP_OP_LS);
+ if (op == ELS_LS_ACC || op == ELS_LS_RJT)
+ cap->fip.fip_subcode = FIP_SC_REP;
+ else
+ cap->fip.fip_subcode = FIP_SC_REQ;
+ cap->fip.fip_flags = htons(fip_flags);
+
+ cap->encaps.fd_desc.fip_dtype = dtype;
+ cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
+
+ if (op != ELS_LS_RJT) {
+ dlen += sizeof(*mac);
+ mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
+ memset(mac, 0, sizeof(*mac));
+ mac->fd_desc.fip_dtype = FIP_DT_MAC;
+ mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
+ if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
+ memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
+ } else if (fip->mode == FIP_MODE_VN2VN) {
+ hton24(mac->fd_mac, FIP_VN_FC_MAP);
+ hton24(mac->fd_mac + 3, fip->port_id);
+ } else if (fip_flags & FIP_FL_SPMA) {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n");
+ memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
+ } else {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n");
+ /* FPMA only FLOGI. Must leave the MAC desc zeroed. */
+ }
+ }
+ cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
+
+ skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate.
+ * @fip: FCoE controller.
+ * @lport: libfc fc_lport to send from
+ * @skb: FCoE ELS frame including FC header but no FCoE headers.
+ *
+ * Returns a non-zero error code if the frame should not be sent.
+ * Returns zero if the caller should send the frame with FCoE encapsulation.
+ *
+ * The caller must check that the length is a multiple of 4.
+ * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
+ * The the skb must also be an fc_frame.
+ *
+ * This is called from the lower-level driver with spinlocks held,
+ * so we must not take a mutex here.
+ */
+int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ struct sk_buff *skb)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ u16 old_xid;
+ u8 op;
+ u8 mac[ETH_ALEN];
+
+ fp = container_of(skb, struct fc_frame, skb);
+ fh = (struct fc_frame_header *)skb->data;
+ op = *(u8 *)(fh + 1);
+
+ if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) {
+ old_xid = fip->flogi_oxid;
+ fip->flogi_oxid = ntohs(fh->fh_ox_id);
+ if (fip->state == FIP_ST_AUTO) {
+ if (old_xid == FC_XID_UNKNOWN)
+ fip->flogi_count = 0;
+ fip->flogi_count++;
+ if (fip->flogi_count < 3)
+ goto drop;
+ fcoe_ctlr_map_dest(fip);
+ return 0;
+ }
+ if (fip->state == FIP_ST_NON_FIP)
+ fcoe_ctlr_map_dest(fip);
+ }
+
+ if (fip->state == FIP_ST_NON_FIP)
+ return 0;
+ if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN)
+ goto drop;
+ switch (op) {
+ case ELS_FLOGI:
+ op = FIP_DT_FLOGI;
+ if (fip->mode == FIP_MODE_VN2VN)
+ break;
+ spin_lock_bh(&fip->ctlr_lock);
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = skb;
+ fip->flogi_req_send = 1;
+ spin_unlock_bh(&fip->ctlr_lock);
+ schedule_work(&fip->timer_work);
+ return -EINPROGRESS;
+ case ELS_FDISC:
+ if (ntoh24(fh->fh_s_id))
+ return 0;
+ op = FIP_DT_FDISC;
+ break;
+ case ELS_LOGO:
+ if (fip->mode == FIP_MODE_VN2VN) {
+ if (fip->state != FIP_ST_VNMP_UP)
+ return -EINVAL;
+ if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
+ return -EINVAL;
+ } else {
+ if (fip->state != FIP_ST_ENABLED)
+ return 0;
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ return 0;
+ }
+ op = FIP_DT_LOGO;
+ break;
+ case ELS_LS_ACC:
+ /*
+ * If non-FIP, we may have gotten an SID by accepting an FLOGI
+ * from a point-to-point connection. Switch to using
+ * the source mac based on the SID. The destination
+ * MAC in this case would have been set by receiving the
+ * FLOGI.
+ */
+ if (fip->state == FIP_ST_NON_FIP) {
+ if (fip->flogi_oxid == FC_XID_UNKNOWN)
+ return 0;
+ fip->flogi_oxid = FC_XID_UNKNOWN;
+ fc_fcoe_set_mac(mac, fh->fh_d_id);
+ fip->update_mac(lport, mac);
+ }
+ /* fall through */
+ case ELS_LS_RJT:
+ op = fr_encaps(fp);
+ if (op)
+ break;
+ return 0;
+ default:
+ if (fip->state != FIP_ST_ENABLED &&
+ fip->state != FIP_ST_VNMP_UP)
+ goto drop;
+ return 0;
+ }
+ LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n",
+ op, ntoh24(fh->fh_d_id));
+ if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id)))
+ goto drop;
+ fip->send(fip, skb);
+ return -EINPROGRESS;
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fcoe_ctlr_els_send);
+
+/**
+ * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
+ * @fip: The FCoE controller to free FCFs on
+ *
+ * Called with lock held and preemption disabled.
+ *
+ * An FCF is considered old if we have missed two advertisements.
+ * That is, there have been no valid advertisement from it for 2.5
+ * times its keep-alive period.
+ *
+ * In addition, determine the time when an FCF selection can occur.
+ *
+ * Also, increment the MissDiscAdvCount when no advertisement is received
+ * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB).
+ *
+ * Returns the time in jiffies for the next call.
+ */
+static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+{
+ struct fcoe_fcf *fcf;
+ struct fcoe_fcf *next;
+ unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ unsigned long deadline;
+ unsigned long sel_time = 0;
+ struct list_head del_list;
+ struct fc_stats *stats;
+
+ INIT_LIST_HEAD(&del_list);
+
+ stats = per_cpu_ptr(fip->lp->stats, get_cpu());
+
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+ if (fip->sel_fcf == fcf) {
+ if (time_after(jiffies, deadline)) {
+ stats->MissDiscAdvCount++;
+ printk(KERN_INFO "libfcoe: host%d: "
+ "Missing Discovery Advertisement "
+ "for fab %16.16llx count %lld\n",
+ fip->lp->host->host_no, fcf->fabric_name,
+ stats->MissDiscAdvCount);
+ } else if (time_after(next_timer, deadline))
+ next_timer = deadline;
+ }
+
+ deadline += fcf->fka_period;
+ if (time_after_eq(jiffies, deadline)) {
+ if (fip->sel_fcf == fcf)
+ fip->sel_fcf = NULL;
+ /*
+ * Move to delete list so we can call
+ * fcoe_sysfs_fcf_del (which can sleep)
+ * after the put_cpu().
+ */
+ list_del(&fcf->list);
+ list_add(&fcf->list, &del_list);
+ stats->VLinkFailureCount++;
+ } else {
+ if (time_after(next_timer, deadline))
+ next_timer = deadline;
+ if (fcoe_ctlr_mtu_valid(fcf) &&
+ (!sel_time || time_before(sel_time, fcf->time)))
+ sel_time = fcf->time;
+ }
+ }
+ put_cpu();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+ fcoe_sysfs_fcf_del(fcf);
+ }
+
+ if (sel_time && !fip->sel_fcf && !fip->sel_time) {
+ sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+ fip->sel_time = sel_time;
+ }
+
+ return next_timer;
+}
+
+/**
+ * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry
+ * @fip: The FCoE controller receiving the advertisement
+ * @skb: The received FIP advertisement frame
+ * @fcf: The resulting FCF entry
+ *
+ * Returns zero on a valid parsed advertisement,
+ * otherwise returns non zero value.
+ */
+static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
+ struct sk_buff *skb, struct fcoe_fcf *fcf)
+{
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+ struct fip_wwn_desc *wwn;
+ struct fip_fab_desc *fab;
+ struct fip_fka_desc *fka;
+ unsigned long t;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask;
+
+ memset(fcf, 0, sizeof(*fcf));
+ fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA);
+
+ fiph = (struct fip_header *)skb->data;
+ fcf->flags = ntohs(fiph->fip_flags);
+
+ /*
+ * mask of required descriptors. validating each one clears its bit.
+ */
+ desc_mask = BIT(FIP_DT_PRI) | BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) |
+ BIT(FIP_DT_FAB) | BIT(FIP_DT_FKA);
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ if (rlen + sizeof(*fiph) > skb->len)
+ return -EINVAL;
+
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen > 0) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ if (dlen < sizeof(*desc) || dlen > rlen)
+ return -EINVAL;
+ /* Drop Adv if there are duplicate critical descriptors */
+ if ((desc->fip_dtype < 32) &&
+ !(desc_mask & 1U << desc->fip_dtype)) {
+ LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
+ "Descriptors in FIP adv\n");
+ return -EINVAL;
+ }
+ switch (desc->fip_dtype) {
+ case FIP_DT_PRI:
+ if (dlen != sizeof(struct fip_pri_desc))
+ goto len_err;
+ fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri;
+ desc_mask &= ~BIT(FIP_DT_PRI);
+ break;
+ case FIP_DT_MAC:
+ if (dlen != sizeof(struct fip_mac_desc))
+ goto len_err;
+ memcpy(fcf->fcf_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
+ memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
+ if (!is_valid_ether_addr(fcf->fcf_mac)) {
+ LIBFCOE_FIP_DBG(fip,
+ "Invalid MAC addr %pM in FIP adv\n",
+ fcf->fcf_mac);
+ return -EINVAL;
+ }
+ desc_mask &= ~BIT(FIP_DT_MAC);
+ break;
+ case FIP_DT_NAME:
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+ fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn);
+ desc_mask &= ~BIT(FIP_DT_NAME);
+ break;
+ case FIP_DT_FAB:
+ if (dlen != sizeof(struct fip_fab_desc))
+ goto len_err;
+ fab = (struct fip_fab_desc *)desc;
+ fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn);
+ fcf->vfid = ntohs(fab->fd_vfid);
+ fcf->fc_map = ntoh24(fab->fd_map);
+ desc_mask &= ~BIT(FIP_DT_FAB);
+ break;
+ case FIP_DT_FKA:
+ if (dlen != sizeof(struct fip_fka_desc))
+ goto len_err;
+ fka = (struct fip_fka_desc *)desc;
+ if (fka->fd_flags & FIP_FKA_ADV_D)
+ fcf->fd_flags = 1;
+ t = ntohl(fka->fd_fka_period);
+ if (t >= FCOE_CTLR_MIN_FKA)
+ fcf->fka_period = msecs_to_jiffies(t);
+ desc_mask &= ~BIT(FIP_DT_FKA);
+ break;
+ case FIP_DT_MAP_OUI:
+ case FIP_DT_FCOE_SIZE:
+ case FIP_DT_FLOGI:
+ case FIP_DT_FDISC:
+ case FIP_DT_LOGO:
+ case FIP_DT_ELP:
+ default:
+ LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
+ "in FIP adv\n", desc->fip_dtype);
+ /* standard says ignore unknown descriptors >= 128 */
+ if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+ return -EINVAL;
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+ if (!fcf->fc_map || (fcf->fc_map & 0x10000))
+ return -EINVAL;
+ if (!fcf->switch_name)
+ return -EINVAL;
+ if (desc_mask) {
+ LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n",
+ desc_mask);
+ return -EINVAL;
+ }
+ return 0;
+
+len_err:
+ LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
+ desc->fip_dtype, dlen);
+ return -EINVAL;
+}
+
+/**
+ * fcoe_ctlr_recv_adv() - Handle an incoming advertisement
+ * @fip: The FCoE controller receiving the advertisement
+ * @skb: The received FIP packet
+ */
+static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct fcoe_fcf *fcf;
+ struct fcoe_fcf new;
+ unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
+ int first = 0;
+ int mtu_valid;
+ int found = 0;
+ int rc = 0;
+
+ if (fcoe_ctlr_parse_adv(fip, skb, &new))
+ return;
+
+ mutex_lock(&fip->ctlr_mutex);
+ first = list_empty(&fip->fcfs);
+ list_for_each_entry(fcf, &fip->fcfs, list) {
+ if (fcf->switch_name == new.switch_name &&
+ fcf->fabric_name == new.fabric_name &&
+ fcf->fc_map == new.fc_map &&
+ ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT)
+ goto out;
+
+ fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC);
+ if (!fcf)
+ goto out;
+
+ memcpy(fcf, &new, sizeof(new));
+ fcf->fip = fip;
+ rc = fcoe_sysfs_fcf_add(fcf);
+ if (rc) {
+ printk(KERN_ERR "Failed to allocate sysfs instance "
+ "for FCF, fab %16.16llx mac %pM\n",
+ new.fabric_name, new.fcf_mac);
+ kfree(fcf);
+ goto out;
+ }
+ } else {
+ /*
+ * Update the FCF's keep-alive descriptor flags.
+ * Other flag changes from new advertisements are
+ * ignored after a solicited advertisement is
+ * received and the FCF is selectable (usable).
+ */
+ fcf->fd_flags = new.fd_flags;
+ if (!fcoe_ctlr_fcf_usable(fcf))
+ fcf->flags = new.flags;
+
+ if (fcf == fip->sel_fcf && !fcf->fd_flags) {
+ fip->ctlr_ka_time -= fcf->fka_period;
+ fip->ctlr_ka_time += new.fka_period;
+ if (time_before(fip->ctlr_ka_time, fip->timer.expires))
+ mod_timer(&fip->timer, fip->ctlr_ka_time);
+ }
+ fcf->fka_period = new.fka_period;
+ memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
+ }
+
+ mtu_valid = fcoe_ctlr_mtu_valid(fcf);
+ fcf->time = jiffies;
+ if (!found)
+ LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+ fcf->fabric_name, fcf->fcf_mac);
+
+ /*
+ * If this advertisement is not solicited and our max receive size
+ * hasn't been verified, send a solicited advertisement.
+ */
+ if (!mtu_valid)
+ fcoe_ctlr_solicit(fip, fcf);
+
+ /*
+ * If its been a while since we did a solicit, and this is
+ * the first advertisement we've received, do a multicast
+ * solicitation to gather as many advertisements as we can
+ * before selection occurs.
+ */
+ if (first && time_after(jiffies, fip->sol_time + sol_tov))
+ fcoe_ctlr_solicit(fip, NULL);
+
+ /*
+ * Put this FCF at the head of the list for priority among equals.
+ * This helps in the case of an NPV switch which insists we use
+ * the FCF that answers multicast solicitations, not the others that
+ * are sending periodic multicast advertisements.
+ */
+ if (mtu_valid)
+ list_move(&fcf->list, &fip->fcfs);
+
+ /*
+ * If this is the first validated FCF, note the time and
+ * set a timer to trigger selection.
+ */
+ if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) {
+ fip->sel_time = jiffies +
+ msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+ if (!timer_pending(&fip->timer) ||
+ time_before(fip->sel_time, fip->timer.expires))
+ mod_timer(&fip->timer, fip->sel_time);
+ }
+
+out:
+ mutex_unlock(&fip->ctlr_mutex);
+}
+
+/**
+ * fcoe_ctlr_recv_els() - Handle an incoming FIP encapsulated ELS frame
+ * @fip: The FCoE controller which received the packet
+ * @skb: The received FIP packet
+ */
+static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fip_header *fiph;
+ struct fc_frame *fp = (struct fc_frame *)skb;
+ struct fc_frame_header *fh = NULL;
+ struct fip_desc *desc;
+ struct fip_encaps *els;
+ struct fcoe_fcf *sel;
+ struct fc_stats *stats;
+ enum fip_desc_type els_dtype = 0;
+ u8 els_op;
+ u8 sub;
+ u8 granted_mac[ETH_ALEN] = { 0 };
+ size_t els_len = 0;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 desc_cnt = 0;
+
+ fiph = (struct fip_header *)skb->data;
+ sub = fiph->fip_subcode;
+ if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
+ goto drop;
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ if (rlen + sizeof(*fiph) > skb->len)
+ goto drop;
+
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen > 0) {
+ desc_cnt++;
+ dlen = desc->fip_dlen * FIP_BPW;
+ if (dlen < sizeof(*desc) || dlen > rlen)
+ goto drop;
+ /* Drop ELS if there are duplicate critical descriptors */
+ if (desc->fip_dtype < 32) {
+ if ((desc->fip_dtype != FIP_DT_MAC) &&
+ (desc_mask & 1U << desc->fip_dtype)) {
+ LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
+ "Descriptors in FIP ELS\n");
+ goto drop;
+ }
+ desc_mask |= (1 << desc->fip_dtype);
+ }
+ switch (desc->fip_dtype) {
+ case FIP_DT_MAC:
+ sel = fip->sel_fcf;
+ if (desc_cnt == 1) {
+ LIBFCOE_FIP_DBG(fip, "FIP descriptors "
+ "received out of order\n");
+ goto drop;
+ }
+ /*
+ * Some switch implementations send two MAC descriptors,
+ * with first MAC(granted_mac) being the FPMA, and the
+ * second one(fcoe_mac) is used as destination address
+ * for sending/receiving FCoE packets. FIP traffic is
+ * sent using fip_mac. For regular switches, both
+ * fip_mac and fcoe_mac would be the same.
+ */
+ if (desc_cnt == 2)
+ memcpy(granted_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
+
+ if (dlen != sizeof(struct fip_mac_desc))
+ goto len_err;
+
+ if ((desc_cnt == 3) && (sel))
+ memcpy(sel->fcoe_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
+ break;
+ case FIP_DT_FLOGI:
+ case FIP_DT_FDISC:
+ case FIP_DT_LOGO:
+ case FIP_DT_ELP:
+ if (desc_cnt != 1) {
+ LIBFCOE_FIP_DBG(fip, "FIP descriptors "
+ "received out of order\n");
+ goto drop;
+ }
+ if (fh)
+ goto drop;
+ if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+ goto len_err;
+ els_len = dlen - sizeof(*els);
+ els = (struct fip_encaps *)desc;
+ fh = (struct fc_frame_header *)(els + 1);
+ els_dtype = desc->fip_dtype;
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
+ "in FIP adv\n", desc->fip_dtype);
+ /* standard says ignore unknown descriptors >= 128 */
+ if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+ goto drop;
+ if (desc_cnt <= 2) {
+ LIBFCOE_FIP_DBG(fip, "FIP descriptors "
+ "received out of order\n");
+ goto drop;
+ }
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+
+ if (!fh)
+ goto drop;
+ els_op = *(u8 *)(fh + 1);
+
+ if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) &&
+ sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) {
+ if (els_op == ELS_LS_ACC) {
+ if (!is_valid_ether_addr(granted_mac)) {
+ LIBFCOE_FIP_DBG(fip,
+ "Invalid MAC address %pM in FIP ELS\n",
+ granted_mac);
+ goto drop;
+ }
+ memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
+
+ if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
+ fip->flogi_oxid = FC_XID_UNKNOWN;
+ if (els_dtype == FIP_DT_FLOGI)
+ fcoe_ctlr_announce(fip);
+ }
+ } else if (els_dtype == FIP_DT_FLOGI &&
+ !fcoe_ctlr_flogi_retry(fip))
+ goto drop; /* retrying FLOGI so drop reject */
+ }
+
+ if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) &&
+ (!(1U << FIP_DT_MAC & desc_mask)))) {
+ LIBFCOE_FIP_DBG(fip, "Missing critical descriptors "
+ "in FIP ELS\n");
+ goto drop;
+ }
+
+ /*
+ * Convert skb into an fc_frame containing only the ELS.
+ */
+ skb_pull(skb, (u8 *)fh - skb->data);
+ skb_trim(skb, els_len);
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_dev(fp) = lport;
+ fr_encaps(fp) = els_dtype;
+
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += skb->len / FIP_BPW;
+ put_cpu();
+
+ fc_exch_recv(lport, fp);
+ return;
+
+len_err:
+ LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
+ desc->fip_dtype, dlen);
+drop:
+ kfree_skb(skb);
+}
+
+/**
+ * fcoe_ctlr_recv_els() - Handle an incoming link reset frame
+ * @fip: The FCoE controller that received the frame
+ * @fh: The received FIP header
+ *
+ * There may be multiple VN_Port descriptors.
+ * The overall length has already been checked.
+ */
+static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
+ struct fip_header *fh)
+{
+ struct fip_desc *desc;
+ struct fip_mac_desc *mp;
+ struct fip_wwn_desc *wp;
+ struct fip_vn_desc *vp;
+ size_t rlen;
+ size_t dlen;
+ struct fcoe_fcf *fcf = fip->sel_fcf;
+ struct fc_lport *lport = fip->lp;
+ struct fc_lport *vn_port = NULL;
+ u32 desc_mask;
+ int num_vlink_desc;
+ int reset_phys_port = 0;
+ struct fip_vn_desc **vlink_desc_arr = NULL;
+
+ LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
+
+ if (!fcf || !lport->port_id) {
+ /*
+ * We are yet to select best FCF, but we got CVL in the
+ * meantime. reset the ctlr and let it rediscover the FCF
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ return;
+ }
+
+ /*
+ * mask of required descriptors. Validating each one clears its bit.
+ */
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
+
+ rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
+ desc = (struct fip_desc *)(fh + 1);
+
+ /*
+ * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
+ * before determining max Vx_Port descriptor but a buggy FCF could have
+ * omited either or both MAC Address and Name Identifier descriptors
+ */
+ num_vlink_desc = rlen / sizeof(*vp);
+ if (num_vlink_desc)
+ vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+ GFP_ATOMIC);
+ if (!vlink_desc_arr)
+ return;
+ num_vlink_desc = 0;
+
+ while (rlen >= sizeof(*desc)) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ if (dlen > rlen)
+ goto err;
+ /* Drop CVL if there are duplicate critical descriptors */
+ if ((desc->fip_dtype < 32) &&
+ (desc->fip_dtype != FIP_DT_VN_ID) &&
+ !(desc_mask & 1U << desc->fip_dtype)) {
+ LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
+ "Descriptors in FIP CVL\n");
+ goto err;
+ }
+ switch (desc->fip_dtype) {
+ case FIP_DT_MAC:
+ mp = (struct fip_mac_desc *)desc;
+ if (dlen < sizeof(*mp))
+ goto err;
+ if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac))
+ goto err;
+ desc_mask &= ~BIT(FIP_DT_MAC);
+ break;
+ case FIP_DT_NAME:
+ wp = (struct fip_wwn_desc *)desc;
+ if (dlen < sizeof(*wp))
+ goto err;
+ if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
+ goto err;
+ desc_mask &= ~BIT(FIP_DT_NAME);
+ break;
+ case FIP_DT_VN_ID:
+ vp = (struct fip_vn_desc *)desc;
+ if (dlen < sizeof(*vp))
+ goto err;
+ vlink_desc_arr[num_vlink_desc++] = vp;
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (vn_port && (vn_port == lport)) {
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+ }
+ break;
+ default:
+ /* standard says ignore unknown descriptors >= 128 */
+ if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+ goto err;
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+
+ /*
+ * reset only if all required descriptors were present and valid.
+ */
+ if (desc_mask)
+ LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
+ desc_mask);
+ else if (!num_vlink_desc) {
+ LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+ /*
+ * No Vx_Port description. Clear all NPIV ports,
+ * followed by physical port
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ } else {
+ int i;
+
+ LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ for (i = 0; i < num_vlink_desc; i++) {
+ vp = vlink_desc_arr[i];
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (!vn_port)
+ continue;
+
+ /*
+ * 'port_id' is already validated, check MAC address and
+ * wwpn
+ */
+ if (!ether_addr_equal(fip->get_src_addr(vn_port),
+ vp->fd_mac) ||
+ get_unaligned_be64(&vp->fd_wwpn) !=
+ vn_port->wwpn)
+ continue;
+
+ if (vn_port == lport)
+ /*
+ * Physical port, defer processing till all
+ * listed NPIV ports are cleared
+ */
+ reset_phys_port = 1;
+ else /* NPIV port */
+ fc_lport_reset(vn_port);
+ }
+
+ if (reset_phys_port) {
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
+ }
+ }
+
+err:
+ kfree(vlink_desc_arr);
+}
+
+/**
+ * fcoe_ctlr_recv() - Receive a FIP packet
+ * @fip: The FCoE controller that received the packet
+ * @skb: The received FIP packet
+ *
+ * This may be called from either NET_RX_SOFTIRQ or IRQ.
+ */
+void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
+ skb_queue_tail(&fip->fip_recv_list, skb);
+ schedule_work(&fip->recv_work);
+}
+EXPORT_SYMBOL(fcoe_ctlr_recv);
+
+/**
+ * fcoe_ctlr_recv_handler() - Receive a FIP frame
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is dropped.
+ */
+static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct fip_header *fiph;
+ struct ethhdr *eh;
+ enum fip_state state;
+ u16 op;
+ u8 sub;
+
+ if (skb_linearize(skb))
+ goto drop;
+ if (skb->len < sizeof(*fiph))
+ goto drop;
+ eh = eth_hdr(skb);
+ if (fip->mode == FIP_MODE_VN2VN) {
+ if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+ !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) &&
+ !ether_addr_equal(eh->h_dest, fcoe_all_p2p))
+ goto drop;
+ } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+ !ether_addr_equal(eh->h_dest, fcoe_all_enode))
+ goto drop;
+ fiph = (struct fip_header *)skb->data;
+ op = ntohs(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+ goto drop;
+ if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+ goto drop;
+
+ mutex_lock(&fip->ctlr_mutex);
+ state = fip->state;
+ if (state == FIP_ST_AUTO) {
+ fip->map_dest = 0;
+ fcoe_ctlr_set_state(fip, FIP_ST_ENABLED);
+ state = FIP_ST_ENABLED;
+ LIBFCOE_FIP_DBG(fip, "Using FIP mode\n");
+ }
+ mutex_unlock(&fip->ctlr_mutex);
+
+ if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN)
+ return fcoe_ctlr_vn_recv(fip, skb);
+
+ if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP &&
+ state != FIP_ST_VNMP_CLAIM)
+ goto drop;
+
+ if (op == FIP_OP_LS) {
+ fcoe_ctlr_recv_els(fip, skb); /* consumes skb */
+ return 0;
+ }
+
+ if (state != FIP_ST_ENABLED)
+ goto drop;
+
+ if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
+ fcoe_ctlr_recv_adv(fip, skb);
+ else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
+ fcoe_ctlr_recv_clr_vlink(fip, fiph);
+ kfree_skb(skb);
+ return 0;
+drop:
+ kfree_skb(skb);
+ return -1;
+}
+
+/**
+ * fcoe_ctlr_select() - Select the best FCF (if possible)
+ * @fip: The FCoE controller
+ *
+ * Returns the selected FCF, or NULL if none are usable.
+ *
+ * If there are conflicting advertisements, no FCF can be chosen.
+ *
+ * If there is already a selected FCF, this will choose a better one or
+ * an equivalent one that hasn't already been sent a FLOGI.
+ *
+ * Called with lock held.
+ */
+static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
+{
+ struct fcoe_fcf *fcf;
+ struct fcoe_fcf *best = fip->sel_fcf;
+
+ list_for_each_entry(fcf, &fip->fcfs, list) {
+ LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
+ "VFID %d mac %pM map %x val %d "
+ "sent %u pri %u\n",
+ fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
+ fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
+ fcf->flogi_sent, fcf->pri);
+ if (!fcoe_ctlr_fcf_usable(fcf)) {
+ LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
+ "map %x %svalid %savailable\n",
+ fcf->fabric_name, fcf->fc_map,
+ (fcf->flags & FIP_FL_SOL) ? "" : "in",
+ (fcf->flags & FIP_FL_AVAIL) ?
+ "" : "un");
+ continue;
+ }
+ if (!best || fcf->pri < best->pri || best->flogi_sent)
+ best = fcf;
+ if (fcf->fabric_name != best->fabric_name ||
+ fcf->vfid != best->vfid ||
+ fcf->fc_map != best->fc_map) {
+ LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+ "or FC-MAP\n");
+ return NULL;
+ }
+ }
+ fip->sel_fcf = best;
+ if (best) {
+ LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac);
+ fip->port_ka_time = jiffies +
+ msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ fip->ctlr_ka_time = jiffies + best->fka_period;
+ if (time_before(fip->ctlr_ka_time, fip->timer.expires))
+ mod_timer(&fip->timer, fip->ctlr_ka_time);
+ }
+ return best;
+}
+
+/**
+ * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF
+ * @fip: The FCoE controller
+ *
+ * Returns non-zero error if it could not be sent.
+ *
+ * Called with ctlr_mutex and ctlr_lock held.
+ * Caller must verify that fip->sel_fcf is not NULL.
+ */
+static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+{
+ struct sk_buff *skb;
+ struct sk_buff *skb_orig;
+ struct fc_frame_header *fh;
+ int error;
+
+ skb_orig = fip->flogi_req;
+ if (!skb_orig)
+ return -EINVAL;
+
+ /*
+ * Clone and send the FLOGI request. If clone fails, use original.
+ */
+ skb = skb_clone(skb_orig, GFP_ATOMIC);
+ if (!skb) {
+ skb = skb_orig;
+ fip->flogi_req = NULL;
+ }
+ fh = (struct fc_frame_header *)skb->data;
+ error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb,
+ ntoh24(fh->fh_d_id));
+ if (error) {
+ kfree_skb(skb);
+ return error;
+ }
+ fip->send(fip, skb);
+ fip->sel_fcf->flogi_sent = 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible
+ * @fip: The FCoE controller
+ *
+ * Returns non-zero error code if there's no FLOGI request to retry or
+ * no alternate FCF available.
+ */
+static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+{
+ struct fcoe_fcf *fcf;
+ int error;
+
+ mutex_lock(&fip->ctlr_mutex);
+ spin_lock_bh(&fip->ctlr_lock);
+ LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ fcf = fcoe_ctlr_select(fip);
+ if (!fcf || fcf->flogi_sent) {
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = NULL;
+ error = -ENOENT;
+ } else {
+ fcoe_ctlr_solicit(fip, NULL);
+ error = fcoe_ctlr_flogi_send_locked(fip);
+ }
+ spin_unlock_bh(&fip->ctlr_lock);
+ mutex_unlock(&fip->ctlr_mutex);
+ return error;
+}
+
+
+/**
+ * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI.
+ * @fip: The FCoE controller that timed out
+ *
+ * Done here because fcoe_ctlr_els_send() can't get mutex.
+ *
+ * Called with ctlr_mutex held. The caller must not hold ctlr_lock.
+ */
+static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+{
+ struct fcoe_fcf *fcf;
+
+ spin_lock_bh(&fip->ctlr_lock);
+ fcf = fip->sel_fcf;
+ if (!fcf || !fip->flogi_req_send)
+ goto unlock;
+
+ LIBFCOE_FIP_DBG(fip, "sending FLOGI\n");
+
+ /*
+ * If this FLOGI is being sent due to a timeout retry
+ * to the same FCF as before, select a different FCF if possible.
+ */
+ if (fcf->flogi_sent) {
+ LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n");
+ fcf = fcoe_ctlr_select(fip);
+ if (!fcf || fcf->flogi_sent) {
+ LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n");
+ list_for_each_entry(fcf, &fip->fcfs, list)
+ fcf->flogi_sent = 0;
+ fcf = fcoe_ctlr_select(fip);
+ }
+ }
+ if (fcf) {
+ fcoe_ctlr_flogi_send_locked(fip);
+ fip->flogi_req_send = 0;
+ } else /* XXX */
+ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+unlock:
+ spin_unlock_bh(&fip->ctlr_lock);
+}
+
+/**
+ * fcoe_ctlr_timeout() - FIP timeout handler
+ * @arg: The FCoE controller that timed out
+ */
+static void fcoe_ctlr_timeout(unsigned long arg)
+{
+ struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+
+ schedule_work(&fip->timer_work);
+}
+
+/**
+ * fcoe_ctlr_timer_work() - Worker thread function for timer work
+ * @work: Handle to a FCoE controller
+ *
+ * Ages FCFs. Triggers FCF selection if possible.
+ * Sends keep-alives and resets.
+ */
+static void fcoe_ctlr_timer_work(struct work_struct *work)
+{
+ struct fcoe_ctlr *fip;
+ struct fc_lport *vport;
+ u8 *mac;
+ u8 reset = 0;
+ u8 send_ctlr_ka = 0;
+ u8 send_port_ka = 0;
+ struct fcoe_fcf *sel;
+ struct fcoe_fcf *fcf;
+ unsigned long next_timer;
+
+ fip = container_of(work, struct fcoe_ctlr, timer_work);
+ if (fip->mode == FIP_MODE_VN2VN)
+ return fcoe_ctlr_vn_timeout(fip);
+ mutex_lock(&fip->ctlr_mutex);
+ if (fip->state == FIP_ST_DISABLED) {
+ mutex_unlock(&fip->ctlr_mutex);
+ return;
+ }
+
+ fcf = fip->sel_fcf;
+ next_timer = fcoe_ctlr_age_fcfs(fip);
+
+ sel = fip->sel_fcf;
+ if (!sel && fip->sel_time) {
+ if (time_after_eq(jiffies, fip->sel_time)) {
+ sel = fcoe_ctlr_select(fip);
+ fip->sel_time = 0;
+ } else if (time_after(next_timer, fip->sel_time))
+ next_timer = fip->sel_time;
+ }
+
+ if (sel && fip->flogi_req_send)
+ fcoe_ctlr_flogi_send(fip);
+ else if (!sel && fcf)
+ reset = 1;
+
+ if (sel && !sel->fd_flags) {
+ if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
+ fip->ctlr_ka_time = jiffies + sel->fka_period;
+ send_ctlr_ka = 1;
+ }
+ if (time_after(next_timer, fip->ctlr_ka_time))
+ next_timer = fip->ctlr_ka_time;
+
+ if (time_after_eq(jiffies, fip->port_ka_time)) {
+ fip->port_ka_time = jiffies +
+ msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ send_port_ka = 1;
+ }
+ if (time_after(next_timer, fip->port_ka_time))
+ next_timer = fip->port_ka_time;
+ }
+ if (!list_empty(&fip->fcfs))
+ mod_timer(&fip->timer, next_timer);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ if (reset) {
+ fc_lport_reset(fip->lp);
+ /* restart things with a solicitation */
+ fcoe_ctlr_solicit(fip, NULL);
+ }
+
+ if (send_ctlr_ka)
+ fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr);
+
+ if (send_port_ka) {
+ mutex_lock(&fip->lp->lp_mutex);
+ mac = fip->get_src_addr(fip->lp);
+ fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac);
+ list_for_each_entry(vport, &fip->lp->vports, list) {
+ mac = fip->get_src_addr(vport);
+ fcoe_ctlr_send_keep_alive(fip, vport, 1, mac);
+ }
+ mutex_unlock(&fip->lp->lp_mutex);
+ }
+}
+
+/**
+ * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames
+ * @recv_work: Handle to a FCoE controller
+ */
+static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
+{
+ struct fcoe_ctlr *fip;
+ struct sk_buff *skb;
+
+ fip = container_of(recv_work, struct fcoe_ctlr, recv_work);
+ while ((skb = skb_dequeue(&fip->fip_recv_list)))
+ fcoe_ctlr_recv_handler(fip, skb);
+}
+
+/**
+ * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response
+ * @fip: The FCoE controller
+ * @fp: The FC frame to snoop
+ *
+ * Snoop potential response to FLOGI or even incoming FLOGI.
+ *
+ * The caller has checked that we are waiting for login as indicated
+ * by fip->flogi_oxid != FC_XID_UNKNOWN.
+ *
+ * The caller is responsible for freeing the frame.
+ * Fill in the granted_mac address.
+ *
+ * Return non-zero if the frame should not be delivered to libfc.
+ */
+int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ u8 op;
+ u8 *sa;
+
+ sa = eth_hdr(&fp->skb)->h_source;
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_ELS)
+ return 0;
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+ fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
+
+ mutex_lock(&fip->ctlr_mutex);
+ if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) {
+ mutex_unlock(&fip->ctlr_mutex);
+ return -EINVAL;
+ }
+ fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
+ LIBFCOE_FIP_DBG(fip,
+ "received FLOGI LS_ACC using non-FIP mode\n");
+
+ /*
+ * FLOGI accepted.
+ * If the src mac addr is FC_OUI-based, then we mark the
+ * address_mode flag to use FC_OUI-based Ethernet DA.
+ * Otherwise we use the FCoE gateway addr
+ */
+ if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
+ fcoe_ctlr_map_dest(fip);
+ } else {
+ memcpy(fip->dest_addr, sa, ETH_ALEN);
+ fip->map_dest = 0;
+ }
+ fip->flogi_oxid = FC_XID_UNKNOWN;
+ mutex_unlock(&fip->ctlr_mutex);
+ fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id);
+ } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
+ /*
+ * Save source MAC for point-to-point responses.
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
+ memcpy(fip->dest_addr, sa, ETH_ALEN);
+ fip->map_dest = 0;
+ if (fip->state == FIP_ST_AUTO)
+ LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
+ "Setting non-FIP mode\n");
+ fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
+ }
+ mutex_unlock(&fip->ctlr_mutex);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
+
+/**
+ * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN
+ * @mac: The MAC address to convert
+ * @scheme: The scheme to use when converting
+ * @port: The port indicator for converting
+ *
+ * Returns: u64 fc world wide name
+ */
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+ unsigned int scheme, unsigned int port)
+{
+ u64 wwn;
+ u64 host_mac;
+
+ /* The MAC is in NO, so flip only the low 48 bits */
+ host_mac = ((u64) mac[0] << 40) |
+ ((u64) mac[1] << 32) |
+ ((u64) mac[2] << 24) |
+ ((u64) mac[3] << 16) |
+ ((u64) mac[4] << 8) |
+ (u64) mac[5];
+
+ WARN_ON(host_mac >= (1ULL << 48));
+ wwn = host_mac | ((u64) scheme << 60);
+ switch (scheme) {
+ case 1:
+ WARN_ON(port != 0);
+ break;
+ case 2:
+ WARN_ON(port >= 0xfff);
+ wwn |= (u64) port << 48;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return wwn;
+}
+EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
+
+/**
+ * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv
+ * @rdata: libfc remote port
+ */
+static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
+{
+ return (struct fcoe_rport *)(rdata + 1);
+}
+
+/**
+ * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply.
+ * @fip: The FCoE controller
+ * @sub: sub-opcode for probe request, reply, or advertisement.
+ * @dest: The destination Ethernet MAC address
+ * @min_len: minimum size of the Ethernet payload to be sent
+ */
+static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
+ enum fip_vn2vn_subcode sub,
+ const u8 *dest, size_t min_len)
+{
+ struct sk_buff *skb;
+ struct fip_frame {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac;
+ struct fip_wwn_desc wwnn;
+ struct fip_vn_desc vn;
+ } __packed * frame;
+ struct fip_fc4_feat *ff;
+ struct fip_size_desc *size;
+ u32 fcp_feat;
+ size_t len;
+ size_t dlen;
+
+ len = sizeof(*frame);
+ dlen = 0;
+ if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) {
+ dlen = sizeof(struct fip_fc4_feat) +
+ sizeof(struct fip_size_desc);
+ len += dlen;
+ }
+ dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn);
+ len = max(len, min_len + sizeof(struct ethhdr));
+
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return;
+
+ frame = (struct fip_frame *)skb->data;
+ memset(frame, 0, len);
+ memcpy(frame->eth.h_dest, dest, ETH_ALEN);
+
+ if (sub == FIP_SC_VN_BEACON) {
+ hton24(frame->eth.h_source, FIP_VN_FC_MAP);
+ hton24(frame->eth.h_source + 3, fip->port_id);
+ } else {
+ memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+ }
+ frame->eth.h_proto = htons(ETH_P_FIP);
+
+ frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ frame->fip.fip_op = htons(FIP_OP_VN2VN);
+ frame->fip.fip_subcode = sub;
+ frame->fip.fip_dl_len = htons(dlen / FIP_BPW);
+
+ frame->mac.fd_desc.fip_dtype = FIP_DT_MAC;
+ frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW;
+ memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+ frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+ frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW;
+ put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn);
+
+ frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID;
+ frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW;
+ hton24(frame->vn.fd_mac, FIP_VN_FC_MAP);
+ hton24(frame->vn.fd_mac + 3, fip->port_id);
+ hton24(frame->vn.fd_fc_id, fip->port_id);
+ put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn);
+
+ /*
+ * For claims, add FC-4 features.
+ * TBD: Add interface to get fc-4 types and features from libfc.
+ */
+ if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) {
+ ff = (struct fip_fc4_feat *)(frame + 1);
+ ff->fd_desc.fip_dtype = FIP_DT_FC4F;
+ ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW;
+ ff->fd_fts = fip->lp->fcts;
+
+ fcp_feat = 0;
+ if (fip->lp->service_params & FCP_SPPF_INIT_FCN)
+ fcp_feat |= FCP_FEAT_INIT;
+ if (fip->lp->service_params & FCP_SPPF_TARG_FCN)
+ fcp_feat |= FCP_FEAT_TARG;
+ fcp_feat <<= (FC_TYPE_FCP * 4) % 32;
+ ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat);
+
+ size = (struct fip_size_desc *)(ff + 1);
+ size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE;
+ size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW;
+ size->fd_size = htons(fcoe_ctlr_fcoe_size(fip));
+ }
+
+ skb_put(skb, len);
+ skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+
+ fip->send(fip, skb);
+}
+
+/**
+ * fcoe_ctlr_vn_rport_callback - Event handler for rport events.
+ * @lport: The lport which is receiving the event
+ * @rdata: remote port private data
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock must not be held when calling this function.
+ */
+static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct fcoe_ctlr *fip = lport->disc.priv;
+ struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
+
+ LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n",
+ rdata->ids.port_id, event);
+
+ mutex_lock(&fip->ctlr_mutex);
+ switch (event) {
+ case RPORT_EV_READY:
+ frport->login_count = 0;
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ frport->login_count++;
+ if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) {
+ LIBFCOE_FIP_DBG(fip,
+ "rport FLOGI limited port_id %6.6x\n",
+ rdata->ids.port_id);
+ lport->tt.rport_logoff(rdata);
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&fip->ctlr_mutex);
+}
+
+static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = {
+ .event_callback = fcoe_ctlr_vn_rport_callback,
+};
+
+/**
+ * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode
+ * @fip: The FCoE controller
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
+{
+ struct fc_rport_priv *rdata;
+
+ mutex_lock(&lport->disc.disc_mutex);
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
+ lport->tt.rport_logoff(rdata);
+ lport->disc.disc_callback = NULL;
+ mutex_unlock(&lport->disc.disc_mutex);
+}
+
+/**
+ * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode
+ * @fip: The FCoE controller
+ *
+ * Called through the local port template for discovery.
+ * Called without the ctlr_mutex held.
+ */
+static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
+{
+ struct fcoe_ctlr *fip = lport->disc.priv;
+
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_disc_stop_locked(lport);
+ mutex_unlock(&fip->ctlr_mutex);
+}
+
+/**
+ * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode
+ * @fip: The FCoE controller
+ *
+ * Called through the local port template for discovery.
+ * Called without the ctlr_mutex held.
+ */
+static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
+{
+ fcoe_ctlr_disc_stop(lport);
+ lport->tt.rport_flush_queue();
+ synchronize_rcu();
+}
+
+/**
+ * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id
+ * @fip: The FCoE controller
+ *
+ * Called with fcoe_ctlr lock held.
+ */
+static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
+{
+ unsigned long wait;
+ u32 port_id;
+
+ fcoe_ctlr_disc_stop_locked(fip->lp);
+
+ /*
+ * Get proposed port ID.
+ * If this is the first try after link up, use any previous port_id.
+ * If there was none, use the low bits of the port_name.
+ * On subsequent tries, get the next random one.
+ * Don't use reserved IDs, use another non-zero value, just as random.
+ */
+ port_id = fip->port_id;
+ if (fip->probe_tries)
+ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
+ else if (!port_id)
+ port_id = fip->lp->wwpn & 0xffff;
+ if (!port_id || port_id == 0xffff)
+ port_id = 1;
+ fip->port_id = port_id;
+
+ if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
+ fip->probe_tries++;
+ wait = prandom_u32() % FIP_VN_PROBE_WAIT;
+ } else
+ wait = FIP_VN_RLIM_INT;
+ mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
+ fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START);
+}
+
+/**
+ * fcoe_ctlr_vn_start() - Start in VN2VN mode
+ * @fip: The FCoE controller
+ *
+ * Called with fcoe_ctlr lock held.
+ */
+static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
+{
+ fip->probe_tries = 0;
+ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
+ fcoe_ctlr_vn_restart(fip);
+}
+
+/**
+ * fcoe_ctlr_vn_parse - parse probe request or response
+ * @fip: The FCoE controller
+ * @skb: incoming packet
+ * @rdata: buffer for resulting parsed VN entry plus fcoe_rport
+ *
+ * Returns non-zero error number on error.
+ * Does not consume the packet.
+ */
+static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ struct sk_buff *skb,
+ struct fc_rport_priv *rdata)
+{
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+ struct fip_mac_desc *macd = NULL;
+ struct fip_wwn_desc *wwn = NULL;
+ struct fip_vn_desc *vn = NULL;
+ struct fip_size_desc *size = NULL;
+ struct fcoe_rport *frport;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 dtype;
+ u8 sub;
+
+ memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
+ frport = fcoe_ctlr_rport(rdata);
+
+ fiph = (struct fip_header *)skb->data;
+ frport->flags = ntohs(fiph->fip_flags);
+
+ sub = fiph->fip_subcode;
+ switch (sub) {
+ case FIP_SC_VN_PROBE_REQ:
+ case FIP_SC_VN_PROBE_REP:
+ case FIP_SC_VN_BEACON:
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) |
+ BIT(FIP_DT_VN_ID);
+ break;
+ case FIP_SC_VN_CLAIM_NOTIFY:
+ case FIP_SC_VN_CLAIM_REP:
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) |
+ BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) |
+ BIT(FIP_DT_FCOE_SIZE);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub);
+ return -EINVAL;
+ }
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ if (rlen + sizeof(*fiph) > skb->len)
+ return -EINVAL;
+
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen > 0) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ if (dlen < sizeof(*desc) || dlen > rlen)
+ return -EINVAL;
+
+ dtype = desc->fip_dtype;
+ if (dtype < 32) {
+ if (!(desc_mask & BIT(dtype))) {
+ LIBFCOE_FIP_DBG(fip,
+ "unexpected or duplicated desc "
+ "desc type %u in "
+ "FIP VN2VN subtype %u\n",
+ dtype, sub);
+ return -EINVAL;
+ }
+ desc_mask &= ~BIT(dtype);
+ }
+
+ switch (dtype) {
+ case FIP_DT_MAC:
+ if (dlen != sizeof(struct fip_mac_desc))
+ goto len_err;
+ macd = (struct fip_mac_desc *)desc;
+ if (!is_valid_ether_addr(macd->fd_mac)) {
+ LIBFCOE_FIP_DBG(fip,
+ "Invalid MAC addr %pM in FIP VN2VN\n",
+ macd->fd_mac);
+ return -EINVAL;
+ }
+ memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN);
+ break;
+ case FIP_DT_NAME:
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+ rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ break;
+ case FIP_DT_VN_ID:
+ if (dlen != sizeof(struct fip_vn_desc))
+ goto len_err;
+ vn = (struct fip_vn_desc *)desc;
+ memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
+ rdata->ids.port_id = ntoh24(vn->fd_fc_id);
+ rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+ break;
+ case FIP_DT_FC4F:
+ if (dlen != sizeof(struct fip_fc4_feat))
+ goto len_err;
+ break;
+ case FIP_DT_FCOE_SIZE:
+ if (dlen != sizeof(struct fip_size_desc))
+ goto len_err;
+ size = (struct fip_size_desc *)desc;
+ frport->fcoe_len = ntohs(size->fd_size);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
+ "in FIP probe\n", dtype);
+ /* standard says ignore unknown descriptors >= 128 */
+ if (dtype < FIP_DT_VENDOR_BASE)
+ return -EINVAL;
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+ return 0;
+
+len_err:
+ LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
+ dtype, dlen);
+ return -EINVAL;
+}
+
+/**
+ * fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification.
+ * @fip: The FCoE controller
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
+{
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0);
+ fip->sol_time = jiffies;
+}
+
+/**
+ * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
+ * @fip: The FCoE controller
+ * @rdata: parsed remote port with frport from the probe request
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
+ struct fc_rport_priv *rdata)
+{
+ struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
+
+ if (rdata->ids.port_id != fip->port_id)
+ return;
+
+ switch (fip->state) {
+ case FIP_ST_VNMP_CLAIM:
+ case FIP_ST_VNMP_UP:
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
+ frport->enode_mac, 0);
+ break;
+ case FIP_ST_VNMP_PROBE1:
+ case FIP_ST_VNMP_PROBE2:
+ /*
+ * Decide whether to reply to the Probe.
+ * Our selected address is never a "recorded" one, so
+ * only reply if our WWPN is greater and the
+ * Probe's REC bit is not set.
+ * If we don't reply, we will change our address.
+ */
+ if (fip->lp->wwpn > rdata->ids.port_name &&
+ !(frport->flags & FIP_FL_REC_OR_P2P)) {
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
+ frport->enode_mac, 0);
+ break;
+ }
+ /* fall through */
+ case FIP_ST_VNMP_START:
+ fcoe_ctlr_vn_restart(fip);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
+ * @fip: The FCoE controller
+ * @rdata: parsed remote port with frport from the probe request
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
+ struct fc_rport_priv *rdata)
+{
+ if (rdata->ids.port_id != fip->port_id)
+ return;
+ switch (fip->state) {
+ case FIP_ST_VNMP_START:
+ case FIP_ST_VNMP_PROBE1:
+ case FIP_ST_VNMP_PROBE2:
+ case FIP_ST_VNMP_CLAIM:
+ fcoe_ctlr_vn_restart(fip);
+ break;
+ case FIP_ST_VNMP_UP:
+ fcoe_ctlr_vn_send_claim(fip);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
+ * @fip: The FCoE controller
+ * @new: newly-parsed remote port with frport as a template for new rdata
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fc_rport_priv *rdata;
+ struct fc_rport_identifiers *ids;
+ struct fcoe_rport *frport;
+ u32 port_id;
+
+ port_id = new->ids.port_id;
+ if (port_id == fip->port_id)
+ return;
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_create(lport, port_id);
+ if (!rdata) {
+ mutex_unlock(&lport->disc.disc_mutex);
+ return;
+ }
+
+ rdata->ops = &fcoe_ctlr_vn_rport_ops;
+ rdata->disc_id = lport->disc.disc_id;
+
+ ids = &rdata->ids;
+ if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
+ (ids->node_name != -1 && ids->node_name != new->ids.node_name))
+ lport->tt.rport_logoff(rdata);
+ ids->port_name = new->ids.port_name;
+ ids->node_name = new->ids.node_name;
+ mutex_unlock(&lport->disc.disc_mutex);
+
+ frport = fcoe_ctlr_rport(rdata);
+ LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
+ port_id, frport->fcoe_len ? "old" : "new");
+ *frport = *fcoe_ctlr_rport(new);
+ frport->time = 0;
+}
+
+/**
+ * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address
+ * @fip: The FCoE controller
+ * @port_id: The port_id of the remote VN_node
+ * @mac: buffer which will hold the VN_NODE destination MAC address, if found.
+ *
+ * Returns non-zero error if no remote port found.
+ */
+static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fc_rport_priv *rdata;
+ struct fcoe_rport *frport;
+ int ret = -1;
+
+ rcu_read_lock();
+ rdata = lport->tt.rport_lookup(lport, port_id);
+ if (rdata) {
+ frport = fcoe_ctlr_rport(rdata);
+ memcpy(mac, frport->enode_mac, ETH_ALEN);
+ ret = 0;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
+ * @fip: The FCoE controller
+ * @new: newly-parsed remote port with frport as a template for new rdata
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
+ struct fc_rport_priv *new)
+{
+ struct fcoe_rport *frport = fcoe_ctlr_rport(new);
+
+ if (frport->flags & FIP_FL_REC_OR_P2P) {
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
+ return;
+ }
+ switch (fip->state) {
+ case FIP_ST_VNMP_START:
+ case FIP_ST_VNMP_PROBE1:
+ case FIP_ST_VNMP_PROBE2:
+ if (new->ids.port_id == fip->port_id)
+ fcoe_ctlr_vn_restart(fip);
+ break;
+ case FIP_ST_VNMP_CLAIM:
+ case FIP_ST_VNMP_UP:
+ if (new->ids.port_id == fip->port_id) {
+ if (new->ids.port_name > fip->lp->wwpn) {
+ fcoe_ctlr_vn_restart(fip);
+ break;
+ }
+ fcoe_ctlr_vn_send_claim(fip);
+ break;
+ }
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
+ min((u32)frport->fcoe_len,
+ fcoe_ctlr_fcoe_size(fip)));
+ fcoe_ctlr_vn_add(fip, new);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * fcoe_ctlr_vn_claim_resp() - handle received Claim Response
+ * @fip: The FCoE controller that received the frame
+ * @new: newly-parsed remote port with frport from the Claim Response
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
+ struct fc_rport_priv *new)
+{
+ LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n",
+ new->ids.port_id, fcoe_ctlr_state(fip->state));
+ if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
+ fcoe_ctlr_vn_add(fip, new);
+}
+
+/**
+ * fcoe_ctlr_vn_beacon() - handle received beacon.
+ * @fip: The FCoE controller that received the frame
+ * @new: newly-parsed remote port with frport from the Beacon
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
+ struct fc_rport_priv *new)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fc_rport_priv *rdata;
+ struct fcoe_rport *frport;
+
+ frport = fcoe_ctlr_rport(new);
+ if (frport->flags & FIP_FL_REC_OR_P2P) {
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
+ return;
+ }
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+ if (rdata)
+ kref_get(&rdata->kref);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (rdata) {
+ if (rdata->ids.node_name == new->ids.node_name &&
+ rdata->ids.port_name == new->ids.port_name) {
+ frport = fcoe_ctlr_rport(rdata);
+ if (!frport->time && fip->state == FIP_ST_VNMP_UP)
+ lport->tt.rport_login(rdata);
+ frport->time = jiffies;
+ }
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+ return;
+ }
+ if (fip->state != FIP_ST_VNMP_UP)
+ return;
+
+ /*
+ * Beacon from a new neighbor.
+ * Send a claim notify if one hasn't been sent recently.
+ * Don't add the neighbor yet.
+ */
+ LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
+ new->ids.port_id);
+ if (time_after(jiffies,
+ fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
+ fcoe_ctlr_vn_send_claim(fip);
+}
+
+/**
+ * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons
+ * @fip: The FCoE controller
+ *
+ * Called with ctlr_mutex held.
+ * Called only in state FIP_ST_VNMP_UP.
+ * Returns the soonest time for next age-out or a time far in the future.
+ */
+static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fc_rport_priv *rdata;
+ struct fcoe_rport *frport;
+ unsigned long next_time;
+ unsigned long deadline;
+
+ next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
+ mutex_lock(&lport->disc.disc_mutex);
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+ frport = fcoe_ctlr_rport(rdata);
+ if (!frport->time)
+ continue;
+ deadline = frport->time +
+ msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
+ if (time_after_eq(jiffies, deadline)) {
+ frport->time = 0;
+ LIBFCOE_FIP_DBG(fip,
+ "port %16.16llx fc_id %6.6x beacon expired\n",
+ rdata->ids.port_name, rdata->ids.port_id);
+ lport->tt.rport_logoff(rdata);
+ } else if (time_before(deadline, next_time))
+ next_time = deadline;
+ }
+ mutex_unlock(&lport->disc.disc_mutex);
+ return next_time;
+}
+
+/**
+ * fcoe_ctlr_vn_recv() - Receive a FIP frame
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is dropped.
+ * Always consumes the frame.
+ */
+static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct fip_header *fiph;
+ enum fip_vn2vn_subcode sub;
+ struct {
+ struct fc_rport_priv rdata;
+ struct fcoe_rport frport;
+ } buf;
+ int rc;
+
+ fiph = (struct fip_header *)skb->data;
+ sub = fiph->fip_subcode;
+
+ rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+ if (rc) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
+ goto drop;
+ }
+
+ mutex_lock(&fip->ctlr_mutex);
+ switch (sub) {
+ case FIP_SC_VN_PROBE_REQ:
+ fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+ break;
+ case FIP_SC_VN_PROBE_REP:
+ fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+ break;
+ case FIP_SC_VN_CLAIM_NOTIFY:
+ fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+ break;
+ case FIP_SC_VN_CLAIM_REP:
+ fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+ break;
+ case FIP_SC_VN_BEACON:
+ fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
+ rc = -1;
+ break;
+ }
+ mutex_unlock(&fip->ctlr_mutex);
+drop:
+ kfree_skb(skb);
+ return rc;
+}
+
+/**
+ * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode.
+ * @lport: The local port
+ * @fp: The received frame
+ *
+ * This should never be called since we don't see RSCNs or other
+ * fabric-generated ELSes.
+ */
+static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_seq_els_data rjt_data;
+
+ rjt_data.reason = ELS_RJT_UNSUP;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
+
+/**
+ * fcoe_ctlr_disc_recv - start discovery for VN2VN mode.
+ * @fip: The FCoE controller
+ *
+ * This sets a flag indicating that remote ports should be created
+ * and started for the peers we discover. We use the disc_callback
+ * pointer as that flag. Peers already discovered are created here.
+ *
+ * The lport lock is held during this call. The callback must be done
+ * later, without holding either the lport or discovery locks.
+ * The fcoe_ctlr lock may also be held during this call.
+ */
+static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *,
+ enum fc_disc_event),
+ struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+ struct fcoe_ctlr *fip = disc->priv;
+
+ mutex_lock(&disc->disc_mutex);
+ disc->disc_callback = callback;
+ disc->disc_id = (disc->disc_id + 2) | 1;
+ disc->pending = 1;
+ schedule_work(&fip->timer_work);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state.
+ * @fip: The FCoE controller
+ *
+ * Starts the FLOGI and PLOGI login process to each discovered rport for which
+ * we've received at least one beacon.
+ * Performs the discovery complete callback.
+ */
+static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fc_disc *disc = &lport->disc;
+ struct fc_rport_priv *rdata;
+ struct fcoe_rport *frport;
+ void (*callback)(struct fc_lport *, enum fc_disc_event);
+
+ mutex_lock(&disc->disc_mutex);
+ callback = disc->pending ? disc->disc_callback : NULL;
+ disc->pending = 0;
+ list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ frport = fcoe_ctlr_rport(rdata);
+ if (frport->time)
+ lport->tt.rport_login(rdata);
+ }
+ mutex_unlock(&disc->disc_mutex);
+ if (callback)
+ callback(lport, DISC_EV_SUCCESS);
+}
+
+/**
+ * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode.
+ * @fip: The FCoE controller
+ */
+static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
+{
+ unsigned long next_time;
+ u8 mac[ETH_ALEN];
+ u32 new_port_id = 0;
+
+ mutex_lock(&fip->ctlr_mutex);
+ switch (fip->state) {
+ case FIP_ST_VNMP_START:
+ fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
+ next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
+ break;
+ case FIP_ST_VNMP_PROBE1:
+ fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
+ next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
+ break;
+ case FIP_ST_VNMP_PROBE2:
+ fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM);
+ new_port_id = fip->port_id;
+ hton24(mac, FIP_VN_FC_MAP);
+ hton24(mac + 3, new_port_id);
+ fcoe_ctlr_map_dest(fip);
+ fip->update_mac(fip->lp, mac);
+ fcoe_ctlr_vn_send_claim(fip);
+ next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
+ break;
+ case FIP_ST_VNMP_CLAIM:
+ /*
+ * This may be invoked either by starting discovery so don't
+ * go to the next state unless it's been long enough.
+ */
+ next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
+ if (time_after_eq(jiffies, next_time)) {
+ fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
+ fcoe_all_vn2vn, 0);
+ next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
+ fip->port_ka_time = next_time;
+ }
+ fcoe_ctlr_vn_disc(fip);
+ break;
+ case FIP_ST_VNMP_UP:
+ next_time = fcoe_ctlr_vn_age(fip);
+ if (time_after_eq(jiffies, fip->port_ka_time)) {
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
+ fcoe_all_vn2vn, 0);
+ fip->port_ka_time = jiffies +
+ msecs_to_jiffies(FIP_VN_BEACON_INT +
+ (prandom_u32() % FIP_VN_BEACON_FUZZ));
+ }
+ if (time_before(fip->port_ka_time, next_time))
+ next_time = fip->port_ka_time;
+ break;
+ case FIP_ST_LINK_WAIT:
+ goto unlock;
+ default:
+ WARN(1, "unexpected state %d\n", fip->state);
+ goto unlock;
+ }
+ mod_timer(&fip->timer, next_time);
+unlock:
+ mutex_unlock(&fip->ctlr_mutex);
+
+ /* If port ID is new, notify local port after dropping ctlr_mutex */
+ if (new_port_id)
+ fc_lport_set_local_id(fip->lp, new_port_id);
+}
+
+/**
+ * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode
+ * @lport: The local port to be (re)configured
+ * @fip: The FCoE controller whose mode is changing
+ * @fip_mode: The new fip mode
+ *
+ * Note that the we shouldn't be changing the libfc discovery settings
+ * (fc_disc_config) while an lport is going through the libfc state
+ * machine. The mode can only be changed when a fcoe_ctlr device is
+ * disabled, so that should ensure that this routine is only called
+ * when nothing is happening.
+ */
+static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
+ enum fip_state fip_mode)
+{
+ void *priv;
+
+ WARN_ON(lport->state != LPORT_ST_RESET &&
+ lport->state != LPORT_ST_DISABLED);
+
+ if (fip_mode == FIP_MODE_VN2VN) {
+ lport->rport_priv_size = sizeof(struct fcoe_rport);
+ lport->point_to_multipoint = 1;
+ lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
+ lport->tt.disc_start = fcoe_ctlr_disc_start;
+ lport->tt.disc_stop = fcoe_ctlr_disc_stop;
+ lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
+ priv = fip;
+ } else {
+ lport->rport_priv_size = 0;
+ lport->point_to_multipoint = 0;
+ lport->tt.disc_recv_req = NULL;
+ lport->tt.disc_start = NULL;
+ lport->tt.disc_stop = NULL;
+ lport->tt.disc_stop_final = NULL;
+ priv = lport;
+ }
+
+ fc_disc_config(lport, priv);
+}
+
+/**
+ * fcoe_libfc_config() - Sets up libfc related properties for local port
+ * @lport: The local port to configure libfc for
+ * @fip: The FCoE controller in use by the local port
+ * @tt: The libfc function template
+ * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
+ *
+ * Returns : 0 for success
+ */
+int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
+ const struct libfc_function_template *tt, int init_fcp)
+{
+ /* Set the function pointers set by the LLDD */
+ memcpy(&lport->tt, tt, sizeof(*tt));
+ if (init_fcp && fc_fcp_init(lport))
+ return -ENOMEM;
+ fc_exch_init(lport);
+ fc_elsct_init(lport);
+ fc_lport_init(lport);
+ fc_rport_init(lport);
+ fc_disc_init(lport);
+ fcoe_ctlr_mode_set(lport, fip, fip->mode);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_fcf *fcf;
+
+ mutex_lock(&fip->ctlr_mutex);
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf = fcoe_fcf_device_priv(fcf_dev);
+ if (fcf)
+ fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+ else
+ fcf_dev->selected = 0;
+
+ mutex_unlock(&ctlr_dev->lock);
+ mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fc_lport *lport = ctlr->lp;
+
+ mutex_lock(&ctlr->ctlr_mutex);
+ switch (ctlr_dev->mode) {
+ case FIP_CONN_TYPE_VN2VN:
+ ctlr->mode = FIP_MODE_VN2VN;
+ break;
+ case FIP_CONN_TYPE_FABRIC:
+ default:
+ ctlr->mode = FIP_MODE_FABRIC;
+ break;
+ }
+
+ mutex_unlock(&ctlr->ctlr_mutex);
+
+ fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);
+}
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 000000000..045c4e11e
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,954 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/ctype.h>
+
+#include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would have
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate the loss of a fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+ uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+ "Maximum number of seconds that libfcoe should"
+ " insulate the loss of a fcf. Once this value is"
+ " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x) \
+ ((x)->id)
+#define fcoe_ctlr_work_q_name(x) \
+ ((x)->work_q_name)
+#define fcoe_ctlr_work_q(x) \
+ ((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x) \
+ ((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x) \
+ ((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x) \
+ ((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
+ ((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x) \
+ ((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x) \
+ ((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x) \
+ ((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x) \
+ ((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x) \
+ ((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x) \
+ ((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x) \
+ ((x)->enabled)
+#define fcoe_fcf_state(x) \
+ ((x)->state)
+#define fcoe_fcf_fabric_name(x) \
+ ((x)->fabric_name)
+#define fcoe_fcf_switch_name(x) \
+ ((x)->switch_name)
+#define fcoe_fcf_fc_map(x) \
+ ((x)->fc_map)
+#define fcoe_fcf_vfid(x) \
+ ((x)->vfid)
+#define fcoe_fcf_mac(x) \
+ ((x)->mac)
+#define fcoe_fcf_priority(x) \
+ ((x)->priority)
+#define fcoe_fcf_fka_period(x) \
+ ((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x) \
+ ((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x) \
+ ((x)->selected)
+#define fcoe_fcf_vlan_id(x) \
+ ((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+ int ret;
+
+ ret = kstrtoul(buf, 0, val);
+ if (ret)
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (*val > UINT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+ unsigned long val)
+{
+ if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+ (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+ (fcf->state == FCOE_FCF_STATE_DELETED))
+ return -EBUSY;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ fcoe_fcf_dev_loss_tmo(fcf) = val;
+ return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ if (ctlr->f->get_fcoe_ctlr_##field) \
+ ctlr->f->get_fcoe_ctlr_##field(ctlr); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
+ if (ctlr->f->get_fcoe_fcf_##field) \
+ ctlr->f->get_fcoe_fcf_##field(fcf); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz) \
+ fcoe_fcf_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
+ fcoe_fcf_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table) \
+static const char *get_fcoe_##title##_name(enum table_type table_key) \
+{ \
+ if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \
+ return NULL; \
+ return table[table_key]; \
+}
+
+static char *fip_conn_type_names[] = {
+ [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+ [ FIP_CONN_TYPE_FABRIC ] = "Fabric",
+ [ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+ if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+ return i;
+ }
+
+ return FIP_CONN_TYPE_UNKNOWN;
+}
+
+static char *fcf_state_names[] = {
+ [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
+ [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+ [ FCOE_FCF_STATE_CONNECTED ] = "Connected",
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+static ssize_t show_fcf_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ const char *name;
+ name = get_fcoe_fcf_state_name(fcf->state);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+#define FCOE_MAX_MODENAME_LEN 20
+static ssize_t show_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ name = get_fcoe_ctlr_mode_name(ctlr->mode);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+ "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+ if (count > FCOE_MAX_MODENAME_LEN)
+ return -EINVAL;
+
+ strncpy(mode, buf, count);
+
+ if (mode[count - 1] == '\n')
+ mode[count - 1] = '\0';
+ else
+ mode[count] = '\0';
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
+ return -EBUSY;
+ case FCOE_CTLR_DISABLED:
+ if (!ctlr->f->set_fcoe_ctlr_mode) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Mode change not supported by LLD.\n");
+ return -ENOTSUPP;
+ }
+
+ ctlr->mode = fcoe_parse_mode(mode);
+ if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
+ buf);
+ return -EINVAL;
+ }
+
+ ctlr->f->set_fcoe_ctlr_mode(ctlr);
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
+
+ return count;
+ case FCOE_CTLR_UNUSED:
+ default:
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
+ return -ENOTSUPP;
+ };
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+ show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ int rc;
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ if (*buf == '1')
+ return count;
+ ctlr->enabled = FCOE_CTLR_DISABLED;
+ break;
+ case FCOE_CTLR_DISABLED:
+ if (*buf == '0')
+ return count;
+ ctlr->enabled = FCOE_CTLR_ENABLED;
+ break;
+ case FCOE_CTLR_UNUSED:
+ return -ENOTSUPP;
+ };
+
+ rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+ [ FCOE_CTLR_ENABLED ] = "1",
+ [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+ ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
+ "%s\n", name);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+ show_ctlr_enabled_state,
+ store_ctlr_enabled);
+
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ struct fcoe_fcf_device *fcf;
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry(fcf, &ctlr->fcfs, peers)
+ fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ mutex_unlock(&ctlr->lock);
+ return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+ store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ if (rc)
+ return rc;
+ return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_fcf_device_dev_loss_tmo,
+ store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+ &device_attr_fcoe_ctlr_link_fail.attr,
+ &device_attr_fcoe_ctlr_vlink_fail.attr,
+ &device_attr_fcoe_ctlr_miss_fka.attr,
+ &device_attr_fcoe_ctlr_symb_err.attr,
+ &device_attr_fcoe_ctlr_err_block.attr,
+ &device_attr_fcoe_ctlr_fcs_error.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+ .name = "lesb",
+ .attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+ &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_enabled.attr,
+ &device_attr_fcoe_ctlr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+ .attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+ &fcoe_ctlr_attr_group,
+ &fcoe_ctlr_lesb_attr_group,
+ NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+ &device_attr_fcoe_fcf_fabric_name.attr,
+ &device_attr_fcoe_fcf_switch_name.attr,
+ &device_attr_fcoe_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_fcf_fc_map.attr,
+ &device_attr_fcoe_fcf_vfid.attr,
+ &device_attr_fcoe_fcf_mac.attr,
+ &device_attr_fcoe_fcf_priority.attr,
+ &device_attr_fcoe_fcf_fka_period.attr,
+ &device_attr_fcoe_fcf_state.attr,
+ &device_attr_fcoe_fcf_selected.attr,
+ &device_attr_fcoe_fcf_vlan_id.attr,
+ NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+ .attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+ &fcoe_fcf_attr_group,
+ NULL,
+};
+
+static struct bus_type fcoe_bus_type;
+
+static int fcoe_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ if (dev->bus == &fcoe_bus_type)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called when the last FIP ctlr reference is released.
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called when the last FIP fcf reference is released.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ kfree(fcf);
+}
+
+static struct device_type fcoe_ctlr_device_type = {
+ .name = "fcoe_ctlr",
+ .groups = fcoe_ctlr_attr_groups,
+ .release = fcoe_ctlr_device_release,
+};
+
+static struct device_type fcoe_fcf_device_type = {
+ .name = "fcoe_fcf",
+ .groups = fcoe_fcf_attr_groups,
+ .release = fcoe_fcf_device_release,
+};
+
+static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
+static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);
+
+static struct attribute *fcoe_bus_attrs[] = {
+ &bus_attr_ctlr_create.attr,
+ &bus_attr_ctlr_destroy.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fcoe_bus);
+
+static struct bus_type fcoe_bus_type = {
+ .name = "fcoe",
+ .match = &fcoe_bus_match,
+ .bus_groups = fcoe_bus_groups,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ */
+static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work: Work to queue for execution
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+ struct work_struct *work)
+{
+ if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ */
+static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work: Work to queue for execution
+ * @delay: jiffies to delay the work queuing
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+ struct delayed_work *work,
+ unsigned long delay)
+{
+ if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+ struct fcoe_fcf_device *old)
+{
+ if (new->switch_name == old->switch_name &&
+ new->fabric_name == old->fabric_name &&
+ new->fc_map == old->fc_map &&
+ ether_addr_equal(new->mac, old->mac))
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent: The parent device to which the fcoe_ctlr instance
+ * should be attached
+ * @f: The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ struct fcoe_sysfs_function_template *f,
+ int priv_size)
+{
+ struct fcoe_ctlr_device *ctlr;
+ int error = 0;
+
+ ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+ GFP_KERNEL);
+ if (!ctlr)
+ goto out;
+
+ ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+ ctlr->f = f;
+ ctlr->mode = FIP_CONN_TYPE_FABRIC;
+ INIT_LIST_HEAD(&ctlr->fcfs);
+ mutex_init(&ctlr->lock);
+ ctlr->dev.parent = parent;
+ ctlr->dev.bus = &fcoe_bus_type;
+ ctlr->dev.type = &fcoe_ctlr_device_type;
+
+ ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+ snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+ "ctlr_wq_%d", ctlr->id);
+ ctlr->work_q = create_singlethread_workqueue(
+ ctlr->work_q_name);
+ if (!ctlr->work_q)
+ goto out_del;
+
+ snprintf(ctlr->devloss_work_q_name,
+ sizeof(ctlr->devloss_work_q_name),
+ "ctlr_dl_wq_%d", ctlr->id);
+ ctlr->devloss_work_q = create_singlethread_workqueue(
+ ctlr->devloss_work_q_name);
+ if (!ctlr->devloss_work_q)
+ goto out_del_q;
+
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ error = device_register(&ctlr->dev);
+ if (error)
+ goto out_del_q2;
+
+ return ctlr;
+
+out_del_q2:
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+out_del_q:
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+out_del:
+ kfree(ctlr);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their childen
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and it's resources
+ * are freed (work q), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind as they need
+ * to clean up each of their LLD data for all fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+ struct fcoe_fcf_device *fcf, *next;
+ /* Remove any attached fcfs */
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry_safe(fcf, next,
+ &ctlr->fcfs, peers) {
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+ }
+ mutex_unlock(&ctlr->lock);
+
+ fcoe_ctlr_device_flush_work(ctlr);
+
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+
+ device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, delete_work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ /*
+ * Cancel any outstanding timers. These should really exist
+ * only when rmmod'ing the LLDD and we're asking for
+ * immediate termination of the rports
+ */
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ mutex_lock(&ctlr->lock);
+
+ /*
+ * If the fcf is deleted or reconnected before the timer
+ * fires the devloss queue will be flushed, but the state will
+ * either be CONNECTED or DELETED. If that is the case we
+ * cancel deleting the fcf.
+ */
+ if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+ goto out;
+
+ dev_printk(KERN_ERR, &fcf->dev,
+ "FIP fcf connection time out: removing fcf\n");
+
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+ mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr_attrs mutex to be held for fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+ int timeout = fcf->dev_loss_tmo;
+
+ if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+ return;
+
+ fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+ /*
+ * FCF will only be re-connected by the LLD calling
+ * fcoe_fcf_device_add, and it should be setting up
+ * priv then.
+ */
+ fcf->priv = NULL;
+
+ fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+ timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ struct fcoe_fcf_device *new_fcf)
+{
+ struct fcoe_fcf_device *fcf;
+ int error = 0;
+
+ list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+ if (fcoe_fcf_device_match(new_fcf, fcf)) {
+ if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+ return fcf;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ return fcf;
+ }
+ }
+
+ fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+ if (unlikely(!fcf))
+ goto out;
+
+ INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+ INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+ fcf->dev.parent = &ctlr->dev;
+ fcf->dev.bus = &fcoe_bus_type;
+ fcf->dev.type = &fcoe_fcf_device_type;
+ fcf->id = atomic_inc_return(&fcf_num) - 1;
+ fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+ fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+ dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+ fcf->fabric_name = new_fcf->fabric_name;
+ fcf->switch_name = new_fcf->switch_name;
+ fcf->fc_map = new_fcf->fc_map;
+ fcf->vfid = new_fcf->vfid;
+ memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+ fcf->priority = new_fcf->priority;
+ fcf->fka_period = new_fcf->fka_period;
+ fcf->selected = new_fcf->selected;
+
+ error = device_register(&fcf->dev);
+ if (error)
+ goto out_del;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+ list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+ return fcf;
+
+out_del:
+ kfree(fcf);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+ int error;
+
+ atomic_set(&ctlr_num, 0);
+ atomic_set(&fcf_num, 0);
+
+ error = bus_register(&fcoe_bus_type);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+ bus_unregister(&fcoe_bus_type);
+}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
new file mode 100644
index 000000000..bdc898995
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -0,0 +1,1040 @@
+/*
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+static int fcoe_transport_create(const char *, struct kernel_param *);
+static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+static int fcoe_transport_enable(const char *, struct kernel_param *);
+static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
+
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(ft_mutex);
+static LIST_HEAD(fcoe_netdevs);
+static DEFINE_MUTEX(fn_mutex);
+
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
+__MODULE_PARM_TYPE(show, "string");
+MODULE_PARM_DESC(show, " Show attached FCoE transports");
+
+module_param_call(create, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_FABRIC, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
+
+module_param_call(create_vn2vn, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_VN2VN, S_IWUSR);
+__MODULE_PARM_TYPE(create_vn2vn, "string");
+MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
+ "on an Ethernet interface");
+
+module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
+
+module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
+
+module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
+
+/* notification function for packets from net device */
+static struct notifier_block libfcoe_notifier = {
+ .notifier_call = libfcoe_device_notification,
+};
+
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT |
+ FC_PORTSPEED_10GBIT |
+ FC_PORTSPEED_20GBIT |
+ FC_PORTSPEED_40GBIT);
+
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+
+ if (ecmd.supported & (SUPPORTED_10000baseT_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseR_FEC))
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+ if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
+ if (ecmd.supported & (SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
+
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_1000:
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case SPEED_10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 20000:
+ lport->link_speed = FC_PORTSPEED_20GBIT;
+ break;
+ case 40000:
+ lport->link_speed = FC_PORTSPEED_40GBIT;
+ break;
+ default:
+ lport->link_speed = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port to update speeds for
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
+void __fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb,
+ struct net_device *netdev)
+{
+ unsigned int cpu;
+ u32 lfc, vlfc, mdac;
+ struct fc_stats *stats;
+ struct fcoe_fc_els_lesb *lesb;
+ struct rtnl_link_stats64 temp;
+
+ lfc = 0;
+ vlfc = 0;
+ mdac = 0;
+ lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
+ memset(lesb, 0, sizeof(*lesb));
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(lport->stats, cpu);
+ lfc += stats->LinkFailureCount;
+ vlfc += stats->VLinkFailureCount;
+ mdac += stats->MissDiscAdvCount;
+ }
+ lesb->lesb_link_fail = htonl(lfc);
+ lesb->lesb_vlink_fail = htonl(vlfc);
+ lesb->lesb_miss_fka = htonl(mdac);
+ lesb->lesb_fcs_error =
+ htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+}
+EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+void fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_get_netdev(fip->lp);
+ struct fc_els_lesb *fc_lesb;
+
+ fc_lesb = (struct fc_els_lesb *)(&ctlr_dev->lesb);
+ __fcoe_get_lesb(fip->lp, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
+void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
+{
+ u8 wwpn[8];
+
+ u64_to_wwn(wwn, wwpn);
+ snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+ wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+ wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+EXPORT_SYMBOL_GPL(fcoe_wwn_to_str);
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ * - WWPN supplied is unique for given lport
+ */
+int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ int rc = 0;
+ char buf[32];
+
+ mutex_lock(&n_port->lp_mutex);
+
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ /* Check if the wwpn is not same as that of the lport */
+ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+ LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the "
+ "base port WWPN\n", buf);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check if there is any existing vport with same wwpn */
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+ LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s "
+ "already exists\n", buf);
+ rc = -EINVAL;
+ break;
+ }
+ }
+out:
+ mutex_unlock(&n_port->lp_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_validate_vport_create);
+
+/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ const struct net_device_ops *ops = netdev->netdev_ops;
+
+ if (ops->ndo_fcoe_get_wwn)
+ return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_wwn);
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksumed
+ *
+ * This uses crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+ struct sk_buff *skb = fp_skb(fp);
+ struct skb_frag_struct *frag;
+ unsigned char *data;
+ unsigned long off, len, clen;
+ u32 crc;
+ unsigned i;
+
+ crc = crc32(~0, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ off = frag->page_offset;
+ len = skb_frag_size(frag);
+ while (len > 0) {
+ clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+ data = kmap_atomic(
+ skb_frag_page(frag) + (off >> PAGE_SHIFT));
+ crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+ kunmap_atomic(data);
+ off += clen;
+ len -= clen;
+ }
+ }
+ return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ rc = dev_queue_xmit(nskb);
+ if (rc != 0)
+ return rc;
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
+
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue a skb and free it
+ * @lport: The local port to dequeue a skb on
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ kfree_skb(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ }
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ *
+ * This empties the wait_queue, dequeues the head of the wait_queue queue
+ * and calls fcoe_start_io() for each packet. If all skb have been
+ * transmitted it returns the qlen. If an error occurs it restores
+ * wait_queue (to try again later) and returns -1.
+ *
+ * The wait_queue is used when the skb transmit fails. The failed skb
+ * will go in the wait_queue which will be emptied by the timer function or
+ * by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ int rc;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (skb)
+ __skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+ if (port->fcoe_pending_queue_active)
+ goto out;
+ port->fcoe_pending_queue_active = 1;
+
+ while (port->fcoe_pending_queue.qlen) {
+ /* keep qlen > 0 until fcoe_start_io succeeds */
+ port->fcoe_pending_queue.qlen++;
+ skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (rc) {
+ __skb_queue_head(&port->fcoe_pending_queue, skb);
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ break;
+ }
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ }
+
+ if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+ lport->qfull = 0;
+ if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+ mod_timer(&port->timer, jiffies + 2);
+ port->fcoe_pending_queue_active = 0;
+out:
+ if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+ lport->qfull = 1;
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
+
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+ fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
+
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps: The fcoe context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough buffer left a new page is allocated for the trailer. Reference to
+ * the page from this function as well as the skbs using the page fragments
+ * ensure that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps)
+{
+ struct page *page;
+
+ page = fps->crc_eof_page;
+ if (!page) {
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+
+ fps->crc_eof_page = page;
+ fps->crc_eof_offset = 0;
+ }
+
+ get_page(page);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+ fps->crc_eof_offset, tlen);
+ skb->len += tlen;
+ skb->data_len += tlen;
+ skb->truesize += tlen;
+ fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+ if (fps->crc_eof_offset >= PAGE_SIZE) {
+ fps->crc_eof_page = NULL;
+ fps->crc_eof_offset = 0;
+ put_page(page);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
+
+/**
+ * fcoe_transport_lookup - find an fcoe transport that matches a netdev
+ * @netdev: The netdev to look for from all attached transports
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+
+ list_for_each_entry(ft, &fcoe_transports, list)
+ if (ft->match && ft->match(netdev))
+ return ft;
+ return NULL;
+}
+
+/**
+ * fcoe_transport_attach - Attaches an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_attach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
+ ft->name);
+ rc = -EEXIST;
+ goto out_attach;
+ }
+
+ /* Add default transport to the tail */
+ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
+ list_add(&ft->list, &fcoe_transports);
+ else
+ list_add_tail(&ft->list, &fcoe_transports);
+
+ ft->attached = true;
+ LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_attach);
+
+/**
+ * fcoe_transport_detach - Detaches an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns : 0 for success
+ */
+int fcoe_transport_detach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+ struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+ mutex_lock(&ft_mutex);
+ if (!ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
+ ft->name);
+ rc = -ENODEV;
+ goto out_attach;
+ }
+
+ /* remove netdev mapping for this transport as it is going away */
+ mutex_lock(&fn_mutex);
+ list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+ if (nm->ft == ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s going away, "
+ "remove its netdev mapping for %s\n",
+ ft->name, nm->netdev->name);
+ list_del(&nm->list);
+ kfree(nm);
+ }
+ }
+ mutex_unlock(&fn_mutex);
+
+ list_del(&ft->list);
+ ft->attached = false;
+ LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+
+}
+EXPORT_SYMBOL(fcoe_transport_detach);
+
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
+{
+ int i, j;
+ struct fcoe_transport *ft = NULL;
+
+ i = j = sprintf(buffer, "Attached FCoE transports:");
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list) {
+ if (i >= PAGE_SIZE - IFNAMSIZ)
+ break;
+ i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
+ }
+ mutex_unlock(&ft_mutex);
+ if (i == j)
+ i += snprintf(&buffer[i], IFNAMSIZ, "none");
+ return i;
+}
+
+static int __init fcoe_transport_init(void)
+{
+ register_netdevice_notifier(&libfcoe_notifier);
+ return 0;
+}
+
+static int fcoe_transport_exit(void)
+{
+ struct fcoe_transport *ft;
+
+ unregister_netdevice_notifier(&libfcoe_notifier);
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list)
+ printk(KERN_ERR "FCoE transport %s is still attached!\n",
+ ft->name);
+ mutex_unlock(&ft_mutex);
+ return 0;
+}
+
+
+static int fcoe_add_netdev_mapping(struct net_device *netdev,
+ struct fcoe_transport *ft)
+{
+ struct fcoe_netdev_mapping *nm;
+
+ nm = kmalloc(sizeof(*nm), GFP_KERNEL);
+ if (!nm) {
+ printk(KERN_ERR "Unable to allocate netdev_mapping");
+ return -ENOMEM;
+ }
+
+ nm->netdev = netdev;
+ nm->ft = ft;
+
+ mutex_lock(&fn_mutex);
+ list_add(&nm->list, &fcoe_netdevs);
+ mutex_unlock(&fn_mutex);
+ return 0;
+}
+
+
+static void fcoe_del_netdev_mapping(struct net_device *netdev)
+{
+ struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+ if (nm->netdev == netdev) {
+ list_del(&nm->list);
+ kfree(nm);
+ mutex_unlock(&fn_mutex);
+ return;
+ }
+ }
+ mutex_unlock(&fn_mutex);
+}
+
+
+/**
+ * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which
+ * it was created
+ *
+ * Returns : ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_netdev_mapping *nm;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry(nm, &fcoe_netdevs, list) {
+ if (netdev == nm->netdev) {
+ ft = nm->ft;
+ mutex_unlock(&fn_mutex);
+ return ft;
+ }
+ }
+
+ mutex_unlock(&fn_mutex);
+ return NULL;
+}
+
+/**
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
+ *
+ * Returns: NULL or a ptr to net_device
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+ char *cp;
+ char ifname[IFNAMSIZ + 2];
+
+ if (buffer) {
+ strlcpy(ifname, buffer, IFNAMSIZ);
+ cp = ifname + strlen(ifname);
+ while (--cp >= ifname && *cp == '\n')
+ *cp = '\0';
+ return dev_get_by_name(&init_net, ifname);
+ }
+ return NULL;
+}
+
+/**
+ * libfcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event: The type of event
+ * @ptr: The net device that the event was on
+ *
+ * This function is called by the Ethernet driver in case of link change event.
+ *
+ * Returns: 0 for success
+ */
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n",
+ netdev->name);
+ fcoe_del_netdev_mapping(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ int rc = 0;
+ int err;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+ if (err) {
+ fcoe_del_netdev_mapping(netdev);
+ rc = -ENOMEM;
+ goto out_putdev;
+ }
+
+ err = fcoe_add_netdev_mapping(netdev, ft);
+ if (err) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ LIBFCOE_TRANSPORT_DBG("transport %s succeeded to create fcoe on %s.\n",
+ ft->name, netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc)
+ return rc;
+ return count;
+}
+
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy(netdev);
+ if (rc)
+ goto out_putdev;
+
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+ rc = count; /* required for successful return */
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
+
+/**
+ * fcoe_transport_create() - Create a fcoe interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp: The associated kernel param
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's create function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ rc = fcoe_add_netdev_mapping(netdev, ft);
+ if (rc) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
+ if (rc)
+ fcoe_del_netdev_mapping(netdev);
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_transport_destroy() - Destroy a FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's destroy function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_transport_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->disable ? ft->disable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_transport_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->enable ? ft->enable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+
+/**
+ * libfcoe_init() - Initialization routine for libfcoe.ko
+ */
+static int __init libfcoe_init(void)
+{
+ int rc = 0;
+
+ rc = fcoe_transport_init();
+ if (rc)
+ return rc;
+
+ rc = fcoe_sysfs_setup();
+ if (rc)
+ fcoe_transport_exit();
+
+ return rc;
+}
+module_init(libfcoe_init);
+
+/**
+ * libfcoe_exit() - Tear down libfcoe.ko
+ */
+static void __exit libfcoe_exit(void)
+{
+ fcoe_sysfs_teardown();
+ fcoe_transport_exit();
+}
+module_exit(libfcoe_exit);
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
new file mode 100644
index 000000000..d3bb16d11
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -0,0 +1,35 @@
+#ifndef _FCOE_LIBFCOE_H_
+#define _FCOE_LIBFCOE_H_
+
+extern unsigned int libfcoe_debug_logging;
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ pr_info("libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ pr_info("host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
+
+#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
+ pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \
+ pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
+
+#endif /* _FCOE_LIBFCOE_H_ */
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
new file mode 100644
index 000000000..fff682976
--- /dev/null
+++ b/drivers/scsi/fdomain.c
@@ -0,0 +1,1784 @@
+/* fdomain.c -- Future Domain TMC-16x0 SCSI driver
+ * Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
+ * Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
+ * Shared IRQ supported added 7/7/2001 Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ **************************************************************************
+
+ SUMMARY:
+
+ Future Domain BIOS versions supported for autodetect:
+ 2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61
+ Chips are supported:
+ TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70
+ Boards supported:
+ Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX
+ Future Domain TMC-3260 (PCI)
+ Quantum ISA-200S, ISA-250MG
+ Adaptec AHA-2920A (PCI) [BUT *NOT* AHA-2920C -- use aic7xxx instead]
+ IBM ?
+ LILO/INSMOD command-line options:
+ fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]
+
+
+
+ NOTE:
+
+ The Adaptec AHA-2920C has an Adaptec AIC-7850 chip on it.
+ Use the aic7xxx driver for this board.
+
+ The Adaptec AHA-2920A has a Future Domain chip on it, so this is the right
+ driver for that card. Unfortunately, the boxes will probably just say
+ "2920", so you'll have to look on the card for a Future Domain logo, or a
+ letter after the 2920.
+
+
+
+ THANKS:
+
+ Thanks to Adaptec for providing PCI boards for testing. This finally
+ enabled me to test the PCI detection and correct it for PCI boards that do
+ not have a BIOS at a standard ISA location. For PCI boards, LILO/INSMOD
+ command-line options should no longer be needed. --RF 18Nov98
+
+
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
+ TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a
+ 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
+ high-density external connector. The 1670 and 1680 have floppy disk
+ controllers built in. The TMC-3260 is a PCI bus card.
+
+ Future Domain's older boards are based on the TMC-1800 chip, and this
+ driver was originally written for a TMC-1680 board with the TMC-1800 chip.
+ More recently, boards are being produced with the TMC-18C50 and TMC-18C30
+ chips. The latest and greatest board may not work with this driver. If
+ you have to patch this driver so that it will recognize your board's BIOS
+ signature, then the driver may fail to function after the board is
+ detected.
+
+ Please note that the drive ordering that Future Domain implemented in BIOS
+ versions 3.4 and 3.5 is the opposite of the order (currently) used by the
+ rest of the SCSI industry. If you have BIOS version 3.4 or 3.5, and have
+ more than one drive, then the drive ordering will be the reverse of that
+ which you see under DOS. For example, under DOS SCSI ID 0 will be D: and
+ SCSI ID 1 will be C: (the boot device). Under Linux, SCSI ID 0 will be
+ /dev/sda and SCSI ID 1 will be /dev/sdb. The Linux ordering is consistent
+ with that provided by all the other SCSI drivers for Linux. If you want
+ this changed, you will probably have to patch the higher level SCSI code.
+ If you do so, please send me patches that are protected by #ifdefs.
+
+ If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
+ your board. Please refer to the Seagate driver for more information and
+ possible support.
+
+
+
+ HISTORY:
+
+ Linux Driver Driver
+ Version Version Date Support/Notes
+
+ 0.0 3 May 1992 V2.0 BIOS; 1800 chip
+ 0.97 1.9 28 Jul 1992
+ 0.98.6 3.1 27 Nov 1992
+ 0.99 3.2 9 Dec 1992
+
+ 0.99.3 3.3 10 Jan 1993 V3.0 BIOS
+ 0.99.5 3.5 18 Feb 1993
+ 0.99.10 3.6 15 May 1993 V3.2 BIOS; 18C50 chip
+ 0.99.11 3.17 3 Jul 1993 (now under RCS)
+ 0.99.12 3.18 13 Aug 1993
+ 0.99.14 5.6 31 Oct 1993 (reselection code removed)
+
+ 0.99.15 5.9 23 Jan 1994 V3.4 BIOS (preliminary)
+ 1.0.8/1.1.1 5.15 1 Apr 1994 V3.4 BIOS; 18C30 chip (preliminary)
+ 1.0.9/1.1.3 5.16 7 Apr 1994 V3.4 BIOS; 18C30 chip
+ 1.1.38 5.18 30 Jul 1994 36C70 chip (PCI version of 18C30)
+ 1.1.62 5.20 2 Nov 1994 V3.5 BIOS
+ 1.1.73 5.22 7 Dec 1994 Quantum ISA-200S board; V2.0 BIOS
+
+ 1.1.82 5.26 14 Jan 1995 V3.5 BIOS; TMC-1610M/MER/MEX board
+ 1.2.10 5.28 5 Jun 1995 Quantum ISA-250MG board; V2.0, V2.01 BIOS
+ 1.3.4 5.31 23 Jun 1995 PCI BIOS-32 detection (preliminary)
+ 1.3.7 5.33 4 Jul 1995 PCI BIOS-32 detection
+ 1.3.28 5.36 17 Sep 1995 V3.61 BIOS; LILO command-line support
+ 1.3.34 5.39 12 Oct 1995 V3.60 BIOS; /proc
+ 1.3.72 5.39 8 Feb 1996 Adaptec AHA-2920 board
+ 1.3.85 5.41 4 Apr 1996
+ 2.0.12 5.44 8 Aug 1996 Use ID 7 for all PCI cards
+ 2.1.1 5.45 2 Oct 1996 Update ROM accesses for 2.1.x
+ 2.1.97 5.46 23 Apr 1998 Rewritten PCI detection routines [mj]
+ 2.1.11x 5.47 9 Aug 1998 Touched for 8 SCSI disk majors support
+ 5.48 18 Nov 1998 BIOS no longer needed for PCI detection
+ 2.2.0 5.50 28 Dec 1998 Support insmod parameters
+
+
+ REFERENCES USED:
+
+ "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
+ 1990.
+
+ "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
+ Corporation, January 1992.
+
+ "LXT SCSI Products: Specifications and OEM Technical Manual (Revision
+ B/September 1991)", Maxtor Corporation, 1991.
+
+ "7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
+
+ "Draft Proposed American National Standard: Small Computer System
+ Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
+ revision 10h, October 17, 1991)
+
+ Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
+ Youngdale (ericy@cais.com), 1992.
+
+ Private communication, Tuong Le (Future Domain Engineering department),
+ 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
+ TMC-18C30 detection.)
+
+ Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
+ 60 (2.39: Disk Partition Table Layout).
+
+ "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
+ 6-1.
+
+
+
+ NOTES ON REFERENCES:
+
+ The Maxtor manuals were free. Maxtor telephone technical support is
+ great!
+
+ The Future Domain manuals were $25 and $35. They document the chip, not
+ the TMC-16x0 boards, so some information I had to guess at. In 1992,
+ Future Domain sold DOS BIOS source for $250 and the UN*X driver source was
+ $750, but these required a non-disclosure agreement, so even if I could
+ have afforded them, they would *not* have been useful for writing this
+ publicly distributable driver. Future Domain technical support has
+ provided some information on the phone and have sent a few useful FAXs.
+ They have been much more helpful since they started to recognize that the
+ word "Linux" refers to an operating system :-).
+
+
+
+ ALPHA TESTERS:
+
+ There are many other alpha testers that come and go as the driver
+ develops. The people listed here were most helpful in times of greatest
+ need (mostly early on -- I've probably left out a few worthy people in
+ more recent times):
+
+ Todd Carrico (todd@wutc.wustl.edu), Dan Poirier (poirier@cs.unc.edu ), Ken
+ Corey (kenc@sol.acs.unt.edu), C. de Bruin (bruin@bruin@sterbbs.nl), Sakari
+ Aaltonen (sakaria@vipunen.hit.fi), John Rice (rice@xanth.cs.odu.edu), Brad
+ Yearwood (brad@optilink.com), and Ray Toy (toy@soho.crd.ge.com).
+
+ Special thanks to Tien-Wan Yang (twyang@cs.uh.edu), who graciously lent me
+ his 18C50-based card for debugging. He is the sole reason that this
+ driver works with the 18C50 chip.
+
+ Thanks to Dave Newman (dnewman@crl.com) for providing initial patches for
+ the version 3.4 BIOS.
+
+ Thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for providing
+ patches that support the TMC-3260, a PCI bus card with the 36C70 chip.
+ The 36C70 chip appears to be "completely compatible" with the 18C30 chip.
+
+ Thanks to Eric Kasten (tigger@petroglyph.cl.msu.edu) for providing the
+ patch for the version 3.5 BIOS.
+
+ Thanks for Stephen Henson (shenson@nyx10.cs.du.edu) for providing the
+ patch for the Quantum ISA-200S SCSI adapter.
+
+ Thanks to Adam Bowen for the signature to the 1610M/MER/MEX scsi cards, to
+ Martin Andrews (andrewm@ccfadm.eeg.ccf.org) for the signature to some
+ random TMC-1680 repackaged by IBM; and to Mintak Ng (mintak@panix.com) for
+ the version 3.61 BIOS signature.
+
+ Thanks for Mark Singer (elf@netcom.com) and Richard Simpson
+ (rsimpson@ewrcsdra.demon.co.uk) for more Quantum signatures and detective
+ work on the Quantum RAM layout.
+
+ Special thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for
+ providing patches for proper PCI BIOS32-mediated detection of the TMC-3260
+ card (a PCI bus card with the 36C70 chip). Please send James PCI-related
+ bug reports.
+
+ Thanks to Tom Cavin (tec@usa1.com) for preliminary command-line option
+ patches.
+
+ New PCI detection code written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+
+ Insmod parameter code based on patches from Daniel Graham
+ <graham@balance.uoregon.edu>.
+
+ All of the alpha testers deserve much thanks.
+
+
+
+ NOTES ON USER DEFINABLE OPTIONS:
+
+ DEBUG: This turns on the printing of various debug information.
+
+ ENABLE_PARITY: This turns on SCSI parity checking. With the current
+ driver, all attached devices must support SCSI parity. If none of your
+ devices support parity, then you can probably get the driver to work by
+ turning this option off. I have no way of testing this, however, and it
+ would appear that no one ever uses this option.
+
+ FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
+ 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
+ the SCSI device, an interrupt will be raised. Therefore, this could be as
+ low as 0, or as high as 16. Note, however, that values which are too high
+ or too low seem to prevent any interrupts from occurring, and thereby lock
+ up the machine. I have found that 2 is a good number, but throughput may
+ be increased by changing this value to values which are close to 2.
+ Please let me know if you try any different values.
+
+ RESELECTION: This is no longer an option, since I gave up trying to
+ implement it in version 4.x of this driver. It did not improve
+ performance at all and made the driver unstable (because I never found one
+ of the two race conditions which were introduced by the multiple
+ outstanding command code). The instability seems a very high price to pay
+ just so that you don't have to wait for the tape to rewind. If you want
+ this feature implemented, send me patches. I'll be happy to send a copy
+ of my (broken) driver to anyone who would like to see a copy.
+
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <scsi/scsicam.h>
+
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include "fdomain.h"
+
+#ifndef PCMCIA
+MODULE_AUTHOR("Rickard E. Faith");
+MODULE_DESCRIPTION("Future domain SCSI driver");
+MODULE_LICENSE("GPL");
+#endif
+
+
+#define VERSION "$Revision: 5.51 $"
+
+/* START OF USER DEFINABLE OPTIONS */
+
+#define DEBUG 0 /* Enable debugging output */
+#define ENABLE_PARITY 1 /* Enable SCSI Parity */
+#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */
+
+/* END OF USER DEFINABLE OPTIONS */
+
+#if DEBUG
+#define EVERY_ACCESS 0 /* Write a line on every scsi access */
+#define ERRORS_ONLY 1 /* Only write a line if there is an error */
+#define DEBUG_DETECT 0 /* Debug fdomain_16x0_detect() */
+#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
+#define DEBUG_ABORT 1 /* Debug abort() routine */
+#define DEBUG_RESET 1 /* Debug reset() routine */
+#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
+#else
+#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
+#define ERRORS_ONLY 0
+#define DEBUG_DETECT 0
+#define DEBUG_MESSAGES 0
+#define DEBUG_ABORT 0
+#define DEBUG_RESET 0
+#define DEBUG_RACE 0
+#endif
+
+/* Errors are reported on the line, so we don't need to report them again */
+#if EVERY_ACCESS
+#undef ERRORS_ONLY
+#define ERRORS_ONLY 0
+#endif
+
+#if ENABLE_PARITY
+#define PARITY_MASK 0x08
+#else
+#define PARITY_MASK 0x00
+#endif
+
+enum chip_type {
+ unknown = 0x00,
+ tmc1800 = 0x01,
+ tmc18c50 = 0x02,
+ tmc18c30 = 0x03,
+};
+
+enum {
+ in_arbitration = 0x02,
+ in_selection = 0x04,
+ in_other = 0x08,
+ disconnect = 0x10,
+ aborted = 0x20,
+ sent_ident = 0x40,
+};
+
+enum in_port_type {
+ Read_SCSI_Data = 0,
+ SCSI_Status = 1,
+ TMC_Status = 2,
+ FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
+ Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
+ LSB_ID_Code = 5,
+ MSB_ID_Code = 6,
+ Read_Loopback = 7,
+ SCSI_Data_NoACK = 8,
+ Interrupt_Status = 9,
+ Configuration1 = 10,
+ Configuration2 = 11, /* tmc18c50/tmc18c30 only */
+ Read_FIFO = 12,
+ FIFO_Data_Count = 14
+};
+
+enum out_port_type {
+ Write_SCSI_Data = 0,
+ SCSI_Cntl = 1,
+ Interrupt_Cntl = 2,
+ SCSI_Mode_Cntl = 3,
+ TMC_Cntl = 4,
+ Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
+ Write_Loopback = 7,
+ IO_Control = 11, /* tmc18c30 only */
+ Write_FIFO = 12
+};
+
+/* .bss will zero all the static variables below */
+static int port_base;
+static unsigned long bios_base;
+static void __iomem * bios_mem;
+static int bios_major;
+static int bios_minor;
+static int PCI_bus;
+#ifdef CONFIG_PCI
+static struct pci_dev *PCI_dev;
+#endif
+static int Quantum; /* Quantum board variant */
+static int interrupt_level;
+static volatile int in_command;
+static struct scsi_cmnd *current_SC;
+static enum chip_type chip = unknown;
+static int adapter_mask;
+static int this_id;
+static int setup_called;
+
+#if DEBUG_RACE
+static volatile int in_interrupt_flag;
+#endif
+
+static int FIFO_Size = 0x2000; /* 8k FIFO for
+ pre-tmc18c30 chips */
+
+static irqreturn_t do_fdomain_16x0_intr( int irq, void *dev_id );
+/* Allow insmod parameters to be like LILO parameters. For example:
+ insmod fdomain fdomain=0x140,11 */
+static char * fdomain = NULL;
+module_param(fdomain, charp, 0);
+
+#ifndef PCMCIA
+
+static unsigned long addresses[] = {
+ 0xc8000,
+ 0xca000,
+ 0xce000,
+ 0xde000,
+ 0xcc000, /* Extra addresses for PCI boards */
+ 0xd0000,
+ 0xe0000,
+};
+#define ADDRESS_COUNT ARRAY_SIZE(addresses)
+
+static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
+#define PORT_COUNT ARRAY_SIZE(ports)
+
+static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+
+#endif /* !PCMCIA */
+
+/*
+
+ READ THIS BEFORE YOU ADD A SIGNATURE!
+
+ READING THIS SHORT NOTE CAN SAVE YOU LOTS OF TIME!
+
+ READ EVERY WORD, ESPECIALLY THE WORD *NOT*
+
+ This driver works *ONLY* for Future Domain cards using the TMC-1800,
+ TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670,
+ and 1680. These are all 16-bit cards.
+
+ The following BIOS signature signatures are for boards which do *NOT*
+ work with this driver (these TMC-8xx and TMC-9xx boards may work with the
+ Seagate driver):
+
+ FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
+ FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
+ FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
+ FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
+
+ (The cards which do *NOT* work are all 8-bit cards -- although some of
+ them have a 16-bit form-factor, the upper 8-bits are used only for IRQs
+ and are *NOT* used for data. You can tell the difference by following
+ the tracings on the circuit board -- if only the IRQ lines are involved,
+ you have a "8-bit" card, and should *NOT* use this driver.)
+
+*/
+
+#ifndef PCMCIA
+
+static struct signature {
+ const char *signature;
+ int sig_offset;
+ int sig_length;
+ int major_bios_version;
+ int minor_bios_version;
+ int flag; /* 1 == PCI_bus, 2 == ISA_200S, 3 == ISA_250MG, 4 == ISA_200S */
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 2, 0, 2 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 2, 0, 3 },
+ { "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 2, 0, 4 },
+ { "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 3, 0, 0 },
+ { "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 3, 2, 0 },
+ { "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 3, -1, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 5, 33, 3, 4, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 26, 33, 3, 4, 1 },
+ { "Adaptec AHA-2920 PCI-SCSI Card", 42, 31, 3, -1, 1 },
+ { "IBM F1 P264/32", 5, 14, 3, -1, 1 },
+ /* This next signature may not be a 3.5 bios */
+ { "Future Domain Corp. V2.0108/18/93", 5, 33, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 3, 5, 0 },
+ { "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN CORP. V3.6108/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN TMC-18XX", 5, 22, -1, -1, 0 },
+
+ /* READ NOTICE ABOVE *BEFORE* YOU WASTE YOUR TIME ADDING A SIGNATURE
+ Also, fix the disk geometry code for your signature and send your
+ changes for faith@cs.unc.edu. Above all, do *NOT* change any old
+ signatures!
+
+ Note that the last line will match a "generic" 18XX bios. Because
+ Future Domain has changed the host SCSI ID and/or the location of the
+ geometry information in the on-board RAM area for each of the first
+ three BIOS's, it is still important to enter a fully qualified
+ signature in the table for any new BIOS's (after the host SCSI ID and
+ geometry location are verified). */
+};
+
+#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
+
+#endif /* !PCMCIA */
+
+static void print_banner( struct Scsi_Host *shpnt )
+{
+ if (!shpnt) return; /* This won't ever happen */
+
+ if (bios_major < 0 && bios_minor < 0) {
+ printk(KERN_INFO "scsi%d: <fdomain> No BIOS; using scsi id %d\n",
+ shpnt->host_no, shpnt->this_id);
+ } else {
+ printk(KERN_INFO "scsi%d: <fdomain> BIOS version ", shpnt->host_no);
+
+ if (bios_major >= 0) printk("%d.", bios_major);
+ else printk("?.");
+
+ if (bios_minor >= 0) printk("%d", bios_minor);
+ else printk("?.");
+
+ printk( " at 0x%lx using scsi id %d\n",
+ bios_base, shpnt->this_id );
+ }
+
+ /* If this driver works for later FD PCI
+ boards, we will have to modify banner
+ for additional PCI cards, but for now if
+ it's PCI it's a TMC-3260 - JTM */
+ printk(KERN_INFO "scsi%d: <fdomain> %s chip at 0x%x irq ",
+ shpnt->host_no,
+ chip == tmc1800 ? "TMC-1800" : (chip == tmc18c50 ? "TMC-18C50" : (chip == tmc18c30 ? (PCI_bus ? "TMC-36C70 (PCI bus)" : "TMC-18C30") : "Unknown")),
+ port_base);
+
+ if (interrupt_level)
+ printk("%d", interrupt_level);
+ else
+ printk("<none>");
+
+ printk( "\n" );
+}
+
+int fdomain_setup(char *str)
+{
+ int ints[4];
+
+ (void)get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (setup_called++ || ints[0] < 2 || ints[0] > 3) {
+ printk(KERN_INFO "scsi: <fdomain> Usage: fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]\n");
+ printk(KERN_ERR "scsi: <fdomain> Bad LILO/INSMOD parameters?\n");
+ return 0;
+ }
+
+ port_base = ints[0] >= 1 ? ints[1] : 0;
+ interrupt_level = ints[0] >= 2 ? ints[2] : 0;
+ this_id = ints[0] >= 3 ? ints[3] : 0;
+
+ bios_major = bios_minor = -1; /* Use geometry for BIOS version >= 3.4 */
+ ++setup_called;
+ return 1;
+}
+
+__setup("fdomain=", fdomain_setup);
+
+
+static void do_pause(unsigned amount) /* Pause for amount*10 milliseconds */
+{
+ mdelay(10*amount);
+}
+
+static inline void fdomain_make_bus_idle( void )
+{
+ outb(0, port_base + SCSI_Cntl);
+ outb(0, port_base + SCSI_Mode_Cntl);
+ if (chip == tmc18c50 || chip == tmc18c30)
+ outb(0x21 | PARITY_MASK, port_base + TMC_Cntl); /* Clear forced intr. */
+ else
+ outb(0x01 | PARITY_MASK, port_base + TMC_Cntl);
+}
+
+static int fdomain_is_valid_port( int port )
+{
+#if DEBUG_DETECT
+ printk( " (%x%x),",
+ inb( port + MSB_ID_Code ), inb( port + LSB_ID_Code ) );
+#endif
+
+ /* The MCA ID is a unique id for each MCA compatible board. We
+ are using ISA boards, but Future Domain provides the MCA ID
+ anyway. We can use this ID to ensure that this is a Future
+ Domain TMC-1660/TMC-1680.
+ */
+
+ if (inb( port + LSB_ID_Code ) != 0xe9) { /* test for 0x6127 id */
+ if (inb( port + LSB_ID_Code ) != 0x27) return 0;
+ if (inb( port + MSB_ID_Code ) != 0x61) return 0;
+ chip = tmc1800;
+ } else { /* test for 0xe960 id */
+ if (inb( port + MSB_ID_Code ) != 0x60) return 0;
+ chip = tmc18c50;
+
+ /* Try to toggle 32-bit mode. This only
+ works on an 18c30 chip. (User reports
+ say this works, so we should switch to
+ it in the near future.) */
+
+ outb( 0x80, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x80) {
+ outb( 0x00, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x00) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+ }
+ /* If that failed, we are an 18c50. */
+ }
+
+ return 1;
+}
+
+static int fdomain_test_loopback( void )
+{
+ int i;
+ int result;
+
+ for (i = 0; i < 255; i++) {
+ outb( i, port_base + Write_Loopback );
+ result = inb( port_base + Read_Loopback );
+ if (i != result)
+ return 1;
+ }
+ return 0;
+}
+
+#ifndef PCMCIA
+
+/* fdomain_get_irq assumes that we have a valid MCA ID for a
+ TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
+ bios_base matches these ports. If someone was unlucky enough to have
+ purchased more than one Future Domain board, then they will have to
+ modify this code, as we only detect one board here. [The one with the
+ lowest bios_base.]
+
+ Note that this routine is only used for systems without a PCI BIOS32
+ (e.g., ISA bus). For PCI bus systems, this routine will likely fail
+ unless one of the IRQs listed in the ints array is used by the board.
+ Sometimes it is possible to use the computer's BIOS setup screen to
+ configure a PCI system so that one of these IRQs will be used by the
+ Future Domain card. */
+
+static int fdomain_get_irq( int base )
+{
+ int options = inb(base + Configuration1);
+
+#if DEBUG_DETECT
+ printk("scsi: <fdomain> Options = %x\n", options);
+#endif
+
+ /* Check for board with lowest bios_base --
+ this isn't valid for the 18c30 or for
+ boards on the PCI bus, so just assume we
+ have the right board. */
+
+ if (chip != tmc18c30 && !PCI_bus && addresses[(options & 0xc0) >> 6 ] != bios_base)
+ return 0;
+ return ints[(options & 0x0e) >> 1];
+}
+
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ int i, j;
+ int base = 0xdeadbeef;
+ int flag = 0;
+
+#if DEBUG_DETECT
+ printk( "scsi: <fdomain> fdomain_isa_detect:" );
+#endif
+
+ for (i = 0; i < ADDRESS_COUNT; i++) {
+ void __iomem *p = ioremap(addresses[i], 0x2000);
+ if (!p)
+ continue;
+#if DEBUG_DETECT
+ printk( " %lx(%lx),", addresses[i], bios_base );
+#endif
+ for (j = 0; j < SIGNATURE_COUNT; j++) {
+ if (check_signature(p + signatures[j].sig_offset,
+ signatures[j].signature,
+ signatures[j].sig_length )) {
+ bios_major = signatures[j].major_bios_version;
+ bios_minor = signatures[j].minor_bios_version;
+ PCI_bus = (signatures[j].flag == 1);
+ Quantum = (signatures[j].flag > 1) ? signatures[j].flag : 0;
+ bios_base = addresses[i];
+ bios_mem = p;
+ goto found;
+ }
+ }
+ iounmap(p);
+ }
+
+found:
+ if (bios_major == 2) {
+ /* The TMC-1660/TMC-1680 has a RAM area just after the BIOS ROM.
+ Assuming the ROM is enabled (otherwise we wouldn't have been
+ able to read the ROM signature :-), then the ROM sets up the
+ RAM area with some magic numbers, such as a list of port
+ base addresses and a list of the disk "geometry" reported to
+ DOS (this geometry has nothing to do with physical geometry).
+ */
+
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ case 3: /* ISA_250MG */
+ base = readb(bios_mem + 0x1fa2) + (readb(bios_mem + 0x1fa3) << 8);
+ break;
+ case 4: /* ISA_200S (another one) */
+ base = readb(bios_mem + 0x1fa3) + (readb(bios_mem + 0x1fa4) << 8);
+ break;
+ default:
+ base = readb(bios_mem + 0x1fcc) + (readb(bios_mem + 0x1fcd) << 8);
+ break;
+ }
+
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+
+ for (i = 0; i < PORT_COUNT; i++) {
+ if (base == ports[i]) {
+ if (!request_region(base, 0x10, "fdomain"))
+ break;
+ if (!fdomain_is_valid_port(base)) {
+ release_region(base, 0x10);
+ break;
+ }
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+ return 1;
+ }
+ }
+
+ /* This is a bad sign. It usually means that someone patched the
+ BIOS signature list (the signatures variable) to contain a BIOS
+ signature for a board *OTHER THAN* the TMC-1660/TMC-1680. */
+
+#if DEBUG_DETECT
+ printk( " RAM FAILED, " );
+#endif
+ }
+
+ /* Anyway, the alternative to finding the address in the RAM is to just
+ search through every possible port address for one that is attached
+ to the Future Domain card. Don't panic, though, about reading all
+ these random port addresses -- there are rumors that the Future
+ Domain BIOS does something very similar.
+
+ Do not, however, check ports which the kernel knows are being used by
+ another driver. */
+
+ for (i = 0; i < PORT_COUNT; i++) {
+ base = ports[i];
+ if (!request_region(base, 0x10, "fdomain")) {
+#if DEBUG_DETECT
+ printk( " (%x inuse),", base );
+#endif
+ continue;
+ }
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+ flag = fdomain_is_valid_port(base);
+ if (flag)
+ break;
+ release_region(base, 0x10);
+ }
+
+#if DEBUG_DETECT
+ if (flag) printk( " SUCCESS\n" );
+ else printk( " FAILURE\n" );
+#endif
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+
+ return 1; /* success */
+}
+
+#else /* PCMCIA */
+
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ if (irq)
+ *irq = 0;
+ if (iobase)
+ *iobase = 0;
+ return 0;
+}
+
+#endif /* !PCMCIA */
+
+
+/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
+ iobase) This function gets the Interrupt Level and I/O base address from
+ the PCI configuration registers. */
+
+#ifdef CONFIG_PCI
+static int fdomain_pci_bios_detect( int *irq, int *iobase, struct pci_dev **ret_pdev )
+{
+ unsigned int pci_irq; /* PCI interrupt line */
+ unsigned long pci_base; /* PCI I/O base address */
+ struct pci_dev *pdev = NULL;
+
+#if DEBUG_DETECT
+ /* Tell how to print a list of the known PCI devices from bios32 and
+ list vendor and device IDs being used if in debug mode. */
+
+ printk( "scsi: <fdomain> INFO: use lspci -v to see list of PCI devices\n" );
+ printk( "scsi: <fdomain> TMC-3260 detect:"
+ " Using Vendor ID: 0x%x and Device ID: 0x%x\n",
+ PCI_VENDOR_ID_FD,
+ PCI_DEVICE_ID_FD_36C70 );
+#endif
+
+ if ((pdev = pci_get_device(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70, pdev)) == NULL)
+ return 0;
+ if (pci_enable_device(pdev))
+ goto fail;
+
+#if DEBUG_DETECT
+ printk( "scsi: <fdomain> TMC-3260 detect:"
+ " PCI bus %u, device %u, function %u\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+#endif
+
+ /* We now have the appropriate device function for the FD board so we
+ just read the PCI config info from the registers. */
+
+ pci_base = pci_resource_start(pdev, 0);
+ pci_irq = pdev->irq;
+
+ if (!request_region( pci_base, 0x10, "fdomain" ))
+ goto fail;
+
+ /* Now we have the I/O base address and interrupt from the PCI
+ configuration registers. */
+
+ *irq = pci_irq;
+ *iobase = pci_base;
+ *ret_pdev = pdev;
+
+#if DEBUG_DETECT
+ printk( "scsi: <fdomain> TMC-3260 detect:"
+ " IRQ = %d, I/O base = 0x%x [0x%lx]\n", *irq, *iobase, pci_base );
+#endif
+
+ if (!fdomain_is_valid_port(pci_base)) {
+ printk(KERN_ERR "scsi: <fdomain> PCI card detected, but driver not loaded (invalid port)\n" );
+ release_region(pci_base, 0x10);
+ goto fail;
+ }
+
+ /* Fill in a few global variables. Ugh. */
+ bios_major = bios_minor = -1;
+ PCI_bus = 1;
+ PCI_dev = pdev;
+ Quantum = 0;
+ bios_base = 0;
+
+ return 1;
+fail:
+ pci_dev_put(pdev);
+ return 0;
+}
+
+#endif
+
+struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt )
+{
+ int retcode;
+ struct Scsi_Host *shpnt;
+ struct pci_dev *pdev = NULL;
+
+ if (setup_called) {
+#if DEBUG_DETECT
+ printk( "scsi: <fdomain> No BIOS, using port_base = 0x%x, irq = %d\n",
+ port_base, interrupt_level );
+#endif
+ if (!request_region(port_base, 0x10, "fdomain")) {
+ printk( "scsi: <fdomain> port 0x%x is busy\n", port_base );
+ printk( "scsi: <fdomain> Bad LILO/INSMOD parameters?\n" );
+ return NULL;
+ }
+ if (!fdomain_is_valid_port( port_base )) {
+ printk( "scsi: <fdomain> Cannot locate chip at port base 0x%x\n",
+ port_base );
+ printk( "scsi: <fdomain> Bad LILO/INSMOD parameters?\n" );
+ release_region(port_base, 0x10);
+ return NULL;
+ }
+ } else {
+ int flag = 0;
+
+#ifdef CONFIG_PCI
+ /* Try PCI detection first */
+ flag = fdomain_pci_bios_detect( &interrupt_level, &port_base, &pdev );
+#endif
+ if (!flag) {
+ /* Then try ISA bus detection */
+ flag = fdomain_isa_detect( &interrupt_level, &port_base );
+
+ if (!flag) {
+ printk( "scsi: <fdomain> Detection failed (no card)\n" );
+ return NULL;
+ }
+ }
+ }
+
+ fdomain_16x0_bus_reset(NULL);
+
+ if (fdomain_test_loopback()) {
+ printk(KERN_ERR "scsi: <fdomain> Detection failed (loopback test failed at port base 0x%x)\n", port_base);
+ if (setup_called) {
+ printk(KERN_ERR "scsi: <fdomain> Bad LILO/INSMOD parameters?\n");
+ }
+ goto fail;
+ }
+
+ if (this_id) {
+ tpnt->this_id = (this_id & 0x07);
+ adapter_mask = (1 << tpnt->this_id);
+ } else {
+ if (PCI_bus || (bios_major == 3 && bios_minor >= 2) || bios_major < 0) {
+ tpnt->this_id = 7;
+ adapter_mask = 0x80;
+ } else {
+ tpnt->this_id = 6;
+ adapter_mask = 0x40;
+ }
+ }
+
+/* Print out a banner here in case we can't
+ get resources. */
+
+ shpnt = scsi_register( tpnt, 0 );
+ if(shpnt == NULL) {
+ release_region(port_base, 0x10);
+ return NULL;
+ }
+ shpnt->irq = interrupt_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+ print_banner( shpnt );
+
+ /* Log IRQ with kernel */
+ if (!interrupt_level) {
+ printk(KERN_ERR "scsi: <fdomain> Card Detected, but driver not loaded (no IRQ)\n" );
+ goto fail;
+ } else {
+ /* Register the IRQ with the kernel */
+
+ retcode = request_irq( interrupt_level,
+ do_fdomain_16x0_intr, pdev?IRQF_SHARED:0, "fdomain", shpnt);
+
+ if (retcode < 0) {
+ if (retcode == -EINVAL) {
+ printk(KERN_ERR "scsi: <fdomain> IRQ %d is bad!\n", interrupt_level );
+ printk(KERN_ERR " This shouldn't happen!\n" );
+ printk(KERN_ERR " Send mail to faith@acm.org\n" );
+ } else if (retcode == -EBUSY) {
+ printk(KERN_ERR "scsi: <fdomain> IRQ %d is already in use!\n", interrupt_level );
+ printk(KERN_ERR " Please use another IRQ!\n" );
+ } else {
+ printk(KERN_ERR "scsi: <fdomain> Error getting IRQ %d\n", interrupt_level );
+ printk(KERN_ERR " This shouldn't happen!\n" );
+ printk(KERN_ERR " Send mail to faith@acm.org\n" );
+ }
+ printk(KERN_ERR "scsi: <fdomain> Detected, but driver not loaded (IRQ)\n" );
+ goto fail;
+ }
+ }
+ return shpnt;
+fail:
+ pci_dev_put(pdev);
+ release_region(port_base, 0x10);
+ return NULL;
+}
+
+static int fdomain_16x0_detect(struct scsi_host_template *tpnt)
+{
+ if (fdomain)
+ fdomain_setup(fdomain);
+ return (__fdomain_16x0_detect(tpnt) != NULL);
+}
+
+static const char *fdomain_16x0_info( struct Scsi_Host *ignore )
+{
+ static char buffer[128];
+ char *pt;
+
+ strcpy( buffer, "Future Domain 16-bit SCSI Driver Version" );
+ if (strchr( VERSION, ':')) { /* Assume VERSION is an RCS Revision string */
+ strcat( buffer, strchr( VERSION, ':' ) + 1 );
+ pt = strrchr( buffer, '$') - 1;
+ if (!pt) /* Stripped RCS Revision string? */
+ pt = buffer + strlen( buffer ) - 1;
+ if (*pt != ' ')
+ ++pt;
+ *pt = '\0';
+ } else { /* Assume VERSION is a number */
+ strcat( buffer, " " VERSION );
+ }
+
+ return buffer;
+}
+
+#if 0
+static int fdomain_arbitrate( void )
+{
+ int status = 0;
+ unsigned long timeout;
+
+#if EVERY_ACCESS
+ printk( "fdomain_arbitrate()\n" );
+#endif
+
+ outb(0x00, port_base + SCSI_Cntl); /* Disable data drivers */
+ outb(adapter_mask, port_base + SCSI_Data_NoACK); /* Set our id bit */
+ outb(0x04 | PARITY_MASK, port_base + TMC_Cntl); /* Start arbitration */
+
+ timeout = 500;
+ do {
+ status = inb(port_base + TMC_Status); /* Read adapter status */
+ if (status & 0x02) /* Arbitration complete */
+ return 0;
+ mdelay(1); /* Wait one millisecond */
+ } while (--timeout);
+
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+
+#if EVERY_ACCESS
+ printk( "Arbitration failed, status = %x\n", status );
+#endif
+#if ERRORS_ONLY
+ printk( "scsi: <fdomain> Arbitration failed, status = %x\n", status );
+#endif
+ return 1;
+}
+#endif
+
+static int fdomain_select( int target )
+{
+ int status;
+ unsigned long timeout;
+#if ERRORS_ONLY
+ static int flag = 0;
+#endif
+
+ outb(0x82, port_base + SCSI_Cntl); /* Bus Enable + Select */
+ outb(adapter_mask | (1 << target), port_base + SCSI_Data_NoACK);
+
+ /* Stop arbitration and enable parity */
+ outb(PARITY_MASK, port_base + TMC_Cntl);
+
+ timeout = 350; /* 350 msec */
+
+ do {
+ status = inb(port_base + SCSI_Status); /* Read adapter status */
+ if (status & 1) { /* Busy asserted */
+ /* Enable SCSI Bus (on error, should make bus idle with 0) */
+ outb(0x80, port_base + SCSI_Cntl);
+ return 0;
+ }
+ mdelay(1); /* wait one msec */
+ } while (--timeout);
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+#if EVERY_ACCESS
+ if (!target) printk( "Selection failed\n" );
+#endif
+#if ERRORS_ONLY
+ if (!target) {
+ if (!flag) /* Skip first failure for all chips. */
+ ++flag;
+ else
+ printk( "scsi: <fdomain> Selection failed\n" );
+ }
+#endif
+ return 1;
+}
+
+static void my_done(int error)
+{
+ if (in_command) {
+ in_command = 0;
+ outb(0x00, port_base + Interrupt_Cntl);
+ fdomain_make_bus_idle();
+ current_SC->result = error;
+ if (current_SC->scsi_done)
+ current_SC->scsi_done( current_SC );
+ else panic( "scsi: <fdomain> current_SC->scsi_done() == NULL" );
+ } else {
+ panic( "scsi: <fdomain> my_done() called outside of command\n" );
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+}
+
+static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
+{
+ unsigned long flags;
+ int status;
+ int done = 0;
+ unsigned data_count;
+
+ /* The fdomain_16x0_intr is only called via
+ the interrupt handler. The goal of the
+ sti() here is to allow other
+ interruptions while this routine is
+ running. */
+
+ /* Check for other IRQ sources */
+ if ((inb(port_base + TMC_Status) & 0x01) == 0)
+ return IRQ_NONE;
+
+ /* It is our IRQ */
+ outb(0x00, port_base + Interrupt_Cntl);
+
+ /* We usually have one spurious interrupt after each command. Ignore it. */
+ if (!in_command || !current_SC) { /* Spurious interrupt */
+#if EVERY_ACCESS
+ printk( "Spurious interrupt, in_command = %d, current_SC = %x\n",
+ in_command, current_SC );
+#endif
+ return IRQ_NONE;
+ }
+
+ /* Abort calls my_done, so we do nothing here. */
+ if (current_SC->SCp.phase & aborted) {
+#if DEBUG_ABORT
+ printk( "scsi: <fdomain> Interrupt after abort, ignoring\n" );
+#endif
+ /*
+ return IRQ_HANDLED; */
+ }
+
+#if DEBUG_RACE
+ ++in_interrupt_flag;
+#endif
+
+ if (current_SC->SCp.phase & in_arbitration) {
+ status = inb(port_base + TMC_Status); /* Read adapter status */
+ if (!(status & 0x02)) {
+#if EVERY_ACCESS
+ printk( " AFAIL " );
+#endif
+ spin_lock_irqsave(current_SC->device->host->host_lock, flags);
+ my_done( DID_BUS_BUSY << 16 );
+ spin_unlock_irqrestore(current_SC->device->host->host_lock, flags);
+ return IRQ_HANDLED;
+ }
+ current_SC->SCp.phase = in_selection;
+
+ outb(0x40 | FIFO_COUNT, port_base + Interrupt_Cntl);
+
+ outb(0x82, port_base + SCSI_Cntl); /* Bus Enable + Select */
+ outb(adapter_mask | (1 << scmd_id(current_SC)), port_base + SCSI_Data_NoACK);
+
+ /* Stop arbitration and enable parity */
+ outb(0x10 | PARITY_MASK, port_base + TMC_Cntl);
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return IRQ_HANDLED;
+ } else if (current_SC->SCp.phase & in_selection) {
+ status = inb(port_base + SCSI_Status);
+ if (!(status & 0x01)) {
+ /* Try again, for slow devices */
+ if (fdomain_select( scmd_id(current_SC) )) {
+#if EVERY_ACCESS
+ printk( " SFAIL " );
+#endif
+ spin_lock_irqsave(current_SC->device->host->host_lock, flags);
+ my_done( DID_NO_CONNECT << 16 );
+ spin_unlock_irqrestore(current_SC->device->host->host_lock, flags);
+ return IRQ_HANDLED;
+ } else {
+#if EVERY_ACCESS
+ printk( " AltSel " );
+#endif
+ /* Stop arbitration and enable parity */
+ outb(0x10 | PARITY_MASK, port_base + TMC_Cntl);
+ }
+ }
+ current_SC->SCp.phase = in_other;
+ outb(0x90 | FIFO_COUNT, port_base + Interrupt_Cntl);
+ outb(0x80, port_base + SCSI_Cntl);
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return IRQ_HANDLED;
+ }
+
+ /* current_SC->SCp.phase == in_other: this is the body of the routine */
+
+ status = inb(port_base + SCSI_Status);
+
+ if (status & 0x10) { /* REQ */
+
+ switch (status & 0x0e) {
+
+ case 0x08: /* COMMAND OUT */
+ outb(current_SC->cmnd[current_SC->SCp.sent_command++],
+ port_base + Write_SCSI_Data);
+#if EVERY_ACCESS
+ printk( "CMD = %x,",
+ current_SC->cmnd[ current_SC->SCp.sent_command - 1] );
+#endif
+ break;
+ case 0x00: /* DATA OUT -- tmc18c50/tmc18c30 only */
+ if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
+ current_SC->SCp.have_data_in = -1;
+ outb(0xd0 | PARITY_MASK, port_base + TMC_Cntl);
+ }
+ break;
+ case 0x04: /* DATA IN -- tmc18c50/tmc18c30 only */
+ if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
+ current_SC->SCp.have_data_in = 1;
+ outb(0x90 | PARITY_MASK, port_base + TMC_Cntl);
+ }
+ break;
+ case 0x0c: /* STATUS IN */
+ current_SC->SCp.Status = inb(port_base + Read_SCSI_Data);
+#if EVERY_ACCESS
+ printk( "Status = %x, ", current_SC->SCp.Status );
+#endif
+#if ERRORS_ONLY
+ if (current_SC->SCp.Status
+ && current_SC->SCp.Status != 2
+ && current_SC->SCp.Status != 8) {
+ printk( "scsi: <fdomain> target = %d, command = %x, status = %x\n",
+ current_SC->device->id,
+ current_SC->cmnd[0],
+ current_SC->SCp.Status );
+ }
+#endif
+ break;
+ case 0x0a: /* MESSAGE OUT */
+ outb(MESSAGE_REJECT, port_base + Write_SCSI_Data); /* Reject */
+ break;
+ case 0x0e: /* MESSAGE IN */
+ current_SC->SCp.Message = inb(port_base + Read_SCSI_Data);
+#if EVERY_ACCESS
+ printk( "Message = %x, ", current_SC->SCp.Message );
+#endif
+ if (!current_SC->SCp.Message) ++done;
+#if DEBUG_MESSAGES || EVERY_ACCESS
+ if (current_SC->SCp.Message) {
+ printk( "scsi: <fdomain> message = %x\n",
+ current_SC->SCp.Message );
+ }
+#endif
+ break;
+ }
+ }
+
+ if (chip == tmc1800 && !current_SC->SCp.have_data_in
+ && (current_SC->SCp.sent_command >= current_SC->cmd_len)) {
+
+ if(current_SC->sc_data_direction == DMA_TO_DEVICE)
+ {
+ current_SC->SCp.have_data_in = -1;
+ outb(0xd0 | PARITY_MASK, port_base + TMC_Cntl);
+ }
+ else
+ {
+ current_SC->SCp.have_data_in = 1;
+ outb(0x90 | PARITY_MASK, port_base + TMC_Cntl);
+ }
+ }
+
+ if (current_SC->SCp.have_data_in == -1) { /* DATA OUT */
+ while ((data_count = FIFO_Size - inw(port_base + FIFO_Data_Count)) > 512) {
+#if EVERY_ACCESS
+ printk( "DC=%d, ", data_count ) ;
+#endif
+ if (data_count > current_SC->SCp.this_residual)
+ data_count = current_SC->SCp.this_residual;
+ if (data_count > 0) {
+#if EVERY_ACCESS
+ printk( "%d OUT, ", data_count );
+#endif
+ if (data_count == 1) {
+ outb(*current_SC->SCp.ptr++, port_base + Write_FIFO);
+ --current_SC->SCp.this_residual;
+ } else {
+ data_count >>= 1;
+ outsw(port_base + Write_FIFO, current_SC->SCp.ptr, data_count);
+ current_SC->SCp.ptr += 2 * data_count;
+ current_SC->SCp.this_residual -= 2 * data_count;
+ }
+ }
+ if (!current_SC->SCp.this_residual) {
+ if (current_SC->SCp.buffers_residual) {
+ --current_SC->SCp.buffers_residual;
+ ++current_SC->SCp.buffer;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ } else
+ break;
+ }
+ }
+ }
+
+ if (current_SC->SCp.have_data_in == 1) { /* DATA IN */
+ while ((data_count = inw(port_base + FIFO_Data_Count)) > 0) {
+#if EVERY_ACCESS
+ printk( "DC=%d, ", data_count );
+#endif
+ if (data_count > current_SC->SCp.this_residual)
+ data_count = current_SC->SCp.this_residual;
+ if (data_count) {
+#if EVERY_ACCESS
+ printk( "%d IN, ", data_count );
+#endif
+ if (data_count == 1) {
+ *current_SC->SCp.ptr++ = inb(port_base + Read_FIFO);
+ --current_SC->SCp.this_residual;
+ } else {
+ data_count >>= 1; /* Number of words */
+ insw(port_base + Read_FIFO, current_SC->SCp.ptr, data_count);
+ current_SC->SCp.ptr += 2 * data_count;
+ current_SC->SCp.this_residual -= 2 * data_count;
+ }
+ }
+ if (!current_SC->SCp.this_residual
+ && current_SC->SCp.buffers_residual) {
+ --current_SC->SCp.buffers_residual;
+ ++current_SC->SCp.buffer;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ }
+ }
+ }
+
+ if (done) {
+#if EVERY_ACCESS
+ printk( " ** IN DONE %d ** ", current_SC->SCp.have_data_in );
+#endif
+
+#if ERRORS_ONLY
+ if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
+ char *buf = scsi_sglist(current_SC);
+ if ((unsigned char)(*(buf + 2)) & 0x0f) {
+ unsigned char key;
+ unsigned char code;
+ unsigned char qualifier;
+
+ key = (unsigned char)(*(buf + 2)) & 0x0f;
+ code = (unsigned char)(*(buf + 12));
+ qualifier = (unsigned char)(*(buf + 13));
+
+ if (key != UNIT_ATTENTION
+ && !(key == NOT_READY
+ && code == 0x04
+ && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
+ && !(key == ILLEGAL_REQUEST && (code == 0x25
+ || code == 0x24
+ || !code)))
+
+ printk( "scsi: <fdomain> REQUEST SENSE"
+ " Key = %x, Code = %x, Qualifier = %x\n",
+ key, code, qualifier );
+ }
+ }
+#endif
+#if EVERY_ACCESS
+ printk( "BEFORE MY_DONE. . ." );
+#endif
+ spin_lock_irqsave(current_SC->device->host->host_lock, flags);
+ my_done( (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
+ spin_unlock_irqrestore(current_SC->device->host->host_lock, flags);
+#if EVERY_ACCESS
+ printk( "RETURNING.\n" );
+#endif
+
+ } else {
+ if (current_SC->SCp.phase & disconnect) {
+ outb(0xd0 | FIFO_COUNT, port_base + Interrupt_Cntl);
+ outb(0x00, port_base + SCSI_Cntl);
+ } else {
+ outb(0x90 | FIFO_COUNT, port_base + Interrupt_Cntl);
+ }
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return IRQ_HANDLED;
+}
+
+static int fdomain_16x0_queue_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
+{
+ if (in_command) {
+ panic( "scsi: <fdomain> fdomain_16x0_queue() NOT REENTRANT!\n" );
+ }
+#if EVERY_ACCESS
+ printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->target,
+ *(unsigned char *)SCpnt->cmnd,
+ scsi_sg_count(SCpnt),
+ scsi_bufflen(SCpnt));
+#endif
+
+ fdomain_make_bus_idle();
+
+ current_SC = SCpnt; /* Save this for the done function */
+ current_SC->scsi_done = done;
+
+ /* Initialize static data */
+
+ if (scsi_sg_count(current_SC)) {
+ current_SC->SCp.buffer = scsi_sglist(current_SC);
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
+ } else {
+ current_SC->SCp.ptr = NULL;
+ current_SC->SCp.this_residual = 0;
+ current_SC->SCp.buffer = NULL;
+ current_SC->SCp.buffers_residual = 0;
+ }
+
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+ current_SC->SCp.have_data_in = 0;
+ current_SC->SCp.sent_command = 0;
+ current_SC->SCp.phase = in_arbitration;
+
+ /* Start arbitration */
+ outb(0x00, port_base + Interrupt_Cntl);
+ outb(0x00, port_base + SCSI_Cntl); /* Disable data drivers */
+ outb(adapter_mask, port_base + SCSI_Data_NoACK); /* Set our id bit */
+ ++in_command;
+ outb(0x20, port_base + Interrupt_Cntl);
+ outb(0x14 | PARITY_MASK, port_base + TMC_Cntl); /* Start arbitration */
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(fdomain_16x0_queue)
+
+#if DEBUG_ABORT
+static void print_info(struct scsi_cmnd *SCpnt)
+{
+ unsigned int imr;
+ unsigned int irr;
+ unsigned int isr;
+
+ if (!SCpnt || !SCpnt->device || !SCpnt->device->host) {
+ printk(KERN_WARNING "scsi: <fdomain> Cannot provide detailed information\n");
+ return;
+ }
+
+ printk(KERN_INFO "%s\n", fdomain_16x0_info( SCpnt->device->host ) );
+ print_banner(SCpnt->device->host);
+ switch (SCpnt->SCp.phase) {
+ case in_arbitration: printk("arbitration"); break;
+ case in_selection: printk("selection"); break;
+ case in_other: printk("other"); break;
+ default: printk("unknown"); break;
+ }
+
+ printk( " (%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->SCp.phase,
+ SCpnt->device->id,
+ *(unsigned char *)SCpnt->cmnd,
+ scsi_sg_count(SCpnt),
+ scsi_bufflen(SCpnt));
+ printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
+ SCpnt->SCp.sent_command,
+ SCpnt->SCp.have_data_in,
+ SCpnt->timeout );
+#if DEBUG_RACE
+ printk( "in_interrupt_flag = %d\n", in_interrupt_flag );
+#endif
+
+ imr = (inb( 0x0a1 ) << 8) + inb( 0x21 );
+ outb( 0x0a, 0xa0 );
+ irr = inb( 0xa0 ) << 8;
+ outb( 0x0a, 0x20 );
+ irr += inb( 0x20 );
+ outb( 0x0b, 0xa0 );
+ isr = inb( 0xa0 ) << 8;
+ outb( 0x0b, 0x20 );
+ isr += inb( 0x20 );
+
+ /* Print out interesting information */
+ printk( "IMR = 0x%04x", imr );
+ if (imr & (1 << interrupt_level))
+ printk( " (masked)" );
+ printk( ", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr );
+
+ printk( "SCSI Status = 0x%02x\n", inb(port_base + SCSI_Status));
+ printk( "TMC Status = 0x%02x", inb(port_base + TMC_Status));
+ if (inb((port_base + TMC_Status) & 1))
+ printk( " (interrupt)" );
+ printk( "\n" );
+ printk("Interrupt Status = 0x%02x", inb(port_base + Interrupt_Status));
+ if (inb(port_base + Interrupt_Status) & 0x08)
+ printk( " (enabled)" );
+ printk( "\n" );
+ if (chip == tmc18c50 || chip == tmc18c30) {
+ printk("FIFO Status = 0x%02x\n", inb(port_base + FIFO_Status));
+ printk( "Int. Condition = 0x%02x\n",
+ inb( port_base + Interrupt_Cond ) );
+ }
+ printk( "Configuration 1 = 0x%02x\n", inb( port_base + Configuration1 ) );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ printk( "Configuration 2 = 0x%02x\n",
+ inb( port_base + Configuration2 ) );
+}
+#endif
+
+static int fdomain_16x0_abort(struct scsi_cmnd *SCpnt)
+{
+#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
+ printk( "scsi: <fdomain> abort " );
+#endif
+
+ if (!in_command) {
+#if EVERY_ACCESS || ERRORS_ONLY
+ printk( " (not in command)\n" );
+#endif
+ return FAILED;
+ } else printk( "\n" );
+
+#if DEBUG_ABORT
+ print_info( SCpnt );
+#endif
+
+ fdomain_make_bus_idle();
+ current_SC->SCp.phase |= aborted;
+ current_SC->result = DID_ABORT << 16;
+
+ /* Aborts are not done well. . . */
+ my_done(DID_ABORT << 16);
+ return SUCCESS;
+}
+
+int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ outb(1, port_base + SCSI_Cntl);
+ do_pause( 2 );
+ outb(0, port_base + SCSI_Cntl);
+ do_pause( 115 );
+ outb(0, port_base + SCSI_Mode_Cntl);
+ outb(PARITY_MASK, port_base + TMC_Cntl);
+
+ local_irq_restore(flags);
+ return SUCCESS;
+}
+
+static int fdomain_16x0_biosparam(struct scsi_device *sdev,
+ struct block_device *bdev,
+ sector_t capacity, int *info_array)
+{
+ int drive;
+ int size = capacity;
+ unsigned long offset;
+ struct drive_info {
+ unsigned short cylinders;
+ unsigned char heads;
+ unsigned char sectors;
+ } i;
+
+ /* NOTES:
+ The RAM area starts at 0x1f00 from the bios_base address.
+
+ For BIOS Version 2.0:
+
+ The drive parameter table seems to start at 0x1f30.
+ The first byte's purpose is not known.
+ Next is the cylinder, head, and sector information.
+ The last 4 bytes appear to be the drive's size in sectors.
+ The other bytes in the drive parameter table are unknown.
+ If anyone figures them out, please send me mail, and I will
+ update these notes.
+
+ Tape drives do not get placed in this table.
+
+ There is another table at 0x1fea:
+ If the byte is 0x01, then the SCSI ID is not in use.
+ If the byte is 0x18 or 0x48, then the SCSI ID is in use,
+ although tapes don't seem to be in this table. I haven't
+ seen any other numbers (in a limited sample).
+
+ 0x1f2d is a drive count (i.e., not including tapes)
+
+ The table at 0x1fcc are I/O ports addresses for the various
+ operations. I calculate these by hand in this driver code.
+
+
+
+ For the ISA-200S version of BIOS Version 2.0:
+
+ The drive parameter table starts at 0x1f33.
+
+ WARNING: Assume that the table entry is 25 bytes long. Someone needs
+ to check this for the Quantum ISA-200S card.
+
+
+
+ For BIOS Version 3.2:
+
+ The drive parameter table starts at 0x1f70. Each entry is
+ 0x0a bytes long. Heads are one less than we need to report.
+ */
+
+ if (MAJOR(bdev->bd_dev) != SCSI_DISK0_MAJOR) {
+ printk("scsi: <fdomain> fdomain_16x0_biosparam: too many disks");
+ return 0;
+ }
+ drive = MINOR(bdev->bd_dev) >> 4;
+
+ if (bios_major == 2) {
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ /* The value of 25 has never been verified.
+ It should probably be 15. */
+ offset = 0x1f33 + drive * 25;
+ break;
+ case 3: /* ISA_250MG */
+ offset = 0x1f36 + drive * 15;
+ break;
+ case 4: /* ISA_200S (another one) */
+ offset = 0x1f34 + drive * 15;
+ break;
+ default:
+ offset = 0x1f31 + drive * 25;
+ break;
+ }
+ memcpy_fromio( &i, bios_mem + offset, sizeof( struct drive_info ) );
+ info_array[0] = i.heads;
+ info_array[1] = i.sectors;
+ info_array[2] = i.cylinders;
+ } else if (bios_major == 3
+ && bios_minor >= 0
+ && bios_minor < 4) { /* 3.0 and 3.2 BIOS */
+ memcpy_fromio( &i, bios_mem + 0x1f71 + drive * 10,
+ sizeof( struct drive_info ) );
+ info_array[0] = i.heads + 1;
+ info_array[1] = i.sectors;
+ info_array[2] = i.cylinders;
+ } else { /* 3.4 BIOS (and up?) */
+ /* This algorithm was provided by Future Domain (much thanks!). */
+ unsigned char *p = scsi_bios_ptable(bdev);
+
+ if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
+ && p[4]) { /* Partition type */
+
+ /* The partition table layout is as follows:
+
+ Start: 0x1b3h
+ Offset: 0 = partition status
+ 1 = starting head
+ 2 = starting sector and cylinder (word, encoded)
+ 4 = partition type
+ 5 = ending head
+ 6 = ending sector and cylinder (word, encoded)
+ 8 = starting absolute sector (double word)
+ c = number of sectors (double word)
+ Signature: 0x1fe = 0x55aa
+
+ So, this algorithm assumes:
+ 1) the first partition table is in use,
+ 2) the data in the first entry is correct, and
+ 3) partitions never divide cylinders
+
+ Note that (1) may be FALSE for NetBSD (and other BSD flavors),
+ as well as for Linux. Note also, that Linux doesn't pay any
+ attention to the fields that are used by this algorithm -- it
+ only uses the absolute sector data. Recent versions of Linux's
+ fdisk(1) will fill this data in correctly, and forthcoming
+ versions will check for consistency.
+
+ Checking for a non-zero partition type is not part of the
+ Future Domain algorithm, but it seemed to be a reasonable thing
+ to do, especially in the Linux and BSD worlds. */
+
+ info_array[0] = p[5] + 1; /* heads */
+ info_array[1] = p[6] & 0x3f; /* sectors */
+ } else {
+
+ /* Note that this new method guarantees that there will always be
+ less than 1024 cylinders on a platter. This is good for drives
+ up to approximately 7.85GB (where 1GB = 1024 * 1024 kB). */
+
+ if ((unsigned int)size >= 0x7e0000U) {
+ info_array[0] = 0xff; /* heads = 255 */
+ info_array[1] = 0x3f; /* sectors = 63 */
+ } else if ((unsigned int)size >= 0x200000U) {
+ info_array[0] = 0x80; /* heads = 128 */
+ info_array[1] = 0x3f; /* sectors = 63 */
+ } else {
+ info_array[0] = 0x40; /* heads = 64 */
+ info_array[1] = 0x20; /* sectors = 32 */
+ }
+ }
+ /* For both methods, compute the cylinders */
+ info_array[2] = (unsigned int)size / (info_array[0] * info_array[1] );
+ kfree(p);
+ }
+
+ return 0;
+}
+
+static int fdomain_16x0_release(struct Scsi_Host *shpnt)
+{
+ if (shpnt->irq)
+ free_irq(shpnt->irq, shpnt);
+ if (shpnt->io_port && shpnt->n_io_port)
+ release_region(shpnt->io_port, shpnt->n_io_port);
+ if (PCI_bus)
+ pci_dev_put(PCI_dev);
+ return 0;
+}
+
+struct scsi_host_template fdomain_driver_template = {
+ .module = THIS_MODULE,
+ .name = "fdomain",
+ .proc_name = "fdomain",
+ .detect = fdomain_16x0_detect,
+ .info = fdomain_16x0_info,
+ .queuecommand = fdomain_16x0_queue,
+ .eh_abort_handler = fdomain_16x0_abort,
+ .eh_bus_reset_handler = fdomain_16x0_bus_reset,
+ .bios_param = fdomain_16x0_biosparam,
+ .release = fdomain_16x0_release,
+ .can_queue = 1,
+ .this_id = 6,
+ .sg_tablesize = 64,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+#ifndef PCMCIA
+#ifdef CONFIG_PCI
+
+static struct pci_device_id fdomain_pci_tbl[] = {
+ { PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, fdomain_pci_tbl);
+#endif
+#define driver_template fdomain_driver_template
+#include "scsi_module.c"
+
+#endif
diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h
new file mode 100644
index 000000000..47021d9d4
--- /dev/null
+++ b/drivers/scsi/fdomain.h
@@ -0,0 +1,24 @@
+/*
+ * fdomain.c -- Future Domain TMC-16x0 SCSI driver
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+extern struct scsi_host_template fdomain_driver_template;
+extern int fdomain_setup(char *str);
+extern struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt );
+extern int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt);
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 000000000..383598fad
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,17 @@
+obj-$(CONFIG_FCOE_FNIC) += fnic.o
+
+fnic-y := \
+ fnic_attrs.o \
+ fnic_isr.o \
+ fnic_main.o \
+ fnic_res.o \
+ fnic_fcs.o \
+ fnic_scsi.o \
+ fnic_trace.o \
+ fnic_debugfs.o \
+ vnic_cq.o \
+ vnic_dev.o \
+ vnic_intr.o \
+ vnic_rq.o \
+ vnic_wq_copy.o \
+ vnic_wq.o
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 000000000..d1225cf63
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specfic[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 000000000..a9fa26f82
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+ ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+ u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+ u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+ u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ u16 q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ *vlan = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((desc->checksum_fcoe >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 000000000..501660cfe
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_EXCH_DESC_H_
+#define _CQ_EXCH_DESC_H_
+
+#include "cq_desc.h"
+
+/* Exchange completion queue descriptor: 16B */
+struct cq_exch_wq_desc {
+ u16 completed_index;
+ u16 q_number;
+ u16 exchange_id;
+ u8 tmpl;
+ u8 reserved0;
+ u32 reserved1;
+ u8 exch_status;
+ u8 reserved2[2];
+ u8 type_color;
+};
+
+#define CQ_EXCH_WQ_STATUS_BITS 2
+#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
+
+enum cq_exch_status_types {
+ CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
+ CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
+ CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
+ CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
+};
+
+static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
+ u8 *type,
+ u8 *color,
+ u16 *q_number,
+ u16 *completed_index,
+ u8 *exch_status)
+{
+ cq_desc_dec((struct cq_desc *)desc_ptr, type,
+ color, q_number, completed_index);
+ *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
+}
+
+struct cq_fcp_rq_desc {
+ u16 completed_index_eop_sop_prt;
+ u16 q_number;
+ u16 exchange_id;
+ u16 tmpl;
+ u16 bytes_written;
+ u16 vlan;
+ u8 sof;
+ u8 eof;
+ u8 fcs_fer_fck;
+ u8 type_color;
+};
+
+#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15)
+#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14)
+#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12)
+#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f
+#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff
+#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14
+#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
+#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7
+#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
+
+static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
+ u8 *type,
+ u8 *color,
+ u16 *q_number,
+ u16 *completed_index,
+ u8 *eop,
+ u8 *sop,
+ u8 *fck,
+ u16 *exchange_id,
+ u16 *tmpl,
+ u32 *bytes_written,
+ u8 *sof,
+ u8 *eof,
+ u8 *ingress_port,
+ u8 *packet_err,
+ u8 *fcoe_err,
+ u8 *fcs_ok,
+ u8 *vlan_stripped,
+ u16 *vlan)
+{
+ cq_desc_dec((struct cq_desc *)desc_ptr, type,
+ color, q_number, completed_index);
+ *eop = (desc_ptr->completed_index_eop_sop_prt &
+ CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
+ *sop = (desc_ptr->completed_index_eop_sop_prt &
+ CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
+ *ingress_port =
+ (desc_ptr->completed_index_eop_sop_prt &
+ CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
+ *exchange_id = desc_ptr->exchange_id;
+ *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
+ *bytes_written =
+ desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_err =
+ (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
+ CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
+ *vlan_stripped =
+ (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
+ CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
+ *vlan = desc_ptr->vlan;
+ *sof = desc_ptr->sof;
+ *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
+ *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
+ CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
+ *eof = desc_ptr->eof;
+ *fcs_ok =
+ (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
+ CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
+}
+
+struct cq_sgl_desc {
+ u16 exchange_id;
+ u16 q_number;
+ u32 active_burst_offset;
+ u32 tot_data_bytes;
+ u16 tmpl;
+ u8 sgl_err;
+ u8 type_color;
+};
+
+enum cq_sgl_err_types {
+ CQ_SGL_ERR_NO_ERROR = 0,
+ CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */
+ CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
+ CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */
+ CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */
+ CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */
+ CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */
+ CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */
+ CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
+ CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */
+};
+
+#define CQ_SGL_SGL_ERR_MASK 0x1f
+#define CQ_SGL_TMPL_MASK 0x1f
+
+static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
+ u8 *type,
+ u8 *color,
+ u16 *q_number,
+ u16 *exchange_id,
+ u32 *active_burst_offset,
+ u32 *tot_data_bytes,
+ u16 *tmpl,
+ u8 *sgl_err)
+{
+ /* Cheat a little by assuming exchange_id is the same as completed
+ index */
+ cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
+ exchange_id);
+ *active_burst_offset = desc_ptr->active_burst_offset;
+ *tot_data_bytes = desc_ptr->tot_data_bytes;
+ *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
+ *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
+}
+
+#endif /* _CQ_EXCH_DESC_H_ */
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 000000000..12d770d88
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FCPIO_H_
+#define _FCPIO_H_
+
+#include <linux/if_ether.h>
+
+/*
+ * This header file includes all of the data structures used for
+ * communication by the host driver to the fcp firmware.
+ */
+
+/*
+ * Exchange and sequence id space allocated to the host driver
+ */
+#define FCPIO_HOST_EXCH_RANGE_START 0x1000
+#define FCPIO_HOST_EXCH_RANGE_END 0x1fff
+#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80
+#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff
+
+/*
+ * Command entry type
+ */
+enum fcpio_type {
+ /*
+ * Initiator request types
+ */
+ FCPIO_ICMND_16 = 0x1,
+ FCPIO_ICMND_32,
+ FCPIO_ICMND_CMPL,
+ FCPIO_ITMF,
+ FCPIO_ITMF_CMPL,
+
+ /*
+ * Target request types
+ */
+ FCPIO_TCMND_16 = 0x11,
+ FCPIO_TCMND_32,
+ FCPIO_TDATA,
+ FCPIO_TXRDY,
+ FCPIO_TRSP,
+ FCPIO_TDRSP_CMPL,
+ FCPIO_TTMF,
+ FCPIO_TTMF_ACK,
+ FCPIO_TABORT,
+ FCPIO_TABORT_CMPL,
+
+ /*
+ * Misc request types
+ */
+ FCPIO_ACK = 0x20,
+ FCPIO_RESET,
+ FCPIO_RESET_CMPL,
+ FCPIO_FLOGI_REG,
+ FCPIO_FLOGI_REG_CMPL,
+ FCPIO_ECHO,
+ FCPIO_ECHO_CMPL,
+ FCPIO_LUNMAP_CHNG,
+ FCPIO_LUNMAP_REQ,
+ FCPIO_LUNMAP_REQ_CMPL,
+ FCPIO_FLOGI_FIP_REG,
+ FCPIO_FLOGI_FIP_REG_CMPL,
+};
+
+/*
+ * Header status codes from the firmware
+ */
+enum fcpio_status {
+ FCPIO_SUCCESS = 0, /* request was successful */
+
+ /*
+ * If a request to the firmware is rejected, the original request
+ * header will be returned with the status set to one of the following:
+ */
+ FCPIO_INVALID_HEADER, /* header contains invalid data */
+ FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */
+ FCPIO_INVALID_PARAM, /* some parameter in request is invalid */
+ FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
+ FCPIO_IO_NOT_FOUND, /* requested I/O was not found */
+
+ /*
+ * Once a request is processed, the firmware will usually return
+ * a cmpl message type. In cases where errors occurred,
+ * the header status field would be filled in with one of the following:
+ */
+ FCPIO_ABORTED = 0x41, /* request was aborted */
+ FCPIO_TIMEOUT, /* request was timed out */
+ FCPIO_SGL_INVALID, /* request was aborted due to sgl error */
+ FCPIO_MSS_INVALID, /* request was aborted due to mss error */
+ FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */
+ FCPIO_FW_ERR, /* request was terminated due to fw error */
+ FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */
+ FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */
+ FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
+ FCPIO_CMND_REJECTED, /* request was invalid and rejected */
+ FCPIO_NO_PATH_AVAIL, /* no paths to the lun was available */
+ FCPIO_PATH_FAILED, /* i/o sent to current path failed */
+ FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */
+};
+
+/*
+ * The header command tag. All host requests will use the "tag" field
+ * to mark commands with a unique tag. When the firmware responds to
+ * a host request, it will copy the tag field into the response.
+ *
+ * The only firmware requests that will use the rx_id/ox_id fields instead
+ * of the tag field will be the target command and target task management
+ * requests. These two requests do not have corresponding host requests
+ * since they come directly from the FC initiator on the network.
+ */
+struct fcpio_tag {
+ union {
+ u32 req_id;
+ struct {
+ u16 rx_id;
+ u16 ox_id;
+ } ex_id;
+ } u;
+};
+
+static inline void
+fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
+{
+ tag->u.req_id = id;
+}
+
+static inline void
+fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
+{
+ *id = tag->u.req_id;
+}
+
+static inline void
+fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
+{
+ tag->u.ex_id.rx_id = rx_id;
+ tag->u.ex_id.ox_id = ox_id;
+}
+
+static inline void
+fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
+{
+ *rx_id = tag->u.ex_id.rx_id;
+ *ox_id = tag->u.ex_id.ox_id;
+}
+
+/*
+ * The header for an fcpio request, whether from the firmware or from the
+ * host driver
+ */
+struct fcpio_header {
+ u8 type; /* enum fcpio_type */
+ u8 status; /* header status entry */
+ u16 _resvd; /* reserved */
+ struct fcpio_tag tag; /* header tag */
+};
+
+static inline void
+fcpio_header_enc(struct fcpio_header *hdr,
+ u8 type, u8 status,
+ struct fcpio_tag tag)
+{
+ hdr->type = type;
+ hdr->status = status;
+ hdr->_resvd = 0;
+ hdr->tag = tag;
+}
+
+static inline void
+fcpio_header_dec(struct fcpio_header *hdr,
+ u8 *type, u8 *status,
+ struct fcpio_tag *tag)
+{
+ *type = hdr->type;
+ *status = hdr->status;
+ *tag = hdr->tag;
+}
+
+#define CDB_16 16
+#define CDB_32 32
+#define LUN_ADDRESS 8
+
+/*
+ * fcpio_icmnd_16: host -> firmware request
+ *
+ * used for sending out an initiator SCSI 16-byte command
+ */
+struct fcpio_icmnd_16 {
+ u32 lunmap_id; /* index into lunmap table */
+ u8 special_req_flags; /* special exchange request flags */
+ u8 _resvd0[3]; /* reserved */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 sense_len; /* sense buffer length */
+ u64 sgl_addr; /* scatter-gather list addr */
+ u64 sense_addr; /* sense buffer address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd1; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 _resvd2; /* reserved */
+ u8 d_id[3]; /* FC vNIC only: Target D_ID */
+ u16 mss; /* FC vNIC only: max burst */
+ u16 _resvd3; /* reserved */
+ u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
+ u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */
+};
+
+/*
+ * Special request flags
+ */
+#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */
+#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */
+#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */
+#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
+
+/*
+ * Command flags
+ */
+#define FCPIO_ICMND_RDDATA 0x02 /* read data */
+#define FCPIO_ICMND_WRDATA 0x01 /* write data */
+
+/*
+ * fcpio_icmnd_32: host -> firmware request
+ *
+ * used for sending out an initiator SCSI 32-byte command
+ */
+struct fcpio_icmnd_32 {
+ u32 lunmap_id; /* index into lunmap table */
+ u8 special_req_flags; /* special exchange request flags */
+ u8 _resvd0[3]; /* reserved */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 sense_len; /* sense buffer length */
+ u64 sgl_addr; /* scatter-gather list addr */
+ u64 sense_addr; /* sense buffer address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd1; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 _resvd2; /* reserved */
+ u8 d_id[3]; /* FC vNIC only: Target D_ID */
+ u16 mss; /* FC vNIC only: max burst */
+ u16 _resvd3; /* reserved */
+ u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
+ u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */
+};
+
+/*
+ * fcpio_itmf: host -> firmware request
+ *
+ * used for requesting the firmware to abort a request and/or send out
+ * a task management function
+ *
+ * The t_tag field is only needed when the request type is ABT_TASK.
+ */
+struct fcpio_itmf {
+ u32 lunmap_id; /* index into lunmap table */
+ u32 tm_req; /* SCSI Task Management request */
+ u32 t_tag; /* header tag of fcpio to be aborted */
+ u32 _resvd; /* _reserved */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 _resvd1; /* reserved */
+ u8 d_id[3]; /* FC vNIC only: Target D_ID */
+ u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */
+ u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */
+};
+
+/*
+ * Task Management request
+ */
+enum fcpio_itmf_tm_req_type {
+ FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */
+ FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */
+ FCPIO_ITMF_ABT_TASK_SET, /* abort task set */
+ FCPIO_ITMF_CLR_TASK_SET, /* clear task set */
+ FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */
+ FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */
+};
+
+/*
+ * fcpio_tdata: host -> firmware request
+ *
+ * used for requesting the firmware to send out a read data transfer for a
+ * target command
+ */
+struct fcpio_tdata {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 flags; /* command flags */
+ u32 rel_offset; /* data sequence relative offset */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 data_len; /* length of data expected to send */
+ u64 sgl_addr; /* scatter-gather list address */
+};
+
+/*
+ * Command flags
+ */
+#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */
+
+/*
+ * fcpio_txrdy: host -> firmware request
+ *
+ * used for requesting the firmware to send out a write data transfer for a
+ * target command
+ */
+struct fcpio_txrdy {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 _resvd0; /* reserved */
+ u32 rel_offset; /* data sequence relative offset */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 data_len; /* length of data expected to send */
+ u64 sgl_addr; /* scatter-gather list address */
+};
+
+/*
+ * fcpio_trsp: host -> firmware request
+ *
+ * used for requesting the firmware to send out a response for a target
+ * command
+ */
+struct fcpio_trsp {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 _resvd0; /* reserved */
+ u32 sense_len; /* sense data buffer length */
+ u64 sense_addr; /* sense data buffer address */
+ u16 _resvd1; /* reserved */
+ u8 flags; /* response request flags */
+ u8 scsi_status; /* SCSI status */
+ u32 residual; /* SCSI data residual value of I/O */
+};
+
+/*
+ * resposnse request flags
+ */
+#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */
+#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */
+
+/*
+ * fcpio_ttmf_ack: host -> firmware response
+ *
+ * used by the host to indicate to the firmware it has received and processed
+ * the target tmf request
+ */
+struct fcpio_ttmf_ack {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 _resvd0; /* reserved */
+ u32 tmf_status; /* SCSI task management status */
+};
+
+/*
+ * fcpio_tabort: host -> firmware request
+ *
+ * used by the host to request the firmware to abort a target request that was
+ * received by the firmware
+ */
+struct fcpio_tabort {
+ u16 rx_id; /* rx_id of the target request */
+};
+
+/*
+ * fcpio_reset: host -> firmware request
+ *
+ * used by the host to signal a reset of the driver to the firmware
+ * and to request firmware to clean up all outstanding I/O
+ */
+struct fcpio_reset {
+ u32 _resvd;
+};
+
+enum fcpio_flogi_reg_format_type {
+ FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */
+ FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */
+};
+
+/*
+ * fcpio_flogi_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format
+ */
+struct fcpio_flogi_reg {
+ u8 format;
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+ u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */
+ u16 _resvd;
+ u32 r_a_tov; /* R_A_TOV in msec */
+ u32 e_d_tov; /* E_D_TOV in msec */
+};
+
+/*
+ * fcpio_echo: host -> firmware request
+ *
+ * sends a heartbeat echo request to the firmware
+ */
+struct fcpio_echo {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_req: host -> firmware request
+ *
+ * scsi vnic only
+ * sends a request to retrieve the lunmap table for scsi vnics
+ */
+struct fcpio_lunmap_req {
+ u64 addr; /* address of the buffer */
+ u32 len; /* len of the buffer */
+};
+
+/*
+ * fcpio_flogi_fip_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format
+ */
+struct fcpio_flogi_fip_reg {
+ u8 _resvd0;
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+ u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */
+ u16 _resvd1;
+ u32 r_a_tov; /* R_A_TOV in msec */
+ u32 e_d_tov; /* E_D_TOV in msec */
+ u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */
+ u16 _resvd2;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the host to the
+ * firmware. They are 128 bytes per structure.
+ */
+#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */
+
+struct fcpio_host_req {
+ struct fcpio_header hdr;
+
+ union {
+ /*
+ * Defines space needed for request
+ */
+ u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
+
+ /*
+ * Initiator host requests
+ */
+ struct fcpio_icmnd_16 icmnd_16;
+ struct fcpio_icmnd_32 icmnd_32;
+ struct fcpio_itmf itmf;
+
+ /*
+ * Target host requests
+ */
+ struct fcpio_tdata tdata;
+ struct fcpio_txrdy txrdy;
+ struct fcpio_trsp trsp;
+ struct fcpio_ttmf_ack ttmf_ack;
+ struct fcpio_tabort tabort;
+
+ /*
+ * Misc requests
+ */
+ struct fcpio_reset reset;
+ struct fcpio_flogi_reg flogi_reg;
+ struct fcpio_echo echo;
+ struct fcpio_lunmap_req lunmap_req;
+ struct fcpio_flogi_fip_reg flogi_fip_reg;
+ } u;
+};
+
+/*
+ * fcpio_icmnd_cmpl: firmware -> host response
+ *
+ * used for sending the host a response to an initiator command
+ */
+struct fcpio_icmnd_cmpl {
+ u8 _resvd0[6]; /* reserved */
+ u8 flags; /* response flags */
+ u8 scsi_status; /* SCSI status */
+ u32 residual; /* SCSI data residual length */
+ u32 sense_len; /* SCSI sense length */
+};
+
+/*
+ * response flags
+ */
+#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */
+#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */
+
+/*
+ * fcpio_itmf_cmpl: firmware -> host response
+ *
+ * used for sending the host a response for a itmf request
+ */
+struct fcpio_itmf_cmpl {
+ u32 _resvd; /* reserved */
+};
+
+/*
+ * fcpio_tcmnd_16: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming target SCSI 16-Byte
+ * request
+ */
+struct fcpio_tcmnd_16 {
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd2; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 _resvd1; /* reserved */
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+};
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */
+#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */
+#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */
+#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
+
+/*
+ * Command flags
+ */
+#define FCPIO_TCMND_RDDATA 0x02 /* read data */
+#define FCPIO_TCMND_WRDATA 0x01 /* write data */
+
+/*
+ * fcpio_tcmnd_32: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming target SCSI 32-Byte
+ * request
+ */
+struct fcpio_tcmnd_32 {
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd2; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 _resvd0; /* reserved */
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+};
+
+/*
+ * fcpio_tdrsp_cmpl: firmware -> host response
+ *
+ * used by the firmware to notify the host of a response to a host target
+ * command
+ */
+struct fcpio_tdrsp_cmpl {
+ u16 rx_id; /* rx_id of the target request */
+ u16 _resvd0; /* reserved */
+};
+
+/*
+ * fcpio_ttmf: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming task management
+ * function request
+ */
+struct fcpio_ttmf {
+ u8 _resvd0; /* reserved */
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 _resvd2[3]; /* reserved */
+ u32 tmf_type; /* task management request type */
+};
+
+/*
+ * Task Management request
+ */
+#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */
+#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */
+#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */
+#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */
+#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */
+
+/*
+ * fcpio_tabort_cmpl: firmware -> host response
+ *
+ * used by the firmware to respond to a host's tabort request
+ */
+struct fcpio_tabort_cmpl {
+ u16 rx_id; /* rx_id of the target request */
+ u16 _resvd0; /* reserved */
+};
+
+/*
+ * fcpio_ack: firmware -> host response
+ *
+ * used by firmware to notify the host of the last work request received
+ */
+struct fcpio_ack {
+ u16 request_out; /* last host entry received */
+ u16 _resvd;
+};
+
+/*
+ * fcpio_reset_cmpl: firmware -> host response
+ *
+ * use by firmware to respond to the host's reset request
+ */
+struct fcpio_reset_cmpl {
+ u16 vnic_id;
+};
+
+/*
+ * fcpio_flogi_reg_cmpl: firmware -> host response
+ *
+ * fc vnic only
+ * response to the fcpio_flogi_reg request
+ */
+struct fcpio_flogi_reg_cmpl {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_echo_cmpl: firmware -> host response
+ *
+ * response to the fcpio_echo request
+ */
+struct fcpio_echo_cmpl {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_chng: firmware -> host notification
+ *
+ * scsi vnic only
+ * notifies the host that the lunmap tables have changed
+ */
+struct fcpio_lunmap_chng {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_req_cmpl: firmware -> host response
+ *
+ * scsi vnic only
+ * response for lunmap table request from the host
+ */
+struct fcpio_lunmap_req_cmpl {
+ u32 _resvd;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the firmware to
+ * the host. They are 64 bytes per structure.
+ */
+#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */
+struct fcpio_fw_req {
+ struct fcpio_header hdr;
+
+ union {
+ /*
+ * Defines space needed for request
+ */
+ u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
+
+ /*
+ * Initiator firmware responses
+ */
+ struct fcpio_icmnd_cmpl icmnd_cmpl;
+ struct fcpio_itmf_cmpl itmf_cmpl;
+
+ /*
+ * Target firmware new requests
+ */
+ struct fcpio_tcmnd_16 tcmnd_16;
+ struct fcpio_tcmnd_32 tcmnd_32;
+
+ /*
+ * Target firmware responses
+ */
+ struct fcpio_tdrsp_cmpl tdrsp_cmpl;
+ struct fcpio_ttmf ttmf;
+ struct fcpio_tabort_cmpl tabort_cmpl;
+
+ /*
+ * Firmware response to work received
+ */
+ struct fcpio_ack ack;
+
+ /*
+ * Misc requests
+ */
+ struct fcpio_reset_cmpl reset_cmpl;
+ struct fcpio_flogi_reg_cmpl flogi_reg_cmpl;
+ struct fcpio_echo_cmpl echo_cmpl;
+ struct fcpio_lunmap_chng lunmap_chng;
+ struct fcpio_lunmap_req_cmpl lunmap_req_cmpl;
+ } u;
+};
+
+/*
+ * Access routines to encode and decode the color bit, which is the most
+ * significant bit of the MSB of the structure
+ */
+static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
+{
+ u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+ if (color)
+ *c |= 0x80;
+ else
+ *c &= ~0x80;
+}
+
+static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
+{
+ u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+ *color = *c >> 7;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+}
+
+/*
+ * Lunmap table entry for scsi vnics
+ */
+#define FCPIO_LUNMAP_TABLE_SIZE 256
+#define FCPIO_FLAGS_LUNMAP_VALID 0x80
+#define FCPIO_FLAGS_BOOT 0x01
+struct fcpio_lunmap_entry {
+ u8 bus;
+ u8 target;
+ u8 lun;
+ u8 path_cnt;
+ u16 flags;
+ u16 update_cnt;
+};
+
+struct fcpio_lunmap_tbl {
+ u32 update_cnt;
+ struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
+};
+
+#endif /* _FCPIO_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 000000000..26270c351
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_H_
+#define _FNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include "fnic_io.h"
+#include "fnic_res.h"
+#include "fnic_trace.h"
+#include "fnic_stats.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_wq_copy.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_scsi.h"
+
+#define DRV_NAME "fnic"
+#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
+#define DRV_VERSION "1.6.0.17"
+#define PFX DRV_NAME ": "
+#define DFX DRV_NAME "%d: "
+
+#define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
+#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
+#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
+#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
+#define FNIC_DFLT_QUEUE_DEPTH 32
+#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
+
+/*
+ * Tag bits used for special requests.
+ */
+#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
+#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
+#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
+#define FNIC_NO_TAG -1
+
+/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS 0
+#define FNIC_IO_INITIALIZED BIT(0)
+#define FNIC_IO_ISSUED BIT(1)
+#define FNIC_IO_DONE BIT(2)
+#define FNIC_IO_REQ_NULL BIT(3)
+#define FNIC_IO_ABTS_PENDING BIT(4)
+#define FNIC_IO_ABORTED BIT(5)
+#define FNIC_IO_ABTS_ISSUED BIT(6)
+#define FNIC_IO_TERM_ISSUED BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8)
+#define FNIC_IO_ABT_TERM_DONE BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11)
+#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */
+#define FNIC_DEV_RST_ISSUED BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED BIT(16)
+#define FNIC_DEV_RST_DONE BIT(17)
+#define FNIC_DEV_RST_REQ_NULL BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE BIT(19)
+#define FNIC_DEV_RST_TERM_DONE BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+
+/*
+ * Usage of the scsi_cmnd scratchpad.
+ * These fields are locked by the hashed io_req_lock.
+ */
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
+#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
+#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
+#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
+
+#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
+
+#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */
+#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
+#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
+#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
+
+#define FNIC_MAX_FCP_TARGET 256
+
+/**
+ * state_flags to identify host state along along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE (0)
+#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \
+ __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 1)
+
+extern unsigned int fnic_log_level;
+
+#define FNIC_MAIN_LOGGING 0x01
+#define FNIC_FCS_LOGGING 0x02
+#define FNIC_SCSI_LOGGING 0x04
+#define FNIC_ISR_LOGGING 0x08
+
+#define FNIC_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(fnic_log_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
+ shost_printk(kern_level, host, fmt, ##args)
+
+extern const char *fnic_state_str[];
+
+enum fnic_intx_intr_index {
+ FNIC_INTX_WQ_RQ_COPYWQ,
+ FNIC_INTX_ERR,
+ FNIC_INTX_NOTIFY,
+ FNIC_INTX_INTR_MAX,
+};
+
+enum fnic_msix_intr_index {
+ FNIC_MSIX_RQ,
+ FNIC_MSIX_WQ,
+ FNIC_MSIX_WQ_COPY,
+ FNIC_MSIX_ERR_NOTIFY,
+ FNIC_MSIX_INTR_MAX,
+};
+
+struct fnic_msix_entry {
+ int requested;
+ char devname[IFNAMSIZ];
+ irqreturn_t (*isr)(int, void *);
+ void *devid;
+};
+
+enum fnic_state {
+ FNIC_IN_FC_MODE = 0,
+ FNIC_IN_FC_TRANS_ETH_MODE,
+ FNIC_IN_ETH_MODE,
+ FNIC_IN_ETH_TRANS_FC_MODE,
+};
+
+#define FNIC_WQ_COPY_MAX 1
+#define FNIC_WQ_MAX 1
+#define FNIC_RQ_MAX 1
+#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
+
+struct mempool;
+
+enum fnic_evt {
+ FNIC_EVT_START_VLAN_DISC = 1,
+ FNIC_EVT_START_FCF_DISC = 2,
+ FNIC_EVT_MAX,
+};
+
+struct fnic_event {
+ struct list_head list;
+ struct fnic *fnic;
+ enum fnic_evt event;
+};
+
+/* Per-instance private data structure */
+struct fnic {
+ struct fc_lport *lport;
+ struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
+ struct vnic_dev_bar bar0;
+
+ struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
+ struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
+
+ struct vnic_stats *stats;
+ unsigned long stats_time; /* time of stats update */
+ unsigned long stats_reset_time; /* time of stats reset */
+ struct vnic_nic_cfg *nic_cfg;
+ char name[IFNAMSIZ];
+ struct timer_list notify_timer; /* used for MSI interrupts */
+
+ unsigned int fnic_max_tag_id;
+ unsigned int err_intr_offset;
+ unsigned int link_intr_offset;
+
+ unsigned int wq_count;
+ unsigned int cq_count;
+
+ struct dentry *fnic_stats_debugfs_host;
+ struct dentry *fnic_stats_debugfs_file;
+ struct dentry *fnic_reset_debugfs_file;
+ unsigned int reset_stats;
+ atomic64_t io_cmpl_skip;
+ struct fnic_stats fnic_stats;
+
+ u32 vlan_hw_insert:1; /* let hw insert the tag */
+ u32 in_remove:1; /* fnic device in removal */
+ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
+
+ struct completion *remove_wait; /* device remove thread blocks */
+
+ atomic_t in_flight; /* io counter */
+ u32 _reserved; /* fill hole */
+ unsigned long state_flags; /* protected by host lock */
+ enum fnic_state state;
+ spinlock_t fnic_lock;
+
+ u16 vlan_id; /* VLAN tag including priority */
+ u8 data_src_addr[ETH_ALEN];
+ u64 fcp_input_bytes; /* internal statistic */
+ u64 fcp_output_bytes; /* internal statistic */
+ u32 link_down_cnt;
+ int link_status;
+
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct vnic_fc_config config;
+ struct vnic_dev *vdev;
+ unsigned int raw_wq_count;
+ unsigned int wq_copy_count;
+ unsigned int rq_count;
+ int fw_ack_index[FNIC_WQ_COPY_MAX];
+ unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
+ unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
+ unsigned int intr_count;
+ u32 __iomem *legacy_pba;
+ struct fnic_host_tag *tags;
+ mempool_t *io_req_pool;
+ mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
+ spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
+
+ struct work_struct link_work;
+ struct work_struct frame_work;
+ struct sk_buff_head frame_queue;
+ struct sk_buff_head tx_queue;
+
+ /*** FIP related data members -- start ***/
+ void (*set_vlan)(struct fnic *, u16 vlan);
+ struct work_struct fip_frame_work;
+ struct sk_buff_head fip_frame_queue;
+ struct timer_list fip_timer;
+ struct list_head vlans;
+ spinlock_t vlans_lock;
+
+ struct work_struct event_work;
+ struct list_head evlist;
+ /*** FIP related data members -- end ***/
+
+ /* copy work queue cache line section */
+ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
+ /* completion queue cache line section */
+ ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
+
+ spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
+
+ /* work queue cache line section */
+ ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
+ spinlock_t wq_lock[FNIC_WQ_MAX];
+
+ /* receive queue cache line section */
+ ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
+
+ /* interrupt resource cache line section */
+ ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
+};
+
+static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
+{
+ return container_of(fip, struct fnic, ctlr);
+}
+
+extern struct workqueue_struct *fnic_event_queue;
+extern struct workqueue_struct *fnic_fip_queue;
+extern struct device_attribute *fnic_attrs[];
+
+void fnic_clear_intr_mode(struct fnic *fnic);
+int fnic_set_intr_mode(struct fnic *fnic);
+void fnic_free_intr(struct fnic *fnic);
+int fnic_request_intr(struct fnic *fnic);
+
+int fnic_send(struct fc_lport *, struct fc_frame *);
+void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+void fnic_handle_frame(struct work_struct *work);
+void fnic_handle_link(struct work_struct *work);
+void fnic_handle_event(struct work_struct *work);
+int fnic_rq_cmpl_handler(struct fnic *fnic, int);
+int fnic_alloc_rq_frame(struct vnic_rq *rq);
+void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
+void fnic_flush_tx(struct fnic *);
+void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
+void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+void fnic_update_mac(struct fc_lport *, u8 *new);
+void fnic_update_mac_locked(struct fnic *, u8 *new);
+
+int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+int fnic_abort_cmd(struct scsi_cmnd *);
+int fnic_device_reset(struct scsi_cmnd *);
+int fnic_host_reset(struct scsi_cmnd *);
+int fnic_reset(struct Scsi_Host *);
+void fnic_scsi_cleanup(struct fc_lport *);
+void fnic_scsi_abort_io(struct fc_lport *);
+void fnic_empty_scsi_cleanup(struct fc_lport *);
+void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
+int fnic_wq_cmpl_handler(struct fnic *fnic, int);
+int fnic_flogi_reg_handler(struct fnic *fnic, u32);
+void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *desc);
+int fnic_fw_reset_handler(struct fnic *fnic);
+void fnic_terminate_rport_io(struct fc_rport *);
+const char *fnic_state_to_str(unsigned int state);
+
+void fnic_log_q_error(struct fnic *fnic);
+void fnic_handle_link_event(struct fnic *fnic);
+
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_handle_fip_event(struct fnic *fnic);
+void fnic_fcoe_reset_vlans(struct fnic *fnic);
+void fnic_fcoe_evlist_free(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct fnic *fnic);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+ return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
+#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
new file mode 100644
index 000000000..aea0c3bec
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include "fnic.h"
+
+static ssize_t fnic_show_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lp = shost_priv(class_to_shost(dev));
+ struct fnic *fnic = lport_priv(lp);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
+}
+
+static ssize_t fnic_show_drv_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+static ssize_t fnic_show_link_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lp = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
+ ? "Link Up" : "Link Down");
+}
+
+static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
+static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
+
+struct device_attribute *fnic_attrs[] = {
+ &dev_attr_fnic_state,
+ &dev_attr_drv_version,
+ &dev_attr_link_state,
+ NULL,
+};
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644
index 000000000..5980c10c7
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -0,0 +1,836 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+static struct dentry *fnic_stats_debugfs_root;
+
+static struct dentry *fnic_fc_trace_debugfs_file;
+static struct dentry *fnic_fc_rdata_trace_debugfs_file;
+static struct dentry *fnic_fc_trace_enable;
+static struct dentry *fnic_fc_trace_clear;
+
+struct fc_trace_flag_type {
+ u8 fc_row_file;
+ u8 fc_normal_file;
+ u8 fnic_trace;
+ u8 fc_trace;
+ u8 fc_clear;
+};
+
+static struct fc_trace_flag_type *fc_trc_flag;
+
+/*
+ * fnic_debugfs_init - Initialize debugfs for fnic debug logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory and statistics directory for trace buffer and
+ * stats logging.
+ */
+int fnic_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG
+ "fnic root directory doesn't exist in debugfs\n");
+ return rc;
+ }
+
+ fnic_stats_debugfs_root = debugfs_create_dir("statistics",
+ fnic_trace_debugfs_root);
+ if (!fnic_stats_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create Statistics directory\n");
+ return rc;
+ }
+
+ /* Allocate memory to structure */
+ fc_trc_flag = (struct fc_trace_flag_type *)
+ vmalloc(sizeof(struct fc_trace_flag_type));
+
+ if (fc_trc_flag) {
+ fc_trc_flag->fc_row_file = 0;
+ fc_trc_flag->fc_normal_file = 1;
+ fc_trc_flag->fnic_trace = 2;
+ fc_trc_flag->fc_trace = 3;
+ fc_trc_flag->fc_clear = 4;
+ }
+
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic.
+ */
+void fnic_debugfs_terminate(void)
+{
+ debugfs_remove(fnic_stats_debugfs_root);
+ fnic_stats_debugfs_root = NULL;
+
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+
+ if (fc_trc_flag)
+ vfree(fc_trc_flag);
+}
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
+ * Or Open fc_trace_enable file for fc_trace
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens a debugsfs file trace_enable or fc_trace_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read -
+ * Read trace_enable ,fc_trace_enable
+ * or fc_trace_clear debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads value of variable fnic_tracing_enabled or
+ * fnic_fc_tracing_enabled or fnic_fc_trace_cleared
+ * and stores into local @buf.
+ * It will start reading file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int len;
+ u8 *trace_type;
+ len = 0;
+ trace_type = (u8 *)filp->private_data;
+ if (*trace_type == fc_trc_flag->fnic_trace)
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+ else if (*trace_type == fc_trc_flag->fc_trace)
+ len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+ else if (*trace_type == fc_trc_flag->fc_clear)
+ len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+ else
+ pr_err("fnic: Cannot read to any debugfs file\n");
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write -
+ * Write to trace_enable, fc_trace_enable or
+ * fc_trace_clear debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from user buffer @ubuf to buffer @buf and
+ * sets fc_trace_enable ,tracing_enable or fnic_fc_trace_cleared
+ * value as per user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+ u8 *trace_type;
+ trace_type = (u8 *)filp->private_data;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (*trace_type == fc_trc_flag->fnic_trace)
+ fnic_tracing_enabled = val;
+ else if (*trace_type == fc_trc_flag->fc_trace)
+ fnic_fc_tracing_enabled = val;
+ else if (*trace_type == fc_trc_flag->fc_clear)
+ fnic_fc_trace_cleared = val;
+ else
+ pr_err("fnic: cannot write to any debugfs file\n");
+
+ (*ppos)++;
+
+ return cnt;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt;
+ u8 *rdata_ptr;
+ rdata_ptr = (u8 *)inode->i_private;
+ fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+ if (!fnic_dbg_prt)
+ return -ENOMEM;
+
+ if (*rdata_ptr == fc_trc_flag->fnic_trace) {
+ fnic_dbg_prt->buffer = vmalloc(3 *
+ (trace_max_pages * PAGE_SIZE));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ 3 * (trace_max_pages * PAGE_SIZE));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ } else {
+ fnic_dbg_prt->buffer =
+ vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ 3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+ fnic_dbg_prt->buffer_len =
+ fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
+ }
+ file->private_data = fnic_dbg_prt;
+
+ return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+ loff_t offset,
+ int howto)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ return fixed_size_llseek(file, offset, howto,
+ fnic_dbg_prt->buffer_len);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ fnic_dbg_prt->buffer,
+ fnic_dbg_prt->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+ vfree(fnic_dbg_prt->buffer);
+ kfree(fnic_dbg_prt);
+ return 0;
+}
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_debugfs_open,
+ .llseek = fnic_trace_debugfs_lseek,
+ .read = fnic_trace_debugfs_read,
+ .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * create file trace to log fnic trace buffer output into debugfs and
+ * it will also create file trace_enable to control enable/disable of
+ * trace logging into trace buffer.
+ */
+int fnic_trace_debugfs_init(void)
+{
+ int rc = -1;
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG
+ "FNIC Debugfs root directory doesn't exist\n");
+ return rc;
+ }
+ fnic_trace_enable = debugfs_create_file("tracing_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fnic_trace),
+ &fnic_trace_ctrl_fops);
+
+ if (!fnic_trace_enable) {
+ printk(KERN_DEBUG
+ "Cannot create trace_enable file under debugfs\n");
+ return rc;
+ }
+
+ fnic_trace_debugfs_file = debugfs_create_file("trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fnic_trace),
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_trace_debugfs_file) {
+ printk(KERN_DEBUG
+ "Cannot create trace file under debugfs\n");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+}
+
+/*
+ * fnic_fc_trace_debugfs_init -
+ * Initialize debugfs for fnic control frame trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic_fc debugfs
+ * file system. If not already created, this routine will create the
+ * create file trace to log fnic fc trace buffer output into debugfs and
+ * it will also create file fc_trace_enable to control enable/disable of
+ * trace logging into trace buffer.
+ */
+
+int fnic_fc_trace_debugfs_init(void)
+{
+ int rc = -1;
+
+ if (!fnic_trace_debugfs_root) {
+ pr_err("fnic:Debugfs root directory doesn't exist\n");
+ return rc;
+ }
+
+ fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_trace),
+ &fnic_trace_ctrl_fops);
+
+ if (!fnic_fc_trace_enable) {
+ pr_err("fnic: Failed create fc_trace_enable file\n");
+ return rc;
+ }
+
+ fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_clear),
+ &fnic_trace_ctrl_fops);
+
+ if (!fnic_fc_trace_clear) {
+ pr_err("fnic: Failed to create fc_trace_enable file\n");
+ return rc;
+ }
+
+ fnic_fc_rdata_trace_debugfs_file =
+ debugfs_create_file("fc_trace_rdata",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_normal_file),
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_fc_rdata_trace_debugfs_file) {
+ pr_err("fnic: Failed create fc_rdata_trace file\n");
+ return rc;
+ }
+
+ fnic_fc_trace_debugfs_file =
+ debugfs_create_file("fc_trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ &(fc_trc_flag->fc_row_file),
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_fc_trace_debugfs_file) {
+ pr_err("fnic: Failed to create fc_trace file\n");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic_fc trace logging.
+ */
+
+void fnic_fc_trace_debugfs_terminate(void)
+{
+ debugfs_remove(fnic_fc_trace_debugfs_file);
+ fnic_fc_trace_debugfs_file = NULL;
+
+ debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
+ fnic_fc_rdata_trace_debugfs_file = NULL;
+
+ debugfs_remove(fnic_fc_trace_enable);
+ fnic_fc_trace_enable = NULL;
+
+ debugfs_remove(fnic_fc_trace_clear);
+ fnic_fc_trace_clear = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens a debugsfs file reset_stats and stores i_private data
+ * to debug structure to retrieve later for while performing other
+ * file oprations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+ struct stats_debug_info *debug;
+
+ debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads value of variable reset_stats
+ * and stores into local @buf. It will start reading file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ struct fnic *fnic = (struct fnic *)debug->i_private;
+ char buf[64];
+ int len;
+
+ len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from user buffer @ubuf to buffer @buf and
+ * resets cumulative stats of fnic.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ struct fnic *fnic = (struct fnic *)debug->i_private;
+ struct fnic_stats *stats = &fnic->fnic_stats;
+ u64 *io_stats_p = (u64 *)&stats->io_stats;
+ u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic->reset_stats = val;
+
+ if (fnic->reset_stats) {
+ /* Skip variable is used to avoid descrepancies to Num IOs
+ * and IO Completions stats. Skip incrementing No IO Compls
+ * for pending active IOs after reset stats
+ */
+ atomic64_set(&fnic->io_cmpl_skip,
+ atomic64_read(&stats->io_stats.active_ios));
+ memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+ memset(&stats->term_stats, 0,
+ sizeof(struct terminate_stats));
+ memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+ memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+ memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+ memset(io_stats_p+1, 0,
+ sizeof(struct io_path_stats) - sizeof(u64));
+ memset(fw_stats_p+1, 0,
+ sizeof(struct fw_stats) - sizeof(u64));
+ }
+
+ (*ppos)++;
+ return cnt;
+}
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+ struct file *file)
+{
+ struct stats_debug_info *debug = file->private_data;
+ kfree(debug);
+ return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for specific host
+ * and get fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens a debugsfs file stats of specific host and print
+ * fnic stats.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ struct fnic *fnic = inode->i_private;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct stats_debug_info *debug;
+ int buf_size = 2 * PAGE_SIZE;
+
+ debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->debug_buffer = vmalloc(buf_size);
+ if (!debug->debug_buffer) {
+ kfree(debug);
+ return -ENOMEM;
+ }
+
+ debug->buf_size = buf_size;
+ memset((void *)debug->debug_buffer, 0, buf_size);
+ debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ debug->debug_buffer,
+ debug->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_stats_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ struct stats_debug_info *debug = file->private_data;
+ vfree(debug->debug_buffer);
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_stats_debugfs_open,
+ .read = fnic_stats_debugfs_read,
+ .release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_reset_stats_open,
+ .read = fnic_reset_stats_read,
+ .write = fnic_reset_stats_write,
+ .release = fnic_reset_stats_release,
+};
+
+/*
+ * fnic_stats_init - Initialize stats struct and create stats file per fnic
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the stats file per fnic
+ * It will create file stats and reset_stats under statistics/host# directory
+ * to log per fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+ int rc = -1;
+ char name[16];
+
+ snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+ if (!fnic_stats_debugfs_root) {
+ printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+ return rc;
+ }
+ fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+ fnic_stats_debugfs_root);
+ if (!fnic->fnic_stats_debugfs_host) {
+ printk(KERN_DEBUG "Cannot create host directory\n");
+ return rc;
+ }
+
+ fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic->fnic_stats_debugfs_host,
+ fnic,
+ &fnic_stats_debugfs_fops);
+ if (!fnic->fnic_stats_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create host stats file\n");
+ return rc;
+ }
+
+ fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic->fnic_stats_debugfs_host,
+ fnic,
+ &fnic_reset_debugfs_fops);
+ if (!fnic->fnic_reset_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create host stats file\n");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */
+void fnic_stats_debugfs_remove(struct fnic *fnic)
+{
+ if (!fnic)
+ return;
+
+ debugfs_remove(fnic->fnic_stats_debugfs_file);
+ fnic->fnic_stats_debugfs_file = NULL;
+
+ debugfs_remove(fnic->fnic_reset_debugfs_file);
+ fnic->fnic_reset_debugfs_file = NULL;
+
+ debugfs_remove(fnic->fnic_stats_debugfs_host);
+ fnic->fnic_stats_debugfs_host = NULL;
+}
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
new file mode 100644
index 000000000..bf0bbd42e
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -0,0 +1,1347 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfc.h>
+#include "fnic_io.h"
+#include "fnic.h"
+#include "fnic_fip.h"
+#include "cq_enet_desc.h"
+#include "cq_exch_desc.h"
+
+static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
+struct workqueue_struct *fnic_fip_queue;
+struct workqueue_struct *fnic_event_queue;
+
+static void fnic_set_eth_mode(struct fnic *);
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
+
+void fnic_handle_link(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, link_work);
+ unsigned long flags;
+ int old_link_status;
+ u32 old_link_down_cnt;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ old_link_down_cnt = fnic->link_down_cnt;
+ old_link_status = fnic->link_status;
+ fnic->link_status = vnic_dev_link_status(fnic->vdev);
+ fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+
+ if (old_link_status == fnic->link_status) {
+ if (!fnic->link_status) {
+ /* DOWN -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_LE, "Link Status: DOWN->DOWN",
+ strlen("Link Status: DOWN->DOWN"));
+ } else {
+ if (old_link_down_cnt != fnic->link_down_cnt) {
+ /* UP -> DOWN -> UP */
+ fnic->lport->host_stats.link_failure_count++;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no,
+ FNIC_FC_LE,
+ "Link Status:UP_DOWN_UP",
+ strlen("Link_Status:UP_DOWN_UP")
+ );
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "link down\n");
+ fcoe_ctlr_link_down(&fnic->ctlr);
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+ /* start FCoE VLAN discovery */
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no,
+ FNIC_FC_LE,
+ "Link Status: UP_DOWN_UP_VLAN",
+ strlen(
+ "Link Status: UP_DOWN_UP_VLAN")
+ );
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "link up\n");
+ fcoe_ctlr_link_up(&fnic->ctlr);
+ } else {
+ /* UP -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no, FNIC_FC_LE,
+ "Link Status: UP_UP",
+ strlen("Link Status: UP_UP"));
+ }
+ }
+ } else if (fnic->link_status) {
+ /* DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+ /* start FCoE VLAN discovery */
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no,
+ FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
+ strlen("Link Status: DOWN_UP_VLAN"));
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
+ fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
+ "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
+ fcoe_ctlr_link_up(&fnic->ctlr);
+ } else {
+ /* UP -> DOWN */
+ fnic->lport->host_stats.link_failure_count++;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
+ fnic_fc_trace_set_data(
+ fnic->lport->host->host_no, FNIC_FC_LE,
+ "Link Status: UP_DOWN",
+ strlen("Link Status: UP_DOWN"));
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "deleting fip-timer during link-down\n");
+ del_timer_sync(&fnic->fip_timer);
+ }
+ fcoe_ctlr_link_down(&fnic->ctlr);
+ }
+
+}
+
+/*
+ * This function passes incoming fabric frames to libFC
+ */
+void fnic_handle_frame(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, frame_work);
+ struct fc_lport *lp = fnic->lport;
+ unsigned long flags;
+ struct sk_buff *skb;
+ struct fc_frame *fp;
+
+ while ((skb = skb_dequeue(&fnic->frame_queue))) {
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ dev_kfree_skb(skb);
+ return;
+ }
+ fp = (struct fc_frame *)skb;
+
+ /*
+ * If we're in a transitional state, just re-queue and return.
+ * The queue will be serviced when we get to a stable state.
+ */
+ if (fnic->state != FNIC_IN_FC_MODE &&
+ fnic->state != FNIC_IN_ETH_MODE) {
+ skb_queue_head(&fnic->frame_queue, skb);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fc_exch_recv(lp, fp);
+ }
+}
+
+void fnic_fcoe_evlist_free(struct fnic *fnic)
+{
+ struct fnic_event *fevt = NULL;
+ struct fnic_event *next = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (list_empty(&fnic->evlist)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+ list_del(&fevt->list);
+ kfree(fevt);
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fnic_handle_event(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, event_work);
+ struct fnic_event *fevt = NULL;
+ struct fnic_event *next = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (list_empty(&fnic->evlist)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+ if (fnic->stop_rx_link_events) {
+ list_del(&fevt->list);
+ kfree(fevt);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ /*
+ * If we're in a transitional state, just re-queue and return.
+ * The queue will be serviced when we get to a stable state.
+ */
+ if (fnic->state != FNIC_IN_FC_MODE &&
+ fnic->state != FNIC_IN_ETH_MODE) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ list_del(&fevt->list);
+ switch (fevt->event) {
+ case FNIC_EVT_START_VLAN_DISC:
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ break;
+ case FNIC_EVT_START_FCF_DISC:
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Start FCF Discovery\n");
+ fnic_fcoe_start_fcf_disc(fnic);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unknown event 0x%x\n", fevt->event);
+ break;
+ }
+ kfree(fevt);
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/**
+ * Check if the Received FIP FLOGI frame is rejected
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is rejected with unsupported cmd with
+ * insufficient resource els explanation.
+ */
+static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
+ struct sk_buff *skb)
+{
+ struct fc_lport *lport = fip->lp;
+ struct fip_header *fiph;
+ struct fc_frame_header *fh = NULL;
+ struct fip_desc *desc;
+ struct fip_encaps *els;
+ enum fip_desc_type els_dtype = 0;
+ u16 op;
+ u8 els_op;
+ u8 sub;
+
+ size_t els_len = 0;
+ size_t rlen;
+ size_t dlen = 0;
+
+ if (skb_linearize(skb))
+ return 0;
+
+ if (skb->len < sizeof(*fiph))
+ return 0;
+
+ fiph = (struct fip_header *)skb->data;
+ op = ntohs(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ if (op != FIP_OP_LS)
+ return 0;
+
+ if (sub != FIP_SC_REP)
+ return 0;
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ if (rlen + sizeof(*fiph) > skb->len)
+ return 0;
+
+ desc = (struct fip_desc *)(fiph + 1);
+ dlen = desc->fip_dlen * FIP_BPW;
+
+ if (desc->fip_dtype == FIP_DT_FLOGI) {
+
+ if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+ return 0;
+
+ els_len = dlen - sizeof(*els);
+ els = (struct fip_encaps *)desc;
+ fh = (struct fc_frame_header *)(els + 1);
+ els_dtype = desc->fip_dtype;
+
+ if (!fh)
+ return 0;
+
+ /*
+ * ELS command code, reason and explanation should be = Reject,
+ * unsupported command and insufficient resource
+ */
+ els_op = *(u8 *)(fh + 1);
+ if (els_op == ELS_LS_RJT) {
+ shost_printk(KERN_INFO, lport->host,
+ "Flogi Request Rejected by Switch\n");
+ return 1;
+ }
+ shost_printk(KERN_INFO, lport->host,
+ "Flogi Request Accepted by Switch\n");
+ }
+ return 0;
+}
+
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+ struct fcoe_ctlr *fip = &fnic->ctlr;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct sk_buff *skb;
+ char *eth_fr;
+ int fr_len;
+ struct fip_vlan *vlan;
+ u64 vlan_tov;
+
+ fnic_fcoe_reset_vlans(fnic);
+ fnic->set_vlan(fnic, 0);
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ "Sending VLAN request...\n");
+ skb = dev_alloc_skb(sizeof(struct fip_vlan));
+ if (!skb)
+ return;
+
+ fr_len = sizeof(*vlan);
+ eth_fr = (char *)skb->data;
+ vlan = (struct fip_vlan *)eth_fr;
+
+ memset(vlan, 0, sizeof(*vlan));
+ memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+ memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
+ vlan->eth.h_proto = htons(ETH_P_FIP);
+
+ vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ vlan->fip.fip_op = htons(FIP_OP_VLAN);
+ vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+ vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+ vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+ vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+ memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+ vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+ vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+ put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
+
+ skb_put(skb, sizeof(*vlan));
+ skb->protocol = htons(ETH_P_FIP);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ fip->send(fip, skb);
+
+ /* set a timer so that we can retry if there no response */
+ vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+ mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
+}
+
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
+{
+ struct fcoe_ctlr *fip = &fnic->ctlr;
+ struct fip_header *fiph;
+ struct fip_desc *desc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u16 vid;
+ size_t rlen;
+ size_t dlen;
+ struct fcoe_vlan *vlan;
+ u64 sol_time;
+ unsigned long flags;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ "Received VLAN response...\n");
+
+ fiph = (struct fip_header *) skb->data;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
+ ntohs(fiph->fip_op), fiph->fip_subcode);
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ fnic_fcoe_reset_vlans(fnic);
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen > 0) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ switch (desc->fip_dtype) {
+ case FIP_DT_VLAN:
+ vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "process_vlan_resp: FIP VLAN %d\n", vid);
+ vlan = kmalloc(sizeof(*vlan),
+ GFP_ATOMIC);
+ if (!vlan) {
+ /* retry from timer */
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ goto out;
+ }
+ memset(vlan, 0, sizeof(struct fcoe_vlan));
+ vlan->vid = vid & 0x0fff;
+ vlan->state = FIP_VLAN_AVAIL;
+ list_add_tail(&vlan->list, &fnic->vlans);
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+
+ /* any VLAN descriptors present ? */
+ if (list_empty(&fnic->vlans)) {
+ /* retry from timer */
+ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ "No VLAN descriptors in FIP VLAN response\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ goto out;
+ }
+
+ vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+ vlan->sol_count++;
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+ /* start the solicitation */
+ fcoe_ctlr_link_up(fip);
+
+ sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+ mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+out:
+ return;
+}
+
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
+{
+ unsigned long flags;
+ struct fcoe_vlan *vlan;
+ u64 sol_time;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+ vlan->sol_count = 1;
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+ /* start the solicitation */
+ fcoe_ctlr_link_up(&fnic->ctlr);
+
+ sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+ mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+}
+
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
+{
+ unsigned long flags;
+ struct fcoe_vlan *fvlan;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (list_empty(&fnic->vlans)) {
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ return -EINVAL;
+ }
+
+ fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+ if (fvlan->state == FIP_VLAN_USED) {
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ return 0;
+ }
+
+ if (fvlan->state == FIP_VLAN_SENT) {
+ fvlan->state = FIP_VLAN_USED;
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ return -EINVAL;
+}
+
+static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
+{
+ struct fnic_event *fevt;
+ unsigned long flags;
+
+ fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
+ if (!fevt)
+ return;
+
+ fevt->fnic = fnic;
+ fevt->event = ev;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&fevt->list, &fnic->evlist);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ schedule_work(&fnic->event_work);
+}
+
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+{
+ struct fip_header *fiph;
+ int ret = 1;
+ u16 op;
+ u8 sub;
+
+ if (!skb || !(skb->data))
+ return -1;
+
+ if (skb_linearize(skb))
+ goto drop;
+
+ fiph = (struct fip_header *)skb->data;
+ op = ntohs(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+ goto drop;
+
+ if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+ goto drop;
+
+ if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
+ if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
+ goto drop;
+ /* pass it on to fcoe */
+ ret = 1;
+ } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+ /* set the vlan as used */
+ fnic_fcoe_process_vlan_resp(fnic, skb);
+ ret = 0;
+ } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+ /* received CVL request, restart vlan disc */
+ fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+ /* pass it on to fcoe */
+ ret = 1;
+ }
+drop:
+ return ret;
+}
+
+void fnic_handle_fip_frame(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned long flags;
+ struct sk_buff *skb;
+ struct ethhdr *eh;
+
+ while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ dev_kfree_skb(skb);
+ return;
+ }
+ /*
+ * If we're in a transitional state, just re-queue and return.
+ * The queue will be serviced when we get to a stable state.
+ */
+ if (fnic->state != FNIC_IN_FC_MODE &&
+ fnic->state != FNIC_IN_ETH_MODE) {
+ skb_queue_head(&fnic->fip_frame_queue, skb);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ eh = (struct ethhdr *)skb->data;
+ if (eh->h_proto == htons(ETH_P_FIP)) {
+ skb_pull(skb, sizeof(*eh));
+ if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+ /*
+ * If there's FLOGI rejects - clear all
+ * fcf's & restart from scratch
+ */
+ if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+ atomic64_inc(
+ &fnic_stats->vlan_stats.flogi_rejects);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "Trigger a Link down - VLAN Disc\n");
+ fcoe_ctlr_link_down(&fnic->ctlr);
+ /* start FCoE VLAN discovery */
+ fnic_fcoe_send_vlan_req(fnic);
+ dev_kfree_skb(skb);
+ continue;
+ }
+ fcoe_ctlr_recv(&fnic->ctlr, skb);
+ continue;
+ }
+ }
+}
+
+/**
+ * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
+ * @fnic: fnic instance.
+ * @skb: Ethernet Frame.
+ */
+static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
+{
+ struct fc_frame *fp;
+ struct ethhdr *eh;
+ struct fcoe_hdr *fcoe_hdr;
+ struct fcoe_crc_eof *ft;
+
+ /*
+ * Undo VLAN encapsulation if present.
+ */
+ eh = (struct ethhdr *)skb->data;
+ if (eh->h_proto == htons(ETH_P_8021Q)) {
+ memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
+ eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ skb_reset_mac_header(skb);
+ }
+ if (eh->h_proto == htons(ETH_P_FIP)) {
+ if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
+ printk(KERN_ERR "Dropped FIP frame, as firmware "
+ "uses non-FIP mode, Enable FIP "
+ "using UCSM\n");
+ goto drop;
+ }
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+ skb_queue_tail(&fnic->fip_frame_queue, skb);
+ queue_work(fnic_fip_queue, &fnic->fip_frame_work);
+ return 1; /* let caller know packet was used */
+ }
+ if (eh->h_proto != htons(ETH_P_FCOE))
+ goto drop;
+ skb_set_network_header(skb, sizeof(*eh));
+ skb_pull(skb, sizeof(*eh));
+
+ fcoe_hdr = (struct fcoe_hdr *)skb->data;
+ if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
+ goto drop;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_sof(fp) = fcoe_hdr->fcoe_sof;
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ skb_reset_transport_header(skb);
+
+ ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
+ fr_eof(fp) = ft->fcoe_eof;
+ skb_trim(skb, skb->len - sizeof(*ft));
+ return 0;
+drop:
+ dev_kfree_skb_irq(skb);
+ return -1;
+}
+
+/**
+ * fnic_update_mac_locked() - set data MAC address and filters.
+ * @fnic: fnic instance.
+ * @new: newly-assigned FCoE MAC address.
+ *
+ * Called with the fnic lock held.
+ */
+void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
+{
+ u8 *ctl = fnic->ctlr.ctl_src_addr;
+ u8 *data = fnic->data_src_addr;
+
+ if (is_zero_ether_addr(new))
+ new = ctl;
+ if (ether_addr_equal(data, new))
+ return;
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
+ if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
+ vnic_dev_del_addr(fnic->vdev, data);
+ memcpy(data, new, ETH_ALEN);
+ if (!ether_addr_equal(new, ctl))
+ vnic_dev_add_addr(fnic->vdev, new);
+}
+
+/**
+ * fnic_update_mac() - set data MAC address and filters.
+ * @lport: local port.
+ * @new: newly-assigned FCoE MAC address.
+ */
+void fnic_update_mac(struct fc_lport *lport, u8 *new)
+{
+ struct fnic *fnic = lport_priv(lport);
+
+ spin_lock_irq(&fnic->fnic_lock);
+ fnic_update_mac_locked(fnic, new);
+ spin_unlock_irq(&fnic->fnic_lock);
+}
+
+/**
+ * fnic_set_port_id() - set the port_ID after successful FLOGI.
+ * @lport: local port.
+ * @port_id: assigned FC_ID.
+ * @fp: received frame containing the FLOGI accept or NULL.
+ *
+ * This is called from libfc when a new FC_ID has been assigned.
+ * This causes us to reset the firmware to FC_MODE and setup the new MAC
+ * address and FC_ID.
+ *
+ * It is also called with FC_ID 0 when we're logged off.
+ *
+ * If the FC_ID is due to point-to-point, fp may be NULL.
+ */
+void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
+{
+ struct fnic *fnic = lport_priv(lport);
+ u8 *mac;
+ int ret;
+
+ FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
+ port_id, fp);
+
+ /*
+ * If we're clearing the FC_ID, change to use the ctl_src_addr.
+ * Set ethernet mode to send FLOGI.
+ */
+ if (!port_id) {
+ fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
+ fnic_set_eth_mode(fnic);
+ return;
+ }
+
+ if (fp) {
+ mac = fr_cb(fp)->granted_mac;
+ if (is_zero_ether_addr(mac)) {
+ /* non-FIP - FLOGI already accepted - ignore return */
+ fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
+ }
+ fnic_update_mac(lport, mac);
+ }
+
+ /* Change state to reflect transition to FC mode */
+ spin_lock_irq(&fnic->fnic_lock);
+ if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
+ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
+ else {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unexpected fnic state %s while"
+ " processing flogi resp\n",
+ fnic_state_to_str(fnic->state));
+ spin_unlock_irq(&fnic->fnic_lock);
+ return;
+ }
+ spin_unlock_irq(&fnic->fnic_lock);
+
+ /*
+ * Send FLOGI registration to firmware to set up FC mode.
+ * The new address will be set up when registration completes.
+ */
+ ret = fnic_flogi_reg_handler(fnic, port_id);
+
+ if (ret < 0) {
+ spin_lock_irq(&fnic->fnic_lock);
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
+ fnic->state = FNIC_IN_ETH_MODE;
+ spin_unlock_irq(&fnic->fnic_lock);
+ }
+}
+
+static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
+ *cq_desc, struct vnic_rq_buf *buf,
+ int skipped __attribute__((unused)),
+ void *opaque)
+{
+ struct fnic *fnic = vnic_dev_priv(rq->vdev);
+ struct sk_buff *skb;
+ struct fc_frame *fp;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned int eth_hdrs_stripped;
+ u8 type, color, eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe = 0, fcoe_sof, fcoe_eof;
+ u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
+ u8 fcs_ok = 1, packet_error = 0;
+ u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
+ u32 rss_hash;
+ u16 exchange_id, tmpl;
+ u8 sof = 0;
+ u8 eof = 0;
+ u32 fcp_bytes_written = 0;
+ unsigned long flags;
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
+ skb = buf->os_buf;
+ fp = (struct fc_frame *)skb;
+ buf->os_buf = NULL;
+
+ cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
+ if (type == CQ_DESC_TYPE_RQ_FCP) {
+ cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
+ &tmpl, &fcp_bytes_written, &sof, &eof,
+ &ingress_port, &packet_error,
+ &fcoe_enc_error, &fcs_ok, &vlan_stripped,
+ &vlan);
+ eth_hdrs_stripped = 1;
+ skb_trim(skb, fcp_bytes_written);
+ fr_sof(fp) = sof;
+ fr_eof(fp) = eof;
+
+ } else if (type == CQ_DESC_TYPE_RQ_ENET) {
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop,
+ &rss_type, &csum_not_calc, &rss_hash,
+ &bytes_written, &packet_error,
+ &vlan_stripped, &vlan, &checksum,
+ &fcoe_sof, &fcoe_fc_crc_ok,
+ &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp,
+ &ipv4_csum_ok, &ipv6, &ipv4,
+ &ipv4_fragment, &fcs_ok);
+ eth_hdrs_stripped = 0;
+ skb_trim(skb, bytes_written);
+ if (!fcs_ok) {
+ atomic64_inc(&fnic_stats->misc_stats.frame_errors);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "fcs error. dropping packet.\n");
+ goto drop;
+ }
+ if (fnic_import_rq_eth_pkt(fnic, skb))
+ return;
+
+ } else {
+ /* wrong CQ type*/
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic rq_cmpl wrong cq type x%x\n", type);
+ goto drop;
+ }
+
+ if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ atomic64_inc(&fnic_stats->misc_stats.frame_errors);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic rq_cmpl fcoe x%x fcsok x%x"
+ " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
+ " x%x\n",
+ fcoe, fcs_ok, packet_error,
+ fcoe_fc_crc_ok, fcoe_enc_error);
+ goto drop;
+ }
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto drop;
+ }
+ fr_dev(fp) = fnic->lport;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
+ (char *)skb->data, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+
+ skb_queue_tail(&fnic->frame_queue, skb);
+ queue_work(fnic_event_queue, &fnic->frame_work);
+
+ return;
+drop:
+ dev_kfree_skb_irq(skb);
+}
+
+static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
+ struct cq_desc *cq_desc, u8 type,
+ u16 q_number, u16 completed_index,
+ void *opaque)
+{
+ struct fnic *fnic = vnic_dev_priv(vdev);
+
+ vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
+ VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
+ NULL);
+ return 0;
+}
+
+int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
+{
+ unsigned int tot_rq_work_done = 0, cur_work_done;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
+ fnic_rq_cmpl_handler_cont,
+ NULL);
+ if (cur_work_done) {
+ err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
+ if (err)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_alloc_rq_frame can't alloc"
+ " frame\n");
+ }
+ tot_rq_work_done += cur_work_done;
+ }
+
+ return tot_rq_work_done;
+}
+
+/*
+ * This function is called once at init time to allocate and fill RQ
+ * buffers. Subsequently, it is called in the interrupt context after RQ
+ * buffer processing to replenish the buffers in the RQ
+ */
+int fnic_alloc_rq_frame(struct vnic_rq *rq)
+{
+ struct fnic *fnic = vnic_dev_priv(rq->vdev);
+ struct sk_buff *skb;
+ u16 len;
+ dma_addr_t pa;
+
+ len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unable to allocate RQ sk_buff\n");
+ return -ENOMEM;
+ }
+ skb_reset_mac_header(skb);
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+ fnic_queue_rq_desc(rq, skb, pa, len);
+ return 0;
+}
+
+void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct fc_frame *fp = buf->os_buf;
+ struct fnic *fnic = vnic_dev_priv(rq->vdev);
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(fp_skb(fp));
+ buf->os_buf = NULL;
+}
+
+/**
+ * fnic_eth_send() - Send Ethernet frame.
+ * @fip: fcoe_ctlr instance.
+ * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
+ */
+void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct fnic *fnic = fnic_from_ctlr(fip);
+ struct vnic_wq *wq = &fnic->wq[0];
+ dma_addr_t pa;
+ struct ethhdr *eth_hdr;
+ struct vlan_ethhdr *vlan_hdr;
+ unsigned long flags;
+
+ if (!fnic->vlan_hw_insert) {
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+ vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
+ sizeof(*vlan_hdr) - sizeof(*eth_hdr));
+ memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
+ vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+ vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
+ vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+ } else {
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+ }
+
+ pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+ spin_lock_irqsave(&fnic->wq_lock[0], flags);
+ if (!vnic_wq_desc_avail(wq)) {
+ pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+ spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+ kfree_skb(skb);
+ return;
+ }
+
+ fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1);
+ spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+}
+
+/*
+ * Send FC frame.
+ */
+static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+{
+ struct vnic_wq *wq = &fnic->wq[0];
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ struct ethhdr *eth_hdr;
+ struct vlan_ethhdr *vlan_hdr;
+ struct fcoe_hdr *fcoe_hdr;
+ struct fc_frame_header *fh;
+ u32 tot_len, eth_hdr_len;
+ int ret = 0;
+ unsigned long flags;
+
+ fh = fc_frame_header_get(fp);
+ skb = fp_skb(fp);
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+ fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
+ return 0;
+
+ if (!fnic->vlan_hw_insert) {
+ eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
+ vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
+ eth_hdr = (struct ethhdr *)vlan_hdr;
+ vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+ vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
+ vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+ fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
+ } else {
+ eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
+ eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
+ eth_hdr->h_proto = htons(ETH_P_FCOE);
+ fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
+ }
+
+ if (fnic->ctlr.map_dest)
+ fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
+ else
+ memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
+ memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
+
+ tot_len = skb->len;
+ BUG_ON(tot_len % 4);
+
+ memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
+ fcoe_hdr->fcoe_sof = fr_sof(fp);
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
+
+ pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
+
+ if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
+ (char *)eth_hdr, tot_len)) != 0) {
+ printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ }
+
+ spin_lock_irqsave(&fnic->wq_lock[0], flags);
+
+ if (!vnic_wq_desc_avail(wq)) {
+ pci_unmap_single(fnic->pdev, pa,
+ tot_len, PCI_DMA_TODEVICE);
+ ret = -1;
+ goto fnic_send_frame_end;
+ }
+
+ fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1, 1, 1);
+fnic_send_frame_end:
+ spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+
+ if (ret)
+ dev_kfree_skb_any(fp_skb(fp));
+
+ return ret;
+}
+
+/*
+ * fnic_send
+ * Routine to send a raw frame
+ */
+int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
+{
+ struct fnic *fnic = lport_priv(lp);
+ unsigned long flags;
+
+ if (fnic->in_remove) {
+ dev_kfree_skb(fp_skb(fp));
+ return -1;
+ }
+
+ /*
+ * Queue frame if in a transitional state.
+ * This occurs while registering the Port_ID / MAC address after FLOGI.
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
+ skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ return fnic_send_frame(fnic, fp);
+}
+
+/**
+ * fnic_flush_tx() - send queued frames.
+ * @fnic: fnic device
+ *
+ * Send frames that were waiting to go out in FC or Ethernet mode.
+ * Whenever changing modes we purge queued frames, so these frames should
+ * be queued for the stable mode that we're in, either FC or Ethernet.
+ *
+ * Called without fnic_lock held.
+ */
+void fnic_flush_tx(struct fnic *fnic)
+{
+ struct sk_buff *skb;
+ struct fc_frame *fp;
+
+ while ((skb = skb_dequeue(&fnic->tx_queue))) {
+ fp = (struct fc_frame *)skb;
+ fnic_send_frame(fnic, fp);
+ }
+}
+
+/**
+ * fnic_set_eth_mode() - put fnic into ethernet mode.
+ * @fnic: fnic device
+ *
+ * Called without fnic lock held.
+ */
+static void fnic_set_eth_mode(struct fnic *fnic)
+{
+ unsigned long flags;
+ enum fnic_state old_state;
+ int ret;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+again:
+ old_state = fnic->state;
+ switch (old_state) {
+ case FNIC_IN_FC_MODE:
+ case FNIC_IN_ETH_TRANS_FC_MODE:
+ default:
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ ret = fnic_fw_reset_handler(fnic);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
+ goto again;
+ if (ret)
+ fnic->state = old_state;
+ break;
+
+ case FNIC_IN_FC_TRANS_ETH_MODE:
+ case FNIC_IN_ETH_MODE:
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
+ struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct sk_buff *skb = buf->os_buf;
+ struct fc_frame *fp = (struct fc_frame *)skb;
+ struct fnic *fnic = vnic_dev_priv(wq->vdev);
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(fp_skb(fp));
+ buf->os_buf = NULL;
+}
+
+static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
+ struct cq_desc *cq_desc, u8 type,
+ u16 q_number, u16 completed_index,
+ void *opaque)
+{
+ struct fnic *fnic = vnic_dev_priv(vdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
+ vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
+ fnic_wq_complete_frame_send, NULL);
+ spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
+
+ return 0;
+}
+
+int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
+{
+ unsigned int wq_work_done = 0;
+ unsigned int i;
+
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
+ work_to_do,
+ fnic_wq_cmpl_handler_cont,
+ NULL);
+ }
+
+ return wq_work_done;
+}
+
+
+void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct fc_frame *fp = buf->os_buf;
+ struct fnic *fnic = vnic_dev_priv(wq->vdev);
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_TODEVICE);
+
+ dev_kfree_skb(fp_skb(fp));
+ buf->os_buf = NULL;
+}
+
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+ unsigned long flags;
+ struct fcoe_vlan *vlan;
+ struct fcoe_vlan *next;
+
+ /*
+ * indicate a link down to fcoe so that all fcf's are free'd
+ * might not be required since we did this before sending vlan
+ * discovery request
+ */
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (!list_empty(&fnic->vlans)) {
+ list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
+ list_del(&vlan->list);
+ kfree(vlan);
+ }
+ }
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+}
+
+void fnic_handle_fip_timer(struct fnic *fnic)
+{
+ unsigned long flags;
+ struct fcoe_vlan *vlan;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u64 sol_time;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+ return;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (list_empty(&fnic->vlans)) {
+ /* no vlans available, try again */
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+ return;
+ }
+
+ vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+ shost_printk(KERN_DEBUG, fnic->lport->host,
+ "fip_timer: vlan %d state %d sol_count %d\n",
+ vlan->vid, vlan->state, vlan->sol_count);
+ switch (vlan->state) {
+ case FIP_VLAN_USED:
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "FIP VLAN is selected for FC transaction\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ break;
+ case FIP_VLAN_FAILED:
+ /* if all vlans are in failed state, restart vlan disc */
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+ break;
+ case FIP_VLAN_SENT:
+ if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+ /*
+ * no response on this vlan, remove from the list.
+ * Try the next vlan
+ */
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "Dequeue this VLAN ID %d from list\n",
+ vlan->vid);
+ list_del(&vlan->list);
+ kfree(vlan);
+ vlan = NULL;
+ if (list_empty(&fnic->vlans)) {
+ /* we exhausted all vlans, restart vlan disc */
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "fip_timer: vlan list empty, "
+ "trigger vlan disc\n");
+ fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+ return;
+ }
+ /* check the next vlan */
+ vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
+ list);
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+ }
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
+ vlan->sol_count++;
+ sol_time = jiffies + msecs_to_jiffies
+ (FCOE_CTLR_START_DELAY);
+ mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+ break;
+ }
+}
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644
index 000000000..87e74c2ab
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC_FIP_H_
+#define _FNIC_FIP_H_
+
+
+#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
+#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
+#define FCOE_CTLR_MAX_SOL 8
+
+#define FINC_MAX_FLOGI_REJECTS 8
+
+/*
+ * FIP_DT_VLAN descriptor.
+ */
+struct fip_vlan_desc {
+ struct fip_desc fd_desc;
+ __be16 fd_vlan;
+} __attribute__((packed));
+
+struct vlan {
+ __be16 vid;
+ __be16 type;
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+ struct list_head list;
+ u16 vid; /* vlan ID */
+ u16 sol_count; /* no. of sols sent */
+ u16 state; /* state */
+};
+
+enum fip_vlan_state {
+ FIP_VLAN_AVAIL = 0, /* don't do anything */
+ FIP_VLAN_SENT = 1, /* sent */
+ FIP_VLAN_USED = 2, /* succeed */
+ FIP_VLAN_FAILED = 3, /* failed to response */
+};
+
+struct fip_vlan {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct {
+ struct fip_mac_desc mac;
+ struct fip_wwn_desc wwnn;
+ } desc;
+};
+
+#endif /* __FINC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
new file mode 100644
index 000000000..c35b8f188
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_IO_H_
+#define _FNIC_IO_H_
+
+#include <scsi/fc/fc_fcp.h>
+
+#define FNIC_DFLT_SG_DESC_CNT 32
+#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
+#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
+
+struct host_sg_desc {
+ __le64 addr;
+ __le32 len;
+ u32 _resvd;
+};
+
+struct fnic_dflt_sgl_list {
+ struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
+};
+
+struct fnic_sgl_list {
+ struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
+};
+
+enum fnic_sgl_list_type {
+ FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */
+ FNIC_SGL_CACHE_MAX, /* cache with max size sgl */
+ FNIC_SGL_NUM_CACHES /* number of sgl caches */
+};
+
+enum fnic_ioreq_state {
+ FNIC_IOREQ_NOT_INITED = 0,
+ FNIC_IOREQ_CMD_PENDING,
+ FNIC_IOREQ_ABTS_PENDING,
+ FNIC_IOREQ_ABTS_COMPLETE,
+ FNIC_IOREQ_CMD_COMPLETE,
+};
+
+struct fnic_io_req {
+ struct host_sg_desc *sgl_list; /* sgl list */
+ void *sgl_list_alloc; /* sgl list address used for free */
+ dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
+ dma_addr_t sgl_list_pa; /* dma address for sgl list */
+ u16 sgl_cnt;
+ u8 sgl_type; /* device DMA descriptor list type */
+ u8 io_completed:1; /* set to 1 when fw completes IO */
+ u32 port_id; /* remote port DID */
+ unsigned long start_time; /* in jiffies */
+ struct completion *abts_done; /* completion for abts */
+ struct completion *dr_done; /* completion for device reset */
+};
+
+#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
new file mode 100644
index 000000000..a0dd1b67a
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic_io.h"
+#include "fnic.h"
+
+static irqreturn_t fnic_isr_legacy(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ u32 pba;
+ unsigned long work_done = 0;
+
+ pba = vnic_intr_legacy_pba(fnic->legacy_pba);
+ if (!pba)
+ return IRQ_NONE;
+
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
+ if (pba & (1 << FNIC_INTX_NOTIFY)) {
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
+ fnic_handle_link_event(fnic);
+ }
+
+ if (pba & (1 << FNIC_INTX_ERR)) {
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
+ fnic_log_q_error(fnic);
+ }
+
+ if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
+ work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+ work_done += fnic_wq_cmpl_handler(fnic, -1);
+ work_done += fnic_rq_cmpl_handler(fnic, -1);
+
+ vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
+ work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msi(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long work_done = 0;
+
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
+ work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+ work_done += fnic_wq_cmpl_handler(fnic, -1);
+ work_done += fnic_rq_cmpl_handler(fnic, -1);
+
+ vnic_intr_return_credits(&fnic->intr[0],
+ work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long rq_work_done = 0;
+
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
+ rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
+ vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
+ rq_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long wq_work_done = 0;
+
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
+ wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
+ vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
+ wq_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long wq_copy_work_done = 0;
+
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
+ wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
+ vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
+ wq_copy_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
+{
+ struct fnic *fnic = data;
+
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
+ fnic_log_q_error(fnic);
+ fnic_handle_link_event(fnic);
+
+ return IRQ_HANDLED;
+}
+
+void fnic_free_intr(struct fnic *fnic)
+{
+ int i;
+
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ free_irq(fnic->pdev->irq, fnic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
+ if (fnic->msix[i].requested)
+ free_irq(fnic->msix_entry[i].vector,
+ fnic->msix[i].devid);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int fnic_request_intr(struct fnic *fnic)
+{
+ int err = 0;
+ int i;
+
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+
+ case VNIC_DEV_INTR_MODE_INTX:
+ err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
+ IRQF_SHARED, DRV_NAME, fnic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSI:
+ err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
+ 0, fnic->name, fnic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSIX:
+
+ sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
+ "%.11s-fcs-rq", fnic->name);
+ fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
+ fnic->msix[FNIC_MSIX_RQ].devid = fnic;
+
+ sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
+ "%.11s-fcs-wq", fnic->name);
+ fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
+ fnic->msix[FNIC_MSIX_WQ].devid = fnic;
+
+ sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
+ "%.11s-scsi-wq", fnic->name);
+ fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
+ fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
+
+ sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
+ "%.11s-err-notify", fnic->name);
+ fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
+ fnic_isr_msix_err_notify;
+ fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
+
+ for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
+ err = request_irq(fnic->msix_entry[i].vector,
+ fnic->msix[i].isr, 0,
+ fnic->msix[i].devname,
+ fnic->msix[i].devid);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "MSIX: request_irq"
+ " failed %d\n", err);
+ fnic_free_intr(fnic);
+ break;
+ }
+ fnic->msix[i].requested = 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+int fnic_set_intr_mode(struct fnic *fnic)
+{
+ unsigned int n = ARRAY_SIZE(fnic->rq);
+ unsigned int m = ARRAY_SIZE(fnic->wq);
+ unsigned int o = ARRAY_SIZE(fnic->wq_copy);
+ unsigned int i;
+
+ /*
+ * Set interrupt mode (INTx, MSI, MSI-X) depending
+ * system capabilities.
+ *
+ * Try MSI-X first
+ *
+ * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
+ * (last INTR is used for WQ/RQ errors and notification area)
+ */
+
+ BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
+ for (i = 0; i < n + m + o + 1; i++)
+ fnic->msix_entry[i].entry = i;
+
+ if (fnic->rq_count >= n &&
+ fnic->raw_wq_count >= m &&
+ fnic->wq_copy_count >= o &&
+ fnic->cq_count >= n + m + o) {
+ if (!pci_enable_msix_exact(fnic->pdev, fnic->msix_entry,
+ n + m + o + 1)) {
+ fnic->rq_count = n;
+ fnic->raw_wq_count = m;
+ fnic->wq_copy_count = o;
+ fnic->wq_count = m + o;
+ fnic->cq_count = n + m + o;
+ fnic->intr_count = n + m + o + 1;
+ fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
+
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ "Using MSI-X Interrupts\n");
+ vnic_dev_set_intr_mode(fnic->vdev,
+ VNIC_DEV_INTR_MODE_MSIX);
+ return 0;
+ }
+ }
+
+ /*
+ * Next try MSI
+ * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
+ */
+ if (fnic->rq_count >= 1 &&
+ fnic->raw_wq_count >= 1 &&
+ fnic->wq_copy_count >= 1 &&
+ fnic->cq_count >= 3 &&
+ fnic->intr_count >= 1 &&
+ !pci_enable_msi(fnic->pdev)) {
+
+ fnic->rq_count = 1;
+ fnic->raw_wq_count = 1;
+ fnic->wq_copy_count = 1;
+ fnic->wq_count = 2;
+ fnic->cq_count = 3;
+ fnic->intr_count = 1;
+ fnic->err_intr_offset = 0;
+
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ "Using MSI Interrupts\n");
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+ return 0;
+ }
+
+ /*
+ * Next try INTx
+ * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
+ * 1 INTR is used for all 3 queues, 1 INTR for queue errors
+ * 1 INTR for notification area
+ */
+
+ if (fnic->rq_count >= 1 &&
+ fnic->raw_wq_count >= 1 &&
+ fnic->wq_copy_count >= 1 &&
+ fnic->cq_count >= 3 &&
+ fnic->intr_count >= 3) {
+
+ fnic->rq_count = 1;
+ fnic->raw_wq_count = 1;
+ fnic->wq_copy_count = 1;
+ fnic->cq_count = 3;
+ fnic->intr_count = 3;
+
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ "Using Legacy Interrupts\n");
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+ return 0;
+ }
+
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+ return -EINVAL;
+}
+
+void fnic_clear_intr_mode(struct fnic *fnic)
+{
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ pci_disable_msix(fnic->pdev);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ pci_disable_msi(fnic->pdev);
+ break;
+ default:
+ break;
+ }
+
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
+}
+
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
new file mode 100644
index 000000000..8a0d4d7b3
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -0,0 +1,1168 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic_io.h"
+#include "fnic_fip.h"
+#include "fnic.h"
+
+#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
+
+/* Timer to poll notification area for events. Used for MSI interrupts */
+#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
+
+static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
+static struct kmem_cache *fnic_io_req_cache;
+LIST_HEAD(fnic_list);
+DEFINE_SPINLOCK(fnic_list_lock);
+
+/* Supported devices by fnic module */
+static struct pci_device_id fnic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
+ { 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
+ "Joseph R. Eykholt <jeykholt@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, fnic_id_table);
+
+unsigned int fnic_log_level;
+module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+ "for fnic trace buffer");
+
+unsigned int fnic_fc_trace_max_pages = 64;
+module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_fc_trace_max_pages,
+ "Total allocated memory pages for fc trace buffer");
+
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
+static struct libfc_function_template fnic_transport_template = {
+ .frame_send = fnic_send,
+ .lport_set_port_id = fnic_set_port_id,
+ .fcp_abort_io = fnic_empty_scsi_cleanup,
+ .fcp_cleanup = fnic_empty_scsi_cleanup,
+ .exch_mgr_reset = fnic_exch_mgr_reset
+};
+
+static int fnic_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ scsi_change_queue_depth(sdev, fnic_max_qdepth);
+ return 0;
+}
+
+static struct scsi_host_template fnic_host_template = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = fnic_queuecommand,
+ .eh_abort_handler = fnic_abort_cmd,
+ .eh_device_reset_handler = fnic_device_reset,
+ .eh_host_reset_handler = fnic_host_reset,
+ .slave_alloc = fnic_slave_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = FNIC_DFLT_IO_REQ,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
+ .max_sectors = 0xffff,
+ .shost_attrs = fnic_attrs,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static void
+fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+static void fnic_get_host_speed(struct Scsi_Host *shost);
+static struct scsi_transport_template *fnic_fc_transport;
+static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
+
+static struct fc_function_template fnic_fc_functions = {
+
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fnic_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
+ .issue_fc_host_lip = fnic_reset,
+ .get_fc_host_stats = fnic_get_stats,
+ .reset_fc_host_stats = fnic_reset_host_stats,
+ .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .terminate_rport_io = fnic_terminate_rport_io,
+ .bsg_request = fc_lport_bsg_request,
+};
+
+static void fnic_get_host_speed(struct Scsi_Host *shost)
+{
+ struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+
+ /* Add in other values as they get defined in fw */
+ switch (port_speed) {
+ case 10000:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ }
+}
+
+static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
+{
+ int ret;
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct fc_host_statistics *stats = &lp->host_stats;
+ struct vnic_stats *vs;
+ unsigned long flags;
+
+ if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
+ return stats;
+ fnic->stats_time = jiffies;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (ret) {
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic: Get vnic stats failed"
+ " 0x%x", ret);
+ return stats;
+ }
+ vs = fnic->stats;
+ stats->tx_frames = vs->tx.tx_unicast_frames_ok;
+ stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
+ stats->rx_frames = vs->rx.rx_unicast_frames_ok;
+ stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
+ stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
+ stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
+ stats->invalid_crc_count = vs->rx.rx_crc_errors;
+ stats->seconds_since_last_reset =
+ (jiffies - fnic->stats_reset_time) / HZ;
+ stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
+ stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
+
+ return stats;
+}
+
+/*
+ * fnic_dump_fchost_stats
+ * note : dumps fc_statistics into system logs
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+ struct fc_host_statistics *stats)
+{
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: seconds since last reset = %llu\n",
+ stats->seconds_since_last_reset);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: tx frames = %llu\n",
+ stats->tx_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: tx words = %llu\n",
+ stats->tx_words);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: rx frames = %llu\n",
+ stats->rx_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: rx words = %llu\n",
+ stats->rx_words);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: lip count = %llu\n",
+ stats->lip_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: nos count = %llu\n",
+ stats->nos_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: error frames = %llu\n",
+ stats->error_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: dumped frames = %llu\n",
+ stats->dumped_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: link failure count = %llu\n",
+ stats->link_failure_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: loss of sync count = %llu\n",
+ stats->loss_of_sync_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: loss of signal count = %llu\n",
+ stats->loss_of_signal_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: prim seq protocol err count = %llu\n",
+ stats->prim_seq_protocol_err_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: invalid tx word count= %llu\n",
+ stats->invalid_tx_word_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: invalid crc count = %llu\n",
+ stats->invalid_crc_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp input requests = %llu\n",
+ stats->fcp_input_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp output requests = %llu\n",
+ stats->fcp_output_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp control requests = %llu\n",
+ stats->fcp_control_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp input megabytes = %llu\n",
+ stats->fcp_input_megabytes);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp output megabytes = %llu\n",
+ stats->fcp_output_megabytes);
+ return;
+}
+
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note : called when reset_statistics set under sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+ int ret;
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct fc_host_statistics *stats;
+ unsigned long flags;
+
+ /* dump current stats, before clearing them */
+ stats = fnic_get_stats(host);
+ fnic_dump_fchost_stats(host, stats);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ ret = vnic_dev_stats_clear(fnic->vdev);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (ret) {
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic: Reset vnic stats failed"
+ " 0x%x", ret);
+ return;
+ }
+ fnic->stats_reset_time = jiffies;
+ memset(stats, 0, sizeof(*stats));
+
+ return;
+}
+
+void fnic_log_q_error(struct fnic *fnic)
+{
+ unsigned int i;
+ u32 error_status;
+
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ error_status = ioread32(&fnic->wq[i].ctrl->error_status);
+ if (error_status)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "WQ[%d] error_status"
+ " %d\n", i, error_status);
+ }
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ error_status = ioread32(&fnic->rq[i].ctrl->error_status);
+ if (error_status)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "RQ[%d] error_status"
+ " %d\n", i, error_status);
+ }
+
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
+ if (error_status)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "CWQ[%d] error_status"
+ " %d\n", i, error_status);
+ }
+}
+
+void fnic_handle_link_event(struct fnic *fnic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ queue_work(fnic_event_queue, &fnic->link_work);
+
+}
+
+static int fnic_notify_set(struct fnic *fnic)
+{
+ int err;
+
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ err = vnic_dev_notify_set(fnic->vdev, -1);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
+ break;
+ default:
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Interrupt mode should be set up"
+ " before devcmd notify set %d\n",
+ vnic_dev_get_intr_mode(fnic->vdev));
+ err = -1;
+ break;
+ }
+
+ return err;
+}
+
+static void fnic_notify_timer(unsigned long data)
+{
+ struct fnic *fnic = (struct fnic *)data;
+
+ fnic_handle_link_event(fnic);
+ mod_timer(&fnic->notify_timer,
+ round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void fnic_fip_notify_timer(unsigned long data)
+{
+ struct fnic *fnic = (struct fnic *)data;
+
+ fnic_handle_fip_timer(fnic);
+}
+
+static void fnic_notify_timer_start(struct fnic *fnic)
+{
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSI:
+ /*
+ * Schedule first timeout immediately. The driver is
+ * initiatialized and ready to look for link up notification
+ */
+ mod_timer(&fnic->notify_timer, jiffies);
+ break;
+ default:
+ /* Using intr for notification for INTx/MSI-X */
+ break;
+ };
+}
+
+static int fnic_dev_wait(struct vnic_dev *vdev,
+ int (*start)(struct vnic_dev *, int),
+ int (*finished)(struct vnic_dev *, int *),
+ int arg)
+{
+ unsigned long time;
+ int done;
+ int err;
+ int count;
+
+ count = 0;
+
+ err = start(vdev, arg);
+ if (err)
+ return err;
+
+ /* Wait for func to complete.
+ * Sometime schedule_timeout_uninterruptible take long time
+ * to wake up so we do not retry as we are only waiting for
+ * 2 seconds in while loop. By adding count, we make sure
+ * we try atleast three times before returning -ETIMEDOUT
+ */
+ time = jiffies + (HZ * 2);
+ do {
+ err = finished(vdev, &done);
+ count++;
+ if (err)
+ return err;
+ if (done)
+ return 0;
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(time, jiffies) || (count < 3));
+
+ return -ETIMEDOUT;
+}
+
+static int fnic_cleanup(struct fnic *fnic)
+{
+ unsigned int i;
+ int err;
+
+ vnic_dev_disable(fnic->vdev);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_mask(&fnic->intr[i]);
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ err = vnic_rq_disable(&fnic->rq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ err = vnic_wq_disable(&fnic->wq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
+ if (err)
+ return err;
+ }
+
+ /* Clean up completed IOs and FCS frames */
+ fnic_wq_copy_cmpl_handler(fnic, -1);
+ fnic_wq_cmpl_handler(fnic, -1);
+ fnic_rq_cmpl_handler(fnic, -1);
+
+ /* Clean up the IOs and FCS frames that have not completed */
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_clean(&fnic->wq_copy[i],
+ fnic_wq_copy_cleanup_handler);
+
+ for (i = 0; i < fnic->cq_count; i++)
+ vnic_cq_clean(&fnic->cq[i]);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_clean(&fnic->intr[i]);
+
+ mempool_destroy(fnic->io_req_pool);
+ for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
+ mempool_destroy(fnic->io_sgl_pool[i]);
+
+ return 0;
+}
+
+static void fnic_iounmap(struct fnic *fnic)
+{
+ if (fnic->bar0.vaddr)
+ iounmap(fnic->bar0.vaddr);
+}
+
+/**
+ * fnic_get_mac() - get assigned data MAC address for FIP code.
+ * @lport: local port.
+ */
+static u8 *fnic_get_mac(struct fc_lport *lport)
+{
+ struct fnic *fnic = lport_priv(lport);
+
+ return fnic->data_src_addr;
+}
+
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+{
+ u16 old_vlan;
+ old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
+static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct Scsi_Host *host;
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ mempool_t *pool;
+ int err;
+ int i;
+ unsigned long flags;
+
+ /*
+ * Allocate SCSI Host and set up association between host,
+ * local port, and fnic
+ */
+ lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
+ if (!lp) {
+ printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ host = lp->host;
+ fnic = lport_priv(lp);
+ fnic->lport = lp;
+ fnic->ctlr.lp = lp;
+
+ snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
+ host->host_no);
+
+ host->transportt = fnic_fc_transport;
+
+ err = fnic_stats_debugfs_init(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to initialize debugfs for stats\n");
+ fnic_stats_debugfs_remove(fnic);
+ }
+
+ /* Setup PCI resources */
+ pci_set_drvdata(pdev, fnic);
+
+ fnic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Cannot enable PCI device, aborting.\n");
+ goto err_out_free_hba;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Cannot enable PCI resources, aborting\n");
+ goto err_out_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ /* Query PCI controller on system for DMA addressing
+ * limitation for the device. Try 64-bit first, and
+ * fail to 32-bit.
+ */
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "No usable DMA configuration "
+ "aborting\n");
+ goto err_out_release_regions;
+ }
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to obtain 32-bit DMA "
+ "for consistent allocations, aborting.\n");
+ goto err_out_release_regions;
+ }
+ } else {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to obtain 64-bit DMA "
+ "for consistent allocations, aborting.\n");
+ goto err_out_release_regions;
+ }
+ }
+
+ /* Map vNIC resources from BAR0 */
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "BAR0 not memory-map'able, aborting.\n");
+ err = -ENODEV;
+ goto err_out_release_regions;
+ }
+
+ fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
+ fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
+ fnic->bar0.len = pci_resource_len(pdev, 0);
+
+ if (!fnic->bar0.vaddr) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Cannot memory-map BAR0 res hdr, "
+ "aborting.\n");
+ err = -ENODEV;
+ goto err_out_release_regions;
+ }
+
+ fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
+ if (!fnic->vdev) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC registration failed, "
+ "aborting.\n");
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+
+ err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
+ vnic_dev_open_done, 0);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC dev open failed, aborting.\n");
+ goto err_out_vnic_unregister;
+ }
+
+ err = vnic_dev_init(fnic->vdev, 0);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC get MAC addr failed \n");
+ goto err_out_dev_close;
+ }
+ /* set data_src for point-to-point mode and to keep it non-zero */
+ memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
+
+ /* Get vNIC configuration */
+ err = fnic_get_vnic_config(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Get vNIC configuration failed, "
+ "aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ /* Configure Maximum Outstanding IO reqs*/
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+ }
+ fnic->fnic_max_tag_id = host->can_queue;
+
+ err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to alloc shared tag map\n");
+ goto err_out_dev_close;
+ }
+
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ fnic_get_res_counts(fnic);
+
+ err = fnic_set_intr_mode(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to set intr mode, "
+ "aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ err = fnic_alloc_vnic_resources(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to alloc vNIC resources, "
+ "aborting.\n");
+ goto err_out_clear_intr;
+ }
+
+
+ /* initialize all fnic locks */
+ spin_lock_init(&fnic->fnic_lock);
+
+ for (i = 0; i < FNIC_WQ_MAX; i++)
+ spin_lock_init(&fnic->wq_lock[i]);
+
+ for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
+ spin_lock_init(&fnic->wq_copy_lock[i]);
+ fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
+ fnic->fw_ack_recd[i] = 0;
+ fnic->fw_ack_index[i] = -1;
+ }
+
+ for (i = 0; i < FNIC_IO_LOCKS; i++)
+ spin_lock_init(&fnic->io_req_lock[i]);
+
+ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
+ if (!fnic->io_req_pool)
+ goto err_out_free_resources;
+
+ pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+ if (!pool)
+ goto err_out_free_ioreq_pool;
+ fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
+
+ pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+ if (!pool)
+ goto err_out_free_dflt_pool;
+ fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+
+ /* setup vlan config, hw inserts vlan header */
+ fnic->vlan_hw_insert = 1;
+ fnic->vlan_id = 0;
+
+ /* Initialize the FIP fcoe_ctrl struct */
+ fnic->ctlr.send = fnic_eth_send;
+ fnic->ctlr.update_mac = fnic_update_mac;
+ fnic->ctlr.get_src_addr = fnic_get_mac;
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "firmware supports FIP\n");
+ /* enable directed and multicast */
+ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
+ vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
+ vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+ fnic->set_vlan = fnic_set_vlan;
+ fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
+ setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
+ (unsigned long)fnic);
+ spin_lock_init(&fnic->vlans_lock);
+ INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
+ INIT_WORK(&fnic->event_work, fnic_handle_event);
+ skb_queue_head_init(&fnic->fip_frame_queue);
+ INIT_LIST_HEAD(&fnic->evlist);
+ INIT_LIST_HEAD(&fnic->vlans);
+ } else {
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "firmware uses non-FIP mode\n");
+ fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
+ fnic->ctlr.state = FIP_ST_NON_FIP;
+ }
+ fnic->state = FNIC_IN_FC_MODE;
+
+ atomic_set(&fnic->in_flight, 0);
+ fnic->state_flags = FNIC_FLAGS_NONE;
+
+ /* Enable hardware stripping of vlan header on ingress */
+ fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
+
+ /* Setup notification buffer area */
+ err = fnic_notify_set(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to alloc notify buffer, aborting.\n");
+ goto err_out_free_max_pool;
+ }
+
+ /* Setup notify timer when using MSI interrupts */
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ setup_timer(&fnic->notify_timer,
+ fnic_notify_timer, (unsigned long)fnic);
+
+ /* allocate RQ buffers and post them to RQ*/
+ for (i = 0; i < fnic->rq_count; i++) {
+ err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_alloc_rq_frame can't alloc "
+ "frame\n");
+ goto err_out_free_rq_buf;
+ }
+ }
+
+ /*
+ * Initialization done with PCI system, hardware, firmware.
+ * Add host to SCSI
+ */
+ err = scsi_add_host(lp->host, &pdev->dev);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic: scsi_add_host failed...exiting\n");
+ goto err_out_free_rq_buf;
+ }
+
+ /* Start local port initiatialization */
+
+ lp->link_up = 0;
+
+ lp->max_retry_count = fnic->config.flogi_retries;
+ lp->max_rport_retry_count = fnic->config.plogi_retries;
+ lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_CONF_COMPL);
+ if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+ lp->service_params |= FCP_SPPF_RETRY;
+
+ lp->boot_time = jiffies;
+ lp->e_d_tov = fnic->config.ed_tov;
+ lp->r_a_tov = fnic->config.ra_tov;
+ lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
+ fc_set_wwnn(lp, fnic->config.node_wwn);
+ fc_set_wwpn(lp, fnic->config.port_wwn);
+
+ fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
+
+ if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
+ FCPIO_HOST_EXCH_RANGE_END, NULL)) {
+ err = -ENOMEM;
+ goto err_out_remove_scsi_host;
+ }
+
+ fc_lport_init_stats(lp);
+ fnic->stats_reset_time = jiffies;
+
+ fc_lport_config(lp);
+
+ if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
+ sizeof(struct fc_frame_header))) {
+ err = -EINVAL;
+ goto err_out_free_exch_mgr;
+ }
+ fc_host_maxframe_size(lp->host) = lp->mfs;
+ fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
+
+ sprintf(fc_host_symbolic_name(lp->host),
+ DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+
+ spin_lock_irqsave(&fnic_list_lock, flags);
+ list_add_tail(&fnic->list, &fnic_list);
+ spin_unlock_irqrestore(&fnic_list_lock, flags);
+
+ INIT_WORK(&fnic->link_work, fnic_handle_link);
+ INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+ skb_queue_head_init(&fnic->frame_queue);
+ skb_queue_head_init(&fnic->tx_queue);
+
+ /* Enable all queues */
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_enable(&fnic->wq[i]);
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_enable(&fnic->rq[i]);
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_enable(&fnic->wq_copy[i]);
+
+ fc_fabric_login(lp);
+
+ vnic_dev_enable(fnic->vdev);
+
+ err = fnic_request_intr(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to request irq.\n");
+ goto err_out_free_exch_mgr;
+ }
+
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_unmask(&fnic->intr[i]);
+
+ fnic_notify_timer_start(fnic);
+
+ return 0;
+
+err_out_free_exch_mgr:
+ fc_exch_mgr_free(lp);
+err_out_remove_scsi_host:
+ fc_remove_host(lp->host);
+ scsi_remove_host(lp->host);
+err_out_free_rq_buf:
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+ vnic_dev_notify_unset(fnic->vdev);
+err_out_free_max_pool:
+ mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
+err_out_free_dflt_pool:
+ mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
+err_out_free_ioreq_pool:
+ mempool_destroy(fnic->io_req_pool);
+err_out_free_resources:
+ fnic_free_vnic_resources(fnic);
+err_out_clear_intr:
+ fnic_clear_intr_mode(fnic);
+err_out_dev_close:
+ vnic_dev_close(fnic->vdev);
+err_out_vnic_unregister:
+ vnic_dev_unregister(fnic->vdev);
+err_out_iounmap:
+ fnic_iounmap(fnic);
+err_out_release_regions:
+ pci_release_regions(pdev);
+err_out_disable_device:
+ pci_disable_device(pdev);
+err_out_free_hba:
+ fnic_stats_debugfs_remove(fnic);
+ scsi_host_put(lp->host);
+err_out:
+ return err;
+}
+
+static void fnic_remove(struct pci_dev *pdev)
+{
+ struct fnic *fnic = pci_get_drvdata(pdev);
+ struct fc_lport *lp = fnic->lport;
+ unsigned long flags;
+
+ /*
+ * Mark state so that the workqueue thread stops forwarding
+ * received frames and link events to the local port. ISR and
+ * other threads that can queue work items will also stop
+ * creating work items on the fnic workqueue
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->stop_rx_link_events = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ del_timer_sync(&fnic->notify_timer);
+
+ /*
+ * Flush the fnic event queue. After this call, there should
+ * be no event queued for this fnic device in the workqueue
+ */
+ flush_workqueue(fnic_event_queue);
+ skb_queue_purge(&fnic->frame_queue);
+ skb_queue_purge(&fnic->tx_queue);
+
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+ del_timer_sync(&fnic->fip_timer);
+ skb_queue_purge(&fnic->fip_frame_queue);
+ fnic_fcoe_reset_vlans(fnic);
+ fnic_fcoe_evlist_free(fnic);
+ }
+
+ /*
+ * Log off the fabric. This stops all remote ports, dns port,
+ * logs off the fabric. This flushes all rport, disc, lport work
+ * before returning
+ */
+ fc_fabric_logoff(fnic->lport);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->in_remove = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fcoe_ctlr_destroy(&fnic->ctlr);
+ fc_lport_destroy(lp);
+ fnic_stats_debugfs_remove(fnic);
+
+ /*
+ * This stops the fnic device, masks all interrupts. Completed
+ * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
+ * cleaned up
+ */
+ fnic_cleanup(fnic);
+
+ BUG_ON(!skb_queue_empty(&fnic->frame_queue));
+ BUG_ON(!skb_queue_empty(&fnic->tx_queue));
+
+ spin_lock_irqsave(&fnic_list_lock, flags);
+ list_del(&fnic->list);
+ spin_unlock_irqrestore(&fnic_list_lock, flags);
+
+ fc_remove_host(fnic->lport->host);
+ scsi_remove_host(fnic->lport->host);
+ fc_exch_mgr_free(fnic->lport);
+ vnic_dev_notify_unset(fnic->vdev);
+ fnic_free_intr(fnic);
+ fnic_free_vnic_resources(fnic);
+ fnic_clear_intr_mode(fnic);
+ vnic_dev_close(fnic->vdev);
+ vnic_dev_unregister(fnic->vdev);
+ fnic_iounmap(fnic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ scsi_host_put(lp->host);
+}
+
+static struct pci_driver fnic_driver = {
+ .name = DRV_NAME,
+ .id_table = fnic_id_table,
+ .probe = fnic_probe,
+ .remove = fnic_remove,
+};
+
+static int __init fnic_init_module(void)
+{
+ size_t len;
+ int err = 0;
+
+ printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+ /* Create debugfs entries for fnic */
+ err = fnic_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to create fnic directory "
+ "for tracing and stats logging\n");
+ fnic_debugfs_terminate();
+ }
+
+ /* Allocate memory for trace buffer */
+ err = fnic_trace_buf_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX
+ "Trace buffer initialization Failed. "
+ "Fnic Tracing utility is disabled\n");
+ fnic_trace_free();
+ }
+
+ /* Allocate memory for fc trace buffer */
+ err = fnic_fc_trace_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "FC trace buffer initialization Failed "
+ "FC frame tracing utility is disabled\n");
+ fnic_fc_trace_free();
+ }
+
+ /* Create a cache for allocation of default size sgls */
+ len = sizeof(struct fnic_dflt_sgl_list);
+ fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
+ ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
+ printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
+ err = -ENOMEM;
+ goto err_create_fnic_sgl_slab_dflt;
+ }
+
+ /* Create a cache for allocation of max size sgls*/
+ len = sizeof(struct fnic_sgl_list);
+ fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
+ ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
+ printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
+ err = -ENOMEM;
+ goto err_create_fnic_sgl_slab_max;
+ }
+
+ /* Create a cache of io_req structs for use via mempool */
+ fnic_io_req_cache = kmem_cache_create("fnic_io_req",
+ sizeof(struct fnic_io_req),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fnic_io_req_cache) {
+ printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
+ err = -ENOMEM;
+ goto err_create_fnic_ioreq_slab;
+ }
+
+ fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
+ if (!fnic_event_queue) {
+ printk(KERN_ERR PFX "fnic work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_fnic_workq;
+ }
+
+ spin_lock_init(&fnic_list_lock);
+ INIT_LIST_HEAD(&fnic_list);
+
+ fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+ if (!fnic_fip_queue) {
+ printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_fip_workq;
+ }
+
+ fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
+ if (!fnic_fc_transport) {
+ printk(KERN_ERR PFX "fc_attach_transport error\n");
+ err = -ENOMEM;
+ goto err_fc_transport;
+ }
+
+ /* register the driver with PCI system */
+ err = pci_register_driver(&fnic_driver);
+ if (err < 0) {
+ printk(KERN_ERR PFX "pci register error\n");
+ goto err_pci_register;
+ }
+ return err;
+
+err_pci_register:
+ fc_release_transport(fnic_fc_transport);
+err_fc_transport:
+ destroy_workqueue(fnic_fip_queue);
+err_create_fip_workq:
+ destroy_workqueue(fnic_event_queue);
+err_create_fnic_workq:
+ kmem_cache_destroy(fnic_io_req_cache);
+err_create_fnic_ioreq_slab:
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+err_create_fnic_sgl_slab_max:
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+err_create_fnic_sgl_slab_dflt:
+ fnic_trace_free();
+ fnic_fc_trace_free();
+ fnic_debugfs_terminate();
+ return err;
+}
+
+static void __exit fnic_cleanup_module(void)
+{
+ pci_unregister_driver(&fnic_driver);
+ destroy_workqueue(fnic_event_queue);
+ if (fnic_fip_queue) {
+ flush_workqueue(fnic_fip_queue);
+ destroy_workqueue(fnic_fip_queue);
+ }
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+ kmem_cache_destroy(fnic_io_req_cache);
+ fc_release_transport(fnic_fc_transport);
+ fnic_trace_free();
+ fnic_fc_trace_free();
+ fnic_debugfs_terminate();
+}
+
+module_init(fnic_init_module);
+module_exit(fnic_cleanup_module);
+
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
new file mode 100644
index 000000000..50488f8e1
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "fnic.h"
+
+int fnic_get_vnic_config(struct fnic *fnic)
+{
+ struct vnic_fc_config *c = &fnic->config;
+ int err;
+
+#define GET_CONFIG(m) \
+ do { \
+ err = vnic_dev_spec(fnic->vdev, \
+ offsetof(struct vnic_fc_config, m), \
+ sizeof(c->m), &c->m); \
+ if (err) { \
+ shost_printk(KERN_ERR, fnic->lport->host, \
+ "Error getting %s, %d\n", #m, \
+ err); \
+ return err; \
+ } \
+ } while (0);
+
+ GET_CONFIG(node_wwn);
+ GET_CONFIG(port_wwn);
+ GET_CONFIG(wq_enet_desc_count);
+ GET_CONFIG(wq_copy_desc_count);
+ GET_CONFIG(rq_desc_count);
+ GET_CONFIG(maxdatafieldsize);
+ GET_CONFIG(ed_tov);
+ GET_CONFIG(ra_tov);
+ GET_CONFIG(intr_timer);
+ GET_CONFIG(intr_timer_type);
+ GET_CONFIG(flags);
+ GET_CONFIG(flogi_retries);
+ GET_CONFIG(flogi_timeout);
+ GET_CONFIG(plogi_retries);
+ GET_CONFIG(plogi_timeout);
+ GET_CONFIG(io_throttle_count);
+ GET_CONFIG(link_down_timeout);
+ GET_CONFIG(port_down_timeout);
+ GET_CONFIG(port_down_io_retries);
+ GET_CONFIG(luns_per_tgt);
+
+ c->wq_enet_desc_count =
+ min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
+ max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
+ c->wq_enet_desc_count));
+ c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
+
+ c->wq_copy_desc_count =
+ min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
+ max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
+ c->wq_copy_desc_count));
+ c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
+
+ c->rq_desc_count =
+ min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
+ max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
+ c->rq_desc_count));
+ c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
+
+ c->maxdatafieldsize =
+ min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
+ max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
+ c->maxdatafieldsize));
+ c->ed_tov =
+ min_t(u32, VNIC_FNIC_EDTOV_MAX,
+ max_t(u32, VNIC_FNIC_EDTOV_MIN,
+ c->ed_tov));
+
+ c->ra_tov =
+ min_t(u32, VNIC_FNIC_RATOV_MAX,
+ max_t(u32, VNIC_FNIC_RATOV_MIN,
+ c->ra_tov));
+
+ c->flogi_retries =
+ min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
+
+ c->flogi_timeout =
+ min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
+ max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
+ c->flogi_timeout));
+
+ c->plogi_retries =
+ min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
+
+ c->plogi_timeout =
+ min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
+ max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
+ c->plogi_timeout));
+
+ c->io_throttle_count =
+ min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
+ max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
+ c->io_throttle_count));
+
+ c->link_down_timeout =
+ min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
+ c->link_down_timeout);
+
+ c->port_down_timeout =
+ min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
+ c->port_down_timeout);
+
+ c->port_down_io_retries =
+ min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
+ c->port_down_io_retries);
+
+ c->luns_per_tgt =
+ min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
+ max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
+ c->luns_per_tgt));
+
+ c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+ c->intr_timer_type = c->intr_timer_type;
+
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC MAC addr %pM "
+ "wq/wq_copy/rq %d/%d/%d\n",
+ fnic->ctlr.ctl_src_addr,
+ c->wq_enet_desc_count, c->wq_copy_desc_count,
+ c->rq_desc_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC node wwn %llx port wwn %llx\n",
+ c->node_wwn, c->port_wwn);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC ed_tov %d ra_tov %d\n",
+ c->ed_tov, c->ra_tov);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC mtu %d intr timer %d\n",
+ c->maxdatafieldsize, c->intr_timer);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC flags 0x%x luns per tgt %d\n",
+ c->flags, c->luns_per_tgt);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC flogi_retries %d flogi timeout %d\n",
+ c->flogi_retries, c->flogi_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC plogi retries %d plogi timeout %d\n",
+ c->plogi_retries, c->plogi_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC io throttle count %d link dn timeout %d\n",
+ c->io_throttle_count, c->link_down_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC port dn io retries %d port dn timeout %d\n",
+ c->port_down_io_retries, c->port_down_timeout);
+
+ return 0;
+}
+
+int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
+ u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
+ u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
+{
+ u64 a0, a1;
+ u32 nic_cfg;
+ int wait = 1000;
+
+ vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+ rss_hash_type, rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+ a0 = nic_cfg;
+ a1 = 0;
+
+ return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+void fnic_get_res_counts(struct fnic *fnic)
+{
+ fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
+ fnic->raw_wq_count = fnic->wq_count - 1;
+ fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
+ fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
+ fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
+ fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
+ RES_TYPE_INTR_CTRL);
+}
+
+void fnic_free_vnic_resources(struct fnic *fnic)
+{
+ unsigned int i;
+
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_free(&fnic->wq[i]);
+
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_free(&fnic->wq_copy[i]);
+
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_free(&fnic->rq[i]);
+
+ for (i = 0; i < fnic->cq_count; i++)
+ vnic_cq_free(&fnic->cq[i]);
+
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_free(&fnic->intr[i]);
+}
+
+int fnic_alloc_vnic_resources(struct fnic *fnic)
+{
+ enum vnic_dev_intr_mode intr_mode;
+ unsigned int mask_on_assertion;
+ unsigned int interrupt_offset;
+ unsigned int error_interrupt_enable;
+ unsigned int error_interrupt_offset;
+ unsigned int i, cq_index;
+ unsigned int wq_copy_cq_desc_count;
+ int err;
+
+ intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
+
+ shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+ intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+ intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+ intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
+ "MSI-X" : "unknown");
+
+ shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
+ "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
+ fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
+ fnic->rq_count, fnic->cq_count, fnic->intr_count);
+
+ /* Allocate Raw WQ used for FCS frames */
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
+ fnic->config.wq_enet_desc_count,
+ sizeof(struct wq_enet_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* Allocate Copy WQs used for SCSI IOs */
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
+ (fnic->raw_wq_count + i),
+ fnic->config.wq_copy_desc_count,
+ sizeof(struct fcpio_host_req));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* RQ for receiving FCS frames */
+ for (i = 0; i < fnic->rq_count; i++) {
+ err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
+ fnic->config.rq_desc_count,
+ sizeof(struct rq_enet_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* CQ for each RQ */
+ for (i = 0; i < fnic->rq_count; i++) {
+ cq_index = i;
+ err = vnic_cq_alloc(fnic->vdev,
+ &fnic->cq[cq_index], cq_index,
+ fnic->config.rq_desc_count,
+ sizeof(struct cq_enet_rq_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* CQ for each WQ */
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ cq_index = fnic->rq_count + i;
+ err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
+ fnic->config.wq_enet_desc_count,
+ sizeof(struct cq_enet_wq_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* CQ for each COPY WQ */
+ wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ cq_index = fnic->raw_wq_count + fnic->rq_count + i;
+ err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
+ cq_index,
+ wq_copy_cq_desc_count,
+ sizeof(struct fcpio_fw_req));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ for (i = 0; i < fnic->intr_count; i++) {
+ err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
+ RES_TYPE_INTR_PBA_LEGACY, 0);
+
+ if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to hook legacy pba resource\n");
+ err = -ENODEV;
+ goto err_out_cleanup;
+ }
+
+ /*
+ * Init RQ/WQ resources.
+ *
+ * RQ[0 to n-1] point to CQ[0 to n-1]
+ * WQ[0 to m-1] point to CQ[n to n+m-1]
+ * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
+ *
+ * Note for copy wq we always initialize with cq_index = 0
+ *
+ * Error interrupt is not enabled for MSI.
+ */
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSIX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = fnic->err_intr_offset;
+ break;
+ default:
+ error_interrupt_enable = 0;
+ error_interrupt_offset = 0;
+ break;
+ }
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ cq_index = i;
+ vnic_rq_init(&fnic->rq[i],
+ cq_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ cq_index = i + fnic->rq_count;
+ vnic_wq_init(&fnic->wq[i],
+ cq_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ vnic_wq_copy_init(&fnic->wq_copy[i],
+ 0 /* cq_index 0 - always */,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < fnic->cq_count; i++) {
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ interrupt_offset = i;
+ break;
+ default:
+ interrupt_offset = 0;
+ break;
+ }
+
+ vnic_cq_init(&fnic->cq[i],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 1 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ interrupt_offset,
+ 0 /* cq_message_addr */);
+ }
+
+ /*
+ * Init INTR resources
+ *
+ * mask_on_assertion is not used for INTx due to the level-
+ * triggered nature of INTx
+ */
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_MSIX:
+ mask_on_assertion = 1;
+ break;
+ default:
+ mask_on_assertion = 0;
+ break;
+ }
+
+ for (i = 0; i < fnic->intr_count; i++) {
+ vnic_intr_init(&fnic->intr[i],
+ fnic->config.intr_timer,
+ fnic->config.intr_timer_type,
+ mask_on_assertion);
+ }
+
+ /* init the stats memory by making the first call here */
+ err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vnic_dev_stats_dump failed - x%x\n", err);
+ goto err_out_cleanup;
+ }
+
+ /* Clear LIF stats */
+ vnic_dev_stats_clear(fnic->vdev);
+
+ return 0;
+
+err_out_cleanup:
+ fnic_free_vnic_resources(fnic);
+
+ return err;
+}
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
new file mode 100644
index 000000000..ef8aaf215
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_RES_H_
+#define _FNIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "fnic_io.h"
+#include "fcpio.h"
+#include "vnic_wq_copy.h"
+#include "vnic_cq_copy.h"
+
+static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, unsigned int fc_eof,
+ int vlan_tag_insert,
+ unsigned int vlan_tag,
+ int cq_entry, int sop, int eop)
+{
+ struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+ wq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ (u16)len,
+ 0, /* mss_or_csum_offset */
+ (u16)fc_eof,
+ 0, /* offload_mode */
+ (u8)eop, (u8)cq_entry,
+ 1, /* fcoe_encap */
+ (u8)vlan_tag_insert,
+ (u16)vlan_tag,
+ 0 /* loopback */);
+
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len,
+ int vlan_tag_insert,
+ unsigned int vlan_tag,
+ int cq_entry)
+{
+ struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+ wq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ (u16)len,
+ 0, /* mss_or_csum_offset */
+ 0, /* fc_eof */
+ 0, /* offload_mode */
+ 1, /* eop */
+ (u8)cq_entry,
+ 0, /* fcoe_encap */
+ (u8)vlan_tag_insert,
+ (u16)vlan_tag,
+ 0 /* loopback */);
+
+ vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
+}
+
+static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
+ u32 req_id,
+ u32 lunmap_id, u8 spl_flags,
+ u32 sgl_cnt, u32 sense_len,
+ u64 sgl_addr, u64 sns_addr,
+ u8 crn, u8 pri_ta,
+ u8 flags, u8 *scsi_cdb,
+ u8 cdb_len,
+ u32 data_len, u8 *lun,
+ u32 d_id, u16 mss,
+ u32 ratov, u32 edtov)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
+ desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
+ desc->u.icmnd_16._resvd0[0] = 0; /* reserved */
+ desc->u.icmnd_16._resvd0[1] = 0; /* reserved */
+ desc->u.icmnd_16._resvd0[2] = 0; /* reserved */
+ desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */
+ desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
+ desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */
+ desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
+ desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/
+ desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
+ desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
+ desc->u.icmnd_16.flags = flags; /* command flags */
+ memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16);
+ memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */
+ desc->u.icmnd_16.data_len = data_len; /* length of data expected */
+ memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
+ desc->u.icmnd_16._resvd2 = 0; /* reserved */
+ hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */
+ desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */
+ desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
+ desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
+ u32 req_id, u32 lunmap_id,
+ u32 tm_req, u32 tm_id, u8 *lun,
+ u32 d_id, u32 r_a_tov,
+ u32 e_d_tov)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
+ desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */
+ desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */
+ desc->u.itmf._resvd = 0;
+ memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */
+ desc->u.itmf._resvd1 = 0;
+ hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */
+ desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */
+ desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
+ u32 req_id, u8 format,
+ u32 s_id, u8 *gw_mac)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.flogi_reg.format = format;
+ desc->u.flogi_reg._resvd = 0;
+ hton24(desc->u.flogi_reg.s_id, s_id);
+ memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq,
+ u32 req_id, u32 s_id,
+ u8 *fcf_mac, u8 *ha_mac,
+ u32 r_a_tov, u32 e_d_tov)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.flogi_fip_reg._resvd0 = 0;
+ hton24(desc->u.flogi_fip_reg.s_id, s_id);
+ memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN);
+ desc->u.flogi_fip_reg._resvd1 = 0;
+ desc->u.flogi_fip_reg.r_a_tov = r_a_tov;
+ desc->u.flogi_fip_reg.e_d_tov = e_d_tov;
+ memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN);
+ desc->u.flogi_fip_reg._resvd2 = 0;
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
+ u32 req_id)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
+ u32 req_id, u64 lunmap_addr,
+ u32 lunmap_len)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */
+ desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
+ void *os_buf, dma_addr_t dma_addr,
+ u16 len)
+{
+ struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+
+ rq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ RQ_ENET_TYPE_ONLY_SOP,
+ (u16)len);
+
+ vnic_rq_post(rq, os_buf, 0, dma_addr, len);
+}
+
+
+struct fnic;
+
+int fnic_get_vnic_config(struct fnic *);
+int fnic_alloc_vnic_resources(struct fnic *);
+void fnic_free_vnic_resources(struct fnic *);
+void fnic_get_res_counts(struct fnic *);
+int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
+ u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en);
+
+#endif /* _FNIC_RES_H_ */
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
new file mode 100644
index 000000000..155b286f1
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -0,0 +1,2710 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+const char *fnic_state_str[] = {
+ [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
+ [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
+ [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
+ [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
+};
+
+static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
+ [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
+ [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
+ [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
+ [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
+};
+
+static const char *fcpio_status_str[] = {
+ [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
+ [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
+ [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
+ [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]",
+ [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
+ [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
+ [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
+ [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
+ [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
+ [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
+ [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
+ [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
+ [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
+ [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
+ [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
+ [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
+ [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
+ [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
+ [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
+};
+
+const char *fnic_state_to_str(unsigned int state)
+{
+ if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
+ return "unknown";
+
+ return fnic_state_str[state];
+}
+
+static const char *fnic_ioreq_state_to_str(unsigned int state)
+{
+ if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
+ !fnic_ioreq_state_str[state])
+ return "unknown";
+
+ return fnic_ioreq_state_str[state];
+}
+
+static const char *fnic_fcpio_status_to_str(unsigned int status)
+{
+ if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
+ return "unknown";
+
+ return fcpio_status_str[status];
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+
+static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
+ struct scsi_cmnd *sc)
+{
+ u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+
+ return &fnic->io_req_lock[hash];
+}
+
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+ int tag)
+{
+ return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
+/*
+ * Unmap the data buffer and sense buffer for an io_req,
+ * also unmap and free the device-private scatter/gather list.
+ */
+static void fnic_release_ioreq_buf(struct fnic *fnic,
+ struct fnic_io_req *io_req,
+ struct scsi_cmnd *sc)
+{
+ if (io_req->sgl_list_pa)
+ pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
+ PCI_DMA_TODEVICE);
+ scsi_dma_unmap(sc);
+
+ if (io_req->sgl_cnt)
+ mempool_free(io_req->sgl_list_alloc,
+ fnic->io_sgl_pool[io_req->sgl_type]);
+ if (io_req->sense_buf_pa)
+ pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+/* Free up Copy Wq descriptors. Called with copy_wq lock held */
+static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
+{
+ /* if no Ack received from firmware, then nothing to clean */
+ if (!fnic->fw_ack_recd[0])
+ return 1;
+
+ /*
+ * Update desc_available count based on number of freed descriptors
+ * Account for wraparound
+ */
+ if (wq->to_clean_index <= fnic->fw_ack_index[0])
+ wq->ring.desc_avail += (fnic->fw_ack_index[0]
+ - wq->to_clean_index + 1);
+ else
+ wq->ring.desc_avail += (wq->ring.desc_count
+ - wq->to_clean_index
+ + fnic->fw_ack_index[0] + 1);
+
+ /*
+ * just bump clean index to ack_index+1 accounting for wraparound
+ * this will essentially free up all descriptors between
+ * to_clean_index and fw_ack_index, both inclusive
+ */
+ wq->to_clean_index =
+ (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
+
+ /* we have processed the acks received so far */
+ fnic->fw_ack_recd[0] = 0;
+ return 0;
+}
+
+
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+ unsigned long clearbits)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+ int sh_locked = spin_is_locked(host->host_lock);
+ unsigned long flags = 0;
+
+ if (!sh_locked)
+ spin_lock_irqsave(host->host_lock, flags);
+
+ if (clearbits)
+ fnic->state_flags &= ~st_flags;
+ else
+ fnic->state_flags |= st_flags;
+
+ if (!sh_locked)
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return;
+}
+
+
+/*
+ * fnic_fw_reset_handler
+ * Routine to send reset msg to fw
+ */
+int fnic_fw_reset_handler(struct fnic *fnic)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ int ret = 0;
+ unsigned long flags;
+
+ /* indicate fwreset to io path */
+ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
+ skb_queue_purge(&fnic->frame_queue);
+ skb_queue_purge(&fnic->tx_queue);
+
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+ schedule_timeout(msecs_to_jiffies(1));
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq))
+ ret = -EAGAIN;
+ else {
+ fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(
+ &fnic->fnic_stats.fw_stats.active_fw_reqs));
+ }
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
+ if (!ret) {
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issued fw reset\n");
+ } else {
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Failed to issue fw reset\n");
+ }
+
+ return ret;
+}
+
+
+/*
+ * fnic_flogi_reg_handler
+ * Routine to send flogi register msg to fw
+ */
+int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ enum fcpio_flogi_reg_format_type format;
+ struct fc_lport *lp = fnic->lport;
+ u8 gw_mac[ETH_ALEN];
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq)) {
+ ret = -EAGAIN;
+ goto flogi_reg_ioreq_end;
+ }
+
+ if (fnic->ctlr.map_dest) {
+ memset(gw_mac, 0xff, ETH_ALEN);
+ format = FCPIO_FLOGI_REG_DEF_DEST;
+ } else {
+ memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
+ format = FCPIO_FLOGI_REG_GW_DEST;
+ }
+
+ if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
+ fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
+ fc_id, gw_mac,
+ fnic->data_src_addr,
+ lp->r_a_tov, lp->e_d_tov);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
+ fc_id, fnic->data_src_addr, gw_mac);
+ } else {
+ fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
+ format, fc_id, gw_mac);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "FLOGI reg issued fcid %x map %d dest %pM\n",
+ fc_id, fnic->ctlr.map_dest, gw_mac);
+ }
+
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
+flogi_reg_ioreq_end:
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ return ret;
+}
+
+/*
+ * fnic_queue_wq_copy_desc
+ * Routine to enqueue a wq copy desc
+ */
+static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
+ struct vnic_wq_copy *wq,
+ struct fnic_io_req *io_req,
+ struct scsi_cmnd *sc,
+ int sg_count)
+{
+ struct scatterlist *sg;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct host_sg_desc *desc;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+ unsigned int i;
+ unsigned long intr_flags;
+ int flags;
+ u8 exch_flags;
+ struct scsi_lun fc_lun;
+
+ if (sg_count) {
+ /* For each SGE, create a device desc entry */
+ desc = io_req->sgl_list;
+ for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
+ desc->addr = cpu_to_le64(sg_dma_address(sg));
+ desc->len = cpu_to_le32(sg_dma_len(sg));
+ desc->_resvd = 0;
+ desc++;
+ }
+
+ io_req->sgl_list_pa = pci_map_single
+ (fnic->pdev,
+ io_req->sgl_list,
+ sizeof(io_req->sgl_list[0]) * sg_count,
+ PCI_DMA_TODEVICE);
+ }
+
+ io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+ sc->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ /* Enqueue the descriptor in the Copy WQ */
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_queue_wq_copy_desc failure - no descriptors\n");
+ atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ flags = 0;
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ flags = FCPIO_ICMND_RDDATA;
+ else if (sc->sc_data_direction == DMA_TO_DEVICE)
+ flags = FCPIO_ICMND_WRDATA;
+
+ exch_flags = 0;
+ if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
+ (rp->flags & FC_RP_FLAGS_RETRY))
+ exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
+
+ fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+ 0, exch_flags, io_req->sgl_cnt,
+ SCSI_SENSE_BUFFERSIZE,
+ io_req->sgl_list_pa,
+ io_req->sense_buf_pa,
+ 0, /* scsi cmd ref, always 0 */
+ FCPIO_ICMND_PTA_SIMPLE,
+ /* scsi pri and tag */
+ flags, /* command flags */
+ sc->cmnd, sc->cmd_len,
+ scsi_bufflen(sc),
+ fc_lun.scsi_lun, io_req->port_id,
+ rport->maxframe_size, rp->r_a_tov,
+ rp->e_d_tov);
+
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ return 0;
+}
+
+/*
+ * fnic_queuecommand
+ * Routine to send a scsi cdb
+ * Called with host_lock held and interrupts disabled.
+ */
+static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+{
+ struct fc_lport *lp = shost_priv(sc->device->host);
+ struct fc_rport *rport;
+ struct fnic_io_req *io_req = NULL;
+ struct fnic *fnic = lport_priv(lp);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct vnic_wq_copy *wq;
+ int ret;
+ u64 cmd_trace;
+ int sg_count = 0;
+ unsigned long flags = 0;
+ unsigned long ptr;
+ struct fc_rport_priv *rdata;
+ spinlock_t *io_lock = NULL;
+
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ rport = starget_to_rport(scsi_target(sc->device));
+ ret = fc_remote_port_chkready(rport);
+ if (ret) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = ret;
+ done(sc);
+ return 0;
+ }
+
+ rdata = lp->tt.rport_lookup(lp, rport->port_id);
+ if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "returning IO as rport is removed\n");
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = DID_NO_CONNECT;
+ done(sc);
+ return 0;
+ }
+
+ if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ atomic_inc(&fnic->in_flight);
+
+ /*
+ * Release host lock, use driver resource specific locks from here.
+ * Don't re-enable interrupts in case they were disabled prior to the
+ * caller disabling them.
+ */
+ spin_unlock(lp->host->host_lock);
+ CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+
+ /* Get a new io_req for this SCSI IO */
+ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
+ if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ memset(io_req, 0, sizeof(*io_req));
+
+ /* Map the data buffer */
+ sg_count = scsi_dma_map(sc);
+ if (sg_count < 0) {
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, sc->cmnd[0],
+ sg_count, CMD_STATE(sc));
+ mempool_free(io_req, fnic->io_req_pool);
+ goto out;
+ }
+
+ /* Determine the type of scatter/gather list we need */
+ io_req->sgl_cnt = sg_count;
+ io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
+ if (sg_count > FNIC_DFLT_SG_DESC_CNT)
+ io_req->sgl_type = FNIC_SGL_CACHE_MAX;
+
+ if (sg_count) {
+ io_req->sgl_list =
+ mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
+ GFP_ATOMIC);
+ if (!io_req->sgl_list) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ scsi_dma_unmap(sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ goto out;
+ }
+
+ /* Cache sgl list allocated address before alignment */
+ io_req->sgl_list_alloc = io_req->sgl_list;
+ ptr = (unsigned long) io_req->sgl_list;
+ if (ptr % FNIC_SG_DESC_ALIGN) {
+ io_req->sgl_list = (struct host_sg_desc *)
+ (((unsigned long) ptr
+ + FNIC_SG_DESC_ALIGN - 1)
+ & ~(FNIC_SG_DESC_ALIGN - 1));
+ }
+ }
+
+ /*
+ * Will acquire lock defore setting to IO initialized.
+ */
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ /* initialize rest of io_req */
+ io_req->port_id = rport->port_id;
+ io_req->start_time = jiffies;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+ CMD_SP(sc) = (char *)io_req;
+ CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
+ sc->scsi_done = done;
+
+ /* create copy wq desc and enqueue it */
+ wq = &fnic->wq_copy[0];
+ ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
+ if (ret) {
+ /*
+ * In case another thread cancelled the request,
+ * refetch the pointer under the lock.
+ */
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, 0, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ CMD_SP(sc) = NULL;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ spin_unlock_irqrestore(io_lock, flags);
+ if (io_req) {
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+ atomic_dec(&fnic->in_flight);
+ /* acquire host lock before returning to SCSI */
+ spin_lock(lp->host->host_lock);
+ return ret;
+ } else {
+ atomic64_inc(&fnic_stats->io_stats.active_ios);
+ atomic64_inc(&fnic_stats->io_stats.num_ios);
+ if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+ atomic64_read(&fnic_stats->io_stats.max_active_ios))
+ atomic64_set(&fnic_stats->io_stats.max_active_ios,
+ atomic64_read(&fnic_stats->io_stats.active_ios));
+
+ /* REVISIT: Use per IO lock in the final code */
+ CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
+ }
+out:
+ cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+ sc->cmnd[5]);
+
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, io_req,
+ sg_count, cmd_trace,
+ (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+
+ /* if only we issued IO, will we have the io lock */
+ if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+ spin_unlock_irqrestore(io_lock, flags);
+
+ atomic_dec(&fnic->in_flight);
+ /* acquire host lock before returning to SCSI */
+ spin_lock(lp->host->host_lock);
+ return ret;
+}
+
+DEF_SCSI_QCMD(fnic_queuecommand)
+
+/*
+ * fnic_fcpio_fw_reset_cmpl_handler
+ * Routine to handle fw reset completion
+ */
+static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ int ret = 0;
+ unsigned long flags;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+
+ atomic64_inc(&reset_stats->fw_reset_completions);
+
+ /* Clean up all outstanding io requests */
+ fnic_cleanup_io(fnic, SCSI_NO_TAG);
+
+ atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+ atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ /* fnic should be in FC_TRANS_ETH_MODE */
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
+ /* Check status of reset completion */
+ if (!hdr_status) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "reset cmpl success\n");
+ /* Ready to send flogi out */
+ fnic->state = FNIC_IN_ETH_MODE;
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic fw_reset : failed %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+
+ /*
+ * Unable to change to eth mode, cannot send out flogi
+ * Change state to fc mode, so that subsequent Flogi
+ * requests from libFC will cause more attempts to
+ * reset the firmware. Free the cached flogi
+ */
+ fnic->state = FNIC_IN_FC_MODE;
+ atomic64_inc(&reset_stats->fw_reset_failures);
+ ret = -1;
+ }
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Unexpected state %s while processing"
+ " reset cmpl\n", fnic_state_to_str(fnic->state));
+ atomic64_inc(&reset_stats->fw_reset_failures);
+ ret = -1;
+ }
+
+ /* Thread removing device blocks till firmware reset is complete */
+ if (fnic->remove_wait)
+ complete(fnic->remove_wait);
+
+ /*
+ * If fnic is being removed, or fw reset failed
+ * free the flogi frame. Else, send it out
+ */
+ if (fnic->remove_wait || ret) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ skb_queue_purge(&fnic->tx_queue);
+ goto reset_cmpl_handler_end;
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fnic_flush_tx(fnic);
+
+ reset_cmpl_handler_end:
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
+ return ret;
+}
+
+/*
+ * fnic_fcpio_flogi_reg_cmpl_handler
+ * Routine to handle flogi register completion
+ */
+static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ int ret = 0;
+ unsigned long flags;
+
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+
+ /* Update fnic state based on status of flogi reg completion */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
+
+ /* Check flogi registration completion status */
+ if (!hdr_status) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "flog reg succeeded\n");
+ fnic->state = FNIC_IN_FC_MODE;
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic flogi reg :failed %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ fnic->state = FNIC_IN_ETH_MODE;
+ ret = -1;
+ }
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unexpected fnic state %s while"
+ " processing flogi reg completion\n",
+ fnic_state_to_str(fnic->state));
+ ret = -1;
+ }
+
+ if (!ret) {
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto reg_cmpl_handler_end;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->frame_work);
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ }
+
+reg_cmpl_handler_end:
+ return ret;
+}
+
+static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
+ u16 request_out)
+{
+ if (wq->to_clean_index <= wq->to_use_index) {
+ /* out of range, stale request_out index */
+ if (request_out < wq->to_clean_index ||
+ request_out >= wq->to_use_index)
+ return 0;
+ } else {
+ /* out of range, stale request_out index */
+ if (request_out < wq->to_clean_index &&
+ request_out >= wq->to_use_index)
+ return 0;
+ }
+ /* request_out index is in range */
+ return 1;
+}
+
+
+/*
+ * Mark that ack received and store the Ack index. If there are multiple
+ * acks received before Tx thread cleans it up, the latest value will be
+ * used which is correct behavior. This state should be in the copy Wq
+ * instead of in the fnic
+ */
+static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
+ unsigned int cq_index,
+ struct fcpio_fw_req *desc)
+{
+ struct vnic_wq_copy *wq;
+ u16 request_out = desc->u.ack.request_out;
+ unsigned long flags;
+ u64 *ox_id_tag = (u64 *)(void *)desc;
+
+ /* mark the ack state */
+ wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
+ if (is_ack_index_in_range(wq, request_out)) {
+ fnic->fw_ack_index[0] = request_out;
+ fnic->fw_ack_recd[0] = 1;
+ } else
+ atomic64_inc(
+ &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ FNIC_TRACE(fnic_fcpio_ack_handler,
+ fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ ox_id_tag[4], ox_id_tag[5]);
+}
+
+/*
+ * fnic_fcpio_icmnd_cmpl_handler
+ * Routine to handle icmnd completions
+ */
+static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ u32 id;
+ u64 xfer_len = 0;
+ struct fcpio_icmnd_cmpl *icmnd_cmpl;
+ struct fnic_io_req *io_req;
+ struct scsi_cmnd *sc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned long flags;
+ spinlock_t *io_lock;
+ u64 cmd_trace;
+ unsigned long start_time;
+
+ /* Decode the cmpl description to get the io_req id */
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+ fcpio_tag_id_dec(&tag, &id);
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
+
+ if (id >= fnic->fnic_max_tag_id) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+
+ sc = scsi_host_find_tag(fnic->lport->host, id);
+ WARN_ON_ONCE(!sc);
+ if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl sc is null - "
+ "hdr status = %s tag = 0x%x desc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, desc);
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ fnic->lport->host->host_no, id,
+ ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+ (u64)icmnd_cmpl->_resvd0[0]),
+ ((u64)hdr_status << 16 |
+ (u64)icmnd_cmpl->scsi_status << 8 |
+ (u64)icmnd_cmpl->flags), desc,
+ (u64)icmnd_cmpl->residual, 0);
+ return;
+ }
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ WARN_ON_ONCE(!io_req);
+ if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
+ CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
+ return;
+ }
+ start_time = io_req->start_time;
+
+ /* firmware completed the io */
+ io_req->io_completed = 1;
+
+ /*
+ * if SCSI-ML has already issued abort on this command,
+ * ignore completion of the IO. The abts path will clean it up
+ */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl ABTS pending hdr status = %s "
+ "sc 0x%p scsi_status %x residual %d\n",
+ fnic_fcpio_status_to_str(hdr_status), sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
+ break;
+ case FCPIO_ABORTED:
+ CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ break;
+ default:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc);
+ break;
+ }
+ return;
+ }
+
+ /* Mark the IO as complete */
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
+
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
+ xfer_len = scsi_bufflen(sc);
+ scsi_set_resid(sc, icmnd_cmpl->residual);
+
+ if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
+ xfer_len -= icmnd_cmpl->residual;
+
+ if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+ atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
+ break;
+
+ case FCPIO_TIMEOUT: /* request was timed out */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
+ sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_ABORTED: /* request was aborted */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+ atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
+ scsi_set_resid(sc, icmnd_cmpl->residual);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
+ atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
+ sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
+ atomic64_inc(&fnic_stats->io_stats.io_not_found);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
+ atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_FW_ERR: /* request was terminated due fw error */
+ atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_INVALID_HEADER: /* header contains invalid data */
+ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
+ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+ default:
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+ }
+
+ if (hdr_status != FCPIO_SUCCESS) {
+ atomic64_inc(&fnic_stats->io_stats.io_failures);
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
+ /* Break link with the SCSI command */
+ CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+
+ mempool_free(io_req, fnic->io_req_pool);
+
+ cmd_trace = ((u64)hdr_status << 56) |
+ (u64)icmnd_cmpl->scsi_status << 48 |
+ (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+ (u64)icmnd_cmpl->_resvd0[0] << 48 |
+ jiffies_to_msecs(jiffies - start_time)),
+ desc, cmd_trace,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+ fnic->lport->host_stats.fcp_input_requests++;
+ fnic->fcp_input_bytes += xfer_len;
+ } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ fnic->lport->host_stats.fcp_output_requests++;
+ fnic->fcp_output_bytes += xfer_len;
+ } else
+ fnic->lport->host_stats.fcp_control_requests++;
+
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ /* Call SCSI completion function to complete the IO */
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+}
+
+/* fnic_fcpio_itmf_cmpl_handler
+ * Routine to handle itmf completions
+ */
+static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ u32 id;
+ struct scsi_cmnd *sc;
+ struct fnic_io_req *io_req;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+ unsigned long flags;
+ spinlock_t *io_lock;
+ unsigned long start_time;
+
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+ fcpio_tag_id_dec(&tag, &id);
+
+ if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+
+ sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ WARN_ON_ONCE(!sc);
+ if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+ fnic_fcpio_status_to_str(hdr_status), id);
+ return;
+ }
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ WARN_ON_ONCE(!io_req);
+ if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
+ return;
+ }
+ start_time = io_req->start_time;
+
+ if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+ /* Abort and terminate completion of device reset req */
+ /* REVISIT : Add asserts about various flags */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset abts cmpl recd. id %x status %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else if (id & FNIC_TAG_ABORT) {
+ /* Completion of abort cmd */
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ break;
+ case FCPIO_TIMEOUT:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_fw_timeouts);
+ else
+ atomic64_inc(
+ &term_stats->terminate_fw_timeouts);
+ break;
+ case FCPIO_IO_NOT_FOUND:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_io_not_found);
+ else
+ atomic64_inc(
+ &term_stats->terminate_io_not_found);
+ break;
+ default:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_failures);
+ else
+ atomic64_inc(
+ &term_stats->terminate_failures);
+ break;
+ }
+ if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+ /* This is a late completion. Ignore it */
+ spin_unlock_irqrestore(io_lock, flags);
+ return;
+ }
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "abts cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+
+ /*
+ * If scsi_eh thread is blocked waiting for abts to complete,
+ * signal completion to it. IO will be cleaned in the thread
+ * else clean it in this context
+ */
+ if (io_req->abts_done) {
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "abts cmpl, completing IO\n");
+ CMD_SP(sc) = NULL;
+ sc->result = (DID_ERROR << 16);
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
+ sc->scsi_done(sc);
+ }
+ }
+
+ } else if (id & FNIC_TAG_DEV_RST) {
+ /* Completion of device reset */
+ CMD_LR_STATUS(sc) = hdr_status;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Terminate pending "
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ /* Need to wait for terminate completion */
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd after time out. "
+ "id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ if (io_req->dr_done)
+ complete(io_req->dr_done);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ } else {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unexpected itmf io state %s tag %x\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+
+}
+
+/*
+ * fnic_fcpio_cmpl_handler
+ * Routine to service the cq for wq_copy
+ */
+static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
+ unsigned int cq_index,
+ struct fcpio_fw_req *desc)
+{
+ struct fnic *fnic = vnic_dev_priv(vdev);
+
+ switch (desc->hdr.type) {
+ case FCPIO_ICMND_CMPL: /* fw completed a command */
+ case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+ case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+ case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+ case FCPIO_RESET_CMPL: /* fw completed reset */
+ atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ break;
+ default:
+ break;
+ }
+
+ switch (desc->hdr.type) {
+ case FCPIO_ACK: /* fw copied copy wq desc to its queue */
+ fnic_fcpio_ack_handler(fnic, cq_index, desc);
+ break;
+
+ case FCPIO_ICMND_CMPL: /* fw completed a command */
+ fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
+ break;
+
+ case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+ fnic_fcpio_itmf_cmpl_handler(fnic, desc);
+ break;
+
+ case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+ case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+ fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ break;
+
+ case FCPIO_RESET_CMPL: /* fw completed reset */
+ fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ break;
+
+ default:
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "firmware completion type %d\n",
+ desc->hdr.type);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * fnic_wq_copy_cmpl_handler
+ * Routine to process wq copy
+ */
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
+{
+ unsigned int wq_work_done = 0;
+ unsigned int i, cq_index;
+ unsigned int cur_work_done;
+
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ cq_index = i + fnic->raw_wq_count + fnic->rq_count;
+ cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
+ fnic_fcpio_cmpl_handler,
+ copy_work_to_do);
+ wq_work_done += cur_work_done;
+ }
+ return wq_work_done;
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
+{
+ int i;
+ struct fnic_io_req *io_req;
+ unsigned long flags = 0;
+ struct scsi_cmnd *sc;
+ spinlock_t *io_lock;
+ unsigned long start_time = 0;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+
+ for (i = 0; i < fnic->fnic_max_tag_id; i++) {
+ if (i == exclude_id)
+ continue;
+
+ io_lock = fnic_io_lock_tag(fnic, i);
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(fnic->lport->host, i);
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will be here only when FW completes reset
+ * without sending completions for outstanding ios.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto cleanup_scsi_cmd;
+ }
+
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * If there is a scsi_cmnd associated with this io_req, then
+ * free the corresponding state
+ */
+ start_time = io_req->start_time;
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+
+cleanup_scsi_cmd:
+ sc->result = DID_TRANSPORT_DISRUPTED << 16;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ __func__, (jiffies - start_time));
+
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ /* Complete the command to SCSI */
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, i, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ sc->scsi_done(sc);
+ }
+ }
+}
+
+void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *desc)
+{
+ u32 id;
+ struct fnic *fnic = vnic_dev_priv(wq->vdev);
+ struct fnic_io_req *io_req;
+ struct scsi_cmnd *sc;
+ unsigned long flags;
+ spinlock_t *io_lock;
+ unsigned long start_time = 0;
+
+ /* get the tag reference */
+ fcpio_tag_id_dec(&desc->hdr.tag, &id);
+ id &= FNIC_TAG_MASK;
+
+ if (id >= fnic->fnic_max_tag_id)
+ return;
+
+ sc = scsi_host_find_tag(fnic->lport->host, id);
+ if (!sc)
+ return;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ /* Get the IO context which this desc refers to */
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ /* fnic interrupts are turned off by now */
+
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto wq_copy_cleanup_scsi_cmd;
+ }
+
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ start_time = io_req->start_time;
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+
+wq_copy_cleanup_scsi_cmd:
+ sc->result = DID_NO_CONNECT << 16;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
+ " DID_NO_CONNECT\n");
+
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ sc->scsi_done(sc);
+ }
+}
+
+static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
+ u32 task_req, u8 *fc_lun,
+ struct fnic_io_req *io_req)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq)) {
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_queue_abort_io_req: failure: no descriptors\n");
+ atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
+ return 1;
+ }
+ fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
+ 0, task_req, tag, fc_lun, io_req->port_id,
+ fnic->config.ra_tov, fnic->config.ed_tov);
+
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+
+ return 0;
+}
+
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+{
+ int tag;
+ int abt_tag;
+ int term_cnt = 0;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ struct scsi_cmnd *sc;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+ struct scsi_lun fc_lun;
+ enum fnic_ioreq_state old_ioreq_state;
+
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic_rport_exch_reset called portid 0x%06x\n",
+ port_id);
+
+ if (fnic->in_remove)
+ return;
+
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+ abt_tag = tag;
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || io_req->port_id != port_id) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to rport that went away
+ */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n",
+ sc);
+ }
+
+ BUG_ON(io_req->abts_done);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_reset_exch: Issuing abts\n");
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ /*
+ * Revert the cmd state back to old state, if
+ * it hasn't changed in between. This cmd will get
+ * aborted later by scsi_eh, or cleaned up during
+ * lun reset
+ */
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
+ }
+ }
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);
+
+}
+
+void fnic_terminate_rport_io(struct fc_rport *rport)
+{
+ int tag;
+ int abt_tag;
+ int term_cnt = 0;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ struct scsi_cmnd *sc;
+ struct scsi_lun fc_lun;
+ struct fc_rport_libfc_priv *rdata;
+ struct fc_lport *lport;
+ struct fnic *fnic;
+ struct fc_rport *cmd_rport;
+ struct reset_stats *reset_stats;
+ struct terminate_stats *term_stats;
+ enum fnic_ioreq_state old_ioreq_state;
+
+ if (!rport) {
+ printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+ return;
+ }
+ rdata = rport->dd_data;
+
+ if (!rdata) {
+ printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+ return;
+ }
+ lport = rdata->local_port;
+
+ if (!lport) {
+ printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+ return;
+ }
+ fnic = lport_priv(lport);
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host, "fnic_terminate_rport_io called"
+ " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ rport->port_name, rport->node_name, rport,
+ rport->port_id);
+
+ if (fnic->in_remove)
+ return;
+
+ reset_stats = &fnic->fnic_stats.reset_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+ abt_tag = tag;
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ cmd_rport = starget_to_rport(scsi_target(sc->device));
+ if (rport != cmd_rport) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || rport != cmd_rport) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to rport that went away
+ */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_terminate_rport_io: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_terminate_rport_io "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+ }
+
+ BUG_ON(io_req->abts_done);
+
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic_terminate_rport_io: Issuing abts\n");
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ /*
+ * Revert the cmd state back to old state, if
+ * it hasn't changed in between. This cmd will get
+ * aborted later by scsi_eh, or cleaned up during
+ * lun reset
+ */
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
+ }
+ }
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);
+
+}
+
+/*
+ * This function is exported to SCSI for sending abort cmnds.
+ * A SCSI IO is represented by a io_req in the driver.
+ * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
+ */
+int fnic_abort_cmd(struct scsi_cmnd *sc)
+{
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fc_rport *rport;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ unsigned long start_time = 0;
+ int ret = SUCCESS;
+ u32 task_req = 0;
+ struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct abort_stats *abts_stats;
+ struct terminate_stats *term_stats;
+ enum fnic_ioreq_state old_ioreq_state;
+ int tag;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+
+ /* Wait for rport to unblock */
+ fc_block_scsi_eh(sc);
+
+ /* Get local-port, check ready and link up */
+ lp = shost_priv(sc->device->host);
+
+ fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ abts_stats = &fnic->fnic_stats.abts_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
+ rport = starget_to_rport(scsi_target(sc->device));
+ tag = sc->request->tag;
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
+ rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+
+ if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
+ /*
+ * Avoid a race between SCSI issuing the abort and the device
+ * completing the command.
+ *
+ * If the command is already completed by the fw cmpl code,
+ * we just return SUCCESS from here. This means that the abort
+ * succeeded. In the SCSI ML, since the timeout for command has
+ * happened, the completion wont actually complete the command
+ * and it will be considered as an aborted command
+ *
+ * The CMD_SP will not be cleared except while holding io_req_lock.
+ */
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ io_req->abts_done = &tm_done;
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto wait_pending;
+ }
+ /*
+ * Command is still pending, need to abort it
+ * If the firmware completes the command after this point,
+ * the completion wont be done till mid-layer, since abort
+ * has already started.
+ */
+ old_ioreq_state = CMD_STATE(sc);
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * Check readiness of the remote port. If the path to remote
+ * port is up, then send abts to the remote port to terminate
+ * the IO. Else, just locally terminate the IO in the firmware
+ */
+ if (fc_remote_port_chkready(rport) == 0)
+ task_req = FCPIO_ITMF_ABT_TASK;
+ else {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ task_req = FCPIO_ITMF_ABT_TASK_TERM;
+ }
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
+ fc_lun.scsi_lun, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ io_req->abts_done = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ atomic64_inc(&fnic_stats->abts_stats.aborts);
+ } else {
+ CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ atomic64_inc(&fnic_stats->term_stats.terminates);
+ }
+
+ /*
+ * We queued an abort IO, wait for its completion.
+ * Once the firmware completes the abort command, it will
+ * wake up this thread.
+ */
+ wait_pending:
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies
+ (2 * fnic->config.ra_tov +
+ fnic->config.ed_tov));
+
+ /* Check the abort status */
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+ io_req->abts_done = NULL;
+
+ /* fw did not complete abort, timed out */
+ if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ spin_unlock_irqrestore(io_lock, flags);
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
+ atomic64_inc(&abts_stats->abort_drv_timeouts);
+ } else {
+ atomic64_inc(&term_stats->terminate_drv_timeouts);
+ }
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
+ /* IO out of order */
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issuing Host reset due to out of order IO\n");
+
+ if (fnic_host_reset(sc) == FAILED) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_host_reset failed.\n");
+ }
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
+ /*
+ * firmware completed the abort, check the status,
+ * free the io_req irrespective of failure or success
+ */
+ if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
+ ret = FAILED;
+
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ start_time = io_req->start_time;
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+
+fnic_abort_cmd_end:
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Returning from abort cmd type %x %s\n", task_req,
+ (ret == SUCCESS) ?
+ "SUCCESS" : "FAILED");
+ return ret;
+}
+
+static inline int fnic_queue_dr_io_req(struct fnic *fnic,
+ struct scsi_cmnd *sc,
+ struct fnic_io_req *io_req)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+ struct scsi_lun fc_lun;
+ int ret = 0;
+ unsigned long intr_flags;
+
+ spin_lock_irqsave(host->host_lock, intr_flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+ return FAILED;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "queue_dr_io_req failure - no descriptors\n");
+ atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
+ ret = -EAGAIN;
+ goto lr_io_req_end;
+ }
+
+ /* fill in the lun info */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+ 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
+ fc_lun.scsi_lun, io_req->port_id,
+ fnic->config.ra_tov, fnic->config.ed_tov);
+
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
+lr_io_req_end:
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ atomic_dec(&fnic->in_flight);
+
+ return ret;
+}
+
+/*
+ * Clean up any pending aborts on the lun
+ * For each outstanding IO on this lun, whose abort is not completed by fw,
+ * issue a local abort. Wait for abort to complete. Return 0 if all commands
+ * successfully aborted, 1 otherwise
+ */
+static int fnic_clean_pending_aborts(struct fnic *fnic,
+ struct scsi_cmnd *lr_sc)
+{
+ int tag, abt_tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_lun fc_lun;
+ struct scsi_device *lun_dev = lr_sc->device;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+ enum fnic_ioreq_state old_ioreq_state;
+
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || sc == lr_sc || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ old_ioreq_state = CMD_STATE(sc);
+ /*
+ * Any pending IO issued prior to reset is expected to be
+ * in abts pending state, if not we need to set
+ * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
+ * When IO is completed, the IO will be handed over and
+ * handled in this function.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+ BUG_ON(io_req->abts_done);
+
+ abt_tag = tag;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
+
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ io_req->abts_done = NULL;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ goto clean_pending_aborts_end;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies
+ (fnic->config.ed_tov));
+
+ /* Recheck cmd state to check if it is now aborted */
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ continue;
+ }
+
+ io_req->abts_done = NULL;
+
+ /* if abort is still pending with fw, fail */
+ if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+ ret = 1;
+ goto clean_pending_aborts_end;
+ }
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+
+ schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+ ret = FAILED;
+
+clean_pending_aborts_end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag, ret = SCSI_NO_TAG;
+
+ BUG_ON(!bqt);
+ if (!bqt) {
+ pr_err("Tags are not supported\n");
+ goto end;
+ }
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+ if (tag >= bqt->max_depth) {
+ pr_err("Tag allocation failure\n");
+ goto end;
+ }
+ } while (test_and_set_bit(tag, bqt->tag_map));
+
+ bqt->tag_index[tag] = sc->request;
+ sc->request->tag = tag;
+ sc->tag = tag;
+ if (!sc->request->special)
+ sc->request->special = sc;
+
+ ret = tag;
+
+end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag = sc->request->tag;
+
+ if (tag == SCSI_NO_TAG)
+ return;
+
+ BUG_ON(!bqt || !bqt->tag_index[tag]);
+ if (!bqt)
+ return;
+
+ bqt->tag_index[tag] = NULL;
+ clear_bit(tag, bqt->tag_map);
+
+ return;
+}
+
+/*
+ * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
+ * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
+ * on the LUN.
+ */
+int fnic_device_reset(struct scsi_cmnd *sc)
+{
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fc_rport *rport;
+ int status;
+ int ret = FAILED;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ unsigned long start_time = 0;
+ struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct reset_stats *reset_stats;
+ int tag = 0;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+ int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
+
+ /* Wait for rport to unblock */
+ fc_block_scsi_eh(sc);
+
+ /* Get local-port, check ready and link up */
+ lp = shost_priv(sc->device->host);
+
+ fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ reset_stats = &fnic->fnic_stats.reset_stats;
+
+ atomic64_inc(&reset_stats->device_resets);
+
+ rport = starget_to_rport(scsi_target(sc->device));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
+ rport->port_id, sc->device->lun, sc);
+
+ if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ goto fnic_device_reset_end;
+
+ /* Check if remote port up */
+ if (fc_remote_port_chkready(rport)) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ goto fnic_device_reset_end;
+ }
+
+ CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ /* Allocate tag if not present */
+
+ tag = sc->request->tag;
+ if (unlikely(tag < 0)) {
+ /*
+ * XXX(hch): current the midlayer fakes up a struct
+ * request for the explicit reset ioctls, and those
+ * don't have a tag allocated to them. The below
+ * code pokes into midlayer structures to paper over
+ * this design issue, but that won't work for blk-mq.
+ *
+ * Either someone who can actually test the hardware
+ * will have to come up with a similar hack for the
+ * blk-mq case, or we'll have to bite the bullet and
+ * fix the way the EH ioctls work for real, but until
+ * that happens we fail these explicit requests here.
+ */
+ if (shost_use_blk_mq(sc->device->host))
+ goto fnic_device_reset_end;
+
+ tag = fnic_scsi_host_start_tag(fnic, sc);
+ if (unlikely(tag == SCSI_NO_TAG))
+ goto fnic_device_reset_end;
+ tag_gen_flag = 1;
+ }
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ /*
+ * If there is a io_req attached to this command, then use it,
+ * else allocate a new one.
+ */
+ if (!io_req) {
+ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto fnic_device_reset_end;
+ }
+ memset(io_req, 0, sizeof(*io_req));
+ io_req->port_id = rport->port_id;
+ CMD_SP(sc) = (char *)io_req;
+ }
+ io_req->dr_done = &tm_done;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+ CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
+
+ /*
+ * issue the device reset, if enqueue failed, clean up the ioreq
+ * and break assoc with scsi cmd
+ */
+ if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ io_req->dr_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * Wait on the local completion for LUN reset. The io_req may be
+ * freed while we wait since we hold no lock.
+ */
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
+ goto fnic_device_reset_end;
+ }
+ io_req->dr_done = NULL;
+
+ status = CMD_LR_STATUS(sc);
+
+ /*
+ * If lun reset not completed, bail out with failed. io_req
+ * gets cleaned up during higher levels of EH
+ */
+ if (status == FCPIO_INVALID_CODE) {
+ atomic64_inc(&reset_stats->device_reset_timeouts);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Device reset timed out\n");
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ spin_unlock_irqrestore(io_lock, flags);
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ /*
+ * Issue abort and terminate on device reset request.
+ * If q'ing of terminate fails, retry it after a delay.
+ */
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic,
+ tag | FNIC_TAG_DEV_RST,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Abort and terminate issued on Device reset "
+ "tag 0x%x sc 0x%p\n", tag, sc);
+ break;
+ }
+ }
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ break;
+ } else {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req->abts_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ }
+ } else {
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+
+ /* Completed, but not successful, clean up the io_req, return fail */
+ if (status != FCPIO_SUCCESS) {
+ spin_lock_irqsave(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Device reset completed - failed\n");
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ goto fnic_device_reset_clean;
+ }
+
+ /*
+ * Clean up any aborts on this lun that have still not
+ * completed. If any of these fail, then LUN reset fails.
+ * clean_pending_aborts cleans all cmds on this lun except
+ * the lun reset cmd. If all cmds get cleaned, the lun reset
+ * succeeds
+ */
+ if (fnic_clean_pending_aborts(fnic, sc)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Device reset failed"
+ " since could not abort all IOs\n");
+ goto fnic_device_reset_clean;
+ }
+
+ /* Clean lun reset command */
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ /* Completed, and successful */
+ ret = SUCCESS;
+
+fnic_device_reset_clean:
+ if (io_req)
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (io_req) {
+ start_time = io_req->start_time;
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+
+fnic_device_reset_end:
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ /* free tag if it is allocated */
+ if (unlikely(tag_gen_flag))
+ fnic_scsi_host_end_tag(fnic, sc);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Returning from device reset %s\n",
+ (ret == SUCCESS) ?
+ "SUCCESS" : "FAILED");
+
+ if (ret == FAILED)
+ atomic64_inc(&reset_stats->device_reset_failures);
+
+ return ret;
+}
+
+/* Clean up all IOs, clean up libFC local port */
+int fnic_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ int ret = 0;
+ struct reset_stats *reset_stats;
+
+ lp = shost_priv(shost);
+ fnic = lport_priv(lp);
+ reset_stats = &fnic->fnic_stats.reset_stats;
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_reset called\n");
+
+ atomic64_inc(&reset_stats->fnic_resets);
+
+ /*
+ * Reset local port, this will clean up libFC exchanges,
+ * reset remote port sessions, and if link is up, begin flogi
+ */
+ ret = lp->tt.lport_reset(lp);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Returning from fnic reset %s\n",
+ (ret == 0) ?
+ "SUCCESS" : "FAILED");
+
+ if (ret == 0)
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+ else
+ atomic64_inc(&reset_stats->fnic_reset_failures);
+
+ return ret;
+}
+
+/*
+ * SCSI Error handling calls driver's eh_host_reset if all prior
+ * error handling levels return FAILED. If host reset completes
+ * successfully, and if link is up, then Fabric login begins.
+ *
+ * Host Reset is the highest level of error recovery. If this fails, then
+ * host is offlined by SCSI.
+ *
+ */
+int fnic_host_reset(struct scsi_cmnd *sc)
+{
+ int ret;
+ unsigned long wait_host_tmo;
+ struct Scsi_Host *shost = sc->device->host;
+ struct fc_lport *lp = shost_priv(shost);
+
+ /*
+ * If fnic_reset is successful, wait for fabric login to complete
+ * scsi-ml tries to send a TUR to every device if host reset is
+ * successful, so before returning to scsi, fabric should be up
+ */
+ ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
+ if (ret == SUCCESS) {
+ wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
+ ret = FAILED;
+ while (time_before(jiffies, wait_host_tmo)) {
+ if ((lp->state == LPORT_ST_READY) &&
+ (lp->link_up)) {
+ ret = SUCCESS;
+ break;
+ }
+ ssleep(1);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * This fxn is called from libFC when host is removed
+ */
+void fnic_scsi_abort_io(struct fc_lport *lp)
+{
+ int err = 0;
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic *fnic = lport_priv(lp);
+ DECLARE_COMPLETION_ONSTACK(remove_wait);
+
+ /* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+
+ fnic->remove_wait = &remove_wait;
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+ fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ err = fnic_fw_reset_handler(fnic);
+ if (err) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ fnic->remove_wait = NULL;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ /* Wait for firmware reset to complete */
+ wait_for_completion_timeout(&remove_wait,
+ msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->remove_wait = NULL;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_scsi_abort_io %s\n",
+ (fnic->state == FNIC_IN_ETH_MODE) ?
+ "SUCCESS" : "FAILED");
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+}
+
+/*
+ * This fxn called from libFC to clean up driver IO state on link down
+ */
+void fnic_scsi_cleanup(struct fc_lport *lp)
+{
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic *fnic = lport_priv(lp);
+
+ /* issue fw reset */
+retry_fw_reset:
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+ fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (fnic_fw_reset_handler(fnic)) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ }
+
+}
+
+void fnic_empty_scsi_cleanup(struct fc_lport *lp)
+{
+}
+
+void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+{
+ struct fnic *fnic = lport_priv(lp);
+
+ /* Non-zero sid, nothing to do */
+ if (sid)
+ goto call_fc_exch_mgr_reset;
+
+ if (did) {
+ fnic_rport_exch_reset(fnic, did);
+ goto call_fc_exch_mgr_reset;
+ }
+
+ /*
+ * sid = 0, did = 0
+ * link down or device being removed
+ */
+ if (!fnic->in_remove)
+ fnic_scsi_cleanup(lp);
+ else
+ fnic_scsi_abort_io(lp);
+
+ /* call libFC exch mgr reset to reset its exchanges */
+call_fc_exch_mgr_reset:
+ fc_exch_mgr_reset(lp, sid, did);
+
+}
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through tag map to check if there is any IOs pending,if there is one,
+ * then it returns 1 (true), otherwise 0 (false)
+ * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
+ * otherwise, it checks for all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_device *lun_dev = NULL;
+
+ if (lr_sc)
+ lun_dev = lr_sc->device;
+
+ /* walk again to check, if IOs are still pending in fw */
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ ret = 1;
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
new file mode 100644
index 000000000..540cceb84
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_STATS_H_
+#define _FNIC_STATS_H_
+struct io_path_stats {
+ atomic64_t active_ios;
+ atomic64_t max_active_ios;
+ atomic64_t io_completions;
+ atomic64_t io_failures;
+ atomic64_t ioreq_null;
+ atomic64_t alloc_failures;
+ atomic64_t sc_null;
+ atomic64_t io_not_found;
+ atomic64_t num_ios;
+};
+
+struct abort_stats {
+ atomic64_t aborts;
+ atomic64_t abort_failures;
+ atomic64_t abort_drv_timeouts;
+ atomic64_t abort_fw_timeouts;
+ atomic64_t abort_io_not_found;
+};
+
+struct terminate_stats {
+ atomic64_t terminates;
+ atomic64_t max_terminates;
+ atomic64_t terminate_drv_timeouts;
+ atomic64_t terminate_fw_timeouts;
+ atomic64_t terminate_io_not_found;
+ atomic64_t terminate_failures;
+};
+
+struct reset_stats {
+ atomic64_t device_resets;
+ atomic64_t device_reset_failures;
+ atomic64_t device_reset_aborts;
+ atomic64_t device_reset_timeouts;
+ atomic64_t device_reset_terminates;
+ atomic64_t fw_resets;
+ atomic64_t fw_reset_completions;
+ atomic64_t fw_reset_failures;
+ atomic64_t fnic_resets;
+ atomic64_t fnic_reset_completions;
+ atomic64_t fnic_reset_failures;
+};
+
+struct fw_stats {
+ atomic64_t active_fw_reqs;
+ atomic64_t max_fw_reqs;
+ atomic64_t fw_out_of_resources;
+ atomic64_t io_fw_errs;
+};
+
+struct vlan_stats {
+ atomic64_t vlan_disc_reqs;
+ atomic64_t resp_withno_vlanID;
+ atomic64_t sol_expiry_count;
+ atomic64_t flogi_rejects;
+};
+
+struct misc_stats {
+ u64 last_isr_time;
+ u64 last_ack_time;
+ atomic64_t isr_count;
+ atomic64_t max_cq_entries;
+ atomic64_t ack_index_out_of_range;
+ atomic64_t data_count_mismatch;
+ atomic64_t fcpio_timeout;
+ atomic64_t fcpio_aborted;
+ atomic64_t sgl_invalid;
+ atomic64_t mss_invalid;
+ atomic64_t abts_cpwq_alloc_failures;
+ atomic64_t devrst_cpwq_alloc_failures;
+ atomic64_t io_cpwq_alloc_failures;
+ atomic64_t no_icmnd_itmf_cmpls;
+ atomic64_t queue_fulls;
+ atomic64_t rport_not_ready;
+ atomic64_t frame_errors;
+};
+
+struct fnic_stats {
+ struct io_path_stats io_stats;
+ struct abort_stats abts_stats;
+ struct terminate_stats term_stats;
+ struct reset_stats reset_stats;
+ struct fw_stats fw_stats;
+ struct vlan_stats vlan_stats;
+ struct misc_stats misc_stats;
+};
+
+struct stats_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buf_size;
+ int buffer_len;
+};
+
+int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
+int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_remove(struct fnic *);
+#endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644
index 000000000..65a9bde26
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -0,0 +1,779 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/time.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/* static char *fnic_fc_ctlr_trace_buf_p; */
+
+static int fc_trace_max_entries;
+static unsigned long fnic_fc_ctlr_trace_buf_p;
+static fnic_trace_dbg_t fc_trace_entries;
+int fnic_fc_tracing_enabled = 1;
+int fnic_fc_trace_cleared = 1;
+static DEFINE_SPINLOCK(fnic_fc_trace_lock);
+
+
+/*
+ * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
+ *
+ * Description:
+ * This routine gets next available trace buffer entry location @wr_idx
+ * from allocated trace buffer pages and give that memory location
+ * to user to store the trace information.
+ *
+ * Return Value:
+ * This routine returns pointer to next available trace entry
+ * @fnic_buf_head for user to fill trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+ unsigned long fnic_buf_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+
+ /*
+ * Get next available memory location for writing trace information
+ * at @wr_idx and increment @wr_idx
+ */
+ fnic_buf_head =
+ fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+ fnic_trace_entries.wr_idx++;
+
+ /*
+ * Verify if trace buffer is full then change wd_idx to
+ * start from zero
+ */
+ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.wr_idx = 0;
+
+ /*
+ * Verify if write index @wr_idx and read index @rd_idx are same then
+ * increment @rd_idx to move to next entry in trace buffer
+ */
+ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+ fnic_trace_entries.rd_idx++;
+ if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.rd_idx = 0;
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+ int rd_idx;
+ int wr_idx;
+ int len = 0;
+ unsigned long flags;
+ char str[KSYM_SYMBOL_LEN];
+ struct timespec val;
+ fnic_trace_data_t *tbp;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+ rd_idx = fnic_trace_entries.rd_idx;
+ wr_idx = fnic_trace_entries.wr_idx;
+ if (wr_idx < rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * If rd_idx is reached to maximum trace entries
+ * then move rd_idx to zero
+ */
+ if (rd_idx > (fnic_max_trace_entries-1))
+ rd_idx = 0;
+ /*
+ * Continure dumpping trace buffer entries into
+ * memory file till rd_idx reaches write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ } else if (wr_idx > rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * Continue dumpping trace buffer entries into
+ * memory file till rd_idx reaches write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return len;
+}
+
+/*
+ * fnic_get_stats_data - Copy fnic stats buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
+ *
+ * Description:
+ * This routine gathers the fnic stats debugfs data from the fnic_stats struct
+ * and dumps it to stats_debug_info.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into
+ * stats_debug_info
+ */
+int fnic_get_stats_data(struct stats_debug_info *debug,
+ struct fnic_stats *stats)
+{
+ int len = 0;
+ int buf_size = debug->buf_size;
+ struct timespec val1, val2;
+
+ len = snprintf(debug->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\tIO Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
+ "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
+ "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
+ "Number of Memory alloc Failures: %lld\n"
+ "Number of IOREQ Null: %lld\n"
+ "Number of SCSI cmd pointer Null: %lld\n",
+ (u64)atomic64_read(&stats->io_stats.active_ios),
+ (u64)atomic64_read(&stats->io_stats.max_active_ios),
+ (u64)atomic64_read(&stats->io_stats.num_ios),
+ (u64)atomic64_read(&stats->io_stats.io_completions),
+ (u64)atomic64_read(&stats->io_stats.io_failures),
+ (u64)atomic64_read(&stats->io_stats.io_not_found),
+ (u64)atomic64_read(&stats->io_stats.alloc_failures),
+ (u64)atomic64_read(&stats->io_stats.ioreq_null),
+ (u64)atomic64_read(&stats->io_stats.sc_null));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tAbort Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Aborts: %lld\n"
+ "Number of Abort Failures: %lld\n"
+ "Number of Abort Driver Timeouts: %lld\n"
+ "Number of Abort FW Timeouts: %lld\n"
+ "Number of Abort IO NOT Found: %lld\n",
+ (u64)atomic64_read(&stats->abts_stats.aborts),
+ (u64)atomic64_read(&stats->abts_stats.abort_failures),
+ (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
+ (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
+ (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tTerminate Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Terminates: %lld\n"
+ "Maximum Terminates: %lld\n"
+ "Number of Terminate Driver Timeouts: %lld\n"
+ "Number of Terminate FW Timeouts: %lld\n"
+ "Number of Terminate IO NOT Found: %lld\n"
+ "Number of Terminate Failures: %lld\n",
+ (u64)atomic64_read(&stats->term_stats.terminates),
+ (u64)atomic64_read(&stats->term_stats.max_terminates),
+ (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
+ (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
+ (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
+ (u64)atomic64_read(&stats->term_stats.terminate_failures));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tReset Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Device Resets: %lld\n"
+ "Number of Device Reset Failures: %lld\n"
+ "Number of Device Reset Aborts: %lld\n"
+ "Number of Device Reset Timeouts: %lld\n"
+ "Number of Device Reset Terminates: %lld\n"
+ "Number of FW Resets: %lld\n"
+ "Number of FW Reset Completions: %lld\n"
+ "Number of FW Reset Failures: %lld\n"
+ "Number of Fnic Reset: %lld\n"
+ "Number of Fnic Reset Completions: %lld\n"
+ "Number of Fnic Reset Failures: %lld\n",
+ (u64)atomic64_read(&stats->reset_stats.device_resets),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
+ (u64)atomic64_read(
+ &stats->reset_stats.device_reset_terminates),
+ (u64)atomic64_read(&stats->reset_stats.fw_resets),
+ (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
+ (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
+ (u64)atomic64_read(&stats->reset_stats.fnic_resets),
+ (u64)atomic64_read(
+ &stats->reset_stats.fnic_reset_completions),
+ (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tFirmware Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Active FW Requests %lld\n"
+ "Maximum FW Requests: %lld\n"
+ "Number of FW out of resources: %lld\n"
+ "Number of FW IO errors: %lld\n",
+ (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
+ (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
+ (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
+ (u64)atomic64_read(&stats->fw_stats.io_fw_errs));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tVlan Discovery Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Vlan Discovery Requests Sent %lld\n"
+ "Vlan Response Received with no FCF VLAN ID: %lld\n"
+ "No solicitations recvd after vlan set, expiry count: %lld\n"
+ "Flogi rejects count: %lld\n",
+ (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
+ (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
+ (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
+ (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tOther Important Statistics\n"
+ "------------------------------------------\n");
+
+ jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Last ISR time: %llu (%8lu.%8lu)\n"
+ "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Number of ISRs: %lld\n"
+ "Maximum CQ Entries: %lld\n"
+ "Number of ACK index out of range: %lld\n"
+ "Number of data count mismatch: %lld\n"
+ "Number of FCPIO Timeouts: %lld\n"
+ "Number of FCPIO Aborted: %lld\n"
+ "Number of SGL Invalid: %lld\n"
+ "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
+ "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
+ "Number of Copy WQ Alloc Failures for IOs: %lld\n"
+ "Number of no icmnd itmf Completions: %lld\n"
+ "Number of QUEUE Fulls: %lld\n"
+ "Number of rport not ready: %lld\n"
+ "Number of receive frame errors: %lld\n",
+ (u64)stats->misc_stats.last_isr_time,
+ val1.tv_sec, val1.tv_nsec,
+ (u64)stats->misc_stats.last_ack_time,
+ val2.tv_sec, val2.tv_nsec,
+ (u64)atomic64_read(&stats->misc_stats.isr_count),
+ (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
+ (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
+ (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
+ (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
+ (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
+ (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
+ (u64)atomic64_read(
+ &stats->misc_stats.abts_cpwq_alloc_failures),
+ (u64)atomic64_read(
+ &stats->misc_stats.devrst_cpwq_alloc_failures),
+ (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
+ (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
+ (u64)atomic64_read(&stats->misc_stats.queue_fulls),
+ (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
+ (u64)atomic64_read(&stats->misc_stats.frame_errors));
+
+ return len;
+
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize trace buffer data structure by allocating required memory and
+ * setting page_offset information for every trace entry by adding trace entry
+ * length to previous page_offset value.
+ */
+int fnic_trace_buf_init(void)
+{
+ unsigned long fnic_buf_head;
+ int i;
+ int err = 0;
+
+ trace_max_pages = fnic_trace_max_pages;
+ fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+ FNIC_ENTRY_SIZE_BYTES;
+
+ fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ if (!fnic_trace_buf_p) {
+ printk(KERN_ERR PFX "Failed to allocate memory "
+ "for fnic_trace_buf_p\n");
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+ fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+ sizeof(unsigned long));
+ if (!fnic_trace_entries.page_offset) {
+ printk(KERN_ERR PFX "Failed to allocate memory for"
+ " page_offset\n");
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_entries.page_offset, 0,
+ (fnic_max_trace_entries * sizeof(unsigned long)));
+ fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+ fnic_buf_head = fnic_trace_buf_p;
+
+ /*
+ * Set page_offset field of fnic_trace_entries struct by
+ * calculating memory location for every trace entry using
+ * length of each trace entry
+ */
+ for (i = 0; i < fnic_max_trace_entries; i++) {
+ fnic_trace_entries.page_offset[i] = fnic_buf_head;
+ fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+ }
+ err = fnic_trace_debugfs_init();
+ if (err < 0) {
+ pr_err("fnic: Failed to initialize debugfs for tracing\n");
+ goto err_fnic_trace_debugfs_init;
+ }
+ pr_info("fnic: Successfully Initialized Trace Buffer\n");
+ return err;
+err_fnic_trace_debugfs_init:
+ fnic_trace_free();
+err_fnic_trace_buf_init:
+ return err;
+}
+
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+ fnic_tracing_enabled = 0;
+ fnic_trace_debugfs_terminate();
+ if (fnic_trace_entries.page_offset) {
+ vfree((void *)fnic_trace_entries.page_offset);
+ fnic_trace_entries.page_offset = NULL;
+ }
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
+
+/*
+ * fnic_fc_ctlr_trace_buf_init -
+ * Initialize trace buffer to log fnic control frames
+ * Description:
+ * Initialize trace buffer data structure by allocating
+ * required memory for trace data as well as for Indexes.
+ * Frame size is 256 bytes and
+ * memory is allocated for 1024 entries of 256 bytes.
+ * Page_offset(Index) is set to the address of trace entry
+ * and page_offset is initialized by adding frame size
+ * to the previous page_offset entry.
+ */
+
+int fnic_fc_trace_init(void)
+{
+ unsigned long fc_trace_buf_head;
+ int err = 0;
+ int i;
+
+ fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
+ FC_TRC_SIZE_BYTES;
+ fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
+ fnic_fc_trace_max_pages * PAGE_SIZE);
+ if (!fnic_fc_ctlr_trace_buf_p) {
+ pr_err("fnic: Failed to allocate memory for "
+ "FC Control Trace Buf\n");
+ err = -ENOMEM;
+ goto err_fnic_fc_ctlr_trace_buf_init;
+ }
+
+ memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
+ fnic_fc_trace_max_pages * PAGE_SIZE);
+
+ /* Allocate memory for page offset */
+ fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
+ sizeof(unsigned long));
+ if (!fc_trace_entries.page_offset) {
+ pr_err("fnic:Failed to allocate memory for page_offset\n");
+ if (fnic_fc_ctlr_trace_buf_p) {
+ pr_err("fnic: Freeing FC Control Trace Buf\n");
+ vfree((void *)fnic_fc_ctlr_trace_buf_p);
+ fnic_fc_ctlr_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_fc_ctlr_trace_buf_init;
+ }
+ memset((void *)fc_trace_entries.page_offset, 0,
+ (fc_trace_max_entries * sizeof(unsigned long)));
+
+ fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
+ fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
+
+ /*
+ * Set up fc_trace_entries.page_offset field with memory location
+ * for every trace entry
+ */
+ for (i = 0; i < fc_trace_max_entries; i++) {
+ fc_trace_entries.page_offset[i] = fc_trace_buf_head;
+ fc_trace_buf_head += FC_TRC_SIZE_BYTES;
+ }
+ err = fnic_fc_trace_debugfs_init();
+ if (err < 0) {
+ pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
+ goto err_fnic_fc_ctlr_trace_debugfs_init;
+ }
+ pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
+ return err;
+
+err_fnic_fc_ctlr_trace_debugfs_init:
+ fnic_fc_trace_free();
+err_fnic_fc_ctlr_trace_buf_init:
+ return err;
+}
+
+/*
+ * Fnic_fc_ctlr_trace_free - Free memory of fnic_fc_ctlr trace data structures.
+ */
+void fnic_fc_trace_free(void)
+{
+ fnic_fc_tracing_enabled = 0;
+ fnic_fc_trace_debugfs_terminate();
+ if (fc_trace_entries.page_offset) {
+ vfree((void *)fc_trace_entries.page_offset);
+ fc_trace_entries.page_offset = NULL;
+ }
+ if (fnic_fc_ctlr_trace_buf_p) {
+ vfree((void *)fnic_fc_ctlr_trace_buf_p);
+ fnic_fc_ctlr_trace_buf_p = 0;
+ }
+ pr_info("fnic:Successfully FC_CTLR Freed Trace Buffer\n");
+}
+
+/*
+ * fnic_fc_ctlr_set_trace_data:
+ * Maintain rd & wr idx accordingly and set data
+ * Passed parameters:
+ * host_no: host number accociated with fnic
+ * frame_type: send_frame, rece_frame or link event
+ * fc_frame: pointer to fc_frame
+ * frame_len: Length of the fc_frame
+ * Description:
+ * This routine will get next available wr_idx and
+ * copy all passed trace data to the buffer pointed by wr_idx
+ * and increment wr_idx. It will also make sure that we dont
+ * overwrite the entry which we are reading and also
+ * wrap around if we reach the maximum entries.
+ * Returned Value:
+ * It will return 0 for success or -1 for failure
+ */
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+ char *frame, u32 fc_trc_frame_len)
+{
+ unsigned long flags;
+ struct fc_trace_hdr *fc_buf;
+ unsigned long eth_fcoe_hdr_len;
+ char *fc_trace;
+
+ if (fnic_fc_tracing_enabled == 0)
+ return 0;
+
+ spin_lock_irqsave(&fnic_fc_trace_lock, flags);
+
+ if (fnic_fc_trace_cleared == 1) {
+ fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
+ pr_info("fnic: Resetting the read idx\n");
+ memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
+ fnic_fc_trace_max_pages * PAGE_SIZE);
+ fnic_fc_trace_cleared = 0;
+ }
+
+ fc_buf = (struct fc_trace_hdr *)
+ fc_trace_entries.page_offset[fc_trace_entries.wr_idx];
+
+ fc_trace_entries.wr_idx++;
+
+ if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
+ fc_trace_entries.wr_idx = 0;
+
+ if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
+ fc_trace_entries.rd_idx++;
+ if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
+ fc_trace_entries.rd_idx = 0;
+ }
+
+ fc_buf->time_stamp = CURRENT_TIME;
+ fc_buf->host_no = host_no;
+ fc_buf->frame_type = frame_type;
+
+ fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);
+
+ /* During the receive path, we do not have eth hdr as well as fcoe hdr
+ * at trace entry point so we will stuff 0xff just to make it generic.
+ */
+ if (frame_type == FNIC_FC_RECV) {
+ eth_fcoe_hdr_len = sizeof(struct ethhdr) +
+ sizeof(struct fcoe_hdr);
+ memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
+ /* Copy the rest of data frame */
+ memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
+ min_t(u8, fc_trc_frame_len,
+ (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
+ - eth_fcoe_hdr_len)));
+ } else {
+ memcpy((char *)fc_trace, (void *)frame,
+ min_t(u8, fc_trc_frame_len,
+ (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
+ }
+
+ /* Store the actual received length */
+ fc_buf->frame_len = fc_trc_frame_len;
+
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ return 0;
+}
+
+/*
+ * fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
+ * Passed parameter:
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ * rdata_flag: 1 => Unformated file
+ * 0 => formated file
+ * Description:
+ * This routine will copy the trace data to memory file with
+ * proper formatting and also copy to another memory
+ * file without formatting for further procesing.
+ * Retrun Value:
+ * Number of bytes that were dumped into fnic_dbgfs_t
+ */
+
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
+{
+ int rd_idx, wr_idx;
+ unsigned long flags;
+ int len = 0, j;
+ struct fc_trace_hdr *tdata;
+ char *fc_trace;
+
+ spin_lock_irqsave(&fnic_fc_trace_lock, flags);
+ if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ pr_info("fnic: Buffer is empty\n");
+ return 0;
+ }
+ rd_idx = fc_trace_entries.rd_idx;
+ wr_idx = fc_trace_entries.wr_idx;
+ if (rdata_flag == 0) {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+ "Time Stamp (UTC)\t\t"
+ "Host No: F Type: len: FCoE_FRAME:\n");
+ }
+
+ while (rd_idx != wr_idx) {
+ tdata = (struct fc_trace_hdr *)
+ fc_trace_entries.page_offset[rd_idx];
+ if (!tdata) {
+ pr_info("fnic: Rd data is NULL\n");
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ return 0;
+ }
+ if (rdata_flag == 0) {
+ copy_and_format_trace_data(tdata,
+ fnic_dbgfs_prt, &len, rdata_flag);
+ } else {
+ fc_trace = (char *)tdata;
+ for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3)
+ - len, "%02x", fc_trace[j] & 0xff);
+ } /* for loop */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+ "\n");
+ }
+ rd_idx++;
+ if (rd_idx > (fc_trace_max_entries - 1))
+ rd_idx = 0;
+ }
+
+ spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+ return len;
+}
+
+/*
+ * copy_and_format_trace_data: Copy formatted data to char * buffer
+ * Passed Parameter:
+ * @fc_trace_hdr_t: pointer to trace data
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ * @orig_len: pointer to len
+ * rdata_flag: 0 => Formated file, 1 => Unformated file
+ * Description:
+ * This routine will format and copy the passed trace data
+ * for formated file or unformated file accordingly.
+ */
+
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+ fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
+ u8 rdata_flag)
+{
+ struct tm tm;
+ int j, i = 1, len;
+ char *fc_trace, *fmt;
+ int ethhdr_len = sizeof(struct ethhdr) - 1;
+ int fcoehdr_len = sizeof(struct fcoe_hdr);
+ int fchdr_len = sizeof(struct fc_frame_header);
+ int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
+
+ tdata->frame_type = tdata->frame_type & 0x7F;
+
+ len = *orig_len;
+
+ time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+
+ fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len,
+ fmt,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ tdata->time_stamp.tv_nsec, tdata->host_no,
+ tdata->frame_type, tdata->frame_len);
+
+ fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
+
+ for (j = 0; j < min_t(u8, tdata->frame_len,
+ (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
+ if (tdata->frame_type == FNIC_FC_LE) {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, "%c", fc_trace[j]);
+ } else {
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, "%02x", fc_trace[j] & 0xff);
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, " ");
+ if (j == ethhdr_len ||
+ j == ethhdr_len + fcoehdr_len ||
+ j == ethhdr_len + fcoehdr_len + fchdr_len ||
+ (i > 3 && j%fchdr_len == 0)) {
+ len += snprintf(fnic_dbgfs_prt->buffer
+ + len, max_size - len,
+ "\n\t\t\t\t\t\t\t\t");
+ i++;
+ }
+ } /* end of else*/
+ } /* End of for loop*/
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ max_size - len, "\n");
+ *orig_len = len;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644
index 000000000..a8aa0578f
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+#define FC_TRC_SIZE_BYTES 256
+#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr)
+
+/*
+ * Fisrt bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type
+ * of frame 1 => Eth frame, 0=> FC frame
+ */
+
+#define FNIC_FC_RECV 0x52 /* Character R */
+#define FNIC_FC_SEND 0x54 /* Character T */
+#define FNIC_FC_LE 0x4C /* Character L */
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+ size_t count,
+ loff_t *ppos,
+ const void *from,
+ size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+extern unsigned int fnic_fc_trace_max_pages;
+extern int fnic_fc_tracing_enabled;
+extern int fnic_fc_trace_cleared;
+
+typedef struct fnic_trace_dbg {
+ int wr_idx;
+ int rd_idx;
+ unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+ int buffer_len;
+ char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ };
+ u64 val;
+ } timestamp, fnaddr;
+ u32 host_no;
+ u32 tag;
+ u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+struct fc_trace_hdr {
+ struct timespec time_stamp;
+ u32 host_no;
+ u8 frame_type;
+ u8 frame_len;
+} __attribute__((__packed__));
+
+#define FC_TRACE_ADDRESS(a) \
+ ((unsigned long)(a) + sizeof(struct fc_trace_hdr))
+
+#define FNIC_TRACE_ENTRY_SIZE \
+ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \
+ if (unlikely(fnic_tracing_enabled)) { \
+ fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+ if (trace_buf) { \
+ if (sizeof(unsigned long) < 8) { \
+ trace_buf->timestamp.low = jiffies; \
+ trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+ } else { \
+ trace_buf->timestamp.val = jiffies; \
+ trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+ } \
+ trace_buf->host_no = _hn; \
+ trace_buf->tag = _t; \
+ trace_buf->data[0] = (u64)(unsigned long)_a; \
+ trace_buf->data[1] = (u64)(unsigned long)_b; \
+ trace_buf->data[2] = (u64)(unsigned long)_c; \
+ trace_buf->data[3] = (u64)(unsigned long)_d; \
+ trace_buf->data[4] = (u64)(unsigned long)_e; \
+ } \
+ }
+
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_debugfs_init(void);
+void fnic_debugfs_terminate(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+/* Fnic FC CTLR Trace releated function */
+int fnic_fc_trace_init(void);
+void fnic_fc_trace_free(void);
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+ char *frame, u32 fc_frame_len);
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+ fnic_dbgfs_t *fnic_dbgfs_prt,
+ int *len, u8 rdata_flag);
+int fnic_fc_trace_debugfs_init(void);
+void fnic_fc_trace_debugfs_terminate(void);
+
+#endif
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
new file mode 100644
index 000000000..92e80ae6b
--- /dev/null
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+ __le64 address;
+ __le16 length_type;
+ u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+ RQ_ENET_TYPE_ONLY_SOP = 0,
+ RQ_ENET_TYPE_NOT_SOP = 1,
+ RQ_ENET_TYPE_RESV2 = 2,
+ RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS 64
+#define RQ_ENET_LEN_BITS 14
+#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS 2
+#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+ u64 address, u8 type, u16 length)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+ ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+ u64 *address, u8 *type, u16 *length)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+ *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+ RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
new file mode 100644
index 000000000..c5db32eda
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+ vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+ cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ cq->index = index;
+ cq->vdev = vdev;
+
+ cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+ if (!cq->ctrl) {
+ printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int cq_message_enable,
+ unsigned int interrupt_offset, u64 cq_message_addr)
+{
+ u64 paddr;
+
+ paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &cq->ctrl->ring_base);
+ iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+ iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+ iowrite32(color_enable, &cq->ctrl->color_enable);
+ iowrite32(cq_head, &cq->ctrl->cq_head);
+ iowrite32(cq_tail, &cq->ctrl->cq_tail);
+ iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+ iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+ iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+ iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+ iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+ writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+ cq->to_clean = 0;
+ cq->last_color = 0;
+
+ iowrite32(0, &cq->ctrl->cq_head);
+ iowrite32(0, &cq->ctrl->cq_tail);
+ iowrite32(1, &cq->ctrl->cq_tail_color);
+
+ vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
new file mode 100644
index 000000000..4ede6809f
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_cq_service fnic_cq_service
+#define vnic_cq_free fnic_cq_free
+#define vnic_cq_alloc fnic_cq_alloc
+#define vnic_cq_init fnic_cq_init
+#define vnic_cq_clean fnic_cq_clean
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 flow_control_enable; /* 0x10 */
+ u32 pad1;
+ u32 color_enable; /* 0x18 */
+ u32 pad2;
+ u32 cq_head; /* 0x20 */
+ u32 pad3;
+ u32 cq_tail; /* 0x28 */
+ u32 pad4;
+ u32 cq_tail_color; /* 0x30 */
+ u32 pad5;
+ u32 interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 cq_entry_enable; /* 0x40 */
+ u32 pad7;
+ u32 cq_message_enable; /* 0x48 */
+ u32 pad8;
+ u32 interrupt_offset; /* 0x50 */
+ u32 pad9;
+ u64 cq_message_addr; /* 0x58 */
+ u32 pad10;
+};
+
+struct vnic_cq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned int to_clean;
+ unsigned int last_color;
+};
+
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+ unsigned int work_to_do,
+ int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque),
+ void *opaque)
+{
+ struct cq_desc *cq_desc;
+ unsigned int work_done = 0;
+ u16 q_number, completed_index;
+ u8 type, color;
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq_desc, type,
+ q_number, completed_index, opaque))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
new file mode 100644
index 000000000..7901ce255
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_COPY_H_
+#define _VNIC_CQ_COPY_H_
+
+#include "fcpio.h"
+
+static inline unsigned int vnic_cq_copy_service(
+ struct vnic_cq *cq,
+ int (*q_service)(struct vnic_dev *vdev,
+ unsigned int index,
+ struct fcpio_fw_req *desc),
+ unsigned int work_to_do)
+
+{
+ struct fcpio_fw_req *desc;
+ unsigned int work_done = 0;
+ u8 color;
+
+ desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ fcpio_color_dec(desc, &color);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq->index, desc))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ fcpio_color_dec(desc, &color);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+#endif /* _VNIC_CQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
new file mode 100644
index 000000000..9795d6f3e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/slab.h>
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+struct vnic_res {
+ void __iomem *vaddr;
+ unsigned int count;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 *linkstatus;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+ return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+ struct vnic_dev_bar *bar)
+{
+ struct vnic_resource_header __iomem *rh;
+ struct vnic_resource __iomem *r;
+ u8 type;
+
+ if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+ printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+ return -EINVAL;
+ }
+
+ rh = bar->vaddr;
+ if (!rh) {
+ printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+ return -EINVAL;
+ }
+
+ if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+ ioread32(&rh->version) != VNIC_RES_VERSION) {
+ printk(KERN_ERR "vNIC BAR0 res magic/version error "
+ "exp (%lx/%lx) curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
+ return -EINVAL;
+ }
+
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
+ while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+ u8 bar_num = ioread8(&r->bar);
+ u32 bar_offset = ioread32(&r->bar_offset);
+ u32 count = ioread32(&r->count);
+ u32 len;
+
+ r++;
+
+ if (bar_num != 0) /* only mapping in BAR0 resources */
+ continue;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ /* each count is stride bytes long */
+ len = count * VNIC_RES_STRIDE;
+ if (len + bar_offset > bar->len) {
+ printk(KERN_ERR "vNIC BAR0 resource %d "
+ "out-of-bounds, offset 0x%x + "
+ "size 0x%x > bar len 0x%lx\n",
+ type, bar_offset,
+ len,
+ bar->len);
+ return -EINVAL;
+ }
+ break;
+ case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD:
+ len = count;
+ break;
+ default:
+ continue;
+ }
+
+ vdev->res[type].count = count;
+ vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+ }
+
+ return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type)
+{
+ return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index)
+{
+ if (!vdev->res[type].vaddr)
+ return NULL;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ return (char __iomem *)vdev->res[type].vaddr +
+ index * VNIC_RES_STRIDE;
+ default:
+ return (char __iomem *)vdev->res[type].vaddr;
+ }
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count,
+ unsigned int desc_size)
+{
+ /* The base address of the desc rings must be 512 byte aligned.
+ * Descriptor count is aligned to groups of 32 descriptors. A
+ * count of 0 means the maximum 4096 descriptors. Descriptor
+ * size is aligned to 16 bytes.
+ */
+
+ unsigned int count_align = 32;
+ unsigned int desc_align = 16;
+
+ ring->base_align = 512;
+
+ if (desc_count == 0)
+ desc_count = 4096;
+
+ ring->desc_count = ALIGN(desc_count, count_align);
+
+ ring->desc_size = ALIGN(desc_size, desc_align);
+
+ ring->size = ring->desc_count * ring->desc_size;
+ ring->size_unaligned = ring->size + ring->base_align;
+
+ return ring->size_unaligned;
+}
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+ memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+ ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->size_unaligned,
+ &ring->base_addr_unaligned);
+
+ if (!ring->descs_unaligned) {
+ printk(KERN_ERR
+ "Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
+ return -ENOMEM;
+ }
+
+ ring->base_addr = ALIGN(ring->base_addr_unaligned,
+ ring->base_align);
+ ring->descs = (u8 *)ring->descs_unaligned +
+ (ring->base_addr - ring->base_addr_unaligned);
+
+ vnic_dev_clear_desc_ring(ring);
+
+ ring->desc_avail = ring->desc_count - 1;
+
+ return 0;
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+ if (ring->descs) {
+ pci_free_consistent(vdev->pdev,
+ ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
+ ring->descs = NULL;
+ }
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+ int delay;
+ u32 status;
+ int dev_cmd_err[] = {
+ /* convert from fw's version of error.h to host's version */
+ 0, /* ERR_SUCCESS */
+ EINVAL, /* ERR_EINVAL */
+ EFAULT, /* ERR_EFAULT */
+ EPERM, /* ERR_EPERM */
+ EBUSY, /* ERR_EBUSY */
+ };
+ int err;
+
+ status = ioread32(&devcmd->status);
+ if (status & STAT_BUSY) {
+ printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+ return -EBUSY;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ writeq(*a0, &devcmd->args[0]);
+ writeq(*a1, &devcmd->args[1]);
+ wmb();
+ }
+
+ iowrite32(cmd, &devcmd->cmd);
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+
+ udelay(100);
+
+ status = ioread32(&devcmd->status);
+ if (!(status & STAT_BUSY)) {
+
+ if (status & STAT_ERROR) {
+ err = dev_cmd_err[(int)readq(&devcmd->args[0])];
+ printk(KERN_ERR "Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+ return -err;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ rmb();
+ *a0 = readq(&devcmd->args[0]);
+ *a1 = readq(&devcmd->args[1]);
+ }
+
+ return 0;
+ }
+ }
+
+ printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
+ return -ETIMEDOUT;
+}
+
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err = 0;
+
+ if (!vdev->fw_info) {
+ vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa);
+ if (!vdev->fw_info)
+ return -ENOMEM;
+
+ a0 = vdev->fw_info_pa;
+
+ /* only get fw_info once and cache it */
+ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+ }
+
+ *fw_info = vdev->fw_info;
+
+ return err;
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+ void *value)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = offset;
+ a1 = size;
+
+ err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+ switch (size) {
+ case 1:
+ *(u8 *)value = (u8)a0;
+ break;
+ case 2:
+ *(u16 *)value = (u16)a0;
+ break;
+ case 4:
+ *(u32 *)value = (u32)a0;
+ break;
+ case 8:
+ *(u64 *)value = a0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ if (!vdev->stats) {
+ vdev->stats = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_stats), &vdev->stats_pa);
+ if (!vdev->stats)
+ return -ENOMEM;
+ }
+
+ *stats = vdev->stats;
+ a0 = vdev->stats_pa;
+ a1 = sizeof(struct vnic_stats);
+
+ return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err, i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = ((u8 *)&a0)[i];
+
+ return 0;
+}
+
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+ (multicast ? CMD_PFILTER_MULTICAST : 0) |
+ (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+ (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+ (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+ if (err)
+ printk(KERN_ERR "Can't set packet filter\n");
+}
+
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ if (err)
+ printk(KERN_ERR
+ "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+}
+
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ if (err)
+ printk(KERN_ERR
+ "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ if (!vdev->notify) {
+ vdev->notify = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &vdev->notify_pa);
+ if (!vdev->notify)
+ return -ENOMEM;
+ }
+
+ a0 = vdev->notify_pa;
+ a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ a0 = 0; /* paddr = 0 to unset notify buffer */
+ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+ u32 *words;
+ unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+ unsigned int i;
+ u32 csum;
+
+ if (!vdev->notify)
+ return 0;
+
+ do {
+ csum = 0;
+ memcpy(&vdev->notify_copy, vdev->notify,
+ sizeof(struct vnic_devcmd_notify));
+ words = (u32 *)&vdev->notify_copy;
+ for (i = 1; i < nwords; i++)
+ csum += words[i];
+ } while (csum != words[0]);
+
+ return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+ u64 a0 = new_default_vlan, a1 = 0;
+ int wait = 1000;
+ int old_vlan = 0;
+
+ old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+ return (u16)old_vlan;
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+ if (vdev->linkstatus)
+ return *vdev->linkstatus;
+
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.msglvl;
+}
+
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.mtu;
+}
+
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_down_cnt;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode)
+{
+ vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+ struct vnic_dev *vdev)
+{
+ return vdev->intr_mode;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+ if (vdev) {
+ if (vdev->notify)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ if (vdev->linkstatus)
+ pci_free_consistent(vdev->pdev,
+ sizeof(u32),
+ vdev->linkstatus,
+ vdev->linkstatus_pa);
+ if (vdev->stats)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
+ if (vdev->fw_info)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
+ kfree(vdev);
+ }
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
+{
+ if (!vdev) {
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
+ if (!vdev)
+ return NULL;
+ }
+
+ vdev->priv = priv;
+ vdev->pdev = pdev;
+
+ if (vnic_dev_discover_res(vdev, bar))
+ goto err_out;
+
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ goto err_out;
+
+ return vdev;
+
+err_out:
+ vnic_dev_unregister(vdev);
+ return NULL;
+}
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
new file mode 100644
index 000000000..40d4195f5
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_dev_priv fnic_dev_priv
+#define vnic_dev_get_res_count fnic_dev_get_res_count
+#define vnic_dev_get_res fnic_dev_get_res
+#define vnic_dev_desc_ring_size fnic_dev_desc_ring_siz
+#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
+#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
+#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
+#define vnic_dev_cmd fnic_dev_cmd
+#define vnic_dev_fw_info fnic_dev_fw_info
+#define vnic_dev_spec fnic_dev_spec
+#define vnic_dev_stats_clear fnic_dev_stats_clear
+#define vnic_dev_stats_dump fnic_dev_stats_dump
+#define vnic_dev_hang_notify fnic_dev_hang_notify
+#define vnic_dev_packet_filter fnic_dev_packet_filter
+#define vnic_dev_add_addr fnic_dev_add_addr
+#define vnic_dev_del_addr fnic_dev_del_addr
+#define vnic_dev_mac_addr fnic_dev_mac_addr
+#define vnic_dev_notify_set fnic_dev_notify_set
+#define vnic_dev_notify_unset fnic_dev_notify_unset
+#define vnic_dev_link_status fnic_dev_link_status
+#define vnic_dev_port_speed fnic_dev_port_speed
+#define vnic_dev_msg_lvl fnic_dev_msg_lvl
+#define vnic_dev_mtu fnic_dev_mtu
+#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
+#define vnic_dev_close fnic_dev_close
+#define vnic_dev_enable fnic_dev_enable
+#define vnic_dev_disable fnic_dev_disable
+#define vnic_dev_open fnic_dev_open
+#define vnic_dev_open_done fnic_dev_open_done
+#define vnic_dev_init fnic_dev_init
+#define vnic_dev_soft_reset fnic_dev_soft_reset
+#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
+#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
+#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
+#define vnic_dev_unregister fnic_dev_unregister
+#define vnic_dev_register fnic_dev_register
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel(val >> 32, reg + 0x4UL);
+}
+#endif
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned long len;
+};
+
+struct vnic_dev_ring {
+ void *descs;
+ size_t size;
+ dma_addr_t base_addr;
+ size_t base_align;
+ void *descs_unaligned;
+ size_t size_unaligned;
+ dma_addr_t base_addr_unaligned;
+ unsigned int desc_size;
+ unsigned int desc_count;
+ unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count,
+ unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
+ unsigned int size, void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+ u16 new_default_vlan);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev,
+ struct vnic_dev_bar *bar);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
new file mode 100644
index 000000000..3e2fcbda6
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds..
+*/
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds..
+*/
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
+ /* hang detection notification */
+ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+ /* MAC address in (u48)a0 */
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+ /* disable/enable promisc mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+ CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
+
+ /* disable/enable all-multi mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+ CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
+
+ /* add addr from (u48)a0 */
+ CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+ /* del addr from (u48)a0 */
+ CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+ /* add VLAN id in (u16)a0 */
+ CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+ /* del VLAN id in (u16)a0 */
+ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+ /* nic_cfg in (u32)a0 */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+ /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+ /* initiate softreset */
+ CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+ /* softreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+ * (u8)a1=PXENV_UNDI_xxx */
+ CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* variant of CMD_INIT, with provisioning info
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u8)a1=INT13_CMD_xxx */
+ CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+ /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+ /* check fw capability of a cmd:
+ * in: (u32)a0=cmd
+ * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+ /* persistent binding info
+ * in: (u64)a0=paddr of arg
+ * (u32)a1=CMD_PERBI_XXX */
+ CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+ /* Interrupt Assert Register functionality
+ * in: (u16)a0=interrupt number to assert
+ */
+ CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+ /* initiate hangreset, like softreset after hang detected */
+ CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+ /* hangreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+ /*
+ * Set hw ingress packet vlan rewrite mode:
+ * in: (u32)a0=new vlan rewrite mode
+ * out: (u32)a0=old vlan rewrite mode */
+ CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+ /*
+ * in: (u16)a0=bdf of target vnic
+ * (u32)a1=cmd to proxy
+ * a2-a15=args to cmd in a1
+ * out: (u32)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd */
+ CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+ * or SR-IOV virtual vnic
+ */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * For HPP toggle:
+ * adapter-info-get
+ * in: (u64)a0=phsical address of buffer passed in from caller.
+ * (u16)a1=size of buffer specified in a0.
+ * out: (u64)a0=phsical address of buffer passed in from caller.
+ * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+ /*
+ * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /*
+ * Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
+};
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+ u8 oui[3];
+ u8 type;
+ u8 data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only. While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+ u32 status; /* RO */
+ u32 cmd; /* RW */
+ u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
new file mode 100644
index 000000000..4f4dc8793
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+ intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index)
+{
+ intr->index = index;
+ intr->vdev = vdev;
+
+ intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+ if (!intr->ctrl) {
+ printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
+ index);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+ iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+ iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
new file mode 100644
index 000000000..d5fb40e7c
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_intr_unmask fnic_intr_unmask
+#define vnic_intr_mask fnic_intr_mask
+#define vnic_intr_return_credits fnic_intr_return_credits
+#define vnic_intr_credits fnic_intr_credits
+#define vnic_intr_return_all_credits fnic_intr_return_all_credits
+#define vnic_intr_legacy_pba fnic_intr_legacy_pba
+#define vnic_intr_free fnic_intr_free
+#define vnic_intr_alloc fnic_intr_alloc
+#define vnic_intr_init fnic_intr_init
+#define vnic_intr_clean fnic_intr_clean
+
+#define VNIC_INTR_TIMER_MAX 0xffff
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+ u32 pad3;
+ u32 mask; /* 0x20 */
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+ iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+ unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT 16
+#define VNIC_INTR_RESET_TIMER_SHIFT 17
+
+ u32 int_credit_return = (credits & 0xffff) |
+ (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+ (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+ iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = vnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+ /* read PBA without clearing */
+ return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
new file mode 100644
index 000000000..f15b83eea
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_set_nic_cfg fnic_set_nic_cfg
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
+#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
+#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
+#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
+#define NIC_CFG_RSS_ENABLE (1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT 22
+#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
+#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+ u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+ ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+ ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_BITS_SHIFT) |
+ ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+ << NIC_CFG_RSS_BASE_CPU_SHIFT) |
+ ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+ << NIC_CFG_RSS_ENABLE_SHIFT) |
+ ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+ << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+ ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+ << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
new file mode 100644
index 000000000..2d842f79d
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_RSVD1,
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSVD2,
+ RES_TYPE_RSVD3,
+ RES_TYPE_RSVD4,
+ RES_TYPE_RSVD5,
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_RSVD6,
+ RES_TYPE_RSVD7,
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
new file mode 100644
index 000000000..fd2068f5a
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+ struct vnic_rq_buf *buf;
+ struct vnic_dev *vdev;
+ unsigned int i, j, count = rq->ring.desc_count;
+ unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+ vdev = rq->vdev;
+
+ for (i = 0; i < blks; i++) {
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+ if (!rq->bufs[i]) {
+ printk(KERN_ERR "Failed to alloc rq_bufs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = rq->bufs[i];
+ for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
+ buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+ buf->desc = (u8 *)rq->ring.descs +
+ rq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = rq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+ buf->next = rq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ rq->to_use = rq->to_clean = rq->bufs[0];
+ rq->buf_index = 0;
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = rq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+ for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+ kfree(rq->bufs[i]);
+ rq->bufs[i] = NULL;
+ }
+
+ rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ rq->index = index;
+ rq->vdev = vdev;
+
+ rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+ if (!rq->ctrl) {
+ printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_rq_disable(rq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_rq_alloc_bufs(rq);
+ if (err) {
+ vnic_rq_free(rq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ u32 fetch_index;
+
+ paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &rq->ctrl->ring_base);
+ iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+ iowrite32(cq_index, &rq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+ iowrite32(0, &rq->ctrl->dropped_packet_count);
+ iowrite32(0, &rq->ctrl->error_status);
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ rq->buf_index = 0;
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+ return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+ iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &rq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&rq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+ struct vnic_rq_buf *buf;
+ u32 fetch_index;
+
+ BUG_ON(ioread32(&rq->ctrl->enable));
+
+ buf = rq->to_clean;
+
+ while (vnic_rq_desc_used(rq) > 0) {
+
+ (*buf_clean)(rq, buf);
+
+ buf = rq->to_clean = buf->next;
+ rq->ring.desc_avail++;
+ }
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ rq->buf_index = 0;
+
+ vnic_dev_clear_desc_ring(&rq->ring);
+}
+
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
new file mode 100644
index 000000000..aebdfbd6a
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_rq_desc_avail fnic_rq_desc_avail
+#define vnic_rq_desc_used fnic_rq_desc_used
+#define vnic_rq_next_desc fnic_rq_next_desc
+#define vnic_rq_next_index fnic_rq_next_index
+#define vnic_rq_next_buf_index fnic_rq_next_buf_index
+#define vnic_rq_post fnic_rq_post
+#define vnic_rq_posting_soon fnic_rq_posting_soon
+#define vnic_rq_return_descs fnic_rq_return_descs
+#define vnic_rq_service fnic_rq_service
+#define vnic_rq_fill fnic_rq_fill
+#define vnic_rq_free fnic_rq_free
+#define vnic_rq_alloc fnic_rq_alloc
+#define vnic_rq_init fnic_rq_init
+#define vnic_rq_error_status fnic_rq_error_status
+#define vnic_rq_enable fnic_rq_enable
+#define vnic_rq_disable fnic_rq_disable
+#define vnic_rq_clean fnic_rq_clean
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 error_interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_offset; /* 0x40 */
+ u32 pad7;
+ u32 error_status; /* 0x48 */
+ u32 pad8;
+ u32 dropped_packet_count; /* 0x50 */
+ u32 pad9;
+ u32 dropped_packet_count_rc; /* 0x58 */
+ u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 64 entries */
+#define VNIC_RQ_BUF_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_SZ \
+ (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_rq_buf {
+ struct vnic_rq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int os_buf_index;
+ unsigned int len;
+ unsigned int index;
+ void *desc;
+};
+
+struct vnic_rq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+ struct vnic_rq_buf *to_use;
+ struct vnic_rq_buf *to_clean;
+ void *os_buf_head;
+ unsigned int buf_index;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+ /* how many does SW own? */
+ return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+ /* how many does HW own? */
+ return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+ return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+ return rq->to_use->index;
+}
+
+static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
+{
+ return rq->buf_index++;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ struct vnic_rq_buf *buf = rq->to_use;
+
+ buf->os_buf = os_buf;
+ buf->os_buf_index = os_buf_index;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ rq->to_use = buf;
+ rq->ring.desc_avail--;
+
+ /* Move the posted_index every nth descriptor
+ */
+
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
+#endif
+
+ if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &rq->ctrl->posted_index);
+ }
+}
+
+static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
+{
+ return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+ rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+ VNIC_RQ_RETURN_DESC,
+ VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline void vnic_rq_service(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ int desc_return, void (*buf_service)(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+ int skipped, void *opaque), void *opaque)
+{
+ struct vnic_rq_buf *buf;
+ int skipped;
+
+ buf = rq->to_clean;
+ while (1) {
+
+ skipped = (buf->index != completed_index);
+
+ (*buf_service)(rq, cq_desc, buf, skipped, opaque);
+
+ if (desc_return == VNIC_RQ_RETURN_DESC)
+ rq->ring.desc_avail++;
+
+ rq->to_clean = buf->next;
+
+ if (!skipped)
+ break;
+
+ buf = rq->to_clean;
+ }
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq))
+{
+ int err;
+
+ while (vnic_rq_desc_avail(rq) > 1) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
new file mode 100644
index 000000000..e343e1d0f
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_SCSI_H_
+#define _VNIC_SCSI_H_
+
+#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1
+#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1
+
+#define VNIC_FNIC_WQ_DESCS_MIN 64
+#define VNIC_FNIC_WQ_DESCS_MAX 128
+
+#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64
+#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512
+
+#define VNIC_FNIC_RQ_DESCS_MIN 64
+#define VNIC_FNIC_RQ_DESCS_MAX 128
+
+#define VNIC_FNIC_EDTOV_MIN 1000
+#define VNIC_FNIC_EDTOV_MAX 255000
+#define VNIC_FNIC_EDTOV_DEF 2000
+
+#define VNIC_FNIC_RATOV_MIN 1000
+#define VNIC_FNIC_RATOV_MAX 255000
+
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
+
+#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
+#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
+#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff
+
+#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000
+#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000
+
+#define VNIC_FNIC_PLOGI_RETRIES_MIN 0
+#define VNIC_FNIC_PLOGI_RETRIES_MAX 255
+#define VNIC_FNIC_PLOGI_RETRIES_DEF 8
+
+#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
+#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
+
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048
+
+#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
+#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
+
+#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0
+#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000
+
+#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0
+#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
+
+#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
+#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
+
+/* Device-specific region: scsi configuration */
+struct vnic_fc_config {
+ u64 node_wwn;
+ u64 port_wwn;
+ u32 flags;
+ u32 wq_enet_desc_count;
+ u32 wq_copy_desc_count;
+ u32 rq_desc_count;
+ u32 flogi_retries;
+ u32 flogi_timeout;
+ u32 plogi_retries;
+ u32 plogi_timeout;
+ u32 io_throttle_count;
+ u32 link_down_timeout;
+ u32 port_down_timeout;
+ u32 port_down_io_retries;
+ u32 luns_per_tgt;
+ u16 maxdatafieldsize;
+ u16 ed_tov;
+ u16 ra_tov;
+ u16 intr_timer;
+ u8 intr_timer_type;
+};
+
+#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
+#define VFCF_PERBI 0x2 /* persistent binding info available */
+#define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */
+
+#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
new file mode 100644
index 000000000..5372e23c1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
new file mode 100644
index 000000000..a41413546
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+ struct vnic_wq_buf *buf;
+ struct vnic_dev *vdev;
+ unsigned int i, j, count = wq->ring.desc_count;
+ unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+ vdev = wq->vdev;
+
+ for (i = 0; i < blks; i++) {
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+ if (!wq->bufs[i]) {
+ printk(KERN_ERR "Failed to alloc wq_bufs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = wq->bufs[i];
+ for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
+ buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+ buf->desc = (u8 *)wq->ring.descs +
+ wq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = wq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+ buf->next = wq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = wq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+ for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
+
+ wq->ctrl = NULL;
+
+}
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+ if (!wq->ctrl) {
+ printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_wq_disable(wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_wq_alloc_bufs(wq);
+ if (err) {
+ vnic_wq_free(wq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+ return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+ struct vnic_wq_buf *buf;
+
+ BUG_ON(ioread32(&wq->ctrl->enable));
+
+ buf = wq->to_clean;
+
+ while (vnic_wq_desc_used(wq) > 0) {
+
+ (*buf_clean)(wq, buf);
+
+ buf = wq->to_clean = buf->next;
+ wq->ring.desc_avail++;
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
new file mode 100644
index 000000000..5cd094f79
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_wq_desc_avail fnic_wq_desc_avail
+#define vnic_wq_desc_used fnic_wq_desc_used
+#define vnic_wq_next_desc fni_cwq_next_desc
+#define vnic_wq_post fnic_wq_post
+#define vnic_wq_service fnic_wq_service
+#define vnic_wq_free fnic_wq_free
+#define vnic_wq_alloc fnic_wq_alloc
+#define vnic_wq_init fnic_wq_init
+#define vnic_wq_error_status fnic_wq_error_status
+#define vnic_wq_enable fnic_wq_enable
+#define vnic_wq_disable fnic_wq_disable
+#define vnic_wq_clean fnic_wq_clean
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 dca_value; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_enable; /* 0x40 */
+ u32 pad7;
+ u32 error_interrupt_offset; /* 0x48 */
+ u32 pad8;
+ u32 error_status; /* 0x50 */
+ u32 pad9;
+};
+
+struct vnic_wq_buf {
+ struct vnic_wq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int len;
+ unsigned int index;
+ int sop;
+ void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_SZ \
+ (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+ struct vnic_wq_buf *to_use;
+ struct vnic_wq_buf *to_clean;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+ /* how many does SW own? */
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+ /* how many does HW own? */
+ return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+ return wq->to_use->desc;
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, int sop, int eop)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ buf->sop = sop;
+ buf->os_buf = eop ? os_buf : NULL;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ if (eop) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &wq->ctrl->posted_index);
+ }
+ wq->to_use = buf;
+
+ wq->ring.desc_avail--;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ void (*buf_service)(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+ void *opaque)
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+ while (1) {
+
+ (*buf_service)(wq, cq_desc, buf, opaque);
+
+ wq->ring.desc_avail++;
+
+ wq->to_clean = buf->next;
+
+ if (buf->index == completed_index)
+ break;
+
+ buf = wq->to_clean;
+ }
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
new file mode 100644
index 000000000..9eab7e7ca
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_wq_copy.h"
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ printk(KERN_ERR "Failed to disable Copy WQ[%d],"
+ " fetch index=%d, posted_index=%d\n",
+ wq->index, ioread32(&wq->ctrl->fetch_index),
+ ioread32(&wq->ctrl->posted_index));
+
+ return -ENODEV;
+}
+
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+ void (*q_clean)(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *wq_desc))
+{
+ BUG_ON(ioread32(&wq->ctrl->enable));
+
+ if (vnic_wq_copy_desc_in_use(wq))
+ vnic_wq_copy_service(wq, -1, q_clean);
+
+ wq->to_use_index = wq->to_clean_index = 0;
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
+
+void vnic_wq_copy_free(struct vnic_wq_copy *wq)
+{
+ struct vnic_dev *vdev;
+
+ vdev = wq->vdev;
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+ wq->ctrl = NULL;
+}
+
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+ unsigned int index, unsigned int desc_count,
+ unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+ wq->to_use_index = wq->to_clean_index = 0;
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+ if (!wq->ctrl) {
+ printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_wq_copy_disable(wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+}
+
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
new file mode 100644
index 000000000..6aff9740c
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_COPY_H_
+#define _VNIC_WQ_COPY_H_
+
+#include <linux/pci.h>
+#include "vnic_wq.h"
+#include "fcpio.h"
+
+#define VNIC_WQ_COPY_MAX 1
+
+struct vnic_wq_copy {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned to_use_index;
+ unsigned to_clean_index;
+};
+
+static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
+{
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
+{
+ return wq->ring.desc_count - 1 - wq->ring.desc_avail;
+}
+
+static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
+{
+ struct fcpio_host_req *desc = wq->ring.descs;
+ return &desc[wq->to_use_index];
+}
+
+static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
+{
+
+ ((wq->to_use_index + 1) == wq->ring.desc_count) ?
+ (wq->to_use_index = 0) : (wq->to_use_index++);
+ wq->ring.desc_avail--;
+
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+
+ iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
+}
+
+static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
+{
+ unsigned int cnt;
+
+ if (wq->to_clean_index <= index)
+ cnt = (index - wq->to_clean_index) + 1;
+ else
+ cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
+
+ wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
+ wq->ring.desc_avail += cnt;
+
+}
+
+static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
+ u16 completed_index,
+ void (*q_service)(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *wq_desc))
+{
+ struct fcpio_host_req *wq_desc = wq->ring.descs;
+ unsigned int curr_index;
+
+ while (1) {
+
+ if (q_service)
+ (*q_service)(wq, &wq_desc[wq->to_clean_index]);
+
+ wq->ring.desc_avail++;
+
+ curr_index = wq->to_clean_index;
+
+ /* increment the to-clean index so that we start
+ * with an unprocessed index next time we enter the loop
+ */
+ ((wq->to_clean_index + 1) == wq->ring.desc_count) ?
+ (wq->to_clean_index = 0) : (wq->to_clean_index++);
+
+ if (curr_index == completed_index)
+ break;
+
+ /* we have cleaned all the entries */
+ if ((completed_index == (u16)-1) &&
+ (wq->to_clean_index == wq->to_use_index))
+ break;
+ }
+}
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
+void vnic_wq_copy_free(struct vnic_wq_copy *wq);
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+ void (*q_clean)(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *wq_desc));
+
+#endif /* _VNIC_WQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
new file mode 100644
index 000000000..b121cbad1
--- /dev/null
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+ __le64 address;
+ __le16 length;
+ __le16 mss_loopback;
+ __le16 header_length_flags;
+ __le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+ u64 address, u16 length, u16 mss, u16 header_length,
+ u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+ u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+ desc->header_length_flags = cpu_to_le16(
+ (header_length & WQ_ENET_HDRLEN_MASK) |
+ (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+ (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+ (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+ (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+ (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+ desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+ u64 *address, u16 *length, u16 *mss, u16 *header_length,
+ u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+ u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ WQ_ENET_MSS_MASK;
+ *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ WQ_ENET_LOOPBACK_SHIFT) & 1);
+ *header_length = le16_to_cpu(desc->header_length_flags) &
+ WQ_ENET_HDRLEN_MASK;
+ *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+ *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+ *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+ *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+ *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+ *vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
new file mode 100644
index 000000000..f8d2478b1
--- /dev/null
+++ b/drivers/scsi/g_NCR5380.c
@@ -0,0 +1,741 @@
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * NCR53C400A extensions (c) 1996, Ingmar Baumgart
+ * ingmar@gonzo.schwaben.de
+ *
+ * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg
+ * ronald.van.cuijlenborg@tip.nl or nutty@dds.nl
+ *
+ * Added ISAPNP support for DTC436 adapters,
+ * Thomas Sailer, sailer@ife.ee.ethz.ch
+ */
+
+/*
+ * TODO : flesh out DMA support, find some one actually using this (I have
+ * a memory mapped Trantor board that works fine)
+ */
+
+/*
+ * The card is detected and initialized in one of several ways :
+ * 1. With command line overrides - NCR5380=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
+ * specified as an array of address, irq, dma, board tuples. Ie, for
+ * one board at 0x350, IRQ5, no dma, I could say
+ * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
+ *
+ * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ * 3. When included as a module, with arguments passed on the command line:
+ * ncr_irq=xx the interrupt
+ * ncr_addr=xx the port or base address (for port or memory
+ * mapped, resp.)
+ * ncr_dma=xx the DMA
+ * ncr_5380=1 to set up for a NCR5380 board
+ * ncr_53c400=1 to set up for a NCR53C400 board
+ * e.g.
+ * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+ * for a port mapped NCR5380 board or
+ * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+ * for a memory mapped NCR53C400 board with interrupts disabled.
+ *
+ * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ */
+
+/* settings for DTC3181E card with only Mustek scanner attached */
+#define USLEEP_POLL msecs_to_jiffies(10)
+#define USLEEP_SLEEP msecs_to_jiffies(200)
+#define USLEEP_WAITLONG msecs_to_jiffies(5000)
+
+#define AUTOPROBE_IRQ
+
+#ifdef CONFIG_SCSI_GENERIC_NCR53C400
+#define NCR53C400_PSEUDO_DMA 1
+#define PSEUDO_DMA
+#define NCR53C400
+#endif
+
+#include <asm/io.h>
+#include <linux/signal.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include "g_NCR5380.h"
+#include "NCR5380.h"
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/isapnp.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#define NCR_NOT_SET 0
+static int ncr_irq = NCR_NOT_SET;
+static int ncr_dma = NCR_NOT_SET;
+static int ncr_addr = NCR_NOT_SET;
+static int ncr_5380 = NCR_NOT_SET;
+static int ncr_53c400 = NCR_NOT_SET;
+static int ncr_53c400a = NCR_NOT_SET;
+static int dtc_3181e = NCR_NOT_SET;
+
+static struct override {
+ NCR5380_map_type NCR5380_map_name;
+ int irq;
+ int dma;
+ int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
+} overrides
+#ifdef GENERIC_NCR5380_OVERRIDE
+[] __initdata = GENERIC_NCR5380_OVERRIDE;
+#else
+[1] __initdata = { { 0,},};
+#endif
+
+#define NO_OVERRIDES ARRAY_SIZE(overrides)
+
+#ifndef MODULE
+
+/**
+ * internal_setup - handle lilo command string override
+ * @board: BOARD_* identifier for the board
+ * @str: unused
+ * @ints: numeric parameters
+ *
+ * Do LILO command line initialization of the overrides array. Display
+ * errors when needed
+ *
+ * Locks: none
+ */
+
+static void __init internal_setup(int board, char *str, int *ints)
+{
+ static int commandline_current = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ if (ints[0] != 2 && ints[0] != 3) {
+ printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
+ return;
+ }
+ break;
+ case BOARD_NCR53C400:
+ if (ints[0] != 2) {
+ printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ break;
+ case BOARD_NCR53C400A:
+ if (ints[0] != 2) {
+ printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ break;
+ case BOARD_DTC3181E:
+ if (ints[0] != 2) {
+ printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ break;
+ }
+
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ if (ints[0] == 3)
+ overrides[commandline_current].dma = ints[3];
+ else
+ overrides[commandline_current].dma = DMA_NONE;
+ overrides[commandline_current].board = board;
+ ++commandline_current;
+ }
+}
+
+
+/**
+ * do_NCR53C80_setup - set up entry point
+ * @str: unused
+ *
+ * Setup function invoked at boot to parse the ncr5380= command
+ * line.
+ */
+
+static int __init do_NCR5380_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ internal_setup(BOARD_NCR5380, str, ints);
+ return 1;
+}
+
+/**
+ * do_NCR53C400_setup - set up entry point
+ * @str: unused
+ * @ints: integer parameters from kernel setup code
+ *
+ * Setup function invoked at boot to parse the ncr53c400= command
+ * line.
+ */
+
+static int __init do_NCR53C400_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ internal_setup(BOARD_NCR53C400, str, ints);
+ return 1;
+}
+
+/**
+ * do_NCR53C400A_setup - set up entry point
+ * @str: unused
+ * @ints: integer parameters from kernel setup code
+ *
+ * Setup function invoked at boot to parse the ncr53c400a= command
+ * line.
+ */
+
+static int __init do_NCR53C400A_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ internal_setup(BOARD_NCR53C400A, str, ints);
+ return 1;
+}
+
+/**
+ * do_DTC3181E_setup - set up entry point
+ * @str: unused
+ * @ints: integer parameters from kernel setup code
+ *
+ * Setup function invoked at boot to parse the dtc3181e= command
+ * line.
+ */
+
+static int __init do_DTC3181E_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ internal_setup(BOARD_DTC3181E, str, ints);
+ return 1;
+}
+
+#endif
+
+/**
+ * generic_NCR5380_detect - look for NCR5380 controllers
+ * @tpnt: the scsi template
+ *
+ * Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E
+ * and DTC436(ISAPnP) controllers. If overrides have been set we use
+ * them.
+ *
+ * The caller supplied NCR5380_init function is invoked from here, before
+ * the interrupt line is taken.
+ *
+ * Locks: none
+ */
+
+static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
+{
+ static int current_override = 0;
+ int count;
+ unsigned int *ports;
+#ifndef SCSI_G_NCR5380_MEM
+ int i;
+ unsigned long region_size = 16;
+#endif
+ static unsigned int __initdata ncr_53c400a_ports[] = {
+ 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+ };
+ static unsigned int __initdata dtc_3181e_ports[] = {
+ 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+ };
+ int flags = 0;
+ struct Scsi_Host *instance;
+#ifdef SCSI_G_NCR5380_MEM
+ unsigned long base;
+ void __iomem *iomem;
+#endif
+
+ if (ncr_irq != NCR_NOT_SET)
+ overrides[0].irq = ncr_irq;
+ if (ncr_dma != NCR_NOT_SET)
+ overrides[0].dma = ncr_dma;
+ if (ncr_addr != NCR_NOT_SET)
+ overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr;
+ if (ncr_5380 != NCR_NOT_SET)
+ overrides[0].board = BOARD_NCR5380;
+ else if (ncr_53c400 != NCR_NOT_SET)
+ overrides[0].board = BOARD_NCR53C400;
+ else if (ncr_53c400a != NCR_NOT_SET)
+ overrides[0].board = BOARD_NCR53C400A;
+ else if (dtc_3181e != NCR_NOT_SET)
+ overrides[0].board = BOARD_DTC3181E;
+#ifndef SCSI_G_NCR5380_MEM
+ if (!current_override && isapnp_present()) {
+ struct pnp_dev *dev = NULL;
+ count = 0;
+ while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) {
+ if (count >= NO_OVERRIDES)
+ break;
+ if (pnp_device_attach(dev) < 0)
+ continue;
+ if (pnp_activate_dev(dev) < 0) {
+ printk(KERN_ERR "dtc436e probe: activate failed\n");
+ pnp_device_detach(dev);
+ continue;
+ }
+ if (!pnp_port_valid(dev, 0)) {
+ printk(KERN_ERR "dtc436e probe: no valid port\n");
+ pnp_device_detach(dev);
+ continue;
+ }
+ if (pnp_irq_valid(dev, 0))
+ overrides[count].irq = pnp_irq(dev, 0);
+ else
+ overrides[count].irq = NO_IRQ;
+ if (pnp_dma_valid(dev, 0))
+ overrides[count].dma = pnp_dma(dev, 0);
+ else
+ overrides[count].dma = DMA_NONE;
+ overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0);
+ overrides[count].board = BOARD_DTC3181E;
+ count++;
+ }
+ }
+#endif
+ tpnt->proc_name = "g_NCR5380";
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ if (!(overrides[current_override].NCR5380_map_name))
+ continue;
+
+ ports = NULL;
+ switch (overrides[current_override].board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA;
+ break;
+ case BOARD_NCR53C400:
+ flags = FLAG_NCR53C400;
+ break;
+ case BOARD_NCR53C400A:
+ flags = FLAG_NO_PSEUDO_DMA;
+ ports = ncr_53c400a_ports;
+ break;
+ case BOARD_DTC3181E:
+ flags = FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E;
+ ports = dtc_3181e_ports;
+ break;
+ }
+
+#ifndef SCSI_G_NCR5380_MEM
+ if (ports) {
+ /* wakeup sequence for the NCR53C400A and DTC3181E */
+
+ /* Disable the adapter and look for a free io port */
+ outb(0x59, 0x779);
+ outb(0xb9, 0x379);
+ outb(0xc5, 0x379);
+ outb(0xae, 0x379);
+ outb(0xa6, 0x379);
+ outb(0x00, 0x379);
+
+ if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
+ for (i = 0; ports[i]; i++) {
+ if (!request_region(ports[i], 16, "ncr53c80"))
+ continue;
+ if (overrides[current_override].NCR5380_map_name == ports[i])
+ break;
+ release_region(ports[i], 16);
+ } else
+ for (i = 0; ports[i]; i++) {
+ if (!request_region(ports[i], 16, "ncr53c80"))
+ continue;
+ if (inb(ports[i]) == 0xff)
+ break;
+ release_region(ports[i], 16);
+ }
+ if (ports[i]) {
+ /* At this point we have our region reserved */
+ outb(0x59, 0x779);
+ outb(0xb9, 0x379);
+ outb(0xc5, 0x379);
+ outb(0xae, 0x379);
+ outb(0xa6, 0x379);
+ outb(0x80 | i, 0x379); /* set io port to be used */
+ outb(0xc0, ports[i] + 9);
+ if (inb(ports[i] + 9) != 0x80)
+ continue;
+ else
+ overrides[current_override].NCR5380_map_name = ports[i];
+ } else
+ continue;
+ }
+ else
+ {
+ /* Not a 53C400A style setup - just grab */
+ if(!(request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380")))
+ continue;
+ region_size = NCR5380_region_size;
+ }
+#else
+ base = overrides[current_override].NCR5380_map_name;
+ if (!request_mem_region(base, NCR5380_region_size, "ncr5380"))
+ continue;
+ iomem = ioremap(base, NCR5380_region_size);
+ if (!iomem) {
+ release_mem_region(base, NCR5380_region_size);
+ continue;
+ }
+#endif
+ instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
+ if (instance == NULL) {
+#ifndef SCSI_G_NCR5380_MEM
+ release_region(overrides[current_override].NCR5380_map_name, region_size);
+#else
+ iounmap(iomem);
+ release_mem_region(base, NCR5380_region_size);
+#endif
+ continue;
+ }
+
+ instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;
+#ifndef SCSI_G_NCR5380_MEM
+ instance->n_io_port = region_size;
+#else
+ ((struct NCR5380_hostdata *)instance->hostdata)->iomem = iomem;
+#endif
+
+ NCR5380_init(instance, flags);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
+
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
+
+ if (instance->irq != NO_IRQ)
+ if (request_irq(instance->irq, generic_NCR5380_intr,
+ 0, "NCR5380", instance)) {
+ printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
+ }
+
+ if (instance->irq == NO_IRQ) {
+ printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/**
+ * generic_NCR5380_release_resources - free resources
+ * @instance: host adapter to clean up
+ *
+ * Free the generic interface resources from this adapter.
+ *
+ * Locks: none
+ */
+
+static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
+{
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
+
+#ifndef SCSI_G_NCR5380_MEM
+ release_region(instance->NCR5380_instance_name, instance->n_io_port);
+#else
+ iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem);
+ release_mem_region(instance->NCR5380_instance_name, NCR5380_region_size);
+#endif
+
+
+ return 0;
+}
+
+#ifdef BIOSPARAM
+/**
+ * generic_NCR5380_biosparam
+ * @disk: disk to compute geometry for
+ * @dev: device identifier for this disk
+ * @ip: sizes to fill in
+ *
+ * Generates a BIOS / DOS compatible H-C-S mapping for the specified
+ * device / size.
+ *
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping
+ * corresponds to that used by the BIOS / ASPI driver by running the linux
+ * fdisk program and matching the H_C_S coordinates to what DOS uses.
+ *
+ * Locks: none
+ */
+
+static int
+generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int *ip)
+{
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = capacity >> 11;
+ return 0;
+}
+#endif
+
+#ifdef NCR53C400_PSEUDO_DMA
+
+/**
+ * NCR5380_pread - pseudo DMA read
+ * @instance: adapter to read from
+ * @dst: buffer to read into
+ * @len: buffer length
+ *
+ * Perform a pseudo DMA mode read from an NCR53C400 or equivalent
+ * controller
+ */
+
+static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int bl;
+
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if ((bl = NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+ break;
+ }
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY);
+
+#ifndef SCSI_G_NCR5380_MEM
+ {
+ int i;
+ for (i = 0; i < 128; i++)
+ dst[start + i] = NCR5380_read(C400_HOST_BUFFER);
+ }
+#else
+ /* implies SCSI_G_NCR5380_MEM */
+ memcpy_fromio(dst + start, iomem + NCR53C400_host_buffer, 128);
+#endif
+ start += 128;
+ blocks--;
+ }
+
+ if (blocks) {
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ {
+ // FIXME - no timeout
+ }
+
+#ifndef SCSI_G_NCR5380_MEM
+ {
+ int i;
+ for (i = 0; i < 128; i++)
+ dst[start + i] = NCR5380_read(C400_HOST_BUFFER);
+ }
+#else
+ /* implies SCSI_G_NCR5380_MEM */
+ memcpy_fromio(dst + start, iomem + NCR53C400_host_buffer, 128);
+#endif
+ start += 128;
+ blocks--;
+ }
+
+ if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ printk("53C400r: no 53C80 gated irq after transfer");
+
+#if 0
+ /*
+ * DON'T DO THIS - THEY NEVER ARRIVE!
+ */
+ printk("53C400r: Waiting for 53C80 registers\n");
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+#endif
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
+ printk(KERN_ERR "53C400r: no end dma signal\n");
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ return 0;
+}
+
+/**
+ * NCR5380_write - pseudo DMA write
+ * @instance: adapter to read from
+ * @dst: buffer to read into
+ * @len: buffer length
+ *
+ * Perform a pseudo DMA mode read from an NCR53C400 or equivalent
+ * controller
+ */
+
+static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int bl;
+ int i;
+
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk(KERN_ERR "53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+
+ if ((bl = NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+ break;
+ }
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ; // FIXME - timeout
+#ifndef SCSI_G_NCR5380_MEM
+ {
+ for (i = 0; i < 128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start + i]);
+ }
+#else
+ /* implies SCSI_G_NCR5380_MEM */
+ memcpy_toio(iomem + NCR53C400_host_buffer, src + start, 128);
+#endif
+ start += 128;
+ blocks--;
+ }
+ if (blocks) {
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ; // FIXME - no timeout
+
+#ifndef SCSI_G_NCR5380_MEM
+ {
+ for (i = 0; i < 128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start + i]);
+ }
+#else
+ /* implies SCSI_G_NCR5380_MEM */
+ memcpy_toio(iomem + NCR53C400_host_buffer, src + start, 128);
+#endif
+ start += 128;
+ blocks--;
+ }
+
+#if 0
+ printk("53C400w: waiting for registers to be available\n");
+ THEY NEVER DO ! while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG);
+ printk("53C400w: Got em\n");
+#endif
+
+ /* Let's wait for this instead - could be ugly */
+ /* All documentation says to check for this. Maybe my hardware is too
+ * fast. Waiting for it seems to work fine! KLL
+ */
+ while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ ; // FIXME - no timeout
+
+ /*
+ * I know. i is certainly != 0 here but the loop is new. See previous
+ * comment.
+ */
+ if (i) {
+ if (!((i = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER))
+ printk(KERN_ERR "53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n", i);
+ } else
+ printk(KERN_ERR "53C400w: no 53C80 gated irq after transfer (last block)\n");
+
+#if 0
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
+ printk(KERN_ERR "53C400w: no end dma signal\n");
+ }
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ; // TIMEOUT
+ return 0;
+}
+#endif /* PSEUDO_DMA */
+
+/*
+ * Include the NCR5380 core code that we build our driver around
+ */
+
+#include "NCR5380.c"
+
+static struct scsi_host_template driver_template = {
+ .show_info = generic_NCR5380_show_info,
+ .name = "Generic NCR5380/NCR53C400 SCSI",
+ .detect = generic_NCR5380_detect,
+ .release = generic_NCR5380_release_resources,
+ .info = generic_NCR5380_info,
+ .queuecommand = generic_NCR5380_queue_command,
+ .eh_abort_handler = generic_NCR5380_abort,
+ .eh_bus_reset_handler = generic_NCR5380_bus_reset,
+ .bios_param = NCR5380_BIOSPARAM,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+#include <linux/module.h>
+#include "scsi_module.c"
+
+module_param(ncr_irq, int, 0);
+module_param(ncr_dma, int, 0);
+module_param(ncr_addr, int, 0);
+module_param(ncr_5380, int, 0);
+module_param(ncr_53c400, int, 0);
+module_param(ncr_53c400a, int, 0);
+module_param(dtc_3181e, int, 0);
+MODULE_LICENSE("GPL");
+
+#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
+static struct isapnp_device_id id_table[] = {
+ {
+ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
+ 0},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(isapnp, id_table);
+#endif
+
+__setup("ncr5380=", do_NCR5380_setup);
+__setup("ncr53c400=", do_NCR53C400_setup);
+__setup("ncr53c400a=", do_NCR53C400A_setup);
+__setup("dtc3181e=", do_DTC3181E_setup);
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
new file mode 100644
index 000000000..bea1a3b9b
--- /dev/null
+++ b/drivers/scsi/g_NCR5380.h
@@ -0,0 +1,108 @@
+/*
+ * Generic Generic NCR5380 driver defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ */
+
+#ifndef GENERIC_NCR5380_H
+#define GENERIC_NCR5380_H
+
+#ifdef NCR53C400
+#define BIOSPARAM
+#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
+#else
+#define NCR5380_BIOSPARAM NULL
+#endif
+
+#ifndef ASM
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+#define __STRVAL(x) #x
+#define STRVAL(x) __STRVAL(x)
+
+#ifndef SCSI_G_NCR5380_MEM
+
+#define NCR5380_map_config port
+#define NCR5380_map_type int
+#define NCR5380_map_name port
+#define NCR5380_instance_name io_port
+#define NCR53C400_register_offset 0
+#define NCR53C400_address_adjust 8
+
+#ifdef NCR53C400
+#define NCR5380_region_size 16
+#else
+#define NCR5380_region_size 8
+#endif
+
+#define NCR5380_read(reg) (inb(NCR5380_map_name + (reg)))
+#define NCR5380_write(reg, value) (outb((value), (NCR5380_map_name + (reg))))
+
+#define NCR5380_implementation_fields \
+ NCR5380_map_type NCR5380_map_name
+
+#define NCR5380_local_declare() \
+ register NCR5380_implementation_fields
+
+#define NCR5380_setup(instance) \
+ NCR5380_map_name = (NCR5380_map_type)((instance)->NCR5380_instance_name)
+
+#else
+/* therefore SCSI_G_NCR5380_MEM */
+
+#define NCR5380_map_config memory
+#define NCR5380_map_type unsigned long
+#define NCR5380_map_name base
+#define NCR5380_instance_name base
+#define NCR53C400_register_offset 0x108
+#define NCR53C400_address_adjust 0
+#define NCR53C400_mem_base 0x3880
+#define NCR53C400_host_buffer 0x3900
+#define NCR5380_region_size 0x3a00
+
+#define NCR5380_read(reg) readb(iomem + NCR53C400_mem_base + (reg))
+#define NCR5380_write(reg, value) writeb(value, iomem + NCR53C400_mem_base + (reg))
+
+#define NCR5380_implementation_fields \
+ NCR5380_map_type NCR5380_map_name; \
+ void __iomem *iomem;
+
+#define NCR5380_local_declare() \
+ register void __iomem *iomem
+
+#define NCR5380_setup(instance) \
+ iomem = (((struct NCR5380_hostdata *)(instance)->hostdata)->iomem)
+
+#endif
+
+#define NCR5380_intr generic_NCR5380_intr
+#define NCR5380_queue_command generic_NCR5380_queue_command
+#define NCR5380_abort generic_NCR5380_abort
+#define NCR5380_bus_reset generic_NCR5380_bus_reset
+#define NCR5380_pread generic_NCR5380_pread
+#define NCR5380_pwrite generic_NCR5380_pwrite
+#define NCR5380_info generic_NCR5380_info
+#define NCR5380_show_info generic_NCR5380_show_info
+
+#define BOARD_NCR5380 0
+#define BOARD_NCR53C400 1
+#define BOARD_NCR53C400A 2
+#define BOARD_DTC3181E 3
+
+#endif /* ndef ASM */
+#endif /* GENERIC_NCR5380_H */
+
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
new file mode 100644
index 000000000..8cdde71ba
--- /dev/null
+++ b/drivers/scsi/g_NCR5380_mmio.c
@@ -0,0 +1,10 @@
+/*
+ * There is probably a nicer way to do this but this one makes
+ * pretty obvious what is happening. We rebuild the same file with
+ * different options for mmio versus pio.
+ */
+
+#define SCSI_G_NCR5380_MEM
+
+#include "g_NCR5380.c"
+
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
new file mode 100644
index 000000000..71e138044
--- /dev/null
+++ b/drivers/scsi/gdth.c
@@ -0,0 +1,5235 @@
+/************************************************************************
+ * Linux driver for *
+ * ICP vortex GmbH: GDT ISA/EISA/PCI Disk Array Controllers *
+ * Intel Corporation: Storage RAID Controllers *
+ * *
+ * gdth.c *
+ * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
+ * Copyright (C) 2002-04 Intel Corporation *
+ * Copyright (C) 2003-06 Adaptec Inc. *
+ * <achim_leubner@adaptec.com> *
+ * *
+ * Additions/Fixes: *
+ * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
+ * Johannes Dinner <johannes_dinner@adaptec.com> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published *
+ * by the Free Software Foundation; either version 2 of the License, *
+ * or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this kernel; if not, write to the Free Software *
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
+ * *
+ * Linux kernel 2.6.x supported *
+ * *
+ ************************************************************************/
+
+/* All GDT Disk Array Controllers are fully supported by this driver.
+ * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the
+ * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
+ * list of all controller types.
+ *
+ * If you have one or more GDT3000/3020 EISA controllers with
+ * controller BIOS disabled, you have to set the IRQ values with the
+ * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are
+ * the IRQ values for the EISA controllers.
+ *
+ * After the optional list of IRQ values, other possible
+ * command line options are:
+ * disable:Y disable driver
+ * disable:N enable driver
+ * reserve_mode:0 reserve no drives for the raw service
+ * reserve_mode:1 reserve all not init., removable drives
+ * reserve_mode:2 reserve all not init. drives
+ * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
+ * h- controller no., b- channel no.,
+ * t- target ID, l- LUN
+ * reverse_scan:Y reverse scan order for PCI controllers
+ * reverse_scan:N scan PCI controllers like BIOS
+ * max_ids:x x - target ID count per channel (1..MAXID)
+ * rescan:Y rescan all channels/IDs
+ * rescan:N use all devices found until now
+ * hdr_channel:x x - number of virtual bus for host drives
+ * shared_access:Y disable driver reserve/release protocol to
+ * access a shared resource from several nodes,
+ * appropriate controller firmware required
+ * shared_access:N enable driver reserve/release protocol
+ * probe_eisa_isa:Y scan for EISA/ISA controllers
+ * probe_eisa_isa:N do not scan for EISA/ISA controllers
+ * force_dma32:Y use only 32 bit DMA mode
+ * force_dma32:N use 64 bit DMA mode, if supported
+ *
+ * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
+ * max_ids:127,rescan:N,hdr_channel:0,
+ * shared_access:Y,probe_eisa_isa:N,force_dma32:N".
+ * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
+ *
+ * When loading the gdth driver as a module, the same options are available.
+ * You can set the IRQs with "IRQ=...". However, the syntax to specify the
+ * options changes slightly. You must replace all ',' between options
+ * with ' ' and all ':' with '=' and you must use
+ * '1' in place of 'Y' and '0' in place of 'N'.
+ *
+ * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
+ * max_ids=127 rescan=0 hdr_channel=0 shared_access=0
+ * probe_eisa_isa=0 force_dma32=0"
+ * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
+ */
+
+/* The meaning of the Scsi_Pointer members in this driver is as follows:
+ * ptr: Chaining
+ * this_residual: unused
+ * buffer: unused
+ * dma_handle: unused
+ * buffers_residual: unused
+ * Status: unused
+ * Message: unused
+ * have_data_in: unused
+ * sent_command: unused
+ * phase: unused
+ */
+
+
+/* interrupt coalescing */
+/* #define INT_COAL */
+
+/* statistics */
+#define GDTH_STATISTICS
+
+#include <linux/module.h>
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#ifdef GDTH_RTC
+#include <linux/mc146818rtc.h>
+#endif
+#include <linux/reboot.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "gdth.h"
+
+static DEFINE_MUTEX(gdth_mutex);
+static void gdth_delay(int milliseconds);
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
+static irqreturn_t gdth_interrupt(int irq, void *dev_id);
+static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
+ int gdth_from_wait, int* pIndex);
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
+ Scsi_Cmnd *scp);
+static int gdth_async_event(gdth_ha_str *ha);
+static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
+
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
+static void gdth_next(gdth_ha_str *ha);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
+static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+ u16 idx, gdth_evt_data *evt);
+static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
+static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
+ gdth_evt_str *estr);
+static void gdth_clear_events(void);
+
+static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+ char *buffer, u16 count);
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
+
+static void gdth_enable_int(gdth_ha_str *ha);
+static int gdth_test_busy(gdth_ha_str *ha);
+static int gdth_get_cmd_index(gdth_ha_str *ha);
+static void gdth_release_event(gdth_ha_str *ha);
+static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+ u32 p1, u64 p2,u64 p3);
+static int gdth_search_drives(gdth_ha_str *ha);
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
+
+static const char *gdth_ctr_name(gdth_ha_str *ha);
+
+static int gdth_open(struct inode *inode, struct file *filep);
+static int gdth_close(struct inode *inode, struct file *filep);
+static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg);
+
+static void gdth_flush(gdth_ha_str *ha);
+static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ struct gdth_cmndinfo *cmndinfo);
+static void gdth_scsi_done(struct scsi_cmnd *scp);
+
+#ifdef DEBUG_GDTH
+static u8 DebugState = DEBUG_GDTH;
+
+#ifdef __SERIAL__
+#define MAX_SERBUF 160
+static void ser_init(void);
+static void ser_puts(char *str);
+static void ser_putc(char c);
+static int ser_printk(const char *fmt, ...);
+static char strbuf[MAX_SERBUF+1];
+#ifdef __COM2__
+#define COM_BASE 0x2f8
+#else
+#define COM_BASE 0x3f8
+#endif
+static void ser_init()
+{
+ unsigned port=COM_BASE;
+
+ outb(0x80,port+3);
+ outb(0,port+1);
+ /* 19200 Baud, if 9600: outb(12,port) */
+ outb(6, port);
+ outb(3,port+3);
+ outb(0,port+1);
+ /*
+ ser_putc('I');
+ ser_putc(' ');
+ */
+}
+
+static void ser_puts(char *str)
+{
+ char *ptr;
+
+ ser_init();
+ for (ptr=str;*ptr;++ptr)
+ ser_putc(*ptr);
+}
+
+static void ser_putc(char c)
+{
+ unsigned port=COM_BASE;
+
+ while ((inb(port+5) & 0x20)==0);
+ outb(c,port);
+ if (c==0x0a)
+ {
+ while ((inb(port+5) & 0x20)==0);
+ outb(0x0d,port);
+ }
+}
+
+static int ser_printk(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args,fmt);
+ i = vsprintf(strbuf,fmt,args);
+ ser_puts(strbuf);
+ va_end(args);
+ return i;
+}
+
+#define TRACE(a) {if (DebugState==1) {ser_printk a;}}
+#define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}}
+#define TRACE3(a) {if (DebugState!=0) {ser_printk a;}}
+
+#else /* !__SERIAL__ */
+#define TRACE(a) {if (DebugState==1) {printk a;}}
+#define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
+#define TRACE3(a) {if (DebugState!=0) {printk a;}}
+#endif
+
+#else /* !DEBUG */
+#define TRACE(a)
+#define TRACE2(a)
+#define TRACE3(a)
+#endif
+
+#ifdef GDTH_STATISTICS
+static u32 max_rq=0, max_index=0, max_sg=0;
+#ifdef INT_COAL
+static u32 max_int_coal=0;
+#endif
+static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
+static struct timer_list gdth_timer;
+#endif
+
+#define PTR2USHORT(a) (u16)(unsigned long)(a)
+#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
+#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
+
+#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
+
+#ifdef CONFIG_ISA
+static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
+#endif
+#if defined(CONFIG_EISA) || defined(CONFIG_ISA)
+static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
+#endif
+static u8 gdth_polling; /* polling if TRUE */
+static int gdth_ctr_count = 0; /* controller count */
+static LIST_HEAD(gdth_instances); /* controller list */
+static u8 gdth_write_through = FALSE; /* write through */
+static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
+static int elastidx;
+static int eoldidx;
+static int major;
+
+#define DIN 1 /* IN data direction */
+#define DOU 2 /* OUT data direction */
+#define DNO DIN /* no data transfer */
+#define DUN DIN /* unknown data direction */
+static u8 gdth_direction_tab[0x100] = {
+ DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
+ DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
+ DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
+ DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
+ DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
+ DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
+};
+
+/* LILO and modprobe/insmod parameters */
+/* IRQ list for GDT3000/3020 EISA controllers */
+static int irq[MAXHA] __initdata =
+{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
+/* disable driver flag */
+static int disable __initdata = 0;
+/* reserve flag */
+static int reserve_mode = 1;
+/* reserve list */
+static int reserve_list[MAX_RES_ARGS] =
+{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
+/* scan order for PCI controllers */
+static int reverse_scan = 0;
+/* virtual channel for the host drives */
+static int hdr_channel = 0;
+/* max. IDs per channel */
+static int max_ids = MAXID;
+/* rescan all IDs */
+static int rescan = 0;
+/* shared access */
+static int shared_access = 1;
+/* enable support for EISA and ISA controllers */
+static int probe_eisa_isa = 0;
+/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
+static int force_dma32 = 0;
+
+/* parameters for modprobe/insmod */
+module_param_array(irq, int, NULL, 0);
+module_param(disable, int, 0);
+module_param(reserve_mode, int, 0);
+module_param_array(reserve_list, int, NULL, 0);
+module_param(reverse_scan, int, 0);
+module_param(hdr_channel, int, 0);
+module_param(max_ids, int, 0);
+module_param(rescan, int, 0);
+module_param(shared_access, int, 0);
+module_param(probe_eisa_isa, int, 0);
+module_param(force_dma32, int, 0);
+MODULE_AUTHOR("Achim Leubner");
+MODULE_LICENSE("GPL");
+
+/* ioctl interface */
+static const struct file_operations gdth_fops = {
+ .unlocked_ioctl = gdth_unlocked_ioctl,
+ .open = gdth_open,
+ .release = gdth_close,
+ .llseek = noop_llseek,
+};
+
+#include "gdth_proc.h"
+#include "gdth_proc.c"
+
+static gdth_ha_str *gdth_find_ha(int hanum)
+{
+ gdth_ha_str *ha;
+
+ list_for_each_entry(ha, &gdth_instances, list)
+ if (hanum == ha->hanum)
+ return ha;
+
+ return NULL;
+}
+
+static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
+{
+ struct gdth_cmndinfo *priv = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ for (i=0; i<GDTH_MAXCMDS; ++i) {
+ if (ha->cmndinfo[i].index == 0) {
+ priv = &ha->cmndinfo[i];
+ memset(priv, 0, sizeof(*priv));
+ priv->index = i+1;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ return priv;
+}
+
+static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
+{
+ BUG_ON(!priv);
+ priv->index = 0;
+}
+
+static void gdth_delay(int milliseconds)
+{
+ if (milliseconds == 0) {
+ udelay(1);
+ } else {
+ mdelay(milliseconds);
+ }
+}
+
+static void gdth_scsi_done(struct scsi_cmnd *scp)
+{
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ int internal_command = cmndinfo->internal_command;
+
+ TRACE2(("gdth_scsi_done()\n"));
+
+ gdth_put_cmndinfo(cmndinfo);
+ scp->host_scribble = NULL;
+
+ if (internal_command)
+ complete((struct completion *)scp->request);
+ else
+ scp->scsi_done(scp);
+}
+
+int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
+ int timeout, u32 *info)
+{
+ gdth_ha_str *ha = shost_priv(sdev->host);
+ Scsi_Cmnd *scp;
+ struct gdth_cmndinfo cmndinfo;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int rval;
+
+ scp = kzalloc(sizeof(*scp), GFP_KERNEL);
+ if (!scp)
+ return -ENOMEM;
+
+ scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ if (!scp->sense_buffer) {
+ kfree(scp);
+ return -ENOMEM;
+ }
+
+ scp->device = sdev;
+ memset(&cmndinfo, 0, sizeof(cmndinfo));
+
+ /* use request field to save the ptr. to completion struct. */
+ scp->request = (struct request *)&wait;
+ scp->cmd_len = 12;
+ scp->cmnd = cmnd;
+ cmndinfo.priority = IOCTL_PRI;
+ cmndinfo.internal_cmd_str = gdtcmd;
+ cmndinfo.internal_command = 1;
+
+ TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
+ __gdth_queuecommand(ha, scp, &cmndinfo);
+
+ wait_for_completion(&wait);
+
+ rval = cmndinfo.status;
+ if (info)
+ *info = cmndinfo.info;
+ kfree(scp->sense_buffer);
+ kfree(scp);
+ return rval;
+}
+
+int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
+ int timeout, u32 *info)
+{
+ struct scsi_device *sdev = scsi_get_host_dev(shost);
+ int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
+
+ scsi_free_host_dev(sdev);
+ return rval;
+}
+
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
+{
+ *cyls = size /HEADS/SECS;
+ if (*cyls <= MAXCYLS) {
+ *heads = HEADS;
+ *secs = SECS;
+ } else { /* too high for 64*32 */
+ *cyls = size /MEDHEADS/MEDSECS;
+ if (*cyls <= MAXCYLS) {
+ *heads = MEDHEADS;
+ *secs = MEDSECS;
+ } else { /* too high for 127*63 */
+ *cyls = size /BIGHEADS/BIGSECS;
+ *heads = BIGHEADS;
+ *secs = BIGSECS;
+ }
+ }
+}
+
+/* controller search and initialization functions */
+#ifdef CONFIG_EISA
+static int __init gdth_search_eisa(u16 eisa_adr)
+{
+ u32 id;
+
+ TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
+ id = inl(eisa_adr+ID0REG);
+ if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */
+ if ((inb(eisa_adr+EISAREG) & 8) == 0)
+ return 0; /* not EISA configured */
+ return 1;
+ }
+ if (id == GDT3_ID) /* GDT3000 */
+ return 1;
+
+ return 0;
+}
+#endif /* CONFIG_EISA */
+
+#ifdef CONFIG_ISA
+static int __init gdth_search_isa(u32 bios_adr)
+{
+ void __iomem *addr;
+ u32 id;
+
+ TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
+ if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
+ id = readl(addr);
+ iounmap(addr);
+ if (id == GDT2_ID) /* GDT2000 */
+ return 1;
+ }
+ return 0;
+}
+#endif /* CONFIG_ISA */
+
+#ifdef CONFIG_PCI
+
+static bool gdth_search_vortex(u16 device)
+{
+ if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
+ return true;
+ if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
+ device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
+ return true;
+ if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
+ device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
+ return true;
+ return false;
+}
+
+static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
+static int gdth_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void gdth_pci_remove_one(struct pci_dev *pdev);
+static void gdth_remove_one(gdth_ha_str *ha);
+
+/* Vortex only makes RAID controllers.
+ * We do not really want to specify all 550 ids here, so wildcard match.
+ */
+static const struct pci_device_id gdthtable[] = {
+ { PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, gdthtable);
+
+static struct pci_driver gdth_pci_driver = {
+ .name = "gdth",
+ .id_table = gdthtable,
+ .probe = gdth_pci_init_one,
+ .remove = gdth_pci_remove_one,
+};
+
+static void gdth_pci_remove_one(struct pci_dev *pdev)
+{
+ gdth_ha_str *ha = pci_get_drvdata(pdev);
+
+ list_del(&ha->list);
+ gdth_remove_one(ha);
+
+ pci_disable_device(pdev);
+}
+
+static int gdth_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u16 vendor = pdev->vendor;
+ u16 device = pdev->device;
+ unsigned long base0, base1, base2;
+ int rc;
+ gdth_pci_str gdth_pcistr;
+ gdth_ha_str *ha = NULL;
+
+ TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
+ gdth_ctr_count, vendor, device));
+
+ memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
+
+ if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
+ return -ENODEV;
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if (gdth_ctr_count >= MAXHA)
+ return -EBUSY;
+
+ /* GDT PCI controller found, resources are already in pdev */
+ gdth_pcistr.pdev = pdev;
+ base0 = pci_resource_flags(pdev, 0);
+ base1 = pci_resource_flags(pdev, 1);
+ base2 = pci_resource_flags(pdev, 2);
+ if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */
+ device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
+ if (!(base0 & IORESOURCE_MEM))
+ return -ENODEV;
+ gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
+ } else { /* GDT6110, GDT6120, .. */
+ if (!(base0 & IORESOURCE_MEM) ||
+ !(base2 & IORESOURCE_MEM) ||
+ !(base1 & IORESOURCE_IO))
+ return -ENODEV;
+ gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
+ gdth_pcistr.io = pci_resource_start(pdev, 1);
+ }
+ TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
+ gdth_pcistr.pdev->bus->number,
+ PCI_SLOT(gdth_pcistr.pdev->devfn),
+ gdth_pcistr.irq,
+ gdth_pcistr.dpmem));
+
+ rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_EISA
+static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
+{
+ u32 retries,id;
+ u8 prot_ver,eisacf,i,irq_found;
+
+ TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
+
+ /* disable board interrupts, deinitialize services */
+ outb(0xff,eisa_adr+EDOORREG);
+ outb(0x00,eisa_adr+EDENABREG);
+ outb(0x00,eisa_adr+EINTENABREG);
+
+ outb(0xff,eisa_adr+LDOORREG);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (inb(eisa_adr+EDOORREG) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-EISA: Initialization error (DEINIT failed)\n");
+ return 0;
+ }
+ gdth_delay(1);
+ TRACE2(("wait for DEINIT: retries=%d\n",retries));
+ }
+ prot_ver = inb(eisa_adr+MAILBOXREG);
+ outb(0xff,eisa_adr+EDOORREG);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-EISA: Illegal protocol version\n");
+ return 0;
+ }
+ ha->bmic = eisa_adr;
+ ha->brd_phys = (u32)eisa_adr >> 12;
+
+ outl(0,eisa_adr+MAILBOXREG);
+ outl(0,eisa_adr+MAILBOXREG+4);
+ outl(0,eisa_adr+MAILBOXREG+8);
+ outl(0,eisa_adr+MAILBOXREG+12);
+
+ /* detect IRQ */
+ if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
+ ha->oem_id = OEM_ID_ICP;
+ ha->type = GDT_EISA;
+ ha->stype = id;
+ outl(1,eisa_adr+MAILBOXREG+8);
+ outb(0xfe,eisa_adr+LDOORREG);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (inb(eisa_adr+EDOORREG) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-EISA: Initialization error (get IRQ failed)\n");
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ ha->irq = inb(eisa_adr+MAILBOXREG);
+ outb(0xff,eisa_adr+EDOORREG);
+ TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
+ /* check the result */
+ if (ha->irq == 0) {
+ TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n"));
+ for (i = 0, irq_found = FALSE;
+ i < MAXHA && irq[i] != 0xff; ++i) {
+ if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) {
+ irq_found = TRUE;
+ break;
+ }
+ }
+ if (irq_found) {
+ ha->irq = irq[i];
+ irq[i] = 0;
+ printk("GDT-EISA: Can not detect controller IRQ,\n");
+ printk("Use IRQ setting from command line (IRQ = %d)\n",
+ ha->irq);
+ } else {
+ printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
+ printk("the controller BIOS or use command line parameters\n");
+ return 0;
+ }
+ }
+ } else {
+ eisacf = inb(eisa_adr+EISAREG) & 7;
+ if (eisacf > 4) /* level triggered */
+ eisacf -= 4;
+ ha->irq = gdth_irq_tab[eisacf];
+ ha->oem_id = OEM_ID_ICP;
+ ha->type = GDT_EISA;
+ ha->stype = id;
+ }
+
+ ha->dma64_support = 0;
+ return 1;
+}
+#endif /* CONFIG_EISA */
+
+#ifdef CONFIG_ISA
+static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
+{
+ register gdt2_dpram_str __iomem *dp2_ptr;
+ int i;
+ u8 irq_drq,prot_ver;
+ u32 retries;
+
+ TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
+
+ ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp2_ptr = ha->brd;
+ writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
+ /* reset interface area */
+ memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u));
+ if (readl(&dp2_ptr->u) != 0) {
+ printk("GDT-ISA: Initialization error (DPMEM write error)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, read DRQ and IRQ */
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ writeb(0x00, &dp2_ptr->io.irqen);
+ writeb(0x00, &dp2_ptr->u.ic.S_Status);
+ writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);
+
+ irq_drq = readb(&dp2_ptr->io.rq);
+ for (i=0; i<3; ++i) {
+ if ((irq_drq & 1)==0)
+ break;
+ irq_drq >>= 1;
+ }
+ ha->drq = gdth_drq_tab[i];
+
+ irq_drq = readb(&dp2_ptr->io.rq) >> 3;
+ for (i=1; i<5; ++i) {
+ if ((irq_drq & 1)==0)
+ break;
+ irq_drq >>= 1;
+ }
+ ha->irq = gdth_irq_tab[i];
+
+ /* deinitialize services */
+ writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp2_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-ISA: Initialization error (DEINIT failed)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp2_ptr->u.ic.Status);
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-ISA: Illegal protocol version\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ ha->oem_id = OEM_ID_ICP;
+ ha->type = GDT_ISA;
+ ha->ic_all_size = sizeof(dp2_ptr->u);
+ ha->stype= GDT2_ID;
+ ha->brd_phys = bios_adr >> 4;
+
+ /* special request to controller BIOS */
+ writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp2_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-ISA: Initialization error\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp2_ptr->u.ic.Status);
+ writeb(0xff, &dp2_ptr->io.irqdel);
+
+ ha->dma64_support = 0;
+ return 1;
+}
+#endif /* CONFIG_ISA */
+
+#ifdef CONFIG_PCI
+static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
+ gdth_ha_str *ha)
+{
+ register gdt6_dpram_str __iomem *dp6_ptr;
+ register gdt6c_dpram_str __iomem *dp6c_ptr;
+ register gdt6m_dpram_str __iomem *dp6m_ptr;
+ u32 retries;
+ u8 prot_ver;
+ u16 command;
+ int i, found = FALSE;
+
+ TRACE(("gdth_init_pci()\n"));
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ ha->oem_id = OEM_ID_INTEL;
+ else
+ ha->oem_id = OEM_ID_ICP;
+ ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
+ ha->stype = (u32)pdev->device;
+ ha->irq = pdev->irq;
+ ha->pdev = pdev;
+
+ if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */
+ TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
+ ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ /* check and reset interface area */
+ dp6_ptr = ha->brd;
+ writel(DPMEM_MAGIC, &dp6_ptr->u);
+ if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
+ printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
+ pcistr->dpmem);
+ found = FALSE;
+ for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
+ iounmap(ha->brd);
+ ha->brd = ioremap(i, sizeof(u16));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ if (readw(ha->brd) != 0xffff) {
+ TRACE2(("init_pci_old() address 0x%x busy\n", i));
+ continue;
+ }
+ iounmap(ha->brd);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
+ ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6_ptr = ha->brd;
+ writel(DPMEM_MAGIC, &dp6_ptr->u);
+ if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
+ printk("GDT-PCI: Use free address at 0x%x\n", i);
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) {
+ printk("GDT-PCI: No free address found!\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ }
+ memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
+ if (readl(&dp6_ptr->u) != 0) {
+ printk("GDT-PCI: Initialization error (DPMEM write error)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, deinit services */
+ writeb(0xff, &dp6_ptr->io.irqdel);
+ writeb(0x00, &dp6_ptr->io.irqen);
+ writeb(0x00, &dp6_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp6_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6_ptr->u.ic.S_Status);
+ writeb(0xff, &dp6_ptr->io.irqdel);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCI;
+ ha->ic_all_size = sizeof(dp6_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp6_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6_ptr->u.ic.S_Status);
+ writeb(0xff, &dp6_ptr->io.irqdel);
+
+ ha->dma64_support = 0;
+
+ } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
+ ha->plx = (gdt6c_plx_regs *)pcistr->io;
+ TRACE2(("init_pci_new() dpmem %lx irq %d\n",
+ pcistr->dpmem,ha->irq));
+ ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ /* check and reset interface area */
+ dp6c_ptr = ha->brd;
+ writel(DPMEM_MAGIC, &dp6c_ptr->u);
+ if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
+ printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
+ pcistr->dpmem);
+ found = FALSE;
+ for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
+ iounmap(ha->brd);
+ ha->brd = ioremap(i, sizeof(u16));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ if (readw(ha->brd) != 0xffff) {
+ TRACE2(("init_pci_plx() address 0x%x busy\n", i));
+ continue;
+ }
+ iounmap(ha->brd);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
+ ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6c_ptr = ha->brd;
+ writel(DPMEM_MAGIC, &dp6c_ptr->u);
+ if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
+ printk("GDT-PCI: Use free address at 0x%x\n", i);
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) {
+ printk("GDT-PCI: No free address found!\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ }
+ memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
+ if (readl(&dp6c_ptr->u) != 0) {
+ printk("GDT-PCI: Initialization error (DPMEM write error)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, deinit services */
+ outb(0x00,PTR2USHORT(&ha->plx->control1));
+ outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
+
+ writeb(0x00, &dp6c_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
+
+ outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
+
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6c_ptr->u.ic.Status);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCINEW;
+ ha->ic_all_size = sizeof(dp6c_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
+
+ outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
+
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6c_ptr->u.ic.S_Status);
+
+ ha->dma64_support = 0;
+
+ } else { /* MPR */
+ TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
+ ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+
+ /* manipulate config. space to enable DPMEM, start RP controller */
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ command |= 6;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ gdth_delay(1);
+
+ dp6m_ptr = ha->brd;
+
+ /* Ensure that it is safe to access the non HW portions of DPMEM.
+ * Aditional check needed for Xscale based RAID controllers */
+ while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
+ gdth_delay(1);
+
+ /* check and reset interface area */
+ writel(DPMEM_MAGIC, &dp6m_ptr->u);
+ if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
+ printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
+ pcistr->dpmem);
+ found = FALSE;
+ for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
+ iounmap(ha->brd);
+ ha->brd = ioremap(i, sizeof(u16));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ if (readw(ha->brd) != 0xffff) {
+ TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
+ continue;
+ }
+ iounmap(ha->brd);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
+ ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6m_ptr = ha->brd;
+ writel(DPMEM_MAGIC, &dp6m_ptr->u);
+ if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
+ printk("GDT-PCI: Use free address at 0x%x\n", i);
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) {
+ printk("GDT-PCI: No free address found!\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ }
+ memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));
+
+ /* disable board interrupts, deinit services */
+ writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
+ &dp6m_ptr->i960r.edoor_en_reg);
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0x00, &dp6m_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCIMPR;
+ ha->ic_all_size = sizeof(dp6m_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+
+ /* read FW version to detect 64-bit DMA support */
+ writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ iounmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+ if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
+ ha->dma64_support = 0;
+ else
+ ha->dma64_support = 1;
+ }
+
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+/* controller protocol functions */
+
+static void gdth_enable_int(gdth_ha_str *ha)
+{
+ unsigned long flags;
+ gdt2_dpram_str __iomem *dp2_ptr;
+ gdt6_dpram_str __iomem *dp6_ptr;
+ gdt6m_dpram_str __iomem *dp6m_ptr;
+
+ TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ if (ha->type == GDT_EISA) {
+ outb(0xff, ha->bmic + EDOORREG);
+ outb(0xff, ha->bmic + EDENABREG);
+ outb(0x01, ha->bmic + EINTENABREG);
+ } else if (ha->type == GDT_ISA) {
+ dp2_ptr = ha->brd;
+ writeb(1, &dp2_ptr->io.irqdel);
+ writeb(0, &dp2_ptr->u.ic.Cmd_Index);
+ writeb(1, &dp2_ptr->io.irqen);
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = ha->brd;
+ writeb(1, &dp6_ptr->io.irqdel);
+ writeb(0, &dp6_ptr->u.ic.Cmd_Index);
+ writeb(1, &dp6_ptr->io.irqen);
+ } else if (ha->type == GDT_PCINEW) {
+ outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
+ outb(0x03, PTR2USHORT(&ha->plx->control1));
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = ha->brd;
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
+ &dp6m_ptr->i960r.edoor_en_reg);
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+}
+
+/* return IStatus if interrupt was from this card else 0 */
+static u8 gdth_get_status(gdth_ha_str *ha)
+{
+ u8 IStatus = 0;
+
+ TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
+
+ if (ha->type == GDT_EISA)
+ IStatus = inb((u16)ha->bmic + EDOORREG);
+ else if (ha->type == GDT_ISA)
+ IStatus =
+ readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
+ else if (ha->type == GDT_PCI)
+ IStatus =
+ readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
+ else if (ha->type == GDT_PCINEW)
+ IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
+ else if (ha->type == GDT_PCIMPR)
+ IStatus =
+ readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
+
+ return IStatus;
+}
+
+static int gdth_test_busy(gdth_ha_str *ha)
+{
+ register int gdtsema0 = 0;
+
+ TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
+
+ if (ha->type == GDT_EISA)
+ gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
+ else if (ha->type == GDT_ISA)
+ gdtsema0 = (int)readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
+ else if (ha->type == GDT_PCI)
+ gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
+ else if (ha->type == GDT_PCINEW)
+ gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
+ else if (ha->type == GDT_PCIMPR)
+ gdtsema0 =
+ (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
+
+ return (gdtsema0 & 1);
+}
+
+
+static int gdth_get_cmd_index(gdth_ha_str *ha)
+{
+ int i;
+
+ TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
+
+ for (i=0; i<GDTH_MAXCMDS; ++i) {
+ if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
+ ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
+ ha->cmd_tab[i].service = ha->pccb->Service;
+ ha->pccb->CommandIndex = (u32)i+2;
+ return (i+2);
+ }
+ }
+ return 0;
+}
+
+
+static void gdth_set_sema0(gdth_ha_str *ha)
+{
+ TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
+
+ if (ha->type == GDT_EISA) {
+ outb(1, ha->bmic + SEMA0REG);
+ } else if (ha->type == GDT_ISA) {
+ writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
+ } else if (ha->type == GDT_PCI) {
+ writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
+ } else if (ha->type == GDT_PCINEW) {
+ outb(1, PTR2USHORT(&ha->plx->sema0_reg));
+ } else if (ha->type == GDT_PCIMPR) {
+ writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
+ }
+}
+
+
+static void gdth_copy_command(gdth_ha_str *ha)
+{
+ register gdth_cmd_str *cmd_ptr;
+ register gdt6m_dpram_str __iomem *dp6m_ptr;
+ register gdt6c_dpram_str __iomem *dp6c_ptr;
+ gdt6_dpram_str __iomem *dp6_ptr;
+ gdt2_dpram_str __iomem *dp2_ptr;
+ u16 cp_count,dp_offset,cmd_no;
+
+ TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
+
+ cp_count = ha->cmd_len;
+ dp_offset= ha->cmd_offs_dpmem;
+ cmd_no = ha->cmd_cnt;
+ cmd_ptr = ha->pccb;
+
+ ++ha->cmd_cnt;
+ if (ha->type == GDT_EISA)
+ return; /* no DPMEM, no copy */
+
+ /* set cpcount dword aligned */
+ if (cp_count & 3)
+ cp_count += (4 - (cp_count & 3));
+
+ ha->cmd_offs_dpmem += cp_count;
+
+ /* set offset and service, copy command to DPMEM */
+ if (ha->type == GDT_ISA) {
+ dp2_ptr = ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((u16)cmd_ptr->Service,
+ &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((u16)cmd_ptr->Service,
+ &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCINEW) {
+ dp6c_ptr = ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((u16)cmd_ptr->Service,
+ &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((u16)cmd_ptr->Service,
+ &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ }
+}
+
+
+static void gdth_release_event(gdth_ha_str *ha)
+{
+ TRACE(("gdth_release_event() hanum %d\n", ha->hanum));
+
+#ifdef GDTH_STATISTICS
+ {
+ u32 i,j;
+ for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
+ if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
+ ++i;
+ }
+ if (max_index < i) {
+ max_index = i;
+ TRACE3(("GDT: max_index = %d\n",(u16)i));
+ }
+ }
+#endif
+
+ if (ha->pccb->OpCode == GDT_INIT)
+ ha->pccb->Service |= 0x80;
+
+ if (ha->type == GDT_EISA) {
+ if (ha->pccb->OpCode == GDT_INIT) /* store DMA buffer */
+ outl(ha->ccb_phys, ha->bmic + MAILBOXREG);
+ outb(ha->pccb->Service, ha->bmic + LDOORREG);
+ } else if (ha->type == GDT_ISA) {
+ writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event);
+ } else if (ha->type == GDT_PCI) {
+ writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
+ } else if (ha->type == GDT_PCINEW) {
+ outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
+ } else if (ha->type == GDT_PCIMPR) {
+ writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
+ }
+}
+
+static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
+{
+ int answer_found = FALSE;
+ int wait_index = 0;
+
+ TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));
+
+ if (index == 0)
+ return 1; /* no wait required */
+
+ do {
+ __gdth_interrupt(ha, true, &wait_index);
+ if (wait_index == index) {
+ answer_found = TRUE;
+ break;
+ }
+ gdth_delay(1);
+ } while (--time);
+
+ while (gdth_test_busy(ha))
+ gdth_delay(0);
+
+ return (answer_found);
+}
+
+
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+ u32 p1, u64 p2, u64 p3)
+{
+ register gdth_cmd_str *cmd_ptr;
+ int retries,index;
+
+ TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
+
+ cmd_ptr = ha->pccb;
+ memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
+
+ /* make command */
+ for (retries = INIT_RETRIES;;) {
+ cmd_ptr->Service = service;
+ cmd_ptr->RequestBuffer = INTERNAL_CMND;
+ if (!(index=gdth_get_cmd_index(ha))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ gdth_set_sema0(ha);
+ cmd_ptr->OpCode = opcode;
+ cmd_ptr->BoardNode = LOCALBOARD;
+ if (service == CACHESERVICE) {
+ if (opcode == GDT_IOCTL) {
+ cmd_ptr->u.ioctl.subfunc = p1;
+ cmd_ptr->u.ioctl.channel = (u32)p2;
+ cmd_ptr->u.ioctl.param_size = (u16)p3;
+ cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
+ } else {
+ if (ha->cache_feat & GDT_64BIT) {
+ cmd_ptr->u.cache64.DeviceNo = (u16)p1;
+ cmd_ptr->u.cache64.BlockNo = p2;
+ } else {
+ cmd_ptr->u.cache.DeviceNo = (u16)p1;
+ cmd_ptr->u.cache.BlockNo = (u32)p2;
+ }
+ }
+ } else if (service == SCSIRAWSERVICE) {
+ if (ha->raw_feat & GDT_64BIT) {
+ cmd_ptr->u.raw64.direction = p1;
+ cmd_ptr->u.raw64.bus = (u8)p2;
+ cmd_ptr->u.raw64.target = (u8)p3;
+ cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
+ } else {
+ cmd_ptr->u.raw.direction = p1;
+ cmd_ptr->u.raw.bus = (u8)p2;
+ cmd_ptr->u.raw.target = (u8)p3;
+ cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
+ }
+ } else if (service == SCREENSERVICE) {
+ if (opcode == GDT_REALTIME) {
+ *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
+ *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
+ *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
+ }
+ }
+ ha->cmd_len = sizeof(gdth_cmd_str);
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_cnt = 0;
+ gdth_copy_command(ha);
+ gdth_release_event(ha);
+ gdth_delay(20);
+ if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
+ printk("GDT: Initialization error (timeout service %d)\n",service);
+ return 0;
+ }
+ if (ha->status != S_BSY || --retries == 0)
+ break;
+ gdth_delay(1);
+ }
+
+ return (ha->status != S_OK ? 0:1);
+}
+
+
+/* search for devices */
+
+static int gdth_search_drives(gdth_ha_str *ha)
+{
+ u16 cdev_cnt, i;
+ int ok;
+ u32 bus_no, drv_cnt, drv_no, j;
+ gdth_getch_str *chn;
+ gdth_drlist_str *drl;
+ gdth_iochan_str *ioc;
+ gdth_raw_iochan_str *iocr;
+ gdth_arcdl_str *alst;
+ gdth_alist_str *alst2;
+ gdth_oem_str_ioctl *oemstr;
+#ifdef INT_COAL
+ gdth_perf_modes *pmod;
+#endif
+
+#ifdef GDTH_RTC
+ u8 rtc[12];
+ unsigned long flags;
+#endif
+
+ TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
+ ok = 0;
+
+ /* initialize controller services, at first: screen service */
+ ha->screen_feat = 0;
+ if (!force_dma32) {
+ ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
+ if (ok)
+ ha->screen_feat = GDT_64BIT;
+ }
+ if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
+ ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
+ if (!ok) {
+ printk("GDT-HA %d: Initialization error screen service (code %d)\n",
+ ha->hanum, ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
+
+#ifdef GDTH_RTC
+ /* read realtime clock info, send to controller */
+ /* 1. wait for the falling edge of update flag */
+ spin_lock_irqsave(&rtc_lock, flags);
+ for (j = 0; j < 1000000; ++j)
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ break;
+ for (j = 0; j < 1000000; ++j)
+ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ break;
+ /* 2. read info */
+ do {
+ for (j = 0; j < 12; ++j)
+ rtc[j] = CMOS_READ(j);
+ } while (rtc[0] != CMOS_READ(0));
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
+ *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
+ /* 3. send to controller firmware */
+ gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
+ *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
+#endif
+
+ /* unfreeze all IOs */
+ gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
+
+ /* initialize cache service */
+ ha->cache_feat = 0;
+ if (!force_dma32) {
+ ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
+ 0, 0);
+ if (ok)
+ ha->cache_feat = GDT_64BIT;
+ }
+ if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
+ ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
+ if (!ok) {
+ printk("GDT-HA %d: Initialization error cache service (code %d)\n",
+ ha->hanum, ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
+ cdev_cnt = (u16)ha->info;
+ ha->fw_vers = ha->service;
+
+#ifdef INT_COAL
+ if (ha->type == GDT_PCIMPR) {
+ /* set perf. modes */
+ pmod = (gdth_perf_modes *)ha->pscratch;
+ pmod->version = 1;
+ pmod->st_mode = 1; /* enable one status buffer */
+ *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
+ pmod->st_buff_indx1 = COALINDEX;
+ pmod->st_buff_addr2 = 0;
+ pmod->st_buff_u_addr2 = 0;
+ pmod->st_buff_indx2 = 0;
+ pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS;
+ pmod->cmd_mode = 0; // disable all cmd buffers
+ pmod->cmd_buff_addr1 = 0;
+ pmod->cmd_buff_u_addr1 = 0;
+ pmod->cmd_buff_indx1 = 0;
+ pmod->cmd_buff_addr2 = 0;
+ pmod->cmd_buff_u_addr2 = 0;
+ pmod->cmd_buff_indx2 = 0;
+ pmod->cmd_buff_size = 0;
+ pmod->reserved1 = 0;
+ pmod->reserved2 = 0;
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SET_PERF_MODES,
+ INVALID_CHANNEL,sizeof(gdth_perf_modes))) {
+ printk("GDT-HA %d: Interrupt coalescing activated\n", ha->hanum);
+ }
+ }
+#endif
+
+ /* detect number of buses - try new IOCTL */
+ iocr = (gdth_raw_iochan_str *)ha->pscratch;
+ iocr->hdr.version = 0xffffffff;
+ iocr->hdr.list_entries = MAXBUS;
+ iocr->hdr.first_chan = 0;
+ iocr->hdr.last_chan = MAXBUS-1;
+ iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
+ INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
+ TRACE2(("IOCHAN_RAW_DESC supported!\n"));
+ ha->bus_cnt = iocr->hdr.chan_count;
+ for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
+ if (iocr->list[bus_no].proc_id < MAXID)
+ ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
+ else
+ ha->bus_id[bus_no] = 0xff;
+ }
+ } else {
+ /* old method */
+ chn = (gdth_getch_str *)ha->pscratch;
+ for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
+ chn->channel_no = bus_no;
+ if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
+ SCSI_CHAN_CNT | L_CTRL_PATTERN,
+ IO_CHANNEL | INVALID_CHANNEL,
+ sizeof(gdth_getch_str))) {
+ if (bus_no == 0) {
+ printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
+ ha->hanum, ha->status);
+ return 0;
+ }
+ break;
+ }
+ if (chn->siop_id < MAXID)
+ ha->bus_id[bus_no] = chn->siop_id;
+ else
+ ha->bus_id[bus_no] = 0xff;
+ }
+ ha->bus_cnt = (u8)bus_no;
+ }
+ TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
+
+ /* read cache configuration */
+ if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
+ INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
+ printk("GDT-HA %d: Initialization error cache service (code %d)\n",
+ ha->hanum, ha->status);
+ return 0;
+ }
+ ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
+ TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
+ ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
+ ha->cpar.write_back,ha->cpar.block_size));
+
+ /* read board info and features */
+ ha->more_proc = FALSE;
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
+ INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
+ memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
+ sizeof(gdth_binfo_str));
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
+ INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
+ TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
+ ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
+ ha->more_proc = TRUE;
+ }
+ } else {
+ TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
+ strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
+ }
+ TRACE2(("Controller name: %s\n",ha->binfo.type_string));
+
+ /* read more informations */
+ if (ha->more_proc) {
+ /* physical drives, channel addresses */
+ ioc = (gdth_iochan_str *)ha->pscratch;
+ ioc->hdr.version = 0xffffffff;
+ ioc->hdr.list_entries = MAXBUS;
+ ioc->hdr.first_chan = 0;
+ ioc->hdr.last_chan = MAXBUS-1;
+ ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
+ INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
+ for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
+ ha->raw[bus_no].address = ioc->list[bus_no].address;
+ ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
+ }
+ } else {
+ for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
+ ha->raw[bus_no].address = IO_CHANNEL;
+ ha->raw[bus_no].local_no = bus_no;
+ }
+ }
+ for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
+ chn = (gdth_getch_str *)ha->pscratch;
+ chn->channel_no = ha->raw[bus_no].local_no;
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
+ SCSI_CHAN_CNT | L_CTRL_PATTERN,
+ ha->raw[bus_no].address | INVALID_CHANNEL,
+ sizeof(gdth_getch_str))) {
+ ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
+ TRACE2(("Channel %d: %d phys. drives\n",
+ bus_no,chn->drive_cnt));
+ }
+ if (ha->raw[bus_no].pdev_cnt > 0) {
+ drl = (gdth_drlist_str *)ha->pscratch;
+ drl->sc_no = ha->raw[bus_no].local_no;
+ drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
+ SCSI_DR_LIST | L_CTRL_PATTERN,
+ ha->raw[bus_no].address | INVALID_CHANNEL,
+ sizeof(gdth_drlist_str))) {
+ for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
+ ha->raw[bus_no].id_list[j] = drl->sc_list[j];
+ } else {
+ ha->raw[bus_no].pdev_cnt = 0;
+ }
+ }
+ }
+
+ /* logical drives */
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
+ INVALID_CHANNEL,sizeof(u32))) {
+ drv_cnt = *(u32 *)ha->pscratch;
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
+ INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
+ for (j = 0; j < drv_cnt; ++j) {
+ drv_no = ((u32 *)ha->pscratch)[j];
+ if (drv_no < MAX_LDRIVES) {
+ ha->hdr[drv_no].is_logdrv = TRUE;
+ TRACE2(("Drive %d is log. drive\n",drv_no));
+ }
+ }
+ }
+ alst = (gdth_arcdl_str *)ha->pscratch;
+ alst->entries_avail = MAX_LDRIVES;
+ alst->first_entry = 0;
+ alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
+ ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
+ INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
+ (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
+ for (j = 0; j < alst->entries_init; ++j) {
+ ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
+ ha->hdr[j].is_master = alst->list[j].is_master;
+ ha->hdr[j].is_parity = alst->list[j].is_parity;
+ ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
+ ha->hdr[j].master_no = alst->list[j].cd_handle;
+ }
+ } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
+ ARRAY_DRV_LIST | LA_CTRL_PATTERN,
+ 0, 35 * sizeof(gdth_alist_str))) {
+ for (j = 0; j < 35; ++j) {
+ alst2 = &((gdth_alist_str *)ha->pscratch)[j];
+ ha->hdr[j].is_arraydrv = alst2->is_arrayd;
+ ha->hdr[j].is_master = alst2->is_master;
+ ha->hdr[j].is_parity = alst2->is_parity;
+ ha->hdr[j].is_hotfix = alst2->is_hotfix;
+ ha->hdr[j].master_no = alst2->cd_handle;
+ }
+ }
+ }
+ }
+
+ /* initialize raw service */
+ ha->raw_feat = 0;
+ if (!force_dma32) {
+ ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
+ if (ok)
+ ha->raw_feat = GDT_64BIT;
+ }
+ if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
+ ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
+ if (!ok) {
+ printk("GDT-HA %d: Initialization error raw service (code %d)\n",
+ ha->hanum, ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
+
+ /* set/get features raw service (scatter/gather) */
+ if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
+ 0, 0)) {
+ TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
+ if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
+ TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
+ ha->info));
+ ha->raw_feat |= (u16)ha->info;
+ }
+ }
+
+ /* set/get features cache service (equal to raw service) */
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
+ SCATTER_GATHER,0)) {
+ TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
+ TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
+ ha->info));
+ ha->cache_feat |= (u16)ha->info;
+ }
+ }
+
+ /* reserve drives for raw service */
+ if (reserve_mode != 0) {
+ gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
+ reserve_mode == 1 ? 1 : 3, 0, 0);
+ TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
+ ha->status));
+ }
+ for (i = 0; i < MAX_RES_ARGS; i += 4) {
+ if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
+ reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
+ TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
+ reserve_list[i], reserve_list[i+1],
+ reserve_list[i+2], reserve_list[i+3]));
+ if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
+ reserve_list[i+1], reserve_list[i+2] |
+ (reserve_list[i+3] << 8))) {
+ printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
+ ha->hanum, ha->status);
+ }
+ }
+ }
+
+ /* Determine OEM string using IOCTL */
+ oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
+ oemstr->params.ctl_version = 0x01;
+ oemstr->params.buffer_size = sizeof(oemstr->text);
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
+ CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
+ sizeof(gdth_oem_str_ioctl))) {
+ TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
+ printk("GDT-HA %d: Vendor: %s Name: %s\n",
+ ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
+ /* Save the Host Drive inquiry data */
+ strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
+ sizeof(ha->oem_name));
+ } else {
+ /* Old method, based on PCI ID */
+ TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
+ printk("GDT-HA %d: Name: %s\n",
+ ha->hanum, ha->binfo.type_string);
+ if (ha->oem_id == OEM_ID_INTEL)
+ strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name));
+ else
+ strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name));
+ }
+
+ /* scanning for host drives */
+ for (i = 0; i < cdev_cnt; ++i)
+ gdth_analyse_hdrive(ha, i);
+
+ TRACE(("gdth_search_drives() OK\n"));
+ return 1;
+}
+
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
+{
+ u32 drv_cyls;
+ int drv_hds, drv_secs;
+
+ TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
+ if (hdrive >= MAX_HDRIVES)
+ return 0;
+
+ if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
+ return 0;
+ ha->hdr[hdrive].present = TRUE;
+ ha->hdr[hdrive].size = ha->info;
+
+ /* evaluate mapping (sectors per head, heads per cylinder) */
+ ha->hdr[hdrive].size &= ~SECS32;
+ if (ha->info2 == 0) {
+ gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
+ } else {
+ drv_hds = ha->info2 & 0xff;
+ drv_secs = (ha->info2 >> 8) & 0xff;
+ drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
+ }
+ ha->hdr[hdrive].heads = (u8)drv_hds;
+ ha->hdr[hdrive].secs = (u8)drv_secs;
+ /* round size */
+ ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
+
+ if (ha->cache_feat & GDT_64BIT) {
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
+ && ha->info2 != 0) {
+ ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
+ }
+ }
+ TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
+ hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));
+
+ /* get informations about device */
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
+ TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
+ hdrive,ha->info));
+ ha->hdr[hdrive].devtype = (u16)ha->info;
+ }
+
+ /* cluster info */
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
+ TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
+ hdrive,ha->info));
+ if (!shared_access)
+ ha->hdr[hdrive].cluster_type = (u8)ha->info;
+ }
+
+ /* R/W attributes */
+ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
+ TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
+ hdrive,ha->info));
+ ha->hdr[hdrive].rw_attribs = (u8)ha->info;
+ }
+
+ return 1;
+}
+
+
+/* command queueing/sending functions */
+
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
+{
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ register Scsi_Cmnd *pscp;
+ register Scsi_Cmnd *nscp;
+ unsigned long flags;
+
+ TRACE(("gdth_putq() priority %d\n",priority));
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ if (!cmndinfo->internal_command)
+ cmndinfo->priority = priority;
+
+ if (ha->req_first==NULL) {
+ ha->req_first = scp; /* queue was empty */
+ scp->SCp.ptr = NULL;
+ } else { /* queue not empty */
+ pscp = ha->req_first;
+ nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ /* priority: 0-highest,..,0xff-lowest */
+ while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
+ pscp = nscp;
+ nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ }
+ pscp->SCp.ptr = (char *)scp;
+ scp->SCp.ptr = (char *)nscp;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+#ifdef GDTH_STATISTICS
+ flags = 0;
+ for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ ++flags;
+ if (max_rq < flags) {
+ max_rq = flags;
+ TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
+ }
+#endif
+}
+
+static void gdth_next(gdth_ha_str *ha)
+{
+ register Scsi_Cmnd *pscp;
+ register Scsi_Cmnd *nscp;
+ u8 b, t, l, firsttime;
+ u8 this_cmd, next_cmd;
+ unsigned long flags = 0;
+ int cmd_index;
+
+ TRACE(("gdth_next() hanum %d\n", ha->hanum));
+ if (!gdth_polling)
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
+ this_cmd = firsttime = TRUE;
+ next_cmd = gdth_polling ? FALSE:TRUE;
+ cmd_index = 0;
+
+ for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
+ struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
+ if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
+ pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ if (!nscp_cmndinfo->internal_command) {
+ b = nscp->device->channel;
+ t = nscp->device->id;
+ l = nscp->device->lun;
+ if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
+ if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
+ (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
+ continue;
+ }
+ } else
+ b = t = l = 0;
+
+ if (firsttime) {
+ if (gdth_test_busy(ha)) { /* controller busy ? */
+ TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
+ if (!gdth_polling) {
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return;
+ }
+ while (gdth_test_busy(ha))
+ gdth_delay(1);
+ }
+ firsttime = FALSE;
+ }
+
+ if (!nscp_cmndinfo->internal_command) {
+ if (nscp_cmndinfo->phase == -1) {
+ nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. */
+ if (nscp->cmnd[0] == TEST_UNIT_READY) {
+ TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
+ b, t, l));
+ /* TEST_UNIT_READY -> set scan mode */
+ if ((ha->scan_mode & 0x0f) == 0) {
+ if (b == 0 && t == 0 && l == 0) {
+ ha->scan_mode |= 1;
+ TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
+ }
+ } else if ((ha->scan_mode & 0x0f) == 1) {
+ if (b == 0 && ((t == 0 && l == 1) ||
+ (t == 1 && l == 0))) {
+ nscp_cmndinfo->OpCode = GDT_SCAN_START;
+ nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
+ | SCSIRAWSERVICE;
+ ha->scan_mode = 0x12;
+ TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
+ ha->scan_mode));
+ } else {
+ ha->scan_mode &= 0x10;
+ TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
+ }
+ } else if (ha->scan_mode == 0x12) {
+ if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
+ nscp_cmndinfo->phase = SCSIRAWSERVICE;
+ nscp_cmndinfo->OpCode = GDT_SCAN_END;
+ ha->scan_mode &= 0x10;
+ TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
+ ha->scan_mode));
+ }
+ }
+ }
+ if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
+ nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
+ (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
+ /* always GDT_CLUST_INFO! */
+ nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
+ }
+ }
+ }
+
+ if (nscp_cmndinfo->OpCode != -1) {
+ if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
+ if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
+ this_cmd = FALSE;
+ next_cmd = FALSE;
+ } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
+ if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
+ this_cmd = FALSE;
+ next_cmd = FALSE;
+ } else {
+ memset((char*)nscp->sense_buffer,0,16);
+ nscp->sense_buffer[0] = 0x70;
+ nscp->sense_buffer[2] = NOT_READY;
+ nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ if (!nscp_cmndinfo->wait_for_completion)
+ nscp_cmndinfo->wait_for_completion++;
+ else
+ gdth_scsi_done(nscp);
+ }
+ } else if (gdth_cmnd_priv(nscp)->internal_command) {
+ if (!(cmd_index=gdth_special_cmd(ha, nscp)))
+ this_cmd = FALSE;
+ next_cmd = FALSE;
+ } else if (b != ha->virt_bus) {
+ if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
+ !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
+ this_cmd = FALSE;
+ else
+ ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
+ } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
+ TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
+ nscp->cmnd[0], b, t, l));
+ nscp->result = DID_BAD_TARGET << 16;
+ if (!nscp_cmndinfo->wait_for_completion)
+ nscp_cmndinfo->wait_for_completion++;
+ else
+ gdth_scsi_done(nscp);
+ } else {
+ switch (nscp->cmnd[0]) {
+ case TEST_UNIT_READY:
+ case INQUIRY:
+ case REQUEST_SENSE:
+ case READ_CAPACITY:
+ case VERIFY:
+ case START_STOP:
+ case MODE_SENSE:
+ case SERVICE_ACTION_IN_16:
+ TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+ nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+ nscp->cmnd[4],nscp->cmnd[5]));
+ if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
+ /* return UNIT_ATTENTION */
+ TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
+ nscp->cmnd[0], t));
+ ha->hdr[t].media_changed = FALSE;
+ memset((char*)nscp->sense_buffer,0,16);
+ nscp->sense_buffer[0] = 0x70;
+ nscp->sense_buffer[2] = UNIT_ATTENTION;
+ nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ if (!nscp_cmndinfo->wait_for_completion)
+ nscp_cmndinfo->wait_for_completion++;
+ else
+ gdth_scsi_done(nscp);
+ } else if (gdth_internal_cache_cmd(ha, nscp))
+ gdth_scsi_done(nscp);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+ nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+ nscp->cmnd[4],nscp->cmnd[5]));
+ if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
+ TRACE(("Prevent r. nonremov. drive->do nothing\n"));
+ nscp->result = DID_OK << 16;
+ nscp->sense_buffer[0] = 0;
+ if (!nscp_cmndinfo->wait_for_completion)
+ nscp_cmndinfo->wait_for_completion++;
+ else
+ gdth_scsi_done(nscp);
+ } else {
+ nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
+ TRACE(("Prevent/allow r. %d rem. drive %d\n",
+ nscp->cmnd[4],nscp->cmnd[3]));
+ if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
+ this_cmd = FALSE;
+ }
+ break;
+
+ case RESERVE:
+ case RELEASE:
+ TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
+ "RESERVE" : "RELEASE"));
+ if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
+ this_cmd = FALSE;
+ break;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_16:
+ case WRITE_16:
+ if (ha->hdr[t].media_changed) {
+ /* return UNIT_ATTENTION */
+ TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
+ nscp->cmnd[0], t));
+ ha->hdr[t].media_changed = FALSE;
+ memset((char*)nscp->sense_buffer,0,16);
+ nscp->sense_buffer[0] = 0x70;
+ nscp->sense_buffer[2] = UNIT_ATTENTION;
+ nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ if (!nscp_cmndinfo->wait_for_completion)
+ nscp_cmndinfo->wait_for_completion++;
+ else
+ gdth_scsi_done(nscp);
+ } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
+ this_cmd = FALSE;
+ break;
+
+ default:
+ TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
+ nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+ nscp->cmnd[4],nscp->cmnd[5]));
+ printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
+ ha->hanum, nscp->cmnd[0]);
+ nscp->result = DID_ABORT << 16;
+ if (!nscp_cmndinfo->wait_for_completion)
+ nscp_cmndinfo->wait_for_completion++;
+ else
+ gdth_scsi_done(nscp);
+ break;
+ }
+ }
+
+ if (!this_cmd)
+ break;
+ if (nscp == ha->req_first)
+ ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
+ else
+ pscp->SCp.ptr = nscp->SCp.ptr;
+ if (!next_cmd)
+ break;
+ }
+
+ if (ha->cmd_cnt > 0) {
+ gdth_release_event(ha);
+ }
+
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ if (gdth_polling && ha->cmd_cnt > 0) {
+ if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
+ printk("GDT-HA %d: Command %d timed out !\n",
+ ha->hanum, cmd_index);
+ }
+}
+
+/*
+ * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
+ * buffers, kmap_atomic() as needed.
+ */
+static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+ char *buffer, u16 count)
+{
+ u16 cpcount,i, max_sg = scsi_sg_count(scp);
+ u16 cpsum,cpnow;
+ struct scatterlist *sl;
+ char *address;
+
+ cpcount = min_t(u16, count, scsi_bufflen(scp));
+
+ if (cpcount) {
+ cpsum=0;
+ scsi_for_each_sg(scp, sl, max_sg, i) {
+ unsigned long flags;
+ cpnow = (u16)sl->length;
+ TRACE(("copy_internal() now %d sum %d count %d %d\n",
+ cpnow, cpsum, cpcount, scsi_bufflen(scp)));
+ if (cpsum+cpnow > cpcount)
+ cpnow = cpcount - cpsum;
+ cpsum += cpnow;
+ if (!sg_page(sl)) {
+ printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
+ ha->hanum);
+ return;
+ }
+ local_irq_save(flags);
+ address = kmap_atomic(sg_page(sl)) + sl->offset;
+ memcpy(address, buffer, cpnow);
+ flush_dcache_page(sg_page(sl));
+ kunmap_atomic(address);
+ local_irq_restore(flags);
+ if (cpsum == cpcount)
+ break;
+ buffer += cpnow;
+ }
+ } else if (count) {
+ printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
+ ha->hanum);
+ WARN_ON(1);
+ }
+}
+
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+{
+ u8 t;
+ gdth_inq_data inq;
+ gdth_rdcap_data rdc;
+ gdth_sense_data sd;
+ gdth_modep_data mpd;
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+
+ t = scp->device->id;
+ TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
+ scp->cmnd[0],t));
+
+ scp->result = DID_OK << 16;
+ scp->sense_buffer[0] = 0;
+
+ switch (scp->cmnd[0]) {
+ case TEST_UNIT_READY:
+ case VERIFY:
+ case START_STOP:
+ TRACE2(("Test/Verify/Start hdrive %d\n",t));
+ break;
+
+ case INQUIRY:
+ TRACE2(("Inquiry hdrive %d devtype %d\n",
+ t,ha->hdr[t].devtype));
+ inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
+ /* you can here set all disks to removable, if you want to do
+ a flush using the ALLOW_MEDIUM_REMOVAL command */
+ inq.modif_rmb = 0x00;
+ if ((ha->hdr[t].devtype & 1) ||
+ (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
+ inq.modif_rmb = 0x80;
+ inq.version = 2;
+ inq.resp_aenc = 2;
+ inq.add_length= 32;
+ strcpy(inq.vendor,ha->oem_name);
+ sprintf(inq.product,"Host Drive #%02d",t);
+ strcpy(inq.revision," ");
+ gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
+ break;
+
+ case REQUEST_SENSE:
+ TRACE2(("Request sense hdrive %d\n",t));
+ sd.errorcode = 0x70;
+ sd.segno = 0x00;
+ sd.key = NO_SENSE;
+ sd.info = 0;
+ sd.add_length= 0;
+ gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
+ break;
+
+ case MODE_SENSE:
+ TRACE2(("Mode sense hdrive %d\n",t));
+ memset((char*)&mpd,0,sizeof(gdth_modep_data));
+ mpd.hd.data_length = sizeof(gdth_modep_data);
+ mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;
+ mpd.hd.bd_length = sizeof(mpd.bd);
+ mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
+ mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
+ mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
+ gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
+ break;
+
+ case READ_CAPACITY:
+ TRACE2(("Read capacity hdrive %d\n",t));
+ if (ha->hdr[t].size > (u64)0xffffffff)
+ rdc.last_block_no = 0xffffffff;
+ else
+ rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
+ rdc.block_length = cpu_to_be32(SECTOR_SIZE);
+ gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
+ break;
+
+ case SERVICE_ACTION_IN_16:
+ if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
+ (ha->cache_feat & GDT_64BIT)) {
+ gdth_rdcap16_data rdc16;
+
+ TRACE2(("Read capacity (16) hdrive %d\n",t));
+ rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
+ rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
+ gdth_copy_internal_data(ha, scp, (char*)&rdc16,
+ sizeof(gdth_rdcap16_data));
+ } else {
+ scp->result = DID_ABORT << 16;
+ }
+ break;
+
+ default:
+ TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
+ break;
+ }
+
+ if (!cmndinfo->wait_for_completion)
+ cmndinfo->wait_for_completion++;
+ else
+ return 1;
+
+ return 0;
+}
+
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
+{
+ register gdth_cmd_str *cmdp;
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ u32 cnt, blockcnt;
+ u64 no, blockno;
+ int i, cmd_index, read_write, sgcnt, mode64;
+
+ cmdp = ha->pccb;
+ TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
+ scp->cmnd[0],scp->cmd_len,hdrive));
+
+ if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+ return 0;
+
+ mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
+ /* test for READ_16, WRITE_16 if !mode64 ? ---
+ not required, should not occur due to error return on
+ READ_CAPACITY_16 */
+
+ cmdp->Service = CACHESERVICE;
+ cmdp->RequestBuffer = scp;
+ /* search free command index */
+ if (!(cmd_index=gdth_get_cmd_index(ha))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ /* if it's the first command, set command semaphore */
+ if (ha->cmd_cnt == 0)
+ gdth_set_sema0(ha);
+
+ /* fill command */
+ read_write = 0;
+ if (cmndinfo->OpCode != -1)
+ cmdp->OpCode = cmndinfo->OpCode; /* special cache cmd. */
+ else if (scp->cmnd[0] == RESERVE)
+ cmdp->OpCode = GDT_RESERVE_DRV;
+ else if (scp->cmnd[0] == RELEASE)
+ cmdp->OpCode = GDT_RELEASE_DRV;
+ else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
+ if (scp->cmnd[4] & 1) /* prevent ? */
+ cmdp->OpCode = GDT_MOUNT;
+ else if (scp->cmnd[3] & 1) /* removable drive ? */
+ cmdp->OpCode = GDT_UNMOUNT;
+ else
+ cmdp->OpCode = GDT_FLUSH;
+ } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
+ scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
+ ) {
+ read_write = 1;
+ if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
+ (ha->cache_feat & GDT_WR_THROUGH)))
+ cmdp->OpCode = GDT_WRITE_THR;
+ else
+ cmdp->OpCode = GDT_WRITE;
+ } else {
+ read_write = 2;
+ cmdp->OpCode = GDT_READ;
+ }
+
+ cmdp->BoardNode = LOCALBOARD;
+ if (mode64) {
+ cmdp->u.cache64.DeviceNo = hdrive;
+ cmdp->u.cache64.BlockNo = 1;
+ cmdp->u.cache64.sg_canz = 0;
+ } else {
+ cmdp->u.cache.DeviceNo = hdrive;
+ cmdp->u.cache.BlockNo = 1;
+ cmdp->u.cache.sg_canz = 0;
+ }
+
+ if (read_write) {
+ if (scp->cmd_len == 16) {
+ memcpy(&no, &scp->cmnd[2], sizeof(u64));
+ blockno = be64_to_cpu(no);
+ memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
+ blockcnt = be32_to_cpu(cnt);
+ } else if (scp->cmd_len == 10) {
+ memcpy(&no, &scp->cmnd[2], sizeof(u32));
+ blockno = be32_to_cpu(no);
+ memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
+ blockcnt = be16_to_cpu(cnt);
+ } else {
+ memcpy(&no, &scp->cmnd[0], sizeof(u32));
+ blockno = be32_to_cpu(no) & 0x001fffffUL;
+ blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
+ }
+ if (mode64) {
+ cmdp->u.cache64.BlockNo = blockno;
+ cmdp->u.cache64.BlockCnt = blockcnt;
+ } else {
+ cmdp->u.cache.BlockNo = (u32)blockno;
+ cmdp->u.cache.BlockCnt = blockcnt;
+ }
+
+ if (scsi_bufflen(scp)) {
+ cmndinfo->dma_dir = (read_write == 1 ?
+ PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
+ cmndinfo->dma_dir);
+ if (mode64) {
+ struct scatterlist *sl;
+
+ cmdp->u.cache64.DestAddr= (u64)-1;
+ cmdp->u.cache64.sg_canz = sgcnt;
+ scsi_for_each_sg(scp, sl, sgcnt, i) {
+ cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
+#ifdef GDTH_DMA_STATISTICS
+ if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
+ ha->dma64_cnt++;
+ else
+ ha->dma32_cnt++;
+#endif
+ cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
+ }
+ } else {
+ struct scatterlist *sl;
+
+ cmdp->u.cache.DestAddr= 0xffffffff;
+ cmdp->u.cache.sg_canz = sgcnt;
+ scsi_for_each_sg(scp, sl, sgcnt, i) {
+ cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
+#ifdef GDTH_DMA_STATISTICS
+ ha->dma32_cnt++;
+#endif
+ cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
+ }
+ }
+
+#ifdef GDTH_STATISTICS
+ if (max_sg < (u32)sgcnt) {
+ max_sg = (u32)sgcnt;
+ TRACE3(("GDT: max_sg = %d\n",max_sg));
+ }
+#endif
+
+ }
+ }
+ /* evaluate command size, check space */
+ if (mode64) {
+ TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
+ cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
+ cmdp->u.cache64.sg_lst[0].sg_ptr,
+ cmdp->u.cache64.sg_lst[0].sg_len));
+ TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
+ cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
+ (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
+ } else {
+ TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
+ cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
+ cmdp->u.cache.sg_lst[0].sg_ptr,
+ cmdp->u.cache.sg_lst[0].sg_len));
+ TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
+ cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
+ (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
+ }
+ if (ha->cmd_len & 3)
+ ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+ if (ha->cmd_cnt > 0) {
+ if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+ ha->ic_all_size) {
+ TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
+ ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
+ return 0;
+ }
+ }
+
+ /* copy command */
+ gdth_copy_command(ha);
+ return cmd_index;
+}
+
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
+{
+ register gdth_cmd_str *cmdp;
+ u16 i;
+ dma_addr_t sense_paddr;
+ int cmd_index, sgcnt, mode64;
+ u8 t,l;
+ struct page *page;
+ unsigned long offset;
+ struct gdth_cmndinfo *cmndinfo;
+
+ t = scp->device->id;
+ l = scp->device->lun;
+ cmdp = ha->pccb;
+ TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
+ scp->cmnd[0],b,t,l));
+
+ if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+ return 0;
+
+ mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
+
+ cmdp->Service = SCSIRAWSERVICE;
+ cmdp->RequestBuffer = scp;
+ /* search free command index */
+ if (!(cmd_index=gdth_get_cmd_index(ha))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ /* if it's the first command, set command semaphore */
+ if (ha->cmd_cnt == 0)
+ gdth_set_sema0(ha);
+
+ cmndinfo = gdth_cmnd_priv(scp);
+ /* fill command */
+ if (cmndinfo->OpCode != -1) {
+ cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */
+ cmdp->BoardNode = LOCALBOARD;
+ if (mode64) {
+ cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
+ TRACE2(("special raw cmd 0x%x param 0x%x\n",
+ cmdp->OpCode, cmdp->u.raw64.direction));
+ /* evaluate command size */
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
+ } else {
+ cmdp->u.raw.direction = (cmndinfo->phase >> 8);
+ TRACE2(("special raw cmd 0x%x param 0x%x\n",
+ cmdp->OpCode, cmdp->u.raw.direction));
+ /* evaluate command size */
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
+ }
+
+ } else {
+ page = virt_to_page(scp->sense_buffer);
+ offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
+ sense_paddr = pci_map_page(ha->pdev,page,offset,
+ 16,PCI_DMA_FROMDEVICE);
+
+ cmndinfo->sense_paddr = sense_paddr;
+ cmdp->OpCode = GDT_WRITE; /* always */
+ cmdp->BoardNode = LOCALBOARD;
+ if (mode64) {
+ cmdp->u.raw64.reserved = 0;
+ cmdp->u.raw64.mdisc_time = 0;
+ cmdp->u.raw64.mcon_time = 0;
+ cmdp->u.raw64.clen = scp->cmd_len;
+ cmdp->u.raw64.target = t;
+ cmdp->u.raw64.lun = l;
+ cmdp->u.raw64.bus = b;
+ cmdp->u.raw64.priority = 0;
+ cmdp->u.raw64.sdlen = scsi_bufflen(scp);
+ cmdp->u.raw64.sense_len = 16;
+ cmdp->u.raw64.sense_data = sense_paddr;
+ cmdp->u.raw64.direction =
+ gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
+ memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
+ cmdp->u.raw64.sg_ranz = 0;
+ } else {
+ cmdp->u.raw.reserved = 0;
+ cmdp->u.raw.mdisc_time = 0;
+ cmdp->u.raw.mcon_time = 0;
+ cmdp->u.raw.clen = scp->cmd_len;
+ cmdp->u.raw.target = t;
+ cmdp->u.raw.lun = l;
+ cmdp->u.raw.bus = b;
+ cmdp->u.raw.priority = 0;
+ cmdp->u.raw.link_p = 0;
+ cmdp->u.raw.sdlen = scsi_bufflen(scp);
+ cmdp->u.raw.sense_len = 16;
+ cmdp->u.raw.sense_data = sense_paddr;
+ cmdp->u.raw.direction =
+ gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
+ memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
+ cmdp->u.raw.sg_ranz = 0;
+ }
+
+ if (scsi_bufflen(scp)) {
+ cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
+ sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
+ cmndinfo->dma_dir);
+ if (mode64) {
+ struct scatterlist *sl;
+
+ cmdp->u.raw64.sdata = (u64)-1;
+ cmdp->u.raw64.sg_ranz = sgcnt;
+ scsi_for_each_sg(scp, sl, sgcnt, i) {
+ cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
+#ifdef GDTH_DMA_STATISTICS
+ if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
+ ha->dma64_cnt++;
+ else
+ ha->dma32_cnt++;
+#endif
+ cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
+ }
+ } else {
+ struct scatterlist *sl;
+
+ cmdp->u.raw.sdata = 0xffffffff;
+ cmdp->u.raw.sg_ranz = sgcnt;
+ scsi_for_each_sg(scp, sl, sgcnt, i) {
+ cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
+#ifdef GDTH_DMA_STATISTICS
+ ha->dma32_cnt++;
+#endif
+ cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
+ }
+ }
+
+#ifdef GDTH_STATISTICS
+ if (max_sg < sgcnt) {
+ max_sg = sgcnt;
+ TRACE3(("GDT: max_sg = %d\n",sgcnt));
+ }
+#endif
+
+ }
+ if (mode64) {
+ TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
+ cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
+ cmdp->u.raw64.sg_lst[0].sg_ptr,
+ cmdp->u.raw64.sg_lst[0].sg_len));
+ /* evaluate command size */
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
+ (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
+ } else {
+ TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
+ cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
+ cmdp->u.raw.sg_lst[0].sg_ptr,
+ cmdp->u.raw.sg_lst[0].sg_len));
+ /* evaluate command size */
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
+ (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
+ }
+ }
+ /* check space */
+ if (ha->cmd_len & 3)
+ ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+ if (ha->cmd_cnt > 0) {
+ if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+ ha->ic_all_size) {
+ TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
+ ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
+ return 0;
+ }
+ }
+
+ /* copy command */
+ gdth_copy_command(ha);
+ return cmd_index;
+}
+
+static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+{
+ register gdth_cmd_str *cmdp;
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ int cmd_index;
+
+ cmdp= ha->pccb;
+ TRACE2(("gdth_special_cmd(): "));
+
+ if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+ return 0;
+
+ *cmdp = *cmndinfo->internal_cmd_str;
+ cmdp->RequestBuffer = scp;
+
+ /* search free command index */
+ if (!(cmd_index=gdth_get_cmd_index(ha))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+
+ /* if it's the first command, set command semaphore */
+ if (ha->cmd_cnt == 0)
+ gdth_set_sema0(ha);
+
+ /* evaluate command size, check space */
+ if (cmdp->OpCode == GDT_IOCTL) {
+ TRACE2(("IOCTL\n"));
+ ha->cmd_len =
+ GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
+ } else if (cmdp->Service == CACHESERVICE) {
+ TRACE2(("cache command %d\n",cmdp->OpCode));
+ if (ha->cache_feat & GDT_64BIT)
+ ha->cmd_len =
+ GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
+ else
+ ha->cmd_len =
+ GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
+ } else if (cmdp->Service == SCSIRAWSERVICE) {
+ TRACE2(("raw command %d\n",cmdp->OpCode));
+ if (ha->raw_feat & GDT_64BIT)
+ ha->cmd_len =
+ GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
+ else
+ ha->cmd_len =
+ GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
+ }
+
+ if (ha->cmd_len & 3)
+ ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+ if (ha->cmd_cnt > 0) {
+ if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+ ha->ic_all_size) {
+ TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
+ ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
+ return 0;
+ }
+ }
+
+ /* copy command */
+ gdth_copy_command(ha);
+ return cmd_index;
+}
+
+
+/* Controller event handling functions */
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+ u16 idx, gdth_evt_data *evt)
+{
+ gdth_evt_str *e;
+ struct timeval tv;
+
+ /* no GDTH_LOCK_HA() ! */
+ TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
+ if (source == 0) /* no source -> no event */
+ return NULL;
+
+ if (ebuffer[elastidx].event_source == source &&
+ ebuffer[elastidx].event_idx == idx &&
+ ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
+ !memcmp((char *)&ebuffer[elastidx].event_data.eu,
+ (char *)&evt->eu, evt->size)) ||
+ (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
+ !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
+ (char *)&evt->event_string)))) {
+ e = &ebuffer[elastidx];
+ do_gettimeofday(&tv);
+ e->last_stamp = tv.tv_sec;
+ ++e->same_count;
+ } else {
+ if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
+ ++elastidx;
+ if (elastidx == MAX_EVENTS)
+ elastidx = 0;
+ if (elastidx == eoldidx) { /* reached mark ? */
+ ++eoldidx;
+ if (eoldidx == MAX_EVENTS)
+ eoldidx = 0;
+ }
+ }
+ e = &ebuffer[elastidx];
+ e->event_source = source;
+ e->event_idx = idx;
+ do_gettimeofday(&tv);
+ e->first_stamp = e->last_stamp = tv.tv_sec;
+ e->same_count = 1;
+ e->event_data = *evt;
+ e->application = 0;
+ }
+ return e;
+}
+
+static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
+{
+ gdth_evt_str *e;
+ int eindex;
+ unsigned long flags;
+
+ TRACE2(("gdth_read_event() handle %d\n", handle));
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ if (handle == -1)
+ eindex = eoldidx;
+ else
+ eindex = handle;
+ estr->event_source = 0;
+
+ if (eindex < 0 || eindex >= MAX_EVENTS) {
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return eindex;
+ }
+ e = &ebuffer[eindex];
+ if (e->event_source != 0) {
+ if (eindex != elastidx) {
+ if (++eindex == MAX_EVENTS)
+ eindex = 0;
+ } else {
+ eindex = -1;
+ }
+ memcpy(estr, e, sizeof(gdth_evt_str));
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return eindex;
+}
+
+static void gdth_readapp_event(gdth_ha_str *ha,
+ u8 application, gdth_evt_str *estr)
+{
+ gdth_evt_str *e;
+ int eindex;
+ unsigned long flags;
+ u8 found = FALSE;
+
+ TRACE2(("gdth_readapp_event() app. %d\n", application));
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ eindex = eoldidx;
+ for (;;) {
+ e = &ebuffer[eindex];
+ if (e->event_source == 0)
+ break;
+ if ((e->application & application) == 0) {
+ e->application |= application;
+ found = TRUE;
+ break;
+ }
+ if (eindex == elastidx)
+ break;
+ if (++eindex == MAX_EVENTS)
+ eindex = 0;
+ }
+ if (found)
+ memcpy(estr, e, sizeof(gdth_evt_str));
+ else
+ estr->event_source = 0;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+}
+
+static void gdth_clear_events(void)
+{
+ TRACE(("gdth_clear_events()"));
+
+ eoldidx = elastidx = 0;
+ ebuffer[0].event_source = 0;
+}
+
+
+/* SCSI interface functions */
+
+static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
+ int gdth_from_wait, int* pIndex)
+{
+ gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
+ gdt6_dpram_str __iomem *dp6_ptr;
+ gdt2_dpram_str __iomem *dp2_ptr;
+ Scsi_Cmnd *scp;
+ int rval, i;
+ u8 IStatus;
+ u16 Service;
+ unsigned long flags = 0;
+#ifdef INT_COAL
+ int coalesced = FALSE;
+ int next = FALSE;
+ gdth_coal_status *pcs = NULL;
+ int act_int_coal = 0;
+#endif
+
+ TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
+
+ /* if polling and not from gdth_wait() -> return */
+ if (gdth_polling) {
+ if (!gdth_from_wait) {
+ return IRQ_HANDLED;
+ }
+ }
+
+ if (!gdth_polling)
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ /* search controller */
+ IStatus = gdth_get_status(ha);
+ if (IStatus == 0) {
+ /* spurious interrupt */
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+#ifdef GDTH_STATISTICS
+ ++act_ints;
+#endif
+
+#ifdef INT_COAL
+ /* See if the fw is returning coalesced status */
+ if (IStatus == COALINDEX) {
+ /* Coalesced status. Setup the initial status
+ buffer pointer and flags */
+ pcs = ha->coal_stat;
+ coalesced = TRUE;
+ next = TRUE;
+ }
+
+ do {
+ if (coalesced) {
+ /* For coalesced requests all status
+ information is found in the status buffer */
+ IStatus = (u8)(pcs->status & 0xff);
+ }
+#endif
+
+ if (ha->type == GDT_EISA) {
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ ha->status = inw(ha->bmic + MAILBOXREG+8);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
+ } else /* no error */
+ ha->status = S_OK;
+ ha->info = inl(ha->bmic + MAILBOXREG+12);
+ ha->service = inw(ha->bmic + MAILBOXREG+10);
+ ha->info2 = inl(ha->bmic + MAILBOXREG+4);
+
+ outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */
+ outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */
+ } else if (ha->type == GDT_ISA) {
+ dp2_ptr = ha->brd;
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ ha->status = readw(&dp2_ptr->u.ic.Status);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
+ } else /* no error */
+ ha->status = S_OK;
+ ha->info = readl(&dp2_ptr->u.ic.Info[0]);
+ ha->service = readw(&dp2_ptr->u.ic.Service);
+ ha->info2 = readl(&dp2_ptr->u.ic.Info[1]);
+
+ writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */
+ writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */
+ writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = ha->brd;
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ ha->status = readw(&dp6_ptr->u.ic.Status);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
+ } else /* no error */
+ ha->status = S_OK;
+ ha->info = readl(&dp6_ptr->u.ic.Info[0]);
+ ha->service = readw(&dp6_ptr->u.ic.Service);
+ ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);
+
+ writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
+ writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
+ writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */
+ } else if (ha->type == GDT_PCINEW) {
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ ha->status = inw(PTR2USHORT(&ha->plx->status));
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
+ } else
+ ha->status = S_OK;
+ ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
+ ha->service = inw(PTR2USHORT(&ha->plx->service));
+ ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));
+
+ outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
+ outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = ha->brd;
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+#ifdef INT_COAL
+ if (coalesced)
+ ha->status = pcs->ext_status & 0xffff;
+ else
+#endif
+ ha->status = readw(&dp6m_ptr->i960r.status);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
+ } else /* no error */
+ ha->status = S_OK;
+#ifdef INT_COAL
+ /* get information */
+ if (coalesced) {
+ ha->info = pcs->info0;
+ ha->info2 = pcs->info1;
+ ha->service = (pcs->ext_status >> 16) & 0xffff;
+ } else
+#endif
+ {
+ ha->info = readl(&dp6m_ptr->i960r.info[0]);
+ ha->service = readw(&dp6m_ptr->i960r.service);
+ ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
+ }
+ /* event string */
+ if (IStatus == ASYNCINDEX) {
+ if (ha->service != SCREENSERVICE &&
+ (ha->fw_vers & 0xff) >= 0x1a) {
+ ha->dvr.severity = readb
+ (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
+ for (i = 0; i < 256; ++i) {
+ ha->dvr.event_string[i] = readb
+ (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
+ if (ha->dvr.event_string[i] == 0)
+ break;
+ }
+ }
+ }
+#ifdef INT_COAL
+ /* Make sure that non coalesced interrupts get cleared
+ before being handled by gdth_async_event/gdth_sync_event */
+ if (!coalesced)
+#endif
+ {
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0, &dp6m_ptr->i960r.sema1_reg);
+ }
+ } else {
+ TRACE2(("gdth_interrupt() unknown controller type\n"));
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ TRACE(("gdth_interrupt() index %d stat %d info %d\n",
+ IStatus,ha->status,ha->info));
+
+ if (gdth_from_wait) {
+ *pIndex = (int)IStatus;
+ }
+
+ if (IStatus == ASYNCINDEX) {
+ TRACE2(("gdth_interrupt() async. event\n"));
+ gdth_async_event(ha);
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ gdth_next(ha);
+ return IRQ_HANDLED;
+ }
+
+ if (IStatus == SPEZINDEX) {
+ TRACE2(("Service unknown or not initialized !\n"));
+ ha->dvr.size = sizeof(ha->dvr.eu.driver);
+ ha->dvr.eu.driver.ionode = ha->hanum;
+ gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return IRQ_HANDLED;
+ }
+ scp = ha->cmd_tab[IStatus-2].cmnd;
+ Service = ha->cmd_tab[IStatus-2].service;
+ ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
+ if (scp == UNUSED_CMND) {
+ TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
+ ha->dvr.size = sizeof(ha->dvr.eu.driver);
+ ha->dvr.eu.driver.ionode = ha->hanum;
+ ha->dvr.eu.driver.index = IStatus;
+ gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return IRQ_HANDLED;
+ }
+ if (scp == INTERNAL_CMND) {
+ TRACE(("gdth_interrupt() answer to internal command\n"));
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ TRACE(("gdth_interrupt() sync. status\n"));
+ rval = gdth_sync_event(ha,Service,IStatus,scp);
+ if (!gdth_polling)
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ if (rval == 2) {
+ gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
+ } else if (rval == 1) {
+ gdth_scsi_done(scp);
+ }
+
+#ifdef INT_COAL
+ if (coalesced) {
+ /* go to the next status in the status buffer */
+ ++pcs;
+#ifdef GDTH_STATISTICS
+ ++act_int_coal;
+ if (act_int_coal > max_int_coal) {
+ max_int_coal = act_int_coal;
+ printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
+ }
+#endif
+ /* see if there is another status */
+ if (pcs->status == 0)
+ /* Stop the coalesce loop */
+ next = FALSE;
+ }
+ } while (next);
+
+ /* coalescing only for new GDT_PCIMPR controllers available */
+ if (ha->type == GDT_PCIMPR && coalesced) {
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0, &dp6m_ptr->i960r.sema1_reg);
+ }
+#endif
+
+ gdth_next(ha);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gdth_interrupt(int irq, void *dev_id)
+{
+ gdth_ha_str *ha = dev_id;
+
+ return __gdth_interrupt(ha, false, NULL);
+}
+
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
+ Scsi_Cmnd *scp)
+{
+ gdth_msg_str *msg;
+ gdth_cmd_str *cmdp;
+ u8 b, t;
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+
+ cmdp = ha->pccb;
+ TRACE(("gdth_sync_event() serv %d status %d\n",
+ service,ha->status));
+
+ if (service == SCREENSERVICE) {
+ msg = ha->pmsg;
+ TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
+ msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
+ if (msg->msg_len > MSGLEN+1)
+ msg->msg_len = MSGLEN+1;
+ if (msg->msg_len)
+ if (!(msg->msg_answer && msg->msg_ext)) {
+ msg->msg_text[msg->msg_len] = '\0';
+ printk("%s",msg->msg_text);
+ }
+
+ if (msg->msg_ext && !msg->msg_answer) {
+ while (gdth_test_busy(ha))
+ gdth_delay(0);
+ cmdp->Service = SCREENSERVICE;
+ cmdp->RequestBuffer = SCREEN_CMND;
+ gdth_get_cmd_index(ha);
+ gdth_set_sema0(ha);
+ cmdp->OpCode = GDT_READ;
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.screen.reserved = 0;
+ cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
+ cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
+ + sizeof(u64);
+ ha->cmd_cnt = 0;
+ gdth_copy_command(ha);
+ gdth_release_event(ha);
+ return 0;
+ }
+
+ if (msg->msg_answer && msg->msg_alen) {
+ /* default answers (getchar() not possible) */
+ if (msg->msg_alen == 1) {
+ msg->msg_alen = 0;
+ msg->msg_len = 1;
+ msg->msg_text[0] = 0;
+ } else {
+ msg->msg_alen -= 2;
+ msg->msg_len = 2;
+ msg->msg_text[0] = 1;
+ msg->msg_text[1] = 0;
+ }
+ msg->msg_ext = 0;
+ msg->msg_answer = 0;
+ while (gdth_test_busy(ha))
+ gdth_delay(0);
+ cmdp->Service = SCREENSERVICE;
+ cmdp->RequestBuffer = SCREEN_CMND;
+ gdth_get_cmd_index(ha);
+ gdth_set_sema0(ha);
+ cmdp->OpCode = GDT_WRITE;
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.screen.reserved = 0;
+ cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
+ cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
+ + sizeof(u64);
+ ha->cmd_cnt = 0;
+ gdth_copy_command(ha);
+ gdth_release_event(ha);
+ return 0;
+ }
+ printk("\n");
+
+ } else {
+ b = scp->device->channel;
+ t = scp->device->id;
+ if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
+ ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
+ }
+ /* cache or raw service */
+ if (ha->status == S_BSY) {
+ TRACE2(("Controller busy -> retry !\n"));
+ if (cmndinfo->OpCode == GDT_MOUNT)
+ cmndinfo->OpCode = GDT_CLUST_INFO;
+ /* retry */
+ return 2;
+ }
+ if (scsi_bufflen(scp))
+ pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
+ cmndinfo->dma_dir);
+
+ if (cmndinfo->sense_paddr)
+ pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16,
+ PCI_DMA_FROMDEVICE);
+
+ if (ha->status == S_OK) {
+ cmndinfo->status = S_OK;
+ cmndinfo->info = ha->info;
+ if (cmndinfo->OpCode != -1) {
+ TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
+ cmndinfo->OpCode));
+ /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
+ if (cmndinfo->OpCode == GDT_CLUST_INFO) {
+ ha->hdr[t].cluster_type = (u8)ha->info;
+ if (!(ha->hdr[t].cluster_type &
+ CLUSTER_MOUNTED)) {
+ /* NOT MOUNTED -> MOUNT */
+ cmndinfo->OpCode = GDT_MOUNT;
+ if (ha->hdr[t].cluster_type &
+ CLUSTER_RESERVED) {
+ /* cluster drive RESERVED (on the other node) */
+ cmndinfo->phase = -2; /* reservation conflict */
+ }
+ } else {
+ cmndinfo->OpCode = -1;
+ }
+ } else {
+ if (cmndinfo->OpCode == GDT_MOUNT) {
+ ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
+ ha->hdr[t].media_changed = TRUE;
+ } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
+ ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
+ ha->hdr[t].media_changed = TRUE;
+ }
+ cmndinfo->OpCode = -1;
+ }
+ /* retry */
+ cmndinfo->priority = HIGH_PRI;
+ return 2;
+ } else {
+ /* RESERVE/RELEASE ? */
+ if (scp->cmnd[0] == RESERVE) {
+ ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
+ } else if (scp->cmnd[0] == RELEASE) {
+ ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
+ }
+ scp->result = DID_OK << 16;
+ scp->sense_buffer[0] = 0;
+ }
+ } else {
+ cmndinfo->status = ha->status;
+ cmndinfo->info = ha->info;
+
+ if (cmndinfo->OpCode != -1) {
+ TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
+ cmndinfo->OpCode, ha->status));
+ if (cmndinfo->OpCode == GDT_SCAN_START ||
+ cmndinfo->OpCode == GDT_SCAN_END) {
+ cmndinfo->OpCode = -1;
+ /* retry */
+ cmndinfo->priority = HIGH_PRI;
+ return 2;
+ }
+ memset((char*)scp->sense_buffer,0,16);
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = NOT_READY;
+ scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ } else if (service == CACHESERVICE) {
+ if (ha->status == S_CACHE_UNKNOWN &&
+ (ha->hdr[t].cluster_type &
+ CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
+ /* bus reset -> force GDT_CLUST_INFO */
+ ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
+ }
+ memset((char*)scp->sense_buffer,0,16);
+ if (ha->status == (u16)S_CACHE_RESERV) {
+ scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
+ } else {
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = NOT_READY;
+ scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ }
+ if (!cmndinfo->internal_command) {
+ ha->dvr.size = sizeof(ha->dvr.eu.sync);
+ ha->dvr.eu.sync.ionode = ha->hanum;
+ ha->dvr.eu.sync.service = service;
+ ha->dvr.eu.sync.status = ha->status;
+ ha->dvr.eu.sync.info = ha->info;
+ ha->dvr.eu.sync.hostdrive = t;
+ if (ha->status >= 0x8000)
+ gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
+ else
+ gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
+ }
+ } else {
+ /* sense buffer filled from controller firmware (DMA) */
+ if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
+ scp->result = DID_BAD_TARGET << 16;
+ } else {
+ scp->result = (DID_OK << 16) | ha->info;
+ }
+ }
+ }
+ if (!cmndinfo->wait_for_completion)
+ cmndinfo->wait_for_completion++;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+static char *async_cache_tab[] = {
+/* 0*/ "\011\000\002\002\002\004\002\006\004"
+ "GDT HA %u, service %u, async. status %u/%lu unknown",
+/* 1*/ "\011\000\002\002\002\004\002\006\004"
+ "GDT HA %u, service %u, async. status %u/%lu unknown",
+/* 2*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu not ready",
+/* 3*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
+/* 4*/ "\005\000\002\006\004"
+ "GDT HA %u, mirror update on Host Drive %lu failed",
+/* 5*/ "\005\000\002\006\004"
+ "GDT HA %u, Mirror Drive %lu failed",
+/* 6*/ "\005\000\002\006\004"
+ "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
+/* 7*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu write protected",
+/* 8*/ "\005\000\002\006\004"
+ "GDT HA %u, media changed in Host Drive %lu",
+/* 9*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu is offline",
+/*10*/ "\005\000\002\006\004"
+ "GDT HA %u, media change of Mirror Drive %lu",
+/*11*/ "\005\000\002\006\004"
+ "GDT HA %u, Mirror Drive %lu is write protected",
+/*12*/ "\005\000\002\006\004"
+ "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
+/*13*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Array Drive %u: Cache Drive %u failed",
+/*14*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: FAIL state entered",
+/*15*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: error",
+/*16*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
+/*17*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity build failed",
+/*18*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild failed",
+/*19*/ "\005\000\002\010\002"
+ "GDT HA %u, Test of Hot Fix %u failed",
+/*20*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive build finished successfully",
+/*21*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
+/*22*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Array Drive %u: Hot Fix %u activated",
+/*23*/ "\005\000\002\006\002"
+ "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
+/*24*/ "\005\000\002\010\002"
+ "GDT HA %u, mirror update on Cache Drive %u completed",
+/*25*/ "\005\000\002\010\002"
+ "GDT HA %u, mirror update on Cache Drive %lu failed",
+/*26*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild started",
+/*27*/ "\005\000\002\012\001"
+ "GDT HA %u, Fault bus %u: SHELF OK detected",
+/*28*/ "\005\000\002\012\001"
+ "GDT HA %u, Fault bus %u: SHELF not OK detected",
+/*29*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
+/*30*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: new disk detected",
+/*31*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: old disk detected",
+/*32*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
+/*33*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
+/*34*/ "\011\000\002\012\001\013\001\006\004"
+ "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
+/*35*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: disk write protected",
+/*36*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: disk not available",
+/*37*/ "\007\000\002\012\001\006\004"
+ "GDT HA %u, Fault bus %u: swap detected (%lu)",
+/*38*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
+/*39*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
+/*40*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
+/*41*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
+/*42*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive build started",
+/*43*/ "\003\000\002"
+ "GDT HA %u, DRAM parity error detected",
+/*44*/ "\005\000\002\006\002"
+ "GDT HA %u, Mirror Drive %u: update started",
+/*45*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
+/*46*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
+/*47*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
+/*48*/ "\005\000\002\006\002"
+ "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
+/*49*/ "\005\000\002\006\002"
+ "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
+/*50*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
+/*51*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand started",
+/*52*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand finished successfully",
+/*53*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand failed",
+/*54*/ "\003\000\002"
+ "GDT HA %u, CPU temperature critical",
+/*55*/ "\003\000\002"
+ "GDT HA %u, CPU temperature OK",
+/*56*/ "\005\000\002\006\004"
+ "GDT HA %u, Host drive %lu created",
+/*57*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand restarted",
+/*58*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand stopped",
+/*59*/ "\005\000\002\010\002"
+ "GDT HA %u, Mirror Drive %u: drive build quited",
+/*60*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity build quited",
+/*61*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild quited",
+/*62*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity verify started",
+/*63*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity verify done",
+/*64*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity verify failed",
+/*65*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity error detected",
+/*66*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity verify quited",
+/*67*/ "\005\000\002\006\002"
+ "GDT HA %u, Host Drive %u reserved",
+/*68*/ "\005\000\002\006\002"
+ "GDT HA %u, Host Drive %u mounted and released",
+/*69*/ "\005\000\002\006\002"
+ "GDT HA %u, Host Drive %u released",
+/*70*/ "\003\000\002"
+ "GDT HA %u, DRAM error detected and corrected with ECC",
+/*71*/ "\003\000\002"
+ "GDT HA %u, Uncorrectable DRAM error detected with ECC",
+/*72*/ "\011\000\002\012\001\013\001\014\001"
+ "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
+/*73*/ "\005\000\002\006\002"
+ "GDT HA %u, Host drive %u resetted locally",
+/*74*/ "\005\000\002\006\002"
+ "GDT HA %u, Host drive %u resetted remotely",
+/*75*/ "\003\000\002"
+ "GDT HA %u, async. status 75 unknown",
+};
+
+
+static int gdth_async_event(gdth_ha_str *ha)
+{
+ gdth_cmd_str *cmdp;
+ int cmd_index;
+
+ cmdp= ha->pccb;
+ TRACE2(("gdth_async_event() ha %d serv %d\n",
+ ha->hanum, ha->service));
+
+ if (ha->service == SCREENSERVICE) {
+ if (ha->status == MSG_REQUEST) {
+ while (gdth_test_busy(ha))
+ gdth_delay(0);
+ cmdp->Service = SCREENSERVICE;
+ cmdp->RequestBuffer = SCREEN_CMND;
+ cmd_index = gdth_get_cmd_index(ha);
+ gdth_set_sema0(ha);
+ cmdp->OpCode = GDT_READ;
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.screen.reserved = 0;
+ cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
+ cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
+ + sizeof(u64);
+ ha->cmd_cnt = 0;
+ gdth_copy_command(ha);
+ if (ha->type == GDT_EISA)
+ printk("[EISA slot %d] ",(u16)ha->brd_phys);
+ else if (ha->type == GDT_ISA)
+ printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
+ else
+ printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
+ (u16)((ha->brd_phys>>3)&0x1f));
+ gdth_release_event(ha);
+ }
+
+ } else {
+ if (ha->type == GDT_PCIMPR &&
+ (ha->fw_vers & 0xff) >= 0x1a) {
+ ha->dvr.size = 0;
+ ha->dvr.eu.async.ionode = ha->hanum;
+ ha->dvr.eu.async.status = ha->status;
+ /* severity and event_string already set! */
+ } else {
+ ha->dvr.size = sizeof(ha->dvr.eu.async);
+ ha->dvr.eu.async.ionode = ha->hanum;
+ ha->dvr.eu.async.service = ha->service;
+ ha->dvr.eu.async.status = ha->status;
+ ha->dvr.eu.async.info = ha->info;
+ *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
+ }
+ gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
+ gdth_log_event( &ha->dvr, NULL );
+
+ /* new host drive from expand? */
+ if (ha->service == CACHESERVICE && ha->status == 56) {
+ TRACE2(("gdth_async_event(): new host drive %d created\n",
+ (u16)ha->info));
+ /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
+ }
+ }
+ return 1;
+}
+
+static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
+{
+ gdth_stackframe stack;
+ char *f = NULL;
+ int i,j;
+
+ TRACE2(("gdth_log_event()\n"));
+ if (dvr->size == 0) {
+ if (buffer == NULL) {
+ printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
+ } else {
+ sprintf(buffer,"Adapter %d: %s\n",
+ dvr->eu.async.ionode,dvr->event_string);
+ }
+ } else if (dvr->eu.async.service == CACHESERVICE &&
+ INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
+ TRACE2(("GDT: Async. event cache service, event no.: %d\n",
+ dvr->eu.async.status));
+
+ f = async_cache_tab[dvr->eu.async.status];
+
+ /* i: parameter to push, j: stack element to fill */
+ for (j=0,i=1; i < f[0]; i+=2) {
+ switch (f[i+1]) {
+ case 4:
+ stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
+ break;
+ case 2:
+ stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
+ break;
+ case 1:
+ stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (buffer == NULL) {
+ printk(&f[(int)f[0]],stack);
+ printk("\n");
+ } else {
+ sprintf(buffer,&f[(int)f[0]],stack);
+ }
+
+ } else {
+ if (buffer == NULL) {
+ printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
+ dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
+ } else {
+ sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
+ dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
+ }
+ }
+}
+
+#ifdef GDTH_STATISTICS
+static u8 gdth_timer_running;
+
+static void gdth_timeout(unsigned long data)
+{
+ u32 i;
+ Scsi_Cmnd *nscp;
+ gdth_ha_str *ha;
+ unsigned long flags;
+
+ if(unlikely(list_empty(&gdth_instances))) {
+ gdth_timer_running = 0;
+ return;
+ }
+
+ ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
+ if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
+ ++act_stats;
+
+ for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ ++act_rq;
+
+ TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
+ act_ints, act_ios, act_stats, act_rq));
+ act_ints = act_ios = 0;
+
+ gdth_timer.expires = jiffies + 30 * HZ;
+ add_timer(&gdth_timer);
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+}
+
+static void gdth_timer_init(void)
+{
+ if (gdth_timer_running)
+ return;
+ gdth_timer_running = 1;
+ TRACE2(("gdth_detect(): Initializing timer !\n"));
+ gdth_timer.expires = jiffies + HZ;
+ gdth_timer.data = 0L;
+ gdth_timer.function = gdth_timeout;
+ add_timer(&gdth_timer);
+}
+#else
+static inline void gdth_timer_init(void)
+{
+}
+#endif
+
+static void __init internal_setup(char *str,int *ints)
+{
+ int i, argc;
+ char *cur_str, *argv;
+
+ TRACE2(("internal_setup() str %s ints[0] %d\n",
+ str ? str:"NULL", ints ? ints[0]:0));
+
+ /* read irq[] from ints[] */
+ if (ints) {
+ argc = ints[0];
+ if (argc > 0) {
+ if (argc > MAXHA)
+ argc = MAXHA;
+ for (i = 0; i < argc; ++i)
+ irq[i] = ints[i+1];
+ }
+ }
+
+ /* analyse string */
+ argv = str;
+ while (argv && (cur_str = strchr(argv, ':'))) {
+ int val = 0, c = *++cur_str;
+
+ if (c == 'n' || c == 'N')
+ val = 0;
+ else if (c == 'y' || c == 'Y')
+ val = 1;
+ else
+ val = (int)simple_strtoul(cur_str, NULL, 0);
+
+ if (!strncmp(argv, "disable:", 8))
+ disable = val;
+ else if (!strncmp(argv, "reserve_mode:", 13))
+ reserve_mode = val;
+ else if (!strncmp(argv, "reverse_scan:", 13))
+ reverse_scan = val;
+ else if (!strncmp(argv, "hdr_channel:", 12))
+ hdr_channel = val;
+ else if (!strncmp(argv, "max_ids:", 8))
+ max_ids = val;
+ else if (!strncmp(argv, "rescan:", 7))
+ rescan = val;
+ else if (!strncmp(argv, "shared_access:", 14))
+ shared_access = val;
+ else if (!strncmp(argv, "probe_eisa_isa:", 15))
+ probe_eisa_isa = val;
+ else if (!strncmp(argv, "reserve_list:", 13)) {
+ reserve_list[0] = val;
+ for (i = 1; i < MAX_RES_ARGS; i++) {
+ cur_str = strchr(cur_str, ',');
+ if (!cur_str)
+ break;
+ if (!isdigit((int)*++cur_str)) {
+ --cur_str;
+ break;
+ }
+ reserve_list[i] =
+ (int)simple_strtoul(cur_str, NULL, 0);
+ }
+ if (!cur_str)
+ break;
+ argv = ++cur_str;
+ continue;
+ }
+
+ if ((argv = strchr(argv, ',')))
+ ++argv;
+ }
+}
+
+int __init option_setup(char *str)
+{
+ int ints[MAXHA];
+ char *cur = str;
+ int i = 1;
+
+ TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
+
+ while (cur && isdigit(*cur) && i < MAXHA) {
+ ints[i++] = simple_strtoul(cur, NULL, 0);
+ if ((cur = strchr(cur, ',')) != NULL) cur++;
+ }
+
+ ints[0] = i - 1;
+ internal_setup(cur, ints);
+ return 1;
+}
+
+static const char *gdth_ctr_name(gdth_ha_str *ha)
+{
+ TRACE2(("gdth_ctr_name()\n"));
+
+ if (ha->type == GDT_EISA) {
+ switch (ha->stype) {
+ case GDT3_ID:
+ return("GDT3000/3020");
+ case GDT3A_ID:
+ return("GDT3000A/3020A/3050A");
+ case GDT3B_ID:
+ return("GDT3000B/3010A");
+ }
+ } else if (ha->type == GDT_ISA) {
+ return("GDT2000/2020");
+ } else if (ha->type == GDT_PCI) {
+ switch (ha->pdev->device) {
+ case PCI_DEVICE_ID_VORTEX_GDT60x0:
+ return("GDT6000/6020/6050");
+ case PCI_DEVICE_ID_VORTEX_GDT6000B:
+ return("GDT6000B/6010");
+ }
+ }
+ /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
+
+ return("");
+}
+
+static const char *gdth_info(struct Scsi_Host *shp)
+{
+ gdth_ha_str *ha = shost_priv(shp);
+
+ TRACE2(("gdth_info()\n"));
+ return ((const char *)ha->binfo.type_string);
+}
+
+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+ gdth_ha_str *ha = shost_priv(scp->device->host);
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ u8 b, t;
+ unsigned long flags;
+ enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+ TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
+ b = scp->device->channel;
+ t = scp->device->id;
+
+ /*
+ * We don't really honor the command timeout, but we try to
+ * honor 6 times of the actual command timeout! So reset the
+ * timer if this is less than 6th timeout on this command!
+ */
+ if (++cmndinfo->timeout_count < 6)
+ retval = BLK_EH_RESET_TIMER;
+
+ /* Reset the timeout if it is locked IO */
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+ (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+ TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+ retval = BLK_EH_RESET_TIMER;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ return retval;
+}
+
+
+static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+{
+ gdth_ha_str *ha = shost_priv(scp->device->host);
+ int i;
+ unsigned long flags;
+ Scsi_Cmnd *cmnd;
+ u8 b;
+
+ TRACE2(("gdth_eh_bus_reset()\n"));
+
+ b = scp->device->channel;
+
+ /* clear command tab */
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ for (i = 0; i < GDTH_MAXCMDS; ++i) {
+ cmnd = ha->cmd_tab[i].cmnd;
+ if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
+ ha->cmd_tab[i].cmnd = UNUSED_CMND;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ if (b == ha->virt_bus) {
+ /* host drives */
+ for (i = 0; i < MAX_HDRIVES; ++i) {
+ if (ha->hdr[i].present) {
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ gdth_polling = TRUE;
+ while (gdth_test_busy(ha))
+ gdth_delay(0);
+ if (gdth_internal_cmd(ha, CACHESERVICE,
+ GDT_CLUST_RESET, i, 0, 0))
+ ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
+ gdth_polling = FALSE;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ }
+ }
+ } else {
+ /* raw devices */
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ for (i = 0; i < MAXID; ++i)
+ ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
+ gdth_polling = TRUE;
+ while (gdth_test_busy(ha))
+ gdth_delay(0);
+ gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
+ BUS_L2P(ha,b), 0, 0);
+ gdth_polling = FALSE;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ }
+ return SUCCESS;
+}
+
+static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
+{
+ u8 b, t;
+ gdth_ha_str *ha = shost_priv(sdev->host);
+ struct scsi_device *sd;
+ unsigned capacity;
+
+ sd = sdev;
+ capacity = cap;
+ b = sd->channel;
+ t = sd->id;
+ TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
+
+ if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
+ /* raw device or host drive without mapping information */
+ TRACE2(("Evaluate mapping\n"));
+ gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
+ } else {
+ ip[0] = ha->hdr[t].heads;
+ ip[1] = ha->hdr[t].secs;
+ ip[2] = capacity / ip[0] / ip[1];
+ }
+
+ TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
+ ip[0],ip[1],ip[2]));
+ return 0;
+}
+
+
+static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
+ void (*done)(struct scsi_cmnd *))
+{
+ gdth_ha_str *ha = shost_priv(scp->device->host);
+ struct gdth_cmndinfo *cmndinfo;
+
+ TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));
+
+ cmndinfo = gdth_get_cmndinfo(ha);
+ BUG_ON(!cmndinfo);
+
+ scp->scsi_done = done;
+ cmndinfo->timeout_count = 0;
+ cmndinfo->priority = DEFAULT_PRI;
+
+ return __gdth_queuecommand(ha, scp, cmndinfo);
+}
+
+static DEF_SCSI_QCMD(gdth_queuecommand)
+
+static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ struct gdth_cmndinfo *cmndinfo)
+{
+ scp->host_scribble = (unsigned char *)cmndinfo;
+ cmndinfo->wait_for_completion = 1;
+ cmndinfo->phase = -1;
+ cmndinfo->OpCode = -1;
+
+#ifdef GDTH_STATISTICS
+ ++act_ios;
+#endif
+
+ gdth_putq(ha, scp, cmndinfo->priority);
+ gdth_next(ha);
+ return 0;
+}
+
+
+static int gdth_open(struct inode *inode, struct file *filep)
+{
+ gdth_ha_str *ha;
+
+ mutex_lock(&gdth_mutex);
+ list_for_each_entry(ha, &gdth_instances, list) {
+ if (!ha->sdev)
+ ha->sdev = scsi_get_host_dev(ha->shost);
+ }
+ mutex_unlock(&gdth_mutex);
+
+ TRACE(("gdth_open()\n"));
+ return 0;
+}
+
+static int gdth_close(struct inode *inode, struct file *filep)
+{
+ TRACE(("gdth_close()\n"));
+ return 0;
+}
+
+static int ioc_event(void __user *arg)
+{
+ gdth_ioctl_event evt;
+ gdth_ha_str *ha;
+ unsigned long flags;
+
+ if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
+ return -EFAULT;
+ ha = gdth_find_ha(evt.ionode);
+ if (!ha)
+ return -EFAULT;
+
+ if (evt.erase == 0xff) {
+ if (evt.event.event_source == ES_TEST)
+ evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
+ else if (evt.event.event_source == ES_DRIVER)
+ evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
+ else if (evt.event.event_source == ES_SYNC)
+ evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
+ else
+ evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
+ &evt.event.event_data);
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ } else if (evt.erase == 0xfe) {
+ gdth_clear_events();
+ } else if (evt.erase == 0) {
+ evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
+ } else {
+ gdth_readapp_event(ha, evt.erase, &evt.event);
+ }
+ if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ioc_lockdrv(void __user *arg)
+{
+ gdth_ioctl_lockdrv ldrv;
+ u8 i, j;
+ unsigned long flags;
+ gdth_ha_str *ha;
+
+ if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
+ return -EFAULT;
+ ha = gdth_find_ha(ldrv.ionode);
+ if (!ha)
+ return -EFAULT;
+
+ for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
+ j = ldrv.drives[i];
+ if (j >= MAX_HDRIVES || !ha->hdr[j].present)
+ continue;
+ if (ldrv.lock) {
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->hdr[j].lock = 1;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ gdth_wait_completion(ha, ha->bus_cnt, j);
+ } else {
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->hdr[j].lock = 0;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ gdth_next(ha);
+ }
+ }
+ return 0;
+}
+
+static int ioc_resetdrv(void __user *arg, char *cmnd)
+{
+ gdth_ioctl_reset res;
+ gdth_cmd_str cmd;
+ gdth_ha_str *ha;
+ int rval;
+
+ if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
+ res.number >= MAX_HDRIVES)
+ return -EFAULT;
+ ha = gdth_find_ha(res.ionode);
+ if (!ha)
+ return -EFAULT;
+
+ if (!ha->hdr[res.number].present)
+ return 0;
+ memset(&cmd, 0, sizeof(gdth_cmd_str));
+ cmd.Service = CACHESERVICE;
+ cmd.OpCode = GDT_CLUST_RESET;
+ if (ha->cache_feat & GDT_64BIT)
+ cmd.u.cache64.DeviceNo = res.number;
+ else
+ cmd.u.cache.DeviceNo = res.number;
+
+ rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
+ if (rval < 0)
+ return rval;
+ res.status = rval;
+
+ if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ioc_general(void __user *arg, char *cmnd)
+{
+ gdth_ioctl_general gen;
+ char *buf = NULL;
+ u64 paddr;
+ gdth_ha_str *ha;
+ int rval;
+
+ if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
+ return -EFAULT;
+ ha = gdth_find_ha(gen.ionode);
+ if (!ha)
+ return -EFAULT;
+
+ if (gen.data_len > INT_MAX)
+ return -EINVAL;
+ if (gen.sense_len > INT_MAX)
+ return -EINVAL;
+ if (gen.data_len + gen.sense_len > INT_MAX)
+ return -EINVAL;
+
+ if (gen.data_len + gen.sense_len != 0) {
+ if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
+ FALSE, &paddr)))
+ return -EFAULT;
+ if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
+ gen.data_len + gen.sense_len)) {
+ gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
+ return -EFAULT;
+ }
+
+ if (gen.command.OpCode == GDT_IOCTL) {
+ gen.command.u.ioctl.p_param = paddr;
+ } else if (gen.command.Service == CACHESERVICE) {
+ if (ha->cache_feat & GDT_64BIT) {
+ /* copy elements from 32-bit IOCTL structure */
+ gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt;
+ gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo;
+ gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
+ /* addresses */
+ if (ha->cache_feat & SCATTER_GATHER) {
+ gen.command.u.cache64.DestAddr = (u64)-1;
+ gen.command.u.cache64.sg_canz = 1;
+ gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
+ gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
+ gen.command.u.cache64.sg_lst[1].sg_len = 0;
+ } else {
+ gen.command.u.cache64.DestAddr = paddr;
+ gen.command.u.cache64.sg_canz = 0;
+ }
+ } else {
+ if (ha->cache_feat & SCATTER_GATHER) {
+ gen.command.u.cache.DestAddr = 0xffffffff;
+ gen.command.u.cache.sg_canz = 1;
+ gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
+ gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
+ gen.command.u.cache.sg_lst[1].sg_len = 0;
+ } else {
+ gen.command.u.cache.DestAddr = paddr;
+ gen.command.u.cache.sg_canz = 0;
+ }
+ }
+ } else if (gen.command.Service == SCSIRAWSERVICE) {
+ if (ha->raw_feat & GDT_64BIT) {
+ /* copy elements from 32-bit IOCTL structure */
+ char cmd[16];
+ gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len;
+ gen.command.u.raw64.bus = gen.command.u.raw.bus;
+ gen.command.u.raw64.lun = gen.command.u.raw.lun;
+ gen.command.u.raw64.target = gen.command.u.raw.target;
+ memcpy(cmd, gen.command.u.raw.cmd, 16);
+ memcpy(gen.command.u.raw64.cmd, cmd, 16);
+ gen.command.u.raw64.clen = gen.command.u.raw.clen;
+ gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen;
+ gen.command.u.raw64.direction = gen.command.u.raw.direction;
+ /* addresses */
+ if (ha->raw_feat & SCATTER_GATHER) {
+ gen.command.u.raw64.sdata = (u64)-1;
+ gen.command.u.raw64.sg_ranz = 1;
+ gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
+ gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
+ gen.command.u.raw64.sg_lst[1].sg_len = 0;
+ } else {
+ gen.command.u.raw64.sdata = paddr;
+ gen.command.u.raw64.sg_ranz = 0;
+ }
+ gen.command.u.raw64.sense_data = paddr + gen.data_len;
+ } else {
+ if (ha->raw_feat & SCATTER_GATHER) {
+ gen.command.u.raw.sdata = 0xffffffff;
+ gen.command.u.raw.sg_ranz = 1;
+ gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
+ gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
+ gen.command.u.raw.sg_lst[1].sg_len = 0;
+ } else {
+ gen.command.u.raw.sdata = paddr;
+ gen.command.u.raw.sg_ranz = 0;
+ }
+ gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
+ }
+ } else {
+ gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
+ return -EFAULT;
+ }
+ }
+
+ rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
+ if (rval < 0) {
+ gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
+ return rval;
+ }
+ gen.status = rval;
+
+ if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
+ gen.data_len + gen.sense_len)) {
+ gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
+ return -EFAULT;
+ }
+ if (copy_to_user(arg, &gen,
+ sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) {
+ gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
+ return -EFAULT;
+ }
+ gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
+ return 0;
+}
+
+static int ioc_hdrlist(void __user *arg, char *cmnd)
+{
+ gdth_ioctl_rescan *rsc;
+ gdth_cmd_str *cmd;
+ gdth_ha_str *ha;
+ u8 i;
+ int rc = -ENOMEM;
+ u32 cluster_type = 0;
+
+ rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!rsc || !cmd)
+ goto free_fail;
+
+ if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
+ (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
+ rc = -EFAULT;
+ goto free_fail;
+ }
+ memset(cmd, 0, sizeof(gdth_cmd_str));
+
+ for (i = 0; i < MAX_HDRIVES; ++i) {
+ if (!ha->hdr[i].present) {
+ rsc->hdr_list[i].bus = 0xff;
+ continue;
+ }
+ rsc->hdr_list[i].bus = ha->virt_bus;
+ rsc->hdr_list[i].target = i;
+ rsc->hdr_list[i].lun = 0;
+ rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
+ if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
+ cmd->Service = CACHESERVICE;
+ cmd->OpCode = GDT_CLUST_INFO;
+ if (ha->cache_feat & GDT_64BIT)
+ cmd->u.cache64.DeviceNo = i;
+ else
+ cmd->u.cache.DeviceNo = i;
+ if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
+ rsc->hdr_list[i].cluster_type = cluster_type;
+ }
+ }
+
+ if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
+ rc = -EFAULT;
+ else
+ rc = 0;
+
+free_fail:
+ kfree(rsc);
+ kfree(cmd);
+ return rc;
+}
+
+static int ioc_rescan(void __user *arg, char *cmnd)
+{
+ gdth_ioctl_rescan *rsc;
+ gdth_cmd_str *cmd;
+ u16 i, status, hdr_cnt;
+ u32 info;
+ int cyls, hds, secs;
+ int rc = -ENOMEM;
+ unsigned long flags;
+ gdth_ha_str *ha;
+
+ rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd || !rsc)
+ goto free_fail;
+
+ if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
+ (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
+ rc = -EFAULT;
+ goto free_fail;
+ }
+ memset(cmd, 0, sizeof(gdth_cmd_str));
+
+ if (rsc->flag == 0) {
+ /* old method: re-init. cache service */
+ cmd->Service = CACHESERVICE;
+ if (ha->cache_feat & GDT_64BIT) {
+ cmd->OpCode = GDT_X_INIT_HOST;
+ cmd->u.cache64.DeviceNo = LINUX_OS;
+ } else {
+ cmd->OpCode = GDT_INIT;
+ cmd->u.cache.DeviceNo = LINUX_OS;
+ }
+
+ status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
+ i = 0;
+ hdr_cnt = (status == S_OK ? (u16)info : 0);
+ } else {
+ i = rsc->hdr_no;
+ hdr_cnt = i + 1;
+ }
+
+ for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
+ cmd->Service = CACHESERVICE;
+ cmd->OpCode = GDT_INFO;
+ if (ha->cache_feat & GDT_64BIT)
+ cmd->u.cache64.DeviceNo = i;
+ else
+ cmd->u.cache.DeviceNo = i;
+
+ status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ rsc->hdr_list[i].bus = ha->virt_bus;
+ rsc->hdr_list[i].target = i;
+ rsc->hdr_list[i].lun = 0;
+ if (status != S_OK) {
+ ha->hdr[i].present = FALSE;
+ } else {
+ ha->hdr[i].present = TRUE;
+ ha->hdr[i].size = info;
+ /* evaluate mapping */
+ ha->hdr[i].size &= ~SECS32;
+ gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
+ ha->hdr[i].heads = hds;
+ ha->hdr[i].secs = secs;
+ /* round size */
+ ha->hdr[i].size = cyls * hds * secs;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ if (status != S_OK)
+ continue;
+
+ /* extended info, if GDT_64BIT, for drives > 2 TB */
+ /* but we need ha->info2, not yet stored in scp->SCp */
+
+ /* devtype, cluster info, R/W attribs */
+ cmd->Service = CACHESERVICE;
+ cmd->OpCode = GDT_DEVTYPE;
+ if (ha->cache_feat & GDT_64BIT)
+ cmd->u.cache64.DeviceNo = i;
+ else
+ cmd->u.cache.DeviceNo = i;
+
+ status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ cmd->Service = CACHESERVICE;
+ cmd->OpCode = GDT_CLUST_INFO;
+ if (ha->cache_feat & GDT_64BIT)
+ cmd->u.cache64.DeviceNo = i;
+ else
+ cmd->u.cache.DeviceNo = i;
+
+ status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->hdr[i].cluster_type =
+ ((status == S_OK && !shared_access) ? (u16)info : 0);
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
+
+ cmd->Service = CACHESERVICE;
+ cmd->OpCode = GDT_RW_ATTRIBS;
+ if (ha->cache_feat & GDT_64BIT)
+ cmd->u.cache64.DeviceNo = i;
+ else
+ cmd->u.cache.DeviceNo = i;
+
+ status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ }
+
+ if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
+ rc = -EFAULT;
+ else
+ rc = 0;
+
+free_fail:
+ kfree(rsc);
+ kfree(cmd);
+ return rc;
+}
+
+static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ gdth_ha_str *ha;
+ Scsi_Cmnd *scp;
+ unsigned long flags;
+ char cmnd[MAX_COMMAND_SIZE];
+ void __user *argp = (void __user *)arg;
+
+ memset(cmnd, 0xff, 12);
+
+ TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));
+
+ switch (cmd) {
+ case GDTIOCTL_CTRCNT:
+ {
+ int cnt = gdth_ctr_count;
+ if (put_user(cnt, (int __user *)argp))
+ return -EFAULT;
+ break;
+ }
+
+ case GDTIOCTL_DRVERS:
+ {
+ int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
+ if (put_user(ver, (int __user *)argp))
+ return -EFAULT;
+ break;
+ }
+
+ case GDTIOCTL_OSVERS:
+ {
+ gdth_ioctl_osvers osv;
+
+ osv.version = (u8)(LINUX_VERSION_CODE >> 16);
+ osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
+ osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
+ if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
+ return -EFAULT;
+ break;
+ }
+
+ case GDTIOCTL_CTRTYPE:
+ {
+ gdth_ioctl_ctrtype ctrt;
+
+ if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
+ (NULL == (ha = gdth_find_ha(ctrt.ionode))))
+ return -EFAULT;
+
+ if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
+ ctrt.type = (u8)((ha->stype>>20) - 0x10);
+ } else {
+ if (ha->type != GDT_PCIMPR) {
+ ctrt.type = (u8)((ha->stype<<4) + 6);
+ } else {
+ ctrt.type =
+ (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
+ if (ha->stype >= 0x300)
+ ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
+ else
+ ctrt.ext_type = 0x6000 | ha->stype;
+ }
+ ctrt.device_id = ha->pdev->device;
+ ctrt.sub_device_id = ha->pdev->subsystem_device;
+ }
+ ctrt.info = ha->brd_phys;
+ ctrt.oem_id = ha->oem_id;
+ if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
+ return -EFAULT;
+ break;
+ }
+
+ case GDTIOCTL_GENERAL:
+ return ioc_general(argp, cmnd);
+
+ case GDTIOCTL_EVENT:
+ return ioc_event(argp);
+
+ case GDTIOCTL_LOCKDRV:
+ return ioc_lockdrv(argp);
+
+ case GDTIOCTL_LOCKCHN:
+ {
+ gdth_ioctl_lockchn lchn;
+ u8 i, j;
+
+ if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
+ (NULL == (ha = gdth_find_ha(lchn.ionode))))
+ return -EFAULT;
+
+ i = lchn.channel;
+ if (i < ha->bus_cnt) {
+ if (lchn.lock) {
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->raw[i].lock = 1;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ for (j = 0; j < ha->tid_cnt; ++j)
+ gdth_wait_completion(ha, i, j);
+ } else {
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->raw[i].lock = 0;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ for (j = 0; j < ha->tid_cnt; ++j)
+ gdth_next(ha);
+ }
+ }
+ break;
+ }
+
+ case GDTIOCTL_RESCAN:
+ return ioc_rescan(argp, cmnd);
+
+ case GDTIOCTL_HDRLIST:
+ return ioc_hdrlist(argp, cmnd);
+
+ case GDTIOCTL_RESET_BUS:
+ {
+ gdth_ioctl_reset res;
+ int rval;
+
+ if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
+ (NULL == (ha = gdth_find_ha(res.ionode))))
+ return -EFAULT;
+
+ scp = kzalloc(sizeof(*scp), GFP_KERNEL);
+ if (!scp)
+ return -ENOMEM;
+ scp->device = ha->sdev;
+ scp->cmd_len = 12;
+ scp->device->channel = res.number;
+ rval = gdth_eh_bus_reset(scp);
+ res.status = (rval == SUCCESS ? S_OK : S_GENERR);
+ kfree(scp);
+
+ if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
+ return -EFAULT;
+ break;
+ }
+
+ case GDTIOCTL_RESET_DRV:
+ return ioc_resetdrv(argp, cmnd);
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&gdth_mutex);
+ ret = gdth_ioctl(file, cmd, arg);
+ mutex_unlock(&gdth_mutex);
+
+ return ret;
+}
+
+/* flush routine */
+static void gdth_flush(gdth_ha_str *ha)
+{
+ int i;
+ gdth_cmd_str gdtcmd;
+ char cmnd[MAX_COMMAND_SIZE];
+ memset(cmnd, 0xff, MAX_COMMAND_SIZE);
+
+ TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
+
+ for (i = 0; i < MAX_HDRIVES; ++i) {
+ if (ha->hdr[i].present) {
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_FLUSH;
+ if (ha->cache_feat & GDT_64BIT) {
+ gdtcmd.u.cache64.DeviceNo = i;
+ gdtcmd.u.cache64.BlockNo = 1;
+ gdtcmd.u.cache64.sg_canz = 0;
+ } else {
+ gdtcmd.u.cache.DeviceNo = i;
+ gdtcmd.u.cache.BlockNo = 1;
+ gdtcmd.u.cache.sg_canz = 0;
+ }
+ TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i));
+
+ gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL);
+ }
+ }
+}
+
+/* configure lun */
+static int gdth_slave_configure(struct scsi_device *sdev)
+{
+ sdev->skip_ms_page_3f = 1;
+ sdev->skip_ms_page_8 = 1;
+ return 0;
+}
+
+static struct scsi_host_template gdth_template = {
+ .name = "GDT SCSI Disk Array Controller",
+ .info = gdth_info,
+ .queuecommand = gdth_queuecommand,
+ .eh_bus_reset_handler = gdth_eh_bus_reset,
+ .slave_configure = gdth_slave_configure,
+ .bios_param = gdth_bios_param,
+ .show_info = gdth_show_info,
+ .write_info = gdth_set_info,
+ .eh_timed_out = gdth_timed_out,
+ .proc_name = "gdth",
+ .can_queue = GDTH_MAXCMDS,
+ .this_id = -1,
+ .sg_tablesize = GDTH_MAXSG,
+ .cmd_per_lun = GDTH_MAXC_P_L,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
+};
+
+#ifdef CONFIG_ISA
+static int __init gdth_isa_probe_one(u32 isa_bios)
+{
+ struct Scsi_Host *shp;
+ gdth_ha_str *ha;
+ dma_addr_t scratch_dma_handle = 0;
+ int error, i;
+
+ if (!gdth_search_isa(isa_bios))
+ return -ENXIO;
+
+ shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
+ if (!shp)
+ return -ENOMEM;
+ ha = shost_priv(shp);
+
+ error = -ENODEV;
+ if (!gdth_init_isa(isa_bios,ha))
+ goto out_host_put;
+
+ /* controller found and initialized */
+ printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
+ isa_bios, ha->irq, ha->drq);
+
+ error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
+ if (error) {
+ printk("GDT-ISA: Unable to allocate IRQ\n");
+ goto out_host_put;
+ }
+
+ error = request_dma(ha->drq, "gdth");
+ if (error) {
+ printk("GDT-ISA: Unable to allocate DMA channel\n");
+ goto out_free_irq;
+ }
+
+ set_dma_mode(ha->drq,DMA_MODE_CASCADE);
+ enable_dma(ha->drq);
+ shp->unchecked_isa_dma = 1;
+ shp->irq = ha->irq;
+ shp->dma_channel = ha->drq;
+
+ ha->hanum = gdth_ctr_count++;
+ ha->shost = shp;
+
+ ha->pccb = &ha->cmdext;
+ ha->ccb_phys = 0L;
+ ha->pdev = NULL;
+
+ error = -ENOMEM;
+
+ ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
+ &scratch_dma_handle);
+ if (!ha->pscratch)
+ goto out_dec_counters;
+ ha->scratch_phys = scratch_dma_handle;
+
+ ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
+ &scratch_dma_handle);
+ if (!ha->pmsg)
+ goto out_free_pscratch;
+ ha->msg_phys = scratch_dma_handle;
+
+#ifdef INT_COAL
+ ha->coal_stat = pci_alloc_consistent(ha->pdev,
+ sizeof(gdth_coal_status) * MAXOFFSETS,
+ &scratch_dma_handle);
+ if (!ha->coal_stat)
+ goto out_free_pmsg;
+ ha->coal_stat_phys = scratch_dma_handle;
+#endif
+
+ ha->scratch_busy = FALSE;
+ ha->req_first = NULL;
+ ha->tid_cnt = MAX_HDRIVES;
+ if (max_ids > 0 && max_ids < ha->tid_cnt)
+ ha->tid_cnt = max_ids;
+ for (i = 0; i < GDTH_MAXCMDS; ++i)
+ ha->cmd_tab[i].cmnd = UNUSED_CMND;
+ ha->scan_mode = rescan ? 0x10 : 0;
+
+ error = -ENODEV;
+ if (!gdth_search_drives(ha)) {
+ printk("GDT-ISA: Error during device scan\n");
+ goto out_free_coal_stat;
+ }
+
+ if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
+ hdr_channel = ha->bus_cnt;
+ ha->virt_bus = hdr_channel;
+
+ if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
+ shp->max_cmd_len = 16;
+
+ shp->max_id = ha->tid_cnt;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt;
+
+ spin_lock_init(&ha->smp_lock);
+ gdth_enable_int(ha);
+
+ error = scsi_add_host(shp, NULL);
+ if (error)
+ goto out_free_coal_stat;
+ list_add_tail(&ha->list, &gdth_instances);
+ gdth_timer_init();
+
+ scsi_scan_host(shp);
+
+ return 0;
+
+ out_free_coal_stat:
+#ifdef INT_COAL
+ pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
+ ha->coal_stat, ha->coal_stat_phys);
+ out_free_pmsg:
+#endif
+ pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+ ha->pmsg, ha->msg_phys);
+ out_free_pscratch:
+ pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+ ha->pscratch, ha->scratch_phys);
+ out_dec_counters:
+ gdth_ctr_count--;
+ out_free_irq:
+ free_irq(ha->irq, ha);
+ out_host_put:
+ scsi_host_put(shp);
+ return error;
+}
+#endif /* CONFIG_ISA */
+
+#ifdef CONFIG_EISA
+static int __init gdth_eisa_probe_one(u16 eisa_slot)
+{
+ struct Scsi_Host *shp;
+ gdth_ha_str *ha;
+ dma_addr_t scratch_dma_handle = 0;
+ int error, i;
+
+ if (!gdth_search_eisa(eisa_slot))
+ return -ENXIO;
+
+ shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
+ if (!shp)
+ return -ENOMEM;
+ ha = shost_priv(shp);
+
+ error = -ENODEV;
+ if (!gdth_init_eisa(eisa_slot,ha))
+ goto out_host_put;
+
+ /* controller found and initialized */
+ printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
+ eisa_slot >> 12, ha->irq);
+
+ error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
+ if (error) {
+ printk("GDT-EISA: Unable to allocate IRQ\n");
+ goto out_host_put;
+ }
+
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+
+ ha->hanum = gdth_ctr_count++;
+ ha->shost = shp;
+
+ TRACE2(("EISA detect Bus 0: hanum %d\n", ha->hanum));
+
+ ha->pccb = &ha->cmdext;
+ ha->ccb_phys = 0L;
+
+ error = -ENOMEM;
+
+ ha->pdev = NULL;
+ ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
+ &scratch_dma_handle);
+ if (!ha->pscratch)
+ goto out_free_irq;
+ ha->scratch_phys = scratch_dma_handle;
+
+ ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
+ &scratch_dma_handle);
+ if (!ha->pmsg)
+ goto out_free_pscratch;
+ ha->msg_phys = scratch_dma_handle;
+
+#ifdef INT_COAL
+ ha->coal_stat = pci_alloc_consistent(ha->pdev,
+ sizeof(gdth_coal_status) * MAXOFFSETS,
+ &scratch_dma_handle);
+ if (!ha->coal_stat)
+ goto out_free_pmsg;
+ ha->coal_stat_phys = scratch_dma_handle;
+#endif
+
+ ha->ccb_phys = pci_map_single(ha->pdev,ha->pccb,
+ sizeof(gdth_cmd_str), PCI_DMA_BIDIRECTIONAL);
+ if (!ha->ccb_phys)
+ goto out_free_coal_stat;
+
+ ha->scratch_busy = FALSE;
+ ha->req_first = NULL;
+ ha->tid_cnt = MAX_HDRIVES;
+ if (max_ids > 0 && max_ids < ha->tid_cnt)
+ ha->tid_cnt = max_ids;
+ for (i = 0; i < GDTH_MAXCMDS; ++i)
+ ha->cmd_tab[i].cmnd = UNUSED_CMND;
+ ha->scan_mode = rescan ? 0x10 : 0;
+
+ if (!gdth_search_drives(ha)) {
+ printk("GDT-EISA: Error during device scan\n");
+ error = -ENODEV;
+ goto out_free_ccb_phys;
+ }
+
+ if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
+ hdr_channel = ha->bus_cnt;
+ ha->virt_bus = hdr_channel;
+
+ if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
+ shp->max_cmd_len = 16;
+
+ shp->max_id = ha->tid_cnt;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt;
+
+ spin_lock_init(&ha->smp_lock);
+ gdth_enable_int(ha);
+
+ error = scsi_add_host(shp, NULL);
+ if (error)
+ goto out_free_ccb_phys;
+ list_add_tail(&ha->list, &gdth_instances);
+ gdth_timer_init();
+
+ scsi_scan_host(shp);
+
+ return 0;
+
+ out_free_ccb_phys:
+ pci_unmap_single(ha->pdev,ha->ccb_phys, sizeof(gdth_cmd_str),
+ PCI_DMA_BIDIRECTIONAL);
+ out_free_coal_stat:
+#ifdef INT_COAL
+ pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
+ ha->coal_stat, ha->coal_stat_phys);
+ out_free_pmsg:
+#endif
+ pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+ ha->pmsg, ha->msg_phys);
+ out_free_pscratch:
+ pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+ ha->pscratch, ha->scratch_phys);
+ out_free_irq:
+ free_irq(ha->irq, ha);
+ gdth_ctr_count--;
+ out_host_put:
+ scsi_host_put(shp);
+ return error;
+}
+#endif /* CONFIG_EISA */
+
+#ifdef CONFIG_PCI
+static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
+{
+ struct Scsi_Host *shp;
+ gdth_ha_str *ha;
+ dma_addr_t scratch_dma_handle = 0;
+ int error, i;
+ struct pci_dev *pdev = pcistr->pdev;
+
+ *ha_out = NULL;
+
+ shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
+ if (!shp)
+ return -ENOMEM;
+ ha = shost_priv(shp);
+
+ error = -ENODEV;
+ if (!gdth_init_pci(pdev, pcistr, ha))
+ goto out_host_put;
+
+ /* controller found and initialized */
+ printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ ha->irq);
+
+ error = request_irq(ha->irq, gdth_interrupt,
+ IRQF_SHARED, "gdth", ha);
+ if (error) {
+ printk("GDT-PCI: Unable to allocate IRQ\n");
+ goto out_host_put;
+ }
+
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+
+ ha->hanum = gdth_ctr_count++;
+ ha->shost = shp;
+
+ ha->pccb = &ha->cmdext;
+ ha->ccb_phys = 0L;
+
+ error = -ENOMEM;
+
+ ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
+ &scratch_dma_handle);
+ if (!ha->pscratch)
+ goto out_free_irq;
+ ha->scratch_phys = scratch_dma_handle;
+
+ ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
+ &scratch_dma_handle);
+ if (!ha->pmsg)
+ goto out_free_pscratch;
+ ha->msg_phys = scratch_dma_handle;
+
+#ifdef INT_COAL
+ ha->coal_stat = pci_alloc_consistent(ha->pdev,
+ sizeof(gdth_coal_status) * MAXOFFSETS,
+ &scratch_dma_handle);
+ if (!ha->coal_stat)
+ goto out_free_pmsg;
+ ha->coal_stat_phys = scratch_dma_handle;
+#endif
+
+ ha->scratch_busy = FALSE;
+ ha->req_first = NULL;
+ ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
+ if (max_ids > 0 && max_ids < ha->tid_cnt)
+ ha->tid_cnt = max_ids;
+ for (i = 0; i < GDTH_MAXCMDS; ++i)
+ ha->cmd_tab[i].cmnd = UNUSED_CMND;
+ ha->scan_mode = rescan ? 0x10 : 0;
+
+ error = -ENODEV;
+ if (!gdth_search_drives(ha)) {
+ printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
+ goto out_free_coal_stat;
+ }
+
+ if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
+ hdr_channel = ha->bus_cnt;
+ ha->virt_bus = hdr_channel;
+
+ /* 64-bit DMA only supported from FW >= x.43 */
+ if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
+ !ha->dma64_support) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "GDT-PCI %d: "
+ "Unable to set 32-bit DMA\n", ha->hanum);
+ goto out_free_coal_stat;
+ }
+ } else {
+ shp->max_cmd_len = 16;
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "GDT-PCI %d: "
+ "Unable to set 64/32-bit DMA\n", ha->hanum);
+ goto out_free_coal_stat;
+ }
+ }
+
+ shp->max_id = ha->tid_cnt;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt;
+
+ spin_lock_init(&ha->smp_lock);
+ gdth_enable_int(ha);
+
+ error = scsi_add_host(shp, &pdev->dev);
+ if (error)
+ goto out_free_coal_stat;
+ list_add_tail(&ha->list, &gdth_instances);
+
+ pci_set_drvdata(ha->pdev, ha);
+ gdth_timer_init();
+
+ scsi_scan_host(shp);
+
+ *ha_out = ha;
+
+ return 0;
+
+ out_free_coal_stat:
+#ifdef INT_COAL
+ pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
+ ha->coal_stat, ha->coal_stat_phys);
+ out_free_pmsg:
+#endif
+ pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+ ha->pmsg, ha->msg_phys);
+ out_free_pscratch:
+ pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+ ha->pscratch, ha->scratch_phys);
+ out_free_irq:
+ free_irq(ha->irq, ha);
+ gdth_ctr_count--;
+ out_host_put:
+ scsi_host_put(shp);
+ return error;
+}
+#endif /* CONFIG_PCI */
+
+static void gdth_remove_one(gdth_ha_str *ha)
+{
+ struct Scsi_Host *shp = ha->shost;
+
+ TRACE2(("gdth_remove_one()\n"));
+
+ scsi_remove_host(shp);
+
+ gdth_flush(ha);
+
+ if (ha->sdev) {
+ scsi_free_host_dev(ha->sdev);
+ ha->sdev = NULL;
+ }
+
+ if (shp->irq)
+ free_irq(shp->irq,ha);
+
+#ifdef CONFIG_ISA
+ if (shp->dma_channel != 0xff)
+ free_dma(shp->dma_channel);
+#endif
+#ifdef INT_COAL
+ if (ha->coal_stat)
+ pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
+ MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys);
+#endif
+ if (ha->pscratch)
+ pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+ ha->pscratch, ha->scratch_phys);
+ if (ha->pmsg)
+ pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+ ha->pmsg, ha->msg_phys);
+ if (ha->ccb_phys)
+ pci_unmap_single(ha->pdev,ha->ccb_phys,
+ sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
+
+ scsi_host_put(shp);
+}
+
+static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
+{
+ gdth_ha_str *ha;
+
+ TRACE2(("gdth_halt() event %d\n", (int)event));
+ if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+
+ list_for_each_entry(ha, &gdth_instances, list)
+ gdth_flush(ha);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gdth_notifier = {
+ gdth_halt, NULL, 0
+};
+
+static int __init gdth_init(void)
+{
+ if (disable) {
+ printk("GDT-HA: Controller driver disabled from"
+ " command line !\n");
+ return 0;
+ }
+
+ printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",
+ GDTH_VERSION_STR);
+
+ /* initializations */
+ gdth_polling = TRUE;
+ gdth_clear_events();
+ init_timer(&gdth_timer);
+
+ /* As default we do not probe for EISA or ISA controllers */
+ if (probe_eisa_isa) {
+ /* scanning for controllers, at first: ISA controller */
+#ifdef CONFIG_ISA
+ u32 isa_bios;
+ for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
+ isa_bios += 0x8000UL)
+ gdth_isa_probe_one(isa_bios);
+#endif
+#ifdef CONFIG_EISA
+ {
+ u16 eisa_slot;
+ for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
+ eisa_slot += 0x1000)
+ gdth_eisa_probe_one(eisa_slot);
+ }
+#endif
+ }
+
+#ifdef CONFIG_PCI
+ /* scanning for PCI controllers */
+ if (pci_register_driver(&gdth_pci_driver)) {
+ gdth_ha_str *ha;
+
+ list_for_each_entry(ha, &gdth_instances, list)
+ gdth_remove_one(ha);
+ return -ENODEV;
+ }
+#endif /* CONFIG_PCI */
+
+ TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
+
+ major = register_chrdev(0,"gdth", &gdth_fops);
+ register_reboot_notifier(&gdth_notifier);
+ gdth_polling = FALSE;
+ return 0;
+}
+
+static void __exit gdth_exit(void)
+{
+ gdth_ha_str *ha;
+
+ unregister_chrdev(major, "gdth");
+ unregister_reboot_notifier(&gdth_notifier);
+
+#ifdef GDTH_STATISTICS
+ del_timer_sync(&gdth_timer);
+#endif
+
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&gdth_pci_driver);
+#endif
+
+ list_for_each_entry(ha, &gdth_instances, list)
+ gdth_remove_one(ha);
+}
+
+module_init(gdth_init);
+module_exit(gdth_exit);
+
+#ifndef MODULE
+__setup("gdth=", option_setup);
+#endif
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
new file mode 100644
index 000000000..3fd8b83ff
--- /dev/null
+++ b/drivers/scsi/gdth.h
@@ -0,0 +1,1013 @@
+#ifndef _GDTH_H
+#define _GDTH_H
+
+/*
+ * Header file for the GDT Disk Array/Storage RAID controllers driver for Linux
+ *
+ * gdth.h Copyright (C) 1995-06 ICP vortex, Achim Leubner
+ * See gdth.c for further informations and
+ * below for supported controller types
+ *
+ * <achim_leubner@adaptec.com>
+ *
+ * $Id: gdth.h,v 1.58 2006/01/11 16:14:09 achim Exp $
+ */
+
+#include <linux/types.h>
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* defines, macros */
+
+/* driver version */
+#define GDTH_VERSION_STR "3.05"
+#define GDTH_VERSION 3
+#define GDTH_SUBVERSION 5
+
+/* protocol version */
+#define PROTOCOL_VERSION 1
+
+/* OEM IDs */
+#define OEM_ID_ICP 0x941c
+#define OEM_ID_INTEL 0x8000
+
+/* controller classes */
+#define GDT_ISA 0x01 /* ISA controller */
+#define GDT_EISA 0x02 /* EISA controller */
+#define GDT_PCI 0x03 /* PCI controller */
+#define GDT_PCINEW 0x04 /* new PCI controller */
+#define GDT_PCIMPR 0x05 /* PCI MPR controller */
+/* GDT_EISA, controller subtypes EISA */
+#define GDT3_ID 0x0130941c /* GDT3000/3020 */
+#define GDT3A_ID 0x0230941c /* GDT3000A/3020A/3050A */
+#define GDT3B_ID 0x0330941c /* GDT3000B/3010A */
+/* GDT_ISA */
+#define GDT2_ID 0x0120941c /* GDT2000/2020 */
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
+/* GDT_PCI */
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0 /* GDT6000/6020/6050 */
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 1 /* GDT6000B/6010 */
+/* GDT_PCINEW */
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 2 /* GDT6110/6510 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 3 /* GDT6120/6520 */
+#define PCI_DEVICE_ID_VORTEX_GDT6530 4 /* GDT6530 */
+#define PCI_DEVICE_ID_VORTEX_GDT6550 5 /* GDT6550 */
+/* GDT_PCINEW, wide/ultra SCSI controllers */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 6 /* GDT6117/6517 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 7 /* GDT6127/6527 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537 8 /* GDT6537 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557 9 /* GDT6557/6557-ECC */
+/* GDT_PCINEW, wide SCSI controllers */
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 10 /* GDT6115/6515 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 11 /* GDT6125/6525 */
+#define PCI_DEVICE_ID_VORTEX_GDT6535 12 /* GDT6535 */
+#define PCI_DEVICE_ID_VORTEX_GDT6555 13 /* GDT6555/6555-ECC */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RP
+/* GDT_MPR, RP series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x100 /* GDT6117RP/GDT6517RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x101 /* GDT6127RP/GDT6527RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x102 /* GDT6537RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x103 /* GDT6557RP */
+/* GDT_MPR, RP series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x104 /* GDT6111RP/GDT6511RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x105 /* GDT6121RP/GDT6521RP */
+#endif
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RD
+/* GDT_MPR, RD series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RD 0x110 /* GDT6117RD/GDT6517RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RD 0x111 /* GDT6127RD/GDT6527RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RD 0x112 /* GDT6537RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RD 0x113 /* GDT6557RD */
+/* GDT_MPR, RD series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RD 0x114 /* GDT6111RD/GDT6511RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RD 0x115 /* GDT6121RD/GDT6521RD */
+/* GDT_MPR, RD series, wide/ultra2 SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x18RD 0x118 /* GDT6118RD/GDT6518RD/
+ GDT6618RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6x28RD 0x119 /* GDT6128RD/GDT6528RD/
+ GDT6628RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6x38RD 0x11A /* GDT6538RD/GDT6638RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6x58RD 0x11B /* GDT6558RD/GDT6658RD */
+/* GDT_MPR, RN series (64-bit PCI), wide/ultra2 SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT7x18RN 0x168 /* GDT7118RN/GDT7518RN/
+ GDT7618RN */
+#define PCI_DEVICE_ID_VORTEX_GDT7x28RN 0x169 /* GDT7128RN/GDT7528RN/
+ GDT7628RN */
+#define PCI_DEVICE_ID_VORTEX_GDT7x38RN 0x16A /* GDT7538RN/GDT7638RN */
+#define PCI_DEVICE_ID_VORTEX_GDT7x58RN 0x16B /* GDT7558RN/GDT7658RN */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6x19RD
+/* GDT_MPR, RD series, Fibre Channel */
+#define PCI_DEVICE_ID_VORTEX_GDT6x19RD 0x210 /* GDT6519RD/GDT6619RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6x29RD 0x211 /* GDT6529RD/GDT6629RD */
+/* GDT_MPR, RN series (64-bit PCI), Fibre Channel */
+#define PCI_DEVICE_ID_VORTEX_GDT7x19RN 0x260 /* GDT7519RN/GDT7619RN */
+#define PCI_DEVICE_ID_VORTEX_GDT7x29RN 0x261 /* GDT7529RN/GDT7629RN */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDTMAXRP
+/* GDT_MPR, last device ID */
+#define PCI_DEVICE_ID_VORTEX_GDTMAXRP 0x2ff
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDTNEWRX
+/* new GDT Rx Controller */
+#define PCI_DEVICE_ID_VORTEX_GDTNEWRX 0x300
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDTNEWRX2
+/* new(2) GDT Rx Controller */
+#define PCI_DEVICE_ID_VORTEX_GDTNEWRX2 0x301
+#endif
+
+#ifndef PCI_DEVICE_ID_INTEL_SRC
+/* Intel Storage RAID Controller */
+#define PCI_DEVICE_ID_INTEL_SRC 0x600
+#endif
+
+#ifndef PCI_DEVICE_ID_INTEL_SRC_XSCALE
+/* Intel Storage RAID Controller */
+#define PCI_DEVICE_ID_INTEL_SRC_XSCALE 0x601
+#endif
+
+/* limits */
+#define GDTH_SCRATCH PAGE_SIZE /* 4KB scratch buffer */
+#define GDTH_MAXCMDS 120
+#define GDTH_MAXC_P_L 16 /* max. cmds per lun */
+#define GDTH_MAX_RAW 2 /* max. cmds per raw device */
+#define MAXOFFSETS 128
+#define MAXHA 16
+#define MAXID 127
+#define MAXLUN 8
+#define MAXBUS 6
+#define MAX_EVENTS 100 /* event buffer count */
+#define MAX_RES_ARGS 40 /* device reservation,
+ must be a multiple of 4 */
+#define MAXCYLS 1024
+#define HEADS 64
+#define SECS 32 /* mapping 64*32 */
+#define MEDHEADS 127
+#define MEDSECS 63 /* mapping 127*63 */
+#define BIGHEADS 255
+#define BIGSECS 63 /* mapping 255*63 */
+
+/* special command ptr. */
+#define UNUSED_CMND ((Scsi_Cmnd *)-1)
+#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
+#define SCREEN_CMND ((Scsi_Cmnd *)-3)
+#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
+
+/* controller services */
+#define SCSIRAWSERVICE 3
+#define CACHESERVICE 9
+#define SCREENSERVICE 11
+
+/* screenservice defines */
+#define MSG_INV_HANDLE -1 /* special message handle */
+#define MSGLEN 16 /* size of message text */
+#define MSG_SIZE 34 /* size of message structure */
+#define MSG_REQUEST 0 /* async. event: message */
+
+/* cacheservice defines */
+#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */
+
+/* DPMEM constants */
+#define DPMEM_MAGIC 0xC0FFEE11
+#define IC_HEADER_BYTES 48
+#define IC_QUEUE_BYTES 4
+#define DPMEM_COMMAND_OFFSET IC_HEADER_BYTES+IC_QUEUE_BYTES*MAXOFFSETS
+
+/* cluster_type constants */
+#define CLUSTER_DRIVE 1
+#define CLUSTER_MOUNTED 2
+#define CLUSTER_RESERVED 4
+#define CLUSTER_RESERVE_STATE (CLUSTER_DRIVE|CLUSTER_MOUNTED|CLUSTER_RESERVED)
+
+/* commands for all services, cache service */
+#define GDT_INIT 0 /* service initialization */
+#define GDT_READ 1 /* read command */
+#define GDT_WRITE 2 /* write command */
+#define GDT_INFO 3 /* information about devices */
+#define GDT_FLUSH 4 /* flush dirty cache buffers */
+#define GDT_IOCTL 5 /* ioctl command */
+#define GDT_DEVTYPE 9 /* additional information */
+#define GDT_MOUNT 10 /* mount cache device */
+#define GDT_UNMOUNT 11 /* unmount cache device */
+#define GDT_SET_FEAT 12 /* set feat. (scatter/gather) */
+#define GDT_GET_FEAT 13 /* get features */
+#define GDT_WRITE_THR 16 /* write through */
+#define GDT_READ_THR 17 /* read through */
+#define GDT_EXT_INFO 18 /* extended info */
+#define GDT_RESET 19 /* controller reset */
+#define GDT_RESERVE_DRV 20 /* reserve host drive */
+#define GDT_RELEASE_DRV 21 /* release host drive */
+#define GDT_CLUST_INFO 22 /* cluster info */
+#define GDT_RW_ATTRIBS 23 /* R/W attribs (write thru,..)*/
+#define GDT_CLUST_RESET 24 /* releases the cluster drives*/
+#define GDT_FREEZE_IO 25 /* freezes all IOs */
+#define GDT_UNFREEZE_IO 26 /* unfreezes all IOs */
+#define GDT_X_INIT_HOST 29 /* ext. init: 64 bit support */
+#define GDT_X_INFO 30 /* ext. info for drives>2TB */
+
+/* raw service commands */
+#define GDT_RESERVE 14 /* reserve dev. to raw serv. */
+#define GDT_RELEASE 15 /* release device */
+#define GDT_RESERVE_ALL 16 /* reserve all devices */
+#define GDT_RELEASE_ALL 17 /* release all devices */
+#define GDT_RESET_BUS 18 /* reset bus */
+#define GDT_SCAN_START 19 /* start device scan */
+#define GDT_SCAN_END 20 /* stop device scan */
+#define GDT_X_INIT_RAW 21 /* ext. init: 64 bit support */
+
+/* screen service commands */
+#define GDT_REALTIME 3 /* realtime clock to screens. */
+#define GDT_X_INIT_SCR 4 /* ext. init: 64 bit support */
+
+/* IOCTL command defines */
+#define SCSI_DR_INFO 0x00 /* SCSI drive info */
+#define SCSI_CHAN_CNT 0x05 /* SCSI channel count */
+#define SCSI_DR_LIST 0x06 /* SCSI drive list */
+#define SCSI_DEF_CNT 0x15 /* grown/primary defects */
+#define DSK_STATISTICS 0x4b /* SCSI disk statistics */
+#define IOCHAN_DESC 0x5d /* description of IO channel */
+#define IOCHAN_RAW_DESC 0x5e /* description of raw IO chn. */
+#define L_CTRL_PATTERN 0x20000000L /* SCSI IOCTL mask */
+#define ARRAY_INFO 0x12 /* array drive info */
+#define ARRAY_DRV_LIST 0x0f /* array drive list */
+#define ARRAY_DRV_LIST2 0x34 /* array drive list (new) */
+#define LA_CTRL_PATTERN 0x10000000L /* array IOCTL mask */
+#define CACHE_DRV_CNT 0x01 /* cache drive count */
+#define CACHE_DRV_LIST 0x02 /* cache drive list */
+#define CACHE_INFO 0x04 /* cache info */
+#define CACHE_CONFIG 0x05 /* cache configuration */
+#define CACHE_DRV_INFO 0x07 /* cache drive info */
+#define BOARD_FEATURES 0x15 /* controller features */
+#define BOARD_INFO 0x28 /* controller info */
+#define SET_PERF_MODES 0x82 /* set mode (coalescing,..) */
+#define GET_PERF_MODES 0x83 /* get mode */
+#define CACHE_READ_OEM_STRING_RECORD 0x84 /* read OEM string record */
+#define HOST_GET 0x10001L /* get host drive list */
+#define IO_CHANNEL 0x00020000L /* default IO channel */
+#define INVALID_CHANNEL 0x0000ffffL /* invalid channel */
+
+/* service errors */
+#define S_OK 1 /* no error */
+#define S_GENERR 6 /* general error */
+#define S_BSY 7 /* controller busy */
+#define S_CACHE_UNKNOWN 12 /* cache serv.: drive unknown */
+#define S_RAW_SCSI 12 /* raw serv.: target error */
+#define S_RAW_ILL 0xff /* raw serv.: illegal */
+#define S_NOFUNC -2 /* unknown function */
+#define S_CACHE_RESERV -24 /* cache: reserv. conflict */
+
+/* timeout values */
+#define INIT_RETRIES 100000 /* 100000 * 1ms = 100s */
+#define INIT_TIMEOUT 100000 /* 100000 * 1ms = 100s */
+#define POLL_TIMEOUT 10000 /* 10000 * 1ms = 10s */
+
+/* priorities */
+#define DEFAULT_PRI 0x20
+#define IOCTL_PRI 0x10
+#define HIGH_PRI 0x08
+
+/* data directions */
+#define GDTH_DATA_IN 0x01000000L /* data from target */
+#define GDTH_DATA_OUT 0x00000000L /* data to target */
+
+/* BMIC registers (EISA controllers) */
+#define ID0REG 0x0c80 /* board ID */
+#define EINTENABREG 0x0c89 /* interrupt enable */
+#define SEMA0REG 0x0c8a /* command semaphore */
+#define SEMA1REG 0x0c8b /* status semaphore */
+#define LDOORREG 0x0c8d /* local doorbell */
+#define EDENABREG 0x0c8e /* EISA system doorbell enab. */
+#define EDOORREG 0x0c8f /* EISA system doorbell */
+#define MAILBOXREG 0x0c90 /* mailbox reg. (16 bytes) */
+#define EISAREG 0x0cc0 /* EISA configuration */
+
+/* other defines */
+#define LINUX_OS 8 /* used for cache optim. */
+#define SECS32 0x1f /* round capacity */
+#define BIOS_ID_OFFS 0x10 /* offset contr-ID in ISABIOS */
+#define LOCALBOARD 0 /* board node always 0 */
+#define ASYNCINDEX 0 /* cmd index async. event */
+#define SPEZINDEX 1 /* cmd index unknown service */
+#define COALINDEX (GDTH_MAXCMDS + 2)
+
+/* features */
+#define SCATTER_GATHER 1 /* s/g feature */
+#define GDT_WR_THROUGH 0x100 /* WRITE_THROUGH supported */
+#define GDT_64BIT 0x200 /* 64bit / drv>2TB support */
+
+#include "gdth_ioctl.h"
+
+/* screenservice message */
+typedef struct {
+ u32 msg_handle; /* message handle */
+ u32 msg_len; /* size of message */
+ u32 msg_alen; /* answer length */
+ u8 msg_answer; /* answer flag */
+ u8 msg_ext; /* more messages */
+ u8 msg_reserved[2];
+ char msg_text[MSGLEN+2]; /* the message text */
+} __attribute__((packed)) gdth_msg_str;
+
+
+/* IOCTL data structures */
+
+/* Status coalescing buffer for returning multiple requests per interrupt */
+typedef struct {
+ u32 status;
+ u32 ext_status;
+ u32 info0;
+ u32 info1;
+} __attribute__((packed)) gdth_coal_status;
+
+/* performance mode data structure */
+typedef struct {
+ u32 version; /* The version of this IOCTL structure. */
+ u32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */
+ u32 st_buff_addr1; /* physical address of status buffer 1 */
+ u32 st_buff_u_addr1; /* reserved for 64 bit addressing */
+ u32 st_buff_indx1; /* reserved command idx. for this buffer */
+ u32 st_buff_addr2; /* physical address of status buffer 1 */
+ u32 st_buff_u_addr2; /* reserved for 64 bit addressing */
+ u32 st_buff_indx2; /* reserved command idx. for this buffer */
+ u32 st_buff_size; /* size of each buffer in bytes */
+ u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
+ u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
+ u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
+ u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
+ u32 cmd_buff_addr2; /* physical address of cmd buffer 1 */
+ u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
+ u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
+ u32 cmd_buff_size; /* size of each cmd buffer in bytes */
+ u32 reserved1;
+ u32 reserved2;
+} __attribute__((packed)) gdth_perf_modes;
+
+/* SCSI drive info */
+typedef struct {
+ u8 vendor[8]; /* vendor string */
+ u8 product[16]; /* product string */
+ u8 revision[4]; /* revision */
+ u32 sy_rate; /* current rate for sync. tr. */
+ u32 sy_max_rate; /* max. rate for sync. tr. */
+ u32 no_ldrive; /* belongs to this log. drv.*/
+ u32 blkcnt; /* number of blocks */
+ u16 blksize; /* size of block in bytes */
+ u8 available; /* flag: access is available */
+ u8 init; /* medium is initialized */
+ u8 devtype; /* SCSI devicetype */
+ u8 rm_medium; /* medium is removable */
+ u8 wp_medium; /* medium is write protected */
+ u8 ansi; /* SCSI I/II or III? */
+ u8 protocol; /* same as ansi */
+ u8 sync; /* flag: sync. transfer enab. */
+ u8 disc; /* flag: disconnect enabled */
+ u8 queueing; /* flag: command queing enab. */
+ u8 cached; /* flag: caching enabled */
+ u8 target_id; /* target ID of device */
+ u8 lun; /* LUN id of device */
+ u8 orphan; /* flag: drive fragment */
+ u32 last_error; /* sense key or drive state */
+ u32 last_result; /* result of last command */
+ u32 check_errors; /* err. in last surface check */
+ u8 percent; /* progress for surface check */
+ u8 last_check; /* IOCTRL operation */
+ u8 res[2];
+ u32 flags; /* from 1.19/2.19: raw reserv.*/
+ u8 multi_bus; /* multi bus dev? (fibre ch.) */
+ u8 mb_status; /* status: available? */
+ u8 res2[2];
+ u8 mb_alt_status; /* status on second bus */
+ u8 mb_alt_bid; /* number of second bus */
+ u8 mb_alt_tid; /* target id on second bus */
+ u8 res3;
+ u8 fc_flag; /* from 1.22/2.22: info valid?*/
+ u8 res4;
+ u16 fc_frame_size; /* frame size (bytes) */
+ char wwn[8]; /* world wide name */
+} __attribute__((packed)) gdth_diskinfo_str;
+
+/* get SCSI channel count */
+typedef struct {
+ u32 channel_no; /* number of channel */
+ u32 drive_cnt; /* drive count */
+ u8 siop_id; /* SCSI processor ID */
+ u8 siop_state; /* SCSI processor state */
+} __attribute__((packed)) gdth_getch_str;
+
+/* get SCSI drive numbers */
+typedef struct {
+ u32 sc_no; /* SCSI channel */
+ u32 sc_cnt; /* sc_list[] elements */
+ u32 sc_list[MAXID]; /* minor device numbers */
+} __attribute__((packed)) gdth_drlist_str;
+
+/* get grown/primary defect count */
+typedef struct {
+ u8 sddc_type; /* 0x08: grown, 0x10: prim. */
+ u8 sddc_format; /* list entry format */
+ u8 sddc_len; /* list entry length */
+ u8 sddc_res;
+ u32 sddc_cnt; /* entry count */
+} __attribute__((packed)) gdth_defcnt_str;
+
+/* disk statistics */
+typedef struct {
+ u32 bid; /* SCSI channel */
+ u32 first; /* first SCSI disk */
+ u32 entries; /* number of elements */
+ u32 count; /* (R) number of init. el. */
+ u32 mon_time; /* time stamp */
+ struct {
+ u8 tid; /* target ID */
+ u8 lun; /* LUN */
+ u8 res[2];
+ u32 blk_size; /* block size in bytes */
+ u32 rd_count; /* bytes read */
+ u32 wr_count; /* bytes written */
+ u32 rd_blk_count; /* blocks read */
+ u32 wr_blk_count; /* blocks written */
+ u32 retries; /* retries */
+ u32 reassigns; /* reassigns */
+ } __attribute__((packed)) list[1];
+} __attribute__((packed)) gdth_dskstat_str;
+
+/* IO channel header */
+typedef struct {
+ u32 version; /* version (-1UL: newest) */
+ u8 list_entries; /* list entry count */
+ u8 first_chan; /* first channel number */
+ u8 last_chan; /* last channel number */
+ u8 chan_count; /* (R) channel count */
+ u32 list_offset; /* offset of list[0] */
+} __attribute__((packed)) gdth_iochan_header;
+
+/* get IO channel description */
+typedef struct {
+ gdth_iochan_header hdr;
+ struct {
+ u32 address; /* channel address */
+ u8 type; /* type (SCSI, FCAL) */
+ u8 local_no; /* local number */
+ u16 features; /* channel features */
+ } __attribute__((packed)) list[MAXBUS];
+} __attribute__((packed)) gdth_iochan_str;
+
+/* get raw IO channel description */
+typedef struct {
+ gdth_iochan_header hdr;
+ struct {
+ u8 proc_id; /* processor id */
+ u8 proc_defect; /* defect ? */
+ u8 reserved[2];
+ } __attribute__((packed)) list[MAXBUS];
+} __attribute__((packed)) gdth_raw_iochan_str;
+
+/* array drive component */
+typedef struct {
+ u32 al_controller; /* controller ID */
+ u8 al_cache_drive; /* cache drive number */
+ u8 al_status; /* cache drive state */
+ u8 al_res[2];
+} __attribute__((packed)) gdth_arraycomp_str;
+
+/* array drive information */
+typedef struct {
+ u8 ai_type; /* array type (RAID0,4,5) */
+ u8 ai_cache_drive_cnt; /* active cachedrives */
+ u8 ai_state; /* array drive state */
+ u8 ai_master_cd; /* master cachedrive */
+ u32 ai_master_controller; /* ID of master controller */
+ u32 ai_size; /* user capacity [sectors] */
+ u32 ai_striping_size; /* striping size [sectors] */
+ u32 ai_secsize; /* sector size [bytes] */
+ u32 ai_err_info; /* failed cache drive */
+ u8 ai_name[8]; /* name of the array drive */
+ u8 ai_controller_cnt; /* number of controllers */
+ u8 ai_removable; /* flag: removable */
+ u8 ai_write_protected; /* flag: write protected */
+ u8 ai_devtype; /* type: always direct access */
+ gdth_arraycomp_str ai_drives[35]; /* drive components: */
+ u8 ai_drive_entries; /* number of drive components */
+ u8 ai_protected; /* protection flag */
+ u8 ai_verify_state; /* state of a parity verify */
+ u8 ai_ext_state; /* extended array drive state */
+ u8 ai_expand_state; /* array expand state (>=2.18)*/
+ u8 ai_reserved[3];
+} __attribute__((packed)) gdth_arrayinf_str;
+
+/* get array drive list */
+typedef struct {
+ u32 controller_no; /* controller no. */
+ u8 cd_handle; /* master cachedrive */
+ u8 is_arrayd; /* Flag: is array drive? */
+ u8 is_master; /* Flag: is array master? */
+ u8 is_parity; /* Flag: is parity drive? */
+ u8 is_hotfix; /* Flag: is hotfix drive? */
+ u8 res[3];
+} __attribute__((packed)) gdth_alist_str;
+
+typedef struct {
+ u32 entries_avail; /* allocated entries */
+ u32 entries_init; /* returned entries */
+ u32 first_entry; /* first entry number */
+ u32 list_offset; /* offset of following list */
+ gdth_alist_str list[1]; /* list */
+} __attribute__((packed)) gdth_arcdl_str;
+
+/* cache info/config IOCTL */
+typedef struct {
+ u32 version; /* firmware version */
+ u16 state; /* cache state (on/off) */
+ u16 strategy; /* cache strategy */
+ u16 write_back; /* write back state (on/off) */
+ u16 block_size; /* cache block size */
+} __attribute__((packed)) gdth_cpar_str;
+
+typedef struct {
+ u32 csize; /* cache size */
+ u32 read_cnt; /* read/write counter */
+ u32 write_cnt;
+ u32 tr_hits; /* hits */
+ u32 sec_hits;
+ u32 sec_miss; /* misses */
+} __attribute__((packed)) gdth_cstat_str;
+
+typedef struct {
+ gdth_cpar_str cpar;
+ gdth_cstat_str cstat;
+} __attribute__((packed)) gdth_cinfo_str;
+
+/* cache drive info */
+typedef struct {
+ u8 cd_name[8]; /* cache drive name */
+ u32 cd_devtype; /* SCSI devicetype */
+ u32 cd_ldcnt; /* number of log. drives */
+ u32 cd_last_error; /* last error */
+ u8 cd_initialized; /* drive is initialized */
+ u8 cd_removable; /* media is removable */
+ u8 cd_write_protected; /* write protected */
+ u8 cd_flags; /* Pool Hot Fix? */
+ u32 ld_blkcnt; /* number of blocks */
+ u32 ld_blksize; /* blocksize */
+ u32 ld_dcnt; /* number of disks */
+ u32 ld_slave; /* log. drive index */
+ u32 ld_dtype; /* type of logical drive */
+ u32 ld_last_error; /* last error */
+ u8 ld_name[8]; /* log. drive name */
+ u8 ld_error; /* error */
+} __attribute__((packed)) gdth_cdrinfo_str;
+
+/* OEM string */
+typedef struct {
+ u32 ctl_version;
+ u32 file_major_version;
+ u32 file_minor_version;
+ u32 buffer_size;
+ u32 cpy_count;
+ u32 ext_error;
+ u32 oem_id;
+ u32 board_id;
+} __attribute__((packed)) gdth_oem_str_params;
+
+typedef struct {
+ u8 product_0_1_name[16];
+ u8 product_4_5_name[16];
+ u8 product_cluster_name[16];
+ u8 product_reserved[16];
+ u8 scsi_cluster_target_vendor_id[16];
+ u8 cluster_raid_fw_name[16];
+ u8 oem_brand_name[16];
+ u8 oem_raid_type[16];
+ u8 bios_type[13];
+ u8 bios_title[50];
+ u8 oem_company_name[37];
+ u32 pci_id_1;
+ u32 pci_id_2;
+ u8 validation_status[80];
+ u8 reserved_1[4];
+ u8 scsi_host_drive_inquiry_vendor_id[16];
+ u8 library_file_template[16];
+ u8 reserved_2[16];
+ u8 tool_name_1[32];
+ u8 tool_name_2[32];
+ u8 tool_name_3[32];
+ u8 oem_contact_1[84];
+ u8 oem_contact_2[84];
+ u8 oem_contact_3[84];
+} __attribute__((packed)) gdth_oem_str;
+
+typedef struct {
+ gdth_oem_str_params params;
+ gdth_oem_str text;
+} __attribute__((packed)) gdth_oem_str_ioctl;
+
+/* board features */
+typedef struct {
+ u8 chaining; /* Chaining supported */
+ u8 striping; /* Striping (RAID-0) supp. */
+ u8 mirroring; /* Mirroring (RAID-1) supp. */
+ u8 raid; /* RAID-4/5/10 supported */
+} __attribute__((packed)) gdth_bfeat_str;
+
+/* board info IOCTL */
+typedef struct {
+ u32 ser_no; /* serial no. */
+ u8 oem_id[2]; /* OEM ID */
+ u16 ep_flags; /* eprom flags */
+ u32 proc_id; /* processor ID */
+ u32 memsize; /* memory size (bytes) */
+ u8 mem_banks; /* memory banks */
+ u8 chan_type; /* channel type */
+ u8 chan_count; /* channel count */
+ u8 rdongle_pres; /* dongle present? */
+ u32 epr_fw_ver; /* (eprom) firmware version */
+ u32 upd_fw_ver; /* (update) firmware version */
+ u32 upd_revision; /* update revision */
+ char type_string[16]; /* controller name */
+ char raid_string[16]; /* RAID firmware name */
+ u8 update_pres; /* update present? */
+ u8 xor_pres; /* XOR engine present? */
+ u8 prom_type; /* ROM type (eprom/flash) */
+ u8 prom_count; /* number of ROM devices */
+ u32 dup_pres; /* duplexing module present? */
+ u32 chan_pres; /* number of expansion chn. */
+ u32 mem_pres; /* memory expansion inst. ? */
+ u8 ft_bus_system; /* fault bus supported? */
+ u8 subtype_valid; /* board_subtype valid? */
+ u8 board_subtype; /* subtype/hardware level */
+ u8 ramparity_pres; /* RAM parity check hardware? */
+} __attribute__((packed)) gdth_binfo_str;
+
+/* get host drive info */
+typedef struct {
+ char name[8]; /* host drive name */
+ u32 size; /* size (sectors) */
+ u8 host_drive; /* host drive number */
+ u8 log_drive; /* log. drive (master) */
+ u8 reserved;
+ u8 rw_attribs; /* r/w attribs */
+ u32 start_sec; /* start sector */
+} __attribute__((packed)) gdth_hentry_str;
+
+typedef struct {
+ u32 entries; /* entry count */
+ u32 offset; /* offset of entries */
+ u8 secs_p_head; /* sectors/head */
+ u8 heads_p_cyl; /* heads/cylinder */
+ u8 reserved;
+ u8 clust_drvtype; /* cluster drive type */
+ u32 location; /* controller number */
+ gdth_hentry_str entry[MAX_HDRIVES]; /* entries */
+} __attribute__((packed)) gdth_hget_str;
+
+
+/* DPRAM structures */
+
+/* interface area ISA/PCI */
+typedef struct {
+ u8 S_Cmd_Indx; /* special command */
+ u8 volatile S_Status; /* status special command */
+ u16 reserved1;
+ u32 S_Info[4]; /* add. info special command */
+ u8 volatile Sema0; /* command semaphore */
+ u8 reserved2[3];
+ u8 Cmd_Index; /* command number */
+ u8 reserved3[3];
+ u16 volatile Status; /* command status */
+ u16 Service; /* service(for async.events) */
+ u32 Info[2]; /* additional info */
+ struct {
+ u16 offset; /* command offs. in the DPRAM*/
+ u16 serv_id; /* service */
+ } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */
+ u32 bios_reserved[2];
+ u8 gdt_dpr_cmd[1]; /* commands */
+} __attribute__((packed)) gdt_dpr_if;
+
+/* SRAM structure PCI controllers */
+typedef struct {
+ u32 magic; /* controller ID from BIOS */
+ u16 need_deinit; /* switch betw. BIOS/driver */
+ u8 switch_support; /* see need_deinit */
+ u8 padding[9];
+ u8 os_used[16]; /* OS code per service */
+ u8 unused[28];
+ u8 fw_magic; /* contr. ID from firmware */
+} __attribute__((packed)) gdt_pci_sram;
+
+/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
+typedef struct {
+ u8 os_used[16]; /* OS code per service */
+ u16 need_deinit; /* switch betw. BIOS/driver */
+ u8 switch_support; /* see need_deinit */
+ u8 padding;
+} __attribute__((packed)) gdt_eisa_sram;
+
+
+/* DPRAM ISA controllers */
+typedef struct {
+ union {
+ struct {
+ u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
+ u32 magic; /* controller (EISA) ID */
+ u16 need_deinit; /* switch betw. BIOS/driver */
+ u8 switch_support; /* see need_deinit */
+ u8 padding[9];
+ u8 os_used[16]; /* OS code per service */
+ } __attribute__((packed)) dp_sram;
+ u8 bios_area[0x4000]; /* 16KB reserved for BIOS */
+ } bu;
+ union {
+ gdt_dpr_if ic; /* interface area */
+ u8 if_area[0x3000]; /* 12KB for interface */
+ } u;
+ struct {
+ u8 memlock; /* write protection DPRAM */
+ u8 event; /* release event */
+ u8 irqen; /* board interrupts enable */
+ u8 irqdel; /* acknowledge board int. */
+ u8 volatile Sema1; /* status semaphore */
+ u8 rq; /* IRQ/DRQ configuration */
+ } __attribute__((packed)) io;
+} __attribute__((packed)) gdt2_dpram_str;
+
+/* DPRAM PCI controllers */
+typedef struct {
+ union {
+ gdt_dpr_if ic; /* interface area */
+ u8 if_area[0xff0-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+ struct {
+ u8 unused0[1];
+ u8 volatile Sema1; /* command semaphore */
+ u8 unused1[3];
+ u8 irqen; /* board interrupts enable */
+ u8 unused2[2];
+ u8 event; /* release event */
+ u8 unused3[3];
+ u8 irqdel; /* acknowledge board int. */
+ u8 unused4[3];
+ } __attribute__((packed)) io;
+} __attribute__((packed)) gdt6_dpram_str;
+
+/* PLX register structure (new PCI controllers) */
+typedef struct {
+ u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
+ u8 unused1[0x3f];
+ u8 volatile sema0_reg; /* command semaphore */
+ u8 volatile sema1_reg; /* status semaphore */
+ u8 unused2[2];
+ u16 volatile status; /* command status */
+ u16 service; /* service */
+ u32 info[2]; /* additional info */
+ u8 unused3[0x10];
+ u8 ldoor_reg; /* PCI to local doorbell */
+ u8 unused4[3];
+ u8 volatile edoor_reg; /* local to PCI doorbell */
+ u8 unused5[3];
+ u8 control0; /* control0 register(unused) */
+ u8 control1; /* board interrupts enable */
+ u8 unused6[0x16];
+} __attribute__((packed)) gdt6c_plx_regs;
+
+/* DPRAM new PCI controllers */
+typedef struct {
+ union {
+ gdt_dpr_if ic; /* interface area */
+ u8 if_area[0x4000-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+} __attribute__((packed)) gdt6c_dpram_str;
+
+/* i960 register structure (PCI MPR controllers) */
+typedef struct {
+ u8 unused1[16];
+ u8 volatile sema0_reg; /* command semaphore */
+ u8 unused2;
+ u8 volatile sema1_reg; /* status semaphore */
+ u8 unused3;
+ u16 volatile status; /* command status */
+ u16 service; /* service */
+ u32 info[2]; /* additional info */
+ u8 ldoor_reg; /* PCI to local doorbell */
+ u8 unused4[11];
+ u8 volatile edoor_reg; /* local to PCI doorbell */
+ u8 unused5[7];
+ u8 edoor_en_reg; /* board interrupts enable */
+ u8 unused6[27];
+ u32 unused7[939];
+ u32 severity;
+ char evt_str[256]; /* event string */
+} __attribute__((packed)) gdt6m_i960_regs;
+
+/* DPRAM PCI MPR controllers */
+typedef struct {
+ gdt6m_i960_regs i960r; /* 4KB i960 registers */
+ union {
+ gdt_dpr_if ic; /* interface area */
+ u8 if_area[0x3000-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+} __attribute__((packed)) gdt6m_dpram_str;
+
+
+/* PCI resources */
+typedef struct {
+ struct pci_dev *pdev;
+ unsigned long dpmem; /* DPRAM address */
+ unsigned long io; /* IO address */
+} gdth_pci_str;
+
+
+/* controller information structure */
+typedef struct {
+ struct Scsi_Host *shost;
+ struct list_head list;
+ u16 hanum;
+ u16 oem_id; /* OEM */
+ u16 type; /* controller class */
+ u32 stype; /* subtype (PCI: device ID) */
+ u16 fw_vers; /* firmware version */
+ u16 cache_feat; /* feat. cache serv. (s/g,..)*/
+ u16 raw_feat; /* feat. raw service (s/g,..)*/
+ u16 screen_feat; /* feat. raw service (s/g,..)*/
+ u16 bmic; /* BMIC address (EISA) */
+ void __iomem *brd; /* DPRAM address */
+ u32 brd_phys; /* slot number/BIOS address */
+ gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
+ gdth_cmd_str cmdext;
+ gdth_cmd_str *pccb; /* address command structure */
+ u32 ccb_phys; /* phys. address */
+#ifdef INT_COAL
+ gdth_coal_status *coal_stat; /* buffer for coalescing int.*/
+ u64 coal_stat_phys; /* phys. address */
+#endif
+ char *pscratch; /* scratch (DMA) buffer */
+ u64 scratch_phys; /* phys. address */
+ u8 scratch_busy; /* in use? */
+ u8 dma64_support; /* 64-bit DMA supported? */
+ gdth_msg_str *pmsg; /* message buffer */
+ u64 msg_phys; /* phys. address */
+ u8 scan_mode; /* current scan mode */
+ u8 irq; /* IRQ */
+ u8 drq; /* DRQ (ISA controllers) */
+ u16 status; /* command status */
+ u16 service; /* service/firmware ver./.. */
+ u32 info;
+ u32 info2; /* additional info */
+ Scsi_Cmnd *req_first; /* top of request queue */
+ struct {
+ u8 present; /* Flag: host drive present? */
+ u8 is_logdrv; /* Flag: log. drive (master)? */
+ u8 is_arraydrv; /* Flag: array drive? */
+ u8 is_master; /* Flag: array drive master? */
+ u8 is_parity; /* Flag: parity drive? */
+ u8 is_hotfix; /* Flag: hotfix drive? */
+ u8 master_no; /* number of master drive */
+ u8 lock; /* drive locked? (hot plug) */
+ u8 heads; /* mapping */
+ u8 secs;
+ u16 devtype; /* further information */
+ u64 size; /* capacity */
+ u8 ldr_no; /* log. drive no. */
+ u8 rw_attribs; /* r/w attributes */
+ u8 cluster_type; /* cluster properties */
+ u8 media_changed; /* Flag:MOUNT/UNMOUNT occurred */
+ u32 start_sec; /* start sector */
+ } hdr[MAX_LDRIVES]; /* host drives */
+ struct {
+ u8 lock; /* channel locked? (hot plug) */
+ u8 pdev_cnt; /* physical device count */
+ u8 local_no; /* local channel number */
+ u8 io_cnt[MAXID]; /* current IO count */
+ u32 address; /* channel address */
+ u32 id_list[MAXID]; /* IDs of the phys. devices */
+ } raw[MAXBUS]; /* SCSI channels */
+ struct {
+ Scsi_Cmnd *cmnd; /* pending request */
+ u16 service; /* service */
+ } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
+ struct gdth_cmndinfo { /* per-command private info */
+ int index;
+ int internal_command; /* don't call scsi_done */
+ gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
+ dma_addr_t sense_paddr; /* sense dma-addr */
+ u8 priority;
+ int timeout_count; /* # of timeout calls */
+ volatile int wait_for_completion;
+ u16 status;
+ u32 info;
+ enum dma_data_direction dma_dir;
+ int phase; /* ???? */
+ int OpCode;
+ } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */
+ u8 bus_cnt; /* SCSI bus count */
+ u8 tid_cnt; /* Target ID count */
+ u8 bus_id[MAXBUS]; /* IOP IDs */
+ u8 virt_bus; /* number of virtual bus */
+ u8 more_proc; /* more /proc info supported */
+ u16 cmd_cnt; /* command count in DPRAM */
+ u16 cmd_len; /* length of actual command */
+ u16 cmd_offs_dpmem; /* actual offset in DPRAM */
+ u16 ic_all_size; /* sizeof DPRAM interf. area */
+ gdth_cpar_str cpar; /* controller cache par. */
+ gdth_bfeat_str bfeat; /* controller features */
+ gdth_binfo_str binfo; /* controller info */
+ gdth_evt_data dvr; /* event structure */
+ spinlock_t smp_lock;
+ struct pci_dev *pdev;
+ char oem_name[8];
+#ifdef GDTH_DMA_STATISTICS
+ unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
+#endif
+ struct scsi_device *sdev;
+} gdth_ha_str;
+
+static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd)
+{
+ return (struct gdth_cmndinfo *)cmd->host_scribble;
+}
+
+/* INQUIRY data format */
+typedef struct {
+ u8 type_qual;
+ u8 modif_rmb;
+ u8 version;
+ u8 resp_aenc;
+ u8 add_length;
+ u8 reserved1;
+ u8 reserved2;
+ u8 misc;
+ u8 vendor[8];
+ u8 product[16];
+ u8 revision[4];
+} __attribute__((packed)) gdth_inq_data;
+
+/* READ_CAPACITY data format */
+typedef struct {
+ u32 last_block_no;
+ u32 block_length;
+} __attribute__((packed)) gdth_rdcap_data;
+
+/* READ_CAPACITY (16) data format */
+typedef struct {
+ u64 last_block_no;
+ u32 block_length;
+} __attribute__((packed)) gdth_rdcap16_data;
+
+/* REQUEST_SENSE data format */
+typedef struct {
+ u8 errorcode;
+ u8 segno;
+ u8 key;
+ u32 info;
+ u8 add_length;
+ u32 cmd_info;
+ u8 adsc;
+ u8 adsq;
+ u8 fruc;
+ u8 key_spec[3];
+} __attribute__((packed)) gdth_sense_data;
+
+/* MODE_SENSE data format */
+typedef struct {
+ struct {
+ u8 data_length;
+ u8 med_type;
+ u8 dev_par;
+ u8 bd_length;
+ } __attribute__((packed)) hd;
+ struct {
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+} __attribute__((packed)) gdth_modep_data;
+
+/* stack frame */
+typedef struct {
+ unsigned long b[10]; /* 32/64 bit compiler ! */
+} __attribute__((packed)) gdth_stackframe;
+
+
+/* function prototyping */
+
+int gdth_show_info(struct seq_file *, struct Scsi_Host *);
+int gdth_set_info(struct Scsi_Host *, char *, int);
+
+#endif
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
new file mode 100644
index 000000000..b004c6165
--- /dev/null
+++ b/drivers/scsi/gdth_ioctl.h
@@ -0,0 +1,339 @@
+#ifndef _GDTH_IOCTL_H
+#define _GDTH_IOCTL_H
+
+/* gdth_ioctl.h
+ * $Id: gdth_ioctl.h,v 1.14 2004/02/19 15:43:15 achim Exp $
+ */
+
+/* IOCTLs */
+#define GDTIOCTL_MASK ('J'<<8)
+#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
+#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
+#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
+#define GDTIOCTL_OSVERS (GDTIOCTL_MASK | 3) /* get OS version */
+#define GDTIOCTL_HDRLIST (GDTIOCTL_MASK | 4) /* get host drive list */
+#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
+#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
+#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
+#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
+#define GDTIOCTL_SCSI (GDTIOCTL_MASK | 9) /* SCSI command */
+#define GDTIOCTL_RESET_BUS (GDTIOCTL_MASK |10) /* reset SCSI bus */
+#define GDTIOCTL_RESCAN (GDTIOCTL_MASK |11) /* rescan host drives */
+#define GDTIOCTL_RESET_DRV (GDTIOCTL_MASK |12) /* reset (remote) drv. res. */
+
+#define GDTIOCTL_MAGIC 0xaffe0004
+#define EVENT_SIZE 294
+#define GDTH_MAXSG 32 /* max. s/g elements */
+
+#define MAX_LDRIVES 255 /* max. log. drive count */
+#ifdef GDTH_IOCTL_PROC
+#define MAX_HDRIVES 100 /* max. host drive count */
+#else
+#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
+#endif
+
+/* scatter/gather element */
+typedef struct {
+ u32 sg_ptr; /* address */
+ u32 sg_len; /* length */
+} __attribute__((packed)) gdth_sg_str;
+
+/* scatter/gather element - 64bit addresses */
+typedef struct {
+ u64 sg_ptr; /* address */
+ u32 sg_len; /* length */
+} __attribute__((packed)) gdth_sg64_str;
+
+/* command structure */
+typedef struct {
+ u32 BoardNode; /* board node (always 0) */
+ u32 CommandIndex; /* command number */
+ u16 OpCode; /* the command (READ,..) */
+ union {
+ struct {
+ u16 DeviceNo; /* number of cache drive */
+ u32 BlockNo; /* block number */
+ u32 BlockCnt; /* block count */
+ u32 DestAddr; /* dest. addr. (if s/g: -1) */
+ u32 sg_canz; /* s/g element count */
+ gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } __attribute__((packed)) cache; /* cache service cmd. str. */
+ struct {
+ u16 DeviceNo; /* number of cache drive */
+ u64 BlockNo; /* block number */
+ u32 BlockCnt; /* block count */
+ u64 DestAddr; /* dest. addr. (if s/g: -1) */
+ u32 sg_canz; /* s/g element count */
+ gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } __attribute__((packed)) cache64; /* cache service cmd. str. */
+ struct {
+ u16 param_size; /* size of p_param buffer */
+ u32 subfunc; /* IOCTL function */
+ u32 channel; /* device */
+ u64 p_param; /* buffer */
+ } __attribute__((packed)) ioctl; /* IOCTL command structure */
+ struct {
+ u16 reserved;
+ union {
+ struct {
+ u32 msg_handle; /* message handle */
+ u64 msg_addr; /* message buffer address */
+ } __attribute__((packed)) msg;
+ u8 data[12]; /* buffer for rtc data, ... */
+ } su;
+ } __attribute__((packed)) screen; /* screen service cmd. str. */
+ struct {
+ u16 reserved;
+ u32 direction; /* data direction */
+ u32 mdisc_time; /* disc. time (0: no timeout)*/
+ u32 mcon_time; /* connect time(0: no to.) */
+ u32 sdata; /* dest. addr. (if s/g: -1) */
+ u32 sdlen; /* data length (bytes) */
+ u32 clen; /* SCSI cmd. length(6,10,12) */
+ u8 cmd[12]; /* SCSI command */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 bus; /* SCSI bus number */
+ u8 priority; /* only 0 used */
+ u32 sense_len; /* sense data length */
+ u32 sense_data; /* sense data addr. */
+ u32 link_p; /* linked cmds (not supp.) */
+ u32 sg_ranz; /* s/g element count */
+ gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } __attribute__((packed)) raw; /* raw service cmd. struct. */
+ struct {
+ u16 reserved;
+ u32 direction; /* data direction */
+ u32 mdisc_time; /* disc. time (0: no timeout)*/
+ u32 mcon_time; /* connect time(0: no to.) */
+ u64 sdata; /* dest. addr. (if s/g: -1) */
+ u32 sdlen; /* data length (bytes) */
+ u32 clen; /* SCSI cmd. length(6,..,16) */
+ u8 cmd[16]; /* SCSI command */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 bus; /* SCSI bus number */
+ u8 priority; /* only 0 used */
+ u32 sense_len; /* sense data length */
+ u64 sense_data; /* sense data addr. */
+ u32 sg_ranz; /* s/g element count */
+ gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } __attribute__((packed)) raw64; /* raw service cmd. struct. */
+ } u;
+ /* additional variables */
+ u8 Service; /* controller service */
+ u8 reserved;
+ u16 Status; /* command result */
+ u32 Info; /* additional information */
+ void *RequestBuffer; /* request buffer */
+} __attribute__((packed)) gdth_cmd_str;
+
+/* controller event structure */
+#define ES_ASYNC 1
+#define ES_DRIVER 2
+#define ES_TEST 3
+#define ES_SYNC 4
+typedef struct {
+ u16 size; /* size of structure */
+ union {
+ char stream[16];
+ struct {
+ u16 ionode;
+ u16 service;
+ u32 index;
+ } __attribute__((packed)) driver;
+ struct {
+ u16 ionode;
+ u16 service;
+ u16 status;
+ u32 info;
+ u8 scsi_coord[3];
+ } __attribute__((packed)) async;
+ struct {
+ u16 ionode;
+ u16 service;
+ u16 status;
+ u32 info;
+ u16 hostdrive;
+ u8 scsi_coord[3];
+ u8 sense_key;
+ } __attribute__((packed)) sync;
+ struct {
+ u32 l1, l2, l3, l4;
+ } __attribute__((packed)) test;
+ } eu;
+ u32 severity;
+ u8 event_string[256];
+} __attribute__((packed)) gdth_evt_data;
+
+typedef struct {
+ u32 first_stamp;
+ u32 last_stamp;
+ u16 same_count;
+ u16 event_source;
+ u16 event_idx;
+ u8 application;
+ u8 reserved;
+ gdth_evt_data event_data;
+} __attribute__((packed)) gdth_evt_str;
+
+
+#ifdef GDTH_IOCTL_PROC
+/* IOCTL structure (write) */
+typedef struct {
+ u32 magic; /* IOCTL magic */
+ u16 ioctl; /* IOCTL */
+ u16 ionode; /* controller number */
+ u16 service; /* controller service */
+ u16 timeout; /* timeout */
+ union {
+ struct {
+ u8 command[512]; /* controller command */
+ u8 data[1]; /* add. data */
+ } general;
+ struct {
+ u8 lock; /* lock/unlock */
+ u8 drive_cnt; /* drive count */
+ u16 drives[MAX_HDRIVES];/* drives */
+ } lockdrv;
+ struct {
+ u8 lock; /* lock/unlock */
+ u8 channel; /* channel */
+ } lockchn;
+ struct {
+ int erase; /* erase event ? */
+ int handle;
+ u8 evt[EVENT_SIZE]; /* event structure */
+ } event;
+ struct {
+ u8 bus; /* SCSI bus */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 cmd_len; /* command length */
+ u8 cmd[12]; /* SCSI command */
+ } scsi;
+ struct {
+ u16 hdr_no; /* host drive number */
+ u8 flag; /* old meth./add/remove */
+ } rescan;
+ } iu;
+} gdth_iowr_str;
+
+/* IOCTL structure (read) */
+typedef struct {
+ u32 size; /* buffer size */
+ u32 status; /* IOCTL error code */
+ union {
+ struct {
+ u8 data[1]; /* data */
+ } general;
+ struct {
+ u16 version; /* driver version */
+ } drvers;
+ struct {
+ u8 type; /* controller type */
+ u16 info; /* slot etc. */
+ u16 oem_id; /* OEM ID */
+ u16 bios_ver; /* not used */
+ u16 access; /* not used */
+ u16 ext_type; /* extended type */
+ u16 device_id; /* device ID */
+ u16 sub_device_id; /* sub device ID */
+ } ctrtype;
+ struct {
+ u8 version; /* OS version */
+ u8 subversion; /* OS subversion */
+ u16 revision; /* revision */
+ } osvers;
+ struct {
+ u16 count; /* controller count */
+ } ctrcnt;
+ struct {
+ int handle;
+ u8 evt[EVENT_SIZE]; /* event structure */
+ } event;
+ struct {
+ u8 bus; /* SCSI bus, 0xff: invalid */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 cluster_type; /* cluster properties */
+ } hdr_list[MAX_HDRIVES]; /* index is host drive number */
+ } iu;
+} gdth_iord_str;
+#endif
+
+/* GDTIOCTL_GENERAL */
+typedef struct {
+ u16 ionode; /* controller number */
+ u16 timeout; /* timeout */
+ u32 info; /* error info */
+ u16 status; /* status */
+ unsigned long data_len; /* data buffer size */
+ unsigned long sense_len; /* sense buffer size */
+ gdth_cmd_str command; /* command */
+} gdth_ioctl_general;
+
+/* GDTIOCTL_LOCKDRV */
+typedef struct {
+ u16 ionode; /* controller number */
+ u8 lock; /* lock/unlock */
+ u8 drive_cnt; /* drive count */
+ u16 drives[MAX_HDRIVES]; /* drives */
+} gdth_ioctl_lockdrv;
+
+/* GDTIOCTL_LOCKCHN */
+typedef struct {
+ u16 ionode; /* controller number */
+ u8 lock; /* lock/unlock */
+ u8 channel; /* channel */
+} gdth_ioctl_lockchn;
+
+/* GDTIOCTL_OSVERS */
+typedef struct {
+ u8 version; /* OS version */
+ u8 subversion; /* OS subversion */
+ u16 revision; /* revision */
+} gdth_ioctl_osvers;
+
+/* GDTIOCTL_CTRTYPE */
+typedef struct {
+ u16 ionode; /* controller number */
+ u8 type; /* controller type */
+ u16 info; /* slot etc. */
+ u16 oem_id; /* OEM ID */
+ u16 bios_ver; /* not used */
+ u16 access; /* not used */
+ u16 ext_type; /* extended type */
+ u16 device_id; /* device ID */
+ u16 sub_device_id; /* sub device ID */
+} gdth_ioctl_ctrtype;
+
+/* GDTIOCTL_EVENT */
+typedef struct {
+ u16 ionode;
+ int erase; /* erase event? */
+ int handle; /* event handle */
+ gdth_evt_str event;
+} gdth_ioctl_event;
+
+/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
+typedef struct {
+ u16 ionode; /* controller number */
+ u8 flag; /* add/remove */
+ u16 hdr_no; /* drive no. */
+ struct {
+ u8 bus; /* SCSI bus */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 cluster_type; /* cluster properties */
+ } hdr_list[MAX_HDRIVES]; /* index is host drive number */
+} gdth_ioctl_rescan;
+
+/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
+typedef struct {
+ u16 ionode; /* controller number */
+ u16 number; /* bus/host drive number */
+ u16 status; /* status */
+} gdth_ioctl_reset;
+
+#endif
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
new file mode 100644
index 000000000..e66e99799
--- /dev/null
+++ b/drivers/scsi/gdth_proc.c
@@ -0,0 +1,645 @@
+/* gdth_proc.c
+ * $Id: gdth_proc.c,v 1.43 2006/01/11 16:15:00 achim Exp $
+ */
+
+#include <linux/completion.h>
+#include <linux/slab.h>
+
+int gdth_set_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ gdth_ha_str *ha = shost_priv(host);
+ int ret_val = -EINVAL;
+
+ TRACE2(("gdth_set_info() ha %d\n",ha->hanum,));
+
+ if (length >= 4) {
+ if (strncmp(buffer,"gdth",4) == 0) {
+ buffer += 5;
+ length -= 5;
+ ret_val = gdth_set_asc_info(host, buffer, length, ha);
+ }
+ }
+
+ return ret_val;
+}
+
+static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
+ int length, gdth_ha_str *ha)
+{
+ int orig_length, drive, wb_mode;
+ int i, found;
+ gdth_cmd_str gdtcmd;
+ gdth_cpar_str *pcpar;
+ u64 paddr;
+
+ char cmnd[MAX_COMMAND_SIZE];
+ memset(cmnd, 0xff, 12);
+ memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
+
+ TRACE2(("gdth_set_asc_info() ha %d\n",ha->hanum));
+ orig_length = length + 5;
+ drive = -1;
+ wb_mode = 0;
+ found = FALSE;
+
+ if (length >= 5 && strncmp(buffer,"flush",5)==0) {
+ buffer += 6;
+ length -= 6;
+ if (length && *buffer>='0' && *buffer<='9') {
+ drive = (int)(*buffer-'0');
+ ++buffer; --length;
+ if (length && *buffer>='0' && *buffer<='9') {
+ drive = drive*10 + (int)(*buffer-'0');
+ ++buffer; --length;
+ }
+ printk("GDT: Flushing host drive %d .. ",drive);
+ } else {
+ printk("GDT: Flushing all host drives .. ");
+ }
+ for (i = 0; i < MAX_HDRIVES; ++i) {
+ if (ha->hdr[i].present) {
+ if (drive != -1 && i != drive)
+ continue;
+ found = TRUE;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_FLUSH;
+ if (ha->cache_feat & GDT_64BIT) {
+ gdtcmd.u.cache64.DeviceNo = i;
+ gdtcmd.u.cache64.BlockNo = 1;
+ } else {
+ gdtcmd.u.cache.DeviceNo = i;
+ gdtcmd.u.cache.BlockNo = 1;
+ }
+
+ gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
+ }
+ }
+ if (!found)
+ printk("\nNo host drive found !\n");
+ else
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ if (length >= 7 && strncmp(buffer,"wbp_off",7)==0) {
+ buffer += 8;
+ length -= 8;
+ printk("GDT: Disabling write back permanently .. ");
+ wb_mode = 1;
+ } else if (length >= 6 && strncmp(buffer,"wbp_on",6)==0) {
+ buffer += 7;
+ length -= 7;
+ printk("GDT: Enabling write back permanently .. ");
+ wb_mode = 2;
+ } else if (length >= 6 && strncmp(buffer,"wb_off",6)==0) {
+ buffer += 7;
+ length -= 7;
+ printk("GDT: Disabling write back commands .. ");
+ if (ha->cache_feat & GDT_WR_THROUGH) {
+ gdth_write_through = TRUE;
+ printk("Done.\n");
+ } else {
+ printk("Not supported !\n");
+ }
+ return(orig_length);
+ } else if (length >= 5 && strncmp(buffer,"wb_on",5)==0) {
+ buffer += 6;
+ length -= 6;
+ printk("GDT: Enabling write back commands .. ");
+ gdth_write_through = FALSE;
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ if (wb_mode) {
+ if (!gdth_ioctl_alloc(ha, sizeof(gdth_cpar_str), TRUE, &paddr))
+ return(-EBUSY);
+ pcpar = (gdth_cpar_str *)ha->pscratch;
+ memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_IOCTL;
+ gdtcmd.u.ioctl.p_param = paddr;
+ gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
+ gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
+ gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
+ pcpar->write_back = wb_mode==1 ? 0:1;
+
+ gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
+
+ gdth_ioctl_free(ha, GDTH_SCRATCH, ha->pscratch, paddr);
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ printk("GDT: Unknown command: %s Length: %d\n",buffer,length);
+ return(-EINVAL);
+}
+
+int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ gdth_ha_str *ha = shost_priv(host);
+ int hlen;
+ int id, i, j, k, sec, flag;
+ int no_mdrv = 0, drv_no, is_mirr;
+ u32 cnt;
+ u64 paddr;
+ int rc = -ENOMEM;
+
+ gdth_cmd_str *gdtcmd;
+ gdth_evt_str *estr;
+ char hrec[161];
+ struct timeval tv;
+
+ char *buf;
+ gdth_dskstat_str *pds;
+ gdth_diskinfo_str *pdi;
+ gdth_arrayinf_str *pai;
+ gdth_defcnt_str *pdef;
+ gdth_cdrinfo_str *pcdi;
+ gdth_hget_str *phg;
+ char cmnd[MAX_COMMAND_SIZE];
+
+ gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
+ estr = kmalloc(sizeof(*estr), GFP_KERNEL);
+ if (!gdtcmd || !estr)
+ goto free_fail;
+
+ memset(cmnd, 0xff, 12);
+ memset(gdtcmd, 0, sizeof(gdth_cmd_str));
+
+ TRACE2(("gdth_get_info() ha %d\n",ha->hanum));
+
+
+ /* request is i.e. "cat /proc/scsi/gdth/0" */
+ /* format: %-15s\t%-10s\t%-15s\t%s */
+ /* driver parameters */
+ seq_puts(m, "Driver Parameters:\n");
+ if (reserve_list[0] == 0xff)
+ strcpy(hrec, "--");
+ else {
+ hlen = sprintf(hrec, "%d", reserve_list[0]);
+ for (i = 1; i < MAX_RES_ARGS; i++) {
+ if (reserve_list[i] == 0xff)
+ break;
+ hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
+ }
+ }
+ seq_printf(m,
+ " reserve_mode: \t%d \treserve_list: \t%s\n",
+ reserve_mode, hrec);
+ seq_printf(m,
+ " max_ids: \t%-3d \thdr_channel: \t%d\n",
+ max_ids, hdr_channel);
+
+ /* controller information */
+ seq_puts(m, "\nDisk Array Controller Information:\n");
+ seq_printf(m,
+ " Number: \t%d \tName: \t%s\n",
+ ha->hanum, ha->binfo.type_string);
+
+ seq_printf(m,
+ " Driver Ver.: \t%-10s\tFirmware Ver.: \t",
+ GDTH_VERSION_STR);
+ if (ha->more_proc)
+ seq_printf(m, "%d.%02d.%02d-%c%03X\n",
+ (u8)(ha->binfo.upd_fw_ver>>24),
+ (u8)(ha->binfo.upd_fw_ver>>16),
+ (u8)(ha->binfo.upd_fw_ver),
+ ha->bfeat.raid ? 'R':'N',
+ ha->binfo.upd_revision);
+ else
+ seq_printf(m, "%d.%02d\n", (u8)(ha->cpar.version>>8),
+ (u8)(ha->cpar.version));
+
+ if (ha->more_proc)
+ /* more information: 1. about controller */
+ seq_printf(m,
+ " Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n",
+ ha->binfo.ser_no, ha->binfo.memsize / 1024);
+
+#ifdef GDTH_DMA_STATISTICS
+ /* controller statistics */
+ seq_puts(m, "\nController Statistics:\n");
+ seq_printf(m,
+ " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
+ ha->dma32_cnt, ha->dma64_cnt);
+#endif
+
+ if (ha->more_proc) {
+ /* more information: 2. about physical devices */
+ seq_puts(m, "\nPhysical Devices:");
+ flag = FALSE;
+
+ buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
+ if (!buf)
+ goto stop_output;
+ for (i = 0; i < ha->bus_cnt; ++i) {
+ /* 2.a statistics (and retries/reassigns) */
+ TRACE2(("pdr_statistics() chn %d\n",i));
+ pds = (gdth_dskstat_str *)(buf + GDTH_SCRATCH/4);
+ gdtcmd->Service = CACHESERVICE;
+ gdtcmd->OpCode = GDT_IOCTL;
+ gdtcmd->u.ioctl.p_param = paddr + GDTH_SCRATCH/4;
+ gdtcmd->u.ioctl.param_size = 3*GDTH_SCRATCH/4;
+ gdtcmd->u.ioctl.subfunc = DSK_STATISTICS | L_CTRL_PATTERN;
+ gdtcmd->u.ioctl.channel = ha->raw[i].address | INVALID_CHANNEL;
+ pds->bid = ha->raw[i].local_no;
+ pds->first = 0;
+ pds->entries = ha->raw[i].pdev_cnt;
+ cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
+ sizeof(pds->list[0]);
+ if (pds->entries > cnt)
+ pds->entries = cnt;
+
+ if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
+ pds->count = 0;
+
+ /* other IOCTLs must fit into area GDTH_SCRATCH/4 */
+ for (j = 0; j < ha->raw[i].pdev_cnt; ++j) {
+ /* 2.b drive info */
+ TRACE2(("scsi_drv_info() chn %d dev %d\n",
+ i, ha->raw[i].id_list[j]));
+ pdi = (gdth_diskinfo_str *)buf;
+ gdtcmd->Service = CACHESERVICE;
+ gdtcmd->OpCode = GDT_IOCTL;
+ gdtcmd->u.ioctl.p_param = paddr;
+ gdtcmd->u.ioctl.param_size = sizeof(gdth_diskinfo_str);
+ gdtcmd->u.ioctl.subfunc = SCSI_DR_INFO | L_CTRL_PATTERN;
+ gdtcmd->u.ioctl.channel =
+ ha->raw[i].address | ha->raw[i].id_list[j];
+
+ if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
+ strncpy(hrec,pdi->vendor,8);
+ strncpy(hrec+8,pdi->product,16);
+ strncpy(hrec+24,pdi->revision,4);
+ hrec[28] = 0;
+ seq_printf(m,
+ "\n Chn/ID/LUN: \t%c/%02d/%d \tName: \t%s\n",
+ 'A'+i,pdi->target_id,pdi->lun,hrec);
+ flag = TRUE;
+ pdi->no_ldrive &= 0xffff;
+ if (pdi->no_ldrive == 0xffff)
+ strcpy(hrec,"--");
+ else
+ sprintf(hrec,"%d",pdi->no_ldrive);
+ seq_printf(m,
+ " Capacity [MB]:\t%-6d \tTo Log. Drive: \t%s\n",
+ pdi->blkcnt/(1024*1024/pdi->blksize),
+ hrec);
+ } else {
+ pdi->devtype = 0xff;
+ }
+
+ if (pdi->devtype == 0) {
+ /* search retries/reassigns */
+ for (k = 0; k < pds->count; ++k) {
+ if (pds->list[k].tid == pdi->target_id &&
+ pds->list[k].lun == pdi->lun) {
+ seq_printf(m,
+ " Retries: \t%-6d \tReassigns: \t%d\n",
+ pds->list[k].retries,
+ pds->list[k].reassigns);
+ break;
+ }
+ }
+ /* 2.c grown defects */
+ TRACE2(("scsi_drv_defcnt() chn %d dev %d\n",
+ i, ha->raw[i].id_list[j]));
+ pdef = (gdth_defcnt_str *)buf;
+ gdtcmd->Service = CACHESERVICE;
+ gdtcmd->OpCode = GDT_IOCTL;
+ gdtcmd->u.ioctl.p_param = paddr;
+ gdtcmd->u.ioctl.param_size = sizeof(gdth_defcnt_str);
+ gdtcmd->u.ioctl.subfunc = SCSI_DEF_CNT | L_CTRL_PATTERN;
+ gdtcmd->u.ioctl.channel =
+ ha->raw[i].address | ha->raw[i].id_list[j];
+ pdef->sddc_type = 0x08;
+
+ if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
+ seq_printf(m,
+ " Grown Defects:\t%d\n",
+ pdef->sddc_cnt);
+ }
+ }
+ }
+ }
+ gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
+
+ if (!flag)
+ seq_puts(m, "\n --\n");
+
+ /* 3. about logical drives */
+ seq_puts(m, "\nLogical Drives:");
+ flag = FALSE;
+
+ buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
+ if (!buf)
+ goto stop_output;
+ for (i = 0; i < MAX_LDRIVES; ++i) {
+ if (!ha->hdr[i].is_logdrv)
+ continue;
+ drv_no = i;
+ j = k = 0;
+ is_mirr = FALSE;
+ do {
+ /* 3.a log. drive info */
+ TRACE2(("cache_drv_info() drive no %d\n",drv_no));
+ pcdi = (gdth_cdrinfo_str *)buf;
+ gdtcmd->Service = CACHESERVICE;
+ gdtcmd->OpCode = GDT_IOCTL;
+ gdtcmd->u.ioctl.p_param = paddr;
+ gdtcmd->u.ioctl.param_size = sizeof(gdth_cdrinfo_str);
+ gdtcmd->u.ioctl.subfunc = CACHE_DRV_INFO;
+ gdtcmd->u.ioctl.channel = drv_no;
+ if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
+ break;
+ pcdi->ld_dtype >>= 16;
+ j++;
+ if (pcdi->ld_dtype > 2) {
+ strcpy(hrec, "missing");
+ } else if (pcdi->ld_error & 1) {
+ strcpy(hrec, "fault");
+ } else if (pcdi->ld_error & 2) {
+ strcpy(hrec, "invalid");
+ k++; j--;
+ } else {
+ strcpy(hrec, "ok");
+ }
+
+ if (drv_no == i) {
+ seq_printf(m,
+ "\n Number: \t%-2d \tStatus: \t%s\n",
+ drv_no, hrec);
+ flag = TRUE;
+ no_mdrv = pcdi->cd_ldcnt;
+ if (no_mdrv > 1 || pcdi->ld_slave != -1) {
+ is_mirr = TRUE;
+ strcpy(hrec, "RAID-1");
+ } else if (pcdi->ld_dtype == 0) {
+ strcpy(hrec, "Disk");
+ } else if (pcdi->ld_dtype == 1) {
+ strcpy(hrec, "RAID-0");
+ } else if (pcdi->ld_dtype == 2) {
+ strcpy(hrec, "Chain");
+ } else {
+ strcpy(hrec, "???");
+ }
+ seq_printf(m,
+ " Capacity [MB]:\t%-6d \tType: \t%s\n",
+ pcdi->ld_blkcnt/(1024*1024/pcdi->ld_blksize),
+ hrec);
+ } else {
+ seq_printf(m,
+ " Slave Number: \t%-2d \tStatus: \t%s\n",
+ drv_no & 0x7fff, hrec);
+ }
+ drv_no = pcdi->ld_slave;
+ } while (drv_no != -1);
+
+ if (is_mirr)
+ seq_printf(m,
+ " Missing Drv.: \t%-2d \tInvalid Drv.: \t%d\n",
+ no_mdrv - j - k, k);
+
+ if (!ha->hdr[i].is_arraydrv)
+ strcpy(hrec, "--");
+ else
+ sprintf(hrec, "%d", ha->hdr[i].master_no);
+ seq_printf(m,
+ " To Array Drv.:\t%s\n", hrec);
+ }
+ gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
+
+ if (!flag)
+ seq_puts(m, "\n --\n");
+
+ /* 4. about array drives */
+ seq_puts(m, "\nArray Drives:");
+ flag = FALSE;
+
+ buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
+ if (!buf)
+ goto stop_output;
+ for (i = 0; i < MAX_LDRIVES; ++i) {
+ if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master))
+ continue;
+ /* 4.a array drive info */
+ TRACE2(("array_info() drive no %d\n",i));
+ pai = (gdth_arrayinf_str *)buf;
+ gdtcmd->Service = CACHESERVICE;
+ gdtcmd->OpCode = GDT_IOCTL;
+ gdtcmd->u.ioctl.p_param = paddr;
+ gdtcmd->u.ioctl.param_size = sizeof(gdth_arrayinf_str);
+ gdtcmd->u.ioctl.subfunc = ARRAY_INFO | LA_CTRL_PATTERN;
+ gdtcmd->u.ioctl.channel = i;
+ if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
+ if (pai->ai_state == 0)
+ strcpy(hrec, "idle");
+ else if (pai->ai_state == 2)
+ strcpy(hrec, "build");
+ else if (pai->ai_state == 4)
+ strcpy(hrec, "ready");
+ else if (pai->ai_state == 6)
+ strcpy(hrec, "fail");
+ else if (pai->ai_state == 8 || pai->ai_state == 10)
+ strcpy(hrec, "rebuild");
+ else
+ strcpy(hrec, "error");
+ if (pai->ai_ext_state & 0x10)
+ strcat(hrec, "/expand");
+ else if (pai->ai_ext_state & 0x1)
+ strcat(hrec, "/patch");
+ seq_printf(m,
+ "\n Number: \t%-2d \tStatus: \t%s\n",
+ i,hrec);
+ flag = TRUE;
+
+ if (pai->ai_type == 0)
+ strcpy(hrec, "RAID-0");
+ else if (pai->ai_type == 4)
+ strcpy(hrec, "RAID-4");
+ else if (pai->ai_type == 5)
+ strcpy(hrec, "RAID-5");
+ else
+ strcpy(hrec, "RAID-10");
+ seq_printf(m,
+ " Capacity [MB]:\t%-6d \tType: \t%s\n",
+ pai->ai_size/(1024*1024/pai->ai_secsize),
+ hrec);
+ }
+ }
+ gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
+
+ if (!flag)
+ seq_puts(m, "\n --\n");
+
+ /* 5. about host drives */
+ seq_puts(m, "\nHost Drives:");
+ flag = FALSE;
+
+ buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
+ if (!buf)
+ goto stop_output;
+ for (i = 0; i < MAX_LDRIVES; ++i) {
+ if (!ha->hdr[i].is_logdrv ||
+ (ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master))
+ continue;
+ /* 5.a get host drive list */
+ TRACE2(("host_get() drv_no %d\n",i));
+ phg = (gdth_hget_str *)buf;
+ gdtcmd->Service = CACHESERVICE;
+ gdtcmd->OpCode = GDT_IOCTL;
+ gdtcmd->u.ioctl.p_param = paddr;
+ gdtcmd->u.ioctl.param_size = sizeof(gdth_hget_str);
+ gdtcmd->u.ioctl.subfunc = HOST_GET | LA_CTRL_PATTERN;
+ gdtcmd->u.ioctl.channel = i;
+ phg->entries = MAX_HDRIVES;
+ phg->offset = GDTOFFSOF(gdth_hget_str, entry[0]);
+ if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
+ ha->hdr[i].ldr_no = i;
+ ha->hdr[i].rw_attribs = 0;
+ ha->hdr[i].start_sec = 0;
+ } else {
+ for (j = 0; j < phg->entries; ++j) {
+ k = phg->entry[j].host_drive;
+ if (k >= MAX_LDRIVES)
+ continue;
+ ha->hdr[k].ldr_no = phg->entry[j].log_drive;
+ ha->hdr[k].rw_attribs = phg->entry[j].rw_attribs;
+ ha->hdr[k].start_sec = phg->entry[j].start_sec;
+ }
+ }
+ }
+ gdth_ioctl_free(ha, sizeof(gdth_hget_str), buf, paddr);
+
+ for (i = 0; i < MAX_HDRIVES; ++i) {
+ if (!(ha->hdr[i].present))
+ continue;
+
+ seq_printf(m,
+ "\n Number: \t%-2d \tArr/Log. Drive:\t%d\n",
+ i, ha->hdr[i].ldr_no);
+ flag = TRUE;
+
+ seq_printf(m,
+ " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
+ (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
+ }
+
+ if (!flag)
+ seq_puts(m, "\n --\n");
+ }
+
+ /* controller events */
+ seq_puts(m, "\nController Events:\n");
+
+ for (id = -1;;) {
+ id = gdth_read_event(ha, id, estr);
+ if (estr->event_source == 0)
+ break;
+ if (estr->event_data.eu.driver.ionode == ha->hanum &&
+ estr->event_source == ES_ASYNC) {
+ gdth_log_event(&estr->event_data, hrec);
+ do_gettimeofday(&tv);
+ sec = (int)(tv.tv_sec - estr->first_stamp);
+ if (sec < 0) sec = 0;
+ seq_printf(m," date- %02d:%02d:%02d\t%s\n",
+ sec/3600, sec%3600/60, sec%60, hrec);
+ }
+ if (id == -1)
+ break;
+ }
+stop_output:
+ rc = 0;
+free_fail:
+ kfree(gdtcmd);
+ kfree(estr);
+ return rc;
+}
+
+static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
+ u64 *paddr)
+{
+ unsigned long flags;
+ char *ret_val;
+
+ if (size == 0)
+ return NULL;
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ if (!ha->scratch_busy && size <= GDTH_SCRATCH) {
+ ha->scratch_busy = TRUE;
+ ret_val = ha->pscratch;
+ *paddr = ha->scratch_phys;
+ } else if (scratch) {
+ ret_val = NULL;
+ } else {
+ dma_addr_t dma_addr;
+
+ ret_val = pci_alloc_consistent(ha->pdev, size, &dma_addr);
+ *paddr = dma_addr;
+ }
+
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return ret_val;
+}
+
+static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
+{
+ unsigned long flags;
+
+ if (buf == ha->pscratch) {
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->scratch_busy = FALSE;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ } else {
+ pci_free_consistent(ha->pdev, size, buf, paddr);
+ }
+}
+
+#ifdef GDTH_IOCTL_PROC
+static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
+{
+ unsigned long flags;
+ int ret_val;
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ ret_val = FALSE;
+ if (ha->scratch_busy) {
+ if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
+ ret_val = TRUE;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return ret_val;
+}
+#endif
+
+static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
+{
+ unsigned long flags;
+ int i;
+ Scsi_Cmnd *scp;
+ struct gdth_cmndinfo *cmndinfo;
+ u8 b, t;
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+
+ for (i = 0; i < GDTH_MAXCMDS; ++i) {
+ scp = ha->cmd_tab[i].cmnd;
+ cmndinfo = gdth_cmnd_priv(scp);
+
+ b = scp->device->channel;
+ t = scp->device->id;
+ if (!SPECIAL_SCP(scp) && t == (u8)id &&
+ b == (u8)busnum) {
+ cmndinfo->wait_for_completion = 0;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ while (!cmndinfo->wait_for_completion)
+ barrier();
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
new file mode 100644
index 000000000..aaa618198
--- /dev/null
+++ b/drivers/scsi/gdth_proc.h
@@ -0,0 +1,20 @@
+#ifndef _GDTH_PROC_H
+#define _GDTH_PROC_H
+
+/* gdth_proc.h
+ * $Id: gdth_proc.h,v 1.16 2004/01/14 13:09:01 achim Exp $
+ */
+
+int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
+ int timeout, u32 *info);
+
+static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
+ int length, gdth_ha_str *ha);
+
+static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
+ u64 *paddr);
+static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
+static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
+
+#endif
+
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
new file mode 100644
index 000000000..3b6f83ffd
--- /dev/null
+++ b/drivers/scsi/gvp11.c
@@ -0,0 +1,433 @@
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/zorro.h>
+#include <linux/module.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "scsi.h"
+#include "wd33c93.h"
+#include "gvp11.h"
+
+
+#define CHECK_WD33C93
+
+struct gvp11_hostdata {
+ struct WD33C93_hostdata wh;
+ struct gvp11_scsiregs *regs;
+};
+
+static irqreturn_t gvp11_intr(int irq, void *data)
+{
+ struct Scsi_Host *instance = data;
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+ unsigned int status = hdata->regs->CNTR;
+ unsigned long flags;
+
+ if (!(status & GVP11_DMAC_INT_PENDING))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int gvp11_xfer_mask = 0;
+
+void gvp11_setup(char *str, int *ints)
+{
+ gvp11_xfer_mask = ints[1];
+}
+
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct gvp11_scsiregs *regs = hdata->regs;
+ unsigned short cntr = GVP11_DMAC_INT_ENABLE;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ int bank_mask;
+ static int scsi_alloc_out_of_range = 0;
+
+ /* use bounce buffer if the physical address is bad */
+ if (addr & wh->dma_xfer_mask) {
+ wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+
+ if (!scsi_alloc_out_of_range) {
+ wh->dma_bounce_buffer =
+ kmalloc(wh->dma_bounce_len, GFP_KERNEL);
+ wh->dma_buffer_pool = BUF_SCSI_ALLOCED;
+ }
+
+ if (scsi_alloc_out_of_range ||
+ !wh->dma_bounce_buffer) {
+ wh->dma_bounce_buffer =
+ amiga_chip_alloc(wh->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
+
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
+
+ /* check if the address of the bounce buffer is OK */
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+
+ if (addr & wh->dma_xfer_mask) {
+ /* fall back to Chip RAM if address out of range */
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ kfree(wh->dma_bounce_buffer);
+ scsi_alloc_out_of_range = 1;
+ } else {
+ amiga_chip_free(wh->dma_bounce_buffer);
+ }
+
+ wh->dma_bounce_buffer =
+ amiga_chip_alloc(wh->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
+
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+ wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
+ }
+
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= GVP11_DMAC_DIR_WRITE;
+
+ wh->dma_dir = dir_in;
+ regs->CNTR = cntr;
+
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+
+ bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
+ if (bank_mask)
+ regs->BANK = bank_mask & (addr >> 18);
+
+ /* start DMA */
+ regs->ST_DMA = 1;
+
+ /* return success */
+ return 0;
+}
+
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ int status)
+{
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct gvp11_scsiregs *regs = hdata->regs;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+ /* remove write bit from CONTROL bits */
+ regs->CNTR = GVP11_DMAC_INT_ENABLE;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && wh->dma_bounce_buffer) {
+ if (wh->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ kfree(wh->dma_bounce_buffer);
+ else
+ amiga_chip_free(wh->dma_bounce_buffer);
+
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ }
+}
+
+static int gvp11_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+
+ /* FIXME perform bus-specific reset */
+
+ /* FIXME 2: shouldn't we no-op this function (return
+ FAILED), and fall back to host reset function,
+ wd33c93_host_reset ? */
+
+ spin_lock_irq(instance->host_lock);
+ wd33c93_host_reset(cmd);
+ spin_unlock_irq(instance->host_lock);
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template gvp11_scsi_template = {
+ .module = THIS_MODULE,
+ .name = "GVP Series II SCSI",
+ .show_info = wd33c93_show_info,
+ .write_info = wd33c93_write_info,
+ .proc_name = "GVP11",
+ .queuecommand = wd33c93_queuecommand,
+ .eh_abort_handler = wd33c93_abort,
+ .eh_bus_reset_handler = gvp11_bus_reset,
+ .eh_host_reset_handler = wd33c93_host_reset,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING
+};
+
+static int check_wd33c93(struct gvp11_scsiregs *regs)
+{
+#ifdef CHECK_WD33C93
+ volatile unsigned char *sasr_3393, *scmd_3393;
+ unsigned char save_sasr;
+ unsigned char q, qq;
+
+ /*
+ * These darn GVP boards are a problem - it can be tough to tell
+ * whether or not they include a SCSI controller. This is the
+ * ultimate Yet-Another-GVP-Detection-Hack in that it actually
+ * probes for a WD33c93 chip: If we find one, it's extremely
+ * likely that this card supports SCSI, regardless of Product_
+ * Code, Board_Size, etc.
+ */
+
+ /* Get pointers to the presumed register locations and save contents */
+
+ sasr_3393 = &regs->SASR;
+ scmd_3393 = &regs->SCMD;
+ save_sasr = *sasr_3393;
+
+ /* First test the AuxStatus Reg */
+
+ q = *sasr_3393; /* read it */
+ if (q & 0x08) /* bit 3 should always be clear */
+ return -ENODEV;
+ *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
+ if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
+ *sasr_3393 = save_sasr; /* Oops - restore this byte */
+ return -ENODEV;
+ }
+ if (*sasr_3393 != q) { /* should still read the same */
+ *sasr_3393 = save_sasr; /* Oops - restore this byte */
+ return -ENODEV;
+ }
+ if (*scmd_3393 != q) /* and so should the image at 0x1f */
+ return -ENODEV;
+
+ /*
+ * Ok, we probably have a wd33c93, but let's check a few other places
+ * for good measure. Make sure that this works for both 'A and 'B
+ * chip versions.
+ */
+
+ *sasr_3393 = WD_SCSI_STATUS;
+ q = *scmd_3393;
+ *sasr_3393 = WD_SCSI_STATUS;
+ *scmd_3393 = ~q;
+ *sasr_3393 = WD_SCSI_STATUS;
+ qq = *scmd_3393;
+ *sasr_3393 = WD_SCSI_STATUS;
+ *scmd_3393 = q;
+ if (qq != q) /* should be read only */
+ return -ENODEV;
+ *sasr_3393 = 0x1e; /* this register is unimplemented */
+ q = *scmd_3393;
+ *sasr_3393 = 0x1e;
+ *scmd_3393 = ~q;
+ *sasr_3393 = 0x1e;
+ qq = *scmd_3393;
+ *sasr_3393 = 0x1e;
+ *scmd_3393 = q;
+ if (qq != q || qq != 0xff) /* should be read only, all 1's */
+ return -ENODEV;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ q = *scmd_3393;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ *scmd_3393 = ~q;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ qq = *scmd_3393;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ *scmd_3393 = q;
+ if (qq != (~q & 0xff)) /* should be read/write */
+ return -ENODEV;
+#endif /* CHECK_WD33C93 */
+
+ return 0;
+}
+
+static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
+{
+ struct Scsi_Host *instance;
+ unsigned long address;
+ int error;
+ unsigned int epc;
+ unsigned int default_dma_xfer_mask;
+ struct gvp11_hostdata *hdata;
+ struct gvp11_scsiregs *regs;
+ wd33c93_regs wdregs;
+
+ default_dma_xfer_mask = ent->driver_data;
+
+ /*
+ * Rumors state that some GVP ram boards use the same product
+ * code as the SCSI controllers. Therefore if the board-size
+ * is not 64KB we assume it is a ram board and bail out.
+ */
+ if (zorro_resource_len(z) != 0x10000)
+ return -ENODEV;
+
+ address = z->resource.start;
+ if (!request_mem_region(address, 256, "wd33c93"))
+ return -EBUSY;
+
+ regs = ZTWO_VADDR(address);
+
+ error = check_wd33c93(regs);
+ if (error)
+ goto fail_check_or_alloc;
+
+ instance = scsi_host_alloc(&gvp11_scsi_template,
+ sizeof(struct gvp11_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_check_or_alloc;
+ }
+
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+
+ regs->secret2 = 1;
+ regs->secret1 = 0;
+ regs->secret3 = 15;
+ while (regs->CNTR & GVP11_DMAC_BUSY)
+ ;
+ regs->CNTR = 0;
+ regs->BANK = 0;
+
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+
+ hdata = shost_priv(instance);
+ if (gvp11_xfer_mask)
+ hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
+ else
+ hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
+
+ hdata->wh.no_sync = 0xff;
+ hdata->wh.fast = 0;
+ hdata->wh.dma_mode = CTRL_DMA;
+ hdata->regs = regs;
+
+ /*
+ * Check for 14MHz SCSI clock
+ */
+ epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop,
+ (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
+ : WD33C93_FS_12_15);
+
+ error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
+ "GVP11 SCSI", instance);
+ if (error)
+ goto fail_irq;
+
+ regs->CNTR = GVP11_DMAC_INT_ENABLE;
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ zorro_set_drvdata(z, instance);
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ free_irq(IRQ_AMIGA_PORTS, instance);
+fail_irq:
+ scsi_host_put(instance);
+fail_check_or_alloc:
+ release_mem_region(address, 256);
+ return error;
+}
+
+static void gvp11_remove(struct zorro_dev *z)
+{
+ struct Scsi_Host *instance = zorro_get_drvdata(z);
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+
+ hdata->regs->CNTR = 0;
+ scsi_remove_host(instance);
+ free_irq(IRQ_AMIGA_PORTS, instance);
+ scsi_host_put(instance);
+ release_mem_region(z->resource.start, 256);
+}
+
+ /*
+ * This should (hopefully) be the correct way to identify
+ * all the different GVP SCSI controllers (except for the
+ * SERIES I though).
+ */
+
+static struct zorro_device_id gvp11_zorro_tbl[] = {
+ { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
+ { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
+ { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
+ { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff },
+ { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff },
+ { ZORRO_PROD_GVP_A1291, ~0x07ffffff },
+ { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl);
+
+static struct zorro_driver gvp11_driver = {
+ .name = "gvp11",
+ .id_table = gvp11_zorro_tbl,
+ .probe = gvp11_probe,
+ .remove = gvp11_remove,
+};
+
+static int __init gvp11_init(void)
+{
+ return zorro_register_driver(&gvp11_driver);
+}
+module_init(gvp11_init);
+
+static void __exit gvp11_exit(void)
+{
+ zorro_unregister_driver(&gvp11_driver);
+}
+module_exit(gvp11_exit);
+
+MODULE_DESCRIPTION("GVP Series II SCSI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
new file mode 100644
index 000000000..852913cde
--- /dev/null
+++ b/drivers/scsi/gvp11.h
@@ -0,0 +1,52 @@
+#ifndef GVP11_H
+
+/* $Id: gvp11.h,v 1.4 1997/01/19 23:07:12 davem Exp $
+ *
+ * Header file for the GVP Series II SCSI controller for Linux
+ *
+ * Written and (C) 1993, Ralf Baechle, see gvp11.c for more info
+ * based on a2091.h (C) 1993 by Hamish Macdonald
+ *
+ */
+
+#include <linux/types.h>
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+/*
+ * if the transfer address ANDed with this results in a non-zero
+ * result, then we can't use DMA.
+ */
+#define GVP11_XFER_MASK (0xff000001)
+
+struct gvp11_scsiregs {
+ unsigned char pad1[64];
+ volatile unsigned short CNTR;
+ unsigned char pad2[31];
+ volatile unsigned char SASR;
+ unsigned char pad3;
+ volatile unsigned char SCMD;
+ unsigned char pad4[4];
+ volatile unsigned short BANK;
+ unsigned char pad5[6];
+ volatile unsigned long ACR;
+ volatile unsigned short secret1; /* store 0 here */
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short secret2; /* store 1 here */
+ volatile unsigned short secret3; /* store 15 here */
+};
+
+/* bits in CNTR */
+#define GVP11_DMAC_BUSY (1<<0)
+#define GVP11_DMAC_INT_PENDING (1<<1)
+#define GVP11_DMAC_INT_ENABLE (1<<3)
+#define GVP11_DMAC_DIR_WRITE (1<<4)
+
+#endif /* GVP11_H */
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
new file mode 100644
index 000000000..8bb173e01
--- /dev/null
+++ b/drivers/scsi/hosts.c
@@ -0,0 +1,640 @@
+/*
+ * hosts.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * Copyright (C) 2002-2003 Christoph Hellwig
+ *
+ * mid to lowlevel SCSI driver interface
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
+ * Added QLOGIC QLA1280 SCSI controller kernel host support.
+ * August 4, 1999 Fred Lewis, Intel DuPont
+ *
+ * Updated to reflect the new initialization scheme for the higher
+ * level of scsi drivers (sd/sr/st)
+ * September 17, 2000 Torben Mathiasen <tmm@image.dk>
+ *
+ * Restructured scsi_host lists and associated functions.
+ * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/transport_class.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+
+static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
+
+
+static void scsi_host_cls_release(struct device *dev)
+{
+ put_device(&class_to_shost(dev)->shost_gendev);
+}
+
+static struct class shost_class = {
+ .name = "scsi_host",
+ .dev_release = scsi_host_cls_release,
+};
+
+/**
+ * scsi_host_set_state - Take the given host through the host state model.
+ * @shost: scsi host to change the state of.
+ * @state: state to change to.
+ *
+ * Returns zero if unsuccessful or an error if the requested
+ * transition is illegal.
+ **/
+int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
+{
+ enum scsi_host_state oldstate = shost->shost_state;
+
+ if (state == oldstate)
+ return 0;
+
+ switch (state) {
+ case SHOST_CREATED:
+ /* There are no legal states that come back to
+ * created. This is the manually initialised start
+ * state */
+ goto illegal;
+
+ case SHOST_RUNNING:
+ switch (oldstate) {
+ case SHOST_CREATED:
+ case SHOST_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_RECOVERY:
+ switch (oldstate) {
+ case SHOST_RUNNING:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_CANCEL:
+ switch (oldstate) {
+ case SHOST_CREATED:
+ case SHOST_RUNNING:
+ case SHOST_CANCEL_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_DEL:
+ switch (oldstate) {
+ case SHOST_CANCEL:
+ case SHOST_DEL_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_CANCEL_RECOVERY:
+ switch (oldstate) {
+ case SHOST_CANCEL:
+ case SHOST_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SHOST_DEL_RECOVERY:
+ switch (oldstate) {
+ case SHOST_CANCEL_RECOVERY:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+ }
+ shost->shost_state = state;
+ return 0;
+
+ illegal:
+ SCSI_LOG_ERROR_RECOVERY(1,
+ shost_printk(KERN_ERR, shost,
+ "Illegal host state transition"
+ "%s->%s\n",
+ scsi_host_state_name(oldstate),
+ scsi_host_state_name(state)));
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scsi_host_set_state);
+
+/**
+ * scsi_remove_host - remove a scsi host
+ * @shost: a pointer to a scsi host to remove
+ **/
+void scsi_remove_host(struct Scsi_Host *shost)
+{
+ unsigned long flags;
+
+ mutex_lock(&shost->scan_mutex);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_CANCEL))
+ if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ mutex_unlock(&shost->scan_mutex);
+ return;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_autopm_get_host(shost);
+ flush_workqueue(shost->tmf_work_q);
+ scsi_forget_host(shost);
+ mutex_unlock(&shost->scan_mutex);
+ scsi_proc_host_rm(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_DEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ transport_unregister_device(&shost->shost_gendev);
+ device_unregister(&shost->shost_dev);
+ device_del(&shost->shost_gendev);
+}
+EXPORT_SYMBOL(scsi_remove_host);
+
+/**
+ * scsi_add_host_with_dma - add a scsi host with dma device
+ * @shost: scsi host pointer to add
+ * @dev: a struct device of type scsi class
+ * @dma_dev: dma device for the host
+ *
+ * Note: You rarely need to worry about this unless you're in a
+ * virtualised host environments, so use the simpler scsi_add_host()
+ * function instead.
+ *
+ * Return value:
+ * 0 on success / != 0 for error
+ **/
+int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+ struct device *dma_dev)
+{
+ struct scsi_host_template *sht = shost->hostt;
+ int error = -EINVAL;
+
+ shost_printk(KERN_INFO, shost, "%s\n",
+ sht->info ? sht->info(shost) : sht->name);
+
+ if (!shost->can_queue) {
+ shost_printk(KERN_ERR, shost,
+ "can_queue = 0 no longer supported\n");
+ goto fail;
+ }
+
+ if (shost_use_blk_mq(shost)) {
+ error = scsi_mq_setup_tags(shost);
+ if (error)
+ goto fail;
+ }
+
+ /*
+ * Note that we allocate the freelist even for the MQ case for now,
+ * as we need a command set aside for scsi_reset_provider. Having
+ * the full host freelist and one command available for that is a
+ * little heavy-handed, but avoids introducing a special allocator
+ * just for this. Eventually the structure of scsi_reset_provider
+ * will need a major overhaul.
+ */
+ error = scsi_setup_command_freelist(shost);
+ if (error)
+ goto out_destroy_tags;
+
+
+ if (!shost->shost_gendev.parent)
+ shost->shost_gendev.parent = dev ? dev : &platform_bus;
+ if (!dma_dev)
+ dma_dev = shost->shost_gendev.parent;
+
+ shost->dma_dev = dma_dev;
+
+ error = device_add(&shost->shost_gendev);
+ if (error)
+ goto out_destroy_freelist;
+
+ pm_runtime_set_active(&shost->shost_gendev);
+ pm_runtime_enable(&shost->shost_gendev);
+ device_enable_async_suspend(&shost->shost_gendev);
+
+ scsi_host_set_state(shost, SHOST_RUNNING);
+ get_device(shost->shost_gendev.parent);
+
+ device_enable_async_suspend(&shost->shost_dev);
+
+ error = device_add(&shost->shost_dev);
+ if (error)
+ goto out_del_gendev;
+
+ get_device(&shost->shost_gendev);
+
+ if (shost->transportt->host_size) {
+ shost->shost_data = kzalloc(shost->transportt->host_size,
+ GFP_KERNEL);
+ if (shost->shost_data == NULL) {
+ error = -ENOMEM;
+ goto out_del_dev;
+ }
+ }
+
+ if (shost->transportt->create_work_queue) {
+ snprintf(shost->work_q_name, sizeof(shost->work_q_name),
+ "scsi_wq_%d", shost->host_no);
+ shost->work_q = create_singlethread_workqueue(
+ shost->work_q_name);
+ if (!shost->work_q) {
+ error = -EINVAL;
+ goto out_free_shost_data;
+ }
+ }
+
+ error = scsi_sysfs_add_host(shost);
+ if (error)
+ goto out_destroy_host;
+
+ scsi_proc_host_add(shost);
+ return error;
+
+ out_destroy_host:
+ if (shost->work_q)
+ destroy_workqueue(shost->work_q);
+ out_free_shost_data:
+ kfree(shost->shost_data);
+ out_del_dev:
+ device_del(&shost->shost_dev);
+ out_del_gendev:
+ device_del(&shost->shost_gendev);
+ out_destroy_freelist:
+ scsi_destroy_command_freelist(shost);
+ out_destroy_tags:
+ if (shost_use_blk_mq(shost))
+ scsi_mq_destroy_tags(shost);
+ fail:
+ return error;
+}
+EXPORT_SYMBOL(scsi_add_host_with_dma);
+
+static void scsi_host_dev_release(struct device *dev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct device *parent = dev->parent;
+ struct request_queue *q;
+ void *queuedata;
+
+ scsi_proc_hostdir_rm(shost->hostt);
+
+ if (shost->tmf_work_q)
+ destroy_workqueue(shost->tmf_work_q);
+ if (shost->ehandler)
+ kthread_stop(shost->ehandler);
+ if (shost->work_q)
+ destroy_workqueue(shost->work_q);
+ q = shost->uspace_req_q;
+ if (q) {
+ queuedata = q->queuedata;
+ blk_cleanup_queue(q);
+ kfree(queuedata);
+ }
+
+ scsi_destroy_command_freelist(shost);
+ if (shost_use_blk_mq(shost)) {
+ if (shost->tag_set.tags)
+ scsi_mq_destroy_tags(shost);
+ } else {
+ if (shost->bqt)
+ blk_free_tags(shost->bqt);
+ }
+
+ kfree(shost->shost_data);
+
+ if (parent)
+ put_device(parent);
+ kfree(shost);
+}
+
+static int shost_eh_deadline = -1;
+
+module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(eh_deadline,
+ "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
+
+static struct device_type scsi_host_type = {
+ .name = "scsi_host",
+ .release = scsi_host_dev_release,
+};
+
+/**
+ * scsi_host_alloc - register a scsi host adapter instance.
+ * @sht: pointer to scsi host template
+ * @privsize: extra bytes to allocate for driver
+ *
+ * Note:
+ * Allocate a new Scsi_Host and perform basic initialization.
+ * The host is not published to the scsi midlayer until scsi_add_host
+ * is called.
+ *
+ * Return value:
+ * Pointer to a new Scsi_Host
+ **/
+struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+{
+ struct Scsi_Host *shost;
+ gfp_t gfp_mask = GFP_KERNEL;
+
+ if (sht->unchecked_isa_dma && privsize)
+ gfp_mask |= __GFP_DMA;
+
+ shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
+ if (!shost)
+ return NULL;
+
+ shost->host_lock = &shost->default_lock;
+ spin_lock_init(shost->host_lock);
+ shost->shost_state = SHOST_CREATED;
+ INIT_LIST_HEAD(&shost->__devices);
+ INIT_LIST_HEAD(&shost->__targets);
+ INIT_LIST_HEAD(&shost->eh_cmd_q);
+ INIT_LIST_HEAD(&shost->starved_list);
+ init_waitqueue_head(&shost->host_wait);
+ mutex_init(&shost->scan_mutex);
+
+ /*
+ * subtract one because we increment first then return, but we need to
+ * know what the next host number was before increment
+ */
+ shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+ shost->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+ shost->max_channel = 0;
+ shost->max_id = 8;
+ shost->max_lun = 8;
+
+ /* Give each shost a default transportt */
+ shost->transportt = &blank_transport_template;
+
+ /*
+ * All drivers right now should be able to handle 12 byte
+ * commands. Every so often there are requests for 16 byte
+ * commands, but individual low-level drivers need to certify that
+ * they actually do something sensible with such commands.
+ */
+ shost->max_cmd_len = 12;
+ shost->hostt = sht;
+ shost->this_id = sht->this_id;
+ shost->can_queue = sht->can_queue;
+ shost->sg_tablesize = sht->sg_tablesize;
+ shost->sg_prot_tablesize = sht->sg_prot_tablesize;
+ shost->cmd_per_lun = sht->cmd_per_lun;
+ shost->unchecked_isa_dma = sht->unchecked_isa_dma;
+ shost->use_clustering = sht->use_clustering;
+ shost->no_write_same = sht->no_write_same;
+
+ if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
+ shost->eh_deadline = -1;
+ else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
+ shost_printk(KERN_WARNING, shost,
+ "eh_deadline %u too large, setting to %u\n",
+ shost_eh_deadline, INT_MAX / HZ);
+ shost->eh_deadline = INT_MAX;
+ } else
+ shost->eh_deadline = shost_eh_deadline * HZ;
+
+ if (sht->supported_mode == MODE_UNKNOWN)
+ /* means we didn't set it ... default to INITIATOR */
+ shost->active_mode = MODE_INITIATOR;
+ else
+ shost->active_mode = sht->supported_mode;
+
+ if (sht->max_host_blocked)
+ shost->max_host_blocked = sht->max_host_blocked;
+ else
+ shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;
+
+ /*
+ * If the driver imposes no hard sector transfer limit, start at
+ * machine infinity initially.
+ */
+ if (sht->max_sectors)
+ shost->max_sectors = sht->max_sectors;
+ else
+ shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
+
+ /*
+ * assume a 4GB boundary, if not set
+ */
+ if (sht->dma_boundary)
+ shost->dma_boundary = sht->dma_boundary;
+ else
+ shost->dma_boundary = 0xffffffff;
+
+ shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq;
+
+ device_initialize(&shost->shost_gendev);
+ dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
+ shost->shost_gendev.bus = &scsi_bus_type;
+ shost->shost_gendev.type = &scsi_host_type;
+
+ device_initialize(&shost->shost_dev);
+ shost->shost_dev.parent = &shost->shost_gendev;
+ shost->shost_dev.class = &shost_class;
+ dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
+ shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
+
+ shost->ehandler = kthread_run(scsi_error_handler, shost,
+ "scsi_eh_%d", shost->host_no);
+ if (IS_ERR(shost->ehandler)) {
+ shost_printk(KERN_WARNING, shost,
+ "error handler thread failed to spawn, error = %ld\n",
+ PTR_ERR(shost->ehandler));
+ goto fail_kfree;
+ }
+
+ shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1, shost->host_no);
+ if (!shost->tmf_work_q) {
+ shost_printk(KERN_WARNING, shost,
+ "failed to create tmf workq\n");
+ goto fail_kthread;
+ }
+ scsi_proc_hostdir_add(shost->hostt);
+ return shost;
+
+ fail_kthread:
+ kthread_stop(shost->ehandler);
+ fail_kfree:
+ kfree(shost);
+ return NULL;
+}
+EXPORT_SYMBOL(scsi_host_alloc);
+
+struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
+{
+ struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);
+
+ if (!sht->detect) {
+ printk(KERN_WARNING "scsi_register() called on new-style "
+ "template for driver %s\n", sht->name);
+ dump_stack();
+ }
+
+ if (shost)
+ list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
+ return shost;
+}
+EXPORT_SYMBOL(scsi_register);
+
+void scsi_unregister(struct Scsi_Host *shost)
+{
+ list_del(&shost->sht_legacy_list);
+ scsi_host_put(shost);
+}
+EXPORT_SYMBOL(scsi_unregister);
+
+static int __scsi_host_match(struct device *dev, const void *data)
+{
+ struct Scsi_Host *p;
+ const unsigned short *hostnum = data;
+
+ p = class_to_shost(dev);
+ return p->host_no == *hostnum;
+}
+
+/**
+ * scsi_host_lookup - get a reference to a Scsi_Host by host no
+ * @hostnum: host number to locate
+ *
+ * Return value:
+ * A pointer to located Scsi_Host or NULL.
+ *
+ * The caller must do a scsi_host_put() to drop the reference
+ * that scsi_host_get() took. The put_device() below dropped
+ * the reference from class_find_device().
+ **/
+struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
+{
+ struct device *cdev;
+ struct Scsi_Host *shost = NULL;
+
+ cdev = class_find_device(&shost_class, NULL, &hostnum,
+ __scsi_host_match);
+ if (cdev) {
+ shost = scsi_host_get(class_to_shost(cdev));
+ put_device(cdev);
+ }
+ return shost;
+}
+EXPORT_SYMBOL(scsi_host_lookup);
+
+/**
+ * scsi_host_get - inc a Scsi_Host ref count
+ * @shost: Pointer to Scsi_Host to inc.
+ **/
+struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
+{
+ if ((shost->shost_state == SHOST_DEL) ||
+ !get_device(&shost->shost_gendev))
+ return NULL;
+ return shost;
+}
+EXPORT_SYMBOL(scsi_host_get);
+
+/**
+ * scsi_host_put - dec a Scsi_Host ref count
+ * @shost: Pointer to Scsi_Host to dec.
+ **/
+void scsi_host_put(struct Scsi_Host *shost)
+{
+ put_device(&shost->shost_gendev);
+}
+EXPORT_SYMBOL(scsi_host_put);
+
+int scsi_init_hosts(void)
+{
+ return class_register(&shost_class);
+}
+
+void scsi_exit_hosts(void)
+{
+ class_unregister(&shost_class);
+}
+
+int scsi_is_host_device(const struct device *dev)
+{
+ return dev->type == &scsi_host_type;
+}
+EXPORT_SYMBOL(scsi_is_host_device);
+
+/**
+ * scsi_queue_work - Queue work to the Scsi_Host workqueue.
+ * @shost: Pointer to Scsi_Host.
+ * @work: Work to queue for execution.
+ *
+ * Return value:
+ * 1 - work queued for execution
+ * 0 - work is already queued
+ * -EINVAL - work queue doesn't exist
+ **/
+int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
+{
+ if (unlikely(!shost->work_q)) {
+ shost_printk(KERN_ERR, shost,
+ "ERROR: Scsi host '%s' attempted to queue scsi-work, "
+ "when no workqueue created.\n", shost->hostt->name);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_work(shost->work_q, work);
+}
+EXPORT_SYMBOL_GPL(scsi_queue_work);
+
+/**
+ * scsi_flush_work - Flush a Scsi_Host's workqueue.
+ * @shost: Pointer to Scsi_Host.
+ **/
+void scsi_flush_work(struct Scsi_Host *shost)
+{
+ if (!shost->work_q) {
+ shost_printk(KERN_ERR, shost,
+ "ERROR: Scsi host '%s' attempted to flush scsi-work, "
+ "when no workqueue created.\n", shost->hostt->name);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(shost->work_q);
+}
+EXPORT_SYMBOL_GPL(scsi_flush_work);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 000000000..8eab107b5
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,7627 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/compat.h>
+#include <linux/blktrace_api.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/cciss_ioctl.h>
+#include <linux/string.h>
+#include <linux/bitmap.h>
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
+#include <asm/unaligned.h>
+#include <asm/div64.h>
+#include "hpsa_cmd.h"
+#include "hpsa.h"
+
+/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
+#define HPSA_DRIVER_VERSION "3.4.4-1"
+#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+#define HPSA "hpsa"
+
+/* How long to wait for CISS doorbell communication */
+#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
+#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
+#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
+#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
+#define MAX_IOCTL_CONFIG_WAIT 1000
+
+/*define how many times we will try a command because of bus resets */
+#define MAX_CMD_RETRIES 3
+
+/* Embedded module documentation macros - see modules.h */
+MODULE_AUTHOR("Hewlett-Packard Company");
+MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
+ HPSA_DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION(HPSA_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static int hpsa_allow_any;
+module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_allow_any,
+ "Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+ "Use 'simple mode' rather than 'performant mode'");
+
+/* define the PCI info for the cards we can control */
+static const struct pci_device_id hpsa_pci_device_id[] = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+ {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
+ {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
+
+/* board_id = Subsystem Device ID & Vendor ID
+ * product = Marketing Name for the board
+ * access = Address of the struct of function pointers
+ */
+static struct board_type products[] = {
+ {0x3241103C, "Smart Array P212", &SA5_access},
+ {0x3243103C, "Smart Array P410", &SA5_access},
+ {0x3245103C, "Smart Array P410i", &SA5_access},
+ {0x3247103C, "Smart Array P411", &SA5_access},
+ {0x3249103C, "Smart Array P812", &SA5_access},
+ {0x324A103C, "Smart Array P712m", &SA5_access},
+ {0x324B103C, "Smart Array P711m", &SA5_access},
+ {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
+ {0x3350103C, "Smart Array P222", &SA5_access},
+ {0x3351103C, "Smart Array P420", &SA5_access},
+ {0x3352103C, "Smart Array P421", &SA5_access},
+ {0x3353103C, "Smart Array P822", &SA5_access},
+ {0x3354103C, "Smart Array P420i", &SA5_access},
+ {0x3355103C, "Smart Array P220i", &SA5_access},
+ {0x3356103C, "Smart Array P721m", &SA5_access},
+ {0x1921103C, "Smart Array P830i", &SA5_access},
+ {0x1922103C, "Smart Array P430", &SA5_access},
+ {0x1923103C, "Smart Array P431", &SA5_access},
+ {0x1924103C, "Smart Array P830", &SA5_access},
+ {0x1926103C, "Smart Array P731m", &SA5_access},
+ {0x1928103C, "Smart Array P230i", &SA5_access},
+ {0x1929103C, "Smart Array P530", &SA5_access},
+ {0x21BD103C, "Smart Array P244br", &SA5_access},
+ {0x21BE103C, "Smart Array P741m", &SA5_access},
+ {0x21BF103C, "Smart HBA H240ar", &SA5_access},
+ {0x21C0103C, "Smart Array P440ar", &SA5_access},
+ {0x21C1103C, "Smart Array P840ar", &SA5_access},
+ {0x21C2103C, "Smart Array P440", &SA5_access},
+ {0x21C3103C, "Smart Array P441", &SA5_access},
+ {0x21C4103C, "Smart Array", &SA5_access},
+ {0x21C5103C, "Smart Array P841", &SA5_access},
+ {0x21C6103C, "Smart HBA H244br", &SA5_access},
+ {0x21C7103C, "Smart HBA H240", &SA5_access},
+ {0x21C8103C, "Smart HBA H241", &SA5_access},
+ {0x21C9103C, "Smart Array", &SA5_access},
+ {0x21CA103C, "Smart Array P246br", &SA5_access},
+ {0x21CB103C, "Smart Array P840", &SA5_access},
+ {0x21CC103C, "Smart Array", &SA5_access},
+ {0x21CD103C, "Smart Array", &SA5_access},
+ {0x21CE103C, "Smart HBA", &SA5_access},
+ {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+ {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+ {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+ {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+ {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
+ {0xFFFF103C, "Unknown Smart Array", &SA5_access},
+};
+
+static int number_of_controllers;
+
+static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
+static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+
+#ifdef CONFIG_COMPAT
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
+ void __user *arg);
+#endif
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
+ int cmd_type);
+static void hpsa_free_cmd_pool(struct ctlr_info *h);
+#define VPD_PAGE (1 << 8)
+
+static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static void hpsa_scan_start(struct Scsi_Host *);
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+ unsigned long elapsed_time);
+static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
+
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_slave_alloc(struct scsi_device *sdev);
+static void hpsa_slave_destroy(struct scsi_device *sdev);
+
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
+static int check_for_unit_attention(struct ctlr_info *h,
+ struct CommandList *c);
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+ struct CommandList *c);
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets,
+ int nsgs, int min_blocks, u32 *bucket_map);
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static inline u32 next_command(struct ctlr_info *h, u8 q);
+static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
+ int wait_for_ready);
+static inline void finish_cmd(struct CommandList *c);
+static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
+static void hpsa_command_resubmit_worker(struct work_struct *work);
+
+static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
+{
+ unsigned long *priv = shost_priv(sdev->host);
+ return (struct ctlr_info *) *priv;
+}
+
+static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
+{
+ unsigned long *priv = shost_priv(sh);
+ return (struct ctlr_info *) *priv;
+}
+
+static int check_for_unit_attention(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+ return 0;
+
+ switch (c->err_info->SenseInfo[12]) {
+ case STATE_CHANGED:
+ dev_warn(&h->pdev->dev, HPSA "%d: a state change "
+ "detected, command retried\n", h->ctlr);
+ break;
+ case LUN_FAILED:
+ dev_warn(&h->pdev->dev,
+ HPSA "%d: LUN failure detected\n", h->ctlr);
+ break;
+ case REPORT_LUNS_CHANGED:
+ dev_warn(&h->pdev->dev,
+ HPSA "%d: report LUN data changed\n", h->ctlr);
+ /*
+ * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
+ * target (array) devices.
+ */
+ break;
+ case POWER_OR_RESET:
+ dev_warn(&h->pdev->dev, HPSA "%d: a power on "
+ "or device reset detected\n", h->ctlr);
+ break;
+ case UNIT_ATTENTION_CLEARED:
+ dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
+ "cleared by another initiator\n", h->ctlr);
+ break;
+ default:
+ dev_warn(&h->pdev->dev, HPSA "%d: unknown "
+ "unit attention detected\n", h->ctlr);
+ break;
+ }
+ return 1;
+}
+
+static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
+{
+ if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
+ (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
+ c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
+ return 0;
+ dev_warn(&h->pdev->dev, HPSA "device busy");
+ return 1;
+}
+
+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int status, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &status) != 1)
+ return -EINVAL;
+ h = shost_to_hba(shost);
+ h->acciopath_status = !!status;
+ dev_warn(&h->pdev->dev,
+ "hpsa: HP SSD Smart Path %s via sysfs update.\n",
+ h->acciopath_status ? "enabled" : "disabled");
+ return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int debug_level, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ return -EINVAL;
+ if (debug_level < 0)
+ debug_level = 0;
+ h = shost_to_hba(shost);
+ h->raid_offload_debug = debug_level;
+ dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+ h->raid_offload_debug);
+ return count;
+}
+
+static ssize_t host_store_rescan(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ h = shost_to_hba(shost);
+ hpsa_scan_start(h->scsi_host);
+ return count;
+}
+
+static ssize_t host_show_firmware_revision(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ unsigned char *fwrev;
+
+ h = shost_to_hba(shost);
+ if (!h->hba_inquiry_data)
+ return 0;
+ fwrev = &h->hba_inquiry_data[32];
+ return snprintf(buf, 20, "%c%c%c%c\n",
+ fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
+}
+
+static ssize_t host_show_commands_outstanding(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ctlr_info *h = shost_to_hba(shost);
+
+ return snprintf(buf, 20, "%d\n",
+ atomic_read(&h->commands_outstanding));
+}
+
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%s\n",
+ h->transMethod & CFGTBL_Trans_Performant ?
+ "performant" : "simple");
+}
+
+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+ (h->acciopath_status == 1) ? "enabled" : "disabled");
+}
+
+/* List of controllers which cannot be hard reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+ 0x324a103C, /* Smart Array P712m */
+ 0x324b103C, /* SmartArray P711m */
+ 0x3223103C, /* Smart Array P800 */
+ 0x3234103C, /* Smart Array P400 */
+ 0x3235103C, /* Smart Array P400i */
+ 0x3211103C, /* Smart Array E200i */
+ 0x3212103C, /* Smart Array E200 */
+ 0x3213103C, /* Smart Array E200i */
+ 0x3214103C, /* Smart Array E200i */
+ 0x3215103C, /* Smart Array E200i */
+ 0x3237103C, /* Smart Array E500 */
+ 0x323D103C, /* Smart Array P700m */
+ 0x40800E11, /* Smart Array 5i */
+ 0x409C0E11, /* Smart Array 6400 */
+ 0x409D0E11, /* Smart Array 6400 EM */
+ 0x40700E11, /* Smart Array 5300 */
+ 0x40820E11, /* Smart Array 532 */
+ 0x40830E11, /* Smart Array 5312 */
+ 0x409A0E11, /* Smart Array 641 */
+ 0x409B0E11, /* Smart Array 642 */
+ 0x40910E11, /* Smart Array 6i */
+};
+
+/* List of controllers which cannot even be soft reset */
+static u32 soft_unresettable_controller[] = {
+ 0x40800E11, /* Smart Array 5i */
+ 0x40700E11, /* Smart Array 5300 */
+ 0x40820E11, /* Smart Array 532 */
+ 0x40830E11, /* Smart Array 5312 */
+ 0x409A0E11, /* Smart Array 641 */
+ 0x409B0E11, /* Smart Array 642 */
+ 0x40910E11, /* Smart Array 6i */
+ /* Exclude 640x boards. These are two pci devices in one slot
+ * which share a battery backed cache module. One controls the
+ * cache, the other accesses the cache through the one that controls
+ * it. If we reset the one controlling the cache, the other will
+ * likely not be happy. Just forbid resetting this conjoined mess.
+ * The 640x isn't really supported by hpsa anyway.
+ */
+ 0x409C0E11, /* Smart Array 6400 */
+ 0x409D0E11, /* Smart Array 6400 EM */
+};
+
+static int ctlr_is_hard_resettable(u32 board_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+ if (unresettable_controller[i] == board_id)
+ return 0;
+ return 1;
+}
+
+static int ctlr_is_soft_resettable(u32 board_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
+ if (soft_unresettable_controller[i] == board_id)
+ return 0;
+ return 1;
+}
+
+static int ctlr_is_resettable(u32 board_id)
+{
+ return ctlr_is_hard_resettable(board_id) ||
+ ctlr_is_soft_resettable(board_id);
+}
+
+static ssize_t host_show_resettable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
+}
+
+static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
+{
+ return (scsi3addr[3] & 0xC0) == 0x40;
+}
+
+static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
+ "1(+0)ADM", "UNKNOWN"
+};
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t l = 0;
+ unsigned char rlevel;
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+
+ /* Is this even a logical drive? */
+ if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ l = snprintf(buf, PAGE_SIZE, "N/A\n");
+ return l;
+ }
+
+ rlevel = hdev->raid_level;
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (rlevel > RAID_UNKNOWN)
+ rlevel = RAID_UNKNOWN;
+ l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
+ return l;
+}
+
+static ssize_t lunid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ unsigned char lunid[8];
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ lunid[0], lunid[1], lunid[2], lunid[3],
+ lunid[4], lunid[5], lunid[6], lunid[7]);
+}
+
+static ssize_t unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ unsigned char sn[16];
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ memcpy(sn, hdev->device_id, sizeof(sn));
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+
+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ int offload_enabled;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ offload_enabled = hdev->offload_enabled;
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+ host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
+ host_show_hp_ssd_smart_path_status,
+ host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+ host_store_raid_offload_debug);
+static DEVICE_ATTR(firmware_revision, S_IRUGO,
+ host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+ host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+ host_show_transport_mode, NULL);
+static DEVICE_ATTR(resettable, S_IRUGO,
+ host_show_resettable, NULL);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ &dev_attr_hp_ssd_smart_path_enabled,
+ NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan,
+ &dev_attr_firmware_revision,
+ &dev_attr_commands_outstanding,
+ &dev_attr_transport_mode,
+ &dev_attr_resettable,
+ &dev_attr_hp_ssd_smart_path_status,
+ &dev_attr_raid_offload_debug,
+ NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+ .module = THIS_MODULE,
+ .name = HPSA,
+ .proc_name = HPSA,
+ .queuecommand = hpsa_scsi_queue_command,
+ .scan_start = hpsa_scan_start,
+ .scan_finished = hpsa_scan_finished,
+ .change_queue_depth = hpsa_change_queue_depth,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = hpsa_eh_abort_handler,
+ .eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ .ioctl = hpsa_ioctl,
+ .slave_alloc = hpsa_slave_alloc,
+ .slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpsa_compat_ioctl,
+#endif
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+ .max_sectors = 8192,
+ .no_write_same = 1,
+};
+
+static inline u32 next_command(struct ctlr_info *h, u8 q)
+{
+ u32 a;
+ struct reply_queue_buffer *rq = &h->reply_queue[q];
+
+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return h->access.command_completed(h, q);
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return h->access.command_completed(h, q);
+
+ if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+ a = rq->head[rq->current_entry];
+ rq->current_entry++;
+ atomic_dec(&h->commands_outstanding);
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (rq->current_entry == h->max_commands) {
+ rq->current_entry = 0;
+ rq->wraparound ^= 1;
+ }
+ return a;
+}
+
+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+ if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+ if (likely(h->msix_vector > 0))
+ c->Header.ReplyQueue =
+ raw_smp_processor_id() % h->nreply_queues;
+ }
+}
+
+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit (bit 0)
+ * - pull count (bits 1-3)
+ * - command type (bits 4-6)
+ */
+ c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+ IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit not used in ioaccel mode 2
+ * - pull count (bits 0-3)
+ * - command type isn't needed for ioaccel2
+ */
+ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
+
+static int is_firmware_flash_cmd(u8 *cdb)
+{
+ return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
+}
+
+/*
+ * During firmware flash, the heartbeat register may not update as frequently
+ * as it should. So we dial down lockup detection during firmware flash. and
+ * dial it back up when firmware flash completes.
+ */
+#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
+#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (!is_firmware_flash_cmd(c->Request.CDB))
+ return;
+ atomic_inc(&h->firmware_flash_in_progress);
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
+}
+
+static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (is_firmware_flash_cmd(c->Request.CDB) &&
+ atomic_dec_and_test(&h->firmware_flash_in_progress))
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+}
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ dial_down_lockup_detection_during_fw_flash(h, c);
+ atomic_inc(&h->commands_outstanding);
+ switch (c->cmd_type) {
+ case CMD_IOACCEL1:
+ set_ioaccel1_performant_mode(h, c);
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ break;
+ case CMD_IOACCEL2:
+ set_ioaccel2_performant_mode(h, c);
+ writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+ break;
+ default:
+ set_performant_mode(h, c);
+ h->access.submit_command(h, c);
+ }
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+ return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+ if (!h->hba_inquiry_data)
+ return 0;
+ if ((h->hba_inquiry_data[2] & 0x07) == 5)
+ return 1;
+ return 0;
+}
+
+static int hpsa_find_target_lun(struct ctlr_info *h,
+ unsigned char scsi3addr[], int bus, int *target, int *lun)
+{
+ /* finds an unused bus, target, lun for a new physical device
+ * assumes h->devlock is held
+ */
+ int i, found = 0;
+ DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
+
+ bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
+
+ for (i = 0; i < h->ndevices; i++) {
+ if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
+ __set_bit(h->dev[i]->target, lun_taken);
+ }
+
+ i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
+ if (i < HPSA_MAX_DEVICES) {
+ /* *bus = 1; */
+ *target = i;
+ *lun = 0;
+ found = 1;
+ }
+ return !found;
+}
+
+/* Add an entry into h->dev[] array. */
+static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
+ struct hpsa_scsi_dev_t *device,
+ struct hpsa_scsi_dev_t *added[], int *nadded)
+{
+ /* assumes h->devlock is held */
+ int n = h->ndevices;
+ int i;
+ unsigned char addr1[8], addr2[8];
+ struct hpsa_scsi_dev_t *sd;
+
+ if (n >= HPSA_MAX_DEVICES) {
+ dev_err(&h->pdev->dev, "too many devices, some will be "
+ "inaccessible.\n");
+ return -1;
+ }
+
+ /* physical devices do not have lun or target assigned until now. */
+ if (device->lun != -1)
+ /* Logical device, lun is already assigned. */
+ goto lun_assigned;
+
+ /* If this device a non-zero lun of a multi-lun device
+ * byte 4 of the 8-byte LUN addr will contain the logical
+ * unit no, zero otherwise.
+ */
+ if (device->scsi3addr[4] == 0) {
+ /* This is not a non-zero lun of a multi-lun device */
+ if (hpsa_find_target_lun(h, device->scsi3addr,
+ device->bus, &device->target, &device->lun) != 0)
+ return -1;
+ goto lun_assigned;
+ }
+
+ /* This is a non-zero lun of a multi-lun device.
+ * Search through our list and find the device which
+ * has the same 8 byte LUN address, excepting byte 4.
+ * Assign the same bus and target for this new LUN.
+ * Use the logical unit number from the firmware.
+ */
+ memcpy(addr1, device->scsi3addr, 8);
+ addr1[4] = 0;
+ for (i = 0; i < n; i++) {
+ sd = h->dev[i];
+ memcpy(addr2, sd->scsi3addr, 8);
+ addr2[4] = 0;
+ /* differ only in byte 4? */
+ if (memcmp(addr1, addr2, 8) == 0) {
+ device->bus = sd->bus;
+ device->target = sd->target;
+ device->lun = device->scsi3addr[4];
+ break;
+ }
+ }
+ if (device->lun == -1) {
+ dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
+ " suspect firmware bug or unsupported hardware "
+ "configuration.\n");
+ return -1;
+ }
+
+lun_assigned:
+
+ h->dev[n] = device;
+ h->ndevices++;
+ added[*nadded] = device;
+ (*nadded)++;
+
+ /* initially, (before registering with scsi layer) we don't
+ * know our hostno and we don't want to print anything first
+ * time anyway (the scsi layer's inquiries will show that info)
+ */
+ /* if (hostno != -1) */
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
+ scsi_device_type(device->devtype), hostno,
+ device->bus, device->target, device->lun);
+ return 0;
+}
+
+/* Update an entry in h->dev[] array. */
+static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
+ int entry, struct hpsa_scsi_dev_t *new_entry)
+{
+ /* assumes h->devlock is held */
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
+
+ /* Raid level changed. */
+ h->dev[entry]->raid_level = new_entry->raid_level;
+
+ /* Raid offload parameters changed. Careful about the ordering. */
+ if (new_entry->offload_config && new_entry->offload_enabled) {
+ /*
+ * if drive is newly offload_enabled, we want to copy the
+ * raid map data first. If previously offload_enabled and
+ * offload_config were set, raid map data had better be
+ * the same as it was before. if raid map data is changed
+ * then it had better be the case that
+ * h->dev[entry]->offload_enabled is currently 0.
+ */
+ h->dev[entry]->raid_map = new_entry->raid_map;
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+ wmb(); /* ensure raid map updated prior to ->offload_enabled */
+ }
+ h->dev[entry]->offload_config = new_entry->offload_config;
+ h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+ h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+ h->dev[entry]->queue_depth = new_entry->queue_depth;
+
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
+ scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
+ new_entry->target, new_entry->lun);
+}
+
+/* Replace an entry from h->dev[] array. */
+static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
+ int entry, struct hpsa_scsi_dev_t *new_entry,
+ struct hpsa_scsi_dev_t *added[], int *nadded,
+ struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+ /* assumes h->devlock is held */
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
+ removed[*nremoved] = h->dev[entry];
+ (*nremoved)++;
+
+ /*
+ * New physical devices won't have target/lun assigned yet
+ * so we need to preserve the values in the slot we are replacing.
+ */
+ if (new_entry->target == -1) {
+ new_entry->target = h->dev[entry]->target;
+ new_entry->lun = h->dev[entry]->lun;
+ }
+
+ h->dev[entry] = new_entry;
+ added[*nadded] = new_entry;
+ (*nadded)++;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
+ scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
+ new_entry->target, new_entry->lun);
+}
+
+/* Remove an entry from h->dev[] array. */
+static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
+ struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+ /* assumes h->devlock is held */
+ int i;
+ struct hpsa_scsi_dev_t *sd;
+
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
+
+ sd = h->dev[entry];
+ removed[*nremoved] = h->dev[entry];
+ (*nremoved)++;
+
+ for (i = entry; i < h->ndevices-1; i++)
+ h->dev[i] = h->dev[i+1];
+ h->ndevices--;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
+ scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
+ sd->lun);
+}
+
+#define SCSI3ADDR_EQ(a, b) ( \
+ (a)[7] == (b)[7] && \
+ (a)[6] == (b)[6] && \
+ (a)[5] == (b)[5] && \
+ (a)[4] == (b)[4] && \
+ (a)[3] == (b)[3] && \
+ (a)[2] == (b)[2] && \
+ (a)[1] == (b)[1] && \
+ (a)[0] == (b)[0])
+
+static void fixup_botched_add(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *added)
+{
+ /* called when scsi_add_device fails in order to re-adjust
+ * h->dev[] to match the mid layer's view.
+ */
+ unsigned long flags;
+ int i, j;
+
+ spin_lock_irqsave(&h->lock, flags);
+ for (i = 0; i < h->ndevices; i++) {
+ if (h->dev[i] == added) {
+ for (j = i; j < h->ndevices-1; j++)
+ h->dev[j] = h->dev[j+1];
+ h->ndevices--;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ kfree(added);
+}
+
+static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+ struct hpsa_scsi_dev_t *dev2)
+{
+ /* we compare everything except lun and target as these
+ * are not yet assigned. Compare parts likely
+ * to differ first
+ */
+ if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
+ sizeof(dev1->scsi3addr)) != 0)
+ return 0;
+ if (memcmp(dev1->device_id, dev2->device_id,
+ sizeof(dev1->device_id)) != 0)
+ return 0;
+ if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
+ return 0;
+ if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
+ return 0;
+ if (dev1->devtype != dev2->devtype)
+ return 0;
+ if (dev1->bus != dev2->bus)
+ return 0;
+ return 1;
+}
+
+static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
+ struct hpsa_scsi_dev_t *dev2)
+{
+ /* Device attributes that can change, but don't mean
+ * that the device is a different device, nor that the OS
+ * needs to be told anything about the change.
+ */
+ if (dev1->raid_level != dev2->raid_level)
+ return 1;
+ if (dev1->offload_config != dev2->offload_config)
+ return 1;
+ if (dev1->offload_enabled != dev2->offload_enabled)
+ return 1;
+ if (dev1->queue_depth != dev2->queue_depth)
+ return 1;
+ return 0;
+}
+
+/* Find needle in haystack. If exact match found, return DEVICE_SAME,
+ * and return needle location in *index. If scsi3addr matches, but not
+ * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
+ * location in *index.
+ * In the case of a minor device attribute change, such as RAID level, just
+ * return DEVICE_UPDATED, along with the updated device's location in index.
+ * If needle not found, return DEVICE_NOT_FOUND.
+ */
+static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
+ struct hpsa_scsi_dev_t *haystack[], int haystack_size,
+ int *index)
+{
+ int i;
+#define DEVICE_NOT_FOUND 0
+#define DEVICE_CHANGED 1
+#define DEVICE_SAME 2
+#define DEVICE_UPDATED 3
+ for (i = 0; i < haystack_size; i++) {
+ if (haystack[i] == NULL) /* previously removed. */
+ continue;
+ if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
+ *index = i;
+ if (device_is_the_same(needle, haystack[i])) {
+ if (device_updated(needle, haystack[i]))
+ return DEVICE_UPDATED;
+ return DEVICE_SAME;
+ } else {
+ /* Keep offline devices offline */
+ if (needle->volume_offline)
+ return DEVICE_NOT_FOUND;
+ return DEVICE_CHANGED;
+ }
+ }
+ }
+ *index = -1;
+ return DEVICE_NOT_FOUND;
+}
+
+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct offline_device_entry *device;
+ unsigned long flags;
+
+ /* Check to see if device is already on the list */
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_entry(device, &h->offline_device_list, offline_list) {
+ if (memcmp(device->scsi3addr, scsi3addr,
+ sizeof(device->scsi3addr)) == 0) {
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+ /* Device is not on the list, add it. */
+ device = kmalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ return;
+ }
+ memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_add_tail(&device->offline_list, &h->offline_device_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *sd)
+{
+ if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ switch (sd->volume_offline) {
+ case HPSA_LV_OK:
+ break;
+ case HPSA_LV_UNDERGOING_ERASE:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ }
+}
+
+/*
+ * Figure the list of physical drive pointers for a logical drive with
+ * raid offload configured.
+ */
+static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev[], int ndevices,
+ struct hpsa_scsi_dev_t *logical_drive)
+{
+ struct raid_map_data *map = &logical_drive->raid_map;
+ struct raid_map_disk_data *dd = &map->data[0];
+ int i, j;
+ int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+ le16_to_cpu(map->metadata_disks_per_row);
+ int nraid_map_entries = le16_to_cpu(map->row_cnt) *
+ le16_to_cpu(map->layout_map_count) *
+ total_disks_per_row;
+ int nphys_disk = le16_to_cpu(map->layout_map_count) *
+ total_disks_per_row;
+ int qdepth;
+
+ if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
+ nraid_map_entries = RAID_MAP_MAX_ENTRIES;
+
+ qdepth = 0;
+ for (i = 0; i < nraid_map_entries; i++) {
+ logical_drive->phys_disk[i] = NULL;
+ if (!logical_drive->offload_config)
+ continue;
+ for (j = 0; j < ndevices; j++) {
+ if (dev[j]->devtype != TYPE_DISK)
+ continue;
+ if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
+ continue;
+ if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
+ continue;
+
+ logical_drive->phys_disk[i] = dev[j];
+ if (i < nphys_disk)
+ qdepth = min(h->nr_cmds, qdepth +
+ logical_drive->phys_disk[i]->queue_depth);
+ break;
+ }
+
+ /*
+ * This can happen if a physical drive is removed and
+ * the logical drive is degraded. In that case, the RAID
+ * map data will refer to a physical disk which isn't actually
+ * present. And in that case offload_enabled should already
+ * be 0, but we'll turn it off here just in case
+ */
+ if (!logical_drive->phys_disk[i]) {
+ logical_drive->offload_enabled = 0;
+ logical_drive->queue_depth = h->nr_cmds;
+ }
+ }
+ if (nraid_map_entries)
+ /*
+ * This is correct for reads, too high for full stripe writes,
+ * way too high for partial stripe writes
+ */
+ logical_drive->queue_depth = qdepth;
+ else
+ logical_drive->queue_depth = h->nr_cmds;
+}
+
+static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev[], int ndevices)
+{
+ int i;
+
+ for (i = 0; i < ndevices; i++) {
+ if (dev[i]->devtype != TYPE_DISK)
+ continue;
+ if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
+ continue;
+ hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+ }
+}
+
+static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ struct hpsa_scsi_dev_t *sd[], int nsds)
+{
+ /* sd contains scsi3 addresses and devtypes, and inquiry
+ * data. This function takes what's in sd to be the current
+ * reality and updates h->dev[] to reflect that reality.
+ */
+ int i, entry, device_change, changes = 0;
+ struct hpsa_scsi_dev_t *csd;
+ unsigned long flags;
+ struct hpsa_scsi_dev_t **added, **removed;
+ int nadded, nremoved;
+ struct Scsi_Host *sh = NULL;
+
+ added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
+ removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
+
+ if (!added || !removed) {
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "adjust_hpsa_scsi_table\n");
+ goto free_and_out;
+ }
+
+ spin_lock_irqsave(&h->devlock, flags);
+
+ /* find any devices in h->dev[] that are not in
+ * sd[] and remove them from h->dev[], and for any
+ * devices which have changed, remove the old device
+ * info and add the new device info.
+ * If minor device attributes change, just update
+ * the existing device structure.
+ */
+ i = 0;
+ nremoved = 0;
+ nadded = 0;
+ while (i < h->ndevices) {
+ csd = h->dev[i];
+ device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
+ if (device_change == DEVICE_NOT_FOUND) {
+ changes++;
+ hpsa_scsi_remove_entry(h, hostno, i,
+ removed, &nremoved);
+ continue; /* remove ^^^, hence i not incremented */
+ } else if (device_change == DEVICE_CHANGED) {
+ changes++;
+ hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
+ added, &nadded, removed, &nremoved);
+ /* Set it to NULL to prevent it from being freed
+ * at the bottom of hpsa_update_scsi_devices()
+ */
+ sd[entry] = NULL;
+ } else if (device_change == DEVICE_UPDATED) {
+ hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
+ }
+ i++;
+ }
+
+ /* Now, make sure every device listed in sd[] is also
+ * listed in h->dev[], adding them if they aren't found
+ */
+
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+
+ /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+ * as the SCSI mid-layer does not handle such devices well.
+ * It relentlessly loops sending TUR at 3Hz, then READ(10)
+ * at 160Hz, and prevents the system from coming up.
+ */
+ if (sd[i]->volume_offline) {
+ hpsa_show_volume_status(h, sd[i]);
+ dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+ h->scsi_host->host_no,
+ sd[i]->bus, sd[i]->target, sd[i]->lun);
+ continue;
+ }
+
+ device_change = hpsa_scsi_find_entry(sd[i], h->dev,
+ h->ndevices, &entry);
+ if (device_change == DEVICE_NOT_FOUND) {
+ changes++;
+ if (hpsa_scsi_add_entry(h, hostno, sd[i],
+ added, &nadded) != 0)
+ break;
+ sd[i] = NULL; /* prevent from being freed later. */
+ } else if (device_change == DEVICE_CHANGED) {
+ /* should never happen... */
+ changes++;
+ dev_warn(&h->pdev->dev,
+ "device unexpectedly changed.\n");
+ /* but if it does happen, we just ignore that device */
+ }
+ }
+ spin_unlock_irqrestore(&h->devlock, flags);
+
+ /* Monitor devices which are in one of several NOT READY states to be
+ * brought online later. This must be done without holding h->devlock,
+ * so don't touch h->dev[]
+ */
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+ if (sd[i]->volume_offline)
+ hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+ }
+
+ /* Don't notify scsi mid layer of any changes the first time through
+ * (or if there are no changes) scsi_scan_host will do it later the
+ * first time through.
+ */
+ if (hostno == -1 || !changes)
+ goto free_and_out;
+
+ sh = h->scsi_host;
+ /* Notify scsi mid layer of any removed devices */
+ for (i = 0; i < nremoved; i++) {
+ struct scsi_device *sdev =
+ scsi_device_lookup(sh, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ if (sdev != NULL) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else {
+ /* We don't expect to get here.
+ * future cmds to this device will get selection
+ * timeout as if the device was gone.
+ */
+ dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
+ " for removal.", hostno, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ }
+ kfree(removed[i]);
+ removed[i] = NULL;
+ }
+
+ /* Notify scsi mid layer of any added devices */
+ for (i = 0; i < nadded; i++) {
+ if (scsi_add_device(sh, added[i]->bus,
+ added[i]->target, added[i]->lun) == 0)
+ continue;
+ dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
+ "device not added.\n", hostno, added[i]->bus,
+ added[i]->target, added[i]->lun);
+ /* now we have to remove it from h->dev,
+ * since it didn't get added to scsi mid layer
+ */
+ fixup_botched_add(h, added[i]);
+ }
+
+free_and_out:
+ kfree(added);
+ kfree(removed);
+}
+
+/*
+ * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
+ * Assume's h->devlock is held.
+ */
+static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
+ int bus, int target, int lun)
+{
+ int i;
+ struct hpsa_scsi_dev_t *sd;
+
+ for (i = 0; i < h->ndevices; i++) {
+ sd = h->dev[i];
+ if (sd->bus == bus && sd->target == target && sd->lun == lun)
+ return sd;
+ }
+ return NULL;
+}
+
+/* link sdev->hostdata to our per-device structure. */
+static int hpsa_slave_alloc(struct scsi_device *sdev)
+{
+ struct hpsa_scsi_dev_t *sd;
+ unsigned long flags;
+ struct ctlr_info *h;
+
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->devlock, flags);
+ sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
+ sdev_id(sdev), sdev->lun);
+ if (sd != NULL) {
+ sdev->hostdata = sd;
+ if (sd->queue_depth)
+ scsi_change_queue_depth(sdev, sd->queue_depth);
+ atomic_set(&sd->ioaccel_cmds_out, 0);
+ }
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return 0;
+}
+
+static void hpsa_slave_destroy(struct scsi_device *sdev)
+{
+ /* nothing to do. */
+}
+
+static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
+{
+ int i;
+
+ if (!h->cmd_sg_list)
+ return;
+ for (i = 0; i < h->nr_cmds; i++) {
+ kfree(h->cmd_sg_list[i]);
+ h->cmd_sg_list[i] = NULL;
+ }
+ kfree(h->cmd_sg_list);
+ h->cmd_sg_list = NULL;
+}
+
+static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
+{
+ int i;
+
+ if (h->chainsize <= 0)
+ return 0;
+
+ h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
+ GFP_KERNEL);
+ if (!h->cmd_sg_list) {
+ dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < h->nr_cmds; i++) {
+ h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
+ h->chainsize, GFP_KERNEL);
+ if (!h->cmd_sg_list[i]) {
+ dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+ goto clean;
+ }
+ }
+ return 0;
+
+clean:
+ hpsa_free_sg_chain_blocks(h);
+ return -ENOMEM;
+}
+
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct SGDescriptor *chain_sg, *chain_block;
+ u64 temp64;
+ u32 chain_len;
+
+ chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
+ chain_block = h->cmd_sg_list[c->cmdindex];
+ chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
+ chain_len = sizeof(*chain_sg) *
+ (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
+ chain_sg->Len = cpu_to_le32(chain_len);
+ temp64 = pci_map_single(h->pdev, chain_block, chain_len,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ chain_sg->Addr = cpu_to_le64(0);
+ return -1;
+ }
+ chain_sg->Addr = cpu_to_le64(temp64);
+ return 0;
+}
+
+static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct SGDescriptor *chain_sg;
+
+ if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
+ return;
+
+ chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
+ pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
+ le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+}
+
+
+/* Decode the various types of errors on ioaccel2 path.
+ * Return 1 for any error that should generate a RAID path retry.
+ * Return 0 for errors that don't require a RAID path retry.
+ */
+static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+ struct CommandList *c,
+ struct scsi_cmnd *cmd,
+ struct io_accel2_cmd *c2)
+{
+ int data_len;
+ int retry = 0;
+
+ switch (c2->error_data.serv_response) {
+ case IOACCEL2_SERV_RESPONSE_COMPLETE:
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with check condition.\n",
+ "HP SSD Smart Path");
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
+ if (c2->error_data.data_present !=
+ IOACCEL2_SENSE_DATA_PRESENT) {
+ memset(cmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ break;
+ }
+ /* copy the sense data */
+ data_len = c2->error_data.sense_data_len;
+ if (data_len > SCSI_SENSE_BUFFERSIZE)
+ data_len = SCSI_SENSE_BUFFERSIZE;
+ if (data_len > sizeof(c2->error_data.sense_data_buff))
+ data_len =
+ sizeof(c2->error_data.sense_data_buff);
+ memcpy(cmd->sense_buffer,
+ c2->error_data.sense_data_buff, data_len);
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with BUSY status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with reservation conflict.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
+ /* Make scsi midlayer do unlimited retries */
+ cmd->result = DID_IMM_RETRY << 16;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with aborted status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with unrecognized status: 0x%02x\n",
+ "HP SSD Smart Path", c2->error_data.status);
+ retry = 1;
+ break;
+ }
+ break;
+ case IOACCEL2_SERV_RESPONSE_FAILURE:
+ /* don't expect to get here. */
+ dev_warn(&h->pdev->dev,
+ "unexpected delivery or target failure, status = 0x%02x\n",
+ c2->error_data.status);
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+ dev_warn(&h->pdev->dev, "task management function rejected.\n");
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+ dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: Unrecognized server response: 0x%02x\n",
+ "HP SSD Smart Path",
+ c2->error_data.serv_response);
+ retry = 1;
+ break;
+ }
+
+ return retry; /* retry on raid path? */
+}
+
+static void process_ioaccel2_completion(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ struct hpsa_scsi_dev_t *dev)
+{
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+ /* check for good status */
+ if (likely(c2->error_data.serv_response == 0 &&
+ c2->error_data.status == 0)) {
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr) &&
+ c2->error_data.serv_response ==
+ IOACCEL2_SERV_RESPONSE_FAILURE) {
+ if (c2->error_data.status ==
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+ dev->offload_enabled = 0;
+ goto retry_cmd;
+ }
+
+ if (handle_ioaccel_mode2_error(h, c, cmd, c2))
+ goto retry_cmd;
+
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+
+retry_cmd:
+ INIT_WORK(&c->work, hpsa_command_resubmit_worker);
+ queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+}
+
+static void complete_scsi_command(struct CommandList *cp)
+{
+ struct scsi_cmnd *cmd;
+ struct ctlr_info *h;
+ struct ErrorInfo *ei;
+ struct hpsa_scsi_dev_t *dev;
+
+ unsigned char sense_key;
+ unsigned char asc; /* additional sense code */
+ unsigned char ascq; /* additional sense code qualifier */
+ unsigned long sense_data_size;
+
+ ei = cp->err_info;
+ cmd = cp->scsi_cmd;
+ h = cp->h;
+ dev = cmd->device->hostdata;
+
+ scsi_dma_unmap(cmd); /* undo the DMA mappings */
+ if ((cp->cmd_type == CMD_SCSI) &&
+ (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
+ hpsa_unmap_sg_chain_block(h, cp);
+
+ cmd->result = (DID_OK << 16); /* host byte */
+ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+
+ if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
+ atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+
+ if (cp->cmd_type == CMD_IOACCEL2)
+ return process_ioaccel2_completion(h, cp, cmd, dev);
+
+ cmd->result |= ei->ScsiStatus;
+
+ scsi_set_resid(cmd, ei->ResidualCnt);
+ if (ei->CommandStatus == 0) {
+ if (cp->cmd_type == CMD_IOACCEL1)
+ atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+ cmd_free(h, cp);
+ cmd->scsi_done(cmd);
+ return;
+ }
+
+ /* copy the sense data */
+ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+ sense_data_size = SCSI_SENSE_BUFFERSIZE;
+ else
+ sense_data_size = sizeof(ei->SenseInfo);
+ if (ei->SenseLen < sense_data_size)
+ sense_data_size = ei->SenseLen;
+
+ memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
+
+ /* For I/O accelerator commands, copy over some fields to the normal
+ * CISS header used below for error handling.
+ */
+ if (cp->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
+ cp->Header.SGList = scsi_sg_count(cmd);
+ cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
+ cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
+ IOACCEL1_IOFLAGS_CDBLEN_MASK;
+ cp->Header.tag = c->tag;
+ memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
+ memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr)) {
+ if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
+ dev->offload_enabled = 0;
+ INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
+ queue_work_on(raw_smp_processor_id(),
+ h->resubmit_wq, &cp->work);
+ return;
+ }
+ }
+
+ /* an error has occurred */
+ switch (ei->CommandStatus) {
+
+ case CMD_TARGET_STATUS:
+ if (ei->ScsiStatus) {
+ /* Get sense key */
+ sense_key = 0xf & ei->SenseInfo[2];
+ /* Get additional sense code */
+ asc = ei->SenseInfo[12];
+ /* Get addition sense code qualifier */
+ ascq = ei->SenseInfo[13];
+ }
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+ if (sense_key == ABORTED_COMMAND) {
+ cmd->result |= DID_SOFT_ERROR << 16;
+ break;
+ }
+ break;
+ }
+ /* Problem was not a check condition
+ * Pass it up to the upper layers...
+ */
+ if (ei->ScsiStatus) {
+ dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
+ "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+ "Returning result: 0x%x\n",
+ cp, ei->ScsiStatus,
+ sense_key, asc, ascq,
+ cmd->result);
+ } else { /* scsi status is zero??? How??? */
+ dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
+ "Returning no connection.\n", cp),
+
+ /* Ordinarily, this case should never happen,
+ * but there is a bug in some released firmware
+ * revisions that allows it to happen if, for
+ * example, a 4100 backplane loses power and
+ * the tape drive is in it. We assume that
+ * it's a fatal error of some kind because we
+ * can't show that it wasn't. We will make it
+ * look like selection timeout since that is
+ * the most common reason for this to occur,
+ * and it's severe enough.
+ */
+
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
+
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ break;
+ case CMD_DATA_OVERRUN:
+ dev_warn(&h->pdev->dev,
+ "CDB %16phN data overrun\n", cp->Request.CDB);
+ break;
+ case CMD_INVALID: {
+ /* print_bytes(cp, sizeof(*cp), 1, 0);
+ print_cmd(cp); */
+ /* We get CMD_INVALID if you address a non-existent device
+ * instead of a selection timeout (no response). You will
+ * see this if you yank out a drive, then try to access it.
+ * This is kind of a shame because it means that any other
+ * CMD_INVALID (e.g. driver bug) will get interpreted as a
+ * missing target. */
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
+ cp->Request.CDB);
+ break;
+ case CMD_HARDWARE_ERR:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
+ cp->Request.CDB);
+ break;
+ case CMD_CONNECTION_LOST:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
+ cp->Request.CDB);
+ break;
+ case CMD_ABORTED:
+ cmd->result = DID_ABORT << 16;
+ dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
+ cp->Request.CDB, ei->ScsiStatus);
+ break;
+ case CMD_ABORT_FAILED:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
+ cp->Request.CDB);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
+ dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
+ cp->Request.CDB);
+ break;
+ case CMD_TIMEOUT:
+ cmd->result = DID_TIME_OUT << 16;
+ dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
+ cp->Request.CDB);
+ break;
+ case CMD_UNABORTABLE:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "Command unabortable\n");
+ break;
+ case CMD_IOACCEL_DISABLED:
+ /* This only handles the direct pass-through case since RAID
+ * offload is handled above. Just attempt a retry.
+ */
+ cmd->result = DID_SOFT_ERROR << 16;
+ dev_warn(&h->pdev->dev,
+ "cp %p had HP SSD Smart Path error\n", cp);
+ break;
+ default:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
+ cp, ei->CommandStatus);
+ }
+ cmd_free(h, cp);
+ cmd->scsi_done(cmd);
+}
+
+static void hpsa_pci_unmap(struct pci_dev *pdev,
+ struct CommandList *c, int sg_used, int data_direction)
+{
+ int i;
+
+ for (i = 0; i < sg_used; i++)
+ pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+ le32_to_cpu(c->SG[i].Len),
+ data_direction);
+}
+
+static int hpsa_map_one(struct pci_dev *pdev,
+ struct CommandList *cp,
+ unsigned char *buf,
+ size_t buflen,
+ int data_direction)
+{
+ u64 addr64;
+
+ if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = cpu_to_le16(0);
+ return 0;
+ }
+
+ addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+ if (dma_mapping_error(&pdev->dev, addr64)) {
+ /* Prevent subsequent unmap of something never mapped */
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = cpu_to_le16(0);
+ return -1;
+ }
+ cp->SG[0].Addr = cpu_to_le64(addr64);
+ cp->SG[0].Len = cpu_to_le32(buflen);
+ cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
+ cp->Header.SGList = 1; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
+ return 0;
+}
+
+static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ c->waiting = &wait;
+ enqueue_cmd_and_start_io(h, c);
+ wait_for_completion(&wait);
+}
+
+static u32 lockup_detected(struct ctlr_info *h)
+{
+ int cpu;
+ u32 rc, *lockup_detected;
+
+ cpu = get_cpu();
+ lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+ rc = *lockup_detected;
+ put_cpu();
+ return rc;
+}
+
+static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ /* If controller lockup detected, fake a hardware error. */
+ if (unlikely(lockup_detected(h)))
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ else
+ hpsa_scsi_do_simple_cmd_core(h, c);
+}
+
+#define MAX_DRIVER_CMD_RETRIES 25
+static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
+ struct CommandList *c, int data_direction)
+{
+ int backoff_time = 10, retry_count = 0;
+
+ do {
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ retry_count++;
+ if (retry_count > 3) {
+ msleep(backoff_time);
+ if (backoff_time < 1000)
+ backoff_time *= 2;
+ }
+ } while ((check_for_unit_attention(h, c) ||
+ check_for_busy(h, c)) &&
+ retry_count <= MAX_DRIVER_CMD_RETRIES);
+ hpsa_pci_unmap(h->pdev, c, 1, data_direction);
+}
+
+static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
+ struct CommandList *c)
+{
+ const u8 *cdb = c->Request.CDB;
+ const u8 *lun = c->Header.LUN.LunAddrBytes;
+
+ dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
+ " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ txt, lun[0], lun[1], lun[2], lun[3],
+ lun[4], lun[5], lun[6], lun[7],
+ cdb[0], cdb[1], cdb[2], cdb[3],
+ cdb[4], cdb[5], cdb[6], cdb[7],
+ cdb[8], cdb[9], cdb[10], cdb[11],
+ cdb[12], cdb[13], cdb[14], cdb[15]);
+}
+
+static void hpsa_scsi_interpret_error(struct ctlr_info *h,
+ struct CommandList *cp)
+{
+ const struct ErrorInfo *ei = cp->err_info;
+ struct device *d = &cp->h->pdev->dev;
+ const u8 *sd = ei->SenseInfo;
+
+ switch (ei->CommandStatus) {
+ case CMD_TARGET_STATUS:
+ hpsa_print_cmd(h, "SCSI status", cp);
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
+ dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
+ sd[2] & 0x0f, sd[12], sd[13]);
+ else
+ dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
+ if (ei->ScsiStatus == 0)
+ dev_warn(d, "SCSI status is abnormally zero. "
+ "(probably indicates selection timeout "
+ "reported incorrectly due to a known "
+ "firmware bug, circa July, 2001.)\n");
+ break;
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ break;
+ case CMD_DATA_OVERRUN:
+ hpsa_print_cmd(h, "overrun condition", cp);
+ break;
+ case CMD_INVALID: {
+ /* controller unfortunately reports SCSI passthru's
+ * to non-existent targets as invalid commands.
+ */
+ hpsa_print_cmd(h, "invalid command", cp);
+ dev_warn(d, "probably means device no longer present\n");
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ hpsa_print_cmd(h, "protocol error", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ hpsa_print_cmd(h, "hardware error", cp);
+ break;
+ case CMD_CONNECTION_LOST:
+ hpsa_print_cmd(h, "connection lost", cp);
+ break;
+ case CMD_ABORTED:
+ hpsa_print_cmd(h, "aborted", cp);
+ break;
+ case CMD_ABORT_FAILED:
+ hpsa_print_cmd(h, "abort failed", cp);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ hpsa_print_cmd(h, "unsolicited abort", cp);
+ break;
+ case CMD_TIMEOUT:
+ hpsa_print_cmd(h, "timed out", cp);
+ break;
+ case CMD_UNABORTABLE:
+ hpsa_print_cmd(h, "unabortable", cp);
+ break;
+ default:
+ hpsa_print_cmd(h, "unknown status", cp);
+ dev_warn(d, "Unknown command status %x\n",
+ ei->CommandStatus);
+ }
+}
+
+static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
+ u16 page, unsigned char *buf,
+ unsigned char bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+
+ if (c == NULL) {
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char page,
+ struct bmic_controller_parameters *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+ }
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ /* fill_cmd can't fail here, no data buffer to map. */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ scsi3addr, TYPE_MSG);
+ c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ /* no unmap needed here because no data xfer. */
+
+ ei = c->err_info;
+ if (ei->CommandStatus != 0) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+ cmd_free(h, c);
+ return rc;
+}
+
+static void hpsa_get_raid_level(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char *raid_level)
+{
+ int rc;
+ unsigned char *buf;
+
+ *raid_level = RAID_UNKNOWN;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
+ if (rc == 0)
+ *raid_level = buf[8];
+ if (*raid_level > RAID_UNKNOWN)
+ *raid_level = RAID_UNKNOWN;
+ kfree(buf);
+ return;
+}
+
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+ struct raid_map_data *map_buff)
+{
+ struct raid_map_disk_data *dd = &map_buff->data[0];
+ int map, row, col;
+ u16 map_cnt, row_cnt, disks_per_row;
+
+ if (rc != 0)
+ return;
+
+ /* Show details only if debugging has been activated. */
+ if (h->raid_offload_debug < 2)
+ return;
+
+ dev_info(&h->pdev->dev, "structure_size = %u\n",
+ le32_to_cpu(map_buff->structure_size));
+ dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+ le32_to_cpu(map_buff->volume_blk_size));
+ dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->volume_blk_cnt));
+ dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+ map_buff->phys_blk_shift);
+ dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+ map_buff->parity_rotation_shift);
+ dev_info(&h->pdev->dev, "strip_size = %u\n",
+ le16_to_cpu(map_buff->strip_size));
+ dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_starting_blk));
+ dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_blk_cnt));
+ dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->data_disks_per_row));
+ dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->metadata_disks_per_row));
+ dev_info(&h->pdev->dev, "row_cnt = %u\n",
+ le16_to_cpu(map_buff->row_cnt));
+ dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+ le16_to_cpu(map_buff->layout_map_count));
+ dev_info(&h->pdev->dev, "flags = 0x%x\n",
+ le16_to_cpu(map_buff->flags));
+ dev_info(&h->pdev->dev, "encrypytion = %s\n",
+ le16_to_cpu(map_buff->flags) &
+ RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
+ dev_info(&h->pdev->dev, "dekindex = %u\n",
+ le16_to_cpu(map_buff->dekindex));
+ map_cnt = le16_to_cpu(map_buff->layout_map_count);
+ for (map = 0; map < map_cnt; map++) {
+ dev_info(&h->pdev->dev, "Map%u:\n", map);
+ row_cnt = le16_to_cpu(map_buff->row_cnt);
+ for (row = 0; row < row_cnt; row++) {
+ dev_info(&h->pdev->dev, " Row%u:\n", row);
+ disks_per_row =
+ le16_to_cpu(map_buff->data_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " D%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ disks_per_row =
+ le16_to_cpu(map_buff->metadata_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " M%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ }
+ }
+}
+#else
+static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
+ __attribute__((unused)) int rc,
+ __attribute__((unused)) struct raid_map_data *map_buff)
+{
+}
+#endif
+
+static int hpsa_get_raid_map(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc = 0;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (c == NULL) {
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+ if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
+ sizeof(this_device->raid_map), 0,
+ scsi3addr, TYPE_CMD)) {
+ dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+ cmd_free(h, c);
+ return -ENOMEM;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ cmd_free(h, c);
+ return -1;
+ }
+ cmd_free(h, c);
+
+ /* @todo in the future, dynamically allocate RAID map memory */
+ if (le32_to_cpu(this_device->raid_map.structure_size) >
+ sizeof(this_device->raid_map)) {
+ dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
+ rc = -1;
+ }
+ hpsa_debug_map_buff(h, rc, &this_device->raid_map);
+ return rc;
+}
+
+static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
+ unsigned char scsi3addr[], u16 bmic_device_index,
+ struct bmic_identify_physical_device *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
+ 0, RAID_CTLR_LUNID, TYPE_CMD);
+ if (rc)
+ goto out;
+
+ c->Request.CDB[2] = bmic_device_index & 0xff;
+ c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
+
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
+static int hpsa_vpd_page_supported(struct ctlr_info *h,
+ unsigned char scsi3addr[], u8 page)
+{
+ int rc;
+ int i;
+ int pages;
+ unsigned char *buf, bufsize;
+
+ buf = kzalloc(256, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* Get the size of the page list first */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_unsupported;
+ pages = buf[3];
+ if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
+ bufsize = pages + HPSA_VPD_HEADER_SZ;
+ else
+ bufsize = 255;
+
+ /* Get the whole VPD page list */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, bufsize);
+ if (rc != 0)
+ goto exit_unsupported;
+
+ pages = buf[3];
+ for (i = 1; i <= pages; i++)
+ if (buf[3 + i] == page)
+ goto exit_supported;
+exit_unsupported:
+ kfree(buf);
+ return 0;
+exit_supported:
+ kfree(buf);
+ return 1;
+}
+
+static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc;
+ unsigned char *buf;
+ u8 ioaccel_status;
+
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
+ goto out;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
+ if (rc != 0)
+ goto out;
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x01
+#define OFFLOAD_ENABLED_BIT 0x02
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ this_device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (this_device->offload_config) {
+ this_device->offload_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ if (hpsa_get_raid_map(h, scsi3addr, this_device))
+ this_device->offload_enabled = 0;
+ }
+out:
+ kfree(buf);
+ return;
+}
+
+/* Get the device id from inquiry page 0x83 */
+static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
+ unsigned char *device_id, int buflen)
+{
+ int rc;
+ unsigned char *buf;
+
+ if (buflen > 16)
+ buflen = 16;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
+ if (rc == 0)
+ memcpy(device_id, &buf[8], buflen);
+ kfree(buf);
+ return rc != 0;
+}
+
+static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
+ void *buf, int bufsize,
+ int extended_response)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ unsigned char scsi3addr[8];
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return -1;
+ }
+ /* address the controller */
+ memset(scsi3addr, 0, sizeof(scsi3addr));
+ if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ if (extended_response)
+ c->Request.CDB[1] = extended_response;
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 &&
+ ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ } else {
+ struct ReportLUNdata *rld = buf;
+
+ if (rld->extended_response_flag != extended_response) {
+ dev_err(&h->pdev->dev,
+ "report luns requested format %u, got %u\n",
+ extended_response,
+ rld->extended_response_flag);
+ rc = -1;
+ }
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
+static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
+ struct ReportExtendedLUNdata *buf, int bufsize)
+{
+ return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+ HPSA_REPORT_PHYS_EXTENDED);
+}
+
+static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
+ struct ReportLUNdata *buf, int bufsize)
+{
+ return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
+}
+
+static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
+ int bus, int target, int lun)
+{
+ device->bus = bus;
+ device->target = target;
+ device->lun = lun;
+}
+
+/* Use VPD inquiry to get details of volume status */
+static int hpsa_get_volume_status(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ int rc;
+ int status;
+ int size;
+ unsigned char *buf;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+
+ /* Does controller have VPD for logical volume status? */
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
+ goto exit_failed;
+
+ /* Get the size of the VPD return buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_failed;
+ size = buf[3];
+
+ /* Now get the whole VPD buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, size + HPSA_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_failed;
+ status = buf[4]; /* status byte */
+
+ kfree(buf);
+ return status;
+exit_failed:
+ kfree(buf);
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+}
+
+/* Determine offline status of a volume.
+ * Return either:
+ * 0 (not offline)
+ * 0xff (offline for unknown reasons)
+ * # (integer code indicating one of several NOT READY states
+ * describing why a volume is to be kept offline)
+ */
+static int hpsa_volume_offline(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct CommandList *c;
+ unsigned char *sense, sense_key, asc, ascq;
+ int ldstat = 0;
+ u16 cmd_status;
+ u8 scsi_status;
+#define ASC_LUN_NOT_READY 0x04
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
+
+ c = cmd_alloc(h);
+ if (!c)
+ return 0;
+ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ sense = c->err_info->SenseInfo;
+ sense_key = sense[2];
+ asc = sense[12];
+ ascq = sense[13];
+ cmd_status = c->err_info->CommandStatus;
+ scsi_status = c->err_info->ScsiStatus;
+ cmd_free(h, c);
+ /* Is the volume 'not ready'? */
+ if (cmd_status != CMD_TARGET_STATUS ||
+ scsi_status != SAM_STAT_CHECK_CONDITION ||
+ sense_key != NOT_READY ||
+ asc != ASC_LUN_NOT_READY) {
+ return 0;
+ }
+
+ /* Determine the reason for not ready state */
+ ldstat = hpsa_get_volume_status(h, scsi3addr);
+
+ /* Keep volume offline in certain cases: */
+ switch (ldstat) {
+ case HPSA_LV_UNDERGOING_ERASE:
+ case HPSA_LV_UNDERGOING_RPI:
+ case HPSA_LV_PENDING_RPI:
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ return ldstat;
+ case HPSA_VPD_LV_STATUS_UNSUPPORTED:
+ /* If VPD status page isn't available,
+ * use ASC/ASCQ to determine state
+ */
+ if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
+ (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
+ return ldstat;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int hpsa_update_device_info(struct ctlr_info *h,
+ unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
+ unsigned char *is_OBDR_device)
+{
+
+#define OBDR_SIG_OFFSET 43
+#define OBDR_TAPE_SIG "$DR-10"
+#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
+#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
+
+ unsigned char *inq_buff;
+ unsigned char *obdr_sig;
+
+ inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ if (!inq_buff)
+ goto bail_out;
+
+ /* Do an inquiry to the device to see what it is. */
+ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+ (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+ /* Inquiry failed (msg printed already) */
+ dev_err(&h->pdev->dev,
+ "hpsa_update_device_info: inquiry failed\n");
+ goto bail_out;
+ }
+
+ this_device->devtype = (inq_buff[0] & 0x1f);
+ memcpy(this_device->scsi3addr, scsi3addr, 8);
+ memcpy(this_device->vendor, &inq_buff[8],
+ sizeof(this_device->vendor));
+ memcpy(this_device->model, &inq_buff[16],
+ sizeof(this_device->model));
+ memset(this_device->device_id, 0,
+ sizeof(this_device->device_id));
+ hpsa_get_device_id(h, scsi3addr, this_device->device_id,
+ sizeof(this_device->device_id));
+
+ if (this_device->devtype == TYPE_DISK &&
+ is_logical_dev_addr_mode(scsi3addr)) {
+ int volume_offline;
+
+ hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
+ if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
+ hpsa_get_ioaccel_status(h, scsi3addr, this_device);
+ volume_offline = hpsa_volume_offline(h, scsi3addr);
+ if (volume_offline < 0 || volume_offline > 0xff)
+ volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
+ this_device->volume_offline = volume_offline & 0xff;
+ } else {
+ this_device->raid_level = RAID_UNKNOWN;
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+ this_device->volume_offline = 0;
+ this_device->queue_depth = h->nr_cmds;
+ }
+
+ if (is_OBDR_device) {
+ /* See if this is a One-Button-Disaster-Recovery device
+ * by looking for "$DR-10" at offset 43 in inquiry data.
+ */
+ obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
+ *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
+ strncmp(obdr_sig, OBDR_TAPE_SIG,
+ OBDR_SIG_LEN) == 0);
+ }
+
+ kfree(inq_buff);
+ return 0;
+
+bail_out:
+ kfree(inq_buff);
+ return 1;
+}
+
+static unsigned char *ext_target_model[] = {
+ "MSA2012",
+ "MSA2024",
+ "MSA2312",
+ "MSA2324",
+ "P2000 G3 SAS",
+ "MSA 2040 SAS",
+ NULL,
+};
+
+static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+{
+ int i;
+
+ for (i = 0; ext_target_model[i]; i++)
+ if (strncmp(device->model, ext_target_model[i],
+ strlen(ext_target_model[i])) == 0)
+ return 1;
+ return 0;
+}
+
+/* Helper function to assign bus, target, lun mapping of devices.
+ * Puts non-external target logical volumes on bus 0, external target logical
+ * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
+ * Logical drive target and lun are assigned at this time, but
+ * physical device lun and target assignment are deferred (assigned
+ * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
+ */
+static void figure_bus_target_lun(struct ctlr_info *h,
+ u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
+{
+ u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+
+ if (!is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* physical device, target and lun filled in later */
+ if (is_hba_lunid(lunaddrbytes))
+ hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
+ else
+ /* defer target, lun assignment for physical devices */
+ hpsa_set_bus_target_lun(device, 2, -1, -1);
+ return;
+ }
+ /* It's a logical device */
+ if (is_ext_target(h, device)) {
+ /* external target way, put logicals on bus 1
+ * and match target/lun numbers box
+ * reports, other smart array, bus 0, target 0, match lunid
+ */
+ hpsa_set_bus_target_lun(device,
+ 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
+ return;
+ }
+ hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
+}
+
+/*
+ * If there is no lun 0 on a target, linux won't find any devices.
+ * For the external targets (arrays), we have to manually detect the enclosure
+ * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
+ * it for some reason. *tmpdevice is the target we're adding,
+ * this_device is a pointer into the current element of currentsd[]
+ * that we're building up in update_scsi_devices(), below.
+ * lunzerobits is a bitmap that tracks which targets already have a
+ * lun 0 assigned.
+ * Returns 1 if an enclosure was added, 0 if not.
+ */
+static int add_ext_target_dev(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *tmpdevice,
+ struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
+ unsigned long lunzerobits[], int *n_ext_target_devs)
+{
+ unsigned char scsi3addr[8];
+
+ if (test_bit(tmpdevice->target, lunzerobits))
+ return 0; /* There is already a lun 0 on this target. */
+
+ if (!is_logical_dev_addr_mode(lunaddrbytes))
+ return 0; /* It's the logical targets that may lack lun 0. */
+
+ if (!is_ext_target(h, tmpdevice))
+ return 0; /* Only external target devices have this problem. */
+
+ if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
+ return 0;
+
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = tmpdevice->target;
+ if (is_hba_lunid(scsi3addr))
+ return 0; /* Don't add the RAID controller here. */
+
+ if (is_scsi_rev_5(h))
+ return 0; /* p1210m doesn't need to do this. */
+
+ if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
+ dev_warn(&h->pdev->dev, "Maximum number of external "
+ "target devices exceeded. Check your hardware "
+ "configuration.");
+ return 0;
+ }
+
+ if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
+ return 0;
+ (*n_ext_target_devs)++;
+ hpsa_set_bus_target_lun(this_device,
+ tmpdevice->bus, tmpdevice->target, 0);
+ set_bit(tmpdevice->target, lunzerobits);
+ return 1;
+}
+
+/*
+ * Get address of physical disk used for an ioaccel2 mode command:
+ * 1. Extract ioaccel2 handle from the command.
+ * 2. Find a matching ioaccel2 handle from list of physical disks.
+ * 3. Return:
+ * 1 and set scsi3addr to address of matching physical
+ * 0 if no matching physical disk was found.
+ */
+static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
+ struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
+{
+ struct ReportExtendedLUNdata *physicals = NULL;
+ int responsesize = 24; /* size of physical extended response */
+ int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
+ u32 nphysicals = 0; /* number of reported physical devs */
+ int found = 0; /* found match (1) or not (0) */
+ u32 find; /* handle we need to match */
+ int i;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *d; /* device of request being aborted */
+ struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
+ __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+ __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+
+ if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
+ return 0; /* no match */
+
+ /* point to the ioaccel2 device handle */
+ c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+ if (c2a == NULL)
+ return 0; /* no match */
+
+ scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
+ if (scmd == NULL)
+ return 0; /* no match */
+
+ d = scmd->device->hostdata;
+ if (d == NULL)
+ return 0; /* no match */
+
+ it_nexus = cpu_to_le32(d->ioaccel_handle);
+ scsi_nexus = c2a->scsi_nexus;
+ find = le32_to_cpu(c2a->scsi_nexus);
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ __func__, scsi_nexus,
+ d->device_id[0], d->device_id[1], d->device_id[2],
+ d->device_id[3], d->device_id[4], d->device_id[5],
+ d->device_id[6], d->device_id[7], d->device_id[8],
+ d->device_id[9], d->device_id[10], d->device_id[11],
+ d->device_id[12], d->device_id[13], d->device_id[14],
+ d->device_id[15]);
+
+ /* Get the list of physical devices */
+ physicals = kzalloc(reportsize, GFP_KERNEL);
+ if (physicals == NULL)
+ return 0;
+ if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
+ dev_err(&h->pdev->dev,
+ "Can't lookup %s device handle: report physical LUNs failed.\n",
+ "HP SSD Smart Path");
+ kfree(physicals);
+ return 0;
+ }
+ nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
+ responsesize;
+
+ /* find ioaccel2 handle in list of physicals: */
+ for (i = 0; i < nphysicals; i++) {
+ struct ext_report_lun_entry *entry = &physicals->LUN[i];
+
+ /* handle is in bytes 28-31 of each lun */
+ if (entry->ioaccel_handle != find)
+ continue; /* didn't match */
+ found = 1;
+ memcpy(scsi3addr, entry->lunid, 8);
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
+ __func__, find,
+ entry->ioaccel_handle, scsi3addr);
+ break; /* found it */
+ }
+
+ kfree(physicals);
+ if (found)
+ return 1;
+ else
+ return 0;
+
+}
+/*
+ * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
+ * logdev. The number of luns in physdev and logdev are returned in
+ * *nphysicals and *nlogicals, respectively.
+ * Returns 0 on success, -1 otherwise.
+ */
+static int hpsa_gather_lun_info(struct ctlr_info *h,
+ struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
+ struct ReportLUNdata *logdev, u32 *nlogicals)
+{
+ if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
+ dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
+ return -1;
+ }
+ *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
+ if (*nphysicals > HPSA_MAX_PHYS_LUN) {
+ dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
+ HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
+ *nphysicals = HPSA_MAX_PHYS_LUN;
+ }
+ if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
+ dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
+ return -1;
+ }
+ *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
+ /* Reject Logicals in excess of our max capability. */
+ if (*nlogicals > HPSA_MAX_LUN) {
+ dev_warn(&h->pdev->dev,
+ "maximum logical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", HPSA_MAX_LUN,
+ *nlogicals - HPSA_MAX_LUN);
+ *nlogicals = HPSA_MAX_LUN;
+ }
+ if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
+ dev_warn(&h->pdev->dev,
+ "maximum logical + physical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+ *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
+ *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
+ }
+ return 0;
+}
+
+static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
+ int i, int nphysicals, int nlogicals,
+ struct ReportExtendedLUNdata *physdev_list,
+ struct ReportLUNdata *logdev_list)
+{
+ /* Helper function, figure out where the LUN ID info is coming from
+ * given index i, lists of physical and logical devices, where in
+ * the list the raid controller is supposed to appear (first or last)
+ */
+
+ int logicals_start = nphysicals + (raid_ctlr_position == 0);
+ int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
+
+ if (i == raid_ctlr_position)
+ return RAID_CTLR_LUNID;
+
+ if (i < logicals_start)
+ return &physdev_list->LUN[i -
+ (raid_ctlr_position == 0)].lunid[0];
+
+ if (i < last_device)
+ return &logdev_list->LUN[i - nphysicals -
+ (raid_ctlr_position == 0)][0];
+ BUG();
+ return NULL;
+}
+
+static int hpsa_hba_mode_enabled(struct ctlr_info *h)
+{
+ int rc;
+ int hba_mode_enabled;
+ struct bmic_controller_parameters *ctlr_params;
+ ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
+ GFP_KERNEL);
+
+ if (!ctlr_params)
+ return -ENOMEM;
+ rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
+ sizeof(struct bmic_controller_parameters));
+ if (rc) {
+ kfree(ctlr_params);
+ return rc;
+ }
+
+ hba_mode_enabled =
+ ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
+ kfree(ctlr_params);
+ return hba_mode_enabled;
+}
+
+/* get physical drive ioaccel handle and queue depth */
+static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev,
+ u8 *lunaddrbytes,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+ struct ext_report_lun_entry *rle =
+ (struct ext_report_lun_entry *) lunaddrbytes;
+
+ dev->ioaccel_handle = rle->ioaccel_handle;
+ memset(id_phys, 0, sizeof(*id_phys));
+ rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
+ GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
+ sizeof(*id_phys));
+ if (!rc)
+ /* Reserve space for FW operations */
+#define DRIVE_CMDS_RESERVED_FOR_FW 2
+#define DRIVE_QUEUE_DEPTH 7
+ dev->queue_depth =
+ le16_to_cpu(id_phys->current_queue_depth_limit) -
+ DRIVE_CMDS_RESERVED_FOR_FW;
+ else
+ dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
+ atomic_set(&dev->ioaccel_cmds_out, 0);
+}
+
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+{
+ /* the idea here is we could get notified
+ * that some devices have changed, so we do a report
+ * physical luns and report logical luns cmd, and adjust
+ * our list of devices accordingly.
+ *
+ * The scsi3addr's of devices won't change so long as the
+ * adapter is not reset. That means we can rescan and
+ * tell which devices we already know about, vs. new
+ * devices, vs. disappearing devices.
+ */
+ struct ReportExtendedLUNdata *physdev_list = NULL;
+ struct ReportLUNdata *logdev_list = NULL;
+ struct bmic_identify_physical_device *id_phys = NULL;
+ u32 nphysicals = 0;
+ u32 nlogicals = 0;
+ u32 ndev_allocated = 0;
+ struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
+ int ncurrent = 0;
+ int i, n_ext_target_devs, ndevs_to_allocate;
+ int raid_ctlr_position;
+ int rescan_hba_mode;
+ DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
+
+ currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
+ physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
+ logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
+ tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+ id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
+
+ if (!currentsd || !physdev_list || !logdev_list ||
+ !tmpdevice || !id_phys) {
+ dev_err(&h->pdev->dev, "out of memory\n");
+ goto out;
+ }
+ memset(lunzerobits, 0, sizeof(lunzerobits));
+
+ rescan_hba_mode = hpsa_hba_mode_enabled(h);
+ if (rescan_hba_mode < 0)
+ goto out;
+
+ if (!h->hba_mode_enabled && rescan_hba_mode)
+ dev_warn(&h->pdev->dev, "HBA mode enabled\n");
+ else if (h->hba_mode_enabled && !rescan_hba_mode)
+ dev_warn(&h->pdev->dev, "HBA mode disabled\n");
+
+ h->hba_mode_enabled = rescan_hba_mode;
+
+ if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
+ logdev_list, &nlogicals))
+ goto out;
+
+ /* We might see up to the maximum number of logical and physical disks
+ * plus external target devices, and a device for the local RAID
+ * controller.
+ */
+ ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
+
+ /* Allocate the per device structures */
+ for (i = 0; i < ndevs_to_allocate; i++) {
+ if (i >= HPSA_MAX_DEVICES) {
+ dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
+ " %d devices ignored.\n", HPSA_MAX_DEVICES,
+ ndevs_to_allocate - HPSA_MAX_DEVICES);
+ break;
+ }
+
+ currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
+ if (!currentsd[i]) {
+ dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
+ __FILE__, __LINE__);
+ goto out;
+ }
+ ndev_allocated++;
+ }
+
+ if (is_scsi_rev_5(h))
+ raid_ctlr_position = 0;
+ else
+ raid_ctlr_position = nphysicals + nlogicals;
+
+ /* adjust our table of devices */
+ n_ext_target_devs = 0;
+ for (i = 0; i < nphysicals + nlogicals + 1; i++) {
+ u8 *lunaddrbytes, is_OBDR = 0;
+
+ /* Figure out where the LUN ID info is coming from */
+ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
+ i, nphysicals, nlogicals, physdev_list, logdev_list);
+ /* skip masked physical devices. */
+ if (lunaddrbytes[3] & 0xC0 &&
+ i < nphysicals + (raid_ctlr_position == 0))
+ continue;
+
+ /* Get device type, vendor, model, device id */
+ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
+ &is_OBDR))
+ continue; /* skip it if we can't talk to it. */
+ figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
+ this_device = currentsd[ncurrent];
+
+ /*
+ * For external target devices, we have to insert a LUN 0 which
+ * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
+ * is nonetheless an enclosure device there. We have to
+ * present that otherwise linux won't find anything if
+ * there is no lun 0.
+ */
+ if (add_ext_target_dev(h, tmpdevice, this_device,
+ lunaddrbytes, lunzerobits,
+ &n_ext_target_devs)) {
+ ncurrent++;
+ this_device = currentsd[ncurrent];
+ }
+
+ *this_device = *tmpdevice;
+
+ switch (this_device->devtype) {
+ case TYPE_ROM:
+ /* We don't *really* support actual CD-ROM devices,
+ * just "One Button Disaster Recovery" tape drive
+ * which temporarily pretends to be a CD-ROM drive.
+ * So we check that the device is really an OBDR tape
+ * device by checking for "$DR-10" in bytes 43-48 of
+ * the inquiry data.
+ */
+ if (is_OBDR)
+ ncurrent++;
+ break;
+ case TYPE_DISK:
+ if (h->hba_mode_enabled) {
+ /* never use raid mapper in HBA mode */
+ this_device->offload_enabled = 0;
+ ncurrent++;
+ break;
+ } else if (h->acciopath_status) {
+ if (i >= nphysicals) {
+ ncurrent++;
+ break;
+ }
+ } else {
+ if (i < nphysicals)
+ break;
+ ncurrent++;
+ break;
+ }
+ if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+ h->transMethod & CFGTBL_Trans_io_accel2) {
+ hpsa_get_ioaccel_drive_info(h, this_device,
+ lunaddrbytes, id_phys);
+ atomic_set(&this_device->ioaccel_cmds_out, 0);
+ ncurrent++;
+ }
+ break;
+ case TYPE_TAPE:
+ case TYPE_MEDIUM_CHANGER:
+ ncurrent++;
+ break;
+ case TYPE_RAID:
+ /* Only present the Smartarray HBA as a RAID controller.
+ * If it's a RAID controller other than the HBA itself
+ * (an external RAID controller, MSA500 or similar)
+ * don't present it.
+ */
+ if (!is_hba_lunid(lunaddrbytes))
+ break;
+ ncurrent++;
+ break;
+ default:
+ break;
+ }
+ if (ncurrent >= HPSA_MAX_DEVICES)
+ break;
+ }
+ hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
+ adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
+out:
+ kfree(tmpdevice);
+ for (i = 0; i < ndev_allocated; i++)
+ kfree(currentsd[i]);
+ kfree(currentsd);
+ kfree(physdev_list);
+ kfree(logdev_list);
+ kfree(id_phys);
+}
+
+static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
+ struct scatterlist *sg)
+{
+ u64 addr64 = (u64) sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ desc->Addr = cpu_to_le64(addr64);
+ desc->Len = cpu_to_le32(len);
+ desc->Ext = 0;
+}
+
+/*
+ * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+ * dma mapping and fills in the scatter gather entries of the
+ * hpsa command, cp.
+ */
+static int hpsa_scatter_gather(struct ctlr_info *h,
+ struct CommandList *cp,
+ struct scsi_cmnd *cmd)
+{
+ struct scatterlist *sg;
+ int use_sg, i, sg_index, chained;
+ struct SGDescriptor *curr_sg;
+
+ BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (!use_sg)
+ goto sglist_finished;
+
+ curr_sg = cp->SG;
+ chained = 0;
+ sg_index = 0;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ if (i == h->max_cmd_sg_entries - 1 &&
+ use_sg > h->max_cmd_sg_entries) {
+ chained = 1;
+ curr_sg = h->cmd_sg_list[cp->cmdindex];
+ sg_index = 0;
+ }
+ hpsa_set_sg_descriptor(curr_sg, sg);
+ curr_sg++;
+ }
+
+ /* Back the pointer up to the last entry and mark it as "last". */
+ (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
+
+ if (use_sg + chained > h->maxSG)
+ h->maxSG = use_sg + chained;
+
+ if (chained) {
+ cp->Header.SGList = h->max_cmd_sg_entries;
+ cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
+ if (hpsa_map_sg_chain_block(h, cp)) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
+ return 0;
+ }
+
+sglist_finished:
+
+ cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
+ return 0;
+}
+
+#define IO_ACCEL_INELIGIBLE (1)
+static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
+{
+ int is_write = 0;
+ u32 block;
+ u32 block_cnt;
+
+ /* Perform some CDB fixups if needed using 10 byte reads/writes only */
+ switch (cdb[0]) {
+ case WRITE_6:
+ case WRITE_12:
+ is_write = 1;
+ case READ_6:
+ case READ_12:
+ if (*cdb_len == 6) {
+ block = (((u32) cdb[2]) << 8) | cdb[3];
+ block_cnt = cdb[4];
+ } else {
+ BUG_ON(*cdb_len != 12);
+ block = (((u32) cdb[2]) << 24) |
+ (((u32) cdb[3]) << 16) |
+ (((u32) cdb[4]) << 8) |
+ cdb[5];
+ block_cnt =
+ (((u32) cdb[6]) << 24) |
+ (((u32) cdb[7]) << 16) |
+ (((u32) cdb[8]) << 8) |
+ cdb[9];
+ }
+ if (block_cnt > 0xffff)
+ return IO_ACCEL_INELIGIBLE;
+
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ cdb[2] = (u8) (block >> 24);
+ cdb[3] = (u8) (block >> 16);
+ cdb[4] = (u8) (block >> 8);
+ cdb[5] = (u8) (block);
+ cdb[6] = 0;
+ cdb[7] = (u8) (block_cnt >> 8);
+ cdb[8] = (u8) (block_cnt);
+ cdb[9] = 0;
+ *cdb_len = 10;
+ break;
+ }
+ return 0;
+}
+
+static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+ unsigned int len;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ u64 addr64;
+ int use_sg, i;
+ struct SGDescriptor *curr_sg;
+ u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
+
+ /* TODO: implement chaining support */
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+
+ if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ c->cmd_type = CMD_IOACCEL1;
+
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(*cp));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return use_sg;
+ }
+
+ if (use_sg) {
+ curr_sg = cp->SG;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ total_len += len;
+ curr_sg->Addr = cpu_to_le64(addr64);
+ curr_sg->Len = cpu_to_le32(len);
+ curr_sg->Ext = cpu_to_le32(0);
+ curr_sg++;
+ }
+ (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
+
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ control |= IOACCEL1_CONTROL_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ control |= IOACCEL1_CONTROL_DATA_IN;
+ break;
+ case DMA_NONE:
+ control |= IOACCEL1_CONTROL_NODATAXFER;
+ break;
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+ } else {
+ control |= IOACCEL1_CONTROL_NODATAXFER;
+ }
+
+ c->Header.SGList = use_sg;
+ /* Fill out the command structure to submit */
+ cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
+ cp->transfer_len = cpu_to_le32(total_len);
+ cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
+ (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
+ cp->control = cpu_to_le32(control);
+ memcpy(cp->CDB, cdb, cdb_len);
+ memcpy(cp->CISS_LUN, scsi3addr, 8);
+ /* Tag was already set at init time. */
+ enqueue_cmd_and_start_io(h, c);
+ return 0;
+}
+
+/*
+ * Queue a command directly to a device behind the controller using the
+ * I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+
+ c->phys_disk = dev;
+
+ return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
+ cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
+}
+
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+ struct CommandList *c, struct io_accel2_cmd *cp)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ struct raid_map_data *map = &dev->raid_map;
+ u64 first_block;
+
+ /* Are we doing encryption on this device */
+ if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
+ return;
+ /* Set the data encryption key index. */
+ cp->dekindex = map->dekindex;
+
+ /* Set the encryption enable flag, encoded into direction field. */
+ cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+ /* Set encryption tweak values based on logical block address
+ * If block size is 512, tweak value is LBA.
+ * For other block sizes, tweak is (LBA * block size)/ 512)
+ */
+ switch (cmd->cmnd[0]) {
+ /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+ case WRITE_6:
+ case READ_6:
+ first_block = get_unaligned_be16(&cmd->cmnd[2]);
+ break;
+ case WRITE_10:
+ case READ_10:
+ /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+ case WRITE_12:
+ case READ_12:
+ first_block = get_unaligned_be32(&cmd->cmnd[2]);
+ break;
+ case WRITE_16:
+ case READ_16:
+ first_block = get_unaligned_be64(&cmd->cmnd[2]);
+ break;
+ default:
+ dev_err(&h->pdev->dev,
+ "ERROR: %s: size (0x%x) not supported for encryption\n",
+ __func__, cmd->cmnd[0]);
+ BUG();
+ break;
+ }
+
+ if (le32_to_cpu(map->volume_blk_size) != 512)
+ first_block = first_block *
+ le32_to_cpu(map->volume_blk_size)/512;
+
+ cp->tweak_lower = cpu_to_le32(first_block);
+ cp->tweak_upper = cpu_to_le32(first_block >> 32);
+}
+
+static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+ struct ioaccel2_sg_element *curr_sg;
+ int use_sg, i;
+ struct scatterlist *sg;
+ u64 addr64;
+ u32 len;
+ u32 total_len = 0;
+
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ c->cmd_type = CMD_IOACCEL2;
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(*cp));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ memset(cp, 0, sizeof(*cp));
+ cp->IU_type = IOACCEL2_IU_TYPE;
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return use_sg;
+ }
+
+ if (use_sg) {
+ BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
+ curr_sg = cp->sg;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ total_len += len;
+ curr_sg->address = cpu_to_le64(addr64);
+ curr_sg->length = cpu_to_le32(len);
+ curr_sg->reserved[0] = 0;
+ curr_sg->reserved[1] = 0;
+ curr_sg->reserved[2] = 0;
+ curr_sg->chain_indicator = 0;
+ curr_sg++;
+ }
+
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_DATA_IN;
+ break;
+ case DMA_NONE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_NO_DATA;
+ break;
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+ } else {
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_NO_DATA;
+ }
+
+ /* Set encryption parameters, if necessary */
+ set_encrypt_ioaccel2(h, c, cp);
+
+ cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
+ cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
+ memcpy(cp->cdb, cdb, sizeof(cp->cdb));
+
+ /* fill in sg elements */
+ cp->sg_count = (u8) use_sg;
+
+ cp->data_len = cpu_to_le32(total_len);
+ cp->err_ptr = cpu_to_le64(c->busaddr +
+ offsetof(struct io_accel2_cmd, error_data));
+ cp->err_len = cpu_to_le32(sizeof(cp->error_data));
+
+ enqueue_cmd_and_start_io(h, c);
+ return 0;
+}
+
+/*
+ * Queue a command to the correct I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
+{
+ /* Try to honor the device's queue depth */
+ if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
+ phys_disk->queue_depth) {
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
+ cdb, cdb_len, scsi3addr,
+ phys_disk);
+ else
+ return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
+ cdb, cdb_len, scsi3addr,
+ phys_disk);
+}
+
+static void raid_map_helper(struct raid_map_data *map,
+ int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+ if (offload_to_mirror == 0) {
+ /* use physical disk in the first mirrored group. */
+ *map_index %= le16_to_cpu(map->data_disks_per_row);
+ return;
+ }
+ do {
+ /* determine mirror group that *map_index indicates */
+ *current_group = *map_index /
+ le16_to_cpu(map->data_disks_per_row);
+ if (offload_to_mirror == *current_group)
+ continue;
+ if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
+ /* select map index from next group */
+ *map_index += le16_to_cpu(map->data_disks_per_row);
+ (*current_group)++;
+ } else {
+ /* select map index from first group */
+ *map_index %= le16_to_cpu(map->data_disks_per_row);
+ *current_group = 0;
+ }
+ } while (offload_to_mirror != *current_group);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ struct raid_map_data *map = &dev->raid_map;
+ struct raid_map_disk_data *dd = &map->data[0];
+ int is_write = 0;
+ u32 map_index;
+ u64 first_block, last_block;
+ u32 block_cnt;
+ u32 blocks_per_row;
+ u64 first_row, last_row;
+ u32 first_row_offset, last_row_offset;
+ u32 first_column, last_column;
+ u64 r0_first_row, r0_last_row;
+ u32 r5or6_blocks_per_row;
+ u64 r5or6_first_row, r5or6_last_row;
+ u32 r5or6_first_row_offset, r5or6_last_row_offset;
+ u32 r5or6_first_column, r5or6_last_column;
+ u32 total_disks_per_row;
+ u32 stripesize;
+ u32 first_group, last_group, current_group;
+ u32 map_row;
+ u32 disk_handle;
+ u64 disk_block;
+ u32 disk_block_cnt;
+ u8 cdb[16];
+ u8 cdb_len;
+ u16 strip_size;
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+ int offload_to_mirror;
+
+ /* check for valid opcode, get LBA and block count */
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ is_write = 1;
+ case READ_6:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ block_cnt = cmd->cmnd[4];
+ if (block_cnt == 0)
+ block_cnt = 256;
+ break;
+ case WRITE_10:
+ is_write = 1;
+ case READ_10:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ block_cnt =
+ (((u32) cmd->cmnd[7]) << 8) |
+ cmd->cmnd[8];
+ break;
+ case WRITE_12:
+ is_write = 1;
+ case READ_12:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ block_cnt =
+ (((u32) cmd->cmnd[6]) << 24) |
+ (((u32) cmd->cmnd[7]) << 16) |
+ (((u32) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ break;
+ case WRITE_16:
+ is_write = 1;
+ case READ_16:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 56) |
+ (((u64) cmd->cmnd[3]) << 48) |
+ (((u64) cmd->cmnd[4]) << 40) |
+ (((u64) cmd->cmnd[5]) << 32) |
+ (((u64) cmd->cmnd[6]) << 24) |
+ (((u64) cmd->cmnd[7]) << 16) |
+ (((u64) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ block_cnt =
+ (((u32) cmd->cmnd[10]) << 24) |
+ (((u32) cmd->cmnd[11]) << 16) |
+ (((u32) cmd->cmnd[12]) << 8) |
+ cmd->cmnd[13];
+ break;
+ default:
+ return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
+ }
+ last_block = first_block + block_cnt - 1;
+
+ /* check for write to non-RAID-0 */
+ if (is_write && dev->raid_level != 0)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* check for invalid block or wraparound */
+ if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
+ last_block < first_block)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* calculate stripe information for the request */
+ blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
+ le16_to_cpu(map->strip_size);
+ strip_size = le16_to_cpu(map->strip_size);
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ (void) do_div(tmpdiv, blocks_per_row);
+ first_row = tmpdiv;
+ tmpdiv = last_block;
+ (void) do_div(tmpdiv, blocks_per_row);
+ last_row = tmpdiv;
+ first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+ tmpdiv = first_row_offset;
+ (void) do_div(tmpdiv, strip_size);
+ first_column = tmpdiv;
+ tmpdiv = last_row_offset;
+ (void) do_div(tmpdiv, strip_size);
+ last_column = tmpdiv;
+#else
+ first_row = first_block / blocks_per_row;
+ last_row = last_block / blocks_per_row;
+ first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+ first_column = first_row_offset / strip_size;
+ last_column = last_row_offset / strip_size;
+#endif
+
+ /* if this isn't a single row/column then give to the controller */
+ if ((first_row != last_row) || (first_column != last_column))
+ return IO_ACCEL_INELIGIBLE;
+
+ /* proceeding with driver mapping */
+ total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+ le16_to_cpu(map->metadata_disks_per_row);
+ map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+ le16_to_cpu(map->row_cnt);
+ map_index = (map_row * total_disks_per_row) + first_column;
+
+ switch (dev->raid_level) {
+ case HPSA_RAID_0:
+ break; /* nothing special to do */
+ case HPSA_RAID_1:
+ /* Handles load balance across RAID 1 members.
+ * (2-drive R1 and R10 with even # of drives.)
+ * Appropriate for SSDs, not optimal for HDDs
+ */
+ BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
+ if (dev->offload_to_mirror)
+ map_index += le16_to_cpu(map->data_disks_per_row);
+ dev->offload_to_mirror = !dev->offload_to_mirror;
+ break;
+ case HPSA_RAID_ADM:
+ /* Handles N-way mirrors (R1-ADM)
+ * and R10 with # of drives divisible by 3.)
+ */
+ BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
+
+ offload_to_mirror = dev->offload_to_mirror;
+ raid_map_helper(map, offload_to_mirror,
+ &map_index, &current_group);
+ /* set mirror group to use next time */
+ offload_to_mirror =
+ (offload_to_mirror >=
+ le16_to_cpu(map->layout_map_count) - 1)
+ ? 0 : offload_to_mirror + 1;
+ dev->offload_to_mirror = offload_to_mirror;
+ /* Avoid direct use of dev->offload_to_mirror within this
+ * function since multiple threads might simultaneously
+ * increment it beyond the range of dev->layout_map_count -1.
+ */
+ break;
+ case HPSA_RAID_5:
+ case HPSA_RAID_6:
+ if (le16_to_cpu(map->layout_map_count) <= 1)
+ break;
+
+ /* Verify first and last block are in same RAID group */
+ r5or6_blocks_per_row =
+ le16_to_cpu(map->strip_size) *
+ le16_to_cpu(map->data_disks_per_row);
+ BUG_ON(r5or6_blocks_per_row == 0);
+ stripesize = r5or6_blocks_per_row *
+ le16_to_cpu(map->layout_map_count);
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_group = do_div(tmpdiv, stripesize);
+ tmpdiv = first_group;
+ (void) do_div(tmpdiv, r5or6_blocks_per_row);
+ first_group = tmpdiv;
+ tmpdiv = last_block;
+ last_group = do_div(tmpdiv, stripesize);
+ tmpdiv = last_group;
+ (void) do_div(tmpdiv, r5or6_blocks_per_row);
+ last_group = tmpdiv;
+#else
+ first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+ last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+ if (first_group != last_group)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ (void) do_div(tmpdiv, stripesize);
+ first_row = r5or6_first_row = r0_first_row = tmpdiv;
+ tmpdiv = last_block;
+ (void) do_div(tmpdiv, stripesize);
+ r5or6_last_row = r0_last_row = tmpdiv;
+#else
+ first_row = r5or6_first_row = r0_first_row =
+ first_block / stripesize;
+ r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+ if (r5or6_first_row != r5or6_last_row)
+ return IO_ACCEL_INELIGIBLE;
+
+
+ /* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = first_row_offset;
+ first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+ r5or6_first_row_offset = first_row_offset;
+ tmpdiv = last_block;
+ r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = r5or6_last_row_offset;
+ r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+ tmpdiv = r5or6_first_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ first_column = r5or6_first_column = tmpdiv;
+ tmpdiv = r5or6_last_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ r5or6_last_column = tmpdiv;
+#else
+ first_row_offset = r5or6_first_row_offset =
+ (u32)((first_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ r5or6_last_row_offset =
+ (u32)((last_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ first_column = r5or6_first_column =
+ r5or6_first_row_offset / le16_to_cpu(map->strip_size);
+ r5or6_last_column =
+ r5or6_last_row_offset / le16_to_cpu(map->strip_size);
+#endif
+ if (r5or6_first_column != r5or6_last_column)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* Request is eligible */
+ map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+ le16_to_cpu(map->row_cnt);
+
+ map_index = (first_group *
+ (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
+ (map_row * total_disks_per_row) + first_column;
+ break;
+ default:
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
+ return IO_ACCEL_INELIGIBLE;
+
+ c->phys_disk = dev->phys_disk[map_index];
+
+ disk_handle = dd[map_index].ioaccel_handle;
+ disk_block = le64_to_cpu(map->disk_starting_blk) +
+ first_row * le16_to_cpu(map->strip_size) +
+ (first_row_offset - first_column *
+ le16_to_cpu(map->strip_size));
+ disk_block_cnt = block_cnt;
+
+ /* handle differing logical/physical block sizes */
+ if (map->phys_blk_shift) {
+ disk_block <<= map->phys_blk_shift;
+ disk_block_cnt <<= map->phys_blk_shift;
+ }
+ BUG_ON(disk_block_cnt > 0xffff);
+
+ /* build the new CDB for the physical disk I/O */
+ if (disk_block > 0xffffffff) {
+ cdb[0] = is_write ? WRITE_16 : READ_16;
+ cdb[1] = 0;
+ cdb[2] = (u8) (disk_block >> 56);
+ cdb[3] = (u8) (disk_block >> 48);
+ cdb[4] = (u8) (disk_block >> 40);
+ cdb[5] = (u8) (disk_block >> 32);
+ cdb[6] = (u8) (disk_block >> 24);
+ cdb[7] = (u8) (disk_block >> 16);
+ cdb[8] = (u8) (disk_block >> 8);
+ cdb[9] = (u8) (disk_block);
+ cdb[10] = (u8) (disk_block_cnt >> 24);
+ cdb[11] = (u8) (disk_block_cnt >> 16);
+ cdb[12] = (u8) (disk_block_cnt >> 8);
+ cdb[13] = (u8) (disk_block_cnt);
+ cdb[14] = 0;
+ cdb[15] = 0;
+ cdb_len = 16;
+ } else {
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ cdb[2] = (u8) (disk_block >> 24);
+ cdb[3] = (u8) (disk_block >> 16);
+ cdb[4] = (u8) (disk_block >> 8);
+ cdb[5] = (u8) (disk_block);
+ cdb[6] = 0;
+ cdb[7] = (u8) (disk_block_cnt >> 8);
+ cdb[8] = (u8) (disk_block_cnt);
+ cdb[9] = 0;
+ cdb_len = 10;
+ }
+ return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
+ dev->scsi3addr,
+ dev->phys_disk[map_index]);
+}
+
+/* Submit commands down the "normal" RAID stack path */
+static int hpsa_ciss_submit(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ unsigned char scsi3addr[])
+{
+ cmd->host_scribble = (unsigned char *) c;
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+ c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
+
+ /* Fill in the request block... */
+
+ c->Request.Timeout = 0;
+ memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+ BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
+ c->Request.CDBLen = cmd->cmd_len;
+ memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
+ break;
+ case DMA_FROM_DEVICE:
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
+ break;
+ case DMA_NONE:
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
+ break;
+ case DMA_BIDIRECTIONAL:
+ /* This can happen if a buggy application does a scsi passthru
+ * and sets both inlen and outlen to non-zero. ( see
+ * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
+ */
+
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
+ /* This is technically wrong, and hpsa controllers should
+ * reject it with CMD_INVALID, which is the most correct
+ * response, but non-fibre backends appear to let it
+ * slide by, and give the same results as if this field
+ * were set correctly. Either way is acceptable for
+ * our purposes here.
+ */
+
+ break;
+
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+
+ if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ enqueue_cmd_and_start_io(h, c);
+ /* the cmd'll come back via intr handler in complete_scsi_command() */
+ return 0;
+}
+
+static void hpsa_command_resubmit_worker(struct work_struct *work)
+{
+ struct scsi_cmnd *cmd;
+ struct hpsa_scsi_dev_t *dev;
+ struct CommandList *c =
+ container_of(work, struct CommandList, work);
+
+ cmd = c->scsi_cmd;
+ dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return;
+ }
+ if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
+ /*
+ * If we get here, it means dma mapping failed. Try
+ * again via scsi mid layer, which will then get
+ * SCSI_MLQUEUE_HOST_BUSY.
+ */
+ cmd->result = DID_IMM_RETRY << 16;
+ cmd->scsi_done(cmd);
+ }
+}
+
+/* Running in struct Scsi_Host->host_lock less mode */
+static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+{
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+ unsigned char scsi3addr[8];
+ struct CommandList *c;
+ int rc = 0;
+
+ /* Get the ptr to our adapter structure out of cmd->host. */
+ h = sdev_to_hba(cmd->device);
+ dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
+
+ if (unlikely(lockup_detected(h))) {
+ cmd->result = DID_ERROR << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ c = cmd_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (unlikely(lockup_detected(h))) {
+ cmd->result = DID_ERROR << 16;
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
+ /*
+ * Call alternate submit routine for I/O accelerated commands.
+ * Retries always go down the normal I/O path.
+ */
+ if (likely(cmd->retries == 0 &&
+ cmd->request->cmd_type == REQ_TYPE_FS &&
+ h->acciopath_status)) {
+
+ cmd->host_scribble = (unsigned char *) c;
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+
+ if (dev->offload_enabled) {
+ rc = hpsa_scsi_ioaccel_raid_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on ioaccel path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ } else if (dev->ioaccel_handle) {
+ rc = hpsa_scsi_ioaccel_direct_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on direct map path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ }
+ }
+ return hpsa_ciss_submit(h, c, cmd, scsi3addr);
+}
+
+static void hpsa_scan_complete(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->scan_lock, flags);
+ h->scan_finished = 1;
+ wake_up_all(&h->scan_wait_queue);
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+}
+
+static void hpsa_scan_start(struct Scsi_Host *sh)
+{
+ struct ctlr_info *h = shost_to_hba(sh);
+ unsigned long flags;
+
+ /*
+ * Don't let rescans be initiated on a controller known to be locked
+ * up. If the controller locks up *during* a rescan, that thread is
+ * probably hosed, but at least we can prevent new rescan threads from
+ * piling up on a locked up controller.
+ */
+ if (unlikely(lockup_detected(h)))
+ return hpsa_scan_complete(h);
+
+ /* wait until any scan already in progress is finished. */
+ while (1) {
+ spin_lock_irqsave(&h->scan_lock, flags);
+ if (h->scan_finished)
+ break;
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ wait_event(h->scan_wait_queue, h->scan_finished);
+ /* Note: We don't need to worry about a race between this
+ * thread and driver unload because the midlayer will
+ * have incremented the reference count, so unload won't
+ * happen if we're in here.
+ */
+ }
+ h->scan_finished = 0; /* mark scan as in progress */
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+
+ if (unlikely(lockup_detected(h)))
+ return hpsa_scan_complete(h);
+
+ hpsa_update_scsi_devices(h, h->scsi_host->host_no);
+
+ hpsa_scan_complete(h);
+}
+
+static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
+
+ if (!logical_drive)
+ return -ENODEV;
+
+ if (qdepth < 1)
+ qdepth = 1;
+ else if (qdepth > logical_drive->queue_depth)
+ qdepth = logical_drive->queue_depth;
+
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+ unsigned long elapsed_time)
+{
+ struct ctlr_info *h = shost_to_hba(sh);
+ unsigned long flags;
+ int finished;
+
+ spin_lock_irqsave(&h->scan_lock, flags);
+ finished = h->scan_finished;
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ return finished;
+}
+
+static void hpsa_unregister_scsi(struct ctlr_info *h)
+{
+ /* we are being forcibly unloaded, and may not refuse. */
+ scsi_remove_host(h->scsi_host);
+ scsi_host_put(h->scsi_host);
+ h->scsi_host = NULL;
+}
+
+static int hpsa_register_scsi(struct ctlr_info *h)
+{
+ struct Scsi_Host *sh;
+ int error;
+
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ if (sh == NULL)
+ goto fail;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->this_id = -1;
+ sh->max_channel = 3;
+ sh->max_cmd_len = MAX_COMMAND_SIZE;
+ sh->max_lun = HPSA_MAX_LUN;
+ sh->max_id = HPSA_MAX_LUN;
+ sh->can_queue = h->nr_cmds -
+ HPSA_CMDS_RESERVED_FOR_ABORTS -
+ HPSA_CMDS_RESERVED_FOR_DRIVER -
+ HPSA_MAX_CONCURRENT_PASSTHRUS;
+ sh->cmd_per_lun = sh->can_queue;
+ sh->sg_tablesize = h->maxsgentries;
+ h->scsi_host = sh;
+ sh->hostdata[0] = (unsigned long) h;
+ sh->irq = h->intr[h->intr_mode];
+ sh->unique_id = sh->irq;
+ error = scsi_add_host(sh, &h->pdev->dev);
+ if (error)
+ goto fail_host_put;
+ scsi_scan_host(sh);
+ return 0;
+
+ fail_host_put:
+ dev_err(&h->pdev->dev, "%s: scsi_add_host"
+ " failed for controller %d\n", __func__, h->ctlr);
+ scsi_host_put(sh);
+ return error;
+ fail:
+ dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
+ " failed for controller %d\n", __func__, h->ctlr);
+ return -ENOMEM;
+}
+
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[])
+{
+ int rc;
+ int count = 0;
+ int waittime = 1; /* seconds */
+ struct CommandList *c;
+
+ c = cmd_alloc(h);
+ if (!c) {
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "wait_for_device_to_become_ready.\n");
+ return IO_ERROR;
+ }
+
+ /* Send test unit ready until device ready, or give up. */
+ while (count < HPSA_TUR_RETRY_LIMIT) {
+
+ /* Wait for a bit. do this first, because if we send
+ * the TUR right away, the reset will just abort it.
+ */
+ msleep(1000 * waittime);
+ count++;
+ rc = 0; /* Device ready. */
+
+ /* Increase wait time with each try, up to a point. */
+ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
+ waittime = waittime * 2;
+
+ /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ /* no unmap needed here because no data xfer. */
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ break;
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
+ (c->err_info->SenseInfo[2] == NO_SENSE ||
+ c->err_info->SenseInfo[2] == UNIT_ATTENTION))
+ break;
+
+ dev_warn(&h->pdev->dev, "waiting %d secs "
+ "for device to become ready.\n", waittime);
+ rc = 1; /* device not ready. */
+ }
+
+ if (rc)
+ dev_warn(&h->pdev->dev, "giving up on device.\n");
+ else
+ dev_warn(&h->pdev->dev, "device is ready.\n");
+
+ cmd_free(h, c);
+ return rc;
+}
+
+/* Need at least one of these error handlers to keep ../scsi/hosts.c from
+ * complaining. Doing a host- or bus-reset can't do anything good here.
+ */
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ int rc;
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+
+ /* find the controller to which the command to be aborted was sent */
+ h = sdev_to_hba(scsicmd->device);
+ if (h == NULL) /* paranoia */
+ return FAILED;
+
+ if (lockup_detected(h))
+ return FAILED;
+
+ dev = scsicmd->device->hostdata;
+ if (!dev) {
+ dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
+ "device lookup failed.\n");
+ return FAILED;
+ }
+ dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+ /* send a reset to the SCSI LUN which the command was sent to */
+ rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
+ if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
+ return SUCCESS;
+
+ dev_warn(&h->pdev->dev, "resetting device failed.\n");
+ return FAILED;
+}
+
+static void swizzle_abort_tag(u8 *tag)
+{
+ u8 original_tag[8];
+
+ memcpy(original_tag, tag, 8);
+ tag[0] = original_tag[3];
+ tag[1] = original_tag[2];
+ tag[2] = original_tag[1];
+ tag[3] = original_tag[0];
+ tag[4] = original_tag[7];
+ tag[5] = original_tag[6];
+ tag[6] = original_tag[5];
+ tag[7] = original_tag[4];
+}
+
+static void hpsa_get_tag(struct ctlr_info *h,
+ struct CommandList *c, __le32 *taglower, __le32 *tagupper)
+{
+ u64 tag;
+ if (c->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
+ &h->ioaccel_cmd_pool[c->cmdindex];
+ tag = le64_to_cpu(cm1->tag);
+ *tagupper = cpu_to_le32(tag >> 32);
+ *taglower = cpu_to_le32(tag);
+ return;
+ }
+ if (c->cmd_type == CMD_IOACCEL2) {
+ struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
+ &h->ioaccel2_cmd_pool[c->cmdindex];
+ /* upper tag not used in ioaccel2 mode */
+ memset(tagupper, 0, sizeof(*tagupper));
+ *taglower = cm2->Tag;
+ return;
+ }
+ tag = le64_to_cpu(c->Header.tag);
+ *tagupper = cpu_to_le32(tag >> 32);
+ *taglower = cpu_to_le32(tag);
+}
+
+static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
+ struct CommandList *abort, int swizzle)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+ __le32 tagupper, taglower;
+
+ c = cmd_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ /* fill_cmd can't fail here, no buffer to map */
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ 0, 0, scsi3addr, TYPE_MSG);
+ if (swizzle)
+ swizzle_abort_tag(&c->Request.CDB[4]);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
+ __func__, tagupper, taglower);
+ /* no unmap needed here because no data xfer. */
+
+ ei = c->err_info;
+ switch (ei->CommandStatus) {
+ case CMD_SUCCESS:
+ break;
+ case CMD_UNABORTABLE: /* Very common, don't make noise. */
+ rc = -1;
+ break;
+ default:
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
+ __func__, tagupper, taglower);
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ break;
+ }
+ cmd_free(h, c);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+ __func__, tagupper, taglower);
+ return rc;
+}
+
+/* ioaccel2 path firmware cannot handle abort task requests.
+ * Change abort requests to physical target reset, and send to the
+ * address of the physical disk used for the ioaccel 2 command.
+ * Return 0 on success (IO_OK)
+ * -1 on failure
+ */
+
+static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct CommandList *abort)
+{
+ int rc = IO_OK;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
+ unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
+ unsigned char *psa = &phys_scsi3addr[0];
+
+ /* Get a pointer to the hpsa logical device. */
+ scmd = abort->scsi_cmd;
+ dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
+ if (dev == NULL) {
+ dev_warn(&h->pdev->dev,
+ "Cannot abort: no device pointer for command.\n");
+ return -1; /* not abortable */
+ }
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+ scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+ scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+
+ if (!dev->offload_enabled) {
+ dev_warn(&h->pdev->dev,
+ "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
+ return -1; /* not abortable */
+ }
+
+ /* Incoming scsi3addr is logical addr. We need physical disk addr. */
+ if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+ dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+ return -1; /* not abortable */
+ }
+
+ /* send the reset */
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev,
+ "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ return rc; /* failed to reset */
+ }
+
+ /* wait for device to recover */
+ if (wait_for_device_to_become_ready(h, psa) != 0) {
+ dev_warn(&h->pdev->dev,
+ "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ return -1; /* failed to recover */
+ }
+
+ /* device recovered */
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+
+ return rc; /* success */
+}
+
+/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
+ * tell which kind we're dealing with, so we send the abort both ways. There
+ * shouldn't be any collisions between swizzled and unswizzled tags due to the
+ * way we construct our tags but we check anyway in case the assumptions which
+ * make this true someday become false.
+ */
+static int hpsa_send_abort_both_ways(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct CommandList *abort)
+{
+ /* ioccelerator mode 2 commands should be aborted via the
+ * accelerated path, since RAID path is unaware of these commands,
+ * but underlying firmware can't handle abort TMF.
+ * Change abort to physical device reset.
+ */
+ if (abort->cmd_type == CMD_IOACCEL2)
+ return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+
+ return hpsa_send_abort(h, scsi3addr, abort, 0) &&
+ hpsa_send_abort(h, scsi3addr, abort, 1);
+}
+
+/* Send an abort for the specified command.
+ * If the device and controller support it,
+ * send a task abort request.
+ */
+static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
+{
+
+ int i, rc;
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+ struct CommandList *abort; /* pointer to command to be aborted */
+ struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
+ char msg[256]; /* For debug messaging. */
+ int ml = 0;
+ __le32 tagupper, taglower;
+ int refcount;
+
+ /* Find the controller of the command to be aborted */
+ h = sdev_to_hba(sc->device);
+ if (WARN(h == NULL,
+ "ABORT REQUEST FAILED, Controller lookup failed.\n"))
+ return FAILED;
+
+ if (lockup_detected(h))
+ return FAILED;
+
+ /* Check that controller supports some kind of task abort */
+ if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
+ !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ return FAILED;
+
+ memset(msg, 0, sizeof(msg));
+ ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
+ h->scsi_host->host_no, sc->device->channel,
+ sc->device->id, sc->device->lun);
+
+ /* Find the device of the command to be aborted */
+ dev = sc->device->hostdata;
+ if (!dev) {
+ dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
+ msg);
+ return FAILED;
+ }
+
+ /* Get SCSI command to be aborted */
+ abort = (struct CommandList *) sc->host_scribble;
+ if (abort == NULL) {
+ /* This can happen if the command already completed. */
+ return SUCCESS;
+ }
+ refcount = atomic_inc_return(&abort->refcount);
+ if (refcount == 1) { /* Command is done already. */
+ cmd_free(h, abort);
+ return SUCCESS;
+ }
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
+ ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
+ as = abort->scsi_cmd;
+ if (as != NULL)
+ ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
+ as->cmnd[0], as->serial_number);
+ dev_dbg(&h->pdev->dev, "%s\n", msg);
+ dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+ /*
+ * Command is in flight, or possibly already completed
+ * by the firmware (but not to the scsi mid layer) but we can't
+ * distinguish which. Send the abort down.
+ */
+ rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
+ if (rc != 0) {
+ dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
+ dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
+ h->scsi_host->host_no,
+ dev->bus, dev->target, dev->lun);
+ cmd_free(h, abort);
+ return FAILED;
+ }
+ dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
+
+ /* If the abort(s) above completed and actually aborted the
+ * command, then the command to be aborted should already be
+ * completed. If not, wait around a bit more to see if they
+ * manage to complete normally.
+ */
+#define ABORT_COMPLETE_WAIT_SECS 30
+ for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
+ refcount = atomic_read(&abort->refcount);
+ if (refcount < 2) {
+ cmd_free(h, abort);
+ return SUCCESS;
+ } else {
+ msleep(100);
+ }
+ }
+ dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
+ msg, ABORT_COMPLETE_WAIT_SECS);
+ cmd_free(h, abort);
+ return FAILED;
+}
+
+/*
+ * For operations that cannot sleep, a command block is allocated at init,
+ * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+ * which ones are free or in use. Lock must be held when calling this.
+ * cmd_free() is the complement.
+ */
+
+static struct CommandList *cmd_alloc(struct ctlr_info *h)
+{
+ struct CommandList *c;
+ int i;
+ union u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+ int refcount;
+ unsigned long offset;
+
+ /*
+ * There is some *extremely* small but non-zero chance that that
+ * multiple threads could get in here, and one thread could
+ * be scanning through the list of bits looking for a free
+ * one, but the free ones are always behind him, and other
+ * threads sneak in behind him and eat them before he can
+ * get to them, so that while there is always a free one, a
+ * very unlucky thread might be starved anyway, never able to
+ * beat the other threads. In reality, this happens so
+ * infrequently as to be indistinguishable from never.
+ */
+
+ offset = h->last_allocation; /* benignly racy */
+ for (;;) {
+ i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
+ if (unlikely(i == h->nr_cmds)) {
+ offset = 0;
+ continue;
+ }
+ c = h->cmd_pool + i;
+ refcount = atomic_inc_return(&c->refcount);
+ if (unlikely(refcount > 1)) {
+ cmd_free(h, c); /* already in use */
+ offset = (i + 1) % h->nr_cmds;
+ continue;
+ }
+ set_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ break; /* it's ours now. */
+ }
+ h->last_allocation = i; /* benignly racy */
+
+ /* Zero out all of commandlist except the last field, refcount */
+ memset(c, 0, offsetof(struct CommandList, refcount));
+ c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
+ cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
+ c->err_info = h->errinfo_pool + i;
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ err_dma_handle = h->errinfo_pool_dhandle
+ + i * sizeof(*c->err_info);
+
+ c->cmdindex = i;
+
+ c->busaddr = (u32) cmd_dma_handle;
+ temp64.val = (u64) err_dma_handle;
+ c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
+ c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
+
+ c->h = h;
+ return c;
+}
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c)
+{
+ if (atomic_dec_and_test(&c->refcount)) {
+ int i;
+
+ i = c - h->cmd_pool;
+ clear_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ }
+}
+
+#ifdef CONFIG_COMPAT
+
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
+ void __user *arg)
+{
+ IOCTL32_Command_struct __user *arg32 =
+ (IOCTL32_Command_struct __user *) arg;
+ IOCTL_Command_struct arg64;
+ IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+ int err;
+ u32 cp;
+
+ memset(&arg64, 0, sizeof(arg64));
+ err = 0;
+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+ sizeof(arg64.LUN_info));
+ err |= copy_from_user(&arg64.Request, &arg32->Request,
+ sizeof(arg64.Request));
+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+ sizeof(arg64.error_info));
+ err |= get_user(arg64.buf_size, &arg32->buf_size);
+ err |= get_user(cp, &arg32->buf);
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+ if (err)
+ return -EFAULT;
+
+ err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
+ if (err)
+ return err;
+ err |= copy_in_user(&arg32->error_info, &p->error_info,
+ sizeof(arg32->error_info));
+ if (err)
+ return -EFAULT;
+ return err;
+}
+
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ int cmd, void __user *arg)
+{
+ BIG_IOCTL32_Command_struct __user *arg32 =
+ (BIG_IOCTL32_Command_struct __user *) arg;
+ BIG_IOCTL_Command_struct arg64;
+ BIG_IOCTL_Command_struct __user *p =
+ compat_alloc_user_space(sizeof(arg64));
+ int err;
+ u32 cp;
+
+ memset(&arg64, 0, sizeof(arg64));
+ err = 0;
+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+ sizeof(arg64.LUN_info));
+ err |= copy_from_user(&arg64.Request, &arg32->Request,
+ sizeof(arg64.Request));
+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+ sizeof(arg64.error_info));
+ err |= get_user(arg64.buf_size, &arg32->buf_size);
+ err |= get_user(arg64.malloc_size, &arg32->malloc_size);
+ err |= get_user(cp, &arg32->buf);
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+ if (err)
+ return -EFAULT;
+
+ err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
+ if (err)
+ return err;
+ err |= copy_in_user(&arg32->error_info, &p->error_info,
+ sizeof(arg32->error_info));
+ if (err)
+ return -EFAULT;
+ return err;
+}
+
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+{
+ switch (cmd) {
+ case CCISS_GETPCIINFO:
+ case CCISS_GETINTINFO:
+ case CCISS_SETINTINFO:
+ case CCISS_GETNODENAME:
+ case CCISS_SETNODENAME:
+ case CCISS_GETHEARTBEAT:
+ case CCISS_GETBUSTYPES:
+ case CCISS_GETFIRMVER:
+ case CCISS_GETDRIVVER:
+ case CCISS_REVALIDVOLS:
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ case CCISS_RESCANDISK:
+ case CCISS_GETLUNINFO:
+ return hpsa_ioctl(dev, cmd, arg);
+
+ case CCISS_PASSTHRU32:
+ return hpsa_ioctl32_passthru(dev, cmd, arg);
+ case CCISS_BIG_PASSTHRU32:
+ return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
+static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ struct hpsa_pci_info pciinfo;
+
+ if (!argp)
+ return -EINVAL;
+ pciinfo.domain = pci_domain_nr(h->pdev->bus);
+ pciinfo.bus = h->pdev->bus->number;
+ pciinfo.dev_fn = h->pdev->devfn;
+ pciinfo.board_id = h->board_id;
+ if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ DriverVer_type DriverVer;
+ unsigned char vmaj, vmin, vsubmin;
+ int rc;
+
+ rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
+ &vmaj, &vmin, &vsubmin);
+ if (rc != 3) {
+ dev_info(&h->pdev->dev, "driver version string '%s' "
+ "unrecognized.", HPSA_DRIVER_VERSION);
+ vmaj = 0;
+ vmin = 0;
+ vsubmin = 0;
+ }
+ DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
+ if (!argp)
+ return -EINVAL;
+ if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
+ return -EFAULT;
+ return 0;
+}
+
+static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ IOCTL_Command_struct iocommand;
+ struct CommandList *c;
+ char *buff = NULL;
+ u64 temp64;
+ int rc = 0;
+
+ if (!argp)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
+ return -EFAULT;
+ if ((iocommand.buf_size < 1) &&
+ (iocommand.Request.Type.Direction != XFER_NONE)) {
+ return -EINVAL;
+ }
+ if (iocommand.buf_size > 0) {
+ buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+ if (buff == NULL)
+ return -EFAULT;
+ if (iocommand.Request.Type.Direction & XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf,
+ iocommand.buf_size)) {
+ rc = -EFAULT;
+ goto out_kfree;
+ }
+ } else {
+ memset(buff, 0, iocommand.buf_size);
+ }
+ }
+ c = cmd_alloc(h);
+ if (c == NULL) {
+ rc = -ENOMEM;
+ goto out_kfree;
+ }
+ /* Fill in the command type */
+ c->cmd_type = CMD_IOCTL_PEND;
+ /* Fill in Command Header */
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ if (iocommand.buf_size > 0) { /* buffer to fill */
+ c->Header.SGList = 1;
+ c->Header.SGTotal = cpu_to_le16(1);
+ } else { /* no buffers to fill */
+ c->Header.SGList = 0;
+ c->Header.SGTotal = cpu_to_le16(0);
+ }
+ memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
+
+ /* Fill in Request block */
+ memcpy(&c->Request, &iocommand.Request,
+ sizeof(c->Request));
+
+ /* Fill in the scatter gather information */
+ if (iocommand.buf_size > 0) {
+ temp64 = pci_map_single(h->pdev, buff,
+ iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
+ c->SG[0].Addr = cpu_to_le64(0);
+ c->SG[0].Len = cpu_to_le32(0);
+ rc = -ENOMEM;
+ goto out;
+ }
+ c->SG[0].Addr = cpu_to_le64(temp64);
+ c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
+ c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
+ }
+ hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
+ if (iocommand.buf_size > 0)
+ hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ check_ioctl_unit_attention(h, c);
+
+ /* Copy the error information out */
+ memcpy(&iocommand.error_info, c->err_info,
+ sizeof(iocommand.error_info));
+ if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
+ rc = -EFAULT;
+ goto out;
+ }
+ if ((iocommand.Request.Type.Direction & XFER_READ) &&
+ iocommand.buf_size > 0) {
+ /* Copy the data out of the buffer we created */
+ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ cmd_free(h, c);
+out_kfree:
+ kfree(buff);
+ return rc;
+}
+
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ BIG_IOCTL_Command_struct *ioc;
+ struct CommandList *c;
+ unsigned char **buff = NULL;
+ int *buff_size = NULL;
+ u64 temp64;
+ BYTE sg_used = 0;
+ int status = 0;
+ u32 left;
+ u32 sz;
+ BYTE __user *data_ptr;
+
+ if (!argp)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ ioc = (BIG_IOCTL_Command_struct *)
+ kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ if ((ioc->buf_size < 1) &&
+ (ioc->Request.Type.Direction != XFER_NONE)) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ /* Check kmalloc limits using all SGs */
+ if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
+ if (!buff) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
+ if (!buff_size) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ left = ioc->buf_size;
+ data_ptr = ioc->buf;
+ while (left) {
+ sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+ buff_size[sg_used] = sz;
+ buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+ if (buff[sg_used] == NULL) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ if (ioc->Request.Type.Direction & XFER_WRITE) {
+ if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ } else
+ memset(buff[sg_used], 0, sz);
+ left -= sz;
+ data_ptr += sz;
+ sg_used++;
+ }
+ c = cmd_alloc(h);
+ if (c == NULL) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ c->cmd_type = CMD_IOCTL_PEND;
+ c->Header.ReplyQueue = 0;
+ c->Header.SGList = (u8) sg_used;
+ c->Header.SGTotal = cpu_to_le16(sg_used);
+ memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
+ memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
+ if (ioc->buf_size > 0) {
+ int i;
+ for (i = 0; i < sg_used; i++) {
+ temp64 = pci_map_single(h->pdev, buff[i],
+ buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev,
+ (dma_addr_t) temp64)) {
+ c->SG[i].Addr = cpu_to_le64(0);
+ c->SG[i].Len = cpu_to_le32(0);
+ hpsa_pci_unmap(h->pdev, c, i,
+ PCI_DMA_BIDIRECTIONAL);
+ status = -ENOMEM;
+ goto cleanup0;
+ }
+ c->SG[i].Addr = cpu_to_le64(temp64);
+ c->SG[i].Len = cpu_to_le32(buff_size[i]);
+ c->SG[i].Ext = cpu_to_le32(0);
+ }
+ c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
+ }
+ hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
+ if (sg_used)
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ check_ioctl_unit_attention(h, c);
+ /* Copy the error information out */
+ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
+ if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+ status = -EFAULT;
+ goto cleanup0;
+ }
+ if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
+ int i;
+
+ /* Copy the data out of the buffer we created */
+ BYTE __user *ptr = ioc->buf;
+ for (i = 0; i < sg_used; i++) {
+ if (copy_to_user(ptr, buff[i], buff_size[i])) {
+ status = -EFAULT;
+ goto cleanup0;
+ }
+ ptr += buff_size[i];
+ }
+ }
+ status = 0;
+cleanup0:
+ cmd_free(h, c);
+cleanup1:
+ if (buff) {
+ int i;
+
+ for (i = 0; i < sg_used; i++)
+ kfree(buff[i]);
+ kfree(buff);
+ }
+ kfree(buff_size);
+ kfree(ioc);
+ return status;
+}
+
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+ (void) check_for_unit_attention(h, c);
+}
+
+/*
+ * ioctl
+ */
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+{
+ struct ctlr_info *h;
+ void __user *argp = (void __user *)arg;
+ int rc;
+
+ h = sdev_to_hba(dev);
+
+ switch (cmd) {
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ hpsa_scan_start(h->scsi_host);
+ return 0;
+ case CCISS_GETPCIINFO:
+ return hpsa_getpciinfo_ioctl(h, argp);
+ case CCISS_GETDRIVVER:
+ return hpsa_getdrivver_ioctl(h, argp);
+ case CCISS_PASSTHRU:
+ if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
+ return -EAGAIN;
+ rc = hpsa_passthru_ioctl(h, argp);
+ atomic_inc(&h->passthru_cmds_avail);
+ return rc;
+ case CCISS_BIG_PASSTHRU:
+ if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
+ return -EAGAIN;
+ rc = hpsa_big_passthru_ioctl(h, argp);
+ atomic_inc(&h->passthru_cmds_avail);
+ return rc;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
+{
+ struct CommandList *c;
+
+ c = cmd_alloc(h);
+ if (!c)
+ return -ENOMEM;
+ /* fill_cmd can't fail here, no data buffer to map */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ RAID_CTLR_LUNID, TYPE_MSG);
+ c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
+ c->waiting = NULL;
+ enqueue_cmd_and_start_io(h, c);
+ /* Don't wait for completion, the reset won't complete. Don't free
+ * the command either. This is the last command we will send before
+ * re-initializing everything, so it doesn't matter and won't leak.
+ */
+ return 0;
+}
+
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
+ int cmd_type)
+{
+ int pci_dir = XFER_NONE;
+ struct CommandList *a; /* for commands to be aborted */
+
+ c->cmd_type = CMD_IOCTL_PEND;
+ c->Header.ReplyQueue = 0;
+ if (buff != NULL && size > 0) {
+ c->Header.SGList = 1;
+ c->Header.SGTotal = cpu_to_le16(1);
+ } else {
+ c->Header.SGList = 0;
+ c->Header.SGTotal = cpu_to_le16(0);
+ }
+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
+
+ if (cmd_type == TYPE_CMD) {
+ switch (cmd) {
+ case HPSA_INQUIRY:
+ /* are we trying to read a vital product page */
+ if (page_code & VPD_PAGE) {
+ c->Request.CDB[1] = 0x01;
+ c->Request.CDB[2] = (page_code & 0xff);
+ }
+ c->Request.CDBLen = 6;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = HPSA_INQUIRY;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
+ case HPSA_REPORT_LOG:
+ case HPSA_REPORT_PHYS:
+ /* Talking to controller so It's a physical command
+ mode = 00 target = 0. Nothing to write.
+ */
+ c->Request.CDBLen = 12;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ c->Request.CDB[9] = size & 0xFF;
+ break;
+ case HPSA_CACHE_FLUSH:
+ c->Request.CDBLen = 12;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type,
+ ATTR_SIMPLE, XFER_WRITE);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_WRITE;
+ c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
+ break;
+ case TEST_UNIT_READY:
+ c->Request.CDBLen = 6;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
+ c->Request.Timeout = 0;
+ break;
+ case HPSA_GET_RAID_MAP:
+ c->Request.CDBLen = 12;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = HPSA_CISS_READ;
+ c->Request.CDB[1] = cmd;
+ c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ c->Request.CDB[9] = size & 0xFF;
+ break;
+ case BMIC_SENSE_CONTROLLER_PARAMETERS:
+ c->Request.CDBLen = 10;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_READ;
+ c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ break;
+ case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+ c->Request.CDBLen = 10;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_READ;
+ c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0XFF;
+ break;
+ default:
+ dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
+ BUG();
+ return -1;
+ }
+ } else if (cmd_type == TYPE_MSG) {
+ switch (cmd) {
+
+ case HPSA_DEVICE_RESET_MSG:
+ c->Request.CDBLen = 16;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
+ c->Request.Timeout = 0; /* Don't time out */
+ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
+ /* If bytes 4-7 are zero, it means reset the */
+ /* LunID device */
+ c->Request.CDB[4] = 0x00;
+ c->Request.CDB[5] = 0x00;
+ c->Request.CDB[6] = 0x00;
+ c->Request.CDB[7] = 0x00;
+ break;
+ case HPSA_ABORT_MSG:
+ a = buff; /* point to command to be aborted */
+ dev_dbg(&h->pdev->dev,
+ "Abort Tag:0x%016llx request Tag:0x%016llx",
+ a->Header.tag, c->Header.tag);
+ c->Request.CDBLen = 16;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type,
+ ATTR_SIMPLE, XFER_WRITE);
+ c->Request.Timeout = 0; /* Don't time out */
+ c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
+ c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
+ c->Request.CDB[2] = 0x00; /* reserved */
+ c->Request.CDB[3] = 0x00; /* reserved */
+ /* Tag to abort goes in CDB[4]-CDB[11] */
+ memcpy(&c->Request.CDB[4], &a->Header.tag,
+ sizeof(a->Header.tag));
+ c->Request.CDB[12] = 0x00; /* reserved */
+ c->Request.CDB[13] = 0x00; /* reserved */
+ c->Request.CDB[14] = 0x00; /* reserved */
+ c->Request.CDB[15] = 0x00; /* reserved */
+ break;
+ default:
+ dev_warn(&h->pdev->dev, "unknown message type %d\n",
+ cmd);
+ BUG();
+ }
+ } else {
+ dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
+ BUG();
+ }
+
+ switch (GET_DIR(c->Request.type_attr_dir)) {
+ case XFER_READ:
+ pci_dir = PCI_DMA_FROMDEVICE;
+ break;
+ case XFER_WRITE:
+ pci_dir = PCI_DMA_TODEVICE;
+ break;
+ case XFER_NONE:
+ pci_dir = PCI_DMA_NONE;
+ break;
+ default:
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ }
+ if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ return -1;
+ return 0;
+}
+
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+ ulong page_base = ((ulong) base) & PAGE_MASK;
+ ulong page_offs = ((ulong) base) - page_base;
+ void __iomem *page_remapped = ioremap_nocache(page_base,
+ page_offs + size);
+
+ return page_remapped ? (page_remapped + page_offs) : NULL;
+}
+
+static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
+{
+ return h->access.command_completed(h, q);
+}
+
+static inline bool interrupt_pending(struct ctlr_info *h)
+{
+ return h->access.intr_pending(h);
+}
+
+static inline long interrupt_not_for_us(struct ctlr_info *h)
+{
+ return (h->access.intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0);
+}
+
+static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+ u32 raw_tag)
+{
+ if (unlikely(tag_index >= h->nr_cmds)) {
+ dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void finish_cmd(struct CommandList *c)
+{
+ dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
+ if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
+ || c->cmd_type == CMD_IOACCEL2))
+ complete_scsi_command(c);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+}
+
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
+{
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return tag & ~HPSA_SIMPLE_ERROR_BITS;
+ return tag & ~HPSA_PERF_ERROR_BITS;
+}
+
+/* process completion of an indexed ("direct lookup") command */
+static inline void process_indexed_cmd(struct ctlr_info *h,
+ u32 raw_tag)
+{
+ u32 tag_index;
+ struct CommandList *c;
+
+ tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
+ if (!bad_tag(h, tag_index, raw_tag)) {
+ c = h->cmd_pool + tag_index;
+ finish_cmd(c);
+ }
+}
+
+/* Some controllers, like p400, will give us one interrupt
+ * after a soft reset, even if we turned interrupts off.
+ * Only need to check for this in the hpsa_xxx_discard_completions
+ * functions.
+ */
+static int ignore_bogus_interrupt(struct ctlr_info *h)
+{
+ if (likely(!reset_devices))
+ return 0;
+
+ if (likely(h->interrupts_enabled))
+ return 0;
+
+ dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
+ "(known firmware bug.) Ignoring.\n");
+
+ return 1;
+}
+
+/*
+ * Convert &h->q[x] (passed to interrupt handlers) back to h.
+ * Relies on (h-q[x] == x) being true for x such that
+ * 0 <= x < MAX_REPLY_QUEUES.
+ */
+static struct ctlr_info *queue_to_hba(u8 *queue)
+{
+ return container_of((queue - *queue), struct ctlr_info, q[0]);
+}
+
+static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
+{
+ struct ctlr_info *h = queue_to_hba(queue);
+ u8 q = *(u8 *) queue;
+ u32 raw_tag;
+
+ if (ignore_bogus_interrupt(h))
+ return IRQ_NONE;
+
+ if (interrupt_not_for_us(h))
+ return IRQ_NONE;
+ h->last_intr_timestamp = get_jiffies_64();
+ while (interrupt_pending(h)) {
+ raw_tag = get_next_completion(h, q);
+ while (raw_tag != FIFO_EMPTY)
+ raw_tag = next_command(h, q);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
+{
+ struct ctlr_info *h = queue_to_hba(queue);
+ u32 raw_tag;
+ u8 q = *(u8 *) queue;
+
+ if (ignore_bogus_interrupt(h))
+ return IRQ_NONE;
+
+ h->last_intr_timestamp = get_jiffies_64();
+ raw_tag = get_next_completion(h, q);
+ while (raw_tag != FIFO_EMPTY)
+ raw_tag = next_command(h, q);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
+{
+ struct ctlr_info *h = queue_to_hba((u8 *) queue);
+ u32 raw_tag;
+ u8 q = *(u8 *) queue;
+
+ if (interrupt_not_for_us(h))
+ return IRQ_NONE;
+ h->last_intr_timestamp = get_jiffies_64();
+ while (interrupt_pending(h)) {
+ raw_tag = get_next_completion(h, q);
+ while (raw_tag != FIFO_EMPTY) {
+ process_indexed_cmd(h, raw_tag);
+ raw_tag = next_command(h, q);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
+{
+ struct ctlr_info *h = queue_to_hba(queue);
+ u32 raw_tag;
+ u8 q = *(u8 *) queue;
+
+ h->last_intr_timestamp = get_jiffies_64();
+ raw_tag = get_next_completion(h, q);
+ while (raw_tag != FIFO_EMPTY) {
+ process_indexed_cmd(h, raw_tag);
+ raw_tag = next_command(h, q);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
+static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
+{
+ struct Command {
+ struct CommandListHeader CommandHeader;
+ struct RequestBlock Request;
+ struct ErrDescriptor ErrorDescriptor;
+ };
+ struct Command *cmd;
+ static const size_t cmd_sz = sizeof(*cmd) +
+ sizeof(cmd->ErrorDescriptor);
+ dma_addr_t paddr64;
+ __le32 paddr32;
+ u32 tag;
+ void __iomem *vaddr;
+ int i, err;
+
+ vaddr = pci_ioremap_bar(pdev, 0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+
+ /* The Inbound Post Queue only accepts 32-bit physical addresses for the
+ * CCISS commands, so they must be allocated from the lower 4GiB of
+ * memory.
+ */
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ iounmap(vaddr);
+ return err;
+ }
+
+ cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+ if (cmd == NULL) {
+ iounmap(vaddr);
+ return -ENOMEM;
+ }
+
+ /* This must fit, because of the 32-bit consistent DMA mask. Also,
+ * although there's no guarantee, we assume that the address is at
+ * least 4-byte aligned (most likely, it's page-aligned).
+ */
+ paddr32 = cpu_to_le32(paddr64);
+
+ cmd->CommandHeader.ReplyQueue = 0;
+ cmd->CommandHeader.SGList = 0;
+ cmd->CommandHeader.SGTotal = cpu_to_le16(0);
+ cmd->CommandHeader.tag = cpu_to_le64(paddr64);
+ memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
+
+ cmd->Request.CDBLen = 16;
+ cmd->Request.type_attr_dir =
+ TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
+ cmd->Request.Timeout = 0; /* Don't time out */
+ cmd->Request.CDB[0] = opcode;
+ cmd->Request.CDB[1] = type;
+ memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
+ cmd->ErrorDescriptor.Addr =
+ cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
+ cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
+
+ writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
+
+ for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
+ tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+ if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
+ break;
+ msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
+ }
+
+ iounmap(vaddr);
+
+ /* we leak the DMA buffer here ... no choice since the controller could
+ * still complete the command.
+ */
+ if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
+ dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
+ opcode, type);
+ return -ETIMEDOUT;
+ }
+
+ pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+
+ if (tag & HPSA_ERROR_BIT) {
+ dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
+ opcode, type);
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
+ opcode, type);
+ return 0;
+}
+
+#define hpsa_noop(p) hpsa_message(p, 3, 0)
+
+static int hpsa_controller_hard_reset(struct pci_dev *pdev,
+ void __iomem *vaddr, u32 use_doorbell)
+{
+
+ if (use_doorbell) {
+ /* For everything after the P600, the PCI power state method
+ * of resetting the controller doesn't work, so we have this
+ * other way using the doorbell register.
+ */
+ dev_info(&pdev->dev, "using doorbell to reset controller\n");
+ writel(use_doorbell, vaddr + SA5_DOORBELL);
+
+ /* PMC hardware guys tell us we need a 10 second delay after
+ * doorbell reset and before any attempt to talk to the board
+ * at all to ensure that this actually works and doesn't fall
+ * over in some weird corner cases.
+ */
+ msleep(10000);
+ } else { /* Try to do it the PCI power state way */
+
+ /* Quoting from the Open CISS Specification: "The Power
+ * Management Control/Status Register (CSR) controls the power
+ * state of the device. The normal operating state is D0,
+ * CSR=00h. The software off state is D3, CSR=03h. To reset
+ * the controller, place the interface device in D3 then to D0,
+ * this causes a secondary PCI reset which will reset the
+ * controller." */
+
+ int rc = 0;
+
+ dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+
+ /* enter the D3hot power management state */
+ rc = pci_set_power_state(pdev, PCI_D3hot);
+ if (rc)
+ return rc;
+
+ msleep(500);
+
+ /* enter the D0 power management state */
+ rc = pci_set_power_state(pdev, PCI_D0);
+ if (rc)
+ return rc;
+
+ /*
+ * The P600 requires a small delay when changing states.
+ * Otherwise we may think the board did not reset and we bail.
+ * This for kdump only and is particular to the P600.
+ */
+ msleep(500);
+ }
+ return 0;
+}
+
+static void init_driver_version(char *driver_version, int len)
+{
+ memset(driver_version, 0, len);
+ strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
+}
+
+static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
+{
+ char *driver_version;
+ int i, size = sizeof(cfgtable->driver_version);
+
+ driver_version = kmalloc(size, GFP_KERNEL);
+ if (!driver_version)
+ return -ENOMEM;
+
+ init_driver_version(driver_version, size);
+ for (i = 0; i < size; i++)
+ writeb(driver_version[i], &cfgtable->driver_version[i]);
+ kfree(driver_version);
+ return 0;
+}
+
+static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
+ unsigned char *driver_ver)
+{
+ int i;
+
+ for (i = 0; i < sizeof(cfgtable->driver_version); i++)
+ driver_ver[i] = readb(&cfgtable->driver_version[i]);
+}
+
+static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
+{
+
+ char *driver_ver, *old_driver_ver;
+ int rc, size = sizeof(cfgtable->driver_version);
+
+ old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
+ if (!old_driver_ver)
+ return -ENOMEM;
+ driver_ver = old_driver_ver + size;
+
+ /* After a reset, the 32 bytes of "driver version" in the cfgtable
+ * should have been changed, otherwise we know the reset failed.
+ */
+ init_driver_version(old_driver_ver, size);
+ read_driver_ver_from_cfgtable(cfgtable, driver_ver);
+ rc = !memcmp(driver_ver, old_driver_ver, size);
+ kfree(old_driver_ver);
+ return rc;
+}
+/* This does a hard reset of the controller using PCI power management
+ * states or the using the doorbell register.
+ */
+static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+{
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ void __iomem *vaddr;
+ unsigned long paddr;
+ u32 misc_fw_support;
+ int rc;
+ struct CfgTable __iomem *cfgtable;
+ u32 use_doorbell;
+ u32 board_id;
+ u16 command_register;
+
+ /* For controllers as old as the P600, this is very nearly
+ * the same thing as
+ *
+ * pci_save_state(pci_dev);
+ * pci_set_power_state(pci_dev, PCI_D3hot);
+ * pci_set_power_state(pci_dev, PCI_D0);
+ * pci_restore_state(pci_dev);
+ *
+ * For controllers newer than the P600, the pci power state
+ * method of resetting doesn't work so we have another way
+ * using the doorbell register.
+ */
+
+ rc = hpsa_lookup_board_id(pdev, &board_id);
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Board ID not found\n");
+ return rc;
+ }
+ if (!ctlr_is_resettable(board_id)) {
+ dev_warn(&pdev->dev, "Controller not resettable\n");
+ return -ENODEV;
+ }
+
+ /* if controller is soft- but not hard resettable... */
+ if (!ctlr_is_hard_resettable(board_id))
+ return -ENOTSUPP; /* try soft reset later. */
+
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ pci_save_state(pdev);
+
+ /* find the first memory BAR, so we can find the cfg table */
+ rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
+ if (rc)
+ return rc;
+ vaddr = remap_pci_mem(paddr, 0x250);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* find cfgtable in order to check if reset via doorbell is supported */
+ rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ if (rc)
+ goto unmap_vaddr;
+ cfgtable = remap_pci_mem(pci_resource_start(pdev,
+ cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
+ if (!cfgtable) {
+ rc = -ENOMEM;
+ goto unmap_vaddr;
+ }
+ rc = write_driver_ver_to_cfgtable(cfgtable);
+ if (rc)
+ goto unmap_cfgtable;
+
+ /* If reset via doorbell register is supported, use that.
+ * There are two such methods. Favor the newest method.
+ */
+ misc_fw_support = readl(&cfgtable->misc_fw_support);
+ use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
+ if (use_doorbell) {
+ use_doorbell = DOORBELL_CTLR_RESET2;
+ } else {
+ use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ if (use_doorbell) {
+ dev_warn(&pdev->dev,
+ "Soft reset not supported. Firmware update is required.\n");
+ rc = -ENOTSUPP; /* try soft reset */
+ goto unmap_cfgtable;
+ }
+ }
+
+ rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
+ if (rc)
+ goto unmap_cfgtable;
+
+ pci_restore_state(pdev);
+ pci_write_config_word(pdev, 4, command_register);
+
+ /* Some devices (notably the HP Smart Array 5i Controller)
+ need a little pause here */
+ msleep(HPSA_POST_RESET_PAUSE_MSECS);
+
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "Failed waiting for board to become ready after hard reset\n");
+ goto unmap_cfgtable;
+ }
+
+ rc = controller_reset_failed(vaddr);
+ if (rc < 0)
+ goto unmap_cfgtable;
+ if (rc) {
+ dev_warn(&pdev->dev, "Unable to successfully reset "
+ "controller. Will try soft reset.\n");
+ rc = -ENOTSUPP;
+ } else {
+ dev_info(&pdev->dev, "board ready after hard reset.\n");
+ }
+
+unmap_cfgtable:
+ iounmap(cfgtable);
+
+unmap_vaddr:
+ iounmap(vaddr);
+ return rc;
+}
+
+/*
+ * We cannot read the structure directly, for portability we must use
+ * the io functions.
+ * This is for debug only.
+ */
+static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
+{
+#ifdef HPSA_DEBUG
+ int i;
+ char temp_name[17];
+
+ dev_info(dev, "Controller Configuration information\n");
+ dev_info(dev, "------------------------------------\n");
+ for (i = 0; i < 4; i++)
+ temp_name[i] = readb(&(tb->Signature[i]));
+ temp_name[4] = '\0';
+ dev_info(dev, " Signature = %s\n", temp_name);
+ dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
+ dev_info(dev, " Transport methods supported = 0x%x\n",
+ readl(&(tb->TransportSupport)));
+ dev_info(dev, " Transport methods active = 0x%x\n",
+ readl(&(tb->TransportActive)));
+ dev_info(dev, " Requested transport Method = 0x%x\n",
+ readl(&(tb->HostWrite.TransportRequest)));
+ dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
+ readl(&(tb->HostWrite.CoalIntDelay)));
+ dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
+ readl(&(tb->HostWrite.CoalIntCount)));
+ dev_info(dev, " Max outstanding commands = %d\n",
+ readl(&(tb->CmdsOutMax)));
+ dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+ for (i = 0; i < 16; i++)
+ temp_name[i] = readb(&(tb->ServerName[i]));
+ temp_name[16] = '\0';
+ dev_info(dev, " Server Name = %s\n", temp_name);
+ dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
+ readl(&(tb->HeartBeat)));
+#endif /* HPSA_DEBUG */
+}
+
+static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+{
+ int i, offset, mem_type, bar_type;
+
+ if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
+ return 0;
+ offset = 0;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
+ if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+ offset += 4;
+ else {
+ mem_type = pci_resource_flags(pdev, i) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ offset += 4; /* 32 bit */
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ offset += 8;
+ break;
+ default: /* reserved in PCI 2.2 */
+ dev_warn(&pdev->dev,
+ "base address is invalid\n");
+ return -1;
+ break;
+ }
+ }
+ if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+ return i + 1;
+ }
+ return -1;
+}
+
+/* If MSI/MSI-X is supported by the kernel we will try to enable it on
+ * controllers that are capable. If not, we use legacy INTx mode.
+ */
+
+static void hpsa_interrupt_mode(struct ctlr_info *h)
+{
+#ifdef CONFIG_PCI_MSI
+ int err, i;
+ struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
+
+ for (i = 0; i < MAX_REPLY_QUEUES; i++) {
+ hpsa_msix_entries[i].vector = 0;
+ hpsa_msix_entries[i].entry = i;
+ }
+
+ /* Some boards advertise MSI but don't really support it */
+ if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
+ (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
+ goto default_int_mode;
+ if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
+ dev_info(&h->pdev->dev, "MSI-X capable controller\n");
+ h->msix_vector = MAX_REPLY_QUEUES;
+ if (h->msix_vector > num_online_cpus())
+ h->msix_vector = num_online_cpus();
+ err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
+ 1, h->msix_vector);
+ if (err < 0) {
+ dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
+ h->msix_vector = 0;
+ goto single_msi_mode;
+ } else if (err < h->msix_vector) {
+ dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
+ "available\n", err);
+ }
+ h->msix_vector = err;
+ for (i = 0; i < h->msix_vector; i++)
+ h->intr[i] = hpsa_msix_entries[i].vector;
+ return;
+ }
+single_msi_mode:
+ if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
+ dev_info(&h->pdev->dev, "MSI capable controller\n");
+ if (!pci_enable_msi(h->pdev))
+ h->msi_vector = 1;
+ else
+ dev_warn(&h->pdev->dev, "MSI init failed\n");
+ }
+default_int_mode:
+#endif /* CONFIG_PCI_MSI */
+ /* if we get here we're going to use the default interrupt mode */
+ h->intr[h->intr_mode] = h->pdev->irq;
+}
+
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+{
+ int i;
+ u32 subsystem_vendor_id, subsystem_device_id;
+
+ subsystem_vendor_id = pdev->subsystem_vendor;
+ subsystem_device_id = pdev->subsystem_device;
+ *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
+ subsystem_vendor_id;
+
+ for (i = 0; i < ARRAY_SIZE(products); i++)
+ if (*board_id == products[i].board_id)
+ return i;
+
+ if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
+ subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
+ !hpsa_allow_any) {
+ dev_warn(&pdev->dev, "unrecognized board ID: "
+ "0x%08x, ignoring.\n", *board_id);
+ return -ENODEV;
+ }
+ return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
+}
+
+static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
+{
+ int i;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ /* addressing mode bits already removed */
+ *memory_bar = pci_resource_start(pdev, i);
+ dev_dbg(&pdev->dev, "memory BAR = %lx\n",
+ *memory_bar);
+ return 0;
+ }
+ dev_warn(&pdev->dev, "no memory BAR found\n");
+ return -ENODEV;
+}
+
+static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
+ int wait_for_ready)
+{
+ int i, iterations;
+ u32 scratchpad;
+ if (wait_for_ready)
+ iterations = HPSA_BOARD_READY_ITERATIONS;
+ else
+ iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
+
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != HPSA_FIRMWARE_READY)
+ return 0;
+ }
+ msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
+ }
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
+ return -ENODEV;
+}
+
+static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
+{
+ *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
+ *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
+ *cfg_base_addr &= (u32) 0x0000ffff;
+ *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
+ if (*cfg_base_addr_index == -1) {
+ dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int hpsa_find_cfgtables(struct ctlr_info *h)
+{
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ u32 trans_offset;
+ int rc;
+
+ rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ if (rc)
+ return rc;
+ h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
+ if (!h->cfgtable) {
+ dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
+ return -ENOMEM;
+ }
+ rc = write_driver_ver_to_cfgtable(h->cfgtable);
+ if (rc)
+ return rc;
+ /* Find performant mode table. */
+ trans_offset = readl(&h->cfgtable->TransMethodOffset);
+ h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index)+cfg_offset+trans_offset,
+ sizeof(*h->transtable));
+ if (!h->transtable)
+ return -ENOMEM;
+ return 0;
+}
+
+static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
+{
+ h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in memory limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
+ if (h->max_commands < 16) {
+ dev_warn(&h->pdev->dev, "Controller reports "
+ "max supported commands of %d, an obvious lie. "
+ "Using 16. Ensure that firmware is up to date.\n",
+ h->max_commands);
+ h->max_commands = 16;
+ }
+}
+
+/* If the controller reports that the total max sg entries is greater than 512,
+ * then we know that chained SG blocks work. (Original smart arrays did not
+ * support chained SG blocks and would return zero for max sg entries.)
+ */
+static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
+{
+ return h->maxsgentries > 512;
+}
+
+/* Interrogate the hardware for some limits:
+ * max commands, max SG elements without chaining, and with chaining,
+ * SG chain block size, etc.
+ */
+static void hpsa_find_board_params(struct ctlr_info *h)
+{
+ hpsa_get_max_perf_mode_cmds(h);
+ h->nr_cmds = h->max_commands;
+ h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+ h->fw_support = readl(&(h->cfgtable->misc_fw_support));
+ if (hpsa_supports_chained_sg_blocks(h)) {
+ /* Limit in-command s/g elements to 32 save dma'able memory. */
+ h->max_cmd_sg_entries = 32;
+ h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
+ h->maxsgentries--; /* save one for chain pointer */
+ } else {
+ /*
+ * Original smart arrays supported at most 31 s/g entries
+ * embedded inline in the command (trying to use more
+ * would lock up the controller)
+ */
+ h->max_cmd_sg_entries = 31;
+ h->maxsgentries = 31; /* default to traditional values */
+ h->chainsize = 0;
+ }
+
+ /* Find out what task management functions are supported and cache */
+ h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
+ if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
+ if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
+}
+
+static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
+{
+ if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
+ dev_err(&h->pdev->dev, "not a valid CISS config table\n");
+ return false;
+ }
+ return true;
+}
+
+static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
+{
+ u32 driver_support;
+
+ driver_support = readl(&(h->cfgtable->driver_support));
+ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
+#ifdef CONFIG_X86
+ driver_support |= ENABLE_SCSI_PREFETCH;
+#endif
+ driver_support |= ENABLE_UNIT_ATTN;
+ writel(driver_support, &(h->cfgtable->driver_support));
+}
+
+/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
+ * in a prefetch beyond physical memory.
+ */
+static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
+{
+ u32 dma_prefetch;
+
+ if (h->board_id != 0x3225103C)
+ return;
+ dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ dma_prefetch |= 0x8000;
+ writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+}
+
+static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+{
+ int i;
+ u32 doorbell_value;
+ unsigned long flags;
+ /* wait until the clear_event_notify bit 6 is cleared by controller. */
+ for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
+ goto done;
+ /* delay and try again */
+ msleep(CLEAR_EVENT_WAIT_INTERVAL);
+ }
+ return -ENODEV;
+done:
+ return 0;
+}
+
+static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
+{
+ int i;
+ u32 doorbell_value;
+ unsigned long flags;
+
+ /* under certain very rare conditions, this can take awhile.
+ * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+ * as we enter this code.)
+ */
+ for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & CFGTBL_ChangeReq))
+ goto done;
+ /* delay and try again */
+ msleep(MODE_CHANGE_WAIT_INTERVAL);
+ }
+ return -ENODEV;
+done:
+ return 0;
+}
+
+/* return -ENODEV or other reason on error, 0 on success */
+static int hpsa_enter_simple_mode(struct ctlr_info *h)
+{
+ u32 trans_support;
+
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & SIMPLE_MODE))
+ return -ENOTSUPP;
+
+ h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
+ /* Update the field, and then ring the doorbell */
+ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ if (hpsa_wait_for_mode_change_ack(h))
+ goto error;
+ print_cfg_table(&h->pdev->dev, h->cfgtable);
+ if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+ goto error;
+ h->transMethod = CFGTBL_Trans_Simple;
+ return 0;
+error:
+ dev_err(&h->pdev->dev, "failed to enter simple mode\n");
+ return -ENODEV;
+}
+
+static int hpsa_pci_init(struct ctlr_info *h)
+{
+ int prod_index, err;
+
+ prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
+ if (prod_index < 0)
+ return prod_index;
+ h->product_name = products[prod_index].product_name;
+ h->access = *(products[prod_index].access);
+
+ pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
+ err = pci_enable_device(h->pdev);
+ if (err) {
+ dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(h->pdev, HPSA);
+ if (err) {
+ dev_err(&h->pdev->dev,
+ "cannot obtain PCI resources, aborting\n");
+ return err;
+ }
+
+ pci_set_master(h->pdev);
+
+ hpsa_interrupt_mode(h);
+ err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
+ if (err)
+ goto err_out_free_res;
+ h->vaddr = remap_pci_mem(h->paddr, 0x250);
+ if (!h->vaddr) {
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+ err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
+ if (err)
+ goto err_out_free_res;
+ err = hpsa_find_cfgtables(h);
+ if (err)
+ goto err_out_free_res;
+ hpsa_find_board_params(h);
+
+ if (!hpsa_CISS_signature_present(h)) {
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+ hpsa_set_driver_support_bits(h);
+ hpsa_p600_dma_prefetch_quirk(h);
+ err = hpsa_enter_simple_mode(h);
+ if (err)
+ goto err_out_free_res;
+ return 0;
+
+err_out_free_res:
+ if (h->transtable)
+ iounmap(h->transtable);
+ if (h->cfgtable)
+ iounmap(h->cfgtable);
+ if (h->vaddr)
+ iounmap(h->vaddr);
+ pci_disable_device(h->pdev);
+ pci_release_regions(h->pdev);
+ return err;
+}
+
+static void hpsa_hba_inquiry(struct ctlr_info *h)
+{
+ int rc;
+
+#define HBA_INQUIRY_BYTE_COUNT 64
+ h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
+ if (!h->hba_inquiry_data)
+ return;
+ rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
+ h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
+ if (rc != 0) {
+ kfree(h->hba_inquiry_data);
+ h->hba_inquiry_data = NULL;
+ }
+}
+
+static int hpsa_init_reset_devices(struct pci_dev *pdev)
+{
+ int rc, i;
+ void __iomem *vaddr;
+
+ if (!reset_devices)
+ return 0;
+
+ /* kdump kernel is loading, we don't know in which state is
+ * the pci interface. The dev->enable_cnt is equal zero
+ * so we call enable+disable, wait a while and switch it on.
+ */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+ pci_disable_device(pdev);
+ msleep(260); /* a randomly chosen number */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ return -ENODEV;
+ }
+
+ pci_set_master(pdev);
+
+ vaddr = pci_ioremap_bar(pdev, 0);
+ if (vaddr == NULL) {
+ rc = -ENOMEM;
+ goto out_disable;
+ }
+ writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ iounmap(vaddr);
+
+ /* Reset the controller with a PCI power-cycle or via doorbell */
+ rc = hpsa_kdump_hard_reset_controller(pdev);
+
+ /* -ENOTSUPP here means we cannot reset the controller
+ * but it's already (and still) up and running in
+ * "performant mode". Or, it might be 640x, which can't reset
+ * due to concerns about shared bbwc between 6402/6404 pair.
+ */
+ if (rc)
+ goto out_disable;
+
+ /* Now try to get the controller to respond to a no-op */
+ dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
+ for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
+ if (hpsa_noop(pdev) == 0)
+ break;
+ else
+ dev_warn(&pdev->dev, "no-op failed%s\n",
+ (i < 11 ? "; re-trying" : ""));
+ }
+
+out_disable:
+
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
+{
+ h->cmd_pool_bits = kzalloc(
+ DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
+ sizeof(unsigned long), GFP_KERNEL);
+ h->cmd_pool = pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->cmd_pool),
+ &(h->cmd_pool_dhandle));
+ h->errinfo_pool = pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->errinfo_pool),
+ &(h->errinfo_pool_dhandle));
+ if ((h->cmd_pool_bits == NULL)
+ || (h->cmd_pool == NULL)
+ || (h->errinfo_pool == NULL)) {
+ dev_err(&h->pdev->dev, "out of memory in %s", __func__);
+ goto clean_up;
+ }
+ return 0;
+clean_up:
+ hpsa_free_cmd_pool(h);
+ return -ENOMEM;
+}
+
+static void hpsa_free_cmd_pool(struct ctlr_info *h)
+{
+ kfree(h->cmd_pool_bits);
+ if (h->cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->ioaccel2_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+ if (h->errinfo_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool,
+ h->errinfo_pool_dhandle);
+ if (h->ioaccel_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct io_accel1_cmd),
+ h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+}
+
+static void hpsa_irq_affinity_hints(struct ctlr_info *h)
+{
+ int i, cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < h->msix_vector; i++) {
+ irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+}
+
+/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
+static void hpsa_free_irqs(struct ctlr_info *h)
+{
+ int i;
+
+ if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+ /* Single reply queue, only one irq to free */
+ i = h->intr_mode;
+ irq_set_affinity_hint(h->intr[i], NULL);
+ free_irq(h->intr[i], &h->q[i]);
+ return;
+ }
+
+ for (i = 0; i < h->msix_vector; i++) {
+ irq_set_affinity_hint(h->intr[i], NULL);
+ free_irq(h->intr[i], &h->q[i]);
+ }
+ for (; i < MAX_REPLY_QUEUES; i++)
+ h->q[i] = 0;
+}
+
+/* returns 0 on success; cleans up and returns -Enn on error */
+static int hpsa_request_irqs(struct ctlr_info *h,
+ irqreturn_t (*msixhandler)(int, void *),
+ irqreturn_t (*intxhandler)(int, void *))
+{
+ int rc, i;
+
+ /*
+ * initialize h->q[x] = x so that interrupt handlers know which
+ * queue to process.
+ */
+ for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ h->q[i] = (u8) i;
+
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+ /* If performant mode and MSI-X, use multiple reply queues */
+ for (i = 0; i < h->msix_vector; i++) {
+ rc = request_irq(h->intr[i], msixhandler,
+ 0, h->devname,
+ &h->q[i]);
+ if (rc) {
+ int j;
+
+ dev_err(&h->pdev->dev,
+ "failed to get irq %d for %s\n",
+ h->intr[i], h->devname);
+ for (j = 0; j < i; j++) {
+ free_irq(h->intr[j], &h->q[j]);
+ h->q[j] = 0;
+ }
+ for (; j < MAX_REPLY_QUEUES; j++)
+ h->q[j] = 0;
+ return rc;
+ }
+ }
+ hpsa_irq_affinity_hints(h);
+ } else {
+ /* Use single reply pool */
+ if (h->msix_vector > 0 || h->msi_vector) {
+ rc = request_irq(h->intr[h->intr_mode],
+ msixhandler, 0, h->devname,
+ &h->q[h->intr_mode]);
+ } else {
+ rc = request_irq(h->intr[h->intr_mode],
+ intxhandler, IRQF_SHARED, h->devname,
+ &h->q[h->intr_mode]);
+ }
+ }
+ if (rc) {
+ dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
+ h->intr[h->intr_mode], h->devname);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int hpsa_kdump_soft_reset(struct ctlr_info *h)
+{
+ if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
+ HPSA_RESET_TYPE_CONTROLLER)) {
+ dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
+ return -EIO;
+ }
+
+ dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
+ if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
+ dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
+ return -1;
+ }
+
+ dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
+ if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
+ dev_warn(&h->pdev->dev, "Board failed to become ready "
+ "after soft reset.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
+{
+ hpsa_free_irqs(h);
+#ifdef CONFIG_PCI_MSI
+ if (h->msix_vector) {
+ if (h->pdev->msix_enabled)
+ pci_disable_msix(h->pdev);
+ } else if (h->msi_vector) {
+ if (h->pdev->msi_enabled)
+ pci_disable_msi(h->pdev);
+ }
+#endif /* CONFIG_PCI_MSI */
+}
+
+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+ int i;
+
+ for (i = 0; i < h->nreply_queues; i++) {
+ if (!h->reply_queue[i].head)
+ continue;
+ pci_free_consistent(h->pdev, h->reply_queue_size,
+ h->reply_queue[i].head, h->reply_queue[i].busaddr);
+ h->reply_queue[i].head = NULL;
+ h->reply_queue[i].busaddr = 0;
+ }
+}
+
+static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
+{
+ hpsa_free_irqs_and_disable_msix(h);
+ hpsa_free_sg_chain_blocks(h);
+ hpsa_free_cmd_pool(h);
+ kfree(h->ioaccel1_blockFetchTable);
+ kfree(h->blockFetchTable);
+ hpsa_free_reply_queues(h);
+ if (h->vaddr)
+ iounmap(h->vaddr);
+ if (h->transtable)
+ iounmap(h->transtable);
+ if (h->cfgtable)
+ iounmap(h->cfgtable);
+ pci_disable_device(h->pdev);
+ pci_release_regions(h->pdev);
+ kfree(h);
+}
+
+/* Called when controller lockup detected. */
+static void fail_all_outstanding_cmds(struct ctlr_info *h)
+{
+ int i, refcount;
+ struct CommandList *c;
+
+ flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
+ for (i = 0; i < h->nr_cmds; i++) {
+ c = h->cmd_pool + i;
+ refcount = atomic_inc_return(&c->refcount);
+ if (refcount > 1) {
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ finish_cmd(c);
+ }
+ cmd_free(h, c);
+ }
+}
+
+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ u32 *lockup_detected;
+ lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+ *lockup_detected = value;
+ }
+ wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
+static void controller_lockup_detected(struct ctlr_info *h)
+{
+ unsigned long flags;
+ u32 lockup_detected;
+
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ spin_lock_irqsave(&h->lock, flags);
+ lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (!lockup_detected) {
+ /* no heartbeat, but controller gave us a zero. */
+ dev_warn(&h->pdev->dev,
+ "lockup detected but scratchpad register is zero\n");
+ lockup_detected = 0xffffffff;
+ }
+ set_lockup_detected_for_all_cpus(h, lockup_detected);
+ spin_unlock_irqrestore(&h->lock, flags);
+ dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
+ lockup_detected);
+ pci_disable_device(h->pdev);
+ fail_all_outstanding_cmds(h);
+}
+
+static void detect_controller_lockup(struct ctlr_info *h)
+{
+ u64 now;
+ u32 heartbeat;
+ unsigned long flags;
+
+ now = get_jiffies_64();
+ /* If we've received an interrupt recently, we're ok. */
+ if (time_after64(h->last_intr_timestamp +
+ (h->heartbeat_sample_interval), now))
+ return;
+
+ /*
+ * If we've already checked the heartbeat recently, we're ok.
+ * This could happen if someone sends us a signal. We
+ * otherwise don't care about signals in this thread.
+ */
+ if (time_after64(h->last_heartbeat_timestamp +
+ (h->heartbeat_sample_interval), now))
+ return;
+
+ /* If heartbeat has not changed since we last looked, we're not ok. */
+ spin_lock_irqsave(&h->lock, flags);
+ heartbeat = readl(&h->cfgtable->HeartBeat);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (h->last_heartbeat == heartbeat) {
+ controller_lockup_detected(h);
+ return;
+ }
+
+ /* We're ok. */
+ h->last_heartbeat = heartbeat;
+ h->last_heartbeat_timestamp = now;
+}
+
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
+ int i;
+ char *event_type;
+
+ if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+ return;
+
+ /* Ask the controller to clear the events we're handling. */
+ if ((h->transMethod & (CFGTBL_Trans_io_accel1
+ | CFGTBL_Trans_io_accel2)) &&
+ (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
+ h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
+
+ if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
+ event_type = "state change";
+ if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
+ event_type = "configuration change";
+ /* Stop sending new RAID offload reqs via the IO accelerator */
+ scsi_block_requests(h->scsi_host);
+ for (i = 0; i < h->ndevices; i++)
+ h->dev[i]->offload_enabled = 0;
+ hpsa_drain_accel_commands(h);
+ /* Set 'accelerator path config change' bit */
+ dev_warn(&h->pdev->dev,
+ "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
+ h->events, event_type);
+ writel(h->events, &(h->cfgtable->clear_event_notify));
+ /* Set the "clear event notify field update" bit 6 */
+ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+ /* Wait until ctlr clears 'clear event notify field', bit 6 */
+ hpsa_wait_for_clear_event_notify_ack(h);
+ scsi_unblock_requests(h->scsi_host);
+ } else {
+ /* Acknowledge controller notification events. */
+ writel(h->events, &(h->cfgtable->clear_event_notify));
+ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_clear_event_notify_ack(h);
+#if 0
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_mode_change_ack(h);
+#endif
+ }
+ return;
+}
+
+/* Check a register on the controller to see if there are configuration
+ * changes (added/changed/removed logical drives, etc.) which mean that
+ * we should rescan the controller for devices.
+ * Also check flag for driver-initiated rescan.
+ */
+static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
+{
+ if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+ return 0;
+
+ h->events = readl(&(h->cfgtable->event_notify));
+ return h->events & RESCAN_REQUIRED_EVENT_BITS;
+}
+
+/*
+ * Check if any of the offline devices have become ready
+ */
+static int hpsa_offline_devices_ready(struct ctlr_info *h)
+{
+ unsigned long flags;
+ struct offline_device_entry *d;
+ struct list_head *this, *tmp;
+
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_safe(this, tmp, &h->offline_device_list) {
+ d = list_entry(this, struct offline_device_entry,
+ offline_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ if (!hpsa_volume_offline(h, d->scsi3addr)) {
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_del(&d->offline_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return 1;
+ }
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return 0;
+}
+
+static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, rescan_ctlr_work);
+
+
+ if (h->remove_in_progress)
+ return;
+
+ if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
+ scsi_host_get(h->scsi_host);
+ hpsa_ack_ctlr_events(h);
+ hpsa_scan_start(h->scsi_host);
+ scsi_host_put(h->scsi_host);
+ }
+ spin_lock_irqsave(&h->lock, flags);
+ if (!h->remove_in_progress)
+ queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
+ h->heartbeat_sample_interval);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static void hpsa_monitor_ctlr_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, monitor_ctlr_work);
+
+ detect_controller_lockup(h);
+ if (lockup_detected(h))
+ return;
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (!h->remove_in_progress)
+ schedule_delayed_work(&h->monitor_ctlr_work,
+ h->heartbeat_sample_interval);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
+ char *name)
+{
+ struct workqueue_struct *wq = NULL;
+
+ wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
+ if (!wq)
+ dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
+
+ return wq;
+}
+
+static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int dac, rc;
+ struct ctlr_info *h;
+ int try_soft_reset = 0;
+ unsigned long flags;
+
+ if (number_of_controllers == 0)
+ printk(KERN_INFO DRIVER_NAME "\n");
+
+ rc = hpsa_init_reset_devices(pdev);
+ if (rc) {
+ if (rc != -ENOTSUPP)
+ return rc;
+ /* If the reset fails in a particular way (it has no way to do
+ * a proper hard reset, so returns -ENOTSUPP) we can try to do
+ * a soft reset once we get the controller configured up to the
+ * point that it can accept a command.
+ */
+ try_soft_reset = 1;
+ rc = 0;
+ }
+
+reinit_after_soft_reset:
+
+ /* Command structures must be aligned on a 32-byte boundary because
+ * the 5 lower bits of the address are used by the hardware. and by
+ * the driver. See comments in hpsa.h for more info.
+ */
+ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->pdev = pdev;
+ h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+ INIT_LIST_HEAD(&h->offline_device_list);
+ spin_lock_init(&h->lock);
+ spin_lock_init(&h->offline_device_lock);
+ spin_lock_init(&h->scan_lock);
+ atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
+
+ h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
+ if (!h->rescan_ctlr_wq) {
+ rc = -ENOMEM;
+ goto clean1;
+ }
+
+ h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
+ if (!h->resubmit_wq) {
+ rc = -ENOMEM;
+ goto clean1;
+ }
+
+ /* Allocate and clear per-cpu variable lockup_detected */
+ h->lockup_detected = alloc_percpu(u32);
+ if (!h->lockup_detected) {
+ rc = -ENOMEM;
+ goto clean1;
+ }
+ set_lockup_detected_for_all_cpus(h, 0);
+
+ rc = hpsa_pci_init(h);
+ if (rc != 0)
+ goto clean1;
+
+ sprintf(h->devname, HPSA "%d", number_of_controllers);
+ h->ctlr = number_of_controllers;
+ number_of_controllers++;
+
+ /* configure PCI DMA stuff */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc == 0) {
+ dac = 1;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc == 0) {
+ dac = 0;
+ } else {
+ dev_err(&pdev->dev, "no suitable DMA available\n");
+ goto clean1;
+ }
+ }
+
+ /* make sure the board interrupts are off */
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+
+ if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
+ goto clean2;
+ dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
+ h->devname, pdev->device,
+ h->intr[h->intr_mode], dac ? "" : " not");
+ rc = hpsa_allocate_cmd_pool(h);
+ if (rc)
+ goto clean2_and_free_irqs;
+ if (hpsa_allocate_sg_chain_blocks(h))
+ goto clean4;
+ init_waitqueue_head(&h->scan_wait_queue);
+ h->scan_finished = 1; /* no scan currently in progress */
+
+ pci_set_drvdata(pdev, h);
+ h->ndevices = 0;
+ h->hba_mode_enabled = 0;
+ h->scsi_host = NULL;
+ spin_lock_init(&h->devlock);
+ hpsa_put_ctlr_into_performant_mode(h);
+
+ /* At this point, the controller is ready to take commands.
+ * Now, if reset_devices and the hard reset didn't work, try
+ * the soft reset and see if that works.
+ */
+ if (try_soft_reset) {
+
+ /* This is kind of gross. We may or may not get a completion
+ * from the soft reset command, and if we do, then the value
+ * from the fifo may or may not be valid. So, we wait 10 secs
+ * after the reset throwing away any completions we get during
+ * that time. Unregister the interrupt handler and register
+ * fake ones to scoop up any residual completions.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ spin_unlock_irqrestore(&h->lock, flags);
+ hpsa_free_irqs(h);
+ rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
+ hpsa_intx_discard_completions);
+ if (rc) {
+ dev_warn(&h->pdev->dev,
+ "Failed to request_irq after soft reset.\n");
+ goto clean4;
+ }
+
+ rc = hpsa_kdump_soft_reset(h);
+ if (rc)
+ /* Neither hard nor soft reset worked, we're hosed. */
+ goto clean4;
+
+ dev_info(&h->pdev->dev, "Board READY.\n");
+ dev_info(&h->pdev->dev,
+ "Waiting for stale completions to drain.\n");
+ h->access.set_intr_mask(h, HPSA_INTR_ON);
+ msleep(10000);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+
+ rc = controller_reset_failed(h->cfgtable);
+ if (rc)
+ dev_info(&h->pdev->dev,
+ "Soft reset appears to have failed.\n");
+
+ /* since the controller's reset, we have to go back and re-init
+ * everything. Easiest to just forget what we've done and do it
+ * all over again.
+ */
+ hpsa_undo_allocations_after_kdump_soft_reset(h);
+ try_soft_reset = 0;
+ if (rc)
+ /* don't go to clean4, we already unallocated */
+ return -ENODEV;
+
+ goto reinit_after_soft_reset;
+ }
+
+ /* Enable Accelerated IO path at driver layer */
+ h->acciopath_status = 1;
+
+
+ /* Turn the interrupts on so we can service requests */
+ h->access.set_intr_mask(h, HPSA_INTR_ON);
+
+ hpsa_hba_inquiry(h);
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+
+ /* Monitor the controller for firmware lockups */
+ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+ INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
+ schedule_delayed_work(&h->monitor_ctlr_work,
+ h->heartbeat_sample_interval);
+ INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
+ queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
+ h->heartbeat_sample_interval);
+ return 0;
+
+clean4:
+ hpsa_free_sg_chain_blocks(h);
+ hpsa_free_cmd_pool(h);
+clean2_and_free_irqs:
+ hpsa_free_irqs(h);
+clean2:
+clean1:
+ if (h->resubmit_wq)
+ destroy_workqueue(h->resubmit_wq);
+ if (h->rescan_ctlr_wq)
+ destroy_workqueue(h->rescan_ctlr_wq);
+ if (h->lockup_detected)
+ free_percpu(h->lockup_detected);
+ kfree(h);
+ return rc;
+}
+
+static void hpsa_flush_cache(struct ctlr_info *h)
+{
+ char *flush_buf;
+ struct CommandList *c;
+
+ /* Don't bother trying to flush the cache if locked up */
+ if (unlikely(lockup_detected(h)))
+ return;
+ flush_buf = kzalloc(4, GFP_KERNEL);
+ if (!flush_buf)
+ return;
+
+ c = cmd_alloc(h);
+ if (!c) {
+ dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ goto out_of_memory;
+ }
+ if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD)) {
+ goto out;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
+ if (c->err_info->CommandStatus != 0)
+out:
+ dev_warn(&h->pdev->dev,
+ "error flushing cache on controller\n");
+ cmd_free(h, c);
+out_of_memory:
+ kfree(flush_buf);
+}
+
+static void hpsa_shutdown(struct pci_dev *pdev)
+{
+ struct ctlr_info *h;
+
+ h = pci_get_drvdata(pdev);
+ /* Turn board interrupts off and send the flush cache command
+ * sendcmd will turn off interrupt, and send the flush...
+ * To write all data in the battery backed cache to disks
+ */
+ hpsa_flush_cache(h);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ hpsa_free_irqs_and_disable_msix(h);
+}
+
+static void hpsa_free_device_info(struct ctlr_info *h)
+{
+ int i;
+
+ for (i = 0; i < h->ndevices; i++)
+ kfree(h->dev[i]);
+}
+
+static void hpsa_remove_one(struct pci_dev *pdev)
+{
+ struct ctlr_info *h;
+ unsigned long flags;
+
+ if (pci_get_drvdata(pdev) == NULL) {
+ dev_err(&pdev->dev, "unable to remove device\n");
+ return;
+ }
+ h = pci_get_drvdata(pdev);
+
+ /* Get rid of any controller monitoring work items */
+ spin_lock_irqsave(&h->lock, flags);
+ h->remove_in_progress = 1;
+ spin_unlock_irqrestore(&h->lock, flags);
+ cancel_delayed_work_sync(&h->monitor_ctlr_work);
+ cancel_delayed_work_sync(&h->rescan_ctlr_work);
+ destroy_workqueue(h->rescan_ctlr_wq);
+ destroy_workqueue(h->resubmit_wq);
+ hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
+ hpsa_shutdown(pdev);
+ iounmap(h->vaddr);
+ iounmap(h->transtable);
+ iounmap(h->cfgtable);
+ hpsa_free_device_info(h);
+ hpsa_free_sg_chain_blocks(h);
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool, h->errinfo_pool_dhandle);
+ hpsa_free_reply_queues(h);
+ kfree(h->cmd_pool_bits);
+ kfree(h->blockFetchTable);
+ kfree(h->ioaccel1_blockFetchTable);
+ kfree(h->ioaccel2_blockFetchTable);
+ kfree(h->hba_inquiry_data);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ free_percpu(h->lockup_detected);
+ kfree(h);
+}
+
+static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
+ __attribute__((unused)) pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+{
+ return -ENOSYS;
+}
+
+static struct pci_driver hpsa_pci_driver = {
+ .name = HPSA,
+ .probe = hpsa_init_one,
+ .remove = hpsa_remove_one,
+ .id_table = hpsa_pci_device_id, /* id_table */
+ .shutdown = hpsa_shutdown,
+ .suspend = hpsa_suspend,
+ .resume = hpsa_resume,
+};
+
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers. The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands. This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes. The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void calc_bucket_map(int bucket[], int num_buckets,
+ int nsgs, int min_blocks, u32 *bucket_map)
+{
+ int i, j, b, size;
+
+ /* Note, bucket_map must have nsgs+1 entries. */
+ for (i = 0; i <= nsgs; i++) {
+ /* Compute size of a command with i SG entries */
+ size = i + min_blocks;
+ b = num_buckets; /* Assume the biggest bucket */
+ /* Find the bucket that is just big enough */
+ for (j = 0; j < num_buckets; j++) {
+ if (bucket[j] >= size) {
+ b = j;
+ break;
+ }
+ }
+ /* for a command with i SG entries, use bucket b. */
+ bucket_map[i] = b;
+ }
+}
+
+/* return -ENODEV or other reason on error, 0 on success */
+static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+{
+ int i;
+ unsigned long register_value;
+ unsigned long transMethod = CFGTBL_Trans_Performant |
+ (trans_support & CFGTBL_Trans_use_short_tags) |
+ CFGTBL_Trans_enable_directed_msix |
+ (trans_support & (CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_io_accel2));
+ struct access_method access = SA5_performant_access;
+
+ /* This is a bit complicated. There are 8 registers on
+ * the controller which we write to to tell it 8 different
+ * sizes of commands which there may be. It's a way of
+ * reducing the DMA done to fetch each command. Encoded into
+ * each command's tag are 3 bits which communicate to the controller
+ * which of the eight sizes that command fits within. The size of
+ * each command depends on how many scatter gather entries there are.
+ * Each SG entry requires 16 bytes. The eight registers are programmed
+ * with the number of 16-byte blocks a command of that size requires.
+ * The smallest command possible requires 5 such 16 byte blocks.
+ * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
+ * blocks. Note, this only extends to the SG entries contained
+ * within the command block, and does not extend to chained blocks
+ * of SG elements. bft[] contains the eight values we write to
+ * the registers. They are not evenly distributed, but have more
+ * sizes for small commands, and fewer sizes for larger commands.
+ */
+ int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+ int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19,
+ HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
+ BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+ BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+ 16 * MIN_IOACCEL2_BFT_ENTRY);
+ BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
+ BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
+ /* 5 = 1 s/g entry or 4k
+ * 6 = 2 s/g entry or 8k
+ * 8 = 4 s/g entry or 16k
+ * 10 = 6 s/g entry or 24k
+ */
+
+ /* If the controller supports either ioaccel method then
+ * we can also use the RAID stack submit path that does not
+ * perform the superfluous readl() after each command submission.
+ */
+ if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
+ access = SA5_performant_access_no_read;
+
+ /* Controller spec: zero out this buffer. */
+ for (i = 0; i < h->nreply_queues; i++)
+ memset(h->reply_queue[i].head, 0, h->reply_queue_size);
+
+ bft[7] = SG_ENTRIES_IN_CMD + 4;
+ calc_bucket_map(bft, ARRAY_SIZE(bft),
+ SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
+ for (i = 0; i < 8; i++)
+ writel(bft[i], &h->transtable->BlockFetch[i]);
+
+ /* size of controller ring buffer */
+ writel(h->max_commands, &h->transtable->RepQSize);
+ writel(h->nreply_queues, &h->transtable->RepQCount);
+ writel(0, &h->transtable->RepQCtrAddrLow32);
+ writel(0, &h->transtable->RepQCtrAddrHigh32);
+
+ for (i = 0; i < h->nreply_queues; i++) {
+ writel(0, &h->transtable->RepQAddr[i].upper);
+ writel(h->reply_queue[i].busaddr,
+ &h->transtable->RepQAddr[i].lower);
+ }
+
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+ writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
+ /*
+ * enable outbound interrupt coalescing in accelerator mode;
+ */
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ access = SA5_ioaccel_mode1_access;
+ writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+ writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+ } else {
+ if (trans_support & CFGTBL_Trans_io_accel2) {
+ access = SA5_ioaccel_mode2_access;
+ writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+ writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+ }
+ }
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ if (hpsa_wait_for_mode_change_ack(h)) {
+ dev_err(&h->pdev->dev,
+ "performant mode problem - doorbell timeout\n");
+ return -ENODEV;
+ }
+ register_value = readl(&(h->cfgtable->TransportActive));
+ if (!(register_value & CFGTBL_Trans_Performant)) {
+ dev_err(&h->pdev->dev,
+ "performant mode problem - transport not active\n");
+ return -ENODEV;
+ }
+ /* Change the access methods to the performant access methods */
+ h->access = access;
+ h->transMethod = transMethod;
+
+ if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+ (trans_support & CFGTBL_Trans_io_accel2)))
+ return 0;
+
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ /* Set up I/O accelerator mode */
+ for (i = 0; i < h->nreply_queues; i++) {
+ writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+ h->reply_queue[i].current_entry =
+ readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+ }
+ bft[7] = h->ioaccel_maxsg + 8;
+ calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+ h->ioaccel1_blockFetchTable);
+
+ /* initialize all reply queue entries to unused */
+ for (i = 0; i < h->nreply_queues; i++)
+ memset(h->reply_queue[i].head,
+ (u8) IOACCEL_MODE1_REPLY_UNUSED,
+ h->reply_queue_size);
+
+ /* set all the constant fields in the accelerator command
+ * frames once at init time to save CPU cycles later.
+ */
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+ cp->function = IOACCEL1_FUNCTION_SCSIIO;
+ cp->err_info = (u32) (h->errinfo_pool_dhandle +
+ (i * sizeof(struct ErrorInfo)));
+ cp->err_info_len = sizeof(struct ErrorInfo);
+ cp->sgl_offset = IOACCEL1_SGLOFFSET;
+ cp->host_context_flags =
+ cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
+ cp->timeout_sec = 0;
+ cp->ReplyQueue = 0;
+ cp->tag =
+ cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
+ cp->host_addr =
+ cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
+ (i * sizeof(struct io_accel1_cmd)));
+ }
+ } else if (trans_support & CFGTBL_Trans_io_accel2) {
+ u64 cfg_offset, cfg_base_addr_index;
+ u32 bft2_offset, cfg_base_addr;
+ int rc;
+
+ rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+ bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+ calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+ 4, h->ioaccel2_blockFetchTable);
+ bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+ BUILD_BUG_ON(offsetof(struct CfgTable,
+ io_accel_request_size_offset) != 0xb8);
+ h->ioaccel2_bft2_regs =
+ remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) +
+ cfg_offset + bft2_offset,
+ ARRAY_SIZE(bft2) *
+ sizeof(*h->ioaccel2_bft2_regs));
+ for (i = 0; i < ARRAY_SIZE(bft2); i++)
+ writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
+ }
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ if (hpsa_wait_for_mode_change_ack(h)) {
+ dev_err(&h->pdev->dev,
+ "performant mode problem - enabling ioaccel mode\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+{
+ h->ioaccel_maxsg =
+ readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+ if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
+ h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
+
+ /* Command structures must be aligned on a 128-byte boundary
+ * because the 7 lower bits of the address are used by the
+ * hardware.
+ */
+ BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
+ IOACCEL1_COMMANDLIST_ALIGNMENT);
+ h->ioaccel_cmd_pool =
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ &(h->ioaccel_cmd_pool_dhandle));
+
+ h->ioaccel1_blockFetchTable =
+ kmalloc(((h->ioaccel_maxsg + 1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->ioaccel_cmd_pool == NULL) ||
+ (h->ioaccel1_blockFetchTable == NULL))
+ goto clean_up;
+
+ memset(h->ioaccel_cmd_pool, 0,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
+ return 0;
+
+clean_up:
+ if (h->ioaccel_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+ kfree(h->ioaccel1_blockFetchTable);
+ return 1;
+}
+
+static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+{
+ /* Allocate ioaccel2 mode command blocks and block fetch table */
+
+ h->ioaccel_maxsg =
+ readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+ if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
+ h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
+
+ BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
+ IOACCEL2_COMMANDLIST_ALIGNMENT);
+ h->ioaccel2_cmd_pool =
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ &(h->ioaccel2_cmd_pool_dhandle));
+
+ h->ioaccel2_blockFetchTable =
+ kmalloc(((h->ioaccel_maxsg + 1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->ioaccel2_cmd_pool == NULL) ||
+ (h->ioaccel2_blockFetchTable == NULL))
+ goto clean_up;
+
+ memset(h->ioaccel2_cmd_pool, 0,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
+ return 0;
+
+clean_up:
+ if (h->ioaccel2_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+ kfree(h->ioaccel2_blockFetchTable);
+ return 1;
+}
+
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+{
+ u32 trans_support;
+ unsigned long transMethod = CFGTBL_Trans_Performant |
+ CFGTBL_Trans_use_short_tags;
+ int i;
+
+ if (hpsa_simple_mode)
+ return;
+
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & PERFORMANT_MODE))
+ return;
+
+ /* Check for I/O accelerator mode support */
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ transMethod |= CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_enable_directed_msix;
+ if (hpsa_alloc_ioaccel_cmd_and_bft(h))
+ goto clean_up;
+ } else {
+ if (trans_support & CFGTBL_Trans_io_accel2) {
+ transMethod |= CFGTBL_Trans_io_accel2 |
+ CFGTBL_Trans_enable_directed_msix;
+ if (ioaccel2_alloc_cmds_and_bft(h))
+ goto clean_up;
+ }
+ }
+
+ h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+ hpsa_get_max_perf_mode_cmds(h);
+ /* Performant mode ring buffer and supporting data structures */
+ h->reply_queue_size = h->max_commands * sizeof(u64);
+
+ for (i = 0; i < h->nreply_queues; i++) {
+ h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+ h->reply_queue_size,
+ &(h->reply_queue[i].busaddr));
+ if (!h->reply_queue[i].head)
+ goto clean_up;
+ h->reply_queue[i].size = h->max_commands;
+ h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
+ h->reply_queue[i].current_entry = 0;
+ }
+
+ /* Need a block fetch table for performant mode */
+ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
+ sizeof(u32)), GFP_KERNEL);
+ if (!h->blockFetchTable)
+ goto clean_up;
+
+ hpsa_enter_performant_mode(h, trans_support);
+ return;
+
+clean_up:
+ hpsa_free_reply_queues(h);
+ kfree(h->blockFetchTable);
+}
+
+static int is_accelerated_cmd(struct CommandList *c)
+{
+ return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
+}
+
+static void hpsa_drain_accel_commands(struct ctlr_info *h)
+{
+ struct CommandList *c = NULL;
+ int i, accel_cmds_out;
+ int refcount;
+
+ do { /* wait for all outstanding ioaccel commands to drain out */
+ accel_cmds_out = 0;
+ for (i = 0; i < h->nr_cmds; i++) {
+ c = h->cmd_pool + i;
+ refcount = atomic_inc_return(&c->refcount);
+ if (refcount > 1) /* Command is allocated */
+ accel_cmds_out += is_accelerated_cmd(c);
+ cmd_free(h, c);
+ }
+ if (accel_cmds_out <= 0)
+ break;
+ msleep(100);
+ } while (1);
+}
+
+/*
+ * This is it. Register the PCI driver information for the cards we control
+ * the OS will call our registered routines when it finds one of our cards.
+ */
+static int __init hpsa_init(void)
+{
+ return pci_register_driver(&hpsa_pci_driver);
+}
+
+static void __exit hpsa_cleanup(void)
+{
+ pci_unregister_driver(&hpsa_pci_driver);
+}
+
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+ VERIFY_OFFSET(structure_size, 0);
+ VERIFY_OFFSET(volume_blk_size, 4);
+ VERIFY_OFFSET(volume_blk_cnt, 8);
+ VERIFY_OFFSET(phys_blk_shift, 16);
+ VERIFY_OFFSET(parity_rotation_shift, 17);
+ VERIFY_OFFSET(strip_size, 18);
+ VERIFY_OFFSET(disk_starting_blk, 20);
+ VERIFY_OFFSET(disk_blk_cnt, 28);
+ VERIFY_OFFSET(data_disks_per_row, 36);
+ VERIFY_OFFSET(metadata_disks_per_row, 38);
+ VERIFY_OFFSET(row_cnt, 40);
+ VERIFY_OFFSET(layout_map_count, 42);
+ VERIFY_OFFSET(flags, 44);
+ VERIFY_OFFSET(dekindex, 46);
+ /* VERIFY_OFFSET(reserved, 48 */
+ VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+ VERIFY_OFFSET(IU_type, 0);
+ VERIFY_OFFSET(direction, 1);
+ VERIFY_OFFSET(reply_queue, 2);
+ /* VERIFY_OFFSET(reserved1, 3); */
+ VERIFY_OFFSET(scsi_nexus, 4);
+ VERIFY_OFFSET(Tag, 8);
+ VERIFY_OFFSET(cdb, 16);
+ VERIFY_OFFSET(cciss_lun, 32);
+ VERIFY_OFFSET(data_len, 40);
+ VERIFY_OFFSET(cmd_priority_task_attr, 44);
+ VERIFY_OFFSET(sg_count, 45);
+ /* VERIFY_OFFSET(reserved3 */
+ VERIFY_OFFSET(err_ptr, 48);
+ VERIFY_OFFSET(err_len, 56);
+ /* VERIFY_OFFSET(reserved4 */
+ VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+ VERIFY_OFFSET(dev_handle, 0x00);
+ VERIFY_OFFSET(reserved1, 0x02);
+ VERIFY_OFFSET(function, 0x03);
+ VERIFY_OFFSET(reserved2, 0x04);
+ VERIFY_OFFSET(err_info, 0x0C);
+ VERIFY_OFFSET(reserved3, 0x10);
+ VERIFY_OFFSET(err_info_len, 0x12);
+ VERIFY_OFFSET(reserved4, 0x13);
+ VERIFY_OFFSET(sgl_offset, 0x14);
+ VERIFY_OFFSET(reserved5, 0x15);
+ VERIFY_OFFSET(transfer_len, 0x1C);
+ VERIFY_OFFSET(reserved6, 0x20);
+ VERIFY_OFFSET(io_flags, 0x24);
+ VERIFY_OFFSET(reserved7, 0x26);
+ VERIFY_OFFSET(LUN, 0x34);
+ VERIFY_OFFSET(control, 0x3C);
+ VERIFY_OFFSET(CDB, 0x40);
+ VERIFY_OFFSET(reserved8, 0x50);
+ VERIFY_OFFSET(host_context_flags, 0x60);
+ VERIFY_OFFSET(timeout_sec, 0x62);
+ VERIFY_OFFSET(ReplyQueue, 0x64);
+ VERIFY_OFFSET(reserved9, 0x65);
+ VERIFY_OFFSET(tag, 0x68);
+ VERIFY_OFFSET(host_addr, 0x70);
+ VERIFY_OFFSET(CISS_LUN, 0x78);
+ VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
+
+module_init(hpsa_init);
+module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 000000000..657713050
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,569 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_H
+#define HPSA_H
+
+#include <scsi/scsicam.h>
+
+#define IO_OK 0
+#define IO_ERROR 1
+
+struct ctlr_info;
+
+struct access_method {
+ void (*submit_command)(struct ctlr_info *h,
+ struct CommandList *c);
+ void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
+ bool (*intr_pending)(struct ctlr_info *h);
+ unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
+};
+
+struct hpsa_scsi_dev_t {
+ int devtype;
+ int bus, target, lun; /* as presented to the OS */
+ unsigned char scsi3addr[8]; /* as presented to the HW */
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+ unsigned char device_id[16]; /* from inquiry pg. 0x83 */
+ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
+ unsigned char model[16]; /* bytes 16-31 of inquiry data */
+ unsigned char raid_level; /* from inquiry page 0xC1 */
+ unsigned char volume_offline; /* discovered via TUR or VPD */
+ u16 queue_depth; /* max queue_depth for this device */
+ atomic_t ioaccel_cmds_out; /* Only used for physical devices
+ * counts commands sent to physical
+ * device via "ioaccel" path.
+ */
+ u32 ioaccel_handle;
+ int offload_config; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_to_mirror; /* Send next I/O accelerator RAID
+ * offload request to mirror drive
+ */
+ struct raid_map_data raid_map; /* I/O accelerator RAID map */
+
+ /*
+ * Pointers from logical drive map indices to the phys drives that
+ * make those logical drives. Note, multiple logical drives may
+ * share physical drives. You can have for instance 5 physical
+ * drives with 3 logical drives each using those same 5 physical
+ * disks. We need these pointers for counting i/o's out to physical
+ * devices in order to honor physical device queue depth limits.
+ */
+ struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
+};
+
+struct reply_queue_buffer {
+ u64 *head;
+ size_t size;
+ u8 wraparound;
+ u32 current_entry;
+ dma_addr_t busaddr;
+};
+
+#pragma pack(1)
+struct bmic_controller_parameters {
+ u8 led_flags;
+ u8 enable_command_list_verification;
+ u8 backed_out_write_drives;
+ u16 stripes_for_parity;
+ u8 parity_distribution_mode_flags;
+ u16 max_driver_requests;
+ u16 elevator_trend_count;
+ u8 disable_elevator;
+ u8 force_scan_complete;
+ u8 scsi_transfer_mode;
+ u8 force_narrow;
+ u8 rebuild_priority;
+ u8 expand_priority;
+ u8 host_sdb_asic_fix;
+ u8 pdpi_burst_from_host_disabled;
+ char software_name[64];
+ char hardware_name[32];
+ u8 bridge_revision;
+ u8 snapshot_priority;
+ u32 os_specific;
+ u8 post_prompt_timeout;
+ u8 automatic_drive_slamming;
+ u8 reserved1;
+ u8 nvram_flags;
+#define HBA_MODE_ENABLED_FLAG (1 << 3)
+ u8 cache_nvram_flags;
+ u8 drive_config_flags;
+ u16 reserved2;
+ u8 temp_warning_level;
+ u8 temp_shutdown_level;
+ u8 temp_condition_reset;
+ u8 max_coalesce_commands;
+ u32 max_coalesce_delay;
+ u8 orca_password[4];
+ u8 access_id[16];
+ u8 reserved[356];
+};
+#pragma pack()
+
+struct ctlr_info {
+ int ctlr;
+ char devname[8];
+ char *product_name;
+ struct pci_dev *pdev;
+ u32 board_id;
+ void __iomem *vaddr;
+ unsigned long paddr;
+ int nr_cmds; /* Number of commands allowed on this controller */
+#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
+#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
+ struct CfgTable __iomem *cfgtable;
+ int interrupts_enabled;
+ int max_commands;
+ int last_allocation;
+ atomic_t commands_outstanding;
+# define PERF_MODE_INT 0
+# define DOORBELL_INT 1
+# define SIMPLE_MODE_INT 2
+# define MEMQ_MODE_INT 3
+ unsigned int intr[MAX_REPLY_QUEUES];
+ unsigned int msix_vector;
+ unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
+ struct access_method access;
+ char hba_mode_enabled;
+
+ /* queue and queue Info */
+ unsigned int Qdepth;
+ unsigned int maxSG;
+ spinlock_t lock;
+ int maxsgentries;
+ u8 max_cmd_sg_entries;
+ int chainsize;
+ struct SGDescriptor **cmd_sg_list;
+
+ /* pointers to command and error info pool */
+ struct CommandList *cmd_pool;
+ dma_addr_t cmd_pool_dhandle;
+ struct io_accel1_cmd *ioaccel_cmd_pool;
+ dma_addr_t ioaccel_cmd_pool_dhandle;
+ struct io_accel2_cmd *ioaccel2_cmd_pool;
+ dma_addr_t ioaccel2_cmd_pool_dhandle;
+ struct ErrorInfo *errinfo_pool;
+ dma_addr_t errinfo_pool_dhandle;
+ unsigned long *cmd_pool_bits;
+ int scan_finished;
+ spinlock_t scan_lock;
+ wait_queue_head_t scan_wait_queue;
+
+ struct Scsi_Host *scsi_host;
+ spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
+ int ndevices; /* number of used elements in .dev[] array. */
+ struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
+ /*
+ * Performant mode tables.
+ */
+ u32 trans_support;
+ u32 trans_offset;
+ struct TransTable_struct __iomem *transtable;
+ unsigned long transMethod;
+
+ /* cap concurrent passthrus at some reasonable maximum */
+#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
+ atomic_t passthru_cmds_avail;
+
+ /*
+ * Performant mode completion buffers
+ */
+ size_t reply_queue_size;
+ struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
+ u8 nreply_queues;
+ u32 *blockFetchTable;
+ u32 *ioaccel1_blockFetchTable;
+ u32 *ioaccel2_blockFetchTable;
+ u32 __iomem *ioaccel2_bft2_regs;
+ unsigned char *hba_inquiry_data;
+ u32 driver_support;
+ u32 fw_support;
+ int ioaccel_support;
+ int ioaccel_maxsg;
+ u64 last_intr_timestamp;
+ u32 last_heartbeat;
+ u64 last_heartbeat_timestamp;
+ u32 heartbeat_sample_interval;
+ atomic_t firmware_flash_in_progress;
+ u32 __percpu *lockup_detected;
+ struct delayed_work monitor_ctlr_work;
+ struct delayed_work rescan_ctlr_work;
+ int remove_in_progress;
+ /* Address of h->q[x] is passed to intr handler to know which queue */
+ u8 q[MAX_REPLY_QUEUES];
+ u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
+#define HPSATMF_BITS_SUPPORTED (1 << 0)
+#define HPSATMF_PHYS_LUN_RESET (1 << 1)
+#define HPSATMF_PHYS_NEX_RESET (1 << 2)
+#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
+#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
+#define HPSATMF_PHYS_CLEAR_ACA (1 << 5)
+#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
+#define HPSATMF_PHYS_QRY_TASK (1 << 7)
+#define HPSATMF_PHYS_QRY_TSET (1 << 8)
+#define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
+#define HPSATMF_MASK_SUPPORTED (1 << 16)
+#define HPSATMF_LOG_LUN_RESET (1 << 17)
+#define HPSATMF_LOG_NEX_RESET (1 << 18)
+#define HPSATMF_LOG_TASK_ABORT (1 << 19)
+#define HPSATMF_LOG_TSET_ABORT (1 << 20)
+#define HPSATMF_LOG_CLEAR_ACA (1 << 21)
+#define HPSATMF_LOG_CLEAR_TSET (1 << 22)
+#define HPSATMF_LOG_QRY_TASK (1 << 23)
+#define HPSATMF_LOG_QRY_TSET (1 << 24)
+#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
+ u32 events;
+#define CTLR_STATE_CHANGE_EVENT (1 << 0)
+#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1)
+#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4)
+#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5)
+#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6)
+#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30)
+#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
+
+#define RESCAN_REQUIRED_EVENT_BITS \
+ (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+ CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
+ CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
+ CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
+ CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
+ spinlock_t offline_device_lock;
+ struct list_head offline_device_list;
+ int acciopath_status;
+ int raid_offload_debug;
+ struct workqueue_struct *resubmit_wq;
+ struct workqueue_struct *rescan_ctlr_wq;
+};
+
+struct offline_device_entry {
+ unsigned char scsi3addr[8];
+ struct list_head offline_list;
+};
+
+#define HPSA_ABORT_MSG 0
+#define HPSA_DEVICE_RESET_MSG 1
+#define HPSA_RESET_TYPE_CONTROLLER 0x00
+#define HPSA_RESET_TYPE_BUS 0x01
+#define HPSA_RESET_TYPE_TARGET 0x03
+#define HPSA_RESET_TYPE_LUN 0x04
+#define HPSA_MSG_SEND_RETRY_LIMIT 10
+#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
+
+/* Maximum time in seconds driver will wait for command completions
+ * when polling before giving up.
+ */
+#define HPSA_MAX_POLL_TIME_SECS (20)
+
+/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
+ * how many times to retry TEST UNIT READY on a device
+ * while waiting for it to become ready before giving up.
+ * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
+ * between sending TURs while waiting for a device
+ * to become ready.
+ */
+#define HPSA_TUR_RETRY_LIMIT (20)
+#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
+
+/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
+ * between polling the board to see if it is ready, in
+ * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
+ * HPSA_BOARD_READY_ITERATIONS are derived from those.
+ */
+#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
+#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define HPSA_BOARD_READY_POLL_INTERVAL \
+ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
+#define HPSA_BOARD_READY_ITERATIONS \
+ ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_BOARD_NOT_READY_ITERATIONS \
+ ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_POST_RESET_PAUSE_MSECS (3000)
+#define HPSA_POST_RESET_NOOP_RETRIES (12)
+
+/* Defining the diffent access_menthods */
+/*
+ * Memory mapped FIFO interface (SMART 53xx cards)
+ */
+#define SA5_DOORBELL 0x20
+#define SA5_REQUEST_PORT_OFFSET 0x40
+#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
+#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
+#define SA5_REPLY_INTR_MASK_OFFSET 0x34
+#define SA5_REPLY_PORT_OFFSET 0x44
+#define SA5_INTR_STATUS 0x30
+#define SA5_SCRATCHPAD_OFFSET 0xB0
+
+#define SA5_CTCFG_OFFSET 0xB4
+#define SA5_CTMEM_OFFSET 0xB8
+
+#define SA5_INTR_OFF 0x08
+#define SA5B_INTR_OFF 0x04
+#define SA5_INTR_PENDING 0x08
+#define SA5B_INTR_PENDING 0x04
+#define FIFO_EMPTY 0xffffffff
+#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
+
+#define HPSA_ERROR_BIT 0x02
+
+/* Performant mode flags */
+#define SA5_PERF_INTR_PENDING 0x04
+#define SA5_PERF_INTR_OFF 0x05
+#define SA5_OUTDB_STATUS_PERF_BIT 0x01
+#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
+#define SA5_OUTDB_CLEAR 0xA0
+#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
+#define SA5_OUTDB_STATUS 0x9C
+
+
+#define HPSA_INTR_ON 1
+#define HPSA_INTR_OFF 0
+
+/*
+ * Inbound Post Queue offsets for IO Accelerator Mode 2
+ */
+#define IOACCEL2_INBOUND_POSTQ_32 0x48
+#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
+#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4
+
+/*
+ Send the command to the hardware
+*/
+static void SA5_submit_command(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+}
+
+static void SA5_submit_command_no_read(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+}
+
+static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+}
+
+/*
+ * This card is the opposite of the other cards.
+ * 0 turns interrupts on...
+ * 0x08 turns them off...
+ */
+static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+ if (val) { /* Turn interrupts on */
+ h->interrupts_enabled = 1;
+ writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ } else { /* Turn them off */
+ h->interrupts_enabled = 0;
+ writel(SA5_INTR_OFF,
+ h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ }
+}
+
+static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+ if (val) { /* turn on interrupts */
+ h->interrupts_enabled = 1;
+ writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ } else {
+ h->interrupts_enabled = 0;
+ writel(SA5_PERF_INTR_OFF,
+ h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ }
+}
+
+static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
+{
+ struct reply_queue_buffer *rq = &h->reply_queue[q];
+ unsigned long register_value = FIFO_EMPTY;
+
+ /* msi auto clears the interrupt pending bit. */
+ if (unlikely(!(h->msi_vector || h->msix_vector))) {
+ /* flush the controller write of the reply queue by reading
+ * outbound doorbell status register.
+ */
+ (void) readl(h->vaddr + SA5_OUTDB_STATUS);
+ writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
+ /* Do a read in order to flush the write to the controller
+ * (as per spec.)
+ */
+ (void) readl(h->vaddr + SA5_OUTDB_STATUS);
+ }
+
+ if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
+ register_value = rq->head[rq->current_entry];
+ rq->current_entry++;
+ atomic_dec(&h->commands_outstanding);
+ } else {
+ register_value = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (rq->current_entry == h->max_commands) {
+ rq->current_entry = 0;
+ rq->wraparound ^= 1;
+ }
+ return register_value;
+}
+
+/*
+ * returns value read from hardware.
+ * returns FIFO_EMPTY if there is nothing to read
+ */
+static unsigned long SA5_completed(struct ctlr_info *h,
+ __attribute__((unused)) u8 q)
+{
+ unsigned long register_value
+ = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+
+ if (register_value != FIFO_EMPTY)
+ atomic_dec(&h->commands_outstanding);
+
+#ifdef HPSA_DEBUG
+ if (register_value != FIFO_EMPTY)
+ dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
+ register_value);
+ else
+ dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
+#endif
+
+ return register_value;
+}
+/*
+ * Returns true if an interrupt is pending..
+ */
+static bool SA5_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value =
+ readl(h->vaddr + SA5_INTR_STATUS);
+ return register_value & SA5_INTR_PENDING;
+}
+
+static bool SA5_performant_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+ if (!register_value)
+ return false;
+
+ /* Read outbound doorbell to flush */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+}
+
+#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100
+
+static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+ return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
+ true : false;
+}
+
+#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
+#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
+#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
+#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL
+
+static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+{
+ u64 register_value;
+ struct reply_queue_buffer *rq = &h->reply_queue[q];
+
+ BUG_ON(q >= h->nreply_queues);
+
+ register_value = rq->head[rq->current_entry];
+ if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
+ rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
+ if (++rq->current_entry == rq->size)
+ rq->current_entry = 0;
+ /*
+ * @todo
+ *
+ * Don't really need to write the new index after each command,
+ * but with current driver design this is easiest.
+ */
+ wmb();
+ writel((q << 24) | rq->current_entry, h->vaddr +
+ IOACCEL_MODE1_CONSUMER_INDEX);
+ atomic_dec(&h->commands_outstanding);
+ }
+ return (unsigned long) register_value;
+}
+
+static struct access_method SA5_access = {
+ SA5_submit_command,
+ SA5_intr_mask,
+ SA5_intr_pending,
+ SA5_completed,
+};
+
+static struct access_method SA5_ioaccel_mode1_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_ioaccel_mode1_intr_pending,
+ SA5_ioaccel_mode1_completed,
+};
+
+static struct access_method SA5_ioaccel_mode2_access = {
+ SA5_submit_command_ioaccel2,
+ SA5_performant_intr_mask,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
+static struct access_method SA5_performant_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
+static struct access_method SA5_performant_access_no_read = {
+ SA5_submit_command_no_read,
+ SA5_performant_intr_mask,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
+struct board_type {
+ u32 board_id;
+ char *product_name;
+ struct access_method *access;
+};
+
+#endif /* HPSA_H */
+
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 000000000..3a621c74b
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,794 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_CMD_H
+#define HPSA_CMD_H
+
+/* general boundary defintions */
+#define SENSEINFOBYTES 32 /* may vary between hbas */
+#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
+#define HPSA_SG_CHAIN 0x80000000
+#define HPSA_SG_LAST 0x40000000
+#define MAXREPLYQS 256
+
+/* Command Status value */
+#define CMD_SUCCESS 0x0000
+#define CMD_TARGET_STATUS 0x0001
+#define CMD_DATA_UNDERRUN 0x0002
+#define CMD_DATA_OVERRUN 0x0003
+#define CMD_INVALID 0x0004
+#define CMD_PROTOCOL_ERR 0x0005
+#define CMD_HARDWARE_ERR 0x0006
+#define CMD_CONNECTION_LOST 0x0007
+#define CMD_ABORTED 0x0008
+#define CMD_ABORT_FAILED 0x0009
+#define CMD_UNSOLICITED_ABORT 0x000A
+#define CMD_TIMEOUT 0x000B
+#define CMD_UNABORTABLE 0x000C
+#define CMD_IOACCEL_DISABLED 0x000E
+
+
+/* Unit Attentions ASC's as defined for the MSA2012sa */
+#define POWER_OR_RESET 0x29
+#define STATE_CHANGED 0x2a
+#define UNIT_ATTENTION_CLEARED 0x2f
+#define LUN_FAILED 0x3e
+#define REPORT_LUNS_CHANGED 0x3f
+
+/* Unit Attentions ASCQ's as defined for the MSA2012sa */
+
+ /* These ASCQ's defined for ASC = POWER_OR_RESET */
+#define POWER_ON_RESET 0x00
+#define POWER_ON_REBOOT 0x01
+#define SCSI_BUS_RESET 0x02
+#define MSA_TARGET_RESET 0x03
+#define CONTROLLER_FAILOVER 0x04
+#define TRANSCEIVER_SE 0x05
+#define TRANSCEIVER_LVD 0x06
+
+ /* These ASCQ's defined for ASC = STATE_CHANGED */
+#define RESERVATION_PREEMPTED 0x03
+#define ASYM_ACCESS_CHANGED 0x06
+#define LUN_CAPACITY_CHANGED 0x09
+
+/* transfer direction */
+#define XFER_NONE 0x00
+#define XFER_WRITE 0x01
+#define XFER_READ 0x02
+#define XFER_RSVD 0x03
+
+/* task attribute */
+#define ATTR_UNTAGGED 0x00
+#define ATTR_SIMPLE 0x04
+#define ATTR_HEADOFQUEUE 0x05
+#define ATTR_ORDERED 0x06
+#define ATTR_ACA 0x07
+
+/* cdb type */
+#define TYPE_CMD 0x00
+#define TYPE_MSG 0x01
+#define TYPE_IOACCEL2_CMD 0x81 /* 0x81 is not used by hardware */
+
+/* Message Types */
+#define HPSA_TASK_MANAGEMENT 0x00
+#define HPSA_RESET 0x01
+#define HPSA_SCAN 0x02
+#define HPSA_NOOP 0x03
+
+#define HPSA_CTLR_RESET_TYPE 0x00
+#define HPSA_BUS_RESET_TYPE 0x01
+#define HPSA_TARGET_RESET_TYPE 0x03
+#define HPSA_LUN_RESET_TYPE 0x04
+#define HPSA_NEXUS_RESET_TYPE 0x05
+
+/* Task Management Functions */
+#define HPSA_TMF_ABORT_TASK 0x00
+#define HPSA_TMF_ABORT_TASK_SET 0x01
+#define HPSA_TMF_CLEAR_ACA 0x02
+#define HPSA_TMF_CLEAR_TASK_SET 0x03
+#define HPSA_TMF_QUERY_TASK 0x04
+#define HPSA_TMF_QUERY_TASK_SET 0x05
+#define HPSA_TMF_QUERY_ASYNCEVENT 0x06
+
+
+
+/* config space register offsets */
+#define CFG_VENDORID 0x00
+#define CFG_DEVICEID 0x02
+#define CFG_I2OBAR 0x10
+#define CFG_MEM1BAR 0x14
+
+/* i2o space register offsets */
+#define I2O_IBDB_SET 0x20
+#define I2O_IBDB_CLEAR 0x70
+#define I2O_INT_STATUS 0x30
+#define I2O_INT_MASK 0x34
+#define I2O_IBPOST_Q 0x40
+#define I2O_OBPOST_Q 0x44
+#define I2O_DMA1_CFG 0x214
+
+/* Configuration Table */
+#define CFGTBL_ChangeReq 0x00000001l
+#define CFGTBL_AccCmds 0x00000001l
+#define DOORBELL_CTLR_RESET 0x00000004l
+#define DOORBELL_CTLR_RESET2 0x00000020l
+#define DOORBELL_CLEAR_EVENTS 0x00000040l
+
+#define CFGTBL_Trans_Simple 0x00000002l
+#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_io_accel1 0x00000080l
+#define CFGTBL_Trans_io_accel2 0x00000100l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
+#define CFGTBL_Trans_enable_directed_msix (1 << 30)
+
+#define CFGTBL_BusType_Ultra2 0x00000001l
+#define CFGTBL_BusType_Ultra3 0x00000002l
+#define CFGTBL_BusType_Fibre1G 0x00000100l
+#define CFGTBL_BusType_Fibre2G 0x00000200l
+
+/* VPD Inquiry types */
+#define HPSA_VPD_SUPPORTED_PAGES 0x00
+#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
+#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
+#define HPSA_VPD_LV_STATUS 0xC3
+#define HPSA_VPD_HEADER_SZ 4
+
+/* Logical volume states */
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
+#define HPSA_LV_OK 0x0
+#define HPSA_LV_UNDERGOING_ERASE 0x0F
+#define HPSA_LV_UNDERGOING_RPI 0x12
+#define HPSA_LV_PENDING_RPI 0x13
+#define HPSA_LV_ENCRYPTED_NO_KEY 0x14
+#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
+#define HPSA_LV_UNDERGOING_ENCRYPTION 0x16
+#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
+#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
+#define HPSA_LV_PENDING_ENCRYPTION 0x19
+#define HPSA_LV_PENDING_ENCRYPTION_REKEYING 0x1A
+
+struct vals32 {
+ u32 lower;
+ u32 upper;
+};
+
+union u64bit {
+ struct vals32 val32;
+ u64 val;
+};
+
+/* FIXME this is a per controller value (barf!) */
+#define HPSA_MAX_LUN 1024
+#define HPSA_MAX_PHYS_LUN 1024
+#define MAX_EXT_TARGETS 32
+#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
+ MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
+
+/* SCSI-3 Commands */
+#pragma pack(1)
+
+#define HPSA_INQUIRY 0x12
+struct InquiryData {
+ u8 data_byte[36];
+};
+
+#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
+#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+#define HPSA_REPORT_PHYS_EXTENDED 0x02
+#define HPSA_CISS_READ 0xc0 /* CISS Read */
+#define HPSA_GET_RAID_MAP 0xc8 /* CISS Get RAID Layout Map */
+
+#define RAID_MAP_MAX_ENTRIES 256
+
+struct raid_map_disk_data {
+ u32 ioaccel_handle; /**< Handle to access this disk via the
+ * I/O accelerator */
+ u8 xor_mult[2]; /**< XOR multipliers for this position,
+ * valid for data disks only */
+ u8 reserved[2];
+};
+
+struct raid_map_data {
+ __le32 structure_size; /* Size of entire structure in bytes */
+ __le32 volume_blk_size; /* bytes / block in the volume */
+ __le64 volume_blk_cnt; /* logical blocks on the volume */
+ u8 phys_blk_shift; /* Shift factor to convert between
+ * units of logical blocks and physical
+ * disk blocks */
+ u8 parity_rotation_shift; /* Shift factor to convert between units
+ * of logical stripes and physical
+ * stripes */
+ __le16 strip_size; /* blocks used on each disk / stripe */
+ __le64 disk_starting_blk; /* First disk block used in volume */
+ __le64 disk_blk_cnt; /* disk blocks used by volume / disk */
+ __le16 data_disks_per_row; /* data disk entries / row in the map */
+ __le16 metadata_disks_per_row;/* mirror/parity disk entries / row
+ * in the map */
+ __le16 row_cnt; /* rows in each layout map */
+ __le16 layout_map_count; /* layout maps (1 map per mirror/parity
+ * group) */
+ __le16 flags; /* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
+ __le16 dekindex; /* Data encryption key index. */
+ u8 reserved[16];
+ struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
+};
+
+struct ReportLUNdata {
+ u8 LUNListLength[4];
+ u8 extended_response_flag;
+ u8 reserved[3];
+ u8 LUN[HPSA_MAX_LUN][8];
+};
+
+struct ext_report_lun_entry {
+ u8 lunid[8];
+#define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F)
+#define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6])
+#define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \
+ GET_BMIC_LEVEL_TWO_TARGET((lunid)))
+ u8 wwid[8];
+ u8 device_type;
+ u8 device_flags;
+ u8 lun_count; /* multi-lun device, how many luns */
+ u8 redundant_paths;
+ u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
+};
+
+struct ReportExtendedLUNdata {
+ u8 LUNListLength[4];
+ u8 extended_response_flag;
+ u8 reserved[3];
+ struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
+};
+
+struct SenseSubsystem_info {
+ u8 reserved[36];
+ u8 portname[8];
+ u8 reserved1[1108];
+};
+
+/* BMIC commands */
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_CACHE_FLUSH 0xc2
+#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
+#define BMIC_FLASH_FIRMWARE 0xF7
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
+
+/* Command List Structure */
+union SCSI3Addr {
+ struct {
+ u8 Dev;
+ u8 Bus:6;
+ u8 Mode:2; /* b00 */
+ } PeripDev;
+ struct {
+ u8 DevLSB;
+ u8 DevMSB:6;
+ u8 Mode:2; /* b01 */
+ } LogDev;
+ struct {
+ u8 Dev:5;
+ u8 Bus:3;
+ u8 Targ:6;
+ u8 Mode:2; /* b10 */
+ } LogUnit;
+};
+
+struct PhysDevAddr {
+ u32 TargetId:24;
+ u32 Bus:6;
+ u32 Mode:2;
+ /* 2 level target device addr */
+ union SCSI3Addr Target[2];
+};
+
+struct LogDevAddr {
+ u32 VolId:30;
+ u32 Mode:2;
+ u8 reserved[4];
+};
+
+union LUNAddr {
+ u8 LunAddrBytes[8];
+ union SCSI3Addr SCSI3Lun[4];
+ struct PhysDevAddr PhysDev;
+ struct LogDevAddr LogDev;
+};
+
+struct CommandListHeader {
+ u8 ReplyQueue;
+ u8 SGList;
+ __le16 SGTotal;
+ __le64 tag;
+ union LUNAddr LUN;
+};
+
+struct RequestBlock {
+ u8 CDBLen;
+ /*
+ * type_attr_dir:
+ * type: low 3 bits
+ * attr: middle 3 bits
+ * dir: high 2 bits
+ */
+ u8 type_attr_dir;
+#define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\
+ (((a) & 0x07) << 3) |\
+ ((t) & 0x07))
+#define GET_TYPE(tad) ((tad) & 0x07)
+#define GET_ATTR(tad) (((tad) >> 3) & 0x07)
+#define GET_DIR(tad) (((tad) >> 6) & 0x03)
+ u16 Timeout;
+ u8 CDB[16];
+};
+
+struct ErrDescriptor {
+ __le64 Addr;
+ __le32 Len;
+};
+
+struct SGDescriptor {
+ __le64 Addr;
+ __le32 Len;
+ __le32 Ext;
+};
+
+union MoreErrInfo {
+ struct {
+ u8 Reserved[3];
+ u8 Type;
+ u32 ErrorInfo;
+ } Common_Info;
+ struct {
+ u8 Reserved[2];
+ u8 offense_size; /* size of offending entry */
+ u8 offense_num; /* byte # of offense 0-base */
+ u32 offense_value;
+ } Invalid_Cmd;
+};
+struct ErrorInfo {
+ u8 ScsiStatus;
+ u8 SenseLen;
+ u16 CommandStatus;
+ u32 ResidualCnt;
+ union MoreErrInfo MoreErrInfo;
+ u8 SenseInfo[SENSEINFOBYTES];
+};
+/* Command types */
+#define CMD_IOCTL_PEND 0x01
+#define CMD_SCSI 0x03
+#define CMD_IOACCEL1 0x04
+#define CMD_IOACCEL2 0x05
+
+#define DIRECT_LOOKUP_SHIFT 4
+#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
+
+#define HPSA_ERROR_BIT 0x02
+struct ctlr_info; /* defined in hpsa.h */
+/* The size of this structure needs to be divisible by 128
+ * on all architectures. The low 4 bits of the addresses
+ * are used as follows:
+ *
+ * bit 0: to device, used to indicate "performant mode" command
+ * from device, indidcates error status.
+ * bit 1-3: to device, indicates block fetch table entry for
+ * reducing DMA in fetching commands from host memory.
+ */
+
+#define COMMANDLIST_ALIGNMENT 128
+struct CommandList {
+ struct CommandListHeader Header;
+ struct RequestBlock Request;
+ struct ErrDescriptor ErrDesc;
+ struct SGDescriptor SG[SG_ENTRIES_IN_CMD];
+ /* information associated with the command */
+ u32 busaddr; /* physical addr of this record */
+ struct ErrorInfo *err_info; /* pointer to the allocated mem */
+ struct ctlr_info *h;
+ int cmd_type;
+ long cmdindex;
+ struct completion *waiting;
+ struct scsi_cmnd *scsi_cmd;
+ struct work_struct work;
+
+ /*
+ * For commands using either of the two "ioaccel" paths to
+ * bypass the RAID stack and go directly to the physical disk
+ * phys_disk is a pointer to the hpsa_scsi_dev_t to which the
+ * i/o is destined. We need to store that here because the command
+ * may potentially encounter TASK SET FULL and need to be resubmitted
+ * For "normal" i/o's not using the "ioaccel" paths, phys_disk is
+ * not used.
+ */
+ struct hpsa_scsi_dev_t *phys_disk;
+ atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */
+} __aligned(COMMANDLIST_ALIGNMENT);
+
+/* Max S/G elements in I/O accelerator command */
+#define IOACCEL1_MAXSGENTRIES 24
+#define IOACCEL2_MAXSGENTRIES 28
+
+/*
+ * Structure for I/O accelerator (mode 1) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
+struct io_accel1_cmd {
+ __le16 dev_handle; /* 0x00 - 0x01 */
+ u8 reserved1; /* 0x02 */
+ u8 function; /* 0x03 */
+ u8 reserved2[8]; /* 0x04 - 0x0B */
+ u32 err_info; /* 0x0C - 0x0F */
+ u8 reserved3[2]; /* 0x10 - 0x11 */
+ u8 err_info_len; /* 0x12 */
+ u8 reserved4; /* 0x13 */
+ u8 sgl_offset; /* 0x14 */
+ u8 reserved5[7]; /* 0x15 - 0x1B */
+ __le32 transfer_len; /* 0x1C - 0x1F */
+ u8 reserved6[4]; /* 0x20 - 0x23 */
+ __le16 io_flags; /* 0x24 - 0x25 */
+ u8 reserved7[14]; /* 0x26 - 0x33 */
+ u8 LUN[8]; /* 0x34 - 0x3B */
+ __le32 control; /* 0x3C - 0x3F */
+ u8 CDB[16]; /* 0x40 - 0x4F */
+ u8 reserved8[16]; /* 0x50 - 0x5F */
+ __le16 host_context_flags; /* 0x60 - 0x61 */
+ __le16 timeout_sec; /* 0x62 - 0x63 */
+ u8 ReplyQueue; /* 0x64 */
+ u8 reserved9[3]; /* 0x65 - 0x67 */
+ __le64 tag; /* 0x68 - 0x6F */
+ __le64 host_addr; /* 0x70 - 0x77 */
+ u8 CISS_LUN[8]; /* 0x78 - 0x7F */
+ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
+} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
+
+#define IOACCEL1_FUNCTION_SCSIIO 0x00
+#define IOACCEL1_SGLOFFSET 32
+
+#define IOACCEL1_IOFLAGS_IO_REQ 0x4000
+#define IOACCEL1_IOFLAGS_CDBLEN_MASK 0x001F
+#define IOACCEL1_IOFLAGS_CDBLEN_MAX 16
+
+#define IOACCEL1_CONTROL_NODATAXFER 0x00000000
+#define IOACCEL1_CONTROL_DATA_OUT 0x01000000
+#define IOACCEL1_CONTROL_DATA_IN 0x02000000
+#define IOACCEL1_CONTROL_TASKPRIO_MASK 0x00007800
+#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11
+#define IOACCEL1_CONTROL_SIMPLEQUEUE 0x00000000
+#define IOACCEL1_CONTROL_HEADOFQUEUE 0x00000100
+#define IOACCEL1_CONTROL_ORDEREDQUEUE 0x00000200
+#define IOACCEL1_CONTROL_ACA 0x00000400
+
+#define IOACCEL1_HCFLAGS_CISS_FORMAT 0x0013
+
+#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060
+
+struct ioaccel2_sg_element {
+ __le64 address;
+ __le32 length;
+ u8 reserved[3];
+ u8 chain_indicator;
+#define IOACCEL2_CHAIN 0x80
+};
+
+/*
+ * SCSI Response Format structure for IO Accelerator Mode 2
+ */
+struct io_accel2_scsi_response {
+ u8 IU_type;
+#define IOACCEL2_IU_TYPE_SRF 0x60
+ u8 reserved1[3];
+ u8 req_id[4]; /* request identifier */
+ u8 reserved2[4];
+ u8 serv_response; /* service response */
+#define IOACCEL2_SERV_RESPONSE_COMPLETE 0x000
+#define IOACCEL2_SERV_RESPONSE_FAILURE 0x001
+#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE 0x002
+#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS 0x003
+#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED 0x004
+#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN 0x005
+ u8 status; /* status */
+#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD 0x00
+#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND 0x02
+#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY 0x08
+#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON 0x18
+#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28
+#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40
+#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E
+ u8 data_present; /* low 2 bits */
+#define IOACCEL2_NO_DATAPRESENT 0x000
+#define IOACCEL2_RESPONSE_DATAPRESENT 0x001
+#define IOACCEL2_SENSE_DATA_PRESENT 0x002
+#define IOACCEL2_RESERVED 0x003
+ u8 sense_data_len; /* sense/response data length */
+ u8 resid_cnt[4]; /* residual count */
+ u8 sense_data_buff[32]; /* sense/response data buffer */
+};
+
+/*
+ * Structure for I/O accelerator (mode 2 or m2) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
+struct io_accel2_cmd {
+ u8 IU_type; /* IU Type */
+ u8 direction; /* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */
+ /* 0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */
+ /* 0=off, 1=on */
+ u8 reply_queue; /* Reply Queue ID */
+ u8 reserved1; /* Reserved */
+ __le32 scsi_nexus; /* Device Handle */
+ __le32 Tag; /* cciss tag, lower 4 bytes only */
+ __le32 tweak_lower; /* Encryption tweak, lower 4 bytes */
+ u8 cdb[16]; /* SCSI Command Descriptor Block */
+ u8 cciss_lun[8]; /* 8 byte SCSI address */
+ __le32 data_len; /* Total bytes to transfer */
+ u8 cmd_priority_task_attr; /* priority and task attrs */
+#define IOACCEL2_PRIORITY_MASK 0x78
+#define IOACCEL2_ATTR_MASK 0x07
+ u8 sg_count; /* Number of sg elements */
+ __le16 dekindex; /* Data encryption key index */
+ __le64 err_ptr; /* Error Pointer */
+ __le32 err_len; /* Error Length*/
+ __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */
+ struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
+ struct io_accel2_scsi_response error_data;
+} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+
+/*
+ * defines for Mode 2 command struct
+ * FIXME: this can't be all I need mfm
+ */
+#define IOACCEL2_IU_TYPE 0x40
+#define IOACCEL2_IU_TMF_TYPE 0x41
+#define IOACCEL2_DIR_NO_DATA 0x00
+#define IOACCEL2_DIR_DATA_IN 0x01
+#define IOACCEL2_DIR_DATA_OUT 0x02
+/*
+ * SCSI Task Management Request format for Accelerator Mode 2
+ */
+struct hpsa_tmf_struct {
+ u8 iu_type; /* Information Unit Type */
+ u8 reply_queue; /* Reply Queue ID */
+ u8 tmf; /* Task Management Function */
+ u8 reserved1; /* byte 3 Reserved */
+ u32 it_nexus; /* SCSI I-T Nexus */
+ u8 lun_id[8]; /* LUN ID for TMF request */
+ __le64 tag; /* cciss tag associated w/ request */
+ __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */
+ __le64 error_ptr; /* Error Pointer */
+ __le32 error_len; /* Error Length */
+};
+
+/* Configuration Table Structure */
+struct HostWrite {
+ __le32 TransportRequest;
+ __le32 command_pool_addr_hi;
+ __le32 CoalIntDelay;
+ __le32 CoalIntCount;
+};
+
+#define SIMPLE_MODE 0x02
+#define PERFORMANT_MODE 0x04
+#define MEMQ_MODE 0x08
+#define IOACCEL_MODE_1 0x80
+
+#define DRIVER_SUPPORT_UA_ENABLE 0x00000001
+
+struct CfgTable {
+ u8 Signature[4];
+ __le32 SpecValence;
+ __le32 TransportSupport;
+ __le32 TransportActive;
+ struct HostWrite HostWrite;
+ __le32 CmdsOutMax;
+ __le32 BusTypes;
+ __le32 TransMethodOffset;
+ u8 ServerName[16];
+ __le32 HeartBeat;
+ __le32 driver_support;
+#define ENABLE_SCSI_PREFETCH 0x100
+#define ENABLE_UNIT_ATTN 0x01
+ __le32 MaxScatterGatherElements;
+ __le32 MaxLogicalUnits;
+ __le32 MaxPhysicalDevices;
+ __le32 MaxPhysicalDrivesPerLogicalUnit;
+ __le32 MaxPerformantModeCommands;
+ __le32 MaxBlockFetch;
+ __le32 PowerConservationSupport;
+ __le32 PowerConservationEnable;
+ __le32 TMFSupportFlags;
+ u8 TMFTagMask[8];
+ u8 reserved[0x78 - 0x70];
+ __le32 misc_fw_support; /* offset 0x78 */
+#define MISC_FW_DOORBELL_RESET 0x02
+#define MISC_FW_DOORBELL_RESET2 0x010
+#define MISC_FW_RAID_OFFLOAD_BASIC 0x020
+#define MISC_FW_EVENT_NOTIFY 0x080
+ u8 driver_version[32];
+ __le32 max_cached_write_size;
+ u8 driver_scratchpad[16];
+ __le32 max_error_info_length;
+ __le32 io_accel_max_embedded_sg_count;
+ __le32 io_accel_request_size_offset;
+ __le32 event_notify;
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+ __le32 clear_event_notify;
+};
+
+#define NUM_BLOCKFETCH_ENTRIES 8
+struct TransTable_struct {
+ __le32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
+ __le32 RepQSize;
+ __le32 RepQCount;
+ __le32 RepQCtrAddrLow32;
+ __le32 RepQCtrAddrHigh32;
+#define MAX_REPLY_QUEUES 64
+ struct vals32 RepQAddr[MAX_REPLY_QUEUES];
+};
+
+struct hpsa_pci_info {
+ unsigned char bus;
+ unsigned char dev_fn;
+ unsigned short domain;
+ u32 board_id;
+};
+
+struct bmic_identify_physical_device {
+ u8 scsi_bus; /* SCSI Bus number on controller */
+ u8 scsi_id; /* SCSI ID on this bus */
+ __le16 block_size; /* sector size in bytes */
+ __le32 total_blocks; /* number for sectors on drive */
+ __le32 reserved_blocks; /* controller reserved (RIS) */
+ u8 model[40]; /* Physical Drive Model */
+ u8 serial_number[40]; /* Drive Serial Number */
+ u8 firmware_revision[8]; /* drive firmware revision */
+ u8 scsi_inquiry_bits; /* inquiry byte 7 bits */
+ u8 compaq_drive_stamp; /* 0 means drive not stamped */
+ u8 last_failure_reason;
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG 0x01
+#define BMIC_LAST_FAILURE_ERROR_ERASING_RIS 0x02
+#define BMIC_LAST_FAILURE_ERROR_SAVING_RIS 0x03
+#define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND 0x04
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED 0x05
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP 0x06
+#define BMIC_LAST_FAILURE_TIMEOUT 0x07
+#define BMIC_LAST_FAILURE_AUTOSENSE_FAILED 0x08
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_1 0x09
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_2 0x0a
+#define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE 0x0b
+#define BMIC_LAST_FAILURE_NOT_READY 0x0c
+#define BMIC_LAST_FAILURE_HARDWARE_ERROR 0x0d
+#define BMIC_LAST_FAILURE_ABORTED_COMMAND 0x0e
+#define BMIC_LAST_FAILURE_WRITE_PROTECTED 0x0f
+#define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER 0x10
+#define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR 0x11
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG 0x12
+#define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED 0x13
+#define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG 0x14
+#define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED 0x15
+#define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED 0x16
+#define BMIC_LAST_FAILURE_INQUIRY_FAILED 0x17
+#define BMIC_LAST_FAILURE_NON_DISK_DEVICE 0x18
+#define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED 0x19
+#define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE 0x1a
+#define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED 0x1b
+#define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED 0x1c
+#define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP 0x1d
+#define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED 0x1e
+#define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR 0x1f
+#define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS 0x20
+#define BMIC_LAST_FAILURE_WRONG_REPLACE 0x21
+#define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED 0x22
+#define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED 0x23
+#define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE 0x24
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG 0x25
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG 0x26
+#define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED 0x27
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY 0x28
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED 0x29
+#define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY 0x2a
+#define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED 0x2b
+
+#define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED 0x37
+#define BMIC_LAST_FAILURE_PHY_RESET_FAILED 0x38
+#define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE 0x40
+#define BMIC_LAST_FAILURE_KC_VOLUME_FAILED 0x41
+#define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT 0x42
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE 0x80
+#define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL 0x81
+#define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX 0x82
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE 0x83
+
+ u8 flags;
+ u8 more_flags;
+ u8 scsi_lun; /* SCSI LUN for phys drive */
+ u8 yet_more_flags;
+ u8 even_more_flags;
+ __le32 spi_speed_rules;/* SPI Speed data:Ultra disable diagnose */
+ u8 phys_connector[2]; /* connector number on controller */
+ u8 phys_box_on_bus; /* phys enclosure this drive resides */
+ u8 phys_bay_in_box; /* phys drv bay this drive resides */
+ __le32 rpm; /* Drive rotational speed in rpm */
+ u8 device_type; /* type of drive */
+ u8 sata_version; /* only valid when drive_type is SATA */
+ __le64 big_total_block_count;
+ __le64 ris_starting_lba;
+ __le32 ris_size;
+ u8 wwid[20];
+ u8 controller_phy_map[32];
+ __le16 phy_count;
+ u8 phy_connected_dev_type[256];
+ u8 phy_to_drive_bay_num[256];
+ __le16 phy_to_attached_dev_index[256];
+ u8 box_index;
+ u8 reserved;
+ __le16 extra_physical_drive_flags;
+#define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \
+ (idphydrv->extra_physical_drive_flags & (1 << 10))
+ u8 negotiated_link_rate[256];
+ u8 phy_to_phy_map[256];
+ u8 redundant_path_present_map;
+ u8 redundant_path_failure_map;
+ u8 active_path_number;
+ __le16 alternate_paths_phys_connector[8];
+ u8 alternate_paths_phys_box_on_port[8];
+ u8 multi_lun_device_lun_count;
+ u8 minimum_good_fw_revision[8];
+ u8 unique_inquiry_bytes[20];
+ u8 current_temperature_degreesC;
+ u8 temperature_threshold_degreesC;
+ u8 max_temperature_degreesC;
+ u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
+ __le16 current_queue_depth_limit;
+ u8 switch_name[10];
+ __le16 switch_port;
+ u8 alternate_paths_switch_name[40];
+ u8 alternate_paths_switch_port[8];
+ __le16 power_on_hours; /* valid only if gas gauge supported */
+ __le16 percent_endurance_used; /* valid only if gas gauge supported. */
+#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
+ ((idphydrv->percent_endurance_used & 0x80) || \
+ (idphydrv->percent_endurance_used > 10000))
+ u8 drive_authentication;
+#define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \
+ (idphydrv->drive_authentication == 0x80)
+ u8 smart_carrier_authentication;
+#define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \
+ (idphydrv->smart_carrier_authentication != 0x0)
+#define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \
+ (idphydrv->smart_carrier_authentication == 0x01)
+ u8 smart_carrier_app_fw_version;
+ u8 smart_carrier_bootloader_fw_version;
+ u8 encryption_key_name[64];
+ __le32 misc_drive_flags;
+ __le16 dek_index;
+ u8 padding[112];
+};
+
+#pragma pack()
+#endif /* HPSA_CMD_H */
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
new file mode 100644
index 000000000..e99521847
--- /dev/null
+++ b/drivers/scsi/hptiop.c
@@ -0,0 +1,1685 @@
+/*
+ * HighPoint RR3xxx/4xxx controller driver for Linux
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Please report bugs/comments/suggestions to linux@highpoint-tech.com
+ *
+ * For more information, visit http://www.highpoint-tech.com
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/div64.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+
+#include "hptiop.h"
+
+MODULE_AUTHOR("HighPoint Technologies, Inc.");
+MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
+
+static char driver_name[] = "hptiop";
+static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
+static const char driver_ver[] = "v1.8";
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+ struct hpt_iop_request_scsi_command *req);
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
+
+static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
+{
+ u32 req = 0;
+ int i;
+
+ for (i = 0; i < millisec; i++) {
+ req = readl(&hba->u.itl.iop->inbound_queue);
+ if (req != IOPMU_QUEUE_EMPTY)
+ break;
+ msleep(1);
+ }
+
+ if (req != IOPMU_QUEUE_EMPTY) {
+ writel(req, &hba->u.itl.iop->outbound_queue);
+ readl(&hba->u.itl.iop->outbound_intstatus);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
+{
+ return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
+{
+ return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
+{
+ if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
+ hptiop_host_request_callback_itl(hba,
+ tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
+ else
+ hptiop_iop_request_callback_itl(hba, tag);
+}
+
+static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
+{
+ u32 req;
+
+ while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
+ IOPMU_QUEUE_EMPTY) {
+
+ if (req & IOPMU_QUEUE_MASK_HOST_BITS)
+ hptiop_request_callback_itl(hba, req);
+ else {
+ struct hpt_iop_request_header __iomem * p;
+
+ p = (struct hpt_iop_request_header __iomem *)
+ ((char __iomem *)hba->u.itl.iop + req);
+
+ if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
+ if (readl(&p->context))
+ hptiop_request_callback_itl(hba, req);
+ else
+ writel(1, &p->context);
+ }
+ else
+ hptiop_request_callback_itl(hba, req);
+ }
+ }
+}
+
+static int iop_intr_itl(struct hptiop_hba *hba)
+{
+ struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
+ void __iomem *plx = hba->u.itl.plx;
+ u32 status;
+ int ret = 0;
+
+ if (plx && readl(plx + 0x11C5C) & 0xf)
+ writel(1, plx + 0x11C60);
+
+ status = readl(&iop->outbound_intstatus);
+
+ if (status & IOPMU_OUTBOUND_INT_MSG0) {
+ u32 msg = readl(&iop->outbound_msgaddr0);
+
+ dprintk("received outbound msg %x\n", msg);
+ writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
+ hptiop_message_callback(hba, msg);
+ ret = 1;
+ }
+
+ if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
+ hptiop_drain_outbound_queue_itl(hba);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
+{
+ u32 outbound_tail = readl(&mu->outbound_tail);
+ u32 outbound_head = readl(&mu->outbound_head);
+
+ if (outbound_tail != outbound_head) {
+ u64 p;
+
+ memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
+ outbound_tail++;
+
+ if (outbound_tail == MVIOP_QUEUE_LEN)
+ outbound_tail = 0;
+ writel(outbound_tail, &mu->outbound_tail);
+ return p;
+ } else
+ return 0;
+}
+
+static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
+{
+ u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
+ u32 head = inbound_head + 1;
+
+ if (head == MVIOP_QUEUE_LEN)
+ head = 0;
+
+ memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
+ writel(head, &hba->u.mv.mu->inbound_head);
+ writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
+ &hba->u.mv.regs->inbound_doorbell);
+}
+
+static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
+{
+ u32 req_type = (tag >> 5) & 0x7;
+ struct hpt_iop_request_scsi_command *req;
+
+ dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
+
+ BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->msg_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ req = hba->reqs[tag >> 8].req_virt;
+ if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+
+ hptiop_finish_scsi_req(hba, tag>>8, req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iop_intr_mv(struct hptiop_hba *hba)
+{
+ u32 status;
+ int ret = 0;
+
+ status = readl(&hba->u.mv.regs->outbound_doorbell);
+ writel(~status, &hba->u.mv.regs->outbound_doorbell);
+
+ if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
+ u32 msg;
+ msg = readl(&hba->u.mv.mu->outbound_msg);
+ dprintk("received outbound msg %x\n", msg);
+ hptiop_message_callback(hba, msg);
+ ret = 1;
+ }
+
+ if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
+ u64 tag;
+
+ while ((tag = mv_outbound_read(hba->u.mv.mu)))
+ hptiop_request_callback_mv(hba, tag);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
+{
+ u32 req_type = _tag & 0xf;
+ struct hpt_iop_request_scsi_command *req;
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->msg_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
+ if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = IOP_RESULT_SUCCESS;
+ hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iop_intr_mvfrey(struct hptiop_hba *hba)
+{
+ u32 _tag, status, cptr, cur_rptr;
+ int ret = 0;
+
+ if (hba->initialized)
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
+ if (status & CPU_TO_F0_DRBL_MSG_BIT) {
+ u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
+ dprintk("received outbound msg %x\n", msg);
+ hptiop_message_callback(hba, msg);
+ }
+ ret = 1;
+ }
+
+ status = readl(&(hba->u.mvfrey.mu->isr_cause));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->isr_cause));
+ do {
+ cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
+ cur_rptr = hba->u.mvfrey.outlist_rptr;
+ while (cur_rptr != cptr) {
+ cur_rptr++;
+ if (cur_rptr == hba->u.mvfrey.list_count)
+ cur_rptr = 0;
+
+ _tag = hba->u.mvfrey.outlist[cur_rptr].val;
+ BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
+ hptiop_request_callback_mvfrey(hba, _tag);
+ ret = 1;
+ }
+ hba->u.mvfrey.outlist_rptr = cur_rptr;
+ } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
+ }
+
+ if (hba->initialized)
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ return ret;
+}
+
+static int iop_send_sync_request_itl(struct hptiop_hba *hba,
+ void __iomem *_req, u32 millisec)
+{
+ struct hpt_iop_request_header __iomem *req = _req;
+ u32 i;
+
+ writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
+ writel(0, &req->context);
+ writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
+ &hba->u.itl.iop->inbound_queue);
+ readl(&hba->u.itl.iop->outbound_intstatus);
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_itl(hba);
+ if (readl(&req->context))
+ return 0;
+ msleep(1);
+ }
+
+ return -1;
+}
+
+static int iop_send_sync_request_mv(struct hptiop_hba *hba,
+ u32 size_bits, u32 millisec)
+{
+ struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
+ u32 i;
+
+ hba->msg_done = 0;
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+ mv_inbound_write(hba->u.mv.internal_req_phy |
+ MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_mv(hba);
+ if (hba->msg_done)
+ return 0;
+ msleep(1);
+ }
+ return -1;
+}
+
+static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
+ u32 size_bits, u32 millisec)
+{
+ struct hpt_iop_request_header *reqhdr =
+ hba->u.mvfrey.internal_req.req_virt;
+ u32 i;
+
+ hba->msg_done = 0;
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+ hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_mvfrey(hba);
+ if (hba->msg_done)
+ break;
+ msleep(1);
+ }
+ return hba->msg_done ? 0 : -1;
+}
+
+static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
+ readl(&hba->u.itl.iop->outbound_intstatus);
+}
+
+static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &hba->u.mv.mu->inbound_msg);
+ writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
+ readl(&hba->u.mv.regs->inbound_doorbell);
+}
+
+static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+ readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+}
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+{
+ u32 i;
+
+ hba->msg_done = 0;
+ hba->ops->disable_intr(hba);
+ hba->ops->post_msg(hba, msg);
+
+ for (i = 0; i < millisec; i++) {
+ spin_lock_irq(hba->host->host_lock);
+ hba->ops->iop_intr(hba);
+ spin_unlock_irq(hba->host->host_lock);
+ if (hba->msg_done)
+ break;
+ msleep(1);
+ }
+
+ hba->ops->enable_intr(hba);
+ return hba->msg_done? 0 : -1;
+}
+
+static int iop_get_config_itl(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ u32 req32;
+ struct hpt_iop_request_get_config __iomem *req;
+
+ req32 = readl(&hba->u.itl.iop->inbound_queue);
+ if (req32 == IOPMU_QUEUE_EMPTY)
+ return -1;
+
+ req = (struct hpt_iop_request_get_config __iomem *)
+ ((unsigned long)hba->u.itl.iop + req32);
+
+ writel(0, &req->header.flags);
+ writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
+ writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
+ writel(IOP_RESULT_PENDING, &req->header.result);
+
+ if (iop_send_sync_request_itl(hba, req, 20000)) {
+ dprintk("Get config send cmd failed\n");
+ return -1;
+ }
+
+ memcpy_fromio(config, req, sizeof(*config));
+ writel(req32, &hba->u.itl.iop->outbound_queue);
+ return 0;
+}
+
+static int iop_get_config_mv(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
+
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+ req->header.context_hi32 = 0;
+
+ if (iop_send_sync_request_mv(hba, 0, 20000)) {
+ dprintk("Get config send cmd failed\n");
+ return -1;
+ }
+
+ memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
+ return 0;
+}
+
+static int iop_get_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
+
+ if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
+ info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
+ return -1;
+
+ config->interface_version = info->interface_version;
+ config->firmware_version = info->firmware_version;
+ config->max_requests = info->max_requests;
+ config->request_size = info->request_size;
+ config->max_sg_count = info->max_sg_count;
+ config->data_transfer_length = info->data_transfer_length;
+ config->alignment_mask = info->alignment_mask;
+ config->max_devices = info->max_devices;
+ config->sdram_size = info->sdram_size;
+
+ return 0;
+}
+
+static int iop_set_config_itl(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ u32 req32;
+ struct hpt_iop_request_set_config __iomem *req;
+
+ req32 = readl(&hba->u.itl.iop->inbound_queue);
+ if (req32 == IOPMU_QUEUE_EMPTY)
+ return -1;
+
+ req = (struct hpt_iop_request_set_config __iomem *)
+ ((unsigned long)hba->u.itl.iop + req32);
+
+ memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
+ (u8 *)config + sizeof(struct hpt_iop_request_header),
+ sizeof(struct hpt_iop_request_set_config) -
+ sizeof(struct hpt_iop_request_header));
+
+ writel(0, &req->header.flags);
+ writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
+ writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
+ writel(IOP_RESULT_PENDING, &req->header.result);
+
+ if (iop_send_sync_request_itl(hba, req, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ writel(req32, &hba->u.itl.iop->outbound_queue);
+ return 0;
+}
+
+static int iop_set_config_mv(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
+
+ memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+ req->header.context_hi32 = 0;
+
+ if (iop_send_sync_request_mv(hba, 0, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iop_set_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ struct hpt_iop_request_set_config *req =
+ hba->u.mvfrey.internal_req.req_virt;
+
+ memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+ req->header.context_hi32 = 0;
+
+ if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
+{
+ writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
+ &hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
+{
+ writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
+ &hba->u.mv.regs->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
+static int hptiop_initialize_iop(struct hptiop_hba *hba)
+{
+ /* enable interrupts */
+ hba->ops->enable_intr(hba);
+
+ hba->initialized = 1;
+
+ /* start background tasks */
+ if (iop_send_sync_msg(hba,
+ IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
+ printk(KERN_ERR "scsi%d: fail to start background task\n",
+ hba->host->host_no);
+ return -1;
+ }
+ return 0;
+}
+
+static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
+{
+ u32 mem_base_phy, length;
+ void __iomem *mem_base_virt;
+
+ struct pci_dev *pcidev = hba->pcidev;
+
+
+ if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
+ printk(KERN_ERR "scsi%d: pci resource invalid\n",
+ hba->host->host_no);
+ return NULL;
+ }
+
+ mem_base_phy = pci_resource_start(pcidev, index);
+ length = pci_resource_len(pcidev, index);
+ mem_base_virt = ioremap(mem_base_phy, length);
+
+ if (!mem_base_virt) {
+ printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
+ hba->host->host_no);
+ return NULL;
+ }
+ return mem_base_virt;
+}
+
+static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
+{
+ struct pci_dev *pcidev = hba->pcidev;
+ hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.itl.iop == NULL)
+ return -1;
+ if ((pcidev->device & 0xff00) == 0x4400) {
+ hba->u.itl.plx = hba->u.itl.iop;
+ hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.itl.iop == NULL) {
+ iounmap(hba->u.itl.plx);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
+{
+ if (hba->u.itl.plx)
+ iounmap(hba->u.itl.plx);
+ iounmap(hba->u.itl.iop);
+}
+
+static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
+{
+ hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.mv.regs == NULL)
+ return -1;
+
+ hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.mv.mu == NULL) {
+ iounmap(hba->u.mv.regs);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.mvfrey.config == NULL)
+ return -1;
+
+ hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.mvfrey.mu == NULL) {
+ iounmap(hba->u.mvfrey.config);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.mv.regs);
+ iounmap(hba->u.mv.mu);
+}
+
+static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.mvfrey.config);
+ iounmap(hba->u.mvfrey.mu);
+}
+
+static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
+{
+ dprintk("iop message 0x%x\n", msg);
+
+ if (msg == IOPMU_INBOUND_MSG0_NOP ||
+ msg == IOPMU_INBOUND_MSG0_RESET_COMM)
+ hba->msg_done = 1;
+
+ if (!hba->initialized)
+ return;
+
+ if (msg == IOPMU_INBOUND_MSG0_RESET) {
+ atomic_set(&hba->resetting, 0);
+ wake_up(&hba->reset_wq);
+ }
+ else if (msg <= IOPMU_INBOUND_MSG0_MAX)
+ hba->msg_done = 1;
+}
+
+static struct hptiop_request *get_req(struct hptiop_hba *hba)
+{
+ struct hptiop_request *ret;
+
+ dprintk("get_req : req=%p\n", hba->req_list);
+
+ ret = hba->req_list;
+ if (ret)
+ hba->req_list = ret->next;
+
+ return ret;
+}
+
+static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
+{
+ dprintk("free_req(%d, %p)\n", req->index, req);
+ req->next = hba->req_list;
+ hba->req_list = req;
+}
+
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+ struct hpt_iop_request_scsi_command *req)
+{
+ struct scsi_cmnd *scp;
+
+ dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
+ "result=%d, context=0x%x tag=%d\n",
+ req, req->header.type, req->header.result,
+ req->header.context, tag);
+
+ BUG_ON(!req->header.result);
+ BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));
+
+ scp = hba->reqs[tag].scp;
+
+ if (HPT_SCP(scp)->mapped)
+ scsi_dma_unmap(scp);
+
+ switch (le32_to_cpu(req->header.result)) {
+ case IOP_RESULT_SUCCESS:
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
+ scp->result = (DID_OK<<16);
+ break;
+ case IOP_RESULT_BAD_TARGET:
+ scp->result = (DID_BAD_TARGET<<16);
+ break;
+ case IOP_RESULT_BUSY:
+ scp->result = (DID_BUS_BUSY<<16);
+ break;
+ case IOP_RESULT_RESET:
+ scp->result = (DID_RESET<<16);
+ break;
+ case IOP_RESULT_FAIL:
+ scp->result = (DID_ERROR<<16);
+ break;
+ case IOP_RESULT_INVALID_REQUEST:
+ scp->result = (DID_ABORT<<16);
+ break;
+ case IOP_RESULT_CHECK_CONDITION:
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
+ scp->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(scp->sense_buffer, &req->sg_list,
+ min_t(size_t, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(req->dataxfer_length)));
+ goto skip_resid;
+ break;
+
+ default:
+ scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
+ break;
+ }
+
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
+
+skip_resid:
+ dprintk("scsi_done(%p)\n", scp);
+ scp->scsi_done(scp);
+ free_req(hba, &hba->reqs[tag]);
+}
+
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
+{
+ struct hpt_iop_request_scsi_command *req;
+ u32 tag;
+
+ if (hba->iopintf_v2) {
+ tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
+ req = hba->reqs[tag].req_virt;
+ if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+ } else {
+ tag = _tag;
+ req = hba->reqs[tag].req_virt;
+ }
+
+ hptiop_finish_scsi_req(hba, tag, req);
+}
+
+void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
+{
+ struct hpt_iop_request_header __iomem *req;
+ struct hpt_iop_request_ioctl_command __iomem *p;
+ struct hpt_ioctl_k *arg;
+
+ req = (struct hpt_iop_request_header __iomem *)
+ ((unsigned long)hba->u.itl.iop + tag);
+ dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
+ "result=%d, context=0x%x tag=%d\n",
+ req, readl(&req->type), readl(&req->result),
+ readl(&req->context), tag);
+
+ BUG_ON(!readl(&req->result));
+ BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);
+
+ p = (struct hpt_iop_request_ioctl_command __iomem *)req;
+ arg = (struct hpt_ioctl_k *)(unsigned long)
+ (readl(&req->context) |
+ ((u64)readl(&req->context_hi32)<<32));
+
+ if (readl(&req->result) == IOP_RESULT_SUCCESS) {
+ arg->result = HPT_IOCTL_RESULT_OK;
+
+ if (arg->outbuf_size)
+ memcpy_fromio(arg->outbuf,
+ &p->buf[(readl(&p->inbuf_size) + 3)& ~3],
+ arg->outbuf_size);
+
+ if (arg->bytes_returned)
+ *arg->bytes_returned = arg->outbuf_size;
+ }
+ else
+ arg->result = HPT_IOCTL_RESULT_FAILED;
+
+ arg->done(arg);
+ writel(tag, &hba->u.itl.iop->outbound_queue);
+}
+
+static irqreturn_t hptiop_intr(int irq, void *dev_id)
+{
+ struct hptiop_hba *hba = dev_id;
+ int handled;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ handled = hba->ops->iop_intr(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return handled;
+}
+
+static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
+{
+ struct Scsi_Host *host = scp->device->host;
+ struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+ struct scatterlist *sg;
+ int idx, nseg;
+
+ nseg = scsi_dma_map(scp);
+ BUG_ON(nseg < 0);
+ if (!nseg)
+ return 0;
+
+ HPT_SCP(scp)->sgcnt = nseg;
+ HPT_SCP(scp)->mapped = 1;
+
+ BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
+
+ scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
+ psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
+ hba->ops->host_phy_flag;
+ psg[idx].size = cpu_to_le32(sg_dma_len(sg));
+ psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
+ cpu_to_le32(1) : 0;
+ }
+ return HPT_SCP(scp)->sgcnt;
+}
+
+static void hptiop_post_req_itl(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+
+ reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+ (u32)_req->index);
+ reqhdr->context_hi32 = 0;
+
+ if (hba->iopintf_v2) {
+ u32 size, size_bits;
+
+ size = le32_to_cpu(reqhdr->size);
+ if (size < 256)
+ size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+ else if (size < 512)
+ size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+ else
+ size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
+ IOPMU_QUEUE_ADDR_HOST_BIT;
+ writel(_req->req_shifted_phy | size_bits,
+ &hba->u.itl.iop->inbound_queue);
+ } else
+ writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
+ &hba->u.itl.iop->inbound_queue);
+}
+
+static void hptiop_post_req_mv(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+ u32 size, size_bit;
+
+ reqhdr->context = cpu_to_le32(_req->index<<8 |
+ IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
+ reqhdr->context_hi32 = 0;
+ size = le32_to_cpu(reqhdr->size);
+
+ if (size <= 256)
+ size_bit = 0;
+ else if (size <= 256*2)
+ size_bit = 1;
+ else if (size <= 256*3)
+ size_bit = 2;
+ else
+ size_bit = 3;
+
+ mv_inbound_write((_req->req_shifted_phy << 5) |
+ MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
+}
+
+static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+ u32 index;
+
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
+ IOP_REQUEST_FLAG_ADDR_BITS |
+ ((_req->req_shifted_phy >> 11) & 0xffff0000));
+ reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+ (_req->index << 4) | reqhdr->type);
+ reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
+ 0xffffffff);
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr =
+ (dma_addr_t)_req->req_shifted_phy << 5;
+ hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
+ writel(hba->u.mvfrey.inlist_wptr,
+ &(hba->u.mvfrey.mu->inbound_write_ptr));
+ readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
+}
+
+static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = hba->u.mvfrey.list_count;
+
+ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
+ return -1;
+
+ /* wait 100ms for MCU ready */
+ msleep(100);
+
+ writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->inbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->inbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_shadow_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_shadow_base_high));
+
+ hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ hba->u.mvfrey.outlist_rptr = list_count - 1;
+ return 0;
+}
+
+static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *host = scp->device->host;
+ struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+ struct hpt_iop_request_scsi_command *req;
+ int sg_count = 0;
+ struct hptiop_request *_req;
+
+ BUG_ON(!done);
+ scp->scsi_done = done;
+
+ _req = get_req(hba);
+ if (_req == NULL) {
+ dprintk("hptiop_queuecmd : no free req\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ _req->scp = scp;
+
+ dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
+ "req_index=%d, req=%p\n",
+ scp,
+ host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ cpu_to_be32(((u32 *)scp->cmnd)[0]),
+ cpu_to_be32(((u32 *)scp->cmnd)[1]),
+ cpu_to_be32(((u32 *)scp->cmnd)[2]),
+ cpu_to_be32(((u32 *)scp->cmnd)[3]),
+ _req->index, _req->req_virt);
+
+ scp->result = 0;
+
+ if (scp->device->channel || scp->device->lun ||
+ scp->device->id > hba->max_devices) {
+ scp->result = DID_BAD_TARGET << 16;
+ free_req(hba, _req);
+ goto cmd_done;
+ }
+
+ req = _req->req_virt;
+
+ /* build S/G table */
+ sg_count = hptiop_buildsgl(scp, req->sg_list);
+ if (!sg_count)
+ HPT_SCP(scp)->mapped = 0;
+
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
+ req->channel = scp->device->channel;
+ req->target = scp->device->id;
+ req->lun = scp->device->lun;
+ req->header.size = cpu_to_le32(
+ sizeof(struct hpt_iop_request_scsi_command)
+ - sizeof(struct hpt_iopsg)
+ + sg_count * sizeof(struct hpt_iopsg));
+
+ memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
+ hba->ops->post_req(hba, _req);
+ return 0;
+
+cmd_done:
+ dprintk("scsi_done(scp=%p)\n", scp);
+ scp->scsi_done(scp);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(hptiop_queuecommand)
+
+static const char *hptiop_info(struct Scsi_Host *host)
+{
+ return driver_name_long;
+}
+
+static int hptiop_reset_hba(struct hptiop_hba *hba)
+{
+ if (atomic_xchg(&hba->resetting, 1) == 0) {
+ atomic_inc(&hba->reset_count);
+ hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
+ }
+
+ wait_event_timeout(hba->reset_wq,
+ atomic_read(&hba->resetting) == 0, 60 * HZ);
+
+ if (atomic_read(&hba->resetting)) {
+ /* IOP is in unknown state, abort reset */
+ printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
+ return -1;
+ }
+
+ if (iop_send_sync_msg(hba,
+ IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
+ dprintk("scsi%d: fail to start background task\n",
+ hba->host->host_no);
+ }
+
+ return 0;
+}
+
+static int hptiop_reset(struct scsi_cmnd *scp)
+{
+ struct Scsi_Host * host = scp->device->host;
+ struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata;
+
+ printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
+ scp->device->host->host_no, scp->device->channel,
+ scp->device->id, scp);
+
+ return hptiop_reset_hba(hba)? FAILED : SUCCESS;
+}
+
+static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
+ int queue_depth)
+{
+ struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
+
+ if (queue_depth > hba->max_requests)
+ queue_depth = hba->max_requests;
+ return scsi_change_queue_depth(sdev, queue_depth);
+}
+
+static ssize_t hptiop_show_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
+}
+
+static ssize_t hptiop_show_fw_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
+ hba->firmware_version >> 24,
+ (hba->firmware_version >> 16) & 0xff,
+ (hba->firmware_version >> 8) & 0xff,
+ hba->firmware_version & 0xff);
+}
+
+static struct device_attribute hptiop_attr_version = {
+ .attr = {
+ .name = "driver-version",
+ .mode = S_IRUGO,
+ },
+ .show = hptiop_show_version,
+};
+
+static struct device_attribute hptiop_attr_fw_version = {
+ .attr = {
+ .name = "firmware-version",
+ .mode = S_IRUGO,
+ },
+ .show = hptiop_show_fw_version,
+};
+
+static struct device_attribute *hptiop_attrs[] = {
+ &hptiop_attr_version,
+ &hptiop_attr_fw_version,
+ NULL
+};
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = driver_name,
+ .queuecommand = hptiop_queuecommand,
+ .eh_device_reset_handler = hptiop_reset,
+ .eh_bus_reset_handler = hptiop_reset,
+ .info = hptiop_info,
+ .emulated = 0,
+ .use_clustering = ENABLE_CLUSTERING,
+ .proc_name = driver_name,
+ .shost_attrs = hptiop_attrs,
+ .this_id = -1,
+ .change_queue_depth = hptiop_adjust_disk_queue_depth,
+};
+
+static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
+{
+ hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
+ 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
+ if (hba->u.mv.internal_req)
+ return 0;
+ else
+ return -1;
+}
+
+static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
+ char *p;
+ dma_addr_t phy;
+
+ BUG_ON(hba->max_request_size == 0);
+
+ if (list_count == 0) {
+ BUG_ON(1);
+ return -1;
+ }
+
+ list_count >>= 16;
+
+ hba->u.mvfrey.list_count = list_count;
+ hba->u.mvfrey.internal_mem_size = 0x800 +
+ list_count * sizeof(struct mvfrey_inlist_entry) +
+ list_count * sizeof(struct mvfrey_outlist_entry) +
+ sizeof(int);
+
+ p = dma_alloc_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ hba->u.mvfrey.internal_req.req_virt = p;
+ hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
+ hba->u.mvfrey.internal_req.scp = NULL;
+ hba->u.mvfrey.internal_req.next = NULL;
+
+ p += 0x800;
+ phy += 0x800;
+
+ hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
+ hba->u.mvfrey.inlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_inlist_entry);
+ phy += list_count * sizeof(struct mvfrey_inlist_entry);
+
+ hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
+ hba->u.mvfrey.outlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_outlist_entry);
+ phy += list_count * sizeof(struct mvfrey_outlist_entry);
+
+ hba->u.mvfrey.outlist_cptr = (__le32 *)p;
+ hba->u.mvfrey.outlist_cptr_phy = phy;
+
+ return 0;
+}
+
+static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
+{
+ if (hba->u.mv.internal_req) {
+ dma_free_coherent(&hba->pcidev->dev, 0x800,
+ hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
+ return 0;
+ } else
+ return -1;
+}
+
+static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
+{
+ if (hba->u.mvfrey.internal_req.req_virt) {
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size,
+ hba->u.mvfrey.internal_req.req_virt,
+ (dma_addr_t)
+ hba->u.mvfrey.internal_req.req_shifted_phy << 5);
+ return 0;
+ } else
+ return -1;
+}
+
+static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *host = NULL;
+ struct hptiop_hba *hba;
+ struct hptiop_adapter_ops *iop_ops;
+ struct hpt_iop_request_get_config iop_config;
+ struct hpt_iop_request_set_config set_config;
+ dma_addr_t start_phy;
+ void *start_virt;
+ u32 offset, i, req_size;
+
+ dprintk("hptiop_probe(%p)\n", pcidev);
+
+ if (pci_enable_device(pcidev)) {
+ printk(KERN_ERR "hptiop: fail to enable pci device\n");
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
+ pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
+ pcidev->irq);
+
+ pci_set_master(pcidev);
+
+ /* Enable 64bit DMA if possible */
+ iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
+ if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
+ if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
+ printk(KERN_ERR "hptiop: fail to set dma_mask\n");
+ goto disable_pci_device;
+ }
+ }
+
+ if (pci_request_regions(pcidev, driver_name)) {
+ printk(KERN_ERR "hptiop: pci_request_regions failed\n");
+ goto disable_pci_device;
+ }
+
+ host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
+ if (!host) {
+ printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
+ goto free_pci_regions;
+ }
+
+ hba = (struct hptiop_hba *)host->hostdata;
+
+ hba->ops = iop_ops;
+ hba->pcidev = pcidev;
+ hba->host = host;
+ hba->initialized = 0;
+ hba->iopintf_v2 = 0;
+
+ atomic_set(&hba->resetting, 0);
+ atomic_set(&hba->reset_count, 0);
+
+ init_waitqueue_head(&hba->reset_wq);
+ init_waitqueue_head(&hba->ioctl_wq);
+
+ host->max_lun = 1;
+ host->max_channel = 0;
+ host->io_port = 0;
+ host->n_io_port = 0;
+ host->irq = pcidev->irq;
+
+ if (hba->ops->map_pci_bar(hba))
+ goto free_scsi_host;
+
+ if (hba->ops->iop_wait_ready(hba, 20000)) {
+ printk(KERN_ERR "scsi%d: firmware not ready\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+
+ if (hba->ops->family == MV_BASED_IOP) {
+ if (hba->ops->internal_memalloc(hba)) {
+ printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ }
+
+ if (hba->ops->get_config(hba, &iop_config)) {
+ printk(KERN_ERR "scsi%d: get config failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+
+ hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
+ HPTIOP_MAX_REQUESTS);
+ hba->max_devices = le32_to_cpu(iop_config.max_devices);
+ hba->max_request_size = le32_to_cpu(iop_config.request_size);
+ hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
+ hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
+ hba->interface_version = le32_to_cpu(iop_config.interface_version);
+ hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
+
+ if (hba->ops->family == MVFREY_BASED_IOP) {
+ if (hba->ops->internal_memalloc(hba)) {
+ printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ if (hba->ops->reset_comm(hba)) {
+ printk(KERN_ERR "scsi%d: reset comm failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ }
+
+ if (hba->firmware_version > 0x01020000 ||
+ hba->interface_version > 0x01020000)
+ hba->iopintf_v2 = 1;
+
+ host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
+ host->max_id = le32_to_cpu(iop_config.max_devices);
+ host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
+ host->can_queue = le32_to_cpu(iop_config.max_requests);
+ host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
+ host->max_cmd_len = 16;
+
+ req_size = sizeof(struct hpt_iop_request_scsi_command)
+ + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
+ if ((req_size & 0x1f) != 0)
+ req_size = (req_size + 0x1f) & ~0x1f;
+
+ memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
+ set_config.iop_id = cpu_to_le32(host->host_no);
+ set_config.vbus_id = cpu_to_le16(host->host_no);
+ set_config.max_host_request_size = cpu_to_le16(req_size);
+
+ if (hba->ops->set_config(hba, &set_config)) {
+ printk(KERN_ERR "scsi%d: set config failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+
+ pci_set_drvdata(pcidev, host);
+
+ if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
+ driver_name, hba)) {
+ printk(KERN_ERR "scsi%d: request irq %d failed\n",
+ hba->host->host_no, pcidev->irq);
+ goto unmap_pci_bar;
+ }
+
+ /* Allocate request mem */
+
+ dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
+
+ hba->req_size = req_size;
+ start_virt = dma_alloc_coherent(&pcidev->dev,
+ hba->req_size*hba->max_requests + 0x20,
+ &start_phy, GFP_KERNEL);
+
+ if (!start_virt) {
+ printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
+ hba->host->host_no);
+ goto free_request_irq;
+ }
+
+ hba->dma_coherent = start_virt;
+ hba->dma_coherent_handle = start_phy;
+
+ if ((start_phy & 0x1f) != 0) {
+ offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
+ start_phy += offset;
+ start_virt += offset;
+ }
+
+ hba->req_list = NULL;
+ for (i = 0; i < hba->max_requests; i++) {
+ hba->reqs[i].next = NULL;
+ hba->reqs[i].req_virt = start_virt;
+ hba->reqs[i].req_shifted_phy = start_phy >> 5;
+ hba->reqs[i].index = i;
+ free_req(hba, &hba->reqs[i]);
+ start_virt = (char *)start_virt + hba->req_size;
+ start_phy = start_phy + hba->req_size;
+ }
+
+ /* Enable Interrupt and start background task */
+ if (hptiop_initialize_iop(hba))
+ goto free_request_mem;
+
+ if (scsi_add_host(host, &pcidev->dev)) {
+ printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
+ hba->host->host_no);
+ goto free_request_mem;
+ }
+
+ scsi_scan_host(host);
+
+ dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
+ return 0;
+
+free_request_mem:
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->req_size * hba->max_requests + 0x20,
+ hba->dma_coherent, hba->dma_coherent_handle);
+
+free_request_irq:
+ free_irq(hba->pcidev->irq, hba);
+
+unmap_pci_bar:
+ hba->ops->internal_memfree(hba);
+
+ hba->ops->unmap_pci_bar(hba);
+
+free_scsi_host:
+ scsi_host_put(host);
+
+free_pci_regions:
+ pci_release_regions(pcidev);
+
+disable_pci_device:
+ pci_disable_device(pcidev);
+
+ dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
+ return -ENODEV;
+}
+
+static void hptiop_shutdown(struct pci_dev *pcidev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pcidev);
+ struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+
+ dprintk("hptiop_shutdown(%p)\n", hba);
+
+ /* stop the iop */
+ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
+ printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
+ hba->host->host_no);
+
+ /* disable all outbound interrupts */
+ hba->ops->disable_intr(hba);
+}
+
+static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
+{
+ u32 int_mask;
+
+ int_mask = readl(&hba->u.itl.iop->outbound_intmask);
+ writel(int_mask |
+ IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
+ &hba->u.itl.iop->outbound_intmask);
+ readl(&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
+{
+ writel(0, &hba->u.mv.regs->outbound_intmask);
+ readl(&hba->u.mv.regs->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0, &(hba->u.mvfrey.mu->isr_enable));
+ readl(&(hba->u.mvfrey.mu->isr_enable));
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+ readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
+static void hptiop_remove(struct pci_dev *pcidev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pcidev);
+ struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+
+ dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
+
+ scsi_remove_host(host);
+
+ hptiop_shutdown(pcidev);
+
+ free_irq(hba->pcidev->irq, hba);
+
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->req_size * hba->max_requests + 0x20,
+ hba->dma_coherent,
+ hba->dma_coherent_handle);
+
+ hba->ops->internal_memfree(hba);
+
+ hba->ops->unmap_pci_bar(hba);
+
+ pci_release_regions(hba->pcidev);
+ pci_set_drvdata(hba->pcidev, NULL);
+ pci_disable_device(hba->pcidev);
+
+ scsi_host_put(host);
+}
+
+static struct hptiop_adapter_ops hptiop_itl_ops = {
+ .family = INTEL_BASED_IOP,
+ .iop_wait_ready = iop_wait_ready_itl,
+ .internal_memalloc = hptiop_internal_memalloc_itl,
+ .internal_memfree = hptiop_internal_memfree_itl,
+ .map_pci_bar = hptiop_map_pci_bar_itl,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_itl,
+ .enable_intr = hptiop_enable_intr_itl,
+ .disable_intr = hptiop_disable_intr_itl,
+ .get_config = iop_get_config_itl,
+ .set_config = iop_set_config_itl,
+ .iop_intr = iop_intr_itl,
+ .post_msg = hptiop_post_msg_itl,
+ .post_req = hptiop_post_req_itl,
+ .hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_itl,
+ .host_phy_flag = cpu_to_le64(0),
+};
+
+static struct hptiop_adapter_ops hptiop_mv_ops = {
+ .family = MV_BASED_IOP,
+ .iop_wait_ready = iop_wait_ready_mv,
+ .internal_memalloc = hptiop_internal_memalloc_mv,
+ .internal_memfree = hptiop_internal_memfree_mv,
+ .map_pci_bar = hptiop_map_pci_bar_mv,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_mv,
+ .enable_intr = hptiop_enable_intr_mv,
+ .disable_intr = hptiop_disable_intr_mv,
+ .get_config = iop_get_config_mv,
+ .set_config = iop_set_config_mv,
+ .iop_intr = iop_intr_mv,
+ .post_msg = hptiop_post_msg_mv,
+ .post_req = hptiop_post_req_mv,
+ .hw_dma_bit_mask = 33,
+ .reset_comm = hptiop_reset_comm_mv,
+ .host_phy_flag = cpu_to_le64(0),
+};
+
+static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
+ .family = MVFREY_BASED_IOP,
+ .iop_wait_ready = iop_wait_ready_mvfrey,
+ .internal_memalloc = hptiop_internal_memalloc_mvfrey,
+ .internal_memfree = hptiop_internal_memfree_mvfrey,
+ .map_pci_bar = hptiop_map_pci_bar_mvfrey,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
+ .enable_intr = hptiop_enable_intr_mvfrey,
+ .disable_intr = hptiop_disable_intr_mvfrey,
+ .get_config = iop_get_config_mvfrey,
+ .set_config = iop_set_config_mvfrey,
+ .iop_intr = iop_intr_mvfrey,
+ .post_msg = hptiop_post_msg_mvfrey,
+ .post_req = hptiop_post_req_mvfrey,
+ .hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_mvfrey,
+ .host_phy_flag = cpu_to_le64(1),
+};
+
+static struct pci_device_id hptiop_id_table[] = {
+ { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, hptiop_id_table);
+
+static struct pci_driver hptiop_pci_driver = {
+ .name = driver_name,
+ .id_table = hptiop_id_table,
+ .probe = hptiop_probe,
+ .remove = hptiop_remove,
+ .shutdown = hptiop_shutdown,
+};
+
+static int __init hptiop_module_init(void)
+{
+ printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
+ return pci_register_driver(&hptiop_pci_driver);
+}
+
+static void __exit hptiop_module_exit(void)
+{
+ pci_unregister_driver(&hptiop_pci_driver);
+}
+
+
+module_init(hptiop_module_init);
+module_exit(hptiop_module_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
new file mode 100644
index 000000000..020619d60
--- /dev/null
+++ b/drivers/scsi/hptiop.h
@@ -0,0 +1,382 @@
+/*
+ * HighPoint RR3xxx/4xxx controller driver for Linux
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Please report bugs/comments/suggestions to linux@highpoint-tech.com
+ *
+ * For more information, visit http://www.highpoint-tech.com
+ */
+#ifndef _HPTIOP_H_
+#define _HPTIOP_H_
+
+struct hpt_iopmu_itl {
+ __le32 resrved0[4];
+ __le32 inbound_msgaddr0;
+ __le32 inbound_msgaddr1;
+ __le32 outbound_msgaddr0;
+ __le32 outbound_msgaddr1;
+ __le32 inbound_doorbell;
+ __le32 inbound_intstatus;
+ __le32 inbound_intmask;
+ __le32 outbound_doorbell;
+ __le32 outbound_intstatus;
+ __le32 outbound_intmask;
+ __le32 reserved1[2];
+ __le32 inbound_queue;
+ __le32 outbound_queue;
+};
+
+#define IOPMU_QUEUE_EMPTY 0xffffffff
+#define IOPMU_QUEUE_MASK_HOST_BITS 0xf0000000
+#define IOPMU_QUEUE_ADDR_HOST_BIT 0x80000000
+#define IOPMU_QUEUE_REQUEST_SIZE_BIT 0x40000000
+#define IOPMU_QUEUE_REQUEST_RESULT_BIT 0x40000000
+
+#define IOPMU_OUTBOUND_INT_MSG0 1
+#define IOPMU_OUTBOUND_INT_MSG1 2
+#define IOPMU_OUTBOUND_INT_DOORBELL 4
+#define IOPMU_OUTBOUND_INT_POSTQUEUE 8
+#define IOPMU_OUTBOUND_INT_PCI 0x10
+
+#define IOPMU_INBOUND_INT_MSG0 1
+#define IOPMU_INBOUND_INT_MSG1 2
+#define IOPMU_INBOUND_INT_DOORBELL 4
+#define IOPMU_INBOUND_INT_ERROR 8
+#define IOPMU_INBOUND_INT_POSTQUEUE 0x10
+
+#define MVIOP_QUEUE_LEN 512
+
+struct hpt_iopmu_mv {
+ __le32 inbound_head;
+ __le32 inbound_tail;
+ __le32 outbound_head;
+ __le32 outbound_tail;
+ __le32 inbound_msg;
+ __le32 outbound_msg;
+ __le32 reserve[10];
+ __le64 inbound_q[MVIOP_QUEUE_LEN];
+ __le64 outbound_q[MVIOP_QUEUE_LEN];
+};
+
+struct hpt_iopmv_regs {
+ __le32 reserved[0x20400 / 4];
+ __le32 inbound_doorbell;
+ __le32 inbound_intmask;
+ __le32 outbound_doorbell;
+ __le32 outbound_intmask;
+};
+
+#pragma pack(1)
+struct hpt_iopmu_mvfrey {
+ __le32 reserved0[(0x4000 - 0) / 4];
+ __le32 inbound_base;
+ __le32 inbound_base_high;
+ __le32 reserved1[(0x4018 - 0x4008) / 4];
+ __le32 inbound_write_ptr;
+ __le32 reserved2[(0x402c - 0x401c) / 4];
+ __le32 inbound_conf_ctl;
+ __le32 reserved3[(0x4050 - 0x4030) / 4];
+ __le32 outbound_base;
+ __le32 outbound_base_high;
+ __le32 outbound_shadow_base;
+ __le32 outbound_shadow_base_high;
+ __le32 reserved4[(0x4088 - 0x4060) / 4];
+ __le32 isr_cause;
+ __le32 isr_enable;
+ __le32 reserved5[(0x1020c - 0x4090) / 4];
+ __le32 pcie_f0_int_enable;
+ __le32 reserved6[(0x10400 - 0x10210) / 4];
+ __le32 f0_to_cpu_msg_a;
+ __le32 reserved7[(0x10420 - 0x10404) / 4];
+ __le32 cpu_to_f0_msg_a;
+ __le32 reserved8[(0x10480 - 0x10424) / 4];
+ __le32 f0_doorbell;
+ __le32 f0_doorbell_enable;
+};
+
+struct mvfrey_inlist_entry {
+ dma_addr_t addr;
+ __le32 intrfc_len;
+ __le32 reserved;
+};
+
+struct mvfrey_outlist_entry {
+ __le32 val;
+};
+#pragma pack()
+
+#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
+#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
+
+#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff
+#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1
+#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
+
+#define MVIOP_MU_INBOUND_INT_MSG 1
+#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2
+#define MVIOP_MU_OUTBOUND_INT_MSG 1
+#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+
+#define CL_POINTER_TOGGLE 0x00004000
+#define CPU_TO_F0_DRBL_MSG_BIT 0x02000000
+
+enum hpt_iopmu_message {
+ /* host-to-iop messages */
+ IOPMU_INBOUND_MSG0_NOP = 0,
+ IOPMU_INBOUND_MSG0_RESET,
+ IOPMU_INBOUND_MSG0_FLUSH,
+ IOPMU_INBOUND_MSG0_SHUTDOWN,
+ IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
+ IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
+ IOPMU_INBOUND_MSG0_RESET_COMM,
+ IOPMU_INBOUND_MSG0_MAX = 0xff,
+ /* iop-to-host messages */
+ IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
+ IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff,
+ IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200,
+ IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff,
+ IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300,
+ IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
+};
+
+struct hpt_iop_request_header {
+ __le32 size;
+ __le32 type;
+ __le32 flags;
+ __le32 result;
+ __le32 context; /* host context */
+ __le32 context_hi32;
+};
+
+#define IOP_REQUEST_FLAG_SYNC_REQUEST 1
+#define IOP_REQUEST_FLAG_BIST_REQUEST 2
+#define IOP_REQUEST_FLAG_REMAPPED 4
+#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
+#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */
+
+enum hpt_iop_request_type {
+ IOP_REQUEST_TYPE_GET_CONFIG = 0,
+ IOP_REQUEST_TYPE_SET_CONFIG,
+ IOP_REQUEST_TYPE_BLOCK_COMMAND,
+ IOP_REQUEST_TYPE_SCSI_COMMAND,
+ IOP_REQUEST_TYPE_IOCTL_COMMAND,
+ IOP_REQUEST_TYPE_MAX
+};
+
+enum hpt_iop_result_type {
+ IOP_RESULT_PENDING = 0,
+ IOP_RESULT_SUCCESS,
+ IOP_RESULT_FAIL,
+ IOP_RESULT_BUSY,
+ IOP_RESULT_RESET,
+ IOP_RESULT_INVALID_REQUEST,
+ IOP_RESULT_BAD_TARGET,
+ IOP_RESULT_CHECK_CONDITION,
+};
+
+struct hpt_iop_request_get_config {
+ struct hpt_iop_request_header header;
+ __le32 interface_version;
+ __le32 firmware_version;
+ __le32 max_requests;
+ __le32 request_size;
+ __le32 max_sg_count;
+ __le32 data_transfer_length;
+ __le32 alignment_mask;
+ __le32 max_devices;
+ __le32 sdram_size;
+};
+
+struct hpt_iop_request_set_config {
+ struct hpt_iop_request_header header;
+ __le32 iop_id;
+ __le16 vbus_id;
+ __le16 max_host_request_size;
+ __le32 reserve[6];
+};
+
+struct hpt_iopsg {
+ __le32 size;
+ __le32 eot; /* non-zero: end of table */
+ __le64 pci_address;
+};
+
+struct hpt_iop_request_block_command {
+ struct hpt_iop_request_header header;
+ u8 channel;
+ u8 target;
+ u8 lun;
+ u8 pad1;
+ __le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
+ __le16 sectors;
+ __le64 lba;
+ struct hpt_iopsg sg_list[1];
+};
+
+#define IOP_BLOCK_COMMAND_READ 1
+#define IOP_BLOCK_COMMAND_WRITE 2
+#define IOP_BLOCK_COMMAND_VERIFY 3
+#define IOP_BLOCK_COMMAND_FLUSH 4
+#define IOP_BLOCK_COMMAND_SHUTDOWN 5
+
+struct hpt_iop_request_scsi_command {
+ struct hpt_iop_request_header header;
+ u8 channel;
+ u8 target;
+ u8 lun;
+ u8 pad1;
+ u8 cdb[16];
+ __le32 dataxfer_length;
+ struct hpt_iopsg sg_list[1];
+};
+
+struct hpt_iop_request_ioctl_command {
+ struct hpt_iop_request_header header;
+ __le32 ioctl_code;
+ __le32 inbuf_size;
+ __le32 outbuf_size;
+ __le32 bytes_returned;
+ u8 buf[1];
+ /* out data should be put at buf[(inbuf_size+3)&~3] */
+};
+
+#define HPTIOP_MAX_REQUESTS 256u
+
+struct hptiop_request {
+ struct hptiop_request *next;
+ void *req_virt;
+ u32 req_shifted_phy;
+ struct scsi_cmnd *scp;
+ int index;
+};
+
+struct hpt_scsi_pointer {
+ int mapped;
+ int sgcnt;
+ dma_addr_t dma_handle;
+};
+
+#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+
+enum hptiop_family {
+ UNKNOWN_BASED_IOP,
+ INTEL_BASED_IOP,
+ MV_BASED_IOP,
+ MVFREY_BASED_IOP
+} ;
+
+struct hptiop_hba {
+ struct hptiop_adapter_ops *ops;
+ union {
+ struct {
+ struct hpt_iopmu_itl __iomem *iop;
+ void __iomem *plx;
+ } itl;
+ struct {
+ struct hpt_iopmv_regs *regs;
+ struct hpt_iopmu_mv __iomem *mu;
+ void *internal_req;
+ dma_addr_t internal_req_phy;
+ } mv;
+ struct {
+ struct hpt_iop_request_get_config __iomem *config;
+ struct hpt_iopmu_mvfrey __iomem *mu;
+
+ int internal_mem_size;
+ struct hptiop_request internal_req;
+ int list_count;
+ struct mvfrey_inlist_entry *inlist;
+ dma_addr_t inlist_phy;
+ __le32 inlist_wptr;
+ struct mvfrey_outlist_entry *outlist;
+ dma_addr_t outlist_phy;
+ __le32 *outlist_cptr; /* copy pointer shadow */
+ dma_addr_t outlist_cptr_phy;
+ __le32 outlist_rptr;
+ } mvfrey;
+ } u;
+
+ struct Scsi_Host *host;
+ struct pci_dev *pcidev;
+
+ /* IOP config info */
+ u32 interface_version;
+ u32 firmware_version;
+ u32 sdram_size;
+ u32 max_devices;
+ u32 max_requests;
+ u32 max_request_size;
+ u32 max_sg_descriptors;
+
+ u32 req_size; /* host-allocated request buffer size */
+
+ u32 iopintf_v2: 1;
+ u32 initialized: 1;
+ u32 msg_done: 1;
+
+ struct hptiop_request * req_list;
+ struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
+
+ /* used to free allocated dma area */
+ void *dma_coherent;
+ dma_addr_t dma_coherent_handle;
+
+ atomic_t reset_count;
+ atomic_t resetting;
+
+ wait_queue_head_t reset_wq;
+ wait_queue_head_t ioctl_wq;
+};
+
+struct hpt_ioctl_k {
+ struct hptiop_hba * hba;
+ u32 ioctl_code;
+ u32 inbuf_size;
+ u32 outbuf_size;
+ void *inbuf;
+ void *outbuf;
+ u32 *bytes_returned;
+ void (*done)(struct hpt_ioctl_k *);
+ int result; /* HPT_IOCTL_RESULT_ */
+};
+
+struct hptiop_adapter_ops {
+ enum hptiop_family family;
+ int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
+ int (*internal_memalloc)(struct hptiop_hba *hba);
+ int (*internal_memfree)(struct hptiop_hba *hba);
+ int (*map_pci_bar)(struct hptiop_hba *hba);
+ void (*unmap_pci_bar)(struct hptiop_hba *hba);
+ void (*enable_intr)(struct hptiop_hba *hba);
+ void (*disable_intr)(struct hptiop_hba *hba);
+ int (*get_config)(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config);
+ int (*set_config)(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config);
+ int (*iop_intr)(struct hptiop_hba *hba);
+ void (*post_msg)(struct hptiop_hba *hba, u32 msg);
+ void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+ int hw_dma_bit_mask;
+ int (*reset_comm)(struct hptiop_hba *hba);
+ __le64 host_phy_flag;
+};
+
+#define HPT_IOCTL_RESULT_OK 0
+#define HPT_IOCTL_RESULT_FAILED (-1)
+
+#if 0
+#define dprintk(fmt, args...) do { printk(fmt, ##args); } while(0)
+#else
+#define dprintk(fmt, args...)
+#endif
+
+#endif
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
new file mode 100644
index 000000000..3840c64f2
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o
+obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
new file mode 100644
index 000000000..057d27721
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -0,0 +1,5016 @@
+/*
+ * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/stringify.h>
+#include <asm/firmware.h>
+#include <asm/irq.h>
+#include <asm/vio.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include "ibmvfc.h"
+
+static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
+static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
+static u64 max_lun = IBMVFC_MAX_LUN;
+static unsigned int max_targets = IBMVFC_MAX_TARGETS;
+static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
+static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
+static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static LIST_HEAD(ibmvfc_head);
+static DEFINE_SPINLOCK(ibmvfc_driver_lock);
+static struct scsi_transport_template *ibmvfc_transport_template;
+
+MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
+MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+
+module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
+ "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
+module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_timeout,
+ "Default timeout in seconds for initialization and EH commands. "
+ "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
+module_param_named(max_requests, max_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
+ "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_lun, max_lun, ullong, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
+ "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
+module_param_named(max_targets, max_targets, uint, S_IRUGO);
+MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
+ "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
+module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
+MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
+ "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
+module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable driver debug information. "
+ "[Default=" __stringify(IBMVFC_DEBUG) "]");
+module_param_named(log_level, log_level, uint, 0);
+MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
+ "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+
+static const struct {
+ u16 status;
+ u16 error;
+ u8 result;
+ u8 retry;
+ int log;
+ char *name;
+} cmd_status [] = {
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
+
+ { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
+
+ { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
+ { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
+ { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
+ { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
+ { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
+ { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
+ { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
+ { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
+ { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
+ { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
+ { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
+
+ { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+};
+
+static void ibmvfc_npiv_login(struct ibmvfc_host *);
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+static void ibmvfc_npiv_logout(struct ibmvfc_host *);
+
+static const char *unknown_error = "unknown error";
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_trc_start - Log a start trace entry
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_start(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+ struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+ struct ibmvfc_trace_entry *entry;
+
+ entry = &vhost->trace[vhost->trace_index++];
+ entry->evt = evt;
+ entry->time = jiffies;
+ entry->fmt = evt->crq.format;
+ entry->type = IBMVFC_TRC_START;
+
+ switch (entry->fmt) {
+ case IBMVFC_CMD_FORMAT:
+ entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
+ entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+ entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
+ break;
+ case IBMVFC_MAD_FORMAT:
+ entry->op_code = be32_to_cpu(mad->opcode);
+ break;
+ default:
+ break;
+ };
+}
+
+/**
+ * ibmvfc_trc_end - Log an end trace entry
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_end(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+ struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
+ struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+
+ entry->evt = evt;
+ entry->time = jiffies;
+ entry->fmt = evt->crq.format;
+ entry->type = IBMVFC_TRC_END;
+
+ switch (entry->fmt) {
+ case IBMVFC_CMD_FORMAT:
+ entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
+ entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+ entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->u.end.status = be16_to_cpu(vfc_cmd->status);
+ entry->u.end.error = be16_to_cpu(vfc_cmd->error);
+ entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
+ entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
+ entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+ break;
+ case IBMVFC_MAD_FORMAT:
+ entry->op_code = be32_to_cpu(mad->opcode);
+ entry->u.end.status = be16_to_cpu(mad->status);
+ break;
+ default:
+ break;
+
+ };
+}
+
+#else
+#define ibmvfc_trc_start(evt) do { } while (0)
+#define ibmvfc_trc_end(evt) do { } while (0)
+#endif
+
+/**
+ * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
+ * @status: status / error class
+ * @error: error
+ *
+ * Return value:
+ * index into cmd_status / -EINVAL on failure
+ **/
+static int ibmvfc_get_err_index(u16 status, u16 error)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
+ if ((cmd_status[i].status & status) == cmd_status[i].status &&
+ cmd_status[i].error == error)
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * ibmvfc_get_cmd_error - Find the error description for the fcp response
+ * @status: status / error class
+ * @error: error
+ *
+ * Return value:
+ * error description string
+ **/
+static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
+{
+ int rc = ibmvfc_get_err_index(status, error);
+ if (rc >= 0)
+ return cmd_status[rc].name;
+ return unknown_error;
+}
+
+/**
+ * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vfc_cmd: ibmvfc command struct
+ *
+ * Return value:
+ * SCSI result value to return for completed command
+ **/
+static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+{
+ int err;
+ struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
+
+ if ((rsp->flags & FCP_RSP_LEN_VALID) &&
+ ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+ rsp->data.info.rsp_code))
+ return DID_ERROR << 16;
+
+ err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
+ if (err >= 0)
+ return rsp->scsi_status | (cmd_status[err].result << 16);
+ return rsp->scsi_status | (DID_ERROR << 16);
+}
+
+/**
+ * ibmvfc_retry_cmd - Determine if error status is retryable
+ * @status: status / error class
+ * @error: error
+ *
+ * Return value:
+ * 1 if error should be retried / 0 if it should not
+ **/
+static int ibmvfc_retry_cmd(u16 status, u16 error)
+{
+ int rc = ibmvfc_get_err_index(status, error);
+
+ if (rc >= 0)
+ return cmd_status[rc].retry;
+ return 1;
+}
+
+static const char *unknown_fc_explain = "unknown fc explain";
+
+static const struct {
+ u16 fc_explain;
+ char *name;
+} ls_explain [] = {
+ { 0x00, "no additional explanation" },
+ { 0x01, "service parameter error - options" },
+ { 0x03, "service parameter error - initiator control" },
+ { 0x05, "service parameter error - recipient control" },
+ { 0x07, "service parameter error - received data field size" },
+ { 0x09, "service parameter error - concurrent seq" },
+ { 0x0B, "service parameter error - credit" },
+ { 0x0D, "invalid N_Port/F_Port_Name" },
+ { 0x0E, "invalid node/Fabric Name" },
+ { 0x0F, "invalid common service parameters" },
+ { 0x11, "invalid association header" },
+ { 0x13, "association header required" },
+ { 0x15, "invalid originator S_ID" },
+ { 0x17, "invalid OX_ID-RX-ID combination" },
+ { 0x19, "command (request) already in progress" },
+ { 0x1E, "N_Port Login requested" },
+ { 0x1F, "Invalid N_Port_ID" },
+};
+
+static const struct {
+ u16 fc_explain;
+ char *name;
+} gs_explain [] = {
+ { 0x00, "no additional explanation" },
+ { 0x01, "port identifier not registered" },
+ { 0x02, "port name not registered" },
+ { 0x03, "node name not registered" },
+ { 0x04, "class of service not registered" },
+ { 0x06, "initial process associator not registered" },
+ { 0x07, "FC-4 TYPEs not registered" },
+ { 0x08, "symbolic port name not registered" },
+ { 0x09, "symbolic node name not registered" },
+ { 0x0A, "port type not registered" },
+ { 0xF0, "authorization exception" },
+ { 0xF1, "authentication exception" },
+ { 0xF2, "data base full" },
+ { 0xF3, "data base empty" },
+ { 0xF4, "processing request" },
+ { 0xF5, "unable to verify connection" },
+ { 0xF6, "devices not in a common zone" },
+};
+
+/**
+ * ibmvfc_get_ls_explain - Return the FC Explain description text
+ * @status: FC Explain status
+ *
+ * Returns:
+ * error string
+ **/
+static const char *ibmvfc_get_ls_explain(u16 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
+ if (ls_explain[i].fc_explain == status)
+ return ls_explain[i].name;
+
+ return unknown_fc_explain;
+}
+
+/**
+ * ibmvfc_get_gs_explain - Return the FC Explain description text
+ * @status: FC Explain status
+ *
+ * Returns:
+ * error string
+ **/
+static const char *ibmvfc_get_gs_explain(u16 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
+ if (gs_explain[i].fc_explain == status)
+ return gs_explain[i].name;
+
+ return unknown_fc_explain;
+}
+
+static const struct {
+ enum ibmvfc_fc_type fc_type;
+ char *name;
+} fc_type [] = {
+ { IBMVFC_FABRIC_REJECT, "fabric reject" },
+ { IBMVFC_PORT_REJECT, "port reject" },
+ { IBMVFC_LS_REJECT, "ELS reject" },
+ { IBMVFC_FABRIC_BUSY, "fabric busy" },
+ { IBMVFC_PORT_BUSY, "port busy" },
+ { IBMVFC_BASIC_REJECT, "basic reject" },
+};
+
+static const char *unknown_fc_type = "unknown fc type";
+
+/**
+ * ibmvfc_get_fc_type - Return the FC Type description text
+ * @status: FC Type error status
+ *
+ * Returns:
+ * error string
+ **/
+static const char *ibmvfc_get_fc_type(u16 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fc_type); i++)
+ if (fc_type[i].fc_type == status)
+ return fc_type[i].name;
+
+ return unknown_fc_type;
+}
+
+/**
+ * ibmvfc_set_tgt_action - Set the next init action for the target
+ * @tgt: ibmvfc target struct
+ * @action: action to perform
+ *
+ **/
+static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+ enum ibmvfc_target_action action)
+{
+ switch (tgt->action) {
+ case IBMVFC_TGT_ACTION_DEL_RPORT:
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ tgt->action = action;
+ case IBMVFC_TGT_ACTION_DELETED_RPORT:
+ break;
+ default:
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+ tgt->add_rport = 0;
+ tgt->action = action;
+ break;
+ }
+}
+
+/**
+ * ibmvfc_set_host_state - Set the state for the host
+ * @vhost: ibmvfc host struct
+ * @state: state to set host to
+ *
+ * Returns:
+ * 0 if state changed / non-zero if not changed
+ **/
+static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
+ enum ibmvfc_host_state state)
+{
+ int rc = 0;
+
+ switch (vhost->state) {
+ case IBMVFC_HOST_OFFLINE:
+ rc = -EINVAL;
+ break;
+ default:
+ vhost->state = state;
+ break;
+ };
+
+ return rc;
+}
+
+/**
+ * ibmvfc_set_host_action - Set the next init action for the host
+ * @vhost: ibmvfc host struct
+ * @action: action to perform
+ *
+ **/
+static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+ enum ibmvfc_host_action action)
+{
+ switch (action) {
+ case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_LOGO_WAIT:
+ if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_QUERY:
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ vhost->action = action;
+ break;
+ default:
+ break;
+ };
+ break;
+ case IBMVFC_HOST_ACTION_TGT_INIT:
+ if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_INIT:
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_RESET:
+ case IBMVFC_HOST_ACTION_REENABLE:
+ break;
+ default:
+ vhost->action = action;
+ break;
+ };
+ break;
+ case IBMVFC_HOST_ACTION_LOGO:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_RESET:
+ case IBMVFC_HOST_ACTION_REENABLE:
+ default:
+ vhost->action = action;
+ break;
+ };
+}
+
+/**
+ * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
+{
+ if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+ if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ }
+ } else
+ vhost->reinit = 1;
+
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_link_down - Handle a link down event from the adapter
+ * @vhost: ibmvfc host struct
+ * @state: ibmvfc host state to enter
+ *
+ **/
+static void ibmvfc_link_down(struct ibmvfc_host *vhost,
+ enum ibmvfc_host_state state)
+{
+ struct ibmvfc_target *tgt;
+
+ ENTER;
+ scsi_block_requests(vhost->host);
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_set_host_state(vhost, state);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+ vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
+ wake_up(&vhost->work_wait_q);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_init_host - Start host initialization
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ibmvfc_init_host(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+ if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
+ dev_err(vhost->dev,
+ "Host initialization retries exceeded. Taking adapter offline\n");
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ return;
+ }
+ }
+
+ if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+ vhost->async_crq.cur = 0;
+
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ vhost->job_step = ibmvfc_npiv_login;
+ wake_up(&vhost->work_wait_q);
+ }
+}
+
+/**
+ * ibmvfc_send_crq - Send a CRQ
+ * @vhost: ibmvfc host struct
+ * @word1: the first 64 bits of the data
+ * @word2: the second 64 bits of the data
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
+{
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvfc_send_crq_init - Send a CRQ init message
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
+{
+ ibmvfc_dbg(vhost, "Sending CRQ init\n");
+ return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
+{
+ ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
+ return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
+ * @vhost: ibmvfc host struct
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ **/
+static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
+{
+ long rc = 0;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+ ibmvfc_dbg(vhost, "Releasing CRQ\n");
+ free_irq(vdev->irq, vhost);
+ tasklet_kill(&vhost->tasklet);
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
+ dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)crq->msgs);
+}
+
+/**
+ * ibmvfc_reenable_crq_queue - reenables the CRQ
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
+{
+ int rc = 0;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+ /* Re-enable the CRQ */
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc)
+ dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvfc_reset_crq - resets a crq after a failure
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+ /* Close the CRQ */
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+
+ /* Clean out the queue */
+ memset(crq->msgs, 0, PAGE_SIZE);
+ crq->cur = 0;
+
+ /* And re-open it again */
+ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+ crq->msg_token, PAGE_SIZE);
+
+ if (rc == H_CLOSED)
+ /* Adapter is good, but other end is not ready */
+ dev_warn(vhost->dev, "Partner adapter not ready\n");
+ else if (rc != 0)
+ dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ return rc;
+}
+
+/**
+ * ibmvfc_valid_event - Determines if event is valid.
+ * @pool: event_pool that contains the event
+ * @evt: ibmvfc event to be checked for validity
+ *
+ * Return value:
+ * 1 if event is valid / 0 if event is not valid
+ **/
+static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
+ struct ibmvfc_event *evt)
+{
+ int index = evt - pool->events;
+ if (index < 0 || index >= pool->size) /* outside of bounds */
+ return 0;
+ if (evt != pool->events + index) /* unaligned */
+ return 0;
+ return 1;
+}
+
+/**
+ * ibmvfc_free_event - Free the specified event
+ * @evt: ibmvfc_event to be freed
+ *
+ **/
+static void ibmvfc_free_event(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_event_pool *pool = &vhost->pool;
+
+ BUG_ON(!ibmvfc_valid_event(pool, evt));
+ BUG_ON(atomic_inc_return(&evt->free) != 1);
+ list_add_tail(&evt->queue, &vhost->free);
+}
+
+/**
+ * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
+ * @evt: ibmvfc event struct
+ *
+ * This function does not setup any error status, that must be done
+ * before this function gets called.
+ **/
+static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
+{
+ struct scsi_cmnd *cmnd = evt->cmnd;
+
+ if (cmnd) {
+ scsi_dma_unmap(cmnd);
+ cmnd->scsi_done(cmnd);
+ }
+
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+
+ ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_fail_request - Fail request with specified error code
+ * @evt: ibmvfc event struct
+ * @error_code: error code to fail request with
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
+{
+ if (evt->cmnd) {
+ evt->cmnd->result = (error_code << 16);
+ evt->done = ibmvfc_scsi_eh_done;
+ } else
+ evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
+
+ list_del(&evt->queue);
+ del_timer(&evt->timer);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+}
+
+/**
+ * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
+ * @vhost: ibmvfc host struct
+ * @error_code: error code to fail requests with
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
+{
+ struct ibmvfc_event *evt, *pos;
+
+ ibmvfc_dbg(vhost, "Purging all requests\n");
+ list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+ ibmvfc_fail_request(evt, error_code);
+}
+
+/**
+ * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
+ * @vhost: struct ibmvfc host to reset
+ **/
+static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
+{
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+}
+
+/**
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * @vhost: struct ibmvfc host to reset
+ **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+ if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
+ !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
+ vhost->job_step = ibmvfc_npiv_logout;
+ wake_up(&vhost->work_wait_q);
+ } else
+ ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost: ibmvfc host struct
+ **/
+static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ __ibmvfc_reset_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_retry_host_init - Retry host initialization if allowed
+ * @vhost: ibmvfc host struct
+ *
+ * Returns: 1 if init will be retried / 0 if not
+ *
+ **/
+static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+{
+ int retry = 0;
+
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+ vhost->delay_init = 1;
+ if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
+ dev_err(vhost->dev,
+ "Host initialization retries exceeded. Taking adapter offline\n");
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
+ __ibmvfc_reset_host(vhost);
+ else {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ retry = 1;
+ }
+ }
+
+ wake_up(&vhost->work_wait_q);
+ return retry;
+}
+
+/**
+ * __ibmvfc_get_target - Find the specified scsi_target (no locking)
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->target_id == starget->id) {
+ kref_get(&tgt->kref);
+ return tgt;
+ }
+ return NULL;
+}
+
+/**
+ * ibmvfc_get_target - Find the specified scsi_target
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_target *tgt;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ tgt = __ibmvfc_get_target(starget);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return tgt;
+}
+
+/**
+ * ibmvfc_get_host_speed - Get host port speed
+ * @shost: scsi host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vhost->state == IBMVFC_ACTIVE) {
+ switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
+ case 1:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case 2:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case 4:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case 8:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
+ case 10:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case 16:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
+ default:
+ ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
+ be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ } else
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_get_host_port_state - Get host port state
+ * @shost: scsi host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ switch (vhost->state) {
+ case IBMVFC_INITIALIZING:
+ case IBMVFC_ACTIVE:
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ case IBMVFC_LINK_DOWN:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case IBMVFC_LINK_DEAD:
+ case IBMVFC_HOST_OFFLINE:
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ break;
+ case IBMVFC_HALTED:
+ fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
+ break;
+ case IBMVFC_NO_CRQ:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ default:
+ ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
+ * @rport: rport struct
+ * @timeout: timeout value
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+/**
+ * ibmvfc_release_tgt - Free memory allocated for a target
+ * @kref: kref struct
+ *
+ **/
+static void ibmvfc_release_tgt(struct kref *kref)
+{
+ struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
+ kfree(tgt);
+}
+
+/**
+ * ibmvfc_get_starget_node_name - Get SCSI target's node name
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
+{
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
+ fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+}
+
+/**
+ * ibmvfc_get_starget_port_name - Get SCSI target's port name
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
+{
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
+ fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+}
+
+/**
+ * ibmvfc_get_starget_port_id - Get SCSI target's port ID
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
+{
+ struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
+ fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
+ if (tgt)
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+}
+
+/**
+ * ibmvfc_wait_while_resetting - Wait while the host resets
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
+{
+ long timeout = wait_event_timeout(vhost->init_wait_q,
+ ((vhost->state == IBMVFC_ACTIVE ||
+ vhost->state == IBMVFC_HOST_OFFLINE ||
+ vhost->state == IBMVFC_LINK_DEAD) &&
+ vhost->action == IBMVFC_HOST_ACTION_NONE),
+ (init_timeout * HZ));
+
+ return timeout ? 0 : -EIO;
+}
+
+/**
+ * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
+ * @shost: scsi host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
+ ibmvfc_reset_host(vhost);
+ return ibmvfc_wait_while_resetting(vhost);
+}
+
+/**
+ * ibmvfc_gather_partition_info - Gather info about the LPAR
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
+{
+ struct device_node *rootdn;
+ const char *name;
+ const unsigned int *num;
+
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn)
+ return;
+
+ name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ if (name)
+ strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
+ num = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (num)
+ vhost->partition_number = *num;
+ of_node_put(rootdn);
+}
+
+/**
+ * ibmvfc_set_login_info - Setup info for NPIV login
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+ struct device_node *of_node = vhost->dev->of_node;
+ const char *location;
+
+ memset(login_info, 0, sizeof(*login_info));
+
+ login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
+ login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+ login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
+ login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
+ login_info->partition_num = cpu_to_be32(vhost->partition_number);
+ login_info->vfc_frame_version = cpu_to_be32(1);
+ login_info->fcp_version = cpu_to_be16(3);
+ login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
+ if (vhost->client_migrated)
+ login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
+
+ login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
+ login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
+ login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
+ login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
+ strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
+ strncpy(login_info->device_name,
+ dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
+
+ location = of_get_property(of_node, "ibm,loc-code", NULL);
+ location = location ? location : dev_name(vhost->dev);
+ strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
+}
+
+/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost: ibmvfc host who owns the event pool
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &vhost->pool;
+
+ ENTER;
+ pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+ pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+ if (!pool->events)
+ return -ENOMEM;
+
+ pool->iu_storage = dma_alloc_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ &pool->iu_token, 0);
+
+ if (!pool->iu_storage) {
+ kfree(pool->events);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pool->size; ++i) {
+ struct ibmvfc_event *evt = &pool->events[i];
+ atomic_set(&evt->free, 1);
+ evt->crq.valid = 0x80;
+ evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
+ evt->xfer_iu = pool->iu_storage + i;
+ evt->vhost = vhost;
+ evt->ext_list = NULL;
+ list_add_tail(&evt->queue, &vhost->free);
+ }
+
+ LEAVE;
+ return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost: ibmvfc host who owns the event pool
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &vhost->pool;
+
+ ENTER;
+ for (i = 0; i < pool->size; ++i) {
+ list_del(&pool->events[i].queue);
+ BUG_ON(atomic_read(&pool->events[i].free) != 1);
+ if (pool->events[i].ext_list)
+ dma_pool_free(vhost->sg_pool,
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+
+ kfree(pool->events);
+ dma_free_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ pool->iu_storage, pool->iu_token);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_get_event - Gets the next free event in pool
+ * @vhost: ibmvfc host struct
+ *
+ * Returns a free event from the pool.
+ **/
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_event *evt;
+
+ BUG_ON(list_empty(&vhost->free));
+ evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue);
+ return evt;
+}
+
+/**
+ * ibmvfc_init_event - Initialize fields in an event struct that are always
+ * required.
+ * @evt: The event
+ * @done: Routine to call when the event is responded to
+ * @format: SRP or MAD format
+ **/
+static void ibmvfc_init_event(struct ibmvfc_event *evt,
+ void (*done) (struct ibmvfc_event *), u8 format)
+{
+ evt->cmnd = NULL;
+ evt->sync_iu = NULL;
+ evt->crq.format = format;
+ evt->done = done;
+ evt->eh_comp = NULL;
+}
+
+/**
+ * ibmvfc_map_sg_list - Initialize scatterlist
+ * @scmd: scsi command struct
+ * @nseg: number of scatterlist segments
+ * @md: memory descriptor list to initialize
+ **/
+static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
+ struct srp_direct_buf *md)
+{
+ int i;
+ struct scatterlist *sg;
+
+ scsi_for_each_sg(scmd, sg, nseg, i) {
+ md[i].va = cpu_to_be64(sg_dma_address(sg));
+ md[i].len = cpu_to_be32(sg_dma_len(sg));
+ md[i].key = 0;
+ }
+}
+
+/**
+ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
+ * @scmd: Scsi_Cmnd with the scatterlist
+ * @evt: ibmvfc event struct
+ * @vfc_cmd: vfc_cmd that contains the memory descriptor
+ * @dev: device for which to map dma memory
+ *
+ * Returns:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
+ struct ibmvfc_event *evt,
+ struct ibmvfc_cmd *vfc_cmd, struct device *dev)
+{
+
+ int sg_mapped;
+ struct srp_direct_buf *data = &vfc_cmd->ioba;
+ struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+
+ sg_mapped = scsi_dma_map(scmd);
+ if (!sg_mapped) {
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
+ return 0;
+ } else if (unlikely(sg_mapped < 0)) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
+ return sg_mapped;
+ }
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
+ vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+ } else {
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
+ vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+ }
+
+ if (sg_mapped == 1) {
+ ibmvfc_map_sg_list(scmd, sg_mapped, data);
+ return 0;
+ }
+
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
+
+ if (!evt->ext_list) {
+ evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
+ &evt->ext_list_token);
+
+ if (!evt->ext_list) {
+ scsi_dma_unmap(scmd);
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
+ return -ENOMEM;
+ }
+ }
+
+ ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
+
+ data->va = cpu_to_be64(evt->ext_list_token);
+ data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
+ data->key = 0;
+ return 0;
+}
+
+/**
+ * ibmvfc_timeout - Internal command timeout handler
+ * @evt: struct ibmvfc_event that timed out
+ *
+ * Called when an internally generated command times out
+ **/
+static void ibmvfc_timeout(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
+ ibmvfc_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
+ * @evt: event to be sent
+ * @vhost: ibmvfc host struct
+ * @timeout: timeout in seconds - 0 means do not time command
+ *
+ * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
+ **/
+static int ibmvfc_send_event(struct ibmvfc_event *evt,
+ struct ibmvfc_host *vhost, unsigned long timeout)
+{
+ __be64 *crq_as_u64 = (__be64 *) &evt->crq;
+ int rc;
+
+ /* Copy the IU into the transfer area */
+ *evt->xfer_iu = evt->iu;
+ if (evt->crq.format == IBMVFC_CMD_FORMAT)
+ evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
+ else if (evt->crq.format == IBMVFC_MAD_FORMAT)
+ evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
+ else
+ BUG();
+
+ list_add_tail(&evt->queue, &vhost->sent);
+ init_timer(&evt->timer);
+
+ if (timeout) {
+ evt->timer.data = (unsigned long) evt;
+ evt->timer.expires = jiffies + (timeout * HZ);
+ evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
+ add_timer(&evt->timer);
+ }
+
+ mb();
+
+ if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1])))) {
+ list_del(&evt->queue);
+ del_timer(&evt->timer);
+
+ /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+ * Firmware will send a CRQ with a transport event (0xFF) to
+ * tell this client what has happened to the transport. This
+ * will be handled in ibmvfc_handle_crq()
+ */
+ if (rc == H_CLOSED) {
+ if (printk_ratelimit())
+ dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
+ if (evt->cmnd)
+ scsi_dma_unmap(evt->cmnd);
+ ibmvfc_free_event(evt);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
+ if (evt->cmnd) {
+ evt->cmnd->result = DID_ERROR << 16;
+ evt->done = ibmvfc_scsi_eh_done;
+ } else
+ evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
+
+ evt->done(evt);
+ } else
+ ibmvfc_trc_start(evt);
+
+ return 0;
+}
+
+/**
+ * ibmvfc_log_error - Log an error for the failed command if appropriate
+ * @evt: ibmvfc event to log
+ *
+ **/
+static void ibmvfc_log_error(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct scsi_cmnd *cmnd = evt->cmnd;
+ const char *err = unknown_error;
+ int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
+ int logerr = 0;
+ int rsp_code = 0;
+
+ if (index >= 0) {
+ logerr = cmd_status[index].log;
+ err = cmd_status[index].name;
+ }
+
+ if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
+ return;
+
+ if (rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = rsp->data.info.rsp_code;
+
+ scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
+ cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+ rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
+}
+
+/**
+ * ibmvfc_relogin - Log back into the specified device
+ * @sdev: scsi device struct
+ *
+ **/
+static void ibmvfc_relogin(struct scsi_device *sdev)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (rport == tgt->rport) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ break;
+ }
+ }
+
+ ibmvfc_reinit_host(vhost);
+}
+
+/**
+ * ibmvfc_scsi_done - Handle responses from commands
+ * @evt: ibmvfc event to be handled
+ *
+ * Used as a callback when sending scsi cmds.
+ **/
+static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+ struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct scsi_cmnd *cmnd = evt->cmnd;
+ u32 rsp_len = 0;
+ u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
+
+ if (cmnd) {
+ if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
+ scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
+ else if (rsp->flags & FCP_RESID_UNDER)
+ scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
+ else
+ scsi_set_resid(cmnd, 0);
+
+ if (vfc_cmd->status) {
+ cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+
+ if (rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
+ if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
+ sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
+ if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
+ memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+ if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
+ (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
+ ibmvfc_relogin(cmnd->device);
+
+ if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
+ cmnd->result = (DID_ERROR << 16);
+
+ ibmvfc_log_error(evt);
+ }
+
+ if (!cmnd->result &&
+ (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
+ cmnd->result = (DID_ERROR << 16);
+
+ scsi_dma_unmap(cmnd);
+ cmnd->scsi_done(cmnd);
+ }
+
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+
+ ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_host_chkready - Check if the host can accept commands
+ * @vhost: struct ibmvfc host
+ *
+ * Returns:
+ * 1 if host can accept command / 0 if not
+ **/
+static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
+{
+ int result = 0;
+
+ switch (vhost->state) {
+ case IBMVFC_LINK_DEAD:
+ case IBMVFC_HOST_OFFLINE:
+ result = DID_NO_CONNECT << 16;
+ break;
+ case IBMVFC_NO_CRQ:
+ case IBMVFC_INITIALIZING:
+ case IBMVFC_HALTED:
+ case IBMVFC_LINK_DOWN:
+ result = DID_REQUEUE << 16;
+ break;
+ case IBMVFC_ACTIVE:
+ result = 0;
+ break;
+ };
+
+ return result;
+}
+
+/**
+ * ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @cmnd: struct scsi_cmnd to be executed
+ * @done: Callback function to be called when cmnd is completed
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ struct ibmvfc_cmd *vfc_cmd;
+ struct ibmvfc_event *evt;
+ int rc;
+
+ if (unlikely((rc = fc_remote_port_chkready(rport))) ||
+ unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+ cmnd->result = rc;
+ done(cmnd);
+ return 0;
+ }
+
+ cmnd->result = (DID_OK << 16);
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ evt->cmnd = cmnd;
+ cmnd->scsi_done = done;
+ vfc_cmd = &evt->iu.cmd;
+ memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+ vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
+ vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
+ vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
+ vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
+ vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
+ vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
+ int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
+ memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+ if (cmnd->flags & SCMD_TAGGED) {
+ vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
+ vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+ }
+
+ if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
+ return ibmvfc_send_event(evt, vhost, 0);
+
+ ibmvfc_free_event(evt);
+ if (rc == -ENOMEM)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ scmd_printk(KERN_ERR, cmnd,
+ "Failed to map DMA buffer for command. rc=%d\n", rc);
+
+ cmnd->result = DID_ERROR << 16;
+ done(cmnd);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(ibmvfc_queuecommand)
+
+/**
+ * ibmvfc_sync_completion - Signal that a synchronous command has completed
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
+{
+ /* copy the response back */
+ if (evt->sync_iu)
+ *evt->sync_iu = *evt->xfer_iu;
+
+ complete(&evt->comp);
+}
+
+/**
+ * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
+ * @evt: struct ibmvfc_event
+ *
+ **/
+static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+
+ ibmvfc_free_event(evt);
+ vhost->aborting_passthru = 0;
+ dev_info(vhost->dev, "Passthru command cancelled\n");
+}
+
+/**
+ * ibmvfc_bsg_timeout - Handle a BSG timeout
+ * @job: struct fc_bsg_job that timed out
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+{
+ struct ibmvfc_host *vhost = shost_priv(job->shost);
+ unsigned long port_id = (unsigned long)job->dd_data;
+ struct ibmvfc_event *evt;
+ struct ibmvfc_tmf *tmf;
+ unsigned long flags;
+ int rc;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
+ __ibmvfc_reset_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
+
+ vhost->aborting_passthru = 1;
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->common.version = cpu_to_be32(1);
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(port_id);
+ tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
+ tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
+ rc = ibmvfc_send_event(evt, vhost, default_timeout);
+
+ if (rc != 0) {
+ vhost->aborting_passthru = 0;
+ dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
+ rc = -EIO;
+ } else
+ dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
+ port_id);
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
+ * @vhost: struct ibmvfc_host to send command
+ * @port_id: port ID to send command
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+{
+ struct ibmvfc_port_login *plogi;
+ struct ibmvfc_target *tgt;
+ struct ibmvfc_event *evt;
+ union ibmvfc_iu rsp_iu;
+ unsigned long flags;
+ int rc = 0, issue_login = 1;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == port_id) {
+ issue_login = 0;
+ break;
+ }
+ }
+
+ if (!issue_login)
+ goto unlock_out;
+ if (unlikely((rc = ibmvfc_host_chkready(vhost))))
+ goto unlock_out;
+
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ plogi = &evt->iu.plogi;
+ memset(plogi, 0, sizeof(*plogi));
+ plogi->common.version = cpu_to_be32(1);
+ plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
+ plogi->common.length = cpu_to_be16(sizeof(*plogi));
+ plogi->scsi_id = cpu_to_be64(port_id);
+ evt->sync_iu = &rsp_iu;
+ init_completion(&evt->comp);
+
+ rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rc)
+ return -EIO;
+
+ wait_for_completion(&evt->comp);
+
+ if (rsp_iu.plogi.common.status)
+ rc = -EIO;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+unlock_out:
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_bsg_request - Handle a BSG request
+ * @job: struct fc_bsg_job to be executed
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+{
+ struct ibmvfc_host *vhost = shost_priv(job->shost);
+ struct fc_rport *rport = job->rport;
+ struct ibmvfc_passthru_mad *mad;
+ struct ibmvfc_event *evt;
+ union ibmvfc_iu rsp_iu;
+ unsigned long flags, port_id = -1;
+ unsigned int code = job->request->msgcode;
+ int rc = 0, req_seg, rsp_seg, issue_login = 0;
+ u32 fc_flags, rsp_len;
+
+ ENTER;
+ job->reply->reply_payload_rcv_len = 0;
+ if (rport)
+ port_id = rport->port_id;
+
+ switch (code) {
+ case FC_BSG_HST_ELS_NOLOGIN:
+ port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
+ (job->request->rqst_data.h_els.port_id[1] << 8) |
+ job->request->rqst_data.h_els.port_id[2];
+ case FC_BSG_RPT_ELS:
+ fc_flags = IBMVFC_FC_ELS;
+ break;
+ case FC_BSG_HST_CT:
+ issue_login = 1;
+ port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
+ (job->request->rqst_data.h_ct.port_id[1] << 8) |
+ job->request->rqst_data.h_ct.port_id[2];
+ case FC_BSG_RPT_CT:
+ fc_flags = IBMVFC_FC_CT_IU;
+ break;
+ default:
+ return -ENOTSUPP;
+ };
+
+ if (port_id == -1)
+ return -EINVAL;
+ if (!mutex_trylock(&vhost->passthru_mutex))
+ return -EBUSY;
+
+ job->dd_data = (void *)port_id;
+ req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (!req_seg) {
+ mutex_unlock(&vhost->passthru_mutex);
+ return -ENOMEM;
+ }
+
+ rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ if (!rsp_seg) {
+ dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ mutex_unlock(&vhost->passthru_mutex);
+ return -ENOMEM;
+ }
+
+ if (req_seg > 1 || rsp_seg > 1) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (issue_login)
+ rc = ibmvfc_bsg_plogi(vhost, port_id);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+
+ if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
+ unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ goto out;
+ }
+
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.passthru;
+
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
+ mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
+
+ mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
+ offsetof(struct ibmvfc_passthru_mad, iu));
+ mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
+
+ mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
+ mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
+ mad->iu.flags = cpu_to_be32(fc_flags);
+ mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
+
+ mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
+ mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
+ mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
+ mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
+ mad->iu.scsi_id = cpu_to_be64(port_id);
+ mad->iu.tag = cpu_to_be64((u64)evt);
+ rsp_len = be32_to_cpu(mad->iu.rsp.len);
+
+ evt->sync_iu = &rsp_iu;
+ init_completion(&evt->comp);
+ rc = ibmvfc_send_event(evt, vhost, 0);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ wait_for_completion(&evt->comp);
+
+ if (rsp_iu.passthru.common.status)
+ rc = -EIO;
+ else
+ job->reply->reply_payload_rcv_len = rsp_len;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ job->reply->result = rc;
+ job->job_done(job);
+ rc = 0;
+out:
+ dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ mutex_unlock(&vhost->passthru_mutex);
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_reset_device - Reset the device with the specified reset type
+ * @sdev: scsi device to reset
+ * @type: reset type
+ * @desc: reset type description for log messages
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_cmd *tmf;
+ struct ibmvfc_event *evt = NULL;
+ union ibmvfc_iu rsp_iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ int rsp_rc = -EBUSY;
+ unsigned long flags;
+ int rsp_code = 0;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+ tmf = &evt->iu.cmd;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
+ tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
+ tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
+ tmf->iu.tmf_flags = type;
+ evt->sync_iu = &rsp_iu;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
+ desc, rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
+ wait_for_completion(&evt->comp);
+
+ if (rsp_iu.cmd.status)
+ rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+ if (rsp_code) {
+ if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = fc_rsp->data.info.rsp_code;
+
+ sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
+ rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ fc_rsp->scsi_status);
+ rsp_rc = -EIO;
+ } else
+ sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rsp_rc;
+}
+
+/**
+ * ibmvfc_match_rport - Match function for specified remote port
+ * @evt: ibmvfc event struct
+ * @device: device to match (rport)
+ *
+ * Returns:
+ * 1 if event matches rport / 0 if event does not match rport
+ **/
+static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
+{
+ struct fc_rport *cmd_rport;
+
+ if (evt->cmnd) {
+ cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
+ if (cmd_rport == rport)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt: ibmvfc event struct
+ * @device: device to match (starget)
+ *
+ * Returns:
+ * 1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt: ibmvfc event struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && evt->cmnd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost: ibmvfc host struct
+ * @device: device to match (starget or sdev)
+ * @match: match function
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+ int (*match) (struct ibmvfc_event *, void *))
+{
+ struct ibmvfc_event *evt;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int wait;
+ unsigned long flags;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+
+ ENTER;
+ do {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (wait)
+ dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
+/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev: scsi device to cancel commands
+ * @type: type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct scsi_target *starget = scsi_target(sdev);
+ struct fc_rport *rport = starget_to_rport(starget);
+ struct ibmvfc_tmf *tmf;
+ struct ibmvfc_event *evt, *found_evt;
+ union ibmvfc_iu rsp;
+ int rsp_rc = -EBUSY;
+ unsigned long flags;
+ u16 status;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
+
+ if (vhost->logged_in) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->common.version = cpu_to_be32(1);
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &tmf->lun);
+ if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
+ type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+ if (vhost->state == IBMVFC_ACTIVE)
+ tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
+ else
+ tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
+
+ evt->sync_iu = &rsp;
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
+ /* If failure is received, the host adapter is most likely going
+ through reset, return success so the caller will wait for the command
+ being cancelled to get returned */
+ return 0;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+ wait_for_completion(&evt->comp);
+ status = be16_to_cpu(rsp.mad_common.status);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (status != IBMVFC_MAD_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter most likely going through reset, return success to
+ the caller will wait for the command being cancelled to get returned */
+ return 0;
+ default:
+ return -EIO;
+ };
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+ return 0;
+}
+
+/**
+ * ibmvfc_match_key - Match function for specified cancel key
+ * @evt: ibmvfc event struct
+ * @key: cancel key to match
+ *
+ * Returns:
+ * 1 if event matches key / 0 if event does not match key
+ **/
+static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
+{
+ unsigned long cancel_key = (unsigned long)key;
+
+ if (evt->crq.format == IBMVFC_CMD_FORMAT &&
+ be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_match_evt - Match function for specified event
+ * @evt: ibmvfc event struct
+ * @match: event to match
+ *
+ * Returns:
+ * 1 if event matches key / 0 if event does not match key
+ **/
+static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
+{
+ if (evt == match)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_abort_task_set - Abort outstanding commands to the device
+ * @sdev: scsi device to abort commands
+ *
+ * This sends an Abort Task Set to the VIOS for the specified device. This does
+ * NOT send any cancel to the VIOS. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_cmd *tmf;
+ struct ibmvfc_event *evt, *found_evt;
+ union ibmvfc_iu rsp_iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ int rc, rsp_rc = -EBUSY;
+ unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
+ int rsp_code = 0;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
+
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+ tmf = &evt->iu.cmd;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
+ tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
+ tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
+ tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
+ evt->sync_iu = &rsp_iu;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
+ timeout = wait_for_completion_timeout(&evt->comp, timeout);
+
+ if (!timeout) {
+ rc = ibmvfc_cancel_all(sdev, 0);
+ if (!rc) {
+ rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+ if (rc == SUCCESS)
+ rc = 0;
+ }
+
+ if (rc) {
+ sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
+ ibmvfc_reset_host(vhost);
+ rsp_rc = -EIO;
+ rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+
+ if (rc == SUCCESS)
+ rsp_rc = 0;
+
+ rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
+ if (rc != SUCCESS) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_hard_reset_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rsp_rc = 0;
+ }
+
+ goto out;
+ }
+ }
+
+ if (rsp_iu.cmd.status)
+ rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+
+ if (rsp_code) {
+ if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = fc_rsp->data.info.rsp_code;
+
+ sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
+ rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ fc_rsp->scsi_status);
+ rsp_rc = -EIO;
+ } else
+ sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+
+out:
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rsp_rc;
+}
+
+/**
+ * ibmvfc_eh_abort_handler - Abort a command
+ * @cmd: scsi command to abort
+ *
+ * Returns:
+ * SUCCESS / FAST_IO_FAIL / FAILED
+ **/
+static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ int cancel_rc, block_rc;
+ int rc = FAILED;
+
+ ENTER;
+ block_rc = fc_block_scsi_eh(cmd);
+ ibmvfc_wait_while_resetting(vhost);
+ if (block_rc != FAST_IO_FAIL) {
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+ ibmvfc_abort_task_set(sdev);
+ } else
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+
+ if (!cancel_rc)
+ rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
+
+ if (block_rc == FAST_IO_FAIL && rc != FAILED)
+ rc = FAST_IO_FAIL;
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_eh_device_reset_handler - Reset a single LUN
+ * @cmd: scsi command struct
+ *
+ * Returns:
+ * SUCCESS / FAST_IO_FAIL / FAILED
+ **/
+static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ int cancel_rc, block_rc, reset_rc = 0;
+ int rc = FAILED;
+
+ ENTER;
+ block_rc = fc_block_scsi_eh(cmd);
+ ibmvfc_wait_while_resetting(vhost);
+ if (block_rc != FAST_IO_FAIL) {
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+ reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+ } else
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+
+ if (!cancel_rc && !reset_rc)
+ rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
+
+ if (block_rc == FAST_IO_FAIL && rc != FAILED)
+ rc = FAST_IO_FAIL;
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
+ * @sdev: scsi device struct
+ * @data: return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
+{
+ unsigned long *rc = data;
+ *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+}
+
+/**
+ * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
+ * @sdev: scsi device struct
+ * @data: return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
+{
+ unsigned long *rc = data;
+ *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
+}
+
+/**
+ * ibmvfc_eh_target_reset_handler - Reset the target
+ * @cmd: scsi command struct
+ *
+ * Returns:
+ * SUCCESS / FAST_IO_FAIL / FAILED
+ **/
+static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct scsi_target *starget = scsi_target(sdev);
+ int block_rc;
+ int reset_rc = 0;
+ int rc = FAILED;
+ unsigned long cancel_rc = 0;
+
+ ENTER;
+ block_rc = fc_block_scsi_eh(cmd);
+ ibmvfc_wait_while_resetting(vhost);
+ if (block_rc != FAST_IO_FAIL) {
+ starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+ reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+ } else
+ starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
+
+ if (!cancel_rc && !reset_rc)
+ rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+
+ if (block_rc == FAST_IO_FAIL && rc != FAILED)
+ rc = FAST_IO_FAIL;
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_eh_host_reset_handler - Reset the connection to the server
+ * @cmd: struct scsi_cmnd having problems
+ *
+ **/
+static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int rc, block_rc;
+ struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+
+ block_rc = fc_block_scsi_eh(cmd);
+ dev_err(vhost->dev, "Resetting connection due to error recovery\n");
+ rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+ if (block_rc == FAST_IO_FAIL)
+ return FAST_IO_FAIL;
+
+ return rc ? FAILED : SUCCESS;
+}
+
+/**
+ * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
+ * @rport: rport struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ struct fc_rport *dev_rport;
+ struct scsi_device *sdev;
+ unsigned long rc;
+
+ ENTER;
+ shost_for_each_device(sdev, shost) {
+ dev_rport = starget_to_rport(scsi_target(sdev));
+ if (dev_rport != rport)
+ continue;
+ ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+ }
+
+ rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
+
+ if (rc == FAILED)
+ ibmvfc_issue_fc_host_lip(shost);
+ LEAVE;
+}
+
+static const struct ibmvfc_async_desc ae_desc [] = {
+ { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+ { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
+ { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
+};
+
+static const struct ibmvfc_async_desc unknown_ae = {
+ "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
+};
+
+/**
+ * ibmvfc_get_ae_desc - Get text description for async event
+ * @ae: async event
+ *
+ **/
+static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
+ if (ae_desc[i].ae == ae)
+ return &ae_desc[i];
+
+ return &unknown_ae;
+}
+
+static const struct {
+ enum ibmvfc_ae_link_state state;
+ const char *desc;
+} link_desc [] = {
+ { IBMVFC_AE_LS_LINK_UP, " link up" },
+ { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
+ { IBMVFC_AE_LS_LINK_DOWN, " link down" },
+ { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
+};
+
+/**
+ * ibmvfc_get_link_state - Get text description for link state
+ * @state: link state
+ *
+ **/
+static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(link_desc); i++)
+ if (link_desc[i].state == state)
+ return link_desc[i].desc;
+
+ return "";
+}
+
+/**
+ * ibmvfc_handle_async - Handle an async event from the adapter
+ * @crq: crq to process
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
+ struct ibmvfc_host *vhost)
+{
+ const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
+ struct ibmvfc_target *tgt;
+
+ ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
+ " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
+ ibmvfc_get_link_state(crq->link_state));
+
+ switch (be64_to_cpu(crq->event)) {
+ case IBMVFC_AE_RESUME:
+ switch (crq->link_state) {
+ case IBMVFC_AE_LS_LINK_DOWN:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ break;
+ case IBMVFC_AE_LS_LINK_DEAD:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ break;
+ case IBMVFC_AE_LS_LINK_UP:
+ case IBMVFC_AE_LS_LINK_BOUNCED:
+ default:
+ vhost->events_to_log |= IBMVFC_AE_LINKUP;
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
+ break;
+ };
+
+ break;
+ case IBMVFC_AE_LINK_UP:
+ vhost->events_to_log |= IBMVFC_AE_LINKUP;
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
+ break;
+ case IBMVFC_AE_SCN_FABRIC:
+ case IBMVFC_AE_SCN_DOMAIN:
+ vhost->events_to_log |= IBMVFC_AE_RSCN;
+ if (vhost->state < IBMVFC_HALTED) {
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
+ }
+ break;
+ case IBMVFC_AE_SCN_NPORT:
+ case IBMVFC_AE_SCN_GROUP:
+ vhost->events_to_log |= IBMVFC_AE_RSCN;
+ ibmvfc_reinit_host(vhost);
+ break;
+ case IBMVFC_AE_ELS_LOGO:
+ case IBMVFC_AE_ELS_PRLO:
+ case IBMVFC_AE_ELS_PLOGI:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
+ break;
+ if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
+ continue;
+ if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
+ continue;
+ if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
+ continue;
+ if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
+ tgt->logo_rcvd = 1;
+ if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_reinit_host(vhost);
+ }
+ }
+ break;
+ case IBMVFC_AE_LINK_DOWN:
+ case IBMVFC_AE_ADAPTER_FAILED:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ break;
+ case IBMVFC_AE_LINK_DEAD:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ break;
+ case IBMVFC_AE_HALT:
+ ibmvfc_link_down(vhost, IBMVFC_HALTED);
+ break;
+ default:
+ dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
+ break;
+ };
+}
+
+/**
+ * ibmvfc_handle_crq - Handles and frees received events in the CRQ
+ * @crq: Command/Response queue
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+{
+ long rc;
+ struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
+
+ switch (crq->valid) {
+ case IBMVFC_CRQ_INIT_RSP:
+ switch (crq->format) {
+ case IBMVFC_CRQ_INIT:
+ dev_info(vhost->dev, "Partner initialized\n");
+ /* Send back a response */
+ rc = ibmvfc_send_crq_init_complete(vhost);
+ if (rc == 0)
+ ibmvfc_init_host(vhost);
+ else
+ dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
+ break;
+ case IBMVFC_CRQ_INIT_COMPLETE:
+ dev_info(vhost->dev, "Partner initialization complete\n");
+ ibmvfc_init_host(vhost);
+ break;
+ default:
+ dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
+ }
+ return;
+ case IBMVFC_CRQ_XPORT_EVENT:
+ vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ if (crq->format == IBMVFC_PARTITION_MIGRATED) {
+ /* We need to re-setup the interpartition connection */
+ dev_info(vhost->dev, "Re-enabling adapter\n");
+ vhost->client_migrated = 1;
+ ibmvfc_purge_requests(vhost, DID_REQUEUE);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
+ } else {
+ dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+ }
+ return;
+ case IBMVFC_CRQ_CMD_RSP:
+ break;
+ default:
+ dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
+ return;
+ }
+
+ if (crq->format == IBMVFC_ASYNC_EVENT)
+ return;
+
+ /* The only kind of payload CRQs we should get are responses to
+ * things we send. Make sure this response is to something we
+ * actually sent
+ */
+ if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+ dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
+ crq->ioba);
+ return;
+ }
+
+ if (unlikely(atomic_read(&evt->free))) {
+ dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
+ crq->ioba);
+ return;
+ }
+
+ del_timer(&evt->timer);
+ list_del(&evt->queue);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+}
+
+/**
+ * ibmvfc_scan_finished - Check if the device scan is done.
+ * @shost: scsi host struct
+ * @time: current elapsed time
+ *
+ * Returns:
+ * 0 if scan is not done / 1 if scan is done
+ **/
+static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ unsigned long flags;
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ int done = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (time >= (init_timeout * HZ)) {
+ dev_info(vhost->dev, "Scan taking longer than %d seconds, "
+ "continuing initialization\n", init_timeout);
+ done = 1;
+ }
+
+ if (vhost->scan_complete)
+ done = 1;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return done;
+}
+
+/**
+ * ibmvfc_slave_alloc - Setup the device's task set value
+ * @sdev: struct scsi_device device to configure
+ *
+ * Set the device's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ * 0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_target_alloc - Setup the target's task set value
+ * @starget: struct scsi_target
+ *
+ * Set the target's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ * 0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ starget->hostdata = (void *)(unsigned long)vhost->task_set++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_slave_configure - Configure the device
+ * @sdev: struct scsi_device device to configure
+ *
+ * Enable allow_restart for a device if it is a disk. Adjust the
+ * queue_depth here also.
+ *
+ * Returns:
+ * 0
+ **/
+static int ibmvfc_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (sdev->type == TYPE_DISK)
+ sdev->allow_restart = 1;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
+ * @reason: calling context
+ *
+ * Return value:
+ * actual depth set
+ **/
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
+ qdepth = IBMVFC_MAX_CMDS_PER_LUN;
+
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.partition_name);
+}
+
+static ssize_t ibmvfc_show_host_device_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.device_name);
+}
+
+static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.port_loc_code);
+}
+
+static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.drc_name);
+}
+
+static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+}
+
+static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+}
+
+/**
+ * ibmvfc_show_log_level - Show the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_show_log_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return len;
+}
+
+/**
+ * ibmvfc_store_log_level - Change the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_store_log_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ vhost->log_level = simple_strtoul(buf, NULL, 10);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
+static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
+static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
+static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
+static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
+static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
+static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
+ ibmvfc_show_log_level, ibmvfc_store_log_level);
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
+ * @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
+ * @buf: buffer
+ * @off: offset
+ * @count: buffer size
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ int size = IBMVFC_TRACE_SIZE;
+ char *src = (char *)vhost->trace;
+
+ if (off > size)
+ return 0;
+ if (off + count > size) {
+ size -= off;
+ count = size;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ memcpy(buf, &src[off], count);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return count;
+}
+
+static struct bin_attribute ibmvfc_trace_attr = {
+ .attr = {
+ .name = "trace",
+ .mode = S_IRUGO,
+ },
+ .size = 0,
+ .read = ibmvfc_read_trace,
+};
+#endif
+
+static struct device_attribute *ibmvfc_attrs[] = {
+ &dev_attr_partition_name,
+ &dev_attr_device_name,
+ &dev_attr_port_loc_code,
+ &dev_attr_drc_name,
+ &dev_attr_npiv_version,
+ &dev_attr_capabilities,
+ &dev_attr_log_level,
+ NULL
+};
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "IBM POWER Virtual FC Adapter",
+ .proc_name = IBMVFC_NAME,
+ .queuecommand = ibmvfc_queuecommand,
+ .eh_abort_handler = ibmvfc_eh_abort_handler,
+ .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
+ .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
+ .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
+ .slave_alloc = ibmvfc_slave_alloc,
+ .slave_configure = ibmvfc_slave_configure,
+ .target_alloc = ibmvfc_target_alloc,
+ .scan_finished = ibmvfc_scan_finished,
+ .change_queue_depth = ibmvfc_change_queue_depth,
+ .cmd_per_lun = 16,
+ .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = IBMVFC_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = ibmvfc_attrs,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+/**
+ * ibmvfc_next_async_crq - Returns the next entry in async queue
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+ struct ibmvfc_async_crq *crq;
+
+ crq = &async_crq->msgs[async_crq->cur];
+ if (crq->valid & 0x80) {
+ if (++async_crq->cur == async_crq->size)
+ async_crq->cur = 0;
+ rmb();
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+/**
+ * ibmvfc_next_crq - Returns the next entry in message queue
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_crq_queue *queue = &vhost->crq;
+ struct ibmvfc_crq *crq;
+
+ crq = &queue->msgs[queue->cur];
+ if (crq->valid & 0x80) {
+ if (++queue->cur == queue->size)
+ queue->cur = 0;
+ rmb();
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+/**
+ * ibmvfc_interrupt - Interrupt handler
+ * @irq: number of irq to handle, not used
+ * @dev_instance: ibmvfc_host that received interrupt
+ *
+ * Returns:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
+{
+ struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
+ unsigned long flags;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vio_disable_interrupts(to_vio_dev(vhost->dev));
+ tasklet_schedule(&vhost->tasklet);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ibmvfc_tasklet - Interrupt handler tasklet
+ * @data: ibmvfc host struct
+ *
+ * Returns:
+ * Nothing
+ **/
+static void ibmvfc_tasklet(void *data)
+{
+ struct ibmvfc_host *vhost = data;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_async_crq *async;
+ unsigned long flags;
+ int done = 0;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ while (!done) {
+ /* Pull all the valid messages off the async CRQ */
+ while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+ ibmvfc_handle_async(async, vhost);
+ async->valid = 0;
+ wmb();
+ }
+
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+ ibmvfc_handle_crq(crq, vhost);
+ crq->valid = 0;
+ wmb();
+ }
+
+ vio_enable_interrupts(vdev);
+ if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvfc_handle_async(async, vhost);
+ async->valid = 0;
+ wmb();
+ } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvfc_handle_crq(crq, vhost);
+ crq->valid = 0;
+ wmb();
+ } else
+ done = 1;
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_init_tgt - Set the next init job step for the target
+ * @tgt: ibmvfc target struct
+ * @job_step: job step to perform
+ *
+ **/
+static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
+ void (*job_step) (struct ibmvfc_target *))
+{
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
+ tgt->job_step = job_step;
+ wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
+ * @tgt: ibmvfc target struct
+ * @job_step: initialization job step
+ *
+ * Returns: 1 if step will be retried / 0 if not
+ *
+ **/
+static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+ void (*job_step) (struct ibmvfc_target *))
+{
+ if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ wake_up(&tgt->vhost->work_wait_q);
+ return 0;
+ } else
+ ibmvfc_init_tgt(tgt, job_step);
+ return 1;
+}
+
+/* Defined in FC-LS */
+static const struct {
+ int code;
+ int retry;
+ int logged_in;
+} prli_rsp [] = {
+ { 0, 1, 0 },
+ { 1, 0, 1 },
+ { 2, 1, 0 },
+ { 3, 1, 0 },
+ { 4, 0, 0 },
+ { 5, 0, 0 },
+ { 6, 0, 1 },
+ { 7, 0, 0 },
+ { 8, 1, 0 },
+};
+
+/**
+ * ibmvfc_get_prli_rsp - Find PRLI response index
+ * @flags: PRLI response flags
+ *
+ **/
+static int ibmvfc_get_prli_rsp(u16 flags)
+{
+ int i;
+ int code = (flags & 0x0f00) >> 8;
+
+ for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
+ if (prli_rsp[i].code == code)
+ return i;
+
+ return 0;
+}
+
+/**
+ * ibmvfc_tgt_prli_done - Completion handler for Process Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
+ struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
+ parms->type, parms->flags, parms->service_parms);
+
+ if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
+ index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
+ if (prli_rsp[index].logged_in) {
+ if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
+ tgt->need_login = 0;
+ tgt->ids.roles = 0;
+ if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
+ tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
+ if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
+ tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
+ tgt->add_rport = 1;
+ } else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ } else if (prli_rsp[index].retry)
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ } else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
+ be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else if (tgt->logo_rcvd)
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error, status);
+ break;
+ };
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_prli - Send a process login
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_process_login *prli;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ prli = &evt->iu.prli;
+ memset(prli, 0, sizeof(*prli));
+ prli->common.version = cpu_to_be32(1);
+ prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
+ prli->common.length = cpu_to_be16(sizeof(*prli));
+ prli->scsi_id = cpu_to_be64(tgt->scsi_id);
+
+ prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
+ prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
+ prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent process login\n");
+}
+
+/**
+ * ibmvfc_tgt_plogi_done - Completion handler for Port Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Port Login succeeded\n");
+ if (tgt->ids.port_name &&
+ tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
+ vhost->reinit = 1;
+ tgt_dbg(tgt, "Port re-init required\n");
+ break;
+ }
+ tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+ tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->ids.port_id = tgt->scsi_id;
+ memcpy(&tgt->service_parms, &rsp->service_parms,
+ sizeof(tgt->service_parms));
+ memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+ sizeof(tgt->service_parms_change));
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
+ ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+ break;
+ };
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_port_login *plogi;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ tgt->logo_rcvd = 0;
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ plogi = &evt->iu.plogi;
+ memset(plogi, 0, sizeof(*plogi));
+ plogi->common.version = cpu_to_be32(1);
+ plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
+ plogi->common.length = cpu_to_be16(sizeof(*plogi));
+ plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
+
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent port login\n");
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
+ u32 status = be16_to_cpu(rsp->common.status);
+
+ vhost->discovery_threads--;
+ ibmvfc_free_event(evt);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Implicit Logout succeeded\n");
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+ return;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
+ break;
+ };
+
+ if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
+ else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
+ tgt->scsi_id != tgt->new_scsi_id)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_implicit_logout *mad;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ mad = &evt->iu.implicit_logout;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
+ * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
+ * @mad: ibmvfc passthru mad struct
+ * @tgt: ibmvfc target struct
+ *
+ * Returns:
+ * 1 if PLOGI needed / 0 if PLOGI not needed
+ **/
+static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
+ struct ibmvfc_target *tgt)
+{
+ if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
+ sizeof(tgt->ids.port_name)))
+ return 1;
+ if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
+ sizeof(tgt->ids.node_name)))
+ return 1;
+ if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_tgt_adisc_done - Completion handler for ADISC
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = be16_to_cpu(mad->common.status);
+ u8 fc_reason, fc_explain;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ del_timer(&tgt->timer);
+
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "ADISC succeeded\n");
+ if (ibmvfc_adisc_needs_plogi(mad, tgt))
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
+ fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
+ tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
+ mad->iu.status, mad->iu.error,
+ ibmvfc_get_fc_type(fc_reason), fc_reason,
+ ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
+ break;
+ };
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_init_passthru - Initialize an event struct for FC passthru
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
+
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
+ mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
+ mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
+ offsetof(struct ibmvfc_passthru_mad, iu));
+ mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
+ mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
+ mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
+ mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
+ offsetof(struct ibmvfc_passthru_mad, fc_iu) +
+ offsetof(struct ibmvfc_passthru_fc_iu, payload));
+ mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
+ mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
+ offsetof(struct ibmvfc_passthru_mad, fc_iu) +
+ offsetof(struct ibmvfc_passthru_fc_iu, response));
+ mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
+}
+
+/**
+ * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
+ * @evt: ibmvfc event struct
+ *
+ * Just cleanup this event struct. Everything else is handled by
+ * the ADISC completion handler. If the ADISC never actually comes
+ * back, we still have the timer running on the ADISC event struct
+ * which will fire and cause the CRQ to get reset.
+ *
+ **/
+static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_target *tgt = evt->tgt;
+
+ tgt_dbg(tgt, "ADISC cancel complete\n");
+ vhost->abort_threads--;
+ ibmvfc_free_event(evt);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_adisc_timeout - Handle an ADISC timeout
+ * @tgt: ibmvfc target struct
+ *
+ * If an ADISC times out, send a cancel. If the cancel times
+ * out, reset the CRQ. When the ADISC comes back as cancelled,
+ * log back into the target.
+ **/
+static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+ struct ibmvfc_tmf *tmf;
+ unsigned long flags;
+ int rc;
+
+ tgt_dbg(tgt, "ADISC timeout\n");
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (vhost->abort_threads >= disc_threads ||
+ tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
+ vhost->state != IBMVFC_INITIALIZING ||
+ vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
+
+ vhost->abort_threads++;
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+
+ evt->tgt = tgt;
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->common.version = cpu_to_be32(1);
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
+ tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
+
+ rc = ibmvfc_send_event(evt, vhost, default_timeout);
+
+ if (rc) {
+ tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
+ vhost->abort_threads--;
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ } else
+ tgt_dbg(tgt, "Attempting to cancel ADISC\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
+ * @tgt: ibmvfc target struct
+ *
+ * When sending an ADISC we end up with two timers running. The
+ * first timer is the timer in the ibmvfc target struct. If this
+ * fires, we send a cancel to the target. The second timer is the
+ * timer on the ibmvfc event for the ADISC, which is longer. If that
+ * fires, it means the ADISC timed out and our attempt to cancel it
+ * also failed, so we need to reset the CRQ.
+ **/
+static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_passthru_mad *mad;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+
+ ibmvfc_init_passthru(evt);
+ mad = &evt->iu.passthru;
+ mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
+ mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
+ mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
+
+ mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
+ memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
+ sizeof(vhost->login_buf->resp.port_name));
+ memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
+ sizeof(vhost->login_buf->resp.node_name));
+ mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
+
+ if (timer_pending(&tgt->timer))
+ mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
+ else {
+ tgt->timer.data = (unsigned long) tgt;
+ tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
+ tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
+ add_timer(&tgt->timer);
+ }
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
+ vhost->discovery_threads--;
+ del_timer(&tgt->timer);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent ADISC\n");
+}
+
+/**
+ * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Query Target succeeded\n");
+ tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
+ if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ else
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
+ be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
+ be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
+ rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
+ rsp->fc_explain, status);
+ break;
+ };
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_query_tgt *query_tgt;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ evt->tgt = tgt;
+ ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+ query_tgt = &evt->iu.query_tgt;
+ memset(query_tgt, 0, sizeof(*query_tgt));
+ query_tgt->common.version = cpu_to_be32(1);
+ query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
+ query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
+ query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Query Target\n");
+}
+
+/**
+ * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
+ * @vhost: ibmvfc host struct
+ * @scsi_id: SCSI ID to allocate target for
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+{
+ struct ibmvfc_target *tgt;
+ unsigned long flags;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == scsi_id) {
+ if (tgt->need_login)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ goto unlock_out;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
+ if (!tgt) {
+ dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
+ scsi_id);
+ return -ENOMEM;
+ }
+
+ memset(tgt, 0, sizeof(*tgt));
+ tgt->scsi_id = scsi_id;
+ tgt->new_scsi_id = scsi_id;
+ tgt->vhost = vhost;
+ tgt->need_login = 1;
+ tgt->cancel_key = vhost->task_set++;
+ init_timer(&tgt->timer);
+ kref_init(&tgt->kref);
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_add_tail(&tgt->queue, &vhost->targets);
+
+unlock_out:
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
+{
+ int i, rc;
+
+ for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
+ rc = ibmvfc_alloc_target(vhost,
+ be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
+ IBMVFC_DISC_TGT_SCSI_ID_MASK);
+
+ return rc;
+}
+
+/**
+ * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
+ u32 mad_status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
+ vhost->num_targets = be32_to_cpu(rsp->num_written);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ default:
+ dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ break;
+ }
+
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_discover_targets - Send Discover Targets MAD
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_discover_targets *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+ ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.discover_targets;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
+ mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
+ mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent discover targets\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_npiv_login_done - Completion handler for NPIV Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
+ struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
+ unsigned int npiv_max_sectors;
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_free_event(evt);
+ break;
+ case IBMVFC_MAD_FAILED:
+ if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
+ level += ibmvfc_retry_host_init(vhost);
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error);
+ ibmvfc_free_event(evt);
+ return;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_host_init(vhost);
+ case IBMVFC_MAD_DRIVER_FAILED:
+ ibmvfc_free_event(evt);
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_free_event(evt);
+ return;
+ }
+
+ vhost->client_migrated = 0;
+
+ if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
+ dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
+ rsp->flags);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ wake_up(&vhost->work_wait_q);
+ return;
+ }
+
+ if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
+ dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
+ rsp->max_cmds);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ wake_up(&vhost->work_wait_q);
+ return;
+ }
+
+ vhost->logged_in = 1;
+ npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
+ dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
+ rsp->partition_name, rsp->device_name, rsp->port_loc_code,
+ rsp->drc_name, npiv_max_sectors);
+
+ fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
+ fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
+ fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
+ fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
+ fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
+ fc_host_supported_classes(vhost->host) = 0;
+ if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
+ fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
+ if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
+ fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
+ if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
+ fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
+ fc_host_maxframe_size(vhost->host) =
+ be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
+
+ vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
+ vhost->host->max_sectors = npiv_max_sectors;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_npiv_login - Sends NPIV login
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_npiv_login_mad *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+ ibmvfc_gather_partition_info(vhost);
+ ibmvfc_set_login_info(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+
+ memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
+ mad = &evt->iu.npiv_login;
+ memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
+ mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
+ mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
+ mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent NPIV login\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+};
+
+/**
+ * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
+
+ ibmvfc_free_event(evt);
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ if (list_empty(&vhost->sent) &&
+ vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
+ ibmvfc_init_host(vhost);
+ return;
+ }
+ break;
+ case IBMVFC_MAD_FAILED:
+ case IBMVFC_MAD_NOT_SUPPORTED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ case IBMVFC_MAD_DRIVER_FAILED:
+ default:
+ ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
+ break;
+ }
+
+ ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_npiv_logout - Issue an NPIV Logout
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_npiv_logout_mad *mad;
+ struct ibmvfc_event *evt;
+
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+ mad = &evt->iu.npiv_logout;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
+ mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent NPIV logout\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_dev_init_to_do - Is there target initialization work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
+ tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ if (kthread_should_stop())
+ return 1;
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ case IBMVFC_HOST_ACTION_LOGO_WAIT:
+ return 0;
+ case IBMVFC_HOST_ACTION_TGT_INIT:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ if (vhost->discovery_threads == disc_threads)
+ return 0;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT)
+ return 1;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+ return 0;
+ return 1;
+ case IBMVFC_HOST_ACTION_LOGO:
+ case IBMVFC_HOST_ACTION_INIT:
+ case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ case IBMVFC_HOST_ACTION_QUERY:
+ case IBMVFC_HOST_ACTION_RESET:
+ case IBMVFC_HOST_ACTION_REENABLE:
+ default:
+ break;
+ };
+
+ return 1;
+}
+
+/**
+ * ibmvfc_work_to_do - Is there task level work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ rc = __ibmvfc_work_to_do(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rc;
+}
+
+/**
+ * ibmvfc_log_ae - Log async events if necessary
+ * @vhost: ibmvfc host struct
+ * @events: events to log
+ *
+ **/
+static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
+{
+ if (events & IBMVFC_AE_RSCN)
+ fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
+ if ((events & IBMVFC_AE_LINKDOWN) &&
+ vhost->state >= IBMVFC_HALTED)
+ fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+ if ((events & IBMVFC_AE_LINKUP) &&
+ vhost->state == IBMVFC_INITIALIZING)
+ fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+}
+
+/**
+ * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct fc_rport *rport;
+ unsigned long flags;
+
+ tgt_dbg(tgt, "Adding rport\n");
+ rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+
+ if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt_dbg(tgt, "Deleting rport\n");
+ list_del(&tgt->queue);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ fc_remote_port_delete(rport);
+ del_timer_sync(&tgt->timer);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
+
+ if (rport) {
+ tgt_dbg(tgt, "rport add succeeded\n");
+ tgt->rport = rport;
+ rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
+ rport->supported_classes = 0;
+ tgt->target_id = rport->scsi_target_id;
+ if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
+ rport->supported_classes |= FC_COS_CLASS1;
+ if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
+ rport->supported_classes |= FC_COS_CLASS2;
+ if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
+ rport->supported_classes |= FC_COS_CLASS3;
+ if (rport->rqst_q)
+ blk_queue_max_segments(rport->rqst_q, 1);
+ } else
+ tgt_dbg(tgt, "rport add failed\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_do_work - Do task level work
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_do_work(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+ unsigned long flags;
+ struct fc_rport *rport;
+ int rc;
+
+ ibmvfc_log_ae(vhost, vhost->events_to_log);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vhost->events_to_log = 0;
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_LOGO_WAIT:
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ break;
+ case IBMVFC_HOST_ACTION_RESET:
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rc = ibmvfc_reset_crq(vhost);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (rc == H_CLOSED)
+ vio_enable_interrupts(to_vio_dev(vhost->dev));
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ }
+ break;
+ case IBMVFC_HOST_ACTION_REENABLE:
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rc = ibmvfc_reenable_crq_queue(vhost);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ }
+ break;
+ case IBMVFC_HOST_ACTION_LOGO:
+ vhost->job_step(vhost);
+ break;
+ case IBMVFC_HOST_ACTION_INIT:
+ BUG_ON(vhost->state != IBMVFC_INITIALIZING);
+ if (vhost->delay_init) {
+ vhost->delay_init = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ssleep(15);
+ return;
+ } else
+ vhost->job_step(vhost);
+ break;
+ case IBMVFC_HOST_ACTION_QUERY:
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
+ break;
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (!ibmvfc_dev_init_to_do(vhost))
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+ break;
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt_dbg(tgt, "Deleting rport\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ list_del(&tgt->queue);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ del_timer_sync(&tgt->timer);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ return;
+ }
+ }
+
+ if (vhost->state == IBMVFC_INITIALIZING) {
+ if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
+ if (vhost->reinit) {
+ vhost->reinit = 0;
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ } else {
+ ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ wake_up(&vhost->init_wait_q);
+ schedule_work(&vhost->rport_add_work_q);
+ vhost->init_retries = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ scsi_unblock_requests(vhost->host);
+ }
+
+ return;
+ } else {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ vhost->job_step = ibmvfc_discover_targets;
+ }
+ } else {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ scsi_unblock_requests(vhost->host);
+ wake_up(&vhost->init_wait_q);
+ return;
+ }
+ break;
+ case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_alloc_targets(vhost);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ break;
+ case IBMVFC_HOST_ACTION_TGT_INIT:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (!ibmvfc_dev_init_to_do(vhost))
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
+ break;
+ default:
+ break;
+ };
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_work - Do task level work
+ * @data: ibmvfc host struct
+ *
+ * Returns:
+ * zero
+ **/
+static int ibmvfc_work(void *data)
+{
+ struct ibmvfc_host *vhost = data;
+ int rc;
+
+ set_user_nice(current, MIN_NICE);
+
+ while (1) {
+ rc = wait_event_interruptible(vhost->work_wait_q,
+ ibmvfc_work_to_do(vhost));
+
+ BUG_ON(rc);
+
+ if (kthread_should_stop())
+ break;
+
+ ibmvfc_do_work(vhost);
+ }
+
+ ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
+ return 0;
+}
+
+/**
+ * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
+ * @vhost: ibmvfc host struct
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ *
+ * Return value:
+ * zero on success / other on failure
+ **/
+static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
+{
+ int rc, retrc = -ENOMEM;
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+ ENTER;
+ crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
+
+ if (!crq->msgs)
+ return -ENOMEM;
+
+ crq->size = PAGE_SIZE / sizeof(*crq->msgs);
+ crq->msg_token = dma_map_single(dev, crq->msgs,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, crq->msg_token))
+ goto map_failed;
+
+ retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+ crq->msg_token, PAGE_SIZE);
+
+ if (rc == H_RESOURCE)
+ /* maybe kexecing and resource is busy. try a reset */
+ retrc = rc = ibmvfc_reset_crq(vhost);
+
+ if (rc == H_CLOSED)
+ dev_warn(dev, "Partner adapter not ready\n");
+ else if (rc) {
+ dev_warn(dev, "Error %d opening adapter\n", rc);
+ goto reg_crq_failed;
+ }
+
+ retrc = 0;
+
+ tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
+
+ if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
+ dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
+ goto req_irq_failed;
+ }
+
+ if ((rc = vio_enable_interrupts(vdev))) {
+ dev_err(dev, "Error %d enabling interrupts\n", rc);
+ goto req_irq_failed;
+ }
+
+ crq->cur = 0;
+ LEAVE;
+ return retrc;
+
+req_irq_failed:
+ tasklet_kill(&vhost->tasklet);
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_crq_failed:
+ dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+map_failed:
+ free_page((unsigned long)crq->msgs);
+ return retrc;
+}
+
+/**
+ * ibmvfc_free_mem - Free memory for vhost
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+
+ ENTER;
+ mempool_destroy(vhost->tgt_pool);
+ kfree(vhost->trace);
+ dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
+ vhost->disc_buf_dma);
+ dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
+ vhost->login_buf, vhost->login_buf_dma);
+ dma_pool_destroy(vhost->sg_pool);
+ dma_unmap_single(vhost->dev, async_q->msg_token,
+ async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+ free_page((unsigned long)async_q->msgs);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_alloc_mem - Allocate memory for vhost
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct device *dev = vhost->dev;
+
+ ENTER;
+ async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
+ if (!async_q->msgs) {
+ dev_err(dev, "Couldn't allocate async queue.\n");
+ goto nomem;
+ }
+
+ async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
+ async_q->msg_token = dma_map_single(dev, async_q->msgs,
+ async_q->size * sizeof(*async_q->msgs),
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, async_q->msg_token)) {
+ dev_err(dev, "Failed to map async queue\n");
+ goto free_async_crq;
+ }
+
+ vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
+ SG_ALL * sizeof(struct srp_direct_buf),
+ sizeof(struct srp_direct_buf), 0);
+
+ if (!vhost->sg_pool) {
+ dev_err(dev, "Failed to allocate sg pool\n");
+ goto unmap_async_crq;
+ }
+
+ vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
+ &vhost->login_buf_dma, GFP_KERNEL);
+
+ if (!vhost->login_buf) {
+ dev_err(dev, "Couldn't allocate NPIV login buffer\n");
+ goto free_sg_pool;
+ }
+
+ vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+ vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
+ &vhost->disc_buf_dma, GFP_KERNEL);
+
+ if (!vhost->disc_buf) {
+ dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+ goto free_login_buffer;
+ }
+
+ vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
+ sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+
+ if (!vhost->trace)
+ goto free_disc_buffer;
+
+ vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
+ sizeof(struct ibmvfc_target));
+
+ if (!vhost->tgt_pool) {
+ dev_err(dev, "Couldn't allocate target memory pool\n");
+ goto free_trace;
+ }
+
+ LEAVE;
+ return 0;
+
+free_trace:
+ kfree(vhost->trace);
+free_disc_buffer:
+ dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
+ vhost->disc_buf_dma);
+free_login_buffer:
+ dma_free_coherent(dev, sizeof(*vhost->login_buf),
+ vhost->login_buf, vhost->login_buf_dma);
+free_sg_pool:
+ dma_pool_destroy(vhost->sg_pool);
+unmap_async_crq:
+ dma_unmap_single(dev, async_q->msg_token,
+ async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+free_async_crq:
+ free_page((unsigned long)async_q->msgs);
+nomem:
+ LEAVE;
+ return -ENOMEM;
+}
+
+/**
+ * ibmvfc_rport_add_thread - Worker thread for rport adds
+ * @work: work struct
+ *
+ **/
+static void ibmvfc_rport_add_thread(struct work_struct *work)
+{
+ struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
+ rport_add_work_q);
+ struct ibmvfc_target *tgt;
+ struct fc_rport *rport;
+ unsigned long flags;
+ int did_work;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ do {
+ did_work = 0;
+ if (vhost->state != IBMVFC_ACTIVE)
+ break;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->add_rport) {
+ did_work = 1;
+ tgt->add_rport = 0;
+ kref_get(&tgt->kref);
+ rport = tgt->rport;
+ if (!rport) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_tgt_add_rport(tgt);
+ } else if (get_device(&rport->dev)) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ tgt_dbg(tgt, "Setting rport roles\n");
+ fc_remote_port_rolechg(rport, tgt->ids.roles);
+ put_device(&rport->dev);
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ break;
+ }
+ }
+ } while(did_work);
+
+ if (vhost->state == IBMVFC_ACTIVE)
+ vhost->scan_complete = 1;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_probe - Adapter hot plug add entry point
+ * @vdev: vio device struct
+ * @id: vio device id struct
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ struct ibmvfc_host *vhost;
+ struct Scsi_Host *shost;
+ struct device *dev = &vdev->dev;
+ int rc = -ENOMEM;
+
+ ENTER;
+ shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
+ if (!shost) {
+ dev_err(dev, "Couldn't allocate host data\n");
+ goto out;
+ }
+
+ shost->transportt = ibmvfc_transport_template;
+ shost->can_queue = max_requests;
+ shost->max_lun = max_lun;
+ shost->max_id = max_targets;
+ shost->max_sectors = IBMVFC_MAX_SECTORS;
+ shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
+ shost->unique_id = shost->host_no;
+
+ vhost = shost_priv(shost);
+ INIT_LIST_HEAD(&vhost->sent);
+ INIT_LIST_HEAD(&vhost->free);
+ INIT_LIST_HEAD(&vhost->targets);
+ sprintf(vhost->name, IBMVFC_NAME);
+ vhost->host = shost;
+ vhost->dev = dev;
+ vhost->partition_number = -1;
+ vhost->log_level = log_level;
+ vhost->task_set = 1;
+ strcpy(vhost->partition_name, "UNKNOWN");
+ init_waitqueue_head(&vhost->work_wait_q);
+ init_waitqueue_head(&vhost->init_wait_q);
+ INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
+ mutex_init(&vhost->passthru_mutex);
+
+ if ((rc = ibmvfc_alloc_mem(vhost)))
+ goto free_scsi_host;
+
+ vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
+ shost->host_no);
+
+ if (IS_ERR(vhost->work_thread)) {
+ dev_err(dev, "Couldn't create kernel thread: %ld\n",
+ PTR_ERR(vhost->work_thread));
+ goto free_host_mem;
+ }
+
+ if ((rc = ibmvfc_init_crq(vhost))) {
+ dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+ goto kill_kthread;
+ }
+
+ if ((rc = ibmvfc_init_event_pool(vhost))) {
+ dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
+ goto release_crq;
+ }
+
+ if ((rc = scsi_add_host(shost, dev)))
+ goto release_event_pool;
+
+ fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
+
+ if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
+ &ibmvfc_trace_attr))) {
+ dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
+ goto remove_shost;
+ }
+
+ if (shost_to_fc_host(shost)->rqst_q)
+ blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
+ dev_set_drvdata(dev, vhost);
+ spin_lock(&ibmvfc_driver_lock);
+ list_add_tail(&vhost->queue, &ibmvfc_head);
+ spin_unlock(&ibmvfc_driver_lock);
+
+ ibmvfc_send_crq_init(vhost);
+ scsi_scan_host(shost);
+ return 0;
+
+remove_shost:
+ scsi_remove_host(shost);
+release_event_pool:
+ ibmvfc_free_event_pool(vhost);
+release_crq:
+ ibmvfc_release_crq_queue(vhost);
+kill_kthread:
+ kthread_stop(vhost->work_thread);
+free_host_mem:
+ ibmvfc_free_mem(vhost);
+free_scsi_host:
+ scsi_host_put(shost);
+out:
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_remove - Adapter hot plug remove entry point
+ * @vdev: vio device struct
+ *
+ * Return value:
+ * 0
+ **/
+static int ibmvfc_remove(struct vio_dev *vdev)
+{
+ struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+ unsigned long flags;
+
+ ENTER;
+ ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ ibmvfc_wait_while_resetting(vhost);
+ ibmvfc_release_crq_queue(vhost);
+ kthread_stop(vhost->work_thread);
+ fc_remove_host(vhost->host);
+ scsi_remove_host(vhost->host);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+ ibmvfc_free_event_pool(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ ibmvfc_free_mem(vhost);
+ spin_lock(&ibmvfc_driver_lock);
+ list_del(&vhost->queue);
+ spin_unlock(&ibmvfc_driver_lock);
+ scsi_host_put(vhost->host);
+ LEAVE;
+ return 0;
+}
+
+/**
+ * ibmvfc_resume - Resume from suspend
+ * @dev: device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ *
+ */
+static int ibmvfc_resume(struct device *dev)
+{
+ unsigned long flags;
+ struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+ struct vio_dev *vdev = to_vio_dev(dev);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vio_disable_interrupts(vdev);
+ tasklet_schedule(&vhost->tasklet);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
+ * @vdev: vio device struct
+ *
+ * Return value:
+ * Number of bytes the driver will need to DMA map at the same time in
+ * order to perform well.
+ */
+static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
+{
+ unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+ return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
+}
+
+static struct vio_device_id ibmvfc_device_table[] = {
+ {"fcp", "IBM,vfc-client"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
+
+static struct dev_pm_ops ibmvfc_pm_ops = {
+ .resume = ibmvfc_resume
+};
+
+static struct vio_driver ibmvfc_driver = {
+ .id_table = ibmvfc_device_table,
+ .probe = ibmvfc_probe,
+ .remove = ibmvfc_remove,
+ .get_desired_dma = ibmvfc_get_desired_dma,
+ .name = IBMVFC_NAME,
+ .pm = &ibmvfc_pm_ops,
+};
+
+static struct fc_function_template ibmvfc_transport_functions = {
+ .show_host_fabric_name = 1,
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_port_type = 1,
+ .show_host_port_id = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_state = ibmvfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .get_host_speed = ibmvfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
+ .terminate_rport_io = ibmvfc_terminate_rport_io,
+
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_node_name = ibmvfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = ibmvfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ .get_starget_port_id = ibmvfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .bsg_request = ibmvfc_bsg_request,
+ .bsg_timeout = ibmvfc_bsg_timeout,
+};
+
+/**
+ * ibmvfc_module_init - Initialize the ibmvfc module
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int __init ibmvfc_module_init(void)
+{
+ int rc;
+
+ if (!firmware_has_feature(FW_FEATURE_VIO))
+ return -ENODEV;
+
+ printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
+ IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+
+ ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
+ if (!ibmvfc_transport_template)
+ return -ENOMEM;
+
+ rc = vio_register_driver(&ibmvfc_driver);
+ if (rc)
+ fc_release_transport(ibmvfc_transport_template);
+ return rc;
+}
+
+/**
+ * ibmvfc_module_exit - Teardown the ibmvfc module
+ *
+ * Return value:
+ * nothing
+ **/
+static void __exit ibmvfc_module_exit(void)
+{
+ vio_unregister_driver(&ibmvfc_driver);
+ fc_release_transport(ibmvfc_transport_template);
+}
+
+module_init(ibmvfc_module_init);
+module_exit(ibmvfc_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
new file mode 100644
index 000000000..8fae03215
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -0,0 +1,775 @@
+/*
+ * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _IBMVFC_H
+#define _IBMVFC_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "viosrp.h"
+
+#define IBMVFC_NAME "ibmvfc"
+#define IBMVFC_DRIVER_VERSION "1.0.11"
+#define IBMVFC_DRIVER_DATE "(April 12, 2013)"
+
+#define IBMVFC_DEFAULT_TIMEOUT 60
+#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
+#define IBMVFC_ADISC_TIMEOUT 15
+#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
+ (IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
+#define IBMVFC_INIT_TIMEOUT 120
+#define IBMVFC_ABORT_TIMEOUT 8
+#define IBMVFC_ABORT_WAIT_TIMEOUT 40
+#define IBMVFC_MAX_REQUESTS_DEFAULT 100
+
+#define IBMVFC_DEBUG 0
+#define IBMVFC_MAX_TARGETS 1024
+#define IBMVFC_MAX_LUN 0xffffffff
+#define IBMVFC_MAX_SECTORS 0xffffu
+#define IBMVFC_MAX_DISC_THREADS 4
+#define IBMVFC_TGT_MEMPOOL_SZ 64
+#define IBMVFC_MAX_CMDS_PER_LUN 64
+#define IBMVFC_MAX_HOST_INIT_RETRIES 6
+#define IBMVFC_MAX_TGT_INIT_RETRIES 3
+#define IBMVFC_DEV_LOSS_TMO (5 * 60)
+#define IBMVFC_DEFAULT_LOG_LEVEL 2
+#define IBMVFC_MAX_CDB_LEN 16
+
+/*
+ * Ensure we have resources for ERP and initialization:
+ * 1 for ERP
+ * 1 for initialization
+ * 1 for NPIV Logout
+ * 2 for BSG passthru
+ * 2 for each discovery thread
+ */
+#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + 2 + (disc_threads * 2))
+
+#define IBMVFC_MAD_SUCCESS 0x00
+#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
+#define IBMVFC_MAD_FAILED 0xF7
+#define IBMVFC_MAD_DRIVER_FAILED 0xEE
+#define IBMVFC_MAD_CRQ_ERROR 0xEF
+
+enum ibmvfc_crq_valid {
+ IBMVFC_CRQ_CMD_RSP = 0x80,
+ IBMVFC_CRQ_INIT_RSP = 0xC0,
+ IBMVFC_CRQ_XPORT_EVENT = 0xFF,
+};
+
+enum ibmvfc_crq_format {
+ IBMVFC_CRQ_INIT = 0x01,
+ IBMVFC_CRQ_INIT_COMPLETE = 0x02,
+ IBMVFC_PARTITION_MIGRATED = 0x06,
+};
+
+enum ibmvfc_cmd_status_flags {
+ IBMVFC_FABRIC_MAPPED = 0x0001,
+ IBMVFC_VIOS_FAILURE = 0x0002,
+ IBMVFC_FC_FAILURE = 0x0004,
+ IBMVFC_FC_SCSI_ERROR = 0x0008,
+ IBMVFC_HW_EVENT_LOGGED = 0x0010,
+ IBMVFC_VIOS_LOGGED = 0x0020,
+};
+
+enum ibmvfc_fabric_mapped_errors {
+ IBMVFC_UNABLE_TO_ESTABLISH = 0x0001,
+ IBMVFC_XPORT_FAULT = 0x0002,
+ IBMVFC_CMD_TIMEOUT = 0x0003,
+ IBMVFC_ENETDOWN = 0x0004,
+ IBMVFC_HW_FAILURE = 0x0005,
+ IBMVFC_LINK_DOWN_ERR = 0x0006,
+ IBMVFC_LINK_DEAD_ERR = 0x0007,
+ IBMVFC_UNABLE_TO_REGISTER = 0x0008,
+ IBMVFC_XPORT_BUSY = 0x000A,
+ IBMVFC_XPORT_DEAD = 0x000B,
+ IBMVFC_CONFIG_ERROR = 0x000C,
+ IBMVFC_NAME_SERVER_FAIL = 0x000D,
+ IBMVFC_LINK_HALTED = 0x000E,
+ IBMVFC_XPORT_GENERAL = 0x8000,
+};
+
+enum ibmvfc_vios_errors {
+ IBMVFC_CRQ_FAILURE = 0x0001,
+ IBMVFC_SW_FAILURE = 0x0002,
+ IBMVFC_INVALID_PARAMETER = 0x0003,
+ IBMVFC_MISSING_PARAMETER = 0x0004,
+ IBMVFC_HOST_IO_BUS = 0x0005,
+ IBMVFC_TRANS_CANCELLED = 0x0006,
+ IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
+ IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
+ IBMVFC_PLOGI_REQUIRED = 0x0010,
+ IBMVFC_COMMAND_FAILED = 0x8000,
+};
+
+enum ibmvfc_mad_types {
+ IBMVFC_NPIV_LOGIN = 0x0001,
+ IBMVFC_DISC_TARGETS = 0x0002,
+ IBMVFC_PORT_LOGIN = 0x0004,
+ IBMVFC_PROCESS_LOGIN = 0x0008,
+ IBMVFC_QUERY_TARGET = 0x0010,
+ IBMVFC_IMPLICIT_LOGOUT = 0x0040,
+ IBMVFC_PASSTHRU = 0x0200,
+ IBMVFC_TMF_MAD = 0x0100,
+ IBMVFC_NPIV_LOGOUT = 0x0800,
+};
+
+struct ibmvfc_mad_common {
+ __be32 version;
+ __be32 reserved;
+ __be32 opcode;
+ __be16 status;
+ __be16 length;
+ __be64 tag;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_npiv_login_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_npiv_logout_mad {
+ struct ibmvfc_mad_common common;
+}__attribute__((packed, aligned (8)));
+
+#define IBMVFC_MAX_NAME 256
+
+struct ibmvfc_npiv_login {
+ __be32 ostype;
+#define IBMVFC_OS_LINUX 0x02
+ __be32 pad;
+ __be64 max_dma_len;
+ __be32 max_payload;
+ __be32 max_response;
+ __be32 partition_num;
+ __be32 vfc_frame_version;
+ __be16 fcp_version;
+ __be16 flags;
+#define IBMVFC_CLIENT_MIGRATED 0x01
+#define IBMVFC_FLUSH_ON_HALT 0x02
+ __be32 max_cmds;
+ __be64 capabilities;
+#define IBMVFC_CAN_MIGRATE 0x01
+ __be64 node_name;
+ struct srp_direct_buf async;
+ u8 partition_name[IBMVFC_MAX_NAME];
+ u8 device_name[IBMVFC_MAX_NAME];
+ u8 drc_name[IBMVFC_MAX_NAME];
+ __be64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_common_svc_parms {
+ __be16 fcph_version;
+ __be16 b2b_credit;
+ __be16 features;
+ __be16 bb_rcv_sz; /* upper nibble is BB_SC_N */
+ __be32 ratov;
+ __be32 edtov;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_service_parms {
+ struct ibmvfc_common_svc_parms common;
+ u8 port_name[8];
+ u8 node_name[8];
+ __be32 class1_parms[4];
+ __be32 class2_parms[4];
+ __be32 class3_parms[4];
+ __be32 obsolete[4];
+ __be32 vendor_version[4];
+ __be32 services_avail[2];
+ __be32 ext_len;
+ __be32 reserved[30];
+ __be32 clk_sync_qos[2];
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_npiv_login_resp {
+ __be32 version;
+ __be16 status;
+ __be16 error;
+ __be32 flags;
+#define IBMVFC_NATIVE_FC 0x01
+ __be32 reserved;
+ __be64 capabilities;
+#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
+#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
+ __be32 max_cmds;
+ __be32 scsi_id_sz;
+ __be64 max_dma_len;
+ __be64 scsi_id;
+ __be64 port_name;
+ __be64 node_name;
+ __be64 link_speed;
+ u8 partition_name[IBMVFC_MAX_NAME];
+ u8 device_name[IBMVFC_MAX_NAME];
+ u8 port_loc_code[IBMVFC_MAX_NAME];
+ u8 drc_name[IBMVFC_MAX_NAME];
+ struct ibmvfc_service_parms service_parms;
+ __be64 reserved2;
+}__attribute__((packed, aligned (8)));
+
+union ibmvfc_npiv_login_data {
+ struct ibmvfc_npiv_login login;
+ struct ibmvfc_npiv_login_resp resp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_discover_targets_buf {
+ __be32 scsi_id[1];
+#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
+};
+
+struct ibmvfc_discover_targets {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+ __be32 flags;
+ __be16 status;
+ __be16 error;
+ __be32 bufflen;
+ __be32 num_avail;
+ __be32 num_written;
+ __be64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fc_reason {
+ IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
+ IBMVFC_INVALID_VERSION = 0x02,
+ IBMVFC_LOGICAL_ERROR = 0x03,
+ IBMVFC_INVALID_CT_IU_SIZE = 0x04,
+ IBMVFC_LOGICAL_BUSY = 0x05,
+ IBMVFC_PROTOCOL_ERROR = 0x07,
+ IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09,
+ IBMVFC_CMD_NOT_SUPPORTED = 0x0B,
+ IBMVFC_SERVER_NOT_AVAIL = 0x0D,
+ IBMVFC_CMD_IN_PROGRESS = 0x0E,
+ IBMVFC_VENDOR_SPECIFIC = 0xFF,
+};
+
+enum ibmvfc_fc_type {
+ IBMVFC_FABRIC_REJECT = 0x01,
+ IBMVFC_PORT_REJECT = 0x02,
+ IBMVFC_LS_REJECT = 0x03,
+ IBMVFC_FABRIC_BUSY = 0x04,
+ IBMVFC_PORT_BUSY = 0x05,
+ IBMVFC_BASIC_REJECT = 0x06,
+};
+
+enum ibmvfc_gs_explain {
+ IBMVFC_PORT_NAME_NOT_REG = 0x02,
+};
+
+struct ibmvfc_port_login {
+ struct ibmvfc_mad_common common;
+ __be64 scsi_id;
+ __be16 reserved;
+ __be16 fc_service_class;
+ __be32 blksz;
+ __be32 hdr_per_blk;
+ __be16 status;
+ __be16 error; /* also fc_reason */
+ __be16 fc_explain;
+ __be16 fc_type;
+ __be32 reserved2;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ __be64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_prli_svc_parms {
+ u8 type;
+#define IBMVFC_SCSI_FCP_TYPE 0x08
+ u8 type_ext;
+ __be16 flags;
+#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000
+#define IBMVFC_PRLI_RESP_PA_VALID 0x4000
+#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000
+ __be32 orig_pa;
+ __be32 resp_pa;
+ __be32 service_parms;
+#define IBMVFC_PRLI_TASK_RETRY 0x00000200
+#define IBMVFC_PRLI_RETRY 0x00000100
+#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040
+#define IBMVFC_PRLI_INITIATOR_FUNC 0x00000020
+#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
+#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
+#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_process_login {
+ struct ibmvfc_mad_common common;
+ __be64 scsi_id;
+ struct ibmvfc_prli_svc_parms parms;
+ u8 reserved[48];
+ __be16 status;
+ __be16 error; /* also fc_reason */
+ __be32 reserved2;
+ __be64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_query_tgt {
+ struct ibmvfc_mad_common common;
+ __be64 wwpn;
+ __be64 scsi_id;
+ __be16 status;
+ __be16 error;
+ __be16 fc_explain;
+ __be16 fc_type;
+ __be64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_implicit_logout {
+ struct ibmvfc_mad_common common;
+ __be64 old_scsi_id;
+ __be64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_tmf {
+ struct ibmvfc_mad_common common;
+ __be64 scsi_id;
+ struct scsi_lun lun;
+ __be32 flags;
+#define IBMVFC_TMF_ABORT_TASK 0x02
+#define IBMVFC_TMF_ABORT_TASK_SET 0x04
+#define IBMVFC_TMF_LUN_RESET 0x10
+#define IBMVFC_TMF_TGT_RESET 0x20
+#define IBMVFC_TMF_LUA_VALID 0x40
+#define IBMVFC_TMF_SUPPRESS_ABTS 0x80
+ __be32 cancel_key;
+ __be32 my_cancel_key;
+ __be32 pad;
+ __be64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fcp_rsp_info_codes {
+ RSP_NO_FAILURE = 0x00,
+ RSP_TMF_REJECTED = 0x04,
+ RSP_TMF_FAILED = 0x05,
+ RSP_TMF_INVALID_LUN = 0x09,
+};
+
+struct ibmvfc_fcp_rsp_info {
+ __be16 reserved;
+ u8 rsp_code;
+ u8 reserved2[4];
+}__attribute__((packed, aligned (2)));
+
+enum ibmvfc_fcp_rsp_flags {
+ FCP_BIDI_RSP = 0x80,
+ FCP_BIDI_READ_RESID_UNDER = 0x40,
+ FCP_BIDI_READ_RESID_OVER = 0x20,
+ FCP_CONF_REQ = 0x10,
+ FCP_RESID_UNDER = 0x08,
+ FCP_RESID_OVER = 0x04,
+ FCP_SNS_LEN_VALID = 0x02,
+ FCP_RSP_LEN_VALID = 0x01,
+};
+
+union ibmvfc_fcp_rsp_data {
+ struct ibmvfc_fcp_rsp_info info;
+ u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_fcp_rsp {
+ __be64 reserved;
+ __be16 retry_delay_timer;
+ u8 flags;
+ u8 scsi_status;
+ __be32 fcp_resid;
+ __be32 fcp_sense_len;
+ __be32 fcp_rsp_len;
+ union ibmvfc_fcp_rsp_data data;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_cmd_flags {
+ IBMVFC_SCATTERLIST = 0x0001,
+ IBMVFC_NO_MEM_DESC = 0x0002,
+ IBMVFC_READ = 0x0004,
+ IBMVFC_WRITE = 0x0008,
+ IBMVFC_TMF = 0x0080,
+ IBMVFC_CLASS_3_ERR = 0x0100,
+};
+
+enum ibmvfc_fc_task_attr {
+ IBMVFC_SIMPLE_TASK = 0x00,
+ IBMVFC_HEAD_OF_QUEUE = 0x01,
+ IBMVFC_ORDERED_TASK = 0x02,
+ IBMVFC_ACA_TASK = 0x04,
+};
+
+enum ibmvfc_fc_tmf_flags {
+ IBMVFC_ABORT_TASK_SET = 0x02,
+ IBMVFC_LUN_RESET = 0x10,
+ IBMVFC_TARGET_RESET = 0x20,
+};
+
+struct ibmvfc_fcp_cmd_iu {
+ struct scsi_lun lun;
+ u8 crn;
+ u8 pri_task_attr;
+ u8 tmf_flags;
+ u8 add_cdb_len;
+#define IBMVFC_RDDATA 0x02
+#define IBMVFC_WRDATA 0x01
+ u8 cdb[IBMVFC_MAX_CDB_LEN];
+ __be32 xfer_len;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_cmd {
+ __be64 task_tag;
+ __be32 frame_type;
+ __be32 payload_len;
+ __be32 resp_len;
+ __be32 adapter_resid;
+ __be16 status;
+ __be16 error;
+ __be16 flags;
+ __be16 response_flags;
+#define IBMVFC_ADAPTER_RESID_VALID 0x01
+ __be32 cancel_key;
+ __be32 exchange_id;
+ struct srp_direct_buf ext_func;
+ struct srp_direct_buf ioba;
+ struct srp_direct_buf resp;
+ __be64 correlation;
+ __be64 tgt_scsi_id;
+ __be64 tag;
+ __be64 reserved3[2];
+ struct ibmvfc_fcp_cmd_iu iu;
+ struct ibmvfc_fcp_rsp rsp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_passthru_fc_iu {
+ __be32 payload[7];
+#define IBMVFC_ADISC 0x52000000
+ __be32 response[7];
+};
+
+struct ibmvfc_passthru_iu {
+ __be64 task_tag;
+ __be32 cmd_len;
+ __be32 rsp_len;
+ __be16 status;
+ __be16 error;
+ __be32 flags;
+#define IBMVFC_FC_ELS 0x01
+#define IBMVFC_FC_CT_IU 0x02
+ __be32 cancel_key;
+#define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000
+#define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001
+ __be32 reserved;
+ struct srp_direct_buf cmd;
+ struct srp_direct_buf rsp;
+ __be64 correlation;
+ __be64 scsi_id;
+ __be64 tag;
+ __be64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_passthru_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf cmd_ioba;
+ struct ibmvfc_passthru_iu iu;
+ struct ibmvfc_passthru_fc_iu fc_iu;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_trace_start_entry {
+ u32 xfer_len;
+}__attribute__((packed));
+
+struct ibmvfc_trace_end_entry {
+ u16 status;
+ u16 error;
+ u8 fcp_rsp_flags;
+ u8 rsp_code;
+ u8 scsi_status;
+ u8 reserved;
+}__attribute__((packed));
+
+struct ibmvfc_trace_entry {
+ struct ibmvfc_event *evt;
+ u32 time;
+ u32 scsi_id;
+ u32 lun;
+ u8 fmt;
+ u8 op_code;
+ u8 tmf_flags;
+ u8 type;
+#define IBMVFC_TRC_START 0x00
+#define IBMVFC_TRC_END 0xff
+ union {
+ struct ibmvfc_trace_start_entry start;
+ struct ibmvfc_trace_end_entry end;
+ } u;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_crq_formats {
+ IBMVFC_CMD_FORMAT = 0x01,
+ IBMVFC_ASYNC_EVENT = 0x02,
+ IBMVFC_MAD_FORMAT = 0x04,
+};
+
+enum ibmvfc_async_event {
+ IBMVFC_AE_ELS_PLOGI = 0x0001,
+ IBMVFC_AE_ELS_LOGO = 0x0002,
+ IBMVFC_AE_ELS_PRLO = 0x0004,
+ IBMVFC_AE_SCN_NPORT = 0x0008,
+ IBMVFC_AE_SCN_GROUP = 0x0010,
+ IBMVFC_AE_SCN_DOMAIN = 0x0020,
+ IBMVFC_AE_SCN_FABRIC = 0x0040,
+ IBMVFC_AE_LINK_UP = 0x0080,
+ IBMVFC_AE_LINK_DOWN = 0x0100,
+ IBMVFC_AE_LINK_DEAD = 0x0200,
+ IBMVFC_AE_HALT = 0x0400,
+ IBMVFC_AE_RESUME = 0x0800,
+ IBMVFC_AE_ADAPTER_FAILED = 0x1000,
+};
+
+struct ibmvfc_async_desc {
+ const char *desc;
+ enum ibmvfc_async_event ae;
+ int log_level;
+};
+
+struct ibmvfc_crq {
+ volatile u8 valid;
+ volatile u8 format;
+ u8 reserved[6];
+ volatile __be64 ioba;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_crq_queue {
+ struct ibmvfc_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+};
+
+enum ibmvfc_ae_link_state {
+ IBMVFC_AE_LS_LINK_UP = 0x01,
+ IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
+ IBMVFC_AE_LS_LINK_DOWN = 0x04,
+ IBMVFC_AE_LS_LINK_DEAD = 0x08,
+};
+
+struct ibmvfc_async_crq {
+ volatile u8 valid;
+ u8 link_state;
+ u8 pad[2];
+ __be32 pad2;
+ volatile __be64 event;
+ volatile __be64 scsi_id;
+ volatile __be64 wwpn;
+ volatile __be64 node_name;
+ __be64 reserved;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_async_crq_queue {
+ struct ibmvfc_async_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+};
+
+union ibmvfc_iu {
+ struct ibmvfc_mad_common mad_common;
+ struct ibmvfc_npiv_login_mad npiv_login;
+ struct ibmvfc_npiv_logout_mad npiv_logout;
+ struct ibmvfc_discover_targets discover_targets;
+ struct ibmvfc_port_login plogi;
+ struct ibmvfc_process_login prli;
+ struct ibmvfc_query_tgt query_tgt;
+ struct ibmvfc_implicit_logout implicit_logout;
+ struct ibmvfc_tmf tmf;
+ struct ibmvfc_cmd cmd;
+ struct ibmvfc_passthru_mad passthru;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_target_action {
+ IBMVFC_TGT_ACTION_NONE = 0,
+ IBMVFC_TGT_ACTION_INIT,
+ IBMVFC_TGT_ACTION_INIT_WAIT,
+ IBMVFC_TGT_ACTION_DEL_RPORT,
+ IBMVFC_TGT_ACTION_DELETED_RPORT,
+};
+
+struct ibmvfc_target {
+ struct list_head queue;
+ struct ibmvfc_host *vhost;
+ u64 scsi_id;
+ u64 new_scsi_id;
+ struct fc_rport *rport;
+ int target_id;
+ enum ibmvfc_target_action action;
+ int need_login;
+ int add_rport;
+ int init_retries;
+ int logo_rcvd;
+ u32 cancel_key;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ struct fc_rport_identifiers ids;
+ void (*job_step) (struct ibmvfc_target *);
+ struct timer_list timer;
+ struct kref kref;
+};
+
+/* a unit of work for the hosting partition */
+struct ibmvfc_event {
+ struct list_head queue;
+ struct ibmvfc_host *vhost;
+ struct ibmvfc_target *tgt;
+ struct scsi_cmnd *cmnd;
+ atomic_t free;
+ union ibmvfc_iu *xfer_iu;
+ void (*done) (struct ibmvfc_event *);
+ struct ibmvfc_crq crq;
+ union ibmvfc_iu iu;
+ union ibmvfc_iu *sync_iu;
+ struct srp_direct_buf *ext_list;
+ dma_addr_t ext_list_token;
+ struct completion comp;
+ struct completion *eh_comp;
+ struct timer_list timer;
+};
+
+/* a pool of event structs for use */
+struct ibmvfc_event_pool {
+ struct ibmvfc_event *events;
+ u32 size;
+ union ibmvfc_iu *iu_storage;
+ dma_addr_t iu_token;
+};
+
+enum ibmvfc_host_action {
+ IBMVFC_HOST_ACTION_NONE = 0,
+ IBMVFC_HOST_ACTION_RESET,
+ IBMVFC_HOST_ACTION_REENABLE,
+ IBMVFC_HOST_ACTION_LOGO,
+ IBMVFC_HOST_ACTION_LOGO_WAIT,
+ IBMVFC_HOST_ACTION_INIT,
+ IBMVFC_HOST_ACTION_INIT_WAIT,
+ IBMVFC_HOST_ACTION_QUERY,
+ IBMVFC_HOST_ACTION_QUERY_TGTS,
+ IBMVFC_HOST_ACTION_TGT_DEL,
+ IBMVFC_HOST_ACTION_ALLOC_TGTS,
+ IBMVFC_HOST_ACTION_TGT_INIT,
+ IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
+};
+
+enum ibmvfc_host_state {
+ IBMVFC_NO_CRQ = 0,
+ IBMVFC_INITIALIZING,
+ IBMVFC_ACTIVE,
+ IBMVFC_HALTED,
+ IBMVFC_LINK_DOWN,
+ IBMVFC_LINK_DEAD,
+ IBMVFC_HOST_OFFLINE,
+};
+
+struct ibmvfc_host {
+ char name[8];
+ struct list_head queue;
+ struct Scsi_Host *host;
+ enum ibmvfc_host_state state;
+ enum ibmvfc_host_action action;
+#define IBMVFC_NUM_TRACE_INDEX_BITS 8
+#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
+ struct ibmvfc_trace_entry *trace;
+ u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
+ int num_targets;
+ struct list_head targets;
+ struct list_head sent;
+ struct list_head free;
+ struct device *dev;
+ struct ibmvfc_event_pool pool;
+ struct dma_pool *sg_pool;
+ mempool_t *tgt_pool;
+ struct ibmvfc_crq_queue crq;
+ struct ibmvfc_async_crq_queue async_crq;
+ struct ibmvfc_npiv_login login_info;
+ union ibmvfc_npiv_login_data *login_buf;
+ dma_addr_t login_buf_dma;
+ int disc_buf_sz;
+ int log_level;
+ struct ibmvfc_discover_targets_buf *disc_buf;
+ struct mutex passthru_mutex;
+ int task_set;
+ int init_retries;
+ int discovery_threads;
+ int abort_threads;
+ int client_migrated;
+ int reinit;
+ int delay_init;
+ int scan_complete;
+ int logged_in;
+ int aborting_passthru;
+ int events_to_log;
+#define IBMVFC_AE_LINKUP 0x0001
+#define IBMVFC_AE_LINKDOWN 0x0002
+#define IBMVFC_AE_RSCN 0x0004
+ dma_addr_t disc_buf_dma;
+ unsigned int partition_number;
+ char partition_name[97];
+ void (*job_step) (struct ibmvfc_host *);
+ struct task_struct *work_thread;
+ struct tasklet_struct tasklet;
+ struct work_struct rport_add_work_q;
+ wait_queue_head_t init_wait_q;
+ wait_queue_head_t work_wait_q;
+};
+
+#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
+
+#define tgt_dbg(t, fmt, ...) \
+ DBG_CMD(dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+
+#define tgt_info(t, fmt, ...) \
+ dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
+#define tgt_err(t, fmt, ...) \
+ dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
+#define tgt_log(t, level, fmt, ...) \
+ do { \
+ if ((t)->vhost->log_level >= level) \
+ tgt_err(t, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define ibmvfc_dbg(vhost, ...) \
+ DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
+
+#define ibmvfc_log(vhost, level, ...) \
+ do { \
+ if ((vhost)->log_level >= level) \
+ dev_err((vhost)->dev, ##__VA_ARGS__); \
+ } while (0)
+
+#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
+#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ibmvfc_create_trace_file(kobj, attr) 0
+#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
+#endif
+
+#endif
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
new file mode 100644
index 000000000..acea5d6ee
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -0,0 +1,2438 @@
+/* ------------------------------------------------------------
+ * ibmvscsi.c
+ * (C) Copyright IBM Corporation 1994, 2004
+ * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
+ * Santiago Leon (santil@us.ibm.com)
+ * Dave Boutcher (sleddog@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ * ------------------------------------------------------------
+ * Emulation of a SCSI host adapter for Virtual I/O devices
+ *
+ * This driver supports the SCSI adapter implemented by the IBM
+ * Power5 firmware. That SCSI adapter is not a physical adapter,
+ * but allows Linux SCSI peripheral drivers to directly
+ * access devices in another logical partition on the physical system.
+ *
+ * The virtual adapter(s) are present in the open firmware device
+ * tree just like real adapters.
+ *
+ * One of the capabilities provided on these systems is the ability
+ * to DMA between partitions. The architecture states that for VSCSI,
+ * the server side is allowed to DMA to and from the client. The client
+ * is never trusted to DMA to or from the server directly.
+ *
+ * Messages are sent between partitions on a "Command/Response Queue"
+ * (CRQ), which is just a buffer of 16 byte entries in the receiver's
+ * Senders cannot access the buffer directly, but send messages by
+ * making a hypervisor call and passing in the 16 bytes. The hypervisor
+ * puts the message in the next 16 byte space in round-robin fashion,
+ * turns on the high order bit of the message (the valid bit), and
+ * generates an interrupt to the receiver (if interrupts are turned on.)
+ * The receiver just turns off the valid bit when they have copied out
+ * the message.
+ *
+ * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
+ * (IU) (as defined in the T10 standard available at www.t10.org), gets
+ * a DMA address for the message, and sends it to the server as the
+ * payload of a CRQ message. The server DMAs the SRP IU and processes it,
+ * including doing any additional data transfers. When it is done, it
+ * DMAs the SRP response back to the same address as the request came from,
+ * and sends a CRQ message back to inform the client that the request has
+ * completed.
+ *
+ * TODO: This is currently pretty tied to the IBM pSeries hypervisor
+ * interfaces. It would be really nice to abstract this above an RDMA
+ * layer.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/kthread.h>
+#include <asm/firmware.h>
+#include <asm/vio.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_srp.h>
+#include "ibmvscsi.h"
+
+/* The values below are somewhat arbitrary default values, but
+ * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
+ * Note that there are 3 bits of channel value, 6 bits of id, and
+ * 5 bits of LUN.
+ */
+static int max_id = 64;
+static int max_channel = 3;
+static int init_timeout = 300;
+static int login_timeout = 60;
+static int info_timeout = 30;
+static int abort_timeout = 60;
+static int reset_timeout = 60;
+static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
+static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
+static int fast_fail = 1;
+static int client_reserve = 1;
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
+
+static struct scsi_transport_template *ibmvscsi_transport_template;
+
+#define IBMVSCSI_VERSION "1.5.9"
+
+MODULE_DESCRIPTION("IBM Virtual SCSI");
+MODULE_AUTHOR("Dave Boutcher");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVSCSI_VERSION);
+
+module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
+module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_channel, "Largest channel value");
+module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
+module_param_named(max_requests, max_requests, int, S_IRUGO);
+MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
+module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO );
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
+
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+ struct ibmvscsi_host_data *hostdata);
+
+/* ------------------------------------------------------------
+ * Routines for managing the command/response queue
+ */
+/**
+ * ibmvscsi_handle_event: - Interrupt handler for crq events
+ * @irq: number of irq to handle, not used
+ * @dev_instance: ibmvscsi_host_data of host that received interrupt
+ *
+ * Disables interrupts and schedules srp_task
+ * Always returns IRQ_HANDLED
+ */
+static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
+{
+ struct ibmvscsi_host_data *hostdata =
+ (struct ibmvscsi_host_data *)dev_instance;
+ vio_disable_interrupts(to_vio_dev(hostdata->dev));
+ tasklet_schedule(&hostdata->srp_task);
+ return IRQ_HANDLED;
+}
+
+/**
+ * release_crq_queue: - Deallocates data and unregisters CRQ
+ * @queue: crq_queue to initialize and register
+ * @host_data: ibmvscsi_host_data of host
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ */
+static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata,
+ int max_requests)
+{
+ long rc = 0;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ free_irq(vdev->irq, (void *)hostdata);
+ tasklet_kill(&hostdata->srp_task);
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+ dma_unmap_single(hostdata->dev,
+ queue->msg_token,
+ queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+ free_page((unsigned long)queue->msgs);
+}
+
+/**
+ * crq_queue_next_crq: - Returns the next entry in message queue
+ * @queue: crq_queue to use
+ *
+ * Returns pointer to next entry in queue, or NULL if there are no new
+ * entried in the CRQ.
+ */
+static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+{
+ struct viosrp_crq *crq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ crq = &queue->msgs[queue->cur];
+ if (crq->valid & 0x80) {
+ if (++queue->cur == queue->size)
+ queue->cur = 0;
+
+ /* Ensure the read of the valid bit occurs before reading any
+ * other bits of the CRQ entry
+ */
+ rmb();
+ } else
+ crq = NULL;
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ return crq;
+}
+
+/**
+ * ibmvscsi_send_crq: - Send a CRQ
+ * @hostdata: the adapter
+ * @word1: the first 64 bits of the data
+ * @word2: the second 64 bits of the data
+ */
+static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+ u64 word1, u64 word2)
+{
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ /*
+ * Ensure the command buffer is flushed to memory before handing it
+ * over to the VIOS to prevent it from fetching any stale data.
+ */
+ mb();
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvscsi_task: - Process srps asynchronously
+ * @data: ibmvscsi_host_data of host
+ */
+static void ibmvscsi_task(void *data)
+{
+ struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ struct viosrp_crq *crq;
+ int done = 0;
+
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
+ ibmvscsi_handle_crq(crq, hostdata);
+ crq->valid = 0x00;
+ }
+
+ vio_enable_interrupts(vdev);
+ crq = crq_queue_next_crq(&hostdata->queue);
+ if (crq != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvscsi_handle_crq(crq, hostdata);
+ crq->valid = 0x00;
+ } else {
+ done = 1;
+ }
+ }
+}
+
+static void gather_partition_info(void)
+{
+ struct device_node *rootdn;
+
+ const char *ppartition_name;
+ const __be32 *p_number_ptr;
+
+ /* Retrieve information about this partition */
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn) {
+ return;
+ }
+
+ ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ if (ppartition_name)
+ strncpy(partition_name, ppartition_name,
+ sizeof(partition_name));
+ p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (p_number_ptr)
+ partition_number = of_read_number(p_number_ptr, 1);
+ of_node_put(rootdn);
+}
+
+static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+ memset(&hostdata->madapter_info, 0x00,
+ sizeof(hostdata->madapter_info));
+
+ dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
+ strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
+
+ strncpy(hostdata->madapter_info.partition_name, partition_name,
+ sizeof(hostdata->madapter_info.partition_name));
+
+ hostdata->madapter_info.partition_number =
+ cpu_to_be32(partition_number);
+
+ hostdata->madapter_info.mad_version = cpu_to_be32(1);
+ hostdata->madapter_info.os_type = cpu_to_be32(2);
+}
+
+/**
+ * reset_crq_queue: - resets a crq after a failure
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata)
+{
+ int rc = 0;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ /* Close the CRQ */
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+ /* Clean out the queue */
+ memset(queue->msgs, 0x00, PAGE_SIZE);
+ queue->cur = 0;
+
+ set_adapter_info(hostdata);
+
+ /* And re-open it again */
+ rc = plpar_hcall_norets(H_REG_CRQ,
+ vdev->unit_address,
+ queue->msg_token, PAGE_SIZE);
+ if (rc == 2) {
+ /* Adapter is good, but other end is not ready */
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
+ } else if (rc != 0) {
+ dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
+ }
+ return rc;
+}
+
+/**
+ * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ * Returns zero on success.
+ */
+static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata,
+ int max_requests)
+{
+ int rc;
+ int retrc;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+
+ if (!queue->msgs)
+ goto malloc_failed;
+ queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+ queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
+ queue->size * sizeof(*queue->msgs),
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(hostdata->dev, queue->msg_token))
+ goto map_failed;
+
+ gather_partition_info();
+ set_adapter_info(hostdata);
+
+ retrc = rc = plpar_hcall_norets(H_REG_CRQ,
+ vdev->unit_address,
+ queue->msg_token, PAGE_SIZE);
+ if (rc == H_RESOURCE)
+ /* maybe kexecing and resource is busy. try a reset */
+ rc = ibmvscsi_reset_crq_queue(queue,
+ hostdata);
+
+ if (rc == 2) {
+ /* Adapter is good, but other end is not ready */
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
+ retrc = 0;
+ } else if (rc != 0) {
+ dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
+ goto reg_crq_failed;
+ }
+
+ queue->cur = 0;
+ spin_lock_init(&queue->lock);
+
+ tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
+ (unsigned long)hostdata);
+
+ if (request_irq(vdev->irq,
+ ibmvscsi_handle_event,
+ 0, "ibmvscsi", (void *)hostdata) != 0) {
+ dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+ vdev->irq);
+ goto req_irq_failed;
+ }
+
+ rc = vio_enable_interrupts(vdev);
+ if (rc != 0) {
+ dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
+ goto req_irq_failed;
+ }
+
+ return retrc;
+
+ req_irq_failed:
+ tasklet_kill(&hostdata->srp_task);
+ rc = 0;
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+ reg_crq_failed:
+ dma_unmap_single(hostdata->dev,
+ queue->msg_token,
+ queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+ map_failed:
+ free_page((unsigned long)queue->msgs);
+ malloc_failed:
+ return -1;
+}
+
+/**
+ * reenable_crq_queue: - reenables a crq after
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+ struct ibmvscsi_host_data *hostdata)
+{
+ int rc = 0;
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+ /* Re-enable the CRQ */
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+ } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+ if (rc)
+ dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
+ return rc;
+}
+
+/* ------------------------------------------------------------
+ * Routines for the event pool and event structs
+ */
+/**
+ * initialize_event_pool: - Allocates and initializes the event pool for a host
+ * @pool: event_pool to be initialized
+ * @size: Number of events in pool
+ * @hostdata: ibmvscsi_host_data who owns the event pool
+ *
+ * Returns zero on success.
+*/
+static int initialize_event_pool(struct event_pool *pool,
+ int size, struct ibmvscsi_host_data *hostdata)
+{
+ int i;
+
+ pool->size = size;
+ pool->next = 0;
+ pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+ if (!pool->events)
+ return -ENOMEM;
+
+ pool->iu_storage =
+ dma_alloc_coherent(hostdata->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ &pool->iu_token, 0);
+ if (!pool->iu_storage) {
+ kfree(pool->events);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pool->size; ++i) {
+ struct srp_event_struct *evt = &pool->events[i];
+ memset(&evt->crq, 0x00, sizeof(evt->crq));
+ atomic_set(&evt->free, 1);
+ evt->crq.valid = 0x80;
+ evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+ evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+ sizeof(*evt->xfer_iu) * i);
+ evt->xfer_iu = pool->iu_storage + i;
+ evt->hostdata = hostdata;
+ evt->ext_list = NULL;
+ evt->ext_list_token = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * release_event_pool: - Frees memory of an event pool of a host
+ * @pool: event_pool to be released
+ * @hostdata: ibmvscsi_host_data who owns the even pool
+ *
+ * Returns zero on success.
+*/
+static void release_event_pool(struct event_pool *pool,
+ struct ibmvscsi_host_data *hostdata)
+{
+ int i, in_use = 0;
+ for (i = 0; i < pool->size; ++i) {
+ if (atomic_read(&pool->events[i].free) != 1)
+ ++in_use;
+ if (pool->events[i].ext_list) {
+ dma_free_coherent(hostdata->dev,
+ SG_ALL * sizeof(struct srp_direct_buf),
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+ }
+ if (in_use)
+ dev_warn(hostdata->dev, "releasing event pool with %d "
+ "events still in use?\n", in_use);
+ kfree(pool->events);
+ dma_free_coherent(hostdata->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ pool->iu_storage, pool->iu_token);
+}
+
+/**
+ * valid_event_struct: - Determines if event is valid.
+ * @pool: event_pool that contains the event
+ * @evt: srp_event_struct to be checked for validity
+ *
+ * Returns zero if event is invalid, one otherwise.
+*/
+static int valid_event_struct(struct event_pool *pool,
+ struct srp_event_struct *evt)
+{
+ int index = evt - pool->events;
+ if (index < 0 || index >= pool->size) /* outside of bounds */
+ return 0;
+ if (evt != pool->events + index) /* unaligned */
+ return 0;
+ return 1;
+}
+
+/**
+ * ibmvscsi_free-event_struct: - Changes status of event to "free"
+ * @pool: event_pool that contains the event
+ * @evt: srp_event_struct to be modified
+ *
+*/
+static void free_event_struct(struct event_pool *pool,
+ struct srp_event_struct *evt)
+{
+ if (!valid_event_struct(pool, evt)) {
+ dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
+ "(not in pool %p)\n", evt, pool->events);
+ return;
+ }
+ if (atomic_inc_return(&evt->free) != 1) {
+ dev_err(evt->hostdata->dev, "Freeing event_struct %p "
+ "which is not in use!\n", evt);
+ return;
+ }
+}
+
+/**
+ * get_evt_struct: - Gets the next free event in pool
+ * @pool: event_pool that contains the events to be searched
+ *
+ * Returns the next event in "free" state, and NULL if none are free.
+ * Note that no synchronization is done here, we assume the host_lock
+ * will syncrhonze things.
+*/
+static struct srp_event_struct *get_event_struct(struct event_pool *pool)
+{
+ int i;
+ int poolsize = pool->size;
+ int offset = pool->next;
+
+ for (i = 0; i < poolsize; i++) {
+ offset = (offset + 1) % poolsize;
+ if (!atomic_dec_if_positive(&pool->events[offset].free)) {
+ pool->next = offset;
+ return &pool->events[offset];
+ }
+ }
+
+ printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
+ return NULL;
+}
+
+/**
+ * init_event_struct: Initialize fields in an event struct that are always
+ * required.
+ * @evt: The event
+ * @done: Routine to call when the event is responded to
+ * @format: SRP or MAD format
+ * @timeout: timeout value set in the CRQ
+ */
+static void init_event_struct(struct srp_event_struct *evt_struct,
+ void (*done) (struct srp_event_struct *),
+ u8 format,
+ int timeout)
+{
+ evt_struct->cmnd = NULL;
+ evt_struct->cmnd_done = NULL;
+ evt_struct->sync_srp = NULL;
+ evt_struct->crq.format = format;
+ evt_struct->crq.timeout = cpu_to_be16(timeout);
+ evt_struct->done = done;
+}
+
+/* ------------------------------------------------------------
+ * Routines for receiving SCSI responses from the hosting partition
+ */
+
+/**
+ * set_srp_direction: Set the fields in the srp related to data
+ * direction and number of buffers based on the direction in
+ * the scsi_cmnd and the number of buffers
+ */
+static void set_srp_direction(struct scsi_cmnd *cmd,
+ struct srp_cmd *srp_cmd,
+ int numbuf)
+{
+ u8 fmt;
+
+ if (numbuf == 0)
+ return;
+
+ if (numbuf == 1)
+ fmt = SRP_DATA_DESC_DIRECT;
+ else {
+ fmt = SRP_DATA_DESC_INDIRECT;
+ numbuf = min(numbuf, MAX_INDIRECT_BUFS);
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ srp_cmd->data_out_desc_cnt = numbuf;
+ else
+ srp_cmd->data_in_desc_cnt = numbuf;
+ }
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ srp_cmd->buf_fmt = fmt << 4;
+ else
+ srp_cmd->buf_fmt = fmt;
+}
+
+/**
+ * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
+ * @cmd: srp_cmd whose additional_data member will be unmapped
+ * @dev: device for which the memory is mapped
+ *
+*/
+static void unmap_cmd_data(struct srp_cmd *cmd,
+ struct srp_event_struct *evt_struct,
+ struct device *dev)
+{
+ u8 out_fmt, in_fmt;
+
+ out_fmt = cmd->buf_fmt >> 4;
+ in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
+
+ if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
+ return;
+
+ if (evt_struct->cmnd)
+ scsi_dma_unmap(evt_struct->cmnd);
+}
+
+static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
+ struct srp_direct_buf *md)
+{
+ int i;
+ struct scatterlist *sg;
+ u64 total_length = 0;
+
+ scsi_for_each_sg(cmd, sg, nseg, i) {
+ struct srp_direct_buf *descr = md + i;
+ descr->va = cpu_to_be64(sg_dma_address(sg));
+ descr->len = cpu_to_be32(sg_dma_len(sg));
+ descr->key = 0;
+ total_length += sg_dma_len(sg);
+ }
+ return total_length;
+}
+
+/**
+ * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields
+ * @cmd: Scsi_Cmnd with the scatterlist
+ * @srp_cmd: srp_cmd that contains the memory descriptor
+ * @dev: device for which to map dma memory
+ *
+ * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
+ * Returns 1 on success.
+*/
+static int map_sg_data(struct scsi_cmnd *cmd,
+ struct srp_event_struct *evt_struct,
+ struct srp_cmd *srp_cmd, struct device *dev)
+{
+
+ int sg_mapped;
+ u64 total_length = 0;
+ struct srp_direct_buf *data =
+ (struct srp_direct_buf *) srp_cmd->add_data;
+ struct srp_indirect_buf *indirect =
+ (struct srp_indirect_buf *) data;
+
+ sg_mapped = scsi_dma_map(cmd);
+ if (!sg_mapped)
+ return 1;
+ else if (sg_mapped < 0)
+ return 0;
+
+ set_srp_direction(cmd, srp_cmd, sg_mapped);
+
+ /* special case; we can use a single direct descriptor */
+ if (sg_mapped == 1) {
+ map_sg_list(cmd, sg_mapped, data);
+ return 1;
+ }
+
+ indirect->table_desc.va = 0;
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(struct srp_direct_buf));
+ indirect->table_desc.key = 0;
+
+ if (sg_mapped <= MAX_INDIRECT_BUFS) {
+ total_length = map_sg_list(cmd, sg_mapped,
+ &indirect->desc_list[0]);
+ indirect->len = cpu_to_be32(total_length);
+ return 1;
+ }
+
+ /* get indirect table */
+ if (!evt_struct->ext_list) {
+ evt_struct->ext_list = (struct srp_direct_buf *)
+ dma_alloc_coherent(dev,
+ SG_ALL * sizeof(struct srp_direct_buf),
+ &evt_struct->ext_list_token, 0);
+ if (!evt_struct->ext_list) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't allocate memory "
+ "for indirect table\n");
+ scsi_dma_unmap(cmd);
+ return 0;
+ }
+ }
+
+ total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
+
+ indirect->len = cpu_to_be32(total_length);
+ indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(indirect->desc_list[0]));
+ memcpy(indirect->desc_list, evt_struct->ext_list,
+ MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
+ return 1;
+}
+
+/**
+ * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
+ * @cmd: struct scsi_cmnd with the memory to be mapped
+ * @srp_cmd: srp_cmd that contains the memory descriptor
+ * @dev: dma device for which to map dma memory
+ *
+ * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
+ * Returns 1 on success.
+*/
+static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
+ struct srp_event_struct *evt_struct,
+ struct srp_cmd *srp_cmd, struct device *dev)
+{
+ switch (cmd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_NONE:
+ return 1;
+ case DMA_BIDIRECTIONAL:
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't map DMA_BIDIRECTIONAL to read/write\n");
+ return 0;
+ default:
+ sdev_printk(KERN_ERR, cmd->device,
+ "Unknown data direction 0x%02x; can't map!\n",
+ cmd->sc_data_direction);
+ return 0;
+ }
+
+ return map_sg_data(cmd, evt_struct, srp_cmd, dev);
+}
+
+/**
+ * purge_requests: Our virtual adapter just shut down. purge any sent requests
+ * @hostdata: the adapter
+ */
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+{
+ struct srp_event_struct *evt;
+ unsigned long flags;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ while (!list_empty(&hostdata->sent)) {
+ evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
+ list_del(&evt->list);
+ del_timer(&evt->timer);
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ if (evt->cmnd) {
+ evt->cmnd->result = (error_code << 16);
+ unmap_cmd_data(&evt->iu.srp.cmd, evt,
+ evt->hostdata->dev);
+ if (evt->cmnd_done)
+ evt->cmnd_done(evt->cmnd);
+ } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
+ evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+ evt->done(evt);
+ free_event_struct(&evt->hostdata->pool, evt);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ }
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
+ * ibmvscsi_reset_host - Reset the connection to the server
+ * @hostdata: struct ibmvscsi_host_data to reset
+*/
+static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
+{
+ scsi_block_requests(hostdata->host);
+ atomic_set(&hostdata->request_limit, 0);
+
+ purge_requests(hostdata, DID_ERROR);
+ hostdata->reset_crq = 1;
+ wake_up(&hostdata->work_wait_q);
+}
+
+/**
+ * ibmvscsi_timeout - Internal command timeout handler
+ * @evt_struct: struct srp_event_struct that timed out
+ *
+ * Called when an internally generated command times out
+*/
+static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
+ evt_struct->iu.srp.cmd.opcode);
+
+ ibmvscsi_reset_host(hostdata);
+}
+
+
+/* ------------------------------------------------------------
+ * Routines for sending and receiving SRPs
+ */
+/**
+ * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
+ * @evt_struct: evt_struct to be sent
+ * @hostdata: ibmvscsi_host_data of host
+ * @timeout: timeout in seconds - 0 means do not time command
+ *
+ * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
+ * Note that this routine assumes that host_lock is held for synchronization
+*/
+static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
+ struct ibmvscsi_host_data *hostdata,
+ unsigned long timeout)
+{
+ __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
+ int request_status = 0;
+ int rc;
+ int srp_req = 0;
+
+ /* If we have exhausted our request limit, just fail this request,
+ * unless it is for a reset or abort.
+ * Note that there are rare cases involving driver generated requests
+ * (such as task management requests) that the mid layer may think we
+ * can handle more requests (can_queue) when we actually can't
+ */
+ if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
+ srp_req = 1;
+ request_status =
+ atomic_dec_if_positive(&hostdata->request_limit);
+ /* If request limit was -1 when we started, it is now even
+ * less than that
+ */
+ if (request_status < -1)
+ goto send_error;
+ /* Otherwise, we may have run out of requests. */
+ /* If request limit was 0 when we started the adapter is in the
+ * process of performing a login with the server adapter, or
+ * we may have run out of requests.
+ */
+ else if (request_status == -1 &&
+ evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+ goto send_busy;
+ /* Abort and reset calls should make it through.
+ * Nothing except abort and reset should use the last two
+ * slots unless we had two or less to begin with.
+ */
+ else if (request_status < 2 &&
+ evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
+ /* In the case that we have less than two requests
+ * available, check the server limit as a combination
+ * of the request limit and the number of requests
+ * in-flight (the size of the send list). If the
+ * server limit is greater than 2, return busy so
+ * that the last two are reserved for reset and abort.
+ */
+ int server_limit = request_status;
+ struct srp_event_struct *tmp_evt;
+
+ list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+ server_limit++;
+ }
+
+ if (server_limit > 2)
+ goto send_busy;
+ }
+ }
+
+ /* Copy the IU into the transfer area */
+ *evt_struct->xfer_iu = evt_struct->iu;
+ evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
+
+ /* Add this to the sent list. We need to do this
+ * before we actually send
+ * in case it comes back REALLY fast
+ */
+ list_add_tail(&evt_struct->list, &hostdata->sent);
+
+ init_timer(&evt_struct->timer);
+ if (timeout) {
+ evt_struct->timer.data = (unsigned long) evt_struct;
+ evt_struct->timer.expires = jiffies + (timeout * HZ);
+ evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
+ add_timer(&evt_struct->timer);
+ }
+
+ rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+ if (rc != 0) {
+ list_del(&evt_struct->list);
+ del_timer(&evt_struct->timer);
+
+ /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+ * Firmware will send a CRQ with a transport event (0xFF) to
+ * tell this client what has happened to the transport. This
+ * will be handled in ibmvscsi_handle_crq()
+ */
+ if (rc == H_CLOSED) {
+ dev_warn(hostdata->dev, "send warning. "
+ "Receive queue closed, will retry.\n");
+ goto send_busy;
+ }
+ dev_err(hostdata->dev, "send error %d\n", rc);
+ if (srp_req)
+ atomic_inc(&hostdata->request_limit);
+ goto send_error;
+ }
+
+ return 0;
+
+ send_busy:
+ unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
+
+ free_event_struct(&hostdata->pool, evt_struct);
+ if (srp_req && request_status != -1)
+ atomic_inc(&hostdata->request_limit);
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ send_error:
+ unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
+
+ if (evt_struct->cmnd != NULL) {
+ evt_struct->cmnd->result = DID_ERROR << 16;
+ evt_struct->cmnd_done(evt_struct->cmnd);
+ } else if (evt_struct->done)
+ evt_struct->done(evt_struct);
+
+ free_event_struct(&hostdata->pool, evt_struct);
+ return 0;
+}
+
+/**
+ * handle_cmd_rsp: - Handle responses from commands
+ * @evt_struct: srp_event_struct to be handled
+ *
+ * Used as a callback by when sending scsi cmds.
+ * Gets called by ibmvscsi_handle_crq()
+*/
+static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
+{
+ struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
+ struct scsi_cmnd *cmnd = evt_struct->cmnd;
+
+ if (unlikely(rsp->opcode != SRP_RSP)) {
+ if (printk_ratelimit())
+ dev_warn(evt_struct->hostdata->dev,
+ "bad SRP RSP type %d\n", rsp->opcode);
+ }
+
+ if (cmnd) {
+ cmnd->result |= rsp->status;
+ if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
+ memcpy(cmnd->sense_buffer,
+ rsp->data,
+ be32_to_cpu(rsp->sense_data_len));
+ unmap_cmd_data(&evt_struct->iu.srp.cmd,
+ evt_struct,
+ evt_struct->hostdata->dev);
+
+ if (rsp->flags & SRP_RSP_FLAG_DOOVER)
+ scsi_set_resid(cmnd,
+ be32_to_cpu(rsp->data_out_res_cnt));
+ else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
+ scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
+ }
+
+ if (evt_struct->cmnd_done)
+ evt_struct->cmnd_done(cmnd);
+}
+
+/**
+ * lun_from_dev: - Returns the lun of the scsi device
+ * @dev: struct scsi_device
+ *
+*/
+static inline u16 lun_from_dev(struct scsi_device *dev)
+{
+ return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
+}
+
+/**
+ * ibmvscsi_queue: - The queuecommand function of the scsi template
+ * @cmd: struct scsi_cmnd to be executed
+ * @done: Callback function to be called when cmd is completed
+*/
+static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct srp_cmd *srp_cmd;
+ struct srp_event_struct *evt_struct;
+ struct srp_indirect_buf *indirect;
+ struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
+ u16 lun = lun_from_dev(cmnd->device);
+ u8 out_fmt, in_fmt;
+
+ cmnd->result = (DID_OK << 16);
+ evt_struct = get_event_struct(&hostdata->pool);
+ if (!evt_struct)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* Set up the actual SRP IU */
+ srp_cmd = &evt_struct->iu.srp.cmd;
+ memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
+ srp_cmd->opcode = SRP_CMD;
+ memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
+ srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
+
+ if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ sdev_printk(KERN_ERR, cmnd->device,
+ "couldn't convert cmd to srp_cmd\n");
+ free_event_struct(&hostdata->pool, evt_struct);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ init_event_struct(evt_struct,
+ handle_cmd_rsp,
+ VIOSRP_SRP_FORMAT,
+ cmnd->request->timeout/HZ);
+
+ evt_struct->cmnd = cmnd;
+ evt_struct->cmnd_done = done;
+
+ /* Fix up dma address of the buffer itself */
+ indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
+ out_fmt = srp_cmd->buf_fmt >> 4;
+ in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
+ if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
+ out_fmt == SRP_DATA_DESC_INDIRECT) &&
+ indirect->table_desc.va == 0) {
+ indirect->table_desc.va =
+ cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
+ offsetof(struct srp_cmd, add_data) +
+ offsetof(struct srp_indirect_buf, desc_list));
+ }
+
+ return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
+}
+
+static DEF_SCSI_QCMD(ibmvscsi_queuecommand)
+
+/* ------------------------------------------------------------
+ * Routines for driver initialization
+ */
+
+/**
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
+{
+
+ hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+ sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+ dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+ return 1;
+ }
+
+ hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+ &hostdata->madapter_info,
+ sizeof(hostdata->madapter_info),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+ dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+ dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+ sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
+{
+ dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+ sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+ sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
+
+/**
+ * login_rsp: - Handle response to SRP login request
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending srp_login. Gets called
+ * by ibmvscsi_handle_crq()
+*/
+static void login_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+ switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
+ case SRP_LOGIN_RSP: /* it worked! */
+ break;
+ case SRP_LOGIN_REJ: /* refused! */
+ dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+ evt_struct->xfer_iu->srp.login_rej.reason);
+ /* Login failed. */
+ atomic_set(&hostdata->request_limit, -1);
+ return;
+ default:
+ dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+ evt_struct->xfer_iu->srp.login_rsp.opcode);
+ /* Login failed. */
+ atomic_set(&hostdata->request_limit, -1);
+ return;
+ }
+
+ dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
+ hostdata->client_migrated = 0;
+
+ /* Now we know what the real request-limit is.
+ * This value is set rather than added to request_limit because
+ * request_limit could have been set to -1 by this client.
+ */
+ atomic_set(&hostdata->request_limit,
+ be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
+
+ /* If we had any pending I/Os, kick them */
+ scsi_unblock_requests(hostdata->host);
+}
+
+/**
+ * send_srp_login: - Sends the srp login
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+*/
+static int send_srp_login(struct ibmvscsi_host_data *hostdata)
+{
+ int rc;
+ unsigned long flags;
+ struct srp_login_req *login;
+ struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
+
+ BUG_ON(!evt_struct);
+ init_event_struct(evt_struct, login_rsp,
+ VIOSRP_SRP_FORMAT, login_timeout);
+
+ login = &evt_struct->iu.srp.login_req;
+ memset(login, 0, sizeof(*login));
+ login->opcode = SRP_LOGIN_REQ;
+ login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+ login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ /* Start out with a request limit of 0, since this is negotiated in
+ * the login request we are just sending and login requests always
+ * get sent by the driver regardless of request_limit.
+ */
+ atomic_set(&hostdata->request_limit, 0);
+
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ dev_info(hostdata->dev, "sent SRP login\n");
+ return rc;
+};
+
+/**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending adapter_info.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+ dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+ evt_struct->xfer_iu->mad.capabilities.common.status);
+ } else {
+ if (hostdata->caps.migration.common.server_support !=
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
+ dev_info(hostdata->dev, "Partition migration not supported\n");
+
+ if (client_reserve) {
+ if (hostdata->caps.reserve.common.server_support ==
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
+ dev_info(hostdata->dev, "Client reserve enabled\n");
+ else
+ dev_info(hostdata->dev, "Client reserve not supported\n");
+ }
+ }
+
+ send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ * and stores the result so it can be retrieved with
+ * @hostdata: ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+ struct viosrp_capabilities *req;
+ struct srp_event_struct *evt_struct;
+ unsigned long flags;
+ struct device_node *of_node = hostdata->dev->of_node;
+ const char *location;
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ BUG_ON(!evt_struct);
+
+ init_event_struct(evt_struct, capabilities_rsp,
+ VIOSRP_MAD_FORMAT, info_timeout);
+
+ req = &evt_struct->iu.mad.capabilities;
+ memset(req, 0, sizeof(*req));
+
+ hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
+ if (hostdata->client_migrated)
+ hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
+
+ strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+ sizeof(hostdata->caps.name));
+ hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+ location = of_get_property(of_node, "ibm,loc-code", NULL);
+ location = location ? location : dev_name(hostdata->dev);
+ strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+ hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+ req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+ req->buffer = cpu_to_be64(hostdata->caps_addr);
+
+ hostdata->caps.migration.common.cap_type =
+ cpu_to_be32(MIGRATION_CAPABILITIES);
+ hostdata->caps.migration.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.migration));
+ hostdata->caps.migration.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.migration.ecl = cpu_to_be32(1);
+
+ if (client_reserve) {
+ hostdata->caps.reserve.common.cap_type =
+ cpu_to_be32(RESERVATION_CAPABILITIES);
+ hostdata->caps.reserve.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.reserve));
+ hostdata->caps.reserve.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.reserve.type =
+ cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+ req->common.length =
+ cpu_to_be16(sizeof(hostdata->caps));
+ } else
+ req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+ sizeof(hostdata->caps.reserve));
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+ dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * fast_fail_rsp: - Handle response to MAD enable fast fail
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending enable fast fail. Gets called
+ * by ibmvscsi_handle_crq()
+ */
+static void fast_fail_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+ u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
+
+ if (status == VIOSRP_MAD_NOT_SUPPORTED)
+ dev_err(hostdata->dev, "fast_fail not supported in server\n");
+ else if (status == VIOSRP_MAD_FAILED)
+ dev_err(hostdata->dev, "fast_fail request failed\n");
+ else if (status != VIOSRP_MAD_SUCCESS)
+ dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
+
+ send_mad_capabilities(hostdata);
+}
+
+/**
+ * init_host - Start host initialization
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+ */
+static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
+{
+ int rc;
+ unsigned long flags;
+ struct viosrp_fast_fail *fast_fail_mad;
+ struct srp_event_struct *evt_struct;
+
+ if (!fast_fail) {
+ send_mad_capabilities(hostdata);
+ return 0;
+ }
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ BUG_ON(!evt_struct);
+
+ init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
+
+ fast_fail_mad = &evt_struct->iu.mad.fast_fail;
+ memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
+ fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+ fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ return rc;
+}
+
+/**
+ * adapter_info_rsp: - Handle response to MAD adapter info request
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending adapter_info. Gets called
+ * by ibmvscsi_handle_crq()
+*/
+static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
+ dev_err(hostdata->dev, "error %d getting adapter info\n",
+ evt_struct->xfer_iu->mad.adapter_info.common.status);
+ } else {
+ dev_info(hostdata->dev, "host srp version: %s, "
+ "host partition %s (%d), OS %d, max io %u\n",
+ hostdata->madapter_info.srp_version,
+ hostdata->madapter_info.partition_name,
+ be32_to_cpu(hostdata->madapter_info.partition_number),
+ be32_to_cpu(hostdata->madapter_info.os_type),
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
+
+ if (hostdata->madapter_info.port_max_txu[0])
+ hostdata->host->max_sectors =
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
+
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
+ strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+ dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+ hostdata->madapter_info.srp_version);
+ dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+ MAX_INDIRECT_BUFS);
+ hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+ }
+
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
+ enable_fast_fail(hostdata);
+ return;
+ }
+ }
+
+ send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_adapter_info: - Sends the mad adapter info request
+ * and stores the result so it can be retrieved with
+ * sysfs. We COULD consider causing a failure if the
+ * returned SRP version doesn't match ours.
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+*/
+static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+ struct viosrp_adapter_info *req;
+ struct srp_event_struct *evt_struct;
+ unsigned long flags;
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ BUG_ON(!evt_struct);
+
+ init_event_struct(evt_struct,
+ adapter_info_rsp,
+ VIOSRP_MAD_FORMAT,
+ info_timeout);
+
+ req = &evt_struct->iu.mad.adapter_info;
+ memset(req, 0x00, sizeof(*req));
+
+ req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+ req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+ req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+ dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * init_adapter: Start virtual adapter initialization sequence
+ *
+ */
+static void init_adapter(struct ibmvscsi_host_data *hostdata)
+{
+ send_mad_adapter_info(hostdata);
+}
+
+/**
+ * sync_completion: Signal that a synchronous command has completed
+ * Note that after returning from this call, the evt_struct is freed.
+ * the caller waiting on this completion shouldn't touch the evt_struct
+ * again.
+ */
+static void sync_completion(struct srp_event_struct *evt_struct)
+{
+ /* copy the response back */
+ if (evt_struct->sync_srp)
+ *evt_struct->sync_srp = *evt_struct->xfer_iu;
+
+ complete(&evt_struct->comp);
+}
+
+/**
+ * ibmvscsi_abort: Abort a command...from scsi host template
+ * send this over to the server and wait synchronously for the response
+ */
+static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
+ struct srp_tsk_mgmt *tsk_mgmt;
+ struct srp_event_struct *evt;
+ struct srp_event_struct *tmp_evt, *found_evt;
+ union viosrp_iu srp_rsp;
+ int rsp_rc;
+ unsigned long flags;
+ u16 lun = lun_from_dev(cmd->device);
+ unsigned long wait_switch = 0;
+
+ /* First, find this command in our sent list so we can figure
+ * out the correct tag
+ */
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ wait_switch = jiffies + (init_timeout * HZ);
+ do {
+ found_evt = NULL;
+ list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+ if (tmp_evt->cmnd == cmd) {
+ found_evt = tmp_evt;
+ break;
+ }
+ }
+
+ if (!found_evt) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ return SUCCESS;
+ }
+
+ evt = get_event_struct(&hostdata->pool);
+ if (evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to allocate abort event\n");
+ return FAILED;
+ }
+
+ init_event_struct(evt,
+ sync_completion,
+ VIOSRP_SRP_FORMAT,
+ abort_timeout);
+
+ tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+
+ /* Set up an abort SRP command */
+ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
+ tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+ tsk_mgmt->task_tag = (u64) found_evt;
+
+ evt->sync_srp = &srp_rsp;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
+
+ if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+ break;
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ } while (time_before(jiffies, wait_switch));
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to send abort() event. rc=%d\n", rsp_rc);
+ return FAILED;
+ }
+
+ sdev_printk(KERN_INFO, cmd->device,
+ "aborting command. lun 0x%llx, tag 0x%llx\n",
+ (((u64) lun) << 48), (u64) found_evt);
+
+ wait_for_completion(&evt->comp);
+
+ /* make sure we got a good response */
+ if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
+ if (printk_ratelimit())
+ sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
+ srp_rsp.srp.rsp.opcode);
+ return FAILED;
+ }
+
+ if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+ rsp_rc = *((int *)srp_rsp.srp.rsp.data);
+ else
+ rsp_rc = srp_rsp.srp.rsp.status;
+
+ if (rsp_rc) {
+ if (printk_ratelimit())
+ sdev_printk(KERN_WARNING, cmd->device,
+ "abort code %d for task tag 0x%llx\n",
+ rsp_rc, tsk_mgmt->task_tag);
+ return FAILED;
+ }
+
+ /* Because we dropped the spinlock above, it's possible
+ * The event is no longer in our list. Make sure it didn't
+ * complete while we were aborting
+ */
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+ if (tmp_evt->cmnd == cmd) {
+ found_evt = tmp_evt;
+ break;
+ }
+ }
+
+ if (found_evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
+ tsk_mgmt->task_tag);
+ return SUCCESS;
+ }
+
+ sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
+ tsk_mgmt->task_tag);
+
+ cmd->result = (DID_ABORT << 16);
+ list_del(&found_evt->list);
+ unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
+ found_evt->hostdata->dev);
+ free_event_struct(&found_evt->hostdata->pool, found_evt);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ atomic_inc(&hostdata->request_limit);
+ return SUCCESS;
+}
+
+/**
+ * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
+ * template send this over to the server and wait synchronously for the
+ * response
+ */
+static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
+ struct srp_tsk_mgmt *tsk_mgmt;
+ struct srp_event_struct *evt;
+ struct srp_event_struct *tmp_evt, *pos;
+ union viosrp_iu srp_rsp;
+ int rsp_rc;
+ unsigned long flags;
+ u16 lun = lun_from_dev(cmd->device);
+ unsigned long wait_switch = 0;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ wait_switch = jiffies + (init_timeout * HZ);
+ do {
+ evt = get_event_struct(&hostdata->pool);
+ if (evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to allocate reset event\n");
+ return FAILED;
+ }
+
+ init_event_struct(evt,
+ sync_completion,
+ VIOSRP_SRP_FORMAT,
+ reset_timeout);
+
+ tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+
+ /* Set up a lun reset SRP command */
+ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
+ tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+
+ evt->sync_srp = &srp_rsp;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
+
+ if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+ break;
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ } while (time_before(jiffies, wait_switch));
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to send reset event. rc=%d\n", rsp_rc);
+ return FAILED;
+ }
+
+ sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
+ (((u64) lun) << 48));
+
+ wait_for_completion(&evt->comp);
+
+ /* make sure we got a good response */
+ if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
+ if (printk_ratelimit())
+ sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
+ srp_rsp.srp.rsp.opcode);
+ return FAILED;
+ }
+
+ if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+ rsp_rc = *((int *)srp_rsp.srp.rsp.data);
+ else
+ rsp_rc = srp_rsp.srp.rsp.status;
+
+ if (rsp_rc) {
+ if (printk_ratelimit())
+ sdev_printk(KERN_WARNING, cmd->device,
+ "reset code %d for task tag 0x%llx\n",
+ rsp_rc, tsk_mgmt->task_tag);
+ return FAILED;
+ }
+
+ /* We need to find all commands for this LUN that have not yet been
+ * responded to, and fail them with DID_RESET
+ */
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
+ if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
+ if (tmp_evt->cmnd)
+ tmp_evt->cmnd->result = (DID_RESET << 16);
+ list_del(&tmp_evt->list);
+ unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
+ tmp_evt->hostdata->dev);
+ free_event_struct(&tmp_evt->hostdata->pool,
+ tmp_evt);
+ atomic_inc(&hostdata->request_limit);
+ if (tmp_evt->cmnd_done)
+ tmp_evt->cmnd_done(tmp_evt->cmnd);
+ else if (tmp_evt->done)
+ tmp_evt->done(tmp_evt);
+ }
+ }
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ return SUCCESS;
+}
+
+/**
+ * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
+ * @cmd: struct scsi_cmnd having problems
+*/
+static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ unsigned long wait_switch = 0;
+ struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
+
+ dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
+
+ ibmvscsi_reset_host(hostdata);
+
+ for (wait_switch = jiffies + (init_timeout * HZ);
+ time_before(jiffies, wait_switch) &&
+ atomic_read(&hostdata->request_limit) < 2;) {
+
+ msleep(10);
+ }
+
+ if (atomic_read(&hostdata->request_limit) <= 0)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+/**
+ * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
+ * @crq: Command/Response queue
+ * @hostdata: ibmvscsi_host_data of host
+ *
+*/
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+ struct ibmvscsi_host_data *hostdata)
+{
+ long rc;
+ unsigned long flags;
+ /* The hypervisor copies our tag value here so no byteswapping */
+ struct srp_event_struct *evt_struct =
+ (__force struct srp_event_struct *)crq->IU_data_ptr;
+ switch (crq->valid) {
+ case 0xC0: /* initialization */
+ switch (crq->format) {
+ case 0x01: /* Initialization message */
+ dev_info(hostdata->dev, "partner initialized\n");
+ /* Send back a response */
+ rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
+ if (rc == 0) {
+ /* Now login */
+ init_adapter(hostdata);
+ } else {
+ dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
+ }
+
+ break;
+ case 0x02: /* Initialization response */
+ dev_info(hostdata->dev, "partner initialization complete\n");
+
+ /* Now login */
+ init_adapter(hostdata);
+ break;
+ default:
+ dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
+ }
+ return;
+ case 0xFF: /* Hypervisor telling us the connection is closed */
+ scsi_block_requests(hostdata->host);
+ atomic_set(&hostdata->request_limit, 0);
+ if (crq->format == 0x06) {
+ /* We need to re-setup the interpartition connection */
+ dev_info(hostdata->dev, "Re-enabling adapter!\n");
+ hostdata->client_migrated = 1;
+ hostdata->reenable_crq = 1;
+ purge_requests(hostdata, DID_REQUEUE);
+ wake_up(&hostdata->work_wait_q);
+ } else {
+ dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
+ crq->format);
+ ibmvscsi_reset_host(hostdata);
+ }
+ return;
+ case 0x80: /* real payload */
+ break;
+ default:
+ dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
+ crq->valid);
+ return;
+ }
+
+ /* The only kind of payload CRQs we should get are responses to
+ * things we send. Make sure this response is to something we
+ * actually sent
+ */
+ if (!valid_event_struct(&hostdata->pool, evt_struct)) {
+ dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
+ evt_struct);
+ return;
+ }
+
+ if (atomic_read(&evt_struct->free)) {
+ dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
+ evt_struct);
+ return;
+ }
+
+ if (crq->format == VIOSRP_SRP_FORMAT)
+ atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
+ &hostdata->request_limit);
+
+ del_timer(&evt_struct->timer);
+
+ if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
+ evt_struct->cmnd->result = DID_ERROR << 16;
+ if (evt_struct->done)
+ evt_struct->done(evt_struct);
+ else
+ dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
+
+ /*
+ * Lock the host_lock before messing with these structures, since we
+ * are running in a task context
+ */
+ spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
+ list_del(&evt_struct->list);
+ free_event_struct(&evt_struct->hostdata->pool, evt_struct);
+ spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
+}
+
+/**
+ * ibmvscsi_get_host_config: Send the command to the server to get host
+ * configuration data. The data is opaque to us.
+ */
+static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
+ unsigned char *buffer, int length)
+{
+ struct viosrp_host_config *host_config;
+ struct srp_event_struct *evt_struct;
+ unsigned long flags;
+ dma_addr_t addr;
+ int rc;
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ if (!evt_struct) {
+ dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
+ return -1;
+ }
+
+ init_event_struct(evt_struct,
+ sync_completion,
+ VIOSRP_MAD_FORMAT,
+ info_timeout);
+
+ host_config = &evt_struct->iu.mad.host_config;
+
+ /* The transport length field is only 16-bit */
+ length = min(0xffff, length);
+
+ /* Set up a lun reset SRP command */
+ memset(host_config, 0x00, sizeof(*host_config));
+ host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+ host_config->common.length = cpu_to_be16(length);
+ addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(hostdata->dev, addr)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(hostdata->dev,
+ "dma_mapping error getting host config\n");
+ free_event_struct(&hostdata->pool, evt_struct);
+ return -1;
+ }
+
+ host_config->buffer = cpu_to_be64(addr);
+
+ init_completion(&evt_struct->comp);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ if (rc == 0)
+ wait_for_completion(&evt_struct->comp);
+ dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
+
+ return rc;
+}
+
+/**
+ * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
+ * @sdev: struct scsi_device device to configure
+ *
+ * Enable allow_restart for a device if it is a disk. Adjust the
+ * queue_depth here also as is required by the documentation for
+ * struct scsi_host_template.
+ */
+static int ibmvscsi_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long lock_flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ if (sdev->type == TYPE_DISK) {
+ sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ return 0;
+}
+
+/**
+ * ibmvscsi_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
+ * @reason: calling context
+ *
+ * Return value:
+ * actual depth set
+ **/
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
+ qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+/* ------------------------------------------------------------
+ * sysfs attributes
+ */
+static ssize_t show_host_vhost_loc(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+ hostdata->caps.loc);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+ .attr = {
+ .name = "vhost_loc",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+ hostdata->caps.name);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+ .attr = {
+ .name = "vhost_name",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_vhost_name,
+};
+
+static ssize_t show_host_srp_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ hostdata->madapter_info.srp_version);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_srp_version = {
+ .attr = {
+ .name = "srp_version",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_srp_version,
+};
+
+static ssize_t show_host_partition_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ hostdata->madapter_info.partition_name);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_partition_name = {
+ .attr = {
+ .name = "partition_name",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_partition_name,
+};
+
+static ssize_t show_host_partition_number(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "%d\n",
+ hostdata->madapter_info.partition_number);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_partition_number = {
+ .attr = {
+ .name = "partition_number",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_partition_number,
+};
+
+static ssize_t show_host_mad_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "%d\n",
+ hostdata->madapter_info.mad_version);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_mad_version = {
+ .attr = {
+ .name = "mad_version",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_mad_version,
+};
+
+static ssize_t show_host_os_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_os_type = {
+ .attr = {
+ .name = "os_type",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_os_type,
+};
+
+static ssize_t show_host_config(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+
+ /* returns null-terminated host config data */
+ if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
+ return strlen(buf);
+ else
+ return 0;
+}
+
+static struct device_attribute ibmvscsi_host_config = {
+ .attr = {
+ .name = "config",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_config,
+};
+
+static struct device_attribute *ibmvscsi_attrs[] = {
+ &ibmvscsi_host_vhost_loc,
+ &ibmvscsi_host_vhost_name,
+ &ibmvscsi_host_srp_version,
+ &ibmvscsi_host_partition_name,
+ &ibmvscsi_host_partition_number,
+ &ibmvscsi_host_mad_version,
+ &ibmvscsi_host_os_type,
+ &ibmvscsi_host_config,
+ NULL
+};
+
+/* ------------------------------------------------------------
+ * SCSI driver registration
+ */
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
+ .proc_name = "ibmvscsi",
+ .queuecommand = ibmvscsi_queuecommand,
+ .eh_abort_handler = ibmvscsi_eh_abort_handler,
+ .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
+ .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
+ .slave_configure = ibmvscsi_slave_configure,
+ .change_queue_depth = ibmvscsi_change_queue_depth,
+ .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
+ .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = ibmvscsi_attrs,
+};
+
+/**
+ * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
+{
+ /* iu_storage data allocated in initialize_event_pool */
+ unsigned long desired_io = max_events * sizeof(union viosrp_iu);
+
+ /* add io space for sg data */
+ desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
+ IBMVSCSI_CMDS_PER_LUN_DEFAULT);
+
+ return desired_io;
+}
+
+static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
+{
+ int rc;
+ char *action = "reset";
+
+ if (hostdata->reset_crq) {
+ smp_rmb();
+ hostdata->reset_crq = 0;
+
+ rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+ if (!rc)
+ rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
+ vio_enable_interrupts(to_vio_dev(hostdata->dev));
+ } else if (hostdata->reenable_crq) {
+ smp_rmb();
+ action = "enable";
+ rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
+ hostdata->reenable_crq = 0;
+ if (!rc)
+ rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
+ } else
+ return;
+
+ if (rc) {
+ atomic_set(&hostdata->request_limit, -1);
+ dev_err(hostdata->dev, "error after %s\n", action);
+ }
+
+ scsi_unblock_requests(hostdata->host);
+}
+
+static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
+{
+ if (kthread_should_stop())
+ return 1;
+ else if (hostdata->reset_crq) {
+ smp_rmb();
+ return 1;
+ } else if (hostdata->reenable_crq) {
+ smp_rmb();
+ return 1;
+ }
+
+ return 0;
+}
+
+static int ibmvscsi_work(void *data)
+{
+ struct ibmvscsi_host_data *hostdata = data;
+ int rc;
+
+ set_user_nice(current, MIN_NICE);
+
+ while (1) {
+ rc = wait_event_interruptible(hostdata->work_wait_q,
+ ibmvscsi_work_to_do(hostdata));
+
+ BUG_ON(rc);
+
+ if (kthread_should_stop())
+ break;
+
+ ibmvscsi_do_work(hostdata);
+ }
+
+ return 0;
+}
+
+/**
+ * Called by bus code for each adapter
+ */
+static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ struct ibmvscsi_host_data *hostdata;
+ struct Scsi_Host *host;
+ struct device *dev = &vdev->dev;
+ struct srp_rport_identifiers ids;
+ struct srp_rport *rport;
+ unsigned long wait_switch = 0;
+ int rc;
+
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
+ if (!host) {
+ dev_err(&vdev->dev, "couldn't allocate host data\n");
+ goto scsi_host_alloc_failed;
+ }
+
+ host->transportt = ibmvscsi_transport_template;
+ hostdata = shost_priv(host);
+ memset(hostdata, 0x00, sizeof(*hostdata));
+ INIT_LIST_HEAD(&hostdata->sent);
+ init_waitqueue_head(&hostdata->work_wait_q);
+ hostdata->host = host;
+ hostdata->dev = dev;
+ atomic_set(&hostdata->request_limit, -1);
+ hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
+
+ if (map_persist_bufs(hostdata)) {
+ dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+ goto persist_bufs_failed;
+ }
+
+ hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
+ "ibmvscsi", host->host_no);
+
+ if (IS_ERR(hostdata->work_thread)) {
+ dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
+ PTR_ERR(hostdata->work_thread));
+ goto init_crq_failed;
+ }
+
+ rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
+ if (rc != 0 && rc != H_RESOURCE) {
+ dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
+ goto kill_kthread;
+ }
+ if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
+ dev_err(&vdev->dev, "couldn't initialize event pool\n");
+ goto init_pool_failed;
+ }
+
+ host->max_lun = 8;
+ host->max_id = max_id;
+ host->max_channel = max_channel;
+ host->max_cmd_len = 16;
+
+ if (scsi_add_host(hostdata->host, hostdata->dev))
+ goto add_host_failed;
+
+ /* we don't have a proper target_port_id so let's use the fake one */
+ memcpy(ids.port_id, hostdata->madapter_info.partition_name,
+ sizeof(ids.port_id));
+ ids.roles = SRP_RPORT_ROLE_TARGET;
+ rport = srp_rport_add(host, &ids);
+ if (IS_ERR(rport))
+ goto add_srp_port_failed;
+
+ /* Try to send an initialization message. Note that this is allowed
+ * to fail if the other end is not acive. In that case we don't
+ * want to scan
+ */
+ if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+ || rc == H_RESOURCE) {
+ /*
+ * Wait around max init_timeout secs for the adapter to finish
+ * initializing. When we are done initializing, we will have a
+ * valid request_limit. We don't want Linux scanning before
+ * we are ready.
+ */
+ for (wait_switch = jiffies + (init_timeout * HZ);
+ time_before(jiffies, wait_switch) &&
+ atomic_read(&hostdata->request_limit) < 2;) {
+
+ msleep(10);
+ }
+
+ /* if we now have a valid request_limit, initiate a scan */
+ if (atomic_read(&hostdata->request_limit) > 0)
+ scsi_scan_host(host);
+ }
+
+ dev_set_drvdata(&vdev->dev, hostdata);
+ return 0;
+
+ add_srp_port_failed:
+ scsi_remove_host(hostdata->host);
+ add_host_failed:
+ release_event_pool(&hostdata->pool, hostdata);
+ init_pool_failed:
+ ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
+ kill_kthread:
+ kthread_stop(hostdata->work_thread);
+ init_crq_failed:
+ unmap_persist_bufs(hostdata);
+ persist_bufs_failed:
+ scsi_host_put(host);
+ scsi_host_alloc_failed:
+ return -1;
+}
+
+static int ibmvscsi_remove(struct vio_dev *vdev)
+{
+ struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+ unmap_persist_bufs(hostdata);
+ release_event_pool(&hostdata->pool, hostdata);
+ ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
+ max_events);
+
+ kthread_stop(hostdata->work_thread);
+ srp_remove_host(hostdata->host);
+ scsi_remove_host(hostdata->host);
+ scsi_host_put(hostdata->host);
+
+ return 0;
+}
+
+/**
+ * ibmvscsi_resume: Resume from suspend
+ * @dev: device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ */
+static int ibmvscsi_resume(struct device *dev)
+{
+ struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
+ vio_disable_interrupts(to_vio_dev(hostdata->dev));
+ tasklet_schedule(&hostdata->srp_task);
+
+ return 0;
+}
+
+/**
+ * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
+ * support.
+ */
+static struct vio_device_id ibmvscsi_device_table[] = {
+ {"vscsi", "IBM,v-scsi"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
+
+static struct dev_pm_ops ibmvscsi_pm_ops = {
+ .resume = ibmvscsi_resume
+};
+
+static struct vio_driver ibmvscsi_driver = {
+ .id_table = ibmvscsi_device_table,
+ .probe = ibmvscsi_probe,
+ .remove = ibmvscsi_remove,
+ .get_desired_dma = ibmvscsi_get_desired_dma,
+ .name = "ibmvscsi",
+ .pm = &ibmvscsi_pm_ops,
+};
+
+static struct srp_function_template ibmvscsi_transport_functions = {
+};
+
+int __init ibmvscsi_module_init(void)
+{
+ int ret;
+
+ /* Ensure we have two requests to do error recovery */
+ driver_template.can_queue = max_requests;
+ max_events = max_requests + 2;
+
+ if (!firmware_has_feature(FW_FEATURE_VIO))
+ return -ENODEV;
+
+ ibmvscsi_transport_template =
+ srp_attach_transport(&ibmvscsi_transport_functions);
+ if (!ibmvscsi_transport_template)
+ return -ENOMEM;
+
+ ret = vio_register_driver(&ibmvscsi_driver);
+ if (ret)
+ srp_release_transport(ibmvscsi_transport_template);
+ return ret;
+}
+
+void __exit ibmvscsi_module_exit(void)
+{
+ vio_unregister_driver(&ibmvscsi_driver);
+ srp_release_transport(ibmvscsi_transport_template);
+}
+
+module_init(ibmvscsi_module_init);
+module_exit(ibmvscsi_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
new file mode 100644
index 000000000..7d64867c5
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -0,0 +1,110 @@
+/* ------------------------------------------------------------
+ * ibmvscsi.h
+ * (C) Copyright IBM Corporation 1994, 2003
+ * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
+ * Santiago Leon (santil@us.ibm.com)
+ * Dave Boutcher (sleddog@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ * ------------------------------------------------------------
+ * Emulation of a SCSI host adapter for Virtual I/O devices
+ *
+ * This driver allows the Linux SCSI peripheral drivers to directly
+ * access devices in the hosting partition, either on an iSeries
+ * hypervisor system or a converged hypervisor system.
+ */
+#ifndef IBMVSCSI_H
+#define IBMVSCSI_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include "viosrp.h"
+
+struct scsi_cmnd;
+struct Scsi_Host;
+
+/* Number of indirect bufs...the list of these has to fit in the
+ * additional data of the srp_cmd struct along with the indirect
+ * descriptor
+ */
+#define MAX_INDIRECT_BUFS 10
+
+#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
+#define IBMVSCSI_CMDS_PER_LUN_DEFAULT 16
+#define IBMVSCSI_MAX_SECTORS_DEFAULT 256 /* 32 * 8 = default max I/O 32 pages */
+#define IBMVSCSI_MAX_CMDS_PER_LUN 64
+
+/* ------------------------------------------------------------
+ * Data Structures
+ */
+/* an RPA command/response transport queue */
+struct crq_queue {
+ struct viosrp_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+ spinlock_t lock;
+};
+
+/* a unit of work for the hosting partition */
+struct srp_event_struct {
+ union viosrp_iu *xfer_iu;
+ struct scsi_cmnd *cmnd;
+ struct list_head list;
+ void (*done) (struct srp_event_struct *);
+ struct viosrp_crq crq;
+ struct ibmvscsi_host_data *hostdata;
+ atomic_t free;
+ union viosrp_iu iu;
+ void (*cmnd_done) (struct scsi_cmnd *);
+ struct completion comp;
+ struct timer_list timer;
+ union viosrp_iu *sync_srp;
+ struct srp_direct_buf *ext_list;
+ dma_addr_t ext_list_token;
+};
+
+/* a pool of event structs for use */
+struct event_pool {
+ struct srp_event_struct *events;
+ u32 size;
+ int next;
+ union viosrp_iu *iu_storage;
+ dma_addr_t iu_token;
+};
+
+/* all driver data associated with a host adapter */
+struct ibmvscsi_host_data {
+ atomic_t request_limit;
+ int client_migrated;
+ int reset_crq;
+ int reenable_crq;
+ struct device *dev;
+ struct event_pool pool;
+ struct crq_queue queue;
+ struct tasklet_struct srp_task;
+ struct list_head sent;
+ struct Scsi_Host *host;
+ struct task_struct *work_thread;
+ wait_queue_head_t work_wait_q;
+ struct mad_adapter_info_data madapter_info;
+ struct capabilities caps;
+ dma_addr_t caps_addr;
+ dma_addr_t adapter_info_addr;
+};
+
+#endif /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
new file mode 100644
index 000000000..116243087
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -0,0 +1,217 @@
+/*****************************************************************************/
+/* srp.h -- SCSI RDMA Protocol definitions */
+/* */
+/* Written By: Colin Devilbis, IBM Corporation */
+/* */
+/* Copyright (C) 2003 IBM Corporation */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the Free Software */
+/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/* */
+/* */
+/* This file contains structures and definitions for IBM RPA (RS/6000 */
+/* platform architecture) implementation of the SRP (SCSI RDMA Protocol) */
+/* standard. SRP is used on IBM iSeries and pSeries platforms to send SCSI */
+/* commands between logical partitions. */
+/* */
+/* SRP Information Units (IUs) are sent on a "Command/Response Queue" (CRQ) */
+/* between partitions. The definitions in this file are architected, */
+/* and cannot be changed without breaking compatibility with other versions */
+/* of Linux and other operating systems (AIX, OS/400) that talk this protocol*/
+/* between logical partitions */
+/*****************************************************************************/
+#ifndef VIOSRP_H
+#define VIOSRP_H
+#include <scsi/srp.h>
+
+#define SRP_VERSION "16.a"
+#define SRP_MAX_IU_LEN 256
+#define SRP_MAX_LOC_LEN 32
+
+union srp_iu {
+ struct srp_login_req login_req;
+ struct srp_login_rsp login_rsp;
+ struct srp_login_rej login_rej;
+ struct srp_i_logout i_logout;
+ struct srp_t_logout t_logout;
+ struct srp_tsk_mgmt tsk_mgmt;
+ struct srp_cmd cmd;
+ struct srp_rsp rsp;
+ u8 reserved[SRP_MAX_IU_LEN];
+};
+
+enum viosrp_crq_formats {
+ VIOSRP_SRP_FORMAT = 0x01,
+ VIOSRP_MAD_FORMAT = 0x02,
+ VIOSRP_OS400_FORMAT = 0x03,
+ VIOSRP_AIX_FORMAT = 0x04,
+ VIOSRP_LINUX_FORMAT = 0x06,
+ VIOSRP_INLINE_FORMAT = 0x07
+};
+
+enum viosrp_crq_status {
+ VIOSRP_OK = 0x0,
+ VIOSRP_NONRECOVERABLE_ERR = 0x1,
+ VIOSRP_VIOLATES_MAX_XFER = 0x2,
+ VIOSRP_PARTNER_PANIC = 0x3,
+ VIOSRP_DEVICE_BUSY = 0x8,
+ VIOSRP_ADAPTER_FAIL = 0x10,
+ VIOSRP_OK2 = 0x99,
+};
+
+struct viosrp_crq {
+ u8 valid; /* used by RPA */
+ u8 format; /* SCSI vs out-of-band */
+ u8 reserved;
+ u8 status; /* non-scsi failure? (e.g. DMA failure) */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ __be64 IU_data_ptr; /* the TCE for transferring data */
+};
+
+/* MADs are Management requests above and beyond the IUs defined in the SRP
+ * standard.
+ */
+enum viosrp_mad_types {
+ VIOSRP_EMPTY_IU_TYPE = 0x01,
+ VIOSRP_ERROR_LOG_TYPE = 0x02,
+ VIOSRP_ADAPTER_INFO_TYPE = 0x03,
+ VIOSRP_HOST_CONFIG_TYPE = 0x04,
+ VIOSRP_CAPABILITIES_TYPE = 0x05,
+ VIOSRP_ENABLE_FAST_FAIL = 0x08,
+};
+
+enum viosrp_mad_status {
+ VIOSRP_MAD_SUCCESS = 0x00,
+ VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
+ VIOSRP_MAD_FAILED = 0xF7,
+};
+
+enum viosrp_capability_type {
+ MIGRATION_CAPABILITIES = 0x01,
+ RESERVATION_CAPABILITIES = 0x02,
+};
+
+enum viosrp_capability_support {
+ SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
+ SERVER_SUPPORTS_CAP = 0x01,
+ SERVER_CAP_DATA = 0x02,
+};
+
+enum viosrp_reserve_type {
+ CLIENT_RESERVE_SCSI_2 = 0x01,
+};
+
+enum viosrp_capability_flag {
+ CLIENT_MIGRATED = 0x01,
+ CLIENT_RECONNECT = 0x02,
+ CAP_LIST_SUPPORTED = 0x04,
+ CAP_LIST_DATA = 0x08,
+};
+
+/*
+ * Common MAD header
+ */
+struct mad_common {
+ __be32 type;
+ __be16 status;
+ __be16 length;
+ __be64 tag;
+};
+
+/*
+ * All SRP (and MAD) requests normally flow from the
+ * client to the server. There is no way for the server to send
+ * an asynchronous message back to the client. The Empty IU is used
+ * to hang out a meaningless request to the server so that it can respond
+ * asynchrouously with something like a SCSI AER
+ */
+struct viosrp_empty_iu {
+ struct mad_common common;
+ __be64 buffer;
+ __be32 port;
+};
+
+struct viosrp_error_log {
+ struct mad_common common;
+ __be64 buffer;
+};
+
+struct viosrp_adapter_info {
+ struct mad_common common;
+ __be64 buffer;
+};
+
+struct viosrp_host_config {
+ struct mad_common common;
+ __be64 buffer;
+};
+
+struct viosrp_fast_fail {
+ struct mad_common common;
+};
+
+struct viosrp_capabilities {
+ struct mad_common common;
+ __be64 buffer;
+};
+
+struct mad_capability_common {
+ __be32 cap_type;
+ __be16 length;
+ __be16 server_support;
+};
+
+struct mad_reserve_cap {
+ struct mad_capability_common common;
+ __be32 type;
+};
+
+struct mad_migration_cap {
+ struct mad_capability_common common;
+ __be32 ecl;
+};
+
+struct capabilities{
+ __be32 flags;
+ char name[SRP_MAX_LOC_LEN];
+ char loc[SRP_MAX_LOC_LEN];
+ struct mad_migration_cap migration;
+ struct mad_reserve_cap reserve;
+};
+
+union mad_iu {
+ struct viosrp_empty_iu empty_iu;
+ struct viosrp_error_log error_log;
+ struct viosrp_adapter_info adapter_info;
+ struct viosrp_host_config host_config;
+ struct viosrp_fast_fail fast_fail;
+ struct viosrp_capabilities capabilities;
+};
+
+union viosrp_iu {
+ union srp_iu srp;
+ union mad_iu mad;
+};
+
+struct mad_adapter_info_data {
+ char srp_version[8];
+ char partition_name[96];
+ __be32 partition_number;
+ __be32 mad_version;
+ __be32 os_type;
+ __be32 port_max_txu[8]; /* per-port maximum transfer */
+};
+
+#endif
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
new file mode 100644
index 000000000..89a826656
--- /dev/null
+++ b/drivers/scsi/imm.c
@@ -0,0 +1,1268 @@
+/* imm.c -- low level driver for the IOMEGA MatchMaker
+ * parallel port SCSI host adapter.
+ *
+ * (The IMM is the embedded controller in the ZIP Plus drive.)
+ *
+ * My unofficial company acronym list is 21 pages long:
+ * FLA: Four letter acronym with built in facility for
+ * future expansion to five letters.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/parport.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+/* The following #define is to avoid a clash with hosts.c */
+#define IMM_PROBE_SPP 0x0001
+#define IMM_PROBE_PS2 0x0002
+#define IMM_PROBE_ECR 0x0010
+#define IMM_PROBE_EPP17 0x0100
+#define IMM_PROBE_EPP19 0x0200
+
+
+typedef struct {
+ struct pardevice *dev; /* Parport device entry */
+ int base; /* Actual port address */
+ int base_hi; /* Hi Base address for ECP-ISA chipset */
+ int mode; /* Transfer mode */
+ struct scsi_cmnd *cur_cmd; /* Current queued command */
+ struct delayed_work imm_tq; /* Polling interrupt stuff */
+ unsigned long jstart; /* Jiffies at start */
+ unsigned failed:1; /* Failure flag */
+ unsigned dp:1; /* Data phase present */
+ unsigned rd:1; /* Read data in data phase */
+ unsigned wanted:1; /* Parport sharing busy flag */
+ wait_queue_head_t *waiting;
+ struct Scsi_Host *host;
+ struct list_head list;
+} imm_struct;
+
+static void imm_reset_pulse(unsigned int base);
+static int device_check(imm_struct *dev);
+
+#include "imm.h"
+
+static inline imm_struct *imm_dev(struct Scsi_Host *host)
+{
+ return *(imm_struct **)&host->hostdata;
+}
+
+static DEFINE_SPINLOCK(arbitration_lock);
+
+static void got_it(imm_struct *dev)
+{
+ dev->base = dev->dev->port->base;
+ if (dev->cur_cmd)
+ dev->cur_cmd->SCp.phase = 1;
+ else
+ wake_up(dev->waiting);
+}
+
+static void imm_wakeup(void *ref)
+{
+ imm_struct *dev = (imm_struct *) ref;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arbitration_lock, flags);
+ if (dev->wanted) {
+ parport_claim(dev->dev);
+ got_it(dev);
+ dev->wanted = 0;
+ }
+ spin_unlock_irqrestore(&arbitration_lock, flags);
+}
+
+static int imm_pb_claim(imm_struct *dev)
+{
+ unsigned long flags;
+ int res = 1;
+ spin_lock_irqsave(&arbitration_lock, flags);
+ if (parport_claim(dev->dev) == 0) {
+ got_it(dev);
+ res = 0;
+ }
+ dev->wanted = res;
+ spin_unlock_irqrestore(&arbitration_lock, flags);
+ return res;
+}
+
+static void imm_pb_dismiss(imm_struct *dev)
+{
+ unsigned long flags;
+ int wanted;
+ spin_lock_irqsave(&arbitration_lock, flags);
+ wanted = dev->wanted;
+ dev->wanted = 0;
+ spin_unlock_irqrestore(&arbitration_lock, flags);
+ if (!wanted)
+ parport_release(dev->dev);
+}
+
+static inline void imm_pb_release(imm_struct *dev)
+{
+ parport_release(dev->dev);
+}
+
+/* This is to give the imm driver a way to modify the timings (and other
+ * parameters) by writing to the /proc/scsi/imm/0 file.
+ * Very simple method really... (Too simple, no error checking :( )
+ * Reason: Kernel hackers HATE having to unload and reload modules for
+ * testing...
+ * Also gives a method to use a script to obtain optimum timings (TODO)
+ */
+static int imm_write_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ imm_struct *dev = imm_dev(host);
+
+ if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) {
+ dev->mode = simple_strtoul(buffer + 5, NULL, 0);
+ return length;
+ }
+ printk("imm /proc: invalid variable\n");
+ return -EINVAL;
+}
+
+static int imm_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ imm_struct *dev = imm_dev(host);
+
+ seq_printf(m, "Version : %s\n", IMM_VERSION);
+ seq_printf(m, "Parport : %s\n", dev->dev->port->name);
+ seq_printf(m, "Mode : %s\n", IMM_MODE_STRING[dev->mode]);
+ return 0;
+}
+
+#if IMM_DEBUG > 0
+#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\
+ y, __func__, __LINE__); imm_fail_func(x,y);
+static inline void
+imm_fail_func(imm_struct *dev, int error_code)
+#else
+static inline void
+imm_fail(imm_struct *dev, int error_code)
+#endif
+{
+ /* If we fail a device then we trash status / message bytes */
+ if (dev->cur_cmd) {
+ dev->cur_cmd->result = error_code << 16;
+ dev->failed = 1;
+ }
+}
+
+/*
+ * Wait for the high bit to be set.
+ *
+ * In principle, this could be tied to an interrupt, but the adapter
+ * doesn't appear to be designed to support interrupts. We spin on
+ * the 0x80 ready bit.
+ */
+static unsigned char imm_wait(imm_struct *dev)
+{
+ int k;
+ unsigned short ppb = dev->base;
+ unsigned char r;
+
+ w_ctr(ppb, 0x0c);
+
+ k = IMM_SPIN_TMO;
+ do {
+ r = r_str(ppb);
+ k--;
+ udelay(1);
+ }
+ while (!(r & 0x80) && (k));
+
+ /*
+ * STR register (LPT base+1) to SCSI mapping:
+ *
+ * STR imm imm
+ * ===================================
+ * 0x80 S_REQ S_REQ
+ * 0x40 !S_BSY (????)
+ * 0x20 !S_CD !S_CD
+ * 0x10 !S_IO !S_IO
+ * 0x08 (????) !S_BSY
+ *
+ * imm imm meaning
+ * ==================================
+ * 0xf0 0xb8 Bit mask
+ * 0xc0 0x88 ZIP wants more data
+ * 0xd0 0x98 ZIP wants to send more data
+ * 0xe0 0xa8 ZIP is expecting SCSI command data
+ * 0xf0 0xb8 end of transfer, ZIP is sending status
+ */
+ w_ctr(ppb, 0x04);
+ if (k)
+ return (r & 0xb8);
+
+ /* Counter expired - Time out occurred */
+ imm_fail(dev, DID_TIME_OUT);
+ printk("imm timeout in imm_wait\n");
+ return 0; /* command timed out */
+}
+
+static int imm_negotiate(imm_struct * tmp)
+{
+ /*
+ * The following is supposedly the IEEE 1284-1994 negotiate
+ * sequence. I have yet to obtain a copy of the above standard
+ * so this is a bit of a guess...
+ *
+ * A fair chunk of this is based on the Linux parport implementation
+ * of IEEE 1284.
+ *
+ * Return 0 if data available
+ * 1 if no data available
+ */
+
+ unsigned short base = tmp->base;
+ unsigned char a, mode;
+
+ switch (tmp->mode) {
+ case IMM_NIBBLE:
+ mode = 0x00;
+ break;
+ case IMM_PS2:
+ mode = 0x01;
+ break;
+ default:
+ return 0;
+ }
+
+ w_ctr(base, 0x04);
+ udelay(5);
+ w_dtr(base, mode);
+ udelay(100);
+ w_ctr(base, 0x06);
+ udelay(5);
+ a = (r_str(base) & 0x20) ? 0 : 1;
+ udelay(5);
+ w_ctr(base, 0x07);
+ udelay(5);
+ w_ctr(base, 0x06);
+
+ if (a) {
+ printk
+ ("IMM: IEEE1284 negotiate indicates no data available.\n");
+ imm_fail(tmp, DID_ERROR);
+ }
+ return a;
+}
+
+/*
+ * Clear EPP timeout bit.
+ */
+static inline void epp_reset(unsigned short ppb)
+{
+ int i;
+
+ i = r_str(ppb);
+ w_str(ppb, i);
+ w_str(ppb, i & 0xfe);
+}
+
+/*
+ * Wait for empty ECP fifo (if we are in ECP fifo mode only)
+ */
+static inline void ecp_sync(imm_struct *dev)
+{
+ int i, ppb_hi = dev->base_hi;
+
+ if (ppb_hi == 0)
+ return;
+
+ if ((r_ecr(ppb_hi) & 0xe0) == 0x60) { /* mode 011 == ECP fifo mode */
+ for (i = 0; i < 100; i++) {
+ if (r_ecr(ppb_hi) & 0x01)
+ return;
+ udelay(5);
+ }
+ printk("imm: ECP sync failed as data still present in FIFO.\n");
+ }
+}
+
+static int imm_byte_out(unsigned short base, const char *buffer, int len)
+{
+ int i;
+
+ w_ctr(base, 0x4); /* apparently a sane mode */
+ for (i = len >> 1; i; i--) {
+ w_dtr(base, *buffer++);
+ w_ctr(base, 0x5); /* Drop STROBE low */
+ w_dtr(base, *buffer++);
+ w_ctr(base, 0x0); /* STROBE high + INIT low */
+ }
+ w_ctr(base, 0x4); /* apparently a sane mode */
+ return 1; /* All went well - we hope! */
+}
+
+static int imm_nibble_in(unsigned short base, char *buffer, int len)
+{
+ unsigned char l;
+ int i;
+
+ /*
+ * The following is based on documented timing signals
+ */
+ w_ctr(base, 0x4);
+ for (i = len; i; i--) {
+ w_ctr(base, 0x6);
+ l = (r_str(base) & 0xf0) >> 4;
+ w_ctr(base, 0x5);
+ *buffer++ = (r_str(base) & 0xf0) | l;
+ w_ctr(base, 0x4);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static int imm_byte_in(unsigned short base, char *buffer, int len)
+{
+ int i;
+
+ /*
+ * The following is based on documented timing signals
+ */
+ w_ctr(base, 0x4);
+ for (i = len; i; i--) {
+ w_ctr(base, 0x26);
+ *buffer++ = r_dtr(base);
+ w_ctr(base, 0x25);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static int imm_out(imm_struct *dev, char *buffer, int len)
+{
+ unsigned short ppb = dev->base;
+ int r = imm_wait(dev);
+
+ /*
+ * Make sure that:
+ * a) the SCSI bus is BUSY (device still listening)
+ * b) the device is listening
+ */
+ if ((r & 0x18) != 0x08) {
+ imm_fail(dev, DID_ERROR);
+ printk("IMM: returned SCSI status %2x\n", r);
+ return 0;
+ }
+ switch (dev->mode) {
+ case IMM_EPP_32:
+ case IMM_EPP_16:
+ case IMM_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x4);
+#ifdef CONFIG_SCSI_IZIP_EPP16
+ if (!(((long) buffer | len) & 0x01))
+ outsw(ppb + 4, buffer, len >> 1);
+#else
+ if (!(((long) buffer | len) & 0x03))
+ outsl(ppb + 4, buffer, len >> 2);
+#endif
+ else
+ outsb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0xc);
+ r = !(r_str(ppb) & 0x01);
+ w_ctr(ppb, 0xc);
+ ecp_sync(dev);
+ break;
+
+ case IMM_NIBBLE:
+ case IMM_PS2:
+ /* 8 bit output, with a loop */
+ r = imm_byte_out(ppb, buffer, len);
+ break;
+
+ default:
+ printk("IMM: bug in imm_out()\n");
+ r = 0;
+ }
+ return r;
+}
+
+static int imm_in(imm_struct *dev, char *buffer, int len)
+{
+ unsigned short ppb = dev->base;
+ int r = imm_wait(dev);
+
+ /*
+ * Make sure that:
+ * a) the SCSI bus is BUSY (device still listening)
+ * b) the device is sending data
+ */
+ if ((r & 0x18) != 0x18) {
+ imm_fail(dev, DID_ERROR);
+ return 0;
+ }
+ switch (dev->mode) {
+ case IMM_NIBBLE:
+ /* 4 bit input, with a loop */
+ r = imm_nibble_in(ppb, buffer, len);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case IMM_PS2:
+ /* 8 bit input, with a loop */
+ r = imm_byte_in(ppb, buffer, len);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case IMM_EPP_32:
+ case IMM_EPP_16:
+ case IMM_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x24);
+#ifdef CONFIG_SCSI_IZIP_EPP16
+ if (!(((long) buffer | len) & 0x01))
+ insw(ppb + 4, buffer, len >> 1);
+#else
+ if (!(((long) buffer | len) & 0x03))
+ insl(ppb + 4, buffer, len >> 2);
+#endif
+ else
+ insb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0x2c);
+ r = !(r_str(ppb) & 0x01);
+ w_ctr(ppb, 0x2c);
+ ecp_sync(dev);
+ break;
+
+ default:
+ printk("IMM: bug in imm_ins()\n");
+ r = 0;
+ break;
+ }
+ return r;
+}
+
+static int imm_cpp(unsigned short ppb, unsigned char b)
+{
+ /*
+ * Comments on udelay values refer to the
+ * Command Packet Protocol (CPP) timing diagram.
+ */
+
+ unsigned char s1, s2, s3;
+ w_ctr(ppb, 0x0c);
+ udelay(2); /* 1 usec - infinite */
+ w_dtr(ppb, 0xaa);
+ udelay(10); /* 7 usec - infinite */
+ w_dtr(ppb, 0x55);
+ udelay(10); /* 7 usec - infinite */
+ w_dtr(ppb, 0x00);
+ udelay(10); /* 7 usec - infinite */
+ w_dtr(ppb, 0xff);
+ udelay(10); /* 7 usec - infinite */
+ s1 = r_str(ppb) & 0xb8;
+ w_dtr(ppb, 0x87);
+ udelay(10); /* 7 usec - infinite */
+ s2 = r_str(ppb) & 0xb8;
+ w_dtr(ppb, 0x78);
+ udelay(10); /* 7 usec - infinite */
+ s3 = r_str(ppb) & 0x38;
+ /*
+ * Values for b are:
+ * 0000 00aa Assign address aa to current device
+ * 0010 00aa Select device aa in EPP Winbond mode
+ * 0010 10aa Select device aa in EPP mode
+ * 0011 xxxx Deselect all devices
+ * 0110 00aa Test device aa
+ * 1101 00aa Select device aa in ECP mode
+ * 1110 00aa Select device aa in Compatible mode
+ */
+ w_dtr(ppb, b);
+ udelay(2); /* 1 usec - infinite */
+ w_ctr(ppb, 0x0c);
+ udelay(10); /* 7 usec - infinite */
+ w_ctr(ppb, 0x0d);
+ udelay(2); /* 1 usec - infinite */
+ w_ctr(ppb, 0x0c);
+ udelay(10); /* 7 usec - infinite */
+ w_dtr(ppb, 0xff);
+ udelay(10); /* 7 usec - infinite */
+
+ /*
+ * The following table is electrical pin values.
+ * (BSY is inverted at the CTR register)
+ *
+ * BSY ACK POut SEL Fault
+ * S1 0 X 1 1 1
+ * S2 1 X 0 1 1
+ * S3 L X 1 1 S
+ *
+ * L => Last device in chain
+ * S => Selected
+ *
+ * Observered values for S1,S2,S3 are:
+ * Disconnect => f8/58/78
+ * Connect => f8/58/70
+ */
+ if ((s1 == 0xb8) && (s2 == 0x18) && (s3 == 0x30))
+ return 1; /* Connected */
+ if ((s1 == 0xb8) && (s2 == 0x18) && (s3 == 0x38))
+ return 0; /* Disconnected */
+
+ return -1; /* No device present */
+}
+
+static inline int imm_connect(imm_struct *dev, int flag)
+{
+ unsigned short ppb = dev->base;
+
+ imm_cpp(ppb, 0xe0); /* Select device 0 in compatible mode */
+ imm_cpp(ppb, 0x30); /* Disconnect all devices */
+
+ if ((dev->mode == IMM_EPP_8) ||
+ (dev->mode == IMM_EPP_16) ||
+ (dev->mode == IMM_EPP_32))
+ return imm_cpp(ppb, 0x28); /* Select device 0 in EPP mode */
+ return imm_cpp(ppb, 0xe0); /* Select device 0 in compatible mode */
+}
+
+static void imm_disconnect(imm_struct *dev)
+{
+ imm_cpp(dev->base, 0x30); /* Disconnect all devices */
+}
+
+static int imm_select(imm_struct *dev, int target)
+{
+ int k;
+ unsigned short ppb = dev->base;
+
+ /*
+ * Firstly we want to make sure there is nothing
+ * holding onto the SCSI bus.
+ */
+ w_ctr(ppb, 0xc);
+
+ k = IMM_SELECT_TMO;
+ do {
+ k--;
+ } while ((r_str(ppb) & 0x08) && (k));
+
+ if (!k)
+ return 0;
+
+ /*
+ * Now assert the SCSI ID (HOST and TARGET) on the data bus
+ */
+ w_ctr(ppb, 0x4);
+ w_dtr(ppb, 0x80 | (1 << target));
+ udelay(1);
+
+ /*
+ * Deassert SELIN first followed by STROBE
+ */
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0xd);
+
+ /*
+ * ACK should drop low while SELIN is deasserted.
+ * FAULT should drop low when the SCSI device latches the bus.
+ */
+ k = IMM_SELECT_TMO;
+ do {
+ k--;
+ }
+ while (!(r_str(ppb) & 0x08) && (k));
+
+ /*
+ * Place the interface back into a sane state (status mode)
+ */
+ w_ctr(ppb, 0xc);
+ return (k) ? 1 : 0;
+}
+
+static int imm_init(imm_struct *dev)
+{
+ if (imm_connect(dev, 0) != 1)
+ return -EIO;
+ imm_reset_pulse(dev->base);
+ mdelay(1); /* Delay to allow devices to settle */
+ imm_disconnect(dev);
+ mdelay(1); /* Another delay to allow devices to settle */
+ return device_check(dev);
+}
+
+static inline int imm_send_command(struct scsi_cmnd *cmd)
+{
+ imm_struct *dev = imm_dev(cmd->device->host);
+ int k;
+
+ /* NOTE: IMM uses byte pairs */
+ for (k = 0; k < cmd->cmd_len; k += 2)
+ if (!imm_out(dev, &cmd->cmnd[k], 2))
+ return 0;
+ return 1;
+}
+
+/*
+ * The bulk flag enables some optimisations in the data transfer loops,
+ * it should be true for any command that transfers data in integral
+ * numbers of sectors.
+ *
+ * The driver appears to remain stable if we speed up the parallel port
+ * i/o in this function, but not elsewhere.
+ */
+static int imm_completion(struct scsi_cmnd *cmd)
+{
+ /* Return codes:
+ * -1 Error
+ * 0 Told to schedule
+ * 1 Finished data transfer
+ */
+ imm_struct *dev = imm_dev(cmd->device->host);
+ unsigned short ppb = dev->base;
+ unsigned long start_jiffies = jiffies;
+
+ unsigned char r, v;
+ int fast, bulk, status;
+
+ v = cmd->cmnd[0];
+ bulk = ((v == READ_6) ||
+ (v == READ_10) || (v == WRITE_6) || (v == WRITE_10));
+
+ /*
+ * We only get here if the drive is ready to comunicate,
+ * hence no need for a full imm_wait.
+ */
+ w_ctr(ppb, 0x0c);
+ r = (r_str(ppb) & 0xb8);
+
+ /*
+ * while (device is not ready to send status byte)
+ * loop;
+ */
+ while (r != (unsigned char) 0xb8) {
+ /*
+ * If we have been running for more than a full timer tick
+ * then take a rest.
+ */
+ if (time_after(jiffies, start_jiffies + 1))
+ return 0;
+
+ /*
+ * FAIL if:
+ * a) Drive status is screwy (!ready && !present)
+ * b) Drive is requesting/sending more data than expected
+ */
+ if (((r & 0x88) != 0x88) || (cmd->SCp.this_residual <= 0)) {
+ imm_fail(dev, DID_ERROR);
+ return -1; /* ERROR_RETURN */
+ }
+ /* determine if we should use burst I/O */
+ if (dev->rd == 0) {
+ fast = (bulk
+ && (cmd->SCp.this_residual >=
+ IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 2;
+ status = imm_out(dev, cmd->SCp.ptr, fast);
+ } else {
+ fast = (bulk
+ && (cmd->SCp.this_residual >=
+ IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 1;
+ status = imm_in(dev, cmd->SCp.ptr, fast);
+ }
+
+ cmd->SCp.ptr += fast;
+ cmd->SCp.this_residual -= fast;
+
+ if (!status) {
+ imm_fail(dev, DID_BUS_BUSY);
+ return -1; /* ERROR_RETURN */
+ }
+ if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ /* if scatter/gather, advance to the next segment */
+ if (cmd->SCp.buffers_residual--) {
+ cmd->SCp.buffer++;
+ cmd->SCp.this_residual =
+ cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+
+ /*
+ * Make sure that we transfer even number of bytes
+ * otherwise it makes imm_byte_out() messy.
+ */
+ if (cmd->SCp.this_residual & 0x01)
+ cmd->SCp.this_residual++;
+ }
+ }
+ /* Now check to see if the drive is ready to comunicate */
+ w_ctr(ppb, 0x0c);
+ r = (r_str(ppb) & 0xb8);
+
+ /* If not, drop back down to the scheduler and wait a timer tick */
+ if (!(r & 0x80))
+ return 0;
+ }
+ return 1; /* FINISH_RETURN */
+}
+
+/*
+ * Since the IMM itself doesn't generate interrupts, we use
+ * the scheduler's task queue to generate a stream of call-backs and
+ * complete the request when the drive is ready.
+ */
+static void imm_interrupt(struct work_struct *work)
+{
+ imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
+ struct scsi_cmnd *cmd = dev->cur_cmd;
+ struct Scsi_Host *host = cmd->device->host;
+ unsigned long flags;
+
+ if (imm_engine(dev, cmd)) {
+ schedule_delayed_work(&dev->imm_tq, 1);
+ return;
+ }
+ /* Command must of completed hence it is safe to let go... */
+#if IMM_DEBUG > 0
+ switch ((cmd->result >> 16) & 0xff) {
+ case DID_OK:
+ break;
+ case DID_NO_CONNECT:
+ printk("imm: no device at SCSI ID %i\n", cmd->device->id);
+ break;
+ case DID_BUS_BUSY:
+ printk("imm: BUS BUSY - EPP timeout detected\n");
+ break;
+ case DID_TIME_OUT:
+ printk("imm: unknown timeout\n");
+ break;
+ case DID_ABORT:
+ printk("imm: told to abort\n");
+ break;
+ case DID_PARITY:
+ printk("imm: parity error (???)\n");
+ break;
+ case DID_ERROR:
+ printk("imm: internal driver error\n");
+ break;
+ case DID_RESET:
+ printk("imm: told to reset device\n");
+ break;
+ case DID_BAD_INTR:
+ printk("imm: bad interrupt (???)\n");
+ break;
+ default:
+ printk("imm: bad return code (%02x)\n",
+ (cmd->result >> 16) & 0xff);
+ }
+#endif
+
+ if (cmd->SCp.phase > 1)
+ imm_disconnect(dev);
+
+ imm_pb_dismiss(dev);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ dev->cur_cmd = NULL;
+ cmd->scsi_done(cmd);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return;
+}
+
+static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
+{
+ unsigned short ppb = dev->base;
+ unsigned char l = 0, h = 0;
+ int retv, x;
+
+ /* First check for any errors that may have occurred
+ * Here we check for internal errors
+ */
+ if (dev->failed)
+ return 0;
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Phase 0 - Waiting for parport */
+ if (time_after(jiffies, dev->jstart + HZ)) {
+ /*
+ * We waited more than a second
+ * for parport to call us
+ */
+ imm_fail(dev, DID_BUS_BUSY);
+ return 0;
+ }
+ return 1; /* wait until imm_wakeup claims parport */
+ /* Phase 1 - Connected */
+ case 1:
+ imm_connect(dev, CONNECT_EPP_MAYBE);
+ cmd->SCp.phase++;
+
+ /* Phase 2 - We are now talking to the scsi bus */
+ case 2:
+ if (!imm_select(dev, scmd_id(cmd))) {
+ imm_fail(dev, DID_NO_CONNECT);
+ return 0;
+ }
+ cmd->SCp.phase++;
+
+ /* Phase 3 - Ready to accept a command */
+ case 3:
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ if (!imm_send_command(cmd))
+ return 0;
+ cmd->SCp.phase++;
+
+ /* Phase 4 - Setup scatter/gather buffers */
+ case 4:
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.this_residual = 0;
+ cmd->SCp.ptr = NULL;
+ }
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
+ cmd->SCp.phase++;
+ if (cmd->SCp.this_residual & 0x01)
+ cmd->SCp.this_residual++;
+ /* Phase 5 - Pre-Data transfer stage */
+ case 5:
+ /* Spin lock for BUSY */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ /* Require negotiation for read requests */
+ x = (r_str(ppb) & 0xb8);
+ dev->rd = (x & 0x10) ? 1 : 0;
+ dev->dp = (x & 0x20) ? 0 : 1;
+
+ if ((dev->dp) && (dev->rd))
+ if (imm_negotiate(dev))
+ return 0;
+ cmd->SCp.phase++;
+
+ /* Phase 6 - Data transfer stage */
+ case 6:
+ /* Spin lock for BUSY */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ if (dev->dp) {
+ retv = imm_completion(cmd);
+ if (retv == -1)
+ return 0;
+ if (retv == 0)
+ return 1;
+ }
+ cmd->SCp.phase++;
+
+ /* Phase 7 - Post data transfer stage */
+ case 7:
+ if ((dev->dp) && (dev->rd)) {
+ if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0x4);
+ }
+ }
+ cmd->SCp.phase++;
+
+ /* Phase 8 - Read status/message */
+ case 8:
+ /* Check for data overrun */
+ if (imm_wait(dev) != (unsigned char) 0xb8) {
+ imm_fail(dev, DID_ERROR);
+ return 0;
+ }
+ if (imm_negotiate(dev))
+ return 0;
+ if (imm_in(dev, &l, 1)) { /* read status byte */
+ /* Check for optional message byte */
+ if (imm_wait(dev) == (unsigned char) 0xb8)
+ imm_in(dev, &h, 1);
+ cmd->result = (DID_OK << 16) + (l & STATUS_MASK);
+ }
+ if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0x4);
+ }
+ return 0; /* Finished */
+ break;
+
+ default:
+ printk("imm: Invalid scsi phase\n");
+ }
+ return 0;
+}
+
+static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ imm_struct *dev = imm_dev(cmd->device->host);
+
+ if (dev->cur_cmd) {
+ printk("IMM: bug in imm_queuecommand\n");
+ return 0;
+ }
+ dev->failed = 0;
+ dev->jstart = jiffies;
+ dev->cur_cmd = cmd;
+ cmd->scsi_done = done;
+ cmd->result = DID_ERROR << 16; /* default return code */
+ cmd->SCp.phase = 0; /* bus free */
+
+ schedule_delayed_work(&dev->imm_tq, 0);
+
+ imm_pb_claim(dev);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(imm_queuecommand)
+
+/*
+ * Apparently the disk->capacity attribute is off by 1 sector
+ * for all disk drives. We add the one here, but it should really
+ * be done in sd.c. Even if it gets fixed there, this will still
+ * work.
+ */
+static int imm_biosparam(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int ip[])
+{
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
+ }
+ return 0;
+}
+
+static int imm_abort(struct scsi_cmnd *cmd)
+{
+ imm_struct *dev = imm_dev(cmd->device->host);
+ /*
+ * There is no method for aborting commands since Iomega
+ * have tied the SCSI_MESSAGE line high in the interface
+ */
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Do not have access to parport */
+ case 1: /* Have not connected to interface */
+ dev->cur_cmd = NULL; /* Forget the problem */
+ return SUCCESS;
+ break;
+ default: /* SCSI command sent, can not abort */
+ return FAILED;
+ break;
+ }
+}
+
+static void imm_reset_pulse(unsigned int base)
+{
+ w_ctr(base, 0x04);
+ w_dtr(base, 0x40);
+ udelay(1);
+ w_ctr(base, 0x0c);
+ w_ctr(base, 0x0d);
+ udelay(50);
+ w_ctr(base, 0x0c);
+ w_ctr(base, 0x04);
+}
+
+static int imm_reset(struct scsi_cmnd *cmd)
+{
+ imm_struct *dev = imm_dev(cmd->device->host);
+
+ if (cmd->SCp.phase)
+ imm_disconnect(dev);
+ dev->cur_cmd = NULL; /* Forget the problem */
+
+ imm_connect(dev, CONNECT_NORMAL);
+ imm_reset_pulse(dev->base);
+ mdelay(1); /* device settle delay */
+ imm_disconnect(dev);
+ mdelay(1); /* device settle delay */
+ return SUCCESS;
+}
+
+static int device_check(imm_struct *dev)
+{
+ /* This routine looks for a device and then attempts to use EPP
+ to send a command. If all goes as planned then EPP is available. */
+
+ static char cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ int loop, old_mode, status, k, ppb = dev->base;
+ unsigned char l;
+
+ old_mode = dev->mode;
+ for (loop = 0; loop < 8; loop++) {
+ /* Attempt to use EPP for Test Unit Ready */
+ if ((ppb & 0x0007) == 0x0000)
+ dev->mode = IMM_EPP_32;
+
+ second_pass:
+ imm_connect(dev, CONNECT_EPP_MAYBE);
+ /* Select SCSI device */
+ if (!imm_select(dev, loop)) {
+ imm_disconnect(dev);
+ continue;
+ }
+ printk("imm: Found device at ID %i, Attempting to use %s\n",
+ loop, IMM_MODE_STRING[dev->mode]);
+
+ /* Send SCSI command */
+ status = 1;
+ w_ctr(ppb, 0x0c);
+ for (l = 0; (l < 3) && (status); l++)
+ status = imm_out(dev, &cmd[l << 1], 2);
+
+ if (!status) {
+ imm_disconnect(dev);
+ imm_connect(dev, CONNECT_EPP_MAYBE);
+ imm_reset_pulse(dev->base);
+ udelay(1000);
+ imm_disconnect(dev);
+ udelay(1000);
+ if (dev->mode == IMM_EPP_32) {
+ dev->mode = old_mode;
+ goto second_pass;
+ }
+ printk("imm: Unable to establish communication\n");
+ return -EIO;
+ }
+ w_ctr(ppb, 0x0c);
+
+ k = 1000000; /* 1 Second */
+ do {
+ l = r_str(ppb);
+ k--;
+ udelay(1);
+ } while (!(l & 0x80) && (k));
+
+ l &= 0xb8;
+
+ if (l != 0xb8) {
+ imm_disconnect(dev);
+ imm_connect(dev, CONNECT_EPP_MAYBE);
+ imm_reset_pulse(dev->base);
+ udelay(1000);
+ imm_disconnect(dev);
+ udelay(1000);
+ if (dev->mode == IMM_EPP_32) {
+ dev->mode = old_mode;
+ goto second_pass;
+ }
+ printk
+ ("imm: Unable to establish communication\n");
+ return -EIO;
+ }
+ imm_disconnect(dev);
+ printk
+ ("imm: Communication established at 0x%x with ID %i using %s\n",
+ ppb, loop, IMM_MODE_STRING[dev->mode]);
+ imm_connect(dev, CONNECT_EPP_MAYBE);
+ imm_reset_pulse(dev->base);
+ udelay(1000);
+ imm_disconnect(dev);
+ udelay(1000);
+ return 0;
+ }
+ printk("imm: No devices found\n");
+ return -ENODEV;
+}
+
+/*
+ * imm cannot deal with highmem, so this causes all IO pages for this host
+ * to reside in low memory (hence mapped)
+ */
+static int imm_adjust_queue(struct scsi_device *device)
+{
+ blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
+ return 0;
+}
+
+static struct scsi_host_template imm_template = {
+ .module = THIS_MODULE,
+ .proc_name = "imm",
+ .show_info = imm_show_info,
+ .write_info = imm_write_info,
+ .name = "Iomega VPI2 (imm) interface",
+ .queuecommand = imm_queuecommand,
+ .eh_abort_handler = imm_abort,
+ .eh_bus_reset_handler = imm_reset,
+ .eh_host_reset_handler = imm_reset,
+ .bios_param = imm_biosparam,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .can_queue = 1,
+ .slave_alloc = imm_adjust_queue,
+};
+
+/***************************************************************************
+ * Parallel port probing routines *
+ ***************************************************************************/
+
+static LIST_HEAD(imm_hosts);
+
+static int __imm_attach(struct parport *pb)
+{
+ struct Scsi_Host *host;
+ imm_struct *dev;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
+ DEFINE_WAIT(wait);
+ int ports;
+ int modes, ppb;
+ int err = -ENOMEM;
+
+ init_waitqueue_head(&waiting);
+
+ dev = kzalloc(sizeof(imm_struct), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+
+ dev->base = -1;
+ dev->mode = IMM_AUTODETECT;
+ INIT_LIST_HEAD(&dev->list);
+
+ dev->dev = parport_register_device(pb, "imm", NULL, imm_wakeup,
+ NULL, 0, dev);
+
+ if (!dev->dev)
+ goto out;
+
+
+ /* Claim the bus so it remembers what we do to the control
+ * registers. [ CTR and ECP ]
+ */
+ err = -EBUSY;
+ dev->waiting = &waiting;
+ prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE);
+ if (imm_pb_claim(dev))
+ schedule_timeout(3 * HZ);
+ if (dev->wanted) {
+ printk(KERN_ERR "imm%d: failed to claim parport because "
+ "a pardevice is owning the port for too long "
+ "time!\n", pb->number);
+ imm_pb_dismiss(dev);
+ dev->waiting = NULL;
+ finish_wait(&waiting, &wait);
+ goto out1;
+ }
+ dev->waiting = NULL;
+ finish_wait(&waiting, &wait);
+ ppb = dev->base = dev->dev->port->base;
+ dev->base_hi = dev->dev->port->base_hi;
+ w_ctr(ppb, 0x0c);
+ modes = dev->dev->port->modes;
+
+ /* Mode detection works up the chain of speed
+ * This avoids a nasty if-then-else-if-... tree
+ */
+ dev->mode = IMM_NIBBLE;
+
+ if (modes & PARPORT_MODE_TRISTATE)
+ dev->mode = IMM_PS2;
+
+ /* Done configuration */
+
+ err = imm_init(dev);
+
+ imm_pb_release(dev);
+
+ if (err)
+ goto out1;
+
+ /* now the glue ... */
+ if (dev->mode == IMM_NIBBLE || dev->mode == IMM_PS2)
+ ports = 3;
+ else
+ ports = 8;
+
+ INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
+
+ err = -ENOMEM;
+ host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
+ if (!host)
+ goto out1;
+ host->io_port = pb->base;
+ host->n_io_port = ports;
+ host->dma_channel = -1;
+ host->unique_id = pb->number;
+ *(imm_struct **)&host->hostdata = dev;
+ dev->host = host;
+ list_add_tail(&dev->list, &imm_hosts);
+ err = scsi_add_host(host, NULL);
+ if (err)
+ goto out2;
+ scsi_scan_host(host);
+ return 0;
+
+out2:
+ list_del_init(&dev->list);
+ scsi_host_put(host);
+out1:
+ parport_unregister_device(dev->dev);
+out:
+ kfree(dev);
+ return err;
+}
+
+static void imm_attach(struct parport *pb)
+{
+ __imm_attach(pb);
+}
+
+static void imm_detach(struct parport *pb)
+{
+ imm_struct *dev;
+ list_for_each_entry(dev, &imm_hosts, list) {
+ if (dev->dev->port == pb) {
+ list_del_init(&dev->list);
+ scsi_remove_host(dev->host);
+ scsi_host_put(dev->host);
+ parport_unregister_device(dev->dev);
+ kfree(dev);
+ break;
+ }
+ }
+}
+
+static struct parport_driver imm_driver = {
+ .name = "imm",
+ .attach = imm_attach,
+ .detach = imm_detach,
+};
+
+static int __init imm_driver_init(void)
+{
+ printk("imm: Version %s\n", IMM_VERSION);
+ return parport_register_driver(&imm_driver);
+}
+
+static void __exit imm_driver_exit(void)
+{
+ parport_unregister_driver(&imm_driver);
+}
+
+module_init(imm_driver_init);
+module_exit(imm_driver_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
new file mode 100644
index 000000000..8f6f32fc6
--- /dev/null
+++ b/drivers/scsi/imm.h
@@ -0,0 +1,143 @@
+
+/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in
+ * the Iomega ZIP Plus drive
+ *
+ * (c) 1998 David Campbell
+ *
+ * Please note that I live in Perth, Western Australia. GMT+0800
+ */
+
+#ifndef _IMM_H
+#define _IMM_H
+
+#define IMM_VERSION "2.05 (for Linux 2.4.0)"
+
+/*
+ * 10 Apr 1998 (Good Friday) - Received EN144302 by email from Iomega.
+ * Scarry thing is the level of support from one of their managers.
+ * The onus is now on us (the developers) to shut up and start coding.
+ * 11Apr98 [ 0.10 ]
+ *
+ * --- SNIP ---
+ *
+ * It manages to find the drive which is a good start. Writing data during
+ * data phase is known to be broken (due to requirements of two byte writes).
+ * Removing "Phase" debug messages.
+ *
+ * PS: Took four hours of coding after I bought a drive.
+ * ANZAC Day (Aus "War Veterans Holiday") 25Apr98 [ 0.14 ]
+ *
+ * Ten minutes later after a few fixes.... (LITERALLY!!!)
+ * Have mounted disk, copied file, dismounted disk, remount disk, diff file
+ * ----- It actually works!!! -----
+ * 25Apr98 [ 0.15 ]
+ *
+ * Twenty minutes of mucking around, rearanged the IEEE negotiate mechanism.
+ * Now have byte mode working (only EPP and ECP to go now... :=)
+ * 26Apr98 [ 0.16 ]
+ *
+ * Thirty minutes of further coding results in EPP working on my machine.
+ * 27Apr98 [ 0.17 ]
+ *
+ * Due to work commitments and inability to get a "true" ECP mode functioning
+ * I have decided to code the parport support into imm.
+ * 09Jun98 [ 0.18 ]
+ *
+ * Driver is now out of beta testing.
+ * Support for parport has been added.
+ * Now distributed with the ppa driver.
+ * 12Jun98 [ 2.00 ]
+ *
+ * Err.. It appears that imm-2.00 was broken....
+ * 18Jun98 [ 2.01 ]
+ *
+ * Patch applied to sync this against the Linux 2.1.x kernel code
+ * Included qboot_zip.sh
+ * 21Jun98 [ 2.02 ]
+ *
+ * Other clean ups include the follow changes:
+ * CONFIG_SCSI_PPA_HAVE_PEDANTIC => CONFIG_SCSI_IZIP_EPP16
+ * added CONFIG_SCSI_IZIP_SLOW_CTR option
+ * [2.03]
+ * Fix kernel panic on scsi timeout. 20Aug00 [2.04]
+ *
+ * Avoid io_request_lock problems.
+ * John Cavan <johncavan@home.com> 16Nov00 [2.05]
+ */
+/* ------ END OF USER CONFIGURABLE PARAMETERS ----- */
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <scsi/scsi_host.h>
+/* batteries not included :-) */
+
+/*
+ * modes in which the driver can operate
+ */
+#define IMM_AUTODETECT 0 /* Autodetect mode */
+#define IMM_NIBBLE 1 /* work in standard 4 bit mode */
+#define IMM_PS2 2 /* PS/2 byte mode */
+#define IMM_EPP_8 3 /* EPP mode, 8 bit */
+#define IMM_EPP_16 4 /* EPP mode, 16 bit */
+#define IMM_EPP_32 5 /* EPP mode, 32 bit */
+#define IMM_UNKNOWN 6 /* Just in case... */
+
+static char *IMM_MODE_STRING[] =
+{
+ [IMM_AUTODETECT] = "Autodetect",
+ [IMM_NIBBLE] = "SPP",
+ [IMM_PS2] = "PS/2",
+ [IMM_EPP_8] = "EPP 8 bit",
+ [IMM_EPP_16] = "EPP 16 bit",
+#ifdef CONFIG_SCSI_IZIP_EPP16
+ [IMM_EPP_32] = "EPP 16 bit",
+#else
+ [IMM_EPP_32] = "EPP 32 bit",
+#endif
+ [IMM_UNKNOWN] = "Unknown",
+};
+
+/* other options */
+#define IMM_BURST_SIZE 512 /* data burst size */
+#define IMM_SELECT_TMO 500 /* 500 how long to wait for target ? */
+#define IMM_SPIN_TMO 5000 /* 50000 imm_wait loop limiter */
+#define IMM_DEBUG 0 /* debugging option */
+#define IN_EPP_MODE(x) (x == IMM_EPP_8 || x == IMM_EPP_16 || x == IMM_EPP_32)
+
+/* args to imm_connect */
+#define CONNECT_EPP_MAYBE 1
+#define CONNECT_NORMAL 0
+
+#define r_dtr(x) (unsigned char)inb((x))
+#define r_str(x) (unsigned char)inb((x)+1)
+#define r_ctr(x) (unsigned char)inb((x)+2)
+#define r_epp(x) (unsigned char)inb((x)+4)
+#define r_fifo(x) (unsigned char)inb((x)) /* x must be base_hi */
+ /* On PCI is: base+0x400 != base_hi */
+#define r_ecr(x) (unsigned char)inb((x)+2) /* x must be base_hi */
+
+#define w_dtr(x,y) outb(y, (x))
+#define w_str(x,y) outb(y, (x)+1)
+#define w_epp(x,y) outb(y, (x)+4)
+#define w_fifo(x,y) outb(y, (x)) /* x must be base_hi */
+#define w_ecr(x,y) outb(y, (x)+0x2) /* x must be base_hi */
+
+#ifdef CONFIG_SCSI_IZIP_SLOW_CTR
+#define w_ctr(x,y) outb_p(y, (x)+2)
+#else
+#define w_ctr(x,y) outb(y, (x)+2)
+#endif
+
+static int imm_engine(imm_struct *, struct scsi_cmnd *);
+
+#endif /* _IMM_H */
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
new file mode 100644
index 000000000..3882d9f51
--- /dev/null
+++ b/drivers/scsi/in2000.c
@@ -0,0 +1,2302 @@
+/*
+ * in2000.c - Linux device driver for the
+ * Always IN2000 ISA SCSI card.
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ *
+ * Drew Eckhardt's excellent 'Generic NCR5380' sources provided
+ * much of the inspiration and some of the code for this driver.
+ * The Linux IN2000 driver distributed in the Linux kernels through
+ * version 1.2.13 was an extremely valuable reference on the arcane
+ * (and still mysterious) workings of the IN2000's fifo. It also
+ * is where I lifted in2000_biosparam(), the gist of the card
+ * detection scheme, and other bits of code. Many thanks to the
+ * talented and courageous people who wrote, contributed to, and
+ * maintained that driver (including Brad McLean, Shaun Savage,
+ * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
+ * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
+ * Youngdale). I should also mention the driver written by
+ * Hamish Macdonald for the (GASP!) Amiga A2091 card, included
+ * in the Linux-m68k distribution; it gave me a good initial
+ * understanding of the proper way to run a WD33c93 chip, and I
+ * ended up stealing lots of code from it.
+ *
+ * _This_ driver is (I feel) an improvement over the old one in
+ * several respects:
+ * - All problems relating to the data size of a SCSI request are
+ * gone (as far as I know). The old driver couldn't handle
+ * swapping to partitions because that involved 4k blocks, nor
+ * could it deal with the st.c tape driver unmodified, because
+ * that usually involved 4k - 32k blocks. The old driver never
+ * quite got away from a morbid dependence on 2k block sizes -
+ * which of course is the size of the card's fifo.
+ *
+ * - Target Disconnection/Reconnection is now supported. Any
+ * system with more than one device active on the SCSI bus
+ * will benefit from this. The driver defaults to what I'm
+ * calling 'adaptive disconnect' - meaning that each command
+ * is evaluated individually as to whether or not it should
+ * be run with the option to disconnect/reselect (if the
+ * device chooses), or as a "SCSI-bus-hog".
+ *
+ * - Synchronous data transfers are now supported. Because there
+ * are a few devices (and many improperly terminated systems)
+ * that choke when doing sync, the default is sync DISABLED
+ * for all devices. This faster protocol can (and should!)
+ * be enabled on selected devices via the command-line.
+ *
+ * - Runtime operating parameters can now be specified through
+ * either the LILO or the 'insmod' command line. For LILO do:
+ * "in2000=blah,blah,blah"
+ * and with insmod go like:
+ * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
+ * The defaults should be good for most people. See the comment
+ * for 'setup_strings' below for more details.
+ *
+ * - The old driver relied exclusively on what the Western Digital
+ * docs call "Combination Level 2 Commands", which are a great
+ * idea in that the CPU is relieved of a lot of interrupt
+ * overhead. However, by accepting a certain (user-settable)
+ * amount of additional interrupts, this driver achieves
+ * better control over the SCSI bus, and data transfers are
+ * almost as fast while being much easier to define, track,
+ * and debug.
+ *
+ * - You can force detection of a card whose BIOS has been disabled.
+ *
+ * - Multiple IN2000 cards might almost be supported. I've tried to
+ * keep it in mind, but have no way to test...
+ *
+ *
+ * TODO:
+ * tagged queuing. multiple cards.
+ *
+ *
+ * NOTE:
+ * When using this or any other SCSI driver as a module, you'll
+ * find that with the stock kernel, at most _two_ SCSI hard
+ * drives will be linked into the device list (ie, usable).
+ * If your IN2000 card has more than 2 disks on its bus, you
+ * might want to change the define of 'SD_EXTRA_DEVS' in the
+ * 'hosts.h' file from 2 to whatever is appropriate. It took
+ * me a while to track down this surprisingly obscure and
+ * undocumented little "feature".
+ *
+ *
+ * People with bug reports, wish-lists, complaints, comments,
+ * or improvements are asked to pah-leeez email me (John Shifflett)
+ * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
+ * this thing into as good a shape as possible, and I'm positive
+ * there are lots of lurking bugs and "Stupid Places".
+ *
+ * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * - Using new_eh handler
+ * - Hopefully got all the locking right again
+ * See "FIXME" notes for items that could do with more work
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/ioport.h>
+#include <linux/stat.h>
+
+#include <asm/io.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+#define IN2000_VERSION "1.33-2.5"
+#define IN2000_DATE "2002/11/03"
+
+#include "in2000.h"
+
+
+/*
+ * 'setup_strings' is a single string used to pass operating parameters and
+ * settings from the kernel/module command-line to the driver. 'setup_args[]'
+ * is an array of strings that define the compile-time default values for
+ * these settings. If Linux boots with a LILO or insmod command-line, those
+ * settings are combined with 'setup_args[]'. Note that LILO command-lines
+ * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
+ * The driver recognizes the following keywords (lower case required) and
+ * arguments:
+ *
+ * - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
+ * - noreset -No optional args. Prevents SCSI bus reset at boot time.
+ * - nosync:x -x is a bitmask where the 1st 7 bits correspond with
+ * the 7 possible SCSI devices (bit 0 for device #0, etc).
+ * Set a bit to PREVENT sync negotiation on that device.
+ * The driver default is sync DISABLED on all devices.
+ * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
+ * period. Default is 500; acceptable values are 250 - 1000.
+ * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
+ * x = 1 does 'adaptive' disconnects, which is the default
+ * and generally the best choice.
+ * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
+ * various types of debug output to printed - see the DB_xxx
+ * defines in in2000.h
+ * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
+ * determines how the /proc interface works and what it
+ * does - see the PR_xxx defines in in2000.h
+ *
+ * Syntax Notes:
+ * - Numeric arguments can be decimal or the '0x' form of hex notation. There
+ * _must_ be a colon between a keyword and its numeric argument, with no
+ * spaces.
+ * - Keywords are separated by commas, no spaces, in the standard kernel
+ * command-line manner.
+ * - A keyword in the 'nth' comma-separated command-line member will overwrite
+ * the 'nth' element of setup_args[]. A blank command-line member (in
+ * other words, a comma with no preceding keyword) will _not_ overwrite
+ * the corresponding setup_args[] element.
+ *
+ * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
+ * - in2000=ioport:0x220,noreset
+ * - in2000=period:250,disconnect:2,nosync:0x03
+ * - in2000=debug:0x1e
+ * - in2000=proc:3
+ */
+
+/* Normally, no defaults are specified... */
+static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
+
+/* filled in by 'insmod' */
+static char *setup_strings;
+
+module_param(setup_strings, charp, 0);
+
+static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
+{
+ write1_io(reg_num, IO_WD_ADDR);
+ return read1_io(IO_WD_DATA);
+}
+
+
+#define READ_AUX_STAT() read1_io(IO_WD_ASR)
+
+
+static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
+{
+ write1_io(reg_num, IO_WD_ADDR);
+ write1_io(value, IO_WD_DATA);
+}
+
+
+static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
+{
+/* while (READ_AUX_STAT() & ASR_CIP)
+ printk("|");*/
+ write1_io(WD_COMMAND, IO_WD_ADDR);
+ write1_io(cmd, IO_WD_DATA);
+}
+
+
+static uchar read_1_byte(struct IN2000_hostdata *hostdata)
+{
+ uchar asr, x = 0;
+
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ x = read_3393(hostdata, WD_DATA);
+ } while (!(asr & ASR_INT));
+ return x;
+}
+
+
+static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
+{
+ write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
+ write1_io((value >> 16), IO_WD_DATA);
+ write1_io((value >> 8), IO_WD_DATA);
+ write1_io(value, IO_WD_DATA);
+}
+
+
+static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
+{
+ unsigned long value;
+
+ write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
+ value = read1_io(IO_WD_DATA) << 16;
+ value |= read1_io(IO_WD_DATA) << 8;
+ value |= read1_io(IO_WD_DATA);
+ return value;
+}
+
+
+/* The 33c93 needs to be told which direction a command transfers its
+ * data; we use this function to figure it out. Returns true if there
+ * will be a DATA_OUT phase with this command, false otherwise.
+ * (Thanks to Joerg Dorchain for the research and suggestion.)
+ */
+static int is_dir_out(Scsi_Cmnd * cmd)
+{
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_LONG:
+ case WRITE_SAME:
+ case WRITE_BUFFER:
+ case WRITE_VERIFY:
+ case WRITE_VERIFY_12:
+ case COMPARE:
+ case COPY:
+ case COPY_VERIFY:
+ case SEARCH_EQUAL:
+ case SEARCH_HIGH:
+ case SEARCH_LOW:
+ case SEARCH_EQUAL_12:
+ case SEARCH_HIGH_12:
+ case SEARCH_LOW_12:
+ case FORMAT_UNIT:
+ case REASSIGN_BLOCKS:
+ case RESERVE:
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case LOG_SELECT:
+ case SEND_DIAGNOSTIC:
+ case CHANGE_DEFINITION:
+ case UPDATE_BLOCK:
+ case SET_WINDOW:
+ case MEDIUM_SCAN:
+ case SEND_VOLUME_TAG:
+ case 0xea:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+
+static struct sx_period sx_table[] = {
+ {1, 0x20},
+ {252, 0x20},
+ {376, 0x30},
+ {500, 0x40},
+ {624, 0x50},
+ {752, 0x60},
+ {876, 0x70},
+ {1000, 0x00},
+ {0, 0}
+};
+
+static int round_period(unsigned int period)
+{
+ int x;
+
+ for (x = 1; sx_table[x].period_ns; x++) {
+ if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) {
+ return x;
+ }
+ }
+ return 7;
+}
+
+static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
+{
+ uchar result;
+
+ period *= 4; /* convert SDTR code to ns */
+ result = sx_table[round_period(period)].reg_value;
+ result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;
+ return result;
+}
+
+
+
+static void in2000_execute(struct Scsi_Host *instance);
+
+static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ struct Scsi_Host *instance;
+ struct IN2000_hostdata *hostdata;
+ Scsi_Cmnd *tmp;
+
+ instance = cmd->device->host;
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+
+ DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
+
+/* Set up a few fields in the Scsi_Cmnd structure for our own use:
+ * - host_scribble is the pointer to the next cmd in the input queue
+ * - scsi_done points to the routine we call when a cmd is finished
+ * - result is what you'd expect
+ */
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+/* We use the Scsi_Pointer structure that's included with each command
+ * as a scratchpad (as it's intended to be used!). The handy thing about
+ * the SCp.xxx fields is that they're always associated with a given
+ * cmd, and are preserved across disconnect-reselect. This means we
+ * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
+ * if we keep all the critical pointers and counters in SCp:
+ * - SCp.ptr is the pointer into the RAM buffer
+ * - SCp.this_residual is the size of that buffer
+ * - SCp.buffer points to the current scatter-gather buffer
+ * - SCp.buffers_residual tells us how many S.G. buffers there are
+ * - SCp.have_data_in helps keep track of >2048 byte transfers
+ * - SCp.sent_command is not used
+ * - SCp.phase records this command's SRCID_ER bit setting
+ */
+
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
+ }
+ cmd->SCp.have_data_in = 0;
+
+/* We don't set SCp.phase here - that's done in in2000_execute() */
+
+/* WD docs state that at the conclusion of a "LEVEL2" command, the
+ * status byte can be retrieved from the LUN register. Apparently,
+ * this is the case only for *uninterrupted* LEVEL2 commands! If
+ * there are any unexpected phases entered, even if they are 100%
+ * legal (different devices may choose to do things differently),
+ * the LEVEL2 command sequence is exited. This often occurs prior
+ * to receiving the status byte, in which case the driver does a
+ * status phase interrupt and gets the status byte on its own.
+ * While such a command can then be "resumed" (ie restarted to
+ * finish up as a LEVEL2 command), the LUN register will NOT be
+ * a valid status byte at the command's conclusion, and we must
+ * use the byte obtained during the earlier interrupt. Here, we
+ * preset SCp.Status to an illegal value (0xff) so that when
+ * this command finally completes, we can tell where the actual
+ * status byte is stored.
+ */
+
+ cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+
+/* We need to disable interrupts before messing with the input
+ * queue and calling in2000_execute().
+ */
+
+ /*
+ * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
+ * commands are added to the head of the queue so that the desired
+ * sense data is not lost before REQUEST_SENSE executes.
+ */
+
+ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ cmd->host_scribble = (uchar *) hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ } else { /* find the end of the queue */
+ for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
+ tmp->host_scribble = (uchar *) cmd;
+ }
+
+/* We know that there's at least one command in 'input_Q' now.
+ * Go see if any of them are runnable!
+ */
+
+ in2000_execute(cmd->device->host);
+
+ DB(DB_QUEUE_COMMAND, printk(")Q "))
+ return 0;
+}
+
+static DEF_SCSI_QCMD(in2000_queuecommand)
+
+
+
+/*
+ * This routine attempts to start a scsi command. If the host_card is
+ * already connected, we give up immediately. Otherwise, look through
+ * the input_Q, using the first command we find that's intended
+ * for a currently non-busy target/lun.
+ * Note that this function is always called with interrupts already
+ * disabled (either from in2000_queuecommand() or in2000_intr()).
+ */
+static void in2000_execute(struct Scsi_Host *instance)
+{
+ struct IN2000_hostdata *hostdata;
+ Scsi_Cmnd *cmd, *prev;
+ int i;
+ unsigned short *sp;
+ unsigned short f;
+ unsigned short flushbuf[16];
+
+
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+
+ DB(DB_EXECUTE, printk("EX("))
+
+ if (hostdata->selecting || hostdata->connected) {
+
+ DB(DB_EXECUTE, printk(")EX-0 "))
+
+ return;
+ }
+
+ /*
+ * Search through the input_Q for a command destined
+ * for an idle target/lun.
+ */
+
+ cmd = (Scsi_Cmnd *) hostdata->input_Q;
+ prev = NULL;
+ while (cmd) {
+ if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
+ break;
+ prev = cmd;
+ cmd = (Scsi_Cmnd *) cmd->host_scribble;
+ }
+
+ /* quit if queue empty or all possible targets are busy */
+
+ if (!cmd) {
+
+ DB(DB_EXECUTE, printk(")EX-1 "))
+
+ return;
+ }
+
+ /* remove command from queue */
+
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ else
+ hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;
+
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[cmd->device->id]++;
+#endif
+
+/*
+ * Start the selection process
+ */
+
+ if (is_dir_out(cmd))
+ write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
+ else
+ write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
+
+/* Now we need to figure out whether or not this command is a good
+ * candidate for disconnect/reselect. We guess to the best of our
+ * ability, based on a set of hierarchical rules. When several
+ * devices are operating simultaneously, disconnects are usually
+ * an advantage. In a single device system, or if only 1 device
+ * is being accessed, transfers usually go faster if disconnects
+ * are not allowed:
+ *
+ * + Commands should NEVER disconnect if hostdata->disconnect =
+ * DIS_NEVER (this holds for tape drives also), and ALWAYS
+ * disconnect if hostdata->disconnect = DIS_ALWAYS.
+ * + Tape drive commands should always be allowed to disconnect.
+ * + Disconnect should be allowed if disconnected_Q isn't empty.
+ * + Commands should NOT disconnect if input_Q is empty.
+ * + Disconnect should be allowed if there are commands in input_Q
+ * for a different target/lun. In this case, the other commands
+ * should be made disconnect-able, if not already.
+ *
+ * I know, I know - this code would flunk me out of any
+ * "C Programming 101" class ever offered. But it's easy
+ * to change around and experiment with for now.
+ */
+
+ cmd->SCp.phase = 0; /* assume no disconnect */
+ if (hostdata->disconnect == DIS_NEVER)
+ goto no;
+ if (hostdata->disconnect == DIS_ALWAYS)
+ goto yes;
+ if (cmd->device->type == 1) /* tape drive? */
+ goto yes;
+ if (hostdata->disconnected_Q) /* other commands disconnected? */
+ goto yes;
+ if (!(hostdata->input_Q)) /* input_Q empty? */
+ goto no;
+ for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) {
+ if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) {
+ for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble)
+ prev->SCp.phase = 1;
+ goto yes;
+ }
+ }
+ goto no;
+
+ yes:
+ cmd->SCp.phase = 1;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_allowed_cnt[cmd->device->id]++;
+#endif
+
+ no:
+ write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
+
+ write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun);
+ write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
+ hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+
+ if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
+
+ /*
+ * Do a 'Select-With-ATN' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * CSR_SELECT: success - proceed.
+ */
+
+ hostdata->selecting = cmd;
+
+/* Every target has its own synchronous transfer setting, kept in
+ * the sync_xfer array, and a corresponding status byte in sync_stat[].
+ * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
+ * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
+ * means that the parameters are undetermined as yet, and that we
+ * need to send an SDTR message to this device after selection is
+ * complete. We set SS_FIRST to tell the interrupt routine to do so,
+ * unless we don't want to even _try_ synchronous transfers: In this
+ * case we set SS_SET to make the defaults final.
+ */
+ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) {
+ if (hostdata->sync_off & (1 << cmd->device->id))
+ hostdata->sync_stat[cmd->device->id] = SS_SET;
+ else
+ hostdata->sync_stat[cmd->device->id] = SS_FIRST;
+ }
+ hostdata->state = S_SELECTING;
+ write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN);
+ }
+
+ else {
+
+ /*
+ * Do a 'Select-With-ATN-Xfer' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * anything else: success - proceed.
+ */
+
+ hostdata->connected = cmd;
+ write_3393(hostdata, WD_COMMAND_PHASE, 0);
+
+ /* copy command_descriptor_block into WD chip
+ * (take advantage of auto-incrementing)
+ */
+
+ write1_io(WD_CDB_1, IO_WD_ADDR);
+ for (i = 0; i < cmd->cmd_len; i++)
+ write1_io(cmd->cmnd[i], IO_WD_DATA);
+
+ /* The wd33c93 only knows about Group 0, 1, and 5 commands when
+ * it's doing a 'select-and-transfer'. To be safe, we write the
+ * size of the CDB into the OWN_ID register for every case. This
+ * way there won't be problems with vendor-unique, audio, etc.
+ */
+
+ write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
+
+ /* When doing a non-disconnect command, we can save ourselves a DATA
+ * phase interrupt later by setting everything up now. With writes we
+ * need to pre-fill the fifo; if there's room for the 32 flush bytes,
+ * put them in there too - that'll avoid a fifo interrupt. Reads are
+ * somewhat simpler.
+ * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
+ * This results in the IO_FIFO_COUNT register rolling over to zero,
+ * and apparently the gate array logic sees this as empty, not full,
+ * so the 3393 chip is never signalled to start reading from the
+ * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
+ * Regardless, we fix this by temporarily pretending that the fifo
+ * is 16 bytes smaller. (I see now that the old driver has a comment
+ * about "don't fill completely" in an analogous place - must be the
+ * same deal.) This results in CDROM, swap partitions, and tape drives
+ * needing an extra interrupt per write command - I think we can live
+ * with that!
+ */
+
+ if (!(cmd->SCp.phase)) {
+ write_3393_count(hostdata, cmd->SCp.this_residual);
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
+ write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */
+
+ if (is_dir_out(cmd)) {
+ hostdata->fifo = FI_FIFO_WRITING;
+ if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
+ i = IN2000_FIFO_SIZE - 16;
+ cmd->SCp.have_data_in = i; /* this much data in fifo */
+ i >>= 1; /* Gulp. Assuming modulo 2. */
+ sp = (unsigned short *) cmd->SCp.ptr;
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(*sp++, IO_FIFO);
+
+#endif
+
+ /* Is there room for the flush bytes? */
+
+ if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
+ sp = flushbuf;
+ i = 16;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(0, IO_FIFO);
+
+#endif
+
+ }
+ }
+
+ else {
+ write1_io(0, IO_FIFO_READ); /* put fifo in read mode */
+ hostdata->fifo = FI_FIFO_READING;
+ cmd->SCp.have_data_in = 0; /* nothing transferred yet */
+ }
+
+ } else {
+ write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
+ }
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ }
+
+ /*
+ * Since the SCSI bus can handle only 1 connection at a time,
+ * we get out of here now. If the selection fails, or when
+ * the command disconnects, we'll come back to this routine
+ * to search the input_Q again...
+ */
+
+ DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
+
+}
+
+
+
+static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
+{
+ uchar asr;
+
+ DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))
+
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_count(hostdata, cnt);
+ write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
+ if (data_in_dir) {
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ *buf++ = read_3393(hostdata, WD_DATA);
+ } while (!(asr & ASR_INT));
+ } else {
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ write_3393(hostdata, WD_DATA, *buf++);
+ } while (!(asr & ASR_INT));
+ }
+
+ /* Note: we are returning with the interrupt UN-cleared.
+ * Since (presumably) an entire I/O operation has
+ * completed, the bus phase is probably different, and
+ * the interrupt routine will discover this when it
+ * responds to the uncleared int.
+ */
+
+}
+
+
+
+static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
+{
+ struct IN2000_hostdata *hostdata;
+ unsigned short *sp;
+ unsigned short f;
+ int i;
+
+ hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;
+
+/* Normally, you'd expect 'this_residual' to be non-zero here.
+ * In a series of scatter-gather transfers, however, this
+ * routine will usually be called with 'this_residual' equal
+ * to 0 and 'buffers_residual' non-zero. This means that a
+ * previous transfer completed, clearing 'this_residual', and
+ * now we need to setup the next scatter-gather buffer as the
+ * source or destination for THIS transfer.
+ */
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ }
+
+/* Set up hardware registers */
+
+ write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
+ write_3393_count(hostdata, cmd->SCp.this_residual);
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
+ write1_io(0, IO_FIFO_WRITE); /* zero counter, assume write */
+
+/* Reading is easy. Just issue the command and return - we'll
+ * get an interrupt later when we have actual data to worry about.
+ */
+
+ if (data_in_dir) {
+ write1_io(0, IO_FIFO_READ);
+ if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else
+ write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
+ hostdata->fifo = FI_FIFO_READING;
+ cmd->SCp.have_data_in = 0;
+ return;
+ }
+
+/* Writing is more involved - we'll start the WD chip and write as
+ * much data to the fifo as we can right now. Later interrupts will
+ * write any bytes that don't make it at this stage.
+ */
+
+ if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else
+ write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
+ hostdata->fifo = FI_FIFO_WRITING;
+ sp = (unsigned short *) cmd->SCp.ptr;
+
+ if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
+ i = IN2000_FIFO_SIZE;
+ cmd->SCp.have_data_in = i;
+ i >>= 1; /* Gulp. We assume this_residual is modulo 2 */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(*sp++, IO_FIFO);
+
+#endif
+
+}
+
+
+/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
+ * function in order to work in an SMP environment. (I'd be surprised
+ * if the driver is ever used by anyone on a real multi-CPU motherboard,
+ * but it _does_ need to be able to compile and run in an SMP kernel.)
+ */
+
+static irqreturn_t in2000_intr(int irqnum, void *dev_id)
+{
+ struct Scsi_Host *instance = dev_id;
+ struct IN2000_hostdata *hostdata;
+ Scsi_Cmnd *patch, *cmd;
+ uchar asr, sr, phs, id, lun, *ucp, msg;
+ int i, j;
+ unsigned long length;
+ unsigned short *sp;
+ unsigned short f;
+ unsigned long flags;
+
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+
+/* Get the spin_lock and disable further ints, for SMP */
+
+ spin_lock_irqsave(instance->host_lock, flags);
+
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt++;
+#endif
+
+/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
+ * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
+ * with a big logic array, so it's a little different than what you might
+ * expect). As far as I know, there's no reason that BOTH can't be active
+ * at the same time, but there's a problem: while we can read the 3393
+ * to tell if _it_ wants an interrupt, I don't know of a way to ask the
+ * fifo the same question. The best we can do is check the 3393 and if
+ * it _isn't_ the source of the interrupt, then we can be pretty sure
+ * that the fifo is the culprit.
+ * UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
+ * IO_FIFO_COUNT register mirrors the fifo interrupt state. I
+ * assume that bit clear means interrupt active. As it turns
+ * out, the driver really doesn't need to check for this after
+ * all, so my remarks above about a 'problem' can safely be
+ * ignored. The way the logic is set up, there's no advantage
+ * (that I can see) to worrying about it.
+ *
+ * It seems that the fifo interrupt signal is negated when we extract
+ * bytes during read or write bytes during write.
+ * - fifo will interrupt when data is moving from it to the 3393, and
+ * there are 31 (or less?) bytes left to go. This is sort of short-
+ * sighted: what if you don't WANT to do more? In any case, our
+ * response is to push more into the fifo - either actual data or
+ * dummy bytes if need be. Note that we apparently have to write at
+ * least 32 additional bytes to the fifo after an interrupt in order
+ * to get it to release the ones it was holding on to - writing fewer
+ * than 32 will result in another fifo int.
+ * UPDATE: Again, info from Bill Earnest makes this more understandable:
+ * 32 bytes = two counts of the fifo counter register. He tells
+ * me that the fifo interrupt is a non-latching signal derived
+ * from a straightforward boolean interpretation of the 7
+ * highest bits of the fifo counter and the fifo-read/fifo-write
+ * state. Who'd a thought?
+ */
+
+ write1_io(0, IO_LED_ON);
+ asr = READ_AUX_STAT();
+ if (!(asr & ASR_INT)) { /* no WD33c93 interrupt? */
+
+/* Ok. This is definitely a FIFO-only interrupt.
+ *
+ * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
+ * maybe more to come from the SCSI bus. Read as many as we can out of the
+ * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
+ * update have_data_in afterwards.
+ *
+ * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
+ * into the WD3393 chip (I think the interrupt happens when there are 31
+ * bytes left, but it may be fewer...). The 3393 is still waiting, so we
+ * shove some more into the fifo, which gets things moving again. If the
+ * original SCSI command specified more than 2048 bytes, there may still
+ * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
+ * Don't forget to update have_data_in. If we've already written out the
+ * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
+ * push out the remaining real data.
+ * (Big thanks to Bill Earnest for getting me out of the mud in here.)
+ */
+
+ cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */
+ CHECK_NULL(cmd, "fifo_int")
+
+ if (hostdata->fifo == FI_FIFO_READING) {
+
+ DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))
+
+ sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i = read1_io(IO_FIFO_COUNT) & 0xfe;
+ i <<= 2; /* # of words waiting in the fifo */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_READ_IO
+
+ FAST_READ2_IO();
+#else
+ while (i--)
+ *sp++ = read2_io(IO_FIFO);
+
+#endif
+
+ i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i <<= 1;
+ cmd->SCp.have_data_in += i;
+ }
+
+ else if (hostdata->fifo == FI_FIFO_WRITING) {
+
+ DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))
+
+/* If all bytes have been written to the fifo, flush out the stragglers.
+ * Note that while writing 16 dummy words seems arbitrary, we don't
+ * have another choice that I can see. What we really want is to read
+ * the 3393 transfer count register (that would tell us how many bytes
+ * needed flushing), but the TRANSFER_INFO command hasn't completed
+ * yet (not enough bytes!) and that register won't be accessible. So,
+ * we use 16 words - a number obtained through trial and error.
+ * UPDATE: Bill says this is exactly what Always does, so there.
+ * More thanks due him for help in this section.
+ */
+ if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
+ i = 16;
+ while (i--) /* write 32 dummy bytes */
+ write2_io(0, IO_FIFO);
+ }
+
+/* If there are still bytes left in the SCSI buffer, write as many as we
+ * can out to the fifo.
+ */
+
+ else {
+ sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i = cmd->SCp.this_residual - cmd->SCp.have_data_in; /* bytes yet to go */
+ j = read1_io(IO_FIFO_COUNT) & 0xfe;
+ j <<= 2; /* how many words the fifo has room for */
+ if ((j << 1) > i)
+ j = (i >> 1);
+ while (j--)
+ write2_io(*sp++, IO_FIFO);
+
+ i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i <<= 1;
+ cmd->SCp.have_data_in += i;
+ }
+ }
+
+ else {
+ printk("*** Spurious FIFO interrupt ***");
+ }
+
+ write1_io(0, IO_LED_OFF);
+
+/* release the SMP spin_lock and restore irq state */
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
+ * may also be asserted, but we don't bother to check it: we get more
+ * detailed info from FIFO_READING and FIFO_WRITING (see below).
+ */
+
+ cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */
+ sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear the interrupt */
+ phs = read_3393(hostdata, WD_COMMAND_PHASE);
+
+ if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
+ printk("\nNR:wd-intr-1\n");
+ write1_io(0, IO_LED_OFF);
+
+/* release the SMP spin_lock and restore irq state */
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ DB(DB_INTR, printk("{%02x:%02x-", asr, sr))
+
+/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
+ * guaranteed to be in response to the completion of the transfer.
+ * If we were reading, there's probably data in the fifo that needs
+ * to be copied into RAM - do that here. Also, we have to update
+ * 'this_residual' and 'ptr' based on the contents of the
+ * TRANSFER_COUNT register, in case the device decided to do an
+ * intermediate disconnect (a device may do this if it has to
+ * do a seek, or just to be nice and let other devices have
+ * some bus time during long transfers).
+ * After doing whatever is necessary with the fifo, we go on and
+ * service the WD3393 interrupt normally.
+ */
+ if (hostdata->fifo == FI_FIFO_READING) {
+
+/* buffer index = start-of-buffer + #-of-bytes-already-read */
+
+ sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
+
+/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */
+
+ i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
+ i >>= 1; /* Gulp. We assume this will always be modulo 2 */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_READ_IO
+
+ FAST_READ2_IO();
+#else
+ while (i--)
+ *sp++ = read2_io(IO_FIFO);
+
+#endif
+
+ hostdata->fifo = FI_FIFO_UNUSED;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_3393_count(hostdata);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+
+ DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
+
+ }
+
+ else if (hostdata->fifo == FI_FIFO_WRITING) {
+ hostdata->fifo = FI_FIFO_UNUSED;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_3393_count(hostdata);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+
+ DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
+
+ }
+
+/* Respond to the specific WD3393 interrupt - there are quite a few! */
+
+ switch (sr) {
+
+ case CSR_TIMEOUT:
+ DB(DB_INTR, printk("TIMEOUT"))
+
+ if (hostdata->state == S_RUNNING_LEVEL2)
+ hostdata->connected = NULL;
+ else {
+ cmd = (Scsi_Cmnd *) hostdata->selecting; /* get a valid cmd */
+ CHECK_NULL(cmd, "csr_timeout")
+ hostdata->selecting = NULL;
+ }
+
+ cmd->result = DID_NO_CONNECT << 16;
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->state = S_UNCONNECTED;
+ cmd->scsi_done(cmd);
+
+/* We are not connected to a target - check to see if there
+ * are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_SELECT:
+ DB(DB_INTR, printk("SELECT"))
+ hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
+ CHECK_NULL(cmd, "csr_select")
+ hostdata->selecting = NULL;
+
+ /* construct an IDENTIFY message with correct disconnect bit */
+
+ hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
+ if (cmd->SCp.phase)
+ hostdata->outgoing_msg[0] |= 0x40;
+
+ if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
+#ifdef SYNC_DEBUG
+ printk(" sending SDTR ");
+#endif
+
+ hostdata->sync_stat[cmd->device->id] = SS_WAITING;
+
+ /* tack on a 2nd message to ask about synchronous transfers */
+
+ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[2] = 3;
+ hostdata->outgoing_msg[3] = EXTENDED_SDTR;
+ hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
+ hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
+ hostdata->outgoing_len = 6;
+ } else
+ hostdata->outgoing_len = 1;
+
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE | PHS_DATA_IN:
+ case CSR_UNEXP | PHS_DATA_IN:
+ case CSR_SRV_REQ | PHS_DATA_IN:
+ DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
+ transfer_bytes(cmd, DATA_IN_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE | PHS_DATA_OUT:
+ case CSR_UNEXP | PHS_DATA_OUT:
+ case CSR_SRV_REQ | PHS_DATA_OUT:
+ DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
+ transfer_bytes(cmd, DATA_OUT_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_XFER_DONE | PHS_COMMAND:
+ case CSR_UNEXP | PHS_COMMAND:
+ case CSR_SRV_REQ | PHS_COMMAND:
+ DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
+ transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE | PHS_STATUS:
+ case CSR_UNEXP | PHS_STATUS:
+ case CSR_SRV_REQ | PHS_STATUS:
+ DB(DB_INTR, printk("STATUS="))
+
+ cmd->SCp.Status = read_1_byte(hostdata);
+ DB(DB_INTR, printk("%02x", cmd->SCp.Status))
+ if (hostdata->level2 >= L2_BASIC) {
+ sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ } else {
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+
+ case CSR_XFER_DONE | PHS_MESS_IN:
+ case CSR_UNEXP | PHS_MESS_IN:
+ case CSR_SRV_REQ | PHS_MESS_IN:
+ DB(DB_INTR, printk("MSG_IN="))
+
+ msg = read_1_byte(hostdata);
+ sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
+
+ hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
+ if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
+ msg = EXTENDED_MESSAGE;
+ else
+ hostdata->incoming_ptr = 0;
+
+ cmd->SCp.Message = msg;
+ switch (msg) {
+
+ case COMMAND_COMPLETE:
+ DB(DB_INTR, printk("CCMP"))
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_CMP_DISC;
+ break;
+
+ case SAVE_POINTERS:
+ DB(DB_INTR, printk("SDP"))
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case RESTORE_POINTERS:
+ DB(DB_INTR, printk("RDP"))
+ if (hostdata->level2 >= L2_BASIC) {
+ write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else {
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ case DISCONNECT:
+ DB(DB_INTR, printk("DIS"))
+ cmd->device->disconnect = 1;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_TMP_DISC;
+ break;
+
+ case MESSAGE_REJECT:
+ DB(DB_INTR, printk("REJ"))
+#ifdef SYNC_DEBUG
+ printk("-REJ-");
+#endif
+ if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
+ hostdata->sync_stat[cmd->device->id] = SS_SET;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case EXTENDED_MESSAGE:
+ DB(DB_INTR, printk("EXT"))
+
+ ucp = hostdata->incoming_msg;
+
+#ifdef SYNC_DEBUG
+ printk("%02x", ucp[hostdata->incoming_ptr]);
+#endif
+ /* Is this the last byte of the extended message? */
+
+ if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {
+
+ switch (ucp[2]) { /* what's the EXTENDED code? */
+ case EXTENDED_SDTR:
+ id = calc_sync_xfer(ucp[3], ucp[4]);
+ if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {
+
+/* A device has sent an unsolicited SDTR message; rather than go
+ * through the effort of decoding it and then figuring out what
+ * our reply should be, we're just gonna say that we have a
+ * synchronous fifo depth of 0. This will result in asynchronous
+ * transfers - not ideal but so much easier.
+ * Actually, this is OK because it assures us that if we don't
+ * specifically ask for sync transfers, we won't do any.
+ */
+
+ write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 3;
+ hostdata->outgoing_msg[2] = EXTENDED_SDTR;
+ hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
+ hostdata->outgoing_msg[4] = 0;
+ hostdata->outgoing_len = 5;
+ hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
+ } else {
+ hostdata->sync_xfer[cmd->device->id] = id;
+ }
+#ifdef SYNC_DEBUG
+ printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
+#endif
+ hostdata->sync_stat[cmd->device->id] = SS_SET;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ case EXTENDED_WDTR:
+ write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("sending WDTR ");
+ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 2;
+ hostdata->outgoing_msg[2] = EXTENDED_WDTR;
+ hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */
+ hostdata->outgoing_len = 4;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ default:
+ write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ }
+ hostdata->incoming_ptr = 0;
+ }
+
+ /* We need to read more MESS_IN bytes for the extended message */
+
+ else {
+ hostdata->incoming_ptr++;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ default:
+ printk("Rejecting Unknown Message(%02x) ", msg);
+ write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SEL_XFER_DONE:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
+ if (phs == 0x60) {
+ DB(DB_INTR, printk("SX-DONE"))
+ cmd->SCp.Message = COMMAND_COMPLETE;
+ lun = read_3393(hostdata, WD_TARGET_LUN);
+ DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
+ cmd->SCp.Status = lun;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ } else {
+ printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
+ }
+ break;
+
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SDP:
+ DB(DB_INTR, printk("SDP"))
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ break;
+
+
+ case CSR_XFER_DONE | PHS_MESS_OUT:
+ case CSR_UNEXP | PHS_MESS_OUT:
+ case CSR_SRV_REQ | PHS_MESS_OUT:
+ DB(DB_INTR, printk("MSG_OUT="))
+
+/* To get here, we've probably requested MESSAGE_OUT and have
+ * already put the correct bytes in outgoing_msg[] and filled
+ * in outgoing_len. We simply send them out to the SCSI bus.
+ * Sometimes we get MESSAGE_OUT phase when we're not expecting
+ * it - like when our SDTR message is rejected by a target. Some
+ * targets send the REJECT before receiving all of the extended
+ * message, and then seem to go back to MESSAGE_OUT for a byte
+ * or two. Not sure why, or if I'm doing something wrong to
+ * cause this to happen. Regardless, it seems that sending
+ * NOP messages in these situations results in no harm and
+ * makes everyone happy.
+ */
+ if (hostdata->outgoing_len == 0) {
+ hostdata->outgoing_len = 1;
+ hostdata->outgoing_msg[0] = NOP;
+ }
+ transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
+ DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
+ hostdata->outgoing_len = 0;
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_UNEXP_DISC:
+
+/* I think I've seen this after a request-sense that was in response
+ * to an error condition, but not sure. We certainly need to do
+ * something when we get this interrupt - the question is 'what?'.
+ * Let's think positively, and assume some command has finished
+ * in a legal manner (like a command that provokes a request-sense),
+ * so we treat it as a normal command-complete-disconnect.
+ */
+
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+
+/* release the SMP spin_lock and restore irq state */
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+ }
+ DB(DB_INTR, printk("UNEXP_DISC"))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+ case CSR_DISC:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
+ DB(DB_INTR, printk("DISC"))
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+ }
+ switch (hostdata->state) {
+ case S_PRE_CMP_DISC:
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->state = S_UNCONNECTED;
+ DB(DB_INTR, printk(":%d", cmd->SCp.Status))
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+ break;
+ case S_PRE_TMP_DISC:
+ case S_RUNNING_LEVEL2:
+ cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
+ hostdata->disconnected_Q = cmd;
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_done_cnt[cmd->device->id]++;
+#endif
+
+ break;
+ default:
+ printk("*** Unexpected DISCONNECT interrupt! ***");
+ hostdata->state = S_UNCONNECTED;
+ }
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+ case CSR_RESEL_AM:
+ DB(DB_INTR, printk("RESEL"))
+
+ /* First we have to make sure this reselection didn't */
+ /* happen during Arbitration/Selection of some other device. */
+ /* If yes, put losing command back on top of input_Q. */
+ if (hostdata->level2 <= L2_NONE) {
+
+ if (hostdata->selecting) {
+ cmd = (Scsi_Cmnd *) hostdata->selecting;
+ hostdata->selecting = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ cmd->host_scribble = (uchar *) hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ }
+
+ else {
+
+ if (cmd) {
+ if (phs == 0x00) {
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ cmd->host_scribble = (uchar *) hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ } else {
+ printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
+ while (1)
+ printk("\r");
+ }
+ }
+
+ }
+
+ /* OK - find out which device reselected us. */
+
+ id = read_3393(hostdata, WD_SOURCE_ID);
+ id &= SRCID_MASK;
+
+ /* and extract the lun from the ID message. (Note that we don't
+ * bother to check for a valid message here - I guess this is
+ * not the right way to go, but....)
+ */
+
+ lun = read_3393(hostdata, WD_DATA);
+ if (hostdata->level2 < L2_RESELECT)
+ write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
+ lun &= 7;
+
+ /* Now we look for the command that's reconnecting. */
+
+ cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
+ patch = NULL;
+ while (cmd) {
+ if (id == cmd->device->id && lun == cmd->device->lun)
+ break;
+ patch = cmd;
+ cmd = (Scsi_Cmnd *) cmd->host_scribble;
+ }
+
+ /* Hmm. Couldn't find a valid command.... What to do? */
+
+ if (!cmd) {
+ printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
+ break;
+ }
+
+ /* Ok, found the command - now start it up again. */
+
+ if (patch)
+ patch->host_scribble = cmd->host_scribble;
+ else
+ hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
+ hostdata->connected = cmd;
+
+ /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
+ * because these things are preserved over a disconnect.
+ * But we DO need to fix the DPD bit so it's correct for this command.
+ */
+
+ if (is_dir_out(cmd))
+ write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
+ else
+ write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
+ if (hostdata->level2 >= L2_RESELECT) {
+ write_3393_count(hostdata, 0); /* we want a DATA_PHASE interrupt */
+ write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else
+ hostdata->state = S_CONNECTED;
+
+ break;
+
+ default:
+ printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
+ }
+
+ write1_io(0, IO_LED_OFF);
+
+ DB(DB_INTR, printk("} "))
+
+/* release the SMP spin_lock and restore irq state */
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+
+#define RESET_CARD 0
+#define RESET_CARD_AND_BUS 1
+#define B_FLAG 0x80
+
+/*
+ * Caller must hold instance lock!
+ */
+
+static int reset_hardware(struct Scsi_Host *instance, int type)
+{
+ struct IN2000_hostdata *hostdata;
+ int qt, x;
+
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+
+ write1_io(0, IO_LED_ON);
+ if (type == RESET_CARD_AND_BUS) {
+ write1_io(0, IO_CARD_RESET);
+ x = read1_io(IO_HARDWARE);
+ }
+ x = read_3393(hostdata, WD_SCSI_STATUS); /* clear any WD intrpt */
+ write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));
+
+ write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */
+ write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
+ write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
+ /* FIXME: timeout ?? */
+ while (!(READ_AUX_STAT() & ASR_INT))
+ cpu_relax(); /* wait for RESET to complete */
+
+ x = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
+
+ write_3393(hostdata, WD_QUEUE_TAG, 0xa5); /* any random number */
+ qt = read_3393(hostdata, WD_QUEUE_TAG);
+ if (qt == 0xa5) {
+ x |= B_FLAG;
+ write_3393(hostdata, WD_QUEUE_TAG, 0);
+ }
+ write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write1_io(0, IO_LED_OFF);
+ return x;
+}
+
+
+
+static int in2000_bus_reset(Scsi_Cmnd * cmd)
+{
+ struct Scsi_Host *instance;
+ struct IN2000_hostdata *hostdata;
+ int x;
+ unsigned long flags;
+
+ instance = cmd->device->host;
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+
+ printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);
+
+ spin_lock_irqsave(instance->host_lock, flags);
+
+ /* do scsi-reset here */
+ reset_hardware(instance, RESET_CARD_AND_BUS);
+ for (x = 0; x < 8; x++) {
+ hostdata->busy[x] = 0;
+ hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
+ hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->fifo = FI_FIFO_UNUSED;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+
+ cmd->result = DID_RESET << 16;
+
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return SUCCESS;
+}
+
+static int __in2000_abort(Scsi_Cmnd * cmd)
+{
+ struct Scsi_Host *instance;
+ struct IN2000_hostdata *hostdata;
+ Scsi_Cmnd *tmp, *prev;
+ uchar sr, asr;
+ unsigned long timeout;
+
+ instance = cmd->device->host;
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+
+ printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
+ printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));
+
+/*
+ * Case 1 : If the command hasn't been issued yet, we simply remove it
+ * from the inout_Q.
+ */
+
+ tmp = (Scsi_Cmnd *) hostdata->input_Q;
+ prev = NULL;
+ while (tmp) {
+ if (tmp == cmd) {
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ cmd->host_scribble = NULL;
+ cmd->result = DID_ABORT << 16;
+ printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
+ cmd->scsi_done(cmd);
+ return SUCCESS;
+ }
+ prev = tmp;
+ tmp = (Scsi_Cmnd *) tmp->host_scribble;
+ }
+
+/*
+ * Case 2 : If the command is connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected == cmd) {
+
+ printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
+
+ printk("sending wd33c93 ABORT command - ");
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_cmd(hostdata, WD_CMD_ABORT);
+
+/* Now we have to attempt to flush out the FIFO... */
+
+ printk("flushing fifo - ");
+ timeout = 1000000;
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ read_3393(hostdata, WD_DATA);
+ } while (!(asr & ASR_INT) && timeout-- > 0);
+ sr = read_3393(hostdata, WD_SCSI_STATUS);
+ printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);
+
+ /*
+ * Abort command processed.
+ * Still connected.
+ * We must disconnect.
+ */
+
+ printk("sending wd33c93 DISCONNECT command - ");
+ write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
+
+ timeout = 1000000;
+ asr = READ_AUX_STAT();
+ while ((asr & ASR_CIP) && timeout-- > 0)
+ asr = READ_AUX_STAT();
+ sr = read_3393(hostdata, WD_SCSI_STATUS);
+ printk("asr=%02x, sr=%02x.", asr, sr);
+
+ hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+
+ in2000_execute(instance);
+
+ return SUCCESS;
+ }
+
+/*
+ * Case 3: If the command is currently disconnected from the bus,
+ * we're not going to expend much effort here: Let's just return
+ * an ABORT_SNOOZE and hope for the best...
+ */
+
+ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
+ return FAILED;
+ }
+
+/*
+ * Case 4 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+ in2000_execute(instance);
+
+ printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no);
+ return SUCCESS;
+}
+
+static int in2000_abort(Scsi_Cmnd * cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = __in2000_abort(cmd);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+
+#define MAX_IN2000_HOSTS 3
+#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
+#define SETUP_BUFFER_SIZE 200
+static char setup_buffer[SETUP_BUFFER_SIZE];
+static char setup_used[MAX_SETUP_ARGS];
+static int done_setup = 0;
+
+static void __init in2000_setup(char *str, int *ints)
+{
+ int i;
+ char *p1, *p2;
+
+ strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
+ p1 = setup_buffer;
+ i = 0;
+ while (*p1 && (i < MAX_SETUP_ARGS)) {
+ p2 = strchr(p1, ',');
+ if (p2) {
+ *p2 = '\0';
+ if (p1 != p2)
+ setup_args[i] = p1;
+ p1 = p2 + 1;
+ i++;
+ } else {
+ setup_args[i] = p1;
+ break;
+ }
+ }
+ for (i = 0; i < MAX_SETUP_ARGS; i++)
+ setup_used[i] = 0;
+ done_setup = 1;
+}
+
+
+/* check_setup_args() returns index if key found, 0 if not
+ */
+
+static int __init check_setup_args(char *key, int *val, char *buf)
+{
+ int x;
+ char *cp;
+
+ for (x = 0; x < MAX_SETUP_ARGS; x++) {
+ if (setup_used[x])
+ continue;
+ if (!strncmp(setup_args[x], key, strlen(key)))
+ break;
+ }
+ if (x == MAX_SETUP_ARGS)
+ return 0;
+ setup_used[x] = 1;
+ cp = setup_args[x] + strlen(key);
+ *val = -1;
+ if (*cp != ':')
+ return ++x;
+ cp++;
+ if ((*cp >= '0') && (*cp <= '9')) {
+ *val = simple_strtoul(cp, NULL, 0);
+ }
+ return ++x;
+}
+
+
+
+/* The "correct" (ie portable) way to access memory-mapped hardware
+ * such as the IN2000 EPROM and dip switch is through the use of
+ * special macros declared in 'asm/io.h'. We use readb() and readl()
+ * when reading from the card's BIOS area in in2000_detect().
+ */
+static u32 bios_tab[] in2000__INITDATA = {
+ 0xc8000,
+ 0xd0000,
+ 0xd8000,
+ 0
+};
+
+static unsigned short base_tab[] in2000__INITDATA = {
+ 0x220,
+ 0x200,
+ 0x110,
+ 0x100,
+};
+
+static int int_tab[] in2000__INITDATA = {
+ 15,
+ 14,
+ 11,
+ 10
+};
+
+static int probe_bios(u32 addr, u32 *s1, uchar *switches)
+{
+ void __iomem *p = ioremap(addr, 0x34);
+ if (!p)
+ return 0;
+ *s1 = readl(p + 0x10);
+ if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) {
+ /* Read the switch image that's mapped into EPROM space */
+ *switches = ~readb(p + 0x20);
+ iounmap(p);
+ return 1;
+ }
+ iounmap(p);
+ return 0;
+}
+
+static int __init in2000_detect(struct scsi_host_template * tpnt)
+{
+ struct Scsi_Host *instance;
+ struct IN2000_hostdata *hostdata;
+ int detect_count;
+ int bios;
+ int x;
+ unsigned short base;
+ uchar switches;
+ uchar hrev;
+ unsigned long flags;
+ int val;
+ char buf[32];
+
+/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
+ * pretty straightforward and fool-proof operation. There are 3
+ * possible locations for the IN2000 EPROM in memory space - if we
+ * find a BIOS signature, we can read the dip switch settings from
+ * the byte at BIOS+32 (shadowed in by logic on the card). From 2
+ * of the switch bits we get the card's address in IO space. There's
+ * an image of the dip switch there, also, so we have a way to back-
+ * check that this really is an IN2000 card. Very nifty. Use the
+ * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
+ * or disabled.
+ */
+
+ if (!done_setup && setup_strings)
+ in2000_setup(setup_strings, NULL);
+
+ detect_count = 0;
+ for (bios = 0; bios_tab[bios]; bios++) {
+ u32 s1 = 0;
+ if (check_setup_args("ioport", &val, buf)) {
+ base = val;
+ switches = ~inb(base + IO_SWITCHES) & 0xff;
+ printk("Forcing IN2000 detection at IOport 0x%x ", base);
+ bios = 2;
+ }
+/*
+ * There have been a couple of BIOS versions with different layouts
+ * for the obvious ID strings. We look for the 2 most common ones and
+ * hope that they cover all the cases...
+ */
+ else if (probe_bios(bios_tab[bios], &s1, &switches)) {
+ printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);
+
+/* Find out where the IO space is */
+
+ x = switches & (SW_ADDR0 | SW_ADDR1);
+ base = base_tab[x];
+
+/* Check for the IN2000 signature in IO space. */
+
+ x = ~inb(base + IO_SWITCHES) & 0xff;
+ if (x != switches) {
+ printk("Bad IO signature: %02x vs %02x.\n", x, switches);
+ continue;
+ }
+ } else
+ continue;
+
+/* OK. We have a base address for the IO ports - run a few safety checks */
+
+ if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */
+ printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
+ continue;
+ }
+
+/* Let's assume any hardware version will work, although the driver
+ * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
+ * print out the rev number for reference later, but accept them all.
+ */
+
+ hrev = inb(base + IO_HARDWARE);
+
+ /* Bit 2 tells us if interrupts are disabled */
+ if (switches & SW_DISINT) {
+ printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
+ printk("is not configured for interrupt operation!\n");
+ printk("This driver requires an interrupt: cancelling detection.\n");
+ continue;
+ }
+
+/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
+ * initialize it.
+ */
+
+ tpnt->proc_name = "in2000";
+ instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
+ if (instance == NULL)
+ continue;
+ detect_count++;
+ hostdata = (struct IN2000_hostdata *) instance->hostdata;
+ instance->io_port = hostdata->io_base = base;
+ hostdata->dip_switch = switches;
+ hostdata->hrev = hrev;
+
+ write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */
+ write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
+ write1_io(0, IO_INTR_MASK); /* allow all ints */
+ x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
+ if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
+ printk("in2000_detect: Unable to allocate IRQ.\n");
+ detect_count--;
+ continue;
+ }
+ instance->irq = x;
+ instance->n_io_port = 13;
+ request_region(base, 13, "in2000"); /* lock in this IO space for our use */
+
+ for (x = 0; x < 8; x++) {
+ hostdata->busy[x] = 0;
+ hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
+ hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[x] = 0;
+ hostdata->disc_allowed_cnt[x] = 0;
+ hostdata->disc_done_cnt[x] = 0;
+#endif
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->fifo = FI_FIFO_UNUSED;
+ hostdata->level2 = L2_BASIC;
+ hostdata->disconnect = DIS_ADAPTIVE;
+ hostdata->args = DEBUG_DEFAULTS;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+ hostdata->default_sx_per = DEFAULT_SX_PER;
+
+/* Older BIOS's had a 'sync on/off' switch - use its setting */
+
+ if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
+ hostdata->sync_off = 0x00; /* sync defaults to on */
+ else
+ hostdata->sync_off = 0xff; /* sync defaults to off */
+
+#ifdef PROC_INTERFACE
+ hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt = 0;
+#endif
+#endif
+
+ if (check_setup_args("nosync", &val, buf))
+ hostdata->sync_off = val;
+
+ if (check_setup_args("period", &val, buf))
+ hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;
+
+ if (check_setup_args("disconnect", &val, buf)) {
+ if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
+ hostdata->disconnect = val;
+ else
+ hostdata->disconnect = DIS_ADAPTIVE;
+ }
+
+ if (check_setup_args("noreset", &val, buf))
+ hostdata->args ^= A_NO_SCSI_RESET;
+
+ if (check_setup_args("level2", &val, buf))
+ hostdata->level2 = val;
+
+ if (check_setup_args("debug", &val, buf))
+ hostdata->args = (val & DB_MASK);
+
+#ifdef PROC_INTERFACE
+ if (check_setup_args("proc", &val, buf))
+ hostdata->proc = val;
+#endif
+
+
+ /* FIXME: not strictly needed I think but the called code expects
+ to be locked */
+ spin_lock_irqsave(instance->host_lock, flags);
+ x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+
+ hostdata->microcode = read_3393(hostdata, WD_CDB_1);
+ if (x & 0x01) {
+ if (x & B_FLAG)
+ hostdata->chip = C_WD33C93B;
+ else
+ hostdata->chip = C_WD33C93A;
+ } else
+ hostdata->chip = C_WD33C93;
+
+ printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
+ printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
+#ifdef DEBUGGING_ON
+ printk("setup_args = ");
+ for (x = 0; x < MAX_SETUP_ARGS; x++)
+ printk("%s,", setup_args[x]);
+ printk("\n");
+#endif
+ if (hostdata->sync_off == 0xff)
+ printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
+ printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
+ }
+
+ return detect_count;
+}
+
+static int in2000_release(struct Scsi_Host *shost)
+{
+ if (shost->irq)
+ free_irq(shost->irq, shost);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ return 0;
+}
+
+/* NOTE: I lifted this function straight out of the old driver,
+ * and have not tested it. Presumably it does what it's
+ * supposed to do...
+ */
+
+static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
+{
+ int size;
+
+ size = capacity;
+ iinfo[0] = 64;
+ iinfo[1] = 32;
+ iinfo[2] = size >> 11;
+
+/* This should approximate the large drive handling that the DOS ASPI manager
+ uses. Drives very near the boundaries may not be handled correctly (i.e.
+ near 2.0 Gb and 4.0 Gb) */
+
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 64;
+ iinfo[1] = 63;
+ iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 128;
+ iinfo[1] = 63;
+ iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 255;
+ iinfo[1] = 63;
+ iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
+ }
+ return 0;
+}
+
+
+static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len)
+{
+
+#ifdef PROC_INTERFACE
+
+ char *bp;
+ struct IN2000_hostdata *hd;
+ int x, i;
+
+ hd = (struct IN2000_hostdata *) instance->hostdata;
+
+ buf[len] = '\0';
+ bp = buf;
+ if (!strncmp(bp, "debug:", 6)) {
+ bp += 6;
+ hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
+ } else if (!strncmp(bp, "disconnect:", 11)) {
+ bp += 11;
+ x = simple_strtoul(bp, NULL, 0);
+ if (x < DIS_NEVER || x > DIS_ALWAYS)
+ x = DIS_ADAPTIVE;
+ hd->disconnect = x;
+ } else if (!strncmp(bp, "period:", 7)) {
+ bp += 7;
+ x = simple_strtoul(bp, NULL, 0);
+ hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
+ } else if (!strncmp(bp, "resync:", 7)) {
+ bp += 7;
+ x = simple_strtoul(bp, NULL, 0);
+ for (i = 0; i < 7; i++)
+ if (x & (1 << i))
+ hd->sync_stat[i] = SS_UNSET;
+ } else if (!strncmp(bp, "proc:", 5)) {
+ bp += 5;
+ hd->proc = simple_strtoul(bp, NULL, 0);
+ } else if (!strncmp(bp, "level2:", 7)) {
+ bp += 7;
+ hd->level2 = simple_strtoul(bp, NULL, 0);
+ }
+#endif
+ return len;
+}
+
+static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+
+#ifdef PROC_INTERFACE
+ unsigned long flags;
+ struct IN2000_hostdata *hd;
+ Scsi_Cmnd *cmd;
+ int x;
+
+ hd = (struct IN2000_hostdata *) instance->hostdata;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ if (hd->proc & PR_VERSION)
+ seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
+
+ if (hd->proc & PR_INFO) {
+ seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
+ seq_puts(m, "\nsync_xfer[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_xfer[x]);
+ seq_puts(m, "\nsync_stat[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_stat[x]);
+ }
+#ifdef PROC_STATISTICS
+ if (hd->proc & PR_STATISTICS) {
+ seq_puts(m, "\ncommands issued: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
+ seq_puts(m, "\ndisconnects allowed:");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
+ seq_puts(m, "\ndisconnects done: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
+ seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt);
+ }
+#endif
+ if (hd->proc & PR_CONNECTED) {
+ seq_puts(m, "\nconnected: ");
+ if (hd->connected) {
+ cmd = (Scsi_Cmnd *) hd->connected;
+ seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ }
+ }
+ if (hd->proc & PR_INPUTQ) {
+ seq_puts(m, "\ninput_Q: ");
+ cmd = (Scsi_Cmnd *) hd->input_Q;
+ while (cmd) {
+ seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ cmd = (Scsi_Cmnd *) cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_DISCQ) {
+ seq_puts(m, "\ndisconnected_Q:");
+ cmd = (Scsi_Cmnd *) hd->disconnected_Q;
+ while (cmd) {
+ seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ cmd = (Scsi_Cmnd *) cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_TEST) {
+ ; /* insert your own custom function here */
+ }
+ seq_putc(m, '\n');
+ spin_unlock_irqrestore(instance->host_lock, flags);
+#endif /* PROC_INTERFACE */
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+
+
+static struct scsi_host_template driver_template = {
+ .proc_name = "in2000",
+ .write_info = in2000_write_info,
+ .show_info = in2000_show_info,
+ .name = "Always IN2000",
+ .detect = in2000_detect,
+ .release = in2000_release,
+ .queuecommand = in2000_queuecommand,
+ .eh_abort_handler = in2000_abort,
+ .eh_bus_reset_handler = in2000_bus_reset,
+ .bios_param = in2000_biosparam,
+ .can_queue = IN2000_CAN_Q,
+ .this_id = IN2000_HOST_ID,
+ .sg_tablesize = IN2000_SG,
+ .cmd_per_lun = IN2000_CPL,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+#include "scsi_module.c"
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h
new file mode 100644
index 000000000..5821e1fbc
--- /dev/null
+++ b/drivers/scsi/in2000.h
@@ -0,0 +1,412 @@
+/*
+ * in2000.h - Linux device driver definitions for the
+ * Always IN2000 ISA SCSI card.
+ *
+ * IMPORTANT: This file is for version 1.33 - 26/Aug/1998
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef IN2000_H
+#define IN2000_H
+
+#include <asm/io.h>
+
+#define PROC_INTERFACE /* add code for /proc/scsi/in2000/xxx interface */
+#ifdef PROC_INTERFACE
+#define PROC_STATISTICS /* add code for keeping various real time stats */
+#endif
+
+#define SYNC_DEBUG /* extra info on sync negotiation printed */
+#define DEBUGGING_ON /* enable command-line debugging bitmask */
+#define DEBUG_DEFAULTS 0 /* default bitmask - change from command-line */
+
+#ifdef __i386__
+#define FAST_READ_IO /* No problems with these on my machine */
+#define FAST_WRITE_IO
+#endif
+
+#ifdef DEBUGGING_ON
+#define DB(f,a) if (hostdata->args & (f)) a;
+#define CHECK_NULL(p,s) /* if (!(p)) {printk("\n"); while (1) printk("NP:%s\r",(s));} */
+#else
+#define DB(f,a)
+#define CHECK_NULL(p,s)
+#endif
+
+#define uchar unsigned char
+
+#define read1_io(a) (inb(hostdata->io_base+(a)))
+#define read2_io(a) (inw(hostdata->io_base+(a)))
+#define write1_io(b,a) (outb((b),hostdata->io_base+(a)))
+#define write2_io(w,a) (outw((w),hostdata->io_base+(a)))
+
+#ifdef __i386__
+/* These inline assembly defines are derived from a patch
+ * sent to me by Bill Earnest. He's done a lot of very
+ * valuable thinking, testing, and coding during his effort
+ * to squeeze more speed out of this driver. I really think
+ * that we are doing IO at close to the maximum now with
+ * the fifo. (And yes, insw uses 'edi' while outsw uses
+ * 'esi'. Thanks Bill!)
+ */
+
+#define FAST_READ2_IO() \
+({ \
+int __dummy_1,__dummy_2; \
+ __asm__ __volatile__ ("\n \
+ cld \n \
+ orl %%ecx, %%ecx \n \
+ jz 1f \n \
+ rep \n \
+ insw (%%dx),%%es:(%%edi) \n \
+1: " \
+ : "=D" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2) /* output */ \
+ : "2" (f), "0" (sp), "1" (i) /* input */ \
+ ); /* trashed */ \
+})
+
+#define FAST_WRITE2_IO() \
+({ \
+int __dummy_1,__dummy_2; \
+ __asm__ __volatile__ ("\n \
+ cld \n \
+ orl %%ecx, %%ecx \n \
+ jz 1f \n \
+ rep \n \
+ outsw %%ds:(%%esi),(%%dx) \n \
+1: " \
+ : "=S" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)/* output */ \
+ : "2" (f), "0" (sp), "1" (i) /* input */ \
+ ); /* trashed */ \
+})
+#endif
+
+/* IN2000 io_port offsets */
+#define IO_WD_ASR 0x00 /* R - 3393 auxstat reg */
+#define ASR_INT 0x80
+#define ASR_LCI 0x40
+#define ASR_BSY 0x20
+#define ASR_CIP 0x10
+#define ASR_PE 0x02
+#define ASR_DBR 0x01
+#define IO_WD_ADDR 0x00 /* W - 3393 address reg */
+#define IO_WD_DATA 0x01 /* R/W - rest of 3393 regs */
+#define IO_FIFO 0x02 /* R/W - in2000 dual-port fifo (16 bits) */
+#define IN2000_FIFO_SIZE 2048 /* fifo capacity in bytes */
+#define IO_CARD_RESET 0x03 /* W - in2000 start master reset */
+#define IO_FIFO_COUNT 0x04 /* R - in2000 fifo counter */
+#define IO_FIFO_WRITE 0x05 /* W - clear fifo counter, start write */
+#define IO_FIFO_READ 0x07 /* W - start fifo read */
+#define IO_LED_OFF 0x08 /* W - turn off in2000 activity LED */
+#define IO_SWITCHES 0x08 /* R - read in2000 dip switch */
+#define SW_ADDR0 0x01 /* bit 0 = bit 0 of index to io addr */
+#define SW_ADDR1 0x02 /* bit 1 = bit 1 of index io addr */
+#define SW_DISINT 0x04 /* bit 2 true if ints disabled */
+#define SW_INT0 0x08 /* bit 3 = bit 0 of index to interrupt */
+#define SW_INT1 0x10 /* bit 4 = bit 1 of index to interrupt */
+#define SW_INT_SHIFT 3 /* shift right this amount to right justify int bits */
+#define SW_SYNC_DOS5 0x20 /* bit 5 used by Always BIOS */
+#define SW_FLOPPY 0x40 /* bit 6 true if floppy enabled */
+#define SW_BIT7 0x80 /* bit 7 hardwired true (ground) */
+#define IO_LED_ON 0x09 /* W - turn on in2000 activity LED */
+#define IO_HARDWARE 0x0a /* R - read in2000 hardware rev, stop reset */
+#define IO_INTR_MASK 0x0c /* W - in2000 interrupt mask reg */
+#define IMASK_WD 0x01 /* WD33c93 interrupt mask */
+#define IMASK_FIFO 0x02 /* FIFO interrupt mask */
+
+/* wd register names */
+#define WD_OWN_ID 0x00
+#define WD_CONTROL 0x01
+#define WD_TIMEOUT_PERIOD 0x02
+#define WD_CDB_1 0x03
+#define WD_CDB_2 0x04
+#define WD_CDB_3 0x05
+#define WD_CDB_4 0x06
+#define WD_CDB_5 0x07
+#define WD_CDB_6 0x08
+#define WD_CDB_7 0x09
+#define WD_CDB_8 0x0a
+#define WD_CDB_9 0x0b
+#define WD_CDB_10 0x0c
+#define WD_CDB_11 0x0d
+#define WD_CDB_12 0x0e
+#define WD_TARGET_LUN 0x0f
+#define WD_COMMAND_PHASE 0x10
+#define WD_SYNCHRONOUS_TRANSFER 0x11
+#define WD_TRANSFER_COUNT_MSB 0x12
+#define WD_TRANSFER_COUNT 0x13
+#define WD_TRANSFER_COUNT_LSB 0x14
+#define WD_DESTINATION_ID 0x15
+#define WD_SOURCE_ID 0x16
+#define WD_SCSI_STATUS 0x17
+#define WD_COMMAND 0x18
+#define WD_DATA 0x19
+#define WD_QUEUE_TAG 0x1a
+#define WD_AUXILIARY_STATUS 0x1f
+
+/* WD commands */
+#define WD_CMD_RESET 0x00
+#define WD_CMD_ABORT 0x01
+#define WD_CMD_ASSERT_ATN 0x02
+#define WD_CMD_NEGATE_ACK 0x03
+#define WD_CMD_DISCONNECT 0x04
+#define WD_CMD_RESELECT 0x05
+#define WD_CMD_SEL_ATN 0x06
+#define WD_CMD_SEL 0x07
+#define WD_CMD_SEL_ATN_XFER 0x08
+#define WD_CMD_SEL_XFER 0x09
+#define WD_CMD_RESEL_RECEIVE 0x0a
+#define WD_CMD_RESEL_SEND 0x0b
+#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
+#define WD_CMD_TRANS_ADDR 0x18
+#define WD_CMD_TRANS_INFO 0x20
+#define WD_CMD_TRANSFER_PAD 0x21
+#define WD_CMD_SBT_MODE 0x80
+
+/* SCSI Bus Phases */
+#define PHS_DATA_OUT 0x00
+#define PHS_DATA_IN 0x01
+#define PHS_COMMAND 0x02
+#define PHS_STATUS 0x03
+#define PHS_MESS_OUT 0x06
+#define PHS_MESS_IN 0x07
+
+/* Command Status Register definitions */
+
+ /* reset state interrupts */
+#define CSR_RESET 0x00
+#define CSR_RESET_AF 0x01
+
+ /* successful completion interrupts */
+#define CSR_RESELECT 0x10
+#define CSR_SELECT 0x11
+#define CSR_SEL_XFER_DONE 0x16
+#define CSR_XFER_DONE 0x18
+
+ /* paused or aborted interrupts */
+#define CSR_MSGIN 0x20
+#define CSR_SDP 0x21
+#define CSR_SEL_ABORT 0x22
+#define CSR_RESEL_ABORT 0x25
+#define CSR_RESEL_ABORT_AM 0x27
+#define CSR_ABORT 0x28
+
+ /* terminated interrupts */
+#define CSR_INVALID 0x40
+#define CSR_UNEXP_DISC 0x41
+#define CSR_TIMEOUT 0x42
+#define CSR_PARITY 0x43
+#define CSR_PARITY_ATN 0x44
+#define CSR_BAD_STATUS 0x45
+#define CSR_UNEXP 0x48
+
+ /* service required interrupts */
+#define CSR_RESEL 0x80
+#define CSR_RESEL_AM 0x81
+#define CSR_DISC 0x85
+#define CSR_SRV_REQ 0x88
+
+ /* Own ID/CDB Size register */
+#define OWNID_EAF 0x08
+#define OWNID_EHP 0x10
+#define OWNID_RAF 0x20
+#define OWNID_FS_8 0x00
+#define OWNID_FS_12 0x40
+#define OWNID_FS_16 0x80
+
+ /* Control register */
+#define CTRL_HSP 0x01
+#define CTRL_HA 0x02
+#define CTRL_IDI 0x04
+#define CTRL_EDI 0x08
+#define CTRL_HHP 0x10
+#define CTRL_POLLED 0x00
+#define CTRL_BURST 0x20
+#define CTRL_BUS 0x40
+#define CTRL_DMA 0x80
+
+ /* Timeout Period register */
+#define TIMEOUT_PERIOD_VALUE 20 /* results in 200 ms. */
+
+ /* Synchronous Transfer Register */
+#define STR_FSS 0x80
+
+ /* Destination ID register */
+#define DSTID_DPD 0x40
+#define DATA_OUT_DIR 0
+#define DATA_IN_DIR 1
+#define DSTID_SCC 0x80
+
+ /* Source ID register */
+#define SRCID_MASK 0x07
+#define SRCID_SIV 0x08
+#define SRCID_DSP 0x20
+#define SRCID_ES 0x40
+#define SRCID_ER 0x80
+
+
+
+#define ILLEGAL_STATUS_BYTE 0xff
+
+
+#define DEFAULT_SX_PER 500 /* (ns) fairly safe */
+#define DEFAULT_SX_OFF 0 /* aka async */
+
+#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */
+#define OPTIMUM_SX_OFF 12 /* size of in2000 fifo */
+
+struct sx_period {
+ unsigned int period_ns;
+ uchar reg_value;
+ };
+
+
+struct IN2000_hostdata {
+ struct Scsi_Host *next;
+ uchar chip; /* what kind of wd33c93 chip? */
+ uchar microcode; /* microcode rev if 'B' */
+ unsigned short io_base; /* IO port base */
+ unsigned int dip_switch; /* dip switch settings */
+ unsigned int hrev; /* hardware revision of card */
+ volatile uchar busy[8]; /* index = target, bit = lun */
+ volatile Scsi_Cmnd *input_Q; /* commands waiting to be started */
+ volatile Scsi_Cmnd *selecting; /* trying to select this command */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile Scsi_Cmnd *disconnected_Q;/* commands waiting for reconnect */
+ uchar state; /* what we are currently doing */
+ uchar fifo; /* what the FIFO is up to */
+ uchar level2; /* extent to which Level-2 commands are used */
+ uchar disconnect; /* disconnect/reselect policy */
+ unsigned int args; /* set from command-line argument */
+ uchar incoming_msg[8]; /* filled during message_in phase */
+ int incoming_ptr; /* mainly used with EXTENDED messages */
+ uchar outgoing_msg[8]; /* send this during next message_out */
+ int outgoing_len; /* length of outgoing message */
+ unsigned int default_sx_per; /* default transfer period for SCSI bus */
+ uchar sync_xfer[8]; /* sync_xfer reg settings per target */
+ uchar sync_stat[8]; /* status of sync negotiation per target */
+ uchar sync_off; /* bit mask: don't use sync with these targets */
+#ifdef PROC_INTERFACE
+ uchar proc; /* bit mask: what's in proc output */
+#ifdef PROC_STATISTICS
+ unsigned long cmd_cnt[8]; /* # of commands issued per target */
+ unsigned long int_cnt; /* # of interrupts serviced */
+ unsigned long disc_allowed_cnt[8]; /* # of disconnects allowed per target */
+ unsigned long disc_done_cnt[8]; /* # of disconnects done per target*/
+#endif
+#endif
+ };
+
+
+/* defines for hostdata->chip */
+
+#define C_WD33C93 0
+#define C_WD33C93A 1
+#define C_WD33C93B 2
+#define C_UNKNOWN_CHIP 100
+
+/* defines for hostdata->state */
+
+#define S_UNCONNECTED 0
+#define S_SELECTING 1
+#define S_RUNNING_LEVEL2 2
+#define S_CONNECTED 3
+#define S_PRE_TMP_DISC 4
+#define S_PRE_CMP_DISC 5
+
+/* defines for hostdata->fifo */
+
+#define FI_FIFO_UNUSED 0
+#define FI_FIFO_READING 1
+#define FI_FIFO_WRITING 2
+
+/* defines for hostdata->level2 */
+/* NOTE: only the first 3 are trustworthy at this point -
+ * having trouble when more than 1 device is reading/writing
+ * at the same time...
+ */
+
+#define L2_NONE 0 /* no combination commands - we get lots of ints */
+#define L2_SELECT 1 /* start with SEL_ATN_XFER, but never resume it */
+#define L2_BASIC 2 /* resume after STATUS ints & RDP messages */
+#define L2_DATA 3 /* resume after DATA_IN/OUT ints */
+#define L2_MOST 4 /* resume after anything except a RESELECT int */
+#define L2_RESELECT 5 /* resume after everything, including RESELECT ints */
+#define L2_ALL 6 /* always resume */
+
+/* defines for hostdata->disconnect */
+
+#define DIS_NEVER 0
+#define DIS_ADAPTIVE 1
+#define DIS_ALWAYS 2
+
+/* defines for hostdata->args */
+
+#define DB_TEST 1<<0
+#define DB_FIFO 1<<1
+#define DB_QUEUE_COMMAND 1<<2
+#define DB_EXECUTE 1<<3
+#define DB_INTR 1<<4
+#define DB_TRANSFER 1<<5
+#define DB_MASK 0x3f
+
+#define A_NO_SCSI_RESET 1<<15
+
+
+/* defines for hostdata->sync_xfer[] */
+
+#define SS_UNSET 0
+#define SS_FIRST 1
+#define SS_WAITING 2
+#define SS_SET 3
+
+/* defines for hostdata->proc */
+
+#define PR_VERSION 1<<0
+#define PR_INFO 1<<1
+#define PR_STATISTICS 1<<2
+#define PR_CONNECTED 1<<3
+#define PR_INPUTQ 1<<4
+#define PR_DISCQ 1<<5
+#define PR_TEST 1<<6
+#define PR_STOP 1<<7
+
+
+# include <linux/init.h>
+# include <linux/spinlock.h>
+# define in2000__INITFUNC(function) __initfunc(function)
+# define in2000__INIT __init
+# define in2000__INITDATA __initdata
+# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(host->host_lock, flags)
+# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(host->host_lock, \
+ flags)
+
+static int in2000_detect(struct scsi_host_template *) in2000__INIT;
+static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static int in2000_abort(Scsi_Cmnd *);
+static void in2000_setup(char *, int *) in2000__INIT;
+static int in2000_biosparam(struct scsi_device *, struct block_device *,
+ sector_t, int *);
+static int in2000_bus_reset(Scsi_Cmnd *);
+
+
+#define IN2000_CAN_Q 16
+#define IN2000_SG SG_ALL
+#define IN2000_CPL 2
+#define IN2000_HOST_ID 7
+
+#endif /* IN2000_H */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
new file mode 100644
index 000000000..e5dae7b54
--- /dev/null
+++ b/drivers/scsi/initio.c
@@ -0,0 +1,3013 @@
+/**************************************************************************
+ * Initio 9100 device driver for Linux.
+ *
+ * Copyright (c) 1994-1998 Initio Corporation
+ * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
+ * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2007 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *************************************************************************
+ *
+ * DESCRIPTION:
+ *
+ * This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host
+ * adapters
+ *
+ * 08/06/97 hc - v1.01h
+ * - Support inic-940 and inic-935
+ * 09/26/97 hc - v1.01i
+ * - Make correction from J.W. Schultz suggestion
+ * 10/13/97 hc - Support reset function
+ * 10/21/97 hc - v1.01j
+ * - Support 32 LUN (SCSI 3)
+ * 01/14/98 hc - v1.01k
+ * - Fix memory allocation problem
+ * 03/04/98 hc - v1.01l
+ * - Fix tape rewind which will hang the system problem
+ * - Set can_queue to initio_num_scb
+ * 06/25/98 hc - v1.01m
+ * - Get it work for kernel version >= 2.1.75
+ * - Dynamic assign SCSI bus reset holding time in initio_init()
+ * 07/02/98 hc - v1.01n
+ * - Support 0002134A
+ * 08/07/98 hc - v1.01o
+ * - Change the initio_abort_srb routine to use scsi_done. <01>
+ * 09/07/98 hl - v1.02
+ * - Change the INI9100U define and proc_dir_entry to
+ * reflect the newer Kernel 2.1.118, but the v1.o1o
+ * should work with Kernel 2.1.118.
+ * 09/20/98 wh - v1.02a
+ * - Support Abort command.
+ * - Handle reset routine.
+ * 09/21/98 hl - v1.03
+ * - remove comments.
+ * 12/09/98 bv - v1.03a
+ * - Removed unused code
+ * 12/13/98 bv - v1.03b
+ * - Remove cli() locking for kernels >= 2.1.95. This uses
+ * spinlocks to serialize access to the pSRB_head and
+ * pSRB_tail members of the HCS structure.
+ * 09/01/99 bv - v1.03d
+ * - Fixed a deadlock problem in SMP.
+ * 21/01/99 bv - v1.03e
+ * - Add support for the Domex 3192U PCI SCSI
+ * This is a slightly modified patch by
+ * Brian Macy <bmacy@sunshinecomputing.com>
+ * 22/02/99 bv - v1.03f
+ * - Didn't detect the INIC-950 in 2.0.x correctly.
+ * Now fixed.
+ * 05/07/99 bv - v1.03g
+ * - Changed the assumption that HZ = 100
+ * 10/17/03 mc - v1.04
+ * - added new DMA API support
+ * 06/01/04 jmd - v1.04a
+ * - Re-add reset_bus support
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "initio.h"
+
+#define SENSE_SIZE 14
+
+#define i91u_MAXQUEUE 2
+#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
+
+#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */
+#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */
+#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */
+#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */
+
+#ifdef DEBUG_i91u
+static unsigned int i91u_debug = DEBUG_DEFAULT;
+#endif
+
+static int initio_tag_enable = 1;
+
+#ifdef DEBUG_i91u
+static int setup_debug = 0;
+#endif
+
+static void i91uSCBPost(u8 * pHcb, u8 * pScb);
+
+/* PCI Devices supported by this driver */
+static struct pci_device_id i91u_pci_devices[] = {
+ { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
+
+#define DEBUG_INTERRUPT 0
+#define DEBUG_QUEUE 0
+#define DEBUG_STATE 0
+#define INT_DISC 0
+
+/*--- forward references ---*/
+static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
+static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
+
+static int tulip_main(struct initio_host * host);
+
+static int initio_next_state(struct initio_host * host);
+static int initio_state_1(struct initio_host * host);
+static int initio_state_2(struct initio_host * host);
+static int initio_state_3(struct initio_host * host);
+static int initio_state_4(struct initio_host * host);
+static int initio_state_5(struct initio_host * host);
+static int initio_state_6(struct initio_host * host);
+static int initio_state_7(struct initio_host * host);
+static int initio_xfer_data_in(struct initio_host * host);
+static int initio_xfer_data_out(struct initio_host * host);
+static int initio_xpad_in(struct initio_host * host);
+static int initio_xpad_out(struct initio_host * host);
+static int initio_status_msg(struct initio_host * host);
+
+static int initio_msgin(struct initio_host * host);
+static int initio_msgin_sync(struct initio_host * host);
+static int initio_msgin_accept(struct initio_host * host);
+static int initio_msgout_reject(struct initio_host * host);
+static int initio_msgin_extend(struct initio_host * host);
+
+static int initio_msgout_ide(struct initio_host * host);
+static int initio_msgout_abort_targ(struct initio_host * host);
+static int initio_msgout_abort_tag(struct initio_host * host);
+
+static int initio_bus_device_reset(struct initio_host * host);
+static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static int int_initio_busfree(struct initio_host * host);
+static int int_initio_scsi_rst(struct initio_host * host);
+static int int_initio_bad_seq(struct initio_host * host);
+static int int_initio_resel(struct initio_host * host);
+static int initio_sync_done(struct initio_host * host);
+static int wdtr_done(struct initio_host * host);
+static int wait_tulip(struct initio_host * host);
+static int initio_wait_done_disc(struct initio_host * host);
+static int initio_wait_disc(struct initio_host * host);
+static void tulip_scsi(struct initio_host * host);
+static int initio_post_scsi_rst(struct initio_host * host);
+
+static void initio_se2_ew_en(unsigned long base);
+static void initio_se2_ew_ds(unsigned long base);
+static int initio_se2_rd_all(unsigned long base);
+static void initio_se2_update_all(unsigned long base); /* setup default pattern */
+static void initio_read_eeprom(unsigned long base);
+
+/* ---- INTERNAL VARIABLES ---- */
+
+static NVRAM i91unvram;
+static NVRAM *i91unvramp;
+
+static u8 i91udftNvRam[64] =
+{
+ /*----------- header -----------*/
+ 0x25, 0xc9, /* Signature */
+ 0x40, /* Size */
+ 0x01, /* Revision */
+ /* -- Host Adapter Structure -- */
+ 0x95, /* ModelByte0 */
+ 0x00, /* ModelByte1 */
+ 0x00, /* ModelInfo */
+ 0x01, /* NumOfCh */
+ NBC1_DEFAULT, /* BIOSConfig1 */
+ 0, /* BIOSConfig2 */
+ 0, /* HAConfig1 */
+ 0, /* HAConfig2 */
+ /* SCSI channel 0 and target Structure */
+ 7, /* SCSIid */
+ NCC1_DEFAULT, /* SCSIconfig1 */
+ 0, /* SCSIconfig2 */
+ 0x10, /* NumSCSItarget */
+
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+
+ /* SCSI channel 1 and target Structure */
+ 7, /* SCSIid */
+ NCC1_DEFAULT, /* SCSIconfig1 */
+ 0, /* SCSIconfig2 */
+ 0x10, /* NumSCSItarget */
+
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0}; /* - CheckSum - */
+
+
+static u8 initio_rate_tbl[8] = /* fast 20 */
+{
+ /* nanosecond divide by 4 */
+ 12, /* 50ns, 20M */
+ 18, /* 75ns, 13.3M */
+ 25, /* 100ns, 10M */
+ 31, /* 125ns, 8M */
+ 37, /* 150ns, 6.6M */
+ 43, /* 175ns, 5.7M */
+ 50, /* 200ns, 5M */
+ 62 /* 250ns, 4M */
+};
+
+static void initio_do_pause(unsigned amount)
+{
+ /* Pause for amount jiffies */
+ unsigned long the_time = jiffies + amount;
+
+ while (time_before_eq(jiffies, the_time))
+ cpu_relax();
+}
+
+/*-- forward reference --*/
+
+/******************************************************************
+ Input: instruction for Serial E2PROM
+
+ EX: se2_rd(0 call se2_instr() to send address and read command
+
+ StartBit OP_Code Address Data
+ --------- -------- ------------------ -------
+ 1 1 , 0 A5,A4,A3,A2,A1,A0 D15-D0
+
+ +-----------------------------------------------------
+ |
+ CS -----+
+ +--+ +--+ +--+ +--+ +--+
+ ^ | ^ | ^ | ^ | ^ |
+ | | | | | | | | | |
+ CLK -------+ +--+ +--+ +--+ +--+ +--
+ (leading edge trigger)
+
+ +--1-----1--+
+ | SB OP | OP A5 A4
+ DI ----+ +--0------------------
+ (address and cmd sent to nvram)
+
+ -------------------------------------------+
+ |
+ DO +---
+ (data sent from nvram)
+
+
+******************************************************************/
+
+/**
+ * initio_se2_instr - bitbang an instruction
+ * @base: Base of InitIO controller
+ * @instr: Instruction for serial E2PROM
+ *
+ * Bitbang an instruction out to the serial E2Prom
+ */
+
+static void initio_se2_instr(unsigned long base, u8 instr)
+{
+ int i;
+ u8 b;
+
+ outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
+ udelay(30);
+ outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+
+ for (i = 0; i < 8; i++) {
+ if (instr & 0x80)
+ b = SE2CS | SE2DO; /* -CLK+dataBit */
+ else
+ b = SE2CS; /* -CLK */
+ outb(b, base + TUL_NVRAM);
+ udelay(30);
+ outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ instr <<= 1;
+ }
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+}
+
+
+/**
+ * initio_se2_ew_en - Enable erase/write
+ * @base: Base address of InitIO controller
+ *
+ * Enable erase/write state of serial EEPROM
+ */
+void initio_se2_ew_en(unsigned long base)
+{
+ initio_se2_instr(base, 0x30); /* EWEN */
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
+}
+
+
+/**
+ * initio_se2_ew_ds - Disable erase/write
+ * @base: Base address of InitIO controller
+ *
+ * Disable erase/write state of serial EEPROM
+ */
+void initio_se2_ew_ds(unsigned long base)
+{
+ initio_se2_instr(base, 0); /* EWDS */
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
+}
+
+
+/**
+ * initio_se2_rd - read E2PROM word
+ * @base: Base of InitIO controller
+ * @addr: Address of word in E2PROM
+ *
+ * Read a word from the NV E2PROM device
+ */
+static u16 initio_se2_rd(unsigned long base, u8 addr)
+{
+ u8 instr, rb;
+ u16 val = 0;
+ int i;
+
+ instr = (u8) (addr | 0x80);
+ initio_se2_instr(base, instr); /* READ INSTR */
+
+ for (i = 15; i >= 0; i--) {
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+
+ /* sample data after the following edge of clock */
+ rb = inb(base + TUL_NVRAM);
+ rb &= SE2DI;
+ val += (rb << i);
+ udelay(30); /* 6/20/95 */
+ }
+
+ outb(0, base + TUL_NVRAM); /* no chip select */
+ udelay(30);
+ return val;
+}
+
+/**
+ * initio_se2_wr - read E2PROM word
+ * @base: Base of InitIO controller
+ * @addr: Address of word in E2PROM
+ * @val: Value to write
+ *
+ * Write a word to the NV E2PROM device. Used when recovering from
+ * a problem with the NV.
+ */
+static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
+{
+ u8 rb;
+ u8 instr;
+ int i;
+
+ instr = (u8) (addr | 0x40);
+ initio_se2_instr(base, instr); /* WRITE INSTR */
+ for (i = 15; i >= 0; i--) {
+ if (val & 0x8000)
+ outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
+ else
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
+ udelay(30);
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ val <<= 1;
+ }
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
+
+ outb(SE2CS, base + TUL_NVRAM); /* +CS */
+ udelay(30);
+
+ for (;;) {
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+ if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+ break; /* write complete */
+ }
+ outb(0, base + TUL_NVRAM); /* -CS */
+}
+
+/**
+ * initio_se2_rd_all - read hostadapter NV configuration
+ * @base: Base address of InitIO controller
+ *
+ * Reads the E2PROM data into main memory. Ensures that the checksum
+ * and header marker are valid. Returns 1 on success -1 on error.
+ */
+
+static int initio_se2_rd_all(unsigned long base)
+{
+ int i;
+ u16 chksum = 0;
+ u16 *np;
+
+ i91unvramp = &i91unvram;
+ np = (u16 *) i91unvramp;
+ for (i = 0; i < 32; i++)
+ *np++ = initio_se2_rd(base, i);
+
+ /* Is signature "ini" ok ? */
+ if (i91unvramp->NVM_Signature != INI_SIGNATURE)
+ return -1;
+ /* Is ckecksum ok ? */
+ np = (u16 *) i91unvramp;
+ for (i = 0; i < 31; i++)
+ chksum += *np++;
+ if (i91unvramp->NVM_CheckSum != chksum)
+ return -1;
+ return 1;
+}
+
+/**
+ * initio_se2_update_all - Update E2PROM
+ * @base: Base of InitIO controller
+ *
+ * Update the E2PROM by wrting any changes into the E2PROM
+ * chip, rewriting the checksum.
+ */
+static void initio_se2_update_all(unsigned long base)
+{ /* setup default pattern */
+ int i;
+ u16 chksum = 0;
+ u16 *np, *np1;
+
+ i91unvramp = &i91unvram;
+ /* Calculate checksum first */
+ np = (u16 *) i91udftNvRam;
+ for (i = 0; i < 31; i++)
+ chksum += *np++;
+ *np = chksum;
+ initio_se2_ew_en(base); /* Enable write */
+
+ np = (u16 *) i91udftNvRam;
+ np1 = (u16 *) i91unvramp;
+ for (i = 0; i < 32; i++, np++, np1++) {
+ if (*np != *np1)
+ initio_se2_wr(base, i, *np);
+ }
+ initio_se2_ew_ds(base); /* Disable write */
+}
+
+/**
+ * initio_read_eeprom - Retrieve configuration
+ * @base: Base of InitIO Host Adapter
+ *
+ * Retrieve the host adapter configuration data from E2Prom. If the
+ * data is invalid then the defaults are used and are also restored
+ * into the E2PROM. This forms the access point for the SCSI driver
+ * into the E2PROM layer, the other functions for the E2PROM are all
+ * internal use.
+ *
+ * Must be called single threaded, uses a shared global area.
+ */
+
+static void initio_read_eeprom(unsigned long base)
+{
+ u8 gctrl;
+
+ i91unvramp = &i91unvram;
+ /* Enable EEProm programming */
+ gctrl = inb(base + TUL_GCTRL);
+ outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
+ if (initio_se2_rd_all(base) != 1) {
+ initio_se2_update_all(base); /* setup default pattern */
+ initio_se2_rd_all(base); /* load again */
+ }
+ /* Disable EEProm programming */
+ gctrl = inb(base + TUL_GCTRL);
+ outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
+}
+
+/**
+ * initio_stop_bm - stop bus master
+ * @host: InitIO we are stopping
+ *
+ * Stop any pending DMA operation, aborting the DMA if necessary
+ */
+
+static void initio_stop_bm(struct initio_host * host)
+{
+
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
+ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
+ /* wait Abort DMA xfer done */
+ while ((inb(host->addr + TUL_Int) & XABT) == 0)
+ cpu_relax();
+ }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+}
+
+/**
+ * initio_reset_scsi - Reset SCSI host controller
+ * @host: InitIO host to reset
+ * @seconds: Recovery time
+ *
+ * Perform a full reset of the SCSI subsystem.
+ */
+
+static int initio_reset_scsi(struct initio_host * host, int seconds)
+{
+ outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
+
+ while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
+ cpu_relax();
+
+ /* reset tulip chip */
+ outb(0, host->addr + TUL_SSignal);
+
+ /* Stall for a while, wait for target's firmware ready,make it 2 sec ! */
+ /* SONY 5200 tape drive won't work if only stall for 1 sec */
+ /* FIXME: this is a very long busy wait right now */
+ initio_do_pause(seconds * HZ);
+
+ inb(host->addr + TUL_SInt);
+ return SCSI_RESET_SUCCESS;
+}
+
+/**
+ * initio_init - set up an InitIO host adapter
+ * @host: InitIO host adapter
+ * @num_scbs: Number of SCBS
+ * @bios_addr: BIOS address
+ *
+ * Set up the host adapter and devices according to the configuration
+ * retrieved from the E2PROM.
+ *
+ * Locking: Calls E2PROM layer code which is not re-enterable so must
+ * run single threaded for now.
+ */
+
+static void initio_init(struct initio_host * host, u8 *bios_addr)
+{
+ int i;
+ u8 *flags;
+ u8 *heads;
+
+ /* Get E2Prom configuration */
+ initio_read_eeprom(host->addr);
+ if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
+ host->max_tar = 8;
+ else
+ host->max_tar = 16;
+
+ host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
+
+ host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
+ host->idmask = ~(1 << host->scsi_id);
+
+#ifdef CHK_PARITY
+ /* Enable parity error response */
+ outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
+#endif
+
+ /* Mask all the interrupt */
+ outb(0x1F, host->addr + TUL_Mask);
+
+ initio_stop_bm(host);
+ /* --- Initialize the tulip --- */
+ outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
+
+ /* program HBA's SCSI ID */
+ outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
+
+ /* Enable Initiator Mode ,phase latch,alternate sync period mode,
+ disable SCSI reset */
+ if (host->config & HCC_EN_PAR)
+ host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
+ else
+ host->sconf1 = (TSC_INITDEFAULT);
+ outb(host->sconf1, host->addr + TUL_SConfig);
+
+ /* Enable HW reselect */
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+
+ outb(0, host->addr + TUL_SPeriod);
+
+ /* selection time out = 250 ms */
+ outb(153, host->addr + TUL_STimeOut);
+
+ /* Enable SCSI terminator */
+ outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
+ host->addr + TUL_XCtrl);
+ outb(((host->config & HCC_AUTO_TERM) >> 4) |
+ (inb(host->addr + TUL_GCTRL1) & 0xFE),
+ host->addr + TUL_GCTRL1);
+
+ for (i = 0,
+ flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
+ heads = bios_addr + 0x180;
+ i < host->max_tar;
+ i++, flags++) {
+ host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ if (host->targets[i].flags & TCF_EN_255)
+ host->targets[i].drv_flags = TCF_DRV_255_63;
+ else
+ host->targets[i].drv_flags = 0;
+ host->targets[i].js_period = 0;
+ host->targets[i].sconfig0 = host->sconf1;
+ host->targets[i].heads = *heads++;
+ if (host->targets[i].heads == 255)
+ host->targets[i].drv_flags = TCF_DRV_255_63;
+ else
+ host->targets[i].drv_flags = 0;
+ host->targets[i].sectors = *heads++;
+ host->targets[i].flags &= ~TCF_BUSY;
+ host->act_tags[i] = 0;
+ host->max_tags[i] = 0xFF;
+ } /* for */
+ printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
+ host->addr, host->pci_dev->irq,
+ host->bios_addr, host->scsi_id);
+ /* Reset SCSI Bus */
+ if (host->config & HCC_SCSI_RESET) {
+ printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
+ initio_reset_scsi(host, 10);
+ }
+ outb(0x17, host->addr + TUL_SCFG1);
+ outb(0xE9, host->addr + TUL_SIntEnable);
+}
+
+/**
+ * initio_alloc_scb - Allocate an SCB
+ * @host: InitIO host we are allocating for
+ *
+ * Walk the SCB list for the controller and allocate a free SCB if
+ * one exists.
+ */
+static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
+{
+ struct scsi_ctrl_blk *scb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->avail_lock, flags);
+ if ((scb = host->first_avail) != NULL) {
+#if DEBUG_QUEUE
+ printk("find scb at %p\n", scb);
+#endif
+ if ((host->first_avail = scb->next) == NULL)
+ host->last_avail = NULL;
+ scb->next = NULL;
+ scb->status = SCB_RENT;
+ }
+ spin_unlock_irqrestore(&host->avail_lock, flags);
+ return scb;
+}
+
+/**
+ * initio_release_scb - Release an SCB
+ * @host: InitIO host that owns the SCB
+ * @cmnd: SCB command block being returned
+ *
+ * Return an allocated SCB to the host free list
+ */
+
+static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
+{
+ unsigned long flags;
+
+#if DEBUG_QUEUE
+ printk("Release SCB %p; ", cmnd);
+#endif
+ spin_lock_irqsave(&(host->avail_lock), flags);
+ cmnd->srb = NULL;
+ cmnd->status = 0;
+ cmnd->next = NULL;
+ if (host->last_avail != NULL) {
+ host->last_avail->next = cmnd;
+ host->last_avail = cmnd;
+ } else {
+ host->first_avail = cmnd;
+ host->last_avail = cmnd;
+ }
+ spin_unlock_irqrestore(&(host->avail_lock), flags);
+}
+
+/***************************************************************************/
+static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
+{
+
+#if DEBUG_QUEUE
+ printk("Append pend SCB %p; ", scbp);
+#endif
+ scbp->status = SCB_PEND;
+ scbp->next = NULL;
+ if (host->last_pending != NULL) {
+ host->last_pending->next = scbp;
+ host->last_pending = scbp;
+ } else {
+ host->first_pending = scbp;
+ host->last_pending = scbp;
+ }
+}
+
+/***************************************************************************/
+static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
+{
+
+#if DEBUG_QUEUE
+ printk("Push pend SCB %p; ", scbp);
+#endif
+ scbp->status = SCB_PEND;
+ if ((scbp->next = host->first_pending) != NULL) {
+ host->first_pending = scbp;
+ } else {
+ host->first_pending = scbp;
+ host->last_pending = scbp;
+ }
+}
+
+static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *first;
+
+
+ first = host->first_pending;
+ while (first != NULL) {
+ if (first->opcode != ExecSCSI)
+ return first;
+ if (first->tagmsg == 0) {
+ if ((host->act_tags[first->target] == 0) &&
+ !(host->targets[first->target].flags & TCF_BUSY))
+ return first;
+ } else {
+ if ((host->act_tags[first->target] >=
+ host->max_tags[first->target]) |
+ (host->targets[first->target].flags & TCF_BUSY)) {
+ first = first->next;
+ continue;
+ }
+ return first;
+ }
+ first = first->next;
+ }
+ return first;
+}
+
+static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
+{
+ struct scsi_ctrl_blk *tmp, *prev;
+
+#if DEBUG_QUEUE
+ printk("unlink pend SCB %p; ", scb);
+#endif
+
+ prev = tmp = host->first_pending;
+ while (tmp != NULL) {
+ if (scb == tmp) { /* Unlink this SCB */
+ if (tmp == host->first_pending) {
+ if ((host->first_pending = tmp->next) == NULL)
+ host->last_pending = NULL;
+ } else {
+ prev->next = tmp->next;
+ if (tmp == host->last_pending)
+ host->last_pending = prev;
+ }
+ tmp->next = NULL;
+ break;
+ }
+ prev = tmp;
+ tmp = tmp->next;
+ }
+}
+
+static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
+{
+
+#if DEBUG_QUEUE
+ printk("append busy SCB %p; ", scbp);
+#endif
+ if (scbp->tagmsg)
+ host->act_tags[scbp->target]++;
+ else
+ host->targets[scbp->target].flags |= TCF_BUSY;
+ scbp->status = SCB_BUSY;
+ scbp->next = NULL;
+ if (host->last_busy != NULL) {
+ host->last_busy->next = scbp;
+ host->last_busy = scbp;
+ } else {
+ host->first_busy = scbp;
+ host->last_busy = scbp;
+ }
+}
+
+/***************************************************************************/
+static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *tmp;
+
+
+ if ((tmp = host->first_busy) != NULL) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
+ tmp->next = NULL;
+ if (tmp->tagmsg)
+ host->act_tags[tmp->target]--;
+ else
+ host->targets[tmp->target].flags &= ~TCF_BUSY;
+ }
+#if DEBUG_QUEUE
+ printk("Pop busy SCB %p; ", tmp);
+#endif
+ return tmp;
+}
+
+/***************************************************************************/
+static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
+{
+ struct scsi_ctrl_blk *tmp, *prev;
+
+#if DEBUG_QUEUE
+ printk("unlink busy SCB %p; ", scb);
+#endif
+
+ prev = tmp = host->first_busy;
+ while (tmp != NULL) {
+ if (scb == tmp) { /* Unlink this SCB */
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
+ } else {
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
+ }
+ tmp->next = NULL;
+ if (tmp->tagmsg)
+ host->act_tags[tmp->target]--;
+ else
+ host->targets[tmp->target].flags &= ~TCF_BUSY;
+ break;
+ }
+ prev = tmp;
+ tmp = tmp->next;
+ }
+ return;
+}
+
+struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
+{
+ struct scsi_ctrl_blk *tmp, *prev;
+ u16 scbp_tarlun;
+
+
+ prev = tmp = host->first_busy;
+ while (tmp != NULL) {
+ scbp_tarlun = (tmp->lun << 8) | (tmp->target);
+ if (scbp_tarlun == tarlun) { /* Unlink this SCB */
+ break;
+ }
+ prev = tmp;
+ tmp = tmp->next;
+ }
+#if DEBUG_QUEUE
+ printk("find busy SCB %p; ", tmp);
+#endif
+ return tmp;
+}
+
+static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
+{
+#if DEBUG_QUEUE
+ printk("append done SCB %p; ", scbp);
+#endif
+
+ scbp->status = SCB_DONE;
+ scbp->next = NULL;
+ if (host->last_done != NULL) {
+ host->last_done->next = scbp;
+ host->last_done = scbp;
+ } else {
+ host->first_done = scbp;
+ host->last_done = scbp;
+ }
+}
+
+struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *tmp;
+
+ if ((tmp = host->first_done) != NULL) {
+ if ((host->first_done = tmp->next) == NULL)
+ host->last_done = NULL;
+ tmp->next = NULL;
+ }
+#if DEBUG_QUEUE
+ printk("find done SCB %p; ",tmp);
+#endif
+ return tmp;
+}
+
+static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
+{
+ unsigned long flags;
+ struct scsi_ctrl_blk *tmp, *prev;
+
+ spin_lock_irqsave(&host->semaph_lock, flags);
+
+ if ((host->semaph == 0) && (host->active == NULL)) {
+ /* disable Jasmin SCSI Int */
+ outb(0x1F, host->addr + TUL_Mask);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ /* FIXME: synchronize_irq needed ? */
+ tulip_main(host);
+ spin_lock_irqsave(&host->semaph_lock, flags);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_SNOOZE;
+ }
+ prev = tmp = host->first_pending; /* Check Pend queue */
+ while (tmp != NULL) {
+ /* 07/27/98 */
+ if (tmp->srb == srbp) {
+ if (tmp == host->active) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_BUSY;
+ } else if (tmp == host->first_pending) {
+ if ((host->first_pending = tmp->next) == NULL)
+ host->last_pending = NULL;
+ } else {
+ prev->next = tmp->next;
+ if (tmp == host->last_pending)
+ host->last_pending = prev;
+ }
+ tmp->hastat = HOST_ABORTED;
+ tmp->flags |= SCF_DONE;
+ if (tmp->flags & SCF_POST)
+ (*tmp->post) ((u8 *) host, (u8 *) tmp);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_SUCCESS;
+ }
+ prev = tmp;
+ tmp = tmp->next;
+ }
+
+ prev = tmp = host->first_busy; /* Check Busy queue */
+ while (tmp != NULL) {
+ if (tmp->srb == srbp) {
+ if (tmp == host->active) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_BUSY;
+ } else if (tmp->tagmsg == 0) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_BUSY;
+ } else {
+ host->act_tags[tmp->target]--;
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
+ } else {
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
+ }
+ tmp->next = NULL;
+
+
+ tmp->hastat = HOST_ABORTED;
+ tmp->flags |= SCF_DONE;
+ if (tmp->flags & SCF_POST)
+ (*tmp->post) ((u8 *) host, (u8 *) tmp);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_SUCCESS;
+ }
+ }
+ prev = tmp;
+ tmp = tmp->next;
+ }
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+/***************************************************************************/
+static int initio_bad_seq(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb;
+
+ printk("initio_bad_seg c=%d\n", host->index);
+
+ if ((scb = host->active) != NULL) {
+ initio_unlink_busy_scb(host, scb);
+ scb->hastat = HOST_BAD_PHAS;
+ scb->tastat = 0;
+ initio_append_done_scb(host, scb);
+ }
+ initio_stop_bm(host);
+ initio_reset_scsi(host, 8); /* 7/29/98 */
+ return initio_post_scsi_rst(host);
+}
+
+
+/************************************************************************/
+static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
+{
+ unsigned long flags;
+
+ scb->mode = 0;
+
+ scb->sgidx = 0;
+ scb->sgmax = scb->sglen;
+
+ spin_lock_irqsave(&host->semaph_lock, flags);
+
+ initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */
+
+/* VVVVV 07/21/98 */
+ if (host->semaph == 1) {
+ /* Disable Jasmin SCSI Int */
+ outb(0x1F, host->addr + TUL_Mask);
+ host->semaph = 0;
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+
+ tulip_main(host);
+
+ spin_lock_irqsave(&host->semaph_lock, flags);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ }
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return;
+}
+
+/***************************************************************************/
+static int initio_isr(struct initio_host * host)
+{
+ if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
+ if (host->semaph == 1) {
+ outb(0x1F, host->addr + TUL_Mask);
+ /* Disable Tulip SCSI Int */
+ host->semaph = 0;
+
+ tulip_main(host);
+
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int tulip_main(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb;
+
+ for (;;) {
+ tulip_scsi(host); /* Call tulip_scsi */
+
+ /* Walk the list of completed SCBs */
+ while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */
+ if (scb->tastat == INI_QUEUE_FULL) {
+ host->max_tags[scb->target] =
+ host->act_tags[scb->target] - 1;
+ scb->tastat = 0;
+ initio_append_pend_scb(host, scb);
+ continue;
+ }
+ if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */
+ if (scb->tastat == 2) {
+
+ /* clr sync. nego flag */
+
+ if (scb->flags & SCF_SENSE) {
+ u8 len;
+ len = scb->senselen;
+ if (len == 0)
+ len = 1;
+ scb->buflen = scb->senselen;
+ scb->bufptr = scb->senseptr;
+ scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
+ /* so, we won't report wrong direction in xfer_data_in,
+ and won't report HOST_DO_DU in state_6 */
+ scb->mode = SCM_RSENS;
+ scb->ident &= 0xBF; /* Disable Disconnect */
+ scb->tagmsg = 0;
+ scb->tastat = 0;
+ scb->cdblen = 6;
+ scb->cdb[0] = SCSICMD_RequestSense;
+ scb->cdb[1] = 0;
+ scb->cdb[2] = 0;
+ scb->cdb[3] = 0;
+ scb->cdb[4] = len;
+ scb->cdb[5] = 0;
+ initio_push_pend_scb(host, scb);
+ break;
+ }
+ }
+ } else { /* in request sense mode */
+
+ if (scb->tastat == 2) { /* check contition status again after sending
+ requset sense cmd 0x3 */
+ scb->hastat = HOST_BAD_PHAS;
+ }
+ scb->tastat = 2;
+ }
+ scb->flags |= SCF_DONE;
+ if (scb->flags & SCF_POST) {
+ /* FIXME: only one post method and lose casts */
+ (*scb->post) ((u8 *) host, (u8 *) scb);
+ }
+ } /* while */
+ /* find_active: */
+ if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
+ continue;
+ if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */
+ return 1; /* return to OS, enable interrupt */
+ /* Check pending SCB */
+ if (initio_find_first_pend_scb(host) == NULL)
+ return 1; /* return to OS, enable interrupt */
+ } /* End of for loop */
+ /* statement won't reach here */
+}
+
+static void tulip_scsi(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
+
+ /* make sure to service interrupt asap */
+ if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
+ host->phase = host->jsstatus0 & TSS_PH_MASK;
+ host->jsstatus1 = inb(host->addr + TUL_SStatus1);
+ host->jsint = inb(host->addr + TUL_SInt);
+ if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
+ int_initio_scsi_rst(host);
+ return;
+ }
+ if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */
+ if (int_initio_resel(host) == 0)
+ initio_next_state(host);
+ return;
+ }
+ if (host->jsint & TSS_SEL_TIMEOUT) {
+ int_initio_busfree(host);
+ return;
+ }
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ int_initio_busfree(host); /* unexpected bus free or sel timeout */
+ return;
+ }
+ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
+ if ((scb = host->active) != NULL)
+ initio_next_state(host);
+ return;
+ }
+ }
+ if (host->active != NULL)
+ return;
+
+ if ((scb = initio_find_first_pend_scb(host)) == NULL)
+ return;
+
+ /* program HBA's SCSI ID & target SCSI ID */
+ outb((host->scsi_id << 4) | (scb->target & 0x0F),
+ host->addr + TUL_SScsiId);
+ if (scb->opcode == ExecSCSI) {
+ active_tc = &host->targets[scb->target];
+
+ if (scb->tagmsg)
+ active_tc->drv_flags |= TCF_DRV_EN_TAG;
+ else
+ active_tc->drv_flags &= ~TCF_DRV_EN_TAG;
+
+ outb(active_tc->js_period, host->addr + TUL_SPeriod);
+ if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
+ initio_select_atn_stop(host, scb);
+ } else {
+ if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
+ initio_select_atn_stop(host, scb);
+ } else {
+ if (scb->tagmsg)
+ initio_select_atn3(host, scb);
+ else
+ initio_select_atn(host, scb);
+ }
+ }
+ if (scb->flags & SCF_POLL) {
+ while (wait_tulip(host) != -1) {
+ if (initio_next_state(host) == -1)
+ break;
+ }
+ }
+ } else if (scb->opcode == BusDevRst) {
+ initio_select_atn_stop(host, scb);
+ scb->next_state = 8;
+ if (scb->flags & SCF_POLL) {
+ while (wait_tulip(host) != -1) {
+ if (initio_next_state(host) == -1)
+ break;
+ }
+ }
+ } else if (scb->opcode == AbortCmd) {
+ if (initio_abort_srb(host, scb->srb) != 0) {
+ initio_unlink_pend_scb(host, scb);
+ initio_release_scb(host, scb);
+ } else {
+ scb->opcode = BusDevRst;
+ initio_select_atn_stop(host, scb);
+ scb->next_state = 8;
+ }
+ } else {
+ initio_unlink_pend_scb(host, scb);
+ scb->hastat = 0x16; /* bad command */
+ initio_append_done_scb(host, scb);
+ }
+ return;
+}
+
+/**
+ * initio_next_state - Next SCSI state
+ * @host: InitIO host we are processing
+ *
+ * Progress the active command block along the state machine
+ * until we hit a state which we must wait for activity to occur.
+ *
+ * Returns zero or a negative code.
+ */
+
+static int initio_next_state(struct initio_host * host)
+{
+ int next;
+
+ next = host->active->next_state;
+ for (;;) {
+ switch (next) {
+ case 1:
+ next = initio_state_1(host);
+ break;
+ case 2:
+ next = initio_state_2(host);
+ break;
+ case 3:
+ next = initio_state_3(host);
+ break;
+ case 4:
+ next = initio_state_4(host);
+ break;
+ case 5:
+ next = initio_state_5(host);
+ break;
+ case 6:
+ next = initio_state_6(host);
+ break;
+ case 7:
+ next = initio_state_7(host);
+ break;
+ case 8:
+ return initio_bus_device_reset(host);
+ default:
+ return initio_bad_seq(host);
+ }
+ if (next <= 0)
+ return next;
+ }
+}
+
+
+/**
+ * initio_state_1 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * Perform SCSI state processing for Select/Attention/Stop
+ */
+
+static int initio_state_1(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+#if DEBUG_STATE
+ printk("-s1-");
+#endif
+
+ /* Move the SCB from pending to busy */
+ initio_unlink_pend_scb(host, scb);
+ initio_append_busy_scb(host, scb);
+
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig );
+ /* ATN on */
+ if (host->phase == MSG_OUT) {
+ outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+ outb(scb->ident, host->addr + TUL_SFifo);
+
+ if (scb->tagmsg) {
+ outb(scb->tagmsg, host->addr + TUL_SFifo);
+ outb(scb->tagid, host->addr + TUL_SFifo);
+ }
+ if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
+ active_tc->flags |= TCF_WDTR_DONE;
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(2, host->addr + TUL_SFifo); /* Extended msg length */
+ outb(3, host->addr + TUL_SFifo); /* Sync request */
+ outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
+ } else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
+ active_tc->flags |= TCF_SYNC_DONE;
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo); /* extended msg length */
+ outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
+ outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
+ }
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
+ /* Into before CDB xfer */
+ return 3;
+}
+
+
+/**
+ * initio_state_2 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * state after selection with attention
+ * state after selection with attention3
+ */
+
+static int initio_state_2(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+#if DEBUG_STATE
+ printk("-s2-");
+#endif
+
+ initio_unlink_pend_scb(host, scb);
+ initio_append_busy_scb(host, scb);
+
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
+
+ if (host->jsstatus1 & TSS_CMD_PH_CMP)
+ return 4;
+
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
+ /* Into before CDB xfer */
+ return 3;
+}
+
+/**
+ * initio_state_3 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * state before CDB xfer is done
+ */
+
+static int initio_state_3(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+ int i;
+
+#if DEBUG_STATE
+ printk("-s3-");
+#endif
+ for (;;) {
+ switch (host->phase) {
+ case CMD_OUT: /* Command out phase */
+ for (i = 0; i < (int) scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ if (host->phase == CMD_OUT)
+ return initio_bad_seq(host);
+ return 4;
+
+ case MSG_IN: /* Message in phase */
+ scb->next_state = 3;
+ if (initio_msgin(host) == -1)
+ return -1;
+ break;
+
+ case STATUS_IN: /* Status phase */
+ if (initio_status_msg(host) == -1)
+ return -1;
+ break;
+
+ case MSG_OUT: /* Message out phase */
+ if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ } else {
+ active_tc->flags |= TCF_SYNC_DONE;
+
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo); /* ext. msg len */
+ outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
+ outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
+
+ }
+ break;
+ default:
+ return initio_bad_seq(host);
+ }
+ }
+}
+
+/**
+ * initio_state_4 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * SCSI state machine. State 4
+ */
+
+static int initio_state_4(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+
+#if DEBUG_STATE
+ printk("-s4-");
+#endif
+ if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
+ return 6; /* Go to state 6 (After data) */
+ }
+ for (;;) {
+ if (scb->buflen == 0)
+ return 6;
+
+ switch (host->phase) {
+
+ case STATUS_IN: /* Status phase */
+ if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */
+ scb->hastat = HOST_DO_DU;
+ if ((initio_status_msg(host)) == -1)
+ return -1;
+ break;
+
+ case MSG_IN: /* Message in phase */
+ scb->next_state = 0x4;
+ if (initio_msgin(host) == -1)
+ return -1;
+ break;
+
+ case MSG_OUT: /* Message out phase */
+ if (host->jsstatus0 & TSS_PAR_ERROR) {
+ scb->buflen = 0;
+ scb->hastat = HOST_DO_DU;
+ if (initio_msgout_ide(host) == -1)
+ return -1;
+ return 6;
+ } else {
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ }
+ break;
+
+ case DATA_IN: /* Data in phase */
+ return initio_xfer_data_in(host);
+
+ case DATA_OUT: /* Data out phase */
+ return initio_xfer_data_out(host);
+
+ default:
+ return initio_bad_seq(host);
+ }
+ }
+}
+
+
+/**
+ * initio_state_5 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after dma xfer done or phase change before xfer done
+ */
+
+static int initio_state_5(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */
+
+#if DEBUG_STATE
+ printk("-s5-");
+#endif
+ /*------ get remaining count -------*/
+ cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;
+
+ if (inb(host->addr + TUL_XCmd) & 0x20) {
+ /* ----------------------- DATA_IN ----------------------------- */
+ /* check scsi parity error */
+ if (host->jsstatus0 & TSS_PAR_ERROR)
+ scb->hastat = HOST_DO_DU;
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
+ /* tell Hardware scsi xfer has been terminated */
+ outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
+ /* wait until DMA xfer not pending */
+ while (inb(host->addr + TUL_XStatus) & XPEND)
+ cpu_relax();
+ }
+ } else {
+ /*-------- DATA OUT -----------*/
+ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
+ if (host->active_tc->js_period & TSC_WIDE_SCSI)
+ cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
+ else
+ cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
+ }
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
+ outb(TAX_X_ABT, host->addr + TUL_XCmd);
+ /* wait Abort DMA xfer done */
+ while ((inb(host->addr + TUL_Int) & XABT) == 0)
+ cpu_relax();
+ }
+ if ((cnt == 1) && (host->phase == DATA_OUT)) {
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ cnt = 0;
+ } else {
+ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ }
+ }
+ if (cnt == 0) {
+ scb->buflen = 0;
+ return 6; /* After Data */
+ }
+ /* Update active data pointer */
+ xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */
+ scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */
+ if (scb->flags & SCF_SG) {
+ struct sg_entry *sgp;
+ unsigned long i;
+
+ sgp = &scb->sglist[scb->sgidx];
+ for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
+ xcnt -= (long) sgp->len;
+ if (xcnt < 0) { /* this sgp xfer half done */
+ xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */
+ sgp->data += (u32) xcnt; /* new ptr to be xfer */
+ sgp->len -= (u32) xcnt; /* new len to be xfer */
+ scb->bufptr += ((u32) (i - scb->sgidx) << 3);
+ /* new SG table ptr */
+ scb->sglen = (u8) (scb->sgmax - i);
+ /* new SG table len */
+ scb->sgidx = (u16) i;
+ /* for next disc and come in this loop */
+ return 4; /* Go to state 4 */
+ }
+ /* else (xcnt >= 0 , i.e. this sgp already xferred */
+ } /* for */
+ return 6; /* Go to state 6 */
+ } else {
+ scb->bufptr += (u32) xcnt;
+ }
+ return 4; /* Go to state 4 */
+}
+
+/**
+ * initio_state_6 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after Data phase
+ */
+
+static int initio_state_6(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+
+#if DEBUG_STATE
+ printk("-s6-");
+#endif
+ for (;;) {
+ switch (host->phase) {
+ case STATUS_IN: /* Status phase */
+ if ((initio_status_msg(host)) == -1)
+ return -1;
+ break;
+
+ case MSG_IN: /* Message in phase */
+ scb->next_state = 6;
+ if ((initio_msgin(host)) == -1)
+ return -1;
+ break;
+
+ case MSG_OUT: /* Message out phase */
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ break;
+
+ case DATA_IN: /* Data in phase */
+ return initio_xpad_in(host);
+
+ case DATA_OUT: /* Data out phase */
+ return initio_xpad_out(host);
+
+ default:
+ return initio_bad_seq(host);
+ }
+ }
+}
+
+/**
+ * initio_state_7 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ */
+
+int initio_state_7(struct initio_host * host)
+{
+ int cnt, i;
+
+#if DEBUG_STATE
+ printk("-s7-");
+#endif
+ /* flush SCSI FIFO */
+ cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
+ if (cnt) {
+ for (i = 0; i < cnt; i++)
+ inb(host->addr + TUL_SFifo);
+ }
+ switch (host->phase) {
+ case DATA_IN: /* Data in phase */
+ case DATA_OUT: /* Data out phase */
+ return initio_bad_seq(host);
+ default:
+ return 6; /* Go to state 6 */
+ }
+}
+
+/**
+ * initio_xfer_data_in - Commence data input
+ * @host: InitIO host in use
+ *
+ * Commence a block of data transfer. The transfer itself will
+ * be managed by the controller and we will get a completion (or
+ * failure) interrupt.
+ */
+static int initio_xfer_data_in(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+
+ if ((scb->flags & SCF_DIR) == SCF_DOUT)
+ return 6; /* wrong direction */
+
+ outl(scb->buflen, host->addr + TUL_SCnt0);
+ outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
+
+ if (scb->flags & SCF_SG) { /* S/G xfer */
+ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_SG_IN, host->addr + TUL_XCmd);
+ } else {
+ outl(scb->buflen, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_X_IN, host->addr + TUL_XCmd);
+ }
+ scb->next_state = 0x5;
+ return 0; /* return to OS, wait xfer done , let jas_isr come in */
+}
+
+/**
+ * initio_xfer_data_out - Commence data output
+ * @host: InitIO host in use
+ *
+ * Commence a block of data transfer. The transfer itself will
+ * be managed by the controller and we will get a completion (or
+ * failure) interrupt.
+ */
+
+static int initio_xfer_data_out(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+
+ if ((scb->flags & SCF_DIR) == SCF_DIN)
+ return 6; /* wrong direction */
+
+ outl(scb->buflen, host->addr + TUL_SCnt0);
+ outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
+
+ if (scb->flags & SCF_SG) { /* S/G xfer */
+ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_SG_OUT, host->addr + TUL_XCmd);
+ } else {
+ outl(scb->buflen, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_X_OUT, host->addr + TUL_XCmd);
+ }
+
+ scb->next_state = 0x5;
+ return 0; /* return to OS, wait xfer done , let jas_isr come in */
+}
+
+int initio_xpad_in(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+
+ if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
+ scb->hastat = HOST_DO_DU; /* over run */
+ for (;;) {
+ if (active_tc->js_period & TSC_WIDE_SCSI)
+ outl(2, host->addr + TUL_SCnt0);
+ else
+ outl(1, host->addr + TUL_SCnt0);
+
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ if (host->phase != DATA_IN) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ return 6;
+ }
+ inb(host->addr + TUL_SFifo);
+ }
+}
+
+int initio_xpad_out(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+
+ if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
+ scb->hastat = HOST_DO_DU; /* over run */
+ for (;;) {
+ if (active_tc->js_period & TSC_WIDE_SCSI)
+ outl(2, host->addr + TUL_SCnt0);
+ else
+ outl(1, host->addr + TUL_SCnt0);
+
+ outb(0, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if ((wait_tulip(host)) == -1)
+ return -1;
+ if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ return 6;
+ }
+ }
+}
+
+int initio_status_msg(struct initio_host * host)
+{ /* status & MSG_IN */
+ struct scsi_ctrl_blk *scb = host->active;
+ u8 msg;
+
+ outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+
+ /* get status */
+ scb->tastat = inb(host->addr + TUL_SFifo);
+
+ if (host->phase == MSG_OUT) {
+ if (host->jsstatus0 & TSS_PAR_ERROR)
+ outb(MSG_PARITY, host->addr + TUL_SFifo);
+ else
+ outb(MSG_NOP, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+ }
+ if (host->phase == MSG_IN) {
+ msg = inb(host->addr + TUL_SFifo);
+ if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
+ outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+ }
+ if (msg == 0) { /* Command complete */
+
+ if ((scb->tastat & 0x18) == 0x10) /* No link support */
+ return initio_bad_seq(host);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_done_disc(host);
+
+ }
+ if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
+ if ((scb->tastat & 0x18) == 0x10)
+ return initio_msgin_accept(host);
+ }
+ }
+ return initio_bad_seq(host);
+}
+
+
+/* scsi bus free */
+int int_initio_busfree(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+
+ if (scb != NULL) {
+ if (scb->status & SCB_SELECT) { /* selection timeout */
+ initio_unlink_pend_scb(host, scb);
+ scb->hastat = HOST_SEL_TOUT;
+ initio_append_done_scb(host, scb);
+ } else { /* Unexpected bus free */
+ initio_unlink_busy_scb(host, scb);
+ scb->hastat = HOST_BUS_FREE;
+ initio_append_done_scb(host, scb);
+ }
+ host->active = NULL;
+ host->active_tc = NULL;
+ }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
+}
+
+
+/**
+ * int_initio_scsi_rst - SCSI reset occurred
+ * @host: Host seeing the reset
+ *
+ * A SCSI bus reset has occurred. Clean up any pending transfer
+ * the hardware is doing by DMA and then abort all active and
+ * disconnected commands. The mid layer should sort the rest out
+ * for us
+ */
+
+static int int_initio_scsi_rst(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb;
+ int i;
+
+ /* if DMA xfer is pending, abort DMA xfer */
+ if (inb(host->addr + TUL_XStatus) & 0x01) {
+ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
+ /* wait Abort DMA xfer done */
+ while ((inb(host->addr + TUL_Int) & 0x04) == 0)
+ cpu_relax();
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ }
+ /* Abort all active & disconnected scb */
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
+ }
+ host->active = NULL;
+ host->active_tc = NULL;
+
+ /* clr sync nego. done flag */
+ for (i = 0; i < host->max_tar; i++)
+ host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ return -1;
+}
+
+/**
+ * int_initio_scsi_resel - Reselection occurred
+ * @host: InitIO host adapter
+ *
+ * A SCSI reselection event has been signalled and the interrupt
+ * is now being processed. Work out which command block needs attention
+ * and continue processing that command.
+ */
+
+int int_initio_resel(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
+ u8 tag, msg = 0;
+ u8 tar, lun;
+
+ if ((scb = host->active) != NULL) {
+ /* FIXME: Why check and not just clear ? */
+ if (scb->status & SCB_SELECT) /* if waiting for selection complete */
+ scb->status &= ~SCB_SELECT;
+ host->active = NULL;
+ }
+ /* --------- get target id---------------------- */
+ tar = inb(host->addr + TUL_SBusId);
+ /* ------ get LUN from Identify message----------- */
+ lun = inb(host->addr + TUL_SIdent) & 0x0F;
+ /* 07/22/98 from 0x1F -> 0x0F */
+ active_tc = &host->targets[tar];
+ host->active_tc = active_tc;
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(active_tc->js_period, host->addr + TUL_SPeriod);
+
+ /* ------------- tag queueing ? ------------------- */
+ if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ if (host->phase != MSG_IN)
+ goto no_tag;
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
+
+ if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
+ goto no_tag;
+
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+
+ if (host->phase != MSG_IN)
+ goto no_tag;
+
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
+ scb = host->scb + tag;
+ if (scb->target != tar || scb->lun != lun) {
+ return initio_msgout_abort_tag(host);
+ }
+ if (scb->status != SCB_BUSY) { /* 03/24/95 */
+ return initio_msgout_abort_tag(host);
+ }
+ host->active = scb;
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ } else { /* No tag */
+ no_tag:
+ if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
+ return initio_msgout_abort_targ(host);
+ }
+ host->active = scb;
+ if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * int_initio_bad_seq - out of phase
+ * @host: InitIO host flagging event
+ *
+ * We have ended up out of phase somehow. Reset the host controller
+ * and throw all our toys out of the pram. Let the midlayer clean up
+ */
+
+static int int_initio_bad_seq(struct initio_host * host)
+{ /* target wrong phase */
+ struct scsi_ctrl_blk *scb;
+ int i;
+
+ initio_reset_scsi(host, 10);
+
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
+ }
+ for (i = 0; i < host->max_tar; i++)
+ host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ return -1;
+}
+
+
+/**
+ * initio_msgout_abort_targ - abort a tag
+ * @host: InitIO host
+ *
+ * Abort when the target/lun does not match or when our SCB is not
+ * busy. Used by untagged commands.
+ */
+
+static int initio_msgout_abort_targ(struct initio_host * host)
+{
+
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
+
+ outb(MSG_ABORT, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+
+ return initio_wait_disc(host);
+}
+
+/**
+ * initio_msgout_abort_tag - abort a tag
+ * @host: InitIO host
+ *
+ * Abort when the target/lun does not match or when our SCB is not
+ * busy. Used for tagged commands.
+ */
+
+static int initio_msgout_abort_tag(struct initio_host * host)
+{
+
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
+
+ outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+
+ return initio_wait_disc(host);
+
+}
+
+/**
+ * initio_msgin - Message in
+ * @host: InitIO Host
+ *
+ * Process incoming message
+ */
+static int initio_msgin(struct initio_host * host)
+{
+ struct target_control *active_tc;
+
+ for (;;) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+
+ switch (inb(host->addr + TUL_SFifo)) {
+ case MSG_DISC: /* Disconnect msg */
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_disc(host);
+ case MSG_SDP:
+ case MSG_RESTORE:
+ case MSG_NOP:
+ initio_msgin_accept(host);
+ break;
+ case MSG_REJ: /* Clear ATN first */
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
+ host->addr + TUL_SSignal);
+ active_tc = host->active_tc;
+ if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
+ host->addr + TUL_SSignal);
+ initio_msgin_accept(host);
+ break;
+ case MSG_EXTEND: /* extended msg */
+ initio_msgin_extend(host);
+ break;
+ case MSG_IGNOREWIDE:
+ initio_msgin_accept(host);
+ break;
+ case MSG_COMP:
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_done_disc(host);
+ default:
+ initio_msgout_reject(host);
+ break;
+ }
+ if (host->phase != MSG_IN)
+ return host->phase;
+ }
+ /* statement won't reach here */
+}
+
+static int initio_msgout_reject(struct initio_host * host)
+{
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+
+ if (host->phase == MSG_OUT) {
+ outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+ }
+ return host->phase;
+}
+
+static int initio_msgout_ide(struct initio_host * host)
+{
+ outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+}
+
+static int initio_msgin_extend(struct initio_host * host)
+{
+ u8 len, idx;
+
+ if (initio_msgin_accept(host) != MSG_IN)
+ return host->phase;
+
+ /* Get extended msg length */
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+
+ len = inb(host->addr + TUL_SFifo);
+ host->msg[0] = len;
+ for (idx = 1; len != 0; len--) {
+
+ if ((initio_msgin_accept(host)) != MSG_IN)
+ return host->phase;
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ host->msg[idx++] = inb(host->addr + TUL_SFifo);
+ }
+ if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
+ u8 r;
+ if (host->msg[0] != 3) /* if length is not right */
+ return initio_msgout_reject(host);
+ if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
+ host->msg[3] = 0;
+ } else {
+ if (initio_msgin_sync(host) == 0 &&
+ (host->active_tc->flags & TCF_SYNC_DONE)) {
+ initio_sync_done(host);
+ return initio_msgin_accept(host);
+ }
+ }
+
+ r = inb(host->addr + TUL_SSignal);
+ outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
+ host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) != MSG_OUT)
+ return host->phase;
+ /* sync msg out */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+
+ initio_sync_done(host);
+
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo);
+ outb(1, host->addr + TUL_SFifo);
+ outb(host->msg[2], host->addr + TUL_SFifo);
+ outb(host->msg[3], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+ }
+ if (host->msg[0] != 2 || host->msg[1] != 3)
+ return initio_msgout_reject(host);
+ /* if it's WIDE DATA XFER REQ */
+ if (host->active_tc->flags & TCF_NO_WDTR) {
+ host->msg[2] = 0;
+ } else {
+ if (host->msg[2] > 2) /* > 32 bits */
+ return initio_msgout_reject(host);
+ if (host->msg[2] == 2) { /* == 32 */
+ host->msg[2] = 1;
+ } else {
+ if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
+ wdtr_done(host);
+ if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ return initio_msgin_accept(host);
+ }
+ }
+ }
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+
+ if (initio_msgin_accept(host) != MSG_OUT)
+ return host->phase;
+ /* WDTR msg out */
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(2, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo);
+ outb(host->msg[2], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+}
+
+static int initio_msgin_sync(struct initio_host * host)
+{
+ char default_period;
+
+ default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
+ if (host->msg[3] > MAX_OFFSET) {
+ host->msg[3] = MAX_OFFSET;
+ if (host->msg[2] < default_period) {
+ host->msg[2] = default_period;
+ return 1;
+ }
+ if (host->msg[2] >= 59) /* Change to async */
+ host->msg[3] = 0;
+ return 1;
+ }
+ /* offset requests asynchronous transfers ? */
+ if (host->msg[3] == 0) {
+ return 0;
+ }
+ if (host->msg[2] < default_period) {
+ host->msg[2] = default_period;
+ return 1;
+ }
+ if (host->msg[2] >= 59) {
+ host->msg[3] = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static int wdtr_done(struct initio_host * host)
+{
+ host->active_tc->flags &= ~TCF_SYNC_DONE;
+ host->active_tc->flags |= TCF_WDTR_DONE;
+
+ host->active_tc->js_period = 0;
+ if (host->msg[2]) /* if 16 bit */
+ host->active_tc->js_period |= TSC_WIDE_SCSI;
+ host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
+ outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
+
+ return 1;
+}
+
+static int initio_sync_done(struct initio_host * host)
+{
+ int i;
+
+ host->active_tc->flags |= TCF_SYNC_DONE;
+
+ if (host->msg[3]) {
+ host->active_tc->js_period |= host->msg[3];
+ for (i = 0; i < 8; i++) {
+ if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
+ break;
+ }
+ host->active_tc->js_period |= (i << 4);
+ host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
+ }
+ outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
+
+ return -1;
+}
+
+
+static int initio_post_scsi_rst(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
+ int i;
+
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags = 0;
+
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
+ }
+ /* clear sync done flag */
+ active_tc = &host->targets[0];
+ for (i = 0; i < host->max_tar; active_tc++, i++) {
+ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ /* Initialize the sync. xfer register values to an asyn xfer */
+ active_tc->js_period = 0;
+ active_tc->sconfig0 = host->sconf1;
+ host->act_tags[0] = 0; /* 07/22/98 */
+ host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
+ } /* for */
+
+ return -1;
+}
+
+static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
+{
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x1;
+ host->active = scb;
+ host->active_tc = &host->targets[scb->target];
+ outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
+}
+
+
+static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
+{
+ int i;
+
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x2;
+
+ outb(scb->ident, host->addr + TUL_SFifo);
+ for (i = 0; i < (int) scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ host->active_tc = &host->targets[scb->target];
+ host->active = scb;
+ outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
+}
+
+static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
+{
+ int i;
+
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x2;
+
+ outb(scb->ident, host->addr + TUL_SFifo);
+ outb(scb->tagmsg, host->addr + TUL_SFifo);
+ outb(scb->tagid, host->addr + TUL_SFifo);
+ for (i = 0; i < scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ host->active_tc = &host->targets[scb->target];
+ host->active = scb;
+ outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
+}
+
+/**
+ * initio_bus_device_reset - SCSI Bus Device Reset
+ * @host: InitIO host to reset
+ *
+ * Perform a device reset and abort all pending SCBs for the
+ * victim device
+ */
+int initio_bus_device_reset(struct initio_host * host)
+{
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+ struct scsi_ctrl_blk *tmp, *prev;
+ u8 tar;
+
+ if (host->phase != MSG_OUT)
+ return int_initio_bad_seq(host); /* Unexpected phase */
+
+ initio_unlink_pend_scb(host, scb);
+ initio_release_scb(host, scb);
+
+
+ tar = scb->target; /* target */
+ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
+ /* clr sync. nego & WDTR flags 07/22/98 */
+
+ /* abort all SCB with same target */
+ prev = tmp = host->first_busy; /* Check Busy queue */
+ while (tmp != NULL) {
+ if (tmp->target == tar) {
+ /* unlink it */
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
+ } else {
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
+ }
+ tmp->hastat = HOST_ABORTED;
+ initio_append_done_scb(host, tmp);
+ }
+ /* Previous haven't change */
+ else {
+ prev = tmp;
+ }
+ tmp = tmp->next;
+ }
+ outb(MSG_DEVRST, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return initio_wait_disc(host);
+
+}
+
+static int initio_msgin_accept(struct initio_host * host)
+{
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+}
+
+static int wait_tulip(struct initio_host * host)
+{
+
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
+ & TSS_INT_PENDING))
+ cpu_relax();
+
+ host->jsint = inb(host->addr + TUL_SInt);
+ host->phase = host->jsstatus0 & TSS_PH_MASK;
+ host->jsstatus1 = inb(host->addr + TUL_SStatus1);
+
+ if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */
+ return int_initio_resel(host);
+ if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
+ return int_initio_busfree(host);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ if (host->flags & HCF_EXPECT_DONE_DISC) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ initio_unlink_busy_scb(host, host->active);
+ host->active->hastat = 0;
+ initio_append_done_scb(host, host->active);
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags &= ~HCF_EXPECT_DONE_DISC;
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
+ }
+ if (host->flags & HCF_EXPECT_DISC) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags &= ~HCF_EXPECT_DISC;
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
+ }
+ return int_initio_busfree(host);
+ }
+ /* The old code really does the below. Can probably be removed */
+ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
+ return host->phase;
+ return host->phase;
+}
+
+static int initio_wait_disc(struct initio_host * host)
+{
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
+ cpu_relax();
+
+ host->jsint = inb(host->addr + TUL_SInt);
+
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ host->active = NULL;
+ return -1;
+ }
+ return initio_bad_seq(host);
+}
+
+static int initio_wait_done_disc(struct initio_host * host)
+{
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
+ & TSS_INT_PENDING))
+ cpu_relax();
+
+ host->jsint = inb(host->addr + TUL_SInt);
+
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ initio_unlink_busy_scb(host, host->active);
+
+ initio_append_done_scb(host, host->active);
+ host->active = NULL;
+ return -1;
+ }
+ return initio_bad_seq(host);
+}
+
+/**
+ * i91u_intr - IRQ handler
+ * @irqno: IRQ number
+ * @dev_id: IRQ identifier
+ *
+ * Take the relevant locks and then invoke the actual isr processing
+ * code under the lock.
+ */
+
+static irqreturn_t i91u_intr(int irqno, void *dev_id)
+{
+ struct Scsi_Host *dev = dev_id;
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+ r = initio_isr((struct initio_host *)dev->hostdata);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ if (r)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+
+/**
+ * initio_build_scb - Build the mappings and SCB
+ * @host: InitIO host taking the command
+ * @cblk: Firmware command block
+ * @cmnd: SCSI midlayer command block
+ *
+ * Translate the abstract SCSI command into a firmware command block
+ * suitable for feeding to the InitIO host controller. This also requires
+ * we build the scatter gather lists and ensure they are mapped properly.
+ */
+
+static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
+{ /* Create corresponding SCB */
+ struct scatterlist *sglist;
+ struct sg_entry *sg; /* Pointer to SG list */
+ int i, nseg;
+ long total_len;
+ dma_addr_t dma_addr;
+
+ /* Fill in the command headers */
+ cblk->post = i91uSCBPost; /* i91u's callback routine */
+ cblk->srb = cmnd;
+ cblk->opcode = ExecSCSI;
+ cblk->flags = SCF_POST; /* After SCSI done, call post routine */
+ cblk->target = cmnd->device->id;
+ cblk->lun = cmnd->device->lun;
+ cblk->ident = cmnd->device->lun | DISC_ALLOW;
+
+ cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
+
+ /* Map the sense buffer into bus memory */
+ dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
+ SENSE_SIZE, DMA_FROM_DEVICE);
+ cblk->senseptr = (u32)dma_addr;
+ cblk->senselen = SENSE_SIZE;
+ cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
+ cblk->cdblen = cmnd->cmd_len;
+
+ /* Clear the returned status */
+ cblk->hastat = 0;
+ cblk->tastat = 0;
+ /* Command the command */
+ memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);
+
+ /* Set up tags */
+ if (cmnd->device->tagged_supported) { /* Tag Support */
+ cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
+ } else {
+ cblk->tagmsg = 0; /* No tag support */
+ }
+
+ /* todo handle map_sg error */
+ nseg = scsi_dma_map(cmnd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
+ sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
+ DMA_BIDIRECTIONAL);
+ cblk->bufptr = (u32)dma_addr;
+ cmnd->SCp.dma_handle = dma_addr;
+
+ cblk->sglen = nseg;
+
+ cblk->flags |= SCF_SG; /* Turn on SG list flag */
+ total_len = 0;
+ sg = &cblk->sglist[0];
+ scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
+ sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
+ sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
+ total_len += sg_dma_len(sglist);
+ ++sg;
+ }
+
+ cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
+ total_len : scsi_bufflen(cmnd);
+ } else { /* No data transfer required */
+ cblk->buflen = 0;
+ cblk->sglen = 0;
+ }
+}
+
+/**
+ * i91u_queuecommand - Queue a new command if possible
+ * @cmd: SCSI command block from the mid layer
+ * @done: Completion handler
+ *
+ * Attempts to queue a new command with the host adapter. Will return
+ * zero if successful or indicate a host busy condition if not (which
+ * will cause the mid layer to call us again later with the command)
+ */
+
+static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
+ struct scsi_ctrl_blk *cmnd;
+
+ cmd->scsi_done = done;
+
+ cmnd = initio_alloc_scb(host);
+ if (!cmnd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ initio_build_scb(host, cmnd, cmd);
+ initio_exec_scb(host, cmnd);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(i91u_queuecommand)
+
+/**
+ * i91u_bus_reset - reset the SCSI bus
+ * @cmnd: Command block we want to trigger the reset for
+ *
+ * Initiate a SCSI bus reset sequence
+ */
+
+static int i91u_bus_reset(struct scsi_cmnd * cmnd)
+{
+ struct initio_host *host;
+
+ host = (struct initio_host *) cmnd->device->host->hostdata;
+
+ spin_lock_irq(cmnd->device->host->host_lock);
+ initio_reset_scsi(host, 0);
+ spin_unlock_irq(cmnd->device->host->host_lock);
+
+ return SUCCESS;
+}
+
+/**
+ * i91u_biospararm - return the "logical geometry
+ * @sdev: SCSI device
+ * @dev; Matching block device
+ * @capacity: Sector size of drive
+ * @info_array: Return space for BIOS geometry
+ *
+ * Map the device geometry in a manner compatible with the host
+ * controller BIOS behaviour.
+ *
+ * FIXME: limited to 2^32 sector devices.
+ */
+
+static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int *info_array)
+{
+ struct initio_host *host; /* Point to Host adapter control block */
+ struct target_control *tc;
+
+ host = (struct initio_host *) sdev->host->hostdata;
+ tc = &host->targets[sdev->id];
+
+ if (tc->heads) {
+ info_array[0] = tc->heads;
+ info_array[1] = tc->sectors;
+ info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
+ } else {
+ if (tc->drv_flags & TCF_DRV_255_63) {
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = (unsigned long)capacity / 255 / 63;
+ } else {
+ info_array[0] = 64;
+ info_array[1] = 32;
+ info_array[2] = (unsigned long)capacity >> 11;
+ }
+ }
+
+#if defined(DEBUG_BIOSPARAM)
+ if (i91u_debug & debug_biosparam) {
+ printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
+ info_array[0], info_array[1], info_array[2]);
+ printk("WARNING: check, if the bios geometry is correct.\n");
+ }
+#endif
+
+ return 0;
+}
+
+/**
+ * i91u_unmap_scb - Unmap a command
+ * @pci_dev: PCI device the command is for
+ * @cmnd: The command itself
+ *
+ * Unmap any PCI mapping/IOMMU resources allocated when the command
+ * was mapped originally as part of initio_build_scb
+ */
+
+static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
+{
+ /* auto sense buffer */
+ if (cmnd->SCp.ptr) {
+ dma_unmap_single(&pci_dev->dev,
+ (dma_addr_t)((unsigned long)cmnd->SCp.ptr),
+ SENSE_SIZE, DMA_FROM_DEVICE);
+ cmnd->SCp.ptr = NULL;
+ }
+
+ /* request buffer */
+ if (scsi_sg_count(cmnd)) {
+ dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
+ sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
+ DMA_BIDIRECTIONAL);
+
+ scsi_dma_unmap(cmnd);
+ }
+}
+
+/**
+ * i91uSCBPost - SCSI callback
+ * @host: Pointer to host adapter control block.
+ * @cmnd: Pointer to SCSI control block.
+ *
+ * This is callback routine be called when tulip finish one
+ * SCSI command.
+ */
+
+static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
+{
+ struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */
+ struct initio_host *host;
+ struct scsi_ctrl_blk *cblk;
+
+ host = (struct initio_host *) host_mem;
+ cblk = (struct scsi_ctrl_blk *) cblk_mem;
+ if ((cmnd = cblk->srb) == NULL) {
+ printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
+ WARN_ON(1);
+ initio_release_scb(host, cblk); /* Release SCB for current channel */
+ return;
+ }
+
+ /*
+ * Remap the firmware error status into a mid layer one
+ */
+ switch (cblk->hastat) {
+ case 0x0:
+ case 0xa: /* Linked command complete without error and linked normally */
+ case 0xb: /* Linked command complete without error interrupt generated */
+ cblk->hastat = 0;
+ break;
+
+ case 0x11: /* Selection time out-The initiator selection or target
+ reselection was not complete within the SCSI Time out period */
+ cblk->hastat = DID_TIME_OUT;
+ break;
+
+ case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
+ phase sequence was requested by the target. The host adapter
+ will generate a SCSI Reset Condition, notifying the host with
+ a SCRD interrupt */
+ cblk->hastat = DID_RESET;
+ break;
+
+ case 0x1a: /* SCB Aborted. 07/21/98 */
+ cblk->hastat = DID_ABORT;
+ break;
+
+ case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
+ than was allocated by the Data Length field or the sum of the
+ Scatter / Gather Data Length fields. */
+ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
+ case 0x16: /* Invalid SCB Operation Code. */
+
+ default:
+ printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
+ cblk->hastat = DID_ERROR; /* Couldn't find any better */
+ break;
+ }
+
+ cmnd->result = cblk->tastat | (cblk->hastat << 16);
+ i91u_unmap_scb(host->pci_dev, cmnd);
+ cmnd->scsi_done(cmnd); /* Notify system DONE */
+ initio_release_scb(host, cblk); /* Release SCB for current channel */
+}
+
+static struct scsi_host_template initio_template = {
+ .proc_name = "INI9100U",
+ .name = "Initio INI-9X00U/UW SCSI device driver",
+ .queuecommand = i91u_queuecommand,
+ .eh_bus_reset_handler = i91u_bus_reset,
+ .bios_param = i91u_biosparam,
+ .can_queue = MAX_TARGETS * i91u_MAXQUEUE,
+ .this_id = 1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int initio_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ struct initio_host *host;
+ u32 reg;
+ u16 bios_seg;
+ struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
+ int num_scb, i, error;
+
+ error = pci_enable_device(pdev);
+ if (error)
+ return error;
+
+ pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
+ bios_seg = (u16) (reg & 0xFF);
+ if (((reg & 0xFF00) >> 8) == 0xFF)
+ reg = 0;
+ bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
+ error = -ENODEV;
+ goto out_disable_device;
+ }
+ shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
+ if (!shost) {
+ printk(KERN_WARNING "initio: Could not allocate host structure.\n");
+ error = -ENOMEM;
+ goto out_disable_device;
+ }
+ host = (struct initio_host *)shost->hostdata;
+ memset(host, 0, sizeof(struct initio_host));
+ host->addr = pci_resource_start(pdev, 0);
+ host->bios_addr = bios_seg;
+
+ if (!request_region(host->addr, 256, "i91u")) {
+ printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
+ error = -ENODEV;
+ goto out_host_put;
+ }
+
+ if (initio_tag_enable) /* 1.01i */
+ num_scb = MAX_TARGETS * i91u_MAXQUEUE;
+ else
+ num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
+
+ for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
+ i = num_scb * sizeof(struct scsi_ctrl_blk);
+ if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ break;
+ }
+
+ if (!scb) {
+ printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
+ error = -ENOMEM;
+ goto out_release_region;
+ }
+
+ host->pci_dev = pdev;
+
+ host->semaph = 1;
+ spin_lock_init(&host->semaph_lock);
+ host->num_scbs = num_scb;
+ host->scb = scb;
+ host->next_pending = scb;
+ host->next_avail = scb;
+ for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
+ tmp->tagid = i;
+ if (i != 0)
+ prev->next = tmp;
+ prev = tmp;
+ }
+ prev->next = NULL;
+ host->scb_end = tmp;
+ host->first_avail = scb;
+ host->last_avail = prev;
+ spin_lock_init(&host->avail_lock);
+
+ initio_init(host, phys_to_virt(((u32)bios_seg << 4)));
+
+ host->jsstatus0 = 0;
+
+ shost->io_port = host->addr;
+ shost->n_io_port = 0xff;
+ shost->can_queue = num_scb; /* 03/05/98 */
+ shost->unique_id = host->addr;
+ shost->max_id = host->max_tar;
+ shost->max_lun = 32; /* 10/21/97 */
+ shost->irq = pdev->irq;
+ shost->this_id = host->scsi_id; /* Assign HCS index */
+ shost->base = host->addr;
+ shost->sg_tablesize = TOTAL_SG_ENTRY;
+
+ error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);
+ if (error < 0) {
+ printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
+ goto out_free_scbs;
+ }
+
+ pci_set_drvdata(pdev, shost);
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+ goto out_free_irq;
+ scsi_scan_host(shost);
+ return 0;
+out_free_irq:
+ free_irq(pdev->irq, shost);
+out_free_scbs:
+ kfree(host->scb);
+out_release_region:
+ release_region(host->addr, 256);
+out_host_put:
+ scsi_host_put(shost);
+out_disable_device:
+ pci_disable_device(pdev);
+ return error;
+}
+
+/**
+ * initio_remove_one - control shutdown
+ * @pdev: PCI device being released
+ *
+ * Release the resources assigned to this adapter after it has
+ * finished being used.
+ */
+
+static void initio_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct initio_host *s = (struct initio_host *)host->hostdata;
+ scsi_remove_host(host);
+ free_irq(pdev->irq, host);
+ release_region(s->addr, 256);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
+}
+
+MODULE_LICENSE("GPL");
+
+static struct pci_device_id initio_pci_tbl[] = {
+ {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
+
+static struct pci_driver initio_pci_driver = {
+ .name = "initio",
+ .id_table = initio_pci_tbl,
+ .probe = initio_probe_one,
+ .remove = initio_remove_one,
+};
+
+static int __init initio_init_driver(void)
+{
+ return pci_register_driver(&initio_pci_driver);
+}
+
+static void __exit initio_exit_driver(void)
+{
+ pci_unregister_driver(&initio_pci_driver);
+}
+
+MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
+MODULE_AUTHOR("Initio Corporation");
+MODULE_LICENSE("GPL");
+
+module_init(initio_init_driver);
+module_exit(initio_exit_driver);
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
new file mode 100644
index 000000000..219b901bd
--- /dev/null
+++ b/drivers/scsi/initio.h
@@ -0,0 +1,667 @@
+/**************************************************************************
+ * Initio 9100 device driver for Linux.
+ *
+ * Copyright (c) 1994-1998 Initio Corporation
+ * All rights reserved.
+ *
+ * Cleanups (c) Copyright 2007 Red Hat <alan@lxorguk.ukuu.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ **************************************************************************/
+
+
+#include <linux/types.h>
+
+#define TOTAL_SG_ENTRY 32
+#define MAX_SUPPORTED_ADAPTERS 8
+#define MAX_OFFSET 15
+#define MAX_TARGETS 16
+
+typedef struct {
+ unsigned short base;
+ unsigned short vec;
+} i91u_config;
+
+/***************************************/
+/* Tulip Configuration Register Set */
+/***************************************/
+#define TUL_PVID 0x00 /* Vendor ID */
+#define TUL_PDID 0x02 /* Device ID */
+#define TUL_PCMD 0x04 /* Command */
+#define TUL_PSTUS 0x06 /* Status */
+#define TUL_PRID 0x08 /* Revision number */
+#define TUL_PPI 0x09 /* Programming interface */
+#define TUL_PSC 0x0A /* Sub Class */
+#define TUL_PBC 0x0B /* Base Class */
+#define TUL_PCLS 0x0C /* Cache line size */
+#define TUL_PLTR 0x0D /* Latency timer */
+#define TUL_PHDT 0x0E /* Header type */
+#define TUL_PBIST 0x0F /* BIST */
+#define TUL_PBAD 0x10 /* Base address */
+#define TUL_PBAD1 0x14 /* Base address */
+#define TUL_PBAD2 0x18 /* Base address */
+#define TUL_PBAD3 0x1C /* Base address */
+#define TUL_PBAD4 0x20 /* Base address */
+#define TUL_PBAD5 0x24 /* Base address */
+#define TUL_PRSVD 0x28 /* Reserved */
+#define TUL_PRSVD1 0x2C /* Reserved */
+#define TUL_PRAD 0x30 /* Expansion ROM base address */
+#define TUL_PRSVD2 0x34 /* Reserved */
+#define TUL_PRSVD3 0x38 /* Reserved */
+#define TUL_PINTL 0x3C /* Interrupt line */
+#define TUL_PINTP 0x3D /* Interrupt pin */
+#define TUL_PIGNT 0x3E /* MIN_GNT */
+#define TUL_PMGNT 0x3F /* MAX_GNT */
+
+/************************/
+/* Jasmin Register Set */
+/************************/
+#define TUL_HACFG0 0x40 /* H/A Configuration Register 0 */
+#define TUL_HACFG1 0x41 /* H/A Configuration Register 1 */
+#define TUL_HACFG2 0x42 /* H/A Configuration Register 2 */
+
+#define TUL_SDCFG0 0x44 /* SCSI Device Configuration 0 */
+#define TUL_SDCFG1 0x45 /* SCSI Device Configuration 1 */
+#define TUL_SDCFG2 0x46 /* SCSI Device Configuration 2 */
+#define TUL_SDCFG3 0x47 /* SCSI Device Configuration 3 */
+
+#define TUL_GINTS 0x50 /* Global Interrupt Status Register */
+#define TUL_GIMSK 0x52 /* Global Interrupt MASK Register */
+#define TUL_GCTRL 0x54 /* Global Control Register */
+#define TUL_GCTRL_EEPROM_BIT 0x04
+#define TUL_GCTRL1 0x55 /* Global Control Register */
+#define TUL_DMACFG 0x5B /* DMA configuration */
+#define TUL_NVRAM 0x5D /* Non-volatile RAM port */
+
+#define TUL_SCnt0 0x80 /* 00 R/W Transfer Counter Low */
+#define TUL_SCnt1 0x81 /* 01 R/W Transfer Counter Mid */
+#define TUL_SCnt2 0x82 /* 02 R/W Transfer Count High */
+#define TUL_SFifoCnt 0x83 /* 03 R FIFO counter */
+#define TUL_SIntEnable 0x84 /* 03 W Interrupt enble */
+#define TUL_SInt 0x84 /* 04 R Interrupt Register */
+#define TUL_SCtrl0 0x85 /* 05 W Control 0 */
+#define TUL_SStatus0 0x85 /* 05 R Status 0 */
+#define TUL_SCtrl1 0x86 /* 06 W Control 1 */
+#define TUL_SStatus1 0x86 /* 06 R Status 1 */
+#define TUL_SConfig 0x87 /* 07 W Configuration */
+#define TUL_SStatus2 0x87 /* 07 R Status 2 */
+#define TUL_SPeriod 0x88 /* 08 W Sync. Transfer Period & Offset */
+#define TUL_SOffset 0x88 /* 08 R Offset */
+#define TUL_SScsiId 0x89 /* 09 W SCSI ID */
+#define TUL_SBusId 0x89 /* 09 R SCSI BUS ID */
+#define TUL_STimeOut 0x8A /* 0A W Sel/Resel Time Out Register */
+#define TUL_SIdent 0x8A /* 0A R Identify Message Register */
+#define TUL_SAvail 0x8A /* 0A R Available Counter Register */
+#define TUL_SData 0x8B /* 0B R/W SCSI data in/out */
+#define TUL_SFifo 0x8C /* 0C R/W FIFO */
+#define TUL_SSignal 0x90 /* 10 R/W SCSI signal in/out */
+#define TUL_SCmd 0x91 /* 11 R/W Command */
+#define TUL_STest0 0x92 /* 12 R/W Test0 */
+#define TUL_STest1 0x93 /* 13 R/W Test1 */
+#define TUL_SCFG1 0x94 /* 14 R/W Configuration */
+
+#define TUL_XAddH 0xC0 /*DMA Transfer Physical Address */
+#define TUL_XAddW 0xC8 /*DMA Current Transfer Physical Address */
+#define TUL_XCntH 0xD0 /*DMA Transfer Counter */
+#define TUL_XCntW 0xD4 /*DMA Current Transfer Counter */
+#define TUL_XCmd 0xD8 /*DMA Command Register */
+#define TUL_Int 0xDC /*Interrupt Register */
+#define TUL_XStatus 0xDD /*DMA status Register */
+#define TUL_Mask 0xE0 /*Interrupt Mask Register */
+#define TUL_XCtrl 0xE4 /*DMA Control Register */
+#define TUL_XCtrl1 0xE5 /*DMA Control Register 1 */
+#define TUL_XFifo 0xE8 /*DMA FIFO */
+
+#define TUL_WCtrl 0xF7 /*Bus master wait state control */
+#define TUL_DCtrl 0xFB /*DMA delay control */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Command register of Configuration Space Header */
+/*----------------------------------------------------------------------*/
+#define BUSMS 0x04 /* BUS MASTER Enable */
+#define IOSPA 0x01 /* IO Space Enable */
+
+/*----------------------------------------------------------------------*/
+/* Command Codes of Tulip SCSI Command register */
+/*----------------------------------------------------------------------*/
+#define TSC_EN_RESEL 0x80 /* Enable Reselection */
+#define TSC_CMD_COMP 0x84 /* Command Complete Sequence */
+#define TSC_SEL 0x01 /* Select Without ATN Sequence */
+#define TSC_SEL_ATN 0x11 /* Select With ATN Sequence */
+#define TSC_SEL_ATN_DMA 0x51 /* Select With ATN Sequence with DMA */
+#define TSC_SEL_ATN3 0x31 /* Select With ATN3 Sequence */
+#define TSC_SEL_ATNSTOP 0x12 /* Select With ATN and Stop Sequence */
+#define TSC_SELATNSTOP 0x1E /* Select With ATN and Stop Sequence */
+
+#define TSC_SEL_ATN_DIRECT_IN 0x95 /* Select With ATN Sequence */
+#define TSC_SEL_ATN_DIRECT_OUT 0x15 /* Select With ATN Sequence */
+#define TSC_SEL_ATN3_DIRECT_IN 0xB5 /* Select With ATN3 Sequence */
+#define TSC_SEL_ATN3_DIRECT_OUT 0x35 /* Select With ATN3 Sequence */
+#define TSC_XF_DMA_OUT_DIRECT 0x06 /* DMA Xfer Information out */
+#define TSC_XF_DMA_IN_DIRECT 0x86 /* DMA Xfer Information in */
+
+#define TSC_XF_DMA_OUT 0x43 /* DMA Xfer Information out */
+#define TSC_XF_DMA_IN 0xC3 /* DMA Xfer Information in */
+#define TSC_XF_FIFO_OUT 0x03 /* FIFO Xfer Information out */
+#define TSC_XF_FIFO_IN 0x83 /* FIFO Xfer Information in */
+
+#define TSC_MSG_ACCEPT 0x0F /* Message Accept */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Control 0 Register */
+/*----------------------------------------------------------------------*/
+#define TSC_RST_SEQ 0x20 /* Reset sequence counter */
+#define TSC_FLUSH_FIFO 0x10 /* Flush FIFO */
+#define TSC_ABT_CMD 0x04 /* Abort command (sequence) */
+#define TSC_RST_CHIP 0x02 /* Reset SCSI Chip */
+#define TSC_RST_BUS 0x01 /* Reset SCSI Bus */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Control 1 Register */
+/*----------------------------------------------------------------------*/
+#define TSC_EN_SCAM 0x80 /* Enable SCAM */
+#define TSC_TIMER 0x40 /* Select timeout unit */
+#define TSC_EN_SCSI2 0x20 /* SCSI-2 mode */
+#define TSC_PWDN 0x10 /* Power down mode */
+#define TSC_WIDE_CPU 0x08 /* Wide CPU */
+#define TSC_HW_RESELECT 0x04 /* Enable HW reselect */
+#define TSC_EN_BUS_OUT 0x02 /* Enable SCSI data bus out latch */
+#define TSC_EN_BUS_IN 0x01 /* Enable SCSI data bus in latch */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Configuration Register */
+/*----------------------------------------------------------------------*/
+#define TSC_EN_LATCH 0x80 /* Enable phase latch */
+#define TSC_INITIATOR 0x40 /* Initiator mode */
+#define TSC_EN_SCSI_PAR 0x20 /* Enable SCSI parity */
+#define TSC_DMA_8BIT 0x10 /* Alternate dma 8-bits mode */
+#define TSC_DMA_16BIT 0x08 /* Alternate dma 16-bits mode */
+#define TSC_EN_WDACK 0x04 /* Enable DACK while wide SCSI xfer */
+#define TSC_ALT_PERIOD 0x02 /* Alternate sync period mode */
+#define TSC_DIS_SCSIRST 0x01 /* Disable SCSI bus reset us */
+
+#define TSC_INITDEFAULT (TSC_INITIATOR | TSC_EN_LATCH | TSC_ALT_PERIOD | TSC_DIS_SCSIRST)
+
+#define TSC_WIDE_SCSI 0x80 /* Enable Wide SCSI */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI signal Register */
+/*----------------------------------------------------------------------*/
+#define TSC_RST_ACK 0x00 /* Release ACK signal */
+#define TSC_RST_ATN 0x00 /* Release ATN signal */
+#define TSC_RST_BSY 0x00 /* Release BSY signal */
+
+#define TSC_SET_ACK 0x40 /* ACK signal */
+#define TSC_SET_ATN 0x08 /* ATN signal */
+
+#define TSC_REQI 0x80 /* REQ signal */
+#define TSC_ACKI 0x40 /* ACK signal */
+#define TSC_BSYI 0x20 /* BSY signal */
+#define TSC_SELI 0x10 /* SEL signal */
+#define TSC_ATNI 0x08 /* ATN signal */
+#define TSC_MSGI 0x04 /* MSG signal */
+#define TSC_CDI 0x02 /* C/D signal */
+#define TSC_IOI 0x01 /* I/O signal */
+
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Status 0 Register */
+/*----------------------------------------------------------------------*/
+#define TSS_INT_PENDING 0x80 /* Interrupt pending */
+#define TSS_SEQ_ACTIVE 0x40 /* Sequencer active */
+#define TSS_XFER_CNT 0x20 /* Transfer counter zero */
+#define TSS_FIFO_EMPTY 0x10 /* FIFO empty */
+#define TSS_PAR_ERROR 0x08 /* SCSI parity error */
+#define TSS_PH_MASK 0x07 /* SCSI phase mask */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Status 1 Register */
+/*----------------------------------------------------------------------*/
+#define TSS_STATUS_RCV 0x08 /* Status received */
+#define TSS_MSG_SEND 0x40 /* Message sent */
+#define TSS_CMD_PH_CMP 0x20 /* command phase done */
+#define TSS_DATA_PH_CMP 0x10 /* Data phase done */
+#define TSS_STATUS_SEND 0x08 /* Status sent */
+#define TSS_XFER_CMP 0x04 /* Transfer completed */
+#define TSS_SEL_CMP 0x02 /* Selection completed */
+#define TSS_ARB_CMP 0x01 /* Arbitration completed */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Status 2 Register */
+/*----------------------------------------------------------------------*/
+#define TSS_CMD_ABTED 0x80 /* Command aborted */
+#define TSS_OFFSET_0 0x40 /* Offset counter zero */
+#define TSS_FIFO_FULL 0x20 /* FIFO full */
+#define TSS_TIMEOUT_0 0x10 /* Timeout counter zero */
+#define TSS_BUSY_RLS 0x08 /* Busy release */
+#define TSS_PH_MISMATCH 0x04 /* Phase mismatch */
+#define TSS_SCSI_BUS_EN 0x02 /* SCSI data bus enable */
+#define TSS_SCSIRST 0x01 /* SCSI bus reset in progress */
+
+/*----------------------------------------------------------------------*/
+/* bit definition for Tulip SCSI Interrupt Register */
+/*----------------------------------------------------------------------*/
+#define TSS_RESEL_INT 0x80 /* Reselected interrupt */
+#define TSS_SEL_TIMEOUT 0x40 /* Selected/reselected timeout */
+#define TSS_BUS_SERV 0x20
+#define TSS_SCSIRST_INT 0x10 /* SCSI bus reset detected */
+#define TSS_DISC_INT 0x08 /* Disconnected interrupt */
+#define TSS_SEL_INT 0x04 /* Select interrupt */
+#define TSS_SCAM_SEL 0x02 /* SCAM selected */
+#define TSS_FUNC_COMP 0x01
+
+/*----------------------------------------------------------------------*/
+/* SCSI Phase Codes. */
+/*----------------------------------------------------------------------*/
+#define DATA_OUT 0
+#define DATA_IN 1 /* 4 */
+#define CMD_OUT 2
+#define STATUS_IN 3 /* 6 */
+#define MSG_OUT 6 /* 3 */
+#define MSG_IN 7
+
+
+
+/*----------------------------------------------------------------------*/
+/* Command Codes of Tulip xfer Command register */
+/*----------------------------------------------------------------------*/
+#define TAX_X_FORC 0x02
+#define TAX_X_ABT 0x04
+#define TAX_X_CLR_FIFO 0x08
+
+#define TAX_X_IN 0x21
+#define TAX_X_OUT 0x01
+#define TAX_SG_IN 0xA1
+#define TAX_SG_OUT 0x81
+
+/*----------------------------------------------------------------------*/
+/* Tulip Interrupt Register */
+/*----------------------------------------------------------------------*/
+#define XCMP 0x01
+#define FCMP 0x02
+#define XABT 0x04
+#define XERR 0x08
+#define SCMP 0x10
+#define IPEND 0x80
+
+/*----------------------------------------------------------------------*/
+/* Tulip DMA Status Register */
+/*----------------------------------------------------------------------*/
+#define XPEND 0x01 /* Transfer pending */
+#define FEMPTY 0x02 /* FIFO empty */
+
+
+
+/*----------------------------------------------------------------------*/
+/* bit definition for TUL_GCTRL */
+/*----------------------------------------------------------------------*/
+#define EXTSG 0x80
+#define EXTAD 0x60
+#define SEG4K 0x08
+#define EEPRG 0x04
+#define MRMUL 0x02
+
+/*----------------------------------------------------------------------*/
+/* bit definition for TUL_NVRAM */
+/*----------------------------------------------------------------------*/
+#define SE2CS 0x08
+#define SE2CLK 0x04
+#define SE2DO 0x02
+#define SE2DI 0x01
+
+
+/************************************************************************/
+/* Scatter-Gather Element Structure */
+/************************************************************************/
+struct sg_entry {
+ u32 data; /* Data Pointer */
+ u32 len; /* Data Length */
+};
+
+/***********************************************************************
+ SCSI Control Block
+************************************************************************/
+struct scsi_ctrl_blk {
+ struct scsi_ctrl_blk *next;
+ u8 status; /*4 */
+ u8 next_state; /*5 */
+ u8 mode; /*6 */
+ u8 msgin; /*7 SCB_Res0 */
+ u16 sgidx; /*8 */
+ u16 sgmax; /*A */
+#ifdef ALPHA
+ u32 reserved[2]; /*C */
+#else
+ u32 reserved[3]; /*C */
+#endif
+
+ u32 xferlen; /*18 Current xfer len */
+ u32 totxlen; /*1C Total xfer len */
+ u32 paddr; /*20 SCB phy. Addr. */
+
+ u8 opcode; /*24 SCB command code */
+ u8 flags; /*25 SCB Flags */
+ u8 target; /*26 Target Id */
+ u8 lun; /*27 Lun */
+ u32 bufptr; /*28 Data Buffer Pointer */
+ u32 buflen; /*2C Data Allocation Length */
+ u8 sglen; /*30 SG list # */
+ u8 senselen; /*31 Sense Allocation Length */
+ u8 hastat; /*32 */
+ u8 tastat; /*33 */
+ u8 cdblen; /*34 CDB Length */
+ u8 ident; /*35 Identify */
+ u8 tagmsg; /*36 Tag Message */
+ u8 tagid; /*37 Queue Tag */
+ u8 cdb[12]; /*38 */
+ u32 sgpaddr; /*44 SG List/Sense Buf phy. Addr. */
+ u32 senseptr; /*48 Sense data pointer */
+ void (*post) (u8 *, u8 *); /*4C POST routine */
+ struct scsi_cmnd *srb; /*50 SRB Pointer */
+ struct sg_entry sglist[TOTAL_SG_ENTRY]; /*54 Start of SG list */
+};
+
+/* Bit Definition for status */
+#define SCB_RENT 0x01
+#define SCB_PEND 0x02
+#define SCB_CONTIG 0x04 /* Contingent Allegiance */
+#define SCB_SELECT 0x08
+#define SCB_BUSY 0x10
+#define SCB_DONE 0x20
+
+
+/* Opcodes for opcode */
+#define ExecSCSI 0x1
+#define BusDevRst 0x2
+#define AbortCmd 0x3
+
+
+/* Bit Definition for mode */
+#define SCM_RSENS 0x01 /* request sense mode */
+
+
+/* Bit Definition for flags */
+#define SCF_DONE 0x01
+#define SCF_POST 0x02
+#define SCF_SENSE 0x04
+#define SCF_DIR 0x18
+#define SCF_NO_DCHK 0x00
+#define SCF_DIN 0x08
+#define SCF_DOUT 0x10
+#define SCF_NO_XF 0x18
+#define SCF_WR_VF 0x20 /* Write verify turn on */
+#define SCF_POLL 0x40
+#define SCF_SG 0x80
+
+/* Error Codes for SCB_HaStat */
+#define HOST_SEL_TOUT 0x11
+#define HOST_DO_DU 0x12
+#define HOST_BUS_FREE 0x13
+#define HOST_BAD_PHAS 0x14
+#define HOST_INV_CMD 0x16
+#define HOST_ABORTED 0x1A /* 07/21/98 */
+#define HOST_SCSI_RST 0x1B
+#define HOST_DEV_RST 0x1C
+
+/* Error Codes for SCB_TaStat */
+#define TARGET_CHKCOND 0x02
+#define TARGET_BUSY 0x08
+#define INI_QUEUE_FULL 0x28
+
+/* SCSI MESSAGE */
+#define MSG_COMP 0x00
+#define MSG_EXTEND 0x01
+#define MSG_SDP 0x02
+#define MSG_RESTORE 0x03
+#define MSG_DISC 0x04
+#define MSG_IDE 0x05
+#define MSG_ABORT 0x06
+#define MSG_REJ 0x07
+#define MSG_NOP 0x08
+#define MSG_PARITY 0x09
+#define MSG_LINK_COMP 0x0A
+#define MSG_LINK_FLAG 0x0B
+#define MSG_DEVRST 0x0C
+#define MSG_ABORT_TAG 0x0D
+
+/* Queue tag msg: Simple_quque_tag, Head_of_queue_tag, Ordered_queue_tag */
+#define MSG_STAG 0x20
+#define MSG_HTAG 0x21
+#define MSG_OTAG 0x22
+
+#define MSG_IGNOREWIDE 0x23
+
+#define MSG_IDENT 0x80
+
+/***********************************************************************
+ Target Device Control Structure
+**********************************************************************/
+
+struct target_control {
+ u16 flags;
+ u8 js_period;
+ u8 sconfig0;
+ u16 drv_flags;
+ u8 heads;
+ u8 sectors;
+};
+
+/***********************************************************************
+ Target Device Control Structure
+**********************************************************************/
+
+/* Bit Definition for TCF_Flags */
+#define TCF_SCSI_RATE 0x0007
+#define TCF_EN_DISC 0x0008
+#define TCF_NO_SYNC_NEGO 0x0010
+#define TCF_NO_WDTR 0x0020
+#define TCF_EN_255 0x0040
+#define TCF_EN_START 0x0080
+#define TCF_WDTR_DONE 0x0100
+#define TCF_SYNC_DONE 0x0200
+#define TCF_BUSY 0x0400
+
+
+/* Bit Definition for TCF_DrvFlags */
+#define TCF_DRV_BUSY 0x01 /* Indicate target busy(driver) */
+#define TCF_DRV_EN_TAG 0x0800
+#define TCF_DRV_255_63 0x0400
+
+/***********************************************************************
+ Host Adapter Control Structure
+************************************************************************/
+struct initio_host {
+ u16 addr; /* 00 */
+ u16 bios_addr; /* 02 */
+ u8 irq; /* 04 */
+ u8 scsi_id; /* 05 */
+ u8 max_tar; /* 06 */
+ u8 num_scbs; /* 07 */
+
+ u8 flags; /* 08 */
+ u8 index; /* 09 */
+ u8 ha_id; /* 0A */
+ u8 config; /* 0B */
+ u16 idmask; /* 0C */
+ u8 semaph; /* 0E */
+ u8 phase; /* 0F */
+ u8 jsstatus0; /* 10 */
+ u8 jsint; /* 11 */
+ u8 jsstatus1; /* 12 */
+ u8 sconf1; /* 13 */
+
+ u8 msg[8]; /* 14 */
+ struct scsi_ctrl_blk *next_avail; /* 1C */
+ struct scsi_ctrl_blk *scb; /* 20 */
+ struct scsi_ctrl_blk *scb_end; /* 24 */ /*UNUSED*/
+ struct scsi_ctrl_blk *next_pending; /* 28 */
+ struct scsi_ctrl_blk *next_contig; /* 2C */ /*UNUSED*/
+ struct scsi_ctrl_blk *active; /* 30 */
+ struct target_control *active_tc; /* 34 */
+
+ struct scsi_ctrl_blk *first_avail; /* 38 */
+ struct scsi_ctrl_blk *last_avail; /* 3C */
+ struct scsi_ctrl_blk *first_pending; /* 40 */
+ struct scsi_ctrl_blk *last_pending; /* 44 */
+ struct scsi_ctrl_blk *first_busy; /* 48 */
+ struct scsi_ctrl_blk *last_busy; /* 4C */
+ struct scsi_ctrl_blk *first_done; /* 50 */
+ struct scsi_ctrl_blk *last_done; /* 54 */
+ u8 max_tags[16]; /* 58 */
+ u8 act_tags[16]; /* 68 */
+ struct target_control targets[MAX_TARGETS]; /* 78 */
+ spinlock_t avail_lock;
+ spinlock_t semaph_lock;
+ struct pci_dev *pci_dev;
+};
+
+/* Bit Definition for HCB_Config */
+#define HCC_SCSI_RESET 0x01
+#define HCC_EN_PAR 0x02
+#define HCC_ACT_TERM1 0x04
+#define HCC_ACT_TERM2 0x08
+#define HCC_AUTO_TERM 0x10
+#define HCC_EN_PWR 0x80
+
+/* Bit Definition for HCB_Flags */
+#define HCF_EXPECT_DISC 0x01
+#define HCF_EXPECT_SELECT 0x02
+#define HCF_EXPECT_RESET 0x10
+#define HCF_EXPECT_DONE_DISC 0x20
+
+/******************************************************************
+ Serial EEProm
+*******************************************************************/
+
+typedef struct _NVRAM_SCSI { /* SCSI channel configuration */
+ u8 NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */
+ u8 NVM_ChConfig1; /* 0Dh -> Channel config 1 */
+ u8 NVM_ChConfig2; /* 0Eh -> Channel config 2 */
+ u8 NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */
+ /* SCSI target configuration */
+ u8 NVM_Targ0Config; /* 10h -> Target 0 configuration */
+ u8 NVM_Targ1Config; /* 11h -> Target 1 configuration */
+ u8 NVM_Targ2Config; /* 12h -> Target 2 configuration */
+ u8 NVM_Targ3Config; /* 13h -> Target 3 configuration */
+ u8 NVM_Targ4Config; /* 14h -> Target 4 configuration */
+ u8 NVM_Targ5Config; /* 15h -> Target 5 configuration */
+ u8 NVM_Targ6Config; /* 16h -> Target 6 configuration */
+ u8 NVM_Targ7Config; /* 17h -> Target 7 configuration */
+ u8 NVM_Targ8Config; /* 18h -> Target 8 configuration */
+ u8 NVM_Targ9Config; /* 19h -> Target 9 configuration */
+ u8 NVM_TargAConfig; /* 1Ah -> Target A configuration */
+ u8 NVM_TargBConfig; /* 1Bh -> Target B configuration */
+ u8 NVM_TargCConfig; /* 1Ch -> Target C configuration */
+ u8 NVM_TargDConfig; /* 1Dh -> Target D configuration */
+ u8 NVM_TargEConfig; /* 1Eh -> Target E configuration */
+ u8 NVM_TargFConfig; /* 1Fh -> Target F configuration */
+} NVRAM_SCSI;
+
+typedef struct _NVRAM {
+/*----------header ---------------*/
+ u16 NVM_Signature; /* 0,1: Signature */
+ u8 NVM_Size; /* 2: Size of data structure */
+ u8 NVM_Revision; /* 3: Revision of data structure */
+ /* ----Host Adapter Structure ---- */
+ u8 NVM_ModelByte0; /* 4: Model number (byte 0) */
+ u8 NVM_ModelByte1; /* 5: Model number (byte 1) */
+ u8 NVM_ModelInfo; /* 6: Model information */
+ u8 NVM_NumOfCh; /* 7: Number of SCSI channel */
+ u8 NVM_BIOSConfig1; /* 8: BIOS configuration 1 */
+ u8 NVM_BIOSConfig2; /* 9: BIOS configuration 2 */
+ u8 NVM_HAConfig1; /* A: Hoat adapter configuration 1 */
+ u8 NVM_HAConfig2; /* B: Hoat adapter configuration 2 */
+ NVRAM_SCSI NVM_SCSIInfo[2];
+ u8 NVM_reserved[10];
+ /* ---------- CheckSum ---------- */
+ u16 NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */
+} NVRAM, *PNVRAM;
+
+/* Bios Configuration for nvram->BIOSConfig1 */
+#define NBC1_ENABLE 0x01 /* BIOS enable */
+#define NBC1_8DRIVE 0x02 /* Support more than 2 drives */
+#define NBC1_REMOVABLE 0x04 /* Support removable drive */
+#define NBC1_INT19 0x08 /* Intercept int 19h */
+#define NBC1_BIOSSCAN 0x10 /* Dynamic BIOS scan */
+#define NBC1_LUNSUPPORT 0x40 /* Support LUN */
+
+/* HA Configuration Byte 1 */
+#define NHC1_BOOTIDMASK 0x0F /* Boot ID number */
+#define NHC1_LUNMASK 0x70 /* Boot LUN number */
+#define NHC1_CHANMASK 0x80 /* Boot Channel number */
+
+/* Bit definition for nvram->SCSIconfig1 */
+#define NCC1_BUSRESET 0x01 /* Reset SCSI bus at power up */
+#define NCC1_PARITYCHK 0x02 /* SCSI parity enable */
+#define NCC1_ACTTERM1 0x04 /* Enable active terminator 1 */
+#define NCC1_ACTTERM2 0x08 /* Enable active terminator 2 */
+#define NCC1_AUTOTERM 0x10 /* Enable auto terminator */
+#define NCC1_PWRMGR 0x80 /* Enable power management */
+
+/* Bit definition for SCSI Target configuration byte */
+#define NTC_DISCONNECT 0x08 /* Enable SCSI disconnect */
+#define NTC_SYNC 0x10 /* SYNC_NEGO */
+#define NTC_NO_WDTR 0x20 /* SYNC_NEGO */
+#define NTC_1GIGA 0x40 /* 255 head / 63 sectors (64/32) */
+#define NTC_SPINUP 0x80 /* Start disk drive */
+
+/* Default NVRam values */
+#define INI_SIGNATURE 0xC925
+#define NBC1_DEFAULT (NBC1_ENABLE)
+#define NCC1_DEFAULT (NCC1_BUSRESET | NCC1_AUTOTERM | NCC1_PARITYCHK)
+#define NTC_DEFAULT (NTC_NO_WDTR | NTC_1GIGA | NTC_DISCONNECT)
+
+/* SCSI related definition */
+#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */
+#define DISC_ALLOW 0xC0 /* Disconnect is allowed */
+#define SCSICMD_RequestSense 0x03
+
+#define SCSI_ABORT_SNOOZE 0
+#define SCSI_ABORT_SUCCESS 1
+#define SCSI_ABORT_PENDING 2
+#define SCSI_ABORT_BUSY 3
+#define SCSI_ABORT_NOT_RUNNING 4
+#define SCSI_ABORT_ERROR 5
+
+#define SCSI_RESET_SNOOZE 0
+#define SCSI_RESET_PUNT 1
+#define SCSI_RESET_SUCCESS 2
+#define SCSI_RESET_PENDING 3
+#define SCSI_RESET_WAKEUP 4
+#define SCSI_RESET_NOT_RUNNING 5
+#define SCSI_RESET_ERROR 6
+
+#define SCSI_RESET_SYNCHRONOUS 0x01
+#define SCSI_RESET_ASYNCHRONOUS 0x02
+#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
+#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
+
+#define SCSI_RESET_BUS_RESET 0x100
+#define SCSI_RESET_HOST_RESET 0x200
+#define SCSI_RESET_ACTION 0xff
+
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
new file mode 100644
index 000000000..882744852
--- /dev/null
+++ b/drivers/scsi/ipr.c
@@ -0,0 +1,10536 @@
+/*
+ * ipr.c -- driver for IBM Power Linux RAID adapters
+ *
+ * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2003, 2004 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+ * Notes:
+ *
+ * This driver is used to control the following SCSI adapters:
+ *
+ * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
+ *
+ * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
+ * PCI-X Dual Channel Ultra 320 SCSI Adapter
+ * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
+ * Embedded SCSI adapter on p615 and p655 systems
+ *
+ * Supported Hardware Features:
+ * - Ultra 320 SCSI controller
+ * - PCI-X host interface
+ * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
+ * - Non-Volatile Write Cache
+ * - Supports attachment of non-RAID disks, tape, and optical devices
+ * - RAID Levels 0, 5, 10
+ * - Hot spare
+ * - Background Parity Checking
+ * - Background Data Scrubbing
+ * - Ability to increase the capacity of an existing RAID 5 disk array
+ * by adding disks
+ *
+ * Driver Features:
+ * - Tagged command queuing
+ * - Adapter microcode download
+ * - PCI hot plug
+ * - SCSI device hot plug
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/libata.h>
+#include <linux/hdreg.h>
+#include <linux/reboot.h>
+#include <linux/stringify.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_cmnd.h>
+#include "ipr.h"
+
+/*
+ * Global Data
+ */
+static LIST_HEAD(ipr_ioa_head);
+static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
+static unsigned int ipr_max_speed = 1;
+static int ipr_testmode = 0;
+static unsigned int ipr_fastfail = 0;
+static unsigned int ipr_transop_timeout = 0;
+static unsigned int ipr_debug = 0;
+static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
+static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
+static unsigned int ipr_fast_reboot;
+static DEFINE_SPINLOCK(ipr_driver_lock);
+
+/* This table describes the differences between DMA controller chips */
+static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
+ { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
+ .mailbox = 0x0042C,
+ .max_cmds = 100,
+ .cache_line_size = 0x20,
+ .clear_isr = 1,
+ .iopoll_weight = 0,
+ {
+ .set_interrupt_mask_reg = 0x0022C,
+ .clr_interrupt_mask_reg = 0x00230,
+ .clr_interrupt_mask_reg32 = 0x00230,
+ .sense_interrupt_mask_reg = 0x0022C,
+ .sense_interrupt_mask_reg32 = 0x0022C,
+ .clr_interrupt_reg = 0x00228,
+ .clr_interrupt_reg32 = 0x00228,
+ .sense_interrupt_reg = 0x00224,
+ .sense_interrupt_reg32 = 0x00224,
+ .ioarrin_reg = 0x00404,
+ .sense_uproc_interrupt_reg = 0x00214,
+ .sense_uproc_interrupt_reg32 = 0x00214,
+ .set_uproc_interrupt_reg = 0x00214,
+ .set_uproc_interrupt_reg32 = 0x00214,
+ .clr_uproc_interrupt_reg = 0x00218,
+ .clr_uproc_interrupt_reg32 = 0x00218
+ }
+ },
+ { /* Snipe and Scamp */
+ .mailbox = 0x0052C,
+ .max_cmds = 100,
+ .cache_line_size = 0x20,
+ .clear_isr = 1,
+ .iopoll_weight = 0,
+ {
+ .set_interrupt_mask_reg = 0x00288,
+ .clr_interrupt_mask_reg = 0x0028C,
+ .clr_interrupt_mask_reg32 = 0x0028C,
+ .sense_interrupt_mask_reg = 0x00288,
+ .sense_interrupt_mask_reg32 = 0x00288,
+ .clr_interrupt_reg = 0x00284,
+ .clr_interrupt_reg32 = 0x00284,
+ .sense_interrupt_reg = 0x00280,
+ .sense_interrupt_reg32 = 0x00280,
+ .ioarrin_reg = 0x00504,
+ .sense_uproc_interrupt_reg = 0x00290,
+ .sense_uproc_interrupt_reg32 = 0x00290,
+ .set_uproc_interrupt_reg = 0x00290,
+ .set_uproc_interrupt_reg32 = 0x00290,
+ .clr_uproc_interrupt_reg = 0x00294,
+ .clr_uproc_interrupt_reg32 = 0x00294
+ }
+ },
+ { /* CRoC */
+ .mailbox = 0x00044,
+ .max_cmds = 1000,
+ .cache_line_size = 0x20,
+ .clear_isr = 0,
+ .iopoll_weight = 64,
+ {
+ .set_interrupt_mask_reg = 0x00010,
+ .clr_interrupt_mask_reg = 0x00018,
+ .clr_interrupt_mask_reg32 = 0x0001C,
+ .sense_interrupt_mask_reg = 0x00010,
+ .sense_interrupt_mask_reg32 = 0x00014,
+ .clr_interrupt_reg = 0x00008,
+ .clr_interrupt_reg32 = 0x0000C,
+ .sense_interrupt_reg = 0x00000,
+ .sense_interrupt_reg32 = 0x00004,
+ .ioarrin_reg = 0x00070,
+ .sense_uproc_interrupt_reg = 0x00020,
+ .sense_uproc_interrupt_reg32 = 0x00024,
+ .set_uproc_interrupt_reg = 0x00020,
+ .set_uproc_interrupt_reg32 = 0x00024,
+ .clr_uproc_interrupt_reg = 0x00028,
+ .clr_uproc_interrupt_reg32 = 0x0002C,
+ .init_feedback_reg = 0x0005C,
+ .dump_addr_reg = 0x00064,
+ .dump_data_reg = 0x00068,
+ .endian_swap_reg = 0x00084
+ }
+ },
+};
+
+static const struct ipr_chip_t ipr_chip[] = {
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+};
+
+static int ipr_max_bus_speeds[] = {
+ IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
+};
+
+MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
+MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
+module_param_named(max_speed, ipr_max_speed, uint, 0);
+MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
+module_param_named(log_level, ipr_log_level, uint, 0);
+MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
+module_param_named(testmode, ipr_testmode, int, 0);
+MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
+module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
+module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
+MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
+module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
+module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
+MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
+module_param_named(max_devs, ipr_max_devs, int, 0);
+MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
+ "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
+module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IPR_DRIVER_VERSION);
+
+/* A constant array of IOASCs/URCs/Error Messages */
+static const
+struct ipr_error_table_t ipr_error_table[] = {
+ {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "8155: An unknown error was received"},
+ {0x00330000, 0, 0,
+ "Soft underlength error"},
+ {0x005A0000, 0, 0,
+ "Command to be cancelled not found"},
+ {0x00808000, 0, 0,
+ "Qualified success"},
+ {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FFFE: Soft device bus error recovered by the IOA"},
+ {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4101: Soft device bus fabric error"},
+ {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFC: Logical block guard error recovered by the device"},
+ {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFC: Logical block reference tag error recovered by the device"},
+ {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4171: Recovered scatter list tag / sequence number error"},
+ {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
+ {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4171: Recovered logical block sequence number error on IOA to Host transfer"},
+ {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFD: Recovered logical block reference tag error detected by the IOA"},
+ {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFD: Logical block guard error recovered by the IOA"},
+ {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF9: Device sector reassign successful"},
+ {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF7: Media error recovered by device rewrite procedures"},
+ {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "7001: IOA sector reassignment successful"},
+ {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF9: Soft media error. Sector reassignment recommended"},
+ {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF7: Media error recovered by IOA rewrite procedures"},
+ {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FF3D: Soft PCI bus error recovered by the IOA"},
+ {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FFF6: Device hardware error recovered by the IOA"},
+ {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF6: Device hardware error recovered by the device"},
+ {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FF3D: Soft IOA error recovered by the IOA"},
+ {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFA: Undefined device response recovered by the IOA"},
+ {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FFF6: Device bus error, message or command phase"},
+ {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFE: Task Management Function failed"},
+ {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF6: Failure prediction threshold exceeded"},
+ {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "8009: Impending cache battery pack failure"},
+ {0x02040100, 0, 0,
+ "Logical Unit in process of becoming ready"},
+ {0x02040200, 0, 0,
+ "Initializing command required"},
+ {0x02040400, 0, 0,
+ "34FF: Disk device format in progress"},
+ {0x02040C00, 0, 0,
+ "Logical unit not accessible, target port in unavailable state"},
+ {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9070: IOA requested reset"},
+ {0x023F0000, 0, 0,
+ "Synchronization required"},
+ {0x02408500, 0, 0,
+ "IOA microcode download required"},
+ {0x02408600, 0, 0,
+ "Device bus connection is prohibited by host"},
+ {0x024E0000, 0, 0,
+ "No ready, IOA shutdown"},
+ {0x025A0000, 0, 0,
+ "Not ready, IOA has been shutdown"},
+ {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3020: Storage subsystem configuration error"},
+ {0x03110B00, 0, 0,
+ "FFF5: Medium error, data unreadable, recommend reassign"},
+ {0x03110C00, 0, 0,
+ "7000: Medium error, data unreadable, do not reassign"},
+ {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF3: Disk media format bad"},
+ {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3002: Addressed device failed to respond to selection"},
+ {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "3100: Device bus error"},
+ {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3109: IOA timed out a device command"},
+ {0x04088000, 0, 0,
+ "3120: SCSI bus is not operational"},
+ {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4100: Hard device bus fabric error"},
+ {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310C: Logical block guard error detected by the device"},
+ {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310C: Logical block reference tag error detected by the device"},
+ {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
+ "4170: Scatter list tag / sequence number error"},
+ {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "8150: Logical block CRC error on IOA to Host transfer"},
+ {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
+ "4170: Logical block sequence number error on IOA to Host transfer"},
+ {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310D: Logical block reference tag error detected by the IOA"},
+ {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310D: Logical block guard error detected by the IOA"},
+ {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9000: IOA reserved area data check"},
+ {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9001: IOA reserved area invalid data pattern"},
+ {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9002: IOA reserved area LRC error"},
+ {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
+ "Hardware Error, IOA metadata access error"},
+ {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "102E: Out of alternate sectors for disk storage"},
+ {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FFF4: Data transfer underlength error"},
+ {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FFF4: Data transfer overlength error"},
+ {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3400: Logical unit failure"},
+ {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF4: Device microcode is corrupt"},
+ {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "8150: PCI bus error"},
+ {0x04430000, 1, 0,
+ "Unsupported device bus message received"},
+ {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "FFF4: Disk device problem"},
+ {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
+ "8150: Permanent IOA failure"},
+ {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3010: Disk device returned wrong response to IOA"},
+ {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
+ "8151: IOA microcode error"},
+ {0x04448500, 0, 0,
+ "Device bus status error"},
+ {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "8157: IOA error requiring IOA reset to recover"},
+ {0x04448700, 0, 0,
+ "ATA device status error"},
+ {0x04490000, 0, 0,
+ "Message reject received from the device"},
+ {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "8008: A permanent cache battery pack failure occurred"},
+ {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9090: Disk unit has been modified after the last known status"},
+ {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9081: IOA detected device error"},
+ {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9082: IOA detected device error"},
+ {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "3110: Device bus error, message or command phase"},
+ {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "3110: SAS Command / Task Management Function failed"},
+ {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9091: Incorrect hardware configuration change has been detected"},
+ {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9073: Invalid multi-adapter configuration"},
+ {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4010: Incorrect connection between cascaded expanders"},
+ {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4020: Connections exceed IOA design limits"},
+ {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4030: Incorrect multipath connection"},
+ {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4110: Unsupported enclosure function"},
+ {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4120: SAS cable VPD cannot be read"},
+ {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFF4: Command to logical unit failed"},
+ {0x05240000, 1, 0,
+ "Illegal request, invalid request type or request packet"},
+ {0x05250000, 0, 0,
+ "Illegal request, invalid resource handle"},
+ {0x05258000, 0, 0,
+ "Illegal request, commands not allowed to this device"},
+ {0x05258100, 0, 0,
+ "Illegal request, command not allowed to a secondary adapter"},
+ {0x05258200, 0, 0,
+ "Illegal request, command not allowed to a non-optimized resource"},
+ {0x05260000, 0, 0,
+ "Illegal request, invalid field in parameter list"},
+ {0x05260100, 0, 0,
+ "Illegal request, parameter not supported"},
+ {0x05260200, 0, 0,
+ "Illegal request, parameter value invalid"},
+ {0x052C0000, 0, 0,
+ "Illegal request, command sequence error"},
+ {0x052C8000, 1, 0,
+ "Illegal request, dual adapter support not enabled"},
+ {0x052C8100, 1, 0,
+ "Illegal request, another cable connector was physically disabled"},
+ {0x054E8000, 1, 0,
+ "Illegal request, inconsistent group id/group count"},
+ {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9031: Array protection temporarily suspended, protection resuming"},
+ {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9040: Array protection temporarily suspended, protection resuming"},
+ {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4080: IOA exceeded maximum operating temperature"},
+ {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4085: Service required"},
+ {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3140: Device bus not ready to ready transition"},
+ {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFB: SCSI bus was reset"},
+ {0x06290500, 0, 0,
+ "FFFE: SCSI bus transition to single ended"},
+ {0x06290600, 0, 0,
+ "FFFE: SCSI bus transition to LVD"},
+ {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFB: SCSI bus was reset by another initiator"},
+ {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3029: A device replacement has occurred"},
+ {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4102: Device bus fabric performance degradation"},
+ {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9051: IOA cache data exists for a missing or failed device"},
+ {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
+ {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9025: Disk unit is not supported at its physical location"},
+ {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3020: IOA detected a SCSI bus configuration error"},
+ {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "3150: SCSI bus configuration error"},
+ {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9074: Asymmetric advanced function disk configuration"},
+ {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4040: Incomplete multipath connection between IOA and enclosure"},
+ {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4041: Incomplete multipath connection between enclosure and device"},
+ {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9075: Incomplete multipath connection between IOA and remote IOA"},
+ {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9076: Configuration error, missing remote IOA"},
+ {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4050: Enclosure does not support a required multipath function"},
+ {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4121: Configuration error, required cable is missing"},
+ {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4122: Cable is not plugged into the correct location on remote IOA"},
+ {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4123: Configuration error, invalid cable vital product data"},
+ {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4124: Configuration error, both cable ends are plugged into the same IOA"},
+ {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4070: Logically bad block written on device"},
+ {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9041: Array protection temporarily suspended"},
+ {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9042: Corrupt array parity detected on specified device"},
+ {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9030: Array no longer protected due to missing or failed disk unit"},
+ {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9071: Link operational transition"},
+ {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9072: Link not operational transition"},
+ {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9032: Array exposed but still protected"},
+ {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
+ "70DD: Device forced failed by disrupt device command"},
+ {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4061: Multipath redundancy level got better"},
+ {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4060: Multipath redundancy level got worse"},
+ {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9083: Device raw mode enabled"},
+ {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9084: Device raw mode disabled"},
+ {0x07270000, 0, 0,
+ "Failure due to other device"},
+ {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9008: IOA does not support functions expected by devices"},
+ {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9010: Cache data associated with attached devices cannot be found"},
+ {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9011: Cache data belongs to devices other than those attached"},
+ {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9020: Array missing 2 or more devices with only 1 device present"},
+ {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9021: Array missing 2 or more devices with 2 or more devices present"},
+ {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9022: Exposed array is missing a required device"},
+ {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9023: Array member(s) not at required physical locations"},
+ {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9024: Array not functional due to present hardware configuration"},
+ {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9026: Array not functional due to present hardware configuration"},
+ {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9027: Array is missing a device and parity is out of sync"},
+ {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9028: Maximum number of arrays already exist"},
+ {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9050: Required cache data cannot be located for a disk unit"},
+ {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9052: Cache data exists for a device that has been modified"},
+ {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9054: IOA resources not available due to previous problems"},
+ {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9092: Disk unit requires initialization before use"},
+ {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9029: Incorrect hardware configuration change has been detected"},
+ {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9060: One or more disk pairs are missing from an array"},
+ {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9061: One or more disks are missing from an array"},
+ {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9062: One or more disks are missing from an array"},
+ {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9063: Maximum number of functional arrays has been exceeded"},
+ {0x07279A00, 0, 0,
+ "Data protect, other volume set problem"},
+ {0x0B260000, 0, 0,
+ "Aborted command, invalid descriptor"},
+ {0x0B3F9000, 0, 0,
+ "Target operating conditions have changed, dual adapter takeover"},
+ {0x0B530200, 0, 0,
+ "Aborted command, medium removal prevented"},
+ {0x0B5A0000, 0, 0,
+ "Command terminated by host"},
+ {0x0B5B8000, 0, 0,
+ "Aborted command, command terminated by host"}
+};
+
+static const struct ipr_ses_table_entry ipr_ses_table[] = {
+ { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
+ { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
+ { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
+ { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
+ { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
+ { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
+ { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
+ { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
+ { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
+ { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
+ { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
+ { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
+ { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
+};
+
+/*
+ * Function Prototypes
+ */
+static int ipr_reset_alert(struct ipr_cmnd *);
+static void ipr_process_ccn(struct ipr_cmnd *);
+static void ipr_process_error(struct ipr_cmnd *);
+static void ipr_reset_ioa_job(struct ipr_cmnd *);
+static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
+ enum ipr_shutdown_type);
+
+#ifdef CONFIG_SCSI_IPR_TRACE
+/**
+ * ipr_trc_hook - Add a trace entry to the driver trace
+ * @ipr_cmd: ipr command struct
+ * @type: trace type
+ * @add_data: additional data
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
+ u8 type, u32 add_data)
+{
+ struct ipr_trace_entry *trace_entry;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ trace_entry = &ioa_cfg->trace[atomic_add_return
+ (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+ trace_entry->time = jiffies;
+ trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
+ trace_entry->type = type;
+ if (ipr_cmd->ioa_cfg->sis64)
+ trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
+ else
+ trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
+ trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
+ trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
+ trace_entry->u.add_data = add_data;
+ wmb();
+}
+#else
+#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
+#endif
+
+/**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long lock_flags;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_cmd->done(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
+ * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
+ dma_addr_t dma_addr = ipr_cmd->dma_addr;
+ int hrrq_id;
+
+ hrrq_id = ioarcb->cmd_pkt.hrrq_id;
+ memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+ ioarcb->cmd_pkt.hrrq_id = hrrq_id;
+ ioarcb->data_transfer_length = 0;
+ ioarcb->read_data_transfer_length = 0;
+ ioarcb->ioadl_len = 0;
+ ioarcb->read_ioadl_len = 0;
+
+ if (ipr_cmd->ioa_cfg->sis64) {
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+ ioasa64->u.gata.status = 0;
+ } else {
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ ioasa->u.gata.status = 0;
+ }
+
+ ioasa->hdr.ioasc = 0;
+ ioasa->hdr.residual_data_len = 0;
+ ipr_cmd->scsi_cmd = NULL;
+ ipr_cmd->qc = NULL;
+ ipr_cmd->sense_buffer[0] = 0;
+ ipr_cmd->dma_use_sg = 0;
+}
+
+/**
+ * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+ void (*fast_done) (struct ipr_cmnd *))
+{
+ ipr_reinit_ipr_cmnd(ipr_cmd);
+ ipr_cmd->u.scratch = 0;
+ ipr_cmd->sibling = NULL;
+ ipr_cmd->eh_comp = NULL;
+ ipr_cmd->fast_done = fast_done;
+ init_timer(&ipr_cmd->timer);
+}
+
+/**
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
+{
+ struct ipr_cmnd *ipr_cmd = NULL;
+
+ if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+ ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+ struct ipr_cmnd, queue);
+ list_del(&ipr_cmd->queue);
+ }
+
+
+ return ipr_cmd;
+}
+
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_cmnd *ipr_cmd =
+ __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+ return ipr_cmd;
+}
+
+/**
+ * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
+ * @ioa_cfg: ioa config struct
+ * @clr_ints: interrupts to clear
+ *
+ * This function masks all interrupts on the adapter, then clears the
+ * interrupts specified in the mask
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
+ u32 clr_ints)
+{
+ volatile u32 int_reg;
+ int i;
+
+ /* Stop new interrupts */
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+
+ /* Set interrupt mask to stop all new interrupts */
+ if (ioa_cfg->sis64)
+ writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+ else
+ writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+
+ /* Clear any pending interrupts */
+ if (ioa_cfg->sis64)
+ writel(~0, ioa_cfg->regs.clr_interrupt_reg);
+ writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+}
+
+/**
+ * ipr_save_pcix_cmd_reg - Save PCI-X command register
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 on success / -EIO on failure
+ **/
+static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
+
+ if (pcix_cmd_reg == 0)
+ return 0;
+
+ if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
+ &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
+ dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
+ return -EIO;
+ }
+
+ ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
+ return 0;
+}
+
+/**
+ * ipr_set_pcix_cmd_reg - Setup PCI-X command register
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 on success / -EIO on failure
+ **/
+static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
+
+ if (pcix_cmd_reg) {
+ if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
+ ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
+ dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ipr_sata_eh_done - done function for aborted SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked for ops generated to SATA
+ * devices which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ata_queued_cmd *qc = ipr_cmd->qc;
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+
+ qc->err_mask |= AC_ERR_OTHER;
+ sata_port->ioasa.status |= ATA_BUSY;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ ata_qc_complete(qc);
+}
+
+/**
+ * ipr_scsi_eh_done - mid-layer done function for aborted ops
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+
+ scsi_cmd->result |= (DID_ERROR << 16);
+
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+}
+
+/**
+ * ipr_fail_all_ops - Fails all outstanding ops.
+ * @ioa_cfg: ioa config struct
+ *
+ * This function fails all outstanding ops.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_cmnd *ipr_cmd, *temp;
+ struct ipr_hrr_queue *hrrq;
+
+ ENTER;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry_safe(ipr_cmd,
+ temp, &hrrq->hrrq_pending_q, queue) {
+ list_del(&ipr_cmd->queue);
+
+ ipr_cmd->s.ioasa.hdr.ioasc =
+ cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+ ipr_cmd->s.ioasa.hdr.ilid =
+ cpu_to_be32(IPR_DRIVER_ILID);
+
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ else if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+ IPR_IOASC_IOA_WAS_RESET);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->done(ipr_cmd);
+ }
+ spin_unlock(&hrrq->_lock);
+ }
+ LEAVE;
+}
+
+/**
+ * ipr_send_command - Send driver initiated requests.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a command to the adapter using the correct write call.
+ * In the case of sis64, calculate the ioarcb size required. Then or in the
+ * appropriate bits.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
+
+ if (ioa_cfg->sis64) {
+ /* The default size is 256 bytes */
+ send_dma_addr |= 0x1;
+
+ /* If the number of ioadls * size of ioadl > 128 bytes,
+ then use a 512 byte ioarcb */
+ if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
+ send_dma_addr |= 0x4;
+ writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+ } else
+ writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+}
+
+/**
+ * ipr_do_req - Send driver initiated requests.
+ * @ipr_cmd: ipr command struct
+ * @done: done function
+ * @timeout_func: timeout function
+ * @timeout: timeout value
+ *
+ * This function sends the specified command to the adapter with the
+ * timeout given. The done function is invoked on command completion.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
+ void (*done) (struct ipr_cmnd *),
+ void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
+{
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+
+ ipr_cmd->done = done;
+
+ ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+ ipr_cmd->timer.expires = jiffies + timeout;
+ ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
+
+ add_timer(&ipr_cmd->timer);
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
+
+ ipr_send_command(ipr_cmd);
+}
+
+/**
+ * ipr_internal_cmd_done - Op done function for an internally generated op.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is the op done function for an internally generated,
+ * blocking op. It simply wakes the sleeping thread.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
+{
+ if (ipr_cmd->sibling)
+ ipr_cmd->sibling = NULL;
+ else
+ complete(&ipr_cmd->completion);
+}
+
+/**
+ * ipr_init_ioadl - initialize the ioadl for the correct SIS type
+ * @ipr_cmd: ipr command struct
+ * @dma_addr: dma address
+ * @len: transfer length
+ * @flags: ioadl flag value
+ *
+ * This function initializes an ioadl in the case where there is only a single
+ * descriptor.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
+ u32 len, int flags)
+{
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+
+ ipr_cmd->dma_use_sg = 1;
+
+ if (ipr_cmd->ioa_cfg->sis64) {
+ ioadl64->flags = cpu_to_be32(flags);
+ ioadl64->data_len = cpu_to_be32(len);
+ ioadl64->address = cpu_to_be64(dma_addr);
+
+ ipr_cmd->ioarcb.ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
+ ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+ } else {
+ ioadl->flags_and_data_len = cpu_to_be32(flags | len);
+ ioadl->address = cpu_to_be32(dma_addr);
+
+ if (flags == IPR_IOADL_FLAGS_READ_LAST) {
+ ipr_cmd->ioarcb.read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+ ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
+ } else {
+ ipr_cmd->ioarcb.ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+ ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+ }
+ }
+}
+
+/**
+ * ipr_send_blocking_cmd - Send command and sleep on its completion.
+ * @ipr_cmd: ipr command struct
+ * @timeout_func: function to invoke if command times out
+ * @timeout: timeout
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
+ void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
+ u32 timeout)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ init_completion(&ipr_cmd->completion);
+ ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
+
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ wait_for_completion(&ipr_cmd->completion);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+}
+
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+ if (ioa_cfg->hrrq_num == 1)
+ return 0;
+ else
+ return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
+
+/**
+ * ipr_send_hcam - Send an HCAM to the adapter.
+ * @ioa_cfg: ioa config struct
+ * @type: HCAM type
+ * @hostrcb: hostrcb struct
+ *
+ * This function will send a Host Controlled Async command to the adapter.
+ * If HCAMs are currently not allowed to be issued to the adapter, it will
+ * place the hostrcb on the free queue.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioarcb *ioarcb;
+
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+ list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
+
+ ipr_cmd->u.hostrcb = hostrcb;
+ ioarcb = &ipr_cmd->ioarcb;
+
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
+ ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
+ ioarcb->cmd_pkt.cdb[1] = type;
+ ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
+
+ ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
+ sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
+
+ if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
+ ipr_cmd->done = ipr_process_ccn;
+ else
+ ipr_cmd->done = ipr_process_error;
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
+
+ ipr_send_command(ipr_cmd);
+ } else {
+ list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+ }
+}
+
+/**
+ * ipr_update_ata_class - Update the ata class in the resource entry
+ * @res: resource entry struct
+ * @proto: cfgte device bus protocol value
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
+{
+ switch (proto) {
+ case IPR_PROTO_SATA:
+ case IPR_PROTO_SAS_STP:
+ res->ata_class = ATA_DEV_ATA;
+ break;
+ case IPR_PROTO_SATA_ATAPI:
+ case IPR_PROTO_SAS_STP_ATAPI:
+ res->ata_class = ATA_DEV_ATAPI;
+ break;
+ default:
+ res->ata_class = ATA_DEV_UNKNOWN;
+ break;
+ };
+}
+
+/**
+ * ipr_init_res_entry - Initialize a resource entry struct.
+ * @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_init_res_entry(struct ipr_resource_entry *res,
+ struct ipr_config_table_entry_wrapper *cfgtew)
+{
+ int found = 0;
+ unsigned int proto;
+ struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+ struct ipr_resource_entry *gscsi_res = NULL;
+
+ res->needs_sync_complete = 0;
+ res->in_erp = 0;
+ res->add_to_ml = 0;
+ res->del_from_ml = 0;
+ res->resetting_device = 0;
+ res->reset_occurred = 0;
+ res->sdev = NULL;
+ res->sata_port = NULL;
+
+ if (ioa_cfg->sis64) {
+ proto = cfgtew->u.cfgte64->proto;
+ res->res_flags = cfgtew->u.cfgte64->res_flags;
+ res->qmodel = IPR_QUEUEING_MODEL64(res);
+ res->type = cfgtew->u.cfgte64->res_type;
+
+ memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+ sizeof(res->res_path));
+
+ res->bus = 0;
+ memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+ sizeof(res->dev_lun.scsi_lun));
+ res->lun = scsilun_to_int(&res->dev_lun);
+
+ if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+ list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
+ if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
+ found = 1;
+ res->target = gscsi_res->target;
+ break;
+ }
+ }
+ if (!found) {
+ res->target = find_first_zero_bit(ioa_cfg->target_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->target_ids);
+ }
+ } else if (res->type == IPR_RES_TYPE_IOAFP) {
+ res->bus = IPR_IOAFP_VIRTUAL_BUS;
+ res->target = 0;
+ } else if (res->type == IPR_RES_TYPE_ARRAY) {
+ res->bus = IPR_ARRAY_VIRTUAL_BUS;
+ res->target = find_first_zero_bit(ioa_cfg->array_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->array_ids);
+ } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
+ res->bus = IPR_VSET_VIRTUAL_BUS;
+ res->target = find_first_zero_bit(ioa_cfg->vset_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->vset_ids);
+ } else {
+ res->target = find_first_zero_bit(ioa_cfg->target_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->target_ids);
+ }
+ } else {
+ proto = cfgtew->u.cfgte->proto;
+ res->qmodel = IPR_QUEUEING_MODEL(res);
+ res->flags = cfgtew->u.cfgte->flags;
+ if (res->flags & IPR_IS_IOA_RESOURCE)
+ res->type = IPR_RES_TYPE_IOAFP;
+ else
+ res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+ res->bus = cfgtew->u.cfgte->res_addr.bus;
+ res->target = cfgtew->u.cfgte->res_addr.target;
+ res->lun = cfgtew->u.cfgte->res_addr.lun;
+ res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
+ }
+
+ ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_is_same_device - Determine if two devices are the same.
+ * @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
+ *
+ * Return value:
+ * 1 if the devices are the same / 0 otherwise
+ **/
+static int ipr_is_same_device(struct ipr_resource_entry *res,
+ struct ipr_config_table_entry_wrapper *cfgtew)
+{
+ if (res->ioa_cfg->sis64) {
+ if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
+ sizeof(cfgtew->u.cfgte64->dev_id)) &&
+ !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+ sizeof(cfgtew->u.cfgte64->lun))) {
+ return 1;
+ }
+ } else {
+ if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
+ res->target == cfgtew->u.cfgte->res_addr.target &&
+ res->lun == cfgtew->u.cfgte->res_addr.lun)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * __ipr_format_res_path - Format the resource path for printing.
+ * @res_path: resource path
+ * @buf: buffer
+ * @len: length of buffer provided
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
+{
+ int i;
+ char *p = buffer;
+
+ *p = '\0';
+ p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
+ for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
+ p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
+
+ return buffer;
+}
+
+/**
+ * ipr_format_res_path - Format the resource path for printing.
+ * @ioa_cfg: ioa config struct
+ * @res_path: resource path
+ * @buf: buffer
+ * @len: length of buffer provided
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
+ u8 *res_path, char *buffer, int len)
+{
+ char *p = buffer;
+
+ *p = '\0';
+ p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ __ipr_format_res_path(res_path, p, len - (buffer - p));
+ return buffer;
+}
+
+/**
+ * ipr_update_res_entry - Update the resource entry.
+ * @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_update_res_entry(struct ipr_resource_entry *res,
+ struct ipr_config_table_entry_wrapper *cfgtew)
+{
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+ unsigned int proto;
+ int new_path = 0;
+
+ if (res->ioa_cfg->sis64) {
+ res->flags = cfgtew->u.cfgte64->flags;
+ res->res_flags = cfgtew->u.cfgte64->res_flags;
+ res->type = cfgtew->u.cfgte64->res_type;
+
+ memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
+ sizeof(struct ipr_std_inq_data));
+
+ res->qmodel = IPR_QUEUEING_MODEL64(res);
+ proto = cfgtew->u.cfgte64->proto;
+ res->res_handle = cfgtew->u.cfgte64->res_handle;
+ res->dev_id = cfgtew->u.cfgte64->dev_id;
+
+ memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+ sizeof(res->dev_lun.scsi_lun));
+
+ if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
+ sizeof(res->res_path))) {
+ memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+ sizeof(res->res_path));
+ new_path = 1;
+ }
+
+ if (res->sdev && new_path)
+ sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
+ ipr_format_res_path(res->ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
+ } else {
+ res->flags = cfgtew->u.cfgte->flags;
+ if (res->flags & IPR_IS_IOA_RESOURCE)
+ res->type = IPR_RES_TYPE_IOAFP;
+ else
+ res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+ memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
+ sizeof(struct ipr_std_inq_data));
+
+ res->qmodel = IPR_QUEUEING_MODEL(res);
+ proto = cfgtew->u.cfgte->proto;
+ res->res_handle = cfgtew->u.cfgte->res_handle;
+ }
+
+ ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_clear_res_target - Clear the bit in the bit map representing the target
+ * for the resource.
+ * @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_clear_res_target(struct ipr_resource_entry *res)
+{
+ struct ipr_resource_entry *gscsi_res = NULL;
+ struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+
+ if (!ioa_cfg->sis64)
+ return;
+
+ if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
+ clear_bit(res->target, ioa_cfg->array_ids);
+ else if (res->bus == IPR_VSET_VIRTUAL_BUS)
+ clear_bit(res->target, ioa_cfg->vset_ids);
+ else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+ list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
+ if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
+ return;
+ clear_bit(res->target, ioa_cfg->target_ids);
+
+ } else if (res->bus == 0)
+ clear_bit(res->target, ioa_cfg->target_ids);
+}
+
+/**
+ * ipr_handle_config_change - Handle a config change from the adapter
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_resource_entry *res = NULL;
+ struct ipr_config_table_entry_wrapper cfgtew;
+ __be32 cc_res_handle;
+
+ u32 is_ndn = 1;
+
+ if (ioa_cfg->sis64) {
+ cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
+ cc_res_handle = cfgtew.u.cfgte64->res_handle;
+ } else {
+ cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
+ cc_res_handle = cfgtew.u.cfgte->res_handle;
+ }
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->res_handle == cc_res_handle) {
+ is_ndn = 0;
+ break;
+ }
+ }
+
+ if (is_ndn) {
+ if (list_empty(&ioa_cfg->free_res_q)) {
+ ipr_send_hcam(ioa_cfg,
+ IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
+ hostrcb);
+ return;
+ }
+
+ res = list_entry(ioa_cfg->free_res_q.next,
+ struct ipr_resource_entry, queue);
+
+ list_del(&res->queue);
+ ipr_init_res_entry(res, &cfgtew);
+ list_add_tail(&res->queue, &ioa_cfg->used_res_q);
+ }
+
+ ipr_update_res_entry(res, &cfgtew);
+
+ if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
+ if (res->sdev) {
+ res->del_from_ml = 1;
+ res->res_handle = IPR_INVALID_RES_HANDLE;
+ schedule_work(&ioa_cfg->work_q);
+ } else {
+ ipr_clear_res_target(res);
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ }
+ } else if (!res->sdev || res->del_from_ml) {
+ res->add_to_ml = 1;
+ schedule_work(&ioa_cfg->work_q);
+ }
+
+ ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+}
+
+/**
+ * ipr_process_ccn - Op done function for a CCN.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is the op done function for a configuration
+ * change notification host controlled async from the adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ list_del(&hostrcb->queue);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+
+ if (ioasc) {
+ if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
+ dev_err(&ioa_cfg->pdev->dev,
+ "Host RCB failed with IOASC: 0x%08X\n", ioasc);
+
+ ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+ } else {
+ ipr_handle_config_change(ioa_cfg, hostrcb);
+ }
+}
+
+/**
+ * strip_and_pad_whitespace - Strip and pad trailing whitespace.
+ * @i: index into buffer
+ * @buf: string to modify
+ *
+ * This function will strip all trailing whitespace, pad the end
+ * of the string with a single space, and NULL terminate the string.
+ *
+ * Return value:
+ * new length of string
+ **/
+static int strip_and_pad_whitespace(int i, char *buf)
+{
+ while (i && buf[i] == ' ')
+ i--;
+ buf[i+1] = ' ';
+ buf[i+2] = '\0';
+ return i + 2;
+}
+
+/**
+ * ipr_log_vpd_compact - Log the passed extended VPD compactly.
+ * @prefix: string to print at start of printk
+ * @hostrcb: hostrcb pointer
+ * @vpd: vendor/product id/sn struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+ struct ipr_vpd *vpd)
+{
+ char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
+ int i = 0;
+
+ memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+ i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
+
+ memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
+ i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
+
+ memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
+ buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
+
+ ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
+}
+
+/**
+ * ipr_log_vpd - Log the passed VPD to the error log.
+ * @vpd: vendor/product id/sn struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_vpd(struct ipr_vpd *vpd)
+{
+ char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
+ + IPR_SERIAL_NUM_LEN];
+
+ memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+ memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
+ IPR_PROD_ID_LEN);
+ buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
+ ipr_err("Vendor/Product ID: %s\n", buffer);
+
+ memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
+ buffer[IPR_SERIAL_NUM_LEN] = '\0';
+ ipr_err(" Serial Number: %s\n", buffer);
+}
+
+/**
+ * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
+ * @prefix: string to print at start of printk
+ * @hostrcb: hostrcb pointer
+ * @vpd: vendor/product id/sn/wwn struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+ struct ipr_ext_vpd *vpd)
+{
+ ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
+ ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
+ be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
+}
+
+/**
+ * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
+ * @vpd: vendor/product id/sn/wwn struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
+{
+ ipr_log_vpd(&vpd->vpd);
+ ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
+ be32_to_cpu(vpd->wwid[1]));
+}
+
+/**
+ * ipr_log_enhanced_cache_error - Log a cache error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_12_error *error;
+
+ if (ioa_cfg->sis64)
+ error = &hostrcb->hcam.u.error64.u.type_12_error;
+ else
+ error = &hostrcb->hcam.u.error.u.type_12_error;
+
+ ipr_err("-----Current Configuration-----\n");
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_ext_vpd(&error->ioa_vpd);
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_ext_vpd(&error->cfc_vpd);
+
+ ipr_err("-----Expected Configuration-----\n");
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
+
+ ipr_err("Additional IOA Data: %08X %08X %08X\n",
+ be32_to_cpu(error->ioa_data[0]),
+ be32_to_cpu(error->ioa_data[1]),
+ be32_to_cpu(error->ioa_data[2]));
+}
+
+/**
+ * ipr_log_cache_error - Log a cache error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_02_error *error =
+ &hostrcb->hcam.u.error.u.type_02_error;
+
+ ipr_err("-----Current Configuration-----\n");
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_vpd(&error->ioa_vpd);
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_vpd(&error->cfc_vpd);
+
+ ipr_err("-----Expected Configuration-----\n");
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
+
+ ipr_err("Additional IOA Data: %08X %08X %08X\n",
+ be32_to_cpu(error->ioa_data[0]),
+ be32_to_cpu(error->ioa_data[1]),
+ be32_to_cpu(error->ioa_data[2]));
+}
+
+/**
+ * ipr_log_enhanced_config_error - Log a configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int errors_logged, i;
+ struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
+ struct ipr_hostrcb_type_13_error *error;
+
+ error = &hostrcb->hcam.u.error.u.type_13_error;
+ errors_logged = be32_to_cpu(error->errors_logged);
+
+ ipr_err("Device Errors Detected/Logged: %d/%d\n",
+ be32_to_cpu(error->errors_detected), errors_logged);
+
+ dev_entry = error->dev;
+
+ for (i = 0; i < errors_logged; i++, dev_entry++) {
+ ipr_err_separator;
+
+ ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
+ ipr_log_ext_vpd(&dev_entry->vpd);
+
+ ipr_err("-----New Device Information-----\n");
+ ipr_log_ext_vpd(&dev_entry->new_vpd);
+
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
+ }
+}
+
+/**
+ * ipr_log_sis64_config_error - Log a device error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int errors_logged, i;
+ struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
+ struct ipr_hostrcb_type_23_error *error;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ error = &hostrcb->hcam.u.error64.u.type_23_error;
+ errors_logged = be32_to_cpu(error->errors_logged);
+
+ ipr_err("Device Errors Detected/Logged: %d/%d\n",
+ be32_to_cpu(error->errors_detected), errors_logged);
+
+ dev_entry = error->dev;
+
+ for (i = 0; i < errors_logged; i++, dev_entry++) {
+ ipr_err_separator;
+
+ ipr_err("Device %d : %s", i + 1,
+ __ipr_format_res_path(dev_entry->res_path,
+ buffer, sizeof(buffer)));
+ ipr_log_ext_vpd(&dev_entry->vpd);
+
+ ipr_err("-----New Device Information-----\n");
+ ipr_log_ext_vpd(&dev_entry->new_vpd);
+
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
+ }
+}
+
+/**
+ * ipr_log_config_error - Log a configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int errors_logged, i;
+ struct ipr_hostrcb_device_data_entry *dev_entry;
+ struct ipr_hostrcb_type_03_error *error;
+
+ error = &hostrcb->hcam.u.error.u.type_03_error;
+ errors_logged = be32_to_cpu(error->errors_logged);
+
+ ipr_err("Device Errors Detected/Logged: %d/%d\n",
+ be32_to_cpu(error->errors_detected), errors_logged);
+
+ dev_entry = error->dev;
+
+ for (i = 0; i < errors_logged; i++, dev_entry++) {
+ ipr_err_separator;
+
+ ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
+ ipr_log_vpd(&dev_entry->vpd);
+
+ ipr_err("-----New Device Information-----\n");
+ ipr_log_vpd(&dev_entry->new_vpd);
+
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
+
+ ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
+ be32_to_cpu(dev_entry->ioa_data[0]),
+ be32_to_cpu(dev_entry->ioa_data[1]),
+ be32_to_cpu(dev_entry->ioa_data[2]),
+ be32_to_cpu(dev_entry->ioa_data[3]),
+ be32_to_cpu(dev_entry->ioa_data[4]));
+ }
+}
+
+/**
+ * ipr_log_enhanced_array_error - Log an array configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int i, num_entries;
+ struct ipr_hostrcb_type_14_error *error;
+ struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
+ const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+ error = &hostrcb->hcam.u.error.u.type_14_error;
+
+ ipr_err_separator;
+
+ ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
+ error->protection_level,
+ ioa_cfg->host->host_no,
+ error->last_func_vset_res_addr.bus,
+ error->last_func_vset_res_addr.target,
+ error->last_func_vset_res_addr.lun);
+
+ ipr_err_separator;
+
+ array_entry = error->array_member;
+ num_entries = min_t(u32, be32_to_cpu(error->num_entries),
+ ARRAY_SIZE(error->array_member));
+
+ for (i = 0; i < num_entries; i++, array_entry++) {
+ if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+ continue;
+
+ if (be32_to_cpu(error->exposed_mode_adn) == i)
+ ipr_err("Exposed Array Member %d:\n", i);
+ else
+ ipr_err("Array Member %d:\n", i);
+
+ ipr_log_ext_vpd(&array_entry->vpd);
+ ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
+ ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
+ "Expected Location");
+
+ ipr_err_separator;
+ }
+}
+
+/**
+ * ipr_log_array_error - Log an array configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int i;
+ struct ipr_hostrcb_type_04_error *error;
+ struct ipr_hostrcb_array_data_entry *array_entry;
+ const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+ error = &hostrcb->hcam.u.error.u.type_04_error;
+
+ ipr_err_separator;
+
+ ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
+ error->protection_level,
+ ioa_cfg->host->host_no,
+ error->last_func_vset_res_addr.bus,
+ error->last_func_vset_res_addr.target,
+ error->last_func_vset_res_addr.lun);
+
+ ipr_err_separator;
+
+ array_entry = error->array_member;
+
+ for (i = 0; i < 18; i++) {
+ if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+ continue;
+
+ if (be32_to_cpu(error->exposed_mode_adn) == i)
+ ipr_err("Exposed Array Member %d:\n", i);
+ else
+ ipr_err("Array Member %d:\n", i);
+
+ ipr_log_vpd(&array_entry->vpd);
+
+ ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
+ ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
+ "Expected Location");
+
+ ipr_err_separator;
+
+ if (i == 9)
+ array_entry = error->array_member2;
+ else
+ array_entry++;
+ }
+}
+
+/**
+ * ipr_log_hex_data - Log additional hex IOA error data.
+ * @ioa_cfg: ioa config struct
+ * @data: IOA error data
+ * @len: data length
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
+{
+ int i;
+
+ if (len == 0)
+ return;
+
+ if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+ len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
+
+ for (i = 0; i < len / 4; i += 4) {
+ ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
+ be32_to_cpu(data[i]),
+ be32_to_cpu(data[i+1]),
+ be32_to_cpu(data[i+2]),
+ be32_to_cpu(data[i+3]));
+ }
+}
+
+/**
+ * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_17_error *error;
+
+ if (ioa_cfg->sis64)
+ error = &hostrcb->hcam.u.error64.u.type_17_error;
+ else
+ error = &hostrcb->hcam.u.error.u.type_17_error;
+
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ strim(error->failure_reason);
+
+ ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
+ be32_to_cpu(hostrcb->hcam.u.error.prc));
+ ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
+ ipr_log_hex_data(ioa_cfg, error->data,
+ be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb_error, u) +
+ offsetof(struct ipr_hostrcb_type_17_error, data)));
+}
+
+/**
+ * ipr_log_dual_ioa_error - Log a dual adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_07_error *error;
+
+ error = &hostrcb->hcam.u.error.u.type_07_error;
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ strim(error->failure_reason);
+
+ ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
+ be32_to_cpu(hostrcb->hcam.u.error.prc));
+ ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
+ ipr_log_hex_data(ioa_cfg, error->data,
+ be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb_error, u) +
+ offsetof(struct ipr_hostrcb_type_07_error, data)));
+}
+
+static const struct {
+ u8 active;
+ char *desc;
+} path_active_desc[] = {
+ { IPR_PATH_NO_INFO, "Path" },
+ { IPR_PATH_ACTIVE, "Active path" },
+ { IPR_PATH_NOT_ACTIVE, "Inactive path" }
+};
+
+static const struct {
+ u8 state;
+ char *desc;
+} path_state_desc[] = {
+ { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
+ { IPR_PATH_HEALTHY, "is healthy" },
+ { IPR_PATH_DEGRADED, "is degraded" },
+ { IPR_PATH_FAILED, "is failed" }
+};
+
+/**
+ * ipr_log_fabric_path - Log a fabric path error
+ * @hostrcb: hostrcb struct
+ * @fabric: fabric descriptor
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
+ struct ipr_hostrcb_fabric_desc *fabric)
+{
+ int i, j;
+ u8 path_state = fabric->path_state;
+ u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+ u8 state = path_state & IPR_PATH_STATE_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+ if (path_active_desc[i].active != active)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+ if (path_state_desc[j].state != state)
+ continue;
+
+ if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
+ ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
+ path_active_desc[i].desc, path_state_desc[j].desc,
+ fabric->ioa_port);
+ } else if (fabric->cascaded_expander == 0xff) {
+ ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
+ path_active_desc[i].desc, path_state_desc[j].desc,
+ fabric->ioa_port, fabric->phy);
+ } else if (fabric->phy == 0xff) {
+ ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
+ path_active_desc[i].desc, path_state_desc[j].desc,
+ fabric->ioa_port, fabric->cascaded_expander);
+ } else {
+ ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
+ path_active_desc[i].desc, path_state_desc[j].desc,
+ fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+ }
+ return;
+ }
+ }
+
+ ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
+ fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+}
+
+/**
+ * ipr_log64_fabric_path - Log a fabric path error
+ * @hostrcb: hostrcb struct
+ * @fabric: fabric descriptor
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
+ struct ipr_hostrcb64_fabric_desc *fabric)
+{
+ int i, j;
+ u8 path_state = fabric->path_state;
+ u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+ u8 state = path_state & IPR_PATH_STATE_MASK;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+ if (path_active_desc[i].active != active)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+ if (path_state_desc[j].state != state)
+ continue;
+
+ ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
+ path_active_desc[i].desc, path_state_desc[j].desc,
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ fabric->res_path,
+ buffer, sizeof(buffer)));
+ return;
+ }
+ }
+
+ ipr_err("Path state=%02X Resource Path=%s\n", path_state,
+ ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+ buffer, sizeof(buffer)));
+}
+
+static const struct {
+ u8 type;
+ char *desc;
+} path_type_desc[] = {
+ { IPR_PATH_CFG_IOA_PORT, "IOA port" },
+ { IPR_PATH_CFG_EXP_PORT, "Expander port" },
+ { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
+ { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
+};
+
+static const struct {
+ u8 status;
+ char *desc;
+} path_status_desc[] = {
+ { IPR_PATH_CFG_NO_PROB, "Functional" },
+ { IPR_PATH_CFG_DEGRADED, "Degraded" },
+ { IPR_PATH_CFG_FAILED, "Failed" },
+ { IPR_PATH_CFG_SUSPECT, "Suspect" },
+ { IPR_PATH_NOT_DETECTED, "Missing" },
+ { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
+};
+
+static const char *link_rate[] = {
+ "unknown",
+ "disabled",
+ "phy reset problem",
+ "spinup hold",
+ "port selector",
+ "unknown",
+ "unknown",
+ "unknown",
+ "1.5Gbps",
+ "3.0Gbps",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown"
+};
+
+/**
+ * ipr_log_path_elem - Log a fabric path element.
+ * @hostrcb: hostrcb struct
+ * @cfg: fabric path element struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
+ struct ipr_hostrcb_config_element *cfg)
+{
+ int i, j;
+ u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+ u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+
+ if (type == IPR_PATH_CFG_NOT_EXIST)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+ if (path_type_desc[i].type != type)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+ if (path_status_desc[j].status != status)
+ continue;
+
+ if (type == IPR_PATH_CFG_IOA_PORT) {
+ ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
+ path_status_desc[j].desc, path_type_desc[i].desc,
+ cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ } else {
+ if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
+ ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
+ path_status_desc[j].desc, path_type_desc[i].desc,
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ } else if (cfg->cascaded_expander == 0xff) {
+ ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
+ "WWN=%08X%08X\n", path_status_desc[j].desc,
+ path_type_desc[i].desc, cfg->phy,
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ } else if (cfg->phy == 0xff) {
+ ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
+ "WWN=%08X%08X\n", path_status_desc[j].desc,
+ path_type_desc[i].desc, cfg->cascaded_expander,
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ } else {
+ ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
+ "WWN=%08X%08X\n", path_status_desc[j].desc,
+ path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ }
+ }
+ return;
+ }
+ }
+
+ ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
+ "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
+ * ipr_log64_path_elem - Log a fabric path element.
+ * @hostrcb: hostrcb struct
+ * @cfg: fabric path element struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
+ struct ipr_hostrcb64_config_element *cfg)
+{
+ int i, j;
+ u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
+ u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+ u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+ if (path_type_desc[i].type != type)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+ if (path_status_desc[j].status != status)
+ continue;
+
+ ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
+ path_status_desc[j].desc, path_type_desc[i].desc,
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]),
+ be32_to_cpu(cfg->wwid[1]));
+ return;
+ }
+ }
+ ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
+ "WWN=%08X%08X\n", cfg->type_status,
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
+ * ipr_log_fabric_error - Log a fabric error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_20_error *error;
+ struct ipr_hostrcb_fabric_desc *fabric;
+ struct ipr_hostrcb_config_element *cfg;
+ int i, add_len;
+
+ error = &hostrcb->hcam.u.error.u.type_20_error;
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+ add_len = be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb_error, u) +
+ offsetof(struct ipr_hostrcb_type_20_error, desc));
+
+ for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+ ipr_log_fabric_path(hostrcb, fabric);
+ for_each_fabric_cfg(fabric, cfg)
+ ipr_log_path_elem(hostrcb, cfg);
+
+ add_len -= be16_to_cpu(fabric->length);
+ fabric = (struct ipr_hostrcb_fabric_desc *)
+ ((unsigned long)fabric + be16_to_cpu(fabric->length));
+ }
+
+ ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
+/**
+ * ipr_log_sis64_array_error - Log a sis64 array error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int i, num_entries;
+ struct ipr_hostrcb_type_24_error *error;
+ struct ipr_hostrcb64_array_data_entry *array_entry;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+ const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+ error = &hostrcb->hcam.u.error64.u.type_24_error;
+
+ ipr_err_separator;
+
+ ipr_err("RAID %s Array Configuration: %s\n",
+ error->protection_level,
+ ipr_format_res_path(ioa_cfg, error->last_res_path,
+ buffer, sizeof(buffer)));
+
+ ipr_err_separator;
+
+ array_entry = error->array_member;
+ num_entries = min_t(u32, error->num_entries,
+ ARRAY_SIZE(error->array_member));
+
+ for (i = 0; i < num_entries; i++, array_entry++) {
+
+ if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+ continue;
+
+ if (error->exposed_mode_adn == i)
+ ipr_err("Exposed Array Member %d:\n", i);
+ else
+ ipr_err("Array Member %d:\n", i);
+
+ ipr_err("Array Member %d:\n", i);
+ ipr_log_ext_vpd(&array_entry->vpd);
+ ipr_err("Current Location: %s\n",
+ ipr_format_res_path(ioa_cfg, array_entry->res_path,
+ buffer, sizeof(buffer)));
+ ipr_err("Expected Location: %s\n",
+ ipr_format_res_path(ioa_cfg,
+ array_entry->expected_res_path,
+ buffer, sizeof(buffer)));
+
+ ipr_err_separator;
+ }
+}
+
+/**
+ * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_30_error *error;
+ struct ipr_hostrcb64_fabric_desc *fabric;
+ struct ipr_hostrcb64_config_element *cfg;
+ int i, add_len;
+
+ error = &hostrcb->hcam.u.error64.u.type_30_error;
+
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+ add_len = be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb64_error, u) +
+ offsetof(struct ipr_hostrcb_type_30_error, desc));
+
+ for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+ ipr_log64_fabric_path(hostrcb, fabric);
+ for_each_fabric_cfg(fabric, cfg)
+ ipr_log64_path_elem(hostrcb, cfg);
+
+ add_len -= be16_to_cpu(fabric->length);
+ fabric = (struct ipr_hostrcb64_fabric_desc *)
+ ((unsigned long)fabric + be16_to_cpu(fabric->length));
+ }
+
+ ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
+/**
+ * ipr_log_generic_error - Log an adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
+ be32_to_cpu(hostrcb->hcam.length));
+}
+
+/**
+ * ipr_log_sis64_device_error - Log a cache error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_21_error *error;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+ ipr_err("-----Failing Device Information-----\n");
+ ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
+ be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
+ be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
+ ipr_err("Device Resource Path: %s\n",
+ __ipr_format_res_path(error->res_path,
+ buffer, sizeof(buffer)));
+ error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
+ error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
+ ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
+ ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
+ ipr_err("SCSI Sense Data:\n");
+ ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
+ ipr_err("SCSI Command Descriptor Block: \n");
+ ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
+
+ ipr_err("Additional IOA Data:\n");
+ ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
+}
+
+/**
+ * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
+ * @ioasc: IOASC
+ *
+ * This function will return the index of into the ipr_error_table
+ * for the specified IOASC. If the IOASC is not in the table,
+ * 0 will be returned, which points to the entry used for unknown errors.
+ *
+ * Return value:
+ * index into the ipr_error_table
+ **/
+static u32 ipr_get_error(u32 ioasc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
+ if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
+ return i;
+
+ return 0;
+}
+
+/**
+ * ipr_handle_log_data - Log an adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * This function logs an adapter error to the system.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ u32 ioasc;
+ int error_index;
+ struct ipr_hostrcb_type_21_error *error;
+
+ if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
+ return;
+
+ if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
+ dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
+
+ if (ioa_cfg->sis64)
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+ else
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
+
+ if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
+ ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
+ /* Tell the midlayer we had a bus reset so it will handle the UA properly */
+ scsi_report_bus_reset(ioa_cfg->host,
+ hostrcb->hcam.u.error.fd_res_addr.bus);
+ }
+
+ error_index = ipr_get_error(ioasc);
+
+ if (!ipr_error_table[error_index].log_hcam)
+ return;
+
+ if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
+ hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
+ error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+ if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
+ ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+ return;
+ }
+
+ ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
+
+ /* Set indication we have logged an error */
+ ioa_cfg->errors_logged++;
+
+ if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
+ return;
+ if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
+ hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
+
+ switch (hostrcb->hcam.overlay_id) {
+ case IPR_HOST_RCB_OVERLAY_ID_2:
+ ipr_log_cache_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_3:
+ ipr_log_config_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_4:
+ case IPR_HOST_RCB_OVERLAY_ID_6:
+ ipr_log_array_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_7:
+ ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_12:
+ ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_13:
+ ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_14:
+ case IPR_HOST_RCB_OVERLAY_ID_16:
+ ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_17:
+ ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_20:
+ ipr_log_fabric_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_21:
+ ipr_log_sis64_device_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_23:
+ ipr_log_sis64_config_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_24:
+ case IPR_HOST_RCB_OVERLAY_ID_26:
+ ipr_log_sis64_array_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_30:
+ ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_1:
+ case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
+ default:
+ ipr_log_generic_error(ioa_cfg, hostrcb);
+ break;
+ }
+}
+
+/**
+ * ipr_process_error - Op done function for an adapter error log.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is the op done function for an error log host
+ * controlled async from the adapter. It will log the error and
+ * send the HCAM back to the adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ u32 fd_ioasc;
+
+ if (ioa_cfg->sis64)
+ fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+ else
+ fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
+
+ list_del(&hostrcb->queue);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+
+ if (!ioasc) {
+ ipr_handle_log_data(ioa_cfg, hostrcb);
+ if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
+ } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Host RCB failed with IOASC: 0x%08X\n", ioasc);
+ }
+
+ ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+}
+
+/**
+ * ipr_timeout - An internally generated op has timed out.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function blocks host requests and initiates an
+ * adapter reset.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long lock_flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ ioa_cfg->errors_logged++;
+ dev_err(&ioa_cfg->pdev->dev,
+ "Adapter being reset due to command timeout.\n");
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+
+ if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
+ * ipr_oper_timeout - Adapter timed out transitioning to operational
+ * @ipr_cmd: ipr command struct
+ *
+ * This function blocks host requests and initiates an
+ * adapter reset.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long lock_flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ ioa_cfg->errors_logged++;
+ dev_err(&ioa_cfg->pdev->dev,
+ "Adapter timed out transitioning to operational.\n");
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+
+ if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
+ if (ipr_fastfail)
+ ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
+ * ipr_find_ses_entry - Find matching SES in SES table
+ * @res: resource entry struct of SES
+ *
+ * Return value:
+ * pointer to SES table entry / NULL on failure
+ **/
+static const struct ipr_ses_table_entry *
+ipr_find_ses_entry(struct ipr_resource_entry *res)
+{
+ int i, j, matches;
+ struct ipr_std_inq_vpids *vpids;
+ const struct ipr_ses_table_entry *ste = ipr_ses_table;
+
+ for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
+ for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
+ if (ste->compare_product_id_byte[j] == 'X') {
+ vpids = &res->std_inq_data.vpids;
+ if (vpids->product_id[j] == ste->product_id[j])
+ matches++;
+ else
+ break;
+ } else
+ matches++;
+ }
+
+ if (matches == IPR_PROD_ID_LEN)
+ return ste;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
+ * @ioa_cfg: ioa config struct
+ * @bus: SCSI bus
+ * @bus_width: bus width
+ *
+ * Return value:
+ * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
+ * For a 2-byte wide SCSI bus, the maximum transfer speed is
+ * twice the maximum transfer rate (e.g. for a wide enabled bus,
+ * max 160MHz = max 320MB/sec).
+ **/
+static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
+{
+ struct ipr_resource_entry *res;
+ const struct ipr_ses_table_entry *ste;
+ u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
+
+ /* Loop through each config table entry in the config table buffer */
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
+ continue;
+
+ if (bus != res->bus)
+ continue;
+
+ if (!(ste = ipr_find_ses_entry(res)))
+ continue;
+
+ max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
+ }
+
+ return max_xfer_rate;
+}
+
+/**
+ * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
+ * @ioa_cfg: ioa config struct
+ * @max_delay: max delay in micro-seconds to wait
+ *
+ * Waits for an IODEBUG ACK from the IOA, doing busy looping.
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
+{
+ volatile u32 pcii_reg;
+ int delay = 1;
+
+ /* Read interrupt reg until IOA signals IO Debug Acknowledge */
+ while (delay < max_delay) {
+ pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+ if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
+ return 0;
+
+ /* udelay cannot be used if delay is more than a few milliseconds */
+ if ((delay / 1000) > MAX_UDELAY_MS)
+ mdelay(delay / 1000);
+ else
+ udelay(delay);
+
+ delay += delay;
+ }
+ return -EIO;
+}
+
+/**
+ * ipr_get_sis64_dump_data_section - Dump IOA memory
+ * @ioa_cfg: ioa config struct
+ * @start_addr: adapter address to dump
+ * @dest: destination kernel buffer
+ * @length_in_words: length to dump in 4 byte words
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
+ u32 start_addr,
+ __be32 *dest, u32 length_in_words)
+{
+ int i;
+
+ for (i = 0; i < length_in_words; i++) {
+ writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
+ *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
+ dest++;
+ }
+
+ return 0;
+}
+
+/**
+ * ipr_get_ldump_data_section - Dump IOA memory
+ * @ioa_cfg: ioa config struct
+ * @start_addr: adapter address to dump
+ * @dest: destination kernel buffer
+ * @length_in_words: length to dump in 4 byte words
+ *
+ * Return value:
+ * 0 on success / -EIO on failure
+ **/
+static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
+ u32 start_addr,
+ __be32 *dest, u32 length_in_words)
+{
+ volatile u32 temp_pcii_reg;
+ int i, delay = 0;
+
+ if (ioa_cfg->sis64)
+ return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
+ dest, length_in_words);
+
+ /* Write IOA interrupt reg starting LDUMP state */
+ writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
+ ioa_cfg->regs.set_uproc_interrupt_reg32);
+
+ /* Wait for IO debug acknowledge */
+ if (ipr_wait_iodbg_ack(ioa_cfg,
+ IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "IOA dump long data transfer timeout\n");
+ return -EIO;
+ }
+
+ /* Signal LDUMP interlocked - clear IO debug ack */
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
+ ioa_cfg->regs.clr_interrupt_reg);
+
+ /* Write Mailbox with starting address */
+ writel(start_addr, ioa_cfg->ioa_mailbox);
+
+ /* Signal address valid - clear IOA Reset alert */
+ writel(IPR_UPROCI_RESET_ALERT,
+ ioa_cfg->regs.clr_uproc_interrupt_reg32);
+
+ for (i = 0; i < length_in_words; i++) {
+ /* Wait for IO debug acknowledge */
+ if (ipr_wait_iodbg_ack(ioa_cfg,
+ IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "IOA dump short data transfer timeout\n");
+ return -EIO;
+ }
+
+ /* Read data from mailbox and increment destination pointer */
+ *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
+ dest++;
+
+ /* For all but the last word of data, signal data received */
+ if (i < (length_in_words - 1)) {
+ /* Signal dump data received - Clear IO debug Ack */
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
+ ioa_cfg->regs.clr_interrupt_reg);
+ }
+ }
+
+ /* Signal end of block transfer. Set reset alert then clear IO debug ack */
+ writel(IPR_UPROCI_RESET_ALERT,
+ ioa_cfg->regs.set_uproc_interrupt_reg32);
+
+ writel(IPR_UPROCI_IO_DEBUG_ALERT,
+ ioa_cfg->regs.clr_uproc_interrupt_reg32);
+
+ /* Signal dump data received - Clear IO debug Ack */
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
+ ioa_cfg->regs.clr_interrupt_reg);
+
+ /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
+ while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
+ temp_pcii_reg =
+ readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
+
+ if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
+ return 0;
+
+ udelay(10);
+ delay += 10;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_IPR_DUMP
+/**
+ * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
+ * @ioa_cfg: ioa config struct
+ * @pci_address: adapter address
+ * @length: length of data to copy
+ *
+ * Copy data from PCI adapter to kernel buffer.
+ * Note: length MUST be a 4 byte multiple
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
+ unsigned long pci_address, u32 length)
+{
+ int bytes_copied = 0;
+ int cur_len, rc, rem_len, rem_page_len, max_dump_size;
+ __be32 *page;
+ unsigned long lock_flags = 0;
+ struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
+
+ if (ioa_cfg->sis64)
+ max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
+ else
+ max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
+
+ while (bytes_copied < length &&
+ (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
+ if (ioa_dump->page_offset >= PAGE_SIZE ||
+ ioa_dump->page_offset == 0) {
+ page = (__be32 *)__get_free_page(GFP_ATOMIC);
+
+ if (!page) {
+ ipr_trace;
+ return bytes_copied;
+ }
+
+ ioa_dump->page_offset = 0;
+ ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
+ ioa_dump->next_page_index++;
+ } else
+ page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
+
+ rem_len = length - bytes_copied;
+ rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
+ cur_len = min(rem_len, rem_page_len);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->sdt_state == ABORT_DUMP) {
+ rc = -EIO;
+ } else {
+ rc = ipr_get_ldump_data_section(ioa_cfg,
+ pci_address + bytes_copied,
+ &page[ioa_dump->page_offset / 4],
+ (cur_len / sizeof(u32)));
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (!rc) {
+ ioa_dump->page_offset += cur_len;
+ bytes_copied += cur_len;
+ } else {
+ ipr_trace;
+ break;
+ }
+ schedule();
+ }
+
+ return bytes_copied;
+}
+
+/**
+ * ipr_init_dump_entry_hdr - Initialize a dump entry header.
+ * @hdr: dump entry header struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
+{
+ hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
+ hdr->num_elems = 1;
+ hdr->offset = sizeof(*hdr);
+ hdr->status = IPR_DUMP_STATUS_SUCCESS;
+}
+
+/**
+ * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
+ * @ioa_cfg: ioa config struct
+ * @driver_dump: driver dump struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_driver_dump *driver_dump)
+{
+ struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
+
+ ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
+ driver_dump->ioa_type_entry.hdr.len =
+ sizeof(struct ipr_dump_ioa_type_entry) -
+ sizeof(struct ipr_dump_entry_header);
+ driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
+ driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
+ driver_dump->ioa_type_entry.type = ioa_cfg->type;
+ driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
+ (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
+ ucode_vpd->minor_release[1];
+ driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_dump_version_data - Fill in the driver version in the dump.
+ * @ioa_cfg: ioa config struct
+ * @driver_dump: driver dump struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_driver_dump *driver_dump)
+{
+ ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
+ driver_dump->version_entry.hdr.len =
+ sizeof(struct ipr_dump_version_entry) -
+ sizeof(struct ipr_dump_entry_header);
+ driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
+ driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
+ strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
+ driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_dump_trace_data - Fill in the IOA trace in the dump.
+ * @ioa_cfg: ioa config struct
+ * @driver_dump: driver dump struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_driver_dump *driver_dump)
+{
+ ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
+ driver_dump->trace_entry.hdr.len =
+ sizeof(struct ipr_dump_trace_entry) -
+ sizeof(struct ipr_dump_entry_header);
+ driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
+ driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
+ memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
+ driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_dump_location_data - Fill in the IOA location in the dump.
+ * @ioa_cfg: ioa config struct
+ * @driver_dump: driver dump struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_driver_dump *driver_dump)
+{
+ ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
+ driver_dump->location_entry.hdr.len =
+ sizeof(struct ipr_dump_location_entry) -
+ sizeof(struct ipr_dump_entry_header);
+ driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
+ driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
+ strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
+ driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
+ * @ioa_cfg: ioa config struct
+ * @dump: dump struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
+{
+ unsigned long start_addr, sdt_word;
+ unsigned long lock_flags = 0;
+ struct ipr_driver_dump *driver_dump = &dump->driver_dump;
+ struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
+ u32 num_entries, max_num_entries, start_off, end_off;
+ u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
+ struct ipr_sdt *sdt;
+ int valid = 1;
+ int i;
+
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ioa_cfg->sdt_state != READ_DUMP) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ if (ioa_cfg->sis64) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ ssleep(IPR_DUMP_DELAY_SECONDS);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+
+ start_addr = readl(ioa_cfg->ioa_mailbox);
+
+ if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Invalid dump table format: %lx\n", start_addr);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
+
+ driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
+
+ /* Initialize the overall dump header */
+ driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
+ driver_dump->hdr.num_entries = 1;
+ driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
+ driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
+ driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
+ driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
+
+ ipr_dump_version_data(ioa_cfg, driver_dump);
+ ipr_dump_location_data(ioa_cfg, driver_dump);
+ ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
+ ipr_dump_trace_data(ioa_cfg, driver_dump);
+
+ /* Update dump_header */
+ driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
+
+ /* IOA Dump entry */
+ ipr_init_dump_entry_hdr(&ioa_dump->hdr);
+ ioa_dump->hdr.len = 0;
+ ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
+ ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
+
+ /* First entries in sdt are actually a list of dump addresses and
+ lengths to gather the real dump data. sdt represents the pointer
+ to the ioa generated dump table. Dump data will be extracted based
+ on entries in this table */
+ sdt = &ioa_dump->sdt;
+
+ if (ioa_cfg->sis64) {
+ max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
+ max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
+ } else {
+ max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
+ max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
+ }
+
+ bytes_to_copy = offsetof(struct ipr_sdt, entry) +
+ (max_num_entries * sizeof(struct ipr_sdt_entry));
+ rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
+ bytes_to_copy / sizeof(__be32));
+
+ /* Smart Dump table is ready to use and the first entry is valid */
+ if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+ (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Dump of IOA failed. Dump table not valid: %d, %X.\n",
+ rc, be32_to_cpu(sdt->hdr.state));
+ driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
+ ioa_cfg->sdt_state = DUMP_OBTAINED;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
+
+ if (num_entries > max_num_entries)
+ num_entries = max_num_entries;
+
+ /* Update dump length to the actual data to be copied */
+ dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
+ if (ioa_cfg->sis64)
+ dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
+ else
+ dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ for (i = 0; i < num_entries; i++) {
+ if (ioa_dump->hdr.len > max_dump_size) {
+ driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
+ break;
+ }
+
+ if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
+ sdt_word = be32_to_cpu(sdt->entry[i].start_token);
+ if (ioa_cfg->sis64)
+ bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
+ else {
+ start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
+ end_off = be32_to_cpu(sdt->entry[i].end_token);
+
+ if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
+ bytes_to_copy = end_off - start_off;
+ else
+ valid = 0;
+ }
+ if (valid) {
+ if (bytes_to_copy > max_dump_size) {
+ sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
+ continue;
+ }
+
+ /* Copy data from adapter to driver buffers */
+ bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
+ bytes_to_copy);
+
+ ioa_dump->hdr.len += bytes_copied;
+
+ if (bytes_copied != bytes_to_copy) {
+ driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
+ break;
+ }
+ }
+ }
+ }
+
+ dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
+
+ /* Update dump_header */
+ driver_dump->hdr.len += ioa_dump->hdr.len;
+ wmb();
+ ioa_cfg->sdt_state = DUMP_OBTAINED;
+ LEAVE;
+}
+
+#else
+#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
+#endif
+
+/**
+ * ipr_release_dump - Free adapter dump memory
+ * @kref: kref struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_release_dump(struct kref *kref)
+{
+ struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
+ struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
+ unsigned long lock_flags = 0;
+ int i;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ioa_cfg->dump = NULL;
+ ioa_cfg->sdt_state = INACTIVE;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ for (i = 0; i < dump->ioa_dump.next_page_index; i++)
+ free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
+
+ vfree(dump->ioa_dump.ioa_data);
+ kfree(dump);
+ LEAVE;
+}
+
+/**
+ * ipr_worker_thread - Worker thread
+ * @work: ioa config struct
+ *
+ * Called at task level from a work thread. This function takes care
+ * of adding and removing device from the mid-layer as configuration
+ * changes are detected by the adapter.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_worker_thread(struct work_struct *work)
+{
+ unsigned long lock_flags;
+ struct ipr_resource_entry *res;
+ struct scsi_device *sdev;
+ struct ipr_dump *dump;
+ struct ipr_ioa_cfg *ioa_cfg =
+ container_of(work, struct ipr_ioa_cfg, work_q);
+ u8 bus, target, lun;
+ int did_work;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ioa_cfg->sdt_state == READ_DUMP) {
+ dump = ioa_cfg->dump;
+ if (!dump) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+ kref_get(&dump->kref);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ ipr_get_ioa_dump(ioa_cfg, dump);
+ kref_put(&dump->kref, ipr_release_dump);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+restart:
+ do {
+ did_work = 0;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->del_from_ml && res->sdev) {
+ did_work = 1;
+ sdev = res->sdev;
+ if (!scsi_device_get(sdev)) {
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+ break;
+ }
+ }
+ } while (did_work);
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->add_to_ml) {
+ bus = res->bus;
+ target = res->target;
+ lun = res->lun;
+ res->add_to_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_add_device(ioa_cfg->host, bus, target, lun);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ goto restart;
+ }
+ }
+
+ ioa_cfg->scan_done = 1;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
+ LEAVE;
+}
+
+#ifdef CONFIG_SCSI_IPR_TRACE
+/**
+ * ipr_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
+ * @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
+ * @buf: buffer
+ * @off: offset
+ * @count: buffer size
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ ssize_t ret;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
+ IPR_TRACE_SIZE);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ return ret;
+}
+
+static struct bin_attribute ipr_trace_attr = {
+ .attr = {
+ .name = "trace",
+ .mode = S_IRUGO,
+ },
+ .size = 0,
+ .read = ipr_read_trace,
+};
+#endif
+
+/**
+ * ipr_show_fw_version - Show the firmware version
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_fw_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
+ ucode_vpd->major_release, ucode_vpd->card_type,
+ ucode_vpd->minor_release[0],
+ ucode_vpd->minor_release[1]);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_fw_version_attr = {
+ .attr = {
+ .name = "fw_version",
+ .mode = S_IRUGO,
+ },
+ .show = ipr_show_fw_version,
+};
+
+/**
+ * ipr_show_log_level - Show the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_log_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+/**
+ * ipr_store_log_level - Change the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_log_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return strlen(buf);
+}
+
+static struct device_attribute ipr_log_level_attr = {
+ .attr = {
+ .name = "log_level",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_log_level,
+ .store = ipr_store_log_level
+};
+
+/**
+ * ipr_store_diagnostics - IOA Diagnostics interface
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * This function will reset the adapter and wait a reasonable
+ * amount of time for any errors that the adapter might log.
+ *
+ * Return value:
+ * count on success / other on failure
+ **/
+static ssize_t ipr_store_diagnostics(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int rc = count;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+
+ ioa_cfg->errors_logged = 0;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+
+ if (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+ /* Wait for a second for any errors to be logged */
+ msleep(1000);
+ } else {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
+ rc = -EIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ return rc;
+}
+
+static struct device_attribute ipr_diagnostics_attr = {
+ .attr = {
+ .name = "run_diagnostics",
+ .mode = S_IWUSR,
+ },
+ .store = ipr_store_diagnostics
+};
+
+/**
+ * ipr_show_adapter_state - Show the adapter's state
+ * @class_dev: device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_adapter_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
+ len = snprintf(buf, PAGE_SIZE, "offline\n");
+ else
+ len = snprintf(buf, PAGE_SIZE, "online\n");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+/**
+ * ipr_store_adapter_state - Change adapter state
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * This function will change the adapter's state.
+ *
+ * Return value:
+ * count on success / other on failure
+ **/
+static ssize_t ipr_store_adapter_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags;
+ int result = count, i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+ !strncmp(buf, "online", 6)) {
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ ioa_cfg->reset_retries = 0;
+ ioa_cfg->in_ioa_bringdown = 0;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+ return result;
+}
+
+static struct device_attribute ipr_ioa_state_attr = {
+ .attr = {
+ .name = "online_state",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_adapter_state,
+ .store = ipr_store_adapter_state
+};
+
+/**
+ * ipr_store_reset_adapter - Reset the adapter
+ * @dev: device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * This function will reset the adapter.
+ *
+ * Return value:
+ * count on success / other on failure
+ **/
+static ssize_t ipr_store_reset_adapter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags;
+ int result = count;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (!ioa_cfg->in_reset_reload)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+ return result;
+}
+
+static struct device_attribute ipr_ioa_reset_attr = {
+ .attr = {
+ .name = "reset_host",
+ .mode = S_IWUSR,
+ },
+ .store = ipr_store_reset_adapter
+};
+
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+ /**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long user_iopoll_weight;
+ unsigned long lock_flags = 0;
+ int i;
+
+ if (!ioa_cfg->sis64) {
+ dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+ return -EINVAL;
+ }
+ if (kstrtoul(buf, 10, &user_iopoll_weight))
+ return -EINVAL;
+
+ if (user_iopoll_weight > 256) {
+ dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+ return -EINVAL;
+ }
+
+ if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+ dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+ return strlen(buf);
+ }
+
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ ioa_cfg->iopoll_weight = user_iopoll_weight;
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+ .attr = {
+ .name = "iopoll_weight",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_iopoll_weight,
+ .store = ipr_store_iopoll_weight
+};
+
+/**
+ * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
+ * @buf_len: buffer length
+ *
+ * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
+ * list to use for microcode download
+ *
+ * Return value:
+ * pointer to sglist / NULL on failure
+ **/
+static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
+{
+ int sg_size, order, bsize_elem, num_elem, i, j;
+ struct ipr_sglist *sglist;
+ struct scatterlist *scatterlist;
+ struct page *page;
+
+ /* Get the minimum size per scatter/gather element */
+ sg_size = buf_len / (IPR_MAX_SGLIST - 1);
+
+ /* Get the actual size per element */
+ order = get_order(sg_size);
+
+ /* Determine the actual number of bytes per element */
+ bsize_elem = PAGE_SIZE * (1 << order);
+
+ /* Determine the actual number of sg entries needed */
+ if (buf_len % bsize_elem)
+ num_elem = (buf_len / bsize_elem) + 1;
+ else
+ num_elem = buf_len / bsize_elem;
+
+ /* Allocate a scatter/gather list for the DMA */
+ sglist = kzalloc(sizeof(struct ipr_sglist) +
+ (sizeof(struct scatterlist) * (num_elem - 1)),
+ GFP_KERNEL);
+
+ if (sglist == NULL) {
+ ipr_trace;
+ return NULL;
+ }
+
+ scatterlist = sglist->scatterlist;
+ sg_init_table(scatterlist, num_elem);
+
+ sglist->order = order;
+ sglist->num_sg = num_elem;
+
+ /* Allocate a bunch of sg elements */
+ for (i = 0; i < num_elem; i++) {
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page) {
+ ipr_trace;
+
+ /* Free up what we already allocated */
+ for (j = i - 1; j >= 0; j--)
+ __free_pages(sg_page(&scatterlist[j]), order);
+ kfree(sglist);
+ return NULL;
+ }
+
+ sg_set_page(&scatterlist[i], page, 0, 0);
+ }
+
+ return sglist;
+}
+
+/**
+ * ipr_free_ucode_buffer - Frees a microcode download buffer
+ * @p_dnld: scatter/gather list pointer
+ *
+ * Free a DMA'able ucode download buffer previously allocated with
+ * ipr_alloc_ucode_buffer
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
+{
+ int i;
+
+ for (i = 0; i < sglist->num_sg; i++)
+ __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
+
+ kfree(sglist);
+}
+
+/**
+ * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
+ * @sglist: scatter/gather list pointer
+ * @buffer: buffer pointer
+ * @len: buffer length
+ *
+ * Copy a microcode image from a user buffer into a buffer allocated by
+ * ipr_alloc_ucode_buffer
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
+ u8 *buffer, u32 len)
+{
+ int bsize_elem, i, result = 0;
+ struct scatterlist *scatterlist;
+ void *kaddr;
+
+ /* Determine the actual number of bytes per element */
+ bsize_elem = PAGE_SIZE * (1 << sglist->order);
+
+ scatterlist = sglist->scatterlist;
+
+ for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
+ memcpy(kaddr, buffer, bsize_elem);
+ kunmap(page);
+
+ scatterlist[i].length = bsize_elem;
+
+ if (result != 0) {
+ ipr_trace;
+ return result;
+ }
+ }
+
+ if (len % bsize_elem) {
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
+ memcpy(kaddr, buffer, len % bsize_elem);
+ kunmap(page);
+
+ scatterlist[i].length = len % bsize_elem;
+ }
+
+ sglist->buffer_len = len;
+ return result;
+}
+
+/**
+ * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
+ * @ipr_cmd: ipr command struct
+ * @sglist: scatter/gather list
+ *
+ * Builds a microcode download IOA data list (IOADL).
+ *
+ **/
+static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
+ struct ipr_sglist *sglist)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+ struct scatterlist *scatterlist = sglist->scatterlist;
+ int i;
+
+ ipr_cmd->dma_use_sg = sglist->num_dma_sg;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+ for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
+ ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
+ ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+ }
+
+ ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
+ * ipr_build_ucode_ioadl - Build a microcode download IOADL
+ * @ipr_cmd: ipr command struct
+ * @sglist: scatter/gather list
+ *
+ * Builds a microcode download IOA data list (IOADL).
+ *
+ **/
+static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
+ struct ipr_sglist *sglist)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+ struct scatterlist *scatterlist = sglist->scatterlist;
+ int i;
+
+ ipr_cmd->dma_use_sg = sglist->num_dma_sg;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+
+ for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ ioadl[i].flags_and_data_len =
+ cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+ ioadl[i].address =
+ cpu_to_be32(sg_dma_address(&scatterlist[i]));
+ }
+
+ ioadl[i-1].flags_and_data_len |=
+ cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
+ * ipr_update_ioa_ucode - Update IOA's microcode
+ * @ioa_cfg: ioa config struct
+ * @sglist: scatter/gather list
+ *
+ * Initiate an adapter reset to update the IOA's microcode
+ *
+ * Return value:
+ * 0 on success / -EIO on failure
+ **/
+static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_sglist *sglist)
+{
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+
+ if (ioa_cfg->ucode_sglist) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ dev_err(&ioa_cfg->pdev->dev,
+ "Microcode download already in progress\n");
+ return -EIO;
+ }
+
+ sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
+ sglist->scatterlist, sglist->num_sg,
+ DMA_TO_DEVICE);
+
+ if (!sglist->num_dma_sg) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ dev_err(&ioa_cfg->pdev->dev,
+ "Failed to map microcode download buffer!\n");
+ return -EIO;
+ }
+
+ ioa_cfg->ucode_sglist = sglist;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ioa_cfg->ucode_sglist = NULL;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return 0;
+}
+
+/**
+ * ipr_store_update_fw - Update the firmware on the adapter
+ * @class_dev: device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * This function will update the firmware on the adapter.
+ *
+ * Return value:
+ * count on success / other on failure
+ **/
+static ssize_t ipr_store_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ struct ipr_ucode_image_header *image_hdr;
+ const struct firmware *fw_entry;
+ struct ipr_sglist *sglist;
+ char fname[100];
+ char *src;
+ int len, result, dnld_size;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ len = snprintf(fname, 99, "%s", buf);
+ fname[len-1] = '\0';
+
+ if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
+ dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
+ return -EIO;
+ }
+
+ image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
+
+ src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
+ dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
+ sglist = ipr_alloc_ucode_buffer(dnld_size);
+
+ if (!sglist) {
+ dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
+
+ if (result) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Microcode buffer copy to DMA buffer failed\n");
+ goto out;
+ }
+
+ ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
+
+ result = ipr_update_ioa_ucode(ioa_cfg, sglist);
+
+ if (!result)
+ result = count;
+out:
+ ipr_free_ucode_buffer(sglist);
+ release_firmware(fw_entry);
+ return result;
+}
+
+static struct device_attribute ipr_update_fw_attr = {
+ .attr = {
+ .name = "update_fw",
+ .mode = S_IWUSR,
+ },
+ .store = ipr_store_update_fw
+};
+
+/**
+ * ipr_show_fw_type - Show the adapter's firmware type.
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_fw_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_ioa_fw_type_attr = {
+ .attr = {
+ .name = "fw_type",
+ .mode = S_IRUGO,
+ },
+ .show = ipr_show_fw_type
+};
+
+static struct device_attribute *ipr_ioa_attrs[] = {
+ &ipr_fw_version_attr,
+ &ipr_log_level_attr,
+ &ipr_diagnostics_attr,
+ &ipr_ioa_state_attr,
+ &ipr_ioa_reset_attr,
+ &ipr_update_fw_attr,
+ &ipr_ioa_fw_type_attr,
+ &ipr_iopoll_weight_attr,
+ NULL,
+};
+
+#ifdef CONFIG_SCSI_IPR_DUMP
+/**
+ * ipr_read_dump - Dump the adapter
+ * @filp: open sysfs file
+ * @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
+ * @buf: buffer
+ * @off: offset
+ * @count: buffer size
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ struct ipr_dump *dump;
+ unsigned long lock_flags = 0;
+ char *src;
+ int len, sdt_end;
+ size_t rc = count;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ dump = ioa_cfg->dump;
+
+ if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return 0;
+ }
+ kref_get(&dump->kref);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (off > dump->driver_dump.hdr.len) {
+ kref_put(&dump->kref, ipr_release_dump);
+ return 0;
+ }
+
+ if (off + count > dump->driver_dump.hdr.len) {
+ count = dump->driver_dump.hdr.len - off;
+ rc = count;
+ }
+
+ if (count && off < sizeof(dump->driver_dump)) {
+ if (off + count > sizeof(dump->driver_dump))
+ len = sizeof(dump->driver_dump) - off;
+ else
+ len = count;
+ src = (u8 *)&dump->driver_dump + off;
+ memcpy(buf, src, len);
+ buf += len;
+ off += len;
+ count -= len;
+ }
+
+ off -= sizeof(dump->driver_dump);
+
+ if (ioa_cfg->sis64)
+ sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
+ (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
+ sizeof(struct ipr_sdt_entry));
+ else
+ sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
+ (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
+
+ if (count && off < sdt_end) {
+ if (off + count > sdt_end)
+ len = sdt_end - off;
+ else
+ len = count;
+ src = (u8 *)&dump->ioa_dump + off;
+ memcpy(buf, src, len);
+ buf += len;
+ off += len;
+ count -= len;
+ }
+
+ off -= sdt_end;
+
+ while (count) {
+ if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
+ len = PAGE_ALIGN(off) - off;
+ else
+ len = count;
+ src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
+ src += off & ~PAGE_MASK;
+ memcpy(buf, src, len);
+ buf += len;
+ off += len;
+ count -= len;
+ }
+
+ kref_put(&dump->kref, ipr_release_dump);
+ return rc;
+}
+
+/**
+ * ipr_alloc_dump - Prepare for adapter dump
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_dump *dump;
+ __be32 **ioa_data;
+ unsigned long lock_flags = 0;
+
+ dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
+
+ if (!dump) {
+ ipr_err("Dump memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (ioa_cfg->sis64)
+ ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+ else
+ ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+
+ if (!ioa_data) {
+ ipr_err("Dump memory allocation failed\n");
+ kfree(dump);
+ return -ENOMEM;
+ }
+
+ dump->ioa_dump.ioa_data = ioa_data;
+
+ kref_init(&dump->kref);
+ dump->ioa_cfg = ioa_cfg;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ if (INACTIVE != ioa_cfg->sdt_state) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ vfree(dump->ioa_dump.ioa_data);
+ kfree(dump);
+ return 0;
+ }
+
+ ioa_cfg->dump = dump;
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
+ ioa_cfg->dump_taken = 1;
+ schedule_work(&ioa_cfg->work_q);
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ return 0;
+}
+
+/**
+ * ipr_free_dump - Free adapter dump memory
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_dump *dump;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ dump = ioa_cfg->dump;
+ if (!dump) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return 0;
+ }
+
+ ioa_cfg->dump = NULL;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ kref_put(&dump->kref, ipr_release_dump);
+
+ LEAVE;
+ return 0;
+}
+
+/**
+ * ipr_write_dump - Setup dump state of adapter
+ * @filp: open sysfs file
+ * @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
+ * @buf: buffer
+ * @off: offset
+ * @count: buffer size
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (buf[0] == '1')
+ rc = ipr_alloc_dump(ioa_cfg);
+ else if (buf[0] == '0')
+ rc = ipr_free_dump(ioa_cfg);
+ else
+ return -EINVAL;
+
+ if (rc)
+ return rc;
+ else
+ return count;
+}
+
+static struct bin_attribute ipr_dump_attr = {
+ .attr = {
+ .name = "dump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = ipr_read_dump,
+ .write = ipr_write_dump
+};
+#else
+static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
+#endif
+
+/**
+ * ipr_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
+ * @reason: calling context
+ *
+ * Return value:
+ * actual depth set
+ **/
+static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+
+ if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
+ qdepth = IPR_MAX_CMD_PER_ATA_LUN;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ scsi_change_queue_depth(sdev, qdepth);
+ return sdev->queue_depth;
+}
+
+/**
+ * ipr_show_adapter_handle - Show the adapter's resource handle for this device
+ * @dev: device struct
+ * @attr: device attribute structure
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len = -ENXIO;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_adapter_handle_attr = {
+ .attr = {
+ .name = "adapter_handle",
+ .mode = S_IRUSR,
+ },
+ .show = ipr_show_adapter_handle
+};
+
+/**
+ * ipr_show_resource_path - Show the resource path or the resource address for
+ * this device.
+ * @dev: device struct
+ * @attr: device attribute structure
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len = -ENXIO;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res && ioa_cfg->sis64)
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ __ipr_format_res_path(res->res_path, buffer,
+ sizeof(buffer)));
+ else if (res)
+ len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
+ res->bus, res->target, res->lun);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_resource_path_attr = {
+ .attr = {
+ .name = "resource_path",
+ .mode = S_IRUGO,
+ },
+ .show = ipr_show_resource_path
+};
+
+/**
+ * ipr_show_device_id - Show the device_id for this device.
+ * @dev: device struct
+ * @attr: device attribute structure
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len = -ENXIO;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res && ioa_cfg->sis64)
+ len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
+ else if (res)
+ len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_device_id_attr = {
+ .attr = {
+ .name = "device_id",
+ .mode = S_IRUGO,
+ },
+ .show = ipr_show_device_id
+};
+
+/**
+ * ipr_show_resource_type - Show the resource type for this device.
+ * @dev: device struct
+ * @attr: device attribute structure
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len = -ENXIO;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_resource_type_attr = {
+ .attr = {
+ .name = "resource_type",
+ .mode = S_IRUGO,
+ },
+ .show = ipr_show_resource_type
+};
+
+/**
+ * ipr_show_raw_mode - Show the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_raw_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
+ else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+/**
+ * ipr_store_raw_mode - Change the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_raw_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res) {
+ if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
+ res->raw_mode = simple_strtoul(buf, NULL, 10);
+ len = strlen(buf);
+ if (res->sdev)
+ sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
+ res->raw_mode ? "enabled" : "disabled");
+ } else
+ len = -EINVAL;
+ } else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_raw_mode_attr = {
+ .attr = {
+ .name = "raw_mode",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_raw_mode,
+ .store = ipr_store_raw_mode
+};
+
+static struct device_attribute *ipr_dev_attrs[] = {
+ &ipr_adapter_handle_attr,
+ &ipr_resource_path_attr,
+ &ipr_device_id_attr,
+ &ipr_resource_type_attr,
+ &ipr_raw_mode_attr,
+ NULL,
+};
+
+/**
+ * ipr_biosparam - Return the HSC mapping
+ * @sdev: scsi device struct
+ * @block_device: block device pointer
+ * @capacity: capacity of the device
+ * @parm: Array containing returned HSC values.
+ *
+ * This function generates the HSC parms that fdisk uses.
+ * We want to make sure we return something that places partitions
+ * on 4k boundaries for best performance with the IOA.
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_biosparam(struct scsi_device *sdev,
+ struct block_device *block_device,
+ sector_t capacity, int *parm)
+{
+ int heads, sectors;
+ sector_t cylinders;
+
+ heads = 128;
+ sectors = 32;
+
+ cylinders = capacity;
+ sector_div(cylinders, (128 * 32));
+
+ /* return result */
+ parm[0] = heads;
+ parm[1] = sectors;
+ parm[2] = cylinders;
+
+ return 0;
+}
+
+/**
+ * ipr_find_starget - Find target based on bus/target.
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * resource entry pointer if found / NULL if not found
+ **/
+static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+ struct ipr_resource_entry *res;
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if ((res->bus == starget->channel) &&
+ (res->target == starget->id)) {
+ return res;
+ }
+ }
+
+ return NULL;
+}
+
+static struct ata_port_info sata_port_info;
+
+/**
+ * ipr_target_alloc - Prepare for commands to a SCSI target
+ * @starget: scsi target struct
+ *
+ * If the device is a SATA device, this function allocates an
+ * ATA port with libata, else it does nothing.
+ *
+ * Return value:
+ * 0 on success / non-0 on failure
+ **/
+static int ipr_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+ struct ipr_sata_port *sata_port;
+ struct ata_port *ap;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = ipr_find_starget(starget);
+ starget->hostdata = NULL;
+
+ if (res && ipr_is_gata(res)) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
+ if (!sata_port)
+ return -ENOMEM;
+
+ ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
+ if (ap) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ sata_port->ioa_cfg = ioa_cfg;
+ sata_port->ap = ap;
+ sata_port->res = res;
+
+ res->sata_port = sata_port;
+ ap->private_data = sata_port;
+ starget->hostdata = sata_port;
+ } else {
+ kfree(sata_port);
+ return -ENOMEM;
+ }
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ return 0;
+}
+
+/**
+ * ipr_target_destroy - Destroy a SCSI target
+ * @starget: scsi target struct
+ *
+ * If the device was a SATA device, this function frees the libata
+ * ATA port, else it does nothing.
+ *
+ **/
+static void ipr_target_destroy(struct scsi_target *starget)
+{
+ struct ipr_sata_port *sata_port = starget->hostdata;
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+
+ if (ioa_cfg->sis64) {
+ if (!ipr_find_starget(starget)) {
+ if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->array_ids);
+ else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->vset_ids);
+ else if (starget->channel == 0)
+ clear_bit(starget->id, ioa_cfg->target_ids);
+ }
+ }
+
+ if (sata_port) {
+ starget->hostdata = NULL;
+ ata_sas_port_destroy(sata_port->ap);
+ kfree(sata_port);
+ }
+}
+
+/**
+ * ipr_find_sdev - Find device based on bus/target/lun.
+ * @sdev: scsi device struct
+ *
+ * Return value:
+ * resource entry pointer if found / NULL if not found
+ **/
+static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if ((res->bus == sdev->channel) &&
+ (res->target == sdev->id) &&
+ (res->lun == sdev->lun))
+ return res;
+ }
+
+ return NULL;
+}
+
+/**
+ * ipr_slave_destroy - Unconfigure a SCSI device
+ * @sdev: scsi device struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_slave_destroy(struct scsi_device *sdev)
+{
+ struct ipr_resource_entry *res;
+ struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long lock_flags = 0;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *) sdev->hostdata;
+ if (res) {
+ if (res->sata_port)
+ res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
+ sdev->hostdata = NULL;
+ res->sdev = NULL;
+ res->sata_port = NULL;
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
+ * ipr_slave_configure - Configure a SCSI device
+ * @sdev: scsi device struct
+ *
+ * This function configures the specified scsi device.
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_slave_configure(struct scsi_device *sdev)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ struct ata_port *ap = NULL;
+ unsigned long lock_flags = 0;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = sdev->hostdata;
+ if (res) {
+ if (ipr_is_af_dasd_device(res))
+ sdev->type = TYPE_RAID;
+ if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
+ sdev->scsi_level = 4;
+ sdev->no_uld_attach = 1;
+ }
+ if (ipr_is_vset_device(res)) {
+ sdev->scsi_level = SCSI_SPC_3;
+ blk_queue_rq_timeout(sdev->request_queue,
+ IPR_VSET_RW_TIMEOUT);
+ blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
+ }
+ if (ipr_is_gata(res) && res->sata_port)
+ ap = res->sata_port->ap;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ap) {
+ scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
+ ata_sas_slave_configure(sdev, ap);
+ }
+
+ if (ioa_cfg->sis64)
+ sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
+ ipr_format_res_path(ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
+ return 0;
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return 0;
+}
+
+/**
+ * ipr_ata_slave_alloc - Prepare for commands to a SATA device
+ * @sdev: scsi device struct
+ *
+ * This function initializes an ATA port so that future commands
+ * sent through queuecommand will work.
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_ata_slave_alloc(struct scsi_device *sdev)
+{
+ struct ipr_sata_port *sata_port = NULL;
+ int rc = -ENXIO;
+
+ ENTER;
+ if (sdev->sdev_target)
+ sata_port = sdev->sdev_target->hostdata;
+ if (sata_port) {
+ rc = ata_sas_port_init(sata_port->ap);
+ if (rc == 0)
+ rc = ata_sas_sync_probe(sata_port->ap);
+ }
+
+ if (rc)
+ ipr_slave_destroy(sdev);
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_slave_alloc - Prepare for commands to a device.
+ * @sdev: scsi device struct
+ *
+ * This function saves a pointer to the resource entry
+ * in the scsi device struct if the device exists. We
+ * can then use this pointer in ipr_queuecommand when
+ * handling new commands.
+ *
+ * Return value:
+ * 0 on success / -ENXIO if device does not exist
+ **/
+static int ipr_slave_alloc(struct scsi_device *sdev)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags;
+ int rc = -ENXIO;
+
+ sdev->hostdata = NULL;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ res = ipr_find_sdev(sdev);
+ if (res) {
+ res->sdev = sdev;
+ res->add_to_ml = 0;
+ res->in_erp = 0;
+ sdev->hostdata = res;
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ rc = 0;
+ if (ipr_is_gata(res)) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return ipr_ata_slave_alloc(sdev);
+ }
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ return rc;
+}
+
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ * @match: match function to use
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+ int (*match)(struct ipr_cmnd *, void *))
+{
+ struct ipr_cmnd *ipr_cmd;
+ int wait;
+ unsigned long flags;
+ struct ipr_hrr_queue *hrrq;
+ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+ DECLARE_COMPLETION_ONSTACK(comp);
+
+ ENTER;
+ do {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait)
+ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long lock_flags = 0;
+ int rc = SUCCESS;
+
+ ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
+ dev_err(&ioa_cfg->pdev->dev,
+ "Adapter being reset as a result of error recovery.\n");
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ /* If we got hit with a host reset while we were already resetting
+ the adapter for some reason, and the reset failed. */
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_trace;
+ rc = FAILED;
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_device_reset - Reset the device
+ * @ioa_cfg: ioa config struct
+ * @res: resource entry struct
+ *
+ * This function issues a device reset to the affected device.
+ * If the device is a SCSI device, a LUN reset will be sent
+ * to the device first. If that does not work, a target reset
+ * will be sent. If the device is a SATA device, a PHY reset will
+ * be sent.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_resource_entry *res)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioarcb *ioarcb;
+ struct ipr_cmd_pkt *cmd_pkt;
+ struct ipr_ioarcb_ata_regs *regs;
+ u32 ioasc;
+
+ ENTER;
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ioarcb = &ipr_cmd->ioarcb;
+ cmd_pkt = &ioarcb->cmd_pkt;
+
+ if (ipr_cmd->ioa_cfg->sis64) {
+ regs = &ipr_cmd->i.ata_ioadl.regs;
+ ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
+ } else
+ regs = &ioarcb->u.add_data.u.regs;
+
+ ioarcb->res_handle = res->res_handle;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+ if (ipr_is_gata(res)) {
+ cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
+ ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
+ regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
+ }
+
+ ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
+ if (ipr_cmd->ioa_cfg->sis64)
+ memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ else
+ memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ }
+
+ LEAVE;
+ return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
+}
+
+/**
+ * ipr_sata_reset - Reset the SATA port
+ * @link: SATA link to reset
+ * @classes: class of the attached device
+ *
+ * This function issues a SATA phy reset to the affected ATA link.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ipr_sata_port *sata_port = link->ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ int rc = -ENXIO;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+
+ res = sata_port->res;
+ if (res) {
+ rc = ipr_device_reset(ioa_cfg, res);
+ *classes = res->ata_class;
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_eh_dev_reset - Reset the device
+ * @scsi_cmd: scsi command struct
+ *
+ * This function issues a device reset to the affected device.
+ * A LUN reset will be sent to the device first. If that does
+ * not work, a target reset will be sent.
+ *
+ * Return value:
+ * SUCCESS / FAILED
+ **/
+static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_resource_entry *res;
+ struct ata_port *ap;
+ int rc = 0;
+ struct ipr_hrr_queue *hrrq;
+
+ ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+ res = scsi_cmd->device->hostdata;
+
+ if (!res)
+ return FAILED;
+
+ /*
+ * If we are currently going through reset/reload, return failed. This will force the
+ * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
+ * reset to complete
+ */
+ if (ioa_cfg->in_reset_reload)
+ return FAILED;
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
+ return FAILED;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->qc &&
+ !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ }
+ }
+ }
+ spin_unlock(&hrrq->_lock);
+ }
+ res->resetting_device = 1;
+ scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
+
+ if (ipr_is_gata(res) && res->sata_port) {
+ ap = res->sata_port->ap;
+ spin_unlock_irq(scsi_cmd->device->host->host_lock);
+ ata_std_error_handler(ap);
+ spin_lock_irq(scsi_cmd->device->host->host_lock);
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd,
+ &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle ==
+ res->res_handle) {
+ rc = -EIO;
+ break;
+ }
+ }
+ spin_unlock(&hrrq->_lock);
+ }
+ } else
+ rc = ipr_device_reset(ioa_cfg, res);
+ res->resetting_device = 0;
+ res->reset_occurred = 1;
+
+ LEAVE;
+ return rc ? FAILED : SUCCESS;
+}
+
+static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
+{
+ int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = __ipr_eh_dev_reset(cmd);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
+ return rc;
+}
+
+/**
+ * ipr_bus_reset_done - Op done function for bus reset.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is the op done function for a bus reset
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_resource_entry *res;
+
+ ENTER;
+ if (!ioa_cfg->sis64)
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
+ scsi_report_bus_reset(ioa_cfg->host, res->bus);
+ break;
+ }
+ }
+
+ /*
+ * If abort has not completed, indicate the reset has, else call the
+ * abort's done function to wake the sleeping eh thread
+ */
+ if (ipr_cmd->sibling->sibling)
+ ipr_cmd->sibling->sibling = NULL;
+ else
+ ipr_cmd->sibling->done(ipr_cmd->sibling);
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ LEAVE;
+}
+
+/**
+ * ipr_abort_timeout - An abort task has timed out
+ * @ipr_cmd: ipr command struct
+ *
+ * This function handles when an abort task times out. If this
+ * happens we issue a bus reset since we have resources tied
+ * up that must be freed before returning to the midlayer.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_cmnd *reset_cmd;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_cmd_pkt *cmd_pkt;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
+ reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_cmd->sibling = reset_cmd;
+ reset_cmd->sibling = ipr_cmd;
+ reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
+ cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+ cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
+
+ ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
+ * ipr_cancel_op - Cancel specified op
+ * @scsi_cmd: scsi command struct
+ *
+ * This function cancels specified op.
+ *
+ * Return value:
+ * SUCCESS / FAILED
+ **/
+static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_resource_entry *res;
+ struct ipr_cmd_pkt *cmd_pkt;
+ u32 ioasc, int_reg;
+ int op_found = 0;
+ struct ipr_hrr_queue *hrrq;
+
+ ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
+ res = scsi_cmd->device->hostdata;
+
+ /* If we are currently going through reset/reload, return failed.
+ * This will force the mid-layer to call ipr_eh_host_reset,
+ * which will then go to sleep and wait for the reset to complete
+ */
+ if (ioa_cfg->in_reset_reload ||
+ ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
+ return FAILED;
+ if (!res)
+ return FAILED;
+
+ /*
+ * If we are aborting a timed out op, chances are that the timeout was caused
+ * by a still not detected EEH error. In such cases, reading a register will
+ * trigger the EEH recovery infrastructure.
+ */
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+ if (!ipr_is_gscsi(res))
+ return FAILED;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->scsi_cmd == scsi_cmd) {
+ ipr_cmd->done = ipr_scsi_eh_done;
+ op_found = 1;
+ break;
+ }
+ }
+ spin_unlock(&hrrq->_lock);
+ }
+
+ if (!op_found)
+ return SUCCESS;
+
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_cmd->ioarcb.res_handle = res->res_handle;
+ cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
+ ipr_cmd->u.sdev = scsi_cmd->device;
+
+ scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
+ scsi_cmd->cmnd[0]);
+ ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ /*
+ * If the abort task timed out and we sent a bus reset, we will get
+ * one the following responses to the abort
+ */
+ if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
+ ioasc = 0;
+ ipr_trace;
+ }
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+
+ LEAVE;
+ return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
+}
+
+/**
+ * ipr_eh_abort - Abort a single op
+ * @scsi_cmd: scsi command struct
+ *
+ * Return value:
+ * 0 if scan in progress / 1 if scan is complete
+ **/
+static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
+{
+ unsigned long lock_flags;
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+ int rc = 0;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
+ rc = 1;
+ if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
+ rc = 1;
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * ipr_eh_host_reset - Reset the host adapter
+ * @scsi_cmd: scsi command struct
+ *
+ * Return value:
+ * SUCCESS / FAILED
+ **/
+static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
+{
+ unsigned long flags;
+ int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
+
+ ENTER;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
+ spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
+ rc = ipr_cancel_op(scsi_cmd);
+ spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_handle_other_interrupt - Handle "other" interrupts
+ * @ioa_cfg: ioa config struct
+ * @int_reg: interrupt register
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
+ u32 int_reg)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ u32 int_mask_reg;
+
+ int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+ int_reg &= ~int_mask_reg;
+
+ /* If an interrupt on the adapter did not occur, ignore it.
+ * Or in the case of SIS 64, check for a stage change interrupt.
+ */
+ if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
+ if (ioa_cfg->sis64) {
+ int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
+
+ /* clear stage change */
+ writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ list_del(&ioa_cfg->reset_cmd->queue);
+ del_timer(&ioa_cfg->reset_cmd->timer);
+ ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+ return IRQ_HANDLED;
+ }
+ }
+
+ return IRQ_NONE;
+ }
+
+ if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
+ /* Mask the interrupt */
+ writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+ list_del(&ioa_cfg->reset_cmd->queue);
+ del_timer(&ioa_cfg->reset_cmd->timer);
+ ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+ } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
+ if (ioa_cfg->clear_isr) {
+ if (ipr_debug && printk_ratelimit())
+ dev_err(&ioa_cfg->pdev->dev,
+ "Spurious interrupt detected. 0x%08X\n", int_reg);
+ writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ return IRQ_NONE;
+ }
+ } else {
+ if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
+ ioa_cfg->ioa_unit_checked = 1;
+ else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+ dev_err(&ioa_cfg->pdev->dev,
+ "No Host RRQ. 0x%08X\n", int_reg);
+ else
+ dev_err(&ioa_cfg->pdev->dev,
+ "Permanent IOA failure. 0x%08X\n", int_reg);
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ }
+
+ return rc;
+}
+
+/**
+ * ipr_isr_eh - Interrupt service routine error handler
+ * @ioa_cfg: ioa config struct
+ * @msg: message to log
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
+{
+ ioa_cfg->errors_logged++;
+ dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+}
+
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
+ struct list_head *doneq)
+{
+ u32 ioasc;
+ u16 cmd_index;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+ int num_hrrq = 0;
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrr_queue->allow_interrupts)
+ return 0;
+
+ while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrr_queue->toggle_bit) {
+
+ cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+ IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+ IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+ if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+ cmd_index < hrr_queue->min_cmd_id)) {
+ ipr_isr_eh(ioa_cfg,
+ "Invalid response handle from IOA: ",
+ cmd_index);
+ break;
+ }
+
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+ list_move_tail(&ipr_cmd->queue, doneq);
+
+ if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+ hrr_queue->hrrq_curr++;
+ } else {
+ hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+ hrr_queue->toggle_bit ^= 1u;
+ }
+ num_hrrq++;
+ if (budget > 0 && num_hrrq >= budget)
+ break;
+ }
+
+ return num_hrrq;
+}
+
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_hrr_queue *hrrq;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ unsigned long hrrq_flags;
+ int completed_ops;
+ LIST_HEAD(doneq);
+
+ hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+ ioa_cfg = hrrq->ioa_cfg;
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+ if (completed_ops < budget)
+ blk_iopoll_complete(iop);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
+ return completed_ops;
+}
+
+/**
+ * ipr_isr - Interrupt service routine
+ * @irq: irq number
+ * @devp: pointer to ioa config struct
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr(int irq, void *devp)
+{
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
+ u32 int_reg = 0;
+ int num_hrrq = 0;
+ int irq_none = 0;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_NONE;
+ }
+
+ while (1) {
+ if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+ rc = IRQ_HANDLED;
+
+ if (!ioa_cfg->clear_isr)
+ break;
+
+ /* Clear the PCI interrupt */
+ num_hrrq = 0;
+ do {
+ writel(IPR_PCII_HRRQ_UPDATED,
+ ioa_cfg->regs.clr_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+
+ } else if (rc == IRQ_NONE && irq_none == 0) {
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ irq_none++;
+ } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+ int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg,
+ "Error clearing HRRQ: ", num_hrrq);
+ rc = IRQ_HANDLED;
+ break;
+ } else
+ break;
+ }
+
+ if (unlikely(rc == IRQ_NONE))
+ rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
+
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+ return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq: irq number
+ * @devp: pointer to ioa config struct
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_NONE;
+ }
+
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit) {
+ if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+ blk_iopoll_sched(&hrrq->iopoll);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
+ } else {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit)
+
+ if (ipr_process_hrrq(hrrq, -1, &doneq))
+ rc = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+ return rc;
+}
+
+/**
+ * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
+ * @ioa_cfg: ioa config struct
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * 0 on success / -1 on failure
+ **/
+static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_cmnd *ipr_cmd)
+{
+ int i, nseg;
+ struct scatterlist *sg;
+ u32 length;
+ u32 ioadl_flags = 0;
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+
+ length = scsi_bufflen(scsi_cmd);
+ if (!length)
+ return 0;
+
+ nseg = scsi_dma_map(scsi_cmd);
+ if (nseg < 0) {
+ if (printk_ratelimit())
+ dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
+ return -1;
+ }
+
+ ipr_cmd->dma_use_sg = nseg;
+
+ ioarcb->data_transfer_length = cpu_to_be32(length);
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+
+ scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+ ioadl64[i].flags = cpu_to_be32(ioadl_flags);
+ ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+ ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
+ }
+
+ ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ return 0;
+}
+
+/**
+ * ipr_build_ioadl - Build a scatter/gather list and map the buffer
+ * @ioa_cfg: ioa config struct
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * 0 on success / -1 on failure
+ **/
+static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_cmnd *ipr_cmd)
+{
+ int i, nseg;
+ struct scatterlist *sg;
+ u32 length;
+ u32 ioadl_flags = 0;
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+
+ length = scsi_bufflen(scsi_cmd);
+ if (!length)
+ return 0;
+
+ nseg = scsi_dma_map(scsi_cmd);
+ if (nseg < 0) {
+ dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
+ return -1;
+ }
+
+ ipr_cmd->dma_use_sg = nseg;
+
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->data_transfer_length = cpu_to_be32(length);
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+ ioarcb->read_data_transfer_length = cpu_to_be32(length);
+ ioarcb->read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ }
+
+ if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
+ ioadl = ioarcb->u.add_data.u.ioadl;
+ ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
+ offsetof(struct ipr_ioarcb, u.add_data));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
+
+ scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+ ioadl[i].flags_and_data_len =
+ cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+ ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
+ }
+
+ ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ return 0;
+}
+
+/**
+ * ipr_erp_done - Process completion of ERP for a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function copies the sense buffer into the scsi_cmd
+ * struct and pushes the scsi_done function.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
+ scsi_cmd->result |= (DID_ERROR << 16);
+ scmd_printk(KERN_ERR, scsi_cmd,
+ "Request Sense failed with IOASC: 0x%08X\n", ioasc);
+ } else {
+ memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+ }
+
+ if (res) {
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ res->in_erp = 0;
+ }
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ dma_addr_t dma_addr = ipr_cmd->dma_addr;
+
+ memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+ ioarcb->data_transfer_length = 0;
+ ioarcb->read_data_transfer_length = 0;
+ ioarcb->ioadl_len = 0;
+ ioarcb->read_ioadl_len = 0;
+ ioasa->hdr.ioasc = 0;
+ ioasa->hdr.residual_data_len = 0;
+
+ if (ipr_cmd->ioa_cfg->sis64)
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+ else {
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
+}
+
+/**
+ * ipr_erp_request_sense - Send request sense to a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a request sense to a device as a result
+ * of a check condition.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
+ ipr_erp_done(ipr_cmd);
+ return;
+ }
+
+ ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
+
+ cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
+ cmd_pkt->cdb[0] = REQUEST_SENSE;
+ cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+ cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
+ cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+ cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
+
+ ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
+ SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
+
+ ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
+ IPR_REQUEST_SENSE_TIMEOUT * 2);
+}
+
+/**
+ * ipr_erp_cancel_all - Send cancel all to a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a cancel all to a device to clear the
+ * queue. If we are running TCQ on the device, QERR is set to 1,
+ * which means all outstanding ops have been dropped on the floor.
+ * Cancel all will return them to us.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
+{
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
+ struct ipr_cmd_pkt *cmd_pkt;
+
+ res->in_erp = 1;
+
+ ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
+
+ if (!scsi_cmd->device->simple_tags) {
+ ipr_erp_request_sense(ipr_cmd);
+ return;
+ }
+
+ cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
+
+ ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
+ IPR_CANCEL_ALL_TIMEOUT);
+}
+
+/**
+ * ipr_dump_ioasa - Dump contents of IOASA
+ * @ioa_cfg: ioa config struct
+ * @ipr_cmd: ipr command struct
+ * @res: resource entry struct
+ *
+ * This function is invoked by the interrupt handler when ops
+ * fail. It will log the IOASA if appropriate. Only called
+ * for GPDD ops.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
+{
+ int i;
+ u16 data_len;
+ u32 ioasc, fd_ioasc;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ __be32 *ioasa_data = (__be32 *)ioasa;
+ int error_index;
+
+ ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
+ fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
+
+ if (0 == ioasc)
+ return;
+
+ if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
+ return;
+
+ if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
+ error_index = ipr_get_error(fd_ioasc);
+ else
+ error_index = ipr_get_error(ioasc);
+
+ if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
+ /* Don't log an error if the IOA already logged one */
+ if (ioasa->hdr.ilid != 0)
+ return;
+
+ if (!ipr_is_gscsi(res))
+ return;
+
+ if (ipr_error_table[error_index].log_ioasa == 0)
+ return;
+ }
+
+ ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
+
+ data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
+ if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
+ data_len = sizeof(struct ipr_ioasa64);
+ else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
+ data_len = sizeof(struct ipr_ioasa);
+
+ ipr_err("IOASA Dump:\n");
+
+ for (i = 0; i < data_len / 4; i += 4) {
+ ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
+ be32_to_cpu(ioasa_data[i]),
+ be32_to_cpu(ioasa_data[i+1]),
+ be32_to_cpu(ioasa_data[i+2]),
+ be32_to_cpu(ioasa_data[i+3]));
+ }
+}
+
+/**
+ * ipr_gen_sense - Generate SCSI sense data from an IOASA
+ * @ioasa: IOASA
+ * @sense_buf: sense data buffer
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
+{
+ u32 failing_lba;
+ u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
+ struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
+
+ memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+
+ if (ioasc >= IPR_FIRST_DRIVER_IOASC)
+ return;
+
+ ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
+
+ if (ipr_is_vset_device(res) &&
+ ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
+ ioasa->u.vset.failing_lba_hi != 0) {
+ sense_buf[0] = 0x72;
+ sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
+ sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
+ sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
+
+ sense_buf[7] = 12;
+ sense_buf[8] = 0;
+ sense_buf[9] = 0x0A;
+ sense_buf[10] = 0x80;
+
+ failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
+
+ sense_buf[12] = (failing_lba & 0xff000000) >> 24;
+ sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
+ sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
+ sense_buf[15] = failing_lba & 0x000000ff;
+
+ failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
+
+ sense_buf[16] = (failing_lba & 0xff000000) >> 24;
+ sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
+ sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
+ sense_buf[19] = failing_lba & 0x000000ff;
+ } else {
+ sense_buf[0] = 0x70;
+ sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
+ sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
+ sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
+
+ /* Illegal request */
+ if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
+ (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
+ sense_buf[7] = 10; /* additional length */
+
+ /* IOARCB was in error */
+ if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
+ sense_buf[15] = 0xC0;
+ else /* Parameter data was invalid */
+ sense_buf[15] = 0x80;
+
+ sense_buf[16] =
+ ((IPR_FIELD_POINTER_MASK &
+ be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
+ sense_buf[17] =
+ (IPR_FIELD_POINTER_MASK &
+ be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
+ } else {
+ if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
+ if (ipr_is_vset_device(res))
+ failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
+ else
+ failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
+
+ sense_buf[0] |= 0x80; /* Or in the Valid bit */
+ sense_buf[3] = (failing_lba & 0xff000000) >> 24;
+ sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
+ sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
+ sense_buf[6] = failing_lba & 0x000000ff;
+ }
+
+ sense_buf[7] = 6; /* additional length */
+ }
+ }
+}
+
+/**
+ * ipr_get_autosense - Copy autosense data to sense buffer
+ * @ipr_cmd: ipr command struct
+ *
+ * This function copies the autosense buffer to the buffer
+ * in the scsi_cmd, if there is autosense available.
+ *
+ * Return value:
+ * 1 if autosense was available / 0 if not
+ **/
+static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
+
+ if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
+ return 0;
+
+ if (ipr_cmd->ioa_cfg->sis64)
+ memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
+ min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
+ SCSI_SENSE_BUFFERSIZE));
+ else
+ memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
+ min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
+ SCSI_SENSE_BUFFERSIZE));
+ return 1;
+}
+
+/**
+ * ipr_erp_start - Process an error response for a SCSI op
+ * @ioa_cfg: ioa config struct
+ * @ipr_cmd: ipr command struct
+ *
+ * This function determines whether or not to initiate ERP
+ * on the affected device.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_cmnd *ipr_cmd)
+{
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
+
+ if (!res) {
+ ipr_scsi_eh_done(ipr_cmd);
+ return;
+ }
+
+ if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
+ ipr_gen_sense(ipr_cmd);
+
+ ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
+
+ switch (masked_ioasc) {
+ case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
+ if (ipr_is_naca_model(res))
+ scsi_cmd->result |= (DID_ABORT << 16);
+ else
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ break;
+ case IPR_IOASC_IR_RESOURCE_HANDLE:
+ case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
+ scsi_cmd->result |= (DID_NO_CONNECT << 16);
+ break;
+ case IPR_IOASC_HW_SEL_TIMEOUT:
+ scsi_cmd->result |= (DID_NO_CONNECT << 16);
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ break;
+ case IPR_IOASC_SYNC_REQUIRED:
+ if (!res->in_erp)
+ res->needs_sync_complete = 1;
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ break;
+ case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
+ case IPR_IOASA_IR_DUAL_IOA_DISABLED:
+ scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+ break;
+ case IPR_IOASC_BUS_WAS_RESET:
+ case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
+ /*
+ * Report the bus reset and ask for a retry. The device
+ * will give CC/UA the next command.
+ */
+ if (!res->resetting_device)
+ scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
+ scsi_cmd->result |= (DID_ERROR << 16);
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ break;
+ case IPR_IOASC_HW_DEV_BUS_STATUS:
+ scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
+ if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
+ if (!ipr_get_autosense(ipr_cmd)) {
+ if (!ipr_is_naca_model(res)) {
+ ipr_erp_cancel_all(ipr_cmd);
+ return;
+ }
+ }
+ }
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ break;
+ case IPR_IOASC_NR_INIT_CMD_REQUIRED:
+ break;
+ case IPR_IOASC_IR_NON_OPTIMIZED:
+ if (res->raw_mode) {
+ res->raw_mode = 0;
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ } else
+ scsi_cmd->result |= (DID_ERROR << 16);
+ break;
+ default:
+ if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
+ scsi_cmd->result |= (DID_ERROR << 16);
+ if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ break;
+ }
+
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * ipr_scsi_done - mid-layer done function
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ unsigned long hrrq_flags;
+
+ scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
+
+ if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
+ scsi_dma_unmap(scsi_cmd);
+
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ scsi_cmd->scsi_done(scsi_cmd);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+ } else {
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ ipr_erp_start(ioa_cfg, ipr_cmd);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+ }
+}
+
+/**
+ * ipr_queuecommand - Queue a mid-layer request
+ * @shost: scsi host struct
+ * @scsi_cmd: scsi command struct
+ *
+ * This function queues a request generated by the mid-layer.
+ *
+ * Return value:
+ * 0 on success
+ * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
+ * SCSI_MLQUEUE_HOST_BUSY if host is busy
+ **/
+static int ipr_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scsi_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_resource_entry *res;
+ struct ipr_ioarcb *ioarcb;
+ struct ipr_cmnd *ipr_cmd;
+ unsigned long hrrq_flags, lock_flags;
+ int rc;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
+
+ ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+ scsi_cmd->result = (DID_OK << 16);
+ res = scsi_cmd->device->hostdata;
+
+ if (ipr_is_gata(res) && res->sata_port) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+ }
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ /*
+ * We are currently blocking all devices due to a host reset
+ * We have told the host to stop giving us new requests, but
+ * ERP ops don't count. FIXME
+ */
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /*
+ * FIXME - Create scsi_set_host_offline interface
+ * and the ioa_is_dead check can be removed
+ */
+ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ goto err_nodev;
+ }
+
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
+ ioarcb = &ipr_cmd->ioarcb;
+
+ memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
+ ipr_cmd->scsi_cmd = scsi_cmd;
+ ipr_cmd->done = ipr_scsi_eh_done;
+
+ if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
+ if (scsi_cmd->underflow == 0)
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
+ if (ipr_is_gscsi(res) && res->reset_occurred) {
+ res->reset_occurred = 0;
+ ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+ }
+ ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
+ if (scsi_cmd->flags & SCMD_TAGGED)
+ ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
+ else
+ ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
+ }
+
+ if (scsi_cmd->cmnd[0] >= 0xC0 &&
+ (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ }
+ if (res->raw_mode && ipr_is_af_dasd_device(res))
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
+
+ if (ioa_cfg->sis64)
+ rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+ else
+ rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ if (!rc)
+ scsi_dma_unmap(scsi_cmd);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ if (unlikely(hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ scsi_dma_unmap(scsi_cmd);
+ goto err_nodev;
+ }
+
+ ioarcb->res_handle = res->res_handle;
+ if (res->needs_sync_complete) {
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+ res->needs_sync_complete = 0;
+ }
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+ ipr_send_command(ipr_cmd);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return 0;
+
+err_nodev:
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_cmd->result = (DID_NO_CONNECT << 16);
+ scsi_cmd->scsi_done(scsi_cmd);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return 0;
+}
+
+/**
+ * ipr_ioctl - IOCTL handler
+ * @sdev: scsi device struct
+ * @cmd: IOCTL cmd
+ * @arg: IOCTL arg
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ struct ipr_resource_entry *res;
+
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res && ipr_is_gata(res)) {
+ if (cmd == HDIO_GET_IDENTITY)
+ return -ENOTTY;
+ return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ipr_info - Get information about the card/driver
+ * @scsi_host: scsi host struct
+ *
+ * Return value:
+ * pointer to buffer with description string
+ **/
+static const char *ipr_ioa_info(struct Scsi_Host *host)
+{
+ static char buffer[512];
+ struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long lock_flags = 0;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
+
+ spin_lock_irqsave(host->host_lock, lock_flags);
+ sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
+ spin_unlock_irqrestore(host->host_lock, lock_flags);
+
+ return buffer;
+}
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "IPR",
+ .info = ipr_ioa_info,
+ .ioctl = ipr_ioctl,
+ .queuecommand = ipr_queuecommand,
+ .eh_abort_handler = ipr_eh_abort,
+ .eh_device_reset_handler = ipr_eh_dev_reset,
+ .eh_host_reset_handler = ipr_eh_host_reset,
+ .slave_alloc = ipr_slave_alloc,
+ .slave_configure = ipr_slave_configure,
+ .slave_destroy = ipr_slave_destroy,
+ .scan_finished = ipr_scan_finished,
+ .target_alloc = ipr_target_alloc,
+ .target_destroy = ipr_target_destroy,
+ .change_queue_depth = ipr_change_queue_depth,
+ .bios_param = ipr_biosparam,
+ .can_queue = IPR_MAX_COMMANDS,
+ .this_id = -1,
+ .sg_tablesize = IPR_MAX_SGLIST,
+ .max_sectors = IPR_IOA_MAX_SECTORS,
+ .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = ipr_ioa_attrs,
+ .sdev_attrs = ipr_dev_attrs,
+ .proc_name = IPR_NAME,
+ .use_blk_tags = 1,
+};
+
+/**
+ * ipr_ata_phy_reset - libata phy_reset handler
+ * @ap: ata port to reset
+ *
+ **/
+static void ipr_ata_phy_reset(struct ata_port *ap)
+{
+ unsigned long flags;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_resource_entry *res = sata_port->res;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ int rc;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ }
+
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
+ goto out_unlock;
+
+ rc = ipr_device_reset(ioa_cfg, res);
+
+ if (rc) {
+ ap->link.device[0].class = ATA_DEV_NONE;
+ goto out_unlock;
+ }
+
+ ap->link.device[0].class = res->ata_class;
+ if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
+ ap->link.device[0].class = ATA_DEV_NONE;
+
+out_unlock:
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ LEAVE;
+}
+
+/**
+ * ipr_ata_post_internal - Cleanup after an internal command
+ * @qc: ATA queued command
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
+{
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ }
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->qc == qc) {
+ ipr_device_reset(ioa_cfg, sata_port->res);
+ break;
+ }
+ }
+ spin_unlock(&hrrq->_lock);
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
+ * @regs: destination
+ * @tf: source ATA taskfile
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
+ struct ata_taskfile *tf)
+{
+ regs->feature = tf->feature;
+ regs->nsect = tf->nsect;
+ regs->lbal = tf->lbal;
+ regs->lbam = tf->lbam;
+ regs->lbah = tf->lbah;
+ regs->device = tf->device;
+ regs->command = tf->command;
+ regs->hob_feature = tf->hob_feature;
+ regs->hob_nsect = tf->hob_nsect;
+ regs->hob_lbal = tf->hob_lbal;
+ regs->hob_lbam = tf->hob_lbam;
+ regs->hob_lbah = tf->hob_lbah;
+ regs->ctl = tf->ctl;
+}
+
+/**
+ * ipr_sata_done - done function for SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer to SATA devices
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ata_queued_cmd *qc = ipr_cmd->qc;
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+ struct ipr_resource_entry *res = sata_port->res;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ spin_lock(&ipr_cmd->hrrq->_lock);
+ if (ipr_cmd->ioa_cfg->sis64)
+ memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ else
+ memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
+
+ if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
+ scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
+
+ if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
+ qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
+ else
+ qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ ata_qc_complete(qc);
+}
+
+/**
+ * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
+ * @ipr_cmd: ipr command struct
+ * @qc: ATA queued command
+ *
+ **/
+static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
+ struct ata_queued_cmd *qc)
+{
+ u32 ioadl_flags = 0;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
+ struct ipr_ioadl64_desc *last_ioadl64 = NULL;
+ int len = qc->nbytes;
+ struct scatterlist *sg;
+ unsigned int si;
+ dma_addr_t dma_addr = ipr_cmd->dma_addr;
+
+ if (len == 0)
+ return;
+
+ if (qc->dma_dir == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ } else if (qc->dma_dir == DMA_FROM_DEVICE)
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+
+ ioarcb->data_transfer_length = cpu_to_be32(len);
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ ioadl64->flags = cpu_to_be32(ioadl_flags);
+ ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
+ ioadl64->address = cpu_to_be64(sg_dma_address(sg));
+
+ last_ioadl64 = ioadl64;
+ ioadl64++;
+ }
+
+ if (likely(last_ioadl64))
+ last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
+ * ipr_build_ata_ioadl - Build an ATA scatter/gather list
+ * @ipr_cmd: ipr command struct
+ * @qc: ATA queued command
+ *
+ **/
+static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
+ struct ata_queued_cmd *qc)
+{
+ u32 ioadl_flags = 0;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+ struct ipr_ioadl_desc *last_ioadl = NULL;
+ int len = qc->nbytes;
+ struct scatterlist *sg;
+ unsigned int si;
+
+ if (len == 0)
+ return;
+
+ if (qc->dma_dir == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->data_transfer_length = cpu_to_be32(len);
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ } else if (qc->dma_dir == DMA_FROM_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+ ioarcb->read_data_transfer_length = cpu_to_be32(len);
+ ioarcb->read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ }
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+ ioadl->address = cpu_to_be32(sg_dma_address(sg));
+
+ last_ioadl = ioadl;
+ ioadl++;
+ }
+
+ if (likely(last_ioadl))
+ last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ qc->lldd_task = NULL;
+ spin_lock(&hrrq->_lock);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ spin_unlock(&hrrq->_lock);
+ return 0;
+ }
+
+ if (unlikely(!hrrq->allow_cmds)) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ qc->lldd_task = ipr_cmd;
+ spin_unlock(&hrrq->_lock);
+ return 0;
+}
+
+/**
+ * ipr_qc_issue - Issue a SATA qc to a device
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_resource_entry *res = sata_port->res;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioarcb *ioarcb;
+ struct ipr_ioarcb_ata_regs *regs;
+
+ if (qc->lldd_task == NULL)
+ ipr_qc_defer(qc);
+
+ ipr_cmd = qc->lldd_task;
+ if (ipr_cmd == NULL)
+ return AC_ERR_SYSTEM;
+
+ qc->lldd_task = NULL;
+ spin_lock(&ipr_cmd->hrrq->_lock);
+ if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+ ipr_cmd->hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ return AC_ERR_SYSTEM;
+ }
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+ ioarcb = &ipr_cmd->ioarcb;
+
+ if (ioa_cfg->sis64) {
+ regs = &ipr_cmd->i.ata_ioadl.regs;
+ ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
+ } else
+ regs = &ioarcb->u.add_data.u.regs;
+
+ memset(regs, 0, sizeof(*regs));
+ ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+ ipr_cmd->qc = qc;
+ ipr_cmd->done = ipr_sata_done;
+ ipr_cmd->ioarcb.res_handle = res->res_handle;
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+ ipr_cmd->dma_use_sg = qc->n_elem;
+
+ if (ioa_cfg->sis64)
+ ipr_build_ata_ioadl64(ipr_cmd, qc);
+ else
+ ipr_build_ata_ioadl(ipr_cmd, qc);
+
+ regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
+ ipr_copy_sata_tf(regs, &qc->tf);
+ memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ case ATA_PROT_PIO:
+ break;
+
+ case ATA_PROT_DMA:
+ regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
+ break;
+
+ case ATAPI_PROT_PIO:
+ case ATAPI_PROT_NODATA:
+ regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
+ break;
+
+ case ATAPI_PROT_DMA:
+ regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
+ regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
+ break;
+
+ default:
+ WARN_ON(1);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ return AC_ERR_INVALID;
+ }
+
+ ipr_send_command(ipr_cmd);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+
+ return 0;
+}
+
+/**
+ * ipr_qc_fill_rtf - Read result TF
+ * @qc: ATA queued command
+ *
+ * Return value:
+ * true
+ **/
+static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+ struct ipr_ioasa_gata *g = &sata_port->ioasa;
+ struct ata_taskfile *tf = &qc->result_tf;
+
+ tf->feature = g->error;
+ tf->nsect = g->nsect;
+ tf->lbal = g->lbal;
+ tf->lbam = g->lbam;
+ tf->lbah = g->lbah;
+ tf->device = g->device;
+ tf->command = g->status;
+ tf->hob_nsect = g->hob_nsect;
+ tf->hob_lbal = g->hob_lbal;
+ tf->hob_lbam = g->hob_lbam;
+ tf->hob_lbah = g->hob_lbah;
+
+ return true;
+}
+
+static struct ata_port_operations ipr_sata_ops = {
+ .phy_reset = ipr_ata_phy_reset,
+ .hardreset = ipr_sata_reset,
+ .post_internal_cmd = ipr_ata_post_internal,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_defer = ipr_qc_defer,
+ .qc_issue = ipr_qc_issue,
+ .qc_fill_rtf = ipr_qc_fill_rtf,
+ .port_start = ata_sas_port_start,
+ .port_stop = ata_sas_port_stop
+};
+
+static struct ata_port_info sata_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_SAS_HOST,
+ .pio_mask = ATA_PIO4_ONLY,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ipr_sata_ops
+};
+
+#ifdef CONFIG_PPC_PSERIES
+static const u16 ipr_blocked_processors[] = {
+ PVR_NORTHSTAR,
+ PVR_PULSAR,
+ PVR_POWER4,
+ PVR_ICESTAR,
+ PVR_SSTAR,
+ PVR_POWER4p,
+ PVR_630,
+ PVR_630p
+};
+
+/**
+ * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
+ * @ioa_cfg: ioa cfg struct
+ *
+ * Adapters that use Gemstone revision < 3.1 do not work reliably on
+ * certain pSeries hardware. This function determines if the given
+ * adapter is in one of these confgurations or not.
+ *
+ * Return value:
+ * 1 if adapter is not supported / 0 if adapter is supported
+ **/
+static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i;
+
+ if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
+ for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
+ if (pvr_version_is(ipr_blocked_processors[i]))
+ return 1;
+ }
+ }
+ return 0;
+}
+#else
+#define ipr_invalid_adapter(ioa_cfg) 0
+#endif
+
+/**
+ * ipr_ioa_bringdown_done - IOA bring down completion.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function processes the completion of an adapter bring down.
+ * It wakes any reset sleepers.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
+
+ ENTER;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ipr_trace;
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+
+ ioa_cfg->in_reset_reload = 0;
+ ioa_cfg->reset_retries = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ wake_up_all(&ioa_cfg->reset_wait_q);
+ LEAVE;
+
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioa_reset_done - IOA reset completion.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function processes the completion of an adapter reset.
+ * It schedules any necessary mid-layer add/removes and
+ * wakes any reset sleepers.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_resource_entry *res;
+ struct ipr_hostrcb *hostrcb, *temp;
+ int i = 0, j;
+
+ ENTER;
+ ioa_cfg->in_reset_reload = 0;
+ for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+ spin_lock(&ioa_cfg->hrrq[j]._lock);
+ ioa_cfg->hrrq[j].allow_cmds = 1;
+ spin_unlock(&ioa_cfg->hrrq[j]._lock);
+ }
+ wmb();
+ ioa_cfg->reset_cmd = NULL;
+ ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->add_to_ml || res->del_from_ml) {
+ ipr_trace;
+ break;
+ }
+ }
+ schedule_work(&ioa_cfg->work_q);
+
+ list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
+ list_del(&hostrcb->queue);
+ if (i++ < IPR_NUM_LOG_HCAMS)
+ ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+ else
+ ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+ }
+
+ scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
+ dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
+
+ ioa_cfg->reset_retries = 0;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ wake_up_all(&ioa_cfg->reset_wait_q);
+
+ spin_unlock(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock(ioa_cfg->host->host_lock);
+
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
+ scsi_block_requests(ioa_cfg->host);
+
+ schedule_work(&ioa_cfg->work_q);
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
+ * @supported_dev: supported device struct
+ * @vpids: vendor product id struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
+ struct ipr_std_inq_vpids *vpids)
+{
+ memset(supported_dev, 0, sizeof(struct ipr_supported_device));
+ memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
+ supported_dev->num_records = 1;
+ supported_dev->data_length =
+ cpu_to_be16(sizeof(struct ipr_supported_device));
+ supported_dev->reserved = 0;
+}
+
+/**
+ * ipr_set_supported_devs - Send Set Supported Devices for a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a Set Supported Devices to the adapter
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_resource_entry *res = ipr_cmd->u.res;
+
+ ipr_cmd->job_step = ipr_ioa_reset_done;
+
+ list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
+ if (!ipr_is_scsi_disk(res))
+ continue;
+
+ ipr_cmd->u.res = res;
+ ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
+
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+
+ ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
+ ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
+ ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
+
+ ipr_init_ioadl(ipr_cmd,
+ ioa_cfg->vpd_cbs_dma +
+ offsetof(struct ipr_misc_cbs, supp_dev),
+ sizeof(struct ipr_supported_device),
+ IPR_IOADL_FLAGS_WRITE_LAST);
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_SET_SUP_DEVICE_TIMEOUT);
+
+ if (!ioa_cfg->sis64)
+ ipr_cmd->job_step = ipr_set_supported_devs;
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+ }
+
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_get_mode_page - Locate specified mode page
+ * @mode_pages: mode page buffer
+ * @page_code: page code to find
+ * @len: minimum required length for mode page
+ *
+ * Return value:
+ * pointer to mode page / NULL on failure
+ **/
+static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
+ u32 page_code, u32 len)
+{
+ struct ipr_mode_page_hdr *mode_hdr;
+ u32 page_length;
+ u32 length;
+
+ if (!mode_pages || (mode_pages->hdr.length == 0))
+ return NULL;
+
+ length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
+ mode_hdr = (struct ipr_mode_page_hdr *)
+ (mode_pages->data + mode_pages->hdr.block_desc_len);
+
+ while (length) {
+ if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
+ if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
+ return mode_hdr;
+ break;
+ } else {
+ page_length = (sizeof(struct ipr_mode_page_hdr) +
+ mode_hdr->page_length);
+ length -= page_length;
+ mode_hdr = (struct ipr_mode_page_hdr *)
+ ((unsigned long)mode_hdr + page_length);
+ }
+ }
+ return NULL;
+}
+
+/**
+ * ipr_check_term_power - Check for term power errors
+ * @ioa_cfg: ioa config struct
+ * @mode_pages: IOAFP mode pages buffer
+ *
+ * Check the IOAFP's mode page 28 for term power errors
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_mode_pages *mode_pages)
+{
+ int i;
+ int entry_length;
+ struct ipr_dev_bus_entry *bus;
+ struct ipr_mode_page28 *mode_page;
+
+ mode_page = ipr_get_mode_page(mode_pages, 0x28,
+ sizeof(struct ipr_mode_page28));
+
+ entry_length = mode_page->entry_length;
+
+ bus = mode_page->bus;
+
+ for (i = 0; i < mode_page->num_entries; i++) {
+ if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Term power is absent on scsi bus %d\n",
+ bus->res_addr.bus);
+ }
+
+ bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
+ }
+}
+
+/**
+ * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
+ * @ioa_cfg: ioa config struct
+ *
+ * Looks through the config table checking for SES devices. If
+ * the SES device is in the SES table indicating a maximum SCSI
+ * bus speed, the speed is limited for the bus.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
+{
+ u32 max_xfer_rate;
+ int i;
+
+ for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
+ max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
+ ioa_cfg->bus_attr[i].bus_width);
+
+ if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
+ ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
+ }
+}
+
+/**
+ * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
+ * @ioa_cfg: ioa config struct
+ * @mode_pages: mode page 28 buffer
+ *
+ * Updates mode page 28 based on driver configuration
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_mode_pages *mode_pages)
+{
+ int i, entry_length;
+ struct ipr_dev_bus_entry *bus;
+ struct ipr_bus_attributes *bus_attr;
+ struct ipr_mode_page28 *mode_page;
+
+ mode_page = ipr_get_mode_page(mode_pages, 0x28,
+ sizeof(struct ipr_mode_page28));
+
+ entry_length = mode_page->entry_length;
+
+ /* Loop for each device bus entry */
+ for (i = 0, bus = mode_page->bus;
+ i < mode_page->num_entries;
+ i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
+ if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Invalid resource address reported: 0x%08X\n",
+ IPR_GET_PHYS_LOC(bus->res_addr));
+ continue;
+ }
+
+ bus_attr = &ioa_cfg->bus_attr[i];
+ bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
+ bus->bus_width = bus_attr->bus_width;
+ bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
+ bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
+ if (bus_attr->qas_enabled)
+ bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
+ else
+ bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
+ }
+}
+
+/**
+ * ipr_build_mode_select - Build a mode select command
+ * @ipr_cmd: ipr command struct
+ * @res_handle: resource handle to send command to
+ * @parm: Byte 2 of Mode Sense command
+ * @dma_addr: DMA buffer address
+ * @xfer_len: data transfer length
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
+ __be32 res_handle, u8 parm,
+ dma_addr_t dma_addr, u8 xfer_len)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+
+ ioarcb->res_handle = res_handle;
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
+ ioarcb->cmd_pkt.cdb[1] = parm;
+ ioarcb->cmd_pkt.cdb[4] = xfer_len;
+
+ ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
+}
+
+/**
+ * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sets up the SCSI bus attributes and sends
+ * a Mode Select for Page 28 to activate them.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
+ int length;
+
+ ENTER;
+ ipr_scsi_bus_speed_limit(ioa_cfg);
+ ipr_check_term_power(ioa_cfg, mode_pages);
+ ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
+ length = mode_pages->hdr.length + 1;
+ mode_pages->hdr.length = 0;
+
+ ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
+ length);
+
+ ipr_cmd->job_step = ipr_set_supported_devs;
+ ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+ struct ipr_resource_entry, queue);
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_build_mode_sense - Builds a mode sense command
+ * @ipr_cmd: ipr command struct
+ * @res: resource entry struct
+ * @parm: Byte 2 of mode sense command
+ * @dma_addr: DMA address of mode sense buffer
+ * @xfer_len: Size of DMA buffer
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
+ __be32 res_handle,
+ u8 parm, dma_addr_t dma_addr, u8 xfer_len)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+
+ ioarcb->res_handle = res_handle;
+ ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
+ ioarcb->cmd_pkt.cdb[2] = parm;
+ ioarcb->cmd_pkt.cdb[4] = xfer_len;
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+
+ ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
+}
+
+/**
+ * ipr_reset_cmd_failed - Handle failure of IOA reset command
+ * @ipr_cmd: ipr command struct
+ *
+ * This function handles the failure of an IOA bringup command.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ dev_err(&ioa_cfg->pdev->dev,
+ "0x%02X failed with IOASC: 0x%08X\n",
+ ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
+
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
+ * @ipr_cmd: ipr command struct
+ *
+ * This function handles the failure of a Mode Sense to the IOAFP.
+ * Some adapters do not handle all mode pages.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
+ ipr_cmd->job_step = ipr_set_supported_devs;
+ ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+ struct ipr_resource_entry, queue);
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ return ipr_reset_cmd_failed(ipr_cmd);
+}
+
+/**
+ * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * This function send a Page 28 mode sense to the IOA to
+ * retrieve SCSI bus attributes.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
+ 0x28, ioa_cfg->vpd_cbs_dma +
+ offsetof(struct ipr_misc_cbs, mode_pages),
+ sizeof(struct ipr_mode_pages));
+
+ ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
+ ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * This function enables dual IOA RAID support if possible.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
+ struct ipr_mode_page24 *mode_page;
+ int length;
+
+ ENTER;
+ mode_page = ipr_get_mode_page(mode_pages, 0x24,
+ sizeof(struct ipr_mode_page24));
+
+ if (mode_page)
+ mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
+
+ length = mode_pages->hdr.length + 1;
+ mode_pages->hdr.length = 0;
+
+ ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
+ length);
+
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
+ * @ipr_cmd: ipr command struct
+ *
+ * This function handles the failure of a Mode Sense to the IOAFP.
+ * Some adapters do not handle all mode pages.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
+{
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ return ipr_reset_cmd_failed(ipr_cmd);
+}
+
+/**
+ * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * This function send a mode sense to the IOA to retrieve
+ * the IOA Advanced Function Control mode page.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
+ 0x24, ioa_cfg->vpd_cbs_dma +
+ offsetof(struct ipr_misc_cbs, mode_pages),
+ sizeof(struct ipr_mode_pages));
+
+ ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
+ ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_init_res_table - Initialize the resource table
+ * @ipr_cmd: ipr command struct
+ *
+ * This function looks through the existing resource table, comparing
+ * it with the config table. This function will take care of old/new
+ * devices and schedule adding/removing them from the mid-layer
+ * as appropriate.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_resource_entry *res, *temp;
+ struct ipr_config_table_entry_wrapper cfgtew;
+ int entries, found, flag, i;
+ LIST_HEAD(old_res);
+
+ ENTER;
+ if (ioa_cfg->sis64)
+ flag = ioa_cfg->u.cfg_table64->hdr64.flags;
+ else
+ flag = ioa_cfg->u.cfg_table->hdr.flags;
+
+ if (flag & IPR_UCODE_DOWNLOAD_REQ)
+ dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
+
+ list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
+ list_move_tail(&res->queue, &old_res);
+
+ if (ioa_cfg->sis64)
+ entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
+ else
+ entries = ioa_cfg->u.cfg_table->hdr.num_entries;
+
+ for (i = 0; i < entries; i++) {
+ if (ioa_cfg->sis64)
+ cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
+ else
+ cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
+ found = 0;
+
+ list_for_each_entry_safe(res, temp, &old_res, queue) {
+ if (ipr_is_same_device(res, &cfgtew)) {
+ list_move_tail(&res->queue, &ioa_cfg->used_res_q);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ if (list_empty(&ioa_cfg->free_res_q)) {
+ dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
+ break;
+ }
+
+ found = 1;
+ res = list_entry(ioa_cfg->free_res_q.next,
+ struct ipr_resource_entry, queue);
+ list_move_tail(&res->queue, &ioa_cfg->used_res_q);
+ ipr_init_res_entry(res, &cfgtew);
+ res->add_to_ml = 1;
+ } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
+ res->sdev->allow_restart = 1;
+
+ if (found)
+ ipr_update_res_entry(res, &cfgtew);
+ }
+
+ list_for_each_entry_safe(res, temp, &old_res, queue) {
+ if (res->sdev) {
+ res->del_from_ml = 1;
+ res->res_handle = IPR_INVALID_RES_HANDLE;
+ list_move_tail(&res->queue, &ioa_cfg->used_res_q);
+ }
+ }
+
+ list_for_each_entry_safe(res, temp, &old_res, queue) {
+ ipr_clear_res_target(res);
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ }
+
+ if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
+ else
+ ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a Query IOA Configuration command
+ * to the adapter to retrieve the IOA configuration table.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
+ struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
+
+ ENTER;
+ if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
+ ioa_cfg->dual_raid = 1;
+ dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
+ ucode_vpd->major_release, ucode_vpd->card_type,
+ ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+ ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
+ ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
+
+ ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
+ IPR_IOADL_FLAGS_READ_LAST);
+
+ ipr_cmd->job_step = ipr_init_res_table;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This utility function sends an inquiry to the adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
+ dma_addr_t dma_addr, u8 xfer_len)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+
+ ENTER;
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+ ioarcb->cmd_pkt.cdb[0] = INQUIRY;
+ ioarcb->cmd_pkt.cdb[1] = flags;
+ ioarcb->cmd_pkt.cdb[2] = page;
+ ioarcb->cmd_pkt.cdb[4] = xfer_len;
+
+ ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+ LEAVE;
+}
+
+/**
+ * ipr_inquiry_page_supported - Is the given inquiry page supported
+ * @page0: inquiry page 0 buffer
+ * @page: page code.
+ *
+ * This function determines if the specified inquiry page is supported.
+ *
+ * Return value:
+ * 1 if page is supported / 0 if not
+ **/
+static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
+{
+ int i;
+
+ for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
+ if (page0->page[i] == page)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a Page 0xD0 inquiry to the adapter
+ * to retrieve adapter capabilities.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
+ struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+ memset(cap, 0, sizeof(*cap));
+
+ if (ipr_inquiry_page_supported(page0, 0xD0)) {
+ ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
+ sizeof(struct ipr_inquiry_cap));
+ return IPR_RC_JOB_RETURN;
+ }
+
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a Page 3 inquiry to the adapter
+ * to retrieve software VPD information.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+
+ ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
+
+ ipr_ioafp_inquiry(ipr_cmd, 1, 3,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
+ sizeof(struct ipr_inquiry_page3));
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a Page 0 inquiry to the adapter
+ * to retrieve supported inquiry pages.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ char type[5];
+
+ ENTER;
+
+ /* Grab the type out of the VPD and store it away */
+ memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
+ type[4] = '\0';
+ ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
+
+ if (ipr_invalid_adapter(ioa_cfg)) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Adapter not supported in this hardware configuration.\n");
+
+ if (!ipr_testmode) {
+ ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue,
+ &ioa_cfg->hrrq->hrrq_free_q);
+ return IPR_RC_JOB_RETURN;
+ }
+ }
+
+ ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
+
+ ipr_ioafp_inquiry(ipr_cmd, 1, 0,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
+ sizeof(struct ipr_inquiry_page0));
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a standard inquiry to the adapter.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
+
+ ipr_ioafp_inquiry(ipr_cmd, 0, 0,
+ ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
+ sizeof(struct ipr_ioa_vpd));
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function send an Identify Host Request Response Queue
+ * command to establish the HRRQ with the adapter.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_hrr_queue *hrrq;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+ dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
+
+ if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+ hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
+
+ ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ if (ioa_cfg->sis64)
+ ioarcb->cmd_pkt.cdb[1] = 0x1;
+
+ if (ioa_cfg->nvectors == 1)
+ ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+ else
+ ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+ ioarcb->cmd_pkt.cdb[2] =
+ ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+ ioarcb->cmd_pkt.cdb[3] =
+ ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+ ioarcb->cmd_pkt.cdb[4] =
+ ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[5] =
+ ((u64) hrrq->host_rrq_dma) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] =
+ ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] =
+ (sizeof(u32) * hrrq->size) & 0xff;
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[9] =
+ ioa_cfg->identify_hrrq_index;
+
+ if (ioa_cfg->sis64) {
+ ioarcb->cmd_pkt.cdb[10] =
+ ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+ ioarcb->cmd_pkt.cdb[11] =
+ ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+ ioarcb->cmd_pkt.cdb[12] =
+ ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+ ioarcb->cmd_pkt.cdb[13] =
+ ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+ }
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[14] =
+ ioa_cfg->identify_hrrq_index;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_INTERNAL_TIMEOUT);
+
+ if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+ }
+
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_timer_done - Adapter reset timer function
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function is used in adapter reset processing
+ * for timing events. If the reset_cmd pointer in the IOA
+ * config struct is not this adapter's we are doing nested
+ * resets and fail_all_ops will take care of freeing the
+ * command block.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ unsigned long lock_flags = 0;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ioa_cfg->reset_cmd == ipr_cmd) {
+ list_del(&ipr_cmd->queue);
+ ipr_cmd->done(ipr_cmd);
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
+ * ipr_reset_start_timer - Start a timer for adapter reset job
+ * @ipr_cmd: ipr command struct
+ * @timeout: timeout value
+ *
+ * Description: This function is used in adapter reset processing
+ * for timing events. If the reset_cmd pointer in the IOA
+ * config struct is not this adapter's we are doing nested
+ * resets and fail_all_ops will take care of freeing the
+ * command block.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
+ unsigned long timeout)
+{
+
+ ENTER;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+ ipr_cmd->done = ipr_reset_ioa_job;
+
+ ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+ ipr_cmd->timer.expires = jiffies + timeout;
+ ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
+ add_timer(&ipr_cmd->timer);
+}
+
+/**
+ * ipr_init_ioa_mem - Initialize ioa_cfg control block
+ * @ioa_cfg: ioa cfg struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_hrr_queue *hrrq;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+ /* Initialize Host RRQ pointers */
+ hrrq->hrrq_start = hrrq->host_rrq;
+ hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+ hrrq->hrrq_curr = hrrq->hrrq_start;
+ hrrq->toggle_bit = 1;
+ spin_unlock(&hrrq->_lock);
+ }
+ wmb();
+
+ ioa_cfg->identify_hrrq_index = 0;
+ if (ioa_cfg->hrrq_num == 1)
+ atomic_set(&ioa_cfg->hrrq_index, 0);
+ else
+ atomic_set(&ioa_cfg->hrrq_index, 1);
+
+ /* Zero out config table */
+ memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
+}
+
+/**
+ * ipr_reset_next_stage - Process IPL stage change based on feedback register.
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long stage, stage_time;
+ u32 feedback;
+ volatile u32 int_reg;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ u64 maskval = 0;
+
+ feedback = readl(ioa_cfg->regs.init_feedback_reg);
+ stage = feedback & IPR_IPL_INIT_STAGE_MASK;
+ stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
+
+ ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
+
+ /* sanity check the stage_time value */
+ if (stage_time == 0)
+ stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
+ else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
+ stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
+ else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
+ stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
+
+ if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
+ writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ stage_time = ioa_cfg->transop_timeout;
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+ } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+ maskval = IPR_PCII_IPL_STAGE_CHANGE;
+ maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
+ writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ return IPR_RC_JOB_CONTINUE;
+ }
+ }
+
+ ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+ ipr_cmd->timer.expires = jiffies + stage_time * HZ;
+ ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
+ ipr_cmd->done = ipr_reset_ioa_job;
+ add_timer(&ipr_cmd->timer);
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_enable_ioa - Enable the IOA following a reset.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function reinitializes some control blocks and
+ * enables destructive diagnostics on the adapter.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ volatile u32 int_reg;
+ volatile u64 maskval;
+ int i;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+ ipr_init_ioa_mem(ioa_cfg);
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ if (ioa_cfg->sis64) {
+ /* Set the adapter to the correct endian mode. */
+ writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
+ int_reg = readl(ioa_cfg->regs.endian_swap_reg);
+ }
+
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+
+ if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
+ writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
+ ioa_cfg->regs.clr_interrupt_mask_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ /* Enable destructive diagnostics on IOA */
+ writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
+
+ if (ioa_cfg->sis64) {
+ maskval = IPR_PCII_IPL_STAGE_CHANGE;
+ maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
+ writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
+ } else
+ writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
+
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+
+ dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
+
+ if (ioa_cfg->sis64) {
+ ipr_cmd->job_step = ipr_reset_next_stage;
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+ ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
+ ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
+ ipr_cmd->done = ipr_reset_ioa_job;
+ add_timer(&ipr_cmd->timer);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_wait_for_dump - Wait for a dump to timeout.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked when an adapter dump has run out
+ * of processing time.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ if (ioa_cfg->sdt_state == GET_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ else if (ioa_cfg->sdt_state == READ_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+
+ ioa_cfg->dump_timeout = 1;
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_unit_check_no_data - Log a unit check/no data error log
+ * @ioa_cfg: ioa config struct
+ *
+ * Logs an error indicating the adapter unit checked, but for some
+ * reason, we were unable to fetch the unit check buffer.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
+{
+ ioa_cfg->errors_logged++;
+ dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
+}
+
+/**
+ * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
+ * @ioa_cfg: ioa config struct
+ *
+ * Fetches the unit check buffer from the adapter by clocking the data
+ * through the mailbox register.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
+{
+ unsigned long mailbox;
+ struct ipr_hostrcb *hostrcb;
+ struct ipr_uc_sdt sdt;
+ int rc, length;
+ u32 ioasc;
+
+ mailbox = readl(ioa_cfg->ioa_mailbox);
+
+ if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
+ ipr_unit_check_no_data(ioa_cfg);
+ return;
+ }
+
+ memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
+ rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
+ (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
+
+ if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
+ ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+ (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
+ ipr_unit_check_no_data(ioa_cfg);
+ return;
+ }
+
+ /* Find length of the first sdt entry (UC buffer) */
+ if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
+ length = be32_to_cpu(sdt.entry[0].end_token);
+ else
+ length = (be32_to_cpu(sdt.entry[0].end_token) -
+ be32_to_cpu(sdt.entry[0].start_token)) &
+ IPR_FMT2_MBX_ADDR_MASK;
+
+ hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
+ struct ipr_hostrcb, queue);
+ list_del(&hostrcb->queue);
+ memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
+
+ rc = ipr_get_ldump_data_section(ioa_cfg,
+ be32_to_cpu(sdt.entry[0].start_token),
+ (__be32 *)&hostrcb->hcam,
+ min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
+
+ if (!rc) {
+ ipr_handle_log_data(ioa_cfg, hostrcb);
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
+ if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
+ ioa_cfg->sdt_state == GET_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ } else
+ ipr_unit_check_no_data(ioa_cfg);
+
+ list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+}
+
+/**
+ * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function will call to get the unit check buffer.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ioa_cfg->ioa_unit_checked = 0;
+ ipr_get_unit_check_buffer(ioa_cfg);
+ ipr_cmd->job_step = ipr_reset_alert;
+ ipr_reset_start_timer(ipr_cmd, 0);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_restore_cfg_space - Restore PCI config space.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function restores the saved PCI config space of
+ * the adapter, fails all outstanding ops back to the callers, and
+ * fetches the dump/unit check if applicable to this reset.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ u32 int_reg;
+
+ ENTER;
+ ioa_cfg->pdev->state_saved = true;
+ pci_restore_state(ioa_cfg->pdev);
+
+ if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
+ ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ ipr_fail_all_ops(ioa_cfg);
+
+ if (ioa_cfg->sis64) {
+ /* Set the adapter to the correct endian mode. */
+ writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
+ int_reg = readl(ioa_cfg->regs.endian_swap_reg);
+ }
+
+ if (ioa_cfg->ioa_unit_checked) {
+ if (ioa_cfg->sis64) {
+ ipr_cmd->job_step = ipr_reset_get_unit_check_job;
+ ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
+ return IPR_RC_JOB_RETURN;
+ } else {
+ ioa_cfg->ioa_unit_checked = 0;
+ ipr_get_unit_check_buffer(ioa_cfg);
+ ipr_cmd->job_step = ipr_reset_alert;
+ ipr_reset_start_timer(ipr_cmd, 0);
+ return IPR_RC_JOB_RETURN;
+ }
+ }
+
+ if (ioa_cfg->in_ioa_bringdown) {
+ ipr_cmd->job_step = ipr_ioa_bringdown_done;
+ } else {
+ ipr_cmd->job_step = ipr_reset_enable_ioa;
+
+ if (GET_DUMP == ioa_cfg->sdt_state) {
+ ioa_cfg->sdt_state = READ_DUMP;
+ ioa_cfg->dump_timeout = 0;
+ if (ioa_cfg->sis64)
+ ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
+ else
+ ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
+ ipr_cmd->job_step = ipr_reset_wait_for_dump;
+ schedule_work(&ioa_cfg->work_q);
+ return IPR_RC_JOB_RETURN;
+ }
+ }
+
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_bist_done - BIST has completed on the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Unblock config space and resume the reset process.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
+ ipr_cmd->job_step = ipr_reset_restore_cfg_space;
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_start_bist - Run BIST on the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function runs BIST on the adapter, then delays 2 seconds.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = PCIBIOS_SUCCESSFUL;
+
+ ENTER;
+ if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
+ writel(IPR_UPROCI_SIS64_START_BIST,
+ ioa_cfg->regs.set_uproc_interrupt_reg32);
+ else
+ rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
+
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ ipr_cmd->job_step = ipr_reset_bist_done;
+ ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
+ rc = IPR_RC_JOB_RETURN;
+ } else {
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
+ ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+ rc = IPR_RC_JOB_CONTINUE;
+ }
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This clears PCI reset to the adapter and delays two seconds.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_bist_done;
+ ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_reset_work - Pulse a PCIe fundamental reset
+ * @work: work struct
+ *
+ * Description: This pulses warm reset to a slot.
+ *
+ **/
+static void ipr_reset_reset_work(struct work_struct *work)
+{
+ struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct pci_dev *pdev = ioa_cfg->pdev;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+ pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
+ pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->reset_cmd == ipr_cmd)
+ ipr_reset_ioa_job(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
+ * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This asserts PCI reset to the adapter.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
+ queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
+ ipr_cmd->job_step = ipr_reset_slot_reset_done;
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_block_config_access_wait - Wait for permission to block config access
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+
+ if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
+ ioa_cfg->cfg_locked = 1;
+ ipr_cmd->job_step = ioa_cfg->reset;
+ } else {
+ if (ipr_cmd->u.time_left) {
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
+ ipr_reset_start_timer(ipr_cmd,
+ IPR_CHECK_FOR_RESET_TIMEOUT);
+ } else {
+ ipr_cmd->job_step = ioa_cfg->reset;
+ dev_err(&ioa_cfg->pdev->dev,
+ "Timed out waiting to lock config access. Resetting anyway.\n");
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * ipr_reset_block_config_access - Block config access to the IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
+{
+ ipr_cmd->ioa_cfg->cfg_locked = 0;
+ ipr_cmd->job_step = ipr_reset_block_config_access_wait;
+ ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_allowed - Query whether or not IOA can be reset
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 if reset not allowed / non-zero if reset is allowed
+ **/
+static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
+{
+ volatile u32 temp_reg;
+
+ temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
+}
+
+/**
+ * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function waits for adapter permission to run BIST,
+ * then runs BIST. If the adapter does not give permission after a
+ * reasonable time, we will reset the adapter anyway. The impact of
+ * resetting the adapter without warning the adapter is the risk of
+ * losing the persistent error log on the adapter. If the adapter is
+ * reset while it is writing to the flash on the adapter, the flash
+ * segment will have bad ECC and be zeroed.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_RETURN;
+
+ if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
+ ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
+ ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
+ } else {
+ ipr_cmd->job_step = ipr_reset_block_config_access;
+ rc = IPR_RC_JOB_CONTINUE;
+ }
+
+ return rc;
+}
+
+/**
+ * ipr_reset_alert - Alert the adapter of a pending reset
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function alerts the adapter that it will be reset.
+ * If memory space is not currently enabled, proceed directly
+ * to running BIST on the adapter. The timer must always be started
+ * so we guarantee we do not run BIST from ipr_isr.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ u16 cmd_reg;
+ int rc;
+
+ ENTER;
+ rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
+
+ if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
+ writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
+ ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
+ } else {
+ ipr_cmd->job_step = ipr_reset_block_config_access;
+ }
+
+ ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
+ ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_quiesce_done - Complete IOA disconnect
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Freeze the adapter to complete quiesce processing
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioa_bringdown_done;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_cancel_hcam_done - Check for outstanding commands
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Ensure nothing is outstanding to the IOA and
+ * proceed with IOA disconnect. Otherwise reset the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_cmnd *loop_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int rc = IPR_RC_JOB_CONTINUE;
+ int count = 0;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_quiesce_done;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
+ count++;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ rc = IPR_RC_JOB_RETURN;
+ break;
+ }
+ spin_unlock(&hrrq->_lock);
+
+ if (count)
+ break;
+ }
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Cancel any oustanding HCAMs to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+ struct ipr_cmd_pkt *cmd_pkt;
+ struct ipr_cmnd *hcam_cmd;
+ struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
+
+ if (!hrrq->ioa_is_dead) {
+ if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
+ list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
+ continue;
+
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
+ cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
+ cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
+ cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
+ cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
+ cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
+ cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
+ cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
+ cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
+ cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_CANCEL_TIMEOUT);
+
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ break;
+ }
+ }
+ } else
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_ucode_download_done - Microcode download completion
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function unmaps the microcode download buffer.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
+
+ dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
+ sglist->num_sg, DMA_TO_DEVICE);
+
+ ipr_cmd->job_step = ipr_reset_alert;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_ucode_download - Download microcode to the adapter
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function checks to see if it there is microcode
+ * to download to the adapter. If there is, a download is performed.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ if (!sglist)
+ return IPR_RC_JOB_CONTINUE;
+
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
+
+ if (ioa_cfg->sis64)
+ ipr_build_ucode_ioadl64(ipr_cmd, sglist);
+ else
+ ipr_build_ucode_ioadl(ipr_cmd, sglist);
+ ipr_cmd->job_step = ipr_reset_ucode_download_done;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_WRITE_BUFFER_TIMEOUT);
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_shutdown_ioa - Shutdown the adapter
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function issues an adapter shutdown of the
+ * specified type to the specified adapter as part of the
+ * adapter reset job.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
+ unsigned long timeout;
+ int rc = IPR_RC_JOB_CONTINUE;
+
+ ENTER;
+ if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ else if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
+
+ if (shutdown_type == IPR_SHUTDOWN_NORMAL)
+ timeout = IPR_SHUTDOWN_TIMEOUT;
+ else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
+ timeout = IPR_INTERNAL_TIMEOUT;
+ else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+ timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
+ else
+ timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
+
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->job_step = ipr_reset_ucode_download;
+ } else
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_ioa_job - Adapter reset job
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This function is the job router for the adapter reset job.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
+{
+ u32 rc, ioasc;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ do {
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ if (ioa_cfg->reset_cmd != ipr_cmd) {
+ /*
+ * We are doing nested adapter resets and this is
+ * not the current reset job.
+ */
+ list_add_tail(&ipr_cmd->queue,
+ &ipr_cmd->hrrq->hrrq_free_q);
+ return;
+ }
+
+ if (IPR_IOASC_SENSE_KEY(ioasc)) {
+ rc = ipr_cmd->job_step_failed(ipr_cmd);
+ if (rc == IPR_RC_JOB_RETURN)
+ return;
+ }
+
+ ipr_reinit_ipr_cmnd(ipr_cmd);
+ ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
+ rc = ipr_cmd->job_step(ipr_cmd);
+ } while (rc == IPR_RC_JOB_CONTINUE);
+}
+
+/**
+ * _ipr_initiate_ioa_reset - Initiate an adapter reset
+ * @ioa_cfg: ioa config struct
+ * @job_step: first job step of reset job
+ * @shutdown_type: shutdown type
+ *
+ * Description: This function will initiate the reset of the given adapter
+ * starting at the selected job step.
+ * If the caller needs to wait on the completion of the reset,
+ * the caller must sleep on the reset_wait_q.
+ *
+ * Return value:
+ * none
+ **/
+static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
+ int (*job_step) (struct ipr_cmnd *),
+ enum ipr_shutdown_type shutdown_type)
+{
+ struct ipr_cmnd *ipr_cmd;
+ int i;
+
+ ioa_cfg->in_reset_reload = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ scsi_block_requests(ioa_cfg->host);
+
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ioa_cfg->reset_cmd = ipr_cmd;
+ ipr_cmd->job_step = job_step;
+ ipr_cmd->u.shutdown_type = shutdown_type;
+
+ ipr_reset_ioa_job(ipr_cmd);
+}
+
+/**
+ * ipr_initiate_ioa_reset - Initiate an adapter reset
+ * @ioa_cfg: ioa config struct
+ * @shutdown_type: shutdown type
+ *
+ * Description: This function will initiate the reset of the given adapter.
+ * If the caller needs to wait on the completion of the reset,
+ * the caller must sleep on the reset_wait_q.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
+ enum ipr_shutdown_type shutdown_type)
+{
+ int i;
+
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
+ return;
+
+ if (ioa_cfg->in_reset_reload) {
+ if (ioa_cfg->sdt_state == GET_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ else if (ioa_cfg->sdt_state == READ_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+ }
+
+ if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "IOA taken offline - error recovery failed\n");
+
+ ioa_cfg->reset_retries = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+
+ if (ioa_cfg->in_ioa_bringdown) {
+ ioa_cfg->reset_cmd = NULL;
+ ioa_cfg->in_reset_reload = 0;
+ ipr_fail_all_ops(ioa_cfg);
+ wake_up_all(&ioa_cfg->reset_wait_q);
+
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+ return;
+ } else {
+ ioa_cfg->in_ioa_bringdown = 1;
+ shutdown_type = IPR_SHUTDOWN_NONE;
+ }
+ }
+
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
+ shutdown_type);
+}
+
+/**
+ * ipr_reset_freeze - Hold off all I/O activity
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: If the PCI slot is frozen, hold off all I/O
+ * activity; then, as soon as the slot is available again,
+ * initiate an adapter reset.
+ */
+static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
+
+ /* Disallow new interrupts, avoid loop */
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
+ ipr_cmd->done = ipr_reset_ioa_job;
+ return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called to tell us that the MMIO
+ * access to the IOA has been restored
+ */
+static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (!ioa_cfg->probe_done)
+ pci_save_state(pdev);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called to tell us that the PCI bus
+ * is down. Can't do anything here, except put the device driver
+ * into a holding pattern, waiting for the PCI bus to come back.
+ */
+static void ipr_pci_frozen(struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (ioa_cfg->probe_done)
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (ioa_cfg->probe_done) {
+ if (ioa_cfg->needs_warm_reset)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ else
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+ IPR_SHUTDOWN_NONE);
+ } else
+ wake_up_all(&ioa_cfg->eeh_wait_q);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ipr_pci_perm_failure - Called when PCI slot is dead for good.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called when the PCI bus has
+ * permanently failed.
+ */
+static void ipr_pci_perm_failure(struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (ioa_cfg->probe_done) {
+ if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+ ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
+ ioa_cfg->in_ioa_bringdown = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ } else
+ wake_up_all(&ioa_cfg->eeh_wait_q);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ switch (state) {
+ case pci_channel_io_frozen:
+ ipr_pci_frozen(pdev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_perm_failure:
+ ipr_pci_perm_failure(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ break;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
+ * @ioa_cfg: ioa cfg struct
+ *
+ * Description: This is the second phase of adapter intialization
+ * This function takes care of initilizing the adapter to the point
+ * where it can accept new commands.
+
+ * Return value:
+ * 0 on success / -EIO on failure
+ **/
+static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int rc = 0;
+ unsigned long host_lock_flags = 0;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+ dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
+ ioa_cfg->probe_done = 1;
+ if (ioa_cfg->needs_hard_reset) {
+ ioa_cfg->needs_hard_reset = 0;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ } else
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
+ IPR_SHUTDOWN_NONE);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i;
+
+ if (ioa_cfg->ipr_cmnd_list) {
+ for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+ if (ioa_cfg->ipr_cmnd_list[i])
+ dma_pool_free(ioa_cfg->ipr_cmd_pool,
+ ioa_cfg->ipr_cmnd_list[i],
+ ioa_cfg->ipr_cmnd_list_dma[i]);
+
+ ioa_cfg->ipr_cmnd_list[i] = NULL;
+ }
+ }
+
+ if (ioa_cfg->ipr_cmd_pool)
+ dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
+
+ kfree(ioa_cfg->ipr_cmnd_list);
+ kfree(ioa_cfg->ipr_cmnd_list_dma);
+ ioa_cfg->ipr_cmnd_list = NULL;
+ ioa_cfg->ipr_cmnd_list_dma = NULL;
+ ioa_cfg->ipr_cmd_pool = NULL;
+}
+
+/**
+ * ipr_free_mem - Frees memory allocated for an adapter
+ * @ioa_cfg: ioa cfg struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i;
+
+ kfree(ioa_cfg->res_entries);
+ dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
+ ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
+ ipr_free_cmd_blks(ioa_cfg);
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++)
+ dma_free_coherent(&ioa_cfg->pdev->dev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+
+ dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
+ ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
+
+ for (i = 0; i < IPR_NUM_HCAMS; i++) {
+ dma_free_coherent(&ioa_cfg->pdev->dev,
+ sizeof(struct ipr_hostrcb),
+ ioa_cfg->hostrcb[i],
+ ioa_cfg->hostrcb_dma[i]);
+ }
+
+ ipr_free_dump(ioa_cfg);
+ kfree(ioa_cfg->trace);
+}
+
+/**
+ * ipr_free_irqs - Free all allocated IRQs for the adapter.
+ * @ioa_cfg: ipr cfg struct
+ *
+ * This function frees all allocated IRQs for the
+ * specified adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ int i;
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ } else
+ free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ pci_disable_msi(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ pci_disable_msix(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ }
+}
+
+/**
+ * ipr_free_all_resources - Free all allocated resources for an adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function frees all allocated resources for the
+ * specified adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ ENTER;
+ ipr_free_irqs(ioa_cfg);
+ if (ioa_cfg->reset_work_q)
+ destroy_workqueue(ioa_cfg->reset_work_q);
+ iounmap(ioa_cfg->hdw_dma_regs);
+ pci_release_regions(pdev);
+ ipr_free_mem(ioa_cfg);
+ scsi_host_put(ioa_cfg->host);
+ pci_disable_device(pdev);
+ LEAVE;
+}
+
+/**
+ * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 on success / -ENOMEM on allocation failure
+ **/
+static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioarcb *ioarcb;
+ dma_addr_t dma_addr;
+ int i, entries_each_hrrq, hrrq_id = 0;
+
+ ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
+ sizeof(struct ipr_cmnd), 512, 0);
+
+ if (!ioa_cfg->ipr_cmd_pool)
+ return -ENOMEM;
+
+ ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
+ ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
+
+ if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
+ ipr_free_cmd_blks(ioa_cfg);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ if (ioa_cfg->hrrq_num > 1) {
+ if (i == 0) {
+ entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
+ } else {
+ entries_each_hrrq =
+ IPR_NUM_BASE_CMD_BLKS/
+ (ioa_cfg->hrrq_num - 1);
+ ioa_cfg->hrrq[i].min_cmd_id =
+ IPR_NUM_INTERNAL_CMD_BLKS +
+ (i - 1) * entries_each_hrrq;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (IPR_NUM_INTERNAL_CMD_BLKS +
+ i * entries_each_hrrq - 1);
+ }
+ } else {
+ entries_each_hrrq = IPR_NUM_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+ }
+ ioa_cfg->hrrq[i].size = entries_each_hrrq;
+ }
+
+ BUG_ON(ioa_cfg->hrrq_num == 0);
+
+ i = IPR_NUM_CMD_BLKS -
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+ if (i > 0) {
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+ }
+
+ for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+ ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
+
+ if (!ipr_cmd) {
+ ipr_free_cmd_blks(ioa_cfg);
+ return -ENOMEM;
+ }
+
+ memset(ipr_cmd, 0, sizeof(*ipr_cmd));
+ ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
+ ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
+
+ ioarcb = &ipr_cmd->ioarcb;
+ ipr_cmd->dma_addr = dma_addr;
+ if (ioa_cfg->sis64)
+ ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
+ else
+ ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
+
+ ioarcb->host_response_handle = cpu_to_be32(i << 2);
+ if (ioa_cfg->sis64) {
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+ ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
+ } else {
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ ioarcb->ioasa_host_pci_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
+ }
+ ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
+ ipr_cmd->cmd_index = i;
+ ipr_cmd->ioa_cfg = ioa_cfg;
+ ipr_cmd->sense_buffer_dma = dma_addr +
+ offsetof(struct ipr_cmnd, sense_buffer);
+
+ ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+ ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+ hrrq_id++;
+ }
+
+ return 0;
+}
+
+/**
+ * ipr_alloc_mem - Allocate memory for an adapter
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * 0 on success / non-zero for error
+ **/
+static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+ int i, rc = -ENOMEM;
+
+ ENTER;
+ ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
+ ioa_cfg->max_devs_supported, GFP_KERNEL);
+
+ if (!ioa_cfg->res_entries)
+ goto out;
+
+ for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
+ list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
+ ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
+ }
+
+ ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct ipr_misc_cbs),
+ &ioa_cfg->vpd_cbs_dma,
+ GFP_KERNEL);
+
+ if (!ioa_cfg->vpd_cbs)
+ goto out_free_res_entries;
+
+ if (ipr_alloc_cmd_blks(ioa_cfg))
+ goto out_free_vpd_cbs;
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ &ioa_cfg->hrrq[i].host_rrq_dma,
+ GFP_KERNEL);
+
+ if (!ioa_cfg->hrrq[i].host_rrq) {
+ while (--i > 0)
+ dma_free_coherent(&pdev->dev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ goto out_ipr_free_cmd_blocks;
+ }
+ ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+ }
+
+ ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
+ ioa_cfg->cfg_table_size,
+ &ioa_cfg->cfg_table_dma,
+ GFP_KERNEL);
+
+ if (!ioa_cfg->u.cfg_table)
+ goto out_free_host_rrq;
+
+ for (i = 0; i < IPR_NUM_HCAMS; i++) {
+ ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct ipr_hostrcb),
+ &ioa_cfg->hostrcb_dma[i],
+ GFP_KERNEL);
+
+ if (!ioa_cfg->hostrcb[i])
+ goto out_free_hostrcb_dma;
+
+ ioa_cfg->hostrcb[i]->hostrcb_dma =
+ ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
+ ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
+ list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
+ }
+
+ ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
+ IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
+
+ if (!ioa_cfg->trace)
+ goto out_free_hostrcb_dma;
+
+ rc = 0;
+out:
+ LEAVE;
+ return rc;
+
+out_free_hostrcb_dma:
+ while (i-- > 0) {
+ dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
+ ioa_cfg->hostrcb[i],
+ ioa_cfg->hostrcb_dma[i]);
+ }
+ dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
+ ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
+out_free_host_rrq:
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ dma_free_coherent(&pdev->dev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ }
+out_ipr_free_cmd_blocks:
+ ipr_free_cmd_blks(ioa_cfg);
+out_free_vpd_cbs:
+ dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
+ ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
+out_free_res_entries:
+ kfree(ioa_cfg->res_entries);
+ goto out;
+}
+
+/**
+ * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i;
+
+ for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
+ ioa_cfg->bus_attr[i].bus = i;
+ ioa_cfg->bus_attr[i].qas_enabled = 0;
+ ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
+ if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
+ ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
+ else
+ ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
+ }
+}
+
+/**
+ * ipr_init_regs - Initialize IOA registers
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ const struct ipr_interrupt_offsets *p;
+ struct ipr_interrupts *t;
+ void __iomem *base;
+
+ p = &ioa_cfg->chip_cfg->regs;
+ t = &ioa_cfg->regs;
+ base = ioa_cfg->hdw_dma_regs;
+
+ t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
+ t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
+ t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
+ t->clr_interrupt_reg = base + p->clr_interrupt_reg;
+ t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
+ t->sense_interrupt_reg = base + p->sense_interrupt_reg;
+ t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
+ t->ioarrin_reg = base + p->ioarrin_reg;
+ t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
+ t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
+ t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
+ t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
+ t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
+ t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
+
+ if (ioa_cfg->sis64) {
+ t->init_feedback_reg = base + p->init_feedback_reg;
+ t->dump_addr_reg = base + p->dump_addr_reg;
+ t->dump_data_reg = base + p->dump_data_reg;
+ t->endian_swap_reg = base + p->endian_swap_reg;
+ }
+}
+
+/**
+ * ipr_init_ioa_cfg - Initialize IOA config struct
+ * @ioa_cfg: ioa config struct
+ * @host: scsi host struct
+ * @pdev: PCI dev struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+ struct Scsi_Host *host, struct pci_dev *pdev)
+{
+ int i;
+
+ ioa_cfg->host = host;
+ ioa_cfg->pdev = pdev;
+ ioa_cfg->log_level = ipr_log_level;
+ ioa_cfg->doorbell = IPR_DOORBELL;
+ sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
+ sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
+ sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
+ sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
+ sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
+ sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
+
+ INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+ INIT_LIST_HEAD(&ioa_cfg->free_res_q);
+ INIT_LIST_HEAD(&ioa_cfg->used_res_q);
+ INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+ init_waitqueue_head(&ioa_cfg->reset_wait_q);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ init_waitqueue_head(&ioa_cfg->eeh_wait_q);
+ ioa_cfg->sdt_state = INACTIVE;
+
+ ipr_initialize_bus_attr(ioa_cfg);
+ ioa_cfg->max_devs_supported = ipr_max_devs;
+
+ if (ioa_cfg->sis64) {
+ host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
+ host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
+ if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
+ ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+ + ((sizeof(struct ipr_config_table_entry64)
+ * ioa_cfg->max_devs_supported)));
+ } else {
+ host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
+ host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+ if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
+ ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+ + ((sizeof(struct ipr_config_table_entry)
+ * ioa_cfg->max_devs_supported)));
+ }
+
+ host->max_channel = IPR_VSET_BUS;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = IPR_MAX_CDB_LEN;
+ host->can_queue = ioa_cfg->max_cmds;
+ pci_set_drvdata(pdev, ioa_cfg);
+
+ for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+ }
+}
+
+/**
+ * ipr_get_chip_info - Find adapter chip information
+ * @dev_id: PCI device id struct
+ *
+ * Return value:
+ * ptr to chip information on success / NULL on failure
+ **/
+static const struct ipr_chip_t *
+ipr_get_chip_info(const struct pci_device_id *dev_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
+ if (ipr_chip[i].vendor == dev_id->vendor &&
+ ipr_chip[i].device == dev_id->device)
+ return &ipr_chip[i];
+ return NULL;
+}
+
+/**
+ * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
+ * during probe time
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * None
+ **/
+static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ if (pci_channel_offline(pdev)) {
+ wait_event_timeout(ioa_cfg->eeh_wait_q,
+ !pci_channel_offline(pdev),
+ IPR_PCI_ERROR_RECOVERY_TIMEOUT);
+ pci_restore_state(pdev);
+ }
+}
+
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+ int i, vectors;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ vectors = pci_enable_msix_range(ioa_cfg->pdev,
+ entries, 1, ipr_number_of_msix);
+ if (vectors < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ return vectors;
+ }
+
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
+
+ return 0;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, vectors;
+
+ vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
+ if (vectors < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ return vectors;
+ }
+
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
+
+ return 0;
+}
+
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+ for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+ snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+ "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+ ioa_cfg->vectors_info[vec_idx].
+ desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+ }
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, rc;
+
+ for (i = 1; i < ioa_cfg->nvectors; i++) {
+ rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ ipr_isr_mhrrq,
+ 0,
+ ioa_cfg->vectors_info[i].desc,
+ &ioa_cfg->hrrq[i]);
+ if (rc) {
+ while (--i >= 0)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
+ * @pdev: PCI device struct
+ *
+ * Description: Simply set the msi_received flag to 1 indicating that
+ * Message Signaled Interrupts are supported.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static irqreturn_t ipr_test_intr(int irq, void *devp)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+ unsigned long lock_flags = 0;
+ irqreturn_t rc = IRQ_HANDLED;
+
+ dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ ioa_cfg->msi_received = 1;
+ wake_up(&ioa_cfg->msi_wait_q);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
+ * @pdev: PCI device struct
+ *
+ * Description: The return value from pci_enable_msi_range() can not always be
+ * trusted. This routine sets up and initiates a test interrupt to determine
+ * if the interrupt is received via the ipr_test_intr() service routine.
+ * If the tests fails, the driver will fall back to LSI.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
+{
+ int rc;
+ volatile u32 int_reg;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ ioa_cfg->msi_received = 0;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ else
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ return rc;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+
+ if (!ioa_cfg->msi_received) {
+ /* MSI test failed */
+ dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
+ rc = -EOPNOTSUPP;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "MSI test succeeded.\n");
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
+ else
+ free_irq(pdev->irq, ioa_cfg);
+
+ LEAVE;
+
+ return rc;
+}
+
+ /* ipr_probe_ioa - Allocates memory and does first stage of initialization
+ * @pdev: PCI device struct
+ * @dev_id: PCI device id struct
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_probe_ioa(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct Scsi_Host *host;
+ unsigned long ipr_regs_pci;
+ void __iomem *ipr_regs;
+ int rc = PCIBIOS_SUCCESSFUL;
+ volatile u32 mask, uproc, interrupts;
+ unsigned long lock_flags, driver_lock_flags;
+
+ ENTER;
+
+ dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
+ host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
+
+ if (!host) {
+ dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
+ memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
+ ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
+
+ ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
+
+ if (!ioa_cfg->ipr_chip) {
+ dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
+ dev_id->vendor, dev_id->device);
+ goto out_scsi_host_put;
+ }
+
+ /* set SIS 32 or SIS 64 */
+ ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
+ ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+ ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
+ ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
+
+ if (ipr_transop_timeout)
+ ioa_cfg->transop_timeout = ipr_transop_timeout;
+ else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
+ ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
+ else
+ ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
+
+ ioa_cfg->revid = pdev->revision;
+
+ ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
+ ipr_regs_pci = pci_resource_start(pdev, 0);
+
+ rc = pci_request_regions(pdev, IPR_NAME);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't register memory range of registers\n");
+ goto out_scsi_host_put;
+ }
+
+ rc = pci_enable_device(pdev);
+
+ if (rc || pci_channel_offline(pdev)) {
+ if (pci_channel_offline(pdev)) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ rc = pci_enable_device(pdev);
+ }
+
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable adapter\n");
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto out_release_regions;
+ }
+ }
+
+ ipr_regs = pci_ioremap_bar(pdev, 0);
+
+ if (!ipr_regs) {
+ dev_err(&pdev->dev,
+ "Couldn't map memory range of registers\n");
+ rc = -ENOMEM;
+ goto out_disable;
+ }
+
+ ioa_cfg->hdw_dma_regs = ipr_regs;
+ ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
+ ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
+
+ ipr_init_regs(ioa_cfg);
+
+ if (ioa_cfg->sis64) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc < 0) {
+ dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(32));
+ }
+ } else
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto cleanup_nomem;
+ }
+
+ rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ ioa_cfg->chip_cfg->cache_line_size);
+
+ if (rc != PCIBIOS_SUCCESSFUL) {
+ dev_err(&pdev->dev, "Write of cache line size failed\n");
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ rc = -EIO;
+ goto cleanup_nomem;
+ }
+
+ /* Issue MMIO read to ensure card is not in EEH */
+ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+
+ if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+ dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+ IPR_MAX_MSIX_VECTORS);
+ ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+ }
+
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msix(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSIX;
+ else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msi(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSI;
+ else {
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_channel_offline(pdev)) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ pci_set_master(pdev);
+ if (pci_channel_offline(pdev)) {
+ rc = -EIO;
+ goto out_msi_disable;
+ }
+ }
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ rc = ipr_test_msi(ioa_cfg, pdev);
+ if (rc == -EOPNOTSUPP) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ pci_disable_msi(pdev);
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ pci_disable_msix(pdev);
+ }
+
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ }
+ else if (rc)
+ goto out_msi_disable;
+ else {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ dev_info(&pdev->dev,
+ "Request for %d MSIs succeeded with starting IRQ: %d\n",
+ ioa_cfg->nvectors, pdev->irq);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ dev_info(&pdev->dev,
+ "Request for %d MSIXs succeeded.",
+ ioa_cfg->nvectors);
+ }
+ }
+
+ ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+ (unsigned int)num_online_cpus(),
+ (unsigned int)IPR_MAX_HRRQ_NUM);
+
+ if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
+ goto out_msi_disable;
+
+ if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
+ goto out_msi_disable;
+
+ rc = ipr_alloc_mem(ioa_cfg);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't allocate enough memory for device driver!\n");
+ goto out_msi_disable;
+ }
+
+ /* Save away PCI config space for use following IOA reset */
+ rc = pci_save_state(pdev);
+
+ if (rc != PCIBIOS_SUCCESSFUL) {
+ dev_err(&pdev->dev, "Failed to save PCI config space\n");
+ rc = -EIO;
+ goto cleanup_nolog;
+ }
+
+ /*
+ * If HRRQ updated interrupt is not masked, or reset alert is set,
+ * the card is in an unknown state and needs a hard reset
+ */
+ mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
+ if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
+ ioa_cfg->needs_hard_reset = 1;
+ if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
+ ioa_cfg->needs_hard_reset = 1;
+ if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
+ ioa_cfg->ioa_unit_checked = 1;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI
+ || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ name_msi_vectors(ioa_cfg);
+ rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+ 0,
+ ioa_cfg->vectors_info[0].desc,
+ &ioa_cfg->hrrq[0]);
+ if (!rc)
+ rc = ipr_request_other_msi_irqs(ioa_cfg);
+ } else {
+ rc = request_irq(pdev->irq, ipr_isr,
+ IRQF_SHARED,
+ IPR_NAME, &ioa_cfg->hrrq[0]);
+ }
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
+ pdev->irq, rc);
+ goto cleanup_nolog;
+ }
+
+ if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
+ (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
+ ioa_cfg->needs_warm_reset = 1;
+ ioa_cfg->reset = ipr_reset_slot_reset;
+
+ ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
+ WQ_MEM_RECLAIM, host->host_no);
+
+ if (!ioa_cfg->reset_work_q) {
+ dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
+ goto out_free_irq;
+ }
+ } else
+ ioa_cfg->reset = ipr_reset_start_bist;
+
+ spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
+ list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
+ spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
+
+ LEAVE;
+out:
+ return rc;
+
+out_free_irq:
+ ipr_free_irqs(ioa_cfg);
+cleanup_nolog:
+ ipr_free_mem(ioa_cfg);
+out_msi_disable:
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ pci_disable_msi(pdev);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ pci_disable_msix(pdev);
+cleanup_nomem:
+ iounmap(ipr_regs);
+out_disable:
+ pci_disable_device(pdev);
+out_release_regions:
+ pci_release_regions(pdev);
+out_scsi_host_put:
+ scsi_host_put(host);
+ goto out;
+}
+
+/**
+ * ipr_initiate_ioa_bringdown - Bring down an adapter
+ * @ioa_cfg: ioa config struct
+ * @shutdown_type: shutdown type
+ *
+ * Description: This function will initiate bringing down the adapter.
+ * This consists of issuing an IOA shutdown to the adapter
+ * to flush the cache, and running BIST.
+ * If the caller needs to wait on the completion of the reset,
+ * the caller must sleep on the reset_wait_q.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
+ enum ipr_shutdown_type shutdown_type)
+{
+ ENTER;
+ if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+ ioa_cfg->reset_retries = 0;
+ ioa_cfg->in_ioa_bringdown = 1;
+ ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
+ LEAVE;
+}
+
+/**
+ * __ipr_remove - Remove a single adapter
+ * @pdev: pci device struct
+ *
+ * Adapter hot plug remove entry point.
+ *
+ * Return value:
+ * none
+ **/
+static void __ipr_remove(struct pci_dev *pdev)
+{
+ unsigned long host_lock_flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
+ unsigned long driver_lock_flags;
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+ }
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].removing_ioa = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ flush_work(&ioa_cfg->work_q);
+ if (ioa_cfg->reset_work_q)
+ flush_workqueue(ioa_cfg->reset_work_q);
+ INIT_LIST_HEAD(&ioa_cfg->used_res_q);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+
+ spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
+ list_del(&ioa_cfg->queue);
+ spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
+
+ if (ioa_cfg->sdt_state == ABORT_DUMP)
+ ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+
+ ipr_free_all_resources(ioa_cfg);
+
+ LEAVE;
+}
+
+/**
+ * ipr_remove - IOA hot plug remove entry point
+ * @pdev: pci device struct
+ *
+ * Adapter hot plug remove entry point.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_remove(struct pci_dev *pdev)
+{
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+ ENTER;
+
+ ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_trace_attr);
+ ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_dump_attr);
+ scsi_remove_host(ioa_cfg->host);
+
+ __ipr_remove(pdev);
+
+ LEAVE;
+}
+
+/**
+ * ipr_probe - Adapter hot plug add entry point
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ int rc, i;
+
+ rc = ipr_probe_ioa(pdev, dev_id);
+
+ if (rc)
+ return rc;
+
+ ioa_cfg = pci_get_drvdata(pdev);
+ rc = ipr_probe_ioa_part2(ioa_cfg);
+
+ if (rc) {
+ __ipr_remove(pdev);
+ return rc;
+ }
+
+ rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
+
+ if (rc) {
+ __ipr_remove(pdev);
+ return rc;
+ }
+
+ rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_trace_attr);
+
+ if (rc) {
+ scsi_remove_host(ioa_cfg->host);
+ __ipr_remove(pdev);
+ return rc;
+ }
+
+ rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_dump_attr);
+
+ if (rc) {
+ ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_trace_attr);
+ scsi_remove_host(ioa_cfg->host);
+ __ipr_remove(pdev);
+ return rc;
+ }
+
+ scsi_scan_host(ioa_cfg->host);
+ ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+
+ schedule_work(&ioa_cfg->work_q);
+ return 0;
+}
+
+/**
+ * ipr_shutdown - Shutdown handler.
+ * @pdev: pci device struct
+ *
+ * This function is invoked upon system shutdown/reboot. It will issue
+ * an adapter shutdown to the adapter to flush the write cache.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_shutdown(struct pci_dev *pdev)
+{
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ unsigned long lock_flags = 0;
+ enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
+ int i;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ ioa_cfg->iopoll_weight = 0;
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
+ while (ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
+ shutdown_type = IPR_SHUTDOWN_QUIESCE;
+
+ ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
+ ipr_free_irqs(ioa_cfg);
+ pci_disable_device(ioa_cfg->pdev);
+ }
+}
+
+static struct pci_device_id ipr_pci_table[] = {
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT},
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
+ IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ipr_pci_table);
+
+static const struct pci_error_handlers ipr_err_handler = {
+ .error_detected = ipr_pci_error_detected,
+ .mmio_enabled = ipr_pci_mmio_enabled,
+ .slot_reset = ipr_pci_slot_reset,
+};
+
+static struct pci_driver ipr_driver = {
+ .name = IPR_NAME,
+ .id_table = ipr_pci_table,
+ .probe = ipr_probe,
+ .remove = ipr_remove,
+ .shutdown = ipr_shutdown,
+ .err_handler = &ipr_err_handler,
+};
+
+/**
+ * ipr_halt_done - Shutdown prepare completion
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
+{
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+}
+
+/**
+ * ipr_halt - Issue shutdown prepare to all adapters
+ *
+ * Return value:
+ * NOTIFY_OK on success / NOTIFY_DONE on failure
+ **/
+static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long flags = 0, driver_lock_flags;
+
+ if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+
+ spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
+
+ list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ continue;
+ }
+
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
+
+ ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ }
+ spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ipr_notifier = {
+ ipr_halt, NULL, 0
+};
+
+/**
+ * ipr_init - Module entry point
+ *
+ * Return value:
+ * 0 on success / negative value on failure
+ **/
+static int __init ipr_init(void)
+{
+ ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
+ IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
+
+ register_reboot_notifier(&ipr_notifier);
+ return pci_register_driver(&ipr_driver);
+}
+
+/**
+ * ipr_exit - Module unload
+ *
+ * Module unload entry point.
+ *
+ * Return value:
+ * none
+ **/
+static void __exit ipr_exit(void)
+{
+ unregister_reboot_notifier(&ipr_notifier);
+ pci_unregister_driver(&ipr_driver);
+}
+
+module_init(ipr_init);
+module_exit(ipr_exit);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
new file mode 100644
index 000000000..73790a1d0
--- /dev/null
+++ b/drivers/scsi/ipr.h
@@ -0,0 +1,1973 @@
+/*
+ * ipr.h -- driver for IBM Power Linux RAID adapters
+ *
+ * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2003, 2004 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
+ * that broke 64bit platforms.
+ */
+
+#ifndef _IPR_H
+#define _IPR_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/libata.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/blk-iopoll.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+/*
+ * Literals
+ */
+#define IPR_DRIVER_VERSION "2.6.1"
+#define IPR_DRIVER_DATE "(March 12, 2015)"
+
+/*
+ * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
+ * ops per device for devices not running tagged command queuing.
+ * This can be adjusted at runtime through sysfs device attributes.
+ */
+#define IPR_MAX_CMD_PER_LUN 6
+#define IPR_MAX_CMD_PER_ATA_LUN 1
+
+/*
+ * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
+ * ops the mid-layer can send to the adapter.
+ */
+#define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds)
+
+#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
+
+#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
+#define PCI_DEVICE_ID_IBM_CROCODILE 0x034A
+
+#define IPR_SUBS_DEV_ID_2780 0x0264
+#define IPR_SUBS_DEV_ID_5702 0x0266
+#define IPR_SUBS_DEV_ID_5703 0x0278
+#define IPR_SUBS_DEV_ID_572E 0x028D
+#define IPR_SUBS_DEV_ID_573E 0x02D3
+#define IPR_SUBS_DEV_ID_573D 0x02D4
+#define IPR_SUBS_DEV_ID_571A 0x02C0
+#define IPR_SUBS_DEV_ID_571B 0x02BE
+#define IPR_SUBS_DEV_ID_571E 0x02BF
+#define IPR_SUBS_DEV_ID_571F 0x02D5
+#define IPR_SUBS_DEV_ID_572A 0x02C1
+#define IPR_SUBS_DEV_ID_572B 0x02C2
+#define IPR_SUBS_DEV_ID_572F 0x02C3
+#define IPR_SUBS_DEV_ID_574E 0x030A
+#define IPR_SUBS_DEV_ID_575B 0x030D
+#define IPR_SUBS_DEV_ID_575C 0x0338
+#define IPR_SUBS_DEV_ID_57B3 0x033A
+#define IPR_SUBS_DEV_ID_57B7 0x0360
+#define IPR_SUBS_DEV_ID_57B8 0x02C2
+
+#define IPR_SUBS_DEV_ID_57B4 0x033B
+#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C0 0x0352
+#define IPR_SUBS_DEV_ID_57C3 0x0353
+#define IPR_SUBS_DEV_ID_57C4 0x0354
+#define IPR_SUBS_DEV_ID_57C6 0x0357
+#define IPR_SUBS_DEV_ID_57CC 0x035C
+
+#define IPR_SUBS_DEV_ID_57B5 0x033C
+#define IPR_SUBS_DEV_ID_57CE 0x035E
+#define IPR_SUBS_DEV_ID_57B1 0x0355
+
+#define IPR_SUBS_DEV_ID_574D 0x0356
+#define IPR_SUBS_DEV_ID_57C8 0x035D
+
+#define IPR_SUBS_DEV_ID_57D5 0x03FB
+#define IPR_SUBS_DEV_ID_57D6 0x03FC
+#define IPR_SUBS_DEV_ID_57D7 0x03FF
+#define IPR_SUBS_DEV_ID_57D8 0x03FE
+#define IPR_SUBS_DEV_ID_57D9 0x046D
+#define IPR_SUBS_DEV_ID_57DA 0x04CA
+#define IPR_SUBS_DEV_ID_57EB 0x0474
+#define IPR_SUBS_DEV_ID_57EC 0x0475
+#define IPR_SUBS_DEV_ID_57ED 0x0499
+#define IPR_SUBS_DEV_ID_57EE 0x049A
+#define IPR_SUBS_DEV_ID_57EF 0x049B
+#define IPR_SUBS_DEV_ID_57F0 0x049C
+#define IPR_SUBS_DEV_ID_2CCA 0x04C7
+#define IPR_SUBS_DEV_ID_2CD2 0x04C8
+#define IPR_SUBS_DEV_ID_2CCD 0x04C9
+#define IPR_NAME "ipr"
+
+/*
+ * Return codes
+ */
+#define IPR_RC_JOB_CONTINUE 1
+#define IPR_RC_JOB_RETURN 2
+
+/*
+ * IOASCs
+ */
+#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
+#define IPR_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
+#define IPR_IOASC_SYNC_REQUIRED 0x023f0000
+#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00
+#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000
+#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500
+#define IPR_IOASC_IOASC_MASK 0xFFFFFF00
+#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF
+#define IPR_IOASC_HW_CMD_FAILED 0x046E0000
+#define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000
+#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000
+#define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100
+#define IPR_IOASA_IR_DUAL_IOA_DISABLED 0x052C8000
+#define IPR_IOASC_BUS_WAS_RESET 0x06290000
+#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000
+#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000
+#define IPR_IOASC_IR_NON_OPTIMIZED 0x05258200
+
+#define IPR_FIRST_DRIVER_IOASC 0x10000000
+#define IPR_IOASC_IOA_WAS_RESET 0x10000001
+#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
+
+/* Driver data flags */
+#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001
+#define IPR_USE_PCI_WARM_RESET 0x00000002
+
+#define IPR_DEFAULT_MAX_ERROR_DUMP 984
+#define IPR_NUM_LOG_HCAMS 2
+#define IPR_NUM_CFG_CHG_HCAMS 2
+#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+
+#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024
+#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff
+
+#define IPR_MAX_NUM_TARGETS_PER_BUS 256
+#define IPR_MAX_NUM_LUNS_PER_TARGET 256
+#define IPR_VSET_BUS 0xff
+#define IPR_IOA_BUS 0xff
+#define IPR_IOA_TARGET 0xff
+#define IPR_IOA_LUN 0xff
+#define IPR_MAX_NUM_BUSES 16
+
+#define IPR_NUM_RESET_RELOAD_RETRIES 3
+
+/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */
+#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
+ ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
+
+#define IPR_MAX_COMMANDS 100
+#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
+ IPR_NUM_INTERNAL_CMD_BLKS)
+
+#define IPR_MAX_PHYSICAL_DEVS 192
+#define IPR_DEFAULT_SIS64_DEVS 1024
+#define IPR_MAX_SIS64_DEVS 4096
+
+#define IPR_MAX_SGLIST 64
+#define IPR_IOA_MAX_SECTORS 32767
+#define IPR_VSET_MAX_SECTORS 512
+#define IPR_MAX_CDB_LEN 16
+#define IPR_MAX_HRRQ_RETRIES 3
+
+#define IPR_DEFAULT_BUS_WIDTH 16
+#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
+#define IPR_U160_SCSI_RATE ((160 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
+#define IPR_U320_SCSI_RATE ((320 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
+#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8))
+
+#define IPR_IOA_RES_HANDLE 0xffffffff
+#define IPR_INVALID_RES_HANDLE 0
+#define IPR_IOA_RES_ADDR 0x00ffffff
+
+/*
+ * Adapter Commands
+ */
+#define IPR_CANCEL_REQUEST 0xC0
+#define IPR_CANCEL_64BIT_IOARCB 0x01
+#define IPR_QUERY_RSRC_STATE 0xC2
+#define IPR_RESET_DEVICE 0xC3
+#define IPR_RESET_TYPE_SELECT 0x80
+#define IPR_LUN_RESET 0x40
+#define IPR_TARGET_RESET 0x20
+#define IPR_BUS_RESET 0x10
+#define IPR_ATA_PHY_RESET 0x80
+#define IPR_ID_HOST_RR_Q 0xC4
+#define IPR_QUERY_IOA_CONFIG 0xC5
+#define IPR_CANCEL_ALL_REQUESTS 0xCE
+#define IPR_HOST_CONTROLLED_ASYNC 0xCF
+#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01
+#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02
+#define IPR_SET_SUPPORTED_DEVICES 0xFB
+#define IPR_SET_ALL_SUPPORTED_DEVICES 0x80
+#define IPR_IOA_SHUTDOWN 0xF7
+#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05
+
+/*
+ * Timeouts
+ */
+#define IPR_SHUTDOWN_TIMEOUT (ipr_fastfail ? 60 * HZ : 10 * 60 * HZ)
+#define IPR_VSET_RW_TIMEOUT (ipr_fastfail ? 30 * HZ : 2 * 60 * HZ)
+#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ)
+#define IPR_DUAL_IOA_ABBR_SHUTDOWN_TO (2 * 60 * HZ)
+#define IPR_DEVICE_RESET_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_CANCEL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ)
+#define IPR_WRITE_BUFFER_TIMEOUT (30 * 60 * HZ)
+#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ)
+#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ)
+#define IPR_OPERATIONAL_TIMEOUT (5 * 60)
+#define IPR_LONG_OPERATIONAL_TIMEOUT (12 * 60)
+#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
+#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
+#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
+#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
+#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
+#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
+#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
+#define IPR_DUMP_DELAY_SECONDS 4
+#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ)
+
+/*
+ * SCSI Literals
+ */
+#define IPR_VENDOR_ID_LEN 8
+#define IPR_PROD_ID_LEN 16
+#define IPR_SERIAL_NUM_LEN 8
+
+/*
+ * Hardware literals
+ */
+#define IPR_FMT2_MBX_ADDR_MASK 0x0fffffff
+#define IPR_FMT2_MBX_BAR_SEL_MASK 0xf0000000
+#define IPR_FMT2_MKR_BAR_SEL_SHIFT 28
+#define IPR_GET_FMT2_BAR_SEL(mbx) \
+(((mbx) & IPR_FMT2_MBX_BAR_SEL_MASK) >> IPR_FMT2_MKR_BAR_SEL_SHIFT)
+#define IPR_SDT_FMT2_BAR0_SEL 0x0
+#define IPR_SDT_FMT2_BAR1_SEL 0x1
+#define IPR_SDT_FMT2_BAR2_SEL 0x2
+#define IPR_SDT_FMT2_BAR3_SEL 0x3
+#define IPR_SDT_FMT2_BAR4_SEL 0x4
+#define IPR_SDT_FMT2_BAR5_SEL 0x5
+#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8
+#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2
+#define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3
+#define IPR_DOORBELL 0x82800000
+#define IPR_RUNTIME_RESET 0x40000000
+
+#define IPR_IPL_INIT_MIN_STAGE_TIME 5
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30
+#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
+#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
+#define IPR_IPL_INIT_STAGE_MASK 0xff000000
+#define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff
+#define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0)
+
+#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0)
+#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3)
+#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4)
+#define IPR_PCII_NO_HOST_RRQ (0x80000000 >> 5)
+#define IPR_PCII_CRITICAL_OPERATION (0x80000000 >> 6)
+#define IPR_PCII_IO_DEBUG_ACKNOWLEDGE (0x80000000 >> 7)
+#define IPR_PCII_IOARRIN_LOST (0x80000000 >> 27)
+#define IPR_PCII_MMIO_ERROR (0x80000000 >> 28)
+#define IPR_PCII_PROC_ERR_STATE (0x80000000 >> 29)
+#define IPR_PCII_HRRQ_UPDATED (0x80000000 >> 30)
+#define IPR_PCII_CORE_ISSUED_RST_REQ (0x80000000 >> 31)
+
+#define IPR_PCII_ERROR_INTERRUPTS \
+(IPR_PCII_IOARCB_XFER_FAILED | IPR_PCII_IOA_UNIT_CHECKED | \
+IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
+
+#define IPR_PCII_OPER_INTERRUPTS \
+(IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED | IPR_PCII_IOA_TRANS_TO_OPER)
+
+#define IPR_UPROCI_RESET_ALERT (0x80000000 >> 7)
+#define IPR_UPROCI_IO_DEBUG_ALERT (0x80000000 >> 9)
+#define IPR_UPROCI_SIS64_START_BIST (0x80000000 >> 23)
+
+#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC 200000 /* 200 ms */
+#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC 200000 /* 200 ms */
+
+/*
+ * Dump literals
+ */
+#define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024)
+#define IPR_FMT3_MAX_IOA_DUMP_SIZE (80 * 1024 * 1024)
+#define IPR_FMT2_NUM_SDT_ENTRIES 511
+#define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF
+#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
+#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
+
+/*
+ * Misc literals
+ */
+#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
+#define IPR_MAX_MSIX_VECTORS 0x10
+#define IPR_MAX_HRRQ_NUM 0x10
+#define IPR_INIT_HRRQ 0x0
+
+/*
+ * Adapter interface types
+ */
+
+struct ipr_res_addr {
+ u8 reserved;
+ u8 bus;
+ u8 target;
+ u8 lun;
+#define IPR_GET_PHYS_LOC(res_addr) \
+ (((res_addr).bus << 16) | ((res_addr).target << 8) | (res_addr).lun)
+}__attribute__((packed, aligned (4)));
+
+struct ipr_std_inq_vpids {
+ u8 vendor_id[IPR_VENDOR_ID_LEN];
+ u8 product_id[IPR_PROD_ID_LEN];
+}__attribute__((packed));
+
+struct ipr_vpd {
+ struct ipr_std_inq_vpids vpids;
+ u8 sn[IPR_SERIAL_NUM_LEN];
+}__attribute__((packed));
+
+struct ipr_ext_vpd {
+ struct ipr_vpd vpd;
+ __be32 wwid[2];
+}__attribute__((packed));
+
+struct ipr_ext_vpd64 {
+ struct ipr_vpd vpd;
+ __be32 wwid[4];
+}__attribute__((packed));
+
+struct ipr_std_inq_data {
+ u8 peri_qual_dev_type;
+#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5)
+#define IPR_STD_INQ_PERI_DEV_TYPE(peri) ((peri) & 0x1F)
+
+ u8 removeable_medium_rsvd;
+#define IPR_STD_INQ_REMOVEABLE_MEDIUM 0x80
+
+#define IPR_IS_DASD_DEVICE(std_inq) \
+((IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_DISK) && \
+!(((std_inq).removeable_medium_rsvd) & IPR_STD_INQ_REMOVEABLE_MEDIUM))
+
+#define IPR_IS_SES_DEVICE(std_inq) \
+(IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_ENCLOSURE)
+
+ u8 version;
+ u8 aen_naca_fmt;
+ u8 additional_len;
+ u8 sccs_rsvd;
+ u8 bq_enc_multi;
+ u8 sync_cmdq_flags;
+
+ struct ipr_std_inq_vpids vpids;
+
+ u8 ros_rsvd_ram_rsvd[4];
+
+ u8 serial_num[IPR_SERIAL_NUM_LEN];
+}__attribute__ ((packed));
+
+#define IPR_RES_TYPE_AF_DASD 0x00
+#define IPR_RES_TYPE_GENERIC_SCSI 0x01
+#define IPR_RES_TYPE_VOLUME_SET 0x02
+#define IPR_RES_TYPE_REMOTE_AF_DASD 0x03
+#define IPR_RES_TYPE_GENERIC_ATA 0x04
+#define IPR_RES_TYPE_ARRAY 0x05
+#define IPR_RES_TYPE_IOAFP 0xff
+
+struct ipr_config_table_entry {
+ u8 proto;
+#define IPR_PROTO_SATA 0x02
+#define IPR_PROTO_SATA_ATAPI 0x03
+#define IPR_PROTO_SAS_STP 0x06
+#define IPR_PROTO_SAS_STP_ATAPI 0x07
+ u8 array_id;
+ u8 flags;
+#define IPR_IS_IOA_RESOURCE 0x80
+ u8 rsvd_subtype;
+
+#define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4)
+#define IPR_QUEUE_FROZEN_MODEL 0
+#define IPR_QUEUE_NACA_MODEL 1
+
+ struct ipr_res_addr res_addr;
+ __be32 res_handle;
+ __be32 lun_wwn[2];
+ struct ipr_std_inq_data std_inq_data;
+}__attribute__ ((packed, aligned (4)));
+
+struct ipr_config_table_entry64 {
+ u8 res_type;
+ u8 proto;
+ u8 vset_num;
+ u8 array_id;
+ __be16 flags;
+ __be16 res_flags;
+#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12)
+ __be32 res_handle;
+ u8 dev_id_type;
+ u8 reserved[3];
+ __be64 dev_id;
+ __be64 lun;
+ __be64 lun_wwn[2];
+#define IPR_MAX_RES_PATH_LENGTH 48
+ __be64 res_path;
+ struct ipr_std_inq_data std_inq_data;
+ u8 reserved2[4];
+ __be64 reserved3[2];
+ u8 reserved4[8];
+}__attribute__ ((packed, aligned (8)));
+
+struct ipr_config_table_hdr {
+ u8 num_entries;
+ u8 flags;
+#define IPR_UCODE_DOWNLOAD_REQ 0x10
+ __be16 reserved;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_config_table_hdr64 {
+ __be16 num_entries;
+ __be16 reserved;
+ u8 flags;
+ u8 reserved2[11];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_config_table {
+ struct ipr_config_table_hdr hdr;
+ struct ipr_config_table_entry dev[0];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_config_table64 {
+ struct ipr_config_table_hdr64 hdr64;
+ struct ipr_config_table_entry64 dev[0];
+}__attribute__((packed, aligned (8)));
+
+struct ipr_config_table_entry_wrapper {
+ union {
+ struct ipr_config_table_entry *cfgte;
+ struct ipr_config_table_entry64 *cfgte64;
+ } u;
+};
+
+struct ipr_hostrcb_cfg_ch_not {
+ union {
+ struct ipr_config_table_entry cfgte;
+ struct ipr_config_table_entry64 cfgte64;
+ } u;
+ u8 reserved[936];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_supported_device {
+ __be16 data_length;
+ u8 reserved;
+ u8 num_records;
+ struct ipr_std_inq_vpids vpids;
+ u8 reserved2[16];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hrr_queue {
+ struct ipr_ioa_cfg *ioa_cfg;
+ __be32 *host_rrq;
+ dma_addr_t host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET 0x00000002
+#define IPR_HRRQ_TOGGLE_BIT 0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
+#define IPR_ID_HRRQ_SELE_ENABLE 0x02
+ volatile __be32 *hrrq_start;
+ volatile __be32 *hrrq_end;
+ volatile __be32 *hrrq_curr;
+
+ struct list_head hrrq_free_q;
+ struct list_head hrrq_pending_q;
+ spinlock_t _lock;
+ spinlock_t *lock;
+
+ volatile u32 toggle_bit;
+ u32 size;
+ u32 min_cmd_id;
+ u32 max_cmd_id;
+ u8 allow_interrupts:1;
+ u8 ioa_is_dead:1;
+ u8 allow_cmds:1;
+ u8 removing_ioa:1;
+
+ struct blk_iopoll iopoll;
+};
+
+/* Command packet structure */
+struct ipr_cmd_pkt {
+ u8 reserved; /* Reserved by IOA */
+ u8 hrrq_id;
+ u8 request_type;
+#define IPR_RQTYPE_SCSICDB 0x00
+#define IPR_RQTYPE_IOACMD 0x01
+#define IPR_RQTYPE_HCAM 0x02
+#define IPR_RQTYPE_ATA_PASSTHRU 0x04
+#define IPR_RQTYPE_PIPE 0x05
+
+ u8 reserved2;
+
+ u8 flags_hi;
+#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80
+#define IPR_FLAGS_HI_NO_ULEN_CHK 0x20
+#define IPR_FLAGS_HI_SYNC_OVERRIDE 0x10
+#define IPR_FLAGS_HI_SYNC_COMPLETE 0x08
+#define IPR_FLAGS_HI_NO_LINK_DESC 0x04
+
+ u8 flags_lo;
+#define IPR_FLAGS_LO_ALIGNED_BFR 0x20
+#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10
+#define IPR_FLAGS_LO_UNTAGGED_TASK 0x00
+#define IPR_FLAGS_LO_SIMPLE_TASK 0x02
+#define IPR_FLAGS_LO_ORDERED_TASK 0x04
+#define IPR_FLAGS_LO_HEAD_OF_Q_TASK 0x06
+#define IPR_FLAGS_LO_ACA_TASK 0x08
+
+ u8 cdb[16];
+ __be16 timeout;
+}__attribute__ ((packed, aligned(4)));
+
+struct ipr_ioarcb_ata_regs { /* 22 bytes */
+ u8 flags;
+#define IPR_ATA_FLAG_PACKET_CMD 0x80
+#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
+#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
+ u8 reserved[3];
+
+ __be16 data;
+ u8 feature;
+ u8 nsect;
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+ u8 device;
+ u8 command;
+ u8 reserved2[3];
+ u8 hob_feature;
+ u8 hob_nsect;
+ u8 hob_lbal;
+ u8 hob_lbam;
+ u8 hob_lbah;
+ u8 ctl;
+}__attribute__ ((packed, aligned(2)));
+
+struct ipr_ioadl_desc {
+ __be32 flags_and_data_len;
+#define IPR_IOADL_FLAGS_MASK 0xff000000
+#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
+#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
+#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
+#define IPR_IOADL_FLAGS_READ 0x48000000
+#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
+#define IPR_IOADL_FLAGS_WRITE 0x68000000
+#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
+#define IPR_IOADL_FLAGS_LAST 0x01000000
+
+ __be32 address;
+}__attribute__((packed, aligned (8)));
+
+struct ipr_ioadl64_desc {
+ __be32 flags;
+ __be32 data_len;
+ __be64 address;
+}__attribute__((packed, aligned (16)));
+
+struct ipr_ata64_ioadl {
+ struct ipr_ioarcb_ata_regs regs;
+ u16 reserved[5];
+ struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
+}__attribute__((packed, aligned (16)));
+
+struct ipr_ioarcb_add_data {
+ union {
+ struct ipr_ioarcb_ata_regs regs;
+ struct ipr_ioadl_desc ioadl[5];
+ __be32 add_cmd_parms[10];
+ } u;
+}__attribute__ ((packed, aligned (4)));
+
+struct ipr_ioarcb_sis64_add_addr_ecb {
+ __be64 ioasa_host_pci_addr;
+ __be64 data_ioadl_addr;
+ __be64 reserved;
+ __be32 ext_control_buf[4];
+}__attribute__((packed, aligned (8)));
+
+/* IOA Request Control Block 128 bytes */
+struct ipr_ioarcb {
+ union {
+ __be32 ioarcb_host_pci_addr;
+ __be64 ioarcb_host_pci_addr64;
+ } a;
+ __be32 res_handle;
+ __be32 host_response_handle;
+ __be32 reserved1;
+ __be32 reserved2;
+ __be32 reserved3;
+
+ __be32 data_transfer_length;
+ __be32 read_data_transfer_length;
+ __be32 write_ioadl_addr;
+ __be32 ioadl_len;
+ __be32 read_ioadl_addr;
+ __be32 read_ioadl_len;
+
+ __be32 ioasa_host_pci_addr;
+ __be16 ioasa_len;
+ __be16 reserved4;
+
+ struct ipr_cmd_pkt cmd_pkt;
+
+ __be16 add_cmd_parms_offset;
+ __be16 add_cmd_parms_len;
+
+ union {
+ struct ipr_ioarcb_add_data add_data;
+ struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data;
+ } u;
+
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_vset {
+ __be32 failing_lba_hi;
+ __be32 failing_lba_lo;
+ __be32 reserved;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_af_dasd {
+ __be32 failing_lba;
+ __be32 reserved[2];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_gpdd {
+ u8 end_state;
+ u8 bus_phase;
+ __be16 reserved;
+ __be32 ioa_data[2];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_gata {
+ u8 error;
+ u8 nsect; /* Interrupt reason */
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+ u8 device;
+ u8 status;
+ u8 alt_status; /* ATA CTL */
+ u8 hob_nsect;
+ u8 hob_lbal;
+ u8 hob_lbam;
+ u8 hob_lbah;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_auto_sense {
+ __be16 auto_sense_len;
+ __be16 ioa_data_len;
+ __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)];
+};
+
+struct ipr_ioasa_hdr {
+ __be32 ioasc;
+#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
+#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
+#define IPR_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8)
+#define IPR_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff)
+
+ __be16 ret_stat_len; /* Length of the returned IOASA */
+
+ __be16 avail_stat_len; /* Total Length of status available. */
+
+ __be32 residual_data_len; /* number of bytes in the host data */
+ /* buffers that were not used by the IOARCB command. */
+
+ __be32 ilid;
+#define IPR_NO_ILID 0
+#define IPR_DRIVER_ILID 0xffffffff
+
+ __be32 fd_ioasc;
+
+ __be32 fd_phys_locator;
+
+ __be32 fd_res_handle;
+
+ __be32 ioasc_specific; /* status code specific field */
+#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
+#define IPR_AUTOSENSE_VALID 0x40000000
+#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
+#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
+#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
+#define IPR_FIELD_POINTER_MASK 0x0000ffff
+
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa {
+ struct ipr_ioasa_hdr hdr;
+
+ union {
+ struct ipr_ioasa_vset vset;
+ struct ipr_ioasa_af_dasd dasd;
+ struct ipr_ioasa_gpdd gpdd;
+ struct ipr_ioasa_gata gata;
+ } u;
+
+ struct ipr_auto_sense auto_sense;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa64 {
+ struct ipr_ioasa_hdr hdr;
+ u8 fd_res_path[8];
+
+ union {
+ struct ipr_ioasa_vset vset;
+ struct ipr_ioasa_af_dasd dasd;
+ struct ipr_ioasa_gpdd gpdd;
+ struct ipr_ioasa_gata gata;
+ } u;
+
+ struct ipr_auto_sense auto_sense;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_mode_parm_hdr {
+ u8 length;
+ u8 medium_type;
+ u8 device_spec_parms;
+ u8 block_desc_len;
+}__attribute__((packed));
+
+struct ipr_mode_pages {
+ struct ipr_mode_parm_hdr hdr;
+ u8 data[255 - sizeof(struct ipr_mode_parm_hdr)];
+}__attribute__((packed));
+
+struct ipr_mode_page_hdr {
+ u8 ps_page_code;
+#define IPR_MODE_PAGE_PS 0x80
+#define IPR_GET_MODE_PAGE_CODE(hdr) ((hdr)->ps_page_code & 0x3F)
+ u8 page_length;
+}__attribute__ ((packed));
+
+struct ipr_dev_bus_entry {
+ struct ipr_res_addr res_addr;
+ u8 flags;
+#define IPR_SCSI_ATTR_ENABLE_QAS 0x80
+#define IPR_SCSI_ATTR_DISABLE_QAS 0x40
+#define IPR_SCSI_ATTR_QAS_MASK 0xC0
+#define IPR_SCSI_ATTR_ENABLE_TM 0x20
+#define IPR_SCSI_ATTR_NO_TERM_PWR 0x10
+#define IPR_SCSI_ATTR_TM_SUPPORTED 0x08
+#define IPR_SCSI_ATTR_LVD_TO_SE_NOT_ALLOWED 0x04
+
+ u8 scsi_id;
+ u8 bus_width;
+ u8 extended_reset_delay;
+#define IPR_EXTENDED_RESET_DELAY 7
+
+ __be32 max_xfer_rate;
+
+ u8 spinup_delay;
+ u8 reserved3;
+ __be16 reserved4;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_mode_page28 {
+ struct ipr_mode_page_hdr hdr;
+ u8 num_entries;
+ u8 entry_length;
+ struct ipr_dev_bus_entry bus[0];
+}__attribute__((packed));
+
+struct ipr_mode_page24 {
+ struct ipr_mode_page_hdr hdr;
+ u8 flags;
+#define IPR_ENABLE_DUAL_IOA_AF 0x80
+}__attribute__((packed));
+
+struct ipr_ioa_vpd {
+ struct ipr_std_inq_data std_inq_data;
+ u8 ascii_part_num[12];
+ u8 reserved[40];
+ u8 ascii_plant_code[4];
+}__attribute__((packed));
+
+struct ipr_inquiry_page3 {
+ u8 peri_qual_dev_type;
+ u8 page_code;
+ u8 reserved1;
+ u8 page_length;
+ u8 ascii_len;
+ u8 reserved2[3];
+ u8 load_id[4];
+ u8 major_release;
+ u8 card_type;
+ u8 minor_release[2];
+ u8 ptf_number[4];
+ u8 patch_number[4];
+}__attribute__((packed));
+
+struct ipr_inquiry_cap {
+ u8 peri_qual_dev_type;
+ u8 page_code;
+ u8 reserved1;
+ u8 page_length;
+ u8 ascii_len;
+ u8 reserved2;
+ u8 sis_version[2];
+ u8 cap;
+#define IPR_CAP_DUAL_IOA_RAID 0x80
+ u8 reserved3[15];
+}__attribute__((packed));
+
+#define IPR_INQUIRY_PAGE0_ENTRIES 20
+struct ipr_inquiry_page0 {
+ u8 peri_qual_dev_type;
+ u8 page_code;
+ u8 reserved1;
+ u8 len;
+ u8 page[IPR_INQUIRY_PAGE0_ENTRIES];
+}__attribute__((packed));
+
+struct ipr_hostrcb_device_data_entry {
+ struct ipr_vpd vpd;
+ struct ipr_res_addr dev_res_addr;
+ struct ipr_vpd new_vpd;
+ struct ipr_vpd ioa_last_with_dev_vpd;
+ struct ipr_vpd cfc_last_with_dev_vpd;
+ __be32 ioa_data[5];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_device_data_entry_enhanced {
+ struct ipr_ext_vpd vpd;
+ u8 ccin[4];
+ struct ipr_res_addr dev_res_addr;
+ struct ipr_ext_vpd new_vpd;
+ u8 new_ccin[4];
+ struct ipr_ext_vpd ioa_last_with_dev_vpd;
+ struct ipr_ext_vpd cfc_last_with_dev_vpd;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb64_device_data_entry_enhanced {
+ struct ipr_ext_vpd vpd;
+ u8 ccin[4];
+ u8 res_path[8];
+ struct ipr_ext_vpd new_vpd;
+ u8 new_ccin[4];
+ struct ipr_ext_vpd ioa_last_with_dev_vpd;
+ struct ipr_ext_vpd cfc_last_with_dev_vpd;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_array_data_entry {
+ struct ipr_vpd vpd;
+ struct ipr_res_addr expected_dev_res_addr;
+ struct ipr_res_addr dev_res_addr;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb64_array_data_entry {
+ struct ipr_ext_vpd vpd;
+ u8 ccin[4];
+ u8 expected_res_path[8];
+ u8 res_path[8];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_array_data_entry_enhanced {
+ struct ipr_ext_vpd vpd;
+ u8 ccin[4];
+ struct ipr_res_addr expected_dev_res_addr;
+ struct ipr_res_addr dev_res_addr;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_ff_error {
+ __be32 ioa_data[758];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_01_error {
+ __be32 seek_counter;
+ __be32 read_counter;
+ u8 sense_data[32];
+ __be32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_21_error {
+ __be32 wwn[4];
+ u8 res_path[8];
+ u8 primary_problem_desc[32];
+ u8 second_problem_desc[32];
+ __be32 sense_data[8];
+ __be32 cdb[4];
+ __be32 residual_trans_length;
+ __be32 length_of_error;
+ __be32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_02_error {
+ struct ipr_vpd ioa_vpd;
+ struct ipr_vpd cfc_vpd;
+ struct ipr_vpd ioa_last_attached_to_cfc_vpd;
+ struct ipr_vpd cfc_last_attached_to_ioa_vpd;
+ __be32 ioa_data[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_12_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ struct ipr_ext_vpd ioa_last_attached_to_cfc_vpd;
+ struct ipr_ext_vpd cfc_last_attached_to_ioa_vpd;
+ __be32 ioa_data[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_03_error {
+ struct ipr_vpd ioa_vpd;
+ struct ipr_vpd cfc_vpd;
+ __be32 errors_detected;
+ __be32 errors_logged;
+ u8 ioa_data[12];
+ struct ipr_hostrcb_device_data_entry dev[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_13_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ __be32 errors_detected;
+ __be32 errors_logged;
+ struct ipr_hostrcb_device_data_entry_enhanced dev[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_23_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ __be32 errors_detected;
+ __be32 errors_logged;
+ struct ipr_hostrcb64_device_data_entry_enhanced dev[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_04_error {
+ struct ipr_vpd ioa_vpd;
+ struct ipr_vpd cfc_vpd;
+ u8 ioa_data[12];
+ struct ipr_hostrcb_array_data_entry array_member[10];
+ __be32 exposed_mode_adn;
+ __be32 array_id;
+ struct ipr_vpd incomp_dev_vpd;
+ __be32 ioa_data2;
+ struct ipr_hostrcb_array_data_entry array_member2[8];
+ struct ipr_res_addr last_func_vset_res_addr;
+ u8 vset_serial_num[IPR_SERIAL_NUM_LEN];
+ u8 protection_level[8];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_14_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ __be32 exposed_mode_adn;
+ __be32 array_id;
+ struct ipr_res_addr last_func_vset_res_addr;
+ u8 vset_serial_num[IPR_SERIAL_NUM_LEN];
+ u8 protection_level[8];
+ __be32 num_entries;
+ struct ipr_hostrcb_array_data_entry_enhanced array_member[18];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_24_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ u8 reserved[2];
+ u8 exposed_mode_adn;
+#define IPR_INVALID_ARRAY_DEV_NUM 0xff
+ u8 array_id;
+ u8 last_res_path[8];
+ u8 protection_level[8];
+ struct ipr_ext_vpd64 array_vpd;
+ u8 description[16];
+ u8 reserved2[3];
+ u8 num_entries;
+ struct ipr_hostrcb64_array_data_entry array_member[32];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_07_error {
+ u8 failure_reason[64];
+ struct ipr_vpd vpd;
+ u32 data[222];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_17_error {
+ u8 failure_reason[64];
+ struct ipr_ext_vpd vpd;
+ u32 data[476];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_config_element {
+ u8 type_status;
+#define IPR_PATH_CFG_TYPE_MASK 0xF0
+#define IPR_PATH_CFG_NOT_EXIST 0x00
+#define IPR_PATH_CFG_IOA_PORT 0x10
+#define IPR_PATH_CFG_EXP_PORT 0x20
+#define IPR_PATH_CFG_DEVICE_PORT 0x30
+#define IPR_PATH_CFG_DEVICE_LUN 0x40
+
+#define IPR_PATH_CFG_STATUS_MASK 0x0F
+#define IPR_PATH_CFG_NO_PROB 0x00
+#define IPR_PATH_CFG_DEGRADED 0x01
+#define IPR_PATH_CFG_FAILED 0x02
+#define IPR_PATH_CFG_SUSPECT 0x03
+#define IPR_PATH_NOT_DETECTED 0x04
+#define IPR_PATH_INCORRECT_CONN 0x05
+
+ u8 cascaded_expander;
+ u8 phy;
+ u8 link_rate;
+#define IPR_PHY_LINK_RATE_MASK 0x0F
+
+ __be32 wwid[2];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb64_config_element {
+ __be16 length;
+ u8 descriptor_id;
+#define IPR_DESCRIPTOR_MASK 0xC0
+#define IPR_DESCRIPTOR_SIS64 0x00
+
+ u8 reserved;
+ u8 type_status;
+
+ u8 reserved2[2];
+ u8 link_rate;
+
+ u8 res_path[8];
+ __be32 wwid[2];
+}__attribute__((packed, aligned (8)));
+
+struct ipr_hostrcb_fabric_desc {
+ __be16 length;
+ u8 ioa_port;
+ u8 cascaded_expander;
+ u8 phy;
+ u8 path_state;
+#define IPR_PATH_ACTIVE_MASK 0xC0
+#define IPR_PATH_NO_INFO 0x00
+#define IPR_PATH_ACTIVE 0x40
+#define IPR_PATH_NOT_ACTIVE 0x80
+
+#define IPR_PATH_STATE_MASK 0x0F
+#define IPR_PATH_STATE_NO_INFO 0x00
+#define IPR_PATH_HEALTHY 0x01
+#define IPR_PATH_DEGRADED 0x02
+#define IPR_PATH_FAILED 0x03
+
+ __be16 num_entries;
+ struct ipr_hostrcb_config_element elem[1];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb64_fabric_desc {
+ __be16 length;
+ u8 descriptor_id;
+
+ u8 reserved[2];
+ u8 path_state;
+
+ u8 reserved2[2];
+ u8 res_path[8];
+ u8 reserved3[6];
+ __be16 num_entries;
+ struct ipr_hostrcb64_config_element elem[1];
+}__attribute__((packed, aligned (8)));
+
+#define for_each_hrrq(hrrq, ioa_cfg) \
+ for (hrrq = (ioa_cfg)->hrrq; \
+ hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
+
+#define for_each_fabric_cfg(fabric, cfg) \
+ for (cfg = (fabric)->elem; \
+ cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
+ cfg++)
+
+struct ipr_hostrcb_type_20_error {
+ u8 failure_reason[64];
+ u8 reserved[3];
+ u8 num_entries;
+ struct ipr_hostrcb_fabric_desc desc[1];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_30_error {
+ u8 failure_reason[64];
+ u8 reserved[3];
+ u8 num_entries;
+ struct ipr_hostrcb64_fabric_desc desc[1];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_error {
+ __be32 fd_ioasc;
+ struct ipr_res_addr fd_res_addr;
+ __be32 fd_res_handle;
+ __be32 prc;
+ union {
+ struct ipr_hostrcb_type_ff_error type_ff_error;
+ struct ipr_hostrcb_type_01_error type_01_error;
+ struct ipr_hostrcb_type_02_error type_02_error;
+ struct ipr_hostrcb_type_03_error type_03_error;
+ struct ipr_hostrcb_type_04_error type_04_error;
+ struct ipr_hostrcb_type_07_error type_07_error;
+ struct ipr_hostrcb_type_12_error type_12_error;
+ struct ipr_hostrcb_type_13_error type_13_error;
+ struct ipr_hostrcb_type_14_error type_14_error;
+ struct ipr_hostrcb_type_17_error type_17_error;
+ struct ipr_hostrcb_type_20_error type_20_error;
+ } u;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb64_error {
+ __be32 fd_ioasc;
+ __be32 ioa_fw_level;
+ __be32 fd_res_handle;
+ __be32 prc;
+ __be64 fd_dev_id;
+ __be64 fd_lun;
+ u8 fd_res_path[8];
+ __be64 time_stamp;
+ u8 reserved[16];
+ union {
+ struct ipr_hostrcb_type_ff_error type_ff_error;
+ struct ipr_hostrcb_type_12_error type_12_error;
+ struct ipr_hostrcb_type_17_error type_17_error;
+ struct ipr_hostrcb_type_21_error type_21_error;
+ struct ipr_hostrcb_type_23_error type_23_error;
+ struct ipr_hostrcb_type_24_error type_24_error;
+ struct ipr_hostrcb_type_30_error type_30_error;
+ } u;
+}__attribute__((packed, aligned (8)));
+
+struct ipr_hostrcb_raw {
+ __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hcam {
+ u8 op_code;
+#define IPR_HOST_RCB_OP_CODE_CONFIG_CHANGE 0xE1
+#define IPR_HOST_RCB_OP_CODE_LOG_DATA 0xE2
+
+ u8 notify_type;
+#define IPR_HOST_RCB_NOTIF_TYPE_EXISTING_CHANGED 0x00
+#define IPR_HOST_RCB_NOTIF_TYPE_NEW_ENTRY 0x01
+#define IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY 0x02
+#define IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY 0x10
+#define IPR_HOST_RCB_NOTIF_TYPE_INFORMATION_ENTRY 0x11
+
+ u8 notifications_lost;
+#define IPR_HOST_RCB_NO_NOTIFICATIONS_LOST 0
+#define IPR_HOST_RCB_NOTIFICATIONS_LOST 0x80
+
+ u8 flags;
+#define IPR_HOSTRCB_INTERNAL_OPER 0x80
+#define IPR_HOSTRCB_ERR_RESP_SENT 0x40
+
+ u8 overlay_id;
+#define IPR_HOST_RCB_OVERLAY_ID_1 0x01
+#define IPR_HOST_RCB_OVERLAY_ID_2 0x02
+#define IPR_HOST_RCB_OVERLAY_ID_3 0x03
+#define IPR_HOST_RCB_OVERLAY_ID_4 0x04
+#define IPR_HOST_RCB_OVERLAY_ID_6 0x06
+#define IPR_HOST_RCB_OVERLAY_ID_7 0x07
+#define IPR_HOST_RCB_OVERLAY_ID_12 0x12
+#define IPR_HOST_RCB_OVERLAY_ID_13 0x13
+#define IPR_HOST_RCB_OVERLAY_ID_14 0x14
+#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
+#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
+#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
+#define IPR_HOST_RCB_OVERLAY_ID_21 0x21
+#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
+#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
+#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
+#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
+#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
+
+ u8 reserved1[3];
+ __be32 ilid;
+ __be32 time_since_last_ioa_reset;
+ __be32 reserved2;
+ __be32 length;
+
+ union {
+ struct ipr_hostrcb_error error;
+ struct ipr_hostrcb64_error error64;
+ struct ipr_hostrcb_cfg_ch_not ccn;
+ struct ipr_hostrcb_raw raw;
+ } u;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb {
+ struct ipr_hcam hcam;
+ dma_addr_t hostrcb_dma;
+ struct list_head queue;
+ struct ipr_ioa_cfg *ioa_cfg;
+ char rp_buffer[IPR_MAX_RES_PATH_LENGTH];
+};
+
+/* IPR smart dump table structures */
+struct ipr_sdt_entry {
+ __be32 start_token;
+ __be32 end_token;
+ u8 reserved[4];
+
+ u8 flags;
+#define IPR_SDT_ENDIAN 0x80
+#define IPR_SDT_VALID_ENTRY 0x20
+
+ u8 resv;
+ __be16 priority;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_sdt_header {
+ __be32 state;
+ __be32 num_entries;
+ __be32 num_entries_used;
+ __be32 dump_size;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_sdt {
+ struct ipr_sdt_header hdr;
+ struct ipr_sdt_entry entry[IPR_FMT3_NUM_SDT_ENTRIES];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_uc_sdt {
+ struct ipr_sdt_header hdr;
+ struct ipr_sdt_entry entry[1];
+}__attribute__((packed, aligned (4)));
+
+/*
+ * Driver types
+ */
+struct ipr_bus_attributes {
+ u8 bus;
+ u8 qas_enabled;
+ u8 bus_width;
+ u8 reserved;
+ u32 max_xfer_rate;
+};
+
+struct ipr_sata_port {
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ata_port *ap;
+ struct ipr_resource_entry *res;
+ struct ipr_ioasa_gata ioasa;
+};
+
+struct ipr_resource_entry {
+ u8 needs_sync_complete:1;
+ u8 in_erp:1;
+ u8 add_to_ml:1;
+ u8 del_from_ml:1;
+ u8 resetting_device:1;
+ u8 reset_occurred:1;
+ u8 raw_mode:1;
+
+ u32 bus; /* AKA channel */
+ u32 target; /* AKA id */
+ u32 lun;
+#define IPR_ARRAY_VIRTUAL_BUS 0x1
+#define IPR_VSET_VIRTUAL_BUS 0x2
+#define IPR_IOAFP_VIRTUAL_BUS 0x3
+
+#define IPR_GET_RES_PHYS_LOC(res) \
+ (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
+
+ u8 ata_class;
+
+ u8 flags;
+ __be16 res_flags;
+
+ u8 type;
+
+ u8 qmodel;
+ struct ipr_std_inq_data std_inq_data;
+
+ __be32 res_handle;
+ __be64 dev_id;
+ __be64 lun_wwn;
+ struct scsi_lun dev_lun;
+ u8 res_path[8];
+
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct scsi_device *sdev;
+ struct ipr_sata_port *sata_port;
+ struct list_head queue;
+}; /* struct ipr_resource_entry */
+
+struct ipr_resource_hdr {
+ u16 num_entries;
+ u16 reserved;
+};
+
+struct ipr_misc_cbs {
+ struct ipr_ioa_vpd ioa_vpd;
+ struct ipr_inquiry_page0 page0_data;
+ struct ipr_inquiry_page3 page3_data;
+ struct ipr_inquiry_cap cap;
+ struct ipr_mode_pages mode_pages;
+ struct ipr_supported_device supp_dev;
+};
+
+struct ipr_interrupt_offsets {
+ unsigned long set_interrupt_mask_reg;
+ unsigned long clr_interrupt_mask_reg;
+ unsigned long clr_interrupt_mask_reg32;
+ unsigned long sense_interrupt_mask_reg;
+ unsigned long sense_interrupt_mask_reg32;
+ unsigned long clr_interrupt_reg;
+ unsigned long clr_interrupt_reg32;
+
+ unsigned long sense_interrupt_reg;
+ unsigned long sense_interrupt_reg32;
+ unsigned long ioarrin_reg;
+ unsigned long sense_uproc_interrupt_reg;
+ unsigned long sense_uproc_interrupt_reg32;
+ unsigned long set_uproc_interrupt_reg;
+ unsigned long set_uproc_interrupt_reg32;
+ unsigned long clr_uproc_interrupt_reg;
+ unsigned long clr_uproc_interrupt_reg32;
+
+ unsigned long init_feedback_reg;
+
+ unsigned long dump_addr_reg;
+ unsigned long dump_data_reg;
+
+#define IPR_ENDIAN_SWAP_KEY 0x00080800
+ unsigned long endian_swap_reg;
+};
+
+struct ipr_interrupts {
+ void __iomem *set_interrupt_mask_reg;
+ void __iomem *clr_interrupt_mask_reg;
+ void __iomem *clr_interrupt_mask_reg32;
+ void __iomem *sense_interrupt_mask_reg;
+ void __iomem *sense_interrupt_mask_reg32;
+ void __iomem *clr_interrupt_reg;
+ void __iomem *clr_interrupt_reg32;
+
+ void __iomem *sense_interrupt_reg;
+ void __iomem *sense_interrupt_reg32;
+ void __iomem *ioarrin_reg;
+ void __iomem *sense_uproc_interrupt_reg;
+ void __iomem *sense_uproc_interrupt_reg32;
+ void __iomem *set_uproc_interrupt_reg;
+ void __iomem *set_uproc_interrupt_reg32;
+ void __iomem *clr_uproc_interrupt_reg;
+ void __iomem *clr_uproc_interrupt_reg32;
+
+ void __iomem *init_feedback_reg;
+
+ void __iomem *dump_addr_reg;
+ void __iomem *dump_data_reg;
+
+ void __iomem *endian_swap_reg;
+};
+
+struct ipr_chip_cfg_t {
+ u32 mailbox;
+ u16 max_cmds;
+ u8 cache_line_size;
+ u8 clear_isr;
+ u32 iopoll_weight;
+ struct ipr_interrupt_offsets regs;
+};
+
+struct ipr_chip_t {
+ u16 vendor;
+ u16 device;
+ u16 intr_type;
+#define IPR_USE_LSI 0x00
+#define IPR_USE_MSI 0x01
+#define IPR_USE_MSIX 0x02
+ u16 sis_type;
+#define IPR_SIS32 0x00
+#define IPR_SIS64 0x01
+ u16 bist_method;
+#define IPR_PCI_CFG 0x00
+#define IPR_MMIO 0x01
+ const struct ipr_chip_cfg_t *cfg;
+};
+
+enum ipr_shutdown_type {
+ IPR_SHUTDOWN_NORMAL = 0x00,
+ IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40,
+ IPR_SHUTDOWN_ABBREV = 0x80,
+ IPR_SHUTDOWN_NONE = 0x100,
+ IPR_SHUTDOWN_QUIESCE = 0x101,
+};
+
+struct ipr_trace_entry {
+ u32 time;
+
+ u8 op_code;
+ u8 ata_op_code;
+ u8 type;
+#define IPR_TRACE_START 0x00
+#define IPR_TRACE_FINISH 0xff
+ u8 cmd_index;
+
+ __be32 res_handle;
+ union {
+ u32 ioasc;
+ u32 add_data;
+ u32 res_addr;
+ } u;
+};
+
+struct ipr_sglist {
+ u32 order;
+ u32 num_sg;
+ u32 num_dma_sg;
+ u32 buffer_len;
+ struct scatterlist scatterlist[1];
+};
+
+enum ipr_sdt_state {
+ INACTIVE,
+ WAIT_FOR_DUMP,
+ GET_DUMP,
+ READ_DUMP,
+ ABORT_DUMP,
+ DUMP_OBTAINED
+};
+
+/* Per-controller data */
+struct ipr_ioa_cfg {
+ char eye_catcher[8];
+#define IPR_EYECATCHER "iprcfg"
+
+ struct list_head queue;
+
+ u8 in_reset_reload:1;
+ u8 in_ioa_bringdown:1;
+ u8 ioa_unit_checked:1;
+ u8 dump_taken:1;
+ u8 scan_done:1;
+ u8 needs_hard_reset:1;
+ u8 dual_raid:1;
+ u8 needs_warm_reset:1;
+ u8 msi_received:1;
+ u8 sis64:1;
+ u8 dump_timeout:1;
+ u8 cfg_locked:1;
+ u8 clear_isr:1;
+ u8 probe_done:1;
+
+ u8 revid;
+
+ /*
+ * Bitmaps for SIS64 generated target values
+ */
+ unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+ unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+ unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+
+ u16 type; /* CCIN of the card */
+
+ u8 log_level;
+#define IPR_MAX_LOG_LEVEL 4
+#define IPR_DEFAULT_LOG_LEVEL 2
+
+#define IPR_NUM_TRACE_INDEX_BITS 8
+#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
+ char trace_start[8];
+#define IPR_TRACE_START_LABEL "trace"
+ struct ipr_trace_entry *trace;
+ atomic_t trace_index;
+
+ char cfg_table_start[8];
+#define IPR_CFG_TBL_START "cfg"
+ union {
+ struct ipr_config_table *cfg_table;
+ struct ipr_config_table64 *cfg_table64;
+ } u;
+ dma_addr_t cfg_table_dma;
+ u32 cfg_table_size;
+ u32 max_devs_supported;
+
+ char resource_table_label[8];
+#define IPR_RES_TABLE_LABEL "res_tbl"
+ struct ipr_resource_entry *res_entries;
+ struct list_head free_res_q;
+ struct list_head used_res_q;
+
+ char ipr_hcam_label[8];
+#define IPR_HCAM_LABEL "hcams"
+ struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS];
+ dma_addr_t hostrcb_dma[IPR_NUM_HCAMS];
+ struct list_head hostrcb_free_q;
+ struct list_head hostrcb_pending_q;
+
+ struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+ u32 hrrq_num;
+ atomic_t hrrq_index;
+ u16 identify_hrrq_index;
+
+ struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
+
+ unsigned int transop_timeout;
+ const struct ipr_chip_cfg_t *chip_cfg;
+ const struct ipr_chip_t *ipr_chip;
+
+ void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
+ unsigned long hdw_dma_regs_pci; /* raw PCI memory space */
+ void __iomem *ioa_mailbox;
+ struct ipr_interrupts regs;
+
+ u16 saved_pcix_cmd_reg;
+ u16 reset_retries;
+
+ u32 errors_logged;
+ u32 doorbell;
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+ struct ipr_sglist *ucode_sglist;
+ u8 saved_mode_page_len;
+
+ struct work_struct work_q;
+ struct workqueue_struct *reset_work_q;
+
+ wait_queue_head_t reset_wait_q;
+ wait_queue_head_t msi_wait_q;
+ wait_queue_head_t eeh_wait_q;
+
+ struct ipr_dump *dump;
+ enum ipr_sdt_state sdt_state;
+
+ struct ipr_misc_cbs *vpd_cbs;
+ dma_addr_t vpd_cbs_dma;
+
+ struct dma_pool *ipr_cmd_pool;
+
+ struct ipr_cmnd *reset_cmd;
+ int (*reset) (struct ipr_cmnd *);
+
+ struct ata_host ata_host;
+ char ipr_cmd_label[8];
+#define IPR_CMD_LABEL "ipr_cmd"
+ u32 max_cmds;
+ struct ipr_cmnd **ipr_cmnd_list;
+ dma_addr_t *ipr_cmnd_list_dma;
+
+ u16 intr_flag;
+ unsigned int nvectors;
+
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } vectors_info[IPR_MAX_MSIX_VECTORS];
+
+ u32 iopoll_weight;
+
+}; /* struct ipr_ioa_cfg */
+
+struct ipr_cmnd {
+ struct ipr_ioarcb ioarcb;
+ union {
+ struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
+ struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
+ struct ipr_ata64_ioadl ata_ioadl;
+ } i;
+ union {
+ struct ipr_ioasa ioasa;
+ struct ipr_ioasa64 ioasa64;
+ } s;
+ struct list_head queue;
+ struct scsi_cmnd *scsi_cmd;
+ struct ata_queued_cmd *qc;
+ struct completion completion;
+ struct timer_list timer;
+ struct work_struct work;
+ void (*fast_done) (struct ipr_cmnd *);
+ void (*done) (struct ipr_cmnd *);
+ int (*job_step) (struct ipr_cmnd *);
+ int (*job_step_failed) (struct ipr_cmnd *);
+ u16 cmd_index;
+ u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ dma_addr_t sense_buffer_dma;
+ unsigned short dma_use_sg;
+ dma_addr_t dma_addr;
+ struct ipr_cmnd *sibling;
+ union {
+ enum ipr_shutdown_type shutdown_type;
+ struct ipr_hostrcb *hostrcb;
+ unsigned long time_left;
+ unsigned long scratch;
+ struct ipr_resource_entry *res;
+ struct scsi_device *sdev;
+ } u;
+
+ struct completion *eh_comp;
+ struct ipr_hrr_queue *hrrq;
+ struct ipr_ioa_cfg *ioa_cfg;
+};
+
+struct ipr_ses_table_entry {
+ char product_id[17];
+ char compare_product_id_byte[17];
+ u32 max_bus_speed_limit; /* MB/sec limit for this backplane */
+};
+
+struct ipr_dump_header {
+ u32 eye_catcher;
+#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2
+ u32 len;
+ u32 num_entries;
+ u32 first_entry_offset;
+ u32 status;
+#define IPR_DUMP_STATUS_SUCCESS 0
+#define IPR_DUMP_STATUS_QUAL_SUCCESS 2
+#define IPR_DUMP_STATUS_FAILED 0xffffffff
+ u32 os;
+#define IPR_DUMP_OS_LINUX 0x4C4E5558
+ u32 driver_name;
+#define IPR_DUMP_DRIVER_NAME 0x49505232
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump_entry_header {
+ u32 eye_catcher;
+#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2
+ u32 len;
+ u32 num_elems;
+ u32 offset;
+ u32 data_type;
+#define IPR_DUMP_DATA_TYPE_ASCII 0x41534349
+#define IPR_DUMP_DATA_TYPE_BINARY 0x42494E41
+ u32 id;
+#define IPR_DUMP_IOA_DUMP_ID 0x494F4131
+#define IPR_DUMP_LOCATION_ID 0x4C4F4341
+#define IPR_DUMP_TRACE_ID 0x54524143
+#define IPR_DUMP_DRIVER_VERSION_ID 0x44525652
+#define IPR_DUMP_DRIVER_TYPE_ID 0x54595045
+#define IPR_DUMP_IOA_CTRL_BLK 0x494F4342
+#define IPR_DUMP_PEND_OPS 0x414F5053
+ u32 status;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump_location_entry {
+ struct ipr_dump_entry_header hdr;
+ u8 location[20];
+}__attribute__((packed));
+
+struct ipr_dump_trace_entry {
+ struct ipr_dump_entry_header hdr;
+ u32 trace[IPR_TRACE_SIZE / sizeof(u32)];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump_version_entry {
+ struct ipr_dump_entry_header hdr;
+ u8 version[sizeof(IPR_DRIVER_VERSION)];
+};
+
+struct ipr_dump_ioa_type_entry {
+ struct ipr_dump_entry_header hdr;
+ u32 type;
+ u32 fw_version;
+};
+
+struct ipr_driver_dump {
+ struct ipr_dump_header hdr;
+ struct ipr_dump_version_entry version_entry;
+ struct ipr_dump_location_entry location_entry;
+ struct ipr_dump_ioa_type_entry ioa_type_entry;
+ struct ipr_dump_trace_entry trace_entry;
+}__attribute__((packed));
+
+struct ipr_ioa_dump {
+ struct ipr_dump_entry_header hdr;
+ struct ipr_sdt sdt;
+ __be32 **ioa_data;
+ u32 reserved;
+ u32 next_page_index;
+ u32 page_offset;
+ u32 format;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump {
+ struct kref kref;
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_driver_dump driver_dump;
+ struct ipr_ioa_dump ioa_dump;
+};
+
+struct ipr_error_table_t {
+ u32 ioasc;
+ int log_ioasa;
+ int log_hcam;
+ char *error;
+};
+
+struct ipr_software_inq_lid_info {
+ __be32 load_id;
+ __be32 timestamp[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ucode_image_header {
+ __be32 header_length;
+ __be32 lid_table_offset;
+ u8 major_release;
+ u8 card_type;
+ u8 minor_release[2];
+ u8 reserved[20];
+ char eyecatcher[16];
+ __be32 num_lids;
+ struct ipr_software_inq_lid_info lid[1];
+}__attribute__((packed, aligned (4)));
+
+/*
+ * Macros
+ */
+#define IPR_DBG_CMD(CMD) if (ipr_debug) { CMD; }
+
+#ifdef CONFIG_SCSI_IPR_TRACE
+#define ipr_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ipr_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ipr_create_trace_file(kobj, attr) 0
+#define ipr_remove_trace_file(kobj, attr) do { } while(0)
+#endif
+
+#ifdef CONFIG_SCSI_IPR_DUMP
+#define ipr_create_dump_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ipr_remove_dump_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ipr_create_dump_file(kobj, attr) 0
+#define ipr_remove_dump_file(kobj, attr) do { } while(0)
+#endif
+
+/*
+ * Error logging macros
+ */
+#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__)
+#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
+#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
+
+#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \
+ printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
+ bus, target, lun, ##__VA_ARGS__)
+
+#define ipr_res_err(ioa_cfg, res, fmt, ...) \
+ ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__)
+
+#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
+ printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
+ (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
+
+#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
+ ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
+
+#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \
+{ \
+ if ((res).bus >= IPR_MAX_NUM_BUSES) { \
+ ipr_err(fmt": unknown\n", ##__VA_ARGS__); \
+ } else { \
+ ipr_err(fmt": %d:%d:%d:%d\n", \
+ ##__VA_ARGS__, (ioa_cfg)->host->host_no, \
+ (res).bus, (res).target, (res).lun); \
+ } \
+}
+
+#define ipr_hcam_err(hostrcb, fmt, ...) \
+{ \
+ if (ipr_is_device(hostrcb)) { \
+ if ((hostrcb)->ioa_cfg->sis64) { \
+ printk(KERN_ERR IPR_NAME ": %s: " fmt, \
+ ipr_format_res_path(hostrcb->ioa_cfg, \
+ hostrcb->hcam.u.error64.fd_res_path, \
+ hostrcb->rp_buffer, \
+ sizeof(hostrcb->rp_buffer)), \
+ __VA_ARGS__); \
+ } else { \
+ ipr_ra_err((hostrcb)->ioa_cfg, \
+ (hostrcb)->hcam.u.error.fd_res_addr, \
+ fmt, __VA_ARGS__); \
+ } \
+ } else { \
+ dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \
+ } \
+}
+
+#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
+ __FILE__, __func__, __LINE__)
+
+#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__))
+#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__))
+
+#define ipr_err_separator \
+ipr_err("----------------------------------------------------------\n")
+
+
+/*
+ * Inlines
+ */
+
+/**
+ * ipr_is_ioa_resource - Determine if a resource is the IOA
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if IOA / 0 if not IOA
+ **/
+static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
+{
+ return res->type == IPR_RES_TYPE_IOAFP;
+}
+
+/**
+ * ipr_is_af_dasd_device - Determine if a resource is an AF DASD
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if AF DASD / 0 if not AF DASD
+ **/
+static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
+{
+ return res->type == IPR_RES_TYPE_AF_DASD ||
+ res->type == IPR_RES_TYPE_REMOTE_AF_DASD;
+}
+
+/**
+ * ipr_is_vset_device - Determine if a resource is a VSET
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if VSET / 0 if not VSET
+ **/
+static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
+{
+ return res->type == IPR_RES_TYPE_VOLUME_SET;
+}
+
+/**
+ * ipr_is_gscsi - Determine if a resource is a generic scsi resource
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if GSCSI / 0 if not GSCSI
+ **/
+static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
+{
+ return res->type == IPR_RES_TYPE_GENERIC_SCSI;
+}
+
+/**
+ * ipr_is_scsi_disk - Determine if a resource is a SCSI disk
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if SCSI disk / 0 if not SCSI disk
+ **/
+static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
+{
+ if (ipr_is_af_dasd_device(res) ||
+ (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ipr_is_gata - Determine if a resource is a generic ATA resource
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if GATA / 0 if not GATA
+ **/
+static inline int ipr_is_gata(struct ipr_resource_entry *res)
+{
+ return res->type == IPR_RES_TYPE_GENERIC_ATA;
+}
+
+/**
+ * ipr_is_naca_model - Determine if a resource is using NACA queueing model
+ * @res: resource entry struct
+ *
+ * Return value:
+ * 1 if NACA queueing model / 0 if not NACA queueing model
+ **/
+static inline int ipr_is_naca_model(struct ipr_resource_entry *res)
+{
+ if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL)
+ return 1;
+ return 0;
+}
+
+/**
+ * ipr_is_device - Determine if the hostrcb structure is related to a device
+ * @hostrcb: host resource control blocks struct
+ *
+ * Return value:
+ * 1 if AF / 0 if not AF
+ **/
+static inline int ipr_is_device(struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_res_addr *res_addr;
+ u8 *res_path;
+
+ if (hostrcb->ioa_cfg->sis64) {
+ res_path = &hostrcb->hcam.u.error64.fd_res_path[0];
+ if ((res_path[0] == 0x00 || res_path[0] == 0x80 ||
+ res_path[0] == 0x81) && res_path[2] != 0xFF)
+ return 1;
+ } else {
+ res_addr = &hostrcb->hcam.u.error.fd_res_addr;
+
+ if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
+ (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * ipr_sdt_is_fmt2 - Determine if a SDT address is in format 2
+ * @sdt_word: SDT address
+ *
+ * Return value:
+ * 1 if format 2 / 0 if not
+ **/
+static inline int ipr_sdt_is_fmt2(u32 sdt_word)
+{
+ u32 bar_sel = IPR_GET_FMT2_BAR_SEL(sdt_word);
+
+ switch (bar_sel) {
+ case IPR_SDT_FMT2_BAR0_SEL:
+ case IPR_SDT_FMT2_BAR1_SEL:
+ case IPR_SDT_FMT2_BAR2_SEL:
+ case IPR_SDT_FMT2_BAR3_SEL:
+ case IPR_SDT_FMT2_BAR4_SEL:
+ case IPR_SDT_FMT2_BAR5_SEL:
+ case IPR_SDT_FMT2_EXP_ROM_SEL:
+ return 1;
+ };
+
+ return 0;
+}
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val >> 32)), addr);
+ writel(((u32) (val)), (addr + 4));
+}
+#endif
+
+#endif /* _IPR_H */
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
new file mode 100644
index 000000000..7542f11d3
--- /dev/null
+++ b/drivers/scsi/ips.c
@@ -0,0 +1,7175 @@
+/*****************************************************************************/
+/* ips.c -- driver for the Adaptec / IBM ServeRAID controller */
+/* */
+/* Written By: Keith Mitchell, IBM Corporation */
+/* Jack Hammer, Adaptec, Inc. */
+/* David Jeffery, Adaptec, Inc. */
+/* */
+/* Copyright (C) 2000 IBM Corporation */
+/* Copyright (C) 2002,2003 Adaptec, Inc. */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* NO WARRANTY */
+/* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR */
+/* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT */
+/* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, */
+/* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is */
+/* solely responsible for determining the appropriateness of using and */
+/* distributing the Program and assumes all risks associated with its */
+/* exercise of rights under this Agreement, including but not limited to */
+/* the risks and costs of program errors, damage to or loss of data, */
+/* programs or equipment, and unavailability or interruption of operations. */
+/* */
+/* DISCLAIMER OF LIABILITY */
+/* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY */
+/* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL */
+/* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND */
+/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR */
+/* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE */
+/* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED */
+/* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the Free Software */
+/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/* */
+/* Bugs/Comments/Suggestions about this driver should be mailed to: */
+/* ipslinux@adaptec.com */
+/* */
+/* For system support issues, contact your local IBM Customer support. */
+/* Directions to find IBM Customer Support for each country can be found at: */
+/* http://www.ibm.com/planetwide/ */
+/* */
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* Change Log */
+/* */
+/* 0.99.02 - Breakup commands that are bigger than 8 * the stripe size */
+/* 0.99.03 - Make interrupt routine handle all completed request on the */
+/* adapter not just the first one */
+/* - Make sure passthru commands get woken up if we run out of */
+/* SCBs */
+/* - Send all of the commands on the queue at once rather than */
+/* one at a time since the card will support it. */
+/* 0.99.04 - Fix race condition in the passthru mechanism -- this required */
+/* the interface to the utilities to change */
+/* - Fix error recovery code */
+/* 0.99.05 - Fix an oops when we get certain passthru commands */
+/* 1.00.00 - Initial Public Release */
+/* Functionally equivalent to 0.99.05 */
+/* 3.60.00 - Bump max commands to 128 for use with firmware 3.60 */
+/* - Change version to 3.60 to coincide with release numbering. */
+/* 3.60.01 - Remove bogus error check in passthru routine */
+/* 3.60.02 - Make DCDB direction based on lookup table */
+/* - Only allow one DCDB command to a SCSI ID at a time */
+/* 4.00.00 - Add support for ServeRAID 4 */
+/* 4.00.01 - Add support for First Failure Data Capture */
+/* 4.00.02 - Fix problem with PT DCDB with no buffer */
+/* 4.00.03 - Add alternative passthru interface */
+/* - Add ability to flash BIOS */
+/* 4.00.04 - Rename structures/constants to be prefixed with IPS_ */
+/* 4.00.05 - Remove wish_block from init routine */
+/* - Use linux/spinlock.h instead of asm/spinlock.h for kernels */
+/* 2.3.18 and later */
+/* - Sync with other changes from the 2.3 kernels */
+/* 4.00.06 - Fix timeout with initial FFDC command */
+/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig <hch@infradead.org> */
+/* 4.10.00 - Add support for ServeRAID 4M/4L */
+/* 4.10.13 - Fix for dynamic unload and proc file system */
+/* 4.20.03 - Rename version to coincide with new release schedules */
+/* Performance fixes */
+/* Fix truncation of /proc files with cat */
+/* Merge in changes through kernel 2.4.0test1ac21 */
+/* 4.20.13 - Fix some failure cases / reset code */
+/* - Hook into the reboot_notifier to flush the controller cache */
+/* 4.50.01 - Fix problem when there is a hole in logical drive numbering */
+/* 4.70.09 - Use a Common ( Large Buffer ) for Flashing from the JCRM CD */
+/* - Add IPSSEND Flash Support */
+/* - Set Sense Data for Unknown SCSI Command */
+/* - Use Slot Number from NVRAM Page 5 */
+/* - Restore caller's DCDB Structure */
+/* 4.70.12 - Corrective actions for bad controller ( during initialization )*/
+/* 4.70.13 - Don't Send CDB's if we already know the device is not present */
+/* - Don't release HA Lock in ips_next() until SC taken off queue */
+/* - Unregister SCSI device in ips_release() */
+/* 4.70.15 - Fix Breakup for very large ( non-SG ) requests in ips_done() */
+/* 4.71.00 - Change all memory allocations to not use GFP_DMA flag */
+/* Code Clean-Up for 2.4.x kernel */
+/* 4.72.00 - Allow for a Scatter-Gather Element to exceed MAX_XFER Size */
+/* 4.72.01 - I/O Mapped Memory release ( so "insmod ips" does not Fail ) */
+/* - Don't Issue Internal FFDC Command if there are Active Commands */
+/* - Close Window for getting too many IOCTL's active */
+/* 4.80.00 - Make ia64 Safe */
+/* 4.80.04 - Eliminate calls to strtok() if 2.4.x or greater */
+/* - Adjustments to Device Queue Depth */
+/* 4.80.14 - Take all semaphores off stack */
+/* - Clean Up New_IOCTL path */
+/* 4.80.20 - Set max_sectors in Scsi_Host structure ( if >= 2.4.7 kernel ) */
+/* - 5 second delay needed after resetting an i960 adapter */
+/* 4.80.26 - Clean up potential code problems ( Arjan's recommendations ) */
+/* 4.90.01 - Version Matching for FirmWare, BIOS, and Driver */
+/* 4.90.05 - Use New PCI Architecture to facilitate Hot Plug Development */
+/* 4.90.08 - Increase Delays in Flashing ( Trombone Only - 4H ) */
+/* 4.90.08 - Data Corruption if First Scatter Gather Element is > 64K */
+/* 4.90.11 - Don't actually RESET unless it's physically required */
+/* - Remove unused compile options */
+/* 5.00.01 - Sarasota ( 5i ) adapters must always be scanned first */
+/* - Get rid on IOCTL_NEW_COMMAND code */
+/* - Add Extended DCDB Commands for Tape Support in 5I */
+/* 5.10.12 - use pci_dma interfaces, update for 2.5 kernel changes */
+/* 5.10.15 - remove unused code (sem, macros, etc.) */
+/* 5.30.00 - use __devexit_p() */
+/* 6.00.00 - Add 6x Adapters and Battery Flash */
+/* 6.10.00 - Remove 1G Addressing Limitations */
+/* 6.11.xx - Get VersionInfo buffer off the stack ! DDTS 60401 */
+/* 6.11.xx - Make Logical Drive Info structure safe for DMA DDTS 60639 */
+/* 7.10.18 - Add highmem_io flag in SCSI Templete for 2.4 kernels */
+/* - Fix path/name for scsi_hosts.h include for 2.6 kernels */
+/* - Fix sort order of 7k */
+/* - Remove 3 unused "inline" functions */
+/* 7.12.xx - Use STATIC functions wherever possible */
+/* - Clean up deprecated MODULE_PARM calls */
+/* 7.12.05 - Remove Version Matching per IBM request */
+/*****************************************************************************/
+
+/*
+ * Conditional Compilation directives for this driver:
+ *
+ * IPS_DEBUG - Turn on debugging info
+ *
+ * Parameters:
+ *
+ * debug:<number> - Set debug level to <number>
+ * NOTE: only works when IPS_DEBUG compile directive is used.
+ * 1 - Normal debug messages
+ * 2 - Verbose debug messages
+ * 11 - Method trace (non interrupt)
+ * 12 - Method trace (includes interrupt)
+ *
+ * noi2o - Don't use I2O Queues (ServeRAID 4 only)
+ * nommap - Don't use memory mapped I/O
+ * ioctlsize - Initial size of the IOCTL buffer
+ */
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <scsi/sg.h>
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+#include "ips.h"
+
+#include <linux/module.h>
+
+#include <linux/stat.h>
+
+#include <linux/spinlock.h>
+#include <linux/init.h>
+
+#include <linux/smp.h>
+
+#ifdef MODULE
+static char *ips = NULL;
+module_param(ips, charp, 0);
+#endif
+
+/*
+ * DRIVER_VER
+ */
+#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
+#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
+
+#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
+#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
+#endif
+
+#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
+ DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
+ PCI_DMA_BIDIRECTIONAL : \
+ scb->scsi_cmd->sc_data_direction)
+
+#ifdef IPS_DEBUG
+#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
+#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
+#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
+#else
+#define METHOD_TRACE(s, i)
+#define DEBUG(i, s)
+#define DEBUG_VAR(i, s, v...)
+#endif
+
+/*
+ * Function prototypes
+ */
+static int ips_detect(struct scsi_host_template *);
+static int ips_release(struct Scsi_Host *);
+static int ips_eh_abort(struct scsi_cmnd *);
+static int ips_eh_reset(struct scsi_cmnd *);
+static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
+static const char *ips_info(struct Scsi_Host *);
+static irqreturn_t do_ipsintr(int, void *);
+static int ips_hainit(ips_ha_t *);
+static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
+static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
+static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
+static int ips_online(ips_ha_t *, ips_scb_t *);
+static int ips_inquiry(ips_ha_t *, ips_scb_t *);
+static int ips_rdcap(ips_ha_t *, ips_scb_t *);
+static int ips_msense(ips_ha_t *, ips_scb_t *);
+static int ips_reqsen(ips_ha_t *, ips_scb_t *);
+static int ips_deallocatescbs(ips_ha_t *, int);
+static int ips_allocatescbs(ips_ha_t *);
+static int ips_reset_copperhead(ips_ha_t *);
+static int ips_reset_copperhead_memio(ips_ha_t *);
+static int ips_reset_morpheus(ips_ha_t *);
+static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
+static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
+static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
+static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
+static int ips_isintr_copperhead(ips_ha_t *);
+static int ips_isintr_copperhead_memio(ips_ha_t *);
+static int ips_isintr_morpheus(ips_ha_t *);
+static int ips_wait(ips_ha_t *, int, int);
+static int ips_write_driver_status(ips_ha_t *, int);
+static int ips_read_adapter_status(ips_ha_t *, int);
+static int ips_read_subsystem_parameters(ips_ha_t *, int);
+static int ips_read_config(ips_ha_t *, int);
+static int ips_clear_adapter(ips_ha_t *, int);
+static int ips_readwrite_page5(ips_ha_t *, int, int);
+static int ips_init_copperhead(ips_ha_t *);
+static int ips_init_copperhead_memio(ips_ha_t *);
+static int ips_init_morpheus(ips_ha_t *);
+static int ips_isinit_copperhead(ips_ha_t *);
+static int ips_isinit_copperhead_memio(ips_ha_t *);
+static int ips_isinit_morpheus(ips_ha_t *);
+static int ips_erase_bios(ips_ha_t *);
+static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
+static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
+static int ips_erase_bios_memio(ips_ha_t *);
+static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
+static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
+static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
+static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
+static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
+static void ips_free_flash_copperhead(ips_ha_t * ha);
+static void ips_get_bios_version(ips_ha_t *, int);
+static void ips_identify_controller(ips_ha_t *);
+static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
+static void ips_enable_int_copperhead(ips_ha_t *);
+static void ips_enable_int_copperhead_memio(ips_ha_t *);
+static void ips_enable_int_morpheus(ips_ha_t *);
+static int ips_intr_copperhead(ips_ha_t *);
+static int ips_intr_morpheus(ips_ha_t *);
+static void ips_next(ips_ha_t *, int);
+static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
+static void ipsintr_done(ips_ha_t *, struct ips_scb *);
+static void ips_done(ips_ha_t *, ips_scb_t *);
+static void ips_free(ips_ha_t *);
+static void ips_init_scb(ips_ha_t *, ips_scb_t *);
+static void ips_freescb(ips_ha_t *, ips_scb_t *);
+static void ips_setup_funclist(ips_ha_t *);
+static void ips_statinit(ips_ha_t *);
+static void ips_statinit_memio(ips_ha_t *);
+static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t);
+static void ips_ffdc_reset(ips_ha_t *, int);
+static void ips_ffdc_time(ips_ha_t *);
+static uint32_t ips_statupd_copperhead(ips_ha_t *);
+static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
+static uint32_t ips_statupd_morpheus(ips_ha_t *);
+static ips_scb_t *ips_getscb(ips_ha_t *);
+static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
+static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
+static void ips_putq_copp_tail(ips_copp_queue_t *,
+ ips_copp_wait_item_t *);
+static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
+static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
+ struct scsi_cmnd *);
+static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
+ ips_copp_wait_item_t *);
+static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
+
+static int ips_is_passthru(struct scsi_cmnd *);
+static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
+static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
+static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
+static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
+ unsigned int count);
+static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
+ unsigned int count);
+
+static int ips_write_info(struct Scsi_Host *, char *, int);
+static int ips_show_info(struct seq_file *, struct Scsi_Host *);
+static int ips_host_info(ips_ha_t *, struct seq_file *);
+static int ips_abort_init(ips_ha_t * ha, int index);
+static int ips_init_phase2(int index);
+
+static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
+static int ips_register_scsi(int index);
+
+static int ips_poll_for_flush_complete(ips_ha_t * ha);
+static void ips_flush_and_reset(ips_ha_t *ha);
+
+/*
+ * global variables
+ */
+static const char ips_name[] = "ips";
+static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */
+static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */
+static unsigned int ips_next_controller;
+static unsigned int ips_num_controllers;
+static unsigned int ips_released_controllers;
+static int ips_hotplug;
+static int ips_cmd_timeout = 60;
+static int ips_reset_timeout = 60 * 5;
+static int ips_force_memio = 1; /* Always use Memory Mapped I/O */
+static int ips_force_i2o = 1; /* Always use I2O command delivery */
+static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */
+static int ips_cd_boot; /* Booting from Manager CD */
+static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */
+static dma_addr_t ips_flashbusaddr;
+static long ips_FlashDataInUse; /* CD Boot - Flash Data In Use Flag */
+static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */
+static struct scsi_host_template ips_driver_template = {
+ .detect = ips_detect,
+ .release = ips_release,
+ .info = ips_info,
+ .queuecommand = ips_queue,
+ .eh_abort_handler = ips_eh_abort,
+ .eh_host_reset_handler = ips_eh_reset,
+ .proc_name = "ips",
+ .show_info = ips_show_info,
+ .write_info = ips_write_info,
+ .slave_configure = ips_slave_configure,
+ .bios_param = ips_biosparam,
+ .this_id = -1,
+ .sg_tablesize = IPS_MAX_SG,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
+};
+
+
+/* This table describes all ServeRAID Adapters */
+static struct pci_device_id ips_pci_table[] = {
+ { 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
+ { 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
+ { 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE( pci, ips_pci_table );
+
+static char ips_hot_plug_name[] = "ips";
+
+static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
+static void ips_remove_device(struct pci_dev *pci_dev);
+
+static struct pci_driver ips_pci_driver = {
+ .name = ips_hot_plug_name,
+ .id_table = ips_pci_table,
+ .probe = ips_insert_device,
+ .remove = ips_remove_device,
+};
+
+
+/*
+ * Necessary forward function protoypes
+ */
+static int ips_halt(struct notifier_block *nb, ulong event, void *buf);
+
+#define MAX_ADAPTER_NAME 15
+
+static char ips_adapter_name[][30] = {
+ "ServeRAID",
+ "ServeRAID II",
+ "ServeRAID on motherboard",
+ "ServeRAID on motherboard",
+ "ServeRAID 3H",
+ "ServeRAID 3L",
+ "ServeRAID 4H",
+ "ServeRAID 4M",
+ "ServeRAID 4L",
+ "ServeRAID 4Mx",
+ "ServeRAID 4Lx",
+ "ServeRAID 5i",
+ "ServeRAID 5i",
+ "ServeRAID 6M",
+ "ServeRAID 6i",
+ "ServeRAID 7t",
+ "ServeRAID 7k",
+ "ServeRAID 7M"
+};
+
+static struct notifier_block ips_notifier = {
+ ips_halt, NULL, 0
+};
+
+/*
+ * Direction table
+ */
+static char ips_command_direction[] = {
+ IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
+ IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
+ IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
+ IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
+ IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
+ IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
+ IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
+ IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
+ IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
+ IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
+ IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
+ IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
+ IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
+ IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
+ IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
+ IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
+ IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
+ IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
+};
+
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_setup */
+/* */
+/* Routine Description: */
+/* */
+/* setup parameters to the driver */
+/* */
+/****************************************************************************/
+static int
+ips_setup(char *ips_str)
+{
+
+ int i;
+ char *key;
+ char *value;
+ IPS_OPTION options[] = {
+ {"noi2o", &ips_force_i2o, 0},
+ {"nommap", &ips_force_memio, 0},
+ {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
+ {"cdboot", &ips_cd_boot, 0},
+ {"maxcmds", &MaxLiteCmds, 32},
+ };
+
+ /* Don't use strtok() anymore ( if 2.4 Kernel or beyond ) */
+ /* Search for value */
+ while ((key = strsep(&ips_str, ",."))) {
+ if (!*key)
+ continue;
+ value = strchr(key, ':');
+ if (value)
+ *value++ = '\0';
+ /*
+ * We now have key/value pairs.
+ * Update the variables
+ */
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ if (strncasecmp
+ (key, options[i].option_name,
+ strlen(options[i].option_name)) == 0) {
+ if (value)
+ *options[i].option_flag =
+ simple_strtoul(value, NULL, 0);
+ else
+ *options[i].option_flag =
+ options[i].option_value;
+ break;
+ }
+ }
+ }
+
+ return (1);
+}
+
+__setup("ips=", ips_setup);
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_detect */
+/* */
+/* Routine Description: */
+/* */
+/* Detect and initialize the driver */
+/* */
+/* NOTE: this routine is called under the io_request_lock spinlock */
+/* */
+/****************************************************************************/
+static int
+ips_detect(struct scsi_host_template * SHT)
+{
+ int i;
+
+ METHOD_TRACE("ips_detect", 1);
+
+#ifdef MODULE
+ if (ips)
+ ips_setup(ips);
+#endif
+
+ for (i = 0; i < ips_num_controllers; i++) {
+ if (ips_register_scsi(i))
+ ips_free(ips_ha[i]);
+ ips_released_controllers++;
+ }
+ ips_hotplug = 1;
+ return (ips_num_controllers);
+}
+
+/****************************************************************************/
+/* configure the function pointers to use the functions that will work */
+/* with the found version of the adapter */
+/****************************************************************************/
+static void
+ips_setup_funclist(ips_ha_t * ha)
+{
+
+ /*
+ * Setup Functions
+ */
+ if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
+ /* morpheus / marco / sebring */
+ ha->func.isintr = ips_isintr_morpheus;
+ ha->func.isinit = ips_isinit_morpheus;
+ ha->func.issue = ips_issue_i2o_memio;
+ ha->func.init = ips_init_morpheus;
+ ha->func.statupd = ips_statupd_morpheus;
+ ha->func.reset = ips_reset_morpheus;
+ ha->func.intr = ips_intr_morpheus;
+ ha->func.enableint = ips_enable_int_morpheus;
+ } else if (IPS_USE_MEMIO(ha)) {
+ /* copperhead w/MEMIO */
+ ha->func.isintr = ips_isintr_copperhead_memio;
+ ha->func.isinit = ips_isinit_copperhead_memio;
+ ha->func.init = ips_init_copperhead_memio;
+ ha->func.statupd = ips_statupd_copperhead_memio;
+ ha->func.statinit = ips_statinit_memio;
+ ha->func.reset = ips_reset_copperhead_memio;
+ ha->func.intr = ips_intr_copperhead;
+ ha->func.erasebios = ips_erase_bios_memio;
+ ha->func.programbios = ips_program_bios_memio;
+ ha->func.verifybios = ips_verify_bios_memio;
+ ha->func.enableint = ips_enable_int_copperhead_memio;
+ if (IPS_USE_I2O_DELIVER(ha))
+ ha->func.issue = ips_issue_i2o_memio;
+ else
+ ha->func.issue = ips_issue_copperhead_memio;
+ } else {
+ /* copperhead */
+ ha->func.isintr = ips_isintr_copperhead;
+ ha->func.isinit = ips_isinit_copperhead;
+ ha->func.init = ips_init_copperhead;
+ ha->func.statupd = ips_statupd_copperhead;
+ ha->func.statinit = ips_statinit;
+ ha->func.reset = ips_reset_copperhead;
+ ha->func.intr = ips_intr_copperhead;
+ ha->func.erasebios = ips_erase_bios;
+ ha->func.programbios = ips_program_bios;
+ ha->func.verifybios = ips_verify_bios;
+ ha->func.enableint = ips_enable_int_copperhead;
+
+ if (IPS_USE_I2O_DELIVER(ha))
+ ha->func.issue = ips_issue_i2o;
+ else
+ ha->func.issue = ips_issue_copperhead;
+ }
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_release */
+/* */
+/* Routine Description: */
+/* */
+/* Remove a driver */
+/* */
+/****************************************************************************/
+static int
+ips_release(struct Scsi_Host *sh)
+{
+ ips_scb_t *scb;
+ ips_ha_t *ha;
+ int i;
+
+ METHOD_TRACE("ips_release", 1);
+
+ scsi_remove_host(sh);
+
+ for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;
+
+ if (i == IPS_MAX_ADAPTERS) {
+ printk(KERN_WARNING
+ "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
+ BUG();
+ return (FALSE);
+ }
+
+ ha = IPS_HA(sh);
+
+ if (!ha)
+ return (FALSE);
+
+ /* flush the cache on the controller */
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FLUSH;
+
+ scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
+ scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.flush_cache.state = IPS_NORM_STATE;
+ scb->cmd.flush_cache.reserved = 0;
+ scb->cmd.flush_cache.reserved2 = 0;
+ scb->cmd.flush_cache.reserved3 = 0;
+ scb->cmd.flush_cache.reserved4 = 0;
+
+ IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
+
+ /* send command */
+ if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
+ IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");
+
+ IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");
+
+ ips_sh[i] = NULL;
+ ips_ha[i] = NULL;
+
+ /* free extra memory */
+ ips_free(ha);
+
+ /* free IRQ */
+ free_irq(ha->pcidev->irq, ha);
+
+ scsi_host_put(sh);
+
+ ips_released_controllers++;
+
+ return (FALSE);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_halt */
+/* */
+/* Routine Description: */
+/* */
+/* Perform cleanup when the system reboots */
+/* */
+/****************************************************************************/
+static int
+ips_halt(struct notifier_block *nb, ulong event, void *buf)
+{
+ ips_scb_t *scb;
+ ips_ha_t *ha;
+ int i;
+
+ if ((event != SYS_RESTART) && (event != SYS_HALT) &&
+ (event != SYS_POWER_OFF))
+ return (NOTIFY_DONE);
+
+ for (i = 0; i < ips_next_controller; i++) {
+ ha = (ips_ha_t *) ips_ha[i];
+
+ if (!ha)
+ continue;
+
+ if (!ha->active)
+ continue;
+
+ /* flush the cache on the controller */
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FLUSH;
+
+ scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
+ scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.flush_cache.state = IPS_NORM_STATE;
+ scb->cmd.flush_cache.reserved = 0;
+ scb->cmd.flush_cache.reserved2 = 0;
+ scb->cmd.flush_cache.reserved3 = 0;
+ scb->cmd.flush_cache.reserved4 = 0;
+
+ IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");
+
+ /* send command */
+ if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
+ IPS_FAILURE)
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Incomplete Flush.\n");
+ else
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Flushing Complete.\n");
+ }
+
+ return (NOTIFY_OK);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_eh_abort */
+/* */
+/* Routine Description: */
+/* */
+/* Abort a command (using the new error code stuff) */
+/* Note: this routine is called under the io_request_lock */
+/****************************************************************************/
+int ips_eh_abort(struct scsi_cmnd *SC)
+{
+ ips_ha_t *ha;
+ ips_copp_wait_item_t *item;
+ int ret;
+ struct Scsi_Host *host;
+
+ METHOD_TRACE("ips_eh_abort", 1);
+
+ if (!SC)
+ return (FAILED);
+
+ host = SC->device->host;
+ ha = (ips_ha_t *) SC->device->host->hostdata;
+
+ if (!ha)
+ return (FAILED);
+
+ if (!ha->active)
+ return (FAILED);
+
+ spin_lock(host->host_lock);
+
+ /* See if the command is on the copp queue */
+ item = ha->copp_waitlist.head;
+ while ((item) && (item->scsi_cmd != SC))
+ item = item->next;
+
+ if (item) {
+ /* Found it */
+ ips_removeq_copp(&ha->copp_waitlist, item);
+ ret = (SUCCESS);
+
+ /* See if the command is on the wait queue */
+ } else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
+ /* command not sent yet */
+ ret = (SUCCESS);
+ } else {
+ /* command must have already been sent */
+ ret = (FAILED);
+ }
+
+ spin_unlock(host->host_lock);
+ return ret;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_eh_reset */
+/* */
+/* Routine Description: */
+/* */
+/* Reset the controller (with new eh error code) */
+/* */
+/* NOTE: this routine is called under the io_request_lock spinlock */
+/* */
+/****************************************************************************/
+static int __ips_eh_reset(struct scsi_cmnd *SC)
+{
+ int ret;
+ int i;
+ ips_ha_t *ha;
+ ips_scb_t *scb;
+ ips_copp_wait_item_t *item;
+
+ METHOD_TRACE("ips_eh_reset", 1);
+
+#ifdef NO_IPS_RESET
+ return (FAILED);
+#else
+
+ if (!SC) {
+ DEBUG(1, "Reset called with NULL scsi command");
+
+ return (FAILED);
+ }
+
+ ha = (ips_ha_t *) SC->device->host->hostdata;
+
+ if (!ha) {
+ DEBUG(1, "Reset called with NULL ha struct");
+
+ return (FAILED);
+ }
+
+ if (!ha->active)
+ return (FAILED);
+
+ /* See if the command is on the copp queue */
+ item = ha->copp_waitlist.head;
+ while ((item) && (item->scsi_cmd != SC))
+ item = item->next;
+
+ if (item) {
+ /* Found it */
+ ips_removeq_copp(&ha->copp_waitlist, item);
+ return (SUCCESS);
+ }
+
+ /* See if the command is on the wait queue */
+ if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
+ /* command not sent yet */
+ return (SUCCESS);
+ }
+
+ /* An explanation for the casual observer: */
+ /* Part of the function of a RAID controller is automatic error */
+ /* detection and recovery. As such, the only problem that physically */
+ /* resetting an adapter will ever fix is when, for some reason, */
+ /* the driver is not successfully communicating with the adapter. */
+ /* Therefore, we will attempt to flush this adapter. If that succeeds, */
+ /* then there's no real purpose in a physical reset. This will complete */
+ /* much faster and avoids any problems that might be caused by a */
+ /* physical reset ( such as having to fail all the outstanding I/O's ). */
+
+ if (ha->ioctl_reset == 0) { /* IF Not an IOCTL Requested Reset */
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FLUSH;
+
+ scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
+ scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.flush_cache.state = IPS_NORM_STATE;
+ scb->cmd.flush_cache.reserved = 0;
+ scb->cmd.flush_cache.reserved2 = 0;
+ scb->cmd.flush_cache.reserved3 = 0;
+ scb->cmd.flush_cache.reserved4 = 0;
+
+ /* Attempt the flush command */
+ ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
+ if (ret == IPS_SUCCESS) {
+ IPS_PRINTK(KERN_NOTICE, ha->pcidev,
+ "Reset Request - Flushed Cache\n");
+ return (SUCCESS);
+ }
+ }
+
+ /* Either we can't communicate with the adapter or it's an IOCTL request */
+ /* from a utility. A physical reset is needed at this point. */
+
+ ha->ioctl_reset = 0; /* Reset the IOCTL Requested Reset Flag */
+
+ /*
+ * command must have already been sent
+ * reset the controller
+ */
+ IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
+ ret = (*ha->func.reset) (ha);
+
+ if (!ret) {
+ struct scsi_cmnd *scsi_cmd;
+
+ IPS_PRINTK(KERN_NOTICE, ha->pcidev,
+ "Controller reset failed - controller now offline.\n");
+
+ /* Now fail all of the active commands */
+ DEBUG_VAR(1, "(%s%d) Failing active commands",
+ ips_name, ha->host_num);
+
+ while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ ips_freescb(ha, scb);
+ }
+
+ /* Now fail all of the pending commands */
+ DEBUG_VAR(1, "(%s%d) Failing pending commands",
+ ips_name, ha->host_num);
+
+ while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
+ scsi_cmd->result = DID_ERROR;
+ scsi_cmd->scsi_done(scsi_cmd);
+ }
+
+ ha->active = FALSE;
+ return (FAILED);
+ }
+
+ if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
+ struct scsi_cmnd *scsi_cmd;
+
+ IPS_PRINTK(KERN_NOTICE, ha->pcidev,
+ "Controller reset failed - controller now offline.\n");
+
+ /* Now fail all of the active commands */
+ DEBUG_VAR(1, "(%s%d) Failing active commands",
+ ips_name, ha->host_num);
+
+ while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ ips_freescb(ha, scb);
+ }
+
+ /* Now fail all of the pending commands */
+ DEBUG_VAR(1, "(%s%d) Failing pending commands",
+ ips_name, ha->host_num);
+
+ while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
+ scsi_cmd->result = DID_ERROR << 16;
+ scsi_cmd->scsi_done(scsi_cmd);
+ }
+
+ ha->active = FALSE;
+ return (FAILED);
+ }
+
+ /* FFDC */
+ if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ ha->last_ffdc = tv.tv_sec;
+ ha->reset_count++;
+ ips_ffdc_reset(ha, IPS_INTR_IORL);
+ }
+
+ /* Now fail all of the active commands */
+ DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);
+
+ while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
+ scb->scsi_cmd->result = DID_RESET << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ ips_freescb(ha, scb);
+ }
+
+ /* Reset DCDB active command bits */
+ for (i = 1; i < ha->nbus; i++)
+ ha->dcdb_active[i - 1] = 0;
+
+ /* Reset the number of active IOCTLs */
+ ha->num_ioctl = 0;
+
+ ips_next(ha, IPS_INTR_IORL);
+
+ return (SUCCESS);
+#endif /* NO_IPS_RESET */
+
+}
+
+static int ips_eh_reset(struct scsi_cmnd *SC)
+{
+ int rc;
+
+ spin_lock_irq(SC->device->host->host_lock);
+ rc = __ips_eh_reset(SC);
+ spin_unlock_irq(SC->device->host->host_lock);
+
+ return rc;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_queue */
+/* */
+/* Routine Description: */
+/* */
+/* Send a command to the controller */
+/* */
+/* NOTE: */
+/* Linux obtains io_request_lock before calling this function */
+/* */
+/****************************************************************************/
+static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
+{
+ ips_ha_t *ha;
+ ips_passthru_t *pt;
+
+ METHOD_TRACE("ips_queue", 1);
+
+ ha = (ips_ha_t *) SC->device->host->hostdata;
+
+ if (!ha)
+ return (1);
+
+ if (!ha->active)
+ return (DID_ERROR);
+
+ if (ips_is_passthru(SC)) {
+ if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
+ SC->result = DID_BUS_BUSY << 16;
+ done(SC);
+
+ return (0);
+ }
+ } else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
+ SC->result = DID_BUS_BUSY << 16;
+ done(SC);
+
+ return (0);
+ }
+
+ SC->scsi_done = done;
+
+ DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
+ ips_name,
+ ha->host_num,
+ SC->cmnd[0],
+ SC->device->channel, SC->device->id, SC->device->lun);
+
+ /* Check for command to initiator IDs */
+ if ((scmd_channel(SC) > 0)
+ && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
+ SC->result = DID_NO_CONNECT << 16;
+ done(SC);
+
+ return (0);
+ }
+
+ if (ips_is_passthru(SC)) {
+
+ ips_copp_wait_item_t *scratch;
+
+ /* A Reset IOCTL is only sent by the boot CD in extreme cases. */
+ /* There can never be any system activity ( network or disk ), but check */
+ /* anyway just as a good practice. */
+ pt = (ips_passthru_t *) scsi_sglist(SC);
+ if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
+ (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
+ if (ha->scb_activelist.count != 0) {
+ SC->result = DID_BUS_BUSY << 16;
+ done(SC);
+ return (0);
+ }
+ ha->ioctl_reset = 1; /* This reset request is from an IOCTL */
+ __ips_eh_reset(SC);
+ SC->result = DID_OK << 16;
+ SC->scsi_done(SC);
+ return (0);
+ }
+
+ /* allocate space for the scribble */
+ scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);
+
+ if (!scratch) {
+ SC->result = DID_ERROR << 16;
+ done(SC);
+
+ return (0);
+ }
+
+ scratch->scsi_cmd = SC;
+ scratch->next = NULL;
+
+ ips_putq_copp_tail(&ha->copp_waitlist, scratch);
+ } else {
+ ips_putq_wait_tail(&ha->scb_waitlist, SC);
+ }
+
+ ips_next(ha, IPS_INTR_IORL);
+
+ return (0);
+}
+
+static DEF_SCSI_QCMD(ips_queue)
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_biosparam */
+/* */
+/* Routine Description: */
+/* */
+/* Set bios geometry for the controller */
+/* */
+/****************************************************************************/
+static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
+ int heads;
+ int sectors;
+ int cylinders;
+
+ METHOD_TRACE("ips_biosparam", 1);
+
+ if (!ha)
+ /* ?!?! host adater info invalid */
+ return (0);
+
+ if (!ha->active)
+ return (0);
+
+ if (!ips_read_adapter_status(ha, IPS_INTR_ON))
+ /* ?!?! Enquiry command failed */
+ return (0);
+
+ if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
+ heads = IPS_NORM_HEADS;
+ sectors = IPS_NORM_SECTORS;
+ } else {
+ heads = IPS_COMP_HEADS;
+ sectors = IPS_COMP_SECTORS;
+ }
+
+ cylinders = (unsigned long) capacity / (heads * sectors);
+
+ DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
+ heads, sectors, cylinders);
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_slave_configure */
+/* */
+/* Routine Description: */
+/* */
+/* Set queue depths on devices once scan is complete */
+/* */
+/****************************************************************************/
+static int
+ips_slave_configure(struct scsi_device * SDptr)
+{
+ ips_ha_t *ha;
+ int min;
+
+ ha = IPS_HA(SDptr->host);
+ if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
+ min = ha->max_cmds / 2;
+ if (ha->enq->ucLogDriveCount <= 2)
+ min = ha->max_cmds - 1;
+ scsi_change_queue_depth(SDptr, min);
+ }
+
+ SDptr->skip_ms_page_8 = 1;
+ SDptr->skip_ms_page_3f = 1;
+ return 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: do_ipsintr */
+/* */
+/* Routine Description: */
+/* */
+/* Wrapper for the interrupt handler */
+/* */
+/****************************************************************************/
+static irqreturn_t
+do_ipsintr(int irq, void *dev_id)
+{
+ ips_ha_t *ha;
+ struct Scsi_Host *host;
+ int irqstatus;
+
+ METHOD_TRACE("do_ipsintr", 2);
+
+ ha = (ips_ha_t *) dev_id;
+ if (!ha)
+ return IRQ_NONE;
+ host = ips_sh[ha->host_num];
+ /* interrupt during initialization */
+ if (!host) {
+ (*ha->func.intr) (ha);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(host->host_lock);
+
+ if (!ha->active) {
+ spin_unlock(host->host_lock);
+ return IRQ_HANDLED;
+ }
+
+ irqstatus = (*ha->func.intr) (ha);
+
+ spin_unlock(host->host_lock);
+
+ /* start the next command */
+ ips_next(ha, IPS_INTR_ON);
+ return IRQ_RETVAL(irqstatus);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_intr_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Polling interrupt handler */
+/* */
+/* ASSUMES interrupts are disabled */
+/* */
+/****************************************************************************/
+int
+ips_intr_copperhead(ips_ha_t * ha)
+{
+ ips_stat_t *sp;
+ ips_scb_t *scb;
+ IPS_STATUS cstatus;
+ int intrstatus;
+
+ METHOD_TRACE("ips_intr", 2);
+
+ if (!ha)
+ return 0;
+
+ if (!ha->active)
+ return 0;
+
+ intrstatus = (*ha->func.isintr) (ha);
+
+ if (!intrstatus) {
+ /*
+ * Unexpected/Shared interrupt
+ */
+
+ return 0;
+ }
+
+ while (TRUE) {
+ sp = &ha->sp;
+
+ intrstatus = (*ha->func.isintr) (ha);
+
+ if (!intrstatus)
+ break;
+ else
+ cstatus.value = (*ha->func.statupd) (ha);
+
+ if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
+ /* Spurious Interrupt ? */
+ continue;
+ }
+
+ ips_chkstatus(ha, &cstatus);
+ scb = (ips_scb_t *) sp->scb_addr;
+
+ /*
+ * use the callback function to finish things up
+ * NOTE: interrupts are OFF for this
+ */
+ (*scb->callback) (ha, scb);
+ } /* end while */
+ return 1;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_intr_morpheus */
+/* */
+/* Routine Description: */
+/* */
+/* Polling interrupt handler */
+/* */
+/* ASSUMES interrupts are disabled */
+/* */
+/****************************************************************************/
+int
+ips_intr_morpheus(ips_ha_t * ha)
+{
+ ips_stat_t *sp;
+ ips_scb_t *scb;
+ IPS_STATUS cstatus;
+ int intrstatus;
+
+ METHOD_TRACE("ips_intr_morpheus", 2);
+
+ if (!ha)
+ return 0;
+
+ if (!ha->active)
+ return 0;
+
+ intrstatus = (*ha->func.isintr) (ha);
+
+ if (!intrstatus) {
+ /*
+ * Unexpected/Shared interrupt
+ */
+
+ return 0;
+ }
+
+ while (TRUE) {
+ sp = &ha->sp;
+
+ intrstatus = (*ha->func.isintr) (ha);
+
+ if (!intrstatus)
+ break;
+ else
+ cstatus.value = (*ha->func.statupd) (ha);
+
+ if (cstatus.value == 0xffffffff)
+ /* No more to process */
+ break;
+
+ if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Spurious interrupt; no ccb.\n");
+
+ continue;
+ }
+
+ ips_chkstatus(ha, &cstatus);
+ scb = (ips_scb_t *) sp->scb_addr;
+
+ /*
+ * use the callback function to finish things up
+ * NOTE: interrupts are OFF for this
+ */
+ (*scb->callback) (ha, scb);
+ } /* end while */
+ return 1;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_info */
+/* */
+/* Routine Description: */
+/* */
+/* Return info about the driver */
+/* */
+/****************************************************************************/
+static const char *
+ips_info(struct Scsi_Host *SH)
+{
+ static char buffer[256];
+ char *bp;
+ ips_ha_t *ha;
+
+ METHOD_TRACE("ips_info", 1);
+
+ ha = IPS_HA(SH);
+
+ if (!ha)
+ return (NULL);
+
+ bp = &buffer[0];
+ memset(bp, 0, sizeof (buffer));
+
+ sprintf(bp, "%s%s%s Build %d", "IBM PCI ServeRAID ",
+ IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);
+
+ if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME) {
+ strcat(bp, " <");
+ strcat(bp, ips_adapter_name[ha->ad_type - 1]);
+ strcat(bp, ">");
+ }
+
+ return (bp);
+}
+
+static int
+ips_write_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ int i;
+ ips_ha_t *ha = NULL;
+
+ /* Find our host structure */
+ for (i = 0; i < ips_next_controller; i++) {
+ if (ips_sh[i]) {
+ if (ips_sh[i] == host) {
+ ha = (ips_ha_t *) ips_sh[i]->hostdata;
+ break;
+ }
+ }
+ }
+
+ if (!ha)
+ return (-EINVAL);
+
+ return 0;
+}
+
+static int
+ips_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ int i;
+ ips_ha_t *ha = NULL;
+
+ /* Find our host structure */
+ for (i = 0; i < ips_next_controller; i++) {
+ if (ips_sh[i]) {
+ if (ips_sh[i] == host) {
+ ha = (ips_ha_t *) ips_sh[i]->hostdata;
+ break;
+ }
+ }
+ }
+
+ if (!ha)
+ return (-EINVAL);
+
+ return ips_host_info(ha, m);
+}
+
+/*--------------------------------------------------------------------------*/
+/* Helper Functions */
+/*--------------------------------------------------------------------------*/
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_is_passthru */
+/* */
+/* Routine Description: */
+/* */
+/* Determine if the specified SCSI command is really a passthru command */
+/* */
+/****************************************************************************/
+static int ips_is_passthru(struct scsi_cmnd *SC)
+{
+ unsigned long flags;
+
+ METHOD_TRACE("ips_is_passthru", 1);
+
+ if (!SC)
+ return (0);
+
+ if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
+ (SC->device->channel == 0) &&
+ (SC->device->id == IPS_ADAPTER_ID) &&
+ (SC->device->lun == 0) && scsi_sglist(SC)) {
+ struct scatterlist *sg = scsi_sglist(SC);
+ char *buffer;
+
+ /* kmap_atomic() ensures addressability of the user buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg_page(sg)) + sg->offset;
+ if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+ buffer[2] == 'P' && buffer[3] == 'P') {
+ kunmap_atomic(buffer - sg->offset);
+ local_irq_restore(flags);
+ return 1;
+ }
+ kunmap_atomic(buffer - sg->offset);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_alloc_passthru_buffer */
+/* */
+/* Routine Description: */
+/* allocate a buffer large enough for the ioctl data if the ioctl buffer */
+/* is too small or doesn't exist */
+/****************************************************************************/
+static int
+ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
+{
+ void *bigger_buf;
+ dma_addr_t dma_busaddr;
+
+ if (ha->ioctl_data && length <= ha->ioctl_len)
+ return 0;
+ /* there is no buffer or it's not big enough, allocate a new one */
+ bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+ if (bigger_buf) {
+ /* free the old memory */
+ pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
+ ha->ioctl_busaddr);
+ /* use the new memory */
+ ha->ioctl_data = (char *) bigger_buf;
+ ha->ioctl_len = length;
+ ha->ioctl_busaddr = dma_busaddr;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_make_passthru */
+/* */
+/* Routine Description: */
+/* */
+/* Make a passthru command out of the info in the Scsi block */
+/* */
+/****************************************************************************/
+static int
+ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
+{
+ ips_passthru_t *pt;
+ int length = 0;
+ int i, ret;
+ struct scatterlist *sg = scsi_sglist(SC);
+
+ METHOD_TRACE("ips_make_passthru", 1);
+
+ scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+ length += sg->length;
+
+ if (length < sizeof (ips_passthru_t)) {
+ /* wrong size */
+ DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
+ ips_name, ha->host_num);
+ return (IPS_FAILURE);
+ }
+ if (ips_alloc_passthru_buffer(ha, length)) {
+ /* allocation failure! If ha->ioctl_data exists, use it to return
+ some error codes. Return a failed command to the scsi layer. */
+ if (ha->ioctl_data) {
+ pt = (ips_passthru_t *) ha->ioctl_data;
+ ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
+ pt->BasicStatus = 0x0B;
+ pt->ExtendedStatus = 0x00;
+ ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
+ }
+ return IPS_FAILURE;
+ }
+ ha->ioctl_datasize = length;
+
+ ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
+ pt = (ips_passthru_t *) ha->ioctl_data;
+
+ /*
+ * Some notes about the passthru interface used
+ *
+ * IF the scsi op_code == 0x0d then we assume
+ * that the data came along with/goes with the
+ * packet we received from the sg driver. In this
+ * case the CmdBSize field of the pt structure is
+ * used for the size of the buffer.
+ */
+
+ switch (pt->CoppCmd) {
+ case IPS_NUMCTRLS:
+ memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
+ &ips_num_controllers, sizeof (int));
+ ips_scmd_buf_write(SC, ha->ioctl_data,
+ sizeof (ips_passthru_t) + sizeof (int));
+ SC->result = DID_OK << 16;
+
+ return (IPS_SUCCESS_IMM);
+
+ case IPS_COPPUSRCMD:
+ case IPS_COPPIOCCMD:
+ if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
+ if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
+ /* wrong size */
+ DEBUG_VAR(1,
+ "(%s%d) Passthru structure wrong size",
+ ips_name, ha->host_num);
+
+ return (IPS_FAILURE);
+ }
+
+ if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
+ pt->CoppCP.cmd.flashfw.op_code ==
+ IPS_CMD_RW_BIOSFW) {
+ ret = ips_flash_copperhead(ha, pt, scb);
+ ips_scmd_buf_write(SC, ha->ioctl_data,
+ sizeof (ips_passthru_t));
+ return ret;
+ }
+ if (ips_usrcmd(ha, pt, scb))
+ return (IPS_SUCCESS);
+ else
+ return (IPS_FAILURE);
+ }
+
+ break;
+
+ } /* end switch */
+
+ return (IPS_FAILURE);
+}
+
+/****************************************************************************/
+/* Routine Name: ips_flash_copperhead */
+/* Routine Description: */
+/* Flash the BIOS/FW on a Copperhead style controller */
+/****************************************************************************/
+static int
+ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
+{
+ int datasize;
+
+ /* Trombone is the only copperhead that can do packet flash, but only
+ * for firmware. No one said it had to make sense. */
+ if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
+ if (ips_usrcmd(ha, pt, scb))
+ return IPS_SUCCESS;
+ else
+ return IPS_FAILURE;
+ }
+ pt->BasicStatus = 0x0B;
+ pt->ExtendedStatus = 0;
+ scb->scsi_cmd->result = DID_OK << 16;
+ /* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */
+ /* avoid allocating a huge buffer per adapter ( which can fail ). */
+ if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
+ pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
+ pt->BasicStatus = 0;
+ return ips_flash_bios(ha, pt, scb);
+ } else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
+ if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
+ ha->flash_data = ips_FlashData;
+ ha->flash_busaddr = ips_flashbusaddr;
+ ha->flash_len = PAGE_SIZE << 7;
+ ha->flash_datasize = 0;
+ } else if (!ha->flash_data) {
+ datasize = pt->CoppCP.cmd.flashfw.total_packets *
+ pt->CoppCP.cmd.flashfw.count;
+ ha->flash_data = pci_alloc_consistent(ha->pcidev,
+ datasize,
+ &ha->flash_busaddr);
+ if (!ha->flash_data){
+ printk(KERN_WARNING "Unable to allocate a flash buffer\n");
+ return IPS_FAILURE;
+ }
+ ha->flash_datasize = 0;
+ ha->flash_len = datasize;
+ } else
+ return IPS_FAILURE;
+ } else {
+ if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
+ ha->flash_len) {
+ ips_free_flash_copperhead(ha);
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "failed size sanity check\n");
+ return IPS_FAILURE;
+ }
+ }
+ if (!ha->flash_data)
+ return IPS_FAILURE;
+ pt->BasicStatus = 0;
+ memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
+ pt->CoppCP.cmd.flashfw.count);
+ ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
+ if (pt->CoppCP.cmd.flashfw.packet_num ==
+ pt->CoppCP.cmd.flashfw.total_packets - 1) {
+ if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
+ return ips_flash_bios(ha, pt, scb);
+ else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
+ return ips_flash_firmware(ha, pt, scb);
+ }
+ return IPS_SUCCESS_IMM;
+}
+
+/****************************************************************************/
+/* Routine Name: ips_flash_bios */
+/* Routine Description: */
+/* flashes the bios of a copperhead adapter */
+/****************************************************************************/
+static int
+ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
+{
+
+ if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
+ pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
+ if ((!ha->func.programbios) || (!ha->func.erasebios) ||
+ (!ha->func.verifybios))
+ goto error;
+ if ((*ha->func.erasebios) (ha)) {
+ DEBUG_VAR(1,
+ "(%s%d) flash bios failed - unable to erase flash",
+ ips_name, ha->host_num);
+ goto error;
+ } else
+ if ((*ha->func.programbios) (ha,
+ ha->flash_data +
+ IPS_BIOS_HEADER,
+ ha->flash_datasize -
+ IPS_BIOS_HEADER, 0)) {
+ DEBUG_VAR(1,
+ "(%s%d) flash bios failed - unable to flash",
+ ips_name, ha->host_num);
+ goto error;
+ } else
+ if ((*ha->func.verifybios) (ha,
+ ha->flash_data +
+ IPS_BIOS_HEADER,
+ ha->flash_datasize -
+ IPS_BIOS_HEADER, 0)) {
+ DEBUG_VAR(1,
+ "(%s%d) flash bios failed - unable to verify flash",
+ ips_name, ha->host_num);
+ goto error;
+ }
+ ips_free_flash_copperhead(ha);
+ return IPS_SUCCESS_IMM;
+ } else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
+ pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
+ if (!ha->func.erasebios)
+ goto error;
+ if ((*ha->func.erasebios) (ha)) {
+ DEBUG_VAR(1,
+ "(%s%d) flash bios failed - unable to erase flash",
+ ips_name, ha->host_num);
+ goto error;
+ }
+ return IPS_SUCCESS_IMM;
+ }
+ error:
+ pt->BasicStatus = 0x0B;
+ pt->ExtendedStatus = 0x00;
+ ips_free_flash_copperhead(ha);
+ return IPS_FAILURE;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_fill_scb_sg_single */
+/* */
+/* Routine Description: */
+/* Fill in a single scb sg_list element from an address */
+/* return a -1 if a breakup occurred */
+/****************************************************************************/
+static int
+ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
+ ips_scb_t * scb, int indx, unsigned int e_len)
+{
+
+ int ret_val = 0;
+
+ if ((scb->data_len + e_len) > ha->max_xfer) {
+ e_len = ha->max_xfer - scb->data_len;
+ scb->breakup = indx;
+ ++scb->sg_break;
+ ret_val = -1;
+ } else {
+ scb->breakup = 0;
+ scb->sg_break = 0;
+ }
+ if (IPS_USE_ENH_SGLIST(ha)) {
+ scb->sg_list.enh_list[indx].address_lo =
+ cpu_to_le32(pci_dma_lo32(busaddr));
+ scb->sg_list.enh_list[indx].address_hi =
+ cpu_to_le32(pci_dma_hi32(busaddr));
+ scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
+ } else {
+ scb->sg_list.std_list[indx].address =
+ cpu_to_le32(pci_dma_lo32(busaddr));
+ scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
+ }
+
+ ++scb->sg_len;
+ scb->data_len += e_len;
+ return ret_val;
+}
+
+/****************************************************************************/
+/* Routine Name: ips_flash_firmware */
+/* Routine Description: */
+/* flashes the firmware of a copperhead adapter */
+/****************************************************************************/
+static int
+ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
+{
+ IPS_SG_LIST sg_list;
+ uint32_t cmd_busaddr;
+
+ if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
+ pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
+ memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
+ pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
+ pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
+ } else {
+ pt->BasicStatus = 0x0B;
+ pt->ExtendedStatus = 0x00;
+ ips_free_flash_copperhead(ha);
+ return IPS_FAILURE;
+ }
+ /* Save the S/G list pointer so it doesn't get clobbered */
+ sg_list.list = scb->sg_list.list;
+ cmd_busaddr = scb->scb_busaddr;
+ /* copy in the CP */
+ memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
+ /* FIX stuff that might be wrong */
+ scb->sg_list.list = sg_list.list;
+ scb->scb_busaddr = cmd_busaddr;
+ scb->bus = scb->scsi_cmd->device->channel;
+ scb->target_id = scb->scsi_cmd->device->id;
+ scb->lun = scb->scsi_cmd->device->lun;
+ scb->sg_len = 0;
+ scb->data_len = 0;
+ scb->flags = 0;
+ scb->op_code = 0;
+ scb->callback = ipsintr_done;
+ scb->timeout = ips_cmd_timeout;
+
+ scb->data_len = ha->flash_datasize;
+ scb->data_busaddr =
+ pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+ IPS_DMA_DIR(scb));
+ scb->flags |= IPS_SCB_MAP_SINGLE;
+ scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
+ if (pt->TimeOut)
+ scb->timeout = pt->TimeOut;
+ scb->scsi_cmd->result = DID_OK << 16;
+ return IPS_SUCCESS;
+}
+
+/****************************************************************************/
+/* Routine Name: ips_free_flash_copperhead */
+/* Routine Description: */
+/* release the memory resources used to hold the flash image */
+/****************************************************************************/
+static void
+ips_free_flash_copperhead(ips_ha_t * ha)
+{
+ if (ha->flash_data == ips_FlashData)
+ test_and_clear_bit(0, &ips_FlashDataInUse);
+ else if (ha->flash_data)
+ pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
+ ha->flash_busaddr);
+ ha->flash_data = NULL;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_usrcmd */
+/* */
+/* Routine Description: */
+/* */
+/* Process a user command and make it ready to send */
+/* */
+/****************************************************************************/
+static int
+ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
+{
+ IPS_SG_LIST sg_list;
+ uint32_t cmd_busaddr;
+
+ METHOD_TRACE("ips_usrcmd", 1);
+
+ if ((!scb) || (!pt) || (!ha))
+ return (0);
+
+ /* Save the S/G list pointer so it doesn't get clobbered */
+ sg_list.list = scb->sg_list.list;
+ cmd_busaddr = scb->scb_busaddr;
+ /* copy in the CP */
+ memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
+ memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));
+
+ /* FIX stuff that might be wrong */
+ scb->sg_list.list = sg_list.list;
+ scb->scb_busaddr = cmd_busaddr;
+ scb->bus = scb->scsi_cmd->device->channel;
+ scb->target_id = scb->scsi_cmd->device->id;
+ scb->lun = scb->scsi_cmd->device->lun;
+ scb->sg_len = 0;
+ scb->data_len = 0;
+ scb->flags = 0;
+ scb->op_code = 0;
+ scb->callback = ipsintr_done;
+ scb->timeout = ips_cmd_timeout;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+
+ /* we don't support DCDB/READ/WRITE Scatter Gather */
+ if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
+ (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
+ (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
+ return (0);
+
+ if (pt->CmdBSize) {
+ scb->data_len = pt->CmdBSize;
+ scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
+ } else {
+ scb->data_busaddr = 0L;
+ }
+
+ if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
+ scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
+ (unsigned long) &scb->
+ dcdb -
+ (unsigned long) scb);
+
+ if (pt->CmdBSize) {
+ if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
+ scb->dcdb.buffer_pointer =
+ cpu_to_le32(scb->data_busaddr);
+ else
+ scb->cmd.basic_io.sg_addr =
+ cpu_to_le32(scb->data_busaddr);
+ }
+
+ /* set timeouts */
+ if (pt->TimeOut) {
+ scb->timeout = pt->TimeOut;
+
+ if (pt->TimeOut <= 10)
+ scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
+ else if (pt->TimeOut <= 60)
+ scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
+ else
+ scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
+ }
+
+ /* assume success */
+ scb->scsi_cmd->result = DID_OK << 16;
+
+ /* success */
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_cleanup_passthru */
+/* */
+/* Routine Description: */
+/* */
+/* Cleanup after a passthru command */
+/* */
+/****************************************************************************/
+static void
+ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
+{
+ ips_passthru_t *pt;
+
+ METHOD_TRACE("ips_cleanup_passthru", 1);
+
+ if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
+ DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
+ ips_name, ha->host_num);
+
+ return;
+ }
+ pt = (ips_passthru_t *) ha->ioctl_data;
+
+ /* Copy data back to the user */
+ if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB) /* Copy DCDB Back to Caller's Area */
+ memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));
+
+ pt->BasicStatus = scb->basic_status;
+ pt->ExtendedStatus = scb->extended_status;
+ pt->AdapterType = ha->ad_type;
+
+ if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
+ (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
+ scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
+ ips_free_flash_copperhead(ha);
+
+ ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_host_info */
+/* */
+/* Routine Description: */
+/* */
+/* The passthru interface for the driver */
+/* */
+/****************************************************************************/
+static int
+ips_host_info(ips_ha_t *ha, struct seq_file *m)
+{
+ METHOD_TRACE("ips_host_info", 1);
+
+ seq_puts(m, "\nIBM ServeRAID General Information:\n\n");
+
+ if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
+ (le16_to_cpu(ha->nvram->adapter_type) != 0))
+ seq_printf(m, "\tController Type : %s\n",
+ ips_adapter_name[ha->ad_type - 1]);
+ else
+ seq_puts(m, "\tController Type : Unknown\n");
+
+ if (ha->io_addr)
+ seq_printf(m,
+ "\tIO region : 0x%x (%d bytes)\n",
+ ha->io_addr, ha->io_len);
+
+ if (ha->mem_addr) {
+ seq_printf(m,
+ "\tMemory region : 0x%x (%d bytes)\n",
+ ha->mem_addr, ha->mem_len);
+ seq_printf(m,
+ "\tShared memory address : 0x%lx\n",
+ (unsigned long)ha->mem_ptr);
+ }
+
+ seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq);
+
+ /* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */
+ /* That keeps everything happy for "text" operations on the proc file. */
+
+ if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
+ if (ha->nvram->bios_low[3] == 0) {
+ seq_printf(m,
+ "\tBIOS Version : %c%c%c%c%c%c%c\n",
+ ha->nvram->bios_high[0], ha->nvram->bios_high[1],
+ ha->nvram->bios_high[2], ha->nvram->bios_high[3],
+ ha->nvram->bios_low[0], ha->nvram->bios_low[1],
+ ha->nvram->bios_low[2]);
+
+ } else {
+ seq_printf(m,
+ "\tBIOS Version : %c%c%c%c%c%c%c%c\n",
+ ha->nvram->bios_high[0], ha->nvram->bios_high[1],
+ ha->nvram->bios_high[2], ha->nvram->bios_high[3],
+ ha->nvram->bios_low[0], ha->nvram->bios_low[1],
+ ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
+ }
+
+ }
+
+ if (ha->enq->CodeBlkVersion[7] == 0) {
+ seq_printf(m,
+ "\tFirmware Version : %c%c%c%c%c%c%c\n",
+ ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
+ ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
+ ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
+ ha->enq->CodeBlkVersion[6]);
+ } else {
+ seq_printf(m,
+ "\tFirmware Version : %c%c%c%c%c%c%c%c\n",
+ ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
+ ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
+ ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
+ ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
+ }
+
+ if (ha->enq->BootBlkVersion[7] == 0) {
+ seq_printf(m,
+ "\tBoot Block Version : %c%c%c%c%c%c%c\n",
+ ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
+ ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
+ ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
+ ha->enq->BootBlkVersion[6]);
+ } else {
+ seq_printf(m,
+ "\tBoot Block Version : %c%c%c%c%c%c%c%c\n",
+ ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
+ ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
+ ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
+ ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
+ }
+
+ seq_printf(m, "\tDriver Version : %s%s\n",
+ IPS_VERSION_HIGH, IPS_VERSION_LOW);
+
+ seq_printf(m, "\tDriver Build : %d\n",
+ IPS_BUILD_IDENT);
+
+ seq_printf(m, "\tMax Physical Devices : %d\n",
+ ha->enq->ucMaxPhysicalDevices);
+ seq_printf(m, "\tMax Active Commands : %d\n",
+ ha->max_cmds);
+ seq_printf(m, "\tCurrent Queued Commands : %d\n",
+ ha->scb_waitlist.count);
+ seq_printf(m, "\tCurrent Active Commands : %d\n",
+ ha->scb_activelist.count - ha->num_ioctl);
+ seq_printf(m, "\tCurrent Queued PT Commands : %d\n",
+ ha->copp_waitlist.count);
+ seq_printf(m, "\tCurrent Active PT Commands : %d\n",
+ ha->num_ioctl);
+
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_identify_controller */
+/* */
+/* Routine Description: */
+/* */
+/* Identify this controller */
+/* */
+/****************************************************************************/
+static void
+ips_identify_controller(ips_ha_t * ha)
+{
+ METHOD_TRACE("ips_identify_controller", 1);
+
+ switch (ha->pcidev->device) {
+ case IPS_DEVICEID_COPPERHEAD:
+ if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
+ ha->ad_type = IPS_ADTYPE_SERVERAID;
+ } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
+ ha->ad_type = IPS_ADTYPE_SERVERAID2;
+ } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
+ ha->ad_type = IPS_ADTYPE_NAVAJO;
+ } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
+ && (ha->slot_num == 0)) {
+ ha->ad_type = IPS_ADTYPE_KIOWA;
+ } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
+ (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
+ if (ha->enq->ucMaxPhysicalDevices == 15)
+ ha->ad_type = IPS_ADTYPE_SERVERAID3L;
+ else
+ ha->ad_type = IPS_ADTYPE_SERVERAID3;
+ } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
+ (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
+ ha->ad_type = IPS_ADTYPE_SERVERAID4H;
+ }
+ break;
+
+ case IPS_DEVICEID_MORPHEUS:
+ switch (ha->pcidev->subsystem_device) {
+ case IPS_SUBDEVICEID_4L:
+ ha->ad_type = IPS_ADTYPE_SERVERAID4L;
+ break;
+
+ case IPS_SUBDEVICEID_4M:
+ ha->ad_type = IPS_ADTYPE_SERVERAID4M;
+ break;
+
+ case IPS_SUBDEVICEID_4MX:
+ ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
+ break;
+
+ case IPS_SUBDEVICEID_4LX:
+ ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
+ break;
+
+ case IPS_SUBDEVICEID_5I2:
+ ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
+ break;
+
+ case IPS_SUBDEVICEID_5I1:
+ ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
+ break;
+ }
+
+ break;
+
+ case IPS_DEVICEID_MARCO:
+ switch (ha->pcidev->subsystem_device) {
+ case IPS_SUBDEVICEID_6M:
+ ha->ad_type = IPS_ADTYPE_SERVERAID6M;
+ break;
+ case IPS_SUBDEVICEID_6I:
+ ha->ad_type = IPS_ADTYPE_SERVERAID6I;
+ break;
+ case IPS_SUBDEVICEID_7k:
+ ha->ad_type = IPS_ADTYPE_SERVERAID7k;
+ break;
+ case IPS_SUBDEVICEID_7M:
+ ha->ad_type = IPS_ADTYPE_SERVERAID7M;
+ break;
+ }
+ break;
+ }
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_get_bios_version */
+/* */
+/* Routine Description: */
+/* */
+/* Get the BIOS revision number */
+/* */
+/****************************************************************************/
+static void
+ips_get_bios_version(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+ int ret;
+ uint8_t major;
+ uint8_t minor;
+ uint8_t subminor;
+ uint8_t *buffer;
+ char hexDigits[] =
+ { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
+ 'D', 'E', 'F' };
+
+ METHOD_TRACE("ips_get_bios_version", 1);
+
+ major = 0;
+ minor = 0;
+
+ strncpy(ha->bios_version, " ?", 8);
+
+ if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
+ if (IPS_USE_MEMIO(ha)) {
+ /* Memory Mapped I/O */
+
+ /* test 1st byte */
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
+ return;
+
+ writel(1, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
+ return;
+
+ /* Get Major version */
+ writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ major = readb(ha->mem_ptr + IPS_REG_FLDP);
+
+ /* Get Minor version */
+ writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+ minor = readb(ha->mem_ptr + IPS_REG_FLDP);
+
+ /* Get SubMinor version */
+ writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+ subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
+
+ } else {
+ /* Programmed I/O */
+
+ /* test 1st byte */
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
+ return;
+
+ outl(1, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
+ return;
+
+ /* Get Major version */
+ outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ major = inb(ha->io_addr + IPS_REG_FLDP);
+
+ /* Get Minor version */
+ outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ minor = inb(ha->io_addr + IPS_REG_FLDP);
+
+ /* Get SubMinor version */
+ outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ subminor = inb(ha->io_addr + IPS_REG_FLDP);
+
+ }
+ } else {
+ /* Morpheus Family - Send Command to the card */
+
+ buffer = ha->ioctl_data;
+
+ memset(buffer, 0, 0x1000);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_RW_BIOSFW;
+
+ scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
+ scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.flashfw.type = 1;
+ scb->cmd.flashfw.direction = 0;
+ scb->cmd.flashfw.count = cpu_to_le32(0x800);
+ scb->cmd.flashfw.total_packets = 1;
+ scb->cmd.flashfw.packet_num = 0;
+ scb->data_len = 0x1000;
+ scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;
+
+ /* issue the command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_cmd_timeout,
+ intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
+ /* Error occurred */
+
+ return;
+ }
+
+ if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
+ major = buffer[0x1ff + 0xC0]; /* Offset 0x1ff after the header (0xc0) */
+ minor = buffer[0x1fe + 0xC0]; /* Offset 0x1fe after the header (0xc0) */
+ subminor = buffer[0x1fd + 0xC0]; /* Offset 0x1fd after the header (0xc0) */
+ } else {
+ return;
+ }
+ }
+
+ ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
+ ha->bios_version[1] = '.';
+ ha->bios_version[2] = hexDigits[major & 0x0F];
+ ha->bios_version[3] = hexDigits[subminor];
+ ha->bios_version[4] = '.';
+ ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
+ ha->bios_version[6] = hexDigits[minor & 0x0F];
+ ha->bios_version[7] = 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_hainit */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize the controller */
+/* */
+/* NOTE: Assumes to be called from with a lock */
+/* */
+/****************************************************************************/
+static int
+ips_hainit(ips_ha_t * ha)
+{
+ int i;
+ struct timeval tv;
+
+ METHOD_TRACE("ips_hainit", 1);
+
+ if (!ha)
+ return (0);
+
+ if (ha->func.statinit)
+ (*ha->func.statinit) (ha);
+
+ if (ha->func.enableint)
+ (*ha->func.enableint) (ha);
+
+ /* Send FFDC */
+ ha->reset_count = 1;
+ do_gettimeofday(&tv);
+ ha->last_ffdc = tv.tv_sec;
+ ips_ffdc_reset(ha, IPS_INTR_IORL);
+
+ if (!ips_read_config(ha, IPS_INTR_IORL)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "unable to read config from controller.\n");
+
+ return (0);
+ }
+ /* end if */
+ if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "unable to read controller status.\n");
+
+ return (0);
+ }
+
+ /* Identify this controller */
+ ips_identify_controller(ha);
+
+ if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "unable to read subsystem parameters.\n");
+
+ return (0);
+ }
+
+ /* write nvram user page 5 */
+ if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "unable to write driver info to controller.\n");
+
+ return (0);
+ }
+
+ /* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */
+ if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
+ ips_clear_adapter(ha, IPS_INTR_IORL);
+
+ /* set limits on SID, LUN, BUS */
+ ha->ntargets = IPS_MAX_TARGETS + 1;
+ ha->nlun = 1;
+ ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;
+
+ switch (ha->conf->logical_drive[0].ucStripeSize) {
+ case 4:
+ ha->max_xfer = 0x10000;
+ break;
+
+ case 5:
+ ha->max_xfer = 0x20000;
+ break;
+
+ case 6:
+ ha->max_xfer = 0x40000;
+ break;
+
+ case 7:
+ default:
+ ha->max_xfer = 0x80000;
+ break;
+ }
+
+ /* setup max concurrent commands */
+ if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
+ /* Use the new method */
+ ha->max_cmds = ha->enq->ucConcurrentCmdCount;
+ } else {
+ /* use the old method */
+ switch (ha->conf->logical_drive[0].ucStripeSize) {
+ case 4:
+ ha->max_cmds = 32;
+ break;
+
+ case 5:
+ ha->max_cmds = 16;
+ break;
+
+ case 6:
+ ha->max_cmds = 8;
+ break;
+
+ case 7:
+ default:
+ ha->max_cmds = 4;
+ break;
+ }
+ }
+
+ /* Limit the Active Commands on a Lite Adapter */
+ if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
+ (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
+ (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
+ if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
+ ha->max_cmds = MaxLiteCmds;
+ }
+
+ /* set controller IDs */
+ ha->ha_id[0] = IPS_ADAPTER_ID;
+ for (i = 1; i < ha->nbus; i++) {
+ ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
+ ha->dcdb_active[i - 1] = 0;
+ }
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_next */
+/* */
+/* Routine Description: */
+/* */
+/* Take the next command off the queue and send it to the controller */
+/* */
+/****************************************************************************/
+static void
+ips_next(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+ struct scsi_cmnd *SC;
+ struct scsi_cmnd *p;
+ struct scsi_cmnd *q;
+ ips_copp_wait_item_t *item;
+ int ret;
+ struct Scsi_Host *host;
+ METHOD_TRACE("ips_next", 1);
+
+ if (!ha)
+ return;
+ host = ips_sh[ha->host_num];
+ /*
+ * Block access to the queue function so
+ * this command won't time out
+ */
+ if (intr == IPS_INTR_ON)
+ spin_lock(host->host_lock);
+
+ if ((ha->subsys->param[3] & 0x300000)
+ && (ha->scb_activelist.count == 0)) {
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+
+ if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
+ ha->last_ffdc = tv.tv_sec;
+ ips_ffdc_time(ha);
+ }
+ }
+
+ /*
+ * Send passthru commands
+ * These have priority over normal I/O
+ * but shouldn't affect performance too much
+ * since we limit the number that can be active
+ * on the card at any one time
+ */
+ while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
+ (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {
+
+ item = ips_removeq_copp_head(&ha->copp_waitlist);
+ ha->num_ioctl++;
+ if (intr == IPS_INTR_ON)
+ spin_unlock(host->host_lock);
+ scb->scsi_cmd = item->scsi_cmd;
+ kfree(item);
+
+ ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
+
+ if (intr == IPS_INTR_ON)
+ spin_lock(host->host_lock);
+ switch (ret) {
+ case IPS_FAILURE:
+ if (scb->scsi_cmd) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ }
+
+ ips_freescb(ha, scb);
+ break;
+ case IPS_SUCCESS_IMM:
+ if (scb->scsi_cmd) {
+ scb->scsi_cmd->result = DID_OK << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ }
+
+ ips_freescb(ha, scb);
+ break;
+ default:
+ break;
+ } /* end case */
+
+ if (ret != IPS_SUCCESS) {
+ ha->num_ioctl--;
+ continue;
+ }
+
+ ret = ips_send_cmd(ha, scb);
+
+ if (ret == IPS_SUCCESS)
+ ips_putq_scb_head(&ha->scb_activelist, scb);
+ else
+ ha->num_ioctl--;
+
+ switch (ret) {
+ case IPS_FAILURE:
+ if (scb->scsi_cmd) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ }
+
+ ips_freescb(ha, scb);
+ break;
+ case IPS_SUCCESS_IMM:
+ ips_freescb(ha, scb);
+ break;
+ default:
+ break;
+ } /* end case */
+
+ }
+
+ /*
+ * Send "Normal" I/O commands
+ */
+
+ p = ha->scb_waitlist.head;
+ while ((p) && (scb = ips_getscb(ha))) {
+ if ((scmd_channel(p) > 0)
+ && (ha->
+ dcdb_active[scmd_channel(p) -
+ 1] & (1 << scmd_id(p)))) {
+ ips_freescb(ha, scb);
+ p = (struct scsi_cmnd *) p->host_scribble;
+ continue;
+ }
+
+ q = p;
+ SC = ips_removeq_wait(&ha->scb_waitlist, q);
+
+ if (intr == IPS_INTR_ON)
+ spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */
+
+ SC->result = DID_OK;
+ SC->host_scribble = NULL;
+
+ scb->target_id = SC->device->id;
+ scb->lun = SC->device->lun;
+ scb->bus = SC->device->channel;
+ scb->scsi_cmd = SC;
+ scb->breakup = 0;
+ scb->data_len = 0;
+ scb->callback = ipsintr_done;
+ scb->timeout = ips_cmd_timeout;
+ memset(&scb->cmd, 0, 16);
+
+ /* copy in the CDB */
+ memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
+
+ scb->sg_count = scsi_dma_map(SC);
+ BUG_ON(scb->sg_count < 0);
+ if (scb->sg_count) {
+ struct scatterlist *sg;
+ int i;
+
+ scb->flags |= IPS_SCB_MAP_SG;
+
+ scsi_for_each_sg(SC, sg, scb->sg_count, i) {
+ if (ips_fill_scb_sg_single
+ (ha, sg_dma_address(sg), scb, i,
+ sg_dma_len(sg)) < 0)
+ break;
+ }
+ scb->dcdb.transfer_length = scb->data_len;
+ } else {
+ scb->data_busaddr = 0L;
+ scb->sg_len = 0;
+ scb->data_len = 0;
+ scb->dcdb.transfer_length = 0;
+ }
+
+ scb->dcdb.cmd_attribute =
+ ips_command_direction[scb->scsi_cmd->cmnd[0]];
+
+ /* Allow a WRITE BUFFER Command to Have no Data */
+ /* This is Used by Tape Flash Utilites */
+ if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
+ (scb->data_len == 0))
+ scb->dcdb.cmd_attribute = 0;
+
+ if (!(scb->dcdb.cmd_attribute & 0x3))
+ scb->dcdb.transfer_length = 0;
+
+ if (scb->data_len >= IPS_MAX_XFER) {
+ scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
+ scb->dcdb.transfer_length = 0;
+ }
+ if (intr == IPS_INTR_ON)
+ spin_lock(host->host_lock);
+
+ ret = ips_send_cmd(ha, scb);
+
+ switch (ret) {
+ case IPS_SUCCESS:
+ ips_putq_scb_head(&ha->scb_activelist, scb);
+ break;
+ case IPS_FAILURE:
+ if (scb->scsi_cmd) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ }
+
+ if (scb->bus)
+ ha->dcdb_active[scb->bus - 1] &=
+ ~(1 << scb->target_id);
+
+ ips_freescb(ha, scb);
+ break;
+ case IPS_SUCCESS_IMM:
+ if (scb->scsi_cmd)
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+
+ if (scb->bus)
+ ha->dcdb_active[scb->bus - 1] &=
+ ~(1 << scb->target_id);
+
+ ips_freescb(ha, scb);
+ break;
+ default:
+ break;
+ } /* end case */
+
+ p = (struct scsi_cmnd *) p->host_scribble;
+
+ } /* end while */
+
+ if (intr == IPS_INTR_ON)
+ spin_unlock(host->host_lock);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_putq_scb_head */
+/* */
+/* Routine Description: */
+/* */
+/* Add an item to the head of the queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static void
+ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
+{
+ METHOD_TRACE("ips_putq_scb_head", 1);
+
+ if (!item)
+ return;
+
+ item->q_next = queue->head;
+ queue->head = item;
+
+ if (!queue->tail)
+ queue->tail = item;
+
+ queue->count++;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_removeq_scb_head */
+/* */
+/* Routine Description: */
+/* */
+/* Remove the head of the queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static ips_scb_t *
+ips_removeq_scb_head(ips_scb_queue_t * queue)
+{
+ ips_scb_t *item;
+
+ METHOD_TRACE("ips_removeq_scb_head", 1);
+
+ item = queue->head;
+
+ if (!item) {
+ return (NULL);
+ }
+
+ queue->head = item->q_next;
+ item->q_next = NULL;
+
+ if (queue->tail == item)
+ queue->tail = NULL;
+
+ queue->count--;
+
+ return (item);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_removeq_scb */
+/* */
+/* Routine Description: */
+/* */
+/* Remove an item from a queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static ips_scb_t *
+ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
+{
+ ips_scb_t *p;
+
+ METHOD_TRACE("ips_removeq_scb", 1);
+
+ if (!item)
+ return (NULL);
+
+ if (item == queue->head) {
+ return (ips_removeq_scb_head(queue));
+ }
+
+ p = queue->head;
+
+ while ((p) && (item != p->q_next))
+ p = p->q_next;
+
+ if (p) {
+ /* found a match */
+ p->q_next = item->q_next;
+
+ if (!item->q_next)
+ queue->tail = p;
+
+ item->q_next = NULL;
+ queue->count--;
+
+ return (item);
+ }
+
+ return (NULL);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_putq_wait_tail */
+/* */
+/* Routine Description: */
+/* */
+/* Add an item to the tail of the queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
+{
+ METHOD_TRACE("ips_putq_wait_tail", 1);
+
+ if (!item)
+ return;
+
+ item->host_scribble = NULL;
+
+ if (queue->tail)
+ queue->tail->host_scribble = (char *) item;
+
+ queue->tail = item;
+
+ if (!queue->head)
+ queue->head = item;
+
+ queue->count++;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_removeq_wait_head */
+/* */
+/* Routine Description: */
+/* */
+/* Remove the head of the queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
+{
+ struct scsi_cmnd *item;
+
+ METHOD_TRACE("ips_removeq_wait_head", 1);
+
+ item = queue->head;
+
+ if (!item) {
+ return (NULL);
+ }
+
+ queue->head = (struct scsi_cmnd *) item->host_scribble;
+ item->host_scribble = NULL;
+
+ if (queue->tail == item)
+ queue->tail = NULL;
+
+ queue->count--;
+
+ return (item);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_removeq_wait */
+/* */
+/* Routine Description: */
+/* */
+/* Remove an item from a queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
+ struct scsi_cmnd *item)
+{
+ struct scsi_cmnd *p;
+
+ METHOD_TRACE("ips_removeq_wait", 1);
+
+ if (!item)
+ return (NULL);
+
+ if (item == queue->head) {
+ return (ips_removeq_wait_head(queue));
+ }
+
+ p = queue->head;
+
+ while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
+ p = (struct scsi_cmnd *) p->host_scribble;
+
+ if (p) {
+ /* found a match */
+ p->host_scribble = item->host_scribble;
+
+ if (!item->host_scribble)
+ queue->tail = p;
+
+ item->host_scribble = NULL;
+ queue->count--;
+
+ return (item);
+ }
+
+ return (NULL);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_putq_copp_tail */
+/* */
+/* Routine Description: */
+/* */
+/* Add an item to the tail of the queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static void
+ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
+{
+ METHOD_TRACE("ips_putq_copp_tail", 1);
+
+ if (!item)
+ return;
+
+ item->next = NULL;
+
+ if (queue->tail)
+ queue->tail->next = item;
+
+ queue->tail = item;
+
+ if (!queue->head)
+ queue->head = item;
+
+ queue->count++;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_removeq_copp_head */
+/* */
+/* Routine Description: */
+/* */
+/* Remove the head of the queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static ips_copp_wait_item_t *
+ips_removeq_copp_head(ips_copp_queue_t * queue)
+{
+ ips_copp_wait_item_t *item;
+
+ METHOD_TRACE("ips_removeq_copp_head", 1);
+
+ item = queue->head;
+
+ if (!item) {
+ return (NULL);
+ }
+
+ queue->head = item->next;
+ item->next = NULL;
+
+ if (queue->tail == item)
+ queue->tail = NULL;
+
+ queue->count--;
+
+ return (item);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_removeq_copp */
+/* */
+/* Routine Description: */
+/* */
+/* Remove an item from a queue */
+/* */
+/* ASSUMED to be called from within the HA lock */
+/* */
+/****************************************************************************/
+static ips_copp_wait_item_t *
+ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
+{
+ ips_copp_wait_item_t *p;
+
+ METHOD_TRACE("ips_removeq_copp", 1);
+
+ if (!item)
+ return (NULL);
+
+ if (item == queue->head) {
+ return (ips_removeq_copp_head(queue));
+ }
+
+ p = queue->head;
+
+ while ((p) && (item != p->next))
+ p = p->next;
+
+ if (p) {
+ /* found a match */
+ p->next = item->next;
+
+ if (!item->next)
+ queue->tail = p;
+
+ item->next = NULL;
+ queue->count--;
+
+ return (item);
+ }
+
+ return (NULL);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ipsintr_blocking */
+/* */
+/* Routine Description: */
+/* */
+/* Finalize an interrupt for internal commands */
+/* */
+/****************************************************************************/
+static void
+ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
+{
+ METHOD_TRACE("ipsintr_blocking", 2);
+
+ ips_freescb(ha, scb);
+ if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
+ ha->waitflag = FALSE;
+
+ return;
+ }
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ipsintr_done */
+/* */
+/* Routine Description: */
+/* */
+/* Finalize an interrupt for non-internal commands */
+/* */
+/****************************************************************************/
+static void
+ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
+{
+ METHOD_TRACE("ipsintr_done", 2);
+
+ if (!scb) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Spurious interrupt; scb NULL.\n");
+
+ return;
+ }
+
+ if (scb->scsi_cmd == NULL) {
+ /* unexpected interrupt */
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Spurious interrupt; scsi_cmd not set.\n");
+
+ return;
+ }
+
+ ips_done(ha, scb);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_done */
+/* */
+/* Routine Description: */
+/* */
+/* Do housekeeping on completed commands */
+/* ASSUMED to be called form within the request lock */
+/****************************************************************************/
+static void
+ips_done(ips_ha_t * ha, ips_scb_t * scb)
+{
+ int ret;
+
+ METHOD_TRACE("ips_done", 1);
+
+ if (!scb)
+ return;
+
+ if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
+ ips_cleanup_passthru(ha, scb);
+ ha->num_ioctl--;
+ } else {
+ /*
+ * Check to see if this command had too much
+ * data and had to be broke up. If so, queue
+ * the rest of the data and continue.
+ */
+ if ((scb->breakup) || (scb->sg_break)) {
+ struct scatterlist *sg;
+ int i, sg_dma_index, ips_sg_index = 0;
+
+ /* we had a data breakup */
+ scb->data_len = 0;
+
+ sg = scsi_sglist(scb->scsi_cmd);
+
+ /* Spin forward to last dma chunk */
+ sg_dma_index = scb->breakup;
+ for (i = 0; i < scb->breakup; i++)
+ sg = sg_next(sg);
+
+ /* Take care of possible partial on last chunk */
+ ips_fill_scb_sg_single(ha,
+ sg_dma_address(sg),
+ scb, ips_sg_index++,
+ sg_dma_len(sg));
+
+ for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+ sg_dma_index++, sg = sg_next(sg)) {
+ if (ips_fill_scb_sg_single
+ (ha,
+ sg_dma_address(sg),
+ scb, ips_sg_index++,
+ sg_dma_len(sg)) < 0)
+ break;
+ }
+
+ scb->dcdb.transfer_length = scb->data_len;
+ scb->dcdb.cmd_attribute |=
+ ips_command_direction[scb->scsi_cmd->cmnd[0]];
+
+ if (!(scb->dcdb.cmd_attribute & 0x3))
+ scb->dcdb.transfer_length = 0;
+
+ if (scb->data_len >= IPS_MAX_XFER) {
+ scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
+ scb->dcdb.transfer_length = 0;
+ }
+
+ ret = ips_send_cmd(ha, scb);
+
+ switch (ret) {
+ case IPS_FAILURE:
+ if (scb->scsi_cmd) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ }
+
+ ips_freescb(ha, scb);
+ break;
+ case IPS_SUCCESS_IMM:
+ if (scb->scsi_cmd) {
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+ }
+
+ ips_freescb(ha, scb);
+ break;
+ default:
+ break;
+ } /* end case */
+
+ return;
+ }
+ } /* end if passthru */
+
+ if (scb->bus) {
+ ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
+ }
+
+ scb->scsi_cmd->scsi_done(scb->scsi_cmd);
+
+ ips_freescb(ha, scb);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_map_status */
+/* */
+/* Routine Description: */
+/* */
+/* Map Controller Error codes to Linux Error Codes */
+/* */
+/****************************************************************************/
+static int
+ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
+{
+ int errcode;
+ int device_error;
+ uint32_t transfer_len;
+ IPS_DCDB_TABLE_TAPE *tapeDCDB;
+ IPS_SCSI_INQ_DATA inquiryData;
+
+ METHOD_TRACE("ips_map_status", 1);
+
+ if (scb->bus) {
+ DEBUG_VAR(2,
+ "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
+ ips_name, ha->host_num,
+ scb->scsi_cmd->device->channel,
+ scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
+ scb->basic_status, scb->extended_status,
+ scb->extended_status ==
+ IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
+ scb->extended_status ==
+ IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
+ scb->extended_status ==
+ IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
+ }
+
+ /* default driver error */
+ errcode = DID_ERROR;
+ device_error = 0;
+
+ switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
+ case IPS_CMD_TIMEOUT:
+ errcode = DID_TIME_OUT;
+ break;
+
+ case IPS_INVAL_OPCO:
+ case IPS_INVAL_CMD_BLK:
+ case IPS_INVAL_PARM_BLK:
+ case IPS_LD_ERROR:
+ case IPS_CMD_CMPLT_WERROR:
+ break;
+
+ case IPS_PHYS_DRV_ERROR:
+ switch (scb->extended_status) {
+ case IPS_ERR_SEL_TO:
+ if (scb->bus)
+ errcode = DID_NO_CONNECT;
+
+ break;
+
+ case IPS_ERR_OU_RUN:
+ if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
+ (scb->cmd.dcdb.op_code ==
+ IPS_CMD_EXTENDED_DCDB_SG)) {
+ tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
+ transfer_len = tapeDCDB->transfer_length;
+ } else {
+ transfer_len =
+ (uint32_t) scb->dcdb.transfer_length;
+ }
+
+ if ((scb->bus) && (transfer_len < scb->data_len)) {
+ /* Underrun - set default to no error */
+ errcode = DID_OK;
+
+ /* Restrict access to physical DASD */
+ if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
+ ips_scmd_buf_read(scb->scsi_cmd,
+ &inquiryData, sizeof (inquiryData));
+ if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
+ errcode = DID_TIME_OUT;
+ break;
+ }
+ }
+ } else
+ errcode = DID_ERROR;
+
+ break;
+
+ case IPS_ERR_RECOVERY:
+ /* don't fail recovered errors */
+ if (scb->bus)
+ errcode = DID_OK;
+
+ break;
+
+ case IPS_ERR_HOST_RESET:
+ case IPS_ERR_DEV_RESET:
+ errcode = DID_RESET;
+ break;
+
+ case IPS_ERR_CKCOND:
+ if (scb->bus) {
+ if ((scb->cmd.dcdb.op_code ==
+ IPS_CMD_EXTENDED_DCDB)
+ || (scb->cmd.dcdb.op_code ==
+ IPS_CMD_EXTENDED_DCDB_SG)) {
+ tapeDCDB =
+ (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
+ memcpy(scb->scsi_cmd->sense_buffer,
+ tapeDCDB->sense_info,
+ SCSI_SENSE_BUFFERSIZE);
+ } else {
+ memcpy(scb->scsi_cmd->sense_buffer,
+ scb->dcdb.sense_info,
+ SCSI_SENSE_BUFFERSIZE);
+ }
+ device_error = 2; /* check condition */
+ }
+
+ errcode = DID_OK;
+
+ break;
+
+ default:
+ errcode = DID_ERROR;
+ break;
+
+ } /* end switch */
+ } /* end switch */
+
+ scb->scsi_cmd->result = device_error | (errcode << 16);
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_send_wait */
+/* */
+/* Routine Description: */
+/* */
+/* Send a command to the controller and wait for it to return */
+/* */
+/* The FFDC Time Stamp use this function for the callback, but doesn't */
+/* actually need to wait. */
+/****************************************************************************/
+static int
+ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
+{
+ int ret;
+
+ METHOD_TRACE("ips_send_wait", 1);
+
+ if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */
+ ha->waitflag = TRUE;
+ ha->cmd_in_progress = scb->cdb[0];
+ }
+ scb->callback = ipsintr_blocking;
+ ret = ips_send_cmd(ha, scb);
+
+ if ((ret == IPS_FAILURE) || (ret == IPS_SUCCESS_IMM))
+ return (ret);
+
+ if (intr != IPS_FFDC) /* Don't Wait around if this is a Time Stamp */
+ ret = ips_wait(ha, timeout, intr);
+
+ return (ret);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_scmd_buf_write */
+/* */
+/* Routine Description: */
+/* Write data to struct scsi_cmnd request_buffer at proper offsets */
+/****************************************************************************/
+static void
+ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ scsi_sg_copy_from_buffer(scmd, data, count);
+ local_irq_restore(flags);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_scmd_buf_read */
+/* */
+/* Routine Description: */
+/* Copy data from a struct scsi_cmnd to a new, linear buffer */
+/****************************************************************************/
+static void
+ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ scsi_sg_copy_to_buffer(scmd, data, count);
+ local_irq_restore(flags);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_send_cmd */
+/* */
+/* Routine Description: */
+/* */
+/* Map SCSI commands to ServeRAID commands for logical drives */
+/* */
+/****************************************************************************/
+static int
+ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
+{
+ int ret;
+ char *sp;
+ int device_error;
+ IPS_DCDB_TABLE_TAPE *tapeDCDB;
+ int TimeOut;
+
+ METHOD_TRACE("ips_send_cmd", 1);
+
+ ret = IPS_SUCCESS;
+
+ if (!scb->scsi_cmd) {
+ /* internal command */
+
+ if (scb->bus > 0) {
+ /* Controller commands can't be issued */
+ /* to real devices -- fail them */
+ if ((ha->waitflag == TRUE) &&
+ (ha->cmd_in_progress == scb->cdb[0])) {
+ ha->waitflag = FALSE;
+ }
+
+ return (1);
+ }
+ } else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
+ /* command to logical bus -- interpret */
+ ret = IPS_SUCCESS_IMM;
+
+ switch (scb->scsi_cmd->cmnd[0]) {
+ case ALLOW_MEDIUM_REMOVAL:
+ case REZERO_UNIT:
+ case ERASE:
+ case WRITE_FILEMARKS:
+ case SPACE:
+ scb->scsi_cmd->result = DID_ERROR << 16;
+ break;
+
+ case START_STOP:
+ scb->scsi_cmd->result = DID_OK << 16;
+
+ case TEST_UNIT_READY:
+ case INQUIRY:
+ if (scb->target_id == IPS_ADAPTER_ID) {
+ /*
+ * Either we have a TUR
+ * or we have a SCSI inquiry
+ */
+ if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
+ scb->scsi_cmd->result = DID_OK << 16;
+
+ if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
+ IPS_SCSI_INQ_DATA inquiry;
+
+ memset(&inquiry, 0,
+ sizeof (IPS_SCSI_INQ_DATA));
+
+ inquiry.DeviceType =
+ IPS_SCSI_INQ_TYPE_PROCESSOR;
+ inquiry.DeviceTypeQualifier =
+ IPS_SCSI_INQ_LU_CONNECTED;
+ inquiry.Version = IPS_SCSI_INQ_REV2;
+ inquiry.ResponseDataFormat =
+ IPS_SCSI_INQ_RD_REV2;
+ inquiry.AdditionalLength = 31;
+ inquiry.Flags[0] =
+ IPS_SCSI_INQ_Address16;
+ inquiry.Flags[1] =
+ IPS_SCSI_INQ_WBus16 |
+ IPS_SCSI_INQ_Sync;
+ strncpy(inquiry.VendorId, "IBM ",
+ 8);
+ strncpy(inquiry.ProductId,
+ "SERVERAID ", 16);
+ strncpy(inquiry.ProductRevisionLevel,
+ "1.00", 4);
+
+ ips_scmd_buf_write(scb->scsi_cmd,
+ &inquiry,
+ sizeof (inquiry));
+
+ scb->scsi_cmd->result = DID_OK << 16;
+ }
+ } else {
+ scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
+ scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.logical_info.reserved = 0;
+ scb->cmd.logical_info.reserved2 = 0;
+ scb->data_len = sizeof (IPS_LD_INFO);
+ scb->data_busaddr = ha->logical_drive_info_dma_addr;
+ scb->flags = 0;
+ scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
+ ret = IPS_SUCCESS;
+ }
+
+ break;
+
+ case REQUEST_SENSE:
+ ips_reqsen(ha, scb);
+ scb->scsi_cmd->result = DID_OK << 16;
+ break;
+
+ case READ_6:
+ case WRITE_6:
+ if (!scb->sg_len) {
+ scb->cmd.basic_io.op_code =
+ (scb->scsi_cmd->cmnd[0] ==
+ READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
+ scb->cmd.basic_io.enhanced_sg = 0;
+ scb->cmd.basic_io.sg_addr =
+ cpu_to_le32(scb->data_busaddr);
+ } else {
+ scb->cmd.basic_io.op_code =
+ (scb->scsi_cmd->cmnd[0] ==
+ READ_6) ? IPS_CMD_READ_SG :
+ IPS_CMD_WRITE_SG;
+ scb->cmd.basic_io.enhanced_sg =
+ IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
+ scb->cmd.basic_io.sg_addr =
+ cpu_to_le32(scb->sg_busaddr);
+ }
+
+ scb->cmd.basic_io.segment_4G = 0;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.basic_io.log_drv = scb->target_id;
+ scb->cmd.basic_io.sg_count = scb->sg_len;
+
+ if (scb->cmd.basic_io.lba)
+ le32_add_cpu(&scb->cmd.basic_io.lba,
+ le16_to_cpu(scb->cmd.basic_io.
+ sector_count));
+ else
+ scb->cmd.basic_io.lba =
+ (((scb->scsi_cmd->
+ cmnd[1] & 0x1f) << 16) | (scb->scsi_cmd->
+ cmnd[2] << 8) |
+ (scb->scsi_cmd->cmnd[3]));
+
+ scb->cmd.basic_io.sector_count =
+ cpu_to_le16(scb->data_len / IPS_BLKSIZE);
+
+ if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
+ scb->cmd.basic_io.sector_count =
+ cpu_to_le16(256);
+
+ ret = IPS_SUCCESS;
+ break;
+
+ case READ_10:
+ case WRITE_10:
+ if (!scb->sg_len) {
+ scb->cmd.basic_io.op_code =
+ (scb->scsi_cmd->cmnd[0] ==
+ READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
+ scb->cmd.basic_io.enhanced_sg = 0;
+ scb->cmd.basic_io.sg_addr =
+ cpu_to_le32(scb->data_busaddr);
+ } else {
+ scb->cmd.basic_io.op_code =
+ (scb->scsi_cmd->cmnd[0] ==
+ READ_10) ? IPS_CMD_READ_SG :
+ IPS_CMD_WRITE_SG;
+ scb->cmd.basic_io.enhanced_sg =
+ IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
+ scb->cmd.basic_io.sg_addr =
+ cpu_to_le32(scb->sg_busaddr);
+ }
+
+ scb->cmd.basic_io.segment_4G = 0;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.basic_io.log_drv = scb->target_id;
+ scb->cmd.basic_io.sg_count = scb->sg_len;
+
+ if (scb->cmd.basic_io.lba)
+ le32_add_cpu(&scb->cmd.basic_io.lba,
+ le16_to_cpu(scb->cmd.basic_io.
+ sector_count));
+ else
+ scb->cmd.basic_io.lba =
+ ((scb->scsi_cmd->cmnd[2] << 24) | (scb->
+ scsi_cmd->
+ cmnd[3]
+ << 16) |
+ (scb->scsi_cmd->cmnd[4] << 8) | scb->
+ scsi_cmd->cmnd[5]);
+
+ scb->cmd.basic_io.sector_count =
+ cpu_to_le16(scb->data_len / IPS_BLKSIZE);
+
+ if (cpu_to_le16(scb->cmd.basic_io.sector_count) == 0) {
+ /*
+ * This is a null condition
+ * we don't have to do anything
+ * so just return
+ */
+ scb->scsi_cmd->result = DID_OK << 16;
+ } else
+ ret = IPS_SUCCESS;
+
+ break;
+
+ case RESERVE:
+ case RELEASE:
+ scb->scsi_cmd->result = DID_OK << 16;
+ break;
+
+ case MODE_SENSE:
+ scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.basic_io.segment_4G = 0;
+ scb->cmd.basic_io.enhanced_sg = 0;
+ scb->data_len = sizeof (*ha->enq);
+ scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
+ ret = IPS_SUCCESS;
+ break;
+
+ case READ_CAPACITY:
+ scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
+ scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.logical_info.reserved = 0;
+ scb->cmd.logical_info.reserved2 = 0;
+ scb->cmd.logical_info.reserved3 = 0;
+ scb->data_len = sizeof (IPS_LD_INFO);
+ scb->data_busaddr = ha->logical_drive_info_dma_addr;
+ scb->flags = 0;
+ scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
+ ret = IPS_SUCCESS;
+ break;
+
+ case SEND_DIAGNOSTIC:
+ case REASSIGN_BLOCKS:
+ case FORMAT_UNIT:
+ case SEEK_10:
+ case VERIFY:
+ case READ_DEFECT_DATA:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ scb->scsi_cmd->result = DID_OK << 16;
+ break;
+
+ default:
+ /* Set the Return Info to appear like the Command was */
+ /* attempted, a Check Condition occurred, and Sense */
+ /* Data indicating an Invalid CDB OpCode is returned. */
+ sp = (char *) scb->scsi_cmd->sense_buffer;
+
+ sp[0] = 0x70; /* Error Code */
+ sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */
+ sp[7] = 0x0A; /* Additional Sense Length */
+ sp[12] = 0x20; /* ASC = Invalid OpCode */
+ sp[13] = 0x00; /* ASCQ */
+
+ device_error = 2; /* Indicate Check Condition */
+ scb->scsi_cmd->result = device_error | (DID_OK << 16);
+ break;
+ } /* end switch */
+ }
+ /* end if */
+ if (ret == IPS_SUCCESS_IMM)
+ return (ret);
+
+ /* setup DCDB */
+ if (scb->bus > 0) {
+
+ /* If we already know the Device is Not there, no need to attempt a Command */
+ /* This also protects an NT FailOver Controller from getting CDB's sent to it */
+ if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
+ scb->scsi_cmd->result = DID_NO_CONNECT << 16;
+ return (IPS_SUCCESS_IMM);
+ }
+
+ ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
+ scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
+ (unsigned long) &scb->
+ dcdb -
+ (unsigned long) scb);
+ scb->cmd.dcdb.reserved = 0;
+ scb->cmd.dcdb.reserved2 = 0;
+ scb->cmd.dcdb.reserved3 = 0;
+ scb->cmd.dcdb.segment_4G = 0;
+ scb->cmd.dcdb.enhanced_sg = 0;
+
+ TimeOut = scb->scsi_cmd->request->timeout;
+
+ if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
+ if (!scb->sg_len) {
+ scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
+ } else {
+ scb->cmd.dcdb.op_code =
+ IPS_CMD_EXTENDED_DCDB_SG;
+ scb->cmd.dcdb.enhanced_sg =
+ IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
+ }
+
+ tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb; /* Use Same Data Area as Old DCDB Struct */
+ tapeDCDB->device_address =
+ ((scb->bus - 1) << 4) | scb->target_id;
+ tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
+ tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K; /* Always Turn OFF 64K Size Flag */
+
+ if (TimeOut) {
+ if (TimeOut < (10 * HZ))
+ tapeDCDB->cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */
+ else if (TimeOut < (60 * HZ))
+ tapeDCDB->cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */
+ else if (TimeOut < (1200 * HZ))
+ tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */
+ }
+
+ tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
+ tapeDCDB->reserved_for_LUN = 0;
+ tapeDCDB->transfer_length = scb->data_len;
+ if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
+ tapeDCDB->buffer_pointer =
+ cpu_to_le32(scb->sg_busaddr);
+ else
+ tapeDCDB->buffer_pointer =
+ cpu_to_le32(scb->data_busaddr);
+ tapeDCDB->sg_count = scb->sg_len;
+ tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
+ tapeDCDB->scsi_status = 0;
+ tapeDCDB->reserved = 0;
+ memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
+ scb->scsi_cmd->cmd_len);
+ } else {
+ if (!scb->sg_len) {
+ scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
+ } else {
+ scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
+ scb->cmd.dcdb.enhanced_sg =
+ IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
+ }
+
+ scb->dcdb.device_address =
+ ((scb->bus - 1) << 4) | scb->target_id;
+ scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
+
+ if (TimeOut) {
+ if (TimeOut < (10 * HZ))
+ scb->dcdb.cmd_attribute |= IPS_TIMEOUT10; /* TimeOut is 10 Seconds */
+ else if (TimeOut < (60 * HZ))
+ scb->dcdb.cmd_attribute |= IPS_TIMEOUT60; /* TimeOut is 60 Seconds */
+ else if (TimeOut < (1200 * HZ))
+ scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M; /* TimeOut is 20 Minutes */
+ }
+
+ scb->dcdb.transfer_length = scb->data_len;
+ if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
+ scb->dcdb.transfer_length = 0;
+ if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
+ scb->dcdb.buffer_pointer =
+ cpu_to_le32(scb->sg_busaddr);
+ else
+ scb->dcdb.buffer_pointer =
+ cpu_to_le32(scb->data_busaddr);
+ scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
+ scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
+ scb->dcdb.sg_count = scb->sg_len;
+ scb->dcdb.reserved = 0;
+ memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
+ scb->scsi_cmd->cmd_len);
+ scb->dcdb.scsi_status = 0;
+ scb->dcdb.reserved2[0] = 0;
+ scb->dcdb.reserved2[1] = 0;
+ scb->dcdb.reserved2[2] = 0;
+ }
+ }
+
+ return ((*ha->func.issue) (ha, scb));
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_chk_status */
+/* */
+/* Routine Description: */
+/* */
+/* Check the status of commands to logical drives */
+/* Assumed to be called with the HA lock */
+/****************************************************************************/
+static void
+ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
+{
+ ips_scb_t *scb;
+ ips_stat_t *sp;
+ uint8_t basic_status;
+ uint8_t ext_status;
+ int errcode;
+ IPS_SCSI_INQ_DATA inquiryData;
+
+ METHOD_TRACE("ips_chkstatus", 1);
+
+ scb = &ha->scbs[pstatus->fields.command_id];
+ scb->basic_status = basic_status =
+ pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
+ scb->extended_status = ext_status = pstatus->fields.extended_status;
+
+ sp = &ha->sp;
+ sp->residue_len = 0;
+ sp->scb_addr = (void *) scb;
+
+ /* Remove the item from the active queue */
+ ips_removeq_scb(&ha->scb_activelist, scb);
+
+ if (!scb->scsi_cmd)
+ /* internal commands are handled in do_ipsintr */
+ return;
+
+ DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
+ ips_name,
+ ha->host_num,
+ scb->cdb[0],
+ scb->cmd.basic_io.command_id,
+ scb->bus, scb->target_id, scb->lun);
+
+ if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))
+ /* passthru - just returns the raw result */
+ return;
+
+ errcode = DID_OK;
+
+ if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
+ ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {
+
+ if (scb->bus == 0) {
+ if ((basic_status & IPS_GSC_STATUS_MASK) ==
+ IPS_CMD_RECOVERED_ERROR) {
+ DEBUG_VAR(1,
+ "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
+ ips_name, ha->host_num,
+ scb->cmd.basic_io.op_code,
+ basic_status, ext_status);
+ }
+
+ switch (scb->scsi_cmd->cmnd[0]) {
+ case ALLOW_MEDIUM_REMOVAL:
+ case REZERO_UNIT:
+ case ERASE:
+ case WRITE_FILEMARKS:
+ case SPACE:
+ errcode = DID_ERROR;
+ break;
+
+ case START_STOP:
+ break;
+
+ case TEST_UNIT_READY:
+ if (!ips_online(ha, scb)) {
+ errcode = DID_TIME_OUT;
+ }
+ break;
+
+ case INQUIRY:
+ if (ips_online(ha, scb)) {
+ ips_inquiry(ha, scb);
+ } else {
+ errcode = DID_TIME_OUT;
+ }
+ break;
+
+ case REQUEST_SENSE:
+ ips_reqsen(ha, scb);
+ break;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case RESERVE:
+ case RELEASE:
+ break;
+
+ case MODE_SENSE:
+ if (!ips_online(ha, scb)
+ || !ips_msense(ha, scb)) {
+ errcode = DID_ERROR;
+ }
+ break;
+
+ case READ_CAPACITY:
+ if (ips_online(ha, scb))
+ ips_rdcap(ha, scb);
+ else {
+ errcode = DID_TIME_OUT;
+ }
+ break;
+
+ case SEND_DIAGNOSTIC:
+ case REASSIGN_BLOCKS:
+ break;
+
+ case FORMAT_UNIT:
+ errcode = DID_ERROR;
+ break;
+
+ case SEEK_10:
+ case VERIFY:
+ case READ_DEFECT_DATA:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ break;
+
+ default:
+ errcode = DID_ERROR;
+ } /* end switch */
+
+ scb->scsi_cmd->result = errcode << 16;
+ } else { /* bus == 0 */
+ /* restrict access to physical drives */
+ if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
+ ips_scmd_buf_read(scb->scsi_cmd,
+ &inquiryData, sizeof (inquiryData));
+ if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
+ scb->scsi_cmd->result = DID_TIME_OUT << 16;
+ }
+ } /* else */
+ } else { /* recovered error / success */
+ if (scb->bus == 0) {
+ DEBUG_VAR(1,
+ "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
+ ips_name, ha->host_num,
+ scb->cmd.basic_io.op_code, basic_status,
+ ext_status);
+ }
+
+ ips_map_status(ha, scb, sp);
+ } /* else */
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_online */
+/* */
+/* Routine Description: */
+/* */
+/* Determine if a logical drive is online */
+/* */
+/****************************************************************************/
+static int
+ips_online(ips_ha_t * ha, ips_scb_t * scb)
+{
+ METHOD_TRACE("ips_online", 1);
+
+ if (scb->target_id >= IPS_MAX_LD)
+ return (0);
+
+ if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
+ memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
+ return (0);
+ }
+
+ if (ha->logical_drive_info->drive_info[scb->target_id].state !=
+ IPS_LD_OFFLINE
+ && ha->logical_drive_info->drive_info[scb->target_id].state !=
+ IPS_LD_FREE
+ && ha->logical_drive_info->drive_info[scb->target_id].state !=
+ IPS_LD_CRS
+ && ha->logical_drive_info->drive_info[scb->target_id].state !=
+ IPS_LD_SYS)
+ return (1);
+ else
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_inquiry */
+/* */
+/* Routine Description: */
+/* */
+/* Simulate an inquiry command to a logical drive */
+/* */
+/****************************************************************************/
+static int
+ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
+{
+ IPS_SCSI_INQ_DATA inquiry;
+
+ METHOD_TRACE("ips_inquiry", 1);
+
+ memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
+
+ inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
+ inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
+ inquiry.Version = IPS_SCSI_INQ_REV2;
+ inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
+ inquiry.AdditionalLength = 31;
+ inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
+ inquiry.Flags[1] =
+ IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
+ strncpy(inquiry.VendorId, "IBM ", 8);
+ strncpy(inquiry.ProductId, "SERVERAID ", 16);
+ strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
+
+ ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_rdcap */
+/* */
+/* Routine Description: */
+/* */
+/* Simulate a read capacity command to a logical drive */
+/* */
+/****************************************************************************/
+static int
+ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
+{
+ IPS_SCSI_CAPACITY cap;
+
+ METHOD_TRACE("ips_rdcap", 1);
+
+ if (scsi_bufflen(scb->scsi_cmd) < 8)
+ return (0);
+
+ cap.lba =
+ cpu_to_be32(le32_to_cpu
+ (ha->logical_drive_info->
+ drive_info[scb->target_id].sector_count) - 1);
+ cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
+
+ ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_msense */
+/* */
+/* Routine Description: */
+/* */
+/* Simulate a mode sense command to a logical drive */
+/* */
+/****************************************************************************/
+static int
+ips_msense(ips_ha_t * ha, ips_scb_t * scb)
+{
+ uint16_t heads;
+ uint16_t sectors;
+ uint32_t cylinders;
+ IPS_SCSI_MODE_PAGE_DATA mdata;
+
+ METHOD_TRACE("ips_msense", 1);
+
+ if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
+ (ha->enq->ucMiscFlag & 0x8) == 0) {
+ heads = IPS_NORM_HEADS;
+ sectors = IPS_NORM_SECTORS;
+ } else {
+ heads = IPS_COMP_HEADS;
+ sectors = IPS_COMP_SECTORS;
+ }
+
+ cylinders =
+ (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
+ 1) / (heads * sectors);
+
+ memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));
+
+ mdata.hdr.BlockDescLength = 8;
+
+ switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
+ case 0x03: /* page 3 */
+ mdata.pdata.pg3.PageCode = 3;
+ mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
+ mdata.hdr.DataLength =
+ 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
+ mdata.pdata.pg3.TracksPerZone = 0;
+ mdata.pdata.pg3.AltSectorsPerZone = 0;
+ mdata.pdata.pg3.AltTracksPerZone = 0;
+ mdata.pdata.pg3.AltTracksPerVolume = 0;
+ mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
+ mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
+ mdata.pdata.pg3.Interleave = cpu_to_be16(1);
+ mdata.pdata.pg3.TrackSkew = 0;
+ mdata.pdata.pg3.CylinderSkew = 0;
+ mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
+ break;
+
+ case 0x4:
+ mdata.pdata.pg4.PageCode = 4;
+ mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
+ mdata.hdr.DataLength =
+ 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
+ mdata.pdata.pg4.CylindersHigh =
+ cpu_to_be16((cylinders >> 8) & 0xFFFF);
+ mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
+ mdata.pdata.pg4.Heads = heads;
+ mdata.pdata.pg4.WritePrecompHigh = 0;
+ mdata.pdata.pg4.WritePrecompLow = 0;
+ mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
+ mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
+ mdata.pdata.pg4.StepRate = cpu_to_be16(1);
+ mdata.pdata.pg4.LandingZoneHigh = 0;
+ mdata.pdata.pg4.LandingZoneLow = 0;
+ mdata.pdata.pg4.flags = 0;
+ mdata.pdata.pg4.RotationalOffset = 0;
+ mdata.pdata.pg4.MediumRotationRate = 0;
+ break;
+ case 0x8:
+ mdata.pdata.pg8.PageCode = 8;
+ mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
+ mdata.hdr.DataLength =
+ 3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
+ /* everything else is left set to 0 */
+ break;
+
+ default:
+ return (0);
+ } /* end switch */
+
+ ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_reqsen */
+/* */
+/* Routine Description: */
+/* */
+/* Simulate a request sense command to a logical drive */
+/* */
+/****************************************************************************/
+static int
+ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
+{
+ IPS_SCSI_REQSEN reqsen;
+
+ METHOD_TRACE("ips_reqsen", 1);
+
+ memset(&reqsen, 0, sizeof (IPS_SCSI_REQSEN));
+
+ reqsen.ResponseCode =
+ IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
+ reqsen.AdditionalLength = 10;
+ reqsen.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
+ reqsen.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;
+
+ ips_scmd_buf_write(scb->scsi_cmd, &reqsen, sizeof (reqsen));
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_free */
+/* */
+/* Routine Description: */
+/* */
+/* Free any allocated space for this controller */
+/* */
+/****************************************************************************/
+static void
+ips_free(ips_ha_t * ha)
+{
+
+ METHOD_TRACE("ips_free", 1);
+
+ if (ha) {
+ if (ha->enq) {
+ pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+ ha->enq, ha->enq_busaddr);
+ ha->enq = NULL;
+ }
+
+ kfree(ha->conf);
+ ha->conf = NULL;
+
+ if (ha->adapt) {
+ pci_free_consistent(ha->pcidev,
+ sizeof (IPS_ADAPTER) +
+ sizeof (IPS_IO_CMD), ha->adapt,
+ ha->adapt->hw_status_start);
+ ha->adapt = NULL;
+ }
+
+ if (ha->logical_drive_info) {
+ pci_free_consistent(ha->pcidev,
+ sizeof (IPS_LD_INFO),
+ ha->logical_drive_info,
+ ha->logical_drive_info_dma_addr);
+ ha->logical_drive_info = NULL;
+ }
+
+ kfree(ha->nvram);
+ ha->nvram = NULL;
+
+ kfree(ha->subsys);
+ ha->subsys = NULL;
+
+ if (ha->ioctl_data) {
+ pci_free_consistent(ha->pcidev, ha->ioctl_len,
+ ha->ioctl_data, ha->ioctl_busaddr);
+ ha->ioctl_data = NULL;
+ ha->ioctl_datasize = 0;
+ ha->ioctl_len = 0;
+ }
+ ips_deallocatescbs(ha, ha->max_cmds);
+
+ /* free memory mapped (if applicable) */
+ if (ha->mem_ptr) {
+ iounmap(ha->ioremap_ptr);
+ ha->ioremap_ptr = NULL;
+ ha->mem_ptr = NULL;
+ }
+
+ ha->mem_addr = 0;
+
+ }
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_deallocatescbs */
+/* */
+/* Routine Description: */
+/* */
+/* Free the command blocks */
+/* */
+/****************************************************************************/
+static int
+ips_deallocatescbs(ips_ha_t * ha, int cmds)
+{
+ if (ha->scbs) {
+ pci_free_consistent(ha->pcidev,
+ IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
+ ha->scbs->sg_list.list,
+ ha->scbs->sg_busaddr);
+ pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+ ha->scbs, ha->scbs->scb_busaddr);
+ ha->scbs = NULL;
+ } /* end if */
+ return 1;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_allocatescbs */
+/* */
+/* Routine Description: */
+/* */
+/* Allocate the command blocks */
+/* */
+/****************************************************************************/
+static int
+ips_allocatescbs(ips_ha_t * ha)
+{
+ ips_scb_t *scb_p;
+ IPS_SG_LIST ips_sg;
+ int i;
+ dma_addr_t command_dma, sg_dma;
+
+ METHOD_TRACE("ips_allocatescbs", 1);
+
+ /* Allocate memory for the SCBs */
+ ha->scbs =
+ pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
+ &command_dma);
+ if (ha->scbs == NULL)
+ return 0;
+ ips_sg.list =
+ pci_alloc_consistent(ha->pcidev,
+ IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
+ ha->max_cmds, &sg_dma);
+ if (ips_sg.list == NULL) {
+ pci_free_consistent(ha->pcidev,
+ ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
+ command_dma);
+ return 0;
+ }
+
+ memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));
+
+ for (i = 0; i < ha->max_cmds; i++) {
+ scb_p = &ha->scbs[i];
+ scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;
+ /* set up S/G list */
+ if (IPS_USE_ENH_SGLIST(ha)) {
+ scb_p->sg_list.enh_list =
+ ips_sg.enh_list + i * IPS_MAX_SG;
+ scb_p->sg_busaddr =
+ sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
+ } else {
+ scb_p->sg_list.std_list =
+ ips_sg.std_list + i * IPS_MAX_SG;
+ scb_p->sg_busaddr =
+ sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
+ }
+
+ /* add to the free list */
+ if (i < ha->max_cmds - 1) {
+ scb_p->q_next = ha->scb_freelist;
+ ha->scb_freelist = scb_p;
+ }
+ }
+
+ /* success */
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_init_scb */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize a CCB to default values */
+/* */
+/****************************************************************************/
+static void
+ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
+{
+ IPS_SG_LIST sg_list;
+ uint32_t cmd_busaddr, sg_busaddr;
+ METHOD_TRACE("ips_init_scb", 1);
+
+ if (scb == NULL)
+ return;
+
+ sg_list.list = scb->sg_list.list;
+ cmd_busaddr = scb->scb_busaddr;
+ sg_busaddr = scb->sg_busaddr;
+ /* zero fill */
+ memset(scb, 0, sizeof (ips_scb_t));
+ memset(ha->dummy, 0, sizeof (IPS_IO_CMD));
+
+ /* Initialize dummy command bucket */
+ ha->dummy->op_code = 0xFF;
+ ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
+ + sizeof (IPS_ADAPTER));
+ ha->dummy->command_id = IPS_MAX_CMDS;
+
+ /* set bus address of scb */
+ scb->scb_busaddr = cmd_busaddr;
+ scb->sg_busaddr = sg_busaddr;
+ scb->sg_list.list = sg_list.list;
+
+ /* Neptune Fix */
+ scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
+ scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
+ + sizeof (IPS_ADAPTER));
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_get_scb */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize a CCB to default values */
+/* */
+/* ASSUMED to be called from within a lock */
+/* */
+/****************************************************************************/
+static ips_scb_t *
+ips_getscb(ips_ha_t * ha)
+{
+ ips_scb_t *scb;
+
+ METHOD_TRACE("ips_getscb", 1);
+
+ if ((scb = ha->scb_freelist) == NULL) {
+
+ return (NULL);
+ }
+
+ ha->scb_freelist = scb->q_next;
+ scb->flags = 0;
+ scb->q_next = NULL;
+
+ ips_init_scb(ha, scb);
+
+ return (scb);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_free_scb */
+/* */
+/* Routine Description: */
+/* */
+/* Return an unused CCB back to the free list */
+/* */
+/* ASSUMED to be called from within a lock */
+/* */
+/****************************************************************************/
+static void
+ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
+{
+
+ METHOD_TRACE("ips_freescb", 1);
+ if (scb->flags & IPS_SCB_MAP_SG)
+ scsi_dma_unmap(scb->scsi_cmd);
+ else if (scb->flags & IPS_SCB_MAP_SINGLE)
+ pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
+ IPS_DMA_DIR(scb));
+
+ /* check to make sure this is not our "special" scb */
+ if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
+ scb->q_next = ha->scb_freelist;
+ ha->scb_freelist = scb;
+ }
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_isinit_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Is controller initialized ? */
+/* */
+/****************************************************************************/
+static int
+ips_isinit_copperhead(ips_ha_t * ha)
+{
+ uint8_t scpr;
+ uint8_t isr;
+
+ METHOD_TRACE("ips_isinit_copperhead", 1);
+
+ isr = inb(ha->io_addr + IPS_REG_HISR);
+ scpr = inb(ha->io_addr + IPS_REG_SCPR);
+
+ if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
+ return (0);
+ else
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_isinit_copperhead_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Is controller initialized ? */
+/* */
+/****************************************************************************/
+static int
+ips_isinit_copperhead_memio(ips_ha_t * ha)
+{
+ uint8_t isr = 0;
+ uint8_t scpr;
+
+ METHOD_TRACE("ips_is_init_copperhead_memio", 1);
+
+ isr = readb(ha->mem_ptr + IPS_REG_HISR);
+ scpr = readb(ha->mem_ptr + IPS_REG_SCPR);
+
+ if (((isr & IPS_BIT_EI) == 0) && ((scpr & IPS_BIT_EBM) == 0))
+ return (0);
+ else
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_isinit_morpheus */
+/* */
+/* Routine Description: */
+/* */
+/* Is controller initialized ? */
+/* */
+/****************************************************************************/
+static int
+ips_isinit_morpheus(ips_ha_t * ha)
+{
+ uint32_t post;
+ uint32_t bits;
+
+ METHOD_TRACE("ips_is_init_morpheus", 1);
+
+ if (ips_isintr_morpheus(ha))
+ ips_flush_and_reset(ha);
+
+ post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
+ bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ if (post == 0)
+ return (0);
+ else if (bits & 0x3)
+ return (0);
+ else
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_flush_and_reset */
+/* */
+/* Routine Description: */
+/* */
+/* Perform cleanup ( FLUSH and RESET ) when the adapter is in an unknown */
+/* state ( was trying to INIT and an interrupt was already pending ) ... */
+/* */
+/****************************************************************************/
+static void
+ips_flush_and_reset(ips_ha_t *ha)
+{
+ ips_scb_t *scb;
+ int ret;
+ int time;
+ int done;
+ dma_addr_t command_dma;
+
+ /* Create a usuable SCB */
+ scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+ if (scb) {
+ memset(scb, 0, sizeof(ips_scb_t));
+ ips_init_scb(ha, scb);
+ scb->scb_busaddr = command_dma;
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FLUSH;
+
+ scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
+ scb->cmd.flush_cache.command_id = IPS_MAX_CMDS; /* Use an ID that would otherwise not exist */
+ scb->cmd.flush_cache.state = IPS_NORM_STATE;
+ scb->cmd.flush_cache.reserved = 0;
+ scb->cmd.flush_cache.reserved2 = 0;
+ scb->cmd.flush_cache.reserved3 = 0;
+ scb->cmd.flush_cache.reserved4 = 0;
+
+ ret = ips_send_cmd(ha, scb); /* Send the Flush Command */
+
+ if (ret == IPS_SUCCESS) {
+ time = 60 * IPS_ONE_SEC; /* Max Wait time is 60 seconds */
+ done = 0;
+
+ while ((time > 0) && (!done)) {
+ done = ips_poll_for_flush_complete(ha);
+ /* This may look evil, but it's only done during extremely rare start-up conditions ! */
+ udelay(1000);
+ time--;
+ }
+ }
+ }
+
+ /* Now RESET and INIT the adapter */
+ (*ha->func.reset) (ha);
+
+ pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+ return;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_poll_for_flush_complete */
+/* */
+/* Routine Description: */
+/* */
+/* Poll for the Flush Command issued by ips_flush_and_reset() to complete */
+/* All other responses are just taken off the queue and ignored */
+/* */
+/****************************************************************************/
+static int
+ips_poll_for_flush_complete(ips_ha_t * ha)
+{
+ IPS_STATUS cstatus;
+
+ while (TRUE) {
+ cstatus.value = (*ha->func.statupd) (ha);
+
+ if (cstatus.value == 0xffffffff) /* If No Interrupt to process */
+ break;
+
+ /* Success is when we see the Flush Command ID */
+ if (cstatus.fields.command_id == IPS_MAX_CMDS)
+ return 1;
+ }
+
+ return 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_enable_int_copperhead */
+/* */
+/* Routine Description: */
+/* Turn on interrupts */
+/* */
+/****************************************************************************/
+static void
+ips_enable_int_copperhead(ips_ha_t * ha)
+{
+ METHOD_TRACE("ips_enable_int_copperhead", 1);
+
+ outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI);
+ inb(ha->io_addr + IPS_REG_HISR); /*Ensure PCI Posting Completes*/
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_enable_int_copperhead_memio */
+/* */
+/* Routine Description: */
+/* Turn on interrupts */
+/* */
+/****************************************************************************/
+static void
+ips_enable_int_copperhead_memio(ips_ha_t * ha)
+{
+ METHOD_TRACE("ips_enable_int_copperhead_memio", 1);
+
+ writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
+ readb(ha->mem_ptr + IPS_REG_HISR); /*Ensure PCI Posting Completes*/
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_enable_int_morpheus */
+/* */
+/* Routine Description: */
+/* Turn on interrupts */
+/* */
+/****************************************************************************/
+static void
+ips_enable_int_morpheus(ips_ha_t * ha)
+{
+ uint32_t Oimr;
+
+ METHOD_TRACE("ips_enable_int_morpheus", 1);
+
+ Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
+ Oimr &= ~0x08;
+ writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
+ readl(ha->mem_ptr + IPS_REG_I960_OIMR); /*Ensure PCI Posting Completes*/
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_init_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize a copperhead controller */
+/* */
+/****************************************************************************/
+static int
+ips_init_copperhead(ips_ha_t * ha)
+{
+ uint8_t Isr;
+ uint8_t Cbsp;
+ uint8_t PostByte[IPS_MAX_POST_BYTES];
+ uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
+ int i, j;
+
+ METHOD_TRACE("ips_init_copperhead", 1);
+
+ for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
+ for (j = 0; j < 45; j++) {
+ Isr = inb(ha->io_addr + IPS_REG_HISR);
+ if (Isr & IPS_BIT_GHI)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (j >= 45)
+ /* error occurred */
+ return (0);
+
+ PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
+ outb(Isr, ha->io_addr + IPS_REG_HISR);
+ }
+
+ if (PostByte[0] < IPS_GOOD_POST_STATUS) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "reset controller fails (post status %x %x).\n",
+ PostByte[0], PostByte[1]);
+
+ return (0);
+ }
+
+ for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
+ for (j = 0; j < 240; j++) {
+ Isr = inb(ha->io_addr + IPS_REG_HISR);
+ if (Isr & IPS_BIT_GHI)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (j >= 240)
+ /* error occurred */
+ return (0);
+
+ ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
+ outb(Isr, ha->io_addr + IPS_REG_HISR);
+ }
+
+ for (i = 0; i < 240; i++) {
+ Cbsp = inb(ha->io_addr + IPS_REG_CBSP);
+
+ if ((Cbsp & IPS_BIT_OP) == 0)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (i >= 240)
+ /* reset failed */
+ return (0);
+
+ /* setup CCCR */
+ outl(0x1010, ha->io_addr + IPS_REG_CCCR);
+
+ /* Enable busmastering */
+ outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
+
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ /* fix for anaconda64 */
+ outl(0, ha->io_addr + IPS_REG_NDAE);
+
+ /* Enable interrupts */
+ outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_init_copperhead_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize a copperhead controller with memory mapped I/O */
+/* */
+/****************************************************************************/
+static int
+ips_init_copperhead_memio(ips_ha_t * ha)
+{
+ uint8_t Isr = 0;
+ uint8_t Cbsp;
+ uint8_t PostByte[IPS_MAX_POST_BYTES];
+ uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
+ int i, j;
+
+ METHOD_TRACE("ips_init_copperhead_memio", 1);
+
+ for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
+ for (j = 0; j < 45; j++) {
+ Isr = readb(ha->mem_ptr + IPS_REG_HISR);
+ if (Isr & IPS_BIT_GHI)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (j >= 45)
+ /* error occurred */
+ return (0);
+
+ PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
+ writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
+ }
+
+ if (PostByte[0] < IPS_GOOD_POST_STATUS) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "reset controller fails (post status %x %x).\n",
+ PostByte[0], PostByte[1]);
+
+ return (0);
+ }
+
+ for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
+ for (j = 0; j < 240; j++) {
+ Isr = readb(ha->mem_ptr + IPS_REG_HISR);
+ if (Isr & IPS_BIT_GHI)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (j >= 240)
+ /* error occurred */
+ return (0);
+
+ ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
+ writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
+ }
+
+ for (i = 0; i < 240; i++) {
+ Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);
+
+ if ((Cbsp & IPS_BIT_OP) == 0)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (i >= 240)
+ /* error occurred */
+ return (0);
+
+ /* setup CCCR */
+ writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);
+
+ /* Enable busmastering */
+ writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
+
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ /* fix for anaconda64 */
+ writel(0, ha->mem_ptr + IPS_REG_NDAE);
+
+ /* Enable interrupts */
+ writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
+
+ /* if we get here then everything went OK */
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_init_morpheus */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize a morpheus controller */
+/* */
+/****************************************************************************/
+static int
+ips_init_morpheus(ips_ha_t * ha)
+{
+ uint32_t Post;
+ uint32_t Config;
+ uint32_t Isr;
+ uint32_t Oimr;
+ int i;
+
+ METHOD_TRACE("ips_init_morpheus", 1);
+
+ /* Wait up to 45 secs for Post */
+ for (i = 0; i < 45; i++) {
+ Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ if (Isr & IPS_BIT_I960_MSG0I)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (i >= 45) {
+ /* error occurred */
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "timeout waiting for post.\n");
+
+ return (0);
+ }
+
+ Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
+
+ if (Post == 0x4F00) { /* If Flashing the Battery PIC */
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Flashing Battery PIC, Please wait ...\n");
+
+ /* Clear the interrupt bit */
+ Isr = (uint32_t) IPS_BIT_I960_MSG0I;
+ writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ for (i = 0; i < 120; i++) { /* Wait Up to 2 Min. for Completion */
+ Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
+ if (Post != 0x4F00)
+ break;
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (i >= 120) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "timeout waiting for Battery PIC Flash\n");
+ return (0);
+ }
+
+ }
+
+ /* Clear the interrupt bit */
+ Isr = (uint32_t) IPS_BIT_I960_MSG0I;
+ writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ if (Post < (IPS_GOOD_POST_STATUS << 8)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "reset controller fails (post status %x).\n", Post);
+
+ return (0);
+ }
+
+ /* Wait up to 240 secs for config bytes */
+ for (i = 0; i < 240; i++) {
+ Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ if (Isr & IPS_BIT_I960_MSG1I)
+ break;
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+ }
+
+ if (i >= 240) {
+ /* error occurred */
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "timeout waiting for config.\n");
+
+ return (0);
+ }
+
+ Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
+
+ /* Clear interrupt bit */
+ Isr = (uint32_t) IPS_BIT_I960_MSG1I;
+ writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ /* Turn on the interrupts */
+ Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
+ Oimr &= ~0x8;
+ writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
+
+ /* if we get here then everything went OK */
+
+ /* Since we did a RESET, an EraseStripeLock may be needed */
+ if (Post == 0xEF10) {
+ if ((Config == 0x000F) || (Config == 0x0009))
+ ha->requires_esl = 1;
+ }
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_reset_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Reset the controller */
+/* */
+/****************************************************************************/
+static int
+ips_reset_copperhead(ips_ha_t * ha)
+{
+ int reset_counter;
+
+ METHOD_TRACE("ips_reset_copperhead", 1);
+
+ DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
+ ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
+
+ reset_counter = 0;
+
+ while (reset_counter < 2) {
+ reset_counter++;
+
+ outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+
+ outb(0, ha->io_addr + IPS_REG_SCPR);
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+
+ if ((*ha->func.init) (ha))
+ break;
+ else if (reset_counter >= 2) {
+
+ return (0);
+ }
+ }
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_reset_copperhead_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Reset the controller */
+/* */
+/****************************************************************************/
+static int
+ips_reset_copperhead_memio(ips_ha_t * ha)
+{
+ int reset_counter;
+
+ METHOD_TRACE("ips_reset_copperhead_memio", 1);
+
+ DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
+ ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
+
+ reset_counter = 0;
+
+ while (reset_counter < 2) {
+ reset_counter++;
+
+ writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+
+ writeb(0, ha->mem_ptr + IPS_REG_SCPR);
+
+ /* Delay for 1 Second */
+ MDELAY(IPS_ONE_SEC);
+
+ if ((*ha->func.init) (ha))
+ break;
+ else if (reset_counter >= 2) {
+
+ return (0);
+ }
+ }
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_reset_morpheus */
+/* */
+/* Routine Description: */
+/* */
+/* Reset the controller */
+/* */
+/****************************************************************************/
+static int
+ips_reset_morpheus(ips_ha_t * ha)
+{
+ int reset_counter;
+ uint8_t junk;
+
+ METHOD_TRACE("ips_reset_morpheus", 1);
+
+ DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
+ ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
+
+ reset_counter = 0;
+
+ while (reset_counter < 2) {
+ reset_counter++;
+
+ writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
+
+ /* Delay for 5 Seconds */
+ MDELAY(5 * IPS_ONE_SEC);
+
+ /* Do a PCI config read to wait for adapter */
+ pci_read_config_byte(ha->pcidev, 4, &junk);
+
+ if ((*ha->func.init) (ha))
+ break;
+ else if (reset_counter >= 2) {
+
+ return (0);
+ }
+ }
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_statinit */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize the status queues on the controller */
+/* */
+/****************************************************************************/
+static void
+ips_statinit(ips_ha_t * ha)
+{
+ uint32_t phys_status_start;
+
+ METHOD_TRACE("ips_statinit", 1);
+
+ ha->adapt->p_status_start = ha->adapt->status;
+ ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
+ ha->adapt->p_status_tail = ha->adapt->status;
+
+ phys_status_start = ha->adapt->hw_status_start;
+ outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
+ outl(phys_status_start + IPS_STATUS_Q_SIZE,
+ ha->io_addr + IPS_REG_SQER);
+ outl(phys_status_start + IPS_STATUS_SIZE,
+ ha->io_addr + IPS_REG_SQHR);
+ outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
+
+ ha->adapt->hw_status_tail = phys_status_start;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_statinit_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Initialize the status queues on the controller */
+/* */
+/****************************************************************************/
+static void
+ips_statinit_memio(ips_ha_t * ha)
+{
+ uint32_t phys_status_start;
+
+ METHOD_TRACE("ips_statinit_memio", 1);
+
+ ha->adapt->p_status_start = ha->adapt->status;
+ ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
+ ha->adapt->p_status_tail = ha->adapt->status;
+
+ phys_status_start = ha->adapt->hw_status_start;
+ writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
+ writel(phys_status_start + IPS_STATUS_Q_SIZE,
+ ha->mem_ptr + IPS_REG_SQER);
+ writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
+ writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);
+
+ ha->adapt->hw_status_tail = phys_status_start;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_statupd_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Remove an element from the status queue */
+/* */
+/****************************************************************************/
+static uint32_t
+ips_statupd_copperhead(ips_ha_t * ha)
+{
+ METHOD_TRACE("ips_statupd_copperhead", 1);
+
+ if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
+ ha->adapt->p_status_tail++;
+ ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
+ } else {
+ ha->adapt->p_status_tail = ha->adapt->p_status_start;
+ ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
+ }
+
+ outl(ha->adapt->hw_status_tail,
+ ha->io_addr + IPS_REG_SQTR);
+
+ return (ha->adapt->p_status_tail->value);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_statupd_copperhead_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Remove an element from the status queue */
+/* */
+/****************************************************************************/
+static uint32_t
+ips_statupd_copperhead_memio(ips_ha_t * ha)
+{
+ METHOD_TRACE("ips_statupd_copperhead_memio", 1);
+
+ if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
+ ha->adapt->p_status_tail++;
+ ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
+ } else {
+ ha->adapt->p_status_tail = ha->adapt->p_status_start;
+ ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
+ }
+
+ writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);
+
+ return (ha->adapt->p_status_tail->value);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_statupd_morpheus */
+/* */
+/* Routine Description: */
+/* */
+/* Remove an element from the status queue */
+/* */
+/****************************************************************************/
+static uint32_t
+ips_statupd_morpheus(ips_ha_t * ha)
+{
+ uint32_t val;
+
+ METHOD_TRACE("ips_statupd_morpheus", 1);
+
+ val = readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ);
+
+ return (val);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_issue_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Send a command down to the controller */
+/* */
+/****************************************************************************/
+static int
+ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
+{
+ uint32_t TimeOut;
+ uint32_t val;
+
+ METHOD_TRACE("ips_issue_copperhead", 1);
+
+ if (scb->scsi_cmd) {
+ DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
+ ips_name,
+ ha->host_num,
+ scb->cdb[0],
+ scb->cmd.basic_io.command_id,
+ scb->bus, scb->target_id, scb->lun);
+ } else {
+ DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d",
+ ips_name, ha->host_num, scb->cmd.basic_io.command_id);
+ }
+
+ TimeOut = 0;
+
+ while ((val =
+ le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
+ udelay(1000);
+
+ if (++TimeOut >= IPS_SEM_TIMEOUT) {
+ if (!(val & IPS_BIT_START_STOP))
+ break;
+
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "ips_issue val [0x%x].\n", val);
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "ips_issue semaphore chk timeout.\n");
+
+ return (IPS_FAILURE);
+ } /* end if */
+ } /* end while */
+
+ outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
+ outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
+
+ return (IPS_SUCCESS);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_issue_copperhead_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Send a command down to the controller */
+/* */
+/****************************************************************************/
+static int
+ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
+{
+ uint32_t TimeOut;
+ uint32_t val;
+
+ METHOD_TRACE("ips_issue_copperhead_memio", 1);
+
+ if (scb->scsi_cmd) {
+ DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
+ ips_name,
+ ha->host_num,
+ scb->cdb[0],
+ scb->cmd.basic_io.command_id,
+ scb->bus, scb->target_id, scb->lun);
+ } else {
+ DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
+ ips_name, ha->host_num, scb->cmd.basic_io.command_id);
+ }
+
+ TimeOut = 0;
+
+ while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
+ udelay(1000);
+
+ if (++TimeOut >= IPS_SEM_TIMEOUT) {
+ if (!(val & IPS_BIT_START_STOP))
+ break;
+
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "ips_issue val [0x%x].\n", val);
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "ips_issue semaphore chk timeout.\n");
+
+ return (IPS_FAILURE);
+ } /* end if */
+ } /* end while */
+
+ writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
+ writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);
+
+ return (IPS_SUCCESS);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_issue_i2o */
+/* */
+/* Routine Description: */
+/* */
+/* Send a command down to the controller */
+/* */
+/****************************************************************************/
+static int
+ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
+{
+
+ METHOD_TRACE("ips_issue_i2o", 1);
+
+ if (scb->scsi_cmd) {
+ DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
+ ips_name,
+ ha->host_num,
+ scb->cdb[0],
+ scb->cmd.basic_io.command_id,
+ scb->bus, scb->target_id, scb->lun);
+ } else {
+ DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
+ ips_name, ha->host_num, scb->cmd.basic_io.command_id);
+ }
+
+ outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
+
+ return (IPS_SUCCESS);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_issue_i2o_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Send a command down to the controller */
+/* */
+/****************************************************************************/
+static int
+ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
+{
+
+ METHOD_TRACE("ips_issue_i2o_memio", 1);
+
+ if (scb->scsi_cmd) {
+ DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
+ ips_name,
+ ha->host_num,
+ scb->cdb[0],
+ scb->cmd.basic_io.command_id,
+ scb->bus, scb->target_id, scb->lun);
+ } else {
+ DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
+ ips_name, ha->host_num, scb->cmd.basic_io.command_id);
+ }
+
+ writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);
+
+ return (IPS_SUCCESS);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_isintr_copperhead */
+/* */
+/* Routine Description: */
+/* */
+/* Test to see if an interrupt is for us */
+/* */
+/****************************************************************************/
+static int
+ips_isintr_copperhead(ips_ha_t * ha)
+{
+ uint8_t Isr;
+
+ METHOD_TRACE("ips_isintr_copperhead", 2);
+
+ Isr = inb(ha->io_addr + IPS_REG_HISR);
+
+ if (Isr == 0xFF)
+ /* ?!?! Nothing really there */
+ return (0);
+
+ if (Isr & IPS_BIT_SCE)
+ return (1);
+ else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
+ /* status queue overflow or GHI */
+ /* just clear the interrupt */
+ outb(Isr, ha->io_addr + IPS_REG_HISR);
+ }
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_isintr_copperhead_memio */
+/* */
+/* Routine Description: */
+/* */
+/* Test to see if an interrupt is for us */
+/* */
+/****************************************************************************/
+static int
+ips_isintr_copperhead_memio(ips_ha_t * ha)
+{
+ uint8_t Isr;
+
+ METHOD_TRACE("ips_isintr_memio", 2);
+
+ Isr = readb(ha->mem_ptr + IPS_REG_HISR);
+
+ if (Isr == 0xFF)
+ /* ?!?! Nothing really there */
+ return (0);
+
+ if (Isr & IPS_BIT_SCE)
+ return (1);
+ else if (Isr & (IPS_BIT_SQO | IPS_BIT_GHI)) {
+ /* status queue overflow or GHI */
+ /* just clear the interrupt */
+ writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
+ }
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_isintr_morpheus */
+/* */
+/* Routine Description: */
+/* */
+/* Test to see if an interrupt is for us */
+/* */
+/****************************************************************************/
+static int
+ips_isintr_morpheus(ips_ha_t * ha)
+{
+ uint32_t Isr;
+
+ METHOD_TRACE("ips_isintr_morpheus", 2);
+
+ Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
+
+ if (Isr & IPS_BIT_I2O_OPQI)
+ return (1);
+ else
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_wait */
+/* */
+/* Routine Description: */
+/* */
+/* Wait for a command to complete */
+/* */
+/****************************************************************************/
+static int
+ips_wait(ips_ha_t * ha, int time, int intr)
+{
+ int ret;
+ int done;
+
+ METHOD_TRACE("ips_wait", 1);
+
+ ret = IPS_FAILURE;
+ done = FALSE;
+
+ time *= IPS_ONE_SEC; /* convert seconds */
+
+ while ((time > 0) && (!done)) {
+ if (intr == IPS_INTR_ON) {
+ if (ha->waitflag == FALSE) {
+ ret = IPS_SUCCESS;
+ done = TRUE;
+ break;
+ }
+ } else if (intr == IPS_INTR_IORL) {
+ if (ha->waitflag == FALSE) {
+ /*
+ * controller generated an interrupt to
+ * acknowledge completion of the command
+ * and ips_intr() has serviced the interrupt.
+ */
+ ret = IPS_SUCCESS;
+ done = TRUE;
+ break;
+ }
+
+ /*
+ * NOTE: we already have the io_request_lock so
+ * even if we get an interrupt it won't get serviced
+ * until after we finish.
+ */
+
+ (*ha->func.intr) (ha);
+ }
+
+ /* This looks like a very evil loop, but it only does this during start-up */
+ udelay(1000);
+ time--;
+ }
+
+ return (ret);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_write_driver_status */
+/* */
+/* Routine Description: */
+/* */
+/* Write OS/Driver version to Page 5 of the nvram on the controller */
+/* */
+/****************************************************************************/
+static int
+ips_write_driver_status(ips_ha_t * ha, int intr)
+{
+ METHOD_TRACE("ips_write_driver_status", 1);
+
+ if (!ips_readwrite_page5(ha, FALSE, intr)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "unable to read NVRAM page 5.\n");
+
+ return (0);
+ }
+
+ /* check to make sure the page has a valid */
+ /* signature */
+ if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
+ DEBUG_VAR(1,
+ "(%s%d) NVRAM page 5 has an invalid signature: %X.",
+ ips_name, ha->host_num, ha->nvram->signature);
+ ha->nvram->signature = IPS_NVRAM_P5_SIG;
+ }
+
+ DEBUG_VAR(2,
+ "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
+ ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
+ ha->nvram->adapter_slot, ha->nvram->bios_high[0],
+ ha->nvram->bios_high[1], ha->nvram->bios_high[2],
+ ha->nvram->bios_high[3], ha->nvram->bios_low[0],
+ ha->nvram->bios_low[1], ha->nvram->bios_low[2],
+ ha->nvram->bios_low[3]);
+
+ ips_get_bios_version(ha, intr);
+
+ /* change values (as needed) */
+ ha->nvram->operating_system = IPS_OS_LINUX;
+ ha->nvram->adapter_type = ha->ad_type;
+ strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
+ strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
+ strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
+ strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
+
+ ha->nvram->versioning = 0; /* Indicate the Driver Does Not Support Versioning */
+
+ /* now update the page */
+ if (!ips_readwrite_page5(ha, TRUE, intr)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "unable to write NVRAM page 5.\n");
+
+ return (0);
+ }
+
+ /* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */
+ ha->slot_num = ha->nvram->adapter_slot;
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_read_adapter_status */
+/* */
+/* Routine Description: */
+/* */
+/* Do an Inquiry command to the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_read_adapter_status(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+ int ret;
+
+ METHOD_TRACE("ips_read_adapter_status", 1);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_ENQUIRY;
+
+ scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.basic_io.sg_count = 0;
+ scb->cmd.basic_io.lba = 0;
+ scb->cmd.basic_io.sector_count = 0;
+ scb->cmd.basic_io.log_drv = 0;
+ scb->data_len = sizeof (*ha->enq);
+ scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
+
+ /* send command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
+ return (0);
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_read_subsystem_parameters */
+/* */
+/* Routine Description: */
+/* */
+/* Read subsystem parameters from the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+ int ret;
+
+ METHOD_TRACE("ips_read_subsystem_parameters", 1);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_GET_SUBSYS;
+
+ scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.basic_io.sg_count = 0;
+ scb->cmd.basic_io.lba = 0;
+ scb->cmd.basic_io.sector_count = 0;
+ scb->cmd.basic_io.log_drv = 0;
+ scb->data_len = sizeof (*ha->subsys);
+ scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
+
+ /* send command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
+ return (0);
+
+ memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_read_config */
+/* */
+/* Routine Description: */
+/* */
+/* Read the configuration on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_read_config(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+ int i;
+ int ret;
+
+ METHOD_TRACE("ips_read_config", 1);
+
+ /* set defaults for initiator IDs */
+ for (i = 0; i < 4; i++)
+ ha->conf->init_id[i] = 7;
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_READ_CONF;
+
+ scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
+ scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->data_len = sizeof (*ha->conf);
+ scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
+
+ /* send command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
+
+ memset(ha->conf, 0, sizeof (IPS_CONF));
+
+ /* reset initiator IDs */
+ for (i = 0; i < 4; i++)
+ ha->conf->init_id[i] = 7;
+
+ /* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */
+ if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
+ IPS_CMD_CMPLT_WERROR)
+ return (1);
+
+ return (0);
+ }
+
+ memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_readwrite_page5 */
+/* */
+/* Routine Description: */
+/* */
+/* Read nvram page 5 from the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
+{
+ ips_scb_t *scb;
+ int ret;
+
+ METHOD_TRACE("ips_readwrite_page5", 1);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;
+
+ scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
+ scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.nvram.page = 5;
+ scb->cmd.nvram.write = write;
+ scb->cmd.nvram.reserved = 0;
+ scb->cmd.nvram.reserved2 = 0;
+ scb->data_len = sizeof (*ha->nvram);
+ scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
+ if (write)
+ memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
+
+ /* issue the command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
+
+ memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));
+
+ return (0);
+ }
+ if (!write)
+ memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_clear_adapter */
+/* */
+/* Routine Description: */
+/* */
+/* Clear the stripe lock tables */
+/* */
+/****************************************************************************/
+static int
+ips_clear_adapter(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+ int ret;
+
+ METHOD_TRACE("ips_clear_adapter", 1);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_reset_timeout;
+ scb->cdb[0] = IPS_CMD_CONFIG_SYNC;
+
+ scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
+ scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.config_sync.channel = 0;
+ scb->cmd.config_sync.source_target = IPS_POCL;
+ scb->cmd.config_sync.reserved = 0;
+ scb->cmd.config_sync.reserved2 = 0;
+ scb->cmd.config_sync.reserved3 = 0;
+
+ /* issue command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
+ return (0);
+
+ /* send unlock stripe command */
+ ips_init_scb(ha, scb);
+
+ scb->cdb[0] = IPS_CMD_ERROR_TABLE;
+ scb->timeout = ips_reset_timeout;
+
+ scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
+ scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.unlock_stripe.log_drv = 0;
+ scb->cmd.unlock_stripe.control = IPS_CSL;
+ scb->cmd.unlock_stripe.reserved = 0;
+ scb->cmd.unlock_stripe.reserved2 = 0;
+ scb->cmd.unlock_stripe.reserved3 = 0;
+
+ /* issue command */
+ if (((ret =
+ ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
+ || (ret == IPS_SUCCESS_IMM)
+ || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
+ return (0);
+
+ return (1);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_ffdc_reset */
+/* */
+/* Routine Description: */
+/* */
+/* FFDC: write reset info */
+/* */
+/****************************************************************************/
+static void
+ips_ffdc_reset(ips_ha_t * ha, int intr)
+{
+ ips_scb_t *scb;
+
+ METHOD_TRACE("ips_ffdc_reset", 1);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FFDC;
+ scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
+ scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.ffdc.reset_count = ha->reset_count;
+ scb->cmd.ffdc.reset_type = 0x80;
+
+ /* convert time to what the card wants */
+ ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
+
+ /* issue command */
+ ips_send_wait(ha, scb, ips_cmd_timeout, intr);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_ffdc_time */
+/* */
+/* Routine Description: */
+/* */
+/* FFDC: write time info */
+/* */
+/****************************************************************************/
+static void
+ips_ffdc_time(ips_ha_t * ha)
+{
+ ips_scb_t *scb;
+
+ METHOD_TRACE("ips_ffdc_time", 1);
+
+ DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
+
+ scb = &ha->scbs[ha->max_cmds - 1];
+
+ ips_init_scb(ha, scb);
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FFDC;
+ scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
+ scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
+ scb->cmd.ffdc.reset_count = 0;
+ scb->cmd.ffdc.reset_type = 0;
+
+ /* convert time to what the card wants */
+ ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
+
+ /* issue command */
+ ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_fix_ffdc_time */
+/* */
+/* Routine Description: */
+/* Adjust time_t to what the card wants */
+/* */
+/****************************************************************************/
+static void
+ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time)
+{
+ long days;
+ long rem;
+ int i;
+ int year;
+ int yleap;
+ int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR };
+ int month_lengths[12][2] = { {31, 31},
+ {28, 29},
+ {31, 31},
+ {30, 30},
+ {31, 31},
+ {30, 30},
+ {31, 31},
+ {31, 31},
+ {30, 30},
+ {31, 31},
+ {30, 30},
+ {31, 31}
+ };
+
+ METHOD_TRACE("ips_fix_ffdc_time", 1);
+
+ days = current_time / IPS_SECS_DAY;
+ rem = current_time % IPS_SECS_DAY;
+
+ scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
+ rem = rem % IPS_SECS_HOUR;
+ scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
+ scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
+
+ year = IPS_EPOCH_YEAR;
+ while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
+ int newy;
+
+ newy = year + (days / IPS_DAYS_NORMAL_YEAR);
+ if (days < 0)
+ --newy;
+ days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
+ IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
+ IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
+ year = newy;
+ }
+
+ scb->cmd.ffdc.yearH = year / 100;
+ scb->cmd.ffdc.yearL = year % 100;
+
+ for (i = 0; days >= month_lengths[i][yleap]; ++i)
+ days -= month_lengths[i][yleap];
+
+ scb->cmd.ffdc.month = i + 1;
+ scb->cmd.ffdc.day = days + 1;
+}
+
+/****************************************************************************
+ * BIOS Flash Routines *
+ ****************************************************************************/
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_erase_bios */
+/* */
+/* Routine Description: */
+/* Erase the BIOS on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_erase_bios(ips_ha_t * ha)
+{
+ int timeout;
+ uint8_t status = 0;
+
+ METHOD_TRACE("ips_erase_bios", 1);
+
+ status = 0;
+
+ /* Clear the status register */
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ outb(0x50, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* Erase Setup */
+ outb(0x20, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* Erase Confirm */
+ outb(0xD0, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* Erase Status */
+ outb(0x70, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ timeout = 80000; /* 80 seconds */
+
+ while (timeout > 0) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ udelay(25); /* 25 us */
+ }
+
+ status = inb(ha->io_addr + IPS_REG_FLDP);
+
+ if (status & 0x80)
+ break;
+
+ MDELAY(1);
+ timeout--;
+ }
+
+ /* check for timeout */
+ if (timeout <= 0) {
+ /* timeout */
+
+ /* try to suspend the erase */
+ outb(0xB0, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* wait for 10 seconds */
+ timeout = 10000;
+ while (timeout > 0) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ udelay(25); /* 25 us */
+ }
+
+ status = inb(ha->io_addr + IPS_REG_FLDP);
+
+ if (status & 0xC0)
+ break;
+
+ MDELAY(1);
+ timeout--;
+ }
+
+ return (1);
+ }
+
+ /* check for valid VPP */
+ if (status & 0x08)
+ /* VPP failure */
+ return (1);
+
+ /* check for successful flash */
+ if (status & 0x30)
+ /* sequence error */
+ return (1);
+
+ /* Otherwise, we were successful */
+ /* clear status */
+ outb(0x50, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* enable reads */
+ outb(0xFF, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_erase_bios_memio */
+/* */
+/* Routine Description: */
+/* Erase the BIOS on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_erase_bios_memio(ips_ha_t * ha)
+{
+ int timeout;
+ uint8_t status;
+
+ METHOD_TRACE("ips_erase_bios_memio", 1);
+
+ status = 0;
+
+ /* Clear the status register */
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* Erase Setup */
+ writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* Erase Confirm */
+ writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* Erase Status */
+ writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ timeout = 80000; /* 80 seconds */
+
+ while (timeout > 0) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ udelay(25); /* 25 us */
+ }
+
+ status = readb(ha->mem_ptr + IPS_REG_FLDP);
+
+ if (status & 0x80)
+ break;
+
+ MDELAY(1);
+ timeout--;
+ }
+
+ /* check for timeout */
+ if (timeout <= 0) {
+ /* timeout */
+
+ /* try to suspend the erase */
+ writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* wait for 10 seconds */
+ timeout = 10000;
+ while (timeout > 0) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ udelay(25); /* 25 us */
+ }
+
+ status = readb(ha->mem_ptr + IPS_REG_FLDP);
+
+ if (status & 0xC0)
+ break;
+
+ MDELAY(1);
+ timeout--;
+ }
+
+ return (1);
+ }
+
+ /* check for valid VPP */
+ if (status & 0x08)
+ /* VPP failure */
+ return (1);
+
+ /* check for successful flash */
+ if (status & 0x30)
+ /* sequence error */
+ return (1);
+
+ /* Otherwise, we were successful */
+ /* clear status */
+ writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* enable reads */
+ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_program_bios */
+/* */
+/* Routine Description: */
+/* Program the BIOS on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
+ uint32_t offset)
+{
+ int i;
+ int timeout;
+ uint8_t status = 0;
+
+ METHOD_TRACE("ips_program_bios", 1);
+
+ status = 0;
+
+ for (i = 0; i < buffersize; i++) {
+ /* write a byte */
+ outl(i + offset, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ outb(0x40, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* wait up to one second */
+ timeout = 1000;
+ while (timeout > 0) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ udelay(25); /* 25 us */
+ }
+
+ status = inb(ha->io_addr + IPS_REG_FLDP);
+
+ if (status & 0x80)
+ break;
+
+ MDELAY(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ /* timeout error */
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ outb(0xFF, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (1);
+ }
+
+ /* check the status */
+ if (status & 0x18) {
+ /* programming error */
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ outb(0xFF, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (1);
+ }
+ } /* end for */
+
+ /* Enable reading */
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ outb(0xFF, ha->io_addr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_program_bios_memio */
+/* */
+/* Routine Description: */
+/* Program the BIOS on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
+ uint32_t offset)
+{
+ int i;
+ int timeout;
+ uint8_t status = 0;
+
+ METHOD_TRACE("ips_program_bios_memio", 1);
+
+ status = 0;
+
+ for (i = 0; i < buffersize; i++) {
+ /* write a byte */
+ writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ /* wait up to one second */
+ timeout = 1000;
+ while (timeout > 0) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ udelay(25); /* 25 us */
+ }
+
+ status = readb(ha->mem_ptr + IPS_REG_FLDP);
+
+ if (status & 0x80)
+ break;
+
+ MDELAY(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ /* timeout error */
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (1);
+ }
+
+ /* check the status */
+ if (status & 0x18) {
+ /* programming error */
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (1);
+ }
+ } /* end for */
+
+ /* Enable reading */
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_verify_bios */
+/* */
+/* Routine Description: */
+/* Verify the BIOS on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
+ uint32_t offset)
+{
+ uint8_t checksum;
+ int i;
+
+ METHOD_TRACE("ips_verify_bios", 1);
+
+ /* test 1st byte */
+ outl(0, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
+ return (1);
+
+ outl(1, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+ if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
+ return (1);
+
+ checksum = 0xff;
+ for (i = 2; i < buffersize; i++) {
+
+ outl(i + offset, ha->io_addr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
+ }
+
+ if (checksum != 0)
+ /* failure */
+ return (1);
+ else
+ /* success */
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_verify_bios_memio */
+/* */
+/* Routine Description: */
+/* Verify the BIOS on the adapter */
+/* */
+/****************************************************************************/
+static int
+ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
+ uint32_t offset)
+{
+ uint8_t checksum;
+ int i;
+
+ METHOD_TRACE("ips_verify_bios_memio", 1);
+
+ /* test 1st byte */
+ writel(0, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
+ return (1);
+
+ writel(1, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+ if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
+ return (1);
+
+ checksum = 0xff;
+ for (i = 2; i < buffersize; i++) {
+
+ writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
+ udelay(25); /* 25 us */
+
+ checksum =
+ (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
+ }
+
+ if (checksum != 0)
+ /* failure */
+ return (1);
+ else
+ /* success */
+ return (0);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_abort_init */
+/* */
+/* Routine Description: */
+/* cleanup routine for a failed adapter initialization */
+/****************************************************************************/
+static int
+ips_abort_init(ips_ha_t * ha, int index)
+{
+ ha->active = 0;
+ ips_free(ha);
+ ips_ha[index] = NULL;
+ ips_sh[index] = NULL;
+ return -1;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_shift_controllers */
+/* */
+/* Routine Description: */
+/* helper function for ordering adapters */
+/****************************************************************************/
+static void
+ips_shift_controllers(int lowindex, int highindex)
+{
+ ips_ha_t *ha_sav = ips_ha[highindex];
+ struct Scsi_Host *sh_sav = ips_sh[highindex];
+ int i;
+
+ for (i = highindex; i > lowindex; i--) {
+ ips_ha[i] = ips_ha[i - 1];
+ ips_sh[i] = ips_sh[i - 1];
+ ips_ha[i]->host_num = i;
+ }
+ ha_sav->host_num = lowindex;
+ ips_ha[lowindex] = ha_sav;
+ ips_sh[lowindex] = sh_sav;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_order_controllers */
+/* */
+/* Routine Description: */
+/* place controllers is the "proper" boot order */
+/****************************************************************************/
+static void
+ips_order_controllers(void)
+{
+ int i, j, tmp, position = 0;
+ IPS_NVRAM_P5 *nvram;
+ if (!ips_ha[0])
+ return;
+ nvram = ips_ha[0]->nvram;
+
+ if (nvram->adapter_order[0]) {
+ for (i = 1; i <= nvram->adapter_order[0]; i++) {
+ for (j = position; j < ips_num_controllers; j++) {
+ switch (ips_ha[j]->ad_type) {
+ case IPS_ADTYPE_SERVERAID6M:
+ case IPS_ADTYPE_SERVERAID7M:
+ if (nvram->adapter_order[i] == 'M') {
+ ips_shift_controllers(position,
+ j);
+ position++;
+ }
+ break;
+ case IPS_ADTYPE_SERVERAID4L:
+ case IPS_ADTYPE_SERVERAID4M:
+ case IPS_ADTYPE_SERVERAID4MX:
+ case IPS_ADTYPE_SERVERAID4LX:
+ if (nvram->adapter_order[i] == 'N') {
+ ips_shift_controllers(position,
+ j);
+ position++;
+ }
+ break;
+ case IPS_ADTYPE_SERVERAID6I:
+ case IPS_ADTYPE_SERVERAID5I2:
+ case IPS_ADTYPE_SERVERAID5I1:
+ case IPS_ADTYPE_SERVERAID7k:
+ if (nvram->adapter_order[i] == 'S') {
+ ips_shift_controllers(position,
+ j);
+ position++;
+ }
+ break;
+ case IPS_ADTYPE_SERVERAID:
+ case IPS_ADTYPE_SERVERAID2:
+ case IPS_ADTYPE_NAVAJO:
+ case IPS_ADTYPE_KIOWA:
+ case IPS_ADTYPE_SERVERAID3L:
+ case IPS_ADTYPE_SERVERAID3:
+ case IPS_ADTYPE_SERVERAID4H:
+ if (nvram->adapter_order[i] == 'A') {
+ ips_shift_controllers(position,
+ j);
+ position++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ /* if adapter_order[0], then ordering is complete */
+ return;
+ }
+ /* old bios, use older ordering */
+ tmp = 0;
+ for (i = position; i < ips_num_controllers; i++) {
+ if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
+ ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
+ ips_shift_controllers(position, i);
+ position++;
+ tmp = 1;
+ }
+ }
+ /* if there were no 5I cards, then don't do any extra ordering */
+ if (!tmp)
+ return;
+ for (i = position; i < ips_num_controllers; i++) {
+ if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
+ ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
+ ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
+ ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
+ ips_shift_controllers(position, i);
+ position++;
+ }
+ }
+
+ return;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_register_scsi */
+/* */
+/* Routine Description: */
+/* perform any registration and setup with the scsi layer */
+/****************************************************************************/
+static int
+ips_register_scsi(int index)
+{
+ struct Scsi_Host *sh;
+ ips_ha_t *ha, *oldha = ips_ha[index];
+ sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
+ if (!sh) {
+ IPS_PRINTK(KERN_WARNING, oldha->pcidev,
+ "Unable to register controller with SCSI subsystem\n");
+ return -1;
+ }
+ ha = IPS_HA(sh);
+ memcpy(ha, oldha, sizeof (ips_ha_t));
+ free_irq(oldha->pcidev->irq, oldha);
+ /* Install the interrupt handler with the new ha */
+ if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Unable to install interrupt handler\n");
+ goto err_out_sh;
+ }
+
+ kfree(oldha);
+
+ /* Store away needed values for later use */
+ sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
+ sh->sg_tablesize = sh->hostt->sg_tablesize;
+ sh->can_queue = sh->hostt->can_queue;
+ sh->cmd_per_lun = sh->hostt->cmd_per_lun;
+ sh->use_clustering = sh->hostt->use_clustering;
+ sh->max_sectors = 128;
+
+ sh->max_id = ha->ntargets;
+ sh->max_lun = ha->nlun;
+ sh->max_channel = ha->nbus - 1;
+ sh->can_queue = ha->max_cmds - 1;
+
+ if (scsi_add_host(sh, &ha->pcidev->dev))
+ goto err_out;
+
+ ips_sh[index] = sh;
+ ips_ha[index] = ha;
+
+ scsi_scan_host(sh);
+
+ return 0;
+
+err_out:
+ free_irq(ha->pcidev->irq, ha);
+err_out_sh:
+ scsi_host_put(sh);
+ return -1;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Routine Name: ips_remove_device */
+/* */
+/* Routine Description: */
+/* Remove one Adapter ( Hot Plugging ) */
+/*---------------------------------------------------------------------------*/
+static void
+ips_remove_device(struct pci_dev *pci_dev)
+{
+ struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
+
+ pci_set_drvdata(pci_dev, NULL);
+
+ ips_release(sh);
+
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_module_init */
+/* */
+/* Routine Description: */
+/* function called on module load */
+/****************************************************************************/
+static int __init
+ips_module_init(void)
+{
+ if (pci_register_driver(&ips_pci_driver) < 0)
+ return -ENODEV;
+ ips_driver_template.module = THIS_MODULE;
+ ips_order_controllers();
+ if (!ips_detect(&ips_driver_template)) {
+ pci_unregister_driver(&ips_pci_driver);
+ return -ENODEV;
+ }
+ register_reboot_notifier(&ips_notifier);
+ return 0;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_module_exit */
+/* */
+/* Routine Description: */
+/* function called on module unload */
+/****************************************************************************/
+static void __exit
+ips_module_exit(void)
+{
+ pci_unregister_driver(&ips_pci_driver);
+ unregister_reboot_notifier(&ips_notifier);
+}
+
+module_init(ips_module_init);
+module_exit(ips_module_exit);
+
+/*---------------------------------------------------------------------------*/
+/* Routine Name: ips_insert_device */
+/* */
+/* Routine Description: */
+/* Add One Adapter ( Hot Plug ) */
+/* */
+/* Return Value: */
+/* 0 if Successful, else non-zero */
+/*---------------------------------------------------------------------------*/
+static int
+ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
+{
+ int index = -1;
+ int rc;
+
+ METHOD_TRACE("ips_insert_device", 1);
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pci_dev, "ips");
+ if (rc)
+ goto err_out;
+
+ rc = ips_init_phase1(pci_dev, &index);
+ if (rc == SUCCESS)
+ rc = ips_init_phase2(index);
+
+ if (ips_hotplug)
+ if (ips_register_scsi(index)) {
+ ips_free(ips_ha[index]);
+ rc = -1;
+ }
+
+ if (rc == SUCCESS)
+ ips_num_controllers++;
+
+ ips_next_controller = ips_num_controllers;
+
+ if (rc < 0) {
+ rc = -ENODEV;
+ goto err_out_regions;
+ }
+
+ pci_set_drvdata(pci_dev, ips_sh[index]);
+ return 0;
+
+err_out_regions:
+ pci_release_regions(pci_dev);
+err_out:
+ pci_disable_device(pci_dev);
+ return rc;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Routine Name: ips_init_phase1 */
+/* */
+/* Routine Description: */
+/* Adapter Initialization */
+/* */
+/* Return Value: */
+/* 0 if Successful, else non-zero */
+/*---------------------------------------------------------------------------*/
+static int
+ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
+{
+ ips_ha_t *ha;
+ uint32_t io_addr;
+ uint32_t mem_addr;
+ uint32_t io_len;
+ uint32_t mem_len;
+ uint8_t bus;
+ uint8_t func;
+ int j;
+ int index;
+ dma_addr_t dma_address;
+ char __iomem *ioremap_ptr;
+ char __iomem *mem_ptr;
+ uint32_t IsDead;
+
+ METHOD_TRACE("ips_init_phase1", 1);
+ index = IPS_MAX_ADAPTERS;
+ for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
+ if (ips_ha[j] == NULL) {
+ index = j;
+ break;
+ }
+ }
+
+ if (index >= IPS_MAX_ADAPTERS)
+ return -1;
+
+ /* stuff that we get in dev */
+ bus = pci_dev->bus->number;
+ func = pci_dev->devfn;
+
+ /* Init MEM/IO addresses to 0 */
+ mem_addr = 0;
+ io_addr = 0;
+ mem_len = 0;
+ io_len = 0;
+
+ for (j = 0; j < 2; j++) {
+ if (!pci_resource_start(pci_dev, j))
+ break;
+
+ if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
+ io_addr = pci_resource_start(pci_dev, j);
+ io_len = pci_resource_len(pci_dev, j);
+ } else {
+ mem_addr = pci_resource_start(pci_dev, j);
+ mem_len = pci_resource_len(pci_dev, j);
+ }
+ }
+
+ /* setup memory mapped area (if applicable) */
+ if (mem_addr) {
+ uint32_t base;
+ uint32_t offs;
+
+ base = mem_addr & PAGE_MASK;
+ offs = mem_addr - base;
+ ioremap_ptr = ioremap(base, PAGE_SIZE);
+ if (!ioremap_ptr)
+ return -1;
+ mem_ptr = ioremap_ptr + offs;
+ } else {
+ ioremap_ptr = NULL;
+ mem_ptr = NULL;
+ }
+
+ /* found a controller */
+ ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
+ if (ha == NULL) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate temporary ha struct\n");
+ return -1;
+ }
+
+ ips_sh[index] = NULL;
+ ips_ha[index] = ha;
+ ha->active = 1;
+
+ /* Store info in HA structure */
+ ha->io_addr = io_addr;
+ ha->io_len = io_len;
+ ha->mem_addr = mem_addr;
+ ha->mem_len = mem_len;
+ ha->mem_ptr = mem_ptr;
+ ha->ioremap_ptr = ioremap_ptr;
+ ha->host_num = (uint32_t) index;
+ ha->slot_num = PCI_SLOT(pci_dev->devfn);
+ ha->pcidev = pci_dev;
+
+ /*
+ * Set the pci_dev's dma_mask. Not all adapters support 64bit
+ * addressing so don't enable it if the adapter can't support
+ * it! Also, don't use 64bit addressing if dma addresses
+ * are guaranteed to be < 4G.
+ */
+ if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
+ !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+ (ha)->flags |= IPS_HA_ENH_SG;
+ } else {
+ if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+ printk(KERN_WARNING "Unable to set DMA Mask\n");
+ return ips_abort_init(ha, index);
+ }
+ }
+ if(ips_cd_boot && !ips_FlashData){
+ ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
+ &ips_flashbusaddr);
+ }
+
+ ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
+ &ha->enq_busaddr);
+ if (!ha->enq) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate host inquiry structure\n");
+ return ips_abort_init(ha, index);
+ }
+
+ ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
+ sizeof (IPS_IO_CMD), &dma_address);
+ if (!ha->adapt) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate host adapt & dummy structures\n");
+ return ips_abort_init(ha, index);
+ }
+ ha->adapt->hw_status_start = dma_address;
+ ha->dummy = (void *) (ha->adapt + 1);
+
+
+
+ ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+ if (!ha->logical_drive_info) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate logical drive info structure\n");
+ return ips_abort_init(ha, index);
+ }
+ ha->logical_drive_info_dma_addr = dma_address;
+
+
+ ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);
+
+ if (!ha->conf) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate host conf structure\n");
+ return ips_abort_init(ha, index);
+ }
+
+ ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);
+
+ if (!ha->nvram) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate host NVRAM structure\n");
+ return ips_abort_init(ha, index);
+ }
+
+ ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);
+
+ if (!ha->subsys) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate host subsystem structure\n");
+ return ips_abort_init(ha, index);
+ }
+
+ /* the ioctl buffer is now used during adapter initialization, so its
+ * successful allocation is now required */
+ if (ips_ioctlsize < PAGE_SIZE)
+ ips_ioctlsize = PAGE_SIZE;
+
+ ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
+ &ha->ioctl_busaddr);
+ ha->ioctl_len = ips_ioctlsize;
+ if (!ha->ioctl_data) {
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to allocate IOCTL data\n");
+ return ips_abort_init(ha, index);
+ }
+
+ /*
+ * Setup Functions
+ */
+ ips_setup_funclist(ha);
+
+ if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
+ /* If Morpheus appears dead, reset it */
+ IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
+ if (IsDead == 0xDEADBEEF) {
+ ips_reset_morpheus(ha);
+ }
+ }
+
+ /*
+ * Initialize the card if it isn't already
+ */
+
+ if (!(*ha->func.isinit) (ha)) {
+ if (!(*ha->func.init) (ha)) {
+ /*
+ * Initialization failed
+ */
+ IPS_PRINTK(KERN_WARNING, pci_dev,
+ "Unable to initialize controller\n");
+ return ips_abort_init(ha, index);
+ }
+ }
+
+ *indexPtr = index;
+ return SUCCESS;
+}
+
+/*---------------------------------------------------------------------------*/
+/* Routine Name: ips_init_phase2 */
+/* */
+/* Routine Description: */
+/* Adapter Initialization Phase 2 */
+/* */
+/* Return Value: */
+/* 0 if Successful, else non-zero */
+/*---------------------------------------------------------------------------*/
+static int
+ips_init_phase2(int index)
+{
+ ips_ha_t *ha;
+
+ ha = ips_ha[index];
+
+ METHOD_TRACE("ips_init_phase2", 1);
+ if (!ha->active) {
+ ips_ha[index] = NULL;
+ return -1;
+ }
+
+ /* Install the interrupt handler */
+ if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Unable to install interrupt handler\n");
+ return ips_abort_init(ha, index);
+ }
+
+ /*
+ * Allocate a temporary SCB for initialization
+ */
+ ha->max_cmds = 1;
+ if (!ips_allocatescbs(ha)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Unable to allocate a CCB\n");
+ free_irq(ha->pcidev->irq, ha);
+ return ips_abort_init(ha, index);
+ }
+
+ if (!ips_hainit(ha)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Unable to initialize controller\n");
+ free_irq(ha->pcidev->irq, ha);
+ return ips_abort_init(ha, index);
+ }
+ /* Free the temporary SCB */
+ ips_deallocatescbs(ha, 1);
+
+ /* allocate CCBs */
+ if (!ips_allocatescbs(ha)) {
+ IPS_PRINTK(KERN_WARNING, ha->pcidev,
+ "Unable to allocate CCBs\n");
+ free_irq(ha->pcidev->irq, ha);
+ return ips_abort_init(ha, index);
+ }
+
+ return SUCCESS;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
+MODULE_VERSION(IPS_VER_STRING);
+
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
new file mode 100644
index 000000000..45b9566b9
--- /dev/null
+++ b/drivers/scsi/ips.h
@@ -0,0 +1,1251 @@
+/*****************************************************************************/
+/* ips.h -- driver for the Adaptec / IBM ServeRAID controller */
+/* */
+/* Written By: Keith Mitchell, IBM Corporation */
+/* Jack Hammer, Adaptec, Inc. */
+/* David Jeffery, Adaptec, Inc. */
+/* */
+/* Copyright (C) 1999 IBM Corporation */
+/* Copyright (C) 2003 Adaptec, Inc. */
+/* */
+/* This program is free software; you can redistribute it and/or modify */
+/* it under the terms of the GNU General Public License as published by */
+/* the Free Software Foundation; either version 2 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU General Public License for more details. */
+/* */
+/* NO WARRANTY */
+/* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR */
+/* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT */
+/* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, */
+/* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is */
+/* solely responsible for determining the appropriateness of using and */
+/* distributing the Program and assumes all risks associated with its */
+/* exercise of rights under this Agreement, including but not limited to */
+/* the risks and costs of program errors, damage to or loss of data, */
+/* programs or equipment, and unavailability or interruption of operations. */
+/* */
+/* DISCLAIMER OF LIABILITY */
+/* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY */
+/* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL */
+/* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND */
+/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR */
+/* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE */
+/* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED */
+/* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program; if not, write to the Free Software */
+/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/* */
+/* Bugs/Comments/Suggestions should be mailed to: */
+/* ipslinux@adaptec.com */
+/* */
+/*****************************************************************************/
+
+#ifndef _IPS_H_
+ #define _IPS_H_
+
+#include <linux/nmi.h>
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+
+ /*
+ * Some handy macros
+ */
+ #define IPS_HA(x) ((ips_ha_t *) x->hostdata)
+ #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
+ #define IPS_IS_TROMBONE(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
+ (ha->pcidev->revision >= IPS_REVID_TROMBONE32) && \
+ (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) ? 1 : 0)
+ #define IPS_IS_CLARINET(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
+ (ha->pcidev->revision >= IPS_REVID_CLARINETP1) && \
+ (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) ? 1 : 0)
+ #define IPS_IS_MORPHEUS(ha) (ha->pcidev->device == IPS_DEVICEID_MORPHEUS)
+ #define IPS_IS_MARCO(ha) (ha->pcidev->device == IPS_DEVICEID_MARCO)
+ #define IPS_USE_I2O_DELIVER(ha) ((IPS_IS_MORPHEUS(ha) || \
+ (IPS_IS_TROMBONE(ha) && \
+ (ips_force_i2o))) ? 1 : 0)
+ #define IPS_USE_MEMIO(ha) ((IPS_IS_MORPHEUS(ha) || \
+ ((IPS_IS_TROMBONE(ha) || IPS_IS_CLARINET(ha)) && \
+ (ips_force_memio))) ? 1 : 0)
+
+ #define IPS_HAS_ENH_SGLIST(ha) (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha))
+ #define IPS_USE_ENH_SGLIST(ha) ((ha)->flags & IPS_HA_ENH_SG)
+ #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \
+ sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST))
+
+ #define IPS_PRINTK(level, pcidev, format, arg...) \
+ dev_printk(level , &((pcidev)->dev) , format , ## arg)
+
+ #define MDELAY(n) \
+ do { \
+ mdelay(n); \
+ touch_nmi_watchdog(); \
+ } while (0)
+
+ #ifndef min
+ #define min(x,y) ((x) < (y) ? x : y)
+ #endif
+
+ #ifndef __iomem /* For clean compiles in earlier kernels without __iomem annotations */
+ #define __iomem
+ #endif
+
+ #define pci_dma_hi32(a) ((a >> 16) >> 16)
+ #define pci_dma_lo32(a) (a & 0xffffffff)
+
+ #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+ #define IPS_ENABLE_DMA64 (1)
+ #else
+ #define IPS_ENABLE_DMA64 (0)
+ #endif
+
+ /*
+ * Adapter address map equates
+ */
+ #define IPS_REG_HISR 0x08 /* Host Interrupt Status Reg */
+ #define IPS_REG_CCSAR 0x10 /* Cmd Channel System Addr Reg */
+ #define IPS_REG_CCCR 0x14 /* Cmd Channel Control Reg */
+ #define IPS_REG_SQHR 0x20 /* Status Q Head Reg */
+ #define IPS_REG_SQTR 0x24 /* Status Q Tail Reg */
+ #define IPS_REG_SQER 0x28 /* Status Q End Reg */
+ #define IPS_REG_SQSR 0x2C /* Status Q Start Reg */
+ #define IPS_REG_SCPR 0x05 /* Subsystem control port reg */
+ #define IPS_REG_ISPR 0x06 /* interrupt status port reg */
+ #define IPS_REG_CBSP 0x07 /* CBSP register */
+ #define IPS_REG_FLAP 0x18 /* Flash address port */
+ #define IPS_REG_FLDP 0x1C /* Flash data port */
+ #define IPS_REG_NDAE 0x38 /* Anaconda 64 NDAE Register */
+ #define IPS_REG_I2O_INMSGQ 0x40 /* I2O Inbound Message Queue */
+ #define IPS_REG_I2O_OUTMSGQ 0x44 /* I2O Outbound Message Queue */
+ #define IPS_REG_I2O_HIR 0x30 /* I2O Interrupt Status */
+ #define IPS_REG_I960_IDR 0x20 /* i960 Inbound Doorbell */
+ #define IPS_REG_I960_MSG0 0x18 /* i960 Outbound Reg 0 */
+ #define IPS_REG_I960_MSG1 0x1C /* i960 Outbound Reg 1 */
+ #define IPS_REG_I960_OIMR 0x34 /* i960 Oubound Int Mask Reg */
+
+ /*
+ * Adapter register bit equates
+ */
+ #define IPS_BIT_GHI 0x04 /* HISR General Host Interrupt */
+ #define IPS_BIT_SQO 0x02 /* HISR Status Q Overflow */
+ #define IPS_BIT_SCE 0x01 /* HISR Status Channel Enqueue */
+ #define IPS_BIT_SEM 0x08 /* CCCR Semaphore Bit */
+ #define IPS_BIT_ILE 0x10 /* CCCR ILE Bit */
+ #define IPS_BIT_START_CMD 0x101A /* CCCR Start Command Channel */
+ #define IPS_BIT_START_STOP 0x0002 /* CCCR Start/Stop Bit */
+ #define IPS_BIT_RST 0x80 /* SCPR Reset Bit */
+ #define IPS_BIT_EBM 0x02 /* SCPR Enable Bus Master */
+ #define IPS_BIT_EI 0x80 /* HISR Enable Interrupts */
+ #define IPS_BIT_OP 0x01 /* OP bit in CBSP */
+ #define IPS_BIT_I2O_OPQI 0x08 /* General Host Interrupt */
+ #define IPS_BIT_I960_MSG0I 0x01 /* Message Register 0 Interrupt*/
+ #define IPS_BIT_I960_MSG1I 0x02 /* Message Register 1 Interrupt*/
+
+ /*
+ * Adapter Command ID Equates
+ */
+ #define IPS_CMD_GET_LD_INFO 0x19
+ #define IPS_CMD_GET_SUBSYS 0x40
+ #define IPS_CMD_READ_CONF 0x38
+ #define IPS_CMD_RW_NVRAM_PAGE 0xBC
+ #define IPS_CMD_READ 0x02
+ #define IPS_CMD_WRITE 0x03
+ #define IPS_CMD_FFDC 0xD7
+ #define IPS_CMD_ENQUIRY 0x05
+ #define IPS_CMD_FLUSH 0x0A
+ #define IPS_CMD_READ_SG 0x82
+ #define IPS_CMD_WRITE_SG 0x83
+ #define IPS_CMD_DCDB 0x04
+ #define IPS_CMD_DCDB_SG 0x84
+ #define IPS_CMD_EXTENDED_DCDB 0x95
+ #define IPS_CMD_EXTENDED_DCDB_SG 0x96
+ #define IPS_CMD_CONFIG_SYNC 0x58
+ #define IPS_CMD_ERROR_TABLE 0x17
+ #define IPS_CMD_DOWNLOAD 0x20
+ #define IPS_CMD_RW_BIOSFW 0x22
+ #define IPS_CMD_GET_VERSION_INFO 0xC6
+ #define IPS_CMD_RESET_CHANNEL 0x1A
+
+ /*
+ * Adapter Equates
+ */
+ #define IPS_CSL 0xFF
+ #define IPS_POCL 0x30
+ #define IPS_NORM_STATE 0x00
+ #define IPS_MAX_ADAPTER_TYPES 3
+ #define IPS_MAX_ADAPTERS 16
+ #define IPS_MAX_IOCTL 1
+ #define IPS_MAX_IOCTL_QUEUE 8
+ #define IPS_MAX_QUEUE 128
+ #define IPS_BLKSIZE 512
+ #define IPS_MAX_SG 17
+ #define IPS_MAX_LD 8
+ #define IPS_MAX_CHANNELS 4
+ #define IPS_MAX_TARGETS 15
+ #define IPS_MAX_CHUNKS 16
+ #define IPS_MAX_CMDS 128
+ #define IPS_MAX_XFER 0x10000
+ #define IPS_NVRAM_P5_SIG 0xFFDDBB99
+ #define IPS_MAX_POST_BYTES 0x02
+ #define IPS_MAX_CONFIG_BYTES 0x02
+ #define IPS_GOOD_POST_STATUS 0x80
+ #define IPS_SEM_TIMEOUT 2000
+ #define IPS_IOCTL_COMMAND 0x0D
+ #define IPS_INTR_ON 0
+ #define IPS_INTR_IORL 1
+ #define IPS_FFDC 99
+ #define IPS_ADAPTER_ID 0xF
+ #define IPS_VENDORID_IBM 0x1014
+ #define IPS_VENDORID_ADAPTEC 0x9005
+ #define IPS_DEVICEID_COPPERHEAD 0x002E
+ #define IPS_DEVICEID_MORPHEUS 0x01BD
+ #define IPS_DEVICEID_MARCO 0x0250
+ #define IPS_SUBDEVICEID_4M 0x01BE
+ #define IPS_SUBDEVICEID_4L 0x01BF
+ #define IPS_SUBDEVICEID_4MX 0x0208
+ #define IPS_SUBDEVICEID_4LX 0x020E
+ #define IPS_SUBDEVICEID_5I2 0x0259
+ #define IPS_SUBDEVICEID_5I1 0x0258
+ #define IPS_SUBDEVICEID_6M 0x0279
+ #define IPS_SUBDEVICEID_6I 0x028C
+ #define IPS_SUBDEVICEID_7k 0x028E
+ #define IPS_SUBDEVICEID_7M 0x028F
+ #define IPS_IOCTL_SIZE 8192
+ #define IPS_STATUS_SIZE 4
+ #define IPS_STATUS_Q_SIZE (IPS_MAX_CMDS+1) * IPS_STATUS_SIZE
+ #define IPS_IMAGE_SIZE 500 * 1024
+ #define IPS_MEMMAP_SIZE 128
+ #define IPS_ONE_MSEC 1
+ #define IPS_ONE_SEC 1000
+
+ /*
+ * Geometry Settings
+ */
+ #define IPS_COMP_HEADS 128
+ #define IPS_COMP_SECTORS 32
+ #define IPS_NORM_HEADS 254
+ #define IPS_NORM_SECTORS 63
+
+ /*
+ * Adapter Basic Status Codes
+ */
+ #define IPS_BASIC_STATUS_MASK 0xFF
+ #define IPS_GSC_STATUS_MASK 0x0F
+ #define IPS_CMD_SUCCESS 0x00
+ #define IPS_CMD_RECOVERED_ERROR 0x01
+ #define IPS_INVAL_OPCO 0x03
+ #define IPS_INVAL_CMD_BLK 0x04
+ #define IPS_INVAL_PARM_BLK 0x05
+ #define IPS_BUSY 0x08
+ #define IPS_CMD_CMPLT_WERROR 0x0C
+ #define IPS_LD_ERROR 0x0D
+ #define IPS_CMD_TIMEOUT 0x0E
+ #define IPS_PHYS_DRV_ERROR 0x0F
+
+ /*
+ * Adapter Extended Status Equates
+ */
+ #define IPS_ERR_SEL_TO 0xF0
+ #define IPS_ERR_OU_RUN 0xF2
+ #define IPS_ERR_HOST_RESET 0xF7
+ #define IPS_ERR_DEV_RESET 0xF8
+ #define IPS_ERR_RECOVERY 0xFC
+ #define IPS_ERR_CKCOND 0xFF
+
+ /*
+ * Operating System Defines
+ */
+ #define IPS_OS_WINDOWS_NT 0x01
+ #define IPS_OS_NETWARE 0x02
+ #define IPS_OS_OPENSERVER 0x03
+ #define IPS_OS_UNIXWARE 0x04
+ #define IPS_OS_SOLARIS 0x05
+ #define IPS_OS_OS2 0x06
+ #define IPS_OS_LINUX 0x07
+ #define IPS_OS_FREEBSD 0x08
+
+ /*
+ * Adapter Revision ID's
+ */
+ #define IPS_REVID_SERVERAID 0x02
+ #define IPS_REVID_NAVAJO 0x03
+ #define IPS_REVID_SERVERAID2 0x04
+ #define IPS_REVID_CLARINETP1 0x05
+ #define IPS_REVID_CLARINETP2 0x07
+ #define IPS_REVID_CLARINETP3 0x0D
+ #define IPS_REVID_TROMBONE32 0x0F
+ #define IPS_REVID_TROMBONE64 0x10
+
+ /*
+ * NVRAM Page 5 Adapter Defines
+ */
+ #define IPS_ADTYPE_SERVERAID 0x01
+ #define IPS_ADTYPE_SERVERAID2 0x02
+ #define IPS_ADTYPE_NAVAJO 0x03
+ #define IPS_ADTYPE_KIOWA 0x04
+ #define IPS_ADTYPE_SERVERAID3 0x05
+ #define IPS_ADTYPE_SERVERAID3L 0x06
+ #define IPS_ADTYPE_SERVERAID4H 0x07
+ #define IPS_ADTYPE_SERVERAID4M 0x08
+ #define IPS_ADTYPE_SERVERAID4L 0x09
+ #define IPS_ADTYPE_SERVERAID4MX 0x0A
+ #define IPS_ADTYPE_SERVERAID4LX 0x0B
+ #define IPS_ADTYPE_SERVERAID5I2 0x0C
+ #define IPS_ADTYPE_SERVERAID5I1 0x0D
+ #define IPS_ADTYPE_SERVERAID6M 0x0E
+ #define IPS_ADTYPE_SERVERAID6I 0x0F
+ #define IPS_ADTYPE_SERVERAID7t 0x10
+ #define IPS_ADTYPE_SERVERAID7k 0x11
+ #define IPS_ADTYPE_SERVERAID7M 0x12
+
+ /*
+ * Adapter Command/Status Packet Definitions
+ */
+ #define IPS_SUCCESS 0x01 /* Successfully completed */
+ #define IPS_SUCCESS_IMM 0x02 /* Success - Immediately */
+ #define IPS_FAILURE 0x04 /* Completed with Error */
+
+ /*
+ * Logical Drive Equates
+ */
+ #define IPS_LD_OFFLINE 0x02
+ #define IPS_LD_OKAY 0x03
+ #define IPS_LD_FREE 0x00
+ #define IPS_LD_SYS 0x06
+ #define IPS_LD_CRS 0x24
+
+ /*
+ * DCDB Table Equates
+ */
+ #define IPS_NO_DISCONNECT 0x00
+ #define IPS_DISCONNECT_ALLOWED 0x80
+ #define IPS_NO_AUTO_REQSEN 0x40
+ #define IPS_DATA_NONE 0x00
+ #define IPS_DATA_UNK 0x00
+ #define IPS_DATA_IN 0x01
+ #define IPS_DATA_OUT 0x02
+ #define IPS_TRANSFER64K 0x08
+ #define IPS_NOTIMEOUT 0x00
+ #define IPS_TIMEOUT10 0x10
+ #define IPS_TIMEOUT60 0x20
+ #define IPS_TIMEOUT20M 0x30
+
+ /*
+ * SCSI Inquiry Data Flags
+ */
+ #define IPS_SCSI_INQ_TYPE_DASD 0x00
+ #define IPS_SCSI_INQ_TYPE_PROCESSOR 0x03
+ #define IPS_SCSI_INQ_LU_CONNECTED 0x00
+ #define IPS_SCSI_INQ_RD_REV2 0x02
+ #define IPS_SCSI_INQ_REV2 0x02
+ #define IPS_SCSI_INQ_REV3 0x03
+ #define IPS_SCSI_INQ_Address16 0x01
+ #define IPS_SCSI_INQ_Address32 0x02
+ #define IPS_SCSI_INQ_MedChanger 0x08
+ #define IPS_SCSI_INQ_MultiPort 0x10
+ #define IPS_SCSI_INQ_EncServ 0x40
+ #define IPS_SCSI_INQ_SoftReset 0x01
+ #define IPS_SCSI_INQ_CmdQue 0x02
+ #define IPS_SCSI_INQ_Linked 0x08
+ #define IPS_SCSI_INQ_Sync 0x10
+ #define IPS_SCSI_INQ_WBus16 0x20
+ #define IPS_SCSI_INQ_WBus32 0x40
+ #define IPS_SCSI_INQ_RelAdr 0x80
+
+ /*
+ * SCSI Request Sense Data Flags
+ */
+ #define IPS_SCSI_REQSEN_VALID 0x80
+ #define IPS_SCSI_REQSEN_CURRENT_ERR 0x70
+ #define IPS_SCSI_REQSEN_NO_SENSE 0x00
+
+ /*
+ * SCSI Mode Page Equates
+ */
+ #define IPS_SCSI_MP3_SoftSector 0x01
+ #define IPS_SCSI_MP3_HardSector 0x02
+ #define IPS_SCSI_MP3_Removeable 0x04
+ #define IPS_SCSI_MP3_AllocateSurface 0x08
+
+ /*
+ * HA Flags
+ */
+
+ #define IPS_HA_ENH_SG 0x1
+
+ /*
+ * SCB Flags
+ */
+ #define IPS_SCB_MAP_SG 0x00008
+ #define IPS_SCB_MAP_SINGLE 0X00010
+
+ /*
+ * Passthru stuff
+ */
+ #define IPS_COPPUSRCMD (('C'<<8) | 65)
+ #define IPS_COPPIOCCMD (('C'<<8) | 66)
+ #define IPS_NUMCTRLS (('C'<<8) | 68)
+ #define IPS_CTRLINFO (('C'<<8) | 69)
+
+ /* flashing defines */
+ #define IPS_FW_IMAGE 0x00
+ #define IPS_BIOS_IMAGE 0x01
+ #define IPS_WRITE_FW 0x01
+ #define IPS_WRITE_BIOS 0x02
+ #define IPS_ERASE_BIOS 0x03
+ #define IPS_BIOS_HEADER 0xC0
+
+ /* time oriented stuff */
+ #define IPS_IS_LEAP_YEAR(y) (((y % 4 == 0) && ((y % 100 != 0) || (y % 400 == 0))) ? 1 : 0)
+ #define IPS_NUM_LEAP_YEARS_THROUGH(y) ((y) / 4 - (y) / 100 + (y) / 400)
+
+ #define IPS_SECS_MIN 60
+ #define IPS_SECS_HOUR 3600
+ #define IPS_SECS_8HOURS 28800
+ #define IPS_SECS_DAY 86400
+ #define IPS_DAYS_NORMAL_YEAR 365
+ #define IPS_DAYS_LEAP_YEAR 366
+ #define IPS_EPOCH_YEAR 1970
+
+ /*
+ * Scsi_Host Template
+ */
+ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[]);
+ static int ips_slave_configure(struct scsi_device *SDptr);
+
+/*
+ * Raid Command Formats
+ */
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t log_drv;
+ uint8_t sg_count;
+ uint32_t lba;
+ uint32_t sg_addr;
+ uint16_t sector_count;
+ uint8_t segment_4G;
+ uint8_t enhanced_sg;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_IO_CMD, *PIPS_IO_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint16_t reserved;
+ uint32_t reserved2;
+ uint32_t buffer_addr;
+ uint32_t reserved3;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_LD_CMD, *PIPS_LD_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t reserved;
+ uint8_t reserved2;
+ uint32_t reserved3;
+ uint32_t buffer_addr;
+ uint32_t reserved4;
+} IPS_IOCTL_CMD, *PIPS_IOCTL_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t channel;
+ uint8_t reserved3;
+ uint8_t reserved4;
+ uint8_t reserved5;
+ uint8_t reserved6;
+ uint8_t reserved7;
+ uint8_t reserved8;
+ uint8_t reserved9;
+ uint8_t reserved10;
+ uint8_t reserved11;
+ uint8_t reserved12;
+ uint8_t reserved13;
+ uint8_t reserved14;
+ uint8_t adapter_flag;
+} IPS_RESET_CMD, *PIPS_RESET_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint16_t reserved;
+ uint32_t reserved2;
+ uint32_t dcdb_address;
+ uint16_t reserved3;
+ uint8_t segment_4G;
+ uint8_t enhanced_sg;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_DCDB_CMD, *PIPS_DCDB_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t channel;
+ uint8_t source_target;
+ uint32_t reserved;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_CS_CMD, *PIPS_CS_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t log_drv;
+ uint8_t control;
+ uint32_t reserved;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_US_CMD, *PIPS_US_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t reserved;
+ uint8_t state;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_FC_CMD, *PIPS_FC_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t reserved;
+ uint8_t desc;
+ uint32_t reserved2;
+ uint32_t buffer_addr;
+ uint32_t reserved3;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_STATUS_CMD, *PIPS_STATUS_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t page;
+ uint8_t write;
+ uint32_t reserved;
+ uint32_t buffer_addr;
+ uint32_t reserved2;
+ uint32_t ccsar;
+ uint32_t cccr;
+} IPS_NVRAM_CMD, *PIPS_NVRAM_CMD;
+
+typedef struct
+{
+ uint8_t op_code;
+ uint8_t command_id;
+ uint16_t reserved;
+ uint32_t count;
+ uint32_t buffer_addr;
+ uint32_t reserved2;
+} IPS_VERSION_INFO, *PIPS_VERSION_INFO;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t reset_count;
+ uint8_t reset_type;
+ uint8_t second;
+ uint8_t minute;
+ uint8_t hour;
+ uint8_t day;
+ uint8_t reserved1[4];
+ uint8_t month;
+ uint8_t yearH;
+ uint8_t yearL;
+ uint8_t reserved2;
+} IPS_FFDC_CMD, *PIPS_FFDC_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t type;
+ uint8_t direction;
+ uint32_t count;
+ uint32_t buffer_addr;
+ uint8_t total_packets;
+ uint8_t packet_num;
+ uint16_t reserved;
+} IPS_FLASHFW_CMD, *PIPS_FLASHFW_CMD;
+
+typedef struct {
+ uint8_t op_code;
+ uint8_t command_id;
+ uint8_t type;
+ uint8_t direction;
+ uint32_t count;
+ uint32_t buffer_addr;
+ uint32_t offset;
+} IPS_FLASHBIOS_CMD, *PIPS_FLASHBIOS_CMD;
+
+typedef union {
+ IPS_IO_CMD basic_io;
+ IPS_LD_CMD logical_info;
+ IPS_IOCTL_CMD ioctl_info;
+ IPS_DCDB_CMD dcdb;
+ IPS_CS_CMD config_sync;
+ IPS_US_CMD unlock_stripe;
+ IPS_FC_CMD flush_cache;
+ IPS_STATUS_CMD status;
+ IPS_NVRAM_CMD nvram;
+ IPS_FFDC_CMD ffdc;
+ IPS_FLASHFW_CMD flashfw;
+ IPS_FLASHBIOS_CMD flashbios;
+ IPS_VERSION_INFO version_info;
+ IPS_RESET_CMD reset;
+} IPS_HOST_COMMAND, *PIPS_HOST_COMMAND;
+
+typedef struct {
+ uint8_t logical_id;
+ uint8_t reserved;
+ uint8_t raid_level;
+ uint8_t state;
+ uint32_t sector_count;
+} IPS_DRIVE_INFO, *PIPS_DRIVE_INFO;
+
+typedef struct {
+ uint8_t no_of_log_drive;
+ uint8_t reserved[3];
+ IPS_DRIVE_INFO drive_info[IPS_MAX_LD];
+} IPS_LD_INFO, *PIPS_LD_INFO;
+
+typedef struct {
+ uint8_t device_address;
+ uint8_t cmd_attribute;
+ uint16_t transfer_length;
+ uint32_t buffer_pointer;
+ uint8_t cdb_length;
+ uint8_t sense_length;
+ uint8_t sg_count;
+ uint8_t reserved;
+ uint8_t scsi_cdb[12];
+ uint8_t sense_info[64];
+ uint8_t scsi_status;
+ uint8_t reserved2[3];
+} IPS_DCDB_TABLE, *PIPS_DCDB_TABLE;
+
+typedef struct {
+ uint8_t device_address;
+ uint8_t cmd_attribute;
+ uint8_t cdb_length;
+ uint8_t reserved_for_LUN;
+ uint32_t transfer_length;
+ uint32_t buffer_pointer;
+ uint16_t sg_count;
+ uint8_t sense_length;
+ uint8_t scsi_status;
+ uint32_t reserved;
+ uint8_t scsi_cdb[16];
+ uint8_t sense_info[56];
+} IPS_DCDB_TABLE_TAPE, *PIPS_DCDB_TABLE_TAPE;
+
+typedef union {
+ struct {
+ volatile uint8_t reserved;
+ volatile uint8_t command_id;
+ volatile uint8_t basic_status;
+ volatile uint8_t extended_status;
+ } fields;
+
+ volatile uint32_t value;
+} IPS_STATUS, *PIPS_STATUS;
+
+typedef struct {
+ IPS_STATUS status[IPS_MAX_CMDS + 1];
+ volatile PIPS_STATUS p_status_start;
+ volatile PIPS_STATUS p_status_end;
+ volatile PIPS_STATUS p_status_tail;
+ volatile uint32_t hw_status_start;
+ volatile uint32_t hw_status_tail;
+} IPS_ADAPTER, *PIPS_ADAPTER;
+
+typedef struct {
+ uint8_t ucLogDriveCount;
+ uint8_t ucMiscFlag;
+ uint8_t ucSLTFlag;
+ uint8_t ucBSTFlag;
+ uint8_t ucPwrChgCnt;
+ uint8_t ucWrongAdrCnt;
+ uint8_t ucUnidentCnt;
+ uint8_t ucNVramDevChgCnt;
+ uint8_t CodeBlkVersion[8];
+ uint8_t BootBlkVersion[8];
+ uint32_t ulDriveSize[IPS_MAX_LD];
+ uint8_t ucConcurrentCmdCount;
+ uint8_t ucMaxPhysicalDevices;
+ uint16_t usFlashRepgmCount;
+ uint8_t ucDefunctDiskCount;
+ uint8_t ucRebuildFlag;
+ uint8_t ucOfflineLogDrvCount;
+ uint8_t ucCriticalDrvCount;
+ uint16_t usConfigUpdateCount;
+ uint8_t ucBlkFlag;
+ uint8_t reserved;
+ uint16_t usAddrDeadDisk[IPS_MAX_CHANNELS * (IPS_MAX_TARGETS + 1)];
+} IPS_ENQ, *PIPS_ENQ;
+
+typedef struct {
+ uint8_t ucInitiator;
+ uint8_t ucParameters;
+ uint8_t ucMiscFlag;
+ uint8_t ucState;
+ uint32_t ulBlockCount;
+ uint8_t ucDeviceId[28];
+} IPS_DEVSTATE, *PIPS_DEVSTATE;
+
+typedef struct {
+ uint8_t ucChn;
+ uint8_t ucTgt;
+ uint16_t ucReserved;
+ uint32_t ulStartSect;
+ uint32_t ulNoOfSects;
+} IPS_CHUNK, *PIPS_CHUNK;
+
+typedef struct {
+ uint16_t ucUserField;
+ uint8_t ucState;
+ uint8_t ucRaidCacheParam;
+ uint8_t ucNoOfChunkUnits;
+ uint8_t ucStripeSize;
+ uint8_t ucParams;
+ uint8_t ucReserved;
+ uint32_t ulLogDrvSize;
+ IPS_CHUNK chunk[IPS_MAX_CHUNKS];
+} IPS_LD, *PIPS_LD;
+
+typedef struct {
+ uint8_t board_disc[8];
+ uint8_t processor[8];
+ uint8_t ucNoChanType;
+ uint8_t ucNoHostIntType;
+ uint8_t ucCompression;
+ uint8_t ucNvramType;
+ uint32_t ulNvramSize;
+} IPS_HARDWARE, *PIPS_HARDWARE;
+
+typedef struct {
+ uint8_t ucLogDriveCount;
+ uint8_t ucDateD;
+ uint8_t ucDateM;
+ uint8_t ucDateY;
+ uint8_t init_id[4];
+ uint8_t host_id[12];
+ uint8_t time_sign[8];
+ uint32_t UserOpt;
+ uint16_t user_field;
+ uint8_t ucRebuildRate;
+ uint8_t ucReserve;
+ IPS_HARDWARE hardware_disc;
+ IPS_LD logical_drive[IPS_MAX_LD];
+ IPS_DEVSTATE dev[IPS_MAX_CHANNELS][IPS_MAX_TARGETS+1];
+ uint8_t reserved[512];
+} IPS_CONF, *PIPS_CONF;
+
+typedef struct {
+ uint32_t signature;
+ uint8_t reserved1;
+ uint8_t adapter_slot;
+ uint16_t adapter_type;
+ uint8_t ctrl_bios[8];
+ uint8_t versioning; /* 1 = Versioning Supported, else 0 */
+ uint8_t version_mismatch; /* 1 = Versioning MisMatch, else 0 */
+ uint8_t reserved2;
+ uint8_t operating_system;
+ uint8_t driver_high[4];
+ uint8_t driver_low[4];
+ uint8_t BiosCompatibilityID[8];
+ uint8_t ReservedForOS2[8];
+ uint8_t bios_high[4]; /* Adapter's Flashed BIOS Version */
+ uint8_t bios_low[4];
+ uint8_t adapter_order[16]; /* BIOS Telling us the Sort Order */
+ uint8_t Filler[60];
+} IPS_NVRAM_P5, *PIPS_NVRAM_P5;
+
+/*--------------------------------------------------------------------------*/
+/* Data returned from a GetVersion Command */
+/*--------------------------------------------------------------------------*/
+
+ /* SubSystem Parameter[4] */
+#define IPS_GET_VERSION_SUPPORT 0x00018000 /* Mask for Versioning Support */
+
+typedef struct
+{
+ uint32_t revision;
+ uint8_t bootBlkVersion[32];
+ uint8_t bootBlkAttributes[4];
+ uint8_t codeBlkVersion[32];
+ uint8_t biosVersion[32];
+ uint8_t biosAttributes[4];
+ uint8_t compatibilityId[32];
+ uint8_t reserved[4];
+} IPS_VERSION_DATA;
+
+
+typedef struct _IPS_SUBSYS {
+ uint32_t param[128];
+} IPS_SUBSYS, *PIPS_SUBSYS;
+
+/**
+ ** SCSI Structures
+ **/
+
+/*
+ * Inquiry Data Format
+ */
+typedef struct {
+ uint8_t DeviceType;
+ uint8_t DeviceTypeQualifier;
+ uint8_t Version;
+ uint8_t ResponseDataFormat;
+ uint8_t AdditionalLength;
+ uint8_t Reserved;
+ uint8_t Flags[2];
+ uint8_t VendorId[8];
+ uint8_t ProductId[16];
+ uint8_t ProductRevisionLevel[4];
+ uint8_t Reserved2; /* Provides NULL terminator to name */
+} IPS_SCSI_INQ_DATA, *PIPS_SCSI_INQ_DATA;
+
+/*
+ * Read Capacity Data Format
+ */
+typedef struct {
+ uint32_t lba;
+ uint32_t len;
+} IPS_SCSI_CAPACITY;
+
+/*
+ * Request Sense Data Format
+ */
+typedef struct {
+ uint8_t ResponseCode;
+ uint8_t SegmentNumber;
+ uint8_t Flags;
+ uint8_t Information[4];
+ uint8_t AdditionalLength;
+ uint8_t CommandSpecific[4];
+ uint8_t AdditionalSenseCode;
+ uint8_t AdditionalSenseCodeQual;
+ uint8_t FRUCode;
+ uint8_t SenseKeySpecific[3];
+} IPS_SCSI_REQSEN;
+
+/*
+ * Sense Data Format - Page 3
+ */
+typedef struct {
+ uint8_t PageCode;
+ uint8_t PageLength;
+ uint16_t TracksPerZone;
+ uint16_t AltSectorsPerZone;
+ uint16_t AltTracksPerZone;
+ uint16_t AltTracksPerVolume;
+ uint16_t SectorsPerTrack;
+ uint16_t BytesPerSector;
+ uint16_t Interleave;
+ uint16_t TrackSkew;
+ uint16_t CylinderSkew;
+ uint8_t flags;
+ uint8_t reserved[3];
+} IPS_SCSI_MODE_PAGE3;
+
+/*
+ * Sense Data Format - Page 4
+ */
+typedef struct {
+ uint8_t PageCode;
+ uint8_t PageLength;
+ uint16_t CylindersHigh;
+ uint8_t CylindersLow;
+ uint8_t Heads;
+ uint16_t WritePrecompHigh;
+ uint8_t WritePrecompLow;
+ uint16_t ReducedWriteCurrentHigh;
+ uint8_t ReducedWriteCurrentLow;
+ uint16_t StepRate;
+ uint16_t LandingZoneHigh;
+ uint8_t LandingZoneLow;
+ uint8_t flags;
+ uint8_t RotationalOffset;
+ uint8_t Reserved;
+ uint16_t MediumRotationRate;
+ uint8_t Reserved2[2];
+} IPS_SCSI_MODE_PAGE4;
+
+/*
+ * Sense Data Format - Page 8
+ */
+typedef struct {
+ uint8_t PageCode;
+ uint8_t PageLength;
+ uint8_t flags;
+ uint8_t RetentPrio;
+ uint16_t DisPrefetchLen;
+ uint16_t MinPrefetchLen;
+ uint16_t MaxPrefetchLen;
+ uint16_t MaxPrefetchCeiling;
+} IPS_SCSI_MODE_PAGE8;
+
+/*
+ * Sense Data Format - Block Descriptor (DASD)
+ */
+typedef struct {
+ uint32_t NumberOfBlocks;
+ uint8_t DensityCode;
+ uint16_t BlockLengthHigh;
+ uint8_t BlockLengthLow;
+} IPS_SCSI_MODE_PAGE_BLKDESC;
+
+/*
+ * Sense Data Format - Mode Page Header
+ */
+typedef struct {
+ uint8_t DataLength;
+ uint8_t MediumType;
+ uint8_t Reserved;
+ uint8_t BlockDescLength;
+} IPS_SCSI_MODE_PAGE_HEADER;
+
+typedef struct {
+ IPS_SCSI_MODE_PAGE_HEADER hdr;
+ IPS_SCSI_MODE_PAGE_BLKDESC blkdesc;
+
+ union {
+ IPS_SCSI_MODE_PAGE3 pg3;
+ IPS_SCSI_MODE_PAGE4 pg4;
+ IPS_SCSI_MODE_PAGE8 pg8;
+ } pdata;
+} IPS_SCSI_MODE_PAGE_DATA;
+
+/*
+ * Scatter Gather list format
+ */
+typedef struct ips_sglist {
+ uint32_t address;
+ uint32_t length;
+} IPS_STD_SG_LIST;
+
+typedef struct ips_enh_sglist {
+ uint32_t address_lo;
+ uint32_t address_hi;
+ uint32_t length;
+ uint32_t reserved;
+} IPS_ENH_SG_LIST;
+
+typedef union {
+ void *list;
+ IPS_STD_SG_LIST *std_list;
+ IPS_ENH_SG_LIST *enh_list;
+} IPS_SG_LIST;
+
+typedef struct {
+ char *option_name;
+ int *option_flag;
+ int option_value;
+} IPS_OPTION;
+
+/*
+ * Status Info
+ */
+typedef struct ips_stat {
+ uint32_t residue_len;
+ void *scb_addr;
+ uint8_t padding[12 - sizeof(void *)];
+} ips_stat_t;
+
+/*
+ * SCB Queue Format
+ */
+typedef struct ips_scb_queue {
+ struct ips_scb *head;
+ struct ips_scb *tail;
+ int count;
+} ips_scb_queue_t;
+
+/*
+ * Wait queue_format
+ */
+typedef struct ips_wait_queue {
+ struct scsi_cmnd *head;
+ struct scsi_cmnd *tail;
+ int count;
+} ips_wait_queue_t;
+
+typedef struct ips_copp_wait_item {
+ struct scsi_cmnd *scsi_cmd;
+ struct ips_copp_wait_item *next;
+} ips_copp_wait_item_t;
+
+typedef struct ips_copp_queue {
+ struct ips_copp_wait_item *head;
+ struct ips_copp_wait_item *tail;
+ int count;
+} ips_copp_queue_t;
+
+/* forward decl for host structure */
+struct ips_ha;
+
+typedef struct {
+ int (*reset)(struct ips_ha *);
+ int (*issue)(struct ips_ha *, struct ips_scb *);
+ int (*isinit)(struct ips_ha *);
+ int (*isintr)(struct ips_ha *);
+ int (*init)(struct ips_ha *);
+ int (*erasebios)(struct ips_ha *);
+ int (*programbios)(struct ips_ha *, char *, uint32_t, uint32_t);
+ int (*verifybios)(struct ips_ha *, char *, uint32_t, uint32_t);
+ void (*statinit)(struct ips_ha *);
+ int (*intr)(struct ips_ha *);
+ void (*enableint)(struct ips_ha *);
+ uint32_t (*statupd)(struct ips_ha *);
+} ips_hw_func_t;
+
+typedef struct ips_ha {
+ uint8_t ha_id[IPS_MAX_CHANNELS+1];
+ uint32_t dcdb_active[IPS_MAX_CHANNELS];
+ uint32_t io_addr; /* Base I/O address */
+ uint8_t ntargets; /* Number of targets */
+ uint8_t nbus; /* Number of buses */
+ uint8_t nlun; /* Number of Luns */
+ uint16_t ad_type; /* Adapter type */
+ uint16_t host_num; /* Adapter number */
+ uint32_t max_xfer; /* Maximum Xfer size */
+ uint32_t max_cmds; /* Max concurrent commands */
+ uint32_t num_ioctl; /* Number of Ioctls */
+ ips_stat_t sp; /* Status packer pointer */
+ struct ips_scb *scbs; /* Array of all CCBS */
+ struct ips_scb *scb_freelist; /* SCB free list */
+ ips_wait_queue_t scb_waitlist; /* Pending SCB list */
+ ips_copp_queue_t copp_waitlist; /* Pending PT list */
+ ips_scb_queue_t scb_activelist; /* Active SCB list */
+ IPS_IO_CMD *dummy; /* dummy command */
+ IPS_ADAPTER *adapt; /* Adapter status area */
+ IPS_LD_INFO *logical_drive_info; /* Adapter Logical Drive Info */
+ dma_addr_t logical_drive_info_dma_addr; /* Logical Drive Info DMA Address */
+ IPS_ENQ *enq; /* Adapter Enquiry data */
+ IPS_CONF *conf; /* Adapter config data */
+ IPS_NVRAM_P5 *nvram; /* NVRAM page 5 data */
+ IPS_SUBSYS *subsys; /* Subsystem parameters */
+ char *ioctl_data; /* IOCTL data area */
+ uint32_t ioctl_datasize; /* IOCTL data size */
+ uint32_t cmd_in_progress; /* Current command in progress*/
+ int flags; /* */
+ uint8_t waitflag; /* are we waiting for cmd */
+ uint8_t active;
+ int ioctl_reset; /* IOCTL Requested Reset Flag */
+ uint16_t reset_count; /* number of resets */
+ time_t last_ffdc; /* last time we sent ffdc info*/
+ uint8_t slot_num; /* PCI Slot Number */
+ int ioctl_len; /* size of ioctl buffer */
+ dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/
+ uint8_t bios_version[8]; /* BIOS Revision */
+ uint32_t mem_addr; /* Memory mapped address */
+ uint32_t io_len; /* Size of IO Address */
+ uint32_t mem_len; /* Size of memory address */
+ char __iomem *mem_ptr; /* Memory mapped Ptr */
+ char __iomem *ioremap_ptr;/* ioremapped memory pointer */
+ ips_hw_func_t func; /* hw function pointers */
+ struct pci_dev *pcidev; /* PCI device handle */
+ char *flash_data; /* Save Area for flash data */
+ int flash_len; /* length of flash buffer */
+ u32 flash_datasize; /* Save Area for flash data size */
+ dma_addr_t flash_busaddr; /* dma address of flash buffer*/
+ dma_addr_t enq_busaddr; /* dma address of enq struct */
+ uint8_t requires_esl; /* Requires an EraseStripeLock */
+} ips_ha_t;
+
+typedef void (*ips_scb_callback) (ips_ha_t *, struct ips_scb *);
+
+/*
+ * SCB Format
+ */
+typedef struct ips_scb {
+ IPS_HOST_COMMAND cmd;
+ IPS_DCDB_TABLE dcdb;
+ uint8_t target_id;
+ uint8_t bus;
+ uint8_t lun;
+ uint8_t cdb[12];
+ uint32_t scb_busaddr;
+ uint32_t old_data_busaddr; // Obsolete, but kept for old utility compatibility
+ uint32_t timeout;
+ uint8_t basic_status;
+ uint8_t extended_status;
+ uint8_t breakup;
+ uint8_t sg_break;
+ uint32_t data_len;
+ uint32_t sg_len;
+ uint32_t flags;
+ uint32_t op_code;
+ IPS_SG_LIST sg_list;
+ struct scsi_cmnd *scsi_cmd;
+ struct ips_scb *q_next;
+ ips_scb_callback callback;
+ uint32_t sg_busaddr;
+ int sg_count;
+ dma_addr_t data_busaddr;
+} ips_scb_t;
+
+typedef struct ips_scb_pt {
+ IPS_HOST_COMMAND cmd;
+ IPS_DCDB_TABLE dcdb;
+ uint8_t target_id;
+ uint8_t bus;
+ uint8_t lun;
+ uint8_t cdb[12];
+ uint32_t scb_busaddr;
+ uint32_t data_busaddr;
+ uint32_t timeout;
+ uint8_t basic_status;
+ uint8_t extended_status;
+ uint16_t breakup;
+ uint32_t data_len;
+ uint32_t sg_len;
+ uint32_t flags;
+ uint32_t op_code;
+ IPS_SG_LIST *sg_list;
+ struct scsi_cmnd *scsi_cmd;
+ struct ips_scb *q_next;
+ ips_scb_callback callback;
+} ips_scb_pt_t;
+
+/*
+ * Passthru Command Format
+ */
+typedef struct {
+ uint8_t CoppID[4];
+ uint32_t CoppCmd;
+ uint32_t PtBuffer;
+ uint8_t *CmdBuffer;
+ uint32_t CmdBSize;
+ ips_scb_pt_t CoppCP;
+ uint32_t TimeOut;
+ uint8_t BasicStatus;
+ uint8_t ExtendedStatus;
+ uint8_t AdapterType;
+ uint8_t reserved;
+} ips_passthru_t;
+
+#endif
+
+/* The Version Information below gets created by SED during the build process. */
+/* Do not modify the next line; it's what SED is looking for to do the insert. */
+/* Version Info */
+/*************************************************************************
+*
+* VERSION.H -- version numbers and copyright notices in various formats
+*
+*************************************************************************/
+
+#define IPS_VER_MAJOR 7
+#define IPS_VER_MAJOR_STRING __stringify(IPS_VER_MAJOR)
+#define IPS_VER_MINOR 12
+#define IPS_VER_MINOR_STRING __stringify(IPS_VER_MINOR)
+#define IPS_VER_BUILD 05
+#define IPS_VER_BUILD_STRING __stringify(IPS_VER_BUILD)
+#define IPS_VER_STRING IPS_VER_MAJOR_STRING "." \
+ IPS_VER_MINOR_STRING "." IPS_VER_BUILD_STRING
+#define IPS_RELEASE_ID 0x00020000
+#define IPS_BUILD_IDENT 761
+#define IPS_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2002. All Rights Reserved."
+#define IPS_ADAPTECCOPYRIGHT_STRING "(c) Copyright Adaptec, Inc. 2002 to 2004. All Rights Reserved."
+#define IPS_DELLCOPYRIGHT_STRING "(c) Copyright Dell 2004. All Rights Reserved."
+#define IPS_NT_LEGALCOPYRIGHT_STRING "(C) Copyright IBM Corp. 1994, 2002."
+
+/* Version numbers for various adapters */
+#define IPS_VER_SERVERAID1 "2.25.01"
+#define IPS_VER_SERVERAID2 "2.88.13"
+#define IPS_VER_NAVAJO "2.88.13"
+#define IPS_VER_SERVERAID3 "6.10.24"
+#define IPS_VER_SERVERAID4H "7.12.02"
+#define IPS_VER_SERVERAID4MLx "7.12.02"
+#define IPS_VER_SARASOTA "7.12.02"
+#define IPS_VER_MARCO "7.12.02"
+#define IPS_VER_SEBRING "7.12.02"
+#define IPS_VER_KEYWEST "7.12.02"
+
+/* Compatibility IDs for various adapters */
+#define IPS_COMPAT_UNKNOWN ""
+#define IPS_COMPAT_CURRENT "KW710"
+#define IPS_COMPAT_SERVERAID1 "2.25.01"
+#define IPS_COMPAT_SERVERAID2 "2.88.13"
+#define IPS_COMPAT_NAVAJO "2.88.13"
+#define IPS_COMPAT_KIOWA "2.88.13"
+#define IPS_COMPAT_SERVERAID3H "SB610"
+#define IPS_COMPAT_SERVERAID3L "SB610"
+#define IPS_COMPAT_SERVERAID4H "KW710"
+#define IPS_COMPAT_SERVERAID4M "KW710"
+#define IPS_COMPAT_SERVERAID4L "KW710"
+#define IPS_COMPAT_SERVERAID4Mx "KW710"
+#define IPS_COMPAT_SERVERAID4Lx "KW710"
+#define IPS_COMPAT_SARASOTA "KW710"
+#define IPS_COMPAT_MARCO "KW710"
+#define IPS_COMPAT_SEBRING "KW710"
+#define IPS_COMPAT_TAMPA "KW710"
+#define IPS_COMPAT_KEYWEST "KW710"
+#define IPS_COMPAT_BIOS "KW710"
+
+#define IPS_COMPAT_MAX_ADAPTER_TYPE 18
+#define IPS_COMPAT_ID_LENGTH 8
+
+#define IPS_DEFINE_COMPAT_TABLE(tablename) \
+ char tablename[IPS_COMPAT_MAX_ADAPTER_TYPE] [IPS_COMPAT_ID_LENGTH] = { \
+ IPS_COMPAT_UNKNOWN, \
+ IPS_COMPAT_SERVERAID1, \
+ IPS_COMPAT_SERVERAID2, \
+ IPS_COMPAT_NAVAJO, \
+ IPS_COMPAT_KIOWA, \
+ IPS_COMPAT_SERVERAID3H, \
+ IPS_COMPAT_SERVERAID3L, \
+ IPS_COMPAT_SERVERAID4H, \
+ IPS_COMPAT_SERVERAID4M, \
+ IPS_COMPAT_SERVERAID4L, \
+ IPS_COMPAT_SERVERAID4Mx, \
+ IPS_COMPAT_SERVERAID4Lx, \
+ IPS_COMPAT_SARASOTA, /* one-channel variety of SARASOTA */ \
+ IPS_COMPAT_SARASOTA, /* two-channel variety of SARASOTA */ \
+ IPS_COMPAT_MARCO, \
+ IPS_COMPAT_SEBRING, \
+ IPS_COMPAT_TAMPA, \
+ IPS_COMPAT_KEYWEST \
+ }
+
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 000000000..3359e10e0
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+ remote_device.o port.o \
+ host.o task.o probe_roms.o \
+ remote_node_context.o \
+ remote_node_table.o \
+ unsolicited_frame_control.o \
+ port_config.o \
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 000000000..609dafd66
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2807 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
+
+#define smu_max_ports(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+ )
+
+#define smu_max_task_contexts(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+ )
+
+#define smu_max_rncs(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+ )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
+
+/**
+ *
+ *
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array inde
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+ (\
+ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+ )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table, u32 initial_state)
+{
+ sci_state_transition_t handler;
+
+ sm->initial_state_id = initial_state;
+ sm->previous_state_id = initial_state;
+ sm->current_state_id = initial_state;
+ sm->state_table = state_table;
+
+ handler = sm->state_table[initial_state].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+ sci_state_transition_t handler;
+
+ handler = sm->state_table[sm->current_state_id].exit_state;
+ if (handler)
+ handler(sm);
+
+ sm->previous_state_id = sm->current_state_id;
+ sm->current_state_id = next_state;
+
+ handler = sm->state_table[sm->current_state_id].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+ u32 get_value = ihost->completion_queue_get;
+ u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+ if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+ COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+ return true;
+
+ return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+ if (sci_controller_completion_queue_has_entries(ihost))
+ return true;
+
+ /* we have a spurious interrupt it could be that we have already
+ * emptied the completion queue from a previous interrupt
+ * FIXME: really!?
+ */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+ /* There is a race in the hardware that could cause us not to be
+ * notified of an interrupt completion if we do not take this
+ * step. We will mask then unmask the interrupts so if there is
+ * another interrupt pending the clearing of the interrupt
+ * source we get the next interrupt message.
+ */
+ spin_lock(&ihost->scic_lock);
+ if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+ }
+ spin_unlock(&ihost->scic_lock);
+
+ return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost))
+ tasklet_schedule(&ihost->completion_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+ interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+ if (interrupt_status != 0) {
+ /*
+ * There is an error interrupt pending so let it through and handle
+ * in the callback */
+ return true;
+ }
+
+ /*
+ * There is a race in the hardware that could cause us not to be notified
+ * of an interrupt completion if we do not take this step. We will mask
+ * then unmask the error interrupts so if there was another interrupt
+ * pending we will be notified.
+ * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+ writel(0xff, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+
+ return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index = SCU_GET_COMPLETION_INDEX(ent);
+ struct isci_request *ireq = ihost->reqs[index];
+
+ /* Make sure that we really want to process this IO request */
+ if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+ ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+ ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+ /* Yep this is a valid io request pass it along to the
+ * io request handler
+ */
+ sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ struct isci_request *ireq;
+ struct isci_remote_device *idev;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_command_request_type(ent)) {
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+ ireq = ihost->reqs[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+ __func__, ent, ireq);
+ /* @todo For a post TC operation we need to fail the IO
+ * request
+ */
+ break;
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+ idev = ihost->device_table[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+ __func__, ent, idev);
+ /* @todo For a port RNC operation we need to fail the
+ * device
+ */
+ break;
+ default:
+ dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+ __func__, ent);
+ break;
+ }
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ u32 frame_index;
+
+ struct scu_unsolicited_frame_header *frame_header;
+ struct isci_phy *iphy;
+ struct isci_remote_device *idev;
+
+ enum sci_status result = SCI_FAILURE;
+
+ frame_index = SCU_GET_FRAME_INDEX(ent);
+
+ frame_header = ihost->uf_control.buffers.array[frame_index].header;
+ ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+ if (SCU_GET_FRAME_ERROR(ent)) {
+ /*
+ * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
+ * / this cause a problem? We expect the phy initialization will
+ * / fail if there is an error in the frame. */
+ sci_controller_release_frame(ihost, frame_index);
+ return;
+ }
+
+ if (frame_header->is_address_frame) {
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ /*
+ * This is a signature fis or a frame from a direct attached SATA
+ * device that has not yet been created. In either case forwared
+ * the frame to the PE and let it take care of the frame data. */
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+ if (index < ihost->remote_node_entries)
+ idev = ihost->device_table[index];
+ else
+ idev = NULL;
+
+ if (idev != NULL)
+ result = sci_remote_device_frame_handler(idev, frame_index);
+ else
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ }
+
+ if (result != SCI_SUCCESS) {
+ /*
+ * / @todo Is there any reason to report some additional error message
+ * / when we get this failure notifiction? */
+ }
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+ struct isci_remote_device *idev;
+ struct isci_request *ireq;
+ struct isci_phy *iphy;
+ u32 index;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_event_type(ent)) {
+ case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+ /* / @todo The driver did something wrong and we need to fix the condtion. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received SMU command error "
+ "0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+ case SCU_EVENT_TYPE_SMU_ERROR:
+ case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+ /*
+ * / @todo This is a hardware failure and its likely that we want to
+ * / reset the controller. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received fatal controller "
+ "event 0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+ ireq = ihost->reqs[index];
+ sci_io_request_event_handler(ireq, ent);
+ break;
+
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ switch (scu_get_event_specifier(ent)) {
+ case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+ case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+ ireq = ihost->reqs[index];
+ if (ireq != NULL)
+ sci_io_request_event_handler(ireq, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for io request object "
+ "that doesnt exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+
+ case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+ idev = ihost->device_table[index];
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for remote device object "
+ "that doesnt exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+ }
+ break;
+
+ case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+ /*
+ * direct the broadcast change event to the phy first and then let
+ * the phy redirect the broadcast change to the port object */
+ case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+ /*
+ * direct error counter event to the phy object since that is where
+ * we get the event notification. This is a type 4 event. */
+ case SCU_EVENT_TYPE_OSSP_EVENT:
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ sci_phy_event_handler(iphy, ent);
+ break;
+
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ if (index < ihost->remote_node_entries) {
+ idev = ihost->device_table[index];
+
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received event 0x%x "
+ "for remote device object 0x%0x that doesnt "
+ "exist.\n",
+ __func__,
+ ihost,
+ ent,
+ index);
+
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown event code %x\n",
+ __func__,
+ ent);
+ break;
+ }
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+ u32 completion_count = 0;
+ u32 ent;
+ u32 get_index;
+ u32 get_cycle;
+ u32 event_get;
+ u32 event_cycle;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue beginning get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+ /* Get the component parts of the completion queue */
+ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+ get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+ event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+ event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+ while (
+ NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+ == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+ ) {
+ completion_count++;
+
+ ent = ihost->completion_queue[get_index];
+
+ /* increment the get pointer and check for rollover to toggle the cycle bit */
+ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+ (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+ get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue entry:0x%08x\n",
+ __func__,
+ ent);
+
+ switch (SCU_GET_COMPLETION_TYPE(ent)) {
+ case SCU_COMPLETION_TYPE_TASK:
+ sci_controller_task_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_SDMA:
+ sci_controller_sdma_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_UFI:
+ sci_controller_unsolicited_frame(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_EVENT:
+ sci_controller_event_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_NOTIFY: {
+ event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+ (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+ event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+ sci_controller_event_completion(ihost, ent);
+ break;
+ }
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown "
+ "completion type %x\n",
+ __func__,
+ ent);
+ break;
+ }
+ }
+
+ /* Update the get register if we completed one or more entries */
+ if (completion_count > 0) {
+ ihost->completion_queue_get =
+ SMU_CQGR_GEN_BIT(ENABLE) |
+ SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+ event_cycle |
+ SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+ get_cycle |
+ SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+ writel(ihost->completion_queue_get,
+ &ihost->smu_registers->completion_queue_get);
+
+ }
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue ending get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+}
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+
+ if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+ sci_controller_completion_queue_has_entries(ihost)) {
+
+ sci_controller_process_completions(ihost);
+ writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+ } else {
+ dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+ interrupt_status);
+
+ sci_change_state(&ihost->sm, SCIC_FAILED);
+
+ return;
+ }
+
+ /* If we dont process any completions I am not sure that we want to do this.
+ * We are in the middle of a hardware fault and should probably be reset.
+ */
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost)) {
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ tasklet_schedule(&ihost->completion_tasklet);
+ ret = IRQ_HANDLED;
+ } else if (sci_controller_error_isr(ihost)) {
+ spin_lock(&ihost->scic_lock);
+ sci_controller_error_handler(ihost);
+ spin_unlock(&ihost->scic_lock);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_error_isr(ihost))
+ sci_controller_error_handler(ihost);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ * through the ISCI Module, to indicate controller start status.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ * core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ if (completion_status != SCI_SUCCESS)
+ dev_info(&ihost->pdev->dev,
+ "controller start timed out, continuing...\n");
+ clear_bit(IHOST_START_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = ha->lldd_ha;
+
+ if (test_bit(IHOST_START_PENDING, &ihost->flags))
+ return 0;
+
+ sas_drain_work(ha);
+
+ return 1;
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ * suggested sci_controller_start() timeout amount. The user is free to
+ * use any timeout value, but this method provides the suggested minimum
+ * start timeout value. The returned value is based upon empirical
+ * information determined as a result of interoperability testing.
+ * @controller: the handle to the controller object for which to return the
+ * suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+ /* Validate the user supplied parameters. */
+ if (!ihost)
+ return 0;
+
+ /*
+ * The suggested minimum timeout value for a controller start operation:
+ *
+ * Signature FIS Timeout
+ * + Phy Start Timeout
+ * + Number of Phy Spin Up Intervals
+ * ---------------------------------
+ * Number of milliseconds for the controller start operation.
+ *
+ * NOTE: The number of phy spin up intervals will be equivalent
+ * to the number of phys divided by the number phys allowed
+ * per interval - 1 (once OEM parameters are supported).
+ * Currently we assume only 1 phy per interval. */
+
+ return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+ + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+ set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+ clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
+ writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+ readl(&ihost->smu_registers->interrupt_mask); /* flush */
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+ u32 port_task_scheduler_value;
+
+ port_task_scheduler_value =
+ readl(&ihost->scu_registers->peg0.ptsg.control);
+ port_task_scheduler_value |=
+ (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+ SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+ writel(port_task_scheduler_value,
+ &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+ u32 task_assignment;
+
+ /*
+ * Assign all the TCs to function 0
+ * TODO: Do we actually need to read this register to write it back?
+ */
+
+ task_assignment =
+ readl(&ihost->smu_registers->task_context_assignment[0]);
+
+ task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+ (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
+ (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+ writel(task_assignment,
+ &ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+ u32 index;
+ u32 completion_queue_control_value;
+ u32 completion_queue_get_value;
+ u32 completion_queue_put_value;
+
+ ihost->completion_queue_get = 0;
+
+ completion_queue_control_value =
+ (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+ SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+ writel(completion_queue_control_value,
+ &ihost->smu_registers->completion_queue_control);
+
+
+ /* Set the completion queue get pointer and enable the queue */
+ completion_queue_get_value = (
+ (SMU_CQGR_GEN_VAL(POINTER, 0))
+ | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+ | (SMU_CQGR_GEN_BIT(ENABLE))
+ | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+ );
+
+ writel(completion_queue_get_value,
+ &ihost->smu_registers->completion_queue_get);
+
+ /* Set the completion queue put pointer */
+ completion_queue_put_value = (
+ (SMU_CQPR_GEN_VAL(POINTER, 0))
+ | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+ );
+
+ writel(completion_queue_put_value,
+ &ihost->smu_registers->completion_queue_put);
+
+ /* Initialize the cycle bit of the completion queue entries */
+ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+ /*
+ * If get.cycle_bit != completion_queue.cycle_bit
+ * its not a valid completion queue entry
+ * so at system start all entries are invalid */
+ ihost->completion_queue[index] = 0x80000000;
+ }
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+ u32 frame_queue_control_value;
+ u32 frame_queue_get_value;
+ u32 frame_queue_put_value;
+
+ /* Write the queue size */
+ frame_queue_control_value =
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+ writel(frame_queue_control_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+ /* Setup the get pointer for the unsolicited frame queue */
+ frame_queue_get_value = (
+ SCU_UFQGP_GEN_VAL(POINTER, 0)
+ | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+ );
+
+ writel(frame_queue_get_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+ /* Setup the put pointer for the unsolicited frame queue */
+ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+ writel(frame_queue_put_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+ if (ihost->sm.current_state_id == SCIC_STARTING) {
+ /*
+ * We move into the ready state, because some of the phys/ports
+ * may be up and operational.
+ */
+ sci_change_state(&ihost->sm, SCIC_READY);
+
+ isci_host_start_complete(ihost, status);
+ }
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+ enum sci_phy_states state;
+
+ state = iphy->sm.current_state_id;
+ switch (state) {
+ case SCI_PHY_STARTING:
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool is_controller_start_complete(struct isci_host *ihost)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ u32 state = iphy->sm.current_state_id;
+
+ /* in apc mode we need to check every phy, in
+ * mpc mode we only need to check phys that have
+ * been configured into a port
+ */
+ if (is_port_config_apc(ihost))
+ /* pass */;
+ else if (!phy_get_non_dummy_port(iphy))
+ continue;
+
+ /* The controller start operation is complete iff:
+ * - all links have been given an opportunity to start
+ * - have no indication of a connected device
+ * - have an indication of a connected device and it has
+ * finished the link training process.
+ */
+ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @scic: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct isci_phy *iphy;
+ enum sci_status status;
+
+ status = SCI_SUCCESS;
+
+ if (ihost->phy_startup_timer_pending)
+ return status;
+
+ if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+ if (is_controller_start_complete(ihost)) {
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ }
+ } else {
+ iphy = &ihost->phys[ihost->next_phy_to_start];
+
+ if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ if (phy_get_non_dummy_port(iphy) == NULL) {
+ ihost->next_phy_to_start++;
+
+ /* Caution recursion ahead be forwarned
+ *
+ * The PHY was never added to a PORT in MPC mode
+ * so start the next phy in sequence This phy
+ * will never go link up and will not draw power
+ * the OEM parameters either configured the phy
+ * incorrectly for the PORT or it was never
+ * assigned to a PORT
+ */
+ return sci_controller_start_next_phy(ihost);
+ }
+ }
+
+ status = sci_phy_start(iphy);
+
+ if (status == SCI_SUCCESS) {
+ sci_mod_timer(&ihost->phy_timer,
+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+ ihost->phy_startup_timer_pending = true;
+ } else {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop phy %d because of status "
+ "%d.\n",
+ __func__,
+ ihost->phys[ihost->next_phy_to_start].phy_index,
+ status);
+ }
+
+ ihost->next_phy_to_start++;
+ }
+
+ return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->phy_startup_timer_pending = false;
+
+ do {
+ status = sci_controller_start_next_phy(ihost);
+ } while (status != SCI_SUCCESS);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+ return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+ u32 timeout)
+{
+ enum sci_status result;
+ u16 index;
+
+ if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Build the TCi free pool */
+ BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+ ihost->tci_head = 0;
+ ihost->tci_tail = 0;
+ for (index = 0; index < ihost->task_context_entries; index++)
+ isci_tci_free(ihost, index);
+
+ /* Build the RNi free pool */
+ sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+ ihost->remote_node_entries);
+
+ /*
+ * Before anything else lets make sure we will not be
+ * interrupted by the hardware.
+ */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Enable the port task scheduler */
+ sci_controller_enable_port_task_scheduler(ihost);
+
+ /* Assign all the task entries to ihost physical function */
+ sci_controller_assign_task_entries(ihost);
+
+ /* Now initialize the completion queue */
+ sci_controller_initialize_completion_queue(ihost);
+
+ /* Initialize the unsolicited frame queue for use */
+ sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+ /* Start all of the ports on this controller */
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ result = sci_port_start(iport);
+ if (result)
+ return result;
+ }
+
+ sci_controller_start_next_phy(ihost);
+
+ sci_mod_timer(&ihost->timer, timeout);
+
+ sci_change_state(&ihost->sm, SCIC_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+void isci_host_start(struct Scsi_Host *shost)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+ set_bit(IHOST_START_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_start(ihost, tmo);
+ sci_controller_enable_interrupts(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost)
+{
+ sci_controller_disable_interrupts(ihost);
+ clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+ /* Empty out the completion queue */
+ if (sci_controller_completion_queue_has_entries(ihost))
+ sci_controller_process_completions(ihost);
+
+ /* Clear the interrupt and enable all interrupts again */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ /* Could we write the value of SMU_ISR_COMPLETION? */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
+{
+ if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
+ !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - ireq/task = %p/%p\n",
+ __func__, ireq, task);
+ task->lldd_task = NULL;
+ task->task_done(task);
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Error - ireq/task = %p/%p\n",
+ __func__, ireq, task);
+ if (sas_protocol_ata(task->task_proto))
+ task->lldd_task = NULL;
+ sas_task_abort(task);
+ }
+ } else
+ task->lldd_task = NULL;
+
+ if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ wake_up_all(&ihost->eventq);
+
+ if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+}
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ * routine that calls the sci core library's completion handler. It's
+ * scheduled as a tasklet from the interrupt service routine when interrupts
+ * in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+void isci_host_completion_routine(unsigned long data)
+{
+ struct isci_host *ihost = (struct isci_host *)data;
+ u16 active;
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_completion_handler(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+
+ /*
+ * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
+ * issued for hardware issue workaround
+ */
+ active = isci_tci_active(ihost) - SCI_MAX_PORTS;
+
+ /*
+ * the coalesence timeout doubles at each encoding step, so
+ * update it based on the ilog2 value of the outstanding requests
+ */
+ writel(SMU_ICC_GEN_VAL(NUMBER, active) |
+ SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
+ &ihost->smu_registers->interrupt_coalesce_control);
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ * object.This method will invoke the associated user callback upon
+ * completion. The completion callback is called when the following
+ * conditions are met: -# the method return status is SCI_SUCCESS. -# the
+ * controller has been quiesced. This method will ensure that all IO
+ * requests are quiesced, phys are stopped, and all additional operation by
+ * the hardware is halted.
+ * @controller: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * The controller must be in the STARTED or STOPPED state. Indicate if the
+ * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
+ * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
+ * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
+ * controller is not either in the STARTED or STOPPED states.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_mod_timer(&ihost->timer, timeout);
+ sci_change_state(&ihost->sm, SCIC_STOPPING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ * controller regardless of the state of said controller. This operation is
+ * considered destructive. In other words, all current operations are wiped
+ * out. No IO completions for outstanding devices occur. Outstanding IO
+ * requests are not aborted or completed at the actual remote device.
+ * @controller: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
+ * the controller reset operation is unable to complete.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_RESET:
+ case SCIC_READY:
+ case SCIC_STOPPING:
+ case SCIC_FAILED:
+ /*
+ * The reset operation is not a graceful cleanup, just
+ * perform the state transition.
+ */
+ sci_change_state(&ihost->sm, SCIC_RESETTING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status phy_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ phy_status = sci_phy_stop(&ihost->phys[index]);
+
+ if (phy_status != SCI_SUCCESS &&
+ phy_status != SCI_FAILURE_INVALID_STATE) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to stop "
+ "phy %d because of status %d.\n",
+ __func__,
+ ihost->phys[index].phy_index, phy_status);
+ }
+ }
+
+ return status;
+}
+
+
+/**
+ * isci_host_deinit - shutdown frame reception and dma
+ * @ihost: host to take down
+ *
+ * This is called in either the driver shutdown or the suspend path. In
+ * the shutdown case libsas went through port teardown and normal device
+ * removal (i.e. physical links stayed up to service scsi_device removal
+ * commands). In the suspend case we disable the hardware without
+ * notifying libsas of the link down events since we want libsas to
+ * remember the domain across the suspend/resume cycle
+ */
+void isci_host_deinit(struct isci_host *ihost)
+{
+ int i;
+
+ /* disable output data selects */
+ for (i = 0; i < isci_gpio_count(ihost); i++)
+ writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+
+ set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+ spin_unlock_irq(&ihost->scic_lock);
+
+ wait_for_stop(ihost);
+
+ /* phy stop is after controller stop to allow port and device to
+ * go idle before shutting down the phys, but the expectation is
+ * that i/o has been shut off well before we reach this
+ * function.
+ */
+ sci_controller_stop_phys(ihost);
+
+ /* disable sgpio: where the above wait should give time for the
+ * enclosure to sample the gpios going inactive
+ */
+ writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_reset(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+
+ /* Cancel any/all outstanding port timers */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ del_timer_sync(&iport->timer.timer);
+ }
+
+ /* Cancel any/all outstanding phy timers */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ del_timer_sync(&iphy->sata_timer.timer);
+ }
+
+ del_timer_sync(&ihost->port_agent.timer.timer);
+
+ del_timer_sync(&ihost->power_control.timer.timer);
+
+ del_timer_sync(&ihost->timer.timer);
+
+ del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX 256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ * configure the interrupt coalescence.
+ * @controller: This parameter represents the handle to the controller object
+ * for which its interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ * Queue before an interrupt is generated. If the number of entries exceed
+ * this number, an interrupt will be generated. The valid range of the input
+ * is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ * input is [0, 2700000] . A setting of 0 is allowed and results in no
+ * interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrutp coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+ u32 coalesce_number,
+ u32 coalesce_timeout)
+{
+ u8 timeout_encode = 0;
+ u32 min = 0;
+ u32 max = 0;
+
+ /* Check if the input parameters fall in the range. */
+ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ /*
+ * Defined encoding for interrupt coalescing timeout:
+ * Value Min Max Units
+ * ----- --- --- -----
+ * 0 - - Disabled
+ * 1 13.3 20.0 ns
+ * 2 26.7 40.0
+ * 3 53.3 80.0
+ * 4 106.7 160.0
+ * 5 213.3 320.0
+ * 6 426.7 640.0
+ * 7 853.3 1280.0
+ * 8 1.7 2.6 us
+ * 9 3.4 5.1
+ * 10 6.8 10.2
+ * 11 13.7 20.5
+ * 12 27.3 41.0
+ * 13 54.6 81.9
+ * 14 109.2 163.8
+ * 15 218.5 327.7
+ * 16 436.9 655.4
+ * 17 873.8 1310.7
+ * 18 1.7 2.6 ms
+ * 19 3.5 5.2
+ * 20 7.0 10.5
+ * 21 14.0 21.0
+ * 22 28.0 41.9
+ * 23 55.9 83.9
+ * 24 111.8 167.8
+ * 25 223.7 335.5
+ * 26 447.4 671.1
+ * 27 894.8 1342.2
+ * 28 1.8 2.7 s
+ * Others Undefined */
+
+ /*
+ * Use the table above to decide the encode of interrupt coalescing timeout
+ * value for register writing. */
+ if (coalesce_timeout == 0)
+ timeout_encode = 0;
+ else{
+ /* make the timeout value in unit of (10 ns). */
+ coalesce_timeout = coalesce_timeout * 100;
+ min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+ max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+ /* get the encode of timeout for register writing. */
+ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+ timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+ timeout_encode++) {
+ if (min <= coalesce_timeout && max > coalesce_timeout)
+ break;
+ else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+ && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+ if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+ break;
+ else{
+ timeout_encode++;
+ break;
+ }
+ } else {
+ max = max * 2;
+ min = min * 2;
+ }
+ }
+
+ if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+ /* the value is out of range. */
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+ SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+ &ihost->smu_registers->interrupt_coalesce_control);
+
+
+ ihost->interrupt_coalesce_number = (u16)coalesce_number;
+ ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+ return SCI_SUCCESS;
+}
+
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+ u32 val;
+
+ /* enable clock gating for power control of the scu unit */
+ val = readl(&ihost->smu_registers->clock_gating_control);
+ val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
+ SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
+ SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
+ val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
+ writel(val, &ihost->smu_registers->clock_gating_control);
+
+ /* set the default interrupt coalescence number and timeout value. */
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* disable interrupt coalescence. */
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status port_status;
+ enum sci_status status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ port_status = sci_port_stop(iport);
+
+ if ((port_status != SCI_SUCCESS) &&
+ (port_status != SCI_FAILURE_INVALID_STATE)) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to "
+ "stop port %d because of status %d.\n",
+ __func__,
+ iport->logical_port_index,
+ port_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status device_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if (ihost->device_table[index] != NULL) {
+ /* / @todo What timeout value do we want to provide to this request? */
+ device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+ if ((device_status != SCI_SUCCESS) &&
+ (device_status != SCI_FAILURE_INVALID_STATE)) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop device 0x%p because of "
+ "status %d.\n",
+ __func__,
+ ihost->device_table[index], device_status);
+ }
+ }
+ }
+
+ return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_controller_stop_devices(ihost);
+ sci_controller_stop_ports(ihost);
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ isci_host_stop_complete(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+ /* Disable interrupts so we dont take any spurious interrupts */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Reset the SCU */
+ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+ /* Delay for 1ms to before clearing the CQP and UFQPR. */
+ udelay(1000);
+
+ /* The write to the CQGR clears the CQP */
+ writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+ /* The write to the UFQGP clears the UFQPR */
+ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+
+ /* clear all interrupts */
+ writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_controller_reset_hardware(ihost);
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static const struct sci_base_state sci_controller_state_table[] = {
+ [SCIC_INITIAL] = {
+ .enter_state = sci_controller_initial_state_enter,
+ },
+ [SCIC_RESET] = {},
+ [SCIC_INITIALIZING] = {},
+ [SCIC_INITIALIZED] = {},
+ [SCIC_STARTING] = {
+ .exit_state = sci_controller_starting_state_exit,
+ },
+ [SCIC_READY] = {
+ .enter_state = sci_controller_ready_state_enter,
+ .exit_state = sci_controller_ready_state_exit,
+ },
+ [SCIC_RESETTING] = {
+ .enter_state = sci_controller_resetting_state_enter,
+ },
+ [SCIC_STOPPING] = {
+ .enter_state = sci_controller_stopping_state_enter,
+ .exit_state = sci_controller_stopping_state_exit,
+ },
+ [SCIC_FAILED] = {}
+};
+
+static void controller_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+ struct sci_base_state_machine *sm = &ihost->sm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ if (sm->current_state_id == SCIC_STARTING)
+ sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+ else if (sm->current_state_id == SCIC_STOPPING) {
+ sci_change_state(sm, SCIC_FAILED);
+ isci_host_stop_complete(ihost);
+ } else /* / @todo Now what do we want to do in this case? */
+ dev_err(&ihost->pdev->dev,
+ "%s: Controller timer fired when controller was not "
+ "in a state being timed.\n",
+ __func__);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+ void __iomem *scu_base,
+ void __iomem *smu_base)
+{
+ u8 i;
+
+ sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+ ihost->scu_registers = scu_base;
+ ihost->smu_registers = smu_base;
+
+ sci_port_configuration_agent_construct(&ihost->port_agent);
+
+ /* Construct the ports for this controller */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ sci_port_construct(&ihost->ports[i], i, ihost);
+ sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+ /* Construct the phys for this controller */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Add all the PHYs to the dummy port */
+ sci_phy_construct(&ihost->phys[i],
+ &ihost->ports[SCI_MAX_PORTS], i);
+ }
+
+ ihost->invalid_phy_mask = 0;
+
+ sci_init_timer(&ihost->timer, controller_timeout);
+
+ return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->phys[i].sas_address.high == 0 &&
+ oem->phys[i].sas_address.low == 0)
+ return -EINVAL;
+
+ if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->ports[i].phy_mask != 0)
+ return -EINVAL;
+ } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ u8 phy_mask = 0;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ phy_mask |= oem->ports[i].phy_mask;
+
+ if (phy_mask == 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
+ oem->controller.max_concurr_spin_up < 1)
+ return -EINVAL;
+
+ if (oem->controller.do_enable_ssc) {
+ if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+ return -EINVAL;
+
+ if (version >= ISCI_ROM_VER_1_1) {
+ u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ case 6:
+ case 7:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ test = oem->controller.ssc_sas_tx_spread_level;
+ if (oem->controller.ssc_sas_tx_type == 0) {
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (oem->controller.ssc_sas_tx_type == 1) {
+ switch (test) {
+ case 0:
+ case 3:
+ case 6:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static u8 max_spin_up(struct isci_host *ihost)
+{
+ if (ihost->user_parameters.max_concurr_spinup)
+ return min_t(u8, ihost->user_parameters.max_concurr_spinup,
+ MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+ else
+ return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
+ MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
+}
+
+static void power_control_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+ struct isci_phy *iphy;
+ unsigned long flags;
+ u8 i;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->power_control.phys_granted_power = 0;
+
+ if (ihost->power_control.phys_waiting == 0) {
+ ihost->power_control.timer_started = false;
+ goto done;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+ if (ihost->power_control.phys_waiting == 0)
+ break;
+
+ iphy = ihost->power_control.requesters[i];
+ if (iphy == NULL)
+ continue;
+
+ if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
+ break;
+
+ ihost->power_control.requesters[i] = NULL;
+ ihost->power_control.phys_waiting--;
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+
+ if (iphy->protocol == SAS_PROTOCOL_SSP) {
+ u8 j;
+
+ for (j = 0; j < SCI_MAX_PHYS; j++) {
+ struct isci_phy *requester = ihost->power_control.requesters[j];
+
+ /*
+ * Search the power_control queue to see if there are other phys
+ * attached to the same remote device. If found, take all of
+ * them out of await_sas_power state.
+ */
+ if (requester != NULL && requester != iphy) {
+ u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+ if (other == 0) {
+ ihost->power_control.requesters[j] = NULL;
+ ihost->power_control.phys_waiting--;
+ sci_phy_consume_power_handler(requester);
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ * It doesn't matter if the power list is empty, we need to start the
+ * timer in case another phy becomes ready.
+ */
+ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+
+ /*
+ * stop and start the power_control timer. When the timer fires, the
+ * no_of_phys_granted_power will be set to 0
+ */
+ if (ihost->power_control.timer_started)
+ sci_del_timer(&ihost->power_control.timer);
+
+ sci_mod_timer(&ihost->power_control.timer,
+ SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+ } else {
+ /*
+ * There are phys, attached to the same sas address as this phy, are
+ * already in READY state, this phy don't need wait.
+ */
+ u8 i;
+ struct isci_phy *current_phy;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ u8 other;
+ current_phy = &ihost->phys[i];
+
+ other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+ if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+ current_phy->protocol == SAS_PROTOCOL_SSP &&
+ other == 0) {
+ sci_phy_consume_power_handler(iphy);
+ break;
+ }
+ }
+
+ if (i == SCI_MAX_PHYS) {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
+ }
+}
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.requesters[iphy->phy_index])
+ ihost->power_control.phys_waiting--;
+
+ ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+ int phy,
+ unsigned char selection_byte)
+{
+ return ((selection_byte & (1 << phy)) ? 1 : 0)
+ + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+ if (is_cable_select_overridden())
+ return ((unsigned char *)&cable_selection_override)
+ + ihost->id;
+ else
+ return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+ return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+ static char *cable_names[] = {
+ [short_cable] = "short",
+ [long_cable] = "long",
+ [medium_cable] = "medium",
+ [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+ };
+ return (selection <= undefined_cable) ? cable_names[selection]
+ : cable_names[undefined_cable];
+}
+
+#define AFE_REGISTER_WRITE_DELAY 10
+
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+ const struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct pci_dev *pdev = ihost->pdev;
+ u32 afe_status;
+ u32 phy_id;
+ unsigned char cable_selection_mask = *to_cable_select(ihost);
+
+ /* Clear DFX Status registers */
+ writel(0x0081000f, &afe->afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
+ /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+ * Timer, PM Stagger Timer
+ */
+ writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Configure bias currents to normal */
+ if (is_a2(pdev))
+ writel(0x00005A00, &afe->afe_bias_control);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x00005F00, &afe->afe_bias_control);
+ else if (is_c1(pdev))
+ writel(0x00005500, &afe->afe_bias_control);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable PLL */
+ if (is_a2(pdev))
+ writel(0x80040908, &afe->afe_pll_control0);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &afe->afe_pll_control0);
+ else if (is_c1(pdev)) {
+ writel(0x80000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x00000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x80000B08, &afe->afe_pll_control0);
+ }
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Wait for the PLL to lock */
+ do {
+ afe_status = readl(&afe->afe_common_block_status);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } while ((afe_status & 0x00001000) == 0);
+
+ if (is_a2(pdev)) {
+ /* Shorten SAS SNW lock time (RxLock timer value from 76
+ * us to 50 us)
+ */
+ writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
+ const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+ int cable_length_long =
+ is_long_cable(phy_id, cable_selection_mask);
+ int cable_length_medium =
+ is_medium_cable(phy_id, cable_selection_mask);
+
+ if (is_a2(pdev)) {
+ /* All defaults, except the Receive Word
+ * Alignament/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00004512, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &xcvr->afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_c0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* All defaults, except the Receive Word
+ * Alignament/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00014500, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_c1(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* All defaults, except the Receive Word
+ * Alignament/Comma Detect Enable....(0xe800)
+ */
+ writel(0x0001C500, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Power up TX and RX out from power down (PWRDNTX and
+ * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+ */
+ if (is_a2(pdev))
+ writel(0x000003F0, &xcvr->afe_channel_control);
+ else if (is_b0(pdev)) {
+ writel(0x000003D7, &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000003D4, &xcvr->afe_channel_control);
+ } else if (is_c0(pdev)) {
+ writel(0x000001E7, &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000001E4, &xcvr->afe_channel_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+ &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+ &xcvr->afe_channel_control);
+ }
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_a2(pdev)) {
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ if (is_a2(pdev) || is_b0(pdev))
+ /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+ * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+ * Enabled) ....(0xe800)
+ */
+ writel(0x00004100, &xcvr->afe_xcvr_control0);
+ else if (is_c0(pdev))
+ writel(0x00014100, &xcvr->afe_xcvr_control0);
+ else if (is_c1(pdev))
+ writel(0x0001C100, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Leave DFE/FFE on */
+ if (is_a2(pdev))
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
+ else if (is_b0(pdev)) {
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c0(pdev)) {
+ writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x01500C0C :
+ cable_length_medium ? 0x01400C0D : 0x02400C0D,
+ &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(cable_length_long ? 0x33091C1F :
+ cable_length_medium ? 0x3315181F : 0x2B17161F,
+ &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ }
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Transfer control to the PEs */
+ writel(0x00010f00, &afe->afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+ sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+ memset(ihost->power_control.requesters, 0,
+ sizeof(ihost->power_control.requesters));
+
+ ihost->power_control.phys_waiting = 0;
+ ihost->power_control.phys_granted_power = 0;
+}
+
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+ struct sci_base_state_machine *sm = &ihost->sm;
+ enum sci_status result = SCI_FAILURE;
+ unsigned long i, state, val;
+
+ if (ihost->sm.current_state_id != SCIC_RESET) {
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCIC_INITIALIZING);
+
+ sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+ ihost->next_phy_to_start = 0;
+ ihost->phy_startup_timer_pending = false;
+
+ sci_controller_initialize_power_control(ihost);
+
+ /*
+ * There is nothing to do here for B0 since we do not have to
+ * program the AFE registers.
+ * / @todo The AFE settings are supposed to be correct for the B0 but
+ * / presently they seem to be wrong. */
+ sci_controller_afe_initialization(ihost);
+
+
+ /* Take the hardware out of reset */
+ writel(0, &ihost->smu_registers->soft_reset_control);
+
+ /*
+ * / @todo Provide meaningfull error code for hardware failure
+ * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+ for (i = 100; i >= 1; i--) {
+ u32 status;
+
+ /* Loop until the hardware reports success */
+ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+ status = readl(&ihost->smu_registers->control_status);
+
+ if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+ break;
+ }
+ if (i == 0)
+ goto out;
+
+ /*
+ * Determine what are the actaul device capacities that the
+ * hardware will support */
+ val = readl(&ihost->smu_registers->device_context_capacity);
+
+ /* Record the smaller of the two capacity values */
+ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+ ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+ ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+ /*
+ * Make all PEs that are unassigned match up with the
+ * logical ports
+ */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct scu_port_task_scheduler_group_registers __iomem
+ *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+ writel(i, &ptsg->protocol_engine[i]);
+ }
+
+ /* Initialize hardware PCI Relaxed ordering in DMA engines */
+ val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+ val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+ val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+ val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+ /*
+ * Initialize the PHYs before the PORTs because the PHY registers
+ * are accessed during the port initialization.
+ */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ result = sci_phy_initialize(&ihost->phys[i],
+ &ihost->scu_registers->peg0.pe[i].tl,
+ &ihost->scu_registers->peg0.pe[i].ll);
+ if (result != SCI_SUCCESS)
+ goto out;
+ }
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+ iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+ iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+ }
+
+ result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+ /* Advance the controller state machine */
+ if (result == SCI_SUCCESS)
+ state = SCIC_INITIALIZED;
+ else
+ state = SCIC_FAILED;
+ sci_change_state(sm, state);
+
+ return result;
+}
+
+static int sci_controller_dma_alloc(struct isci_host *ihost)
+{
+ struct device *dev = &ihost->pdev->dev;
+ size_t size;
+ int i;
+
+ /* detect re-initialization */
+ if (ihost->completion_queue)
+ return 0;
+
+ size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+ ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
+ GFP_KERNEL);
+ if (!ihost->completion_queue)
+ return -ENOMEM;
+
+ size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
+ GFP_KERNEL);
+
+ if (!ihost->remote_node_context_table)
+ return -ENOMEM;
+
+ size = ihost->task_context_entries * sizeof(struct scu_task_context),
+ ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
+ GFP_KERNEL);
+ if (!ihost->task_context_table)
+ return -ENOMEM;
+
+ size = SCI_UFI_TOTAL_SIZE;
+ ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
+ if (!ihost->ufi_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq;
+ dma_addr_t dma;
+
+ ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
+ if (!ireq)
+ return -ENOMEM;
+
+ ireq->tc = &ihost->task_context_table[i];
+ ireq->owning_controller = ihost;
+ ireq->request_daddr = dma;
+ ireq->isci_host = ihost;
+ ihost->reqs[i] = ireq;
+ }
+
+ return 0;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+ int err = sci_controller_dma_alloc(ihost);
+
+ if (err)
+ return err;
+
+ writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
+ writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
+
+ writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
+ writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
+
+ writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
+ writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
+
+ sci_unsolicited_frame_control_construct(ihost);
+
+ /*
+ * Inform the silicon as to the location of the UF headers and
+ * address table.
+ */
+ writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_lower);
+ writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+ writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_lower);
+ writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_upper);
+
+ return 0;
+}
+
+/**
+ * isci_host_init - (re-)initialize hardware and internal (private) state
+ * @ihost: host to init
+ *
+ * Any public facing objects (like asd_sas_port, and asd_sas_phys), or
+ * one-time initialization objects like locks and waitqueues, are
+ * not touched (they are initialized in isci_host_alloc)
+ */
+int isci_host_init(struct isci_host *ihost)
+{
+ int i, err;
+ enum sci_status status;
+
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
+ spin_unlock_irq(&ihost->scic_lock);
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: sci_controller_construct failed - status = %x\n",
+ __func__,
+ status);
+ return -ENODEV;
+ }
+
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_initialize(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_controller_initialize failed -"
+ " status = 0x%x\n",
+ __func__, status);
+ return -ENODEV;
+ }
+
+ err = sci_controller_mem_init(ihost);
+ if (err)
+ return err;
+
+ /* enable sgpio */
+ writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
+ for (i = 0; i < isci_gpio_count(ihost); i++)
+ writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+ writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
+
+ return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ sci_controller_start_next_phy(ihost);
+ break;
+ case SCIC_READY:
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkup event from phy %d in "
+ "unexpected state %d\n", __func__, iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ case SCIC_READY:
+ ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkdown event from phy %d in "
+ "unexpected state %d\n",
+ __func__,
+ iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+ u32 index;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if ((ihost->device_table[index] != NULL) &&
+ (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+ return true;
+ }
+
+ return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ if (ihost->sm.current_state_id != SCIC_STOPPING) {
+ dev_dbg(&ihost->pdev->dev,
+ "SCIC Controller 0x%p remote device stopped event "
+ "from device 0x%p in unexpected state %d\n",
+ ihost, idev,
+ ihost->sm.current_state_id);
+ return;
+ }
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ isci_host_stop_complete(ihost);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+ dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+ __func__, ihost->id, request);
+
+ writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 task_index;
+ u16 task_sequence;
+
+ task_index = ISCI_TAG_TCI(io_tag);
+
+ if (task_index < ihost->task_context_entries) {
+ struct isci_request *ireq = ihost->reqs[task_index];
+
+ if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+ task_sequence = ISCI_TAG_SEQ(io_tag);
+
+ if (task_sequence == ihost->io_request_sequence[task_index])
+ return ireq;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * This method allocates remote node index and the reserves the remote node
+ * context space for use. This method can fail if there are no more remote
+ * node index available.
+ * @scic: This is the controller object which contains the set of
+ * free remote node ids
+ * @sci_dev: This is the device object which is requesting the a remote node
+ * id
+ * @node_id: This is the remote node id that is assinged to the device if one
+ * is available
+ *
+ * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote
+ * node index available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id)
+{
+ u16 node_index;
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ node_index = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, remote_node_count
+ );
+
+ if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ ihost->device_table[node_index] = idev;
+
+ *node_id = node_index;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id)
+{
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ if (ihost->device_table[node_id] == idev) {
+ ihost->device_table[node_id] = NULL;
+
+ sci_remote_node_table_release_remote_node_index(
+ &ihost->available_remote_nodes, remote_node_count, node_id
+ );
+ }
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer)
+{
+ /* XXX type safety? */
+ memcpy(response_buffer, frame_header, sizeof(u32));
+
+ memcpy(response_buffer + sizeof(u32),
+ frame_buffer,
+ sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+ if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+ writel(ihost->uf_control.get,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+ u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+ ihost->tci_pool[tail] = tci;
+ ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+ u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+ u16 tci = ihost->tci_pool[head];
+
+ ihost->tci_head = head + 1;
+ return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+ return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+ if (isci_tci_space(ihost)) {
+ u16 tci = isci_tci_alloc(ihost);
+ u8 seq = ihost->io_request_sequence[tci];
+
+ return ISCI_TAG(seq, tci);
+ }
+
+ return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 tci = ISCI_TAG_TCI(io_tag);
+ u16 seq = ISCI_TAG_SEQ(io_tag);
+
+ /* prevent tail from passing head */
+ if (isci_tci_active(ihost) == 0)
+ return SCI_FAILURE_INVALID_IO_TAG;
+
+ if (seq == ihost->io_request_sequence[tci]) {
+ ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+ isci_tci_free(ihost, tci);
+
+ return SCI_SUCCESS;
+ }
+ return SCI_FAILURE_INVALID_IO_TAG;
+}
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ /* terminate an ongoing (i.e. started) core IO request. This does not
+ * abort the IO request at the target, but rather removes the IO
+ * request from the host controller.
+ */
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ status = sci_io_request_terminate(ireq);
+
+ dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+ __func__, status, ireq, ireq->flags);
+
+ if ((status == SCI_SUCCESS) &&
+ !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
+ !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
+ /* Utilize the original post context command and or in the
+ * POST_TC_ABORT request sub-type.
+ */
+ sci_controller_post_request(
+ ihost, ireq->post_context |
+ SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ }
+ return status;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ * completion operations for an IO request. After this method is invoked,
+ * the user should consider the IO request as invalid until it is properly
+ * reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ * IO request.
+ * @idev: The handle to the remote device object for which to complete
+ * the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+ u16 index;
+
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STOPPING:
+ /* XXX: Implement this function */
+ return SCI_FAILURE;
+ case SCIC_READY:
+ status = sci_remote_device_complete_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ index = ISCI_TAG_TCI(ireq->io_tag);
+ clear_bit(IREQ_ACTIVE, &ireq->flags);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ * send/start a framework task management request.
+ * @controller: the handle to the controller object for which to start the task
+ * management request.
+ * @remote_device: the handle to the remote device object for which to start
+ * the task management request.
+ * @task_request: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller starting task from invalid "
+ "state\n",
+ __func__);
+ return SCI_TASK_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_task(ihost, idev, ireq);
+ switch (status) {
+ case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+
+ /*
+ * We will let framework know this task request started successfully,
+ * although core is still woring on starting the request (to post tc when
+ * RNC is resumed.)
+ */
+ return SCI_SUCCESS;
+ case SCI_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ int d;
+
+ /* no support for TX_GP_CFG */
+ if (reg_index == 0)
+ return -EINVAL;
+
+ for (d = 0; d < isci_gpio_count(ihost); d++) {
+ u32 val = 0x444; /* all ODx.n clear */
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ int bit = (i << 2) + 2;
+
+ bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
+ write_data, reg_index,
+ reg_count);
+ if (bit < 0)
+ break;
+
+ /* if od is set, clear the 'invert' bit */
+ val &= ~(bit << ((i << 2) + 2));
+ }
+
+ if (i < 3)
+ break;
+ writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
+ }
+
+ /* unless reg_index is > 1, we should always be able to write at
+ * least one register
+ */
+ return d > 0;
+}
+
+int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
+ u8 reg_count, u8 *write_data)
+{
+ struct isci_host *ihost = sas_ha->lldd_ha;
+ int written;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX_GP:
+ written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
+ break;
+ default:
+ written = -EINVAL;
+ }
+
+ return written;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 000000000..22a9bb1ab
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,517 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include <scsi/sas_ata.h>
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+ /**
+ * This field is set when the power control timer is running and cleared when
+ * it is not.
+ */
+ bool timer_started;
+
+ /**
+ * Timer to control when the directed attached disks can consume power.
+ */
+ struct sci_timer timer;
+
+ /**
+ * This field is used to keep track of how many phys are put into the
+ * requesters field.
+ */
+ u8 phys_waiting;
+
+ /**
+ * This field is used to keep track of how many phys have been granted to consume power
+ */
+ u8 phys_granted_power;
+
+ /**
+ * This field is an array of phys that we are waiting on. The phys are direct
+ * mapped into requesters via struct sci_phy.phy_index
+ */
+ struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+ struct sci_port_configuration_agent *,
+ struct isci_port *, struct isci_phy *);
+bool is_port_config_apc(struct isci_host *ihost);
+bool is_controller_start_complete(struct isci_host *ihost);
+
+struct sci_port_configuration_agent {
+ u16 phy_configured_mask;
+ u16 phy_ready_mask;
+ struct {
+ u8 min_index;
+ u8 max_index;
+ } phy_valid_port_range[SCI_MAX_PHYS];
+ bool timer_pending;
+ port_config_fn link_up_handler;
+ port_config_fn link_down_handler;
+ struct sci_timer timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ * the phy index is set so further notifications are not
+ * made. Once the phy reports link up and is made part of a
+ * port then this bit is cleared.
+
+ */
+struct isci_host {
+ struct sci_base_state_machine sm;
+ /* XXX can we time this externally */
+ struct sci_timer timer;
+ /* XXX drop reference module params directly */
+ struct sci_user_parameters user_parameters;
+ /* XXX no need to be a union */
+ struct sci_oem_params oem_parameters;
+ struct sci_port_configuration_agent port_agent;
+ struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+ struct sci_remote_node_table available_remote_nodes;
+ struct sci_power_control power_control;
+ u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+ struct scu_task_context *task_context_table;
+ dma_addr_t tc_dma;
+ union scu_remote_node_context *remote_node_context_table;
+ dma_addr_t rnc_dma;
+ u32 *completion_queue;
+ dma_addr_t cq_dma;
+ u32 completion_queue_get;
+ u32 logical_port_entries;
+ u32 remote_node_entries;
+ u32 task_context_entries;
+ void *ufi_buf;
+ dma_addr_t ufi_dma;
+ struct sci_unsolicited_frame_control uf_control;
+
+ /* phy startup */
+ struct sci_timer phy_timer;
+ /* XXX kill */
+ bool phy_startup_timer_pending;
+ u32 next_phy_to_start;
+ /* XXX convert to unsigned long and use bitops */
+ u8 invalid_phy_mask;
+
+ /* TODO attempt dynamic interrupt coalescing scheme */
+ u16 interrupt_coalesce_number;
+ u32 interrupt_coalesce_timeout;
+ struct smu_registers __iomem *smu_registers;
+ struct scu_registers __iomem *scu_registers;
+
+ u16 tci_head;
+ u16 tci_tail;
+ u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+ int id; /* unique within a given pci device */
+ struct isci_phy phys[SCI_MAX_PHYS];
+ struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+ struct asd_sas_port sas_ports[SCI_MAX_PORTS];
+ struct sas_ha_struct sas_ha;
+
+ struct pci_dev *pdev;
+ #define IHOST_START_PENDING 0
+ #define IHOST_STOP_PENDING 1
+ #define IHOST_IRQ_ENABLED 2
+ unsigned long flags;
+ wait_queue_head_t eventq;
+ struct tasklet_struct completion_tasklet;
+ spinlock_t scic_lock;
+ struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+ struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ * for the common controller state machine.
+ */
+enum sci_controller_states {
+ /**
+ * Simply the initial state for the base controller state machine.
+ */
+ SCIC_INITIAL = 0,
+
+ /**
+ * This state indicates that the controller is reset. The memory for
+ * the controller is in it's initial state, but the controller requires
+ * initialization.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_RESET,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of initialization. In this state no new IO operations
+ * are permitted.
+ * This state is entered from the RESET state.
+ */
+ SCIC_INITIALIZING,
+
+ /**
+ * This state indicates that the controller has been successfully
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ */
+ SCIC_INITIALIZED,
+
+ /**
+ * This state indicates the the controller is in the process of becoming
+ * ready (i.e. starting). In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZED state.
+ */
+ SCIC_STARTING,
+
+ /**
+ * This state indicates the controller is now ready. Thus, the user
+ * is able to perform IO operations on the controller.
+ * This state is entered from the STARTING state.
+ */
+ SCIC_READY,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of resetting. Thus, the user is unable to perform
+ * IO operations on the controller. A reset is considered destructive in
+ * most cases.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ * This state is entered from the STOPPED state.
+ */
+ SCIC_RESETTING,
+
+ /**
+ * This state indicates that the controller is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCIC_STOPPING,
+
+ /**
+ * This state indicates that the controller could not successfully be
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the STOPPING state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This class represents the pci function containing the
+ * controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ * the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+ struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+ struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+ struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+{
+ return ihost->sas_ha.core.shost;
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+ for (id = 0; id < SCI_MAX_CONTROLLERS && \
+ (ihost = to_pci_info(pdev)->hosts[id]); id++)
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+ return dev->port->ha->lldd_ha;
+}
+
+static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
+{
+ return dev_to_ihost(idev->domain_dev);
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
+
+/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
+#define ISCI_COALESCE_BASE 9
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev_is_sata(dev) && dev->parent)
+ return SCU_STP_REMOTE_NODE_COUNT;
+ return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object. This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+ if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+ return NULL;
+
+ return &idev->isci_port->isci_host->pdev->dev;
+}
+
+static inline bool is_a2(struct pci_dev *pdev)
+{
+ if (pdev->revision < 4)
+ return true;
+ return false;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+ if (pdev->revision == 4)
+ return true;
+ return false;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+ if (pdev->revision == 5)
+ return true;
+ return false;
+}
+
+static inline bool is_c1(struct pci_dev *pdev)
+{
+ if (pdev->revision >= 6)
+ return true;
+ return false;
+}
+
+enum cable_selections {
+ short_cable = 0,
+ long_cable = 1,
+ medium_cable = 2,
+ undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+ return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
+/* set hw control for 'activity', even though active enclosures seem to drive
+ * the activity led on their own. Skip setting FSENG control on 'status' due
+ * to unexpected operation and 'error' due to not being a supported automatic
+ * FSENG output
+ */
+#define SGPIO_HW_CONTROL 0x00000443
+
+static inline int isci_gpio_count(struct isci_host *ihost)
+{
+ return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+ u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+ u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id);
+void sci_controller_free_remote_node_context(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy);
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy);
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
+
+int isci_host_init(struct isci_host *);
+void isci_host_completion_routine(unsigned long data);
+void isci_host_deinit(struct isci_host *);
+void sci_controller_disable_interrupts(struct isci_host *ihost);
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
+
+enum sci_status sci_controller_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent);
+
+int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
+ u8 reg_count, u8 *write_data);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 000000000..cd41b63a2
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,811 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include <scsi/scsi_host.h>
+#include "host.h"
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+#define MAJ 1
+#define MIN 2
+#define BUILD 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD)
+
+MODULE_VERSION(DRV_VERSION);
+
+static struct scsi_transport_template *isci_transport_template;
+
+static const struct pci_device_id isci_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x1D61),},
+ { PCI_VDEVICE(INTEL, 0x1D63),},
+ { PCI_VDEVICE(INTEL, 0x1D65),},
+ { PCI_VDEVICE(INTEL, 0x1D67),},
+ { PCI_VDEVICE(INTEL, 0x1D69),},
+ { PCI_VDEVICE(INTEL, 0x1D6B),},
+ { PCI_VDEVICE(INTEL, 0x1D60),},
+ { PCI_VDEVICE(INTEL, 0x1D62),},
+ { PCI_VDEVICE(INTEL, 0x1D64),},
+ { PCI_VDEVICE(INTEL, 0x1D66),},
+ { PCI_VDEVICE(INTEL, 0x1D68),},
+ { PCI_VDEVICE(INTEL, 0x1D6A),},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 2;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+ "This field indicates length of the SAS/SATA cable between "
+ "host and device. If any bits > 15 are set (default) "
+ "indicates \"use platform defaults\"");
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+struct device_attribute *isci_host_attrs[] = {
+ &dev_attr_isci_id,
+ NULL
+};
+
+static struct scsi_host_template isci_sht = {
+
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .proc_name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .scan_finished = isci_host_scan_finished,
+ .scan_start = isci_host_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = ISCI_CAN_QUEUE_VAL,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = sas_eh_abort_handler,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = sas_eh_bus_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .shost_attrs = isci_host_attrs,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static struct sas_domain_function_template isci_transport_ops = {
+
+ /* The class calls these to notify the LLDD of an event. */
+ .lldd_port_formed = isci_port_formed,
+ .lldd_port_deformed = isci_port_deformed,
+
+ /* The class calls these when a device is found or gone. */
+ .lldd_dev_found = isci_remote_device_found,
+ .lldd_dev_gone = isci_remote_device_gone,
+
+ .lldd_execute_task = isci_task_execute_task,
+ /* Task Management Functions. Must be called from process context. */
+ .lldd_abort_task = isci_task_abort_task,
+ .lldd_abort_task_set = isci_task_abort_task_set,
+ .lldd_clear_aca = isci_task_clear_aca,
+ .lldd_clear_task_set = isci_task_clear_task_set,
+ .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
+ .lldd_lu_reset = isci_task_lu_reset,
+ .lldd_query_task = isci_task_query_task,
+
+ /* ata recovery called from ata-eh */
+ .lldd_ata_check_ready = isci_ata_check_ready,
+
+ /* Port and Adapter management */
+ .lldd_clear_nexus_port = isci_task_clear_nexus_port,
+ .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
+
+ /* Phy management */
+ .lldd_control_phy = isci_phy_control,
+
+ /* GPIO support */
+ .lldd_write_gpio = isci_gpio_write,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ * specific members of the sas_ha struct and calls the libsas
+ * sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ * libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The user
+ * should check for possible memory allocation error return otherwise, a zero
+ * indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+ int i;
+ struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+ struct asd_sas_phy **sas_phys;
+ struct asd_sas_port **sas_ports;
+
+ sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PHYS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_phys)
+ return -ENOMEM;
+
+ sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PORTS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_ports)
+ return -ENOMEM;
+
+ sas_ha->sas_ha_name = DRV_NAME;
+ sas_ha->lldd_module = THIS_MODULE;
+ sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ sas_phys[i] = &isci_host->phys[i].sas_phy;
+ sas_ports[i] = &isci_host->sas_ports[i];
+ }
+
+ sas_ha->sas_phy = sas_phys;
+ sas_ha->sas_port = sas_ports;
+ sas_ha->num_phys = SCI_MAX_PHYS;
+
+ sas_ha->strict_wide_ports = 1;
+
+ sas_register_ha(sas_ha);
+
+ return 0;
+}
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+ struct Scsi_Host *shost;
+
+ if (!isci_host)
+ return;
+
+ sas_unregister_ha(&isci_host->sas_ha);
+
+ shost = to_shost(isci_host);
+ sas_remove_host(shost);
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+}
+
+static int isci_pci_init(struct pci_dev *pdev)
+{
+ int err, bar_num, bar_mask = 0;
+ void __iomem * const *iomap;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed enable PCI device %s!\n",
+ pci_name(pdev));
+ return err;
+ }
+
+ for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+ bar_mask |= 1 << (bar_num * 2);
+
+ err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+ if (err)
+ return err;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int num_controllers(struct pci_dev *pdev)
+{
+ /* bar size alone can tell us if we are running with a dual controller
+ * part, no need to trust revision ids that might be under broken firmware
+ * control
+ */
+ resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+ resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+ if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+ smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+ return SCI_MAX_CONTROLLERS;
+ else
+ return 1;
+}
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+ int err, i, num_msix;
+ struct isci_host *ihost;
+ struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+ /*
+ * Determine the number of vectors associated with this
+ * PCI function.
+ */
+ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+ for (i = 0; i < num_msix; i++)
+ pci_info->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
+ if (err)
+ goto intx;
+
+ for (i = 0; i < num_msix; i++) {
+ int id = i / SCI_NUM_MSI_X_INT;
+ struct msix_entry *msix = &pci_info->msix_entries[i];
+ irq_handler_t isr;
+
+ ihost = pci_info->hosts[id];
+ /* odd numbered vectors are error interrupts */
+ if (i & 1)
+ isr = isci_error_isr;
+ else
+ isr = isci_msix_isr;
+
+ err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+ DRV_NAME"-msix", ihost);
+ if (!err)
+ continue;
+
+ dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+ while (i--) {
+ id = i / SCI_NUM_MSI_X_INT;
+ ihost = pci_info->hosts[id];
+ msix = &pci_info->msix_entries[i];
+ devm_free_irq(&pdev->dev, msix->vector, ihost);
+ }
+ pci_disable_msix(pdev);
+ goto intx;
+ }
+ return 0;
+
+ intx:
+ for_each_isci_host(i, ihost, pdev) {
+ err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+ IRQF_SHARED, DRV_NAME"-intx", ihost);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct sci_phy_user_params *u_phy = &u->phys[i];
+
+ u_phy->max_speed_generation = phy_gen;
+
+ /* we are not exporting these for now */
+ u_phy->align_insertion_frequency = 0x7f;
+ u_phy->in_connection_align_insertion_frequency = 0xff;
+ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+ }
+
+ u->stp_inactivity_timeout = stp_inactive_to;
+ u->ssp_inactivity_timeout = ssp_inactive_to;
+ u->stp_max_occupancy_timeout = stp_max_occ_to;
+ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+ u->no_outbound_task_timeout = no_outbound_task_to;
+ u->max_concurr_spinup = max_concurr_spinup;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
+{
+ u16 index;
+
+ /*
+ * Validate the user parameters. If they are not legal, then
+ * return a failure.
+ */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct sci_phy_user_params *u;
+
+ u = &sci_parms->phys[index];
+
+ if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
+ (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if (u->in_connection_align_insertion_frequency < 3)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if ((u->in_connection_align_insertion_frequency < 3) ||
+ (u->align_insertion_frequency == 0) ||
+ (u->notify_enable_spin_up_insertion_frequency == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+ return SCI_SUCCESS;
+}
+
+static void sci_oem_defaults(struct isci_host *ihost)
+{
+ /* these defaults are overridden by the platform / firmware */
+ struct sci_user_parameters *user = &ihost->user_parameters;
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ int i;
+
+ /* Default to APC mode. */
+ oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+ /* Default to APC mode. */
+ oem->controller.max_concurr_spin_up = 1;
+
+ /* Default to no SSC operation. */
+ oem->controller.do_enable_ssc = false;
+
+ /* Default to short cables on all phys. */
+ oem->controller.cable_selection_mask = 0;
+
+ /* Initialize all of the port parameter information to narrow ports. */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ oem->ports[i].phy_mask = 0;
+
+ /* Initialize all of the phy parameter information. */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Default to 3G (i.e. Gen 2). */
+ user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
+
+ /* the frequencies cannot be 0 */
+ user->phys[i].align_insertion_frequency = 0x7f;
+ user->phys[i].in_connection_align_insertion_frequency = 0xff;
+ user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
+
+ /* Previous Vitesse based expanders had a arbitration issue that
+ * is worked around by having the upper 32-bits of SAS address
+ * with a value greater then the Vitesse company identifier.
+ * Hence, usage of 0x5FCFFFFF.
+ */
+ oem->phys[i].sas_address.low = 0x1 + ihost->id;
+ oem->phys[i].sas_address.high = 0x5FCFFFFF;
+ }
+
+ user->stp_inactivity_timeout = 5;
+ user->ssp_inactivity_timeout = 5;
+ user->stp_max_occupancy_timeout = 5;
+ user->ssp_max_occupancy_timeout = 20;
+ user->no_outbound_task_timeout = 2;
+}
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+ struct isci_orom *orom = to_pci_info(pdev)->orom;
+ struct sci_user_parameters sci_user_params;
+ u8 oem_version = ISCI_ROM_VER_1_0;
+ struct isci_host *ihost;
+ struct Scsi_Host *shost;
+ int err, i;
+
+ ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
+ if (!ihost)
+ return NULL;
+
+ ihost->pdev = pdev;
+ ihost->id = id;
+ spin_lock_init(&ihost->scic_lock);
+ init_waitqueue_head(&ihost->eventq);
+ ihost->sas_ha.dev = &ihost->pdev->dev;
+ ihost->sas_ha.lldd_ha = ihost;
+ tasklet_init(&ihost->completion_tasklet,
+ isci_host_completion_routine, (unsigned long)ihost);
+
+ /* validate module parameters */
+ /* TODO: kill struct sci_user_parameters and reference directly */
+ sci_oem_defaults(ihost);
+ isci_user_parameters_get(&sci_user_params);
+ if (sci_user_parameters_set(ihost, &sci_user_params)) {
+ dev_warn(&pdev->dev,
+ "%s: sci_user_parameters_set failed\n", __func__);
+ return NULL;
+ }
+
+ /* sanity check platform (or 'firmware') oem parameters */
+ if (orom) {
+ if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
+ dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
+ return NULL;
+ }
+ ihost->oem_parameters = orom->ctrl[id];
+ oem_version = orom->hdr.version;
+ }
+
+ /* validate oem parameters (platform, firmware, or built-in defaults) */
+ if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
+ dev_warn(&pdev->dev, "oem parameter validation failed\n");
+ return NULL;
+ }
+
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ INIT_LIST_HEAD(&iport->remote_dev_list);
+ iport->isci_host = ihost;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ isci_phy_init(&ihost->phys[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ struct isci_remote_device *idev = &ihost->devices[i];
+
+ INIT_LIST_HEAD(&idev->node);
+ }
+
+ shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+ if (!shost)
+ return NULL;
+
+ dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+ "{%s, %s, %s, %s}\n",
+ (is_cable_select_overridden() ? "* " : ""), ihost->id,
+ lookup_cable_names(decode_cable_selection(ihost, 3)),
+ lookup_cable_names(decode_cable_selection(ihost, 2)),
+ lookup_cable_names(decode_cable_selection(ihost, 1)),
+ lookup_cable_names(decode_cable_selection(ihost, 0)));
+
+ err = isci_host_init(ihost);
+ if (err)
+ goto err_shost;
+
+ SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
+ ihost->sas_ha.core.shost = shost;
+ shost->transportt = isci_transport_template;
+
+ shost->max_id = ~0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(shost, &pdev->dev);
+ if (err)
+ goto err_shost;
+
+ err = isci_register_sas_ha(ihost);
+ if (err)
+ goto err_shost_remove;
+
+ return ihost;
+
+ err_shost_remove:
+ scsi_remove_host(shost);
+ err_shost:
+ scsi_host_put(shost);
+
+ return NULL;
+}
+
+static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct isci_pci_info *pci_info;
+ int err, i;
+ struct isci_host *isci_host;
+ const struct firmware *fw = NULL;
+ struct isci_orom *orom = NULL;
+ char *source = "(platform)";
+
+ dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+ pdev->revision);
+
+ pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+ orom = isci_request_oprom(pdev);
+
+ for (i = 0; orom && i < num_controllers(pdev); i++) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i],
+ orom->hdr.version)) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+ orom = NULL;
+ break;
+ }
+ }
+
+ if (!orom) {
+ source = "(firmware)";
+ orom = isci_request_firmware(pdev, fw);
+ if (!orom) {
+ /* TODO convert this to WARN_TAINT_ONCE once the
+ * orom/efi parameter support is widely available
+ */
+ dev_warn(&pdev->dev,
+ "Loading user firmware failed, using default "
+ "values\n");
+ dev_warn(&pdev->dev,
+ "Default OEM configuration being used: 4 "
+ "narrow ports, and default SAS Addresses\n");
+ }
+ }
+
+ if (orom)
+ dev_info(&pdev->dev,
+ "OEM SAS parameters (version: %u.%u) loaded %s\n",
+ (orom->hdr.version & 0xf0) >> 4,
+ (orom->hdr.version & 0xf), source);
+
+ pci_info->orom = orom;
+
+ err = isci_pci_init(pdev);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_controllers(pdev); i++) {
+ struct isci_host *h = isci_host_alloc(pdev, i);
+
+ if (!h) {
+ err = -ENOMEM;
+ goto err_host_alloc;
+ }
+ pci_info->hosts[i] = h;
+
+ /* turn on DIF support */
+ scsi_host_set_prot(to_shost(h),
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
+ }
+
+ err = isci_setup_interrupts(pdev);
+ if (err)
+ goto err_host_alloc;
+
+ for_each_isci_host(i, isci_host, pdev)
+ scsi_scan_host(to_shost(isci_host));
+
+ return 0;
+
+ err_host_alloc:
+ for_each_isci_host(i, isci_host, pdev)
+ isci_unregister(isci_host);
+ return err;
+}
+
+static void isci_pci_remove(struct pci_dev *pdev)
+{
+ struct isci_host *ihost;
+ int i;
+
+ for_each_isci_host(i, ihost, pdev) {
+ wait_for_start(ihost);
+ isci_unregister(ihost);
+ isci_host_deinit(ihost);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int isci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct isci_host *ihost;
+ int i;
+
+ for_each_isci_host(i, ihost, pdev) {
+ sas_suspend_ha(&ihost->sas_ha);
+ isci_host_deinit(ihost);
+ }
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int isci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct isci_host *ihost;
+ int rc, i;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ rc = pcim_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "enabling device failure after resume(%d)\n", rc);
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ for_each_isci_host(i, ihost, pdev) {
+ sas_prep_resume_ha(&ihost->sas_ha);
+
+ isci_host_init(ihost);
+ isci_host_start(ihost->sas_ha.core.shost);
+ wait_for_start(ihost);
+
+ sas_resume_ha(&ihost->sas_ha);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
+
+static struct pci_driver isci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = isci_id_table,
+ .probe = isci_pci_probe,
+ .remove = isci_pci_remove,
+ .driver.pm = &isci_pm_ops,
+};
+
+static __init int isci_init(void)
+{
+ int err;
+
+ pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
+ DRV_NAME, DRV_VERSION);
+
+ isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+ if (!isci_transport_template)
+ return -ENOMEM;
+
+ err = pci_register_driver(&isci_pci_driver);
+ if (err)
+ sas_release_transport(isci_transport_template);
+
+ return err;
+}
+
+static __exit void isci_exit(void)
+{
+ pci_unregister_driver(&isci_pci_driver);
+ sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 000000000..234ab46fc
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,539 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR 0
+#define SCI_SMU_BAR_SIZE (16*1024)
+#define SCI_SCU_BAR 1
+#define SCI_SCU_BAR_SIZE (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
+#define SCU_MAX_EVENTS_SHIFT (7)
+#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
+ + SCU_MAX_EVENTS \
+ + SCU_MAX_UNSOLICITED_FRAMES \
+ + SCI_MAX_IO_REQUESTS \
+ + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024U)
+#define SCU_INVALID_FRAME_INDEX (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
+
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ * non-task management related SCI interface methods.
+ *
+ *
+ */
+enum sci_status {
+ /**
+ * This member indicates successful completion.
+ */
+ SCI_SUCCESS = 0,
+
+ /**
+ * This value indicates that the calling method completed successfully,
+ * but that the IO may have completed before having it's start method
+ * invoked. This occurs during SAT translation for requests that do
+ * not require an IO to the target or for any other requests that may
+ * be completed without having to submit IO.
+ */
+ SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+ /**
+ * This Value indicates that the SCU hardware returned an early response
+ * because the io request specified more data than is returned by the
+ * target device (mode pages, inquiry data, etc.). The completion routine
+ * will handle this case to get the actual number of bytes transferred.
+ */
+ SCI_SUCCESS_IO_DONE_EARLY,
+
+ /**
+ * This member indicates that the object for which a state change is
+ * being requested is already in said state.
+ */
+ SCI_WARNING_ALREADY_IN_STATE,
+
+ /**
+ * This member indicates interrupt coalescence timer may cause SAS
+ * specification compliance issues (i.e. SMP target mode response
+ * frames must be returned within 1.9 milliseconds).
+ */
+ SCI_WARNING_TIMER_CONFLICT,
+
+ /**
+ * This field indicates a sequence of action is not completed yet. Mostly,
+ * this status is used when multiple ATA commands are needed in a SATI translation.
+ */
+ SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+ /**
+ * This member indicates that there was a general failure.
+ */
+ SCI_FAILURE,
+
+ /**
+ * This member indicates that the SCI implementation is unable to complete
+ * an operation due to a critical flaw the prevents any further operation
+ * (i.e. an invalid pointer).
+ */
+ SCI_FATAL_ERROR,
+
+ /**
+ * This member indicates the calling function failed, because the state
+ * of the controller is in a state that prevents successful completion.
+ */
+ SCI_FAILURE_INVALID_STATE,
+
+ /**
+ * This member indicates the calling function failed, because there is
+ * insufficient resources/memory to complete the request.
+ */
+ SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * controller object required for the operation can't be located.
+ */
+ SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * discovered controller type is not supported by the library.
+ */
+ SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested initialization data version isn't supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested configuration of SAS Phys into SAS Ports is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested protocol is not supported by the remote device, port,
+ * or controller.
+ */
+ SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * device already exists.
+ */
+ SCI_FAILURE_DEVICE_EXISTS,
+
+ /**
+ * This member indicates the calling function failed, because adding
+ * a phy to the object is not possible.
+ */
+ SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+ /**
+ * This member indicates the calling function failed, because the SCI
+ * implementation does not support the supplied time limit.
+ */
+ SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Phy.
+ */
+ SCI_FAILURE_INVALID_PHY,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Port.
+ */
+ SCI_FAILURE_INVALID_PORT,
+
+ /**
+ * This member indicates the calling method was partly successful
+ * The port was reset but not all phys in port are operational
+ */
+ SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+ /**
+ * This member indicates that calling method failed
+ * The port reset did not complete because none of the phys are operational
+ */
+ SCI_FAILURE_RESET_PORT_FAILURE,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified remote device.
+ */
+ SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+ /**
+ * This member indicates the calling method failed, because the remote
+ * device is in a bad state and requires a reset.
+ */
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain or support the specified IO tag.
+ */
+ SCI_FAILURE_INVALID_IO_TAG,
+
+ /**
+ * This member indicates that the operation failed and the user should
+ * check the response data associated with the IO.
+ */
+ SCI_FAILURE_IO_RESPONSE_VALID,
+
+ /**
+ * This member indicates that the operation failed, the failure is
+ * controller implementation specific, and the response data associated
+ * with the request is not valid. You can query for the controller
+ * specific error information via sci_controller_get_request_status()
+ */
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+ /**
+ * This member indicated that the operation failed because the
+ * user requested this IO to be terminated.
+ */
+ SCI_FAILURE_IO_TERMINATED,
+
+ /**
+ * This member indicates that the operation failed and the associated
+ * request requires a SCSI abort task to be sent to the target.
+ */
+ SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+ /**
+ * This member indicates that the operation failed because the supplied
+ * device could not be located.
+ */
+ SCI_FAILURE_DEVICE_NOT_FOUND,
+
+ /**
+ * This member indicates that the operation failed because the
+ * objects association is required and is not correctly set.
+ */
+ SCI_FAILURE_INVALID_ASSOCIATION,
+
+ /**
+ * This member indicates that the operation failed, because a timeout
+ * occurred.
+ */
+ SCI_FAILURE_TIMEOUT,
+
+ /**
+ * This member indicates that the operation failed, because the user
+ * specified a value that is either invalid or not supported.
+ */
+ SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ /**
+ * This value indicates that the operation failed, because the number
+ * of messages (MSI-X) is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+ /**
+ * This value indicates that the method failed due to a lack of
+ * available NCQ tags.
+ */
+ SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+ /**
+ * This value indicates that a protocol violation has occurred on the
+ * link.
+ */
+ SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ /**
+ * This value indicates a failure condition that retry may help to clear.
+ */
+ SCI_FAILURE_RETRY_REQUIRED,
+
+ /**
+ * This field indicates the retry limit was reached when a retry is attempted
+ */
+ SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+ /**
+ * This member indicates the calling method was partly successful.
+ * Mostly, this status is used when a LUN_RESET issued to an expander attached
+ * STP device in READY NCQ substate needs to have RNC suspended/resumed
+ * before posting TC.
+ */
+ SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+ /**
+ * This field indicates an illegal phy connection based on the routing attribute
+ * of both expander phy attached to each other.
+ */
+ SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+ /**
+ * This field indicates a CONFIG ROUTE INFO command has a response with function result
+ * INDEX DOES NOT EXIST, usually means exceeding max route index.
+ */
+ SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+ /**
+ * This value indicates that an unsupported PCI device ID has been
+ * specified. This indicates that attempts to invoke
+ * sci_library_allocate_controller() will fail.
+ */
+ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * Add the API to retrieve the SCU status from the core. Check to see that the
+ * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+ SCI_IO_SUCCESS = SCI_SUCCESS,
+ SCI_IO_FAILURE = SCI_FAILURE,
+ SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+ SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
+ SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+ SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+ SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+ SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
+ SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+ SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * Check to see that the following status are properly handled:
+ */
+enum sci_task_status {
+ SCI_TASK_SUCCESS = SCI_SUCCESS,
+ SCI_TASK_FAILURE = SCI_FAILURE,
+ SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
+ SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+ SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receive the 4-byte endian swapped version of src
+ * @src: word aligned source buffer
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order. Regardless of host endian this
+ * is always a swab32()-per-dword conversion of the standard definition,
+ * i.e. single byte fields swapped and multi-byte fields in little-
+ * endian
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+ u32 *dest = _dest, *src = _src;
+
+ while (--word_cnt >= 0)
+ dest[word_cnt] = swab32(src[word_cnt]);
+}
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+ struct timer_list timer;
+ bool cancel;
+};
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+ tmr->timer.function = fn;
+ tmr->timer.data = (unsigned long) tmr;
+ tmr->cancel = 0;
+ init_timer(&tmr->timer);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+ tmr->cancel = 0;
+ mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+ tmr->cancel = 1;
+ del_timer(&tmr->timer);
+}
+
+struct sci_base_state_machine {
+ const struct sci_base_state *state_table;
+ u32 initial_state_id;
+ u32 current_state_id;
+ u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+ sci_state_transition_t enter_state; /* Called on state entry */
+ sci_state_transition_t exit_state; /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table,
+ u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
+#endif /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 000000000..cb87b2ef7
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1487 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+#undef C
+#define C(a) (#a)
+static const char *phy_state_name(enum sci_phy_states state)
+{
+ static const char * const strings[] = PHY_STATES;
+
+ return strings[state];
+}
+#undef C
+
+/* Maximum arbitration wait time in micro-seconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+ return iphy->max_negotiated_speed;
+}
+
+static struct isci_host *phy_to_host(struct isci_phy *iphy)
+{
+ struct isci_phy *table = iphy - iphy->phy_index;
+ struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
+
+ return ihost;
+}
+
+static struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+ return &phy_to_host(iphy)->pdev->dev;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *reg)
+{
+ u32 tl_control;
+
+ iphy->transport_layer_registers = reg;
+
+ writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+ &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * Hardware team recommends that we enable the STP prefetch for all
+ * transports
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+ struct scu_link_layer_registers __iomem *llr)
+{
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ struct sci_phy_user_params *phy_user;
+ struct sci_phy_oem_params *phy_oem;
+ int phy_idx = iphy->phy_index;
+ struct sci_phy_cap phy_cap;
+ u32 phy_configuration;
+ u32 parity_check = 0;
+ u32 parity_count = 0;
+ u32 llctl, link_rate;
+ u32 clksm_value = 0;
+ u32 sp_timeouts = 0;
+
+ phy_user = &ihost->user_parameters.phys[phy_idx];
+ phy_oem = &ihost->oem_parameters.phys[phy_idx];
+ iphy->link_layer_registers = llr;
+
+ /* Set our IDENTIFY frame data */
+ #define SCI_END_DEVICE 0x01
+
+ writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+ SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+ &llr->transmit_identification);
+
+ /* Write the device SAS Address */
+ writel(0xFEDCBA98, &llr->sas_device_name_high);
+ writel(phy_idx, &llr->sas_device_name_low);
+
+ /* Write the source SAS Address */
+ writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+ writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
+
+ /* Clear and Set the PHY Identifier */
+ writel(0, &llr->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
+
+ /* Change the initial state of the phy configuration register */
+ phy_configuration = readl(&llr->phy_configuration);
+
+ /* Hold OOB state machine in reset */
+ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration, &llr->phy_configuration);
+
+ /* Configure the SNW capabilities */
+ phy_cap.all = 0;
+ phy_cap.start = 1;
+ phy_cap.gen3_no_ssc = 1;
+ phy_cap.gen2_no_ssc = 1;
+ phy_cap.gen1_no_ssc = 1;
+ if (ihost->oem_parameters.controller.do_enable_ssc) {
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+ struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx];
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+ bool en_sas = false;
+ bool en_sata = false;
+ u32 sas_type = 0;
+ u32 sata_spread = 0x2;
+ u32 sas_spread = 0x2;
+
+ phy_cap.gen3_ssc = 1;
+ phy_cap.gen2_ssc = 1;
+ phy_cap.gen1_ssc = 1;
+
+ if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+ en_sas = en_sata = true;
+ else {
+ sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+ sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+ if (sata_spread)
+ en_sata = true;
+
+ if (sas_spread) {
+ en_sas = true;
+ sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+ }
+
+ }
+
+ if (en_sas) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_xcvr_control0);
+ reg |= (0x00100000 | (sas_type << 19));
+ writel(reg, &xcvr->afe_xcvr_control0);
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sas_spread << 8;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+ }
+
+ if (en_sata) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sata_spread;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+
+ reg = readl(&llr->stp_control);
+ reg |= 1 << 12;
+ writel(reg, &llr->stp_control);
+ }
+ }
+
+ /* The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity.
+ */
+ parity_check = phy_cap.all;
+ while (parity_check != 0) {
+ if (parity_check & 0x1)
+ parity_count++;
+ parity_check >>= 1;
+ }
+
+ /* If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities.
+ */
+ if ((parity_count % 2) != 0)
+ phy_cap.parity = 1;
+
+ writel(phy_cap.all, &llr->phy_capabilities);
+
+ /* Set the enable spinup period but disable the ability to send
+ * notify enable spinup
+ */
+ writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+ phy_user->notify_enable_spin_up_insertion_frequency),
+ &llr->notify_enable_spinup_control);
+
+ /* Write the ALIGN Insertion Ferequency for connected phy and
+ * inpendent of connected state
+ */
+ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+ phy_user->in_connection_align_insertion_frequency);
+
+ clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+ phy_user->align_insertion_frequency);
+
+ writel(clksm_value, &llr->clock_skew_management);
+
+ if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+ writel(0x04210400, &llr->afe_lookup_table_control);
+ writel(0x020A7C05, &llr->sas_primitive_timeout);
+ } else
+ writel(0x02108421, &llr->afe_lookup_table_control);
+
+ llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+ (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+ switch (phy_user->max_speed_generation) {
+ case SCIC_SDS_PARM_GEN3_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+ break;
+ case SCIC_SDS_PARM_GEN2_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+ break;
+ default:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+ break;
+ }
+ llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+ writel(llctl, &llr->link_layer_control);
+
+ sp_timeouts = readl(&llr->sas_phy_timeouts);
+
+ /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
+ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
+
+ /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
+ * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
+ */
+ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
+
+ writel(sp_timeouts, &llr->sas_phy_timeouts);
+
+ if (is_a2(ihost->pdev)) {
+ /* Program the max ARB time for the PHY to 700us so we
+ * inter-operate with the PMC expander which shuts down
+ * PHYs if the expander PHY generates too many breaks.
+ * This time value will guarantee that the initiator PHY
+ * will generate the break.
+ */
+ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+ &llr->maximum_arbitration_wait_timer_timeout);
+ }
+
+ /* Disable link layer hang detection, rely on the OS timeout for
+ * I/O timeouts.
+ */
+ writel(0, &llr->link_layer_hang_detection_timeout);
+
+ /* We can exit the initial state to the stopped state */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+ "timeout.\n",
+ __func__,
+ iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * This method returns the port currently containing this phy. If the phy is
+ * currently contained by the dummy port, then the phy is considered to not
+ * be part of a port.
+ * @sci_phy: This parameter specifies the phy for which to retrieve the
+ * containing port.
+ *
+ * This method returns a handle to a port that contains the supplied phy.
+ * NULL This value is returned if the phy is not part of a real
+ * port (i.e. it's contained in the dummy port). !NULL All other
+ * values indicate a handle/pointer to the port containing the phy.
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+ struct isci_port *iport = iphy->owning_port;
+
+ if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+ return NULL;
+
+ return iphy->owning_port;
+}
+
+/**
+ * This method will assign a port to the phy object.
+ * @out]: iphy This parameter specifies the phy for which to assign a port
+ * object.
+ *
+ *
+ */
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport)
+{
+ iphy->owning_port = iport;
+
+ if (iphy->bcn_received_while_port_unassigned) {
+ iphy->bcn_received_while_port_unassigned = false;
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ }
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *tl,
+ struct scu_link_layer_registers __iomem *ll)
+{
+ /* Perfrom the initialization of the TL hardware */
+ sci_phy_transport_layer_initialization(iphy, tl);
+
+ /* Perofrm the initialization of the PE hardware */
+ sci_phy_link_layer_initialization(iphy, ll);
+
+ /* There is nothing that needs to be done in this state just
+ * transition to the stopped state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * This method assigns the direct attached device ID for this phy.
+ *
+ * @iphy The phy for which the direct attached device id is to
+ * be assigned.
+ * @device_id The direct attached device ID to assign to the phy.
+ * This will either be the RNi for the device or an invalid RNi if there
+ * is no current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+ u32 tl_control;
+
+ writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * The read should guarantee that the first write gets posted
+ * before the next write
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+ sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ struct sas_identify_frame *iaf;
+
+ iaf = &iphy->frame_rcvd.iaf;
+ memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+ proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_STOPPED) {
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ case SCI_PHY_READY:
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_READY) {
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+ u32 enable_spinup;
+
+ enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ return SCI_SUCCESS;
+ }
+ case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+ u32 scu_sas_pcfg_value;
+
+ /* Release the spinup hold state and reset the OOB state machine */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now restart the OOB operation */
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+ return SCI_SUCCESS;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+ /* continue the link training for the phy as if it were a SAS PHY
+ * instead of a SATA PHY. This is done because the completion queue had a SAS
+ * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+ */
+ u32 phy_control;
+
+ phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+ phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+ writel(phy_control,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+ iphy->protocol = SAS_PROTOCOL_SSP;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+ /* This method continues the link training for the phy as if it were a SATA PHY
+ * instead of a SAS PHY. This is done because the completion queue had a SATA
+ * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+ iphy->protocol = SAS_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ * all protocols upon completion of link training.
+ * @sci_phy: This parameter specifies the phy object for which link training
+ * has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ * associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ * sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+ enum sas_linkrate max_link_rate,
+ u32 next_state)
+{
+ iphy->max_negotiated_speed = max_link_rate;
+
+ sci_change_state(&iphy->sm, next_state);
+}
+
+static const char *phy_event_name(u32 event_code)
+{
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_PORT_SELECTOR_DETECTED:
+ return "port selector";
+ case SCU_EVENT_SENT_PORT_SELECTION:
+ return "port selection";
+ case SCU_EVENT_HARD_RESET_TRANSMITTED:
+ return "tx hard reset";
+ case SCU_EVENT_HARD_RESET_RECEIVED:
+ return "rx hard reset";
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ return "identify timeout";
+ case SCU_EVENT_LINK_FAILURE:
+ return "link fail";
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ return "sata spinup hold";
+ case SCU_EVENT_SAS_15_SSC:
+ case SCU_EVENT_SAS_15:
+ return "sas 1.5";
+ case SCU_EVENT_SAS_30_SSC:
+ case SCU_EVENT_SAS_30:
+ return "sas 3.0";
+ case SCU_EVENT_SAS_60_SSC:
+ case SCU_EVENT_SAS_60:
+ return "sas 6.0";
+ case SCU_EVENT_SATA_15_SSC:
+ case SCU_EVENT_SATA_15:
+ return "sata 1.5";
+ case SCU_EVENT_SATA_30_SSC:
+ case SCU_EVENT_SATA_30:
+ return "sata 3.0";
+ case SCU_EVENT_SATA_60_SSC:
+ case SCU_EVENT_SATA_60:
+ return "sata 6.0";
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ return "sas detect";
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ return "sata detect";
+ default:
+ return "unknown";
+ }
+}
+
+#define phy_event_dbg(iphy, state, code) \
+ dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+ phy_to_host(iphy)->id, iphy->phy_index, \
+ phy_state_name(state), phy_event_name(code), code)
+
+#define phy_event_warn(iphy, state, code) \
+ dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+ phy_to_host(iphy)->id, iphy->phy_index, \
+ phy_state_name(state), phy_event_name(code), code)
+
+
+void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+{
+ u32 val;
+
+ /* Extend timeout */
+ val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
+ val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
+ val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
+
+ writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ sci_phy_start_sas_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ sci_phy_start_sata_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ phy_event_dbg(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * Why is this being reported again by the controller?
+ * We would re-enter this state so just stay here */
+ break;
+ case SCU_EVENT_SAS_15:
+ case SCU_EVENT_SAS_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_30:
+ case SCU_EVENT_SAS_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_60:
+ case SCU_EVENT_SAS_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /*
+ * We were doing SAS PHY link training and received a SATA PHY event
+ * continue OOB/SN as if this were a SATA PHY */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend the timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ break;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* We were doing SAS PHY link training and received a
+ * SATA PHY event continue OOB/SN as if this were a
+ * SATA PHY
+ */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend the timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+ case SCU_EVENT_HARD_RESET_RECEIVED:
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events are received every 10ms and are
+ * expected while in this state
+ */
+ break;
+
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for the
+ * SATA finished start down the SAS link traning path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events might be received since we dont know how many may be in
+ * the completion queue while waiting for power
+ */
+ break;
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ iphy->protocol = SAS_PROTOCOL_SATA;
+
+ /* We have received the SATA PHY notification change state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for the
+ * SATA finished start down the SAS link traning path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /*
+ * The hardware reports multiple SATA PHY detected events
+ * ignore the extras */
+ break;
+ case SCU_EVENT_SATA_15:
+ case SCU_EVENT_SATA_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_30:
+ case SCU_EVENT_SATA_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_60:
+ case SCU_EVENT_SATA_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * There has been a change in the phy type before OOB/SN for the
+ * SATA finished start down the SAS link traning path. */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+
+ case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_READY:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Set default timeout */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_BROADCAST_CHANGE:
+ case SCU_EVENT_BROADCAST_SES:
+ case SCU_EVENT_BROADCAST_RESERVED0:
+ case SCU_EVENT_BROADCAST_RESERVED1:
+ case SCU_EVENT_BROADCAST_EXPANDER:
+ case SCU_EVENT_BROADCAST_AEN:
+ /* Broadcast change received. Notify the port. */
+ if (phy_get_non_dummy_port(iphy) != NULL)
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ else
+ iphy->bcn_received_while_port_unassigned = true;
+ break;
+ case SCU_EVENT_BROADCAST_RESERVED3:
+ case SCU_EVENT_BROADCAST_RESERVED4:
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_RESETTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_HARD_RESET_TRANSMITTED:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ phy_event_warn(iphy, state, event_code);
+ return SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+ return SCI_SUCCESS;
+ default:
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ enum sci_status result;
+ unsigned long flags;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_IAF_UF: {
+ u32 *frame_words;
+ struct sas_identify_frame iaf;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_words);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
+ sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
+ if (iaf.frame_type == 0) {
+ u32 state;
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+ if (iaf.smp_tport) {
+ /* We got the IAF for an expander PHY go to the final
+ * state since there are no power requirements for
+ * expander phys.
+ */
+ state = SCI_PHY_SUB_FINAL;
+ } else {
+ /* We got the IAF we can now go to the await spinup
+ * semaphore state
+ */
+ state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+ }
+ sci_change_state(&iphy->sm, state);
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return result;
+ }
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+ struct dev_to_host_fis *frame_header;
+ u32 *fis_frame_data;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
+ if ((frame_header->fis_type == FIS_REGD2H) &&
+ !(frame_header->status & ATA_BUSY)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&fis_frame_data);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+ frame_header,
+ fis_frame_data);
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* got IAF we can now go to the await spinup semaphore state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ /* Regardless of the result we are done with this frame with it */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return result;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* This is just an temporary state go off to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+ /*
+ * Clear the PE suspend condition so we can actually
+ * receive SIG FIS
+ * The hardware will not respond to the XRDY until the PE
+ * suspend condition is cleared.
+ */
+ sci_phy_resume(iphy);
+
+ sci_mod_timer(&iphy->sata_timer,
+ SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+ } else
+ iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* State machine has run to completion so exit out and change
+ * the base state machine to the ready state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ *
+ * @sci_phy: This is the struct isci_phy object to stop.
+ *
+ * This method will stop the struct isci_phy object. This does not reset the
+ * protocol engine it just suspends it and places it in a state where it will
+ * not cause the end device to power up. none
+ */
+static void scu_link_layer_stop_protocol_engine(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+ u32 enable_spinup_value;
+
+ /* Suspend the protocol engine and place it in a sata spinup hold state */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |=
+ (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+ SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Disable the notify enable spinup primitives */
+ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
+{
+ struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+ u32 val;
+
+ /** Reset OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /** Reset OOB sequence - end */
+
+ /** Start OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /** Start OOB sequence - end */
+}
+
+/**
+ *
+ *
+ * This method will transmit a hard reset request on the specified phy. The SCU
+ * hardware requires that we reset the OOB state machine and set the hard reset
+ * bit in the phy configuration register. We then must start OOB over with the
+ * hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+ struct isci_phy *iphy)
+{
+ u32 phy_configuration_value;
+
+ /*
+ * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+ * to the starting state. */
+ phy_configuration_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+ phy_configuration_value |=
+ (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now take the OOB state machine out of reset */
+ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * @todo We need to get to the controller to place this PE in a
+ * reset state
+ */
+ sci_del_timer(&iphy->sata_timer);
+
+ scu_link_layer_stop_protocol_engine(iphy);
+
+ if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ scu_link_layer_stop_protocol_engine(iphy);
+ scu_link_layer_start_oob(iphy);
+
+ /* We don't know what kind of phy we are going to be just yet */
+ iphy->protocol = SAS_PROTOCOL_NONE;
+ iphy->bcn_received_while_port_unassigned = false;
+
+ if (iphy->sm.previous_state_id == SCI_PHY_READY)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* The phy is being reset, therefore deactivate it from the port. In
+ * the resetting state we don't notify the user regarding link up and
+ * link down notifications
+ */
+ sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+ if (iphy->protocol == SAS_PROTOCOL_SSP) {
+ scu_link_layer_tx_hard_reset(iphy);
+ } else {
+ /* The SCU does not need to have a discrete reset state so
+ * just go back to the starting state.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ }
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+ [SCI_PHY_INITIAL] = { },
+ [SCI_PHY_STOPPED] = {
+ .enter_state = sci_phy_stopped_state_enter,
+ },
+ [SCI_PHY_STARTING] = {
+ .enter_state = sci_phy_starting_state_enter,
+ },
+ [SCI_PHY_SUB_INITIAL] = {
+ .enter_state = sci_phy_starting_initial_substate_enter,
+ },
+ [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+ [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+ .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sas_power_substate_exit,
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+ .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_power_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+ .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_phy_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+ .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_speed_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+ .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+ .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
+ },
+ [SCI_PHY_SUB_FINAL] = {
+ .enter_state = sci_phy_starting_final_substate_enter,
+ },
+ [SCI_PHY_READY] = {
+ .enter_state = sci_phy_ready_state_enter,
+ .exit_state = sci_phy_ready_state_exit,
+ },
+ [SCI_PHY_RESETTING] = {
+ .enter_state = sci_phy_resetting_state_enter,
+ },
+ [SCI_PHY_FINAL] = { },
+};
+
+void sci_phy_construct(struct isci_phy *iphy,
+ struct isci_port *iport, u8 phy_index)
+{
+ sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+ /* Copy the rest of the input data to our locals */
+ iphy->owning_port = iport;
+ iphy->phy_index = phy_index;
+ iphy->bcn_received_while_port_unassigned = false;
+ iphy->protocol = SAS_PROTOCOL_NONE;
+ iphy->link_layer_registers = NULL;
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Create the SIGNATURE FIS Timeout timer for this phy */
+ sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ u64 sci_sas_addr;
+ __be64 sas_addr;
+
+ sci_sas_addr = oem->phys[index].sas_address.high;
+ sci_sas_addr <<= 32;
+ sci_sas_addr |= oem->phys[index].sas_address.low;
+ sas_addr = cpu_to_be64(sci_sas_addr);
+ memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
+
+ iphy->sas_phy.enabled = 0;
+ iphy->sas_phy.id = index;
+ iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+ iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+ iphy->sas_phy.ha = &ihost->sas_ha;
+ iphy->sas_phy.lldd_phy = iphy;
+ iphy->sas_phy.enabled = 1;
+ iphy->sas_phy.class = SAS;
+ iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+ iphy->sas_phy.tproto = 0;
+ iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+ iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+ iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+ iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+ memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ * functions. This is a phy management function.
+ * @phy: This parameter specifies the sphy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * status, zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+ enum phy_func func,
+ void *buf)
+{
+ int ret = 0;
+ struct isci_phy *iphy = sas_phy->lldd_phy;
+ struct asd_sas_port *port = sas_phy->port;
+ struct isci_host *ihost = sas_phy->ha->lldd_ha;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+ __func__, sas_phy, func, buf, iphy, port);
+
+ switch (func) {
+ case PHY_FUNC_DISABLE:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ scu_link_layer_start_oob(iphy);
+ sci_phy_stop(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_LINK_RESET:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ scu_link_layer_start_oob(iphy);
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_HARD_RESET:
+ if (!port)
+ return -ENODEV;
+
+ ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
+
+ break;
+ case PHY_FUNC_GET_EVENTS: {
+ struct scu_link_layer_registers __iomem *r;
+ struct sas_phy *phy = sas_phy->phy;
+
+ r = iphy->link_layer_registers;
+ phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
+ phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
+ phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
+ phy->invalid_dword_count = readl(&r->invalid_dword_counter);
+ break;
+ }
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+ __func__, sas_phy, func);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 000000000..45fecfa36
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,460 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine. Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset. Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
+
+/* This is the timeout for the SATA OOB/SN because the hardware does not
+ * recognize a hot plug after OOB signal but before the SN signals. We need to
+ * make sure after a hotplug timeout if we have not received the speed event
+ * notification from the hardware that we restart the hardware OOB state
+ * machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm:
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout SATA signature FIS arrival
+ */
+struct isci_phy {
+ struct sci_base_state_machine sm;
+ struct isci_port *owning_port;
+ enum sas_linkrate max_negotiated_speed;
+ enum sas_protocol protocol;
+ u8 phy_index;
+ bool bcn_received_while_port_unassigned;
+ bool is_in_link_training;
+ struct sci_timer sata_timer;
+ struct scu_transport_layer_registers __iomem *transport_layer_registers;
+ struct scu_link_layer_registers __iomem *link_layer_registers;
+ struct asd_sas_phy sas_phy;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ union {
+ struct sas_identify_frame iaf;
+ struct dev_to_host_fis fis;
+ } frame_rcvd;
+};
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+ struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+ return iphy;
+}
+
+struct sci_phy_cap {
+ union {
+ struct {
+ /*
+ * The SAS specification indicates the start bit shall
+ * always be set to
+ * 1. This implementation will have the start bit set
+ * to 0 if the PHY CAPABILITIES were either not
+ * received or speed negotiation failed.
+ */
+ u8 start:1;
+ u8 tx_ssc_type:1;
+ u8 res1:2;
+ u8 req_logical_linkrate:4;
+
+ u32 gen1_no_ssc:1;
+ u32 gen1_ssc:1;
+ u32 gen2_no_ssc:1;
+ u32 gen2_ssc:1;
+ u32 gen3_no_ssc:1;
+ u32 gen3_ssc:1;
+ u32 res2:17;
+ u32 parity:1;
+ };
+ u32 all;
+ };
+} __packed;
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+ union {
+ struct {
+ u16 _r_a:1;
+ u16 smp_iport:1;
+ u16 stp_iport:1;
+ u16 ssp_iport:1;
+ u16 _r_b:4;
+ u16 _r_c:1;
+ u16 smp_tport:1;
+ u16 stp_tport:1;
+ u16 ssp_tport:1;
+ u16 _r_d:4;
+ };
+ u16 all;
+ };
+} __packed;
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ * all phys that can be retrieved.
+ *
+ *
+ */
+struct sci_phy_properties {
+ /**
+ * This field specifies the port that currently contains the
+ * supplied phy. This field may be set to NULL
+ * if the phy is not currently contained in a port.
+ */
+ struct isci_port *iport;
+
+ /**
+ * This field specifies the link rate at which the phy is
+ * currently operating.
+ */
+ enum sas_linkrate negotiated_link_rate;
+
+ /**
+ * This field specifies the index of the phy in relation to other
+ * phys within the controller. This index is zero relative.
+ */
+ u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ * specific to a SAS phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sas_phy_properties {
+ /**
+ * This field delineates the Identify Address Frame received
+ * from the remote end point.
+ */
+ struct sas_identify_frame rcvd_iaf;
+
+ /**
+ * This field delineates the Phy capabilities structure received
+ * from the remote end point.
+ */
+ struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ * specific to a SATA phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sata_phy_properties {
+ /**
+ * This field delineates the signature FIS received from the
+ * attached target.
+ */
+ struct dev_to_host_fis signature_fis;
+
+ /**
+ * This field specifies to the user if a port selector is connected
+ * on the specified phy.
+ */
+ bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ * optional information that can be retrieved for a specific phy.
+ *
+ *
+ */
+enum sci_phy_counter_id {
+ /**
+ * This PHY information field tracks the number of frames received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of frames transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of DWORDs received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+ /**
+ * This PHY information field tracks the number of DWORDs transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+ /**
+ * This PHY information field tracks the number of times DWORD
+ * synchronization was lost.
+ */
+ SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received DWORDs with
+ * running disparity errors.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received frames with a
+ * CRC error (not including short or truncated frames).
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of times the inactivity
+ * timer for connections on the phy has been utilized.
+ */
+ SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of CREDIT BLOCKED
+ * primitives received.
+ * @note Depending on remote device implementation, credit blocks
+ * may occur regularly.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+ /**
+ * This PHY information field contains the number of short frames
+ * received. A short frame is simply a frame smaller then what is
+ * allowed by either the SAS or SATA specification.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * credit has been exhausted.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * a DONE has been received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+ /**
+ * This PHY information field contains the number of times the phy
+ * failed to achieve DWORD synchronization during speed negotiation.
+ */
+ SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+/**
+ * enum sci_phy_states - phy state machine states
+ * @SCI_PHY_INITIAL: Simply the initial state for the base domain state
+ * machine.
+ * @SCI_PHY_STOPPED: phy has successfully been stopped. In this state
+ * no new IO operations are permitted on this phy.
+ * @SCI_PHY_STARTING: the phy is in the process of becomming ready. In
+ * this state no new IO operations are permitted on
+ * this phy.
+ * @SCI_PHY_SUB_INITIAL: Initial state
+ * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
+ * type notification
+ * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
+ * notification
+ * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
+ * notification
+ * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
+ * power
+ * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
+ * power
+ * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
+ * notification
+ * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
+ * notification
+ * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
+ * unsolicited frame notification
+ * @SCI_PHY_SUB_FINAL: Exit state for this state machine
+ * @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform
+ * IO operations utilizing this phy as long as it is
+ * currently part of a valid port. This state is
+ * entered from the STARTING state.
+ * @SCI_PHY_RESETTING: phy is in the process of being reset. In this
+ * state no new IO operations are permitted on this
+ * phy. This state is entered from the READY state.
+ * @SCI_PHY_FINAL: Simply the final state for the base phy state
+ * machine.
+ */
+#define PHY_STATES {\
+ C(PHY_INITIAL),\
+ C(PHY_STOPPED),\
+ C(PHY_STARTING),\
+ C(PHY_SUB_INITIAL),\
+ C(PHY_SUB_AWAIT_OSSP_EN),\
+ C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
+ C(PHY_SUB_AWAIT_IAF_UF),\
+ C(PHY_SUB_AWAIT_SAS_POWER),\
+ C(PHY_SUB_AWAIT_SATA_POWER),\
+ C(PHY_SUB_AWAIT_SATA_PHY_EN),\
+ C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
+ C(PHY_SUB_AWAIT_SIG_FIS_UF),\
+ C(PHY_SUB_FINAL),\
+ C(PHY_READY),\
+ C(PHY_RESETTING),\
+ C(PHY_FINAL),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_phy_states PHY_STATES;
+#undef C
+
+void sci_phy_construct(
+ struct isci_phy *iphy,
+ struct isci_port *iport,
+ u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+ struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *transport_layer_registers,
+ struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+ struct isci_phy *iphy);
+
+void sci_phy_resume(
+ struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+ struct isci_phy *iphy,
+ u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+ struct isci_phy *iphy,
+ u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+ struct isci_phy *iphy,
+ u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+ struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+ struct isci_phy *iphy,
+ struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 000000000..13098b09a
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1770 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
+#define SCU_DUMMY_INDEX (0xFFFF)
+
+#undef C
+#define C(a) (#a)
+const char *port_state_name(enum sci_port_states state)
+{
+ static const char * const strings[] = PORT_STATES;
+
+ return strings[state];
+}
+#undef C
+
+static struct device *sciport_to_dev(struct isci_port *iport)
+{
+ int i = iport->physical_port_index;
+ struct isci_port *table;
+ struct isci_host *ihost;
+
+ if (i == SCIC_SDS_DUMMY_PORT)
+ i = SCI_MAX_PORTS+1;
+
+ table = iport - i;
+ ihost = container_of(table, typeof(*ihost), ports[0]);
+
+ return &ihost->pdev->dev;
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+ u8 index;
+
+ proto->all = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = iport->phy_table[index];
+
+ if (!iphy)
+ continue;
+ sci_phy_get_protocols(iphy, proto);
+ }
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+ u32 index;
+ u32 mask;
+
+ mask = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ mask |= (1 << index);
+
+ return mask;
+}
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ * regarding the port, such as: physical index, protocols, sas address, etc.
+ * @port: this parameter specifies the port for which to retrieve the physical
+ * index.
+ * @properties: This parameter specifies the properties structure into which to
+ * copy the requested information.
+ *
+ * Indicate if the user specified a valid port. SCI_SUCCESS This value is
+ * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
+ * value is returned if the specified port is not valid. When this value is
+ * returned, no data is copied to the properties output parameter.
+ */
+enum sci_status sci_port_get_properties(struct isci_port *iport,
+ struct sci_port_properties *prop)
+{
+ if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+ return SCI_FAILURE_INVALID_PORT;
+
+ prop->index = iport->logical_port_index;
+ prop->phy_mask = sci_port_get_phys(iport);
+ sci_port_get_sas_address(iport, &prop->local.sas_address);
+ sci_port_get_protocols(iport, &prop->local.protocols);
+ sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+ return SCI_SUCCESS;
+}
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+ struct isci_phy *iphy;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ iphy = iport->phy_table[i];
+ if (!iphy)
+ continue;
+ val = readl(&iphy->link_layer_registers->link_layer_control);
+ /* clear the bit by writing 1. */
+ writel(val, &iphy->link_layer_registers->link_layer_control);
+ }
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
+ sci_port_bcn_enable(iport);
+}
+
+static void isci_port_link_up(struct isci_host *isci_host,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ struct sci_port_properties properties;
+ unsigned long success = true;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n",
+ __func__, iport);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ sci_port_get_properties(iport, &properties);
+
+ if (iphy->protocol == SAS_PROTOCOL_SATA) {
+ u64 attached_sas_address;
+
+ iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+ /*
+ * For direct-attached SATA devices, the SCI core will
+ * automagically assign a SAS address to the end device
+ * for the purpose of creating a port. This SAS address
+ * will not be the same as assigned to the PHY and needs
+ * to be obtained from struct sci_port_properties properties.
+ */
+ attached_sas_address = properties.remote.sas_address.high;
+ attached_sas_address <<= 32;
+ attached_sas_address |= properties.remote.sas_address.low;
+ swab64s(&attached_sas_address);
+
+ memcpy(&iphy->sas_phy.attached_sas_addr,
+ &attached_sas_address, sizeof(attached_sas_address));
+ } else if (iphy->protocol == SAS_PROTOCOL_SSP) {
+ iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+ /* Copy the attached SAS address from the IAF */
+ memcpy(iphy->sas_phy.attached_sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+ } else {
+ dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
+ success = false;
+ }
+
+ iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* Notify libsas that we have an address frame, if indeed
+ * we've found an SSP, SMP, or STP target */
+ if (success)
+ isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ * becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @phy: This parameter specifies the isci phy with the active link.
+ * @port: This parameter specifies the isci port with the active link.
+ *
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+ struct isci_phy *isci_phy,
+ struct isci_port *isci_port)
+{
+ struct isci_remote_device *isci_device;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ if (isci_port) {
+
+ /* check to see if this is the last phy on this port. */
+ if (isci_phy->sas_phy.port &&
+ isci_phy->sas_phy.port->num_phys == 1) {
+ /* change the state for all devices on this port. The
+ * next task sent to this device will be returned as
+ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+ * remove the target
+ */
+ list_for_each_entry(isci_device,
+ &isci_port->remote_dev_list,
+ node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+ set_bit(IDEV_GONE, &isci_device->flags);
+ }
+ }
+ }
+
+ /* Notify libsas of the borken link, this will trigger calls to our
+ * isci_port_deformed and isci_dev_gone functions.
+ */
+ sas_phy_disconnected(&isci_phy->sas_phy);
+ isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+ switch (state) {
+ case SCI_PORT_READY:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* flag dummy rnc hanling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+ enum sci_port_states state)
+{
+ struct sci_base_state_machine *sm = &iport->sm;
+ enum sci_port_states old_state = sm->current_state_id;
+
+ if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+ iport->ready_exit = true;
+
+ sci_change_state(sm, state);
+ iport->ready_exit = false;
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ * when the hard reset complete notification has been received.
+ * @port: This parameter specifies the sci port with the active link.
+ * @completion_status: This parameter specifies the core status for the reset
+ * process.
+ *
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+ enum sci_status completion_status)
+{
+ struct isci_host *ihost = isci_port->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_port = %p, completion_status=%x\n",
+ __func__, isci_port, completion_status);
+
+ /* Save the status of the hard reset from the port. */
+ isci_port->hard_reset_status = completion_status;
+
+ if (completion_status != SCI_SUCCESS) {
+
+ /* The reset failed. The port state is now SCI_PORT_FAILED. */
+ if (isci_port->active_phy_mask == 0) {
+ int phy_idx = isci_port->last_active_phy;
+ struct isci_phy *iphy = &ihost->phys[phy_idx];
+
+ /* Generate the link down now to the host, since it
+ * was intercepted by the hard reset state machine when
+ * it really happened.
+ */
+ isci_port_link_down(ihost, iphy, isci_port);
+ }
+ /* Advance the port state so that link state changes will be
+ * noticed.
+ */
+ port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
+
+ }
+ clear_bit(IPORT_RESET_PENDING, &isci_port->state);
+ wake_up(&ihost->eventq);
+
+}
+
+/* This method will return a true value if the specified phy can be assigned to
+ * this port The following is a list of phys for each port that are allowed: -
+ * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method
+ * doesn't preclude all configurations. It merely ensures that a phy is part
+ * of the allowable set of phy identifiers for that port. For example, one
+ * could assign phy 3 to port 0 and no other phys. Please refer to
+ * sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported. bool true if this is a valid phy
+ * assignment for the port false if this is not a valid phy assignment for the
+ * port
+ */
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct sci_user_parameters *user = &ihost->user_parameters;
+
+ /* Initialize to invalid value. */
+ u32 existing_phy_index = SCI_MAX_PHYS;
+ u32 index;
+
+ if ((iport->physical_port_index == 1) && (phy_index != 1))
+ return false;
+
+ if (iport->physical_port_index == 3 && phy_index != 3)
+ return false;
+
+ if (iport->physical_port_index == 2 &&
+ (phy_index == 0 || phy_index == 1))
+ return false;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index] && index != phy_index)
+ existing_phy_index = index;
+
+ /* Ensure that all of the phys in the port are capable of
+ * operating at the same maximum link rate.
+ */
+ if (existing_phy_index < SCI_MAX_PHYS &&
+ user->phys[phy_index].max_speed_generation !=
+ user->phys[existing_phy_index].max_speed_generation)
+ return false;
+
+ return true;
+}
+
+/**
+ *
+ * @sci_port: This is the port object for which to determine if the phy mask
+ * can be supported.
+ *
+ * This method will return a true value if the port's phy mask can be supported
+ * by the SCU. The following is a list of valid PHY mask configurations for
+ * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2]
+ * - Port 3 - [3] This method returns a boolean indication specifying if the
+ * phy mask can be supported. true if this is a valid phy assignment for the
+ * port false if this is not a valid phy assignment for the port
+ */
+static bool sci_port_is_phy_mask_valid(
+ struct isci_port *iport,
+ u32 phy_mask)
+{
+ if (iport->physical_port_index == 0) {
+ if (((phy_mask & 0x0F) == 0x0F)
+ || ((phy_mask & 0x03) == 0x03)
+ || ((phy_mask & 0x01) == 0x01)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 1) {
+ if (((phy_mask & 0x02) == 0x02)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 2) {
+ if (((phy_mask & 0x0C) == 0x0C)
+ || ((phy_mask & 0x04) == 0x04)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 3) {
+ if (((phy_mask & 0x08) == 0x08)
+ || (phy_mask == 0))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port. Currently, the lowest order phy that is connected is returned.
+ * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is
+ * returned if there are no currently active (i.e. connected to a remote end
+ * point) phys contained in the port. All other values specify a struct sci_phy
+ * object that is active in the port.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+ u32 index;
+ struct isci_phy *iphy;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy))
+ return iphy;
+ }
+
+ return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Check to see if we can add this phy to a port
+ * that means that the phy is not part of a port and that the port does
+ * not already have a phy assinged to the phy index.
+ */
+ if (!iport->phy_table[iphy->phy_index] &&
+ !phy_get_non_dummy_port(iphy) &&
+ sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /* Phy is being added in the stopped state so we are in MPC mode
+ * make logical port index = physical port index
+ */
+ iport->logical_port_index = iport->physical_port_index;
+ iport->phy_table[iphy->phy_index] = iphy;
+ sci_phy_set_port(iphy, iport);
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Make sure that this phy is part of this port */
+ if (iport->phy_table[iphy->phy_index] == iphy &&
+ phy_get_non_dummy_port(iphy) == iport) {
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* Yep it is assigned to this port so remove it */
+ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+ iport->phy_table[iphy->phy_index] = NULL;
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ u32 index;
+
+ sas->high = 0;
+ sas->low = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ struct isci_phy *iphy;
+
+ /*
+ * Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = sci_port_get_a_connected_phy(iport);
+ if (iphy) {
+ if (iphy->protocol != SAS_PROTOCOL_SATA) {
+ sci_phy_get_attached_sas_address(iphy, sas);
+ } else {
+ sci_phy_get_sas_address(iphy, sas);
+ sas->low += iphy->phy_index;
+ }
+ } else {
+ sas->high = 0;
+ sas->low = 0;
+ }
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ *
+ * @sci_port: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+ union scu_remote_node_context *rnc;
+
+ rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+ rnc->ssp.remote_sas_address_hi = 0;
+ rnc->ssp.remote_sas_address_lo = 0;
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = 1;
+ rnc->ssp.logical_port_index = iport->physical_port_index;
+
+ rnc->ssp.nexus_loss_timer_enable = false;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = true;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+ rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * construct a dummy task context data structure. This
+ * structure will be posted to the hardwre to work around a scheduler error
+ * in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct scu_task_context *task_context;
+
+ task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ memset(task_context, 0, sizeof(struct scu_task_context));
+
+ task_context->initiator_request = 1;
+ task_context->connection_rate = 1;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->task_index = ISCI_TAG_TCI(tag);
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+ task_context->remote_node_index = iport->reserved_rni;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+ isci_free_tag(ihost, iport->reserved_tag);
+
+ if (iport->reserved_rni != SCU_DUMMY_INDEX)
+ sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+ 1, iport->reserved_rni);
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->active_phy_mask & (1 << index))
+ sci_phy_setup_transport(iport->phy_table[index], device_id);
+ }
+}
+
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ sci_phy_resume(iphy);
+ iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+ struct isci_phy *iphy,
+ u8 flags)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
+ sci_phy_resume(iphy);
+
+ iport->active_phy_mask |= 1 << iphy->phy_index;
+
+ sci_controller_clear_invalid_phy(ihost, iphy);
+
+ if (flags & PF_NOTIFY)
+ isci_port_link_up(ihost, iport, iphy);
+}
+
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ iport->active_phy_mask &= ~(1 << iphy->phy_index);
+ iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
+ if (!iport->active_phy_mask)
+ iport->last_active_phy = iphy->phy_index;
+
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Re-assign the phy back to the LP as if it were a narrow port for APC
+ * mode. For MPC mode, the phy will remain in the port.
+ */
+ if (iport->owning_controller->oem_parameters.controller.mode_type ==
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
+
+ if (do_notify_user == true)
+ isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Check to see if we have alreay reported this link as bad and if
+ * not go ahead and tell the SCI_USER that we have discovered an
+ * invalid link.
+ */
+ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+ ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+ dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+ }
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @sci_port: sci_port object for which has a phy that has gone link up.
+ * @sci_phy: This is the struct isci_phy object that has gone link up.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
+ *
+ * Determine if this phy can be assigned to this port . If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if it's attached SAS ADDRESS is the
+ * same as all other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+ struct isci_phy *iphy,
+ u8 flags)
+{
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address phy_sas_address;
+
+ sci_port_get_attached_sas_address(iport, &port_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+ /* If the SAS address of the new phy matches the SAS address of
+ * other phys in the port OR this is the first phy in the port,
+ * then activate the phy and allow it to be used for operations
+ * in this port.
+ */
+ if ((phy_sas_address.high == port_sas_address.high &&
+ phy_sas_address.low == port_sas_address.low) ||
+ iport->active_phy_mask == 0) {
+ struct sci_base_state_machine *sm = &iport->sm;
+
+ sci_port_activate_phy(iport, iphy, flags);
+ if (sm->current_state_id == SCI_PORT_RESETTING)
+ port_state_machine_change(iport, SCI_PORT_READY);
+ } else
+ sci_port_invalid_link_up(iport, iphy);
+}
+
+
+
+/**
+ * This method returns false if the port only has a single phy object assigned.
+ * If there are no phys or more than one phy then the method will return
+ * true.
+ * @sci_port: The port for which the wide port condition is to be checked.
+ *
+ * bool true Is returned if this is a wide ported port. false Is returned if
+ * this is a narrow port.
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+ u32 index;
+ u32 phy_count = 0;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index] != NULL) {
+ phy_count++;
+ }
+ }
+
+ return phy_count != 1;
+}
+
+/**
+ * This method is called by the PHY object when the link is detected. if the
+ * port wants the PHY to continue on to the link up state then the port
+ * layer must return true. If the port object returns false the phy object
+ * must halt its attempt to go link up.
+ * @sci_port: The port associated with the phy object.
+ * @sci_phy: The phy object that is trying to go link up.
+ *
+ * true if the phy object can continue to the link up condition. true Is
+ * returned if this phy can continue to the ready state. false Is returned if
+ * can not continue on to the ready state. This notification is in place for
+ * wide ports and direct attached phys. Since there are no wide ported SATA
+ * devices this could become an invalid port configuration.
+ */
+bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
+{
+ if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+ (iphy->protocol == SAS_PROTOCOL_SATA)) {
+ if (sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+ return false;
+ } else {
+ struct isci_host *ihost = iport->owning_controller;
+ struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+ writel(iphy->phy_index,
+ &dst_port->port_pe_configuration_register[iphy->phy_index]);
+ }
+ }
+
+ return true;
+}
+
+static void port_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+ struct isci_host *ihost = iport->owning_controller;
+ unsigned long flags;
+ u32 current_state;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ current_state = iport->sm.current_state_id;
+
+ if (current_state == SCI_PORT_RESETTING) {
+ /* if the port is still in the resetting state then the timeout
+ * fired before the reset completed.
+ */
+ port_state_machine_change(iport, SCI_PORT_FAILED);
+ } else if (current_state == SCI_PORT_STOPPED) {
+ /* if the port is stopped then the start request failed In this
+ * case stay in the stopped state.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p failed to stop before tiemout.\n",
+ __func__,
+ iport);
+ } else if (current_state == SCI_PORT_STOPPING) {
+ dev_dbg(sciport_to_dev(iport),
+ "%s: port%d: stop complete timeout\n",
+ __func__, iport->physical_port_index);
+ } else {
+ /* The port is in the ready state and we have a timer
+ * reporting a timeout this should not happen.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p is processing a timeout operation "
+ "in state %d.\n", __func__, iport, current_state);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * This function updates the hardwares VIIT entry for this port.
+ *
+ *
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+ struct sci_sas_address sas_address;
+
+ sci_port_get_sas_address(iport, &sas_address);
+
+ writel(sas_address.high,
+ &iport->viit_registers->initiator_sas_address_hi);
+ writel(sas_address.low,
+ &iport->viit_registers->initiator_sas_address_lo);
+
+ /* This value get cleared just in case its not already cleared */
+ writel(0, &iport->viit_registers->reserved);
+
+ /* We are required to update the status register last */
+ writel(SCU_VIIT_ENTRY_ID_VIIT |
+ SCU_VIIT_IPPT_INITIATOR |
+ ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+ SCU_VIIT_STATUS_ALL_VALID,
+ &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+ u16 index;
+ struct isci_phy *iphy;
+ enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+ /*
+ * Loop through all of the phys in this port and find the phy with the
+ * lowest maximum link rate. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy) &&
+ iphy->max_negotiated_speed < max_allowed_speed)
+ max_allowed_speed = iphy->max_negotiated_speed;
+ }
+
+ return max_allowed_speed;
+}
+
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @sci_port: port to post task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue causing a starvation problem for currently
+ * ongoing requests.
+ *
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 0;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * This routine will abort the dummy request. This will alow the hardware to
+ * power down parts of the silicon to save power.
+ *
+ * @sci_port: The port on which the task must be aborted.
+ *
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 1;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ *
+ * @sci_port: This is the struct isci_port object to resume.
+ *
+ * This method will resume the port task scheduler for this port object. none
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+ if (iport->active_phy_mask != 0) {
+ /* At least one of the phys on the port is ready */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+}
+
+static void scic_sds_port_ready_substate_waiting_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ sci_port_resume_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+ u32 index;
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
+ __func__, iport->physical_port_index);
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index]) {
+ writel(iport->physical_port_index,
+ &iport->port_pe_configuration_register[
+ iport->phy_table[index]->phy_index]);
+ if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+ sci_port_resume_phy(iport, iport->phy_table[index]);
+ }
+ }
+
+ sci_port_update_viit_entry(iport);
+
+ /*
+ * Post the dummy task for the port so the hardware can schedule
+ * io correctly
+ */
+ sci_port_post_dummy_request(iport);
+}
+
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+
+ rnc->ssp.is_valid = false;
+
+ /* ensure the preceding tc abort request has reached the
+ * controller and give it ample time to act before posting the rnc
+ * invalidate
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ *
+ * @object: This is the object which is cast to a struct isci_port object.
+ *
+ * This method will perform the actions required by the struct isci_port on
+ * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
+ * the port not ready and suspends the port task scheduler. none
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Kill the dummy task for this port if it has not yet posted
+ * the hardware will treat this as a NOP and just return abort
+ * complete.
+ */
+ sci_port_abort_dummy_request(iport);
+
+ dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+ __func__, iport->physical_port_index);
+
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->active_phy_mask == 0) {
+ dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+ __func__, iport->physical_port_index);
+
+ port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+ } else
+ port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
+}
+
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ enum sci_port_states state;
+ u32 phy_mask;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_STOPPED) {
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ if (iport->assigned_device_count > 0) {
+ /* TODO This is a start failure operation because
+ * there are still devices assigned to this port.
+ * There must be no devices assigned to a port on a
+ * start operation.
+ */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+ u16 rni = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, 1);
+
+ if (rni != SCU_DUMMY_INDEX)
+ sci_port_construct_dummy_rnc(iport, rni);
+ else
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ iport->reserved_rni = rni;
+ }
+
+ if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ u16 tag;
+
+ tag = isci_alloc_tag(ihost);
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ else
+ sci_port_construct_dummy_task(iport, tag);
+ iport->reserved_tag = tag;
+ }
+
+ if (status == SCI_SUCCESS) {
+ phy_mask = sci_port_get_phys(iport);
+
+ /*
+ * There are one or more phys assigned to this port. Make sure
+ * the port's phy mask is in fact legal and supported by the
+ * silicon.
+ */
+ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
+ port_state_machine_change(iport,
+ SCI_PORT_READY);
+
+ return SCI_SUCCESS;
+ }
+ status = SCI_FAILURE;
+ }
+
+ if (status != SCI_SUCCESS)
+ sci_port_destroy_dummy_resources(iport);
+
+ return status;
+}
+
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ case SCI_PORT_RESETTING:
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+ enum sci_status status = SCI_FAILURE_INVALID_PHY;
+ struct isci_phy *iphy = NULL;
+ enum sci_port_states state;
+ u32 phy_index;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_SUB_OPERATIONAL) {
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Select a phy on which we can send the hard reset request. */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+ iphy = iport->phy_table[phy_index];
+ if (iphy && !sci_port_active_phy(iport, iphy)) {
+ /*
+ * We found a phy but it is not ready select
+ * different phy
+ */
+ iphy = NULL;
+ }
+ }
+
+ /* If we have a phy then go ahead and start the reset procedure */
+ if (!iphy)
+ return status;
+ status = sci_phy_reset(iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_mod_timer(&iport->timer, timeout);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+ port_state_machine_change(iport, SCI_PORT_RESETTING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() -
+ * @sci_port: This parameter specifies the port in which the phy will be added.
+ * @sci_phy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will add a PHY to the selected port. This method returns an
+ * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
+ * status is a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ sci_port_bcn_enable(iport);
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED: {
+ struct sci_sas_address port_sas_address;
+
+ /* Read the port assigned SAS Address if there is one */
+ sci_port_get_sas_address(iport, &port_sas_address);
+
+ if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+ struct sci_sas_address phy_sas_address;
+
+ /* Make sure that the PHY SAS Address matches the SAS Address
+ * for this port
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+ if (port_sas_address.high != phy_sas_address.high ||
+ port_sas_address.low != phy_sas_address.low)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ return sci_port_set_phy(iport, iphy);
+ }
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+ return status;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port.
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+/**
+ * sci_port_remove_phy() -
+ * @sci_port: This parameter specifies the port in which the phy will be added.
+ * @sci_phy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will remove the PHY from the selected PORT. This method returns
+ * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
+ * other status is a failure to add the phy to the port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return sci_port_clear_phy(iport, iphy);
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_clear_phy(iport, iphy);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_deactivate_phy(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_clear_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ /* Since this is the first phy going link up for the port we
+ * can just enable it and continue
+ */
+ sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* TODO We should make sure that the phy that has gone
+ * link up is the same one on which we sent the reset. It is
+ * possible that the phy on which we sent the reset is not the
+ * one that has gone link up and we want to make sure that
+ * phy being reset comes back. Consider the case where a
+ * reset is sent but before the hardware processes the reset it
+ * get a link up on the port because of a hot plug event.
+ * because of the reset request this phy will go link down
+ * almost immediately.
+ */
+
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications.
+ */
+ sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* If there are no active phys left in the port, then
+ * transition the port to the WAITING state until such time
+ * as a phy goes link up
+ */
+ if (iport->active_phy_mask == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications. */
+ sci_port_deactivate_phy(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_SUB_OPERATIONAL:
+ iport->started_request_count++;
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_STOPPING:
+ sci_port_decrement_request_count(iport);
+
+ if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPED);
+ break;
+ case SCI_PORT_READY:
+ case SCI_PORT_RESETTING:
+ case SCI_PORT_FAILED:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_decrement_request_count(iport);
+ break;
+ case SCI_PORT_SUB_CONFIGURING:
+ sci_port_decrement_request_count(iport);
+ if (iport->started_request_count == 0) {
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+ break;
+ }
+ return SCI_SUCCESS;
+}
+
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ /* enable the port task scheduler in a suspended state */
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &=
+ ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+ rnc->ssp.is_valid = true;
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+
+ /* ensure hardware has seen the post rnc command and give it
+ * ample time to act before sending the suspend
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+ /*
+ * If we enter this state becasuse of a request to stop
+ * the port then we want to disable the hardwares port
+ * task scheduler. */
+ sci_port_disable_port_task_scheduler(iport);
+ }
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ /* Enable and suspend the port task scheduler */
+ sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+ u32 prev_state;
+
+ prev_state = iport->sm.previous_state_id;
+ if (prev_state == SCI_PORT_RESETTING)
+ isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+ else
+ dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+ __func__, iport->physical_port_index);
+
+ /* Post and suspend the dummy remote node context for this port. */
+ sci_port_post_dummy_remote_node(iport);
+
+ /* Start the ready substate machine */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+
+ sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
+{
+ int phy_index;
+ u32 phy_mask = iport->active_phy_mask;
+
+ if (timeout)
+ ++iport->hang_detect_users;
+ else if (iport->hang_detect_users > 1)
+ --iport->hang_detect_users;
+ else
+ iport->hang_detect_users = 0;
+
+ if (timeout || (iport->hang_detect_users == 0)) {
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask >> phy_index) & 1) {
+ writel(timeout,
+ &iport->phy_table[phy_index]
+ ->link_layer_registers
+ ->link_layer_hang_detection_timeout);
+ }
+ }
+ }
+}
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+ [SCI_PORT_STOPPED] = {
+ .enter_state = sci_port_stopped_state_enter,
+ .exit_state = sci_port_stopped_state_exit
+ },
+ [SCI_PORT_STOPPING] = {
+ .exit_state = sci_port_stopping_state_exit
+ },
+ [SCI_PORT_READY] = {
+ .enter_state = sci_port_ready_state_enter,
+ },
+ [SCI_PORT_SUB_WAITING] = {
+ .enter_state = sci_port_ready_substate_waiting_enter,
+ .exit_state = scic_sds_port_ready_substate_waiting_exit,
+ },
+ [SCI_PORT_SUB_OPERATIONAL] = {
+ .enter_state = sci_port_ready_substate_operational_enter,
+ .exit_state = sci_port_ready_substate_operational_exit
+ },
+ [SCI_PORT_SUB_CONFIGURING] = {
+ .enter_state = sci_port_ready_substate_configuring_enter
+ },
+ [SCI_PORT_RESETTING] = {
+ .exit_state = sci_port_resetting_state_exit
+ },
+ [SCI_PORT_FAILED] = {
+ .enter_state = sci_port_failed_state_enter,
+ }
+};
+
+void sci_port_construct(struct isci_port *iport, u8 index,
+ struct isci_host *ihost)
+{
+ sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+ iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
+ iport->physical_port_index = index;
+ iport->active_phy_mask = 0;
+ iport->enabled_phy_mask = 0;
+ iport->last_active_phy = 0;
+ iport->ready_exit = false;
+
+ iport->owning_controller = ihost;
+
+ iport->started_request_count = 0;
+ iport->assigned_device_count = 0;
+ iport->hang_detect_users = 0;
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+ sci_init_timer(&iport->timer, port_timeout);
+
+ iport->port_task_scheduler_registers = NULL;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ iport->phy_table[index] = NULL;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* notify the user. */
+ isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
+{
+ wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ enum sci_status status;
+ int ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+ __func__, iport);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ set_bit(IPORT_RESET_PENDING, &iport->state);
+
+ #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status == SCI_SUCCESS) {
+ wait_port_reset(ihost, iport);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset completion\n",
+ __func__, iport);
+
+ if (iport->hard_reset_status != SCI_SUCCESS) {
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset failed (0x%x)\n",
+ __func__, iport, iport->hard_reset_status);
+ }
+ } else {
+ clear_bit(IPORT_RESET_PENDING, &iport->state);
+ wake_up(&ihost->eventq);
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; sci_port_hard_reset call"
+ " failed 0x%x\n",
+ __func__, iport, status);
+
+ }
+ return ret;
+}
+
+int isci_ata_check_ready(struct domain_device *dev)
+{
+ struct isci_port *iport = dev->port->lldd_port;
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev)
+ goto out;
+
+ if (test_bit(IPORT_RESET_PENDING, &iport->state))
+ goto out;
+
+ rc = !!iport->active_phy_mask;
+ out:
+ isci_put_device(idev);
+
+ return rc;
+}
+
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+ struct isci_host *ihost = phy->ha->lldd_ha;
+ struct isci_port *iport = phy->port->lldd_port;
+ unsigned long flags;
+ int i;
+
+ /* we got a port notification on a port that was subsequently
+ * torn down and libsas is just now catching up
+ */
+ if (!iport)
+ return;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ if (iport->active_phy_mask & 1 << i)
+ break;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (i >= SCI_MAX_PHYS)
+ dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
+ __func__, (long) (iport - &ihost->ports[0]));
+}
+
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+ struct isci_host *ihost = phy->ha->lldd_ha;
+ struct isci_phy *iphy = to_iphy(phy);
+ struct asd_sas_port *port = phy->port;
+ struct isci_port *iport = NULL;
+ unsigned long flags;
+ int i;
+
+ /* initial ports are formed as the driver is still initializing,
+ * wait for that process to complete
+ */
+ wait_for_start(ihost);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ iport = &ihost->ports[i];
+ if (iport->active_phy_mask & 1 << iphy->phy_index)
+ break;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (i >= SCI_MAX_PORTS)
+ iport = NULL;
+
+ port->lldd_port = iport;
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 000000000..861e8f728
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
+#define SCIC_SDS_DUMMY_PORT 0xFF
+
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+ isci_freed = 0x00,
+ isci_starting = 0x01,
+ isci_ready = 0x02,
+ isci_ready_for_io = 0x03,
+ isci_stopping = 0x04,
+ isci_stopped = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ * need to take extra port-teardown actions that are
+ * skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: phy mask for the port
+ * that are already part of the port
+ * @reserved_tag:
+ * @reserved_rni: reserver for port task scheduler workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: set during state transitions and notified
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+ struct isci_host *isci_host;
+ struct list_head remote_dev_list;
+ #define IPORT_RESET_PENDING 0
+ unsigned long state;
+ enum sci_status hard_reset_status;
+ struct sci_base_state_machine sm;
+ bool ready_exit;
+ u8 logical_port_index;
+ u8 physical_port_index;
+ u8 active_phy_mask;
+ u8 enabled_phy_mask;
+ u8 last_active_phy;
+ u16 reserved_rni;
+ u16 reserved_tag;
+ u32 started_request_count;
+ u32 assigned_device_count;
+ u32 hang_detect_users;
+ u32 not_ready_reason;
+ struct isci_phy *phy_table[SCI_MAX_PHYS];
+ struct isci_host *owning_controller;
+ struct sci_timer timer;
+ struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+ /* XXX rework: only one register, no need to replicate per-port */
+ u32 __iomem *port_pe_configuration_register;
+ struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+ SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+ SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+ SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+ SCIC_PORT_NOT_READY_RECONFIGURING,
+
+ SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+ struct sci_sas_address sas_address;
+ struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+ u32 index;
+ struct sci_port_end_point_properties local;
+ struct sci_port_end_point_properties remote;
+ u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - port state machine states
+ * @SCI_PORT_STOPPED: port has successfully been stopped. In this state
+ * no new IO operations are permitted. This state is
+ * entered from the STOPPING state.
+ * @SCI_PORT_STOPPING: port is in the process of stopping. In this
+ * state no new IO operations are permitted, but
+ * existing IO operations are allowed to complete.
+ * This state is entered from the READY state.
+ * @SCI_PORT_READY: port is now ready. Thus, the user is able to
+ * perform IO operations on this port. This state is
+ * entered from the STARTING state.
+ * @SCI_PORT_SUB_WAITING: port is started and ready but has no active
+ * phys.
+ * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
+ * least one phy operational.
+ * @SCI_PORT_SUB_CONFIGURING: port is started and there was an
+ * add/remove phy event. This state is only
+ * used in Automatic Port Configuration Mode
+ * (APC)
+ * @SCI_PORT_RESETTING: port is in the process of performing a hard
+ * reset. Thus, the user is unable to perform IO
+ * operations on this port. This state is entered
+ * from the READY state.
+ * @SCI_PORT_FAILED: port has failed a reset request. This state is
+ * entered when a port reset request times out. This
+ * state is entered from the RESETTING state.
+ */
+#define PORT_STATES {\
+ C(PORT_STOPPED),\
+ C(PORT_STOPPING),\
+ C(PORT_READY),\
+ C(PORT_SUB_WAITING),\
+ C(PORT_SUB_OPERATIONAL),\
+ C(PORT_SUB_CONFIGURING),\
+ C(PORT_RESETTING),\
+ C(PORT_FAILED),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_port_states PORT_STATES;
+#undef C
+
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+ if (WARN_ONCE(iport->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ iport->started_request_count--;
+}
+
+#define sci_port_active_phy(port, phy) \
+ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+ struct isci_port *iport,
+ u8 port_index,
+ struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+ struct isci_port *iport,
+ u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user);
+
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_get_properties(
+ struct isci_port *iport,
+ struct sci_port_properties *prop);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+ struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+ struct isci_port *iport,
+ u32 phy_index);
+
+void sci_port_get_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+void sci_port_set_hang_detection_timeout(
+ struct isci_port *isci_port,
+ u32 timeout);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+int isci_ata_check_ready(struct domain_device *dev);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 000000000..ac879745e
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,760 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000)
+
+enum SCIC_SDS_APC_ACTIVITY {
+ SCIC_SDS_APC_SKIP_PHY,
+ SCIC_SDS_APC_ADD_PHY,
+ SCIC_SDS_APC_START_TIMER,
+
+ SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ *
+ * @address_one: A SAS Address to be compared.
+ * @address_two: A SAS Address to be compared.
+ *
+ * Compare the two SAS Address and if SAS Address One is greater than SAS
+ * Address Two then return > 0 else if SAS Address One is less than SAS Address
+ * Two return < 0 Otherwise they are the same return 0 A signed value of x > 0
+ * > y where x is returned for Address One > Address Two y is returned for
+ * Address One < Address Two 0 is returned ofr Address One = Address Two
+ */
+static s32 sci_sas_address_compare(
+ struct sci_sas_address address_one,
+ struct sci_sas_address address_two)
+{
+ if (address_one.high > address_two.high) {
+ return 1;
+ } else if (address_one.high < address_two.high) {
+ return -1;
+ } else if (address_one.low > address_two.low) {
+ return 1;
+ } else if (address_one.low < address_two.low) {
+ return -1;
+ }
+
+ /* The two SAS Address must be identical */
+ return 0;
+}
+
+/**
+ *
+ * @controller: The controller object used for the port search.
+ * @phy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy. This means that the
+ * port and phy both have the same broadcast sas address and same received sas
+ * address. The port address or the NULL if there is no matching
+ * port. port address if the port can be found to match the phy.
+ * NULL if there is no matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+ struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ u8 i;
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address port_attached_device_address;
+ struct sci_sas_address phy_sas_address;
+ struct sci_sas_address phy_attached_device_address;
+
+ /*
+ * Since this phy can be a member of a wide port check to see if one or
+ * more phys match the sent and received SAS address as this phy in which
+ * case it should participate in the same port.
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ sci_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+ if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+ sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+ return iport;
+ }
+
+ return NULL;
+}
+
+/**
+ *
+ * @controller: This is the controller object that contains the port agent
+ * @port_agent: This is the port configruation agent for the controller.
+ *
+ * This routine will validate the port configuration is correct for the SCU
+ * hardware. The SCU hardware allows for port configurations as follows. LP0
+ * -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3) LP1 -> (PE1) LP2 -> (PE2), (PE2,
+ * PE3) LP3 -> (PE3) enum sci_status SCI_SUCCESS the port configuration is valid for
+ * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION
+ * the port configuration is not valid for this port configuration agent.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ struct sci_sas_address first_address;
+ struct sci_sas_address second_address;
+
+ /*
+ * Sanity check the max ranges for all the phys the max index
+ * is always equal to the port range index */
+ if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+ port_agent->phy_valid_port_range[1].max_index != 1 ||
+ port_agent->phy_valid_port_range[2].max_index != 2 ||
+ port_agent->phy_valid_port_range[3].max_index != 3)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+ /*
+ * This is a request to configure a single x4 port or at least attempt
+ * to make all the phys into a single port */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 0 &&
+ port_agent->phy_valid_port_range[2].min_index == 0 &&
+ port_agent->phy_valid_port_range[3].min_index == 0)
+ return SCI_SUCCESS;
+
+ /*
+ * This is a degenerate case where phy 1 and phy 2 are assigned
+ * to the same port this is explicitly disallowed by the hardware
+ * unless they are part of the same x4 port and this condition was
+ * already checked above. */
+ if (port_agent->phy_valid_port_range[2].min_index == 1) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE3 can never have the same SAS Address unless they
+ * are part of the same x4 wide port and we have already checked
+ * for this condition. */
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE1 are configured into a 2x1 ports make sure that the
+ * SAS Address for PE0 and PE2 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 1) {
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ /*
+ * PE2 and PE3 are configured into a 2x1 ports make sure that the
+ * SAS Address for PE1 and PE3 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+ port_agent->phy_valid_port_range[3].min_index == 3) {
+ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 phy_mask;
+ u32 assigned_phy_mask;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+ u8 port_index;
+ u8 phy_index;
+
+ assigned_phy_mask = 0;
+ sas_address.high = 0;
+ sas_address.low = 0;
+
+ for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+ if (!phy_mask)
+ continue;
+ /*
+ * Make sure that one or more of the phys were not already assinged to
+ * a different port. */
+ if ((phy_mask & ~assigned_phy_mask) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /* Find the starting phy index for this round through the loop */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ /*
+ * The phy_index can be used as the starting point for the
+ * port range since the hardware starts all logical ports
+ * the same as the PE index. */
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ if (phy_index != port_index) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ break;
+ }
+
+ /*
+ * See how many additional phys are being added to this logical port.
+ * Note: We have not moved the current phy_index so we will actually
+ * compare the startting phy with itself.
+ * This is expected and required to add the phy to the port. */
+ while (phy_index < SCI_MAX_PHYS) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+ /*
+ * The phy mask specified that this phy is part of the same port
+ * as the starting phy and it is not so fail this configuration */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ sci_port_add_phy(&ihost->ports[port_index],
+ &ihost->phys[phy_index]);
+
+ assigned_phy_mask |= (1 << phy_index);
+ phy_index++;
+ }
+
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+ u8 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ /* Find the mask of phys that are reported read but as yet unconfigured into a port */
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = &ihost->phys[index];
+
+ if (configure_phy_mask & (1 << index)) {
+ port_agent->link_up_handler(ihost, port_agent,
+ phy_get_non_dummy_port(iphy),
+ iphy);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ /* If the port is NULL then the phy was not assigned to a port.
+ * This is because the phy was not given the same SAS Address as
+ * the other PHYs in the port.
+ */
+ if (!iport)
+ return;
+
+ port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+ sci_port_link_up(iport, iphy);
+ if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ * notification.
+ * @port: This is the port object associated with the phy. If the is no
+ * associated port this is an NULL. The port is an invalid
+ * handle only if the phy was never port of this port. This happens when
+ * the phy is not broadcasting the same SAS address as the other phys in the
+ * assigned port.
+ * @phy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down notifications.
+ * Since all ports and phys are associated at initialization time we just turn
+ * around and notifiy the port object of the link down event. If this PHY is
+ * not associated with a port there is no action taken. Is it possible to get a
+ * link down notification from a phy that has no assocoated port?
+ */
+static void sci_mpc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport != NULL) {
+ /*
+ * If we can form a new port from the remainder of the phys
+ * then we want to start the timer to allow the SCI User to
+ * cleanup old devices and rediscover the port before
+ * rebuilding the port with the phys that remain in the ready
+ * state.
+ */
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+ /*
+ * Check to see if there are more phys waiting to be
+ * configured into a port. If there are allow the SCI User
+ * to tear down this port, if necessary, and then reconstruct
+ * the port after the timeout.
+ */
+ if ((port_agent->phy_configured_mask == 0x0000) &&
+ (port_agent->phy_ready_mask != 0x0000) &&
+ !port_agent->timer_pending) {
+ port_agent->timer_pending = true;
+
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+ }
+
+ sci_port_link_down(iport, iphy);
+ }
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u8 phy_index;
+ u8 port_index;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+
+ phy_index = 0;
+
+ while (phy_index < SCI_MAX_PHYS) {
+ port_index = phy_index;
+
+ /* Get the assigned SAS Address for the first PHY on the controller. */
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ while (++phy_index < SCI_MAX_PHYS) {
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ /* Verify each of the SAS address are all the same for every PHY */
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ } else {
+ port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ break;
+ }
+ }
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we can not yet tell to which a phy
+ * belongs.
+ */
+static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
+{
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer, timeout);
+}
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_phy *iphy,
+ bool start_timer)
+{
+ u8 port_index;
+ enum sci_status status;
+ struct isci_port *iport;
+ enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+ iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+ if (iport) {
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ else
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ } else {
+ /*
+ * There is no matching Port for this PHY so lets search through the
+ * Ports and see if we can add the PHY to its own port or maybe start
+ * the timer and wait to see if a wider port can be made.
+ *
+ * Note the break when we reach the condition of the port id == phy id */
+ for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+ port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+ port_index++) {
+
+ iport = &ihost->ports[port_index];
+
+ /* First we must make sure that this PHY can be added to this Port. */
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /*
+ * Port contains a PHY with a greater PHY ID than the current
+ * PHY that has gone link up. This phy can not be part of any
+ * port so skip it and move on. */
+ if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ break;
+ }
+
+ /*
+ * We have reached the end of our Port list and have not found
+ * any reason why we should not either add the PHY to the port
+ * or wait for more phys to become active. */
+ if (iport->physical_port_index == iphy->phy_index) {
+ /*
+ * The Port either has no active PHYs.
+ * Consider that if the port had any active PHYs we would have
+ * or active PHYs with
+ * a lower PHY Id than this PHY. */
+ if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ break;
+ }
+
+ /*
+ * The current Port has no active PHYs and this PHY could be part
+ * of this Port. Since we dont know as yet setup to start the
+ * timer and see if there is a better configuration. */
+ if (iport->active_phy_mask == 0) {
+ apc_activity = SCIC_SDS_APC_START_TIMER;
+ }
+ } else if (iport->active_phy_mask != 0) {
+ /*
+ * The Port has an active phy and the current Phy can not
+ * participate in this port so skip the PHY and see if
+ * there is a better configuration. */
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ }
+ }
+ }
+
+ /*
+ * Check to see if the start timer operations should instead map to an
+ * add phy operation. This is caused because we have been waiting to
+ * add a phy to a port but could not becuase the automatic port
+ * configuration engine had a choice of possible ports for the phy.
+ * Since we have gone through a timeout we are going to restrict the
+ * choice to the smallest possible port. */
+ if (
+ (start_timer == false)
+ && (apc_activity == SCIC_SDS_APC_START_TIMER)
+ ) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ switch (apc_activity) {
+ case SCIC_SDS_APC_ADD_PHY:
+ status = sci_port_add_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS) {
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+ }
+ break;
+
+ case SCIC_SDS_APC_START_TIMER:
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ break;
+
+ case SCIC_SDS_APC_SKIP_PHY:
+ default:
+ /* do nothing the PHY can not be made part of a port at this time. */
+ break;
+ }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @scic: This is the controller object that receives the link up
+ * notification.
+ * @sci_port: This is the port object associated with the phy. If the is no
+ * associated port this is an NULL.
+ * @sci_phy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications. Is it possible to get a link down notification from a phy
+ * that has no assocoated port?
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ u8 phy_index = iphy->phy_index;
+
+ if (!iport) {
+ /* the phy is not the part of this port */
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ } else {
+ /* the phy is already the part of the port */
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ * notification.
+ * @iport: This is the port object associated with the phy. If the is no
+ * associated port this is an NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications. not associated with a port there is no action taken. Is it
+ * possible to get a link down notification from a phy that has no assocoated
+ * port?
+ */
+static void sci_apc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+ if (!iport)
+ return;
+ if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+ enum sci_status status;
+
+ status = sci_port_remove_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS)
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+ }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+ u32 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ if (!configure_phy_mask)
+ goto done;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if ((configure_phy_mask & (1 << index)) == 0)
+ continue;
+
+ sci_apc_agent_configure_ports(ihost, port_agent,
+ &ihost->phys[index], false);
+ }
+
+ if (is_controller_start_complete(ihost))
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ *
+ *
+ * This method will construct the port configuration agent for operation. This
+ * call is universal for both manual port configuration and automatic port
+ * configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 index;
+
+ port_agent->phy_configured_mask = 0x00;
+ port_agent->phy_ready_mask = 0x00;
+
+ port_agent->link_up_handler = NULL;
+ port_agent->link_down_handler = NULL;
+
+ port_agent->timer_pending = false;
+
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ port_agent->phy_valid_port_range[index].min_index = 0;
+ port_agent->phy_valid_port_range[index].max_index = 0;
+ }
+}
+
+bool is_port_config_apc(struct isci_host *ihost)
+{
+ return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ enum sci_status status;
+ enum sci_port_configuration_mode mode;
+
+ mode = ihost->oem_parameters.controller.mode_type;
+
+ if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ status = sci_mpc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_mpc_agent_link_up;
+ port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+ } else {
+ status = sci_apc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_apc_agent_link_up;
+ port_agent->link_down_handler = sci_apc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, apc_agent_timeout);
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 000000000..8ac646e5e
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,230 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static efi_char16_t isci_efivar_name[] = {
+ 'R', 's', 't', 'S', 'c', 'u', 'O'
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+ void __iomem *oprom = pci_map_biosrom(pdev);
+ struct isci_orom *rom = NULL;
+ size_t len, i;
+ int j;
+ char oem_sig[4];
+ struct isci_oem_hdr oem_hdr;
+ u8 *tmp, sum;
+
+ if (!oprom)
+ return NULL;
+
+ len = pci_biosrom_size(pdev);
+ rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+ if (!rom) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for orom\n");
+ return NULL;
+ }
+
+ for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+ memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+ /* we think we found the OEM table */
+ if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+ size_t copy_len;
+
+ memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+ copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+ sizeof(*rom));
+
+ memcpy_fromio(rom,
+ oprom + i + sizeof(oem_hdr),
+ copy_len);
+
+ /* calculate checksum */
+ tmp = (u8 *)&oem_hdr;
+ for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+ sum += *tmp;
+
+ tmp = (u8 *)rom;
+ for (j = 0; j < sizeof(*rom); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ continue;
+ }
+
+ /* keep going if that's not the oem param table */
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0)
+ continue;
+
+ dev_info(&pdev->dev,
+ "OEM parameter table found in OROM\n");
+ break;
+ }
+ }
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+
+ return rom;
+}
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+ struct isci_orom *orom = NULL, *data;
+ int i, j;
+
+ if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+ return NULL;
+
+ if (fw->size < sizeof(*orom))
+ goto out;
+
+ data = (struct isci_orom *)fw->data;
+
+ if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+ strlen(ISCI_ROM_SIG)) != 0)
+ goto out;
+
+ orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+ if (!orom)
+ goto out;
+
+ memcpy(orom, fw->data, fw->size);
+
+ if (is_c0(pdev) || is_c1(pdev))
+ goto out;
+
+ /*
+ * deprecated: override default amp_control for pre-preproduction
+ * silicon revisions
+ */
+ for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+ for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+ orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+ }
+ out:
+ release_firmware(fw);
+
+ return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+ return &efi;
+#else
+ return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+ efi_status_t status;
+ struct isci_orom *rom;
+ struct isci_oem_hdr *oem_hdr;
+ u8 *tmp, sum;
+ int j;
+ unsigned long data_len;
+ u8 *efi_data;
+ u32 efi_attrib = 0;
+
+ data_len = 1024;
+ efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+ if (!efi_data) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for EFI data\n");
+ return NULL;
+ }
+
+ rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+ if (get_efi())
+ status = get_efi()->get_variable(isci_efivar_name,
+ &ISCI_EFI_VENDOR_GUID,
+ &efi_attrib,
+ &data_len,
+ efi_data);
+ else
+ status = EFI_NOT_FOUND;
+
+ if (status != EFI_SUCCESS) {
+ dev_warn(&pdev->dev,
+ "Unable to obtain EFI var data for OEM parms\n");
+ return NULL;
+ }
+
+ oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+ if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM header signature\n");
+ return NULL;
+ }
+
+ /* calculate checksum */
+ tmp = (u8 *)efi_data;
+ for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ return NULL;
+ }
+
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM table signature\n");
+ return NULL;
+ }
+
+ return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 000000000..e08b57824
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,330 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED 0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+ struct sci_phy_user_params {
+ /**
+ * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+ * insertion frequency for this phy index.
+ */
+ u32 notify_enable_spin_up_insertion_frequency;
+
+ /**
+ * This method specifies the number of transmitted DWORDs within which
+ * to transmit a single ALIGN primitive. This value applies regardless
+ * of what type of device is attached or connection state. A value of
+ * 0 indicates that no ALIGN primitives will be inserted.
+ */
+ u16 align_insertion_frequency;
+
+ /**
+ * This method specifies the number of transmitted DWORDs within which
+ * to transmit 2 ALIGN primitives. This applies for SAS connections
+ * only. A minimum value of 3 is required for this field.
+ */
+ u16 in_connection_align_insertion_frequency;
+
+ /**
+ * This field indicates the maximum speed generation to be utilized
+ * by phys in the supplied port.
+ * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+ * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+ * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+ */
+ u8 max_speed_generation;
+
+ } phys[SCI_MAX_PHYS];
+
+ /**
+ * This field specifies the maximum number of direct attached devices
+ * that can have power supplied to them simultaneously.
+ */
+ u8 max_concurr_spinup;
+
+ /**
+ * This field specifies the number of seconds to allow a phy to consume
+ * power before yielding to another phy.
+ *
+ */
+ u8 phy_spin_up_delay_interval;
+
+ /**
+ * These timer values specifies how long a link will remain open with no
+ * activity in increments of a microsecond, it can be in increments of
+ * 100 microseconds if the upper most bit is set.
+ *
+ */
+ u16 stp_inactivity_timeout;
+ u16 ssp_inactivity_timeout;
+
+ /**
+ * These timer values specifies how long a link will remain open in increments
+ * of 100 microseconds.
+ *
+ */
+ u16 stp_max_occupancy_timeout;
+ u16 ssp_max_occupancy_timeout;
+
+ /**
+ * This timer value specifies how long a link will remain open with no
+ * outbound traffic in increments of a microsecond.
+ *
+ */
+ u8 no_outbound_task_timeout;
+
+};
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+ u8 sig[4];
+ u8 rev_major;
+ u8 rev_minor;
+ u16 len;
+ u8 checksum;
+ u8 reserved1;
+ u16 reserved2;
+} __attribute__ ((packed));
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME "isci/isci_firmware.bin"
+
+#define ROMSIGNATURE 0xaa55
+
+#define ISCI_OEM_SIG "$OEM"
+#define ISCI_OEM_SIG_SIZE 4
+#define ISCI_ROM_SIG "ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE 8
+
+#define ISCI_EFI_VENDOR_GUID \
+ EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+ 0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME "RstScuO"
+
+#define ISCI_ROM_VER_1_0 0x10
+#define ISCI_ROM_VER_1_1 0x11
+#define ISCI_ROM_VER_1_3 0x13
+#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
+
+/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
+ * defined by the OEM configuration parameters providing no PHY_MASK parameters
+ * for any PORT. i.e. There are no phys assigned to any of the ports at start.
+ * MPC Manual PORT configuration mode is defined by the OEM configuration
+ * parameters providing a PHY_MASK value for any PORT. It is assumed that any
+ * PORT with no PHY_MASK is an invalid port and not all PHYs must be assigned.
+ * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs
+ * being assigned is sufficient to declare manual PORT configuration.
+ */
+enum sci_port_configuration_mode {
+ SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
+
+struct sci_bios_oem_param_block_hdr {
+ uint8_t signature[ISCI_ROM_SIG_SIZE];
+ uint16_t total_block_length;
+ uint8_t hdr_length;
+ uint8_t version;
+ uint8_t preboot_source;
+ uint8_t num_elements;
+ uint16_t element_length;
+ uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+ struct {
+ uint8_t mode_type;
+ uint8_t max_concurr_spin_up;
+ /*
+ * This bitfield indicates the OEM's desired default Tx
+ * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+ * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+ */
+ union {
+ struct {
+ /*
+ * NOTE: Max spread for SATA is +0 / -5000 PPM.
+ * Down-spreading SSC (only method allowed for SATA):
+ * SATA SSC Tx Disabled = 0x0
+ * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
+ * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
+ * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
+ */
+ uint8_t ssc_sata_tx_spread_level:4;
+ /*
+ * SAS SSC Tx Disabled = 0x0
+ *
+ * NOTE: Max spread for SAS down-spreading +0 /
+ * -2300 PPM
+ * Down-spreading SSC:
+ * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
+ *
+ * NOTE: Max spread for SAS center-spreading +2300 /
+ * -2300 PPM
+ * Center-spreading SSC:
+ * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
+ * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
+ */
+ uint8_t ssc_sas_tx_spread_level:3;
+ /*
+ * NOTE: Refer to the SSC section of the SAS 2.x
+ * Specification for proper setting of this field.
+ * For standard SAS Initiator SAS PHY operation it
+ * should be 0 for Down-spreading.
+ * SAS SSC Tx spread type:
+ * Down-spreading SSC = 0
+ * Center-spreading SSC = 1
+ */
+ uint8_t ssc_sas_tx_type:1;
+ };
+ uint8_t do_enable_ssc;
+ };
+ /*
+ * This field indicates length of the SAS/SATA cable between
+ * host and device.
+ * This field is used make relationship between analog
+ * parameters of the phy in the silicon and length of the cable.
+ * Supported cable attenuation levels:
+ * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than
+ * 6m.
+ *
+ * This is bit mask field:
+ *
+ * BIT: (MSB) 7 6 5 4
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable
+ * length assignment
+ * BIT: 3 2 1 0 (LSB)
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length
+ * assignment
+ *
+ * BITS 7-4 are set when the cable length is assigned to medium
+ * BITS 3-0 are set when the cable length is assigned to long
+ *
+ * The BIT positions are clear when the cable length is
+ * assigned to short.
+ *
+ * Setting the bits for both long and medium cable length is
+ * undefined.
+ *
+ * A value of 0x84 would assign
+ * phy3 - medium
+ * phy2 - long
+ * phy1 - short
+ * phy0 - short
+ */
+ uint8_t cable_selection_mask;
+ } controller;
+
+ struct {
+ uint8_t phy_mask;
+ } ports[SCI_MAX_PORTS];
+
+ struct sci_phy_oem_params {
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } sas_address;
+
+ uint32_t afe_tx_amp_control0;
+ uint32_t afe_tx_amp_control1;
+ uint32_t afe_tx_amp_control2;
+ uint32_t afe_tx_amp_control3;
+ } phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct isci_orom {
+ struct sci_bios_oem_param_block_hdr hdr;
+ struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 000000000..97f3ceb8d
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1863 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/**
+ * This file contains the constants and structures for the SCU memory mapped
+ * registers.
+ *
+ *
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR \
+ (\
+ SCU_VIIT_IPPT_SSP_INITIATOR \
+ | SCU_VIIT_IPPT_SMP_INITIATOR \
+ | SCU_VIIT_IPPT_STP_INITIATOR \
+ )
+
+#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID \
+ (\
+ SCU_VIIT_STATUS_RNC_VALID \
+ | SCU_VIIT_STATUS_ADDRESS_VALID \
+ | SCU_VIIT_STATUS_RNI_VALID \
+ )
+
+#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+ /**
+ * This must be encoded as to the type of initiator that is being constructed
+ * for this port.
+ */
+ u32 status;
+
+ /**
+ * Virtual initiator high SAS Address
+ */
+ u32 initiator_sas_address_hi;
+
+ /**
+ * Virtual initiator low SAS Address
+ */
+ u32 initiator_sas_address_lo;
+
+ /**
+ * This must be 0
+ */
+ u32 reserved;
+
+};
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ * virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+ u32 status;
+ u32 remote_initiator_sas_address_hi;
+ u32 remote_initiator_sas_address_lo;
+ u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+ (((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+ SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+ ((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+ ((reg_value)$ ~(SCU_GEN_BIT(name)))
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value) \
+ SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_LP(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_TC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+ )
+
+/* ***************************************************************************** */
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000)
+#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0)
+
+#define SMU_CGUCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
+
+#define SMU_CGUCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+ (\
+ SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+ ((1 << (pe)) << ((peg) * 8))
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+ (\
+ SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+ )
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+ ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
+
+/*
+ * It seems to make sense that if you are going to reset the protocol
+ * engine group that you would also reset all of the protocol engines */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+ (\
+ (1 << ((peg) + 20)) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+ | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+ )
+
+#define SMU_RESET_SCU() (0xFFFFFFFF)
+
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+ SCU_UFQGP_GEN_BIT(CYCLE_BIT, value)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+ SCU_UFQGP_GEN_VALUE(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value) \
+ (SCU_UFQGP_GEN_BIT(ENABLE) | value)
+
+#define SCU_UFQGP_DISABLE(value) \
+ (~SCU_UFQGP_GEN_BIT(ENABLE) & value)
+
+#define SCU_UFQGP_VALUE(bit, value) \
+ (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
+
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+ SCU_PCMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV regsiter?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0
+ *
+ * To calculate the offset for other functions use
+ * BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessable from FN#0 (Physical Function) and each
+ * is programmed by (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)) or
+ * TCA0 for FN#0 is at BAR0 + 0x0400
+ * TCA1 for FN#1 is at BAR0 + 0x0404
+ * etc.
+ * ----------------------------------------------------------------------------
+ * Accessable to all FN#s */
+#define SCU_SMU_PCP_OFFSET 0x0000
+#define SCU_SMU_AMR_OFFSET 0x0004
+#define SCU_SMU_ISR_OFFSET 0x0010
+#define SCU_SMU_IMR_OFFSET 0x0014
+#define SCU_SMU_ICC_OFFSET 0x0018
+#define SCU_SMU_HTTLBAR_OFFSET 0x0020
+#define SCU_SMU_HTTUBAR_OFFSET 0x0024
+#define SCU_SMU_TCR_OFFSET 0x0028
+#define SCU_SMU_CQLBAR_OFFSET 0x0030
+#define SCU_SMU_CQUBAR_OFFSET 0x0034
+#define SCU_SMU_CQPR_OFFSET 0x0040
+#define SCU_SMU_CQGR_OFFSET 0x0044
+#define SCU_SMU_CQC_OFFSET 0x0048
+/* Accessable to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET 0x0080
+#define SCU_SMU_RNCUBAR_OFFSET 0x0084
+#define SCU_SMU_DCC_OFFSET 0x0090
+#define SCU_SMU_DFC_OFFSET 0x0094
+#define SCU_SMU_SMUCSR_OFFSET 0x0098
+#define SCU_SMU_SCUSRCR_OFFSET 0x009C
+#define SCU_SMU_SMAW_OFFSET 0x00A0
+#define SCU_SMU_SMDW_OFFSET 0x00A4
+/* Accessable to FN#0 only */
+#define SCU_SMU_TCA_OFFSET 0x0400
+/* Accessable to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
+#define SCU_SMU_MT_MDR0_OFFSET 0x2008
+#define SCU_SMU_MT_VCR0_OFFSET 0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
+#define SCU_SMU_MT_MDR1_OFFSET 0x2018
+#define SCU_SMU_MT_VCR1_OFFSET 0x201C
+#define SCU_SMU_MPBA_OFFSET 0x3000
+
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+ u32 post_context_port;
+/* 0x0004 AMR */
+ u32 address_modifier;
+ u32 reserved_08;
+ u32 reserved_0C;
+/* 0x0010 ISR */
+ u32 interrupt_status;
+/* 0x0014 IMR */
+ u32 interrupt_mask;
+/* 0x0018 ICC */
+ u32 interrupt_coalesce_control;
+ u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+ u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+ u32 host_task_table_upper;
+/* 0x0028 TCR */
+ u32 task_context_range;
+ u32 reserved_2C;
+/* 0x0030 CQLBAR */
+ u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+ u32 completion_queue_upper;
+ u32 reserved_38;
+ u32 reserved_3C;
+/* 0x0040 CQPR */
+ u32 completion_queue_put;
+/* 0x0044 CQGR */
+ u32 completion_queue_get;
+/* 0x0048 CQC */
+ u32 completion_queue_control;
+ u32 reserved_4C;
+ u32 reserved_5x[4];
+ u32 reserved_6x[4];
+ u32 reserved_7x[4];
+/*
+ * Accessable to FN#0 only
+ * 0x0080 RNCLBAR */
+ u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+ u32 remote_node_context_upper;
+ u32 reserved_88;
+ u32 reserved_8C;
+/* 0x0090 DCC */
+ u32 device_context_capacity;
+/* 0x0094 DFC */
+ u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+ u32 control_status;
+/* 0x009C SCUSRCR */
+ u32 soft_reset_control;
+/* 0x00A0 SMAW */
+ u32 mmr_address_window;
+/* 0x00A4 SMDW */
+ u32 mmr_data_window;
+/* 0x00A8 CGUCR */
+ u32 clock_gating_control;
+/* 0x00AC CGUPC */
+ u32 clock_gating_performance;
+/* A whole bunch of reserved space */
+ u32 reserved_Bx[4];
+ u32 reserved_Cx[4];
+ u32 reserved_Dx[4];
+ u32 reserved_Ex[4];
+ u32 reserved_Fx[4];
+ u32 reserved_1xx[64];
+ u32 reserved_2xx[64];
+ u32 reserved_3xx[64];
+/*
+ * Accessable to FN#0 only
+ * 0x0400 TCA */
+ u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
+
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE 0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
+#define SCU_SDMA_UFQC_OFFSET 0x0010
+#define SCU_SDMA_UFQPP_OFFSET 0x0014
+#define SCU_SDMA_UFQGP_OFFSET 0x0018
+#define SCU_SDMA_PDMACR_OFFSET 0x001C
+#define SCU_SDMA_CDMACR_OFFSET 0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+ u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+ u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+ u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+ u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+ u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+ u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+ u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+ u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+ u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+ u32 cdma_configuration;
+/* Remainder SDMA register space */
+ u32 reserved_0084_0400[0xDF];
+
+};
+
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET 0x0000
+#define SCU_PEG1_OFFSET 0x8000
+
+#define SCU_TL0_OFFSET 0x0000
+#define SCU_TL1_OFFSET 0x0400
+#define SCU_TL2_OFFSET 0x0800
+#define SCU_TL3_OFFSET 0x0C00
+
+#define SCU_LL_OFFSET 0x0080
+#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
+
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET 0x0000
+#define SCU_TLADTR_OFFSET 0x0004
+#define SCU_TLTTMR_OFFSET 0x0008
+#define SCU_TLEECR0_OFFSET 0x000C
+#define SCU_STPTLDARNI_OFFSET 0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
+#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ * registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+ /* 0x0000 TLCR */
+ u32 control;
+ /* 0x0004 TLADTR */
+ u32 arbitration_delay_timer;
+ /* 0x0008 TLTTMR */
+ u32 timer_test_mode;
+ /* 0x000C reserved */
+ u32 reserved_0C;
+ /* 0x0010 STPTLDARNI */
+ u32 stp_rni;
+ /* 0x0014 TLFEWPORCTRL */
+ u32 tlfe_wpo_read_control;
+ /* 0x0018 TLFEWPORDATA */
+ u32 tlfe_wpo_read_data;
+ /* 0x001C RXTLSSCSR1 */
+ u32 rxtl_single_step_control_status_1;
+ /* 0x0020 RXTLSSCSR2 */
+ u32 rxtl_single_step_control_status_2;
+ /* 0x0024 AWTRDDCR */
+ u32 tlfe_awt_retry_delay_debug_control;
+ /* Remainder of TL memory space */
+ u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET 0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET 0x0000
+#define SCU_SAS_LLSTA_OFFSET 0x0004
+#define SCU_SATA_PSELTOV_OFFSET 0x0008
+#define SCU_SAS_TIMETOV_OFFSET 0x0010
+#define SCU_SAS_LOSTOT_OFFSET 0x0014
+#define SCU_SAS_LNKTOV_OFFSET 0x0018
+#define SCU_SAS_PHYTOV_OFFSET 0x001C
+#define SCU_SAS_AFERCNT_OFFSET 0x0020
+#define SCU_SAS_WERCNT_OFFSET 0x0024
+#define SCU_SAS_TIID_OFFSET 0x0028
+#define SCU_SAS_TIDNH_OFFSET 0x002C
+#define SCU_SAS_TIDNL_OFFSET 0x0030
+#define SCU_SAS_TISSAH_OFFSET 0x0034
+#define SCU_SAS_TISSAL_OFFSET 0x0038
+#define SCU_SAS_TIPID_OFFSET 0x003C
+#define SCU_SAS_TIRES2_OFFSET 0x0040
+#define SCU_SAS_ADRSTA_OFFSET 0x0044
+#define SCU_SAS_MAWTTOV_OFFSET 0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
+#define SCU_SAS_RFCNT_OFFSET 0x0060
+#define SCU_SAS_TFCNT_OFFSET 0x0064
+#define SCU_SAS_RFDCNT_OFFSET 0x0068
+#define SCU_SAS_TFDCNT_OFFSET 0x006C
+#define SCU_SAS_LERCNT_OFFSET 0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
+#define SCU_SAS_CRERCNT_OFFSET 0x0078
+#define SCU_STPCTL_OFFSET 0x007C
+#define SCU_SAS_PCFG_OFFSET 0x0080
+#define SCU_SAS_CLKSM_OFFSET 0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
+#define SCU_SAS_COMINIT_OFFSET 0x0094
+#define SCU_SAS_COMWAKE_OFFSET 0x0098
+#define SCU_SAS_COMSAS_OFFSET 0x009C
+#define SCU_SAS_SFERCNT_OFFSET 0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
+#define SCU_SAS_CNTCTL_OFFSET 0x00B0
+#define SCU_SAS_SSPTOV_OFFSET 0x00B4
+#define SCU_FTCTL_OFFSET 0x00B8
+#define SCU_FRCTL_OFFSET 0x00BC
+#define SCU_FTWMRK_OFFSET 0x00C0
+#define SCU_ENSPINUP_OFFSET 0x00C4
+#define SCU_SAS_TRNTOV_OFFSET 0x00C8
+#define SCU_SAS_PHYCAP_OFFSET 0x00CC
+#define SCU_SAS_PHYCTL_OFFSET 0x00D0
+#define SCU_SAS_LLCTL_OFFSET 0x00D8
+#define SCU_AFE_XCVRCR_OFFSET 0x00DC
+#define SCU_AFE_LUTCR_OFFSET 0x00E0
+
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
+
+#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
+
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF)
+
+#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
+
+
+/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
+#define SCU_PSZGCR_OFFSET 0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+ u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+ u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+ u32 port_selector_timeout;
+ u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+ u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+ u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+ u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+ u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+ u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+ u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+ u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+ u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+ u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+ u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+ u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+ u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+ u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+ u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+ u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+ u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+ u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+ u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+ u32 link_layer_hang_detection_timeout;
+ u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+ u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+ u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+ u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+ u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+ u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+ u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+ u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+ u32 stp_control;
+/* 0x0080 SAS_PCFG */
+ u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+ u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+ u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+ u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+ u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+ u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+ u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+ u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+ u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+ u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+ u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+ u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+ u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+ u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+ u32 ftx_control;
+/* 0x00BC FRCTL */
+ u32 frx_control;
+/* 0x00C0 FTWMRK */
+ u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+ u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+ u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+ u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+ u32 phy_control;
+ u32 reserved_d4;
+/* 0x00D8 LLCTL */
+ u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+ u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+ u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+ u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+ u32 receive_phycap;
+ u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+ u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+ u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+ u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+ u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+ u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+ u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+ u32 sas_primitive_timeout;
+ u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+ u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+ u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+ u32 pla_control;
+/* Remainder of memory space 896 bytes */
+ u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC
+ * u32 primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET 0x1400
+
+/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET 0x0000
+#define SCU_SGPIO_SGPBR_OFFSET 0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
+/* Address from 0x0820 to 0x083C */
+#define SCU_SGPIO_SGODSR_OFFSET 0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+ u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+ u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+ u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+ u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+ u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+ u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+ u32 vendor_specific_code;
+/* 0x001C Reserved */
+ u32 reserved_001c;
+/* 0x0020 SGPIO_SGODSR */
+ u32 output_data_select[8];
+/* Remainder of memory space 256 bytes */
+ u32 reserved_1444_14ff[0x30];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define SCU_VIIT_BASE 0x1c00
+
+struct scu_viit_registers {
+ u32 registers[256];
+};
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE 0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET 0x0000
+#define SCU_PTSG_RTCR_OFFSET 0x0004
+#define SCU_PTSG_RTCCR_OFFSET 0x0008
+#define SCU_PTSG_PTS0CR_OFFSET 0x0010
+#define SCU_PTSG_PTS0SR_OFFSET 0x0014
+#define SCU_PTSG_PTS1CR_OFFSET 0x0018
+#define SCU_PTSG_PTS1SR_OFFSET 0x001C
+#define SCU_PTSG_PTS2CR_OFFSET 0x0020
+#define SCU_PTSG_PTS2SR_OFFSET 0x0024
+#define SCU_PTSG_PTS3CR_OFFSET 0x0028
+#define SCU_PTSG_PTS3SR_OFFSET 0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/stats pairs
+ * for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+ u32 control;
+ u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
+ * Scheduler registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+ u32 control;
+/* 0x0004 RTCR */
+ u32 real_time_clock;
+/* 0x0008 RTCCR */
+ u32 real_time_clock_control;
+/* 0x000C */
+ u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+ struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+ u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+ u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+ u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+ u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE 0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+ /* 0x0000 AFE_XCVR_CTRL0 */
+ u32 afe_xcvr_control0;
+ /* 0x0004 AFE_XCVR_CTRL1 */
+ u32 afe_xcvr_control1;
+ /* 0x0008 */
+ u32 reserved_0008;
+ /* 0x000c afe_dfx_rx_control0 */
+ u32 afe_dfx_rx_control0;
+ /* 0x0010 AFE_DFX_RX_CTRL1 */
+ u32 afe_dfx_rx_control1;
+ /* 0x0014 */
+ u32 reserved_0014;
+ /* 0x0018 AFE_DFX_RX_STS0 */
+ u32 afe_dfx_rx_status0;
+ /* 0x001c AFE_DFX_RX_STS1 */
+ u32 afe_dfx_rx_status1;
+ /* 0x0020 */
+ u32 reserved_0020;
+ /* 0x0024 AFE_TX_CTRL */
+ u32 afe_tx_control;
+ /* 0x0028 AFE_TX_AMP_CTRL0 */
+ u32 afe_tx_amp_control0;
+ /* 0x002c AFE_TX_AMP_CTRL1 */
+ u32 afe_tx_amp_control1;
+ /* 0x0030 AFE_TX_AMP_CTRL2 */
+ u32 afe_tx_amp_control2;
+ /* 0x0034 AFE_TX_AMP_CTRL3 */
+ u32 afe_tx_amp_control3;
+ /* 0x0038 afe_tx_ssc_control */
+ u32 afe_tx_ssc_control;
+ /* 0x003c */
+ u32 reserved_003c;
+ /* 0x0040 AFE_RX_SSC_CTRL0 */
+ u32 afe_rx_ssc_control0;
+ /* 0x0044 AFE_RX_SSC_CTRL1 */
+ u32 afe_rx_ssc_control1;
+ /* 0x0048 AFE_RX_SSC_CTRL2 */
+ u32 afe_rx_ssc_control2;
+ /* 0x004c AFE_RX_EQ_STS0 */
+ u32 afe_rx_eq_status0;
+ /* 0x0050 AFE_RX_EQ_STS1 */
+ u32 afe_rx_eq_status1;
+ /* 0x0054 AFE_RX_CDR_STS */
+ u32 afe_rx_cdr_status;
+ /* 0x0058 */
+ u32 reserved_0058;
+ /* 0x005c AFE_CHAN_CTRL */
+ u32 afe_channel_control;
+ /* 0x0060-0x006c */
+ u32 reserved_0060_006c[0x04];
+ /* 0x0070 AFE_XCVR_EC_STS0 */
+ u32 afe_xcvr_error_capture_status0;
+ /* 0x0074 AFE_XCVR_EC_STS1 */
+ u32 afe_xcvr_error_capture_status1;
+ /* 0x0078 AFE_XCVR_EC_STS2 */
+ u32 afe_xcvr_error_capture_status2;
+ /* 0x007c afe_xcvr_ec_status3 */
+ u32 afe_xcvr_error_capture_status3;
+ /* 0x0080 AFE_XCVR_EC_STS4 */
+ u32 afe_xcvr_error_capture_status4;
+ /* 0x0084 AFE_XCVR_EC_STS5 */
+ u32 afe_xcvr_error_capture_status5;
+ /* 0x0088-0x00fc */
+ u32 reserved_008c_00fc[0x1e];
+};
+
+/**
+ * struct scu_afe_registers - AFE Regsiters
+ *
+ *
+ */
+/* Uaoa AFE registers */
+struct scu_afe_registers {
+ /* 0Xe000 AFE_BIAS_CTRL */
+ u32 afe_bias_control;
+ u32 reserved_0004;
+ /* 0x0008 AFE_PLL_CTRL0 */
+ u32 afe_pll_control0;
+ /* 0x000c AFE_PLL_CTRL1 */
+ u32 afe_pll_control1;
+ /* 0x0010 AFE_PLL_CTRL2 */
+ u32 afe_pll_control2;
+ /* 0x0014 AFE_CB_STS */
+ u32 afe_common_block_status;
+ /* 0x0018-0x007c */
+ u32 reserved_18_7c[0x1a];
+ /* 0x0080 AFE_PMSN_MCTRL0 */
+ u32 afe_pmsn_master_control0;
+ /* 0x0084 AFE_PMSN_MCTRL1 */
+ u32 afe_pmsn_master_control1;
+ /* 0x0088 AFE_PMSN_MCTRL2 */
+ u32 afe_pmsn_master_control2;
+ /* 0x008C-0x00fc */
+ u32 reserved_008c_00fc[0x1D];
+ /* 0x0100 AFE_DFX_MST_CTRL0 */
+ u32 afe_dfx_master_control0;
+ /* 0x0104 AFE_DFX_MST_CTRL1 */
+ u32 afe_dfx_master_control1;
+ /* 0x0108 AFE_DFX_DCL_CTRL */
+ u32 afe_dfx_dcl_control;
+ /* 0x010c AFE_DFX_DMON_CTRL */
+ u32 afe_dfx_digital_monitor_control;
+ /* 0x0110 AFE_DFX_AMONP_CTRL */
+ u32 afe_dfx_analog_p_monitor_control;
+ /* 0x0114 AFE_DFX_AMONN_CTRL */
+ u32 afe_dfx_analog_n_monitor_control;
+ /* 0x0118 AFE_DFX_NTL_STS */
+ u32 afe_dfx_ntl_status;
+ /* 0x011c AFE_DFX_FIFO_STS0 */
+ u32 afe_dfx_fifo_status0;
+ /* 0x0120 AFE_DFX_FIFO_STS1 */
+ u32 afe_dfx_fifo_status1;
+ /* 0x0124 AFE_DFX_MPAT_CTRL */
+ u32 afe_dfx_master_pattern_control;
+ /* 0x0128 AFE_DFX_P0_CTRL */
+ u32 afe_dfx_p0_control;
+ /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+ u32 afe_dfx_p0_data[32];
+ /* 0x01ac */
+ u32 reserved_01ac;
+ /* 0x01b0-0x020c AFE_DFX_P0_IRx */
+ u32 afe_dfx_p0_instruction[24];
+ /* 0x0210 */
+ u32 reserved_0210;
+ /* 0x0214 AFE_DFX_P1_CTRL */
+ u32 afe_dfx_p1_control;
+ /* 0x0218-0x245 AFE_DFX_P1_DRx */
+ u32 afe_dfx_p1_data[16];
+ /* 0x0258-0x029c */
+ u32 reserved_0258_029c[0x12];
+ /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+ u32 afe_dfx_p1_instruction[8];
+ /* 0x02c0-0x2fc */
+ u32 reserved_02c0_02fc[0x10];
+ /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+ u32 afe_dfx_tx_pmsn_control;
+ /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+ u32 afe_dfx_rx_pmsn_control;
+ u32 reserved_0308;
+ /* 0x030c AFE_DFX_NOA_CTRL0 */
+ u32 afe_dfx_noa_control0;
+ /* 0x0310 AFE_DFX_NOA_CTRL1 */
+ u32 afe_dfx_noa_control1;
+ /* 0x0314 AFE_DFX_NOA_CTRL2 */
+ u32 afe_dfx_noa_control2;
+ /* 0x0318 AFE_DFX_NOA_CTRL3 */
+ u32 afe_dfx_noa_control3;
+ /* 0x031c AFE_DFX_NOA_CTRL4 */
+ u32 afe_dfx_noa_control4;
+ /* 0x0320 AFE_DFX_NOA_CTRL5 */
+ u32 afe_dfx_noa_control5;
+ /* 0x0324 AFE_DFX_NOA_CTRL6 */
+ u32 afe_dfx_noa_control6;
+ /* 0x0328 AFE_DFX_NOA_CTRL7 */
+ u32 afe_dfx_noa_control7;
+ /* 0x032c-0x07fc */
+ u32 reserved_032c_07fc[0x135];
+
+ /* 0x0800-0x0bfc */
+ struct scu_afe_transceiver scu_afe_xcvr[4];
+
+ /* 0x0c00-0x0ffc */
+ u32 reserved_0c00_0ffc[0x0100];
+};
+
+struct scu_protocol_engine_group_registers {
+ u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+ u32 table[256];
+};
+
+/**
+ * Placeholder for the ZONE Partition Table information ZONING will not be
+ * included in the 1.1 release.
+ *
+ *
+ */
+struct scu_zone_partition_table {
+ u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM register since I am not sure if we need to
+ * read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_completion_ram {
+ u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers since I am not sure if we need to
+ * read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_frame_buffer_ram {
+ u32 ram[128];
+};
+
+#define scu_scratch_ram_SIZE_IN_DWORDS 256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ *
+ *
+ */
+struct scu_scratch_ram {
+ u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_protocol_engine_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_hub_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_host_interface_partition {
+ u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU Hardware pairs up the TL
+ * registers with the LL registers so we must place them adjcent to make the
+ * array of registers in the PEG.
+ *
+ *
+ */
+struct transport_link_layer_pair {
+ struct scu_transport_layer_registers tl;
+ struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
+ * These registers are unique to each protocol engine group. There can be
+ * at most two PEG for a single SCU part.
+ *
+ *
+ */
+struct scu_peg_registers {
+ struct transport_link_layer_pair pe[4];
+ struct scu_port_task_scheduler_group_registers ptsg;
+ struct scu_protocol_engine_group_registers peg;
+ struct scu_sgpio_registers sgpio;
+ u32 reserved_01500_1BFF[0x1C0];
+ struct scu_viit_entry viit[64];
+ struct scu_zone_partition_table zpt0;
+ struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU regsiters including both PEG registers if we turn
+ * on that compile option. All of these registers are in the memory mapped
+ * space returned from BAR1.
+ *
+ *
+ */
+struct scu_registers {
+ /* 0x0000 - PEG 0 */
+ struct scu_peg_registers peg0;
+
+ /* 0x6000 - SDMA and Miscellaneous */
+ struct scu_sdma_registers sdma;
+ struct scu_completion_ram cram;
+ struct scu_frame_buffer_ram fbram;
+ u32 reserved_6800_69FF[0x80];
+ struct noa_protocol_engine_partition noa_pe;
+ struct noa_hub_partition noa_hub;
+ struct noa_host_interface_partition noa_if;
+ u32 reserved_6d00_7fff[0x4c0];
+
+ /* 0x8000 - PEG 1 */
+ struct scu_peg_registers peg1;
+
+ /* 0xE000 - AFE Registers */
+ struct scu_afe_registers afe;
+
+ /* 0xF000 - reserved */
+ u32 reserved_f000_211fff[0x80c00];
+
+ /* 0x212000 - scratch RAM */
+ struct scu_scratch_ram scratch_ram;
+};
+
+#endif /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 000000000..cc51f38b1
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1726 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include <linux/bitops.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+#undef C
+#define C(a) (#a)
+const char *dev_state_name(enum sci_remote_device_states state)
+{
+ static const char * const strings[] = REMOTE_DEV_STATES;
+
+ return strings[state];
+}
+#undef C
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ enum sci_remote_node_suspension_reasons reason)
+{
+ return sci_remote_node_context_suspend(&idev->rnc, reason,
+ SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ * remote device is ready. We mark the isci device as ready and signal the
+ * waiting proccess.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+ set_bit(IDEV_IO_READY, &idev->flags);
+ if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+ wake_up(&ihost->eventq);
+}
+
+static enum sci_status sci_remote_device_terminate_req(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ int check_abort,
+ struct isci_request *ireq)
+{
+ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+ (ireq->target_device != idev) ||
+ (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
+ return SCI_SUCCESS;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
+ __func__, idev, idev->flags, ireq, ireq->target_device);
+
+ set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+
+ return sci_controller_terminate_request(ihost, idev, ireq);
+}
+
+static enum sci_status sci_remote_device_terminate_reqs_checkabort(
+ struct isci_remote_device *idev,
+ int chk)
+{
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ u32 i;
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq = ihost->reqs[i];
+ enum sci_status s;
+
+ s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
+ if (s != SCI_SUCCESS)
+ status = s;
+ }
+ return status;
+}
+
+static bool isci_compare_suspendcount(
+ struct isci_remote_device *idev,
+ u32 localcount)
+{
+ smp_rmb();
+
+ /* Check for a change in the suspend count, or the RNC
+ * being destroyed.
+ */
+ return (localcount != idev->rnc.suspend_count)
+ || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+}
+
+static bool isci_check_reqterm(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ u32 localcount)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ res = isci_compare_suspendcount(idev, localcount)
+ && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return res;
+}
+
+static bool isci_check_devempty(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u32 localcount)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ res = isci_compare_suspendcount(idev, localcount)
+ && idev->started_request_count == 0;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return res;
+}
+
+enum sci_status isci_remote_device_terminate_requests(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status = SCI_SUCCESS;
+ unsigned long flags;
+ u32 rnc_suspend_count;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (isci_get_device(idev) == NULL) {
+ dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
+ __func__, idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ status = SCI_FAILURE;
+ } else {
+ /* If already suspended, don't wait for another suspension. */
+ smp_rmb();
+ rnc_suspend_count
+ = sci_remote_node_context_is_suspended(&idev->rnc)
+ ? 0 : idev->rnc.suspend_count;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p, ireq=%p; started_request_count=%d, "
+ "rnc_suspend_count=%d, rnc.suspend_count=%d"
+ "about to wait\n",
+ __func__, idev, ireq, idev->started_request_count,
+ rnc_suspend_count, idev->rnc.suspend_count);
+
+ #define MAX_SUSPEND_MSECS 10000
+ if (ireq) {
+ /* Terminate a specific TC. */
+ set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+ sci_remote_device_terminate_req(ihost, idev, 0, ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!wait_event_timeout(ihost->eventq,
+ isci_check_reqterm(ihost, idev, ireq,
+ rnc_suspend_count),
+ msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
+ __func__, ihost->id);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ******* Timeout waiting for "
+ "suspend; idev=%p, current state %s; "
+ "started_request_count=%d, flags=%lx\n\t"
+ "rnc_suspend_count=%d, rnc.suspend_count=%d "
+ "RNC: current state %s, current "
+ "suspend_type %x dest state %d;\n"
+ "ireq=%p, ireq->flags = %lx\n",
+ __func__, idev,
+ dev_state_name(idev->sm.current_state_id),
+ idev->started_request_count, idev->flags,
+ rnc_suspend_count, idev->rnc.suspend_count,
+ rnc_state_name(idev->rnc.sm.current_state_id),
+ idev->rnc.suspend_type,
+ idev->rnc.destination_state,
+ ireq, ireq->flags);
+ }
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+ if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ } else {
+ /* Terminate all TCs. */
+ sci_remote_device_terminate_requests(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!wait_event_timeout(ihost->eventq,
+ isci_check_devempty(ihost, idev,
+ rnc_suspend_count),
+ msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
+ __func__, ihost->id);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ******* Timeout waiting for "
+ "suspend; idev=%p, current state %s; "
+ "started_request_count=%d, flags=%lx\n\t"
+ "rnc_suspend_count=%d, "
+ "RNC: current state %s, "
+ "rnc.suspend_count=%d, current "
+ "suspend_type %x dest state %d\n",
+ __func__, idev,
+ dev_state_name(idev->sm.current_state_id),
+ idev->started_request_count, idev->flags,
+ rnc_suspend_count,
+ rnc_state_name(idev->rnc.sm.current_state_id),
+ idev->rnc.suspend_count,
+ idev->rnc.suspend_type,
+ idev->rnc.destination_state);
+ }
+ }
+ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
+ __func__, idev);
+ isci_put_device(idev);
+ }
+ return status;
+}
+
+/**
+* isci_remote_device_not_ready() - This function is called by the ihost when
+* the remote device is not ready. We mark the isci device as ready (not
+* "ready_for_io") and signal the waiting proccess.
+* @isci_host: This parameter specifies the isci host object.
+* @isci_device: This parameter specifies the remote device
+*
+* sci_lock is held on entrance to this function.
+*/
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u32 reason)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Suspend the remote device so the I/O can be terminated. */
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+ /* Kill all outstanding requests for the device. */
+ sci_remote_device_terminate_requests(idev);
+
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/* called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete. none
+ */
+static void rnc_destruct_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ BUG_ON(idev->started_request_count != 0);
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+enum sci_status sci_remote_device_terminate_requests(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_device_terminate_reqs_checkabort(idev, 0);
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_DEV_STARTING:
+ /* device not started so there had better be no requests */
+ BUG_ON(idev->started_request_count != 0);
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ /* Transition to the stopping state and wait for the
+ * remote node to complete being posted and invalidated.
+ */
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ if (idev->started_request_count == 0)
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done,
+ idev);
+ else {
+ sci_remote_device_suspend(
+ idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+ sci_remote_device_terminate_requests(idev);
+ }
+ return SCI_SUCCESS;
+ case SCI_DEV_STOPPING:
+ /* All requests should have been terminated, but if there is an
+ * attempt to stop a device already in the stopping state, then
+ * try again to terminate.
+ */
+ return sci_remote_device_terminate_requests(idev);
+ case SCI_DEV_RESETTING:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ sci_change_state(sm, SCI_DEV_RESETTING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_DEV_RESETTING) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCI_DEV_READY);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+ u32 frame_index)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ /* Return the frame back to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING: {
+ struct isci_request *ireq;
+ struct ssp_frame_hdr hdr;
+ void *frame_header;
+ ssize_t word_cnt;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ word_cnt = sizeof(hdr) / sizeof(u32);
+ sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+ ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+ if (ireq && ireq->target_device == idev) {
+ /* The IO request is now in charge of releasing the frame */
+ status = sci_io_request_frame_handler(ireq, frame_index);
+ } else {
+ /* We could not map this tag to a valid IO
+ * request Just toss the frame and continue
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct dev_to_host_fis *hdr;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&hdr);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (hdr->fis_type == FIS_SETDEVBITS &&
+ (hdr->status & ATA_ERR)) {
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+ /* TODO Check sactive and complete associated IO if any. */
+ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+ } else if (hdr->fis_type == FIS_REGD2H &&
+ (hdr->status & ATA_ERR)) {
+ /*
+ * Some devices return D2H FIS when an NCQ error is detected.
+ * Treat this like an SDB error FIS ready reason.
+ */
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+ sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+ } else
+ status = SCI_FAILURE;
+
+ sci_controller_release_frame(ihost, frame_index);
+ break;
+ }
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* The device does not process any UF received from the hardware while
+ * in this state. All unsolicited frames are forwarded to the io request
+ * object.
+ */
+ status = sci_io_request_frame_handler(idev->working_request, frame_index);
+ break;
+ }
+
+ return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * called once the remote node context has transisitioned to a ready
+ * state (after suspending RX and/or TX due to early D2H fis)
+ */
+static void atapi_remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_request *ireq = idev->working_request;
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+ u32 event_code)
+{
+ enum sci_status status;
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+ break;
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+ status = SCI_SUCCESS;
+
+ /* Suspend the associated RNC */
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "I_T_Nexus_Timeout event"
+ : "I_T_Nexus_Timeout event in wrong state");
+
+ break;
+ }
+ /* Else, fall through and treat as unhandled... */
+ default:
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "unexpected event"
+ : "unexpected event in wrong state");
+ status = SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ /* Decode device-specific states that may require an RNC resume during
+ * normal operation. When the abort path is active, these resumes are
+ * managed when the abort path exits.
+ */
+ if (state == SCI_STP_DEV_ATAPI_ERROR) {
+ /* For ATAPI error state resume the RNC right away. */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
+ return sci_remote_node_context_resume(&idev->rnc,
+ atapi_remote_device_resume_done,
+ idev);
+ }
+ }
+
+ if (state == SCI_STP_DEV_IDLE) {
+
+ /* We pick up suspension events to handle specifically to this
+ * state. We resume the RNC right away.
+ */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ }
+
+ return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ enum sci_status status)
+{
+ struct isci_port *iport = idev->owning_port;
+
+ /* cleanup requests that failed after starting on the port */
+ if (status != SCI_SUCCESS)
+ sci_port_complete_io(iport, idev, ireq);
+ else {
+ kref_get(&idev->kref);
+ idev->started_request_count++;
+ }
+}
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ /* attempt to start an io request for this device object. The remote
+ * device object will issue the start request for the io and if
+ * successful it will start the request for the port object then
+ * increment its own request count.
+ */
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ case SCI_STP_DEV_IDLE: {
+ /* handle the start io operation for a sata device that is in
+ * the command idle state. - Evalute the type of IO request to
+ * be started - If its an NCQ request change to NCQ substate -
+ * If its any other command change to the CMD substate
+ *
+ * If this is a softreset we may want to have a different
+ * substate.
+ */
+ enum sci_remote_device_states new_state;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (task->ata_task.use_ncq)
+ new_state = SCI_STP_DEV_NCQ;
+ else {
+ idev->working_request = ireq;
+ new_state = SCI_STP_DEV_CMD;
+ }
+ sci_change_state(sm, new_state);
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ if (task->ata_task.use_ncq) {
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ } else
+ return SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+ case SCI_STP_DEV_AWAIT_RESET:
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ case SCI_SMP_DEV_IDLE:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ idev->working_request = ireq;
+ sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* device is already handling a command it can not accept new commands
+ * until this one is complete.
+ */
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_remote_device_start_request(idev, ireq, status);
+ return status;
+}
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ status = sci_request_complete(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_port_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_remote_device_decrement_request_count(idev);
+ return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_RESETTING:
+ status = common_complete_io(iport, idev, ireq);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_ATAPI_ERROR:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* This request causes hardware error, device needs to be Lun Reset.
+ * So here we force the state machine to IDLE state so the rest IOs
+ * can reach RNC state handler, these IOs will be completed by RNC with
+ * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
+ */
+ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+ } else if (idev->started_request_count == 0)
+ sci_change_state(sm, SCI_STP_DEV_IDLE);
+ break;
+ case SCI_SMP_DEV_CMD:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(sm, SCI_SMP_DEV_IDLE);
+ break;
+ case SCI_DEV_STOPPING:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (idev->started_request_count == 0)
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done,
+ idev);
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ dev_err(scirdev_to_dev(idev),
+ "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+ "could not complete\n", __func__, iport,
+ idev, ireq, status);
+ else
+ isci_put_device(idev);
+
+ return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+ struct isci_remote_device *idev = dev;
+
+ /* we need to check if this request is still valid to continue. */
+ if (idev->working_request)
+ sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ /* Note: If the remote device state is not IDLE this will
+ * replace the request that probably resulted in the task
+ * management request.
+ */
+ idev->working_request = ireq;
+ sci_change_state(sm, SCI_STP_DEV_CMD);
+
+ /* The remote node context must cleanup the TCi to NCQ mapping
+ * table. The only way to do this correctly is to either write
+ * to the TLCR register or to invalidate and repost the RNC. In
+ * either case the remote node context state machine will take
+ * the correct action when the remote node context is suspended
+ * and later resumed.
+ */
+ sci_remote_device_suspend(idev,
+ SCI_SW_SUSPEND_LINKHANG_DETECT);
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+ sci_remote_device_continue_request, idev);
+
+ out:
+ sci_remote_device_start_request(idev, ireq, status);
+ /* We need to let the controller start request handler know that
+ * it can't post TC yet. We will provide a callback function to
+ * post TC when RNC gets resumed.
+ */
+ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+ case SCI_DEV_READY:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ /* Resume the RNC as needed: */
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+ NULL, NULL);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ }
+ sci_remote_device_start_request(idev, ireq, status);
+
+ return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+ struct isci_port *iport = idev->owning_port;
+ u32 context;
+
+ context = request |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ idev->rnc.remote_node_index;
+
+ sci_controller_post_request(iport->owning_controller, context);
+}
+
+/* called once the remote node context has transisitioned to a
+ * ready state. This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ if (is_remote_device_ready(idev))
+ return;
+
+ /* go 'ready' if we are not already in a ready state */
+ sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ /* For NCQ operation we do not issue a isci_remote_device_not_ready().
+ * As a result, avoid sending the ready notification.
+ */
+ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ /* Initial state is a transitional state to the stopped state */
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @remote_device: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource. As such, they must be
+ * protected. Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant. The return value shall indicate if the device was
+ * successfully destructed or if some failure occurred. enum sci_status This value
+ * is returned if the device is successfully destructed.
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied
+ * device isn't valid (e.g. it's already been destoryed, the handle isn't
+ * valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ ihost = idev->owning_port->owning_controller;
+ sci_controller_free_remote_node_context(ihost, idev,
+ idev->rnc.remote_node_index);
+ idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+ sci_change_state(sm, SCI_DEV_FINAL);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ /* There should not be any outstanding io's. All paths to
+ * here should go through isci_remote_device_nuke_requests.
+ * If we hit this condition, we will need a way to complete
+ * io requests in process */
+ BUG_ON(idev->started_request_count > 0);
+
+ sci_remote_device_destruct(idev);
+ list_del_init(&idev->node);
+ isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 prev_state;
+
+ /* If we are entering from the stopping state let the SCI User know that
+ * the stop operation has completed.
+ */
+ prev_state = idev->sm.previous_state_id;
+ if (prev_state == SCI_DEV_STOPPING)
+ isci_remote_device_deconstruct(ihost, idev);
+
+ sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+ sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+ } else if (dev_is_expander(dev)) {
+ sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+ } else
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SAS_END_DEVICE) {
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+ }
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+ if (sci_remote_node_context_is_ready(&idev->rnc)) {
+ /*
+ * Since the RNC is ready, it's alright to finish completion
+ * processing (e.g. signal the remote device is ready). */
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+ } else {
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+ idev);
+ }
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+ isci_remote_device_not_ready(ihost, idev,
+ idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+ [SCI_DEV_INITIAL] = {
+ .enter_state = sci_remote_device_initial_state_enter,
+ },
+ [SCI_DEV_STOPPED] = {
+ .enter_state = sci_remote_device_stopped_state_enter,
+ },
+ [SCI_DEV_STARTING] = {
+ .enter_state = sci_remote_device_starting_state_enter,
+ },
+ [SCI_DEV_READY] = {
+ .enter_state = sci_remote_device_ready_state_enter,
+ .exit_state = sci_remote_device_ready_state_exit
+ },
+ [SCI_STP_DEV_IDLE] = {
+ .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_STP_DEV_CMD] = {
+ .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+ },
+ [SCI_STP_DEV_NCQ] = { },
+ [SCI_STP_DEV_NCQ_ERROR] = {
+ .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+ },
+ [SCI_STP_DEV_ATAPI_ERROR] = { },
+ [SCI_STP_DEV_AWAIT_RESET] = { },
+ [SCI_SMP_DEV_IDLE] = {
+ .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_SMP_DEV_CMD] = {
+ .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+ .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
+ },
+ [SCI_DEV_STOPPING] = { },
+ [SCI_DEV_FAILED] = { },
+ [SCI_DEV_RESETTING] = {
+ .enter_state = sci_remote_device_resetting_state_enter,
+ .exit_state = sci_remote_device_resetting_state_exit
+ },
+ [SCI_DEV_FINAL] = { },
+};
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @sci_port: SAS/SATA port through which this device is accessed.
+ * @sci_dev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ idev->owning_port = iport;
+ idev->started_request_count = 0;
+
+ sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+ sci_remote_node_context_construct(&idev->rnc,
+ SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object. Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ struct sci_port_properties properties;
+
+ sci_remote_device_construct(iport, idev);
+
+ sci_port_get_properties(iport, &properties);
+ /* Get accurate port width from port's phy mask for a DA device. */
+ idev->device_port_width = hweight32(properties.phy_mask);
+
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ sci_remote_device_construct(iport, idev);
+
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ /* For SAS-2 the physical link rate is actually a logical link
+ * rate that incorporates multiplexing. The SCU doesn't
+ * incorporate multiplexing and for the purposes of the
+ * connection the logical link rate is that same as the
+ * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
+ * one another, so this code works for both situations.
+ */
+ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+ dev->linkrate);
+
+ /* / @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_resume(
+ struct isci_remote_device *idev,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum sci_status status;
+
+ status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
+ if (status != SCI_SUCCESS)
+ dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
+ __func__, status);
+ return status;
+}
+
+static void isci_remote_device_resume_from_abort_complete(void *cbparam)
+{
+ struct isci_remote_device *idev = cbparam;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ scics_sds_remote_node_context_callback abort_resume_cb =
+ idev->abort_resume_cb;
+
+ dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
+ __func__, abort_resume_cb);
+
+ if (abort_resume_cb != NULL) {
+ idev->abort_resume_cb = NULL;
+ abort_resume_cb(idev->abort_resume_cbparam);
+ }
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+static bool isci_remote_device_test_resume_done(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ bool done;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
+ || test_bit(IDEV_STOP_PENDING, &idev->flags)
+ || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return done;
+}
+
+void isci_remote_device_wait_for_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
+ __func__, idev);
+
+ #define MAX_RESUME_MSECS 10000
+ if (!wait_event_timeout(ihost->eventq,
+ isci_remote_device_test_resume_done(ihost, idev),
+ msecs_to_jiffies(MAX_RESUME_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
+ "resume: %p\n", __func__, idev);
+ }
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+ dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
+ __func__, idev);
+}
+
+enum sci_status isci_remote_device_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ enum sci_status status = SCI_SUCCESS;
+ int destroyed;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* Preserve any current resume callbacks, for instance from other
+ * resumptions.
+ */
+ idev->abort_resume_cb = idev->rnc.user_callback;
+ idev->abort_resume_cbparam = idev->rnc.user_cookie;
+ set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+ clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+ destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
+ if (!destroyed)
+ status = sci_remote_device_resume(
+ idev, isci_remote_device_resume_from_abort_complete,
+ idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!destroyed && (status == SCI_SUCCESS))
+ isci_remote_device_wait_for_resume_from_abort(ihost, idev);
+ else
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+ return status;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ * device. This method enables normal IO requests to flow through to the
+ * remote device.
+ * @remote_device: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * start operation should complete.
+ *
+ * An indication of whether the device was successfully started. SCI_SUCCESS
+ * This value is returned if the device was successfully started.
+ * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
+ * the device when there have been no phys added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_resume(idev, remote_device_resume_done,
+ idev);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_change_state(sm, SCI_DEV_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = iport->isci_host;
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ if (dev->parent && dev_is_expander(dev->parent))
+ status = sci_remote_device_ea_construct(iport, idev);
+ else
+ status = sci_remote_device_da_construct(iport, idev);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+ __func__, status);
+
+ return status;
+ }
+
+ /* start the device. */
+ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+ if (status != SCI_SUCCESS)
+ dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+ status);
+
+ return status;
+}
+
+/**
+ * This function builds the isci_remote_device when a libsas dev_found message
+ * is received.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the isci_port conected to this device.
+ *
+ * pointer to new isci_remote_device.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+ struct isci_remote_device *idev;
+ int i;
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ idev = &ihost->devices[i];
+ if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+ break;
+ }
+
+ if (i >= SCI_MAX_REMOTE_DEVICES) {
+ dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+ return NULL;
+ }
+ if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+ return NULL;
+
+ return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+ struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+ struct isci_host *ihost = idev->isci_port->isci_host;
+
+ idev->domain_dev = NULL;
+ idev->isci_port = NULL;
+ clear_bit(IDEV_START_PENDING, &idev->flags);
+ clear_bit(IDEV_STOP_PENDING, &idev->flags);
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ clear_bit(IDEV_GONE, &idev->flags);
+ smp_mb__before_atomic();
+ clear_bit(IDEV_ALLOCATED, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ * remote device.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device.
+ *
+ * The status of the ihost request to stop.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+ set_bit(IDEV_GONE, &idev->flags);
+
+ set_bit(IDEV_STOP_PENDING, &idev->flags);
+ status = sci_remote_device_stop(idev, 50);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the stop complete callback. */
+ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+ /* nothing to wait for */;
+ else
+ wait_for_device_stop(ihost, idev);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p, waiting done.\n", __func__, idev);
+
+ return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ * device is removed.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+ __func__, dev, idev, idev->isci_port);
+
+ isci_remote_device_stop(ihost, idev);
+}
+
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ * device is discovered. A remote device object is created and started. the
+ * function then sleeps until the sci core device started message is
+ * received.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ * status, zero indicates success.
+ */
+int isci_remote_device_found(struct domain_device *dev)
+{
+ struct isci_host *isci_host = dev_to_ihost(dev);
+ struct isci_port *isci_port = dev->port->lldd_port;
+ struct isci_remote_device *isci_device;
+ enum sci_status status;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device = %p\n", __func__, dev);
+
+ if (!isci_port)
+ return -ENODEV;
+
+ isci_device = isci_remote_device_alloc(isci_host, isci_port);
+ if (!isci_device)
+ return -ENODEV;
+
+ kref_init(&isci_device->kref);
+ INIT_LIST_HEAD(&isci_device->node);
+
+ spin_lock_irq(&isci_host->scic_lock);
+ isci_device->domain_dev = dev;
+ isci_device->isci_port = isci_port;
+ list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+ set_bit(IDEV_START_PENDING, &isci_device->flags);
+ status = isci_remote_device_construct(isci_port, isci_device);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+
+ if (status == SCI_SUCCESS) {
+ /* device came up, advertise it to the world */
+ dev->lldd_dev = isci_device;
+ } else
+ isci_put_device(isci_device);
+ spin_unlock_irq(&isci_host->scic_lock);
+
+ /* wait for the device ready callback. */
+ wait_for_device_start(isci_host, isci_device);
+
+ return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
+
+enum sci_status isci_remote_device_suspend_terminate(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ unsigned long flags;
+ enum sci_status status;
+
+ /* Put the device into suspension. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Terminate and wait for the completions. */
+ status = isci_remote_device_terminate_requests(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_remote_device_terminate_requests(%p) "
+ "returned %d!\n",
+ __func__, idev, status);
+
+ /* NOTE: RNC resumption is left to the caller! */
+ return status;
+}
+
+int isci_remote_device_is_safe_to_abort(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
+}
+
+enum sci_status sci_remote_device_abort_requests_pending_abort(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_device_terminate_reqs_checkabort(idev, 1);
+}
+
+enum sci_status isci_remote_device_reset_complete(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset_complete(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return status;
+}
+
+void isci_dev_set_hang_detection_timeout(
+ struct isci_remote_device *idev,
+ u32 timeout)
+{
+ if (dev_is_sata(idev->domain_dev)) {
+ if (timeout) {
+ if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
+ &idev->flags))
+ return; /* Already enabled. */
+ } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
+ &idev->flags))
+ return; /* Not enabled. */
+
+ sci_port_set_hang_detection_timeout(idev->owning_port,
+ timeout);
+ }
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 000000000..47a013fff
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,387 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-taskcontext connection rate for this device
+ * @working_request: SATA requests have no tag we for unaccelerated
+ * protocols we need a method to associate unsolicited
+ * frames with a pending request
+ */
+struct isci_remote_device {
+ #define IDEV_START_PENDING 0
+ #define IDEV_STOP_PENDING 1
+ #define IDEV_ALLOCATED 2
+ #define IDEV_GONE 3
+ #define IDEV_IO_READY 4
+ #define IDEV_IO_NCQERROR 5
+ #define IDEV_RNC_LLHANG_ENABLED 6
+ #define IDEV_ABORT_PATH_ACTIVE 7
+ #define IDEV_ABORT_PATH_RESUME_PENDING 8
+ unsigned long flags;
+ struct kref kref;
+ struct isci_port *isci_port;
+ struct domain_device *domain_dev;
+ struct list_head node;
+ struct sci_base_state_machine sm;
+ u32 device_port_width;
+ enum sas_linkrate connection_rate;
+ struct isci_port *owning_port;
+ struct sci_remote_node_context rnc;
+ /* XXX unify with device reference counting and delete */
+ u32 started_request_count;
+ struct isci_request *working_request;
+ u32 not_ready_reason;
+ scics_sds_remote_node_context_callback abort_resume_cb;
+ void *abort_resume_cbparam;
+};
+
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_get_device(
+ struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_get(&idev->kref);
+ return idev;
+}
+
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+ kref_get(&idev->kref);
+ return idev;
+ }
+
+ return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_put(&idev->kref, isci_remote_device_release);
+}
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ * reception of link activity for the supplied remote device. This method
+ * disables normal IO requests from flowing through to the remote device.
+ * @remote_device: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * An indication of whether the device was successfully stopped. SCI_SUCCESS
+ * This value is returned if the transmission and reception for the device was
+ * successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+ struct isci_remote_device *idev,
+ u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ * ready for operation. This method must be called anytime the device is
+ * reset either through a SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset. This
+ * method resets the software object so that it will be operational after a
+ * device hardware reset completes. An indication of whether the device reset
+ * was accepted. SCI_SUCCESS This value is returned if the device reset is
+ * started.
+ */
+enum sci_status sci_remote_device_reset(
+ struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ * that the reset operation is complete and the device can resume operation
+ * again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ * of the reset complete operation.
+ *
+ * An indication that the device is resuming operation. SCI_SUCCESS the device
+ * is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+ struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ * for the common remote device state machine.
+ * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
+ * state machine.
+ *
+ * @SCI_DEV_STOPPED: This state indicates that the remote device has
+ * successfully been stopped. In this state no new IO operations are
+ * permitted. This state is entered from the INITIAL state. This state
+ * is entered from the STOPPING state.
+ *
+ * @SCI_DEV_STARTING: This state indicates the the remote device is in
+ * the process of becoming ready (i.e. starting). In this state no new
+ * IO operations are permitted. This state is entered from the STOPPED
+ * state.
+ *
+ * @SCI_DEV_READY: This state indicates the remote device is now ready.
+ * Thus, the user is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ *
+ * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote
+ * device. When there are no active IO for the device it is is in this
+ * state.
+ *
+ * @SCI_STP_DEV_CMD: This is the command state for for the STP remote
+ * device. This state is entered when the device is processing a
+ * non-NCQ command. The device object will fail any new start IO
+ * requests until this command is complete.
+ *
+ * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
+ * This state is entered when the device is processing an NCQ reuqest.
+ * It will remain in this state so long as there is one or more NCQ
+ * requests being processed.
+ *
+ * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
+ * remote device. This state is entered when an SDB error FIS is
+ * received by the device object while in the NCQ state. The device
+ * object will only accept a READ LOG command while in this state.
+ *
+ * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
+ * ATAPI remote device. This state is entered when ATAPI device sends
+ * error status FIS without data while the device object is in CMD
+ * state. A suspension event is expected in this state. The device
+ * object will resume right away.
+ *
+ * @SCI_STP_DEV_AWAIT_RESET: This is the READY substate indicates the
+ * device is waiting for the RESET task coming to be recovered from
+ * certain hardware specific error.
+ *
+ * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
+ * remote device. This is the normal operational state for a remote
+ * device.
+ *
+ * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
+ * This is the state that the device is placed in when a RNC suspend is
+ * received by the SCU hardware.
+ *
+ * @SCI_DEV_STOPPING: This state indicates that the remote device is in
+ * the process of stopping. In this state no new IO operations are
+ * permitted, but existing IO operations are allowed to complete. This
+ * state is entered from the READY state. This state is entered from
+ * the FAILED state.
+ *
+ * @SCI_DEV_FAILED: This state indicates that the remote device has
+ * failed. In this state no new IO operations are permitted. This
+ * state is entered from the INITIALIZING state. This state is entered
+ * from the READY state.
+ *
+ * @SCI_DEV_RESETTING: This state indicates the device is being reset.
+ * In this state no new IO operations are permitted. This state is
+ * entered from the READY state.
+ *
+ * @SCI_DEV_FINAL: Simply the final state for the base remote device
+ * state machine.
+ */
+#define REMOTE_DEV_STATES {\
+ C(DEV_INITIAL),\
+ C(DEV_STOPPED),\
+ C(DEV_STARTING),\
+ C(DEV_READY),\
+ C(STP_DEV_IDLE),\
+ C(STP_DEV_CMD),\
+ C(STP_DEV_NCQ),\
+ C(STP_DEV_NCQ_ERROR),\
+ C(STP_DEV_ATAPI_ERROR),\
+ C(STP_DEV_AWAIT_RESET),\
+ C(SMP_DEV_IDLE),\
+ C(SMP_DEV_CMD),\
+ C(DEV_STOPPING),\
+ C(DEV_FAILED),\
+ C(DEV_RESETTING),\
+ C(DEV_FINAL),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_remote_device_states REMOTE_DEV_STATES;
+#undef C
+const char *dev_state_name(enum sci_remote_device_states state);
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+ struct isci_remote_device *idev;
+
+ idev = container_of(rnc, typeof(*idev), rnc);
+
+ return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+ return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+ /* XXX delete this voodoo when converting to the top-level device
+ * reference count
+ */
+ if (WARN_ONCE(idev->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ idev->started_request_count--;
+}
+
+void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
+
+enum sci_status sci_remote_device_frame_handler(
+ struct isci_remote_device *idev,
+ u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+ struct isci_remote_device *idev,
+ u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+void sci_remote_device_post_request(
+ struct isci_remote_device *idev,
+ u32 request);
+
+enum sci_status sci_remote_device_terminate_requests(
+ struct isci_remote_device *idev);
+
+int isci_remote_device_is_safe_to_abort(
+ struct isci_remote_device *idev);
+
+enum sci_status
+sci_remote_device_abort_requests_pending_abort(
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status sci_remote_device_resume(
+ struct isci_remote_device *idev,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+
+enum sci_status isci_remote_device_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset_complete(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend_terminate(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status isci_remote_device_terminate_requests(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ enum sci_remote_node_suspension_reasons reason);
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 000000000..191010063
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,809 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas_ata.h>
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+#undef C
+#define C(a) (#a)
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
+{
+ static const char * const strings[] = RNC_STATES;
+
+ return strings[state];
+}
+#undef C
+
+/**
+ *
+ * @sci_rnc: The state of the remote node context object to check.
+ *
+ * This method will return true if the remote node context is in a READY state
+ * otherwise it will return false bool true if the remote node context is in
+ * the ready state. false if the remote node context is not in the ready state.
+ */
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ if (current_state == SCI_RNC_READY) {
+ return true;
+ }
+
+ return false;
+}
+
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ if (current_state == SCI_RNC_TX_RX_SUSPENDED)
+ return true;
+ return false;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+ if (id < ihost->remote_node_entries &&
+ ihost->device_table[id])
+ return &ihost->remote_node_context_table[id];
+
+ return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ int rni = sci_rnc->remote_node_index;
+ union scu_remote_node_context *rnc;
+ struct isci_host *ihost;
+ __le64 sas_addr;
+
+ ihost = idev->owning_port->owning_controller;
+ rnc = sci_rnc_by_id(ihost, rni);
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context)
+ * sci_remote_device_node_count(idev));
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = idev->device_port_width;
+ rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+ /* sas address is __be64, context ram format is __le64 */
+ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+ rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+ rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+ rnc->ssp.nexus_loss_timer_enable = true;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = false;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+
+ rnc->ssp.arbitration_wait_time = 0;
+
+ if (dev_is_sata(dev)) {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.stp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.stp_inactivity_timeout;
+ } else {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.ssp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.ssp_inactivity_timeout;
+ }
+
+ rnc->ssp.initial_arbitration_wait_time = 0;
+
+ /* Open Address Frame Parameters */
+ rnc->ssp.oaf_connection_rate = idev->connection_rate;
+ rnc->ssp.oaf_features = 0;
+ rnc->ssp.oaf_source_zone_group = 0;
+ rnc->ssp.oaf_more_compatibility_features = 0;
+}
+/**
+ *
+ * @sci_rnc:
+ * @callback:
+ * @callback_parameter:
+ *
+ * This method will setup the remote node context object so it will transition
+ * to its ready state. If the remote node context is already setup to
+ * transition to its final state then this function does nothing. none
+ */
+static void sci_remote_node_context_setup_to_resume(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter,
+ enum sci_remote_node_context_destination_state dest_param)
+{
+ if (sci_rnc->destination_state != RNC_DEST_FINAL) {
+ sci_rnc->destination_state = dest_param;
+ if (callback != NULL) {
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+ }
+ }
+}
+
+static void sci_remote_node_context_setup_to_destroy(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
+
+ sci_rnc->destination_state = RNC_DEST_FINAL;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+
+ wake_up(&ihost->eventq);
+}
+
+/**
+ *
+ *
+ * This method just calls the user callback function and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+ struct sci_remote_node_context *rnc)
+{
+ if (rnc->user_callback != NULL) {
+ (*rnc->user_callback)(rnc->user_cookie);
+
+ rnc->user_callback = NULL;
+ rnc->user_cookie = NULL;
+ }
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+ switch (rnc->destination_state) {
+ case RNC_DEST_READY:
+ case RNC_DEST_SUSPENDED_RESUME:
+ rnc->destination_state = RNC_DEST_READY;
+ /* Fall through... */
+ case RNC_DEST_FINAL:
+ sci_remote_node_context_resume(rnc, rnc->user_callback,
+ rnc->user_cookie);
+ break;
+ default:
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
+ break;
+ }
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = true;
+
+ if (dev_is_sata(dev) && dev->parent) {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+ } else {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+ if (!dev->parent)
+ sci_port_setup_transports(idev->owning_port,
+ sci_rnc->remote_node_index);
+ }
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = false;
+
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ /* Check to see if we have gotten back to the initial state because
+ * someone requested to destroy the remote node context object.
+ */
+ if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
+ sci_remote_node_context_notify_user(rnc);
+
+ smp_wmb();
+ wake_up(&ihost->eventq);
+ }
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+ sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ /* Terminate all outstanding requests. */
+ sci_remote_device_terminate_requests(rnc_to_dev(rnc));
+ sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev;
+ struct domain_device *dev;
+
+ idev = rnc_to_dev(rnc);
+ dev = idev->domain_dev;
+
+ /*
+ * For direct attached SATA devices we need to clear the TLCR
+ * NCQ to TCi tag mapping on the phy and in cases where we
+ * resume because of a target reset we also need to update
+ * the STPTLDARNI register with the RNi of the device
+ */
+ if (dev_is_sata(dev) && !dev->parent)
+ sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
+
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ enum sci_remote_node_context_destination_state dest_select;
+ int tell_user = 1;
+
+ dest_select = rnc->destination_state;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
+
+ if ((dest_select == RNC_DEST_SUSPENDED) ||
+ (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
+ sci_remote_node_context_suspend(
+ rnc, rnc->suspend_reason,
+ SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
+
+ if (dest_select == RNC_DEST_SUSPENDED_RESUME)
+ tell_user = 0; /* Wait until ready again. */
+ }
+ if (tell_user)
+ sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 new_count = rnc->suspend_count + 1;
+
+ if (new_count == 0)
+ rnc->suspend_count = 1;
+ else
+ rnc->suspend_count = new_count;
+ smp_wmb();
+
+ /* Terminate outstanding requests pending abort. */
+ sci_remote_device_abort_requests_pending_abort(idev);
+
+ wake_up(&ihost->eventq);
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_await_suspend_state_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc
+ = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+
+ if (dev_is_sata(idev->domain_dev))
+ isci_dev_set_hang_detection_timeout(idev, 0);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+ [SCI_RNC_INITIAL] = {
+ .enter_state = sci_remote_node_context_initial_state_enter,
+ },
+ [SCI_RNC_POSTING] = {
+ .enter_state = sci_remote_node_context_posting_state_enter,
+ },
+ [SCI_RNC_INVALIDATING] = {
+ .enter_state = sci_remote_node_context_invalidating_state_enter,
+ },
+ [SCI_RNC_RESUMING] = {
+ .enter_state = sci_remote_node_context_resuming_state_enter,
+ },
+ [SCI_RNC_READY] = {
+ .enter_state = sci_remote_node_context_ready_state_enter,
+ },
+ [SCI_RNC_TX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_suspended_state_enter,
+ },
+ [SCI_RNC_TX_RX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+ },
+ [SCI_RNC_AWAIT_SUSPENSION] = {
+ .exit_state = sci_remote_node_context_await_suspend_state_exit,
+ },
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index)
+{
+ memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+ rnc->remote_node_index = remote_node_index;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
+
+ sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code)
+{
+ enum scis_sds_remote_node_context_states state;
+ u32 next_state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_POSTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_POST_RNC_COMPLETE:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+ if (sci_rnc->destination_state == RNC_DEST_FINAL)
+ next_state = SCI_RNC_INITIAL;
+ else
+ next_state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, next_state);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really dont care if the hardware is going to suspend
+ * the device since it's being invalidated anyway */
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being "
+ "invalidated.\n", __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_RESUMING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really dont care if the hardware is going to suspend
+ * the device since it's being resumed anyway */
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being resumed.\n",
+ __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_READY:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspend_type = scu_get_event_type(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspend_type = scu_get_event_type(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ next_state = SCI_RNC_TX_SUSPENDED;
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ next_state = SCI_RNC_TX_RX_SUSPENDED;
+ break;
+ default:
+ goto out;
+ }
+ if (sci_rnc->suspend_type == scu_get_event_type(event_code))
+ sci_change_state(&sci_rnc->sm, next_state);
+ break;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state: %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+
+ out:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: code: %#x state: %s\n", __func__, event_code,
+ rnc_state_name(state));
+ return SCI_FAILURE;
+
+}
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INVALIDATING:
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ return SCI_SUCCESS;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ case SCI_RNC_INITIAL:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state: %s\n", __func__,
+ rnc_state_name(state));
+ /* We have decided that the destruct request on the remote node context
+ * can not fail since it is either in the initial/destroyed state or is
+ * can be destroyed.
+ */
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_suspend(
+ struct sci_remote_node_context *sci_rnc,
+ enum sci_remote_node_suspension_reasons suspend_reason,
+ u32 suspend_type)
+{
+ enum scis_sds_remote_node_context_states state
+ = sci_rnc->sm.current_state_id;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ enum sci_status status = SCI_FAILURE_INVALID_STATE;
+ enum sci_remote_node_context_destination_state dest_param =
+ RNC_DEST_UNSPECIFIED;
+
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: current state %s, current suspend_type %x dest state %d,"
+ " arg suspend_reason %d, arg suspend_type %x",
+ __func__, rnc_state_name(state), sci_rnc->suspend_type,
+ sci_rnc->destination_state, suspend_reason,
+ suspend_type);
+
+ /* Disable automatic state continuations if explicitly suspending. */
+ if ((suspend_reason == SCI_HW_SUSPEND) ||
+ (sci_rnc->destination_state == RNC_DEST_FINAL))
+ dest_param = sci_rnc->destination_state;
+
+ switch (state) {
+ case SCI_RNC_READY:
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (sci_rnc->destination_state == RNC_DEST_FINAL) {
+ dev_warn(scirdev_to_dev(idev),
+ "%s: already destroying %p\n",
+ __func__, sci_rnc);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ /* Fall through and handle like SCI_RNC_POSTING */
+ case SCI_RNC_RESUMING:
+ /* Fall through and handle like SCI_RNC_POSTING */
+ case SCI_RNC_POSTING:
+ /* Set the destination state to AWAIT - this signals the
+ * entry into the SCI_RNC_READY state that a suspension
+ * needs to be done immediately.
+ */
+ if (sci_rnc->destination_state != RNC_DEST_FINAL)
+ sci_rnc->destination_state = RNC_DEST_SUSPENDED;
+ sci_rnc->suspend_type = suspend_type;
+ sci_rnc->suspend_reason = suspend_reason;
+ return SCI_SUCCESS;
+
+ case SCI_RNC_TX_SUSPENDED:
+ if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
+ status = SCI_SUCCESS;
+ break;
+ case SCI_RNC_TX_RX_SUSPENDED:
+ if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+ status = SCI_SUCCESS;
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+ || (suspend_type == sci_rnc->suspend_type))
+ return SCI_SUCCESS;
+ break;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ sci_rnc->destination_state = dest_param;
+ sci_rnc->suspend_type = suspend_type;
+ sci_rnc->suspend_reason = suspend_reason;
+
+ if (status == SCI_SUCCESS) { /* Already in the destination state? */
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ wake_up_all(&ihost->eventq); /* Let observers look. */
+ return SCI_SUCCESS;
+ }
+ if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
+ (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
+
+ if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
+ isci_dev_set_hang_detection_timeout(idev, 0x00000001);
+
+ sci_remote_device_post_request(
+ idev, SCI_SOFTWARE_SUSPEND_CMD);
+ }
+ if (state != SCI_RNC_AWAIT_SUSPENSION)
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+
+ state = sci_rnc->sm.current_state_id;
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
+ "dev resume path %s\n",
+ __func__, rnc_state_name(state), cb_fn, cb_p,
+ sci_rnc->destination_state,
+ test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
+ ? "<abort active>" : "<normal>");
+
+ switch (state) {
+ case SCI_RNC_INITIAL:
+ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
+ RNC_DEST_READY);
+ if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+ sci_remote_node_context_construct_buffer(sci_rnc);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ }
+ return SCI_SUCCESS;
+
+ case SCI_RNC_POSTING:
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_RESUMING:
+ /* We are still waiting to post when a resume was
+ * requested.
+ */
+ switch (sci_rnc->destination_state) {
+ case RNC_DEST_SUSPENDED:
+ case RNC_DEST_SUSPENDED_RESUME:
+ /* Previously waiting to suspend after posting.
+ * Now continue onto resumption.
+ */
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p,
+ RNC_DEST_SUSPENDED_RESUME);
+ break;
+ default:
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p,
+ RNC_DEST_READY);
+ break;
+ }
+ return SCI_SUCCESS;
+
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ {
+ struct domain_device *dev = idev->domain_dev;
+ /* If this is an expander attached SATA device we must
+ * invalidate and repost the RNC since this is the only
+ * way to clear the TCi to NCQ tag mapping table for
+ * the RNi. All other device types we can just resume.
+ */
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
+
+ if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+ if ((dev_is_sata(dev) && dev->parent) ||
+ (sci_rnc->destination_state == RNC_DEST_FINAL))
+ sci_change_state(&sci_rnc->sm,
+ SCI_RNC_INVALIDATING);
+ else
+ sci_change_state(&sci_rnc->sm,
+ SCI_RNC_RESUMING);
+ }
+ }
+ return SCI_SUCCESS;
+
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+
+ switch (state) {
+ case SCI_RNC_READY:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ default:
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_start_task(
+ struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum sci_status status = sci_remote_node_context_resume(sci_rnc,
+ cb_fn, cb_p);
+ if (status != SCI_SUCCESS)
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: resume failed: %d\n", __func__, status);
+ return status;
+}
+
+int sci_remote_node_context_is_safe_to_abort(
+ struct sci_remote_node_context *sci_rnc)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ return 1;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ case SCI_RNC_INITIAL:
+ return 0;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return 0;
+ }
+}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 000000000..c7ee81d01
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,236 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes associated with
+ * the remote node context in the silicon. It exists to model and manage
+ * the remote node context in the silicon.
+ *
+ *
+ */
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * This constant represents an invalid remote device id, it is used to program
+ * the STPDARNI register so the driver knows when it has received a SIGNATURE
+ * FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
+
+enum sci_remote_node_suspension_reasons {
+ SCI_HW_SUSPEND,
+ SCI_SW_SUSPEND_NORMAL,
+ SCI_SW_SUSPEND_LINKHANG_DETECT
+};
+#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
+#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * enum sci_remote_node_context_states
+ * @SCI_RNC_INITIAL initial state for a remote node context. On a resume
+ * request the remote node context will transition to the posting state.
+ *
+ * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
+ * the RNC is posted the remote node context will be made ready.
+ *
+ * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
+ * the hardware. Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ *
+ * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
+ * hardare. Once the event notification of resume complete is received the
+ * remote node context will transition to the ready state.
+ *
+ * @SCI_RNC_READY: state that the remote node context must be in to accept io
+ * request operations.
+ *
+ * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
+ * it gets a TX suspend notification from the hardware.
+ *
+ * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
+ * when it gets a TX RX suspend notification from the hardware.
+ *
+ * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
+ * for a suspend notification from the hardware. This state is entered when
+ * either there is a request to supend the remote node context or when there is
+ * a TC completion where the remote node will be suspended by the hardware.
+ */
+#define RNC_STATES {\
+ C(RNC_INITIAL),\
+ C(RNC_POSTING),\
+ C(RNC_INVALIDATING),\
+ C(RNC_RESUMING),\
+ C(RNC_READY),\
+ C(RNC_TX_SUSPENDED),\
+ C(RNC_TX_RX_SUSPENDED),\
+ C(RNC_AWAIT_SUSPENSION),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum scis_sds_remote_node_context_states RNC_STATES;
+#undef C
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+ RNC_DEST_UNSPECIFIED,
+ RNC_DEST_READY,
+ RNC_DEST_FINAL,
+ RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */
+ RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
+ * or invalidating and already suspending.
+ */
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ * associated with the remote node context object. The remote node context
+ * (RNC) object models the the remote device information necessary to manage
+ * the silicon RNC.
+ */
+struct sci_remote_node_context {
+ /**
+ * This field indicates the remote node index (RNI) associated with
+ * this RNC.
+ */
+ u16 remote_node_index;
+
+ /**
+ * This field is the recored suspension type of the remote node
+ * context suspension.
+ */
+ u32 suspend_type;
+ enum sci_remote_node_suspension_reasons suspend_reason;
+ u32 suspend_count;
+
+ /**
+ * This field is true if the remote node context is resuming from its current
+ * state. This can cause an automatic resume on receiving a suspension
+ * notification.
+ */
+ enum sci_remote_node_context_destination_state destination_state;
+
+ /**
+ * This field contains the callback function that the user requested to be
+ * called when the requested state transition is complete.
+ */
+ scics_sds_remote_node_context_callback user_callback;
+
+ /**
+ * This field contains the parameter that is called when the user requested
+ * state transition is completed.
+ */
+ void *user_cookie;
+
+ /**
+ * This field contains the data for the object's state machine.
+ */
+ struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc);
+
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ enum sci_remote_node_suspension_reasons reason,
+ u32 suspension_code);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+int sci_remote_node_context_is_safe_to_abort(
+ struct sci_remote_node_context *sci_rnc);
+
+static inline bool sci_remote_node_context_is_being_destroyed(
+ struct sci_remote_node_context *sci_rnc)
+{
+ return (sci_rnc->destination_state == RNC_DEST_FINAL)
+ || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
+ && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
+}
+#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 000000000..301b31419
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ * public, protected, and private methods.
+ *
+ *
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ *
+ * @remote_node_table: This is the remote node index table from which the
+ * selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ * search for an available selection.
+ *
+ * This routine will find the bit position in absolute bit terms of the next 32
+ * + bit position. If there are available bits in the first u32 then it is
+ * just bit position. u32 This is the absolute bit position for an available
+ * group.
+ */
+static u32 sci_remote_node_table_get_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 dword_index;
+ u32 *group_table;
+ u32 bit_index;
+
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+ if (group_table[dword_index] != 0) {
+ for (bit_index = 0; bit_index < 32; bit_index++) {
+ if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+ return (dword_index * 32) + bit_index;
+ }
+ }
+ }
+ }
+
+ return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
+
+/**
+ *
+ * @out]: remote_node_table This the remote node table in which to clear the
+ * selector.
+ * @set_index: This is the remote node selector in which the change will be
+ * made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method will clear the group index entry in the specified group index
+ * table. none
+ */
+static void sci_remote_node_table_clear_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ *
+ * @out]: remote_node_table This the remote node table in which to set the
+ * selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method will set the group index bit entry in the specified gropu index
+ * table. none
+ */
+static void sci_remote_node_table_set_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ *
+ * @out]: remote_node_table This is the remote node table in which to modify
+ * the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ * the table.
+ *
+ * This method will set the remote to available in the remote node allocation
+ * table. none
+ */
+static void sci_remote_node_table_set_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_normalized;
+ u32 slot_position;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] |=
+ 1 << (slot_normalized + slot_position);
+}
+
+/**
+ *
+ * @out]: remote_node_table This is the remote node table from which to clear
+ * the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ * from the table.
+ *
+ * This method clears the remote node index from the table of available remote
+ * nodes. none
+ */
+static void sci_remote_node_table_clear_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_position;
+ u32 slot_normalized;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] &=
+ ~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ *
+ * @out]: remote_node_table The remote node table from which the slot will be
+ * cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index. none
+ */
+static void sci_remote_node_table_clear_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table:
+ *
+ * THis method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table that for which the group
+ * value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method will return the group value for the specified group index. The
+ * bit values at the specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ dword_value = dword_value >> (dword_remainder * 4);
+
+ return (u8)dword_value;
+}
+
+/**
+ *
+ * @out]: remote_node_table The remote that which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use. none
+ */
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries)
+{
+ u32 index;
+
+ /*
+ * Initialize the raw data we could improve the speed by only initializing
+ * those entries that we are actually going to be used */
+ memset(
+ remote_node_table->available_remote_nodes,
+ 0x00,
+ sizeof(remote_node_table->available_remote_nodes)
+ );
+
+ memset(
+ remote_node_table->remote_node_groups,
+ 0x00,
+ sizeof(remote_node_table->remote_node_groups)
+ );
+
+ /* Initialize the available remote node sets */
+ remote_node_table->available_nodes_array_size = (u16)
+ (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+ /* Initialize each full DWORD to a FULL SET of remote nodes */
+ for (index = 0; index < remote_node_entries; index++) {
+ sci_remote_node_table_set_node_index(remote_node_table, index);
+ }
+
+ remote_node_table->group_array_size = (u16)
+ (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+ + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+ for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+ /*
+ * These are all guaranteed to be full slot values so fill them in the
+ * available sets of 3 remote nodes */
+ sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+ }
+
+ /* Now fill in any remainders that we may find */
+ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+ sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+ } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+ sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+ }
+}
+
+/**
+ *
+ * @out]: remote_node_table The remote node table from which to allocate a
+ * remote node.
+ * @table_index: The group index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table. The
+ * table index will determine from which remote node group table to search.
+ * This search may fail and another group node table can be specified. The
+ * function is designed to allow a serach of the available single remote node
+ * group up to the triple remote node group. If an entry is found in the
+ * specified table the remote node is removed and the remote node groups are
+ * updated. The RNi value or an invalid remote node context if an RNi can not
+ * be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u8 index;
+ u8 group_value;
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ /* We could not find an available slot in the table selector 0 */
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ group_value = sci_remote_node_table_get_group_value(
+ remote_node_table, group_index);
+
+ for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+ if (((1 << index) & group_value) != 0) {
+ /* We have selected a bit now clear it */
+ remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+ + index);
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_node_index(
+ remote_node_table, remote_node_index
+ );
+
+ if (group_table_index > 0) {
+ sci_remote_node_table_set_group_index(
+ remote_node_table, group_table_index - 1, group_index
+ );
+ }
+
+ break;
+ }
+ }
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ * remote node entries.
+ * @group_table_index: THis is the group table index which must equal two (2)
+ * for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries. If
+ * there are no remaining triple entries the function will return a failure.
+ * The remote node index that represents three consecutive remote node entries
+ * or an invalid remote node context if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_group(
+ remote_node_table, group_index
+ );
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ * allocation is to take place.
+ * @remote_node_count: This is ther remote node count which is one of
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that mataches the remote node count
+ * specified by the caller. Valid values for remote node count is
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is
+ * the remote node index that is returned or an invalid remote node context.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count)
+{
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 0);
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 1);
+ }
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 2);
+ }
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_triple_remote_node(
+ remote_node_table, 2);
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table:
+ *
+ * This method will free a single remote node index back to the remote node
+ * table. This routine will update the remote node groups
+ */
+static void sci_remote_node_table_release_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+ u8 group_value;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+ /*
+ * Assert that we are not trying to add an entry to a slot that is already
+ * full. */
+ BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+ if (group_value == 0x00) {
+ /*
+ * There are no entries in this slot so it must be added to the single
+ * slot table. */
+ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+ } else if ((group_value & (group_value - 1)) == 0) {
+ /*
+ * There is only one entry in this slot so it must be moved from the
+ * single slot table to the dual slot table */
+ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+ } else {
+ /*
+ * There are two entries in the slot so it must be moved from the dual
+ * slot table to the tripple slot table. */
+ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+ }
+
+ sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ * index is to be freed.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_set_group_index(
+ remote_node_table, 2, group_index
+ );
+
+ sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ * to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ * to be freed.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index)
+{
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_single_remote_node(
+ remote_node_table, remote_node_index);
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_triple_remote_node(
+ remote_node_table, remote_node_index);
+ }
+}
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 000000000..721ab982d
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node index in the remtoe node table The
+ * SCU hardware requires that STP remote node entries take three consecutive
+ * remote node index so the table is arranged in sets of three. The bits are
+ * used as 0111 0111 to make a byte and the bits define the set of three remote
+ * nodes to use as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDS take the remote node sets
+ * in bytes and represent them in DWORDs. The lowest ordered bits are the ones
+ * used in case full DWORD is not being used. i.e. 0000 0000 0000 0000 0111
+ * 0111 0111 0111 // if only a single WORD is in use in the DWORD.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the numeber of remote nodes that can be represented in a
+ * byte
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
+ (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWROD
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
+
+/**
+ *
+ *
+ * Expander attached sata remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT 3
+
+/**
+ *
+ *
+ * Expander or direct attached ssp remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT 1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT 1
+
+/**
+ * struct sci_remote_node_table -
+ *
+ *
+ */
+struct sci_remote_node_table {
+ /**
+ * This field contains the array size in dwords
+ */
+ u16 available_nodes_array_size;
+
+ /**
+ * This field contains the array size of the
+ */
+ u16 group_array_size;
+
+ /**
+ * This field is the array of available remote node entries in bits.
+ * Because of the way STP remote node data is allocated on the SCU hardware
+ * the remote nodes must occupy three consecutive remote node context
+ * entries. For ease of allocation and de-allocation we have broken the
+ * sets of three into a single nibble. When the STP RNi is allocated all
+ * of the bits in the nibble are cleared. This math results in a table size
+ * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
+ */
+ u32 available_remote_nodes[
+ (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+ /**
+ * This field is the nibble selector for the above table. There are three
+ * possible selectors each for fast lookup when trying to find one, two or
+ * three remote node entries.
+ */
+ u32 remote_node_groups[
+ SCU_STP_REMOTE_NODE_COUNT][
+ (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+ + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 000000000..cfd0084f1
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3528 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <scsi/scsi_cmnd.h>
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
+#undef C
+#define C(a) (#a)
+const char *req_state_name(enum sci_base_request_states state)
+{
+ static const char * const strings[] = REQUEST_STATES;
+
+ return strings[state];
+}
+#undef C
+
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+ int idx)
+{
+ if (idx == 0)
+ return &ireq->tc->sgl_pair_ab;
+ else if (idx == 1)
+ return &ireq->tc->sgl_pair_cd;
+ else if (idx < 0)
+ return NULL;
+ else
+ return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+ struct isci_request *ireq, u32 idx)
+{
+ u32 offset;
+
+ if (idx == 0) {
+ offset = (void *) &ireq->tc->sgl_pair_ab -
+ (void *) &ihost->task_context_table[0];
+ return ihost->tc_dma + offset;
+ } else if (idx == 1) {
+ offset = (void *) &ireq->tc->sgl_pair_cd -
+ (void *) &ihost->task_context_table[0];
+ return ihost->tc_dma + offset;
+ }
+
+ return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+ e->length = sg_dma_len(sg);
+ e->address_upper = upper_32_bits(sg_dma_address(sg));
+ e->address_lower = lower_32_bits(sg_dma_address(sg));
+ e->address_modifier = 0;
+}
+
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->isci_host;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scatterlist *sg = NULL;
+ dma_addr_t dma_addr;
+ u32 sg_idx = 0;
+ struct scu_sgl_element_pair *scu_sg = NULL;
+ struct scu_sgl_element_pair *prev_sg = NULL;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (sg) {
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+ init_sgl_element(&scu_sg->A, sg);
+ sg = sg_next(sg);
+ if (sg) {
+ init_sgl_element(&scu_sg->B, sg);
+ sg = sg_next(sg);
+ } else
+ memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+ if (prev_sg) {
+ dma_addr = to_sgl_element_pair_dma(ihost,
+ ireq,
+ sg_idx);
+
+ prev_sg->next_pair_upper =
+ upper_32_bits(dma_addr);
+ prev_sg->next_pair_lower =
+ lower_32_bits(dma_addr);
+ }
+
+ prev_sg = scu_sg;
+ sg_idx++;
+ }
+ } else { /* handle when no sg */
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+ dma_addr = dma_map_single(&ihost->pdev->dev,
+ task->scatter,
+ task->total_xfer_len,
+ task->data_dir);
+
+ ireq->zero_scatter_daddr = dma_addr;
+
+ scu_sg->A.length = task->total_xfer_len;
+ scu_sg->A.address_upper = upper_32_bits(dma_addr);
+ scu_sg->A.address_lower = lower_32_bits(dma_addr);
+ }
+
+ if (scu_sg) {
+ scu_sg->next_pair_upper = 0;
+ scu_sg->next_pair_lower = 0;
+ }
+}
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+ struct ssp_cmd_iu *cmd_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ cmd_iu = &ireq->ssp.cmd;
+
+ memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+ cmd_iu->add_cdb_len = 0;
+ cmd_iu->_r_a = 0;
+ cmd_iu->_r_b = 0;
+ cmd_iu->en_fburst = 0; /* unsupported */
+ cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_attr = task->ssp_task.task_attr;
+ cmd_iu->_r_c = 0;
+
+ sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
+ (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
+}
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+ struct ssp_task_iu *task_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ task_iu = &ireq->ssp.tmf;
+
+ memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+ memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+ task_iu->task_func = isci_tmf->tmf_code;
+ task_iu->task_tag =
+ (test_bit(IREQ_TMF, &ireq->flags)) ?
+ isci_tmf->io_tag :
+ SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method is will fill in the SCU Task Context for any type of SSP request.
+ * @sci_req:
+ * @task_context:
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with the its required data */
+ task_context->abort = 0;
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+
+ /* task_context->type.ssp.tag = ireq->io_tag; */
+ task_context->task_phase = 0x01;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+
+ /*
+ * Copy the physical address for the command buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /*
+ * Copy the physical address for the response buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+ task_context->response_iu_upper = upper_32_bits(dma_addr);
+ task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+static u8 scu_bg_blk_size(struct scsi_device *sdp)
+{
+ switch (sdp->sector_size) {
+ case 512:
+ return 0;
+ case 1024:
+ return 1;
+ case 4096:
+ return 3;
+ default:
+ return 0xff;
+ }
+}
+
+static u32 scu_dif_bytes(u32 len, u32 sector_size)
+{
+ return (len >> ilog2(sector_size)) * 8;
+}
+
+static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
+{
+ struct scu_task_context *tc = ireq->tc;
+ struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+ u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+ tc->block_guard_enable = 1;
+ tc->blk_prot_en = 1;
+ tc->blk_sz = blk_sz;
+ /* DIF write insert */
+ tc->blk_prot_func = 0x2;
+
+ tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+ scmd->device->sector_size);
+
+ /* always init to 0, used by hw */
+ tc->interm_crc_val = 0;
+
+ tc->init_crc_seed = 0;
+ tc->app_tag_verify = 0;
+ tc->app_tag_gen = 0;
+ tc->ref_tag_seed_verify = 0;
+
+ /* always init to same as bg_blk_sz */
+ tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+ tc->reserved_DC_0 = 0;
+
+ /* always init to 8 */
+ tc->DIF_bytes_immed_val = 8;
+
+ tc->reserved_DC_1 = 0;
+ tc->bgc_blk_sz = scmd->device->sector_size;
+ tc->reserved_E0_0 = 0;
+ tc->app_tag_gen_mask = 0;
+
+ /** setup block guard control **/
+ tc->bgctl = 0;
+
+ /* DIF write insert */
+ tc->bgctl_f.op = 0x2;
+
+ tc->app_tag_verify_mask = 0;
+
+ /* must init to 0 for hw */
+ tc->blk_guard_err = 0;
+
+ tc->reserved_E8_0 = 0;
+
+ if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+ tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+ else if (type & SCSI_PROT_DIF_TYPE3)
+ tc->ref_tag_seed_gen = 0;
+}
+
+static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
+{
+ struct scu_task_context *tc = ireq->tc;
+ struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+ u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+ tc->block_guard_enable = 1;
+ tc->blk_prot_en = 1;
+ tc->blk_sz = blk_sz;
+ /* DIF read strip */
+ tc->blk_prot_func = 0x1;
+
+ tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+ scmd->device->sector_size);
+
+ /* always init to 0, used by hw */
+ tc->interm_crc_val = 0;
+
+ tc->init_crc_seed = 0;
+ tc->app_tag_verify = 0;
+ tc->app_tag_gen = 0;
+
+ if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+ tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+ else if (type & SCSI_PROT_DIF_TYPE3)
+ tc->ref_tag_seed_verify = 0;
+
+ /* always init to same as bg_blk_sz */
+ tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+ tc->reserved_DC_0 = 0;
+
+ /* always init to 8 */
+ tc->DIF_bytes_immed_val = 8;
+
+ tc->reserved_DC_1 = 0;
+ tc->bgc_blk_sz = scmd->device->sector_size;
+ tc->reserved_E0_0 = 0;
+ tc->app_tag_gen_mask = 0;
+
+ /** setup block guard control **/
+ tc->bgctl = 0;
+
+ /* DIF read strip */
+ tc->bgctl_f.crc_verify = 1;
+ tc->bgctl_f.op = 0x1;
+ if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
+ tc->bgctl_f.ref_tag_chk = 1;
+ tc->bgctl_f.app_f_detect = 1;
+ } else if (type & SCSI_PROT_DIF_TYPE3)
+ tc->bgctl_f.app_ref_f_detect = 1;
+
+ tc->app_tag_verify_mask = 0;
+
+ /* must init to 0 for hw */
+ tc->blk_guard_err = 0;
+
+ tc->reserved_E8_0 = 0;
+ tc->ref_tag_seed_gen = 0;
+}
+
+/**
+ * This method is will fill in the SCU Task Context for a SSP IO request.
+ * @sci_req:
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+ enum dma_data_direction dir,
+ u32 len)
+{
+ struct scu_task_context *task_context = ireq->tc;
+ struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
+ struct scsi_cmnd *scmd = sas_task->uldd_task;
+ u8 prot_type = scsi_get_prot_type(scmd);
+ u8 prot_op = scsi_get_prot_op(scmd);
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_cmd_iu) / sizeof(u32);
+ task_context->type.ssp.frame_type = SSP_COMMAND;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ default:
+ task_context->task_type = SCU_TASK_TYPE_IOREAD;
+ break;
+ case DMA_TO_DEVICE:
+ task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+ break;
+ }
+
+ task_context->transfer_length_bytes = len;
+
+ if (task_context->transfer_length_bytes > 0)
+ sci_request_build_sgl(ireq);
+
+ if (prot_type != SCSI_PROT_DIF_TYPE0) {
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
+ }
+}
+
+/**
+ * This method will fill in the SCU Task Context for a SSP Task request. The
+ * following important settings are utilized: -# priority ==
+ * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
+ * ahead of other task destined for the same Remote Node. -# task_type ==
+ * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
+ * (i.e. non-raw frame) is being utilized to perform task management. -#
+ * control_frame == 1. This ensures that the proper endianess is set so
+ * that the bytes are transmitted in the right order for a task frame.
+ * @sci_req: This parameter specifies the task request object being
+ * constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 1;
+ task_context->priority = SCU_TASK_PRIORITY_HIGH;
+ task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
+ task_context->transfer_length_bytes = 0;
+ task_context->type.ssp.frame_type = SSP_TASK;
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method is will fill in the SCU Task Context for any type of SATA
+ * request. This is called from the various SATA constructors.
+ * @sci_req: The general IO request object which is to be used in
+ * constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ * constructed.
+ *
+ * The general io request construction is complete. The buffer assignment for
+ * the command buffer is complete. none Revisit task context construction to
+ * determine what is common for SSP/SMP/STP task context structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with the its required data */
+ task_context->abort = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+ task_context->task_phase = 0x01;
+
+ task_context->ssp_command_iu_length =
+ (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+ /* Set the first word of the H2D REG FIS */
+ task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context. We must offset the command buffer by 4 bytes because the
+ * first 4 bytes are transfered in the body of the TC.
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq,
+ ((char *) &ireq->stp.cmd) +
+ sizeof(u32));
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /* SATA Requests do not have a response buffer */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+}
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
+ task_context->type.stp.fis_type = FIS_REGH2D;
+ task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+ bool copy_rx_frame)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ scu_stp_raw_request_construct_task_context(ireq);
+
+ stp_req->status = 0;
+ stp_req->sgl.offset = 0;
+ stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+ if (copy_rx_frame) {
+ sci_request_build_sgl(ireq);
+ stp_req->sgl.index = 0;
+ } else {
+ /* The user does not want the data copied to the SGL buffer location */
+ stp_req->sgl.index = -1;
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: This parameter specifies the request to be constructed as an
+ * optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
+ * value of 1 indicates NCQ.
+ *
+ * This method will perform request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
+ * returns an indication as to whether the construction was successful.
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+ u8 optimized_task_type,
+ u32 len,
+ enum dma_data_direction dir)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ /* Build the STP task context structure */
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ /* Copy over the SGL elements */
+ sci_request_build_sgl(ireq);
+
+ /* Copy over the number of bytes to be transfered */
+ task_context->transfer_length_bytes = len;
+
+ if (dir == DMA_TO_DEVICE) {
+ /*
+ * The difference between the DMA IN and DMA OUT request task type
+ * values are consistent with the difference between FPDMA READ
+ * and FPDMA WRITE values. Add the supplied task type parameter
+ * to this difference to set the task type properly for this
+ * DATA OUT (WRITE) case. */
+ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+ - SCU_TASK_TYPE_DMA_IN);
+ } else {
+ /*
+ * For the DATA IN (READ) case, simply save the supplied
+ * optimized task type. */
+ task_context->task_type = optimized_task_type;
+ }
+}
+
+static void sci_atapi_construct(struct isci_request *ireq)
+{
+ struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
+ struct sas_task *task;
+
+ /* To simplify the implementation we take advantage of the
+ * silicon's partial acceleration of atapi protocol (dma data
+ * transfers), so we promote all commands to dma protocol. This
+ * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
+ */
+ h2d_fis->features |= ATAPI_PKT_DMA;
+
+ scu_stp_raw_request_construct_task_context(ireq);
+
+ task = isci_request_access_task(ireq);
+ if (task->data_dir == DMA_NONE)
+ task->total_xfer_len = 0;
+
+ /* clear the response so we can detect arrivial of an
+ * unsolicited h2d fis
+ */
+ ireq->stp.rsp.fis_type = 0;
+}
+
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+ u32 len,
+ enum dma_data_direction dir,
+ bool copy)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct domain_device *dev = ireq->target_device->domain_dev;
+
+ /* check for management protocols */
+ if (test_bit(IREQ_TMF, &ireq->flags)) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "management protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Non-ATA protocol in SATA path: 0x%x\n",
+ __func__,
+ task->task_proto);
+ return SCI_FAILURE;
+
+ }
+
+ /* ATAPI */
+ if (dev->sata_dev.class == ATA_DEV_ATAPI &&
+ task->ata_task.fis.command == ATA_CMD_PACKET) {
+ sci_atapi_construct(ireq);
+ return SCI_SUCCESS;
+ }
+
+ /* non data */
+ if (task->data_dir == DMA_NONE) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ }
+
+ /* NCQ */
+ if (task->ata_task.use_ncq) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_FPDMAQ_READ,
+ len, dir);
+ return SCI_SUCCESS;
+ }
+
+ /* DMA */
+ if (task->ata_task.dma_xfer) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_DMA_IN,
+ len, dir);
+ return SCI_SUCCESS;
+ } else /* PIO */
+ return sci_stp_pio_request_construct(ireq, copy);
+
+ return status;
+}
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SAS_PROTOCOL_SSP;
+
+ scu_ssp_io_request_construct_task_context(ireq,
+ task->data_dir,
+ task->total_xfer_len);
+
+ sci_io_request_build_ssp_command_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+ struct isci_request *ireq)
+{
+ /* Construct the SSP Task SCU Task Context */
+ scu_ssp_task_request_construct_task_context(ireq);
+
+ /* Fill in the SSP Task IU */
+ sci_task_request_build_ssp_task_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+ enum sci_status status;
+ bool copy = false;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SAS_PROTOCOL_STP;
+
+ copy = (task->data_dir == DMA_NONE) ? false : true;
+
+ status = sci_io_request_construct_sata(ireq,
+ task->total_xfer_len,
+ task->data_dir,
+ copy);
+
+ if (status == SCI_SUCCESS)
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @ireq: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ u32 ret_val = 0;
+
+ if (readl(&ihost->smu_registers->address_modifier) == 0) {
+ void __iomem *scu_reg_base = ihost->scu_registers;
+
+ /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+ * BAR1 is the scu_registers
+ * 0x20002C = 0x200000 + 0x2c
+ * = start of task context SRAM + offset of (type.ssp.data_offset)
+ * TCi is the io_tag of struct sci_request
+ */
+ ret_val = readl(scu_reg_base +
+ (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+ ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+ }
+
+ return ret_val;
+}
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct scu_task_context *tc = ireq->tc;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (state != SCI_REQ_CONSTRUCTED) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request requested to start while in wrong "
+ "state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+ switch (tc->protocol_type) {
+ case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+ case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+ /* SSP/SMP Frame */
+ tc->type.ssp.tag = ireq->io_tag;
+ tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_STP:
+ /* STP/SATA Frame
+ * tc->type.stp.ncq_tag = ireq->ncq_tag;
+ */
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+ /* / @todo When do we set no protocol type? */
+ break;
+
+ default:
+ /* This should never happen since we build the IO
+ * requests */
+ break;
+ }
+
+ /* Add to the post_context the io tag value */
+ ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+ /* Everything is good go ahead and change state */
+ sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_CONSTRUCTED:
+ /* Set to make sure no HW terminate posting is done: */
+ set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_STARTED:
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ case SCI_REQ_SMP_WAIT_RESP:
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ case SCI_REQ_STP_PIO_WAIT_FRAME:
+ case SCI_REQ_STP_PIO_DATA_IN:
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ case SCI_REQ_ATAPI_WAIT_H2D:
+ case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
+ case SCI_REQ_ATAPI_WAIT_D2H:
+ case SCI_REQ_ATAPI_WAIT_TC_COMP:
+ /* Fall through and change state to ABORTING... */
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ /* The task frame was already confirmed to have been
+ * sent by the SCU HW. Since the state machine is
+ * now only waiting for the task response itself,
+ * abort the request and complete it immediately
+ * and don't wait for the task response.
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ /* Fall through and handle like ABORTING... */
+ case SCI_REQ_ABORTING:
+ if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
+ set_bit(IREQ_PENDING_ABORT, &ireq->flags);
+ else
+ clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
+ /* If the request is only waiting on the remote device
+ * suspension, return SUCCESS so the caller will wait too.
+ */
+ return SCI_SUCCESS;
+ case SCI_REQ_COMPLETED:
+ default:
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: SCIC IO Request requested to abort while in wrong "
+ "state %d\n", __func__, ireq->sm.current_state_id);
+ break;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+ "isci: request completion from wrong state (%s)\n",
+ req_state_name(state)))
+ return SCI_FAILURE_INVALID_STATE;
+
+ if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+ sci_controller_release_frame(ihost,
+ ireq->saved_rx_frame_index);
+
+ /* XXX can we just stop the machine and remove the 'final' state? */
+ sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ if (state != SCI_REQ_STP_PIO_DATA_IN) {
+ dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
+ __func__, event_code, req_state_name(state));
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ switch (scu_get_event_specifier(event_code)) {
+ case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+ /* We are waiting for data and the SCU has R_ERR the data frame.
+ * Go back to waiting for the D2H Register FIS
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ return SCI_SUCCESS;
+ default:
+ dev_err(&ihost->pdev->dev,
+ "%s: pio request unexpected event %#x\n",
+ __func__, event_code);
+
+ /* TODO Should we fail the PIO request when we get an
+ * unexpected event?
+ */
+ return SCI_FAILURE;
+ }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ * instead of sense data.
+ * @sci_req: This parameter specifies the request object for which to copy
+ * the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+ void *resp_buf;
+ u32 len;
+ struct ssp_response_iu *ssp_response;
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ ssp_response = &ireq->ssp.rsp;
+
+ resp_buf = &isci_tmf->resp.resp_iu;
+
+ len = min_t(u32,
+ SSP_RESP_IU_MAX_SIZE,
+ be32_to_cpu(ssp_response->response_data_len));
+
+ memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ struct ssp_response_iu *resp_iu;
+ u8 datapres;
+
+ /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
+ * to determine SDMA status
+ */
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+ /* There are times when the SCU hardware will return an early
+ * response because the io request specified more data than is
+ * returned by the target device (mode pages, inquiry data,
+ * etc.). We must check the response stats to see if this is
+ * truly a failed request or a good request that just got
+ * completed early.
+ */
+ struct ssp_response_iu *resp = &ireq->ssp.rsp;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ if (resp->status == 0) {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ }
+ break;
+ }
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+ }
+
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+ /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+ * guaranteed to be received before this completion status is
+ * posted?
+ */
+ resp_iu = &ireq->ssp.rsp;
+ datapres = resp_iu->datapres;
+
+ if (datapres == 1 || datapres == 2) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ break;
+ /* only stp device gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+ if (ireq->protocol == SAS_PROTOCOL_STP) {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ } else {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ }
+ break;
+
+ /* both stp/ssp device gets suspended */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ break;
+
+ /* neither ssp nor stp gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+ default:
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ break;
+ }
+
+ /*
+ * TODO: This is probably wrong for ACK/NAK timeout conditions
+ */
+
+ /* In all cases we will treat this as the completion of the IO req. */
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* Unless we get some strange error wait for the task abort to complete
+ * TODO: Should there be a state change for this completion?
+ */
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ /* Currently, the decision is to simply allow the task request
+ * to timeout if the task IU wasn't received successfully.
+ * There is a potential for receiving multiple task responses if
+ * we decide to send the task IU again.
+ */
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: TaskRequest:0x%p CompletionCode:%x - "
+ "ACK/NAK timeout\n", __func__, ireq,
+ completion_code);
+
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* In the AWAIT RESPONSE state, any TC completion is
+ * unexpected. but if the TC has success status, we
+ * complete the IO anyway.
+ */
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ /* These status has been seen in a specific LSI
+ * expander, which sometimes is not able to send smp
+ * response within 2 ms. This causes our hardware break
+ * the connection and set TC completion with one of
+ * these SMP_XXX_XX_ERR status. For these type of error,
+ * we ask ihost user to retry the request.
+ */
+ ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+ ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be complete. If a NAK
+ * was received, then it is up to the user to retry the request
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+ struct scu_sgl_element *sgl;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct isci_request *ireq = to_ireq(stp_req);
+ struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ if (!sgl_pair)
+ sgl = NULL;
+ else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+ if (sgl_pair->B.address_lower == 0 &&
+ sgl_pair->B.address_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+ sgl = &sgl_pair->B;
+ }
+ } else {
+ if (sgl_pair->next_pair_lower == 0 &&
+ sgl_pair->next_pair_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->index++;
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ sgl = &sgl_pair->A;
+ }
+ }
+
+ return sgl;
+}
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
+
+/* transmit DATA_FIS from (current sgl + offset) for input
+ * parameter length. current sgl and offset is alreay stored in the IO request
+ */
+static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
+ struct isci_request *ireq,
+ u32 length)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_task_context *task_context = ireq->tc;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *current_sgl;
+
+ /* Recycle the TC and reconstruct it for sending out DATA FIS containing
+ * for the data from current_sgl+offset for the input length
+ */
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+ current_sgl = &sgl_pair->A;
+ else
+ current_sgl = &sgl_pair->B;
+
+ /* update the TC */
+ task_context->command_iu_upper = current_sgl->address_upper;
+ task_context->command_iu_lower = current_sgl->address_lower;
+ task_context->transfer_length_bytes = length;
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ /* send the new TC out. */
+ return sci_controller_continue_io(ireq);
+}
+
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_sgl_element_pair *sgl_pair;
+ enum sci_status status = SCI_SUCCESS;
+ struct scu_sgl_element *sgl;
+ u32 offset;
+ u32 len = 0;
+
+ offset = stp_req->sgl.offset;
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+ return SCI_FAILURE;
+
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+ sgl = &sgl_pair->A;
+ len = sgl_pair->A.length - offset;
+ } else {
+ sgl = &sgl_pair->B;
+ len = sgl_pair->B.length - offset;
+ }
+
+ if (stp_req->pio_len == 0)
+ return SCI_SUCCESS;
+
+ if (stp_req->pio_len >= len) {
+ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
+ if (status != SCI_SUCCESS)
+ return status;
+ stp_req->pio_len -= len;
+
+ /* update the current sgl, offset and save for future */
+ sgl = pio_sgl_next(stp_req);
+ offset = 0;
+ } else if (stp_req->pio_len < len) {
+ sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
+
+ /* Sgl offset will be adjusted and saved for future */
+ offset += stp_req->pio_len;
+ sgl->address_lower += stp_req->pio_len;
+ stp_req->pio_len = 0;
+ }
+
+ stp_req->sgl.offset = offset;
+
+ return status;
+}
+
+/**
+ *
+ * @stp_request: The request that is used for the SGL processing.
+ * @data_buffer: The buffer of data to be copied.
+ * @length: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the length specified to the IO reqeust SGL
+ * specified data region. enum sci_status
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+ u8 *data_buf, u32 len)
+{
+ struct isci_request *ireq;
+ u8 *src_addr;
+ int copy_len;
+ struct sas_task *task;
+ struct scatterlist *sg;
+ void *kaddr;
+ int total_len = len;
+
+ ireq = to_ireq(stp_req);
+ task = isci_request_access_task(ireq);
+ src_addr = data_buf;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (total_len > 0) {
+ struct page *page = sg_page(sg);
+
+ copy_len = min_t(int, total_len, sg_dma_len(sg));
+ kaddr = kmap_atomic(page);
+ memcpy(kaddr + sg->offset, src_addr, copy_len);
+ kunmap_atomic(kaddr);
+ total_len -= copy_len;
+ src_addr += copy_len;
+ sg = sg_next(sg);
+ }
+ } else {
+ BUG_ON(task->total_xfer_len < total_len);
+ memcpy(task->scatter, src_addr, total_len);
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region. enum sci_status
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+ struct isci_stp_request *stp_req,
+ u8 *data_buffer)
+{
+ enum sci_status status;
+
+ /*
+ * If there is less than 1K remaining in the transfer request
+ * copy just the data for the transfer */
+ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, stp_req->pio_len);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len = 0;
+ } else {
+ /* We are transfering the whole frame so copy */
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool all_frames_transferred = false;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* Transmit data */
+ if (stp_req->pio_len != 0) {
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status == SCI_SUCCESS) {
+ if (stp_req->pio_len == 0)
+ all_frames_transferred = true;
+ }
+ } else if (stp_req->pio_len == 0) {
+ /*
+ * this will happen if the all data is written at the
+ * first time after the pio setup fis is received
+ */
+ all_frames_transferred = true;
+ }
+
+ /* all data transferred. */
+ if (all_frames_transferred) {
+ /*
+ * Change the state to SCI_REQ_STP_PIO_DATA_IN
+ * and wait for PIO_SETUP fis / or D2H REg fis. */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct dev_to_host_fis *frame_header;
+ enum sci_status status;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if ((status == SCI_SUCCESS) &&
+ (frame_header->fis_type == FIS_REGD2H)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+}
+
+static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ enum sci_status status;
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (frame_header->fis_type != FIS_REGD2H) {
+ dev_err(&ireq->isci_host->pdev->dev,
+ "%s ERROR: invalid fis type 0x%X\n",
+ __func__, frame_header->fis_type);
+ return SCI_FAILURE;
+ }
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ (u32 *)frame_header,
+ frame_buffer);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+}
+
+static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ enum sci_status status;
+
+ status = process_unsolicited_fis(ireq, frame_index);
+
+ if (status == SCI_SUCCESS) {
+ if (ireq->stp.rsp.status & ATA_ERR)
+ status = SCI_IO_FAILURE_RESPONSE_VALID;
+ } else {
+ status = SCI_IO_FAILURE_RESPONSE_VALID;
+ }
+
+ if (status != SCI_SUCCESS) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = status;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+
+ /* the d2h ufi is the end of non-data commands */
+ if (task->data_dir == DMA_NONE)
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ return status;
+}
+
+static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
+{
+ struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
+ void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
+ struct scu_task_context *task_context = ireq->tc;
+
+ /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
+ * type. The TC for previous Packet fis was already there, we only need to
+ * change the H2D fis content.
+ */
+ memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
+ memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
+ memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
+ task_context->type.stp.fis_type = FIS_DATA;
+ task_context->transfer_length_bytes = dev->cdb_len;
+}
+
+static void scu_atapi_construct_task_context(struct isci_request *ireq)
+{
+ struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scu_task_context *task_context = ireq->tc;
+ int cdb_len = dev->cdb_len;
+
+ /* reference: SSTL 1.13.4.2
+ * task_type, sata_direction
+ */
+ if (task->data_dir == DMA_TO_DEVICE) {
+ task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
+ task_context->sata_direction = 0;
+ } else {
+ /* todo: for NO_DATA command, we need to send out raw frame. */
+ task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
+ task_context->sata_direction = 1;
+ }
+
+ memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+ memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
+ task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
+
+ /* task phase is set to TX_CMD */
+ task_context->task_phase = 0x1;
+
+ /* retry counter */
+ task_context->stp_retry_count = 0;
+
+ /* data transfer size. */
+ task_context->transfer_length_bytes = task->total_xfer_len;
+
+ /* setup sgl */
+ sci_request_build_sgl(ireq);
+}
+
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ enum sci_base_request_states state;
+ enum sci_status status;
+ ssize_t word_cnt;
+
+ state = ireq->sm.current_state_id;
+ switch (state) {
+ case SCI_REQ_STARTED: {
+ struct ssp_frame_hdr ssp_hdr;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+ sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+ if (ssp_hdr.frame_type == SSP_RESPONSE) {
+ struct ssp_response_iu *resp_iu;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&resp_iu);
+
+ sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+ resp_iu = &ireq->ssp.rsp;
+
+ if (resp_iu->datapres == 0x01 ||
+ resp_iu->datapres == 0x02) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ } else {
+ /* not a response frame, why did it get forwarded? */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n", __func__, ireq,
+ frame_index, ssp_hdr.frame_type);
+ }
+
+ /*
+ * In any case we are done with this frame buffer return it to
+ * the controller
+ */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_io_request_copy_response(ireq);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_SMP_WAIT_RESP: {
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scatterlist *sg = &task->smp_task.smp_resp;
+ void *frame_header, *kaddr;
+ u8 *rsp;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+ kaddr = kmap_atomic(sg_page(sg));
+ rsp = kaddr + sg->offset;
+ sci_swab32_cpy(rsp, frame_header, 1);
+
+ if (rsp[0] == SMP_RESPONSE) {
+ void *smp_resp;
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ &smp_resp);
+
+ word_cnt = (sg->length/4)-1;
+ if (word_cnt > 0)
+ word_cnt = min_t(unsigned int, word_cnt,
+ SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
+ sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+ } else {
+ /*
+ * This was not a response frame why did it get
+ * forwarded?
+ */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC SMP Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n",
+ __func__,
+ ireq,
+ frame_index,
+ rsp[0]);
+
+ ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ }
+ kunmap_atomic(kaddr);
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return sci_stp_request_udma_general_frame_handler(ireq,
+ frame_index);
+
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ /* Use the general frame handler to copy the resposne data */
+ status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n", __func__, stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_WAIT_FRAME: {
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__, stp_req, frame_index, status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_PIO_SETUP:
+ /* Get from the frame buffer the PIO Setup Data */
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ /* Get the data from the PIO Setup The SCU Hardware
+ * returns first word in the frame_header and the rest
+ * of the data is in the frame buffer so we need to
+ * back up one dword
+ */
+
+ /* transfer_count: first 16bits in the 4th dword */
+ stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+ /* status: 4th byte in the 3rd dword */
+ stp_req->status = (frame_buffer[2] >> 24) & 0xff;
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ ireq->stp.rsp.status = stp_req->status;
+
+ /* The next state is dependent on whether the
+ * request was PIO Data-in or Data out
+ */
+ if (task->data_dir == DMA_FROM_DEVICE) {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+ } else if (task->data_dir == DMA_TO_DEVICE) {
+ /* Transmit data */
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+ }
+ break;
+
+ case FIS_SETDEVBITS:
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ case FIS_REGD2H:
+ if (frame_header->status & ATA_BUSY) {
+ /*
+ * Now why is the drive sending a D2H Register
+ * FIS when it is still busy? Do nothing since
+ * we are still in the right state.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received "
+ "D2H Register FIS with BSY status "
+ "0x%x\n",
+ __func__,
+ stp_req,
+ frame_header->status);
+ break;
+ }
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* FIXME: what do we do here? */
+ break;
+ }
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_DATA_IN: {
+ struct dev_to_host_fis *frame_header;
+ struct sata_fis_data *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ if (frame_header->fis_type != FIS_DATA) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received frame %d "
+ "with fis type 0x%02x when expecting a data "
+ "fis.\n",
+ __func__,
+ stp_req,
+ frame_index,
+ frame_header->fis_type);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return status;
+ }
+
+ if (stp_req->sgl.index < 0) {
+ ireq->saved_rx_frame_index = frame_index;
+ stp_req->pio_len = 0;
+ } else {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ status = sci_stp_request_pio_data_in_copy_data(stp_req,
+ (u8 *)frame_buffer);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+
+ /* Check for the end of the transfer, are there more
+ * bytes remaining for this data transfer
+ */
+ if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+ return status;
+
+ if ((stp_req->status & ATA_BUSY) == 0) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ return status;
+ }
+
+ case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ sci_controller_release_frame(ihost, frame_index);
+ ireq->target_device->working_request = ireq;
+ if (task->data_dir == DMA_NONE) {
+ sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
+ scu_atapi_reconstruct_raw_frame_task_context(ireq);
+ } else {
+ sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+ scu_atapi_construct_task_context(ireq);
+ }
+
+ sci_controller_continue_io(ireq);
+ return SCI_SUCCESS;
+ }
+ case SCI_REQ_ATAPI_WAIT_D2H:
+ return atapi_d2h_reg_frame_handler(ireq, frame_index);
+ case SCI_REQ_ABORTING:
+ /*
+ * TODO: Is it even possible to get an unsolicited frame in the
+ * aborting state?
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given unexpected frame %x while "
+ "in state %d\n",
+ __func__,
+ frame_index,
+ state);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ /* We must check ther response buffer to see if the D2H
+ * Register FIS was received before we got the TC
+ * completion.
+ */
+ if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+ sci_remote_device_suspend(ireq->target_device,
+ SCI_SW_SUSPEND_NORMAL);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ /* If we have an error completion status for the
+ * TC then we can expect a D2H register FIS from
+ * the device so we must change state to wait
+ * for it
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+ }
+ break;
+
+ /* TODO Check to see if any of these completion status need to
+ * wait for the device to host register fis.
+ */
+ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+ * - this comes only for B0
+ */
+ default:
+ /* All other completion status cause the IO to be complete. */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
+ enum sci_base_request_states next)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, next);
+ break;
+ default:
+ /* All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
+ u32 completion_code)
+{
+ struct isci_remote_device *idev = ireq->target_device;
+ struct dev_to_host_fis *d2h = &ireq->stp.rsp;
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
+ u16 len = sci_req_tx_bytes(ireq);
+
+ /* likely non-error data underrrun, workaround missing
+ * d2h frame from the controller
+ */
+ if (d2h->fis_type != FIS_REGD2H) {
+ d2h->fis_type = FIS_REGD2H;
+ d2h->flags = (1 << 6);
+ d2h->status = 0x50;
+ d2h->error = 0;
+ d2h->lbal = 0;
+ d2h->byte_count_low = len & 0xff;
+ d2h->byte_count_high = len >> 8;
+ d2h->device = 0xa0;
+ d2h->lbal_exp = 0;
+ d2h->lbam_exp = 0;
+ d2h->lbah_exp = 0;
+ d2h->_r_a = 0;
+ d2h->sector_count = 0x3;
+ d2h->sector_count_exp = 0;
+ d2h->_r_b = 0;
+ d2h->_r_c = 0;
+ d2h->_r_d = 0;
+ }
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+ status = ireq->sci_status;
+
+ /* the hw will have suspended the rnc, so complete the
+ * request upon pending resume
+ */
+ sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+ break;
+ }
+ case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
+ /* In this case, there is no UF coming after.
+ * compelte the IO now.
+ */
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ if (d2h->fis_type == FIS_REGD2H) {
+ /* UF received change the device state to ATAPI_ERROR */
+ status = ireq->sci_status;
+ sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+ } else {
+ /* If receiving any non-success TC status, no UF
+ * received yet, then an UF for the status fis
+ * is coming after (XXX: suspect this is
+ * actually a protocol error or a bug like the
+ * DONE_UNEXP_FIS case)
+ */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+
+ sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+ }
+ break;
+ }
+
+ return status;
+}
+
+static int sci_request_smp_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_smp_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ return 0; /* There are no Tx/Rx SMP suspend conditions. */
+}
+
+static int sci_request_ssp_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_ssp_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ return 0; /* There are no Tx/Rx SSP suspend conditions. */
+}
+
+static int sci_request_stpsata_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+ case SCU_TASK_DONE_LL_R_ERR:
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+ return 1;
+ }
+ return 0;
+}
+
+
+static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ case SCU_TASK_DONE_LL_LF_TERM:
+ case SCU_TASK_DONE_BREAK_RCVD:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ return 1;
+ }
+ return 0;
+}
+
+static void sci_request_handle_suspending_completions(
+ struct isci_request *ireq,
+ u32 completion_code)
+{
+ int is_tx = 0;
+ int is_tx_rx = 0;
+
+ switch (ireq->protocol) {
+ case SAS_PROTOCOL_SMP:
+ is_tx = sci_request_smp_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ case SAS_PROTOCOL_SSP:
+ is_tx = sci_request_ssp_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ case SAS_PROTOCOL_STP:
+ is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx =
+ sci_request_stpsata_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ default:
+ dev_warn(&ireq->isci_host->pdev->dev,
+ "%s: request %p has no valid protocol\n",
+ __func__, ireq);
+ break;
+ }
+ if (is_tx || is_tx_rx) {
+ BUG_ON(is_tx && is_tx_rx);
+
+ sci_remote_node_context_suspend(
+ &ireq->target_device->rnc,
+ SCI_HW_SUSPEND,
+ (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+ : SCU_EVENT_TL_RNC_SUSPEND_TX);
+ }
+}
+
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ /* Decode those completions that signal upcoming suspension events. */
+ sci_request_handle_suspending_completions(
+ ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
+
+ switch (state) {
+ case SCI_REQ_STARTED:
+ return request_started_state_tc_event(ireq, completion_code);
+
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ return ssp_task_request_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_RESP:
+ return smp_request_await_response_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ return smp_request_await_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return stp_request_udma_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ return stp_request_non_data_await_h2d_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ return stp_request_pio_await_h2d_completion_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+ case SCI_REQ_ABORTING:
+ return request_aborting_state_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_ATAPI_WAIT_H2D:
+ return atapi_raw_completion(ireq, completion_code,
+ SCI_REQ_ATAPI_WAIT_PIO_SETUP);
+
+ case SCI_REQ_ATAPI_WAIT_TC_COMP:
+ return atapi_raw_completion(ireq, completion_code,
+ SCI_REQ_ATAPI_WAIT_D2H);
+
+ case SCI_REQ_ATAPI_WAIT_D2H:
+ return atapi_data_tc_completion_handler(ireq, completion_code);
+
+ default:
+ dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
+ __func__, completion_code, req_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ * response iu, in the task struct, from the request object for the upper
+ * layer driver.
+ * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * none.
+ */
+static void isci_request_process_response_iu(
+ struct sas_task *task,
+ struct ssp_response_iu *resp_iu,
+ struct device *dev)
+{
+ dev_dbg(dev,
+ "%s: resp_iu = %p "
+ "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+ "resp_iu->response_data_len = %x, "
+ "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ __func__,
+ resp_iu,
+ resp_iu->status,
+ resp_iu->datapres,
+ resp_iu->response_data_len,
+ resp_iu->sense_data_len);
+
+ task->task_status.stat = resp_iu->status;
+
+ /* libsas updates the task status fields based on the response iu. */
+ sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ * completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ * abandon-class reject.
+ *
+ * none.
+ */
+static void isci_request_set_open_reject_status(
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum sas_open_rej_reason open_rej_reason)
+{
+ /* Task in the target is done. */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAS_OPEN_REJECT;
+ task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ * controller-specific I/O completion error conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ *
+ * none.
+ */
+static void isci_request_handle_controller_specific_errors(
+ struct isci_remote_device *idev,
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr)
+{
+ unsigned int cstatus;
+
+ cstatus = request->scu_status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+ "- controller status = 0x%x\n",
+ __func__, request, cstatus);
+
+ /* Decode the controller-specific errors; most
+ * important is to recognize those conditions in which
+ * the target may still have a task outstanding that
+ * must be aborted.
+ *
+ * Note that there are SCU completion codes being
+ * named in the decode below for which SCIC has already
+ * done work to handle them in a way other than as
+ * a controller-specific completion code; these are left
+ * in the decode below for completeness sake.
+ */
+ switch (cstatus) {
+ case SCU_TASK_DONE_DMASETUP_DIRERR:
+ /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+ case SCU_TASK_DONE_XFERCNT_ERR:
+ /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ } else {
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ }
+
+ break;
+
+ case SCU_TASK_DONE_CRC_ERR:
+ case SCU_TASK_DONE_NAK_CMD_ERR:
+ case SCU_TASK_DONE_EXCESS_DATA:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ /* Also SCU_TASK_DONE_UNEXP_RESP: */
+ case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
+ /* These are conditions in which the target
+ * has completed the task, so that no cleanup
+ * is necessary.
+ */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+
+ /* Note that the only open reject completion codes seen here will be
+ * abandon-class codes; all others are automatically retried in the SCU.
+ */
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_WRONG_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+ /* Note - the return of AB0 will change when
+ * libsas implements detection of zone violations.
+ */
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_RESV_AB0);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_RESV_AB1);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_RESV_AB2);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_RESV_AB3);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_BAD_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_STP_NORES);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_EPROTO);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ SAS_OREJ_CONN_RATE);
+ break;
+
+ case SCU_TASK_DONE_LL_R_ERR:
+ /* Also SCU_TASK_DONE_ACK_NAK_TO: */
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ /* Also SCU_TASK_DONE_NAK_ERR:*/
+ case SCU_TASK_DONE_LL_LF_TERM:
+ /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+ case SCU_TASK_DONE_LL_ABORT_ERR:
+ case SCU_TASK_DONE_SEQ_INV_TYPE:
+ /* Also SCU_TASK_DONE_UNEXP_XR: */
+ case SCU_TASK_DONE_XR_IU_LEN_ERR:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ /* Also SCU_TASK_DONE_XR_WD_LEN: */
+ case SCU_TASK_DONE_SDMA_ERR:
+ case SCU_TASK_DONE_OFFSET_ERR:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
+ case SCU_TASK_DONE_SMP_LL_RX_ERR:
+ case SCU_TASK_DONE_UNEXP_DATA:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_DONE_TASK_ABORT:
+ default:
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ if (task->task_proto == SAS_PROTOCOL_SMP)
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ else
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+ }
+}
+
+static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (void *)&ts->buf[0];
+
+ resp->frame_len = sizeof(*fis);
+ memcpy(resp->ending_fis, fis, sizeof(*fis));
+ ts->buf_valid_size = sizeof(*resp);
+
+ /* If an error is flagged let libata decode the fis */
+ if (ac_err_mask(fis->status))
+ ts->stat = SAS_PROTO_RESPONSE;
+ else
+ ts->stat = SAM_STAT_GOOD;
+
+ ts->resp = SAS_TASK_COMPLETE;
+}
+
+static void isci_request_io_request_complete(struct isci_host *ihost,
+ struct isci_request *request,
+ enum sci_io_status completion_status)
+{
+ struct sas_task *task = isci_request_access_task(request);
+ struct ssp_response_iu *resp_iu;
+ unsigned long task_flags;
+ struct isci_remote_device *idev = request->target_device;
+ enum service_response response = SAS_TASK_UNDELIVERED;
+ enum exec_status status = SAS_ABORTED_TASK;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, task = %p, "
+ "task->data_dir = %d completion_status = 0x%x\n",
+ __func__, request, task, task->data_dir, completion_status);
+
+ /* The request is done from an SCU HW perspective. */
+
+ /* This is an active request being completed from the core. */
+ switch (completion_status) {
+
+ case SCI_IO_FAILURE_RESPONSE_VALID:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+ __func__, request, task);
+
+ if (sas_protocol_ata(task->task_proto)) {
+ isci_process_stp_response(task, &request->stp.rsp);
+ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+ /* crack the iu response buffer. */
+ resp_iu = &request->ssp.rsp;
+ isci_request_process_response_iu(task, resp_iu,
+ &ihost->pdev->dev);
+
+ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+ "SAS_PROTOCOL_SMP protocol\n",
+ __func__);
+
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+
+ /* use the task status set in the task struct by the
+ * isci_request_process_response_iu call.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = task->task_status.resp;
+ status = task->task_status.stat;
+ break;
+
+ case SCI_IO_SUCCESS:
+ case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+ response = SAS_TASK_COMPLETE;
+ status = SAM_STAT_GOOD;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+ /* This was an SSP / STP / SATA transfer.
+ * There is a possibility that less data than
+ * the maximum was transferred.
+ */
+ u32 transferred_length = sci_req_tx_bytes(request);
+
+ task->task_status.residual
+ = task->total_xfer_len - transferred_length;
+
+ /* If there were residual bytes, call this an
+ * underrun.
+ */
+ if (task->task_status.residual != 0)
+ status = SAS_DATA_UNDERRUN;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+ __func__, status);
+
+ } else
+ dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
+ __func__);
+ break;
+
+ case SCI_IO_FAILURE_TERMINATED:
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+ __func__, request, task);
+
+ /* The request was terminated explicitly. */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+ break;
+
+ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+ isci_request_handle_controller_specific_errors(idev, request,
+ task, &response,
+ &status);
+ break;
+
+ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+ /* This is a special case, in that the I/O completion
+ * is telling us that the device needs a reset.
+ * In order for the device reset condition to be
+ * noticed, the I/O has to be handled in the error
+ * handler. Set the reset flag and cause the
+ * SCSI error thread to be scheduled.
+ */
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+ case SCI_FAILURE_RETRY_REQUIRED:
+
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+
+ default:
+ /* Catch any otherwise unhandled error codes here. */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: invalid completion code: 0x%x - "
+ "isci_request = %p\n",
+ __func__, completion_status, request);
+
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ if (SAS_PROTOCOL_SMP == task->task_proto)
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ else
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ if (task->data_dir == DMA_NONE)
+ break;
+ if (task->num_scatter == 0)
+ /* 0 indicates a single dma address */
+ dma_unmap_single(&ihost->pdev->dev,
+ request->zero_scatter_daddr,
+ task->total_xfer_len, task->data_dir);
+ else /* unmap the sgl dma addresses */
+ dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+ request->num_sg_entries, task->data_dir);
+ break;
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct smp_req *smp_req;
+ void *kaddr;
+
+ dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ /* need to swab it back in case the command buffer is re-used */
+ kaddr = kmap_atomic(sg_page(sg));
+ smp_req = kaddr + sg->offset;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ kunmap_atomic(kaddr);
+ break;
+ }
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+ /* Normal notification (task_done) */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+ /* complete the io request to the core. */
+ sci_controller_complete_io(ihost, request->target_device, request);
+
+ /* set terminated handle so it cannot be completed or
+ * terminated again, and to cause any calls into abort
+ * task to recognize the already completed case.
+ */
+ set_bit(IREQ_TERMINATED, &request->flags);
+
+ ireq_done(ihost, request, task);
+}
+
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct domain_device *dev = ireq->target_device->domain_dev;
+ enum sci_base_request_states state;
+ struct sas_task *task;
+
+ /* XXX as hch said always creating an internal sas_task for tmf
+ * requests would simplify the driver
+ */
+ task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
+
+ /* all unaccelerated request types (non ssp or ncq) handled with
+ * substates
+ */
+ if (!task && dev->dev_type == SAS_END_DEVICE) {
+ state = SCI_REQ_TASK_WAIT_TC_COMP;
+ } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+ state = SCI_REQ_SMP_WAIT_RESP;
+ } else if (task && sas_protocol_ata(task->task_proto) &&
+ !task->ata_task.use_ncq) {
+ if (dev->sata_dev.class == ATA_DEV_ATAPI &&
+ task->ata_task.fis.command == ATA_CMD_PACKET) {
+ state = SCI_REQ_ATAPI_WAIT_H2D;
+ } else if (task->data_dir == DMA_NONE) {
+ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+ } else if (task->ata_task.dma_xfer) {
+ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+ } else /* PIO */ {
+ state = SCI_REQ_STP_PIO_WAIT_H2D;
+ }
+ } else {
+ /* SSP or NCQ are fully accelerated, no substates */
+ return;
+ }
+ sci_change_state(sm, state);
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct isci_host *ihost = ireq->owning_controller;
+
+ /* Tell the SCI_USER that the IO request is complete */
+ if (!test_bit(IREQ_TMF, &ireq->flags))
+ isci_request_io_request_complete(ihost, ireq,
+ ireq->sci_status);
+ else
+ isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ /* Setting the abort bit in the Task Context is required by the silicon. */
+ ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static const struct sci_base_state sci_request_state_table[] = {
+ [SCI_REQ_INIT] = { },
+ [SCI_REQ_CONSTRUCTED] = { },
+ [SCI_REQ_STARTED] = {
+ .enter_state = sci_request_started_state_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+ [SCI_REQ_STP_PIO_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+ [SCI_REQ_STP_PIO_DATA_IN] = { },
+ [SCI_REQ_STP_PIO_DATA_OUT] = { },
+ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+ [SCI_REQ_SMP_WAIT_RESP] = { },
+ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_ATAPI_WAIT_H2D] = { },
+ [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
+ [SCI_REQ_ATAPI_WAIT_D2H] = { },
+ [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
+ [SCI_REQ_COMPLETED] = {
+ .enter_state = sci_request_completed_state_enter,
+ },
+ [SCI_REQ_ABORTING] = {
+ .enter_state = sci_request_aborting_state_enter,
+ },
+ [SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+ ireq->target_device = idev;
+ ireq->protocol = SAS_PROTOCOL_NONE;
+ ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+ ireq->sci_status = SCI_SUCCESS;
+ ireq->scu_status = 0;
+ ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+ if (dev->dev_type == SAS_END_DEVICE)
+ /* pass */;
+ else if (dev_is_sata(dev))
+ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+ else if (dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag, struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
+ set_bit(IREQ_TMF, &ireq->flags);
+ memset(ireq->tc, 0, sizeof(struct scu_task_context));
+
+ /* Set the protocol indicator. */
+ if (dev_is_sata(dev))
+ ireq->protocol = SAS_PROTOCOL_STP;
+ else
+ ireq->protocol = SAS_PROTOCOL_SSP;
+ } else
+ status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+ struct isci_request *request)
+{
+ enum sci_status status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: request = %p\n",
+ __func__,
+ request);
+ status = sci_io_request_construct_basic_ssp(request);
+ return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ enum sci_status status;
+
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: ireq = %p\n",
+ __func__,
+ ireq);
+
+ memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+ if (!task->ata_task.device_control_reg_update)
+ fis->flags |= 0x80;
+ fis->flags &= 0xF0;
+
+ status = sci_io_request_construct_basic_sata(ireq);
+
+ if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ)) {
+ fis->sector_count = qc->tag << 3;
+ ireq->tc->type.stp.ncq_tag = qc->tag;
+ }
+
+ return status;
+}
+
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+ struct isci_request *ireq,
+ struct sas_task *task)
+{
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct isci_remote_device *idev;
+ struct scu_task_context *task_context;
+ struct isci_port *iport;
+ struct smp_req *smp_req;
+ void *kaddr;
+ u8 req_len;
+ u32 cmd;
+
+ kaddr = kmap_atomic(sg_page(sg));
+ smp_req = kaddr + sg->offset;
+ /*
+ * Look at the SMP requests' header fields; for certain SAS 1.x SMP
+ * functions under SAS 2.0, a zero request length really indicates
+ * a non-zero default length.
+ */
+ if (smp_req->req_len == 0) {
+ switch (smp_req->func) {
+ case SMP_DISCOVER:
+ case SMP_REPORT_PHY_ERR_LOG:
+ case SMP_REPORT_PHY_SATA:
+ case SMP_REPORT_ROUTE_INFO:
+ smp_req->req_len = 2;
+ break;
+ case SMP_CONF_ROUTE_INFO:
+ case SMP_PHY_CONTROL:
+ case SMP_PHY_TEST_FUNCTION:
+ smp_req->req_len = 9;
+ break;
+ /* Default - zero is a valid default for 2.0. */
+ }
+ }
+ req_len = smp_req->req_len;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ cmd = *(u32 *) smp_req;
+ kunmap_atomic(kaddr);
+
+ if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+ return SCI_FAILURE;
+
+ ireq->protocol = SAS_PROTOCOL_SMP;
+
+ /* byte swap the smp request. */
+
+ task_context = ireq->tc;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /*
+ * Fill in the TC with the its required data
+ * 00h
+ */
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+ task_context->abort = 0;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ /* 04h */
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+ /* 08h */
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 1;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ /* 0ch */
+ task_context->address_modifier = 0;
+
+ /* 10h */
+ task_context->ssp_command_iu_length = req_len;
+
+ /* 14h */
+ task_context->transfer_length_bytes = 0;
+
+ /*
+ * 18h ~ 30h, protocol specific
+ * since commandIU has been build by framework at this point, we just
+ * copy the frist DWord from command IU to this location. */
+ memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+ /*
+ * 40h
+ * "For SMP you could program it to zero. We would prefer that way
+ * so that done code will be consistent." - Venki
+ */
+ task_context->task_phase = 0;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context command buffer should not contain command header.
+ */
+ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+ task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+ /* SMP response comes as UF, so no need to set response IU address. */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ * request construct function.
+ *
+ * SCI_SUCCESS on successfull completion, or specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct device *dev = &ireq->isci_host->pdev->dev;
+ enum sci_status status = SCI_FAILURE;
+
+ status = sci_io_request_construct_smp(dev, ireq, task);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: failed with status = %d\n",
+ __func__,
+ status);
+
+ return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ * request construct function.
+ * @sci_device: This parameter is the handle for the sci core's remote device
+ * object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successfull completion, or specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+ struct isci_request *request,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(request);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = 0x%p; request = %p, "
+ "num_scatter = %d\n",
+ __func__,
+ idev,
+ request,
+ task->num_scatter);
+
+ /* map the sgl addresses, if present.
+ * libata does the mapping for sata devices
+ * before we get the request.
+ */
+ if (task->num_scatter &&
+ !sas_protocol_ata(task->task_proto) &&
+ !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+ request->num_sg_entries = dma_map_sg(
+ &ihost->pdev->dev,
+ task->scatter,
+ task->num_scatter,
+ task->data_dir
+ );
+
+ if (request->num_sg_entries == 0)
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ }
+
+ status = sci_io_request_construct(ihost, idev, request);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request construct\n",
+ __func__);
+ return SCI_FAILURE;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ status = isci_smp_request_build(request);
+ break;
+ case SAS_PROTOCOL_SSP:
+ status = isci_request_ssp_request_construct(request);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ status = isci_request_stp_request_construct(request);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+ ireq->io_tag = tag;
+ ireq->io_request_completion = NULL;
+ ireq->flags = 0;
+ ireq->num_sg_entries = 0;
+
+ return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.io_task_ptr = task;
+ clear_bit(IREQ_TMF, &ireq->flags);
+ task->lldd_task = ireq;
+
+ return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+ set_bit(IREQ_TMF, &ireq->flags);
+
+ return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag)
+{
+ enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+ struct isci_request *ireq;
+ unsigned long flags;
+ int ret = 0;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
+
+ status = isci_io_request_build(ihost, ireq, idev);
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request_construct failed - status = 0x%x\n",
+ __func__,
+ status);
+ return status;
+ }
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+ if (isci_task_is_ncq_recovery(task)) {
+
+ /* The device is in an NCQ recovery state. Issue the
+ * request on the task side. Note that it will
+ * complete on the I/O request side because the
+ * request was built that way (ie.
+ * ireq->is_task_management_request is false).
+ */
+ status = sci_controller_start_task(ihost,
+ idev,
+ ireq);
+ } else {
+ status = SCI_FAILURE;
+ }
+ } else {
+ /* send the request, let the core assign the IO TAG. */
+ status = sci_controller_start_io(ihost, idev,
+ ireq);
+ }
+
+ if (status != SCI_SUCCESS &&
+ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request start (0x%x)\n",
+ __func__, status);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ return status;
+ }
+ /* Either I/O started OK, or the core has signaled that
+ * the device needs a target reset.
+ */
+ if (status != SCI_SUCCESS) {
+ /* The request did not really start in the
+ * hardware, so clear the request handle
+ * here so no terminations will be done.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status ==
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* Signal libsas that we need the SCSI error
+ * handler thread to work on this I/O and that
+ * we want a device reset.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Cause this task to be scheduled in the SCSI error
+ * handler thread.
+ */
+ sas_task_abort(task);
+
+ /* Change the status, since we are holding
+ * the I/O until it is managed by the SCSI
+ * error handler.
+ */
+ status = SCI_SUCCESS;
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 000000000..aff95317f
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,310 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
+ * @pio_len - number of bytes requested at PIO setup
+ * @status - pio setup ending status value to tell us if we need
+ * to wait for another fis or if the transfer is complete. Upon
+ * receipt of a d2h fis this will be the status field of that fis.
+ * @sgl - track pio transfer progress as we iterate through the sgl
+ */
+struct isci_stp_request {
+ u32 pio_len;
+ u8 status;
+
+ struct isci_stp_pio_sgl {
+ int index;
+ u8 set;
+ u32 offset;
+ } sgl;
+};
+
+struct isci_request {
+ #define IREQ_COMPLETE_IN_TARGET 0
+ #define IREQ_TERMINATED 1
+ #define IREQ_TMF 2
+ #define IREQ_ACTIVE 3
+ #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
+ #define IREQ_TC_ABORT_POSTED 5
+ #define IREQ_ABORT_PATH_ACTIVE 6
+ #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
+ unsigned long flags;
+ /* XXX kill ttype and ttype_ptr, allocate full sas_task */
+ union ttype_ptr_union {
+ struct sas_task *io_task_ptr; /* When ttype==io_task */
+ struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
+ } ttype_ptr;
+ struct isci_host *isci_host;
+ dma_addr_t request_daddr;
+ dma_addr_t zero_scatter_daddr;
+ unsigned int num_sg_entries;
+ /* Note: "io_request_completion" is completed in two different ways
+ * depending on whether this is a TMF or regular request.
+ * - TMF requests are completed in the thread that started them;
+ * - regular requests are completed in the request completion callback
+ * function.
+ * This difference in operation allows the aborter of a TMF request
+ * to be sure that once the TMF request completes, the I/O that the
+ * TMF was aborting is guaranteed to have completed.
+ *
+ * XXX kill io_request_completion
+ */
+ struct completion *io_request_completion;
+ struct sci_base_state_machine sm;
+ struct isci_host *owning_controller;
+ struct isci_remote_device *target_device;
+ u16 io_tag;
+ enum sas_protocol protocol;
+ u32 scu_status; /* hardware result */
+ u32 sci_status; /* upper layer disposition */
+ u32 post_context;
+ struct scu_task_context *tc;
+ /* could be larger with sg chaining */
+ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
+ struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+ /* This field is a pointer to the stored rx frame data. It is used in
+ * STP internal requests and SMP response frames. If this field is
+ * non-NULL the saved frame must be released on IO request completion.
+ */
+ u32 saved_rx_frame_index;
+
+ union {
+ struct {
+ union {
+ struct ssp_cmd_iu cmd;
+ struct ssp_task_iu tmf;
+ };
+ union {
+ struct ssp_response_iu rsp;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ };
+ } ssp;
+ struct {
+ struct isci_stp_request req;
+ struct host_to_dev_fis cmd;
+ struct dev_to_host_fis rsp;
+ } stp;
+ };
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+ struct isci_request *ireq;
+
+ ireq = container_of(stp_req, typeof(*ireq), stp.req);
+ return ireq;
+}
+
+/**
+ * enum sci_base_request_states - request state machine states
+ *
+ * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
+ *
+ * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
+ * constructed. This state is entered from the INITIAL state.
+ *
+ * @SCI_REQ_STARTED: This state indicates that the request has been started.
+ * This state is entered from the CONSTRUCTED state.
+ *
+ * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ * @SCI_REQ_STP_UDMA_WAIT_D2H:
+ * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ *
+ * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
+ * waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame
+ * received is based on the result of the prior frame and line conditions.
+ *
+ * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
+ * waiting for a DATA frame from the device.
+ *
+ * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
+ * waiting to transmit the next data frame to the device.
+ *
+ * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
+ * waiting for either a PIO Setup.
+ *
+ * @SCI_REQ_ATAPI_WAIT_D2H: The non-data IO transit to this state in this state
+ * after receiving TC completion. While in this state IO request object is
+ * waiting for D2H status frame as UF.
+ *
+ * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
+ * task context completion after every frame submission, so in the
+ * non-accelerated case we need to expect the completion for the "cdb" frame.
+ *
+ * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started raw task management request is waiting for the transmission of
+ * the initial frame (i.e. command, task, etc.).
+ *
+ * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
+ * management request is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started task
+ * management request is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started SMP request is waiting for the transmission of the initial frame
+ * (i.e. command, task, etc.).
+ *
+ * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered from the
+ * ABORTING state.
+ *
+ * @SCI_REQ_ABORTING: This state indicates that the request is in the process
+ * of being terminated/aborted. This state is entered from the CONSTRUCTED
+ * state. This state is entered from the STARTED state.
+ *
+ * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
+ */
+#define REQUEST_STATES {\
+ C(REQ_INIT),\
+ C(REQ_CONSTRUCTED),\
+ C(REQ_STARTED),\
+ C(REQ_STP_UDMA_WAIT_TC_COMP),\
+ C(REQ_STP_UDMA_WAIT_D2H),\
+ C(REQ_STP_NON_DATA_WAIT_H2D),\
+ C(REQ_STP_NON_DATA_WAIT_D2H),\
+ C(REQ_STP_PIO_WAIT_H2D),\
+ C(REQ_STP_PIO_WAIT_FRAME),\
+ C(REQ_STP_PIO_DATA_IN),\
+ C(REQ_STP_PIO_DATA_OUT),\
+ C(REQ_ATAPI_WAIT_H2D),\
+ C(REQ_ATAPI_WAIT_PIO_SETUP),\
+ C(REQ_ATAPI_WAIT_D2H),\
+ C(REQ_ATAPI_WAIT_TC_COMP),\
+ C(REQ_TASK_WAIT_TC_COMP),\
+ C(REQ_TASK_WAIT_TC_RESP),\
+ C(REQ_SMP_WAIT_RESP),\
+ C(REQ_SMP_WAIT_TC_COMP),\
+ C(REQ_COMPLETED),\
+ C(REQ_ABORTING),\
+ C(REQ_FINAL),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_base_request_states REQUEST_STATES;
+#undef C
+const char *req_state_name(enum sci_base_request_states state);
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+
+ char *requested_addr = (char *)virt_addr;
+ char *base_addr = (char *)ireq;
+
+ BUG_ON(requested_addr < base_addr);
+ BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+ return ireq->request_daddr + (requested_addr - base_addr);
+}
+
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag,
+ struct isci_request *ireq);
+enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+ return (sas_protocol_ata(task->task_proto) &&
+ task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+ task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+
+}
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 000000000..dc26b4aea
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,217 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS Types These constants depict the various SATA FIS types devined in
+ * the serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D 0x27
+#define FIS_REGD2H 0x34
+#define FIS_SETDEVBITS 0xA1
+#define FIS_DMA_ACTIVATE 0x39
+#define FIS_DMA_SETUP 0x41
+#define FIS_BIST_ACTIVATE 0x58
+#define FIS_PIO_SETUP 0x5F
+#define FIS_DATA 0x46
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE 280
+
+/*
+ * contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+ u8 LUN[8];
+ u8 add_cdb_len:6;
+ u8 _r_a:2;
+ u8 _r_b;
+ u8 en_fburst:1;
+ u8 task_prio:4;
+ u8 task_attr:3;
+ u8 _r_c;
+
+ u8 cdb[16];
+} __packed;
+
+/*
+ * contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+ u8 LUN[8];
+ u8 _r_a;
+ u8 task_func;
+ u8 _r_b[4];
+ u16 task_tag;
+ u8 _r_c[12];
+} __packed;
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ * an SMP Request that is comprised of the struct smp_request_header and a
+ * phy identifier.
+ * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+ u8 _r_a[4]; /* bytes 4-7 */
+
+ u8 ign_zone_grp:1; /* byte 8 */
+ u8 _r_b:7;
+
+ u8 phy_id; /* byte 9 */
+ u8 _r_c; /* byte 10 */
+ u8 _r_d; /* byte 11 */
+} __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ * contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+ u16 exp_change_cnt; /* bytes 4-5 */
+ u8 exp_rt_idx_hi; /* byte 6 */
+ u8 exp_rt_idx; /* byte 7 */
+
+ u8 _r_a; /* byte 8 */
+ u8 phy_id; /* byte 9 */
+ u16 _r_b; /* bytes 10-11 */
+
+ u8 _r_c:7; /* byte 12 */
+ u8 dis_rt_entry:1;
+ u8 _r_d[3]; /* bytes 13-15 */
+
+ u8 rt_sas_addr[8]; /* bytes 16-23 */
+ u8 _r_e[16]; /* bytes 24-39 */
+} __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ * SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+ u16 exp_change_cnt; /* byte 4-5 */
+
+ u8 _r_a[3]; /* bytes 6-8 */
+
+ u8 phy_id; /* byte 9 */
+ u8 phy_op; /* byte 10 */
+
+ u8 upd_pathway:1; /* byte 11 */
+ u8 _r_b:7;
+
+ u8 _r_c[12]; /* byte 12-23 */
+
+ u8 att_dev_name[8]; /* byte 24-31 */
+
+ u8 _r_d:4; /* byte 32 */
+ u8 min_linkrate:4;
+
+ u8 _r_e:4; /* byte 33 */
+ u8 max_linkrate:4;
+
+ u8 _r_f[2]; /* byte 34-35 */
+
+ u8 pathway:4; /* byte 36 */
+ u8 _r_g:4;
+
+ u8 _r_h[3]; /* bytes 37-39 */
+} __packed;
+
+/*
+ * struct smp_req - This structure simply unionizes the existing request
+ * structures into a common request type.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+ u8 type; /* byte 0 */
+ u8 func; /* byte 1 */
+ u8 alloc_resp_len; /* byte 2 */
+ u8 req_len; /* byte 3 */
+ u8 req_data[0];
+} __packed;
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ * represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+ u32 high;
+ u32 low;
+};
+#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 000000000..071cb74a2
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,285 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ * codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT 28
+#define SCU_COMPLETION_TYPE_MASK 0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+ ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * These macros contain the SCU completion types SCU_COMPLETION_TYPE
+ */
+#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT 22
+#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
+#define SCU_COMPLETION_PEG_MASK 0x00010000
+#define SCU_COMPLETION_PORT_MASK 0x00007000
+#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT 12
+#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a completion code and performs the shift and mask
+ * operations to turn it into a completion code that can be compared to a
+ * SCU_GET_COMPLETION_TL_STATUS.
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+ ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+ (\
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+ >> SCU_COMPLETION_TL_STATUS_SHIFT \
+ )
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro reuturns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+ (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+ ((completion_code) & SCU_COMPLETION_INDEX_MASK)
+
+#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT 16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+ (\
+ ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+ >> SCU_UNSOLICITED_FRAME_SHIFT \
+ )
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns a zero (0) value if there is no frame error otherwise it
+ * returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+ ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * 18 bits to match it with the hardware completion code. In a 16-bit compiler,
+ * immediate constants are 16-bit values (the size of an int). If we shift
+ * those by 18 bits, we completely lose the value. To ensure the value is a
+ * 32-bit value like we want, each immediate value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD ((u32)0x00)
+#define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08)
+#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
+#define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR ((U32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((U32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
+
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 000000000..36a945ad5
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT 24
+#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
+#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+ (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+ ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+ ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro constructs a combines an SCU event type and SCU event specifier
+ * from the type and code values.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+ ((type) | SCU_EVENT_SPECIFIC(code))
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * SCU_EVENT_TYPES
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+ ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+ */
+#define scu_get_event_specifier(event_code) \
+ ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+ ((event_code) & SCU_EVENT_CODE_MASK)
+
+
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+ scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 000000000..33745adc8
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constatns used by the SCU hardware to
+ * describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ * definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+ /* WORD 0 */
+
+ /**
+ * This field is the remote node index assigned for this remote node. All
+ * remote nodes must have a unique remote node index. The value of the remote
+ * node index can not exceed the maximum number of remote nodes reported in
+ * the SCU device context capacity register.
+ */
+ u32 remote_node_index:12;
+ u32 reserved0_1:4;
+
+ /**
+ * This field tells the SCU hardware how many simultaneous connections that
+ * this remote node will support.
+ */
+ u32 remote_node_port_width:4;
+
+ /**
+ * This field tells the SCU hardware which logical port to associate with this
+ * remote node.
+ */
+ u32 logical_port_index:3;
+ u32 reserved0_2:5;
+
+ /**
+ * This field will enable the I_T nexus loss timer for this remote node.
+ */
+ u32 nexus_loss_timer_enable:1;
+
+ /**
+ * This field is the for driver debug only and is not used.
+ */
+ u32 check_bit:1;
+
+ /**
+ * This field must be set to true when the hardware DMAs the remote node
+ * context to the hardware SRAM. When the remote node is being invalidated
+ * this field must be set to false.
+ */
+ u32 is_valid:1;
+
+ /**
+ * This field must be set to true.
+ */
+ u32 is_remote_node_context:1;
+
+ /* WORD 1 - 2 */
+
+ /**
+ * This is the low word of the remote device SAS Address
+ */
+ u32 remote_sas_address_lo;
+
+ /**
+ * This field is the high word of the remote device SAS Address
+ */
+ u32 remote_sas_address_hi;
+
+ /* WORD 3 */
+ /**
+ * This field reprensets the function number assigned to this remote device.
+ * This value must match the virtual function number that is being used to
+ * communicate to the device.
+ */
+ u32 function_number:8;
+ u32 reserved3_1:8;
+
+ /**
+ * This field provides the driver a way to cheat on the arbitration wait time
+ * for this remote node.
+ */
+ u32 arbitration_wait_time:16;
+
+ /* WORD 4 */
+ /**
+ * This field tells the SCU hardware how long this device may occupy the
+ * connection before it must be closed.
+ */
+ u32 connection_occupancy_timeout:16;
+
+ /**
+ * This field tells the SCU hardware how long to maintain a connection when
+ * there are no frames being transmitted on the link.
+ */
+ u32 connection_inactivity_timeout:16;
+
+ /* WORD 5 */
+ /**
+ * This field allows the driver to cheat on the arbitration wait time for this
+ * remote node.
+ */
+ u32 initial_arbitration_wait_time:16;
+
+ /**
+ * This field is tells the hardware what to program for the connection rate in
+ * the open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_connection_rate:4;
+
+ /**
+ * This field tells the SCU hardware what to program for the features in the
+ * open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_features:4;
+
+ /**
+ * This field tells the SCU hardware what to use for the source zone group in
+ * the open address frame. See the SAS spec for more details on zoning.
+ */
+ u32 oaf_source_zone_group:8;
+
+ /* WORD 6 */
+ /**
+ * This field tells the SCU hardware what to use as the more capibilities in
+ * the open address frame. See the SAS Spec for details.
+ */
+ u32 oaf_more_compatibility_features;
+
+ /* WORD 7 */
+ u32 reserved7;
+
+};
+
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ * definition for a STP remote node.
+ *
+ * STP Targets are not yet supported so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+ /**
+ * Placeholder data for the STP remote node.
+ */
+ u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+ /**
+ * SSP Remote Node
+ */
+ struct ssp_remote_node_context ssp;
+
+ /**
+ * STP Remote Node
+ */
+ struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 000000000..869a979eb
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,965 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ * context.
+ *
+ *
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumberation defines the various SSP task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+ SCU_TASK_TYPE_IOREAD, /* /< IO READ direction or no direction */
+ SCU_TASK_TYPE_IOWRITE, /* /< IO Write direction */
+ SCU_TASK_TYPE_SMP_REQUEST, /* /< SMP Request type */
+ SCU_TASK_TYPE_RESPONSE, /* /< Driver generated response frame (targt mode) */
+ SCU_TASK_TYPE_RAW_FRAME, /* /< Raw frame request type */
+ SCU_TASK_TYPE_PRIMITIVE /* /< Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+ SCU_TASK_TYPE_DMA_IN, /* /< Read request */
+ SCU_TASK_TYPE_FPDMAQ_READ, /* /< NCQ read request */
+ SCU_TASK_TYPE_PACKET_DMA_IN, /* /< Packet read request */
+ SCU_TASK_TYPE_SATA_RAW_FRAME, /* /< Raw frame request */
+ RESERVED_4,
+ RESERVED_5,
+ RESERVED_6,
+ RESERVED_7,
+ SCU_TASK_TYPE_DMA_OUT, /* /< Write request */
+ SCU_TASK_TYPE_FPDMAQ_WRITE, /* /< NCQ write Request */
+ SCU_TASK_TYPE_PACKET_DMA_OUT /* /< Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ *
+ *
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE 0
+#define SCU_RNC_CONTEXT_TYPE 1
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID 0
+#define SCU_TASK_CONTEXT_VALID 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
+#define SCU_COMMAND_CODE_ACTIVE_TASK 1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
+
+/**
+ *
+ *
+ * SCU_TASK_PRIORITY
+ */
+/**
+ *
+ *
+ * This priority is used when there is no priority request for this request.
+ */
+#define SCU_TASK_PRIORITY_NORMAL 0
+
+/**
+ *
+ *
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue. The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
+
+/**
+ *
+ *
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH 2
+
+/**
+ *
+ *
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED 3
+
+#define SCU_TASK_INITIATOR_MODE 1
+#define SCU_TASK_TARGET_MODE 0
+
+#define SCU_TASK_REGULAR 0
+#define SCU_TASK_ABORTED 1
+
+/* direction bit defintion */
+/**
+ *
+ *
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION 0
+#define SCU_SATA_READ_DATA_DIRECTION 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
+ * operations to construct the various SCU commands
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
+#define scu_get_command_request_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
+#define scu_get_command_request_subtype(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
+ (\
+ SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
+ | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
+ )
+#define scu_get_command_request_full_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
+#define scu_get_command_protocl_engine_group(x) \
+ ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
+#define scu_get_command_reqeust_logical_port(x) \
+ ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
+
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+ ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
+ *
+ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
+ * command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
+ ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ *
+ *
+ * SCU_REQUEST_TYPES These constants are the various request types that can be
+ * posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_PROTOCOL SCU Task context protocol types this is uesd to
+ * program the SCU Task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ * request.
+ *
+ *
+ */
+struct ssp_task_context {
+ /* OFFSET 0x18 */
+ u32 reserved00:24;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved01;
+
+ /* OFFSET 0x20 */
+ u32 fill_bytes:2;
+ u32 reserved02:6;
+ u32 changing_data_pointer:1;
+ u32 retransmit:1;
+ u32 retry_data_frame:1;
+ u32 tlr_control:2;
+ u32 reserved03:19;
+
+ /* OFFSET 0x24 */
+ u32 uiRsvd4;
+
+ /* OFFSET 0x28 */
+ u32 target_port_transfer_tag:16;
+ u32 tag:16;
+
+ /* OFFSET 0x2C */
+ u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ * request.
+ *
+ *
+ */
+struct stp_task_context {
+ /* OFFSET 0x18 */
+ u32 fis_type:8;
+ u32 pm_port:4;
+ u32 reserved0:3;
+ u32 control:1;
+ u32 command:8;
+ u32 features:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved1;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 ncq_tag:5;
+ u32 reserved4:27;
+
+ /* OFFSET 0x2C */
+ u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ * request.
+ *
+ *
+ */
+struct smp_task_context {
+ /* OFFSET 0x18 */
+ u32 response_length:8;
+ u32 function_result:8;
+ u32 function:8;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 smp_response_ufi:12;
+ u32 reserved1:20;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 reserved4;
+
+ /* OFFSET 0x2C */
+ u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ * when the driver wants to send a primitive on the link.
+ *
+ *
+ */
+struct primitive_task_context {
+ /* OFFSET 0x18 */
+ /**
+ * This field is the control word and it must be 0.
+ */
+ u32 control; /* /< must be set to 0 */
+
+ /* OFFSET 0x1C */
+ /**
+ * This field specifies the primitive that is to be transmitted.
+ */
+ u32 sequence;
+
+ /* OFFSET 0x20 */
+ u32 reserved0;
+
+ /* OFFSET 0x24 */
+ u32 reserved1;
+
+ /* OFFSET 0x28 */
+ u32 reserved2;
+
+ /* OFFSET 0x2C */
+ u32 reserved3;
+};
+
+/**
+ * The union of the protocols that can be selected in the SCU task context
+ * field.
+ *
+ * protocol_context
+ */
+union protocol_context {
+ struct ssp_task_context ssp;
+ struct stp_task_context stp;
+ struct smp_task_context smp;
+ struct primitive_task_context primitive;
+ u32 words[6];
+};
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ * element. SCU SGLs contain a 64 bit address with the maximum data transfer
+ * being 24 bits in size. The SGL can not cross a 4GB boundary.
+ *
+ * struct scu_sgl_element
+ */
+struct scu_sgl_element {
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address.
+ */
+ u32 address_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address.
+ */
+ u32 address_lower;
+
+ /**
+ * This field is the number of bytes to transfer.
+ */
+ u32 length:24;
+
+ /**
+ * This field is the address modifier to be used when a virtual function is
+ * requesting a data transfer.
+ */
+ u32 address_modifier:8;
+
+};
+
+#define SCU_SGL_ELEMENT_PAIR_A 0
+#define SCU_SGL_ELEMENT_PAIR_B 1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ * They are refered to in the DS specification as SGL A and SGL B. Each SGL
+ * pair is followed by the address of the next pair.
+ *
+ *
+ */
+struct scu_sgl_element_pair {
+ /* OFFSET 0x60-0x68 */
+ /**
+ * This field is the SGL element A of the SGL pair.
+ */
+ struct scu_sgl_element A;
+
+ /* OFFSET 0x6C-0x74 */
+ /**
+ * This field is the SGL element B of the SGL pair.
+ */
+ struct scu_sgl_element B;
+
+ /* OFFSET 0x78-0x7C */
+ /**
+ * This field is the upper 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_lower;
+
+};
+
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ * for the task context. This is set to 0 by the driver but can be read by
+ * issuing a dump TC request to the SCU.
+ *
+ *
+ */
+struct transport_snapshot {
+ /* OFFSET 0x48 */
+ u32 xfer_rdy_write_data_length;
+
+ /* OFFSET 0x4C */
+ u32 data_offset;
+
+ /* OFFSET 0x50 */
+ u32 data_transfer_size:24;
+ u32 reserved_50_0:8;
+
+ /* OFFSET 0x54 */
+ u32 next_initiator_write_data_offset;
+
+ /* OFFSET 0x58 */
+ u32 next_initiator_write_data_xfer_size:24;
+ u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ * silicon task context. It lays out all of the fields according to the
+ * expected order and location for the Storage Controller unit.
+ *
+ *
+ */
+struct scu_task_context {
+ /* OFFSET 0x00 ------ */
+ /**
+ * This field must be encoded to one of the valid SCU task priority values
+ * - SCU_TASK_PRIORITY_NORMAL
+ * - SCU_TASK_PRIORITY_HEAD_OF_Q
+ * - SCU_TASK_PRIORITY_HIGH
+ */
+ u32 priority:2;
+
+ /**
+ * This field must be set to true if this is an initiator generated request.
+ * Until target mode is supported all task requests are initiator requests.
+ */
+ u32 initiator_request:1;
+
+ /**
+ * This field must be set to one of the valid connection rates valid values
+ * are 0x8, 0x9, and 0xA.
+ */
+ u32 connection_rate:4;
+
+ /**
+ * This field muse be programed when generating an SMP response since the SMP
+ * connection remains open until the SMP response is generated.
+ */
+ u32 protocol_engine_index:3;
+
+ /**
+ * This field must contain the logical port for the task request.
+ */
+ u32 logical_port_index:3;
+
+ /**
+ * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+ * - SCU_TASK_CONTEXT_PROTOCOL_SMP
+ * - SCU_TASK_CONTEXT_PROTOCOL_SSP
+ * - SCU_TASK_CONTEXT_PROTOCOL_STP
+ * - SCU_TASK_CONTEXT_PROTOCOL_NONE
+ */
+ u32 protocol_type:3;
+
+ /**
+ * This filed must be set to the TCi allocated for this task
+ */
+ u32 task_index:12;
+
+ /**
+ * This field is reserved and must be set to 0x00
+ */
+ u32 reserved_00_0:1;
+
+ /**
+ * For a normal task request this must be set to 0. If this is an abort of
+ * this task request it must be set to 1.
+ */
+ u32 abort:1;
+
+ /**
+ * This field must be set to true for the SCU hardware to process the task.
+ */
+ u32 valid:1;
+
+ /**
+ * This field must be set to SCU_TASK_CONTEXT_TYPE
+ */
+ u32 context_type:1;
+
+ /* OFFSET 0x04 */
+ /**
+ * This field contains the RNi that is the target of this request.
+ */
+ u32 remote_node_index:12;
+
+ /**
+ * This field is programmed if this is a mirrored request, which we are not
+ * using, in which case it is the RNi for the mirrored target.
+ */
+ u32 mirrored_node_index:12;
+
+ /**
+ * This field is programmed with the direction of the SATA reqeust
+ * - SCU_SATA_WRITE_DATA_DIRECTION
+ * - SCU_SATA_READ_DATA_DIRECTION
+ */
+ u32 sata_direction:1;
+
+ /**
+ * This field is programmsed with one of the following SCU_COMMAND_CODE
+ * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+ * - SCU_COMMAND_CODE_ACTIVE_TASK
+ * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+ * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+ */
+ u32 command_code:2;
+
+ /**
+ * This field is set to true if the remote node should be suspended.
+ * This bit is only valid for SSP & SMP target devices.
+ */
+ u32 suspend_node:1;
+
+ /**
+ * This field is programmed with one of the following command type codes
+ *
+ * For SAS requests use the scu_ssp_task_type
+ * - SCU_TASK_TYPE_IOREAD
+ * - SCU_TASK_TYPE_IOWRITE
+ * - SCU_TASK_TYPE_SMP_REQUEST
+ * - SCU_TASK_TYPE_RESPONSE
+ * - SCU_TASK_TYPE_RAW_FRAME
+ * - SCU_TASK_TYPE_PRIMITIVE
+ *
+ * For SATA requests use the scu_sata_task_type
+ * - SCU_TASK_TYPE_DMA_IN
+ * - SCU_TASK_TYPE_FPDMAQ_READ
+ * - SCU_TASK_TYPE_PACKET_DMA_IN
+ * - SCU_TASK_TYPE_SATA_RAW_FRAME
+ * - SCU_TASK_TYPE_DMA_OUT
+ * - SCU_TASK_TYPE_FPDMAQ_WRITE
+ * - SCU_TASK_TYPE_PACKET_DMA_OUT
+ */
+ u32 task_type:4;
+
+ /* OFFSET 0x08 */
+ /**
+ * This field is reserved and the must be set to 0x00
+ */
+ u32 link_layer_control:8; /* presently all reserved */
+
+ /**
+ * This field is set to true when TLR is to be enabled
+ */
+ u32 ssp_tlr_enable:1;
+
+ /**
+ * This is field specifies if the SCU DMAs a response frame to host
+ * memory for good response frames when operating in target mode.
+ */
+ u32 dma_ssp_target_good_response:1;
+
+ /**
+ * This field indicates if the SCU should DMA the response frame to
+ * host memory.
+ */
+ u32 do_not_dma_ssp_good_response:1;
+
+ /**
+ * This field is set to true when strict ordering is to be enabled
+ */
+ u32 strict_ordering:1;
+
+ /**
+ * This field indicates the type of endianess to be utilized for the
+ * frame. command, task, and response frames utilized control_frame
+ * set to 1.
+ */
+ u32 control_frame:1;
+
+ /**
+ * This field is reserved and the driver should set to 0x00
+ */
+ u32 tl_control_reserved:3;
+
+ /**
+ * This field is set to true when the SCU hardware task timeout control is to
+ * be enabled
+ */
+ u32 timeout_enable:1;
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 pts_control_reserved:7;
+
+ /**
+ * This field should be set to true when block guard is to be enabled
+ */
+ u32 block_guard_enable:1;
+
+ /**
+ * This field is reserved and the driver should set to 0x00
+ */
+ u32 sdma_control_reserved:7;
+
+ /* OFFSET 0x0C */
+ /**
+ * This field is the address modifier for this io request it should be
+ * programmed with the virtual function that is making the request.
+ */
+ u32 address_modifier:16;
+
+ /**
+ * @todo What we support mirrored SMP response frame?
+ */
+ u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
+
+ /**
+ * If this is a mirrored request the logical port index for the mirrored RNi
+ * must be programmed.
+ */
+ u32 mirrored_logical_port:4; /* mirrored local port index */
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_0C_0:8;
+
+ /**
+ * This field must be set to true if the mirrored request processing is to be
+ * enabled.
+ */
+ u32 mirror_request_enable:1; /* Mirrored request Enable */
+
+ /* OFFSET 0x10 */
+ /**
+ * This field is the command iu length in dwords
+ */
+ u32 ssp_command_iu_length:8;
+
+ /**
+ * This is the target TLR enable bit it must be set to 0 when creatning the
+ * task context.
+ */
+ u32 xfer_ready_tlr_enable:1;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_10_0:7;
+
+ /**
+ * This is the maximum burst size that the SCU hardware will send in one
+ * connection its value is (N x 512) and N must be a multiple of 2. If the
+ * value is 0x00 then maximum burst size is disabled.
+ */
+ u32 ssp_max_burst_size:16;
+
+ /* OFFSET 0x14 */
+ /**
+ * This filed is set to the number of bytes to be transfered in the request.
+ */
+ u32 transfer_length_bytes:24; /* In terms of bytes */
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 reserved_14_0:8;
+
+ /* OFFSET 0x18-0x2C */
+ /**
+ * This union provides for the protocol specif part of the SCU Task Context.
+ */
+ union protocol_context type;
+
+ /* OFFSET 0x30-0x34 */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_lower;
+
+ /* OFFSET 0x38-0x3C */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_lower;
+
+ /* OFFSET 0x40 */
+ /**
+ * This field is set to the task phase of the SCU hardware. The driver must
+ * set this to 0x01
+ */
+ u32 task_phase:8;
+
+ /**
+ * This field is set to the transport layer task status. The driver must set
+ * this to 0x00
+ */
+ u32 task_status:8;
+
+ /**
+ * This field is used during initiator write TLR
+ */
+ u32 previous_extended_tag:4;
+
+ /**
+ * This field is set the maximum number of retries for a STP non-data FIS
+ */
+ u32 stp_retry_count:2;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_1:2;
+
+ /**
+ * This field is used by the SCU TL to determine when to take a snapshot when
+ * tranmitting read data frames.
+ * - 0x00 The entire IO
+ * - 0x01 32k
+ * - 0x02 64k
+ * - 0x04 128k
+ * - 0x08 256k
+ */
+ u32 ssp_tlr_threshold:4;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_2:4;
+
+ /* OFFSET 0x44 */
+ u32 write_data_length; /* read only set to 0 */
+
+ /* OFFSET 0x48-0x58 */
+ struct transport_snapshot snapshot; /* read only set to 0 */
+
+ /* OFFSET 0x5C */
+ u32 blk_prot_en:1;
+ u32 blk_sz:2;
+ u32 blk_prot_func:2;
+ u32 reserved_5C_0:9;
+ u32 active_sgl_element:2; /* read only set to 0 */
+ u32 sgl_exhausted:1; /* read only set to 0 */
+ u32 payload_data_transfer_error:4; /* read only set to 0 */
+ u32 frame_buffer_offset:11; /* read only set to 0 */
+
+ /* OFFSET 0x60-0x7C */
+ /**
+ * This field is the first SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_ab;
+ /* OFFSET 0x80-0x9C */
+ /**
+ * This field is the second SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_cd;
+
+ /* OFFSET 0xA0-BC */
+ struct scu_sgl_element_pair sgl_snapshot_ac;
+
+ /* OFFSET 0xC0 */
+ u32 active_sgl_element_pair; /* read only set to 0 */
+
+ /* OFFSET 0xC4-0xCC */
+ u32 reserved_C4_CC[3];
+
+ /* OFFSET 0xD0 */
+ u32 interm_crc_val:16;
+ u32 init_crc_seed:16;
+
+ /* OFFSET 0xD4 */
+ u32 app_tag_verify:16;
+ u32 app_tag_gen:16;
+
+ /* OFFSET 0xD8 */
+ u32 ref_tag_seed_verify;
+
+ /* OFFSET 0xDC */
+ u32 UD_bytes_immed_val:13;
+ u32 reserved_DC_0:3;
+ u32 DIF_bytes_immed_val:4;
+ u32 reserved_DC_1:12;
+
+ /* OFFSET 0xE0 */
+ u32 bgc_blk_sz:13;
+ u32 reserved_E0_0:3;
+ u32 app_tag_gen_mask:16;
+
+ /* OFFSET 0xE4 */
+ union {
+ u16 bgctl;
+ struct {
+ u16 crc_verify:1;
+ u16 app_tag_chk:1;
+ u16 ref_tag_chk:1;
+ u16 op:2;
+ u16 legacy:1;
+ u16 invert_crc_seed:1;
+ u16 ref_tag_gen:1;
+ u16 fixed_ref_tag:1;
+ u16 invert_crc:1;
+ u16 app_ref_f_detect:1;
+ u16 uninit_dif_check_err:1;
+ u16 uninit_dif_bypass:1;
+ u16 app_f_detect:1;
+ u16 reserved_0:2;
+ } bgctl_f;
+ };
+
+ u16 app_tag_verify_mask;
+
+ /* OFFSET 0xE8 */
+ u32 blk_guard_err:8;
+ u32 reserved_E8_0:24;
+
+ /* OFFSET 0xEC */
+ u32 ref_tag_seed_gen;
+
+ /* OFFSET 0xF0 */
+ u32 intermediate_crc_valid_snapshot:16;
+ u32 reserved_F0_0:16;
+
+ /* OFFSET 0xF4 */
+ u32 reference_tag_seed_for_verify_function_snapshot;
+
+ /* OFFSET 0xF8 */
+ u32 snapshot_of_reserved_dword_DC_of_tc;
+
+ /* OFFSET 0xFC */
+ u32 reference_tag_seed_for_generate_function_snapshot;
+
+} __packed;
+
+#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 000000000..6dcaed0c1
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,805 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+* isci_task_refuse() - complete the request to the upper layer driver in
+* the case where an I/O needs to be completed back in the submit path.
+* @ihost: host on which the the request was queued
+* @task: request to complete
+* @response: response code for the completed task.
+* @status: status code for the completed task.
+*
+*/
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+ enum service_response response,
+ enum exec_status status)
+
+{
+ unsigned long flags;
+
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
+ __func__, task, response, status);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ /* Normal notification (task_done) */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ task->lldd_task = NULL;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ task->task_done(task);
+}
+
+#define for_each_sas_task(num, task) \
+ for (; num > 0; num--,\
+ task = list_entry(task->list.next, struct sas_task, list))
+
+
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+ struct sas_task *task)
+{
+ return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+ (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+ isci_task_is_ncq_recovery(task))
+ : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to send a task down to
+ * hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
+{
+ struct isci_host *ihost = dev_to_ihost(task->dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ enum sci_status status = SCI_FAILURE;
+ bool io_ready;
+ u16 tag;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(task->dev);
+ io_ready = isci_device_io_ready(idev, task);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "task: %p, dev: %p idev: %p:%#lx cmd = %p\n",
+ task, task->dev, idev, idev ? idev->flags : 0,
+ task->uldd_task);
+
+ if (!idev) {
+ isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+ SAS_DEVICE_UNKNOWN);
+ } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ /* Indicate QUEUE_FULL so that the scsi midlayer
+ * retries.
+ */
+ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ } else {
+ /* There is a device and it's ready for I/O. */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ /* The I/O was aborted. */
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ isci_task_refuse(ihost, task,
+ SAS_TASK_UNDELIVERED,
+ SAM_STAT_TASK_ABORTED);
+ } else {
+ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* build and send the request. */
+ status = isci_request_execute(ihost, idev, task, tag);
+
+ if (status != SCI_SUCCESS) {
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ /* Did not really start this command. */
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (test_bit(IDEV_GONE, &idev->flags)) {
+ /* Indicate that the device
+ * is gone.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_UNDELIVERED,
+ SAS_DEVICE_UNKNOWN);
+ } else {
+ /* Indicate QUEUE_FULL so that
+ * the scsi midlayer retries.
+ * If the request failed for
+ * remote device reasons, it
+ * gets returned as
+ * SAS_TASK_UNDELIVERED next
+ * time through.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ }
+ }
+ }
+ }
+
+ if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* command never hit the device, so just free
+ * the tci and skip the sequence increment
+ */
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+
+ isci_put_device(idev);
+ return 0;
+}
+
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 tag, struct isci_tmf *isci_tmf)
+{
+ enum sci_status status = SCI_FAILURE;
+ struct isci_request *ireq = NULL;
+ struct domain_device *dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+ dev = idev->domain_dev;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+ if (!ireq)
+ return NULL;
+
+ /* let the core do it's construct. */
+ status = sci_task_request_construct(ihost, idev, tag,
+ ireq);
+
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_task_request_construct failed - "
+ "status = 0x%x\n",
+ __func__,
+ status);
+ return NULL;
+ }
+
+ /* XXX convert to get this from task->tproto like other drivers */
+ if (dev->dev_type == SAS_END_DEVICE) {
+ isci_tmf->proto = SAS_PROTOCOL_SSP;
+ status = sci_task_request_construct_ssp(ireq);
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+
+ return ireq;
+}
+
+static int isci_task_execute_tmf(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ enum sci_task_status status = SCI_TASK_FAILURE;
+ struct isci_request *ireq;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ unsigned long timeleft;
+ u16 tag;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ return ret;
+
+ /* sanity check, return TMF_RESP_FUNC_FAILED
+ * if the device is not there and ready.
+ */
+ if (!idev ||
+ (!test_bit(IDEV_IO_READY, &idev->flags) &&
+ !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p not ready (%#lx)\n",
+ __func__,
+ idev, idev ? idev->flags : 0);
+ goto err_tci;
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n",
+ __func__, idev);
+
+ /* Assign the pointer to the TMF's completion kernel wait structure. */
+ tmf->complete = &completion;
+ tmf->status = SCI_FAILURE_TIMEOUT;
+
+ ireq = isci_task_request_build(ihost, idev, tag, tmf);
+ if (!ireq)
+ goto err_tci;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* start the TMF io. */
+ status = sci_controller_start_task(ihost, idev, ireq);
+
+ if (status != SCI_TASK_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: start_io failed - status = 0x%x, request = %p\n",
+ __func__,
+ status,
+ ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ goto err_tci;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* The RNC must be unsuspended before the TMF can get a response. */
+ isci_remote_device_resume_from_abort(ihost, idev);
+
+ /* Wait for the TMF to complete, or a timeout. */
+ timeleft = wait_for_completion_timeout(&completion,
+ msecs_to_jiffies(timeout_ms));
+
+ if (timeleft == 0) {
+ /* The TMF did not complete - this could be because
+ * of an unplug. Terminate the TMF request now.
+ */
+ isci_remote_device_suspend_terminate(ihost, idev, ireq);
+ }
+
+ isci_print_tmf(ihost, tmf);
+
+ if (tmf->status == SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_COMPLETE;
+ else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: tmf.status == "
+ "SCI_FAILURE_IO_RESPONSE_VALID\n",
+ __func__);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ }
+ /* Else - leave the default "failed" status alone. */
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completed request = %p\n",
+ __func__,
+ ireq);
+
+ return ret;
+
+ err_tci:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code)
+{
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->tmf_code = code;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ struct isci_request *old_request)
+{
+ isci_task_build_tmf(tmf, code);
+ tmf->io_tag = old_request->io_tag;
+}
+
+/**
+ * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
+ * Template functions.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ u8 *lun)
+{
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, isci_device = %p\n",
+ __func__, isci_host, isci_device);
+ /* Send the LUN reset to the target. By the time the call returns,
+ * the TMF has fully exected in the target (in which case the return
+ * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
+ * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
+ */
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
+
+ #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET passed\n",
+ __func__, isci_device);
+ else
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET failed (%x)\n",
+ __func__, isci_device, ret);
+
+ return ret;
+}
+
+int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret = TMF_RESP_FUNC_COMPLETE;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_get_device(dev->lldd_dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+ __func__, dev, ihost, idev);
+
+ if (!idev) {
+ /* If the device is gone, escalate to I_T_Nexus_Reset. */
+ dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
+
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+
+ /* Suspend the RNC, kill all TCs */
+ if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+ != SCI_SUCCESS) {
+ /* The suspend/terminate only fails if isci_get_device fails */
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+ /* All pending I/Os have been terminated and cleaned up. */
+ if (!test_bit(IDEV_GONE, &idev->flags)) {
+ if (dev_is_sata(dev))
+ sas_ata_schedule_reset(dev);
+ else
+ /* Send the task management part of the reset. */
+ ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
+ }
+ out:
+ isci_put_device(idev);
+ return ret;
+}
+
+
+/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context. */
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+ struct isci_host *ihost = dev_to_ihost(task->dev);
+ DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+ struct isci_request *old_request = NULL;
+ struct isci_remote_device *idev = NULL;
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ int target_done_already = 0;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+ * in the device, because tasks driving resets may land here
+ * after completion in the core.
+ */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ spin_lock(&task->task_state_lock);
+
+ old_request = task->lldd_task;
+
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+ old_request) {
+ idev = isci_get_device(task->dev->lldd_dev);
+ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags);
+ }
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
+ __func__, idev,
+ (dev_is_sata(task->dev) ? "STP/SATA"
+ : ((dev_is_expander(task->dev))
+ ? "SMP"
+ : "SSP")),
+ ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
+ ? " IDEV_GONE"
+ : "")
+ : " <NULL>"),
+ task, old_request);
+
+ /* Device reset conditions signalled in task_state_flags are the
+ * responsbility of libsas to observe at the start of the error
+ * handler thread.
+ */
+ if (!idev || !old_request) {
+ /* The request has already completed and there
+ * is nothing to do here other than to set the task
+ * done bit, and indicate that the task abort function
+ * was successful.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
+ goto out;
+ }
+ /* Suspend the RNC, kill the TC */
+ if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
+ != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: isci_remote_device_reset_terminate(dev=%p, "
+ "req=%p, task=%p) failed\n",
+ __func__, idev, old_request, task);
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ sas_protocol_ata(task->task_proto) ||
+ target_done_already ||
+ test_bit(IDEV_GONE, &idev->flags)) {
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* No task to send, so explicitly resume the device here */
+ isci_remote_device_resume_from_abort(ihost, idev);
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: %s request"
+ " or complete_in_target (%d), "
+ "or IDEV_GONE (%d), thus no TMF\n",
+ __func__,
+ ((task->task_proto == SAS_PROTOCOL_SMP)
+ ? "SMP"
+ : (sas_protocol_ata(task->task_proto)
+ ? "SATA/STP"
+ : "<other>")
+ ),
+ test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags),
+ test_bit(IDEV_GONE, &idev->flags));
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+ } else {
+ /* Fill in the tmf structure */
+ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+ old_request);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Send the task management request. */
+ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
+ ret = isci_task_execute_tmf(ihost, idev, &tmf,
+ ISCI_ABORT_TASK_TIMEOUT_MS);
+ }
+out:
+ dev_warn(&ihost->pdev->dev,
+ "%s: Done; dev = %p, task = %p , old_request == %p\n",
+ __func__, idev, task, old_request);
+ isci_put_device(idev);
+ return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functoins called by libsas,
+ * to abort all task for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functoins called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functoins called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ * correctly escalate the failed abort to a LUN or target reset (this is
+ * because sas_scsi_find_task libsas function does not correctly interpret
+ * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
+ * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
+ * returned, libsas will turn this into a target reset
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+ struct sas_task *task)
+{
+ /* See if there is a pending device reset for this device. */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ return TMF_RESP_FUNC_FAILED;
+ else
+ return TMF_RESP_FUNC_SUCC;
+}
+
+/*
+ * isci_task_request_complete() - This function is called by the sci core when
+ * an task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ * sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+ struct isci_request *ireq,
+ enum sci_task_status completion_status)
+{
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+ struct completion *tmf_complete = NULL;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, status=%d\n",
+ __func__, ireq, completion_status);
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+ if (tmf) {
+ tmf->status = completion_status;
+
+ if (tmf->proto == SAS_PROTOCOL_SSP) {
+ memcpy(&tmf->resp.resp_iu,
+ &ireq->ssp.rsp,
+ SSP_RESP_IU_MAX_SIZE);
+ } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+ memcpy(&tmf->resp.d2h_fis,
+ &ireq->stp.rsp,
+ sizeof(struct dev_to_host_fis));
+ }
+ /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+ tmf_complete = tmf->complete;
+ }
+ sci_controller_complete_io(ihost, ireq->target_device, ireq);
+ /* set the 'terminated' flag handle to make sure it cannot be terminated
+ * or completed again.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+
+ if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ wake_up_all(&ihost->eventq);
+
+ if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+
+ /* The task management part completes last. */
+ if (tmf_complete)
+ complete(tmf_complete);
+}
+
+static int isci_reset_device(struct isci_host *ihost,
+ struct domain_device *dev,
+ struct isci_remote_device *idev)
+{
+ int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ struct isci_port *iport = dev->port->lldd_port;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+ /* Suspend the RNC, terminate all outstanding TCs. */
+ if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+ != SCI_SUCCESS) {
+ rc = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+ /* Note that since the termination for outstanding requests succeeded,
+ * this function will return success. This is because the resets will
+ * only fail if the device has been removed (ie. hotplug), and the
+ * primary duty of this function is to cleanup tasks, so that is the
+ * relevant status.
+ */
+ if (!test_bit(IDEV_GONE, &idev->flags)) {
+ if (scsi_is_sas_phy_local(phy)) {
+ struct isci_phy *iphy = &ihost->phys[phy->number];
+
+ reset_stat = isci_port_perform_hard_reset(ihost, iport,
+ iphy);
+ } else
+ reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
+ }
+ /* Explicitly resume the RNC here, since there was no task sent. */
+ isci_remote_device_resume_from_abort(ihost, idev);
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
+ __func__, idev, reset_stat);
+ out:
+ sas_put_local_phy(phy);
+ return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_get_device(dev->lldd_dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev) {
+ /* XXX: need to cleanup any ireqs targeting this
+ * domain_device
+ */
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, dev, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 000000000..8f4531f22
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,189 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+#define ISCI_TERMINATION_TIMEOUT_MSEC 500
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the possible preparations
+ * of task management requests.
+ *
+ *
+ */
+enum isci_tmf_function_codes {
+
+ isci_tmf_func_none = 0,
+ isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+ isci_tmf_ssp_lun_reset = TMF_LU_RESET,
+};
+
+/**
+ * struct isci_tmf - This class represents the task management object which
+ * acts as an interface to libsas for processing task management requests
+ *
+ *
+ */
+struct isci_tmf {
+
+ struct completion *complete;
+ enum sas_protocol proto;
+ union {
+ struct ssp_response_iu resp_iu;
+ struct dev_to_host_fis d2h_fis;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ } resp;
+ unsigned char lun[8];
+ u16 io_tag;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+};
+
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
+{
+ if (SAS_PROTOCOL_SATA == tmf->proto)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.d2h_fis.status = %x\n"
+ "tmf->resp.d2h_fis.error = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.d2h_fis.status,
+ tmf->resp.d2h_fis.error);
+ else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.resp_iu.data_present = %x\n"
+ "tmf->resp.resp_iu.status = %x\n"
+ "tmf->resp.resp_iu.data_length = %x\n"
+ "tmf->resp.resp_iu.data[0] = %x\n"
+ "tmf->resp.resp_iu.data[1] = %x\n"
+ "tmf->resp.resp_iu.data[2] = %x\n"
+ "tmf->resp.resp_iu.data[3] = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.resp_iu.datapres,
+ tmf->resp.resp_iu.status,
+ be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+ tmf->resp.resp_iu.resp_data[0],
+ tmf->resp.resp_iu.resp_data[1],
+ tmf->resp.resp_iu.resp_data[2],
+ tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+ struct sas_task *task,
+ gfp_t gfp_flags);
+
+int isci_task_abort_task(
+ struct sas_task *task);
+
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_query_task(
+ struct sas_task *task);
+
+int isci_task_lu_reset(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_nexus_port(
+ struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+ struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+ struct domain_device *d_device);
+
+void isci_task_request_complete(
+ struct isci_host *isci_host,
+ struct isci_request *request,
+ enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+ struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+ struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+ struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+ struct isci_request *request);
+
+int isci_queuecommand(
+ struct scsi_cmnd *scsi_cmd,
+ void (*donefunc)(struct scsi_cmnd *));
+
+#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 000000000..04a6d0d59
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,211 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+ struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+ struct sci_unsolicited_frame *uf;
+ dma_addr_t dma = ihost->ufi_dma;
+ void *virt = ihost->ufi_buf;
+ int i;
+
+ /*
+ * The Unsolicited Frame buffers are set at the start of the UF
+ * memory descriptor entry. The headers and address table will be
+ * placed after the buffers.
+ */
+
+ /*
+ * Program the location of the UF header table into the SCU.
+ * Notes:
+ * - The address must align on a 64-byte boundary. Guaranteed to be
+ * on 64-byte boundary already 1KB boundary for unsolicited frames.
+ * - Program unused header entries to overlap with the last
+ * unsolicited frame. The silicon will never DMA to these unused
+ * headers, since we program the UF address table pointers to
+ * NULL.
+ */
+ uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
+ uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
+
+ /*
+ * Program the location of the UF address table into the SCU.
+ * Notes:
+ * - The address must align on a 64-bit boundary. Guaranteed to be on 64
+ * byte boundary already due to above programming headers being on a
+ * 64-bit boundary and headers are on a 64-bytes in size.
+ */
+ uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+ uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+ uf_control->get = 0;
+
+ /*
+ * UF buffer requirements are:
+ * - The last entry in the UF queue is not NULL.
+ * - There is a power of 2 number of entries (NULL or not-NULL)
+ * programmed into the queue.
+ * - Aligned on a 1KB boundary. */
+
+ /*
+ * Program the actual used UF buffers into the UF address table and
+ * the controller's array of UFs.
+ */
+ for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+ uf = &uf_control->buffers.array[i];
+
+ uf_control->address_table.array[i] = dma;
+
+ uf->buffer = virt;
+ uf->header = &uf_control->headers.array[i];
+ uf->state = UNSOLICITED_FRAME_EMPTY;
+
+ /*
+ * Increment the address of the physical and virtual memory
+ * pointers. Everything is aligned on 1k boundary with an
+ * increment of 1k.
+ */
+ virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ }
+}
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+ /* Skip the first word in the frame since this is a controll word used
+ * by the hardware.
+ */
+ *frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+ *frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index)
+{
+ u32 frame_get;
+ u32 frame_cycle;
+
+ frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+ frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
+
+ /*
+ * In the event there are NULL entries in the UF table, we need to
+ * advance the get pointer in order to find out if this frame should
+ * be released (i.e. update the get pointer)
+ */
+ while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+ upper_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+ frame_get < SCU_MAX_UNSOLICITED_FRAMES)
+ frame_get++;
+
+ /*
+ * The table has a NULL entry as it's last element. This is
+ * illegal.
+ */
+ BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+ if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+ return false;
+
+ uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+ if (frame_get != frame_index) {
+ /*
+ * Frames remain in use until we advance the get pointer
+ * so there is nothing we can do here
+ */
+ return false;
+ }
+
+ /*
+ * The frame index is equal to the current get pointer so we
+ * can now free up all of the frame entries that
+ */
+ while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+ uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+ if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+ frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+ frame_get = 0;
+ } else
+ frame_get++;
+ }
+
+ uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+ return true;
+}
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 000000000..1bc551ec6
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,282 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header. The
+ * first DWORD are UF attributes defined by the silicon architecture. The data
+ * depicts actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+ /**
+ * This field indicates if there is an Initiator Index Table entry with
+ * which this header is associated.
+ */
+ u32 iit_exists:1;
+
+ /**
+ * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+ */
+ u32 protocol_type:3;
+
+ /**
+ * This field indicates if the frame is an address frame (IAF or OAF)
+ * or if it is a information unit frame.
+ */
+ u32 is_address_frame:1;
+
+ /**
+ * This field simply indicates the connection rate at which the frame
+ * was received.
+ */
+ u32 connection_rate:4;
+
+ u32 reserved:23;
+
+ /**
+ * This field represents the actual header data received on the link.
+ */
+ u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
+
+
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state. The
+ * controller object can not updtate the hardware unsolicited frame put pointer
+ * unless it has already processed the priror unsolicited frames.
+ */
+enum unsolicited_frame_state {
+ /**
+ * This state is when the frame is empty and not in use. It is
+ * different from the released state in that the hardware could DMA
+ * data to this frame buffer.
+ */
+ UNSOLICITED_FRAME_EMPTY,
+
+ /**
+ * This state is set when the frame buffer is in use by by some
+ * object in the system.
+ */
+ UNSOLICITED_FRAME_IN_USE,
+
+ /**
+ * This state is set when the frame is returned to the free pool
+ * but one or more frames prior to this one are still in use.
+ * Once all of the frame before this one are freed it will go to
+ * the empty state.
+ */
+ UNSOLICITED_FRAME_RELEASED,
+
+ UNSOLICITED_FRAME_MAX_STATES
+};
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure it acts as the container for
+ * the current frame state, frame header and frame buffer.
+ */
+struct sci_unsolicited_frame {
+ /**
+ * This field contains the current frame state
+ */
+ enum unsolicited_frame_state state;
+
+ /**
+ * This field points to the frame header data.
+ */
+ struct scu_unsolicited_frame_header *header;
+
+ /**
+ * This field points to the frame buffer data.
+ */
+ void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+ /**
+ * This field is represents a virtual pointer to the start
+ * address of the UF address table. The table contains
+ * 64-bit pointers as required by the hardware.
+ */
+ struct scu_unsolicited_frame_header *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+ * buffer array.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+ /**
+ * This field is the unsolicited frame data its used to manage
+ * the data for the unsolicited frame requests. It also represents
+ * the virtual address location that corresponds to the
+ * physical_address field.
+ */
+ struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+ /**
+ * This field specifies the physical address location for the UF
+ * buffer array.
+ */
+ dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data. The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+ /**
+ * This field represents a virtual pointer that refers to the
+ * starting address of the UF address table.
+ * 64-bit pointers are required by the hardware.
+ */
+ u64 *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+ * address table.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+ /**
+ * This field is the software copy of the unsolicited frame queue
+ * get pointer. The controller object writes this value to the
+ * hardware to let the hardware put more unsolicited frame entries.
+ */
+ u32 get;
+
+ /**
+ * This field contains all of the unsolicited frame header
+ * specific fields.
+ */
+ struct sci_uf_header_array headers;
+
+ /**
+ * This field contains all of the unsolicited frame buffer
+ * specific fields.
+ */
+ struct sci_uf_buffer_array buffers;
+
+ /**
+ * This field contains all of the unsolicited frame address table
+ * specific fields.
+ */
+ struct sci_uf_address_table_array address_table;
+
+};
+
+#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
+#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
+#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
+
+struct isci_host;
+
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer);
+
+bool sci_unsolicited_frame_control_release_frame(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
new file mode 100644
index 000000000..680bf6f0c
--- /dev/null
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -0,0 +1,495 @@
+/*
+ * Export the iSCSI boot info to userland via sysfs.
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/capability.h>
+#include <linux/iscsi_boot_sysfs.h>
+
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>");
+MODULE_DESCRIPTION("sysfs interface and helpers to export iSCSI boot information");
+MODULE_LICENSE("GPL");
+/*
+ * The kobject and attribute structures.
+ */
+struct iscsi_boot_attr {
+ struct attribute attr;
+ int type;
+ ssize_t (*show) (void *data, int type, char *buf);
+};
+
+/*
+ * The routine called for all sysfs attributes.
+ */
+static ssize_t iscsi_boot_show_attribute(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+ struct iscsi_boot_attr *boot_attr =
+ container_of(attr, struct iscsi_boot_attr, attr);
+ ssize_t ret = -EIO;
+ char *str = buf;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (boot_kobj->show)
+ ret = boot_kobj->show(boot_kobj->data, boot_attr->type, str);
+ return ret;
+}
+
+static const struct sysfs_ops iscsi_boot_attr_ops = {
+ .show = iscsi_boot_show_attribute,
+};
+
+static void iscsi_boot_kobj_release(struct kobject *kobj)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (boot_kobj->release)
+ boot_kobj->release(boot_kobj->data);
+ kfree(boot_kobj);
+}
+
+static struct kobj_type iscsi_boot_ktype = {
+ .release = iscsi_boot_kobj_release,
+ .sysfs_ops = &iscsi_boot_attr_ops,
+};
+
+#define iscsi_boot_rd_attr(fnname, sysfs_name, attr_type) \
+static struct iscsi_boot_attr iscsi_boot_attr_##fnname = { \
+ .attr = { .name = __stringify(sysfs_name), .mode = 0444 }, \
+ .type = attr_type, \
+}
+
+/* Target attrs */
+iscsi_boot_rd_attr(tgt_index, index, ISCSI_BOOT_TGT_INDEX);
+iscsi_boot_rd_attr(tgt_flags, flags, ISCSI_BOOT_TGT_FLAGS);
+iscsi_boot_rd_attr(tgt_ip, ip-addr, ISCSI_BOOT_TGT_IP_ADDR);
+iscsi_boot_rd_attr(tgt_port, port, ISCSI_BOOT_TGT_PORT);
+iscsi_boot_rd_attr(tgt_lun, lun, ISCSI_BOOT_TGT_LUN);
+iscsi_boot_rd_attr(tgt_chap, chap-type, ISCSI_BOOT_TGT_CHAP_TYPE);
+iscsi_boot_rd_attr(tgt_nic, nic-assoc, ISCSI_BOOT_TGT_NIC_ASSOC);
+iscsi_boot_rd_attr(tgt_name, target-name, ISCSI_BOOT_TGT_NAME);
+iscsi_boot_rd_attr(tgt_chap_name, chap-name, ISCSI_BOOT_TGT_CHAP_NAME);
+iscsi_boot_rd_attr(tgt_chap_secret, chap-secret, ISCSI_BOOT_TGT_CHAP_SECRET);
+iscsi_boot_rd_attr(tgt_chap_rev_name, rev-chap-name,
+ ISCSI_BOOT_TGT_REV_CHAP_NAME);
+iscsi_boot_rd_attr(tgt_chap_rev_secret, rev-chap-name-secret,
+ ISCSI_BOOT_TGT_REV_CHAP_SECRET);
+
+static struct attribute *target_attrs[] = {
+ &iscsi_boot_attr_tgt_index.attr,
+ &iscsi_boot_attr_tgt_flags.attr,
+ &iscsi_boot_attr_tgt_ip.attr,
+ &iscsi_boot_attr_tgt_port.attr,
+ &iscsi_boot_attr_tgt_lun.attr,
+ &iscsi_boot_attr_tgt_chap.attr,
+ &iscsi_boot_attr_tgt_nic.attr,
+ &iscsi_boot_attr_tgt_name.attr,
+ &iscsi_boot_attr_tgt_chap_name.attr,
+ &iscsi_boot_attr_tgt_chap_secret.attr,
+ &iscsi_boot_attr_tgt_chap_rev_name.attr,
+ &iscsi_boot_attr_tgt_chap_rev_secret.attr,
+ NULL
+};
+
+static umode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_tgt_index.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_INDEX);
+ else if (attr == &iscsi_boot_attr_tgt_flags.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_FLAGS);
+ else if (attr == &iscsi_boot_attr_tgt_ip.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_IP_ADDR);
+ else if (attr == &iscsi_boot_attr_tgt_port.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_PORT);
+ else if (attr == &iscsi_boot_attr_tgt_lun.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_LUN);
+ else if (attr == &iscsi_boot_attr_tgt_chap.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_CHAP_TYPE);
+ else if (attr == &iscsi_boot_attr_tgt_nic.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_NIC_ASSOC);
+ else if (attr == &iscsi_boot_attr_tgt_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_NAME);
+ else if (attr == &iscsi_boot_attr_tgt_chap_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_CHAP_NAME);
+ else if (attr == &iscsi_boot_attr_tgt_chap_secret.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_CHAP_SECRET);
+ else if (attr == &iscsi_boot_attr_tgt_chap_rev_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_REV_CHAP_NAME);
+ else if (attr == &iscsi_boot_attr_tgt_chap_rev_secret.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_TGT_REV_CHAP_SECRET);
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_target_attr_group = {
+ .attrs = target_attrs,
+ .is_visible = iscsi_boot_tgt_attr_is_visible,
+};
+
+/* Ethernet attrs */
+iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX);
+iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS);
+iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR);
+iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK);
+iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN);
+iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY);
+iscsi_boot_rd_attr(eth_primary_dns, primary-dns, ISCSI_BOOT_ETH_PRIMARY_DNS);
+iscsi_boot_rd_attr(eth_secondary_dns, secondary-dns,
+ ISCSI_BOOT_ETH_SECONDARY_DNS);
+iscsi_boot_rd_attr(eth_dhcp, dhcp, ISCSI_BOOT_ETH_DHCP);
+iscsi_boot_rd_attr(eth_vlan, vlan, ISCSI_BOOT_ETH_VLAN);
+iscsi_boot_rd_attr(eth_mac, mac, ISCSI_BOOT_ETH_MAC);
+iscsi_boot_rd_attr(eth_hostname, hostname, ISCSI_BOOT_ETH_HOSTNAME);
+
+static struct attribute *ethernet_attrs[] = {
+ &iscsi_boot_attr_eth_index.attr,
+ &iscsi_boot_attr_eth_flags.attr,
+ &iscsi_boot_attr_eth_ip.attr,
+ &iscsi_boot_attr_eth_subnet.attr,
+ &iscsi_boot_attr_eth_origin.attr,
+ &iscsi_boot_attr_eth_gateway.attr,
+ &iscsi_boot_attr_eth_primary_dns.attr,
+ &iscsi_boot_attr_eth_secondary_dns.attr,
+ &iscsi_boot_attr_eth_dhcp.attr,
+ &iscsi_boot_attr_eth_vlan.attr,
+ &iscsi_boot_attr_eth_mac.attr,
+ &iscsi_boot_attr_eth_hostname.attr,
+ NULL
+};
+
+static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_eth_index.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_INDEX);
+ else if (attr == &iscsi_boot_attr_eth_flags.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_FLAGS);
+ else if (attr == &iscsi_boot_attr_eth_ip.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_IP_ADDR);
+ else if (attr == &iscsi_boot_attr_eth_subnet.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_SUBNET_MASK);
+ else if (attr == &iscsi_boot_attr_eth_origin.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_ORIGIN);
+ else if (attr == &iscsi_boot_attr_eth_gateway.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_GATEWAY);
+ else if (attr == &iscsi_boot_attr_eth_primary_dns.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_PRIMARY_DNS);
+ else if (attr == &iscsi_boot_attr_eth_secondary_dns.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_SECONDARY_DNS);
+ else if (attr == &iscsi_boot_attr_eth_dhcp.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_DHCP);
+ else if (attr == &iscsi_boot_attr_eth_vlan.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_VLAN);
+ else if (attr == &iscsi_boot_attr_eth_mac.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_MAC);
+ else if (attr == &iscsi_boot_attr_eth_hostname.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_HOSTNAME);
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_ethernet_attr_group = {
+ .attrs = ethernet_attrs,
+ .is_visible = iscsi_boot_eth_attr_is_visible,
+};
+
+/* Initiator attrs */
+iscsi_boot_rd_attr(ini_index, index, ISCSI_BOOT_INI_INDEX);
+iscsi_boot_rd_attr(ini_flags, flags, ISCSI_BOOT_INI_FLAGS);
+iscsi_boot_rd_attr(ini_isns, isns-server, ISCSI_BOOT_INI_ISNS_SERVER);
+iscsi_boot_rd_attr(ini_slp, slp-server, ISCSI_BOOT_INI_SLP_SERVER);
+iscsi_boot_rd_attr(ini_primary_radius, pri-radius-server,
+ ISCSI_BOOT_INI_PRI_RADIUS_SERVER);
+iscsi_boot_rd_attr(ini_secondary_radius, sec-radius-server,
+ ISCSI_BOOT_INI_SEC_RADIUS_SERVER);
+iscsi_boot_rd_attr(ini_name, initiator-name, ISCSI_BOOT_INI_INITIATOR_NAME);
+
+static struct attribute *initiator_attrs[] = {
+ &iscsi_boot_attr_ini_index.attr,
+ &iscsi_boot_attr_ini_flags.attr,
+ &iscsi_boot_attr_ini_isns.attr,
+ &iscsi_boot_attr_ini_slp.attr,
+ &iscsi_boot_attr_ini_primary_radius.attr,
+ &iscsi_boot_attr_ini_secondary_radius.attr,
+ &iscsi_boot_attr_ini_name.attr,
+ NULL
+};
+
+static umode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_ini_index.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_INDEX);
+ if (attr == &iscsi_boot_attr_ini_flags.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_FLAGS);
+ if (attr == &iscsi_boot_attr_ini_isns.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_ISNS_SERVER);
+ if (attr == &iscsi_boot_attr_ini_slp.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_SLP_SERVER);
+ if (attr == &iscsi_boot_attr_ini_primary_radius.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_PRI_RADIUS_SERVER);
+ if (attr == &iscsi_boot_attr_ini_secondary_radius.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_SEC_RADIUS_SERVER);
+ if (attr == &iscsi_boot_attr_ini_name.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_INI_INITIATOR_NAME);
+
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_initiator_attr_group = {
+ .attrs = initiator_attrs,
+ .is_visible = iscsi_boot_ini_attr_is_visible,
+};
+
+static struct iscsi_boot_kobj *
+iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
+ struct attribute_group *attr_group,
+ const char *name, int index, void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ boot_kobj = kzalloc(sizeof(*boot_kobj), GFP_KERNEL);
+ if (!boot_kobj)
+ return NULL;
+ INIT_LIST_HEAD(&boot_kobj->list);
+
+ boot_kobj->kobj.kset = boot_kset->kset;
+ if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
+ NULL, name, index)) {
+ kfree(boot_kobj);
+ return NULL;
+ }
+ boot_kobj->data = data;
+ boot_kobj->show = show;
+ boot_kobj->is_visible = is_visible;
+ boot_kobj->release = release;
+
+ if (sysfs_create_group(&boot_kobj->kobj, attr_group)) {
+ /*
+ * We do not want to free this because the caller
+ * will assume that since the creation call failed
+ * the boot kobj was not setup and the normal release
+ * path is not being run.
+ */
+ boot_kobj->release = NULL;
+ kobject_put(&boot_kobj->kobj);
+ return NULL;
+ }
+ boot_kobj->attr_group = attr_group;
+
+ kobject_uevent(&boot_kobj->kobj, KOBJ_ADD);
+ /* Nothing broke so lets add it to the list. */
+ list_add_tail(&boot_kobj->list, &boot_kset->kobj_list);
+ return boot_kobj;
+}
+
+static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj)
+{
+ list_del(&boot_kobj->list);
+ sysfs_remove_group(&boot_kobj->kobj, boot_kobj->attr_group);
+ kobject_put(&boot_kobj->kobj);
+}
+
+/**
+ * iscsi_boot_create_target() - create boot target sysfs dir
+ * @boot_kset: boot kset
+ * @index: the target id
+ * @data: driver specific data for target
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ * @release: release function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the target kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
+{
+ return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
+ "target%d", index, data, show, is_visible,
+ release);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
+
+/**
+ * iscsi_boot_create_initiator() - create boot initiator sysfs dir
+ * @boot_kset: boot kset
+ * @index: the initiator id
+ * @data: driver specific data
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ * @release: release function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the initiator kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
+{
+ return iscsi_boot_create_kobj(boot_kset,
+ &iscsi_boot_initiator_attr_group,
+ "initiator", index, data, show,
+ is_visible, release);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
+
+/**
+ * iscsi_boot_create_ethernet() - create boot ethernet sysfs dir
+ * @boot_kset: boot kset
+ * @index: the ethernet device id
+ * @data: driver specific data
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ * @release: release function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the ethernet kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
+{
+ return iscsi_boot_create_kobj(boot_kset,
+ &iscsi_boot_ethernet_attr_group,
+ "ethernet%d", index, data, show,
+ is_visible, release);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
+
+/**
+ * iscsi_boot_create_kset() - creates root sysfs tree
+ * @set_name: name of root dir
+ */
+struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name)
+{
+ struct iscsi_boot_kset *boot_kset;
+
+ boot_kset = kzalloc(sizeof(*boot_kset), GFP_KERNEL);
+ if (!boot_kset)
+ return NULL;
+
+ boot_kset->kset = kset_create_and_add(set_name, NULL, firmware_kobj);
+ if (!boot_kset->kset) {
+ kfree(boot_kset);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&boot_kset->kobj_list);
+ return boot_kset;
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_kset);
+
+/**
+ * iscsi_boot_create_host_kset() - creates root sysfs tree for a scsi host
+ * @hostno: host number of scsi host
+ */
+struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno)
+{
+ struct iscsi_boot_kset *boot_kset;
+ char *set_name;
+
+ set_name = kasprintf(GFP_KERNEL, "iscsi_boot%u", hostno);
+ if (!set_name)
+ return NULL;
+
+ boot_kset = iscsi_boot_create_kset(set_name);
+ kfree(set_name);
+ return boot_kset;
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_host_kset);
+
+/**
+ * iscsi_boot_destroy_kset() - destroy kset and kobjects under it
+ * @boot_kset: boot kset
+ *
+ * This will remove the kset and kobjects and attrs under it.
+ */
+void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
+{
+ struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+
+ if (!boot_kset)
+ return;
+
+ list_for_each_entry_safe(boot_kobj, tmp_kobj,
+ &boot_kset->kobj_list, list)
+ iscsi_boot_remove_kobj(boot_kobj);
+
+ kset_unregister(boot_kset->kset);
+ kfree(boot_kset);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
new file mode 100644
index 000000000..0b8af186e
--- /dev/null
+++ b/drivers/scsi/iscsi_tcp.c
@@ -0,0 +1,1030 @@
+/*
+ * iSCSI Initiator over TCP/IP Data-Path
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 - 2006 Mike Christie
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Credits:
+ * Christoph Hellwig
+ * FUJITA Tomonori
+ * Arne Redlich
+ * Zhenyu Wang
+ */
+
+#include <linux/types.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <linux/module.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "iscsi_tcp.h"
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+ "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+ "Alex Aizman <itn780@yahoo.com>");
+MODULE_DESCRIPTION("iSCSI/TCP data-path");
+MODULE_LICENSE("GPL");
+
+static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sw_tcp_sht;
+static struct iscsi_transport iscsi_sw_tcp_transport;
+
+static unsigned int iscsi_max_lun = ~0;
+module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+static int iscsi_sw_tcp_dbg;
+module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+#define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_sw_tcp_dbg) \
+ iscsi_conn_printk(KERN_INFO, _conn, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+
+/**
+ * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @offset: offset in skb
+ * @len: skb->len - offset
+ */
+static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
+{
+ struct iscsi_conn *conn = rd_desc->arg.data;
+ unsigned int consumed, total_consumed = 0;
+ int status;
+
+ ISCSI_SW_TCP_DBG(conn, "in %d bytes\n", skb->len - offset);
+
+ do {
+ status = 0;
+ consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
+ offset += consumed;
+ total_consumed += consumed;
+ } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
+
+ ISCSI_SW_TCP_DBG(conn, "read %d bytes status %d\n",
+ skb->len - offset, status);
+ return total_consumed;
+}
+
+/**
+ * iscsi_sw_sk_state_check - check socket state
+ * @sk: socket
+ *
+ * If the socket is in CLOSE or CLOSE_WAIT we should
+ * not close the connection if there is still some
+ * data pending.
+ *
+ * Must be called with sk_callback_lock.
+ */
+static inline int iscsi_sw_sk_state_check(struct sock *sk)
+{
+ struct iscsi_conn *conn = sk->sk_user_data;
+
+ if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
+ (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
+ !atomic_read(&sk->sk_rmem_alloc)) {
+ ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
+ iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
+ return -ECONNRESET;
+ }
+ return 0;
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ read_descriptor_t rd_desc;
+
+ read_lock(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
+
+ /*
+ * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
+ * We set count to 1 because we want the network layer to
+ * hand us all the skbs that are available. iscsi_tcp_recv
+ * handled pdus that cross buffers or pdus that still need data.
+ */
+ rd_desc.arg.data = conn;
+ rd_desc.count = 1;
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
+
+ iscsi_sw_sk_state_check(sk);
+
+ /* If we had to (atomically) map a highmem page,
+ * unmap it now. */
+ iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void iscsi_sw_tcp_state_change(struct sock *sk)
+{
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_conn *conn;
+ struct iscsi_session *session;
+ void (*old_state_change)(struct sock *);
+
+ read_lock(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ session = conn->session;
+
+ iscsi_sw_sk_state_check(sk);
+
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+ old_state_change = tcp_sw_conn->old_state_change;
+
+ read_unlock(&sk->sk_callback_lock);
+
+ old_state_change(sk);
+}
+
+/**
+ * iscsi_write_space - Called when more output buffer space is available
+ * @sk: socket space is available for
+ **/
+static void iscsi_sw_tcp_write_space(struct sock *sk)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ void (*old_write_space)(struct sock *);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+ old_write_space = tcp_sw_conn->old_write_space;
+ read_unlock_bh(&sk->sk_callback_lock);
+
+ old_write_space(sk);
+
+ ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
+ iscsi_conn_queue_work(conn);
+}
+
+static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ /* assign new callbacks */
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = conn;
+ tcp_sw_conn->old_data_ready = sk->sk_data_ready;
+ tcp_sw_conn->old_state_change = sk->sk_state_change;
+ tcp_sw_conn->old_write_space = sk->sk_write_space;
+ sk->sk_data_ready = iscsi_sw_tcp_data_ready;
+ sk->sk_state_change = iscsi_sw_tcp_state_change;
+ sk->sk_write_space = iscsi_sw_tcp_write_space;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void
+iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = tcp_sw_conn->old_data_ready;
+ sk->sk_state_change = tcp_sw_conn->old_state_change;
+ sk->sk_write_space = tcp_sw_conn->old_write_space;
+ sk->sk_no_check_tx = 0;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+/**
+ * iscsi_sw_tcp_xmit_segment - transmit segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to transmnit
+ *
+ * This function transmits as much of the buffer as
+ * the network layer will accept, and returns the number of
+ * bytes transmitted.
+ *
+ * If CRC hashing is enabled, the function will compute the
+ * hash as it goes. When the entire segment has been transmitted,
+ * it will retrieve the hash value and send it as well.
+ */
+static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct socket *sk = tcp_sw_conn->sock;
+ unsigned int copied = 0;
+ int r = 0;
+
+ while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
+ struct scatterlist *sg;
+ unsigned int offset, copy;
+ int flags = 0;
+
+ r = 0;
+ offset = segment->copied;
+ copy = segment->size - offset;
+
+ if (segment->total_copied + segment->size < segment->total_size)
+ flags |= MSG_MORE;
+
+ /* Use sendpage if we can; else fall back to sendmsg */
+ if (!segment->data) {
+ sg = segment->sg;
+ offset += segment->sg_offset + sg->offset;
+ r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
+ copy, flags);
+ } else {
+ struct msghdr msg = { .msg_flags = flags };
+ struct kvec iov = {
+ .iov_base = segment->data + offset,
+ .iov_len = copy
+ };
+
+ r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
+ }
+
+ if (r < 0) {
+ iscsi_tcp_segment_unmap(segment);
+ return r;
+ }
+ copied += r;
+ }
+ return copied;
+}
+
+/**
+ * iscsi_sw_tcp_xmit - TCP transmit
+ **/
+static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
+ unsigned int consumed = 0;
+ int rc = 0;
+
+ while (1) {
+ rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
+ /*
+ * We may not have been able to send data because the conn
+ * is getting stopped. libiscsi will know so propagate err
+ * for it to do the right thing.
+ */
+ if (rc == -EAGAIN)
+ return rc;
+ else if (rc < 0) {
+ rc = ISCSI_ERR_XMIT_FAILED;
+ goto error;
+ } else if (rc == 0)
+ break;
+
+ consumed += rc;
+
+ if (segment->total_copied >= segment->total_size) {
+ if (segment->done != NULL) {
+ rc = segment->done(tcp_conn, segment);
+ if (rc != 0)
+ goto error;
+ }
+ }
+ }
+
+ ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);
+
+ conn->txdata_octets += consumed;
+ return consumed;
+
+error:
+ /* Transmit error. We could initiate error recovery
+ * here. */
+ ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
+ iscsi_conn_failure(conn, rc);
+ return -EIO;
+}
+
+/**
+ * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ */
+static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
+
+ return segment->total_copied - segment->total_size;
+}
+
+static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ unsigned long pflags = current->flags;
+ int rc = 0;
+
+ current->flags |= PF_MEMALLOC;
+
+ while (iscsi_sw_tcp_xmit_qlen(conn)) {
+ rc = iscsi_sw_tcp_xmit(conn);
+ if (rc == 0) {
+ rc = -EAGAIN;
+ break;
+ }
+ if (rc < 0)
+ break;
+ rc = 0;
+ }
+
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ return rc;
+}
+
+/*
+ * This is called when we're done sending the header.
+ * Simply copy the data_segment to the send segment, and return.
+ */
+static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+ tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
+ ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
+ "Header done. Next segment size %u total_size %u\n",
+ tcp_sw_conn->out.segment.size,
+ tcp_sw_conn->out.segment.total_size);
+ return 0;
+}
+
+static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
+ size_t hdrlen)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+ ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
+ "digest enabled" : "digest disabled");
+
+ /* Clear the data segment - needs to be filled in by the
+ * caller using iscsi_tcp_send_data_prep() */
+ memset(&tcp_sw_conn->out.data_segment, 0,
+ sizeof(struct iscsi_segment));
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+ * sure that both iscsi_tcp_task and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+ iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
+ hdr + hdrlen);
+ hdrlen += ISCSI_DIGEST_SIZE;
+ }
+
+ /* Remember header pointer for later, when we need
+ * to decide whether there's a payload to go along
+ * with the header. */
+ tcp_sw_conn->out.hdr = hdr;
+
+ iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
+ iscsi_sw_tcp_send_hdr_done, NULL);
+}
+
+/*
+ * Prepare the send buffer for the payload data.
+ * Padding and checksumming will all be taken care
+ * of by the iscsi_segment routines.
+ */
+static int
+iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ unsigned int count, unsigned int offset,
+ unsigned int len)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+ ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
+ conn->datadgst_en ?
+ "digest enabled" : "digest disabled");
+
+ /* Make sure the datalen matches what the caller
+ said he would send. */
+ hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
+ WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+
+ if (conn->datadgst_en)
+ tx_hash = &tcp_sw_conn->tx_hash;
+
+ return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
+ sg, count, offset, len,
+ NULL, tx_hash);
+}
+
+static void
+iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
+ size_t len)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+ ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
+ "digest enabled" : "digest disabled");
+
+ /* Make sure the datalen matches what the caller
+ said he would send. */
+ hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
+ WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+
+ if (conn->datadgst_en)
+ tx_hash = &tcp_sw_conn->tx_hash;
+
+ iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
+ data, len, NULL, tx_hash);
+}
+
+static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
+ unsigned int offset, unsigned int count)
+{
+ struct iscsi_conn *conn = task->conn;
+ int err = 0;
+
+ iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
+
+ if (!count)
+ return 0;
+
+ if (!task->sc)
+ iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
+ else {
+ struct scsi_data_buffer *sdb = scsi_out(task->sc);
+
+ err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
+ sdb->table.nents, offset,
+ count);
+ }
+
+ if (err) {
+ /* got invalid offset/len */
+ return -EIO;
+ }
+ return 0;
+}
+
+static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ task->hdr = task->dd_data + sizeof(*tcp_task);
+ task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
+ return 0;
+}
+
+static struct iscsi_cls_conn *
+iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
+ uint32_t conn_idx)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+
+ cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
+ conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_sw_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
+ goto free_conn;
+
+ tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_sw_conn->rx_hash.flags = 0;
+ if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
+ goto free_tx_tfm;
+ tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
+
+ return cls_conn;
+
+free_tx_tfm:
+ crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+free_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
+ iscsi_tcp_conn_teardown(cls_conn);
+ return NULL;
+}
+
+static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct socket *sock = tcp_sw_conn->sock;
+
+ if (!sock)
+ return;
+
+ sock_hold(sock->sk);
+ iscsi_sw_tcp_conn_restore_callbacks(conn);
+ sock_put(sock->sk);
+
+ spin_lock_bh(&session->frwd_lock);
+ tcp_sw_conn->sock = NULL;
+ spin_unlock_bh(&session->frwd_lock);
+ sockfd_put(sock);
+}
+
+static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+ iscsi_sw_tcp_release_conn(conn);
+
+ if (tcp_sw_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+ if (tcp_sw_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
+
+ iscsi_tcp_conn_teardown(cls_conn);
+}
+
+static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct socket *sock = tcp_sw_conn->sock;
+
+ /* userspace may have goofed up and not bound us */
+ if (!sock)
+ return;
+
+ sock->sk->sk_err = EIO;
+ wake_up_interruptible(sk_sleep(sock->sk));
+
+ /* stop xmit side */
+ iscsi_suspend_tx(conn);
+
+ /* stop recv side and release socket */
+ iscsi_sw_tcp_release_conn(conn);
+
+ iscsi_conn_stop(cls_conn, flag);
+}
+
+static int
+iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk;
+ struct socket *sock;
+ int err;
+
+ /* lookup for existing socket */
+ sock = sockfd_lookup((int)transport_eph, &err);
+ if (!sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "sockfd_lookup failed %d\n", err);
+ return -EEXIST;
+ }
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+ goto free_socket;
+
+ spin_lock_bh(&session->frwd_lock);
+ /* bind iSCSI connection and socket */
+ tcp_sw_conn->sock = sock;
+ spin_unlock_bh(&session->frwd_lock);
+
+ /* setup Socket parameters */
+ sk = sock->sk;
+ sk->sk_reuse = SK_CAN_REUSE;
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+ sk_set_memalloc(sk);
+
+ iscsi_sw_tcp_conn_set_callbacks(conn);
+ tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
+ /*
+ * set receive state machine into initial state
+ */
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+
+free_socket:
+ sockfd_put(sock);
+ return err;
+}
+
+static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf,
+ int buflen)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+ switch(param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
+ tcp_sw_conn->sendpage = conn->datadgst_en ?
+ sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ return iscsi_tcp_set_max_r2t(conn, buf);
+ default:
+ return iscsi_set_param(cls_conn, param, buf, buflen);
+ }
+
+ return 0;
+}
+
+static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sockaddr_in6 addr;
+ int rc, len;
+
+ switch(param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_LOCAL_PORT:
+ spin_lock_bh(&conn->session->frwd_lock);
+ if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ spin_unlock_bh(&conn->session->frwd_lock);
+ return -ENOTCONN;
+ }
+ if (param == ISCSI_PARAM_LOCAL_PORT)
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ else
+ rc = kernel_getpeername(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ spin_unlock_bh(&conn->session->frwd_lock);
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+ }
+
+ return 0;
+}
+
+static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+ struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct sockaddr_in6 addr;
+ int rc, len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (!session)
+ return -ENOTCONN;
+
+ spin_lock_bh(&session->frwd_lock);
+ conn = session->leadconn;
+ if (!conn) {
+ spin_unlock_bh(&session->frwd_lock);
+ return -ENOTCONN;
+ }
+ tcp_conn = conn->dd_data;
+
+ tcp_sw_conn = tcp_conn->dd_data;
+ if (!tcp_sw_conn->sock) {
+ spin_unlock_bh(&session->frwd_lock);
+ return -ENOTCONN;
+ }
+
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ spin_unlock_bh(&session->frwd_lock);
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return 0;
+}
+
+static void
+iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+ stats->custom_length = 3;
+ strcpy(stats->custom[0].desc, "tx_sendpage_failures");
+ stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
+ strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
+ stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
+ strcpy(stats->custom[2].desc, "eh_abort_cnt");
+ stats->custom[2].value = conn->eh_abort_cnt;
+
+ iscsi_tcp_conn_get_stats(cls_conn, stats);
+}
+
+static struct iscsi_cls_session *
+iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+ uint16_t qdepth, uint32_t initial_cmdsn)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_sw_tcp_host *tcp_sw_host;
+ struct Scsi_Host *shost;
+
+ if (ep) {
+ printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+ return NULL;
+ }
+
+ shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
+ sizeof(struct iscsi_sw_tcp_host), 1);
+ if (!shost)
+ return NULL;
+ shost->transportt = iscsi_sw_tcp_scsi_transport;
+ shost->cmd_per_lun = qdepth;
+ shost->max_lun = iscsi_max_lun;
+ shost->max_id = 0;
+ shost->max_channel = 0;
+ shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+
+ if (iscsi_host_add(shost, NULL))
+ goto free_host;
+
+ cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
+ cmds_max, 0,
+ sizeof(struct iscsi_tcp_task) +
+ sizeof(struct iscsi_sw_tcp_hdrbuf),
+ initial_cmdsn, 0);
+ if (!cls_session)
+ goto remove_host;
+ session = cls_session->dd_data;
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
+
+ shost->can_queue = session->scsi_cmds_max;
+ if (iscsi_tcp_r2tpool_alloc(session))
+ goto remove_session;
+ return cls_session;
+
+remove_session:
+ iscsi_session_teardown(cls_session);
+remove_host:
+ iscsi_host_remove(shost);
+free_host:
+ iscsi_host_free(shost);
+ return NULL;
+}
+
+static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+ iscsi_tcp_r2tpool_free(cls_session->dd_data);
+ iscsi_session_teardown(cls_session);
+
+ iscsi_host_remove(shost);
+ iscsi_host_free(shost);
+}
+
+static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_LOCAL_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
+{
+ set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
+ return 0;
+}
+
+static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
+{
+ blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
+ blk_queue_dma_alignment(sdev->request_queue, 0);
+ return 0;
+}
+
+static struct scsi_host_template iscsi_sw_tcp_sht = {
+ .module = THIS_MODULE,
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = 4096,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .use_clustering = DISABLE_CLUSTERING,
+ .slave_alloc = iscsi_sw_tcp_slave_alloc,
+ .slave_configure = iscsi_sw_tcp_slave_configure,
+ .target_alloc = iscsi_target_alloc,
+ .proc_name = "iscsi_tcp",
+ .this_id = -1,
+ .track_queue_depth = 1,
+};
+
+static struct iscsi_transport iscsi_sw_tcp_transport = {
+ .owner = THIS_MODULE,
+ .name = "tcp",
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
+ | CAP_DATADGST,
+ /* session management */
+ .create_session = iscsi_sw_tcp_session_create,
+ .destroy_session = iscsi_sw_tcp_session_destroy,
+ /* connection management */
+ .create_conn = iscsi_sw_tcp_conn_create,
+ .bind_conn = iscsi_sw_tcp_conn_bind,
+ .destroy_conn = iscsi_sw_tcp_conn_destroy,
+ .attr_is_visible = iscsi_sw_tcp_attr_is_visible,
+ .set_param = iscsi_sw_tcp_conn_set_param,
+ .get_conn_param = iscsi_sw_tcp_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_sw_tcp_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_sw_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_sw_tcp_conn_get_stats,
+ /* iscsi task/cmd helpers */
+ .init_task = iscsi_tcp_task_init,
+ .xmit_task = iscsi_tcp_task_xmit,
+ .cleanup_task = iscsi_tcp_cleanup_task,
+ /* low level pdu helpers */
+ .xmit_pdu = iscsi_sw_tcp_pdu_xmit,
+ .init_pdu = iscsi_sw_tcp_pdu_init,
+ .alloc_pdu = iscsi_sw_tcp_pdu_alloc,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+};
+
+static int __init iscsi_sw_tcp_init(void)
+{
+ if (iscsi_max_lun < 1) {
+ printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
+ iscsi_max_lun);
+ return -EINVAL;
+ }
+
+ iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
+ &iscsi_sw_tcp_transport);
+ if (!iscsi_sw_tcp_scsi_transport)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit iscsi_sw_tcp_exit(void)
+{
+ iscsi_unregister_transport(&iscsi_sw_tcp_transport);
+}
+
+module_init(iscsi_sw_tcp_init);
+module_exit(iscsi_sw_tcp_exit);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
new file mode 100644
index 000000000..f42ecb238
--- /dev/null
+++ b/drivers/scsi/iscsi_tcp.h
@@ -0,0 +1,68 @@
+/*
+ * iSCSI Initiator TCP Transport
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 - 2006 Mike Christie
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * See the file COPYING included with this distribution for more details.
+ */
+
+#ifndef ISCSI_SW_TCP_H
+#define ISCSI_SW_TCP_H
+
+#include <scsi/libiscsi.h>
+#include <scsi/libiscsi_tcp.h>
+
+struct socket;
+struct iscsi_tcp_conn;
+
+/* Socket connection send helper */
+struct iscsi_sw_tcp_send {
+ struct iscsi_hdr *hdr;
+ struct iscsi_segment segment;
+ struct iscsi_segment data_segment;
+};
+
+struct iscsi_sw_tcp_conn {
+ struct socket *sock;
+
+ struct iscsi_sw_tcp_send out;
+ /* old values for socket callbacks */
+ void (*old_data_ready)(struct sock *);
+ void (*old_state_change)(struct sock *);
+ void (*old_write_space)(struct sock *);
+
+ /* data and header digests */
+ struct hash_desc tx_hash; /* CRC32C (Tx) */
+ struct hash_desc rx_hash; /* CRC32C (Rx) */
+
+ /* MIB custom statistics */
+ uint32_t sendpage_failures_cnt;
+ uint32_t discontiguous_hdr_cnt;
+
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+};
+
+struct iscsi_sw_tcp_host {
+ struct iscsi_session *session;
+};
+
+struct iscsi_sw_tcp_hdrbuf {
+ struct iscsi_hdr hdrbuf;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+ ISCSI_DIGEST_SIZE];
+};
+
+#endif /* ISCSI_SW_TCP_H */
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
new file mode 100644
index 000000000..9aaa74e34
--- /dev/null
+++ b/drivers/scsi/jazz_esp.c
@@ -0,0 +1,248 @@
+/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
+ *
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <asm/jazz.h>
+#include <asm/jazzdma.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "jazz_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "May 19, 2007"
+
+static void jazz_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ *(volatile u8 *)(esp->regs + reg) = val;
+}
+
+static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return *(volatile u8 *)(esp->regs + reg);
+}
+
+static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return dma_map_single(esp->dev, buf, sz, dir);
+}
+
+static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ return dma_map_sg(esp->dev, sg, num_sg, dir);
+}
+
+static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ dma_unmap_single(esp->dev, addr, sz, dir);
+}
+
+static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ dma_unmap_sg(esp->dev, sg, num_sg, dir);
+}
+
+static int jazz_esp_irq_pending(struct esp *esp)
+{
+ if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+ return 0;
+}
+
+static void jazz_esp_reset_dma(struct esp *esp)
+{
+ vdma_disable ((int)esp->dma_regs);
+}
+
+static void jazz_esp_dma_drain(struct esp *esp)
+{
+ /* nothing to do */
+}
+
+static void jazz_esp_dma_invalidate(struct esp *esp)
+{
+ vdma_disable ((int)esp->dma_regs);
+}
+
+static void jazz_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ BUG_ON(!(cmd & ESP_CMD_DMA));
+
+ jazz_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ jazz_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ vdma_disable ((int)esp->dma_regs);
+ if (write)
+ vdma_set_mode ((int)esp->dma_regs, DMA_MODE_READ);
+ else
+ vdma_set_mode ((int)esp->dma_regs, DMA_MODE_WRITE);
+
+ vdma_set_addr ((int)esp->dma_regs, addr);
+ vdma_set_count ((int)esp->dma_regs, dma_count);
+ vdma_enable ((int)esp->dma_regs);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+static int jazz_esp_dma_error(struct esp *esp)
+{
+ u32 enable = vdma_get_enable((int)esp->dma_regs);
+
+ if (enable & (R4030_MEM_INTR|R4030_ADDR_INTR))
+ return 1;
+
+ return 0;
+}
+
+static const struct esp_driver_ops jazz_esp_ops = {
+ .esp_write8 = jazz_esp_write8,
+ .esp_read8 = jazz_esp_read8,
+ .map_single = jazz_esp_map_single,
+ .map_sg = jazz_esp_map_sg,
+ .unmap_single = jazz_esp_unmap_single,
+ .unmap_sg = jazz_esp_unmap_sg,
+ .irq_pending = jazz_esp_irq_pending,
+ .reset_dma = jazz_esp_reset_dma,
+ .dma_drain = jazz_esp_dma_drain,
+ .dma_invalidate = jazz_esp_dma_invalidate,
+ .send_dma_cmd = jazz_esp_send_dma_cmd,
+ .dma_error = jazz_esp_dma_error,
+};
+
+static int esp_jazz_probe(struct platform_device *dev)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ struct resource *res;
+ int err;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ err = -ENOMEM;
+ if (!host)
+ goto fail;
+
+ host->max_id = 8;
+ esp = shost_priv(host);
+
+ esp->host = host;
+ esp->dev = dev;
+ esp->ops = &jazz_esp_ops;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto fail_unlink;
+
+ esp->regs = (void __iomem *)res->start;
+ if (!esp->regs)
+ goto fail_unlink;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ if (!res)
+ goto fail_unlink;
+
+ esp->dma_regs = (void __iomem *)res->start;
+
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
+ &esp->command_block_dma,
+ GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unmap_regs;
+
+ host->irq = platform_get_irq(dev, 0);
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+ if (err < 0)
+ goto fail_unmap_command_block;
+
+ esp->scsi_id = 7;
+ esp->host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+ esp->cfreq = 40000000;
+
+ dev_set_drvdata(&dev->dev, esp);
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_unmap_command_block:
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+fail_unmap_regs:
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
+}
+
+static int esp_jazz_remove(struct platform_device *dev)
+{
+ struct esp *esp = dev_get_drvdata(&dev->dev);
+ unsigned int irq = esp->host->irq;
+
+ scsi_esp_unregister(esp);
+
+ free_irq(irq, esp);
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+
+ scsi_host_put(esp->host);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:jazz_esp");
+
+static struct platform_driver esp_jazz_driver = {
+ .probe = esp_jazz_probe,
+ .remove = esp_jazz_remove,
+ .driver = {
+ .name = "jazz_esp",
+ },
+};
+
+static int __init jazz_esp_init(void)
+{
+ return platform_driver_register(&esp_jazz_driver);
+}
+
+static void __exit jazz_esp_exit(void)
+{
+ platform_driver_unregister(&esp_jazz_driver);
+}
+
+MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
+MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(jazz_esp_init);
+module_exit(jazz_esp_exit);
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
new file mode 100644
index 000000000..5c4ded997
--- /dev/null
+++ b/drivers/scsi/lasi700.c
@@ -0,0 +1,187 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* PARISC LASI driver for the 53c700 chip
+ *
+ * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+ */
+
+/*
+ * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
+ * debugging this driver on the parisc architecture and suggesting
+ * many improvements and bug fixes.
+ *
+ * Thanks also go to Linuxcare Inc. for providing several PARISC
+ * machines for me to debug the driver on.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/blkdev.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/parisc-device.h>
+#include <asm/delay.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("lasi700 SCSI Driver");
+MODULE_LICENSE("GPL");
+
+#define LASI_700_SVERSION 0x00071
+#define LASI_710_SVERSION 0x00082
+
+#define LASI700_ID_TABLE { \
+ .hw_type = HPHW_FIO, \
+ .sversion = LASI_700_SVERSION, \
+ .hversion = HVERSION_ANY_ID, \
+ .hversion_rev = HVERSION_REV_ANY_ID, \
+}
+
+#define LASI710_ID_TABLE { \
+ .hw_type = HPHW_FIO, \
+ .sversion = LASI_710_SVERSION, \
+ .hversion = HVERSION_ANY_ID, \
+ .hversion_rev = HVERSION_REV_ANY_ID, \
+}
+
+#define LASI700_CLOCK 25
+#define LASI710_CLOCK 40
+#define LASI_SCSI_CORE_OFFSET 0x100
+
+static struct parisc_device_id lasi700_ids[] = {
+ LASI700_ID_TABLE,
+ LASI710_ID_TABLE,
+ { 0 }
+};
+
+static struct scsi_host_template lasi700_template = {
+ .name = "LASI SCSI 53c700",
+ .proc_name = "lasi700",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+MODULE_DEVICE_TABLE(parisc, lasi700_ids);
+
+static int __init
+lasi700_probe(struct parisc_device *dev)
+{
+ unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET;
+ struct NCR_700_Host_Parameters *hostdata;
+ struct Scsi_Host *host;
+
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_printk(KERN_ERR, &dev->dev, "Failed to allocate host data\n");
+ return -ENOMEM;
+ }
+
+ hostdata->dev = &dev->dev;
+ dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->differential = 0;
+
+ if (dev->id.sversion == LASI_700_SVERSION) {
+ hostdata->clock = LASI700_CLOCK;
+ hostdata->force_le_on_be = 1;
+ } else {
+ hostdata->clock = LASI710_CLOCK;
+ hostdata->force_le_on_be = 0;
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->burst_length = 8;
+ }
+
+ host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev);
+ if (!host)
+ goto out_kfree;
+ host->this_id = 7;
+ host->base = base;
+ host->irq = dev->irq;
+ if(request_irq(dev->irq, NCR_700_intr, IRQF_SHARED, "lasi700", host)) {
+ printk(KERN_ERR "lasi700: request_irq failed!\n");
+ goto out_put_host;
+ }
+
+ dev_set_drvdata(&dev->dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_kfree:
+ iounmap(hostdata->base);
+ kfree(hostdata);
+ return -ENODEV;
+}
+
+static int __exit
+lasi700_driver_remove(struct parisc_device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ free_irq(host->irq, host);
+ iounmap(hostdata->base);
+ kfree(hostdata);
+
+ return 0;
+}
+
+static struct parisc_driver lasi700_driver = {
+ .name = "lasi_scsi",
+ .id_table = lasi700_ids,
+ .probe = lasi700_probe,
+ .remove = lasi700_driver_remove,
+};
+
+static int __init
+lasi700_init(void)
+{
+ return register_parisc_driver(&lasi700_driver);
+}
+
+static void __exit
+lasi700_exit(void)
+{
+ unregister_parisc_driver(&lasi700_driver);
+}
+
+module_init(lasi700_init);
+module_exit(lasi700_exit);
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000..4bb23ac86
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,14 @@
+# $Id: Makefile
+
+obj-$(CONFIG_LIBFC) += libfc.o
+
+libfc-objs := \
+ fc_libfc.o \
+ fc_disc.o \
+ fc_exch.o \
+ fc_elsct.o \
+ fc_frame.o \
+ fc_lport.o \
+ fc_rport.o \
+ fc_fcp.o \
+ fc_npiv.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000..880a9068c
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,753 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Target Discovery
+ *
+ * This block discovers all FC-4 remote ports, including FCP initiators. It
+ * also handles RSCN events and re-discovery if necessary.
+ */
+
+/*
+ * DISC LOCKING
+ *
+ * The disc mutex is can be locked when acquiring rport locks, but may not
+ * be held when acquiring the lport lock. Refer to fc_lport.c for more
+ * details.
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_libfc.h"
+
+#define FC_DISC_RETRY_LIMIT 3 /* max retries */
+#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
+
+static void fc_disc_gpn_ft_req(struct fc_disc *);
+static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
+static void fc_disc_timeout(struct work_struct *);
+static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
+static void fc_disc_restart(struct fc_disc *);
+
+/**
+ * fc_disc_stop_rports() - Delete all the remote ports associated with the lport
+ * @disc: The discovery job to stop remote ports on
+ *
+ * Locking Note: This function expects that the lport mutex is locked before
+ * calling it.
+ */
+static void fc_disc_stop_rports(struct fc_disc *disc)
+{
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+
+ lport = fc_disc_lport(disc);
+
+ mutex_lock(&disc->disc_mutex);
+ list_for_each_entry_rcu(rdata, &disc->rports, peers)
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
+ * @disc: The discovery object to which the RSCN applies
+ * @fp: The RSCN frame
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
+{
+ struct fc_lport *lport;
+ struct fc_els_rscn *rp;
+ struct fc_els_rscn_page *pp;
+ struct fc_seq_els_data rjt_data;
+ unsigned int len;
+ int redisc = 0;
+ enum fc_els_rscn_ev_qual ev_qual;
+ enum fc_els_rscn_addr_fmt fmt;
+ LIST_HEAD(disc_ports);
+ struct fc_disc_port *dp, *next;
+
+ lport = fc_disc_lport(disc);
+
+ FC_DISC_DBG(disc, "Received an RSCN event\n");
+
+ /* make sure the frame contains an RSCN message */
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ if (!rp)
+ goto reject;
+ /* make sure the page length is as expected (4 bytes) */
+ if (rp->rscn_page_len != sizeof(*pp))
+ goto reject;
+ /* get the RSCN payload length */
+ len = ntohs(rp->rscn_plen);
+ if (len < sizeof(*rp))
+ goto reject;
+ /* make sure the frame contains the expected payload */
+ rp = fc_frame_payload_get(fp, len);
+ if (!rp)
+ goto reject;
+ /* payload must be a multiple of the RSCN page size */
+ len -= sizeof(*rp);
+ if (len % sizeof(*pp))
+ goto reject;
+
+ for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
+ ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+ ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+ fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+ fmt &= ELS_RSCN_ADDR_FMT_MASK;
+ /*
+ * if we get an address format other than port
+ * (area, domain, fabric), then do a full discovery
+ */
+ switch (fmt) {
+ case ELS_ADDR_FMT_PORT:
+ FC_DISC_DBG(disc, "Port address format for port "
+ "(%6.6x)\n", ntoh24(pp->rscn_fid));
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (!dp) {
+ redisc = 1;
+ break;
+ }
+ dp->lp = lport;
+ dp->port_id = ntoh24(pp->rscn_fid);
+ list_add_tail(&dp->peers, &disc_ports);
+ break;
+ case ELS_ADDR_FMT_AREA:
+ case ELS_ADDR_FMT_DOM:
+ case ELS_ADDR_FMT_FAB:
+ default:
+ FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
+ redisc = 1;
+ break;
+ }
+ }
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+
+ /*
+ * If not doing a complete rediscovery, do GPN_ID on
+ * the individual ports mentioned in the list.
+ * If any of these get an error, do a full rediscovery.
+ * In any case, go through the list and free the entries.
+ */
+ list_for_each_entry_safe(dp, next, &disc_ports, peers) {
+ list_del(&dp->peers);
+ if (!redisc)
+ redisc = fc_disc_single(lport, dp);
+ kfree(dp);
+ }
+ if (redisc) {
+ FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
+ fc_disc_restart(disc);
+ } else {
+ FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
+ "redisc %d state %d in_prog %d\n",
+ redisc, lport->state, disc->pending);
+ }
+ fc_frame_free(fp);
+ return;
+reject:
+ FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_disc_recv_req() - Handle incoming requests
+ * @lport: The local port receiving the request
+ * @fp: The request frame
+ *
+ * Locking Note: This function is called from the EM and will lock
+ * the disc_mutex before calling the handler for the
+ * request.
+ */
+static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ u8 op;
+ struct fc_disc *disc = &lport->disc;
+
+ op = fc_frame_payload_op(fp);
+ switch (op) {
+ case ELS_RSCN:
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_recv_rscn_req(disc, fp);
+ mutex_unlock(&disc->disc_mutex);
+ break;
+ default:
+ FC_DISC_DBG(disc, "Received an unsupported request, "
+ "the opcode is (%x)\n", op);
+ fc_frame_free(fp);
+ break;
+ }
+}
+
+/**
+ * fc_disc_restart() - Restart discovery
+ * @disc: The discovery object to be restarted
+ *
+ * Locking Note: This function expects that the disc mutex
+ * is already locked.
+ */
+static void fc_disc_restart(struct fc_disc *disc)
+{
+ if (!disc->disc_callback)
+ return;
+
+ FC_DISC_DBG(disc, "Restarting discovery\n");
+
+ disc->requested = 1;
+ if (disc->pending)
+ return;
+
+ /*
+ * Advance disc_id. This is an arbitrary non-zero number that will
+ * match the value in the fc_rport_priv after discovery for all
+ * freshly-discovered remote ports. Avoid wrapping to zero.
+ */
+ disc->disc_id = (disc->disc_id + 2) | 1;
+ disc->retry_count = 0;
+ fc_disc_gpn_ft_req(disc);
+}
+
+/**
+ * fc_disc_start() - Start discovery on a local port
+ * @lport: The local port to have discovery started on
+ * @disc_callback: Callback function to be called when discovery is complete
+ */
+static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
+ enum fc_disc_event),
+ struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ /*
+ * At this point we may have a new disc job or an existing
+ * one. Either way, let's lock when we make changes to it
+ * and send the GPN_FT request.
+ */
+ mutex_lock(&disc->disc_mutex);
+ disc->disc_callback = disc_callback;
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_done() - Discovery has been completed
+ * @disc: The discovery context
+ * @event: The discovery completion status
+ *
+ * Locking Note: This function expects that the disc mutex is locked before
+ * it is called. The discovery callback is then made with the lock released,
+ * and the lock is re-taken before returning from this function
+ */
+static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
+{
+ struct fc_lport *lport = fc_disc_lport(disc);
+ struct fc_rport_priv *rdata;
+
+ FC_DISC_DBG(disc, "Discovery complete\n");
+
+ disc->pending = 0;
+ if (disc->requested) {
+ fc_disc_restart(disc);
+ return;
+ }
+
+ /*
+ * Go through all remote ports. If they were found in the latest
+ * discovery, reverify or log them in. Otherwise, log them out.
+ * Skip ports which were never discovered. These are the dNS port
+ * and ports which were created by PLOGI.
+ */
+ list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ if (!rdata->disc_id)
+ continue;
+ if (rdata->disc_id == disc->disc_id)
+ lport->tt.rport_login(rdata);
+ else
+ lport->tt.rport_logoff(rdata);
+ }
+
+ mutex_unlock(&disc->disc_mutex);
+ disc->disc_callback(lport, event);
+ mutex_lock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_error() - Handle error on dNS request
+ * @disc: The discovery context
+ * @fp: The error code encoded as a frame pointer
+ */
+static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
+{
+ struct fc_lport *lport = fc_disc_lport(disc);
+ unsigned long delay = 0;
+
+ FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
+ PTR_ERR(fp), disc->retry_count,
+ FC_DISC_RETRY_LIMIT);
+
+ if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+ /*
+ * Memory allocation failure, or the exchange timed out,
+ * retry after delay.
+ */
+ if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
+ /* go ahead and retry */
+ if (!fp)
+ delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
+ else {
+ delay = msecs_to_jiffies(lport->e_d_tov);
+
+ /* timeout faster first time */
+ if (!disc->retry_count)
+ delay /= 4;
+ }
+ disc->retry_count++;
+ schedule_delayed_work(&disc->disc_work, delay);
+ } else
+ fc_disc_done(disc, DISC_EV_FAILED);
+ } else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
+ /*
+ * if discovery fails due to lport reset, clear
+ * pending flag so that subsequent discovery can
+ * continue
+ */
+ disc->pending = 0;
+ }
+}
+
+/**
+ * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
+ * @lport: The discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static void fc_disc_gpn_ft_req(struct fc_disc *disc)
+{
+ struct fc_frame *fp;
+ struct fc_lport *lport = fc_disc_lport(disc);
+
+ WARN_ON(!fc_lport_test_ready(lport));
+
+ disc->pending = 1;
+ disc->requested = 0;
+
+ disc->buf_len = 0;
+ disc->seq_count = 0;
+ fp = fc_frame_alloc(lport,
+ sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_gid_ft));
+ if (!fp)
+ goto err;
+
+ if (lport->tt.elsct_send(lport, 0, fp,
+ FC_NS_GPN_FT,
+ fc_disc_gpn_ft_resp,
+ disc, 3 * lport->r_a_tov))
+ return;
+err:
+ fc_disc_error(disc, NULL);
+}
+
+/**
+ * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
+ * @lport: The local port the GPN_FT was received on
+ * @buf: The GPN_FT response buffer
+ * @len: The size of response buffer
+ *
+ * Goes through the list of IDs and names resulting from a request.
+ */
+static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
+{
+ struct fc_lport *lport;
+ struct fc_gpn_ft_resp *np;
+ char *bp;
+ size_t plen;
+ size_t tlen;
+ int error = 0;
+ struct fc_rport_identifiers ids;
+ struct fc_rport_priv *rdata;
+
+ lport = fc_disc_lport(disc);
+ disc->seq_count++;
+
+ /*
+ * Handle partial name record left over from previous call.
+ */
+ bp = buf;
+ plen = len;
+ np = (struct fc_gpn_ft_resp *)bp;
+ tlen = disc->buf_len;
+ disc->buf_len = 0;
+ if (tlen) {
+ WARN_ON(tlen >= sizeof(*np));
+ plen = sizeof(*np) - tlen;
+ WARN_ON(plen <= 0);
+ WARN_ON(plen >= sizeof(*np));
+ if (plen > len)
+ plen = len;
+ np = &disc->partial_buf;
+ memcpy((char *)np + tlen, bp, plen);
+
+ /*
+ * Set bp so that the loop below will advance it to the
+ * first valid full name element.
+ */
+ bp -= tlen;
+ len += tlen;
+ plen += tlen;
+ disc->buf_len = (unsigned char) plen;
+ if (plen == sizeof(*np))
+ disc->buf_len = 0;
+ }
+
+ /*
+ * Handle full name records, including the one filled from above.
+ * Normally, np == bp and plen == len, but from the partial case above,
+ * bp, len describe the overall buffer, and np, plen describe the
+ * partial buffer, which if would usually be full now.
+ * After the first time through the loop, things return to "normal".
+ */
+ while (plen >= sizeof(*np)) {
+ ids.port_id = ntoh24(np->fp_fid);
+ ids.port_name = ntohll(np->fp_wwpn);
+
+ if (ids.port_id != lport->port_id &&
+ ids.port_name != lport->wwpn) {
+ rdata = lport->tt.rport_create(lport, ids.port_id);
+ if (rdata) {
+ rdata->ids.port_name = ids.port_name;
+ rdata->disc_id = disc->disc_id;
+ } else {
+ printk(KERN_WARNING "libfc: Failed to allocate "
+ "memory for the newly discovered port "
+ "(%6.6x)\n", ids.port_id);
+ error = -ENOMEM;
+ }
+ }
+
+ if (np->fp_flags & FC_NS_FID_LAST) {
+ fc_disc_done(disc, DISC_EV_SUCCESS);
+ len = 0;
+ break;
+ }
+ len -= sizeof(*np);
+ bp += sizeof(*np);
+ np = (struct fc_gpn_ft_resp *)bp;
+ plen = len;
+ }
+
+ /*
+ * Save any partial record at the end of the buffer for next time.
+ */
+ if (error == 0 && len > 0 && len < sizeof(*np)) {
+ if (np != &disc->partial_buf) {
+ FC_DISC_DBG(disc, "Partial buffer remains "
+ "for discovery\n");
+ memcpy(&disc->partial_buf, np, len);
+ }
+ disc->buf_len = (unsigned char) len;
+ }
+ return error;
+}
+
+/**
+ * fc_disc_timeout() - Handler for discovery timeouts
+ * @work: Structure holding discovery context that needs to retry discovery
+ */
+static void fc_disc_timeout(struct work_struct *work)
+{
+ struct fc_disc *disc = container_of(work,
+ struct fc_disc,
+ disc_work.work);
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_gpn_ft_req(disc);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
+ * @sp: The sequence that the GPN_FT response was received on
+ * @fp: The GPN_FT response frame
+ * @lp_arg: The discovery context
+ *
+ * Locking Note: This function is called without disc mutex held, and
+ * should do all its processing with the mutex held
+ */
+static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *disc_arg)
+{
+ struct fc_disc *disc = disc_arg;
+ struct fc_ct_hdr *cp;
+ struct fc_frame_header *fh;
+ enum fc_disc_event event = DISC_EV_NONE;
+ unsigned int seq_cnt;
+ unsigned int len;
+ int error = 0;
+
+ mutex_lock(&disc->disc_mutex);
+ FC_DISC_DBG(disc, "Received a GPN_FT response\n");
+
+ if (IS_ERR(fp)) {
+ fc_disc_error(disc, fp);
+ mutex_unlock(&disc->disc_mutex);
+ return;
+ }
+
+ WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
+ fh = fc_frame_header_get(fp);
+ len = fr_len(fp) - sizeof(*fh);
+ seq_cnt = ntohs(fh->fh_seq_cnt);
+ if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp) {
+ FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
+ fr_len(fp));
+ event = DISC_EV_FAILED;
+ } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+
+ /* Accepted, parse the response. */
+ len -= sizeof(*cp);
+ error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
+ } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+ FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
+ "(check zoning)\n", cp->ct_reason,
+ cp->ct_explan);
+ event = DISC_EV_FAILED;
+ if (cp->ct_reason == FC_FS_RJT_UNABL &&
+ cp->ct_explan == FC_FS_EXP_FTNR)
+ event = DISC_EV_SUCCESS;
+ } else {
+ FC_DISC_DBG(disc, "GPN_FT unexpected response code "
+ "%x\n", ntohs(cp->ct_cmd));
+ event = DISC_EV_FAILED;
+ }
+ } else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
+ error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
+ } else {
+ FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
+ "seq_cnt %x expected %x sof %x eof %x\n",
+ seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+ event = DISC_EV_FAILED;
+ }
+ if (error)
+ fc_disc_error(disc, fp);
+ else if (event != DISC_EV_NONE)
+ fc_disc_done(disc, event);
+ fc_frame_free(fp);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
+ * @sp: The sequence the GPN_ID is on
+ * @fp: The response frame
+ * @rdata_arg: The remote port that sent the GPN_ID response
+ *
+ * Locking Note: This function is called without disc mutex held.
+ */
+static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_rport_priv *new_rdata;
+ struct fc_lport *lport;
+ struct fc_disc *disc;
+ struct fc_ct_hdr *cp;
+ struct fc_ns_gid_pn *pn;
+ u64 port_name;
+
+ lport = rdata->local_port;
+ disc = &lport->disc;
+
+ mutex_lock(&disc->disc_mutex);
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+ if (IS_ERR(fp))
+ goto redisc;
+
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp)
+ goto redisc;
+ if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+ if (fr_len(fp) < sizeof(struct fc_frame_header) +
+ sizeof(*cp) + sizeof(*pn))
+ goto redisc;
+ pn = (struct fc_ns_gid_pn *)(cp + 1);
+ port_name = get_unaligned_be64(&pn->fn_wwpn);
+ if (rdata->ids.port_name == -1)
+ rdata->ids.port_name = port_name;
+ else if (rdata->ids.port_name != port_name) {
+ FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
+ "Port-id %6.6x wwpn %16.16llx\n",
+ rdata->ids.port_id, port_name);
+ lport->tt.rport_logoff(rdata);
+
+ new_rdata = lport->tt.rport_create(lport,
+ rdata->ids.port_id);
+ if (new_rdata) {
+ new_rdata->disc_id = disc->disc_id;
+ lport->tt.rport_login(new_rdata);
+ }
+ goto out;
+ }
+ rdata->disc_id = disc->disc_id;
+ lport->tt.rport_login(rdata);
+ } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+ FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
+ cp->ct_reason, cp->ct_explan);
+ lport->tt.rport_logoff(rdata);
+ } else {
+ FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
+ ntohs(cp->ct_cmd));
+redisc:
+ fc_disc_restart(disc);
+ }
+out:
+ mutex_unlock(&disc->disc_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+}
+
+/**
+ * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
+ * @lport: The local port to initiate discovery on
+ * @rdata: remote port private data
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ * On failure, an error code is returned.
+ */
+static int fc_disc_gpn_id_req(struct fc_lport *lport,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_frame *fp;
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_fid));
+ if (!fp)
+ return -ENOMEM;
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
+ fc_disc_gpn_id_resp, rdata,
+ 3 * lport->r_a_tov))
+ return -ENOMEM;
+ kref_get(&rdata->kref);
+ return 0;
+}
+
+/**
+ * fc_disc_single() - Discover the directory information for a single target
+ * @lport: The local port the remote port is associated with
+ * @dp: The port to rediscover
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
+{
+ struct fc_rport_priv *rdata;
+
+ rdata = lport->tt.rport_create(lport, dp->port_id);
+ if (!rdata)
+ return -ENOMEM;
+ rdata->disc_id = 0;
+ return fc_disc_gpn_id_req(lport, rdata);
+}
+
+/**
+ * fc_disc_stop() - Stop discovery for a given lport
+ * @lport: The local port that discovery should stop on
+ */
+static void fc_disc_stop(struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ if (disc->pending)
+ cancel_delayed_work_sync(&disc->disc_work);
+ fc_disc_stop_rports(disc);
+}
+
+/**
+ * fc_disc_stop_final() - Stop discovery for a given lport
+ * @lport: The lport that discovery should stop on
+ *
+ * This function will block until discovery has been
+ * completely stopped and all rports have been deleted.
+ */
+static void fc_disc_stop_final(struct fc_lport *lport)
+{
+ fc_disc_stop(lport);
+ lport->tt.rport_flush_queue();
+}
+
+/**
+ * fc_disc_config() - Configure the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be configured
+ * @priv: Private data structre for users of the discovery layer
+ */
+void fc_disc_config(struct fc_lport *lport, void *priv)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ if (!lport->tt.disc_start)
+ lport->tt.disc_start = fc_disc_start;
+
+ if (!lport->tt.disc_stop)
+ lport->tt.disc_stop = fc_disc_stop;
+
+ if (!lport->tt.disc_stop_final)
+ lport->tt.disc_stop_final = fc_disc_stop_final;
+
+ if (!lport->tt.disc_recv_req)
+ lport->tt.disc_recv_req = fc_disc_recv_req;
+
+ disc = &lport->disc;
+
+ disc->priv = priv;
+}
+EXPORT_SYMBOL(fc_disc_config);
+
+/**
+ * fc_disc_init() - Initialize the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be initialized
+ */
+void fc_disc_init(struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
+ mutex_init(&disc->disc_mutex);
+ INIT_LIST_HEAD(&disc->rports);
+}
+EXPORT_SYMBOL(fc_disc_init);
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000..c2384d501
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Provide interface to send ELS/CT FC frames
+ */
+
+#include <linux/export.h>
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+#include "fc_libfc.h"
+
+/**
+ * fc_elsct_send() - Send an ELS or CT frame
+ * @lport: The local port to send the frame on
+ * @did: The destination ID for the frame
+ * @fp: The frame to be sent
+ * @op: The operational code
+ * @resp: The callback routine when the response is received
+ * @arg: The argument to pass to the response callback routine
+ * @timer_msec: The timeout period for the frame (in msecs)
+ */
+struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timer_msec)
+{
+ enum fc_rctl r_ctl;
+ enum fc_fh_type fh_type;
+ int rc;
+
+ /* ELS requests */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
+ rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
+ else {
+ /* CT requests */
+ rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
+ }
+
+ if (rc) {
+ fc_frame_free(fp);
+ return NULL;
+ }
+
+ fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
+ FC_FCTL_REQ, 0);
+
+ return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+}
+EXPORT_SYMBOL(fc_elsct_send);
+
+/**
+ * fc_elsct_init() - Initialize the ELS/CT layer
+ * @lport: The local port to initialize the ELS/CT layer for
+ */
+int fc_elsct_init(struct fc_lport *lport)
+{
+ if (!lport->tt.elsct_send)
+ lport->tt.elsct_send = fc_elsct_send;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_elsct_init);
+
+/**
+ * fc_els_resp_type() - Return a string describing the ELS response
+ * @fp: The frame pointer or possible error code
+ */
+const char *fc_els_resp_type(struct fc_frame *fp)
+{
+ const char *msg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ if (IS_ERR(fp)) {
+ switch (-PTR_ERR(fp)) {
+ case FC_NO_ERR:
+ msg = "response no error";
+ break;
+ case FC_EX_TIMEOUT:
+ msg = "response timeout";
+ break;
+ case FC_EX_CLOSED:
+ msg = "response closed";
+ break;
+ default:
+ msg = "response unknown error";
+ break;
+ }
+ } else {
+ fh = fc_frame_header_get(fp);
+ switch (fh->fh_type) {
+ case FC_TYPE_ELS:
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LS_ACC:
+ msg = "accept";
+ break;
+ case ELS_LS_RJT:
+ msg = "reject";
+ break;
+ default:
+ msg = "response unknown ELS";
+ break;
+ }
+ break;
+ case FC_TYPE_CT:
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+ if (ct) {
+ switch (ntohs(ct->ct_cmd)) {
+ case FC_FS_ACC:
+ msg = "CT accept";
+ break;
+ case FC_FS_RJT:
+ msg = "CT reject";
+ break;
+ default:
+ msg = "response unknown CT";
+ break;
+ }
+ } else {
+ msg = "short CT response";
+ }
+ break;
+ default:
+ msg = "response not ELS or CT";
+ break;
+ }
+ }
+ return msg;
+}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000..1b3a09473
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,2631 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Fibre Channel exchange and sequence handling.
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/log2.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include "fc_libfc.h"
+
+u16 fc_cpu_mask; /* cpu mask for possible cpus */
+EXPORT_SYMBOL(fc_cpu_mask);
+static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
+static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
+static struct workqueue_struct *fc_exch_workqueue;
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences.
+ *
+ * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
+ *
+ * fc_exch_mgr holds the exchange state for an N port
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+/**
+ * struct fc_exch_pool - Per cpu exchange pool
+ * @next_index: Next possible free exchange index
+ * @total_exches: Total allocated exchanges
+ * @lock: Exch pool lock
+ * @ex_list: List of exchanges
+ *
+ * This structure manages per cpu exchanges in array of exchange pointers.
+ * This array is allocated followed by struct fc_exch_pool memory for
+ * assigned range of exchanges to per cpu pool.
+ */
+struct fc_exch_pool {
+ spinlock_t lock;
+ struct list_head ex_list;
+ u16 next_index;
+ u16 total_exches;
+
+ /* two cache of free slot in exch array */
+ u16 left;
+ u16 right;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct fc_exch_mgr - The Exchange Manager (EM).
+ * @class: Default class for new sequences
+ * @kref: Reference counter
+ * @min_xid: Minimum exchange ID
+ * @max_xid: Maximum exchange ID
+ * @ep_pool: Reserved exchange pointers
+ * @pool_max_index: Max exch array index in exch pool
+ * @pool: Per cpu exch pool
+ * @stats: Statistics structure
+ *
+ * This structure is the center for creating exchanges and sequences.
+ * It manages the allocation of exchange IDs.
+ */
+struct fc_exch_mgr {
+ struct fc_exch_pool __percpu *pool;
+ mempool_t *ep_pool;
+ enum fc_class class;
+ struct kref kref;
+ u16 min_xid;
+ u16 max_xid;
+ u16 pool_max_index;
+
+ struct {
+ atomic_t no_free_exch;
+ atomic_t no_free_exch_xid;
+ atomic_t xid_not_found;
+ atomic_t xid_busy;
+ atomic_t seq_not_found;
+ atomic_t non_bls_resp;
+ } stats;
+};
+
+/**
+ * struct fc_exch_mgr_anchor - primary structure for list of EMs
+ * @ema_list: Exchange Manager Anchor list
+ * @mp: Exchange Manager associated with this anchor
+ * @match: Routine to determine if this anchor's EM should be used
+ *
+ * When walking the list of anchors the match routine will be called
+ * for each anchor to determine if that EM should be used. The last
+ * anchor in the list will always match to handle any exchanges not
+ * handled by other EMs. The non-default EMs would be added to the
+ * anchor list by HW that provides offloads.
+ */
+struct fc_exch_mgr_anchor {
+ struct list_head ema_list;
+ struct fc_exch_mgr *mp;
+ bool (*match)(struct fc_frame *);
+};
+
+static void fc_exch_rrq(struct fc_exch *);
+static void fc_seq_ls_acc(struct fc_frame *);
+static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
+ enum fc_els_rjt_explan);
+static void fc_exch_els_rec(struct fc_frame *);
+static void fc_exch_els_rrq(struct fc_frame *);
+
+/*
+ * Internal implementation notes.
+ *
+ * The exchange manager is one by default in libfc but LLD may choose
+ * to have one per CPU. The sequence manager is one per exchange manager
+ * and currently never separated.
+ *
+ * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
+ * assigned by the Sequence Initiator that shall be unique for a specific
+ * D_ID and S_ID pair while the Sequence is open." Note that it isn't
+ * qualified by exchange ID, which one might think it would be.
+ * In practice this limits the number of open sequences and exchanges to 256
+ * per session. For most targets we could treat this limit as per exchange.
+ *
+ * The exchange and its sequence are freed when the last sequence is received.
+ * It's possible for the remote port to leave an exchange open without
+ * sending any sequences.
+ *
+ * Notes on reference counts:
+ *
+ * Exchanges are reference counted and exchange gets freed when the reference
+ * count becomes zero.
+ *
+ * Timeouts:
+ * Sequences are timed out for E_D_TOV and R_A_TOV.
+ *
+ * Sequence event handling:
+ *
+ * The following events may occur on initiator sequences:
+ *
+ * Send.
+ * For now, the whole thing is sent.
+ * Receive ACK
+ * This applies only to class F.
+ * The sequence is marked complete.
+ * ULP completion.
+ * The upper layer calls fc_exch_done() when done
+ * with exchange and sequence tuple.
+ * RX-inferred completion.
+ * When we receive the next sequence on the same exchange, we can
+ * retire the previous sequence ID. (XXX not implemented).
+ * Timeout.
+ * R_A_TOV frees the sequence ID. If we're waiting for ACK,
+ * E_D_TOV causes abort and calls upper layer response handler
+ * with FC_EX_TIMEOUT error.
+ * Receive RJT
+ * XXX defer.
+ * Send ABTS
+ * On timeout.
+ *
+ * The following events may occur on recipient sequences:
+ *
+ * Receive
+ * Allocate sequence for first frame received.
+ * Hold during receive handler.
+ * Release when final frame received.
+ * Keep status of last N of these for the ELS RES command. XXX TBD.
+ * Receive ABTS
+ * Deallocate sequence
+ * Send RJT
+ * Deallocate
+ *
+ * For now, we neglect conditions where only part of a sequence was
+ * received or transmitted, or where out-of-order receipt is detected.
+ */
+
+/*
+ * Locking notes:
+ *
+ * The EM code run in a per-CPU worker thread.
+ *
+ * To protect against concurrency between a worker thread code and timers,
+ * sequence allocation and deallocation must be locked.
+ * - exchange refcnt can be done atomicly without locks.
+ * - sequence allocation must be locked by exch lock.
+ * - If the EM pool lock and ex_lock must be taken at the same time, then the
+ * EM pool lock must be taken before the ex_lock.
+ */
+
+/*
+ * opcode names for debugging.
+ */
+static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
+
+/**
+ * fc_exch_name_lookup() - Lookup name by opcode
+ * @op: Opcode to be looked up
+ * @table: Opcode/name table
+ * @max_index: Index not to be exceeded
+ *
+ * This routine is used to determine a human-readable string identifying
+ * a R_CTL opcode.
+ */
+static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
+ unsigned int max_index)
+{
+ const char *name = NULL;
+
+ if (op < max_index)
+ name = table[op];
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+/**
+ * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
+ * @op: The opcode to be looked up
+ */
+static const char *fc_exch_rctl_name(unsigned int op)
+{
+ return fc_exch_name_lookup(op, fc_exch_rctl_names,
+ ARRAY_SIZE(fc_exch_rctl_names));
+}
+
+/**
+ * fc_exch_hold() - Increment an exchange's reference count
+ * @ep: Echange to be held
+ */
+static inline void fc_exch_hold(struct fc_exch *ep)
+{
+ atomic_inc(&ep->ex_refcnt);
+}
+
+/**
+ * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields
+ * and determine SOF and EOF.
+ * @ep: The exchange to that will use the header
+ * @fp: The frame whose header is to be modified
+ * @f_ctl: F_CTL bits that will be used for the frame header
+ *
+ * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
+ * fh_seq_id, fh_seq_cnt and the SOF and EOF.
+ */
+static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
+ u32 f_ctl)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ u16 fill;
+
+ fr_sof(fp) = ep->class;
+ if (ep->seq.cnt)
+ fr_sof(fp) = fc_sof_normal(ep->class);
+
+ if (f_ctl & FC_FC_END_SEQ) {
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(ep->class))
+ fr_eof(fp) = FC_EOF_N;
+ /*
+ * From F_CTL.
+ * The number of fill bytes to make the length a 4-byte
+ * multiple is the low order 2-bits of the f_ctl.
+ * The fill itself will have been cleared by the frame
+ * allocation.
+ * After this, the length will be even, as expected by
+ * the transport.
+ */
+ fill = fr_len(fp) & 3;
+ if (fill) {
+ fill = 4 - fill;
+ /* TODO, this may be a problem with fragmented skb */
+ skb_put(fp_skb(fp), fill);
+ hton24(fh->fh_f_ctl, f_ctl | fill);
+ }
+ } else {
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
+ fr_eof(fp) = FC_EOF_N;
+ }
+
+ /* Initialize remaining fh fields from fc_fill_fc_hdr */
+ fh->fh_ox_id = htons(ep->oxid);
+ fh->fh_rx_id = htons(ep->rxid);
+ fh->fh_seq_id = ep->seq.id;
+ fh->fh_seq_cnt = htons(ep->seq.cnt);
+}
+
+/**
+ * fc_exch_release() - Decrement an exchange's reference count
+ * @ep: Exchange to be released
+ *
+ * If the reference count reaches zero and the exchange is complete,
+ * it is freed.
+ */
+static void fc_exch_release(struct fc_exch *ep)
+{
+ struct fc_exch_mgr *mp;
+
+ if (atomic_dec_and_test(&ep->ex_refcnt)) {
+ mp = ep->em;
+ if (ep->destructor)
+ ep->destructor(&ep->seq, ep->arg);
+ WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
+ mempool_free(ep, mp->ep_pool);
+ }
+}
+
+/**
+ * fc_exch_timer_cancel() - cancel exch timer
+ * @ep: The exchange whose timer to be canceled
+ */
+static inline void fc_exch_timer_cancel(struct fc_exch *ep)
+{
+ if (cancel_delayed_work(&ep->timeout_work)) {
+ FC_EXCH_DBG(ep, "Exchange timer canceled\n");
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ }
+}
+
+/**
+ * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
+ * the exchange lock held
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ *
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ return;
+
+ FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
+
+ fc_exch_hold(ep); /* hold for timer */
+ if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_timer_set() - Lock the exchange and set the timer
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
+ * fc_exch_done_locked() - Complete an exchange with the exchange lock held
+ * @ep: The exchange that is complete
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static int fc_exch_done_locked(struct fc_exch *ep)
+{
+ int rc = 1;
+
+ /*
+ * We must check for completion in case there are two threads
+ * tyring to complete this. But the rrq code will reuse the
+ * ep, and in that case we only clear the resp and set it as
+ * complete, so it can be reused by the timer to send the rrq.
+ */
+ if (ep->state & FC_EX_DONE)
+ return rc;
+ ep->esb_stat |= ESB_ST_COMPLETE;
+
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->state |= FC_EX_DONE;
+ fc_exch_timer_cancel(ep);
+ rc = 0;
+ }
+ return rc;
+}
+
+/**
+ * fc_exch_ptr_get() - Return an exchange from an exchange pool
+ * @pool: Exchange Pool to get an exchange from
+ * @index: Index of the exchange within the pool
+ *
+ * Use the index to get an exchange from within an exchange pool. exches
+ * will point to an array of exchange pointers. The index will select
+ * the exchange within the array.
+ */
+static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
+ u16 index)
+{
+ struct fc_exch **exches = (struct fc_exch **)(pool + 1);
+ return exches[index];
+}
+
+/**
+ * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
+ * @pool: The pool to assign the exchange to
+ * @index: The index in the pool where the exchange will be assigned
+ * @ep: The exchange to assign to the pool
+ */
+static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
+ struct fc_exch *ep)
+{
+ ((struct fc_exch **)(pool + 1))[index] = ep;
+}
+
+/**
+ * fc_exch_delete() - Delete an exchange
+ * @ep: The exchange to be deleted
+ */
+static void fc_exch_delete(struct fc_exch *ep)
+{
+ struct fc_exch_pool *pool;
+ u16 index;
+
+ pool = ep->pool;
+ spin_lock_bh(&pool->lock);
+ WARN_ON(pool->total_exches <= 0);
+ pool->total_exches--;
+
+ /* update cache of free slot */
+ index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
+ if (pool->left == FC_XID_UNKNOWN)
+ pool->left = index;
+ else if (pool->right == FC_XID_UNKNOWN)
+ pool->right = index;
+ else
+ pool->next_index = index;
+
+ fc_exch_ptr_set(pool, index, NULL);
+ list_del(&ep->ex_list);
+ spin_unlock_bh(&pool->lock);
+ fc_exch_release(ep); /* drop hold for exch in mp */
+}
+
+static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ int error = -ENXIO;
+ u32 f_ctl;
+ u8 fh_type = fh->fh_type;
+
+ ep = fc_seq_exch(sp);
+
+ if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
+ fc_frame_free(fp);
+ goto out;
+ }
+
+ WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_exch_setup_hdr(ep, fp, f_ctl);
+ fr_encaps(fp) = ep->encaps;
+
+ /*
+ * update sequence count if this frame is carrying
+ * multiple FC frames when sequence offload is enabled
+ * by LLD.
+ */
+ if (fr_max_payload(fp))
+ sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
+ fr_max_payload(fp));
+ else
+ sp->cnt++;
+
+ /*
+ * Send the frame.
+ */
+ error = lport->tt.frame_send(lport, fp);
+
+ if (fh_type == FC_TYPE_BLS)
+ goto out;
+
+ /*
+ * Update the exchange and sequence flags,
+ * assuming all frames for the sequence have been sent.
+ * We can only be called to send once for each sequence.
+ */
+ ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+out:
+ return error;
+}
+
+/**
+ * fc_seq_send() - Send a frame using existing sequence/exchange pair
+ * @lport: The local port that the exchange will be sent on
+ * @sp: The sequence to be sent
+ * @fp: The frame to be sent on the exchange
+ *
+ * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
+ * or indirectly by calling libfc_function_template.frame_send().
+ */
+static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ int error;
+ ep = fc_seq_exch(sp);
+ spin_lock_bh(&ep->ex_lock);
+ error = fc_seq_send_locked(lport, sp, fp);
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+
+/**
+ * fc_seq_alloc() - Allocate a sequence for a given exchange
+ * @ep: The exchange to allocate a new sequence for
+ * @seq_id: The sequence ID to be used
+ *
+ * We don't support multiple originated sequences on the same exchange.
+ * By implication, any previously originated sequence on this exchange
+ * is complete, and we reallocate the same sequence.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+ struct fc_seq *sp;
+
+ sp = &ep->seq;
+ sp->ssb_stat = 0;
+ sp->cnt = 0;
+ sp->id = seq_id;
+ return sp;
+}
+
+/**
+ * fc_seq_start_next_locked() - Allocate a new sequence on the same
+ * exchange as the supplied sequence
+ * @sp: The sequence/exchange to get a new sequence for
+ */
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ sp = fc_seq_alloc(ep, ep->seq_id++);
+ FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
+ ep->f_ctl, sp->id);
+ return sp;
+}
+
+/**
+ * fc_seq_start_next() - Lock the exchange and get a new sequence
+ * for a given sequence/exchange pair
+ * @sp: The sequence/exchange to get a new exchange for
+ */
+static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ sp = fc_seq_start_next_locked(sp);
+ spin_unlock_bh(&ep->ex_lock);
+
+ return sp;
+}
+
+/*
+ * Set the response handler for the exchange associated with a sequence.
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+ DEFINE_WAIT(wait);
+
+ spin_lock_bh(&ep->ex_lock);
+ while (ep->resp_active && ep->resp_task != current) {
+ prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&ep->ex_lock);
+
+ schedule();
+
+ spin_lock_bh(&ep->ex_lock);
+ }
+ finish_wait(&ep->resp_wq, &wait);
+ ep->resp = resp;
+ ep->arg = arg;
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
+ * fc_exch_abort_locked() - Abort an exchange
+ * @ep: The exchange to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Locking notes: Called with exch lock held
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_exch_abort_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ struct fc_seq *sp;
+ struct fc_frame *fp;
+ int error;
+
+ if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+ return -ENXIO;
+
+ /*
+ * Send the abort on a new sequence if possible.
+ */
+ sp = fc_seq_start_next_locked(&ep->seq);
+ if (!sp)
+ return -ENOMEM;
+
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+
+ if (ep->sid) {
+ /*
+ * Send an abort for the sequence that timed out.
+ */
+ fp = fc_frame_alloc(ep->lp, 0);
+ if (fp) {
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+ FC_TYPE_BLS, FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ error = fc_seq_send_locked(ep->lp, sp, fp);
+ } else {
+ error = -ENOBUFS;
+ }
+ } else {
+ /*
+ * If not logged into the fabric, don't send ABTS but leave
+ * sequence active until next timeout.
+ */
+ error = 0;
+ }
+ ep->esb_stat |= ESB_ST_ABNORMAL;
+ return error;
+}
+
+/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp: The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_seq_exch_abort(const struct fc_seq *req_sp,
+ unsigned int timer_msec)
+{
+ struct fc_exch *ep;
+ int error;
+
+ ep = fc_seq_exch(req_sp);
+ spin_lock_bh(&ep->ex_lock);
+ error = fc_exch_abort_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+
+/**
+ * fc_invoke_resp() - invoke ep->resp()
+ *
+ * Notes:
+ * It is assumed that after initialization finished (this means the
+ * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+ * modified only via fc_seq_set_resp(). This guarantees that none of these
+ * two variables changes if ep->resp_active > 0.
+ *
+ * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+ * this function is invoked, the first spin_lock_bh() call in this function
+ * will wait until fc_seq_set_resp() has finished modifying these variables.
+ *
+ * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that
+ * ep->resp() won't be invoked after fc_exch_done() has returned.
+ *
+ * The response handler itself may invoke fc_exch_done(), which will clear the
+ * ep->resp pointer.
+ *
+ * Return value:
+ * Returns true if and only if ep->resp has been invoked.
+ */
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *arg;
+ bool res = false;
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp_active++;
+ if (ep->resp_task != current)
+ ep->resp_task = !ep->resp_task ? current : NULL;
+ resp = ep->resp;
+ arg = ep->arg;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (resp) {
+ resp(sp, fp, arg);
+ res = true;
+ } else if (!IS_ERR(fp)) {
+ fc_frame_free(fp);
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ if (--ep->resp_active == 0)
+ ep->resp_task = NULL;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (ep->resp_active == 0)
+ wake_up(&ep->resp_wq);
+
+ return res;
+}
+
+/**
+ * fc_exch_timeout() - Handle exchange timer expiration
+ * @work: The work_struct identifying the exchange that timed out
+ */
+static void fc_exch_timeout(struct work_struct *work)
+{
+ struct fc_exch *ep = container_of(work, struct fc_exch,
+ timeout_work.work);
+ struct fc_seq *sp = &ep->seq;
+ u32 e_stat;
+ int rc = 1;
+
+ FC_EXCH_DBG(ep, "Exchange timed out\n");
+
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ goto unlock;
+
+ e_stat = ep->esb_stat;
+ if (e_stat & ESB_ST_COMPLETE) {
+ ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
+ spin_unlock_bh(&ep->ex_lock);
+ if (e_stat & ESB_ST_REC_QUAL)
+ fc_exch_rrq(ep);
+ goto done;
+ } else {
+ if (e_stat & ESB_ST_ABNORMAL)
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
+ goto done;
+ }
+unlock:
+ spin_unlock_bh(&ep->ex_lock);
+done:
+ /*
+ * This release matches the hold taken when the timer was set.
+ */
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
+ * @lport: The local port that the exchange is for
+ * @mp: The exchange manager that will allocate the exchange
+ *
+ * Returns pointer to allocated fc_exch with exch lock held.
+ */
+static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+ struct fc_exch_mgr *mp)
+{
+ struct fc_exch *ep;
+ unsigned int cpu;
+ u16 index;
+ struct fc_exch_pool *pool;
+
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+ atomic_inc(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+
+ cpu = get_cpu();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ put_cpu();
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
+ index = pool->left;
+ pool->left = FC_XID_UNKNOWN;
+ goto hit;
+ }
+ if (pool->right != FC_XID_UNKNOWN) {
+ index = pool->right;
+ pool->right = FC_XID_UNKNOWN;
+ goto hit;
+ }
+
+ index = pool->next_index;
+ /* allocate new exch from pool */
+ while (fc_exch_ptr_get(pool, index)) {
+ index = index == mp->pool_max_index ? 0 : index + 1;
+ if (index == pool->next_index)
+ goto err;
+ }
+ pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
+hit:
+ fc_exch_hold(ep); /* hold for exch in mp */
+ spin_lock_init(&ep->ex_lock);
+ /*
+ * Hold exch lock for caller to prevent fc_exch_reset()
+ * from releasing exch while fc_exch_alloc() caller is
+ * still working on exch.
+ */
+ spin_lock_bh(&ep->ex_lock);
+
+ fc_exch_ptr_set(pool, index, ep);
+ list_add_tail(&ep->ex_list, &pool->ex_list);
+ fc_seq_alloc(ep, ep->seq_id++);
+ pool->total_exches++;
+ spin_unlock_bh(&pool->lock);
+
+ /*
+ * update exchange
+ */
+ ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
+ ep->em = mp;
+ ep->pool = pool;
+ ep->lp = lport;
+ ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
+ ep->rxid = FC_XID_UNKNOWN;
+ ep->class = mp->class;
+ ep->resp_active = 0;
+ init_waitqueue_head(&ep->resp_wq);
+ INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
+out:
+ return ep;
+err:
+ spin_unlock_bh(&pool->lock);
+ atomic_inc(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+}
+
+/**
+ * fc_exch_alloc() - Allocate an exchange from an EM on a
+ * local port's list of EMs.
+ * @lport: The local port that will own the exchange
+ * @fp: The FC frame that the exchange will be for
+ *
+ * This function walks the list of exchange manager(EM)
+ * anchors to select an EM for a new exchange allocation. The
+ * EM is selected when a NULL match function pointer is encountered
+ * or when a call to a match function returns true.
+ */
+static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if (!ema->match || ema->match(fp))
+ return fc_exch_em_alloc(lport, ema->mp);
+ return NULL;
+}
+
+/**
+ * fc_exch_find() - Lookup and hold an exchange
+ * @mp: The exchange manager to lookup the exchange from
+ * @xid: The XID of the exchange to look up
+ */
+static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
+{
+ struct fc_exch_pool *pool;
+ struct fc_exch *ep = NULL;
+
+ if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
+ pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
+ spin_lock_bh(&pool->lock);
+ ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+ if (ep) {
+ WARN_ON(ep->xid != xid);
+ fc_exch_hold(ep);
+ }
+ spin_unlock_bh(&pool->lock);
+ }
+ return ep;
+}
+
+
+/**
+ * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
+ * the memory allocated for the related objects may be freed.
+ * @sp: The sequence that has completed
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static void fc_exch_done(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+ int rc;
+
+ spin_lock_bh(&ep->ex_lock);
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ if (!rc)
+ fc_exch_delete(ep);
+}
+
+/**
+ * fc_exch_resp() - Allocate a new exchange for a response frame
+ * @lport: The local port that the exchange was for
+ * @mp: The exchange manager to allocate the exchange from
+ * @fp: The response frame
+ *
+ * Sets the responder ID in the frame header.
+ */
+static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
+ struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh;
+
+ ep = fc_exch_alloc(lport, fp);
+ if (ep) {
+ ep->class = fc_frame_class(fp);
+
+ /*
+ * Set EX_CTX indicating we're responding on this exchange.
+ */
+ ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
+ fh = fc_frame_header_get(fp);
+ ep->sid = ntoh24(fh->fh_d_id);
+ ep->did = ntoh24(fh->fh_s_id);
+ ep->oid = ep->did;
+
+ /*
+ * Allocated exchange has placed the XID in the
+ * originator field. Move it to the responder field,
+ * and set the originator XID from the frame.
+ */
+ ep->rxid = ep->xid;
+ ep->oxid = ntohs(fh->fh_ox_id);
+ ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
+ if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+
+ fc_exch_hold(ep); /* hold for caller */
+ spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */
+ }
+ return ep;
+}
+
+/**
+ * fc_seq_lookup_recip() - Find a sequence where the other end
+ * originated the sequence
+ * @lport: The local port that the frame was sent to
+ * @mp: The Exchange Manager to lookup the exchange from
+ * @fp: The frame associated with the sequence we're looking for
+ *
+ * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
+ * on the ep that should be released by the caller.
+ */
+static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+ struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep = NULL;
+ struct fc_seq *sp = NULL;
+ enum fc_pf_rjt_reason reject = FC_RJT_NONE;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
+
+ /*
+ * Lookup or create the exchange if we will be creating the sequence.
+ */
+ if (f_ctl & FC_FC_EX_CTX) {
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ else if (ep->rxid != ntohs(fh->fh_rx_id)) {
+ reject = FC_RJT_OX_ID;
+ goto rel;
+ }
+ } else {
+ xid = ntohs(fh->fh_rx_id); /* we are the responder */
+
+ /*
+ * Special case for MDS issuing an ELS TEST with a
+ * bad rxid of 0.
+ * XXX take this out once we do the proper reject.
+ */
+ if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fc_frame_payload_op(fp) == ELS_TEST) {
+ fh->fh_rx_id = htons(FC_XID_UNKNOWN);
+ xid = FC_XID_UNKNOWN;
+ }
+
+ /*
+ * new sequence - find the exchange
+ */
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+ atomic_inc(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+ ep = fc_exch_resp(lport, mp, fp);
+ if (!ep) {
+ reject = FC_RJT_EXCH_EST; /* XXX */
+ goto out;
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ /*
+ * At this point, we have the exchange held.
+ * Find or create the sequence.
+ */
+ if (fc_sof_is_init(fr_sof(fp))) {
+ sp = &ep->seq;
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ if (f_ctl & FC_FC_END_SEQ) {
+ /*
+ * Update sequence_id based on incoming last
+ * frame of sequence exchange. This is needed
+ * for FC target where DDP has been used
+ * on target where, stack is indicated only
+ * about last frame's (payload _header) header.
+ * Whereas "seq_id" which is part of
+ * frame_header is allocated by initiator
+ * which is totally different from "seq_id"
+ * allocated when XFER_RDY was sent by target.
+ * To avoid false -ve which results into not
+ * sending RSP, hence write request on other
+ * end never finishes.
+ */
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else {
+ spin_unlock_bh(&ep->ex_lock);
+
+ /* sequence/exch should exist */
+ reject = FC_RJT_SEQ_ID;
+ goto rel;
+ }
+ }
+ }
+ WARN_ON(ep != fc_seq_exch(sp));
+
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+
+ fr_seq(fp) = sp;
+out:
+ return reject;
+rel:
+ fc_exch_done(&ep->seq);
+ fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
+ return reject;
+}
+
+/**
+ * fc_seq_lookup_orig() - Find a sequence where this end
+ * originated the sequence
+ * @mp: The Exchange Manager to lookup the exchange from
+ * @fp: The frame associated with the sequence we're looking for
+ *
+ * Does not hold the sequence for the caller.
+ */
+static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
+ xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
+ ep = fc_exch_find(mp, xid);
+ if (!ep)
+ return NULL;
+ if (ep->seq.id == fh->fh_seq_id) {
+ /*
+ * Save the RX_ID if we didn't previously know it.
+ */
+ sp = &ep->seq;
+ if ((f_ctl & FC_FC_EX_CTX) != 0 &&
+ ep->rxid == FC_XID_UNKNOWN) {
+ ep->rxid = ntohs(fh->fh_rx_id);
+ }
+ }
+ fc_exch_release(ep);
+ return sp;
+}
+
+/**
+ * fc_exch_set_addr() - Set the source and destination IDs for an exchange
+ * @ep: The exchange to set the addresses for
+ * @orig_id: The originator's ID
+ * @resp_id: The responder's ID
+ *
+ * Note this must be done before the first sequence of the exchange is sent.
+ */
+static void fc_exch_set_addr(struct fc_exch *ep,
+ u32 orig_id, u32 resp_id)
+{
+ ep->oid = orig_id;
+ if (ep->esb_stat & ESB_ST_RESP) {
+ ep->sid = resp_id;
+ ep->did = orig_id;
+ } else {
+ ep->sid = orig_id;
+ ep->did = resp_id;
+ }
+}
+
+/**
+ * fc_seq_els_rsp_send() - Send an ELS response using information from
+ * the existing sequence/exchange.
+ * @fp: The received frame
+ * @els_cmd: The ELS command to be sent
+ * @els_data: The ELS data to be sent
+ *
+ * The received frame is not freed.
+ */
+static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
+{
+ switch (els_cmd) {
+ case ELS_LS_RJT:
+ fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
+ break;
+ case ELS_LS_ACC:
+ fc_seq_ls_acc(fp);
+ break;
+ case ELS_RRQ:
+ fc_exch_els_rrq(fp);
+ break;
+ case ELS_REC:
+ fc_exch_els_rec(fp);
+ break;
+ default:
+ FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
+ }
+}
+
+/**
+ * fc_seq_send_last() - Send a sequence that is the last in the exchange
+ * @sp: The sequence that is to be sent
+ * @fp: The frame that will be sent on the sequence
+ * @rctl: The R_CTL information to be sent
+ * @fh_type: The frame header type
+ */
+static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
+ enum fc_rctl rctl, enum fc_fh_type fh_type)
+{
+ u32 f_ctl;
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ f_ctl |= ep->f_ctl;
+ fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
+ fc_seq_send_locked(ep->lp, sp, fp);
+}
+
+/**
+ * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
+ * @sp: The sequence to send the ACK on
+ * @rx_fp: The received frame that is being acknoledged
+ *
+ * Send ACK_1 (or equiv.) indicating we received something.
+ */
+static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_exch *ep = fc_seq_exch(sp);
+ struct fc_lport *lport = ep->lp;
+ unsigned int f_ctl;
+
+ /*
+ * Don't send ACKs for class 3.
+ */
+ if (fc_sof_needs_ack(fr_sof(rx_fp))) {
+ fp = fc_frame_alloc(lport, 0);
+ if (!fp)
+ return;
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_ACK_1;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ */
+ rx_fh = fc_frame_header_get(rx_fp);
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fc_exch_setup_hdr(ep, fp, f_ctl);
+ fh->fh_seq_id = rx_fh->fh_seq_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_parm_offset = htonl(1); /* ack single frame */
+
+ fr_sof(fp) = fr_sof(rx_fp);
+ if (f_ctl & FC_FC_END_SEQ)
+ fr_eof(fp) = FC_EOF_T;
+ else
+ fr_eof(fp) = FC_EOF_N;
+
+ lport->tt.frame_send(lport, fp);
+ }
+}
+
+/**
+ * fc_exch_send_ba_rjt() - Send BLS Reject
+ * @rx_fp: The frame being rejected
+ * @reason: The reason the frame is being rejected
+ * @explan: The explanation for the rejection
+ *
+ * This is for rejecting BA_ABTS only.
+ */
+static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
+ enum fc_ba_rjt_reason reason,
+ enum fc_ba_rjt_explan explan)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_ba_rjt *rp;
+ struct fc_lport *lport;
+ unsigned int f_ctl;
+
+ lport = fr_dev(rx_fp);
+ fp = fc_frame_alloc(lport, sizeof(*rp));
+ if (!fp)
+ return;
+ fh = fc_frame_header_get(fp);
+ rx_fh = fc_frame_header_get(rx_fp);
+
+ memset(fh, 0, sizeof(*fh) + sizeof(*rp));
+
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ rp->br_reason = reason;
+ rp->br_explan = explan;
+
+ /*
+ * seq_id, cs_ctl, df_ctl and param/offset are zero.
+ */
+ memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
+ memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
+ fh->fh_ox_id = rx_fh->fh_ox_id;
+ fh->fh_rx_id = rx_fh->fh_rx_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_r_ctl = FC_RCTL_BA_RJT;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ * Always set LAST_SEQ, END_SEQ.
+ */
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ f_ctl &= ~FC_FC_FIRST_SEQ;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(fr_sof(fp)))
+ fr_eof(fp) = FC_EOF_N;
+
+ lport->tt.frame_send(lport, fp);
+}
+
+/**
+ * fc_exch_recv_abts() - Handle an incoming ABTS
+ * @ep: The exchange the abort was on
+ * @rx_fp: The ABTS frame
+ *
+ * This would be for target mode usually, but could be due to lost
+ * FCP transfer ready, confirm or RRQ. We always handle this as an
+ * exchange abort, ignoring the parameter.
+ */
+static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_ba_acc *ap;
+ struct fc_frame_header *fh;
+ struct fc_seq *sp;
+
+ if (!ep)
+ goto reject;
+
+ fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+ if (!fp)
+ goto free;
+
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_frame_free(fp);
+ goto reject;
+ }
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_hold(ep); /* hold for REC_QUAL */
+ }
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+ fh = fc_frame_header_get(fp);
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ memset(ap, 0, sizeof(*ap));
+ sp = &ep->seq;
+ ap->ba_high_seq_cnt = htons(0xffff);
+ if (sp->ssb_stat & SSB_ST_RESP) {
+ ap->ba_seq_id = sp->id;
+ ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
+ ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+ ap->ba_low_seq_cnt = htons(sp->cnt);
+ }
+ sp = fc_seq_start_next_locked(sp);
+ fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ ep->esb_stat |= ESB_ST_ABNORMAL;
+ spin_unlock_bh(&ep->ex_lock);
+
+free:
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
+ goto free;
+}
+
+/**
+ * fc_seq_assign() - Assign exchange and sequence for incoming request
+ * @lport: The local port that received the request
+ * @fp: The request frame
+ *
+ * On success, the sequence pointer will be returned and also in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
+ */
+static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ WARN_ON(lport != fr_dev(fp));
+ WARN_ON(fr_seq(fp));
+ fr_seq(fp) = NULL;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if ((!ema->match || ema->match(fp)) &&
+ fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
+ break;
+ return fr_seq(fp);
+}
+
+/**
+ * fc_seq_release() - Release the hold
+ * @sp: The sequence.
+ */
+static void fc_seq_release(struct fc_seq *sp)
+{
+ fc_exch_release(fc_seq_exch(sp));
+}
+
+/**
+ * fc_exch_recv_req() - Handler for an incoming request
+ * @lport: The local port that received the request
+ * @mp: The EM that the exchange is on
+ * @fp: The request frame
+ *
+ * This is used when the other end is originating the exchange
+ * and the sequence.
+ */
+static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = NULL;
+ struct fc_exch *ep = NULL;
+ enum fc_pf_rjt_reason reject;
+
+ /* We can have the wrong fc_lport at this point with NPIV, which is a
+ * problem now that we know a new exchange needs to be allocated
+ */
+ lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (!lport) {
+ fc_frame_free(fp);
+ return;
+ }
+ fr_dev(fp) = lport;
+
+ BUG_ON(fr_seq(fp)); /* XXX remove later */
+
+ /*
+ * If the RX_ID is 0xffff, don't allocate an exchange.
+ * The upper-level protocol may request one later, if needed.
+ */
+ if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
+ return lport->tt.lport_recv(lport, fp);
+
+ reject = fc_seq_lookup_recip(lport, mp, fp);
+ if (reject == FC_RJT_NONE) {
+ sp = fr_seq(fp); /* sequence will be held */
+ ep = fc_seq_exch(sp);
+ fc_seq_send_ack(sp, fp);
+ ep->encaps = fr_encaps(fp);
+
+ /*
+ * Call the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If new exch resp handler is valid then call that
+ * first.
+ */
+ if (!fc_invoke_resp(ep, sp, fp))
+ lport->tt.lport_recv(lport, fp);
+ fc_exch_release(ep); /* release from lookup */
+ } else {
+ FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
+ reject);
+ fc_frame_free(fp);
+ }
+}
+
+/**
+ * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
+ * end is the originator of the sequence that is a
+ * response to our initial exchange
+ * @mp: The EM that the exchange is on
+ * @fp: The response frame
+ */
+static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ enum fc_sof sof;
+ u32 f_ctl;
+ int rc;
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+ sp = &ep->seq;
+ if (fc_sof_is_init(sof)) {
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ goto rel;
+ }
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = sp;
+
+ spin_lock_bh(&ep->ex_lock);
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (fc_sof_needs_ack(sof))
+ fc_seq_send_ack(sp, fp);
+
+ if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
+ (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+ (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+ spin_lock_bh(&ep->ex_lock);
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
+ }
+
+ /*
+ * Call the receive function.
+ * The sequence is held (has a refcnt) for us,
+ * but not for the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If new exch resp handler is valid then call that
+ * first.
+ */
+ fc_invoke_resp(ep, sp, fp);
+
+ fc_exch_release(ep);
+ return;
+rel:
+ fc_exch_release(ep);
+out:
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_exch_recv_resp() - Handler for a sequence where other end is
+ * responding to our sequence
+ * @mp: The EM that the exchange is on
+ * @fp: The response frame
+ */
+static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_seq *sp;
+
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+
+ if (!sp)
+ atomic_inc(&mp->stats.xid_not_found);
+ else
+ atomic_inc(&mp->stats.non_bls_resp);
+
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_exch_abts_resp() - Handler for a response to an ABT
+ * @ep: The exchange that the frame is on
+ * @fp: The response frame
+ *
+ * This response would be to an ABTS cancelling an exchange or sequence.
+ * The response can be either BA_ACC or BA_RJT
+ */
+static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_ba_acc *ap;
+ struct fc_seq *sp;
+ u16 low;
+ u16 high;
+ int rc = 1, has_rec = 0;
+
+ fh = fc_frame_header_get(fp);
+ FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
+
+ if (cancel_delayed_work_sync(&ep->timeout_work)) {
+ FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
+ fc_exch_release(ep); /* release from pending timer hold */
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ if (!ap)
+ break;
+
+ /*
+ * Decide whether to establish a Recovery Qualifier.
+ * We do this if there is a non-empty SEQ_CNT range and
+ * SEQ_ID is the same as the one we aborted.
+ */
+ low = ntohs(ap->ba_low_seq_cnt);
+ high = ntohs(ap->ba_high_seq_cnt);
+ if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
+ (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
+ ap->ba_seq_id == ep->seq_id) && low != high) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_hold(ep); /* hold for recovery qualifier */
+ has_rec = 1;
+ }
+ break;
+ case FC_RCTL_BA_RJT:
+ break;
+ default:
+ break;
+ }
+
+ /* do we need to do some other checks here. Can we reuse more of
+ * fc_exch_recv_seq_resp
+ */
+ sp = &ep->seq;
+ /*
+ * do we want to check END_SEQ as well as LAST_SEQ here?
+ */
+ if (ep->fh_type != FC_TYPE_FCP &&
+ ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_exch_hold(ep);
+ if (!rc)
+ fc_exch_delete(ep);
+ fc_invoke_resp(ep, sp, fp);
+ if (has_rec)
+ fc_exch_timer_set(ep, ep->r_a_tov);
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_recv_bls() - Handler for a BLS sequence
+ * @mp: The EM that the exchange is on
+ * @fp: The request frame
+ *
+ * The BLS frame is always a sequence initiated by the remote side.
+ * We may be either the originator or recipient of the exchange.
+ */
+static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ fh = fc_frame_header_get(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = NULL;
+
+ ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
+ if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
+ spin_lock_bh(&ep->ex_lock);
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ }
+ if (f_ctl & FC_FC_SEQ_CTX) {
+ /*
+ * A response to a sequence we initiated.
+ * This should only be ACKs for class 2 or F.
+ */
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_ACK_1:
+ case FC_RCTL_ACK_0:
+ break;
+ default:
+ if (ep)
+ FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
+ break;
+ }
+ fc_frame_free(fp);
+ } else {
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_ACC:
+ if (ep)
+ fc_exch_abts_resp(ep, fp);
+ else
+ fc_frame_free(fp);
+ break;
+ case FC_RCTL_BA_ABTS:
+ fc_exch_recv_abts(ep, fp);
+ break;
+ default: /* ignore junk */
+ fc_frame_free(fp);
+ break;
+ }
+ }
+ if (ep)
+ fc_exch_release(ep); /* release hold taken by fc_exch_find */
+}
+
+/**
+ * fc_seq_ls_acc() - Accept sequence with LS_ACC
+ * @rx_fp: The received frame, not freed here.
+ *
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_acc(struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport;
+ struct fc_els_ls_acc *acc;
+ struct fc_frame *fp;
+
+ lport = fr_dev(rx_fp);
+ fp = fc_frame_alloc(lport, sizeof(*acc));
+ if (!fp)
+ return;
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->la_cmd = ELS_LS_ACC;
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+}
+
+/**
+ * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
+ * @rx_fp: The received frame, not freed here.
+ * @reason: The reason the sequence is being rejected
+ * @explan: The explanation for the rejection
+ *
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
+ enum fc_els_rjt_explan explan)
+{
+ struct fc_lport *lport;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_frame *fp;
+
+ lport = fr_dev(rx_fp);
+ fp = fc_frame_alloc(lport, sizeof(*rjt));
+ if (!fp)
+ return;
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ memset(rjt, 0, sizeof(*rjt));
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason;
+ rjt->er_explan = explan;
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+}
+
+/**
+ * fc_exch_reset() - Reset an exchange
+ * @ep: The exchange to be reset
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static void fc_exch_reset(struct fc_exch *ep)
+{
+ struct fc_seq *sp;
+ int rc = 1;
+
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_abort_locked(ep, 0);
+ ep->state |= FC_EX_RST_CLEANUP;
+ fc_exch_timer_cancel(ep);
+ if (ep->esb_stat & ESB_ST_REC_QUAL)
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ sp = &ep->seq;
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_exch_hold(ep);
+
+ if (!rc)
+ fc_exch_delete(ep);
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_pool_reset() - Reset a per cpu exchange pool
+ * @lport: The local port that the exchange pool is on
+ * @pool: The exchange pool to be reset
+ * @sid: The source ID
+ * @did: The destination ID
+ *
+ * Resets a per cpu exches pool, releasing all of its sequences
+ * and exchanges. If sid is non-zero then reset only exchanges
+ * we sourced from the local port's FID. If did is non-zero then
+ * only reset exchanges destined for the local port's FID.
+ */
+static void fc_exch_pool_reset(struct fc_lport *lport,
+ struct fc_exch_pool *pool,
+ u32 sid, u32 did)
+{
+ struct fc_exch *ep;
+ struct fc_exch *next;
+
+ spin_lock_bh(&pool->lock);
+restart:
+ list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
+ if ((lport == ep->lp) &&
+ (sid == 0 || sid == ep->sid) &&
+ (did == 0 || did == ep->did)) {
+ fc_exch_hold(ep);
+ spin_unlock_bh(&pool->lock);
+
+ fc_exch_reset(ep);
+
+ fc_exch_release(ep);
+ spin_lock_bh(&pool->lock);
+
+ /*
+ * must restart loop incase while lock
+ * was down multiple eps were released.
+ */
+ goto restart;
+ }
+ }
+ pool->next_index = 0;
+ pool->left = FC_XID_UNKNOWN;
+ pool->right = FC_XID_UNKNOWN;
+ spin_unlock_bh(&pool->lock);
+}
+
+/**
+ * fc_exch_mgr_reset() - Reset all EMs of a local port
+ * @lport: The local port whose EMs are to be reset
+ * @sid: The source ID
+ * @did: The destination ID
+ *
+ * Reset all EMs associated with a given local port. Release all
+ * sequences and exchanges. If sid is non-zero then reset only the
+ * exchanges sent from the local port's FID. If did is non-zero then
+ * reset only exchanges destined for the local port's FID.
+ */
+void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
+{
+ struct fc_exch_mgr_anchor *ema;
+ unsigned int cpu;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ for_each_possible_cpu(cpu)
+ fc_exch_pool_reset(lport,
+ per_cpu_ptr(ema->mp->pool, cpu),
+ sid, did);
+ }
+}
+EXPORT_SYMBOL(fc_exch_mgr_reset);
+
+/**
+ * fc_exch_lookup() - find an exchange
+ * @lport: The local port
+ * @xid: The exchange ID
+ *
+ * Returns exchange pointer with hold for caller, or NULL if not found.
+ */
+static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
+ return fc_exch_find(ema->mp, xid);
+ return NULL;
+}
+
+/**
+ * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
+ * @rfp: The REC frame, not freed here.
+ *
+ * Note that the requesting port may be different than the S_ID in the request.
+ */
+static void fc_exch_els_rec(struct fc_frame *rfp)
+{
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct fc_exch *ep;
+ struct fc_els_rec *rp;
+ struct fc_els_rec_acc *acc;
+ enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
+ enum fc_els_rjt_explan explan;
+ u32 sid;
+ u16 rxid;
+ u16 oxid;
+
+ lport = fr_dev(rfp);
+ rp = fc_frame_payload_get(rfp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+ sid = ntoh24(rp->rec_s_id);
+ rxid = ntohs(rp->rec_rx_id);
+ oxid = ntohs(rp->rec_ox_id);
+
+ ep = fc_exch_lookup(lport,
+ sid == fc_host_port_id(lport->host) ? oxid : rxid);
+ explan = ELS_EXPL_OXID_RXID;
+ if (!ep)
+ goto reject;
+ if (ep->oid != sid || oxid != ep->oxid)
+ goto rel;
+ if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
+ goto rel;
+ fp = fc_frame_alloc(lport, sizeof(*acc));
+ if (!fp)
+ goto out;
+
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->reca_cmd = ELS_LS_ACC;
+ acc->reca_ox_id = rp->rec_ox_id;
+ memcpy(acc->reca_ofid, rp->rec_s_id, 3);
+ acc->reca_rx_id = htons(ep->rxid);
+ if (ep->sid == ep->oid)
+ hton24(acc->reca_rfid, ep->did);
+ else
+ hton24(acc->reca_rfid, ep->sid);
+ acc->reca_fc4value = htonl(ep->seq.rec_data);
+ acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
+ ESB_ST_SEQ_INIT |
+ ESB_ST_COMPLETE));
+ fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+out:
+ fc_exch_release(ep);
+ return;
+
+rel:
+ fc_exch_release(ep);
+reject:
+ fc_seq_ls_rjt(rfp, reason, explan);
+}
+
+/**
+ * fc_exch_rrq_resp() - Handler for RRQ responses
+ * @sp: The sequence that the RRQ is on
+ * @fp: The RRQ frame
+ * @arg: The exchange that the RRQ is on
+ *
+ * TODO: fix error handler.
+ */
+static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+ struct fc_exch *aborted_ep = arg;
+ unsigned int op;
+
+ if (IS_ERR(fp)) {
+ int err = PTR_ERR(fp);
+
+ if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
+ goto cleanup;
+ FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
+ "frame error %d\n", err);
+ return;
+ }
+
+ op = fc_frame_payload_op(fp);
+ fc_frame_free(fp);
+
+ switch (op) {
+ case ELS_LS_RJT:
+ FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
+ /* fall through */
+ case ELS_LS_ACC:
+ goto cleanup;
+ default:
+ FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
+ op);
+ return;
+ }
+
+cleanup:
+ fc_exch_done(&aborted_ep->seq);
+ /* drop hold for rec qual */
+ fc_exch_release(aborted_ep);
+}
+
+
+/**
+ * fc_exch_seq_send() - Send a frame using a new exchange and sequence
+ * @lport: The local port to send the frame on
+ * @fp: The frame to be sent
+ * @resp: The response handler for this request
+ * @destructor: The destructor for the exchange
+ * @arg: The argument to be passed to the response handler
+ * @timer_msec: The timeout period for the exchange
+ *
+ * The frame pointer with some of the header's fields must be
+ * filled before calling this routine, those fields are:
+ *
+ * - routing control
+ * - FC port did
+ * - FC port sid
+ * - FC header type
+ * - frame control
+ * - parameter or relative offset
+ */
+static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *,
+ void *),
+ void *arg, u32 timer_msec)
+{
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ struct fc_frame_header *fh;
+ struct fc_fcp_pkt *fsp = NULL;
+ int rc = 1;
+
+ ep = fc_exch_alloc(lport, fp);
+ if (!ep) {
+ fc_frame_free(fp);
+ return NULL;
+ }
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fh = fc_frame_header_get(fp);
+ fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
+ ep->resp = resp;
+ ep->destructor = destructor;
+ ep->arg = arg;
+ ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->lp = lport;
+ sp = &ep->seq;
+
+ ep->fh_type = fh->fh_type; /* save for possbile timeout handling */
+ ep->f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_exch_setup_hdr(ep, fp, ep->f_ctl);
+ sp->cnt++;
+
+ if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+ fsp = fr_fsp(fp);
+ fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+ }
+
+ if (unlikely(lport->tt.frame_send(lport, fp)))
+ goto err;
+
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
+
+ if (ep->f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ return sp;
+err:
+ if (fsp)
+ fc_fcp_ddp_done(fsp);
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
+ return NULL;
+}
+
+/**
+ * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
+ * @ep: The exchange to send the RRQ on
+ *
+ * This tells the remote port to stop blocking the use of
+ * the exchange and the seq_cnt range.
+ */
+static void fc_exch_rrq(struct fc_exch *ep)
+{
+ struct fc_lport *lport;
+ struct fc_els_rrq *rrq;
+ struct fc_frame *fp;
+ u32 did;
+
+ lport = ep->lp;
+
+ fp = fc_frame_alloc(lport, sizeof(*rrq));
+ if (!fp)
+ goto retry;
+
+ rrq = fc_frame_payload_get(fp, sizeof(*rrq));
+ memset(rrq, 0, sizeof(*rrq));
+ rrq->rrq_cmd = ELS_RRQ;
+ hton24(rrq->rrq_s_id, ep->sid);
+ rrq->rrq_ox_id = htons(ep->oxid);
+ rrq->rrq_rx_id = htons(ep->rxid);
+
+ did = ep->did;
+ if (ep->esb_stat & ESB_ST_RESP)
+ did = ep->sid;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
+ lport->port_id, FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
+ lport->e_d_tov))
+ return;
+
+retry:
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
+ spin_unlock_bh(&ep->ex_lock);
+ /* drop hold for rec qual */
+ fc_exch_release(ep);
+ return;
+ }
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
+ * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
+ * @fp: The RRQ frame, not freed here.
+ */
+static void fc_exch_els_rrq(struct fc_frame *fp)
+{
+ struct fc_lport *lport;
+ struct fc_exch *ep = NULL; /* request or subject exchange */
+ struct fc_els_rrq *rp;
+ u32 sid;
+ u16 xid;
+ enum fc_els_rjt_explan explan;
+
+ lport = fr_dev(fp);
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+
+ /*
+ * lookup subject exchange.
+ */
+ sid = ntoh24(rp->rrq_s_id); /* subject source */
+ xid = fc_host_port_id(lport->host) == sid ?
+ ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
+ ep = fc_exch_lookup(lport, xid);
+ explan = ELS_EXPL_OXID_RXID;
+ if (!ep)
+ goto reject;
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->oxid != ntohs(rp->rrq_ox_id))
+ goto unlock_reject;
+ if (ep->rxid != ntohs(rp->rrq_rx_id) &&
+ ep->rxid != FC_XID_UNKNOWN)
+ goto unlock_reject;
+ explan = ELS_EXPL_SID;
+ if (ep->sid != sid)
+ goto unlock_reject;
+
+ /*
+ * Clear Recovery Qualifier state, and cancel timer if complete.
+ */
+ if (ep->esb_stat & ESB_ST_REC_QUAL) {
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE)
+ fc_exch_timer_cancel(ep);
+
+ spin_unlock_bh(&ep->ex_lock);
+
+ /*
+ * Send LS_ACC.
+ */
+ fc_seq_ls_acc(fp);
+ goto out;
+
+unlock_reject:
+ spin_unlock_bh(&ep->ex_lock);
+reject:
+ fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
+out:
+ if (ep)
+ fc_exch_release(ep); /* drop hold from fc_exch_find */
+}
+
+/**
+ * fc_exch_update_stats() - update exches stats to lport
+ * @lport: The local port to update exchange manager stats
+ */
+void fc_exch_update_stats(struct fc_lport *lport)
+{
+ struct fc_host_statistics *st;
+ struct fc_exch_mgr_anchor *ema;
+ struct fc_exch_mgr *mp;
+
+ st = &lport->host_stats;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ mp = ema->mp;
+ st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
+ st->fc_no_free_exch_xid +=
+ atomic_read(&mp->stats.no_free_exch_xid);
+ st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
+ st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
+ st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
+ st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
+ }
+}
+EXPORT_SYMBOL(fc_exch_update_stats);
+
+/**
+ * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
+ * @lport: The local port to add the exchange manager to
+ * @mp: The exchange manager to be added to the local port
+ * @match: The match routine that indicates when this EM should be used
+ */
+struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
+ struct fc_exch_mgr *mp,
+ bool (*match)(struct fc_frame *))
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
+ if (!ema)
+ return ema;
+
+ ema->mp = mp;
+ ema->match = match;
+ /* add EM anchor to EM anchors list */
+ list_add_tail(&ema->ema_list, &lport->ema_list);
+ kref_get(&mp->kref);
+ return ema;
+}
+EXPORT_SYMBOL(fc_exch_mgr_add);
+
+/**
+ * fc_exch_mgr_destroy() - Destroy an exchange manager
+ * @kref: The reference to the EM to be destroyed
+ */
+static void fc_exch_mgr_destroy(struct kref *kref)
+{
+ struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
+
+ mempool_destroy(mp->ep_pool);
+ free_percpu(mp->pool);
+ kfree(mp);
+}
+
+/**
+ * fc_exch_mgr_del() - Delete an EM from a local port's list
+ * @ema: The exchange manager anchor identifying the EM to be deleted
+ */
+void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
+{
+ /* remove EM anchor from EM anchors list */
+ list_del(&ema->ema_list);
+ kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
+ kfree(ema);
+}
+EXPORT_SYMBOL(fc_exch_mgr_del);
+
+/**
+ * fc_exch_mgr_list_clone() - Share all exchange manager objects
+ * @src: Source lport to clone exchange managers from
+ * @dst: New lport that takes references to all the exchange managers
+ */
+int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
+{
+ struct fc_exch_mgr_anchor *ema, *tmp;
+
+ list_for_each_entry(ema, &src->ema_list, ema_list) {
+ if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
+ goto err;
+ }
+ return 0;
+err:
+ list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
+ fc_exch_mgr_del(ema);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
+
+/**
+ * fc_exch_mgr_alloc() - Allocate an exchange manager
+ * @lport: The local port that the new EM will be associated with
+ * @class: The default FC class for new exchanges
+ * @min_xid: The minimum XID for exchanges from the new EM
+ * @max_xid: The maximum XID for exchanges from the new EM
+ * @match: The match routine for the new EM
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
+ enum fc_class class,
+ u16 min_xid, u16 max_xid,
+ bool (*match)(struct fc_frame *))
+{
+ struct fc_exch_mgr *mp;
+ u16 pool_exch_range;
+ size_t pool_size;
+ unsigned int cpu;
+ struct fc_exch_pool *pool;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
+ (min_xid & fc_cpu_mask) != 0) {
+ FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
+ min_xid, max_xid);
+ return NULL;
+ }
+
+ /*
+ * allocate memory for EM
+ */
+ mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
+ if (!mp)
+ return NULL;
+
+ mp->class = class;
+ /* adjust em exch xid range for offload */
+ mp->min_xid = min_xid;
+
+ /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
+ pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
+ sizeof(struct fc_exch *);
+ if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
+ mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
+ min_xid - 1;
+ } else {
+ mp->max_xid = max_xid;
+ pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
+ (fc_cpu_mask + 1);
+ }
+
+ mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
+ if (!mp->ep_pool)
+ goto free_mp;
+
+ /*
+ * Setup per cpu exch pool with entire exchange id range equally
+ * divided across all cpus. The exch pointers array memory is
+ * allocated for exch range per pool.
+ */
+ mp->pool_max_index = pool_exch_range - 1;
+
+ /*
+ * Allocate and initialize per cpu exch pool
+ */
+ pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
+ mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
+ if (!mp->pool)
+ goto free_mempool;
+ for_each_possible_cpu(cpu) {
+ pool = per_cpu_ptr(mp->pool, cpu);
+ pool->next_index = 0;
+ pool->left = FC_XID_UNKNOWN;
+ pool->right = FC_XID_UNKNOWN;
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->ex_list);
+ }
+
+ kref_init(&mp->kref);
+ if (!fc_exch_mgr_add(lport, mp, match)) {
+ free_percpu(mp->pool);
+ goto free_mempool;
+ }
+
+ /*
+ * Above kref_init() sets mp->kref to 1 and then
+ * call to fc_exch_mgr_add incremented mp->kref again,
+ * so adjust that extra increment.
+ */
+ kref_put(&mp->kref, fc_exch_mgr_destroy);
+ return mp;
+
+free_mempool:
+ mempool_destroy(mp->ep_pool);
+free_mp:
+ kfree(mp);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_mgr_alloc);
+
+/**
+ * fc_exch_mgr_free() - Free all exchange managers on a local port
+ * @lport: The local port whose EMs are to be freed
+ */
+void fc_exch_mgr_free(struct fc_lport *lport)
+{
+ struct fc_exch_mgr_anchor *ema, *next;
+
+ flush_workqueue(fc_exch_workqueue);
+ list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
+ fc_exch_mgr_del(ema);
+}
+EXPORT_SYMBOL(fc_exch_mgr_free);
+
+/**
+ * fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending
+ * upon 'xid'.
+ * @f_ctl: f_ctl
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+ struct fc_lport *lport,
+ struct fc_frame_header *fh)
+{
+ struct fc_exch_mgr_anchor *ema;
+ u16 xid;
+
+ if (f_ctl & FC_FC_EX_CTX)
+ xid = ntohs(fh->fh_ox_id);
+ else {
+ xid = ntohs(fh->fh_rx_id);
+ if (xid == FC_XID_UNKNOWN)
+ return list_entry(lport->ema_list.prev,
+ typeof(*ema), ema_list);
+ }
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if ((xid >= ema->mp->min_xid) &&
+ (xid <= ema->mp->max_xid))
+ return ema;
+ }
+ return NULL;
+}
+/**
+ * fc_exch_recv() - Handler for received frames
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ */
+void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch_mgr_anchor *ema;
+ u32 f_ctl;
+
+ /* lport lock ? */
+ if (!lport || lport->state == LPORT_ST_DISABLED) {
+ FC_LPORT_DBG(lport, "Receiving frames for an lport that "
+ "has not been initialized correctly\n");
+ fc_frame_free(fp);
+ return;
+ }
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ ema = fc_find_ema(f_ctl, lport, fh);
+ if (!ema) {
+ FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
+ "fc_ctl <0x%x>, xid <0x%x>\n",
+ f_ctl,
+ (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) :
+ ntohs(fh->fh_rx_id));
+ fc_frame_free(fp);
+ return;
+ }
+
+ /*
+ * If frame is marked invalid, just drop it.
+ */
+ switch (fr_eof(fp)) {
+ case FC_EOF_T:
+ if (f_ctl & FC_FC_END_SEQ)
+ skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
+ /* fall through */
+ case FC_EOF_N:
+ if (fh->fh_type == FC_TYPE_BLS)
+ fc_exch_recv_bls(ema->mp, fp);
+ else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
+ FC_FC_EX_CTX)
+ fc_exch_recv_seq_resp(ema->mp, fp);
+ else if (f_ctl & FC_FC_SEQ_CTX)
+ fc_exch_recv_resp(ema->mp, fp);
+ else /* no EX_CTX and no SEQ_CTX */
+ fc_exch_recv_req(lport, ema->mp, fp);
+ break;
+ default:
+ FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
+ fr_eof(fp));
+ fc_frame_free(fp);
+ }
+}
+EXPORT_SYMBOL(fc_exch_recv);
+
+/**
+ * fc_exch_init() - Initialize the exchange layer for a local port
+ * @lport: The local port to initialize the exchange layer for
+ */
+int fc_exch_init(struct fc_lport *lport)
+{
+ if (!lport->tt.seq_start_next)
+ lport->tt.seq_start_next = fc_seq_start_next;
+
+ if (!lport->tt.seq_set_resp)
+ lport->tt.seq_set_resp = fc_seq_set_resp;
+
+ if (!lport->tt.exch_seq_send)
+ lport->tt.exch_seq_send = fc_exch_seq_send;
+
+ if (!lport->tt.seq_send)
+ lport->tt.seq_send = fc_seq_send;
+
+ if (!lport->tt.seq_els_rsp_send)
+ lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
+
+ if (!lport->tt.exch_done)
+ lport->tt.exch_done = fc_exch_done;
+
+ if (!lport->tt.exch_mgr_reset)
+ lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
+
+ if (!lport->tt.seq_exch_abort)
+ lport->tt.seq_exch_abort = fc_seq_exch_abort;
+
+ if (!lport->tt.seq_assign)
+ lport->tt.seq_assign = fc_seq_assign;
+
+ if (!lport->tt.seq_release)
+ lport->tt.seq_release = fc_seq_release;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
+
+/**
+ * fc_setup_exch_mgr() - Setup an exchange manager
+ */
+int fc_setup_exch_mgr(void)
+{
+ fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fc_em_cachep)
+ return -ENOMEM;
+
+ /*
+ * Initialize fc_cpu_mask and fc_cpu_order. The
+ * fc_cpu_mask is set for nr_cpu_ids rounded up
+ * to order of 2's * power and order is stored
+ * in fc_cpu_order as this is later required in
+ * mapping between an exch id and exch array index
+ * in per cpu exch pool.
+ *
+ * This round up is required to align fc_cpu_mask
+ * to exchange id's lower bits such that all incoming
+ * frames of an exchange gets delivered to the same
+ * cpu on which exchange originated by simple bitwise
+ * AND operation between fc_cpu_mask and exchange id.
+ */
+ fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
+ fc_cpu_mask = (1 << fc_cpu_order) - 1;
+
+ fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+ if (!fc_exch_workqueue)
+ goto err;
+ return 0;
+err:
+ kmem_cache_destroy(fc_em_cachep);
+ return -ENOMEM;
+}
+
+/**
+ * fc_destroy_exch_mgr() - Destroy an exchange manager
+ */
+void fc_destroy_exch_mgr(void)
+{
+ destroy_workqueue(fc_exch_workqueue);
+ kmem_cache_destroy(fc_em_cachep);
+}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000..c6795941b
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2245 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include "fc_libfc.h"
+
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions */
+#define FC_SRB_FREE 0 /* cmd is free */
+#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
+#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
+#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
+#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
+#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
+#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
+
+#define FC_SRB_READ (1 << 1)
+#define FC_SRB_WRITE (1 << 0)
+
+/*
+ * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
+ */
+#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
+#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
+#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+
+/**
+ * struct fc_fcp_internal - FCP layer internal data
+ * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
+ * @scsi_pkt_queue: Current FCP packets
+ * @last_can_queue_ramp_down_time: ramp down time
+ * @last_can_queue_ramp_up_time: ramp up time
+ * @max_can_queue: max can_queue size
+ */
+struct fc_fcp_internal {
+ mempool_t *scsi_pkt_pool;
+ spinlock_t scsi_queue_lock;
+ struct list_head scsi_pkt_queue;
+ unsigned long last_can_queue_ramp_down_time;
+ unsigned long last_can_queue_ramp_up_time;
+ int max_can_queue;
+};
+
+#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
+static void fc_fcp_timeout(unsigned long);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes
+ */
+#define FC_COMPLETE 0
+#define FC_CMD_ABORTED 1
+#define FC_CMD_RESET 2
+#define FC_CMD_PLOGO 3
+#define FC_SNS_RCV 4
+#define FC_TRANS_ERR 5
+#define FC_DATA_OVRRUN 6
+#define FC_DATA_UNDRUN 7
+#define FC_ERROR 8
+#define FC_HRD_ERROR 9
+#define FC_CRC_ERROR 10
+#define FC_TIMED_OUT 11
+
+/*
+ * Error recovery timeout values.
+ */
+#define FC_SCSI_TM_TOV (10 * HZ)
+#define FC_HOST_RESET_TIMEOUT (30 * HZ)
+#define FC_CAN_QUEUE_PERIOD (60 * HZ)
+
+#define FC_MAX_ERROR_CNT 5
+#define FC_MAX_RECOV_RETRY 3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
+ * @lport: The local port that the FCP packet is for
+ * @gfp: GFP flags for allocation
+ *
+ * Return value: fcp_pkt structure or null on allocation failure.
+ * Context: Can be called from process context, no lock is required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ struct fc_fcp_pkt *fsp;
+
+ fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
+ if (fsp) {
+ memset(fsp, 0, sizeof(*fsp));
+ fsp->lp = lport;
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
+ atomic_set(&fsp->ref_cnt, 1);
+ init_timer(&fsp->timer);
+ fsp->timer.data = (unsigned long)fsp;
+ INIT_LIST_HEAD(&fsp->list);
+ spin_lock_init(&fsp->scsi_pkt_lock);
+ } else {
+ per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
+ put_cpu();
+ }
+ return fsp;
+}
+
+/**
+ * fc_fcp_pkt_release() - Release hold on a fcp_pkt
+ * @fsp: The FCP packet to be released
+ *
+ * Context: Can be called from process or interrupt context,
+ * no lock is required.
+ */
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
+{
+ if (atomic_dec_and_test(&fsp->ref_cnt)) {
+ struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
+
+ mempool_free(fsp, si->scsi_pkt_pool);
+ }
+}
+
+/**
+ * fc_fcp_pkt_hold() - Hold a fcp_pkt
+ * @fsp: The FCP packet to be held
+ */
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
+{
+ atomic_inc(&fsp->ref_cnt);
+}
+
+/**
+ * fc_fcp_pkt_destory() - Release hold on a fcp_pkt
+ * @seq: The sequence that the FCP packet is on (required by destructor API)
+ * @fsp: The FCP packet to be released
+ *
+ * This routine is called by a destructor callback in the exch_seq_send()
+ * routine of the libfc Transport Template. The 'struct fc_seq' is a required
+ * argument even though it is not used by this routine.
+ *
+ * Context: No locking required.
+ */
+static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
+{
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
+ * @fsp: The FCP packet to be locked and incremented
+ *
+ * We should only return error if we return a command to SCSI-ml before
+ * getting a response. This can happen in cases where we send a abort, but
+ * do not wait for the response and the abort and command can be passing
+ * each other on the wire/network-layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packets refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->state & FC_SRB_COMPL) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ return -EPERM;
+ }
+
+ fc_fcp_pkt_hold(fsp);
+ return 0;
+}
+
+/**
+ * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
+ * reference count
+ * @fsp: The FCP packet to be unlocked and decremented
+ */
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_timer_set() - Start a timer for a fcp_pkt
+ * @fsp: The FCP packet to start a timer for
+ * @delay: The timeout period in jiffies
+ */
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+ if (!(fsp->state & FC_SRB_COMPL))
+ mod_timer(&fsp->timer, jiffies + delay);
+}
+
+/**
+ * fc_fcp_send_abort() - Send an abort for exchanges associated with a
+ * fcp_pkt
+ * @fsp: The FCP packet to abort exchanges on
+ */
+static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+{
+ if (!fsp->seq_ptr)
+ return -EINVAL;
+
+ per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
+ put_cpu();
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+ return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+}
+
+/**
+ * fc_fcp_retry_cmd() - Retry a fcp_pkt
+ * @fsp: The FCP packet to be retried
+ *
+ * Sets the status code to be FC_ERROR and then calls
+ * fc_fcp_complete_locked() which in turn calls fc_io_compl().
+ * fc_io_compl() will notify the SCSI-ml that the I/O is done.
+ * The SCSI-ml will retry the command.
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+{
+ if (fsp->seq_ptr) {
+ fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->io_status = 0;
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
+ * @fsp: The FCP packet that will manage the DDP frames
+ * @xid: The XID that will be used for the DDP exchange
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
+{
+ struct fc_lport *lport;
+
+ lport = fsp->lp;
+ if ((fsp->req_flags & FC_SRB_READ) &&
+ (lport->lro_enabled) && (lport->tt.ddp_setup)) {
+ if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
+ scsi_sg_count(fsp->cmd)))
+ fsp->xfer_ddp = xid;
+ }
+}
+
+/**
+ * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
+ * DDP related resources for a fcp_pkt
+ * @fsp: The FCP packet that DDP had been used on
+ */
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lport;
+
+ if (!fsp)
+ return;
+
+ if (fsp->xfer_ddp == FC_XID_UNKNOWN)
+ return;
+
+ lport = fsp->lp;
+ if (lport->tt.ddp_done) {
+ fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
+ }
+}
+
+/**
+ * fc_fcp_can_queue_ramp_up() - increases can_queue
+ * @lport: lport to ramp up can_queue
+ */
+static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
+ int can_queue;
+
+ spin_lock_irqsave(lport->host->host_lock, flags);
+
+ if (si->last_can_queue_ramp_up_time &&
+ (time_before(jiffies, si->last_can_queue_ramp_up_time +
+ FC_CAN_QUEUE_PERIOD)))
+ goto unlock;
+
+ if (time_before(jiffies, si->last_can_queue_ramp_down_time +
+ FC_CAN_QUEUE_PERIOD))
+ goto unlock;
+
+ si->last_can_queue_ramp_up_time = jiffies;
+
+ can_queue = lport->host->can_queue << 1;
+ if (can_queue >= si->max_can_queue) {
+ can_queue = si->max_can_queue;
+ si->last_can_queue_ramp_down_time = 0;
+ }
+ lport->host->can_queue = can_queue;
+ shost_printk(KERN_ERR, lport->host, "libfc: increased "
+ "can_queue to %d.\n", can_queue);
+
+unlock:
+ spin_unlock_irqrestore(lport->host->host_lock, flags);
+}
+
+/**
+ * fc_fcp_can_queue_ramp_down() - reduces can_queue
+ * @lport: lport to reduce can_queue
+ *
+ * If we are getting memory allocation failures, then we may
+ * be trying to execute too many commands. We let the running
+ * commands complete or timeout, then try again with a reduced
+ * can_queue. Eventually we will hit the point where we run
+ * on all reserved structs.
+ */
+static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
+ int can_queue;
+
+ spin_lock_irqsave(lport->host->host_lock, flags);
+
+ if (si->last_can_queue_ramp_down_time &&
+ (time_before(jiffies, si->last_can_queue_ramp_down_time +
+ FC_CAN_QUEUE_PERIOD)))
+ goto unlock;
+
+ si->last_can_queue_ramp_down_time = jiffies;
+
+ can_queue = lport->host->can_queue;
+ can_queue >>= 1;
+ if (!can_queue)
+ can_queue = 1;
+ lport->host->can_queue = can_queue;
+ shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
+ "Reducing can_queue to %d.\n", can_queue);
+
+unlock:
+ spin_unlock_irqrestore(lport->host->host_lock, flags);
+}
+
+/*
+ * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer.
+ * @lport: fc lport struct
+ * @len: payload length
+ *
+ * Allocates fc_frame structure and buffer but if fails to allocate
+ * then reduce can_queue.
+ */
+static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
+ size_t len)
+{
+ struct fc_frame *fp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (likely(fp))
+ return fp;
+
+ per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
+ put_cpu();
+ /* error case */
+ fc_fcp_can_queue_ramp_down(lport);
+ return NULL;
+}
+
+/**
+ * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
+ * @fsp: The FCP packet the data is on
+ * @fp: The data frame
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct scsi_cmnd *sc = fsp->cmd;
+ struct fc_lport *lport = fsp->lp;
+ struct fc_stats *stats;
+ struct fc_frame_header *fh;
+ size_t start_offset;
+ size_t offset;
+ u32 crc;
+ u32 copy_len = 0;
+ size_t len;
+ void *buf;
+ struct scatterlist *sg;
+ u32 nents;
+ u8 host_bcode = FC_COMPLETE;
+
+ fh = fc_frame_header_get(fp);
+ offset = ntohl(fh->fh_parm_offset);
+ start_offset = offset;
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ /*
+ * if this I/O is ddped then clear it and initiate recovery since data
+ * frames are expected to be placed directly in that case.
+ *
+ * Indicate error to scsi-ml because something went wrong with the
+ * ddp handling to get us here.
+ */
+ if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
+ fc_fcp_ddp_done(fsp);
+ FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
+ host_bcode = FC_ERROR;
+ goto err;
+ }
+ if (offset + len > fsp->data_len) {
+ /* this should never happen */
+ if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ fc_frame_crc_check(fp))
+ goto crc_err;
+ FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
+
+ /* Data is corrupted indicate scsi-ml should retry */
+ host_bcode = FC_DATA_OVRRUN;
+ goto err;
+ }
+ if (offset != fsp->xfer_len)
+ fsp->state |= FC_SRB_DISCONTIG;
+
+ sg = scsi_sglist(sc);
+ nents = scsi_sg_count(sc);
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
+ copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
+ &offset, NULL);
+ } else {
+ crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+ copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
+ &offset, &crc);
+ buf = fc_frame_payload_get(fp, 0);
+ if (len % 4)
+ crc = crc32(crc, buf + len, 4 - (len % 4));
+
+ if (~crc != le32_to_cpu(fr_crc(fp))) {
+crc_err:
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->ErrorFrames++;
+ /* per cpu count, not total count, but OK for limit */
+ if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
+ printk(KERN_WARNING "libfc: CRC error on data "
+ "frame for port (%6.6x)\n",
+ lport->port_id);
+ put_cpu();
+ /*
+ * Assume the frame is total garbage.
+ * We may have copied it over the good part
+ * of the buffer.
+ * If so, we need to retry the entire operation.
+ * Otherwise, ignore it.
+ */
+ if (fsp->state & FC_SRB_DISCONTIG) {
+ host_bcode = FC_CRC_ERROR;
+ goto err;
+ }
+ return;
+ }
+ }
+
+ if (fsp->xfer_contig_end == start_offset)
+ fsp->xfer_contig_end += copy_len;
+ fsp->xfer_len += copy_len;
+
+ /*
+ * In the very rare event that this data arrived after the response
+ * and completes the transfer, call the completion handler.
+ */
+ if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fc_fcp_complete_locked(fsp);
+ return;
+err:
+ fc_fcp_recovery(fsp, host_bcode);
+}
+
+/**
+ * fc_fcp_send_data() - Send SCSI data to a target
+ * @fsp: The FCP packet the data is on
+ * @sp: The sequence the data is to be sent on
+ * @offset: The starting offset for this data request
+ * @seq_blen: The burst length for this data request
+ *
+ * Called after receiving a Transfer Ready data descriptor.
+ * If the LLD is capable of sequence offload then send down the
+ * seq_blen amount of data in single frame, otherwise send
+ * multiple frames of the maximum frame payload supported by
+ * the target port.
+ */
+static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
+ size_t offset, size_t seq_blen)
+{
+ struct fc_exch *ep;
+ struct scsi_cmnd *sc;
+ struct scatterlist *sg;
+ struct fc_frame *fp = NULL;
+ struct fc_lport *lport = fsp->lp;
+ struct page *page;
+ size_t remaining;
+ size_t t_blen;
+ size_t tlen;
+ size_t sg_bytes;
+ size_t frame_offset, fh_parm_offset;
+ size_t off;
+ int error;
+ void *data = NULL;
+ void *page_addr;
+ int using_sg = lport->sg_supp;
+ u32 f_ctl;
+
+ WARN_ON(seq_blen <= 0);
+ if (unlikely(offset + seq_blen > fsp->data_len)) {
+ /* this should never happen */
+ FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
+ "offset %zx\n", seq_blen, offset);
+ fc_fcp_send_abort(fsp);
+ return 0;
+ } else if (offset != fsp->xfer_len) {
+ /* Out of Order Data Request - no problem, but unexpected. */
+ FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
+ "seq_blen %zx offset %zx\n", seq_blen, offset);
+ }
+
+ /*
+ * if LLD is capable of seq_offload then set transport
+ * burst length (t_blen) to seq_blen, otherwise set t_blen
+ * to max FC frame payload previously set in fsp->max_payload.
+ */
+ t_blen = fsp->max_payload;
+ if (lport->seq_offload) {
+ t_blen = min(seq_blen, (size_t)lport->lso_max);
+ FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
+ fsp, seq_blen, lport->lso_max, t_blen);
+ }
+
+ if (t_blen > 512)
+ t_blen &= ~(512 - 1); /* round down to block size */
+ sc = fsp->cmd;
+
+ remaining = seq_blen;
+ fh_parm_offset = frame_offset = offset;
+ tlen = 0;
+ seq = lport->tt.seq_start_next(seq);
+ f_ctl = FC_FC_REL_OFF;
+ WARN_ON(!seq);
+
+ sg = scsi_sglist(sc);
+
+ while (remaining > 0 && sg) {
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ if (!fp) {
+ tlen = min(t_blen, remaining);
+
+ /*
+ * TODO. Temporary workaround. fc_seq_send() can't
+ * handle odd lengths in non-linear skbs.
+ * This will be the final fragment only.
+ */
+ if (tlen % 4)
+ using_sg = 0;
+ fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
+ if (!fp)
+ return -ENOMEM;
+
+ data = fc_frame_header_get(fp) + 1;
+ fh_parm_offset = frame_offset;
+ fr_max_payload(fp) = fsp->max_payload;
+ }
+
+ off = offset + sg->offset;
+ sg_bytes = min(tlen, sg->length - offset);
+ sg_bytes = min(sg_bytes,
+ (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
+ page = sg_page(sg) + (off >> PAGE_SHIFT);
+ if (using_sg) {
+ get_page(page);
+ skb_fill_page_desc(fp_skb(fp),
+ skb_shinfo(fp_skb(fp))->nr_frags,
+ page, off & ~PAGE_MASK, sg_bytes);
+ fp_skb(fp)->data_len += sg_bytes;
+ fr_len(fp) += sg_bytes;
+ fp_skb(fp)->truesize += PAGE_SIZE;
+ } else {
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we must not cross pages inside the kmap.
+ */
+ page_addr = kmap_atomic(page);
+ memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+ sg_bytes);
+ kunmap_atomic(page_addr);
+ data += sg_bytes;
+ }
+ offset += sg_bytes;
+ frame_offset += sg_bytes;
+ tlen -= sg_bytes;
+ remaining -= sg_bytes;
+
+ if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
+ (tlen))
+ continue;
+
+ /*
+ * Send sequence with transfer sequence initiative in case
+ * this is last FCP frame of the sequence.
+ */
+ if (remaining == 0)
+ f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, fh_parm_offset);
+
+ /*
+ * send fragment using for a sequence.
+ */
+ error = lport->tt.seq_send(lport, seq, fp);
+ if (error) {
+ WARN_ON(1); /* send error should be rare */
+ return error;
+ }
+ fp = NULL;
+ }
+ fsp->xfer_len += seq_blen; /* premature count? */
+ return 0;
+}
+
+/**
+ * fc_fcp_abts_resp() - Receive an ABTS response
+ * @fsp: The FCP packet that is being aborted
+ * @fp: The response frame
+ */
+static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int ba_done = 1;
+ struct fc_ba_rjt *brp;
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ break;
+ case FC_RCTL_BA_RJT:
+ brp = fc_frame_payload_get(fp, sizeof(*brp));
+ if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
+ break;
+ /* fall thru */
+ default:
+ /*
+ * we will let the command timeout
+ * and scsi-ml recover in this case,
+ * therefore cleared the ba_done flag.
+ */
+ ba_done = 0;
+ }
+
+ if (ba_done) {
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
+ }
+}
+
+/**
+ * fc_fcp_recv() - Receive an FCP frame
+ * @seq: The sequence the frame is on
+ * @fp: The received frame
+ * @arg: The related FCP packet
+ *
+ * Context: Called from Soft IRQ context. Can not be called
+ * holding the FCP packet list lock.
+ */
+static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_lport *lport = fsp->lp;
+ struct fc_frame_header *fh;
+ struct fcp_txrdy *dd;
+ u8 r_ctl;
+ int rc = 0;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_error(fsp, fp);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+ r_ctl = fh->fh_r_ctl;
+
+ if (lport->state != LPORT_ST_READY)
+ goto out;
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_abts_resp(fsp, fp);
+ goto unlock;
+ }
+
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ goto unlock;
+
+ if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+ /*
+ * received XFER RDY from the target
+ * need to send data to the target
+ */
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+ dd = fc_frame_payload_get(fp, sizeof(*dd));
+ WARN_ON(!dd);
+
+ rc = fc_fcp_send_data(fsp, seq,
+ (size_t) ntohl(dd->ft_data_ro),
+ (size_t) ntohl(dd->ft_burst_len));
+ if (!rc)
+ seq->rec_data = fsp->xfer_len;
+ } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+ /*
+ * received a DATA frame
+ * next we will copy the data to the system buffer
+ */
+ WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
+ fc_fcp_recv_data(fsp, fp);
+ seq->rec_data = fsp->xfer_contig_end;
+ } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+ fc_fcp_resp(fsp, fp);
+ } else {
+ FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
+ }
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_resp() - Handler for FCP responses
+ * @fsp: The FCP packet the response is for
+ * @fp: The response frame
+ */
+static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fcp_resp *fc_rp;
+ struct fcp_resp_ext *rp_ex;
+ struct fcp_resp_rsp_info *fc_rp_info;
+ u32 plen;
+ u32 expected_len;
+ u32 respl = 0;
+ u32 snsl = 0;
+ u8 flags = 0;
+
+ plen = fr_len(fp);
+ fh = (struct fc_frame_header *)fr_hdr(fp);
+ if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+ goto len_err;
+ plen -= sizeof(*fh);
+ fc_rp = (struct fcp_resp *)(fh + 1);
+ fsp->cdb_status = fc_rp->fr_status;
+ flags = fc_rp->fr_flags;
+ fsp->scsi_comp_flags = flags;
+ expected_len = fsp->data_len;
+
+ /* if ddp, update xfer len */
+ fc_fcp_ddp_done(fsp);
+
+ if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+ rp_ex = (void *)(fc_rp + 1);
+ if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+ if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+ goto len_err;
+ fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+ if (flags & FCP_RSP_LEN_VAL) {
+ respl = ntohl(rp_ex->fr_rsp_len);
+ if ((respl != FCP_RESP_RSP_INFO_LEN4) &&
+ (respl != FCP_RESP_RSP_INFO_LEN8))
+ goto len_err;
+ if (fsp->wait_for_comp) {
+ /* Abuse cdb_status for rsp code */
+ fsp->cdb_status = fc_rp_info->rsp_code;
+ complete(&fsp->tm_done);
+ /*
+ * tmfs will not have any scsi cmd so
+ * exit here
+ */
+ return;
+ }
+ }
+ if (flags & FCP_SNS_LEN_VAL) {
+ snsl = ntohl(rp_ex->fr_sns_len);
+ if (snsl > SCSI_SENSE_BUFFERSIZE)
+ snsl = SCSI_SENSE_BUFFERSIZE;
+ memcpy(fsp->cmd->sense_buffer,
+ (char *)fc_rp_info + respl, snsl);
+ }
+ }
+ if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+ if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+ goto len_err;
+ if (flags & FCP_RESID_UNDER) {
+ fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+ /*
+ * The cmnd->underflow is the minimum number of
+ * bytes that must be transferred for this
+ * command. Provided a sense condition is not
+ * present, make sure the actual amount
+ * transferred is at least the underflow value
+ * or fail.
+ */
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (fc_rp->fr_status == 0) &&
+ (scsi_bufflen(fsp->cmd) -
+ fsp->scsi_resid) < fsp->cmd->underflow)
+ goto err;
+ expected_len -= fsp->scsi_resid;
+ } else {
+ fsp->status_code = FC_ERROR;
+ }
+ }
+ }
+ fsp->state |= FC_SRB_RCV_STATUS;
+
+ /*
+ * Check for missing or extra data frames.
+ */
+ if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
+ fsp->xfer_len != expected_len)) {
+ if (fsp->xfer_len < expected_len) {
+ /*
+ * Some data may be queued locally,
+ * Wait a at least one jiffy to see if it is delivered.
+ * If this expires without data, we may do SRR.
+ */
+ fc_fcp_timer_set(fsp, 2);
+ return;
+ }
+ fsp->status_code = FC_DATA_OVRRUN;
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ }
+ fc_fcp_complete_locked(fsp);
+ return;
+
+len_err:
+ FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
+ "snsl %u\n", flags, fr_len(fp), respl, snsl);
+err:
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
+ * fcp_pkt lock held
+ * @fsp: The FCP packet to be completed
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lport = fsp->lp;
+ struct fc_seq *seq;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ if (fsp->state & FC_SRB_ABORT_PENDING)
+ return;
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ if (!fsp->status_code)
+ fsp->status_code = FC_CMD_ABORTED;
+ } else {
+ /*
+ * Test for transport underrun, independent of response
+ * underrun status.
+ */
+ if (fsp->cdb_status == SAM_STAT_GOOD &&
+ fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+ (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+ fsp->status_code = FC_DATA_UNDRUN;
+ }
+
+ seq = fsp->seq_ptr;
+ if (seq) {
+ fsp->seq_ptr = NULL;
+ if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+ struct fc_frame *conf_frame;
+ struct fc_seq *csp;
+
+ csp = lport->tt.seq_start_next(seq);
+ conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
+ if (conf_frame) {
+ f_ctl = FC_FC_SEQ_INIT;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
+ ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, 0);
+ lport->tt.seq_send(lport, csp, conf_frame);
+ }
+ }
+ lport->tt.exch_done(seq);
+ }
+ /*
+ * Some resets driven by SCSI are not I/Os and do not have
+ * SCSI commands associated with the requests. We should not
+ * call I/O completion if we do not have a SCSI command.
+ */
+ if (fsp->cmd)
+ fc_io_compl(fsp);
+}
+
+/**
+ * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
+ * @fsp: The FCP packet whose exchanges should be canceled
+ * @error: The reason for the cancellation
+ */
+static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
+{
+ struct fc_lport *lport = fsp->lp;
+
+ if (fsp->seq_ptr) {
+ lport->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->status_code = error;
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
+ * @lport: The local port whose exchanges should be canceled
+ * @id: The target's ID
+ * @lun: The LUN
+ * @error: The reason for cancellation
+ *
+ * If lun or id is -1, they are ignored.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
+ unsigned int lun, int error)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ struct fc_fcp_pkt *fsp;
+ struct scsi_cmnd *sc_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+restart:
+ list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+ sc_cmd = fsp->cmd;
+ if (id != -1 && scmd_id(sc_cmd) != id)
+ continue;
+
+ if (lun != -1 && sc_cmd->device->lun != lun)
+ continue;
+
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+
+ if (!fc_fcp_lock_pkt(fsp)) {
+ fc_fcp_cleanup_cmd(fsp, error);
+ fc_io_compl(fsp);
+ fc_fcp_unlock_pkt(fsp);
+ }
+
+ fc_fcp_pkt_release(fsp);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ /*
+ * while we dropped the lock multiple pkts could
+ * have been released, so we have to start over.
+ */
+ goto restart;
+ }
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+}
+
+/**
+ * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
+ * @lport: The local port whose exchanges are to be aborted
+ */
+static void fc_fcp_abort_io(struct fc_lport *lport)
+{
+ fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
+}
+
+/**
+ * fc_fcp_pkt_send() - Send a fcp_pkt
+ * @lport: The local port to send the FCP packet on
+ * @fsp: The FCP packet to send
+ *
+ * Return: Zero for success and -1 for failure
+ * Locks: Called without locks held
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
+ int rc;
+
+ fsp->cmd->SCp.ptr = (char *)fsp;
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+ int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun);
+ memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
+ if (unlikely(rc)) {
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ fsp->cmd->SCp.ptr = NULL;
+ list_del(&fsp->list);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ }
+
+ return rc;
+}
+
+/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+ struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+
+ return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
+}
+
+/**
+ * fc_fcp_cmd_send() - Send a FCP command
+ * @lport: The local port to send the command on
+ * @fsp: The FCP packet the command is on
+ * @resp: The handler for the response
+ */
+static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg))
+{
+ struct fc_frame *fp;
+ struct fc_seq *seq;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rpriv;
+ const size_t len = sizeof(fsp->cdb_cmd);
+ int rc = 0;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return 0;
+
+ fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
+ if (!fp) {
+ rc = -1;
+ goto unlock;
+ }
+
+ memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+ fr_fsp(fp) = fsp;
+ rport = fsp->rport;
+ fsp->max_payload = rport->maxframe_size;
+ rpriv = rport->dd_data;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
+ FC_FCTL_REQ, 0);
+
+ seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
+ fsp, 0);
+ if (!seq) {
+ rc = -1;
+ goto unlock;
+ }
+ fsp->seq_ptr = seq;
+ fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
+
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+ return rc;
+}
+
+/**
+ * fc_fcp_error() - Handler for FCP layer errors
+ * @fsp: The FCP packet the error is on
+ * @fp: The frame that has errored
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ if (error == -FC_EX_CLOSED) {
+ fc_fcp_retry_cmd(fsp);
+ goto unlock;
+ }
+
+ /*
+ * clear abort pending, because the lower layer
+ * decided to force completion.
+ */
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->status_code = FC_CMD_PLOGO;
+ fc_fcp_complete_locked(fsp);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/**
+ * fc_fcp_pkt_abort() - Abort a fcp_pkt
+ * @fsp: The FCP packet to abort on
+ *
+ * Called to send an abort and then wait for abort completion
+ */
+static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
+{
+ int rc = FAILED;
+ unsigned long ticks_left;
+
+ if (fc_fcp_send_abort(fsp))
+ return FAILED;
+
+ init_completion(&fsp->tm_done);
+ fsp->wait_for_comp = 1;
+
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ ticks_left = wait_for_completion_timeout(&fsp->tm_done,
+ FC_SCSI_TM_TOV);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->wait_for_comp = 0;
+
+ if (!ticks_left) {
+ FC_FCP_DBG(fsp, "target abort cmd failed\n");
+ } else if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd passed\n");
+ rc = SUCCESS;
+ fc_fcp_complete_locked(fsp);
+ }
+
+ return rc;
+}
+
+/**
+ * fc_lun_reset_send() - Send LUN reset command
+ * @data: The FCP packet that identifies the LUN to be reset
+ */
+static void fc_lun_reset_send(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_lport *lport = fsp->lp;
+
+ if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
+ if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
+ return;
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+ setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ fc_fcp_unlock_pkt(fsp);
+ }
+}
+
+/**
+ * fc_lun_reset() - Send a LUN RESET command to a device
+ * and wait for the reply
+ * @lport: The local port to sent the command on
+ * @fsp: The FCP packet that identifies the LUN to be reset
+ * @id: The SCSI command ID
+ * @lun: The LUN ID to be reset
+ */
+static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
+ unsigned int id, unsigned int lun)
+{
+ int rc;
+
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+ int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun);
+
+ fsp->wait_for_comp = 1;
+ init_completion(&fsp->tm_done);
+
+ fc_lun_reset_send((unsigned long)fsp);
+
+ /*
+ * wait for completion of reset
+ * after that make sure all commands are terminated
+ */
+ rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->state |= FC_SRB_COMPL;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ del_timer_sync(&fsp->timer);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->seq_ptr) {
+ lport->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->wait_for_comp = 0;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ if (!rc) {
+ FC_SCSI_DBG(lport, "lun reset failed\n");
+ return FAILED;
+ }
+
+ /* cdb_status holds the tmf's rsp code */
+ if (fsp->cdb_status != FCP_TMF_CMPL)
+ return FAILED;
+
+ FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
+ fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
+ return SUCCESS;
+}
+
+/**
+ * fc_tm_done() - Task Management response handler
+ * @seq: The sequence that the response is on
+ * @fp: The response frame
+ * @arg: The FCP packet the response is for
+ */
+static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ /*
+ * If there is an error just let it timeout or wait
+ * for TMF to be aborted if it timedout.
+ *
+ * scsi-eh will escalate for when either happens.
+ */
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ /*
+ * raced with eh timeout handler.
+ */
+ if (!fsp->seq_ptr || !fsp->wait_for_comp)
+ goto out_unlock;
+
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_BLS)
+ fc_fcp_resp(fsp, fp);
+ fsp->seq_ptr = NULL;
+ fsp->lp->tt.exch_done(seq);
+out_unlock:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
+ * @lport: The local port to be cleaned up
+ */
+static void fc_fcp_cleanup(struct fc_lport *lport)
+{
+ fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
+}
+
+/**
+ * fc_fcp_timeout() - Handler for fcp_pkt timeouts
+ * @data: The FCP packet that has timed out
+ *
+ * If REC is supported then just issue it and return. The REC exchange will
+ * complete or time out and recovery can continue at that point. Otherwise,
+ * if the response has been received without all the data it has been
+ * ER_TIMEOUT since the response was received. If the response has not been
+ * received we see if data was received recently. If it has been then we
+ * continue waiting, otherwise, we abort the command.
+ */
+static void fc_fcp_timeout(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_rport *rport = fsp->rport;
+ struct fc_rport_libfc_priv *rpriv = rport->dd_data;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ if (fsp->cdb_cmd.fc_tm_flags)
+ goto unlock;
+
+ fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
+
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_rec(fsp);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
+ else
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+ fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/**
+ * fc_fcp_rec() - Send a REC ELS request
+ * @fsp: The FCP packet to send the REC request on
+ */
+static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rpriv;
+
+ lport = fsp->lp;
+ rport = fsp->rport;
+ rpriv = rport->dd_data;
+ if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
+ fsp->status_code = FC_HRD_ERROR;
+ fsp->io_status = 0;
+ fc_fcp_complete_locked(fsp);
+ return;
+ }
+
+ fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
+ if (!fp)
+ goto retry;
+
+ fr_seq(fp) = fsp->seq_ptr;
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
+ rpriv->local_port->port_id, FC_TYPE_ELS,
+ FC_FCTL_REQ, 0);
+ if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
+ fc_fcp_rec_resp, fsp,
+ 2 * lport->r_a_tov)) {
+ fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
+ return;
+ }
+retry:
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ else
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+}
+
+/**
+ * fc_fcp_rec_resp() - Handler for REC ELS responses
+ * @seq: The sequence the response is on
+ * @fp: The response frame
+ * @arg: The FCP packet the response is on
+ *
+ * If the response is a reject then the scsi layer will handle
+ * the timeout. If the response is a LS_ACC then if the I/O was not completed
+ * set the timeout and return. If the I/O was completed then complete the
+ * exchange and tell the SCSI layer.
+ */
+static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_els_rec_acc *recp;
+ struct fc_els_ls_rjt *rjt;
+ u32 e_stat;
+ u8 opcode;
+ u32 offset;
+ enum dma_data_direction data_dir;
+ enum fc_rctl r_ctl;
+ struct fc_rport_libfc_priv *rpriv;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_rec_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fsp->recov_retry = 0;
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ switch (rjt->er_reason) {
+ default:
+ FC_FCP_DBG(fsp, "device %x unexpected REC reject "
+ "reason %d expl %d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
+ /* fall through */
+ case ELS_RJT_UNSUP:
+ FC_FCP_DBG(fsp, "device does not support REC\n");
+ rpriv = fsp->rport->dd_data;
+ /*
+ * if we do not spport RECs or got some bogus
+ * reason then resetup timer so we check for
+ * making progress.
+ */
+ rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+ break;
+ case ELS_RJT_LOGIC:
+ case ELS_RJT_UNAB:
+ /*
+ * If no data transfer, the command frame got dropped
+ * so we just retry. If data was transferred, we
+ * lost the response but the target has no record,
+ * so we abort and retry.
+ */
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
+ fsp->xfer_len == 0) {
+ fc_fcp_retry_cmd(fsp);
+ break;
+ }
+ fc_fcp_recovery(fsp, FC_ERROR);
+ break;
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ if (fsp->state & FC_SRB_ABORTED)
+ goto unlock_out;
+
+ data_dir = fsp->cmd->sc_data_direction;
+ recp = fc_frame_payload_get(fp, sizeof(*recp));
+ offset = ntohl(recp->reca_fc4value);
+ e_stat = ntohl(recp->reca_e_stat);
+
+ if (e_stat & ESB_ST_COMPLETE) {
+
+ /*
+ * The exchange is complete.
+ *
+ * For output, we must've lost the response.
+ * For input, all data must've been sent.
+ * We lost may have lost the response
+ * (and a confirmation was requested) and maybe
+ * some data.
+ *
+ * If all data received, send SRR
+ * asking for response. If partial data received,
+ * or gaps, SRR requests data at start of gap.
+ * Recovery via SRR relies on in-order-delivery.
+ */
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end == offset) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else {
+ offset = fsp->xfer_contig_end;
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ } else if (e_stat & ESB_ST_SEQ_INIT) {
+ /*
+ * The remote port has the initiative, so just
+ * keep waiting for it to complete.
+ */
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ } else {
+
+ /*
+ * The exchange is incomplete, we have seq. initiative.
+ * Lost response with requested confirmation,
+ * lost confirmation, lost transfer ready or
+ * lost write data.
+ *
+ * For output, if not all data was received, ask
+ * for transfer ready to be repeated.
+ *
+ * If we received or sent all the data, send SRR to
+ * request response.
+ *
+ * If we lost a response, we may have lost some read
+ * data as well.
+ */
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ if (offset < fsp->data_len)
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ } else if (offset == fsp->xfer_contig_end) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end < offset) {
+ offset = fsp->xfer_contig_end;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ }
+ }
+unlock_out:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_rec_error() - Handler for REC errors
+ * @fsp: The FCP packet the error is on
+ * @fp: The REC frame
+ */
+static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ switch (error) {
+ case -FC_EX_CLOSED:
+ fc_fcp_retry_cmd(fsp);
+ break;
+
+ default:
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
+ fsp, fsp->rport->port_id, error);
+ fsp->status_code = FC_CMD_PLOGO;
+ /* fall through */
+
+ case -FC_EX_TIMEOUT:
+ /*
+ * Assume REC or LS_ACC was lost.
+ * The exchange manager will have aborted REC, so retry.
+ */
+ FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
+ fsp->rport->port_id, error, fsp->recov_retry,
+ FC_MAX_RECOV_RETRY);
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_fcp_recovery(fsp, FC_ERROR);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+}
+
+/**
+ * fc_fcp_recovery() - Handler for fcp_pkt recovery
+ * @fsp: The FCP pkt that needs to be aborted
+ */
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
+{
+ fsp->status_code = code;
+ fsp->cdb_status = 0;
+ fsp->io_status = 0;
+ /*
+ * if this fails then we let the scsi command timer fire and
+ * scsi-ml escalate.
+ */
+ fc_fcp_send_abort(fsp);
+}
+
+/**
+ * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
+ * @fsp: The FCP packet the SRR is to be sent on
+ * @r_ctl: The R_CTL field for the SRR request
+ * This is called after receiving status but insufficient data, or
+ * when expecting status but the request has timed out.
+ */
+static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
+{
+ struct fc_lport *lport = fsp->lp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rpriv;
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ struct fc_seq *seq;
+ struct fcp_srr *srr;
+ struct fc_frame *fp;
+ unsigned int rec_tov;
+
+ rport = fsp->rport;
+ rpriv = rport->dd_data;
+
+ if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
+ rpriv->rp_state != RPORT_ST_READY)
+ goto retry; /* shouldn't happen */
+ fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
+ if (!fp)
+ goto retry;
+
+ srr = fc_frame_payload_get(fp, sizeof(*srr));
+ memset(srr, 0, sizeof(*srr));
+ srr->srr_op = ELS_SRR;
+ srr->srr_ox_id = htons(ep->oxid);
+ srr->srr_rx_id = htons(ep->rxid);
+ srr->srr_r_ctl = r_ctl;
+ srr->srr_rel_off = htonl(offset);
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
+ FC_FCTL_REQ, 0);
+
+ rec_tov = get_fsp_rec_tov(fsp);
+ seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
+ fsp, jiffies_to_msecs(rec_tov));
+ if (!seq)
+ goto retry;
+
+ fsp->recov_seq = seq;
+ fsp->xfer_len = offset;
+ fsp->xfer_contig_end = offset;
+ fsp->state &= ~FC_SRB_RCV_STATUS;
+ fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
+ return;
+retry:
+ fc_fcp_retry_cmd(fsp);
+}
+
+/**
+ * fc_fcp_srr_resp() - Handler for SRR response
+ * @seq: The sequence the SRR is on
+ * @fp: The SRR frame
+ * @arg: The FCP packet the SRR is on
+ */
+static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_srr_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fh = fc_frame_header_get(fp);
+ /*
+ * BUG? fc_fcp_srr_error calls exch_done which would release
+ * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
+ * then fc_exch_timeout would be sending an abort. The exch_done
+ * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
+ * an abort response though.
+ */
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_unlock_pkt(fsp);
+ return;
+ }
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LS_ACC:
+ fsp->recov_retry = 0;
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ break;
+ case ELS_LS_RJT:
+ default:
+ fc_fcp_recovery(fsp, FC_ERROR);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fsp->lp->tt.exch_done(seq);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_srr_error() - Handler for SRR errors
+ * @fsp: The FCP packet that the SRR error is on
+ * @fp: The SRR frame
+ */
+static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+ switch (PTR_ERR(fp)) {
+ case -FC_EX_TIMEOUT:
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+ break;
+ case -FC_EX_CLOSED: /* e.g., link failure */
+ /* fall through */
+ default:
+ fc_fcp_retry_cmd(fsp);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fsp->lp->tt.exch_done(fsp->recov_seq);
+}
+
+/**
+ * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready
+ * @lport: The local port to be checked
+ */
+static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
+{
+ /* lock ? */
+ return (lport->state == LPORT_ST_READY) &&
+ lport->link_up && !lport->qfull;
+}
+
+/**
+ * fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
+ * @cmd: The scsi_cmnd to be executed
+ *
+ * This is the i/o strategy routine, called by the SCSI layer.
+ */
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_fcp_pkt *fsp;
+ struct fc_rport_libfc_priv *rpriv;
+ int rval;
+ int rc = 0;
+ struct fc_stats *stats;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if (!*(struct fc_remote_port **)rport->dd_data) {
+ /*
+ * rport is transitioning from blocked/deleted to
+ * online
+ */
+ sc_cmd->result = DID_IMM_RETRY << 16;
+ sc_cmd->scsi_done(sc_cmd);
+ goto out;
+ }
+
+ rpriv = rport->dd_data;
+
+ if (!fc_fcp_lport_queue_ready(lport)) {
+ if (lport->qfull)
+ fc_fcp_can_queue_ramp_down(lport);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
+ if (fsp == NULL) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /*
+ * build the libfc request pkt
+ */
+ fsp->cmd = sc_cmd; /* save the cmd */
+ fsp->rport = rport; /* set the remote port ptr */
+
+ /*
+ * set up the transfer length
+ */
+ fsp->data_len = scsi_bufflen(sc_cmd);
+ fsp->xfer_len = 0;
+
+ /*
+ * setup the data direction
+ */
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ fsp->req_flags = FC_SRB_READ;
+ stats->InputRequests++;
+ stats->InputBytes += fsp->data_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ fsp->req_flags = FC_SRB_WRITE;
+ stats->OutputRequests++;
+ stats->OutputBytes += fsp->data_len;
+ } else {
+ fsp->req_flags = 0;
+ stats->ControlRequests++;
+ }
+ put_cpu();
+
+ /*
+ * send it to the lower layer
+ * if we get -1 return then put the request in the pending
+ * queue.
+ */
+ rval = fc_fcp_pkt_send(lport, fsp);
+ if (rval != 0) {
+ fsp->state = FC_SRB_FREE;
+ fc_fcp_pkt_release(fsp);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ }
+out:
+ return rc;
+}
+EXPORT_SYMBOL(fc_queuecommand);
+
+/**
+ * fc_io_compl() - Handle responses for completed commands
+ * @fsp: The FCP packet that is complete
+ *
+ * Translates fcp_pkt errors to a Linux SCSI errors.
+ * The fcp packet lock must be held when calling.
+ */
+static void fc_io_compl(struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si;
+ struct scsi_cmnd *sc_cmd;
+ struct fc_lport *lport;
+ unsigned long flags;
+
+ /* release outstanding ddp context */
+ fc_fcp_ddp_done(fsp);
+
+ fsp->state |= FC_SRB_COMPL;
+ if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ del_timer_sync(&fsp->timer);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ }
+
+ lport = fsp->lp;
+ si = fc_get_scsi_internal(lport);
+
+ /*
+ * if can_queue ramp down is done then try can_queue ramp up
+ * since commands are completing now.
+ */
+ if (si->last_can_queue_ramp_down_time)
+ fc_fcp_can_queue_ramp_up(lport);
+
+ sc_cmd = fsp->cmd;
+ CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
+ switch (fsp->status_code) {
+ case FC_COMPLETE:
+ if (fsp->cdb_status == 0) {
+ /*
+ * good I/O status
+ */
+ sc_cmd->result = DID_OK << 16;
+ if (fsp->scsi_resid)
+ CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ } else {
+ /*
+ * transport level I/O was ok but scsi
+ * has non zero status
+ */
+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+ }
+ break;
+ case FC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_ERROR\n");
+ sc_cmd->result = DID_ERROR << 16;
+ break;
+ case FC_DATA_UNDRUN:
+ if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
+ /*
+ * scsi status is good but transport level
+ * underrun.
+ */
+ if (fsp->state & FC_SRB_RCV_STATUS) {
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
+ " due to FC_DATA_UNDRUN (trans)\n");
+ sc_cmd->result = DID_ERROR << 16;
+ }
+ } else {
+ /*
+ * scsi got underrun, this is an error
+ */
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_UNDRUN (scsi)\n");
+ CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+ }
+ break;
+ case FC_DATA_OVRRUN:
+ /*
+ * overrun is an error
+ */
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_OVRRUN\n");
+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+ break;
+ case FC_CMD_ABORTED:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+ break;
+ case FC_CMD_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
+ "due to FC_CMD_RESET\n");
+ sc_cmd->result = (DID_RESET << 16);
+ break;
+ case FC_HRD_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
+ "due to FC_HRD_ERROR\n");
+ sc_cmd->result = (DID_NO_CONNECT << 16);
+ break;
+ case FC_CRC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
+ "due to FC_CRC_ERROR\n");
+ sc_cmd->result = (DID_PARITY << 16);
+ break;
+ case FC_TIMED_OUT:
+ FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
+ "due to FC_TIMED_OUT\n");
+ sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+ break;
+ default:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to unknown error\n");
+ sc_cmd->result = (DID_ERROR << 16);
+ break;
+ }
+
+ if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
+ sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
+
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ list_del(&fsp->list);
+ sc_cmd->SCp.ptr = NULL;
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ sc_cmd->scsi_done(sc_cmd);
+
+ /* release ref from initial allocation in queue command */
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_eh_abort() - Abort a command
+ * @sc_cmd: The SCSI command to abort
+ *
+ * From SCSI host template.
+ * Send an ABTS to the target device and wait for the response.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_fcp_pkt *fsp;
+ struct fc_lport *lport;
+ struct fc_fcp_internal *si;
+ int rc = FAILED;
+ unsigned long flags;
+ int rval;
+
+ rval = fc_block_scsi_eh(sc_cmd);
+ if (rval)
+ return rval;
+
+ lport = shost_priv(sc_cmd->device->host);
+ if (lport->state != LPORT_ST_READY)
+ return rc;
+ else if (!lport->link_up)
+ return rc;
+
+ si = fc_get_scsi_internal(lport);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ fsp = CMD_SP(sc_cmd);
+ if (!fsp) {
+ /* command completed while scsi eh was setting up */
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ return SUCCESS;
+ }
+ /* grab a ref so the fsp and sc_cmd cannot be released from under us */
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+
+ if (fc_fcp_lock_pkt(fsp)) {
+ /* completed while we were waiting for timer to be deleted */
+ rc = SUCCESS;
+ goto release_pkt;
+ }
+
+ rc = fc_fcp_pkt_abort(fsp);
+ fc_fcp_unlock_pkt(fsp);
+
+release_pkt:
+ fc_fcp_pkt_release(fsp);
+ return rc;
+}
+EXPORT_SYMBOL(fc_eh_abort);
+
+/**
+ * fc_eh_device_reset() - Reset a single LUN
+ * @sc_cmd: The SCSI command which identifies the device whose
+ * LUN is to be reset
+ *
+ * Set from SCSI host template.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport;
+ struct fc_fcp_pkt *fsp;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ int rc = FAILED;
+ int rval;
+
+ rval = fc_block_scsi_eh(sc_cmd);
+ if (rval)
+ return rval;
+
+ lport = shost_priv(sc_cmd->device->host);
+
+ if (lport->state != LPORT_ST_READY)
+ return rc;
+
+ FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
+
+ fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
+ if (fsp == NULL) {
+ printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
+ goto out;
+ }
+
+ /*
+ * Build the libfc request pkt. Do not set the scsi cmnd, because
+ * the sc passed in is not setup for execution like when sent
+ * through the queuecommand callout.
+ */
+ fsp->rport = rport; /* set the remote port ptr */
+
+ /*
+ * flush outstanding commands
+ */
+ rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
+ fsp->state = FC_SRB_FREE;
+ fc_fcp_pkt_release(fsp);
+
+out:
+ return rc;
+}
+EXPORT_SYMBOL(fc_eh_device_reset);
+
+/**
+ * fc_eh_host_reset() - Reset a Scsi_Host.
+ * @sc_cmd: The SCSI command that identifies the SCSI host to be reset
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct Scsi_Host *shost = sc_cmd->device->host;
+ struct fc_lport *lport = shost_priv(shost);
+ unsigned long wait_tmo;
+
+ FC_SCSI_DBG(lport, "Resetting host\n");
+
+ fc_block_scsi_eh(sc_cmd);
+
+ lport->tt.lport_reset(lport);
+ wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
+ while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
+ wait_tmo))
+ msleep(1000);
+
+ if (fc_fcp_lport_queue_ready(lport)) {
+ shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
+ "on port (%6.6x)\n", lport->port_id);
+ return SUCCESS;
+ } else {
+ shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
+ "port (%6.6x) is not ready.\n",
+ lport->port_id);
+ return FAILED;
+ }
+}
+EXPORT_SYMBOL(fc_eh_host_reset);
+
+/**
+ * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
+ * @sdev: The SCSI device that identifies the SCSI host
+ *
+ * Configures queue depth based on host's cmd_per_len. If not set
+ * then we use the libfc default.
+ */
+int fc_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
+ return 0;
+}
+EXPORT_SYMBOL(fc_slave_alloc);
+
+/**
+ * fc_fcp_destory() - Tear down the FCP layer for a given local port
+ * @lport: The local port that no longer needs the FCP layer
+ */
+void fc_fcp_destroy(struct fc_lport *lport)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+
+ if (!list_empty(&si->scsi_pkt_queue))
+ printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
+ "port (%6.6x)\n", lport->port_id);
+
+ mempool_destroy(si->scsi_pkt_pool);
+ kfree(si);
+ lport->scsi_priv = NULL;
+}
+EXPORT_SYMBOL(fc_fcp_destroy);
+
+int fc_setup_fcp(void)
+{
+ int rc = 0;
+
+ scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+ sizeof(struct fc_fcp_pkt),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!scsi_pkt_cachep) {
+ printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+ "module load failed!");
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+void fc_destroy_fcp(void)
+{
+ if (scsi_pkt_cachep)
+ kmem_cache_destroy(scsi_pkt_cachep);
+}
+
+/**
+ * fc_fcp_init() - Initialize the FCP layer for a local port
+ * @lport: The local port to initialize the exchange layer for
+ */
+int fc_fcp_init(struct fc_lport *lport)
+{
+ int rc;
+ struct fc_fcp_internal *si;
+
+ if (!lport->tt.fcp_cmd_send)
+ lport->tt.fcp_cmd_send = fc_fcp_cmd_send;
+
+ if (!lport->tt.fcp_cleanup)
+ lport->tt.fcp_cleanup = fc_fcp_cleanup;
+
+ if (!lport->tt.fcp_abort_io)
+ lport->tt.fcp_abort_io = fc_fcp_abort_io;
+
+ si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+ lport->scsi_priv = si;
+ si->max_can_queue = lport->host->can_queue;
+ INIT_LIST_HEAD(&si->scsi_pkt_queue);
+ spin_lock_init(&si->scsi_queue_lock);
+
+ si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
+ if (!si->scsi_pkt_pool) {
+ rc = -ENOMEM;
+ goto free_internal;
+ }
+ return 0;
+
+free_internal:
+ kfree(si);
+ return rc;
+}
+EXPORT_SYMBOL(fc_fcp_init);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000..0382ac069
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Frame allocation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/gfp.h>
+
+#include <scsi/fc_frame.h>
+
+/*
+ * Check the CRC in a frame.
+ */
+u32 fc_frame_crc_check(struct fc_frame *fp)
+{
+ u32 crc;
+ u32 error;
+ const u8 *bp;
+ unsigned int len;
+
+ WARN_ON(!fc_frame_is_linear(fp));
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
+ bp = (const u8 *) fr_hdr(fp);
+ crc = ~crc32(~0, bp, len);
+ error = crc ^ fr_crc(fp);
+ return error;
+}
+EXPORT_SYMBOL(fc_frame_crc_check);
+
+/*
+ * Allocate a frame intended to be sent.
+ * Get an sk_buff for the frame and set the length.
+ */
+struct fc_frame *_fc_frame_alloc(size_t len)
+{
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+
+ WARN_ON((len % sizeof(u32)) != 0);
+ len += sizeof(struct fc_frame_header);
+ skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
+ NET_SKB_PAD, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
+ fp = (struct fc_frame *) skb;
+ fc_frame_init(fp);
+ skb_put(skb, len);
+ return fp;
+}
+EXPORT_SYMBOL(_fc_frame_alloc);
+
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
+{
+ struct fc_frame *fp;
+ size_t fill;
+
+ fill = payload_len % 4;
+ if (fill != 0)
+ fill = 4 - fill;
+ fp = _fc_frame_alloc(payload_len + fill);
+ if (fp) {
+ memset((char *) fr_hdr(fp) + payload_len, 0, fill);
+ /* trim is OK, we just allocated it so there are no fragments */
+ skb_trim(fp_skb(fp),
+ payload_len + sizeof(struct fc_frame_header));
+ }
+ return fp;
+}
+EXPORT_SYMBOL(fc_frame_alloc_fill);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
new file mode 100644
index 000000000..c11a638f3
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include "fc_libfc.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL v2");
+
+unsigned int fc_debug_logging;
+module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+ BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+ [0] = &fc_rport_t0_prov,
+ [FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+ [FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
+/**
+ * libfc_init() - Initialize libfc.ko
+ */
+static int __init libfc_init(void)
+{
+ int rc = 0;
+
+ rc = fc_setup_fcp();
+ if (rc)
+ return rc;
+
+ rc = fc_setup_exch_mgr();
+ if (rc)
+ goto destroy_pkt_cache;
+
+ rc = fc_setup_rport();
+ if (rc)
+ goto destroy_em;
+
+ return rc;
+destroy_em:
+ fc_destroy_exch_mgr();
+destroy_pkt_cache:
+ fc_destroy_fcp();
+ return rc;
+}
+module_init(libfc_init);
+
+/**
+ * libfc_exit() - Tear down libfc.ko
+ */
+static void __exit libfc_exit(void)
+{
+ fc_destroy_fcp();
+ fc_destroy_exch_mgr();
+ fc_destroy_rport();
+}
+module_exit(libfc_exit);
+
+/**
+ * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer
+ * into a scatter-gather list (SG list).
+ *
+ * @buf: pointer to the data buffer.
+ * @len: the byte-length of the data buffer.
+ * @sg: pointer to the pointer of the SG list.
+ * @nents: pointer to the remaining number of entries in the SG list.
+ * @offset: pointer to the current offset in the SG list.
+ * @crc: pointer to the 32-bit crc value.
+ * If crc is NULL, CRC is not calculated.
+ */
+u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
+ struct scatterlist *sg,
+ u32 *nents, size_t *offset,
+ u32 *crc)
+{
+ size_t remaining = len;
+ u32 copy_len = 0;
+
+ while (remaining > 0 && sg) {
+ size_t off, sg_bytes;
+ void *page_addr;
+
+ if (*offset >= sg->length) {
+ /*
+ * Check for end and drop resources
+ * from the last iteration.
+ */
+ if (!(*nents))
+ break;
+ --(*nents);
+ *offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ sg_bytes = min(remaining, sg->length - *offset);
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we are limited to mapping PAGE_SIZE at a time.
+ */
+ off = *offset + sg->offset;
+ sg_bytes = min(sg_bytes,
+ (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
+ if (crc)
+ *crc = crc32(*crc, buf, sg_bytes);
+ memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
+ kunmap_atomic(page_addr);
+ buf += sg_bytes;
+ *offset += sg_bytes;
+ remaining -= sg_bytes;
+ copy_len += sg_bytes;
+ }
+ return copy_len;
+}
+
+/**
+ * fc_fill_hdr() - fill FC header fields based on request
+ * @fp: reply frame containing header to be filled in
+ * @in_fp: request frame containing header to use in filling in reply
+ * @r_ctl: R_CTL value for header
+ * @f_ctl: F_CTL value for header, with 0 pad
+ * @seq_cnt: sequence count for the header, ignored if frame has a sequence
+ * @parm_offset: parameter / offset value
+ */
+void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
+ enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset)
+{
+ struct fc_frame_header *fh;
+ struct fc_frame_header *in_fh;
+ struct fc_seq *sp;
+ u32 fill;
+
+ fh = __fc_frame_header_get(fp);
+ in_fh = __fc_frame_header_get(in_fp);
+
+ if (f_ctl & FC_FC_END_SEQ) {
+ fill = -fr_len(fp) & 3;
+ if (fill) {
+ /* TODO, this may be a problem with fragmented skb */
+ memset(skb_put(fp_skb(fp), fill), 0, fill);
+ f_ctl |= fill;
+ }
+ fr_eof(fp) = FC_EOF_T;
+ } else {
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
+ fr_eof(fp) = FC_EOF_N;
+ }
+
+ fh->fh_r_ctl = r_ctl;
+ memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id));
+ memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id));
+ fh->fh_type = in_fh->fh_type;
+ hton24(fh->fh_f_ctl, f_ctl);
+ fh->fh_ox_id = in_fh->fh_ox_id;
+ fh->fh_rx_id = in_fh->fh_rx_id;
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = htonl(parm_offset);
+
+ sp = fr_seq(in_fp);
+ if (sp) {
+ fr_seq(fp) = sp;
+ fh->fh_seq_id = sp->id;
+ seq_cnt = sp->cnt;
+ } else {
+ fh->fh_seq_id = 0;
+ }
+ fh->fh_seq_cnt = ntohs(seq_cnt);
+ fr_sof(fp) = seq_cnt ? FC_SOF_N3 : FC_SOF_I3;
+ fr_encaps(fp) = fr_encaps(in_fp);
+}
+EXPORT_SYMBOL(fc_fill_hdr);
+
+/**
+ * fc_fill_reply_hdr() - fill FC reply header fields based on request
+ * @fp: reply frame containing header to be filled in
+ * @in_fp: request frame containing header to use in filling in reply
+ * @r_ctl: R_CTL value for reply
+ * @parm_offset: parameter / offset value
+ */
+void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
+ enum fc_rctl r_ctl, u32 parm_offset)
+{
+ struct fc_seq *sp;
+
+ sp = fr_seq(in_fp);
+ if (sp)
+ fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+ fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
+}
+EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport
+ * if there is service provider (target provider) registered with libfc
+ * for specified "fc_ft_type"
+ * @lport: Local port which service_params needs to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+ struct fc4_prov *prov_entry;
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ BUG_ON(!lport);
+ prov_entry = fc_passive_prov[type];
+ if (type == FC_TYPE_FCP) {
+ if (prov_entry && prov_entry->recv)
+ lport->service_params |= FCP_SPPF_TARG_FCN;
+ }
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+ struct fc_lport *lport;
+
+ mutex_lock(&fc_prov_mutex);
+ list_for_each_entry(lport, &fc_local_ports, lport_list)
+ notify(lport, arg);
+ mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ struct fc4_prov **prov_entry;
+ int ret = 0;
+
+ if (type >= FC_FC4_PROV_SIZE)
+ return -EINVAL;
+ mutex_lock(&fc_prov_mutex);
+ prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+ if (*prov_entry)
+ ret = -EBUSY;
+ else
+ *prov_entry = prov;
+ mutex_unlock(&fc_prov_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ mutex_lock(&fc_prov_mutex);
+ if (prov->recv)
+ RCU_INIT_POINTER(fc_passive_prov[type], NULL);
+ else
+ RCU_INIT_POINTER(fc_active_prov[type], NULL);
+ mutex_unlock(&fc_prov_mutex);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_add_tail(&lport->lport_list, &fc_local_ports);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_ADD, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_del(&lport->lport_list);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_DEL, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
new file mode 100644
index 000000000..b74189d89
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_LIBFC_H_
+#define _FC_LIBFC_H_
+
+#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */
+#define FC_LPORT_LOGGING 0x02 /* lport layer logging */
+#define FC_DISC_LOGGING 0x04 /* discovery layer logging */
+#define FC_RPORT_LOGGING 0x08 /* rport layer logging */
+#define FC_FCP_LOGGING 0x10 /* I/O path logging */
+#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */
+#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */
+#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */
+
+extern unsigned int fc_debug_logging;
+
+#define FC_CHECK_LOGGING(LEVEL, CMD) \
+ do { \
+ if (unlikely(fc_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+ } while (0)
+
+#define FC_LIBFC_DBG(fmt, args...) \
+ FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
+ pr_info("libfc: " fmt, ##args))
+
+#define FC_LPORT_DBG(lport, fmt, args...) \
+ FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
+ pr_info("host%u: lport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args))
+
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ pr_info("host%u: disc: " fmt, \
+ fc_disc_lport(disc)->host->host_no, \
+ ##args))
+
+#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
+ FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
+ pr_info("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (port_id), ##args))
+
+#define FC_RPORT_DBG(rdata, fmt, args...) \
+ FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
+
+#define FC_FCP_DBG(pkt, fmt, args...) \
+ FC_CHECK_LOGGING(FC_FCP_LOGGING, \
+ { \
+ if ((pkt)->seq_ptr) { \
+ struct fc_exch *_ep = NULL; \
+ _ep = fc_seq_exch((pkt)->seq_ptr); \
+ pr_info("host%u: fcp: %6.6x: " \
+ "xid %04x-%04x: " fmt, \
+ (pkt)->lp->host->host_no, \
+ (pkt)->rport->port_id, \
+ (_ep)->oxid, (_ep)->rxid, ##args); \
+ } else { \
+ pr_info("host%u: fcp: %6.6x: " fmt, \
+ (pkt)->lp->host->host_no, \
+ (pkt)->rport->port_id, ##args); \
+ } \
+ })
+
+#define FC_EXCH_DBG(exch, fmt, args...) \
+ FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
+ pr_info("host%u: xid %4x: " fmt, \
+ (exch)->lp->host->host_no, \
+ exch->xid, ##args))
+
+#define FC_SCSI_DBG(lport, fmt, args...) \
+ FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
+ pr_info("host%u: scsi: " fmt, \
+ (lport)->host->host_no, ##args))
+
+/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
+extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
+extern struct mutex fc_prov_mutex; /* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
+
+/*
+ * Set up direct-data placement for this I/O request
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
+
+/*
+ * Module setup functions
+ */
+int fc_setup_exch_mgr(void);
+void fc_destroy_exch_mgr(void);
+int fc_setup_rport(void);
+void fc_destroy_rport(void);
+int fc_setup_fcp(void);
+void fc_destroy_fcp(void);
+
+/*
+ * Internal libfc functions
+ */
+const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
+
+/*
+ * Copies a buffer into an sg list
+ */
+u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
+ struct scatterlist *sg,
+ u32 *nents, size_t *offset,
+ u32 *crc);
+
+#endif /* _FC_LIBFC_H_ */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000..e01a29863
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,2144 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * PORT LOCKING NOTES
+ *
+ * These comments only apply to the 'port code' which consists of the lport,
+ * disc and rport blocks.
+ *
+ * MOTIVATION
+ *
+ * The lport, disc and rport blocks all have mutexes that are used to protect
+ * those objects. The main motivation for these locks is to prevent from
+ * having an lport reset just before we send a frame. In that scenario the
+ * lport's FID would get set to zero and then we'd send a frame with an
+ * invalid SID. We also need to ensure that states don't change unexpectedly
+ * while processing another state.
+ *
+ * HIERARCHY
+ *
+ * The following hierarchy defines the locking rules. A greater lock
+ * may be held before acquiring a lesser lock, but a lesser lock should never
+ * be held while attempting to acquire a greater lock. Here is the hierarchy-
+ *
+ * lport > disc, lport > rport, disc > rport
+ *
+ * CALLBACKS
+ *
+ * The callbacks cause complications with this scheme. There is a callback
+ * from the rport (to either lport or disc) and a callback from disc
+ * (to the lport).
+ *
+ * As rports exit the rport state machine a callback is made to the owner of
+ * the rport to notify success or failure. Since the callback is likely to
+ * cause the lport or disc to grab its lock we cannot hold the rport lock
+ * while making the callback. To ensure that the rport is not free'd while
+ * processing the callback the rport callbacks are serialized through a
+ * single-threaded workqueue. An rport would never be free'd while in a
+ * callback handler because no other rport work in this queue can be executed
+ * at the same time.
+ *
+ * When discovery succeeds or fails a callback is made to the lport as
+ * notification. Currently, successful discovery causes the lport to take no
+ * action. A failure will cause the lport to reset. There is likely a circular
+ * locking problem with this implementation.
+ */
+
+/*
+ * LPORT LOCKING
+ *
+ * The critical sections protected by the lport's mutex are quite broad and
+ * may be improved upon in the future. The lport code and its locking doesn't
+ * influence the I/O path, so excessive locking doesn't penalize I/O
+ * performance.
+ *
+ * The strategy is to lock whenever processing a request or response. Note
+ * that every _enter_* function corresponds to a state change. They generally
+ * change the lports state and then send a request out on the wire. We lock
+ * before calling any of these functions to protect that state change. This
+ * means that the entry points into the lport block manage the locks while
+ * the state machine can transition between states (i.e. _enter_* functions)
+ * while always staying protected.
+ *
+ * When handling responses we also hold the lport mutex broadly. When the
+ * lport receives the response frame it locks the mutex and then calls the
+ * appropriate handler for the particuar response. Generally a response will
+ * trigger a state change and so the lock must already be held.
+ *
+ * Retries also have to consider the locking. The retries occur from a work
+ * context and the work function will lock the lport and then retry the state
+ * (i.e. _enter_* function).
+ */
+
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+#include <linux/scatterlist.h>
+
+#include "fc_libfc.h"
+
+/* Fabric IDs to use for point-to-point mode, chosen on whims. */
+#define FC_LOCAL_PTP_FID_LO 0x010101
+#define FC_LOCAL_PTP_FID_HI 0x010102
+
+#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+
+static void fc_lport_error(struct fc_lport *, struct fc_frame *);
+
+static void fc_lport_enter_reset(struct fc_lport *);
+static void fc_lport_enter_flogi(struct fc_lport *);
+static void fc_lport_enter_dns(struct fc_lport *);
+static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
+static void fc_lport_enter_scr(struct fc_lport *);
+static void fc_lport_enter_ready(struct fc_lport *);
+static void fc_lport_enter_logo(struct fc_lport *);
+static void fc_lport_enter_fdmi(struct fc_lport *lport);
+static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
+
+static const char *fc_lport_state_names[] = {
+ [LPORT_ST_DISABLED] = "disabled",
+ [LPORT_ST_FLOGI] = "FLOGI",
+ [LPORT_ST_DNS] = "dNS",
+ [LPORT_ST_RNN_ID] = "RNN_ID",
+ [LPORT_ST_RSNN_NN] = "RSNN_NN",
+ [LPORT_ST_RSPN_ID] = "RSPN_ID",
+ [LPORT_ST_RFT_ID] = "RFT_ID",
+ [LPORT_ST_RFF_ID] = "RFF_ID",
+ [LPORT_ST_FDMI] = "FDMI",
+ [LPORT_ST_RHBA] = "RHBA",
+ [LPORT_ST_RPA] = "RPA",
+ [LPORT_ST_DHBA] = "DHBA",
+ [LPORT_ST_DPRT] = "DPRT",
+ [LPORT_ST_SCR] = "SCR",
+ [LPORT_ST_READY] = "Ready",
+ [LPORT_ST_LOGO] = "LOGO",
+ [LPORT_ST_RESET] = "reset",
+};
+
+/**
+ * struct fc_bsg_info - FC Passthrough managemet structure
+ * @job: The passthrough job
+ * @lport: The local port to pass through a command
+ * @rsp_code: The expected response code
+ * @sg: job->reply_payload.sg_list
+ * @nents: job->reply_payload.sg_cnt
+ * @offset: The offset into the response data
+ */
+struct fc_bsg_info {
+ struct fc_bsg_job *job;
+ struct fc_lport *lport;
+ u16 rsp_code;
+ struct scatterlist *sg;
+ u32 nents;
+ size_t offset;
+};
+
+/**
+ * fc_frame_drop() - Dummy frame handler
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ */
+static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
+{
+ fc_frame_free(fp);
+ return 0;
+}
+
+/**
+ * fc_lport_rport_callback() - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rdata: private remote port data
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock should not be held when calling
+ * this function.
+ */
+static void fc_lport_rport_callback(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
+ rdata->ids.port_id);
+
+ mutex_lock(&lport->lp_mutex);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (lport->state == LPORT_ST_DNS) {
+ lport->dns_rdata = rdata;
+ fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
+ } else if (lport->state == LPORT_ST_FDMI) {
+ lport->ms_rdata = rdata;
+ fc_lport_enter_ms(lport, LPORT_ST_DHBA);
+ } else {
+ FC_LPORT_DBG(lport, "Received an READY event "
+ "on port (%6.6x) for the directory "
+ "server, but the lport is not "
+ "in the DNS or FDMI state, it's in the "
+ "%d state", rdata->ids.port_id,
+ lport->state);
+ lport->tt.rport_logoff(rdata);
+ }
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ if (rdata->ids.port_id == FC_FID_DIR_SERV)
+ lport->dns_rdata = NULL;
+ else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
+ lport->ms_rdata = NULL;
+ break;
+ case RPORT_EV_NONE:
+ break;
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_state() - Return a string which represents the lport's state
+ * @lport: The lport whose state is to converted to a string
+ */
+static const char *fc_lport_state(struct fc_lport *lport)
+{
+ const char *cp;
+
+ cp = fc_lport_state_names[lport->state];
+ if (!cp)
+ cp = "unknown";
+ return cp;
+}
+
+/**
+ * fc_lport_ptp_setup() - Create an rport for point-to-point mode
+ * @lport: The lport to attach the ptp rport to
+ * @remote_fid: The FID of the ptp rport
+ * @remote_wwpn: The WWPN of the ptp rport
+ * @remote_wwnn: The WWNN of the ptp rport
+ */
+static void fc_lport_ptp_setup(struct fc_lport *lport,
+ u32 remote_fid, u64 remote_wwpn,
+ u64 remote_wwnn)
+{
+ mutex_lock(&lport->disc.disc_mutex);
+ if (lport->ptp_rdata) {
+ lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ }
+ lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ kref_get(&lport->ptp_rdata->kref);
+ lport->ptp_rdata->ids.port_name = remote_wwpn;
+ lport->ptp_rdata->ids.node_name = remote_wwnn;
+ mutex_unlock(&lport->disc.disc_mutex);
+
+ lport->tt.rport_login(lport->ptp_rdata);
+
+ fc_lport_enter_ready(lport);
+}
+
+/**
+ * fc_get_host_port_state() - Return the port state of the given Scsi_Host
+ * @shost: The SCSI host whose port state is to be determined
+ */
+void fc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ mutex_lock(&lport->lp_mutex);
+ if (!lport->link_up)
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else
+ switch (lport->state) {
+ case LPORT_ST_READY:
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_get_host_port_state);
+
+/**
+ * fc_get_host_speed() - Return the speed of the given Scsi_Host
+ * @shost: The SCSI host whose port speed is to be determined
+ */
+void fc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_speed(shost) = lport->link_speed;
+}
+EXPORT_SYMBOL(fc_get_host_speed);
+
+/**
+ * fc_get_host_stats() - Return the Scsi_Host's statistics
+ * @shost: The SCSI host whose statistics are to be returned
+ */
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct timespec v0, v1;
+ unsigned int cpu;
+ u64 fcp_in_bytes = 0;
+ u64 fcp_out_bytes = 0;
+
+ fc_stats = &lport->host_stats;
+ memset(fc_stats, 0, sizeof(struct fc_host_statistics));
+
+ jiffies_to_timespec(jiffies, &v0);
+ jiffies_to_timespec(lport->boot_time, &v1);
+ fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+
+ for_each_possible_cpu(cpu) {
+ struct fc_stats *stats;
+
+ stats = per_cpu_ptr(lport->stats, cpu);
+
+ fc_stats->tx_frames += stats->TxFrames;
+ fc_stats->tx_words += stats->TxWords;
+ fc_stats->rx_frames += stats->RxFrames;
+ fc_stats->rx_words += stats->RxWords;
+ fc_stats->error_frames += stats->ErrorFrames;
+ fc_stats->invalid_crc_count += stats->InvalidCRCCount;
+ fc_stats->fcp_input_requests += stats->InputRequests;
+ fc_stats->fcp_output_requests += stats->OutputRequests;
+ fc_stats->fcp_control_requests += stats->ControlRequests;
+ fcp_in_bytes += stats->InputBytes;
+ fcp_out_bytes += stats->OutputBytes;
+ fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
+ fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
+ fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
+ fc_stats->link_failure_count += stats->LinkFailureCount;
+ }
+ fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
+ fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
+ fc_stats->lip_count = -1;
+ fc_stats->nos_count = -1;
+ fc_stats->loss_of_sync_count = -1;
+ fc_stats->loss_of_signal_count = -1;
+ fc_stats->prim_seq_protocol_err_count = -1;
+ fc_stats->dumped_frames = -1;
+
+ /* update exches stats */
+ fc_exch_update_stats(lport);
+
+ return fc_stats;
+}
+EXPORT_SYMBOL(fc_get_host_stats);
+
+/**
+ * fc_lport_flogi_fill() - Fill in FLOGI command for request
+ * @lport: The local port the FLOGI is for
+ * @flogi: The FLOGI command
+ * @op: The opcode
+ */
+static void fc_lport_flogi_fill(struct fc_lport *lport,
+ struct fc_els_flogi *flogi,
+ unsigned int op)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (op != ELS_FLOGI) {
+ sp->sp_features = htons(FC_SP_FT_CIRO);
+ sp->sp_tot_seq = htons(255); /* seq. we accept */
+ sp->sp_rel_off = htons(0x1f);
+ sp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lport->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+ }
+}
+
+/**
+ * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
+ * @lport: The local port to add a new FC-4 type to
+ * @type: The new FC-4 type
+ */
+static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
+{
+ __be32 *mp;
+
+ mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
+ *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
+}
+
+/**
+ * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
+ * @lport: Fibre Channel local port receiving the RLIR
+ * @fp: The RLIR request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
+
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_lport_recv_echo_req() - Handle received ECHO request
+ * @lport: The local port receiving the ECHO
+ * @fp: ECHO request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_echo_req(struct fc_lport *lport,
+ struct fc_frame *in_fp)
+{
+ struct fc_frame *fp;
+ unsigned int len;
+ void *pp;
+ void *dp;
+
+ FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
+ fc_lport_state(lport));
+
+ len = fr_len(in_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(in_fp, len);
+
+ if (len < sizeof(__be32))
+ len = sizeof(__be32);
+
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ dp = fc_frame_payload_get(fp, len);
+ memcpy(dp, pp, len);
+ *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
+ * @lport: The local port receiving the RNID
+ * @fp: The RNID request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_rnid_req(struct fc_lport *lport,
+ struct fc_frame *in_fp)
+{
+ struct fc_frame *fp;
+ struct fc_els_rnid *req;
+ struct {
+ struct fc_els_rnid_resp rnid;
+ struct fc_els_rnid_cid cid;
+ struct fc_els_rnid_gen gen;
+ } *rp;
+ struct fc_seq_els_data rjt_data;
+ u8 fmt;
+ size_t len;
+
+ FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
+ fc_lport_state(lport));
+
+ req = fc_frame_payload_get(in_fp, sizeof(*req));
+ if (!req) {
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ } else {
+ fmt = req->rnid_fmt;
+ len = sizeof(*rp);
+ if (fmt != ELS_RNIDF_GEN ||
+ ntohl(lport->rnid_gen.rnid_atype) == 0) {
+ fmt = ELS_RNIDF_NONE; /* nothing to provide */
+ len -= sizeof(rp->gen);
+ }
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ rp = fc_frame_payload_get(fp, len);
+ memset(rp, 0, len);
+ rp->rnid.rnid_cmd = ELS_LS_ACC;
+ rp->rnid.rnid_fmt = fmt;
+ rp->rnid.rnid_cid_len = sizeof(rp->cid);
+ rp->cid.rnid_wwpn = htonll(lport->wwpn);
+ rp->cid.rnid_wwnn = htonll(lport->wwnn);
+ if (fmt == ELS_RNIDF_GEN) {
+ rp->rnid.rnid_sid_len = sizeof(rp->gen);
+ memcpy(&rp->gen, &lport->rnid_gen,
+ sizeof(rp->gen));
+ }
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ }
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_logo_req() - Handle received fabric LOGO request
+ * @lport: The local port receiving the LOGO
+ * @fp: The LOGO request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_lport_enter_reset(lport);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fabric_login() - Start the lport state machine
+ * @lport: The local port that should log into the fabric
+ *
+ * Locking Note: This function should not be called
+ * with the lport lock held.
+ */
+int fc_fabric_login(struct fc_lport *lport)
+{
+ int rc = -1;
+
+ mutex_lock(&lport->lp_mutex);
+ if (lport->state == LPORT_ST_DISABLED ||
+ lport->state == LPORT_ST_LOGO) {
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
+ fc_lport_enter_reset(lport);
+ rc = 0;
+ }
+ mutex_unlock(&lport->lp_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(fc_fabric_login);
+
+/**
+ * __fc_linkup() - Handler for transport linkup events
+ * @lport: The lport whose link is up
+ *
+ * Locking: must be called with the lp_mutex held
+ */
+void __fc_linkup(struct fc_lport *lport)
+{
+ if (!lport->link_up) {
+ lport->link_up = 1;
+
+ if (lport->state == LPORT_ST_RESET)
+ fc_lport_enter_flogi(lport);
+ }
+}
+
+/**
+ * fc_linkup() - Handler for transport linkup events
+ * @lport: The local port whose link is up
+ */
+void fc_linkup(struct fc_lport *lport)
+{
+ printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
+
+ mutex_lock(&lport->lp_mutex);
+ __fc_linkup(lport);
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkup);
+
+/**
+ * __fc_linkdown() - Handler for transport linkdown events
+ * @lport: The lport whose link is down
+ *
+ * Locking: must be called with the lp_mutex held
+ */
+void __fc_linkdown(struct fc_lport *lport)
+{
+ if (lport->link_up) {
+ lport->link_up = 0;
+ fc_lport_enter_reset(lport);
+ lport->tt.fcp_cleanup(lport);
+ }
+}
+
+/**
+ * fc_linkdown() - Handler for transport linkdown events
+ * @lport: The local port whose link is down
+ */
+void fc_linkdown(struct fc_lport *lport)
+{
+ printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
+
+ mutex_lock(&lport->lp_mutex);
+ __fc_linkdown(lport);
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkdown);
+
+/**
+ * fc_fabric_logoff() - Logout of the fabric
+ * @lport: The local port to logoff the fabric
+ *
+ * Return value:
+ * 0 for success, -1 for failure
+ */
+int fc_fabric_logoff(struct fc_lport *lport)
+{
+ lport->tt.disc_stop_final(lport);
+ mutex_lock(&lport->lp_mutex);
+ if (lport->dns_rdata)
+ lport->tt.rport_logoff(lport->dns_rdata);
+ mutex_unlock(&lport->lp_mutex);
+ lport->tt.rport_flush_queue();
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_logo(lport);
+ mutex_unlock(&lport->lp_mutex);
+ cancel_delayed_work_sync(&lport->retry_work);
+ return 0;
+}
+EXPORT_SYMBOL(fc_fabric_logoff);
+
+/**
+ * fc_lport_destroy() - Unregister a fc_lport
+ * @lport: The local port to unregister
+ *
+ * Note:
+ * exit routine for fc_lport instance
+ * clean-up all the allocated memory
+ * and free up other system resources.
+ *
+ */
+int fc_lport_destroy(struct fc_lport *lport)
+{
+ mutex_lock(&lport->lp_mutex);
+ lport->state = LPORT_ST_DISABLED;
+ lport->link_up = 0;
+ lport->tt.frame_send = fc_frame_drop;
+ mutex_unlock(&lport->lp_mutex);
+
+ lport->tt.fcp_abort_io(lport);
+ lport->tt.disc_stop_final(lport);
+ lport->tt.exch_mgr_reset(lport, 0, 0);
+ cancel_delayed_work_sync(&lport->retry_work);
+ fc_fc4_del_lport(lport);
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_destroy);
+
+/**
+ * fc_set_mfs() - Set the maximum frame size for a local port
+ * @lport: The local port to set the MFS for
+ * @mfs: The new MFS
+ */
+int fc_set_mfs(struct fc_lport *lport, u32 mfs)
+{
+ unsigned int old_mfs;
+ int rc = -EINVAL;
+
+ mutex_lock(&lport->lp_mutex);
+
+ old_mfs = lport->mfs;
+
+ if (mfs >= FC_MIN_MAX_FRAME) {
+ mfs &= ~3;
+ if (mfs > FC_MAX_FRAME)
+ mfs = FC_MAX_FRAME;
+ mfs -= sizeof(struct fc_frame_header);
+ lport->mfs = mfs;
+ rc = 0;
+ }
+
+ if (!rc && mfs < old_mfs)
+ fc_lport_enter_reset(lport);
+
+ mutex_unlock(&lport->lp_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(fc_set_mfs);
+
+/**
+ * fc_lport_disc_callback() - Callback for discovery events
+ * @lport: The local port receiving the event
+ * @event: The discovery event
+ */
+static void fc_lport_disc_callback(struct fc_lport *lport,
+ enum fc_disc_event event)
+{
+ switch (event) {
+ case DISC_EV_SUCCESS:
+ FC_LPORT_DBG(lport, "Discovery succeeded\n");
+ break;
+ case DISC_EV_FAILED:
+ printk(KERN_ERR "host%d: libfc: "
+ "Discovery failed for port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_reset(lport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+ case DISC_EV_NONE:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/**
+ * fc_rport_enter_ready() - Enter the ready state and start discovery
+ * @lport: The local port that is ready
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ready(struct fc_lport *lport)
+{
+ FC_LPORT_DBG(lport, "Entered READY from state %s\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_READY);
+ if (lport->vport)
+ fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
+ fc_vports_linkchange(lport);
+
+ if (!lport->ptp_rdata)
+ lport->tt.disc_start(fc_lport_disc_callback, lport);
+}
+
+/**
+ * fc_lport_set_port_id() - set the local port Port ID
+ * @lport: The local port which will have its Port ID set.
+ * @port_id: The new port ID.
+ * @fp: The frame containing the incoming request, or NULL.
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
+ struct fc_frame *fp)
+{
+ if (port_id)
+ printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
+ lport->host->host_no, port_id);
+
+ lport->port_id = port_id;
+
+ /* Update the fc_host */
+ fc_host_port_id(lport->host) = port_id;
+
+ if (lport->tt.lport_set_port_id)
+ lport->tt.lport_set_port_id(lport, port_id, fp);
+}
+
+/**
+ * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint
+ * @lport: The local port which will have its Port ID set.
+ * @port_id: The new port ID.
+ *
+ * Called by the lower-level driver when transport sets the local port_id.
+ * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
+ * discovery to be skipped.
+ */
+void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
+{
+ mutex_lock(&lport->lp_mutex);
+
+ fc_lport_set_port_id(lport, port_id, NULL);
+
+ switch (lport->state) {
+ case LPORT_ST_RESET:
+ case LPORT_ST_FLOGI:
+ if (port_id)
+ fc_lport_enter_ready(lport);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_lport_set_local_id);
+
+/**
+ * fc_lport_recv_flogi_req() - Receive a FLOGI request
+ * @lport: The local port that received the request
+ * @rx_fp: The FLOGI frame
+ *
+ * A received FLOGI request indicates a point-to-point connection.
+ * Accept it with the common service parameters indicating our N port.
+ * Set up to do a PLOGI if we have the higher-number WWPN.
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_flogi_req(struct fc_lport *lport,
+ struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct fc_els_flogi *flp;
+ struct fc_els_flogi *new_flp;
+ u64 remote_wwpn;
+ u32 remote_fid;
+ u32 local_fid;
+
+ FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
+ fc_lport_state(lport));
+
+ remote_fid = fc_frame_sid(rx_fp);
+ flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
+ if (!flp)
+ goto out;
+ remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
+ if (remote_wwpn == lport->wwpn) {
+ printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
+ "with same WWPN %16.16llx\n",
+ lport->host->host_no, remote_wwpn);
+ goto out;
+ }
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
+
+ /*
+ * XXX what is the right thing to do for FIDs?
+ * The originator might expect our S_ID to be 0xfffffe.
+ * But if so, both of us could end up with the same FID.
+ */
+ local_fid = FC_LOCAL_PTP_FID_LO;
+ if (remote_wwpn < lport->wwpn) {
+ local_fid = FC_LOCAL_PTP_FID_HI;
+ if (!remote_fid || remote_fid == local_fid)
+ remote_fid = FC_LOCAL_PTP_FID_LO;
+ } else if (!remote_fid) {
+ remote_fid = FC_LOCAL_PTP_FID_HI;
+ }
+
+ fc_lport_set_port_id(lport, local_fid, rx_fp);
+
+ fp = fc_frame_alloc(lport, sizeof(*flp));
+ if (fp) {
+ new_flp = fc_frame_payload_get(fp, sizeof(*flp));
+ fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
+ new_flp->fl_cmd = (u8) ELS_LS_ACC;
+
+ /*
+ * Send the response. If this fails, the originator should
+ * repeat the sequence.
+ */
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ fh = fc_frame_header_get(fp);
+ hton24(fh->fh_s_id, local_fid);
+ hton24(fh->fh_d_id, remote_fid);
+ lport->tt.frame_send(lport, fp);
+
+ } else {
+ fc_lport_error(lport, fp);
+ }
+ fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
+ get_unaligned_be64(&flp->fl_wwnn));
+out:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
+ * @lport: The local port that received the request
+ * @fp: The request frame
+ *
+ * This function will see if the lport handles the request or
+ * if an rport should handle the request.
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it will grab the lock.
+ */
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ void (*recv)(struct fc_lport *, struct fc_frame *);
+
+ mutex_lock(&lport->lp_mutex);
+
+ /*
+ * Handle special ELS cases like FLOGI, LOGO, and
+ * RSCN here. These don't require a session.
+ * Even if we had a session, it might not be ready.
+ */
+ if (!lport->link_up)
+ fc_frame_free(fp);
+ else {
+ /*
+ * Check opcode.
+ */
+ recv = lport->tt.rport_recv_req;
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_FLOGI:
+ if (!lport->point_to_multipoint)
+ recv = fc_lport_recv_flogi_req;
+ break;
+ case ELS_LOGO:
+ if (fc_frame_sid(fp) == FC_FID_FLOGI)
+ recv = fc_lport_recv_logo_req;
+ break;
+ case ELS_RSCN:
+ recv = lport->tt.disc_recv_req;
+ break;
+ case ELS_ECHO:
+ recv = fc_lport_recv_echo_req;
+ break;
+ case ELS_RLIR:
+ recv = fc_lport_recv_rlir_req;
+ break;
+ case ELS_RNID:
+ recv = fc_lport_recv_rnid_req;
+ break;
+ }
+
+ recv(lport, fp);
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out)
+{
+ return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+ .prli = fc_lport_els_prli,
+ .recv = fc_lport_recv_els_req,
+};
+
+/**
+ * fc_lport_recv_req() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it may grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = fr_seq(fp);
+ struct fc4_prov *prov;
+
+ /*
+ * Use RCU read lock and module_lock to be sure module doesn't
+ * deregister and get unloaded while we're calling it.
+ * try_module_get() is inlined and accepts a NULL parameter.
+ * Only ELSes and FCP target ops should come through here.
+ * The locking is unfortunate, and a better scheme is being sought.
+ */
+
+ rcu_read_lock();
+ if (fh->fh_type >= FC_FC4_PROV_SIZE)
+ goto drop;
+ prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+ if (!prov || !try_module_get(prov->module))
+ goto drop;
+ rcu_read_unlock();
+ prov->recv(lport, fp);
+ module_put(prov->module);
+ return;
+drop:
+ rcu_read_unlock();
+ FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+ fc_frame_free(fp);
+ if (sp)
+ lport->tt.exch_done(sp);
+}
+
+/**
+ * fc_lport_reset() - Reset a local port
+ * @lport: The local port which should be reset
+ *
+ * Locking Note: This functions should not be called with the
+ * lport lock held.
+ */
+int fc_lport_reset(struct fc_lport *lport)
+{
+ cancel_delayed_work_sync(&lport->retry_work);
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_reset(lport);
+ mutex_unlock(&lport->lp_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_reset);
+
+/**
+ * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
+ * @lport: The local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_reset_locked(struct fc_lport *lport)
+{
+ if (lport->dns_rdata)
+ lport->tt.rport_logoff(lport->dns_rdata);
+
+ if (lport->ptp_rdata) {
+ lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ lport->ptp_rdata = NULL;
+ }
+
+ lport->tt.disc_stop(lport);
+
+ lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_host_fabric_name(lport->host) = 0;
+
+ if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
+ fc_lport_set_port_id(lport, 0, NULL);
+}
+
+/**
+ * fc_lport_enter_reset() - Reset the local port
+ * @lport: The local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_reset(struct fc_lport *lport)
+{
+ FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
+ fc_lport_state(lport));
+
+ if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
+ return;
+
+ if (lport->vport) {
+ if (lport->link_up)
+ fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
+ else
+ fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
+ }
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
+ fc_host_post_event(lport->host, fc_get_event_number(),
+ FCH_EVT_LIPRESET, 0);
+ fc_vports_linkchange(lport);
+ fc_lport_reset_locked(lport);
+ if (lport->link_up)
+ fc_lport_enter_flogi(lport);
+}
+
+/**
+ * fc_lport_enter_disabled() - Disable the local port
+ * @lport: The local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_disabled(struct fc_lport *lport)
+{
+ FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_DISABLED);
+ fc_vports_linkchange(lport);
+ fc_lport_reset_locked(lport);
+}
+
+/**
+ * fc_lport_error() - Handler for any errors
+ * @lport: The local port that the error was on
+ * @fp: The error code encoded in a frame pointer
+ *
+ * If the error was caused by a resource allocation failure
+ * then wait for half a second and retry, otherwise retry
+ * after the e_d_tov time.
+ */
+static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
+{
+ unsigned long delay = 0;
+ FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
+ IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
+ lport->retry_count);
+
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ return;
+
+ /*
+ * Memory allocation failure, or the exchange timed out
+ * or we received LS_RJT.
+ * Retry after delay
+ */
+ if (lport->retry_count < lport->max_retry_count) {
+ lport->retry_count++;
+ if (!fp)
+ delay = msecs_to_jiffies(500);
+ else
+ delay = msecs_to_jiffies(lport->e_d_tov);
+
+ schedule_delayed_work(&lport->retry_work, delay);
+ } else
+ fc_lport_enter_reset(lport);
+}
+
+/**
+ * fc_lport_ns_resp() - Handle response to a name server
+ * registration exchange
+ * @sp: current sequence in exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
+ FC_LPORT_DBG(lport, "Received a name server response, "
+ "but in state %s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_DIR &&
+ ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+ ntohs(ct->ct_cmd) == FC_FS_ACC)
+ switch (lport->state) {
+ case LPORT_ST_RNN_ID:
+ fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
+ break;
+ case LPORT_ST_RSNN_NN:
+ fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
+ break;
+ case LPORT_ST_RSPN_ID:
+ fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+ break;
+ case LPORT_ST_RFT_ID:
+ fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
+ break;
+ case LPORT_ST_RFF_ID:
+ if (lport->fdmi_enabled)
+ fc_lport_enter_fdmi(lport);
+ else
+ fc_lport_enter_scr(lport);
+ break;
+ default:
+ /* should have already been caught by state checks */
+ break;
+ }
+ else
+ fc_lport_error(lport, fp);
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_ms_resp() - Handle response to a management server
+ * exchange
+ * @sp: current sequence in exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
+ FC_LPORT_DBG(lport, "Received a management server response, "
+ "but in state %s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_MGMT &&
+ ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
+ FC_LPORT_DBG(lport, "Received a management server response, "
+ "reason=%d explain=%d\n",
+ ct->ct_reason,
+ ct->ct_explan);
+
+ switch (lport->state) {
+ case LPORT_ST_RHBA:
+ if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+ fc_lport_enter_ms(lport, LPORT_ST_RPA);
+ else /* Error Skip RPA */
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_RPA:
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_DPRT:
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ case LPORT_ST_DHBA:
+ fc_lport_enter_ms(lport, LPORT_ST_DPRT);
+ break;
+ default:
+ /* should have already been caught by state checks */
+ break;
+ }
+ } else {
+ /* Invalid Frame? */
+ fc_lport_error(lport, fp);
+ }
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
+ * @sp: current sequence in SCR exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel lport port instance that sent the registration request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ u8 op;
+
+ FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state != LPORT_ST_SCR) {
+ FC_LPORT_DBG(lport, "Received a SCR response, but in state "
+ "%s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_ready(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_scr() - Send a SCR (State Change Register) request
+ * @lport: The local port to register for state changes
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_scr(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_SCR);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
+ fc_lport_scr_resp, lport,
+ 2 * lport->r_a_tov))
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_enter_ns() - register some object with the name server
+ * @lport: Fibre Channel local port to register
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
+{
+ struct fc_frame *fp;
+ enum fc_ns_req cmd;
+ int size = sizeof(struct fc_ct_hdr);
+ size_t len;
+
+ FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+ fc_lport_state_names[state],
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, state);
+
+ switch (state) {
+ case LPORT_ST_RNN_ID:
+ cmd = FC_NS_RNN_ID;
+ size += sizeof(struct fc_ns_rn_id);
+ break;
+ case LPORT_ST_RSNN_NN:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ /* if there is no symbolic name, skip to RFT_ID */
+ if (!len)
+ return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+ cmd = FC_NS_RSNN_NN;
+ size += sizeof(struct fc_ns_rsnn) + len;
+ break;
+ case LPORT_ST_RSPN_ID:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ /* if there is no symbolic name, skip to RFT_ID */
+ if (!len)
+ return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+ cmd = FC_NS_RSPN_ID;
+ size += sizeof(struct fc_ns_rspn) + len;
+ break;
+ case LPORT_ST_RFT_ID:
+ cmd = FC_NS_RFT_ID;
+ size += sizeof(struct fc_ns_rft);
+ break;
+ case LPORT_ST_RFF_ID:
+ cmd = FC_NS_RFF_ID;
+ size += sizeof(struct fc_ns_rff_id);
+ break;
+ default:
+ fc_lport_error(lport, NULL);
+ return;
+ }
+
+ fp = fc_frame_alloc(lport, size);
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
+ fc_lport_ns_resp,
+ lport, 3 * lport->r_a_tov))
+ fc_lport_error(lport, fp);
+}
+
+static struct fc_rport_operations fc_lport_rport_ops = {
+ .event_callback = fc_lport_rport_callback,
+};
+
+/**
+ * fc_rport_enter_dns() - Create a fc_rport for the name server
+ * @lport: The local port requesting a remote port for the name server
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_dns(struct fc_lport *lport)
+{
+ struct fc_rport_priv *rdata;
+
+ FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_DNS);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (!rdata)
+ goto err;
+
+ rdata->ops = &fc_lport_rport_ops;
+ lport->tt.rport_login(rdata);
+ return;
+
+err:
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_enter_ms() - management server commands
+ * @lport: Fibre Channel local port to register
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
+{
+ struct fc_frame *fp;
+ enum fc_fdmi_req cmd;
+ int size = sizeof(struct fc_ct_hdr);
+ size_t len;
+ int numattrs;
+
+ FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+ fc_lport_state_names[state],
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, state);
+
+ switch (state) {
+ case LPORT_ST_RHBA:
+ cmd = FC_FDMI_RHBA;
+ /* Number of HBA Attributes */
+ numattrs = 10;
+ len = sizeof(struct fc_fdmi_rhba);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+
+ size += len;
+ break;
+ case LPORT_ST_RPA:
+ cmd = FC_FDMI_RPA;
+ /* Number of Port Attributes */
+ numattrs = 6;
+ len = sizeof(struct fc_fdmi_rpa);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+ size += len;
+ break;
+ case LPORT_ST_DPRT:
+ cmd = FC_FDMI_DPRT;
+ len = sizeof(struct fc_fdmi_dprt);
+ size += len;
+ break;
+ case LPORT_ST_DHBA:
+ cmd = FC_FDMI_DHBA;
+ len = sizeof(struct fc_fdmi_dhba);
+ size += len;
+ break;
+ default:
+ fc_lport_error(lport, NULL);
+ return;
+ }
+
+ FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
+ cmd, (int)len, size);
+ fp = fc_frame_alloc(lport, size);
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
+ fc_lport_ms_resp,
+ lport, 3 * lport->r_a_tov))
+ fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_rport_enter_fdmi() - Create a fc_rport for the management server
+ * @lport: The local port requesting a remote port for the management server
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_fdmi(struct fc_lport *lport)
+{
+ struct fc_rport_priv *rdata;
+
+ FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_FDMI);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (!rdata)
+ goto err;
+
+ rdata->ops = &fc_lport_rport_ops;
+ lport->tt.rport_login(rdata);
+ return;
+
+err:
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_timeout() - Handler for the retry_work timer
+ * @work: The work struct of the local port
+ */
+static void fc_lport_timeout(struct work_struct *work)
+{
+ struct fc_lport *lport =
+ container_of(work, struct fc_lport,
+ retry_work.work);
+
+ mutex_lock(&lport->lp_mutex);
+
+ switch (lport->state) {
+ case LPORT_ST_DISABLED:
+ break;
+ case LPORT_ST_READY:
+ break;
+ case LPORT_ST_RESET:
+ break;
+ case LPORT_ST_FLOGI:
+ fc_lport_enter_flogi(lport);
+ break;
+ case LPORT_ST_DNS:
+ fc_lport_enter_dns(lport);
+ break;
+ case LPORT_ST_RNN_ID:
+ case LPORT_ST_RSNN_NN:
+ case LPORT_ST_RSPN_ID:
+ case LPORT_ST_RFT_ID:
+ case LPORT_ST_RFF_ID:
+ fc_lport_enter_ns(lport, lport->state);
+ break;
+ case LPORT_ST_FDMI:
+ fc_lport_enter_fdmi(lport);
+ break;
+ case LPORT_ST_RHBA:
+ case LPORT_ST_RPA:
+ case LPORT_ST_DHBA:
+ case LPORT_ST_DPRT:
+ FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
+ fc_lport_state(lport));
+ /* fall thru */
+ case LPORT_ST_SCR:
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_LOGO:
+ fc_lport_enter_logo(lport);
+ break;
+ }
+
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_logo_resp() - Handle response to LOGO request
+ * @sp: The sequence that the LOGO was on
+ * @fp: The LOGO frame
+ * @lp_arg: The lport port that received the LOGO request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ u8 op;
+
+ FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state != LPORT_ST_LOGO) {
+ FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
+ "%s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_disabled(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_lport_logo_resp);
+
+/**
+ * fc_rport_enter_logo() - Logout of the fabric
+ * @lport: The local port to be logged out
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_logo(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_els_logo *logo;
+
+ FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_LOGO);
+ fc_vports_linkchange(lport);
+
+ fp = fc_frame_alloc(lport, sizeof(*logo));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
+ fc_lport_logo_resp, lport,
+ 2 * lport->r_a_tov))
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_flogi_resp() - Handle response to FLOGI request
+ * @sp: The sequence that the FLOGI was on
+ * @fp: The FLOGI response frame
+ * @lp_arg: The lport port that received the FLOGI response
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_els_flogi *flp;
+ u32 did;
+ u16 csp_flags;
+ unsigned int r_a_tov;
+ unsigned int e_d_tov;
+ u16 mfs;
+
+ FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state != LPORT_ST_FLOGI) {
+ FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
+ "%s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ did = fc_frame_did(fp);
+ if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
+ fc_frame_payload_op(fp) != ELS_LS_ACC) {
+ FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (!flp) {
+ FC_LPORT_DBG(lport, "FLOGI bad response\n");
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ mfs = ntohs(flp->fl_csp.sp_bb_data) &
+ FC_SP_BB_DATA_MASK;
+
+ if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
+ FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+ "lport->mfs:%hu\n", mfs, lport->mfs);
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ if (mfs <= lport->mfs) {
+ lport->mfs = mfs;
+ fc_host_maxframe_size(lport->host) = mfs;
+ }
+
+ csp_flags = ntohs(flp->fl_csp.sp_features);
+ r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+ e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+ if (csp_flags & FC_SP_FT_EDTR)
+ e_d_tov /= 1000000;
+
+ lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
+
+ if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = 2 * e_d_tov;
+ fc_lport_set_port_id(lport, did, fp);
+ printk(KERN_INFO "host%d: libfc: "
+ "Port (%6.6x) entered "
+ "point-to-point mode\n",
+ lport->host->host_no, did);
+ fc_lport_ptp_setup(lport, fc_frame_sid(fp),
+ get_unaligned_be64(
+ &flp->fl_wwpn),
+ get_unaligned_be64(
+ &flp->fl_wwnn));
+ } else {
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = r_a_tov;
+ fc_host_fabric_name(lport->host) =
+ get_unaligned_be64(&flp->fl_wwnn);
+ fc_lport_set_port_id(lport, did, fp);
+ fc_lport_enter_dns(lport);
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_lport_flogi_resp);
+
+/**
+ * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
+ * @lport: Fibre Channel local port to be logged in to the fabric
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_flogi(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_FLOGI);
+
+ if (lport->point_to_multipoint) {
+ if (lport->port_id)
+ fc_lport_enter_ready(lport);
+ return;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp)
+ return fc_lport_error(lport, fp);
+
+ if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
+ lport->vport ? ELS_FDISC : ELS_FLOGI,
+ fc_lport_flogi_resp, lport,
+ lport->vport ? 2 * lport->r_a_tov :
+ lport->e_d_tov))
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_config() - Configure a fc_lport
+ * @lport: The local port to be configured
+ */
+int fc_lport_config(struct fc_lport *lport)
+{
+ INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
+ mutex_init(&lport->lp_mutex);
+
+ fc_lport_state_enter(lport, LPORT_ST_DISABLED);
+
+ fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
+ fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+ fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_config);
+
+/**
+ * fc_lport_init() - Initialize the lport layer for a local port
+ * @lport: The local port to initialize the exchange layer for
+ */
+int fc_lport_init(struct fc_lport *lport)
+{
+ if (!lport->tt.lport_recv)
+ lport->tt.lport_recv = fc_lport_recv_req;
+
+ if (!lport->tt.lport_reset)
+ lport->tt.lport_reset = fc_lport_reset;
+
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ fc_host_node_name(lport->host) = lport->wwnn;
+ fc_host_port_name(lport->host) = lport->wwpn;
+ fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(lport->host), 0,
+ sizeof(fc_host_supported_fc4s(lport->host)));
+ fc_host_supported_fc4s(lport->host)[2] = 1;
+ fc_host_supported_fc4s(lport->host)[7] = 1;
+
+ /* This value is also unchanging */
+ memset(fc_host_active_fc4s(lport->host), 0,
+ sizeof(fc_host_active_fc4s(lport->host)));
+ fc_host_active_fc4s(lport->host)[2] = 1;
+ fc_host_active_fc4s(lport->host)[7] = 1;
+ fc_host_maxframe_size(lport->host) = lport->mfs;
+ fc_host_supported_speeds(lport->host) = 0;
+ if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ fc_fc4_add_lport(lport);
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_init);
+
+/**
+ * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
+ * @sp: The sequence for the FC Passthrough response
+ * @fp: The response frame
+ * @info_arg: The BSG info that the response is for
+ */
+static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *info_arg)
+{
+ struct fc_bsg_info *info = info_arg;
+ struct fc_bsg_job *job = info->job;
+ struct fc_lport *lport = info->lport;
+ struct fc_frame_header *fh;
+ size_t len;
+ void *buf;
+
+ if (IS_ERR(fp)) {
+ job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+ -ECONNABORTED : -ETIMEDOUT;
+ job->reply_len = sizeof(uint32_t);
+ job->state_flags |= FC_RQST_STATE_DONE;
+ job->job_done(job);
+ kfree(info);
+ return;
+ }
+
+ mutex_lock(&lport->lp_mutex);
+ fh = fc_frame_header_get(fp);
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
+ /* Get the response code from the first frame payload */
+ unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
+ ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
+ (unsigned short)fc_frame_payload_op(fp);
+
+ /* Save the reply status of the job */
+ job->reply->reply_data.ctels_reply.status =
+ (cmd == info->rsp_code) ?
+ FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
+ }
+
+ job->reply->reply_payload_rcv_len +=
+ fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
+ &info->offset, NULL);
+
+ if (fr_eof(fp) == FC_EOF_T &&
+ (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+ (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+ if (job->reply->reply_payload_rcv_len >
+ job->reply_payload.payload_len)
+ job->reply->reply_payload_rcv_len =
+ job->reply_payload.payload_len;
+ job->reply->result = 0;
+ job->state_flags |= FC_RQST_STATE_DONE;
+ job->job_done(job);
+ kfree(info);
+ }
+ fc_frame_free(fp);
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_els_request() - Send ELS passthrough request
+ * @job: The BSG Passthrough job
+ * @lport: The local port sending the request
+ * @did: The destination port id
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static int fc_lport_els_request(struct fc_bsg_job *job,
+ struct fc_lport *lport,
+ u32 did, u32 tov)
+{
+ struct fc_bsg_info *info;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ char *pp;
+ int len;
+
+ fp = fc_frame_alloc(lport, job->request_payload.payload_len);
+ if (!fp)
+ return -ENOMEM;
+
+ len = job->request_payload.payload_len;
+ pp = fc_frame_payload_get(fp, len);
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pp, len);
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_ELS_REQ;
+ hton24(fh->fh_d_id, did);
+ hton24(fh->fh_s_id, lport->port_id);
+ fh->fh_type = FC_TYPE_ELS;
+ hton24(fh->fh_f_ctl, FC_FCTL_REQ);
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = 0;
+
+ info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
+ if (!info) {
+ fc_frame_free(fp);
+ return -ENOMEM;
+ }
+
+ info->job = job;
+ info->lport = lport;
+ info->rsp_code = ELS_LS_ACC;
+ info->nents = job->reply_payload.sg_cnt;
+ info->sg = job->reply_payload.sg_list;
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
+ kfree(info);
+ return -ECOMM;
+ }
+ return 0;
+}
+
+/**
+ * fc_lport_ct_request() - Send CT Passthrough request
+ * @job: The BSG Passthrough job
+ * @lport: The local port sending the request
+ * @did: The destination FC-ID
+ * @tov: The timeout period to wait for the response
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static int fc_lport_ct_request(struct fc_bsg_job *job,
+ struct fc_lport *lport, u32 did, u32 tov)
+{
+ struct fc_bsg_info *info;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct fc_ct_req *ct;
+ size_t len;
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+ job->request_payload.payload_len);
+ if (!fp)
+ return -ENOMEM;
+
+ len = job->request_payload.payload_len;
+ ct = fc_frame_payload_get(fp, len);
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ ct, len);
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ hton24(fh->fh_d_id, did);
+ hton24(fh->fh_s_id, lport->port_id);
+ fh->fh_type = FC_TYPE_CT;
+ hton24(fh->fh_f_ctl, FC_FCTL_REQ);
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = 0;
+
+ info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
+ if (!info) {
+ fc_frame_free(fp);
+ return -ENOMEM;
+ }
+
+ info->job = job;
+ info->lport = lport;
+ info->rsp_code = FC_FS_ACC;
+ info->nents = job->reply_payload.sg_cnt;
+ info->sg = job->reply_payload.sg_list;
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
+ kfree(info);
+ return -ECOMM;
+ }
+ return 0;
+}
+
+/**
+ * fc_lport_bsg_request() - The common entry point for sending
+ * FC Passthrough requests
+ * @job: The BSG passthrough job
+ */
+int fc_lport_bsg_request(struct fc_bsg_job *job)
+{
+ struct request *rsp = job->req->next_rq;
+ struct Scsi_Host *shost = job->shost;
+ struct fc_lport *lport = shost_priv(shost);
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ int rc = -EINVAL;
+ u32 did;
+
+ job->reply->reply_payload_rcv_len = 0;
+ if (rsp)
+ rsp->resid_len = job->reply_payload.payload_len;
+
+ mutex_lock(&lport->lp_mutex);
+
+ switch (job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ rport = job->rport;
+ if (!rport)
+ break;
+
+ rdata = rport->dd_data;
+ rc = fc_lport_els_request(job, lport, rport->port_id,
+ rdata->e_d_tov);
+ break;
+
+ case FC_BSG_RPT_CT:
+ rport = job->rport;
+ if (!rport)
+ break;
+
+ rdata = rport->dd_data;
+ rc = fc_lport_ct_request(job, lport, rport->port_id,
+ rdata->e_d_tov);
+ break;
+
+ case FC_BSG_HST_CT:
+ did = ntoh24(job->request->rqst_data.h_ct.port_id);
+ if (did == FC_FID_DIR_SERV)
+ rdata = lport->dns_rdata;
+ else
+ rdata = lport->tt.rport_lookup(lport, did);
+
+ if (!rdata)
+ break;
+
+ rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
+ break;
+
+ case FC_BSG_HST_ELS_NOLOGIN:
+ did = ntoh24(job->request->rqst_data.h_els.port_id);
+ rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
+ break;
+ }
+
+ mutex_unlock(&lport->lp_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fc_lport_bsg_request);
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
new file mode 100644
index 000000000..9fbf78ed8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * NPIV VN_Port helper functions for libfc
+ */
+
+#include <scsi/libfc.h>
+#include <linux/export.h>
+
+/**
+ * fc_vport_create() - Create a new NPIV vport instance
+ * @vport: fc_vport structure from scsi_transport_fc
+ * @privsize: driver private data size to allocate along with the Scsi_Host
+ */
+
+struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+
+ vn_port = libfc_host_alloc(shost->hostt, privsize);
+ if (!vn_port)
+ return vn_port;
+
+ vn_port->vport = vport;
+ vport->dd_data = vn_port;
+
+ mutex_lock(&n_port->lp_mutex);
+ list_add_tail(&vn_port->list, &n_port->vports);
+ mutex_unlock(&n_port->lp_mutex);
+
+ return vn_port;
+}
+EXPORT_SYMBOL(libfc_vport_create);
+
+/**
+ * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID
+ * @n_port: Top level N_Port which may have multiple NPIV VN_Ports
+ * @port_id: Fabric ID to find a match for
+ *
+ * Returns: matching lport pointer or NULL if there is no match
+ */
+struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
+{
+ struct fc_lport *lport = NULL;
+ struct fc_lport *vn_port;
+
+ if (n_port->port_id == port_id)
+ return n_port;
+
+ if (port_id == FC_FID_FLOGI)
+ return n_port; /* for point-to-point */
+
+ mutex_lock(&n_port->lp_mutex);
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (vn_port->port_id == port_id) {
+ lport = vn_port;
+ break;
+ }
+ }
+ mutex_unlock(&n_port->lp_mutex);
+
+ return lport;
+}
+EXPORT_SYMBOL(fc_vport_id_lookup);
+
+/*
+ * When setting the link state of vports during an lport state change, it's
+ * necessary to hold the lp_mutex of both the N_Port and the VN_Port.
+ * This tells the lockdep engine to treat the nested locking of the VN_Port
+ * as a different lock class.
+ */
+enum libfc_lport_mutex_class {
+ LPORT_MUTEX_NORMAL = 0,
+ LPORT_MUTEX_VN_PORT = 1,
+};
+
+/**
+ * __fc_vport_setlink() - update link and status on a VN_Port
+ * @n_port: parent N_Port
+ * @vn_port: VN_Port to update
+ *
+ * Locking: must be called with both the N_Port and VN_Port lp_mutex held
+ */
+static void __fc_vport_setlink(struct fc_lport *n_port,
+ struct fc_lport *vn_port)
+{
+ struct fc_vport *vport = vn_port->vport;
+
+ if (vn_port->state == LPORT_ST_DISABLED)
+ return;
+
+ if (n_port->state == LPORT_ST_READY) {
+ if (n_port->npiv_enabled) {
+ fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+ __fc_linkup(vn_port);
+ } else {
+ fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ __fc_linkdown(vn_port);
+ }
+ } else {
+ fc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ __fc_linkdown(vn_port);
+ }
+}
+
+/**
+ * fc_vport_setlink() - update link and status on a VN_Port
+ * @vn_port: virtual port to update
+ */
+void fc_vport_setlink(struct fc_lport *vn_port)
+{
+ struct fc_vport *vport = vn_port->vport;
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+
+ mutex_lock(&n_port->lp_mutex);
+ mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
+ __fc_vport_setlink(n_port, vn_port);
+ mutex_unlock(&vn_port->lp_mutex);
+ mutex_unlock(&n_port->lp_mutex);
+}
+EXPORT_SYMBOL(fc_vport_setlink);
+
+/**
+ * fc_vports_linkchange() - change the link state of all vports
+ * @n_port: Parent N_Port that has changed state
+ *
+ * Locking: called with the n_port lp_mutex held
+ */
+void fc_vports_linkchange(struct fc_lport *n_port)
+{
+ struct fc_lport *vn_port;
+
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
+ __fc_vport_setlink(n_port, vn_port);
+ mutex_unlock(&vn_port->lp_mutex);
+ }
+}
+
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000..589ff9aed
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,2069 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * RPORT GENERAL INFO
+ *
+ * This file contains all processing regarding fc_rports. It contains the
+ * rport state machine and does all rport interaction with the transport class.
+ * There should be no other places in libfc that interact directly with the
+ * transport class in regards to adding and deleting rports.
+ *
+ * fc_rport's represent N_Port's within the fabric.
+ */
+
+/*
+ * RPORT LOCKING
+ *
+ * The rport should never hold the rport mutex and then attempt to acquire
+ * either the lport or disc mutexes. The rport's mutex is considered lesser
+ * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
+ * more comments on the hierarchy.
+ *
+ * The locking strategy is similar to the lport's strategy. The lock protects
+ * the rport's states and is held and released by the entry points to the rport
+ * block. All _enter_* functions correspond to rport states and expect the rport
+ * mutex to be locked before calling them. This means that rports only handle
+ * one request or response at a time, since they're not critical for the I/O
+ * path this potential over-use of the mutex is acceptable.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/export.h>
+#include <asm/unaligned.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include "fc_libfc.h"
+
+static struct workqueue_struct *rport_event_queue;
+
+static void fc_rport_enter_flogi(struct fc_rport_priv *);
+static void fc_rport_enter_plogi(struct fc_rport_priv *);
+static void fc_rport_enter_prli(struct fc_rport_priv *);
+static void fc_rport_enter_rtv(struct fc_rport_priv *);
+static void fc_rport_enter_ready(struct fc_rport_priv *);
+static void fc_rport_enter_logo(struct fc_rport_priv *);
+static void fc_rport_enter_adisc(struct fc_rport_priv *);
+
+static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *);
+static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
+static void fc_rport_timeout(struct work_struct *);
+static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_work(struct work_struct *);
+
+static const char *fc_rport_state_names[] = {
+ [RPORT_ST_INIT] = "Init",
+ [RPORT_ST_FLOGI] = "FLOGI",
+ [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT",
+ [RPORT_ST_PLOGI] = "PLOGI",
+ [RPORT_ST_PRLI] = "PRLI",
+ [RPORT_ST_RTV] = "RTV",
+ [RPORT_ST_READY] = "Ready",
+ [RPORT_ST_ADISC] = "ADISC",
+ [RPORT_ST_DELETE] = "Delete",
+};
+
+/**
+ * fc_rport_lookup() - Lookup a remote port by port_id
+ * @lport: The local port to lookup the remote port on
+ * @port_id: The remote port ID to look up
+ *
+ * The caller must hold either disc_mutex or rcu_read_lock().
+ */
+static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id)
+{
+ struct fc_rport_priv *rdata;
+
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
+ if (rdata->ids.port_id == port_id)
+ return rdata;
+ return NULL;
+}
+
+/**
+ * fc_rport_create() - Create a new remote port
+ * @lport: The local port this remote port will be associated with
+ * @ids: The identifiers for the new remote port
+ *
+ * The remote port will start in the INIT state.
+ *
+ * Locking note: must be called with the disc_mutex held.
+ */
+static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
+ u32 port_id)
+{
+ struct fc_rport_priv *rdata;
+
+ rdata = lport->tt.rport_lookup(lport, port_id);
+ if (rdata)
+ return rdata;
+
+ rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+ if (!rdata)
+ return NULL;
+
+ rdata->ids.node_name = -1;
+ rdata->ids.port_name = -1;
+ rdata->ids.port_id = port_id;
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ kref_init(&rdata->kref);
+ mutex_init(&rdata->rp_mutex);
+ rdata->local_port = lport;
+ rdata->rp_state = RPORT_ST_INIT;
+ rdata->event = RPORT_EV_NONE;
+ rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+ rdata->e_d_tov = lport->e_d_tov;
+ rdata->r_a_tov = lport->r_a_tov;
+ rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
+ INIT_WORK(&rdata->event_work, fc_rport_work);
+ if (port_id != FC_FID_DIR_SERV) {
+ rdata->lld_event_callback = lport->tt.rport_event_callback;
+ list_add_rcu(&rdata->peers, &lport->disc.rports);
+ }
+ return rdata;
+}
+
+/**
+ * fc_rport_destroy() - Free a remote port after last reference is released
+ * @kref: The remote port's kref
+ */
+static void fc_rport_destroy(struct kref *kref)
+{
+ struct fc_rport_priv *rdata;
+
+ rdata = container_of(kref, struct fc_rport_priv, kref);
+ kfree_rcu(rdata, rcu);
+}
+
+/**
+ * fc_rport_state() - Return a string identifying the remote port's state
+ * @rdata: The remote port
+ */
+static const char *fc_rport_state(struct fc_rport_priv *rdata)
+{
+ const char *cp;
+
+ cp = fc_rport_state_names[rdata->rp_state];
+ if (!cp)
+ cp = "Unknown";
+ return cp;
+}
+
+/**
+ * fc_set_rport_loss_tmo() - Set the remote port loss timeout
+ * @rport: The remote port that gets a new timeout value
+ * @timeout: The new timeout value (in seconds)
+ */
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+EXPORT_SYMBOL(fc_set_rport_loss_tmo);
+
+/**
+ * fc_plogi_get_maxframe() - Get the maximum payload from the common service
+ * parameters in a FLOGI frame
+ * @flp: The FLOGI or PLOGI payload
+ * @maxval: The maximum frame size upper limit; this may be less than what
+ * is in the service parameters
+ */
+static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
+ unsigned int maxval)
+{
+ unsigned int mfs;
+
+ /*
+ * Get max payload from the common service parameters and the
+ * class 3 receive data field size.
+ */
+ mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ return maxval;
+}
+
+/**
+ * fc_rport_state_enter() - Change the state of a remote port
+ * @rdata: The remote port whose state should change
+ * @new: The new state
+ *
+ * Locking Note: Called with the rport lock held
+ */
+static void fc_rport_state_enter(struct fc_rport_priv *rdata,
+ enum fc_rport_state new)
+{
+ if (rdata->rp_state != new)
+ rdata->retries = 0;
+ rdata->rp_state = new;
+}
+
+/**
+ * fc_rport_work() - Handler for remote port events in the rport_event_queue
+ * @work: Handle to the remote port being dequeued
+ */
+static void fc_rport_work(struct work_struct *work)
+{
+ u32 port_id;
+ struct fc_rport_priv *rdata =
+ container_of(work, struct fc_rport_priv, event_work);
+ struct fc_rport_libfc_priv *rpriv;
+ enum fc_rport_event event;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport_operations *rport_ops;
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct fc4_prov *prov;
+ u8 type;
+
+ mutex_lock(&rdata->rp_mutex);
+ event = rdata->event;
+ rport_ops = rdata->ops;
+ rport = rdata->rport;
+
+ FC_RPORT_DBG(rdata, "work event %u\n", event);
+
+ switch (event) {
+ case RPORT_EV_READY:
+ ids = rdata->ids;
+ rdata->event = RPORT_EV_NONE;
+ rdata->major_retries = 0;
+ kref_get(&rdata->kref);
+ mutex_unlock(&rdata->rp_mutex);
+
+ if (!rport)
+ rport = fc_remote_port_add(lport->host, 0, &ids);
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "Failed to add the rport\n");
+ lport->tt.rport_logoff(rdata);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+ return;
+ }
+ mutex_lock(&rdata->rp_mutex);
+ if (rdata->rport)
+ FC_RPORT_DBG(rdata, "rport already allocated\n");
+ rdata->rport = rport;
+ rport->maxframe_size = rdata->maxframe_size;
+ rport->supported_classes = rdata->supported_classes;
+
+ rpriv = rport->dd_data;
+ rpriv->local_port = lport;
+ rpriv->rp_state = rdata->rp_state;
+ rpriv->flags = rdata->flags;
+ rpriv->e_d_tov = rdata->e_d_tov;
+ rpriv->r_a_tov = rdata->r_a_tov;
+ mutex_unlock(&rdata->rp_mutex);
+
+ if (rport_ops && rport_ops->event_callback) {
+ FC_RPORT_DBG(rdata, "callback ev %d\n", event);
+ rport_ops->event_callback(lport, rdata, event);
+ }
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+ break;
+
+ case RPORT_EV_FAILED:
+ case RPORT_EV_LOGO:
+ case RPORT_EV_STOP:
+ if (rdata->prli_count) {
+ mutex_lock(&fc_prov_mutex);
+ for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+ prov = fc_passive_prov[type];
+ if (prov && prov->prlo)
+ prov->prlo(rdata);
+ }
+ mutex_unlock(&fc_prov_mutex);
+ }
+ port_id = rdata->ids.port_id;
+ mutex_unlock(&rdata->rp_mutex);
+
+ if (rport_ops && rport_ops->event_callback) {
+ FC_RPORT_DBG(rdata, "callback ev %d\n", event);
+ rport_ops->event_callback(lport, rdata, event);
+ }
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
+ cancel_delayed_work_sync(&rdata->retry_work);
+
+ /*
+ * Reset any outstanding exchanges before freeing rport.
+ */
+ lport->tt.exch_mgr_reset(lport, 0, port_id);
+ lport->tt.exch_mgr_reset(lport, port_id, 0);
+
+ if (rport) {
+ rpriv = rport->dd_data;
+ rpriv->rp_state = RPORT_ST_DELETE;
+ mutex_lock(&rdata->rp_mutex);
+ rdata->rport = NULL;
+ mutex_unlock(&rdata->rp_mutex);
+ fc_remote_port_delete(rport);
+ }
+
+ mutex_lock(&lport->disc.disc_mutex);
+ mutex_lock(&rdata->rp_mutex);
+ if (rdata->rp_state == RPORT_ST_DELETE) {
+ if (port_id == FC_FID_DIR_SERV) {
+ rdata->event = RPORT_EV_NONE;
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+ } else if ((rdata->flags & FC_RP_STARTED) &&
+ rdata->major_retries <
+ lport->max_rport_retry_count) {
+ rdata->major_retries++;
+ rdata->event = RPORT_EV_NONE;
+ FC_RPORT_DBG(rdata, "work restart\n");
+ fc_rport_enter_flogi(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ } else {
+ FC_RPORT_DBG(rdata, "work delete\n");
+ list_del_rcu(&rdata->peers);
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+ }
+ } else {
+ /*
+ * Re-open for events. Reissue READY event if ready.
+ */
+ rdata->event = RPORT_EV_NONE;
+ if (rdata->rp_state == RPORT_ST_READY)
+ fc_rport_enter_ready(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ }
+ mutex_unlock(&lport->disc.disc_mutex);
+ break;
+
+ default:
+ mutex_unlock(&rdata->rp_mutex);
+ break;
+ }
+}
+
+/**
+ * fc_rport_login() - Start the remote port login state machine
+ * @rdata: The remote port to be logged in to
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ *
+ * This indicates the intent to be logged into the remote port.
+ * If it appears we are already logged in, ADISC is used to verify
+ * the setup.
+ */
+static int fc_rport_login(struct fc_rport_priv *rdata)
+{
+ mutex_lock(&rdata->rp_mutex);
+
+ rdata->flags |= FC_RP_STARTED;
+ switch (rdata->rp_state) {
+ case RPORT_ST_READY:
+ FC_RPORT_DBG(rdata, "ADISC port\n");
+ fc_rport_enter_adisc(rdata);
+ break;
+ case RPORT_ST_DELETE:
+ FC_RPORT_DBG(rdata, "Restart deleted port\n");
+ break;
+ default:
+ FC_RPORT_DBG(rdata, "Login to port\n");
+ fc_rport_enter_flogi(rdata);
+ break;
+ }
+ mutex_unlock(&rdata->rp_mutex);
+
+ return 0;
+}
+
+/**
+ * fc_rport_enter_delete() - Schedule a remote port to be deleted
+ * @rdata: The remote port to be deleted
+ * @event: The event to report as the reason for deletion
+ *
+ * Locking Note: Called with the rport lock held.
+ *
+ * Allow state change into DELETE only once.
+ *
+ * Call queue_work only if there's no event already pending.
+ * Set the new event so that the old pending event will not occur.
+ * Since we have the mutex, even if fc_rport_work() is already started,
+ * it'll see the new event.
+ */
+static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ if (rdata->rp_state == RPORT_ST_DELETE)
+ return;
+
+ FC_RPORT_DBG(rdata, "Delete port\n");
+
+ fc_rport_state_enter(rdata, RPORT_ST_DELETE);
+
+ if (rdata->event == RPORT_EV_NONE)
+ queue_work(rport_event_queue, &rdata->event_work);
+ rdata->event = event;
+}
+
+/**
+ * fc_rport_logoff() - Logoff and remove a remote port
+ * @rdata: The remote port to be logged off of
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
+{
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Remove port\n");
+
+ rdata->flags &= ~FC_RP_STARTED;
+ if (rdata->rp_state == RPORT_ST_DELETE) {
+ FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
+ goto out;
+ }
+ fc_rport_enter_logo(rdata);
+
+ /*
+ * Change the state to Delete so that we discard
+ * the response.
+ */
+ fc_rport_enter_delete(rdata, RPORT_EV_STOP);
+out:
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+}
+
+/**
+ * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
+ * @rdata: The remote port that is ready
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
+{
+ fc_rport_state_enter(rdata, RPORT_ST_READY);
+
+ FC_RPORT_DBG(rdata, "Port is Ready\n");
+
+ if (rdata->event == RPORT_EV_NONE)
+ queue_work(rport_event_queue, &rdata->event_work);
+ rdata->event = RPORT_EV_READY;
+}
+
+/**
+ * fc_rport_timeout() - Handler for the retry_work timer
+ * @work: Handle to the remote port that has timed out
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+static void fc_rport_timeout(struct work_struct *work)
+{
+ struct fc_rport_priv *rdata =
+ container_of(work, struct fc_rport_priv, retry_work.work);
+
+ mutex_lock(&rdata->rp_mutex);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_FLOGI:
+ fc_rport_enter_flogi(rdata);
+ break;
+ case RPORT_ST_PLOGI:
+ fc_rport_enter_plogi(rdata);
+ break;
+ case RPORT_ST_PRLI:
+ fc_rport_enter_prli(rdata);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_rtv(rdata);
+ break;
+ case RPORT_ST_ADISC:
+ fc_rport_enter_adisc(rdata);
+ break;
+ case RPORT_ST_PLOGI_WAIT:
+ case RPORT_ST_READY:
+ case RPORT_ST_INIT:
+ case RPORT_ST_DELETE:
+ break;
+ }
+
+ mutex_unlock(&rdata->rp_mutex);
+}
+
+/**
+ * fc_rport_error() - Error handler, called once retries have been exhausted
+ * @rdata: The remote port the error is happened on
+ * @fp: The error code encapsulated in a frame pointer
+ *
+ * Locking Note: The rport lock is expected to be held before
+ * calling this routine
+ */
+static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+{
+ FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
+ IS_ERR(fp) ? -PTR_ERR(fp) : 0,
+ fc_rport_state(rdata), rdata->retries);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_FLOGI:
+ case RPORT_ST_PLOGI:
+ rdata->flags &= ~FC_RP_STARTED;
+ fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_ready(rdata);
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_ADISC:
+ fc_rport_enter_logo(rdata);
+ break;
+ case RPORT_ST_PLOGI_WAIT:
+ case RPORT_ST_DELETE:
+ case RPORT_ST_READY:
+ case RPORT_ST_INIT:
+ break;
+ }
+}
+
+/**
+ * fc_rport_error_retry() - Handler for remote port state retries
+ * @rdata: The remote port whose state is to be retried
+ * @fp: The error code encapsulated in a frame pointer
+ *
+ * If the error was an exchange timeout retry immediately,
+ * otherwise wait for E_D_TOV.
+ *
+ * Locking Note: The rport lock is expected to be held before
+ * calling this routine
+ */
+static void fc_rport_error_retry(struct fc_rport_priv *rdata,
+ struct fc_frame *fp)
+{
+ unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+
+ /* make sure this isn't an FC_EX_CLOSED error, never retry those */
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+
+ if (rdata->retries < rdata->local_port->max_rport_retry_count) {
+ FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
+ PTR_ERR(fp), fc_rport_state(rdata));
+ rdata->retries++;
+ /* no additional delay on exchange timeouts */
+ if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+ delay = 0;
+ schedule_delayed_work(&rdata->retry_work, delay);
+ return;
+ }
+
+out:
+ fc_rport_error(rdata, fp);
+}
+
+/**
+ * fc_rport_login_complete() - Handle parameters and completion of p-mp login.
+ * @rdata: The remote port which we logged into or which logged into us.
+ * @fp: The FLOGI or PLOGI request or response frame
+ *
+ * Returns non-zero error if a problem is detected with the frame.
+ * Does not free the frame.
+ *
+ * This is only used in point-to-multipoint mode for FIP currently.
+ */
+static int fc_rport_login_complete(struct fc_rport_priv *rdata,
+ struct fc_frame *fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *flogi;
+ unsigned int e_d_tov;
+ u16 csp_flags;
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ if (!flogi)
+ return -EINVAL;
+
+ csp_flags = ntohs(flogi->fl_csp.sp_features);
+
+ if (fc_frame_payload_op(fp) == ELS_FLOGI) {
+ if (csp_flags & FC_SP_FT_FPORT) {
+ FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n");
+ return -EINVAL;
+ }
+ } else {
+
+ /*
+ * E_D_TOV is not valid on an incoming FLOGI request.
+ */
+ e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov);
+ if (csp_flags & FC_SP_FT_EDTR)
+ e_d_tov /= 1000000;
+ if (e_d_tov > rdata->e_d_tov)
+ rdata->e_d_tov = e_d_tov;
+ }
+ rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs);
+ return 0;
+}
+
+/**
+ * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode
+ * @sp: The sequence that the FLOGI was on
+ * @fp: The FLOGI response frame
+ * @rp_arg: The remote port that received the FLOGI response
+ */
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport_priv *rdata = rp_arg;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *flogi;
+ unsigned int r_a_tov;
+
+ FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rdata, fp);
+ goto err;
+ }
+
+ if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+ goto bad;
+ if (fc_rport_login_complete(rdata, fp))
+ goto bad;
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ if (!flogi)
+ goto bad;
+ r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
+ if (r_a_tov > rdata->r_a_tov)
+ rdata->r_a_tov = r_a_tov;
+
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+put:
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ return;
+bad:
+ FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
+ fc_rport_error_retry(rdata, fp);
+ goto out;
+}
+
+/**
+ * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
+ * @rdata: The remote port to send a FLOGI to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ if (!lport->point_to_multipoint)
+ return fc_rport_enter_plogi(rdata);
+
+ FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_FLOGI);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp)
+ return fc_rport_error_retry(rdata, fp);
+
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
+ fc_rport_flogi_resp, rdata,
+ 2 * lport->r_a_tov))
+ fc_rport_error_retry(rdata, NULL);
+ else
+ kref_get(&rdata->kref);
+}
+
+/**
+ * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
+ * @lport: The local port that received the PLOGI request
+ * @rx_fp: The PLOGI request frame
+ */
+static void fc_rport_recv_flogi_req(struct fc_lport *lport,
+ struct fc_frame *rx_fp)
+{
+ struct fc_disc *disc;
+ struct fc_els_flogi *flp;
+ struct fc_rport_priv *rdata;
+ struct fc_frame *fp = rx_fp;
+ struct fc_seq_els_data rjt_data;
+ u32 sid;
+
+ sid = fc_frame_sid(fp);
+
+ FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
+
+ disc = &lport->disc;
+ mutex_lock(&disc->disc_mutex);
+
+ if (!lport->point_to_multipoint) {
+ rjt_data.reason = ELS_RJT_UNSUP;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (!flp) {
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ goto reject;
+ }
+
+ rdata = lport->tt.rport_lookup(lport, sid);
+ if (!rdata) {
+ rjt_data.reason = ELS_RJT_FIP;
+ rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
+ goto reject;
+ }
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n",
+ fc_rport_state(rdata));
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_INIT:
+ /*
+ * If received the FLOGI request on RPORT which is INIT state
+ * (means not transition to FLOGI either fc_rport timeout
+ * function didn;t trigger or this end hasn;t received
+ * beacon yet from other end. In that case only, allow RPORT
+ * state machine to continue, otherwise fall through which
+ * causes the code to send reject response.
+ * NOTE; Not checking for FIP->state such as VNMP_UP or
+ * VNMP_CLAIM because if FIP state is not one of those,
+ * RPORT wouldn;t have created and 'rport_lookup' would have
+ * failed anyway in that case.
+ */
+ if (lport->point_to_multipoint)
+ break;
+ case RPORT_ST_DELETE:
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_FIP;
+ rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
+ goto reject;
+ case RPORT_ST_FLOGI:
+ case RPORT_ST_PLOGI_WAIT:
+ case RPORT_ST_PLOGI:
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ case RPORT_ST_ADISC:
+ /*
+ * Set the remote port to be deleted and to then restart.
+ * This queues work to be sure exchanges are reset.
+ */
+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_BUSY;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(*flp));
+ if (!fp)
+ goto out;
+
+ fc_flogi_fill(lport, fp);
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ flp->fl_cmd = ELS_LS_ACC;
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+out:
+ mutex_unlock(&rdata->rp_mutex);
+ mutex_unlock(&disc->disc_mutex);
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ mutex_unlock(&disc->disc_mutex);
+ lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_plogi_resp() - Handler for ELS PLOGI responses
+ * @sp: The sequence the PLOGI is on
+ * @fp: The PLOGI response frame
+ * @rdata_arg: The remote port that sent the PLOGI response
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *plp = NULL;
+ u16 csp_seq;
+ u16 cssp_seq;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
+
+ if (rdata->rp_state != RPORT_ST_PLOGI) {
+ FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error_retry(rdata, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC &&
+ (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+ rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
+ rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
+
+ /* save plogi response sp_features for further reference */
+ rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
+ if (lport->point_to_multipoint)
+ fc_rport_login_complete(rdata, fp);
+ csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+ cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
+ if (cssp_seq < csp_seq)
+ csp_seq = cssp_seq;
+ rdata->max_seq = csp_seq;
+ rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
+ fc_rport_enter_prli(rdata);
+ } else
+ fc_rport_error_retry(rdata, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+}
+
+static bool
+fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
+{
+ if (rdata->ids.roles == FC_PORT_ROLE_UNKNOWN)
+ return true;
+ if ((rdata->ids.roles & FC_PORT_ROLE_FCP_TARGET) &&
+ (lport->service_params & FCP_SPPF_INIT_FCN))
+ return true;
+ if ((rdata->ids.roles & FC_PORT_ROLE_FCP_INITIATOR) &&
+ (lport->service_params & FCP_SPPF_TARG_FCN))
+ return true;
+ return false;
+}
+
+/**
+ * fc_rport_enter_plogi() - Send Port Login (PLOGI) request
+ * @rdata: The remote port to send a PLOGI to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ if (!fc_rport_compatible_roles(lport, rdata)) {
+ FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ return;
+ }
+
+ FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
+
+ rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp) {
+ FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
+ fc_rport_error_retry(rdata, fp);
+ return;
+ }
+ rdata->e_d_tov = lport->e_d_tov;
+
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
+ fc_rport_plogi_resp, rdata,
+ 2 * lport->r_a_tov))
+ fc_rport_error_retry(rdata, NULL);
+ else
+ kref_get(&rdata->kref);
+}
+
+/**
+ * fc_rport_prli_resp() - Process Login (PRLI) response handler
+ * @sp: The sequence the PRLI response was on
+ * @fp: The PRLI response frame
+ * @rdata_arg: The remote port that sent the PRLI response
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp temp_spp;
+ struct fc4_prov *prov;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ u32 fcp_parm = 0;
+ u8 op;
+ u8 resp_code = 0;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
+
+ if (rdata->rp_state != RPORT_ST_PRLI) {
+ FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error_retry(rdata, fp);
+ goto err;
+ }
+
+ /* reinitialize remote port roles */
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ if (!pp)
+ goto out;
+
+ resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
+ FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
+ pp->spp.spp_flags);
+ rdata->spp_type = pp->spp.spp_type;
+ if (resp_code != FC_SPP_RESP_ACK) {
+ if (resp_code == FC_SPP_RESP_CONF)
+ fc_rport_error(rdata, fp);
+ else
+ fc_rport_error_retry(rdata, fp);
+ goto out;
+ }
+ if (pp->prli.prli_spp_len < sizeof(pp->spp))
+ goto out;
+
+ fcp_parm = ntohl(pp->spp.spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ if (fcp_parm & FCP_SPPF_CONF_COMPL)
+ rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
+
+ rdata->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ rdata->ids.roles = roles;
+ fc_rport_enter_rtv(rdata);
+
+ } else {
+ FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
+ fc_rport_error_retry(rdata, fp);
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+}
+
+/**
+ * fc_rport_enter_prli() - Send Process Login (PRLI) request
+ * @rdata: The remote port to send the PRLI request to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_frame *fp;
+ struct fc4_prov *prov;
+
+ /*
+ * If the rport is one of the well known addresses
+ * we skip PRLI and RTV and go straight to READY.
+ */
+ if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
+ FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_PRLI);
+
+ fp = fc_frame_alloc(lport, sizeof(*pp));
+ if (!fp) {
+ fc_rport_error_retry(rdata, fp);
+ return;
+ }
+
+ fc_prli_fill(lport, fp);
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+ }
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+ fc_host_port_id(lport->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov))
+ fc_rport_error_retry(rdata, NULL);
+ else
+ kref_get(&rdata->kref);
+}
+
+/**
+ * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * @sp: The sequence the RTV was on
+ * @fp: The RTV response frame
+ * @rdata_arg: The remote port that sent the RTV response
+ *
+ * Many targets don't seem to support this.
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
+
+ if (rdata->rp_state != RPORT_ST_RTV) {
+ FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rdata, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ struct fc_els_rtv_acc *rtv;
+ u32 toq;
+ u32 tov;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ if (rtv) {
+ toq = ntohl(rtv->rtv_toq);
+ tov = ntohl(rtv->rtv_r_a_tov);
+ if (tov == 0)
+ tov = 1;
+ rdata->r_a_tov = tov;
+ tov = ntohl(rtv->rtv_e_d_tov);
+ if (toq & FC_ELS_RTV_EDRES)
+ tov /= 1000000;
+ if (tov == 0)
+ tov = 1;
+ rdata->e_d_tov = tov;
+ }
+ }
+
+ fc_rport_enter_ready(rdata);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+}
+
+/**
+ * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
+ * @rdata: The remote port to send the RTV request to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
+{
+ struct fc_frame *fp;
+ struct fc_lport *lport = rdata->local_port;
+
+ FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_RTV);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
+ if (!fp) {
+ fc_rport_error_retry(rdata, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
+ fc_rport_rtv_resp, rdata,
+ 2 * lport->r_a_tov))
+ fc_rport_error_retry(rdata, NULL);
+ else
+ kref_get(&rdata->kref);
+}
+
+/**
+ * fc_rport_logo_resp() - Handler for logout (LOGO) responses
+ * @sp: The sequence the LOGO was on
+ * @fp: The LOGO response frame
+ * @lport_arg: The local port
+ */
+static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lport_arg)
+{
+ struct fc_lport *lport = lport_arg;
+
+ FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
+ "Received a LOGO %s\n", fc_els_resp_type(fp));
+ if (IS_ERR(fp))
+ return;
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_enter_logo() - Send a logout (LOGO) request
+ * @rdata: The remote port to send the LOGO request to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
+ fc_rport_state(rdata));
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
+ if (!fp)
+ return;
+ (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+ fc_rport_logo_resp, lport, 0);
+}
+
+/**
+ * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * @sp: The sequence the ADISC response was on
+ * @fp: The ADISC response frame
+ * @rdata_arg: The remote port that sent the ADISC response
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_els_adisc *adisc;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+
+ if (rdata->rp_state != RPORT_ST_ADISC) {
+ FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
+ fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rdata, fp);
+ goto err;
+ }
+
+ /*
+ * If address verification failed. Consider us logged out of the rport.
+ * Since the rport is still in discovery, we want to be
+ * logged in, so go to PLOGI state. Otherwise, go back to READY.
+ */
+ op = fc_frame_payload_op(fp);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ if (op != ELS_LS_ACC || !adisc ||
+ ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
+ get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
+ get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
+ FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
+ fc_rport_enter_flogi(rdata);
+ } else {
+ FC_RPORT_DBG(rdata, "ADISC OK\n");
+ fc_rport_enter_ready(rdata);
+ }
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+}
+
+/**
+ * fc_rport_enter_adisc() - Send Address Discover (ADISC) request
+ * @rdata: The remote port to send the ADISC request to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_ADISC);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
+ if (!fp) {
+ fc_rport_error_retry(rdata, fp);
+ return;
+ }
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
+ fc_rport_adisc_resp, rdata,
+ 2 * lport->r_a_tov))
+ fc_rport_error_retry(rdata, NULL);
+ else
+ kref_get(&rdata->kref);
+}
+
+/**
+ * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
+ * @rdata: The remote port that sent the ADISC request
+ * @in_fp: The ADISC request frame
+ *
+ * Locking Note: Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_adisc *adisc;
+ struct fc_seq_els_data rjt_data;
+
+ FC_RPORT_DBG(rdata, "Received ADISC request\n");
+
+ adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
+ if (!adisc) {
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(*adisc));
+ if (!fp)
+ goto drop;
+ fc_adisc_fill(lport, fp);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_LS_ACC;
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_rport_recv_rls_req() - Handle received Read Link Status request
+ * @rdata: The remote port that sent the RLS request
+ * @rx_fp: The PRLI request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
+ struct fc_frame *rx_fp)
+
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rls *rls;
+ struct fc_els_rls_resp *rsp;
+ struct fc_els_lesb *lesb;
+ struct fc_seq_els_data rjt_data;
+ struct fc_host_statistics *hst;
+
+ FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
+ fc_rport_state(rdata));
+
+ rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
+ if (!rls) {
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ goto out_rjt;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(*rsp));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto out_rjt;
+ }
+
+ rsp = fc_frame_payload_get(fp, sizeof(*rsp));
+ memset(rsp, 0, sizeof(*rsp));
+ rsp->rls_cmd = ELS_LS_ACC;
+ lesb = &rsp->rls_lesb;
+ if (lport->tt.get_lesb) {
+ /* get LESB from LLD if it supports it */
+ lport->tt.get_lesb(lport, lesb);
+ } else {
+ fc_get_host_stats(lport->host);
+ hst = &lport->host_stats;
+ lesb->lesb_link_fail = htonl(hst->link_failure_count);
+ lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
+ lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
+ lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
+ lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
+ lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
+ }
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ goto out;
+
+out_rjt:
+ lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+out:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_els_req() - Handler for validated ELS requests
+ * @lport: The local port that received the ELS request
+ * @fp: The ELS request frame
+ *
+ * Handle incoming ELS requests that require port login.
+ * The ELS opcode has already been validated by the caller.
+ *
+ * Locking Note: Called with the lport lock held.
+ */
+static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata;
+ struct fc_seq_els_data els_data;
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
+ if (!rdata) {
+ mutex_unlock(&lport->disc.disc_mutex);
+ goto reject;
+ }
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ case RPORT_ST_ADISC:
+ break;
+ default:
+ mutex_unlock(&rdata->rp_mutex);
+ goto reject;
+ }
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_PRLI:
+ fc_rport_recv_prli_req(rdata, fp);
+ break;
+ case ELS_PRLO:
+ fc_rport_recv_prlo_req(rdata, fp);
+ break;
+ case ELS_ADISC:
+ fc_rport_recv_adisc_req(rdata, fp);
+ break;
+ case ELS_RRQ:
+ lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+ fc_frame_free(fp);
+ break;
+ case ELS_REC:
+ lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+ fc_frame_free(fp);
+ break;
+ case ELS_RLS:
+ fc_rport_recv_rls_req(rdata, fp);
+ break;
+ default:
+ fc_frame_free(fp); /* can't happen */
+ break;
+ }
+
+ mutex_unlock(&rdata->rp_mutex);
+ return;
+
+reject:
+ els_data.reason = ELS_RJT_UNAB;
+ els_data.explan = ELS_EXPL_PLOGI_REQD;
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_recv_req() - Handler for requests
+ * @lport: The local port that received the request
+ * @fp: The request frame
+ *
+ * Locking Note: Called with the lport lock held.
+ */
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_seq_els_data els_data;
+
+ /*
+ * Handle FLOGI, PLOGI and LOGO requests separately, since they
+ * don't require prior login.
+ * Check for unsupported opcodes first and reject them.
+ * For some ops, it would be incorrect to reject with "PLOGI required".
+ */
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_FLOGI:
+ fc_rport_recv_flogi_req(lport, fp);
+ break;
+ case ELS_PLOGI:
+ fc_rport_recv_plogi_req(lport, fp);
+ break;
+ case ELS_LOGO:
+ fc_rport_recv_logo_req(lport, fp);
+ break;
+ case ELS_PRLI:
+ case ELS_PRLO:
+ case ELS_ADISC:
+ case ELS_RRQ:
+ case ELS_REC:
+ case ELS_RLS:
+ fc_rport_recv_els_req(lport, fp);
+ break;
+ default:
+ els_data.reason = ELS_RJT_UNSUP;
+ els_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ break;
+ }
+}
+
+/**
+ * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
+ * @lport: The local port that received the PLOGI request
+ * @rx_fp: The PLOGI request frame
+ *
+ * Locking Note: The rport lock is held before calling this function.
+ */
+static void fc_rport_recv_plogi_req(struct fc_lport *lport,
+ struct fc_frame *rx_fp)
+{
+ struct fc_disc *disc;
+ struct fc_rport_priv *rdata;
+ struct fc_frame *fp = rx_fp;
+ struct fc_els_flogi *pl;
+ struct fc_seq_els_data rjt_data;
+ u32 sid;
+
+ sid = fc_frame_sid(fp);
+
+ FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
+
+ pl = fc_frame_payload_get(fp, sizeof(*pl));
+ if (!pl) {
+ FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ goto reject;
+ }
+
+ disc = &lport->disc;
+ mutex_lock(&disc->disc_mutex);
+ rdata = lport->tt.rport_create(lport, sid);
+ if (!rdata) {
+ mutex_unlock(&disc->disc_mutex);
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
+ }
+
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&disc->disc_mutex);
+
+ rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
+ rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
+
+ /*
+ * If the rport was just created, possibly due to the incoming PLOGI,
+ * set the state appropriately and accept the PLOGI.
+ *
+ * If we had also sent a PLOGI, and if the received PLOGI is from a
+ * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
+ * "command already in progress".
+ *
+ * XXX TBD: If the session was ready before, the PLOGI should result in
+ * all outstanding exchanges being reset.
+ */
+ switch (rdata->rp_state) {
+ case RPORT_ST_INIT:
+ FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
+ break;
+ case RPORT_ST_PLOGI_WAIT:
+ FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n");
+ break;
+ case RPORT_ST_PLOGI:
+ FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
+ if (rdata->ids.port_name < lport->wwpn) {
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_INPROG;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ case RPORT_ST_ADISC:
+ FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
+ "- ignored for now\n", rdata->rp_state);
+ /* XXX TBD - should reset */
+ break;
+ case RPORT_ST_FLOGI:
+ case RPORT_ST_DELETE:
+ FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_BUSY;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+ if (!fc_rport_compatible_roles(lport, rdata)) {
+ FC_RPORT_DBG(rdata, "Received PLOGI for incompatible role\n");
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+
+ /*
+ * Get session payload size from incoming PLOGI.
+ */
+ rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ fp = fc_frame_alloc(lport, sizeof(*pl));
+ if (!fp)
+ goto out;
+
+ fc_plogi_fill(lport, fp, ELS_LS_ACC);
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ fc_rport_enter_prli(rdata);
+out:
+ mutex_unlock(&rdata->rp_mutex);
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
+ * @rdata: The remote port that sent the PRLI request
+ * @rx_fp: The PRLI request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
+ struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp *rspp; /* request service param page */
+ struct fc_els_spp *spp; /* response spp */
+ unsigned int len;
+ unsigned int plen;
+ enum fc_els_spp_resp resp;
+ enum fc_els_spp_resp passive;
+ struct fc_seq_els_data rjt_data;
+ struct fc4_prov *prov;
+
+ FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
+ fc_rport_state(rdata));
+
+ len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len || plen < 16)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp) || plen < 12)
+ goto reject_len;
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
+ }
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
+
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ mutex_lock(&fc_prov_mutex);
+ while (len >= plen) {
+ rdata->spp_type = rspp->spp_type;
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ resp = 0;
+
+ if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_active_prov[rspp->spp_type];
+ if (prov)
+ resp = prov->prli(rdata, plen, rspp, spp);
+ prov = fc_passive_prov[rspp->spp_type];
+ if (prov) {
+ passive = prov->prli(rdata, plen, rspp, spp);
+ if (!resp || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ }
+ }
+ if (!resp) {
+ if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ resp |= FC_SPP_RESP_CONF;
+ else
+ resp |= FC_SPP_RESP_INVL;
+ }
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+ mutex_unlock(&fc_prov_mutex);
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ fc_rport_enter_ready(rdata);
+ break;
+ default:
+ break;
+ }
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+drop:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
+ * @rdata: The remote port that sent the PRLO request
+ * @rx_fp: The PRLO request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
+ struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct {
+ struct fc_els_prlo prlo;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp *rspp; /* request service param page */
+ struct fc_els_spp *spp; /* response spp */
+ unsigned int len;
+ unsigned int plen;
+ struct fc_seq_els_data rjt_data;
+
+ FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
+ fc_rport_state(rdata));
+
+ len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prlo.prlo_len);
+ if (plen != 20)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
+ }
+
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prlo.prlo_cmd = ELS_LS_ACC;
+ pp->prlo.prlo_obs = 0x10;
+ pp->prlo.prlo_len = htons(len);
+ spp = &pp->spp;
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = FC_SPP_RESP_ACK;
+
+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+drop:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
+ * @lport: The local port that received the LOGO request
+ * @fp: The LOGO request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata;
+ u32 sid;
+
+ lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+
+ sid = fc_frame_sid(fp);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_lookup(lport, sid);
+ if (rdata) {
+ mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
+ fc_rport_state(rdata));
+
+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+ mutex_unlock(&rdata->rp_mutex);
+ } else
+ FC_RPORT_ID_DBG(lport, sid,
+ "Received LOGO from non-logged-in port\n");
+ mutex_unlock(&lport->disc.disc_mutex);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_flush_queue() - Flush the rport_event_queue
+ */
+static void fc_rport_flush_queue(void)
+{
+ flush_workqueue(rport_event_queue);
+}
+
+/**
+ * fc_rport_init() - Initialize the remote port layer for a local port
+ * @lport: The local port to initialize the remote port layer for
+ */
+int fc_rport_init(struct fc_lport *lport)
+{
+ if (!lport->tt.rport_lookup)
+ lport->tt.rport_lookup = fc_rport_lookup;
+
+ if (!lport->tt.rport_create)
+ lport->tt.rport_create = fc_rport_create;
+
+ if (!lport->tt.rport_login)
+ lport->tt.rport_login = fc_rport_login;
+
+ if (!lport->tt.rport_logoff)
+ lport->tt.rport_logoff = fc_rport_logoff;
+
+ if (!lport->tt.rport_recv_req)
+ lport->tt.rport_recv_req = fc_rport_recv_req;
+
+ if (!lport->tt.rport_flush_queue)
+ lport->tt.rport_flush_queue = fc_rport_flush_queue;
+
+ if (!lport->tt.rport_destroy)
+ lport->tt.rport_destroy = fc_rport_destroy;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_rport_init);
+
+/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the value for the response code to be placed in spp_flags;
+ * Returns 0 if not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 fcp_parm;
+
+ fcp_parm = ntohl(rspp->spp_params);
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN))
+ return 0;
+
+ spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+ /*
+ * OR in our service parameters with other providers (target), if any.
+ */
+ fcp_parm = ntohl(spp->spp_params);
+ spp->spp_params = htonl(fcp_parm | lport->service_params);
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+ .prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ return FC_SPP_RESP_INVL;
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+ .prli = fc_rport_t0_prli,
+};
+
+/**
+ * fc_setup_rport() - Initialize the rport_event_queue
+ */
+int fc_setup_rport(void)
+{
+ rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+ if (!rport_event_queue)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * fc_destroy_rport() - Destroy the rport_event_queue
+ */
+void fc_destroy_rport(void)
+{
+ destroy_workqueue(rport_event_queue);
+}
+
+/**
+ * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
+ * @rport: The remote port whose I/O should be terminated
+ */
+void fc_rport_terminate_io(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rpriv = rport->dd_data;
+ struct fc_lport *lport = rpriv->local_port;
+
+ lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
+ lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
+}
+EXPORT_SYMBOL(fc_rport_terminate_io);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
new file mode 100644
index 000000000..8053f24f0
--- /dev/null
+++ b/drivers/scsi/libiscsi.c
@@ -0,0 +1,3649 @@
+/*
+ * iSCSI lib functions
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2006 Mike Christie
+ * Copyright (C) 2004 - 2005 Dmitry Yusupov
+ * Copyright (C) 2004 - 2005 Alex Aizman
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+
+static int iscsi_dbg_lib_conn;
+module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_conn,
+ "Turn on debugging for connections in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_session;
+module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_session,
+ "Turn on debugging for sessions in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_eh;
+module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_eh,
+ "Turn on debugging for error handling in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_dbg_lib_conn) \
+ iscsi_conn_printk(KERN_INFO, _conn, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_dbg_lib_session) \
+ iscsi_session_printk(KERN_INFO, _session, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_dbg_lib_eh) \
+ iscsi_session_printk(KERN_INFO, _session, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ if (ihost->workq)
+ queue_work(ihost->workq, &conn->xmitwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
+
+static void __iscsi_update_cmdsn(struct iscsi_session *session,
+ uint32_t exp_cmdsn, uint32_t max_cmdsn)
+{
+ /*
+ * standard specifies this check for when to update expected and
+ * max sequence numbers
+ */
+ if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+ return;
+
+ if (exp_cmdsn != session->exp_cmdsn &&
+ !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
+ session->exp_cmdsn = exp_cmdsn;
+
+ if (max_cmdsn != session->max_cmdsn &&
+ !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
+ session->max_cmdsn = max_cmdsn;
+}
+
+void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+{
+ __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
+ be32_to_cpu(hdr->max_cmdsn));
+}
+EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+/**
+ * iscsi_prep_data_out_pdu - initialize Data-Out
+ * @task: scsi command task
+ * @r2t: R2T info
+ * @hdr: iscsi data in pdu
+ *
+ * Notes:
+ * Initialize Data-Out within this R2T sequence and finds
+ * proper data_offset within this SCSI command.
+ *
+ * This function is called with connection lock taken.
+ **/
+void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
+ struct iscsi_data *hdr)
+{
+ struct iscsi_conn *conn = task->conn;
+ unsigned int left = r2t->data_length - r2t->sent;
+
+ task->hdr_len = sizeof(struct iscsi_data);
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = r2t->ttt;
+ hdr->datasn = cpu_to_be32(r2t->datasn);
+ r2t->datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+ hdr->lun = task->lun;
+ hdr->itt = task->hdr_itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
+ if (left > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+ r2t->data_count = conn->max_xmit_dlength;
+ hdr->flags = 0;
+ } else {
+ hton24(hdr->dlength, left);
+ r2t->data_count = left;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ conn->dataout_pdus_cnt++;
+}
+EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
+
+static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
+{
+ unsigned exp_len = task->hdr_len + len;
+
+ if (exp_len > task->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+ task->hdr_len = exp_len;
+ return 0;
+}
+
+/*
+ * make an extended cdb AHS
+ */
+static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+{
+ struct scsi_cmnd *cmd = task->sc;
+ unsigned rlen, pad_len;
+ unsigned short ahslength;
+ struct iscsi_ecdb_ahdr *ecdb_ahdr;
+ int rc;
+
+ ecdb_ahdr = iscsi_next_hdr(task);
+ rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+
+ BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+ ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+
+ pad_len = iscsi_padding(rlen);
+
+ rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+ sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+ if (rc)
+ return rc;
+
+ if (pad_len)
+ memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+
+ ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+ ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+ ecdb_ahdr->reserved = 0;
+ memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+
+ ISCSI_DBG_SESSION(task->conn->session,
+ "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+ "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
+ "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
+ task->hdr_len);
+ return 0;
+}
+
+static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_rlength_ahdr *rlen_ahdr;
+ int rc;
+
+ rlen_ahdr = iscsi_next_hdr(task);
+ rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+ if (rc)
+ return rc;
+
+ rlen_ahdr->ahslength =
+ cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+ sizeof(rlen_ahdr->reserved));
+ rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+ rlen_ahdr->reserved = 0;
+ rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+
+ ISCSI_DBG_SESSION(task->conn->session,
+ "bidi-in rlen_ahdr->read_length(%d) "
+ "rlen_ahdr->ahslength(%d)\n",
+ be32_to_cpu(rlen_ahdr->read_length),
+ be16_to_cpu(rlen_ahdr->ahslength));
+ return 0;
+}
+
+/**
+ * iscsi_check_tmf_restrictions - check if a task is affected by TMF
+ * @task: iscsi task
+ * @opcode: opcode to check for
+ *
+ * During TMF a task has to be checked if it's affected.
+ * All unrelated I/O can be passed through, but I/O to the
+ * affected LUN should be restricted.
+ * If 'fast_abort' is set we won't be sending any I/O to the
+ * affected LUN.
+ * Otherwise the target is waiting for all TTTs to be completed,
+ * so we have to send all outstanding Data-Out PDUs to the target.
+ */
+static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_tm *tmf = &conn->tmhdr;
+ u64 hdr_lun;
+
+ if (conn->tmf_state == TMF_INITIAL)
+ return 0;
+
+ if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
+ return 0;
+
+ switch (ISCSI_TM_FUNC_VALUE(tmf)) {
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ /*
+ * Allow PDUs for unrelated LUNs
+ */
+ hdr_lun = scsilun_to_int(&tmf->lun);
+ if (hdr_lun != task->sc->device->lun)
+ return 0;
+ /* fall through */
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ /*
+ * Fail all SCSI cmd PDUs
+ */
+ if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "task [op %x/%x itt "
+ "0x%x/0x%x] "
+ "rejected.\n",
+ task->hdr->opcode, opcode,
+ task->itt, task->hdr_itt);
+ return -EACCES;
+ }
+ /*
+ * And also all data-out PDUs in response to R2T
+ * if fast_abort is set.
+ */
+ if (conn->session->fast_abort) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "task [op %x/%x itt "
+ "0x%x/0x%x] fast abort.\n",
+ task->hdr->opcode, opcode,
+ task->itt, task->hdr_itt);
+ return -EACCES;
+ }
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ /*
+ * the caller has already checked if the task
+ * they want to abort was in the pending queue so if
+ * we are here the cmd pdu has gone out already, and
+ * we will only hit this for data-outs
+ */
+ if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
+ task->hdr_itt == tmf->rtt) {
+ ISCSI_DBG_SESSION(conn->session,
+ "Preventing task %x/%x from sending "
+ "data-out due to abort task in "
+ "progress\n", task->itt,
+ task->hdr_itt);
+ return -EACCES;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+ * @task: iscsi task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_scsi_req *hdr;
+ unsigned hdrlength, cmd_len, transfer_length;
+ itt_t itt;
+ int rc;
+
+ rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
+ if (rc)
+ return rc;
+
+ if (conn->session->tt->alloc_pdu) {
+ rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+ if (rc)
+ return rc;
+ }
+ hdr = (struct iscsi_scsi_req *)task->hdr;
+ itt = hdr->itt;
+ memset(hdr, 0, sizeof(*hdr));
+
+ if (session->tt->parse_pdu_itt)
+ hdr->itt = task->hdr_itt = itt;
+ else
+ hdr->itt = task->hdr_itt = build_itt(task->itt,
+ task->conn->session->age);
+ task->hdr_len = 0;
+ rc = iscsi_add_hdr(task, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, &hdr->lun);
+ task->lun = hdr->lun;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ cmd_len = sc->cmd_len;
+ if (cmd_len < ISCSI_CDB_SIZE)
+ memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+ else if (cmd_len > ISCSI_CDB_SIZE) {
+ rc = iscsi_prep_ecdb_ahs(task);
+ if (rc)
+ return rc;
+ cmd_len = ISCSI_CDB_SIZE;
+ }
+ memcpy(hdr->cdb, sc->cmnd, cmd_len);
+
+ task->imm_count = 0;
+ if (scsi_bidi_cmnd(sc)) {
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ rc = iscsi_prep_bidi_ahs(task);
+ if (rc)
+ return rc;
+ }
+
+ if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+ task->protected = true;
+
+ transfer_length = scsi_transfer_length(sc);
+ hdr->data_length = cpu_to_be32(transfer_length);
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ struct iscsi_r2t_info *r2t = &task->unsol_r2t;
+
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+ *
+ * imm_count bytes to be sent right after
+ * SCSI PDU Header
+ *
+ * unsol_count bytes(as Data-Out) to be sent
+ * without R2T ack right after
+ * immediate data
+ *
+ * r2t data_length bytes to be sent via R2T ack's
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+ memset(r2t, 0, sizeof(*r2t));
+
+ if (session->imm_data_en) {
+ if (transfer_length >= session->first_burst)
+ task->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+ task->imm_count = min(transfer_length,
+ conn->max_xmit_dlength);
+ hton24(hdr->dlength, task->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+ r2t->data_length = min(session->first_burst,
+ transfer_length) -
+ task->imm_count;
+ r2t->data_offset = task->imm_count;
+ r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+ r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ }
+
+ if (!task->unsol_r2t.data_length)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+ hdrlength = task->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+ hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
+
+ if (session->tt->init_task && session->tt->init_task(task))
+ return -EIO;
+
+ task->state = ISCSI_TASK_RUNNING;
+ session->cmdsn++;
+
+ conn->scsicmd_pdus_cnt++;
+ ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
+ "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n",
+ scsi_bidi_cmnd(sc) ? "bidirectional" :
+ sc->sc_data_direction == DMA_TO_DEVICE ?
+ "write" : "read", conn->id, sc, sc->cmnd[0],
+ task->itt, transfer_length,
+ scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+ session->cmdsn,
+ session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+}
+
+/**
+ * iscsi_free_task - free a task
+ * @task: iscsi cmd task
+ *
+ * Must be called with session back_lock.
+ * This function returns the scsi command to scsi-ml or cleans
+ * up mgmt tasks then returns the task to the pool.
+ */
+static void iscsi_free_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct scsi_cmnd *sc = task->sc;
+ int oldstate = task->state;
+
+ ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
+ task->itt, task->state, task->sc);
+
+ session->tt->cleanup_task(task);
+ task->state = ISCSI_TASK_FREE;
+ task->sc = NULL;
+ /*
+ * login task is preallocated so do not free
+ */
+ if (conn->login_task == task)
+ return;
+
+ kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
+
+ if (sc) {
+ /* SCSI eh reuses commands to verify us */
+ sc->SCp.ptr = NULL;
+ /*
+ * queue command may call this to free the task, so
+ * it will decide how to return sc to scsi-ml.
+ */
+ if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
+ sc->scsi_done(sc);
+ }
+}
+
+void __iscsi_get_task(struct iscsi_task *task)
+{
+ atomic_inc(&task->refcount);
+}
+EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+void __iscsi_put_task(struct iscsi_task *task)
+{
+ if (atomic_dec_and_test(&task->refcount))
+ iscsi_free_task(task);
+}
+EXPORT_SYMBOL_GPL(__iscsi_put_task);
+
+void iscsi_put_task(struct iscsi_task *task)
+{
+ struct iscsi_session *session = task->conn->session;
+
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+/**
+ * iscsi_complete_task - finish a task
+ * @task: iscsi cmd task
+ * @state: state to complete task with
+ *
+ * Must be called with session back_lock.
+ */
+static void iscsi_complete_task(struct iscsi_task *task, int state)
+{
+ struct iscsi_conn *conn = task->conn;
+
+ ISCSI_DBG_SESSION(conn->session,
+ "complete task itt 0x%x state %d sc %p\n",
+ task->itt, task->state, task->sc);
+ if (task->state == ISCSI_TASK_COMPLETED ||
+ task->state == ISCSI_TASK_ABRT_TMF ||
+ task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
+ task->state == ISCSI_TASK_REQUEUE_SCSIQ)
+ return;
+ WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+ task->state = state;
+
+ if (!list_empty(&task->running))
+ list_del_init(&task->running);
+
+ if (conn->task == task)
+ conn->task = NULL;
+
+ if (conn->ping_task == task)
+ conn->ping_task = NULL;
+
+ /* release get from queueing */
+ __iscsi_put_task(task);
+}
+
+/**
+ * iscsi_complete_scsi_task - finish scsi task normally
+ * @task: iscsi task for scsi cmd
+ * @exp_cmdsn: expected cmd sn in cpu format
+ * @max_cmdsn: max cmd sn in cpu format
+ *
+ * This is used when drivers do not need or cannot perform
+ * lower level pdu processing.
+ *
+ * Called with session back_lock
+ */
+void iscsi_complete_scsi_task(struct iscsi_task *task,
+ uint32_t exp_cmdsn, uint32_t max_cmdsn)
+{
+ struct iscsi_conn *conn = task->conn;
+
+ ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
+
+ conn->last_recv = jiffies;
+ __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+}
+EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
+
+
+/*
+ * session back_lock must be held and if not called for a task that is
+ * still pending or from the xmit thread, then xmit thread must
+ * be suspended.
+ */
+static void fail_scsi_task(struct iscsi_task *task, int err)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct scsi_cmnd *sc;
+ int state;
+
+ /*
+ * if a command completes and we get a successful tmf response
+ * we will hit this because the scsi eh abort code does not take
+ * a ref to the task.
+ */
+ sc = task->sc;
+ if (!sc)
+ return;
+
+ if (task->state == ISCSI_TASK_PENDING) {
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ /* it was never sent so just complete like normal */
+ state = ISCSI_TASK_COMPLETED;
+ } else if (err == DID_TRANSPORT_DISRUPTED)
+ state = ISCSI_TASK_ABRT_SESS_RECOV;
+ else
+ state = ISCSI_TASK_ABRT_TMF;
+
+ sc->result = err << 16;
+ if (!scsi_bidi_cmnd(sc))
+ scsi_set_resid(sc, scsi_bufflen(sc));
+ else {
+ scsi_out(sc)->resid = scsi_out(sc)->length;
+ scsi_in(sc)->resid = scsi_in(sc)->length;
+ }
+
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
+ iscsi_complete_task(task, state);
+ spin_unlock_bh(&conn->session->back_lock);
+}
+
+static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+ struct iscsi_task *task)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_hdr *hdr = task->hdr;
+ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+ uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ return -ENOTCONN;
+
+ if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
+ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ /*
+ * pre-format CmdSN for outgoing PDU.
+ */
+ nop->cmdsn = cpu_to_be32(session->cmdsn);
+ if (hdr->itt != RESERVED_ITT) {
+ /*
+ * TODO: We always use immediate for normal session pdus.
+ * If we start to send tmfs or nops as non-immediate then
+ * we should start checking the cmdsn numbers for mgmt tasks.
+ *
+ * During discovery sessions iscsid sends TEXT as non immediate,
+ * but we always only send one PDU at a time.
+ */
+ if (conn->c_stage == ISCSI_CONN_STARTED &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ session->queued_cmdsn++;
+ session->cmdsn++;
+ }
+ }
+
+ if (session->tt->init_task && session->tt->init_task(task))
+ return -EIO;
+
+ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+ session->state = ISCSI_STATE_LOGGING_OUT;
+
+ task->state = ISCSI_TASK_RUNNING;
+ ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
+ "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
+ hdr->itt, task->data_count);
+ return 0;
+}
+
+static struct iscsi_task *
+__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(session->host);
+ uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ struct iscsi_task *task;
+ itt_t itt;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+
+ if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+ * Same task can be used. Same ITT must be used.
+ * Note that login_task is preallocated at conn_create().
+ */
+ if (conn->login_task->state != ISCSI_TASK_FREE) {
+ iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
+ "progress. Cannot start new task.\n");
+ return NULL;
+ }
+
+ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
+ return NULL;
+ }
+
+ task = conn->login_task;
+ } else {
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ return NULL;
+
+ if (data_size != 0) {
+ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
+ return NULL;
+ }
+
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+ if (!kfifo_out(&session->cmdpool.queue,
+ (void*)&task, sizeof(void*)))
+ return NULL;
+ }
+ /*
+ * released in complete pdu for task we expect a response for, and
+ * released by the lld when it has transmitted the task for
+ * pdus we do not expect a response for.
+ */
+ atomic_set(&task->refcount, 1);
+ task->conn = conn;
+ task->sc = NULL;
+ INIT_LIST_HEAD(&task->running);
+ task->state = ISCSI_TASK_PENDING;
+
+ if (data_size) {
+ memcpy(task->data, data, data_size);
+ task->data_count = data_size;
+ } else
+ task->data_count = 0;
+
+ if (conn->session->tt->alloc_pdu) {
+ if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+ "pdu for mgmt task.\n");
+ goto free_task;
+ }
+ }
+
+ itt = task->hdr->itt;
+ task->hdr_len = sizeof(struct iscsi_hdr);
+ memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+
+ if (hdr->itt != RESERVED_ITT) {
+ if (session->tt->parse_pdu_itt)
+ task->hdr->itt = itt;
+ else
+ task->hdr->itt = build_itt(task->itt,
+ task->conn->session->age);
+ }
+
+ if (!ihost->workq) {
+ if (iscsi_prep_mgmt_task(conn, task))
+ goto free_task;
+
+ if (session->tt->xmit_task(task))
+ goto free_task;
+ } else {
+ list_add_tail(&task->running, &conn->mgmtqueue);
+ iscsi_conn_queue_work(conn);
+ }
+
+ return task;
+
+free_task:
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
+ return NULL;
+}
+
+int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ int err = 0;
+
+ spin_lock_bh(&session->frwd_lock);
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->frwd_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+
+/**
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+ * @task: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+ * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
+ * then completes the command and task.
+ **/
+static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ struct iscsi_task *task, char *data,
+ int datalen)
+{
+ struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+ struct scsi_cmnd *sc = task->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+
+ if (task->protected) {
+ sector_t sector;
+ u8 ascq;
+
+ /**
+ * Transports that didn't implement check_protection
+ * callback but still published T10-PI support to scsi-mid
+ * deserve this BUG_ON.
+ **/
+ BUG_ON(!session->tt->check_protection);
+
+ ascq = session->tt->check_protection(task, &sector);
+ if (ascq) {
+ sc->result = DRIVER_SENSE << 24 |
+ SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(1, sc->sense_buffer,
+ ILLEGAL_REQUEST, 0x10, ascq);
+ sc->sense_buffer[7] = 0xc; /* Additional sense length */
+ sc->sense_buffer[8] = 0; /* Information desc type */
+ sc->sense_buffer[9] = 0xa; /* Additional desc length */
+ sc->sense_buffer[10] = 0x80; /* Validity bit */
+
+ put_unaligned_be64(sector, &sc->sense_buffer[12]);
+ goto out;
+ }
+ }
+
+ if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
+ sc->result = DID_ERROR << 16;
+ goto out;
+ }
+
+ if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+ uint16_t senselen;
+
+ if (datalen < 2) {
+invalid_datalen:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Got CHECK_CONDITION but invalid data "
+ "buffer size of %d\n", datalen);
+ sc->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
+ senselen = get_unaligned_be16(data);
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+ memcpy(sc->sense_buffer, data + 2,
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
+ min_t(uint16_t, senselen,
+ SCSI_SENSE_BUFFERSIZE));
+ }
+
+ if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+ ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->bi_residual_count);
+
+ if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+ res_count <= scsi_in(sc)->length))
+ scsi_in(sc)->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+ }
+
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+ /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+ }
+out:
+ ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
+ sc, sc->result, task->itt);
+ conn->scsirsp_pdus_cnt++;
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+}
+
+/**
+ * iscsi_data_in_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi pdu
+ * @task: scsi command task
+ **/
+static void
+iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ struct iscsi_task *task)
+{
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
+ struct scsi_cmnd *sc = task->sc;
+
+ if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+ return;
+
+ iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+ if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+ ISCSI_FLAG_DATA_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_in(sc)->length))
+ scsi_in(sc)->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+ }
+
+ ISCSI_DBG_SESSION(conn->session, "data in with status done "
+ "[sc %p res %d itt 0x%x]\n",
+ sc, sc->result, task->itt);
+ conn->scsirsp_pdus_cnt++;
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+}
+
+static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+ struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+ if (conn->tmf_state != TMF_QUEUED)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+ conn->tmf_state = TMF_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+ conn->tmf_state = TMF_NOT_FOUND;
+ else
+ conn->tmf_state = TMF_FAILED;
+ wake_up(&conn->ehwait);
+}
+
+static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+{
+ struct iscsi_nopout hdr;
+ struct iscsi_task *task;
+
+ if (!rhdr && conn->ping_task)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+ hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+ hdr.flags = ISCSI_FLAG_CMD_FINAL;
+
+ if (rhdr) {
+ hdr.lun = rhdr->lun;
+ hdr.ttt = rhdr->ttt;
+ hdr.itt = RESERVED_ITT;
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+ task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+ else if (!rhdr) {
+ /* only track our nops */
+ conn->ping_task = task;
+ conn->last_ping = jiffies;
+ }
+}
+
+static int iscsi_nop_out_rsp(struct iscsi_task *task,
+ struct iscsi_nopin *nop, char *data, int datalen)
+{
+ struct iscsi_conn *conn = task->conn;
+ int rc = 0;
+
+ if (conn->ping_task != task) {
+ /*
+ * If this is not in response to one of our
+ * nops then it must be from userspace.
+ */
+ if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
+ data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
+ } else
+ mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+ return rc;
+}
+
+static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+{
+ struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
+ struct iscsi_hdr rejected_pdu;
+ int opcode, rc = 0;
+
+ conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
+
+ if (ntoh24(reject->dlength) > datalen ||
+ ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
+ iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
+ "pdu. Invalid data length (pdu dlength "
+ "%u, datalen %d\n", ntoh24(reject->dlength),
+ datalen);
+ return ISCSI_ERR_PROTO;
+ }
+ memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+ opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
+
+ switch (reject->reason) {
+ case ISCSI_REASON_DATA_DIGEST_ERROR:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "pdu (op 0x%x itt 0x%x) rejected "
+ "due to DataDigest error.\n",
+ opcode, rejected_pdu.itt);
+ break;
+ case ISCSI_REASON_IMM_CMD_REJECT:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "pdu (op 0x%x itt 0x%x) rejected. Too many "
+ "immediate commands.\n",
+ opcode, rejected_pdu.itt);
+ /*
+ * We only send one TMF at a time so if the target could not
+ * handle it, then it should get fixed (RFC mandates that
+ * a target can handle one immediate TMF per conn).
+ *
+ * For nops-outs, we could have sent more than one if
+ * the target is sending us lots of nop-ins
+ */
+ if (opcode != ISCSI_OP_NOOP_OUT)
+ return 0;
+
+ if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
+ /*
+ * nop-out in response to target's nop-out rejected.
+ * Just resend.
+ */
+ /* In RX path we are under back lock */
+ spin_unlock(&conn->session->back_lock);
+ spin_lock(&conn->session->frwd_lock);
+ iscsi_send_nopout(conn,
+ (struct iscsi_nopin*)&rejected_pdu);
+ spin_unlock(&conn->session->frwd_lock);
+ spin_lock(&conn->session->back_lock);
+ } else {
+ struct iscsi_task *task;
+ /*
+ * Our nop as ping got dropped. We know the target
+ * and transport are ok so just clean up
+ */
+ task = iscsi_itt_to_task(conn, rejected_pdu.itt);
+ if (!task) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Invalid pdu reject. Could "
+ "not lookup rejected task.\n");
+ rc = ISCSI_ERR_BAD_ITT;
+ } else
+ rc = iscsi_nop_out_rsp(task,
+ (struct iscsi_nopin*)&rejected_pdu,
+ NULL, 0);
+ }
+ break;
+ default:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "pdu (op 0x%x itt 0x%x) rejected. Reason "
+ "code 0x%x\n", rejected_pdu.opcode,
+ rejected_pdu.itt, reject->reason);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * iscsi_itt_to_task - look up task by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for mgmt tasks like login and nops, or if
+ * the LDD's itt space does not include the session age.
+ *
+ * The session back_lock must be held.
+ */
+struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+{
+ struct iscsi_session *session = conn->session;
+ int i;
+
+ if (itt == RESERVED_ITT)
+ return NULL;
+
+ if (session->tt->parse_pdu_itt)
+ session->tt->parse_pdu_itt(conn, itt, &i, NULL);
+ else
+ i = get_itt(itt);
+ if (i >= session->cmds_max)
+ return NULL;
+
+ return session->cmds[i];
+}
+EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
+
+/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+ * @data: data buffer
+ * @datalen: len of data buffer
+ *
+ * Completes pdu processing by freeing any resources allocated at
+ * queuecommand or send generic. session back_lock must be held and verify
+ * itt must have been called.
+ */
+int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+{
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+ struct iscsi_task *task;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+ rc = iscsi_verify_itt(conn, hdr->itt);
+ if (rc)
+ return rc;
+
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+ ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
+ opcode, conn->id, itt, datalen);
+
+ if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+ case ISCSI_OP_NOOP_IN:
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ break;
+
+ /* In RX path we are under back lock */
+ spin_unlock(&session->back_lock);
+ spin_lock(&session->frwd_lock);
+ iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
+ spin_unlock(&session->frwd_lock);
+ spin_lock(&session->back_lock);
+ break;
+ case ISCSI_OP_REJECT:
+ rc = iscsi_handle_reject(conn, hdr, data, datalen);
+ break;
+ case ISCSI_OP_ASYNC_EVENT:
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
+ break;
+ default:
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+ goto out;
+ }
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_CMD_RSP:
+ case ISCSI_OP_SCSI_DATA_IN:
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ return ISCSI_ERR_BAD_ITT;
+ task->last_xfer = jiffies;
+ break;
+ case ISCSI_OP_R2T:
+ /*
+ * LLD handles R2Ts if they need to.
+ */
+ return 0;
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ case ISCSI_OP_NOOP_IN:
+ task = iscsi_itt_to_task(conn, hdr->itt);
+ if (!task)
+ return ISCSI_ERR_BAD_ITT;
+ break;
+ default:
+ return ISCSI_ERR_BAD_OPCODE;
+ }
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_CMD_RSP:
+ iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+ break;
+ case ISCSI_OP_SCSI_DATA_IN:
+ iscsi_data_in_rsp(conn, hdr, task);
+ break;
+ case ISCSI_OP_LOGOUT_RSP:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ goto recv_pdu;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ /*
+ * login related PDU's exp_statsn is handled in
+ * userspace
+ */
+ goto recv_pdu;
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ iscsi_tmf_rsp(conn, hdr);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+ break;
+ case ISCSI_OP_NOOP_IN:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+
+ rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
+ data, datalen);
+ break;
+ default:
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+
+out:
+ return rc;
+recv_pdu:
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+{
+ int rc;
+
+ spin_lock(&conn->session->back_lock);
+ rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
+ spin_unlock(&conn->session->back_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
+{
+ struct iscsi_session *session = conn->session;
+ int age = 0, i = 0;
+
+ if (itt == RESERVED_ITT)
+ return 0;
+
+ if (session->tt->parse_pdu_itt)
+ session->tt->parse_pdu_itt(conn, itt, &i, &age);
+ else {
+ i = get_itt(itt);
+ age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
+ }
+
+ if (age != session->age) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "received itt %x expected session age (%x)\n",
+ (__force u32)itt, session->age);
+ return ISCSI_ERR_BAD_ITT;
+ }
+
+ if (i >= session->cmds_max) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "received invalid itt index %u (max cmds "
+ "%u.\n", i, session->cmds_max);
+ return ISCSI_ERR_BAD_ITT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+/**
+ * iscsi_itt_to_ctask - look up ctask by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for cmd tasks.
+ *
+ * The session back_lock must be held.
+ */
+struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+{
+ struct iscsi_task *task;
+
+ if (iscsi_verify_itt(conn, itt))
+ return NULL;
+
+ task = iscsi_itt_to_task(conn, itt);
+ if (!task || !task->sc)
+ return NULL;
+
+ if (task->sc->SCp.phase != conn->session->age) {
+ iscsi_session_printk(KERN_ERR, conn->session,
+ "task's session age %d, expected %d\n",
+ task->sc->SCp.phase, conn->session->age);
+ return NULL;
+ }
+
+ return task;
+}
+EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
+
+void iscsi_session_failure(struct iscsi_session *session,
+ enum iscsi_err err)
+{
+ struct iscsi_conn *conn;
+ struct device *dev;
+
+ spin_lock_bh(&session->frwd_lock);
+ conn = session->leadconn;
+ if (session->state == ISCSI_STATE_TERMINATE || !conn) {
+ spin_unlock_bh(&session->frwd_lock);
+ return;
+ }
+
+ dev = get_device(&conn->cls_conn->dev);
+ spin_unlock_bh(&session->frwd_lock);
+ if (!dev)
+ return;
+ /*
+ * if the host is being removed bypass the connection
+ * recovery initialization because we are going to kill
+ * the session.
+ */
+ if (err == ISCSI_ERR_INVALID_HOST)
+ iscsi_conn_error_event(conn->cls_conn, err);
+ else
+ iscsi_conn_failure(conn, err);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_failure);
+
+void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+{
+ struct iscsi_session *session = conn->session;
+
+ spin_lock_bh(&session->frwd_lock);
+ if (session->state == ISCSI_STATE_FAILED) {
+ spin_unlock_bh(&session->frwd_lock);
+ return;
+ }
+
+ if (conn->stop_stage == 0)
+ session->state = ISCSI_STATE_FAILED;
+ spin_unlock_bh(&session->frwd_lock);
+
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ iscsi_conn_error_event(conn->cls_conn, err);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
+static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+{
+ struct iscsi_session *session = conn->session;
+
+ /*
+ * Check for iSCSI window and take care of CmdSN wrap-around
+ */
+ if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
+ ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
+ "%u MaxCmdSN %u CmdSN %u/%u\n",
+ session->exp_cmdsn, session->max_cmdsn,
+ session->cmdsn, session->queued_cmdsn);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+static int iscsi_xmit_task(struct iscsi_conn *conn)
+{
+ struct iscsi_task *task = conn->task;
+ int rc;
+
+ if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
+ return -ENODATA;
+
+ __iscsi_get_task(task);
+ spin_unlock_bh(&conn->session->frwd_lock);
+ rc = conn->session->tt->xmit_task(task);
+ spin_lock_bh(&conn->session->frwd_lock);
+ if (!rc) {
+ /* done with this task */
+ task->last_xfer = jiffies;
+ conn->task = NULL;
+ }
+ /* regular RX path uses back_lock */
+ spin_lock(&conn->session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock(&conn->session->back_lock);
+ return rc;
+}
+
+/**
+ * iscsi_requeue_task - requeue task to run from session workqueue
+ * @task: task to requeue
+ *
+ * LLDs that need to run a task from the session workqueue should call
+ * this. The session frwd_lock must be held. This should only be called
+ * by software drivers.
+ */
+void iscsi_requeue_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+
+ /*
+ * this may be on the requeue list already if the xmit_task callout
+ * is handling the r2ts while we are adding new ones
+ */
+ if (list_empty(&task->running))
+ list_add_tail(&task->running, &conn->requeue);
+ iscsi_conn_queue_work(conn);
+}
+EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+
+/**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+ * @conn: iscsi connection
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case the caller must
+ * re-schedule it again later or recover. '0' return code means
+ * successful xmit.
+ **/
+static int iscsi_data_xmit(struct iscsi_conn *conn)
+{
+ struct iscsi_task *task;
+ int rc = 0;
+
+ spin_lock_bh(&conn->session->frwd_lock);
+ if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
+ spin_unlock_bh(&conn->session->frwd_lock);
+ return -ENODATA;
+ }
+
+ if (conn->task) {
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
+ }
+
+ /*
+ * process mgmt pdus like nops before commands since we should
+ * only have one nop-out as a ping from us and targets should not
+ * overflow us with nop-ins
+ */
+check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+ conn->task = list_entry(conn->mgmtqueue.next,
+ struct iscsi_task, running);
+ list_del_init(&conn->task->running);
+ if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
+ __iscsi_put_task(conn->task);
+ spin_unlock_bh(&conn->session->back_lock);
+ conn->task = NULL;
+ continue;
+ }
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
+ }
+
+ /* process pending command queue */
+ while (!list_empty(&conn->cmdqueue)) {
+ conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&conn->task->running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+ fail_scsi_task(conn->task, DID_IMM_RETRY);
+ continue;
+ }
+ rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ if (rc) {
+ if (rc == -ENOMEM || rc == -EACCES) {
+ list_add_tail(&conn->task->running,
+ &conn->cmdqueue);
+ conn->task = NULL;
+ goto done;
+ } else
+ fail_scsi_task(conn->task, DID_ABORT);
+ continue;
+ }
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
+ /*
+ * we could continuously get new task requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to aviod starvation
+ */
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+
+ while (!list_empty(&conn->requeue)) {
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ task = list_entry(conn->requeue.next, struct iscsi_task,
+ running);
+ if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+ break;
+
+ conn->task = task;
+ list_del_init(&conn->task->running);
+ conn->task->state = ISCSI_TASK_RUNNING;
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+ spin_unlock_bh(&conn->session->frwd_lock);
+ return -ENODATA;
+
+done:
+ spin_unlock_bh(&conn->session->frwd_lock);
+ return rc;
+}
+
+static void iscsi_xmitworker(struct work_struct *work)
+{
+ struct iscsi_conn *conn =
+ container_of(work, struct iscsi_conn, xmitwork);
+ int rc;
+ /*
+ * serialize Xmit worker on a per-connection basis.
+ */
+ do {
+ rc = iscsi_data_xmit(conn);
+ } while (rc >= 0 || rc == -EAGAIN);
+}
+
+static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
+ struct scsi_cmnd *sc)
+{
+ struct iscsi_task *task;
+
+ if (!kfifo_out(&conn->session->cmdpool.queue,
+ (void *) &task, sizeof(void *)))
+ return NULL;
+
+ sc->SCp.phase = conn->session->age;
+ sc->SCp.ptr = (char *) task;
+
+ atomic_set(&task->refcount, 1);
+ task->state = ISCSI_TASK_PENDING;
+ task->conn = conn;
+ task->sc = sc;
+ task->have_checked_conn = false;
+ task->last_timeout = jiffies;
+ task->last_xfer = jiffies;
+ task->protected = false;
+ INIT_LIST_HEAD(&task->running);
+ return task;
+}
+
+enum {
+ FAILURE_BAD_HOST = 1,
+ FAILURE_SESSION_FAILED,
+ FAILURE_SESSION_FREED,
+ FAILURE_WINDOW_CLOSED,
+ FAILURE_OOM,
+ FAILURE_SESSION_TERMINATE,
+ FAILURE_SESSION_IN_RECOVERY,
+ FAILURE_SESSION_RECOVERY_TIMEOUT,
+ FAILURE_SESSION_LOGGING_OUT,
+ FAILURE_SESSION_NOT_READY,
+};
+
+int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_host *ihost;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+ struct iscsi_task *task = NULL;
+
+ sc->result = 0;
+ sc->SCp.ptr = NULL;
+
+ ihost = shost_priv(host);
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+ spin_lock_bh(&session->frwd_lock);
+
+ reason = iscsi_session_chkready(cls_session);
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+ }
+
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ /*
+ * to handle the race between when we set the recovery state
+ * and block the session we requeue here (commands could
+ * be entering our queuecommand while a block is starting
+ * up because the block code is not locked)
+ */
+ switch (session->state) {
+ case ISCSI_STATE_FAILED:
+ case ISCSI_STATE_IN_RECOVERY:
+ reason = FAILURE_SESSION_IN_RECOVERY;
+ sc->result = DID_IMM_RETRY << 16;
+ break;
+ case ISCSI_STATE_LOGGING_OUT:
+ reason = FAILURE_SESSION_LOGGING_OUT;
+ sc->result = DID_IMM_RETRY << 16;
+ break;
+ case ISCSI_STATE_RECOVERY_FAILED:
+ reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
+ sc->result = DID_TRANSPORT_FAILFAST << 16;
+ break;
+ case ISCSI_STATE_TERMINATE:
+ reason = FAILURE_SESSION_TERMINATE;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ default:
+ reason = FAILURE_SESSION_FREED;
+ sc->result = DID_NO_CONNECT << 16;
+ }
+ goto fault;
+ }
+
+ conn = session->leadconn;
+ if (!conn) {
+ reason = FAILURE_SESSION_FREED;
+ sc->result = DID_NO_CONNECT << 16;
+ goto fault;
+ }
+
+ if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ reason = FAILURE_SESSION_IN_RECOVERY;
+ sc->result = DID_REQUEUE;
+ goto fault;
+ }
+
+ if (iscsi_check_cmdsn_window_closed(conn)) {
+ reason = FAILURE_WINDOW_CLOSED;
+ goto reject;
+ }
+
+ task = iscsi_alloc_task(conn, sc);
+ if (!task) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
+
+ if (!ihost->workq) {
+ reason = iscsi_prep_scsi_cmd_pdu(task);
+ if (reason) {
+ if (reason == -ENOMEM || reason == -EACCES) {
+ reason = FAILURE_OOM;
+ goto prepd_reject;
+ } else {
+ sc->result = DID_ABORT << 16;
+ goto prepd_fault;
+ }
+ }
+ if (session->tt->xmit_task(task)) {
+ session->cmdsn--;
+ reason = FAILURE_SESSION_NOT_READY;
+ goto prepd_reject;
+ }
+ } else {
+ list_add_tail(&task->running, &conn->cmdqueue);
+ iscsi_conn_queue_work(conn);
+ }
+
+ session->queued_cmdsn++;
+ spin_unlock_bh(&session->frwd_lock);
+ return 0;
+
+prepd_reject:
+ iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
+reject:
+ spin_unlock_bh(&session->frwd_lock);
+ ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
+ sc->cmnd[0], reason);
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+prepd_fault:
+ iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
+fault:
+ spin_unlock_bh(&session->frwd_lock);
+ ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
+ sc->cmnd[0], reason);
+ if (!scsi_bidi_cmnd(sc))
+ scsi_set_resid(sc, scsi_bufflen(sc));
+ else {
+ scsi_out(sc)->resid = scsi_out(sc)->length;
+ scsi_in(sc)->resid = scsi_in(sc)->length;
+ }
+ sc->scsi_done(sc);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_queuecommand);
+
+int iscsi_target_alloc(struct scsi_target *starget)
+{
+ struct iscsi_cls_session *cls_session = starget_to_session(starget);
+ struct iscsi_session *session = cls_session->dd_data;
+
+ starget->can_queue = session->scsi_cmds_max;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_target_alloc);
+
+static void iscsi_tmf_timedout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *)data;
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&session->frwd_lock);
+ if (conn->tmf_state == TMF_QUEUED) {
+ conn->tmf_state = TMF_TIMEDOUT;
+ ISCSI_DBG_EH(session, "tmf timedout\n");
+ /* unblock eh_abort() */
+ wake_up(&conn->ehwait);
+ }
+ spin_unlock(&session->frwd_lock);
+}
+
+static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ struct iscsi_tm *hdr, int age,
+ int timeout)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task *task;
+
+ task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+ if (!task) {
+ spin_unlock_bh(&session->frwd_lock);
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->frwd_lock);
+ return -EPERM;
+ }
+ conn->tmfcmd_pdus_cnt++;
+ conn->tmf_timer.expires = timeout * HZ + jiffies;
+ conn->tmf_timer.function = iscsi_tmf_timedout;
+ conn->tmf_timer.data = (unsigned long)conn;
+ add_timer(&conn->tmf_timer);
+ ISCSI_DBG_EH(session, "tmf set timeout\n");
+
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+
+ /*
+ * block eh thread until:
+ *
+ * 1) tmf response
+ * 2) tmf timeout
+ * 3) session is terminated or restarted or userspace has
+ * given up on recovery
+ */
+ wait_event_interruptible(conn->ehwait, age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN ||
+ conn->tmf_state != TMF_QUEUED);
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&conn->tmf_timer);
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ /* if the session drops it will clean up the task */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+ return 0;
+}
+
+/*
+ * Fail commands. session lock held and recv side suspended and xmit
+ * thread flushed
+ */
+static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
+{
+ struct iscsi_task *task;
+ int i;
+
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ task = conn->session->cmds[i];
+ if (!task->sc || task->state == ISCSI_TASK_FREE)
+ continue;
+
+ if (lun != -1 && lun != task->sc->device->lun)
+ continue;
+
+ ISCSI_DBG_SESSION(conn->session,
+ "failing sc %p itt 0x%x state %d\n",
+ task->sc, task->itt, task->state);
+ fail_scsi_task(task, error);
+ }
+}
+
+/**
+ * iscsi_suspend_queue - suspend iscsi_queuecommand
+ * @conn: iscsi conn to stop queueing IO on
+ *
+ * This grabs the session frwd_lock to make sure no one is in
+ * xmit_task/queuecommand, and then sets suspend to prevent
+ * new commands from being queued. This only needs to be called
+ * by offload drivers that need to sync a path like ep disconnect
+ * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
+ * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
+ */
+void iscsi_suspend_queue(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->session->frwd_lock);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ spin_unlock_bh(&conn->session->frwd_lock);
+}
+EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
+
+/**
+ * iscsi_suspend_tx - suspend iscsi_data_xmit
+ * @conn: iscsi conn tp stop processing IO on.
+ *
+ * This function sets the suspend bit to prevent iscsi_data_xmit
+ * from sending new IO, and if work is queued on the xmit thread
+ * it will wait for it to be completed.
+ */
+void iscsi_suspend_tx(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ if (ihost->workq)
+ flush_workqueue(ihost->workq);
+}
+EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+static void iscsi_start_tx(struct iscsi_conn *conn)
+{
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ iscsi_conn_queue_work(conn);
+}
+
+/*
+ * We want to make sure a ping is in flight. It has timed out.
+ * And we are not busy processing a pdu that is making
+ * progress but got started before the ping and is taking a while
+ * to complete so the ping is just stuck behind it in a queue.
+ */
+static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
+{
+ if (conn->ping_task &&
+ time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ (conn->ping_timeout * HZ), jiffies))
+ return 1;
+ else
+ return 0;
+}
+
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+{
+ enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ struct iscsi_task *task = NULL, *running_task;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+ int i;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
+ ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
+
+ spin_lock(&session->frwd_lock);
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ if (!task) {
+ /*
+ * Raced with completion. Blk layer has taken ownership
+ * so let timeout code complete it now.
+ */
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ /*
+ * We are probably in the middle of iscsi recovery so let
+ * that complete and handle the error.
+ */
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
+ conn = session->leadconn;
+ if (!conn) {
+ /* In the middle of shuting down */
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
+ /*
+ * If we have sent (at least queued to the network layer) a pdu or
+ * recvd one for the task since the last timeout ask for
+ * more time. If on the next timeout we have not made progress
+ * we can check if it is the task or connection when we send the
+ * nop as a ping.
+ */
+ if (time_after(task->last_xfer, task->last_timeout)) {
+ ISCSI_DBG_EH(session, "Command making progress. Asking "
+ "scsi-ml for more time to complete. "
+ "Last data xfer at %lu. Last timeout was at "
+ "%lu\n.", task->last_xfer, task->last_timeout);
+ task->have_checked_conn = false;
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
+ if (!conn->recv_timeout && !conn->ping_timeout)
+ goto done;
+ /*
+ * if the ping timedout then we are in the middle of cleaning up
+ * and can let the iscsi eh handle it
+ */
+ if (iscsi_has_ping_timed_out(conn)) {
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ running_task = conn->session->cmds[i];
+ if (!running_task->sc || running_task == task ||
+ running_task->state != ISCSI_TASK_RUNNING)
+ continue;
+
+ /*
+ * Only check if cmds started before this one have made
+ * progress, or this could never fail
+ */
+ if (time_after(running_task->sc->jiffies_at_alloc,
+ task->sc->jiffies_at_alloc))
+ continue;
+
+ if (time_after(running_task->last_xfer, task->last_timeout)) {
+ /*
+ * This task has not made progress, but a task
+ * started before us has transferred data since
+ * we started/last-checked. We could be queueing
+ * too many tasks or the LU is bad.
+ *
+ * If the device is bad the cmds ahead of us on
+ * other devs will complete, and this loop will
+ * eventually fail starting the scsi eh.
+ */
+ ISCSI_DBG_EH(session, "Command has not made progress "
+ "but commands ahead of it have. "
+ "Asking scsi-ml for more time to "
+ "complete. Our last xfer vs running task "
+ "last xfer %lu/%lu. Last check %lu.\n",
+ task->last_xfer, running_task->last_xfer,
+ task->last_timeout);
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+ }
+
+ /* Assumes nop timeout is shorter than scsi cmd timeout */
+ if (task->have_checked_conn)
+ goto done;
+
+ /*
+ * Checking the transport already or nop from a cmd timeout still
+ * running
+ */
+ if (conn->ping_task) {
+ task->have_checked_conn = true;
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
+ /* Make sure there is a transport check done */
+ iscsi_send_nopout(conn, NULL);
+ task->have_checked_conn = true;
+ rc = BLK_EH_RESET_TIMER;
+
+done:
+ if (task)
+ task->last_timeout = jiffies;
+ spin_unlock(&session->frwd_lock);
+ ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+ "timer reset" : "nh");
+ return rc;
+}
+
+static void iscsi_check_transport_timeouts(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *)data;
+ struct iscsi_session *session = conn->session;
+ unsigned long recv_timeout, next_timeout = 0, last_recv;
+
+ spin_lock(&session->frwd_lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ goto done;
+
+ recv_timeout = conn->recv_timeout;
+ if (!recv_timeout)
+ goto done;
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+
+ if (iscsi_has_ping_timed_out(conn)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+ "expired, recv timeout %d, last rx %lu, "
+ "last ping %lu, now %lu\n",
+ conn->ping_timeout, conn->recv_timeout,
+ last_recv, conn->last_ping, jiffies);
+ spin_unlock(&session->frwd_lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT);
+ return;
+ }
+
+ if (time_before_eq(last_recv + recv_timeout, jiffies)) {
+ /* send a ping to try to provoke some traffic */
+ ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
+ iscsi_send_nopout(conn, NULL);
+ next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+ } else
+ next_timeout = last_recv + recv_timeout;
+
+ ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
+ mod_timer(&conn->transport_timer, next_timeout);
+done:
+ spin_unlock(&session->frwd_lock);
+}
+
+static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
+ struct iscsi_tm *hdr)
+{
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->lun = task->lun;
+ hdr->rtt = task->hdr_itt;
+ hdr->refcmdsn = task->cmdsn;
+}
+
+int iscsi_eh_abort(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+ struct iscsi_task *task;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
+ ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
+ "it completed.\n");
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+ }
+
+ /*
+ * If we are not logged in or we have started a new session
+ * then let the host reset code handle this
+ */
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
+ sc->SCp.phase != session->age) {
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ ISCSI_DBG_EH(session, "failing abort due to dropped "
+ "session.\n");
+ return FAILED;
+ }
+
+ conn = session->leadconn;
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
+ sc, task->itt);
+
+ /* task completed before time out */
+ if (!task->sc) {
+ ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
+ goto success;
+ }
+
+ if (task->state == ISCSI_TASK_PENDING) {
+ fail_scsi_task(task, DID_ABORT);
+ goto success;
+ }
+
+ /* only have one tmf outstanding at a time */
+ if (conn->tmf_state != TMF_INITIAL)
+ goto failed;
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+ iscsi_prep_abort_task_pdu(task, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+ goto failed;
+ }
+
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->frwd_lock);
+ /*
+ * stop tx side incase the target had sent a abort rsp but
+ * the initiator was still writing out data.
+ */
+ iscsi_suspend_tx(conn);
+ /*
+ * we do not stop the recv side because targets have been
+ * good and have never sent us a successful tmf response
+ * then sent more data for the cmd.
+ */
+ spin_lock_bh(&session->frwd_lock);
+ fail_scsi_task(task, DID_ABORT);
+ conn->tmf_state = TMF_INITIAL;
+ memset(hdr, 0, sizeof(*hdr));
+ spin_unlock_bh(&session->frwd_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+ spin_unlock_bh(&session->frwd_lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ goto failed_unlocked;
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+ memset(hdr, 0, sizeof(*hdr));
+ /* task completed before tmf abort response */
+ ISCSI_DBG_EH(session, "sc completed while abort in "
+ "progress\n");
+ goto success;
+ }
+ /* fall through */
+ default:
+ conn->tmf_state = TMF_INITIAL;
+ goto failed;
+ }
+
+success:
+ spin_unlock_bh(&session->frwd_lock);
+success_unlocked:
+ ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
+ sc, task->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+failed:
+ spin_unlock_bh(&session->frwd_lock);
+failed_unlocked:
+ ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
+ task ? task->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+
+static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+{
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ int_to_scsilun(sc->device->lun, &hdr->lun);
+ hdr->rtt = RESERVED_ITT;
+}
+
+int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
+ ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc,
+ sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ /*
+ * Just check if we are not logged in. We cannot check for
+ * the phase because the reset could come from a ioctl.
+ */
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+ goto unlock;
+ conn = session->leadconn;
+
+ /* only have one tmf outstanding at a time */
+ if (conn->tmf_state != TMF_INITIAL)
+ goto unlock;
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+ iscsi_prep_lun_reset_pdu(sc, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+ session->lu_reset_timeout)) {
+ rc = FAILED;
+ goto unlock;
+ }
+
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ break;
+ case TMF_TIMEDOUT:
+ spin_unlock_bh(&session->frwd_lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ goto done;
+ default:
+ conn->tmf_state = TMF_INITIAL;
+ goto unlock;
+ }
+
+ rc = SUCCESS;
+ spin_unlock_bh(&session->frwd_lock);
+
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ memset(hdr, 0, sizeof(*hdr));
+ fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock_bh(&session->frwd_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+
+unlock:
+ spin_unlock_bh(&session->frwd_lock);
+done:
+ ISCSI_DBG_EH(session, "dev reset result = %s\n",
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
+ mutex_unlock(&session->eh_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
+
+void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ session->state = ISCSI_STATE_RECOVERY_FAILED;
+ if (session->leadconn)
+ wake_up(&session->leadconn->ehwait);
+ }
+ spin_unlock_bh(&session->frwd_lock);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+/**
+ * iscsi_eh_session_reset - drop session and attempt relogin
+ * @sc: scsi command
+ *
+ * This function will wait for a relogin, session termination from
+ * userspace, or a recovery/replacement timeout.
+ */
+int iscsi_eh_session_reset(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+ conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+failed:
+ ISCSI_DBG_EH(session,
+ "failing session reset: Could not log back into "
+ "%s, %s [age %d]\n", session->targetname,
+ conn->persistent_address, session->age);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ /*
+ * we drop the lock here but the leadconn cannot be destoyed while
+ * we are in the scsi eh
+ */
+ iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+
+ ISCSI_DBG_EH(session, "wait for relogin\n");
+ wait_event_interruptible(conn->ehwait,
+ session->state == ISCSI_STATE_TERMINATE ||
+ session->state == ISCSI_STATE_LOGGED_IN ||
+ session->state == ISCSI_STATE_RECOVERY_FAILED);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ if (session->state == ISCSI_STATE_LOGGED_IN) {
+ ISCSI_DBG_EH(session,
+ "session reset succeeded for %s,%s\n",
+ session->targetname, conn->persistent_address);
+ } else
+ goto failed;
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
+
+static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+{
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->rtt = RESERVED_ITT;
+}
+
+/**
+ * iscsi_eh_target_reset - reset target
+ * @sc: scsi command
+ *
+ * This will attempt to send a warm target reset.
+ */
+int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
+ ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
+ session->targetname);
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ /*
+ * Just check if we are not logged in. We cannot check for
+ * the phase because the reset could come from a ioctl.
+ */
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+ goto unlock;
+ conn = session->leadconn;
+
+ /* only have one tmf outstanding at a time */
+ if (conn->tmf_state != TMF_INITIAL)
+ goto unlock;
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+ iscsi_prep_tgt_reset_pdu(sc, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+ session->tgt_reset_timeout)) {
+ rc = FAILED;
+ goto unlock;
+ }
+
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ break;
+ case TMF_TIMEDOUT:
+ spin_unlock_bh(&session->frwd_lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ goto done;
+ default:
+ conn->tmf_state = TMF_INITIAL;
+ goto unlock;
+ }
+
+ rc = SUCCESS;
+ spin_unlock_bh(&session->frwd_lock);
+
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ memset(hdr, 0, sizeof(*hdr));
+ fail_scsi_tasks(conn, -1, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock_bh(&session->frwd_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+
+unlock:
+ spin_unlock_bh(&session->frwd_lock);
+done:
+ ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
+ mutex_unlock(&session->eh_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
+
+/**
+ * iscsi_eh_recover_target - reset target and possibly the session
+ * @sc: scsi command
+ *
+ * This will attempt to send a warm target reset. If that fails,
+ * we will escalate to ERL0 session recovery.
+ */
+int iscsi_eh_recover_target(struct scsi_cmnd *sc)
+{
+ int rc;
+
+ rc = iscsi_eh_target_reset(sc);
+ if (rc == FAILED)
+ rc = iscsi_eh_session_reset(sc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
+
+/*
+ * Pre-allocate a pool of @max items of @item_size. By default, the pool
+ * should be accessed via kfifo_{get,put} on q->queue.
+ * Optionally, the caller can obtain the array of object pointers
+ * by passing in a non-NULL @items pointer
+ */
+int
+iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
+{
+ int i, num_arrays = 1;
+
+ memset(q, 0, sizeof(*q));
+
+ q->max = max;
+
+ /* If the user passed an items pointer, he wants a copy of
+ * the array. */
+ if (items)
+ num_arrays++;
+ q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+ if (q->pool == NULL)
+ return -ENOMEM;
+
+ kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
+
+ for (i = 0; i < max; i++) {
+ q->pool[i] = kzalloc(item_size, GFP_KERNEL);
+ if (q->pool[i] == NULL) {
+ q->max = i;
+ goto enomem;
+ }
+ kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
+ }
+
+ if (items) {
+ *items = q->pool + max;
+ memcpy(*items, q->pool, max * sizeof(void *));
+ }
+
+ return 0;
+
+enomem:
+ iscsi_pool_free(q);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(iscsi_pool_init);
+
+void iscsi_pool_free(struct iscsi_pool *q)
+{
+ int i;
+
+ for (i = 0; i < q->max; i++)
+ kfree(q->pool[i]);
+ kfree(q->pool);
+}
+EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+/**
+ * iscsi_host_add - add host to system
+ * @shost: scsi host
+ * @pdev: parent device
+ *
+ * This should be called by partial offload and software iscsi drivers
+ * to add a host to the system.
+ */
+int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+{
+ if (!shost->can_queue)
+ shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+
+ if (!shost->cmd_per_lun)
+ shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
+
+ if (!shost->transportt->eh_timed_out)
+ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+ return scsi_add_host(shost, pdev);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_add);
+
+/**
+ * iscsi_host_alloc - allocate a host and driver data
+ * @sht: scsi host template
+ * @dd_data_size: driver host data size
+ * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
+ *
+ * This should be called by partial offload and software iscsi drivers.
+ * To access the driver specific memory use the iscsi_host_priv() macro.
+ */
+struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+ int dd_data_size, bool xmit_can_sleep)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_host *ihost;
+
+ shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+ if (!shost)
+ return NULL;
+ ihost = shost_priv(shost);
+
+ if (xmit_can_sleep) {
+ snprintf(ihost->workq_name, sizeof(ihost->workq_name),
+ "iscsi_q_%d", shost->host_no);
+ ihost->workq = create_singlethread_workqueue(ihost->workq_name);
+ if (!ihost->workq)
+ goto free_host;
+ }
+
+ spin_lock_init(&ihost->lock);
+ ihost->state = ISCSI_HOST_SETUP;
+ ihost->num_sessions = 0;
+ init_waitqueue_head(&ihost->session_removal_wq);
+ return shost;
+
+free_host:
+ scsi_host_put(shost);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+
+static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
+}
+
+/**
+ * iscsi_host_remove - remove host and sessions
+ * @shost: scsi host
+ *
+ * If there are any sessions left, this will initiate the removal and wait
+ * for the completion.
+ */
+void iscsi_host_remove(struct Scsi_Host *shost)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ ihost->state = ISCSI_HOST_REMOVED;
+ spin_unlock_irqrestore(&ihost->lock, flags);
+
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ wait_event_interruptible(ihost->session_removal_wq,
+ ihost->num_sessions == 0);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ scsi_remove_host(shost);
+ if (ihost->workq)
+ destroy_workqueue(ihost->workq);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+void iscsi_host_free(struct Scsi_Host *shost)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ kfree(ihost->netdev);
+ kfree(ihost->hwaddress);
+ kfree(ihost->initiatorname);
+ scsi_host_put(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_free);
+
+static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+ unsigned long flags;
+
+ shost = scsi_host_get(shost);
+ if (!shost) {
+ printk(KERN_ERR "Invalid state. Cannot notify host removal "
+ "of session teardown event because host already "
+ "removed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ ihost->num_sessions--;
+ if (ihost->num_sessions == 0)
+ wake_up(&ihost->session_removal_wq);
+ spin_unlock_irqrestore(&ihost->lock, flags);
+ scsi_host_put(shost);
+}
+
+/**
+ * iscsi_session_setup - create iscsi cls session and host and session
+ * @iscsit: iscsi transport template
+ * @shost: scsi host
+ * @cmds_max: session can queue
+ * @cmd_task_size: LLD task private data size
+ * @initial_cmdsn: initial CmdSN
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+ *
+ * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+ * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+ * for nop handling and login/logout requests.
+ */
+struct iscsi_cls_session *
+iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+ uint16_t cmds_max, int dd_size, int cmd_task_size,
+ uint32_t initial_cmdsn, unsigned int id)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+ int cmd_i, scsi_cmds, total_cmds = cmds_max;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ if (ihost->state == ISCSI_HOST_REMOVED) {
+ spin_unlock_irqrestore(&ihost->lock, flags);
+ return NULL;
+ }
+ ihost->num_sessions++;
+ spin_unlock_irqrestore(&ihost->lock, flags);
+
+ if (!total_cmds)
+ total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+ /*
+ * The iscsi layer needs some tasks for nop handling and tmfs,
+ * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+ * + 1 command for scsi IO.
+ */
+ if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+ "must be a power of two that is at least %d.\n",
+ total_cmds, ISCSI_TOTAL_CMDS_MIN);
+ goto dec_session_count;
+ }
+
+ if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+ "must be a power of 2 less than or equal to %d.\n",
+ cmds_max, ISCSI_TOTAL_CMDS_MAX);
+ total_cmds = ISCSI_TOTAL_CMDS_MAX;
+ }
+
+ if (!is_power_of_2(total_cmds)) {
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+ "must be a power of 2.\n", total_cmds);
+ total_cmds = rounddown_pow_of_two(total_cmds);
+ if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+ return NULL;
+ printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+ total_cmds);
+ }
+ scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+ cls_session = iscsi_alloc_session(shost, iscsit,
+ sizeof(struct iscsi_session) +
+ dd_size);
+ if (!cls_session)
+ goto dec_session_count;
+ session = cls_session->dd_data;
+ session->cls_session = cls_session;
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->tgt_reset_timeout = 30;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+ session->scsi_cmds_max = scsi_cmds;
+ session->cmds_max = total_cmds;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ session->dd_data = cls_session->dd_data + sizeof(*session);
+
+ mutex_init(&session->eh_mutex);
+ spin_lock_init(&session->frwd_lock);
+ spin_lock_init(&session->back_lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+ cmd_task_size + sizeof(struct iscsi_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+ struct iscsi_task *task = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+ task->dd_data = &task[1];
+ task->itt = cmd_i;
+ task->state = ISCSI_TASK_FREE;
+ INIT_LIST_HEAD(&task->running);
+ }
+
+ if (!try_module_get(iscsit->owner))
+ goto module_get_fail;
+
+ if (iscsi_add_session(cls_session, id))
+ goto cls_session_fail;
+
+ return cls_session;
+
+cls_session_fail:
+ module_put(iscsit->owner);
+module_get_fail:
+ iscsi_pool_free(&session->cmdpool);
+cmdpool_alloc_fail:
+ iscsi_free_session(cls_session);
+dec_session_count:
+ iscsi_host_dec_session_cnt(shost);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+/**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+ * @cls_session: iscsi session
+ *
+ * The driver must have called iscsi_remove_session before
+ * calling this.
+ */
+void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct module *owner = cls_session->transport->owner;
+ struct Scsi_Host *shost = session->host;
+
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
+ kfree(session->password_in);
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
+ kfree(session->targetalias);
+ kfree(session->initiatorname);
+ kfree(session->boot_root);
+ kfree(session->boot_nic);
+ kfree(session->boot_target);
+ kfree(session->ifacename);
+ kfree(session->portal_type);
+ kfree(session->discovery_parent_type);
+
+ iscsi_destroy_session(cls_session);
+ iscsi_host_dec_session_cnt(shost);
+ module_put(owner);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+
+/**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+ * @dd_size: private driver data size
+ * @conn_idx: cid
+ */
+struct iscsi_cls_conn *
+iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ uint32_t conn_idx)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+ cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+ conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+ memset(conn, 0, sizeof(*conn) + dd_size);
+
+ conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+ conn->id = conn_idx;
+ conn->exp_statsn = 0;
+ conn->tmf_state = TMF_INITIAL;
+
+ init_timer(&conn->transport_timer);
+ conn->transport_timer.data = (unsigned long)conn;
+ conn->transport_timer.function = iscsi_check_transport_timeouts;
+
+ INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->cmdqueue);
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+ /* allocate login_task used for the login/text sequences */
+ spin_lock_bh(&session->frwd_lock);
+ if (!kfifo_out(&session->cmdpool.queue,
+ (void*)&conn->login_task,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->frwd_lock);
+ goto login_task_alloc_fail;
+ }
+ spin_unlock_bh(&session->frwd_lock);
+
+ data = (char *) __get_free_pages(GFP_KERNEL,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+ if (!data)
+ goto login_task_data_alloc_fail;
+ conn->login_task->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+login_task_data_alloc_fail:
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
+ sizeof(void*));
+login_task_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_setup);
+
+/**
+ * iscsi_conn_teardown - teardown iscsi connection
+ * cls_conn: iscsi class connection
+ *
+ * TODO: we may need to make this into a two step process
+ * like scsi-mls remove + put host
+ */
+void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ unsigned long flags;
+
+ del_timer_sync(&conn->transport_timer);
+
+ spin_lock_bh(&session->frwd_lock);
+ conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+ if (session->leadconn == conn) {
+ /*
+ * leading connection? then give up on recovery.
+ */
+ session->state = ISCSI_STATE_TERMINATE;
+ wake_up(&conn->ehwait);
+ }
+ spin_unlock_bh(&session->frwd_lock);
+
+ /*
+ * Block until all in-progress commands for this connection
+ * time out or fail.
+ */
+ for (;;) {
+ spin_lock_irqsave(session->host->host_lock, flags);
+ if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
+ spin_unlock_irqrestore(session->host->host_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(session->host->host_lock, flags);
+ msleep_interruptible(500);
+ iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+ "host_busy %d host_failed %d\n",
+ atomic_read(&session->host->host_busy),
+ session->host->host_failed);
+ /*
+ * force eh_abort() to unblock
+ */
+ wake_up(&conn->ehwait);
+ }
+
+ /* flush queued up work because we free the connection below */
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ free_pages((unsigned long) conn->data,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+ kfree(conn->persistent_address);
+ kfree(conn->local_ipaddr);
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
+ sizeof(void*));
+ spin_unlock_bh(&session->back_lock);
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+ spin_unlock_bh(&session->frwd_lock);
+
+ iscsi_destroy_conn(cls_conn);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
+
+int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+ if (!session) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "can't start unbound connection\n");
+ return -EPERM;
+ }
+
+ if ((session->imm_data_en || !session->initial_r2t_en) &&
+ session->first_burst > session->max_burst) {
+ iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
+ "first_burst %d max_burst %d\n",
+ session->first_burst, session->max_burst);
+ return -EINVAL;
+ }
+
+ if (conn->ping_timeout && !conn->recv_timeout) {
+ iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
+ "zero. Using 5 seconds\n.");
+ conn->recv_timeout = 5;
+ }
+
+ if (conn->recv_timeout && !conn->ping_timeout) {
+ iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
+ "zero. Using 5 seconds.\n");
+ conn->ping_timeout = 5;
+ }
+
+ spin_lock_bh(&session->frwd_lock);
+ conn->c_stage = ISCSI_CONN_STARTED;
+ session->state = ISCSI_STATE_LOGGED_IN;
+ session->queued_cmdsn = session->cmdsn;
+
+ conn->last_recv = jiffies;
+ conn->last_ping = jiffies;
+ if (conn->recv_timeout && conn->ping_timeout)
+ mod_timer(&conn->transport_timer,
+ jiffies + (conn->recv_timeout * HZ));
+
+ switch(conn->stop_stage) {
+ case STOP_CONN_RECOVER:
+ /*
+ * unblock eh_abort() if it is blocked. re-try all
+ * commands after successful recovery
+ */
+ conn->stop_stage = 0;
+ conn->tmf_state = TMF_INITIAL;
+ session->age++;
+ if (session->age == 16)
+ session->age = 0;
+ break;
+ case STOP_CONN_TERM:
+ conn->stop_stage = 0;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_bh(&session->frwd_lock);
+
+ iscsi_unblock_session(session->cls_session);
+ wake_up(&conn->ehwait);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_start);
+
+static void
+fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
+{
+ struct iscsi_task *task;
+ int i, state;
+
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ task = conn->session->cmds[i];
+ if (task->sc)
+ continue;
+
+ if (task->state == ISCSI_TASK_FREE)
+ continue;
+
+ ISCSI_DBG_SESSION(conn->session,
+ "failing mgmt itt 0x%x state %d\n",
+ task->itt, task->state);
+ state = ISCSI_TASK_ABRT_SESS_RECOV;
+ if (task->state == ISCSI_TASK_PENDING)
+ state = ISCSI_TASK_COMPLETED;
+ iscsi_complete_task(task, state);
+
+ }
+}
+
+static void iscsi_start_session_recovery(struct iscsi_session *session,
+ struct iscsi_conn *conn, int flag)
+{
+ int old_stop_stage;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->frwd_lock);
+ if (conn->stop_stage == STOP_CONN_TERM) {
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ return;
+ }
+
+ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+ */
+ if (flag == STOP_CONN_TERM)
+ session->state = ISCSI_STATE_TERMINATE;
+ else if (conn->stop_stage != STOP_CONN_RECOVER)
+ session->state = ISCSI_STATE_IN_RECOVERY;
+
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
+ spin_unlock_bh(&session->frwd_lock);
+
+ del_timer_sync(&conn->transport_timer);
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ conn->c_stage = ISCSI_CONN_STOPPED;
+ spin_unlock_bh(&session->frwd_lock);
+
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+ * in hdr_extract() and will be re-negotiated at
+ * set_param() time.
+ */
+ if (flag == STOP_CONN_RECOVER) {
+ conn->hdrdgst_en = 0;
+ conn->datadgst_en = 0;
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ ISCSI_DBG_SESSION(session, "blocking session\n");
+ iscsi_block_session(session->cls_session);
+ }
+ }
+
+ /*
+ * flush queues.
+ */
+ spin_lock_bh(&session->frwd_lock);
+ fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
+ fail_mgmt_tasks(session, conn);
+ memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+}
+
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ case STOP_CONN_TERM:
+ iscsi_start_session_recovery(session, conn, flag);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid stop flag %d\n", flag);
+ }
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+
+int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ if (is_leading)
+ session->leadconn = conn;
+ spin_unlock_bh(&session->frwd_lock);
+
+ /*
+ * Unblock xmitworker(), Login Phase will pass through.
+ */
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_bind);
+
+int iscsi_switch_str_param(char **param, char *new_val_buf)
+{
+ char *new_val;
+
+ if (*param) {
+ if (!strcmp(*param, new_val_buf))
+ return 0;
+ }
+
+ new_val = kstrdup(new_val_buf, GFP_NOIO);
+ if (!new_val)
+ return -ENOMEM;
+
+ kfree(*param);
+ *param = new_val;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
+
+int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ int val;
+
+ switch(param) {
+ case ISCSI_PARAM_FAST_ABORT:
+ sscanf(buf, "%d", &session->fast_abort);
+ break;
+ case ISCSI_PARAM_ABORT_TMO:
+ sscanf(buf, "%d", &session->abort_timeout);
+ break;
+ case ISCSI_PARAM_LU_RESET_TMO:
+ sscanf(buf, "%d", &session->lu_reset_timeout);
+ break;
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ sscanf(buf, "%d", &session->tgt_reset_timeout);
+ break;
+ case ISCSI_PARAM_PING_TMO:
+ sscanf(buf, "%d", &conn->ping_timeout);
+ break;
+ case ISCSI_PARAM_RECV_TMO:
+ sscanf(buf, "%d", &conn->recv_timeout);
+ break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ sscanf(buf, "%d", &conn->max_recv_dlength);
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ sscanf(buf, "%d", &conn->max_xmit_dlength);
+ break;
+ case ISCSI_PARAM_HDRDGST_EN:
+ sscanf(buf, "%d", &conn->hdrdgst_en);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ sscanf(buf, "%d", &conn->datadgst_en);
+ break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ sscanf(buf, "%d", &session->initial_r2t_en);
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ sscanf(buf, "%hu", &session->max_r2t);
+ break;
+ case ISCSI_PARAM_IMM_DATA_EN:
+ sscanf(buf, "%d", &session->imm_data_en);
+ break;
+ case ISCSI_PARAM_FIRST_BURST:
+ sscanf(buf, "%d", &session->first_burst);
+ break;
+ case ISCSI_PARAM_MAX_BURST:
+ sscanf(buf, "%d", &session->max_burst);
+ break;
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ sscanf(buf, "%d", &session->pdu_inorder_en);
+ break;
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ sscanf(buf, "%d", &session->dataseq_inorder_en);
+ break;
+ case ISCSI_PARAM_ERL:
+ sscanf(buf, "%d", &session->erl);
+ break;
+ case ISCSI_PARAM_EXP_STATSN:
+ sscanf(buf, "%u", &conn->exp_statsn);
+ break;
+ case ISCSI_PARAM_USERNAME:
+ return iscsi_switch_str_param(&session->username, buf);
+ case ISCSI_PARAM_USERNAME_IN:
+ return iscsi_switch_str_param(&session->username_in, buf);
+ case ISCSI_PARAM_PASSWORD:
+ return iscsi_switch_str_param(&session->password, buf);
+ case ISCSI_PARAM_PASSWORD_IN:
+ return iscsi_switch_str_param(&session->password_in, buf);
+ case ISCSI_PARAM_TARGET_NAME:
+ return iscsi_switch_str_param(&session->targetname, buf);
+ case ISCSI_PARAM_TARGET_ALIAS:
+ return iscsi_switch_str_param(&session->targetalias, buf);
+ case ISCSI_PARAM_TPGT:
+ sscanf(buf, "%d", &session->tpgt);
+ break;
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ sscanf(buf, "%d", &conn->persistent_port);
+ break;
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ return iscsi_switch_str_param(&conn->persistent_address, buf);
+ case ISCSI_PARAM_IFACE_NAME:
+ return iscsi_switch_str_param(&session->ifacename, buf);
+ case ISCSI_PARAM_INITIATOR_NAME:
+ return iscsi_switch_str_param(&session->initiatorname, buf);
+ case ISCSI_PARAM_BOOT_ROOT:
+ return iscsi_switch_str_param(&session->boot_root, buf);
+ case ISCSI_PARAM_BOOT_NIC:
+ return iscsi_switch_str_param(&session->boot_nic, buf);
+ case ISCSI_PARAM_BOOT_TARGET:
+ return iscsi_switch_str_param(&session->boot_target, buf);
+ case ISCSI_PARAM_PORTAL_TYPE:
+ return iscsi_switch_str_param(&session->portal_type, buf);
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ return iscsi_switch_str_param(&session->discovery_parent_type,
+ buf);
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ sscanf(buf, "%d", &val);
+ session->discovery_sess = !!val;
+ break;
+ case ISCSI_PARAM_LOCAL_IPADDR:
+ return iscsi_switch_str_param(&conn->local_ipaddr, buf);
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_set_param);
+
+int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ int len;
+
+ switch(param) {
+ case ISCSI_PARAM_FAST_ABORT:
+ len = sprintf(buf, "%d\n", session->fast_abort);
+ break;
+ case ISCSI_PARAM_ABORT_TMO:
+ len = sprintf(buf, "%d\n", session->abort_timeout);
+ break;
+ case ISCSI_PARAM_LU_RESET_TMO:
+ len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+ break;
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
+ break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ len = sprintf(buf, "%hu\n", session->max_r2t);
+ break;
+ case ISCSI_PARAM_IMM_DATA_EN:
+ len = sprintf(buf, "%d\n", session->imm_data_en);
+ break;
+ case ISCSI_PARAM_FIRST_BURST:
+ len = sprintf(buf, "%u\n", session->first_burst);
+ break;
+ case ISCSI_PARAM_MAX_BURST:
+ len = sprintf(buf, "%u\n", session->max_burst);
+ break;
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ len = sprintf(buf, "%d\n", session->pdu_inorder_en);
+ break;
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
+ break;
+ case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+ len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
+ break;
+ case ISCSI_PARAM_ERL:
+ len = sprintf(buf, "%d\n", session->erl);
+ break;
+ case ISCSI_PARAM_TARGET_NAME:
+ len = sprintf(buf, "%s\n", session->targetname);
+ break;
+ case ISCSI_PARAM_TARGET_ALIAS:
+ len = sprintf(buf, "%s\n", session->targetalias);
+ break;
+ case ISCSI_PARAM_TPGT:
+ len = sprintf(buf, "%d\n", session->tpgt);
+ break;
+ case ISCSI_PARAM_USERNAME:
+ len = sprintf(buf, "%s\n", session->username);
+ break;
+ case ISCSI_PARAM_USERNAME_IN:
+ len = sprintf(buf, "%s\n", session->username_in);
+ break;
+ case ISCSI_PARAM_PASSWORD:
+ len = sprintf(buf, "%s\n", session->password);
+ break;
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+ case ISCSI_PARAM_IFACE_NAME:
+ len = sprintf(buf, "%s\n", session->ifacename);
+ break;
+ case ISCSI_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
+ case ISCSI_PARAM_BOOT_ROOT:
+ len = sprintf(buf, "%s\n", session->boot_root);
+ break;
+ case ISCSI_PARAM_BOOT_NIC:
+ len = sprintf(buf, "%s\n", session->boot_nic);
+ break;
+ case ISCSI_PARAM_BOOT_TARGET:
+ len = sprintf(buf, "%s\n", session->boot_target);
+ break;
+ case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+ len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
+ break;
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ len = sprintf(buf, "%u\n", session->discovery_sess);
+ break;
+ case ISCSI_PARAM_PORTAL_TYPE:
+ len = sprintf(buf, "%s\n", session->portal_type);
+ break;
+ case ISCSI_PARAM_CHAP_AUTH_EN:
+ len = sprintf(buf, "%u\n", session->chap_auth_en);
+ break;
+ case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+ len = sprintf(buf, "%u\n", session->discovery_logout_en);
+ break;
+ case ISCSI_PARAM_BIDI_CHAP_EN:
+ len = sprintf(buf, "%u\n", session->bidi_chap_en);
+ break;
+ case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ len = sprintf(buf, "%u\n", session->discovery_auth_optional);
+ break;
+ case ISCSI_PARAM_DEF_TIME2WAIT:
+ len = sprintf(buf, "%d\n", session->time2wait);
+ break;
+ case ISCSI_PARAM_DEF_TIME2RETAIN:
+ len = sprintf(buf, "%d\n", session->time2retain);
+ break;
+ case ISCSI_PARAM_TSID:
+ len = sprintf(buf, "%u\n", session->tsid);
+ break;
+ case ISCSI_PARAM_ISID:
+ len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ session->isid[0], session->isid[1],
+ session->isid[2], session->isid[3],
+ session->isid[4], session->isid[5]);
+ break;
+ case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+ len = sprintf(buf, "%u\n", session->discovery_parent_idx);
+ break;
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ if (session->discovery_parent_type)
+ len = sprintf(buf, "%s\n",
+ session->discovery_parent_type);
+ else
+ len = sprintf(buf, "\n");
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_session_get_param);
+
+int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf)
+{
+ struct sockaddr_in6 *sin6 = NULL;
+ struct sockaddr_in *sin = NULL;
+ int len;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (sin)
+ len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ break;
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_LOCAL_PORT:
+ if (sin)
+ len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ else
+ len = sprintf(buf, "%hu\n",
+ be16_to_cpu(sin6->sin6_port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
+
+int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ int len;
+
+ switch(param) {
+ case ISCSI_PARAM_PING_TMO:
+ len = sprintf(buf, "%u\n", conn->ping_timeout);
+ break;
+ case ISCSI_PARAM_RECV_TMO:
+ len = sprintf(buf, "%u\n", conn->recv_timeout);
+ break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
+ break;
+ case ISCSI_PARAM_HDRDGST_EN:
+ len = sprintf(buf, "%d\n", conn->hdrdgst_en);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ len = sprintf(buf, "%d\n", conn->datadgst_en);
+ break;
+ case ISCSI_PARAM_IFMARKER_EN:
+ len = sprintf(buf, "%d\n", conn->ifmarker_en);
+ break;
+ case ISCSI_PARAM_OFMARKER_EN:
+ len = sprintf(buf, "%d\n", conn->ofmarker_en);
+ break;
+ case ISCSI_PARAM_EXP_STATSN:
+ len = sprintf(buf, "%u\n", conn->exp_statsn);
+ break;
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ len = sprintf(buf, "%d\n", conn->persistent_port);
+ break;
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ len = sprintf(buf, "%s\n", conn->persistent_address);
+ break;
+ case ISCSI_PARAM_STATSN:
+ len = sprintf(buf, "%u\n", conn->statsn);
+ break;
+ case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+ len = sprintf(buf, "%u\n", conn->max_segment_size);
+ break;
+ case ISCSI_PARAM_KEEPALIVE_TMO:
+ len = sprintf(buf, "%u\n", conn->keepalive_tmo);
+ break;
+ case ISCSI_PARAM_LOCAL_PORT:
+ len = sprintf(buf, "%u\n", conn->local_port);
+ break;
+ case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+ len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
+ break;
+ case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+ len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
+ break;
+ case ISCSI_PARAM_TCP_WSF_DISABLE:
+ len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
+ break;
+ case ISCSI_PARAM_TCP_TIMER_SCALE:
+ len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
+ break;
+ case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+ len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
+ break;
+ case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+ len = sprintf(buf, "%u\n", conn->fragment_disable);
+ break;
+ case ISCSI_PARAM_IPV4_TOS:
+ len = sprintf(buf, "%u\n", conn->ipv4_tos);
+ break;
+ case ISCSI_PARAM_IPV6_TC:
+ len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
+ break;
+ case ISCSI_PARAM_IPV6_FLOW_LABEL:
+ len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
+ break;
+ case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+ len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
+ break;
+ case ISCSI_PARAM_TCP_XMIT_WSF:
+ len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
+ break;
+ case ISCSI_PARAM_TCP_RECV_WSF:
+ len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
+ break;
+ case ISCSI_PARAM_LOCAL_IPADDR:
+ len = sprintf(buf, "%s\n", conn->local_ipaddr);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+
+int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "%s\n", ihost->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sprintf(buf, "%s\n", ihost->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", ihost->initiatorname);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+
+int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ return iscsi_switch_str_param(&ihost->netdev, buf);
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ return iscsi_switch_str_param(&ihost->hwaddress, buf);
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return iscsi_switch_str_param(&ihost->initiatorname, buf);
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+
+MODULE_AUTHOR("Mike Christie");
+MODULE_DESCRIPTION("iSCSI library functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
new file mode 100644
index 000000000..60cb6dc3c
--- /dev/null
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -0,0 +1,1214 @@
+/*
+ * iSCSI over TCP/IP Data-Path lib
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 - 2006 Mike Christie
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Credits:
+ * Christoph Hellwig
+ * FUJITA Tomonori
+ * Arne Redlich
+ * Zhenyu Wang
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <linux/module.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "iscsi_tcp.h"
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+ "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+ "Alex Aizman <itn780@yahoo.com>");
+MODULE_DESCRIPTION("iSCSI/TCP data-path");
+MODULE_LICENSE("GPL");
+
+static int iscsi_dbg_libtcp;
+module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
+ "module. Set to 1 to turn on, and zero to turn off. Default "
+ "is off.");
+
+#define ISCSI_DBG_TCP(_conn, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_dbg_libtcp) \
+ iscsi_conn_printk(KERN_INFO, _conn, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment);
+
+/*
+ * Scatterlist handling: inside the iscsi_segment, we
+ * remember an index into the scatterlist, and set data/size
+ * to the current scatterlist entry. For highmem pages, we
+ * kmap as needed.
+ *
+ * Note that the page is unmapped when we return from
+ * TCP's data_ready handler, so we may end up mapping and
+ * unmapping the same page repeatedly. The whole reason
+ * for this is that we shouldn't keep the page mapped
+ * outside the softirq.
+ */
+
+/**
+ * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+ * @segment: the buffer object
+ * @sg: scatterlist
+ * @offset: byte offset into that sg entry
+ *
+ * This function sets up the segment so that subsequent
+ * data is copied to the indicated sg entry, at the given
+ * offset.
+ */
+static inline void
+iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+ struct scatterlist *sg, unsigned int offset)
+{
+ segment->sg = sg;
+ segment->sg_offset = offset;
+ segment->size = min(sg->length - offset,
+ segment->total_size - segment->total_copied);
+ segment->data = NULL;
+}
+
+/**
+ * iscsi_tcp_segment_map - map the current S/G page
+ * @segment: iscsi_segment
+ * @recv: 1 if called from recv path
+ *
+ * We only need to possibly kmap data if scatter lists are being used,
+ * because the iscsi passthrough and internal IO paths will never use high
+ * mem pages.
+ */
+static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
+{
+ struct scatterlist *sg;
+
+ if (segment->data != NULL || !segment->sg)
+ return;
+
+ sg = segment->sg;
+ BUG_ON(segment->sg_mapped);
+ BUG_ON(sg->length == 0);
+
+ /*
+ * If the page count is greater than one it is ok to send
+ * to the network layer's zero copy send path. If not we
+ * have to go the slow sendmsg path. We always map for the
+ * recv path.
+ */
+ if (page_count(sg_page(sg)) >= 1 && !recv)
+ return;
+
+ if (recv) {
+ segment->atomic_mapped = true;
+ segment->sg_mapped = kmap_atomic(sg_page(sg));
+ } else {
+ segment->atomic_mapped = false;
+ /* the xmit path can sleep with the page mapped so use kmap */
+ segment->sg_mapped = kmap(sg_page(sg));
+ }
+
+ segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
+}
+
+void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
+{
+ if (segment->sg_mapped) {
+ if (segment->atomic_mapped)
+ kunmap_atomic(segment->sg_mapped);
+ else
+ kunmap(sg_page(segment->sg));
+ segment->sg_mapped = NULL;
+ segment->data = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
+
+/*
+ * Splice the digest buffer into the buffer
+ */
+static inline void
+iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
+{
+ segment->data = digest;
+ segment->digest_len = ISCSI_DIGEST_SIZE;
+ segment->total_size += ISCSI_DIGEST_SIZE;
+ segment->size = ISCSI_DIGEST_SIZE;
+ segment->copied = 0;
+ segment->sg = NULL;
+ segment->hash = NULL;
+}
+
+/**
+ * iscsi_tcp_segment_done - check whether the segment is complete
+ * @tcp_conn: iscsi tcp connection
+ * @segment: iscsi segment to check
+ * @recv: set to one of this is called from the recv path
+ * @copied: number of bytes copied
+ *
+ * Check if we're done receiving this segment. If the receive
+ * buffer is full but we expect more data, move on to the
+ * next entry in the scatterlist.
+ *
+ * If the amount of data we received isn't a multiple of 4,
+ * we will transparently receive the pad bytes, too.
+ *
+ * This function must be re-entrant.
+ */
+int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment, int recv,
+ unsigned copied)
+{
+ struct scatterlist sg;
+ unsigned int pad;
+
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
+ segment->copied, copied, segment->size,
+ recv ? "recv" : "xmit");
+ if (segment->hash && copied) {
+ /*
+ * If a segment is kmapd we must unmap it before sending
+ * to the crypto layer since that will try to kmap it again.
+ */
+ iscsi_tcp_segment_unmap(segment);
+
+ if (!segment->data) {
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, sg_page(segment->sg), copied,
+ segment->copied + segment->sg_offset +
+ segment->sg->offset);
+ } else
+ sg_init_one(&sg, segment->data + segment->copied,
+ copied);
+ crypto_hash_update(segment->hash, &sg, copied);
+ }
+
+ segment->copied += copied;
+ if (segment->copied < segment->size) {
+ iscsi_tcp_segment_map(segment, recv);
+ return 0;
+ }
+
+ segment->total_copied += segment->copied;
+ segment->copied = 0;
+ segment->size = 0;
+
+ /* Unmap the current scatterlist page, if there is one. */
+ iscsi_tcp_segment_unmap(segment);
+
+ /* Do we have more scatterlist entries? */
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n",
+ segment->total_copied, segment->total_size);
+ if (segment->total_copied < segment->total_size) {
+ /* Proceed to the next entry in the scatterlist. */
+ iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+ 0);
+ iscsi_tcp_segment_map(segment, recv);
+ BUG_ON(segment->size == 0);
+ return 0;
+ }
+
+ /* Do we need to handle padding? */
+ if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
+ pad = iscsi_padding(segment->total_copied);
+ if (pad != 0) {
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
+ "consume %d pad bytes\n", pad);
+ segment->total_size += pad;
+ segment->size = pad;
+ segment->data = segment->padbuf;
+ return 0;
+ }
+ }
+
+ /*
+ * Set us up for transferring the data digest. hdr digest
+ * is completely handled in hdr done function.
+ */
+ if (segment->hash) {
+ crypto_hash_final(segment->hash, segment->digest);
+ iscsi_tcp_segment_splice_digest(segment,
+ recv ? segment->recv_digest : segment->digest);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
+
+/**
+ * iscsi_tcp_segment_recv - copy data to segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to copy to
+ * @ptr: data pointer
+ * @len: amount of data available
+ *
+ * This function copies up to @len bytes to the
+ * given buffer, and returns the number of bytes
+ * consumed, which can actually be less than @len.
+ *
+ * If hash digest is enabled, the function will update the
+ * hash while copying.
+ * Combining these two operations doesn't buy us a lot (yet),
+ * but in the future we could implement combined copy+crc,
+ * just way we do for network layer checksums.
+ */
+static int
+iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment, const void *ptr,
+ unsigned int len)
+{
+ unsigned int copy = 0, copied = 0;
+
+ while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
+ if (copied == len) {
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
+ "copied %d bytes\n", len);
+ break;
+ }
+
+ copy = min(len - copied, segment->size - segment->copied);
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copying %d\n", copy);
+ memcpy(segment->data + segment->copied, ptr + copied, copy);
+ copied += copy;
+ }
+ return copied;
+}
+
+inline void
+iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, hdr, hdrlen);
+ crypto_hash_digest(hash, &sg, hdrlen, digest);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
+
+static inline int
+iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ if (!segment->digest_len)
+ return 1;
+
+ if (memcmp(segment->recv_digest, segment->digest,
+ segment->digest_len)) {
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "digest mismatch\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Helper function to set up segment buffer
+ */
+static inline void
+__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+ iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+ memset(segment, 0, sizeof(*segment));
+ segment->total_size = size;
+ segment->done = done;
+
+ if (hash) {
+ segment->hash = hash;
+ crypto_hash_init(hash);
+ }
+}
+
+inline void
+iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+ size_t size, iscsi_segment_done_fn_t *done,
+ struct hash_desc *hash)
+{
+ __iscsi_segment_init(segment, size, done, hash);
+ segment->data = data;
+ segment->size = size;
+}
+EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);
+
+inline int
+iscsi_segment_seek_sg(struct iscsi_segment *segment,
+ struct scatterlist *sg_list, unsigned int sg_count,
+ unsigned int offset, size_t size,
+ iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+
+ __iscsi_segment_init(segment, size, done, hash);
+ for_each_sg(sg_list, sg, sg_count, i) {
+ if (offset < sg->length) {
+ iscsi_tcp_segment_init_sg(segment, sg, offset);
+ return 0;
+ }
+ offset -= sg->length;
+ }
+
+ return ISCSI_ERR_DATA_OFFSET;
+}
+EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
+
+/**
+ * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+ * @tcp_conn: iscsi connection to prep for
+ *
+ * This function always passes NULL for the hash argument, because when this
+ * function is called we do not yet know the final size of the header and want
+ * to delay the digest processing until we know that.
+ */
+void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+ ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
+ "(%s)\n", tcp_conn->iscsi_conn->hdrdgst_en ?
+ "digest enabled" : "digest disabled");
+ iscsi_segment_init_linear(&tcp_conn->in.segment,
+ tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+ iscsi_tcp_hdr_recv_done, NULL);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
+
+/*
+ * Handle incoming reply to any other type of command
+ */
+static int
+iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ int rc = 0;
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_DATA_DGST;
+
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+ conn->data, tcp_conn->in.datalen);
+ if (rc)
+ return rc;
+
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+}
+
+static void
+iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct hash_desc *rx_hash = NULL;
+
+ if (conn->datadgst_en &&
+ !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
+ rx_hash = tcp_conn->rx_hash;
+
+ iscsi_segment_init_linear(&tcp_conn->in.segment,
+ conn->data, tcp_conn->in.datalen,
+ iscsi_tcp_data_recv_done, rx_hash);
+}
+
+/**
+ * iscsi_tcp_cleanup_task - free tcp_task resources
+ * @task: iscsi task
+ *
+ * must be called with session back_lock
+ */
+void iscsi_tcp_cleanup_task(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+ /* nothing to do for mgmt */
+ if (!task->sc)
+ return;
+
+ spin_lock_bh(&tcp_task->queue2pool);
+ /* flush task's r2t queues */
+ while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
+ }
+
+ r2t = tcp_task->r2t;
+ if (r2t != NULL) {
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ tcp_task->r2t = NULL;
+ }
+ spin_unlock_bh(&tcp_task->queue2pool);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
+
+/**
+ * iscsi_tcp_data_in - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @task: scsi command task
+ */
+static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ int datasn = be32_to_cpu(rhdr->datasn);
+ unsigned total_in_length = scsi_in(task->sc)->length;
+
+ /*
+ * lib iscsi will update this in the completion handling if there
+ * is status.
+ */
+ if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+ iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+ if (tcp_task->exp_datasn != datasn) {
+ ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)"
+ "\n", tcp_task->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+ tcp_task->exp_datasn++;
+
+ tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+ if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+ ISCSI_DBG_TCP(conn, "data_offset(%d) + data_len(%d) > "
+ "total_length_in(%d)\n", tcp_task->data_offset,
+ tcp_conn->in.datalen, total_in_length);
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+ conn->datain_pdus_cnt++;
+ return 0;
+}
+
+/**
+ * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+ * @task: scsi command task
+ */
+static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ struct iscsi_r2t_info *r2t;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ u32 data_length;
+ u32 data_offset;
+ int rc;
+
+ if (tcp_conn->in.datalen) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2t with datalen %d\n",
+ tcp_conn->in.datalen);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ if (tcp_task->exp_datasn != r2tsn){
+ ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+ tcp_task->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+ if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+ task->itt);
+ return 0;
+ }
+
+ data_length = be32_to_cpu(rhdr->data_length);
+ if (data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+ return ISCSI_ERR_DATALEN;
+ }
+
+ if (data_length > session->max_burst)
+ ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
+ "burst %u. Attempting to execute request.\n",
+ data_length, session->max_burst);
+
+ data_offset = be32_to_cpu(rhdr->data_offset);
+ if (data_offset + data_length > scsi_out(task->sc)->length) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", data_length,
+ data_offset, scsi_out(task->sc)->length);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ spin_lock(&tcp_task->pool2queue);
+ rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
+ if (!rc) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
+ "Target has sent more R2Ts than it "
+ "negotiated for or driver has leaked.\n");
+ spin_unlock(&tcp_task->pool2queue);
+ return ISCSI_ERR_PROTO;
+ }
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = data_length;
+ r2t->data_offset = data_offset;
+
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->datasn = 0;
+ r2t->sent = 0;
+
+ tcp_task->exp_datasn = r2tsn + 1;
+ kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+ spin_unlock(&tcp_task->pool2queue);
+
+ iscsi_requeue_task(task);
+ return 0;
+}
+
+/*
+ * Handle incoming reply to DataIn command
+ */
+static int
+iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+ int rc;
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_DATA_DGST;
+
+ /* check for non-exceptional status */
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+ if (rc)
+ return rc;
+ }
+
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+}
+
+/**
+ * iscsi_tcp_hdr_dissect - process PDU header
+ * @conn: iSCSI connection
+ * @hdr: PDU header
+ *
+ * This function analyzes the header of the PDU received,
+ * and performs several sanity checks. If the PDU is accompanied
+ * by data, the receive buffer is set up to copy the incoming data
+ * to the correct location.
+ */
+static int
+iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+ int rc = 0, opcode, ahslen;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_task *task;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+ if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi_tcp: datalen %d > %d\n",
+ tcp_conn->in.datalen, conn->max_recv_dlength);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ /* Additional header segments. So far, we don't
+ * process additional headers.
+ */
+ ahslen = hdr->hlength << 2;
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+ rc = iscsi_verify_itt(conn, hdr->itt);
+ if (rc)
+ return rc;
+
+ ISCSI_DBG_TCP(conn, "opcode 0x%x ahslen %d datalen %d\n",
+ opcode, ahslen, tcp_conn->in.datalen);
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
+ spin_lock(&conn->session->back_lock);
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ rc = ISCSI_ERR_BAD_ITT;
+ else
+ rc = iscsi_tcp_data_in(conn, task);
+ if (rc) {
+ spin_unlock(&conn->session->back_lock);
+ break;
+ }
+
+ if (tcp_conn->in.datalen) {
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct hash_desc *rx_hash = NULL;
+ struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+ * Scatterlist case:
+ * We set up the iscsi_segment to point to the next
+ * scatterlist entry to copy to. As we go along,
+ * we move on to the next scatterlist entry and
+ * update the digest per-entry.
+ */
+ if (conn->datadgst_en &&
+ !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
+ rx_hash = tcp_conn->rx_hash;
+
+ ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
+ "offset=%d, datalen=%d)\n",
+ tcp_task->data_offset,
+ tcp_conn->in.datalen);
+ task->last_xfer = jiffies;
+ rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+ sdb->table.sgl,
+ sdb->table.nents,
+ tcp_task->data_offset,
+ tcp_conn->in.datalen,
+ iscsi_tcp_process_data_in,
+ rx_hash);
+ spin_unlock(&conn->session->back_lock);
+ return rc;
+ }
+ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+ spin_unlock(&conn->session->back_lock);
+ break;
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+ return 0;
+ }
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+ spin_lock(&conn->session->back_lock);
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ spin_unlock(&conn->session->back_lock);
+ if (!task)
+ rc = ISCSI_ERR_BAD_ITT;
+ else if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+ else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+ task->last_xfer = jiffies;
+ spin_lock(&conn->session->frwd_lock);
+ rc = iscsi_tcp_r2t_rsp(conn, task);
+ spin_unlock(&conn->session->frwd_lock);
+ } else
+ rc = ISCSI_ERR_PROTO;
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ case ISCSI_OP_REJECT:
+ case ISCSI_OP_ASYNC_EVENT:
+ /*
+ * It is possible that we could get a PDU with a buffer larger
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+ if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi_tcp: received buffer of "
+ "len %u but conn buffer is only %u "
+ "(opcode %0x)\n",
+ tcp_conn->in.datalen,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ /* If there's data coming in with the response,
+ * receive it to the connection's buffer.
+ */
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+ return 0;
+ }
+ /* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ default:
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+
+ if (rc == 0) {
+ /* Anything that comes with data should have
+ * been handled above. */
+ if (tcp_conn->in.datalen)
+ return ISCSI_ERR_PROTO;
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ }
+
+ return rc;
+}
+
+/**
+ * iscsi_tcp_hdr_recv_done - process PDU header
+ *
+ * This is the callback invoked when the PDU header has
+ * been received. If the header is followed by additional
+ * header segments, we go back for more data.
+ */
+static int
+iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct iscsi_hdr *hdr;
+
+ /* Check if there are additional header segments
+ * *prior* to computing the digest, because we
+ * may need to go back to the caller for more.
+ */
+ hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+ if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+ /* Bump the header length - the caller will
+ * just loop around and get the AHS for us, and
+ * call again. */
+ unsigned int ahslen = hdr->hlength << 2;
+
+ /* Make sure we don't overflow */
+ if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+ return ISCSI_ERR_AHSLEN;
+
+ segment->total_size += ahslen;
+ segment->size += ahslen;
+ return 0;
+ }
+
+ /* We're done processing the header. See if we're doing
+ * header digests; if so, set up the recv_digest buffer
+ * and go back for more. */
+ if (conn->hdrdgst_en &&
+ !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
+ if (segment->digest_len == 0) {
+ /*
+ * Even if we offload the digest processing we
+ * splice it in so we can increment the skb/segment
+ * counters in preparation for the data segment.
+ */
+ iscsi_tcp_segment_splice_digest(segment,
+ segment->recv_digest);
+ return 0;
+ }
+
+ iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+ segment->total_copied - ISCSI_DIGEST_SIZE,
+ segment->digest);
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_HDR_DGST;
+ }
+
+ tcp_conn->in.hdr = hdr;
+ return iscsi_tcp_hdr_dissect(conn, hdr);
+}
+
+/**
+ * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
+ * @tcp_conn: iscsi tcp conn
+ *
+ * returns non zero if we are currently processing or setup to process
+ * a header.
+ */
+inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
+{
+ return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
+
+/**
+ * iscsi_tcp_recv_skb - Process skb
+ * @conn: iscsi connection
+ * @skb: network buffer with header and/or data segment
+ * @offset: offset in skb
+ * @offload: bool indicating if transfer was offloaded
+ *
+ * Will return status of transfer in status. And will return
+ * number of bytes copied.
+ */
+int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+ unsigned int offset, bool offloaded, int *status)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_segment *segment = &tcp_conn->in.segment;
+ struct skb_seq_state seq;
+ unsigned int consumed = 0;
+ int rc = 0;
+
+ ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
+ /*
+ * Update for each skb instead of pdu, because over slow networks a
+ * data_in's data could take a while to read in. We also want to
+ * account for r2ts.
+ */
+ conn->last_recv = jiffies;
+
+ if (unlikely(conn->suspend_rx)) {
+ ISCSI_DBG_TCP(conn, "Rx suspended!\n");
+ *status = ISCSI_TCP_SUSPENDED;
+ return 0;
+ }
+
+ if (offloaded) {
+ segment->total_copied = segment->total_size;
+ goto segment_done;
+ }
+
+ skb_prepare_seq_read(skb, offset, skb->len, &seq);
+ while (1) {
+ unsigned int avail;
+ const u8 *ptr;
+
+ avail = skb_seq_read(consumed, &ptr, &seq);
+ if (avail == 0) {
+ ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
+ consumed);
+ *status = ISCSI_TCP_SKB_DONE;
+ goto skb_done;
+ }
+ BUG_ON(segment->copied >= segment->size);
+
+ ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr,
+ avail);
+ rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+ BUG_ON(rc == 0);
+ consumed += rc;
+
+ if (segment->total_copied >= segment->total_size) {
+ skb_abort_seq_read(&seq);
+ goto segment_done;
+ }
+ }
+
+segment_done:
+ *status = ISCSI_TCP_SEGMENT_DONE;
+ ISCSI_DBG_TCP(conn, "segment done\n");
+ rc = segment->done(tcp_conn, segment);
+ if (rc != 0) {
+ *status = ISCSI_TCP_CONN_ERR;
+ ISCSI_DBG_TCP(conn, "Error receiving PDU, errno=%d\n", rc);
+ iscsi_conn_failure(conn, rc);
+ return 0;
+ }
+ /* The done() functions sets up the next segment. */
+
+skb_done:
+ conn->rxdata_octets += consumed;
+ return consumed;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
+
+/**
+ * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+ * @task: scsi command task
+ * @sc: scsi command
+ */
+int iscsi_tcp_task_init(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct scsi_cmnd *sc = task->sc;
+ int err;
+
+ if (!sc) {
+ /*
+ * mgmt tasks do not have a scatterlist since they come
+ * in from the iscsi interface.
+ */
+ ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);
+
+ return conn->session->tt->init_pdu(task, 0, task->data_count);
+ }
+
+ BUG_ON(kfifo_len(&tcp_task->r2tqueue));
+ tcp_task->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+ ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n",
+ task->itt, task->imm_count, task->unsol_r2t.data_length);
+
+ err = conn->session->tt->init_pdu(task, 0, task->imm_count);
+ if (err)
+ return err;
+ task->imm_count = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
+
+static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_r2t_info *r2t = NULL;
+
+ if (iscsi_task_has_unsol_data(task))
+ r2t = &task->unsol_r2t;
+ else {
+ spin_lock_bh(&tcp_task->queue2pool);
+ if (tcp_task->r2t) {
+ r2t = tcp_task->r2t;
+ /* Continue with this R2T? */
+ if (r2t->data_length <= r2t->sent) {
+ ISCSI_DBG_TCP(task->conn,
+ " done with r2t %p\n", r2t);
+ kfifo_in(&tcp_task->r2tpool.queue,
+ (void *)&tcp_task->r2t,
+ sizeof(void *));
+ tcp_task->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+ if (kfifo_out(&tcp_task->r2tqueue,
+ (void *)&tcp_task->r2t, sizeof(void *)) !=
+ sizeof(void *))
+ r2t = NULL;
+ else
+ r2t = tcp_task->r2t;
+ }
+ spin_unlock_bh(&tcp_task->queue2pool);
+ }
+
+ return r2t;
+}
+
+/**
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted successfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+int iscsi_tcp_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_r2t_info *r2t;
+ int rc = 0;
+
+flush:
+ /* Flush any pending data first. */
+ rc = session->tt->xmit_pdu(task);
+ if (rc < 0)
+ return rc;
+
+ /* mgmt command */
+ if (!task->sc) {
+ if (task->hdr->itt == RESERVED_ITT)
+ iscsi_put_task(task);
+ return 0;
+ }
+
+ /* Are we done already? */
+ if (task->sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+ r2t = iscsi_tcp_get_curr_r2t(task);
+ if (r2t == NULL) {
+ /* Waiting for more R2Ts to arrive. */
+ ISCSI_DBG_TCP(conn, "no R2Ts yet\n");
+ return 0;
+ }
+
+ rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
+ if (rc)
+ return rc;
+ iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
+
+ ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+ r2t, r2t->datasn - 1, task->hdr->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc) {
+ iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED);
+ return rc;
+ }
+
+ r2t->sent += r2t->data_count;
+ goto flush;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
+
+struct iscsi_cls_conn *
+iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
+ uint32_t conn_idx)
+
+{
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+ cls_conn = iscsi_conn_setup(cls_session,
+ sizeof(*tcp_conn) + dd_data_size, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+ /*
+ * due to strange issues with iser these are not set
+ * in iscsi_conn_setup
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+ tcp_conn = conn->dd_data;
+ tcp_conn->iscsi_conn = conn;
+ tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
+ return cls_conn;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
+
+void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
+{
+ iscsi_conn_teardown(cls_conn);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
+
+int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
+{
+ int i;
+ int cmd_i;
+
+ /*
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+ struct iscsi_task *task = session->cmds[cmd_i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ /*
+ * pre-allocated x2 as much r2ts to handle race when
+ * target acks DataOut faster than we data_xmit() queues
+ * could replenish r2tqueue.
+ */
+
+ /* R2T pool */
+ if (iscsi_pool_init(&tcp_task->r2tpool,
+ session->max_r2t * 2, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+ if (kfifo_alloc(&tcp_task->r2tqueue,
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
+ iscsi_pool_free(&tcp_task->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ spin_lock_init(&tcp_task->pool2queue);
+ spin_lock_init(&tcp_task->queue2pool);
+ }
+
+ return 0;
+
+r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ kfifo_free(&tcp_task->r2tqueue);
+ iscsi_pool_free(&tcp_task->r2tpool);
+ }
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
+
+void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ kfifo_free(&tcp_task->r2tqueue);
+ iscsi_pool_free(&tcp_task->r2tpool);
+ }
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
+
+int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
+{
+ struct iscsi_session *session = conn->session;
+ unsigned short r2ts = 0;
+
+ sscanf(buf, "%hu", &r2ts);
+ if (session->max_r2t == r2ts)
+ return 0;
+
+ if (!r2ts || !is_power_of_2(r2ts))
+ return -EINVAL;
+
+ session->max_r2t = r2ts;
+ iscsi_tcp_r2tpool_free(session);
+ return iscsi_tcp_r2tpool_alloc(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);
+
+void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
new file mode 100644
index 000000000..9dafe64e7
--- /dev/null
+++ b/drivers/scsi/libsas/Kconfig
@@ -0,0 +1,48 @@
+#
+# Kernel configuration file for the SAS Class
+#
+# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+#
+
+config SCSI_SAS_LIBSAS
+ tristate "SAS Domain Transport Attributes"
+ depends on SCSI
+ select SCSI_SAS_ATTRS
+ help
+ This provides transport specific helpers for SAS drivers which
+ use the domain device construct (like the aic94xxx).
+
+config SCSI_SAS_ATA
+ bool "ATA support for libsas (requires libata)"
+ depends on SCSI_SAS_LIBSAS
+ depends on ATA = y || ATA = SCSI_SAS_LIBSAS
+ help
+ Builds in ATA support into libsas. Will necessitate
+ the loading of libata along with libsas.
+
+config SCSI_SAS_HOST_SMP
+ bool "Support for SMP interpretation for SAS hosts"
+ default y
+ depends on SCSI_SAS_LIBSAS
+ help
+ Allows sas hosts to receive SMP frames. Selecting this
+ option builds an SMP interpreter into libsas. Say
+ N here if you want to save the few kb this consumes.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
new file mode 100644
index 000000000..2e70140f7
--- /dev/null
+++ b/drivers/scsi/libsas/Makefile
@@ -0,0 +1,35 @@
+#
+# Kernel Makefile for the libsas helpers
+#
+# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
+libsas-y += sas_init.o \
+ sas_phy.o \
+ sas_port.o \
+ sas_event.o \
+ sas_dump.o \
+ sas_discover.o \
+ sas_expander.o \
+ sas_scsi_host.o \
+ sas_task.o
+libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
+libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
new file mode 100644
index 000000000..9c706d8c1
--- /dev/null
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -0,0 +1,865 @@
+/*
+ * Support for SATA devices on Serial Attached SCSI (SAS) controllers
+ *
+ * Copyright (C) 2006 IBM Corporation
+ *
+ * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/async.h>
+#include <linux/export.h>
+
+#include <scsi/sas_ata.h>
+#include "sas_internal.h"
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+#include "../scsi_transport_api.h"
+#include <scsi/scsi_eh.h>
+
+static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
+{
+ /* Cheesy attempt to translate SAS errors into ATA. Hah! */
+
+ /* transport error */
+ if (ts->resp == SAS_TASK_UNDELIVERED)
+ return AC_ERR_ATA_BUS;
+
+ /* ts->resp == SAS_TASK_COMPLETE */
+ /* task delivered, what happened afterwards? */
+ switch (ts->stat) {
+ case SAS_DEV_NO_RESPONSE:
+ return AC_ERR_TIMEOUT;
+
+ case SAS_INTERRUPTED:
+ case SAS_PHY_DOWN:
+ case SAS_NAK_R_ERR:
+ return AC_ERR_ATA_BUS;
+
+
+ case SAS_DATA_UNDERRUN:
+ /*
+ * Some programs that use the taskfile interface
+ * (smartctl in particular) can cause underrun
+ * problems. Ignore these errors, perhaps at our
+ * peril.
+ */
+ return 0;
+
+ case SAS_DATA_OVERRUN:
+ case SAS_QUEUE_FULL:
+ case SAS_DEVICE_UNKNOWN:
+ case SAS_SG_ERR:
+ return AC_ERR_INVALID;
+
+ case SAS_OPEN_TO:
+ case SAS_OPEN_REJECT:
+ SAS_DPRINTK("%s: Saw error %d. What to do?\n",
+ __func__, ts->stat);
+ return AC_ERR_OTHER;
+
+ case SAM_STAT_CHECK_CONDITION:
+ case SAS_ABORTED_TASK:
+ return AC_ERR_DEV;
+
+ case SAS_PROTO_RESPONSE:
+ /* This means the ending_fis has the error
+ * value; return 0 here to collect it */
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void sas_ata_task_done(struct sas_task *task)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct task_status_struct *stat = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ enum ata_completion_errors ac;
+ unsigned long flags;
+ struct ata_link *link;
+ struct ata_port *ap;
+
+ spin_lock_irqsave(&dev->done_lock, flags);
+ if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
+ task = NULL;
+ else if (qc && qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, NULL);
+ spin_unlock_irqrestore(&dev->done_lock, flags);
+
+ /* check if libsas-eh got to the task before us */
+ if (unlikely(!task))
+ return;
+
+ if (!qc)
+ goto qc_already_gone;
+
+ ap = qc->ap;
+ link = &ap->link;
+
+ spin_lock_irqsave(ap->lock, flags);
+ /* check if we lost the race with libata/sas_ata_post_internal() */
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ if (qc->scsicmd)
+ goto qc_already_gone;
+ else {
+ /* if eh is not involved and the port is frozen then the
+ * ata internal abort process has taken responsibility
+ * for this sas_task
+ */
+ return;
+ }
+ }
+
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.class == ATA_DEV_ATAPI))) {
+ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
+
+ if (!link->sactive) {
+ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ } else {
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+ } else {
+ ac = sas_to_ata_err(stat);
+ if (ac) {
+ SAS_DPRINTK("%s: SAS error %x\n", __func__,
+ stat->stat);
+ /* We saw a SAS error. Send a vague error. */
+ if (!link->sactive) {
+ qc->err_mask = ac;
+ } else {
+ link->eh_info.err_mask |= AC_ERR_DEV;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
+ dev->sata_dev.fis[3] = 0x04; /* status err */
+ dev->sata_dev.fis[2] = ATA_ERR;
+ }
+ }
+
+ qc->lldd_task = NULL;
+ ata_qc_complete(qc);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+qc_already_gone:
+ sas_free_task(task);
+}
+
+static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+{
+ unsigned long flags;
+ struct sas_task *task;
+ struct scatterlist *sg;
+ int ret = AC_ERR_SYSTEM;
+ unsigned int si, xfer = 0;
+ struct ata_port *ap = qc->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ struct Scsi_Host *host = sas_ha->core.shost;
+ struct sas_internal *i = to_sas_internal(host->transportt);
+
+ /* TODO: audit callers to ensure they are ready for qc_issue to
+ * unconditionally re-enable interrupts
+ */
+ local_irq_save(flags);
+ spin_unlock(ap->lock);
+
+ /* If the device fell off, no sense in issuing commands */
+ if (test_bit(SAS_DEV_GONE, &dev->state))
+ goto out;
+
+ task = sas_alloc_task(GFP_ATOMIC);
+ if (!task)
+ goto out;
+ task->dev = dev;
+ task->task_proto = SAS_PROTOCOL_STP;
+ task->task_done = sas_ata_task_done;
+
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ /* Need to zero out the tag libata assigned us */
+ qc->tf.nsect = 0;
+ }
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
+ task->uldd_task = qc;
+ if (ata_is_atapi(qc->tf.protocol)) {
+ memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+ task->total_xfer_len = qc->nbytes;
+ task->num_scatter = qc->n_elem;
+ } else {
+ for_each_sg(qc->sg, sg, qc->n_elem, si)
+ xfer += sg->length;
+
+ task->total_xfer_len = xfer;
+ task->num_scatter = si;
+ }
+
+ task->data_dir = qc->dma_dir;
+ task->scatter = qc->sg;
+ task->ata_task.retry_count = 1;
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ qc->lldd_task = task;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NCQ:
+ task->ata_task.use_ncq = 1;
+ /* fall through */
+ case ATAPI_PROT_DMA:
+ case ATA_PROT_DMA:
+ task->ata_task.dma_xfer = 1;
+ break;
+ }
+
+ if (qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, task);
+
+ ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
+ if (ret) {
+ SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
+
+ if (qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, NULL);
+ sas_free_task(task);
+ ret = AC_ERR_SYSTEM;
+ }
+
+ out:
+ spin_lock(ap->lock);
+ local_irq_restore(flags);
+ return ret;
+}
+
+static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct domain_device *dev = qc->ap->private_data;
+
+ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
+ return true;
+}
+
+static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
+{
+ return to_sas_internal(dev->port->ha->core.shost->transportt);
+}
+
+static int sas_get_ata_command_set(struct domain_device *dev);
+
+int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
+{
+ if (phy->attached_tproto & SAS_PROTOCOL_STP)
+ dev->tproto = phy->attached_tproto;
+ if (phy->attached_sata_dev)
+ dev->tproto |= SAS_SATA_DEV;
+
+ if (phy->attached_dev_type == SAS_SATA_PENDING)
+ dev->dev_type = SAS_SATA_PENDING;
+ else {
+ int res;
+
+ dev->dev_type = SAS_SATA_DEV;
+ res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
+ &dev->sata_dev.rps_resp);
+ if (res) {
+ SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
+ "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
+ phy->phy_id, res);
+ return res;
+ }
+ memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
+ sizeof(struct dev_to_host_fis));
+ dev->sata_dev.class = sas_get_ata_command_set(dev);
+ }
+ return 0;
+}
+
+static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
+{
+ int res;
+
+ /* we weren't pending, so successfully end the reset sequence now */
+ if (dev->dev_type != SAS_SATA_PENDING)
+ return 1;
+
+ /* hmmm, if this succeeds do we need to repost the domain_device to the
+ * lldd so it can pick up new parameters?
+ */
+ res = sas_get_ata_info(dev, phy);
+ if (res)
+ return 0; /* retry */
+ else
+ return 1;
+}
+
+static int smp_ata_check_ready(struct ata_link *link)
+{
+ int res;
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct domain_device *ex_dev = dev->parent;
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];
+
+ res = sas_ex_phy_discover(ex_dev, phy->number);
+ sas_put_local_phy(phy);
+
+ /* break the wait early if the expander is unreachable,
+ * otherwise keep polling
+ */
+ if (res == -ECOMM)
+ return res;
+ if (res != SMP_RESP_FUNC_ACC)
+ return 0;
+
+ switch (ex_phy->attached_dev_type) {
+ case SAS_SATA_PENDING:
+ return 0;
+ case SAS_END_DEVICE:
+ if (ex_phy->attached_sata_dev)
+ return sas_ata_clear_pending(dev, ex_phy);
+ default:
+ return -ENODEV;
+ }
+}
+
+static int local_ata_check_ready(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+
+ if (i->dft->lldd_ata_check_ready)
+ return i->dft->lldd_ata_check_ready(dev);
+ else {
+ /* lldd's that don't implement 'ready' checking get the
+ * old default behavior of not coordinating reset
+ * recovery with libata
+ */
+ return 1;
+ }
+}
+
+static int sas_ata_printk(const char *level, const struct domain_device *ddev,
+ const char *fmt, ...)
+{
+ struct ata_port *ap = ddev->sata_dev.ap;
+ struct device *dev = &ddev->rphy->dev;
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = printk("%ssas: ata%u: %s: %pV",
+ level, ap->print_id, dev_name(dev), &vaf);
+
+ va_end(args);
+
+ return r;
+}
+
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int ret = 0, res;
+ struct sas_phy *phy;
+ struct ata_port *ap = link->ap;
+ int (*check_ready)(struct ata_link *link);
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+
+ res = i->dft->lldd_I_T_nexus_reset(dev);
+ if (res == -ENODEV)
+ return res;
+
+ if (res != TMF_RESP_FUNC_COMPLETE)
+ sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+
+ phy = sas_get_local_phy(dev);
+ if (scsi_is_sas_phy_local(phy))
+ check_ready = local_ata_check_ready;
+ else
+ check_ready = smp_ata_check_ready;
+ sas_put_local_phy(phy);
+
+ ret = ata_wait_after_reset(link, deadline, check_ready);
+ if (ret && ret != -EAGAIN)
+ sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
+
+ *class = dev->sata_dev.class;
+
+ ap->cbl = ATA_CBL_SATA;
+ return ret;
+}
+
+/*
+ * notify the lldd to forget the sas_task for this internal ata command
+ * that bypasses scsi-eh
+ */
+static void sas_ata_internal_abort(struct sas_task *task)
+{
+ struct sas_internal *si = dev_to_sas_internal(task->dev);
+ unsigned long flags;
+ int res;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
+ task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
+ task);
+ goto out;
+ }
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ res = si->dft->lldd_abort_task(task);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE ||
+ res == TMF_RESP_FUNC_COMPLETE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ goto out;
+ }
+
+ /* XXX we are not prepared to deal with ->lldd_abort_task()
+ * failures. TODO: lldds need to unconditionally forget about
+ * aborted ata tasks, otherwise we (likely) leak the sas task
+ * here
+ */
+ SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);
+
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ return;
+ out:
+ sas_free_task(task);
+}
+
+static void sas_ata_post_internal(struct ata_queued_cmd *qc)
+{
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (qc->err_mask) {
+ /*
+ * Find the sas_task and kill it. By this point, libata
+ * has decided to kill the qc and has frozen the port.
+ * In this state sas_ata_task_done() will no longer free
+ * the sas_task, so we need to notify the lldd (via
+ * ->lldd_abort_task) that the task is dead and free it
+ * ourselves.
+ */
+ struct sas_task *task = qc->lldd_task;
+
+ qc->lldd_task = NULL;
+ if (!task)
+ return;
+ task->uldd_task = NULL;
+ sas_ata_internal_abort(task);
+ }
+}
+
+
+static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+
+ if (i->dft->lldd_ata_set_dmamode)
+ i->dft->lldd_ata_set_dmamode(dev);
+}
+
+static void sas_ata_sched_eh(struct ata_port *ap)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_ha_struct *ha = dev->port->ha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
+ ha->eh_active++;
+ ata_std_sched_eh(ap);
+ spin_unlock_irqrestore(&ha->lock, flags);
+}
+
+void sas_ata_end_eh(struct ata_port *ap)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_ha_struct *ha = dev->port->ha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
+ ha->eh_active--;
+ spin_unlock_irqrestore(&ha->lock, flags);
+}
+
+static struct ata_port_operations sas_sata_ops = {
+ .prereset = ata_std_prereset,
+ .hardreset = sas_ata_hard_reset,
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
+ .post_internal_cmd = sas_ata_post_internal,
+ .qc_defer = ata_std_qc_defer,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = sas_ata_qc_issue,
+ .qc_fill_rtf = sas_ata_qc_fill_rtf,
+ .port_start = ata_sas_port_start,
+ .port_stop = ata_sas_port_stop,
+ .set_dmamode = sas_ata_set_dmamode,
+ .sched_eh = sas_ata_sched_eh,
+ .end_eh = sas_ata_end_eh,
+};
+
+static struct ata_port_info sata_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+ ATA_FLAG_SAS_HOST,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sas_sata_ops
+};
+
+int sas_ata_init(struct domain_device *found_dev)
+{
+ struct sas_ha_struct *ha = found_dev->port->ha;
+ struct Scsi_Host *shost = ha->core.shost;
+ struct ata_port *ap;
+ int rc;
+
+ ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
+ ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
+ &sata_port_info,
+ shost);
+ if (!ap) {
+ SAS_DPRINTK("ata_sas_port_alloc failed.\n");
+ return -ENODEV;
+ }
+
+ ap->private_data = found_dev;
+ ap->cbl = ATA_CBL_SATA;
+ ap->scsi_host = shost;
+ rc = ata_sas_port_init(ap);
+ if (rc) {
+ ata_sas_port_destroy(ap);
+ return rc;
+ }
+ found_dev->sata_dev.ap = ap;
+
+ return 0;
+}
+
+void sas_ata_task_abort(struct sas_task *task)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+ struct completion *waiting;
+
+ /* Bounce SCSI-initiated commands to the SCSI EH */
+ if (qc->scsicmd) {
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_abort_request(qc->scsicmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ return;
+ }
+
+ /* Internal command, fake a timeout and complete. */
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ waiting = qc->private_data;
+ complete(waiting);
+}
+
+static int sas_get_ata_command_set(struct domain_device *dev)
+{
+ struct dev_to_host_fis *fis =
+ (struct dev_to_host_fis *) dev->frame_rcvd;
+ struct ata_taskfile tf;
+
+ if (dev->dev_type == SAS_SATA_PENDING)
+ return ATA_DEV_UNKNOWN;
+
+ ata_tf_from_fis((const u8 *)fis, &tf);
+
+ return ata_dev_classify(&tf);
+}
+
+void sas_probe_sata(struct asd_sas_port *port)
+{
+ struct domain_device *dev, *n;
+
+ mutex_lock(&port->ha->disco_mutex);
+ list_for_each_entry(dev, &port->disco_list, disco_list_node) {
+ if (!dev_is_sata(dev))
+ continue;
+
+ ata_sas_async_probe(dev->sata_dev.ap);
+ }
+ mutex_unlock(&port->ha->disco_mutex);
+
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+ if (!dev_is_sata(dev))
+ continue;
+
+ sas_ata_wait_eh(dev);
+
+ /* if libata could not bring the link up, don't surface
+ * the device
+ */
+ if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ sas_fail_probe(dev, __func__, -ENODEV);
+ }
+
+}
+
+static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
+{
+ struct domain_device *dev, *n;
+
+ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
+ if (!dev_is_sata(dev))
+ continue;
+
+ sas_ata_wait_eh(dev);
+
+ /* if libata failed to power manage the device, tear it down */
+ if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ sas_fail_probe(dev, func, -ENODEV);
+ }
+}
+
+void sas_suspend_sata(struct asd_sas_port *port)
+{
+ struct domain_device *dev;
+
+ mutex_lock(&port->ha->disco_mutex);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ struct sata_device *sata;
+
+ if (!dev_is_sata(dev))
+ continue;
+
+ sata = &dev->sata_dev;
+ if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
+ continue;
+
+ ata_sas_port_suspend(sata->ap);
+ }
+ mutex_unlock(&port->ha->disco_mutex);
+
+ sas_ata_flush_pm_eh(port, __func__);
+}
+
+void sas_resume_sata(struct asd_sas_port *port)
+{
+ struct domain_device *dev;
+
+ mutex_lock(&port->ha->disco_mutex);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ struct sata_device *sata;
+
+ if (!dev_is_sata(dev))
+ continue;
+
+ sata = &dev->sata_dev;
+ if (sata->ap->pm_mesg.event == PM_EVENT_ON)
+ continue;
+
+ ata_sas_port_resume(sata->ap);
+ }
+ mutex_unlock(&port->ha->disco_mutex);
+
+ sas_ata_flush_pm_eh(port, __func__);
+}
+
+/**
+ * sas_discover_sata -- discover an STP/SATA domain device
+ * @dev: pointer to struct domain_device of interest
+ *
+ * Devices directly attached to a HA port, have no parents. All other
+ * devices do, and should have their "parent" pointer set appropriately
+ * before calling this function.
+ */
+int sas_discover_sata(struct domain_device *dev)
+{
+ int res;
+
+ if (dev->dev_type == SAS_SATA_PM)
+ return -ENODEV;
+
+ dev->sata_dev.class = sas_get_ata_command_set(dev);
+ sas_fill_in_rphy(dev, dev->rphy);
+
+ res = sas_notify_lldd_dev_found(dev);
+ if (res)
+ return res;
+
+ sas_discover_event(dev->port, DISCE_PROBE);
+ return 0;
+}
+
+static void async_sas_ata_eh(void *data, async_cookie_t cookie)
+{
+ struct domain_device *dev = data;
+ struct ata_port *ap = dev->sata_dev.ap;
+ struct sas_ha_struct *ha = dev->port->ha;
+
+ sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
+ ata_scsi_port_error_handler(ha->core.shost, ap);
+ sas_put_device(dev);
+}
+
+void sas_ata_strategy_handler(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ ASYNC_DOMAIN_EXCLUSIVE(async);
+ int i;
+
+ /* it's ok to defer revalidation events during ata eh, these
+ * disks are in one of three states:
+ * 1/ present for initial domain discovery, and these
+ * resets will cause bcn flutters
+ * 2/ hot removed, we'll discover that after eh fails
+ * 3/ hot added after initial discovery, lost the race, and need
+ * to catch the next train.
+ */
+ sas_disable_revalidation(sas_ha);
+
+ spin_lock_irq(&sas_ha->phy_port_lock);
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ struct asd_sas_port *port = sas_ha->sas_port[i];
+ struct domain_device *dev;
+
+ spin_lock(&port->dev_list_lock);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ if (!dev_is_sata(dev))
+ continue;
+
+ /* hold a reference over eh since we may be
+ * racing with final remove once all commands
+ * are completed
+ */
+ kref_get(&dev->kref);
+
+ async_schedule_domain(async_sas_ata_eh, dev, &async);
+ }
+ spin_unlock(&port->dev_list_lock);
+ }
+ spin_unlock_irq(&sas_ha->phy_port_lock);
+
+ async_synchronize_full_domain(&async);
+
+ sas_enable_revalidation(sas_ha);
+}
+
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *cmd, *n;
+ struct domain_device *eh_dev;
+
+ do {
+ LIST_HEAD(sata_q);
+ eh_dev = NULL;
+
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+ struct domain_device *ddev = cmd_to_domain_dev(cmd);
+
+ if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
+ continue;
+ if (eh_dev && eh_dev != ddev)
+ continue;
+ eh_dev = ddev;
+ list_move(&cmd->eh_entry, &sata_q);
+ }
+
+ if (!list_empty(&sata_q)) {
+ struct ata_port *ap = eh_dev->sata_dev.ap;
+
+ sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
+ ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+ /*
+ * ata's error handler may leave the cmd on the list
+ * so make sure they don't remain on a stack list
+ * about to go out of scope.
+ *
+ * This looks strange, since the commands are
+ * now part of no list, but the next error
+ * action will be ata_port_error_handler()
+ * which takes no list and sweeps them up
+ * anyway from the ata tag array.
+ */
+ while (!list_empty(&sata_q))
+ list_del_init(sata_q.next);
+ }
+ } while (eh_dev);
+}
+
+void sas_ata_schedule_reset(struct domain_device *dev)
+{
+ struct ata_eh_info *ehi;
+ struct ata_port *ap;
+ unsigned long flags;
+
+ if (!dev_is_sata(dev))
+ return;
+
+ ap = dev->sata_dev.ap;
+ ehi = &ap->link.eh_info;
+
+ spin_lock_irqsave(ap->lock, flags);
+ ehi->err_mask |= AC_ERR_TIMEOUT;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
+
+void sas_ata_wait_eh(struct domain_device *dev)
+{
+ struct ata_port *ap;
+
+ if (!dev_is_sata(dev))
+ return;
+
+ ap = dev->sata_dev.ap;
+ ata_port_wait_eh(ap);
+}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
new file mode 100644
index 000000000..60de66252
--- /dev/null
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -0,0 +1,592 @@
+/*
+ * Serial Attached SCSI (SAS) Discover process
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/async.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include "sas_internal.h"
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/sas_ata.h>
+#include "../scsi_sas_internal.h"
+
+/* ---------- Basic task processing for discovery purposes ---------- */
+
+void sas_init_dev(struct domain_device *dev)
+{
+ switch (dev->dev_type) {
+ case SAS_END_DEVICE:
+ INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ INIT_LIST_HEAD(&dev->ex_dev.children);
+ mutex_init(&dev->ex_dev.cmd_mutex);
+ break;
+ default:
+ break;
+ }
+}
+
+/* ---------- Domain device discovery ---------- */
+
+/**
+ * sas_get_port_device -- Discover devices which caused port creation
+ * @port: pointer to struct sas_port of interest
+ *
+ * Devices directly attached to a HA port, have no parent. This is
+ * how we know they are (domain) "root" devices. All other devices
+ * do, and should have their "parent" pointer set appropriately as
+ * soon as a child device is discovered.
+ */
+static int sas_get_port_device(struct asd_sas_port *port)
+{
+ struct asd_sas_phy *phy;
+ struct sas_rphy *rphy;
+ struct domain_device *dev;
+ int rc = -ENODEV;
+
+ dev = sas_alloc_device();
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_irq(&port->phy_list_lock);
+ if (list_empty(&port->phy_list)) {
+ spin_unlock_irq(&port->phy_list_lock);
+ sas_put_device(dev);
+ return -ENODEV;
+ }
+ phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
+ spin_lock(&phy->frame_rcvd_lock);
+ memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
+ (size_t)phy->frame_rcvd_size));
+ spin_unlock(&phy->frame_rcvd_lock);
+ spin_unlock_irq(&port->phy_list_lock);
+
+ if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
+ struct dev_to_host_fis *fis =
+ (struct dev_to_host_fis *) dev->frame_rcvd;
+ if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
+ fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
+ && (fis->device & ~0x10) == 0)
+ dev->dev_type = SAS_SATA_PM;
+ else
+ dev->dev_type = SAS_SATA_DEV;
+ dev->tproto = SAS_PROTOCOL_SATA;
+ } else {
+ struct sas_identify_frame *id =
+ (struct sas_identify_frame *) dev->frame_rcvd;
+ dev->dev_type = id->dev_type;
+ dev->iproto = id->initiator_bits;
+ dev->tproto = id->target_bits;
+ }
+
+ sas_init_dev(dev);
+
+ dev->port = port;
+ switch (dev->dev_type) {
+ case SAS_SATA_DEV:
+ rc = sas_ata_init(dev);
+ if (rc) {
+ rphy = NULL;
+ break;
+ }
+ /* fall through */
+ case SAS_END_DEVICE:
+ rphy = sas_end_device_alloc(port->port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ rphy = sas_expander_alloc(port->port,
+ SAS_EDGE_EXPANDER_DEVICE);
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ rphy = sas_expander_alloc(port->port,
+ SAS_FANOUT_EXPANDER_DEVICE);
+ break;
+ default:
+ printk("ERROR: Unidentified device type %d\n", dev->dev_type);
+ rphy = NULL;
+ break;
+ }
+
+ if (!rphy) {
+ sas_put_device(dev);
+ return rc;
+ }
+
+ rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
+ memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
+ sas_fill_in_rphy(dev, rphy);
+ sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
+ port->port_dev = dev;
+ dev->linkrate = port->linkrate;
+ dev->min_linkrate = port->linkrate;
+ dev->max_linkrate = port->linkrate;
+ dev->pathways = port->num_phys;
+ memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
+ memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
+ memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
+ port->disc.max_level = 0;
+ sas_device_set_phy(dev, port->port);
+
+ dev->rphy = rphy;
+ get_device(&dev->rphy->dev);
+
+ if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
+ list_add_tail(&dev->disco_list_node, &port->disco_list);
+ else {
+ spin_lock_irq(&port->dev_list_lock);
+ list_add_tail(&dev->dev_list_node, &port->dev_list);
+ spin_unlock_irq(&port->dev_list_lock);
+ }
+
+ spin_lock_irq(&port->phy_list_lock);
+ list_for_each_entry(phy, &port->phy_list, port_phy_el)
+ sas_phy_set_target(phy, dev);
+ spin_unlock_irq(&port->phy_list_lock);
+
+ return 0;
+}
+
+/* ---------- Discover and Revalidate ---------- */
+
+int sas_notify_lldd_dev_found(struct domain_device *dev)
+{
+ int res = 0;
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ struct Scsi_Host *shost = sas_ha->core.shost;
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ if (!i->dft->lldd_dev_found)
+ return 0;
+
+ res = i->dft->lldd_dev_found(dev);
+ if (res) {
+ printk("sas: driver on pcidev %s cannot handle "
+ "device %llx, error:%d\n",
+ dev_name(sas_ha->dev),
+ SAS_ADDR(dev->sas_addr), res);
+ }
+ set_bit(SAS_DEV_FOUND, &dev->state);
+ kref_get(&dev->kref);
+ return res;
+}
+
+
+void sas_notify_lldd_dev_gone(struct domain_device *dev)
+{
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ struct Scsi_Host *shost = sas_ha->core.shost;
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ if (!i->dft->lldd_dev_gone)
+ return;
+
+ if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
+ i->dft->lldd_dev_gone(dev);
+ sas_put_device(dev);
+ }
+}
+
+static void sas_probe_devices(struct work_struct *work)
+{
+ struct domain_device *dev, *n;
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_PROBE, &port->disc.pending);
+
+ /* devices must be domain members before link recovery and probe */
+ list_for_each_entry(dev, &port->disco_list, disco_list_node) {
+ spin_lock_irq(&port->dev_list_lock);
+ list_add_tail(&dev->dev_list_node, &port->dev_list);
+ spin_unlock_irq(&port->dev_list_lock);
+ }
+
+ sas_probe_sata(port);
+
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+ int err;
+
+ err = sas_rphy_add(dev->rphy);
+ if (err)
+ sas_fail_probe(dev, __func__, err);
+ else
+ list_del_init(&dev->disco_list_node);
+ }
+}
+
+static void sas_suspend_devices(struct work_struct *work)
+{
+ struct asd_sas_phy *phy;
+ struct domain_device *dev;
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+ struct Scsi_Host *shost = port->ha->core.shost;
+ struct sas_internal *si = to_sas_internal(shost->transportt);
+
+ clear_bit(DISCE_SUSPEND, &port->disc.pending);
+
+ sas_suspend_sata(port);
+
+ /* lldd is free to forget the domain_device across the
+ * suspension, we force the issue here to keep the reference
+ * counts aligned
+ */
+ list_for_each_entry(dev, &port->dev_list, dev_list_node)
+ sas_notify_lldd_dev_gone(dev);
+
+ /* we are suspending, so we know events are disabled and
+ * phy_list is not being mutated
+ */
+ list_for_each_entry(phy, &port->phy_list, port_phy_el) {
+ if (si->dft->lldd_port_formed)
+ si->dft->lldd_port_deformed(phy);
+ phy->suspended = 1;
+ port->suspended = 1;
+ }
+}
+
+static void sas_resume_devices(struct work_struct *work)
+{
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_RESUME, &port->disc.pending);
+
+ sas_resume_sata(port);
+}
+
+/**
+ * sas_discover_end_dev -- discover an end device (SSP, etc)
+ * @end: pointer to domain device of interest
+ *
+ * See comment in sas_discover_sata().
+ */
+int sas_discover_end_dev(struct domain_device *dev)
+{
+ int res;
+
+ res = sas_notify_lldd_dev_found(dev);
+ if (res)
+ return res;
+ sas_discover_event(dev->port, DISCE_PROBE);
+
+ return 0;
+}
+
+/* ---------- Device registration and unregistration ---------- */
+
+void sas_free_device(struct kref *kref)
+{
+ struct domain_device *dev = container_of(kref, typeof(*dev), kref);
+
+ put_device(&dev->rphy->dev);
+ dev->rphy = NULL;
+
+ if (dev->parent)
+ sas_put_device(dev->parent);
+
+ sas_port_put_phy(dev->phy);
+ dev->phy = NULL;
+
+ /* remove the phys and ports, everything else should be gone */
+ if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ kfree(dev->ex_dev.ex_phy);
+
+ if (dev_is_sata(dev) && dev->sata_dev.ap) {
+ ata_sas_port_destroy(dev->sata_dev.ap);
+ dev->sata_dev.ap = NULL;
+ }
+
+ kfree(dev);
+}
+
+static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
+{
+ struct sas_ha_struct *ha = port->ha;
+
+ sas_notify_lldd_dev_gone(dev);
+ if (!dev->parent)
+ dev->port->port_dev = NULL;
+ else
+ list_del_init(&dev->siblings);
+
+ spin_lock_irq(&port->dev_list_lock);
+ list_del_init(&dev->dev_list_node);
+ if (dev_is_sata(dev))
+ sas_ata_end_eh(dev->sata_dev.ap);
+ spin_unlock_irq(&port->dev_list_lock);
+
+ spin_lock_irq(&ha->lock);
+ if (dev->dev_type == SAS_END_DEVICE &&
+ !list_empty(&dev->ssp_dev.eh_list_node)) {
+ list_del_init(&dev->ssp_dev.eh_list_node);
+ ha->eh_active--;
+ }
+ spin_unlock_irq(&ha->lock);
+
+ sas_put_device(dev);
+}
+
+static void sas_destruct_devices(struct work_struct *work)
+{
+ struct domain_device *dev, *n;
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_DESTRUCT, &port->disc.pending);
+
+ list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
+ list_del_init(&dev->disco_list_node);
+
+ sas_remove_children(&dev->rphy->dev);
+ sas_rphy_delete(dev->rphy);
+ sas_unregister_common_dev(port, dev);
+ }
+}
+
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
+{
+ if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
+ !list_empty(&dev->disco_list_node)) {
+ /* this rphy never saw sas_rphy_add */
+ list_del_init(&dev->disco_list_node);
+ sas_rphy_free(dev->rphy);
+ sas_unregister_common_dev(port, dev);
+ return;
+ }
+
+ if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
+ sas_rphy_unlink(dev->rphy);
+ list_move_tail(&dev->disco_list_node, &port->destroy_list);
+ sas_discover_event(dev->port, DISCE_DESTRUCT);
+ }
+}
+
+void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
+{
+ struct domain_device *dev, *n;
+
+ list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
+ if (gone)
+ set_bit(SAS_DEV_GONE, &dev->state);
+ sas_unregister_dev(port, dev);
+ }
+
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
+ sas_unregister_dev(port, dev);
+
+ port->port->rphy = NULL;
+
+}
+
+void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
+{
+ struct sas_ha_struct *ha;
+ struct sas_phy *new_phy;
+
+ if (!dev)
+ return;
+
+ ha = dev->port->ha;
+ new_phy = sas_port_get_phy(port);
+
+ /* pin and record last seen phy */
+ spin_lock_irq(&ha->phy_port_lock);
+ if (new_phy) {
+ sas_port_put_phy(dev->phy);
+ dev->phy = new_phy;
+ }
+ spin_unlock_irq(&ha->phy_port_lock);
+}
+
+/* ---------- Discovery and Revalidation ---------- */
+
+/**
+ * sas_discover_domain -- discover the domain
+ * @port: port to the domain of interest
+ *
+ * NOTE: this process _must_ quit (return) as soon as any connection
+ * errors are encountered. Connection recovery is done elsewhere.
+ * Discover process only interrogates devices in order to discover the
+ * domain.
+ */
+static void sas_discover_domain(struct work_struct *work)
+{
+ struct domain_device *dev;
+ int error = 0;
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
+
+ if (port->port_dev)
+ return;
+
+ error = sas_get_port_device(port);
+ if (error)
+ return;
+ dev = port->port_dev;
+
+ SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
+ task_pid_nr(current));
+
+ switch (dev->dev_type) {
+ case SAS_END_DEVICE:
+ error = sas_discover_end_dev(dev);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ error = sas_discover_root_expander(dev);
+ break;
+ case SAS_SATA_DEV:
+ case SAS_SATA_PM:
+#ifdef CONFIG_SCSI_SAS_ATA
+ error = sas_discover_sata(dev);
+ break;
+#else
+ SAS_DPRINTK("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
+ /* Fall through */
+#endif
+ default:
+ error = -ENXIO;
+ SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
+ break;
+ }
+
+ if (error) {
+ sas_rphy_free(dev->rphy);
+ list_del_init(&dev->disco_list_node);
+ spin_lock_irq(&port->dev_list_lock);
+ list_del_init(&dev->dev_list_node);
+ spin_unlock_irq(&port->dev_list_lock);
+
+ sas_put_device(dev);
+ port->port_dev = NULL;
+ }
+
+ SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
+ task_pid_nr(current), error);
+}
+
+static void sas_revalidate_domain(struct work_struct *work)
+{
+ int res = 0;
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ struct asd_sas_port *port = ev->port;
+ struct sas_ha_struct *ha = port->ha;
+ struct domain_device *ddev = port->port_dev;
+
+ /* prevent revalidation from finding sata links in recovery */
+ mutex_lock(&ha->disco_mutex);
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
+ SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
+ port->id, task_pid_nr(current));
+ goto out;
+ }
+
+ clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
+
+ SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
+ task_pid_nr(current));
+
+ if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+ ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+ res = sas_ex_revalidate_domain(ddev);
+
+ SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
+ port->id, task_pid_nr(current), res);
+ out:
+ mutex_unlock(&ha->disco_mutex);
+}
+
+/* ---------- Events ---------- */
+
+static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
+{
+ /* chained work is not subject to SA_HA_DRAINING or
+ * SAS_HA_REGISTERED, because it is either submitted in the
+ * workqueue, or known to be submitted from a context that is
+ * not racing against draining
+ */
+ scsi_queue_work(ha->core.shost, &sw->work);
+}
+
+static void sas_chain_event(int event, unsigned long *pending,
+ struct sas_work *sw,
+ struct sas_ha_struct *ha)
+{
+ if (!test_and_set_bit(event, pending)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ sas_chain_work(ha, sw);
+ spin_unlock_irqrestore(&ha->lock, flags);
+ }
+}
+
+int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
+{
+ struct sas_discovery *disc;
+
+ if (!port)
+ return 0;
+ disc = &port->disc;
+
+ BUG_ON(ev >= DISC_NUM_EVENTS);
+
+ sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
+
+ return 0;
+}
+
+/**
+ * sas_init_disc -- initialize the discovery struct in the port
+ * @port: pointer to struct port
+ *
+ * Called when the ports are being initialized.
+ */
+void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
+{
+ int i;
+
+ static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
+ [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
+ [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
+ [DISCE_PROBE] = sas_probe_devices,
+ [DISCE_SUSPEND] = sas_suspend_devices,
+ [DISCE_RESUME] = sas_resume_devices,
+ [DISCE_DESTRUCT] = sas_destruct_devices,
+ };
+
+ disc->pending = 0;
+ for (i = 0; i < DISC_NUM_EVENTS; i++) {
+ INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+ disc->disc_work[i].port = port;
+ }
+}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
new file mode 100644
index 000000000..cd6f99c1a
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -0,0 +1,73 @@
+/*
+ * Serial Attached SCSI (SAS) Dump/Debugging routines
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "sas_dump.h"
+
+static const char *sas_hae_str[] = {
+ [0] = "HAE_RESET",
+};
+
+static const char *sas_porte_str[] = {
+ [0] = "PORTE_BYTES_DMAED",
+ [1] = "PORTE_BROADCAST_RCVD",
+ [2] = "PORTE_LINK_RESET_ERR",
+ [3] = "PORTE_TIMER_EVENT",
+ [4] = "PORTE_HARD_RESET",
+};
+
+static const char *sas_phye_str[] = {
+ [0] = "PHYE_LOSS_OF_SIGNAL",
+ [1] = "PHYE_OOB_DONE",
+ [2] = "PHYE_OOB_ERROR",
+ [3] = "PHYE_SPINUP_HOLD",
+ [4] = "PHYE_RESUME_TIMEOUT",
+};
+
+void sas_dprint_porte(int phyid, enum port_event pe)
+{
+ SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
+}
+void sas_dprint_phye(int phyid, enum phy_event pe)
+{
+ SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
+}
+
+void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
+{
+ SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
+ sas_hae_str[he]);
+}
+
+void sas_dump_port(struct asd_sas_port *port)
+{
+ SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
+ SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
+ SAS_ADDR(port->sas_addr));
+ SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
+ SAS_ADDR(port->attached_sas_addr));
+ SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
+ SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
+ SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
+ SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
+}
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
new file mode 100644
index 000000000..800e4c690
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -0,0 +1,30 @@
+/*
+ * Serial Attached SCSI (SAS) Dump/Debugging routines header file
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "sas_internal.h"
+
+void sas_dprint_porte(int phyid, enum port_event pe);
+void sas_dprint_phye(int phyid, enum phy_event pe);
+void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
+void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
new file mode 100644
index 000000000..aadbd5314
--- /dev/null
+++ b/drivers/scsi/libsas/sas_event.c
@@ -0,0 +1,165 @@
+/*
+ * Serial Attached SCSI (SAS) Event processing
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/export.h>
+#include <scsi/scsi_host.h>
+#include "sas_internal.h"
+#include "sas_dump.h"
+
+void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+{
+ if (!test_bit(SAS_HA_REGISTERED, &ha->state))
+ return;
+
+ if (test_bit(SAS_HA_DRAINING, &ha->state)) {
+ /* add it to the defer list, if not already pending */
+ if (list_empty(&sw->drain_node))
+ list_add(&sw->drain_node, &ha->defer_q);
+ } else
+ scsi_queue_work(ha->core.shost, &sw->work);
+}
+
+static void sas_queue_event(int event, unsigned long *pending,
+ struct sas_work *work,
+ struct sas_ha_struct *ha)
+{
+ if (!test_and_set_bit(event, pending)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->lock, flags);
+ }
+}
+
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
+ struct workqueue_struct *wq = ha->core.shost->work_q;
+ struct sas_work *sw, *_sw;
+
+ set_bit(SAS_HA_DRAINING, &ha->state);
+ /* flush submitters */
+ spin_lock_irq(&ha->lock);
+ spin_unlock_irq(&ha->lock);
+
+ drain_workqueue(wq);
+
+ spin_lock_irq(&ha->lock);
+ clear_bit(SAS_HA_DRAINING, &ha->state);
+ list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ list_del_init(&sw->drain_node);
+ sas_queue_work(ha, sw);
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+int sas_drain_work(struct sas_ha_struct *ha)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ha->drain_mutex);
+ if (err)
+ return err;
+ if (test_bit(SAS_HA_REGISTERED, &ha->state))
+ __sas_drain_work(ha);
+ mutex_unlock(&ha->drain_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sas_drain_work);
+
+void sas_disable_revalidation(struct sas_ha_struct *ha)
+{
+ mutex_lock(&ha->disco_mutex);
+ set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
+ mutex_unlock(&ha->disco_mutex);
+}
+
+void sas_enable_revalidation(struct sas_ha_struct *ha)
+{
+ int i;
+
+ mutex_lock(&ha->disco_mutex);
+ clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ const int ev = DISCE_REVALIDATE_DOMAIN;
+ struct sas_discovery *d = &port->disc;
+
+ if (!test_and_clear_bit(ev, &d->pending))
+ continue;
+
+ sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ }
+ mutex_unlock(&ha->disco_mutex);
+}
+
+static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
+{
+ BUG_ON(event >= HA_NUM_EVENTS);
+
+ sas_queue_event(event, &sas_ha->pending,
+ &sas_ha->ha_events[event].work, sas_ha);
+}
+
+static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+{
+ struct sas_ha_struct *ha = phy->ha;
+
+ BUG_ON(event >= PORT_NUM_EVENTS);
+
+ sas_queue_event(event, &phy->port_events_pending,
+ &phy->port_events[event].work, ha);
+}
+
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+{
+ struct sas_ha_struct *ha = phy->ha;
+
+ BUG_ON(event >= PHY_NUM_EVENTS);
+
+ sas_queue_event(event, &phy->phy_events_pending,
+ &phy->phy_events[event].work, ha);
+}
+
+int sas_init_events(struct sas_ha_struct *sas_ha)
+{
+ static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
+ [HAE_RESET] = sas_hae_reset,
+ };
+
+ int i;
+
+ for (i = 0; i < HA_NUM_EVENTS; i++) {
+ INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+ sas_ha->ha_events[i].ha = sas_ha;
+ }
+
+ sas_ha->notify_ha_event = notify_ha_event;
+ sas_ha->notify_port_event = notify_port_event;
+ sas_ha->notify_phy_event = sas_notify_phy_event;
+
+ return 0;
+}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
new file mode 100644
index 000000000..022bb6e10
--- /dev/null
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -0,0 +1,2186 @@
+/*
+ * Serial Attached SCSI (SAS) Expander discovery and configuration
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+#include "sas_internal.h"
+
+#include <scsi/sas_ata.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+
+static int sas_discover_expander(struct domain_device *dev);
+static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
+static int sas_configure_phy(struct domain_device *dev, int phy_id,
+ u8 *sas_addr, int include);
+static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
+
+/* ---------- SMP task management ---------- */
+
+static void smp_task_timedout(unsigned long _task)
+{
+ struct sas_task *task = (void *) _task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ complete(&task->slow_task->completion);
+}
+
+static void smp_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->slow_task->timer))
+ return;
+ complete(&task->slow_task->completion);
+}
+
+/* Give it some long enough timeout. In seconds. */
+#define SMP_TIMEOUT 10
+
+static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
+ void *resp, int resp_size)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+
+ mutex_lock(&dev->ex_dev.cmd_mutex);
+ for (retry = 0; retry < 3; retry++) {
+ if (test_bit(SAS_DEV_GONE, &dev->state)) {
+ res = -ECOMM;
+ break;
+ }
+
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task) {
+ res = -ENOMEM;
+ break;
+ }
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+ sg_init_one(&task->smp_task.smp_req, req, req_size);
+ sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+ task->task_done = smp_task_done;
+
+ task->slow_task->timer.data = (unsigned long) task;
+ task->slow_task->timer.function = smp_task_timedout;
+ task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
+ add_timer(&task->slow_task->timer);
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+
+ if (res) {
+ del_timer(&task->slow_task->timer);
+ SAS_DPRINTK("executing SMP task failed:%d\n", res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+ res = -ECOMM;
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ SAS_DPRINTK("smp task timed out or aborted\n");
+ i->dft->lldd_abort_task(task);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ SAS_DPRINTK("SMP task aborted and not done\n");
+ break;
+ }
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = 0;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ res = -EMSGSIZE;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
+ task->task_status.stat == SAS_DEVICE_UNKNOWN)
+ break;
+ else {
+ SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
+ "status 0x%x\n", __func__,
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ sas_free_task(task);
+ task = NULL;
+ }
+ }
+ mutex_unlock(&dev->ex_dev.cmd_mutex);
+
+ BUG_ON(retry == 3 && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+/* ---------- Allocations ---------- */
+
+static inline void *alloc_smp_req(int size)
+{
+ u8 *p = kzalloc(size, GFP_KERNEL);
+ if (p)
+ p[0] = SMP_REQUEST;
+ return p;
+}
+
+static inline void *alloc_smp_resp(int size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
+{
+ switch (phy->routing_attr) {
+ case TABLE_ROUTING:
+ if (dev->ex_dev.t2t_supp)
+ return 'U';
+ else
+ return 'T';
+ case DIRECT_ROUTING:
+ return 'D';
+ case SUBTRACTIVE_ROUTING:
+ return 'S';
+ default:
+ return '?';
+ }
+}
+
+static enum sas_device_type to_dev_type(struct discover_resp *dr)
+{
+ /* This is detecting a failure to transmit initial dev to host
+ * FIS as described in section J.5 of sas-2 r16
+ */
+ if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
+ dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
+ return SAS_SATA_PENDING;
+ else
+ return dr->attached_dev_type;
+}
+
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+{
+ enum sas_device_type dev_type;
+ enum sas_linkrate linkrate;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ struct smp_resp *resp = rsp;
+ struct discover_resp *dr = &resp->disc;
+ struct sas_ha_struct *ha = dev->port->ha;
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *phy = &ex->ex_phy[phy_id];
+ struct sas_rphy *rphy = dev->rphy;
+ bool new_phy = !phy->phy;
+ char *type;
+
+ if (new_phy) {
+ if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)))
+ return;
+ phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
+
+ /* FIXME: error_handling */
+ BUG_ON(!phy->phy);
+ }
+
+ switch (resp->result) {
+ case SMP_RESP_PHY_VACANT:
+ phy->phy_state = PHY_VACANT;
+ break;
+ default:
+ phy->phy_state = PHY_NOT_PRESENT;
+ break;
+ case SMP_RESP_FUNC_ACC:
+ phy->phy_state = PHY_EMPTY; /* do not know yet */
+ break;
+ }
+
+ /* check if anything important changed to squelch debug */
+ dev_type = phy->attached_dev_type;
+ linkrate = phy->linkrate;
+ memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+
+ /* Handle vacant phy - rest of dr data is not valid so skip it */
+ if (phy->phy_state == PHY_VACANT) {
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ phy->attached_dev_type = SAS_PHY_UNUSED;
+ if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
+ phy->phy_id = phy_id;
+ goto skip;
+ } else
+ goto out;
+ }
+
+ phy->attached_dev_type = to_dev_type(dr);
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+ goto out;
+ phy->phy_id = phy_id;
+ phy->linkrate = dr->linkrate;
+ phy->attached_sata_host = dr->attached_sata_host;
+ phy->attached_sata_dev = dr->attached_sata_dev;
+ phy->attached_sata_ps = dr->attached_sata_ps;
+ phy->attached_iproto = dr->iproto << 1;
+ phy->attached_tproto = dr->tproto << 1;
+ /* help some expanders that fail to zero sas_address in the 'no
+ * device' case
+ */
+ if (phy->attached_dev_type == SAS_PHY_UNUSED ||
+ phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ else
+ memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+ phy->attached_phy_id = dr->attached_phy_id;
+ phy->phy_change_count = dr->change_count;
+ phy->routing_attr = dr->routing_attr;
+ phy->virtual = dr->virtual;
+ phy->last_da_index = -1;
+
+ phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
+ phy->phy->identify.device_type = dr->attached_dev_type;
+ phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
+ phy->phy->identify.target_port_protocols = phy->attached_tproto;
+ if (!phy->attached_tproto && dr->attached_sata_dev)
+ phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ phy->phy->identify.phy_identifier = phy_id;
+ phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
+ phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
+ phy->phy->minimum_linkrate = dr->pmin_linkrate;
+ phy->phy->maximum_linkrate = dr->pmax_linkrate;
+ phy->phy->negotiated_linkrate = phy->linkrate;
+
+ skip:
+ if (new_phy)
+ if (sas_phy_add(phy->phy)) {
+ sas_phy_free(phy->phy);
+ return;
+ }
+
+ out:
+ switch (phy->attached_dev_type) {
+ case SAS_SATA_PENDING:
+ type = "stp pending";
+ break;
+ case SAS_PHY_UNUSED:
+ type = "no device";
+ break;
+ case SAS_END_DEVICE:
+ if (phy->attached_iproto) {
+ if (phy->attached_tproto)
+ type = "host+target";
+ else
+ type = "host";
+ } else {
+ if (dr->attached_sata_dev)
+ type = "stp";
+ else
+ type = "ssp";
+ }
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ type = "smp";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ /* this routine is polled by libata error recovery so filter
+ * unimportant messages
+ */
+ if (new_phy || phy->attached_dev_type != dev_type ||
+ phy->linkrate != linkrate ||
+ SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr))
+ /* pass */;
+ else
+ return;
+
+ /* if the attached device type changed and ata_eh is active,
+ * make sure we run revalidation when eh completes (see:
+ * sas_enable_revalidation)
+ */
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+ set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
+
+ SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+ test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
+ SAS_ADDR(dev->sas_addr), phy->phy_id,
+ sas_route_char(dev, phy), phy->linkrate,
+ SAS_ADDR(phy->attached_sas_addr), type);
+}
+
+/* check if we have an existing attached ata device on this expander phy */
+struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
+{
+ struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id];
+ struct domain_device *dev;
+ struct sas_rphy *rphy;
+
+ if (!ex_phy->port)
+ return NULL;
+
+ rphy = ex_phy->port->rphy;
+ if (!rphy)
+ return NULL;
+
+ dev = sas_find_dev_by_rphy(rphy);
+
+ if (dev && dev_is_sata(dev))
+ return dev;
+
+ return NULL;
+}
+
+#define DISCOVER_REQ_SIZE 16
+#define DISCOVER_RESP_SIZE 56
+
+static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
+ u8 *disc_resp, int single)
+{
+ struct discover_resp *dr;
+ int res;
+
+ disc_req[9] = single;
+
+ res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (res)
+ return res;
+ dr = &((struct smp_resp *)disc_resp)->disc;
+ if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
+ sas_printk("Found loopback topology, just ignore it!\n");
+ return 0;
+ }
+ sas_set_ex_phy(dev, single, disc_resp);
+ return 0;
+}
+
+int sas_ex_phy_discover(struct domain_device *dev, int single)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int res = 0;
+ u8 *disc_req;
+ u8 *disc_resp;
+
+ disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
+ if (!disc_req)
+ return -ENOMEM;
+
+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ if (!disc_resp) {
+ kfree(disc_req);
+ return -ENOMEM;
+ }
+
+ disc_req[1] = SMP_DISCOVER;
+
+ if (0 <= single && single < ex->num_phys) {
+ res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single);
+ } else {
+ int i;
+
+ for (i = 0; i < ex->num_phys; i++) {
+ res = sas_ex_phy_discover_helper(dev, disc_req,
+ disc_resp, i);
+ if (res)
+ goto out_err;
+ }
+ }
+out_err:
+ kfree(disc_resp);
+ kfree(disc_req);
+ return res;
+}
+
+static int sas_expander_discover(struct domain_device *dev)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int res = -ENOMEM;
+
+ ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL);
+ if (!ex->ex_phy)
+ return -ENOMEM;
+
+ res = sas_ex_phy_discover(dev, -1);
+ if (res)
+ goto out_err;
+
+ return 0;
+ out_err:
+ kfree(ex->ex_phy);
+ ex->ex_phy = NULL;
+ return res;
+}
+
+#define MAX_EXPANDER_PHYS 128
+
+static void ex_assign_report_general(struct domain_device *dev,
+ struct smp_resp *resp)
+{
+ struct report_general_resp *rg = &resp->rg;
+
+ dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
+ dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
+ dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
+ dev->ex_dev.conf_route_table = rg->conf_route_table;
+ dev->ex_dev.configuring = rg->configuring;
+ memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
+}
+
+#define RG_REQ_SIZE 8
+#define RG_RESP_SIZE 32
+
+static int sas_ex_general(struct domain_device *dev)
+{
+ u8 *rg_req;
+ struct smp_resp *rg_resp;
+ int res;
+ int i;
+
+ rg_req = alloc_smp_req(RG_REQ_SIZE);
+ if (!rg_req)
+ return -ENOMEM;
+
+ rg_resp = alloc_smp_resp(RG_RESP_SIZE);
+ if (!rg_resp) {
+ kfree(rg_req);
+ return -ENOMEM;
+ }
+
+ rg_req[1] = SMP_REPORT_GENERAL;
+
+ for (i = 0; i < 5; i++) {
+ res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
+ RG_RESP_SIZE);
+
+ if (res) {
+ SAS_DPRINTK("RG to ex %016llx failed:0x%x\n",
+ SAS_ADDR(dev->sas_addr), res);
+ goto out;
+ } else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
+ SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n",
+ SAS_ADDR(dev->sas_addr), rg_resp->result);
+ res = rg_resp->result;
+ goto out;
+ }
+
+ ex_assign_report_general(dev, rg_resp);
+
+ if (dev->ex_dev.configuring) {
+ SAS_DPRINTK("RG: ex %llx self-configuring...\n",
+ SAS_ADDR(dev->sas_addr));
+ schedule_timeout_interruptible(5*HZ);
+ } else
+ break;
+ }
+out:
+ kfree(rg_req);
+ kfree(rg_resp);
+ return res;
+}
+
+static void ex_assign_manuf_info(struct domain_device *dev, void
+ *_mi_resp)
+{
+ u8 *mi_resp = _mi_resp;
+ struct sas_rphy *rphy = dev->rphy;
+ struct sas_expander_device *edev = rphy_to_expander_device(rphy);
+
+ memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN);
+ memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN);
+ memcpy(edev->product_rev, mi_resp + 36,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+
+ if (mi_resp[8] & 1) {
+ memcpy(edev->component_vendor_id, mi_resp + 40,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ edev->component_id = mi_resp[48] << 8 | mi_resp[49];
+ edev->component_revision_id = mi_resp[50];
+ }
+}
+
+#define MI_REQ_SIZE 8
+#define MI_RESP_SIZE 64
+
+static int sas_ex_manuf_info(struct domain_device *dev)
+{
+ u8 *mi_req;
+ u8 *mi_resp;
+ int res;
+
+ mi_req = alloc_smp_req(MI_REQ_SIZE);
+ if (!mi_req)
+ return -ENOMEM;
+
+ mi_resp = alloc_smp_resp(MI_RESP_SIZE);
+ if (!mi_resp) {
+ kfree(mi_req);
+ return -ENOMEM;
+ }
+
+ mi_req[1] = SMP_REPORT_MANUF_INFO;
+
+ res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
+ if (res) {
+ SAS_DPRINTK("MI: ex %016llx failed:0x%x\n",
+ SAS_ADDR(dev->sas_addr), res);
+ goto out;
+ } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
+ SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n",
+ SAS_ADDR(dev->sas_addr), mi_resp[2]);
+ goto out;
+ }
+
+ ex_assign_manuf_info(dev, mi_resp);
+out:
+ kfree(mi_req);
+ kfree(mi_resp);
+ return res;
+}
+
+#define PC_REQ_SIZE 44
+#define PC_RESP_SIZE 8
+
+int sas_smp_phy_control(struct domain_device *dev, int phy_id,
+ enum phy_func phy_func,
+ struct sas_phy_linkrates *rates)
+{
+ u8 *pc_req;
+ u8 *pc_resp;
+ int res;
+
+ pc_req = alloc_smp_req(PC_REQ_SIZE);
+ if (!pc_req)
+ return -ENOMEM;
+
+ pc_resp = alloc_smp_resp(PC_RESP_SIZE);
+ if (!pc_resp) {
+ kfree(pc_req);
+ return -ENOMEM;
+ }
+
+ pc_req[1] = SMP_PHY_CONTROL;
+ pc_req[9] = phy_id;
+ pc_req[10]= phy_func;
+ if (rates) {
+ pc_req[32] = rates->minimum_linkrate << 4;
+ pc_req[33] = rates->maximum_linkrate << 4;
+ }
+
+ res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
+
+ kfree(pc_resp);
+ kfree(pc_req);
+ return res;
+}
+
+static void sas_ex_disable_phy(struct domain_device *dev, int phy_id)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *phy = &ex->ex_phy[phy_id];
+
+ sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL);
+ phy->linkrate = SAS_PHY_DISABLED;
+}
+
+static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int i;
+
+ for (i = 0; i < ex->num_phys; i++) {
+ struct ex_phy *phy = &ex->ex_phy[i];
+
+ if (phy->phy_state == PHY_VACANT ||
+ phy->phy_state == PHY_NOT_PRESENT)
+ continue;
+
+ if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr))
+ sas_ex_disable_phy(dev, i);
+ }
+}
+
+static int sas_dev_present_in_domain(struct asd_sas_port *port,
+ u8 *sas_addr)
+{
+ struct domain_device *dev;
+
+ if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr))
+ return 1;
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr))
+ return 1;
+ }
+ return 0;
+}
+
+#define RPEL_REQ_SIZE 16
+#define RPEL_RESP_SIZE 32
+int sas_smp_get_phy_events(struct sas_phy *phy)
+{
+ int res;
+ u8 *req;
+ u8 *resp;
+ struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+ struct domain_device *dev = sas_find_dev_by_rphy(rphy);
+
+ req = alloc_smp_req(RPEL_REQ_SIZE);
+ if (!req)
+ return -ENOMEM;
+
+ resp = alloc_smp_resp(RPEL_RESP_SIZE);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req[1] = SMP_REPORT_PHY_ERR_LOG;
+ req[9] = phy->number;
+
+ res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
+ resp, RPEL_RESP_SIZE);
+
+ if (!res)
+ goto out;
+
+ phy->invalid_dword_count = scsi_to_u32(&resp[12]);
+ phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
+ phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
+ phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
+
+ out:
+ kfree(resp);
+ return res;
+
+}
+
+#ifdef CONFIG_SCSI_SAS_ATA
+
+#define RPS_REQ_SIZE 16
+#define RPS_RESP_SIZE 60
+
+int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+ struct smp_resp *rps_resp)
+{
+ int res;
+ u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
+ u8 *resp = (u8 *)rps_resp;
+
+ if (!rps_req)
+ return -ENOMEM;
+
+ rps_req[1] = SMP_REPORT_PHY_SATA;
+ rps_req[9] = phy_id;
+
+ res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
+ rps_resp, RPS_RESP_SIZE);
+
+ /* 0x34 is the FIS type for the D2H fis. There's a potential
+ * standards cockup here. sas-2 explicitly specifies the FIS
+ * should be encoded so that FIS type is in resp[24].
+ * However, some expanders endian reverse this. Undo the
+ * reversal here */
+ if (!res && resp[27] == 0x34 && resp[24] != 0x34) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ int j = 24 + (i*4);
+ u8 a, b;
+ a = resp[j + 0];
+ b = resp[j + 1];
+ resp[j + 0] = resp[j + 3];
+ resp[j + 1] = resp[j + 2];
+ resp[j + 2] = b;
+ resp[j + 3] = a;
+ }
+ }
+
+ kfree(rps_req);
+ return res;
+}
+#endif
+
+static void sas_ex_get_linkrate(struct domain_device *parent,
+ struct domain_device *child,
+ struct ex_phy *parent_phy)
+{
+ struct expander_device *parent_ex = &parent->ex_dev;
+ struct sas_port *port;
+ int i;
+
+ child->pathways = 0;
+
+ port = parent_phy->port;
+
+ for (i = 0; i < parent_ex->num_phys; i++) {
+ struct ex_phy *phy = &parent_ex->ex_phy[i];
+
+ if (phy->phy_state == PHY_VACANT ||
+ phy->phy_state == PHY_NOT_PRESENT)
+ continue;
+
+ if (SAS_ADDR(phy->attached_sas_addr) ==
+ SAS_ADDR(child->sas_addr)) {
+
+ child->min_linkrate = min(parent->min_linkrate,
+ phy->linkrate);
+ child->max_linkrate = max(parent->max_linkrate,
+ phy->linkrate);
+ child->pathways++;
+ sas_port_add_phy(port, phy->phy);
+ }
+ }
+ child->linkrate = min(parent_phy->linkrate, child->max_linkrate);
+ child->pathways = min(child->pathways, parent->pathways);
+}
+
+static struct domain_device *sas_ex_discover_end_dev(
+ struct domain_device *parent, int phy_id)
+{
+ struct expander_device *parent_ex = &parent->ex_dev;
+ struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
+ struct domain_device *child = NULL;
+ struct sas_rphy *rphy;
+ int res;
+
+ if (phy->attached_sata_host || phy->attached_sata_ps)
+ return NULL;
+
+ child = sas_alloc_device();
+ if (!child)
+ return NULL;
+
+ kref_get(&parent->kref);
+ child->parent = parent;
+ child->port = parent->port;
+ child->iproto = phy->attached_iproto;
+ memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+ sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
+ if (!phy->port) {
+ phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
+ if (unlikely(!phy->port))
+ goto out_err;
+ if (unlikely(sas_port_add(phy->port) != 0)) {
+ sas_port_free(phy->port);
+ goto out_err;
+ }
+ }
+ sas_ex_get_linkrate(parent, child, phy);
+ sas_device_set_phy(child, phy->port);
+
+#ifdef CONFIG_SCSI_SAS_ATA
+ if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
+ res = sas_get_ata_info(child, phy);
+ if (res)
+ goto out_free;
+
+ sas_init_dev(child);
+ res = sas_ata_init(child);
+ if (res)
+ goto out_free;
+ rphy = sas_end_device_alloc(phy->port);
+ if (!rphy)
+ goto out_free;
+
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
+
+ res = sas_discover_sata(child);
+ if (res) {
+ SAS_DPRINTK("sas_discover_sata() for device %16llx at "
+ "%016llx:0x%x returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, res);
+ goto out_list_del;
+ }
+ } else
+#endif
+ if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
+ child->dev_type = SAS_END_DEVICE;
+ rphy = sas_end_device_alloc(phy->port);
+ /* FIXME: error handling */
+ if (unlikely(!rphy))
+ goto out_free;
+ child->tproto = phy->attached_tproto;
+ sas_init_dev(child);
+
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+ sas_fill_in_rphy(child, rphy);
+
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
+
+ res = sas_discover_end_dev(child);
+ if (res) {
+ SAS_DPRINTK("sas_discover_end_dev() for device %16llx "
+ "at %016llx:0x%x returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, res);
+ goto out_list_del;
+ }
+ } else {
+ SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
+ phy->attached_tproto, SAS_ADDR(parent->sas_addr),
+ phy_id);
+ goto out_free;
+ }
+
+ list_add_tail(&child->siblings, &parent_ex->children);
+ return child;
+
+ out_list_del:
+ sas_rphy_free(child->rphy);
+ list_del(&child->disco_list_node);
+ spin_lock_irq(&parent->port->dev_list_lock);
+ list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
+ out_free:
+ sas_port_delete(phy->port);
+ out_err:
+ phy->port = NULL;
+ sas_put_device(child);
+ return NULL;
+}
+
+/* See if this phy is part of a wide port */
+static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+{
+ struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
+ int i;
+
+ for (i = 0; i < parent->ex_dev.num_phys; i++) {
+ struct ex_phy *ephy = &parent->ex_dev.ex_phy[i];
+
+ if (ephy == phy)
+ continue;
+
+ if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr,
+ SAS_ADDR_SIZE) && ephy->port) {
+ sas_port_add_phy(ephy->port, phy->phy);
+ phy->port = ephy->port;
+ phy->phy_state = PHY_DEVICE_DISCOVERED;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct domain_device *sas_ex_discover_expander(
+ struct domain_device *parent, int phy_id)
+{
+ struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy);
+ struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
+ struct domain_device *child = NULL;
+ struct sas_rphy *rphy;
+ struct sas_expander_device *edev;
+ struct asd_sas_port *port;
+ int res;
+
+ if (phy->routing_attr == DIRECT_ROUTING) {
+ SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not "
+ "allowed\n",
+ SAS_ADDR(parent->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr),
+ phy->attached_phy_id);
+ return NULL;
+ }
+ child = sas_alloc_device();
+ if (!child)
+ return NULL;
+
+ phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
+ /* FIXME: better error handling */
+ BUG_ON(sas_port_add(phy->port) != 0);
+
+
+ switch (phy->attached_dev_type) {
+ case SAS_EDGE_EXPANDER_DEVICE:
+ rphy = sas_expander_alloc(phy->port,
+ SAS_EDGE_EXPANDER_DEVICE);
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ rphy = sas_expander_alloc(phy->port,
+ SAS_FANOUT_EXPANDER_DEVICE);
+ break;
+ default:
+ rphy = NULL; /* shut gcc up */
+ BUG();
+ }
+ port = parent->port;
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+ edev = rphy_to_expander_device(rphy);
+ child->dev_type = phy->attached_dev_type;
+ kref_get(&parent->kref);
+ child->parent = parent;
+ child->port = port;
+ child->iproto = phy->attached_iproto;
+ child->tproto = phy->attached_tproto;
+ memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+ sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
+ sas_ex_get_linkrate(parent, child, phy);
+ edev->level = parent_ex->level + 1;
+ parent->port->disc.max_level = max(parent->port->disc.max_level,
+ edev->level);
+ sas_init_dev(child);
+ sas_fill_in_rphy(child, rphy);
+ sas_rphy_add(rphy);
+
+ spin_lock_irq(&parent->port->dev_list_lock);
+ list_add_tail(&child->dev_list_node, &parent->port->dev_list);
+ spin_unlock_irq(&parent->port->dev_list_lock);
+
+ res = sas_discover_expander(child);
+ if (res) {
+ sas_rphy_delete(rphy);
+ spin_lock_irq(&parent->port->dev_list_lock);
+ list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
+ sas_put_device(child);
+ return NULL;
+ }
+ list_add_tail(&child->siblings, &parent->ex_dev.children);
+ return child;
+}
+
+static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
+ struct domain_device *child = NULL;
+ int res = 0;
+
+ /* Phy state */
+ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) {
+ if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL))
+ res = sas_ex_phy_discover(dev, phy_id);
+ if (res)
+ return res;
+ }
+
+ /* Parent and domain coherency */
+ if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
+ SAS_ADDR(dev->port->sas_addr))) {
+ sas_add_parent_port(dev, phy_id);
+ return 0;
+ }
+ if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
+ SAS_ADDR(dev->parent->sas_addr))) {
+ sas_add_parent_port(dev, phy_id);
+ if (ex_phy->routing_attr == TABLE_ROUTING)
+ sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1);
+ return 0;
+ }
+
+ if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
+ sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
+
+ if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
+ if (ex_phy->routing_attr == DIRECT_ROUTING) {
+ memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ sas_configure_routing(dev, ex_phy->attached_sas_addr);
+ }
+ return 0;
+ } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
+ return 0;
+
+ if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
+ ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
+ ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+ ex_phy->attached_dev_type != SAS_SATA_PENDING) {
+ SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
+ "phy 0x%x\n", ex_phy->attached_dev_type,
+ SAS_ADDR(dev->sas_addr),
+ phy_id);
+ return 0;
+ }
+
+ res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
+ if (res) {
+ SAS_DPRINTK("configure routing for dev %016llx "
+ "reported 0x%x. Forgotten\n",
+ SAS_ADDR(ex_phy->attached_sas_addr), res);
+ sas_disable_routing(dev, ex_phy->attached_sas_addr);
+ return res;
+ }
+
+ if (sas_ex_join_wide_port(dev, phy_id)) {
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
+ return res;
+ }
+
+ switch (ex_phy->attached_dev_type) {
+ case SAS_END_DEVICE:
+ case SAS_SATA_PENDING:
+ child = sas_ex_discover_end_dev(dev, phy_id);
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
+ SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
+ "attached to ex %016llx phy 0x%x\n",
+ SAS_ADDR(ex_phy->attached_sas_addr),
+ ex_phy->attached_phy_id,
+ SAS_ADDR(dev->sas_addr),
+ phy_id);
+ sas_ex_disable_phy(dev, phy_id);
+ break;
+ } else
+ memcpy(dev->port->disc.fanout_sas_addr,
+ ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
+ /* fallthrough */
+ case SAS_EDGE_EXPANDER_DEVICE:
+ child = sas_ex_discover_expander(dev, phy_id);
+ break;
+ default:
+ break;
+ }
+
+ if (child) {
+ int i;
+
+ for (i = 0; i < ex->num_phys; i++) {
+ if (ex->ex_phy[i].phy_state == PHY_VACANT ||
+ ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
+ continue;
+ /*
+ * Due to races, the phy might not get added to the
+ * wide port, so we add the phy to the wide port here.
+ */
+ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
+ SAS_ADDR(child->sas_addr)) {
+ ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
+ if (sas_ex_join_wide_port(dev, i))
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
+
+ }
+ }
+ }
+
+ return res;
+}
+
+static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int i;
+
+ for (i = 0; i < ex->num_phys; i++) {
+ struct ex_phy *phy = &ex->ex_phy[i];
+
+ if (phy->phy_state == PHY_VACANT ||
+ phy->phy_state == PHY_NOT_PRESENT)
+ continue;
+
+ if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
+ phy->routing_attr == SUBTRACTIVE_ROUTING) {
+
+ memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
+
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int sas_check_level_subtractive_boundary(struct domain_device *dev)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct domain_device *child;
+ u8 sub_addr[8] = {0, };
+
+ list_for_each_entry(child, &ex->children, siblings) {
+ if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+ child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
+ continue;
+ if (sub_addr[0] == 0) {
+ sas_find_sub_addr(child, sub_addr);
+ continue;
+ } else {
+ u8 s2[8];
+
+ if (sas_find_sub_addr(child, s2) &&
+ (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
+
+ SAS_DPRINTK("ex %016llx->%016llx-?->%016llx "
+ "diverges from subtractive "
+ "boundary %016llx\n",
+ SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(s2),
+ SAS_ADDR(sub_addr));
+
+ sas_ex_disable_port(child, s2);
+ }
+ }
+ }
+ return 0;
+}
+/**
+ * sas_ex_discover_devices -- discover devices attached to this expander
+ * dev: pointer to the expander domain device
+ * single: if you want to do a single phy, else set to -1;
+ *
+ * Configure this expander for use with its devices and register the
+ * devices of this expander.
+ */
+static int sas_ex_discover_devices(struct domain_device *dev, int single)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int i = 0, end = ex->num_phys;
+ int res = 0;
+
+ if (0 <= single && single < end) {
+ i = single;
+ end = i+1;
+ }
+
+ for ( ; i < end; i++) {
+ struct ex_phy *ex_phy = &ex->ex_phy[i];
+
+ if (ex_phy->phy_state == PHY_VACANT ||
+ ex_phy->phy_state == PHY_NOT_PRESENT ||
+ ex_phy->phy_state == PHY_DEVICE_DISCOVERED)
+ continue;
+
+ switch (ex_phy->linkrate) {
+ case SAS_PHY_DISABLED:
+ case SAS_PHY_RESET_PROBLEM:
+ case SAS_SATA_PORT_SELECTOR:
+ continue;
+ default:
+ res = sas_ex_discover_dev(dev, i);
+ if (res)
+ break;
+ continue;
+ }
+ }
+
+ if (!res)
+ sas_check_level_subtractive_boundary(dev);
+
+ return res;
+}
+
+static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int i;
+ u8 *sub_sas_addr = NULL;
+
+ if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
+ return 0;
+
+ for (i = 0; i < ex->num_phys; i++) {
+ struct ex_phy *phy = &ex->ex_phy[i];
+
+ if (phy->phy_state == PHY_VACANT ||
+ phy->phy_state == PHY_NOT_PRESENT)
+ continue;
+
+ if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+ phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
+ phy->routing_attr == SUBTRACTIVE_ROUTING) {
+
+ if (!sub_sas_addr)
+ sub_sas_addr = &phy->attached_sas_addr[0];
+ else if (SAS_ADDR(sub_sas_addr) !=
+ SAS_ADDR(phy->attached_sas_addr)) {
+
+ SAS_DPRINTK("ex %016llx phy 0x%x "
+ "diverges(%016llx) on subtractive "
+ "boundary(%016llx). Disabled\n",
+ SAS_ADDR(dev->sas_addr), i,
+ SAS_ADDR(phy->attached_sas_addr),
+ SAS_ADDR(sub_sas_addr));
+ sas_ex_disable_phy(dev, i);
+ }
+ }
+ }
+ return 0;
+}
+
+static void sas_print_parent_topology_bug(struct domain_device *child,
+ struct ex_phy *parent_phy,
+ struct ex_phy *child_phy)
+{
+ static const char *ex_type[] = {
+ [SAS_EDGE_EXPANDER_DEVICE] = "edge",
+ [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
+ };
+ struct domain_device *parent = child->parent;
+
+ sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
+ "phy 0x%x has %c:%c routing link!\n",
+
+ ex_type[parent->dev_type],
+ SAS_ADDR(parent->sas_addr),
+ parent_phy->phy_id,
+
+ ex_type[child->dev_type],
+ SAS_ADDR(child->sas_addr),
+ child_phy->phy_id,
+
+ sas_route_char(parent, parent_phy),
+ sas_route_char(child, child_phy));
+}
+
+static int sas_check_eeds(struct domain_device *child,
+ struct ex_phy *parent_phy,
+ struct ex_phy *child_phy)
+{
+ int res = 0;
+ struct domain_device *parent = child->parent;
+
+ if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
+ res = -ENODEV;
+ SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx "
+ "phy S:0x%x, while there is a fanout ex %016llx\n",
+ SAS_ADDR(parent->sas_addr),
+ parent_phy->phy_id,
+ SAS_ADDR(child->sas_addr),
+ child_phy->phy_id,
+ SAS_ADDR(parent->port->disc.fanout_sas_addr));
+ } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
+ memcpy(parent->port->disc.eeds_a, parent->sas_addr,
+ SAS_ADDR_SIZE);
+ memcpy(parent->port->disc.eeds_b, child->sas_addr,
+ SAS_ADDR_SIZE);
+ } else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
+ SAS_ADDR(parent->sas_addr)) ||
+ (SAS_ADDR(parent->port->disc.eeds_a) ==
+ SAS_ADDR(child->sas_addr)))
+ &&
+ ((SAS_ADDR(parent->port->disc.eeds_b) ==
+ SAS_ADDR(parent->sas_addr)) ||
+ (SAS_ADDR(parent->port->disc.eeds_b) ==
+ SAS_ADDR(child->sas_addr))))
+ ;
+ else {
+ res = -ENODEV;
+ SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx "
+ "phy 0x%x link forms a third EEDS!\n",
+ SAS_ADDR(parent->sas_addr),
+ parent_phy->phy_id,
+ SAS_ADDR(child->sas_addr),
+ child_phy->phy_id);
+ }
+
+ return res;
+}
+
+/* Here we spill over 80 columns. It is intentional.
+ */
+static int sas_check_parent_topology(struct domain_device *child)
+{
+ struct expander_device *child_ex = &child->ex_dev;
+ struct expander_device *parent_ex;
+ int i;
+ int res = 0;
+
+ if (!child->parent)
+ return 0;
+
+ if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+ child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
+ return 0;
+
+ parent_ex = &child->parent->ex_dev;
+
+ for (i = 0; i < parent_ex->num_phys; i++) {
+ struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
+ struct ex_phy *child_phy;
+
+ if (parent_phy->phy_state == PHY_VACANT ||
+ parent_phy->phy_state == PHY_NOT_PRESENT)
+ continue;
+
+ if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr))
+ continue;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ switch (child->parent->dev_type) {
+ case SAS_EDGE_EXPANDER_DEVICE:
+ if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
+ child_phy->routing_attr != TABLE_ROUTING) {
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ res = -ENODEV;
+ }
+ } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
+ if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
+ res = sas_check_eeds(child, parent_phy, child_phy);
+ } else if (child_phy->routing_attr != TABLE_ROUTING) {
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ res = -ENODEV;
+ }
+ } else if (parent_phy->routing_attr == TABLE_ROUTING) {
+ if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
+ (child_phy->routing_attr == TABLE_ROUTING &&
+ child_ex->t2t_supp && parent_ex->t2t_supp)) {
+ /* All good */;
+ } else {
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ res = -ENODEV;
+ }
+ }
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ if (parent_phy->routing_attr != TABLE_ROUTING ||
+ child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ res = -ENODEV;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return res;
+}
+
+#define RRI_REQ_SIZE 16
+#define RRI_RESP_SIZE 44
+
+static int sas_configure_present(struct domain_device *dev, int phy_id,
+ u8 *sas_addr, int *index, int *present)
+{
+ int i, res = 0;
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *phy = &ex->ex_phy[phy_id];
+ u8 *rri_req;
+ u8 *rri_resp;
+
+ *present = 0;
+ *index = 0;
+
+ rri_req = alloc_smp_req(RRI_REQ_SIZE);
+ if (!rri_req)
+ return -ENOMEM;
+
+ rri_resp = alloc_smp_resp(RRI_RESP_SIZE);
+ if (!rri_resp) {
+ kfree(rri_req);
+ return -ENOMEM;
+ }
+
+ rri_req[1] = SMP_REPORT_ROUTE_INFO;
+ rri_req[9] = phy_id;
+
+ for (i = 0; i < ex->max_route_indexes ; i++) {
+ *(__be16 *)(rri_req+6) = cpu_to_be16(i);
+ res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp,
+ RRI_RESP_SIZE);
+ if (res)
+ goto out;
+ res = rri_resp[2];
+ if (res == SMP_RESP_NO_INDEX) {
+ SAS_DPRINTK("overflow of indexes: dev %016llx "
+ "phy 0x%x index 0x%x\n",
+ SAS_ADDR(dev->sas_addr), phy_id, i);
+ goto out;
+ } else if (res != SMP_RESP_FUNC_ACC) {
+ SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
+ "result 0x%x\n", __func__,
+ SAS_ADDR(dev->sas_addr), phy_id, i, res);
+ goto out;
+ }
+ if (SAS_ADDR(sas_addr) != 0) {
+ if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) {
+ *index = i;
+ if ((rri_resp[12] & 0x80) == 0x80)
+ *present = 0;
+ else
+ *present = 1;
+ goto out;
+ } else if (SAS_ADDR(rri_resp+16) == 0) {
+ *index = i;
+ *present = 0;
+ goto out;
+ }
+ } else if (SAS_ADDR(rri_resp+16) == 0 &&
+ phy->last_da_index < i) {
+ phy->last_da_index = i;
+ *index = i;
+ *present = 0;
+ goto out;
+ }
+ }
+ res = -1;
+out:
+ kfree(rri_req);
+ kfree(rri_resp);
+ return res;
+}
+
+#define CRI_REQ_SIZE 44
+#define CRI_RESP_SIZE 8
+
+static int sas_configure_set(struct domain_device *dev, int phy_id,
+ u8 *sas_addr, int index, int include)
+{
+ int res;
+ u8 *cri_req;
+ u8 *cri_resp;
+
+ cri_req = alloc_smp_req(CRI_REQ_SIZE);
+ if (!cri_req)
+ return -ENOMEM;
+
+ cri_resp = alloc_smp_resp(CRI_RESP_SIZE);
+ if (!cri_resp) {
+ kfree(cri_req);
+ return -ENOMEM;
+ }
+
+ cri_req[1] = SMP_CONF_ROUTE_INFO;
+ *(__be16 *)(cri_req+6) = cpu_to_be16(index);
+ cri_req[9] = phy_id;
+ if (SAS_ADDR(sas_addr) == 0 || !include)
+ cri_req[12] |= 0x80;
+ memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE);
+
+ res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp,
+ CRI_RESP_SIZE);
+ if (res)
+ goto out;
+ res = cri_resp[2];
+ if (res == SMP_RESP_NO_INDEX) {
+ SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x "
+ "index 0x%x\n",
+ SAS_ADDR(dev->sas_addr), phy_id, index);
+ }
+out:
+ kfree(cri_req);
+ kfree(cri_resp);
+ return res;
+}
+
+static int sas_configure_phy(struct domain_device *dev, int phy_id,
+ u8 *sas_addr, int include)
+{
+ int index;
+ int present;
+ int res;
+
+ res = sas_configure_present(dev, phy_id, sas_addr, &index, &present);
+ if (res)
+ return res;
+ if (include ^ present)
+ return sas_configure_set(dev, phy_id, sas_addr, index,include);
+
+ return res;
+}
+
+/**
+ * sas_configure_parent -- configure routing table of parent
+ * parent: parent expander
+ * child: child expander
+ * sas_addr: SAS port identifier of device directly attached to child
+ */
+static int sas_configure_parent(struct domain_device *parent,
+ struct domain_device *child,
+ u8 *sas_addr, int include)
+{
+ struct expander_device *ex_parent = &parent->ex_dev;
+ int res = 0;
+ int i;
+
+ if (parent->parent) {
+ res = sas_configure_parent(parent->parent, parent, sas_addr,
+ include);
+ if (res)
+ return res;
+ }
+
+ if (ex_parent->conf_route_table == 0) {
+ SAS_DPRINTK("ex %016llx has self-configuring routing table\n",
+ SAS_ADDR(parent->sas_addr));
+ return 0;
+ }
+
+ for (i = 0; i < ex_parent->num_phys; i++) {
+ struct ex_phy *phy = &ex_parent->ex_phy[i];
+
+ if ((phy->routing_attr == TABLE_ROUTING) &&
+ (SAS_ADDR(phy->attached_sas_addr) ==
+ SAS_ADDR(child->sas_addr))) {
+ res = sas_configure_phy(parent, i, sas_addr, include);
+ if (res)
+ return res;
+ }
+ }
+
+ return res;
+}
+
+/**
+ * sas_configure_routing -- configure routing
+ * dev: expander device
+ * sas_addr: port identifier of device directly attached to the expander device
+ */
+static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr)
+{
+ if (dev->parent)
+ return sas_configure_parent(dev->parent, dev, sas_addr, 1);
+ return 0;
+}
+
+static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
+{
+ if (dev->parent)
+ return sas_configure_parent(dev->parent, dev, sas_addr, 0);
+ return 0;
+}
+
+/**
+ * sas_discover_expander -- expander discovery
+ * @ex: pointer to expander domain device
+ *
+ * See comment in sas_discover_sata().
+ */
+static int sas_discover_expander(struct domain_device *dev)
+{
+ int res;
+
+ res = sas_notify_lldd_dev_found(dev);
+ if (res)
+ return res;
+
+ res = sas_ex_general(dev);
+ if (res)
+ goto out_err;
+ res = sas_ex_manuf_info(dev);
+ if (res)
+ goto out_err;
+
+ res = sas_expander_discover(dev);
+ if (res) {
+ SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n",
+ SAS_ADDR(dev->sas_addr), res);
+ goto out_err;
+ }
+
+ sas_check_ex_subtractive_boundary(dev);
+ res = sas_check_parent_topology(dev);
+ if (res)
+ goto out_err;
+ return 0;
+out_err:
+ sas_notify_lldd_dev_gone(dev);
+ return res;
+}
+
+static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
+{
+ int res = 0;
+ struct domain_device *dev;
+
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ struct sas_expander_device *ex =
+ rphy_to_expander_device(dev->rphy);
+
+ if (level == ex->level)
+ res = sas_ex_discover_devices(dev, -1);
+ else if (level > 0)
+ res = sas_ex_discover_devices(port->port_dev, -1);
+
+ }
+ }
+
+ return res;
+}
+
+static int sas_ex_bfs_disc(struct asd_sas_port *port)
+{
+ int res;
+ int level;
+
+ do {
+ level = port->disc.max_level;
+ res = sas_ex_level_discovery(port, level);
+ mb();
+ } while (level < port->disc.max_level);
+
+ return res;
+}
+
+int sas_discover_root_expander(struct domain_device *dev)
+{
+ int res;
+ struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
+
+ res = sas_rphy_add(dev->rphy);
+ if (res)
+ goto out_err;
+
+ ex->level = dev->port->disc.max_level; /* 0 */
+ res = sas_discover_expander(dev);
+ if (res)
+ goto out_err2;
+
+ sas_ex_bfs_disc(dev->port);
+
+ return res;
+
+out_err2:
+ sas_rphy_remove(dev->rphy);
+out_err:
+ return res;
+}
+
+/* ---------- Domain revalidation ---------- */
+
+static int sas_get_phy_discover(struct domain_device *dev,
+ int phy_id, struct smp_resp *disc_resp)
+{
+ int res;
+ u8 *disc_req;
+
+ disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
+ if (!disc_req)
+ return -ENOMEM;
+
+ disc_req[1] = SMP_DISCOVER;
+ disc_req[9] = phy_id;
+
+ res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (res)
+ goto out;
+ else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
+ res = disc_resp->result;
+ goto out;
+ }
+out:
+ kfree(disc_req);
+ return res;
+}
+
+static int sas_get_phy_change_count(struct domain_device *dev,
+ int phy_id, int *pcc)
+{
+ int res;
+ struct smp_resp *disc_resp;
+
+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ res = sas_get_phy_discover(dev, phy_id, disc_resp);
+ if (!res)
+ *pcc = disc_resp->disc.change_count;
+
+ kfree(disc_resp);
+ return res;
+}
+
+static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+ u8 *sas_addr, enum sas_device_type *type)
+{
+ int res;
+ struct smp_resp *disc_resp;
+ struct discover_resp *dr;
+
+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ if (!disc_resp)
+ return -ENOMEM;
+ dr = &disc_resp->disc;
+
+ res = sas_get_phy_discover(dev, phy_id, disc_resp);
+ if (res == 0) {
+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
+ *type = to_dev_type(dr);
+ if (*type == 0)
+ memset(sas_addr, 0, 8);
+ }
+ kfree(disc_resp);
+ return res;
+}
+
+static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
+ int from_phy, bool update)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int res = 0;
+ int i;
+
+ for (i = from_phy; i < ex->num_phys; i++) {
+ int phy_change_count = 0;
+
+ res = sas_get_phy_change_count(dev, i, &phy_change_count);
+ switch (res) {
+ case SMP_RESP_PHY_VACANT:
+ case SMP_RESP_NO_PHY:
+ continue;
+ case SMP_RESP_FUNC_ACC:
+ break;
+ default:
+ return res;
+ }
+
+ if (phy_change_count != ex->ex_phy[i].phy_change_count) {
+ if (update)
+ ex->ex_phy[i].phy_change_count =
+ phy_change_count;
+ *phy_id = i;
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
+{
+ int res;
+ u8 *rg_req;
+ struct smp_resp *rg_resp;
+
+ rg_req = alloc_smp_req(RG_REQ_SIZE);
+ if (!rg_req)
+ return -ENOMEM;
+
+ rg_resp = alloc_smp_resp(RG_RESP_SIZE);
+ if (!rg_resp) {
+ kfree(rg_req);
+ return -ENOMEM;
+ }
+
+ rg_req[1] = SMP_REPORT_GENERAL;
+
+ res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
+ RG_RESP_SIZE);
+ if (res)
+ goto out;
+ if (rg_resp->result != SMP_RESP_FUNC_ACC) {
+ res = rg_resp->result;
+ goto out;
+ }
+
+ *ecc = be16_to_cpu(rg_resp->rg.change_count);
+out:
+ kfree(rg_resp);
+ kfree(rg_req);
+ return res;
+}
+/**
+ * sas_find_bcast_dev - find the device issue BROADCAST(CHANGE).
+ * @dev:domain device to be detect.
+ * @src_dev: the device which originated BROADCAST(CHANGE).
+ *
+ * Add self-configuration expander support. Suppose two expander cascading,
+ * when the first level expander is self-configuring, hotplug the disks in
+ * second level expander, BROADCAST(CHANGE) will not only be originated
+ * in the second level expander, but also be originated in the first level
+ * expander (see SAS protocol SAS 2r-14, 7.11 for detail), it is to say,
+ * expander changed count in two level expanders will all increment at least
+ * once, but the phy which chang count has changed is the source device which
+ * we concerned.
+ */
+
+static int sas_find_bcast_dev(struct domain_device *dev,
+ struct domain_device **src_dev)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ int ex_change_count = -1;
+ int phy_id = -1;
+ int res;
+ struct domain_device *ch;
+
+ res = sas_get_ex_change_count(dev, &ex_change_count);
+ if (res)
+ goto out;
+ if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) {
+ /* Just detect if this expander phys phy change count changed,
+ * in order to determine if this expander originate BROADCAST,
+ * and do not update phy change count field in our structure.
+ */
+ res = sas_find_bcast_phy(dev, &phy_id, 0, false);
+ if (phy_id != -1) {
+ *src_dev = dev;
+ ex->ex_change_count = ex_change_count;
+ SAS_DPRINTK("Expander phy change count has changed\n");
+ return res;
+ } else
+ SAS_DPRINTK("Expander phys DID NOT change\n");
+ }
+ list_for_each_entry(ch, &ex->children, siblings) {
+ if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ res = sas_find_bcast_dev(ch, src_dev);
+ if (*src_dev)
+ return res;
+ }
+ }
+out:
+ return res;
+}
+
+static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct domain_device *child, *n;
+
+ list_for_each_entry_safe(child, n, &ex->children, siblings) {
+ set_bit(SAS_DEV_GONE, &child->state);
+ if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ sas_unregister_ex_tree(port, child);
+ else
+ sas_unregister_dev(port, child);
+ }
+ sas_unregister_dev(port, dev);
+}
+
+static void sas_unregister_devs_sas_addr(struct domain_device *parent,
+ int phy_id, bool last)
+{
+ struct expander_device *ex_dev = &parent->ex_dev;
+ struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
+ struct domain_device *child, *n, *found = NULL;
+ if (last) {
+ list_for_each_entry_safe(child, n,
+ &ex_dev->children, siblings) {
+ if (SAS_ADDR(child->sas_addr) ==
+ SAS_ADDR(phy->attached_sas_addr)) {
+ set_bit(SAS_DEV_GONE, &child->state);
+ if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ sas_unregister_ex_tree(parent->port, child);
+ else
+ sas_unregister_dev(parent->port, child);
+ found = child;
+ break;
+ }
+ }
+ sas_disable_routing(parent, phy->attached_sas_addr);
+ }
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ if (phy->port) {
+ sas_port_delete_phy(phy->port, phy->phy);
+ sas_device_set_phy(found, phy->port);
+ if (phy->port->num_phys == 0)
+ sas_port_delete(phy->port);
+ phy->port = NULL;
+ }
+}
+
+static int sas_discover_bfs_by_root_level(struct domain_device *root,
+ const int level)
+{
+ struct expander_device *ex_root = &root->ex_dev;
+ struct domain_device *child;
+ int res = 0;
+
+ list_for_each_entry(child, &ex_root->children, siblings) {
+ if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ struct sas_expander_device *ex =
+ rphy_to_expander_device(child->rphy);
+
+ if (level > ex->level)
+ res = sas_discover_bfs_by_root_level(child,
+ level);
+ else if (level == ex->level)
+ res = sas_ex_discover_devices(child, -1);
+ }
+ }
+ return res;
+}
+
+static int sas_discover_bfs_by_root(struct domain_device *dev)
+{
+ int res;
+ struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
+ int level = ex->level+1;
+
+ res = sas_ex_discover_devices(dev, -1);
+ if (res)
+ goto out;
+ do {
+ res = sas_discover_bfs_by_root_level(dev, level);
+ mb();
+ level += 1;
+ } while (level <= dev->port->disc.max_level);
+out:
+ return res;
+}
+
+static int sas_discover_new(struct domain_device *dev, int phy_id)
+{
+ struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
+ struct domain_device *child;
+ int res;
+
+ SAS_DPRINTK("ex %016llx phy%d new device attached\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
+ res = sas_ex_phy_discover(dev, phy_id);
+ if (res)
+ return res;
+
+ if (sas_ex_join_wide_port(dev, phy_id))
+ return 0;
+
+ res = sas_ex_discover_devices(dev, phy_id);
+ if (res)
+ return res;
+ list_for_each_entry(child, &dev->ex_dev.children, siblings) {
+ if (SAS_ADDR(child->sas_addr) ==
+ SAS_ADDR(ex_phy->attached_sas_addr)) {
+ if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ res = sas_discover_bfs_by_root(child);
+ break;
+ }
+ }
+ return res;
+}
+
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
+{
+ if (old == new)
+ return true;
+
+ /* treat device directed resets as flutter, if we went
+ * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
+ */
+ if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
+ (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
+ return true;
+
+ return false;
+}
+
+static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *phy = &ex->ex_phy[phy_id];
+ enum sas_device_type type = SAS_PHY_UNUSED;
+ u8 sas_addr[8];
+ int res;
+
+ memset(sas_addr, 0, 8);
+ res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
+ switch (res) {
+ case SMP_RESP_NO_PHY:
+ phy->phy_state = PHY_NOT_PRESENT;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+ return res;
+ case SMP_RESP_PHY_VACANT:
+ phy->phy_state = PHY_VACANT;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+ return res;
+ case SMP_RESP_FUNC_ACC:
+ break;
+ case -ECOMM:
+ break;
+ default:
+ return res;
+ }
+
+ if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
+ phy->phy_state = PHY_EMPTY;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+ return res;
+ } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
+ dev_type_flutter(type, phy->attached_dev_type)) {
+ struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
+ char *action = "";
+
+ sas_ex_phy_discover(dev, phy_id);
+
+ if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
+ action = ", needs recovery";
+ SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
+ SAS_ADDR(dev->sas_addr), phy_id, action);
+ return res;
+ }
+
+ /* delete the old link */
+ if (SAS_ADDR(phy->attached_sas_addr) &&
+ SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
+ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+ }
+
+ return sas_discover_new(dev, phy_id);
+}
+
+/**
+ * sas_rediscover - revalidate the domain.
+ * @dev:domain device to be detect.
+ * @phy_id: the phy id will be detected.
+ *
+ * NOTE: this process _must_ quit (return) as soon as any connection
+ * errors are encountered. Connection recovery is done elsewhere.
+ * Discover process only interrogates devices in order to discover the
+ * domain.For plugging out, we un-register the device only when it is
+ * the last phy in the port, for other phys in this port, we just delete it
+ * from the port.For inserting, we do discovery when it is the
+ * first phy,for other phys in this port, we add it to the port to
+ * forming the wide-port.
+ */
+static int sas_rediscover(struct domain_device *dev, const int phy_id)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
+ int res = 0;
+ int i;
+ bool last = true; /* is this the last phy of the port */
+
+ SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
+
+ if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
+ for (i = 0; i < ex->num_phys; i++) {
+ struct ex_phy *phy = &ex->ex_phy[i];
+
+ if (i == phy_id)
+ continue;
+ if (SAS_ADDR(phy->attached_sas_addr) ==
+ SAS_ADDR(changed_phy->attached_sas_addr)) {
+ SAS_DPRINTK("phy%d part of wide port with "
+ "phy%d\n", phy_id, i);
+ last = false;
+ break;
+ }
+ }
+ res = sas_rediscover_dev(dev, phy_id, last);
+ } else
+ res = sas_discover_new(dev, phy_id);
+ return res;
+}
+
+/**
+ * sas_revalidate_domain -- revalidate the domain
+ * @port: port to the domain of interest
+ *
+ * NOTE: this process _must_ quit (return) as soon as any connection
+ * errors are encountered. Connection recovery is done elsewhere.
+ * Discover process only interrogates devices in order to discover the
+ * domain.
+ */
+int sas_ex_revalidate_domain(struct domain_device *port_dev)
+{
+ int res;
+ struct domain_device *dev = NULL;
+
+ res = sas_find_bcast_dev(port_dev, &dev);
+ while (res == 0 && dev) {
+ struct expander_device *ex = &dev->ex_dev;
+ int i = 0, phy_id;
+
+ do {
+ phy_id = -1;
+ res = sas_find_bcast_phy(dev, &phy_id, i, true);
+ if (phy_id == -1)
+ break;
+ res = sas_rediscover(dev, phy_id);
+ i = phy_id + 1;
+ } while (i < ex->num_phys);
+
+ dev = NULL;
+ res = sas_find_bcast_dev(port_dev, &dev);
+ }
+ return res;
+}
+
+int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ struct domain_device *dev;
+ int ret, type;
+ struct request *rsp = req->next_rq;
+
+ if (!rsp) {
+ printk("%s: space for a smp response is missing\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* no rphy means no smp target support (ie aic94xx host) */
+ if (!rphy)
+ return sas_smp_host_handler(shost, req, rsp);
+
+ type = rphy->identify.device_type;
+
+ if (type != SAS_EDGE_EXPANDER_DEVICE &&
+ type != SAS_FANOUT_EXPANDER_DEVICE) {
+ printk("%s: can we send a smp request to a device?\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dev = sas_find_dev_by_rphy(rphy);
+ if (!dev) {
+ printk("%s: fail to find a domain_device?\n", __func__);
+ return -EINVAL;
+ }
+
+ /* do we need to support multiple segments? */
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk("%s: multiple segments req %u, rsp %u\n",
+ __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
+ return -EINVAL;
+ }
+
+ ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
+ bio_data(rsp->bio), blk_rq_bytes(rsp));
+ if (ret > 0) {
+ /* positive number is the untransferred residual */
+ rsp->resid_len = ret;
+ req->resid_len = 0;
+ ret = 0;
+ } else if (ret == 0) {
+ rsp->resid_len = 0;
+ req->resid_len = 0;
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
new file mode 100644
index 000000000..d24792575
--- /dev/null
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -0,0 +1,383 @@
+/*
+ * Serial Attached SCSI (SAS) Expander discovery and configuration
+ *
+ * Copyright (C) 2007 James E.J. Bottomley
+ * <James.Bottomley@HansenPartnership.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 only.
+ */
+#include <linux/scatterlist.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "sas_internal.h"
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+
+static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
+ u8 phy_id)
+{
+ struct sas_phy *phy;
+ struct sas_rphy *rphy;
+
+ if (phy_id >= sas_ha->num_phys) {
+ resp_data[2] = SMP_RESP_NO_PHY;
+ return;
+ }
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+
+ phy = sas_ha->sas_phy[phy_id]->phy;
+ resp_data[9] = phy_id;
+ resp_data[13] = phy->negotiated_linkrate;
+ memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE);
+ memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ resp_data[40] = (phy->minimum_linkrate << 4) |
+ phy->minimum_linkrate_hw;
+ resp_data[41] = (phy->maximum_linkrate << 4) |
+ phy->maximum_linkrate_hw;
+
+ if (!sas_ha->sas_phy[phy_id]->port ||
+ !sas_ha->sas_phy[phy_id]->port->port_dev)
+ return;
+
+ rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
+ resp_data[12] = rphy->identify.device_type << 4;
+ resp_data[14] = rphy->identify.initiator_port_protocols;
+ resp_data[15] = rphy->identify.target_port_protocols;
+}
+
+/**
+ * to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od'
+ * @od: od bit to find
+ * @data: incoming bitstream (from frame)
+ * @index: requested data register index (from frame)
+ * @count: total number of registers in the bitstream (from frame)
+ * @bit: bit position of 'od' in the returned byte
+ *
+ * returns NULL if 'od' is not in 'data'
+ *
+ * From SFF-8485 v0.7:
+ * "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0)
+ * and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1).
+ *
+ * In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2)
+ * and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)."
+ *
+ * The general-purpose (raw-bitstream) RX registers have the same layout
+ * although 'od' is renamed 'id' for 'input data'.
+ *
+ * SFF-8489 defines the behavior of the LEDs in response to the 'od' values.
+ */
+static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit)
+{
+ unsigned int reg;
+ u8 byte;
+
+ /* gp registers start at index 1 */
+ if (index == 0)
+ return NULL;
+
+ index--; /* make index 0-based */
+ if (od < index * 32)
+ return NULL;
+
+ od -= index * 32;
+ reg = od >> 5;
+
+ if (reg >= count)
+ return NULL;
+
+ od &= (1 << 5) - 1;
+ byte = 3 - (od >> 3);
+ *bit = od & ((1 << 3) - 1);
+
+ return &data[reg * 4 + byte];
+}
+
+int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count)
+{
+ u8 *byte;
+ u8 bit;
+
+ byte = to_sas_gpio_gp_bit(od, data, index, count, &bit);
+ if (!byte)
+ return -1;
+
+ return (*byte >> bit) & 1;
+}
+EXPORT_SYMBOL(try_test_sas_gpio_gp_bit);
+
+static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
+ u8 reg_type, u8 reg_index, u8 reg_count,
+ u8 *req_data)
+{
+ struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
+ int written;
+
+ if (i->dft->lldd_write_gpio == NULL) {
+ resp_data[2] = SMP_RESP_FUNC_UNK;
+ return 0;
+ }
+
+ written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index,
+ reg_count, req_data);
+
+ if (written < 0) {
+ resp_data[2] = SMP_RESP_FUNC_FAILED;
+ written = 0;
+ } else
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+
+ return written;
+}
+
+static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
+ u8 phy_id)
+{
+ struct sas_rphy *rphy;
+ struct dev_to_host_fis *fis;
+ int i;
+
+ if (phy_id >= sas_ha->num_phys) {
+ resp_data[2] = SMP_RESP_NO_PHY;
+ return;
+ }
+
+ resp_data[2] = SMP_RESP_PHY_NO_SATA;
+
+ if (!sas_ha->sas_phy[phy_id]->port)
+ return;
+
+ rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
+ fis = (struct dev_to_host_fis *)
+ sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd;
+ if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA)
+ return;
+
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ resp_data[9] = phy_id;
+ memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr,
+ SAS_ADDR_SIZE);
+
+ /* check to see if we have a valid d2h fis */
+ if (fis->fis_type != 0x34)
+ return;
+
+ /* the d2h fis is required by the standard to be in LE format */
+ for (i = 0; i < 20; i += 4) {
+ u8 *dst = resp_data + 24 + i, *src =
+ &sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i];
+ dst[0] = src[3];
+ dst[1] = src[2];
+ dst[2] = src[1];
+ dst[3] = src[0];
+ }
+}
+
+static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
+ u8 phy_op, enum sas_linkrate min,
+ enum sas_linkrate max, u8 *resp_data)
+{
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_phy_linkrates rates;
+ struct asd_sas_phy *asd_phy;
+
+ if (phy_id >= sas_ha->num_phys) {
+ resp_data[2] = SMP_RESP_NO_PHY;
+ return;
+ }
+
+ asd_phy = sas_ha->sas_phy[phy_id];
+ switch (phy_op) {
+ case PHY_FUNC_NOP:
+ case PHY_FUNC_LINK_RESET:
+ case PHY_FUNC_HARD_RESET:
+ case PHY_FUNC_DISABLE:
+ case PHY_FUNC_CLEAR_ERROR_LOG:
+ case PHY_FUNC_CLEAR_AFFIL:
+ case PHY_FUNC_TX_SATA_PS_SIGNAL:
+ break;
+
+ default:
+ resp_data[2] = SMP_RESP_PHY_UNK_OP;
+ return;
+ }
+
+ rates.minimum_linkrate = min;
+ rates.maximum_linkrate = max;
+
+ /* filter reset requests through libata eh */
+ if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ return;
+ }
+
+ if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
+ resp_data[2] = SMP_RESP_FUNC_FAILED;
+ else
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+}
+
+int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
+ struct request *rsp)
+{
+ u8 *req_data = NULL, *resp_data = NULL, *buf;
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ int error = -EINVAL;
+
+ /* eight is the minimum size for request and response frames */
+ if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
+ goto out;
+
+ if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
+ bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
+ shost_printk(KERN_ERR, shost,
+ "SMP request/response frame crosses page boundary");
+ goto out;
+ }
+
+ req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
+
+ /* make sure frame can always be built ... we copy
+ * back only the requested length */
+ resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
+
+ if (!req_data || !resp_data) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ local_irq_disable();
+ buf = kmap_atomic(bio_page(req->bio));
+ memcpy(req_data, buf, blk_rq_bytes(req));
+ kunmap_atomic(buf - bio_offset(req->bio));
+ local_irq_enable();
+
+ if (req_data[0] != SMP_REQUEST)
+ goto out;
+
+ /* always succeeds ... even if we can't process the request
+ * the result is in the response frame */
+ error = 0;
+
+ /* set up default don't know response */
+ resp_data[0] = SMP_RESPONSE;
+ resp_data[1] = req_data[1];
+ resp_data[2] = SMP_RESP_FUNC_UNK;
+
+ switch (req_data[1]) {
+ case SMP_REPORT_GENERAL:
+ req->resid_len -= 8;
+ rsp->resid_len -= 32;
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ resp_data[9] = sas_ha->num_phys;
+ break;
+
+ case SMP_REPORT_MANUF_INFO:
+ req->resid_len -= 8;
+ rsp->resid_len -= 64;
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ memcpy(resp_data + 12, shost->hostt->name,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ memcpy(resp_data + 20, "libsas virt phy",
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ break;
+
+ case SMP_READ_GPIO_REG:
+ /* FIXME: need GPIO support in the transport class */
+ break;
+
+ case SMP_DISCOVER:
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
+ error = -EINVAL;
+ goto out;
+ }
+ rsp->resid_len -= 56;
+ sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
+ break;
+
+ case SMP_REPORT_PHY_ERR_LOG:
+ /* FIXME: could implement this with additional
+ * libsas callbacks providing the HW supports it */
+ break;
+
+ case SMP_REPORT_PHY_SATA:
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
+ error = -EINVAL;
+ goto out;
+ }
+ rsp->resid_len -= 60;
+ sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
+ break;
+
+ case SMP_REPORT_ROUTE_INFO:
+ /* Can't implement; hosts have no routes */
+ break;
+
+ case SMP_WRITE_GPIO_REG: {
+ /* SFF-8485 v0.7 */
+ const int base_frame_size = 11;
+ int to_write = req_data[4];
+
+ if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
+ req->resid_len < base_frame_size + to_write * 4) {
+ resp_data[2] = SMP_RESP_INV_FRM_LEN;
+ break;
+ }
+
+ to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
+ req_data[3], to_write, &req_data[8]);
+ req->resid_len -= base_frame_size + to_write * 4;
+ rsp->resid_len -= 8;
+ break;
+ }
+
+ case SMP_CONF_ROUTE_INFO:
+ /* Can't implement; hosts have no routes */
+ break;
+
+ case SMP_PHY_CONTROL:
+ req->resid_len -= 44;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
+ error = -EINVAL;
+ goto out;
+ }
+ rsp->resid_len -= 8;
+ sas_phy_control(sas_ha, req_data[9], req_data[10],
+ req_data[32] >> 4, req_data[33] >> 4,
+ resp_data);
+ break;
+
+ case SMP_PHY_TEST_FUNCTION:
+ /* FIXME: should this be implemented? */
+ break;
+
+ default:
+ /* probably a 2.0 function */
+ break;
+ }
+
+ local_irq_disable();
+ buf = kmap_atomic(bio_page(rsp->bio));
+ memcpy(buf, resp_data, blk_rq_bytes(rsp));
+ flush_kernel_dcache_page(bio_page(rsp->bio));
+ kunmap_atomic(buf - bio_offset(rsp->bio));
+ local_irq_enable();
+
+ out:
+ kfree(req_data);
+ kfree(resp_data);
+ return error;
+}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
new file mode 100644
index 000000000..362da44f2
--- /dev/null
+++ b/drivers/scsi/libsas/sas_init.c
@@ -0,0 +1,601 @@
+/*
+ * Serial Attached SCSI (SAS) Transport Layer initialization
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <scsi/sas_ata.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "sas_internal.h"
+
+#include "../scsi_sas_internal.h"
+
+static struct kmem_cache *sas_task_cache;
+
+struct sas_task *sas_alloc_task(gfp_t flags)
+{
+ struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
+
+ if (task) {
+ spin_lock_init(&task->task_state_lock);
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ }
+
+ return task;
+}
+EXPORT_SYMBOL_GPL(sas_alloc_task);
+
+struct sas_task *sas_alloc_slow_task(gfp_t flags)
+{
+ struct sas_task *task = sas_alloc_task(flags);
+ struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);
+
+ if (!task || !slow) {
+ if (task)
+ kmem_cache_free(sas_task_cache, task);
+ kfree(slow);
+ return NULL;
+ }
+
+ task->slow_task = slow;
+ init_timer(&slow->timer);
+ init_completion(&slow->completion);
+
+ return task;
+}
+EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
+
+void sas_free_task(struct sas_task *task)
+{
+ if (task) {
+ kfree(task->slow_task);
+ kmem_cache_free(sas_task_cache, task);
+ }
+}
+EXPORT_SYMBOL_GPL(sas_free_task);
+
+/*------------ SAS addr hash -----------*/
+void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
+{
+ const u32 poly = 0x00DB2777;
+ u32 r = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ int b;
+ for (b = 7; b >= 0; b--) {
+ r <<= 1;
+ if ((1 << b) & sas_addr[i]) {
+ if (!(r & 0x01000000))
+ r ^= poly;
+ } else if (r & 0x01000000)
+ r ^= poly;
+ }
+ }
+
+ hashed[0] = (r >> 16) & 0xFF;
+ hashed[1] = (r >> 8) & 0xFF ;
+ hashed[2] = r & 0xFF;
+}
+
+
+/* ---------- HA events ---------- */
+
+void sas_hae_reset(struct work_struct *work)
+{
+ struct sas_ha_event *ev = to_sas_ha_event(work);
+ struct sas_ha_struct *ha = ev->ha;
+
+ clear_bit(HAE_RESET, &ha->pending);
+}
+
+int sas_register_ha(struct sas_ha_struct *sas_ha)
+{
+ int error = 0;
+
+ mutex_init(&sas_ha->disco_mutex);
+ spin_lock_init(&sas_ha->phy_port_lock);
+ sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
+
+ set_bit(SAS_HA_REGISTERED, &sas_ha->state);
+ spin_lock_init(&sas_ha->lock);
+ mutex_init(&sas_ha->drain_mutex);
+ init_waitqueue_head(&sas_ha->eh_wait_q);
+ INIT_LIST_HEAD(&sas_ha->defer_q);
+ INIT_LIST_HEAD(&sas_ha->eh_dev_q);
+
+ error = sas_register_phys(sas_ha);
+ if (error) {
+ printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
+ return error;
+ }
+
+ error = sas_register_ports(sas_ha);
+ if (error) {
+ printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
+ goto Undo_phys;
+ }
+
+ error = sas_init_events(sas_ha);
+ if (error) {
+ printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
+ goto Undo_ports;
+ }
+
+ INIT_LIST_HEAD(&sas_ha->eh_done_q);
+ INIT_LIST_HEAD(&sas_ha->eh_ata_q);
+
+ return 0;
+
+Undo_ports:
+ sas_unregister_ports(sas_ha);
+Undo_phys:
+
+ return error;
+}
+
+static void sas_disable_events(struct sas_ha_struct *sas_ha)
+{
+ /* Set the state to unregistered to avoid further unchained
+ * events to be queued, and flush any in-progress drainers
+ */
+ mutex_lock(&sas_ha->drain_mutex);
+ spin_lock_irq(&sas_ha->lock);
+ clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
+ spin_unlock_irq(&sas_ha->lock);
+ __sas_drain_work(sas_ha);
+ mutex_unlock(&sas_ha->drain_mutex);
+}
+
+int sas_unregister_ha(struct sas_ha_struct *sas_ha)
+{
+ sas_disable_events(sas_ha);
+ sas_unregister_ports(sas_ha);
+
+ /* flush unregistration work */
+ mutex_lock(&sas_ha->drain_mutex);
+ __sas_drain_work(sas_ha);
+ mutex_unlock(&sas_ha->drain_mutex);
+
+ return 0;
+}
+
+static int sas_get_linkerrors(struct sas_phy *phy)
+{
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
+ }
+
+ return sas_smp_get_phy_events(phy);
+}
+
+int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
+{
+ struct domain_device *dev = NULL;
+
+ /* try to route user requested link resets through libata */
+ if (asd_phy->port)
+ dev = asd_phy->port->port_dev;
+
+ /* validate that dev has been probed */
+ if (dev)
+ dev = sas_find_dev_by_rphy(dev->rphy);
+
+ if (dev && dev_is_sata(dev)) {
+ sas_ata_schedule_reset(dev);
+ sas_ata_wait_eh(dev);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * transport_sas_phy_reset - reset a phy and permit libata to manage the link
+ *
+ * phy reset request via sysfs in host workqueue context so we know we
+ * can block on eh and safely traverse the domain_device topology
+ */
+static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ enum phy_func reset_type;
+
+ if (hard_reset)
+ reset_type = PHY_FUNC_HARD_RESET;
+ else
+ reset_type = PHY_FUNC_LINK_RESET;
+
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
+ return 0;
+ return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
+ } else {
+ struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+ struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
+ struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);
+
+ if (ata_dev && !hard_reset) {
+ sas_ata_schedule_reset(ata_dev);
+ sas_ata_wait_eh(ata_dev);
+ return 0;
+ } else
+ return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
+ }
+}
+
+static int sas_phy_enable(struct sas_phy *phy, int enable)
+{
+ int ret;
+ enum phy_func cmd;
+
+ if (enable)
+ cmd = PHY_FUNC_LINK_RESET;
+ else
+ cmd = PHY_FUNC_DISABLE;
+
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (enable)
+ ret = transport_sas_phy_reset(phy, 0);
+ else
+ ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
+ } else {
+ struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+ struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
+
+ if (enable)
+ ret = transport_sas_phy_reset(phy, 0);
+ else
+ ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
+ }
+ return ret;
+}
+
+int sas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ int ret;
+ enum phy_func reset_type;
+
+ if (!phy->enabled)
+ return -ENODEV;
+
+ if (hard_reset)
+ reset_type = PHY_FUNC_HARD_RESET;
+ else
+ reset_type = PHY_FUNC_LINK_RESET;
+
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
+ } else {
+ struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+ struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
+ ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
+ }
+ return ret;
+}
+
+int sas_set_phy_speed(struct sas_phy *phy,
+ struct sas_phy_linkrates *rates)
+{
+ int ret;
+
+ if ((rates->minimum_linkrate &&
+ rates->minimum_linkrate > phy->maximum_linkrate) ||
+ (rates->maximum_linkrate &&
+ rates->maximum_linkrate < phy->minimum_linkrate))
+ return -EINVAL;
+
+ if (rates->minimum_linkrate &&
+ rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (rates->maximum_linkrate &&
+ rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
+ rates);
+ } else {
+ struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+ struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
+ ret = sas_smp_phy_control(ddev, phy->number,
+ PHY_FUNC_LINK_RESET, rates);
+
+ }
+
+ return ret;
+}
+
+void sas_prep_resume_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ set_bit(SAS_HA_REGISTERED, &ha->state);
+
+ /* clear out any stale link events/data from the suspension path */
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_phy *phy = ha->sas_phy[i];
+
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ phy->port_events_pending = 0;
+ phy->phy_events_pending = 0;
+ phy->frame_rcvd_size = 0;
+ }
+}
+EXPORT_SYMBOL(sas_prep_resume_ha);
+
+static int phys_suspended(struct sas_ha_struct *ha)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_phy *phy = ha->sas_phy[i];
+
+ if (phy->suspended)
+ rc++;
+ }
+
+ return rc;
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+ const unsigned long tmo = msecs_to_jiffies(25000);
+ int i;
+
+ /* deform ports on phys that did not resume
+ * at this point we may be racing the phy coming back (as posted
+ * by the lldd). So we post the event and once we are in the
+ * libsas context check that the phy remains suspended before
+ * tearing it down.
+ */
+ i = phys_suspended(ha);
+ if (i)
+ dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
+ i, i > 1 ? "s" : "");
+ wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_phy *phy = ha->sas_phy[i];
+
+ if (phy->suspended) {
+ dev_warn(&phy->phy->dev, "resume timeout\n");
+ sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
+ }
+ }
+
+ /* all phys are back up or timed out, turn on i/o so we can
+ * flush out disks that did not return
+ */
+ scsi_unblock_requests(ha->core.shost);
+ sas_drain_work(ha);
+}
+EXPORT_SYMBOL(sas_resume_ha);
+
+void sas_suspend_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ sas_disable_events(ha);
+ scsi_block_requests(ha->core.shost);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+
+ sas_discover_event(port, DISCE_SUSPEND);
+ }
+
+ /* flush suspend events while unregistered */
+ mutex_lock(&ha->drain_mutex);
+ __sas_drain_work(ha);
+ mutex_unlock(&ha->drain_mutex);
+}
+EXPORT_SYMBOL(sas_suspend_ha);
+
+static void sas_phy_release(struct sas_phy *phy)
+{
+ kfree(phy->hostdata);
+ phy->hostdata = NULL;
+}
+
+static void phy_reset_work(struct work_struct *work)
+{
+ struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
+
+ d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
+}
+
+static void phy_enable_work(struct work_struct *work)
+{
+ struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
+
+ d->enable_result = sas_phy_enable(d->phy, d->enable);
+}
+
+static int sas_phy_setup(struct sas_phy *phy)
+{
+ struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
+
+ if (!d)
+ return -ENOMEM;
+
+ mutex_init(&d->event_lock);
+ INIT_SAS_WORK(&d->reset_work, phy_reset_work);
+ INIT_SAS_WORK(&d->enable_work, phy_enable_work);
+ d->phy = phy;
+ phy->hostdata = d;
+
+ return 0;
+}
+
+static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_phy_data *d = phy->hostdata;
+ int rc;
+
+ if (!d)
+ return -ENOMEM;
+
+ /* libsas workqueue coordinates ata-eh reset with discovery */
+ mutex_lock(&d->event_lock);
+ d->reset_result = 0;
+ d->hard_reset = hard_reset;
+
+ spin_lock_irq(&ha->lock);
+ sas_queue_work(ha, &d->reset_work);
+ spin_unlock_irq(&ha->lock);
+
+ rc = sas_drain_work(ha);
+ if (rc == 0)
+ rc = d->reset_result;
+ mutex_unlock(&d->event_lock);
+
+ return rc;
+}
+
+static int queue_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_phy_data *d = phy->hostdata;
+ int rc;
+
+ if (!d)
+ return -ENOMEM;
+
+ /* libsas workqueue coordinates ata-eh reset with discovery */
+ mutex_lock(&d->event_lock);
+ d->enable_result = 0;
+ d->enable = enable;
+
+ spin_lock_irq(&ha->lock);
+ sas_queue_work(ha, &d->enable_work);
+ spin_unlock_irq(&ha->lock);
+
+ rc = sas_drain_work(ha);
+ if (rc == 0)
+ rc = d->enable_result;
+ mutex_unlock(&d->event_lock);
+
+ return rc;
+}
+
+static struct sas_function_template sft = {
+ .phy_enable = queue_phy_enable,
+ .phy_reset = queue_phy_reset,
+ .phy_setup = sas_phy_setup,
+ .phy_release = sas_phy_release,
+ .set_phy_speed = sas_set_phy_speed,
+ .get_linkerrors = sas_get_linkerrors,
+ .smp_handler = sas_smp_handler,
+};
+
+struct scsi_transport_template *
+sas_domain_attach_transport(struct sas_domain_function_template *dft)
+{
+ struct scsi_transport_template *stt = sas_attach_transport(&sft);
+ struct sas_internal *i;
+
+ if (!stt)
+ return stt;
+
+ i = to_sas_internal(stt);
+ i->dft = dft;
+ stt->create_work_queue = 1;
+ stt->eh_timed_out = sas_scsi_timed_out;
+ stt->eh_strategy_handler = sas_scsi_recover_host;
+
+ return stt;
+}
+EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+
+void sas_domain_release_transport(struct scsi_transport_template *stt)
+{
+ sas_release_transport(stt);
+}
+EXPORT_SYMBOL_GPL(sas_domain_release_transport);
+
+/* ---------- SAS Class register/unregister ---------- */
+
+static int __init sas_class_init(void)
+{
+ sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
+ if (!sas_task_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit sas_class_exit(void)
+{
+ kmem_cache_destroy(sas_task_cache);
+}
+
+MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
+MODULE_DESCRIPTION("SAS Transport Layer");
+MODULE_LICENSE("GPL v2");
+
+module_init(sas_class_init);
+module_exit(sas_class_exit);
+
+EXPORT_SYMBOL_GPL(sas_register_ha);
+EXPORT_SYMBOL_GPL(sas_unregister_ha);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
new file mode 100644
index 000000000..9cf0bc260
--- /dev/null
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -0,0 +1,200 @@
+/*
+ * Serial Attached SCSI (SAS) class internal header file
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ */
+
+#ifndef _SAS_INTERNAL_H_
+#define _SAS_INTERNAL_H_
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/libsas.h>
+#include <scsi/sas_ata.h>
+
+#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
+
+#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
+
+#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
+#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
+
+struct sas_phy_data {
+ /* let reset be performed in sas_queue_work() context */
+ struct sas_phy *phy;
+ struct mutex event_lock;
+ int hard_reset;
+ int reset_result;
+ struct sas_work reset_work;
+ int enable;
+ int enable_result;
+ struct sas_work enable_work;
+};
+
+void sas_scsi_recover_host(struct Scsi_Host *shost);
+
+int sas_show_class(enum sas_class class, char *buf);
+int sas_show_proto(enum sas_protocol proto, char *buf);
+int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
+int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
+
+int sas_register_phys(struct sas_ha_struct *sas_ha);
+void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+
+int sas_register_ports(struct sas_ha_struct *sas_ha);
+void sas_unregister_ports(struct sas_ha_struct *sas_ha);
+
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+
+int sas_init_events(struct sas_ha_struct *sas_ha);
+void sas_disable_revalidation(struct sas_ha_struct *ha);
+void sas_enable_revalidation(struct sas_ha_struct *ha);
+void __sas_drain_work(struct sas_ha_struct *ha);
+
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
+
+void sas_porte_bytes_dmaed(struct work_struct *work);
+void sas_porte_broadcast_rcvd(struct work_struct *work);
+void sas_porte_link_reset_err(struct work_struct *work);
+void sas_porte_timer_event(struct work_struct *work);
+void sas_porte_hard_reset(struct work_struct *work);
+void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+
+int sas_notify_lldd_dev_found(struct domain_device *);
+void sas_notify_lldd_dev_gone(struct domain_device *);
+
+int sas_smp_phy_control(struct domain_device *dev, int phy_id,
+ enum phy_func phy_func, struct sas_phy_linkrates *);
+int sas_smp_get_phy_events(struct sas_phy *phy);
+
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
+void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
+struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
+struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+int sas_ex_phy_discover(struct domain_device *dev, int single);
+int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+ struct smp_resp *rps_resp);
+int sas_try_ata_reset(struct asd_sas_phy *phy);
+void sas_hae_reset(struct work_struct *work);
+
+void sas_free_device(struct kref *kref);
+
+#ifdef CONFIG_SCSI_SAS_HOST_SMP
+extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
+ struct request *rsp);
+#else
+static inline int sas_smp_host_handler(struct Scsi_Host *shost,
+ struct request *req,
+ struct request *rsp)
+{
+ shost_printk(KERN_ERR, shost,
+ "Cannot send SMP to a sas host (not enabled in CONFIG)\n");
+ return -EINVAL;
+}
+#endif
+
+static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
+{
+ SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
+ func, dev->parent ? "exp-attached" :
+ "direct-attached",
+ SAS_ADDR(dev->sas_addr), err);
+ sas_unregister_dev(dev->port, dev);
+}
+
+static inline void sas_fill_in_rphy(struct domain_device *dev,
+ struct sas_rphy *rphy)
+{
+ rphy->identify.sas_address = SAS_ADDR(dev->sas_addr);
+ rphy->identify.initiator_port_protocols = dev->iproto;
+ rphy->identify.target_port_protocols = dev->tproto;
+ switch (dev->dev_type) {
+ case SAS_SATA_DEV:
+ /* FIXME: need sata device type */
+ case SAS_END_DEVICE:
+ case SAS_SATA_PENDING:
+ rphy->identify.device_type = SAS_END_DEVICE;
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ default:
+ rphy->identify.device_type = SAS_PHY_UNUSED;
+ break;
+ }
+}
+
+static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev)
+{
+ struct sas_phy *phy = p->phy;
+
+ if (dev) {
+ if (dev_is_sata(dev))
+ phy->identify.device_type = SAS_END_DEVICE;
+ else
+ phy->identify.device_type = dev->dev_type;
+ phy->identify.target_port_protocols = dev->tproto;
+ } else {
+ phy->identify.device_type = SAS_PHY_UNUSED;
+ phy->identify.target_port_protocols = 0;
+ }
+}
+
+static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
+{
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
+
+ if (!ex->parent_port) {
+ ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id);
+ /* FIXME: error handling */
+ BUG_ON(!ex->parent_port);
+ BUG_ON(sas_port_add(ex->parent_port));
+ sas_port_mark_backlink(ex->parent_port);
+ }
+ sas_port_add_phy(ex->parent_port, ex_phy->phy);
+}
+
+static inline struct domain_device *sas_alloc_device(void)
+{
+ struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (dev) {
+ INIT_LIST_HEAD(&dev->siblings);
+ INIT_LIST_HEAD(&dev->dev_list_node);
+ INIT_LIST_HEAD(&dev->disco_list_node);
+ kref_init(&dev->kref);
+ spin_lock_init(&dev->done_lock);
+ }
+ return dev;
+}
+
+static inline void sas_put_device(struct domain_device *dev)
+{
+ kref_put(&dev->kref, sas_free_device);
+}
+
+#endif /* _SAS_INTERNAL_H_ */
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
new file mode 100644
index 000000000..cdee446c2
--- /dev/null
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -0,0 +1,181 @@
+/*
+ * Serial Attached SCSI (SAS) Phy class
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "sas_internal.h"
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+
+/* ---------- Phy events ---------- */
+
+static void sas_phye_loss_of_signal(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ phy->error = 0;
+ sas_deform_port(phy, 1);
+}
+
+static void sas_phye_oob_done(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ phy->error = 0;
+}
+
+static void sas_phye_oob_error(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct asd_sas_port *port = phy->port;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
+
+ sas_deform_port(phy, 1);
+
+ if (!port && phy->enabled && i->dft->lldd_control_phy) {
+ phy->error++;
+ switch (phy->error) {
+ case 1:
+ case 2:
+ i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET,
+ NULL);
+ break;
+ case 3:
+ default:
+ phy->error = 0;
+ phy->enabled = 0;
+ i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+ break;
+ }
+ }
+}
+
+static void sas_phye_spinup_hold(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
+
+ phy->error = 0;
+ i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
+}
+
+static void sas_phye_resume_timeout(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
+
+ /* phew, lldd got the phy back in the nick of time */
+ if (!phy->suspended) {
+ dev_info(&phy->phy->dev, "resume timeout cancelled\n");
+ return;
+ }
+
+ phy->error = 0;
+ phy->suspended = 0;
+ sas_deform_port(phy, 1);
+}
+
+
+/* ---------- Phy class registration ---------- */
+
+int sas_register_phys(struct sas_ha_struct *sas_ha)
+{
+ int i;
+
+ static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+ [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+ [PHYE_OOB_DONE] = sas_phye_oob_done,
+ [PHYE_OOB_ERROR] = sas_phye_oob_error,
+ [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+
+ };
+
+ static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+ [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+ [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+ [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+ [PORTE_TIMER_EVENT] = sas_porte_timer_event,
+ [PORTE_HARD_RESET] = sas_porte_hard_reset,
+ };
+
+ /* Now register the phys. */
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ int k;
+ struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+ phy->error = 0;
+ INIT_LIST_HEAD(&phy->port_phy_el);
+ for (k = 0; k < PORT_NUM_EVENTS; k++) {
+ INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
+ phy->port_events[k].phy = phy;
+ }
+
+ for (k = 0; k < PHY_NUM_EVENTS; k++) {
+ INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
+ phy->phy_events[k].phy = phy;
+ }
+
+ phy->port = NULL;
+ phy->ha = sas_ha;
+ spin_lock_init(&phy->frame_rcvd_lock);
+ spin_lock_init(&phy->sas_prim_lock);
+ phy->frame_rcvd_size = 0;
+
+ phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
+ if (!phy->phy)
+ return -ENOMEM;
+
+ phy->phy->identify.initiator_port_protocols =
+ phy->iproto;
+ phy->phy->identify.target_port_protocols = phy->tproto;
+ phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr);
+ phy->phy->identify.phy_identifier = i;
+ phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
+ phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
+ phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
+ phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
+ phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+
+ sas_phy_add(phy->phy);
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
new file mode 100644
index 000000000..d3c5297c6
--- /dev/null
+++ b/drivers/scsi/libsas/sas_port.c
@@ -0,0 +1,355 @@
+/*
+ * Serial Attached SCSI (SAS) Port class
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "sas_internal.h"
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+
+static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
+{
+ struct sas_ha_struct *sas_ha = phy->ha;
+
+ if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
+ SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
+ memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
+ return false;
+ return true;
+}
+
+static void sas_resume_port(struct asd_sas_phy *phy)
+{
+ struct domain_device *dev;
+ struct asd_sas_port *port = phy->port;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (si->dft->lldd_port_formed)
+ si->dft->lldd_port_formed(phy);
+
+ if (port->suspended)
+ port->suspended = 0;
+ else {
+ /* we only need to handle "link returned" actions once */
+ return;
+ }
+
+ /* if the port came back:
+ * 1/ presume every device came back
+ * 2/ force the next revalidation to check all expander phys
+ */
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ int i, rc;
+
+ rc = sas_notify_lldd_dev_found(dev);
+ if (rc) {
+ sas_unregister_dev(port, dev);
+ continue;
+ }
+
+ if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ dev->ex_dev.ex_change_count = -1;
+ for (i = 0; i < dev->ex_dev.num_phys; i++) {
+ struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
+
+ phy->phy_change_count = -1;
+ }
+ }
+ }
+
+ sas_discover_event(port, DISCE_RESUME);
+}
+
+/**
+ * sas_form_port -- add this phy to a port
+ * @phy: the phy of interest
+ *
+ * This function adds this phy to an existing port, thus creating a wide
+ * port, or it creates a port and adds the phy to the port.
+ */
+static void sas_form_port(struct asd_sas_phy *phy)
+{
+ int i;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct asd_sas_port *port = phy->port;
+ struct sas_internal *si =
+ to_sas_internal(sas_ha->core.shost->transportt);
+ unsigned long flags;
+
+ if (port) {
+ if (!phy_is_wideport_member(port, phy))
+ sas_deform_port(phy, 0);
+ else if (phy->suspended) {
+ phy->suspended = 0;
+ sas_resume_port(phy);
+
+ /* phy came back, try to cancel the timeout */
+ wake_up(&sas_ha->eh_wait_q);
+ return;
+ } else {
+ SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
+ __func__, phy->id, phy->port->id,
+ phy->port->num_phys);
+ return;
+ }
+ }
+
+ /* see if the phy should be part of a wide port */
+ spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ port = sas_ha->sas_port[i];
+ spin_lock(&port->phy_list_lock);
+ if (*(u64 *) port->sas_addr &&
+ phy_is_wideport_member(port, phy) && port->num_phys > 0) {
+ /* wide port */
+ SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
+ port->id);
+ break;
+ }
+ spin_unlock(&port->phy_list_lock);
+ }
+ /* The phy does not match any existing port, create a new one */
+ if (i == sas_ha->num_phys) {
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ port = sas_ha->sas_port[i];
+ spin_lock(&port->phy_list_lock);
+ if (*(u64 *)port->sas_addr == 0
+ && port->num_phys == 0) {
+ memcpy(port->sas_addr, phy->sas_addr,
+ SAS_ADDR_SIZE);
+ break;
+ }
+ spin_unlock(&port->phy_list_lock);
+ }
+ }
+
+ if (i >= sas_ha->num_phys) {
+ printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
+ __func__);
+ spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+ return;
+ }
+
+ /* add the phy to the port */
+ list_add_tail(&phy->port_phy_el, &port->phy_list);
+ sas_phy_set_target(phy, port->port_dev);
+ phy->port = port;
+ port->num_phys++;
+ port->phy_mask |= (1U << phy->id);
+
+ if (*(u64 *)port->attached_sas_addr == 0) {
+ port->class = phy->class;
+ memcpy(port->attached_sas_addr, phy->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ port->iproto = phy->iproto;
+ port->tproto = phy->tproto;
+ port->oob_mode = phy->oob_mode;
+ port->linkrate = phy->linkrate;
+ } else
+ port->linkrate = max(port->linkrate, phy->linkrate);
+ spin_unlock(&port->phy_list_lock);
+ spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+
+ if (!port->port) {
+ port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
+ BUG_ON(!port->port);
+ sas_port_add(port->port);
+ }
+ sas_port_add_phy(port->port, phy->phy);
+
+ SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
+ dev_name(&phy->phy->dev), dev_name(&port->port->dev),
+ port->phy_mask,
+ SAS_ADDR(port->attached_sas_addr));
+
+ if (port->port_dev)
+ port->port_dev->pathways = port->num_phys;
+
+ /* Tell the LLDD about this port formation. */
+ if (si->dft->lldd_port_formed)
+ si->dft->lldd_port_formed(phy);
+
+ sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+}
+
+/**
+ * sas_deform_port -- remove this phy from the port it belongs to
+ * @phy: the phy of interest
+ *
+ * This is called when the physical link to the other phy has been
+ * lost (on this phy), in Event thread context. We cannot delay here.
+ */
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
+{
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct asd_sas_port *port = phy->port;
+ struct sas_internal *si =
+ to_sas_internal(sas_ha->core.shost->transportt);
+ struct domain_device *dev;
+ unsigned long flags;
+
+ if (!port)
+ return; /* done by a phy event */
+
+ dev = port->port_dev;
+ if (dev)
+ dev->pathways--;
+
+ if (port->num_phys == 1) {
+ sas_unregister_domain_devices(port, gone);
+ sas_port_delete(port->port);
+ port->port = NULL;
+ } else {
+ sas_port_delete_phy(port->port, phy->phy);
+ sas_device_set_phy(dev, port->port);
+ }
+
+ if (si->dft->lldd_port_deformed)
+ si->dft->lldd_port_deformed(phy);
+
+ spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
+ spin_lock(&port->phy_list_lock);
+
+ list_del_init(&phy->port_phy_el);
+ sas_phy_set_target(phy, NULL);
+ phy->port = NULL;
+ port->num_phys--;
+ port->phy_mask &= ~(1U << phy->id);
+
+ if (port->num_phys == 0) {
+ INIT_LIST_HEAD(&port->phy_list);
+ memset(port->sas_addr, 0, SAS_ADDR_SIZE);
+ memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ port->class = 0;
+ port->iproto = 0;
+ port->tproto = 0;
+ port->oob_mode = 0;
+ port->phy_mask = 0;
+ }
+ spin_unlock(&port->phy_list_lock);
+ spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+
+ return;
+}
+
+/* ---------- SAS port events ---------- */
+
+void sas_porte_bytes_dmaed(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
+
+ sas_form_port(phy);
+}
+
+void sas_porte_broadcast_rcvd(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ unsigned long flags;
+ u32 prim;
+
+ clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
+
+ spin_lock_irqsave(&phy->sas_prim_lock, flags);
+ prim = phy->sas_prim;
+ spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
+
+ SAS_DPRINTK("broadcast received: %d\n", prim);
+ sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+}
+
+void sas_porte_link_reset_err(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
+
+ sas_deform_port(phy, 1);
+}
+
+void sas_porte_timer_event(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
+
+ sas_deform_port(phy, 1);
+}
+
+void sas_porte_hard_reset(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+
+ clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
+
+ sas_deform_port(phy, 1);
+}
+
+/* ---------- SAS port registration ---------- */
+
+static void sas_init_port(struct asd_sas_port *port,
+ struct sas_ha_struct *sas_ha, int i)
+{
+ memset(port, 0, sizeof(*port));
+ port->id = i;
+ INIT_LIST_HEAD(&port->dev_list);
+ INIT_LIST_HEAD(&port->disco_list);
+ INIT_LIST_HEAD(&port->destroy_list);
+ spin_lock_init(&port->phy_list_lock);
+ INIT_LIST_HEAD(&port->phy_list);
+ port->ha = sas_ha;
+
+ spin_lock_init(&port->dev_list_lock);
+}
+
+int sas_register_ports(struct sas_ha_struct *sas_ha)
+{
+ int i;
+
+ /* initialize the ports and discovery */
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ struct asd_sas_port *port = sas_ha->sas_port[i];
+
+ sas_init_port(port, sas_ha, i);
+ sas_init_disc(&port->disc, port);
+ }
+ return 0;
+}
+
+void sas_unregister_ports(struct sas_ha_struct *sas_ha)
+{
+ int i;
+
+ for (i = 0; i < sas_ha->num_phys; i++)
+ if (sas_ha->sas_phy[i]->port)
+ sas_deform_port(sas_ha->sas_phy[i], 0);
+
+}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
new file mode 100644
index 000000000..519dac4e3
--- /dev/null
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -0,0 +1,1013 @@
+/*
+ * Serial Attached SCSI (SAS) class SCSI Host glue.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/firmware.h>
+#include <linux/export.h>
+#include <linux/ctype.h>
+
+#include "sas_internal.h"
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/sas_ata.h>
+#include "../scsi_sas_internal.h"
+#include "../scsi_transport_api.h"
+#include "../scsi_priv.h"
+
+#include <linux/err.h>
+#include <linux/blkdev.h>
+#include <linux/freezer.h>
+#include <linux/gfp.h>
+#include <linux/scatterlist.h>
+#include <linux/libata.h>
+
+/* record final status and free the task */
+static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
+{
+ struct task_status_struct *ts = &task->task_status;
+ int hs = 0, stat = 0;
+
+ if (ts->resp == SAS_TASK_UNDELIVERED) {
+ /* transport error */
+ hs = DID_NO_CONNECT;
+ } else { /* ts->resp == SAS_TASK_COMPLETE */
+ /* task delivered, what happened afterwards? */
+ switch (ts->stat) {
+ case SAS_DEV_NO_RESPONSE:
+ case SAS_INTERRUPTED:
+ case SAS_PHY_DOWN:
+ case SAS_NAK_R_ERR:
+ case SAS_OPEN_TO:
+ hs = DID_NO_CONNECT;
+ break;
+ case SAS_DATA_UNDERRUN:
+ scsi_set_resid(sc, ts->residual);
+ if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
+ hs = DID_ERROR;
+ break;
+ case SAS_DATA_OVERRUN:
+ hs = DID_ERROR;
+ break;
+ case SAS_QUEUE_FULL:
+ hs = DID_SOFT_ERROR; /* retry */
+ break;
+ case SAS_DEVICE_UNKNOWN:
+ hs = DID_BAD_TARGET;
+ break;
+ case SAS_SG_ERR:
+ hs = DID_PARITY;
+ break;
+ case SAS_OPEN_REJECT:
+ if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
+ hs = DID_SOFT_ERROR; /* retry */
+ else
+ hs = DID_ERROR;
+ break;
+ case SAS_PROTO_RESPONSE:
+ SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
+ "task; please report this\n",
+ task->dev->port->ha->sas_ha_name);
+ break;
+ case SAS_ABORTED_TASK:
+ hs = DID_ABORT;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ memcpy(sc->sense_buffer, ts->buf,
+ min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
+ stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ default:
+ stat = ts->stat;
+ break;
+ }
+ }
+
+ sc->result = (hs << 16) | stat;
+ ASSIGN_SAS_TASK(sc, NULL);
+ sas_free_task(task);
+}
+
+static void sas_scsi_task_done(struct sas_task *task)
+{
+ struct scsi_cmnd *sc = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct sas_ha_struct *ha = dev->port->ha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->done_lock, flags);
+ if (test_bit(SAS_HA_FROZEN, &ha->state))
+ task = NULL;
+ else
+ ASSIGN_SAS_TASK(sc, NULL);
+ spin_unlock_irqrestore(&dev->done_lock, flags);
+
+ if (unlikely(!task)) {
+ /* task will be completed by the error handler */
+ SAS_DPRINTK("task done but aborted\n");
+ return;
+ }
+
+ if (unlikely(!sc)) {
+ SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
+ sas_free_task(task);
+ return;
+ }
+
+ sas_end_task(sc, task);
+ sc->scsi_done(sc);
+}
+
+static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
+ struct domain_device *dev,
+ gfp_t gfp_flags)
+{
+ struct sas_task *task = sas_alloc_task(gfp_flags);
+ struct scsi_lun lun;
+
+ if (!task)
+ return NULL;
+
+ task->uldd_task = cmd;
+ ASSIGN_SAS_TASK(cmd, task);
+
+ task->dev = dev;
+ task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
+
+ task->ssp_task.retry_count = 1;
+ int_to_scsilun(cmd->device->lun, &lun);
+ memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
+ task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
+ task->ssp_task.cmd = cmd;
+
+ task->scatter = scsi_sglist(cmd);
+ task->num_scatter = scsi_sg_count(cmd);
+ task->total_xfer_len = scsi_bufflen(cmd);
+ task->data_dir = cmd->sc_data_direction;
+
+ task->task_done = sas_scsi_task_done;
+
+ return task;
+}
+
+int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct sas_internal *i = to_sas_internal(host->transportt);
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_task *task;
+ int res = 0;
+
+ /* If the device fell off, no sense in issuing commands */
+ if (test_bit(SAS_DEV_GONE, &dev->state)) {
+ cmd->result = DID_BAD_TARGET << 16;
+ goto out_done;
+ }
+
+ if (dev_is_sata(dev)) {
+ spin_lock_irq(dev->sata_dev.ap->lock);
+ res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
+ spin_unlock_irq(dev->sata_dev.ap->lock);
+ return res;
+ }
+
+ task = sas_create_task(cmd, dev, GFP_ATOMIC);
+ if (!task)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
+ if (res)
+ goto out_free_task;
+ return 0;
+
+out_free_task:
+ SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+ ASSIGN_SAS_TASK(cmd, NULL);
+ sas_free_task(task);
+ if (res == -SAS_QUEUE_FULL)
+ cmd->result = DID_SOFT_ERROR << 16; /* retry */
+ else
+ cmd->result = DID_ERROR << 16;
+out_done:
+ cmd->scsi_done(cmd);
+ return 0;
+}
+
+static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
+{
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+ struct sas_task *task = TO_SAS_TASK(cmd);
+
+ /* At this point, we only get called following an actual abort
+ * of the task, so we should be guaranteed not to be racing with
+ * any completions from the LLD. Task is freed after this.
+ */
+ sas_end_task(cmd, task);
+
+ /* now finish the command and move it on to the error
+ * handler done list, this also takes it off the
+ * error handler pending list.
+ */
+ scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
+}
+
+static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
+{
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_ha_struct *ha = dev->port->ha;
+ struct sas_task *task = TO_SAS_TASK(cmd);
+
+ if (!dev_is_sata(dev)) {
+ sas_eh_finish_cmd(cmd);
+ return;
+ }
+
+ /* report the timeout to libata */
+ sas_end_task(cmd, task);
+ list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
+}
+
+static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
+{
+ struct scsi_cmnd *cmd, *n;
+
+ list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
+ if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
+ cmd->device->lun == my_cmd->device->lun)
+ sas_eh_defer_cmd(cmd);
+ }
+}
+
+static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
+ struct domain_device *dev)
+{
+ struct scsi_cmnd *cmd, *n;
+
+ list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
+ struct domain_device *x = cmd_to_domain_dev(cmd);
+
+ if (x == dev)
+ sas_eh_finish_cmd(cmd);
+ }
+}
+
+static void sas_scsi_clear_queue_port(struct list_head *error_q,
+ struct asd_sas_port *port)
+{
+ struct scsi_cmnd *cmd, *n;
+
+ list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct asd_sas_port *x = dev->port;
+
+ if (x == port)
+ sas_eh_finish_cmd(cmd);
+ }
+}
+
+enum task_disposition {
+ TASK_IS_DONE,
+ TASK_IS_ABORTED,
+ TASK_IS_AT_LU,
+ TASK_IS_NOT_AT_LU,
+ TASK_ABORT_FAILED,
+};
+
+static enum task_disposition sas_scsi_find_task(struct sas_task *task)
+{
+ unsigned long flags;
+ int i, res;
+ struct sas_internal *si =
+ to_sas_internal(task->dev->port->ha->core.shost->transportt);
+
+ for (i = 0; i < 5; i++) {
+ SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
+ res = si->dft->lldd_abort_task(task);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
+ task);
+ return TASK_IS_DONE;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (res == TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("%s: task 0x%p is aborted\n",
+ __func__, task);
+ return TASK_IS_ABORTED;
+ } else if (si->dft->lldd_query_task) {
+ SAS_DPRINTK("%s: querying task 0x%p\n",
+ __func__, task);
+ res = si->dft->lldd_query_task(task);
+ switch (res) {
+ case TMF_RESP_FUNC_SUCC:
+ SAS_DPRINTK("%s: task 0x%p at LU\n",
+ __func__, task);
+ return TASK_IS_AT_LU;
+ case TMF_RESP_FUNC_COMPLETE:
+ SAS_DPRINTK("%s: task 0x%p not at LU\n",
+ __func__, task);
+ return TASK_IS_NOT_AT_LU;
+ case TMF_RESP_FUNC_FAILED:
+ SAS_DPRINTK("%s: task 0x%p failed to abort\n",
+ __func__, task);
+ return TASK_ABORT_FAILED;
+ }
+
+ }
+ }
+ return res;
+}
+
+static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
+{
+ int res = TMF_RESP_FUNC_FAILED;
+ struct scsi_lun lun;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+
+ int_to_scsilun(cmd->device->lun, &lun);
+
+ SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
+ SAS_ADDR(dev->sas_addr),
+ cmd->device->lun);
+
+ if (i->dft->lldd_abort_task_set)
+ res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
+
+ if (res == TMF_RESP_FUNC_FAILED) {
+ if (i->dft->lldd_clear_task_set)
+ res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
+ }
+
+ if (res == TMF_RESP_FUNC_FAILED) {
+ if (i->dft->lldd_lu_reset)
+ res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
+ }
+
+ return res;
+}
+
+static int sas_recover_I_T(struct domain_device *dev)
+{
+ int res = TMF_RESP_FUNC_FAILED;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+
+ SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
+ SAS_ADDR(dev->sas_addr));
+
+ if (i->dft->lldd_I_T_nexus_reset)
+ res = i->dft->lldd_I_T_nexus_reset(dev);
+
+ return res;
+}
+
+/* take a reference on the last known good phy for this device */
+struct sas_phy *sas_get_local_phy(struct domain_device *dev)
+{
+ struct sas_ha_struct *ha = dev->port->ha;
+ struct sas_phy *phy;
+ unsigned long flags;
+
+ /* a published domain device always has a valid phy, it may be
+ * stale, but it is never NULL
+ */
+ BUG_ON(!dev->phy);
+
+ spin_lock_irqsave(&ha->phy_port_lock, flags);
+ phy = dev->phy;
+ get_device(&phy->dev);
+ spin_unlock_irqrestore(&ha->phy_port_lock, flags);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(sas_get_local_phy);
+
+static void sas_wait_eh(struct domain_device *dev)
+{
+ struct sas_ha_struct *ha = dev->port->ha;
+ DEFINE_WAIT(wait);
+
+ if (dev_is_sata(dev)) {
+ ata_port_wait_eh(dev->sata_dev.ap);
+ return;
+ }
+ retry:
+ spin_lock_irq(&ha->lock);
+
+ while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
+ prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ha->lock);
+ schedule();
+ spin_lock_irq(&ha->lock);
+ }
+ finish_wait(&ha->eh_wait_q, &wait);
+
+ spin_unlock_irq(&ha->lock);
+
+ /* make sure SCSI EH is complete */
+ if (scsi_host_in_recovery(ha->core.shost)) {
+ msleep(10);
+ goto retry;
+ }
+}
+EXPORT_SYMBOL(sas_wait_eh);
+
+static int sas_queue_reset(struct domain_device *dev, int reset_type,
+ u64 lun, int wait)
+{
+ struct sas_ha_struct *ha = dev->port->ha;
+ int scheduled = 0, tries = 100;
+
+ /* ata: promote lun reset to bus reset */
+ if (dev_is_sata(dev)) {
+ sas_ata_schedule_reset(dev);
+ if (wait)
+ sas_ata_wait_eh(dev);
+ return SUCCESS;
+ }
+
+ while (!scheduled && tries--) {
+ spin_lock_irq(&ha->lock);
+ if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
+ !test_bit(reset_type, &dev->state)) {
+ scheduled = 1;
+ ha->eh_active++;
+ list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
+ set_bit(SAS_DEV_EH_PENDING, &dev->state);
+ set_bit(reset_type, &dev->state);
+ int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
+ scsi_schedule_eh(ha->core.shost);
+ }
+ spin_unlock_irq(&ha->lock);
+
+ if (wait)
+ sas_wait_eh(dev);
+
+ if (scheduled)
+ return SUCCESS;
+ }
+
+ SAS_DPRINTK("%s reset of %s failed\n",
+ reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
+ dev_name(&dev->rphy->dev));
+
+ return FAILED;
+}
+
+int sas_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ int res;
+ struct sas_task *task = TO_SAS_TASK(cmd);
+ struct Scsi_Host *host = cmd->device->host;
+ struct sas_internal *i = to_sas_internal(host->transportt);
+
+ if (current != host->ehandler)
+ return FAILED;
+
+ if (!i->dft->lldd_abort_task)
+ return FAILED;
+
+ res = i->dft->lldd_abort_task(task);
+ if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
+ return SUCCESS;
+
+ return FAILED;
+}
+EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
+
+/* Attempt to send a LUN reset message to a device */
+int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ int res;
+ struct scsi_lun lun;
+ struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_internal *i = to_sas_internal(host->transportt);
+
+ if (current != host->ehandler)
+ return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
+
+ int_to_scsilun(cmd->device->lun, &lun);
+
+ if (!i->dft->lldd_lu_reset)
+ return FAILED;
+
+ res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
+ if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
+ return SUCCESS;
+
+ return FAILED;
+}
+
+int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+ int res;
+ struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_internal *i = to_sas_internal(host->transportt);
+
+ if (current != host->ehandler)
+ return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
+
+ if (!i->dft->lldd_I_T_nexus_reset)
+ return FAILED;
+
+ res = i->dft->lldd_I_T_nexus_reset(dev);
+ if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
+ res == -ENODEV)
+ return SUCCESS;
+
+ return FAILED;
+}
+
+/* Try to reset a device */
+static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
+{
+ int res;
+ struct Scsi_Host *shost = cmd->device->host;
+
+ if (!shost->hostt->eh_device_reset_handler)
+ goto try_bus_reset;
+
+ res = shost->hostt->eh_device_reset_handler(cmd);
+ if (res == SUCCESS)
+ return res;
+
+try_bus_reset:
+ if (shost->hostt->eh_bus_reset_handler)
+ return shost->hostt->eh_bus_reset_handler(cmd);
+
+ return FAILED;
+}
+
+static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
+{
+ struct scsi_cmnd *cmd, *n;
+ enum task_disposition res = TASK_IS_DONE;
+ int tmf_resp, need_reset;
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+ unsigned long flags;
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ LIST_HEAD(done);
+
+ /* clean out any commands that won the completion vs eh race */
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_task *task;
+
+ spin_lock_irqsave(&dev->done_lock, flags);
+ /* by this point the lldd has either observed
+ * SAS_HA_FROZEN and is leaving the task alone, or has
+ * won the race with eh and decided to complete it
+ */
+ task = TO_SAS_TASK(cmd);
+ spin_unlock_irqrestore(&dev->done_lock, flags);
+
+ if (!task)
+ list_move_tail(&cmd->eh_entry, &done);
+ }
+
+ Again:
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+ struct sas_task *task = TO_SAS_TASK(cmd);
+
+ list_del_init(&cmd->eh_entry);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (need_reset) {
+ SAS_DPRINTK("%s: task 0x%p requests reset\n",
+ __func__, task);
+ goto reset;
+ }
+
+ SAS_DPRINTK("trying to find task 0x%p\n", task);
+ res = sas_scsi_find_task(task);
+
+ cmd->eh_eflags = 0;
+
+ switch (res) {
+ case TASK_IS_DONE:
+ SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
+ task);
+ sas_eh_defer_cmd(cmd);
+ continue;
+ case TASK_IS_ABORTED:
+ SAS_DPRINTK("%s: task 0x%p is aborted\n",
+ __func__, task);
+ sas_eh_defer_cmd(cmd);
+ continue;
+ case TASK_IS_AT_LU:
+ SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
+ reset:
+ tmf_resp = sas_recover_lu(task->dev, cmd);
+ if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("dev %016llx LU %llx is "
+ "recovered\n",
+ SAS_ADDR(task->dev),
+ cmd->device->lun);
+ sas_eh_defer_cmd(cmd);
+ sas_scsi_clear_queue_lu(work_q, cmd);
+ goto Again;
+ }
+ /* fallthrough */
+ case TASK_IS_NOT_AT_LU:
+ case TASK_ABORT_FAILED:
+ SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
+ task);
+ tmf_resp = sas_recover_I_T(task->dev);
+ if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
+ tmf_resp == -ENODEV) {
+ struct domain_device *dev = task->dev;
+ SAS_DPRINTK("I_T %016llx recovered\n",
+ SAS_ADDR(task->dev->sas_addr));
+ sas_eh_finish_cmd(cmd);
+ sas_scsi_clear_queue_I_T(work_q, dev);
+ goto Again;
+ }
+ /* Hammer time :-) */
+ try_to_reset_cmd_device(cmd);
+ if (i->dft->lldd_clear_nexus_port) {
+ struct asd_sas_port *port = task->dev->port;
+ SAS_DPRINTK("clearing nexus for port:%d\n",
+ port->id);
+ res = i->dft->lldd_clear_nexus_port(port);
+ if (res == TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("clear nexus port:%d "
+ "succeeded\n", port->id);
+ sas_eh_finish_cmd(cmd);
+ sas_scsi_clear_queue_port(work_q,
+ port);
+ goto Again;
+ }
+ }
+ if (i->dft->lldd_clear_nexus_ha) {
+ SAS_DPRINTK("clear nexus ha\n");
+ res = i->dft->lldd_clear_nexus_ha(ha);
+ if (res == TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("clear nexus ha "
+ "succeeded\n");
+ sas_eh_finish_cmd(cmd);
+ goto clear_q;
+ }
+ }
+ /* If we are here -- this means that no amount
+ * of effort could recover from errors. Quite
+ * possibly the HA just disappeared.
+ */
+ SAS_DPRINTK("error from device %llx, LUN %llx "
+ "couldn't be recovered in any way\n",
+ SAS_ADDR(task->dev->sas_addr),
+ cmd->device->lun);
+
+ sas_eh_finish_cmd(cmd);
+ goto clear_q;
+ }
+ }
+ out:
+ list_splice_tail(&done, work_q);
+ list_splice_tail_init(&ha->eh_ata_q, work_q);
+ return;
+
+ clear_q:
+ SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry)
+ sas_eh_finish_cmd(cmd);
+ goto out;
+}
+
+static void sas_eh_handle_resets(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ /* handle directed resets to sas devices */
+ spin_lock_irq(&ha->lock);
+ while (!list_empty(&ha->eh_dev_q)) {
+ struct domain_device *dev;
+ struct ssp_device *ssp;
+
+ ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
+ list_del_init(&ssp->eh_list_node);
+ dev = container_of(ssp, typeof(*dev), ssp_dev);
+ kref_get(&dev->kref);
+ WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");
+
+ spin_unlock_irq(&ha->lock);
+
+ if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
+ i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);
+
+ if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
+ i->dft->lldd_I_T_nexus_reset(dev);
+
+ sas_put_device(dev);
+ spin_lock_irq(&ha->lock);
+ clear_bit(SAS_DEV_EH_PENDING, &dev->state);
+ ha->eh_active--;
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+
+void sas_scsi_recover_host(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ LIST_HEAD(eh_work_q);
+ int tries = 0;
+ bool retry;
+
+retry:
+ tries++;
+ retry = true;
+ spin_lock_irq(shost->host_lock);
+ list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+ spin_unlock_irq(shost->host_lock);
+
+ SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
+ __func__, atomic_read(&shost->host_busy), shost->host_failed);
+ /*
+ * Deal with commands that still have SAS tasks (i.e. they didn't
+ * complete via the normal sas_task completion mechanism),
+ * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
+ */
+ set_bit(SAS_HA_FROZEN, &ha->state);
+ sas_eh_handle_sas_errors(shost, &eh_work_q);
+ clear_bit(SAS_HA_FROZEN, &ha->state);
+ if (list_empty(&eh_work_q))
+ goto out;
+
+ /*
+ * Now deal with SCSI commands that completed ok but have a an error
+ * code (and hopefully sense data) attached. This is roughly what
+ * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
+ * command we see here has no sas_task and is thus unknown to the HA.
+ */
+ sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
+ if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
+ scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
+
+out:
+ sas_eh_handle_resets(shost);
+
+ /* now link into libata eh --- if we have any ata devices */
+ sas_ata_strategy_handler(shost);
+
+ scsi_eh_flush_done_q(&ha->eh_done_q);
+
+ /* check if any new eh work was scheduled during the last run */
+ spin_lock_irq(&ha->lock);
+ if (ha->eh_active == 0) {
+ shost->host_eh_scheduled = 0;
+ retry = false;
+ }
+ spin_unlock_irq(&ha->lock);
+
+ if (retry)
+ goto retry;
+
+ SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
+ __func__, atomic_read(&shost->host_busy),
+ shost->host_failed, tries);
+}
+
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+{
+ scmd_dbg(cmd, "command %p timed out\n", cmd);
+
+ return BLK_EH_NOT_HANDLED;
+}
+
+int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
+
+ if (dev_is_sata(dev))
+ return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
+
+ return -EINVAL;
+}
+
+struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct domain_device *found_dev = NULL;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->phy_port_lock, flags);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ struct domain_device *dev;
+
+ spin_lock(&port->dev_list_lock);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ if (rphy == dev->rphy) {
+ found_dev = dev;
+ spin_unlock(&port->dev_list_lock);
+ goto found;
+ }
+ }
+ spin_unlock(&port->dev_list_lock);
+ }
+ found:
+ spin_unlock_irqrestore(&ha->phy_port_lock, flags);
+
+ return found_dev;
+}
+
+int sas_target_alloc(struct scsi_target *starget)
+{
+ struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
+ struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
+
+ if (!found_dev)
+ return -ENODEV;
+
+ kref_get(&found_dev->kref);
+ starget->hostdata = found_dev;
+ return 0;
+}
+
+#define SAS_DEF_QD 256
+
+int sas_slave_configure(struct scsi_device *scsi_dev)
+{
+ struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+ struct sas_ha_struct *sas_ha;
+
+ BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
+
+ if (dev_is_sata(dev)) {
+ ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
+ return 0;
+ }
+
+ sas_ha = dev->port->ha;
+
+ sas_read_port_mode_page(scsi_dev);
+
+ if (scsi_dev->tagged_supported) {
+ scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
+ } else {
+ SAS_DPRINTK("device %llx, LUN %llx doesn't support "
+ "TCQ\n", SAS_ADDR(dev->sas_addr),
+ scsi_dev->lun);
+ scsi_change_queue_depth(scsi_dev, 1);
+ }
+
+ scsi_dev->allow_restart = 1;
+
+ return 0;
+}
+
+int sas_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
+
+ if (dev_is_sata(dev))
+ return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+
+ if (!sdev->tagged_supported)
+ depth = 1;
+ return scsi_change_queue_depth(sdev, depth);
+}
+
+int sas_bios_param(struct scsi_device *scsi_dev,
+ struct block_device *bdev,
+ sector_t capacity, int *hsc)
+{
+ hsc[0] = 255;
+ hsc[1] = 63;
+ sector_div(capacity, 255*63);
+ hsc[2] = capacity;
+
+ return 0;
+}
+
+/*
+ * Tell an upper layer that it needs to initiate an abort for a given task.
+ * This should only ever be called by an LLDD.
+ */
+void sas_task_abort(struct sas_task *task)
+{
+ struct scsi_cmnd *sc = task->uldd_task;
+
+ /* Escape for libsas internal commands */
+ if (!sc) {
+ struct sas_task_slow *slow = task->slow_task;
+
+ if (!slow)
+ return;
+ if (!del_timer(&slow->timer))
+ return;
+ slow->timer.function(slow->timer.data);
+ return;
+ }
+
+ if (dev_is_sata(task->dev)) {
+ sas_ata_task_abort(task);
+ } else {
+ struct request_queue *q = sc->device->request_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_abort_request(sc->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+void sas_target_destroy(struct scsi_target *starget)
+{
+ struct domain_device *found_dev = starget->hostdata;
+
+ if (!found_dev)
+ return;
+
+ starget->hostdata = NULL;
+ sas_put_device(found_dev);
+}
+
+static void sas_parse_addr(u8 *sas_addr, const char *p)
+{
+ int i;
+ for (i = 0; i < SAS_ADDR_SIZE; i++) {
+ u8 h, l;
+ if (!*p)
+ break;
+ h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
+ p++;
+ l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
+ p++;
+ sas_addr[i] = (h<<4) | l;
+ }
+}
+
+#define SAS_STRING_ADDR_SIZE 16
+
+int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
+{
+ int res;
+ const struct firmware *fw;
+
+ res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
+ if (res)
+ return res;
+
+ if (fw->size < SAS_STRING_ADDR_SIZE) {
+ res = -ENODEV;
+ goto out;
+ }
+
+ sas_parse_addr(addr, fw->data);
+
+out:
+ release_firmware(fw);
+ return res;
+}
+EXPORT_SYMBOL_GPL(sas_request_addr);
+
+EXPORT_SYMBOL_GPL(sas_queuecommand);
+EXPORT_SYMBOL_GPL(sas_target_alloc);
+EXPORT_SYMBOL_GPL(sas_slave_configure);
+EXPORT_SYMBOL_GPL(sas_change_queue_depth);
+EXPORT_SYMBOL_GPL(sas_bios_param);
+EXPORT_SYMBOL_GPL(sas_task_abort);
+EXPORT_SYMBOL_GPL(sas_phy_reset);
+EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
+EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
+EXPORT_SYMBOL_GPL(sas_target_destroy);
+EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
new file mode 100644
index 000000000..a78e5bd3e
--- /dev/null
+++ b/drivers/scsi/libsas/sas_task.c
@@ -0,0 +1,37 @@
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+
+/* fill task_status_struct based on SSP response frame */
+void sas_ssp_task_response(struct device *dev, struct sas_task *task,
+ struct ssp_response_iu *iu)
+{
+ struct task_status_struct *tstat = &task->task_status;
+
+ tstat->resp = SAS_TASK_COMPLETE;
+
+ if (iu->datapres == 0)
+ tstat->stat = iu->status;
+ else if (iu->datapres == 1)
+ tstat->stat = iu->resp_data[3];
+ else if (iu->datapres == 2) {
+ tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->buf_valid_size =
+ min_t(int, SAS_STATUS_BUF_SIZE,
+ be32_to_cpu(iu->sense_data_len));
+ memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
+
+ if (iu->status != SAM_STAT_CHECK_CONDITION)
+ dev_printk(KERN_WARNING, dev,
+ "dev %llx sent sense data, but "
+ "stat(%x) is not CHECK CONDITION\n",
+ SAS_ADDR(task->dev->sas_addr),
+ iu->status);
+ }
+ else
+ /* when datapres contains corrupt/unknown value... */
+ tstat->stat = SAM_STAT_CHECK_CONDITION;
+}
+EXPORT_SYMBOL_GPL(sas_ssp_task_response);
+
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
new file mode 100644
index 000000000..e2516ba8e
--- /dev/null
+++ b/drivers/scsi/lpfc/Makefile
@@ -0,0 +1,33 @@
+#/*******************************************************************
+# * This file is part of the Emulex Linux Device Driver for *
+# * Fibre Channel Host Bus Adapters. *
+# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
+# * EMULEX and SLI are trademarks of Emulex. *
+# * www.emulex.com *
+# * *
+# * This program is free software; you can redistribute it and/or *
+# * modify it under the terms of version 2 of the GNU General *
+# * Public License as published by the Free Software Foundation. *
+# * This program is distributed in the hope that it will be useful. *
+# * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+# * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+# * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+# * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+# * TO BE LEGALLY INVALID. See the GNU General Public License for *
+# * more details, a copy of which can be found in the file COPYING *
+# * included with this package. *
+# *******************************************************************/
+######################################################################
+
+ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
+ccflags-$(GCOV) += -O0
+
+ifdef WARNINGS_BECOME_ERRORS
+ccflags-y += -Werror
+endif
+
+obj-$(CONFIG_SCSI_LPFC) := lpfc.o
+
+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
+ lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
+ lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
new file mode 100644
index 000000000..9b81a34d7
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -0,0 +1,1063 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <scsi/scsi_host.h>
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
+struct lpfc_sli2_slim;
+
+#define ELX_MODEL_NAME_SIZE 80
+
+#define LPFC_PCI_DEV_LP 0x1
+#define LPFC_PCI_DEV_OC 0x2
+
+#define LPFC_SLI_REV2 2
+#define LPFC_SLI_REV3 3
+#define LPFC_SLI_REV4 4
+
+#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
+#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
+ requests */
+#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
+ the NameServer before giving up. */
+#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
+#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
+ cmnd for menlo needs nearly twice as for firmware
+ downloads using bsg */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
+
+#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
+#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
+#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
+#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
+ queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH 10
+#define LPFC_MAX_TGT_QDEPTH 0xFFFF
+
+#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
+ collection. */
+/*
+ * Following time intervals are used of adjusting SCSI device
+ * queue depths when there are driver resource error or Firmware
+ * resource error.
+ */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
+
+/* Number of exchanges reserved for discovery to complete */
+#define LPFC_DISC_IOCB_BUFF_COUNT 20
+
+#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
+#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
+
+#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */
+
+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
+
+/* Define macros for 64 bit support */
+#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
+#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
+#define getPaddr(high, low) ((dma_addr_t)( \
+ (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
+/* Provide maximum configuration definitions. */
+#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
+#define FC_MAX_ADPTMSG 64
+
+#define MAX_HBAEVT 32
+
+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS 2
+
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY (1<<0)
+
+/* queue dump line buffer size */
+#define LPFC_LBUF_SZ 128
+
+/* mailbox system shutdown options */
+#define LPFC_MBX_NO_WAIT 0
+#define LPFC_MBX_WAIT 1
+
+enum lpfc_polling_flags {
+ ENABLE_FCP_RING_POLLING = 0x1,
+ DISABLE_FCP_RING_INT = 0x2
+};
+
+/* Provide DMA memory definitions the driver uses per port instance. */
+struct lpfc_dmabuf {
+ struct list_head list;
+ void *virt; /* virtual address ptr */
+ dma_addr_t phys; /* mapped address */
+ uint32_t buffer_tag; /* used for tagged queue ring */
+};
+
+struct lpfc_dma_pool {
+ struct lpfc_dmabuf *elements;
+ uint32_t max_count;
+ uint32_t current_count;
+};
+
+struct hbq_dmabuf {
+ struct lpfc_dmabuf hbuf;
+ struct lpfc_dmabuf dbuf;
+ uint32_t size;
+ uint32_t tag;
+ struct lpfc_cq_event cq_event;
+ unsigned long time_stamp;
+};
+
+/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
+#define MEM_PRI 0x100
+
+
+/****************************************************************************/
+/* Device VPD save area */
+/****************************************************************************/
+typedef struct lpfc_vpd {
+ uint32_t status; /* vpd status value */
+ uint32_t length; /* number of bytes actually returned */
+ struct {
+ uint32_t rsvd1; /* Revision numbers */
+ uint32_t biuRev;
+ uint32_t smRev;
+ uint32_t smFwRev;
+ uint32_t endecRev;
+ uint16_t rBit;
+ uint8_t fcphHigh;
+ uint8_t fcphLow;
+ uint8_t feaLevelHigh;
+ uint8_t feaLevelLow;
+ uint32_t postKernRev;
+ uint32_t opFwRev;
+ uint8_t opFwName[16];
+ uint32_t sli1FwRev;
+ uint8_t sli1FwName[16];
+ uint32_t sli2FwRev;
+ uint8_t sli2FwName[16];
+ } rev;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd3 :19; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t chbs : 1; /* Cofigure Host Backing store */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t chbs : 1; /* Cofigure Host Backing store */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 :19; /* Reserved */
+#endif
+ } sli3Feat;
+} lpfc_vpd_t;
+
+struct lpfc_scsi_buf;
+
+
+/*
+ * lpfc stat counters
+ */
+struct lpfc_stats {
+ /* Statistics for ELS commands */
+ uint32_t elsLogiCol;
+ uint32_t elsRetryExceeded;
+ uint32_t elsXmitRetry;
+ uint32_t elsDelayRetry;
+ uint32_t elsRcvDrop;
+ uint32_t elsRcvFrame;
+ uint32_t elsRcvRSCN;
+ uint32_t elsRcvRNID;
+ uint32_t elsRcvFARP;
+ uint32_t elsRcvFARPR;
+ uint32_t elsRcvFLOGI;
+ uint32_t elsRcvPLOGI;
+ uint32_t elsRcvADISC;
+ uint32_t elsRcvPDISC;
+ uint32_t elsRcvFAN;
+ uint32_t elsRcvLOGO;
+ uint32_t elsRcvPRLO;
+ uint32_t elsRcvPRLI;
+ uint32_t elsRcvLIRR;
+ uint32_t elsRcvRLS;
+ uint32_t elsRcvRPS;
+ uint32_t elsRcvRPL;
+ uint32_t elsRcvRRQ;
+ uint32_t elsRcvRTV;
+ uint32_t elsRcvECHO;
+ uint32_t elsXmitFLOGI;
+ uint32_t elsXmitFDISC;
+ uint32_t elsXmitPLOGI;
+ uint32_t elsXmitPRLI;
+ uint32_t elsXmitADISC;
+ uint32_t elsXmitLOGO;
+ uint32_t elsXmitSCR;
+ uint32_t elsXmitRNID;
+ uint32_t elsXmitFARP;
+ uint32_t elsXmitFARPR;
+ uint32_t elsXmitACC;
+ uint32_t elsXmitLSRJT;
+
+ uint32_t frameRcvBcast;
+ uint32_t frameRcvMulti;
+ uint32_t strayXmitCmpl;
+ uint32_t frameXmitDelay;
+ uint32_t xriCmdCmpl;
+ uint32_t xriStatErr;
+ uint32_t LinkUp;
+ uint32_t LinkDown;
+ uint32_t LinkMultiEvent;
+ uint32_t NoRcvBuf;
+ uint32_t fcpCmd;
+ uint32_t fcpCmpl;
+ uint32_t fcpRspErr;
+ uint32_t fcpRemoteStop;
+ uint32_t fcpPortRjt;
+ uint32_t fcpPortBusy;
+ uint32_t fcpError;
+ uint32_t fcpLocalErr;
+};
+
+struct lpfc_hba;
+
+
+enum discovery_state {
+ LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
+ LPFC_VPORT_FAILED = 1, /* vport has failed */
+ LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
+ LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
+ LPFC_FDISC = 8, /* FDISC sent for vport */
+ LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
+ * configured */
+ LPFC_NS_REG = 10, /* Register with NameServer */
+ LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
+ LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
+ * device authentication / discovery */
+ LPFC_DISC_AUTH = 13, /* Processing ADISC list */
+ LPFC_VPORT_READY = 32,
+};
+
+enum hba_state {
+ LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */
+ LPFC_WARM_START = 1, /* HBA state after selective reset */
+ LPFC_INIT_START = 2, /* Initial state after board reset */
+ LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
+ LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
+ LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
+ LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
+ * CLEAR_LA */
+ LPFC_HBA_READY = 32,
+ LPFC_HBA_ERROR = -1
+};
+
+struct lpfc_vport {
+ struct lpfc_hba *phba;
+ struct list_head listentry;
+ uint8_t port_type;
+#define LPFC_PHYSICAL_PORT 1
+#define LPFC_NPIV_PORT 2
+#define LPFC_FABRIC_PORT 3
+ enum discovery_state port_state;
+
+ uint16_t vpi;
+ uint16_t vfi;
+ uint8_t vpi_state;
+#define LPFC_VPI_REGISTERED 0x1
+
+ uint32_t fc_flag; /* FC flags */
+/* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
+ */
+#define FC_PT2PT 0x1 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO 0x4 /* Discovery timer running */
+#define FC_PUBLIC_LOOP 0x8 /* Public loop */
+#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
+#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
+#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
+#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
+#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
+#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
+#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
+#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
+#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
+#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
+#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
+#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
+
+ uint32_t ct_flags;
+#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
+#define FC_CT_RNN_ID 0x2 /* RNN_ID accepted by switch */
+#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
+#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
+#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
+
+ struct list_head fc_nodes;
+
+ /* Keep counters for the number of entries in each list. */
+ uint16_t fc_plogi_cnt;
+ uint16_t fc_adisc_cnt;
+ uint16_t fc_reglogin_cnt;
+ uint16_t fc_prli_cnt;
+ uint16_t fc_unmap_cnt;
+ uint16_t fc_map_cnt;
+ uint16_t fc_npr_cnt;
+ uint16_t fc_unused_cnt;
+ struct serv_parm fc_sparam; /* buffer for our service parameters */
+
+ uint32_t fc_myDID; /* fibre channel S_ID */
+ uint32_t fc_prevDID; /* previous fibre channel S_ID */
+ struct lpfc_name fabric_portname;
+ struct lpfc_name fabric_nodename;
+
+ int32_t stopped; /* HBA has not been restarted since last ERATT */
+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
+
+ uint32_t num_disc_nodes; /*in addition to hba_state */
+
+ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
+ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
+ uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */
+ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+ struct lpfc_name fc_nodename; /* fc nodename */
+ struct lpfc_name fc_portname; /* fc portname */
+
+ struct lpfc_work_evt disc_timeout_evt;
+
+ struct timer_list fc_disctmo; /* Discovery rescue timer */
+ uint8_t fc_ns_retry; /* retries for fabric nameserver */
+ uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
+
+ spinlock_t work_port_lock;
+ uint32_t work_port_events; /* Timeout to be handled */
+#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
+#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
+#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */
+
+#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
+#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
+#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */
+#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
+#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
+#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */
+
+ struct timer_list fc_fdmitmo;
+ struct timer_list els_tmofunc;
+ struct timer_list delayed_disc_tmo;
+
+ int unreg_vpi_cmpl;
+
+ uint8_t load_flag;
+#define FC_LOADING 0x1 /* HBA in process of loading drvr */
+#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
+ /* Vport Config Parameters */
+ uint32_t cfg_scan_down;
+ uint32_t cfg_lun_queue_depth;
+ uint32_t cfg_nodev_tmo;
+ uint32_t cfg_devloss_tmo;
+ uint32_t cfg_restrict_login;
+ uint32_t cfg_peer_port_login;
+ uint32_t cfg_fcp_class;
+ uint32_t cfg_use_adisc;
+ uint32_t cfg_fdmi_on;
+#define LPFC_FDMI_SUPPORT 1 /* bit 0 - FDMI supported? */
+#define LPFC_FDMI_REG_DELAY 2 /* bit 1 - 60 sec registration delay */
+#define LPFC_FDMI_ALL_ATTRIB 4 /* bit 2 - register ALL attributes? */
+ uint32_t cfg_discovery_threads;
+ uint32_t cfg_log_verbose;
+ uint32_t cfg_max_luns;
+ uint32_t cfg_enable_da_id;
+ uint32_t cfg_max_scsicmpl_time;
+ uint32_t cfg_tgt_queue_depth;
+ uint32_t cfg_first_burst_size;
+
+ uint32_t dev_loss_tmo_changed;
+
+ struct fc_vport *fc_vport;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct dentry *debug_disc_trc;
+ struct dentry *debug_nodelist;
+ struct dentry *vport_debugfs_root;
+ struct lpfc_debugfs_trc *disc_trc;
+ atomic_t disc_trc_cnt;
+#endif
+ uint8_t stat_data_enabled;
+ uint8_t stat_data_blocked;
+ struct list_head rcv_buffer_list;
+ unsigned long rcv_buffer_time_stamp;
+ uint32_t vport_flag;
+#define STATIC_VPORT 1
+};
+
+struct hbq_s {
+ uint16_t entry_count; /* Current number of HBQ slots */
+ uint16_t buffer_count; /* Current number of buffers posted */
+ uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
+ uint32_t hbqPutIdx; /* HBQ slot to use */
+ uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
+ void *hbq_virt; /* Virtual ptr to this hbq */
+ struct list_head hbq_buffer_list; /* buffers assigned to this HBQ */
+ /* Callback for HBQ buffer allocation */
+ struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
+ /* Callback for HBQ buffer free */
+ void (*hbq_free_buffer) (struct lpfc_hba *,
+ struct hbq_dmabuf *);
+};
+
+#define LPFC_MAX_HBQS 4
+/* this matches the position in the lpfc_hbq_defs array */
+#define LPFC_ELS_HBQ 0
+#define LPFC_EXTRA_HBQ 1
+
+enum hba_temp_state {
+ HBA_NORMAL_TEMP,
+ HBA_OVER_TEMP
+};
+
+enum intr_type_t {
+ NONE = 0,
+ INTx,
+ MSI,
+ MSIX,
+};
+
+#define LPFC_CT_CTX_MAX 64
+struct unsol_rcv_ct_ctx {
+ uint32_t ctxt_id;
+ uint32_t SID;
+ uint32_t valid;
+#define UNSOL_INVALID 0
+#define UNSOL_VALID 1
+ uint16_t oxid;
+ uint16_t rxid;
+};
+
+#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
+#define LPFC_USER_LINK_SPEED_1G 1 /* 1 Gigabaud */
+#define LPFC_USER_LINK_SPEED_2G 2 /* 2 Gigabaud */
+#define LPFC_USER_LINK_SPEED_4G 4 /* 4 Gigabaud */
+#define LPFC_USER_LINK_SPEED_8G 8 /* 8 Gigabaud */
+#define LPFC_USER_LINK_SPEED_10G 10 /* 10 Gigabaud */
+#define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_16G
+#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
+ (1 << LPFC_USER_LINK_SPEED_10G) | \
+ (1 << LPFC_USER_LINK_SPEED_8G) | \
+ (1 << LPFC_USER_LINK_SPEED_4G) | \
+ (1 << LPFC_USER_LINK_SPEED_2G) | \
+ (1 << LPFC_USER_LINK_SPEED_1G) | \
+ (1 << LPFC_USER_LINK_SPEED_AUTO))
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+
+enum nemb_type {
+ nemb_mse = 1,
+ nemb_hbd
+};
+
+enum mbox_type {
+ mbox_rd = 1,
+ mbox_wr
+};
+
+enum dma_type {
+ dma_mbox = 1,
+ dma_ebuf
+};
+
+enum sta_type {
+ sta_pre_addr = 1,
+ sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+ uint32_t state;
+#define LPFC_BSG_MBOX_IDLE 0
+#define LPFC_BSG_MBOX_HOST 1
+#define LPFC_BSG_MBOX_PORT 2
+#define LPFC_BSG_MBOX_DONE 3
+#define LPFC_BSG_MBOX_ABTS 4
+ enum nemb_type nembType;
+ enum mbox_type mboxType;
+ uint32_t numBuf;
+ uint32_t mbxTag;
+ uint32_t seqNum;
+ struct lpfc_dmabuf *mbx_dmabuf;
+ struct list_head ext_dmabuf_list;
+};
+
+struct lpfc_hba {
+ /* SCSI interface function jump table entries */
+ int (*lpfc_new_scsi_buf)
+ (struct lpfc_vport *, int);
+ struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+ (struct lpfc_hba *, struct lpfc_nodelist *);
+ int (*lpfc_scsi_prep_dma_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ void (*lpfc_scsi_unprep_dma_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ void (*lpfc_release_scsi_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ void (*lpfc_rampdown_queue_depth)
+ (struct lpfc_hba *);
+ void (*lpfc_scsi_prep_cmnd)
+ (struct lpfc_vport *, struct lpfc_scsi_buf *,
+ struct lpfc_nodelist *);
+
+ /* IOCB interface function jump table entries */
+ int (*__lpfc_sli_issue_iocb)
+ (struct lpfc_hba *, uint32_t,
+ struct lpfc_iocbq *, uint32_t);
+ void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+ struct lpfc_iocbq *);
+ int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+ IOCB_t * (*lpfc_get_iocb_from_iocbq)
+ (struct lpfc_iocbq *);
+ void (*lpfc_scsi_cmd_iocb_cmpl)
+ (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+ /* MBOX interface function jump table entries */
+ int (*lpfc_sli_issue_mbox)
+ (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+ /* Slow-path IOCB process function jump table entries */
+ void (*lpfc_sli_handle_slow_ring_event)
+ (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ uint32_t mask);
+
+ /* INIT device interface function jump table entries */
+ int (*lpfc_sli_hbq_to_firmware)
+ (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+ int (*lpfc_sli_brdrestart)
+ (struct lpfc_hba *);
+ int (*lpfc_sli_brdready)
+ (struct lpfc_hba *, uint32_t);
+ void (*lpfc_handle_eratt)
+ (struct lpfc_hba *);
+ void (*lpfc_stop_port)
+ (struct lpfc_hba *);
+ int (*lpfc_hba_init_link)
+ (struct lpfc_hba *, uint32_t);
+ int (*lpfc_hba_down_link)
+ (struct lpfc_hba *, uint32_t);
+ int (*lpfc_selective_reset)
+ (struct lpfc_hba *);
+
+ int (*lpfc_bg_scsi_prep_dma_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ /* Add new entries here */
+
+ /* SLI4 specific HBA data structure */
+ struct lpfc_sli4_hba sli4_hba;
+
+ struct lpfc_sli sli;
+ uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+ uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
+ uint32_t sli3_options; /* Mask of enabled SLI3 options */
+#define LPFC_SLI3_HBQ_ENABLED 0x01
+#define LPFC_SLI3_NPIV_ENABLED 0x02
+#define LPFC_SLI3_VPORT_TEARDOWN 0x04
+#define LPFC_SLI3_CRP_ENABLED 0x08
+#define LPFC_SLI3_BG_ENABLED 0x20
+#define LPFC_SLI3_DSS_ENABLED 0x40
+#define LPFC_SLI4_PERFH_ENABLED 0x80
+#define LPFC_SLI4_PHWQ_ENABLED 0x100
+ uint32_t iocb_cmd_size;
+ uint32_t iocb_rsp_size;
+
+ enum hba_state link_state;
+ uint32_t link_flag; /* link state flags */
+#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
+ /* This flag is set while issuing */
+ /* INIT_LINK mailbox command */
+#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
+#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
+
+ uint32_t hba_flag; /* hba generic flags */
+#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
+#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
+#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT 0x20
+#define ELS_XRI_ABORT_EVENT 0x40
+#define ASYNC_EVENT 0x80
+#define LINK_DISABLED 0x100 /* Link disabled by user */
+#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
+#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
+#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
+#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
+ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
+ struct lpfc_dmabuf slim2p;
+
+ MAILBOX_t *mbox;
+ uint32_t *mbox_ext;
+ struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
+ uint32_t ha_copy;
+ struct _PCB *pcb;
+ struct _IOCB *IOCBs;
+
+ struct lpfc_dmabuf hbqslimp;
+
+ uint16_t pci_cfg_value;
+
+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
+
+ uint32_t fc_eventTag; /* event tag for link attention */
+ uint32_t link_events;
+
+ /* These fields used to be binfo */
+ uint32_t fc_pref_DID; /* preferred D_ID */
+ uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
+ uint32_t fc_edtov; /* E_D_TOV timer value */
+ uint32_t fc_arbtov; /* ARB_TOV timer value */
+ uint32_t fc_ratov; /* R_A_TOV timer value */
+ uint32_t fc_rttov; /* R_T_TOV timer value */
+ uint32_t fc_altov; /* AL_TOV timer value */
+ uint32_t fc_crtov; /* C_R_TOV timer value */
+ uint32_t fc_citov; /* C_I_TOV timer value */
+
+ struct serv_parm fc_fabparam; /* fabric service parameters buffer */
+ uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
+
+ uint32_t lmt;
+
+ uint32_t fc_topology; /* link topology, from LINK INIT */
+ uint32_t fc_topology_changed; /* link topology, from LINK INIT */
+
+ struct lpfc_stats fc_stat;
+
+ struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
+ uint32_t nport_event_cnt; /* timestamp for nlplist entry */
+
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
+ uint32_t RandomData[7];
+
+ /* HBA Config Parameters */
+ uint32_t cfg_ack0;
+ uint32_t cfg_enable_npiv;
+ uint32_t cfg_enable_rrq;
+ uint32_t cfg_topology;
+ uint32_t cfg_link_speed;
+#define LPFC_FCF_FOV 1 /* Fast fcf failover */
+#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
+ uint32_t cfg_fcf_failover_policy;
+ uint32_t cfg_fcp_io_sched;
+ uint32_t cfg_fcp2_no_tgt_reset;
+ uint32_t cfg_cr_delay;
+ uint32_t cfg_cr_count;
+ uint32_t cfg_multi_ring_support;
+ uint32_t cfg_multi_ring_rctl;
+ uint32_t cfg_multi_ring_type;
+ uint32_t cfg_poll;
+ uint32_t cfg_poll_tmo;
+ uint32_t cfg_task_mgmt_tmo;
+ uint32_t cfg_use_msi;
+ uint32_t cfg_fcp_imax;
+ uint32_t cfg_fcp_cpu_map;
+ uint32_t cfg_fcp_io_channel;
+ uint32_t cfg_total_seg_cnt;
+ uint32_t cfg_sg_seg_cnt;
+ uint32_t cfg_prot_sg_seg_cnt;
+ uint32_t cfg_sg_dma_buf_size;
+ uint64_t cfg_soft_wwnn;
+ uint64_t cfg_soft_wwpn;
+ uint32_t cfg_hba_queue_depth;
+ uint32_t cfg_enable_hba_reset;
+ uint32_t cfg_enable_hba_heartbeat;
+ uint32_t cfg_fof;
+ uint32_t cfg_EnableXLane;
+ uint8_t cfg_oas_tgt_wwpn[8];
+ uint8_t cfg_oas_vpt_wwpn[8];
+ uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE 1
+#define OAS_LUN_DISABLE 0
+ uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS 0x01
+ uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT 0x01
+#define OAS_FIND_ANY_TARGET 0x02
+#define OAS_LUN_VALID 0x04
+ uint32_t cfg_XLanePriority;
+ uint32_t cfg_enable_bg;
+ uint32_t cfg_hostmem_hgp;
+ uint32_t cfg_log_verbose;
+ uint32_t cfg_aer_support;
+ uint32_t cfg_sriov_nr_virtfn;
+ uint32_t cfg_request_firmware_upgrade;
+ uint32_t cfg_iocb_cnt;
+ uint32_t cfg_suppress_link_up;
+ uint32_t cfg_rrq_xri_bitmap_sz;
+#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
+ uint32_t cfg_enable_dss;
+ lpfc_vpd_t vpd; /* vital product data */
+
+ struct pci_dev *pcidev;
+ struct list_head work_list;
+ uint32_t work_ha; /* Host Attention Bits for WT */
+ uint32_t work_ha_mask; /* HA Bits owned by WT */
+ uint32_t work_hs; /* HS stored in case of ERRAT */
+ uint32_t work_status[2]; /* Extra status from SLIM */
+
+ wait_queue_head_t work_waitq;
+ struct task_struct *worker_thread;
+ unsigned long data_flags;
+
+ uint32_t hbq_in_use; /* HBQs in use flag */
+ struct list_head rb_pend_list; /* Received buffers to be processed */
+ uint32_t hbq_count; /* Count of configured HBQs */
+ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
+
+ atomic_t fcp_qidx; /* next work queue to post work to */
+
+ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
+ unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
+ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
+ void __iomem *slim_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0 */
+ void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
+ PCI BAR2 */
+
+ void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0 with dual-ULP support */
+ void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+ PCI BAR2 with dual-ULP support */
+ void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+ PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0 0
+#define PCI_64BIT_BAR2 2
+#define PCI_64BIT_BAR4 4
+ void __iomem *MBslimaddr; /* virtual address for mbox cmds */
+ void __iomem *HAregaddr; /* virtual address for host attn reg */
+ void __iomem *CAregaddr; /* virtual address for chip attn reg */
+ void __iomem *HSregaddr; /* virtual address for host status
+ reg */
+ void __iomem *HCregaddr; /* virtual address for host ctl reg */
+
+ struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+ struct lpfc_pgp *port_gp;
+ uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
+ uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
+
+ int brd_no; /* FC board number */
+ char SerialNumber[32]; /* adapter Serial Number */
+ char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
+ char ModelDesc[256]; /* Model Description */
+ char ModelName[80]; /* Model Name */
+ char ProgramType[256]; /* Program Type */
+ char Port[20]; /* Port No */
+ uint8_t vpd_flag; /* VPD data flag */
+
+#define VPD_MODEL_DESC 0x1 /* valid vpd model description */
+#define VPD_MODEL_NAME 0x2 /* valid vpd model name */
+#define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */
+#define VPD_PORT 0x8 /* valid vpd port data */
+#define VPD_MASK 0xf /* mask for any vpd data */
+
+ uint8_t soft_wwn_enable;
+
+ struct timer_list fcp_poll_timer;
+ struct timer_list eratt_poll;
+
+ /*
+ * stat counters
+ */
+ uint64_t fc4InputRequests;
+ uint64_t fc4OutputRequests;
+ uint64_t fc4ControlRequests;
+ uint64_t bg_guard_err_cnt;
+ uint64_t bg_apptag_err_cnt;
+ uint64_t bg_reftag_err_cnt;
+
+ /* fastpath list. */
+ spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */
+ spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */
+ struct list_head lpfc_scsi_buf_list_get;
+ struct list_head lpfc_scsi_buf_list_put;
+ uint32_t total_scsi_bufs;
+ struct list_head lpfc_iocb_list;
+ uint32_t total_iocbq_bufs;
+ struct list_head active_rrq_list;
+ spinlock_t hbalock;
+
+ /* pci_mem_pools */
+ struct pci_pool *lpfc_scsi_dma_buf_pool;
+ struct pci_pool *lpfc_mbuf_pool;
+ struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
+ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
+ struct lpfc_dma_pool lpfc_mbuf_safety_pool;
+
+ mempool_t *mbox_mem_pool;
+ mempool_t *nlp_mem_pool;
+ mempool_t *rrq_pool;
+ mempool_t *active_rrq_pool;
+
+ struct fc_host_statistics link_stats;
+ enum intr_type_t intr_type;
+ uint32_t intr_mode;
+#define LPFC_INTR_ERROR 0xFFFFFFFF
+ struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
+
+ struct list_head port_list;
+ struct lpfc_vport *pport; /* physical lpfc_vport pointer */
+ uint16_t max_vpi; /* Maximum virtual nports */
+#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
+ uint16_t max_vports; /*
+ * For IOV HBAs max_vpi can change
+ * after a reset. max_vports is max
+ * number of vports present. This can
+ * be greater than max_vpi.
+ */
+ uint16_t vpi_base;
+ uint16_t vfi_base;
+ unsigned long *vpi_bmask; /* vpi allocation table */
+ uint16_t *vpi_ids;
+ uint16_t vpi_count;
+ struct list_head lpfc_vpi_blk_list;
+
+ /* Data structure used by fabric iocb scheduler */
+ struct list_head fabric_iocb_list;
+ atomic_t fabric_iocb_count;
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+#define FABRIC_COMANDS_BLOCKED 0
+ atomic_t num_rsrc_err;
+ atomic_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct dentry *hba_debugfs_root;
+ atomic_t debugfs_vport_count;
+ struct dentry *debug_hbqinfo;
+ struct dentry *debug_dumpHostSlim;
+ struct dentry *debug_dumpHBASlim;
+ struct dentry *debug_dumpData; /* BlockGuard BPL */
+ struct dentry *debug_dumpDif; /* BlockGuard BPL */
+ struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
+ struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
+ struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
+ struct dentry *debug_writeGuard; /* inject write guard_tag errors */
+ struct dentry *debug_writeApp; /* inject write app_tag errors */
+ struct dentry *debug_writeRef; /* inject write ref_tag errors */
+ struct dentry *debug_readGuard; /* inject read guard_tag errors */
+ struct dentry *debug_readApp; /* inject read app_tag errors */
+ struct dentry *debug_readRef; /* inject read ref_tag errors */
+
+ /* T10 DIF error injection */
+ uint32_t lpfc_injerr_wgrd_cnt;
+ uint32_t lpfc_injerr_wapp_cnt;
+ uint32_t lpfc_injerr_wref_cnt;
+ uint32_t lpfc_injerr_rgrd_cnt;
+ uint32_t lpfc_injerr_rapp_cnt;
+ uint32_t lpfc_injerr_rref_cnt;
+ uint32_t lpfc_injerr_nportid;
+ struct lpfc_name lpfc_injerr_wwpn;
+ sector_t lpfc_injerr_lba;
+#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
+
+ struct dentry *debug_slow_ring_trc;
+ struct lpfc_debugfs_trc *slow_ring_trc;
+ atomic_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_bar_acc;
+ struct dentry *idiag_que_info;
+ struct dentry *idiag_que_acc;
+ struct dentry *idiag_drb_acc;
+ struct dentry *idiag_ctl_acc;
+ struct dentry *idiag_mbx_acc;
+ struct dentry *idiag_ext_acc;
+#endif
+
+ /* Used for deferred freeing of ELS data buffers */
+ struct list_head elsbuf;
+ int elsbuf_cnt;
+ int elsbuf_prev_cnt;
+
+ uint8_t temp_sensor_support;
+ /* Fields used for heart beat. */
+ unsigned long last_completion_time;
+ unsigned long skipped_hb;
+ struct timer_list hb_tmofunc;
+ uint8_t hb_outstanding;
+ struct timer_list rrq_tmr;
+ enum hba_temp_state over_temp_state;
+ /* ndlp reference management */
+ spinlock_t ndlp_lock;
+ /*
+ * Following bit will be set for all buffer tags which are not
+ * associated with any HBQ.
+ */
+#define QUE_BUFTAG_BIT (1<<31)
+ uint32_t buffer_tag_count;
+ int wait_4_mlo_maint_flg;
+ wait_queue_head_t wait_4_mlo_m_q;
+ /* data structure used for latency data collection */
+#define LPFC_NO_BUCKET 0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+ uint8_t bucket_type;
+ uint32_t bucket_base;
+ uint32_t bucket_step;
+
+/* Maximum number of events that can be outstanding at any time*/
+#define LPFC_MAX_EVT_COUNT 512
+ atomic_t fast_event_count;
+ uint32_t fcoe_eventtag;
+ uint32_t fcoe_eventtag_at_fcf_scan;
+ uint32_t fcoe_cvl_eventtag;
+ uint32_t fcoe_cvl_eventtag_attn;
+ struct lpfc_fcf fcf;
+ uint8_t fc_map[3];
+ uint8_t valid_vlan;
+ uint16_t vlan_id;
+ struct list_head fcf_conn_rec_list;
+
+ spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
+ struct list_head ct_ev_waiters;
+ struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
+ uint32_t ctx_idx;
+
+ uint8_t menlo_flag; /* menlo generic flags */
+#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
+ uint32_t iocb_cnt;
+ uint32_t iocb_max;
+ atomic_t sdev_cnt;
+ uint8_t fips_spec_rev;
+ uint8_t fips_level;
+ spinlock_t devicelock; /* lock for luns list */
+ mempool_t *device_data_mem_pool;
+ struct list_head luns;
+};
+
+static inline struct Scsi_Host *
+lpfc_shost_from_vport(struct lpfc_vport *vport)
+{
+ return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
+}
+
+static inline void
+lpfc_set_loopback_flag(struct lpfc_hba *phba)
+{
+ if (phba->cfg_topology == FLAGS_LOCAL_LB)
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ else
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+}
+
+static inline int
+lpfc_is_link_up(struct lpfc_hba *phba)
+{
+ return phba->link_state == LPFC_LINK_UP ||
+ phba->link_state == LPFC_CLEAR_LA ||
+ phba->link_state == LPFC_HBA_READY;
+}
+
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+ /* Wake up worker thread */
+ wake_up(&phba->work_waitq);
+ return;
+}
+
+static inline int
+lpfc_readl(void __iomem *addr, uint32_t *data)
+{
+ uint32_t temp;
+ temp = readl(addr);
+ if (temp == 0xffffffff)
+ return -EIO;
+ *data = temp;
+ return 0;
+}
+
+static inline int
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+ /*
+ * There was a link/board error. Read the status register to retrieve
+ * the error event and process it.
+ */
+ phba->sli.slistat.err_attn_event++;
+
+ /* Save status info and check for unplug error */
+ if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
+ lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
+ lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
+ return -EIO;
+ }
+
+ /* Clear chip Host Attention error bit */
+ writel(HA_ERATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ phba->pport->stopped = 1;
+
+ return 0;
+}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
new file mode 100644
index 000000000..d65bd178d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -0,0 +1,5885 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/aer.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+
+#define LPFC_DEF_DEVLOSS_TMO 30
+#define LPFC_MIN_DEVLOSS_TMO 1
+#define LPFC_MAX_DEVLOSS_TMO 255
+
+/*
+ * Write key size should be multiple of 4. If write key is changed
+ * make sure that library write key is also changed.
+ */
+#define LPFC_REG_WRITE_KEY_SIZE 4
+#define LPFC_REG_WRITE_KEY "EMLX"
+
+/**
+ * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
+ * @incr: integer to convert.
+ * @hdw: ascii string holding converted integer plus a string terminator.
+ *
+ * Description:
+ * JEDEC Joint Electron Device Engineering Council.
+ * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
+ * character string. The string is then terminated with a NULL in byte 9.
+ * Hex 0-9 becomes ascii '0' to '9'.
+ * Hex a-f becomes ascii '=' to 'B' capital B.
+ *
+ * Notes:
+ * Coded for 32 bit integers only.
+ **/
+static void
+lpfc_jedec_to_ascii(int incr, char hdw[])
+{
+ int i, j;
+ for (i = 0; i < 8; i++) {
+ j = (incr & 0xf);
+ if (j <= 9)
+ hdw[7 - i] = 0x30 + j;
+ else
+ hdw[7 - i] = 0x61 + j - 10;
+ incr = (incr >> 4);
+ }
+ hdw[8] = 0;
+ return;
+}
+
+/**
+ * lpfc_drvr_version_show - Return the Emulex driver string with version number
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+}
+
+/**
+ * lpfc_enable_fip_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->hba_flag & HBA_FIP_SUPPORT)
+ return snprintf(buf, PAGE_SIZE, "1\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
+static ssize_t
+lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->cfg_enable_bg)
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+ return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+ else
+ return snprintf(buf, PAGE_SIZE,
+ "BlockGuard Not Supported\n");
+ else
+ return snprintf(buf, PAGE_SIZE,
+ "BlockGuard Disabled\n");
+}
+
+static ssize_t
+lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)phba->bg_guard_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)phba->bg_apptag_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)phba->bg_reftag_err_cnt);
+}
+
+/**
+ * lpfc_info_show - Return some pci info about the host in ascii
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text from lpfc_info().
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+}
+
+/**
+ * lpfc_serialnum_show - Return the hba serial number in ascii
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text serial number.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+}
+
+/**
+ * lpfc_temp_sensor_show - Return the temperature sensor level
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns a number indicating the temperature sensor level currently
+ * supported, zero or one in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
+}
+
+/**
+ * lpfc_modeldesc_show - Return the model description of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model description.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+}
+
+/**
+ * lpfc_modelname_show - Return the model name of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model name.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+}
+
+/**
+ * lpfc_programtype_show - Return the program type of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+}
+
+/**
+ * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the Menlo Maintenance sli flag.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->sli.sli_flag & LPFC_MENLO_MAINT));
+}
+
+/**
+ * lpfc_vportnum_show - Return the port number in ascii of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+}
+
+/**
+ * lpfc_fwrev_show - Return the firmware rev running in the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t if_type;
+ uint8_t sli_family;
+ char fwrev[FW_REV_STR_SIZE];
+ int len;
+
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if_type = phba->sli4_hba.pc_sli4_params.if_type;
+ sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+ fwrev, phba->sli_rev);
+ else
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+ fwrev, phba->sli_rev, if_type, sli_family);
+
+ return len;
+}
+
+/**
+ * lpfc_hdw_show - Return the jedec information about the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char hdw[9];
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ lpfc_vpd_t *vp = &phba->vpd;
+
+ lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+}
+
+/**
+ * lpfc_option_rom_version_show - Return the adapter ROM FCode version
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ROM and FCode ascii strings.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ char fwrev[FW_REV_STR_SIZE];
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
+}
+
+/**
+ * lpfc_state_show - Return the link state of the port
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Notes:
+ * The switch statement has no default so zero will be returned.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len = 0;
+
+ switch (phba->link_state) {
+ case LPFC_LINK_UNKNOWN:
+ case LPFC_WARM_START:
+ case LPFC_INIT_START:
+ case LPFC_INIT_MBX_CMDS:
+ case LPFC_LINK_DOWN:
+ case LPFC_HBA_ERROR:
+ if (phba->hba_flag & LINK_DISABLED)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Link Down - User disabled\n");
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Link Down\n");
+ break;
+ case LPFC_LINK_UP:
+ case LPFC_CLEAR_LA:
+ case LPFC_HBA_READY:
+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+
+ switch (vport->port_state) {
+ case LPFC_LOCAL_CFG_LINK:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Configuring Link\n");
+ break;
+ case LPFC_FDISC:
+ case LPFC_FLOGI:
+ case LPFC_FABRIC_CFG_LINK:
+ case LPFC_NS_REG:
+ case LPFC_NS_QRY:
+ case LPFC_BUILD_DISC_LIST:
+ case LPFC_DISC_AUTH:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Discovery\n");
+ break;
+ case LPFC_VPORT_READY:
+ len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+ break;
+
+ case LPFC_VPORT_FAILED:
+ len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+ break;
+
+ case LPFC_VPORT_UNKNOWN:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Unknown\n");
+ break;
+ }
+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Menlo Maint Mode\n");
+ else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (vport->fc_flag & FC_PUBLIC_LOOP)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Public Loop\n");
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Private Loop\n");
+ } else {
+ if (vport->fc_flag & FC_FABRIC)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Fabric\n");
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Point-2-Point\n");
+ }
+ }
+
+ return len;
+}
+
+/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+ return snprintf(buf, PAGE_SIZE, "fcoe\n");
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ * (OAS) is supported.
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
+ * lpfc_link_state_store - Transition the link_state on an HBA port
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if the buffer is not "up" or "down"
+ * return from link state change function if non-zero
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ int status = -EINVAL;
+
+ if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
+ (phba->link_state == LPFC_LINK_DOWN))
+ status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
+ (phba->link_state >= LPFC_LINK_UP))
+ status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
+
+ if (status == 0)
+ return strlen(buf);
+ else
+ return status;
+}
+
+/**
+ * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the sum of fc mapped and unmapped.
+ *
+ * Description:
+ * Returns the ascii text number of the sum of the fc mapped and unmapped
+ * vport counts.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_num_discovered_ports_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vport->fc_map_cnt + vport->fc_unmap_cnt);
+}
+
+/**
+ * lpfc_issue_lip - Misnomer, name carried over from long ago
+ * @shost: Scsi_Host pointer.
+ *
+ * Description:
+ * Bring the link down gracefully then re-init the link. The firmware will
+ * re-init the fiber channel interface as required. Does not issue a LIP.
+ *
+ * Returns:
+ * -EPERM port offline or management commands are being blocked
+ * -ENOMEM cannot allocate memory for the mailbox command
+ * -EIO error sending the mailbox command
+ * zero for success
+ **/
+static int
+lpfc_issue_lip(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *pmboxq;
+ int mbxstatus = MBXERR_ERROR;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
+ return -EPERM;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+
+ if (!pmboxq)
+ return -ENOMEM;
+
+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+ pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+ pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
+
+ if ((mbxstatus == MBX_SUCCESS) &&
+ (pmboxq->u.mb.mbxStatus == 0 ||
+ pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+ lpfc_init_link(phba, pmboxq, phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+ phba->fc_ratov * 2);
+ if ((mbxstatus == MBX_SUCCESS) &&
+ (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2859 SLI authentication is required "
+ "for INIT_LINK but has not done yet\n");
+ }
+
+ lpfc_set_loopback_flag(phba);
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ if (mbxstatus == MBXERR_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * lpfc_do_offline - Issues a mailbox command to bring the link down
+ * @phba: lpfc_hba pointer.
+ * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ * Can wait up to 5 seconds for the port ring buffers count
+ * to reach zero, prints a warning if it is not zero and continues.
+ * lpfc_workq_post_event() returns a non-zero return code if call fails.
+ *
+ * Returns:
+ * -EIO error posting the event
+ * zero for success
+ **/
+static int
+lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
+{
+ struct completion online_compl;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ int status = 0;
+ int cnt = 0;
+ int i;
+ int rc;
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_OFFLINE_PREP);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ psli = &phba->sli;
+
+ /* Wait a little for things to settle down, but not
+ * long enough for dev loss timeout to expire.
+ */
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ while (!list_empty(&pring->txcmplq)) {
+ msleep(10);
+ if (cnt++ > 500) { /* 5 secs */
+ lpfc_printf_log(phba,
+ KERN_WARNING, LOG_INIT,
+ "0466 Outstanding IO when "
+ "bringing Adapter offline\n");
+ break;
+ }
+ }
+ }
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * lpfc_selective_reset - Offline then onlines the port
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * If the port is configured to allow a reset then the hba is brought
+ * offline then online.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
+ *
+ * Returns:
+ * lpfc_do_offline() return code if not zero
+ * -EIO reset not configured or error posting the event
+ * zero for success
+ **/
+int
+lpfc_selective_reset(struct lpfc_hba *phba)
+{
+ struct completion online_compl;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
+
+ if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0)
+ return status;
+ }
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * lpfc_issue_reset - Selectively resets an adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the buf contains the string "selective" then lpfc_selective_reset()
+ * is called to perform the reset.
+ *
+ * Notes:
+ * Assumes any error from lpfc_selective_reset() will be negative.
+ * If lpfc_selective_reset() returns zero then the length of the buffer
+ * is returned which indicates success
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain the string "selective"
+ * length of buf if lpfc-selective_reset() if the call succeeds
+ * return value of lpfc_selective_reset() if the call fails
+**/
+static ssize_t
+lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int status = -EINVAL;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
+
+ if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
+ status = phba->lpfc_selective_reset(phba);
+
+ if (status == 0)
+ return strlen(buf);
+ else
+ return status;
+}
+
+/**
+ * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * SLI4 interface type-2 device to wait on the sliport status register for
+ * the readyness after performing a firmware reset.
+ *
+ * Returns:
+ * zero for success, -EPERM when port does not have privilege to perform the
+ * reset, -EIO when port timeout from recovering from the reset.
+ *
+ * Note:
+ * As the caller will interpret the return code by value, be careful in making
+ * change or addition to return codes.
+ **/
+int
+lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
+{
+ struct lpfc_register portstat_reg = {0};
+ int i;
+
+ msleep(100);
+ lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+
+ /* verify if privileged for the request operation */
+ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
+ !bf_get(lpfc_sliport_status_err, &portstat_reg))
+ return -EPERM;
+
+ /* wait for the SLI port firmware ready after firmware reset */
+ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
+ msleep(10);
+ lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
+ continue;
+ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
+ continue;
+ if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
+ continue;
+ break;
+ }
+
+ if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
+ return 0;
+ else
+ return -EIO;
+}
+
+/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+ struct completion online_compl;
+ struct pci_dev *pdev = phba->pcidev;
+ uint32_t before_fc_flag;
+ uint32_t sriov_nr_virtfn;
+ uint32_t reg_val;
+ int status = 0, rc = 0;
+ int job_posted = 1, sriov_err;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ /* Keep state if we need to restore back */
+ before_fc_flag = phba->pport->fc_flag;
+ sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+
+ if (opcode == LPFC_FW_DUMP)
+ phba->hba_flag |= HBA_FW_DUMP_OP;
+
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0) {
+ phba->hba_flag &= ~HBA_FW_DUMP_OP;
+ return status;
+ }
+
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
+
+ reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+
+ if (opcode == LPFC_FW_DUMP)
+ reg_val |= LPFC_FW_DUMP_REQUEST;
+ else if (opcode == LPFC_FW_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+ else if (opcode == LPFC_DV_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+ writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+ /* flush */
+ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* delay driver action following IF_TYPE_2 reset */
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+
+ if (rc == -EPERM) {
+ /* no privilege for reset */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3150 No privilege to perform the requested "
+ "access: x%x\n", reg_val);
+ } else if (rc == -EIO) {
+ /* reset failed, there is nothing more we can do */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3153 Fail to perform the requested "
+ "access: x%x\n", reg_val);
+ return rc;
+ }
+
+ /* keep the original port state */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
+
+ init_completion(&online_compl);
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
+
+ wait_for_completion(&online_compl);
+
+out:
+ /* in any case, restore the virtual functions enabled as before */
+ if (sriov_nr_virtfn) {
+ sriov_err =
+ lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+ if (!sriov_err)
+ phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+ }
+
+ /* return proper error code */
+ if (!rc) {
+ if (!job_posted)
+ rc = -ENOMEM;
+ else if (status)
+ rc = -EIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_nport_evt_cnt_show - Return the number of nport events
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ascii number of nport events.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+/**
+ * lpfc_board_mode_show - Return the state of the board
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the state of the adapter.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ char * state;
+
+ if (phba->link_state == LPFC_HBA_ERROR)
+ state = "error";
+ else if (phba->link_state == LPFC_WARM_START)
+ state = "warm start";
+ else if (phba->link_state == LPFC_INIT_START)
+ state = "offline";
+ else
+ state = "online";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+/**
+ * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing one of the strings "online", "offline", "warm" or "error".
+ * @count: unused variable.
+ *
+ * Returns:
+ * -EACCES if enable hba reset not enabled
+ * -EINVAL if the buffer does not contain a valid string (see above)
+ * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
+ * buf length greater than zero indicates success
+ **/
+static ssize_t
+lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct completion online_compl;
+ char *board_mode_str = NULL;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset) {
+ status = -EACCES;
+ goto board_mode_out;
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3050 lpfc_board_mode set to %s\n", buf);
+
+ init_completion(&online_compl);
+
+ if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0) {
+ status = -ENOMEM;
+ goto board_mode_out;
+ }
+ wait_for_completion(&online_compl);
+ } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ status = -EINVAL;
+ else
+ status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
+ else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ status = -EINVAL;
+ else
+ status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+ else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+ else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+ else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
+ else
+ status = -EINVAL;
+
+board_mode_out:
+ if (!status)
+ return strlen(buf);
+ else {
+ board_mode_str = strchr(buf, '\n');
+ if (board_mode_str)
+ *board_mode_str = '\0';
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3097 Failed \"%s\", status(%d), "
+ "fc_flag(x%x)\n",
+ buf, status, phba->pport->fc_flag);
+ return status;
+ }
+}
+
+/**
+ * lpfc_get_hba_info - Return various bits of informaton about the adapter
+ * @phba: pointer to the adapter structure.
+ * @mxri: max xri count.
+ * @axri: available xri count.
+ * @mrpi: max rpi count.
+ * @arpi: available rpi count.
+ * @mvpi: max vpi count.
+ * @avpi: available vpi count.
+ *
+ * Description:
+ * If an integer pointer for an count is not null then the value for the
+ * count is returned.
+ *
+ * Returns:
+ * zero on error
+ * one for success
+ **/
+static int
+lpfc_get_hba_info(struct lpfc_hba *phba,
+ uint32_t *mxri, uint32_t *axri,
+ uint32_t *mrpi, uint32_t *arpi,
+ uint32_t *mvpi, uint32_t *avpi)
+{
+ struct lpfc_mbx_read_config *rd_config;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc = 0;
+ uint32_t max_vpi;
+
+ /*
+ * prevent udev from issuing mailbox commands until the port is
+ * configured.
+ */
+ if (phba->link_state < LPFC_LINK_DOWN ||
+ !phba->mbox_mem_pool ||
+ (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
+ return 0;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+ return 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return 0;
+ memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->u.mb;
+ pmb->mbxCommand = MBX_READ_CONFIG;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ rc = MBX_NOT_FINISHED;
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return 0;
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rd_config = &pmboxq->u.mqe.un.rd_config;
+ if (mrpi)
+ *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+ if (arpi)
+ *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+ phba->sli4_hba.max_cfg_param.rpi_used;
+ if (mxri)
+ *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ if (axri)
+ *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+ phba->sli4_hba.max_cfg_param.xri_used;
+
+ /* Account for differences with SLI-3. Get vpi count from
+ * mailbox data and subtract one for max vpi value.
+ */
+ max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
+ (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+
+ if (mvpi)
+ *mvpi = max_vpi;
+ if (avpi)
+ *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
+ } else {
+ if (mrpi)
+ *mrpi = pmb->un.varRdConfig.max_rpi;
+ if (arpi)
+ *arpi = pmb->un.varRdConfig.avail_rpi;
+ if (mxri)
+ *mxri = pmb->un.varRdConfig.max_xri;
+ if (axri)
+ *axri = pmb->un.varRdConfig.avail_xri;
+ if (mvpi)
+ *mvpi = pmb->un.varRdConfig.max_vpi;
+ if (avpi)
+ *avpi = pmb->un.varRdConfig.avail_vpi;
+ }
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return 1;
+}
+
+/**
+ * lpfc_max_rpi_show - Return maximum rpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_rpi_show - Return maximum rpi minus available rpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the used rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_max_xri_show - Return maximum xri
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_xri_show - Return maximum xpi minus the available xpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_max_vpi_show - Return maximum vpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_npiv_info_show - Return text about NPIV support for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: text that must be interpreted to determine if npiv is supported.
+ *
+ * Description:
+ * Buffer will contain text indicating npiv is not suppoerted on the port,
+ * the port is an NPIV physical port, or it is an npiv virtual port with
+ * the id of the vport.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (!(phba->max_vpi))
+ return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+ return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+}
+
+/**
+ * lpfc_poll_show - Return text about poll support for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the cfg_poll in hex.
+ *
+ * Notes:
+ * cfg_poll should be a lpfc_polling_flags type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_poll_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+}
+
+/**
+ * lpfc_poll_store - Set the value of cfg_poll for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Notes:
+ * buf contents converted to integer and checked for a valid value.
+ *
+ * Returns:
+ * -EINVAL if the buffer connot be converted or is out of range
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_poll_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t creg_val;
+ uint32_t old_val;
+ int val=0;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ if ((val & 0x3) != val)
+ return -EINVAL;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ val = 0;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3051 lpfc_poll changed from %d to %d\n",
+ phba->cfg_poll, val);
+
+ spin_lock_irq(&phba->hbalock);
+
+ old_val = phba->cfg_poll;
+
+ if (val & ENABLE_FCP_RING_POLLING) {
+ if ((val & DISABLE_FCP_RING_INT) &&
+ !(old_val & DISABLE_FCP_RING_INT)) {
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ lpfc_poll_start_timer(phba);
+ }
+ } else if (val != 0x0) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+
+ if (!(val & DISABLE_FCP_RING_INT) &&
+ (old_val & DISABLE_FCP_RING_INT))
+ {
+ spin_unlock_irq(&phba->hbalock);
+ del_timer(&phba->fcp_poll_timer);
+ spin_lock_irq(&phba->hbalock);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ phba->cfg_poll = val;
+
+ spin_unlock_irq(&phba->hbalock);
+
+ return strlen(buf);
+}
+
+/**
+ * lpfc_fips_level_show - Return the current FIPS level for the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+}
+
+/**
+ * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+}
+
+/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+ (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+ (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+ "" : "Not ");
+}
+
+/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support, 0 will be returned if called on virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint16_t max_nr_virtfn;
+
+ max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+}
+
+/**
+ * lpfc_param_show - Return a cfg attribute value in decimal
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show.
+ *
+ * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_param_show(attr) \
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
+ uint val = 0;\
+ val = phba->cfg_##attr;\
+ return snprintf(buf, PAGE_SIZE, "%d\n",\
+ phba->cfg_##attr);\
+}
+
+/**
+ * lpfc_param_hex_show - Return a cfg attribute value in hex
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_param_hex_show(attr) \
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
+ uint val = 0;\
+ val = phba->cfg_##attr;\
+ return snprintf(buf, PAGE_SIZE, "%#x\n",\
+ phba->cfg_##attr);\
+}
+
+/**
+ * lpfc_param_init - Initializes a cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: Initializes an attribute.
+ * @phba: pointer the the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Validates the min and max values then sets the adapter config field
+ * accordingly, or uses the default if out of range and prints an error message.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
+#define lpfc_param_init(attr, default, minval, maxval) \
+static int \
+lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
+{ \
+ if (val >= minval && val <= maxval) {\
+ phba->cfg_##attr = val;\
+ return 0;\
+ }\
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+ "0449 lpfc_"#attr" attribute cannot be set to %d, "\
+ "allowed range is ["#minval", "#maxval"]\n", val); \
+ phba->cfg_##attr = default;\
+ return -EINVAL;\
+}
+
+/**
+ * lpfc_param_set - Set a cfg attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: Sets an attribute value.
+ * @phba: pointer the the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the
+ * adapter config field if in the valid range. prints error message
+ * and does not set the parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
+#define lpfc_param_set(attr, default, minval, maxval) \
+static int \
+lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
+{ \
+ if (val >= minval && val <= maxval) {\
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+ "3052 lpfc_" #attr " changed from %d to %d\n", \
+ phba->cfg_##attr, val); \
+ phba->cfg_##attr = val;\
+ return 0;\
+ }\
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+ "0450 lpfc_"#attr" attribute cannot be set to %d, "\
+ "allowed range is ["#minval", "#maxval"]\n", val); \
+ return -EINVAL;\
+}
+
+/**
+ * lpfc_param_store - Set a vport attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_store.
+ *
+ * lpfc_##attr##_store: Set an sttribute value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in ascii.
+ * @count: not used.
+ *
+ * Description:
+ * Convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
+#define lpfc_param_store(attr) \
+static ssize_t \
+lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
+ uint val = 0;\
+ if (!isdigit(buf[0]))\
+ return -EINVAL;\
+ if (sscanf(buf, "%i", &val) != 1)\
+ return -EINVAL;\
+ if (lpfc_##attr##_set(phba, val) == 0) \
+ return strlen(buf);\
+ else \
+ return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_show - Return decimal formatted cfg attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in decimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: length of formatted string.
+ **/
+#define lpfc_vport_param_show(attr) \
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ uint val = 0;\
+ val = vport->cfg_##attr;\
+ return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+}
+
+/**
+ * lpfc_vport_param_hex_show - Return hex formatted attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g.
+ * hba_queue_depth expands into a function with the name
+ * lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in hexadecimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: length of formatted string.
+ **/
+#define lpfc_vport_param_hex_show(attr) \
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ uint val = 0;\
+ val = vport->cfg_##attr;\
+ return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+}
+
+/**
+ * lpfc_vport_param_init - Initialize a vport cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: validates the min and max values then sets the
+ * adapter config field accordingly, or uses the default if out of range
+ * and prints an error message.
+ * @phba: pointer the the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
+#define lpfc_vport_param_init(attr, default, minval, maxval) \
+static int \
+lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
+{ \
+ if (val >= minval && val <= maxval) {\
+ vport->cfg_##attr = val;\
+ return 0;\
+ }\
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+ "0423 lpfc_"#attr" attribute cannot be set to %d, "\
+ "allowed range is ["#minval", "#maxval"]\n", val); \
+ vport->cfg_##attr = default;\
+ return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_set - Set a vport cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: validates the min and max values then sets the
+ * adapter config field if in the valid range. prints error message
+ * and does not set the parameter if invalid.
+ * @phba: pointer the the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
+#define lpfc_vport_param_set(attr, default, minval, maxval) \
+static int \
+lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
+{ \
+ if (val >= minval && val <= maxval) {\
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+ "3053 lpfc_" #attr \
+ " changed from %d (x%x) to %d (x%x)\n", \
+ vport->cfg_##attr, vport->cfg_##attr, \
+ val, val); \
+ vport->cfg_##attr = val;\
+ return 0;\
+ }\
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+ "0424 lpfc_"#attr" attribute cannot be set to %d, "\
+ "allowed range is ["#minval", "#maxval"]\n", val); \
+ return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_store - Set a vport attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth
+ * expands into a function with the name lpfc_hba_queue_depth_store
+ *
+ * lpfc_##attr##_store: convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ * @cdev: class device that is converted into a Scsi_host.
+ * @buf: contains the attribute value in decimal.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
+#define lpfc_vport_param_store(attr) \
+static ssize_t \
+lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ uint val = 0;\
+ if (!isdigit(buf[0]))\
+ return -EINVAL;\
+ if (sscanf(buf, "%i", &val) != 1)\
+ return -EINVAL;\
+ if (lpfc_##attr##_set(vport, val) == 0) \
+ return strlen(buf);\
+ else \
+ return -EINVAL;\
+}
+
+
+#define LPFC_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_init(name, defval, minval, maxval)
+
+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_init(name, defval, minval, maxval)
+
+#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \
+static uint64_t lpfc_##name = defval;\
+module_param(lpfc_##name, ullong, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
+static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
+static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
+static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
+static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
+static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
+static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
+static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
+static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
+static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
+static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
+static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
+ lpfc_link_state_store);
+static DEVICE_ATTR(option_rom_version, S_IRUGO,
+ lpfc_option_rom_version_show, NULL);
+static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
+ lpfc_num_discovered_ports_show, NULL);
+static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
+static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
+static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
+static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
+static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
+ lpfc_board_mode_show, lpfc_board_mode_store);
+static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
+static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
+static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
+static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
+static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
+static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
+static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
+static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
+static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
+static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
+static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+ lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+ NULL);
+
+static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static size_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+ unsigned int i, j;
+
+ /* Count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+ ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ return -EINVAL;
+
+ memset(wwn, 0, WWN_SZ);
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ if ((*buf >= 'a') && (*buf <= 'f'))
+ j = ((j << 4) | ((*buf++ - 'a') + 10));
+ else if ((*buf >= 'A') && (*buf <= 'F'))
+ j = ((j << 4) | ((*buf++ - 'A') + 10));
+ else if ((*buf >= '0') && (*buf <= '9'))
+ j = ((j << 4) | (*buf++ - '0'));
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+ return 0;
+}
+/**
+ * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string lpfc_soft_wwn_key.
+ * @count: must be size of lpfc_soft_wwn_key.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
+ * length of buf indicates success
+ **/
+static ssize_t
+lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ unsigned int cnt = count;
+
+ /*
+ * We're doing a simple sanity check for soft_wwpn setting.
+ * We require that the user write a specific key to enable
+ * the soft_wwpn attribute to be settable. Once the attribute
+ * is written, the enable key resets. If further updates are
+ * desired, the key must be written again to re-enable the
+ * attribute.
+ *
+ * The "key" is not secret - it is a hardcoded string shown
+ * here. The intent is to protect against the random user or
+ * application that is just writing attributes.
+ */
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ if ((cnt != strlen(lpfc_soft_wwn_key)) ||
+ (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
+ return -EINVAL;
+
+ phba->soft_wwn_enable = 1;
+ return count;
+}
+static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
+ lpfc_soft_wwn_enable_store);
+
+/**
+ * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwpn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)phba->cfg_soft_wwpn);
+}
+
+/**
+ * lpfc_soft_wwpn_store - Set the ww port name of the adapter
+ * @dev class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the wwpn in hexadecimal.
+ * @count: number of wwpn bytes in buf
+ *
+ * Returns:
+ * -EACCES hba reset not enabled, adapter over temp
+ * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
+ * -EIO error taking adapter offline or online
+ * value of count on success
+ **/
+static ssize_t
+lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct completion online_compl;
+ int stat1 = 0, stat2 = 0;
+ unsigned int cnt = count;
+ u8 wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
+ spin_lock_irq(&phba->hbalock);
+ if (phba->over_temp_state == HBA_OVER_TEMP) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EACCES;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ if (!phba->soft_wwn_enable)
+ return -EINVAL;
+
+ /* lock setting wwpn, wwnn down */
+ phba->soft_wwn_enable = 0;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (!rc) {
+ /* not able to set wwpn, unlock it */
+ phba->soft_wwn_enable = 1;
+ return rc;
+ }
+
+ phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
+ fc_host_port_name(shost) = phba->cfg_soft_wwpn;
+ if (phba->cfg_soft_wwnn)
+ fc_host_node_name(shost) = phba->cfg_soft_wwnn;
+
+ dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+ "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
+
+ stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (stat1)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0463 lpfc_soft_wwpn attribute set failed to "
+ "reinit adapter - %d\n", stat1);
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+ if (stat2)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0464 lpfc_soft_wwpn attribute set failed to "
+ "reinit adapter - %d\n", stat2);
+ return (stat1 || stat2) ? -EIO : count;
+}
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
+ lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
+
+/**
+ * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwnn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)phba->cfg_soft_wwnn);
+}
+
+/**
+ * lpfc_soft_wwnn_store - sets the ww node name of the adapter
+ * @cdev: class device that is converted into a Scsi_host.
+ * @buf: contains the ww node name in hexadecimal.
+ * @count: number of wwnn bytes in buf.
+ *
+ * Returns:
+ * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
+ * value of count on success
+ **/
+static ssize_t
+lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ u8 wwnn[WWN_SZ];
+ int rc;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ if (!phba->soft_wwn_enable)
+ return -EINVAL;
+
+ rc = lpfc_wwn_set(buf, cnt, wwnn);
+ if (!rc) {
+ /* Allow wwnn to be set many times, as long as the enable
+ * is set. However, once the wwpn is set, everything locks.
+ */
+ return rc;
+ }
+
+ phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
+
+ dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+ "lpfc%d: soft_wwnn set. Value will take effect upon "
+ "setting of the soft_wwpn\n", phba->brd_no);
+
+ return count;
+}
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
+ lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * value of count
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid, invalid wwpn byte invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+ lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid, invalid wwpn byte invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+ lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
+ * of whether luns will be enabled or disabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
+ * of whether luns will be enabled or disabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid, invalid wwpn byte invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int val = 0;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ phba->cfg_oas_lun_state = val;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ * Storage (OAS) lun returned by the
+ * lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+ return -EFAULT;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+ lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ * (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @ndlp: pointer to fcp target node.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * SUCCESS : 0
+ * -EPERM OAS is not enabled or not supported by this port.
+ *
+ */
+static size_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+ int rc = 0;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ if (oas_state) {
+ if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun))
+ rc = -ENOMEM;
+ } else {
+ lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun);
+ }
+ return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ * Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the lun returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified. If a lun is found, its vport wwpn, target wwpn and status is
+ * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+ uint64_t found_lun;
+
+ if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+ return NOT_OAS_ENABLED_LUN;
+ if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+ phba->sli4_hba.oas_next_vpt_wwpn,
+ (struct lpfc_name *)
+ phba->sli4_hba.oas_next_tgt_wwpn,
+ &phba->sli4_hba.oas_next_lun,
+ (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn,
+ &found_lun, lun_status))
+ return found_lun;
+ else
+ return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable an lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun,
+ uint32_t oas_state)
+{
+
+ int rc;
+
+ rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+ oas_state);
+ return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ uint64_t oas_lun;
+ int len = 0;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+ return -EFAULT;
+
+ oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn,
+ &phba->cfg_oas_lun_status);
+ if (oas_lun != NOT_OAS_ENABLED_LUN)
+ phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+ len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+ return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Sets the OAS state for lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This function sets the OAS state for lun. Before this function is called,
+ * the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint64_t scsi_lun;
+ ssize_t rc;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ return -EFAULT;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+ return -EINVAL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+ "with oas set to %d\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn, scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_show, lpfc_oas_lun_store);
+
+static int lpfc_poll = 0;
+module_param(lpfc_poll, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
+ " 0 - none,"
+ " 1 - poll with interrupts enabled"
+ " 3 - poll and disable FCP ring interrupts");
+
+static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
+ lpfc_poll_show, lpfc_poll_store);
+
+int lpfc_sli_mode = 0;
+module_param(lpfc_sli_mode, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
+ " 0 - auto (SLI-3 if supported),"
+ " 2 - select SLI-2 even on SLI-3 capable HBAs,"
+ " 3 - select SLI-3");
+
+int lpfc_enable_npiv = 1;
+module_param(lpfc_enable_npiv, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
+lpfc_param_show(enable_npiv);
+lpfc_param_init(enable_npiv, 1, 0, 1);
+static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
+
+LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
+ "FCF Fast failover=1 Priority failover=2");
+
+int lpfc_enable_rrq = 2;
+module_param(lpfc_enable_rrq, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
+lpfc_param_show(enable_rrq);
+/*
+# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
+# 0x0 = disabled, XRI/OXID use not tracked.
+# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
+# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
+*/
+lpfc_param_init(enable_rrq, 2, 0, 2);
+static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+
+/*
+# lpfc_suppress_link_up: Bring link up at initialization
+# 0x0 = bring link up (issue MBX_INIT_LINK)
+# 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK)
+# 0x2 = never bring up link
+# Default value is 0.
+*/
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+ LPFC_DELAY_INIT_LINK_INDEFINITELY,
+ "Suppress Link Up at initialization");
+/*
+# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
+# 1 - (1024)
+# 2 - (2048)
+# 3 - (3072)
+# 4 - (4096)
+# 5 - (5120)
+*/
+static ssize_t
+lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+}
+
+static DEVICE_ATTR(iocb_hw, S_IRUGO,
+ lpfc_iocb_hw_show, NULL);
+static ssize_t
+lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli.ring[LPFC_ELS_RING].txq_max);
+}
+
+static DEVICE_ATTR(txq_hw, S_IRUGO,
+ lpfc_txq_hw_show, NULL);
+static ssize_t
+lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
+}
+
+static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
+ lpfc_txcmplq_hw_show, NULL);
+
+int lpfc_iocb_cnt = 2;
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_iocb_cnt,
+ "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
+lpfc_param_show(iocb_cnt);
+lpfc_param_init(iocb_cnt, 2, 1, 5);
+static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
+ lpfc_iocb_cnt_show, NULL);
+
+/*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+*/
+static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
+module_param(lpfc_nodev_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_nodev_tmo,
+ "Seconds driver will hold I/O waiting "
+ "for a device to come back");
+
+/**
+ * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the dev loss timeout in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
+}
+
+/**
+ * lpfc_nodev_tmo_init - Set the hba nodev timeout value
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the nodev timeout value.
+ *
+ * Description:
+ * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
+ * a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
+{
+ if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
+ vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
+ if (val != LPFC_DEF_DEVLOSS_TMO)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0407 Ignoring nodev_tmo module "
+ "parameter because devloss_tmo is "
+ "set.\n");
+ return 0;
+ }
+
+ if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+ vport->cfg_nodev_tmo = val;
+ vport->cfg_devloss_tmo = val;
+ return 0;
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0400 lpfc_nodev_tmo attribute cannot be set to"
+ " %d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+ vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+ return -EINVAL;
+}
+
+/**
+ * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
+ * @vport: lpfc vport structure pointer.
+ *
+ * Description:
+ * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
+ **/
+static void
+lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_nodelist *ndlp;
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
+ if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
+ ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+ spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If the devloss tmo is already set or the vport dev loss tmo has changed
+ * then a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
+{
+ if (vport->dev_loss_tmo_changed ||
+ (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0401 Ignoring change to nodev_tmo "
+ "because devloss_tmo is set.\n");
+ return 0;
+ }
+ if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+ vport->cfg_nodev_tmo = val;
+ vport->cfg_devloss_tmo = val;
+ /*
+ * For compat: set the fc_host dev loss so new rports
+ * will get the value.
+ */
+ fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+ lpfc_update_rport_devloss_tmo(vport);
+ return 0;
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0403 lpfc_nodev_tmo attribute cannot be set to"
+ "%d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+ return -EINVAL;
+}
+
+lpfc_vport_param_store(nodev_tmo)
+
+static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
+ lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+
+/*
+# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+# disappear until the timer expires. Value range is [0,255]. Default
+# value is 30.
+*/
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_devloss_tmo,
+ "Seconds driver will hold I/O waiting "
+ "for a device to come back");
+lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
+ LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
+lpfc_vport_param_show(devloss_tmo)
+
+/**
+ * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If val is in a valid range then set the vport nodev tmo,
+ * devloss tmo, also set the vport dev loss tmo changed flag.
+ * Else a kernel error message is printed.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
+{
+ if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+ vport->cfg_nodev_tmo = val;
+ vport->cfg_devloss_tmo = val;
+ vport->dev_loss_tmo_changed = 1;
+ fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+ lpfc_update_rport_devloss_tmo(vport);
+ return 0;
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0404 lpfc_devloss_tmo attribute cannot be set to"
+ " %d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+ return -EINVAL;
+}
+
+lpfc_vport_param_store(devloss_tmo)
+static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
+ lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
+
+/*
+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
+# deluged with LOTS of information.
+# You can set a bit mask to record specific types of verbose messages:
+# See lpfc_logmsh.h for definitions.
+*/
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
+ "Verbose logging bit-mask");
+
+/*
+# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
+# objects that have been registered with the nameserver after login.
+*/
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
+ "Deregister nameserver objects before LOGO");
+
+/*
+# lun_queue_depth: This parameter is used to limit the number of outstanding
+# commands per FCP LUN. Value range is [1,512]. Default value is 30.
+# If this parameter value is greater than 1/8th the maximum number of exchanges
+# supported by the HBA port, then the lun queue depth will be reduced to
+# 1/8th the maximum number of exchanges.
+*/
+LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
+ "Max number of FCP commands we can queue to a specific LUN");
+
+/*
+# tgt_queue_depth: This parameter is used to limit the number of outstanding
+# commands per target port. Value range is [10,65535]. Default value is 65535.
+*/
+LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
+ "Max number of FCP commands we can queue to a specific target port");
+
+/*
+# hba_queue_depth: This parameter is used to limit the number of outstanding
+# commands per lpfc HBA. Value range is [32,8192]. If this parameter
+# value is greater than the maximum number of exchanges supported by the HBA,
+# then maximum number of exchanges supported by the HBA is used to determine
+# the hba_queue_depth.
+*/
+LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
+ "Max number of FCP commands we can queue to a lpfc HBA");
+
+/*
+# peer_port_login: This parameter allows/prevents logins
+# between peer ports hosted on the same physical port.
+# When this parameter is set 0 peer ports of same physical port
+# are not allowed to login to each other.
+# When this parameter is set 1 peer ports of same physical port
+# are allowed to login to each other.
+# Default value of this parameter is 0.
+*/
+LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
+ "Allow peer ports on the same physical port to login to each "
+ "other.");
+
+/*
+# restrict_login: This parameter allows/prevents logins
+# between Virtual Ports and remote initiators.
+# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
+# other initiators and will attempt to PLOGI all remote ports.
+# When this parameter is set (1) Virtual Ports will reject PLOGIs from
+# remote ports and will not attempt to PLOGI to other initiators.
+# This parameter does not restrict to the physical port.
+# This parameter does not restrict logins to Fabric resident remote ports.
+# Default value of this parameter is 1.
+*/
+static int lpfc_restrict_login = 1;
+module_param(lpfc_restrict_login, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_restrict_login,
+ "Restrict virtual ports login to remote initiators.");
+lpfc_vport_param_show(restrict_login);
+
+/**
+ * lpfc_restrict_login_init - Set the vport restrict login flag
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical clear the restrict login flag and return.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
+{
+ if (val < 0 || val > 1) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0422 lpfc_restrict_login attribute cannot "
+ "be set to %d, allowed range is [0, 1]\n",
+ val);
+ vport->cfg_restrict_login = 1;
+ return -EINVAL;
+ }
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vport->cfg_restrict_login = 0;
+ return 0;
+ }
+ vport->cfg_restrict_login = val;
+ return 0;
+}
+
+/**
+ * lpfc_restrict_login_set - Set the vport restrict login flag
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical and the val is not zero log a kernel
+ * error message, clear the restrict login flag and return zero.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
+{
+ if (val < 0 || val > 1) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0425 lpfc_restrict_login attribute cannot "
+ "be set to %d, allowed range is [0, 1]\n",
+ val);
+ vport->cfg_restrict_login = 1;
+ return -EINVAL;
+ }
+ if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0468 lpfc_restrict_login must be 0 for "
+ "Physical ports.\n");
+ vport->cfg_restrict_login = 0;
+ return 0;
+ }
+ vport->cfg_restrict_login = val;
+ return 0;
+}
+lpfc_vport_param_store(restrict_login);
+static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
+ lpfc_restrict_login_show, lpfc_restrict_login_store);
+
+/*
+# Some disk devices have a "select ID" or "select Target" capability.
+# From a protocol standpoint "select ID" usually means select the
+# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
+# annex" which contains a table that maps a "select ID" (a number
+# between 0 and 7F) to an ALPA. By default, for compatibility with
+# older drivers, the lpfc driver scans this table from low ALPA to high
+# ALPA.
+#
+# Turning on the scan-down variable (on = 1, off = 0) will
+# cause the lpfc driver to use an inverted table, effectively
+# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
+#
+# (Note: This "select ID" functionality is a LOOP ONLY characteristic
+# and will not work across a fabric. Also this parameter will take
+# effect only in the case when ALPA map is not available.)
+*/
+LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
+ "Start scanning for devices from highest ALPA to lowest");
+
+/*
+# lpfc_topology: link topology for init link
+# 0x0 = attempt loop mode then point-to-point
+# 0x01 = internal loopback mode
+# 0x02 = attempt point-to-point mode only
+# 0x04 = attempt loop mode only
+# 0x06 = attempt point-to-point mode then loop
+# Set point-to-point mode if you want to run as an N_Port.
+# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
+# Default value is 0.
+*/
+
+/**
+ * lpfc_topology_set - Set the adapters topology field
+ * @phba: lpfc_hba pointer.
+ * @val: topology value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's topology field and
+ * issue a lip; if the lip fails reset the topology to the old value.
+ *
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * zero if val is in range and lip okay
+ * non-zero return value from lpfc_issue_lip()
+ * -EINVAL val out of range
+ **/
+static ssize_t
+lpfc_topology_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0;
+ int nolip = 0;
+ const char *val_buf = buf;
+ int err;
+ uint32_t prev_val;
+
+ if (!strncmp(buf, "nolip ", strlen("nolip "))) {
+ nolip = 1;
+ val_buf = &buf[strlen("nolip ")];
+ }
+
+ if (!isdigit(val_buf[0]))
+ return -EINVAL;
+ if (sscanf(val_buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ if (val >= 0 && val <= 6) {
+ prev_val = phba->cfg_topology;
+ phba->cfg_topology = val;
+ if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+ val == 4) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3113 Loop mode not supported at speed %d\n",
+ phba->cfg_link_speed);
+ phba->cfg_topology = prev_val;
+ return -EINVAL;
+ }
+ if (nolip)
+ return strlen(buf);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3054 lpfc_topology changed from %d to %d\n",
+ prev_val, val);
+ if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+ phba->fc_topology_changed = 1;
+ err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+ if (err) {
+ phba->cfg_topology = prev_val;
+ return -EINVAL;
+ } else
+ return strlen(buf);
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0467 lpfc_topology attribute cannot be set to %d, "
+ "allowed range is [0, 6]\n",
+ phba->brd_no, val);
+ return -EINVAL;
+}
+static int lpfc_topology = 0;
+module_param(lpfc_topology, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+lpfc_param_show(topology)
+lpfc_param_init(topology, 0, 0, 6)
+static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
+ lpfc_topology_show, lpfc_topology_store);
+
+/**
+ * lpfc_static_vport_show: Read callback function for
+ * lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file report the mageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ if (vport->vport_flag & STATIC_VPORT)
+ sprintf(buf, "1\n");
+ else
+ sprintf(buf, "0\n");
+
+ return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+ lpfc_static_vport_show, NULL);
+
+/**
+ * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
+ * @dev: Pointer to class device.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
+ *
+ * This function get called when an user write to the lpfc_stat_data_ctrl
+ * sysfs file. This function parse the command written to the sysfs file
+ * and take appropriate action. These commands are used for controlling
+ * driver statistical data collection.
+ * Following are the command this function handles.
+ *
+ * setbucket <bucket_type> <base> <step>
+ * = Set the latency buckets.
+ * destroybucket = destroy all the buckets.
+ * start = start data collection
+ * stop = stop data collection
+ * reset = reset the collected data
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+#define LPFC_MAX_DATA_CTRL_LEN 1024
+ static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
+ unsigned long i;
+ char *str_ptr, *token;
+ struct lpfc_vport **vports;
+ struct Scsi_Host *v_shost;
+ char *bucket_type_str, *base_str, *step_str;
+ unsigned long base, step, bucket_type;
+
+ if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
+ if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
+ return -EINVAL;
+
+ strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
+ str_ptr = &bucket_data[0];
+ /* Ignore this token - this is command token */
+ token = strsep(&str_ptr, "\t ");
+ if (!token)
+ return -EINVAL;
+
+ bucket_type_str = strsep(&str_ptr, "\t ");
+ if (!bucket_type_str)
+ return -EINVAL;
+
+ if (!strncmp(bucket_type_str, "linear", strlen("linear")))
+ bucket_type = LPFC_LINEAR_BUCKET;
+ else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
+ bucket_type = LPFC_POWER2_BUCKET;
+ else
+ return -EINVAL;
+
+ base_str = strsep(&str_ptr, "\t ");
+ if (!base_str)
+ return -EINVAL;
+ base = simple_strtoul(base_str, NULL, 0);
+
+ step_str = strsep(&str_ptr, "\t ");
+ if (!step_str)
+ return -EINVAL;
+ step = simple_strtoul(step_str, NULL, 0);
+ if (!step)
+ return -EINVAL;
+
+ /* Block the data collection for every vport */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(v_shost->host_lock);
+ /* Block and reset data collection */
+ vports[i]->stat_data_blocked = 1;
+ if (vports[i]->stat_data_enabled)
+ lpfc_vport_reset_stat_data(vports[i]);
+ spin_unlock_irq(v_shost->host_lock);
+ }
+
+ /* Set the bucket attributes */
+ phba->bucket_type = bucket_type;
+ phba->bucket_base = base;
+ phba->bucket_step = step;
+
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+
+ /* Unblock data collection */
+ spin_lock_irq(v_shost->host_lock);
+ vports[i]->stat_data_blocked = 0;
+ spin_unlock_irq(v_shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ vports[i]->stat_data_blocked = 1;
+ lpfc_free_bucket(vport);
+ vport->stat_data_enabled = 0;
+ vports[i]->stat_data_blocked = 0;
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ phba->bucket_type = LPFC_NO_BUCKET;
+ phba->bucket_base = 0;
+ phba->bucket_step = 0;
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "start", strlen("start"))) {
+ /* If no buckets configured return error */
+ if (phba->bucket_type == LPFC_NO_BUCKET)
+ return -EINVAL;
+ spin_lock_irq(shost->host_lock);
+ if (vport->stat_data_enabled) {
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ lpfc_alloc_bucket(vport);
+ vport->stat_data_enabled = 1;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "stop", strlen("stop"))) {
+ spin_lock_irq(shost->host_lock);
+ if (vport->stat_data_enabled == 0) {
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ lpfc_free_bucket(vport);
+ vport->stat_data_enabled = 0;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "reset", strlen("reset"))) {
+ if ((phba->bucket_type == LPFC_NO_BUCKET)
+ || !vport->stat_data_enabled)
+ return strlen(buf);
+ spin_lock_irq(shost->host_lock);
+ vport->stat_data_blocked = 1;
+ lpfc_vport_reset_stat_data(vport);
+ vport->stat_data_blocked = 0;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+
+/**
+ * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
+ * @dev: Pointer to class device object.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_stat_data_ctrl sysfs file. This function report the
+ * current statistical data collection state.
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int index = 0;
+ int i;
+ char *bucket_type;
+ unsigned long bucket_value;
+
+ switch (phba->bucket_type) {
+ case LPFC_LINEAR_BUCKET:
+ bucket_type = "linear";
+ break;
+ case LPFC_POWER2_BUCKET:
+ bucket_type = "power2";
+ break;
+ default:
+ bucket_type = "No Bucket";
+ break;
+ }
+
+ sprintf(&buf[index], "Statistical Data enabled :%d, "
+ "blocked :%d, Bucket type :%s, Bucket base :%d,"
+ " Bucket step :%d\nLatency Ranges :",
+ vport->stat_data_enabled, vport->stat_data_blocked,
+ bucket_type, phba->bucket_base, phba->bucket_step);
+ index = strlen(buf);
+ if (phba->bucket_type != LPFC_NO_BUCKET) {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET)
+ bucket_value = phba->bucket_base +
+ phba->bucket_step * i;
+ else
+ bucket_value = phba->bucket_base +
+ (1 << i) * phba->bucket_step;
+
+ if (index + 10 > PAGE_SIZE)
+ break;
+ sprintf(&buf[index], "%08ld ", bucket_value);
+ index = strlen(buf);
+ }
+ }
+ sprintf(&buf[index], "\n");
+ return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
+ lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
+
+/*
+ * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
+ */
+
+/*
+ * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
+ * for each target.
+ */
+#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
+#define MAX_STAT_DATA_SIZE_PER_TARGET \
+ STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
+
+
+/**
+ * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
+ * @filp: sysfs file
+ * @kobj: Pointer to the kernel object
+ * @bin_attr: Attribute object
+ * @buff: Buffer pointer
+ * @off: File offset
+ * @count: Buffer size
+ *
+ * This function is the read call back function for lpfc_drvr_stat_data
+ * sysfs file. This function export the statistical data to user
+ * applications.
+ **/
+static ssize_t
+sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int i = 0, index = 0;
+ unsigned long nport_index;
+ struct lpfc_nodelist *ndlp = NULL;
+ nport_index = (unsigned long)off /
+ MAX_STAT_DATA_SIZE_PER_TARGET;
+
+ if (!vport->stat_data_enabled || vport->stat_data_blocked
+ || (phba->bucket_type == LPFC_NO_BUCKET))
+ return 0;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+ continue;
+
+ if (nport_index > 0) {
+ nport_index--;
+ continue;
+ }
+
+ if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
+ > count)
+ break;
+
+ if (!ndlp->lat_data)
+ continue;
+
+ /* Print the WWN */
+ sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+
+ index = strlen(buf);
+
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+ sprintf(&buf[index], "%010u,",
+ ndlp->lat_data[i].cmd_count);
+ index = strlen(buf);
+ }
+ sprintf(&buf[index], "\n");
+ index = strlen(buf);
+ }
+ spin_unlock_irq(shost->host_lock);
+ return index;
+}
+
+static struct bin_attribute sysfs_drvr_stat_data_attr = {
+ .attr = {
+ .name = "lpfc_drvr_stat_data",
+ .mode = S_IRUSR,
+ },
+ .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
+ .read = sysfs_drvr_stat_data_read,
+ .write = NULL,
+};
+
+/*
+# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
+# connection.
+# Value range is [0,16]. Default value is 0.
+*/
+/**
+ * lpfc_link_speed_set - Set the adapters link speed
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field and
+ * issue a lip; if the lip fails reset the link speed to the old value.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * zero if val is in range and lip okay.
+ * non-zero return value from lpfc_issue_lip()
+ * -EINVAL val out of range
+ **/
+static ssize_t
+lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = LPFC_USER_LINK_SPEED_AUTO;
+ int nolip = 0;
+ const char *val_buf = buf;
+ int err;
+ uint32_t prev_val;
+
+ if (!strncmp(buf, "nolip ", strlen("nolip "))) {
+ nolip = 1;
+ val_buf = &buf[strlen("nolip ")];
+ }
+
+ if (!isdigit(val_buf[0]))
+ return -EINVAL;
+ if (sscanf(val_buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3055 lpfc_link_speed changed from %d to %d %s\n",
+ phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
+
+ if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
+ ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
+ ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
+ ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
+ ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
+ ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2879 lpfc_link_speed attribute cannot be set "
+ "to %d. Speed is not supported by this port.\n",
+ val);
+ return -EINVAL;
+ }
+ if (val == LPFC_USER_LINK_SPEED_16G &&
+ phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3112 lpfc_link_speed attribute cannot be set "
+ "to %d. Speed is not supported in loop mode.\n",
+ val);
+ return -EINVAL;
+ }
+ if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+ (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
+ prev_val = phba->cfg_link_speed;
+ phba->cfg_link_speed = val;
+ if (nolip)
+ return strlen(buf);
+
+ err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+ if (err) {
+ phba->cfg_link_speed = prev_val;
+ return -EINVAL;
+ } else
+ return strlen(buf);
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0469 lpfc_link_speed attribute cannot be set to %d, "
+ "allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
+ return -EINVAL;
+}
+
+static int lpfc_link_speed = 0;
+module_param(lpfc_link_speed, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
+lpfc_param_show(link_speed)
+
+/**
+ * lpfc_link_speed_init - Set the adapters link speed
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, clear the link
+ * speed and return an error.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_link_speed_init(struct lpfc_hba *phba, int val)
+{
+ if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3111 lpfc_link_speed of %d cannot "
+ "support loop mode, setting topology to default.\n",
+ val);
+ phba->cfg_topology = 0;
+ }
+ if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+ (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
+ phba->cfg_link_speed = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0405 lpfc_link_speed attribute cannot "
+ "be set to %d, allowed values are "
+ "["LPFC_LINK_SPEED_STRING"]\n", val);
+ phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
+ lpfc_link_speed_show, lpfc_link_speed_store);
+
+/*
+# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
+# 0 = aer disabled or not supported
+# 1 = aer supported and enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+
+/**
+ * lpfc_aer_support_store - Set the adapter for aer support
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing enable or disable aer flag.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the val is 1 and currently the device's AER capability was not
+ * enabled, invoke the kernel's enable AER helper routine, trying to
+ * enable the device's AER capability. If the helper routine enabling
+ * AER returns success, update the device's cfg_aer_support flag to
+ * indicate AER is supported by the device; otherwise, if the device
+ * AER capability is already enabled to support AER, then do nothing.
+ *
+ * If the val is 0 and currently the device's AER support was enabled,
+ * invoke the kernel's disable AER helper routine. After that, update
+ * the device's cfg_aer_support flag to indicate AER is not supported
+ * by the device; otherwise, if the device AER capability is already
+ * disabled from supporting AER, then do nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, rc = -EINVAL;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ switch (val) {
+ case 0:
+ if (phba->hba_flag & HBA_AER_ENABLED) {
+ rc = pci_disable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ phba->cfg_aer_support = 0;
+ rc = strlen(buf);
+ } else
+ rc = -EPERM;
+ } else {
+ phba->cfg_aer_support = 0;
+ rc = strlen(buf);
+ }
+ break;
+ case 1:
+ if (!(phba->hba_flag & HBA_AER_ENABLED)) {
+ rc = pci_enable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ phba->cfg_aer_support = 1;
+ rc = strlen(buf);
+ } else
+ rc = -EPERM;
+ } else {
+ phba->cfg_aer_support = 1;
+ rc = strlen(buf);
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static int lpfc_aer_support = 1;
+module_param(lpfc_aer_support, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
+
+/**
+ * lpfc_aer_support_init - Set the initial adapters aer support flag
+ * @phba: lpfc_hba pointer.
+ * @val: enable aer or disable aer flag.
+ *
+ * Description:
+ * If val is in a valid range [0,1], then set the adapter's initial
+ * cfg_aer_support field. It will be up to the driver's probe_one
+ * routine to determine whether the device's AER support can be set
+ * or not.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, and
+ * choose the default value of setting AER support and return.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_aer_support_init(struct lpfc_hba *phba, int val)
+{
+ if (val == 0 || val == 1) {
+ phba->cfg_aer_support = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2712 lpfc_aer_support attribute value %d out "
+ "of range, allowed values are 0|1, setting it "
+ "to default value of 1\n", val);
+ /* By default, try to enable AER on a device */
+ phba->cfg_aer_support = 1;
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
+ lpfc_aer_support_show, lpfc_aer_support_store);
+
+/**
+ * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing flag 1 for aer cleanup state.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the @buf contains 1 and the device currently has the AER support
+ * enabled, then invokes the kernel AER helper routine
+ * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * error status register.
+ *
+ * Notes:
+ *
+ * Returns:
+ * -EINVAL if the buf does not contain the 1 or the device is not currently
+ * enabled with the AER support.
+ **/
+static ssize_t
+lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val, rc = -1;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val != 1)
+ return -EINVAL;
+
+ if (phba->hba_flag & HBA_AER_ENABLED)
+ rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+
+ if (rc == 0)
+ return strlen(buf);
+ else
+ return -EPERM;
+}
+
+static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
+ lpfc_aer_cleanup_state);
+
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called either through user sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual function has been enabled to the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If none-zero virtual functions have already been enabled to the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EINVAL will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and none-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoded to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual function has been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ /* Request disabling virtual functions */
+ if (val == 0) {
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ return strlen(buf);
+ }
+
+ /* Request enabling virtual functions */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3018 There are %d virtual functions "
+ "enabled on physical function.\n",
+ phba->cfg_sriov_nr_virtfn);
+ return -EEXIST;
+ }
+
+ if (val <= LPFC_MAX_VFN_PER_PFN)
+ phba->cfg_sriov_nr_virtfn = val;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3019 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+ }
+
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ phba->cfg_sriov_nr_virtfn = 0;
+ rc = -EPERM;
+ } else
+ rc = strlen(buf);
+
+ return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+ phba->cfg_sriov_nr_virtfn = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3017 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+ lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
+/**
+ * lpfc_request_firmware_store - Request for Linux generic firmware upgrade
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_request_firmware_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val != 1)
+ return -EINVAL;
+
+ rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
+ if (rc)
+ rc = -EPERM;
+ else
+ rc = strlen(buf);
+ return rc;
+}
+
+static int lpfc_req_fw_upgrade;
+module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
+lpfc_param_show(request_firmware_upgrade)
+
+/**
+ * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
+ * @phba: lpfc_hba pointer.
+ * @val: 0 or 1.
+ *
+ * Description:
+ * Set the initial Linux generic firmware upgrade enable or disable flag.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= 1) {
+ phba->cfg_request_firmware_upgrade = val;
+ return 0;
+ }
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
+ lpfc_request_firmware_upgrade_show,
+ lpfc_request_firmware_upgrade_store);
+
+/**
+ * lpfc_fcp_imax_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the number of fast-path FCP interrupts per second.
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range [636,651042], then set the adapter's
+ * maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, i;
+
+ /* fcp_imax is only valid for SLI4 */
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ /*
+ * Value range for the HBA is [5000,5000000]
+ * The value for each EQ depends on how many EQs are configured.
+ */
+ if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
+ return -EINVAL;
+
+ phba->cfg_fcp_imax = (uint32_t)val;
+ for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
+ lpfc_modify_fcp_eq_delay(phba, i);
+
+ return strlen(buf);
+}
+
+/*
+# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+# for the HBA.
+#
+# Value range is [5,000 to 5,000,000]. Default value is 50,000.
+*/
+static int lpfc_fcp_imax = LPFC_DEF_IMAX;
+module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_imax,
+ "Set the maximum number of FCP interrupts per second per HBA");
+lpfc_param_show(fcp_imax)
+
+/**
+ * lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [636,651042], then initialize the adapter's
+ * maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ phba->cfg_fcp_imax = 0;
+ return 0;
+ }
+
+ if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
+ phba->cfg_fcp_imax = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3016 fcp_imax: %d out of range, using default\n", val);
+ phba->cfg_fcp_imax = LPFC_DEF_IMAX;
+
+ return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
+ lpfc_fcp_imax_show, lpfc_fcp_imax_store);
+
+/**
+ * lpfc_state_show - Display current driver CPU affinity
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vector_map_info *cpup;
+ int len = 0;
+
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->intr_type != MSIX))
+ return len;
+
+ switch (phba->cfg_fcp_cpu_map) {
+ case 0:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "fcp_cpu_map: No mapping (%d)\n",
+ phba->cfg_fcp_cpu_map);
+ return len;
+ case 1:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "fcp_cpu_map: HBA centric mapping (%d): "
+ "%d online CPUs\n",
+ phba->cfg_fcp_cpu_map,
+ phba->sli4_hba.num_online_cpu);
+ break;
+ case 2:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "fcp_cpu_map: Driver centric mapping (%d): "
+ "%d online CPUs\n",
+ phba->cfg_fcp_cpu_map,
+ phba->sli4_hba.num_online_cpu);
+ break;
+ }
+
+ while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+ /* margin should fit in this and the truncated message */
+ if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "CPU %02d io_chan %02d "
+ "physid %d coreid %d\n",
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
+ cpup->core_id);
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "CPU %02d io_chan %02d "
+ "physid %d coreid %d IRQ %d\n",
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
+ cpup->core_id, cpup->irq);
+
+ phba->sli4_hba.curr_disp_cpu++;
+
+ /* display max number of CPUs keeping some margin */
+ if (phba->sli4_hba.curr_disp_cpu <
+ phba->sli4_hba.num_present_cpu &&
+ (len >= (PAGE_SIZE - 64))) {
+ len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+ break;
+ }
+ }
+
+ if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+ phba->sli4_hba.curr_disp_cpu = 0;
+
+ return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int status = -EINVAL;
+ return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+# 0 - Do not affinitze IRQ vectors
+# 1 - Affintize HBA vectors with respect to each HBA
+# (start with CPU0 for each HBA)
+# 2 - Affintize HBA vectors with respect to the entire driver
+# (round robin thru all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+ "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitze the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ phba->cfg_fcp_cpu_map = 0;
+ return 0;
+ }
+
+ if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+ phba->cfg_fcp_cpu_map = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3326 fcp_cpu_map: %d out of range, using default\n",
+ val);
+ phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+ return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+ lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
+/*
+# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
+# Value range is [2,3]. Default value is 3.
+*/
+LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
+ "Select Fibre Channel class of service for FCP sequences");
+
+/*
+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
+# is [0,1]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+ "Use ADISC on rediscovery to authenticate FCP devices");
+
+/*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+ "First burst size for Targets that support first burst");
+
+/*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth. When
+# the parameter is set to a non-zero value, the I/O queue depth is controlled
+# to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
+*/
+static int lpfc_max_scsicmpl_time;
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+ "Use command completion time to control queue depth");
+lpfc_vport_param_show(max_scsicmpl_time);
+lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ if (val == vport->cfg_max_scsicmpl_time)
+ return 0;
+ if ((val < 0) || (val > 60000))
+ return -EINVAL;
+ vport->cfg_max_scsicmpl_time = val;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
+ lpfc_max_scsicmpl_time_show,
+ lpfc_max_scsicmpl_time_store);
+
+/*
+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
+# range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
+
+/*
+# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds
+# range is [0,1]. Default value is 0.
+# For [0], FCP commands are issued to Work Queues ina round robin fashion.
+# For [1], FCP commands are issued to a Work Queue associated with the
+# current CPU.
+# It would be set to 1 by the driver if it's able to set up cpu affinity
+# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
+# roundrobin scheduling of FCP I/Os through WQs will be used.
+*/
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
+ "issuing commands [0] - Round Robin, [1] - Current CPU");
+
+/*
+# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
+# range is [0,1]. Default value is 0.
+# For [0], bus reset issues target reset to ALL devices
+# For [1], bus reset issues target reset to non-FCP2 devices
+*/
+LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
+ "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
+
+
+/*
+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
+# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
+# cr_delay is set to 0.
+*/
+LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
+ "interrupt response is generated");
+
+LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
+ "interrupt response is generated");
+
+/*
+# lpfc_multi_ring_support: Determines how many rings to spread available
+# cmd/rsp IOCB entries across.
+# Value range is [1,2]. Default value is 1.
+*/
+LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
+ "SLI rings to spread IOCB entries across");
+
+/*
+# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
+# identifies what rctl value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 4 (Unsolicated Data).
+*/
+LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
+ 255, "Identifies RCTL for additional ring configuration");
+
+/*
+# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
+# identifies what type value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
+*/
+LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
+ 255, "Identifies TYPE for additional ring configuration");
+
+/*
+# lpfc_fdmi_on: controls FDMI support.
+# Set NOT Set
+# bit 0 = FDMI support no FDMI support
+# LPFC_FDMI_SUPPORT just turns basic support on/off
+# bit 1 = Register delay no register delay (60 seconds)
+# LPFC_FDMI_REG_DELAY 60 sec registration delay after FDMI login
+# bit 2 = All attributes Use a attribute subset
+# LPFC_FDMI_ALL_ATTRIB applies to both port and HBA attributes
+# Port attrutes subset: 1 thru 6 OR all: 1 thru 0xd 0x101 0x102 0x103
+# HBA attributes subset: 1 thru 0xb OR all: 1 thru 0xc
+# Value range [0,7]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 7, "Enable FDMI support");
+
+/*
+# Specifies the maximum number of ELS cmds we can have outstanding (for
+# discovery). Value range is [1,64]. Default value = 32.
+*/
+LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
+ "during discovery");
+
+/*
+# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
+# will be scanned by the SCSI midlayer when sequential scanning is
+# used; and is also the highest LUN ID allowed when the SCSI midlayer
+# parses REPORT_LUN responses. The lpfc driver has no LUN count or
+# LUN ID limit, but the SCSI midlayer requires this field for the uses
+# above. The lpfc driver limits the default value to 255 for two reasons.
+# As it bounds the sequential scan loop, scanning for thousands of luns
+# on a target can take minutes of wall clock time. Additionally,
+# there are FC targets, such as JBODs, that only recognize 8-bits of
+# LUN ID. When they receive a value greater than 8 bits, they chop off
+# the high order bits. In other words, they see LUN IDs 0, 256, 512,
+# and so on all as LUN ID 0. This causes the linux kernel, which sees
+# valid responses at each of the LUN IDs, to believe there are multiple
+# devices present, when in fact, there is only 1.
+# A customer that is aware of their target behaviors, and the results as
+# indicated above, is welcome to increase the lpfc_max_luns value.
+# As mentioned, this value is not used by the lpfc driver, only the
+# SCSI midlayer.
+# Value range is [0,65535]. Default value is 255.
+# NOTE: The SCSI layer might probe all allowed LUN on some old targets.
+*/
+LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
+
+/*
+# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
+# Value range is [1,255], default value is 10.
+*/
+LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
+ "Milliseconds driver will wait between polling FCP ring");
+
+/*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+ "Maximum time to wait for task management commands to complete");
+/*
+# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
+# support this feature
+# 0 = MSI disabled
+# 1 = MSI enabled
+# 2 = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
+*/
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+ "MSI-X (2), if possible");
+
+/*
+# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
+#
+# Value range is [1,7]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+ LPFC_FCP_IO_CHAN_MAX,
+ "Set the number of FCP I/O channels");
+
+/*
+# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
+# 0 = HBA resets disabled
+# 1 = HBA resets enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+
+/*
+# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer..
+# 0 = HBA Heartbeat disabled
+# 1 = HBA Heartbeat enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
+
+/*
+# lpfc_EnableXLane: Enable Express Lane Feature
+# 0x0 Express Lane Feature disabled
+# 0x1 Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
+# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
+# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
+# 0 = BlockGuard disabled (default)
+# 1 = BlockGuard enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
+
+/*
+# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
+# 0 = disabled (default)
+# 1 = enabled
+# Value range is [0,1]. Default value is 0.
+#
+# This feature in under investigation and may be supported in the future.
+*/
+unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
+
+/*
+# lpfc_prot_mask: i
+# - Bit mask of host protection capabilities used to register with the
+# SCSI mid-layer
+# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
+# - Allows you to ultimately specify which profiles to use
+# - Default will result in registering capabilities for all profiles.
+# - SHOST_DIF_TYPE1_PROTECTION 1
+# HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
+# - SHOST_DIX_TYPE0_PROTECTION 8
+# HBA supports DIX Type 0: Host to HBA protection only
+# - SHOST_DIX_TYPE1_PROTECTION 16
+# HBA supports DIX Type 1: Host to HBA Type 1 protection
+#
+*/
+unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
+
+module_param(lpfc_prot_mask, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+
+/*
+# lpfc_prot_guard: i
+# - Bit mask of protection guard types to register with the SCSI mid-layer
+# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
+# - Allows you to ultimately specify which profiles to use
+# - Default will result in registering capabilities for all guard types
+#
+*/
+unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
+module_param(lpfc_prot_guard, byte, S_IRUGO);
+MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to non-zero value, initial Nport discovery is
+ * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion
+ * when Clean Address bit is cleared in FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+ "Delay NPort discovery when Clean Address bit is cleared. "
+ "Allowed values: 0,1.");
+
+/*
+ * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
+ * This value can be set to values between 64 and 4096. The default value is
+ * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
+ * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
+ */
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+ LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+
+/*
+ * This parameter will be depricated, the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+ LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+ "Max Protection Scatter Gather Segment Count");
+
+struct device_attribute *lpfc_hba_attrs[] = {
+ &dev_attr_bg_info,
+ &dev_attr_bg_guard_err,
+ &dev_attr_bg_apptag_err,
+ &dev_attr_bg_reftag_err,
+ &dev_attr_info,
+ &dev_attr_serialnum,
+ &dev_attr_modeldesc,
+ &dev_attr_modelname,
+ &dev_attr_programtype,
+ &dev_attr_portnum,
+ &dev_attr_fwrev,
+ &dev_attr_hdw,
+ &dev_attr_option_rom_version,
+ &dev_attr_link_state,
+ &dev_attr_num_discovered_ports,
+ &dev_attr_menlo_mgmt_mode,
+ &dev_attr_lpfc_drvr_version,
+ &dev_attr_lpfc_enable_fip,
+ &dev_attr_lpfc_temp_sensor,
+ &dev_attr_lpfc_log_verbose,
+ &dev_attr_lpfc_lun_queue_depth,
+ &dev_attr_lpfc_tgt_queue_depth,
+ &dev_attr_lpfc_hba_queue_depth,
+ &dev_attr_lpfc_peer_port_login,
+ &dev_attr_lpfc_nodev_tmo,
+ &dev_attr_lpfc_devloss_tmo,
+ &dev_attr_lpfc_fcp_class,
+ &dev_attr_lpfc_use_adisc,
+ &dev_attr_lpfc_first_burst_size,
+ &dev_attr_lpfc_ack0,
+ &dev_attr_lpfc_topology,
+ &dev_attr_lpfc_scan_down,
+ &dev_attr_lpfc_link_speed,
+ &dev_attr_lpfc_fcp_io_sched,
+ &dev_attr_lpfc_fcp2_no_tgt_reset,
+ &dev_attr_lpfc_cr_delay,
+ &dev_attr_lpfc_cr_count,
+ &dev_attr_lpfc_multi_ring_support,
+ &dev_attr_lpfc_multi_ring_rctl,
+ &dev_attr_lpfc_multi_ring_type,
+ &dev_attr_lpfc_fdmi_on,
+ &dev_attr_lpfc_max_luns,
+ &dev_attr_lpfc_enable_npiv,
+ &dev_attr_lpfc_fcf_failover_policy,
+ &dev_attr_lpfc_enable_rrq,
+ &dev_attr_nport_evt_cnt,
+ &dev_attr_board_mode,
+ &dev_attr_max_vpi,
+ &dev_attr_used_vpi,
+ &dev_attr_max_rpi,
+ &dev_attr_used_rpi,
+ &dev_attr_max_xri,
+ &dev_attr_used_xri,
+ &dev_attr_npiv_info,
+ &dev_attr_issue_reset,
+ &dev_attr_lpfc_poll,
+ &dev_attr_lpfc_poll_tmo,
+ &dev_attr_lpfc_task_mgmt_tmo,
+ &dev_attr_lpfc_use_msi,
+ &dev_attr_lpfc_fcp_imax,
+ &dev_attr_lpfc_fcp_cpu_map,
+ &dev_attr_lpfc_fcp_io_channel,
+ &dev_attr_lpfc_enable_bg,
+ &dev_attr_lpfc_soft_wwnn,
+ &dev_attr_lpfc_soft_wwpn,
+ &dev_attr_lpfc_soft_wwn_enable,
+ &dev_attr_lpfc_enable_hba_reset,
+ &dev_attr_lpfc_enable_hba_heartbeat,
+ &dev_attr_lpfc_EnableXLane,
+ &dev_attr_lpfc_XLanePriority,
+ &dev_attr_lpfc_xlane_lun,
+ &dev_attr_lpfc_xlane_tgt,
+ &dev_attr_lpfc_xlane_vpt,
+ &dev_attr_lpfc_xlane_lun_state,
+ &dev_attr_lpfc_xlane_lun_status,
+ &dev_attr_lpfc_sg_seg_cnt,
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
+ &dev_attr_lpfc_prot_sg_seg_cnt,
+ &dev_attr_lpfc_aer_support,
+ &dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_sriov_nr_virtfn,
+ &dev_attr_lpfc_req_fw_upgrade,
+ &dev_attr_lpfc_suppress_link_up,
+ &dev_attr_lpfc_iocb_cnt,
+ &dev_attr_iocb_hw,
+ &dev_attr_txq_hw,
+ &dev_attr_txcmplq_hw,
+ &dev_attr_lpfc_fips_level,
+ &dev_attr_lpfc_fips_rev,
+ &dev_attr_lpfc_dss,
+ &dev_attr_lpfc_sriov_hw_max_virtfn,
+ &dev_attr_protocol,
+ &dev_attr_lpfc_xlane_supported,
+ NULL,
+};
+
+struct device_attribute *lpfc_vport_attrs[] = {
+ &dev_attr_info,
+ &dev_attr_link_state,
+ &dev_attr_num_discovered_ports,
+ &dev_attr_lpfc_drvr_version,
+ &dev_attr_lpfc_log_verbose,
+ &dev_attr_lpfc_lun_queue_depth,
+ &dev_attr_lpfc_tgt_queue_depth,
+ &dev_attr_lpfc_nodev_tmo,
+ &dev_attr_lpfc_devloss_tmo,
+ &dev_attr_lpfc_hba_queue_depth,
+ &dev_attr_lpfc_peer_port_login,
+ &dev_attr_lpfc_restrict_login,
+ &dev_attr_lpfc_fcp_class,
+ &dev_attr_lpfc_use_adisc,
+ &dev_attr_lpfc_first_burst_size,
+ &dev_attr_lpfc_fdmi_on,
+ &dev_attr_lpfc_max_luns,
+ &dev_attr_nport_evt_cnt,
+ &dev_attr_npiv_info,
+ &dev_attr_lpfc_enable_da_id,
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
+ &dev_attr_lpfc_static_vport,
+ &dev_attr_lpfc_fips_level,
+ &dev_attr_lpfc_fips_rev,
+ NULL,
+};
+
+/**
+ * sysfs_ctlreg_write - Write method for writing to ctlreg
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * -EPERM adapter is offline
+ * value of count, buf contents written
+ **/
+static ssize_t
+sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t buf_off;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ return -EPERM;
+
+ if ((off + count) > FF_REG_AREA_SIZE)
+ return -ERANGE;
+
+ if (count <= LPFC_REG_WRITE_KEY_SIZE)
+ return 0;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+ /* This is to protect HBA registers from accidental writes. */
+ if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
+ return -EINVAL;
+
+ if (!(vport->fc_flag & FC_OFFLINE_MODE))
+ return -EPERM;
+
+ spin_lock_irq(&phba->hbalock);
+ for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
+ buf_off += sizeof(uint32_t))
+ writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
+ phba->ctrl_regs_memmap_p + off + buf_off);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ return count;
+}
+
+/**
+ * sysfs_ctlreg_read - Read method for reading from ctlreg
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: if successful contains the data from the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to read data into buf.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * value of count, buf contents read
+ **/
+static ssize_t
+sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t buf_off;
+ uint32_t * tmp_ptr;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ return -EPERM;
+
+ if (off > FF_REG_AREA_SIZE)
+ return -ERANGE;
+
+ if ((off + count) > FF_REG_AREA_SIZE)
+ count = FF_REG_AREA_SIZE - off;
+
+ if (count == 0) return 0;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+
+ for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
+ tmp_ptr = (uint32_t *)(buf + buf_off);
+ *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_ctlreg_attr = {
+ .attr = {
+ .name = "ctlreg",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 256,
+ .read = sysfs_ctlreg_read,
+ .write = sysfs_ctlreg_write,
+};
+
+/**
+ * sysfs_mbox_write - Write method for writing information via mbox
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
+ *
+ * Returns:
+ * -EPERM operation not permitted
+ **/
+static ssize_t
+sysfs_mbox_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ return -EPERM;
+}
+
+/**
+ * sysfs_mbox_read - Read method for reading information via mbox
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be read from sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
+ *
+ * Returns:
+ * -EPERM operation not permitted
+ **/
+static ssize_t
+sysfs_mbox_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ return -EPERM;
+}
+
+static struct bin_attribute sysfs_mbox_attr = {
+ .attr = {
+ .name = "mbox",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = MAILBOX_SYSFS_MAX,
+ .read = sysfs_mbox_read,
+ .write = sysfs_mbox_write,
+};
+
+/**
+ * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
+ * @vport: address of lpfc vport structure.
+ *
+ * Return codes:
+ * zero on success
+ * error return code from sysfs_create_bin_file()
+ **/
+int
+lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int error;
+
+ error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
+
+ /* Virtual ports do not need ctrl_reg and mbox */
+ if (error || vport->port_type == LPFC_NPIV_PORT)
+ goto out;
+
+ error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+ &sysfs_ctlreg_attr);
+ if (error)
+ goto out_remove_stat_attr;
+
+ error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+ &sysfs_mbox_attr);
+ if (error)
+ goto out_remove_ctlreg_attr;
+
+ return 0;
+out_remove_ctlreg_attr:
+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+out_remove_stat_attr:
+ sysfs_remove_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
+out:
+ return error;
+}
+
+/**
+ * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
+ * @vport: address of lpfc vport structure.
+ **/
+void
+lpfc_free_sysfs_attr(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ sysfs_remove_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
+ /* Virtual ports do not need ctrl_reg and mbox */
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return;
+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+}
+
+
+/*
+ * Dynamic FC Host Attributes Support
+ */
+
+/**
+ * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+ /* note: fc_myDID already in cpu endianness */
+ fc_host_port_id(shost) = vport->fc_myDID;
+}
+
+/**
+ * lpfc_get_host_port_type - Set the value of the scsi host port type
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ spin_lock_irq(shost->host_lock);
+
+ if (vport->port_type == LPFC_NPIV_PORT) {
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ } else if (lpfc_is_link_up(phba)) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (vport->fc_flag & FC_PUBLIC_LOOP)
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+ } else {
+ if (vport->fc_flag & FC_FABRIC)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ }
+ } else
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+
+ spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_port_state - Set the value of the scsi host port state
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ spin_lock_irq(shost->host_lock);
+
+ if (vport->fc_flag & FC_OFFLINE_MODE)
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ else {
+ switch (phba->link_state) {
+ case LPFC_LINK_UNKNOWN:
+ case LPFC_LINK_DOWN:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LPFC_LINK_UP:
+ case LPFC_CLEAR_LA:
+ case LPFC_HBA_READY:
+ /* Links up, reports port state accordingly */
+ if (vport->port_state < LPFC_VPORT_READY)
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_BYPASSED;
+ else
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_ONLINE;
+ break;
+ case LPFC_HBA_ERROR:
+ fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
+ }
+
+ spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_speed - Set the value of the scsi host speed
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ spin_lock_irq(shost->host_lock);
+
+ if (lpfc_is_link_up(phba)) {
+ switch(phba->fc_linkspeed) {
+ case LPFC_LINK_SPEED_1GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case LPFC_LINK_SPEED_2GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case LPFC_LINK_SPEED_4GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case LPFC_LINK_SPEED_8GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
+ case LPFC_LINK_SPEED_10GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case LPFC_LINK_SPEED_16GHZ:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ } else
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+
+ spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_fabric_name (struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ u64 node_name;
+
+ spin_lock_irq(shost->host_lock);
+
+ if ((vport->port_state > LPFC_FLOGI) &&
+ ((vport->fc_flag & FC_FABRIC) ||
+ ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+ (vport->fc_flag & FC_PUBLIC_LOOP))))
+ node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
+ else
+ /* fabric is local port if there is no F/FL_Port */
+ node_name = 0;
+
+ spin_unlock_irq(shost->host_lock);
+
+ fc_host_fabric_name(shost) = node_name;
+}
+
+/**
+ * lpfc_get_stats - Return statistical information about the adapter
+ * @shost: kernel scsi host pointer.
+ *
+ * Notes:
+ * NULL on error for link down, no mbox pool, sli2 active,
+ * management not allowed, memory allocation error, or mbox error.
+ *
+ * Returns:
+ * NULL for error
+ * address of the adapter host statistics
+ **/
+static struct fc_host_statistics *
+lpfc_get_stats(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct fc_host_statistics *hs = &phba->link_stats;
+ struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ unsigned long seconds;
+ int rc = 0;
+
+ /*
+ * prevent udev from issuing mailbox commands until the port is
+ * configured.
+ */
+ if (phba->link_state < LPFC_LINK_DOWN ||
+ !phba->mbox_mem_pool ||
+ (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
+ return NULL;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+ return NULL;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return NULL;
+ memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->u.mb;
+ pmb->mbxCommand = MBX_READ_STATUS;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
+
+ if (vport->fc_flag & FC_OFFLINE_MODE)
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return NULL;
+ }
+
+ memset(hs, 0, sizeof (struct fc_host_statistics));
+
+ hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+ /*
+ * The MBX_READ_STATUS returns tx_k_bytes which has to
+ * converted to words
+ */
+ hs->tx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+ * (uint64_t)256);
+ hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+ hs->rx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+ * (uint64_t)256);
+
+ memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+ pmb->mbxCommand = MBX_READ_LNK_STAT;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
+
+ if (vport->fc_flag & FC_OFFLINE_MODE)
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return NULL;
+ }
+
+ hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+ hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+ hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+ hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+ hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+ hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+ hs->error_frames = pmb->un.varRdLnk.crcCnt;
+
+ hs->link_failure_count -= lso->link_failure_count;
+ hs->loss_of_sync_count -= lso->loss_of_sync_count;
+ hs->loss_of_signal_count -= lso->loss_of_signal_count;
+ hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+ hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+ hs->invalid_crc_count -= lso->invalid_crc_count;
+ hs->error_frames -= lso->error_frames;
+
+ if (phba->hba_flag & HBA_FCOE_MODE) {
+ hs->lip_count = -1;
+ hs->nos_count = (phba->link_events >> 1);
+ hs->nos_count -= lso->link_events;
+ } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ hs->lip_count = (phba->fc_eventTag >> 1);
+ hs->lip_count -= lso->link_events;
+ hs->nos_count = -1;
+ } else {
+ hs->lip_count = -1;
+ hs->nos_count = (phba->fc_eventTag >> 1);
+ hs->nos_count -= lso->link_events;
+ }
+
+ hs->dumped_frames = -1;
+
+ seconds = get_seconds();
+ if (seconds < psli->stats_start)
+ hs->seconds_since_last_reset = seconds +
+ ((unsigned long)-1 - psli->stats_start);
+ else
+ hs->seconds_since_last_reset = seconds - psli->stats_start;
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return hs;
+}
+
+/**
+ * lpfc_reset_stats - Copy the adapter link stats information
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc = 0;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+ return;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return;
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->u.mb;
+ pmb->mbxCommand = MBX_READ_STATUS;
+ pmb->mbxOwner = OWN_HOST;
+ pmb->un.varWords[0] = 0x1; /* reset request */
+ pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb->mbxCommand = MBX_READ_LNK_STAT;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free( pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+ lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+ lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+ lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+ lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+ lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+ lso->error_frames = pmb->un.varRdLnk.crcCnt;
+ if (phba->hba_flag & HBA_FCOE_MODE)
+ lso->link_events = (phba->link_events >> 1);
+ else
+ lso->link_events = (phba->fc_eventTag >> 1);
+
+ psli->stats_start = get_seconds();
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return;
+}
+
+/*
+ * The LPFC driver treats linkdown handling as target loss events so there
+ * are no sysfs handlers for link_down_tmo.
+ */
+
+/**
+ * lpfc_get_node_by_target - Return the nodelist for a target
+ * @starget: kernel scsi target pointer.
+ *
+ * Returns:
+ * address of the node list if found
+ * NULL target not found
+ **/
+static struct lpfc_nodelist *
+lpfc_get_node_by_target(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(shost->host_lock);
+ /* Search for this, mapped, target ID */
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+ starget->id == ndlp->nlp_sid) {
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+ return NULL;
+}
+
+/**
+ * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
+ * @starget: kernel scsi target pointer.
+ **/
+static void
+lpfc_get_starget_port_id(struct scsi_target *starget)
+{
+ struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+ fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
+}
+
+/**
+ * lpfc_get_starget_node_name - Set the target node name
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: Set the target node name to the ndlp node name wwn or zero.
+ **/
+static void
+lpfc_get_starget_node_name(struct scsi_target *starget)
+{
+ struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+ fc_starget_node_name(starget) =
+ ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
+}
+
+/**
+ * lpfc_get_starget_port_name - Set the target port name
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: set the target port name to the ndlp port name wwn or zero.
+ **/
+static void
+lpfc_get_starget_port_name(struct scsi_target *starget)
+{
+ struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+ fc_starget_port_name(starget) =
+ ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
+}
+
+/**
+ * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
+ * @rport: fc rport address.
+ * @timeout: new value for dev loss tmo.
+ *
+ * Description:
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ **/
+static void
+lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+/**
+ * lpfc_rport_show_function - Return rport target information
+ *
+ * Description:
+ * Macro that uses field to generate a function with the name lpfc_show_rport_
+ *
+ * lpfc_show_rport_##field: returns the bytes formatted in buf
+ * @cdev: class converted to an fc_rport.
+ * @buf: on return contains the target_field or zero.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_rport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+lpfc_show_rport_##field (struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(dev); \
+ struct lpfc_rport_data *rdata = rport->hostdata; \
+ return snprintf(buf, sz, format_string, \
+ (rdata->target) ? cast rdata->target->field : 0); \
+}
+
+#define lpfc_rport_rd_attr(field, format_string, sz) \
+ lpfc_rport_show_function(field, format_string, sz, ) \
+static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
+
+/**
+ * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
+ * @fc_vport: The fc_vport who's symbolic name has been changed.
+ *
+ * Description:
+ * This function is called by the transport after the @fc_vport's symbolic name
+ * has been changed. This function re-registers the symbolic name with the
+ * switch to propagate the change into the fabric if the vport is active.
+ **/
+static void
+lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+
+ if (vport->port_state == LPFC_VPORT_READY)
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+}
+
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to set the
+ * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
+ * log message according to the module's lpfc_log_verbose parameter setting
+ * before hba port or vport created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+ phba->cfg_log_verbose = verbose;
+}
+
+struct fc_function_template lpfc_transport_functions = {
+ /* fixed attributes the driver supports */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_symbolic_name = 1,
+
+ /* dynamic attributes the driver supports */
+ .get_host_port_id = lpfc_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = lpfc_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = lpfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = lpfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = lpfc_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ /*
+ * The LPFC driver treats linkdown handling as target loss events
+ * so there are no sysfs handlers for link_down_tmo.
+ */
+
+ .get_fc_host_stats = lpfc_get_stats,
+ .reset_fc_host_stats = lpfc_reset_stats,
+
+ .dd_fcrport_size = sizeof(struct lpfc_rport_data),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_port_id = lpfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_starget_node_name = lpfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = lpfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ .issue_fc_host_lip = lpfc_issue_lip,
+ .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+ .terminate_rport_io = lpfc_terminate_rport_io,
+
+ .dd_fcvport_size = sizeof(struct lpfc_vport *),
+
+ .vport_disable = lpfc_vport_disable,
+
+ .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+
+ .bsg_request = lpfc_bsg_request,
+ .bsg_timeout = lpfc_bsg_timeout,
+};
+
+struct fc_function_template lpfc_vport_transport_functions = {
+ /* fixed attributes the driver supports */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_symbolic_name = 1,
+
+ /* dynamic attributes the driver supports */
+ .get_host_port_id = lpfc_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = lpfc_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = lpfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = lpfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = lpfc_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ /*
+ * The LPFC driver treats linkdown handling as target loss events
+ * so there are no sysfs handlers for link_down_tmo.
+ */
+
+ .get_fc_host_stats = lpfc_get_stats,
+ .reset_fc_host_stats = lpfc_reset_stats,
+
+ .dd_fcrport_size = sizeof(struct lpfc_rport_data),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_port_id = lpfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_starget_node_name = lpfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = lpfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+ .terminate_rport_io = lpfc_terminate_rport_io,
+
+ .vport_disable = lpfc_vport_disable,
+
+ .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+};
+
+/**
+ * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
+ * @phba: lpfc_hba pointer.
+ **/
+void
+lpfc_get_cfgparam(struct lpfc_hba *phba)
+{
+ lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+ lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
+ lpfc_cr_delay_init(phba, lpfc_cr_delay);
+ lpfc_cr_count_init(phba, lpfc_cr_count);
+ lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
+ lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
+ lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
+ lpfc_ack0_init(phba, lpfc_ack0);
+ lpfc_topology_init(phba, lpfc_topology);
+ lpfc_link_speed_init(phba, lpfc_link_speed);
+ lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+ lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
+ lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+ lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
+ lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+ lpfc_use_msi_init(phba, lpfc_use_msi);
+ lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+ lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
+ lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
+ lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
+ lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+ lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ phba->cfg_EnableXLane = 0;
+ lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+ memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+ memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+ phba->cfg_oas_lun_state = 0;
+ phba->cfg_oas_lun_status = 0;
+ phba->cfg_oas_flags = 0;
+ lpfc_enable_bg_init(phba, lpfc_enable_bg);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->cfg_poll = 0;
+ else
+ phba->cfg_poll = lpfc_poll;
+ phba->cfg_soft_wwnn = 0L;
+ phba->cfg_soft_wwpn = 0L;
+ lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
+ lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
+ lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+ lpfc_aer_support_init(phba, lpfc_aer_support);
+ lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
+ lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
+ lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
+ lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+ phba->cfg_enable_dss = 1;
+ return;
+}
+
+/**
+ * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
+ * @vport: lpfc_vport pointer.
+ **/
+void
+lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
+{
+ lpfc_log_verbose_init(vport, lpfc_log_verbose);
+ lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
+ lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
+ lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
+ lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
+ lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
+ lpfc_restrict_login_init(vport, lpfc_restrict_login);
+ lpfc_fcp_class_init(vport, lpfc_fcp_class);
+ lpfc_use_adisc_init(vport, lpfc_use_adisc);
+ lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
+ lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
+ lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
+ lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
+ lpfc_max_luns_init(vport, lpfc_max_luns);
+ lpfc_scan_down_init(vport, lpfc_scan_down);
+ lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
+ return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 000000000..b70506807
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,5410 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/mempool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_bsg.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+struct lpfc_bsg_event {
+ struct list_head node;
+ struct kref kref;
+ wait_queue_head_t wq;
+
+ /* Event type and waiter identifiers */
+ uint32_t type_mask;
+ uint32_t req_id;
+ uint32_t reg_id;
+
+ /* next two flags are here for the auto-delete logic */
+ unsigned long wait_time_stamp;
+ int waiting;
+
+ /* seen and not seen events */
+ struct list_head events_to_get;
+ struct list_head events_to_see;
+
+ /* driver data associated with the job */
+ void *dd_data;
+};
+
+struct lpfc_bsg_iocb {
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_dmabuf *rmp;
+ struct lpfc_nodelist *ndlp;
+};
+
+struct lpfc_bsg_mbox {
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
+ uint8_t *ext; /* extended mailbox data */
+ uint32_t mbOffset; /* from app */
+ uint32_t inExtWLen; /* from app */
+ uint32_t outExtWLen; /* from app */
+};
+
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_dmabuf *rmp;
+};
+
+#define TYPE_EVT 1
+#define TYPE_IOCB 2
+#define TYPE_MBOX 3
+#define TYPE_MENLO 4
+struct bsg_job_data {
+ uint32_t type;
+ struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+ union {
+ struct lpfc_bsg_event *evt;
+ struct lpfc_bsg_iocb iocb;
+ struct lpfc_bsg_mbox mbox;
+ struct lpfc_bsg_menlo menlo;
+ } context_un;
+};
+
+struct event_data {
+ struct list_head node;
+ uint32_t type;
+ uint32_t immed_dat;
+ void *data;
+ uint32_t len;
+};
+
+#define BUF_SZ_4K 4096
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+ ELX_LOOPBACK_XRI_SETUP,
+ ELX_LOOPBACK_DATA,
+};
+
+#define ELX_LOOPBACK_HEADER_SZ \
+ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+
+struct lpfc_dmabufext {
+ struct lpfc_dmabuf dma;
+ uint32_t size;
+ uint32_t flag;
+};
+
+static void
+lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+ struct lpfc_dmabuf *mlast, *next_mlast;
+
+ if (mlist) {
+ list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
+ list) {
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+ list_del(&mlast->list);
+ kfree(mlast);
+ }
+ lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+ kfree(mlist);
+ }
+ return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
+ int outbound_buffers, struct ulp_bde64 *bpl,
+ int *bpl_entries)
+{
+ struct lpfc_dmabuf *mlist = NULL;
+ struct lpfc_dmabuf *mp;
+ unsigned int bytes_left = size;
+
+ /* Verify we can support the size specified */
+ if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
+ return NULL;
+
+ /* Determine the number of dma buffers to allocate */
+ *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
+ size/LPFC_BPL_SIZE);
+
+ /* Allocate dma buffer and place in BPL passed */
+ while (bytes_left) {
+ /* Allocate dma buffer */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ if (mlist)
+ lpfc_free_bsg_buffers(phba, mlist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+
+ if (!mp->virt) {
+ kfree(mp);
+ if (mlist)
+ lpfc_free_bsg_buffers(phba, mlist);
+ return NULL;
+ }
+
+ /* Queue it to a linked list */
+ if (!mlist)
+ mlist = mp;
+ else
+ list_add_tail(&mp->list, &mlist->list);
+
+ /* Add buffer to buffer pointer list */
+ if (outbound_buffers)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->tus.f.bdeSize = (uint16_t)
+ (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
+ bytes_left);
+ bytes_left -= bpl->tus.f.bdeSize;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+ }
+ return mlist;
+}
+
+static unsigned int
+lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
+ struct fc_bsg_buffer *bsg_buffers,
+ unsigned int bytes_to_transfer, int to_buffers)
+{
+
+ struct lpfc_dmabuf *mp;
+ unsigned int transfer_bytes, bytes_copied = 0;
+ unsigned int sg_offset, dma_offset;
+ unsigned char *dma_address, *sg_address;
+ LIST_HEAD(temp_list);
+ struct sg_mapping_iter miter;
+ unsigned long flags;
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ bool sg_valid;
+
+ list_splice_init(&dma_buffers->list, &temp_list);
+ list_add(&dma_buffers->list, &temp_list);
+ sg_offset = 0;
+ if (to_buffers)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+ sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+ sg_flags);
+ local_irq_save(flags);
+ sg_valid = sg_miter_next(&miter);
+ list_for_each_entry(mp, &temp_list, list) {
+ dma_offset = 0;
+ while (bytes_to_transfer && sg_valid &&
+ (dma_offset < LPFC_BPL_SIZE)) {
+ dma_address = mp->virt + dma_offset;
+ if (sg_offset) {
+ /* Continue previous partial transfer of sg */
+ sg_address = miter.addr + sg_offset;
+ transfer_bytes = miter.length - sg_offset;
+ } else {
+ sg_address = miter.addr;
+ transfer_bytes = miter.length;
+ }
+ if (bytes_to_transfer < transfer_bytes)
+ transfer_bytes = bytes_to_transfer;
+ if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
+ transfer_bytes = LPFC_BPL_SIZE - dma_offset;
+ if (to_buffers)
+ memcpy(dma_address, sg_address, transfer_bytes);
+ else
+ memcpy(sg_address, dma_address, transfer_bytes);
+ dma_offset += transfer_bytes;
+ sg_offset += transfer_bytes;
+ bytes_to_transfer -= transfer_bytes;
+ bytes_copied += transfer_bytes;
+ if (sg_offset >= miter.length) {
+ sg_offset = 0;
+ sg_valid = sg_miter_next(&miter);
+ }
+ }
+ }
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+ list_del_init(&dma_buffers->list);
+ list_splice(&temp_list, &dma_buffers->list);
+ return bytes_copied;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_send_mgmt_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp, *cmp, *rmp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_bsg_iocb *iocb;
+ unsigned long flags;
+ unsigned int rsp_size;
+ int rc = 0;
+
+ dd_data = cmdiocbq->context1;
+
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ iocb = &dd_data->context_un.iocb;
+ ndlp = iocb->ndlp;
+ rmp = iocb->rmp;
+ cmp = cmdiocbq->context2;
+ bmp = cmdiocbq->context3;
+ rsp = &rspiocbq->iocb;
+
+ /* Copy the completed data or set the error status */
+
+ if (job) {
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else {
+ rc = -EACCES;
+ }
+ } else {
+ rsp_size = rsp->un.genreq64.bdl.bdeSize;
+ job->reply->reply_payload_rcv_len =
+ lpfc_bsg_copy_data(rmp, &job->reply_payload,
+ rsp_size, 0);
+ }
+ }
+
+ lpfc_free_bsg_buffers(phba, cmp);
+ lpfc_free_bsg_buffers(phba, rmp);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_nlp_put(ndlp);
+ kfree(dd_data);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct ulp_bde64 *bpl = NULL;
+ uint32_t timeout;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ IOCB_t *cmd;
+ struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
+ int request_nseg;
+ int reply_nseg;
+ struct bsg_job_data *dd_data;
+ unsigned long flags;
+ uint32_t creg_val;
+ int rc = 0;
+ int iocb_stat;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2733 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
+ if (!lpfc_nlp_get(ndlp)) {
+ rc = -ENODEV;
+ goto no_ndlp;
+ }
+
+ if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
+ rc = -ENODEV;
+ goto free_ndlp;
+ }
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = -ENOMEM;
+ goto free_ndlp;
+ }
+
+ cmd = &cmdiocbq->iocb;
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto free_cmdiocbq;
+ }
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
+ rc = -ENOMEM;
+ goto free_bmp;
+ }
+
+ INIT_LIST_HEAD(&bmp->list);
+
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+ cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+ 1, bpl, &request_nseg);
+ if (!cmp) {
+ rc = -ENOMEM;
+ goto free_bmp;
+ }
+ lpfc_bsg_copy_data(cmp, &job->request_payload,
+ job->request_payload.payload_len, 1);
+
+ bpl += request_nseg;
+ reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+ rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+ bpl, &reply_nseg);
+ if (!rmp) {
+ rc = -ENOMEM;
+ goto free_cmp;
+ }
+
+ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize =
+ (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ cmd->ulpOwner = OWN_CHIP;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->context3 = bmp;
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ timeout = phba->fc_ratov * 2;
+ cmd->ulpTimeout = timeout;
+
+ cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->context1 = dd_data;
+ cmdiocbq->context2 = cmp;
+ cmdiocbq->context3 = bmp;
+ cmdiocbq->context_un.ndlp = ndlp;
+ dd_data->type = TYPE_IOCB;
+ dd_data->set_job = job;
+ dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+ dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.rmp = rmp;
+ job->dd_data = dd_data;
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -EIO ;
+ goto free_rmp;
+ }
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+
+ if (iocb_stat == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed yet */
+ if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 0; /* done for now */
+ } else if (iocb_stat == IOCB_BUSY) {
+ rc = -EAGAIN;
+ } else {
+ rc = -EIO;
+ }
+
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
+
+free_rmp:
+ lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+ lpfc_free_bsg_buffers(phba, cmp);
+free_bmp:
+ if (bmp->virt)
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+free_cmdiocbq:
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_ndlp:
+ lpfc_nlp_put(ndlp);
+no_ndlp:
+ kfree(dd_data);
+no_dd_data:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_rport_els_cmp function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from other thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
+ struct fc_bsg_ctels_reply *els_reply;
+ uint8_t *rjt_data;
+ unsigned long flags;
+ unsigned int rsp_size;
+ int rc = 0;
+
+ dd_data = cmdiocbq->context1;
+ ndlp = dd_data->context_un.iocb.ndlp;
+ cmdiocbq->context1 = ndlp;
+
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ rsp = &rspiocbq->iocb;
+ pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+ prsp = (struct lpfc_dmabuf *)pcmd->list.next;
+
+ /* Copy the completed job data or determine the job status if job is
+ * still active
+ */
+
+ if (job) {
+ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+ rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ prsp->virt,
+ rsp_size);
+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ job->reply->reply_payload_rcv_len =
+ sizeof(struct fc_bsg_ctels_reply);
+ /* LS_RJT data returned in word 4 */
+ rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply->status = FC_CTELS_STATUS_REJECT;
+ els_reply->rjt_data.action = rjt_data[3];
+ els_reply->rjt_data.reason_code = rjt_data[2];
+ els_reply->rjt_data.reason_explanation = rjt_data[1];
+ els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else {
+ rc = -EIO;
+ }
+ }
+
+ lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, cmdiocbq);
+ kfree(dd_data);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_rport_els - send an ELS command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_rport_els(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+ uint32_t elscmd;
+ uint32_t cmdsize;
+ struct lpfc_iocbq *cmdiocbq;
+ uint16_t rpi = 0;
+ struct bsg_job_data *dd_data;
+ unsigned long flags;
+ uint32_t creg_val;
+ int rc = 0;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* verify the els command is not greater than the
+ * maximum ELS transfer size.
+ */
+
+ if (job->request_payload.payload_len > FCELSSIZE) {
+ rc = -EINVAL;
+ goto no_dd_data;
+ }
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2735 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
+ elscmd = job->request->rqst_data.r_els.els_code;
+ cmdsize = job->request_payload.payload_len;
+
+ if (!lpfc_nlp_get(ndlp)) {
+ rc = -ENODEV;
+ goto free_dd_data;
+ }
+
+ /* We will use the allocated dma buffers by prep els iocb for command
+ * and response to ensure if the job times out and the request is freed,
+ * we won't be dma into memory that is no longer allocated to for the
+ * request.
+ */
+
+ cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
+ ndlp->nlp_DID, elscmd);
+ if (!cmdiocbq) {
+ rc = -EIO;
+ goto release_ndlp;
+ }
+
+ rpi = ndlp->nlp_rpi;
+
+ /* Transfer the request payload to allocated command dma buffer */
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+ cmdsize);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ else
+ cmdiocbq->iocb.ulpContext = rpi;
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context1 = dd_data;
+ cmdiocbq->context_un.ndlp = ndlp;
+ cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ dd_data->type = TYPE_IOCB;
+ dd_data->set_job = job;
+ dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+ dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.rmp = NULL;
+ job->dd_data = dd_data;
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -EIO;
+ goto linkdown_err;
+ }
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 0; /* done for now */
+ } else if (rc == IOCB_BUSY) {
+ rc = -EAGAIN;
+ } else {
+ rc = -EIO;
+ }
+
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
+
+linkdown_err:
+ cmdiocbq->context1 = ndlp;
+ lpfc_els_free_iocb(phba, cmdiocbq);
+
+release_ndlp:
+ lpfc_nlp_put(ndlp);
+
+free_dd_data:
+ kfree(dd_data);
+
+no_dd_data:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_event_free - frees an allocated event structure
+ * @kref: Pointer to a kref.
+ *
+ * Called from kref_put. Back cast the kref into an event structure address.
+ * Free any events to get, delete associated nodes, free any events to see,
+ * free any data then free the event itself.
+ **/
+static void
+lpfc_bsg_event_free(struct kref *kref)
+{
+ struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
+ kref);
+ struct event_data *ed;
+
+ list_del(&evt->node);
+
+ while (!list_empty(&evt->events_to_get)) {
+ ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
+ list_del(&ed->node);
+ kfree(ed->data);
+ kfree(ed);
+ }
+
+ while (!list_empty(&evt->events_to_see)) {
+ ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
+ list_del(&ed->node);
+ kfree(ed->data);
+ kfree(ed);
+ }
+
+ kfree(evt->dd_data);
+ kfree(evt);
+}
+
+/**
+ * lpfc_bsg_event_ref - increments the kref for an event
+ * @evt: Pointer to an event structure.
+ **/
+static inline void
+lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
+{
+ kref_get(&evt->kref);
+}
+
+/**
+ * lpfc_bsg_event_unref - Uses kref_put to free an event structure
+ * @evt: Pointer to an event structure.
+ **/
+static inline void
+lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
+{
+ kref_put(&evt->kref, lpfc_bsg_event_free);
+}
+
+/**
+ * lpfc_bsg_event_new - allocate and initialize a event structure
+ * @ev_mask: Mask of events.
+ * @ev_reg_id: Event reg id.
+ * @ev_req_id: Event request id.
+ **/
+static struct lpfc_bsg_event *
+lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
+{
+ struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+
+ if (!evt)
+ return NULL;
+
+ INIT_LIST_HEAD(&evt->events_to_get);
+ INIT_LIST_HEAD(&evt->events_to_see);
+ evt->type_mask = ev_mask;
+ evt->req_id = ev_req_id;
+ evt->reg_id = ev_reg_id;
+ evt->wait_time_stamp = jiffies;
+ evt->dd_data = NULL;
+ init_waitqueue_head(&evt->wq);
+ kref_init(&evt->kref);
+ return evt;
+}
+
+/**
+ * diag_cmd_data_free - Frees an lpfc dma buffer extension
+ * @phba: Pointer to HBA context object.
+ * @mlist: Pointer to an lpfc dma buffer extension.
+ **/
+static int
+diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
+{
+ struct lpfc_dmabufext *mlast;
+ struct pci_dev *pcidev;
+ struct list_head head, *curr, *next;
+
+ if ((!mlist) || (!lpfc_is_link_up(phba) &&
+ (phba->link_flag & LS_LOOPBACK_MODE))) {
+ return 0;
+ }
+
+ pcidev = phba->pcidev;
+ list_add_tail(&head, &mlist->dma.list);
+
+ list_for_each_safe(curr, next, &head) {
+ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
+ if (mlast->dma.virt)
+ dma_free_coherent(&pcidev->dev,
+ mlast->size,
+ mlast->dma.virt,
+ mlast->dma.phys);
+ kfree(mlast);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
+ * @phba:
+ * @pring:
+ * @piocbq:
+ *
+ * This function is called when an unsolicited CT command is received. It
+ * forwards the event to any processes registered to receive CT events.
+ **/
+int
+lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocbq)
+{
+ uint32_t evt_req_id = 0;
+ uint32_t cmd;
+ uint32_t len;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct lpfc_bsg_event *evt;
+ struct event_data *evt_dat = NULL;
+ struct lpfc_iocbq *iocbq;
+ size_t offset = 0;
+ struct list_head head;
+ struct ulp_bde64 *bde;
+ dma_addr_t dma_addr;
+ int i;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+ struct lpfc_hbq_entry *hbqe;
+ struct lpfc_sli_ct_request *ct_req;
+ struct fc_bsg_job *job = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ unsigned long flags;
+ int size = 0;
+
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+
+ if (piocbq->iocb.ulpBdeCount == 0 ||
+ piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
+ goto error_ct_unsol_exit;
+
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
+ goto error_ct_unsol_exit;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+ dmabuf = bdeBuf1;
+ else {
+ dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
+ piocbq->iocb.un.cont64[0].addrLow);
+ dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
+ }
+ if (dmabuf == NULL)
+ goto error_ct_unsol_exit;
+ ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+ evt_req_id = ct_req->FsType;
+ cmd = ct_req->CommandResponse.bits.CmdRsp;
+ len = ct_req->CommandResponse.bits.Size;
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ if (!(evt->type_mask & FC_REG_CT_EVENT) ||
+ evt->req_id != evt_req_id)
+ continue;
+
+ lpfc_bsg_event_ref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
+ if (evt_dat == NULL) {
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2614 Memory allocation failed for "
+ "CT event\n");
+ break;
+ }
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ /* take accumulated byte count from the last iocbq */
+ iocbq = list_entry(head.prev, typeof(*iocbq), list);
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ } else {
+ list_for_each_entry(iocbq, &head, list) {
+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ evt_dat->len +=
+ iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ }
+ }
+
+ evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
+ if (evt_dat->data == NULL) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2615 Memory allocation failed for "
+ "CT event data, size %d\n",
+ evt_dat->len);
+ kfree(evt_dat);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ goto error_ct_unsol_exit;
+ }
+
+ list_for_each_entry(iocbq, &head, list) {
+ size = 0;
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ bdeBuf1 = iocbq->context2;
+ bdeBuf2 = iocbq->context3;
+ }
+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ if (phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED) {
+ if (i == 0) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &iocbq->iocb.un.ulpWord[0];
+ size = hbqe->bde.tus.f.bdeSize;
+ dmabuf = bdeBuf1;
+ } else if (i == 1) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &iocbq->iocb.unsli3.
+ sli3Words[4];
+ size = hbqe->bde.tus.f.bdeSize;
+ dmabuf = bdeBuf2;
+ }
+ if ((offset + size) > evt_dat->len)
+ size = evt_dat->len - offset;
+ } else {
+ size = iocbq->iocb.un.cont64[i].
+ tus.f.bdeSize;
+ bde = &iocbq->iocb.un.cont64[i];
+ dma_addr = getPaddr(bde->addrHigh,
+ bde->addrLow);
+ dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring, dma_addr);
+ }
+ if (!dmabuf) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_LIBDFC, "2616 No dmabuf "
+ "found for iocbq 0x%p\n",
+ iocbq);
+ kfree(evt_dat->data);
+ kfree(evt_dat);
+ spin_lock_irqsave(&phba->ct_ev_lock,
+ flags);
+ lpfc_bsg_event_unref(evt);
+ spin_unlock_irqrestore(
+ &phba->ct_ev_lock, flags);
+ goto error_ct_unsol_exit;
+ }
+ memcpy((char *)(evt_dat->data) + offset,
+ dmabuf->virt, size);
+ offset += size;
+ if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
+ !(phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED)) {
+ lpfc_sli_ringpostbuf_put(phba, pring,
+ dmabuf);
+ } else {
+ switch (cmd) {
+ case ELX_LOOPBACK_DATA:
+ if (phba->sli_rev <
+ LPFC_SLI_REV4)
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext
+ *)dmabuf);
+ break;
+ case ELX_LOOPBACK_XRI_SETUP:
+ if ((phba->sli_rev ==
+ LPFC_SLI_REV2) ||
+ (phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED
+ )) {
+ lpfc_in_buf_free(phba,
+ dmabuf);
+ } else {
+ lpfc_post_buffer(phba,
+ pring,
+ 1);
+ }
+ break;
+ default:
+ if (!(phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba,
+ pring,
+ 1);
+ break;
+ }
+ }
+ }
+ }
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ evt_dat->immed_dat = phba->ctx_idx;
+ phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
+ /* Provide warning for over-run of the ct_ctx array */
+ if (phba->ct_ctx[evt_dat->immed_dat].valid ==
+ UNSOL_VALID)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2717 CT context array entry "
+ "[%d] over-run: oxid:x%x, "
+ "sid:x%x\n", phba->ctx_idx,
+ phba->ct_ctx[
+ evt_dat->immed_dat].oxid,
+ phba->ct_ctx[
+ evt_dat->immed_dat].SID);
+ phba->ct_ctx[evt_dat->immed_dat].rxid =
+ piocbq->iocb.ulpContext;
+ phba->ct_ctx[evt_dat->immed_dat].oxid =
+ piocbq->iocb.unsli3.rcvsli3.ox_id;
+ phba->ct_ctx[evt_dat->immed_dat].SID =
+ piocbq->iocb.un.rcvels.remoteID;
+ phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
+ } else
+ evt_dat->immed_dat = piocbq->iocb.ulpContext;
+
+ evt_dat->type = FC_REG_CT_EVENT;
+ list_add(&evt_dat->node, &evt->events_to_see);
+ if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
+ wake_up_interruptible(&evt->wq);
+ lpfc_bsg_event_unref(evt);
+ break;
+ }
+
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+
+ dd_data = (struct bsg_job_data *)evt->dd_data;
+ job = dd_data->set_job;
+ dd_data->set_job = NULL;
+ lpfc_bsg_event_unref(evt);
+ if (job) {
+ job->reply->reply_payload_rcv_len = size;
+ /* make error code available to userspace */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ /* complete the job back to userspace */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job->job_done(job);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+error_ct_unsol_exit:
+ if (!list_empty(&head))
+ list_del(&head);
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (evt_req_id == SLI_CT_ELX_LOOPBACK))
+ return 0;
+ return 1;
+}
+
+/**
+ * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles abort to the CT command toward management plane
+ * for SLI4 port.
+ *
+ * If the pending context of a CT command to management plane present, clears
+ * such context and returns 1 for handled; otherwise, it returns 0 indicating
+ * no context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header fc_hdr;
+ struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+ int ctx_idx, handled = 0;
+ uint16_t oxid, rxid;
+ uint32_t sid;
+
+ memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+ sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+ oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+ rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+ for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+ if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+ continue;
+ if (phba->ct_ctx[ctx_idx].rxid != rxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].oxid != oxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].SID != sid)
+ continue;
+ phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+ handled = 1;
+ }
+ return handled;
+}
+
+/**
+ * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
+ * @job: SET_EVENT fc_bsg_job
+ **/
+static int
+lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct set_ct_event *event_req;
+ struct lpfc_bsg_event *evt;
+ int rc = 0;
+ struct bsg_job_data *dd_data = NULL;
+ uint32_t ev_mask;
+ unsigned long flags;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2612 Received SET_CT_EVENT below minimum "
+ "size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_req = (struct set_ct_event *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
+ FC_REG_EVENT_MASK);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ if (evt->reg_id == event_req->ev_reg_id) {
+ lpfc_bsg_event_ref(evt);
+ evt->wait_time_stamp = jiffies;
+ dd_data = (struct bsg_job_data *)evt->dd_data;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ if (&evt->node == &phba->ct_ev_waiters) {
+ /* no event waiting struct yet - first call */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (dd_data == NULL) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2734 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
+ event_req->ev_req_id);
+ if (!evt) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2617 Failed allocation of event "
+ "waiter\n");
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ dd_data->type = TYPE_EVT;
+ dd_data->set_job = NULL;
+ dd_data->context_un.evt = evt;
+ evt->dd_data = (void *)dd_data;
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_add(&evt->node, &phba->ct_ev_waiters);
+ lpfc_bsg_event_ref(evt);
+ evt->wait_time_stamp = jiffies;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ }
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ evt->waiting = 1;
+ dd_data->set_job = job; /* for unsolicited command */
+ job->dd_data = dd_data; /* for fc transport timeout callback*/
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return 0; /* call job done later */
+
+job_error:
+ if (dd_data != NULL)
+ kfree(dd_data);
+
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
+ * @job: GET_EVENT fc_bsg_job
+ **/
+static int
+lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct get_ct_event *event_req;
+ struct get_ct_event_reply *event_reply;
+ struct lpfc_bsg_event *evt, *evt_next;
+ struct event_data *evt_dat = NULL;
+ unsigned long flags;
+ uint32_t rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2613 Received GET_CT_EVENT request below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_req = (struct get_ct_event *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ event_reply = (struct get_ct_event_reply *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
+ if (evt->reg_id == event_req->ev_reg_id) {
+ if (list_empty(&evt->events_to_get))
+ break;
+ lpfc_bsg_event_ref(evt);
+ evt->wait_time_stamp = jiffies;
+ evt_dat = list_entry(evt->events_to_get.prev,
+ struct event_data, node);
+ list_del(&evt_dat->node);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* The app may continue to ask for event data until it gets
+ * an error indicating that there isn't anymore
+ */
+ if (evt_dat == NULL) {
+ job->reply->reply_payload_rcv_len = 0;
+ rc = -ENOENT;
+ goto job_error;
+ }
+
+ if (evt_dat->len > job->request_payload.payload_len) {
+ evt_dat->len = job->request_payload.payload_len;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2618 Truncated event data at %d "
+ "bytes\n",
+ job->request_payload.payload_len);
+ }
+
+ event_reply->type = evt_dat->type;
+ event_reply->immed_data = evt_dat->immed_dat;
+ if (evt_dat->len > 0)
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ evt_dat->data, evt_dat->len);
+ else
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (evt_dat) {
+ kfree(evt_dat->data);
+ kfree(evt_dat);
+ }
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job->dd_data = NULL;
+ job->reply->result = 0;
+ job->job_done(job);
+ return 0;
+
+job_error:
+ job->dd_data = NULL;
+ job->reply->result = rc;
+ return rc;
+}
+
+/**
+ * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_issue_ct_rsp_cmp function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from other thread which
+ * cleans up the SLI layer objects.
+ * This function copy the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp, *cmp;
+ struct lpfc_nodelist *ndlp;
+ unsigned long flags;
+ int rc = 0;
+
+ dd_data = cmdiocbq->context1;
+
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ ndlp = dd_data->context_un.iocb.ndlp;
+ cmp = cmdiocbq->context2;
+ bmp = cmdiocbq->context3;
+ rsp = &rspiocbq->iocb;
+
+ /* Copy the completed job data or set the error status */
+
+ if (job) {
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else {
+ rc = -EACCES;
+ }
+ } else {
+ job->reply->reply_payload_rcv_len = 0;
+ }
+ }
+
+ lpfc_free_bsg_buffers(phba, cmp);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_nlp_put(ndlp);
+ kfree(dd_data);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
+ return;
+}
+
+/**
+ * lpfc_issue_ct_rsp - issue a ct response
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @tag: tag index value into the ports context exchange array.
+ * @bmp: Pointer to a dma buffer descriptor.
+ * @num_entry: Number of enties in the bde.
+ **/
+static int
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+ struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
+ int num_entry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *ctiocb = NULL;
+ int rc = 0;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct bsg_job_data *dd_data;
+ unsigned long flags;
+ uint32_t creg_val;
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2736 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
+ /* Allocate buffer for command iocb */
+ ctiocb = lpfc_sli_get_iocbq(phba);
+ if (!ctiocb) {
+ rc = -ENOMEM;
+ goto no_ctiocb;
+ }
+
+ icmd = &ctiocb->iocb;
+ icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ /* Fill in rest of iocb */
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* Do not issue unsol response if oxid not marked as valid */
+ if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
+ rc = IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+ icmd->ulpContext = phba->ct_ctx[tag].rxid;
+ icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
+ ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2721 ndlp null for oxid %x SID %x\n",
+ icmd->ulpContext,
+ phba->ct_ctx[tag].SID);
+ rc = IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+
+ /* Check if the ndlp is active */
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ rc = IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+
+ /* get a refernece count so the ndlp doesn't go away while
+ * we respond
+ */
+ if (!lpfc_nlp_get(ndlp)) {
+ rc = IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+
+ icmd->un.ulpWord[3] =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+ /* The exchange is done, mark the entry as invalid */
+ phba->ct_ctx[tag].valid = UNSOL_INVALID;
+ } else
+ icmd->ulpContext = (ushort) tag;
+
+ icmd->ulpTimeout = phba->fc_ratov * 2;
+
+ /* Xmit CT response on exchange <xid> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+
+ ctiocb->iocb_cmpl = NULL;
+ ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->vport = phba->pport;
+ ctiocb->context1 = dd_data;
+ ctiocb->context2 = cmp;
+ ctiocb->context3 = bmp;
+ ctiocb->context_un.ndlp = ndlp;
+ ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+
+ dd_data->type = TYPE_IOCB;
+ dd_data->set_job = job;
+ dd_data->context_un.iocb.cmdiocbq = ctiocb;
+ dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.rmp = NULL;
+ job->dd_data = dd_data;
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 0; /* done for now */
+ }
+
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
+
+issue_ct_rsp_exit:
+ lpfc_sli_release_iocbq(phba, ctiocb);
+no_ctiocb:
+ kfree(dd_data);
+no_dd_data:
+ return rc;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
+ * @job: SEND_MGMT_RESP fc_bsg_job
+ **/
+static int
+lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ struct ulp_bde64 *bpl;
+ struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
+ int bpl_entries;
+ uint32_t tag = mgmt_resp->tag;
+ unsigned long reqbfrcnt =
+ (unsigned long)job->request_payload.payload_len;
+ int rc = 0;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
+ rc = -ERANGE;
+ goto send_mgmt_rsp_exit;
+ }
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto send_mgmt_rsp_exit;
+ }
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
+ rc = -ENOMEM;
+ goto send_mgmt_rsp_free_bmp;
+ }
+
+ INIT_LIST_HEAD(&bmp->list);
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
+ cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+ 1, bpl, &bpl_entries);
+ if (!cmp) {
+ rc = -ENOMEM;
+ goto send_mgmt_rsp_free_bmp;
+ }
+ lpfc_bsg_copy_data(cmp, &job->request_payload,
+ job->request_payload.payload_len, 1);
+
+ rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
+
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
+
+ rc = -EACCES;
+
+ lpfc_free_bsg_buffers(phba, cmp);
+
+send_mgmt_rsp_free_bmp:
+ if (bmp->virt)
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+send_mgmt_rsp_exit:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for preparing driver for diag loopback
+ * on device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i = 0;
+
+ psli = &phba->sli;
+ if (!psli)
+ return -ENODEV;
+
+ pring = &psli->ring[LPFC_FCP_RING];
+ if (!pring)
+ return -ENODEV;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ return -EACCES;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (!list_empty(&pring->txcmplq)) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+ msleep(10);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for driver exit processing of setting up
+ * diag loopback mode on device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+ return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ * All new scsi requests are blocked, a small delay is used to allow the
+ * scsi requests to complete then the link is brought down. If the link is
+ * is placed in loopback mode then scsi requests are again allowed
+ * so the scsi mid-layer doesn't give up on the port.
+ * All of this is done in-line.
+ */
+static int
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags;
+ uint32_t timeout;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ int mbxstatus = MBX_SUCCESS;
+ int i = 0;
+ int rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2738 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+ pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+
+ memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ if (link_flags == INTERNAL_LOOP_BACK)
+ pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+ else
+ pmboxq->u.mb.un.varInitLnk.link_flags =
+ FLAGS_TOPOLOGY_MODE_LOOP;
+
+ pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
+ pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+ LPFC_MBOX_TMO);
+
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ /* wait for the link attention interrupt */
+ msleep(100);
+
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(10);
+ }
+ }
+
+ } else
+ rc = -ENODEV;
+
+loopback_mode_exit:
+ lpfc_bsg_diag_mode_exit(phba);
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (pmboxq && mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag for set link to diag or nomral operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_mbx_set_link_diag_state *link_diag_state;
+ uint32_t req_len, alloc_len;
+ int mbxstatus = MBX_SUCCESS, rc;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_state_set_out;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+ diag, phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
+ link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
+ LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
+ bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+ phba->sli4_hba.lnk_info.lnk_no);
+ bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+ phba->sli4_hba.lnk_info.lnk_tp);
+ if (diag)
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 1);
+ else
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 0);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+ rc = 0;
+ else
+ rc = -ENODEV;
+
+link_diag_state_set_out:
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * up internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ uint32_t req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ int mbxstatus = MBX_SUCCESS, rc = 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+ bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3127 Failed setup loopback mode mailbox "
+ "command, rc:x%x, status:x%x\n", mbxstatus,
+ pmboxq->u.mb.mbxStatus);
+ rc = -ENODEV;
+ }
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function set up SLI4 FC port registrations for diagnostic run, which
+ * includes all the rpis, vfi, and also vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+ int rc;
+
+ if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3136 Port still had vfi registered: "
+ "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+ phba->pport->fc_myDID, phba->fcf.fcfi,
+ phba->sli4_hba.vfi_ids[phba->pport->vfi],
+ phba->vpi_ids[phba->pport->vpi]);
+ return -EINVAL;
+ }
+ rc = lpfc_issue_reg_vfi(phba->pport);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags, timeout;
+ int i, rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3011 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba);
+ if (rc)
+ goto job_error;
+
+ /* indicate we are in loobpack diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* reset port to start frome scratch */
+ rc = lpfc_selective_reset(phba);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3129 Bring link to diagnostic state.\n");
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3130 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
+ goto loopback_mode_exit;
+ }
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3131 Timeout waiting for link to "
+ "diagnostic mode, timeout:%d ms\n",
+ timeout * 10);
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+
+ /* set up loopback mode */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3132 Set up loopback mode:x%x\n", link_flags);
+
+ if (link_flags == INTERNAL_LOOP_BACK)
+ rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+ else if (link_flags == EXTERNAL_LOOP_BACK)
+ rc = lpfc_hba_init_link_fc_topology(phba,
+ FLAGS_TOPOLOGY_MODE_PT_PT,
+ MBX_NOWAIT);
+ else {
+ rc = -EINVAL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3141 Loopback mode:x%x not supported\n",
+ link_flags);
+ goto loopback_mode_exit;
+ }
+
+ if (!rc) {
+ /* wait for the link attention interrupt */
+ msleep(100);
+ i = 0;
+ while (phba->link_state < LPFC_LINK_UP) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3137 Timeout waiting for link up "
+ "in loopback mode, timeout:%d ms\n",
+ timeout * 10);
+ break;
+ }
+ msleep(10);
+ }
+ }
+
+ /* port resource registration setup for loopback diagnostic */
+ if (!rc) {
+ /* set up a none zero myDID for loopback test */
+ phba->pport->fc_myDID = 1;
+ rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+ } else
+ goto loopback_mode_exit;
+
+ if (!rc) {
+ /* wait for the port ready */
+ msleep(100);
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3133 Timeout waiting for port "
+ "loopback mode ready, timeout:%d ms\n",
+ timeout * 10);
+ break;
+ }
+ msleep(10);
+ }
+ }
+
+loopback_mode_exit:
+ /* clear loopback diagnostic mode */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for responding to check and dispatch bsg diag
+ * command from the user to proper driver action routines.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)
+ rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+ else
+ rc = -ENODEV;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for responding to check and dispatch bsg diag
+ * command from the user to proper driver action routines.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct diag_mode_set *loopback_mode_end_cmd;
+ uint32_t timeout;
+ int rc, i;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return -ENODEV;
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+
+ /* clear loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ loopback_mode_end_cmd = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = loopback_mode_end_cmd->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3139 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
+ goto loopback_mode_end_exit;
+ }
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3140 Timeout waiting for link to "
+ "diagnostic mode_end, timeout:%d ms\n",
+ timeout * 10);
+ /* there is nothing much we can do here */
+ break;
+ }
+ msleep(10);
+ }
+
+ /* reset port resource registrations */
+ rc = lpfc_selective_reset(phba);
+ phba->pport->fc_myDID = 0;
+
+loopback_mode_end_exit:
+ /* make return code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function is to perform SLI4 diag link test request from the user
+ * applicaiton.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmboxq;
+ struct sli4_link_diag *link_diag_test_cmd;
+ uint32_t req_len, alloc_len;
+ struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct diag_status *diag_status_reply;
+ int mbxstatus, rc = 0;
+
+ shost = job->shost;
+ if (!shost) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ phba = vport->phba;
+ if (!phba) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3013 Received LINK DIAG TEST request "
+ " size:%d below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba);
+ if (rc)
+ goto job_error;
+
+ link_diag_test_cmd = (struct sli4_link_diag *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+ if (rc)
+ goto job_error;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+ run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+ bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+ phba->sli4_hba.lnk_info.lnk_no);
+ bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+ phba->sli4_hba.lnk_info.lnk_tp);
+ bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_id);
+ bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+ link_diag_test_cmd->loops);
+ bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_version);
+ bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+ link_diag_test_cmd->error_action);
+
+ mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || mbxstatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3010 Run link diag test mailbox failed with "
+ "mbx_status x%x status x%x, add_status x%x\n",
+ mbxstatus, shdr_status, shdr_add_status);
+ }
+
+ diag_status_reply = (struct diag_status *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3012 Received Run link diag test reply "
+ "below minimum size (%d): reply_len:%d\n",
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_status)),
+ job->reply_len);
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ diag_status_reply->mbox_status = mbxstatus;
+ diag_status_reply->shdr_status = shdr_status;
+ diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfcdiag_loop_self_reg - obtains a remote port login id
+ * @phba: Pointer to HBA context object
+ * @rpi: Pointer to a remote port login id
+ *
+ * This function obtains a remote port login id so the diag loopback test
+ * can send and receive its own unsolicited CT command.
+ **/
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *dmabuff;
+ int status;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ else {
+ *rpi = lpfc_sli4_alloc_rpi(phba);
+ status = lpfc_reg_rpi(phba, phba->pport->vpi,
+ phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ }
+
+ if (status) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba, *rpi);
+ return -ENOMEM;
+ }
+
+ dmabuff = (struct lpfc_dmabuf *) mbox->context1;
+ mbox->context1 = NULL;
+ mbox->context2 = NULL;
+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+ if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+ kfree(dmabuff);
+ if (status != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba, *rpi);
+ return -ENODEV;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ *rpi = mbox->u.mb.un.varWords[0];
+
+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+ kfree(dmabuff);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 0;
+}
+
+/**
+ * lpfcdiag_loop_self_unreg - unregs from the rpi
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ *
+ * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
+ **/
+static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
+{
+ LPFC_MBOXQ_t *mbox;
+ int status;
+
+ /* Allocate mboxq structure */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ return -ENOMEM;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_unreg_login(phba, 0, rpi, mbox);
+ else
+ lpfc_unreg_login(phba, phba->pport->vpi,
+ phba->sli4_hba.rpi_ids[rpi], mbox);
+
+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+ if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+ if (status != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba, rpi);
+ return 0;
+}
+
+/**
+ * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ * @txxri: Pointer to transmit exchange id
+ * @rxxri: Pointer to response exchabge id
+ *
+ * This function obtains the transmit and receive ids required to send
+ * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
+ * flags are used to the unsolicted response handler is able to process
+ * the ct command sent on the same port.
+ **/
+static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
+ uint16_t *txxri, uint16_t * rxxri)
+{
+ struct lpfc_bsg_event *evt;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+ IOCB_t *cmd, *rsp;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl = NULL;
+ struct lpfc_sli_ct_request *ctreq = NULL;
+ int ret_val = 0;
+ int time_left;
+ int iocb_stat = IOCB_SUCCESS;
+ unsigned long flags;
+
+ *txxri = 0;
+ *rxxri = 0;
+ evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+ SLI_CT_ELX_LOOPBACK);
+ if (!evt)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_add(&evt->node, &phba->ct_ev_waiters);
+ lpfc_bsg_event_ref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (dmabuf) {
+ dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
+ if (dmabuf->virt) {
+ INIT_LIST_HEAD(&dmabuf->list);
+ bpl = (struct ulp_bde64 *) dmabuf->virt;
+ memset(bpl, 0, sizeof(*bpl));
+ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
+ }
+
+ if (cmdiocbq == NULL || rspiocbq == NULL ||
+ dmabuf == NULL || bpl == NULL || ctreq == NULL ||
+ dmabuf->virt == NULL) {
+ ret_val = -ENOMEM;
+ goto err_get_xri_exit;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ rsp = &rspiocbq->iocb;
+
+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+ ctreq->FsSubType = 0;
+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
+ ctreq->CommandResponse.bits.Size = 0;
+
+
+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
+
+ cmd->un.xseq64.w5.hcsw.Fctl = LA;
+ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = rpi;
+
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->iocb_cmpl = NULL;
+
+ iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+ rspiocbq,
+ (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+ if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
+ ret_val = -EIO;
+ goto err_get_xri_exit;
+ }
+ *txxri = rsp->ulpContext;
+
+ evt->waiting = 1;
+ evt->wait_time_stamp = jiffies;
+ time_left = wait_event_interruptible_timeout(
+ evt->wq, !list_empty(&evt->events_to_see),
+ msecs_to_jiffies(1000 *
+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ if (list_empty(&evt->events_to_see))
+ ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
+ else {
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ *rxxri = (list_entry(evt->events_to_get.prev,
+ typeof(struct event_data),
+ node))->immed_dat;
+ }
+ evt->waiting = 0;
+
+err_get_xri_exit:
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt); /* release ref */
+ lpfc_bsg_event_unref(evt); /* delete */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ if (dmabuf) {
+ if (dmabuf->virt)
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+
+ if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ if (rspiocbq)
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ return ret_val;
+}
+
+/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf;
+ struct pci_dev *pcidev = phba->pcidev;
+
+ /* allocate dma buffer struct */
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ INIT_LIST_HEAD(&dmabuf->list);
+
+ /* now, allocate dma buffer */
+ dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
+
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return NULL;
+ }
+
+ return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine just simply frees a dma buffer and its associated buffer
+ * descriptor referred by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+
+ if (!dmabuf)
+ return;
+
+ if (dmabuf->virt)
+ dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine just simply frees all dma buffers and their associated buffer
+ * descriptors referred by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+ struct list_head *dmabuf_list)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+ if (list_empty(dmabuf_list))
+ return;
+
+ list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+ list_del_init(&dmabuf->list);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ }
+ return;
+}
+
+/**
+ * diag_cmd_data_alloc - fills in a bde struct with dma buffers
+ * @phba: Pointer to HBA context object
+ * @bpl: Pointer to 64 bit bde structure
+ * @size: Number of bytes to process
+ * @nocopydata: Flag to copy user data into the allocated buffer
+ *
+ * This function allocates page size buffers and populates an lpfc_dmabufext.
+ * If allowed the user data pointed to with indataptr is copied into the kernel
+ * memory. The chained list of page size buffers is returned.
+ **/
+static struct lpfc_dmabufext *
+diag_cmd_data_alloc(struct lpfc_hba *phba,
+ struct ulp_bde64 *bpl, uint32_t size,
+ int nocopydata)
+{
+ struct lpfc_dmabufext *mlist = NULL;
+ struct lpfc_dmabufext *dmp;
+ int cnt, offset = 0, i = 0;
+ struct pci_dev *pcidev;
+
+ pcidev = phba->pcidev;
+
+ while (size) {
+ /* We get chunks of 4K */
+ if (size > BUF_SZ_4K)
+ cnt = BUF_SZ_4K;
+ else
+ cnt = size;
+
+ /* allocate struct lpfc_dmabufext buffer header */
+ dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
+ if (!dmp)
+ goto out;
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+
+ /* Queue it to a linked list */
+ if (mlist)
+ list_add_tail(&dmp->dma.list, &mlist->dma.list);
+ else
+ mlist = dmp;
+
+ /* allocate buffer */
+ dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
+ cnt,
+ &(dmp->dma.phys),
+ GFP_KERNEL);
+
+ if (!dmp->dma.virt)
+ goto out;
+
+ dmp->size = cnt;
+
+ if (nocopydata) {
+ bpl->tus.f.bdeFlags = 0;
+ pci_dma_sync_single_for_device(phba->pcidev,
+ dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ } else {
+ memset((uint8_t *)dmp->dma.virt, 0, cnt);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ }
+
+ /* build buffer ptr list for IOCB */
+ bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
+ bpl->tus.f.bdeSize = (ushort) cnt;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ i++;
+ offset += cnt;
+ size -= cnt;
+ }
+
+ if (mlist) {
+ mlist->flag = i;
+ return mlist;
+ }
+out:
+ diag_cmd_data_free(phba, mlist);
+ return NULL;
+}
+
+/**
+ * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * @phba: Pointer to HBA context object
+ * @rxxri: Receive exchange id
+ * @len: Number of data bytes
+ *
+ * This function allocates and posts a data buffer of sufficient size to receive
+ * an unsolicted CT command.
+ **/
+static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *cmdiocbq;
+ IOCB_t *cmd = NULL;
+ struct list_head head, *curr, *next;
+ struct lpfc_dmabuf *rxbmp;
+ struct lpfc_dmabuf *dmp;
+ struct lpfc_dmabuf *mp[2] = {NULL, NULL};
+ struct ulp_bde64 *rxbpl = NULL;
+ uint32_t num_bde;
+ struct lpfc_dmabufext *rxbuffer = NULL;
+ int ret_val = 0;
+ int iocb_stat;
+ int i = 0;
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (rxbmp != NULL) {
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (rxbmp->virt) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ }
+ }
+
+ if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+ ret_val = -ENOMEM;
+ goto err_post_rxbufs_exit;
+ }
+
+ /* Queue buffers for the receive exchange */
+ num_bde = (uint32_t)rxbuffer->flag;
+ dmp = &rxbuffer->dma;
+
+ cmd = &cmdiocbq->iocb;
+ i = 0;
+
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &dmp->list);
+ list_for_each_safe(curr, next, &head) {
+ mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
+ list_del(curr);
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
+ cmd->un.quexri64cx.buff.bde.addrHigh =
+ putPaddrHigh(mp[i]->phys);
+ cmd->un.quexri64cx.buff.bde.addrLow =
+ putPaddrLow(mp[i]->phys);
+ cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
+ ((struct lpfc_dmabufext *)mp[i])->size;
+ cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
+ cmd->ulpCommand = CMD_QUE_XRI64_CX;
+ cmd->ulpPU = 0;
+ cmd->ulpLe = 1;
+ cmd->ulpBdeCount = 1;
+ cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
+
+ } else {
+ cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
+ cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
+ cmd->un.cont64[i].tus.f.bdeSize =
+ ((struct lpfc_dmabufext *)mp[i])->size;
+ cmd->ulpBdeCount = ++i;
+
+ if ((--num_bde > 0) && (i < 2))
+ continue;
+
+ cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
+ cmd->ulpLe = 1;
+ }
+
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = rxxri;
+
+ iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+ 0);
+ if (iocb_stat == IOCB_ERROR) {
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext *)mp[0]);
+ if (mp[1])
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext *)mp[1]);
+ dmp = list_entry(next, struct lpfc_dmabuf, list);
+ ret_val = -EIO;
+ goto err_post_rxbufs_exit;
+ }
+
+ lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
+ if (mp[1]) {
+ lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
+ mp[1] = NULL;
+ }
+
+ /* The iocb was freed by lpfc_sli_issue_iocb */
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ dmp = list_entry(next, struct lpfc_dmabuf, list);
+ ret_val = -EIO;
+ goto err_post_rxbufs_exit;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ i = 0;
+ }
+ list_del(&head);
+
+err_post_rxbufs_exit:
+
+ if (rxbmp) {
+ if (rxbmp->virt)
+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+ kfree(rxbmp);
+ }
+
+ if (cmdiocbq)
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return ret_val;
+}
+
+/**
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
+ * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
+ *
+ * This function receives a user data buffer to be transmitted and received on
+ * the same port, the link must be up and in loopback mode prior
+ * to being called.
+ * 1. A kernel buffer is allocated to copy the user data into.
+ * 2. The port registers with "itself".
+ * 3. The transmit and receive exchange ids are obtained.
+ * 4. The receive exchange id is posted.
+ * 5. A new els loopback event is created.
+ * 6. The command and response iocbs are allocated.
+ * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
+ *
+ * This function is meant to be called n times while the port is in loopback
+ * so it is the apps responsibility to issue a reset to take the port out
+ * of loopback mode.
+ **/
+static int
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct diag_mode_test *diag_mode;
+ struct lpfc_bsg_event *evt;
+ struct event_data *evdat;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t size;
+ uint32_t full_size;
+ size_t segment_len = 0, segment_offset = 0, current_offset = 0;
+ uint16_t rpi = 0;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+ IOCB_t *cmd, *rsp = NULL;
+ struct lpfc_sli_ct_request *ctreq;
+ struct lpfc_dmabuf *txbmp;
+ struct ulp_bde64 *txbpl = NULL;
+ struct lpfc_dmabufext *txbuffer = NULL;
+ struct list_head head;
+ struct lpfc_dmabuf *curr;
+ uint16_t txxri = 0, rxxri;
+ uint32_t num_bde;
+ uint8_t *ptr = NULL, *rx_databuf = NULL;
+ int rc = 0;
+ int time_left;
+ int iocb_stat = IOCB_SUCCESS;
+ unsigned long flags;
+ void *dataout = NULL;
+ uint32_t total_mem;
+
+ /* in case no data is returned return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2739 Received DIAG TEST request below minimum "
+ "size\n");
+ rc = -EINVAL;
+ goto loopback_test_exit;
+ }
+
+ if (job->request_payload.payload_len !=
+ job->reply_payload.payload_len) {
+ rc = -EINVAL;
+ goto loopback_test_exit;
+ }
+ diag_mode = (struct diag_mode_test *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = -EACCES;
+ goto loopback_test_exit;
+ }
+
+ if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ rc = -EACCES;
+ goto loopback_test_exit;
+ }
+
+ size = job->request_payload.payload_len;
+ full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
+
+ if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
+ rc = -ERANGE;
+ goto loopback_test_exit;
+ }
+
+ if (full_size >= BUF_SZ_4K) {
+ /*
+ * Allocate memory for ioctl data. If buffer is bigger than 64k,
+ * then we allocate 64k and re-use that buffer over and over to
+ * xfer the whole block. This is because Linux kernel has a
+ * problem allocating more than 120k of kernel space memory. Saw
+ * problem with GET_FCPTARGETMAPPING...
+ */
+ if (size <= (64 * 1024))
+ total_mem = full_size;
+ else
+ total_mem = 64 * 1024;
+ } else
+ /* Allocate memory for ioctl data */
+ total_mem = BUF_SZ_4K;
+
+ dataout = kmalloc(total_mem, GFP_KERNEL);
+ if (dataout == NULL) {
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ ptr = dataout;
+ ptr += ELX_LOOPBACK_HEADER_SZ;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ ptr, size);
+ rc = lpfcdiag_loop_self_reg(phba, &rpi);
+ if (rc)
+ goto loopback_test_exit;
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
+
+ rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
+ }
+ evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+ SLI_CT_ELX_LOOPBACK);
+ if (!evt) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_add(&evt->node, &phba->ct_ev_waiters);
+ lpfc_bsg_event_ref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+ txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+ if (txbmp) {
+ txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
+ if (txbmp->virt) {
+ INIT_LIST_HEAD(&txbmp->list);
+ txbpl = (struct ulp_bde64 *) txbmp->virt;
+ txbuffer = diag_cmd_data_alloc(phba,
+ txbpl, full_size, 0);
+ }
+ }
+
+ if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+ rc = -ENOMEM;
+ goto err_loopback_test_exit;
+ }
+ if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
+ rc = -ENOMEM;
+ goto err_loopback_test_exit;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rsp = &rspiocbq->iocb;
+
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &txbuffer->dma.list);
+ list_for_each_entry(curr, &head, list) {
+ segment_len = ((struct lpfc_dmabufext *)curr)->size;
+ if (current_offset == 0) {
+ ctreq = curr->virt;
+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+ ctreq->FsSubType = 0;
+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
+ ctreq->CommandResponse.bits.Size = size;
+ segment_offset = ELX_LOOPBACK_HEADER_SZ;
+ } else
+ segment_offset = 0;
+
+ BUG_ON(segment_offset >= segment_len);
+ memcpy(curr->virt + segment_offset,
+ ptr + current_offset,
+ segment_len - segment_offset);
+
+ current_offset += segment_len - segment_offset;
+ BUG_ON(current_offset > size);
+ }
+ list_del(&head);
+
+ /* Build the XMIT_SEQUENCE iocb */
+ num_bde = (uint32_t)txbuffer->flag;
+
+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
+
+ cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ cmd->ulpContext = txxri;
+ } else {
+ cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+ cmdiocbq->context3 = txbmp;
+ cmdiocbq->sli4_xritag = NO_XRI;
+ cmd->unsli3.rcvsli3.ox_id = 0xffff;
+ }
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->iocb_cmpl = NULL;
+ iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+ rspiocbq, (phba->fc_ratov * 2) +
+ LPFC_DRVR_TIMEOUT);
+
+ if ((iocb_stat != IOCB_SUCCESS) ||
+ ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (rsp->ulpStatus != IOSTAT_SUCCESS))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3126 Failed loopback test issue iocb: "
+ "iocb_stat:x%x\n", iocb_stat);
+ rc = -EIO;
+ goto err_loopback_test_exit;
+ }
+
+ evt->waiting = 1;
+ time_left = wait_event_interruptible_timeout(
+ evt->wq, !list_empty(&evt->events_to_see),
+ msecs_to_jiffies(1000 *
+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ evt->waiting = 0;
+ if (list_empty(&evt->events_to_see)) {
+ rc = (time_left) ? -EINTR : -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3125 Not receiving unsolicited event, "
+ "rc:x%x\n", rc);
+ } else {
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+ evdat = list_entry(evt->events_to_get.prev,
+ typeof(*evdat), node);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ rx_databuf = evdat->data;
+ if (evdat->len != full_size) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "1603 Loopback test did not receive expected "
+ "data length. actual length 0x%x expected "
+ "length 0x%x\n",
+ evdat->len, full_size);
+ rc = -EIO;
+ } else if (rx_databuf == NULL)
+ rc = -EIO;
+ else {
+ rc = IOCB_SUCCESS;
+ /* skip over elx loopback header */
+ rx_databuf += ELX_LOOPBACK_HEADER_SZ;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ rx_databuf, size);
+ job->reply->reply_payload_rcv_len = size;
+ }
+ }
+
+err_loopback_test_exit:
+ lpfcdiag_loop_self_unreg(phba, rpi);
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt); /* release ref */
+ lpfc_bsg_event_unref(evt); /* delete */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+ if (rspiocbq != NULL)
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+
+ if (txbmp != NULL) {
+ if (txbpl != NULL) {
+ if (txbuffer != NULL)
+ diag_cmd_data_free(phba, txbuffer);
+ lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
+ }
+ kfree(txbmp);
+ }
+
+loopback_test_exit:
+ kfree(dataout);
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ /* complete the job back to userspace if no error */
+ if (rc == IOCB_SUCCESS)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
+ * @job: GET_DFC_REV fc_bsg_job
+ **/
+static int
+lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct get_mgmt_rev *event_req;
+ struct get_mgmt_rev_reply *event_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2740 Received GET_DFC_REV request below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_req = (struct get_mgmt_rev *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ event_reply = (struct get_mgmt_rev_reply *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2741 Received GET_DFC_REV reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
+ event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
+job_error:
+ job->reply->result = rc;
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox commands issued from
+ * lpfc_bsg_issue_mbox function. This function is called by the
+ * mailbox event handler function with no lock held. This function
+ * will wake up thread waiting on the wait queue pointed by context1
+ * of the mailbox.
+ **/
+static void
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint32_t size;
+ unsigned long flags;
+ uint8_t *pmb, *pmb_buf;
+
+ dd_data = pmboxq->context1;
+
+ /*
+ * The outgoing buffer is readily referred from the dma buffer,
+ * just need to get header part from mailboxq structure.
+ */
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+ /* Determine if job has been aborted */
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Copy the mailbox data to the job if it is still active */
+
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+ }
+
+ dd_data->set_job = NULL;
+ mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
+ kfree(dd_data);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = 0;
+ job->job_done(job);
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_check_cmd_access - test for a supported mailbox command
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a mailbox object.
+ * @vport: Pointer to a vport object.
+ *
+ * Some commands require the port to be offline, some may not be called from
+ * the application.
+ **/
+static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
+ MAILBOX_t *mb, struct lpfc_vport *vport)
+{
+ /* return negative error values for bsg job */
+ switch (mb->mbxCommand) {
+ /* Offline only */
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+ case MBX_CONFIG_LINK:
+ case MBX_CONFIG_RING:
+ case MBX_RESET_RING:
+ case MBX_UNREG_LOGIN:
+ case MBX_CLEAR_LA:
+ case MBX_DUMP_CONTEXT:
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_SET_MASK:
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2743 Command 0x%x is illegal in on-line "
+ "state\n",
+ mb->mbxCommand);
+ return -EPERM;
+ }
+ case MBX_WRITE_NV:
+ case MBX_WRITE_VPARMS:
+ case MBX_LOAD_SM:
+ case MBX_READ_NV:
+ case MBX_READ_CONFIG:
+ case MBX_READ_RCONFIG:
+ case MBX_READ_STATUS:
+ case MBX_READ_XRI:
+ case MBX_READ_REV:
+ case MBX_READ_LNK_STAT:
+ case MBX_DUMP_MEMORY:
+ case MBX_DOWN_LOAD:
+ case MBX_UPDATE_CFG:
+ case MBX_KILL_BOARD:
+ case MBX_READ_TOPOLOGY:
+ case MBX_LOAD_AREA:
+ case MBX_LOAD_EXP_ROM:
+ case MBX_BEACON:
+ case MBX_DEL_LD_ENTRY:
+ case MBX_SET_DEBUG:
+ case MBX_WRITE_WWN:
+ case MBX_SLI4_CONFIG:
+ case MBX_READ_EVENT_LOG:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_WRITE_EVENT_LOG:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
+ case MBX_RUN_BIU_DIAG64:
+ break;
+ case MBX_SET_VARIABLE:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1226 mbox: set_variable 0x%x, 0x%x\n",
+ mb->un.varWords[0],
+ mb->un.varWords[1]);
+ if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+ && (mb->un.varWords[1] == 1)) {
+ phba->wait_4_mlo_maint_flg = 1;
+ } else if (mb->un.varWords[0] == SETVAR_MLORST) {
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
+ }
+ break;
+ case MBX_READ_SPARM64:
+ case MBX_REG_LOGIN:
+ case MBX_REG_LOGIN64:
+ case MBX_CONFIG_PORT:
+ case MBX_RUN_BIU_DIAG:
+ default:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2742 Unknown Command 0x%x\n",
+ mb->mbxCommand);
+ return -EPERM;
+ }
+
+ return 0; /* ok */
+}
+
+/**
+ * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This is routine clean up and reset BSG handling of multi-buffer mbox
+ * command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+ return;
+
+ /* free all memory, including dma buffers */
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+ /* multi-buffer write mailbox command pass-through complete */
+ memset((char *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is routine handles BSG job for mailbox commands completions with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint8_t *pmb, *pmb_buf;
+ unsigned long flags;
+ uint32_t size;
+ int rc = 0;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint8_t *pmbx;
+
+ dd_data = pmboxq->context1;
+
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /*
+ * The outgoing buffer is readily referred from the dma buffer,
+ * just need to get header part from mailboxq structure.
+ */
+
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ /* Copy the byte swapped response mailbox back to the user */
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+ /* if there is any non-embedded extended data copy that too */
+ dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ pmbx = (uint8_t *)dmabuf->virt;
+ /* byte swap the extended data following the mailbox command */
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+ }
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+
+ /* result for successful */
+ job->reply->result = 0;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2937 SLI_CONFIG ext-buffer maibox command "
+ "(x%x/x%x) complete bsg job done, bsize:%d\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, size);
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType,
+ dma_ebuf, sta_pos_addr,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2938 SLI_CONFIG ext-buffer maibox "
+ "command (x%x/x%x) failure, rc:x%x\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, rc);
+ }
+
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+ kfree(dd_data);
+ return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* handle the BSG job with mailbox command */
+ if (!job)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* free base driver mailbox structure memory */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ /* if the job is still active, call job done */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* handle the BSG job with the mailbox command */
+ if (!job)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ /* free all memory, including dma buffers */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* if the job is still active, call job done */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+ struct lpfc_dmabuf *ext_dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2943 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ } else {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2944 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ }
+ } else {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3007 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+
+ } else {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3008 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+ }
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a BSG mailbox object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuff: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
+ * non-embedded external bufffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ uint32_t ext_buf_cnt, ext_buf_index;
+ struct lpfc_dmabuf *ext_dmabuf = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *pmbx;
+ int rc, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2945 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2941 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2946 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2942 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ /* before dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+ sta_pre_addr, dmabuf, ext_buf_cnt);
+
+ /* reject non-embedded mailbox command with none external buffer */
+ if (ext_buf_cnt == 0) {
+ rc = -EPERM;
+ goto job_error;
+ } else if (ext_buf_cnt > 1) {
+ /* additional external read buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ }
+ }
+
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* for the rest of external buffer descriptors if any */
+ if (ext_buf_cnt > 1) {
+ ext_buf_index = 1;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+ ext_buf_index, dmabuf,
+ curr_dmabuf);
+ ext_buf_index++;
+ }
+ }
+
+ /* after dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+ sta_pos_addr, dmabuf, ext_buf_cnt);
+
+ /* construct base driver mbox command */
+ pmb = &pmboxq->u.mb;
+ pmbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ /*
+ * Non-embedded mailbox subcommand data gets byte swapped here because
+ * the lower level driver code only does the first 64 mailbox words.
+ */
+ if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+ (nemb_tp == nemb_mse))
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[0].buf_len);
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2947 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return SLI_CONFIG_HANDLED;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2948 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ kfree(dd_data);
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a BSG mailbox object.
+ * @dmabuff: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
+ * non-embedded external bufffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t ext_buf_cnt;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *mbx;
+ int rc = SLI_CONFIG_NOT_HANDLED, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2953 Failed SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2949 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2954 Failed SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2950 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ /* before dma buffer descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+ sta_pre_addr, dmabuf, ext_buf_cnt);
+
+ if (ext_buf_cnt == 0)
+ return -EPERM;
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* after dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+ sta_pos_addr, dmabuf, ext_buf_cnt);
+
+ /* log for looking forward */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ if (nemb_tp == nemb_mse)
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+ i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[i].buf_len);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+ i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[i]));
+ }
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ if (ext_buf_cnt == 1) {
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ mbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, mbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+ job->dd_data = dd_data;
+
+ /* state change */
+
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2955 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return SLI_CONFIG_HANDLED;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2956 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additoinal external buffers */
+
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a BSG mailbox object.
+ * @dmabuff: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
+ * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded sussystem 0x1 and opcodes with external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t subsys;
+ uint32_t opcode;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ /* state change on new multi-buffer pass-through mailbox command */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+ switch (opcode) {
+ case FCOE_OPCODE_READ_FCF:
+ case FCOE_OPCODE_GET_DPORT_RESULTS:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2957 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ case FCOE_OPCODE_ADD_FCF:
+ case FCOE_OPCODE_SET_DPORT_MODE:
+ case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2958 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2959 Reject SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = -EPERM;
+ break;
+ }
+ } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+ case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
+ case COMN_OPCODE_GET_PROFILE_CONFIG:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3106 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3107 Reject SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = -EPERM;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2977 Reject SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = -EPERM;
+ }
+ } else {
+ subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ case COMN_OPCODE_READ_OBJECT_LIST:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2960 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ case COMN_OPCODE_WRITE_OBJECT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2961 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2962 Not handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2978 Not handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ }
+
+ /* state reset on not handled new multi-buffer mailbox command */
+ if (rc != SLI_CONFIG_HANDLED)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine is for requesting to abort a pass-through mailbox command with
+ * multiple external buffers due to error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ else
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine extracts the next mailbox read external buffer back to
+ * user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_dmabuf *dmabuf;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2963 SLI_CONFIG (mse) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ } else {
+ size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ }
+ if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+ return -EPIPE;
+ dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ struct lpfc_dmabuf, list);
+ list_del_init(&dmabuf->list);
+
+ /* after dma buffer descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+ mbox_rd, dma_ebuf, sta_pos_addr,
+ dmabuf, index);
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pbuf, size);
+
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+ "command session done\n");
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ }
+
+ job->reply->result = 0;
+ job->job_done(job);
+
+ return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox read external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ enum nemb_type nemb_tp;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+ int rc;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+ nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pbuf, size);
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2966 SLI_CONFIG (mse) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ }
+
+ /* set up external buffer descriptor and add to external buffer list */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf,
+ dmabuf);
+ list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ /* after write dma buffer */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+ mbox_wr, dma_ebuf, sta_pos_addr,
+ dmabuf, index);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2968 SLI_CONFIG ext-buffer wr all %d "
+ "ebuffers received\n",
+ phba->mbox_ext_buf_ctx.numBuf);
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pbuf, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2969 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return SLI_CONFIG_HANDLED;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2970 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additoinal external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a BSG mailbox object.
+ * @dmabuff: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2971 SLI_CONFIG buffer (type:x%x)\n",
+ phba->mbox_ext_buf_ctx.mboxType);
+
+ if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2972 SLI_CONFIG rd buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_read_ebuf_get(phba, job);
+ if (rc == SLI_CONFIG_HANDLED)
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2973 SLI_CONFIG wr buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a BSG mailbox object.
+ * @dmabuff: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* mbox command with/without single external buffer */
+ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+ return rc;
+
+ /* mbox command and first external buffer */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+ if (mbox_req->extSeqNum == 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2974 SLI_CONFIG mailbox: tag:%d, "
+ "seq:%d\n", mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+ return rc;
+ } else
+ goto sli_cfg_ext_error;
+ }
+
+ /*
+ * handle additional external buffers
+ */
+
+ /* check broken pipe conditions */
+ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+ goto sli_cfg_ext_error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2975 SLI_CONFIG mailbox external buffer: "
+ "extSta:x%x, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+ return rc;
+
+sli_cfg_ext_error:
+ /* all other cases, broken pipe */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2976 SLI_CONFIG mailbox broken pipe: "
+ "ctxSta:x%x, ctxNumBuf:%d "
+ "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state,
+ phba->mbox_ext_buf_ctx.numBuf,
+ phba->mbox_ext_buf_ctx.mbxTag,
+ phba->mbox_ext_buf_ctx.seqNum,
+ mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ return -EPIPE;
+}
+
+/**
+ * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a mailbox object.
+ * @vport: Pointer to a vport object.
+ *
+ * Allocate a tracking object, mailbox command memory, get a mailbox
+ * from the mailbox pool, copy the caller mailbox command.
+ *
+ * If offline and the sli is active we need to poll for the command (port is
+ * being reset) and com-plete the job, otherwise issue the mailbox command and
+ * let our completion handler finish the command.
+ **/
+static int
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+ /* a 4k buffer to hold the mb and extended data from/to the bsg */
+ uint8_t *pmbx = NULL;
+ struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct dfc_mbox_req *mbox_req;
+ struct READ_EVENT_LOG_VAR *rdEventLog;
+ uint32_t transmit_length, receive_length, mode;
+ struct lpfc_mbx_sli4_config *sli4_config;
+ struct lpfc_mbx_nembed_cmd *nembed_sge;
+ struct mbox_header *header;
+ struct ulp_bde64 *bde;
+ uint8_t *ext = NULL;
+ int rc = 0;
+ uint8_t *from;
+ uint32_t size;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* sanity check to protect driver */
+ if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
+ job->request_payload.payload_len > BSG_MBOX_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ /*
+ * Don't allow mailbox commands to be sent when blocked or when in
+ * the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* check if requested extended data lengths are valid */
+ if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
+ (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!dmabuf || !dmabuf->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* Get the mailbox command or external buffer from BSG */
+ pmbx = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, pmbx, size);
+
+ /* Handle possible SLI_CONFIG with non-embedded payloads */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+ if (rc == SLI_CONFIG_HANDLED)
+ goto job_cont;
+ if (rc)
+ goto job_done;
+ /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+ }
+
+ rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+ if (rc != 0)
+ goto job_done; /* must be negative */
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2727 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = vport;
+
+ /* If HBA encountered an error attention, allow only DUMP
+ * or RESTART mailbox commands until the HBA is restarted.
+ */
+ if (phba->pport->stopped &&
+ pmb->mbxCommand != MBX_DUMP_MEMORY &&
+ pmb->mbxCommand != MBX_RESTART &&
+ pmb->mbxCommand != MBX_WRITE_VPARMS &&
+ pmb->mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "2797 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+ pmb->mbxCommand);
+
+ /* extended mailbox commands will need an extended buffer */
+ if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
+ pmboxq->context2 = ext;
+ pmboxq->in_ext_byte_len =
+ mbox_req->inExtWLen * sizeof(uint32_t);
+ pmboxq->out_ext_byte_len =
+ mbox_req->outExtWLen * sizeof(uint32_t);
+ pmboxq->mbox_offset_word = mbox_req->mbOffset;
+ }
+
+ /* biu diag will need a kernel buffer to transfer the data
+ * allocate our own buffer and setup the mailbox command to
+ * use ours
+ */
+ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+ transmit_length = pmb->un.varWords[1];
+ receive_length = pmb->un.varWords[4];
+ /* transmit length cannot be greater than receive length or
+ * mailbox extension size
+ */
+ if ((transmit_length > receive_length) ||
+ (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
+
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
+ } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
+ rdEventLog = &pmb->un.varRdEventLog;
+ receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+ mode = bf_get(lpfc_event_log, rdEventLog);
+
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ /* mode zero uses a bde like biu diags command */
+ if (mode == 0) {
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ }
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* Let type 4 (well known data) through because the data is
+ * returned in varwords[4-8]
+ * otherwise check the recieve length and fetch the buffer addr
+ */
+ if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
+ (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+ receive_length = pmb->un.varWords[2];
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if (receive_length == 0) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
+ pmb->un.varUpdateCfg.co) {
+ bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
+
+ /* bde size cannot be greater than mailbox ext size */
+ if (bde->tus.f.bdeSize >
+ BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+ bde->addrHigh = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ bde->addrLow = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
+ /* Handling non-embedded SLI_CONFIG mailbox command */
+ sli4_config = &pmboxq->u.mqe.un.sli4_config;
+ if (!bf_get(lpfc_mbox_hdr_emb,
+ &sli4_config->header.cfg_mhdr)) {
+ /* rebuild the command for sli4 using our
+ * own buffers like we do for biu diags
+ */
+ header = (struct mbox_header *)
+ &pmb->un.varWords[0];
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &pmb->un.varWords[0];
+ receive_length = nembed_sge->sge[0].length;
+
+ /* receive length cannot be greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length >
+ BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ nembed_sge->sge[0].pa_hi =
+ putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ nembed_sge->sge[0].pa_lo =
+ putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ }
+ }
+ }
+
+ dd_data->context_un.mbox.dmabuffers = dmabuf;
+
+ /* setup wake call as IOCB callback */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
+
+ /* setup context field to pass wait_queue pointer to wake function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ dd_data->context_un.mbox.ext = ext;
+ dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+ dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+ dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
+ job->dd_data = dd_data;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ goto job_done;
+ }
+
+ /* job finished, copy the data */
+ memcpy(pmbx, pmb, sizeof(*pmb));
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmbx, size);
+ /* not waiting mbox already done */
+ rc = 0;
+ goto job_done;
+ }
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+ return 1; /* job started */
+
+job_done:
+ /* common exit for error or job completed inline */
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+job_cont:
+ return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
+ * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
+ **/
+static int
+lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct dfc_mbox_req *mbox_req;
+ int rc = 0;
+
+ /* mix-and-match backward compatibility */
+ job->reply->reply_payload_rcv_len = 0;
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2737 Mix-and-match backward compatibility "
+ "between MBOX_REQ old size:%d and "
+ "new request size:%d\n",
+ (int)(job->request_len -
+ sizeof(struct fc_bsg_request)),
+ (int)sizeof(struct dfc_mbox_req));
+ mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ mbox_req->extMboxTag = 0;
+ mbox_req->extSeqNum = 0;
+ }
+
+ rc = lpfc_bsg_issue_mbox(phba, job, vport);
+
+ if (rc == 0) {
+ /* job done */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ job->job_done(job);
+ } else if (rc == 1)
+ /* job submitted, will complete later*/
+ rc = 0; /* return zero, no error */
+ else {
+ /* some error occurred */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_menlo_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp, *cmp, *rmp;
+ struct lpfc_bsg_menlo *menlo;
+ unsigned long flags;
+ struct menlo_response *menlo_resp;
+ unsigned int rsp_size;
+ int rc = 0;
+
+ dd_data = cmdiocbq->context1;
+ cmp = cmdiocbq->context2;
+ bmp = cmdiocbq->context3;
+ menlo = &dd_data->context_un.menlo;
+ rmp = menlo->rmp;
+ rsp = &rspiocbq->iocb;
+
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Copy the job data or set the failing status for the job */
+
+ if (job) {
+ /* always return the xri, this would be used in the case
+ * of a menlo download to allow the data to be sent as a
+ * continuation of the exchange.
+ */
+
+ menlo_resp = (struct menlo_response *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+ menlo_resp->xri = rsp->ulpContext;
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else {
+ rc = -EACCES;
+ }
+ } else {
+ rsp_size = rsp->un.genreq64.bdl.bdeSize;
+ job->reply->reply_payload_rcv_len =
+ lpfc_bsg_copy_data(rmp, &job->reply_payload,
+ rsp_size, 0);
+ }
+
+ }
+
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_free_bsg_buffers(phba, cmp);
+ lpfc_free_bsg_buffers(phba, rmp);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ kfree(dd_data);
+
+ /* Complete the job if active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
+
+ return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
+ * all the command completions will return the xri for the command.
+ * For menlo data requests a gen request 64 CX is used to continue the exchange
+ * supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocbq;
+ IOCB_t *cmd;
+ int rc = 0;
+ struct menlo_command *menlo_cmd;
+ struct menlo_response *menlo_resp;
+ struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
+ int request_nseg;
+ int reply_nseg;
+ struct bsg_job_data *dd_data;
+ struct ulp_bde64 *bpl = NULL;
+
+ /* in case no data is returned return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct menlo_command)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2784 Received MENLO_CMD request below "
+ "minimum size\n");
+ rc = -ERANGE;
+ goto no_dd_data;
+ }
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2785 Received MENLO_CMD reply below "
+ "minimum size\n");
+ rc = -ERANGE;
+ goto no_dd_data;
+ }
+
+ if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2786 Adapter does not support menlo "
+ "commands\n");
+ rc = -EPERM;
+ goto no_dd_data;
+ }
+
+ menlo_cmd = (struct menlo_command *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ menlo_resp = (struct menlo_response *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2787 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto free_dd;
+ }
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
+ rc = -ENOMEM;
+ goto free_bmp;
+ }
+
+ INIT_LIST_HEAD(&bmp->list);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+ cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+ 1, bpl, &request_nseg);
+ if (!cmp) {
+ rc = -ENOMEM;
+ goto free_bmp;
+ }
+ lpfc_bsg_copy_data(cmp, &job->request_payload,
+ job->request_payload.payload_len, 1);
+
+ bpl += request_nseg;
+ reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+ rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+ bpl, &reply_nseg);
+ if (!rmp) {
+ rc = -ENOMEM;
+ goto free_cmp;
+ }
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = -ENOMEM;
+ goto free_rmp;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize =
+ (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+ cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+ cmd->ulpBdeCount = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpOwner = OWN_CHIP;
+ cmd->ulpLe = 1; /* Limited Edition */
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->vport = phba->pport;
+ /* We want the firmware to timeout before we do */
+ cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+ cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+ cmdiocbq->context1 = dd_data;
+ cmdiocbq->context2 = cmp;
+ cmdiocbq->context3 = bmp;
+ if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->ulpPU = MENLO_PU; /* 3 */
+ cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+ cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+ } else {
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+ cmd->ulpPU = 1;
+ cmd->un.ulpWord[4] = 0;
+ cmd->ulpContext = menlo_cmd->xri;
+ }
+
+ dd_data->type = TYPE_MENLO;
+ dd_data->set_job = job;
+ dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+ dd_data->context_un.menlo.rmp = rmp;
+ job->dd_data = dd_data;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+ MENLO_TIMEOUT - 5);
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
+
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+free_rmp:
+ lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+ lpfc_free_bsg_buffers(phba, cmp);
+free_bmp:
+ if (bmp->virt)
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+free_dd:
+ kfree(dd_data);
+no_dd_data:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+{
+ int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ int rc;
+
+ switch (command) {
+ case LPFC_BSG_VENDOR_SET_CT_EVENT:
+ rc = lpfc_bsg_hba_set_event(job);
+ break;
+ case LPFC_BSG_VENDOR_GET_CT_EVENT:
+ rc = lpfc_bsg_hba_get_event(job);
+ break;
+ case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
+ rc = lpfc_bsg_send_mgmt_rsp(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE:
+ rc = lpfc_bsg_diag_loopback_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE_END:
+ rc = lpfc_sli4_bsg_diag_mode_end(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+ rc = lpfc_bsg_diag_loopback_run(job);
+ break;
+ case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+ rc = lpfc_sli4_bsg_link_diag_test(job);
+ break;
+ case LPFC_BSG_VENDOR_GET_MGMT_REV:
+ rc = lpfc_bsg_get_dfc_rev(job);
+ break;
+ case LPFC_BSG_VENDOR_MBOX:
+ rc = lpfc_bsg_mbox_cmd(job);
+ break;
+ case LPFC_BSG_VENDOR_MENLO_CMD:
+ case LPFC_BSG_VENDOR_MENLO_DATA:
+ rc = lpfc_menlo_cmd(job);
+ break;
+ default:
+ rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_request - handle a bsg request from the FC transport
+ * @job: fc_bsg_job to handle
+ **/
+int
+lpfc_bsg_request(struct fc_bsg_job *job)
+{
+ uint32_t msgcode;
+ int rc;
+
+ msgcode = job->request->msgcode;
+ switch (msgcode) {
+ case FC_BSG_HST_VENDOR:
+ rc = lpfc_bsg_hst_vendor(job);
+ break;
+ case FC_BSG_RPT_ELS:
+ rc = lpfc_bsg_rport_els(job);
+ break;
+ case FC_BSG_RPT_CT:
+ rc = lpfc_bsg_send_mgmt_cmd(job);
+ break;
+ default:
+ rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
+ * @job: fc_bsg_job that has timed out
+ *
+ * This function just aborts the job's IOCB. The aborted IOCB will return to
+ * the waiting function which will handle passing the error back to userspace
+ **/
+int
+lpfc_bsg_timeout(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct bsg_job_data *dd_data;
+ unsigned long flags;
+ int rc = 0;
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *check_iocb, *next_iocb;
+
+ /* if job's driver data is NULL, the command completed or is in the
+ * the process of completing. In this case, return status to request
+ * so the timeout is retried. This avoids double completion issues
+ * and the request will be pulled off the timer queue when the
+ * command's completion handler executes. Otherwise, prevent the
+ * command's completion handler from executing the job done callback
+ * and continue processing to abort the outstanding the command.
+ */
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = (struct bsg_job_data *)job->dd_data;
+ if (dd_data) {
+ dd_data->set_job = NULL;
+ job->dd_data = NULL;
+ } else {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return -EAGAIN;
+ }
+
+ switch (dd_data->type) {
+ case TYPE_IOCB:
+ /* Check to see if IOCB was issued to the port or not. If not,
+ * remove it from the txq queue and call cancel iocbs.
+ * Otherwise, call abort iotag
+ */
+ cmdiocb = dd_data->context_un.iocb.cmdiocbq;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O abort window is still open */
+ if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return -EAGAIN;
+ }
+ list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+ list) {
+ if (check_iocb == cmdiocb) {
+ list_move_tail(&check_iocb->list, &completions);
+ break;
+ }
+ }
+ if (list_empty(&completions))
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (!list_empty(&completions)) {
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ }
+ break;
+
+ case TYPE_EVT:
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ break;
+
+ case TYPE_MBOX:
+ /* Update the ext buf ctx state if needed */
+
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ break;
+ case TYPE_MENLO:
+ /* Check to see if IOCB was issued to the port or not. If not,
+ * remove it from the txq queue and call cancel iocbs.
+ * Otherwise, call abort iotag.
+ */
+ cmdiocb = dd_data->context_un.menlo.cmdiocbq;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+ list) {
+ if (check_iocb == cmdiocb) {
+ list_move_tail(&check_iocb->list, &completions);
+ break;
+ }
+ }
+ if (list_empty(&completions))
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (!list_empty(&completions)) {
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ }
+ break;
+ default:
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ break;
+ }
+
+ /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
+ * otherwise an error message will be displayed on the console
+ * so always return success (zero)
+ */
+ return rc;
+}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 000000000..e557bcdbc
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,289 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2010-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+/* bsg definitions
+ * No pointers to user data are allowed, all application buffers and sizes will
+ * derived through the bsg interface.
+ *
+ * These are the vendor unique structures passed in using the bsg
+ * FC_BSG_HST_VENDOR message code type.
+ */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
+
+struct set_ct_event {
+ uint32_t command;
+ uint32_t type_mask;
+ uint32_t ev_req_id;
+ uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+ uint32_t command;
+ uint32_t ev_reg_id;
+ uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+ uint32_t immed_data;
+ uint32_t type;
+};
+
+struct send_mgmt_resp {
+ uint32_t command;
+ uint32_t tag;
+};
+
+
+#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
+#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
+
+struct diag_mode_set {
+ uint32_t command;
+ uint32_t type;
+ uint32_t timeout;
+};
+
+struct sli4_link_diag {
+ uint32_t command;
+ uint32_t timeout;
+ uint32_t test_id;
+ uint32_t loops;
+ uint32_t test_version;
+ uint32_t error_action;
+};
+
+struct diag_mode_test {
+ uint32_t command;
+};
+
+struct diag_status {
+ uint32_t mbox_status;
+ uint32_t shdr_status;
+ uint32_t shdr_add_status;
+};
+
+#define LPFC_WWNN_TYPE 0
+#define LPFC_WWPN_TYPE 1
+
+struct get_mgmt_rev {
+ uint32_t command;
+};
+
+#define MANAGEMENT_MAJOR_REV 1
+#define MANAGEMENT_MINOR_REV 1
+
+/* the MgmtRevInfo structure */
+struct MgmtRevInfo {
+ uint32_t a_Major;
+ uint32_t a_Minor;
+};
+
+struct get_mgmt_rev_reply {
+ struct MgmtRevInfo info;
+};
+
+#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
+struct dfc_mbox_req {
+ uint32_t command;
+ uint32_t mbOffset;
+ uint32_t inExtWLen;
+ uint32_t outExtWLen;
+ uint32_t extMboxTag;
+ uint32_t extSeqNum;
+};
+
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+ uint32_t cmd;
+ uint32_t xri;
+};
+
+struct menlo_response {
+ uint32_t xri; /* return the xri of the iocb exchange */
+};
+
+/*
+ * macros and data structures for handling sli-config mailbox command
+ * pass-through support, this header file is shared between user and
+ * kernel spaces, note the set of macros are duplicates from lpfc_hw4.h,
+ * with macro names prefixed with bsg_, as the macros defined in
+ * lpfc_hw4.h are not accessible from user space.
+ */
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * EG. For a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size the following #defines must exist:
+ * struct temp {
+ * uint32_t field1;
+ * uint32_t field2;
+ * uint32_t field3;
+ * uint32_t field4;
+ * #define example_bit_field_SHIFT 7
+ * #define example_bit_field_MASK 0x03
+ * #define example_bit_field_WORD field4
+ * uint32_t field5;
+ * };
+ * Then the macros below may be used to get or set the value of that field.
+ * EG. To get the value of the bit field from the above example:
+ * struct temp t1;
+ * value = bsg_bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ * bsg_bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ * bsg_bf_set(example_bit_field, &t1, 0);
+ */
+#define bsg_bf_get_le32(name, ptr) \
+ ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bsg_bf_get(name, ptr) \
+ (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bsg_bf_set_le32(name, ptr, value) \
+ ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+ name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+ ~(name##_MASK << name##_SHIFT)))))
+#define bsg_bf_set(name, ptr, value) \
+ ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+ ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
+/*
+ * The sli_config structure specified here is based on the following
+ * restriction:
+ *
+ * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without
+ * carrying HBD.
+ * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or
+ * without carrying HBDs.
+ */
+
+struct lpfc_sli_config_mse {
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+ uint32_t buf_len;
+#define lpfc_mbox_sli_config_mse_len_SHIFT 0
+#define lpfc_mbox_sli_config_mse_len_MASK 0xffffff
+#define lpfc_mbox_sli_config_mse_len_WORD buf_len
+};
+
+struct lpfc_sli_config_hbd {
+ uint32_t buf_len;
+#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
+#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
+#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD buf_len
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+};
+
+struct lpfc_sli_config_hdr {
+ uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT 0
+#define lpfc_mbox_hdr_emb_MASK 0x00000001
+#define lpfc_mbox_hdr_emb_WORD word1
+#define lpfc_mbox_hdr_mse_cnt_SHIFT 3
+#define lpfc_mbox_hdr_mse_cnt_MASK 0x0000001f
+#define lpfc_mbox_hdr_mse_cnt_WORD word1
+ uint32_t payload_length;
+ uint32_t tag_lo;
+ uint32_t tag_hi;
+ uint32_t reserved5;
+};
+
+struct lpfc_sli_config_emb0_subsys {
+ struct lpfc_sli_config_hdr sli_config_hdr;
+#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
+ struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+ uint32_t padding;
+ uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK 0xff
+#define lpfc_emb0_subcmnd_opcode_WORD word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK 0xff
+#define lpfc_emb0_subcmnd_subsys_WORD word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE 0x0C
+#define FCOE_OPCODE_READ_FCF 0x08
+#define FCOE_OPCODE_ADD_FCF 0x09
+#define FCOE_OPCODE_SET_DPORT_MODE 0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28
+};
+
+struct lpfc_sli_config_emb1_subsys {
+ struct lpfc_sli_config_hdr sli_config_hdr;
+ uint32_t word6;
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK 0xff
+#define lpfc_emb1_subcmnd_opcode_WORD word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK 0xff
+#define lpfc_emb1_subcmnd_subsys_WORD word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define COMN_OPCODE_READ_OBJECT 0xAB
+#define COMN_OPCODE_WRITE_OBJECT 0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
+#define COMN_OPCODE_DELETE_OBJECT 0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
+#define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20
+ uint32_t timeout;
+ uint32_t request_length;
+ uint32_t word9;
+#define lpfc_subcmnd_version_SHIFT 0
+#define lpfc_subcmnd_version_MASK 0xff
+#define lpfc_subcmnd_version_WORD word9
+ uint32_t word10;
+#define lpfc_subcmnd_ask_rd_len_SHIFT 0
+#define lpfc_subcmnd_ask_rd_len_MASK 0xffffff
+#define lpfc_subcmnd_ask_rd_len_WORD word10
+ uint32_t rd_offset;
+ uint32_t obj_name[26];
+ uint32_t hbd_count;
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
+ struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+};
+
+struct lpfc_sli_config_mbox {
+ uint32_t word0;
+#define lpfc_mqe_status_SHIFT 16
+#define lpfc_mqe_status_MASK 0x0000FFFF
+#define lpfc_mqe_status_WORD word0
+#define lpfc_mqe_command_SHIFT 8
+#define lpfc_mqe_command_MASK 0x000000FF
+#define lpfc_mqe_command_WORD word0
+ union {
+ struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+ struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
+ } un;
+};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED 0
+#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
new file mode 100644
index 000000000..c88e556ea
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -0,0 +1,96 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/*
+ * This file provides macros to aid compilation in the Linux 2.4 kernel
+ * over various platform architectures.
+ */
+
+/*******************************************************************
+Note: HBA's SLI memory contains little-endian LW.
+Thus to access it from a little-endian host,
+memcpy_toio() and memcpy_fromio() can be used.
+However on a big-endian host, copy 4 bytes at a time,
+using writel() and readl().
+ *******************************************************************/
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+ uint32_t __iomem *dest32;
+ uint32_t *src32;
+ unsigned int four_bytes;
+
+
+ dest32 = (uint32_t __iomem *) dest;
+ src32 = (uint32_t *) src;
+
+ /* write input bytes, 4 bytes at a time */
+ for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
+ writel( *src32, dest32);
+ readl(dest32); /* flush */
+ dest32++;
+ src32++;
+ }
+
+ return;
+}
+
+static inline void
+lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
+{
+ uint32_t *dest32;
+ uint32_t __iomem *src32;
+ unsigned int four_bytes;
+
+
+ dest32 = (uint32_t *) dest;
+ src32 = (uint32_t __iomem *) src;
+
+ /* read input bytes, 4 bytes at a time */
+ for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
+ *dest32 = readl( src32);
+ dest32++;
+ src32++;
+ }
+
+ return;
+}
+
+#else
+
+static inline void
+lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
+{
+ /* convert bytes in argument list to word count for copy function */
+ __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
+}
+
+static inline void
+lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
+{
+ /* actually returns 1 byte past dest */
+ memcpy_fromio( dest, src, bytes);
+}
+
+#endif /* __BIG_ENDIAN */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
new file mode 100644
index 000000000..587e3e962
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -0,0 +1,500 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+typedef int (*node_filter)(struct lpfc_nodelist *, void *);
+
+struct fc_rport;
+void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli_read_link_ste(struct lpfc_hba *);
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
+void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+ LPFC_MBOXQ_t *, uint16_t);
+void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+ struct lpfc_nodelist *);
+void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_supported_pages(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
+int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t, uint16_t, bool);
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
+void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
+void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
+void lpfc_cleanup_rpis(struct lpfc_vport *, int);
+void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
+int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
+void lpfc_port_link_failure(struct lpfc_vport *);
+void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
+void lpfc_retry_pport_discovery(struct lpfc_hba *);
+void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+
+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
+ struct lpfc_nodelist *, int);
+void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_set_disctmo(struct lpfc_vport *);
+int lpfc_can_disctmo(struct lpfc_vport *);
+int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_hba_rpis(struct lpfc_hba *);
+void lpfc_unreg_default_rpis(struct lpfc_vport *);
+void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
+
+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
+struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
+int lpfc_nlp_put(struct lpfc_nodelist *);
+int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_vport *);
+void lpfc_disc_start(struct lpfc_vport *);
+void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
+void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_disc_timeout(unsigned long);
+
+int lpfc_unregister_fcf_prep(struct lpfc_hba *);
+struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+void lpfc_worker_wake_up(struct lpfc_hba *);
+int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+int lpfc_do_work(void *);
+int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
+ uint32_t);
+
+void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
+int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
+ struct serv_parm *, uint32_t, int);
+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_more_plogi(struct lpfc_vport *);
+void lpfc_more_adisc(struct lpfc_vport *);
+void lpfc_end_rscn(struct lpfc_vport *);
+int lpfc_els_chk_latt(struct lpfc_vport *);
+int lpfc_els_abort_flogi(struct lpfc_hba *);
+int lpfc_initial_flogi(struct lpfc_vport *);
+void lpfc_issue_init_vfi(struct lpfc_vport *);
+int lpfc_initial_fdisc(struct lpfc_vport *);
+int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
+int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_els_handle_rscn(struct lpfc_vport *);
+void lpfc_els_flush_rscn(struct lpfc_vport *);
+int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
+void lpfc_els_flush_all_cmd(struct lpfc_hba *);
+void lpfc_els_flush_cmd(struct lpfc_vport *);
+int lpfc_els_disc_adisc(struct lpfc_vport *);
+int lpfc_els_disc_plogi(struct lpfc_vport *);
+void lpfc_els_timeout(unsigned long);
+void lpfc_els_timeout_handler(struct lpfc_vport *);
+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
+ uint8_t, struct lpfc_nodelist *,
+ uint32_t, uint32_t);
+void lpfc_hb_timeout_handler(struct lpfc_hba *);
+
+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
+int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_fdmi_tmo(unsigned long);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
+
+int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
+int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_hba_down_prep(struct lpfc_hba *);
+int lpfc_hba_down_post(struct lpfc_hba *);
+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
+int lpfc_online(struct lpfc_hba *);
+void lpfc_unblock_mgmt_io(struct lpfc_hba *);
+void lpfc_offline_prep(struct lpfc_hba *, int);
+void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
+
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
+int lpfc_sli_setup(struct lpfc_hba *);
+int lpfc_sli_queue_setup(struct lpfc_hba *);
+
+void lpfc_handle_eratt(struct lpfc_hba *);
+void lpfc_handle_latt(struct lpfc_hba *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+
+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
+int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
+void lpfc_issue_init_vpi(struct lpfc_vport *);
+
+void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
+ uint32_t , LPFC_MBOXQ_t *);
+struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
+void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+ uint16_t);
+void lpfc_unregister_fcf(struct lpfc_hba *);
+void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
+int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
+void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
+void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
+
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
+void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
+void lpfc_stop_vport_timers(struct lpfc_vport *);
+
+void lpfc_poll_timeout(unsigned long ptr);
+void lpfc_poll_start_timer(struct lpfc_hba *);
+void lpfc_poll_eratt(unsigned long);
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
+ struct lpfc_sli_ring *, uint32_t);
+
+struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *);
+struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
+void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
+void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
+ uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_selective_reset(struct lpfc_hba *);
+void lpfc_reset_barrier(struct lpfc_hba *);
+int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
+int lpfc_sli_brdkill(struct lpfc_hba *);
+int lpfc_sli_brdreset(struct lpfc_hba *);
+int lpfc_sli_brdrestart(struct lpfc_hba *);
+int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_config_port(struct lpfc_hba *, int);
+int lpfc_sli_host_down(struct lpfc_vport *);
+int lpfc_sli_hba_down(struct lpfc_hba *);
+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int);
+int lpfc_sli_check_eratt(struct lpfc_hba *);
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+ struct lpfc_sli_ring *, uint32_t);
+void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
+ struct lpfc_iocbq *, uint32_t);
+void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_dmabuf *);
+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
+ struct lpfc_sli_ring *,
+ dma_addr_t);
+
+uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *);
+struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *,
+ struct lpfc_sli_ring *, uint32_t );
+
+int lpfc_sli_hbq_count(void);
+int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
+void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
+int lpfc_sli_hbq_size(void);
+int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
+ uint64_t, lpfc_ctx_cmd);
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+ uint16_t, uint64_t, lpfc_ctx_cmd);
+
+void lpfc_mbox_timeout(unsigned long);
+void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
+struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
+ struct lpfc_name *);
+
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
+ struct lpfc_iocbq *, struct lpfc_iocbq *,
+ uint32_t);
+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+
+void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
+
+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+
+void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
+/* Function prototypes. */
+const char* lpfc_info(struct Scsi_Host *);
+int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
+
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
+void lpfc_get_cfgparam(struct lpfc_hba *);
+void lpfc_get_vport_cfgparam(struct lpfc_vport *);
+int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
+void lpfc_free_sysfs_attr(struct lpfc_vport *);
+extern struct device_attribute *lpfc_hba_attrs[];
+extern struct device_attribute *lpfc_vport_attrs[];
+extern struct scsi_host_template lpfc_template;
+extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_vport_template;
+extern struct fc_function_template lpfc_transport_functions;
+extern struct fc_function_template lpfc_vport_transport_functions;
+extern int lpfc_sli_mode;
+extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;
+
+int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
+void lpfc_terminate_rport_io(struct fc_rport *);
+void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
+
+struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
+int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
+int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+void destroy_port(struct lpfc_vport *);
+int lpfc_get_instance(void);
+void lpfc_host_attrib_init(struct Scsi_Host *);
+
+extern void lpfc_debugfs_initialize(struct lpfc_vport *);
+extern void lpfc_debugfs_terminate(struct lpfc_vport *);
+extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
+ uint32_t, uint32_t);
+extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
+ uint32_t, uint32_t);
+extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
+ uint16_t *, uint16_t *);
+int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
+ uint16_t *, uint16_t *);
+
+/* externs BlockGuard */
+extern char *_dump_buf_data;
+extern unsigned long _dump_buf_data_order;
+extern char *_dump_buf_dif;
+extern unsigned long _dump_buf_dif_order;
+extern spinlock_t _dump_buf_lock;
+extern int _dump_buf_done;
+extern spinlock_t pgcnt_lock;
+extern unsigned int pgcnt;
+extern unsigned int lpfc_prot_mask;
+extern unsigned char lpfc_prot_guard;
+extern unsigned int lpfc_fcp_look_ahead;
+
+/* Interface exported by fabric iocb scheduler */
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+void lpfc_fabric_abort_hba(struct lpfc_hba *);
+void lpfc_fabric_block_timeout(unsigned long);
+void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
+void lpfc_rampdown_queue_depth(struct lpfc_hba *);
+void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+
+void
+lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
+void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
+struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
+#define HBA_EVENT_RSCN 5
+#define HBA_EVENT_LINK_UP 2
+#define HBA_EVENT_LINK_DOWN 3
+
+/* functions to support SGIOv4/bsg interface */
+int lpfc_bsg_request(struct fc_bsg_job *);
+int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
+ struct lpfc_sli_ring *);
+int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
+ struct lpfc_iocbq *, uint32_t);
+uint32_t lpfc_drain_txq(struct lpfc_hba *);
+void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *);
+int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t);
+void lpfc_handle_rrq_active(struct lpfc_hba *);
+int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
+int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
+ uint16_t, uint16_t, uint16_t);
+uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
+ uint32_t);
+void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
+ enum mbox_type, enum dma_type, enum sta_type,
+ struct lpfc_dmabuf *, uint32_t);
+void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
+uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
+int lpfc_sli4_queue_create(struct lpfc_hba *);
+void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct sli4_wcqe_xri_aborted *);
+void lpfc_sli_abts_recover_port(struct lpfc_vport *,
+ struct lpfc_nodelist *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *);
+void lpfc_sli4_node_prep(struct lpfc_hba *);
+int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
+void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
+uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
+int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+ struct lpfc_name *,
+ struct lpfc_name *,
+ uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+ struct list_head *list,
+ struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
new file mode 100644
index 000000000..af129966b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -0,0 +1,2278 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/*
+ * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* FDMI Port Speed definitions */
+#define HBA_PORTSPEED_1GBIT 0x0001 /* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT 0x0002 /* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT 0x0008 /* 4 GBit/sec */
+#define HBA_PORTSPEED_10GBIT 0x0004 /* 10 GBit/sec */
+#define HBA_PORTSPEED_8GBIT 0x0010 /* 8 GBit/sec */
+#define HBA_PORTSPEED_16GBIT 0x0020 /* 16 GBit/sec */
+#define HBA_PORTSPEED_UNKNOWN 0x0800 /* Unknown */
+
+#define FOURBYTES 4
+
+
+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+
+static void
+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
+{
+ if (!mp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0146 Ignoring unsolicited CT No HBQ "
+ "status = x%x\n",
+ piocbq->iocb.ulpStatus);
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0145 Ignoring unsolicted CT HBQ Size:%d "
+ "status = x%x\n",
+ size, piocbq->iocb.ulpStatus);
+}
+
+static void
+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
+{
+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
+}
+
+void
+lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocbq)
+{
+ struct lpfc_dmabuf *mp = NULL;
+ IOCB_t *icmd = &piocbq->iocb;
+ int i;
+ struct lpfc_iocbq *iocbq;
+ dma_addr_t paddr;
+ uint32_t size;
+ struct list_head head;
+ struct lpfc_dmabuf *bdeBuf;
+
+ if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+ return;
+
+ if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_RCV_BUFFER_WAITING)) {
+ /* Not enough posted buffers; Try posting more buffers */
+ phba->fc_stat.NoRcvBuf++;
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba, pring, 2);
+ return;
+ }
+
+ /* If there are no BDEs associated with this IOCB,
+ * there is nothing to do.
+ */
+ if (icmd->ulpBdeCount == 0)
+ return;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+ list_for_each_entry(iocbq, &head, list) {
+ icmd = &iocbq->iocb;
+ if (icmd->ulpBdeCount == 0)
+ continue;
+ bdeBuf = iocbq->context2;
+ iocbq->context2 = NULL;
+ size = icmd->un.cont64[0].tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
+ lpfc_in_buf_free(phba, bdeBuf);
+ if (icmd->ulpBdeCount == 2) {
+ bdeBuf = iocbq->context3;
+ iocbq->context3 = NULL;
+ size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+ size);
+ lpfc_in_buf_free(phba, bdeBuf);
+ }
+ }
+ list_del(&head);
+ } else {
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+ list_for_each_entry(iocbq, &head, list) {
+ icmd = &iocbq->iocb;
+ if (icmd->ulpBdeCount == 0)
+ lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
+ for (i = 0; i < icmd->ulpBdeCount; i++) {
+ paddr = getPaddr(icmd->un.cont64[i].addrHigh,
+ icmd->un.cont64[i].addrLow);
+ mp = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ size = icmd->un.cont64[i].tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
+ lpfc_in_buf_free(phba, mp);
+ }
+ lpfc_post_buffer(phba, pring, i);
+ }
+ list_del(&head);
+ }
+}
+
+/**
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function serves as the upper level protocol abort handler for CT
+ * protocol.
+ *
+ * Return 1 if abort has been handled, 0 otherwise.
+ **/
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+ int handled;
+
+ /* CT upper level goes through BSG */
+ handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
+
+ return handled;
+}
+
+static void
+lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+ struct lpfc_dmabuf *mlast, *next_mlast;
+
+ list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+ list_del(&mlast->list);
+ kfree(mlast);
+ }
+ lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+ kfree(mlist);
+ return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
+ uint32_t size, int *entries)
+{
+ struct lpfc_dmabuf *mlist = NULL;
+ struct lpfc_dmabuf *mp;
+ int cnt, i = 0;
+
+ /* We get chunks of FCELSSIZE */
+ cnt = size > FCELSSIZE ? FCELSSIZE: size;
+
+ while (size) {
+ /* Allocate buffer for rsp payload */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ if (mlist)
+ lpfc_free_ct_rsp(phba, mlist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+
+ if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
+ cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+ else
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+
+ if (!mp->virt) {
+ kfree(mp);
+ if (mlist)
+ lpfc_free_ct_rsp(phba, mlist);
+ return NULL;
+ }
+
+ /* Queue it to a linked list */
+ if (!mlist)
+ mlist = mp;
+ else
+ list_add_tail(&mp->list, &mlist->list);
+
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ /* build buffer ptr list for IOCB */
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+ bpl->tus.f.bdeSize = (uint16_t) cnt;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ i++;
+ size -= cnt;
+ }
+
+ *entries = i;
+ return mlist;
+}
+
+int
+lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
+{
+ struct lpfc_dmabuf *buf_ptr;
+
+ if (ctiocb->context_un.ndlp) {
+ lpfc_nlp_put(ctiocb->context_un.ndlp);
+ ctiocb->context_un.ndlp = NULL;
+ }
+ if (ctiocb->context1) {
+ buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ ctiocb->context1 = NULL;
+ }
+ if (ctiocb->context2) {
+ lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
+ ctiocb->context2 = NULL;
+ }
+
+ if (ctiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ ctiocb->context3 = NULL;
+ }
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ return 0;
+}
+
+static int
+lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+ struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
+ struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+ uint32_t tmo, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *geniocb;
+ int rc;
+
+ /* Allocate buffer for command iocb */
+ geniocb = lpfc_sli_get_iocbq(phba);
+
+ if (geniocb == NULL)
+ return 1;
+
+ icmd = &geniocb->iocb;
+ icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
+
+ if (usr_flg)
+ geniocb->context3 = NULL;
+ else
+ geniocb->context3 = (uint8_t *) bmp;
+
+ /* Save for completion so we can release these resources */
+ geniocb->context1 = (uint8_t *) inp;
+ geniocb->context2 = (uint8_t *) outp;
+ geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
+
+ /* Fill in payload, bp points to frame payload */
+ icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+
+ /* Fill in rest of iocb */
+ icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+ icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ if (!tmo) {
+ /* FC spec states we need 3 * ratov for CT requests */
+ tmo = (3 * phba->fc_ratov);
+ }
+ icmd->ulpTimeout = tmo;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+ icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ /* For GEN_REQUEST64_CR, use the RPI */
+ icmd->ulpCt_h = 0;
+ icmd->ulpCt_l = 0;
+ }
+
+ /* Issue GEN REQ IOCB for NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0119 Issue GEN REQ IOCB to NPORT x%x "
+ "Data: x%x x%x\n",
+ ndlp->nlp_DID, icmd->ulpIoTag,
+ vport->port_state);
+ geniocb->iocb_cmpl = cmpl;
+ geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ geniocb->vport = vport;
+ geniocb->retry = retry;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+
+ if (rc == IOCB_ERROR) {
+ lpfc_sli_release_iocbq(phba, geniocb);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
+ struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
+ uint32_t rsp_size, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
+ struct lpfc_dmabuf *outmp;
+ int cnt = 0, status;
+ int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
+ CommandResponse.bits.CmdRsp;
+
+ bpl++; /* Skip past ct request */
+
+ /* Put buffer(s) for ct rsp in bpl */
+ outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
+ if (!outmp)
+ return -ENOMEM;
+ /*
+ * Form the CT IOCB. The total number of BDEs in this IOCB
+ * is the single command plus response count from
+ * lpfc_alloc_ct_rsp.
+ */
+ cnt += 1;
+ status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
+ cnt, 0, retry);
+ if (status) {
+ lpfc_free_ct_rsp(phba, outmp);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+struct lpfc_vport *
+lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
+ struct lpfc_vport *vport_curr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry(vport_curr, &phba->port_list, listentry) {
+ if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return vport_curr;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return NULL;
+}
+
+static int
+lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ct_request *Response =
+ (struct lpfc_sli_ct_request *) mp->virt;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_dmabuf *mlast, *next_mp;
+ uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
+ uint32_t Did, CTentry;
+ int Cnt;
+ struct list_head head;
+
+ lpfc_set_disctmo(vport);
+ vport->num_disc_nodes = 0;
+ vport->fc_ns_retry = 0;
+
+
+ list_add_tail(&head, &mp->list);
+ list_for_each_entry_safe(mp, next_mp, &head, list) {
+ mlast = mp;
+
+ Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
+
+ Size -= Cnt;
+
+ if (!ctptr) {
+ ctptr = (uint32_t *) mlast->virt;
+ } else
+ Cnt -= 16; /* subtract length of CT header */
+
+ /* Loop through entire NameServer list of DIDs */
+ while (Cnt >= sizeof (uint32_t)) {
+ /* Get next DID from NameServer List */
+ CTentry = *ctptr++;
+ Did = ((be32_to_cpu(CTentry)) & Mask_DID);
+
+ ndlp = NULL;
+
+ /*
+ * Check for rscn processing or not
+ * To conserve rpi's, filter out addresses for other
+ * vports on the same physical HBAs.
+ */
+ if ((Did != vport->fc_myDID) &&
+ ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
+ vport->cfg_peer_port_login)) {
+ if ((vport->port_type != LPFC_NPIV_PORT) ||
+ (!(vport->ct_flags & FC_CT_RFF_ID)) ||
+ (!vport->cfg_restrict_login)) {
+ ndlp = lpfc_setup_disc_node(vport, Did);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Parse GID_FTrsp: "
+ "did:x%x flg:x%x x%x",
+ Did, ndlp->nlp_flag,
+ vport->fc_flag);
+
+ lpfc_printf_vlog(vport,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "0238 Process "
+ "x%x NameServer Rsp"
+ "Data: x%x x%x x%x\n",
+ Did, ndlp->nlp_flag,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Skip1 GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ lpfc_printf_vlog(vport,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "0239 Skip x%x "
+ "NameServer Rsp Data: "
+ "x%x x%x\n",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+
+ } else {
+ if (!(vport->fc_flag & FC_RSCN_MODE) ||
+ (lpfc_rscn_payload_check(vport, Did))) {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Query GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ /* This NPortID was previously
+ * a FCP target, * Don't even
+ * bother to send GFF_ID.
+ */
+ ndlp = lpfc_findnode_did(vport,
+ Did);
+ if (ndlp &&
+ NLP_CHK_NODE_ACT(ndlp)
+ && (ndlp->nlp_type &
+ NLP_FCP_TARGET))
+ lpfc_setup_disc_node
+ (vport, Did);
+ else if (lpfc_ns_cmd(vport,
+ SLI_CTNS_GFF_ID,
+ 0, Did) == 0)
+ vport->num_disc_nodes++;
+ else
+ lpfc_setup_disc_node
+ (vport, Did);
+ }
+ else {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Skip2 GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ lpfc_printf_vlog(vport,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "0245 Skip x%x "
+ "NameServer Rsp Data: "
+ "x%x x%x\n",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+ }
+ }
+ if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
+ goto nsout1;
+ Cnt -= sizeof (uint32_t);
+ }
+ ctptr = NULL;
+
+ }
+
+nsout1:
+ list_del(&head);
+ return 0;
+}
+
+static void
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ /* First save ndlp, before we overwrite it */
+ ndlp = cmdiocb->context_un.ndlp;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+ irsp = &rspiocb->iocb;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT cmpl: status:x%x/x%x rtry:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+
+ /* Don't bother processing response if vport is being torn down. */
+ if (vport->load_flag & FC_UNLOADING) {
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ goto out;
+ }
+
+ if (lpfc_els_chk_latt(vport)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0216 Link event during NS query\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ goto out;
+ }
+ if (lpfc_error_lost_link(irsp)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0226 NS query failed due to link event\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ goto out;
+ }
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+ (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_NO_RESOURCES)
+ vport->fc_ns_retry++;
+
+ /* CT command is being retried */
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+ vport->fc_ns_retry, 0);
+ if (rc == 0)
+ goto out;
+ }
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0257 GID_FT Query error: 0x%x 0x%x\n",
+ irsp->ulpStatus, vport->fc_ns_retry);
+ } else {
+ /* Good status, continue checking */
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0208 NameServer Rsp Data: x%x\n",
+ vport->fc_flag);
+ lpfc_ns_rsp(vport, outp,
+ (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+ } else if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ /* NameServer Rsp Error */
+ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
+ && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0269 No NameServer Entries "
+ "Data: x%x x%x x%x x%x\n",
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation,
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT no entry cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0240 NameServer Rsp Error "
+ "Data: x%x x%x x%x x%x\n",
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation,
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
+ }
+
+
+ } else {
+ /* NameServer Rsp Error */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0241 NameServer Rsp Error "
+ "Data: x%x x%x x%x x%x\n",
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation,
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
+ }
+ }
+ /* Link up / RSCN discovery */
+ if (vport->num_disc_nodes == 0) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ }
+ else
+ lpfc_els_flush_rscn(vport);
+ }
+
+ lpfc_disc_start(vport);
+ }
+out:
+ cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp;
+ int did, rc, retry;
+ uint8_t fbits;
+ struct lpfc_nodelist *ndlp;
+
+ did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
+ did = be32_to_cpu(did);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GFF_ID cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ /* Good status, continue checking */
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
+
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+ if ((fbits & FC4_FEATURE_INIT) &&
+ !(fbits & FC4_FEATURE_TARGET)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0270 Skip x%x GFF "
+ "NameServer Rsp Data: (init) "
+ "x%x x%x\n", did, fbits,
+ vport->fc_rscn_id_cnt);
+ goto out;
+ }
+ }
+ }
+ else {
+ /* Check for retry */
+ if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
+ retry = 1;
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch ((irsp->un.ulpWord[4] &
+ IOERR_PARAM_MASK)) {
+
+ case IOERR_NO_RESOURCES:
+ /* We don't increment the retry
+ * count for this case.
+ */
+ break;
+ case IOERR_LINK_DOWN:
+ case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ retry = 0;
+ break;
+ default:
+ cmdiocb->retry++;
+ }
+ }
+ else
+ cmdiocb->retry++;
+
+ if (retry) {
+ /* CT command is being retried */
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+ cmdiocb->retry, did);
+ if (rc == 0) {
+ /* success */
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+ }
+ }
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0267 NameServer GFF Rsp "
+ "x%x Error (%d %d) Data: x%x x%x\n",
+ did, irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ }
+
+ /* This is a target port, unregistered port, or the GFF_ID failed */
+ ndlp = lpfc_setup_disc_node(vport, did);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0242 Process x%x GFF "
+ "NameServer Rsp Data: x%x x%x x%x\n",
+ did, ndlp->nlp_flag, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0243 Skip x%x GFF "
+ "NameServer Rsp Data: x%x x%x\n", did,
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ }
+out:
+ /* Link up / RSCN discovery */
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
+ if (vport->num_disc_nodes == 0) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ }
+ else
+ lpfc_els_flush_rscn(vport);
+ }
+ lpfc_disc_start(vport);
+ }
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+}
+
+
+static void
+lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp;
+ struct lpfc_dmabuf *outp;
+ IOCB_t *irsp;
+ struct lpfc_sli_ct_request *CTrsp;
+ struct lpfc_nodelist *ndlp;
+ int cmdcode, rc;
+ uint8_t retry;
+ uint32_t latt;
+
+ /* First save ndlp, before we overwrite it */
+ ndlp = cmdiocb->context_un.ndlp;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ irsp = &rspiocb->iocb;
+
+ cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
+ CommandResponse.bits.CmdRsp);
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+
+ latt = lpfc_els_chk_latt(vport);
+
+ /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0209 CT Request completes, latt %d, "
+ "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ latt, irsp->ulpStatus,
+ CTrsp->CommandResponse.bits.CmdRsp,
+ cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "CT cmd cmpl: status:x%x/x%x cmd:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+
+ if (irsp->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0268 NS cmd x%x Error (x%x x%x)\n",
+ cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SLI_DOWN) ||
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED)))
+ goto out;
+
+ retry = cmdiocb->retry;
+ if (retry >= LPFC_MAX_NS_RETRY)
+ goto out;
+
+ retry++;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0250 Retrying NS cmd %x\n", cmdcode);
+ rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
+ if (rc == 0)
+ goto out;
+ }
+
+out:
+ cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RFT_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RNN_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RSPN_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RSNN_NN;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ /* even if it fails we will act as though it succeeded. */
+ vport->ct_flags = 0;
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RFF_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+int
+lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
+ size_t size)
+{
+ int n;
+ uint8_t *wwn = vport->phba->wwpn;
+
+ n = snprintf(symbol, size,
+ "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ wwn[0], wwn[1], wwn[2], wwn[3],
+ wwn[4], wwn[5], wwn[6], wwn[7]);
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ return n;
+
+ if (n < size)
+ n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
+
+ if (n < size &&
+ strlen(vport->fc_vport->symbolic_name))
+ n += snprintf(symbol + n, size - n, " VName-%s",
+ vport->fc_vport->symbolic_name);
+ return n;
+}
+
+int
+lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+ size_t size)
+{
+ char fwrev[FW_REV_STR_SIZE];
+ int n;
+
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+
+ n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version);
+
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename);
+
+ /* Note :- OS name is "Linux" */
+ if (size < n)
+ return n;
+ n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname);
+
+ return n;
+}
+
+static uint32_t
+lpfc_find_map_node(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct Scsi_Host *shost;
+ uint32_t cnt = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_type & NLP_FABRIC)
+ continue;
+ if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
+ cnt++;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return cnt;
+}
+
+/*
+ * lpfc_ns_cmd
+ * Description:
+ * Issue Cmd to NameServer
+ * SLI_CTNS_GID_FT
+ * LI_CTNS_RFT_ID
+ */
+int
+lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
+ uint8_t retry, uint32_t context)
+{
+ struct lpfc_nodelist * ndlp;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *CtReq;
+ struct ulp_bde64 *bpl;
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *) = NULL;
+ uint32_t rsp_size = 1024;
+ size_t size;
+ int rc = 0;
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
+ || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ rc=1;
+ goto ns_cmd_exit;
+ }
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ rc=2;
+ goto ns_cmd_exit;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+ if (!mp->virt) {
+ rc=3;
+ goto ns_cmd_free_mp;
+ }
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc=4;
+ goto ns_cmd_free_mpvirt;
+ }
+
+ INIT_LIST_HEAD(&bmp->list);
+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
+ if (!bmp->virt) {
+ rc=5;
+ goto ns_cmd_free_bmp;
+ }
+
+ /* NameServer Req */
+ lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY,
+ "0236 NameServer Req Data: x%x x%x x%x\n",
+ cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ memset(bpl, 0, sizeof(struct ulp_bde64));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+ bpl->tus.f.bdeFlags = 0;
+ if (cmdcode == SLI_CTNS_GID_FT)
+ bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_GFF_ID)
+ bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RFT_ID)
+ bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RNN_ID)
+ bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSPN_ID)
+ bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSNN_NN)
+ bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_DA_ID)
+ bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RFF_ID)
+ bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
+ else
+ bpl->tus.f.bdeSize = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+ memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ CtReq->RevisionId.bits.InId = 0;
+ CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
+ CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
+ CtReq->CommandResponse.bits.Size = 0;
+ switch (cmdcode) {
+ case SLI_CTNS_GID_FT:
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_GID_FT);
+ CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
+ if (vport->port_state < LPFC_NS_QRY)
+ vport->port_state = LPFC_NS_QRY;
+ lpfc_set_disctmo(vport);
+ cmpl = lpfc_cmpl_ct_cmd_gid_ft;
+ rsp_size = FC_MAX_NS_RSP;
+ break;
+
+ case SLI_CTNS_GFF_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_GFF_ID);
+ CtReq->un.gff.PortId = cpu_to_be32(context);
+ cmpl = lpfc_cmpl_ct_cmd_gff_id;
+ break;
+
+ case SLI_CTNS_RFT_ID:
+ vport->ct_flags &= ~FC_CT_RFT_ID;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RFT_ID);
+ CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+ CtReq->un.rft.fcpReg = 1;
+ cmpl = lpfc_cmpl_ct_cmd_rft_id;
+ break;
+
+ case SLI_CTNS_RNN_ID:
+ vport->ct_flags &= ~FC_CT_RNN_ID;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RNN_ID);
+ CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
+ memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
+ sizeof (struct lpfc_name));
+ cmpl = lpfc_cmpl_ct_cmd_rnn_id;
+ break;
+
+ case SLI_CTNS_RSPN_ID:
+ vport->ct_flags &= ~FC_CT_RSPN_ID;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RSPN_ID);
+ CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
+ size = sizeof(CtReq->un.rspn.symbname);
+ CtReq->un.rspn.len =
+ lpfc_vport_symbolic_port_name(vport,
+ CtReq->un.rspn.symbname, size);
+ cmpl = lpfc_cmpl_ct_cmd_rspn_id;
+ break;
+ case SLI_CTNS_RSNN_NN:
+ vport->ct_flags &= ~FC_CT_RSNN_NN;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RSNN_NN);
+ memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
+ sizeof (struct lpfc_name));
+ size = sizeof(CtReq->un.rsnn.symbname);
+ CtReq->un.rsnn.len =
+ lpfc_vport_symbolic_node_name(vport,
+ CtReq->un.rsnn.symbname, size);
+ cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
+ break;
+ case SLI_CTNS_DA_ID:
+ /* Implement DA_ID Nameserver request */
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_DA_ID);
+ CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
+ cmpl = lpfc_cmpl_ct_cmd_da_id;
+ break;
+ case SLI_CTNS_RFF_ID:
+ vport->ct_flags &= ~FC_CT_RFF_ID;
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_RFF_ID);
+ CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
+ CtReq->un.rff.fbits = FC4_FEATURE_INIT;
+ CtReq->un.rff.type_code = FC_TYPE_FCP;
+ cmpl = lpfc_cmpl_ct_cmd_rff_id;
+ break;
+ }
+ /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ * to hold ndlp reference for the corresponding callback function.
+ */
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
+ /* On success, The cmpl function will free the buffers */
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Issue CT cmd: cmd:x%x did:x%x",
+ cmdcode, ndlp->nlp_DID, 0);
+ return 0;
+ }
+ rc=6;
+
+ /* Decrement ndlp reference count to release ndlp reference held
+ * for the failed command's callback function.
+ */
+ lpfc_nlp_put(ndlp);
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ns_cmd_free_bmp:
+ kfree(bmp);
+ns_cmd_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ns_cmd_free_mp:
+ kfree(mp);
+ns_cmd_exit:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+ cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
+ return 1;
+}
+
+static void
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp = outp->virt;
+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
+ struct lpfc_nodelist *ndlp;
+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+ uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp = &rspiocb->iocb;
+ uint32_t latt;
+
+ latt = lpfc_els_chk_latt(vport);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "FDMI cmpl: status:x%x/x%x latt:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+
+ if (latt || irsp->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0229 FDMI cmd %04x failed, latt = %d "
+ "ulpStatus: x%x, rid x%x\n",
+ be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ goto fail_out;
+ }
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto fail_out;
+
+ if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
+ /* FDMI rsp failed */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0220 FDMI rsp failed Data: x%x\n",
+ be16_to_cpu(fdmi_cmd));
+ }
+
+fail_out:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+}
+
+static void
+lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+ struct lpfc_nodelist *ndlp;
+
+ lpfc_cmpl_ct_cmd_fdmi(phba, cmdiocb, rspiocb);
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /*
+ * Need to cycle thru FDMI registration for discovery
+ * DHBA -> DPRT -> RHBA -> RPA
+ */
+ switch (be16_to_cpu(fdmi_cmd)) {
+ case SLI_MGMT_RHBA:
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
+ break;
+
+ case SLI_MGMT_DHBA:
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
+ break;
+
+ case SLI_MGMT_DPRT:
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
+ break;
+ }
+}
+
+
+int
+lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *CtReq;
+ struct ulp_bde64 *bpl;
+ uint32_t size;
+ uint32_t rsp_size;
+ struct lpfc_fdmi_reg_hba *rh;
+ struct lpfc_fdmi_port_entry *pe;
+ struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_attr_block *ab = NULL;
+ struct lpfc_fdmi_attr_entry *ae;
+ struct lpfc_fdmi_attr_def *ad;
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+
+ if (ndlp == NULL) {
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return 0;
+ cmpl = lpfc_cmpl_ct_cmd_fdmi; /* cmd interface */
+ } else {
+ cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
+ }
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp)
+ goto fdmi_cmd_exit;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+ if (!mp->virt)
+ goto fdmi_cmd_free_mp;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp)
+ goto fdmi_cmd_free_mpvirt;
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
+ if (!bmp->virt)
+ goto fdmi_cmd_free_bmp;
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ /* FDMI request */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0218 FDMI Request Data: x%x x%x x%x\n",
+ vport->fc_flag, vport->port_state, cmdcode);
+ CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+
+ /* First populate the CT_IU preamble */
+ memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ CtReq->RevisionId.bits.InId = 0;
+
+ CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+ CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+
+ CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+ rsp_size = LPFC_BPL_SIZE;
+ size = 0;
+
+ /* Next fill in the specific FDMI cmd information */
+ switch (cmdcode) {
+ case SLI_MGMT_RHAT:
+ case SLI_MGMT_RHBA:
+ {
+ lpfc_vpd_t *vp = &phba->vpd;
+ uint32_t i, j, incr;
+ int len = 0;
+
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ /* HBA Identifier */
+ memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+
+ if (cmdcode == SLI_MGMT_RHBA) {
+ /* Registered Port List */
+ /* One entry (port) per adapter */
+ rh->rpl.EntryCnt = cpu_to_be32(1);
+ memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+
+ /* point to the HBA attribute block */
+ size = 2 * sizeof(struct lpfc_name) +
+ FOURBYTES;
+ } else {
+ size = sizeof(struct lpfc_name);
+ }
+ ab = (struct lpfc_fdmi_attr_block *)
+ ((uint8_t *)rh + size);
+ ab->EntryCnt = 0;
+ size += FOURBYTES;
+
+ /*
+ * Point to beginning of first HBA attribute entry
+ */
+ /* #1 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RHBA_NODENAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ ab->EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #2 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.Manufacturer));
+ ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
+ strncpy(ae->un.Manufacturer, "Emulex Corporation",
+ sizeof(ae->un.Manufacturer));
+ len = strnlen(ae->un.Manufacturer,
+ sizeof(ae->un.Manufacturer));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #3 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.SerialNumber));
+ ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
+ strncpy(ae->un.SerialNumber, phba->SerialNumber,
+ sizeof(ae->un.SerialNumber));
+ len = strnlen(ae->un.SerialNumber,
+ sizeof(ae->un.SerialNumber));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #4 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.Model));
+ ad->AttrType = cpu_to_be16(RHBA_MODEL);
+ strncpy(ae->un.Model, phba->ModelName,
+ sizeof(ae->un.Model));
+ len = strnlen(ae->un.Model, sizeof(ae->un.Model));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #5 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.ModelDescription));
+ ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
+ strncpy(ae->un.ModelDescription, phba->ModelDesc,
+ sizeof(ae->un.ModelDescription));
+ len = strnlen(ae->un.ModelDescription,
+ sizeof(ae->un.ModelDescription));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + 8) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #6 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 8);
+ ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 8);
+ /* Convert JEDEC ID to ascii for hardware version */
+ incr = vp->rev.biuRev;
+ for (i = 0; i < 8; i++) {
+ j = (incr & 0xf);
+ if (j <= 9)
+ ae->un.HardwareVersion[7 - i] =
+ (char)((uint8_t)0x30 +
+ (uint8_t)j);
+ else
+ ae->un.HardwareVersion[7 - i] =
+ (char)((uint8_t)0x61 +
+ (uint8_t)(j - 10));
+ incr = (incr >> 4);
+ }
+ ab->EntryCnt++;
+ size += FOURBYTES + 8;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #7 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.DriverVersion));
+ ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
+ strncpy(ae->un.DriverVersion, lpfc_release_version,
+ sizeof(ae->un.DriverVersion));
+ len = strnlen(ae->un.DriverVersion,
+ sizeof(ae->un.DriverVersion));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #8 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OptionROMVersion));
+ ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
+ strncpy(ae->un.OptionROMVersion, phba->OptionROMVersion,
+ sizeof(ae->un.OptionROMVersion));
+ len = strnlen(ae->un.OptionROMVersion,
+ sizeof(ae->un.OptionROMVersion));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #9 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FirmwareVersion));
+ ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
+ lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
+ 1);
+ len = strnlen(ae->un.FirmwareVersion,
+ sizeof(ae->un.FirmwareVersion));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #10 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OsNameVersion));
+ ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
+ snprintf(ae->un.OsNameVersion,
+ sizeof(ae->un.OsNameVersion),
+ "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
+ len = strnlen(ae->un.OsNameVersion,
+ sizeof(ae->un.OsNameVersion));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /* #11 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType =
+ cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ ae->un.MaxCTPayloadLen = cpu_to_be32(LPFC_MAX_CT_SIZE);
+ ab->EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto hba_out;
+
+ /*
+ * Currently switches don't seem to support the
+ * following extended HBA attributes.
+ */
+ if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB))
+ goto hba_out;
+
+ /* #12 HBA attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)rh + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.NodeSymName));
+ ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
+ len = lpfc_vport_symbolic_node_name(vport,
+ ae->un.NodeSymName, sizeof(ae->un.NodeSymName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+hba_out:
+ ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
+ /* Total size */
+ size = GID_REQUEST_SZ - 4 + size;
+ }
+ break;
+
+ case SLI_MGMT_RPRT:
+ case SLI_MGMT_RPA:
+ {
+ lpfc_vpd_t *vp;
+ struct serv_parm *hsp;
+ int len = 0;
+
+ vp = &phba->vpd;
+
+ if (cmdcode == SLI_MGMT_RPRT) {
+ rh = (struct lpfc_fdmi_reg_hba *)
+ &CtReq->un.PortID;
+ /* HBA Identifier */
+ memcpy(&rh->hi.PortName,
+ &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab = (struct lpfc_fdmi_reg_portattr *)
+ &rh->rpl.EntryCnt;
+ } else
+ pab = (struct lpfc_fdmi_reg_portattr *)
+ &CtReq->un.PortID;
+ size = sizeof(struct lpfc_name) + FOURBYTES;
+ memcpy((uint8_t *)&pab->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt = 0;
+
+ /* #1 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FC4Types));
+ ad->AttrType =
+ cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 32);
+ ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */
+ ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */
+ ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 32;
+
+ /* #2 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ ae->un.SupportSpeed = 0;
+ if (phba->lmt & LMT_16Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
+ if (phba->lmt & LMT_10Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT;
+ if (phba->lmt & LMT_8Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT;
+ if (phba->lmt & LMT_4Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_4GBIT;
+ if (phba->lmt & LMT_2Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_2GBIT;
+ if (phba->lmt & LMT_1Gb)
+ ae->un.SupportSpeed |= HBA_PORTSPEED_1GBIT;
+ ae->un.SupportSpeed =
+ cpu_to_be32(ae->un.SupportSpeed);
+
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+
+ /* #3 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ switch (phba->fc_linkspeed) {
+ case LPFC_LINK_SPEED_1GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
+ break;
+ case LPFC_LINK_SPEED_2GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
+ break;
+ case LPFC_LINK_SPEED_4GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
+ break;
+ case LPFC_LINK_SPEED_8GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
+ break;
+ case LPFC_LINK_SPEED_10GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+ break;
+ case LPFC_LINK_SPEED_16GHZ:
+ ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
+ break;
+ default:
+ ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
+ break;
+ }
+ ae->un.PortSpeed = cpu_to_be32(ae->un.PortSpeed);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+
+ /* #4 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ hsp = (struct serv_parm *)&vport->fc_sparam;
+ ae->un.MaxFrameSize =
+ (((uint32_t)hsp->cmn.
+ bbRcvSizeMsb) << 8) | (uint32_t)hsp->cmn.
+ bbRcvSizeLsb;
+ ae->un.MaxFrameSize =
+ cpu_to_be32(ae->un.MaxFrameSize);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #5 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.OsDeviceName));
+ ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
+ strncpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME,
+ sizeof(ae->un.OsDeviceName));
+ len = strnlen((char *)ae->un.OsDeviceName,
+ sizeof(ae->un.OsDeviceName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #6 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.HostName));
+ snprintf(ae->un.HostName, sizeof(ae->un.HostName), "%s",
+ init_utsname()->nodename);
+ ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
+ len = strnlen(ae->un.HostName,
+ sizeof(ae->un.HostName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen =
+ cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /*
+ * Currently switches don't seem to support the
+ * following extended Port attributes.
+ */
+ if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB))
+ goto port_out;
+
+ /* #7 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_NODENAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #8 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.PortName, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #9 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.NodeSymName));
+ ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
+ len = lpfc_vport_symbolic_port_name(vport,
+ ae->un.NodeSymName, sizeof(ae->un.NodeSymName));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #10 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
+ ae->un.PortState = 0;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #11 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
+ ae->un.SupportClass =
+ cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + sizeof(struct lpfc_name)) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #12 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(struct lpfc_name));
+ ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
+ ad->AttrLen = cpu_to_be16(FOURBYTES
+ + sizeof(struct lpfc_name));
+ memcpy(&ae->un.FabricName, &vport->fabric_nodename,
+ sizeof(struct lpfc_name));
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + sizeof(struct lpfc_name);
+ if ((size + LPFC_FDMI_MAX_AE_SIZE) >
+ (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #13 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, sizeof(ae->un.FC4Types));
+ ad->AttrType =
+ cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 32);
+ ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */
+ ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */
+ ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 32;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #257 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
+ ae->un.PortState = 0;
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #258 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
+ ae->un.PortState = lpfc_find_map_node(vport);
+ ae->un.PortState = cpu_to_be32(ae->un.PortState);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+ if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+ goto port_out;
+
+ /* #259 Port attribute entry */
+ ad = (struct lpfc_fdmi_attr_def *)
+ ((uint8_t *)pab + size);
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
+ ae->un.PortId = cpu_to_be32(vport->fc_myDID);
+ ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+port_out:
+ pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
+ /* Total size */
+ size = GID_REQUEST_SZ - 4 + size;
+ }
+ break;
+
+ case SLI_MGMT_GHAT:
+ case SLI_MGMT_GRPL:
+ rsp_size = FC_MAX_NS_RSP;
+ case SLI_MGMT_DHBA:
+ case SLI_MGMT_DHAT:
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ memcpy((uint8_t *)&pe->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
+ break;
+
+ case SLI_MGMT_GPAT:
+ case SLI_MGMT_GPAS:
+ rsp_size = FC_MAX_NS_RSP;
+ case SLI_MGMT_DPRT:
+ case SLI_MGMT_DPA:
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ memcpy((uint8_t *)&pe->PortName,
+ (uint8_t *)&vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
+ break;
+ case SLI_MGMT_GRHL:
+ size = GID_REQUEST_SZ - 4;
+ break;
+ default:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+ "0298 FDMI cmdcode x%x not supported\n",
+ cmdcode);
+ goto fdmi_cmd_free_bmpvirt;
+ }
+ CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = size;
+
+ /*
+ * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ * to hold ndlp reference for the corresponding callback function.
+ */
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+ return 0;
+
+ /*
+ * Decrement ndlp reference count to release ndlp reference held
+ * for the failed command's callback function.
+ */
+ lpfc_nlp_put(ndlp);
+
+fdmi_cmd_free_bmpvirt:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+fdmi_cmd_free_bmp:
+ kfree(bmp);
+fdmi_cmd_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+fdmi_cmd_free_mp:
+ kfree(mp);
+fdmi_cmd_exit:
+ /* Issue FDMI request failed */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0244 Issue FDMI request failed Data: x%x\n",
+ cmdcode);
+ return 1;
+}
+
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr - Context object of the timer.
+ *
+ * This function set the WORKER_DELAYED_DISC_TMO flag and wake up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ * handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function start nport discovery of the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
+void
+lpfc_fdmi_tmo(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_FDMI_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+void
+lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (init_utsname()->nodename[0] != '\0')
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+ else
+ mod_timer(&vport->fc_fdmitmo, jiffies +
+ msecs_to_jiffies(1000 * 60));
+ }
+ return;
+}
+
+void
+lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ lpfc_vpd_t *vp = &phba->vpd;
+ uint32_t b1, b2, b3, b4, i, rev;
+ char c;
+ uint32_t *ptr, str[4];
+ uint8_t *fwname;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
+ else if (vp->rev.rBit) {
+ if (psli->sli_flag & LPFC_SLI_ACTIVE)
+ rev = vp->rev.sli2FwRev;
+ else
+ rev = vp->rev.sli1FwRev;
+
+ b1 = (rev & 0x0000f000) >> 12;
+ b2 = (rev & 0x00000f00) >> 8;
+ b3 = (rev & 0x000000c0) >> 6;
+ b4 = (rev & 0x00000030) >> 4;
+
+ switch (b4) {
+ case 0:
+ c = 'N';
+ break;
+ case 1:
+ c = 'A';
+ break;
+ case 2:
+ c = 'B';
+ break;
+ case 3:
+ c = 'X';
+ break;
+ default:
+ c = 0;
+ break;
+ }
+ b4 = (rev & 0x0000000f);
+
+ if (psli->sli_flag & LPFC_SLI_ACTIVE)
+ fwname = vp->rev.sli2FwName;
+ else
+ fwname = vp->rev.sli1FwName;
+
+ for (i = 0; i < 16; i++)
+ if (fwname[i] == 0x20)
+ fwname[i] = 0;
+
+ ptr = (uint32_t*)fwname;
+
+ for (i = 0; i < 3; i++)
+ str[i] = be32_to_cpu(*ptr++);
+
+ if (c == 0) {
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d (%s)",
+ b1, b2, b3, (char *)str);
+ else
+ sprintf(fwrevision, "%d.%d%d", b1,
+ b2, b3);
+ } else {
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d%c%d (%s)",
+ b1, b2, b3, c,
+ b4, (char *)str);
+ else
+ sprintf(fwrevision, "%d.%d%d%c%d",
+ b1, b2, b3, c, b4);
+ }
+ } else {
+ rev = vp->rev.smFwRev;
+
+ b1 = (rev & 0xff000000) >> 24;
+ b2 = (rev & 0x00f00000) >> 20;
+ b3 = (rev & 0x000f0000) >> 16;
+ c = (rev & 0x0000ff00) >> 8;
+ b4 = (rev & 0x000000ff);
+
+ sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
+ }
+ return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 000000000..513edcb0c
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,4698 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2007-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_bsg.h"
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/*
+ * debugfs interface
+ *
+ * To access this interface the user should:
+ * # mount -t debugfs none /sys/kernel/debug
+ *
+ * The lpfc debugfs directory hierarchy is:
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
+ * where Y is the vport VPI on that hba
+ *
+ * Debugging services available per vport:
+ * discovery_trace
+ * This is an ACSII readable file that contains a trace of the last
+ * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
+ * See lpfc_debugfs.h for different categories of discovery events.
+ * To enable the discovery trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
+ * EACH vport. X MUST also be a power of 2.
+ * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
+ * lpfc_debugfs.h .
+ *
+ * slow_ring_trace
+ * This is an ACSII readable file that contains a trace of the last
+ * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
+ * To enable the slow ring trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for
+ * the HBA. X MUST also be a power of 2.
+ */
+static int lpfc_debugfs_enable = 1;
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_disc_trc;
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
+ "Set debugfs discovery trace depth");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_slow_ring_trc;
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
+ "Set debugfs slow ring trace depth");
+
+static int lpfc_debugfs_mask_disc_trc;
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+ "Set debugfs discovery trace mask");
+
+#include <linux/debugfs.h>
+
+static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+static unsigned long lpfc_debugfs_start_time = 0L;
+
+/* iDiag */
+static struct lpfc_idiag idiag;
+
+/**
+ * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
+ * @vport: The vport to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc discovery debugfs data from the @vport and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Discovery logging will be disabled while while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int i, index, len, enable;
+ uint32_t ms;
+ struct lpfc_debugfs_trc *dtp;
+ char *buffer;
+
+ buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return 0;
+
+ enable = lpfc_debugfs_enable;
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+ index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+ dtp = vport->disc_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer,
+ LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+ for (i = 0; i < index; i++) {
+ dtp = vport->disc_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer,
+ LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+
+ lpfc_debugfs_enable = enable;
+ kfree(buffer);
+
+ return len;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
+ * @phba: The HBA to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc slow ring debugfs data from the @phba and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Slow ring logging will be disabled while while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ int i, index, len, enable;
+ uint32_t ms;
+ struct lpfc_debugfs_trc *dtp;
+ char *buffer;
+
+ buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return 0;
+
+ enable = lpfc_debugfs_enable;
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+ index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+ dtp = phba->slow_ring_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer,
+ LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+ for (i = 0; i < index; i++) {
+ dtp = phba->slow_ring_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer,
+ LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+
+ lpfc_debugfs_enable = enable;
+ kfree(buffer);
+
+ return len;
+}
+
+static int lpfc_debugfs_last_hbq = -1;
+
+/**
+ * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the host buffer queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hbq state will be
+ * dumped to @buf first and then info on each hbq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hbq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured HBQ each time called.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ int len = 0;
+ int i, j, found, posted, low;
+ uint32_t phys, raw_index, getidx;
+ struct lpfc_hbq_init *hip;
+ struct hbq_s *hbqs;
+ struct lpfc_hbq_entry *hbqe;
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *hbq_buf;
+
+ if (phba->sli_rev != 3)
+ return 0;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* toggle between multiple hbqs, if any */
+ i = lpfc_sli_hbq_count();
+ if (i > 1) {
+ lpfc_debugfs_last_hbq++;
+ if (lpfc_debugfs_last_hbq >= i)
+ lpfc_debugfs_last_hbq = 0;
+ }
+ else
+ lpfc_debugfs_last_hbq = 0;
+
+ i = lpfc_debugfs_last_hbq;
+
+ len += snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+
+ hbqs = &phba->hbqs[i];
+ posted = 0;
+ list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
+ posted++;
+
+ hip = lpfc_hbq_defs[i];
+ len += snprintf(buf+len, size-len,
+ "idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
+ hip->hbq_index, hip->profile, hip->rn,
+ hip->buffer_count, hip->init_count, hip->add_count, posted);
+
+ raw_index = phba->hbq_get[i];
+ getidx = le32_to_cpu(raw_index);
+ len += snprintf(buf+len, size-len,
+ "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+ hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
+ hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
+
+ hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
+ for (j=0; j<hbqs->entry_count; j++) {
+ len += snprintf(buf+len, size-len,
+ "%03d: %08x %04x %05x ", j,
+ le32_to_cpu(hbqe->bde.addrLow),
+ le32_to_cpu(hbqe->bde.tus.w),
+ le32_to_cpu(hbqe->buffer_tag));
+ i = 0;
+ found = 0;
+
+ /* First calculate if slot has an associated posted buffer */
+ low = hbqs->hbqPutIdx - posted;
+ if (low >= 0) {
+ if ((j >= hbqs->hbqPutIdx) || (j < low)) {
+ len += snprintf(buf+len, size-len, "Unused\n");
+ goto skipit;
+ }
+ }
+ else {
+ if ((j >= hbqs->hbqPutIdx) &&
+ (j < (hbqs->entry_count+low))) {
+ len += snprintf(buf+len, size-len, "Unused\n");
+ goto skipit;
+ }
+ }
+
+ /* Get the Buffer info for the posted buffer */
+ list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
+ if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
+ len += snprintf(buf+len, size-len,
+ "Buf%d: %p %06x\n", i,
+ hbq_buf->dbuf.virt, hbq_buf->tag);
+ found = 1;
+ break;
+ }
+ i++;
+ }
+ if (!found) {
+ len += snprintf(buf+len, size-len, "No DMAinfo?\n");
+ }
+skipit:
+ hbqe++;
+ if (len > LPFC_HBQINFO_SIZE - 54)
+ break;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return len;
+}
+
+static int lpfc_debugfs_last_hba_slim_off;
+
+/**
+ * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
+ * @phba: The HBA to gather SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of HBA SLIM for the HBA associated
+ * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
+ *
+ * Notes:
+ * This routine will only dump up to 1024 bytes of data each time called and
+ * should be called multiple times to dump the entire HBA SLIM.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ int len = 0;
+ int i, off;
+ uint32_t *ptr;
+ char *buffer;
+
+ buffer = kmalloc(1024, GFP_KERNEL);
+ if (!buffer)
+ return 0;
+
+ off = 0;
+ spin_lock_irq(&phba->hbalock);
+
+ len += snprintf(buf+len, size-len, "HBA SLIM\n");
+ lpfc_memcpy_from_slim(buffer,
+ phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
+
+ ptr = (uint32_t *)&buffer[0];
+ off = lpfc_debugfs_last_hba_slim_off;
+
+ /* Set it up for the next time */
+ lpfc_debugfs_last_hba_slim_off += 1024;
+ if (lpfc_debugfs_last_hba_slim_off >= 4096)
+ lpfc_debugfs_last_hba_slim_off = 0;
+
+ i = 1024;
+ while (i > 0) {
+ len += snprintf(buf+len, size-len,
+ "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+ *(ptr+5), *(ptr+6), *(ptr+7));
+ ptr += 8;
+ i -= (8 * sizeof(uint32_t));
+ off += (8 * sizeof(uint32_t));
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ kfree(buffer);
+
+ return len;
+}
+
+/**
+ * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
+ * @phba: The HBA to gather Host SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of host SLIM for the host associated
+ * with @phba to @buf up to @size bytes of data. The dump will contain the
+ * Mailbox, PCB, Rings, and Registers that are located in host memory.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ int len = 0;
+ int i, off;
+ uint32_t word0, word1, word2, word3;
+ uint32_t *ptr;
+ struct lpfc_pgp *pgpp;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ off = 0;
+ spin_lock_irq(&phba->hbalock);
+
+ len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
+ ptr = (uint32_t *)phba->slim2p.virt;
+ i = sizeof(MAILBOX_t);
+ while (i > 0) {
+ len += snprintf(buf+len, size-len,
+ "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+ *(ptr+5), *(ptr+6), *(ptr+7));
+ ptr += 8;
+ i -= (8 * sizeof(uint32_t));
+ off += (8 * sizeof(uint32_t));
+ }
+
+ len += snprintf(buf+len, size-len, "SLIM PCB\n");
+ ptr = (uint32_t *)phba->pcb;
+ i = sizeof(PCB_t);
+ while (i > 0) {
+ len += snprintf(buf+len, size-len,
+ "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+ *(ptr+5), *(ptr+6), *(ptr+7));
+ ptr += 8;
+ i -= (8 * sizeof(uint32_t));
+ off += (8 * sizeof(uint32_t));
+ }
+
+ for (i = 0; i < 4; i++) {
+ pgpp = &phba->port_gp[i];
+ pring = &psli->ring[i];
+ len += snprintf(buf+len, size-len,
+ "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
+ "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
+ i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
+ pring->sli.sli3.next_cmdidx,
+ pring->sli.sli3.local_getidx,
+ pring->flag, pgpp->rspPutInx,
+ pring->sli.sli3.numRiocb);
+ }
+
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ word0 = readl(phba->HAregaddr);
+ word1 = readl(phba->CAregaddr);
+ word2 = readl(phba->HSregaddr);
+ word3 = readl(phba->HCregaddr);
+ len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+ "HC:%08x\n", word0, word1, word2, word3);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return len;
+}
+
+/**
+ * lpfc_debugfs_nodelist_data - Dump target node list to a buffer
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current target node list associated with @vport to
+ * @buf up to @size bytes of data. Each node entry in the dump will contain a
+ * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int len = 0;
+ int cnt;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+ unsigned char *statep, *name;
+
+ cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!cnt) {
+ len += snprintf(buf+len, size-len,
+ "Missing Nodelist Entries\n");
+ break;
+ }
+ cnt--;
+ switch (ndlp->nlp_state) {
+ case NLP_STE_UNUSED_NODE:
+ statep = "UNUSED";
+ break;
+ case NLP_STE_PLOGI_ISSUE:
+ statep = "PLOGI ";
+ break;
+ case NLP_STE_ADISC_ISSUE:
+ statep = "ADISC ";
+ break;
+ case NLP_STE_REG_LOGIN_ISSUE:
+ statep = "REGLOG";
+ break;
+ case NLP_STE_PRLI_ISSUE:
+ statep = "PRLI ";
+ break;
+ case NLP_STE_LOGO_ISSUE:
+ statep = "LOGO ";
+ break;
+ case NLP_STE_UNMAPPED_NODE:
+ statep = "UNMAP ";
+ break;
+ case NLP_STE_MAPPED_NODE:
+ statep = "MAPPED";
+ break;
+ case NLP_STE_NPR_NODE:
+ statep = "NPR ";
+ break;
+ default:
+ statep = "UNKNOWN";
+ }
+ len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+ statep, ndlp->nlp_DID);
+ name = (unsigned char *)&ndlp->nlp_portname;
+ len += snprintf(buf+len, size-len,
+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7));
+ name = (unsigned char *)&ndlp->nlp_nodename;
+ len += snprintf(buf+len, size-len,
+ "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7));
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ len += snprintf(buf+len, size-len, "RPI:%03d ",
+ ndlp->nlp_rpi);
+ else
+ len += snprintf(buf+len, size-len, "RPI:none ");
+ len += snprintf(buf+len, size-len, "flag:x%08x ",
+ ndlp->nlp_flag);
+ if (!ndlp->nlp_type)
+ len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+ if (ndlp->nlp_type & NLP_FC_NODE)
+ len += snprintf(buf+len, size-len, "FC_NODE ");
+ if (ndlp->nlp_type & NLP_FABRIC)
+ len += snprintf(buf+len, size-len, "FABRIC ");
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+ ndlp->nlp_sid);
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+ len += snprintf(buf+len, size-len, "usgmap:%x ",
+ ndlp->nlp_usg_map);
+ len += snprintf(buf+len, size-len, "refcnt:%x",
+ atomic_read(&ndlp->kref.refcount));
+ len += snprintf(buf+len, size-len, "\n");
+ }
+ spin_unlock_irq(shost->host_lock);
+ return len;
+}
+#endif
+
+/**
+ * lpfc_debugfs_disc_trc - Store discovery trace log
+ * @vport: The vport to associate this trace string with for retrieval.
+ * @mask: Log entry classification.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * discovery trace buffer associated with @vport. Only entries with a @mask that
+ * match the current debugfs discovery mask will be saved. Entries that do not
+ * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like
+ * printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+ uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_debugfs_trc *dtp;
+ int index;
+
+ if (!(lpfc_debugfs_mask_disc_trc & mask))
+ return;
+
+ if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
+ !vport || !vport->disc_trc)
+ return;
+
+ index = atomic_inc_return(&vport->disc_trc_cnt) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ dtp = vport->disc_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+ dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+#endif
+ return;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc - Store slow ring trace log
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * discovery trace buffer associated with @vport. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
+ uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_debugfs_trc *dtp;
+ int index;
+
+ if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc ||
+ !phba || !phba->slow_ring_trc)
+ return;
+
+ index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ dtp = phba->slow_ring_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+ dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+#endif
+ return;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_debugfs_disc_trc_open - Open the discovery trace log
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ if (!lpfc_debugfs_max_disc_trc) {
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
+ size = PAGE_ALIGN(size);
+
+ debug->buffer = kmalloc(size, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ if (!lpfc_debugfs_max_slow_ring_trc) {
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
+ size = PAGE_ALIGN(size);
+
+ debug->buffer = kmalloc(size, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/**
+ * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer,
+ LPFC_HBQINFO_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/**
+ * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
+ LPFC_DUMPHBASLIM_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/**
+ * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
+ LPFC_DUMPHOSTSLIM_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int
+lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ if (!_dump_buf_data)
+ return -EBUSY;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
+ __func__, _dump_buf_data);
+ debug->buffer = _dump_buf_data;
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int
+lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ if (!_dump_buf_dif)
+ return -EBUSY;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
+ __func__, _dump_buf_dif, file);
+ debug->buffer = _dump_buf_dif;
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ /*
+ * The Data/DIF buffers only save one failing IO
+ * The write op is used as a reset mechanism after an IO has
+ * already been saved to the next one can be saved
+ */
+ spin_lock(&_dump_buf_lock);
+
+ memset((void *)_dump_buf_data, 0,
+ ((1 << PAGE_SHIFT) << _dump_buf_data_order));
+ memset((void *)_dump_buf_dif, 0,
+ ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
+
+ _dump_buf_done = 0;
+
+ spin_unlock(&_dump_buf_lock);
+
+ return nbytes;
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct dentry *dent = file->f_path.dentry;
+ struct lpfc_hba *phba = file->private_data;
+ char cbuf[32];
+ uint64_t tmp = 0;
+ int cnt = 0;
+
+ if (dent == phba->debug_writeGuard)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+ else if (dent == phba->debug_writeApp)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+ else if (dent == phba->debug_writeRef)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+ else if (dent == phba->debug_readGuard)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+ else if (dent == phba->debug_readApp)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+ else if (dent == phba->debug_readRef)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+ else if (dent == phba->debug_InjErrNPortID)
+ cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+ else if (dent == phba->debug_InjErrWWPN) {
+ memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+ tmp = cpu_to_be64(tmp);
+ cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+ } else if (dent == phba->debug_InjErrLBA) {
+ if (phba->lpfc_injerr_lba == (sector_t)(-1))
+ cnt = snprintf(cbuf, 32, "off\n");
+ else
+ cnt = snprintf(cbuf, 32, "0x%llx\n",
+ (uint64_t) phba->lpfc_injerr_lba);
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0547 Unknown debugfs error injection entry\n");
+
+ return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct dentry *dent = file->f_path.dentry;
+ struct lpfc_hba *phba = file->private_data;
+ char dstbuf[32];
+ uint64_t tmp = 0;
+ int size;
+
+ memset(dstbuf, 0, 32);
+ size = (nbytes < 32) ? nbytes : 32;
+ if (copy_from_user(dstbuf, buf, size))
+ return 0;
+
+ if (dent == phba->debug_InjErrLBA) {
+ if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+ tmp = (uint64_t)(-1);
+ }
+
+ if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
+ return 0;
+
+ if (dent == phba->debug_writeGuard)
+ phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_writeApp)
+ phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_writeRef)
+ phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_readGuard)
+ phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_readApp)
+ phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_readRef)
+ phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_InjErrLBA)
+ phba->lpfc_injerr_lba = (sector_t)tmp;
+ else if (dent == phba->debug_InjErrNPortID)
+ phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+ else if (dent == phba->debug_InjErrWWPN) {
+ tmp = cpu_to_be64(tmp);
+ memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0548 Unknown debugfs error injection entry\n");
+
+ return nbytes;
+}
+
+static int
+lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/**
+ * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
+ LPFC_NODELIST_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/**
+ * lpfc_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @off: The offset to seek to or the amount to seek by.
+ * @whence: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation. The
+ * @whence parameter indicates whether @off is the offset to directly seek to,
+ * or if it is a value to seek forward or reverse by. This function figures out
+ * what the new offset of the debugfs file will be and assigns that value to the
+ * f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ **/
+static loff_t
+lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+ struct lpfc_debug *debug = file->private_data;
+ return fixed_size_llseek(file, off, whence, debug->len);
+}
+
+/**
+ * lpfc_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from from the buffer indicated in the private_data
+ * field of @file. It will start reading at @ppos and copy up to @nbytes of
+ * data to @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
+ debug->len);
+}
+
+/**
+ * lpfc_debugfs_release - Release the buffer used to store debugfs file data
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file was
+ * opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+static int
+lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ debug->buffer = NULL;
+ kfree(debug);
+
+ return 0;
+}
+
+/*
+ * ---------------------------------
+ * iDiag debugfs file access methods
+ * ---------------------------------
+ *
+ * All access methods are through the proper SLI4 PCI function's debugfs
+ * iDiag directory:
+ *
+ * /sys/kernel/debug/lpfc/fn<#>/iDiag
+ */
+
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs comands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from debugfs user space buffer and parses the
+ * buffer for getting the idiag command and arguments. The while space in
+ * between the set of data is used as the parsing separator.
+ *
+ * This routine returns 0 when successful, it returns proper error code
+ * back to the user space in error conditions.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+ struct lpfc_idiag_cmd *idiag_cmd)
+{
+ char mybuf[64];
+ char *pbuf, *step_str;
+ int i;
+ size_t bsize;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+ memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+ bsize = min(nbytes, (sizeof(mybuf)-1));
+
+ if (copy_from_user(mybuf, buf, bsize))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+ step_str = strsep(&pbuf, "\t ");
+
+ /* The opcode must present */
+ if (!step_str)
+ return -EINVAL;
+
+ idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+ if (idiag_cmd->opcode == 0)
+ return -EINVAL;
+
+ for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+ step_str = strsep(&pbuf, "\t ");
+ if (!step_str)
+ return i;
+ idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+ }
+ return i;
+}
+
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, it then
+ * allocates buffer for the file operation, performs the necessary PCI config
+ * space read into the allocated buffer according to the idiag user command
+ * setup, and then returns a pointer to buffer in the private_data field in
+ * @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return an
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+ debug->buffer = NULL;
+ file->private_data = debug;
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation, it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also reset the fields in the idiag command struct in the
+ * case of command for write operation.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ if (debug->op == LPFC_IDIAG_OP_WR) {
+ switch (idiag.cmd.opcode) {
+ case LPFC_IDIAG_CMD_PCICFG_WR:
+ case LPFC_IDIAG_CMD_PCICFG_ST:
+ case LPFC_IDIAG_CMD_PCICFG_CL:
+ case LPFC_IDIAG_CMD_QUEACC_WR:
+ case LPFC_IDIAG_CMD_QUEACC_ST:
+ case LPFC_IDIAG_CMD_QUEACC_CL:
+ memset(&idiag, 0, sizeof(idiag));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies to user @buf. Depending on the PCI config space
+ * read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all
+ * registers from the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+ int where, count;
+ char *pbuffer;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+ } else
+ return 0;
+
+ /* Read single PCI config space register */
+ switch (count) {
+ case SIZE_U8: /* byte (8 bits) */
+ pci_read_config_byte(pdev, where, &u8val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %02x\n", where, u8val);
+ break;
+ case SIZE_U16: /* word (16 bits) */
+ pci_read_config_word(pdev, where, &u16val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %04x\n", where, u16val);
+ break;
+ case SIZE_U32: /* double word (32 bits) */
+ pci_read_config_dword(pdev, where, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %08x\n", where, u32val);
+ break;
+ case LPFC_PCI_CFG_BROWSE: /* browse all */
+ goto pcicfg_browse;
+ break;
+ default:
+ /* illegal count */
+ len = 0;
+ break;
+ }
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+ /* Browse all PCI config space registers */
+ offset_label = idiag.offset.last_rd;
+ offset = offset_label;
+
+ /* Read PCI config space */
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: ", offset_label);
+ while (index > 0) {
+ pci_read_config_dword(pdev, offset, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%08x ", u32val);
+ offset += sizeof(uint32_t);
+ if (offset >= LPFC_PCI_CFG_SIZE) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_CFG_SIZE-len, "\n");
+ break;
+ }
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n%03x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci cfg read */
+ if (index == 0) {
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+ } else
+ idiag.offset.last_rd = 0;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and
+ * then perform the syntax check for PCI config space read or write command
+ * accordingly. In the case of PCI config space read command, it sets up
+ * the command in the idiag command struct for the debugfs read operation.
+ * In the case of PCI config space write operation, it executes the write
+ * operation into the PCI config space accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t where, value, count;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ /* Sanity check on PCI config read command line arguments */
+ if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
+ goto error_out;
+ /* Read command from PCI config space, set up command fields */
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+ if (count == LPFC_PCI_CFG_BROWSE) {
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = where;
+ } else if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ }
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ /* Sanity check on PCI config write command line arguments */
+ if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
+ goto error_out;
+ /* Write command to PCI config space, read-modify-write */
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+ value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
+ /* Sanity checks */
+ if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_byte(pdev, where,
+ (uint8_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val |= (uint8_t)value;
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val &= (uint8_t)(~value);
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_word(pdev, where,
+ (uint16_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val |= (uint16_t)value;
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val &= (uint16_t)(~value);
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_dword(pdev, where, value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val |= value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val &= ~value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ }
+ } else
+ /* All other opecodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci bar memory mapped space
+ * according to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, offset_run, len = 0, index;
+ int bar_num, acc_range, bar_size;
+ char *pbuffer;
+ void __iomem *mem_mapped_bar;
+ uint32_t if_type;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+ bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+ offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+ acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+ bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+ } else
+ return 0;
+
+ if (acc_range == 0)
+ return 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (bar_num == IDIAG_BARACC_BAR_0)
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ else if (bar_num == IDIAG_BARACC_BAR_1)
+ mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+ else if (bar_num == IDIAG_BARACC_BAR_2)
+ mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+ else
+ return 0;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num == IDIAG_BARACC_BAR_0)
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ else
+ return 0;
+ } else
+ return 0;
+
+ /* Read single PCI bar space register */
+ if (acc_range == SINGLE_WORD) {
+ offset_run = offset;
+ u32val = readl(mem_mapped_bar + offset_run);
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%05x: %08x\n", offset_run, u32val);
+ } else
+ goto baracc_browse;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+baracc_browse:
+
+ /* Browse all PCI bar space registers */
+ offset_label = idiag.offset.last_rd;
+ offset_run = offset_label;
+
+ /* Read PCI bar memory mapped space */
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%05x: ", offset_label);
+ index = LPFC_PCI_BAR_RD_SIZE;
+ while (index > 0) {
+ u32val = readl(mem_mapped_bar + offset_run);
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%08x ", u32val);
+ offset_run += sizeof(uint32_t);
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (offset_run >= bar_size) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ break;
+ }
+ } else {
+ if (offset_run >= offset +
+ (acc_range * sizeof(uint32_t))) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ break;
+ }
+ }
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "\n%05x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci bar read */
+ if (index == 0) {
+ idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (idiag.offset.last_rd >= bar_size)
+ idiag.offset.last_rd = 0;
+ } else {
+ if (offset_run >= offset +
+ (acc_range * sizeof(uint32_t)))
+ idiag.offset.last_rd = offset;
+ }
+ } else {
+ if (acc_range == LPFC_PCI_BAR_BROWSE)
+ idiag.offset.last_rd = 0;
+ else
+ idiag.offset.last_rd = offset;
+ }
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and
+ * then perform the syntax check for PCI bar memory mapped space read or
+ * write command accordingly. In the case of PCI bar memory mapped space
+ * read command, it sets up the command in the idiag command struct for
+ * the debugfs read operation. In the case of PCI bar memorpy mapped space
+ * write operation, it executes the write operation into the PCI bar memory
+ * mapped space accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t bar_num, bar_size, offset, value, acc_range;
+ struct pci_dev *pdev;
+ void __iomem *mem_mapped_bar;
+ uint32_t if_type;
+ uint32_t u32val;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if ((bar_num != IDIAG_BARACC_BAR_0) &&
+ (bar_num != IDIAG_BARACC_BAR_1) &&
+ (bar_num != IDIAG_BARACC_BAR_2))
+ goto error_out;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num != IDIAG_BARACC_BAR_0)
+ goto error_out;
+ } else
+ goto error_out;
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (bar_num == IDIAG_BARACC_BAR_0) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR0_SIZE;
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ } else if (bar_num == IDIAG_BARACC_BAR_1) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR1_SIZE;
+ mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+ } else if (bar_num == IDIAG_BARACC_BAR_2) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR2_SIZE;
+ mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+ } else
+ goto error_out;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num == IDIAG_BARACC_BAR_0) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF2_BAR0_SIZE;
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ } else
+ goto error_out;
+ } else
+ goto error_out;
+
+ offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+ if (offset % sizeof(uint32_t))
+ goto error_out;
+
+ bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+ /* Sanity check on PCI config read command line arguments */
+ if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
+ goto error_out;
+ acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (offset > bar_size - sizeof(uint32_t))
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = offset;
+ } else if (acc_range > SINGLE_WORD) {
+ if (offset + acc_range * sizeof(uint32_t) > bar_size)
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = offset;
+ } else if (acc_range != SINGLE_WORD)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+ /* Sanity check on PCI bar write command line arguments */
+ if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
+ goto error_out;
+ /* Write command to PCI bar space, read-modify-write */
+ acc_range = SINGLE_WORD;
+ value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
+ writel(value, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
+ u32val = readl(mem_mapped_bar + offset);
+ u32val |= value;
+ writel(u32val, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+ u32val = readl(mem_mapped_bar + offset);
+ u32val &= ~value;
+ writel(u32val, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ } else
+ /* All other opecodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba SLI4 PCI function queue information,
+ * and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int len = 0;
+ char *pbuffer;
+ int x, cnt;
+ int max_cnt;
+ struct lpfc_queue *qp = NULL;
+
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+ max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
+
+ if (*ppos)
+ return 0;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* Fast-path event queue */
+ if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
+ cnt = phba->cfg_fcp_io_channel;
+
+ for (x = 0; x < cnt; x++) {
+
+ /* Fast-path EQ */
+ qp = phba->sli4_hba.hba_eq[x];
+ if (!qp)
+ goto proc_cq;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\nHBA EQ info: "
+ "EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "EQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+proc_cq:
+ /* Fast-path FCP CQ */
+ qp = phba->sli4_hba.fcp_cq[x];
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tFCP CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+
+ /* Fast-path FCP WQ */
+ qp = phba->sli4_hba.fcp_wq[x];
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tFCP WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+
+ if (x)
+ continue;
+
+ /* Only EQ 0 has slow path CQs configured */
+
+ /* Slow-path mailbox CQ */
+ qp = phba->sli4_hba.mbx_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tMBX CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ-STAT[mbox:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* Slow-path MBOX MQ */
+ qp = phba->sli4_hba.mbx_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tMBX MQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]:\n",
+ phba->sli4_hba.mbx_wq->assoc_qid);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* Slow-path ELS response CQ */
+ qp = phba->sli4_hba.els_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tELS CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ-STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID [%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* Slow-path ELS WQ */
+ qp = phba->sli4_hba.els_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tELS WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ " WQ-STAT[oflow:x%x "
+ "posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+ /* Slow-path RQ header */
+ qp = phba->sli4_hba.hdr_rq;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tRQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "RQ-STAT[nopost:x%x nobuf:x%x "
+ "trunc:x%x rcv:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tHQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Slow-path RQ data */
+ qp = phba->sli4_hba.dat_rq;
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tDQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
+ }
+ }
+
+ if (phba->cfg_fof) {
+ /* FOF EQ */
+ qp = phba->sli4_hba.fof_eq;
+ if (!qp)
+ goto out;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\nFOF EQ info: "
+ "EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "EQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ if (phba->cfg_fof) {
+
+ /* OAS CQ */
+ qp = phba->sli4_hba.oas_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tOAS CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* OAS WQ */
+ qp = phba->sli4_hba.oas_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tOAS WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+ }
+out:
+ spin_unlock_irq(&phba->hbalock);
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+too_big:
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
+ spin_unlock_irq(&phba->hbalock);
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_que_param_check - queue access command parameter sanity check
+ * @q: The pointer to queue structure.
+ * @index: The index into a queue entry.
+ * @count: The number of queue entries to access.
+ *
+ * Description:
+ * The routine performs sanity check on device queue access method commands.
+ *
+ * Returns:
+ * This function returns -EINVAL when fails the sanity check, otherwise, it
+ * returns 0.
+ **/
+static int
+lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
+{
+ /* Only support single entry read or browsing */
+ if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE))
+ return -EINVAL;
+ if (index > q->entry_count - 1)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
+ * @pbuffer: The pointer to buffer to copy the read data into.
+ * @pque: The pointer to the queue to be read.
+ * @index: The index into the queue entry.
+ *
+ * Description:
+ * This routine reads out a single entry from the given queue's index location
+ * and copies it into the buffer provided.
+ *
+ * Returns:
+ * This function returns 0 when it fails, otherwise, it returns the length of
+ * the data read into the buffer provided.
+ **/
+static int
+lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
+ uint32_t index)
+{
+ int offset, esize;
+ uint32_t *pentry;
+
+ if (!pbuffer || !pque)
+ return 0;
+
+ esize = pque->entry_size;
+ len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+ "QE-INDEX[%04d]:\n", index);
+
+ offset = 0;
+ pentry = pque->qe[index].address;
+ while (esize > 0) {
+ len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+ "%08x ", *pentry);
+ pentry++;
+ offset += sizeof(uint32_t);
+ esize -= sizeof(uint32_t);
+ if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_queacc_read - idiag debugfs read port queue
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba device queue memory according to the
+ * idiag command, and copies to user @buf. Depending on the queue dump read
+ * command setup, it does either a single queue entry read or browing through
+ * all entries of the queue.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t last_index, index, count;
+ struct lpfc_queue *pque = NULL;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+ index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+ count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+ pque = (struct lpfc_queue *)idiag.ptr_private;
+ } else
+ return 0;
+
+ /* Browse the queue starting from index */
+ if (count == LPFC_QUE_ACC_BROWSE)
+ goto que_browse;
+
+ /* Read a single entry from the queue */
+ len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+que_browse:
+
+ /* Browse all entries from the queue */
+ last_index = idiag.offset.last_rd;
+ index = last_index;
+
+ while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) {
+ len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
+ index++;
+ if (index > pque->entry_count - 1)
+ break;
+ }
+
+ /* Set up the offset for next portion of pci cfg read */
+ if (index > pque->entry_count - 1)
+ index = 0;
+ idiag.offset.last_rd = index;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and then
+ * perform the syntax check for port queue read (dump) or write (set) command
+ * accordingly. In the case of port queue read command, it sets up the command
+ * in the idiag command struct for the following debugfs read operation. In
+ * the case of port queue write operation, it executes the write operation
+ * into the port queue entry accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t qidx, quetp, queid, index, count, offset, value;
+ uint32_t *pentry;
+ struct lpfc_queue *pque;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Get and sanity check on command feilds */
+ quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
+ queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
+ index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+ count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+ offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
+ value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
+
+ /* Sanity check on command line arguments */
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
+ if (rc != LPFC_QUE_ACC_WR_CMD_ARG)
+ goto error_out;
+ if (count != 1)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+ if (rc != LPFC_QUE_ACC_RD_CMD_ARG)
+ goto error_out;
+ } else
+ goto error_out;
+
+ switch (quetp) {
+ case LPFC_IDIAG_EQ:
+ /* HBA event queue */
+ if (phba->sli4_hba.hba_eq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ qidx++) {
+ if (phba->sli4_hba.hba_eq[qidx] &&
+ phba->sli4_hba.hba_eq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.hba_eq[qidx],
+ index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.hba_eq[qidx];
+ goto pass_check;
+ }
+ }
+ }
+ goto error_out;
+ break;
+ case LPFC_IDIAG_CQ:
+ /* MBX complete queue */
+ if (phba->sli4_hba.mbx_cq &&
+ phba->sli4_hba.mbx_cq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.mbx_cq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.mbx_cq;
+ goto pass_check;
+ }
+ /* ELS complete queue */
+ if (phba->sli4_hba.els_cq &&
+ phba->sli4_hba.els_cq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.els_cq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.els_cq;
+ goto pass_check;
+ }
+ /* FCP complete queue */
+ if (phba->sli4_hba.fcp_cq) {
+ qidx = 0;
+ do {
+ if (phba->sli4_hba.fcp_cq[qidx] &&
+ phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.fcp_cq[qidx],
+ index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fcp_cq[qidx];
+ goto pass_check;
+ }
+ } while (++qidx < phba->cfg_fcp_io_channel);
+ }
+ goto error_out;
+ break;
+ case LPFC_IDIAG_MQ:
+ /* MBX work queue */
+ if (phba->sli4_hba.mbx_wq &&
+ phba->sli4_hba.mbx_wq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.mbx_wq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.mbx_wq;
+ goto pass_check;
+ }
+ goto error_out;
+ break;
+ case LPFC_IDIAG_WQ:
+ /* ELS work queue */
+ if (phba->sli4_hba.els_wq &&
+ phba->sli4_hba.els_wq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.els_wq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.els_wq;
+ goto pass_check;
+ }
+ /* FCP work queue */
+ if (phba->sli4_hba.fcp_wq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ qidx++) {
+ if (!phba->sli4_hba.fcp_wq[qidx])
+ continue;
+ if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.fcp_wq[qidx],
+ index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fcp_wq[qidx];
+ goto pass_check;
+ }
+ }
+ }
+ goto error_out;
+ break;
+ case LPFC_IDIAG_RQ:
+ /* HDR queue */
+ if (phba->sli4_hba.hdr_rq &&
+ phba->sli4_hba.hdr_rq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.hdr_rq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.hdr_rq;
+ goto pass_check;
+ }
+ /* DAT queue */
+ if (phba->sli4_hba.dat_rq &&
+ phba->sli4_hba.dat_rq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.dat_rq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.dat_rq;
+ goto pass_check;
+ }
+ goto error_out;
+ break;
+ default:
+ goto error_out;
+ break;
+ }
+
+pass_check:
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+ if (count == LPFC_QUE_ACC_BROWSE)
+ idiag.offset.last_rd = index;
+ }
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
+ /* Additional sanity checks on write operation */
+ pque = (struct lpfc_queue *)idiag.ptr_private;
+ if (offset > pque->entry_size/sizeof(uint32_t) - 1)
+ goto error_out;
+ pentry = pque->qe[index].address;
+ pentry += offset;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
+ *pentry = value;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST)
+ *pentry |= value;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL)
+ *pentry &= ~value;
+ }
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The lenght of bytes to copied.
+ * @drbregid: The id to doorbell registers.
+ *
+ * Description:
+ * This routine reads a doorbell register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+ int len, uint32_t drbregid)
+{
+
+ if (!pbuffer)
+ return 0;
+
+ switch (drbregid) {
+ case LPFC_DRB_EQCQ:
+ len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ "EQCQ-DRB-REG: 0x%08x\n",
+ readl(phba->sli4_hba.EQCQDBregaddr));
+ break;
+ case LPFC_DRB_MQ:
+ len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ "MQ-DRB-REG: 0x%08x\n",
+ readl(phba->sli4_hba.MQDBregaddr));
+ break;
+ case LPFC_DRB_WQ:
+ len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ "WQ-DRB-REG: 0x%08x\n",
+ readl(phba->sli4_hba.WQDBregaddr));
+ break;
+ case LPFC_DRB_RQ:
+ len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+ "RQ-DRB-REG: 0x%08x\n",
+ readl(phba->sli4_hba.RQDBregaddr));
+ break;
+ default:
+ break;
+ }
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba device doorbell register according
+ * to the idiag command, and copies to user @buf. Depending on the doorbell
+ * register read command setup, it does either a single doorbell register
+ * read or dump all doorbell registers.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t drb_reg_id, i;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
+ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+ else
+ return 0;
+
+ if (drb_reg_id == LPFC_DRB_ACC_ALL)
+ for (i = 1; i <= LPFC_DRB_MAX; i++)
+ len = lpfc_idiag_drbacc_read_reg(phba,
+ pbuffer, len, i);
+ else
+ len = lpfc_idiag_drbacc_read_reg(phba,
+ pbuffer, len, drb_reg_id);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and then
+ * perform the syntax check for port doorbell register read (dump) or write
+ * (set) command accordingly. In the case of port queue read command, it sets
+ * up the command in the idiag command struct for the following debugfs read
+ * operation. In the case of port doorbell register write operation, it
+ * executes the write operation into the port doorbell register accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t drb_reg_id, value, reg_val = 0;
+ void __iomem *drb_reg;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
+ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+ value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+ if (rc != LPFC_DRB_ACC_WR_CMD_ARG)
+ goto error_out;
+ if (drb_reg_id > LPFC_DRB_MAX)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) {
+ if (rc != LPFC_DRB_ACC_RD_CMD_ARG)
+ goto error_out;
+ if ((drb_reg_id > LPFC_DRB_MAX) &&
+ (drb_reg_id != LPFC_DRB_ACC_ALL))
+ goto error_out;
+ } else
+ goto error_out;
+
+ /* Perform the write access operation */
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+ switch (drb_reg_id) {
+ case LPFC_DRB_EQCQ:
+ drb_reg = phba->sli4_hba.EQCQDBregaddr;
+ break;
+ case LPFC_DRB_MQ:
+ drb_reg = phba->sli4_hba.MQDBregaddr;
+ break;
+ case LPFC_DRB_WQ:
+ drb_reg = phba->sli4_hba.WQDBregaddr;
+ break;
+ case LPFC_DRB_RQ:
+ drb_reg = phba->sli4_hba.RQDBregaddr;
+ break;
+ default:
+ goto error_out;
+ }
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR)
+ reg_val = value;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) {
+ reg_val = readl(drb_reg);
+ reg_val |= value;
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+ reg_val = readl(drb_reg);
+ reg_val &= ~value;
+ }
+ writel(reg_val, drb_reg);
+ readl(drb_reg); /* flush */
+ }
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The lenght of bytes to copied.
+ * @drbregid: The id to doorbell registers.
+ *
+ * Description:
+ * This routine reads a control register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+ int len, uint32_t ctlregid)
+{
+
+ if (!pbuffer)
+ return 0;
+
+ switch (ctlregid) {
+ case LPFC_CTL_PORT_SEM:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port SemReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET));
+ break;
+ case LPFC_CTL_PORT_STA:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port StaReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET));
+ break;
+ case LPFC_CTL_PORT_CTL:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port CtlReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET));
+ break;
+ case LPFC_CTL_PORT_ER1:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port Er1Reg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET));
+ break;
+ case LPFC_CTL_PORT_ER2:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port Er2Reg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET));
+ break;
+ case LPFC_CTL_PDEV_CTL:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "PDev CtlReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET));
+ break;
+ default:
+ break;
+ }
+ return len;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba port and device registers according
+ * to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t ctl_reg_id, i;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
+ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+ else
+ return 0;
+
+ if (ctl_reg_id == LPFC_CTL_ACC_ALL)
+ for (i = 1; i <= LPFC_CTL_MAX; i++)
+ len = lpfc_idiag_ctlacc_read_reg(phba,
+ pbuffer, len, i);
+ else
+ len = lpfc_idiag_ctlacc_read_reg(phba,
+ pbuffer, len, ctl_reg_id);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and then
+ * perform the syntax check for port and device control register read (dump)
+ * or write (set) command accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t ctl_reg_id, value, reg_val = 0;
+ void __iomem *ctl_reg;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
+ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+ value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
+ goto error_out;
+ if (ctl_reg_id > LPFC_CTL_MAX)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
+ if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
+ goto error_out;
+ if ((ctl_reg_id > LPFC_CTL_MAX) &&
+ (ctl_reg_id != LPFC_CTL_ACC_ALL))
+ goto error_out;
+ } else
+ goto error_out;
+
+ /* Perform the write access operation */
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ switch (ctl_reg_id) {
+ case LPFC_CTL_PORT_SEM:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
+ break;
+ case LPFC_CTL_PORT_STA:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
+ break;
+ case LPFC_CTL_PORT_CTL:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
+ break;
+ case LPFC_CTL_PORT_ER1:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
+ break;
+ case LPFC_CTL_PORT_ER2:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
+ break;
+ case LPFC_CTL_PDEV_CTL:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET;
+ break;
+ default:
+ goto error_out;
+ }
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
+ reg_val = value;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
+ reg_val = readl(ctl_reg);
+ reg_val |= value;
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ reg_val = readl(ctl_reg);
+ reg_val &= ~value;
+ }
+ writel(reg_val, ctl_reg);
+ readl(ctl_reg); /* flush */
+ }
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
+ * @phba: Pointer to HBA context object.
+ * @pbuffer: Pointer to data buffer.
+ *
+ * Description:
+ * This routine gets the driver mailbox access debugfs setup information.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static int
+lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
+{
+ uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+ int len = 0;
+
+ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_dump_map: 0x%08x\n", mbx_dump_map);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_word_cnt: %04d\n", mbx_word_cnt);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba driver mailbox access debugfs setup
+ * information.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
+ (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
+ return 0;
+
+ len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and then
+ * perform the syntax check for driver mailbox command (dump) and sets up the
+ * necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
+ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
+ if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
+ goto error_out;
+ if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
+ (mbx_dump_map != LPFC_MBX_DMP_ALL))
+ goto error_out;
+ if (mbx_word_cnt > sizeof(MAILBOX_t))
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
+ if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
+ goto error_out;
+ if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
+ (mbx_dump_map != LPFC_MBX_DMP_ALL))
+ goto error_out;
+ if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
+ goto error_out;
+ if (mbx_mbox_cmd != 0x9b)
+ goto error_out;
+ } else
+ goto error_out;
+
+ if (mbx_word_cnt == 0)
+ goto error_out;
+ if (rc != LPFC_MBX_DMP_ARG)
+ goto error_out;
+ if (mbx_mbox_cmd & ~0xff)
+ goto error_out;
+
+ /* condition for stop mailbox dump */
+ if (mbx_dump_cnt == 0)
+ goto reset_out;
+
+ return nbytes;
+
+reset_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_avail_get - get the available extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length into the internal buffer data has been copied.
+ *
+ * Description:
+ * This routine is to get the available extent information.
+ *
+ * Returns:
+ * overall lenth of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ uint16_t ext_cnt, ext_size;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nAvailable Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available VPI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available VFI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available RPI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available XRI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_alloc_get - get the allocated extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length into the internal buffer data has been copied.
+ *
+ * Description:
+ * This routine is to get the allocated extent information.
+ *
+ * Returns:
+ * overall lenth of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ uint16_t ext_cnt, ext_size;
+ int rc;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nAllocated Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated VPI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated VFI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated RPI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated XRI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_drivr_get - get driver extent information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length into the internal buffer data has been copied.
+ *
+ * Description:
+ * This routine is to get the driver extent information.
+ *
+ * Returns:
+ * overall lenth of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ struct lpfc_rsrc_blks *rsrc_blks;
+ int index;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nDriver Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tVPI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tVFI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tRPI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tXRI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine get the debugfs idiag command struct from user space and then
+ * perform the syntax check for extent information access commands and sets
+ * up the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytges passing in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t ext_map;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+ goto error_out;
+ if (rc != LPFC_EXT_ACC_CMD_ARG)
+ goto error_out;
+ if (!(ext_map & LPFC_EXT_ACC_ALL))
+ goto error_out;
+
+ return nbytes;
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the proper extent information according to
+ * the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *pbuffer;
+ uint32_t ext_map;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+ if (*ppos)
+ return 0;
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+ return 0;
+
+ ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+ if (ext_map & LPFC_EXT_ACC_AVAIL)
+ len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
+ if (ext_map & LPFC_EXT_ACC_ALLOC)
+ len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
+ if (ext_map & LPFC_EXT_ACC_DRIVR)
+ len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+#undef lpfc_debugfs_op_disc_trc
+static const struct file_operations lpfc_debugfs_op_disc_trc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_disc_trc_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nodelist
+static const struct file_operations lpfc_debugfs_op_nodelist = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_nodelist_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_hbqinfo
+static const struct file_operations lpfc_debugfs_op_hbqinfo = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_hbqinfo_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHBASlim
+static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpHBASlim_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHostSlim
+static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpHostSlim_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpData
+static const struct file_operations lpfc_debugfs_op_dumpData = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpData_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_dumpDataDif_write,
+ .release = lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dumpDif
+static const struct file_operations lpfc_debugfs_op_dumpDif = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpDif_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_dumpDataDif_write,
+ .release = lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dif_err
+static const struct file_operations lpfc_debugfs_op_dif_err = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_dif_err_read,
+ .write = lpfc_debugfs_dif_err_write,
+ .release = lpfc_debugfs_dif_err_release,
+};
+
+#undef lpfc_debugfs_op_slow_ring_trc
+static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_slow_ring_trc_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+static struct dentry *lpfc_debugfs_root = NULL;
+static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_pcicfg_read,
+ .write = lpfc_idiag_pcicfg_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_barAcc
+static const struct file_operations lpfc_idiag_op_barAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_baracc_read,
+ .write = lpfc_idiag_baracc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .read = lpfc_idiag_queinfo_read,
+ .release = lpfc_idiag_release,
+};
+
+#undef lpfc_idiag_op_queAcc
+static const struct file_operations lpfc_idiag_op_queAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_queacc_read,
+ .write = lpfc_idiag_queacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_drbAcc
+static const struct file_operations lpfc_idiag_op_drbAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_drbacc_read,
+ .write = lpfc_idiag_drbacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_ctlAcc
+static const struct file_operations lpfc_idiag_op_ctlAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_ctlacc_read,
+ .write = lpfc_idiag_ctlacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_mbxAcc
+static const struct file_operations lpfc_idiag_op_mbxAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_mbxacc_read,
+ .write = lpfc_idiag_mbxacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_extAcc
+static const struct file_operations lpfc_idiag_op_extAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_extacc_read,
+ .write = lpfc_idiag_extacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#endif
+
+/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dump a bsg pass-through non-embedded mailbox command with
+ * external buffer.
+ **/
+void
+lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ enum mbox_type mbox_tp, enum dma_type dma_tp,
+ enum sta_type sta_tp,
+ struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
+ char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+ int len = 0;
+ uint32_t do_dump = 0;
+ uint32_t *pword;
+ uint32_t i;
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
+ return;
+
+ mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
+ (*mbx_dump_cnt == 0) ||
+ (*mbx_word_cnt == 0))
+ return;
+
+ if (*mbx_mbox_cmd != 0x9B)
+ return;
+
+ if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
+ do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
+ printk(KERN_ERR "\nRead mbox command (x%x), "
+ "nemb:0x%x, extbuf_cnt:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
+ do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
+ printk(KERN_ERR "\nRead mbox buffer (x%x), "
+ "nemb:0x%x, extbuf_seq:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
+ do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
+ printk(KERN_ERR "\nWrite mbox command (x%x), "
+ "nemb:0x%x, extbuf_cnt:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
+ do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
+ printk(KERN_ERR "\nWrite mbox buffer (x%x), "
+ "nemb:0x%x, extbuf_seq:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+
+ /* dump buffer content */
+ if (do_dump) {
+ pword = (uint32_t *)dmabuf->virt;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ "%08x ", (uint32_t)*pword);
+ pword++;
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ (*mbx_dump_cnt)--;
+ }
+
+ /* Clean out command structure on reaching dump count */
+ if (*mbx_dump_cnt == 0)
+ memset(&idiag, 0, sizeof(idiag));
+ return;
+#endif
+}
+
+/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dump a pass-through non-embedded mailbox command from issue
+ * mailbox command.
+ **/
+void
+lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
+ char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+ int len = 0;
+ uint32_t *pword;
+ uint8_t *pbyte;
+ uint32_t i, j;
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
+ return;
+
+ mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
+ (*mbx_dump_cnt == 0) ||
+ (*mbx_word_cnt == 0))
+ return;
+
+ if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
+ (*mbx_mbox_cmd != pmbox->mbxCommand))
+ return;
+
+ /* dump buffer content */
+ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
+ printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
+ pmbox->mbxCommand);
+ pword = (uint32_t *)pmbox;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ "%08x ",
+ ((uint32_t)*pword) & 0xffffffff);
+ pword++;
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ printk(KERN_ERR "\n");
+ }
+ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
+ printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
+ pmbox->mbxCommand);
+ pbyte = (uint8_t *)pmbox;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ for (j = 0; j < 4; j++) {
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%02x",
+ ((uint8_t)*pbyte) & 0xff);
+ pbyte++;
+ }
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len, " ");
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ printk(KERN_ERR "\n");
+ }
+ (*mbx_dump_cnt)--;
+
+ /* Clean out command structure on reaching dump count */
+ if (*mbx_dump_cnt == 0)
+ memset(&idiag, 0, sizeof(idiag));
+ return;
+#endif
+}
+
+/**
+ * lpfc_debugfs_initialize - Initialize debugfs for a vport
+ * @vport: The vport pointer to initialize.
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the lpfc debugfs file system.
+ * If not already created, this routine will create the lpfc directory, and
+ * lpfcX directory (for this HBA), and vportX directory for this vport. It will
+ * also create each file used to access lpfc specific debugfs information.
+ **/
+inline void
+lpfc_debugfs_initialize(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hba *phba = vport->phba;
+ char name[64];
+ uint32_t num, i;
+ bool pport_setup = false;
+
+ if (!lpfc_debugfs_enable)
+ return;
+
+ /* Setup lpfc root directory */
+ if (!lpfc_debugfs_root) {
+ lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
+ atomic_set(&lpfc_debugfs_hba_count, 0);
+ if (!lpfc_debugfs_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0408 Cannot create debugfs root\n");
+ goto debug_failed;
+ }
+ }
+ if (!lpfc_debugfs_start_time)
+ lpfc_debugfs_start_time = jiffies;
+
+ /* Setup funcX directory for specific HBA PCI function */
+ snprintf(name, sizeof(name), "fn%d", phba->brd_no);
+ if (!phba->hba_debugfs_root) {
+ pport_setup = true;
+ phba->hba_debugfs_root =
+ debugfs_create_dir(name, lpfc_debugfs_root);
+ if (!phba->hba_debugfs_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0412 Cannot create debugfs hba\n");
+ goto debug_failed;
+ }
+ atomic_inc(&lpfc_debugfs_hba_count);
+ atomic_set(&phba->debugfs_vport_count, 0);
+
+ /* Setup hbqinfo */
+ snprintf(name, sizeof(name), "hbqinfo");
+ phba->debug_hbqinfo =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_hbqinfo);
+ if (!phba->debug_hbqinfo) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0411 Cannot create debugfs hbqinfo\n");
+ goto debug_failed;
+ }
+
+ /* Setup dumpHBASlim */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHBASlim");
+ phba->debug_dumpHBASlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHBASlim);
+ if (!phba->debug_dumpHBASlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0413 Cannot create debugfs "
+ "dumpHBASlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHBASlim = NULL;
+
+ /* Setup dumpHostSlim */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHostSlim");
+ phba->debug_dumpHostSlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHostSlim);
+ if (!phba->debug_dumpHostSlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0414 Cannot create debugfs "
+ "dumpHostSlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHostSlim = NULL;
+
+ /* Setup dumpData */
+ snprintf(name, sizeof(name), "dumpData");
+ phba->debug_dumpData =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpData);
+ if (!phba->debug_dumpData) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0800 Cannot create debugfs dumpData\n");
+ goto debug_failed;
+ }
+
+ /* Setup dumpDif */
+ snprintf(name, sizeof(name), "dumpDif");
+ phba->debug_dumpDif =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpDif);
+ if (!phba->debug_dumpDif) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0801 Cannot create debugfs dumpDif\n");
+ goto debug_failed;
+ }
+
+ /* Setup DIF Error Injections */
+ snprintf(name, sizeof(name), "InjErrLBA");
+ phba->debug_InjErrLBA =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrLBA) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0807 Cannot create debugfs InjErrLBA\n");
+ goto debug_failed;
+ }
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+ snprintf(name, sizeof(name), "InjErrNPortID");
+ phba->debug_InjErrNPortID =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrNPortID) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0809 Cannot create debugfs InjErrNPortID\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "InjErrWWPN");
+ phba->debug_InjErrWWPN =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrWWPN) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0810 Cannot create debugfs InjErrWWPN\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "writeGuardInjErr");
+ phba->debug_writeGuard =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_writeGuard) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0802 Cannot create debugfs writeGuard\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "writeAppInjErr");
+ phba->debug_writeApp =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_writeApp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0803 Cannot create debugfs writeApp\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "writeRefInjErr");
+ phba->debug_writeRef =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_writeRef) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0804 Cannot create debugfs writeRef\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "readGuardInjErr");
+ phba->debug_readGuard =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_readGuard) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0808 Cannot create debugfs readGuard\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "readAppInjErr");
+ phba->debug_readApp =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_readApp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0805 Cannot create debugfs readApp\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "readRefInjErr");
+ phba->debug_readRef =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_readRef) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0806 Cannot create debugfs readApp\n");
+ goto debug_failed;
+ }
+
+ /* Setup slow ring trace */
+ if (lpfc_debugfs_max_slow_ring_trc) {
+ num = lpfc_debugfs_max_slow_ring_trc - 1;
+ if (num & lpfc_debugfs_max_slow_ring_trc) {
+ /* Change to be a power of 2 */
+ num = lpfc_debugfs_max_slow_ring_trc;
+ i = 0;
+ while (num > 1) {
+ num = num >> 1;
+ i++;
+ }
+ lpfc_debugfs_max_slow_ring_trc = (1 << i);
+ printk(KERN_ERR
+ "lpfc_debugfs_max_disc_trc changed to "
+ "%d\n", lpfc_debugfs_max_disc_trc);
+ }
+ }
+
+ snprintf(name, sizeof(name), "slow_ring_trace");
+ phba->debug_slow_ring_trc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_slow_ring_trc);
+ if (!phba->debug_slow_ring_trc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0415 Cannot create debugfs "
+ "slow_ring_trace\n");
+ goto debug_failed;
+ }
+ if (!phba->slow_ring_trc) {
+ phba->slow_ring_trc = kmalloc(
+ (sizeof(struct lpfc_debugfs_trc) *
+ lpfc_debugfs_max_slow_ring_trc),
+ GFP_KERNEL);
+ if (!phba->slow_ring_trc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0416 Cannot create debugfs "
+ "slow_ring buffer\n");
+ goto debug_failed;
+ }
+ atomic_set(&phba->slow_ring_trc_cnt, 0);
+ memset(phba->slow_ring_trc, 0,
+ (sizeof(struct lpfc_debugfs_trc) *
+ lpfc_debugfs_max_slow_ring_trc));
+ }
+ }
+
+ snprintf(name, sizeof(name), "vport%d", vport->vpi);
+ if (!vport->vport_debugfs_root) {
+ vport->vport_debugfs_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!vport->vport_debugfs_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0417 Can't create debugfs\n");
+ goto debug_failed;
+ }
+ atomic_inc(&phba->debugfs_vport_count);
+ }
+
+ if (lpfc_debugfs_max_disc_trc) {
+ num = lpfc_debugfs_max_disc_trc - 1;
+ if (num & lpfc_debugfs_max_disc_trc) {
+ /* Change to be a power of 2 */
+ num = lpfc_debugfs_max_disc_trc;
+ i = 0;
+ while (num > 1) {
+ num = num >> 1;
+ i++;
+ }
+ lpfc_debugfs_max_disc_trc = (1 << i);
+ printk(KERN_ERR
+ "lpfc_debugfs_max_disc_trc changed to %d\n",
+ lpfc_debugfs_max_disc_trc);
+ }
+ }
+
+ vport->disc_trc = kzalloc(
+ (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc),
+ GFP_KERNEL);
+
+ if (!vport->disc_trc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0418 Cannot create debugfs disc trace "
+ "buffer\n");
+ goto debug_failed;
+ }
+ atomic_set(&vport->disc_trc_cnt, 0);
+
+ snprintf(name, sizeof(name), "discovery_trace");
+ vport->debug_disc_trc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_disc_trc);
+ if (!vport->debug_disc_trc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0419 Cannot create debugfs "
+ "discovery_trace\n");
+ goto debug_failed;
+ }
+ snprintf(name, sizeof(name), "nodelist");
+ vport->debug_nodelist =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_nodelist);
+ if (!vport->debug_nodelist) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2985 Can't create debugfs nodelist\n");
+ goto debug_failed;
+ }
+
+ /*
+ * The following section is for additional directories/files for the
+ * physical port.
+ */
+
+ if (!pport_setup)
+ goto debug_failed;
+
+ /*
+ * iDiag debugfs root entry points for SLI4 device only
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ goto debug_failed;
+
+ snprintf(name, sizeof(name), "iDiag");
+ if (!phba->idiag_root) {
+ phba->idiag_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!phba->idiag_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2922 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ /* Initialize iDiag data structure */
+ memset(&idiag, 0, sizeof(idiag));
+ }
+
+ /* iDiag read PCI config space */
+ snprintf(name, sizeof(name), "pciCfg");
+ if (!phba->idiag_pci_cfg) {
+ phba->idiag_pci_cfg =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+ if (!phba->idiag_pci_cfg) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2923 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
+ /* iDiag PCI BAR access */
+ snprintf(name, sizeof(name), "barAcc");
+ if (!phba->idiag_bar_acc) {
+ phba->idiag_bar_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
+ if (!phba->idiag_bar_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3056 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
+ /* iDiag get PCI function queue information */
+ snprintf(name, sizeof(name), "queInfo");
+ if (!phba->idiag_que_info) {
+ phba->idiag_que_info =
+ debugfs_create_file(name, S_IFREG|S_IRUGO,
+ phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+ if (!phba->idiag_que_info) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2924 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag access PCI function queue */
+ snprintf(name, sizeof(name), "queAcc");
+ if (!phba->idiag_que_acc) {
+ phba->idiag_que_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
+ if (!phba->idiag_que_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2926 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag access PCI function doorbell registers */
+ snprintf(name, sizeof(name), "drbAcc");
+ if (!phba->idiag_drb_acc) {
+ phba->idiag_drb_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
+ if (!phba->idiag_drb_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2927 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag access PCI function control registers */
+ snprintf(name, sizeof(name), "ctlAcc");
+ if (!phba->idiag_ctl_acc) {
+ phba->idiag_ctl_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
+ if (!phba->idiag_ctl_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2981 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag access mbox commands */
+ snprintf(name, sizeof(name), "mbxAcc");
+ if (!phba->idiag_mbx_acc) {
+ phba->idiag_mbx_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
+ if (!phba->idiag_mbx_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2980 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag extents access commands */
+ if (phba->sli4_hba.extents_in_use) {
+ snprintf(name, sizeof(name), "extAcc");
+ if (!phba->idiag_ext_acc) {
+ phba->idiag_ext_acc =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba,
+ &lpfc_idiag_op_extAcc);
+ if (!phba->idiag_ext_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2986 Cant create "
+ "idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+ }
+
+debug_failed:
+ return;
+#endif
+}
+
+/**
+ * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport
+ * @vport: The vport pointer to remove from debugfs.
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system elements
+ * that are specific to this vport. It also checks to see if there are any
+ * users left for the debugfs directories associated with the HBA and driver. If
+ * this is the last user of the HBA directory or driver directory then it will
+ * remove those from the debugfs infrastructure as well.
+ **/
+inline void
+lpfc_debugfs_terminate(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hba *phba = vport->phba;
+
+ if (vport->disc_trc) {
+ kfree(vport->disc_trc);
+ vport->disc_trc = NULL;
+ }
+ if (vport->debug_disc_trc) {
+ debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
+ vport->debug_disc_trc = NULL;
+ }
+ if (vport->debug_nodelist) {
+ debugfs_remove(vport->debug_nodelist); /* nodelist */
+ vport->debug_nodelist = NULL;
+ }
+ if (vport->vport_debugfs_root) {
+ debugfs_remove(vport->vport_debugfs_root); /* vportX */
+ vport->vport_debugfs_root = NULL;
+ atomic_dec(&phba->debugfs_vport_count);
+ }
+ if (atomic_read(&phba->debugfs_vport_count) == 0) {
+
+ if (phba->debug_hbqinfo) {
+ debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
+ phba->debug_hbqinfo = NULL;
+ }
+ if (phba->debug_dumpHBASlim) {
+ debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
+ phba->debug_dumpHBASlim = NULL;
+ }
+ if (phba->debug_dumpHostSlim) {
+ debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
+ phba->debug_dumpHostSlim = NULL;
+ }
+ if (phba->debug_dumpData) {
+ debugfs_remove(phba->debug_dumpData); /* dumpData */
+ phba->debug_dumpData = NULL;
+ }
+
+ if (phba->debug_dumpDif) {
+ debugfs_remove(phba->debug_dumpDif); /* dumpDif */
+ phba->debug_dumpDif = NULL;
+ }
+ if (phba->debug_InjErrLBA) {
+ debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
+ phba->debug_InjErrLBA = NULL;
+ }
+ if (phba->debug_InjErrNPortID) { /* InjErrNPortID */
+ debugfs_remove(phba->debug_InjErrNPortID);
+ phba->debug_InjErrNPortID = NULL;
+ }
+ if (phba->debug_InjErrWWPN) {
+ debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+ phba->debug_InjErrWWPN = NULL;
+ }
+ if (phba->debug_writeGuard) {
+ debugfs_remove(phba->debug_writeGuard); /* writeGuard */
+ phba->debug_writeGuard = NULL;
+ }
+ if (phba->debug_writeApp) {
+ debugfs_remove(phba->debug_writeApp); /* writeApp */
+ phba->debug_writeApp = NULL;
+ }
+ if (phba->debug_writeRef) {
+ debugfs_remove(phba->debug_writeRef); /* writeRef */
+ phba->debug_writeRef = NULL;
+ }
+ if (phba->debug_readGuard) {
+ debugfs_remove(phba->debug_readGuard); /* readGuard */
+ phba->debug_readGuard = NULL;
+ }
+ if (phba->debug_readApp) {
+ debugfs_remove(phba->debug_readApp); /* readApp */
+ phba->debug_readApp = NULL;
+ }
+ if (phba->debug_readRef) {
+ debugfs_remove(phba->debug_readRef); /* readRef */
+ phba->debug_readRef = NULL;
+ }
+
+ if (phba->slow_ring_trc) {
+ kfree(phba->slow_ring_trc);
+ phba->slow_ring_trc = NULL;
+ }
+ if (phba->debug_slow_ring_trc) {
+ /* slow_ring_trace */
+ debugfs_remove(phba->debug_slow_ring_trc);
+ phba->debug_slow_ring_trc = NULL;
+ }
+
+ /*
+ * iDiag release
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_ext_acc) {
+ /* iDiag extAcc */
+ debugfs_remove(phba->idiag_ext_acc);
+ phba->idiag_ext_acc = NULL;
+ }
+ if (phba->idiag_mbx_acc) {
+ /* iDiag mbxAcc */
+ debugfs_remove(phba->idiag_mbx_acc);
+ phba->idiag_mbx_acc = NULL;
+ }
+ if (phba->idiag_ctl_acc) {
+ /* iDiag ctlAcc */
+ debugfs_remove(phba->idiag_ctl_acc);
+ phba->idiag_ctl_acc = NULL;
+ }
+ if (phba->idiag_drb_acc) {
+ /* iDiag drbAcc */
+ debugfs_remove(phba->idiag_drb_acc);
+ phba->idiag_drb_acc = NULL;
+ }
+ if (phba->idiag_que_acc) {
+ /* iDiag queAcc */
+ debugfs_remove(phba->idiag_que_acc);
+ phba->idiag_que_acc = NULL;
+ }
+ if (phba->idiag_que_info) {
+ /* iDiag queInfo */
+ debugfs_remove(phba->idiag_que_info);
+ phba->idiag_que_info = NULL;
+ }
+ if (phba->idiag_bar_acc) {
+ /* iDiag barAcc */
+ debugfs_remove(phba->idiag_bar_acc);
+ phba->idiag_bar_acc = NULL;
+ }
+ if (phba->idiag_pci_cfg) {
+ /* iDiag pciCfg */
+ debugfs_remove(phba->idiag_pci_cfg);
+ phba->idiag_pci_cfg = NULL;
+ }
+
+ /* Finally remove the iDiag debugfs root */
+ if (phba->idiag_root) {
+ /* iDiag root */
+ debugfs_remove(phba->idiag_root);
+ phba->idiag_root = NULL;
+ }
+ }
+
+ if (phba->hba_debugfs_root) {
+ debugfs_remove(phba->hba_debugfs_root); /* fnX */
+ phba->hba_debugfs_root = NULL;
+ atomic_dec(&lpfc_debugfs_hba_count);
+ }
+
+ if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+ debugfs_remove(lpfc_debugfs_root); /* lpfc */
+ lpfc_debugfs_root = NULL;
+ }
+ }
+#endif
+ return;
+}
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here is intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_all_queues - dump all the queues with a hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps entries of all the queues asociated with the @phba.
+ **/
+void
+lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
+{
+ int fcp_wqidx;
+
+ /*
+ * Dump Work Queues (WQs)
+ */
+ lpfc_debug_dump_mbx_wq(phba);
+ lpfc_debug_dump_els_wq(phba);
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+ lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
+
+ lpfc_debug_dump_hdr_rq(phba);
+ lpfc_debug_dump_dat_rq(phba);
+ /*
+ * Dump Complete Queues (CQs)
+ */
+ lpfc_debug_dump_mbx_cq(phba);
+ lpfc_debug_dump_els_cq(phba);
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+ lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
+
+ /*
+ * Dump Event Queues (EQs)
+ */
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+ lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
+}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 000000000..8b2b6a3bf
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,671 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#ifndef _H_LPFC_DEBUG_FS
+#define _H_LPFC_DEBUG_FS
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* dumpSLIqinfo output buffer size */
+#define LPFC_DUMPSLIQINFO_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/*
+ * For SLI4 iDiag debugfs diagnostics tool
+ */
+
+/* pciConf */
+#define LPFC_PCI_CFG_BROWSE 0xffff
+#define LPFC_PCI_CFG_RD_CMD_ARG 2
+#define LPFC_PCI_CFG_WR_CMD_ARG 3
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+#define IDIAG_PCICFG_WHERE_INDX 0
+#define IDIAG_PCICFG_COUNT_INDX 1
+#define IDIAG_PCICFG_VALUE_INDX 2
+
+/* barAcc */
+#define LPFC_PCI_BAR_BROWSE 0xffff
+#define LPFC_PCI_BAR_RD_CMD_ARG 3
+#define LPFC_PCI_BAR_WR_CMD_ARG 3
+
+#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
+#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
+#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
+#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
+
+#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
+#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
+
+#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
+#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
+#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
+#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
+
+#define IDIAG_BARACC_BAR_NUM_INDX 0
+#define IDIAG_BARACC_OFF_SET_INDX 1
+#define IDIAG_BARACC_ACC_MOD_INDX 2
+#define IDIAG_BARACC_REG_VAL_INDX 2
+#define IDIAG_BARACC_BAR_SZE_INDX 3
+
+#define IDIAG_BARACC_BAR_0 0
+#define IDIAG_BARACC_BAR_1 1
+#define IDIAG_BARACC_BAR_2 2
+
+#define SINGLE_WORD 1
+
+/* queue info */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
+
+/* queue acc */
+#define LPFC_QUE_ACC_BROWSE 0xffff
+#define LPFC_QUE_ACC_RD_CMD_ARG 4
+#define LPFC_QUE_ACC_WR_CMD_ARG 6
+#define LPFC_QUE_ACC_BUF_SIZE 4096
+#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2)
+
+#define LPFC_IDIAG_EQ 1
+#define LPFC_IDIAG_CQ 2
+#define LPFC_IDIAG_MQ 3
+#define LPFC_IDIAG_WQ 4
+#define LPFC_IDIAG_RQ 5
+
+#define IDIAG_QUEACC_QUETP_INDX 0
+#define IDIAG_QUEACC_QUEID_INDX 1
+#define IDIAG_QUEACC_INDEX_INDX 2
+#define IDIAG_QUEACC_COUNT_INDX 3
+#define IDIAG_QUEACC_OFFST_INDX 4
+#define IDIAG_QUEACC_VALUE_INDX 5
+
+/* doorbell register acc */
+#define LPFC_DRB_ACC_ALL 0xffff
+#define LPFC_DRB_ACC_RD_CMD_ARG 1
+#define LPFC_DRB_ACC_WR_CMD_ARG 2
+#define LPFC_DRB_ACC_BUF_SIZE 256
+
+#define LPFC_DRB_EQCQ 1
+#define LPFC_DRB_MQ 2
+#define LPFC_DRB_WQ 3
+#define LPFC_DRB_RQ 4
+
+#define LPFC_DRB_MAX 4
+
+#define IDIAG_DRBACC_REGID_INDX 0
+#define IDIAG_DRBACC_VALUE_INDX 1
+
+/* control register acc */
+#define LPFC_CTL_ACC_ALL 0xffff
+#define LPFC_CTL_ACC_RD_CMD_ARG 1
+#define LPFC_CTL_ACC_WR_CMD_ARG 2
+#define LPFC_CTL_ACC_BUF_SIZE 256
+
+#define LPFC_CTL_PORT_SEM 1
+#define LPFC_CTL_PORT_STA 2
+#define LPFC_CTL_PORT_CTL 3
+#define LPFC_CTL_PORT_ER1 4
+#define LPFC_CTL_PORT_ER2 5
+#define LPFC_CTL_PDEV_CTL 6
+
+#define LPFC_CTL_MAX 6
+
+#define IDIAG_CTLACC_REGID_INDX 0
+#define IDIAG_CTLACC_VALUE_INDX 1
+
+/* mailbox access */
+#define LPFC_MBX_DMP_ARG 4
+
+#define LPFC_MBX_ACC_BUF_SIZE 512
+#define LPFC_MBX_ACC_LBUF_SZ 128
+
+#define LPFC_MBX_DMP_MBX_WORD 0x00000001
+#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
+#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
+
+#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
+#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
+#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
+#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
+#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
+ LPFC_BSG_DMP_MBX_RD_BUF | \
+ LPFC_BSG_DMP_MBX_WR_MBX | \
+ LPFC_BSG_DMP_MBX_WR_BUF)
+
+#define LPFC_MBX_DMP_ALL 0xffff
+#define LPFC_MBX_ALL_CMD 0xff
+
+#define IDIAG_MBXACC_MBCMD_INDX 0
+#define IDIAG_MBXACC_DPMAP_INDX 1
+#define IDIAG_MBXACC_DPCNT_INDX 2
+#define IDIAG_MBXACC_WDCNT_INDX 3
+
+/* extents access */
+#define LPFC_EXT_ACC_CMD_ARG 1
+#define LPFC_EXT_ACC_BUF_SIZE 4096
+
+#define LPFC_EXT_ACC_AVAIL 0x1
+#define LPFC_EXT_ACC_ALLOC 0x2
+#define LPFC_EXT_ACC_DRIVR 0x4
+#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
+ LPFC_EXT_ACC_AVAIL | \
+ LPFC_EXT_ACC_ALLOC)
+
+#define IDIAG_EXTACC_EXMAP_INDX 0
+
+#define SIZE_U8 sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+ char *i_private;
+ char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+ char *buffer;
+ int len;
+};
+
+struct lpfc_debugfs_trc {
+ char *fmt;
+ uint32_t data1;
+ uint32_t data2;
+ uint32_t data3;
+ uint32_t seq_cnt;
+ unsigned long jif;
+};
+
+struct lpfc_idiag_offset {
+ uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 8
+struct lpfc_idiag_cmd {
+ uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+
+#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
+#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
+#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
+#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
+
+#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
+#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
+#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
+#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014
+
+#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021
+#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
+#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
+#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
+
+#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
+#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
+#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
+#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
+
+#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
+#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
+
+#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
+
+ uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+ uint32_t active;
+ struct lpfc_idiag_cmd cmd;
+ struct lpfc_idiag_offset offset;
+ void *ptr_private;
+};
+#endif
+
+/* Mask for discovery_trace */
+#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
+#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
+#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */
+#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */
+#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */
+#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */
+#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */
+#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */
+#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */
+#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */
+#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */
+
+#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
+ * discovery */
+#endif /* H_LPFC_DEBUG_FS */
+
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here is intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_qe - dump an specific entry from a queue
+ * @q: Pointer to the queue descriptor.
+ * @idx: Index to the entry on the queue.
+ *
+ * This function dumps an entry indexed by @idx from a queue specified by the
+ * queue descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
+{
+ char line_buf[LPFC_LBUF_SZ];
+ int i, esize, qe_word_cnt, len;
+ uint32_t *pword;
+
+ /* sanity checks */
+ if (!q)
+ return;
+ if (idx >= q->entry_count)
+ return;
+
+ esize = q->entry_size;
+ qe_word_cnt = esize / sizeof(uint32_t);
+ pword = q->qe[idx].address;
+
+ len = 0;
+ len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+ if (qe_word_cnt > 8)
+ printk(KERN_ERR "%s\n", line_buf);
+
+ for (i = 0; i < qe_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ if (qe_word_cnt > 8) {
+ len = 0;
+ memset(line_buf, 0, LPFC_LBUF_SZ);
+ len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ }
+ len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+ ((uint32_t)*pword) & 0xffffffff);
+ pword++;
+ }
+ if (qe_word_cnt <= 8 || (i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+}
+
+/**
+ * lpfc_debug_dump_q - dump all entries from an specific queue
+ * @q: Pointer to the queue descriptor.
+ *
+ * This function dumps all entries from a queue specified by the queue
+ * descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_q(struct lpfc_queue *q)
+{
+ int idx, entry_count;
+
+ /* sanity check */
+ if (!q)
+ return;
+
+ dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev,
+ "%d: [qid:%d, type:%d, subtype:%d, "
+ "qe_size:%d, qe_count:%d, "
+ "host_index:%d, port_index:%d]\n",
+ (q->phba)->brd_no,
+ q->queue_id, q->type, q->subtype,
+ q->entry_size, q->entry_count,
+ q->host_index, q->hba_index);
+ entry_count = q->entry_count;
+ for (idx = 0; idx < entry_count; idx++)
+ lpfc_debug_dump_qe(q, idx);
+ printk(KERN_ERR "\n");
+}
+
+/**
+ * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to a FCP work queue.
+ *
+ * This function dumps all entries from a FCP work queue specified by the
+ * @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+ /* sanity check */
+ if (fcp_wqidx >= phba->cfg_fcp_io_channel)
+ return;
+
+ printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
+ fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+}
+
+/**
+ * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to a FCP work queue.
+ *
+ * This function dumps all entries from a FCP complete queue which is
+ * associated to the FCP work queue specified by the @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+ int fcp_cqidx, fcp_cqid;
+
+ /* sanity check */
+ if (fcp_wqidx >= phba->cfg_fcp_io_channel)
+ return;
+
+ fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
+ if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+ break;
+ if (phba->intr_type == MSIX) {
+ if (fcp_cqidx >= phba->cfg_fcp_io_channel)
+ return;
+ } else {
+ if (fcp_cqidx > 0)
+ return;
+ }
+
+ printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
+ fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cqidx, fcp_cqid);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+}
+
+/**
+ * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to a FCP work queue.
+ *
+ * This function dumps all entries from a FCP event queue which is
+ * associated to the FCP work queue specified by the @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+ struct lpfc_queue *qdesc;
+ int fcp_eqidx, fcp_eqid;
+ int fcp_cqidx, fcp_cqid;
+
+ /* sanity check */
+ if (fcp_wqidx >= phba->cfg_fcp_io_channel)
+ return;
+ fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
+ if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+ break;
+ if (phba->intr_type == MSIX) {
+ if (fcp_cqidx >= phba->cfg_fcp_io_channel)
+ return;
+ } else {
+ if (fcp_cqidx > 0)
+ return;
+ }
+
+ fcp_eqidx = fcp_cqidx;
+ fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
+ qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
+
+ printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
+ "EQ[Idx:%d|Qid:%d]\n",
+ fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid);
+ lpfc_debug_dump_q(qdesc);
+}
+
+/**
+ * lpfc_debug_dump_els_wq - dump all entries from the els work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the ELS work queue.
+ **/
+static inline void
+lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n",
+ phba->sli4_hba.els_wq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+}
+
+/**
+ * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the MBOX work queue.
+ **/
+static inline void
+lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n",
+ phba->sli4_hba.mbx_wq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+}
+
+/**
+ * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive data queue.
+ **/
+static inline void
+lpfc_debug_dump_dat_rq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n",
+ phba->sli4_hba.dat_rq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+}
+
+/**
+ * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive header queue.
+ **/
+static inline void
+lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n",
+ phba->sli4_hba.hdr_rq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+}
+
+/**
+ * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the els complete queue.
+ **/
+static inline void
+lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_cq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+}
+
+/**
+ * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the mbox complete queue.
+ **/
+static inline void
+lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
+{
+ printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_cq->queue_id);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+}
+
+/**
+ * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Work queue identifier.
+ *
+ * This function dumps all entries from a work queue identified by the queue
+ * identifier.
+ **/
+static inline void
+lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
+{
+ int wq_idx;
+
+ for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
+ if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+ break;
+ if (wq_idx < phba->cfg_fcp_io_channel) {
+ printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+ return;
+ }
+
+ if (phba->sli4_hba.els_wq->queue_id == qid) {
+ printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Mbox work queue identifier.
+ *
+ * This function dumps all entries from a mbox work queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)
+{
+ if (phba->sli4_hba.mbx_wq->queue_id == qid) {
+ printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Receive queue identifier.
+ *
+ * This function dumps all entries from a receive queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
+{
+ if (phba->sli4_hba.hdr_rq->queue_id == qid) {
+ printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+ return;
+ }
+ if (phba->sli4_hba.dat_rq->queue_id == qid) {
+ printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Complete queue identifier.
+ *
+ * This function dumps all entries from a complete queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
+{
+ int cq_idx = 0;
+
+ do {
+ if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+ break;
+ } while (++cq_idx < phba->cfg_fcp_io_channel);
+
+ if (cq_idx < phba->cfg_fcp_io_channel) {
+ printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+ return;
+ }
+
+ if (phba->sli4_hba.els_cq->queue_id == qid) {
+ printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+ return;
+ }
+
+ if (phba->sli4_hba.mbx_cq->queue_id == qid) {
+ printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+ }
+}
+
+/**
+ * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Complete queue identifier.
+ *
+ * This function dumps all entries from an event queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
+{
+ int eq_idx;
+
+ for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+ if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
+ break;
+ }
+
+ if (eq_idx < phba->cfg_fcp_io_channel) {
+ printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
+ return;
+ }
+
+}
+
+void lpfc_debug_dump_all_queues(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
new file mode 100644
index 000000000..697702797
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -0,0 +1,272 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
+#define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */
+#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
+#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
+
+
+/* This is the protocol dependent definition for a Node List Entry.
+ * This is used by Fibre Channel protocol to support FCP.
+ */
+
+/* worker thread events */
+enum lpfc_work_type {
+ LPFC_EVT_ONLINE,
+ LPFC_EVT_OFFLINE_PREP,
+ LPFC_EVT_OFFLINE,
+ LPFC_EVT_WARM_START,
+ LPFC_EVT_KILL,
+ LPFC_EVT_ELS_RETRY,
+ LPFC_EVT_DEV_LOSS,
+ LPFC_EVT_FASTPATH_MGMT_EVT,
+ LPFC_EVT_RESET_HBA,
+};
+
+/* structure used to queue event to the discovery tasklet */
+struct lpfc_work_evt {
+ struct list_head evt_listp;
+ void *evt_arg1;
+ void *evt_arg2;
+ enum lpfc_work_type evt;
+};
+
+struct lpfc_scsi_check_condition_event;
+struct lpfc_scsi_varqueuedepth_event;
+struct lpfc_scsi_event_header;
+struct lpfc_fabric_event_header;
+struct lpfc_fcprdchkerr_event;
+
+/* structure used for sending events from fast path */
+struct lpfc_fast_path_event {
+ struct lpfc_work_evt work_evt;
+ struct lpfc_vport *vport;
+ union {
+ struct lpfc_scsi_check_condition_event check_cond_evt;
+ struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
+ struct lpfc_scsi_event_header scsi_evt;
+ struct lpfc_fabric_event_header fabric_evt;
+ struct lpfc_fcprdchkerr_event read_check_error;
+ } un;
+};
+
+#define LPFC_SLI4_MAX_XRI 1024 /* Used to make the ndlp's xri_bitmap */
+#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG)
+struct lpfc_node_rrqs {
+ unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
+};
+
+struct lpfc_nodelist {
+ struct list_head nlp_listp;
+ struct lpfc_name nlp_portname;
+ struct lpfc_name nlp_nodename;
+ uint32_t nlp_flag; /* entry flags */
+ uint32_t nlp_add_flag; /* additional flags */
+ uint32_t nlp_DID; /* FC D_ID of entry */
+ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
+ uint16_t nlp_type;
+#define NLP_FC_NODE 0x1 /* entry is an FC node */
+#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */
+#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */
+#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
+
+ uint16_t nlp_rpi;
+ uint16_t nlp_state; /* state transition indicator */
+ uint16_t nlp_prev_state; /* state transition indicator */
+ uint16_t nlp_xri; /* output exchange id for RPI */
+ uint16_t nlp_sid; /* scsi id */
+#define NLP_NO_SID 0xffff
+ uint16_t nlp_maxframe; /* Max RCV frame size */
+ uint8_t nlp_class_sup; /* Supported Classes */
+ uint8_t nlp_retry; /* used for ELS retries */
+ uint8_t nlp_fcp_info; /* class info, bits 0-3 */
+#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
+
+ uint16_t nlp_usg_map; /* ndlp management usage bitmap */
+#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
+#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */
+#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */
+#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
+
+ struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
+ struct lpfc_hba *phba;
+ struct fc_rport *rport; /* Corresponding FC transport
+ port structure */
+ struct lpfc_vport *vport;
+ struct lpfc_work_evt els_retry_evt;
+ struct lpfc_work_evt dev_loss_evt;
+ struct kref kref;
+ atomic_t cmd_pending;
+ uint32_t cmd_qdepth;
+ unsigned long last_change_time;
+ unsigned long *active_rrqs_xri_bitmap;
+ struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
+};
+struct lpfc_node_rrq {
+ struct list_head list;
+ uint16_t xritag;
+ uint16_t send_rrq;
+ uint16_t rxid;
+ uint32_t nlp_DID; /* FC D_ID of entry */
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ unsigned long rrq_stop_time;
+};
+
+/* Defines for nlp_flag (uint32) */
+#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
+#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
+#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
+#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
+#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */
+#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */
+#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
+#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
+#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
+#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
+#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */
+#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
+ ACC */
+#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
+ NPR list */
+#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */
+#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
+#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
+#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
+#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
+#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
+
+/* Defines for nlp_add_flag (uint32) */
+#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */
+
+/* ndlp usage management macros */
+#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
+ & NLP_USG_NODE_ACT_BIT) \
+ && \
+ !((ndlp)->nlp_usg_map \
+ & NLP_USG_FREE_ACK_BIT))
+#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_NODE_ACT_BIT)
+#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
+ = NLP_USG_NODE_ACT_BIT)
+#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
+ &= ~NLP_USG_NODE_ACT_BIT)
+#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ & NLP_USG_IACT_REQ_BIT)
+#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_IACT_REQ_BIT)
+#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ & NLP_USG_FREE_REQ_BIT)
+#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_FREE_REQ_BIT)
+#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
+ & NLP_USG_FREE_ACK_BIT)
+#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_FREE_ACK_BIT)
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
+ * when Link Up discovery or Registered State Change Notification (RSCN)
+ * processing is needed. Each list holds the nodes that require a PLOGI or
+ * ADISC Extended Link Service (ELS) request. These lists keep track of the
+ * nodes affected by an RSCN, or a Link Up (Typically, all nodes are effected
+ * by Link Up) event. The unmapped_list contains all nodes that have
+ * successfully logged into at the Fibre Channel level. The
+ * mapped_list will contain all nodes that are mapped FCP targets.
+ *
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+
+/* Defines for nlp_state */
+#define NLP_STE_UNUSED_NODE 0x0 /* node is just allocated */
+#define NLP_STE_PLOGI_ISSUE 0x1 /* PLOGI was sent to NL_PORT */
+#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
+#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
+#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
+#define NLP_STE_LOGO_ISSUE 0x5 /* LOGO was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE 0x6 /* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE 0x7 /* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE 0x8 /* NPort disappeared */
+#define NLP_STE_MAX_STATE 0x9
+#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
+
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to PRLI_COMPL. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
+ * expire, all effected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
+ * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list. 32 entries are processed initially and
+ * ADISC is initited for each one. Completions / Events for each node are
+ * funnelled thru the state machine. As each node finishes ADISC processing, it
+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list. 32 entries are processed initially and PLOGI is initited for each one.
+ * Completions / Events for each node are funnelled thru the state machine. As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+/* Defines for Node List Entry Events that could happen */
+#define NLP_EVT_RCV_PLOGI 0x0 /* Rcv'd an ELS PLOGI command */
+#define NLP_EVT_RCV_PRLI 0x1 /* Rcv'd an ELS PRLI command */
+#define NLP_EVT_RCV_LOGO 0x2 /* Rcv'd an ELS LOGO command */
+#define NLP_EVT_RCV_ADISC 0x3 /* Rcv'd an ELS ADISC command */
+#define NLP_EVT_RCV_PDISC 0x4 /* Rcv'd an ELS PDISC command */
+#define NLP_EVT_RCV_PRLO 0x5 /* Rcv'd an ELS PRLO command */
+#define NLP_EVT_CMPL_PLOGI 0x6 /* Sent an ELS PLOGI command */
+#define NLP_EVT_CMPL_PRLI 0x7 /* Sent an ELS PRLI command */
+#define NLP_EVT_CMPL_LOGO 0x8 /* Sent an ELS LOGO command */
+#define NLP_EVT_CMPL_ADISC 0x9 /* Sent an ELS ADISC command */
+#define NLP_EVT_CMPL_REG_LOGIN 0xa /* REG_LOGIN mbox cmd completed */
+#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
+#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
+#define NLP_EVT_MAX_EVENT 0xd
+
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
new file mode 100644
index 000000000..851e8efe3
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -0,0 +1,8286 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+/* See Fibre Channel protocol T11 FC-LS for details */
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
+static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, uint8_t retry);
+static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocb);
+
+static int lpfc_max_els_tries = 3;
+
+/**
+ * lpfc_els_chk_latt - Check host link attention event for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there is an outstanding host link
+ * attention event during the discovery process with the @vport. It is done
+ * by reading the HBA's Host Attention (HA) register. If there is any host
+ * link attention events during this @vport's discovery process, the @vport
+ * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
+ * be issued if the link state is not already in host link cleared state,
+ * and a return code shall indicate whether the host link attention event
+ * had happened.
+ *
+ * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
+ * state in LPFC_VPORT_READY, the request for checking host link attention
+ * event will be ignored and a return code shall indicate no host link
+ * attention event had happened.
+ *
+ * Return codes
+ * 0 - no host link attention event happened
+ * 1 - host link attention event happened
+ **/
+int
+lpfc_els_chk_latt(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t ha_copy;
+
+ if (vport->port_state >= LPFC_VPORT_READY ||
+ phba->link_state == LPFC_LINK_DOWN ||
+ phba->sli_rev > LPFC_SLI_REV3)
+ return 0;
+
+ /* Read the HBA Host Attention Register */
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
+
+ if (!(ha_copy & HA_LATT))
+ return 0;
+
+ /* Pending Link Event during Discovery */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0237 Pending Link Event during "
+ "Discovery: State x%x\n",
+ phba->pport->port_state);
+
+ /* CLEAR_LA should re-enable link attention events and
+ * we should then immediately take a LATT event. The
+ * LATT processing should call lpfc_linkdown() which
+ * will cleanup any left over in-progress discovery
+ * events.
+ */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_ABORT_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
+
+ if (phba->link_state != LPFC_CLEAR_LA)
+ lpfc_issue_clear_la(phba, vport);
+
+ return 1;
+}
+
+/**
+ * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @expectRsp: flag indicating whether response is expected.
+ * @cmdSize: size of the ELS command.
+ * @retry: number of retries to the command IOCB when it fails.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: destination identifier.
+ * @elscmd: the ELS command code.
+ *
+ * This routine is used for allocating a lpfc-IOCB data structure from
+ * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
+ * passed into the routine for discovery state machine to issue an Extended
+ * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
+ * and preparation routine that is used by all the discovery state machine
+ * routines and the ELS command-specific fields will be later set up by
+ * the individual discovery machine routines after calling this routine
+ * allocating and preparing a generic IOCB data structure. It fills in the
+ * Buffer Descriptor Entries (BDEs), allocates buffers for both command
+ * payload and response payload (if expected). The reference count on the
+ * ndlp is incremented by 1 and the reference to the ndlp is put into
+ * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * reference for the command's callback function to access later.
+ *
+ * Return code
+ * Pointer to the newly allocated/prepared els iocb data structure
+ * NULL - when els iocb data structure allocation/preparation failed
+ **/
+struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+ uint16_t cmdSize, uint8_t retry,
+ struct lpfc_nodelist *ndlp, uint32_t did,
+ uint32_t elscmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
+ struct ulp_bde64 *bpl;
+ IOCB_t *icmd;
+
+
+ if (!lpfc_is_link_up(phba))
+ return NULL;
+
+ /* Allocate buffer for command iocb */
+ elsiocb = lpfc_sli_get_iocbq(phba);
+
+ if (elsiocb == NULL)
+ return NULL;
+
+ /*
+ * If this command is for fabric controller and HBA running
+ * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
+ */
+ if ((did == Fabric_DID) &&
+ (phba->hba_flag & HBA_FIP_SUPPORT) &&
+ ((elscmd == ELS_CMD_FLOGI) ||
+ (elscmd == ELS_CMD_FDISC) ||
+ (elscmd == ELS_CMD_LOGO)))
+ switch (elscmd) {
+ case ELS_CMD_FLOGI:
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
+ case ELS_CMD_FDISC:
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
+ case ELS_CMD_LOGO:
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
+ }
+ else
+ elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
+
+ icmd = &elsiocb->iocb;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (pcmd)
+ pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+ if (!pcmd || !pcmd->virt)
+ goto els_iocb_free_pcmb_exit;
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ /* Allocate buffer for response payload */
+ if (expectRsp) {
+ prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (prsp)
+ prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &prsp->phys);
+ if (!prsp || !prsp->virt)
+ goto els_iocb_free_prsp_exit;
+ INIT_LIST_HEAD(&prsp->list);
+ } else
+ prsp = NULL;
+
+ /* Allocate buffer for Buffer ptr list */
+ pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (pbuflist)
+ pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &pbuflist->phys);
+ if (!pbuflist || !pbuflist->virt)
+ goto els_iocb_free_pbuf_exit;
+
+ INIT_LIST_HEAD(&pbuflist->list);
+
+ if (expectRsp) {
+ icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+ icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+
+ icmd->un.elsreq64.remoteID = did; /* DID */
+ icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ if (elscmd == ELS_CMD_FLOGI)
+ icmd->ulpTimeout = FF_DEF_RATOV * 2;
+ else
+ icmd->ulpTimeout = phba->fc_ratov * 2;
+ } else {
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
+ icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ }
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ /*
+ * If we have NPIV enabled, we want to send ELS traffic by VPI.
+ * For SLI4, since the driver controls VPIs we also want to include
+ * all ELS pt2pt protocol traffic as well.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+ ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (vport->fc_flag & FC_PT2PT))) {
+
+ if (expectRsp) {
+ icmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
+ }
+
+ icmd->ulpCt_h = 0;
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ icmd->ulpCt_l = 0; /* context = invalid RPI */
+ else
+ icmd->ulpCt_l = 1; /* context = VPI */
+ }
+
+ bpl = (struct ulp_bde64 *) pbuflist->virt;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
+ bpl->tus.f.bdeSize = cmdSize;
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ if (expectRsp) {
+ bpl++;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
+ bpl->tus.f.bdeSize = FCELSSIZE;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
+
+ /* prevent preparing iocb with NULL ndlp reference */
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto els_iocb_free_pbuf_exit;
+ elsiocb->context2 = pcmd;
+ elsiocb->context3 = pbuflist;
+ elsiocb->retry = retry;
+ elsiocb->vport = vport;
+ elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
+
+ if (prsp) {
+ list_add(&prsp->list, &pcmd->list);
+ }
+ if (expectRsp) {
+ /* Xmit ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0116 Xmit ELS command x%x to remote "
+ "NPORT x%x I/O tag: x%x, port state:x%x"
+ " fc_flag:x%x\n",
+ elscmd, did, elsiocb->iotag,
+ vport->port_state,
+ vport->fc_flag);
+ } else {
+ /* Xmit ELS response <elsCmd> to remote NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0117 Xmit ELS response x%x to remote "
+ "NPORT x%x I/O tag: x%x, size: x%x "
+ "port_state x%x fc_flag x%x\n",
+ elscmd, ndlp->nlp_DID, elsiocb->iotag,
+ cmdSize, vport->port_state,
+ vport->fc_flag);
+ }
+ return elsiocb;
+
+els_iocb_free_pbuf_exit:
+ if (expectRsp)
+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+ kfree(pbuflist);
+
+els_iocb_free_prsp_exit:
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(prsp);
+
+els_iocb_free_pcmb_exit:
+ kfree(pcmd);
+ lpfc_sli_release_iocbq(phba, elsiocb);
+ return NULL;
+}
+
+/**
+ * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a fabric registration login for a @vport. An
+ * active ndlp node with Fabric_DID must already exist for this @vport.
+ * The routine invokes two mailbox commands to carry out fabric registration
+ * login through the HBA firmware: the first mailbox command requests the
+ * HBA to perform link configuration for the @vport; and the second mailbox
+ * command requests the HBA to perform the actual fabric registration login
+ * with the @vport.
+ *
+ * Return code
+ * 0 - successfully issued fabric registration login for @vport
+ * -ENXIO -- failed to issue fabric registration login for @vport
+ **/
+int
+lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct serv_parm *sp;
+ int rc;
+ int err = 0;
+
+ sp = &phba->fc_fabparam;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ err = 1;
+ goto fail;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ err = 2;
+ goto fail;
+ }
+
+ vport->port_state = LPFC_FABRIC_CFG_LINK;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ err = 3;
+ goto fail_free_mbox;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ err = 4;
+ goto fail;
+ }
+ rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+ ndlp->nlp_rpi);
+ if (rc) {
+ err = 5;
+ goto fail_free_mbox;
+ }
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+ mbox->vport = vport;
+ /* increment the reference count on ndlp to hold reference
+ * for the callback routine.
+ */
+ mbox->context2 = lpfc_nlp_get(ndlp);
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ err = 6;
+ goto fail_issue_reg_login;
+ }
+
+ return 0;
+
+fail_issue_reg_login:
+ /* decrement the reference count on ndlp just incremented
+ * for the failed mbox command.
+ */
+ lpfc_nlp_put(ndlp);
+ mp = (struct lpfc_dmabuf *) mbox->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+fail_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+fail:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0249 Cannot issue Register Fabric login: Err %d\n", err);
+ return -ENXIO;
+}
+
+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ * 0 - successfully issued REG_VFI for @vport
+ * A failure code otherwise.
+ **/
+int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_nodelist *ndlp;
+ struct serv_parm *sp;
+ struct lpfc_dmabuf *dmabuf;
+ int rc = 0;
+
+ sp = &phba->fc_fabparam;
+ /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->link_flag & LS_LOOPBACK_MODE) &&
+ !(vport->fc_flag & FC_PT2PT)) {
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+ if (!dmabuf->virt) {
+ rc = -ENOMEM;
+ goto fail_free_dmabuf;
+ }
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto fail_free_coherent;
+ }
+ vport->port_state = LPFC_FABRIC_CFG_LINK;
+ memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+ lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+ mboxq->vport = vport;
+ mboxq->context1 = dmabuf;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = -ENXIO;
+ goto fail_free_mbox;
+ }
+ return 0;
+
+fail_free_mbox:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+ kfree(dmabuf);
+fail:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0289 Issue Register VFI failed: Err %d\n", rc);
+ return rc;
+}
+
+/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ * 0 - successfully issued REG_VFI for @vport
+ * A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost;
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2556 UNREG_VFI mbox allocation failed"
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
+ }
+
+ lpfc_unreg_vfi(mboxq, vport);
+ mboxq->vport = vport;
+ mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2557 UNREG_VFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+
+/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from FLOGI/FDISC completion handler functions.
+ * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
+ * node nodename is changed in the completion service parameter else return
+ * 0. This function also set flag in the vport data structure to delay
+ * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
+ * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
+ * node nodename is changed in the completion service parameter.
+ *
+ * Return code
+ * 0 - FCID and Fabric Nodename and Fabric portname is not changed.
+ * 1 - FCID or Fabric Nodename or Fabric portname is changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+ struct serv_parm *sp)
+{
+ uint8_t fabric_param_changed = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if ((vport->fc_prevDID != vport->fc_myDID) ||
+ memcmp(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name)) ||
+ memcmp(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name)))
+ fabric_param_changed = 1;
+
+ /*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ *
+ * If fabric parameter is changed and clean address bit is
+ * cleared delay nport discovery if
+ * - vport->fc_prevDID != 0 (not initial discovery) OR
+ * - lpfc_delay_discovery module parameter is set.
+ */
+ if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+ (vport->fc_prevDID || lpfc_delay_discovery)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return fabric_param_changed;
+}
+
+
+/**
+ * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into a fabric
+ * port in a fabric topology. It properly sets up the parameters to the @ndlp
+ * from the IOCB response. It also check the newly assigned N_Port ID to the
+ * @vport against the previously assigned N_Port ID. If it is different from
+ * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
+ * is invoked on all the remaining nodes with the @vport to unregister the
+ * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ * 0 - Success (currently, always return 0)
+ **/
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp, IOCB_t *irsp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *np;
+ struct lpfc_nodelist *next_np;
+ uint8_t fabric_param_changed;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ spin_unlock_irq(shost->host_lock);
+
+ phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
+ phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+
+ phba->fc_edtovResol = sp->cmn.edtovResolution;
+ phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
+ ndlp->nlp_class_sup = 0;
+ if (sp->cls1.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS1;
+ if (sp->cls2.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS2;
+ if (sp->cls3.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS3;
+ if (sp->cls4.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS4;
+ ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ sp->cmn.bbRcvSizeLsb;
+
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (sp->cmn.response_multiple_NPort) {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_ELS | LOG_VPORT,
+ "1816 FLOGI NPIV supported, "
+ "response data 0x%x\n",
+ sp->cmn.response_multiple_NPort);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ /* Because we asked f/w for NPIV it still expects us
+ to call reg_vnpid atleast for the physcial host */
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_ELS | LOG_VPORT,
+ "1817 Fabric does not support NPIV "
+ "- configuring single port mode.\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ }
+
+ /*
+ * For FC we need to do some special processing because of the SLI
+ * Port's default settings of the Common Service Parameters.
+ */
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
+ lpfc_unregister_fcf_prep(phba);
+
+ /* This should just update the VFI CSPs*/
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_issue_reg_vfi(vport);
+ }
+
+ if (fabric_param_changed &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(np))
+ continue;
+ if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
+ spin_lock_irq(shost->host_lock);
+ np->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
+ }
+ lpfc_cleanup_pending_mbox(vport);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli4_unreg_all_rpis(vport);
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ /*
+ * For SLI3 and SLI4, the VPI needs to be reregistered in
+ * response to this fabric parameter change event.
+ */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /*
+ * Driver needs to re-reg VPI in order for f/w
+ * to update the MAC address.
+ */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_register_new_vport(phba, vport, ndlp);
+ return 0;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+ vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_issue_fabric_reglogin(vport);
+ } else {
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
+ (vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ lpfc_start_fdiscs(phba);
+ lpfc_do_scr_ns_plogi(phba, vport);
+ } else if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_issue_init_vpi(vport);
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3135 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
+ lpfc_issue_reg_vfi(vport);
+ }
+ }
+ return 0;
+}
+
+/**
+ * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
+ * in a point-to-point topology. First, the @vport's N_Port Name is compared
+ * with the received N_Port Name: if the @vport's N_Port Name is greater than
+ * the received N_Port Name lexicographically, this node shall assign local
+ * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
+ * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
+ * this node shall just wait for the remote node to issue PLOGI and assign
+ * N_Port IDs.
+ *
+ * Return code
+ * 0 - Success
+ * -ENXIO - Fail
+ **/
+static int
+lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(vport->fc_portname));
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ if (rc >= 0) {
+ /* This side will initiate the PLOGI */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * N_Port ID cannot be 0, set our to LocalID the other
+ * side will be RemoteID.
+ */
+
+ /* not equal */
+ if (rc)
+ vport->fc_myDID = PT2PT_LocalID;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+
+ /*
+ * For SLI4, the VFI/VPI are registered AFTER the
+ * Nport with the higher WWPN sends the PLOGI with
+ * an assigned NPortId.
+ */
+
+ /* not equal */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
+ lpfc_issue_reg_vfi(vport);
+
+ /* Decrement ndlp reference count indicating that ndlp can be
+ * safely released when other references to it are done.
+ */
+ lpfc_nlp_put(ndlp);
+
+ ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
+ if (!ndlp) {
+ /*
+ * Cannot find existing Fabric ndlp, so allocate a
+ * new one
+ */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ goto fail;
+ lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if(!ndlp)
+ goto fail;
+ }
+
+ memcpy(&ndlp->nlp_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ /* Set state will put ndlp onto node list if not already done */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ } else
+ /* This side will wait for the PLOGI, decrement ndlp reference
+ * count indicating that ndlp can be released when other
+ * references to it are done.
+ */
+ lpfc_nlp_put(ndlp);
+
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT;
+ spin_unlock_irq(shost->host_lock);
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+ lpfc_unregister_fcf_prep(phba);
+
+ /* The FC_VFI_REGISTERED flag will get clear in the cmpl
+ * handler for unreg_vfi, but if we don't force the
+ * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+ * built with the update bit set instead of just the vp bit to
+ * change the Nport ID. We need to have the vp set and the
+ * Upd cleared on topology changes.
+ */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ phba->fc_topology_changed = 0;
+ lpfc_issue_reg_vfi(vport);
+ }
+
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(vport);
+ return 0;
+fail:
+ return -ENXIO;
+}
+
+/**
+ * lpfc_cmpl_els_flogi - Completion callback function for flogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the top-level completion callback function for issuing
+ * a Fabric Login (FLOGI) command. If the response IOCB reported error,
+ * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
+ * retry has been made (either immediately or delayed with lpfc_els_retry()
+ * returning 1), the command IOCB will be released and function returned.
+ * If the retry attempt has been given up (possibly reach the maximum
+ * number of retries), one additional decrement of ndlp reference shall be
+ * invoked before going out after releasing the command IOCB. This will
+ * actually release the remote node (Note, lpfc_els_free_iocb() will also
+ * invoke one decrement of ndlp reference count). If no error reported in
+ * the IOCB status, the command Port ID field is used to determine whether
+ * this is a point-to-point topology or a fabric topology: if the Port ID
+ * field is assigned, it is a fabric topology; otherwise, it is a
+ * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
+ * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
+ * specific topology completion conditions.
+ **/
+static void
+lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ uint16_t fcf_index;
+ int rc;
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(vport)) {
+ /* One additional decrement on node reference count to
+ * trigger the release of the node
+ */
+ lpfc_nlp_put(ndlp);
+ goto out;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "FLOGI cmpl: status:x%x/x%x state:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->port_state);
+
+ if (irsp->ulpStatus) {
+ /*
+ * In case of FIP mode, perform roundrobin FCF failover
+ * due to new FCF discovery
+ */
+ if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+ (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+ if (phba->link_state < LPFC_LINK_UP)
+ goto stop_rr_fcf_flogi;
+ if ((phba->fcoe_cvl_eventtag_attn ==
+ phba->fcoe_cvl_eventtag) &&
+ (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED))
+ goto stop_rr_fcf_flogi;
+ else
+ phba->fcoe_cvl_eventtag_attn =
+ phba->fcoe_cvl_eventtag;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+ "2611 FLOGI failed on FCF (x%x), "
+ "status:x%x/x%x, tmo:x%x, perform "
+ "roundrobin FCF failover\n",
+ phba->fcf.current_rec.fcf_indx,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+ lpfc_sli4_set_fcf_flogi_fail(phba,
+ phba->fcf.current_rec.fcf_indx);
+ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+ rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+ if (rc)
+ goto out;
+ }
+
+stop_rr_fcf_flogi:
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+ goto out;
+
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
+ /* FLOGI failed, so there is no fabric */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+
+ /* If private loop, then allow max outstanding els to be
+ * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
+ * alpa map would take too long otherwise.
+ */
+ if (phba->alpa_map[0] == 0)
+ vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+ (vport->fc_prevDID != vport->fc_myDID) ||
+ phba->fc_topology_changed)) {
+ if (vport->fc_flag & FC_VFI_REGISTERED) {
+ if (phba->fc_topology_changed) {
+ lpfc_unregister_fcf_prep(phba);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ phba->fc_topology_changed = 0;
+ } else {
+ lpfc_sli4_unreg_all_rpis(vport);
+ }
+ }
+ lpfc_issue_reg_vfi(vport);
+ lpfc_nlp_put(ndlp);
+ goto out;
+ }
+ goto flogifail;
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * The FLogI succeeded. Sync the data for the CPU before
+ * accessing it.
+ */
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+ sp = prsp->virt + sizeof(uint32_t);
+
+ /* FLOGI completes successfully */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0101 FLOGI completes successfully, I/O tag:x%x, "
+ "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
+ irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+ sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+ vport->port_state, vport->fc_flag);
+
+ if (vport->port_state == LPFC_FLOGI) {
+ /*
+ * If Common Service Parameters indicate Nport
+ * we are point to point, if Fport we are Fabric.
+ */
+ if (sp->cmn.fPort)
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+ else if (!(phba->hba_flag & HBA_FCOE_MODE))
+ rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
+ else {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_FIP | LOG_ELS,
+ "2831 FLOGI response with cleared Fabric "
+ "bit fcf_index 0x%x "
+ "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
+ "Fabric Name "
+ "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ phba->fcf.current_rec.fcf_indx,
+ phba->fcf.current_rec.switch_name[0],
+ phba->fcf.current_rec.switch_name[1],
+ phba->fcf.current_rec.switch_name[2],
+ phba->fcf.current_rec.switch_name[3],
+ phba->fcf.current_rec.switch_name[4],
+ phba->fcf.current_rec.switch_name[5],
+ phba->fcf.current_rec.switch_name[6],
+ phba->fcf.current_rec.switch_name[7],
+ phba->fcf.current_rec.fabric_name[0],
+ phba->fcf.current_rec.fabric_name[1],
+ phba->fcf.current_rec.fabric_name[2],
+ phba->fcf.current_rec.fabric_name[3],
+ phba->fcf.current_rec.fabric_name[4],
+ phba->fcf.current_rec.fabric_name[5],
+ phba->fcf.current_rec.fabric_name[6],
+ phba->fcf.current_rec.fabric_name[7]);
+ lpfc_nlp_put(ndlp);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+ }
+ if (!rc) {
+ /* Mark the FCF discovery process done */
+ if (phba->hba_flag & HBA_FIP_SUPPORT)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
+ LOG_ELS,
+ "2769 FLOGI to FCF (x%x) "
+ "completed successfully\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+ }
+ }
+
+flogifail:
+ lpfc_nlp_put(ndlp);
+
+ if (!lpfc_error_lost_link(irsp)) {
+ /* FLOGI failed, so just use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_SLI_ABORTED) &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_SLI_DOWN))) &&
+ (phba->link_state != LPFC_CLEAR_LA)) {
+ /* If FLOGI failed enable link interrupt. */
+ lpfc_issue_clear_la(phba, vport);
+ }
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fabric Login (FLOGI) Request ELS command
+ * for a @vport. The initiator service parameters are put into the payload
+ * of the FLOGI Request IOCB and the top-level callback function pointer
+ * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
+ * function field. The lpfc_issue_fabric_iocb routine is invoked to send
+ * out FLOGI ELS command with one outstanding fabric IOCB at a time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FLOGI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued flogi iocb for @vport
+ * 1 - failed to issue flogi iocb for @vport
+ **/
+static int
+lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct serv_parm *sp;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ uint32_t tmo;
+ int rc;
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_FLOGI);
+
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For FLOGI request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+
+ /* Setup CSPs accordingly for Fabric */
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
+ sp->cls1.classValid = 0;
+ if (sp->cmn.fcphLow < FC_PH3)
+ sp->cmn.fcphLow = FC_PH3;
+ if (sp->cmn.fcphHigh < FC_PH3)
+ sp->cmn.fcphHigh = FC_PH3;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_0) {
+ elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+ elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+ /* FLOGI needs to be 3 for WQE FCFI */
+ /* Set the fcfi to the fcfi we registered with */
+ elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+ }
+ /* Can't do SLI4 class2 without support sequence coalescing */
+ sp->cls2.classValid = 0;
+ sp->cls2.seqDelivery = 0;
+ } else {
+ /* Historical, setting sequential-delivery bit for SLI3 */
+ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
+ sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ sp->cmn.request_multiple_Nport = 1;
+ /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ } else
+ sp->cmn.request_multiple_Nport = 0;
+ }
+
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
+
+ tmo = phba->fc_ratov;
+ phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+ lpfc_set_disctmo(vport);
+ phba->fc_ratov = tmo;
+
+ phba->fc_stat.elsXmitFLOGI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FLOGI: opt:x%x",
+ phba->sli3_options, 0, 0);
+
+ rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
+ * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
+ * list and issues an abort IOCB commond on each outstanding IOCB that
+ * contains a active Fabric_DID ndlp. Note that this function is to issue
+ * the abort IOCB command on all the outstanding IOCBs, thus when this
+ * function returns, it does not guarantee all the IOCBs are actually aborted.
+ *
+ * Return code
+ * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
+ **/
+int
+lpfc_els_abort_flogi(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_nodelist *ndlp;
+ IOCB_t *icmd;
+
+ /* Abort outstanding I/O on NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "0201 Abort outstanding I/O on NPort x%x\n",
+ Fabric_DID);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ /*
+ * Check the txcmplq for an iocb that matches the nport the driver is
+ * searching for.
+ */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ icmd = &iocb->iocb;
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ndlp = (struct lpfc_nodelist *)(iocb->context1);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ (ndlp->nlp_DID == Fabric_DID))
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ return 0;
+}
+
+/**
+ * lpfc_initial_flogi - Issue an initial fabric login for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Login (FLOGI) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
+ * is then invoked with the @vport and the ndlp to perform the FLOGI for the
+ * @vport.
+ *
+ * Return code
+ * 0 - failed to issue initial flogi for @vport
+ * 1 - successfully issued initial flogi for @vport
+ **/
+int
+lpfc_initial_flogi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Set the node type */
+ ndlp->nlp_type |= NLP_FABRIC;
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
+ }
+
+ if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
+ /* This decrement of reference count to node shall kick off
+ * the release of the node.
+ */
+ lpfc_nlp_put(ndlp);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Discover (FDISC) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
+ * is then invoked with the @vport and the ndlp to perform the FDISC for the
+ * @vport.
+ *
+ * Return code
+ * 0 - failed to issue initial fdisc for @vport
+ * 1 - successfully issued initial fdisc for @vport
+ **/
+int
+lpfc_initial_fdisc(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
+ }
+
+ if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+ /* decrement node reference count to trigger the release of
+ * the node.
+ */
+ lpfc_nlp_put(ndlp);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * lpfc_more_plogi - Check and issue remaining plogis for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there are more remaining Port Logins
+ * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
+ * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
+ * to issue ELS PLOGIs up to the configured discover threads with the
+ * @vport (@vport->cfg_discovery_threads). The function also decrement
+ * the @vport's num_disc_node by 1 if it is not already 0.
+ **/
+void
+lpfc_more_plogi(struct lpfc_vport *vport)
+{
+ int sentplogi;
+
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
+
+ /* Continue discovery with <num_disc_nodes> PLOGIs to go */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0232 Continue discovery with %d PLOGIs to go "
+ "Data: x%x x%x x%x\n",
+ vport->num_disc_nodes, vport->fc_plogi_cnt,
+ vport->fc_flag, vport->port_state);
+ /* Check to see if there are more PLOGIs to be sent */
+ if (vport->fc_flag & FC_NLP_MORE)
+ /* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ sentplogi = lpfc_els_disc_plogi(vport);
+
+ return;
+}
+
+/**
+ * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
+ * @phba: pointer to lpfc hba data structure.
+ * @prsp: pointer to response IOCB payload.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine checks and indicates whether the WWPN of an N_Port, retrieved
+ * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
+ * The following cases are considered N_Port confirmed:
+ * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
+ * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
+ * it does not have WWPN assigned either. If the WWPN is confirmed, the
+ * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
+ * 1) if there is a node on vport list other than the @ndlp with the same
+ * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
+ * on that node to release the RPI associated with the node; 2) if there is
+ * no node found on vport list with the same WWPN of the N_Port PLOGI logged
+ * into, a new node shall be allocated (or activated). In either case, the
+ * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
+ * be released and the new_ndlp shall be put on to the vport node list and
+ * its pointer returned as the confirmed node.
+ *
+ * Note that before the @ndlp got "released", the keepDID from not-matching
+ * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
+ * of the @ndlp. This is because the release of @ndlp is actually to put it
+ * into an inactive state on the vport node list and the vport node list
+ * management algorithm does not allow two node with a same DID.
+ *
+ * Return code
+ * pointer to the PLOGI N_Port @ndlp
+ **/
+static struct lpfc_nodelist *
+lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_nodelist *new_ndlp;
+ struct lpfc_rport_data *rdata;
+ struct fc_rport *rport;
+ struct serv_parm *sp;
+ uint8_t name[sizeof(struct lpfc_name)];
+ uint32_t rc, keepDID = 0;
+ int put_node;
+ int put_rport;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
+
+ /* Fabric nodes can have the same WWPN so we don't bother searching
+ * by WWPN. Just return the ndlp that was given to us.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC)
+ return ndlp;
+
+ sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
+ memset(name, 0, sizeof(struct lpfc_name));
+
+ /* Now we find out if the NPort we are logging into, matches the WWPN
+ * we have for that ndlp. If not, we have some work to do.
+ */
+ new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
+
+ if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
+ return ndlp;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+ GFP_KERNEL);
+ if (active_rrqs_xri_bitmap)
+ memset(active_rrqs_xri_bitmap, 0,
+ phba->cfg_rrq_xri_bitmap_sz);
+ }
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
+ ndlp, ndlp->nlp_DID, new_ndlp);
+
+ if (!new_ndlp) {
+ rc = memcmp(&ndlp->nlp_portname, name,
+ sizeof(struct lpfc_name));
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
+ } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
+ rc = memcmp(&ndlp->nlp_portname, name,
+ sizeof(struct lpfc_name));
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ new_ndlp = lpfc_enable_node(vport, new_ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ keepDID = new_ndlp->nlp_DID;
+ if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
+ } else {
+ keepDID = new_ndlp->nlp_DID;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
+ }
+
+ lpfc_unreg_rpi(vport, new_ndlp);
+ new_ndlp->nlp_DID = ndlp->nlp_DID;
+ new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ memcpy(new_ndlp->active_rrqs_xri_bitmap,
+ ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
+ new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ /* Set state will put new_ndlp on to node list if not already done */
+ lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
+
+ /* Move this back to NPR state */
+ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
+ /* The new_ndlp is replacing ndlp totally, so we need
+ * to put ndlp on UNUSED list and try to free it.
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3179 PLOGI confirm NEW: %x %x\n",
+ new_ndlp->nlp_DID, keepDID);
+
+ /* Fix up the rport accordingly */
+ rport = ndlp->rport;
+ if (rport) {
+ rdata = rport->dd_data;
+ if (rdata->pnode == ndlp) {
+ lpfc_nlp_put(ndlp);
+ ndlp->rport = NULL;
+ rdata->pnode = lpfc_nlp_get(new_ndlp);
+ new_ndlp->rport = rport;
+ }
+ new_ndlp->nlp_type = ndlp->nlp_type;
+ }
+ /* We shall actually free the ndlp with both nlp_DID and
+ * nlp_portname fields equals 0 to avoid any ndlp on the
+ * nodelist never to be used.
+ */
+ if (ndlp->nlp_DID == 0) {
+ spin_lock_irq(&phba->ndlp_lock);
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+ }
+
+ /* Two ndlps cannot have the same did on the nodelist */
+ ndlp->nlp_DID = keepDID;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
+ lpfc_drop_node(vport, ndlp);
+ }
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3180 PLOGI confirm SWAP: %x %x\n",
+ new_ndlp->nlp_DID, keepDID);
+
+ lpfc_unreg_rpi(vport, ndlp);
+
+ /* Two ndlps cannot have the same did */
+ ndlp->nlp_DID = keepDID;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
+
+ /* Since we are swapping the ndlp passed in with the new one
+ * and the did has already been swapped, copy over state.
+ * The new WWNs are already in new_ndlp since thats what
+ * we looked it up by in the begining of this routine.
+ */
+ new_ndlp->nlp_state = ndlp->nlp_state;
+
+ /* Since we are switching over to the new_ndlp, the old
+ * ndlp should be put in the NPR state, unless we have
+ * already started re-discovery on it.
+ */
+ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ /* Fix up the rport accordingly */
+ rport = ndlp->rport;
+ if (rport) {
+ rdata = rport->dd_data;
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ }
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return new_ndlp;
+}
+
+/**
+ * lpfc_end_rscn - Check and handle more rscn for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether more Registration State Change
+ * Notifications (RSCNs) came in while the discovery state machine was in
+ * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
+ * invoked to handle the additional RSCNs for the @vport. Otherwise, the
+ * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
+ * handling the RSCNs.
+ **/
+void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ /*
+ * Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if (vport->fc_rscn_id_cnt ||
+ (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+ lpfc_els_handle_rscn(vport);
+ else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ }
+}
+
+/**
+ * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine will call the clear rrq function to free the rrq and
+ * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
+ * exist then the clear_rrq is still called because the rrq needs to
+ * be freed.
+ **/
+
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_node_rrq *rrq;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ rrq = cmdiocb->context_un.rrq;
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "RRQ cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2882 RRQ completes to NPort x%x "
+ "with no ndlp. Data: x%x x%x x%x\n",
+ irsp->un.elsreq64.remoteID,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpIoTag);
+ goto out;
+ }
+
+ /* rrq completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2880 RRQ completes to NPort x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ /* RRQ failed Don't print the vport to vport rjts */
+ if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+ (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+ ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ }
+out:
+ if (rrq)
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+/**
+ * lpfc_cmpl_els_plogi - Completion callback function for plogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Port
+ * Login (PLOGI) command. For PLOGI completion, there must be an active
+ * ndlp on the vport node list that matches the remote node ID from the
+ * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
+ * ignored and command IOCB released. The PLOGI response IOCB status is
+ * checked for error conditons. If there is error status reported, PLOGI
+ * retry shall be attempted by invoking the lpfc_els_retry() routine.
+ * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
+ * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
+ * (DSM) is set for this PLOGI completion. Finally, it checks whether
+ * there are additional N_Port nodes with the vport that need to perform
+ * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
+ * PLOGIs.
+ **/
+static void
+lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *prsp;
+ int disc, rc;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "PLOGI cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0136 PLOGI completes to NPort x%x "
+ "with no ndlp. Data: x%x x%x x%x\n",
+ irsp->un.elsreq64.remoteID,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpIoTag);
+ goto out;
+ }
+
+ /* Since ndlp can be freed in the disc state machine, note if this node
+ * is being used during discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ rc = 0;
+
+ /* PLOGI completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0102 PLOGI completes to NPort x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(vport)) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ if (disc) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ }
+ goto out;
+ }
+ /* PLOGI failed Don't print the vport to vport rjts */
+ if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+ (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+ ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if (lpfc_error_lost_link(irsp))
+ rc = NLP_STE_FREED_NODE;
+ else
+ rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
+ } else {
+ /* Good status, call state machine */
+ prsp = list_entry(((struct lpfc_dmabuf *)
+ cmdiocb->context2)->list.next,
+ struct lpfc_dmabuf, list);
+ ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+ rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
+ }
+
+ if (disc && vport->num_disc_nodes) {
+ /* Check to see if there are more PLOGIs to be sent */
+ lpfc_more_plogi(vport);
+
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: destination port identifier.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Port Login (PLOGI) command to a remote N_Port
+ * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+ * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+ * This routine constructs the proper feilds of the PLOGI IOCB and invokes
+ * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PLOGI ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued a plogi for @vport
+ * 1 - failed to issue a plogi for @vport
+ **/
+int
+lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct serv_parm *sp;
+ IOCB_t *icmd;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int ret;
+
+ psli = &phba->sli;
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = NULL;
+
+ /* If ndlp is not NULL, we will bump the reference count on it */
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+ ELS_CMD_PLOGI);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For PLOGI request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+
+ /*
+ * If we are a N-port connected to a Fabric, fix-up paramm's so logins
+ * to device on remote loops work.
+ */
+ if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+ sp->cmn.altBbCredit = 1;
+
+ if (sp->cmn.fcphLow < FC_PH_4_3)
+ sp->cmn.fcphLow = FC_PH_4_3;
+
+ if (sp->cmn.fcphHigh < FC_PH3)
+ sp->cmn.fcphHigh = FC_PH3;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PLOGI: did:x%x",
+ did, 0, 0);
+
+ phba->fc_stat.elsXmitPLOGI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_cmpl_els_prli - Completion callback function for prli
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for a Process Login
+ * (PRLI) ELS command. The PRLI response IOCB status is checked for error
+ * status. If there is error status reported, PRLI retry shall be attempted
+ * by invoking the lpfc_els_retry() routine. Otherwise, the state
+ * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
+ * ndlp to mark the PRLI completion.
+ **/
+static void
+lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "PRLI cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
+ /* PRLI completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0103 PRLI completes to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, vport->num_disc_nodes);
+
+ vport->fc_prli_sent--;
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(vport))
+ goto out;
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ goto out;
+ }
+ /* PRLI failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if (lpfc_error_lost_link(irsp))
+ goto out;
+ else
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
+ } else
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_issue_els_prli - Issue a prli iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Process Login (PRLI) ELS command for the
+ * @vport. The PRLI service parameters are set up in the payload of the
+ * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
+ * is put to the IOCB completion callback func field before invoking the
+ * routine lpfc_sli_issue_iocb() to send out PRLI command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued prli iocb command for @vport
+ * 1 - failed to issue prli iocb command for @vport
+ **/
+int
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ PRLI *npr;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_PRLI);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For PRLI request, remainder of payload is service parameters */
+ memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
+ *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
+ pcmd += sizeof(uint32_t);
+
+ /* For PRLI, remainder of payload is PRLI parameter page */
+ npr = (PRLI *) pcmd;
+ /*
+ * If our firmware version is 3.20 or later,
+ * set the following bits for FC-TAPE support.
+ */
+ if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+ if (vport->cfg_first_burst_size)
+ npr->writeXferRdyDis = 1;
+
+ /* For FCP support */
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PRLI: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ phba->fc_stat.elsXmitPRLI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_PRLI_SND;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ vport->fc_prli_sent++;
+ return 0;
+}
+
+/**
+ * lpfc_rscn_disc - Perform rscn discovery for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine performs Registration State Change Notification (RSCN)
+ * discovery for a @vport. If the @vport's node port recovery count is not
+ * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
+ * the nodes that need recovery. If none of the PLOGI were needed through
+ * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
+ * invoked to check and handle possible more RSCN came in during the period
+ * of processing the current ones.
+ **/
+static void
+lpfc_rscn_disc(struct lpfc_vport *vport)
+{
+ lpfc_can_disctmo(vport);
+
+ /* RSCN discovery */
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ if (lpfc_els_disc_plogi(vport))
+ return;
+
+ lpfc_end_rscn(vport);
+}
+
+/**
+ * lpfc_adisc_done - Complete the adisc phase of discovery
+ * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
+ *
+ * This function is called when the final ADISC is completed during discovery.
+ * This function handles clearing link attention or issuing reg_vpi depending
+ * on whether npiv is enabled. This function also kicks off the PLOGI phase of
+ * discovery.
+ * This function is called with no locks held.
+ **/
+static void
+lpfc_adisc_done(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /*
+ * For NPIV, cmpl_reg_vpi will set port_state to READY,
+ * and continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE) &&
+ (phba->sli_rev < LPFC_SLI_REV4)) {
+ /* The ADISCs are complete. Doesn't matter if they
+ * succeeded or failed because the ADISC completion
+ * routine guarantees to call the state machine and
+ * the RPI is either unregistered (failed ADISC response)
+ * or the RPI is still valid and the node is marked
+ * mapped for a target. The exchanges should be in the
+ * correct state. This code is specific to SLI3.
+ */
+ lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_reg_vpi(phba, vport);
+ return;
+ }
+ /*
+ * For SLI2, we need to set port_state to READY
+ * and continue discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* If we get here, there is nothing to ADISC */
+ lpfc_issue_clear_la(phba, vport);
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list, issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ vport->port_state = LPFC_VPORT_READY;
+ } else
+ lpfc_rscn_disc(vport);
+}
+
+/**
+ * lpfc_more_adisc - Issue more adisc as needed
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine determines whether there are more ndlps on a @vport
+ * node list need to have Address Discover (ADISC) issued. If so, it will
+ * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
+ * remaining nodes which need to have ADISC sent.
+ **/
+void
+lpfc_more_adisc(struct lpfc_vport *vport)
+{
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
+ /* Continue discovery with <num_disc_nodes> ADISCs to go */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0210 Continue discovery with %d ADISCs to go "
+ "Data: x%x x%x x%x\n",
+ vport->num_disc_nodes, vport->fc_adisc_cnt,
+ vport->fc_flag, vport->port_state);
+ /* Check to see if there are more ADISCs to be sent */
+ if (vport->fc_flag & FC_NLP_MORE) {
+ lpfc_set_disctmo(vport);
+ /* go thru NPR nodes and issue any remaining ELS ADISCs */
+ lpfc_els_disc_adisc(vport);
+ }
+ if (!vport->num_disc_nodes)
+ lpfc_adisc_done(vport);
+ return;
+}
+
+/**
+ * lpfc_cmpl_els_adisc - Completion callback function for adisc
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the Address Discover
+ * (ADISC) command. It first checks to see whether link went down during
+ * the discovery process. If so, the node will be marked as node port
+ * recovery for issuing discover IOCB by the link attention handler and
+ * exit. Otherwise, the response status is checked. If error was reported
+ * in the response status, the ADISC command shall be retried by invoking
+ * the lpfc_els_retry() routine. Otherwise, if no error was reported in
+ * the response status, the state machine is invoked to set transition
+ * with respect to NLP_EVT_CMPL_ADISC event.
+ **/
+static void
+lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp;
+ struct lpfc_nodelist *ndlp;
+ int disc;
+
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ADISC cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
+
+ /* Since ndlp can be freed in the disc state machine, note if this node
+ * is being used during discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+ ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ /* ADISC completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0104 ADISC completes to NPort x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(vport)) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ if (disc) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_set_disctmo(vport);
+ }
+ goto out;
+ }
+ /* ADISC failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+ } else
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+
+ /* Check to see if there are more ADISCs to be sent */
+ if (disc && vport->num_disc_nodes)
+ lpfc_more_adisc(vport);
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an Address Discover (ADISC) for an @ndlp on a
+ * @vport. It prepares the payload of the ADISC ELS command, updates the
+ * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
+ * to issue the ADISC ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC ELS command.
+ *
+ * Return code
+ * 0 - successfully issued adisc
+ * 1 - failed to issue adisc
+ **/
+int
+lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ ADISC *ap;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ADISC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For ADISC request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
+ pcmd += sizeof(uint32_t);
+
+ /* Fill in ADISC payload */
+ ap = (ADISC *) pcmd;
+ ap->hardAL_PA = phba->fc_pref_ALPA;
+ memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ap->DID = be32_to_cpu(vport->fc_myDID);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue ADISC: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ phba->fc_stat.elsXmitADISC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_ADISC_SND;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_cmpl_els_logo - Completion callback function for logo
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the ELS Logout (LOGO)
+ * command. If no error status was reported from the LOGO response, the
+ * state machine of the associated ndlp shall be invoked for transition with
+ * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
+ * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ **/
+static void
+lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfcMboxq *mbox;
+ unsigned long flags;
+ uint32_t skip_recovery = 0;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
+
+ /* LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0105 LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, vport->num_disc_nodes);
+
+ if (lpfc_els_chk_latt(vport)) {
+ skip_recovery = 1;
+ goto out;
+ }
+
+ /* Check to see if link went down during discovery */
+ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ /* NLP_EVT_DEVICE_RM should unregister the RPI
+ * which should abort all outstanding IOs.
+ */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ skip_recovery = 1;
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ skip_recovery = 1;
+ goto out;
+ }
+ /* LOGO failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if (lpfc_error_lost_link(irsp)) {
+ skip_recovery = 1;
+ goto out;
+ }
+ }
+
+ /* Call state machine. This will unregister the rpi if needed. */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ phba->pport->fc_myDID = 0;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ skip_recovery = 1;
+ }
+ }
+ }
+
+ /*
+ * If the node is a target, the handling attempts to recover the port.
+ * For any other port type, the rpi is unregistered as an implicit
+ * LOGO.
+ */
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3187 LOGO completes to NPort x%x: Start "
+ "Recovery Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout,
+ vport->num_disc_nodes);
+ lpfc_disc_start(vport);
+ }
+ return;
+}
+
+/**
+ * lpfc_issue_els_logo - Issue a logo to an node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine constructs and issues an ELS Logout (LOGO) iocb command
+ * to a remote node, referred by an @ndlp on a @vport. It constructs the
+ * payload of the IOCB, properly sets up the @ndlp state, and invokes the
+ * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return code
+ * 0 - successfully issued logo
+ * 1 - failed to issue logo
+ **/
+int
+lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ spin_lock_irq(shost->host_lock);
+ if (ndlp->nlp_flag & NLP_LOGO_SND) {
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LOGO);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+ pcmd += sizeof(uint32_t);
+
+ /* Fill in LOGO payload */
+ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ /*
+ * If we are issuing a LOGO, we may try to recover the remote NPort
+ * by issuing a PLOGI later. Even though we issue ELS cmds by the
+ * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
+ * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
+ * for that ELS cmd. To avoid this situation, lets get rid of the
+ * RPI right now, before any ELS cmds are sent.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_unreg_rpi(vport, ndlp)) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+ }
+
+ phba->fc_stat.elsXmitLOGO++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_SND;
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ spin_unlock_irq(shost->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+ if (rc == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_cmpl_els_cmd - Completion callback function for generic els command
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for ELS commands.
+ * Specifically, it is the callback function which does not need to perform
+ * any command specific operations. It is currently used by the ELS command
+ * issuing routines for the ELS State Change Request (SCR),
+ * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
+ * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
+ * certain debug loggings, this callback function simply invokes the
+ * lpfc_els_chk_latt() routine to check whether link went down during the
+ * discovery process.
+ **/
+static void
+lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+ irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(vport);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_issue_els_scr - Issue a scr to an node on a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a State Change Request (SCR) to a fabric node
+ * on a @vport. The remote node @nportid is passed into the function. It
+ * first search the @vport node list to find the matching ndlp. If no such
+ * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
+ * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
+ * routine is invoked to send the SCR IOCB.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the SCR ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued scr command
+ * 1 - Failed to issue scr command
+ **/
+int
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ cmdsize = (sizeof(uint32_t) + sizeof(SCR));
+
+ ndlp = lpfc_findnode_did(vport, nportid);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 1;
+ lpfc_nlp_init(vport, ndlp, nportid);
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 1;
+ }
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_SCR);
+
+ if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
+ pcmd += sizeof(uint32_t);
+
+ /* For SCR, remainder of payload is SCR parameter page */
+ memset(pcmd, 0, sizeof(SCR));
+ ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue SCR: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ phba->fc_stat.elsXmitSCR++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the rlease of
+ * the node.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ /* This will cause the callback-function lpfc_cmpl_els_cmd to
+ * trigger the release of node.
+ */
+
+ lpfc_nlp_put(ndlp);
+ return 0;
+}
+
+/**
+ * lpfc_issue_els_farpr - Issue a farp to an node on a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fibre Channel Address Resolution Response
+ * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
+ * is passed into the function. It first search the @vport node list to find
+ * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
+ * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
+ * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PARPR ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued farpr command
+ * 1 - Failed to issue farpr command
+ **/
+static int
+lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ FARP *fp;
+ uint8_t *pcmd;
+ uint32_t *lp;
+ uint16_t cmdsize;
+ struct lpfc_nodelist *ondlp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ cmdsize = (sizeof(uint32_t) + sizeof(FARP));
+
+ ndlp = lpfc_findnode_did(vport, nportid);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 1;
+ lpfc_nlp_init(vport, ndlp, nportid);
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 1;
+ }
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RNID);
+ if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
+ pcmd += sizeof(uint32_t);
+
+ /* Fill in FARPR payload */
+ fp = (FARP *) (pcmd);
+ memset(fp, 0, sizeof(FARP));
+ lp = (uint32_t *) pcmd;
+ *lp++ = be32_to_cpu(nportid);
+ *lp++ = be32_to_cpu(vport->fc_myDID);
+ fp->Rflags = 0;
+ fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
+
+ memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ondlp = lpfc_findnode_did(vport, nportid);
+ if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
+ memcpy(&fp->OportName, &ondlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FARPR: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ phba->fc_stat.elsXmitFARPR++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ /* This will cause the callback-function lpfc_cmpl_els_cmd to
+ * trigger the release of the node.
+ */
+ lpfc_nlp_put(ndlp);
+ return 0;
+}
+
+/**
+ * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nlp: pointer to a node-list data structure.
+ *
+ * This routine cancels the timer with a delayed IOCB-command retry for
+ * a @vport's @ndlp. It stops the timer for the delayed function retrial and
+ * removes the ELS retry event if it presents. In addition, if the
+ * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
+ * commands are sent for the @vport's nodes that require issuing discovery
+ * ADISC.
+ **/
+void
+lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_work_evt *evtp;
+
+ if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+ return;
+ spin_lock_irq(shost->host_lock);
+ nlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&nlp->nlp_delayfunc);
+ nlp->nlp_last_elscmd = 0;
+ if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
+ list_del_init(&nlp->els_retry_evt.evt_listp);
+ /* Decrement nlp reference count held for the delayed retry */
+ evtp = &nlp->els_retry_evt;
+ lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
+ }
+ if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ if (vport->num_disc_nodes) {
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* Check if there are more ADISCs to be sent */
+ lpfc_more_adisc(vport);
+ } else {
+ /* Check if there are more PLOGIs to be sent */
+ lpfc_more_plogi(vport);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ }
+ }
+ return;
+}
+
+/**
+ * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
+ * @ptr: holder for the pointer to the timer function associated data (ndlp).
+ *
+ * This routine is invoked by the ndlp delayed-function timer to check
+ * whether there is any pending ELS retry event(s) with the node. If not, it
+ * simply returns. Otherwise, if there is at least one ELS delayed event, it
+ * adds the delayed events to the HBA work list and invokes the
+ * lpfc_worker_wake_up() routine to wake up worker thread to process the
+ * event. Note that lpfc_nlp_get() is called before posting the event to
+ * the work list to hold reference count of ndlp so that it guarantees the
+ * reference to ndlp will still be available when the worker thread gets
+ * to the event associated with the ndlp.
+ **/
+void
+lpfc_els_retry_delay(unsigned long ptr)
+{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ /* We need to hold the node by incrementing the reference
+ * count until the queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_ELS_RETRY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+}
+
+/**
+ * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine is the worker-thread handler for processing the @ndlp delayed
+ * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
+ * the last ELS command from the associated ndlp and invokes the proper ELS
+ * function according to the delayed ELS command to retry the command.
+ **/
+void
+lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ uint32_t cmd, retry;
+
+ spin_lock_irq(shost->host_lock);
+ cmd = ndlp->nlp_last_elscmd;
+ ndlp->nlp_last_elscmd = 0;
+
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ /*
+ * If a discovery event readded nlp_delayfunc after timer
+ * firing and before processing the timer, cancel the
+ * nlp_delayfunc.
+ */
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ retry = ndlp->nlp_retry;
+ ndlp->nlp_retry = 0;
+
+ switch (cmd) {
+ case ELS_CMD_FLOGI:
+ lpfc_issue_els_flogi(vport, ndlp, retry);
+ break;
+ case ELS_CMD_PLOGI:
+ if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ }
+ break;
+ case ELS_CMD_ADISC:
+ if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ }
+ break;
+ case ELS_CMD_PRLI:
+ if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ }
+ break;
+ case ELS_CMD_LOGO:
+ if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+ }
+ break;
+ case ELS_CMD_FDISC:
+ if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_els_retry - Make retry decision on an els command iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine makes a retry decision on an ELS command IOCB, which has
+ * failed. The following ELS IOCBs use this function for retrying the command
+ * when previously issued command responsed with error status: FLOGI, PLOGI,
+ * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * returned error status, it makes the decision whether a retry shall be
+ * issued for the command, and whether a retry shall be made immediately or
+ * delayed. In the former case, the corresponding ELS command issuing-function
+ * is called to retry the command. In the later case, the ELS command shall
+ * be posted to the ndlp delayed event and delayed function timer set to the
+ * ndlp for the delayed command issusing.
+ *
+ * Return code
+ * 0 - No retry of els command is made
+ * 1 - Immediate or delayed retry of els command is made
+ **/
+static int
+lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *elscmd;
+ struct ls_rjt stat;
+ int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
+ int logerr = 0;
+ uint32_t cmd = 0;
+ uint32_t did;
+
+
+ /* Note: context2 may be 0 for internal driver abort
+ * of delays ELS command.
+ */
+
+ if (pcmd && pcmd->virt) {
+ elscmd = (uint32_t *) (pcmd->virt);
+ cmd = *elscmd++;
+ }
+
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ did = ndlp->nlp_DID;
+ else {
+ /* We should only hit this case for retrying PLOGI */
+ did = irsp->un.elsreq64.remoteID;
+ ndlp = lpfc_findnode_did(vport, did);
+ if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ && (cmd != ELS_CMD_PLOGI))
+ return 1;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Retry ELS: wd7:x%x wd4:x%x did:x%x",
+ *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+
+ switch (irsp->ulpStatus) {
+ case IOSTAT_FCP_RSP_ERROR:
+ break;
+ case IOSTAT_REMOTE_STOP:
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* This IO was aborted by the target, we don't
+ * know the rxid and because we did not send the
+ * ABTS we cannot generate and RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ cmdiocb->sli4_lxritag, 0, 0);
+ }
+ break;
+ case IOSTAT_LOCAL_REJECT:
+ switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ case IOERR_LOOP_OPEN_FAILURE:
+ if (cmd == ELS_CMD_FLOGI) {
+ if (PCI_DEVICE_ID_HORNET ==
+ phba->pcidev->device) {
+ phba->fc_topology = LPFC_TOPOLOGY_LOOP;
+ phba->pport->fc_myDID = 0;
+ phba->alpa_map[0] = 0;
+ phba->alpa_map[1] = 0;
+ }
+ }
+ if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
+ delay = 1000;
+ retry = 1;
+ break;
+
+ case IOERR_ILLEGAL_COMMAND:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0124 Retry illegal cmd x%x "
+ "retry:x%x delay:x%x\n",
+ cmd, cmdiocb->retry, delay);
+ retry = 1;
+ /* All command's retry policy */
+ maxretry = 8;
+ if (cmdiocb->retry > 2)
+ delay = 1000;
+ break;
+
+ case IOERR_NO_RESOURCES:
+ logerr = 1; /* HBA out of resources */
+ retry = 1;
+ if (cmdiocb->retry > 100)
+ delay = 100;
+ maxretry = 250;
+ break;
+
+ case IOERR_ILLEGAL_FRAME:
+ delay = 100;
+ retry = 1;
+ break;
+
+ case IOERR_SEQUENCE_TIMEOUT:
+ case IOERR_INVALID_RPI:
+ if (cmd == ELS_CMD_PLOGI &&
+ did == NameServer_DID) {
+ /* Continue forever if plogi to */
+ /* the nameserver fails */
+ maxretry = 0;
+ delay = 100;
+ }
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_NPORT_RJT:
+ case IOSTAT_FABRIC_RJT:
+ if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ logerr = 1; /* Fabric / Remote NPort out of resources */
+ retry = 1;
+ break;
+
+ case IOSTAT_LS_RJT:
+ stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+ /* Added for Vendor specifc support
+ * Just keep retrying for these Rsn / Exp codes
+ */
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ if (stat.un.b.lsRjtRsnCodeExp ==
+ LSEXP_CMD_IN_PROGRESS) {
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1000;
+ maxretry = 48;
+ }
+ retry = 1;
+ break;
+ }
+ if (stat.un.b.lsRjtRsnCodeExp ==
+ LSEXP_CANT_GIVE_DATA) {
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1000;
+ maxretry = 48;
+ }
+ retry = 1;
+ break;
+ }
+ if ((cmd == ELS_CMD_PLOGI) ||
+ (cmd == ELS_CMD_PRLI)) {
+ delay = 1000;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
+ break;
+ }
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (cmd == ELS_CMD_FDISC) &&
+ (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0125 FDISC Failed (x%x). "
+ "Fabric out of resources\n",
+ stat.un.lsRjtError);
+ lpfc_vport_set_state(vport,
+ FC_VPORT_NO_FABRIC_RSCS);
+ }
+ break;
+
+ case LSRJT_LOGICAL_BSY:
+ if ((cmd == ELS_CMD_PLOGI) ||
+ (cmd == ELS_CMD_PRLI)) {
+ delay = 1000;
+ maxretry = 48;
+ } else if (cmd == ELS_CMD_FDISC) {
+ /* FDISC retry policy */
+ maxretry = 48;
+ if (cmdiocb->retry >= 32)
+ delay = 1000;
+ }
+ retry = 1;
+ break;
+
+ case LSRJT_LOGICAL_ERR:
+ /* There are some cases where switches return this
+ * error when they are not ready and should be returning
+ * Logical Busy. We should delay every time.
+ */
+ if (cmd == ELS_CMD_FDISC &&
+ stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
+ maxretry = 3;
+ delay = 1000;
+ retry = 1;
+ break;
+ }
+ case LSRJT_PROTOCOL_ERR:
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (cmd == ELS_CMD_FDISC) &&
+ ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
+ (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
+ ) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0122 FDISC Failed (x%x). "
+ "Fabric Detected Bad WWN\n",
+ stat.un.lsRjtError);
+ lpfc_vport_set_state(vport,
+ FC_VPORT_FABRIC_REJ_WWN);
+ }
+ break;
+ }
+ break;
+
+ case IOSTAT_INTERMED_RSP:
+ case IOSTAT_BA_RJT:
+ break;
+
+ default:
+ break;
+ }
+
+ if (did == FDMI_DID)
+ retry = 1;
+
+ if ((cmd == ELS_CMD_FLOGI) &&
+ (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
+ !lpfc_error_lost_link(irsp)) {
+ /* FLOGI retry policy */
+ retry = 1;
+ /* retry FLOGI forever */
+ if (phba->link_flag != LS_LOOPBACK_MODE)
+ maxretry = 0;
+ else
+ maxretry = 2;
+
+ if (cmdiocb->retry >= 100)
+ delay = 5000;
+ else if (cmdiocb->retry >= 32)
+ delay = 1000;
+ } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ /* retry FDISCs every second up to devloss */
+ retry = 1;
+ maxretry = vport->cfg_devloss_tmo;
+ delay = 1000;
+ }
+
+ cmdiocb->retry++;
+ if (maxretry && (cmdiocb->retry >= maxretry)) {
+ phba->fc_stat.elsRetryExceeded++;
+ retry = 0;
+ }
+
+ if ((vport->load_flag & FC_UNLOADING) != 0)
+ retry = 0;
+
+ if (retry) {
+ if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
+ /* Stop retrying PLOGI and FDISC if in FCF discovery */
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2849 Stop retry ELS command "
+ "x%x to remote NPORT x%x, "
+ "Data: x%x x%x\n", cmd, did,
+ cmdiocb->retry, delay);
+ return 0;
+ }
+ }
+
+ /* Retry ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0107 Retry ELS command x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ cmd, did, cmdiocb->retry, delay);
+
+ if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
+ ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ IOERR_NO_RESOURCES))) {
+ /* Don't reset timer for no resources */
+
+ /* If discovery / RSCN timer is running, reset it */
+ if (timer_pending(&vport->fc_disctmo) ||
+ (vport->fc_flag & FC_RSCN_MODE))
+ lpfc_set_disctmo(vport);
+ }
+
+ phba->fc_stat.elsXmitRetry++;
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
+ phba->fc_stat.elsDelayRetry++;
+ ndlp->nlp_retry = cmdiocb->retry;
+
+ /* delay is specified in milliseconds */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(delay));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ if (cmd == ELS_CMD_PRLI)
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ else
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ ndlp->nlp_last_elscmd = cmd;
+
+ return 1;
+ }
+ switch (cmd) {
+ case ELS_CMD_FLOGI:
+ lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_FDISC:
+ lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_PLOGI:
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PLOGI_ISSUE);
+ }
+ lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_ADISC:
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_PRLI:
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_LOGO:
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+ lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
+ return 1;
+ }
+ }
+ /* No retry ELS command <elsCmd> to remote NPORT <did> */
+ if (logerr) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0137 No retry ELS command x%x to remote "
+ "NPORT x%x: Out of Resources: Error:x%x/%x\n",
+ cmd, did, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ }
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0108 No retry ELS command x%x to remote "
+ "NPORT x%x Retried:%d Error:x%x/%x\n",
+ cmd, did, cmdiocb->retry, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
+ *
+ * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
+ * associated with a command IOCB back to the lpfc DMA buffer pool. It first
+ * checks to see whether there is a lpfc DMA buffer associated with the
+ * response of the command IOCB. If so, it will be released before releasing
+ * the lpfc DMA buffer associated with the IOCB itself.
+ *
+ * Return code
+ * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
+static int
+lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
+{
+ struct lpfc_dmabuf *buf_ptr;
+
+ /* Free the response before processing the command. */
+ if (!list_empty(&buf_ptr1->list)) {
+ list_remove_head(&buf_ptr1->list, buf_ptr,
+ struct lpfc_dmabuf,
+ list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+ kfree(buf_ptr1);
+ return 0;
+}
+
+/**
+ * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr: pointer to the lpfc dma buffer data structure.
+ *
+ * This routine releases the lpfc Direct Memory Access (DMA) buffer
+ * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
+ * pool.
+ *
+ * Return code
+ * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
+static int
+lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
+{
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ return 0;
+}
+
+/**
+ * lpfc_els_free_iocb - Free a command iocb and its associated resources
+ * @phba: pointer to lpfc hba data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine frees a command IOCB and its associated resources. The
+ * command IOCB data structure contains the reference to various associated
+ * resources, these fields must be set to NULL if the associated reference
+ * not present:
+ * context1 - reference to ndlp
+ * context2 - reference to cmd
+ * context2->next - reference to rsp
+ * context3 - reference to bpl
+ *
+ * It first properly decrements the reference count held on ndlp for the
+ * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
+ * set, it invokes the lpfc_els_free_data() routine to release the Direct
+ * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
+ * adds the DMA buffer the @phba data structure for the delayed release.
+ * If reference to the Buffer Pointer List (BPL) is present, the
+ * lpfc_els_free_bpl() routine is invoked to release the DMA memory
+ * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
+ * invoked to release the IOCB data structure back to @phba IOCBQ list.
+ *
+ * Return code
+ * 0 - Success (currently, always return 0)
+ **/
+int
+lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
+{
+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *)elsiocb->context1;
+ if (ndlp) {
+ if (ndlp->nlp_flag & NLP_DEFER_RM) {
+ lpfc_nlp_put(ndlp);
+
+ /* If the ndlp is not being used by another discovery
+ * thread, free it.
+ */
+ if (!lpfc_nlp_not_used(ndlp)) {
+ /* If ndlp is being used by another discovery
+ * thread, just clear NLP_DEFER_RM
+ */
+ ndlp->nlp_flag &= ~NLP_DEFER_RM;
+ }
+ }
+ else
+ lpfc_nlp_put(ndlp);
+ elsiocb->context1 = NULL;
+ }
+ /* context2 = cmd, context2->next = rsp, context3 = bpl */
+ if (elsiocb->context2) {
+ if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+ /* Firmware could still be in progress of DMAing
+ * payload, so don't free data buffer till after
+ * a hbeat.
+ */
+ elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
+ buf_ptr = elsiocb->context2;
+ elsiocb->context2 = NULL;
+ if (buf_ptr) {
+ buf_ptr1 = NULL;
+ spin_lock_irq(&phba->hbalock);
+ if (!list_empty(&buf_ptr->list)) {
+ list_remove_head(&buf_ptr->list,
+ buf_ptr1, struct lpfc_dmabuf,
+ list);
+ INIT_LIST_HEAD(&buf_ptr1->list);
+ list_add_tail(&buf_ptr1->list,
+ &phba->elsbuf);
+ phba->elsbuf_cnt++;
+ }
+ INIT_LIST_HEAD(&buf_ptr->list);
+ list_add_tail(&buf_ptr->list, &phba->elsbuf);
+ phba->elsbuf_cnt++;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ } else {
+ buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ lpfc_els_free_data(phba, buf_ptr1);
+ }
+ }
+
+ if (elsiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ lpfc_els_free_bpl(phba, buf_ptr);
+ }
+ lpfc_sli_release_iocbq(phba, elsiocb);
+ return 0;
+}
+
+/**
+ * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the Logout (LOGO)
+ * Accept (ACC) Response ELS command. This routine is invoked to indicate
+ * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
+ * release the ndlp if it has the last reference remaining (reference count
+ * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
+ * field to NULL to inform the following lpfc_els_free_iocb() routine no
+ * ndlp reference count needs to be decremented. Otherwise, the ndlp
+ * reference use-count shall be decremented by the lpfc_els_free_iocb()
+ * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
+ * IOCB data structure.
+ **/
+static void
+lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "ACC LOGO cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+ /* ACC to LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0109 ACC to LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+
+ if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+ /* NPort Recovery mode or node is just allocated */
+ if (!lpfc_nlp_not_used(ndlp)) {
+ /* If the ndlp is being used by another discovery
+ * thread, just unregister the RPI.
+ */
+ lpfc_unreg_rpi(vport, ndlp);
+ } else {
+ /* Indicate the node has already released, should
+ * not reference to it from within lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
+ }
+
+ /*
+ * The driver received a LOGO from the rport and has ACK'd it.
+ * At this point, the driver is done so release the IOCB
+ */
+ lpfc_els_free_iocb(phba, cmdiocb);
+
+ /*
+ * Remove the ndlp reference if it's a fabric node that has
+ * sent us an unsolicted LOGO.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC)
+ lpfc_nlp_put(ndlp);
+
+ return;
+}
+
+/**
+ * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for unregister default
+ * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
+ * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
+ * decrements the ndlp reference count held for this completion callback
+ * function. After that, it invokes the lpfc_nlp_not_used() to check
+ * whether there is only one reference left on the ndlp. If so, it will
+ * perform one more decrement and trigger the release of the ndlp.
+ **/
+void
+lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ if (ndlp) {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ if (NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_nlp_put(ndlp);
+ /* This is the end of the default RPI cleanup logic for
+ * this ndlp. If no other discovery threads are using
+ * this ndlp, free all resources associated with it.
+ */
+ lpfc_nlp_not_used(ndlp);
+ } else {
+ lpfc_drop_node(ndlp->vport, ndlp);
+ }
+ }
+
+ return;
+}
+
+/**
+ * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for ELS Response IOCB
+ * command. In normal case, this callback function just properly sets the
+ * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
+ * field in the command IOCB is not NULL, the referred mailbox command will
+ * be send out, and then invokes the lpfc_els_free_iocb() routine to release
+ * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
+ * link down event occurred during the discovery, the lpfc_nlp_not_used()
+ * routine shall be invoked trying to release the ndlp if no other threads
+ * are currently referring it.
+ **/
+static void
+lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+ struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
+ IOCB_t *irsp;
+ uint8_t *pcmd;
+ LPFC_MBOXQ_t *mbox = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ uint32_t ls_rjt = 0;
+
+ irsp = &rspiocb->iocb;
+
+ if (cmdiocb->context_un.mbox)
+ mbox = cmdiocb->context_un.mbox;
+
+ /* First determine if this is a LS_RJT cmpl. Note, this callback
+ * function can have cmdiocb->contest1 (ndlp) field set to NULL.
+ */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+ /* A LS_RJT associated with Default RPI cleanup has its own
+ * separate code path.
+ */
+ if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+ ls_rjt = 1;
+ }
+
+ /* Check to see if link went down during discovery */
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
+ if (mbox) {
+ mp = (struct lpfc_dmabuf *) mbox->context1;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+ if (lpfc_nlp_not_used(ndlp)) {
+ ndlp = NULL;
+ /* Indicate the node has already released,
+ * should not reference to it from within
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
+ goto out;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "ELS rsp cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ cmdiocb->iocb.un.elsreq64.remoteID);
+ /* ELS response tag <ulpIoTag> completes */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0110 ELS response tag x%x completes "
+ "Data: x%x x%x x%x x%x x%x x%x x%x\n",
+ cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ if (mbox) {
+ if ((rspiocb->iocb.ulpStatus == 0)
+ && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ lpfc_unreg_rpi(vport, ndlp);
+ /* Increment reference count to ndlp to hold the
+ * reference to ndlp for the callback function.
+ */
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
+ if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
+ mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ }
+ else {
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ }
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ goto out;
+ else
+ /* Decrement the ndlp reference count we
+ * set for this failed mailbox command.
+ */
+ lpfc_nlp_put(ndlp);
+
+ /* ELS rsp: Cannot issue reg_login for <NPortid> */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0138 ELS rsp: Cannot issue reg_login for x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+
+ if (lpfc_nlp_not_used(ndlp)) {
+ ndlp = NULL;
+ /* Indicate node has already been released,
+ * should not reference to it from within
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
+ } else {
+ /* Do not drop node for lpfc_els_abort'ed ELS cmds */
+ if (!lpfc_error_lost_link(irsp) &&
+ ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ if (lpfc_nlp_not_used(ndlp)) {
+ ndlp = NULL;
+ /* Indicate node has already been
+ * released, should not reference
+ * to it from within the routine
+ * lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
+ }
+ }
+ mp = (struct lpfc_dmabuf *) mbox->context1;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+out:
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+ spin_unlock_irq(shost->host_lock);
+
+ /* If the node is not being used by another discovery thread,
+ * and we are sending a reject, we are done with it.
+ * Release driver reference count here and free associated
+ * resources.
+ */
+ if (ls_rjt)
+ if (lpfc_nlp_not_used(ndlp))
+ /* Indicate node has already been released,
+ * should not reference to it from within
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @flag: the els command code to be accepted.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues an Accept (ACC) response IOCB
+ * command. It uses the @flag to properly set up the IOCB field for the
+ * specific ACC response command to be issued and invokes the
+ * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
+ * @mbox pointer is passed in, it will be put into the context_un.mbox
+ * field of the IOCB for the completion callback function to issue the
+ * mailbox command to the HBA later when callback is invoked.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the corresponding response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc response
+ * 1 - Failed to issue acc response
+ **/
+int
+lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+ LPFC_MBOXQ_t *mbox)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+ ELS_PKT *els_pkt_ptr;
+
+ psli = &phba->sli;
+ oldcmd = &oldiocb->iocb;
+
+ switch (flag) {
+ case ELS_CMD_ACC:
+ cmdsize = sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+ return 1;
+ }
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ break;
+ case ELS_CMD_PLOGI:
+ cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ if (mbox)
+ elsiocb->context_un.mbox = mbox;
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PLOGI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ break;
+ case ELS_CMD_PRLO:
+ cmdsize = sizeof(uint32_t) + sizeof(PRLO);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+ sizeof(uint32_t) + sizeof(PRLO));
+ *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
+ els_pkt_ptr = (ELS_PKT *) pcmd;
+ els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PRLO: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ break;
+ default:
+ return 1;
+ }
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
+ "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
+ "fc_flag x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag);
+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ } else {
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ }
+
+ phba->fc_stat.elsXmitACC++;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @rejectError:
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issue an Reject (RJT) response IOCB
+ * command. If a @mbox pointer is passed in, it will be put into the
+ * context_un.mbox field of the IOCB for the completion callback function
+ * to issue to the HBA later.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the reject response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued reject response
+ * 1 - Failed to issue reject response
+ **/
+int
+lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+ LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ cmdsize = 2 * sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+ pcmd += sizeof(uint32_t);
+ *((uint32_t *) (pcmd)) = rejectError;
+
+ if (mbox)
+ elsiocb->context_un.mbox = mbox;
+
+ /* Xmit ELS RJT <err> response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0129 Xmit ELS RJT x%x response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ rejectError, elsiocb->iotag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue LS_RJT: did:x%x flg:x%x err:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
+
+ phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Address
+ * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC Accept response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc adisc response
+ * 1 - Failed to issue adisc acc response
+ **/
+int
+lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ ADISC *ap;
+ IOCB_t *icmd, *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ cmdsize = sizeof(uint32_t) + sizeof(ADISC);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+ /* Xmit ADISC ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0130 Xmit ADISC ACC response iotag x%x xri: "
+ "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+
+ ap = (ADISC *) (pcmd);
+ ap->hardAL_PA = phba->fc_pref_ALPA;
+ memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ap->DID = be32_to_cpu(vport->fc_myDID);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC ADISC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Process
+ * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI Accept response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc prli response
+ * 1 - Failed to issue acc prli response
+ **/
+int
+lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ PRLI *npr;
+ lpfc_vpd_t *vpd;
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+
+ cmdsize = sizeof(uint32_t) + sizeof(PRLI);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+ /* Xmit PRLI ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0131 Xmit PRLI ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+ pcmd += sizeof(uint32_t);
+
+ /* For PRLI, remainder of payload is PRLI parameter page */
+ memset(pcmd, 0, sizeof(PRLI));
+
+ npr = (PRLI *) pcmd;
+ vpd = &phba->vpd;
+ /*
+ * If the remote port is a target and our firmware version is 3.20 or
+ * later, set the following bits for FC-TAPE support.
+ */
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (vpd->rev.feaLevelHigh >= 0x02)) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+
+ npr->acceptRspCode = PRLI_REQ_EXECUTED;
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+ npr->ConfmComplAllowed = 1;
+
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PRLI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @format: rnid command format.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a Request Node Identification Data (RNID) Accept
+ * (ACC) response. It constructs the RNID ACC response command according to
+ * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
+ * issue the response. Note that this command does not need to hold the ndlp
+ * reference count for the callback. So, the ndlp reference count taken by
+ * the lpfc_prep_els_iocb() routine is put back and the context1 field of
+ * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
+ * there is no ndlp reference available.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function. However, for the RNID Accept Response ELS command,
+ * this is undone later by this routine after the IOCB is allocated.
+ *
+ * Return code
+ * 0 - Successfully issued acc rnid response
+ * 1 - Failed to issue acc rnid response
+ **/
+static int
+lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ RNID *rn;
+ IOCB_t *icmd, *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ + (2 * sizeof(struct lpfc_name));
+ if (format)
+ cmdsize += sizeof(RNID_TOP_DISC);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+ /* Xmit RNID ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0132 Xmit RNID ACC response tag x%x xri x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+
+ memset(pcmd, 0, sizeof(RNID));
+ rn = (RNID *) (pcmd);
+ rn->Format = format;
+ rn->CommonLen = (2 * sizeof(struct lpfc_name));
+ memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ switch (format) {
+ case 0:
+ rn->SpecificLen = 0;
+ break;
+ case RNID_TOPOLOGY_DISC:
+ rn->SpecificLen = sizeof(RNID_TOP_DISC);
+ memcpy(&rn->un.topologyDisc.portName,
+ &vport->fc_portname, sizeof(struct lpfc_name));
+ rn->un.topologyDisc.unitType = RNID_HBA;
+ rn->un.topologyDisc.physPort = 0;
+ rn->un.topologyDisc.attachedNodes = 0;
+ break;
+ default:
+ rn->CommonLen = 0;
+ rn->SpecificLen = 0;
+ break;
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC RNID: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @iocb: pointer to the lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return
+ **/
+static void
+lpfc_els_clear_rrq(struct lpfc_vport *vport,
+ struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint8_t *pcmd;
+ struct RRQ *rrq;
+ uint16_t rxid;
+ uint16_t xri;
+ struct lpfc_node_rrq *prrq;
+
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+ pcmd += sizeof(uint32_t);
+ rrq = (struct RRQ *)pcmd;
+ rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+ rxid = bf_get(rrq_rxid, rrq);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
+ " x%x x%x\n",
+ be32_to_cpu(bf_get(rrq_did, rrq)),
+ bf_get(rrq_oxid, rrq),
+ rxid,
+ iocb->iotag, iocb->iocb.ulpContext);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
+ ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
+ if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+ xri = bf_get(rrq_oxid, rrq);
+ else
+ xri = rxid;
+ prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
+ if (prrq)
+ lpfc_clr_rrq_active(phba, xri, prrq);
+ return;
+}
+
+/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully issued acc echo response
+ * 1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+ /* The accumulated length can exceed the BPL_SIZE. For
+ * now, use this as the limit
+ */
+ if (cmdsize > LPFC_BPL_SIZE)
+ cmdsize = LPFC_BPL_SIZE;
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
+ elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+
+ /* Xmit ECHO ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC ECHO: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Address Discover (ADISC) ELS commands to those
+ * N_Ports which are in node port recovery state and ADISC has not been issued
+ * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
+ * lpfc_issue_els_adisc() routine, the per @vport number of discover count
+ * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
+ * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
+ * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
+ * IOCBs quit for later pick up. On the other hand, after walking through
+ * all the ndlps with the @vport and there is none ADISC IOCB issued, the
+ * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
+ * no more ADISC need to be sent.
+ *
+ * Return code
+ * The number of N_Ports with adisc issued.
+ **/
+int
+lpfc_els_disc_adisc(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ int sentadisc = 0;
+
+ /* go thru NPR nodes and issue any remaining ELS ADISCs */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+ (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
+ sentadisc++;
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
+ break;
+ }
+ }
+ }
+ if (sentadisc == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ return sentadisc;
+}
+
+/**
+ * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
+ * which are in node port recovery state, with a @vport. Each time an ELS
+ * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
+ * the per @vport number of discover count (num_disc_nodes) shall be
+ * incremented. If the num_disc_nodes reaches a pre-configured threshold
+ * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
+ * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
+ * later pick up. On the other hand, after walking through all the ndlps with
+ * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
+ * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
+ * PLOGI need to be sent.
+ *
+ * Return code
+ * The number of N_Ports with plogi issued.
+ **/
+int
+lpfc_els_disc_plogi(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ int sentplogi = 0;
+
+ /* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+ (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
+ (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ sentplogi++;
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
+ break;
+ }
+ }
+ }
+ if (sentplogi) {
+ lpfc_set_disctmo(vport);
+ }
+ else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ return sentplogi;
+}
+
+/**
+ * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine cleans up any Registration State Change Notification
+ * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
+ * @vport together with the host_lock is used to prevent multiple thread
+ * trying to access the RSCN array on a same @vport at the same time.
+ **/
+void
+lpfc_els_flush_rscn(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ int i;
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_rscn_flush) {
+ /* Another thread is walking fc_rscn_id_list on this vport */
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ /* Indicate we are walking lpfc_els_flush_rscn on this vport */
+ vport->fc_rscn_flush = 1;
+ spin_unlock_irq(shost->host_lock);
+
+ for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+ lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
+ vport->fc_rscn_id_list[i] = NULL;
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_rscn_id_cnt = 0;
+ vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ /* Indicate we are done walking this fc_rscn_id_list */
+ vport->fc_rscn_flush = 0;
+}
+
+/**
+ * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: remote destination port identifier.
+ *
+ * This routine checks whether there is any pending Registration State
+ * Configuration Notification (RSCN) to a @did on @vport.
+ *
+ * Return code
+ * None zero - The @did matched with a pending rscn
+ * 0 - not able to match @did with a pending rscn
+ **/
+int
+lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
+{
+ D_ID ns_did;
+ D_ID rscn_did;
+ uint32_t *lp;
+ uint32_t payload_len, i;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ns_did.un.word = did;
+
+ /* Never match fabric nodes for RSCNs */
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+ return 0;
+
+ /* If we are doing a FULL RSCN rediscovery, match everything */
+ if (vport->fc_flag & FC_RSCN_DISCOVERY)
+ return did;
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_rscn_flush) {
+ /* Another thread is walking fc_rscn_id_list on this vport */
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+ /* Indicate we are walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 1;
+ spin_unlock_irq(shost->host_lock);
+ for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+ lp = vport->fc_rscn_id_list[i]->virt;
+ payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+ payload_len -= sizeof(uint32_t); /* take off word 0 */
+ while (payload_len) {
+ rscn_did.un.word = be32_to_cpu(*lp++);
+ payload_len -= sizeof(uint32_t);
+ switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
+ case RSCN_ADDRESS_FORMAT_PORT:
+ if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+ && (ns_did.un.b.area == rscn_did.un.b.area)
+ && (ns_did.un.b.id == rscn_did.un.b.id))
+ goto return_did_out;
+ break;
+ case RSCN_ADDRESS_FORMAT_AREA:
+ if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+ && (ns_did.un.b.area == rscn_did.un.b.area))
+ goto return_did_out;
+ break;
+ case RSCN_ADDRESS_FORMAT_DOMAIN:
+ if (ns_did.un.b.domain == rscn_did.un.b.domain)
+ goto return_did_out;
+ break;
+ case RSCN_ADDRESS_FORMAT_FABRIC:
+ goto return_did_out;
+ }
+ }
+ }
+ /* Indicate we are done with walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
+ return 0;
+return_did_out:
+ /* Indicate we are done with walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
+ return did;
+}
+
+/**
+ * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
+ * state machine for a @vport's nodes that are with pending RSCN (Registration
+ * State Change Notification).
+ *
+ * Return code
+ * 0 - Successful (currently alway return 0)
+ **/
+static int
+lpfc_rscn_recovery_check(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+
+ /* Move all affected nodes by pending RSCNs to NPR state. */
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) ||
+ (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+ !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
+ continue;
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_send_rscn_event - Send an RSCN event to management application
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ *
+ * lpfc_send_rscn_event sends an RSCN netlink event to management
+ * applications.
+ */
+static void
+lpfc_send_rscn_event(struct lpfc_vport *vport,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ uint32_t *payload_ptr;
+ uint32_t payload_len;
+ struct lpfc_rscn_event_header *rscn_event_data;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ payload_ptr = (uint32_t *) pcmd->virt;
+ payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
+
+ rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
+ payload_len, GFP_KERNEL);
+ if (!rscn_event_data) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0147 Failed to allocate memory for RSCN event\n");
+ return;
+ }
+ rscn_event_data->event_type = FC_REG_RSCN_EVENT;
+ rscn_event_data->payload_length = payload_len;
+ memcpy(rscn_event_data->rscn_payload, payload_ptr,
+ payload_len);
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(struct lpfc_els_event_header) + payload_len,
+ (char *)rscn_event_data,
+ LPFC_NL_VENDOR_ID);
+
+ kfree(rscn_event_data);
+}
+
+/**
+ * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RSCN (Registration State Change
+ * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
+ * to invoke fc_host_post_event() routine to the FC transport layer. If the
+ * discover state machine is about to begin discovery, it just accepts the
+ * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
+ * contains N_Port IDs for other vports on this HBA, it just accepts the
+ * RSCN and ignore processing it. If the state machine is in the recovery
+ * state, the fc_rscn_id_list of this @vport is walked and the
+ * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
+ * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
+ * routine is invoked to handle the RSCN event.
+ *
+ * Return code
+ * 0 - Just sent the acc response
+ * 1 - Sent the acc response and waited for name server completion
+ **/
+static int
+lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp, *datap;
+ IOCB_t *icmd;
+ uint32_t payload_len, length, nportid, *cmd;
+ int rscn_cnt;
+ int rscn_id = 0, hba_id = 0;
+ int i;
+
+ icmd = &cmdiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+ payload_len -= sizeof(uint32_t); /* take off word 0 */
+ /* RSCN received */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0214 RSCN received Data: x%x x%x x%x x%x\n",
+ vport->fc_flag, payload_len, *lp,
+ vport->fc_rscn_id_cnt);
+
+ /* Send an RSCN event to the management application */
+ lpfc_send_rscn_event(vport, cmdiocb);
+
+ for (i = 0; i < payload_len/sizeof(uint32_t); i++)
+ fc_host_post_event(shost, fc_get_event_number(),
+ FCH_EVT_RSCN, lp[i]);
+
+ /* If we are about to begin discovery, just ACC the RSCN.
+ * Discovery processing will satisfy it.
+ */
+ if (vport->port_state <= LPFC_NS_QRY) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ return 0;
+ }
+
+ /* If this RSCN just contains NPortIDs for other vports on this HBA,
+ * just ACC and ignore it.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->cfg_peer_port_login)) {
+ i = payload_len;
+ datap = lp;
+ while (i > 0) {
+ nportid = *datap++;
+ nportid = ((be32_to_cpu(nportid)) & Mask_DID);
+ i -= sizeof(uint32_t);
+ rscn_id++;
+ if (lpfc_find_vport_by_did(phba, nportid))
+ hba_id++;
+ }
+ if (rscn_id == hba_id) {
+ /* ALL NPortIDs in RSCN are on HBA */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0219 Ignore RSCN "
+ "Data: x%x x%x x%x x%x\n",
+ vport->fc_flag, payload_len,
+ *lp, vport->fc_rscn_id_cnt);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state,
+ ndlp->nlp_flag);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
+ ndlp, NULL);
+ return 0;
+ }
+ }
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_rscn_flush) {
+ /* Another thread is walking fc_rscn_id_list on this vport */
+ vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
+ /* Send back ACC */
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ return 0;
+ }
+ /* Indicate we are walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 1;
+ spin_unlock_irq(shost->host_lock);
+ /* Get the array count after successfully have the token */
+ rscn_cnt = vport->fc_rscn_id_cnt;
+ /* If we are already processing an RSCN, save the received
+ * RSCN payload buffer, cmdiocb->context2 to process later.
+ */
+ if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_DEFERRED;
+ if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
+ !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
+ vport->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ if (rscn_cnt) {
+ cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
+ length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
+ }
+ if ((rscn_cnt) &&
+ (payload_len + length <= LPFC_BPL_SIZE)) {
+ *cmd &= ELS_CMD_MASK;
+ *cmd |= cpu_to_be32(payload_len + length);
+ memcpy(((uint8_t *)cmd) + length, lp,
+ payload_len);
+ } else {
+ vport->fc_rscn_id_list[rscn_cnt] = pcmd;
+ vport->fc_rscn_id_cnt++;
+ /* If we zero, cmdiocb->context2, the calling
+ * routine will not try to free it.
+ */
+ cmdiocb->context2 = NULL;
+ }
+ /* Deferred RSCN */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0235 Deferred RSCN "
+ "Data: x%x x%x x%x\n",
+ vport->fc_rscn_id_cnt, vport->fc_flag,
+ vport->port_state);
+ } else {
+ vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
+ /* ReDiscovery RSCN */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0234 ReDiscovery RSCN "
+ "Data: x%x x%x x%x\n",
+ vport->fc_rscn_id_cnt, vport->fc_flag,
+ vport->port_state);
+ }
+ /* Indicate we are done walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
+ /* Send back ACC */
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ /* send RECOVERY event for ALL nodes that match RSCN payload */
+ lpfc_rscn_recovery_check(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
+ /* Indicate we are done walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
+ /*
+ * If we zero, cmdiocb->context2, the calling routine will
+ * not try to free it.
+ */
+ cmdiocb->context2 = NULL;
+ lpfc_set_disctmo(vport);
+ /* Send back ACC */
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ /* send RECOVERY event for ALL nodes that match RSCN payload */
+ lpfc_rscn_recovery_check(vport);
+ return lpfc_els_handle_rscn(vport);
+}
+
+/**
+ * lpfc_els_handle_rscn - Handle rscn for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine handles the Registration State Configuration Notification
+ * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
+ * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
+ * if the ndlp to NameServer exists, a Common Transport (CT) command to the
+ * NameServer shall be issued. If CT command to the NameServer fails to be
+ * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
+ * RSCN activities with the @vport.
+ *
+ * Return code
+ * 0 - Cleaned up rscn on the @vport
+ * 1 - Wait for plogi to name server before proceed
+ **/
+int
+lpfc_els_handle_rscn(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Ignore RSCN if the port is being torn down. */
+ if (vport->load_flag & FC_UNLOADING) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
+
+ /* Start timer for RSCN processing */
+ lpfc_set_disctmo(vport);
+
+ /* RSCN processed */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ vport->fc_flag, 0, vport->fc_rscn_id_cnt,
+ vport->port_state);
+
+ /* To process RSCN, first compare RSCN data with NameServer */
+ vport->fc_ns_retry = 0;
+ vport->num_disc_nodes = 0;
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+ && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ /* Good ndlp, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
+ /* Wait for NameServer query cmpl before we can
+ continue */
+ return 1;
+ } else {
+ /* If login to NameServer does not exist, issue one */
+ /* Good status, issue PLOGI to NameServer */
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ /* Wait for NameServer login cmpl before we can
+ continue */
+ return 1;
+
+ if (ndlp) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_PLOGI_ISSUE);
+ if (!ndlp) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
+ ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+ } else {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
+ lpfc_nlp_init(vport, ndlp, NameServer_DID);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ }
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_issue_els_plogi(vport, NameServer_DID, 0);
+ /* Wait for NameServer login cmpl before we can
+ * continue
+ */
+ return 1;
+ }
+
+ lpfc_els_flush_rscn(vport);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
+ * unsolicited event. An unsolicited FLOGI can be received in a point-to-
+ * point topology. As an unsolicited FLOGI should not be received in a loop
+ * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
+ * lpfc_check_sparm() routine is invoked to check the parameters in the
+ * unsolicited FLOGI. If parameters validation failed, the routine
+ * lpfc_els_rsp_reject() shall be called with reject reason code set to
+ * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
+ * FLOGI shall be compared with the Port WWN of the @vport to determine who
+ * will initiate PLOGI. The higher lexicographical value party shall has
+ * higher priority (as the winning port) and will initiate PLOGI and
+ * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
+ * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
+ * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
+ *
+ * Return code
+ * 0 - Successfully processed the unsolicited flogi
+ * 1 - Failed to process the unsolicited flogi
+ **/
+static int
+lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *lp = (uint32_t *) pcmd->virt;
+ IOCB_t *icmd = &cmdiocb->iocb;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ uint32_t cmd, did;
+ int rc;
+ uint32_t fc_flag = 0;
+ uint32_t port_state = 0;
+
+ cmd = *lp++;
+ sp = (struct serv_parm *) lp;
+
+ /* FLOGI received */
+
+ lpfc_set_disctmo(vport);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ /* We should never receive a FLOGI in loop mode, ignore it */
+ did = icmd->un.elsreq64.remoteID;
+
+ /* An FLOGI ELS command <elsCmd> was received from DID <did> in
+ Loop Mode */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0113 An FLOGI ELS command x%x was "
+ "received from DID x%x in Loop Mode\n",
+ cmd, did);
+ return 1;
+ }
+
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
+ /* For a FLOGI we accept, then if our portname is greater
+ * then the remote portname we initiate Nport login.
+ */
+
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+
+ if (!rc) {
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mbox)
+ return 1;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ } else {
+ /* abort the flogi coming back to ourselves
+ * due to external loopback on the port.
+ */
+ lpfc_els_abort_flogi(phba);
+ return 0;
+ }
+ } else if (rc > 0) { /* greater than */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
+
+ /* If we have the high WWPN we can assign our own
+ * myDID; otherwise, we have to WAIT for a PLOGI
+ * from the remote NPort to find out what it
+ * will be.
+ */
+ vport->fc_myDID = PT2PT_LocalID;
+ } else
+ vport->fc_myDID = PT2PT_RemoteID;
+
+ /*
+ * The vport state should go to LPFC_FLOGI only
+ * AFTER we issue a FLOGI, not receive one.
+ */
+ spin_lock_irq(shost->host_lock);
+ fc_flag = vport->fc_flag;
+ port_state = vport->port_state;
+ vport->fc_flag |= FC_PT2PT;
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3311 Rcv Flogi PS x%x new PS x%x "
+ "fc_flag x%x new fc_flag x%x\n",
+ port_state, vport->port_state,
+ fc_flag, vport->fc_flag);
+
+ /*
+ * We temporarily set fc_myDID to make it look like we are
+ * a Fabric. This is done just so we end up with the right
+ * did / sid on the FLOGI ACC rsp.
+ */
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
+
+ } else {
+ /* Reject this request because invalid parameters */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ stat.un.b.vendorUnique = 0;
+
+ /*
+ * We temporarily set fc_myDID to make it look like we are
+ * a Fabric. This is done just so we end up with the right
+ * did / sid on the FLOGI LS_RJT rsp.
+ */
+ did = vport->fc_myDID;
+ vport->fc_myDID = Fabric_DID;
+
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+
+ /* Now lets put fc_myDID back to what its supposed to be */
+ vport->fc_myDID = did;
+
+ return 1;
+ }
+
+ /* Send back ACC */
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+
+ /* Now lets put fc_myDID back to what its supposed to be */
+ vport->fc_myDID = did;
+
+ if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ return 1;
+}
+
+/**
+ * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Request Node Identification Data (RNID) IOCB
+ * received as an ELS unsolicited event. Only when the RNID specified format
+ * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
+ * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
+ * Accept (ACC) the RNID ELS command. All the other RNID formats are
+ * rejected by invoking the lpfc_els_rsp_reject() routine.
+ *
+ * Return code
+ * 0 - Successfully processed rnid iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ RNID *rn;
+ struct ls_rjt stat;
+ uint32_t cmd;
+
+ icmd = &cmdiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ rn = (RNID *) lp;
+
+ /* RNID received */
+
+ switch (rn->Format) {
+ case 0:
+ case RNID_TOPOLOGY_DISC:
+ /* Send back ACC */
+ lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
+ break;
+ default:
+ /* Reject this request because format not supported */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ uint8_t *pcmd;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+ /* skip over first word of echo command to find echo data */
+ pcmd += sizeof(uint32_t);
+
+ lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Link Incident Report Registration(LIRR) IOCB
+ * received as an ELS unsolicited event. Currently, this function just invokes
+ * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
+ *
+ * Return code
+ * 0 - Successfully processed lirr iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct ls_rjt stat;
+
+ /* For now, unconditionally reject this command */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
+ * received as an ELS unsolicited event. A request to RRQ shall only
+ * be accepted if the Originator Nx_Port N_Port_ID or the Responder
+ * Nx_Port N_Port_ID of the target Exchange is the same as the
+ * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
+ * not accepted, an LS_RJT with reason code "Unable to perform
+ * command request" and reason code explanation "Invalid Originator
+ * S_ID" shall be returned. For now, we just unconditionally accept
+ * RRQ from the target.
+ **/
+static void
+lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
+}
+
+/**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ IOCB_t *icmd;
+ struct RLS_RSP *rls_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ uint16_t oxid;
+ uint16_t rxid;
+ uint32_t cmdsize;
+
+ mb = &pmb->u.mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+ oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (mb->mbxStatus) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ /* Decrement the ndlp reference count from previous mbox command */
+ lpfc_nlp_put(ndlp);
+
+ if (!elsiocb) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+ rls_rsp = (struct RLS_RSP *)pcmd;
+
+ rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+ rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+ rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+ rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+ rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+ rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ /* Xmit ELS RLS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+}
+
+/**
+ * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ IOCB_t *icmd;
+ RPS_RSP *rps_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ uint16_t status;
+ uint16_t oxid;
+ uint16_t rxid;
+ uint32_t cmdsize;
+
+ mb = &pmb->u.mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+ oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (mb->mbxStatus) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ /* Decrement the ndlp reference count from previous mbox command */
+ lpfc_nlp_put(ndlp);
+
+ if (!elsiocb)
+ return;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+ rps_rsp = (RPS_RSP *)pcmd;
+
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+ status = 0x10;
+ else
+ status = 0x8;
+ if (phba->pport->fc_flag & FC_FABRIC)
+ status |= 0x4;
+
+ rps_rsp->rsvd1 = 0;
+ rps_rsp->portStatus = cpu_to_be16(status);
+ rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+ rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+ rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+ rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+ rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+ rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+ /* Xmit ELS RPS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+ return;
+}
+
+/**
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port Status (RPL) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
+ * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
+ * for reading the HBA link statistics. It is for the callback function,
+ * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
+ * to actually sending out RPL Accept (ACC) response.
+ *
+ * Return codes
+ * 0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *pcmd;
+ struct ls_rjt stat;
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RPS request and done with it */
+ goto reject_out;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
+ lpfc_read_lnk_stat(phba, mbox);
+ mbox->context1 = (void *)((unsigned long)
+ ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+ cmdiocb->iocb.ulpContext)); /* rx_id */
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ /* Mbox completion will send ELS Response */
+ return 0;
+ /* Decrement reference count used for the failed mbox
+ * command.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Timout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
+ * Value (RTV) unsolicited IOCB event.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ * Return codes
+ * 0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct ls_rjt stat;
+ struct RTV_RSP *rtv_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ uint32_t cmdsize;
+
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RPS request and done with it */
+ goto reject_out;
+
+ cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ if (!elsiocb)
+ return 1;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+
+ /* use the command's xri in the response */
+ elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
+ elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+
+ rtv_rsp = (struct RTV_RSP *)pcmd;
+
+ /* populate RTV payload */
+ rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+ rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+ bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+ bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+ rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+ /* Xmit ELS RLS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+ "Data: x%x x%x x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi,
+ rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port Status (RPS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
+ * for reading the HBA link statistics. It is for the callback function,
+ * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
+ * to actually sending out RPS Accept (ACC) response.
+ *
+ * Return codes
+ * 0 - Successfully processed rps iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t *lp;
+ uint8_t flag;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *pcmd;
+ RPS *rps;
+ struct ls_rjt stat;
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RPS request and done with it */
+ goto reject_out;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ flag = (be32_to_cpu(*lp++) & 0xf);
+ rps = (RPS *) lp;
+
+ if ((flag == 0) ||
+ ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
+ ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name)) == 0))) {
+
+ printk("Fix me....\n");
+ dump_stack();
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
+ lpfc_read_lnk_stat(phba, mbox);
+ mbox->context1 = (void *)((unsigned long)
+ ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+ cmdiocb->iocb.ulpContext)); /* rx_id */
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ /* Mbox completion will send ELS Response */
+ return 0;
+ /* Decrement reference count used for the failed mbox
+ * command.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/* lpfc_issue_els_rrq - Process an unsolicited rps iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: DID of the target.
+ * @rrq: Pointer to the rrq struct.
+ *
+ * Build a ELS RRQ command and send it to the target. If the issue_iocb is
+ * Successful the the completion handler will clear the RRQ.
+ *
+ * Return codes
+ * 0 - Successfully sent rrq els iocb.
+ * 1 - Failed to send rrq els iocb.
+ **/
+static int
+lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did, struct lpfc_node_rrq *rrq)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct RRQ *els_rrq;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int ret;
+
+
+ if (ndlp != rrq->ndlp)
+ ndlp = rrq->ndlp;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return 1;
+
+ /* If ndlp is not NULL, we will bump the reference count on it */
+ cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
+ ELS_CMD_RRQ);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For RRQ request, remainder of payload is Exchange IDs */
+ *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
+ pcmd += sizeof(uint32_t);
+ els_rrq = (struct RRQ *) pcmd;
+
+ bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
+ bf_set(rrq_rxid, els_rrq, rrq->rxid);
+ bf_set(rrq_did, els_rrq, vport->fc_myDID);
+ els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
+ els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RRQ: did:x%x",
+ did, rrq->xritag, rrq->rxid);
+ elsiocb->context_un.rrq = rrq;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_send_rrq - Sends ELS RRQ if needed.
+ * @phba: pointer to lpfc hba data structure.
+ * @rrq: pointer to the active rrq.
+ *
+ * This routine will call the lpfc_issue_els_rrq if the rrq is
+ * still active for the xri. If this function returns a failure then
+ * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
+ *
+ * Returns 0 Success.
+ * 1 Failure.
+ **/
+int
+lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
+{
+ struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
+ rrq->nlp_DID);
+ if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
+ return lpfc_issue_els_rrq(rrq->vport, ndlp,
+ rrq->nlp_DID, rrq);
+ else
+ return 1;
+}
+
+/**
+ * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdsize: size of the ELS command.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
+ * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPL Accept Response ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued ACC RPL ELS command
+ * 1 - Failed to issue ACC RPL ELS command
+ **/
+static int
+lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
+ RPL_RSP rpl_rsp;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint16_t);
+ *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
+ pcmd += sizeof(uint16_t);
+
+ /* Setup the RPL ACC payload */
+ rpl_rsp.listLen = be32_to_cpu(1);
+ rpl_rsp.index = 0;
+ rpl_rsp.port_num_blk.portNum = 0;
+ rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
+ memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+ memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
+ /* Xmit ELS RPL ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0120 Xmit ELS RPL ACC response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port List (RPL) IOCB received as an ELS
+ * unsolicited event. It first checks the remote port state. If the remote
+ * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
+ * invokes the lpfc_els_rsp_reject() routine to send reject response.
+ * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
+ * to accept the RPL.
+ *
+ * Return code
+ * 0 - Successfully processed rpl iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ uint32_t maxsize;
+ uint16_t cmdsize;
+ RPL *rpl;
+ struct ls_rjt stat;
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ /* rejected the unsolicited RPL request and done with it */
+ return 0;
+ }
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ rpl = (RPL *) (lp + 1);
+ maxsize = be32_to_cpu(rpl->maxsize);
+
+ /* We support only one port */
+ if ((rpl->index == 0) &&
+ ((maxsize == 0) ||
+ ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
+ cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
+ } else {
+ cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
+ }
+ lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
+
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_farp - Process an unsolicited farp request els command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
+ * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
+ * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
+ * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
+ * remote PortName is compared against the FC PortName stored in the @vport
+ * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
+ * compared against the FC NodeName stored in the @vport data structure.
+ * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
+ * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
+ * invoked to send out FARP Response to the remote node. Before sending the
+ * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
+ * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
+ * routine is invoked to log into the remote port first.
+ *
+ * Return code
+ * 0 - Either the FARP Match Mode not supported or successfully processed
+ **/
+static int
+lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ FARP *fp;
+ uint32_t cmd, cnt, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ fp = (FARP *) lp;
+ /* FARP-REQ received from DID <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0601 FARP-REQ received from DID x%x\n", did);
+ /* We will only support match on WWPN or WWNN */
+ if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
+ return 0;
+ }
+
+ cnt = 0;
+ /* If this FARP command is searching for my portname */
+ if (fp->Mflags & FARP_MATCH_PORT) {
+ if (memcmp(&fp->RportName, &vport->fc_portname,
+ sizeof(struct lpfc_name)) == 0)
+ cnt = 1;
+ }
+
+ /* If this FARP command is searching for my nodename */
+ if (fp->Mflags & FARP_MATCH_NODE) {
+ if (memcmp(&fp->RnodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name)) == 0)
+ cnt = 1;
+ }
+
+ if (cnt) {
+ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+ /* Log back into the node before sending the FARP. */
+ if (fp->Rflags & FARP_REQUEST_PLOGI) {
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
+
+ /* Send a FARP response to that node */
+ if (fp->Rflags & FARP_REQUEST_FARPR)
+ lpfc_issue_els_farpr(vport, did, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
+ * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
+ * the FARP response request.
+ *
+ * Return code
+ * 0 - Successfully processed FARPR IOCB (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ uint32_t cmd, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ /* FARP-RSP received from DID <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0600 FARP-RSP received from DID x%x\n", did);
+ /* ACCEPT the Farp resp request */
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @fan_ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fabric Address Notification (FAN) IOCB
+ * command received as an ELS unsolicited event. The FAN ELS command will
+ * only be processed on a physical port (i.e., the @vport represents the
+ * physical port). The fabric NodeName and PortName from the FAN IOCB are
+ * compared against those in the phba data structure. If any of those is
+ * different, the lpfc_initial_flogi() routine is invoked to initialize
+ * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
+ * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ * 0 - Successfully processed fan iocb (currently always return 0).
+ **/
+static int
+lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *fan_ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t *lp;
+ FAN *fp;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+ lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ fp = (FAN *) ++lp;
+ /* FAN received; Fan does not have a reply sequence */
+ if ((vport == phba->pport) &&
+ (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
+ if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
+ sizeof(struct lpfc_name))) ||
+ (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
+ sizeof(struct lpfc_name)))) {
+ /* This port has switched fabrics. FLOGI is required */
+ lpfc_issue_init_vfi(vport);
+ } else {
+ /* FAN verified - skip FLOGI */
+ vport->fc_myDID = vport->fc_prevDID;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_issue_fabric_reglogin(vport);
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3138 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
+ lpfc_issue_reg_vfi(vport);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * lpfc_els_timeout - Handler funciton to the els timer
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the ELS timer after timeout. It posts the ELS
+ * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
+ * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
+ * up the worker thread. It is for the worker thread to invoke the routine
+ * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
+ **/
+void
+lpfc_els_timeout(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+ vport->work_port_events |= WORKER_ELS_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+
+/**
+ * lpfc_els_timeout_handler - Process an els timeout event
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine is the actual handler function that processes an ELS timeout
+ * event. It walks the ELS ring to get and abort all the IOCBs (except the
+ * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
+ * invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
+void
+lpfc_els_timeout_handler(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_dmabuf *pcmd;
+ uint32_t els_command = 0;
+ uint32_t timeout;
+ uint32_t remote_ID = 0xffffffff;
+ LIST_HEAD(abort_list);
+
+
+ timeout = (uint32_t)(phba->fc_ratov << 1);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ if ((phba->pport->load_flag & FC_UNLOADING))
+ return;
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ if ((phba->pport->load_flag & FC_UNLOADING)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ cmd = &piocb->iocb;
+
+ if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
+ piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ continue;
+
+ if (piocb->vport != vport)
+ continue;
+
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ if (pcmd)
+ els_command = *(uint32_t *) (pcmd->virt);
+
+ if (els_command == ELS_CMD_FARP ||
+ els_command == ELS_CMD_FARPR ||
+ els_command == ELS_CMD_FDISC)
+ continue;
+
+ if (piocb->drvrTimeout > 0) {
+ if (piocb->drvrTimeout >= timeout)
+ piocb->drvrTimeout -= timeout;
+ else
+ piocb->drvrTimeout = 0;
+ continue;
+ }
+
+ remote_ID = 0xffffffff;
+ if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
+ remote_ID = cmd->un.elsreq64.remoteID;
+ else {
+ struct lpfc_nodelist *ndlp;
+ ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ remote_ID = ndlp->nlp_DID;
+ }
+ list_add_tail(&piocb->dlist, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ cmd = &piocb->iocb;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0127 ELS timeout Data: x%x x%x x%x "
+ "x%x\n", els_command,
+ remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&piocb->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ mod_timer(&vport->els_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * timeout));
+}
+
+/**
+ * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with a non-NULL completion callback function, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
+ * callback function, the IOCB will simply be released. Finally, it walks
+ * the ELS transmit completion queue to issue an abort IOCB to any transmit
+ * completion queue IOCB that is associated with the @vport and is not
+ * an IOCB from libdfc (i.e., the management plane IOCBs that are not
+ * part of the discovery state machine) out to HBA by invoking the
+ * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
+ * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
+ * the IOCBs are aborted when this function returns.
+ **/
+void
+lpfc_els_flush_cmd(struct lpfc_vport *vport)
+{
+ LIST_HEAD(abort_list);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd = NULL;
+
+ lpfc_fabric_abort_vport(vport);
+ /*
+ * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
+ * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
+ * ultimately grabs the ring_lock, the driver must splice the list into
+ * a working list and release the locks before calling the abort.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ continue;
+
+ if (piocb->vport != vport)
+ continue;
+ list_add_tail(&piocb->dlist, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ /* Abort each iocb on the aborted list and remove the dlist links. */
+ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&piocb->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+ if (!list_empty(&abort_list))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "3387 abort list for txq not empty\n");
+ INIT_LIST_HEAD(&abort_list);
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+
+ /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+ if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
+ cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
+ cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ cmd->ulpCommand == CMD_ABORT_XRI_CN)
+ continue;
+
+ if (piocb->vport != vport)
+ continue;
+
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancell all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+
+ return;
+}
+
+/**
+ * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with the completion callback function associated, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
+ * callback function associated, the IOCB will simply be released. Finally,
+ * it walks the ELS transmit completion queue to issue an abort IOCB to any
+ * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
+ * management plane IOCBs that are not part of the discovery state machine)
+ * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
+void
+lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ list_for_each_entry(vport, &phba->port_list, listentry)
+ lpfc_els_flush_cmd(vport);
+
+ return;
+}
+
+/**
+ * lpfc_send_els_failure_event - Posts an ELS command failure event
+ * @phba: Pointer to hba context object.
+ * @cmdiocbp: Pointer to command iocb which reported error.
+ * @rspiocbp: Pointer to response iocb which reported error.
+ *
+ * This function sends an event when there is an ELS command
+ * failure.
+ **/
+void
+lpfc_send_els_failure_event(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbp,
+ struct lpfc_iocbq *rspiocbp)
+{
+ struct lpfc_vport *vport = cmdiocbp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_lsrjt_event lsrjt_event;
+ struct lpfc_fabric_event_header fabric_event;
+ struct ls_rjt stat;
+ struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+
+ ndlp = cmdiocbp->context1;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
+ lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
+ memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ cmdiocbp->context2)->virt);
+ lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
+ stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
+ lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(lsrjt_event),
+ (char *)&lsrjt_event,
+ LPFC_NL_VENDOR_ID);
+ return;
+ }
+ if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
+ (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ fabric_event.event_type = FC_REG_FABRIC_EVENT;
+ if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
+ else
+ fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
+ memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(fabric_event),
+ (char *)&fabric_event,
+ LPFC_NL_VENDOR_ID);
+ return;
+ }
+
+}
+
+/**
+ * lpfc_send_els_event - Posts unsolicited els event
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer FC node object.
+ * @cmd: ELS command code.
+ *
+ * This function posts an event when there is an incoming
+ * unsolicited ELS command.
+ **/
+static void
+lpfc_send_els_event(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ uint32_t *payload)
+{
+ struct lpfc_els_event_header *els_data = NULL;
+ struct lpfc_logo_event *logo_data = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (*payload == ELS_CMD_LOGO) {
+ logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
+ if (!logo_data) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0148 Failed to allocate memory "
+ "for LOGO event\n");
+ return;
+ }
+ els_data = &logo_data->header;
+ } else {
+ els_data = kmalloc(sizeof(struct lpfc_els_event_header),
+ GFP_KERNEL);
+ if (!els_data) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0149 Failed to allocate memory "
+ "for ELS event\n");
+ return;
+ }
+ }
+ els_data->event_type = FC_REG_ELS_EVENT;
+ switch (*payload) {
+ case ELS_CMD_PLOGI:
+ els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
+ break;
+ case ELS_CMD_PRLO:
+ els_data->subcategory = LPFC_EVENT_PRLO_RCV;
+ break;
+ case ELS_CMD_ADISC:
+ els_data->subcategory = LPFC_EVENT_ADISC_RCV;
+ break;
+ case ELS_CMD_LOGO:
+ els_data->subcategory = LPFC_EVENT_LOGO_RCV;
+ /* Copy the WWPN in the LOGO payload */
+ memcpy(logo_data->logo_wwpn, &payload[2],
+ sizeof(struct lpfc_name));
+ break;
+ default:
+ kfree(els_data);
+ return;
+ }
+ memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+ if (*payload == ELS_CMD_LOGO) {
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(struct lpfc_logo_event),
+ (char *)logo_data,
+ LPFC_NL_VENDOR_ID);
+ kfree(logo_data);
+ } else {
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(struct lpfc_els_event_header),
+ (char *)els_data,
+ LPFC_NL_VENDOR_ID);
+ kfree(els_data);
+ }
+
+ return;
+}
+
+
+/**
+ * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine is used for processing the IOCB associated with a unsolicited
+ * event. It first determines whether there is an existing ndlp that matches
+ * the DID from the unsolicited IOCB. If not, it will create a new one with
+ * the DID from the unsolicited IOCB. The ELS command from the unsolicited
+ * IOCB is then used to invoke the proper routine and to set up proper state
+ * of the discovery state machine.
+ **/
+static void
+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_nodelist *ndlp;
+ struct ls_rjt stat;
+ uint32_t *payload;
+ uint32_t cmd, did, newnode;
+ uint8_t rjt_exp, rjt_err = 0;
+ IOCB_t *icmd = &elsiocb->iocb;
+
+ if (!vport || !(elsiocb->context2))
+ goto dropit;
+
+ newnode = 0;
+ payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ cmd = *payload;
+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+ lpfc_post_buffer(phba, pring, 1);
+
+ did = icmd->un.rcvels.remoteID;
+ if (icmd->ulpStatus) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV Unsol ELS: status:x%x/x%x did:x%x",
+ icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ goto dropit;
+ }
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(vport))
+ goto dropit;
+
+ /* Ignore traffic received during vport shutdown. */
+ if (vport->load_flag & FC_UNLOADING)
+ goto dropit;
+
+ /* If NPort discovery is delayed drop incoming ELS */
+ if ((vport->fc_flag & FC_DISC_DELAYED) &&
+ (cmd != ELS_CMD_PLOGI))
+ goto dropit;
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ goto dropit;
+
+ lpfc_nlp_init(vport, ndlp, did);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ newnode = 1;
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+ ndlp->nlp_type |= NLP_FABRIC;
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto dropit;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ newnode = 1;
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+ ndlp->nlp_type |= NLP_FABRIC;
+ } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* This is similar to the new node path */
+ ndlp = lpfc_nlp_get(ndlp);
+ if (!ndlp)
+ goto dropit;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ newnode = 1;
+ }
+
+ phba->fc_stat.elsRcvFrame++;
+
+ /*
+ * Do not process any unsolicited ELS commands
+ * if the ndlp is in DEV_LOSS
+ */
+ if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
+ goto dropit;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ elsiocb->vport = vport;
+
+ if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+ cmd &= ELS_CMD_MASK;
+ }
+ /* ELS command <elsCmd> received from NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0112 ELS command x%x received from NPORT x%x "
+ "Data: x%x x%x x%x x%x\n",
+ cmd, did, vport->port_state, vport->fc_flag,
+ vport->fc_myDID, vport->fc_prevDID);
+ switch (cmd) {
+ case ELS_CMD_PLOGI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvPLOGI++;
+ ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (phba->pport->fc_flag & FC_PT2PT)) {
+ vport->fc_prevDID = vport->fc_myDID;
+ /* Our DID needs to be updated before registering
+ * the vfi. This is done in lpfc_rcv_plogi but
+ * that is called after the reg_vfi.
+ */
+ vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3312 Remote port assigned DID x%x "
+ "%x\n", vport->fc_myDID,
+ vport->fc_prevDID);
+ }
+
+ lpfc_send_els_event(vport, ndlp, payload);
+
+ /* If Nport discovery is delayed, reject PLOGIs */
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ shost = lpfc_shost_from_vport(vport);
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ if (!(phba->pport->fc_flag & FC_PT2PT) ||
+ (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ /* We get here, and drop thru, if we are PT2PT with
+ * another NPort and the other side has initiated
+ * the PLOGI before responding to our FLOGI.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (phba->fc_topology_changed ||
+ vport->fc_myDID != vport->fc_prevDID)) {
+ lpfc_unregister_fcf_prep(phba);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ phba->fc_topology_changed = 0;
+ lpfc_issue_reg_vfi(vport);
+ }
+ }
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_PLOGI);
+
+ break;
+ case ELS_CMD_FLOGI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvFLOGI++;
+ lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_LOGO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV LOGO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvLOGO++;
+ lpfc_send_els_event(vport, ndlp, payload);
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ break;
+ case ELS_CMD_PRLO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PRLO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvPRLO++;
+ lpfc_send_els_event(vport, ndlp, payload);
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+ break;
+ case ELS_CMD_RSCN:
+ phba->fc_stat.elsRcvRSCN++;
+ lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_ADISC:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ADISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_send_els_event(vport, ndlp, payload);
+ phba->fc_stat.elsRcvADISC++;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_ADISC);
+ break;
+ case ELS_CMD_PDISC:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PDISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvPDISC++;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_PDISC);
+ break;
+ case ELS_CMD_FARPR:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FARPR: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvFARPR++;
+ lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
+ break;
+ case ELS_CMD_FARP:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FARP: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvFARP++;
+ lpfc_els_rcv_farp(vport, elsiocb, ndlp);
+ break;
+ case ELS_CMD_FAN:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FAN: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvFAN++;
+ lpfc_els_rcv_fan(vport, elsiocb, ndlp);
+ break;
+ case ELS_CMD_PRLI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PRLI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvPRLI++;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+ break;
+ case ELS_CMD_LIRR:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV LIRR: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvLIRR++;
+ lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_RLS:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RLS: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRLS++;
+ lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_RPS:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RPS: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRPS++;
+ lpfc_els_rcv_rps(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_RPL:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RPL: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRPL++;
+ lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_RNID:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RNID: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRNID++;
+ lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_RTV:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RTV: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+ phba->fc_stat.elsRcvRTV++;
+ lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_RRQ:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RRQ: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRRQ++;
+ lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_ECHO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ECHO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvECHO++;
+ lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ case ELS_CMD_REC:
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
+ break;
+ default:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
+ cmd, did, vport->port_state);
+
+ /* Unsupported ELS command, reject */
+ rjt_err = LSRJT_CMD_UNSUPPORTED;
+ rjt_exp = LSEXP_NOTHING_MORE;
+
+ /* Unknown ELS command <elsCmd> received from NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0115 Unknown ELS command x%x "
+ "received from NPORT x%x\n", cmd, did);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
+ }
+
+ /* check if need to LS_RJT received ELS cmd */
+ if (rjt_err) {
+ memset(&stat, 0, sizeof(stat));
+ stat.un.b.lsRjtRsnCode = rjt_err;
+ stat.un.b.lsRjtRsnCodeExp = rjt_exp;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
+ NULL);
+ }
+
+ lpfc_nlp_put(elsiocb->context1);
+ elsiocb->context1 = NULL;
+ return;
+
+dropit:
+ if (vport && !(vport->load_flag & FC_UNLOADING))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0111 Dropping received ELS cmd "
+ "Data: x%x x%x x%x\n",
+ icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+ phba->fc_stat.elsRcvDrop++;
+}
+
+/**
+ * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @elsiocb: pointer to lpfc els iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
+ * SLI ring on which the unsolicited event was received.
+ **/
+void
+lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *elsiocb)
+{
+ struct lpfc_vport *vport = phba->pport;
+ IOCB_t *icmd = &elsiocb->iocb;
+ dma_addr_t paddr;
+ struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
+ struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+
+ elsiocb->context1 = NULL;
+ elsiocb->context2 = NULL;
+ elsiocb->context3 = NULL;
+
+ if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_RCV_BUFFER_WAITING) {
+ phba->fc_stat.NoRcvBuf++;
+ /* Not enough posted buffers; Try posting more buffers */
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba, pring, 0);
+ return;
+ }
+
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else
+ vport = lpfc_find_vport_by_vpid(phba,
+ icmd->unsli3.rcvsli3.vpi);
+ }
+
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
+ */
+ if (icmd->ulpBdeCount == 0)
+ return;
+
+ /* type of ELS cmd is first 32bit word
+ * in packet
+ */
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ elsiocb->context2 = bdeBuf1;
+ } else {
+ paddr = getPaddr(icmd->un.cont64[0].addrHigh,
+ icmd->un.cont64[0].addrLow);
+ elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ }
+
+ lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+ /*
+ * The different unsolicited event handlers would tell us
+ * if they are done with "mp" by setting context2 to NULL.
+ */
+ if (elsiocb->context2) {
+ lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
+ elsiocb->context2 = NULL;
+ }
+
+ /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
+ icmd->ulpBdeCount == 2) {
+ elsiocb->context2 = bdeBuf2;
+ lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+ /* free mp if we are done with it */
+ if (elsiocb->context2) {
+ lpfc_in_buf_free(phba, elsiocb->context2);
+ elsiocb->context2 = NULL;
+ }
+ }
+}
+
+/**
+ * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine issues a Port Login (PLOGI) to the Name Server with
+ * State Change Request (SCR) for a @vport. This routine will create an
+ * ndlp for the Name Server associated to the @vport if such node does
+ * not already exist. The PLOGI to Name Server is issued by invoking the
+ * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
+ * (FDMI) is configured to the @vport, a FDMI node will be created and
+ * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
+ **/
+void
+lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * If lpfc_delay_discovery parameter is set and the clean address
+ * bit is cleared and fc fabric parameters chenged, delay FC NPort
+ * discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ spin_unlock_irq(shost->host_lock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "3334 Delay fc port discovery for %d seconds\n",
+ phba->fc_ratov);
+ mod_timer(&vport->delayed_disc_tmo,
+ jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+ return;
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0251 NameServer login: no memory\n");
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, NameServer_DID);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0348 NameServer login: node freed\n");
+ return;
+ }
+ }
+ ndlp->nlp_type |= NLP_FABRIC;
+
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+ if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0252 Cannot issue NameServer login\n");
+ return;
+ }
+
+ if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) {
+ /* If this is the first time, allocate an ndlp and initialize
+ * it. Otherwise, make sure the node is enabled and then do the
+ * login.
+ */
+ ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp_fdmi) {
+ ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+ GFP_KERNEL);
+ if (ndlp_fdmi) {
+ lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+ ndlp_fdmi->nlp_type |= NLP_FABRIC;
+ } else
+ return;
+ }
+ if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
+ ndlp_fdmi = lpfc_enable_node(vport,
+ ndlp_fdmi,
+ NLP_STE_NPR_NODE);
+
+ if (ndlp_fdmi) {
+ lpfc_nlp_set_state(vport, ndlp_fdmi,
+ NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
+ }
+ }
+}
+
+/**
+ * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function to register new vport
+ * mailbox command. If the new vport mailbox command completes successfully,
+ * the fabric registration login shall be performed on physical port (the
+ * new vport created is actually a physical port, with VPI 0) or the port
+ * login to Name Server for State Change Request (SCR) will be performed
+ * on virtual port (real virtual port, with VPI greater than 0).
+ **/
+static void
+lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ MAILBOX_t *mb = &pmb->u.mb;
+ int rc;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+
+ if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0915 Register VPI failed : Status: x%x"
+ " upd bit: x%x \n", mb->mbxStatus,
+ mb->un.varRegVpi.upd);
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ mb->un.varRegVpi.upd)
+ goto mbox_err_exit ;
+
+ switch (mb->mbxStatus) {
+ case 0x11: /* unsupported feature */
+ case 0x9603: /* max_vpi exceeded */
+ case 0x9602: /* Link event since CLEAR_LA */
+ /* giving up on vport registration */
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ break;
+ /* If reg_vpi fail with invalid VPI status, re-init VPI */
+ case 0x20:
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_init_vpi(phba, pmb, vport->vpi);
+ pmb->vport = vport;
+ pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb,
+ MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vport,
+ KERN_ERR, LOG_MBOX,
+ "2732 Failed to issue INIT_VPI"
+ " mailbox command\n");
+ } else {
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+
+ default:
+ /* Try to recover from this error */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ if (vport->port_type == LPFC_PHYSICAL_PORT
+ && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ lpfc_issue_init_vfi(vport);
+ else
+ lpfc_initial_fdisc(vport);
+ break;
+ }
+ } else {
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ if (vport == phba->pport) {
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_issue_fabric_reglogin(vport);
+ else {
+ /*
+ * If the physical port is instantiated using
+ * FDISC, do not start vport discovery.
+ */
+ if (vport->port_state != LPFC_FDISC)
+ lpfc_start_fdiscs(phba);
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+ } else
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+mbox_err_exit:
+ /* Now, we decrement the ndlp reference count held for this
+ * callback function
+ */
+ lpfc_nlp_put(ndlp);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_register_new_vport - Register a new vport with a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine registers the @vport as a new virtual port with a HBA.
+ * It is done through a registering vpi mailbox command.
+ **/
+void
+lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_reg_vpi(vport, mbox);
+ mbox->vport = vport;
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ == MBX_NOT_FINISHED) {
+ /* mailbox command not success, decrement ndlp
+ * reference count for this command
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0253 Register VPI: Can't send mbox\n");
+ goto mbox_err_exit;
+ }
+ } else {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0254 Register VPI: no memory\n");
+ goto mbox_err_exit;
+ }
+ return;
+
+mbox_err_exit:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ return;
+}
+
+/**
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine cancels the retry delay timers to all the vports.
+ **/
+void
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_nodelist *ndlp;
+ uint32_t link_state;
+ int i;
+
+ /* Treat this failure as linkdown for all vports */
+ link_state = phba->link_state;
+ lpfc_linkdown(phba);
+ phba->link_state = link_state;
+
+ vports = lpfc_create_vport_work_array(phba);
+
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+ if (ndlp)
+ lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+ lpfc_els_flush_cmd(vports[i]);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine abort all pending discovery commands and
+ * start a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+
+ /* Cancel the all vports retry delay retry timers */
+ lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+ /* If fabric require FLOGI, then re-instantiate physical login */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (!ndlp)
+ return;
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+ phba->pport->port_state = LPFC_FLOGI;
+ return;
+}
+
+/**
+ * lpfc_fabric_login_reqd - Check if FLOGI required.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to FDISC command iocb.
+ * @rspiocb: pointer to FDISC response iocb.
+ *
+ * This routine checks if a FLOGI is reguired for FDISC
+ * to succeed.
+ **/
+static int
+lpfc_fabric_login_reqd(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+
+ if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
+ (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+ return 0;
+ else
+ return 1;
+}
+
+/**
+ * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to a Fabric Discover
+ * (FDISC) ELS command. Since all the FDISC ELS commands are issued
+ * single threaded, each FDISC completion callback function will reset
+ * the discovery timer for all vports such that the timers will not get
+ * unnecessary timeout. The function checks the FDISC IOCB status. If error
+ * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
+ * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
+ * assigned to the vport has been changed with the completion of the FDISC
+ * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
+ * are unregistered from the HBA, and then the lpfc_register_new_vport()
+ * routine is invoked to register new vport with the HBA. Otherwise, the
+ * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
+ * Server for State Change Request (SCR).
+ **/
+static void
+lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *np;
+ struct lpfc_nodelist *next_np;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_iocbq *piocb;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ uint8_t fabric_param_changed;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->fc_prevDID);
+ /* Since all FDISCs are being single threaded, we
+ * must reset the discovery timer for ALL vports
+ * waiting to send FDISC when one completes.
+ */
+ list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
+ lpfc_set_disctmo(piocb->vport);
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "FDISC cmpl: status:x%x/x%x prevdid:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+ if (irsp->ulpStatus) {
+
+ if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
+ lpfc_retry_pport_discovery(phba);
+ goto out;
+ }
+
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+ goto out;
+ /* FDISC failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0126 FDISC failed. (x%x/x%x)\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto fdisc_failed;
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
+ vport->fc_flag |= FC_FABRIC;
+ if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
+
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+ sp = prsp->virt + sizeof(uint32_t);
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ if (fabric_param_changed &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed so we can
+ * issue unreg_vpi.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) ||
+ (np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
+ spin_lock_irq(shost->host_lock);
+ np->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
+ }
+ lpfc_cleanup_pending_mbox(vport);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ else
+ vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
+ spin_unlock_irq(shost->host_lock);
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /*
+ * Driver needs to re-reg VPI in order for f/w
+ * to update the MAC address.
+ */
+ lpfc_register_new_vport(phba, vport, ndlp);
+ goto out;
+ }
+
+ if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+ lpfc_issue_init_vpi(vport);
+ else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
+ goto out;
+fdisc_failed:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ /* Cancel discovery timer */
+ lpfc_can_disctmo(vport);
+ lpfc_nlp_put(ndlp);
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_issue_els_fdisc - Issue a fdisc iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
+ * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
+ * routine to issue the IOCB, which makes sure only one outstanding fabric
+ * IOCB will be sent off HBA at any given time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FDISC ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued fdisc iocb command
+ * 1 - Failed to issue fdisc iocb command
+ **/
+static int
+lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct serv_parm *sp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int did = ndlp->nlp_DID;
+ int rc;
+
+ vport->port_state = LPFC_FDISC;
+ vport->fc_myDID = 0;
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+ ELS_CMD_FDISC);
+ if (!elsiocb) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0255 Issue FDISC: no IOCB\n");
+ return 1;
+ }
+
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+
+ /*
+ * SLI3 ports require a different context type value than SLI4.
+ * Catch SLI3 ports here and override the prep.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ }
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
+ pcmd += sizeof(uint32_t); /* CSP Word 1 */
+ memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+ /* Setup CSPs accordingly for Fabric */
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
+ sp->cls1.classValid = 0;
+ sp->cls2.seqDelivery = 1;
+ sp->cls3.seqDelivery = 1;
+
+ pcmd += sizeof(uint32_t); /* CSP Word 2 */
+ pcmd += sizeof(uint32_t); /* CSP Word 3 */
+ pcmd += sizeof(uint32_t); /* CSP Word 4 */
+ pcmd += sizeof(uint32_t); /* Port Name */
+ memcpy(pcmd, &vport->fc_portname, 8);
+ pcmd += sizeof(uint32_t); /* Node Name */
+ pcmd += sizeof(uint32_t); /* Node Name */
+ memcpy(pcmd, &vport->fc_nodename, 8);
+
+ lpfc_set_disctmo(vport);
+
+ phba->fc_stat.elsXmitFDISC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FDISC: did:x%x",
+ did, 0, 0);
+
+ rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0256 Issue FDISC: Cannot send IOCB\n");
+ return 1;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+ return 0;
+}
+
+/**
+ * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the issuing of a LOGO
+ * ELS command off a vport. It frees the command IOCB and then decrement the
+ * reference count held on ndlp for this completion function, indicating that
+ * the reference to the ndlp is no long needed. Note that the
+ * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
+ * callback function and an additional explicit ndlp reference decrementation
+ * will trigger the actual release of the ndlp.
+ **/
+static void
+lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO npiv cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
+
+ /* Trigger the release of the ndlp after logo */
+ lpfc_nlp_put(ndlp);
+
+ /* NPIV LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2928 NPIV LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, vport->num_disc_nodes);
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_FABRIC;
+ spin_unlock_irq(shost->host_lock);
+ }
+}
+
+/**
+ * lpfc_issue_els_npiv_logo - Issue a logo off a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a LOGO ELS command to an @ndlp off a @vport.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return codes
+ * 0 - Successfully issued logo off the @vport
+ * 1 - Failed to issue logo off the @vport
+ **/
+int
+lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
+ ELS_CMD_LOGO);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+ pcmd += sizeof(uint32_t);
+
+ /* Fill in LOGO payload */
+ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO npiv did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_fabric_block_timeout - Handler function to the fabric block timer
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the fabric iocb block timer after
+ * timeout. It posts the fabric iocb block timeout event by setting the
+ * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
+ * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
+ * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
+ * posted event WORKER_FABRIC_BLOCK_TMO.
+ **/
+void
+lpfc_fabric_block_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ unsigned long iflags;
+ uint32_t tmo_posted;
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+ tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues one fabric iocb from the driver internal list to
+ * the HBA. It first checks whether it's ready to issue one fabric iocb to
+ * the HBA (whether there is no outstanding fabric iocb). If so, it shall
+ * remove one pending fabric iocb from the driver internal list and invokes
+ * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
+ **/
+static void
+lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *iocb;
+ unsigned long iflags;
+ int ret;
+ IOCB_t *cmd;
+
+repeat:
+ iocb = NULL;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Post any pending iocb to the SLI layer */
+ if (atomic_read(&phba->fabric_iocb_count) == 0) {
+ list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
+ list);
+ if (iocb)
+ /* Increment fabric iocb count to hold the position */
+ atomic_inc(&phba->fabric_iocb_count);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb) {
+ iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+ iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+ lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+ iocb->fabric_iocb_cmpl = NULL;
+ iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ cmd = &iocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ iocb->iocb_cmpl(phba, iocb, iocb);
+
+ atomic_dec(&phba->fabric_iocb_count);
+ goto repeat;
+ }
+ }
+
+ return;
+}
+
+/**
+ * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine unblocks the issuing fabric iocb command. The function
+ * will clear the fabric iocb block bit and then invoke the routine
+ * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
+ * from the driver internal fabric iocb list.
+ **/
+void
+lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
+{
+ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+ lpfc_resume_fabric_iocbs(phba);
+ return;
+}
+
+/**
+ * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine blocks the issuing fabric iocb for a specified amount of
+ * time (currently 100 ms). This is done by set the fabric iocb block bit
+ * and set up a timeout timer for 100ms. When the block bit is set, no more
+ * fabric iocb will be issued out of the HBA.
+ **/
+static void
+lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
+{
+ int blocked;
+
+ blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ /* Start a timer to unblock fabric iocbs after 100ms */
+ if (!blocked)
+ mod_timer(&phba->fabric_block_timer,
+ jiffies + msecs_to_jiffies(100));
+
+ return;
+}
+
+/**
+ * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the callback function that is put to the fabric iocb's
+ * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * function first restores and invokes the original iocb's callback function
+ * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
+ * fabric bound iocb from the driver internal fabric iocb list onto the wire.
+ **/
+static void
+lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct ls_rjt stat;
+
+ if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
+ BUG();
+
+ switch (rspiocb->iocb.ulpStatus) {
+ case IOSTAT_NPORT_RJT:
+ case IOSTAT_FABRIC_RJT:
+ if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ lpfc_block_fabric_iocbs(phba);
+ }
+ break;
+
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ lpfc_block_fabric_iocbs(phba);
+ break;
+
+ case IOSTAT_LS_RJT:
+ stat.un.lsRjtError =
+ be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
+ (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
+ lpfc_block_fabric_iocbs(phba);
+ break;
+ }
+
+ if (atomic_read(&phba->fabric_iocb_count) == 0)
+ BUG();
+
+ cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
+ cmdiocb->fabric_iocb_cmpl = NULL;
+ cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+
+ atomic_dec(&phba->fabric_iocb_count);
+ if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
+ /* Post any pending iocbs to HBA */
+ lpfc_resume_fabric_iocbs(phba);
+ }
+}
+
+/**
+ * lpfc_issue_fabric_iocb - Issue a fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ * @iocb: pointer to lpfc command iocb data structure.
+ *
+ * This routine is used as the top-level API for issuing a fabric iocb command
+ * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
+ * function makes sure that only one fabric bound iocb will be outstanding at
+ * any given time. As such, this function will first check to see whether there
+ * is already an outstanding fabric iocb on the wire. If so, it will put the
+ * newly issued iocb onto the driver internal fabric iocb list, waiting to be
+ * issued later. Otherwise, it will issue the iocb on the wire and update the
+ * fabric iocb count it indicate that there is one fabric iocb on the wire.
+ *
+ * Note, this implementation has a potential sending out fabric IOCBs out of
+ * order. The problem is caused by the construction of the "ready" boolen does
+ * not include the condition that the internal fabric IOCB list is empty. As
+ * such, it is possible a fabric IOCB issued by this routine might be "jump"
+ * ahead of the fabric IOCBs in the internal list.
+ *
+ * Return code
+ * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
+ * IOCB_ERROR - failed to issue fabric iocb
+ **/
+static int
+lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
+{
+ unsigned long iflags;
+ int ready;
+ int ret;
+
+ if (atomic_read(&phba->fabric_iocb_count) > 1)
+ BUG();
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
+ !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+ if (ready)
+ /* Increment fabric iocb count to hold the position */
+ atomic_inc(&phba->fabric_iocb_count);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (ready) {
+ iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+ iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+ lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+ iocb->fabric_iocb_cmpl = NULL;
+ iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ atomic_dec(&phba->fabric_iocb_count);
+ }
+ } else {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&iocb->list, &phba->fabric_iocb_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ ret = IOCB_SUCCESS;
+ }
+ return ret;
+}
+
+/**
+ * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine aborts all the IOCBs associated with a @vport from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @vport off the list, set the
+ * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+
+ if (piocb->vport != vport)
+ continue;
+
+ list_move_tail(&piocb->list, &completions);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine aborts all the IOCBs associated with an @ndlp from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @ndlp off the list, set the
+ * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = ndlp->phba;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+ if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
+
+ list_move_tail(&piocb->list, &completions);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the IOCBs currently on the driver internal
+ * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
+ * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
+ * list, removes IOCBs off the list, set the status feild to
+ * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
+ * the IOCB.
+ **/
+void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->fabric_iocb_list, &completions);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ sglq_entry->ndlp = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ uint16_t lxri = 0;
+
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->sli4_xritag == xri) {
+ list_del(&sglq_entry->list);
+ ndlp = sglq_entry->ndlp;
+ sglq_entry->ndlp = NULL;
+ spin_lock(&pring->ring_lock);
+ list_add_tail(&sglq_entry->list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ sglq_entry->state = SGL_FREED;
+ spin_unlock(&pring->ring_lock);
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_set_rrq_active(phba, ndlp,
+ sglq_entry->sli4_lxritag,
+ rxid, 1);
+
+ /* Check if TXQ queue needs to be serviced */
+ if (!(list_empty(&pring->txq)))
+ lpfc_worker_wake_up(phba);
+ return;
+ }
+ }
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ lxri = lpfc_sli4_xri_inrange(phba, xri);
+ if (lxri == NO_XRI) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+ }
+ spin_lock(&pring->ring_lock);
+ sglq_entry = __lpfc_get_active_sglq(phba, lxri);
+ if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+ }
+ sglq_entry->state = SGL_XRI_ABORTED;
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted node.
+ *
+ * The driver calls this routine in response to an SLI4 XRI ABORT CQE
+ * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
+ * the driver is required to send a LOGO to the remote node before it
+ * attempts to recover its login to the remote node.
+ */
+void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+ unsigned long flags = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba = vport->phba;
+ if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI, "3093 No rport recovery needed. "
+ "rport in state 0x%x\n", ndlp->nlp_state);
+ return;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3094 Start rport recovery on shost id 0x%x "
+ "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+ "flags 0x%x\n",
+ shost->host_no, ndlp->nlp_DID,
+ vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ /*
+ * The rport is not responding. Remove the FCP-2 flag to prevent
+ * an ADISC in the follow-up recovery code.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+}
+
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
new file mode 100644
index 000000000..2500f15d4
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -0,0 +1,6423 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* AlpaArray for assignment of scsid for scan-down and bind_method */
+static uint8_t lpfcAlpaArray[] = {
+ 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
+ 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
+ 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
+ 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
+ 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
+ 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
+ 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
+ 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
+ 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
+ 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
+ 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
+ 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
+ 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
+};
+
+static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+static void lpfc_disc_flush_list(struct lpfc_vport *vport);
+static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
+
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist * ndlp;
+ struct lpfc_hba *phba;
+
+ rdata = rport->dd_data;
+ ndlp = rdata->pnode;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+ printk(KERN_ERR "Cannot find remote node"
+ " to terminate I/O Data x%x\n",
+ rport->port_id);
+ return;
+ }
+
+ phba = ndlp->phba;
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ if (ndlp->nlp_sid != NLP_NO_SID) {
+ lpfc_sli_abort_iocb(ndlp->vport,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ }
+}
+
+/*
+ * This function will be called when dev_loss_tmo fire.
+ */
+void
+lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist * ndlp;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct lpfc_work_evt *evtp;
+ int put_node;
+ int put_rport;
+
+ rdata = rport->dd_data;
+ ndlp = rdata->pnode;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ vport = ndlp->vport;
+ phba = vport->phba;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlosscb: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
+ /* Don't defer this if we are in the process of deleting the vport
+ * or unloading the driver. The unload will cleanup the node
+ * appropriately we just need to cleanup the ndlp rport info here.
+ */
+ if (vport->load_flag & FC_UNLOADING) {
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return;
+ }
+
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ return;
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+
+ /* If the WWPN of the rport and ndlp don't match, ignore it */
+ if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "6789 rport name %lx != node port name %lx",
+ (unsigned long)rport->port_name,
+ (unsigned long)wwn_to_u64(
+ ndlp->nlp_portname.u.wwn));
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ put_device(&rport->dev);
+ return;
+ }
+
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return;
+ }
+
+ evtp = &ndlp->dev_loss_evt;
+
+ if (!list_empty(&evtp->evt_listp))
+ return;
+
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS;
+
+ spin_lock_irq(&phba->hbalock);
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ return;
+}
+
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when devloss timeout timer
+ * expires. For SLI4 host, this routine shall return 1 when at lease one
+ * remote node, including this @ndlp, is still in use of FCF; otherwise, this
+ * routine shall return 0 when there is no remote node is still in use of FCF
+ * when devloss timeout happened to this @ndlp.
+ **/
+static int
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_rport_data *rdata;
+ struct fc_rport *rport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ uint8_t *name;
+ int put_node;
+ int put_rport;
+ int warn_on = 0;
+ int fcf_inuse = 0;
+
+ rport = ndlp->rport;
+
+ if (!rport) {
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ return fcf_inuse;
+ }
+
+ rdata = rport->dd_data;
+ name = (uint8_t *) &ndlp->nlp_portname;
+ vport = ndlp->vport;
+ phba = vport->phba;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ fcf_inuse = lpfc_fcf_inuse(phba);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
+ /* Don't defer this if we are in the process of deleting the vport
+ * or unloading the driver. The unload will cleanup the node
+ * appropriately we just need to cleanup the ndlp rport info here.
+ */
+ if (vport->load_flag & FC_UNLOADING) {
+ if (ndlp->nlp_sid != NLP_NO_SID) {
+ /* flush the target */
+ lpfc_sli_abort_iocb(vport,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ }
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return fcf_inuse;
+ }
+
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0284 Devloss timeout Ignored on "
+ "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+ "NPort x%x\n",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7),
+ ndlp->nlp_DID);
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ return fcf_inuse;
+ }
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ /* We will clean up these Nodes in linkup */
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return fcf_inuse;
+ }
+
+ if (ndlp->nlp_sid != NLP_NO_SID) {
+ warn_on = 1;
+ /* flush the target */
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ }
+
+ if (warn_on) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0203 Devloss timeout on "
+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+ "NPort x%06x Data: x%x x%x x%x\n",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7),
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0204 Devloss timeout on "
+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+ "NPort x%06x Data: x%x x%x x%x\n",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7),
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+ }
+
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+ (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
+ (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+ return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifer with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for SLI4 host. For the devloss
+ * timeout of the last remote node which had been in use of FCF, when this
+ * routine is invoked, it shall be guaranteed that none of the remote are
+ * in-use of FCF. When devloss timeout to the last remote using the FCF,
+ * if the FIP engine is neither in FCF table scan process nor roundrobin
+ * failover process, the in-use FCF shall be unregistered. If the FIP
+ * engine is in FCF discovery process, the devloss timeout state shall
+ * be set for either the FCF table scan process or roundrobin failover
+ * process to unregister the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+ uint32_t nlp_did)
+{
+ /* If devloss timeout happened to a remote node when FCF had no
+ * longer been in-use, do nothing.
+ */
+ if (!fcf_inuse)
+ return;
+
+ if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ phba->hba_flag |= HBA_DEVLOSS_TMO;
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2847 Last remote node (x%x) using "
+ "FCF devloss tmo\n", nlp_did);
+ }
+ if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2868 Devloss tmo to FCF rediscovery "
+ "in progress\n");
+ return;
+ }
+ if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2869 Devloss tmo to idle FIP engine, "
+ "unreg in-use FCF and rescan.\n");
+ /* Unregister in-use FCF and rescan */
+ lpfc_unregister_fcf_rescan(phba);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_TS_INPROG)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2870 FCF table scan in progress\n");
+ if (phba->hba_flag & FCF_RR_INPROG)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2871 FLOGI roundrobin FCF failover "
+ "in progress\n");
+ }
+ lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_alloc_fast_evt - Allocates data structure for posting event
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the functions which need to post
+ * events from interrupt context. This function allocates data
+ * structure required for posting event. It also keeps track of
+ * number of events pending and prevent event storm when there are
+ * too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+ struct lpfc_fast_path_event *ret;
+
+ /* If there are lot of fast event do not exhaust memory due to this */
+ if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+ return NULL;
+
+ ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+ GFP_ATOMIC);
+ if (ret) {
+ atomic_inc(&phba->fast_event_count);
+ INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+ ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ }
+ return ret;
+}
+
+/**
+ * lpfc_free_fast_evt - Frees event data structure
+ * @phba: Pointer to hba context object.
+ * @evt: Event object which need to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+ struct lpfc_fast_path_event *evt) {
+
+ atomic_dec(&phba->fast_event_count);
+ kfree(evt);
+}
+
+/**
+ * lpfc_send_fastpath_evt - Posts events generated from fast path
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from worker thread, when the interrupt
+ * context need to post an event. This function posts the event
+ * to fc transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+ struct lpfc_work_evt *evtp)
+{
+ unsigned long evt_category, evt_sub_category;
+ struct lpfc_fast_path_event *fast_evt_data;
+ char *evt_data;
+ uint32_t evt_data_size;
+ struct Scsi_Host *shost;
+
+ fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+ work_evt);
+
+ evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+ evt_sub_category = (unsigned long) fast_evt_data->un.
+ fabric_evt.subcategory;
+ shost = lpfc_shost_from_vport(fast_evt_data->vport);
+ if (evt_category == FC_REG_FABRIC_EVENT) {
+ if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+ evt_data = (char *) &fast_evt_data->un.read_check_error;
+ evt_data_size = sizeof(fast_evt_data->un.
+ read_check_error);
+ } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+ (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+ evt_data = (char *) &fast_evt_data->un.fabric_evt;
+ evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+ } else {
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+ } else if (evt_category == FC_REG_SCSI_EVENT) {
+ switch (evt_sub_category) {
+ case LPFC_EVENT_QFULL:
+ case LPFC_EVENT_DEVBSY:
+ evt_data = (char *) &fast_evt_data->un.scsi_evt;
+ evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+ break;
+ case LPFC_EVENT_CHECK_COND:
+ evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+ evt_data_size = sizeof(fast_evt_data->un.
+ check_cond_evt);
+ break;
+ case LPFC_EVENT_VARQUEDEPTH:
+ evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+ evt_data_size = sizeof(fast_evt_data->un.
+ queue_depth_evt);
+ break;
+ default:
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+ } else {
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ evt_data_size,
+ evt_data,
+ LPFC_NL_VENDOR_ID);
+
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+}
+
+static void
+lpfc_work_list_done(struct lpfc_hba *phba)
+{
+ struct lpfc_work_evt *evtp = NULL;
+ struct lpfc_nodelist *ndlp;
+ int free_evt;
+ int fcf_inuse;
+ uint32_t nlp_did;
+
+ spin_lock_irq(&phba->hbalock);
+ while (!list_empty(&phba->work_list)) {
+ list_remove_head((&phba->work_list), evtp, typeof(*evtp),
+ evt_listp);
+ spin_unlock_irq(&phba->hbalock);
+ free_evt = 1;
+ switch (evtp->evt) {
+ case LPFC_EVT_ELS_RETRY:
+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ /* decrement the node reference count held
+ * for this queued work
+ */
+ lpfc_nlp_put(ndlp);
+ break;
+ case LPFC_EVT_DEV_LOSS:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
+ free_evt = 0;
+ /* decrement the node reference count held for
+ * this queued work
+ */
+ nlp_did = ndlp->nlp_DID;
+ lpfc_nlp_put(ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_post_dev_loss_tmo_handler(phba,
+ fcf_inuse,
+ nlp_did);
+ break;
+ case LPFC_EVT_ONLINE:
+ if (phba->link_state < LPFC_LINK_DOWN)
+ *(int *) (evtp->evt_arg1) = lpfc_online(phba);
+ else
+ *(int *) (evtp->evt_arg1) = 0;
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ case LPFC_EVT_OFFLINE_PREP:
+ if (phba->link_state >= LPFC_LINK_DOWN)
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ *(int *)(evtp->evt_arg1) = 0;
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ case LPFC_EVT_OFFLINE:
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+ *(int *)(evtp->evt_arg1) =
+ lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
+ lpfc_unblock_mgmt_io(phba);
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ case LPFC_EVT_WARM_START:
+ lpfc_offline(phba);
+ lpfc_reset_barrier(phba);
+ lpfc_sli_brdreset(phba);
+ lpfc_hba_down_post(phba);
+ *(int *)(evtp->evt_arg1) =
+ lpfc_sli_brdready(phba, HS_MBRDY);
+ lpfc_unblock_mgmt_io(phba);
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ case LPFC_EVT_KILL:
+ lpfc_offline(phba);
+ *(int *)(evtp->evt_arg1)
+ = (phba->pport->stopped)
+ ? 0 : lpfc_sli_brdkill(phba);
+ lpfc_unblock_mgmt_io(phba);
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ case LPFC_EVT_FASTPATH_MGMT_EVT:
+ lpfc_send_fastpath_evt(phba, evtp);
+ free_evt = 0;
+ break;
+ case LPFC_EVT_RESET_HBA:
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_reset_hba(phba);
+ break;
+ }
+ if (free_evt)
+ kfree(evtp);
+ spin_lock_irq(&phba->hbalock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+}
+
+static void
+lpfc_work_done(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ uint32_t ha_copy, status, control, work_port_events;
+ struct lpfc_vport **vports;
+ struct lpfc_vport *vport;
+ int i;
+
+ spin_lock_irq(&phba->hbalock);
+ ha_copy = phba->work_ha;
+ phba->work_ha = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* First, try to post the next mailbox command to SLI4 device */
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ lpfc_sli4_post_async_mbox(phba);
+
+ if (ha_copy & HA_ERATT)
+ /* Handle the error attention event */
+ lpfc_handle_eratt(phba);
+
+ if (ha_copy & HA_MBATT)
+ lpfc_sli_handle_mb_event(phba);
+
+ if (ha_copy & HA_LATT)
+ lpfc_handle_latt(phba);
+
+ /* Process SLI4 events */
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+ if (phba->hba_flag & HBA_RRQ_ACTIVE)
+ lpfc_handle_rrq_active(phba);
+ if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
+ lpfc_sli4_fcp_xri_abort_event_proc(phba);
+ if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+ lpfc_sli4_els_xri_abort_event_proc(phba);
+ if (phba->hba_flag & ASYNC_EVENT)
+ lpfc_sli4_async_event_proc(phba);
+ if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ }
+ if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
+ lpfc_sli4_fcf_redisc_event_proc(phba);
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports; i++) {
+ /*
+ * We could have no vports in array if unloading, so if
+ * this happens then just use the pport
+ */
+ if (vports[i] == NULL && i == 0)
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (vport == NULL)
+ break;
+ spin_lock_irq(&vport->work_port_lock);
+ work_port_events = vport->work_port_events;
+ vport->work_port_events &= ~work_port_events;
+ spin_unlock_irq(&vport->work_port_lock);
+ if (work_port_events & WORKER_DISC_TMO)
+ lpfc_disc_timeout_handler(vport);
+ if (work_port_events & WORKER_ELS_TMO)
+ lpfc_els_timeout_handler(vport);
+ if (work_port_events & WORKER_HB_TMO)
+ lpfc_hb_timeout_handler(phba);
+ if (work_port_events & WORKER_MBOX_TMO)
+ lpfc_mbox_timeout_handler(phba);
+ if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+ lpfc_unblock_fabric_iocbs(phba);
+ if (work_port_events & WORKER_FDMI_TMO)
+ lpfc_fdmi_timeout_handler(vport);
+ if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+ lpfc_ramp_down_queue_handler(phba);
+ if (work_port_events & WORKER_DELAYED_DISC_TMO)
+ lpfc_delayed_disc_timeout_handler(vport);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status >>= (4*LPFC_ELS_RING);
+ if ((status & HA_RXMASK) ||
+ (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+ (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+ if (pring->flag & LPFC_STOP_IOCB_EVENT) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ } else {
+ if (phba->link_state >= LPFC_LINK_UP) {
+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+ lpfc_sli_handle_slow_ring_event(phba, pring,
+ (status &
+ HA_RXMASK));
+ }
+ }
+ if ((phba->sli_rev == LPFC_SLI_REV4) &
+ (!list_empty(&pring->txq)))
+ lpfc_drain_txq(phba);
+ /*
+ * Turn on Ring interrupts
+ */
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ spin_lock_irq(&phba->hbalock);
+ control = readl(phba->HCregaddr);
+ if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "WRK Enable ring: cntl:x%x hacopy:x%x",
+ control, ha_copy, 0);
+
+ control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ } else {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "WRK Ring ok: cntl:x%x hacopy:x%x",
+ control, ha_copy, 0);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ }
+ }
+ lpfc_work_list_done(phba);
+}
+
+int
+lpfc_do_work(void *p)
+{
+ struct lpfc_hba *phba = p;
+ int rc;
+
+ set_user_nice(current, MIN_NICE);
+ current->flags |= PF_NOFREEZE;
+ phba->data_flags = 0;
+
+ while (!kthread_should_stop()) {
+ /* wait and check worker queue activities */
+ rc = wait_event_interruptible(phba->work_waitq,
+ (test_and_clear_bit(LPFC_DATA_READY,
+ &phba->data_flags)
+ || kthread_should_stop()));
+ /* Signal wakeup shall terminate the worker thread */
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "0433 Wakeup on signal: rc=x%x\n", rc);
+ break;
+ }
+
+ /* Attend pending lpfc data processing */
+ lpfc_work_done(phba);
+ }
+ phba->worker_thread = NULL;
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0432 Worker thread stopped.\n");
+ return 0;
+}
+
+/*
+ * This is only called to handle FC worker events. Since this a rare
+ * occurrence, we allocate a struct lpfc_work_evt structure here instead of
+ * embedding it in the IOCB.
+ */
+int
+lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
+ uint32_t evt)
+{
+ struct lpfc_work_evt *evtp;
+ unsigned long flags;
+
+ /*
+ * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
+ * be queued to worker thread for processing
+ */
+ evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
+ if (!evtp)
+ return 0;
+
+ evtp->evt_arg1 = arg1;
+ evtp->evt_arg2 = arg2;
+ evtp->evt = evt;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ lpfc_worker_wake_up(phba);
+
+ return 1;
+}
+
+void
+lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ int rc;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ (ndlp->nlp_DID == NameServer_DID)))
+ lpfc_unreg_rpi(vport, ndlp);
+
+ /* Leave Fabric nodes alone on link down */
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (!remove && ndlp->nlp_type & NLP_FABRIC))
+ continue;
+ rc = lpfc_disc_state_machine(vport, ndlp, NULL,
+ remove
+ ? NLP_EVT_DEVICE_RM
+ : NLP_EVT_DEVICE_RECOVERY);
+ }
+ if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
+}
+
+void
+lpfc_port_link_failure(struct lpfc_vport *vport)
+{
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+
+ /* Cleanup any outstanding received buffers */
+ lpfc_cleanup_rcv_buffers(vport);
+
+ /* Cleanup any outstanding RSCN activity */
+ lpfc_els_flush_rscn(vport);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(vport);
+
+ lpfc_cleanup_rpis(vport, 0);
+
+ /* Turn off discovery timer if its running */
+ lpfc_can_disctmo(vport);
+}
+
+void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Link Down: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+ lpfc_port_link_failure(vport);
+
+ /* Stop delayed Nport discovery */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&vport->delayed_disc_tmo);
+}
+
+int
+lpfc_linkdown(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_vport **vports;
+ LPFC_MBOXQ_t *mb;
+ int i;
+
+ if (phba->link_state == LPFC_LINK_DOWN)
+ return 0;
+
+ /* Block all SCSI stack I/Os */
+ lpfc_scsi_dev_block(phba);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ spin_unlock_irq(&phba->hbalock);
+ if (phba->link_state > LPFC_LINK_DOWN) {
+ phba->link_state = LPFC_LINK_DOWN;
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
+ }
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ /* Issue a LINK DOWN event to all nodes */
+ lpfc_linkdown_port(vports[i]);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ /* Clean up any firmware default rpi's */
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
+ mb->vport = vport;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+ == MBX_NOT_FINISHED) {
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+ }
+
+ /* Setup myDID for link up if we are in pt2pt mode */
+ if (phba->pport->fc_flag & FC_PT2PT) {
+ phba->pport->fc_myDID = 0;
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
+ lpfc_config_link(phba, mb);
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mb->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+ == MBX_NOT_FINISHED) {
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+ }
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return 0;
+}
+
+static void
+lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp;
+
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ /* On Linkup its safe to clean up the ndlp
+ * from Fabric connections.
+ */
+ if (ndlp->nlp_DID != Fabric_DID)
+ lpfc_unreg_rpi(vport, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ /* Fail outstanding IO now since device is
+ * marked for PLOGI.
+ */
+ lpfc_unreg_rpi(vport, ndlp);
+ }
+ }
+}
+
+static void
+lpfc_linkup_port(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ if ((vport->load_flag & FC_UNLOADING) != 0)
+ return;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Link Up: top:x%x speed:x%x flg:x%x",
+ phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
+
+ /* If NPIV is not enabled, only bring the physical port up */
+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (vport != phba->pport))
+ return;
+
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ vport->fc_flag |= FC_NDISC_ACTIVE;
+ vport->fc_ns_retry = 0;
+ spin_unlock_irq(shost->host_lock);
+
+ if (vport->fc_flag & FC_LBIT)
+ lpfc_linkup_cleanup_nodes(vport);
+
+}
+
+static int
+lpfc_linkup(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ phba->link_state = LPFC_LINK_UP;
+
+ /* Unblock fabric iocbs if they are blocked */
+ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ del_timer_sync(&phba->fabric_block_timer);
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ lpfc_linkup_port(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ return 0;
+}
+
+/*
+ * This routine handles processing a CLEAR_LA mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+static void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_sli *psli = &phba->sli;
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint32_t control;
+
+ /* Since we don't do discovery right now, turn these off here */
+ psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+
+ /* Check for error */
+ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
+ /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0320 CLEAR_LA mbxStatus error x%x hba "
+ "state x%x\n",
+ mb->mbxStatus, vport->port_state);
+ phba->link_state = LPFC_HBA_ERROR;
+ goto out;
+ }
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ phba->link_state = LPFC_HBA_READY;
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+
+out:
+ /* Device Discovery completes */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0225 Device Discovery completes\n");
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_ABORT_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_can_disctmo(vport);
+
+ /* turn on Link Attention interrupts */
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+
+ return;
+}
+
+
+static void
+lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+
+ if (pmb->u.mb.mbxStatus)
+ goto out;
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* don't perform discovery for SLI4 loopback diagnostic test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->link_flag & LS_LOOPBACK_MODE))
+ return;
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ vport->fc_flag & FC_PUBLIC_LOOP &&
+ !(vport->fc_flag & FC_LBIT)) {
+ /* Need to wait for FAN - use discovery timer
+ * for timeout. port_state is identically
+ * LPFC_LOCAL_CFG_LINK while waiting for FAN
+ */
+ lpfc_set_disctmo(vport);
+ return;
+ }
+
+ /* Start discovery by sending a FLOGI. port_state is identically
+ * LPFC_FLOGI while waiting for FLOGI cmpl
+ */
+ if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
+ lpfc_initial_flogi(vport);
+ return;
+
+out:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0306 CONFIG_LINK mbxStatus error x%x "
+ "HBA state x%x\n",
+ pmb->u.mb.mbxStatus, vport->port_state);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ lpfc_linkdown(phba);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0200 CONFIG_LINK bad hba state x%x\n",
+ vport->port_state);
+
+ lpfc_issue_clear_la(phba, vport);
+ return;
+}
+
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask
+ * @phba pointer to the struct lpfc_hba for this port.
+ * This fucnction resets the round robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
+ * from the lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ list_del_init(&fcf_pri->list);
+ fcf_pri->fcf_rec.flag = 0;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2017 REG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ goto fail_out;
+ }
+
+ /* Start FCoE discovery by sending a FLOGI. */
+ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+ /* Set the FCFI registered flag */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* If there is a pending FCoE event, restart FCF table scan. */
+ if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+ lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ goto fail_out;
+
+ /* Mark successful completion of FCF table scan */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ if (vport->port_state != LPFC_FLOGI) {
+ phba->hba_flag |= FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_issue_init_vfi(vport);
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+
+fail_out:
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric name match.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compare the fcf record's fabric name with provided
+ * fabric name. If the fabric name are identical this function
+ * returns 1 else return 0.
+ **/
+static uint32_t
+lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
+{
+ if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
+ return 0;
+ if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
+ return 0;
+ if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
+ return 0;
+ if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
+ return 0;
+ if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
+ return 0;
+ if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
+ return 0;
+ if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
+ return 0;
+ if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
+ return 0;
+ return 1;
+}
+
+/**
+ * lpfc_sw_name_match - Check if the fcf switch name match.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compare the fcf record's switch name with provided
+ * switch name. If the switch name are identical this function
+ * returns 1 else return 0.
+ **/
+static uint32_t
+lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
+{
+ if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
+ return 0;
+ if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
+ return 0;
+ if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
+ return 0;
+ if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
+ return 0;
+ if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
+ return 0;
+ if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
+ return 0;
+ if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
+ return 0;
+ if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
+ return 0;
+ return 1;
+}
+
+/**
+ * lpfc_mac_addr_match - Check if the fcf mac address match.
+ * @mac_addr: pointer to mac address.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compare the fcf record's mac address with HBA's
+ * FCF mac address. If the mac addresses are identical this function
+ * returns 1 else return 0.
+ **/
+static uint32_t
+lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
+{
+ if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
+ return 0;
+ if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
+ return 0;
+ if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
+ return 0;
+ if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
+ return 0;
+ if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
+ return 0;
+ if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
+ return 0;
+ return 1;
+}
+
+static bool
+lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
+{
+ return (curr_vlan_id == new_vlan_id);
+}
+
+/**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: Index for the lpfc_fcf_record.
+ * @new_fcf_record: pointer to hba fcf record.
+ *
+ * This routine updates the driver FCF priority record from the new HBA FCF
+ * record. This routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record
+ )
+{
+ struct lpfc_fcf_pri *fcf_pri;
+
+ fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ fcf_pri->fcf_rec.fcf_index = fcf_index;
+ /* FCF record priority */
+ fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+
+}
+
+/**
+ * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
+ * @fcf: pointer to driver fcf record.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine copies the FCF information from the FCF
+ * record to lpfc_hba data structure.
+ **/
+static void
+lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
+ struct fcf_record *new_fcf_record)
+{
+ /* Fabric name */
+ fcf_rec->fabric_name[0] =
+ bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
+ fcf_rec->fabric_name[1] =
+ bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
+ fcf_rec->fabric_name[2] =
+ bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
+ fcf_rec->fabric_name[3] =
+ bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
+ fcf_rec->fabric_name[4] =
+ bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
+ fcf_rec->fabric_name[5] =
+ bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
+ fcf_rec->fabric_name[6] =
+ bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
+ fcf_rec->fabric_name[7] =
+ bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
+ /* Mac address */
+ fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+ fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+ fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+ fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+ fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+ fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+ /* FCF record index */
+ fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ /* FCF record priority */
+ fcf_rec->priority = new_fcf_record->fip_priority;
+ /* Switch name */
+ fcf_rec->switch_name[0] =
+ bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
+ fcf_rec->switch_name[1] =
+ bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
+ fcf_rec->switch_name[2] =
+ bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
+ fcf_rec->switch_name[3] =
+ bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
+ fcf_rec->switch_name[4] =
+ bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
+ fcf_rec->switch_name[5] =
+ bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
+ fcf_rec->switch_name[6] =
+ bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
+ fcf_rec->switch_name[7] =
+ bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
+}
+
+/**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to hba fcf record.
+ * @addr_mode: address mode to be set to the driver fcf record.
+ * @vlan_id: vlan tag to be set to the driver fcf record.
+ * @flag: flag bits to be set to the driver fcf record.
+ *
+ * This routine updates the driver FCF record from the new HBA FCF record
+ * together with the address mode, vlan_id, and other informations. This
+ * routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
+ struct fcf_record *new_fcf_record, uint32_t addr_mode,
+ uint16_t vlan_id, uint32_t flag)
+{
+ /* Copy the fields from the HBA's FCF record */
+ lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
+ /* Update other fields of driver FCF record */
+ fcf_rec->addr_mode = addr_mode;
+ fcf_rec->vlan_id = vlan_id;
+ fcf_rec->flag |= (flag | RECORD_VALID);
+ __lpfc_update_fcf_record_pri(phba,
+ bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
+ new_fcf_record);
+}
+
+/**
+ * lpfc_register_fcf - Register the FCF with hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a register fcfi mailbox command to register
+ * the fcf with HBA.
+ **/
+static void
+lpfc_register_fcf(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *fcf_mbxq;
+ int rc;
+
+ spin_lock_irq(&phba->hbalock);
+ /* If the FCF is not available do nothing. */
+ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ /* The FCF is already registered, start discovery */
+ if (phba->fcf.fcf_flag & FCF_REGISTERED) {
+ phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ if (phba->pport->port_state != LPFC_FLOGI &&
+ phba->pport->fc_flag & FC_FABRIC) {
+ phba->hba_flag |= FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_initial_flogi(phba->pport);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!fcf_mbxq) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ lpfc_reg_fcfi(phba, fcf_mbxq);
+ fcf_mbxq->vport = phba->pport;
+ fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
+ rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+ spin_unlock_irq(&phba->hbalock);
+ mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+ }
+
+ return;
+}
+
+/**
+ * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ * @boot_flag: Indicates if this record used by boot bios.
+ * @addr_mode: The address mode to be used by this FCF
+ * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
+ *
+ * This routine compare the fcf record with connect list obtained from the
+ * config region to decide if this FCF can be used for SAN discovery. It returns
+ * 1 if this record can be used for SAN discovery else return zero. If this FCF
+ * record can be used for SAN discovery, the boot_flag will indicate if this FCF
+ * is used by boot bios and addr_mode will indicate the addressing mode to be
+ * used for this FCF when the function returns.
+ * If the FCF record need to be used with a particular vlan id, the vlan is
+ * set in the vlan_id on return of the function. If not VLAN tagging need to
+ * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID;
+ **/
+static int
+lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
+ struct fcf_record *new_fcf_record,
+ uint32_t *boot_flag, uint32_t *addr_mode,
+ uint16_t *vlan_id)
+{
+ struct lpfc_fcf_conn_entry *conn_entry;
+ int i, j, fcf_vlan_id = 0;
+
+ /* Find the lowest VLAN id in the FCF record */
+ for (i = 0; i < 512; i++) {
+ if (new_fcf_record->vlan_bitmap[i]) {
+ fcf_vlan_id = i * 8;
+ j = 0;
+ while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+ j++;
+ fcf_vlan_id++;
+ }
+ break;
+ }
+ }
+
+ /* FCF not valid/available or solicitation in progress */
+ if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
+ !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
+ bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
+ return 0;
+
+ if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ *boot_flag = 0;
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+ if (phba->valid_vlan)
+ *vlan_id = phba->vlan_id;
+ else
+ *vlan_id = LPFC_FCOE_NULL_VID;
+ return 1;
+ }
+
+ /*
+ * If there are no FCF connection table entry, driver connect to all
+ * FCFs.
+ */
+ if (list_empty(&phba->fcf_conn_rec_list)) {
+ *boot_flag = 0;
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+
+ /*
+ * When there are no FCF connect entries, use driver's default
+ * addressing mode - FPMA.
+ */
+ if (*addr_mode & LPFC_FCF_FPMA)
+ *addr_mode = LPFC_FCF_FPMA;
+
+ /* If FCF record report a vlan id use that vlan id */
+ if (fcf_vlan_id)
+ *vlan_id = fcf_vlan_id;
+ else
+ *vlan_id = LPFC_FCOE_NULL_VID;
+ return 1;
+ }
+
+ list_for_each_entry(conn_entry,
+ &phba->fcf_conn_rec_list, list) {
+ if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
+ continue;
+
+ if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
+ !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
+ new_fcf_record))
+ continue;
+ if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
+ !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
+ new_fcf_record))
+ continue;
+ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
+ /*
+ * If the vlan bit map does not have the bit set for the
+ * vlan id to be used, then it is not a match.
+ */
+ if (!(new_fcf_record->vlan_bitmap
+ [conn_entry->conn_rec.vlan_tag / 8] &
+ (1 << (conn_entry->conn_rec.vlan_tag % 8))))
+ continue;
+ }
+
+ /*
+ * If connection record does not support any addressing mode,
+ * skip the FCF record.
+ */
+ if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
+ & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
+ continue;
+
+ /*
+ * Check if the connection record specifies a required
+ * addressing mode.
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
+
+ /*
+ * If SPMA required but FCF not support this continue.
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ !(bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record) & LPFC_FCF_SPMA))
+ continue;
+
+ /*
+ * If FPMA required but FCF not support this continue.
+ */
+ if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ !(bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record) & LPFC_FCF_FPMA))
+ continue;
+ }
+
+ /*
+ * This fcf record matches filtering criteria.
+ */
+ if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+ *boot_flag = 1;
+ else
+ *boot_flag = 0;
+
+ /*
+ * If user did not specify any addressing mode, or if the
+ * preferred addressing mode specified by user is not supported
+ * by FCF, allow fabric to pick the addressing mode.
+ */
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+ /*
+ * If the user specified a required address mode, assign that
+ * address mode
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+ *addr_mode = (conn_entry->conn_rec.flags &
+ FCFCNCT_AM_SPMA) ?
+ LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+ /*
+ * If the user specified a preferred address mode, use the
+ * addr mode only if FCF support the addr_mode.
+ */
+ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ (*addr_mode & LPFC_FCF_SPMA))
+ *addr_mode = LPFC_FCF_SPMA;
+ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+ !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ (*addr_mode & LPFC_FCF_FPMA))
+ *addr_mode = LPFC_FCF_FPMA;
+
+ /* If matching connect list has a vlan id, use it */
+ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+ *vlan_id = conn_entry->conn_rec.vlan_tag;
+ /*
+ * If no vlan id is specified in connect list, use the vlan id
+ * in the FCF record
+ */
+ else if (fcf_vlan_id)
+ *vlan_id = fcf_vlan_id;
+ else
+ *vlan_id = LPFC_FCOE_NULL_VID;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
+ * @phba: pointer to lpfc hba data structure.
+ * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned.
+ *
+ * This function check if there is any fcoe event pending while driver
+ * scan FCF entries. If there is any pending event, it will restart the
+ * FCF saning and return 1 else return 0.
+ */
+int
+lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
+{
+ /*
+ * If the Link is up and no FCoE events while in the
+ * FCF discovery, no need to restart FCF discovery.
+ */
+ if ((phba->link_state >= LPFC_LINK_UP) &&
+ (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+ return 0;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2768 Pending link or FCF event during current "
+ "handling of the previous event: link_state:x%x, "
+ "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+ phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+ phba->fcoe_eventtag);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (phba->link_state >= LPFC_LINK_UP) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2780 Restart FCF table scan due to "
+ "pending FCF event:evt_tag_at_scan:x%x, "
+ "evt_tag_current:x%x\n",
+ phba->fcoe_eventtag_at_fcf_scan,
+ phba->fcoe_eventtag);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+ } else {
+ /*
+ * Do not continue FCF discovery and clear FCF_TS_INPROG
+ * flag
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2833 Stop FCF discovery process due to link "
+ "state change (x%x)\n", phba->link_state);
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+ phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /* Unregister the currently registered FCF if required */
+ if (unreg_fcf) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_unregister_fcf(phba);
+ }
+ return 1;
+}
+
+/**
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf record seen so far.
+ *
+ * This function makes an running random selection decision on FCF record to
+ * use through a sequence of @fcf_cnt eligible FCF records with equal
+ * probability. To perform integer manunipulation of random numbers with
+ * size unit32_t, the lower 16 bits of the 32-bit random number returned
+ * from prandom_u32() are taken as the random random number generated.
+ *
+ * Returns true when outcome is for the newly read FCF record should be
+ * chosen; otherwise, return false when outcome is for keeping the previously
+ * chosen FCF record.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+ uint32_t rand_num;
+
+ /* Get 16-bit uniform random number */
+ rand_num = 0xFFFF & prandom_u32();
+
+ /* Decision with probability 1/fcf_cnt */
+ if ((fcf_cnt * rand_num) < 0xFFFF)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
+ *
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessarily error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successfully, other NULL.
+ */
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint16_t *next_fcf_index)
+{
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ struct lpfc_mbx_sge sge;
+ struct lpfc_mbx_read_fcf_tbl *read_fcf;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct fcf_record *new_fcf_record;
+
+ /* Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ if (unlikely(!mboxq->sge_array)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2524 Failed to get the non-embedded SGE "
+ "virtual address\n");
+ return NULL;
+ }
+ virt_addr = mboxq->sge_array->addr[0];
+
+ shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+ lpfc_sli_pcimem_bcopy(shdr, shdr,
+ sizeof(union lpfc_sli4_cfg_shdr));
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status) {
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2726 READ_FCF_RECORD Indicates empty "
+ "FCF table.\n");
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2521 READ_FCF_RECORD mailbox failed "
+ "with status x%x add_status x%x, "
+ "mbx\n", shdr_status, shdr_add_status);
+ return NULL;
+ }
+
+ /* Interpreting the returned information of the FCF record */
+ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+ lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
+ sizeof(struct lpfc_mbx_read_fcf_tbl));
+ *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
+ new_fcf_record = (struct fcf_record *)(virt_addr +
+ sizeof(struct lpfc_mbx_read_fcf_tbl));
+ lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
+ offsetof(struct fcf_record, vlan_bitmap));
+ new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+ new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
+
+ return new_fcf_record;
+}
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if the LOG_FIP loggin is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+ struct fcf_record *fcf_record,
+ uint16_t vlan_id,
+ uint16_t next_fcf_index)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2764 READ_FCF_RECORD:\n"
+ "\tFCF_Index : x%x\n"
+ "\tFCF_Avail : x%x\n"
+ "\tFCF_Valid : x%x\n"
+ "\tFCF_SOL : x%x\n"
+ "\tFIP_Priority : x%x\n"
+ "\tMAC_Provider : x%x\n"
+ "\tLowest VLANID : x%x\n"
+ "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
+ "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+ "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+ "\tNext_FCF_Index: x%x\n",
+ bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+ bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+ bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+ bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
+ fcf_record->fip_priority,
+ bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+ vlan_id,
+ bf_get(lpfc_fcf_record_mac_0, fcf_record),
+ bf_get(lpfc_fcf_record_mac_1, fcf_record),
+ bf_get(lpfc_fcf_record_mac_2, fcf_record),
+ bf_get(lpfc_fcf_record_mac_3, fcf_record),
+ bf_get(lpfc_fcf_record_mac_4, fcf_record),
+ bf_get(lpfc_fcf_record_mac_5, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+ next_fcf_index);
+}
+
+/**
+ lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to an existing FCF record.
+ * @new_fcf_record: pointer to a new FCF record.
+ * @new_vlan_id: vlan id from the new FCF record.
+ *
+ * This function performs matching test of a new FCF record against an existing
+ * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
+ * will not be used as part of the FCF record matching criteria.
+ *
+ * Returns true if all the fields matching, otherwise returns false.
+ */
+static bool
+lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
+ struct lpfc_fcf_rec *fcf_rec,
+ struct fcf_record *new_fcf_record,
+ uint16_t new_vlan_id)
+{
+ if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
+ if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
+ return false;
+ if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
+ return false;
+ if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
+ return false;
+ if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
+ return false;
+ if (fcf_rec->priority != new_fcf_record->fip_priority)
+ return false;
+ return true;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processing the roundrobin fcf failover to next fcf index.
+ * When this function is invoked, there will be a current fcf registered
+ * for flogi.
+ * Return: 0 for continue retrying flogi on currently registered fcf;
+ * 1 for stop flogi on currently registered fcf;
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int rc;
+
+ if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2872 Devloss tmo with no eligible "
+ "FCF, unregister in-use FCF (x%x) "
+ "and rescan FCF table\n",
+ phba->fcf.current_rec.fcf_indx);
+ lpfc_unregister_fcf_rescan(phba);
+ goto stop_flogi_current_fcf;
+ }
+ /* Mark the end to FLOGI roundrobin failover */
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ /* Allow action to new fcf asynchronous event */
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2865 No FCF available, stop roundrobin FCF "
+ "failover and change port state:x%x/x%x\n",
+ phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ goto stop_flogi_current_fcf;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+ "2794 Try FLOGI roundrobin FCF failover to "
+ "(x%x)\n", fcf_index);
+ rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+ if (rc)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+ "2761 FLOGI roundrobin FCF failover "
+ "failed (rc:x%x) to read FCF (x%x)\n",
+ rc, phba->fcf.current_rec.fcf_indx);
+ else
+ goto stop_flogi_current_fcf;
+ }
+ return 0;
+
+stop_flogi_current_fcf:
+ lpfc_can_disctmo(vport);
+ return 1;
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_del
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index the index of the fcf record to delete
+ * This routine checks the on list flag of the fcf_index to be deleted.
+ * If it is one the list then it is removed from the list, and the flag
+ * is cleared. This routine grab the hbalock before removing the fcf
+ * record from the list.
+ **/
+static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
+ uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3058 deleting idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_pri->fcf_rec.priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
+ if (phba->fcf.current_rec.priority ==
+ new_fcf_pri->fcf_rec.priority)
+ phba->fcf.eligible_fcf_cnt--;
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_set_fcf_flogi_fail
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index the index of the fcf record to update
+ * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED
+ * flag so the the round robin slection for the particular priority level
+ * will try a different fcf record that does not have this bit set.
+ * If the fcf record is re-read for any reason this flag is cleared brfore
+ * adding it to the priority list.
+ **/
+void
+lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ spin_lock_irq(&phba->hbalock);
+ new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_add
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index the index of the fcf record to add
+ * This routine checks the priority of the fcf_index to be added.
+ * If it is a lower priority than the current head of the fcf_pri list
+ * then it is added to the list in the right order.
+ * If it is the same priority as the current head of the list then it
+ * is added to the head of the list and its bit in the rr_bmask is set.
+ * If the fcf_index to be added is of a higher priority than the current
+ * head of the list then the rr_bmask is cleared, its bit is set in the
+ * rr_bmask and it is added to the head of the list.
+ * returns:
+ * 0=success 1=failure
+ **/
+static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
+ uint16_t fcf_index,
+ struct fcf_record *new_fcf_record)
+{
+ uint16_t current_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ struct lpfc_fcf_pri *new_fcf_pri;
+ int ret;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3059 adding idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_record->fip_priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.fcf_index = fcf_index;
+ new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ ret = 0; /* Empty rr list */
+ goto out;
+ }
+ current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
+ if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ /* fcfs_at_this_priority_level = 1; */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else
+ /* fcfs_at_this_priority_level++; */
+ phba->fcf.eligible_fcf_cnt++;
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ if (new_fcf_pri->fcf_rec.priority <=
+ fcf_pri->fcf_rec.priority) {
+ if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
+ list_add(&new_fcf_pri->list,
+ &phba->fcf.fcf_pri_list);
+ else
+ list_add(&new_fcf_pri->list,
+ &((struct lpfc_fcf_pri *)
+ fcf_pri->list.prev)->list);
+ ret = 0;
+ goto out;
+ } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
+ || new_fcf_pri->fcf_rec.priority <
+ next_fcf_pri->fcf_rec.priority) {
+ list_add(&new_fcf_pri->list, &fcf_pri->list);
+ ret = 0;
+ goto out;
+ }
+ if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
+ continue;
+
+ }
+ ret = 1;
+out:
+ /* we use = instead of |= to clear the FLOGI_FAILED flag. */
+ new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
+ spin_unlock_irq(&phba->hbalock);
+ return ret;
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery it registers the FCF record and kicks start
+ * discovery.
+ * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
+ * use an FCF record which matches fabric name and mac address of the
+ * currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct fcf_record *new_fcf_record;
+ uint32_t boot_flag, addr_mode;
+ uint16_t fcf_index, next_fcf_index;
+ struct lpfc_fcf_rec *fcf_rec = NULL;
+ uint16_t vlan_id;
+ bool select_new_fcf;
+ int rc;
+
+ /* If there is pending FCoE event restart FCF table scan */
+ if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return;
+ }
+
+ /* Parse the FCF record from the non-embedded mailbox command */
+ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+ &next_fcf_index);
+ if (!new_fcf_record) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2765 Mailbox command READ_FCF_RECORD "
+ "failed to retrieve a FCF record.\n");
+ /* Let next new FCF event trigger fast failover */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return;
+ }
+
+ /* Check the FCF record against the connection list */
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
+
+ /* Log the FCF record information if turned on */
+ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+ next_fcf_index);
+
+ /*
+ * If the fcf record does not match with connect list entries
+ * read the next entry; otherwise, this is an eligible FCF
+ * record for roundrobin FCF failover.
+ */
+ if (!rc) {
+ lpfc_sli4_fcf_pri_list_del(phba,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2781 FCF (x%x) failed connection "
+ "list check: (x%x/x%x/%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record),
+ bf_get(lpfc_fcf_record_fcf_avail,
+ new_fcf_record),
+ bf_get(lpfc_fcf_record_fcf_valid,
+ new_fcf_record),
+ bf_get(lpfc_fcf_record_fcf_sol,
+ new_fcf_record));
+ if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
+ lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+ if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+ phba->fcf.current_rec.fcf_indx) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2862 FCF (x%x) matches property "
+ "of in-use FCF (x%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record),
+ phba->fcf.current_rec.fcf_indx);
+ goto read_next_fcf;
+ }
+ /*
+ * In case the current in-use FCF record becomes
+ * invalid/unavailable during FCF discovery that
+ * was not triggered by fast FCF failover process,
+ * treat it as fast FCF failover.
+ */
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
+ !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2835 Invalid in-use FCF "
+ "(x%x), enter FCF failover "
+ "table scan.\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ return;
+ }
+ }
+ goto read_next_fcf;
+ } else {
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
+ new_fcf_record);
+ if (rc)
+ goto read_next_fcf;
+ }
+
+ /*
+ * If this is not the first FCF discovery of the HBA, use last
+ * FCF record for the discovery. The condition that a rescan
+ * matches the in-use FCF record: fabric name, switch name, mac
+ * address, and vlan_id.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_IN_USE) {
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ new_fcf_record, vlan_id)) {
+ if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+ phba->fcf.current_rec.fcf_indx) {
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+ /* Stop FCF redisc wait timer */
+ __lpfc_sli4_stop_fcf_redisc_wait_timer(
+ phba);
+ else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+ /* Fast failover, mark completed */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2836 New FCF matches in-use "
+ "FCF (x%x), port_state:x%x, "
+ "fc_flag:x%x\n",
+ phba->fcf.current_rec.fcf_indx,
+ phba->pport->port_state,
+ phba->pport->fc_flag);
+ goto out;
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2863 New FCF (x%x) matches "
+ "property of in-use FCF (x%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record),
+ phba->fcf.current_rec.fcf_indx);
+ }
+ /*
+ * Read next FCF record from HBA searching for the matching
+ * with in-use record only if not during the fast failover
+ * period. In case of fast failover period, it shall try to
+ * determine whether the FCF record just read should be the
+ * next candidate.
+ */
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+ spin_unlock_irq(&phba->hbalock);
+ goto read_next_fcf;
+ }
+ }
+ /*
+ * Update on failover FCF record only if it's in FCF fast-failover
+ * period; otherwise, update on current FCF record.
+ */
+ if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+ fcf_rec = &phba->fcf.failover_rec;
+ else
+ fcf_rec = &phba->fcf.current_rec;
+
+ if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
+ /*
+ * If the driver FCF record does not have boot flag
+ * set and new hba fcf record has boot flag set, use
+ * the new hba fcf record.
+ */
+ if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
+ /* Choose this FCF record */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2837 Update current FCF record "
+ "(x%x) with new FCF record (x%x)\n",
+ fcf_rec->fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+ addr_mode, vlan_id, BOOT_ENABLE);
+ spin_unlock_irq(&phba->hbalock);
+ goto read_next_fcf;
+ }
+ /*
+ * If the driver FCF record has boot flag set and the
+ * new hba FCF record does not have boot flag, read
+ * the next FCF record.
+ */
+ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
+ spin_unlock_irq(&phba->hbalock);
+ goto read_next_fcf;
+ }
+ /*
+ * If the new hba FCF record has lower priority value
+ * than the driver FCF record, use the new record.
+ */
+ if (new_fcf_record->fip_priority < fcf_rec->priority) {
+ /* Choose the new FCF record with lower priority */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2838 Update current FCF record "
+ "(x%x) with new FCF record (x%x)\n",
+ fcf_rec->fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+ addr_mode, vlan_id, 0);
+ /* Reset running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
+ /* Update running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt++;
+ select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
+ phba->fcf.eligible_fcf_cnt);
+ if (select_new_fcf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2839 Update current FCF record "
+ "(x%x) with new FCF record (x%x)\n",
+ fcf_rec->fcf_indx,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ /* Choose the new FCF by random selection */
+ __lpfc_update_fcf_record(phba, fcf_rec,
+ new_fcf_record,
+ addr_mode, vlan_id, 0);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+ goto read_next_fcf;
+ }
+ /*
+ * This is the first suitable FCF record, choose this record for
+ * initial best-fit FCF.
+ */
+ if (fcf_rec) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2840 Update initial FCF candidate "
+ "with FCF (x%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+ addr_mode, vlan_id, (boot_flag ?
+ BOOT_ENABLE : 0));
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ /* Setup initial running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ goto read_next_fcf;
+
+read_next_fcf:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
+ if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+ /*
+ * Case of FCF fast failover scan
+ */
+
+ /*
+ * It has not found any suitable FCF record, cancel
+ * FCF scan inprogress, and do nothing
+ */
+ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2782 No suitable FCF found: "
+ "(x%x/x%x)\n",
+ phba->fcoe_eventtag_at_fcf_scan,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ /* Unregister in-use FCF and rescan */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_FIP,
+ "2864 On devloss tmo "
+ "unreg in-use FCF and "
+ "rescan FCF table\n");
+ lpfc_unregister_fcf_rescan(phba);
+ return;
+ }
+ /*
+ * Let next new FCF event trigger fast failover
+ */
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ /*
+ * It has found a suitable FCF record that is not
+ * the same as in-use FCF record, unregister the
+ * in-use FCF record, replace the in-use FCF record
+ * with the new FCF record, mark FCF fast failover
+ * completed, and then start register the new FCF
+ * record.
+ */
+
+ /* Unregister the current in-use FCF record */
+ lpfc_unregister_fcf(phba);
+
+ /* Replace in-use record with the new record */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2842 Replace in-use FCF (x%x) "
+ "with failover FCF (x%x)\n",
+ phba->fcf.current_rec.fcf_indx,
+ phba->fcf.failover_rec.fcf_indx);
+ memcpy(&phba->fcf.current_rec,
+ &phba->fcf.failover_rec,
+ sizeof(struct lpfc_fcf_rec));
+ /*
+ * Mark the fast FCF failover rediscovery completed
+ * and the start of the first round of the roundrobin
+ * FCF failover.
+ */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ /* Register to the new FCF record */
+ lpfc_register_fcf(phba);
+ } else {
+ /*
+ * In case of transaction period to fast FCF failover,
+ * do nothing when search to the end of the FCF table.
+ */
+ if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
+ (phba->fcf.fcf_flag & FCF_REDISC_PEND))
+ return;
+
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ phba->fcf.fcf_flag & FCF_IN_USE) {
+ /*
+ * In case the current in-use FCF record no
+ * longer existed during FCF discovery that
+ * was not triggered by fast FCF failover
+ * process, treat it as fast FCF failover.
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2841 In-use FCF record (x%x) "
+ "not reported, entering fast "
+ "FCF failover mode scanning.\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ return;
+ }
+ /* Register to the new FCF record */
+ lpfc_register_fcf(phba);
+ }
+ } else
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
+ return;
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_register_fcf(phba);
+
+ return;
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function for FLOGI failure roundrobin FCF failover
+ * read FCF record mailbox command from the eligible FCF record bmask for
+ * performing the failover. If the FCF read back is not valid/available, it
+ * fails through to retrying FLOGI to the currently registered FCF again.
+ * Otherwise, if the FCF read back is valid and available, it will set the
+ * newly read FCF record to the failover FCF record, unregister currently
+ * registered FCF record, copy the failover FCF record to the current
+ * FCF record, and then register the current FCF record before proceeding
+ * to trying FLOGI on the new failover FCF.
+ */
+void
+lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct fcf_record *new_fcf_record;
+ uint32_t boot_flag, addr_mode;
+ uint16_t next_fcf_index, fcf_index;
+ uint16_t current_fcf_index;
+ uint16_t vlan_id;
+ int rc;
+
+ /* If link state is not up, stop the roundrobin failover process */
+ if (phba->link_state < LPFC_LINK_UP) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+ }
+
+ /* Parse the FCF record from the non-embedded mailbox command */
+ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+ &next_fcf_index);
+ if (!new_fcf_record) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2766 Mailbox command READ_FCF_RECORD "
+ "failed to retrieve a FCF record. "
+ "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+ phba->fcf.fcf_flag);
+ lpfc_unregister_fcf_rescan(phba);
+ goto out;
+ }
+
+ /* Get the needed parameters from FCF record */
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
+
+ /* Log the FCF record information if turned on */
+ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+ next_fcf_index);
+
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2848 Remove ineligible FCF (x%x) from "
+ "from roundrobin bmask\n", fcf_index);
+ /* Clear roundrobin bmask bit for ineligible FCF */
+ lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+ /* Perform next round of roundrobin FCF failover */
+ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+ rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+ if (rc)
+ goto out;
+ goto error_out;
+ }
+
+ if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2760 Perform FLOGI roundrobin FCF failover: "
+ "FCF (x%x) back to FCF (x%x)\n",
+ phba->fcf.current_rec.fcf_indx, fcf_index);
+ /* Wait 500 ms before retrying FLOGI to current FCF */
+ msleep(500);
+ lpfc_issue_init_vfi(phba->pport);
+ goto out;
+ }
+
+ /* Upload new FCF record to the failover FCF record */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2834 Update current FCF (x%x) with new FCF (x%x)\n",
+ phba->fcf.failover_rec.fcf_indx, fcf_index);
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
+ new_fcf_record, addr_mode, vlan_id,
+ (boot_flag ? BOOT_ENABLE : 0));
+ spin_unlock_irq(&phba->hbalock);
+
+ current_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+ /* Unregister the current in-use FCF record */
+ lpfc_unregister_fcf(phba);
+
+ /* Replace in-use record with the new record */
+ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
+ sizeof(struct lpfc_fcf_rec));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2783 Perform FLOGI roundrobin FCF failover: FCF "
+ "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
+
+error_out:
+ lpfc_register_fcf(phba);
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function of read FCF record mailbox command for
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
+ * failover when a new FCF event happened. If the FCF read back is
+ * valid/available and it passes the connection list check, it updates
+ * the bmask for the eligible FCF record for roundrobin failover.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct fcf_record *new_fcf_record;
+ uint32_t boot_flag, addr_mode;
+ uint16_t fcf_index, next_fcf_index;
+ uint16_t vlan_id;
+ int rc;
+
+ /* If link state is not up, no need to proceed */
+ if (phba->link_state < LPFC_LINK_UP)
+ goto out;
+
+ /* If FCF discovery period is over, no need to proceed */
+ if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
+ goto out;
+
+ /* Parse the FCF record from the non-embedded mailbox command */
+ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+ &next_fcf_index);
+ if (!new_fcf_record) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2767 Mailbox command READ_FCF_RECORD "
+ "failed to retrieve a FCF record.\n");
+ goto out;
+ }
+
+ /* Check the connection list for eligibility */
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
+
+ /* Log the FCF record information if turned on */
+ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+ next_fcf_index);
+
+ if (!rc)
+ goto out;
+
+ /* Update the eligible FCF record index bmask */
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vfi mailbox command.
+ */
+static void
+lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+
+ /*
+ * VFI not supported on interface type 0, just do the flogi
+ * Also continue if the VFI is in use - just use the same one.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX,
+ "2891 Init VFI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ return;
+ }
+
+ lpfc_initial_flogi(vport);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issue a init_vfi mailbox command to initialize the VFI and
+ * VPI for the physical port.
+ */
+void
+lpfc_issue_init_vfi(struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+ struct lpfc_hba *phba = vport->phba;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX, "2892 Failed to allocate "
+ "init_vfi mailbox\n");
+ return;
+ }
+ lpfc_init_vfi(mboxq, vport);
+ mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+ mempool_free(mboxq, vport->phba->mbox_mem_pool);
+ }
+}
+
+/**
+ * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vpi mailbox command.
+ */
+void
+lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX,
+ "2609 Init VPI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ return;
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+
+ /* If this port is physical port or FDISC is done, do reg_vpi */
+ if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_DISCOVERY,
+ "2731 Cannot find fabric "
+ "controller node\n");
+ else
+ lpfc_register_new_vport(phba, vport, ndlp);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+ lpfc_initial_fdisc(vport);
+ else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2606 No NPIV Fabric support\n");
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issue a init_vpi mailbox command to initialize
+ * VPI for the vport.
+ */
+void
+lpfc_issue_init_vpi(struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc, vpi;
+
+ if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+ vpi = lpfc_alloc_vpi(vport->phba);
+ if (!vpi) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX,
+ "3303 Failed to obtain vport vpi\n");
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ return;
+ }
+ vport->vpi = vpi;
+ }
+
+ mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX, "2607 Failed to allocate "
+ "init_vpi mailbox\n");
+ return;
+ }
+ lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
+ mboxq->vport = vport;
+ mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+ rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+ mempool_free(mboxq, vport->phba->mbox_mem_pool);
+ }
+}
+
+/**
+ * lpfc_start_fdiscs - send fdiscs for each vports on this port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function loops through the list of vports on the @phba and issues an
+ * FDISC if possible.
+ */
+void
+lpfc_start_fdiscs(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+ continue;
+ /* There are no vpi for this vport */
+ if (vports[i]->vpi > phba->max_vpi) {
+ lpfc_vport_set_state(vports[i],
+ FC_VPORT_FAILED);
+ continue;
+ }
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_vport_set_state(vports[i],
+ FC_VPORT_LINKDOWN);
+ continue;
+ }
+ if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ lpfc_issue_init_vpi(vports[i]);
+ continue;
+ }
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+ lpfc_initial_fdisc(vports[i]);
+ else {
+ lpfc_vport_set_state(vports[i],
+ FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_vlog(vports[i], KERN_ERR,
+ LOG_ELS,
+ "0259 No NPIV "
+ "Fabric support\n");
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_dmabuf *dmabuf = mboxq->context1;
+ struct lpfc_vport *vport = mboxq->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * VFI not supported for interface type 0, so ignore any mailbox
+ * error (except VFI in use) and continue with the discovery.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2018 REG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ /* FLOGI failed, use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ goto out_free_mem;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ goto out_free_mem;
+ }
+
+ /* If the VFI is already registered, there is nothing else to do
+ * Unless this was a VFI update and we are in PT2PT mode, then
+ * we should drop through to set the port state to ready.
+ */
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->fc_flag & FC_PT2PT))
+ goto out_free_mem;
+
+ /* The VPI is implicitly registered when the VFI is registered */
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag |= FC_VFI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+
+ /* In case SLI4 FC loopback test, we are ready */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->link_flag & LS_LOOPBACK_MODE)) {
+ phba->link_state = LPFC_HBA_READY;
+ goto out_free_mem;
+ }
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
+ "alpacnt:%d LinkState:%x topology:%x\n",
+ vport->port_state, vport->fc_flag, vport->fc_myDID,
+ vport->phba->alpa_map[0],
+ phba->link_state, phba->fc_topology);
+
+ if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+ /*
+ * For private loop or for NPort pt2pt,
+ * just start discovery and we are done.
+ */
+ if ((vport->fc_flag & FC_PT2PT) ||
+ ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+ !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+
+ /* Use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+ /* Start discovery */
+ if (vport->fc_flag & FC_PT2PT)
+ vport->port_state = LPFC_VPORT_READY;
+ else
+ lpfc_disc_start(vport);
+ } else {
+ lpfc_start_fdiscs(phba);
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+ }
+
+out_free_mem:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+static void
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+ struct lpfc_vport *vport = pmb->vport;
+
+
+ /* Check for error */
+ if (mb->mbxStatus) {
+ /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0319 READ_SPARAM mbxStatus error x%x "
+ "hba state x%x>\n",
+ mb->mbxStatus, vport->port_state);
+ lpfc_linkdown(phba);
+ goto out;
+ }
+
+ memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
+ sizeof (struct serv_parm));
+ lpfc_update_vport_wwn(vport);
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
+ memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+
+out:
+ pmb->context1 = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ lpfc_issue_clear_la(phba, vport);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
+static void
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+{
+ struct lpfc_vport *vport = phba->pport;
+ LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+ struct Scsi_Host *shost;
+ int i;
+ struct lpfc_dmabuf *mp;
+ int rc;
+ struct fcf_record *fcf_record;
+ uint32_t fc_flags = 0;
+
+ spin_lock_irq(&phba->hbalock);
+ switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
+ case LPFC_LINK_SPEED_1GHZ:
+ case LPFC_LINK_SPEED_2GHZ:
+ case LPFC_LINK_SPEED_4GHZ:
+ case LPFC_LINK_SPEED_8GHZ:
+ case LPFC_LINK_SPEED_10GHZ:
+ case LPFC_LINK_SPEED_16GHZ:
+ phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
+ break;
+ default:
+ phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ if (phba->fc_topology &&
+ phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3314 Toplogy changed was 0x%x is 0x%x\n",
+ phba->fc_topology,
+ bf_get(lpfc_mbx_read_top_topology, la));
+ phba->fc_topology_changed = 1;
+ }
+
+ phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
+ phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+
+ shost = lpfc_shost_from_vport(vport);
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+ /* if npiv is enabled and this adapter supports npiv log
+ * a message that npiv is not supported in this topology
+ */
+ if (phba->cfg_enable_npiv && phba->max_vpi)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1309 Link Up Event npiv not supported in loop "
+ "topology\n");
+ /* Get Loop Map information */
+ if (bf_get(lpfc_mbx_read_top_il, la))
+ fc_flags |= FC_LBIT;
+
+ vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
+ i = la->lilpBde64.tus.f.bdeSize;
+
+ if (i == 0) {
+ phba->alpa_map[0] = 0;
+ } else {
+ if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
+ int numalpa, j, k;
+ union {
+ uint8_t pamap[16];
+ struct {
+ uint32_t wd1;
+ uint32_t wd2;
+ uint32_t wd3;
+ uint32_t wd4;
+ } pa;
+ } un;
+ numalpa = phba->alpa_map[0];
+ j = 0;
+ while (j < numalpa) {
+ memset(un.pamap, 0, 16);
+ for (k = 1; j < numalpa; k++) {
+ un.pamap[k - 1] =
+ phba->alpa_map[j + 1];
+ j++;
+ if (k == 16)
+ break;
+ }
+ /* Link Up Event ALPA map */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_LINK_EVENT,
+ "1304 Link Up Event "
+ "ALPA map Data: x%x "
+ "x%x x%x x%x\n",
+ un.pa.wd1, un.pa.wd2,
+ un.pa.wd3, un.pa.wd4);
+ }
+ }
+ }
+ } else {
+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+ if (phba->max_vpi && phba->cfg_enable_npiv &&
+ (phba->sli_rev >= LPFC_SLI_REV3))
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ }
+ vport->fc_myDID = phba->fc_pref_DID;
+ fc_flags |= FC_LBIT;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ if (fc_flags) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= fc_flags;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ lpfc_linkup(phba);
+ sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!sparam_mbox)
+ goto out;
+
+ rc = lpfc_read_sparam(phba, sparam_mbox, 0);
+ if (rc) {
+ mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ sparam_mbox->vport = vport;
+ sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+ rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!cfglink_mbox)
+ goto out;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
+ lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
+ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ } else {
+ vport->port_state = LPFC_VPORT_UNKNOWN;
+ /*
+ * Add the driver's default FCF record at FCF index 0 now. This
+ * is phase 1 implementation that support FCF index 0 and driver
+ * defaults.
+ */
+ if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ fcf_record = kzalloc(sizeof(struct fcf_record),
+ GFP_KERNEL);
+ if (unlikely(!fcf_record)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "2554 Could not allocate memory for "
+ "fcf record\n");
+ rc = -ENODEV;
+ goto out;
+ }
+
+ lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
+ LPFC_FCOE_FCF_DEF_INDEX);
+ rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "2013 Could not manually add FCF "
+ "record 0, status %d\n", rc);
+ rc = -ENODEV;
+ kfree(fcf_record);
+ goto out;
+ }
+ kfree(fcf_record);
+ }
+ /*
+ * The driver is expected to do FIP/FCF. Call the port
+ * and get the FCF Table.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_TS_INPROG) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ /* This is the initial FCF discovery scan */
+ phba->fcf.fcf_flag |= FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2778 Start FCF table scan at linkup\n");
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+ }
+ /* Reset FCF roundrobin bmask for new discovery */
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ }
+
+ return;
+out:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ vport->port_state, sparam_mbox, cfglink_mbox);
+ lpfc_issue_clear_la(phba, vport);
+ return;
+}
+
+static void
+lpfc_enable_la(struct lpfc_hba *phba)
+{
+ uint32_t control;
+ struct lpfc_sli *psli = &phba->sli;
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+ lpfc_linkdown(phba);
+ lpfc_enable_la(phba);
+ lpfc_unregister_unused_fcf(phba);
+ /* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
+/*
+ * This routine handles processing a READ_TOPOLOGY mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_mbx_read_top *la;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ /* Unblock ELS traffic */
+ phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ /* Check for error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "1307 READ_LA mbox error x%x state x%x\n",
+ mb->mbxStatus, vport->port_state);
+ lpfc_mbx_issue_link_down(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+ goto lpfc_mbx_cmpl_read_topology_free_mbuf;
+ }
+
+ la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+
+ memcpy(&phba->alpa_map[0], mp->virt, 128);
+
+ spin_lock_irq(shost->host_lock);
+ if (bf_get(lpfc_mbx_read_top_pb, la))
+ vport->fc_flag |= FC_BYPASSED_MODE;
+ else
+ vport->fc_flag &= ~FC_BYPASSED_MODE;
+ spin_unlock_irq(shost->host_lock);
+
+ if (phba->fc_eventTag <= la->eventTag) {
+ phba->fc_stat.LinkMultiEvent++;
+ if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
+ if (phba->fc_eventTag != 0)
+ lpfc_linkdown(phba);
+ }
+
+ phba->fc_eventTag = la->eventTag;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ spin_lock_irq(&phba->hbalock);
+ if (bf_get(lpfc_mbx_read_top_mm, la))
+ phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+ else
+ phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ phba->link_events++;
+ if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
+ !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ phba->fc_stat.LinkUp++;
+ if (phba->link_flag & LS_LOOPBACK_MODE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1306 Link Up Event in loop back mode "
+ "x%x received Data: x%x x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ bf_get(lpfc_mbx_read_top_alpa_granted,
+ la),
+ bf_get(lpfc_mbx_read_top_link_spd, la),
+ phba->alpa_map[0]);
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1303 Link Up Event x%x received "
+ "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ la->eventTag, phba->fc_eventTag,
+ bf_get(lpfc_mbx_read_top_alpa_granted,
+ la),
+ bf_get(lpfc_mbx_read_top_link_spd, la),
+ phba->alpa_map[0],
+ bf_get(lpfc_mbx_read_top_mm, la),
+ bf_get(lpfc_mbx_read_top_fa, la),
+ phba->wait_4_mlo_maint_flg);
+ }
+ lpfc_mbx_process_link_up(phba, la);
+ } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
+ LPFC_ATT_LINK_DOWN) {
+ phba->fc_stat.LinkDown++;
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1308 Link Down Event in loop back mode "
+ "x%x received "
+ "Data: x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1305 Link Down Event x%x received "
+ "Data: x%x x%x x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag,
+ bf_get(lpfc_mbx_read_top_mm, la),
+ bf_get(lpfc_mbx_read_top_fa, la));
+ lpfc_mbx_issue_link_down(phba);
+ }
+ if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
+ ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
+ if (phba->link_state != LPFC_LINK_DOWN) {
+ phba->fc_stat.LinkDown++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1312 Link Down Event x%x received "
+ "Data: x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag);
+ lpfc_mbx_issue_link_down(phba);
+ } else
+ lpfc_enable_la(phba);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1310 Menlo Maint Mode Link up Event x%x rcvd "
+ "Data: x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag);
+ /*
+ * The cmnd that triggered this will be waiting for this
+ * signal.
+ */
+ /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+ if (phba->wait_4_mlo_maint_flg) {
+ phba->wait_4_mlo_maint_flg = 0;
+ wake_up_interruptible(&phba->wait_4_mlo_m_q);
+ }
+ }
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ bf_get(lpfc_mbx_read_top_fa, la)) {
+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+ lpfc_issue_clear_la(phba, vport);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "1311 fa %d\n",
+ bf_get(lpfc_mbx_read_top_fa, la));
+ }
+
+lpfc_mbx_cmpl_read_topology_free_mbuf:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+
+ if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ /* We rcvd a rscn after issuing this
+ * mbox reg login, we may have cycled
+ * back through the state and be
+ * back at reg login state so this
+ * mbox needs to be ignored becase
+ * there is another reg login in
+ * process.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock_irq(shost->host_lock);
+ } else
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(vport, ndlp, pmb,
+ NLP_EVT_CMPL_REG_LOGIN);
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ /* decrement the node reference count held for this callback
+ * function.
+ */
+ lpfc_nlp_put(ndlp);
+
+ return;
+}
+
+static void
+lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ switch (mb->mbxStatus) {
+ case 0x0011:
+ case 0x0020:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
+ mb->mbxStatus);
+ break;
+ /* If VPI is busy, reset the HBA */
+ case 0x9700:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
+ vport->vpi, mb->mbxStatus);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_workq_post_event(phba, NULL, NULL,
+ LPFC_EVT_RESET_HBA);
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state &= ~LPFC_VPI_REGISTERED;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ vport->unreg_vpi_cmpl = VPORT_OK;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_cleanup_vports_rrqs(vport, NULL);
+ /*
+ * This shost reference might have been taken at the beginning of
+ * lpfc_vport_delete()
+ */
+ if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
+ scsi_host_put(shost);
+}
+
+int
+lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return 1;
+
+ lpfc_unreg_vpi(phba, vport->vpi, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "1800 Could not issue unreg_vpi\n");
+ mempool_free(mbox, phba->mbox_mem_pool);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
+ return rc;
+ }
+ return 0;
+}
+
+static void
+lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ MAILBOX_t *mb = &pmb->u.mb;
+
+ switch (mb->mbxStatus) {
+ case 0x0011:
+ case 0x9601:
+ case 0x9602:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0912 cmpl_reg_vpi, mb status = 0x%x\n",
+ mb->mbxStatus);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_myDID = 0;
+ goto out;
+ }
+
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ }
+ vport->port_state = LPFC_VPORT_READY;
+
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_create_static_vport - Read HBA config region to create static vports.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issue a DUMP mailbox command for config region 22 to get
+ * the list of static vports to be created. The function create vports
+ * based on the information returned from the HBA.
+ **/
+void
+lpfc_create_static_vport(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb = NULL;
+ MAILBOX_t *mb;
+ struct static_vport_info *vport_info;
+ int mbx_wait_rc = 0, i;
+ struct fc_vport_identifiers vport_id;
+ struct fc_vport *new_fc_vport;
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ uint16_t offset = 0;
+ uint8_t *vport_buff;
+ struct lpfc_dmabuf *mp;
+ uint32_t byte_count = 0;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0542 lpfc_create_static_vport failed to"
+ " allocate mailbox memory\n");
+ return;
+ }
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb = &pmb->u.mb;
+
+ vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
+ if (!vport_info) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0543 lpfc_create_static_vport failed to"
+ " allocate vport_info\n");
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ vport_buff = (uint8_t *) vport_info;
+ do {
+ /* free dma buffer from previous round */
+ if (pmb->context1) {
+ mp = (struct lpfc_dmabuf *)pmb->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ if (lpfc_dump_static_vport(phba, pmb, offset))
+ goto out;
+
+ pmb->vport = phba->pport;
+ mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
+ LPFC_MBOX_TMO);
+
+ if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0544 lpfc_create_static_vport failed to"
+ " issue dump mailbox command ret 0x%x "
+ "status 0x%x\n",
+ mbx_wait_rc, mb->mbxStatus);
+ goto out;
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ byte_count = pmb->u.mqe.un.mb_words[5];
+ mp = (struct lpfc_dmabuf *)pmb->context1;
+ if (byte_count > sizeof(struct static_vport_info) -
+ offset)
+ byte_count = sizeof(struct static_vport_info)
+ - offset;
+ memcpy(vport_buff + offset, mp->virt, byte_count);
+ offset += byte_count;
+ } else {
+ if (mb->un.varDmp.word_cnt >
+ sizeof(struct static_vport_info) - offset)
+ mb->un.varDmp.word_cnt =
+ sizeof(struct static_vport_info)
+ - offset;
+ byte_count = mb->un.varDmp.word_cnt;
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ vport_buff + offset,
+ byte_count);
+
+ offset += byte_count;
+ }
+
+ } while (byte_count &&
+ offset < sizeof(struct static_vport_info));
+
+
+ if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
+ ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
+ != VPORT_INFO_REV)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0545 lpfc_create_static_vport bad"
+ " information header 0x%x 0x%x\n",
+ le32_to_cpu(vport_info->signature),
+ le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+
+ goto out;
+ }
+
+ shost = lpfc_shost_from_vport(phba->pport);
+
+ for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
+ vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
+ if (!vport_id.port_name || !vport_id.node_name)
+ continue;
+
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+ new_fc_vport = fc_vport_create(shost, 0, &vport_id);
+
+ if (!new_fc_vport) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0546 lpfc_create_static_vport failed to"
+ " create vport\n");
+ continue;
+ }
+
+ vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
+ vport->vport_flag |= STATIC_VPORT;
+ }
+
+out:
+ kfree(vport_info);
+ if (mbx_wait_rc != MBX_TIMEOUT) {
+ if (pmb->context1) {
+ mp = (struct lpfc_dmabuf *)pmb->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
+ return;
+}
+
+/*
+ * This routine handles processing a Fabric REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0258 Register Fabric login error: 0x%x\n",
+ mb->mbxStatus);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ /* FLOGI failed, use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ /* Decrement the reference count to ndlp after the
+ * reference to the ndlp are done.
+ */
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ /* Decrement the reference count to ndlp after the reference
+ * to the ndlp are done.
+ */
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+ if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+ /* when physical port receive logo donot start
+ * vport discovery */
+ if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ lpfc_start_fdiscs(phba);
+ else {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Drop the reference count from the mbox at the end after
+ * all the current reference to the ndlp have been done.
+ */
+ lpfc_nlp_put(ndlp);
+ return;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_vport *vport = pmb->vport;
+
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (mb->mbxStatus) {
+out:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0260 Register NameServer error: 0x%x\n",
+ mb->mbxStatus);
+ /* decrement the node reference count held for this
+ * callback function.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* If no other thread is using the ndlp, free it */
+ lpfc_nlp_not_used(ndlp);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ /*
+ * RegLogin failed, use loop map to make discovery
+ * list
+ */
+ lpfc_disc_list_loopmap(vport);
+
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ return;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* Link up discovery requires Fabric registration. */
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
+ lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+
+ /* Issue SCR just before NameServer GID_FT Query */
+ lpfc_issue_els_scr(vport, SCR_DID, 0);
+ }
+
+ vport->fc_ns_retry = 0;
+ /* Good status, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
+ /* Cannot issue NameServer Query, so finish up discovery */
+ goto out;
+ }
+
+ /* decrement the node reference count held for this
+ * callback function.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+static void
+lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct fc_rport *rport;
+ struct lpfc_rport_data *rdata;
+ struct fc_rport_identifiers rport_ids;
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Remote port has reappeared. Re-register w/ FC transport */
+ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+ rport_ids.port_id = ndlp->nlp_DID;
+ rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ /*
+ * We leave our node pointer in rport->dd_data when we unregister a
+ * FCP target port. But fc_remote_port_add zeros the space to which
+ * rport->dd_data points. So, if we're reusing a previously
+ * registered port, drop the reference that we took the last time we
+ * registered the port.
+ */
+ if (ndlp->rport && ndlp->rport->dd_data &&
+ ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
+ lpfc_nlp_put(ndlp);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport add: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ /* Don't add the remote port if unloading. */
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
+ ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
+ if (!rport || !get_device(&rport->dev)) {
+ dev_printk(KERN_WARNING, &phba->pcidev->dev,
+ "Warning: fc_remote_port_add failed\n");
+ return;
+ }
+
+ /* initialize static port data */
+ rport->maxframe_size = ndlp->nlp_maxframe;
+ rport->supported_classes = ndlp->nlp_class_sup;
+ rdata = rport->dd_data;
+ rdata->pnode = lpfc_nlp_get(ndlp);
+
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+ if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
+ fc_remote_port_rolechg(rport, rport_ids.roles);
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3183 rport register x%06x, rport %p role x%x\n",
+ ndlp->nlp_DID, rport, rport_ids.roles);
+
+ if ((rport->scsi_target_id != -1) &&
+ (rport->scsi_target_id < LPFC_MAX_TARGET)) {
+ ndlp->nlp_sid = rport->scsi_target_id;
+ }
+ return;
+}
+
+static void
+lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
+{
+ struct fc_rport *rport = ndlp->rport;
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ "rport delete: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3184 rport unregister x%06x, rport %p\n",
+ ndlp->nlp_DID, rport);
+
+ fc_remote_port_delete(rport);
+
+ return;
+}
+
+static void
+lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ switch (state) {
+ case NLP_STE_UNUSED_NODE:
+ vport->fc_unused_cnt += count;
+ break;
+ case NLP_STE_PLOGI_ISSUE:
+ vport->fc_plogi_cnt += count;
+ break;
+ case NLP_STE_ADISC_ISSUE:
+ vport->fc_adisc_cnt += count;
+ break;
+ case NLP_STE_REG_LOGIN_ISSUE:
+ vport->fc_reglogin_cnt += count;
+ break;
+ case NLP_STE_PRLI_ISSUE:
+ vport->fc_prli_cnt += count;
+ break;
+ case NLP_STE_UNMAPPED_NODE:
+ vport->fc_unmap_cnt += count;
+ break;
+ case NLP_STE_MAPPED_NODE:
+ vport->fc_map_cnt += count;
+ break;
+ case NLP_STE_NPR_NODE:
+ if (vport->fc_npr_cnt == 0 && count == -1)
+ vport->fc_npr_cnt = 0;
+ else
+ vport->fc_npr_cnt += count;
+ break;
+ }
+ spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int old_state, int new_state)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (new_state == NLP_STE_UNMAPPED_NODE) {
+ ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ ndlp->nlp_type |= NLP_FC_NODE;
+ }
+ if (new_state == NLP_STE_MAPPED_NODE)
+ ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ if (new_state == NLP_STE_NPR_NODE)
+ ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+
+ /* Transport interface */
+ if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
+ old_state == NLP_STE_UNMAPPED_NODE)) {
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
+ }
+
+ if (new_state == NLP_STE_MAPPED_NODE ||
+ new_state == NLP_STE_UNMAPPED_NODE) {
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ lpfc_register_remote_port(vport, ndlp);
+ }
+ if ((new_state == NLP_STE_MAPPED_NODE) &&
+ (vport->stat_data_enabled)) {
+ /*
+ * A new target is discovered, if there is no buffer for
+ * statistical data collection allocate buffer.
+ */
+ ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+ sizeof(struct lpfc_scsicmd_bkt),
+ GFP_KERNEL);
+
+ if (!ndlp->lat_data)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0286 lpfc_nlp_state_cleanup failed to "
+ "allocate statistical data buffer DID "
+ "0x%x\n", ndlp->nlp_DID);
+ }
+ /*
+ * if we added to Mapped list, but the remote port
+ * registration failed or assigned a target id outside
+ * our presentable range - move the node to the
+ * Unmapped List
+ */
+ if (new_state == NLP_STE_MAPPED_NODE &&
+ (!ndlp->rport ||
+ ndlp->rport->scsi_target_id == -1 ||
+ ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+}
+
+static char *
+lpfc_nlp_state_name(char *buffer, size_t size, int state)
+{
+ static char *states[] = {
+ [NLP_STE_UNUSED_NODE] = "UNUSED",
+ [NLP_STE_PLOGI_ISSUE] = "PLOGI",
+ [NLP_STE_ADISC_ISSUE] = "ADISC",
+ [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
+ [NLP_STE_PRLI_ISSUE] = "PRLI",
+ [NLP_STE_LOGO_ISSUE] = "LOGO",
+ [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
+ [NLP_STE_MAPPED_NODE] = "MAPPED",
+ [NLP_STE_NPR_NODE] = "NPR",
+ };
+
+ if (state < NLP_STE_MAX_STATE && states[state])
+ strlcpy(buffer, states[state], size);
+ else
+ snprintf(buffer, size, "unknown (%d)", state);
+ return buffer;
+}
+
+void
+lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int state)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int old_state = ndlp->nlp_state;
+ char name1[16], name2[16];
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0904 NPort state transition x%06x, %s -> %s\n",
+ ndlp->nlp_DID,
+ lpfc_nlp_state_name(name1, sizeof(name1), old_state),
+ lpfc_nlp_state_name(name2, sizeof(name2), state));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node statechg did:x%x old:%d ste:%d",
+ ndlp->nlp_DID, old_state, state);
+
+ if (old_state == NLP_STE_NPR_NODE &&
+ state != NLP_STE_NPR_NODE)
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if (old_state == NLP_STE_UNMAPPED_NODE) {
+ ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+ ndlp->nlp_type &= ~NLP_FC_NODE;
+ }
+
+ if (list_empty(&ndlp->nlp_listp)) {
+ spin_lock_irq(shost->host_lock);
+ list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+ spin_unlock_irq(shost->host_lock);
+ } else if (old_state)
+ lpfc_nlp_counters(vport, old_state, -1);
+
+ ndlp->nlp_state = state;
+ lpfc_nlp_counters(vport, state, 1);
+ lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
+}
+
+void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (list_empty(&ndlp->nlp_listp)) {
+ spin_lock_irq(shost->host_lock);
+ list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+ spin_unlock_irq(shost->host_lock);
+ }
+}
+
+void
+lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ spin_lock_irq(shost->host_lock);
+ list_del_init(&ndlp->nlp_listp);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+ NLP_STE_UNUSED_NODE);
+}
+
+static void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+ NLP_STE_UNUSED_NODE);
+}
+/**
+ * lpfc_initialize_node - Initialize all fields of node object
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ *
+ * This function is always called when node object need to be initialized.
+ * It initializes all the fields of the node object. Although the reference
+ * to phba from @ndlp can be obtained indirectly through it's reference to
+ * @vport, a direct reference to phba is taken here by @ndlp. This is due
+ * to the life-span of the @ndlp might go beyond the existence of @vport as
+ * the final release of ndlp is determined by its reference count. And, the
+ * operation on @ndlp needs the reference to phba.
+ **/
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
+{
+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+ init_timer(&ndlp->nlp_delayfunc);
+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ ndlp->nlp_DID = did;
+ ndlp->vport = vport;
+ ndlp->phba = vport->phba;
+ ndlp->nlp_sid = NLP_NO_SID;
+ kref_init(&ndlp->kref);
+ NLP_INT_NODE_ACT(ndlp);
+ atomic_set(&ndlp->cmd_pending, 0);
+ ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+}
+
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int state)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t did;
+ unsigned long flags;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
+
+ if (!ndlp)
+ return NULL;
+
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ /* The ndlp should not be in memory free mode */
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0277 lpfc_enable_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return NULL;
+ }
+ /* The ndlp should not already be in active mode */
+ if (NLP_CHK_NODE_ACT(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0278 lpfc_enable_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return NULL;
+ }
+
+ /* Keep the original DID */
+ did = ndlp->nlp_DID;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
+
+ /* re-initialize ndlp except of ndlp linked list pointer */
+ memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+ sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+ lpfc_initialize_node(vport, ndlp, did);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0008 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ }
+
+
+ if (state != NLP_STE_UNUSED_NODE)
+ lpfc_nlp_set_state(vport, ndlp, state);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node enable: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+ return ndlp;
+}
+
+void
+lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ /*
+ * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
+ * be used if we wish to issue the "last" lpfc_nlp_put() to remove
+ * the ndlp from the vport. The ndlp marked as UNUSED on the list
+ * until ALL other outstanding threads have completed. We check
+ * that the ndlp not already in the UNUSED state before we proceed.
+ */
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ return;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
+ } else {
+ lpfc_nlp_put(ndlp);
+ }
+ return;
+}
+
+/*
+ * Start / ReStart rescue timer for Discovery / RSCN handling
+ */
+void
+lpfc_set_disctmo(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo;
+
+ if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
+ /* For FAN, timeout should be greater than edtov */
+ tmo = (((phba->fc_edtov + 999) / 1000) + 1);
+ } else {
+ /* Normal discovery timeout should be > than ELS/CT timeout
+ * FC spec states we need 3 * ratov for CT requests
+ */
+ tmo = ((phba->fc_ratov * 3) + 3);
+ }
+
+
+ if (!timer_pending(&vport->fc_disctmo)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "set disc timer: tmo:x%x state:x%x flg:x%x",
+ tmo, vport->port_state, vport->fc_flag);
+ }
+
+ mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_TMO;
+ spin_unlock_irq(shost->host_lock);
+
+ /* Start Discovery Timer state <hba_state> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0247 Start Discovery Timer state x%x "
+ "Data: x%x x%lx x%x x%x\n",
+ vport->port_state, tmo,
+ (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
+
+ return;
+}
+
+/*
+ * Cancel rescue timer for Discovery / RSCN handling
+ */
+int
+lpfc_can_disctmo(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "can disc timer: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+ /* Turn off discovery timer if its running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ spin_lock_irqsave(shost->host_lock, iflags);
+ vport->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ del_timer_sync(&vport->fc_disctmo);
+ spin_lock_irqsave(&vport->work_port_lock, iflags);
+ vport->work_port_events &= ~WORKER_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflags);
+ }
+
+ /* Cancel Discovery Timer state <hba_state> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0248 Cancel Discovery Timer state x%x "
+ "Data: x%x x%x x%x\n",
+ vport->port_state, vport->fc_flag,
+ vport->fc_plogi_cnt, vport->fc_adisc_cnt);
+ return 0;
+}
+
+/*
+ * Check specified ring for outstanding IOCB on the SLI queue
+ * Return true if iocb matches the specified nport
+ */
+int
+lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *iocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ IOCB_t *icmd = &iocb->iocb;
+ struct lpfc_vport *vport = ndlp->vport;
+
+ if (iocb->vport != vport)
+ return 0;
+
+ if (pring->ringno == LPFC_ELS_RING) {
+ switch (icmd->ulpCommand) {
+ case CMD_GEN_REQUEST64_CR:
+ if (iocb->context_un.ndlp == ndlp)
+ return 1;
+ case CMD_ELS_REQUEST64_CR:
+ if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ return 1;
+ case CMD_XMIT_ELS_RSP64_CX:
+ if (iocb->context1 == (uint8_t *) ndlp)
+ return 1;
+ }
+ } else if (pring->ringno == psli->extra_ring) {
+
+ } else if (pring->ringno == psli->fcp_ring) {
+ /* Skip match check if waiting to relogin to FCP target */
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ return 0;
+ }
+ if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+ return 1;
+ }
+ } else if (pring->ringno == psli->next_ring) {
+
+ }
+ return 0;
+}
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with nlp_rpi in the LPFC_NODELIST entry.
+ */
+static int
+lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ uint32_t i;
+
+ lpfc_fabric_abort_nport(ndlp);
+
+ /*
+ * Everything that matches on txcmplq will be returned
+ * by firmware with a no rpi error.
+ */
+ psli = &phba->sli;
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ /* Now process each ring */
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
+ list) {
+ /*
+ * Check to see if iocb matches the nport we are
+ * looking for
+ */
+ if ((lpfc_check_sli_ndlp(phba, pring, iocb,
+ ndlp))) {
+ /* It matches, so deque and call compl
+ with an error */
+ list_move_tail(&iocb->list,
+ &completions);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+ }
+ }
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+
+ return 0;
+}
+
+/**
+ * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function will issue an ELS LOGO command after completing
+ * the UNREG_RPI.
+ **/
+static void
+lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *)(pmb->context1);
+ if (!ndlp)
+ return;
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/*
+ * Free rpi associated with LPFC_NODELIST entry.
+ * This routine is called from lpfc_freenode(), when we are removing
+ * a LPFC_NODELIST entry. It is also called if the driver initiates a
+ * LOGO that completes successfully, and we are waiting to PLOGI back
+ * to the remote NPort. In addition, it is called after we receive
+ * and unsolicated ELS cmd, send back a rsp, the rsp completes and
+ * we are waiting to PLOGI back to the remote NPort.
+ */
+int
+lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+ uint16_t rpi;
+
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+ ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "3366 RPI x%x needs to be "
+ "unregistered nlp_flag x%x "
+ "did x%x\n",
+ ndlp->nlp_rpi, ndlp->nlp_flag,
+ ndlp->nlp_DID);
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ /* SLI4 ports require the physical rpi value. */
+ rpi = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+ lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
+ mbox->vport = vport;
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->context1 = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ mbox->context1 = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl =
+ lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else
+ mbox->mbox_cmpl =
+ lpfc_sli_def_mbox_cmpl;
+ }
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ lpfc_no_rpi(phba, ndlp);
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ ndlp->nlp_rpi = 0;
+ ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unregister all the currently registered RPIs
+ * to the HBA.
+ **/
+void
+lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2884 Vport array allocation failed \n");
+ return;
+ }
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ /* The mempool_alloc might sleep */
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vports[i], ndlp);
+ spin_lock_irq(shost->host_lock);
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_unreg_all_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli4_unreg_all_rpis(vport);
+ return;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+ mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+ if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "1836 Could not issue "
+ "unreg_login(all_rpis) status %d\n", rc);
+ }
+}
+
+void
+lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+ mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+ if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "1815 Could not issue "
+ "unreg_did (default rpis) status %d\n",
+ rc);
+ }
+}
+
+/*
+ * Free resources associated with LPFC_NODELIST entry
+ * so it can be freed.
+ */
+static int
+lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
+ struct lpfc_dmabuf *mp;
+
+ /* Cleanup node for NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0900 Cleanup node for NPort x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0280 lpfc_cleanup_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ lpfc_dequeue_node(vport, ndlp);
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0281 lpfc_cleanup_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ lpfc_disable_node(vport, ndlp);
+ }
+
+
+ /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
+
+ /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+ if ((mb = phba->sli.mbox_active)) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ mb->context2 = NULL;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ }
+
+ spin_lock_irq(&phba->hbalock);
+ /* Cleanup REG_LOGIN completions which are not yet processed */
+ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+ (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
+ (ndlp != (struct lpfc_nodelist *) mb->context2))
+ continue;
+
+ mb->context2 = NULL;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mb->list);
+ mempool_free(mb, phba->mbox_mem_pool);
+ /* We shall not invoke the lpfc_nlp_put to decrement
+ * the ndlp reference count as we are in the process
+ * of lpfc_nlp_release.
+ */
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_els_abort(phba, ndlp);
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+
+ ndlp->nlp_last_elscmd = 0;
+ del_timer_sync(&ndlp->nlp_delayfunc);
+
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+ list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
+
+ return 0;
+}
+
+/*
+ * Check to see if we can free the nlp back to the freelist.
+ * If we are in the middle of using the nlp in the discovery state
+ * machine, defer the free till we reach the end of the state machine.
+ */
+static void
+lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_rport_data *rdata;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+ !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+ !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ /* For this case we need to cleanup the default rpi
+ * allocated by the firmware.
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
+ != NULL) {
+ rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
+ (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
+ if (rc) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ else {
+ mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ mbox->vport = vport;
+ mbox->context2 = ndlp;
+ rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+ }
+ }
+ lpfc_cleanup_node(vport, ndlp);
+
+ /*
+ * We can get here with a non-NULL ndlp->rport because when we
+ * unregister a rport we don't break the rport/node linkage. So if we
+ * do, make sure we don't leaving any dangling pointers behind.
+ */
+ if (ndlp->rport) {
+ rdata = ndlp->rport->dd_data;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ }
+}
+
+static int
+lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
+{
+ D_ID mydid, ndlpdid, matchdid;
+
+ if (did == Bcast_DID)
+ return 0;
+
+ /* First check for Direct match */
+ if (ndlp->nlp_DID == did)
+ return 1;
+
+ /* Next check for area/domain identically equals 0 match */
+ mydid.un.word = vport->fc_myDID;
+ if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
+ return 0;
+ }
+
+ matchdid.un.word = did;
+ ndlpdid.un.word = ndlp->nlp_DID;
+ if (matchdid.un.b.id == ndlpdid.un.b.id) {
+ if ((mydid.un.b.domain == matchdid.un.b.domain) &&
+ (mydid.un.b.area == matchdid.un.b.area)) {
+ if ((ndlpdid.un.b.domain == 0) &&
+ (ndlpdid.un.b.area == 0)) {
+ if (ndlpdid.un.b.id)
+ return 1;
+ }
+ return 0;
+ }
+
+ matchdid.un.word = ndlp->nlp_DID;
+ if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
+ (mydid.un.b.area == ndlpdid.un.b.area)) {
+ if ((matchdid.un.b.domain == 0) &&
+ (matchdid.un.b.area == 0)) {
+ if (matchdid.un.b.id)
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Search for a nodelist entry */
+static struct lpfc_nodelist *
+__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+{
+ struct lpfc_nodelist *ndlp;
+ uint32_t data1;
+
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (lpfc_matchdid(vport, ndlp, did)) {
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0929 FIND node DID "
+ "Data: x%p x%x x%x x%x %p\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1,
+ ndlp->active_rrqs_xri_bitmap);
+ return ndlp;
+ }
+ }
+
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+ return NULL;
+}
+
+struct lpfc_nodelist *
+lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
+
+ spin_lock_irqsave(shost->host_lock, iflags);
+ ndlp = __lpfc_findnode_did(vport, did);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ return ndlp;
+}
+
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp) {
+ if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+ lpfc_rscn_payload_check(vport, did) == 0)
+ return NULL;
+ ndlp = (struct lpfc_nodelist *)
+ mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return NULL;
+ lpfc_nlp_init(vport, ndlp, did);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+ if (!ndlp)
+ return NULL;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+ }
+
+ if ((vport->fc_flag & FC_RSCN_MODE) &&
+ !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+ if (lpfc_rscn_payload_check(vport, did)) {
+ /* If we've already received a PLOGI from this NPort
+ * we don't need to try to discover it again.
+ */
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ return NULL;
+
+ /* Since this node is marked for discovery,
+ * delay timeout is not needed.
+ */
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ } else
+ ndlp = NULL;
+ } else {
+ /* If we've already received a PLOGI from this NPort,
+ * or we are already in the process of discovery on it,
+ * we don't need to try to discover it again.
+ */
+ if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
+ ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_flag & NLP_RCV_PLOGI)
+ return NULL;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ }
+ return ndlp;
+}
+
+/* Build a list of nodes to discover based on the loopmap */
+void
+lpfc_disc_list_loopmap(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int j;
+ uint32_t alpa, index;
+
+ if (!lpfc_is_link_up(phba))
+ return;
+
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+ return;
+
+ /* Check for loop map present or not */
+ if (phba->alpa_map[0]) {
+ for (j = 1; j <= phba->alpa_map[0]; j++) {
+ alpa = phba->alpa_map[j];
+ if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
+ continue;
+ lpfc_setup_disc_node(vport, alpa);
+ }
+ } else {
+ /* No alpamap, so try all alpa's */
+ for (j = 0; j < FC_MAXLOOP; j++) {
+ /* If cfg_scan_down is set, start from highest
+ * ALPA (0xef) to lowest (0x1).
+ */
+ if (vport->cfg_scan_down)
+ index = j;
+ else
+ index = FC_MAXLOOP - j - 1;
+ alpa = lpfcAlpaArray[index];
+ if ((vport->fc_myDID & 0xff) == alpa)
+ continue;
+ lpfc_setup_disc_node(vport, alpa);
+ }
+ }
+ return;
+}
+
+void
+lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
+ struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
+ struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
+ int rc;
+
+ /*
+ * if it's not a physical port or if we already send
+ * clear_la then don't send it.
+ */
+ if ((phba->link_state >= LPFC_CLEAR_LA) ||
+ (vport->port_type != LPFC_PHYSICAL_PORT) ||
+ (phba->sli_rev == LPFC_SLI_REV4))
+ return;
+
+ /* Link up discovery */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
+ phba->link_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_disc_flush_list(vport);
+ extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ phba->link_state = LPFC_HBA_ERROR;
+ }
+ }
+}
+
+/* Reg_vpi to tell firmware to resume normal operations */
+void
+lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *regvpimbox;
+
+ regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (regvpimbox) {
+ lpfc_reg_vpi(vport, regvpimbox);
+ regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
+ regvpimbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
+ == MBX_NOT_FINISHED) {
+ mempool_free(regvpimbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
+/* Start Link up / RSCN discovery on NPR nodes */
+void
+lpfc_disc_start(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t num_sent;
+ uint32_t clear_la_pending;
+
+ if (!lpfc_is_link_up(phba)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "3315 Link is not up %x\n",
+ phba->link_state);
+ return;
+ }
+
+ if (phba->link_state == LPFC_CLEAR_LA)
+ clear_la_pending = 1;
+ else
+ clear_la_pending = 0;
+
+ if (vport->port_state < LPFC_VPORT_READY)
+ vport->port_state = LPFC_DISC_AUTH;
+
+ lpfc_set_disctmo(vport);
+
+ vport->fc_prevDID = vport->fc_myDID;
+ vport->num_disc_nodes = 0;
+
+ /* Start Discovery state <hba_state> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0202 Start Discovery hba state x%x "
+ "Data: x%x x%x x%x\n",
+ vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
+
+ /* First do ADISCs - if any */
+ num_sent = lpfc_els_disc_adisc(vport);
+
+ if (num_sent)
+ return;
+
+ /* Register the VPI for SLI3, NPIV only. */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_RSCN_MODE) &&
+ (phba->sli_rev < LPFC_SLI_REV4)) {
+ lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_reg_vpi(phba, vport);
+ return;
+ }
+
+ /*
+ * For SLI2, we need to set port_state to READY and continue
+ * discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
+ /* If we get here, there is nothing to ADISC */
+ lpfc_issue_clear_la(phba, vport);
+
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ }
+ }
+ vport->port_state = LPFC_VPORT_READY;
+ } else {
+ /* Next do PLOGIs - if any */
+ num_sent = lpfc_els_disc_plogi(vport);
+
+ if (num_sent)
+ return;
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ /* Check to see if more RSCNs came in while we
+ * were processing this one.
+ */
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ } else
+ lpfc_els_handle_rscn(vport);
+ }
+ }
+ return;
+}
+
+/*
+ * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
+ * ring the match the sppecified nodelist.
+ */
+static void
+lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_sli_ring *pring;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ /* Error matching iocb on txq or txcmplq
+ * First check the txq.
+ */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ if (iocb->context1 != ndlp) {
+ continue;
+ }
+ icmd = &iocb->iocb;
+ if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+ (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ list_move_tail(&iocb->list, &completions);
+ }
+ }
+
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ if (iocb->context1 != ndlp) {
+ continue;
+ }
+ icmd = &iocb->iocb;
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
+ icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+static void
+lpfc_disc_flush_list(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
+ lpfc_free_tx(phba, ndlp);
+ }
+ }
+ }
+}
+
+void
+lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+{
+ lpfc_els_flush_rscn(vport);
+ lpfc_els_flush_cmd(vport);
+ lpfc_disc_flush_list(vport);
+}
+
+/*****************************************************************************/
+/*
+ * NAME: lpfc_disc_timeout
+ *
+ * FUNCTION: Fibre Channel driver discovery timeout routine.
+ *
+ * EXECUTION ENVIRONMENT: interrupt only
+ *
+ * CALLED FROM:
+ * Timer function
+ *
+ * RETURNS:
+ * none
+ */
+/*****************************************************************************/
+void
+lpfc_disc_timeout(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long flags = 0;
+
+ if (unlikely(!phba))
+ return;
+
+ spin_lock_irqsave(&vport->work_port_lock, flags);
+ tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, flags);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+static void
+lpfc_disc_timeout_handler(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ LPFC_MBOXQ_t *initlinkmbox;
+ int rc, clrlaerr = 0;
+
+ if (!(vport->fc_flag & FC_DISC_TMO))
+ return;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "disc timeout: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+ switch (vport->port_state) {
+
+ case LPFC_LOCAL_CFG_LINK:
+ /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
+ * FAN
+ */
+ /* FAN timeout */
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+ "0221 FAN timeout\n");
+ /* Start discovery by sending FLOGI, clean up old rpis */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state != NLP_STE_NPR_NODE)
+ continue;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ /* Clean up the ndlp on Fabric connections */
+ lpfc_drop_node(vport, ndlp);
+
+ } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ /* Fail outstanding IO now since device
+ * is marked for PLOGI.
+ */
+ lpfc_unreg_rpi(vport, ndlp);
+ }
+ }
+ if (vport->port_state != LPFC_FLOGI) {
+ if (phba->sli_rev <= LPFC_SLI_REV3)
+ lpfc_initial_flogi(vport);
+ else
+ lpfc_issue_init_vfi(vport);
+ return;
+ }
+ break;
+
+ case LPFC_FDISC:
+ case LPFC_FLOGI:
+ /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+ /* Initial FLOGI timeout */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0222 Initial %s timeout\n",
+ vport->vpi ? "FDISC" : "FLOGI");
+
+ /* Assume no Fabric and go on with discovery.
+ * Check for outstanding ELS FLOGI to abort.
+ */
+
+ /* FLOGI failed, so just use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ break;
+
+ case LPFC_FABRIC_CFG_LINK:
+ /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
+ NameServer login */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0223 Timeout while waiting for "
+ "NameServer login\n");
+ /* Next look for NameServer ndlp */
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ lpfc_els_abort(phba, ndlp);
+
+ /* ReStart discovery */
+ goto restart_disc;
+
+ case LPFC_NS_QRY:
+ /* Check for wait for NameServer Rsp timeout */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0224 NameServer Query timeout "
+ "Data: x%x x%x\n",
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ /* Try it one more time */
+ vport->fc_ns_retry++;
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+ vport->fc_ns_retry, 0);
+ if (rc == 0)
+ break;
+ }
+ vport->fc_ns_retry = 0;
+
+restart_disc:
+ /*
+ * Discovery is over.
+ * set port_state to PORT_READY if SLI2.
+ * cmpl_reg_vpi will set port_state to READY for SLI3.
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else {
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
+ }
+ }
+
+ /* Setup and issue mailbox INITIALIZE LINK command */
+ initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!initlinkmbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0206 Device Discovery "
+ "completion error\n");
+ phba->link_state = LPFC_HBA_ERROR;
+ break;
+ }
+
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
+ phba->cfg_link_speed);
+ initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ initlinkmbox->vport = vport;
+ initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(initlinkmbox, phba->mbox_mem_pool);
+
+ break;
+
+ case LPFC_DISC_AUTH:
+ /* Node Authentication timeout */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0227 Node Authentication timeout\n");
+ lpfc_disc_flush_list(vport);
+
+ /*
+ * set port_state to PORT_READY if SLI2.
+ * cmpl_reg_vpi will set port_state to READY for SLI3.
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
+ }
+ }
+ break;
+
+ case LPFC_VPORT_READY:
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0231 RSCN timeout Data: x%x "
+ "x%x\n",
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(vport);
+
+ lpfc_els_flush_rscn(vport);
+ lpfc_disc_flush_list(vport);
+ }
+ break;
+
+ default:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0273 Unexpected discovery timeout, "
+ "vport State x%x\n", vport->port_state);
+ break;
+ }
+
+ switch (phba->link_state) {
+ case LPFC_CLEAR_LA:
+ /* CLEAR LA timeout */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0228 CLEAR LA timeout\n");
+ clrlaerr = 1;
+ break;
+
+ case LPFC_LINK_UP:
+ lpfc_issue_clear_la(phba, vport);
+ /* Drop thru */
+ case LPFC_LINK_UNKNOWN:
+ case LPFC_WARM_START:
+ case LPFC_INIT_START:
+ case LPFC_INIT_MBX_CMDS:
+ case LPFC_LINK_DOWN:
+ case LPFC_HBA_ERROR:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0230 Unexpected timeout, hba link "
+ "state x%x\n", phba->link_state);
+ clrlaerr = 1;
+ break;
+
+ case LPFC_HBA_READY:
+ break;
+ }
+
+ if (clrlaerr) {
+ lpfc_disc_flush_list(vport);
+ psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ vport->port_state = LPFC_VPORT_READY;
+ }
+
+ return;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_vport *vport = pmb->vport;
+
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+ /*
+ * Start issuing Fabric-Device Management Interface (FDMI) command to
+ * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
+ * fdmi-on=2 (supporting RPA/hostnmae)
+ */
+
+ if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY)
+ mod_timer(&vport->fc_fdmitmo,
+ jiffies + msecs_to_jiffies(1000 * 60));
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+
+ /* decrement the node reference count held for this callback
+ * function.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+static int
+lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
+{
+ uint16_t *rpi = param;
+
+ /* check for active node */
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ return 0;
+
+ return ndlp->nlp_rpi == *rpi;
+}
+
+static int
+lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
+{
+ return memcmp(&ndlp->nlp_portname, param,
+ sizeof(ndlp->nlp_portname)) == 0;
+}
+
+static struct lpfc_nodelist *
+__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
+{
+ struct lpfc_nodelist *ndlp;
+
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (filter(ndlp, param)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "3185 FIND node filter %p DID "
+ "Data: x%p x%x x%x\n",
+ filter, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag);
+ return ndlp;
+ }
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "3186 FIND node filter %p NOT FOUND.\n", filter);
+ return NULL;
+}
+
+/*
+ * This routine looks up the ndlp lists for the given RPI. If rpi found it
+ * returns the node list element pointer else return NULL.
+ */
+struct lpfc_nodelist *
+__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+ return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
+}
+
+/*
+ * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
+ * returns the node element list pointer else return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
+
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer else
+ * return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_rpi(vport, rpi);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ * NULL - No vport with the matching @vpi found
+ * Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+ struct lpfc_vport *vport;
+ unsigned long flags;
+ int i = 0;
+
+ /* The physical ports are always vpi 0 - translate is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->vpi == i) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return vport;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return NULL;
+}
+
+void
+lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
+{
+ memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+
+ lpfc_initialize_node(vport, ndlp, did);
+ INIT_LIST_HEAD(&ndlp->nlp_listp);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0007 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount),
+ ndlp->nlp_usg_map, ndlp);
+
+ ndlp->active_rrqs_xri_bitmap =
+ mempool_alloc(vport->phba->active_rrq_pool,
+ GFP_KERNEL);
+ if (ndlp->active_rrqs_xri_bitmap)
+ memset(ndlp->active_rrqs_xri_bitmap, 0,
+ ndlp->phba->cfg_rrq_xri_bitmap_sz);
+ }
+
+
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node init: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ return;
+}
+
+/* This routine releases all resources associated with a specifc NPort's ndlp
+ * and mempool_free's the nodelist.
+ */
+static void
+lpfc_nlp_release(struct kref *kref)
+{
+ struct lpfc_hba *phba;
+ unsigned long flags;
+ struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
+ kref);
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node release: did:x%x flg:x%x type:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "0279 lpfc_nlp_release: ndlp:x%p did %x "
+ "usgmap:x%x refcnt:%d rpi:%x\n",
+ (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
+
+ /* remove ndlp from action. */
+ lpfc_nlp_remove(ndlp->vport, ndlp);
+
+ /* clear the ndlp active flag for all release cases */
+ phba = ndlp->phba;
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ NLP_CLR_NODE_ACT(ndlp);
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+
+ /* free ndlp memory for final ndlp release */
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ kfree(ndlp->lat_data);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
+ mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
+ }
+}
+
+/* This routine bumps the reference count for a ndlp structure to ensure
+ * that one discovery thread won't free a ndlp while another discovery thread
+ * is using it.
+ */
+struct lpfc_nodelist *
+lpfc_nlp_get(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba;
+ unsigned long flags;
+
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node get: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
+ /* The check of ndlp usage to prevent incrementing the
+ * ndlp reference count that is in the process of being
+ * released.
+ */
+ phba = ndlp->phba;
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+ "0276 lpfc_nlp_get: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return NULL;
+ } else
+ kref_get(&ndlp->kref);
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ }
+ return ndlp;
+}
+
+/* This routine decrements the reference count for a ndlp structure. If the
+ * count goes to 0, this indicates the the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released; on the
+ * other hand, returning 0 indicates the ndlp resource has not been released
+ * yet.
+ */
+int
+lpfc_nlp_put(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba;
+ unsigned long flags;
+
+ if (!ndlp)
+ return 1;
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
+ phba = ndlp->phba;
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ /* Check the ndlp memory free acknowledge flag to avoid the
+ * possible race condition that kref_put got invoked again
+ * after previous one has done ndlp memory free.
+ */
+ if (NLP_CHK_FREE_ACK(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+ "0274 lpfc_nlp_put: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return 1;
+ }
+ /* Check the ndlp inactivate log flag to avoid the possible
+ * race condition that kref_put got invoked again after ndlp
+ * is already in inactivating state.
+ */
+ if (NLP_CHK_IACT_REQ(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+ "0275 lpfc_nlp_put: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return 1;
+ }
+ /* For last put, mark the ndlp usage flags to make sure no
+ * other kref_get and kref_put on the same ndlp shall get
+ * in between the process when the final kref_put has been
+ * invoked on this ndlp.
+ */
+ if (atomic_read(&ndlp->kref.refcount) == 1) {
+ /* Indicate ndlp is put to inactive state. */
+ NLP_SET_IACT_REQ(ndlp);
+ /* Acknowledge ndlp memory free has been seen. */
+ if (NLP_CHK_FREE_REQ(ndlp))
+ NLP_SET_FREE_ACK(ndlp);
+ }
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* Note, the kref_put returns 1 when decrementing a reference
+ * count that was 1, it invokes the release callback function,
+ * but it still left the reference count as 1 (not actually
+ * performs the last decrementation). Otherwise, it actually
+ * decrements the reference count and returns 0.
+ */
+ return kref_put(&ndlp->kref, lpfc_nlp_release);
+}
+
+/* This routine free's the specified nodelist if it is not in use
+ * by any other discovery thread. This routine returns 1 if the
+ * ndlp has been freed. A return value of 0 indicates the ndlp is
+ * not yet been released.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node not used: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
+ if (atomic_read(&ndlp->kref.refcount) == 1)
+ if (lpfc_nlp_put(ndlp))
+ return 1;
+ return 0;
+}
+
+/**
+ * lpfc_fcf_inuse - Check if FCF can be unregistered.
+ * @phba: Pointer to hba context object.
+ *
+ * This function iterate through all FC nodes associated
+ * will all vports to check if there is any node with
+ * fc_rports associated with it. If there is an fc_rport
+ * associated with the node, then the node is either in
+ * discovered state or its devloss_timer is pending.
+ */
+static int
+lpfc_fcf_inuse(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i, ret = 0;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+
+ vports = lpfc_create_vport_work_array(phba);
+
+ /* If driver cannot allocate memory, indicate fcf is in use */
+ if (!vports)
+ return 1;
+
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ /*
+ * IF the CVL_RCVD bit is not set then we have sent the
+ * flogi.
+ * If dev_loss fires while we are waiting we do not want to
+ * unreg the fcf.
+ */
+ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+ spin_unlock_irq(shost->host_lock);
+ ret = 1;
+ goto out;
+ }
+ list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+ if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+ (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ ret = 1;
+ spin_unlock_irq(shost->host_lock);
+ goto out;
+ } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ ret = 1;
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "2624 RPI %x DID %x flag %x "
+ "still logged in\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag);
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+ }
+out:
+ lpfc_destroy_vport_work_array(phba, vports);
+ return ret;
+}
+
+/**
+ * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+void
+lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2555 UNREG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ }
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2550 UNREG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_unregister_fcf_prep - Unregister fcf record preparation
+ * @phba: Pointer to hba context object.
+ *
+ * This function prepare the HBA for unregistering the currently registered
+ * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
+ * VFIs.
+ */
+int
+lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ int i = 0, rc;
+
+ /* Unregister RPIs */
+ if (lpfc_fcf_inuse(phba))
+ lpfc_unreg_hba_rpis(phba);
+
+ /* At this point, all discovery is aborted */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+ /* Unregister VPIs */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ /* Stop FLOGI/FDISC retries */
+ ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+ if (ndlp)
+ lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+ lpfc_cleanup_pending_mbox(vports[i]);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vports[i]);
+ lpfc_mbx_unreg_vpi(vports[i]);
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp)
+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+ lpfc_cleanup_pending_mbox(phba->pport);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(phba->pport);
+ lpfc_mbx_unreg_vpi(phba->pport);
+ shost = lpfc_shost_from_vport(phba->pport);
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
+ /* Unregister the physical port VFI */
+ rc = lpfc_issue_unreg_vfi(phba->pport);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
+ * @phba: Pointer to hba context object.
+ *
+ * This function issues synchronous unregister FCF mailbox command to HBA to
+ * unregister the currently registered FCF record. The driver does not reset
+ * the driver FCF usage state flags.
+ *
+ * Return 0 if successfully issued, none-zero otherwise.
+ */
+int
+lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2551 UNREG_FCFI mbox allocation failed"
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
+ }
+ lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2552 Unregister FCFI command failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
+ * @phba: Pointer to hba context object.
+ *
+ * This function unregisters the currently reigstered FCF. This function
+ * also tries to find another FCF for discovery by rescan the HBA FCF table.
+ */
+void
+lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
+{
+ int rc;
+
+ /* Preparation for unregistering fcf */
+ rc = lpfc_unregister_fcf_prep(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2748 Failed to prepare for unregistering "
+ "HBA's FCF record: rc=%d\n", rc);
+ return;
+ }
+
+ /* Now, unregister FCF record and reset HBA FCF state */
+ rc = lpfc_sli4_unregister_fcf(phba);
+ if (rc)
+ return;
+ /* Reset HBA FCF states after successful unregister FCF */
+ phba->fcf.fcf_flag = 0;
+ phba->fcf.current_rec.flag = 0;
+
+ /*
+ * If driver is not unloading, check if there is any other
+ * FCF record that can be used for discovery.
+ */
+ if ((phba->pport->load_flag & FC_UNLOADING) ||
+ (phba->link_state < LPFC_LINK_UP))
+ return;
+
+ /* This is considered as the initial FCF discovery scan */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Reset FCF roundrobin bmask for new discovery */
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2553 lpfc_unregister_unused_fcf failed "
+ "to read FCF record HBA state x%x\n",
+ phba->pport->port_state);
+ }
+}
+
+/**
+ * lpfc_unregister_fcf - Unregister the currently registered fcf record
+ * @phba: Pointer to hba context object.
+ *
+ * This function just unregisters the currently reigstered FCF. It does not
+ * try to find another FCF for discovery.
+ */
+void
+lpfc_unregister_fcf(struct lpfc_hba *phba)
+{
+ int rc;
+
+ /* Preparation for unregistering fcf */
+ rc = lpfc_unregister_fcf_prep(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2749 Failed to prepare for unregistering "
+ "HBA's FCF record: rc=%d\n", rc);
+ return;
+ }
+
+ /* Now, unregister FCF record and reset HBA FCF state */
+ rc = lpfc_sli4_unregister_fcf(phba);
+ if (rc)
+ return;
+ /* Set proper HBA FCF states after successful unregister FCF */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function check if there are any connected remote port for the FCF and
+ * if all the devices are disconnected, this function unregister FCFI.
+ * This function also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+ /*
+ * If HBA is not running in FIP mode, if HBA does not support
+ * FCoE, if FCF discovery is ongoing, or if FCF has not been
+ * registered, do nothing.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (!(phba->hba_flag & HBA_FCOE_MODE) ||
+ !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+ !(phba->hba_flag & HBA_FIP_SUPPORT) ||
+ (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
+ (phba->pport->port_state == LPFC_FLOGI)) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ if (lpfc_fcf_inuse(phba))
+ return;
+
+ lpfc_unregister_fcf_rescan(phba);
+}
+
+/**
+ * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCF connection table as in the config
+ * region.
+ * This function create driver data structure for the FCF connection
+ * record table read from config region 23.
+ */
+static void
+lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
+ uint8_t *buff)
+{
+ struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+ struct lpfc_fcf_conn_hdr *conn_hdr;
+ struct lpfc_fcf_conn_rec *conn_rec;
+ uint32_t record_count;
+ int i;
+
+ /* Free the current connect table */
+ list_for_each_entry_safe(conn_entry, next_conn_entry,
+ &phba->fcf_conn_rec_list, list) {
+ list_del_init(&conn_entry->list);
+ kfree(conn_entry);
+ }
+
+ conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
+ record_count = conn_hdr->length * sizeof(uint32_t)/
+ sizeof(struct lpfc_fcf_conn_rec);
+
+ conn_rec = (struct lpfc_fcf_conn_rec *)
+ (buff + sizeof(struct lpfc_fcf_conn_hdr));
+
+ for (i = 0; i < record_count; i++) {
+ if (!(conn_rec[i].flags & FCFCNCT_VALID))
+ continue;
+ conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
+ GFP_KERNEL);
+ if (!conn_entry) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2566 Failed to allocate connection"
+ " table entry\n");
+ return;
+ }
+
+ memcpy(&conn_entry->conn_rec, &conn_rec[i],
+ sizeof(struct lpfc_fcf_conn_rec));
+ list_add_tail(&conn_entry->list,
+ &phba->fcf_conn_rec_list);
+ }
+
+ if (!list_empty(&phba->fcf_conn_rec_list)) {
+ i = 0;
+ list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
+ list) {
+ conn_rec = &conn_entry->conn_rec;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3345 FCF connection list rec[%02d]: "
+ "flags:x%04x, vtag:x%04x, "
+ "fabric_name:x%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x, "
+ "switch_name:x%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x\n", i++,
+ conn_rec->flags, conn_rec->vlan_tag,
+ conn_rec->fabric_name[0],
+ conn_rec->fabric_name[1],
+ conn_rec->fabric_name[2],
+ conn_rec->fabric_name[3],
+ conn_rec->fabric_name[4],
+ conn_rec->fabric_name[5],
+ conn_rec->fabric_name[6],
+ conn_rec->fabric_name[7],
+ conn_rec->switch_name[0],
+ conn_rec->switch_name[1],
+ conn_rec->switch_name[2],
+ conn_rec->switch_name[3],
+ conn_rec->switch_name[4],
+ conn_rec->switch_name[5],
+ conn_rec->switch_name[6],
+ conn_rec->switch_name[7]);
+ }
+ }
+}
+
+/**
+ * lpfc_read_fcoe_param - Read FCoe parameters from conf region..
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCoE parameter data structure.
+ *
+ * This function update driver data structure with config
+ * parameters read from config region 23.
+ */
+static void
+lpfc_read_fcoe_param(struct lpfc_hba *phba,
+ uint8_t *buff)
+{
+ struct lpfc_fip_param_hdr *fcoe_param_hdr;
+ struct lpfc_fcoe_params *fcoe_param;
+
+ fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
+ buff;
+ fcoe_param = (struct lpfc_fcoe_params *)
+ (buff + sizeof(struct lpfc_fip_param_hdr));
+
+ if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
+ (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
+ return;
+
+ if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
+ phba->valid_vlan = 1;
+ phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
+ 0xFFF;
+ }
+
+ phba->fc_map[0] = fcoe_param->fc_map[0];
+ phba->fc_map[1] = fcoe_param->fc_map[1];
+ phba->fc_map[2] = fcoe_param->fc_map[2];
+ return;
+}
+
+/**
+ * lpfc_get_rec_conf23 - Get a record type in config region data.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ * @rec_type: Record type to be searched.
+ *
+ * This function searches config region data to find the beginning
+ * of the record specified by record_type. If record found, this
+ * function return pointer to the record else return NULL.
+ */
+static uint8_t *
+lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
+{
+ uint32_t offset = 0, rec_length;
+
+ if ((buff[0] == LPFC_REGION23_LAST_REC) ||
+ (size < sizeof(uint32_t)))
+ return NULL;
+
+ rec_length = buff[offset + 1];
+
+ /*
+ * One TLV record has one word header and number of data words
+ * specified in the rec_length field of the record header.
+ */
+ while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
+ <= size) {
+ if (buff[offset] == rec_type)
+ return &buff[offset];
+
+ if (buff[offset] == LPFC_REGION23_LAST_REC)
+ return NULL;
+
+ offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
+ rec_length = buff[offset + 1];
+ }
+ return NULL;
+}
+
+/**
+ * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
+ * @phba: Pointer to lpfc_hba data structure.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ *
+ * This function parses the FCoE config parameters in config region 23 and
+ * populate driver data structure with the parameters.
+ */
+void
+lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
+ uint8_t *buff,
+ uint32_t size)
+{
+ uint32_t offset = 0;
+ uint8_t *rec_ptr;
+
+ /*
+ * If data size is less than 2 words signature and version cannot be
+ * verified.
+ */
+ if (size < 2*sizeof(uint32_t))
+ return;
+
+ /* Check the region signature first */
+ if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2567 Config region 23 has bad signature\n");
+ return;
+ }
+
+ offset += 4;
+
+ /* Check the data structure version */
+ if (buff[offset] != LPFC_REGION23_VERSION) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2568 Config region 23 has bad version\n");
+ return;
+ }
+ offset += 4;
+
+ /* Read FCoE param record */
+ rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+ size - offset, FCOE_PARAM_TYPE);
+ if (rec_ptr)
+ lpfc_read_fcoe_param(phba, rec_ptr);
+
+ /* Read FCF connection table */
+ rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+ size - offset, FCOE_CONN_TBL_TYPE);
+ if (rec_ptr)
+ lpfc_read_fcf_conn_tbl(phba, rec_ptr);
+
+}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
new file mode 100644
index 000000000..37beb9dc1
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -0,0 +1,3834 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define FDMI_DID 0xfffffaU
+#define NameServer_DID 0xfffffcU
+#define SCR_DID 0xfffffdU
+#define Fabric_DID 0xfffffeU
+#define Bcast_DID 0xffffffU
+#define Mask_DID 0xffffffU
+#define CT_DID_MASK 0xffff00U
+#define Fabric_DID_MASK 0xfff000U
+#define WELL_KNOWN_DID_MASK 0xfffff0U
+
+#define PT2PT_LocalID 1
+#define PT2PT_RemoteID 2
+
+#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */
+#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */
+#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */
+#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */
+
+#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING
+ 0 */
+
+#define FCELSSIZE 1024 /* maximum ELS transfer size */
+
+#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
+#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
+#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
+#define LPFC_FCP_NEXT_RING 3
+#define LPFC_FCP_OAS_RING 3
+
+#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
+#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 extra command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 extra response ring entries */
+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
+#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
+#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 ELS response ring entries */
+#define SLI2_IOCB_CMD_R3_ENTRIES 0
+#define SLI2_IOCB_RSP_R3_ENTRIES 0
+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+
+#define SLI2_IOCB_CMD_SIZE 32
+#define SLI2_IOCB_RSP_SIZE 32
+#define SLI3_IOCB_CMD_SIZE 128
+#define SLI3_IOCB_RSP_SIZE 64
+
+#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
+
+/* vendor ID used in SCSI netlink calls */
+#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
+
+#define FW_REV_STR_SIZE 32
+/* Common Transport structures and definitions */
+
+union CtRevisionId {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t Revision:8;
+ uint32_t InId:24;
+ } bits;
+ uint32_t word;
+};
+
+union CtCommandResponse {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t CmdRsp:16;
+ uint32_t Size:16;
+ } bits;
+ uint32_t word;
+};
+
+#define FC4_FEATURE_INIT 0x2
+#define FC4_FEATURE_TARGET 0x1
+
+struct lpfc_sli_ct_request {
+ /* Structure is in Big Endian format */
+ union CtRevisionId RevisionId;
+ uint8_t FsType;
+ uint8_t FsSubType;
+ uint8_t Options;
+ uint8_t Rsrvd1;
+ union CtCommandResponse CommandResponse;
+ uint8_t Rsrvd2;
+ uint8_t ReasonCode;
+ uint8_t Explanation;
+ uint8_t VendorUnique;
+#define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */
+
+ union {
+ uint32_t PortID;
+ struct gid {
+ uint8_t PortType; /* for GID_PT requests */
+ uint8_t DomainScope;
+ uint8_t AreaScope;
+ uint8_t Fc4Type; /* for GID_FT requests */
+ } gid;
+ struct rft {
+ uint32_t PortId; /* For RFT_ID requests */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd0:16;
+ uint32_t rsvd1:7;
+ uint32_t fcpReg:1; /* Type 8 */
+ uint32_t rsvd2:2;
+ uint32_t ipReg:1; /* Type 5 */
+ uint32_t rsvd3:5;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rsvd0:16;
+ uint32_t fcpReg:1; /* Type 8 */
+ uint32_t rsvd1:7;
+ uint32_t rsvd3:5;
+ uint32_t ipReg:1; /* Type 5 */
+ uint32_t rsvd2:2;
+#endif
+
+ uint32_t rsvd[7];
+ } rft;
+ struct rnn {
+ uint32_t PortId; /* For RNN_ID requests */
+ uint8_t wwnn[8];
+ } rnn;
+ struct rsnn { /* For RSNN_ID requests */
+ uint8_t wwnn[8];
+ uint8_t len;
+ uint8_t symbname[255];
+ } rsnn;
+ struct da_id { /* For DA_ID requests */
+ uint32_t port_id;
+ } da_id;
+ struct rspn { /* For RSPN_ID requests */
+ uint32_t PortId;
+ uint8_t len;
+ uint8_t symbname[255];
+ } rspn;
+ struct gff {
+ uint32_t PortId;
+ } gff;
+ struct gff_acc {
+ uint8_t fbits[128];
+ } gff_acc;
+#define FCP_TYPE_FEATURE_OFFSET 7
+ struct rff {
+ uint32_t PortId;
+ uint8_t reserved[2];
+ uint8_t fbits;
+ uint8_t type_code; /* type=8 for FCP */
+ } rff;
+ } un;
+};
+
+#define LPFC_MAX_CT_SIZE (60 * 4096)
+
+#define SLI_CT_REVISION 1
+#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gid))
+#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gff))
+#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rft))
+#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rff))
+#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rnn))
+#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rsnn))
+#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct da_id))
+#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rspn))
+
+/*
+ * FsType Definitions
+ */
+
+#define SLI_CT_MANAGEMENT_SERVICE 0xFA
+#define SLI_CT_TIME_SERVICE 0xFB
+#define SLI_CT_DIRECTORY_SERVICE 0xFC
+#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
+
+/*
+ * Directory Service Subtypes
+ */
+
+#define SLI_CT_DIRECTORY_NAME_SERVER 0x02
+
+/*
+ * Response Codes
+ */
+
+#define SLI_CT_RESPONSE_FS_RJT 0x8001
+#define SLI_CT_RESPONSE_FS_ACC 0x8002
+
+/*
+ * Reason Codes
+ */
+
+#define SLI_CT_NO_ADDITIONAL_EXPL 0x0
+#define SLI_CT_INVALID_COMMAND 0x01
+#define SLI_CT_INVALID_VERSION 0x02
+#define SLI_CT_LOGICAL_ERROR 0x03
+#define SLI_CT_INVALID_IU_SIZE 0x04
+#define SLI_CT_LOGICAL_BUSY 0x05
+#define SLI_CT_PROTOCOL_ERROR 0x07
+#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09
+#define SLI_CT_REQ_NOT_SUPPORTED 0x0b
+#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10
+#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11
+#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12
+#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13
+#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20
+#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
+#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22
+#define SLI_CT_VENDOR_UNIQUE 0xff
+
+/*
+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
+ */
+
+#define SLI_CT_NO_PORT_ID 0x01
+#define SLI_CT_NO_PORT_NAME 0x02
+#define SLI_CT_NO_NODE_NAME 0x03
+#define SLI_CT_NO_CLASS_OF_SERVICE 0x04
+#define SLI_CT_NO_IP_ADDRESS 0x05
+#define SLI_CT_NO_IPA 0x06
+#define SLI_CT_NO_FC4_TYPES 0x07
+#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08
+#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09
+#define SLI_CT_NO_PORT_TYPE 0x0A
+#define SLI_CT_ACCESS_DENIED 0x10
+#define SLI_CT_INVALID_PORT_ID 0x11
+#define SLI_CT_DATABASE_EMPTY 0x12
+
+/*
+ * Name Server Command Codes
+ */
+
+#define SLI_CTNS_GA_NXT 0x0100
+#define SLI_CTNS_GPN_ID 0x0112
+#define SLI_CTNS_GNN_ID 0x0113
+#define SLI_CTNS_GCS_ID 0x0114
+#define SLI_CTNS_GFT_ID 0x0117
+#define SLI_CTNS_GSPN_ID 0x0118
+#define SLI_CTNS_GPT_ID 0x011A
+#define SLI_CTNS_GFF_ID 0x011F
+#define SLI_CTNS_GID_PN 0x0121
+#define SLI_CTNS_GID_NN 0x0131
+#define SLI_CTNS_GIP_NN 0x0135
+#define SLI_CTNS_GIPA_NN 0x0136
+#define SLI_CTNS_GSNN_NN 0x0139
+#define SLI_CTNS_GNN_IP 0x0153
+#define SLI_CTNS_GIPA_IP 0x0156
+#define SLI_CTNS_GID_FT 0x0171
+#define SLI_CTNS_GID_PT 0x01A1
+#define SLI_CTNS_RPN_ID 0x0212
+#define SLI_CTNS_RNN_ID 0x0213
+#define SLI_CTNS_RCS_ID 0x0214
+#define SLI_CTNS_RFT_ID 0x0217
+#define SLI_CTNS_RSPN_ID 0x0218
+#define SLI_CTNS_RPT_ID 0x021A
+#define SLI_CTNS_RFF_ID 0x021F
+#define SLI_CTNS_RIP_NN 0x0235
+#define SLI_CTNS_RIPA_NN 0x0236
+#define SLI_CTNS_RSNN_NN 0x0239
+#define SLI_CTNS_DA_ID 0x0300
+
+/*
+ * Port Types
+ */
+
+#define SLI_CTPT_N_PORT 0x01
+#define SLI_CTPT_NL_PORT 0x02
+#define SLI_CTPT_FNL_PORT 0x03
+#define SLI_CTPT_IP 0x04
+#define SLI_CTPT_FCP 0x08
+#define SLI_CTPT_NX_PORT 0x7F
+#define SLI_CTPT_F_PORT 0x81
+#define SLI_CTPT_FL_PORT 0x82
+#define SLI_CTPT_E_PORT 0x84
+
+#define SLI_CT_LAST_ENTRY 0x80000000
+
+/* Fibre Channel Service Parameter definitions */
+
+#define FC_PH_4_0 6 /* FC-PH version 4.0 */
+#define FC_PH_4_1 7 /* FC-PH version 4.1 */
+#define FC_PH_4_2 8 /* FC-PH version 4.2 */
+#define FC_PH_4_3 9 /* FC-PH version 4.3 */
+
+#define FC_PH_LOW 8 /* Lowest supported FC-PH version */
+#define FC_PH_HIGH 9 /* Highest supported FC-PH version */
+#define FC_PH3 0x20 /* FC-PH-3 version */
+
+#define FF_FRAME_SIZE 2048
+
+struct lpfc_name {
+ union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit
+ 8:11 of IEEE ext */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit
+ 8:11 of IEEE ext */
+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
+#endif
+
+#define NAME_IEEE 0x1 /* IEEE name - nameType */
+#define NAME_IEEE_EXT 0x2 /* IEEE extended name */
+#define NAME_FC_TYPE 0x3 /* FC native name type */
+#define NAME_IP_TYPE 0x4 /* IP address */
+#define NAME_CCITT_TYPE 0xC
+#define NAME_CCITT_GR_TYPE 0xE
+ uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE
+ extended Lsb */
+ uint8_t IEEE[6]; /* FC IEEE address */
+ } s;
+ uint8_t wwn[8];
+ } u;
+};
+
+struct csp {
+ uint8_t fcphHigh; /* FC Word 0, byte 0 */
+ uint8_t fcphLow;
+ uint8_t bbCreditMsb;
+ uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
+ uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
+ uint16_t fPort:1; /* FC Word 1, bit 28 */
+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
+ uint16_t multicast:1; /* FC Word 1, bit 25 */
+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
+
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t multicast:1; /* FC Word 1, bit 25 */
+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+ uint16_t fPort:1; /* FC Word 1, bit 28 */
+ uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
+ uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
+
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+#endif
+
+ uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
+ uint8_t bbRcvSizeLsb; /* FC Word 1, byte 3 */
+ union {
+ struct {
+ uint8_t word2Reserved1; /* FC Word 2 byte 0 */
+
+ uint8_t totalConcurrSeq; /* FC Word 2 byte 1 */
+ uint8_t roByCategoryMsb; /* FC Word 2 byte 2 */
+
+ uint8_t roByCategoryLsb; /* FC Word 2 byte 3 */
+ } nPort;
+ uint32_t r_a_tov; /* R_A_TOV must be in B.E. format */
+ } w2;
+
+ uint32_t e_d_tov; /* E_D_TOV must be in B.E. format */
+};
+
+struct class_parms {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t classValid:1; /* FC Word 0, bit 31 */
+ uint8_t intermix:1; /* FC Word 0, bit 30 */
+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
+ uint8_t intermix:1; /* FC Word 0, bit 30 */
+ uint8_t classValid:1; /* FC Word 0, bit 31 */
+
+#endif
+
+ uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
+#endif
+
+ uint8_t word0Reserved4; /* FC Word 0, bit 0: 7 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
+#endif
+
+ uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */
+ uint8_t rcvDataSizeMsb; /* FC Word 1, bit 8:15 */
+ uint8_t rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */
+
+ uint8_t concurrentSeqMsb; /* FC Word 2, bit 24:31 */
+ uint8_t concurrentSeqLsb; /* FC Word 2, bit 16:23 */
+ uint8_t EeCreditSeqMsb; /* FC Word 2, bit 8:15 */
+ uint8_t EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */
+
+ uint8_t openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */
+ uint8_t openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */
+ uint8_t word3Reserved1; /* Fc Word 3, bit 8:15 */
+ uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
+};
+
+struct serv_parm { /* Structure is in Big Endian format */
+ struct csp cmn;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ struct class_parms cls1;
+ struct class_parms cls2;
+ struct class_parms cls3;
+ struct class_parms cls4;
+ uint8_t vendorVersion[16];
+};
+
+/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+ uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT 24
+#define fc_vft_hdr_r_ctl_MASK 0xFF
+#define fc_vft_hdr_r_ctl_WORD word0
+#define fc_vft_hdr_ver_SHIFT 22
+#define fc_vft_hdr_ver_MASK 0x3
+#define fc_vft_hdr_ver_WORD word0
+#define fc_vft_hdr_type_SHIFT 18
+#define fc_vft_hdr_type_MASK 0xF
+#define fc_vft_hdr_type_WORD word0
+#define fc_vft_hdr_e_SHIFT 16
+#define fc_vft_hdr_e_MASK 0x1
+#define fc_vft_hdr_e_WORD word0
+#define fc_vft_hdr_priority_SHIFT 13
+#define fc_vft_hdr_priority_MASK 0x7
+#define fc_vft_hdr_priority_WORD word0
+#define fc_vft_hdr_vf_id_SHIFT 1
+#define fc_vft_hdr_vf_id_MASK 0xFFF
+#define fc_vft_hdr_vf_id_WORD word0
+ uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT 24
+#define fc_vft_hdr_hopct_MASK 0xFF
+#define fc_vft_hdr_hopct_WORD word1
+};
+
+/*
+ * Extended Link Service LS_COMMAND codes (Payload Word 0)
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define ELS_CMD_MASK 0xffff0000
+#define ELS_RSP_MASK 0xff000000
+#define ELS_CMD_LS_RJT 0x01000000
+#define ELS_CMD_ACC 0x02000000
+#define ELS_CMD_PLOGI 0x03000000
+#define ELS_CMD_FLOGI 0x04000000
+#define ELS_CMD_LOGO 0x05000000
+#define ELS_CMD_ABTX 0x06000000
+#define ELS_CMD_RCS 0x07000000
+#define ELS_CMD_RES 0x08000000
+#define ELS_CMD_RSS 0x09000000
+#define ELS_CMD_RSI 0x0A000000
+#define ELS_CMD_ESTS 0x0B000000
+#define ELS_CMD_ESTC 0x0C000000
+#define ELS_CMD_ADVC 0x0D000000
+#define ELS_CMD_RTV 0x0E000000
+#define ELS_CMD_RLS 0x0F000000
+#define ELS_CMD_ECHO 0x10000000
+#define ELS_CMD_TEST 0x11000000
+#define ELS_CMD_RRQ 0x12000000
+#define ELS_CMD_REC 0x13000000
+#define ELS_CMD_PRLI 0x20100014
+#define ELS_CMD_PRLO 0x21100014
+#define ELS_CMD_PRLO_ACC 0x02100014
+#define ELS_CMD_PDISC 0x50000000
+#define ELS_CMD_FDISC 0x51000000
+#define ELS_CMD_ADISC 0x52000000
+#define ELS_CMD_FARP 0x54000000
+#define ELS_CMD_FARPR 0x55000000
+#define ELS_CMD_RPS 0x56000000
+#define ELS_CMD_RPL 0x57000000
+#define ELS_CMD_FAN 0x60000000
+#define ELS_CMD_RSCN 0x61040000
+#define ELS_CMD_SCR 0x62000000
+#define ELS_CMD_RNID 0x78000000
+#define ELS_CMD_LIRR 0x7A000000
+#else /* __LITTLE_ENDIAN_BITFIELD */
+#define ELS_CMD_MASK 0xffff
+#define ELS_RSP_MASK 0xff
+#define ELS_CMD_LS_RJT 0x01
+#define ELS_CMD_ACC 0x02
+#define ELS_CMD_PLOGI 0x03
+#define ELS_CMD_FLOGI 0x04
+#define ELS_CMD_LOGO 0x05
+#define ELS_CMD_ABTX 0x06
+#define ELS_CMD_RCS 0x07
+#define ELS_CMD_RES 0x08
+#define ELS_CMD_RSS 0x09
+#define ELS_CMD_RSI 0x0A
+#define ELS_CMD_ESTS 0x0B
+#define ELS_CMD_ESTC 0x0C
+#define ELS_CMD_ADVC 0x0D
+#define ELS_CMD_RTV 0x0E
+#define ELS_CMD_RLS 0x0F
+#define ELS_CMD_ECHO 0x10
+#define ELS_CMD_TEST 0x11
+#define ELS_CMD_RRQ 0x12
+#define ELS_CMD_REC 0x13
+#define ELS_CMD_PRLI 0x14001020
+#define ELS_CMD_PRLO 0x14001021
+#define ELS_CMD_PRLO_ACC 0x14001002
+#define ELS_CMD_PDISC 0x50
+#define ELS_CMD_FDISC 0x51
+#define ELS_CMD_ADISC 0x52
+#define ELS_CMD_FARP 0x54
+#define ELS_CMD_FARPR 0x55
+#define ELS_CMD_RPS 0x56
+#define ELS_CMD_RPL 0x57
+#define ELS_CMD_FAN 0x60
+#define ELS_CMD_RSCN 0x0461
+#define ELS_CMD_SCR 0x62
+#define ELS_CMD_RNID 0x78
+#define ELS_CMD_LIRR 0x7A
+#endif
+
+/*
+ * LS_RJT Payload Definition
+ */
+
+struct ls_rjt { /* Structure is in Big Endian format */
+ union {
+ uint32_t lsRjtError;
+ struct {
+ uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
+
+ uint8_t lsRjtRsnCode; /* FC Word 0, bit 16:23 */
+ /* LS_RJT reason codes */
+#define LSRJT_INVALID_CMD 0x01
+#define LSRJT_LOGICAL_ERR 0x03
+#define LSRJT_LOGICAL_BSY 0x05
+#define LSRJT_PROTOCOL_ERR 0x07
+#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */
+#define LSRJT_CMD_UNSUPPORTED 0x0B
+#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */
+
+ uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
+ /* LS_RJT reason explanation */
+#define LSEXP_NOTHING_MORE 0x00
+#define LSEXP_SPARM_OPTIONS 0x01
+#define LSEXP_SPARM_ICTL 0x03
+#define LSEXP_SPARM_RCTL 0x05
+#define LSEXP_SPARM_RCV_SIZE 0x07
+#define LSEXP_SPARM_CONCUR_SEQ 0x09
+#define LSEXP_SPARM_CREDIT 0x0B
+#define LSEXP_INVALID_PNAME 0x0D
+#define LSEXP_INVALID_NNAME 0x0E
+#define LSEXP_INVALID_CSP 0x0F
+#define LSEXP_INVALID_ASSOC_HDR 0x11
+#define LSEXP_ASSOC_HDR_REQ 0x13
+#define LSEXP_INVALID_O_SID 0x15
+#define LSEXP_INVALID_OX_RX 0x17
+#define LSEXP_CMD_IN_PROGRESS 0x19
+#define LSEXP_PORT_LOGIN_REQ 0x1E
+#define LSEXP_INVALID_NPORT_ID 0x1F
+#define LSEXP_INVALID_SEQ_ID 0x21
+#define LSEXP_INVALID_XCHG 0x23
+#define LSEXP_INACTIVE_XCHG 0x25
+#define LSEXP_RQ_REQUIRED 0x27
+#define LSEXP_OUT_OF_RESOURCE 0x29
+#define LSEXP_CANT_GIVE_DATA 0x2A
+#define LSEXP_REQ_UNSUPPORTED 0x2C
+ uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
+ } b;
+ } un;
+};
+
+/*
+ * N_Port Login (FLOGO/PLOGO Request) Payload Definition
+ */
+
+typedef struct _LOGO { /* Structure is in Big Endian format */
+ union {
+ uint32_t nPortId32; /* Access nPortId as a word */
+ struct {
+ uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */
+ uint8_t nPortIdByte0; /* N_port ID bit 16:23 */
+ uint8_t nPortIdByte1; /* N_port ID bit 8:15 */
+ uint8_t nPortIdByte2; /* N_port ID bit 0: 7 */
+ } b;
+ } un;
+ struct lpfc_name portName; /* N_port name field */
+} LOGO;
+
+/*
+ * FCP Login (PRLI Request / ACC) Payload Definition
+ */
+
+#define PRLX_PAGE_LEN 0x10
+#define TPRLO_PAGE_LEN 0x14
+
+typedef struct _PRLI { /* Structure is in Big Endian format */
+ uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
+
+#define PRLI_FCP_TYPE 0x08
+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
+
+ /* ACC = imagePairEstablished */
+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ /* ACC = imagePairEstablished */
+#endif
+
+#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */
+#define PRLI_NO_RESOURCES 0x2
+#define PRLI_INIT_INCOMPLETE 0x3
+#define PRLI_NO_SUCH_PA 0x4
+#define PRLI_PREDEF_CONFIG 0x5
+#define PRLI_PARTIAL_SUCCESS 0x6
+#define PRLI_INVALID_PAGE_CNT 0x7
+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+ uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */
+ uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
+#endif
+} PRLI;
+
+/*
+ * FCP Logout (PRLO Request / ACC) Payload Definition
+ */
+
+typedef struct _PRLO { /* Structure is in Big Endian format */
+ uint8_t prloType; /* FC Parm Word 0, bit 24:31 */
+
+#define PRLO_FCP_TYPE 0x08
+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+#endif
+
+#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */
+#define PRLO_NO_SUCH_IMAGE 0x4
+#define PRLO_INVALID_PAGE_CNT 0x7
+
+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+ uint32_t word3Reserved1; /* FC Parm Word 3, bit 0:31 */
+} PRLO;
+
+typedef struct _ADISC { /* Structure is in Big Endian format */
+ uint32_t hardAL_PA;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ uint32_t DID;
+} ADISC;
+
+typedef struct _FARP { /* Structure is in Big Endian format */
+ uint32_t Mflags:8;
+ uint32_t Odid:24;
+#define FARP_NO_ACTION 0 /* FARP information enclosed, no
+ action */
+#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */
+#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */
+#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */
+#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not
+ supported */
+#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not
+ supported */
+ uint32_t Rflags:8;
+ uint32_t Rdid:24;
+#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */
+#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */
+ struct lpfc_name OportName;
+ struct lpfc_name OnodeName;
+ struct lpfc_name RportName;
+ struct lpfc_name RnodeName;
+ uint8_t Oipaddr[16];
+ uint8_t Ripaddr[16];
+} FARP;
+
+typedef struct _FAN { /* Structure is in Big Endian format */
+ uint32_t Fdid;
+ struct lpfc_name FportName;
+ struct lpfc_name FnodeName;
+} FAN;
+
+typedef struct _SCR { /* Structure is in Big Endian format */
+ uint8_t resvd1;
+ uint8_t resvd2;
+ uint8_t resvd3;
+ uint8_t Function;
+#define SCR_FUNC_FABRIC 0x01
+#define SCR_FUNC_NPORT 0x02
+#define SCR_FUNC_FULL 0x03
+#define SCR_CLEAR 0xff
+} SCR;
+
+typedef struct _RNID_TOP_DISC {
+ struct lpfc_name portName;
+ uint8_t resvd[8];
+ uint32_t unitType;
+#define RNID_HBA 0x7
+#define RNID_HOST 0xa
+#define RNID_DRIVER 0xd
+ uint32_t physPort;
+ uint32_t attachedNodes;
+ uint16_t ipVersion;
+#define RNID_IPV4 0x1
+#define RNID_IPV6 0x2
+ uint16_t UDPport;
+ uint8_t ipAddr[16];
+ uint16_t resvd1;
+ uint16_t flags;
+#define RNID_TD_SUPPORT 0x1
+#define RNID_LP_VALID 0x2
+} RNID_TOP_DISC;
+
+typedef struct _RNID { /* Structure is in Big Endian format */
+ uint8_t Format;
+#define RNID_TOPOLOGY_DISC 0xdf
+ uint8_t CommonLen;
+ uint8_t resvd1;
+ uint8_t SpecificLen;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ union {
+ RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
+ } un;
+} RNID;
+
+typedef struct _RPS { /* Structure is in Big Endian format */
+ union {
+ uint32_t portNum;
+ struct lpfc_name portName;
+ } un;
+} RPS;
+
+typedef struct _RPS_RSP { /* Structure is in Big Endian format */
+ uint16_t rsvd1;
+ uint16_t portStatus;
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+} RPS_RSP;
+
+struct RLS { /* Structure is in Big Endian format */
+ uint32_t rls;
+#define rls_rsvd_SHIFT 24
+#define rls_rsvd_MASK 0x000000ff
+#define rls_rsvd_WORD rls
+#define rls_did_SHIFT 0
+#define rls_did_MASK 0x00ffffff
+#define rls_did_WORD rls
+};
+
+struct RLS_RSP { /* Structure is in Big Endian format */
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+};
+
+struct RRQ { /* Structure is in Big Endian format */
+ uint32_t rrq;
+#define rrq_rsvd_SHIFT 24
+#define rrq_rsvd_MASK 0x000000ff
+#define rrq_rsvd_WORD rrq
+#define rrq_did_SHIFT 0
+#define rrq_did_MASK 0x00ffffff
+#define rrq_did_WORD rrq
+ uint32_t rrq_exchg;
+#define rrq_oxid_SHIFT 16
+#define rrq_oxid_MASK 0xffff
+#define rrq_oxid_WORD rrq_exchg
+#define rrq_rxid_SHIFT 0
+#define rrq_rxid_MASK 0xffff
+#define rrq_rxid_WORD rrq_exchg
+};
+
+#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN 0 /* Default VFs due to platform limitation*/
+
+struct RTV_RSP { /* Structure is in Big Endian format */
+ uint32_t ratov;
+ uint32_t edtov;
+ uint32_t qtov;
+#define qtov_rsvd0_SHIFT 28
+#define qtov_rsvd0_MASK 0x0000000f
+#define qtov_rsvd0_WORD qtov /* reserved */
+#define qtov_edtovres_SHIFT 27
+#define qtov_edtovres_MASK 0x00000001
+#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */
+#define qtov__rsvd1_SHIFT 19
+#define qtov_rsvd1_MASK 0x0000003f
+#define qtov_rsvd1_WORD qtov /* reserved */
+#define qtov_rttov_SHIFT 18
+#define qtov_rttov_MASK 0x00000001
+#define qtov_rttov_WORD qtov /* R_T_TOV value */
+#define qtov_rsvd2_SHIFT 0
+#define qtov_rsvd2_MASK 0x0003ffff
+#define qtov_rsvd2_WORD qtov /* reserved */
+};
+
+
+typedef struct _RPL { /* Structure is in Big Endian format */
+ uint32_t maxsize;
+ uint32_t index;
+} RPL;
+
+typedef struct _PORT_NUM_BLK {
+ uint32_t portNum;
+ uint32_t portID;
+ struct lpfc_name portName;
+} PORT_NUM_BLK;
+
+typedef struct _RPL_RSP { /* Structure is in Big Endian format */
+ uint32_t listLen;
+ uint32_t index;
+ PORT_NUM_BLK port_num_blk;
+} RPL_RSP;
+
+/* This is used for RSCN command */
+typedef struct _D_ID { /* Structure is in Big Endian format */
+ union {
+ uint32_t word;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t resv;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t id;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t id;
+ uint8_t area;
+ uint8_t domain;
+ uint8_t resv;
+#endif
+ } b;
+ } un;
+} D_ID;
+
+#define RSCN_ADDRESS_FORMAT_PORT 0x0
+#define RSCN_ADDRESS_FORMAT_AREA 0x1
+#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2
+#define RSCN_ADDRESS_FORMAT_FABRIC 0x3
+#define RSCN_ADDRESS_FORMAT_MASK 0x3
+
+/*
+ * Structure to define all ELS Payload types
+ */
+
+typedef struct _ELS_PKT { /* Structure is in Big Endian format */
+ uint8_t elsCode; /* FC Word 0, bit 24:31 */
+ uint8_t elsByte1;
+ uint8_t elsByte2;
+ uint8_t elsByte3;
+ union {
+ struct ls_rjt lsRjt; /* Payload for LS_RJT ELS response */
+ struct serv_parm logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */
+ LOGO logo; /* Payload for PLOGO/FLOGO/ACC */
+ PRLI prli; /* Payload for PRLI/ACC */
+ PRLO prlo; /* Payload for PRLO/ACC */
+ ADISC adisc; /* Payload for ADISC/ACC */
+ FARP farp; /* Payload for FARP/ACC */
+ FAN fan; /* Payload for FAN */
+ SCR scr; /* Payload for SCR/ACC */
+ RNID rnid; /* Payload for RNID */
+ uint8_t pad[128 - 4]; /* Pad out to payload of 128 bytes */
+ } un;
+} ELS_PKT;
+
+/******** FDMI ********/
+
+/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
+#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
+
+/*
+ * Registered Port List Format
+ */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ uint32_t pe; /* Variable-length array */
+};
+
+
+/* Definitions for HBA / Port attribute entries */
+
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+ /* Structure is in Big Endian format */
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
+};
+
+
+/* Attribute Entry */
+struct lpfc_fdmi_attr_entry {
+ union {
+ uint32_t VendorSpecific;
+ uint32_t SupportClass;
+ uint32_t SupportSpeed;
+ uint32_t PortSpeed;
+ uint32_t MaxFrameSize;
+ uint32_t MaxCTPayloadLen;
+ uint32_t PortState;
+ uint32_t PortId;
+ struct lpfc_name NodeName;
+ struct lpfc_name PortName;
+ struct lpfc_name FabricName;
+ uint8_t FC4Types[32];
+ uint8_t Manufacturer[64];
+ uint8_t SerialNumber[64];
+ uint8_t Model[256];
+ uint8_t ModelDescription[256];
+ uint8_t HardwareVersion[256];
+ uint8_t DriverVersion[256];
+ uint8_t OptionROMVersion[256];
+ uint8_t FirmwareVersion[256];
+ uint8_t OsHostName[256];
+ uint8_t NodeSymName[256];
+ uint8_t OsDeviceName[256];
+ uint8_t OsNameVersion[256];
+ uint8_t HostName[256];
+ } un;
+};
+
+#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
+
+/*
+ * HBA Attribute Block
+ */
+struct lpfc_fdmi_attr_block {
+ uint32_t EntryCnt; /* Number of HBA attribute entries */
+ struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+};
+
+/*
+ * Port Entry
+ */
+struct lpfc_fdmi_port_entry {
+ struct lpfc_name PortName;
+};
+
+/*
+ * HBA Identifier
+ */
+struct lpfc_fdmi_hba_ident {
+ struct lpfc_name PortName;
+};
+
+/*
+ * Register HBA(RHBA)
+ */
+struct lpfc_fdmi_reg_hba {
+ struct lpfc_fdmi_hba_ident hi;
+ struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
+/* struct lpfc_fdmi_attr_block ab; */
+};
+
+/*
+ * Register HBA Attributes (RHAT)
+ */
+struct lpfc_fdmi_reg_hbaattr {
+ struct lpfc_name HBA_PortName;
+ struct lpfc_fdmi_attr_block ab;
+};
+
+/*
+ * Register Port Attributes (RPA)
+ */
+struct lpfc_fdmi_reg_portattr {
+ struct lpfc_name PortName;
+ struct lpfc_fdmi_attr_block ab;
+};
+
+/*
+ * HBA MAnagement Operations Command Codes
+ */
+#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
+#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
+#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
+#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
+#define SLI_MGMT_GPAS 0x120 /* Get Port Statistics */
+#define SLI_MGMT_RHBA 0x200 /* Register HBA */
+#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
+#define SLI_MGMT_RPRT 0x210 /* Register Port */
+#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
+#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
+#define SLI_MGMT_DHAT 0x301 /* De-register HBA attributes */
+#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+#define SLI_MGMT_DPA 0x311 /* De-register Port attributes */
+
+/*
+ * HBA Attribute Types
+ */
+#define RHBA_NODENAME 0x1 /* 8 byte WWNN */
+#define RHBA_MANUFACTURER 0x2 /* 4 to 64 byte ASCII string */
+#define RHBA_SERIAL_NUMBER 0x3 /* 4 to 64 byte ASCII string */
+#define RHBA_MODEL 0x4 /* 4 to 256 byte ASCII string */
+#define RHBA_MODEL_DESCRIPTION 0x5 /* 4 to 256 byte ASCII string */
+#define RHBA_HARDWARE_VERSION 0x6 /* 4 to 256 byte ASCII string */
+#define RHBA_DRIVER_VERSION 0x7 /* 4 to 256 byte ASCII string */
+#define RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */
+#define RHBA_FIRMWARE_VERSION 0x9 /* 4 to 256 byte ASCII string */
+#define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */
+#define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */
+#define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */
+
+/*
+ * Port Attrubute Types
+ */
+#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
+#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
+#define RPRT_PORT_SPEED 0x3 /* 32-bit unsigned int */
+#define RPRT_MAX_FRAME_SIZE 0x4 /* 32-bit unsigned int */
+#define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */
+#define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */
+#define RPRT_NODENAME 0x7 /* 8 byte WWNN */
+#define RPRT_PORTNAME 0x8 /* 8 byte WWNN */
+#define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */
+#define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */
+#define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */
+#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWNN */
+#define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */
+#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
+#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
+#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
+
+/*
+ * Begin HBA configuration parameters.
+ * The PCI configuration register BAR assignments are:
+ * BAR0, offset 0x10 - SLIM base memory address
+ * BAR1, offset 0x14 - SLIM base memory high address
+ * BAR2, offset 0x18 - REGISTER base memory address
+ * BAR3, offset 0x1c - REGISTER base memory high address
+ * BAR4, offset 0x20 - BIU I/O registers
+ * BAR5, offset 0x24 - REGISTER base io high address
+ */
+
+/* Number of rings currently used and available. */
+#define MAX_SLI3_CONFIGURED_RINGS 3
+#define MAX_SLI3_RINGS 4
+
+/* IOCB / Mailbox is owned by FireFly */
+#define OWN_CHIP 1
+
+/* IOCB / Mailbox is owned by Host */
+#define OWN_HOST 0
+
+/* Number of 4-byte words in an IOCB. */
+#define IOCB_WORD_SZ 8
+
+/* network headers for Dfctl field */
+#define FC_NET_HDR 0x20
+
+/* Start FireFly Register definitions */
+#define PCI_VENDOR_ID_EMULEX 0x10df
+#define PCI_DEVICE_ID_FIREFLY 0x1ae5
+#define PCI_DEVICE_ID_PROTEUS_VF 0xe100
+#define PCI_DEVICE_ID_BALIUS 0xe131
+#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
+#define PCI_DEVICE_ID_LANCER_FC 0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
+#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
+#define PCI_DEVICE_ID_SAT_SMB 0xf011
+#define PCI_DEVICE_ID_SAT_MID 0xf015
+#define PCI_DEVICE_ID_RFLY 0xf095
+#define PCI_DEVICE_ID_PFLY 0xf098
+#define PCI_DEVICE_ID_LP101 0xf0a1
+#define PCI_DEVICE_ID_TFLY 0xf0a5
+#define PCI_DEVICE_ID_BSMB 0xf0d1
+#define PCI_DEVICE_ID_BMID 0xf0d5
+#define PCI_DEVICE_ID_ZSMB 0xf0e1
+#define PCI_DEVICE_ID_ZMID 0xf0e5
+#define PCI_DEVICE_ID_NEPTUNE 0xf0f5
+#define PCI_DEVICE_ID_NEPTUNE_SCSP 0xf0f6
+#define PCI_DEVICE_ID_NEPTUNE_DCSP 0xf0f7
+#define PCI_DEVICE_ID_SAT 0xf100
+#define PCI_DEVICE_ID_SAT_SCSP 0xf111
+#define PCI_DEVICE_ID_SAT_DCSP 0xf112
+#define PCI_DEVICE_ID_FALCON 0xf180
+#define PCI_DEVICE_ID_SUPERFLY 0xf700
+#define PCI_DEVICE_ID_DRAGONFLY 0xf800
+#define PCI_DEVICE_ID_CENTAUR 0xf900
+#define PCI_DEVICE_ID_PEGASUS 0xf980
+#define PCI_DEVICE_ID_THOR 0xfa00
+#define PCI_DEVICE_ID_VIPER 0xfb00
+#define PCI_DEVICE_ID_LP10000S 0xfc00
+#define PCI_DEVICE_ID_LP11000S 0xfc10
+#define PCI_DEVICE_ID_LPE11000S 0xfc20
+#define PCI_DEVICE_ID_SAT_S 0xfc40
+#define PCI_DEVICE_ID_PROTEUS_S 0xfc50
+#define PCI_DEVICE_ID_HELIOS 0xfd00
+#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
+#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
+#define PCI_DEVICE_ID_ZEPHYR 0xfe00
+#define PCI_DEVICE_ID_HORNET 0xfe05
+#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
+#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK 0x0704
+#define PCI_DEVICE_ID_TOMCAT 0x0714
+#define PCI_DEVICE_ID_SKYHAWK 0x0724
+#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
+
+#define JEDEC_ID_ADDRESS 0x0080001c
+#define FIREFLY_JEDEC_ID 0x1ACC
+#define SUPERFLY_JEDEC_ID 0x0020
+#define DRAGONFLY_JEDEC_ID 0x0021
+#define DRAGONFLY_V2_JEDEC_ID 0x0025
+#define CENTAUR_2G_JEDEC_ID 0x0026
+#define CENTAUR_1G_JEDEC_ID 0x0028
+#define PEGASUS_ORION_JEDEC_ID 0x0036
+#define PEGASUS_JEDEC_ID 0x0038
+#define THOR_JEDEC_ID 0x0012
+#define HELIOS_JEDEC_ID 0x0364
+#define ZEPHYR_JEDEC_ID 0x0577
+#define VIPER_JEDEC_ID 0x4838
+#define SATURN_JEDEC_ID 0x1004
+#define HORNET_JDEC_ID 0x2057706D
+
+#define JEDEC_ID_MASK 0x0FFFF000
+#define JEDEC_ID_SHIFT 12
+#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
+
+typedef struct { /* FireFly BIU registers */
+ uint32_t hostAtt; /* See definitions for Host Attention
+ register */
+ uint32_t chipAtt; /* See definitions for Chip Attention
+ register */
+ uint32_t hostStatus; /* See definitions for Host Status register */
+ uint32_t hostControl; /* See definitions for Host Control register */
+ uint32_t buiConfig; /* See definitions for BIU configuration
+ register */
+} FF_REGS;
+
+/* IO Register size in bytes */
+#define FF_REG_AREA_SIZE 256
+
+/* Host Attention Register */
+
+#define HA_REG_OFFSET 0 /* Byte offset from register base address */
+
+#define HA_R0RE_REQ 0x00000001 /* Bit 0 */
+#define HA_R0CE_RSP 0x00000002 /* Bit 1 */
+#define HA_R0ATT 0x00000008 /* Bit 3 */
+#define HA_R1RE_REQ 0x00000010 /* Bit 4 */
+#define HA_R1CE_RSP 0x00000020 /* Bit 5 */
+#define HA_R1ATT 0x00000080 /* Bit 7 */
+#define HA_R2RE_REQ 0x00000100 /* Bit 8 */
+#define HA_R2CE_RSP 0x00000200 /* Bit 9 */
+#define HA_R2ATT 0x00000800 /* Bit 11 */
+#define HA_R3RE_REQ 0x00001000 /* Bit 12 */
+#define HA_R3CE_RSP 0x00002000 /* Bit 13 */
+#define HA_R3ATT 0x00008000 /* Bit 15 */
+#define HA_LATT 0x20000000 /* Bit 29 */
+#define HA_MBATT 0x40000000 /* Bit 30 */
+#define HA_ERATT 0x80000000 /* Bit 31 */
+
+#define HA_RXRE_REQ 0x00000001 /* Bit 0 */
+#define HA_RXCE_RSP 0x00000002 /* Bit 1 */
+#define HA_RXATT 0x00000008 /* Bit 3 */
+#define HA_RXMASK 0x0000000f
+
+#define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
+#define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
+#define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
+#define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
+
+#define HA_R0_POS 3
+#define HA_R1_POS 7
+#define HA_R2_POS 11
+#define HA_R3_POS 15
+#define HA_LE_POS 29
+#define HA_MB_POS 30
+#define HA_ER_POS 31
+/* Chip Attention Register */
+
+#define CA_REG_OFFSET 4 /* Byte offset from register base address */
+
+#define CA_R0CE_REQ 0x00000001 /* Bit 0 */
+#define CA_R0RE_RSP 0x00000002 /* Bit 1 */
+#define CA_R0ATT 0x00000008 /* Bit 3 */
+#define CA_R1CE_REQ 0x00000010 /* Bit 4 */
+#define CA_R1RE_RSP 0x00000020 /* Bit 5 */
+#define CA_R1ATT 0x00000080 /* Bit 7 */
+#define CA_R2CE_REQ 0x00000100 /* Bit 8 */
+#define CA_R2RE_RSP 0x00000200 /* Bit 9 */
+#define CA_R2ATT 0x00000800 /* Bit 11 */
+#define CA_R3CE_REQ 0x00001000 /* Bit 12 */
+#define CA_R3RE_RSP 0x00002000 /* Bit 13 */
+#define CA_R3ATT 0x00008000 /* Bit 15 */
+#define CA_MBATT 0x40000000 /* Bit 30 */
+
+/* Host Status Register */
+
+#define HS_REG_OFFSET 8 /* Byte offset from register base address */
+
+#define HS_MBRDY 0x00400000 /* Bit 22 */
+#define HS_FFRDY 0x00800000 /* Bit 23 */
+#define HS_FFER8 0x01000000 /* Bit 24 */
+#define HS_FFER7 0x02000000 /* Bit 25 */
+#define HS_FFER6 0x04000000 /* Bit 26 */
+#define HS_FFER5 0x08000000 /* Bit 27 */
+#define HS_FFER4 0x10000000 /* Bit 28 */
+#define HS_FFER3 0x20000000 /* Bit 29 */
+#define HS_FFER2 0x40000000 /* Bit 30 */
+#define HS_FFER1 0x80000000 /* Bit 31 */
+#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */
+#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */
+#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */
+/* Host Control Register */
+
+#define HC_REG_OFFSET 12 /* Byte offset from register base address */
+
+#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
+#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
+#define HC_R1INT_ENA 0x00000004 /* Bit 2 */
+#define HC_R2INT_ENA 0x00000008 /* Bit 3 */
+#define HC_R3INT_ENA 0x00000010 /* Bit 4 */
+#define HC_INITHBI 0x02000000 /* Bit 25 */
+#define HC_INITMB 0x04000000 /* Bit 26 */
+#define HC_INITFF 0x08000000 /* Bit 27 */
+#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
+#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
+
+/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
+#define MSIX_DFLT_ID 0
+#define MSIX_RNG0_ID 0
+#define MSIX_RNG1_ID 1
+#define MSIX_RNG2_ID 2
+#define MSIX_RNG3_ID 3
+
+#define MSIX_LINK_ID 4
+#define MSIX_MBOX_ID 5
+
+#define MSIX_SPARE0_ID 6
+#define MSIX_SPARE1_ID 7
+
+/* Mailbox Commands */
+#define MBX_SHUTDOWN 0x00 /* terminate testing */
+#define MBX_LOAD_SM 0x01
+#define MBX_READ_NV 0x02
+#define MBX_WRITE_NV 0x03
+#define MBX_RUN_BIU_DIAG 0x04
+#define MBX_INIT_LINK 0x05
+#define MBX_DOWN_LINK 0x06
+#define MBX_CONFIG_LINK 0x07
+#define MBX_CONFIG_RING 0x09
+#define MBX_RESET_RING 0x0A
+#define MBX_READ_CONFIG 0x0B
+#define MBX_READ_RCONFIG 0x0C
+#define MBX_READ_SPARM 0x0D
+#define MBX_READ_STATUS 0x0E
+#define MBX_READ_RPI 0x0F
+#define MBX_READ_XRI 0x10
+#define MBX_READ_REV 0x11
+#define MBX_READ_LNK_STAT 0x12
+#define MBX_REG_LOGIN 0x13
+#define MBX_UNREG_LOGIN 0x14
+#define MBX_CLEAR_LA 0x16
+#define MBX_DUMP_MEMORY 0x17
+#define MBX_DUMP_CONTEXT 0x18
+#define MBX_RUN_DIAGS 0x19
+#define MBX_RESTART 0x1A
+#define MBX_UPDATE_CFG 0x1B
+#define MBX_DOWN_LOAD 0x1C
+#define MBX_DEL_LD_ENTRY 0x1D
+#define MBX_RUN_PROGRAM 0x1E
+#define MBX_SET_MASK 0x20
+#define MBX_SET_VARIABLE 0x21
+#define MBX_UNREG_D_ID 0x23
+#define MBX_KILL_BOARD 0x24
+#define MBX_CONFIG_FARP 0x25
+#define MBX_BEACON 0x2A
+#define MBX_CONFIG_MSI 0x30
+#define MBX_HEARTBEAT 0x31
+#define MBX_WRITE_VPARMS 0x32
+#define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_READ_EVENT_LOG_STATUS 0x37
+#define MBX_READ_EVENT_LOG 0x38
+#define MBX_WRITE_EVENT_LOG 0x39
+
+#define MBX_PORT_CAPABILITIES 0x3B
+#define MBX_PORT_IOV_CONTROL 0x3C
+
+#define MBX_CONFIG_HBQ 0x7C
+#define MBX_LOAD_AREA 0x81
+#define MBX_RUN_BIU_DIAG64 0x84
+#define MBX_CONFIG_PORT 0x88
+#define MBX_READ_SPARM64 0x8D
+#define MBX_READ_RPI64 0x8F
+#define MBX_REG_LOGIN64 0x93
+#define MBX_READ_TOPOLOGY 0x95
+#define MBX_REG_VPI 0x96
+#define MBX_UNREG_VPI 0x97
+
+#define MBX_WRITE_WWN 0x98
+#define MBX_SET_DEBUG 0x99
+#define MBX_LOAD_EXP_ROM 0x9C
+#define MBX_SLI4_CONFIG 0x9B
+#define MBX_SLI4_REQ_FTRS 0x9D
+#define MBX_MAX_CMDS 0x9E
+#define MBX_RESUME_RPI 0x9E
+#define MBX_SLI2_CMD_MASK 0x80
+#define MBX_REG_VFI 0x9F
+#define MBX_REG_FCFI 0xA0
+#define MBX_UNREG_VFI 0xA1
+#define MBX_UNREG_FCFI 0xA2
+#define MBX_INIT_VFI 0xA3
+#define MBX_INIT_VPI 0xA4
+#define MBX_ACCESS_VDATA 0xA5
+
+#define MBX_AUTH_PORT 0xF8
+#define MBX_SECURITY_MGMT 0xF9
+
+/* IOCB Commands */
+
+#define CMD_RCV_SEQUENCE_CX 0x01
+#define CMD_XMIT_SEQUENCE_CR 0x02
+#define CMD_XMIT_SEQUENCE_CX 0x03
+#define CMD_XMIT_BCAST_CN 0x04
+#define CMD_XMIT_BCAST_CX 0x05
+#define CMD_QUE_RING_BUF_CN 0x06
+#define CMD_QUE_XRI_BUF_CX 0x07
+#define CMD_IOCB_CONTINUE_CN 0x08
+#define CMD_RET_XRI_BUF_CX 0x09
+#define CMD_ELS_REQUEST_CR 0x0A
+#define CMD_ELS_REQUEST_CX 0x0B
+#define CMD_RCV_ELS_REQ_CX 0x0D
+#define CMD_ABORT_XRI_CN 0x0E
+#define CMD_ABORT_XRI_CX 0x0F
+#define CMD_CLOSE_XRI_CN 0x10
+#define CMD_CLOSE_XRI_CX 0x11
+#define CMD_CREATE_XRI_CR 0x12
+#define CMD_CREATE_XRI_CX 0x13
+#define CMD_GET_RPI_CN 0x14
+#define CMD_XMIT_ELS_RSP_CX 0x15
+#define CMD_GET_RPI_CR 0x16
+#define CMD_XRI_ABORTED_CX 0x17
+#define CMD_FCP_IWRITE_CR 0x18
+#define CMD_FCP_IWRITE_CX 0x19
+#define CMD_FCP_IREAD_CR 0x1A
+#define CMD_FCP_IREAD_CX 0x1B
+#define CMD_FCP_ICMND_CR 0x1C
+#define CMD_FCP_ICMND_CX 0x1D
+#define CMD_FCP_TSEND_CX 0x1F
+#define CMD_FCP_TRECEIVE_CX 0x21
+#define CMD_FCP_TRSP_CX 0x23
+#define CMD_FCP_AUTO_TRSP_CX 0x29
+
+#define CMD_ADAPTER_MSG 0x20
+#define CMD_ADAPTER_DUMP 0x22
+
+/* SLI_2 IOCB Command Set */
+
+#define CMD_ASYNC_STATUS 0x7C
+#define CMD_RCV_SEQUENCE64_CX 0x81
+#define CMD_XMIT_SEQUENCE64_CR 0x82
+#define CMD_XMIT_SEQUENCE64_CX 0x83
+#define CMD_XMIT_BCAST64_CN 0x84
+#define CMD_XMIT_BCAST64_CX 0x85
+#define CMD_QUE_RING_BUF64_CN 0x86
+#define CMD_QUE_XRI_BUF64_CX 0x87
+#define CMD_IOCB_CONTINUE64_CN 0x88
+#define CMD_RET_XRI_BUF64_CX 0x89
+#define CMD_ELS_REQUEST64_CR 0x8A
+#define CMD_ELS_REQUEST64_CX 0x8B
+#define CMD_ABORT_MXRI64_CN 0x8C
+#define CMD_RCV_ELS_REQ64_CX 0x8D
+#define CMD_XMIT_ELS_RSP64_CX 0x95
+#define CMD_XMIT_BLS_RSP64_CX 0x97
+#define CMD_FCP_IWRITE64_CR 0x98
+#define CMD_FCP_IWRITE64_CX 0x99
+#define CMD_FCP_IREAD64_CR 0x9A
+#define CMD_FCP_IREAD64_CX 0x9B
+#define CMD_FCP_ICMND64_CR 0x9C
+#define CMD_FCP_ICMND64_CX 0x9D
+#define CMD_FCP_TSEND64_CX 0x9F
+#define CMD_FCP_TRECEIVE64_CX 0xA1
+#define CMD_FCP_TRSP64_CX 0xA3
+
+#define CMD_QUE_XRI64_CX 0xB3
+#define CMD_IOCB_RCV_SEQ64_CX 0xB5
+#define CMD_IOCB_RCV_ELS64_CX 0xB7
+#define CMD_IOCB_RET_XRI64_CX 0xB9
+#define CMD_IOCB_RCV_CONT64_CX 0xBB
+
+#define CMD_GEN_REQUEST64_CR 0xC2
+#define CMD_GEN_REQUEST64_CX 0xC3
+
+/* Unhandled SLI-3 Commands */
+#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0
+#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1
+#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1
+#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD
+#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6
+#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA
+#define CMD_IOCB_RET_HBQE64_CN 0xCA
+#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC
+#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD
+#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF
+#define CMD_IOCB_LOGENTRY_CN 0x94
+#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
+
+/* Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR 0xF8
+#define DSSCMD_IWRITE64_CX 0xF9
+#define DSSCMD_IREAD64_CR 0xFA
+#define DSSCMD_IREAD64_CX 0xFB
+
+#define CMD_MAX_IOCB_CMD 0xFB
+#define CMD_IOCB_MASK 0xff
+
+#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
+ iocb */
+#define LPFC_MAX_ADPTMSG 32 /* max msg data */
+/*
+ * Define Status
+ */
+#define MBX_SUCCESS 0
+#define MBXERR_NUM_RINGS 1
+#define MBXERR_NUM_IOCBS 2
+#define MBXERR_IOCBS_EXCEEDED 3
+#define MBXERR_BAD_RING_NUMBER 4
+#define MBXERR_MASK_ENTRIES_RANGE 5
+#define MBXERR_MASKS_EXCEEDED 6
+#define MBXERR_BAD_PROFILE 7
+#define MBXERR_BAD_DEF_CLASS 8
+#define MBXERR_BAD_MAX_RESPONDER 9
+#define MBXERR_BAD_MAX_ORIGINATOR 10
+#define MBXERR_RPI_REGISTERED 11
+#define MBXERR_RPI_FULL 12
+#define MBXERR_NO_RESOURCES 13
+#define MBXERR_BAD_RCV_LENGTH 14
+#define MBXERR_DMA_ERROR 15
+#define MBXERR_ERROR 16
+#define MBXERR_LINK_DOWN 0x33
+#define MBXERR_SEC_NO_PERMISSION 0xF02
+#define MBX_NOT_FINISHED 255
+
+#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
+#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */
+
+#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */
+
+/*
+ * Begin Structure Definitions for Mailbox Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t tval;
+ uint8_t tmask;
+ uint8_t rval;
+ uint8_t rmask;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t rmask;
+ uint8_t rval;
+ uint8_t tmask;
+ uint8_t tval;
+#endif
+} RR_REG;
+
+struct ulp_bde {
+ uint32_t bdeAddress;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeReserved:4;
+ uint32_t bdeAddrHigh:4;
+ uint32_t bdeSize:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24;
+ uint32_t bdeAddrHigh:4;
+ uint32_t bdeReserved:4;
+#endif
+};
+
+typedef struct ULP_BDL { /* SLI-2 */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDL Flags */
+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
+ uint32_t bdeFlags:8; /* BDL Flags */
+#endif
+
+ uint32_t addrLow; /* Address 0:31 */
+ uint32_t addrHigh; /* Address 32:63 */
+ uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
+} ULP_BDL;
+
+/*
+ * BlockGuard Definitions
+ */
+
+enum lpfc_protgrp_type {
+ LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */
+ LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */
+ LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */
+ LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */
+};
+
+/* PDE Descriptors */
+#define LPFC_PDE5_DESCRIPTOR 0x85
+#define LPFC_PDE6_DESCRIPTOR 0x86
+#define LPFC_PDE7_DESCRIPTOR 0x87
+
+/* BlockGuard Opcodes */
+#define BG_OP_IN_NODIF_OUT_CRC 0x0
+#define BG_OP_IN_CRC_OUT_NODIF 0x1
+#define BG_OP_IN_NODIF_OUT_CSUM 0x2
+#define BG_OP_IN_CSUM_OUT_NODIF 0x3
+#define BG_OP_IN_CRC_OUT_CRC 0x4
+#define BG_OP_IN_CSUM_OUT_CSUM 0x5
+#define BG_OP_IN_CRC_OUT_CSUM 0x6
+#define BG_OP_IN_CSUM_OUT_CRC 0x7
+#define BG_OP_RAW_MODE 0x8
+
+struct lpfc_pde5 {
+ uint32_t word0;
+#define pde5_type_SHIFT 24
+#define pde5_type_MASK 0x000000ff
+#define pde5_type_WORD word0
+#define pde5_rsvd0_SHIFT 0
+#define pde5_rsvd0_MASK 0x00ffffff
+#define pde5_rsvd0_WORD word0
+ uint32_t reftag; /* Reference Tag Value */
+ uint32_t reftagtr; /* Reference Tag Translation Value */
+};
+
+struct lpfc_pde6 {
+ uint32_t word0;
+#define pde6_type_SHIFT 24
+#define pde6_type_MASK 0x000000ff
+#define pde6_type_WORD word0
+#define pde6_rsvd0_SHIFT 0
+#define pde6_rsvd0_MASK 0x00ffffff
+#define pde6_rsvd0_WORD word0
+ uint32_t word1;
+#define pde6_rsvd1_SHIFT 26
+#define pde6_rsvd1_MASK 0x0000003f
+#define pde6_rsvd1_WORD word1
+#define pde6_na_SHIFT 25
+#define pde6_na_MASK 0x00000001
+#define pde6_na_WORD word1
+#define pde6_rsvd2_SHIFT 16
+#define pde6_rsvd2_MASK 0x000001FF
+#define pde6_rsvd2_WORD word1
+#define pde6_apptagtr_SHIFT 0
+#define pde6_apptagtr_MASK 0x0000ffff
+#define pde6_apptagtr_WORD word1
+ uint32_t word2;
+#define pde6_optx_SHIFT 28
+#define pde6_optx_MASK 0x0000000f
+#define pde6_optx_WORD word2
+#define pde6_oprx_SHIFT 24
+#define pde6_oprx_MASK 0x0000000f
+#define pde6_oprx_WORD word2
+#define pde6_nr_SHIFT 23
+#define pde6_nr_MASK 0x00000001
+#define pde6_nr_WORD word2
+#define pde6_ce_SHIFT 22
+#define pde6_ce_MASK 0x00000001
+#define pde6_ce_WORD word2
+#define pde6_re_SHIFT 21
+#define pde6_re_MASK 0x00000001
+#define pde6_re_WORD word2
+#define pde6_ae_SHIFT 20
+#define pde6_ae_MASK 0x00000001
+#define pde6_ae_WORD word2
+#define pde6_ai_SHIFT 19
+#define pde6_ai_MASK 0x00000001
+#define pde6_ai_WORD word2
+#define pde6_bs_SHIFT 16
+#define pde6_bs_MASK 0x00000007
+#define pde6_bs_WORD word2
+#define pde6_apptagval_SHIFT 0
+#define pde6_apptagval_MASK 0x0000ffff
+#define pde6_apptagval_WORD word2
+};
+
+struct lpfc_pde7 {
+ uint32_t word0;
+#define pde7_type_SHIFT 24
+#define pde7_type_MASK 0x000000ff
+#define pde7_type_WORD word0
+#define pde7_rsvd0_SHIFT 0
+#define pde7_rsvd0_MASK 0x00ffffff
+#define pde7_rsvd0_WORD word0
+ uint32_t addrHigh;
+ uint32_t addrLow;
+};
+
+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:25;
+ uint32_t acknowledgment:1;
+ uint32_t version:1;
+ uint32_t erase_or_prog:1;
+ uint32_t update_flash:1;
+ uint32_t update_ram:1;
+ uint32_t method:1;
+ uint32_t load_cmplt:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t load_cmplt:1;
+ uint32_t method:1;
+ uint32_t update_ram:1;
+ uint32_t update_flash:1;
+ uint32_t erase_or_prog:1;
+ uint32_t version:1;
+ uint32_t acknowledgment:1;
+ uint32_t rsvd2:25;
+#endif
+
+ uint32_t dl_to_adr_low;
+ uint32_t dl_to_adr_high;
+ uint32_t dl_len;
+ union {
+ uint32_t dl_from_mbx_offset;
+ struct ulp_bde dl_from_bde;
+ struct ulp_bde64 dl_from_bde64;
+ } un;
+
+} LOAD_SM_VAR;
+
+/* Structure for MB Command READ_NVPARM (02) */
+
+typedef struct {
+ uint32_t rsvd1[3]; /* Read as all one's */
+ uint32_t rsvd2; /* Read as all zero's */
+ uint32_t portname[2]; /* N_PORT name */
+ uint32_t nodename[2]; /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pref_DID:24;
+ uint32_t hardAL_PA:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t hardAL_PA:8;
+ uint32_t pref_DID:24;
+#endif
+
+ uint32_t rsvd3[21]; /* Read as all one's */
+} READ_NV_VAR;
+
+/* Structure for MB Command WRITE_NVPARMS (03) */
+
+typedef struct {
+ uint32_t rsvd1[3]; /* Must be all one's */
+ uint32_t rsvd2; /* Must be all zero's */
+ uint32_t portname[2]; /* N_PORT name */
+ uint32_t nodename[2]; /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pref_DID:24;
+ uint32_t hardAL_PA:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t hardAL_PA:8;
+ uint32_t pref_DID:24;
+#endif
+
+ uint32_t rsvd3[21]; /* Must be all one's */
+} WRITE_NV_VAR;
+
+/* Structure for MB Command RUN_BIU_DIAG (04) */
+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
+
+typedef struct {
+ uint32_t rsvd1;
+ union {
+ struct {
+ struct ulp_bde xmit_bde;
+ struct ulp_bde rcv_bde;
+ } s1;
+ struct {
+ struct ulp_bde64 xmit_bde64;
+ struct ulp_bde64 rcv_bde64;
+ } s2;
+ } un;
+} BIU_DIAG_VAR;
+
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+struct READ_EVENT_LOG_VAR {
+ uint32_t word1;
+#define lpfc_event_log_SHIFT 29
+#define lpfc_event_log_MASK 0x00000001
+#define lpfc_event_log_WORD word1
+#define USE_MAILBOX_RESPONSE 1
+ uint32_t offset;
+ struct ulp_bde64 rcv_bde64;
+};
+
+/* Structure for MB Command INIT_LINK (05) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:24;
+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+ uint32_t rsvd1:24;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
+ uint8_t rsvd2;
+ uint16_t link_flags;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t link_flags;
+ uint8_t rsvd2;
+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
+#endif
+
+#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
+#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
+#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
+#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
+#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
+#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
+
+#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
+#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
+#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */
+
+ uint32_t link_speed;
+#define LINK_SPEED_AUTO 0x0 /* Auto selection */
+#define LINK_SPEED_1G 0x1 /* 1 Gigabaud */
+#define LINK_SPEED_2G 0x2 /* 2 Gigabaud */
+#define LINK_SPEED_4G 0x4 /* 4 Gigabaud */
+#define LINK_SPEED_8G 0x8 /* 8 Gigabaud */
+#define LINK_SPEED_10G 0x10 /* 10 Gigabaud */
+#define LINK_SPEED_16G 0x11 /* 16 Gigabaud */
+
+} INIT_LINK_VAR;
+
+/* Structure for MB Command DOWN_LINK (06) */
+
+typedef struct {
+ uint32_t rsvd1;
+} DOWN_LINK_VAR;
+
+/* Structure for MB Command CONFIG_LINK (07) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cr:1;
+ uint32_t ci:1;
+ uint32_t cr_delay:6;
+ uint32_t cr_count:8;
+ uint32_t rsvd1:8;
+ uint32_t MaxBBC:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t MaxBBC:8;
+ uint32_t rsvd1:8;
+ uint32_t cr_count:8;
+ uint32_t cr_delay:6;
+ uint32_t ci:1;
+ uint32_t cr:1;
+#endif
+
+ uint32_t myId;
+ uint32_t rsvd2;
+ uint32_t edtov;
+ uint32_t arbtov;
+ uint32_t ratov;
+ uint32_t rttov;
+ uint32_t altov;
+ uint32_t crtov;
+ uint32_t citov;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rrq_enable:1;
+ uint32_t rrq_immed:1;
+ uint32_t rsvd4:29;
+ uint32_t ack0_enable:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ack0_enable:1;
+ uint32_t rsvd4:29;
+ uint32_t rrq_immed:1;
+ uint32_t rrq_enable:1;
+#endif
+} CONFIG_LINK;
+
+/* Structure for MB Command PART_SLIM (08)
+ * will be removed since SLI1 is no longer supported!
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t offCiocb;
+ uint16_t numCiocb;
+ uint16_t offRiocb;
+ uint16_t numRiocb;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t numCiocb;
+ uint16_t offCiocb;
+ uint16_t numRiocb;
+ uint16_t offRiocb;
+#endif
+} RING_DEF;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t unused1:24;
+ uint32_t numRing:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t numRing:8;
+ uint32_t unused1:24;
+#endif
+
+ RING_DEF ringdef[4];
+ uint32_t hbainit;
+} PART_SLIM_VAR;
+
+/* Structure for MB Command CONFIG_RING (09) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t unused2:6;
+ uint32_t recvSeq:1;
+ uint32_t recvNotify:1;
+ uint32_t numMask:8;
+ uint32_t profile:8;
+ uint32_t unused1:4;
+ uint32_t ring:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ring:4;
+ uint32_t unused1:4;
+ uint32_t profile:8;
+ uint32_t numMask:8;
+ uint32_t recvNotify:1;
+ uint32_t recvSeq:1;
+ uint32_t unused2:6;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t maxRespXchg;
+ uint16_t maxOrigXchg;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t maxOrigXchg;
+ uint16_t maxRespXchg;
+#endif
+
+ RR_REG rrRegs[6];
+} CONFIG_RING_VAR;
+
+/* Structure for MB Command RESET_RING (10) */
+
+typedef struct {
+ uint32_t ring_no;
+} RESET_RING_VAR;
+
+/* Structure for MB Command READ_CONFIG (11) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cr:1;
+ uint32_t ci:1;
+ uint32_t cr_delay:6;
+ uint32_t cr_count:8;
+ uint32_t InitBBC:8;
+ uint32_t MaxBBC:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t MaxBBC:8;
+ uint32_t InitBBC:8;
+ uint32_t cr_count:8;
+ uint32_t cr_delay:6;
+ uint32_t ci:1;
+ uint32_t cr:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t topology:8;
+ uint32_t myDid:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myDid:24;
+ uint32_t topology:8;
+#endif
+
+ /* Defines for topology (defined previously) */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t AR:1;
+ uint32_t IR:1;
+ uint32_t rsvd1:29;
+ uint32_t ack0:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ack0:1;
+ uint32_t rsvd1:29;
+ uint32_t IR:1;
+ uint32_t AR:1;
+#endif
+
+ uint32_t edtov;
+ uint32_t arbtov;
+ uint32_t ratov;
+ uint32_t rttov;
+ uint32_t altov;
+ uint32_t lmt;
+#define LMT_RESERVED 0x000 /* Not used */
+#define LMT_1Gb 0x004
+#define LMT_2Gb 0x008
+#define LMT_4Gb 0x040
+#define LMT_8Gb 0x080
+#define LMT_10Gb 0x100
+#define LMT_16Gb 0x200
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t max_xri;
+ uint32_t max_iocb;
+ uint32_t max_rpi;
+ uint32_t avail_xri;
+ uint32_t avail_iocb;
+ uint32_t avail_rpi;
+ uint32_t max_vpi;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint32_t avail_vpi;
+} READ_CONFIG_VAR;
+
+/* Structure for MB Command READ_RCONFIG (12) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:7;
+ uint32_t recvNotify:1;
+ uint32_t numMask:8;
+ uint32_t profile:8;
+ uint32_t rsvd1:4;
+ uint32_t ring:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ring:4;
+ uint32_t rsvd1:4;
+ uint32_t profile:8;
+ uint32_t numMask:8;
+ uint32_t recvNotify:1;
+ uint32_t rsvd2:7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t maxResp;
+ uint16_t maxOrig;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t maxOrig;
+ uint16_t maxResp;
+#endif
+
+ RR_REG rrRegs[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t cmdRingOffset;
+ uint16_t cmdEntryCnt;
+ uint16_t rspRingOffset;
+ uint16_t rspEntryCnt;
+ uint16_t nextCmdOffset;
+ uint16_t rsvd3;
+ uint16_t nextRspOffset;
+ uint16_t rsvd4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t cmdEntryCnt;
+ uint16_t cmdRingOffset;
+ uint16_t rspEntryCnt;
+ uint16_t rspRingOffset;
+ uint16_t rsvd3;
+ uint16_t nextCmdOffset;
+ uint16_t rsvd4;
+ uint16_t nextRspOffset;
+#endif
+} READ_RCONF_VAR;
+
+/* Structure for MB Command READ_SPARM (13) */
+/* Structure for MB Command READ_SPARM64 (0x8D) */
+
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ union {
+ struct ulp_bde sp; /* This BDE points to struct serv_parm
+ structure */
+ struct ulp_bde64 sp64;
+ } un;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd3;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t vpi;
+ uint16_t rsvd3;
+#endif
+} READ_SPARM_VAR;
+
+/* Structure for MB Command READ_STATUS (14) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:31;
+ uint32_t clrCounters:1;
+ uint16_t activeXriCnt;
+ uint16_t activeRpiCnt;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t clrCounters:1;
+ uint32_t rsvd1:31;
+ uint16_t activeRpiCnt;
+ uint16_t activeXriCnt;
+#endif
+
+ uint32_t xmitByteCnt;
+ uint32_t rcvByteCnt;
+ uint32_t xmitFrameCnt;
+ uint32_t rcvFrameCnt;
+ uint32_t xmitSeqCnt;
+ uint32_t rcvSeqCnt;
+ uint32_t totalOrigExchanges;
+ uint32_t totalRespExchanges;
+ uint32_t rcvPbsyCnt;
+ uint32_t rcvFbsyCnt;
+} READ_STATUS_VAR;
+
+/* Structure for MB Command READ_RPI (15) */
+/* Structure for MB Command READ_RPI64 (0x8F) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t nextRpi;
+ uint16_t reqRpi;
+ uint32_t rsvd2:8;
+ uint32_t DID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t reqRpi;
+ uint16_t nextRpi;
+ uint32_t DID:24;
+ uint32_t rsvd2:8;
+#endif
+
+ union {
+ struct ulp_bde sp;
+ struct ulp_bde64 sp64;
+ } un;
+
+} READ_RPI_VAR;
+
+/* Structure for MB Command READ_XRI (16) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t nextXri;
+ uint16_t reqXri;
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2:8;
+ uint32_t DID:24;
+ uint32_t rsvd3:8;
+ uint32_t SID:24;
+ uint32_t rsvd4;
+ uint8_t seqId;
+ uint8_t rsvd5;
+ uint16_t seqCount;
+ uint16_t oxId;
+ uint16_t rxId;
+ uint32_t rsvd6:30;
+ uint32_t si:1;
+ uint32_t exchOrig:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t reqXri;
+ uint16_t nextXri;
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t DID:24;
+ uint32_t rsvd2:8;
+ uint32_t SID:24;
+ uint32_t rsvd3:8;
+ uint32_t rsvd4;
+ uint16_t seqCount;
+ uint8_t rsvd5;
+ uint8_t seqId;
+ uint16_t rxId;
+ uint16_t oxId;
+ uint32_t exchOrig:1;
+ uint32_t si:1;
+ uint32_t rsvd6:30;
+#endif
+} READ_XRI_VAR;
+
+/* Structure for MB Command READ_REV (17) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cv:1;
+ uint32_t rr:1;
+ uint32_t rsvd2:2;
+ uint32_t v3req:1;
+ uint32_t v3rsp:1;
+ uint32_t rsvd1:25;
+ uint32_t rv:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rv:1;
+ uint32_t rsvd1:25;
+ uint32_t v3rsp:1;
+ uint32_t v3req:1;
+ uint32_t rsvd2:2;
+ uint32_t rr:1;
+ uint32_t cv:1;
+#endif
+
+ uint32_t biuRev;
+ uint32_t smRev;
+ union {
+ uint32_t smFwRev;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t ProgType;
+ uint8_t ProgId;
+ uint16_t ProgVer:4;
+ uint16_t ProgRev:4;
+ uint16_t ProgFixLvl:2;
+ uint16_t ProgDistType:2;
+ uint16_t DistCnt:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t DistCnt:4;
+ uint16_t ProgDistType:2;
+ uint16_t ProgFixLvl:2;
+ uint16_t ProgRev:4;
+ uint16_t ProgVer:4;
+ uint8_t ProgId;
+ uint8_t ProgType;
+#endif
+
+ } b;
+ } un;
+ uint32_t endecRev;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t feaLevelHigh;
+ uint8_t feaLevelLow;
+ uint8_t fcphHigh;
+ uint8_t fcphLow;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t fcphLow;
+ uint8_t fcphHigh;
+ uint8_t feaLevelLow;
+ uint8_t feaLevelHigh;
+#endif
+
+ uint32_t postKernRev;
+ uint32_t opFwRev;
+ uint8_t opFwName[16];
+ uint32_t sli1FwRev;
+ uint8_t sli1FwName[16];
+ uint32_t sli2FwRev;
+ uint8_t sli2FwName[16];
+ uint32_t sli3Feat;
+ uint32_t RandomData[6];
+} READ_REV_VAR;
+
+/* Structure for MB Command READ_LINK_STAT (18) */
+
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+ uint32_t primSeqTimeout;
+ uint32_t elasticOverrun;
+ uint32_t arbTimeout;
+} READ_LNK_VAR;
+
+/* Structure for MB Command REG_LOGIN (19) */
+/* Structure for MB Command REG_LOGIN64 (0x93) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2:8;
+ uint32_t did:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t did:24;
+ uint32_t rsvd2:8;
+#endif
+
+ union {
+ struct ulp_bde sp;
+ struct ulp_bde64 sp64;
+ } un;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+
+} REG_LOGIN_VAR;
+
+/* Word 30 contents for REG_LOGIN */
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1:12;
+ uint16_t wd30_class:4;
+ uint16_t xri;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t xri;
+ uint16_t wd30_class:4;
+ uint16_t rsvd1:12;
+#endif
+ } f;
+ uint32_t word;
+} REG_WD30;
+
+/* Structure for MB Command UNREG_LOGIN (20) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} UNREG_LOGIN_VAR;
+
+/* Structure for MB Command REG_VPI (0x96) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1;
+ uint32_t rsvd2:7;
+ uint32_t upd:1;
+ uint32_t sid:24;
+ uint32_t wwn[2];
+ uint32_t rsvd5;
+ uint16_t vfi;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd1;
+ uint32_t sid:24;
+ uint32_t upd:1;
+ uint32_t rsvd2:7;
+ uint32_t wwn[2];
+ uint32_t rsvd5;
+ uint16_t vpi;
+ uint16_t vfi;
+#endif
+} REG_VPI_VAR;
+
+/* Structure for MB Command UNREG_VPI (0x97) */
+typedef struct {
+ uint32_t rsvd1;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd2;
+ uint16_t sli4_vpi;
+#else /* __LITTLE_ENDIAN */
+ uint16_t sli4_vpi;
+ uint16_t rsvd2;
+#endif
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN */
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} UNREG_VPI_VAR;
+
+/* Structure for MB Command UNREG_D_ID (0x23) */
+
+typedef struct {
+ uint32_t did;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} UNREG_D_ID_VAR;
+
+/* Structure for MB Command READ_TOPOLOGY (0x95) */
+struct lpfc_mbx_read_top {
+ uint32_t eventTag; /* Event tag */
+ uint32_t word2;
+#define lpfc_mbx_read_top_fa_SHIFT 12
+#define lpfc_mbx_read_top_fa_MASK 0x00000001
+#define lpfc_mbx_read_top_fa_WORD word2
+#define lpfc_mbx_read_top_mm_SHIFT 11
+#define lpfc_mbx_read_top_mm_MASK 0x00000001
+#define lpfc_mbx_read_top_mm_WORD word2
+#define lpfc_mbx_read_top_pb_SHIFT 9
+#define lpfc_mbx_read_top_pb_MASK 0X00000001
+#define lpfc_mbx_read_top_pb_WORD word2
+#define lpfc_mbx_read_top_il_SHIFT 8
+#define lpfc_mbx_read_top_il_MASK 0x00000001
+#define lpfc_mbx_read_top_il_WORD word2
+#define lpfc_mbx_read_top_att_type_SHIFT 0
+#define lpfc_mbx_read_top_att_type_MASK 0x000000FF
+#define lpfc_mbx_read_top_att_type_WORD word2
+#define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */
+#define LPFC_ATT_LINK_UP 0x01 /* Link is up */
+#define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */
+ uint32_t word3;
+#define lpfc_mbx_read_top_alpa_granted_SHIFT 24
+#define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF
+#define lpfc_mbx_read_top_alpa_granted_WORD word3
+#define lpfc_mbx_read_top_lip_alps_SHIFT 16
+#define lpfc_mbx_read_top_lip_alps_MASK 0x000000FF
+#define lpfc_mbx_read_top_lip_alps_WORD word3
+#define lpfc_mbx_read_top_lip_type_SHIFT 8
+#define lpfc_mbx_read_top_lip_type_MASK 0x000000FF
+#define lpfc_mbx_read_top_lip_type_WORD word3
+#define lpfc_mbx_read_top_topology_SHIFT 0
+#define lpfc_mbx_read_top_topology_MASK 0x000000FF
+#define lpfc_mbx_read_top_topology_WORD word3
+#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
+#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
+#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
+ /* store the LILP AL_PA position map into */
+ struct ulp_bde64 lilpBde64;
+#define LPFC_ALPA_MAP_SIZE 128
+ uint32_t word7;
+#define lpfc_mbx_read_top_ld_lu_SHIFT 31
+#define lpfc_mbx_read_top_ld_lu_MASK 0x00000001
+#define lpfc_mbx_read_top_ld_lu_WORD word7
+#define lpfc_mbx_read_top_ld_tf_SHIFT 30
+#define lpfc_mbx_read_top_ld_tf_MASK 0x00000001
+#define lpfc_mbx_read_top_ld_tf_WORD word7
+#define lpfc_mbx_read_top_ld_link_spd_SHIFT 8
+#define lpfc_mbx_read_top_ld_link_spd_MASK 0x000000FF
+#define lpfc_mbx_read_top_ld_link_spd_WORD word7
+#define lpfc_mbx_read_top_ld_nl_port_SHIFT 4
+#define lpfc_mbx_read_top_ld_nl_port_MASK 0x0000000F
+#define lpfc_mbx_read_top_ld_nl_port_WORD word7
+#define lpfc_mbx_read_top_ld_tx_SHIFT 2
+#define lpfc_mbx_read_top_ld_tx_MASK 0x00000003
+#define lpfc_mbx_read_top_ld_tx_WORD word7
+#define lpfc_mbx_read_top_ld_rx_SHIFT 0
+#define lpfc_mbx_read_top_ld_rx_MASK 0x00000003
+#define lpfc_mbx_read_top_ld_rx_WORD word7
+ uint32_t word8;
+#define lpfc_mbx_read_top_lu_SHIFT 31
+#define lpfc_mbx_read_top_lu_MASK 0x00000001
+#define lpfc_mbx_read_top_lu_WORD word8
+#define lpfc_mbx_read_top_tf_SHIFT 30
+#define lpfc_mbx_read_top_tf_MASK 0x00000001
+#define lpfc_mbx_read_top_tf_WORD word8
+#define lpfc_mbx_read_top_link_spd_SHIFT 8
+#define lpfc_mbx_read_top_link_spd_MASK 0x000000FF
+#define lpfc_mbx_read_top_link_spd_WORD word8
+#define lpfc_mbx_read_top_nl_port_SHIFT 4
+#define lpfc_mbx_read_top_nl_port_MASK 0x0000000F
+#define lpfc_mbx_read_top_nl_port_WORD word8
+#define lpfc_mbx_read_top_tx_SHIFT 2
+#define lpfc_mbx_read_top_tx_MASK 0x00000003
+#define lpfc_mbx_read_top_tx_WORD word8
+#define lpfc_mbx_read_top_rx_SHIFT 0
+#define lpfc_mbx_read_top_rx_MASK 0x00000003
+#define lpfc_mbx_read_top_rx_WORD word8
+#define LPFC_LINK_SPEED_UNKNOWN 0x0
+#define LPFC_LINK_SPEED_1GHZ 0x04
+#define LPFC_LINK_SPEED_2GHZ 0x08
+#define LPFC_LINK_SPEED_4GHZ 0x10
+#define LPFC_LINK_SPEED_8GHZ 0x20
+#define LPFC_LINK_SPEED_10GHZ 0x40
+#define LPFC_LINK_SPEED_16GHZ 0x80
+};
+
+/* Structure for MB Command CLEAR_LA (22) */
+
+typedef struct {
+ uint32_t eventTag; /* Event tag */
+ uint32_t rsvd1;
+} CLEAR_LA_VAR;
+
+/* Structure for MB Command DUMP */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd:25;
+ uint32_t ra:1;
+ uint32_t co:1;
+ uint32_t cv:1;
+ uint32_t type:4;
+ uint32_t entry_index:16;
+ uint32_t region_id:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t type:4;
+ uint32_t cv:1;
+ uint32_t co:1;
+ uint32_t ra:1;
+ uint32_t rsvd:25;
+ uint32_t region_id:16;
+ uint32_t entry_index:16;
+#endif
+
+ uint32_t sli4_length;
+ uint32_t word_cnt;
+ uint32_t resp_offset;
+} DUMP_VAR;
+
+#define DMP_MEM_REG 0x1
+#define DMP_NV_PARAMS 0x2
+#define DMP_LMSD 0x3 /* Link Module Serial Data */
+#define DMP_WELL_KNOWN 0x4
+
+#define DMP_REGION_VPD 0xe
+#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */
+#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
+#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
+
+#define DMP_REGION_VPORT 0x16 /* VPort info region */
+#define DMP_VPORT_REGION_SIZE 0x200
+#define DMP_MBOX_OFFSET_WORD 0x5
+
+#define DMP_REGION_23 0x17 /* fcoe param and port state region */
+#define DMP_RGN23_SIZE 0x400
+
+#define WAKE_UP_PARMS_REGION_ID 4
+#define WAKE_UP_PARMS_WORD_SIZE 15
+
+struct vport_rec {
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+ uint32_t signature;
+ uint32_t rev;
+ struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
+ uint32_t resvd[66];
+};
+
+/* Option rom version structure */
+struct prog_id {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t type;
+ uint8_t id;
+ uint32_t ver:4; /* Major Version */
+ uint32_t rev:4; /* Revision */
+ uint32_t lev:2; /* Level */
+ uint32_t dist:2; /* Dist Type */
+ uint32_t num:4; /* number after dist type */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t num:4; /* number after dist type */
+ uint32_t dist:2; /* Dist Type */
+ uint32_t lev:2; /* Level */
+ uint32_t rev:4; /* Revision */
+ uint32_t ver:4; /* Major Version */
+ uint8_t id;
+ uint8_t type;
+#endif
+};
+
+/* Structure for MB Command UPDATE_CFG (0x1B) */
+
+struct update_cfg_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:16;
+ uint32_t type:8;
+ uint32_t rsvd:1;
+ uint32_t ra:1;
+ uint32_t co:1;
+ uint32_t cv:1;
+ uint32_t req:4;
+ uint32_t entry_length:16;
+ uint32_t region_id:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t req:4;
+ uint32_t cv:1;
+ uint32_t co:1;
+ uint32_t ra:1;
+ uint32_t rsvd:1;
+ uint32_t type:8;
+ uint32_t rsvd2:16;
+ uint32_t region_id:16;
+ uint32_t entry_length:16;
+#endif
+
+ uint32_t resp_info;
+ uint32_t byte_cnt;
+ uint32_t data_offset;
+};
+
+struct hbq_mask {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t tmatch;
+ uint8_t tmask;
+ uint8_t rctlmatch;
+ uint8_t rctlmask;
+#else /* __LITTLE_ENDIAN */
+ uint8_t rctlmask;
+ uint8_t rctlmatch;
+ uint8_t tmask;
+ uint8_t tmatch;
+#endif
+};
+
+
+/* Structure for MB Command CONFIG_HBQ (7c) */
+
+struct config_hbq_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 :7;
+ uint32_t recvNotify :1; /* Receive Notification */
+ uint32_t numMask :8; /* # Mask Entries */
+ uint32_t profile :8; /* Selection Profile */
+ uint32_t rsvd2 :8;
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd2 :8;
+ uint32_t profile :8; /* Selection Profile */
+ uint32_t numMask :8; /* # Mask Entries */
+ uint32_t recvNotify :1; /* Receive Notification */
+ uint32_t rsvd1 :7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbqId :16;
+ uint32_t rsvd3 :12;
+ uint32_t ringMask :4;
+#else /* __LITTLE_ENDIAN */
+ uint32_t ringMask :4;
+ uint32_t rsvd3 :12;
+ uint32_t hbqId :16;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t entry_count :16;
+ uint32_t rsvd4 :8;
+ uint32_t headerLen :8;
+#else /* __LITTLE_ENDIAN */
+ uint32_t headerLen :8;
+ uint32_t rsvd4 :8;
+ uint32_t entry_count :16;
+#endif
+
+ uint32_t hbqaddrLow;
+ uint32_t hbqaddrHigh;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd5 :31;
+ uint32_t logEntry :1;
+#else /* __LITTLE_ENDIAN */
+ uint32_t logEntry :1;
+ uint32_t rsvd5 :31;
+#endif
+
+ uint32_t rsvd6; /* w7 */
+ uint32_t rsvd7; /* w8 */
+ uint32_t rsvd8; /* w9 */
+
+ struct hbq_mask hbqMasks[6];
+
+
+ union {
+ uint32_t allprofiles[12];
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 :28;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :28;
+ #endif
+ uint32_t rsvd[10];
+ } profile2;
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cmdcodeoff :28;
+ uint32_t rsvd1 :12;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :12;
+ uint32_t cmdcodeoff :28;
+ #endif
+ uint32_t cmdmatch[8];
+
+ uint32_t rsvd[2];
+ } profile3;
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cmdcodeoff :28;
+ uint32_t rsvd1 :12;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :12;
+ uint32_t cmdcodeoff :28;
+ #endif
+ uint32_t cmdmatch[8];
+
+ uint32_t rsvd[2];
+ } profile5;
+
+ } profiles;
+
+};
+
+
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cBE : 1;
+ uint32_t cET : 1;
+ uint32_t cHpcb : 1;
+ uint32_t cMA : 1;
+ uint32_t sli_mode : 4;
+ uint32_t pcbLen : 24; /* bit 23:0 of memory based port
+ * config block */
+#else /* __LITTLE_ENDIAN */
+ uint32_t pcbLen : 24; /* bit 23:0 of memory based port
+ * config block */
+ uint32_t sli_mode : 4;
+ uint32_t cMA : 1;
+ uint32_t cHpcb : 1;
+ uint32_t cET : 1;
+ uint32_t cBE : 1;
+#endif
+
+ uint32_t pcbLow; /* bit 31:0 of memory based port config block */
+ uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
+ uint32_t hbainit[5];
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
+ uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
+ uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 : 19; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t casabt : 1; /* Configure async abts status notice */
+ uint32_t rsvd2 : 2; /* Reserved */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t chbs : 1; /* Cofigure Host Backing store */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t chbs : 1; /* Cofigure Host Backing store */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t rsvd2 : 2; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd1 : 19; /* Reserved */
+#endif
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd3 : 19; /* Reserved */
+ uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
+ uint32_t rsvd4 : 2; /* Reserved */
+ uint32_t gbg : 1; /* Grant BlockGuard */
+ uint32_t gmv : 1; /* Grant Max VPIs */
+ uint32_t gcrp : 1; /* Grant Command Ring Polling */
+ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
+ uint32_t ghbs : 1; /* Grant Host Backing Store */
+ uint32_t ginb : 1; /* Grant Interrupt Notification Block */
+ uint32_t gerbm : 1; /* Grant ERBM Request */
+ uint32_t gmx : 1; /* Grant Max XRIs */
+ uint32_t gmr : 1; /* Grant Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t gmr : 1; /* Grant Max RPIs */
+ uint32_t gmx : 1; /* Grant Max XRIs */
+ uint32_t gerbm : 1; /* Grant ERBM Request */
+ uint32_t ginb : 1; /* Grant Interrupt Notification Block */
+ uint32_t ghbs : 1; /* Grant Host Backing Store */
+ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
+ uint32_t gcrp : 1; /* Grant Command Ring Polling */
+ uint32_t gmv : 1; /* Grant Max VPIs */
+ uint32_t gbg : 1; /* Grant BlockGuard */
+ uint32_t rsvd4 : 2; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
+ uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 : 19; /* Reserved */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t max_rpi : 16; /* Max RPIs Port should configure */
+ uint32_t max_xri : 16; /* Max XRIs Port should configure */
+#else /* __LITTLE_ENDIAN */
+ uint32_t max_xri : 16; /* Max XRIs Port should configure */
+ uint32_t max_rpi : 16; /* Max RPIs Port should configure */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
+ uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
+ uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
+#endif
+
+ uint32_t rsvd6; /* Reserved */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t fips_rev : 3; /* FIPS Spec Revision */
+ uint32_t fips_level : 4; /* FIPS Level */
+ uint32_t sec_err : 9; /* security crypto error */
+ uint32_t max_vpi : 16; /* Max number of virt N-Ports */
+#else /* __LITTLE_ENDIAN */
+ uint32_t max_vpi : 16; /* Max number of virt N-Ports */
+ uint32_t sec_err : 9; /* security crypto error */
+ uint32_t fips_level : 4; /* FIPS Level */
+ uint32_t fips_rev : 3; /* FIPS Spec Revision */
+#endif
+
+} CONFIG_PORT_VAR;
+
+/* Structure for MB Command CONFIG_MSI (0x30) */
+struct config_msi_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dfltMsgNum:8; /* Default message number */
+ uint32_t rsvd1:11; /* Reserved */
+ uint32_t NID:5; /* Number of secondary attention IDs */
+ uint32_t rsvd2:5; /* Reserved */
+ uint32_t dfltPresent:1; /* Default message number present */
+ uint32_t addFlag:1; /* Add association flag */
+ uint32_t reportFlag:1; /* Report association flag */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t reportFlag:1; /* Report association flag */
+ uint32_t addFlag:1; /* Add association flag */
+ uint32_t dfltPresent:1; /* Default message number present */
+ uint32_t rsvd2:5; /* Reserved */
+ uint32_t NID:5; /* Number of secondary attention IDs */
+ uint32_t rsvd1:11; /* Reserved */
+ uint32_t dfltMsgNum:8; /* Default message number */
+#endif
+ uint32_t attentionConditions[2];
+ uint8_t attentionId[16];
+ uint8_t messageNumberByHA[64];
+ uint8_t messageNumberByID[16];
+ uint32_t autoClearHA[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd3:16;
+ uint32_t autoClearID:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t autoClearID:16;
+ uint32_t rsvd3:16;
+#endif
+ uint32_t rsvd4;
+};
+
+/* SLI-2 Port Control Block */
+
+/* SLIM POINTER */
+#define SLIMOFF 0x30 /* WORD */
+
+typedef struct _SLI2_RDSC {
+ uint32_t cmdEntries;
+ uint32_t cmdAddrLow;
+ uint32_t cmdAddrHigh;
+
+ uint32_t rspEntries;
+ uint32_t rspAddrLow;
+ uint32_t rspAddrHigh;
+} SLI2_RDSC;
+
+typedef struct _PCB {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t type:8;
+#define TYPE_NATIVE_SLI2 0x01
+ uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2 0x01
+ uint32_t rsvd:12;
+ uint32_t maxRing:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t maxRing:4;
+ uint32_t rsvd:12;
+ uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2 0x01
+ uint32_t type:8;
+#define TYPE_NATIVE_SLI2 0x01
+#endif
+
+ uint32_t mailBoxSize;
+ uint32_t mbAddrLow;
+ uint32_t mbAddrHigh;
+
+ uint32_t hgpAddrLow;
+ uint32_t hgpAddrHigh;
+
+ uint32_t pgpAddrLow;
+ uint32_t pgpAddrHigh;
+ SLI2_RDSC rdsc[MAX_SLI3_RINGS];
+} PCB_t;
+
+/* NEW_FEATURE */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd0:27;
+ uint32_t discardFarp:1;
+ uint32_t IPEnable:1;
+ uint32_t nodeName:1;
+ uint32_t portName:1;
+ uint32_t filterEnable:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t filterEnable:1;
+ uint32_t portName:1;
+ uint32_t nodeName:1;
+ uint32_t IPEnable:1;
+ uint32_t discardFarp:1;
+ uint32_t rsvd:27;
+#endif
+
+ uint8_t portname[8]; /* Used to be struct lpfc_name */
+ uint8_t nodename[8];
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t IPAddress;
+} CONFIG_FARP_VAR;
+
+/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd:30;
+ uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/
+#else /* __LITTLE_ENDIAN */
+ uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/
+ uint32_t rsvd:30;
+#endif
+} ASYNCEVT_ENABLE_VAR;
+
+/* Union of all Mailbox Command types */
+#define MAILBOX_CMD_WSIZE 32
+#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+/* ext_wsize times 4 bytes should not be greater than max xmit size */
+#define MAILBOX_EXT_WSIZE 512
+#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
+#define MAILBOX_HBA_EXT_OFFSET 0x100
+/* max mbox xmit size is a page size for sysfs IO operations */
+#define MAILBOX_SYSFS_MAX 4096
+
+typedef union {
+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
+ * feature/max ring number
+ */
+ LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
+ READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
+ WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
+ BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
+ INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
+ DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
+ CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
+ PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
+ CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
+ RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
+ READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
+ READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
+ READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
+ READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
+ READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
+ READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
+ READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
+ READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
+ REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
+ UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
+ CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
+ DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
+ UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
+ CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP)
+ * NEW_FEATURE
+ */
+ struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
+ struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
+ CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+ struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */
+ REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
+ UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
+ ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38
+ * (READ_EVENT_LOG)
+ */
+ struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
+} MAILVARIANTS;
+
+/*
+ * SLI-2 specific structures
+ */
+
+struct lpfc_hgp {
+ __le32 cmdPutInx;
+ __le32 rspGetInx;
+};
+
+struct lpfc_pgp {
+ __le32 cmdGetInx;
+ __le32 rspPutInx;
+};
+
+struct sli2_desc {
+ uint32_t unused1[16];
+ struct lpfc_hgp host[MAX_SLI3_RINGS];
+ struct lpfc_pgp port[MAX_SLI3_RINGS];
+};
+
+struct sli3_desc {
+ struct lpfc_hgp host[MAX_SLI3_RINGS];
+ uint32_t reserved[8];
+ uint32_t hbq_put[16];
+};
+
+struct sli3_pgp {
+ struct lpfc_pgp port[MAX_SLI3_RINGS];
+ uint32_t hbq_get[16];
+};
+
+union sli_var {
+ struct sli2_desc s2;
+ struct sli3_desc s3;
+ struct sli3_pgp s3_pgp;
+};
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
+#endif
+
+ MAILVARIANTS un;
+ union sli_var us;
+} MAILBOX_t;
+
+/*
+ * Begin Structure Definitions for IOCB Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t statAction;
+ uint8_t statRsn;
+ uint8_t statBaExp;
+ uint8_t statLocalError;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t statLocalError;
+ uint8_t statBaExp;
+ uint8_t statRsn;
+ uint8_t statAction;
+#endif
+ /* statRsn P/F_RJT reason codes */
+#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */
+#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */
+#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */
+#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. */
+#define RJT_UNSUP_CLASS 0x05 /* Class not supported */
+#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */
+#define RJT_UNSUP_TYPE 0x07 /* Type not supported */
+#define RJT_BAD_CONTROL 0x08 /* Invalid link conrtol */
+#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */
+#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */
+#define RJT_BAD_OXID 0x0B /* OX_ID invalid */
+#define RJT_BAD_RXID 0x0C /* RX_ID invalid */
+#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */
+#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */
+#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */
+#define RJT_BAD_PARM 0x10 /* Param. field invalid */
+#define RJT_XCHG_ERR 0x11 /* Exchange error */
+#define RJT_PROT_ERR 0x12 /* Protocol error */
+#define RJT_BAD_LENGTH 0x13 /* Invalid Length */
+#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */
+#define RJT_LOGIN_REQUIRED 0x16 /* Login required */
+#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */
+#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */
+#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */
+#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */
+#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */
+
+#define IOERR_SUCCESS 0x00 /* statLocalError */
+#define IOERR_MISSING_CONTINUE 0x01
+#define IOERR_SEQUENCE_TIMEOUT 0x02
+#define IOERR_INTERNAL_ERROR 0x03
+#define IOERR_INVALID_RPI 0x04
+#define IOERR_NO_XRI 0x05
+#define IOERR_ILLEGAL_COMMAND 0x06
+#define IOERR_XCHG_DROPPED 0x07
+#define IOERR_ILLEGAL_FIELD 0x08
+#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_TOO_MANY_BUFFERS 0x0A
+#define IOERR_RCV_BUFFER_WAITING 0x0B
+#define IOERR_NO_CONNECTION 0x0C
+#define IOERR_TX_DMA_FAILED 0x0D
+#define IOERR_RX_DMA_FAILED 0x0E
+#define IOERR_ILLEGAL_FRAME 0x0F
+#define IOERR_EXTRA_DATA 0x10
+#define IOERR_NO_RESOURCES 0x11
+#define IOERR_RESERVED 0x12
+#define IOERR_ILLEGAL_LENGTH 0x13
+#define IOERR_UNSUPPORTED_FEATURE 0x14
+#define IOERR_ABORT_IN_PROGRESS 0x15
+#define IOERR_ABORT_REQUESTED 0x16
+#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17
+#define IOERR_LOOP_OPEN_FAILURE 0x18
+#define IOERR_RING_RESET 0x19
+#define IOERR_LINK_DOWN 0x1A
+#define IOERR_CORRUPTED_DATA 0x1B
+#define IOERR_CORRUPTED_RPI 0x1C
+#define IOERR_OUT_OF_ORDER_DATA 0x1D
+#define IOERR_OUT_OF_ORDER_ACK 0x1E
+#define IOERR_DUP_FRAME 0x1F
+#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */
+#define IOERR_BAD_HOST_ADDRESS 0x21
+#define IOERR_RCV_HDRBUF_WAITING 0x22
+#define IOERR_MISSING_HDR_BUFFER 0x23
+#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24
+#define IOERR_ABORTMULT_REQUESTED 0x25
+#define IOERR_BUFFER_SHORTAGE 0x28
+#define IOERR_DEFAULT 0x29
+#define IOERR_CNT 0x2A
+#define IOERR_SLER_FAILURE 0x46
+#define IOERR_SLER_CMD_RCV_FAILURE 0x47
+#define IOERR_SLER_REC_RJT_ERR 0x48
+#define IOERR_SLER_REC_SRR_RETRY_ERR 0x49
+#define IOERR_SLER_SRR_RJT_ERR 0x4A
+#define IOERR_SLER_RRQ_RJT_ERR 0x4C
+#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
+#define IOERR_SLER_ABTS_ERR 0x4E
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3
+#define IOERR_DRVR_MASK 0x100
+#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
+#define IOERR_SLI_BRESET 0x102
+#define IOERR_SLI_ABORTED 0x103
+#define IOERR_PARAM_MASK 0x1ff
+} PARM_ERR;
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t Rctl; /* R_CTL field */
+ uint8_t Type; /* TYPE field */
+ uint8_t Dfctl; /* DF_CTL field */
+ uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
+ uint8_t Dfctl; /* DF_CTL field */
+ uint8_t Type; /* TYPE field */
+ uint8_t Rctl; /* R_CTL field */
+#endif
+
+#define BC 0x02 /* Broadcast Received - Fctl */
+#define SI 0x04 /* Sequence Initiative */
+#define LA 0x08 /* Ignore Link Attention state */
+#define LS 0x80 /* Last Sequence */
+ } hcsw;
+ uint32_t reserved;
+} WORD5;
+
+/* IOCB Command template for a generic response */
+typedef struct {
+ uint32_t reserved[4];
+ PARM_ERR perr;
+} GENERIC_RSP;
+
+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
+typedef struct {
+ struct ulp_bde xrsqbde[2];
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} XR_SEQ_FIELDS;
+
+/* IOCB Command template for ELS_REQUEST */
+typedef struct {
+ struct ulp_bde elsReq;
+ struct ulp_bde elsRsp;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word4Rsvd:7;
+ uint32_t fl:1;
+ uint32_t myID:24;
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myID:24;
+ uint32_t fl:1;
+ uint32_t word4Rsvd:7;
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST;
+
+/* IOCB Command template for RCV_ELS_REQ */
+typedef struct {
+ struct ulp_bde elsReq[2];
+ uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ;
+
+/* IOCB Command template for ABORT / CLOSE_XRI */
+typedef struct {
+ uint32_t rsvd[3];
+ uint32_t abortType;
+#define ABORT_TYPE_ABTX 0x00000000
+#define ABORT_TYPE_ABTS 0x00000001
+ uint32_t parm;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
+#endif
+} AC_XRI;
+
+/* IOCB Command template for ABORT_MXRI64 */
+typedef struct {
+ uint32_t rsvd[3];
+ uint32_t abortType;
+ uint32_t parm;
+ uint32_t iotag32;
+} A_MXRI64;
+
+/* IOCB Command template for GET_RPI */
+typedef struct {
+ uint32_t rsvd[4];
+ uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} GET_RPI;
+
+/* IOCB Command template for all FCP Initiator commands */
+typedef struct {
+ struct ulp_bde fcpi_cmnd; /* FCP_CMND payload descriptor */
+ struct ulp_bde fcpi_rsp; /* Rcv buffer */
+ uint32_t fcpi_parm;
+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
+} FCPI_FIELDS;
+
+/* IOCB Command template for all FCP Target commands */
+typedef struct {
+ struct ulp_bde fcpt_Buffer[2]; /* FCP_CMND payload descriptor */
+ uint32_t fcpt_Offset;
+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
+} FCPT_FIELDS;
+
+/* SLI-2 IOCB structure definitions */
+
+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} XMT_SEQ_FIELDS64;
+
+/* This word is remote ports D_ID for XMIT_ELS_RSP64 */
+#define xmit_els_remoteID xrsqRo
+
+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
+typedef struct {
+ struct ulp_bde64 rcvBde;
+ uint32_t rsvd1;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} RCV_SEQ_FIELDS64;
+
+/* IOCB Command template for ELS_REQUEST64 */
+typedef struct {
+ ULP_BDL bdl;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word4Rsvd:7;
+ uint32_t fl:1;
+ uint32_t myID:24;
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myID:24;
+ uint32_t fl:1;
+ uint32_t word4Rsvd:7;
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST64;
+
+/* IOCB Command template for GEN_REQUEST64 */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} GEN_REQUEST64;
+
+/* IOCB Command template for RCV_ELS_REQ64 */
+typedef struct {
+ struct ulp_bde64 elsReq;
+ uint32_t rcvd1;
+ uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ64;
+
+/* IOCB Command template for RCV_SEQ64 */
+struct rcv_seq64 {
+ struct ulp_bde64 elsReq;
+ uint32_t hbq_1;
+ uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rctl:8;
+ uint32_t type:8;
+ uint32_t dfctl:8;
+ uint32_t ls:1;
+ uint32_t fs:1;
+ uint32_t rsvd2:3;
+ uint32_t si:1;
+ uint32_t bc:1;
+ uint32_t rsvd3:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rsvd3:1;
+ uint32_t bc:1;
+ uint32_t si:1;
+ uint32_t rsvd2:3;
+ uint32_t fs:1;
+ uint32_t ls:1;
+ uint32_t dfctl:8;
+ uint32_t type:8;
+ uint32_t rctl:8;
+#endif
+};
+
+/* IOCB Command template for all 64 bit FCP Initiator commands */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t fcpi_parm;
+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
+} FCPI_FIELDS64;
+
+/* IOCB Command template for all 64 bit FCP Target commands */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t fcpt_Offset;
+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
+} FCPT_FIELDS64;
+
+/* IOCB Command template for Async Status iocb commands */
+typedef struct {
+ uint32_t rsvd[4];
+ uint32_t param;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t evt_code; /* High order bits word 5 */
+ uint16_t sub_ctxt_tag; /* Low order bits word 5 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t sub_ctxt_tag; /* High order bits word 5 */
+ uint16_t evt_code; /* Low order bits word 5 */
+#endif
+} ASYNCSTAT_FIELDS;
+#define ASYNC_TEMP_WARN 0x100
+#define ASYNC_TEMP_SAFE 0x101
+#define ASYNC_STATUS_CN 0x102
+
+/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
+ or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
+
+struct rcv_sli3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ox_id;
+ uint16_t seq_cnt;
+
+ uint16_t vpi;
+ uint16_t word9Rsvd;
+#else /* __LITTLE_ENDIAN */
+ uint16_t seq_cnt;
+ uint16_t ox_id;
+
+ uint16_t word9Rsvd;
+ uint16_t vpi;
+#endif
+ uint32_t word10Rsvd;
+ uint32_t acc_len; /* accumulated length */
+ struct ulp_bde64 bde2;
+};
+
+/* Structure used for a single HBQ entry */
+struct lpfc_hbq_entry {
+ struct ulp_bde64 bde;
+ uint32_t buffer_tag;
+};
+
+/* IOCB Command template for QUE_XRI64_CX (0xB3) command */
+typedef struct {
+ struct lpfc_hbq_entry buff;
+ uint32_t rsvd;
+ uint32_t rsvd1;
+} QUE_XRI64_CX_FIELDS;
+
+struct que_xri64cx_ext_fields {
+ uint32_t iotag64_low;
+ uint32_t iotag64_high;
+ uint32_t ebde_count;
+ uint32_t rsvd;
+ struct lpfc_hbq_entry buff[5];
+};
+
+struct sli3_bg_fields {
+ uint32_t filler[6]; /* word 8-13 in IOCB */
+ uint32_t bghm; /* word 14 - BlockGuard High Water Mark */
+/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
+#define BGS_BIDIR_BG_PROF_MASK 0xff000000
+#define BGS_BIDIR_BG_PROF_SHIFT 24
+#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000
+#define BGS_BIDIR_ERR_COND_SHIFT 16
+#define BGS_BG_PROFILE_MASK 0x0000ff00
+#define BGS_BG_PROFILE_SHIFT 8
+#define BGS_INVALID_PROF_MASK 0x00000020
+#define BGS_INVALID_PROF_SHIFT 5
+#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010
+#define BGS_UNINIT_DIF_BLOCK_SHIFT 4
+#define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008
+#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3
+#define BGS_REFTAG_ERR_MASK 0x00000004
+#define BGS_REFTAG_ERR_SHIFT 2
+#define BGS_APPTAG_ERR_MASK 0x00000002
+#define BGS_APPTAG_ERR_SHIFT 1
+#define BGS_GUARD_ERR_MASK 0x00000001
+#define BGS_GUARD_ERR_SHIFT 0
+ uint32_t bgstat; /* word 15 - BlockGuard Status */
+};
+
+static inline uint32_t
+lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
+{
+ return (bgstat & BGS_BIDIR_BG_PROF_MASK) >>
+ BGS_BIDIR_BG_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
+{
+ return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
+ BGS_BIDIR_ERR_COND_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bg_prof(uint32_t bgstat)
+{
+ return (bgstat & BGS_BG_PROFILE_MASK) >>
+ BGS_BG_PROFILE_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_invalid_prof(uint32_t bgstat)
+{
+ return (bgstat & BGS_INVALID_PROF_MASK) >>
+ BGS_INVALID_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
+{
+ return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >>
+ BGS_UNINIT_DIF_BLOCK_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
+{
+ return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >>
+ BGS_HI_WATER_MARK_PRESENT_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_reftag_err(uint32_t bgstat)
+{
+ return (bgstat & BGS_REFTAG_ERR_MASK) >>
+ BGS_REFTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_apptag_err(uint32_t bgstat)
+{
+ return (bgstat & BGS_APPTAG_ERR_MASK) >>
+ BGS_APPTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_guard_err(uint32_t bgstat)
+{
+ return (bgstat & BGS_GUARD_ERR_MASK) >>
+ BGS_GUARD_ERR_SHIFT;
+}
+
+#define LPFC_EXT_DATA_BDE_COUNT 3
+struct fcp_irw_ext {
+ uint32_t io_tag64_low;
+ uint32_t io_tag64_high;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+ uint8_t ebde_count;
+#else /* __LITTLE_ENDIAN */
+ uint8_t ebde_count;
+ uint8_t reserved3;
+ uint8_t reserved2;
+ uint8_t reserved1;
+#endif
+ uint32_t reserved4;
+ struct ulp_bde64 rbde; /* response bde */
+ struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */
+ uint8_t icd[32]; /* immediate command data (32 bytes) */
+};
+
+typedef struct _IOCB { /* IOCB structure */
+ union {
+ GENERIC_RSP grsp; /* Generic response */
+ XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */
+ struct ulp_bde cont[3]; /* up to 3 continuation bdes */
+ RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */
+ AC_XRI acxri; /* ABORT / CLOSE_XRI template */
+ A_MXRI64 amxri; /* abort multiple xri command overlay */
+ GET_RPI getrpi; /* GET_RPI template */
+ FCPI_FIELDS fcpi; /* FCP Initiator template */
+ FCPT_FIELDS fcpt; /* FCP target template */
+
+ /* SLI-2 structures */
+
+ struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
+ * bde_64s */
+ ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
+ GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
+ RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
+ XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
+ FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
+ FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
+ ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
+ QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
+ struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
+ struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
+ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
+ } un;
+ union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ulpContext; /* High order bits word 6 */
+ uint16_t ulpIoTag; /* Low order bits word 6 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t ulpIoTag; /* Low order bits word 6 */
+ uint16_t ulpContext; /* High order bits word 6 */
+#endif
+ } t1;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ulpContext; /* High order bits word 6 */
+ uint16_t ulpIoTag1:2; /* Low order bits word 6 */
+ uint16_t ulpIoTag0:14; /* Low order bits word 6 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t ulpIoTag0:14; /* Low order bits word 6 */
+ uint16_t ulpIoTag1:2; /* Low order bits word 6 */
+ uint16_t ulpContext; /* High order bits word 6 */
+#endif
+ } t2;
+ } un1;
+#define ulpContext un1.t1.ulpContext
+#define ulpIoTag un1.t1.ulpIoTag
+#define ulpIoTag0 un1.t2.ulpIoTag0
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ulpTimeout:8;
+ uint32_t ulpXS:1;
+ uint32_t ulpFCP2Rcvy:1;
+ uint32_t ulpPU:2;
+ uint32_t ulpIr:1;
+ uint32_t ulpClass:3;
+ uint32_t ulpCommand:8;
+ uint32_t ulpStatus:4;
+ uint32_t ulpBdeCount:2;
+ uint32_t ulpLe:1;
+ uint32_t ulpOwner:1; /* Low order bit word 7 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ulpOwner:1; /* Low order bit word 7 */
+ uint32_t ulpLe:1;
+ uint32_t ulpBdeCount:2;
+ uint32_t ulpStatus:4;
+ uint32_t ulpCommand:8;
+ uint32_t ulpClass:3;
+ uint32_t ulpIr:1;
+ uint32_t ulpPU:2;
+ uint32_t ulpFCP2Rcvy:1;
+ uint32_t ulpXS:1;
+ uint32_t ulpTimeout:8;
+#endif
+
+ union {
+ struct rcv_sli3 rcvsli3; /* words 8 - 15 */
+
+ /* words 8-31 used for que_xri_cx iocb */
+ struct que_xri64cx_ext_fields que_xri64cx_ext_words;
+ struct fcp_irw_ext fcp_ext;
+ uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+
+ /* words 8-15 for BlockGuard */
+ struct sli3_bg_fields sli3_bg;
+ } unsli3;
+
+#define ulpCt_h ulpXS
+#define ulpCt_l ulpFCP2Rcvy
+
+#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */
+#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */
+#define PARM_UNUSED 0 /* PU field (Word 4) not used */
+#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
+#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
+#define PARM_NPIV_DID 3
+#define CLASS1 0 /* Class 1 */
+#define CLASS2 1 /* Class 2 */
+#define CLASS3 2 /* Class 3 */
+#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */
+
+#define IOSTAT_SUCCESS 0x0 /* ulpStatus - HBA defined */
+#define IOSTAT_FCP_RSP_ERROR 0x1
+#define IOSTAT_REMOTE_STOP 0x2
+#define IOSTAT_LOCAL_REJECT 0x3
+#define IOSTAT_NPORT_RJT 0x4
+#define IOSTAT_FABRIC_RJT 0x5
+#define IOSTAT_NPORT_BSY 0x6
+#define IOSTAT_FABRIC_BSY 0x7
+#define IOSTAT_INTERMED_RSP 0x8
+#define IOSTAT_LS_RJT 0x9
+#define IOSTAT_BA_RJT 0xA
+#define IOSTAT_RSVD1 0xB
+#define IOSTAT_RSVD2 0xC
+#define IOSTAT_RSVD3 0xD
+#define IOSTAT_RSVD4 0xE
+#define IOSTAT_NEED_BUFFER 0xF
+#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
+#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
+#define IOSTAT_CNT 0x11
+
+} IOCB_t;
+
+
+#define SLI1_SLIM_SIZE (4 * 1024)
+
+/* Up to 498 IOCBs will fit into 16k
+ * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
+ */
+#define SLI2_SLIM_SIZE (64 * 1024)
+
+/* Maximum IOCBs that will fit in SLI2 slim */
+#define MAX_SLI2_IOCB 498
+#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
+ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
+ sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
+
+/* HBQ entries are 4 words each = 4k */
+#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
+ lpfc_sli_hbq_count())
+
+struct lpfc_sli2_slim {
+ MAILBOX_t mbx;
+ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
+ PCB_t pcb;
+ IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
+};
+
+/*
+ * This function checks PCI device to allow special handling for LC HBAs.
+ *
+ * Parameters:
+ * device : struct pci_dev 's device field
+ *
+ * return 1 => TRUE
+ * 0 => FALSE
+ */
+static inline int
+lpfc_is_LC_HBA(unsigned short device)
+{
+ if ((device == PCI_DEVICE_ID_TFLY) ||
+ (device == PCI_DEVICE_ID_PFLY) ||
+ (device == PCI_DEVICE_ID_LP101) ||
+ (device == PCI_DEVICE_ID_BMID) ||
+ (device == PCI_DEVICE_ID_BSMB) ||
+ (device == PCI_DEVICE_ID_ZMID) ||
+ (device == PCI_DEVICE_ID_ZSMB) ||
+ (device == PCI_DEVICE_ID_SAT_MID) ||
+ (device == PCI_DEVICE_ID_SAT_SMB) ||
+ (device == PCI_DEVICE_ID_RFLY))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Determine if an IOCB failed because of a link event or firmware reset.
+ */
+
+static inline int
+lpfc_error_lost_link(IOCB_t *iocbp)
+{
+ return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
+ iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
+ iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+}
+
+#define MENLO_TRANSPORT_TYPE 0xfe
+#define MENLO_CONTEXT 0
+#define MENLO_PU 3
+#define MENLO_TIMEOUT 30
+#define SETVAR_MLOMNT 0x103107
+#define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 000000000..1813c4594
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,3694 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * EG. For a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size the following #defines must exist:
+ * struct temp {
+ * uint32_t field1;
+ * uint32_t field2;
+ * uint32_t field3;
+ * uint32_t field4;
+ * #define example_bit_field_SHIFT 7
+ * #define example_bit_field_MASK 0x03
+ * #define example_bit_field_WORD field4
+ * uint32_t field5;
+ * };
+ * Then the macros below may be used to get or set the value of that field.
+ * EG. To get the value of the bit field from the above example:
+ * struct temp t1;
+ * value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ * bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ * bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get_be32(name, ptr) \
+ ((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bf_get_le32(name, ptr) \
+ ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bf_get(name, ptr) \
+ (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+ ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+ name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+ ~(name##_MASK << name##_SHIFT)))))
+#define bf_set(name, ptr, value) \
+ ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+ ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
+struct dma_address {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+};
+
+struct lpfc_sli_intf {
+ uint32_t word0;
+#define lpfc_sli_intf_valid_SHIFT 29
+#define lpfc_sli_intf_valid_MASK 0x00000007
+#define lpfc_sli_intf_valid_WORD word0
+#define LPFC_SLI_INTF_VALID 6
+#define lpfc_sli_intf_sli_hint2_SHIFT 24
+#define lpfc_sli_intf_sli_hint2_MASK 0x0000001F
+#define lpfc_sli_intf_sli_hint2_WORD word0
+#define LPFC_SLI_INTF_SLI_HINT2_NONE 0
+#define lpfc_sli_intf_sli_hint1_SHIFT 16
+#define lpfc_sli_intf_sli_hint1_MASK 0x000000FF
+#define lpfc_sli_intf_sli_hint1_WORD word0
+#define LPFC_SLI_INTF_SLI_HINT1_NONE 0
+#define LPFC_SLI_INTF_SLI_HINT1_1 1
+#define LPFC_SLI_INTF_SLI_HINT1_2 2
+#define lpfc_sli_intf_if_type_SHIFT 12
+#define lpfc_sli_intf_if_type_MASK 0x0000000F
+#define lpfc_sli_intf_if_type_WORD word0
+#define LPFC_SLI_INTF_IF_TYPE_0 0
+#define LPFC_SLI_INTF_IF_TYPE_1 1
+#define LPFC_SLI_INTF_IF_TYPE_2 2
+#define lpfc_sli_intf_sli_family_SHIFT 8
+#define lpfc_sli_intf_sli_family_MASK 0x0000000F
+#define lpfc_sli_intf_sli_family_WORD word0
+#define LPFC_SLI_INTF_FAMILY_BE2 0x0
+#define LPFC_SLI_INTF_FAMILY_BE3 0x1
+#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
+#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
+#define lpfc_sli_intf_slirev_SHIFT 4
+#define lpfc_sli_intf_slirev_MASK 0x0000000F
+#define lpfc_sli_intf_slirev_WORD word0
+#define LPFC_SLI_INTF_REV_SLI3 3
+#define LPFC_SLI_INTF_REV_SLI4 4
+#define lpfc_sli_intf_func_type_SHIFT 0
+#define lpfc_sli_intf_func_type_MASK 0x00000001
+#define lpfc_sli_intf_func_type_WORD word0
+#define LPFC_SLI_INTF_IF_TYPE_PHYS 0
+#define LPFC_SLI_INTF_IF_TYPE_VIRT 1
+};
+
+#define LPFC_SLI4_MBX_EMBED true
+#define LPFC_SLI4_MBX_NEMBED false
+
+#define LPFC_SLI4_MB_WORD_COUNT 64
+#define LPFC_MAX_MQ_PAGE 8
+#define LPFC_MAX_WQ_PAGE_V0 4
+#define LPFC_MAX_WQ_PAGE 8
+#define LPFC_MAX_CQ_PAGE 4
+#define LPFC_MAX_EQ_PAGE 8
+
+#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE 16
+#define LPFC_ALIGN_64_BYTE 64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET 256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE 0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI 0x2
+#define LPFC_BMBX_BIT1_ADDR_LO 0
+#define LPFC_RPI_HDR_COUNT 64
+#define LPFC_HDR_TEMPLATE_SIZE 4096
+#define LPFC_RPI_ALLOC_ERROR 0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT 132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX 0
+
+/* Virtual function numbers */
+#define LPFC_VF0 0
+#define LPFC_VF1 1
+#define LPFC_VF2 2
+#define LPFC_VF3 3
+#define LPFC_VF4 4
+#define LPFC_VF5 5
+#define LPFC_VF6 6
+#define LPFC_VF7 7
+#define LPFC_VF8 8
+#define LPFC_VF9 9
+#define LPFC_VF10 10
+#define LPFC_VF11 11
+#define LPFC_VF12 12
+#define LPFC_VF13 13
+#define LPFC_VF14 14
+#define LPFC_VF15 15
+#define LPFC_VF16 16
+#define LPFC_VF17 17
+#define LPFC_VF18 18
+#define LPFC_VF19 19
+#define LPFC_VF20 20
+#define LPFC_VF21 21
+#define LPFC_VF22 22
+#define LPFC_VF23 23
+#define LPFC_VF24 24
+#define LPFC_VF25 25
+#define LPFC_VF26 26
+#define LPFC_VF27 27
+#define LPFC_VF28 28
+#define LPFC_VF29 29
+#define LPFC_VF30 30
+#define LPFC_VF31 31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0 0
+#define LPFC_PCI_FUNC1 1
+#define LPFC_PCI_FUNC2 2
+#define LPFC_PCI_FUNC3 3
+#define LPFC_PCI_FUNC4 4
+
+/* SLI4 interface type-2 PDEV_CTL register */
+#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
+#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
+#define LPFC_CTL_PDEV_CTL_DD 0x00000004
+#define LPFC_CTL_PDEV_CTL_LC 0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT 4
+
+/* Algrithmns for scheduling FCP commands to WQs */
+#define LPFC_FCP_SCHED_ROUND_ROBIN 0
+#define LPFC_FCP_SCHED_BY_CPU 1
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST 651042
+
+/* Configuration of Interrupts / sec for entire HBA port */
+#define LPFC_MIN_IMAX 5000
+#define LPFC_MAX_IMAX 5000000
+#define LPFC_DEF_IMAX 50000
+
+#define LPFC_MIN_CPU_MAP 0
+#define LPFC_MAX_CPU_MAP 2
+#define LPFC_HBA_CPU_MAP 1
+#define LPFC_DRIVER_CPU_MAP 2 /* Default */
+
+/* PORT_CAPABILITIES constants. */
+#define LPFC_MAX_SUPPORTED_PAGES 8
+
+struct ulp_bde64 {
+ union ULP_BDE_TUS {
+ uint32_t w;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
+ } f;
+ } tus;
+ uint32_t addrLow;
+ uint32_t addrHigh;
+};
+
+/* Maximun size of immediate data that can fit into a 128 byte WQE */
+#define LPFC_MAX_BDE_IMM_SIZE 64
+
+struct lpfc_sli4_flags {
+ uint32_t word0;
+#define lpfc_idx_rsrc_rdy_SHIFT 0
+#define lpfc_idx_rsrc_rdy_MASK 0x00000001
+#define lpfc_idx_rsrc_rdy_WORD word0
+#define LPFC_IDX_RSRC_RDY 1
+#define lpfc_rpi_rsrc_rdy_SHIFT 1
+#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD word0
+#define LPFC_RPI_RSRC_RDY 1
+#define lpfc_vpi_rsrc_rdy_SHIFT 2
+#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD word0
+#define LPFC_VPI_RSRC_RDY 1
+#define lpfc_vfi_rsrc_rdy_SHIFT 3
+#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD word0
+#define LPFC_VFI_RSRC_RDY 1
+};
+
+struct sli4_bls_rsp {
+ uint32_t word0_rsvd; /* Word0 must be reserved */
+ uint32_t word1;
+#define lpfc_abts_orig_SHIFT 0
+#define lpfc_abts_orig_MASK 0x00000001
+#define lpfc_abts_orig_WORD word1
+#define LPFC_ABTS_UNSOL_RSP 1
+#define LPFC_ABTS_UNSOL_INT 0
+ uint32_t word2;
+#define lpfc_abts_rxid_SHIFT 0
+#define lpfc_abts_rxid_MASK 0x0000FFFF
+#define lpfc_abts_rxid_WORD word2
+#define lpfc_abts_oxid_SHIFT 16
+#define lpfc_abts_oxid_MASK 0x0000FFFF
+#define lpfc_abts_oxid_WORD word2
+ uint32_t word3;
+#define lpfc_vndr_code_SHIFT 0
+#define lpfc_vndr_code_MASK 0x000000FF
+#define lpfc_vndr_code_WORD word3
+#define lpfc_rsn_expln_SHIFT 8
+#define lpfc_rsn_expln_MASK 0x000000FF
+#define lpfc_rsn_expln_WORD word3
+#define lpfc_rsn_code_SHIFT 16
+#define lpfc_rsn_code_MASK 0x000000FF
+#define lpfc_rsn_code_WORD word3
+
+ uint32_t word4;
+ uint32_t word5_rsvd; /* Word5 must be reserved */
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+ uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT 16
+#define lpfc_eqe_resource_id_MASK 0x000000FF
+#define lpfc_eqe_resource_id_WORD word0
+#define lpfc_eqe_minor_code_SHIFT 4
+#define lpfc_eqe_minor_code_MASK 0x00000FFF
+#define lpfc_eqe_minor_code_WORD word0
+#define lpfc_eqe_major_code_SHIFT 1
+#define lpfc_eqe_major_code_MASK 0x00000007
+#define lpfc_eqe_major_code_WORD word0
+#define lpfc_eqe_valid_SHIFT 0
+#define lpfc_eqe_valid_MASK 0x00000001
+#define lpfc_eqe_valid_WORD word0
+};
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t word3;
+#define lpfc_cqe_valid_SHIFT 31
+#define lpfc_cqe_valid_MASK 0x00000001
+#define lpfc_cqe_valid_WORD word3
+#define lpfc_cqe_code_SHIFT 16
+#define lpfc_cqe_code_MASK 0x000000FF
+#define lpfc_cqe_code_WORD word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS 0x0
+#define CQE_STATUS_FCP_RSP_FAILURE 0x1
+#define CQE_STATUS_REMOTE_STOP 0x2
+#define CQE_STATUS_LOCAL_REJECT 0x3
+#define CQE_STATUS_NPORT_RJT 0x4
+#define CQE_STATUS_FABRIC_RJT 0x5
+#define CQE_STATUS_NPORT_BSY 0x6
+#define CQE_STATUS_FABRIC_BSY 0x7
+#define CQE_STATUS_INTERMED_RSP 0x8
+#define CQE_STATUS_LS_RJT 0x9
+#define CQE_STATUS_CMD_REJECT 0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
+#define CQE_STATUS_DI_ERROR 0x16
+
+/* Used when mapping CQE status to IOCB */
+#define LPFC_IOCB_STATUS_MASK 0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR 0x0
+#define CQE_HW_STATUS_UNDERRUN 0x1
+#define CQE_HW_STATUS_OVERRUN 0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE 0x1
+#define CQE_CODE_RELEASE_WQE 0x2
+#define CQE_CODE_RECEIVE 0x4
+#define CQE_CODE_XRI_ABORTED 0x5
+#define CQE_CODE_RECEIVE_V1 0x9
+
+/*
+ * Define mask value for xri_aborted and wcqe completed CQE extended status.
+ * Currently, extended status is limited to 9 bits (0x0 -> 0x103) .
+ */
+#define WCQE_PARAM_MASK 0x1FF
+
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+ uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT 16
+#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD word0
+#define lpfc_wcqe_c_status_SHIFT 8
+#define lpfc_wcqe_c_status_MASK 0x000000FF
+#define lpfc_wcqe_c_status_WORD word0
+#define lpfc_wcqe_c_hw_status_SHIFT 0
+#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD word0
+ uint32_t total_data_placed;
+ uint32_t parameter;
+#define lpfc_wcqe_c_bg_edir_SHIFT 5
+#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
+#define lpfc_wcqe_c_bg_edir_WORD parameter
+#define lpfc_wcqe_c_bg_tdpv_SHIFT 3
+#define lpfc_wcqe_c_bg_tdpv_MASK 0x00000001
+#define lpfc_wcqe_c_bg_tdpv_WORD parameter
+#define lpfc_wcqe_c_bg_re_SHIFT 2
+#define lpfc_wcqe_c_bg_re_MASK 0x00000001
+#define lpfc_wcqe_c_bg_re_WORD parameter
+#define lpfc_wcqe_c_bg_ae_SHIFT 1
+#define lpfc_wcqe_c_bg_ae_MASK 0x00000001
+#define lpfc_wcqe_c_bg_ae_WORD parameter
+#define lpfc_wcqe_c_bg_ge_SHIFT 0
+#define lpfc_wcqe_c_bg_ge_MASK 0x00000001
+#define lpfc_wcqe_c_bg_ge_WORD parameter
+ uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT 28
+#define lpfc_wcqe_c_xb_MASK 0x00000001
+#define lpfc_wcqe_c_xb_WORD word3
+#define lpfc_wcqe_c_pv_SHIFT 27
+#define lpfc_wcqe_c_pv_MASK 0x00000001
+#define lpfc_wcqe_c_pv_WORD word3
+#define lpfc_wcqe_c_priority_SHIFT 24
+#define lpfc_wcqe_c_priority_MASK 0x00000007
+#define lpfc_wcqe_c_priority_WORD word3
+#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
+};
+
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT 16
+#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD word2
+#define lpfc_wcqe_r_wqe_index_SHIFT 0
+#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD word2
+ uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+ uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT 8
+#define lpfc_wcqe_xa_status_MASK 0x000000FF
+#define lpfc_wcqe_xa_status_WORD word0
+ uint32_t parameter;
+ uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT 16
+#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD word2
+#define lpfc_wcqe_xa_xri_SHIFT 0
+#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD word2
+ uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT 30
+#define lpfc_wcqe_xa_ia_MASK 0x00000001
+#define lpfc_wcqe_xa_ia_WORD word3
+#define CQE_XRI_ABORTED_IA_REMOTE 0
+#define CQE_XRI_ABORTED_IA_LOCAL 1
+#define lpfc_wcqe_xa_br_SHIFT 29
+#define lpfc_wcqe_xa_br_MASK 0x00000001
+#define lpfc_wcqe_xa_br_WORD word3
+#define CQE_XRI_ABORTED_BR_BA_ACC 0
+#define CQE_XRI_ABORTED_BR_BA_RJT 1
+#define lpfc_wcqe_xa_eo_SHIFT 28
+#define lpfc_wcqe_xa_eo_MASK 0x00000001
+#define lpfc_wcqe_xa_eo_WORD word3
+#define CQE_XRI_ABORTED_EO_REMOTE 0
+#define CQE_XRI_ABORTED_EO_LOCAL 1
+#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
+};
+
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+ uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT 16
+#define lpfc_rcqe_bindex_MASK 0x0000FFF
+#define lpfc_rcqe_bindex_WORD word0
+#define lpfc_rcqe_status_SHIFT 8
+#define lpfc_rcqe_status_MASK 0x000000FF
+#define lpfc_rcqe_status_WORD word0
+#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
+ uint32_t word1;
+#define lpfc_rcqe_fcf_id_v1_SHIFT 0
+#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
+#define lpfc_rcqe_fcf_id_v1_WORD word1
+ uint32_t word2;
+#define lpfc_rcqe_length_SHIFT 16
+#define lpfc_rcqe_length_MASK 0x0000FFFF
+#define lpfc_rcqe_length_WORD word2
+#define lpfc_rcqe_rq_id_SHIFT 6
+#define lpfc_rcqe_rq_id_MASK 0x000003FF
+#define lpfc_rcqe_rq_id_WORD word2
+#define lpfc_rcqe_fcf_id_SHIFT 0
+#define lpfc_rcqe_fcf_id_MASK 0x0000003F
+#define lpfc_rcqe_fcf_id_WORD word2
+#define lpfc_rcqe_rq_id_v1_SHIFT 0
+#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
+#define lpfc_rcqe_rq_id_v1_WORD word2
+ uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT 30
+#define lpfc_rcqe_port_MASK 0x00000001
+#define lpfc_rcqe_port_WORD word3
+#define lpfc_rcqe_hdr_length_SHIFT 24
+#define lpfc_rcqe_hdr_length_MASK 0x0000001F
+#define lpfc_rcqe_hdr_length_WORD word3
+#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT 8
+#define lpfc_rcqe_eof_MASK 0x000000FF
+#define lpfc_rcqe_eof_WORD word3
+#define FCOE_EOFn 0x41
+#define FCOE_EOFt 0x42
+#define FCOE_EOFni 0x49
+#define FCOE_EOFa 0x50
+#define lpfc_rcqe_sof_SHIFT 0
+#define lpfc_rcqe_sof_MASK 0x000000FF
+#define lpfc_rcqe_sof_WORD word3
+#define FCOE_SOFi2 0x2d
+#define FCOE_SOFi3 0x2e
+#define FCOE_SOFn2 0x35
+#define FCOE_SOFn3 0x36
+};
+
+struct lpfc_rqe {
+ uint32_t address_hi;
+ uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+ uint32_t addr_hi;
+ uint32_t addr_lo;
+ uint32_t word2;
+#define lpfc_bde4_last_SHIFT 31
+#define lpfc_bde4_last_MASK 0x00000001
+#define lpfc_bde4_last_WORD word2
+#define lpfc_bde4_sge_offset_SHIFT 0
+#define lpfc_bde4_sge_offset_MASK 0x000003FF
+#define lpfc_bde4_sge_offset_WORD word2
+ uint32_t word3;
+#define lpfc_bde4_length_SHIFT 0
+#define lpfc_bde4_length_MASK 0x000000FF
+#define lpfc_bde4_length_WORD word3
+};
+
+struct lpfc_register {
+ uint32_t word0;
+};
+
+/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
+#define LPFC_UERR_STATUS_HI 0x00A4
+#define LPFC_UERR_STATUS_LO 0x00A0
+#define LPFC_UE_MASK_HI 0x00AC
+#define LPFC_UE_MASK_LO 0x00A8
+
+/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
+#define LPFC_SLI_INTF 0x0058
+
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
+#define lpfc_port_smphr_perr_SHIFT 31
+#define lpfc_port_smphr_perr_MASK 0x1
+#define lpfc_port_smphr_perr_WORD word0
+#define lpfc_port_smphr_sfi_SHIFT 30
+#define lpfc_port_smphr_sfi_MASK 0x1
+#define lpfc_port_smphr_sfi_WORD word0
+#define lpfc_port_smphr_nip_SHIFT 29
+#define lpfc_port_smphr_nip_MASK 0x1
+#define lpfc_port_smphr_nip_WORD word0
+#define lpfc_port_smphr_ipc_SHIFT 28
+#define lpfc_port_smphr_ipc_MASK 0x1
+#define lpfc_port_smphr_ipc_WORD word0
+#define lpfc_port_smphr_scr1_SHIFT 27
+#define lpfc_port_smphr_scr1_MASK 0x1
+#define lpfc_port_smphr_scr1_WORD word0
+#define lpfc_port_smphr_scr2_SHIFT 26
+#define lpfc_port_smphr_scr2_MASK 0x1
+#define lpfc_port_smphr_scr2_WORD word0
+#define lpfc_port_smphr_host_scratch_SHIFT 16
+#define lpfc_port_smphr_host_scratch_MASK 0xFF
+#define lpfc_port_smphr_host_scratch_WORD word0
+#define lpfc_port_smphr_port_status_SHIFT 0
+#define lpfc_port_smphr_port_status_MASK 0xFFFF
+#define lpfc_port_smphr_port_status_WORD word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
+#define LPFC_POST_STAGE_HOST_RDY 0x0002
+#define LPFC_POST_STAGE_BE_RESET 0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
+#define LPFC_POST_STAGE_ARMFW_START 0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
+#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
+#define LPFC_POST_STAGE_PARSE_XML 0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
+#define LPFC_POST_STAGE_RC_DONE 0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
+#define LPFC_POST_STAGE_PORT_READY 0xC000
+#define LPFC_POST_STAGE_PORT_UE 0xF000
+
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
+#define lpfc_sliport_status_err_SHIFT 31
+#define lpfc_sliport_status_err_MASK 0x1
+#define lpfc_sliport_status_err_WORD word0
+#define lpfc_sliport_status_end_SHIFT 30
+#define lpfc_sliport_status_end_MASK 0x1
+#define lpfc_sliport_status_end_WORD word0
+#define lpfc_sliport_status_oti_SHIFT 29
+#define lpfc_sliport_status_oti_MASK 0x1
+#define lpfc_sliport_status_oti_WORD word0
+#define lpfc_sliport_status_rn_SHIFT 24
+#define lpfc_sliport_status_rn_MASK 0x1
+#define lpfc_sliport_status_rn_WORD word0
+#define lpfc_sliport_status_rdy_SHIFT 23
+#define lpfc_sliport_status_rdy_MASK 0x1
+#define lpfc_sliport_status_rdy_WORD word0
+#define MAX_IF_TYPE_2_RESETS 6
+
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
+#define lpfc_sliport_ctrl_end_SHIFT 30
+#define lpfc_sliport_ctrl_end_MASK 0x1
+#define lpfc_sliport_ctrl_end_WORD word0
+#define LPFC_SLIPORT_LITTLE_ENDIAN 0
+#define LPFC_SLIPORT_BIG_ENDIAN 1
+#define lpfc_sliport_ctrl_ip_SHIFT 27
+#define lpfc_sliport_ctrl_ip_MASK 0x1
+#define lpfc_sliport_ctrl_ip_WORD word0
+#define LPFC_SLIPORT_INIT_PORT 1
+
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+
+/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
+ * reside in BAR 2.
+ */
+#define LPFC_SLIPORT_IF0_SMPHR 0x00AC
+
+#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
+
+#define LPFC_HST_ISR0 0x0C18
+#define LPFC_HST_ISR1 0x0C1C
+#define LPFC_HST_ISR2 0x0C20
+#define LPFC_HST_ISR3 0x0C24
+#define LPFC_HST_ISR4 0x0C28
+
+#define LPFC_HST_IMR0 0x0C48
+#define LPFC_HST_IMR1 0x0C4C
+#define LPFC_HST_IMR2 0x0C50
+#define LPFC_HST_IMR3 0x0C54
+#define LPFC_HST_IMR4 0x0C58
+
+#define LPFC_HST_ISCR0 0x0C78
+#define LPFC_HST_ISCR1 0x0C7C
+#define LPFC_HST_ISCR2 0x0C80
+#define LPFC_HST_ISCR3 0x0C84
+#define LPFC_HST_ISCR4 0x0C88
+
+#define LPFC_SLI4_INTR0 BIT0
+#define LPFC_SLI4_INTR1 BIT1
+#define LPFC_SLI4_INTR2 BIT2
+#define LPFC_SLI4_INTR3 BIT3
+#define LPFC_SLI4_INTR4 BIT4
+#define LPFC_SLI4_INTR5 BIT5
+#define LPFC_SLI4_INTR6 BIT6
+#define LPFC_SLI4_INTR7 BIT7
+#define LPFC_SLI4_INTR8 BIT8
+#define LPFC_SLI4_INTR9 BIT9
+#define LPFC_SLI4_INTR10 BIT10
+#define LPFC_SLI4_INTR11 BIT11
+#define LPFC_SLI4_INTR12 BIT12
+#define LPFC_SLI4_INTR13 BIT13
+#define LPFC_SLI4_INTR14 BIT14
+#define LPFC_SLI4_INTR15 BIT15
+#define LPFC_SLI4_INTR16 BIT16
+#define LPFC_SLI4_INTR17 BIT17
+#define LPFC_SLI4_INTR18 BIT18
+#define LPFC_SLI4_INTR19 BIT19
+#define LPFC_SLI4_INTR20 BIT20
+#define LPFC_SLI4_INTR21 BIT21
+#define LPFC_SLI4_INTR22 BIT22
+#define LPFC_SLI4_INTR23 BIT23
+#define LPFC_SLI4_INTR24 BIT24
+#define LPFC_SLI4_INTR25 BIT25
+#define LPFC_SLI4_INTR26 BIT26
+#define LPFC_SLI4_INTR27 BIT27
+#define LPFC_SLI4_INTR28 BIT28
+#define LPFC_SLI4_INTR29 BIT29
+#define LPFC_SLI4_INTR30 BIT30
+#define LPFC_SLI4_INTR31 BIT31
+
+/*
+ * The Doorbell registers defined here exist in different BAR
+ * register sets depending on the UCNA Port's reported if_type
+ * value. For UCNA ports running SLI4 and if_type 0, they reside in
+ * BAR4. For UCNA ports running SLI4 and if_type 2, they reside in
+ * BAR0. The offsets are the same so the driver must account for
+ * any base address difference.
+ */
+#define LPFC_ULP0_RQ_DOORBELL 0x00A0
+#define LPFC_ULP1_RQ_DOORBELL 0x00C0
+#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_rq_db_list_fm_num_posted_WORD word0
+#define lpfc_rq_db_list_fm_index_SHIFT 16
+#define lpfc_rq_db_list_fm_index_MASK 0x00FF
+#define lpfc_rq_db_list_fm_index_WORD word0
+#define lpfc_rq_db_list_fm_id_SHIFT 0
+#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_list_fm_id_WORD word0
+#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_rq_db_ring_fm_num_posted_WORD word0
+#define lpfc_rq_db_ring_fm_id_SHIFT 0
+#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_ring_fm_id_WORD word0
+
+#define LPFC_ULP0_WQ_DOORBELL 0x0040
+#define LPFC_ULP1_WQ_DOORBELL 0x0060
+#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_wq_db_list_fm_num_posted_WORD word0
+#define lpfc_wq_db_list_fm_index_SHIFT 16
+#define lpfc_wq_db_list_fm_index_MASK 0x00FF
+#define lpfc_wq_db_list_fm_index_WORD word0
+#define lpfc_wq_db_list_fm_id_SHIFT 0
+#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_list_fm_id_WORD word0
+#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_wq_db_ring_fm_num_posted_WORD word0
+#define lpfc_wq_db_ring_fm_id_SHIFT 0
+#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_ring_fm_id_WORD word0
+
+#define LPFC_EQCQ_DOORBELL 0x0120
+#define lpfc_eqcq_doorbell_se_SHIFT 31
+#define lpfc_eqcq_doorbell_se_MASK 0x0001
+#define lpfc_eqcq_doorbell_se_WORD word0
+#define LPFC_EQCQ_SOLICIT_ENABLE_OFF 0
+#define LPFC_EQCQ_SOLICIT_ENABLE_ON 1
+#define lpfc_eqcq_doorbell_arm_SHIFT 29
+#define lpfc_eqcq_doorbell_arm_MASK 0x0001
+#define lpfc_eqcq_doorbell_arm_WORD word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT 16
+#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD word0
+#define lpfc_eqcq_doorbell_qt_SHIFT 10
+#define lpfc_eqcq_doorbell_qt_MASK 0x0001
+#define lpfc_eqcq_doorbell_qt_WORD word0
+#define LPFC_QUEUE_TYPE_COMPLETION 0
+#define LPFC_QUEUE_TYPE_EVENT 1
+#define lpfc_eqcq_doorbell_eqci_SHIFT 9
+#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD word0
+#define lpfc_eqcq_doorbell_cqid_lo_SHIFT 0
+#define lpfc_eqcq_doorbell_cqid_lo_MASK 0x03FF
+#define lpfc_eqcq_doorbell_cqid_lo_WORD word0
+#define lpfc_eqcq_doorbell_cqid_hi_SHIFT 11
+#define lpfc_eqcq_doorbell_cqid_hi_MASK 0x001F
+#define lpfc_eqcq_doorbell_cqid_hi_WORD word0
+#define lpfc_eqcq_doorbell_eqid_lo_SHIFT 0
+#define lpfc_eqcq_doorbell_eqid_lo_MASK 0x01FF
+#define lpfc_eqcq_doorbell_eqid_lo_WORD word0
+#define lpfc_eqcq_doorbell_eqid_hi_SHIFT 11
+#define lpfc_eqcq_doorbell_eqid_hi_MASK 0x001F
+#define lpfc_eqcq_doorbell_eqid_hi_WORD word0
+#define LPFC_CQID_HI_FIELD_SHIFT 10
+#define LPFC_EQID_HI_FIELD_SHIFT 9
+
+#define LPFC_BMBX 0x0160
+#define lpfc_bmbx_addr_SHIFT 2
+#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD word0
+#define lpfc_bmbx_hi_SHIFT 1
+#define lpfc_bmbx_hi_MASK 0x0001
+#define lpfc_bmbx_hi_WORD word0
+#define lpfc_bmbx_rdy_SHIFT 0
+#define lpfc_bmbx_rdy_MASK 0x0001
+#define lpfc_bmbx_rdy_WORD word0
+
+#define LPFC_MQ_DOORBELL 0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT 16
+#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD word0
+#define lpfc_mq_doorbell_id_SHIFT 0
+#define lpfc_mq_doorbell_id_MASK 0xFFFF
+#define lpfc_mq_doorbell_id_WORD word0
+
+struct lpfc_sli4_cfg_mhdr {
+ uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT 0
+#define lpfc_mbox_hdr_emb_MASK 0x00000001
+#define lpfc_mbox_hdr_emb_WORD word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
+#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD word1
+ uint32_t payload_length;
+ uint32_t tag_lo;
+ uint32_t tag_hi;
+ uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+ struct {
+ uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT 0
+#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD word6
+#define lpfc_mbox_hdr_subsystem_SHIFT 8
+#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD word6
+#define lpfc_mbox_hdr_port_number_SHIFT 16
+#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD word6
+#define lpfc_mbox_hdr_domain_SHIFT 24
+#define lpfc_mbox_hdr_domain_MASK 0x000000FF
+#define lpfc_mbox_hdr_domain_WORD word6
+ uint32_t timeout;
+ uint32_t request_length;
+ uint32_t word9;
+#define lpfc_mbox_hdr_version_SHIFT 0
+#define lpfc_mbox_hdr_version_MASK 0x000000FF
+#define lpfc_mbox_hdr_version_WORD word9
+#define lpfc_mbox_hdr_pf_num_SHIFT 16
+#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD word9
+#define lpfc_mbox_hdr_vh_num_SHIFT 24
+#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD word9
+#define LPFC_Q_CREATE_VERSION_2 2
+#define LPFC_Q_CREATE_VERSION_1 1
+#define LPFC_Q_CREATE_VERSION_0 0
+#define LPFC_OPCODE_VERSION_0 0
+#define LPFC_OPCODE_VERSION_1 1
+ } request;
+ struct {
+ uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT 0
+#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD word6
+#define lpfc_mbox_hdr_subsystem_SHIFT 8
+#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD word6
+#define lpfc_mbox_hdr_domain_SHIFT 24
+#define lpfc_mbox_hdr_domain_MASK 0x000000FF
+#define lpfc_mbox_hdr_domain_WORD word6
+ uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT 0
+#define lpfc_mbox_hdr_status_MASK 0x000000FF
+#define lpfc_mbox_hdr_status_WORD word7
+#define lpfc_mbox_hdr_add_status_SHIFT 8
+#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD word7
+ uint32_t response_length;
+ uint32_t actual_response_length;
+ } response;
+};
+
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
+struct mbox_header {
+ struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+};
+
+#define LPFC_EXTENT_LOCAL 0
+#define LPFC_TIMEOUT_DEFAULT 0
+#define LPFC_EXTENT_VERSION_DEFAULT 0
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_NA 0x0
+#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
+
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_NA 0x00
+#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
+#define LPFC_MBOX_OPCODE_NOP 0x21
+#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29
+#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
+#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E
+#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43
+#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
+#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
+#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES 0xA1
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5
+#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6
+#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE 0xA8
+#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG 0xA9
+#define LPFC_MBOX_OPCODE_READ_OBJECT 0xAB
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
+#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD
+#define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
+#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
+
+/* Mailbox command structures */
+struct eq_context {
+ uint32_t word0;
+#define lpfc_eq_context_size_SHIFT 31
+#define lpfc_eq_context_size_MASK 0x00000001
+#define lpfc_eq_context_size_WORD word0
+#define LPFC_EQE_SIZE_4 0x0
+#define LPFC_EQE_SIZE_16 0x1
+#define lpfc_eq_context_valid_SHIFT 29
+#define lpfc_eq_context_valid_MASK 0x00000001
+#define lpfc_eq_context_valid_WORD word0
+ uint32_t word1;
+#define lpfc_eq_context_count_SHIFT 26
+#define lpfc_eq_context_count_MASK 0x00000003
+#define lpfc_eq_context_count_WORD word1
+#define LPFC_EQ_CNT_256 0x0
+#define LPFC_EQ_CNT_512 0x1
+#define LPFC_EQ_CNT_1024 0x2
+#define LPFC_EQ_CNT_2048 0x3
+#define LPFC_EQ_CNT_4096 0x4
+ uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT 13
+#define lpfc_eq_context_delay_multi_MASK 0x000003FF
+#define lpfc_eq_context_delay_multi_WORD word2
+ uint32_t reserved3;
+};
+
+struct eq_delay_info {
+ uint32_t eq_id;
+ uint32_t phase;
+ uint32_t delay_multi;
+};
+#define LPFC_MAX_EQ_DELAY 8
+
+struct sgl_page_pairs {
+ uint32_t sgl_pg0_addr_lo;
+ uint32_t sgl_pg0_addr_hi;
+ uint32_t sgl_pg1_addr_lo;
+ uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+ struct mbox_header header;
+ uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT 0
+#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT 16
+#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD word0
+ struct sgl_page_pairs sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word0;
+ struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+ uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+ struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
+ struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+ void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT 0
+#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD word0
+ struct eq_context context;
+ struct dma_address page[LPFC_MAX_EQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT 0
+#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_modify_eq_delay {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t num_eq;
+ struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_eq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_nop {
+ struct mbox_header header;
+ uint32_t context[2];
+};
+
+struct cq_context {
+ uint32_t word0;
+#define lpfc_cq_context_event_SHIFT 31
+#define lpfc_cq_context_event_MASK 0x00000001
+#define lpfc_cq_context_event_WORD word0
+#define lpfc_cq_context_valid_SHIFT 29
+#define lpfc_cq_context_valid_MASK 0x00000001
+#define lpfc_cq_context_valid_WORD word0
+#define lpfc_cq_context_count_SHIFT 27
+#define lpfc_cq_context_count_MASK 0x00000003
+#define lpfc_cq_context_count_WORD word0
+#define LPFC_CQ_CNT_256 0x0
+#define LPFC_CQ_CNT_512 0x1
+#define LPFC_CQ_CNT_1024 0x2
+ uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
+#define lpfc_cq_eq_id_MASK 0x000000FF
+#define lpfc_cq_eq_id_WORD word1
+#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
+#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
+#define lpfc_cq_eq_id_2_WORD word1
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */
+#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF
+#define lpfc_mbx_cq_create_page_size_WORD word0
+#define lpfc_mbx_cq_create_num_pages_SHIFT 0
+#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD word0
+ struct cq_context context;
+ struct dma_address page[LPFC_MAX_CQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT 0
+#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_cq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct wq_context {
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+ struct mbox_header header;
+ union {
+ struct { /* Version 0 Request */
+ uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT 0
+#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
+#define lpfc_mbx_wq_create_num_pages_WORD word0
+#define lpfc_mbx_wq_create_dua_SHIFT 8
+#define lpfc_mbx_wq_create_dua_MASK 0x00000001
+#define lpfc_mbx_wq_create_dua_WORD word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT 16
+#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD word0
+ struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
+ uint32_t word9;
+#define lpfc_mbx_wq_create_bua_SHIFT 0
+#define lpfc_mbx_wq_create_bua_MASK 0x00000001
+#define lpfc_mbx_wq_create_bua_WORD word9
+#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
+#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_wq_create_ulp_num_WORD word9
+ } request;
+ struct { /* Version 1 Request */
+ uint32_t word0; /* Word 0 is the same as in v0 */
+ uint32_t word1;
+#define lpfc_mbx_wq_create_page_size_SHIFT 0
+#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
+#define lpfc_mbx_wq_create_page_size_WORD word1
+#define lpfc_mbx_wq_create_wqe_size_SHIFT 8
+#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
+#define lpfc_mbx_wq_create_wqe_size_WORD word1
+#define LPFC_WQ_WQE_SIZE_64 0x5
+#define LPFC_WQ_WQE_SIZE_128 0x6
+#define lpfc_mbx_wq_create_wqe_count_SHIFT 16
+#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_wqe_count_WORD word1
+ uint32_t word2;
+ struct dma_address page[LPFC_MAX_WQ_PAGE-1];
+ } request_1;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT 0
+#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_wq_create_bar_set_SHIFT 0
+#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_bar_set_WORD word2
+#define WQ_PCI_BAR_0_AND_1 0x00
+#define WQ_PCI_BAR_2_AND_3 0x01
+#define WQ_PCI_BAR_4_AND_5 0x02
+#define lpfc_mbx_wq_create_db_format_SHIFT 16
+#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_db_format_WORD word2
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_wq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 2048
+struct rq_context {
+ uint32_t word0;
+#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
+#define lpfc_rq_context_rqe_count_MASK 0x0000000F
+#define lpfc_rq_context_rqe_count_WORD word0
+#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
+#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */
+#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF
+#define lpfc_rq_context_rqe_count_1_WORD word0
+#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
+#define lpfc_rq_context_rqe_size_MASK 0x0000000F
+#define lpfc_rq_context_rqe_size_WORD word0
+#define LPFC_RQE_SIZE_8 2
+#define LPFC_RQE_SIZE_16 3
+#define LPFC_RQE_SIZE_32 4
+#define LPFC_RQE_SIZE_64 5
+#define LPFC_RQE_SIZE_128 6
+#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
+#define lpfc_rq_context_page_size_MASK 0x000000FF
+#define lpfc_rq_context_page_size_WORD word0
+ uint32_t reserved1;
+ uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT 16
+#define lpfc_rq_context_cq_id_MASK 0x000003FF
+#define lpfc_rq_context_cq_id_WORD word2
+#define lpfc_rq_context_buf_size_SHIFT 0
+#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD word2
+ uint32_t buffer_size; /* Version 1 Only */
+};
+
+struct lpfc_mbx_rq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT 0
+#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD word0
+#define lpfc_mbx_rq_create_dua_SHIFT 16
+#define lpfc_mbx_rq_create_dua_MASK 0x00000001
+#define lpfc_mbx_rq_create_dua_WORD word0
+#define lpfc_mbx_rq_create_bqu_SHIFT 17
+#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
+#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD word0
+ struct rq_context context;
+ struct dma_address page[LPFC_MAX_WQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_create_q_id_SHIFT 0
+#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT 0
+#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD word2
+#define lpfc_mbx_rq_create_db_format_SHIFT 16
+#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD word2
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_rq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct mq_context {
+ uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */
+#define lpfc_mq_context_cq_id_MASK 0x000003FF
+#define lpfc_mq_context_cq_id_WORD word0
+#define lpfc_mq_context_ring_size_SHIFT 16
+#define lpfc_mq_context_ring_size_MASK 0x0000000F
+#define lpfc_mq_context_ring_size_WORD word0
+#define LPFC_MQ_RING_SIZE_16 0x5
+#define LPFC_MQ_RING_SIZE_32 0x6
+#define LPFC_MQ_RING_SIZE_64 0x7
+#define LPFC_MQ_RING_SIZE_128 0x8
+ uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT 31
+#define lpfc_mq_context_valid_MASK 0x00000001
+#define lpfc_mq_context_valid_WORD word1
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD word0
+ struct mq_context context;
+ struct dma_address page[LPFC_MAX_MQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT 0
+#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_mq_create_ext {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */
+#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_cq_id_WORD word0
+ uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
+#define LPFC_EVT_CODE_LINK_NO_LINK 0x0
+#define LPFC_EVT_CODE_LINK_10_MBIT 0x1
+#define LPFC_EVT_CODE_LINK_100_MBIT 0x2
+#define LPFC_EVT_CODE_LINK_1_GBIT 0x3
+#define LPFC_EVT_CODE_LINK_10_GBIT 0x4
+#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC
+#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap
+#define LPFC_EVT_CODE_FC_NO_LINK 0x0
+#define LPFC_EVT_CODE_FC_1_GBAUD 0x1
+#define LPFC_EVT_CODE_FC_2_GBAUD 0x2
+#define LPFC_EVT_CODE_FC_4_GBAUD 0x4
+#define LPFC_EVT_CODE_FC_8_GBAUD 0x8
+#define LPFC_EVT_CODE_FC_10_GBAUD 0xA
+#define LPFC_EVT_CODE_FC_16_GBAUD 0x10
+#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI
+#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap
+ struct mq_context context;
+ struct dma_address page[LPFC_MAX_MQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT 0
+#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD word0
+ } response;
+ } u;
+#define LPFC_ASYNC_EVENT_LINK_STATE 0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE 0x4
+#define LPFC_ASYNC_EVENT_GROUP5 0x20
+};
+
+struct lpfc_mbx_mq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_query_fw_config {
+ struct mbox_header header;
+ struct {
+ uint32_t config_number;
+#define LPFC_FC_FCOE 0x00000007
+ uint32_t asic_revision;
+ uint32_t physical_port;
+ uint32_t function_mode;
+#define LPFC_FCOE_INI_MODE 0x00000040
+#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_DUA_MODE 0x00000800
+ uint32_t ulp0_mode;
+#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
+#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
+ uint32_t ulp0_nap_words[12];
+ uint32_t ulp1_mode;
+ uint32_t ulp1_nap_words[12];
+ uint32_t function_capabilities;
+ uint32_t cqid_base;
+ uint32_t cqid_tot;
+ uint32_t eqid_base;
+ uint32_t eqid_tot;
+ uint32_t ulp0_nap2_words[2];
+ uint32_t ulp1_nap2_words[2];
+ } rsp;
+};
+
+struct lpfc_id_range {
+ uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT 0
+#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2
+#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES 0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
+#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT 16
+#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
+#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD word2
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 words of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically size to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT 0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+ struct lpfc_id_range id[53];
+ } rsp;
+ } u;
+};
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
+ * structure shares the same SHIFT/MASK/WORD defines provided in the
+ * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents, word4, provided in
+ * the structures defined above. This non-embedded structure provides for the
+ * maximum number of extents supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word4;
+ struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+ struct mbox_header header;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
+ } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
+struct lpfc_mbx_post_hdr_tmpl {
+ struct mbox_header header;
+ uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
+ uint32_t rpi_paddr_lo;
+ uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge { /* SLI-4 */
+ uint32_t addr_hi;
+ uint32_t addr_lo;
+
+ uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT 0
+#define lpfc_sli4_sge_offset_MASK 0x07FFFFFF
+#define lpfc_sli4_sge_offset_WORD word2
+#define lpfc_sli4_sge_type_SHIFT 27
+#define lpfc_sli4_sge_type_MASK 0x0000000F
+#define lpfc_sli4_sge_type_WORD word2
+#define LPFC_SGE_TYPE_DATA 0x0
+#define LPFC_SGE_TYPE_DIF 0x4
+#define LPFC_SGE_TYPE_LSP 0x5
+#define LPFC_SGE_TYPE_PEDIF 0x6
+#define LPFC_SGE_TYPE_PESEED 0x7
+#define LPFC_SGE_TYPE_DISEED 0x8
+#define LPFC_SGE_TYPE_ENC 0x9
+#define LPFC_SGE_TYPE_ATM 0xA
+#define LPFC_SGE_TYPE_SKIP 0xC
+#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_last_MASK 0x00000001
+#define lpfc_sli4_sge_last_WORD word2
+ uint32_t sge_len;
+};
+
+struct sli4_sge_diseed { /* SLI-4 */
+ uint32_t ref_tag;
+ uint32_t ref_tag_tran;
+
+ uint32_t word2;
+#define lpfc_sli4_sge_dif_apptran_SHIFT 0
+#define lpfc_sli4_sge_dif_apptran_MASK 0x0000FFFF
+#define lpfc_sli4_sge_dif_apptran_WORD word2
+#define lpfc_sli4_sge_dif_af_SHIFT 24
+#define lpfc_sli4_sge_dif_af_MASK 0x00000001
+#define lpfc_sli4_sge_dif_af_WORD word2
+#define lpfc_sli4_sge_dif_na_SHIFT 25
+#define lpfc_sli4_sge_dif_na_MASK 0x00000001
+#define lpfc_sli4_sge_dif_na_WORD word2
+#define lpfc_sli4_sge_dif_hi_SHIFT 26
+#define lpfc_sli4_sge_dif_hi_MASK 0x00000001
+#define lpfc_sli4_sge_dif_hi_WORD word2
+#define lpfc_sli4_sge_dif_type_SHIFT 27
+#define lpfc_sli4_sge_dif_type_MASK 0x0000000F
+#define lpfc_sli4_sge_dif_type_WORD word2
+#define lpfc_sli4_sge_dif_last_SHIFT 31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_dif_last_MASK 0x00000001
+#define lpfc_sli4_sge_dif_last_WORD word2
+ uint32_t word3;
+#define lpfc_sli4_sge_dif_apptag_SHIFT 0
+#define lpfc_sli4_sge_dif_apptag_MASK 0x0000FFFF
+#define lpfc_sli4_sge_dif_apptag_WORD word3
+#define lpfc_sli4_sge_dif_bs_SHIFT 16
+#define lpfc_sli4_sge_dif_bs_MASK 0x00000007
+#define lpfc_sli4_sge_dif_bs_WORD word3
+#define lpfc_sli4_sge_dif_ai_SHIFT 19
+#define lpfc_sli4_sge_dif_ai_MASK 0x00000001
+#define lpfc_sli4_sge_dif_ai_WORD word3
+#define lpfc_sli4_sge_dif_me_SHIFT 20
+#define lpfc_sli4_sge_dif_me_MASK 0x00000001
+#define lpfc_sli4_sge_dif_me_WORD word3
+#define lpfc_sli4_sge_dif_re_SHIFT 21
+#define lpfc_sli4_sge_dif_re_MASK 0x00000001
+#define lpfc_sli4_sge_dif_re_WORD word3
+#define lpfc_sli4_sge_dif_ce_SHIFT 22
+#define lpfc_sli4_sge_dif_ce_MASK 0x00000001
+#define lpfc_sli4_sge_dif_ce_WORD word3
+#define lpfc_sli4_sge_dif_nr_SHIFT 23
+#define lpfc_sli4_sge_dif_nr_MASK 0x00000001
+#define lpfc_sli4_sge_dif_nr_WORD word3
+#define lpfc_sli4_sge_dif_oprx_SHIFT 24
+#define lpfc_sli4_sge_dif_oprx_MASK 0x0000000F
+#define lpfc_sli4_sge_dif_oprx_WORD word3
+#define lpfc_sli4_sge_dif_optx_SHIFT 28
+#define lpfc_sli4_sge_dif_optx_MASK 0x0000000F
+#define lpfc_sli4_sge_dif_optx_WORD word3
+/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */
+};
+
+struct fcf_record {
+ uint32_t max_rcv_size;
+ uint32_t fka_adv_period;
+ uint32_t fip_priority;
+ uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT 0
+#define lpfc_fcf_record_mac_0_MASK 0x000000FF
+#define lpfc_fcf_record_mac_0_WORD word3
+#define lpfc_fcf_record_mac_1_SHIFT 8
+#define lpfc_fcf_record_mac_1_MASK 0x000000FF
+#define lpfc_fcf_record_mac_1_WORD word3
+#define lpfc_fcf_record_mac_2_SHIFT 16
+#define lpfc_fcf_record_mac_2_MASK 0x000000FF
+#define lpfc_fcf_record_mac_2_WORD word3
+#define lpfc_fcf_record_mac_3_SHIFT 24
+#define lpfc_fcf_record_mac_3_MASK 0x000000FF
+#define lpfc_fcf_record_mac_3_WORD word3
+ uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT 0
+#define lpfc_fcf_record_mac_4_MASK 0x000000FF
+#define lpfc_fcf_record_mac_4_WORD word4
+#define lpfc_fcf_record_mac_5_SHIFT 8
+#define lpfc_fcf_record_mac_5_MASK 0x000000FF
+#define lpfc_fcf_record_mac_5_WORD word4
+#define lpfc_fcf_record_fcf_avail_SHIFT 16
+#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
+#define lpfc_fcf_record_fcf_avail_WORD word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
+#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD word4
+#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
+ uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT 0
+#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD word5
+#define lpfc_fcf_record_fab_name_1_SHIFT 8
+#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD word5
+#define lpfc_fcf_record_fab_name_2_SHIFT 16
+#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD word5
+#define lpfc_fcf_record_fab_name_3_SHIFT 24
+#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD word5
+ uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT 0
+#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD word6
+#define lpfc_fcf_record_fab_name_5_SHIFT 8
+#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD word6
+#define lpfc_fcf_record_fab_name_6_SHIFT 16
+#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD word6
+#define lpfc_fcf_record_fab_name_7_SHIFT 24
+#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD word6
+ uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT 0
+#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD word7
+#define lpfc_fcf_record_fc_map_1_SHIFT 8
+#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD word7
+#define lpfc_fcf_record_fc_map_2_SHIFT 16
+#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD word7
+#define lpfc_fcf_record_fcf_valid_SHIFT 24
+#define lpfc_fcf_record_fcf_valid_MASK 0x00000001
+#define lpfc_fcf_record_fcf_valid_WORD word7
+#define lpfc_fcf_record_fcf_fc_SHIFT 25
+#define lpfc_fcf_record_fcf_fc_MASK 0x00000001
+#define lpfc_fcf_record_fcf_fc_WORD word7
+#define lpfc_fcf_record_fcf_sol_SHIFT 31
+#define lpfc_fcf_record_fcf_sol_MASK 0x00000001
+#define lpfc_fcf_record_fcf_sol_WORD word7
+ uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT 0
+#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD word8
+#define lpfc_fcf_record_fcf_state_SHIFT 16
+#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD word8
+ uint8_t vlan_bitmap[512];
+ uint32_t word137;
+#define lpfc_fcf_record_switch_name_0_SHIFT 0
+#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_0_WORD word137
+#define lpfc_fcf_record_switch_name_1_SHIFT 8
+#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_1_WORD word137
+#define lpfc_fcf_record_switch_name_2_SHIFT 16
+#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_2_WORD word137
+#define lpfc_fcf_record_switch_name_3_SHIFT 24
+#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_3_WORD word137
+ uint32_t word138;
+#define lpfc_fcf_record_switch_name_4_SHIFT 0
+#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_4_WORD word138
+#define lpfc_fcf_record_switch_name_5_SHIFT 8
+#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_5_WORD word138
+#define lpfc_fcf_record_switch_name_6_SHIFT 16
+#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_6_WORD word138
+#define lpfc_fcf_record_switch_name_7_SHIFT 24
+#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_7_WORD word138
+};
+
+struct lpfc_mbx_read_fcf_tbl {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
+ } request;
+ struct {
+ uint32_t eventag;
+ } response;
+ } u;
+ uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
+ struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+ struct mbox_header header;
+ uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
+#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
+#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD word10
+};
+
+struct lpfc_mbx_redisc_fcf_tbl {
+ struct mbox_header header;
+ uint32_t word10;
+#define lpfc_mbx_redisc_fcf_count_SHIFT 0
+#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF
+#define lpfc_mbx_redisc_fcf_count_WORD word10
+ uint32_t resvd;
+ uint32_t word12;
+#define lpfc_mbx_redisc_fcf_index_SHIFT 0
+#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF
+#define lpfc_mbx_redisc_fcf_index_WORD word12
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS 0x0
+#define STATUS_FAILED 0x1
+#define STATUS_ILLEGAL_REQUEST 0x2
+#define STATUS_ILLEGAL_FIELD 0x3
+#define STATUS_INSUFFICIENT_BUFFER 0x4
+#define STATUS_UNAUTHORIZED_REQUEST 0x5
+#define STATUS_FLASHROM_SAVE_FAILED 0x17
+#define STATUS_FLASHROM_RESTORE_FAILED 0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
+#define STATUS_ASSERT_FAILED 0x1e
+#define STATUS_INVALID_SESSION 0x1f
+#define STATUS_INVALID_CONNECTION 0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
+#define STATUS_FLASHROM_READ_FAILED 0x27
+#define STATUS_POLL_IOCTL_TIMEOUT 0x28
+#define STATUS_ERROR_ACITMAIN 0x2a
+#define STATUS_REBOOT_REQUIRED 0x2c
+#define STATUS_FCF_IN_USE 0x3a
+#define STATUS_FCF_TABLE_EMPTY 0x43
+
+struct lpfc_mbx_sli4_config {
+ struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+ uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT 31
+#define lpfc_init_vfi_vr_MASK 0x00000001
+#define lpfc_init_vfi_vr_WORD word1
+#define lpfc_init_vfi_vt_SHIFT 30
+#define lpfc_init_vfi_vt_MASK 0x00000001
+#define lpfc_init_vfi_vt_WORD word1
+#define lpfc_init_vfi_vf_SHIFT 29
+#define lpfc_init_vfi_vf_MASK 0x00000001
+#define lpfc_init_vfi_vf_WORD word1
+#define lpfc_init_vfi_vp_SHIFT 28
+#define lpfc_init_vfi_vp_MASK 0x00000001
+#define lpfc_init_vfi_vp_WORD word1
+#define lpfc_init_vfi_vfi_SHIFT 0
+#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD word1
+ uint32_t word2;
+#define lpfc_init_vfi_vpi_SHIFT 16
+#define lpfc_init_vfi_vpi_MASK 0x0000FFFF
+#define lpfc_init_vfi_vpi_WORD word2
+#define lpfc_init_vfi_fcfi_SHIFT 0
+#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD word2
+ uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT 13
+#define lpfc_init_vfi_pri_MASK 0x00000007
+#define lpfc_init_vfi_pri_WORD word3
+#define lpfc_init_vfi_vf_id_SHIFT 1
+#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD word3
+ uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT 24
+#define lpfc_init_vfi_hop_count_MASK 0x000000FF
+#define lpfc_init_vfi_hop_count_WORD word4
+};
+#define MBX_VFI_IN_USE 0x9F02
+
+
+struct lpfc_mbx_reg_vfi {
+ uint32_t word1;
+#define lpfc_reg_vfi_upd_SHIFT 29
+#define lpfc_reg_vfi_upd_MASK 0x00000001
+#define lpfc_reg_vfi_upd_WORD word1
+#define lpfc_reg_vfi_vp_SHIFT 28
+#define lpfc_reg_vfi_vp_MASK 0x00000001
+#define lpfc_reg_vfi_vp_WORD word1
+#define lpfc_reg_vfi_vfi_SHIFT 0
+#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD word1
+ uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT 16
+#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD word2
+#define lpfc_reg_vfi_fcfi_SHIFT 0
+#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD word2
+ uint32_t wwn[2];
+ struct ulp_bde64 bde;
+ uint32_t e_d_tov;
+ uint32_t r_a_tov;
+ uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT 0
+#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD word10
+};
+
+struct lpfc_mbx_init_vpi {
+ uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT 16
+#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD word1
+#define lpfc_init_vpi_vpi_SHIFT 0
+#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD word1
+};
+
+struct lpfc_mbx_read_vpi {
+ uint32_t word1_rsvd;
+ uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
+#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD word2
+ uint32_t word3_rsvd;
+ uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
+#define lpfc_mbx_read_vpi_pb_SHIFT 15
+#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
+#define lpfc_mbx_read_vpi_ns_SHIFT 30
+#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD word4
+#define lpfc_mbx_read_vpi_hl_SHIFT 31
+#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD word4
+ uint32_t word5_rsvd;
+ uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT 0
+#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD word6
+ uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
+#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
+#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
+#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
+#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD word7
+ uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
+#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
+#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
+#define lpfc_mbx_read_vpi_vv_SHIFT 28
+#define lpfc_mbx_read_vpi_vv_MASK 0x0000001
+#define lpfc_mbx_read_vpi_vv_WORD word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+ uint32_t word1_rsvd;
+ uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT 0
+#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD word2
+};
+
+struct lpfc_mbx_resume_rpi {
+ uint32_t word1;
+#define lpfc_resume_rpi_index_SHIFT 0
+#define lpfc_resume_rpi_index_MASK 0x0000FFFF
+#define lpfc_resume_rpi_index_WORD word1
+#define lpfc_resume_rpi_ii_SHIFT 30
+#define lpfc_resume_rpi_ii_MASK 0x00000003
+#define lpfc_resume_rpi_ii_WORD word1
+#define RESUME_INDEX_RPI 0
+#define RESUME_INDEX_VPI 1
+#define RESUME_INDEX_VFI 2
+#define RESUME_INDEX_FCFI 3
+ uint32_t event_tag;
+};
+
+#define REG_FCF_INVALID_QID 0xFFFF
+struct lpfc_mbx_reg_fcfi {
+ uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT 0
+#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD word1
+#define lpfc_reg_fcfi_fcfi_SHIFT 16
+#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD word1
+ uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT 0
+#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT 16
+#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD word2
+ uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT 0
+#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT 16
+#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD word3
+ uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT 24
+#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT 16
+#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD word4
+ uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT 24
+#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT 16
+#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD word5
+ uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT 24
+#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT 16
+#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD word6
+ uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT 24
+#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT 16
+#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD word7
+ uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT 13
+#define lpfc_reg_fcfi_mam_MASK 0x00000003
+#define lpfc_reg_fcfi_mam_WORD word8
+#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
+#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT 12
+#define lpfc_reg_fcfi_vv_MASK 0x00000001
+#define lpfc_reg_fcfi_vv_WORD word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
+#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD word8
+};
+
+struct lpfc_mbx_unreg_fcfi {
+ uint32_t word1_rsv;
+ uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT 0
+#define lpfc_unreg_fcfi_MASK 0x0000FFFF
+#define lpfc_unreg_fcfi_WORD word2
+};
+
+struct lpfc_mbx_read_rev {
+ uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
+#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD word1
+#define lpfc_mbx_rd_rev_cee_ver_SHIFT 21
+#define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003
+#define lpfc_mbx_rd_rev_cee_ver_WORD word1
+#define LPFC_PREDCBX_CEE_MODE 0
+#define LPFC_DCBX_CEE_MODE 1
+#define lpfc_mbx_rd_rev_vpd_SHIFT 29
+#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD word1
+ uint32_t first_hw_rev;
+ uint32_t second_hw_rev;
+ uint32_t word4_rsvd;
+ uint32_t third_hw_rev;
+ uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
+#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
+#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
+ uint32_t word7_rsvd;
+ uint32_t fw_id_rev;
+ uint8_t fw_name[16];
+ uint32_t ulp_fw_id_rev;
+ uint8_t ulp_fw_name[16];
+ uint32_t word18_47_rsvd[30];
+ uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
+#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD word48
+ uint32_t vpd_paddr_low;
+ uint32_t vpd_paddr_high;
+ uint32_t avail_vpd_len;
+ uint32_t rsvd_52_63[12];
+};
+
+struct lpfc_mbx_read_config {
+ uint32_t word1;
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0
+#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F
+#define lpfc_mbx_rd_conf_lnk_numb_WORD word2
+#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6
+#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003
+#define lpfc_mbx_rd_conf_lnk_type_WORD word2
+#define LPFC_LNK_TYPE_GE 0
+#define LPFC_LNK_TYPE_FC 1
+#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
+#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
+#define lpfc_mbx_rd_conf_topology_SHIFT 24
+#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD word2
+ uint32_t rsvd_3;
+ uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
+ uint32_t rsvd_5;
+ uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
+ uint32_t rsvd_7;
+ uint32_t rsvd_8;
+ uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT 0
+#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD word9
+ uint32_t rsvd_10;
+ uint32_t rsvd_11;
+ uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
+#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
+#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD word12
+ uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD word13
+ uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD word14
+ uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD word15
+ uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
+ uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
+#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
+#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD word17
+ uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
+#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
+#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD word18
+};
+
+struct lpfc_mbx_request_features {
+ uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT 0
+#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
+ uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
+#define lpfc_mbx_rq_ftr_rsp_vf__MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
+};
+
+struct lpfc_mbx_supp_pages {
+ uint32_t word1;
+#define qs_SHIFT 0
+#define qs_MASK 0x00000001
+#define qs_WORD word1
+#define wr_SHIFT 1
+#define wr_MASK 0x00000001
+#define wr_WORD word1
+#define pf_SHIFT 8
+#define pf_MASK 0x000000ff
+#define pf_WORD word1
+#define cpn_SHIFT 16
+#define cpn_MASK 0x000000ff
+#define cpn_WORD word1
+ uint32_t word2;
+#define list_offset_SHIFT 0
+#define list_offset_MASK 0x000000ff
+#define list_offset_WORD word2
+#define next_offset_SHIFT 8
+#define next_offset_MASK 0x000000ff
+#define next_offset_WORD word2
+#define elem_cnt_SHIFT 16
+#define elem_cnt_MASK 0x000000ff
+#define elem_cnt_WORD word2
+ uint32_t word3;
+#define pn_0_SHIFT 24
+#define pn_0_MASK 0x000000ff
+#define pn_0_WORD word3
+#define pn_1_SHIFT 16
+#define pn_1_MASK 0x000000ff
+#define pn_1_WORD word3
+#define pn_2_SHIFT 8
+#define pn_2_MASK 0x000000ff
+#define pn_2_WORD word3
+#define pn_3_SHIFT 0
+#define pn_3_MASK 0x000000ff
+#define pn_3_WORD word3
+ uint32_t word4;
+#define pn_4_SHIFT 24
+#define pn_4_MASK 0x000000ff
+#define pn_4_WORD word4
+#define pn_5_SHIFT 16
+#define pn_5_MASK 0x000000ff
+#define pn_5_WORD word4
+#define pn_6_SHIFT 8
+#define pn_6_MASK 0x000000ff
+#define pn_6_WORD word4
+#define pn_7_SHIFT 0
+#define pn_7_MASK 0x000000ff
+#define pn_7_WORD word4
+ uint32_t rsvd[27];
+#define LPFC_SUPP_PAGES 0
+#define LPFC_BLOCK_GUARD_PROFILES 1
+#define LPFC_SLI4_PARAMETERS 2
+};
+
+struct lpfc_mbx_pc_sli4_params {
+ uint32_t word1;
+#define qs_SHIFT 0
+#define qs_MASK 0x00000001
+#define qs_WORD word1
+#define wr_SHIFT 1
+#define wr_MASK 0x00000001
+#define wr_WORD word1
+#define pf_SHIFT 8
+#define pf_MASK 0x000000ff
+#define pf_WORD word1
+#define cpn_SHIFT 16
+#define cpn_MASK 0x000000ff
+#define cpn_WORD word1
+ uint32_t word2;
+#define if_type_SHIFT 0
+#define if_type_MASK 0x00000007
+#define if_type_WORD word2
+#define sli_rev_SHIFT 4
+#define sli_rev_MASK 0x0000000f
+#define sli_rev_WORD word2
+#define sli_family_SHIFT 8
+#define sli_family_MASK 0x000000ff
+#define sli_family_WORD word2
+#define featurelevel_1_SHIFT 16
+#define featurelevel_1_MASK 0x000000ff
+#define featurelevel_1_WORD word2
+#define featurelevel_2_SHIFT 24
+#define featurelevel_2_MASK 0x0000001f
+#define featurelevel_2_WORD word2
+ uint32_t word3;
+#define fcoe_SHIFT 0
+#define fcoe_MASK 0x00000001
+#define fcoe_WORD word3
+#define fc_SHIFT 1
+#define fc_MASK 0x00000001
+#define fc_WORD word3
+#define nic_SHIFT 2
+#define nic_MASK 0x00000001
+#define nic_WORD word3
+#define iscsi_SHIFT 3
+#define iscsi_MASK 0x00000001
+#define iscsi_WORD word3
+#define rdma_SHIFT 4
+#define rdma_MASK 0x00000001
+#define rdma_WORD word3
+ uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
+ uint32_t word5;
+#define if_page_sz_SHIFT 0
+#define if_page_sz_MASK 0x0000ffff
+#define if_page_sz_WORD word5
+#define loopbk_scope_SHIFT 24
+#define loopbk_scope_MASK 0x0000000f
+#define loopbk_scope_WORD word5
+#define rq_db_window_SHIFT 28
+#define rq_db_window_MASK 0x0000000f
+#define rq_db_window_WORD word5
+ uint32_t word6;
+#define eq_pages_SHIFT 0
+#define eq_pages_MASK 0x0000000f
+#define eq_pages_WORD word6
+#define eqe_size_SHIFT 8
+#define eqe_size_MASK 0x000000ff
+#define eqe_size_WORD word6
+ uint32_t word7;
+#define cq_pages_SHIFT 0
+#define cq_pages_MASK 0x0000000f
+#define cq_pages_WORD word7
+#define cqe_size_SHIFT 8
+#define cqe_size_MASK 0x000000ff
+#define cqe_size_WORD word7
+ uint32_t word8;
+#define mq_pages_SHIFT 0
+#define mq_pages_MASK 0x0000000f
+#define mq_pages_WORD word8
+#define mqe_size_SHIFT 8
+#define mqe_size_MASK 0x000000ff
+#define mqe_size_WORD word8
+#define mq_elem_cnt_SHIFT 16
+#define mq_elem_cnt_MASK 0x000000ff
+#define mq_elem_cnt_WORD word8
+ uint32_t word9;
+#define wq_pages_SHIFT 0
+#define wq_pages_MASK 0x0000ffff
+#define wq_pages_WORD word9
+#define wqe_size_SHIFT 8
+#define wqe_size_MASK 0x000000ff
+#define wqe_size_WORD word9
+ uint32_t word10;
+#define rq_pages_SHIFT 0
+#define rq_pages_MASK 0x0000ffff
+#define rq_pages_WORD word10
+#define rqe_size_SHIFT 8
+#define rqe_size_MASK 0x000000ff
+#define rqe_size_WORD word10
+ uint32_t word11;
+#define hdr_pages_SHIFT 0
+#define hdr_pages_MASK 0x0000000f
+#define hdr_pages_WORD word11
+#define hdr_size_SHIFT 8
+#define hdr_size_MASK 0x0000000f
+#define hdr_size_WORD word11
+#define hdr_pp_align_SHIFT 16
+#define hdr_pp_align_MASK 0x0000ffff
+#define hdr_pp_align_WORD word11
+ uint32_t word12;
+#define sgl_pages_SHIFT 0
+#define sgl_pages_MASK 0x0000000f
+#define sgl_pages_WORD word12
+#define sgl_pp_align_SHIFT 16
+#define sgl_pp_align_MASK 0x0000ffff
+#define sgl_pp_align_WORD word12
+ uint32_t rsvd_13_63[51];
+};
+#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
+ &(~((SLI4_PAGE_SIZE)-1)))
+
+struct lpfc_sli4_parameters {
+ uint32_t word0;
+#define cfg_prot_type_SHIFT 0
+#define cfg_prot_type_MASK 0x000000FF
+#define cfg_prot_type_WORD word0
+ uint32_t word1;
+#define cfg_ft_SHIFT 0
+#define cfg_ft_MASK 0x00000001
+#define cfg_ft_WORD word1
+#define cfg_sli_rev_SHIFT 4
+#define cfg_sli_rev_MASK 0x0000000f
+#define cfg_sli_rev_WORD word1
+#define cfg_sli_family_SHIFT 8
+#define cfg_sli_family_MASK 0x0000000f
+#define cfg_sli_family_WORD word1
+#define cfg_if_type_SHIFT 12
+#define cfg_if_type_MASK 0x0000000f
+#define cfg_if_type_WORD word1
+#define cfg_sli_hint_1_SHIFT 16
+#define cfg_sli_hint_1_MASK 0x000000ff
+#define cfg_sli_hint_1_WORD word1
+#define cfg_sli_hint_2_SHIFT 24
+#define cfg_sli_hint_2_MASK 0x0000001f
+#define cfg_sli_hint_2_WORD word1
+ uint32_t word2;
+ uint32_t word3;
+ uint32_t word4;
+#define cfg_cqv_SHIFT 14
+#define cfg_cqv_MASK 0x00000003
+#define cfg_cqv_WORD word4
+ uint32_t word5;
+ uint32_t word6;
+#define cfg_mqv_SHIFT 14
+#define cfg_mqv_MASK 0x00000003
+#define cfg_mqv_WORD word6
+ uint32_t word7;
+ uint32_t word8;
+#define cfg_wqsize_SHIFT 8
+#define cfg_wqsize_MASK 0x0000000f
+#define cfg_wqsize_WORD word8
+#define cfg_wqv_SHIFT 14
+#define cfg_wqv_MASK 0x00000003
+#define cfg_wqv_WORD word8
+ uint32_t word9;
+ uint32_t word10;
+#define cfg_rqv_SHIFT 14
+#define cfg_rqv_MASK 0x00000003
+#define cfg_rqv_WORD word10
+ uint32_t word11;
+#define cfg_rq_db_window_SHIFT 28
+#define cfg_rq_db_window_MASK 0x0000000f
+#define cfg_rq_db_window_WORD word11
+ uint32_t word12;
+#define cfg_fcoe_SHIFT 0
+#define cfg_fcoe_MASK 0x00000001
+#define cfg_fcoe_WORD word12
+#define cfg_ext_SHIFT 1
+#define cfg_ext_MASK 0x00000001
+#define cfg_ext_WORD word12
+#define cfg_hdrr_SHIFT 2
+#define cfg_hdrr_MASK 0x00000001
+#define cfg_hdrr_WORD word12
+#define cfg_phwq_SHIFT 15
+#define cfg_phwq_MASK 0x00000001
+#define cfg_phwq_WORD word12
+#define cfg_oas_SHIFT 25
+#define cfg_oas_MASK 0x00000001
+#define cfg_oas_WORD word12
+#define cfg_loopbk_scope_SHIFT 28
+#define cfg_loopbk_scope_MASK 0x0000000f
+#define cfg_loopbk_scope_WORD word12
+ uint32_t sge_supp_len;
+ uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT 0
+#define cfg_sgl_page_cnt_MASK 0x0000000f
+#define cfg_sgl_page_cnt_WORD word14
+#define cfg_sgl_page_size_SHIFT 8
+#define cfg_sgl_page_size_MASK 0x000000ff
+#define cfg_sgl_page_size_WORD word14
+#define cfg_sgl_pp_align_SHIFT 16
+#define cfg_sgl_pp_align_MASK 0x000000ff
+#define cfg_sgl_pp_align_WORD word14
+ uint32_t word15;
+ uint32_t word16;
+ uint32_t word17;
+ uint32_t word18;
+ uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+ struct mbox_header header;
+ struct lpfc_sli4_parameters sli4_parameters;
+};
+
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE 22
+ uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+ uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT 0
+#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+#define lpfc_rsrc_desc_pcie_length_SHIFT 8
+#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_length_WORD word0
+ uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
+ uint32_t reserved;
+ uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+ uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+#define lpfc_rsrc_desc_fcfcoe_length_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_length_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88
+ uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
+ uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
+ uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
+ uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
+ uint32_t bw_min;
+ uint32_t bw_max;
+ uint32_t iops_min;
+ uint32_t iops_max;
+ uint32_t reserved[4];
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
+ } request;
+ struct {
+ struct lpfc_prof_cfg prof_cfg;
+ } response;
+ } u;
+};
+
+struct lpfc_controller_attribute {
+ uint32_t version_string[8];
+ uint32_t manufacturer_name[8];
+ uint32_t supported_modes;
+ uint32_t word17;
+#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0
+#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff
+#define lpfc_cntl_attr_eprom_ver_lo_WORD word17
+#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
+#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
+#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
+ uint32_t mbx_da_struct_ver;
+ uint32_t ep_fw_da_struct_ver;
+ uint32_t ncsi_ver_str[3];
+ uint32_t dflt_ext_timeout;
+ uint32_t model_number[8];
+ uint32_t description[16];
+ uint32_t serial_number[8];
+ uint32_t ip_ver_str[8];
+ uint32_t fw_ver_str[8];
+ uint32_t bios_ver_str[8];
+ uint32_t redboot_ver_str[8];
+ uint32_t driver_ver_str[8];
+ uint32_t flash_fw_ver_str[8];
+ uint32_t functionality;
+ uint32_t word105;
+#define lpfc_cntl_attr_max_cbd_len_SHIFT 0
+#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff
+#define lpfc_cntl_attr_max_cbd_len_WORD word105
+#define lpfc_cntl_attr_asic_rev_SHIFT 16
+#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff
+#define lpfc_cntl_attr_asic_rev_WORD word105
+#define lpfc_cntl_attr_gen_guid0_SHIFT 24
+#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff
+#define lpfc_cntl_attr_gen_guid0_WORD word105
+ uint32_t gen_guid1_12[3];
+ uint32_t word109;
+#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0
+#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff
+#define lpfc_cntl_attr_gen_guid13_14_WORD word109
+#define lpfc_cntl_attr_gen_guid15_SHIFT 16
+#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff
+#define lpfc_cntl_attr_gen_guid15_WORD word109
+#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24
+#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff
+#define lpfc_cntl_attr_hba_port_cnt_WORD word109
+ uint32_t word110;
+#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0
+#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff
+#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110
+#define lpfc_cntl_attr_multi_func_dev_SHIFT 24
+#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff
+#define lpfc_cntl_attr_multi_func_dev_WORD word110
+ uint32_t word111;
+#define lpfc_cntl_attr_cache_valid_SHIFT 0
+#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff
+#define lpfc_cntl_attr_cache_valid_WORD word111
+#define lpfc_cntl_attr_hba_status_SHIFT 8
+#define lpfc_cntl_attr_hba_status_MASK 0x000000ff
+#define lpfc_cntl_attr_hba_status_WORD word111
+#define lpfc_cntl_attr_max_domain_SHIFT 16
+#define lpfc_cntl_attr_max_domain_MASK 0x000000ff
+#define lpfc_cntl_attr_max_domain_WORD word111
+#define lpfc_cntl_attr_lnk_numb_SHIFT 24
+#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f
+#define lpfc_cntl_attr_lnk_numb_WORD word111
+#define lpfc_cntl_attr_lnk_type_SHIFT 30
+#define lpfc_cntl_attr_lnk_type_MASK 0x00000003
+#define lpfc_cntl_attr_lnk_type_WORD word111
+ uint32_t fw_post_status;
+ uint32_t hba_mtu[8];
+ uint32_t word121;
+ uint32_t reserved1[3];
+ uint32_t word125;
+#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0
+#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_vendor_id_WORD word125
+#define lpfc_cntl_attr_pci_device_id_SHIFT 16
+#define lpfc_cntl_attr_pci_device_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_device_id_WORD word125
+ uint32_t word126;
+#define lpfc_cntl_attr_pci_subvdr_id_SHIFT 0
+#define lpfc_cntl_attr_pci_subvdr_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_subvdr_id_WORD word126
+#define lpfc_cntl_attr_pci_subsys_id_SHIFT 16
+#define lpfc_cntl_attr_pci_subsys_id_MASK 0x0000ffff
+#define lpfc_cntl_attr_pci_subsys_id_WORD word126
+ uint32_t word127;
+#define lpfc_cntl_attr_pci_bus_num_SHIFT 0
+#define lpfc_cntl_attr_pci_bus_num_MASK 0x000000ff
+#define lpfc_cntl_attr_pci_bus_num_WORD word127
+#define lpfc_cntl_attr_pci_dev_num_SHIFT 8
+#define lpfc_cntl_attr_pci_dev_num_MASK 0x000000ff
+#define lpfc_cntl_attr_pci_dev_num_WORD word127
+#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16
+#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff
+#define lpfc_cntl_attr_pci_fnc_num_WORD word127
+#define lpfc_cntl_attr_inf_type_SHIFT 24
+#define lpfc_cntl_attr_inf_type_MASK 0x000000ff
+#define lpfc_cntl_attr_inf_type_WORD word127
+ uint32_t unique_id[2];
+ uint32_t word130;
+#define lpfc_cntl_attr_num_netfil_SHIFT 0
+#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff
+#define lpfc_cntl_attr_num_netfil_WORD word130
+ uint32_t reserved2[4];
+};
+
+struct lpfc_mbx_get_cntl_attributes {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ struct lpfc_controller_attribute cntl_attr;
+};
+
+struct lpfc_mbx_get_port_name {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_port_name_lnk_type_SHIFT 0
+#define lpfc_mbx_get_port_name_lnk_type_MASK 0x00000003
+#define lpfc_mbx_get_port_name_lnk_type_WORD word4
+ } request;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_port_name_name0_SHIFT 0
+#define lpfc_mbx_get_port_name_name0_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name0_WORD word4
+#define lpfc_mbx_get_port_name_name1_SHIFT 8
+#define lpfc_mbx_get_port_name_name1_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name1_WORD word4
+#define lpfc_mbx_get_port_name_name2_SHIFT 16
+#define lpfc_mbx_get_port_name_name2_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name2_WORD word4
+#define lpfc_mbx_get_port_name_name3_SHIFT 24
+#define lpfc_mbx_get_port_name_name3_MASK 0x000000FF
+#define lpfc_mbx_get_port_name_name3_WORD word4
+#define LPFC_LINK_NUMBER_0 0
+#define LPFC_LINK_NUMBER_1 1
+#define LPFC_LINK_NUMBER_2 2
+#define LPFC_LINK_NUMBER_3 3
+ } response;
+ } u;
+};
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS 0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
+#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
+#define MB_CQE_STATUS_DMA_FAILED 0x5
+
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT 31
+#define lpfc_wr_object_eof_MASK 0x00000001
+#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_write_length_SHIFT 0
+#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD word4
+ uint32_t write_offset;
+ uint32_t object_name[26];
+ uint32_t bde_count;
+ struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+ } request;
+ struct {
+ uint32_t actual_write_length;
+ } response;
+ } u;
+};
+
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+ uint32_t word0;
+#define lpfc_mqe_status_SHIFT 16
+#define lpfc_mqe_status_MASK 0x0000FFFF
+#define lpfc_mqe_status_WORD word0
+#define lpfc_mqe_command_SHIFT 8
+#define lpfc_mqe_command_MASK 0x000000FF
+#define lpfc_mqe_command_WORD word0
+ union {
+ uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+ /* sli4 mailbox commands */
+ struct lpfc_mbx_sli4_config sli4_config;
+ struct lpfc_mbx_init_vfi init_vfi;
+ struct lpfc_mbx_reg_vfi reg_vfi;
+ struct lpfc_mbx_reg_vfi unreg_vfi;
+ struct lpfc_mbx_init_vpi init_vpi;
+ struct lpfc_mbx_resume_rpi resume_rpi;
+ struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+ struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+ struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+ struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
+ struct lpfc_mbx_reg_fcfi reg_fcfi;
+ struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+ struct lpfc_mbx_mq_create mq_create;
+ struct lpfc_mbx_mq_create_ext mq_create_ext;
+ struct lpfc_mbx_eq_create eq_create;
+ struct lpfc_mbx_modify_eq_delay eq_delay;
+ struct lpfc_mbx_cq_create cq_create;
+ struct lpfc_mbx_wq_create wq_create;
+ struct lpfc_mbx_rq_create rq_create;
+ struct lpfc_mbx_mq_destroy mq_destroy;
+ struct lpfc_mbx_eq_destroy eq_destroy;
+ struct lpfc_mbx_cq_destroy cq_destroy;
+ struct lpfc_mbx_wq_destroy wq_destroy;
+ struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+ struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+ struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
+ struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+ struct lpfc_mbx_nembed_cmd nembed_cmd;
+ struct lpfc_mbx_read_rev read_rev;
+ struct lpfc_mbx_read_vpi read_vpi;
+ struct lpfc_mbx_read_config rd_config;
+ struct lpfc_mbx_request_features req_ftrs;
+ struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+ struct lpfc_mbx_query_fw_config query_fw_cfg;
+ struct lpfc_mbx_supp_pages supp_pages;
+ struct lpfc_mbx_pc_sli4_params sli4_params;
+ struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_set_link_diag_state link_diag_state;
+ struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+ struct lpfc_mbx_run_link_diag_test link_diag_test;
+ struct lpfc_mbx_get_func_cfg get_func_cfg;
+ struct lpfc_mbx_get_prof_cfg get_prof_cfg;
+ struct lpfc_mbx_wr_object wr_object;
+ struct lpfc_mbx_get_port_name get_port_name;
+ struct lpfc_mbx_nop nop;
+ } un;
+};
+
+struct lpfc_mcqe {
+ uint32_t word0;
+#define lpfc_mcqe_status_SHIFT 0
+#define lpfc_mcqe_status_MASK 0x0000FFFF
+#define lpfc_mcqe_status_WORD word0
+#define lpfc_mcqe_ext_status_SHIFT 16
+#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD word0
+ uint32_t mcqe_tag0;
+ uint32_t mcqe_tag1;
+ uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT 31
+#define lpfc_trailer_valid_MASK 0x00000001
+#define lpfc_trailer_valid_WORD trailer
+#define lpfc_trailer_async_SHIFT 30
+#define lpfc_trailer_async_MASK 0x00000001
+#define lpfc_trailer_async_WORD trailer
+#define lpfc_trailer_hpi_SHIFT 29
+#define lpfc_trailer_hpi_MASK 0x00000001
+#define lpfc_trailer_hpi_WORD trailer
+#define lpfc_trailer_completed_SHIFT 28
+#define lpfc_trailer_completed_MASK 0x00000001
+#define lpfc_trailer_completed_WORD trailer
+#define lpfc_trailer_consumed_SHIFT 27
+#define lpfc_trailer_consumed_MASK 0x00000001
+#define lpfc_trailer_consumed_WORD trailer
+#define lpfc_trailer_type_SHIFT 16
+#define lpfc_trailer_type_MASK 0x000000FF
+#define lpfc_trailer_type_WORD trailer
+#define lpfc_trailer_code_SHIFT 8
+#define lpfc_trailer_code_MASK 0x000000FF
+#define lpfc_trailer_code_WORD trailer
+#define LPFC_TRAILER_CODE_LINK 0x1
+#define LPFC_TRAILER_CODE_FCOE 0x2
+#define LPFC_TRAILER_CODE_DCBX 0x3
+#define LPFC_TRAILER_CODE_GRP5 0x5
+#define LPFC_TRAILER_CODE_FC 0x10
+#define LPFC_TRAILER_CODE_SLI 0x11
+};
+
+struct lpfc_acqe_link {
+ uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT 24
+#define lpfc_acqe_link_speed_MASK 0x000000FF
+#define lpfc_acqe_link_speed_WORD word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
+#define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5
+#define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6
+#define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7
+#define lpfc_acqe_link_duplex_SHIFT 16
+#define lpfc_acqe_link_duplex_MASK 0x000000FF
+#define lpfc_acqe_link_duplex_WORD word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
+#define lpfc_acqe_link_status_SHIFT 8
+#define lpfc_acqe_link_status_MASK 0x000000FF
+#define lpfc_acqe_link_status_WORD word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
+#define LPFC_ASYNC_LINK_STATUS_UP 0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
+#define lpfc_acqe_link_type_SHIFT 6
+#define lpfc_acqe_link_type_MASK 0x00000003
+#define lpfc_acqe_link_type_WORD word0
+#define lpfc_acqe_link_number_SHIFT 0
+#define lpfc_acqe_link_number_MASK 0x0000003F
+#define lpfc_acqe_link_number_WORD word0
+ uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT 0
+#define lpfc_acqe_link_fault_MASK 0x000000FF
+#define lpfc_acqe_link_fault_WORD word1
+#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
+#define lpfc_acqe_logical_link_speed_SHIFT 16
+#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF
+#define lpfc_acqe_logical_link_speed_WORD word1
+ uint32_t event_tag;
+ uint32_t trailer;
+#define LPFC_LINK_EVENT_TYPE_PHYSICAL 0x0
+#define LPFC_LINK_EVENT_TYPE_VIRTUAL 0x1
+};
+
+struct lpfc_acqe_fip {
+ uint32_t index;
+ uint32_t word1;
+#define lpfc_acqe_fip_fcf_count_SHIFT 0
+#define lpfc_acqe_fip_fcf_count_MASK 0x0000FFFF
+#define lpfc_acqe_fip_fcf_count_WORD word1
+#define lpfc_acqe_fip_event_type_SHIFT 16
+#define lpfc_acqe_fip_event_type_MASK 0x0000FFFF
+#define lpfc_acqe_fip_event_type_WORD word1
+ uint32_t event_tag;
+ uint32_t trailer;
+#define LPFC_FIP_EVENT_TYPE_NEW_FCF 0x1
+#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL 0x2
+#define LPFC_FIP_EVENT_TYPE_FCF_DEAD 0x3
+#define LPFC_FIP_EVENT_TYPE_CVL 0x4
+#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD 0x5
+};
+
+struct lpfc_acqe_dcbx {
+ uint32_t tlv_ttl;
+ uint32_t reserved;
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
+struct lpfc_acqe_grp5 {
+ uint32_t word0;
+#define lpfc_acqe_grp5_type_SHIFT 6
+#define lpfc_acqe_grp5_type_MASK 0x00000003
+#define lpfc_acqe_grp5_type_WORD word0
+#define lpfc_acqe_grp5_number_SHIFT 0
+#define lpfc_acqe_grp5_number_MASK 0x0000003F
+#define lpfc_acqe_grp5_number_WORD word0
+ uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT 16
+#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD word1
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
+struct lpfc_acqe_fc_la {
+ uint32_t word0;
+#define lpfc_acqe_fc_la_speed_SHIFT 24
+#define lpfc_acqe_fc_la_speed_MASK 0x000000FF
+#define lpfc_acqe_fc_la_speed_WORD word0
+#define LPFC_FC_LA_SPEED_UNKNOWN 0x0
+#define LPFC_FC_LA_SPEED_1G 0x1
+#define LPFC_FC_LA_SPEED_2G 0x2
+#define LPFC_FC_LA_SPEED_4G 0x4
+#define LPFC_FC_LA_SPEED_8G 0x8
+#define LPFC_FC_LA_SPEED_10G 0xA
+#define LPFC_FC_LA_SPEED_16G 0x10
+#define lpfc_acqe_fc_la_topology_SHIFT 16
+#define lpfc_acqe_fc_la_topology_MASK 0x000000FF
+#define lpfc_acqe_fc_la_topology_WORD word0
+#define LPFC_FC_LA_TOP_UNKOWN 0x0
+#define LPFC_FC_LA_TOP_P2P 0x1
+#define LPFC_FC_LA_TOP_FCAL 0x2
+#define LPFC_FC_LA_TOP_INTERNAL_LOOP 0x3
+#define LPFC_FC_LA_TOP_SERDES_LOOP 0x4
+#define lpfc_acqe_fc_la_att_type_SHIFT 8
+#define lpfc_acqe_fc_la_att_type_MASK 0x000000FF
+#define lpfc_acqe_fc_la_att_type_WORD word0
+#define LPFC_FC_LA_TYPE_LINK_UP 0x1
+#define LPFC_FC_LA_TYPE_LINK_DOWN 0x2
+#define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3
+#define lpfc_acqe_fc_la_port_type_SHIFT 6
+#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
+#define lpfc_acqe_fc_la_port_type_WORD word0
+#define LPFC_LINK_TYPE_ETHERNET 0x0
+#define LPFC_LINK_TYPE_FC 0x1
+#define lpfc_acqe_fc_la_port_number_SHIFT 0
+#define lpfc_acqe_fc_la_port_number_MASK 0x0000003F
+#define lpfc_acqe_fc_la_port_number_WORD word0
+ uint32_t word1;
+#define lpfc_acqe_fc_la_llink_spd_SHIFT 16
+#define lpfc_acqe_fc_la_llink_spd_MASK 0x0000FFFF
+#define lpfc_acqe_fc_la_llink_spd_WORD word1
+#define lpfc_acqe_fc_la_fault_SHIFT 0
+#define lpfc_acqe_fc_la_fault_MASK 0x000000FF
+#define lpfc_acqe_fc_la_fault_WORD word1
+#define LPFC_FC_LA_FAULT_NONE 0x0
+#define LPFC_FC_LA_FAULT_LOCAL 0x1
+#define LPFC_FC_LA_FAULT_REMOTE 0x2
+ uint32_t event_tag;
+ uint32_t trailer;
+#define LPFC_FC_LA_EVENT_TYPE_FC_LINK 0x1
+#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2
+};
+
+struct lpfc_acqe_misconfigured_event {
+ struct {
+ uint32_t word0;
+#define lpfc_sli_misconfigured_port0_SHIFT 0
+#define lpfc_sli_misconfigured_port0_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port0_WORD word0
+#define lpfc_sli_misconfigured_port1_SHIFT 8
+#define lpfc_sli_misconfigured_port1_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port1_WORD word0
+#define lpfc_sli_misconfigured_port2_SHIFT 16
+#define lpfc_sli_misconfigured_port2_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port2_WORD word0
+#define lpfc_sli_misconfigured_port3_SHIFT 24
+#define lpfc_sli_misconfigured_port3_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port3_WORD word0
+ } theEvent;
+#define LPFC_SLI_EVENT_STATUS_VALID 0x00
+#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01
+#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02
+#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03
+};
+
+struct lpfc_acqe_sli {
+ uint32_t event_data1;
+ uint32_t event_data2;
+ uint32_t reserved;
+ uint32_t trailer;
+#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
+#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
+#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3
+#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
+#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
+#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
+#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox command between the host and port. The mailbox consists
+ * of a payload area of 256 bytes and a completion queue of length
+ * 16 bytes.
+ */
+struct lpfc_bmbx_create {
+ struct lpfc_mqe mqe;
+ struct lpfc_mcqe mcqe;
+};
+
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+/* align SGL addr on a size boundary - adjust address up */
+#define NO_XRI 0xffff
+
+struct wqe_common {
+ uint32_t word6;
+#define wqe_xri_tag_SHIFT 0
+#define wqe_xri_tag_MASK 0x0000FFFF
+#define wqe_xri_tag_WORD word6
+#define wqe_ctxt_tag_SHIFT 16
+#define wqe_ctxt_tag_MASK 0x0000FFFF
+#define wqe_ctxt_tag_WORD word6
+ uint32_t word7;
+#define wqe_dif_SHIFT 0
+#define wqe_dif_MASK 0x00000003
+#define wqe_dif_WORD word7
+#define LPFC_WQE_DIF_PASSTHRU 1
+#define LPFC_WQE_DIF_STRIP 2
+#define LPFC_WQE_DIF_INSERT 3
+#define wqe_ct_SHIFT 2
+#define wqe_ct_MASK 0x00000003
+#define wqe_ct_WORD word7
+#define wqe_status_SHIFT 4
+#define wqe_status_MASK 0x0000000f
+#define wqe_status_WORD word7
+#define wqe_cmnd_SHIFT 8
+#define wqe_cmnd_MASK 0x000000ff
+#define wqe_cmnd_WORD word7
+#define wqe_class_SHIFT 16
+#define wqe_class_MASK 0x00000007
+#define wqe_class_WORD word7
+#define wqe_ar_SHIFT 19
+#define wqe_ar_MASK 0x00000001
+#define wqe_ar_WORD word7
+#define wqe_ag_SHIFT wqe_ar_SHIFT
+#define wqe_ag_MASK wqe_ar_MASK
+#define wqe_ag_WORD wqe_ar_WORD
+#define wqe_pu_SHIFT 20
+#define wqe_pu_MASK 0x00000003
+#define wqe_pu_WORD word7
+#define wqe_erp_SHIFT 22
+#define wqe_erp_MASK 0x00000001
+#define wqe_erp_WORD word7
+#define wqe_conf_SHIFT wqe_erp_SHIFT
+#define wqe_conf_MASK wqe_erp_MASK
+#define wqe_conf_WORD wqe_erp_WORD
+#define wqe_lnk_SHIFT 23
+#define wqe_lnk_MASK 0x00000001
+#define wqe_lnk_WORD word7
+#define wqe_tmo_SHIFT 24
+#define wqe_tmo_MASK 0x000000ff
+#define wqe_tmo_WORD word7
+ uint32_t abort_tag; /* word 8 in WQE */
+ uint32_t word9;
+#define wqe_reqtag_SHIFT 0
+#define wqe_reqtag_MASK 0x0000FFFF
+#define wqe_reqtag_WORD word9
+#define wqe_temp_rpi_SHIFT 16
+#define wqe_temp_rpi_MASK 0x0000FFFF
+#define wqe_temp_rpi_WORD word9
+#define wqe_rcvoxid_SHIFT 16
+#define wqe_rcvoxid_MASK 0x0000FFFF
+#define wqe_rcvoxid_WORD word9
+ uint32_t word10;
+#define wqe_ebde_cnt_SHIFT 0
+#define wqe_ebde_cnt_MASK 0x0000000f
+#define wqe_ebde_cnt_WORD word10
+#define wqe_oas_SHIFT 6
+#define wqe_oas_MASK 0x00000001
+#define wqe_oas_WORD word10
+#define wqe_lenloc_SHIFT 7
+#define wqe_lenloc_MASK 0x00000003
+#define wqe_lenloc_WORD word10
+#define LPFC_WQE_LENLOC_NONE 0
+#define LPFC_WQE_LENLOC_WORD3 1
+#define LPFC_WQE_LENLOC_WORD12 2
+#define LPFC_WQE_LENLOC_WORD4 3
+#define wqe_qosd_SHIFT 9
+#define wqe_qosd_MASK 0x00000001
+#define wqe_qosd_WORD word10
+#define wqe_xbl_SHIFT 11
+#define wqe_xbl_MASK 0x00000001
+#define wqe_xbl_WORD word10
+#define wqe_iod_SHIFT 13
+#define wqe_iod_MASK 0x00000001
+#define wqe_iod_WORD word10
+#define LPFC_WQE_IOD_WRITE 0
+#define LPFC_WQE_IOD_READ 1
+#define wqe_dbde_SHIFT 14
+#define wqe_dbde_MASK 0x00000001
+#define wqe_dbde_WORD word10
+#define wqe_wqes_SHIFT 15
+#define wqe_wqes_MASK 0x00000001
+#define wqe_wqes_WORD word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT 1
+#define wqe_wqid_MASK 0x00007fff
+#define wqe_wqid_WORD word10
+#define wqe_pri_SHIFT 16
+#define wqe_pri_MASK 0x00000007
+#define wqe_pri_WORD word10
+#define wqe_pv_SHIFT 19
+#define wqe_pv_MASK 0x00000001
+#define wqe_pv_WORD word10
+#define wqe_xc_SHIFT 21
+#define wqe_xc_MASK 0x00000001
+#define wqe_xc_WORD word10
+#define wqe_sr_SHIFT 22
+#define wqe_sr_MASK 0x00000001
+#define wqe_sr_WORD word10
+#define wqe_ccpe_SHIFT 23
+#define wqe_ccpe_MASK 0x00000001
+#define wqe_ccpe_WORD word10
+#define wqe_ccp_SHIFT 24
+#define wqe_ccp_MASK 0x000000ff
+#define wqe_ccp_WORD word10
+ uint32_t word11;
+#define wqe_cmd_type_SHIFT 0
+#define wqe_cmd_type_MASK 0x0000000f
+#define wqe_cmd_type_WORD word11
+#define wqe_els_id_SHIFT 4
+#define wqe_els_id_MASK 0x00000003
+#define wqe_els_id_WORD word11
+#define LPFC_ELS_ID_FLOGI 3
+#define LPFC_ELS_ID_FDISC 2
+#define LPFC_ELS_ID_LOGO 1
+#define LPFC_ELS_ID_DEFAULT 0
+#define wqe_wqec_SHIFT 7
+#define wqe_wqec_MASK 0x00000001
+#define wqe_wqec_WORD word11
+#define wqe_cqid_SHIFT 16
+#define wqe_cqid_MASK 0x0000ffff
+#define wqe_cqid_WORD word11
+#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
+};
+
+struct wqe_did {
+ uint32_t word5;
+#define wqe_els_did_SHIFT 0
+#define wqe_els_did_MASK 0x00FFFFFF
+#define wqe_els_did_WORD word5
+#define wqe_xmit_bls_pt_SHIFT 28
+#define wqe_xmit_bls_pt_MASK 0x00000003
+#define wqe_xmit_bls_pt_WORD word5
+#define wqe_xmit_bls_ar_SHIFT 30
+#define wqe_xmit_bls_ar_MASK 0x00000001
+#define wqe_xmit_bls_ar_WORD word5
+#define wqe_xmit_bls_xo_SHIFT 31
+#define wqe_xmit_bls_xo_MASK 0x00000001
+#define wqe_xmit_bls_xo_WORD word5
+};
+
+struct lpfc_wqe_generic{
+ struct ulp_bde64 bde;
+ uint32_t word3;
+ uint32_t word4;
+ uint32_t word5;
+ struct wqe_common wqe_com;
+ uint32_t payload[4];
+};
+
+struct els_request64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t payload_len;
+ uint32_t word4;
+#define els_req64_sid_SHIFT 0
+#define els_req64_sid_MASK 0x00FFFFFF
+#define els_req64_sid_WORD word4
+#define els_req64_sp_SHIFT 24
+#define els_req64_sp_MASK 0x00000001
+#define els_req64_sp_WORD word4
+#define els_req64_vf_SHIFT 25
+#define els_req64_vf_MASK 0x00000001
+#define els_req64_vf_WORD word4
+ struct wqe_did wqe_dest;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t word12;
+#define els_req64_vfid_SHIFT 1
+#define els_req64_vfid_MASK 0x00000FFF
+#define els_req64_vfid_WORD word12
+#define els_req64_pri_SHIFT 13
+#define els_req64_pri_MASK 0x00000007
+#define els_req64_pri_WORD word12
+ uint32_t word13;
+#define els_req64_hopcnt_SHIFT 24
+#define els_req64_hopcnt_MASK 0x000000ff
+#define els_req64_hopcnt_WORD word13
+ uint32_t word14;
+ uint32_t max_response_payload_len;
+};
+
+struct xmit_els_rsp64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t response_payload_len;
+ uint32_t word4;
+#define els_rsp64_sid_SHIFT 0
+#define els_rsp64_sid_MASK 0x00FFFFFF
+#define els_rsp64_sid_WORD word4
+#define els_rsp64_sp_SHIFT 24
+#define els_rsp64_sp_MASK 0x00000001
+#define els_rsp64_sp_WORD word4
+ struct wqe_did wqe_dest;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t word12;
+#define wqe_rsp_temp_rpi_SHIFT 0
+#define wqe_rsp_temp_rpi_MASK 0x0000FFFF
+#define wqe_rsp_temp_rpi_WORD word12
+ uint32_t rsvd_13_15[3];
+};
+
+struct xmit_bls_rsp64_wqe {
+ uint32_t payload0;
+/* Payload0 for BA_ACC */
+#define xmit_bls_rsp64_acc_seq_id_SHIFT 16
+#define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_WORD payload0
+#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24
+#define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0
+/* Payload0 for BA_RJT */
+#define xmit_bls_rsp64_rjt_vspec_SHIFT 0
+#define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff
+#define xmit_bls_rsp64_rjt_vspec_WORD payload0
+#define xmit_bls_rsp64_rjt_expc_SHIFT 8
+#define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff
+#define xmit_bls_rsp64_rjt_expc_WORD payload0
+#define xmit_bls_rsp64_rjt_rsnc_SHIFT 16
+#define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff
+#define xmit_bls_rsp64_rjt_rsnc_WORD payload0
+ uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT 0
+#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD word1
+#define xmit_bls_rsp64_oxid_SHIFT 16
+#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD word1
+ uint32_t word2;
+#define xmit_bls_rsp64_seqcnthi_SHIFT 0
+#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD word2
+#define xmit_bls_rsp64_seqcntlo_SHIFT 16
+#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD word2
+ uint32_t rsrvd3;
+ uint32_t rsrvd4;
+ struct wqe_did wqe_dest;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t word12;
+#define xmit_bls_rsp64_temprpi_SHIFT 0
+#define xmit_bls_rsp64_temprpi_MASK 0x0000ffff
+#define xmit_bls_rsp64_temprpi_WORD word12
+ uint32_t rsvd_13_15[3];
+};
+
+struct wqe_rctl_dfctl {
+ uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK 0x000000001
+#define wqe_si_WORD word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK 0x000000001
+#define wqe_la_WORD word5
+#define wqe_xo_SHIFT 6
+#define wqe_xo_MASK 0x000000001
+#define wqe_xo_WORD word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK 0x000000001
+#define wqe_ls_WORD word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK 0x0000000ff
+#define wqe_dfctl_WORD word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK 0x0000000ff
+#define wqe_type_WORD word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK 0x0000000ff
+#define wqe_rctl_WORD word5
+};
+
+struct xmit_seq64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t rsvd3;
+ uint32_t relative_offset;
+ struct wqe_rctl_dfctl wge_ctl;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t xmit_len;
+ uint32_t rsvd_12_15[3];
+};
+struct xmit_bcast64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t seq_payload_len;
+ uint32_t rsvd4;
+ struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t request_payload_len;
+ uint32_t relative_offset;
+ struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_14[3];
+ uint32_t max_response_payload_len;
+};
+
+struct create_xri_wqe {
+ uint32_t rsrvd[5]; /* words 0-4 */
+ struct wqe_did wqe_dest; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+ uint32_t rsrvd[3];
+ uint32_t word3;
+#define abort_cmd_ia_SHIFT 0
+#define abort_cmd_ia_MASK 0x000000001
+#define abort_cmd_ia_WORD word3
+#define abort_cmd_criteria_SHIFT 8
+#define abort_cmd_criteria_MASK 0x0000000ff
+#define abort_cmd_criteria_WORD word3
+ uint32_t rsrvd4;
+ uint32_t rsrvd5;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x00000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
+ uint32_t total_xfer_len;
+ uint32_t initial_xfer_len;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
+};
+
+struct fcp_iread64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x00000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
+ uint32_t total_xfer_len; /* word 4 */
+ uint32_t rsrvd5; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
+};
+
+struct fcp_icmnd64_wqe {
+ struct ulp_bde64 bde; /* words 0-2 */
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x00000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
+ uint32_t rsrvd4; /* word 4 */
+ uint32_t rsrvd5; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+
+union lpfc_wqe {
+ uint32_t words[16];
+ struct lpfc_wqe_generic generic;
+ struct fcp_icmnd64_wqe fcp_icmd;
+ struct fcp_iread64_wqe fcp_iread;
+ struct fcp_iwrite64_wqe fcp_iwrite;
+ struct abort_cmd_wqe abort_cmd;
+ struct create_xri_wqe create_xri;
+ struct xmit_bcast64_wqe xmit_bcast64;
+ struct xmit_seq64_wqe xmit_sequence;
+ struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+ struct xmit_els_rsp64_wqe xmit_els_rsp;
+ struct els_request64_wqe els_req;
+ struct gen_req64_wqe gen_req;
+};
+
+union lpfc_wqe128 {
+ uint32_t words[32];
+ struct lpfc_wqe_generic generic;
+ struct xmit_seq64_wqe xmit_sequence;
+ struct gen_req64_wqe gen_req;
+};
+
+#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP 0xf7
+#define LPFC_FILE_ID_GROUP 0xa2
+struct lpfc_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT 24
+#define lpfc_grp_hdr_file_type_MASK 0x000000FF
+#define lpfc_grp_hdr_file_type_WORD word2
+#define lpfc_grp_hdr_id_SHIFT 16
+#define lpfc_grp_hdr_id_MASK 0x000000FF
+#define lpfc_grp_hdr_id_WORD word2
+ uint8_t rev_name[128];
+ uint8_t date[12];
+ uint8_t revision[32];
+};
+
+#define FCP_COMMAND 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+#define OTHER_COMMAND 0x8
+
+#define LPFC_FW_DUMP 1
+#define LPFC_FW_RESET 2
+#define LPFC_DV_RESET 3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
new file mode 100644
index 000000000..ddf80eb11
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -0,0 +1,11477 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/miscdevice.h>
+#include <linux/percpu.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+char *_dump_buf_data;
+unsigned long _dump_buf_data_order;
+char *_dump_buf_dif;
+unsigned long _dump_buf_dif_order;
+spinlock_t _dump_buf_lock;
+
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t *lpfc_used_cpu;
+uint32_t lpfc_present_cpu;
+
+static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+static int lpfc_post_rcv_buf(struct lpfc_hba *);
+static int lpfc_sli4_queue_verify(struct lpfc_hba *);
+static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
+static int lpfc_setup_endian_order(struct lpfc_hba *);
+static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
+static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_init_sgl_list(struct lpfc_hba *);
+static int lpfc_init_active_sgl_array(struct lpfc_hba *);
+static void lpfc_free_active_sgl(struct lpfc_hba *);
+static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
+static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
+static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
+static void lpfc_sli4_disable_intr(struct lpfc_hba *);
+static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+
+static struct scsi_transport_template *lpfc_transport_template = NULL;
+static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
+static DEFINE_IDR(lpfc_hba_index);
+
+/**
+ * lpfc_config_port_prep - Perform lpfc initialization prior to config port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
+ * mailbox command. It retrieves the revision information from the HBA and
+ * collects the Vital Product Data (VPD) about the HBA for preparing the
+ * configuration of the HBA.
+ *
+ * Return codes:
+ * 0 - success.
+ * -ERESTART - requests the SLI layer to reset the HBA and try again.
+ * Any other value - indicates an error.
+ **/
+int
+lpfc_config_port_prep(struct lpfc_hba *phba)
+{
+ lpfc_vpd_t *vp = &phba->vpd;
+ int i = 0, rc;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ char *lpfc_vpd_data = NULL;
+ uint16_t offset = 0;
+ static char licensed[56] =
+ "key unlock for use with gnu public licensed code only\0";
+ static int init_key = 1;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ mb = &pmb->u.mb;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
+
+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+ if (init_key) {
+ uint32_t *ptext = (uint32_t *) licensed;
+
+ for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+ *ptext = cpu_to_be32(*ptext);
+ init_key = 0;
+ }
+
+ lpfc_read_nv(phba, pmb);
+ memset((char*)mb->un.varRDnvp.rsvd3, 0,
+ sizeof (mb->un.varRDnvp.rsvd3));
+ memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
+ sizeof (licensed));
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0324 Config Port initialization "
+ "error, mbxCmd x%x READ_NVPARM, "
+ "mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ERESTART;
+ }
+ memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
+ sizeof(phba->wwnn));
+ memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
+ sizeof(phba->wwpn));
+ }
+
+ phba->sli3_options = 0x0;
+
+ /* Setup and issue mailbox READ REV command */
+ lpfc_read_rev(phba, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0439 Adapter failed to init, mbxCmd x%x "
+ "READ_REV, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return -ERESTART;
+ }
+
+
+ /*
+ * The value of rr must be 1 since the driver set the cv field to 1.
+ * This setting requires the FW to set all revision fields.
+ */
+ if (mb->un.varRdRev.rr == 0) {
+ vp->rev.rBit = 0;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0440 Adapter failed to init, READ_REV has "
+ "missing revision information.\n");
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ERESTART;
+ }
+
+ if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EINVAL;
+ }
+
+ /* Save information as VPD data */
+ vp->rev.rBit = 1;
+ memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
+ vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
+ memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
+ vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
+ memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
+ vp->rev.biuRev = mb->un.varRdRev.biuRev;
+ vp->rev.smRev = mb->un.varRdRev.smRev;
+ vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
+ vp->rev.endecRev = mb->un.varRdRev.endecRev;
+ vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
+ vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
+ vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
+ vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
+ vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
+ vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+
+ /* If the sli feature level is less then 9, we must
+ * tear down all RPIs and VPIs on link down if NPIV
+ * is enabled.
+ */
+ if (vp->rev.feaLevelHigh < 9)
+ phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
+
+ if (lpfc_is_LC_HBA(phba->pcidev->device))
+ memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
+ sizeof (phba->RandomData));
+
+ /* Get adapter VPD information */
+ lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
+ if (!lpfc_vpd_data)
+ goto out_free_mbox;
+ do {
+ lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0441 VPD not present on adapter, "
+ "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ mb->un.varDmp.word_cnt = 0;
+ }
+ /* dump mem may return a zero when finished or we got a
+ * mailbox error, either way we are done.
+ */
+ if (mb->un.varDmp.word_cnt == 0)
+ break;
+ if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
+ mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ lpfc_vpd_data + offset,
+ mb->un.varDmp.word_cnt);
+ offset += mb->un.varDmp.word_cnt;
+ } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+ lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
+
+ kfree(lpfc_vpd_data);
+out_free_mbox:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return 0;
+}
+
+/**
+ * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for driver's configuring asynchronous event
+ * mailbox command to the device. If the mailbox command returns successfully,
+ * it will set internal async event support flag to 1; otherwise, it will
+ * set internal async event support flag to 0.
+ **/
+static void
+lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
+ phba->temp_sensor_support = 1;
+ else
+ phba->temp_sensor_support = 0;
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for dump mailbox command for getting
+ * wake up parameters. When this command complete, the response contain
+ * Option rom version of the HBA. This function translate the version number
+ * into a human readable string and store it in OptionROMVersion.
+ **/
+static void
+lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct prog_id *prg;
+ uint32_t prog_id_word;
+ char dist = ' ';
+ /* character array used for decoding dist type. */
+ char dist_char[] = "nabx";
+
+ if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ prg = (struct prog_id *) &prog_id_word;
+
+ /* word 7 contain option rom version */
+ prog_id_word = pmboxq->u.mb.un.varWords[7];
+
+ /* Decode the Option rom version word to a readable string */
+ if (prg->dist < 4)
+ dist = dist_char[prg->dist];
+
+ if ((prg->dist == 3) && (prg->num == 0))
+ snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
+ prg->ver, prg->rev, prg->lev);
+ else
+ snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
+ prg->ver, prg->rev, prg->lev,
+ dist, prg->num);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ * cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ *
+ * Return codes
+ * None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+ /* If the soft name exists then update it using the service params */
+ if (vport->phba->cfg_soft_wwnn)
+ u64_to_wwn(vport->phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (vport->phba->cfg_soft_wwpn)
+ u64_to_wwn(vport->phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+
+ /*
+ * If the name is empty or there exists a soft name
+ * then copy the service params name, otherwise use the fc name
+ */
+ if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name));
+
+ if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+}
+
+/**
+ * lpfc_config_port_post - Perform lpfc initialization after config port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization after the CONFIG_PORT mailbox
+ * command call. It performs all internal resource and state setups on the
+ * port: post IOCB buffers, enable appropriate host interrupt attentions,
+ * ELS ring timers, etc.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+int
+lpfc_config_port_post(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t status, timeout;
+ int i, j;
+ int rc;
+
+ spin_lock_irq(&phba->hbalock);
+ /*
+ * If the Config port completed correctly the HBA is not
+ * over heated any more.
+ */
+ if (phba->over_temp_state == HBA_OVER_TEMP)
+ phba->over_temp_state = HBA_NORMAL_TEMP;
+ spin_unlock_irq(&phba->hbalock);
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+ mb = &pmb->u.mb;
+
+ /* Get login parameters for NID. */
+ rc = lpfc_read_sparam(phba, pmb, 0);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+
+ pmb->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0448 Adapter failed init, mbxCmd x%x "
+ "READ_SPARM mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ phba->link_state = LPFC_HBA_ERROR;
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ return -EIO;
+ }
+
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+
+ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ pmb->context1 = NULL;
+ lpfc_update_vport_wwn(vport);
+
+ /* Update the fc_host data structures with new wwn. */
+ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+ fc_host_max_npiv_vports(shost) = phba->max_vpi;
+
+ /* If no serial number in VPD data, use low 6 bytes of WWNN */
+ /* This should be consolidated into parse_vpd ? - mr */
+ if (phba->SerialNumber[0] == 0) {
+ uint8_t *outptr;
+
+ outptr = &vport->fc_nodename.u.s.IEEE[0];
+ for (i = 0; i < 12; i++) {
+ status = *outptr++;
+ j = ((status & 0xf0) >> 4);
+ if (j <= 9)
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x30 + (uint8_t) j);
+ else
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+ i++;
+ j = (status & 0xf);
+ if (j <= 9)
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x30 + (uint8_t) j);
+ else
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+ }
+ }
+
+ lpfc_read_config(phba, pmb);
+ pmb->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0453 Adapter failed to init, mbxCmd x%x "
+ "READ_CONFIG, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ phba->link_state = LPFC_HBA_ERROR;
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ /* Check if the port is disabled */
+ lpfc_sli_read_link_ste(phba);
+
+ /* Reset the DFT_HBA_Q_DEPTH to the max xri */
+ i = (mb->un.varRdConfig.max_xri + 1);
+ if (phba->cfg_hba_queue_depth > i) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3359 HBA queue depth changed from %d to %d\n",
+ phba->cfg_hba_queue_depth, i);
+ phba->cfg_hba_queue_depth = i;
+ }
+
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ i = (mb->un.varRdConfig.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > i) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3360 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, i);
+ phba->pport->cfg_lun_queue_depth = i;
+ }
+
+ phba->lmt = mb->un.varRdConfig.lmt;
+
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+ phba->link_state = LPFC_LINK_DOWN;
+
+ /* Only process IOCBs on ELS ring till hba_state is READY */
+ if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
+ psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
+ psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
+ psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Post receive buffers for desired rings */
+ if (phba->sli_rev != 3)
+ lpfc_post_rcv_buf(phba);
+
+ /*
+ * Configure HBA MSI-X attention conditions to messages if MSI-X mode
+ */
+ if (phba->intr_type == MSIX) {
+ rc = lpfc_config_msi(phba, pmb);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0352 Config MSI mailbox command "
+ "failed, mbxCmd x%x, mbxStatus x%x\n",
+ pmb->u.mb.mbxCommand,
+ pmb->u.mb.mbxStatus);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ }
+
+ spin_lock_irq(&phba->hbalock);
+ /* Initialize ERATT handling flag */
+ phba->hba_flag &= ~HBA_ERATT_HANDLED;
+
+ /* Enable appropriate host interrupts */
+ if (lpfc_readl(phba->HCregaddr, &status)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EIO;
+ }
+ status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
+ if (psli->num_rings > 0)
+ status |= HC_R0INT_ENA;
+ if (psli->num_rings > 1)
+ status |= HC_R1INT_ENA;
+ if (psli->num_rings > 2)
+ status |= HC_R2INT_ENA;
+ if (psli->num_rings > 3)
+ status |= HC_R3INT_ENA;
+
+ if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
+ (phba->cfg_poll & DISABLE_FCP_RING_INT))
+ status &= ~(HC_R0INT_ENA);
+
+ writel(status, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Set up ring-0 (ELS) timer */
+ timeout = phba->fc_ratov * 2;
+ mod_timer(&vport->els_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * timeout));
+ /* Set up heart beat (HB) timer */
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ phba->hb_outstanding = 0;
+ phba->last_completion_time = jiffies;
+ /* Set up error attention (ERATT) polling timer */
+ mod_timer(&phba->eratt_poll,
+ jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
+
+ if (phba->hba_flag & LINK_DISABLED) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "2598 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "2599 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ return rc;
+ }
+ /* MBOX buffer will be freed in mbox compl */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ lpfc_config_async(phba, pmb, LPFC_ELS_RING);
+ pmb->mbox_cmpl = lpfc_config_async_cmpl;
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "0456 Adapter failed to issue "
+ "ASYNCEVT_ENABLE mbox status x%x\n",
+ rc);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
+ /* Get Option rom version */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ lpfc_dump_wakeup_param(phba, pmb);
+ pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+ "to get Option ROM version status x%x\n", rc);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+static int
+lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
+{
+ return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+ uint32_t flag)
+{
+ struct lpfc_vport *vport = phba->pport;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+ mb = &pmb->u.mb;
+ pmb->vport = vport;
+
+ if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+ !(phba->lmt & LMT_1Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+ !(phba->lmt & LMT_2Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+ !(phba->lmt & LMT_4Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+ !(phba->lmt & LMT_8Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+ !(phba->lmt & LMT_10Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+ !(phba->lmt & LMT_16Gb))) {
+ /* Reset link speed to auto */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
+ phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+ }
+ lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_set_loopback_flag(phba);
+ rc = lpfc_sli_issue_mbox(phba, pmb, flag);
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0498 Adapter failed to init, mbxCmd x%x "
+ "INIT_LINK, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ }
+ phba->link_state = LPFC_HBA_ERROR;
+ if (rc != MBX_BUSY || flag == MBX_POLL)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
+ if (flag == MBX_POLL)
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return 0;
+}
+
+/**
+ * lpfc_hba_down_link - this routine downs the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use to stop the link.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+static int
+lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
+{
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "0491 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, flag);
+ if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "2522 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ if (flag == MBX_POLL)
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return 0;
+}
+
+/**
+ * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do LPFC uninitialization before the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+int
+lpfc_hba_down_prep(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ /* Disable interrupts */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ if (phba->pport->load_flag & FC_UNLOADING)
+ lpfc_cleanup_discovery_resources(phba->pport);
+ else {
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports &&
+ vports[i] != NULL; i++)
+ lpfc_cleanup_discovery_resources(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
+ * rspiocb which got deferred
+ *
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup completed slow path events after HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ * void.
+ **/
+static void
+lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *rspiocbq;
+ struct hbq_dmabuf *dmabuf;
+ struct lpfc_cq_event *cq_event;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+ /* Get the response iocb from the head of work queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_queue_event,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+
+ switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+ case CQE_CODE_COMPL_WQE:
+ rspiocbq = container_of(cq_event, struct lpfc_iocbq,
+ cq_event);
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ break;
+ case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
+ dmabuf = container_of(cq_event, struct hbq_dmabuf,
+ cq_event);
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ }
+ }
+}
+
+/**
+ * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup posted ELS buffers after the HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ * void.
+ **/
+static void
+lpfc_hba_free_post_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_dmabuf *mp, *next_mp;
+ LIST_HEAD(buflist);
+ int count;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+ lpfc_sli_hbqbuf_free_all(phba);
+ else {
+ /* Cleanup preposted buffers on the ELS ring */
+ pring = &psli->ring[LPFC_ELS_RING];
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&pring->postbufq, &buflist);
+ spin_unlock_irq(&phba->hbalock);
+
+ count = 0;
+ list_for_each_entry_safe(mp, next_mp, &buflist, list) {
+ list_del(&mp->list);
+ count++;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+
+ spin_lock_irq(&phba->hbalock);
+ pring->postbufq_cnt -= count;
+ spin_unlock_irq(&phba->hbalock);
+ }
+}
+
+/**
+ * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup the txcmplq after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ * void
+ **/
+static void
+lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ LIST_HEAD(completions);
+ int i;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ spin_lock_irq(&pring->ring_lock);
+ else
+ spin_lock_irq(&phba->hbalock);
+ /* At this point in time the HBA is either reset or DOA. Either
+ * way, nothing should be on txcmplq as it will NEVER complete.
+ */
+ list_splice_init(&pring->txcmplq, &completions);
+ pring->txcmplq_cnt = 0;
+
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ spin_unlock_irq(&pring->ring_lock);
+ else
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+}
+
+/**
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
+ int i;
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bring
+ * down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
+{
+ lpfc_hba_free_post_buf(phba);
+ lpfc_hba_clean_txcmplq(phba);
+ return 0;
+}
+
+/**
+ * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bring
+ * down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s4(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *psb, *psb_next;
+ LIST_HEAD(aborts);
+ unsigned long iflag = 0;
+ struct lpfc_sglq *sglq_entry = NULL;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ lpfc_hba_free_post_buf(phba);
+ lpfc_hba_clean_txcmplq(phba);
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ /* At this point in time the HBA is either reset or DOA. Either
+ * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
+ * on the lpfc_sgl_list so that it can either be freed if the
+ * driver is unloading or reposted if the driver is restarting
+ * the port.
+ */
+ spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
+ /* scsl_buf_list */
+ /* abts_sgl_list_lock required because worker thread uses this
+ * list.
+ */
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry(sglq_entry,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+ sglq_entry->state = SGL_FREED;
+
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ /* abts_scsi_buf_list_lock required because worker thread uses this
+ * list.
+ */
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+ &aborts);
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+ psb->pCmd = NULL;
+ psb->status = IOSTAT_SUCCESS;
+ }
+ spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+ list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+
+ lpfc_sli4_free_sp_events(phba);
+ return 0;
+}
+
+/**
+ * lpfc_hba_down_post - Wrapper func for hba down post routine
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 routine for performing
+ * uninitialization after the HBA is reset when bring down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+int
+lpfc_hba_down_post(struct lpfc_hba *phba)
+{
+ return (*phba->lpfc_hba_down_post)(phba);
+}
+
+/**
+ * lpfc_hb_timeout - The HBA-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the HBA-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, a HBA timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
+ * be performed in the timeout handler and the HBA timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_hb_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+
+ /* Check for heart beat timeout conditions */
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_HB_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ /* Tell the worker thread there is work to do */
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_rrq_timeout - The RRQ-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the RRQ-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_rrq_handler. Any periodical operations will
+ * be performed in the timeout handler and the RRQ timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_rrq_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
+ else
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the callback function to the lpfc heart-beat mailbox command.
+ * If configured, the lpfc driver issues the heart-beat mailbox command to
+ * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
+ * heart-beat mailbox command is issued, the driver shall set up heart-beat
+ * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
+ * heart-beat outstanding state. Once the mailbox command comes back and
+ * no error conditions detected, the heart-beat mailbox command timer is
+ * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
+ * state is cleared for the next heart-beat. If the timer expired with the
+ * heart-beat outstanding state set, the driver will put the HBA offline.
+ **/
+static void
+lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ unsigned long drvr_flag;
+
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ phba->hb_outstanding = 0;
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ /* Check and reset heart-beat timer is necessary */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
+ !(phba->link_state == LPFC_HBA_ERROR) &&
+ !(phba->pport->load_flag & FC_UNLOADING))
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ return;
+}
+
+/**
+ * lpfc_hb_timeout_handler - The HBA-timer timeout handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is the actual HBA-timer timeout handler to be invoked by the worker
+ * thread whenever the HBA timer fired and HBA-timeout event posted. This
+ * handler performs any periodic operations needed for the device. If such
+ * periodic event has already been attended to either in the interrupt handler
+ * or by processing slow-ring or fast-ring events within the HBA-timer
+ * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
+ * the timer for the next timeout period. If lpfc heart-beat mailbox command
+ * is configured and there is no heart-beat mailbox command outstanding, a
+ * heart-beat mailbox is issued and timer set properly. Otherwise, if there
+ * has been a heart-beat mailbox command outstanding, the HBA shall be put
+ * to offline.
+ **/
+void
+lpfc_hb_timeout_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_dmabuf *buf_ptr;
+ int retval, i;
+ struct lpfc_sli *psli = &phba->sli;
+ LIST_HEAD(completions);
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ lpfc_rcv_seq_check_edtov(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (phba->pport->load_flag & FC_UNLOADING) ||
+ (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ return;
+
+ spin_lock_irq(&phba->pport->work_port_lock);
+
+ if (time_after(phba->last_completion_time +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+ jiffies)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ if (!phba->hb_outstanding)
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ else
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+ return;
+ }
+ spin_unlock_irq(&phba->pport->work_port_lock);
+
+ if (phba->elsbuf_cnt &&
+ (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->elsbuf, &completions);
+ phba->elsbuf_cnt = 0;
+ phba->elsbuf_prev_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, buf_ptr,
+ struct lpfc_dmabuf, list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ }
+ phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
+
+ /* If there is no heart beat outstanding, issue a heartbeat command */
+ if (phba->cfg_enable_hba_heartbeat) {
+ if (!phba->hb_outstanding) {
+ if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
+ (list_empty(&psli->mboxq))) {
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!pmboxq) {
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 *
+ LPFC_HB_MBOX_INTERVAL));
+ return;
+ }
+
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq,
+ MBX_NOWAIT);
+
+ if (retval != MBX_BUSY &&
+ retval != MBX_SUCCESS) {
+ mempool_free(pmboxq,
+ phba->mbox_mem_pool);
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 *
+ LPFC_HB_MBOX_INTERVAL));
+ return;
+ }
+ phba->skipped_hb = 0;
+ phba->hb_outstanding = 1;
+ } else if (time_before_eq(phba->last_completion_time,
+ phba->skipped_hb)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2857 Last completion time not "
+ " updated in %d ms\n",
+ jiffies_to_msecs(jiffies
+ - phba->last_completion_time));
+ } else
+ phba->skipped_hb = jiffies;
+
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+ return;
+ } else {
+ /*
+ * If heart beat timeout called with hb_outstanding set
+ * we need to give the hb mailbox cmd a chance to
+ * complete or TMO.
+ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0459 Adapter heartbeat still out"
+ "standing:last compl time was %d ms.\n",
+ jiffies_to_msecs(jiffies
+ - phba->last_completion_time));
+ mod_timer(&phba->hb_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+ }
+ }
+}
+
+/**
+ * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring the HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_offline_eratt(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+
+ lpfc_offline(phba);
+ lpfc_reset_barrier(phba);
+ spin_lock_irq(&phba->hbalock);
+ lpfc_sli_brdreset(phba);
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_hba_down_post(phba);
+ lpfc_sli_brdready(phba, HS_MBRDY);
+ lpfc_unblock_mgmt_io(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+ return;
+}
+
+/**
+ * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring a SLI4 HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+void
+lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+{
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_HBA_ERROR;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_offline(phba);
+ lpfc_hba_down_post(phba);
+ lpfc_unblock_mgmt_io(phba);
+}
+
+/**
+ * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the deferred HBA hardware error
+ * conditions. This type of error is indicated by HBA by setting ER1
+ * and another ER bit in the host status register. The driver will
+ * wait until the ER1 bit clears before handling the error condition.
+ **/
+static void
+lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
+{
+ uint32_t old_host_status = phba->work_hs;
+ struct lpfc_sli *psli = &phba->sli;
+
+ /* If the pci channel is offline, ignore possible errors,
+ * since we cannot communicate with the pci card anyway.
+ */
+ if (pci_channel_offline(phba->pcidev)) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0479 Deferred Adapter Hardware Error "
+ "Data: x%x x%x x%x\n",
+ phba->work_hs,
+ phba->work_status[0], phba->work_status[1]);
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+
+ /*
+ * Firmware stops when it triggred erratt. That could cause the I/Os
+ * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
+ * SCSI layer retry it after re-establishing link.
+ */
+ lpfc_sli_abort_fcp_rings(phba);
+
+ /*
+ * There was a firmware error. Take the hba offline and then
+ * attempt to restart it.
+ */
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ lpfc_offline(phba);
+
+ /* Wait for the ER1 bit to clear.*/
+ while (phba->work_hs & HS_FFER1) {
+ msleep(100);
+ if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
+ phba->work_hs = UNPLUG_ERR ;
+ break;
+ }
+ /* If driver is unloading let the worker thread continue */
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->work_hs = 0;
+ break;
+ }
+ }
+
+ /*
+ * This is to ptrotect against a race condition in which
+ * first write to the host attention register clear the
+ * host status register.
+ */
+ if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
+ phba->work_hs = old_host_status & ~HS_FFER1;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+ phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+ phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+}
+
+static void
+lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
+{
+ struct lpfc_board_event_header board_event;
+ struct Scsi_Host *shost;
+
+ board_event.event_type = FC_REG_BOARD_EVENT;
+ board_event.subcategory = LPFC_EVENT_PORTINTERR;
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(board_event),
+ (char *) &board_event,
+ LPFC_NL_VENDOR_ID);
+}
+
+/**
+ * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the following HBA hardware error
+ * conditions:
+ * 1 - HBA error attention interrupt
+ * 2 - DMA ring index out of range
+ * 3 - Mailbox command came back as unknown
+ **/
+static void
+lpfc_handle_eratt_s3(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t event_data;
+ unsigned long temperature;
+ struct temp_event temp_event_data;
+ struct Scsi_Host *shost;
+
+ /* If the pci channel is offline, ignore possible errors,
+ * since we cannot communicate with the pci card anyway.
+ */
+ if (pci_channel_offline(phba->pcidev)) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ /* If resets are disabled then leave the HBA alone and return */
+ if (!phba->cfg_enable_hba_reset)
+ return;
+
+ /* Send an internal error event to mgmt application */
+ lpfc_board_errevt_to_mgmt(phba);
+
+ if (phba->hba_flag & DEFER_ERATT)
+ lpfc_handle_deferred_eratt(phba);
+
+ if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
+ if (phba->work_hs & HS_FFER6)
+ /* Re-establishing Link */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "1301 Re-establishing Link "
+ "Data: x%x x%x x%x\n",
+ phba->work_hs, phba->work_status[0],
+ phba->work_status[1]);
+ if (phba->work_hs & HS_FFER8)
+ /* Device Zeroization */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "2861 Host Authentication device "
+ "zeroization Data:x%x x%x x%x\n",
+ phba->work_hs, phba->work_status[0],
+ phba->work_status[1]);
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /*
+ * Firmware stops when it triggled erratt with HS_FFER6.
+ * That could cause the I/Os dropped by the firmware.
+ * Error iocb (I/O) on txcmplq and let the SCSI layer
+ * retry it after re-establishing link.
+ */
+ lpfc_sli_abort_fcp_rings(phba);
+
+ /*
+ * There was a firmware error. Take the hba offline and then
+ * attempt to restart it.
+ */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+ if (lpfc_online(phba) == 0) { /* Initialize the HBA */
+ lpfc_unblock_mgmt_io(phba);
+ return;
+ }
+ lpfc_unblock_mgmt_io(phba);
+ } else if (phba->work_hs & HS_CRIT_TEMP) {
+ temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_CRIT_TEMP;
+ temp_event_data.data = (uint32_t)temperature;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0406 Adapter maximum temperature exceeded "
+ "(%ld), taking this port offline "
+ "Data: x%x x%x x%x\n",
+ temperature, phba->work_hs,
+ phba->work_status[0], phba->work_status[1]);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *) &temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->over_temp_state = HBA_OVER_TEMP;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_offline_eratt(phba);
+
+ } else {
+ /* The if clause above forces this code path when the status
+ * failure is a value other than FFER6. Do not call the offline
+ * twice. This is the adapter hardware error path.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0457 Adapter Hardware Error "
+ "Data: x%x x%x x%x\n",
+ phba->work_hs,
+ phba->work_status[0], phba->work_status[1]);
+
+ event_data = FC_REG_DUMP_EVENT;
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+ lpfc_offline_eratt(phba);
+ }
+ return;
+}
+
+/**
+ * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
+ * @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox shutdown action.
+ *
+ * This routine is invoked to perform an SLI4 port PCI function reset in
+ * response to port status register polling attention. It waits for port
+ * status register (ERR, RDY, RN) bits before proceeding with function reset.
+ * During this process, interrupt vectors are freed and later requested
+ * for handling possible port resource change.
+ **/
+static int
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+ bool en_rn_msg)
+{
+ int rc;
+ uint32_t intr_mode;
+
+ /*
+ * On error status condition, driver need to wait for port
+ * ready before performing reset.
+ */
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+ if (!rc) {
+ /* need reset: attempt for port recovery */
+ if (en_rn_msg)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
+ lpfc_offline_prep(phba, mbx_action);
+ lpfc_offline(phba);
+ /* release interrupt for possible resource change */
+ lpfc_sli4_disable_intr(phba);
+ lpfc_sli_brdrestart(phba);
+ /* request and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3175 Failed to enable interrupt\n");
+ return -EIO;
+ } else {
+ phba->intr_mode = intr_mode;
+ }
+ rc = lpfc_online(phba);
+ if (rc == 0)
+ lpfc_unblock_mgmt_io(phba);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the SLI4 HBA hardware error attention
+ * conditions.
+ **/
+static void
+lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ uint32_t event_data;
+ struct Scsi_Host *shost;
+ uint32_t if_type;
+ struct lpfc_register portstat_reg = {0};
+ uint32_t reg_err1, reg_err2;
+ uint32_t uerrlo_reg, uemasklo_reg;
+ uint32_t pci_rd_rc1, pci_rd_rc2;
+ bool en_rn_msg = true;
+ struct temp_event temp_event_data;
+ int rc;
+
+ /* If the pci channel is offline, ignore possible errors, since
+ * we cannot communicate with the pci card anyway.
+ */
+ if (pci_channel_offline(phba->pcidev))
+ return;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerrlo_reg);
+ pci_rd_rc2 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+ &uemasklo_reg);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+ return;
+ lpfc_sli4_offline_eratt(phba);
+ break;
+
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3151 PCI bus read access failure: x%x\n",
+ readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+ return;
+ }
+ reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+ reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+ if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2889 Port Overtemperature event, "
+ "taking port offline Data: x%x x%x\n",
+ reg_err1, reg_err2);
+
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_CRIT_TEMP;
+ temp_event_data.data = 0xFFFFFFFF;
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->over_temp_state = HBA_OVER_TEMP;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_offline_eratt(phba);
+ return;
+ }
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3143 Port Down: Firmware Update "
+ "Detected\n");
+ en_rn_msg = false;
+ } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3144 Port Down: Debug Dump\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3145 Port Down: Provisioning\n");
+
+ /* If resets are disabled then leave the HBA alone and return */
+ if (!phba->cfg_enable_hba_reset)
+ return;
+
+ /* Check port status register for function reset */
+ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+ en_rn_msg);
+ if (rc == 0) {
+ /* don't report event on forced debug dump */
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ return;
+ else
+ break;
+ }
+ /* fall through for not able to recover */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3152 Unrecoverable error, bring the port "
+ "offline\n");
+ lpfc_sli4_offline_eratt(phba);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ break;
+ }
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3123 Report dump event to upper layer\n");
+ /* Send an internal error event to mgmt application */
+ lpfc_board_errevt_to_mgmt(phba);
+
+ event_data = FC_REG_DUMP_EVENT;
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+}
+
+/**
+ * lpfc_handle_eratt - Wrapper func for handling hba error attention
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba error attention handling
+ * routine from the API jump table function pointer from the lpfc_hba struct.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+ (*phba->lpfc_handle_eratt)(phba);
+}
+
+/**
+ * lpfc_handle_latt - The HBA link event handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked from the worker thread to handle a HBA host
+ * attention link event.
+ **/
+void
+lpfc_handle_latt(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmb;
+ volatile uint32_t control;
+ struct lpfc_dmabuf *mp;
+ int rc = 0;
+
+ pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ rc = 1;
+ goto lpfc_handle_latt_err_exit;
+ }
+
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ rc = 2;
+ goto lpfc_handle_latt_free_pmb;
+ }
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ rc = 3;
+ goto lpfc_handle_latt_free_mp;
+ }
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
+ psli->slistat.link_event++;
+ lpfc_read_topology(phba, pmb, mp);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+ pmb->vport = vport;
+ /* Block ELS IOCBs until we have processed this mbox command */
+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = 4;
+ goto lpfc_handle_latt_free_mbuf;
+ }
+
+ /* Clear Link Attention in HA REG */
+ spin_lock_irq(&phba->hbalock);
+ writel(HA_LATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+
+ return;
+
+lpfc_handle_latt_free_mbuf:
+ phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+lpfc_handle_latt_free_mp:
+ kfree(mp);
+lpfc_handle_latt_free_pmb:
+ mempool_free(pmb, phba->mbox_mem_pool);
+lpfc_handle_latt_err_exit:
+ /* Enable Link attention interrupts */
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Clear Link Attention in HA REG */
+ writel(HA_LATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_linkdown(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
+
+ return;
+}
+
+/**
+ * lpfc_parse_vpd - Parse VPD (Vital Product Data)
+ * @phba: pointer to lpfc hba data structure.
+ * @vpd: pointer to the vital product data.
+ * @len: length of the vital product data in bytes.
+ *
+ * This routine parses the Vital Product Data (VPD). The VPD is treated as
+ * an array of characters. In this routine, the ModelName, ProgramType, and
+ * ModelDesc, etc. fields of the phba data structure will be populated.
+ *
+ * Return codes
+ * 0 - pointer to the VPD passed in is NULL
+ * 1 - success
+ **/
+int
+lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
+{
+ uint8_t lenlo, lenhi;
+ int Length;
+ int i, j;
+ int finished = 0;
+ int index = 0;
+
+ if (!vpd)
+ return 0;
+
+ /* Vital Product */
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0455 Vital Product Data: x%x x%x x%x x%x\n",
+ (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
+ (uint32_t) vpd[3]);
+ while (!finished && (index < (len - 4))) {
+ switch (vpd[index]) {
+ case 0x82:
+ case 0x91:
+ index += 1;
+ lenlo = vpd[index];
+ index += 1;
+ lenhi = vpd[index];
+ index += 1;
+ i = ((((unsigned short)lenhi) << 8) + lenlo);
+ index += i;
+ break;
+ case 0x90:
+ index += 1;
+ lenlo = vpd[index];
+ index += 1;
+ lenhi = vpd[index];
+ index += 1;
+ Length = ((((unsigned short)lenhi) << 8) + lenlo);
+ if (Length > len - index)
+ Length = len - index;
+ while (Length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->SerialNumber[j++] = vpd[index++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->ModelDesc[j++] = vpd[index++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->ModelName[j++] = vpd[index++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->ProgramType[j++] = vpd[index++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ index++;
+ } else
+ phba->Port[j++] = vpd[index++];
+ if (j == 19)
+ break;
+ }
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
+ continue;
+ }
+ else {
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ index += i;
+ Length -= (3 + i);
+ }
+ }
+ finished = 0;
+ break;
+ case 0x78:
+ finished = 1;
+ break;
+ default:
+ index ++;
+ break;
+ }
+ }
+
+ return(1);
+}
+
+/**
+ * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+ lpfc_vpd_t *vp;
+ uint16_t dev_id = phba->pcidev->device;
+ int max_speed;
+ int GE = 0;
+ int oneConnect = 0; /* default is not a oneConnect */
+ struct {
+ char *name;
+ char *bus;
+ char *function;
+ } m = {"<Unknown>", "", ""};
+
+ if (mdp && mdp[0] != '\0'
+ && descp && descp[0] != '\0')
+ return;
+
+ if (phba->lmt & LMT_16Gb)
+ max_speed = 16;
+ else if (phba->lmt & LMT_10Gb)
+ max_speed = 10;
+ else if (phba->lmt & LMT_8Gb)
+ max_speed = 8;
+ else if (phba->lmt & LMT_4Gb)
+ max_speed = 4;
+ else if (phba->lmt & LMT_2Gb)
+ max_speed = 2;
+ else if (phba->lmt & LMT_1Gb)
+ max_speed = 1;
+ else
+ max_speed = 0;
+
+ vp = &phba->vpd;
+
+ switch (dev_id) {
+ case PCI_DEVICE_ID_FIREFLY:
+ m = (typeof(m)){"LP6000", "PCI",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SUPERFLY:
+ if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
+ m = (typeof(m)){"LP7000", "PCI", ""};
+ else
+ m = (typeof(m)){"LP7000E", "PCI", ""};
+ m.function = "Obsolete, Unsupported Fibre Channel Adapter";
+ break;
+ case PCI_DEVICE_ID_DRAGONFLY:
+ m = (typeof(m)){"LP8000", "PCI",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_CENTAUR:
+ if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
+ m = (typeof(m)){"LP9002", "PCI", ""};
+ else
+ m = (typeof(m)){"LP9000", "PCI", ""};
+ m.function = "Obsolete, Unsupported Fibre Channel Adapter";
+ break;
+ case PCI_DEVICE_ID_RFLY:
+ m = (typeof(m)){"LP952", "PCI",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_PEGASUS:
+ m = (typeof(m)){"LP9802", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_THOR:
+ m = (typeof(m)){"LP10000", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_VIPER:
+ m = (typeof(m)){"LPX1000", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_PFLY:
+ m = (typeof(m)){"LP982", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_TFLY:
+ m = (typeof(m)){"LP1050", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_HELIOS:
+ m = (typeof(m)){"LP11000", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_HELIOS_SCSP:
+ m = (typeof(m)){"LP11000-SP", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_HELIOS_DCSP:
+ m = (typeof(m)){"LP11002-SP", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_NEPTUNE:
+ m = (typeof(m)){"LPe1000", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_NEPTUNE_SCSP:
+ m = (typeof(m)){"LPe1000-SP", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_NEPTUNE_DCSP:
+ m = (typeof(m)){"LPe1002-SP", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_BMID:
+ m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_BSMB:
+ m = (typeof(m)){"LP111", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_ZEPHYR:
+ m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_ZEPHYR_SCSP:
+ m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
+ m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
+ GE = 1;
+ break;
+ case PCI_DEVICE_ID_ZMID:
+ m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_ZSMB:
+ m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LP101:
+ m = (typeof(m)){"LP101", "PCI-X",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LP10000S:
+ m = (typeof(m)){"LP10000-S", "PCI",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LP11000S:
+ m = (typeof(m)){"LP11000-S", "PCI-X2",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LPE11000S:
+ m = (typeof(m)){"LPe11000-S", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SAT:
+ m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SAT_MID:
+ m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SAT_SMB:
+ m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SAT_DCSP:
+ m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SAT_SCSP:
+ m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_SAT_S:
+ m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_HORNET:
+ m = (typeof(m)){"LP21000", "PCIe",
+ "Obsolete, Unsupported FCoE Adapter"};
+ GE = 1;
+ break;
+ case PCI_DEVICE_ID_PROTEUS_VF:
+ m = (typeof(m)){"LPev12000", "PCIe IOV",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_PROTEUS_PF:
+ m = (typeof(m)){"LPev12000", "PCIe IOV",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_PROTEUS_S:
+ m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_TIGERSHARK:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+ break;
+ case PCI_DEVICE_ID_TOMCAT:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+ break;
+ case PCI_DEVICE_ID_FALCON:
+ m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+ "EmulexSecure Fibre"};
+ break;
+ case PCI_DEVICE_ID_BALIUS:
+ m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LANCER_FC:
+ m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe",
+ "Obsolete, Unsupported Fibre Channel Adapter"};
+ break;
+ case PCI_DEVICE_ID_LANCER_FCOE:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
+ break;
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe15100", "PCIe",
+ "Obsolete, Unsupported FCoE"};
+ break;
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ oneConnect = 1;
+ m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
+ break;
+ default:
+ m = (typeof(m)){"Unknown", "", ""};
+ break;
+ }
+
+ if (mdp && mdp[0] == '\0')
+ snprintf(mdp, 79,"%s", m.name);
+ /*
+ * oneConnect hba requires special processing, they are all initiators
+ * and we put the port number on the end
+ */
+ if (descp && descp[0] == '\0') {
+ if (oneConnect)
+ snprintf(descp, 255,
+ "Emulex OneConnect %s, %s Initiator %s",
+ m.name, m.function,
+ phba->Port);
+ else if (max_speed == 0)
+ snprintf(descp, 255,
+ "Emulex %s %s %s ",
+ m.name, m.bus, m.function);
+ else
+ snprintf(descp, 255,
+ "Emulex %s %d%s %s %s",
+ m.name, max_speed, (GE) ? "GE" : "Gb",
+ m.bus, m.function);
+ }
+}
+
+/**
+ * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a IOCB ring.
+ * @cnt: the number of IOCBs to be posted to the IOCB ring.
+ *
+ * This routine posts a given number of IOCBs with the associated DMA buffer
+ * descriptors specified by the cnt argument to the given IOCB ring.
+ *
+ * Return codes
+ * The number of IOCBs NOT able to be posted to the IOCB ring.
+ **/
+int
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *iocb;
+ struct lpfc_dmabuf *mp1, *mp2;
+
+ cnt += pring->missbufcnt;
+
+ /* While there are buffers to post */
+ while (cnt > 0) {
+ /* Allocate buffer for command iocb */
+ iocb = lpfc_sli_get_iocbq(phba);
+ if (iocb == NULL) {
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+ icmd = &iocb->iocb;
+
+ /* 2 buffers can be posted per command */
+ /* Allocate buffer to post */
+ mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp1)
+ mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
+ if (!mp1 || !mp1->virt) {
+ kfree(mp1);
+ lpfc_sli_release_iocbq(phba, iocb);
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+
+ INIT_LIST_HEAD(&mp1->list);
+ /* Allocate buffer to post */
+ if (cnt > 1) {
+ mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp2)
+ mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &mp2->phys);
+ if (!mp2 || !mp2->virt) {
+ kfree(mp2);
+ lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+ kfree(mp1);
+ lpfc_sli_release_iocbq(phba, iocb);
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+
+ INIT_LIST_HEAD(&mp2->list);
+ } else {
+ mp2 = NULL;
+ }
+
+ icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
+ icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
+ icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
+ icmd->ulpBdeCount = 1;
+ cnt--;
+ if (mp2) {
+ icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
+ icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
+ icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
+ cnt--;
+ icmd->ulpBdeCount = 2;
+ }
+
+ icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+ icmd->ulpLe = 1;
+
+ if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
+ IOCB_ERROR) {
+ lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+ kfree(mp1);
+ cnt++;
+ if (mp2) {
+ lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
+ kfree(mp2);
+ cnt++;
+ }
+ lpfc_sli_release_iocbq(phba, iocb);
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+ lpfc_sli_ringpostbuf_put(phba, pring, mp1);
+ if (mp2)
+ lpfc_sli_ringpostbuf_put(phba, pring, mp2);
+ }
+ pring->missbufcnt = 0;
+ return 0;
+}
+
+/**
+ * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine posts initial receive IOCB buffers to the ELS ring. The
+ * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
+ * set to 64 IOCBs.
+ *
+ * Return codes
+ * 0 - success (currently always success)
+ **/
+static int
+lpfc_post_rcv_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ /* Ring 0, ELS / CT buffers */
+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ /* Ring 2 - FCP no buffers needed */
+
+ return 0;
+}
+
+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
+
+/**
+ * lpfc_sha_init - Set up initial array of hash table entries
+ * @HashResultPointer: pointer to an array as hash table.
+ *
+ * This routine sets up the initial values to the array of hash table entries
+ * for the LC HBAs.
+ **/
+static void
+lpfc_sha_init(uint32_t * HashResultPointer)
+{
+ HashResultPointer[0] = 0x67452301;
+ HashResultPointer[1] = 0xEFCDAB89;
+ HashResultPointer[2] = 0x98BADCFE;
+ HashResultPointer[3] = 0x10325476;
+ HashResultPointer[4] = 0xC3D2E1F0;
+}
+
+/**
+ * lpfc_sha_iterate - Iterate initial hash table with the working hash table
+ * @HashResultPointer: pointer to an initial/result hash table.
+ * @HashWorkingPointer: pointer to an working hash table.
+ *
+ * This routine iterates an initial hash table pointed by @HashResultPointer
+ * with the values from the working hash table pointeed by @HashWorkingPointer.
+ * The results are putting back to the initial hash table, returned through
+ * the @HashResultPointer as the result hash table.
+ **/
+static void
+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
+{
+ int t;
+ uint32_t TEMP;
+ uint32_t A, B, C, D, E;
+ t = 16;
+ do {
+ HashWorkingPointer[t] =
+ S(1,
+ HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
+ 8] ^
+ HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
+ } while (++t <= 79);
+ t = 0;
+ A = HashResultPointer[0];
+ B = HashResultPointer[1];
+ C = HashResultPointer[2];
+ D = HashResultPointer[3];
+ E = HashResultPointer[4];
+
+ do {
+ if (t < 20) {
+ TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
+ } else if (t < 40) {
+ TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
+ } else if (t < 60) {
+ TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
+ } else {
+ TEMP = (B ^ C ^ D) + 0xCA62C1D6;
+ }
+ TEMP += S(5, A) + E + HashWorkingPointer[t];
+ E = D;
+ D = C;
+ C = S(30, B);
+ B = A;
+ A = TEMP;
+ } while (++t <= 79);
+
+ HashResultPointer[0] += A;
+ HashResultPointer[1] += B;
+ HashResultPointer[2] += C;
+ HashResultPointer[3] += D;
+ HashResultPointer[4] += E;
+
+}
+
+/**
+ * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
+ * @RandomChallenge: pointer to the entry of host challenge random number array.
+ * @HashWorking: pointer to the entry of the working hash array.
+ *
+ * This routine calculates the working hash array referred by @HashWorking
+ * from the challenge random numbers associated with the host, referred by
+ * @RandomChallenge. The result is put into the entry of the working hash
+ * array and returned by reference through @HashWorking.
+ **/
+static void
+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
+{
+ *HashWorking = (*RandomChallenge ^ *HashWorking);
+}
+
+/**
+ * lpfc_hba_init - Perform special handling for LC HBA initialization
+ * @phba: pointer to lpfc hba data structure.
+ * @hbainit: pointer to an array of unsigned 32-bit integers.
+ *
+ * This routine performs the special handling for LC HBA initialization.
+ **/
+void
+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
+{
+ int t;
+ uint32_t *HashWorking;
+ uint32_t *pwwnn = (uint32_t *) phba->wwnn;
+
+ HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
+ if (!HashWorking)
+ return;
+
+ HashWorking[0] = HashWorking[78] = *pwwnn++;
+ HashWorking[1] = HashWorking[79] = *pwwnn;
+
+ for (t = 0; t < 7; t++)
+ lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
+
+ lpfc_sha_init(hbainit);
+ lpfc_sha_iterate(hbainit, HashWorking);
+ kfree(HashWorking);
+}
+
+/**
+ * lpfc_cleanup - Performs vport cleanups before deleting a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs the necessary cleanups before deleting the @vport.
+ * It invokes the discovery state machine to perform necessary state
+ * transitions and to release the ndlps associated with the @vport. Note,
+ * the physical port is treated as @vport 0.
+ **/
+void
+lpfc_cleanup(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ int i = 0;
+
+ if (phba->link_state > LPFC_LINK_DOWN)
+ lpfc_port_link_failure(vport);
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ continue;
+ spin_lock_irq(&phba->ndlp_lock);
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+ /* Trigger the release of the ndlp memory */
+ lpfc_nlp_put(ndlp);
+ continue;
+ }
+ spin_lock_irq(&phba->ndlp_lock);
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ /* The ndlp should not be in memory free mode already */
+ spin_unlock_irq(&phba->ndlp_lock);
+ continue;
+ } else
+ /* Indicate request for freeing ndlp memory */
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+
+ if (vport->port_type != LPFC_PHYSICAL_PORT &&
+ ndlp->nlp_DID == Fabric_DID) {
+ /* Just free up ndlp with Fabric_DID for vports */
+ lpfc_nlp_put(ndlp);
+ continue;
+ }
+
+ /* take care of nodes in unused state before the state
+ * machine taking action.
+ */
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ lpfc_nlp_put(ndlp);
+ continue;
+ }
+
+ if (ndlp->nlp_type & NLP_FABRIC)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+
+ /* At this point, ALL ndlp's should be gone
+ * because of the previous NLP_EVT_DEVICE_RM.
+ * Lets wait for this to happen, if needed.
+ */
+ while (!list_empty(&vport->fc_nodes)) {
+ if (i++ > 3000) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0233 Nodelist not empty\n");
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vport->fc_nodes, nlp_listp) {
+ lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+ LOG_NODE,
+ "0282 did:x%x ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ ndlp->nlp_DID, (void *)ndlp,
+ ndlp->nlp_usg_map,
+ atomic_read(
+ &ndlp->kref.refcount));
+ }
+ break;
+ }
+
+ /* Wait for any activity on ndlps to settle */
+ msleep(10);
+ }
+ lpfc_cleanup_vports_rrqs(vport, NULL);
+}
+
+/**
+ * lpfc_stop_vport_timers - Stop all the timers associated with a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine stops all the timers associated with a @vport. This function
+ * is invoked before disabling or deleting a @vport. Note that the physical
+ * port is treated as @vport 0.
+ **/
+void
+lpfc_stop_vport_timers(struct lpfc_vport *vport)
+{
+ del_timer_sync(&vport->els_tmofunc);
+ del_timer_sync(&vport->fc_fdmitmo);
+ del_timer_sync(&vport->delayed_disc_tmo);
+ lpfc_can_disctmo(vport);
+ return;
+}
+
+/**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
+ * caller of this routine should already hold the host lock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+ /* Clear pending FCF rediscovery wait flag */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
+ /* Now, try to stop the timer */
+ del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
+ * checks whether the FCF rediscovery wait timer is pending with the host
+ * lock held before proceeding with disabling the timer and clearing the
+ * wait timer pendig flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+ spin_lock_irq(&phba->hbalock);
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+ /* FCF rediscovery timer already fired or stopped */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+ /* Clear failover in progress flags */
+ phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops all the timers associated with a HBA. This function is
+ * invoked before either putting a HBA offline or unloading the driver.
+ **/
+void
+lpfc_stop_hba_timers(struct lpfc_hba *phba)
+{
+ lpfc_stop_vport_timers(phba->pport);
+ del_timer_sync(&phba->sli.mbox_tmo);
+ del_timer_sync(&phba->fabric_block_timer);
+ del_timer_sync(&phba->eratt_poll);
+ del_timer_sync(&phba->hb_tmofunc);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ del_timer_sync(&phba->rrq_tmr);
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ }
+ phba->hb_outstanding = 0;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ /* Stop any LightPulse device specific driver timers */
+ del_timer_sync(&phba->fcp_poll_timer);
+ break;
+ case LPFC_PCI_DEV_OC:
+ /* Stop any OneConnect device sepcific driver timers */
+ lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0297 Invalid device group (x%x)\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as blocked. Once the HBA's
+ * management interface is marked as blocked, all the user space access to
+ * the HBA, whether they are from sysfs interface or libdfc interface will
+ * all be blocked. The HBA is set to block the management interface when the
+ * driver prepares the HBA interface for online or offline.
+ **/
+static void
+lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
+{
+ unsigned long iflag;
+ uint8_t actcmd = MBX_HEARTBEAT;
+ unsigned long timeout;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (mbx_action == LPFC_MBX_NO_WAIT)
+ return;
+ timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->sli.mbox_active) {
+ actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) * 1000) + jiffies;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ /* Wait for the outstnading mailbox command to complete */
+ while (phba->sli.mbox_active) {
+ /* Check active mailbox complete status every 2ms */
+ msleep(2);
+ if (time_after(jiffies, timeout)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2813 Mgmt IO is Blocked %x "
+ "- mbox cmd %x still active\n",
+ phba->sli.sli_flag, actcmd);
+ break;
+ }
+ }
+}
+
+/**
+ * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Allocate RPIs for all active remote nodes. This is needed whenever
+ * an SLI4 adapter is reset and the driver is not unloading. Its purpose
+ * is to fixup the temporary rpi assignments.
+ **/
+void
+lpfc_sli4_node_prep(struct lpfc_hba *phba)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_vport **vports;
+ int i;
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->load_flag & FC_UNLOADING)
+ continue;
+
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vports[i]->fc_nodes,
+ nlp_listp) {
+ if (NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp->nlp_rpi =
+ lpfc_sli4_alloc_rpi(phba);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE,
+ "0009 rpi:%x DID:%x "
+ "flg:%x map:%x %p\n",
+ ndlp->nlp_rpi,
+ ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ ndlp->nlp_usg_map,
+ ndlp);
+ }
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_online - Initialize and bring a HBA online
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the HBA and brings a HBA online. During this
+ * process, the management interface is blocked to prevent user space access
+ * to the HBA interfering with the driver initialization.
+ *
+ * Return codes
+ * 0 - successful
+ * 1 - failed
+ **/
+int
+lpfc_online(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+ bool vpis_cleared = false;
+
+ if (!phba)
+ return 0;
+ vport = phba->pport;
+
+ if (!(vport->fc_flag & FC_OFFLINE_MODE))
+ return 0;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0458 Bring Adapter online\n");
+
+ lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
+
+ if (!lpfc_sli_queue_setup(phba)) {
+ lpfc_unblock_mgmt_io(phba);
+ return 1;
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
+ lpfc_unblock_mgmt_io(phba);
+ return 1;
+ }
+ spin_lock_irq(&phba->hbalock);
+ if (!phba->sli4_hba.max_cfg_param.vpi_used)
+ vpis_cleared = true;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
+ lpfc_unblock_mgmt_io(phba);
+ return 1;
+ }
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ struct Scsi_Host *shost;
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ if ((vpis_cleared) &&
+ (vports[i]->port_type !=
+ LPFC_PHYSICAL_PORT))
+ vports[i]->vpi = 0;
+ }
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ lpfc_unblock_mgmt_io(phba);
+ return 0;
+}
+
+/**
+ * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as not blocked. Once the
+ * HBA's management interface is marked as not blocked, all the user space
+ * access to the HBA, whether they are from sysfs interface or libdfc
+ * interface will be allowed. The HBA is set to block the management interface
+ * when the driver prepares the HBA interface for online or offline and then
+ * set to unblock the management interface afterwards.
+ **/
+void
+lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
+{
+ unsigned long iflag;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_offline_prep - Prepare a HBA to be brought offline
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to prepare a HBA to be brought offline. It performs
+ * unregistration login to all the nodes on all vports and flushes the mailbox
+ * queue to make it ready to be brought offline.
+ **/
+void
+lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ int i;
+
+ if (vport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ lpfc_block_mgmt_io(phba, mbx_action);
+
+ lpfc_linkdown(phba);
+
+ /* Issue an unreg_login to all nodes on all vports */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->load_flag & FC_UNLOADING)
+ continue;
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+
+ shost = lpfc_shost_from_vport(vports[i]);
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vports[i]->fc_nodes,
+ nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RM);
+ }
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ /*
+ * Whenever an SLI4 port goes offline, free the
+ * RPI. Get a new RPI when the adapter port
+ * comes back online.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_printf_vlog(ndlp->vport,
+ KERN_INFO, LOG_NODE,
+ "0011 lpfc_offline: "
+ "ndlp:x%p did %x "
+ "usgmap:x%x rpi:%x\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_usg_map,
+ ndlp->nlp_rpi);
+
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ }
+ lpfc_unreg_rpi(vports[i], ndlp);
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
+}
+
+/**
+ * lpfc_offline - Bring a HBA offline
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine actually brings a HBA offline. It stops all the timers
+ * associated with the HBA, brings down the SLI layer, and eventually
+ * marks the HBA as in offline state for the upper layer protocol.
+ **/
+void
+lpfc_offline(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ /* stop port and all timers associated with this hba */
+ lpfc_stop_port(phba);
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ lpfc_stop_vport_timers(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0460 Bring Adapter offline\n");
+ /* Bring down the SLI Layer and cleanup. The HBA is offline
+ now. */
+ lpfc_sli_hba_down(phba);
+ spin_lock_irq(&phba->hbalock);
+ phba->work_ha = 0;
+ spin_unlock_irq(&phba->hbalock);
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ vports[i]->work_port_events = 0;
+ vports[i]->fc_flag |= FC_OFFLINE_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the SCSI buffers and IOCBs from the driver
+ * list back to kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ **/
+static void
+lpfc_scsi_free(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *sb, *sb_next;
+ struct lpfc_iocbq *io, *io_next;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* Release all the lpfc_scsi_bufs maintained by this host. */
+
+ spin_lock(&phba->scsi_buf_list_put_lock);
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+ list) {
+ list_del(&sb->list);
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ sb->dma_handle);
+ kfree(sb);
+ phba->total_scsi_bufs--;
+ }
+ spin_unlock(&phba->scsi_buf_list_put_lock);
+
+ spin_lock(&phba->scsi_buf_list_get_lock);
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+ list) {
+ list_del(&sb->list);
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ sb->dma_handle);
+ kfree(sb);
+ phba->total_scsi_bufs--;
+ }
+ spin_unlock(&phba->scsi_buf_list_get_lock);
+
+ /* Release all the lpfc_iocbq entries maintained by this host. */
+ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
+ list_del(&io->list);
+ kfree(io);
+ phba->total_iocbq_bufs--;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to updates the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+ struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
+ uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
+ LIST_HEAD(els_sgl_list);
+ LIST_HEAD(scsi_sgl_list);
+ int rc;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ /*
+ * update on pci function's els xri-sgl list
+ */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
+ /* els xri-sgl expanded */
+ xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3157 ELS xri-sgl count increased from "
+ "%d to %d\n", phba->sli4_hba.els_xri_cnt,
+ els_xri_cnt);
+ /* allocate the additional els sgls */
+ for (i = 0; i < xri_cnt; i++) {
+ sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+ GFP_KERNEL);
+ if (sglq_entry == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2562 Failure to allocate an "
+ "ELS sgl entry:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->buff_type = GEN_BUFF_TYPE;
+ sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
+ &sglq_entry->phys);
+ if (sglq_entry->virt == NULL) {
+ kfree(sglq_entry);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2563 Failure to allocate an "
+ "ELS mbuf:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sgl = sglq_entry->virt;
+ memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list, &els_sgl_list);
+ }
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
+ /* els xri-sgl shrinked */
+ xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3158 ELS xri-sgl count decreased from "
+ "%d to %d\n", phba->sli4_hba.els_xri_cnt,
+ els_xri_cnt);
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ /* release extra els sgls from list */
+ for (i = 0; i < xri_cnt; i++) {
+ list_remove_head(&els_sgl_list,
+ sglq_entry, struct lpfc_sglq, list);
+ if (sglq_entry) {
+ lpfc_mbuf_free(phba, sglq_entry->virt,
+ sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+ }
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3163 ELS xri-sgl count unchanged: %d\n",
+ els_xri_cnt);
+ phba->sli4_hba.els_xri_cnt = els_xri_cnt;
+
+ /* update xris to els sgls on the list */
+ sglq_entry = NULL;
+ sglq_entry_next = NULL;
+ list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+ &phba->sli4_hba.lpfc_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2400 Failed to allocate xri for "
+ "ELS sgl\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /*
+ * update on pci function's allocated scsi xri-sgl list
+ */
+ phba->total_scsi_bufs = 0;
+
+ /* maximum number of xris available for scsi buffers */
+ phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
+ els_xri_cnt;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2401 Current allocated SCSI xri-sgl count:%d, "
+ "maximum SCSI xri count:%d\n",
+ phba->sli4_hba.scsi_xri_cnt,
+ phba->sli4_hba.scsi_xri_max);
+
+ spin_lock_irq(&phba->scsi_buf_list_get_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
+ list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+ list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
+ spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+
+ if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
+ /* max scsi xri shrinked below the allocated scsi buffers */
+ scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
+ phba->sli4_hba.scsi_xri_max;
+ /* release the extra allocated scsi buffers */
+ for (i = 0; i < scsi_xri_cnt; i++) {
+ list_remove_head(&scsi_sgl_list, psb,
+ struct lpfc_scsi_buf, list);
+ if (psb) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ }
+ }
+ spin_lock_irq(&phba->scsi_buf_list_get_lock);
+ phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
+ spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+ }
+
+ /* update xris associated to remaining allocated scsi buffers */
+ psb = NULL;
+ psb_next = NULL;
+ list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2560 Failed to allocate xri for "
+ "scsi buffer\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+ spin_lock_irq(&phba->scsi_buf_list_get_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
+ list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
+ spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+
+ return 0;
+
+out_free_mem:
+ lpfc_free_els_sgl_list(phba);
+ lpfc_scsi_free(phba);
+ return rc;
+}
+
+/**
+ * lpfc_create_port - Create an FC port
+ * @phba: pointer to lpfc hba data structure.
+ * @instance: a unique integer ID to this FC port.
+ * @dev: pointer to the device data structure.
+ *
+ * This routine creates a FC port for the upper layer protocol. The FC port
+ * can be created on top of either a physical port or a virtual port provided
+ * by the HBA. This routine also allocates a SCSI host data structure (shost)
+ * and associates the FC port created before adding the shost into the SCSI
+ * layer.
+ *
+ * Return codes
+ * @vport - pointer to the virtual N_Port data structure.
+ * NULL - port create failed.
+ **/
+struct lpfc_vport *
+lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
+ int error = 0;
+
+ if (dev != &phba->pcidev->dev) {
+ shost = scsi_host_alloc(&lpfc_vport_template,
+ sizeof(struct lpfc_vport));
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ shost = scsi_host_alloc(&lpfc_template,
+ sizeof(struct lpfc_vport));
+ else
+ shost = scsi_host_alloc(&lpfc_template_s3,
+ sizeof(struct lpfc_vport));
+ }
+ if (!shost)
+ goto out;
+
+ vport = (struct lpfc_vport *) shost->hostdata;
+ vport->phba = phba;
+ vport->load_flag |= FC_LOADING;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ vport->fc_rscn_flush = 0;
+
+ lpfc_get_vport_cfgparam(vport);
+ shost->unique_id = instance;
+ shost->max_id = LPFC_MAX_TARGET;
+ shost->max_lun = vport->cfg_max_luns;
+ shost->this_id = -1;
+ shost->max_cmd_len = 16;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ shost->dma_boundary =
+ phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
+ shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+ }
+
+ /*
+ * Set initial can_queue value since 0 is no longer supported and
+ * scsi_add_host will fail. This will be adjusted later based on the
+ * max xri value determined in hba setup.
+ */
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+ if (dev != &phba->pcidev->dev) {
+ shost->transportt = lpfc_vport_transport_template;
+ vport->port_type = LPFC_NPIV_PORT;
+ } else {
+ shost->transportt = lpfc_transport_template;
+ vport->port_type = LPFC_PHYSICAL_PORT;
+ }
+
+ /* Initialize all internally managed lists. */
+ INIT_LIST_HEAD(&vport->fc_nodes);
+ INIT_LIST_HEAD(&vport->rcv_buffer_list);
+ spin_lock_init(&vport->work_port_lock);
+
+ init_timer(&vport->fc_disctmo);
+ vport->fc_disctmo.function = lpfc_disc_timeout;
+ vport->fc_disctmo.data = (unsigned long)vport;
+
+ init_timer(&vport->fc_fdmitmo);
+ vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
+ vport->fc_fdmitmo.data = (unsigned long)vport;
+
+ init_timer(&vport->els_tmofunc);
+ vport->els_tmofunc.function = lpfc_els_timeout;
+ vport->els_tmofunc.data = (unsigned long)vport;
+
+ init_timer(&vport->delayed_disc_tmo);
+ vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+ vport->delayed_disc_tmo.data = (unsigned long)vport;
+
+ error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
+ if (error)
+ goto out_put_shost;
+
+ spin_lock_irq(&phba->hbalock);
+ list_add_tail(&vport->listentry, &phba->port_list);
+ spin_unlock_irq(&phba->hbalock);
+ return vport;
+
+out_put_shost:
+ scsi_host_put(shost);
+out:
+ return NULL;
+}
+
+/**
+ * destroy_port - destroy an FC port
+ * @vport: pointer to an lpfc virtual N_Port data structure.
+ *
+ * This routine destroys a FC port from the upper layer protocol. All the
+ * resources associated with the port are released.
+ **/
+void
+destroy_port(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ lpfc_debugfs_terminate(vport);
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_cleanup(vport);
+ return;
+}
+
+/**
+ * lpfc_get_instance - Get a unique integer ID
+ *
+ * This routine allocates a unique integer ID from lpfc_hba_index pool. It
+ * uses the kernel idr facility to perform the task.
+ *
+ * Return codes:
+ * instance - a unique integer ID allocated as the new instance.
+ * -1 - lpfc get instance failed.
+ **/
+int
+lpfc_get_instance(void)
+{
+ int ret;
+
+ ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
+ return ret < 0 ? -1 : ret;
+}
+
+/**
+ * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
+ * @shost: pointer to SCSI host data structure.
+ * @time: elapsed time of the scan in jiffies.
+ *
+ * This routine is called by the SCSI layer with a SCSI host to determine
+ * whether the scan host is finished.
+ *
+ * Note: there is no scan_start function as adapter initialization will have
+ * asynchronously kicked off the link initialization.
+ *
+ * Return codes
+ * 0 - SCSI host scan is not over yet.
+ * 1 - SCSI host scan is over.
+ **/
+int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int stat = 0;
+
+ spin_lock_irq(shost->host_lock);
+
+ if (vport->load_flag & FC_UNLOADING) {
+ stat = 1;
+ goto finished;
+ }
+ if (time >= msecs_to_jiffies(30 * 1000)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0461 Scanning longer than 30 "
+ "seconds. Continuing initialization\n");
+ stat = 1;
+ goto finished;
+ }
+ if (time >= msecs_to_jiffies(15 * 1000) &&
+ phba->link_state <= LPFC_LINK_DOWN) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0465 Link down longer than 15 "
+ "seconds. Continuing initialization\n");
+ stat = 1;
+ goto finished;
+ }
+
+ if (vport->port_state != LPFC_VPORT_READY)
+ goto finished;
+ if (vport->num_disc_nodes || vport->fc_prli_sent)
+ goto finished;
+ if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
+ goto finished;
+ if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
+ goto finished;
+
+ stat = 1;
+
+finished:
+ spin_unlock_irq(shost->host_lock);
+ return stat;
+}
+
+/**
+ * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
+ * @shost: pointer to SCSI host data structure.
+ *
+ * This routine initializes a given SCSI host attributes on a FC port. The
+ * SCSI host can be either on top of a physical port or a virtual port.
+ **/
+void lpfc_host_attrib_init(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ /*
+ * Set fixed host attributes. Must done after lpfc_sli_hba_setup().
+ */
+
+ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(shost), 0,
+ sizeof(fc_host_supported_fc4s(shost)));
+ fc_host_supported_fc4s(shost)[2] = 1;
+ fc_host_supported_fc4s(shost)[7] = 1;
+
+ lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+ sizeof fc_host_symbolic_name(shost));
+
+ fc_host_supported_speeds(shost) = 0;
+ if (phba->lmt & LMT_16Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
+ if (phba->lmt & LMT_10Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+ if (phba->lmt & LMT_8Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
+ if (phba->lmt & LMT_4Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
+ if (phba->lmt & LMT_2Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
+ if (phba->lmt & LMT_1Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+
+ fc_host_maxframe_size(shost) =
+ (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
+
+ fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
+
+ /* This value is also unchanging */
+ memset(fc_host_active_fc4s(shost), 0,
+ sizeof(fc_host_active_fc4s(shost)));
+ fc_host_active_fc4s(shost)[2] = 1;
+ fc_host_active_fc4s(shost)[7] = 1;
+
+ fc_host_max_npiv_vports(shost) = phba->max_vpi;
+ spin_lock_irq(shost->host_lock);
+ vport->load_flag &= ~FC_LOADING;
+ spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_stop_port_s3 - Stop SLI3 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI3 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
+{
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ /* Reset some HBA SLI setup states */
+ lpfc_stop_hba_timers(phba);
+ phba->pport->work_port_events = 0;
+}
+
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+ /* Reset some HBA SLI4 setup states */
+ lpfc_stop_hba_timers(phba);
+ phba->pport->work_port_events = 0;
+ phba->sli4_hba.intr_enable = 0;
+}
+
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+ phba->lpfc_stop_port(phba);
+}
+
+/**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+ unsigned long fcf_redisc_wait_tmo =
+ (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+ /* Start fcf rediscovery wait period timer */
+ mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+ spin_lock_irq(&phba->hbalock);
+ /* Allow action to new fcf asynchronous event */
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ /* Mark the FCF rediscovery pending state */
+ phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine is invoked when waiting for FCF table rediscover has been
+ * timed out. If new FCF record(s) has (have) been discovered during the
+ * wait period, a new FCF event shall be added to the FCOE async event
+ * list, and then worker thread shall be waked up for processing from the
+ * worker thread context.
+ **/
+static void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+ /* Don't send FCF rediscovery event if timer cancelled */
+ spin_lock_irq(&phba->hbalock);
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ /* Clear FCF rediscovery timer pending flag */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+ /* FCF rediscovery event to worker thread */
+ phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2776 FCF rediscover quiescent timer expired\n");
+ /* wake up worker thread */
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
+ *
+ * Return: Link-attention status in terms of base driver's coding.
+ **/
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ uint16_t latt_fault;
+
+ switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+ case LPFC_ASYNC_LINK_FAULT_NONE:
+ case LPFC_ASYNC_LINK_FAULT_LOCAL:
+ case LPFC_ASYNC_LINK_FAULT_REMOTE:
+ latt_fault = 0;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0398 Invalid link fault code: x%x\n",
+ bf_get(lpfc_acqe_link_fault, acqe_link));
+ latt_fault = MBXERR_ERROR;
+ break;
+ }
+ return latt_fault;
+}
+
+/**
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
+ *
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ uint8_t att_type;
+
+ switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+ case LPFC_ASYNC_LINK_STATUS_DOWN:
+ case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+ att_type = LPFC_ATT_LINK_DOWN;
+ break;
+ case LPFC_ASYNC_LINK_STATUS_UP:
+ /* Ignore physical link up events - wait for logical link up */
+ att_type = LPFC_ATT_RESERVED;
+ break;
+ case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+ att_type = LPFC_ATT_LINK_UP;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0399 Invalid link attention type: x%x\n",
+ bf_get(lpfc_acqe_link_status, acqe_link));
+ att_type = LPFC_ATT_RESERVED;
+ break;
+ }
+ return att_type;
+}
+
+/**
+ * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link speed and translate
+ * it into the base driver's link-attention link speed coding.
+ *
+ * Return: Link-attention link speed in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ uint8_t link_speed;
+
+ switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
+ case LPFC_ASYNC_LINK_SPEED_ZERO:
+ case LPFC_ASYNC_LINK_SPEED_10MBPS:
+ case LPFC_ASYNC_LINK_SPEED_100MBPS:
+ link_speed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
+ link_speed = LPFC_LINK_SPEED_1GHZ;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ link_speed = LPFC_LINK_SPEED_10GHZ;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ link_speed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0483 Invalid link-attention link speed: x%x\n",
+ bf_get(lpfc_acqe_link_speed, acqe_link));
+ link_speed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ return link_speed;
+}
+
+/**
+ * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get an SLI3 FC port's link speed in Mbps.
+ *
+ * Return: link speed in terms of Mbps.
+ **/
+uint32_t
+lpfc_sli_port_speed_get(struct lpfc_hba *phba)
+{
+ uint32_t link_speed;
+
+ if (!lpfc_is_link_up(phba))
+ return 0;
+
+ switch (phba->fc_linkspeed) {
+ case LPFC_LINK_SPEED_1GHZ:
+ link_speed = 1000;
+ break;
+ case LPFC_LINK_SPEED_2GHZ:
+ link_speed = 2000;
+ break;
+ case LPFC_LINK_SPEED_4GHZ:
+ link_speed = 4000;
+ break;
+ case LPFC_LINK_SPEED_8GHZ:
+ link_speed = 8000;
+ break;
+ case LPFC_LINK_SPEED_10GHZ:
+ link_speed = 10000;
+ break;
+ case LPFC_LINK_SPEED_16GHZ:
+ link_speed = 16000;
+ break;
+ default:
+ link_speed = 0;
+ }
+ return link_speed;
+}
+
+/**
+ * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @evt_code: asynchronous event code.
+ * @speed_code: asynchronous event link speed code.
+ *
+ * This routine is to parse the giving SLI4 async event link speed code into
+ * value of Mbps for the link speed.
+ *
+ * Return: link speed in terms of Mbps.
+ **/
+static uint32_t
+lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
+ uint8_t speed_code)
+{
+ uint32_t port_speed;
+
+ switch (evt_code) {
+ case LPFC_TRAILER_CODE_LINK:
+ switch (speed_code) {
+ case LPFC_ASYNC_LINK_SPEED_ZERO:
+ port_speed = 0;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10MBPS:
+ port_speed = 10;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_100MBPS:
+ port_speed = 100;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
+ port_speed = 1000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ port_speed = 10000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
+ port_speed = 20000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ port_speed = 25000;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ port_speed = 40000;
+ break;
+ default:
+ port_speed = 0;
+ }
+ break;
+ case LPFC_TRAILER_CODE_FC:
+ switch (speed_code) {
+ case LPFC_FC_LA_SPEED_UNKNOWN:
+ port_speed = 0;
+ break;
+ case LPFC_FC_LA_SPEED_1G:
+ port_speed = 1000;
+ break;
+ case LPFC_FC_LA_SPEED_2G:
+ port_speed = 2000;
+ break;
+ case LPFC_FC_LA_SPEED_4G:
+ port_speed = 4000;
+ break;
+ case LPFC_FC_LA_SPEED_8G:
+ port_speed = 8000;
+ break;
+ case LPFC_FC_LA_SPEED_10G:
+ port_speed = 10000;
+ break;
+ case LPFC_FC_LA_SPEED_16G:
+ port_speed = 16000;
+ break;
+ default:
+ port_speed = 0;
+ }
+ break;
+ default:
+ port_speed = 0;
+ }
+ return port_speed;
+}
+
+/**
+ * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FCoE link event.
+ **/
+static void
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_mbx_read_top *la;
+ uint8_t att_type;
+ int rc;
+
+ att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+ if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
+ return;
+ phba->fcoe_eventtag = acqe_link->event_tag;
+ pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0395 The mboxq allocation failed\n");
+ return;
+ }
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0396 The lpfc_dmabuf allocation failed\n");
+ goto out_free_pmb;
+ }
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0397 The mbuf allocation failed\n");
+ goto out_free_dmabuf;
+ }
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
+ /* Block ELS IOCBs until we have done process link event */
+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Update link event statistics */
+ phba->sli.slistat.link_event++;
+
+ /* Create lpfc_handle_latt mailbox command from link ACQE */
+ lpfc_read_topology(phba, pmb, mp);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+ pmb->vport = phba->pport;
+
+ /* Keep the link status for extra SLI4 state machine reference */
+ phba->sli4_hba.link_state.speed =
+ lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
+ bf_get(lpfc_acqe_link_speed, acqe_link));
+ phba->sli4_hba.link_state.duplex =
+ bf_get(lpfc_acqe_link_duplex, acqe_link);
+ phba->sli4_hba.link_state.status =
+ bf_get(lpfc_acqe_link_status, acqe_link);
+ phba->sli4_hba.link_state.type =
+ bf_get(lpfc_acqe_link_type, acqe_link);
+ phba->sli4_hba.link_state.number =
+ bf_get(lpfc_acqe_link_number, acqe_link);
+ phba->sli4_hba.link_state.fault =
+ bf_get(lpfc_acqe_link_fault, acqe_link);
+ phba->sli4_hba.link_state.logical_speed =
+ bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2900 Async FC/FCoE Link event - Speed:%dGBit "
+ "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
+ "Logical speed:%dMbps Fault:%d\n",
+ phba->sli4_hba.link_state.speed,
+ phba->sli4_hba.link_state.topology,
+ phba->sli4_hba.link_state.status,
+ phba->sli4_hba.link_state.type,
+ phba->sli4_hba.link_state.number,
+ phba->sli4_hba.link_state.logical_speed,
+ phba->sli4_hba.link_state.fault);
+ /*
+ * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
+ * topology info. Note: Optional for non FC-AL ports.
+ */
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out_free_dmabuf;
+ return;
+ }
+ /*
+ * For FCoE Mode: fill in all the topology information we need and call
+ * the READ_TOPOLOGY completion routine to continue without actually
+ * sending the READ_TOPOLOGY mailbox command to the port.
+ */
+ /* Parse and translate status field */
+ mb = &pmb->u.mb;
+ mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+ /* Parse and translate link attention fields */
+ la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+ la->eventTag = acqe_link->event_tag;
+ bf_set(lpfc_mbx_read_top_att_type, la, att_type);
+ bf_set(lpfc_mbx_read_top_link_spd, la,
+ lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
+
+ /* Fake the the following irrelvant fields */
+ bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
+ bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
+ bf_set(lpfc_mbx_read_top_il, la, 0);
+ bf_set(lpfc_mbx_read_top_pb, la, 0);
+ bf_set(lpfc_mbx_read_top_fa, la, 0);
+ bf_set(lpfc_mbx_read_top_mm, la, 0);
+
+ /* Invoke the lpfc_handle_latt mailbox command callback function */
+ lpfc_mbx_cmpl_read_topology(phba, pmb);
+
+ return;
+
+out_free_dmabuf:
+ kfree(mp);
+out_free_pmb:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fc: pointer to the async fc completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FC event. It will simply log
+ * that the event was received and then issue a read_topology mailbox command so
+ * that the rest of the driver will treat it the same as SLI3.
+ **/
+static void
+lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
+{
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ if (bf_get(lpfc_trailer_type, acqe_fc) !=
+ LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2895 Non FC link Event detected.(%d)\n",
+ bf_get(lpfc_trailer_type, acqe_fc));
+ return;
+ }
+ /* Keep the link status for extra SLI4 state machine reference */
+ phba->sli4_hba.link_state.speed =
+ lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
+ bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+ phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
+ phba->sli4_hba.link_state.topology =
+ bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
+ phba->sli4_hba.link_state.status =
+ bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
+ phba->sli4_hba.link_state.type =
+ bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
+ phba->sli4_hba.link_state.number =
+ bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
+ phba->sli4_hba.link_state.fault =
+ bf_get(lpfc_acqe_link_fault, acqe_fc);
+ phba->sli4_hba.link_state.logical_speed =
+ bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2896 Async FC event - Speed:%dGBaud Topology:x%x "
+ "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
+ "%dMbps Fault:%d\n",
+ phba->sli4_hba.link_state.speed,
+ phba->sli4_hba.link_state.topology,
+ phba->sli4_hba.link_state.status,
+ phba->sli4_hba.link_state.type,
+ phba->sli4_hba.link_state.number,
+ phba->sli4_hba.link_state.logical_speed,
+ phba->sli4_hba.link_state.fault);
+ pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2897 The mboxq allocation failed\n");
+ return;
+ }
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2898 The lpfc_dmabuf allocation failed\n");
+ goto out_free_pmb;
+ }
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2899 The mbuf allocation failed\n");
+ goto out_free_dmabuf;
+ }
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
+ /* Block ELS IOCBs until we have done process link event */
+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Update link event statistics */
+ phba->sli.slistat.link_event++;
+
+ /* Create lpfc_handle_latt mailbox command from link ACQE */
+ lpfc_read_topology(phba, pmb, mp);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+ pmb->vport = phba->pport;
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out_free_dmabuf;
+ return;
+
+out_free_dmabuf:
+ kfree(mp);
+out_free_pmb:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fc: pointer to the async SLI completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous SLI events.
+ **/
+static void
+lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
+{
+ char port_name;
+ char message[128];
+ uint8_t status;
+ uint8_t evt_type;
+ struct temp_event temp_event_data;
+ struct lpfc_acqe_misconfigured_event *misconfigured;
+ struct Scsi_Host *shost;
+
+ evt_type = bf_get(lpfc_trailer_type, acqe_sli);
+
+ /* Special case Lancer */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2901 Async SLI event - Event Data1:x%08x Event Data2:"
+ "x%08x SLI Event Type:%d\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ evt_type);
+ return;
+ }
+
+ port_name = phba->Port[0];
+ if (port_name == 0x00)
+ port_name = '?'; /* get port name is empty */
+
+ switch (evt_type) {
+ case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3190 Over Temperature:%d Celsius- Port Name %c\n",
+ acqe_sli->event_data1, port_name);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+ break;
+ case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3191 Normal Temperature:%d Celsius - Port Name %c\n",
+ acqe_sli->event_data1, port_name);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *)&temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+ break;
+ case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
+ misconfigured = (struct lpfc_acqe_misconfigured_event *)
+ &acqe_sli->event_data1;
+
+ /* fetch the status for this port */
+ switch (phba->sli4_hba.lnk_info.lnk_no) {
+ case LPFC_LINK_NUMBER_0:
+ status = bf_get(lpfc_sli_misconfigured_port0,
+ &misconfigured->theEvent);
+ break;
+ case LPFC_LINK_NUMBER_1:
+ status = bf_get(lpfc_sli_misconfigured_port1,
+ &misconfigured->theEvent);
+ break;
+ case LPFC_LINK_NUMBER_2:
+ status = bf_get(lpfc_sli_misconfigured_port2,
+ &misconfigured->theEvent);
+ break;
+ case LPFC_LINK_NUMBER_3:
+ status = bf_get(lpfc_sli_misconfigured_port3,
+ &misconfigured->theEvent);
+ break;
+ default:
+ status = ~LPFC_SLI_EVENT_STATUS_VALID;
+ break;
+ }
+
+ switch (status) {
+ case LPFC_SLI_EVENT_STATUS_VALID:
+ return; /* no message if the sfp is okay */
+ case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
+ sprintf(message, "Optics faulted/incorrectly "
+ "installed/not installed - Reseat optics, "
+ "if issue not resolved, replace.");
+ break;
+ case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
+ sprintf(message,
+ "Optics of two types installed - Remove one "
+ "optic or install matching pair of optics.");
+ break;
+ case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
+ sprintf(message, "Incompatible optics - Replace with "
+ "compatible optics for card to function.");
+ break;
+ default:
+ /* firmware is reporting a status we don't know about */
+ sprintf(message, "Unknown event status x%02x", status);
+ break;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3176 Misconfigured Physical Port - "
+ "Port Name %c %s\n", port_name, message);
+ break;
+ case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3192 Remote DPort Test Initiated - "
+ "Event Data1:x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3193 Async SLI event - Event Data1:x%08x Event Data2:"
+ "x%08x SLI Event Type:%d\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ evt_type);
+ break;
+ }
+}
+
+/**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp with the vport if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+
+ if (!vport)
+ return NULL;
+ phba = vport->phba;
+ if (!phba)
+ return NULL;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Set the node type */
+ ndlp->nlp_type |= NLP_FABRIC;
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
+ }
+ if ((phba->pport->port_state < LPFC_FLOGI) &&
+ (phba->pport->port_state != LPFC_VPORT_FAILED))
+ return NULL;
+ /* If virtual link is not yet instantiated ignore CVL */
+ if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
+ && (vport->port_state != LPFC_VPORT_FAILED))
+ return NULL;
+ shost = lpfc_shost_from_vport(vport);
+ if (!shost)
+ return NULL;
+ lpfc_linkdown_port(vport);
+ lpfc_cleanup_pending_mbox(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_CVL_RCVD;
+ spin_unlock_irq(shost->host_lock);
+
+ return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @vport: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ lpfc_sli4_perform_vport_cvl(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async fcoe completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous fcoe event.
+ **/
+static void
+lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_fip *acqe_fip)
+{
+ uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
+ int rc;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ int active_vlink_present;
+ struct lpfc_vport **vports;
+ int i;
+
+ phba->fc_eventTag = acqe_fip->event_tag;
+ phba->fcoe_eventtag = acqe_fip->event_tag;
+ switch (event_type) {
+ case LPFC_FIP_EVENT_TYPE_NEW_FCF:
+ case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
+ if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2546 New FCF event, evt_tag:x%x, "
+ "index:x%x\n",
+ acqe_fip->event_tag,
+ acqe_fip->index);
+ else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+ LOG_DISCOVERY,
+ "2788 FCF param modified event, "
+ "evt_tag:x%x, index:x%x\n",
+ acqe_fip->event_tag,
+ acqe_fip->index);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ /*
+ * During period of FCF discovery, read the FCF
+ * table record indexed by the event to update
+ * FCF roundrobin failover eligible FCF bmask.
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+ LOG_DISCOVERY,
+ "2779 Read FCF (x%x) for updating "
+ "roundrobin FCF failover bmask\n",
+ acqe_fip->index);
+ rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
+ }
+
+ /* If the FCF discovery is in progress, do nothing. */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_TS_INPROG) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ /* If fast FCF failover rescan event is pending, do nothing */
+ if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+
+ /* If the FCF has been in discovered state, do nothing. */
+ if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Otherwise, scan the entire FCF table and re-discover SAN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2770 Start FCF table scan per async FCF "
+ "event, evt_tag:x%x, index:x%x\n",
+ acqe_fip->event_tag, acqe_fip->index);
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2547 Issue FCF scan read FCF mailbox "
+ "command failed (x%x)\n", rc);
+ break;
+
+ case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2548 FCF Table full count 0x%x tag 0x%x\n",
+ bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
+ acqe_fip->event_tag);
+ break;
+
+ case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+ phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2549 FCF (x%x) disconnected from network, "
+ "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
+ /*
+ * If we are in the middle of FCF failover process, clear
+ * the corresponding FCF bit in the roundrobin bitmap.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ spin_unlock_irq(&phba->hbalock);
+ /* Update FLOGI FCF failover eligible FCF bmask */
+ lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
+ break;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* If the event is not for currently used fcf do nothing */
+ if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
+ break;
+
+ /*
+ * Otherwise, request the port to rediscover the entire FCF
+ * table for a fast recovery from case that the current FCF
+ * is no longer valid as we are not in the middle of FCF
+ * failover process already.
+ */
+ spin_lock_irq(&phba->hbalock);
+ /* Mark the fast failover process in progress */
+ phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2771 Start FCF fast failover process due to "
+ "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+ "\n", acqe_fip->event_tag, acqe_fip->index);
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2772 Issue FCF rediscover mabilbox "
+ "command failed, fail through to FCF "
+ "dead event\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * Last resort will fail over by treating this
+ * as a link down to FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ } else {
+ /* Reset FCF roundrobin bmask for new discovery */
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ /*
+ * Handling fast FCF failover to a DEAD FCF event is
+ * considered equalivant to receiving CVL to all vports.
+ */
+ lpfc_sli4_perform_all_vport_cvl(phba);
+ }
+ break;
+ case LPFC_FIP_EVENT_TYPE_CVL:
+ phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2718 Clear Virtual Link Received for VPI 0x%x"
+ " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
+ vport = lpfc_find_vport_by_vpid(phba,
+ acqe_fip->index);
+ ndlp = lpfc_sli4_perform_vport_cvl(vport);
+ if (!ndlp)
+ break;
+ active_vlink_present = 0;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ if ((!(vports[i]->fc_flag &
+ FC_VPORT_CVL_RCVD)) &&
+ (vports[i]->port_state > LPFC_FDISC)) {
+ active_vlink_present = 1;
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+
+ if (active_vlink_present) {
+ /*
+ * If there are other active VLinks present,
+ * re-instantiate the Vlink using FDISC.
+ */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000));
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ vport->port_state = LPFC_FDISC;
+ } else {
+ /*
+ * Otherwise, we request port to rediscover
+ * the entire FCF table for a fast recovery
+ * from possible case that the current FCF
+ * is no longer valid if we are not already
+ * in the FCF failover process.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ /* Mark the fast failover process in progress */
+ phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+ LOG_DISCOVERY,
+ "2773 Start FCF failover per CVL, "
+ "evt_tag:x%x\n", acqe_fip->event_tag);
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2774 Issue FCF rediscover "
+ "mabilbox command failed, "
+ "through to CVL event\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * Last resort will be re-try on the
+ * the current registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
+ } else
+ /*
+ * Reset FCF roundrobin bmask for new
+ * discovery.
+ */
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ }
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0288 Unknown FCoE event type 0x%x event tag "
+ "0x%x\n", event_type, acqe_fip->event_tag);
+ break;
+ }
+}
+
+/**
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async dcbx completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
+static void
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_dcbx *acqe_dcbx)
+{
+ phba->fc_eventTag = acqe_dcbx->event_tag;
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0290 The SLI4 DCBX asynchronous event is not "
+ "handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async grp5 completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notified of a logical link speed change. The Port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_grp5 *acqe_grp5)
+{
+ uint16_t prev_ll_spd;
+
+ phba->fc_eventTag = acqe_grp5->event_tag;
+ phba->fcoe_eventtag = acqe_grp5->event_tag;
+ prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+ phba->sli4_hba.link_state.logical_speed =
+ (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2789 GRP5 Async Event: Updating logical link speed "
+ "from %dMbps to %dMbps\n", prev_ll_spd,
+ phba->sli4_hba.link_state.logical_speed);
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the async event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~ASYNC_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the async events */
+ while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Process the asynchronous event */
+ switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+ case LPFC_TRAILER_CODE_LINK:
+ lpfc_sli4_async_link_evt(phba,
+ &cq_event->cqe.acqe_link);
+ break;
+ case LPFC_TRAILER_CODE_FCOE:
+ lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
+ break;
+ case LPFC_TRAILER_CODE_DCBX:
+ lpfc_sli4_async_dcbx_evt(phba,
+ &cq_event->cqe.acqe_dcbx);
+ break;
+ case LPFC_TRAILER_CODE_GRP5:
+ lpfc_sli4_async_grp5_evt(phba,
+ &cq_event->cqe.acqe_grp5);
+ break;
+ case LPFC_TRAILER_CODE_FC:
+ lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
+ break;
+ case LPFC_TRAILER_CODE_SLI:
+ lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "1804 Invalid asynchrous event code: "
+ "x%x\n", bf_get(lpfc_trailer_code,
+ &cq_event->cqe.mcqe_cmpl));
+ break;
+ }
+ /* Free the completion event processed to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+ int rc;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Clear FCF rediscovery timeout event */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+ /* Clear driver fast failover FCF record flag */
+ phba->fcf.failover_rec.flag = 0;
+ /* Set state for FCF fast failover */
+ phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Scan FCF table from the first entry to re-discover SAN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2777 Start post-quiescent FCF table scan\n");
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2747 Issue FCF scan read FCF mailbox "
+ "command failed 0x%x\n", rc);
+}
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 if success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+ int rc;
+
+ /* Set up lpfc PCI-device group */
+ phba->pci_dev_grp = dev_grp;
+
+ /* The LPFC_PCI_DEV_OC uses SLI4 */
+ if (dev_grp == LPFC_PCI_DEV_OC)
+ phba->sli_rev = LPFC_SLI_REV4;
+
+ /* Set up device INIT API function jump table */
+ rc = lpfc_init_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up SCSI API function jump table */
+ rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up SLI API function jump table */
+ rc = lpfc_sli_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up MBOX API function jump table */
+ rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * lpfc_log_intr_mode - Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine it invoked to log the currently used active interrupt mode
+ * to the device.
+ **/
+static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+ switch (intr_mode) {
+ case 0:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0470 Enable INTx interrupt mode.\n");
+ break;
+ case 1:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0481 Enabled MSI interrupt mode.\n");
+ break;
+ case 2:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0480 Enabled MSI-X interrupt mode.\n");
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0482 Illegal interrupt mode.\n");
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_enable_pci_dev - Enable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the PCI device that is common to all
+ * PCI devices.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_enable_pci_dev(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ int bars = 0;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ goto out_error;
+ else
+ pdev = phba->pcidev;
+ /* Select PCI BARs */
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ /* Enable PCI device */
+ if (pci_enable_device_mem(pdev))
+ goto out_error;
+ /* Request PCI resource for the device */
+ if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+ goto out_disable_device;
+ /* Set up device as PCI master and save state for EEH */
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+ pci_save_state(pdev);
+
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ if (pci_is_pcie(pdev))
+ pdev->needs_freset = 1;
+
+ return 0;
+
+out_disable_device:
+ pci_disable_device(pdev);
+out_error:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1401 Failed to enable pci device, bars:x%x\n", bars);
+ return -ENODEV;
+}
+
+/**
+ * lpfc_disable_pci_dev - Disable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the PCI device that is common to all
+ * PCI devices.
+ **/
+static void
+lpfc_disable_pci_dev(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ int bars;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return;
+ else
+ pdev = phba->pcidev;
+ /* Select PCI BARs */
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ /* Release PCI resource and disable PCI device */
+ pci_release_selected_regions(pdev, bars);
+ pci_disable_device(pdev);
+
+ return;
+}
+
+/**
+ * lpfc_reset_hba - Reset a hba
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to reset a hba device. It brings the HBA
+ * offline, performs a board restart, and then brings the board back
+ * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
+ * on outstanding mailbox commands.
+ **/
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
+{
+ /* If resets are disabled then set error state and return. */
+ if (!phba->cfg_enable_hba_reset) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return;
+ }
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ else
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+ lpfc_unblock_mgmt_io(phba);
+}
+
+/**
+ * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function enables the PCI SR-IOV virtual functions to a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable the number of virtual functions to the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call does not considered as an error condition for most of the device.
+ **/
+uint16_t
+lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ uint16_t nr_virtfn;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos == 0)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
+ return nr_virtfn;
+}
+
+/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables the PCI SR-IOV virtual functions to a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable the number of virtual functions to the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call does not considered as an error condition for most of the device.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ uint16_t max_nr_vfn;
+ int rc;
+
+ max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+ if (nr_vfn > max_nr_vfn) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3057 Requested vfs (%d) greater than "
+ "supported vfs (%d)", nr_vfn, max_nr_vfn);
+ return -EINVAL;
+ }
+
+ rc = pci_enable_sriov(pdev, nr_vfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2806 Failed to enable sriov on this device "
+ "with vfn number nr_vf:%d, rc:%d\n",
+ nr_vfn, rc);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2807 Successful enable sriov on this device "
+ "with vfn number nr_vf:%d\n", nr_vfn);
+ return rc;
+}
+
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-3 HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ int rc;
+
+ /*
+ * Initialize timers used by driver
+ */
+
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
+ psli = &phba->sli;
+ /* MBOX heartbeat timer */
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long) phba;
+ /* FCP polling mode timer */
+ init_timer(&phba->fcp_poll_timer);
+ phba->fcp_poll_timer.function = lpfc_poll_timeout;
+ phba->fcp_poll_timer.data = (unsigned long) phba;
+ /* Fabric block timer */
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
+ /* EA polling mode timer */
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
+
+ /* Host attention work mask setup */
+ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
+ phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+ if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+ phba->menlo_flag |= HBA_MENLO_SUPPORT;
+ /* check for menlo minimum sg count */
+ if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+ }
+
+ if (!phba->sli.ring)
+ phba->sli.ring = (struct lpfc_sli_ring *)
+ kzalloc(LPFC_SLI3_MAX_RING *
+ sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+ if (!phba->sli.ring)
+ return -ENOMEM;
+
+ /*
+ * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be dynamically calculated.
+ */
+
+ /* Initialize the host templates the configured values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+ /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
+ if (phba->cfg_enable_bg) {
+ /*
+ * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+ * the FCP rsp, and a BDE for each. Sice we have no control
+ * over how many protection data segments the SCSI Layer
+ * will hand us (ie: there could be one for every block
+ * in the IO), we just allocate enough BDEs to accomidate
+ * our max amount and we need to limit lpfc_sg_seg_cnt to
+ * minimize the risk of running out.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+ /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+ phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+ } else {
+ /*
+ * The scsi_buf for a regular I/O will hold the FCP cmnd,
+ * the FCP rsp, a BDE for each, and a BDE for up to
+ * cfg_sg_seg_cnt data segments.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+ /* Total BDEs in BPL for scsi_sg_list */
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+ phba->cfg_total_seg_cnt);
+
+ phba->max_vpi = LPFC_MAX_VPI;
+ /* This will be set to correct value after config_port mbox */
+ phba->max_vports = 0;
+
+ /*
+ * Initialize the SLI Layer to run with lpfc HBAs.
+ */
+ lpfc_sli_setup(phba);
+ lpfc_sli_queue_setup(phba);
+
+ /* Allocate device driver memory */
+ if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
+ return -ENOMEM;
+
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2808 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-3 HBA device it attached to.
+ **/
+static void
+lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
+{
+ /* Free device driver memory allocated */
+ lpfc_mem_free_all(phba);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-4 HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *mboxq;
+ int rc, i, hbq_count, max_buf_size;
+ uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+ struct lpfc_mqe *mqe;
+ int longs;
+ int fof_vectors = 0;
+
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+
+ /* Before proceed, wait for POST done and device ready */
+ rc = lpfc_sli4_post_status_check(phba);
+ if (rc)
+ return -ENODEV;
+
+ /*
+ * Initialize timers used by driver
+ */
+
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+ init_timer(&phba->rrq_tmr);
+ phba->rrq_tmr.function = lpfc_rrq_timeout;
+ phba->rrq_tmr.data = (unsigned long)phba;
+
+ psli = &phba->sli;
+ /* MBOX heartbeat timer */
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long) phba;
+ /* Fabric block timer */
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
+ /* EA polling mode timer */
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
+ /* FCF rediscover timer */
+ init_timer(&phba->fcf.redisc_wait);
+ phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
+ phba->fcf.redisc_wait.data = (unsigned long)phba;
+
+ /*
+ * Control structure for handling external multi-buffer mailbox
+ * command pass-through.
+ */
+ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ phba->max_vpi = LPFC_MAX_VPI;
+
+ /* This will be set to correct value after the read_config mbox */
+ phba->max_vports = 0;
+
+ /* Program the default value of vlan_id and fc_map */
+ phba->valid_vlan = 0;
+ phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+ phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+ phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+ /*
+ * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
+ * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
+ */
+ if (!phba->sli.ring)
+ phba->sli.ring = kzalloc(
+ (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
+ sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+ if (!phba->sli.ring)
+ return -ENOMEM;
+
+ /*
+ * It doesn't matter what family our adapter is in, we are
+ * limited to 2 Pages, 512 SGEs, for our SGL.
+ * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+ */
+ max_buf_size = (2 * SLI4_PAGE_SIZE);
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+
+ /*
+ * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be dynamically calculated.
+ */
+
+ if (phba->cfg_enable_bg) {
+ /*
+ * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+ * the FCP rsp, and a SGE for each. Sice we have no control
+ * over how many protection data segments the SCSI Layer
+ * will hand us (ie: there could be one for every block
+ * in the IO), we just allocate enough SGEs to accomidate
+ * our max amount and we need to limit lpfc_sg_seg_cnt to
+ * minimize the risk of running out.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) + max_buf_size;
+
+ /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+ } else {
+ /*
+ * The scsi_buf for a regular I/O will hold the FCP cmnd,
+ * the FCP rsp, a SGE for each, and a SGE for up to
+ * cfg_sg_seg_cnt data segments.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+ /* Total SGEs for scsi_sg_list */
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ /*
+ * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
+ * to post 1 page for the SGL.
+ */
+ }
+
+ /* Initialize the host templates with the updated values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+ if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+ phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+ else
+ phba->cfg_sg_dma_buf_size =
+ SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+ phba->cfg_total_seg_cnt);
+
+ /* Initialize buffer queue management fields */
+ hbq_count = lpfc_sli_hbq_count();
+ for (i = 0; i < hbq_count; ++i)
+ INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+ INIT_LIST_HEAD(&phba->rb_pend_list);
+ phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+ phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+
+ /*
+ * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+ */
+ /* Initialize the Abort scsi buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ /* This abort list used by worker thread */
+ spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
+
+ /*
+ * Initialize driver internal slow-path work queues
+ */
+
+ /* Driver internel slow-path CQ Event pool */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+ /* Response IOCB work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
+ /* Asynchronous event CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+ /* Fast-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+ /* Slow-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+ /* Receive queue CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+ /* Initialize extent block lists. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+ INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
+ /* Initialize the driver internal SLI layer lists. */
+ lpfc_sli_setup(phba);
+ lpfc_sli_queue_setup(phba);
+
+ /* Allocate device driver memory */
+ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+ if (rc)
+ return -ENOMEM;
+
+ /* IF Type 2 ports get initialized now. */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = lpfc_pci_function_reset(phba);
+ if (unlikely(rc))
+ return -ENODEV;
+ phba->temp_sensor_support = 1;
+ }
+
+ /* Create the bootstrap mailbox command */
+ rc = lpfc_create_bootstrap_mbox(phba);
+ if (unlikely(rc))
+ goto out_free_mem;
+
+ /* Set up the host's endian order with the device. */
+ rc = lpfc_setup_endian_order(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Set up the hba's configuration parameters. */
+ rc = lpfc_sli4_read_config(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+ rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* IF Type 0 ports get initialized now. */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_0) {
+ rc = lpfc_pci_function_reset(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+ }
+
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto out_free_bsmbx;
+ }
+
+ /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
+ lpfc_supported_pages(mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (!rc) {
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ if (rc) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
+ }
+ /*
+ * Get sli4 parameters that override parameters from Port capabilities.
+ * If this call fails, it isn't critical unless the SLI4 parameters come
+ * back in conflict.
+ */
+ rc = lpfc_get_sli4_parameters(phba, mboxq);
+ if (rc) {
+ if (phba->sli4_hba.extents_in_use &&
+ phba->sli4_hba.rpi_hdrs_in_use) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
+ goto out_free_bsmbx;
+ }
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Verify OAS is supported */
+ lpfc_sli4_oas_verify(phba);
+ if (phba->cfg_fof)
+ fof_vectors = 1;
+
+ /* Verify all the SLI4 queues */
+ rc = lpfc_sli4_queue_verify(phba);
+ if (rc)
+ goto out_free_bsmbx;
+
+ /* Create driver internal CQE event pool */
+ rc = lpfc_sli4_cq_event_pool_create(phba);
+ if (rc)
+ goto out_free_bsmbx;
+
+ /* Initialize sgl lists per host */
+ lpfc_init_sgl_list(phba);
+
+ /* Allocate and initialize active sgl array */
+ rc = lpfc_init_active_sgl_array(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1430 Failed to initialize sgl list.\n");
+ goto out_destroy_cq_event_pool;
+ }
+ rc = lpfc_sli4_init_rpi_hdrs(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1432 Failed to initialize rpi headers.\n");
+ goto out_free_active_sgl;
+ }
+
+ /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
+ longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+ phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->fcf.fcf_rr_bmask) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2759 Failed allocate memory for FCF round "
+ "robin failover bmask\n");
+ rc = -ENOMEM;
+ goto out_remove_rpi_hdrs;
+ }
+
+ phba->sli4_hba.fcp_eq_hdl =
+ kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+ (fof_vectors + phba->cfg_fcp_io_channel)),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_eq_hdl) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2572 Failed allocate memory for "
+ "fast-path per-EQ handle array\n");
+ rc = -ENOMEM;
+ goto out_free_fcf_rr_bmask;
+ }
+
+ phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
+ (fof_vectors +
+ phba->cfg_fcp_io_channel)), GFP_KERNEL);
+ if (!phba->sli4_hba.msix_entries) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2573 Failed allocate memory for msi-x "
+ "interrupt vector entries\n");
+ rc = -ENOMEM;
+ goto out_free_fcp_eq_hdl;
+ }
+
+ phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
+ phba->sli4_hba.num_present_cpu),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.cpu_map) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3327 Failed allocate memory for msi-x "
+ "interrupt vector mapping\n");
+ rc = -ENOMEM;
+ goto out_free_msix;
+ }
+ if (lpfc_used_cpu == NULL) {
+ lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
+ GFP_KERNEL);
+ if (!lpfc_used_cpu) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3335 Failed allocate memory for msi-x "
+ "interrupt vector mapping\n");
+ kfree(phba->sli4_hba.cpu_map);
+ rc = -ENOMEM;
+ goto out_free_msix;
+ }
+ for (i = 0; i < lpfc_present_cpu; i++)
+ lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
+ }
+
+ /* Initialize io channels for round robin */
+ cpup = phba->sli4_hba.cpu_map;
+ rc = 0;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ cpup->channel_id = rc;
+ rc++;
+ if (rc >= phba->cfg_fcp_io_channel)
+ rc = 0;
+ }
+
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3020 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
+ return 0;
+
+out_free_msix:
+ kfree(phba->sli4_hba.msix_entries);
+out_free_fcp_eq_hdl:
+ kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+ kfree(phba->fcf.fcf_rr_bmask);
+out_remove_rpi_hdrs:
+ lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+ lpfc_free_active_sgl(phba);
+out_destroy_cq_event_pool:
+ lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_bsmbx:
+ lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+ lpfc_mem_free(phba);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-4 HBA device it attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+
+ /* Free memory allocated for msi-x interrupt vector to CPU mapping */
+ kfree(phba->sli4_hba.cpu_map);
+ phba->sli4_hba.num_present_cpu = 0;
+ phba->sli4_hba.num_online_cpu = 0;
+ phba->sli4_hba.curr_disp_cpu = 0;
+
+ /* Free memory allocated for msi-x interrupt vector entries */
+ kfree(phba->sli4_hba.msix_entries);
+
+ /* Free memory allocated for fast-path work queue handles */
+ kfree(phba->sli4_hba.fcp_eq_hdl);
+
+ /* Free the allocated rpi headers. */
+ lpfc_sli4_remove_rpi_hdrs(phba);
+ lpfc_sli4_remove_rpis(phba);
+
+ /* Free eligible FCF index bmask */
+ kfree(phba->fcf.fcf_rr_bmask);
+
+ /* Free the ELS sgl list */
+ lpfc_free_active_sgl(phba);
+ lpfc_free_els_sgl_list(phba);
+
+ /* Free the completion queue EQ event pool */
+ lpfc_sli4_cq_event_release_all(phba);
+ lpfc_sli4_cq_event_pool_destroy(phba);
+
+ /* Release resource identifiers. */
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+
+ /* Free the bsmbx region. */
+ lpfc_destroy_bootstrap_mbox(phba);
+
+ /* Free the SLI Layer memory with SLI4 HBAs */
+ lpfc_mem_free_all(phba);
+
+ /* Free the current connect table */
+ list_for_each_entry_safe(conn_entry, next_conn_entry,
+ &phba->fcf_conn_rec_list, list) {
+ list_del_init(&conn_entry->list);
+ kfree(conn_entry);
+ }
+
+ return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+ phba->lpfc_hba_init_link = lpfc_hba_init_link;
+ phba->lpfc_hba_down_link = lpfc_hba_down_link;
+ phba->lpfc_selective_reset = lpfc_selective_reset;
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+ phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+ phba->lpfc_stop_port = lpfc_stop_port_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+ phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+ phba->lpfc_stop_port = lpfc_stop_port_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1431 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+ /*
+ * Driver resources common to all SLI revisions
+ */
+ atomic_set(&phba->fast_event_count, 0);
+ spin_lock_init(&phba->hbalock);
+
+ /* Initialize ndlp management spinlock */
+ spin_lock_init(&phba->ndlp_lock);
+
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->work_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+ /* Initialize the wait queue head for the kernel thread */
+ init_waitqueue_head(&phba->work_waitq);
+
+ /* Initialize the scsi buffer list used by driver for scsi IO */
+ spin_lock_init(&phba->scsi_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+ spin_lock_init(&phba->scsi_buf_list_put_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+
+ /* Initialize the fabric iocb list */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
+ /* Initialize FCF connection rec list */
+ INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+ /* Initialize OAS configuration list */
+ spin_lock_init(&phba->devicelock);
+ INIT_LIST_HEAD(&phba->luns);
+
+ return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+ int error;
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up after
+ * the device specific resource setup for supporting the HBA device it
+ * attached to.
+ **/
+static void
+lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
+{
+ /* Stop kernel worker thread */
+ kthread_stop(phba->worker_thread);
+}
+
+/**
+ * lpfc_free_iocb_list - Free iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's IOCB list and memory.
+ **/
+static void
+lpfc_free_iocb_list(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(iocbq_entry, iocbq_next,
+ &phba->lpfc_iocb_list, list) {
+ list_del(&iocbq_entry->list);
+ kfree(iocbq_entry);
+ phba->total_iocbq_bufs--;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ return;
+}
+
+/**
+ * lpfc_init_iocb_list - Allocate and initialize iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initizlize the driver's IOCB
+ * list and set up the IOCB tag array accordingly.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
+{
+ struct lpfc_iocbq *iocbq_entry = NULL;
+ uint16_t iotag;
+ int i;
+
+ /* Initialize and populate the iocb list per host. */
+ INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+ for (i = 0; i < iocb_count; i++) {
+ iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+ if (iocbq_entry == NULL) {
+ printk(KERN_ERR "%s: only allocated %d iocbs of "
+ "expected %d count. Unloading driver.\n",
+ __func__, i, LPFC_IOCB_LIST_CNT);
+ goto out_free_iocbq;
+ }
+
+ iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
+ if (iotag == 0) {
+ kfree(iocbq_entry);
+ printk(KERN_ERR "%s: failed to allocate IOTAG. "
+ "Unloading driver.\n", __func__);
+ goto out_free_iocbq;
+ }
+ iocbq_entry->sli4_lxritag = NO_XRI;
+ iocbq_entry->sli4_xritag = NO_XRI;
+
+ spin_lock_irq(&phba->hbalock);
+ list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+ phba->total_iocbq_bufs++;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ return 0;
+
+out_free_iocbq:
+ lpfc_free_iocb_list(phba);
+
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_free_sgl_list - Free a given sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ * @sglq_list: pointer to the head of sgl list.
+ *
+ * This routine is invoked to free a give sgl list and memory.
+ **/
+void
+lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+
+ list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
+ list_del(&sglq_entry->list);
+ lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+}
+
+/**
+ * lpfc_free_els_sgl_list - Free els sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's els sgl list and memory.
+ **/
+static void
+lpfc_free_els_sgl_list(struct lpfc_hba *phba)
+{
+ LIST_HEAD(sglq_list);
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ /* Retrieve all els sgls from driver list */
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Now free the sgl list */
+ lpfc_free_sgl_list(phba, &sglq_list);
+}
+
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+ int size;
+ size = sizeof(struct lpfc_sglq *);
+ size *= phba->sli4_hba.max_cfg_param.max_xri;
+
+ phba->sli4_hba.lpfc_sglq_active_list =
+ kzalloc(size, GFP_KERNEL);
+ if (!phba->sli4_hba.lpfc_sglq_active_list)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a place holder for now.
+ **/
+static void
+lpfc_free_active_sgl(struct lpfc_hba *phba)
+{
+ kfree(phba->sli4_hba.lpfc_sglq_active_list);
+}
+
+/**
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initizlize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
+ *
+ **/
+static void
+lpfc_init_sgl_list(struct lpfc_hba *phba)
+{
+ /* Initialize and populate the sglq list per host/VF. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ /* els xri-sgl book keeping */
+ phba->sli4_hba.els_xri_cnt = 0;
+
+ /* scsi xri-buffer book keeping */
+ phba->sli4_hba.scsi_xri_cnt = 0;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * port for those SLI4 ports that do not support extents. This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
+ * and should be called only when interrupts are disabled.
+ *
+ * Return codes
+ * 0 - successful
+ * -ERROR - otherwise.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+ int rc = 0;
+ struct lpfc_rpi_hdr *rpi_hdr;
+
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
+ rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+ if (!rpi_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0391 Error during rpi post operation\n");
+ lpfc_sli4_remove_rpis(phba);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and stores them in the phba. This single region
+ * provides support for up to 64 rpis. The region is used globally
+ * by the device.
+ *
+ * Returns:
+ * A valid rpi hdr on success.
+ * A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+ uint16_t rpi_limit, curr_rpi_range;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_rpi_hdr *rpi_hdr;
+ uint32_t rpi_count;
+
+ /*
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return NULL;
+ if (phba->sli4_hba.extents_in_use)
+ return NULL;
+
+ /* The limit on the logical index is just the max_rpi count. */
+ rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+ spin_lock_irq(&phba->hbalock);
+ /*
+ * Establish the starting RPI in this header block. The starting
+ * rpi is normalized to a zero base because the physical rpi is
+ * port based.
+ */
+ curr_rpi_range = phba->sli4_hba.next_rpi;
+ spin_unlock_irq(&phba->hbalock);
+
+ /*
+ * The port has a limited number of rpis. The increment here
+ * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
+ * and to allow the full max_rpi range per port.
+ */
+ if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
+ rpi_count = rpi_limit - curr_rpi_range;
+ else
+ rpi_count = LPFC_RPI_HDR_COUNT;
+
+ if (!rpi_count)
+ return NULL;
+ /*
+ * First allocate the protocol header region for the port. The
+ * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+ */
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys, GFP_KERNEL);
+ if (!dmabuf->virt) {
+ rpi_hdr = NULL;
+ goto err_free_dmabuf;
+ }
+
+ if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+ rpi_hdr = NULL;
+ goto err_free_coherent;
+ }
+
+ /* Save the rpi header data for cleanup later. */
+ rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+ if (!rpi_hdr)
+ goto err_free_coherent;
+
+ rpi_hdr->dmabuf = dmabuf;
+ rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+ rpi_hdr->page_count = 1;
+ spin_lock_irq(&phba->hbalock);
+
+ /* The rpi_hdr stores the logical index only. */
+ rpi_hdr->start_rpi = curr_rpi_range;
+ list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+ /*
+ * The next_rpi stores the next logical module-64 rpi value used
+ * to post physical rpis in subsequent rpi postings.
+ */
+ phba->sli4_hba.next_rpi += rpi_count;
+ spin_unlock_irq(&phba->hbalock);
+ return rpi_hdr;
+
+ err_free_coherent:
+ dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+ kfree(dmabuf);
+ return NULL;
+}
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+ struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+
+ list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+ &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ list_del(&rpi_hdr->list);
+ dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+ rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+ kfree(rpi_hdr->dmabuf);
+ kfree(rpi_hdr);
+ }
+ exit:
+ /* There are no rpis available to the port now. */
+ phba->sli4_hba.next_rpi = 0;
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ * pointer to @phba - successful
+ * NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+ struct lpfc_hba *phba;
+
+ /* Allocate memory for HBA structure */
+ phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+ if (!phba) {
+ dev_err(&pdev->dev, "failed to allocate hba struct\n");
+ return NULL;
+ }
+
+ /* Set reference to PCI device in HBA structure */
+ phba->pcidev = pdev;
+
+ /* Assign an unused board number */
+ phba->brd_no = lpfc_get_instance();
+ if (phba->brd_no < 0) {
+ kfree(phba);
+ return NULL;
+ }
+
+ spin_lock_init(&phba->ct_ev_lock);
+ INIT_LIST_HEAD(&phba->ct_ev_waiters);
+
+ return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+ /* Release the driver assigned board number */
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+
+ /* Free memory allocated with sli rings */
+ kfree(phba->sli.ring);
+ phba->sli.ring = NULL;
+
+ kfree(phba);
+ return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
+
+ /* Initialize HBA FC structure */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ phba->fc_altov = FF_DEF_ALTOV;
+ phba->fc_arbtov = FF_DEF_ARBTOV;
+
+ atomic_set(&phba->sdev_cnt, 0);
+ vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+ if (!vport)
+ return -ENODEV;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba->pport = vport;
+ lpfc_debugfs_initialize(vport);
+ /* Put reference to SCSI host to driver's device private data */
+ pci_set_drvdata(phba->pcidev, shost);
+
+ return 0;
+}
+
+/**
+ * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to destroy HBA physical port and the associated
+ * SCSI host.
+ **/
+static void
+lpfc_destroy_shost(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+
+ /* Destroy physical port that associated with the SCSI host */
+ destroy_port(vport);
+
+ return;
+}
+
+/**
+ * lpfc_setup_bg - Setup Block guard structures and debug areas.
+ * @phba: pointer to lpfc hba data structure.
+ * @shost: the shost to be used to detect Block guard settings.
+ *
+ * This routine sets up the local Block guard protocol settings for @shost.
+ * This routine also allocates memory for debugging bg buffers.
+ **/
+static void
+lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
+{
+ uint32_t old_mask;
+ uint32_t old_guard;
+
+ int pagecnt = 10;
+ if (lpfc_prot_mask && lpfc_prot_guard) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1478 Registering BlockGuard with the "
+ "SCSI layer\n");
+
+ old_mask = lpfc_prot_mask;
+ old_guard = lpfc_prot_guard;
+
+ /* Only allow supported values */
+ lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION);
+ lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+
+ /* DIF Type 1 protection for profiles AST1/C1 is end to end */
+ if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+ lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+ if (lpfc_prot_mask && lpfc_prot_guard) {
+ if ((old_mask != lpfc_prot_mask) ||
+ (old_guard != lpfc_prot_guard))
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1475 Registering BlockGuard with the "
+ "SCSI layer: mask %d guard %d\n",
+ lpfc_prot_mask, lpfc_prot_guard);
+
+ scsi_host_set_prot(shost, lpfc_prot_mask);
+ scsi_host_set_guard(shost, lpfc_prot_guard);
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1479 Not Registering BlockGuard with the SCSI "
+ "layer, Bad protection parameters: %d %d\n",
+ old_mask, old_guard);
+ }
+
+ if (!_dump_buf_data) {
+ while (pagecnt) {
+ spin_lock_init(&_dump_buf_lock);
+ _dump_buf_data =
+ (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+ if (_dump_buf_data) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9043 BLKGRD: allocated %d pages for "
+ "_dump_buf_data at 0x%p\n",
+ (1 << pagecnt), _dump_buf_data);
+ _dump_buf_data_order = pagecnt;
+ memset(_dump_buf_data, 0,
+ ((1 << PAGE_SHIFT) << pagecnt));
+ break;
+ } else
+ --pagecnt;
+ }
+ if (!_dump_buf_data_order)
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9044 BLKGRD: ERROR unable to allocate "
+ "memory for hexdump\n");
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
+ "\n", _dump_buf_data);
+ if (!_dump_buf_dif) {
+ while (pagecnt) {
+ _dump_buf_dif =
+ (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+ if (_dump_buf_dif) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9046 BLKGRD: allocated %d pages for "
+ "_dump_buf_dif at 0x%p\n",
+ (1 << pagecnt), _dump_buf_dif);
+ _dump_buf_dif_order = pagecnt;
+ memset(_dump_buf_dif, 0,
+ ((1 << PAGE_SHIFT) << pagecnt));
+ break;
+ } else
+ --pagecnt;
+ }
+ if (!_dump_buf_dif_order)
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9047 BLKGRD: ERROR unable to allocate "
+ "memory for hexdump\n");
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
+ _dump_buf_dif);
+}
+
+/**
+ * lpfc_post_init_setup - Perform necessary device post initialization setup.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to perform all the necessary post initialization
+ * setup for the device.
+ **/
+static void
+lpfc_post_init_setup(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_adapter_event_header adapter_event;
+
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+ /*
+ * hba setup may have changed the hba_queue_depth so we need to
+ * adjust the value of can_queue.
+ */
+ shost = pci_get_drvdata(phba->pcidev);
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+ lpfc_setup_bg(phba, shost);
+
+ lpfc_host_attrib_init(shost);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ spin_lock_irq(shost->host_lock);
+ lpfc_poll_start_timer(phba);
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0428 Perform SCSI scan\n");
+ /* Send board arrival event to upper layer */
+ adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+ adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(adapter_event),
+ (char *) &adapter_event,
+ LPFC_NL_VENDOR_ID);
+ return;
+}
+
+/**
+ * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-3 interface spec.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ unsigned long bar0map_len, bar2map_len;
+ int i, hbq_count;
+ void *ptr;
+ int error = -ENODEV;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return error;
+ else
+ pdev = phba->pcidev;
+
+ /* Set the device DMA mask size */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
+ return error;
+ }
+ }
+
+ /* Get the bus address of Bar0 and Bar2 and the number of bytes
+ * required by each mapping.
+ */
+ phba->pci_bar0_map = pci_resource_start(pdev, 0);
+ bar0map_len = pci_resource_len(pdev, 0);
+
+ phba->pci_bar2_map = pci_resource_start(pdev, 2);
+ bar2map_len = pci_resource_len(pdev, 2);
+
+ /* Map HBA SLIM to a kernel virtual address. */
+ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+ if (!phba->slim_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLIM memory.\n");
+ goto out;
+ }
+
+ /* Map HBA Control Registers to a kernel virtual address. */
+ phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+ if (!phba->ctrl_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for HBA control registers.\n");
+ goto out_iounmap_slim;
+ }
+
+ /* Allocate memory for SLI-2 structures */
+ phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ &phba->slim2p.phys, GFP_KERNEL);
+ if (!phba->slim2p.virt)
+ goto out_iounmap;
+
+ phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->mbox_ext = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, mbx_ext_words));
+ phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+ phba->IOCBs = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, IOCBs));
+
+ phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
+ lpfc_sli_hbq_size(),
+ &phba->hbqslimp.phys,
+ GFP_KERNEL);
+ if (!phba->hbqslimp.virt)
+ goto out_free_slim;
+
+ hbq_count = lpfc_sli_hbq_count();
+ ptr = phba->hbqslimp.virt;
+ for (i = 0; i < hbq_count; ++i) {
+ phba->hbqs[i].hbq_virt = ptr;
+ INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+ ptr += (lpfc_hbq_defs[i]->entry_count *
+ sizeof(struct lpfc_hbq_entry));
+ }
+ phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
+ phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
+
+ memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
+ INIT_LIST_HEAD(&phba->rb_pend_list);
+
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+ phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+ phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ return 0;
+
+out_free_slim:
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
+out_iounmap:
+ iounmap(phba->ctrl_regs_memmap_p);
+out_iounmap_slim:
+ iounmap(phba->slim_memmap_p);
+out:
+ return error;
+}
+
+/**
+ * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return;
+ else
+ pdev = phba->pcidev;
+
+ /* Free coherent DMA memory allocated */
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
+
+ /* I/O memory unmap */
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
+ * done and check status.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
+{
+ struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
+ struct lpfc_register reg_data;
+ int i, port_error = 0;
+ uint32_t if_type;
+
+ memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+ memset(&reg_data, 0, sizeof(reg_data));
+ if (!phba->sli4_hba.PSMPHRregaddr)
+ return -ENODEV;
+
+ /* Wait up to 30 seconds for the SLI Port POST done and ready */
+ for (i = 0; i < 3000; i++) {
+ if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr_reg.word0) ||
+ (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
+ /* Port has a fatal POST error, break out */
+ port_error = -ENODEV;
+ break;
+ }
+ if (LPFC_POST_STAGE_PORT_READY ==
+ bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
+ break;
+ msleep(10);
+ }
+
+ /*
+ * If there was a port error during POST, then don't proceed with
+ * other register reads as the data may not be valid. Just exit.
+ */
+ if (port_error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1408 Port Failed POST - portsmphr=0x%x, "
+ "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
+ "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
+ portsmphr_reg.word0,
+ bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
+ bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2534 Device Info: SLIFamily=0x%x, "
+ "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
+ "SLIHint_2=0x%x, FT=0x%x\n",
+ bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_slirev,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_sli_hint1,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_sli_hint2,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_func_type,
+ &phba->sli4_hba.sli_intf));
+ /*
+ * Check for other Port errors during the initialization
+ * process. Fail the load if the port did not come up
+ * correctly.
+ */
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ phba->sli4_hba.ue_mask_lo =
+ readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
+ phba->sli4_hba.ue_mask_hi =
+ readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
+ uerrlo_reg.word0 =
+ readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
+ uerrhi_reg.word0 =
+ readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+ if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+ (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1422 Unrecoverable Error "
+ "Detected during POST "
+ "uerr_lo_reg=0x%x, "
+ "uerr_hi_reg=0x%x, "
+ "ue_mask_lo_reg=0x%x, "
+ "ue_mask_hi_reg=0x%x\n",
+ uerrlo_reg.word0,
+ uerrhi_reg.word0,
+ phba->sli4_hba.ue_mask_lo,
+ phba->sli4_hba.ue_mask_hi);
+ port_error = -ENODEV;
+ }
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ /* Final checks. The port status should be clean. */
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &reg_data.word0) ||
+ (bf_get(lpfc_sliport_status_err, &reg_data) &&
+ !bf_get(lpfc_sliport_status_rn, &reg_data))) {
+ phba->work_status[0] =
+ readl(phba->sli4_hba.u.if_type2.
+ ERR1regaddr);
+ phba->work_status[1] =
+ readl(phba->sli4_hba.u.if_type2.
+ ERR2regaddr);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2888 Unrecoverable port error "
+ "following POST: port status reg "
+ "0x%x, port_smphr reg 0x%x, "
+ "error 1=0x%x, error 2=0x%x\n",
+ reg_data.word0,
+ portsmphr_reg.word0,
+ phba->work_status[0],
+ phba->work_status[1]);
+ port_error = -ENODEV;
+ }
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ break;
+ }
+ }
+ return port_error;
+}
+
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @if_type: The SLI4 interface type getting configured.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
+{
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ phba->sli4_hba.u.if_type0.UERRLOregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
+ phba->sli4_hba.u.if_type0.UERRHIregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
+ phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
+ phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
+ phba->sli4_hba.SLIINTFregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ phba->sli4_hba.u.if_type2.ERR1regaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
+ phba->sli4_hba.u.if_type2.ERR2regaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
+ phba->sli4_hba.u.if_type2.CTRLregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
+ phba->sli4_hba.u.if_type2.STATUSregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
+ phba->sli4_hba.SLIINTFregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+ phba->sli4_hba.PSMPHRregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
+ phba->sli4_hba.RQDBregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_RQ_DOORBELL;
+ phba->sli4_hba.WQDBregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_WQ_DOORBELL;
+ phba->sli4_hba.EQCQDBregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
+ phba->sli4_hba.MQDBregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
+ phba->sli4_hba.BMBXregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ dev_printk(KERN_ERR, &phba->pcidev->dev,
+ "FATAL - unsupported SLI4 interface type - %d\n",
+ if_type);
+ break;
+ }
+}
+
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
+ phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_SLIPORT_IF0_SMPHR;
+ phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_ISR0;
+ phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_IMR0;
+ phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_ISCR0;
+}
+
+/**
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
+ *
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given viftual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+static int
+lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
+{
+ if (vf > LPFC_VIR_FUNC_MAX)
+ return -ENODEV;
+
+ phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_RQ_DOORBELL);
+ phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_WQ_DOORBELL);
+ phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+ phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
+ phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
+ return 0;
+}
+
+/**
+ * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create the bootstrap mailbox
+ * region consistent with the SLI-4 interface spec. This
+ * routine allocates all memory necessary to communicate
+ * mailbox commands to the port and sets up all alignment
+ * needs. No locks are expected to be held when calling
+ * this routine.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - could not allocated memory.
+ **/
+static int
+lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
+{
+ uint32_t bmbx_size;
+ struct lpfc_dmabuf *dmabuf;
+ struct dma_address *dma_address;
+ uint32_t pa_addr;
+ uint64_t phys_addr;
+
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ /*
+ * The bootstrap mailbox region is comprised of 2 parts
+ * plus an alignment restriction of 16 bytes.
+ */
+ bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
+ &dmabuf->phys, GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return -ENOMEM;
+ }
+
+ /*
+ * Initialize the bootstrap mailbox pointers now so that the register
+ * operations are simple later. The mailbox dma address is required
+ * to be 16-byte aligned. Also align the virtual memory as each
+ * maibox is copied into the bmbx mailbox region before issuing the
+ * command to the port.
+ */
+ phba->sli4_hba.bmbx.dmabuf = dmabuf;
+ phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+ phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+ LPFC_ALIGN_16_BYTE);
+ phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+ LPFC_ALIGN_16_BYTE);
+
+ /*
+ * Set the high and low physical addresses now. The SLI4 alignment
+ * requirement is 16 bytes and the mailbox is posted to the port
+ * as two 30-bit addresses. The other data is a bit marking whether
+ * the 30-bit address is the high or low address.
+ * Upcast bmbx aphys to 64bits so shift instruction compiles
+ * clean on 32 bit machines.
+ */
+ dma_address = &phba->sli4_hba.bmbx.dma_address;
+ phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+ pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+ dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+ LPFC_BMBX_BIT1_ADDR_HI);
+
+ pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+ dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+ LPFC_BMBX_BIT1_ADDR_LO);
+ return 0;
+}
+
+/**
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to teardown the bootstrap mailbox
+ * region and release all host resources. This routine requires
+ * the caller to ensure all mailbox commands recovered, no
+ * additional mailbox comands are sent, and interrupts are disabled
+ * before calling this routine.
+ *
+ **/
+static void
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
+{
+ dma_free_coherent(&phba->pcidev->dev,
+ phba->sli4_hba.bmbx.bmbx_size,
+ phba->sli4_hba.bmbx.dmabuf->virt,
+ phba->sli4_hba.bmbx.dmabuf->phys);
+
+ kfree(phba->sli4_hba.bmbx.dmabuf);
+ memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
+}
+
+/**
+ * lpfc_sli4_read_config - Get the config parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
+ * allocation for the port.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_read_config(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb;
+ struct lpfc_mbx_read_config *rd_config;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_mbx_get_func_cfg *get_func_cfg;
+ struct lpfc_rsrc_desc_fcfcoe *desc;
+ char *pdesc_0;
+ int length, i, rc = 0, rc2;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2011 Unable to allocate memory for issuing "
+ "SLI_CONFIG_SPECIAL mailbox command\n");
+ return -ENOMEM;
+ }
+
+ lpfc_read_config(phba, pmb);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2012 Mailbox failed , mbxCmd x%x "
+ "READ_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ } else {
+ rd_config = &pmb->u.mqe.un.rd_config;
+ if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3081 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3082 Mailbox (x%x) returned ldv:x0\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe));
+ phba->sli4_hba.extents_in_use =
+ bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+ phba->sli4_hba.max_cfg_param.max_xri =
+ bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ phba->sli4_hba.max_cfg_param.xri_base =
+ bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_vpi =
+ bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.vpi_base =
+ bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_rpi =
+ bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.rpi_base =
+ bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_vfi =
+ bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.vfi_base =
+ bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_fcfi =
+ bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_eq =
+ bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_rq =
+ bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_wq =
+ bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_cq =
+ bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
+ phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
+ phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
+ phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
+ phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
+ phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
+ (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
+ phba->max_vports = phba->max_vpi;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2003 cfg params Extents? %d "
+ "XRI(B:%d M:%d), "
+ "VPI(B:%d M:%d) "
+ "VFI(B:%d M:%d) "
+ "RPI(B:%d M:%d) "
+ "FCFI(Count:%d)\n",
+ phba->sli4_hba.extents_in_use,
+ phba->sli4_hba.max_cfg_param.xri_base,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.max_cfg_param.vpi_base,
+ phba->sli4_hba.max_cfg_param.max_vpi,
+ phba->sli4_hba.max_cfg_param.vfi_base,
+ phba->sli4_hba.max_cfg_param.max_vfi,
+ phba->sli4_hba.max_cfg_param.rpi_base,
+ phba->sli4_hba.max_cfg_param.max_rpi,
+ phba->sli4_hba.max_cfg_param.max_fcfi);
+ }
+
+ if (rc)
+ goto read_cfg_out;
+
+ /* Reset the DFT_HBA_Q_DEPTH to the max xri */
+ length = phba->sli4_hba.max_cfg_param.max_xri -
+ lpfc_sli4_get_els_iocb_cnt(phba);
+ if (phba->cfg_hba_queue_depth > length) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3361 HBA queue depth changed from %d to %d\n",
+ phba->cfg_hba_queue_depth, length);
+ phba->cfg_hba_queue_depth = length;
+ }
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ goto read_cfg_out;
+
+ /* get the pf# and vf# for SLI4 if_type 2 port */
+ length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc2 || shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3026 Mailbox failed , mbxCmd x%x "
+ "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ goto read_cfg_out;
+ }
+
+ /* search for fc_fcoe resrouce descriptor */
+ get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+
+ pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
+ length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
+ if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
+ length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
+ else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
+ goto read_cfg_out;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
+ if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+ bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
+ phba->sli4_hba.iov.pf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+ phba->sli4_hba.iov.vf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM)
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+ "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+ phba->sli4_hba.iov.vf_number);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3028 GET_FUNCTION_CONFIG: failed to find "
+ "Resrouce Descriptor:x%x\n",
+ LPFC_RSRC_DESC_TYPE_FCFCOE);
+
+read_cfg_out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to setup the port-side endian order when
+ * the port if_type is 0. This routine has no function for other
+ * if_types.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_setup_endian_order(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t if_type, rc = 0;
+ uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
+ HOST_ENDIAN_HIGH_WORD1};
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0492 Unable to allocate memory for "
+ "issuing SLI_CONFIG_SPECIAL mailbox "
+ "command\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The SLI4_CONFIG_SPECIAL mailbox command requires the first
+ * two words to contain special data values and no other data.
+ */
+ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+ memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0493 SLI_CONFIG_SPECIAL mailbox "
+ "failed with status x%x\n",
+ rc);
+ rc = -EIO;
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to check the user settable queue counts for EQs and
+ * CQs. after this routine is called the counts will be set to valid values that
+ * adhere to the constraints of the system's interrupt vectors and the port's
+ * queue resources.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_queue_verify(struct lpfc_hba *phba)
+{
+ int cfg_fcp_io_channel;
+ uint32_t cpu;
+ uint32_t i = 0;
+ int fof_vectors = phba->cfg_fof ? 1 : 0;
+
+ /*
+ * Sanity check for configured queue parameters against the run-time
+ * device parameters
+ */
+
+ /* Sanity check on HBA EQ parameters */
+ cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
+
+ /* It doesn't make sense to have more io channels then online CPUs */
+ for_each_present_cpu(cpu) {
+ if (cpu_online(cpu))
+ i++;
+ }
+ phba->sli4_hba.num_online_cpu = i;
+ phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.curr_disp_cpu = 0;
+
+ if (i < cfg_fcp_io_channel) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "3188 Reducing IO channels to match number of "
+ "online CPUs: from %d to %d\n",
+ cfg_fcp_io_channel, i);
+ cfg_fcp_io_channel = i;
+ }
+
+ if (cfg_fcp_io_channel + fof_vectors >
+ phba->sli4_hba.max_cfg_param.max_eq) {
+ if (phba->sli4_hba.max_cfg_param.max_eq <
+ LPFC_FCP_IO_CHAN_MIN) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2574 Not enough EQs (%d) from the "
+ "pci function for supporting FCP "
+ "EQs (%d)\n",
+ phba->sli4_hba.max_cfg_param.max_eq,
+ phba->cfg_fcp_io_channel);
+ goto out_error;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2575 Reducing IO channels to match number of "
+ "available EQs: from %d to %d\n",
+ cfg_fcp_io_channel,
+ phba->sli4_hba.max_cfg_param.max_eq);
+ cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+ fof_vectors;
+ }
+
+ /* The actual number of FCP event queues adopted */
+ phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
+
+ /* Get EQ depth from module parameter, fake the default for now */
+ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+ phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+
+ /* Get CQ depth from module parameter, fake the default for now */
+ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+ phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+ return 0;
+out_error:
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No availble memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+ int idx;
+
+ /*
+ * Create HBA Record arrays.
+ */
+ if (!phba->cfg_fcp_io_channel)
+ return -ERANGE;
+
+ phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+ phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+ phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+ phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+ phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+ phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+ phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.hba_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2576 Failed allocate memory for "
+ "fast-path EQ record array\n");
+ goto out_error;
+ }
+
+ phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2577 Failed allocate memory for fast-path "
+ "CQ record array\n");
+ goto out_error;
+ }
+
+ phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2578 Failed allocate memory for fast-path "
+ "WQ record array\n");
+ goto out_error;
+ }
+
+ /*
+ * Since the first EQ can have multiple CQs associated with it,
+ * this array is used to quickly see if we have a FCP fast-path
+ * CQ match.
+ */
+ phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
+ phba->cfg_fcp_io_channel), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq_map) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2545 Failed allocate memory for fast-path "
+ "CQ map\n");
+ goto out_error;
+ }
+
+ /*
+ * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
+ * how many EQs to create.
+ */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+
+ /* Create EQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0497 Failed allocate EQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.hba_eq[idx] = qdesc;
+
+ /* Create Fast Path FCP CQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0499 Failed allocate fast-path FCP "
+ "CQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_cq[idx] = qdesc;
+
+ /* Create Fast Path FCP WQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0503 Failed allocate fast-path FCP "
+ "WQ (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_wq[idx] = qdesc;
+ }
+
+
+ /*
+ * Create Slow Path Completion Queues (CQs)
+ */
+
+ /* Create slow-path Mailbox Command Complete Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0500 Failed allocate slow-path mailbox CQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.mbx_cq = qdesc;
+
+ /* Create slow-path ELS Complete Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0501 Failed allocate slow-path ELS CQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.els_cq = qdesc;
+
+
+ /*
+ * Create Slow Path Work Queues (WQs)
+ */
+
+ /* Create Mailbox Command Queue */
+
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ phba->sli4_hba.mq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0505 Failed allocate slow-path MQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.mbx_wq = qdesc;
+
+ /*
+ * Create ELS Work Queues
+ */
+
+ /* Create slow-path ELS Work Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0504 Failed allocate slow-path ELS WQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.els_wq = qdesc;
+
+ /*
+ * Create Receive Queue (RQ)
+ */
+
+ /* Create Receive Queue for header */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0506 Failed allocate receive HRQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.hdr_rq = qdesc;
+
+ /* Create Receive Queue for data */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0507 Failed allocate receive DRQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.dat_rq = qdesc;
+
+ /* Create the Queues needed for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_create(phba);
+ return 0;
+
+out_error:
+ lpfc_sli4_queue_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues with the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+ int idx;
+
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
+
+ if (phba->sli4_hba.hba_eq != NULL) {
+ /* Release HBA event queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.hba_eq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.hba_eq[idx]);
+ phba->sli4_hba.hba_eq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.hba_eq);
+ phba->sli4_hba.hba_eq = NULL;
+ }
+
+ if (phba->sli4_hba.fcp_cq != NULL) {
+ /* Release FCP completion queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.fcp_cq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[idx]);
+ phba->sli4_hba.fcp_cq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
+ }
+
+ if (phba->sli4_hba.fcp_wq != NULL) {
+ /* Release FCP work queue */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ if (phba->sli4_hba.fcp_wq[idx] != NULL) {
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_wq[idx]);
+ phba->sli4_hba.fcp_wq[idx] = NULL;
+ }
+ }
+ kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
+ }
+
+ /* Release FCP CQ mapping array */
+ if (phba->sli4_hba.fcp_cq_map != NULL) {
+ kfree(phba->sli4_hba.fcp_cq_map);
+ phba->sli4_hba.fcp_cq_map = NULL;
+ }
+
+ /* Release mailbox command work queue */
+ if (phba->sli4_hba.mbx_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+ phba->sli4_hba.mbx_wq = NULL;
+ }
+
+ /* Release ELS work queue */
+ if (phba->sli4_hba.els_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+ phba->sli4_hba.els_wq = NULL;
+ }
+
+ /* Release unsolicited receive queue */
+ if (phba->sli4_hba.hdr_rq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+ phba->sli4_hba.hdr_rq = NULL;
+ }
+ if (phba->sli4_hba.dat_rq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+ phba->sli4_hba.dat_rq = NULL;
+ }
+
+ /* Release ELS complete queue */
+ if (phba->sli4_hba.els_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+ phba->sli4_hba.els_cq = NULL;
+ }
+
+ /* Release mailbox command complete queue */
+ if (phba->sli4_hba.mbx_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+ phba->sli4_hba.mbx_cq = NULL;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ int rc = -ENOMEM;
+ int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+ int fcp_cq_index = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t length;
+
+ /* Check for dual-ULP support */
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3249 Unable to allocate memory for "
+ "QUERY_FW_CFG mailbox command\n");
+ return -ENOMEM;
+ }
+ length = (sizeof(struct lpfc_mbx_query_fw_config) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3250 QUERY_FW_CFG mailbox failed with status "
+ "x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -ENXIO;
+ goto out_error;
+ }
+
+ phba->sli4_hba.fw_func_mode =
+ mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
+ phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
+ phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
+ "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+ phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /*
+ * Set up HBA Event Queues (EQs)
+ */
+
+ /* Set up HBA event queue */
+ if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3147 Fast-path EQs not allocated\n");
+ rc = -ENOMEM;
+ goto out_error;
+ }
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+ if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0522 Fast-path EQ (%d) not "
+ "allocated\n", fcp_eqidx);
+ rc = -ENOMEM;
+ goto out_destroy_hba_eq;
+ }
+ rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
+ (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0523 Failed setup of fast-path EQ "
+ "(%d), rc = 0x%x\n", fcp_eqidx,
+ (uint32_t)rc);
+ goto out_destroy_hba_eq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2584 HBA EQ setup: "
+ "queue[%d]-id=%d\n", fcp_eqidx,
+ phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
+ }
+
+ /* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3148 Fast-path FCP CQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_hba_eq;
+ }
+
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
+ if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0526 Fast-path FCP CQ (%d) not "
+ "allocated\n", fcp_cqidx);
+ rc = -ENOMEM;
+ goto out_destroy_fcp_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0527 Failed setup of fast-path FCP "
+ "CQ (%d), rc = 0x%x\n", fcp_cqidx,
+ (uint32_t)rc);
+ goto out_destroy_fcp_cq;
+ }
+
+ /* Setup fcp_cq_map for fast lookup */
+ phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2588 FCP CQ setup: cq[%d]-id=%d, "
+ "parent seq[%d]-id=%d\n",
+ fcp_cqidx,
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ fcp_cqidx,
+ phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
+ }
+
+ /* Set up fast-path FCP Work Queue */
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3149 Fast-path FCP WQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_fcp_cq;
+ }
+
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0534 Fast-path FCP WQ (%d) not "
+ "allocated\n", fcp_wqidx);
+ rc = -ENOMEM;
+ goto out_destroy_fcp_wq;
+ }
+ rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+ phba->sli4_hba.fcp_cq[fcp_wqidx],
+ LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0535 Failed setup of fast-path FCP "
+ "WQ (%d), rc = 0x%x\n", fcp_wqidx,
+ (uint32_t)rc);
+ goto out_destroy_fcp_wq;
+ }
+
+ /* Bind this WQ to the next FCP ring */
+ pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
+ pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
+ phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2591 FCP WQ setup: wq[%d]-id=%d, "
+ "parent cq[%d]-id=%d\n",
+ fcp_wqidx,
+ phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cq_index,
+ phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
+ }
+ /*
+ * Set up Complete Queues (CQs)
+ */
+
+ /* Set up slow-path MBOX Complete Queue as the first CQ */
+ if (!phba->sli4_hba.mbx_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0528 Mailbox CQ not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_fcp_wq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
+ phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0529 Failed setup of slow-path mailbox CQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy_fcp_wq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.hba_eq[0]->queue_id);
+
+ /* Set up slow-path ELS Complete Queue */
+ if (!phba->sli4_hba.els_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0530 ELS CQ not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_mbx_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
+ phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0531 Failed setup of slow-path ELS CQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy_mbx_cq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.hba_eq[0]->queue_id);
+
+ /*
+ * Set up all the Work Queues (WQs)
+ */
+
+ /* Set up Mailbox Command Queue */
+ if (!phba->sli4_hba.mbx_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0538 Slow-path MQ not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_cq;
+ }
+ rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
+ phba->sli4_hba.mbx_cq, LPFC_MBOX);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0539 Failed setup of slow-path MQ: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_els_cq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_cq->queue_id);
+
+ /* Set up slow-path ELS Work Queue */
+ if (!phba->sli4_hba.els_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0536 Slow-path ELS WQ not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_mbx_wq;
+ }
+ rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
+ phba->sli4_hba.els_cq, LPFC_ELS);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0537 Failed setup of slow-path ELS WQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy_mbx_wq;
+ }
+
+ /* Bind this WQ to the ELS ring */
+ pring = &psli->ring[LPFC_ELS_RING];
+ pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
+ phba->sli4_hba.els_cq->pring = pring;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_cq->queue_id);
+
+ /*
+ * Create Receive Queue (RQ)
+ */
+ if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0540 Receive Queue not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_wq;
+ }
+
+ lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
+ lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
+
+ rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+ phba->sli4_hba.els_cq, LPFC_USOL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0541 Failed setup of Receive Queue: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy_fcp_wq;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
+ "parent cq-id=%d\n",
+ phba->sli4_hba.hdr_rq->queue_id,
+ phba->sli4_hba.dat_rq->queue_id,
+ phba->sli4_hba.els_cq->queue_id);
+
+ if (phba->cfg_fof) {
+ rc = lpfc_fof_queue_setup(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0549 Failed setup of FOF Queues: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_els_rq;
+ }
+ }
+
+ /*
+ * Configure EQ delay multipier for interrupt coalescing using
+ * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
+ */
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
+ fcp_eqidx += LPFC_MAX_EQ_DELAY)
+ lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
+ return 0;
+
+out_destroy_els_rq:
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+out_destroy_els_wq:
+ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+out_destroy_mbx_wq:
+ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+out_destroy_els_cq:
+ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+out_destroy_mbx_cq:
+ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+out_destroy_fcp_wq:
+ for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_fcp_cq:
+ for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_hba_eq:
+ for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
+out_error:
+ return rc;
+}
+
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+ int fcp_qidx;
+
+ /* Unset the queues created for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
+ /* Unset mailbox command work queue */
+ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+ /* Unset ELS work queue */
+ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+ /* Unset unsolicited receive queue */
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+ /* Unset FCP work queue */
+ if (phba->sli4_hba.fcp_wq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ }
+ /* Unset mailbox command complete queue */
+ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+ /* Unset ELS complete queue */
+ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+ /* Unset FCP response complete queue */
+ if (phba->sli4_hba.fcp_cq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ }
+ /* Unset fast-path event queue */
+ if (phba->sli4_hba.hba_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+ fcp_qidx++)
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
+ }
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of the completion queue event is a completion queue entry
+ * CQE. For now, this pool is used for the interrupt service routine to queue
+ * the following HBA completion queue events for the worker thread to process:
+ * - Mailbox asynchronous events
+ * - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+ int i;
+
+ for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+ cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+ if (!cq_event)
+ goto out_pool_create_fail;
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_cqe_event_pool);
+ }
+ return 0;
+
+out_pool_create_fail:
+ lpfc_sli4_cq_event_pool_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that, it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event, *next_cq_event;
+
+ list_for_each_entry_safe(cq_event, next_cq_event,
+ &phba->sli4_hba.sp_cqe_event_pool, list) {
+ list_del(&cq_event->list);
+ kfree(cq_event);
+ }
+}
+
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ * NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event = NULL;
+
+ list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+ struct lpfc_cq_event, list);
+ return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ * NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cq_event = __lpfc_sli4_cq_event_alloc(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+ struct lpfc_cq_event *cq_event)
+{
+ list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+ struct lpfc_cq_event *cq_event)
+{
+ unsigned long iflags;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the pending completion-queue events to the
+ * back into the free pool for device reset.
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+ LIST_HEAD(cqelist);
+ struct lpfc_cq_event *cqe;
+ unsigned long iflags;
+
+ /* Retrieve all the pending WCQEs from pending WCQE lists */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Pending FCP XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+ &cqelist);
+ /* Pending ELS XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+ &cqelist);
+ /* Pending asynnc events */
+ list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+ &cqelist);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ while (!list_empty(&cqelist)) {
+ list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+ lpfc_sli4_cq_event_release(phba, cqe);
+ }
+}
+
+/**
+ * lpfc_pci_function_reset - Reset pci function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It will destroys
+ * all resources assigned to the PCI function which originates this request.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t rc = 0, if_type;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t rdy_chk;
+ uint32_t port_reset = 0;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_register reg_data;
+ uint16_t devid;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0494 Unable to allocate memory for "
+ "issuing SLI_FUNCTION_RESET mailbox "
+ "command\n");
+ return -ENOMEM;
+ }
+
+ /* Setup PCI function reset mailbox-ioctl command */
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+ LPFC_SLI4_MBX_EMBED);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0495 SLI_FUNCTION_RESET mailbox "
+ "failed with status x%x add_status x%x,"
+ " mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+wait:
+ /*
+ * Poll the Port Status Register and wait for RDY for
+ * up to 30 seconds. If the port doesn't respond, treat
+ * it as an error.
+ */
+ for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.
+ STATUSregaddr, &reg_data.word0)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+ break;
+ msleep(20);
+ }
+
+ if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
+ phba->work_status[0] = readl(
+ phba->sli4_hba.u.if_type2.ERR1regaddr);
+ phba->work_status[1] = readl(
+ phba->sli4_hba.u.if_type2.ERR2regaddr);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2890 Port not ready, port status reg "
+ "0x%x error 1=0x%x, error 2=0x%x\n",
+ reg_data.word0,
+ phba->work_status[0],
+ phba->work_status[1]);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ if (!port_reset) {
+ /*
+ * Reset the port now
+ */
+ reg_data.word0 = 0;
+ bf_set(lpfc_sliport_ctrl_end, &reg_data,
+ LPFC_SLIPORT_LITTLE_ENDIAN);
+ bf_set(lpfc_sliport_ctrl_ip, &reg_data,
+ LPFC_SLIPORT_INIT_PORT);
+ writel(reg_data.word0, phba->sli4_hba.u.if_type2.
+ CTRLregaddr);
+ /* flush */
+ pci_read_config_word(phba->pcidev,
+ PCI_DEVICE_ID, &devid);
+
+ port_reset = 1;
+ msleep(20);
+ goto wait;
+ } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ break;
+
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ break;
+ }
+
+out:
+ /* Catch the not-ready port failure after a port reset. */
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3317 HBA not functional: IP Reset Failed "
+ "try: echo fw_reset > board_mode\n");
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ unsigned long bar0map_len, bar1map_len, bar2map_len;
+ int error = -ENODEV;
+ uint32_t if_type;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return error;
+ else
+ pdev = phba->pcidev;
+
+ /* Set the device DMA mask size */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
+ return error;
+ }
+ }
+
+ /*
+ * The BARs and register set definitions and offset locations are
+ * dependent on the if_type.
+ */
+ if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
+ &phba->sli4_hba.sli_intf.word0)) {
+ return error;
+ }
+
+ /* There is no SLI3 failback for SLI4 devices. */
+ if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_VALID) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2894 SLI_INTF reg contents invalid "
+ "sli_intf reg 0x%x\n",
+ phba->sli4_hba.sli_intf.word0);
+ return error;
+ }
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ /*
+ * Get the bus address of SLI4 device Bar regions and the
+ * number of bytes required by each mapping. The mapping of the
+ * particular PCI BARs regions is dependent on the type of
+ * SLI4 device.
+ */
+ if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+ phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+ bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
+
+ /*
+ * Map SLI4 PCI Config Space Register base to a kernel virtual
+ * addr
+ */
+ phba->sli4_hba.conf_regs_memmap_p =
+ ioremap(phba->pci_bar0_map, bar0map_len);
+ if (!phba->sli4_hba.conf_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 PCI config "
+ "registers.\n");
+ goto out;
+ }
+ phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
+ /* Set up BAR0 PCI config space register memory map */
+ lpfc_sli4_bar0_register_memmap(phba, if_type);
+ } else {
+ phba->pci_bar0_map = pci_resource_start(pdev, 1);
+ bar0map_len = pci_resource_len(pdev, 1);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
+ goto out;
+ }
+ phba->sli4_hba.conf_regs_memmap_p =
+ ioremap(phba->pci_bar0_map, bar0map_len);
+ if (!phba->sli4_hba.conf_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 PCI config "
+ "registers.\n");
+ goto out;
+ }
+ lpfc_sli4_bar0_register_memmap(phba, if_type);
+ }
+
+ if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
+ (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a kernel
+ * virtual address and setup the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map, bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 HBA control registers.\n");
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba);
+ }
+
+ if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
+ (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
+ * virtual address and setup the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map, bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 HBA doorbell registers.\n");
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ }
+
+ return 0;
+
+out_iounmap_all:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+ return error;
+}
+
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+ uint32_t if_type;
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ dev_printk(KERN_ERR, &phba->pcidev->dev,
+ "FATAL - unsupported SLI4 interface type - %d\n",
+ if_type);
+ break;
+ }
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
+ * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
+ * once invoked, enables either all or nothing, depending on the current
+ * availability of PCI vector resources. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with a interrupt handler, which is done in this function. Note that
+ * later when device is unloading, the driver should always call free_irq()
+ * on all MSI-X vectors it has done request_irq() on before calling
+ * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
+ * will be left with MSI-X enabled and leaks its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
+{
+ int rc, i;
+ LPFC_MBOXQ_t *pmb;
+
+ /* Set up MSI-X multi-message vectors */
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ phba->msix_entries[i].entry = i;
+
+ /* Configure MSI-X capability structure */
+ rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
+ LPFC_MSIX_VECTORS);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0420 PCI enable MSI-X failed (%d)\n", rc);
+ goto vec_fail_out;
+ }
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0477 MSI-X entry[%d]: vector=x%x "
+ "message=%d\n", i,
+ phba->msix_entries[i].vector,
+ phba->msix_entries[i].entry);
+ /*
+ * Assign MSI-X vectors to interrupt handlers
+ */
+
+ /* vector-0 is associated to slow-path handler */
+ rc = request_irq(phba->msix_entries[0].vector,
+ &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0421 MSI-X slow-path request_irq failed "
+ "(%d)\n", rc);
+ goto msi_fail_out;
+ }
+
+ /* vector-1 is associated to fast-path handler */
+ rc = request_irq(phba->msix_entries[1].vector,
+ &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+ LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0429 MSI-X fast-path request_irq failed "
+ "(%d)\n", rc);
+ goto irq_fail_out;
+ }
+
+ /*
+ * Configure HBA MSI-X attention conditions to messages
+ */
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!pmb) {
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0474 Unable to allocate memory for issuing "
+ "MBOX_CONFIG_MSI command\n");
+ goto mem_fail_out;
+ }
+ rc = lpfc_config_msi(phba, pmb);
+ if (rc)
+ goto mbx_fail_out;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "0351 Config MSI mailbox command failed, "
+ "mbxCmd x%x, mbxStatus x%x\n",
+ pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
+ goto mbx_fail_out;
+ }
+
+ /* Free memory allocated for mailbox command */
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+
+mbx_fail_out:
+ /* Free memory allocated for mailbox command */
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+ /* free the irq already requested */
+ free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+ /* free the irq already requested */
+ free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+ /* Unconfigure MSI-X capability structure */
+ pci_disable_msix(phba->pcidev);
+
+vec_fail_out:
+ return rc;
+}
+
+/**
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
+{
+ int i;
+
+ /* Free up MSI-X multi-message vectors */
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ free_irq(phba->msix_entries[i].vector, phba);
+ /* Disable MSI-X */
+ pci_disable_msix(phba->pcidev);
+
+ return;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling the
+ * request_irq() to register MSI vector with a interrupt the handler, which
+ * is done in this function.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ */
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+ int rc;
+
+ rc = pci_enable_msi(phba->pcidev);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0462 PCI enable MSI mode success.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0471 PCI enable MSI mode failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (rc) {
+ pci_disable_msi(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0478 MSI request_irq failed (%d)\n", rc);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
+ */
+static void
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
+{
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
+ return;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
+ * spec. Depends on the interrupt mode configured to the driver, the driver
+ * will try to fallback from the configured interrupt mode to an interrupt
+ * mode which is supported by the platform, kernel, and device in the order
+ * of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+ uint32_t intr_mode = LPFC_INTR_ERROR;
+ int retval;
+
+ if (cfg_mode == 2) {
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ if (!retval) {
+ /* Now, try to enable MSI-X interrupt mode */
+ retval = lpfc_sli_enable_msix(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI-X mode */
+ phba->intr_type = MSIX;
+ intr_mode = 2;
+ }
+ }
+ }
+
+ /* Fallback to MSI if MSI-X initialization failed */
+ if (cfg_mode >= 1 && phba->intr_type == NONE) {
+ retval = lpfc_sli_enable_msi(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI mode */
+ phba->intr_type = MSI;
+ intr_mode = 1;
+ }
+ }
+
+ /* Fallback to INTx if both MSI-X/MSI initalization failed */
+ if (phba->intr_type == NONE) {
+ retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (!retval) {
+ /* Indicate initialization to INTx mode */
+ phba->intr_type = INTx;
+ intr_mode = 0;
+ }
+ }
+ return intr_mode;
+}
+
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+ /* Disable the currently initialized interrupt mode */
+ if (phba->intr_type == MSIX)
+ lpfc_sli_disable_msix(phba);
+ else if (phba->intr_type == MSI)
+ lpfc_sli_disable_msi(phba);
+ else if (phba->intr_type == INTx)
+ free_irq(phba->pcidev->irq, phba);
+
+ /* Reset interrupt management states */
+ phba->intr_type = NONE;
+ phba->sli.slistat.sli_intr = 0;
+
+ return;
+}
+
+/**
+ * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Find next available CPU to use for IRQ to CPU affinity.
+ */
+static int
+lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+{
+ struct lpfc_vector_map_info *cpup;
+ int cpu;
+
+ cpup = phba->sli4_hba.cpu_map;
+ for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+ /* CPU must be online */
+ if (cpu_online(cpu)) {
+ if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+ (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
+ (cpup->phys_id == phys_id)) {
+ return cpu;
+ }
+ }
+ cpup++;
+ }
+
+ /*
+ * If we get here, we have used ALL CPUs for the specific
+ * phys_id. Now we need to clear out lpfc_used_cpu and start
+ * reusing CPUs.
+ */
+
+ for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+ if (lpfc_used_cpu[cpu] == phys_id)
+ lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+ }
+
+ cpup = phba->sli4_hba.cpu_map;
+ for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+ /* CPU must be online */
+ if (cpu_online(cpu)) {
+ if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+ (cpup->phys_id == phys_id)) {
+ return cpu;
+ }
+ }
+ cpup++;
+ }
+ return LPFC_VECTOR_MAP_EMPTY;
+}
+
+/**
+ * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
+ * @phba: pointer to lpfc hba data structure.
+ * @vectors: number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
+ * affinization across multple physical CPUs (numa nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+ int i, idx, saved_chann, used_chann, cpu, phys_id;
+ int max_phys_id, min_phys_id;
+ int num_io_channel, first_cpu, chan;
+ struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+ struct cpuinfo_x86 *cpuinfo;
+#endif
+ struct cpumask *mask;
+ uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+ /* If there is no mapping, just return */
+ if (!phba->cfg_fcp_cpu_map)
+ return 1;
+
+ /* Init cpu_map array */
+ memset(phba->sli4_hba.cpu_map, 0xff,
+ (sizeof(struct lpfc_vector_map_info) *
+ phba->sli4_hba.num_present_cpu));
+
+ max_phys_id = 0;
+ min_phys_id = 0xff;
+ phys_id = 0;
+ num_io_channel = 0;
+ first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+ /* Update CPU map with physical id and core id of each CPU */
+ cpup = phba->sli4_hba.cpu_map;
+ for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+ cpuinfo = &cpu_data(cpu);
+ cpup->phys_id = cpuinfo->phys_proc_id;
+ cpup->core_id = cpuinfo->cpu_core_id;
+#else
+ /* No distinction between CPUs for other platforms */
+ cpup->phys_id = 0;
+ cpup->core_id = 0;
+#endif
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3328 CPU physid %d coreid %d\n",
+ cpup->phys_id, cpup->core_id);
+
+ if (cpup->phys_id > max_phys_id)
+ max_phys_id = cpup->phys_id;
+ if (cpup->phys_id < min_phys_id)
+ min_phys_id = cpup->phys_id;
+ cpup++;
+ }
+
+ phys_id = min_phys_id;
+ /* Now associate the HBA vectors with specific CPUs */
+ for (idx = 0; idx < vectors; idx++) {
+ cpup = phba->sli4_hba.cpu_map;
+ cpu = lpfc_find_next_cpu(phba, phys_id);
+ if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+ /* Try for all phys_id's */
+ for (i = 1; i < max_phys_id; i++) {
+ phys_id++;
+ if (phys_id > max_phys_id)
+ phys_id = min_phys_id;
+ cpu = lpfc_find_next_cpu(phba, phys_id);
+ if (cpu == LPFC_VECTOR_MAP_EMPTY)
+ continue;
+ goto found;
+ }
+
+ /* Use round robin for scheduling */
+ phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+ chan = 0;
+ cpup = phba->sli4_hba.cpu_map;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ cpup->channel_id = chan;
+ cpup++;
+ chan++;
+ if (chan >= phba->cfg_fcp_io_channel)
+ chan = 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3329 Cannot set affinity:"
+ "Error mapping vector %d (%d)\n",
+ idx, vectors);
+ return 0;
+ }
+found:
+ cpup += cpu;
+ if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+ lpfc_used_cpu[cpu] = phys_id;
+
+ /* Associate vector with selected CPU */
+ cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+ /* Associate IO channel with selected CPU */
+ cpup->channel_id = idx;
+ num_io_channel++;
+
+ if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+ first_cpu = cpu;
+
+ /* Now affinitize to the selected CPU */
+ mask = &cpup->maskbits;
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+ i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+ vector, mask);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3330 Set Affinity: CPU %d channel %d "
+ "irq %d (%x)\n",
+ cpu, cpup->channel_id,
+ phba->sli4_hba.msix_entries[idx].vector, i);
+
+ /* Spread vector mapping across multple physical CPU nodes */
+ phys_id++;
+ if (phys_id > max_phys_id)
+ phys_id = min_phys_id;
+ }
+
+ /*
+ * Finally fill in the IO channel for any remaining CPUs.
+ * At this point, all IO channels have been assigned to a specific
+ * MSIx vector, mapped to a specific CPU.
+ * Base the remaining IO channel assigned, to IO channels already
+ * assigned to other CPUs on the same phys_id.
+ */
+ for (i = min_phys_id; i <= max_phys_id; i++) {
+ /*
+ * If there are no io channels already mapped to
+ * this phys_id, just round robin thru the io_channels.
+ * Setup chann[] for round robin.
+ */
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ chann[idx] = idx;
+
+ saved_chann = 0;
+ used_chann = 0;
+
+ /*
+ * First build a list of IO channels already assigned
+ * to this phys_id before reassigning the same IO
+ * channels to the remaining CPUs.
+ */
+ cpup = phba->sli4_hba.cpu_map;
+ cpu = first_cpu;
+ cpup += cpu;
+ for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+ idx++) {
+ if (cpup->phys_id == i) {
+ /*
+ * Save any IO channels that are
+ * already mapped to this phys_id.
+ */
+ if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+ chann[saved_chann] =
+ cpup->channel_id;
+ saved_chann++;
+ goto out;
+ }
+
+ /* See if we are using round-robin */
+ if (saved_chann == 0)
+ saved_chann =
+ phba->cfg_fcp_io_channel;
+
+ /* Associate next IO channel with CPU */
+ cpup->channel_id = chann[used_chann];
+ num_io_channel++;
+ used_chann++;
+ if (used_chann == saved_chann)
+ used_chann = 0;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3331 Set IO_CHANN "
+ "CPU %d channel %d\n",
+ idx, cpup->channel_id);
+ }
+out:
+ cpu++;
+ if (cpu >= phba->sli4_hba.num_present_cpu) {
+ cpup = phba->sli4_hba.cpu_map;
+ cpu = 0;
+ } else {
+ cpup++;
+ }
+ }
+ }
+
+ if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+ cpup = phba->sli4_hba.cpu_map;
+ for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+ if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+ cpup->channel_id = 0;
+ num_io_channel++;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3332 Assign IO_CHANN "
+ "CPU %d channel %d\n",
+ idx, cpup->channel_id);
+ }
+ cpup++;
+ }
+ }
+
+ /* Sanity check */
+ if (num_io_channel != phba->sli4_hba.num_present_cpu)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3333 Set affinity mismatch:"
+ "%d chann != %d cpus: %d vectors\n",
+ num_io_channel, phba->sli4_hba.num_present_cpu,
+ vectors);
+
+ /* Enable using cpu affinity for scheduling */
+ phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+ return 1;
+}
+
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
+ * is called to enable the MSI-X vectors. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with a interrupt handler, which is done in this function. Note that
+ * later when device is unloading, the driver should always call free_irq()
+ * on all MSI-X vectors it has done request_irq() on before calling
+ * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
+ * will be left with MSI-X enabled and leaks its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+ int vectors, rc, index;
+
+ /* Set up MSI-X multi-message vectors */
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+ phba->sli4_hba.msix_entries[index].entry = index;
+
+ /* Configure MSI-X capability structure */
+ vectors = phba->cfg_fcp_io_channel;
+ if (phba->cfg_fof) {
+ phba->sli4_hba.msix_entries[index].entry = index;
+ vectors++;
+ }
+ rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
+ 2, vectors);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0484 PCI enable MSI-X failed (%d)\n", rc);
+ goto vec_fail_out;
+ }
+ vectors = rc;
+
+ /* Log MSI-X vector assignment */
+ for (index = 0; index < vectors; index++)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0489 MSI-X entry[%d]: vector=x%x "
+ "message=%d\n", index,
+ phba->sli4_hba.msix_entries[index].vector,
+ phba->sli4_hba.msix_entries[index].entry);
+
+ /* Assign MSI-X vectors to interrupt handlers */
+ for (index = 0; index < vectors; index++) {
+ memset(&phba->sli4_hba.handler_name[index], 0, 16);
+ snprintf((char *)&phba->sli4_hba.handler_name[index],
+ LPFC_SLI4_HANDLER_NAME_SZ,
+ LPFC_DRIVER_HANDLER_NAME"%d", index);
+
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
+ if (phba->cfg_fof && (index == (vectors - 1)))
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
+ &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+ (char *)&phba->sli4_hba.handler_name[index],
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ else
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
+ &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
+ (char *)&phba->sli4_hba.handler_name[index],
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0486 MSI-X fast-path (%d) "
+ "request_irq failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ }
+
+ if (phba->cfg_fof)
+ vectors--;
+
+ if (vectors != phba->cfg_fcp_io_channel) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3238 Reducing IO channels to match number of "
+ "MSI-X vectors, requested %d got %d\n",
+ phba->cfg_fcp_io_channel, vectors);
+ phba->cfg_fcp_io_channel = vectors;
+ }
+
+ lpfc_sli4_set_affinity(phba, vectors);
+ return rc;
+
+cfg_fail_out:
+ /* free the irq already requested */
+ for (--index; index >= 0; index--) {
+ irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+ vector, NULL);
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ }
+
+ /* Unconfigure MSI-X capability structure */
+ pci_disable_msix(phba->pcidev);
+
+vec_fail_out:
+ return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
+{
+ int index;
+
+ /* Free up MSI-X multi-message vectors */
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+ irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+ vector, NULL);
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ }
+ if (phba->cfg_fof) {
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ }
+ /* Disable MSI-X */
+ pci_disable_msix(phba->pcidev);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * the request_irq() to register MSI vector with a interrupt the handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+ int rc, index;
+
+ rc = pci_enable_msi(phba->pcidev);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0487 PCI enable MSI mode success.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0488 PCI enable MSI mode failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (rc) {
+ pci_disable_msi(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0490 MSI request_irq failed (%d)\n", rc);
+ return rc;
+ }
+
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
+
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
+ return;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-4
+ * interface spec. Depends on the interrupt mode configured to the driver,
+ * the driver will try to fallback from the configured interrupt mode to an
+ * interrupt mode which is supported by the platform, kernel, and device in
+ * the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+ uint32_t intr_mode = LPFC_INTR_ERROR;
+ int retval, index;
+
+ if (cfg_mode == 2) {
+ /* Preparation before conf_msi mbox cmd */
+ retval = 0;
+ if (!retval) {
+ /* Now, try to enable MSI-X interrupt mode */
+ retval = lpfc_sli4_enable_msix(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI-X mode */
+ phba->intr_type = MSIX;
+ intr_mode = 2;
+ }
+ }
+ }
+
+ /* Fallback to MSI if MSI-X initialization failed */
+ if (cfg_mode >= 1 && phba->intr_type == NONE) {
+ retval = lpfc_sli4_enable_msi(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI mode */
+ phba->intr_type = MSI;
+ intr_mode = 1;
+ }
+ }
+
+ /* Fallback to INTx if both MSI-X/MSI initalization failed */
+ if (phba->intr_type == NONE) {
+ retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (!retval) {
+ /* Indicate initialization to INTx mode */
+ phba->intr_type = INTx;
+ intr_mode = 0;
+ for (index = 0; index < phba->cfg_fcp_io_channel;
+ index++) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+ fcp_eq_in_use, 1);
+ }
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+ fcp_eq_in_use, 1);
+ }
+ }
+ }
+ return intr_mode;
+}
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+ /* Disable the currently initialized interrupt mode */
+ if (phba->intr_type == MSIX)
+ lpfc_sli4_disable_msix(phba);
+ else if (phba->intr_type == MSI)
+ lpfc_sli4_disable_msi(phba);
+ else if (phba->intr_type == INTx)
+ free_irq(phba->pcidev->irq, phba);
+
+ /* Reset interrupt management states */
+ phba->intr_type = NONE;
+ phba->sli.slistat.sli_intr = 0;
+
+ return;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps to
+ * a device with SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(shost->host_lock);
+
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+
+ lpfc_stop_hba_timers(phba);
+
+ phba->pport->work_port_events = 0;
+
+ lpfc_sli_hba_down(phba);
+
+ lpfc_sli_brdrestart(phba);
+
+ lpfc_sli_disable_intr(phba);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion
+ * of device's XRIs exchange busy. It will check the XRI exchange busy
+ * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
+ * that, it will check the XRI exchange busy on outstanding FCP and ELS
+ * I/Os every 30 seconds, log error message, and wait forever. Only when
+ * all XRI exchange busy complete, the driver unload shall proceed with
+ * invoking the function reset ioctl mailbox command to the CNA and the
+ * the rest of the driver unload resource release.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+ int wait_time = 0;
+ int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ while (!fcp_xri_cmpl || !els_xri_cmpl) {
+ if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!fcp_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2877 FCP XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
+ if (!els_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2878 ELS XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+ wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+ } else {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+ }
+ fcp_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ els_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ }
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+ int wait_cnt = 0;
+ LPFC_MBOXQ_t *mboxq;
+ struct pci_dev *pdev = phba->pcidev;
+
+ lpfc_stop_hba_timers(phba);
+ phba->sli4_hba.intr_enable = 0;
+
+ /*
+ * Gracefully wait out the potential current outstanding asynchronous
+ * mailbox command.
+ */
+
+ /* First, block any pending async mailbox command from posted */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, trying to wait it out if we can */
+ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ msleep(10);
+ if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+ break;
+ }
+ /* Forcefully release the outstanding mailbox command if timed out */
+ if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_lock_irq(&phba->hbalock);
+ mboxq = phba->sli.mbox_active;
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /* Abort all iocbs associated with the hba */
+ lpfc_sli_hba_iocb_abort(phba);
+
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
+
+ /* Disable PCI subsystem interrupt */
+ lpfc_sli4_disable_intr(phba);
+
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
+ /* Stop kthread signal shall trigger work_done one more time */
+ kthread_stop(phba->worker_thread);
+
+ /* Reset SLI4 HBA FCoE function */
+ lpfc_pci_function_reset(phba);
+ lpfc_sli4_queue_destroy(phba);
+
+ /* Stop the SLI4 device port */
+ phba->pport->work_port_events = 0;
+}
+
+ /**
+ * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ uint32_t mbox_tmo;
+
+ rc = 0;
+ mqe = &mboxq->u.mqe;
+
+ /* Read the port's SLI4 Parameters port capabilities */
+ lpfc_pc_sli4_params(mboxq);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ return 1;
+
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+ sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+ sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+ sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+ &mqe->un.sli4_params);
+ sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+ &mqe->un.sli4_params);
+ sli4_params->proto_types = mqe->un.sli4_params.word3;
+ sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+ sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+ sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+ sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+ sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+ sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+ sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+ sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+ sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+ sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+ sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+ sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+ sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+ sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+ sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+ sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+ sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+ sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+ sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+ sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
+ return rc;
+}
+
+/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe = &mboxq->u.mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ uint32_t mbox_tmo;
+ int length;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+ /*
+ * By default, the driver assumes the SLI4 port requires RPI
+ * header postings. The SLI4_PARAM response will correct this
+ * assumption.
+ */
+ phba->sli4_hba.rpi_hdrs_in_use = 1;
+
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ }
+ if (unlikely(rc))
+ return rc;
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+ sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+ sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+ sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+ mbx_sli4_parameters);
+ sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+ mbx_sli4_parameters);
+ if (bf_get(cfg_phwq, mbx_sli4_parameters))
+ phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+ sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
+ sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+ sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+ sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
+ sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+ mbx_sli4_parameters);
+ sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+ mbx_sli4_parameters);
+ phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+ phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
+ return 0;
+}
+
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
+ * information of the device and driver to see if the driver state that it can
+ * support this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_vport *vport = NULL;
+ struct Scsi_Host *shost = NULL;
+ int error;
+ uint32_t cfg_mode, intr_mode;
+
+ /* Allocate memory for HBA structure */
+ phba = lpfc_hba_alloc(pdev);
+ if (!phba)
+ return -ENOMEM;
+
+ /* Perform generic PCI device enabling operation */
+ error = lpfc_enable_pci_dev(phba);
+ if (error)
+ goto out_free_phba;
+
+ /* Set up SLI API function jump table for PCI-device group-0 HBAs */
+ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+ if (error)
+ goto out_disable_pci_dev;
+
+ /* Set up SLI-3 specific device PCI memory space */
+ error = lpfc_sli_pci_mem_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1402 Failed to set up pci memory space.\n");
+ goto out_disable_pci_dev;
+ }
+
+ /* Set up phase-1 common device driver resources */
+ error = lpfc_setup_driver_resource_phase1(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1403 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s3;
+ }
+
+ /* Set up SLI-3 specific device driver resources */
+ error = lpfc_sli_driver_resource_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1404 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s3;
+ }
+
+ /* Initialize and populate the iocb list per host */
+ error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1405 Failed to initialize iocb list.\n");
+ goto out_unset_driver_resource_s3;
+ }
+
+ /* Set up common device driver resources */
+ error = lpfc_setup_driver_resource_phase2(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1406 Failed to set up driver resource.\n");
+ goto out_free_iocb_list;
+ }
+
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+ /* Create SCSI host to the physical port */
+ error = lpfc_create_shost(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1407 Failed to create scsi host.\n");
+ goto out_unset_driver_resource;
+ }
+
+ /* Configure sysfs attributes */
+ vport = phba->pport;
+ error = lpfc_alloc_sysfs_attr(vport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1476 Failed to allocate sysfs attr\n");
+ goto out_destroy_shost;
+ }
+
+ shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+ /* Now, trying to enable interrupt and bring up the device */
+ cfg_mode = phba->cfg_use_msi;
+ while (true) {
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0431 Failed to enable interrupt.\n");
+ error = -ENODEV;
+ goto out_free_sysfs_attr;
+ }
+ /* SLI-3 HBA setup */
+ if (lpfc_sli_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1477 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_remove_device;
+ }
+
+ /* Wait 50ms for the interrupts of previous mailbox commands */
+ msleep(50);
+ /* Check active interrupts on message signaled interrupts */
+ if (intr_mode == 0 ||
+ phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+ break;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0447 Configure interrupt mode (%d) "
+ "failed active interrupt test.\n",
+ intr_mode);
+ /* Disable the current interrupt mode */
+ lpfc_sli_disable_intr(phba);
+ /* Try next level of interrupt mode */
+ cfg_mode = --intr_mode;
+ }
+ }
+
+ /* Perform post initialization setup */
+ lpfc_post_init_setup(phba);
+
+ /* Check if there are static vports to be created. */
+ lpfc_create_static_vport(phba);
+
+ return 0;
+
+out_remove_device:
+ lpfc_unset_hba(phba);
+out_free_sysfs_attr:
+ lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+ lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+ lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+ lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+ lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+ lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+ lpfc_disable_pci_dev(phba);
+ if (shost)
+ scsi_host_put(shost);
+out_free_phba:
+ lpfc_hba_free(phba);
+ return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called to disattach a device with SLI-3 interface
+ * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_vport **vports;
+ struct lpfc_hba *phba = vport->phba;
+ int i;
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ spin_lock_irq(&phba->hbalock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_free_sysfs_attr(vport);
+
+ /* Release all the vports against this physical port */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+ continue;
+ fc_vport_terminate(vports[i]->fc_vport);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ /* Remove FC host and then SCSI host with the physical port */
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+ lpfc_cleanup(vport);
+
+ /*
+ * Bring down the SLI Layer. This step disable all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA.
+ */
+
+ /* HBA interrupt will be disabled after this call */
+ lpfc_sli_hba_down(phba);
+ /* Stop kthread signal shall trigger work_done one more time */
+ kthread_stop(phba->worker_thread);
+ /* Final cleanup of txcmplq and reset the HBA */
+ lpfc_sli_brdrestart(phba);
+
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+
+ lpfc_stop_hba_timers(phba);
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_debugfs_terminate(vport);
+
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
+ /* Disable interrupt */
+ lpfc_sli_disable_intr(phba);
+
+ scsi_host_put(shost);
+
+ /*
+ * Call scsi_free before mem_free since scsi bufs are released to their
+ * corresponding pools here.
+ */
+ lpfc_scsi_free(phba);
+ lpfc_mem_free_all(phba);
+
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
+
+ /* Free resources associated with SLI2 interface */
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
+
+ /* unmap adapter SLIM and Control Registers */
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+
+ lpfc_hba_free(phba);
+
+ pci_release_selected_regions(pdev, bars);
+ pci_disable_device(pdev);
+}
+
+/**
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off device's interrupt and DMA,
+ * and bring the device offline. Note that as the driver implements the
+ * minimum PM requirements to a power-aware driver's PM support for the
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver will
+ * fully reinitialize its device during resume() method call, the driver will
+ * set device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0473 PCI device Power Management suspend.\n");
+
+ /* Bring down the device */
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ lpfc_offline(phba);
+ kthread_stop(phba->worker_thread);
+
+ /* Disable interrupt from device */
+ lpfc_sli_disable_intr(phba);
+
+ /* Save device state to PCI config space */
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that as the
+ * driver implements the minimum PM requirements to a power-aware driver's
+ * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call will be treated as SUSPEND and the
+ * driver will fully reinitialize its device during resume() method call,
+ * the device will be set to PCI_D0 directly in PCI config space before
+ * restoring the state.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint32_t intr_mode;
+ int error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0452 PCI device Power Management resume.\n");
+
+ /* Restore device state from PCI config space */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /*
+ * As the new kernel behavior of pci_restore_state() API call clears
+ * device saved_state flag, need to save the restored state again.
+ */
+ pci_save_state(pdev);
+
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0434 PM resume failed to start worker "
+ "thread: error=x%x.\n", error);
+ return error;
+ }
+
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0430 PM resume Failed to enable interrupt\n");
+ return -EIO;
+ } else
+ phba->intr_mode = intr_mode;
+
+ /* Restart HBA and bring it online */
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2723 PCI channel I/O abort preparing for recovery\n");
+
+ /*
+ * There may be errored I/Os through HBA, abort all I/Os on txcmplq
+ * and let the SCSI mid-layer to retry them to recover.
+ */
+ lpfc_sli_abort_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2710 PCI channel disable preparing for reset\n");
+
+ /* Block any management I/Os to the device */
+ lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
+
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+ lpfc_sli_flush_fcp_rings(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
+ /* Disable interrupt and pci device */
+ lpfc_sli_disable_intr(phba);
+ pci_disable_device(phba->pcidev);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2711 PCI channel permanent disable for failure\n");
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
+ /* Clean up all driver's outstanding SCSI I/Os */
+ lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ /* Non-fatal error, prepare for recovery */
+ lpfc_sli_prep_dev_for_recover(phba);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ lpfc_sli_prep_dev_for_reset(phba);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent failure, prepare for device down */
+ lpfc_sli_prep_dev_for_perm_failure(phba);
+ return PCI_ERS_RESULT_DISCONNECT;
+ default:
+ /* Unknown state, prepare and request slot reset */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0472 Unknown PCI error state: x%x\n", state);
+ lpfc_sli_prep_dev_for_reset(phba);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+}
+
+/**
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to
+ * device with SLI-3 interface spec. This is called after PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function will initialize the HBA device,
+ * enable the interrupt, but it will just put the HBA to offline state
+ * without passing any I/O traffic.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t intr_mode;
+
+ dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+ if (pci_enable_device_mem(pdev)) {
+ printk(KERN_ERR "lpfc: Cannot re-enable "
+ "PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_restore_state(pdev);
+
+ /*
+ * As the new kernel behavior of pci_restore_state() API call clears
+ * device saved_state flag, need to save the restored state again.
+ */
+ pci_save_state(pdev);
+
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0427 Cannot re-enable interrupt after "
+ "slot reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ } else
+ phba->intr_mode = intr_mode;
+
+ /* Take device offline, it will perform cleanup */
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ */
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ /* Bring device online, it will be no-op for non-fatal error resume */
+ lpfc_online(phba);
+
+ /* Clean up Advanced Error Reporting (AER) if needed */
+ if (phba->hba_flag & HBA_AER_ENABLED)
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+/**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+ int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (max_xri <= 100)
+ return 10;
+ else if (max_xri <= 256)
+ return 25;
+ else if (max_xri <= 512)
+ return 50;
+ else if (max_xri <= 1024)
+ return 100;
+ else if (max_xri <= 1536)
+ return 150;
+ else if (max_xri <= 2048)
+ return 200;
+ else
+ return 250;
+ } else
+ return 0;
+}
+
+/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @fw: pointer to firmware image returned from request_firmware.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ **/
+static void
+lpfc_write_firmware(const struct firmware *fw, void *context)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)context;
+ char fwrev[FW_REV_STR_SIZE];
+ struct lpfc_grp_hdr *image;
+ struct list_head dma_buffer_list;
+ int i, rc = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+ uint32_t offset = 0, temp_offset = 0;
+
+ /* It can be null in no-wait mode, sanity check */
+ if (!fw) {
+ rc = -ENXIO;
+ goto out;
+ }
+ image = (struct lpfc_grp_hdr *)fw->data;
+
+ INIT_LIST_HEAD(&dma_buffer_list);
+ if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
+ LPFC_FILE_TYPE_GROUP) ||
+ (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (be32_to_cpu(image->size) != fw->size)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 Invalid FW image found. "
+ "Magic:%x Type:%x ID:%x\n",
+ be32_to_cpu(image->magic_number),
+ bf_get_be32(lpfc_grp_hdr_file_type, image),
+ bf_get_be32(lpfc_grp_hdr_id, image));
+ rc = -EINVAL;
+ goto release_out;
+ }
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3023 Updating Firmware, Current Version:%s "
+ "New Version:%s\n",
+ fwrev, image->revision);
+ for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto release_out;
+ }
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ goto release_out;
+ }
+ list_add_tail(&dmabuf->list, &dma_buffer_list);
+ }
+ while (offset < fw->size) {
+ temp_offset = offset;
+ list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+ if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+ memcpy(dmabuf->virt,
+ fw->data + temp_offset,
+ fw->size - temp_offset);
+ temp_offset = fw->size;
+ break;
+ }
+ memcpy(dmabuf->virt, fw->data + temp_offset,
+ SLI4_PAGE_SIZE);
+ temp_offset += SLI4_PAGE_SIZE;
+ }
+ rc = lpfc_wr_object(phba, &dma_buffer_list,
+ (fw->size - offset), &offset);
+ if (rc)
+ goto release_out;
+ }
+ rc = offset;
+ }
+
+release_out:
+ list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ release_firmware(fw);
+out:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update done: %d.\n", rc);
+ return;
+}
+
+/**
+ * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to perform Linux generic firmware upgrade on device
+ * that supports such feature.
+ **/
+int
+lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+{
+ uint8_t file_name[ELX_MODEL_NAME_SIZE];
+ int ret;
+ const struct firmware *fw;
+
+ /* Only supported on SLI4 interface type 2 for now */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -EPERM;
+
+ snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+
+ if (fw_upgrade == INT_FW_UPGRADE) {
+ ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ file_name, &phba->pcidev->dev,
+ GFP_KERNEL, (void *)phba,
+ lpfc_write_firmware);
+ } else if (fw_upgrade == RUN_FW_UPGRADE) {
+ ret = reject_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!ret)
+ lpfc_write_firmware(fw, (void *)phba);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is called from the kernel's PCI subsystem to device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
+ * information of the device and driver to see if the driver state that it
+ * can support this kind of device. If the match is successful, the driver
+ * core invokes this routine. If this routine determines it can claim the HBA,
+ * it does all the initialization that it needs to do to handle the HBA
+ * properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_vport *vport = NULL;
+ struct Scsi_Host *shost = NULL;
+ int error, ret;
+ uint32_t cfg_mode, intr_mode;
+ int adjusted_fcp_io_channel;
+
+ /* Allocate memory for HBA structure */
+ phba = lpfc_hba_alloc(pdev);
+ if (!phba)
+ return -ENOMEM;
+
+ /* Perform generic PCI device enabling operation */
+ error = lpfc_enable_pci_dev(phba);
+ if (error)
+ goto out_free_phba;
+
+ /* Set up SLI API function jump table for PCI-device group-1 HBAs */
+ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
+ if (error)
+ goto out_disable_pci_dev;
+
+ /* Set up SLI-4 specific device PCI memory space */
+ error = lpfc_sli4_pci_mem_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1410 Failed to set up pci memory space.\n");
+ goto out_disable_pci_dev;
+ }
+
+ /* Set up phase-1 common device driver resources */
+ error = lpfc_setup_driver_resource_phase1(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1411 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s4;
+ }
+
+ /* Set up SLI-4 Specific device driver resources */
+ error = lpfc_sli4_driver_resource_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1412 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s4;
+ }
+
+ /* Initialize and populate the iocb list per host */
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2821 initialize iocb list %d.\n",
+ phba->cfg_iocb_cnt*1024);
+ error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
+
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1413 Failed to initialize iocb list.\n");
+ goto out_unset_driver_resource_s4;
+ }
+
+ INIT_LIST_HEAD(&phba->active_rrq_list);
+ INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
+
+ /* Set up common device driver resources */
+ error = lpfc_setup_driver_resource_phase2(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1414 Failed to set up driver resource.\n");
+ goto out_free_iocb_list;
+ }
+
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+ /* Create SCSI host to the physical port */
+ error = lpfc_create_shost(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1415 Failed to create scsi host.\n");
+ goto out_unset_driver_resource;
+ }
+
+ /* Configure sysfs attributes */
+ vport = phba->pport;
+ error = lpfc_alloc_sysfs_attr(vport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1416 Failed to allocate sysfs attr\n");
+ goto out_destroy_shost;
+ }
+
+ shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+ /* Now, trying to enable interrupt and bring up the device */
+ cfg_mode = phba->cfg_use_msi;
+
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ error = -ENODEV;
+ goto out_free_sysfs_attr;
+ }
+ /* Default to single EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ adjusted_fcp_io_channel = 1;
+ else
+ adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
+ /* Set up SLI-4 HBA */
+ if (lpfc_sli4_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1421 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_disable_intr;
+ }
+
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+
+ /* Perform post initialization setup */
+ lpfc_post_init_setup(phba);
+
+ /* check for firmware upgrade or downgrade */
+ if (phba->cfg_request_firmware_upgrade)
+ ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
+
+ /* Check if there are static vports to be created. */
+ lpfc_create_static_vport(phba);
+ return 0;
+
+out_disable_intr:
+ lpfc_sli4_disable_intr(phba);
+out_free_sysfs_attr:
+ lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+ lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+ lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+ lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s4:
+ lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+ lpfc_sli4_pci_mem_unset(phba);
+out_disable_pci_dev:
+ lpfc_disable_pci_dev(phba);
+ if (shost)
+ scsi_host_put(shost);
+out_free_phba:
+ lpfc_hba_free(phba);
+ return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_vport **vports;
+ struct lpfc_hba *phba = vport->phba;
+ int i;
+
+ /* Mark the device unloading flag */
+ spin_lock_irq(&phba->hbalock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Free the HBA sysfs attributes */
+ lpfc_free_sysfs_attr(vport);
+
+ /* Release all the vports against this physical port */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+ continue;
+ fc_vport_terminate(vports[i]->fc_vport);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ /* Remove FC host and then SCSI host with the physical port */
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
+ /* Perform cleanup on the physical port */
+ lpfc_cleanup(vport);
+
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
+
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Perform scsi free before driver resource_unset since scsi
+ * buffers are released to their corresponding pools here.
+ */
+ lpfc_scsi_free(phba);
+
+ lpfc_sli4_driver_resource_unset(phba);
+
+ /* Unmap adapter Control and Doorbell registers */
+ lpfc_sli4_pci_mem_unset(phba);
+
+ /* Release PCI resources and disable device's PCI function */
+ scsi_host_put(shost);
+ lpfc_disable_pci_dev(phba);
+
+ /* Finally, free the driver's device data structure */
+ lpfc_hba_free(phba);
+
+ return;
+}
+
+/**
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
+ * this method, it quiesces the device by stopping the driver's worker
+ * thread for the device, turning off device's interrupt and DMA, and bring
+ * the device offline. Note that as the driver implements the minimum PM
+ * requirements to a power-aware driver's PM support for suspend/resume -- all
+ * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
+ * method call will be treated as SUSPEND and the driver will fully
+ * reinitialize its device during resume() method call, the driver will set
+ * device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2843 PCI device Power Management suspend.\n");
+
+ /* Bring down the device */
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ lpfc_offline(phba);
+ kthread_stop(phba->worker_thread);
+
+ /* Disable interrupt from device */
+ lpfc_sli4_disable_intr(phba);
+ lpfc_sli4_queue_destroy(phba);
+
+ /* Save device state to PCI config space */
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
+ * this method, it restores the device's PCI config space state and fully
+ * reinitializes the device and brings it online. Note that as the driver
+ * implements the minimum PM requirements to a power-aware driver's PM for
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver
+ * will fully reinitialize its device during resume() method call, the device
+ * will be set to PCI_D0 directly in PCI config space before restoring the
+ * state.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint32_t intr_mode;
+ int error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0292 PCI device Power Management resume.\n");
+
+ /* Restore device state from PCI config space */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /*
+ * As the new kernel behavior of pci_restore_state() API call clears
+ * device saved_state flag, need to save the restored state again.
+ */
+ pci_save_state(pdev);
+
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0293 PM resume failed to start worker "
+ "thread: error=x%x.\n", error);
+ return error;
+ }
+
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0294 PM resume Failed to enable interrupt\n");
+ return -EIO;
+ } else
+ phba->intr_mode = intr_mode;
+
+ /* Restart HBA and bring it online */
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2828 PCI channel I/O abort preparing for recovery\n");
+ /*
+ * There may be errored I/Os through HBA, abort all I/Os on txcmplq
+ * and let the SCSI mid-layer to retry them to recover.
+ */
+ lpfc_sli_abort_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2826 PCI channel disable preparing for reset\n");
+
+ /* Block any management I/Os to the device */
+ lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
+
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+ lpfc_sli_flush_fcp_rings(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
+ /* Disable interrupt and pci device */
+ lpfc_sli4_disable_intr(phba);
+ lpfc_sli4_queue_destroy(phba);
+ pci_disable_device(phba->pcidev);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2827 PCI channel permanent disable for failure\n");
+
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
+ /* Clean up all driver's outstanding SCSI I/Os */
+ lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-4 interface spec. This function is called by the PCI subsystem
+ * after a PCI bus error affecting this device has been detected. When this
+ * function is invoked, it will need to stop all the I/Os and interrupt(s)
+ * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
+ * for the PCI subsystem to perform proper recovery as desired.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ /* Non-fatal error, prepare for recovery */
+ lpfc_sli4_prep_dev_for_recover(phba);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ lpfc_sli4_prep_dev_for_reset(phba);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent failure, prepare for device down */
+ lpfc_sli4_prep_dev_for_perm_failure(phba);
+ return PCI_ERS_RESULT_DISCONNECT;
+ default:
+ /* Unknown state, prepare and request slot reset */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2825 Unknown PCI error state: x%x\n", state);
+ lpfc_sli4_prep_dev_for_reset(phba);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+}
+
+/**
+ * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-4 interface spec. It is called after PCI bus has been reset to
+ * restart the PCI card from scratch, as if from a cold-boot. During the
+ * PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function will initialize the HBA device, enable
+ * the interrupt, but it will just put the HBA to offline state without
+ * passing any I/O traffic.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t intr_mode;
+
+ dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+ if (pci_enable_device_mem(pdev)) {
+ printk(KERN_ERR "lpfc: Cannot re-enable "
+ "PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_restore_state(pdev);
+
+ /*
+ * As the new kernel behavior of pci_restore_state() API call clears
+ * device saved_state flag, need to save the restored state again.
+ */
+ pci_save_state(pdev);
+
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2824 Cannot re-enable interrupt after "
+ "slot reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ } else
+ phba->intr_mode = intr_mode;
+
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-4 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ **/
+static void
+lpfc_io_resume_s4(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ /*
+ * In case of slot reset, as function reset is performed through
+ * mailbox command which needs DMA to be enabled, this operation
+ * has to be moved to the io resume phase. Taking device offline
+ * will perform the necessary cleanup.
+ */
+ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
+ /* Perform device reset */
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+ /* Bring the device back online */
+ lpfc_online(phba);
+ }
+
+ /* Clean up Advanced Error Reporting (AER) if needed */
+ if (phba->hba_flag & HBA_AER_ENABLED)
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+/**
+ * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
+ * at PCI device-specific information of the device and driver to see if the
+ * driver state that it can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. This routine dispatches
+ * the action to the proper SLI-3 or SLI-4 device probing routine, which will
+ * do all the initialization that it needs to do to handle the HBA device
+ * properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ int rc;
+ struct lpfc_sli_intf intf;
+
+ if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
+ return -ENODEV;
+
+ if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
+ (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
+ rc = lpfc_pci_probe_one_s4(pdev, pid);
+ else
+ rc = lpfc_pci_probe_one_s3(pdev, pid);
+
+ return rc;
+}
+
+/**
+ * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
+ * This routine dispatches the action to the proper SLI-3 or SLI-4 device
+ * remove routine, which will perform all the necessary cleanup for the
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ lpfc_pci_remove_one_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ lpfc_pci_remove_one_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1424 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
+ * suspend the device.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int rc = -ENODEV;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_pci_suspend_one_s3(pdev, msg);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_pci_suspend_one_s4(pdev, msg);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1425 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device resume routine, which will
+ * resume the device.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int rc = -ENODEV;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_pci_resume_one_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_pci_resume_one_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1426 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_io_error_detected - lpfc method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device error detected handling
+ * routine, which will perform the proper error detected operation.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_io_error_detected_s3(pdev, state);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_io_error_detected_s4(pdev, state);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1427 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called after PCI bus has been reset to restart the PCI card
+ * from scratch, as if from a cold-boot. When this routine is invoked, it
+ * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
+ * routine, which will perform the proper device reset.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_io_slot_reset_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_io_slot_reset_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1428 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_io_resume - lpfc method for resuming PCI I/O operation
+ * @pdev: pointer to PCI device
+ *
+ * This routine is registered to the PCI subsystem for error handling. It
+ * is called when kernel error recovery tells the lpfc driver that it is
+ * OK to resume normal PCI operation after PCI bus error recovery. When
+ * this routine is invoked, it dispatches the action to the proper SLI-3
+ * or SLI-4 device io_resume routine, which will resume the device operation.
+ **/
+static void
+lpfc_io_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ lpfc_io_resume_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ lpfc_io_resume_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1429 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported for this adapter. If
+ * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
+ * the enable oas flag is cleared and the pool created for OAS device data
+ * is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+ if (!phba->cfg_EnableXLane)
+ return;
+
+ if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+ phba->cfg_fof = 1;
+ } else {
+ phba->cfg_fof = 0;
+ if (phba->device_data_mem_pool)
+ mempool_destroy(phba->device_data_mem_pool);
+ phba->device_data_mem_pool = NULL;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int rc;
+
+ rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+ if (rc)
+ return -ENOMEM;
+
+ if (phba->cfg_fof) {
+
+ rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+ phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+ if (rc)
+ goto out_oas_cq;
+
+ rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+ phba->sli4_hba.oas_cq, LPFC_FCP);
+ if (rc)
+ goto out_oas_wq;
+
+ phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+ phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+ }
+
+ return 0;
+
+out_oas_wq:
+ lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+ lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+ return rc;
+
+}
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No availble memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+
+ /* Create FOF EQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.fof_eq = qdesc;
+
+ if (phba->cfg_fof) {
+
+ /* Create OAS CQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_cq = qdesc;
+
+ /* Create OAS WQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_wq = qdesc;
+
+ }
+ return 0;
+
+out_error:
+ lpfc_fof_queue_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues with the FC HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+ /* Release FOF Event queue */
+ if (phba->sli4_hba.fof_eq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+ phba->sli4_hba.fof_eq = NULL;
+ }
+
+ /* Release OAS Completion queue */
+ if (phba->sli4_hba.oas_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+ phba->sli4_hba.oas_cq = NULL;
+ }
+
+ /* Release OAS Work queue */
+ if (phba->sli4_hba.oas_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+ phba->sli4_hba.oas_wq = NULL;
+ }
+ return 0;
+}
+
+static struct pci_device_id lpfc_id_table[] = {
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, lpfc_id_table);
+
+static const struct pci_error_handlers lpfc_err_handler = {
+ .error_detected = lpfc_io_error_detected,
+ .slot_reset = lpfc_io_slot_reset,
+ .resume = lpfc_io_resume,
+};
+
+static struct pci_driver lpfc_driver = {
+ .name = LPFC_DRIVER_NAME,
+ .id_table = lpfc_id_table,
+ .probe = lpfc_pci_probe_one,
+ .remove = lpfc_pci_remove_one,
+ .suspend = lpfc_pci_suspend_one,
+ .resume = lpfc_pci_resume_one,
+ .err_handler = &lpfc_err_handler,
+};
+
+static const struct file_operations lpfc_mgmt_fop = {
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice lpfc_mgmt_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "lpfcmgmt",
+ .fops = &lpfc_mgmt_fop,
+};
+
+/**
+ * lpfc_init - lpfc module initialization routine
+ *
+ * This routine is to be invoked when the lpfc module is loaded into the
+ * kernel. The special kernel macro module_init() is used to indicate the
+ * role of this routine to the kernel as lpfc module entry point.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - FC attach transport failed
+ * all others - failed
+ */
+static int __init
+lpfc_init(void)
+{
+ int cpu;
+ int error = 0;
+
+ printk(LPFC_MODULE_DESC "\n");
+ printk(LPFC_COPYRIGHT "\n");
+
+ error = misc_register(&lpfc_mgmt_dev);
+ if (error)
+ printk(KERN_ERR "Could not register lpfcmgmt device, "
+ "misc_register returned with status %d", error);
+
+ if (lpfc_enable_npiv) {
+ lpfc_transport_functions.vport_create = lpfc_vport_create;
+ lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+ }
+ lpfc_transport_template =
+ fc_attach_transport(&lpfc_transport_functions);
+ if (lpfc_transport_template == NULL)
+ return -ENOMEM;
+ if (lpfc_enable_npiv) {
+ lpfc_vport_transport_template =
+ fc_attach_transport(&lpfc_vport_transport_functions);
+ if (lpfc_vport_transport_template == NULL) {
+ fc_release_transport(lpfc_transport_template);
+ return -ENOMEM;
+ }
+ }
+
+ /* Initialize in case vector mapping is needed */
+ lpfc_used_cpu = NULL;
+ lpfc_present_cpu = 0;
+ for_each_present_cpu(cpu)
+ lpfc_present_cpu++;
+
+ error = pci_register_driver(&lpfc_driver);
+ if (error) {
+ fc_release_transport(lpfc_transport_template);
+ if (lpfc_enable_npiv)
+ fc_release_transport(lpfc_vport_transport_template);
+ }
+
+ return error;
+}
+
+/**
+ * lpfc_exit - lpfc module removal routine
+ *
+ * This routine is invoked when the lpfc module is removed from the kernel.
+ * The special kernel macro module_exit() is used to indicate the role of
+ * this routine to the kernel as lpfc module exit point.
+ */
+static void __exit
+lpfc_exit(void)
+{
+ misc_deregister(&lpfc_mgmt_dev);
+ pci_unregister_driver(&lpfc_driver);
+ fc_release_transport(lpfc_transport_template);
+ if (lpfc_enable_npiv)
+ fc_release_transport(lpfc_vport_transport_template);
+ if (_dump_buf_data) {
+ printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
+ "_dump_buf_data at 0x%p\n",
+ (1L << _dump_buf_data_order), _dump_buf_data);
+ free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
+ }
+
+ if (_dump_buf_dif) {
+ printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
+ "_dump_buf_dif at 0x%p\n",
+ (1L << _dump_buf_dif_order), _dump_buf_dif);
+ free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
+ }
+ kfree(lpfc_used_cpu);
+}
+
+module_init(lpfc_init);
+module_exit(lpfc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(LPFC_MODULE_DESC);
+MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
+MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
new file mode 100644
index 000000000..2a4e5d21e
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -0,0 +1,59 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define LOG_ELS 0x00000001 /* ELS events */
+#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
+#define LOG_MBOX 0x00000004 /* Mailbox events */
+#define LOG_INIT 0x00000008 /* Initialization events */
+#define LOG_LINK_EVENT 0x00000010 /* Link events */
+#define LOG_IP 0x00000020 /* IP traffic history */
+#define LOG_FCP 0x00000040 /* FCP traffic history */
+#define LOG_NODE 0x00000080 /* Node table events */
+#define LOG_TEMP 0x00000100 /* Temperature sensor events */
+#define LOG_BG 0x00000200 /* BlockGuard events */
+#define LOG_MISC 0x00000400 /* Miscellaneous events */
+#define LOG_SLI 0x00000800 /* SLI events */
+#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
+#define LOG_LIBDFC 0x00002000 /* Libdfc events */
+#define LOG_VPORT 0x00004000 /* NPIV events */
+#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
+#define LOG_FIP 0x00020000 /* FIP events */
+#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
+#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
+#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+
+#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
+do { \
+ { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
+ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
+} while (0)
+
+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '3')) \
+ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
new file mode 100644
index 000000000..816f596cd
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -0,0 +1,2368 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_compat.h"
+
+/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping list of static
+ * vports to be created.
+ **/
+int
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+ uint16_t offset)
+{
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+
+ mb = &pmb->u.mb;
+
+ /* Setup to dump vport info region */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.entry_index = offset;
+ mb->un.varDmp.region_id = DMP_REGION_VPORT;
+ mb->mbxOwner = OWN_HOST;
+
+ /* For SLI3 HBAs data is embedded in mailbox */
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+ return 0;
+ }
+
+ /* For SLI4 HBAs driver need to allocate memory */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2605 lpfc_dump_static_vport: memory"
+ " allocation failed\n");
+ return 1;
+ }
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+ INIT_LIST_HEAD(&mp->list);
+ /* save address for completion */
+ pmb->context1 = (uint8_t *)mp;
+ mb->un.varWords[3] = putPaddrLow(mp->phys);
+ mb->un.varWords[4] = putPaddrHigh(mp->phys);
+ mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
+
+ return 0;
+}
+
+/**
+ * lpfc_down_link - Bring down HBAs link.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares a mailbox command to bring down HBA link.
+ **/
+void
+lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb = &pmb->u.mb;
+ mb->mbxCommand = MBX_DOWN_LINK;
+ mb->mbxOwner = OWN_HOST;
+}
+
+/**
+ * lpfc_dump_mem - Prepare a mailbox command for reading a region.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset into the region.
+ * @region_id: config region id.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping HBA's config region.
+ **/
+void
+lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
+ uint16_t region_id)
+{
+ MAILBOX_t *mb;
+ void *ctx;
+
+ mb = &pmb->u.mb;
+ ctx = pmb->context2;
+
+ /* Setup to dump VPD region */
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.entry_index = offset;
+ mb->un.varDmp.region_id = region_id;
+ mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
+ mb->un.varDmp.co = 0;
+ mb->un.varDmp.resp_offset = 0;
+ pmb->context2 = ctx;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This function create a dump memory mailbox command to dump wake up
+ * parameters.
+ */
+void
+lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ void *ctx;
+
+ mb = &pmb->u.mb;
+ /* Save context so that we can restore after memset */
+ ctx = pmb->context2;
+
+ /* Setup to dump VPD region */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->mbxOwner = OWN_HOST;
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ mb->un.varDmp.entry_index = 0;
+ mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
+ mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
+ mb->un.varDmp.co = 0;
+ mb->un.varDmp.resp_offset = 0;
+ pmb->context2 = ctx;
+ return;
+}
+
+/**
+ * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read NVRAM mailbox command returns the HBA's non-volatile parameters
+ * that are used as defaults when the Fibre Channel link is brought on-line.
+ *
+ * This routine prepares the mailbox command for reading information stored
+ * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
+ **/
+void
+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_READ_NV;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_config_async - Prepare a mailbox command for enabling HBA async event
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @ring: ring number for the asynchronous event to be configured.
+ *
+ * The asynchronous event enable mailbox command is used to enable the
+ * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
+ * specifies the default ring to which events are posted.
+ *
+ * This routine prepares the mailbox command for enabling HBA asynchronous
+ * event support on a IOCB ring.
+ **/
+void
+lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
+ uint32_t ring)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
+ mb->un.varCfgAsyncEvent.ring = ring;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_heart_beat - Prepare a mailbox command for heart beat
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The heart beat mailbox command is used to detect an unresponsive HBA, which
+ * is defined as any device where no error attention is sent and both mailbox
+ * and rings are not processed.
+ *
+ * This routine prepares the mailbox command for issuing a heart beat in the
+ * form of mailbox command to the HBA. The timely completion of the heart
+ * beat mailbox command indicates the health of the HBA.
+ **/
+void
+lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_HEARTBEAT;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @mp: DMA buffer memory for reading the link attention information into.
+ *
+ * The read topology mailbox command is issued to read the link topology
+ * information indicated by the HBA port when the Link Event bit of the Host
+ * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
+ * Attention ACQE is received from the port (For SLI-4). A Link Event
+ * Attention occurs based on an exception detected at the Fibre Channel link
+ * interface.
+ *
+ * This routine prepares the mailbox command for reading HBA link topology
+ * information. A DMA memory has been set aside and address passed to the
+ * HBA through @mp for the HBA to DMA link attention information into the
+ * memory as part of the execution of the mailbox command.
+ *
+ * Return codes
+ * 0 - Success (currently always return 0)
+ **/
+int
+lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+ struct lpfc_dmabuf *mp)
+{
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ INIT_LIST_HEAD(&mp->list);
+ mb->mbxCommand = MBX_READ_TOPOLOGY;
+ mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
+ mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
+
+ /* Save address for later completion and set the owner to host so that
+ * the FW knows this mailbox is available for processing.
+ */
+ pmb->context1 = (uint8_t *)mp;
+ mb->mbxOwner = OWN_HOST;
+ return (0);
+}
+
+/**
+ * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The clear link attention mailbox command is issued to clear the link event
+ * attention condition indicated by the Link Event bit of the Host Attention
+ * (HSTATT) register. The link event attention condition is cleared only if
+ * the event tag specified matches that of the current link event counter.
+ * The current event tag is read using the read link attention event mailbox
+ * command.
+ *
+ * This routine prepares the mailbox command for clearing HBA link attention
+ * information.
+ **/
+void
+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varClearLA.eventTag = phba->fc_eventTag;
+ mb->mbxCommand = MBX_CLEAR_LA;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure link mailbox command is used before the initialize link
+ * mailbox command to override default value and to configure link-oriented
+ * parameters such as DID address and various timers. Typically, this
+ * command would be used after an F_Port login to set the returned DID address
+ * and the fabric timeout values. This command is not valid before a configure
+ * port command has configured the HBA port.
+ *
+ * This routine prepares the mailbox command for configuring link on a HBA.
+ **/
+void
+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_vport *vport = phba->pport;
+ MAILBOX_t *mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ /* NEW_FEATURE
+ * SLI-2, Coalescing Response Feature.
+ */
+ if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
+ mb->un.varCfgLnk.cr = 1;
+ mb->un.varCfgLnk.ci = 1;
+ mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
+ mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
+ }
+
+ mb->un.varCfgLnk.myId = vport->fc_myDID;
+ mb->un.varCfgLnk.edtov = phba->fc_edtov;
+ mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
+ mb->un.varCfgLnk.ratov = phba->fc_ratov;
+ mb->un.varCfgLnk.rttov = phba->fc_rttov;
+ mb->un.varCfgLnk.altov = phba->fc_altov;
+ mb->un.varCfgLnk.crtov = phba->fc_crtov;
+ mb->un.varCfgLnk.citov = phba->fc_citov;
+
+ if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
+ mb->un.varCfgLnk.ack0_enable = 1;
+
+ mb->mbxCommand = MBX_CONFIG_LINK;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_config_msi - Prepare a mailbox command for configuring msi-x
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
+ * MSI-X multi-message interrupt vector association to interrupt attention
+ * conditions.
+ *
+ * Return codes
+ * 0 - Success
+ * -EINVAL - Failure
+ **/
+int
+lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint32_t attentionConditions[2];
+
+ /* Sanity check */
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0475 Not configured for supporting MSI-X "
+ "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
+ return -EINVAL;
+ }
+
+ if (phba->sli_rev < 3) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0476 HBA not supporting SLI-3 or later "
+ "SLI Revision: 0x%x\n", phba->sli_rev);
+ return -EINVAL;
+ }
+
+ /* Clear mailbox command fields */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+
+ /*
+ * SLI-3, Message Signaled Interrupt Fearure.
+ */
+
+ /* Multi-message attention configuration */
+ attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
+ HA_LATT | HA_MBATT);
+ attentionConditions[1] = 0;
+
+ mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
+ mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
+
+ /*
+ * Set up message number to HA bit association
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* RA0 (FCP Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
+ /* RA1 (Other Protocol Extra Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ /* RA0 (FCP Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
+ /* RA1 (Other Protocol Extra Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
+#endif
+ /* Multi-message interrupt autoclear configuration*/
+ mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
+ mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
+
+ /* For now, HBA autoclear does not work reliably, disable it */
+ mb->un.varCfgMSI.autoClearHA[0] = 0;
+ mb->un.varCfgMSI.autoClearHA[1] = 0;
+
+ /* Set command and owner bit */
+ mb->mbxCommand = MBX_CONFIG_MSI;
+ mb->mbxOwner = OWN_HOST;
+
+ return 0;
+}
+
+/**
+ * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @topology: the link topology for the link to be initialized to.
+ * @linkspeed: the link speed for the link to be initialized to.
+ *
+ * The initialize link mailbox command is used to initialize the Fibre
+ * Channel link. This command must follow a configure port command that
+ * establishes the mode of operation.
+ *
+ * This routine prepares the mailbox command for initializing link on a HBA
+ * with the specified link topology and speed.
+ **/
+void
+lpfc_init_link(struct lpfc_hba * phba,
+ LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
+{
+ lpfc_vpd_t *vpd;
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ psli = &phba->sli;
+ switch (topology) {
+ case FLAGS_TOPOLOGY_MODE_LOOP_PT:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+ break;
+ case FLAGS_TOPOLOGY_MODE_PT_PT:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ break;
+ case FLAGS_TOPOLOGY_MODE_LOOP:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+ break;
+ case FLAGS_TOPOLOGY_MODE_PT_LOOP:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+ break;
+ case FLAGS_LOCAL_LB:
+ mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+ break;
+ }
+
+ /* Enable asynchronous ABTS responses from firmware */
+ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+
+ /* NEW_FEATURE
+ * Setting up the link speed
+ */
+ vpd = &phba->vpd;
+ if (vpd->rev.feaLevelHigh >= 0x02){
+ switch(linkspeed){
+ case LPFC_USER_LINK_SPEED_1G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
+ break;
+ case LPFC_USER_LINK_SPEED_2G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
+ break;
+ case LPFC_USER_LINK_SPEED_4G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
+ break;
+ case LPFC_USER_LINK_SPEED_8G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
+ break;
+ case LPFC_USER_LINK_SPEED_10G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
+ break;
+ case LPFC_USER_LINK_SPEED_16G:
+ mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
+ break;
+ case LPFC_USER_LINK_SPEED_AUTO:
+ default:
+ mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+ break;
+ }
+
+ }
+ else
+ mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+
+ mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
+ mb->mbxOwner = OWN_HOST;
+ mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
+ return;
+}
+
+/**
+ * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @vpi: virtual N_Port identifier.
+ *
+ * The read service parameter mailbox command is used to read the HBA port
+ * service parameters. The service parameters are read into the buffer
+ * specified directly by a BDE in the mailbox command. These service
+ * parameters may then be used to build the payload of an N_Port/F_POrt
+ * login request and reply (LOGI/ACC).
+ *
+ * This routine prepares the mailbox command for reading HBA port service
+ * parameters. The DMA memory is allocated in this function and the addresses
+ * are populated into the mailbox command for the HBA to DMA the service
+ * parameters into.
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - DMA memory allocation failed
+ **/
+int
+lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
+{
+ struct lpfc_dmabuf *mp;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxOwner = OWN_HOST;
+
+ /* Get a buffer to hold the HBAs Service Parameters */
+
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ mb->mbxCommand = MBX_READ_SPARM64;
+ /* READ_SPARAM: no buffers */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "0301 READ_SPARAM: no buffers\n");
+ return (1);
+ }
+ INIT_LIST_HEAD(&mp->list);
+ mb->mbxCommand = MBX_READ_SPARM64;
+ mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+ mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
+
+ /* save address for completion */
+ pmb->context1 = mp;
+
+ return (0);
+}
+
+/**
+ * lpfc_unreg_did - Prepare a mailbox command for unregistering DID
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregister DID mailbox command is used to unregister an N_Port/F_Port
+ * login for an unknown RPI by specifying the DID of a remote port. This
+ * command frees an RPI context in the HBA port. This has the effect of
+ * performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering a remote
+ * N_Port/F_Port (DID) login.
+ **/
+void
+lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
+ LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregDID.did = did;
+ mb->un.varUnregDID.vpi = vpi;
+ if ((vpi != 0xffff) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
+
+ mb->mbxCommand = MBX_UNREG_D_ID;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_read_config - Prepare a mailbox command for reading HBA configuration
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read configuration mailbox command is used to read the HBA port
+ * configuration parameters. This mailbox command provides a method for
+ * seeing any parameters that may have changed via various configuration
+ * mailbox commands.
+ *
+ * This routine prepares the mailbox command for reading out HBA configuration
+ * parameters.
+ **/
+void
+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxCommand = MBX_READ_CONFIG;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read link status mailbox command is used to read the link status from
+ * the HBA. Link status includes all link-related error counters. These
+ * counters are maintained by the HBA and originated in the link hardware
+ * unit. Note that all of these counters wrap.
+ *
+ * This routine prepares the mailbox command for reading out HBA link status.
+ **/
+void
+lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxCommand = MBX_READ_LNK_STAT;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @param: pointer to memory holding the server parameters.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @rpi: the rpi to use in the registration (usually only used for SLI4.
+ *
+ * The registration login mailbox command is used to register an N_Port or
+ * F_Port login. This registration allows the HBA to cache the remote N_Port
+ * service parameters internally and thereby make the appropriate FC-2
+ * decisions. The remote port service parameters are handed off by the driver
+ * to the HBA using a descriptor entry that directly identifies a buffer in
+ * host memory. In exchange, the HBA returns an RPI identifier.
+ *
+ * This routine prepares the mailbox command for registering remote port login.
+ * The function allocates DMA buffer for passing the service parameters to the
+ * HBA with the mailbox command.
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - DMA memory allocation failed
+ **/
+int
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+ uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint8_t *sparam;
+ struct lpfc_dmabuf *mp;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varRegLogin.rpi = 0;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
+ mb->un.varRegLogin.did = did;
+ mb->mbxOwner = OWN_HOST;
+ /* Get a buffer to hold NPorts Service Parameters */
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ mb->mbxCommand = MBX_REG_LOGIN64;
+ /* REG_LOGIN: no buffers */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
+ "rpi x%x\n", vpi, did, rpi);
+ return 1;
+ }
+ INIT_LIST_HEAD(&mp->list);
+ sparam = mp->virt;
+
+ /* Copy param's into a new buffer */
+ memcpy(sparam, param, sizeof (struct serv_parm));
+
+ /* save address for completion */
+ pmb->context1 = (uint8_t *) mp;
+
+ mb->mbxCommand = MBX_REG_LOGIN64;
+ mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+ mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+ return 0;
+}
+
+/**
+ * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @rpi: remote port identifier
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration login mailbox command is used to unregister an N_Port
+ * or F_Port login. This command frees an RPI context in the HBA. It has the
+ * effect of performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering remote port
+ * login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
+ **/
+void
+lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
+ LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregLogin.rpi = rpi;
+ mb->un.varUnregLogin.rsvd1 = 0;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
+
+ mb->mbxCommand = MBX_UNREG_LOGIN;
+ mb->mbxOwner = OWN_HOST;
+
+ return;
+}
+
+/**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ /*
+ * For SLI4 functions, the rpi field is overloaded for
+ * the vport context unreg all. This routine passes
+ * 0 for the rpi field in lpfc_unreg_login for compatibility
+ * with SLI3 and then overrides the rpi field with the
+ * expected value for SLI4.
+ */
+ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+ mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+}
+
+/**
+ * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The registration vport identifier mailbox command is used to activate a
+ * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
+ * N_Port_ID against the information in the selected virtual N_Port context
+ * block and marks it active to allow normal processing of IOCB commands and
+ * received unsolicited exchanges.
+ *
+ * This routine prepares the mailbox command for registering a virtual N_Port.
+ **/
+void
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_hba *phba = vport->phba;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ /*
+ * Set the re-reg VPI bit for f/w to update the MAC address.
+ */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
+ mb->un.varRegVpi.upd = 1;
+
+ mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
+ mb->un.varRegVpi.sid = vport->fc_myDID;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+ else
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+ memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+ mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
+ mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
+
+ mb->mbxCommand = MBX_REG_VPI;
+ mb->mbxOwner = OWN_HOST;
+ return;
+
+}
+
+/**
+ * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration vport identifier mailbox command is used to inactivate
+ * a virtual N_Port. The driver must have logged out and unregistered all
+ * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
+ * unregisters any default RPIs associated with the specified vpi, aborting
+ * any active exchanges. The HBA will post the mailbox response after making
+ * the virtual N_Port inactive.
+ *
+ * This routine prepares the mailbox command for unregistering a virtual
+ * N_Port.
+ **/
+void
+lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+ else if (phba->sli_rev >= LPFC_SLI_REV4)
+ mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
+
+ mb->mbxCommand = MBX_UNREG_VPI;
+ mb->mbxOwner = OWN_HOST;
+ return;
+
+}
+
+/**
+ * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets up and initializes the IOCB rings in the Port Control
+ * Block (PCB).
+ **/
+static void
+lpfc_config_pcb_setup(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ PCB_t *pcbp = phba->pcb;
+ dma_addr_t pdma_addr;
+ uint32_t offset;
+ uint32_t iocbCnt = 0;
+ int i;
+
+ pcbp->maxRing = (psli->num_rings - 1);
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+
+ pring->sli.sli3.sizeCiocb =
+ phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sli.sli3.sizeRiocb =
+ phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
+ /* A ring MUST have both cmd and rsp entries defined to be
+ valid */
+ if ((pring->sli.sli3.numCiocb == 0) ||
+ (pring->sli.sli3.numRiocb == 0)) {
+ pcbp->rdsc[i].cmdEntries = 0;
+ pcbp->rdsc[i].rspEntries = 0;
+ pcbp->rdsc[i].cmdAddrHigh = 0;
+ pcbp->rdsc[i].rspAddrHigh = 0;
+ pcbp->rdsc[i].cmdAddrLow = 0;
+ pcbp->rdsc[i].rspAddrLow = 0;
+ pring->sli.sli3.cmdringaddr = NULL;
+ pring->sli.sli3.rspringaddr = NULL;
+ continue;
+ }
+ /* Command ring setup for ring */
+ pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
+ pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
+
+ offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
+ (uint8_t *) phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
+ pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
+ iocbCnt += pring->sli.sli3.numCiocb;
+
+ /* Response ring setup for ring */
+ pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
+
+ pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
+ offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
+ pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
+ iocbCnt += pring->sli.sli3.numRiocb;
+ }
+}
+
+/**
+ * lpfc_read_rev - Prepare a mailbox command for reading HBA revision
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read revision mailbox command is used to read the revision levels of
+ * the HBA components. These components include hardware units, resident
+ * firmware, and available firmware. HBAs that supports SLI-3 mode of
+ * operation provide different response information depending on the version
+ * requested by the driver.
+ *
+ * This routine prepares the mailbox command for reading HBA revision
+ * information.
+ **/
+void
+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->un.varRdRev.cv = 1;
+ mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
+ mb->mbxCommand = MBX_READ_REV;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+void
+lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_mqe *mqe;
+
+ switch (mb->mbxCommand) {
+ case MBX_READ_REV:
+ mqe = &pmb->u.mqe;
+ lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
+ mqe->un.read_rev.fw_name, 16);
+ lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
+ mqe->un.read_rev.ulp_fw_name, 16);
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
+ * tests the incoming frames' R_CTL/TYPE fields with works 10:15 and performs
+ * the Sequence Length Test using the fields in the Selection Profile 2
+ * extension in words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
+}
+
+/**
+ * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
+ * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test and Byte Field Test using the fields in the
+ * Selection Profile 3 extension in words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
+ hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
+ memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
+ sizeof(hbqmb->profiles.profile3.cmdmatch));
+}
+
+/**
+ * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
+ * HBA tests the initial frame of an incoming sequence using the frame's
+ * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
+ * and Byte Field Test using the fields in the Selection Profile 5 extension
+ * words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
+ hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
+ memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
+ sizeof(hbqmb->profiles.profile5.cmdmatch));
+}
+
+/**
+ * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
+ * @phba: pointer to lpfc hba data structure.
+ * @id: HBQ identifier.
+ * @hbq_desc: pointer to the HBA descriptor data structure.
+ * @hbq_entry_index: index of the HBQ entry data structures.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
+ * an HBQ. The configuration binds events that require buffers to a particular
+ * ring and HBQ based on a selection profile.
+ *
+ * This routine prepares the mailbox command for configuring an HBQ.
+ **/
+void
+lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
+ struct lpfc_hbq_init *hbq_desc,
+ uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
+{
+ int i;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ hbqmb->hbqId = id;
+ hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
+ hbqmb->recvNotify = hbq_desc->rn; /* Receive
+ * Notification */
+ hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
+ * # in words 0-19 */
+ hbqmb->profile = hbq_desc->profile; /* Selection profile:
+ * 0 = all,
+ * 7 = logentry */
+ hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
+ * e.g. Ring0=b0001,
+ * ring2=b0100 */
+ hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
+ * or 5 */
+ hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
+ * HBQ will be used
+ * for LogEntry
+ * buffers */
+ hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
+ hbq_entry_index * sizeof(struct lpfc_hbq_entry);
+ hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
+
+ mb->mbxCommand = MBX_CONFIG_HBQ;
+ mb->mbxOwner = OWN_HOST;
+
+ /* Copy info for profiles 2,3,5. Other
+ * profiles this area is reserved
+ */
+ if (hbq_desc->profile == 2)
+ lpfc_build_hbq_profile2(hbqmb, hbq_desc);
+ else if (hbq_desc->profile == 3)
+ lpfc_build_hbq_profile3(hbqmb, hbq_desc);
+ else if (hbq_desc->profile == 5)
+ lpfc_build_hbq_profile5(hbqmb, hbq_desc);
+
+ /* Return if no rctl / type masks for this HBQ */
+ if (!hbq_desc->mask_count)
+ return;
+
+ /* Otherwise we setup specific rctl / type masks for this HBQ */
+ for (i = 0; i < hbq_desc->mask_count; i++) {
+ hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
+ hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
+ hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
+ hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
+ * @phba: pointer to lpfc hba data structure.
+ * @ring:
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure ring mailbox command is used to configure an IOCB ring. This
+ * configuration binds from one to six of HBA RC_CTL/TYPE mask entries to the
+ * ring. This is used to map incoming sequences to a particular ring whose
+ * RC_CTL/TYPE mask entry matches that of the sequence. The driver should not
+ * attempt to configure a ring whose number is greater than the number
+ * specified in the Port Control Block (PCB). It is an error to issue the
+ * configure ring command more than once with the same ring number. The HBA
+ * returns an error if the driver attempts this.
+ *
+ * This routine prepares the mailbox command for configuring IOCB ring.
+ **/
+void
+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
+{
+ int i;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varCfgRing.ring = ring;
+ mb->un.varCfgRing.maxOrigXchg = 0;
+ mb->un.varCfgRing.maxRespXchg = 0;
+ mb->un.varCfgRing.recvNotify = 1;
+
+ psli = &phba->sli;
+ pring = &psli->ring[ring];
+ mb->un.varCfgRing.numMask = pring->num_mask;
+ mb->mbxCommand = MBX_CONFIG_RING;
+ mb->mbxOwner = OWN_HOST;
+
+ /* Is this ring configured for a specific profile */
+ if (pring->prt[0].profile) {
+ mb->un.varCfgRing.profile = pring->prt[0].profile;
+ return;
+ }
+
+ /* Otherwise we setup specific rctl / type masks for this ring */
+ for (i = 0; i < pring->num_mask; i++) {
+ mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
+ if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
+ mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
+ else
+ mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
+ mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
+ mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_config_port - Prepare a mailbox command for configuring port
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure port mailbox command is used to identify the Port Control
+ * Block (PCB) in the driver memory. After this command is issued, the
+ * driver must not access the mailbox in the HBA without first resetting
+ * the HBA. The HBA may copy the PCB information to internal storage for
+ * subsequent use; the driver can not change the PCB information unless it
+ * resets the HBA.
+ *
+ * This routine prepares the mailbox command for configuring port.
+ **/
+void
+lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
+ MAILBOX_t *mb = &pmb->u.mb;
+ dma_addr_t pdma_addr;
+ uint32_t bar_low, bar_high;
+ size_t offset;
+ struct lpfc_hgp hgp;
+ int i;
+ uint32_t pgp_offset;
+
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_CONFIG_PORT;
+ mb->mbxOwner = OWN_HOST;
+
+ mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
+
+ offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
+ mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+
+ /* Always Host Group Pointer is in SLIM */
+ mb->un.varCfgPort.hps = 1;
+
+ /* If HBA supports SLI=3 ask for it */
+
+ if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
+ if (phba->cfg_enable_bg)
+ mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+ if (phba->cfg_enable_dss)
+ mb->un.varCfgPort.cdss = 1; /* Configure Security */
+ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
+ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
+ if (phba->max_vpi && phba->cfg_enable_npiv &&
+ phba->vpd.sli3Feat.cmv) {
+ mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
+ mb->un.varCfgPort.cmv = 1;
+ } else
+ mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
+ } else
+ phba->sli_rev = LPFC_SLI_REV2;
+ mb->un.varCfgPort.sli_mode = phba->sli_rev;
+
+ /* If this is an SLI3 port, configure async status notification. */
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varCfgPort.casabt = 1;
+
+ /* Now setup pcb */
+ phba->pcb->type = TYPE_NATIVE_SLI2;
+ phba->pcb->feature = FEATURE_INITIAL_SLI2;
+
+ /* Setup Mailbox pointers */
+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
+ offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
+
+ /*
+ * Setup Host Group ring pointer.
+ *
+ * For efficiency reasons, the ring get/put pointers can be
+ * placed in adapter memory (SLIM) rather than in host memory.
+ * This allows firmware to avoid PCI reads/writes when updating
+ * and checking pointers.
+ *
+ * The firmware recognizes the use of SLIM memory by comparing
+ * the address of the get/put pointers structure with that of
+ * the SLIM BAR (BAR0).
+ *
+ * Caution: be sure to use the PCI config space value of BAR0/BAR1
+ * (the hardware's view of the base address), not the OS's
+ * value of pci_resource_start() as the OS value may be a cookie
+ * for ioremap/iomap.
+ */
+
+
+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+
+ /*
+ * Set up HGP - Port Memory
+ *
+ * The port expects the host get/put pointers to reside in memory
+ * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
+ * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
+ * words (0x40 bytes). This area is not reserved if HBQs are
+ * configured in SLI-3.
+ *
+ * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
+ * RR0Get 0xc4 0x84
+ * CR1Put 0xc8 0x88
+ * RR1Get 0xcc 0x8c
+ * CR2Put 0xd0 0x90
+ * RR2Get 0xd4 0x94
+ * CR3Put 0xd8 0x98
+ * RR3Get 0xdc 0x9c
+ *
+ * Reserved 0xa0-0xbf
+ * If HBQs configured:
+ * HBQ 0 Put ptr 0xc0
+ * HBQ 1 Put ptr 0xc4
+ * HBQ 2 Put ptr 0xc8
+ * ......
+ * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
+ *
+ */
+
+ if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
+ phba->host_gp = &phba->mbox->us.s2.host[0];
+ phba->hbq_put = NULL;
+ offset = (uint8_t *)&phba->mbox->us.s2.host -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
+ } else {
+ /* Always Host Group Pointer is in SLIM */
+ mb->un.varCfgPort.hps = 1;
+
+ if (phba->sli_rev == 3) {
+ phba->host_gp = &mb_slim->us.s3.host[0];
+ phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+ } else {
+ phba->host_gp = &mb_slim->us.s2.host[0];
+ phba->hbq_put = NULL;
+ }
+
+ /* mask off BAR0's flag bits 0 - 3 */
+ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (void __iomem *)phba->host_gp -
+ (void __iomem *)phba->MBslimaddr;
+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ phba->pcb->hgpAddrHigh = bar_high;
+ else
+ phba->pcb->hgpAddrHigh = 0;
+ /* write HGP data to SLIM at the required longword offset */
+ memset(&hgp, 0, sizeof(struct lpfc_hgp));
+
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+ sizeof(*phba->host_gp));
+ }
+ }
+
+ /* Setup Port Group offset */
+ if (phba->sli_rev == 3)
+ pgp_offset = offsetof(struct lpfc_sli2_slim,
+ mbx.us.s3_pgp.port);
+ else
+ pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
+ pdma_addr = phba->slim2p.phys + pgp_offset;
+ phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
+
+ /* Use callback routine to setp rings in the pcb */
+ lpfc_config_pcb_setup(phba);
+
+ /* special handling for LC HBAs */
+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+ uint32_t hbainit[5];
+
+ lpfc_hba_init(phba, hbainit);
+
+ memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
+ }
+
+ /* Swap PCB if needed */
+ lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
+}
+
+/**
+ * lpfc_kill_board - Prepare a mailbox command for killing board
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The kill board mailbox command is used to tell firmware to perform a
+ * graceful shutdown of a channel on a specified board to prepare for reset.
+ * When the kill board mailbox command is received, the ER3 bit is set to 1
+ * in the Host Status register and the ER Attention bit is set to 1 in the
+ * Host Attention register of the HBA function that received the kill board
+ * command.
+ *
+ * This routine prepares the mailbox command for killing the board in
+ * preparation for a graceful shutdown.
+ **/
+void
+lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb = &pmb->u.mb;
+
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_KILL_BOARD;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**
+ * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * Driver maintains a internal mailbox command queue implemented as a linked
+ * list. When a mailbox command is issued, it shall be put into the mailbox
+ * command queue such that they shall be processed orderly as HBA can process
+ * one mailbox command at a time.
+ **/
+void
+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+
+ list_add_tail(&mbq->list, &psli->mboxq);
+
+ psli->mboxq_cnt++;
+
+ return;
+}
+
+/**
+ * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Driver maintains a internal mailbox command queue implemented as a linked
+ * list. When a mailbox command is issued, it shall be put into the mailbox
+ * command queue such that they shall be processed orderly as HBA can process
+ * one mailbox command at a time. After HBA finished processing a mailbox
+ * command, the driver will remove a pending mailbox command from the head of
+ * the mailbox command queue and send to the HBA for processing.
+ *
+ * Return codes
+ * pointer to the driver internal queue element for mailbox command.
+ **/
+LPFC_MBOXQ_t *
+lpfc_mbox_get(struct lpfc_hba * phba)
+{
+ LPFC_MBOXQ_t *mbq = NULL;
+ struct lpfc_sli *psli = &phba->sli;
+
+ list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
+ if (mbq)
+ psli->mboxq_cnt--;
+
+ return mbq;
+}
+
+/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine put the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+ list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
+/**
+ * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine put the completed mailbox command into the mailbox command
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+ unsigned long iflag;
+
+ /* This function expects to be called from interrupt context */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ __lpfc_mbox_cmpl_put(phba, mbq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/**
+ * lpfc_mbox_cmd_check - Check the validality of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is to check whether a mailbox command is valid to be issued.
+ * This check will be performed by both the mailbox issue API when a client
+ * is to issue a mailbox command to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ /* Mailbox command that have a completion handler must also have a
+ * vport specified.
+ */
+ if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+ mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+ if (!mboxq->vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "1814 Mbox x%x failed, no vport\n",
+ mboxq->u.mb.mbxCommand);
+ dump_stack();
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to check whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API at the time the
+ * to post a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+ /* If the PCI channel is in offline state, do not issue mbox */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return -ENODEV;
+
+ /* If the HBA is in error state, do not issue mbox */
+ if (phba->link_state == LPFC_HBA_ERROR)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
+ * @phba: pointer to lpfc hba data structure.
+ * @cmd: mailbox command code.
+ *
+ * This routine retrieves the proper timeout value according to the mailbox
+ * command code.
+ *
+ * Return codes
+ * Timeout value to be used for the given mailbox command
+ **/
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ MAILBOX_t *mbox = &mboxq->u.mb;
+ uint8_t subsys, opcode;
+
+ switch (mbox->mbxCommand) {
+ case MBX_WRITE_NV: /* 0x03 */
+ case MBX_DUMP_MEMORY: /* 0x17 */
+ case MBX_UPDATE_CFG: /* 0x1B */
+ case MBX_DOWN_LOAD: /* 0x1C */
+ case MBX_DEL_LD_ENTRY: /* 0x1D */
+ case MBX_WRITE_VPARMS: /* 0x32 */
+ case MBX_LOAD_AREA: /* 0x81 */
+ case MBX_WRITE_WWN: /* 0x98 */
+ case MBX_LOAD_EXP_ROM: /* 0x9C */
+ case MBX_ACCESS_VDATA: /* 0xA5 */
+ return LPFC_MBOX_TMO_FLASH_CMD;
+ case MBX_SLI4_CONFIG: /* 0x9b */
+ subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
+ opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
+ if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_READ_OBJECT:
+ case LPFC_MBOX_OPCODE_WRITE_OBJECT:
+ case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
+ case LPFC_MBOX_OPCODE_DELETE_OBJECT:
+ case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
+ case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+ case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
+ case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
+ case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
+ case LPFC_MBOX_OPCODE_RESET_LICENSES:
+ case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
+ case LPFC_MBOX_OPCODE_GET_VPD_DATA:
+ case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
+ return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+ }
+ }
+ if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
+ return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+ }
+ }
+ return LPFC_MBOX_SLI4_CONFIG_TMO;
+ }
+ return LPFC_MBOX_TMO;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+ dma_addr_t phyaddr, uint32_t length)
+{
+ struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &mbox->u.mqe.un.nembed_cmd;
+ nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+ nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+ nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+ struct lpfc_mbx_sge *sge)
+{
+ struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &mbox->u.mqe.un.nembed_cmd;
+ sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+ sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+ sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees SLI4 specific mailbox command for sending IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_sli4_config *sli4_cfg;
+ struct lpfc_mbx_sge sge;
+ dma_addr_t phyaddr;
+ uint32_t sgecount, sgentry;
+
+ sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+ /* For embedded mbox command, just free the mbox command */
+ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return;
+ }
+
+ /* For non-embedded mbox command, we need to free the pages first */
+ sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+ /* There is nothing we can do if there is no sge address array */
+ if (unlikely(!mbox->sge_array)) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return;
+ }
+ /* Each non-embedded DMA memory was allocated in the length of a page */
+ for (sgentry = 0; sgentry < sgecount; sgentry++) {
+ lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+ phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ mbox->sge_array->addr[sgentry], phyaddr);
+ }
+ /* Free the sge address array memory */
+ kfree(mbox->sge_array);
+ /* Finally, free the mailbox command itself */
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
+ *
+ * This routine sets up the header fields of SLI4 specific mailbox command
+ * for sending IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ * for none embedded mailbox command).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+ struct lpfc_mbx_sli4_config *sli4_config;
+ union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+ uint32_t alloc_len;
+ uint32_t resid_len;
+ uint32_t pagen, pcount;
+ void *viraddr;
+ dma_addr_t phyaddr;
+
+ /* Set up SLI4 mailbox command header fields */
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+ /* Set up SLI4 ioctl command header fields */
+ sli4_config = &mbox->u.mqe.un.sli4_config;
+
+ /* Setup for the embedded mbox command */
+ if (emb) {
+ /* Set up main header fields */
+ bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+ sli4_config->header.cfg_mhdr.payload_length = length;
+ /* Set up sub-header fields following main header */
+ bf_set(lpfc_mbox_hdr_opcode,
+ &sli4_config->header.cfg_shdr.request, opcode);
+ bf_set(lpfc_mbox_hdr_subsystem,
+ &sli4_config->header.cfg_shdr.request, subsystem);
+ sli4_config->header.cfg_shdr.request.request_length =
+ length - LPFC_MBX_CMD_HDR_LENGTH;
+ return length;
+ }
+
+ /* Setup for the non-embedded mbox command */
+ pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
+ pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+ LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+ /* Allocate record for keeping SGE virtual addresses */
+ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ GFP_KERNEL);
+ if (!mbox->sge_array) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2527 Failed to allocate non-embedded SGE "
+ "array.\n");
+ return 0;
+ }
+ for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+ /* The DMA memory is always allocated in the length of a
+ * page even though the last SGE might not fill up to a
+ * page, this is used as a priori size of SLI4_PAGE_SIZE for
+ * the later DMA memory free.
+ */
+ viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE, &phyaddr,
+ GFP_KERNEL);
+ /* In case of malloc fails, proceed with whatever we have */
+ if (!viraddr)
+ break;
+ mbox->sge_array->addr[pagen] = viraddr;
+ /* Keep the first page for later sub-header construction */
+ if (pagen == 0)
+ cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+ resid_len = length - alloc_len;
+ if (resid_len > SLI4_PAGE_SIZE) {
+ lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+ SLI4_PAGE_SIZE);
+ alloc_len += SLI4_PAGE_SIZE;
+ } else {
+ lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+ resid_len);
+ alloc_len = length;
+ }
+ }
+
+ /* Set up main header fields in mailbox command */
+ sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+ bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+ /* Set up sub-header fields into the first page */
+ if (pagen > 0) {
+ bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+ bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+ cfg_shdr->request.request_length =
+ alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
+ }
+ /* The sub-header is in DMA memory, which needs endian converstion */
+ if (cfg_shdr)
+ lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+ sizeof(union lpfc_sli4_cfg_shdr));
+ return alloc_len;
+}
+
+/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands. It is called after lpfc_sli4_config. The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: the actual length of the mbox command allocated.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+ uint8_t opcode = 0;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+ void *virtaddr = NULL;
+
+ /* Set up SLI4 ioctl command header fields */
+ if (emb == LPFC_SLI4_MBX_NEMBED) {
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ virtaddr = mbox->sge_array->addr[0];
+ if (virtaddr == NULL)
+ return 1;
+ n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ }
+
+ /*
+ * The resource type is common to all extent Opcodes and resides in the
+ * same position.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ rsrc_type);
+ else {
+ /* This is DMA data. Byteswap is required. */
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ n_rsrc_extnt, rsrc_type);
+ lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+ &n_rsrc_extnt->word4,
+ sizeof(uint32_t));
+ }
+
+ /* Complete the initialization for the particular Opcode. */
+ opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ exts_count);
+ else
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ n_rsrc_extnt, exts_count);
+ break;
+ case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+ case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+ case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+ /* Initialization is complete.*/
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2929 Resource Extent Opcode x%x is "
+ "unsupported\n", opcode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
+ * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
+ * be returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_sli4_config *sli4_cfg;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+ if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+ return LPFC_MBOX_SUBSYSTEM_NA;
+ sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+ /* For embedded mbox command, get opcode from embedded sub-header*/
+ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+ return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+ }
+
+ /* For non-embedded mbox command, get opcode from first dma page */
+ if (unlikely(!mbox->sge_array))
+ return LPFC_MBOX_SUBSYSTEM_NA;
+ cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+ return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
+ * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be
+ * returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_sli4_config *sli4_cfg;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+ if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+ return LPFC_MBOX_OPCODE_NA;
+ sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+ /* For embedded mbox command, get opcode from embedded sub-header*/
+ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+ return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+ }
+
+ /* For non-embedded mbox command, get opcode from first dma page */
+ if (unlikely(!mbox->sge_array))
+ return LPFC_MBOX_OPCODE_NA;
+ cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+ return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index to fcf table.
+ *
+ * This routine routine allocates and constructs non-embedded mailbox command
+ * for reading a FCF table entry referred by @fcf_index.
+ *
+ * Return: pointer to the mailbox command constructed if successful, otherwise
+ * NULL.
+ **/
+int
+lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
+ struct lpfcMboxq *mboxq,
+ uint16_t fcf_index)
+{
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ uint8_t *bytep;
+ struct lpfc_mbx_sge sge;
+ uint32_t alloc_len, req_len;
+ struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+ if (!mboxq)
+ return -ENOMEM;
+
+ req_len = sizeof(struct fcf_record) +
+ sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+ /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0291 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ virt_addr = mboxq->sge_array->addr[0];
+ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+ /* Set up command fields */
+ bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+ /* Perform necessary endian conversion */
+ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+ lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+
+ return 0;
+}
+
+/**
+ * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+ /* Set up SLI4 mailbox command header fields */
+ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+ bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+ /* Set up host requested features. */
+ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
+
+ /* Enable DIF (block guard) only if configured to do so. */
+ if (phba->cfg_enable_bg)
+ bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+ /* Enable NPIV only if configured to do so. */
+ if (phba->max_vpi && phba->cfg_enable_npiv)
+ bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+ return;
+}
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to setup a VFI
+ * before issuing a FLOGI to login to the VSAN. The driver should also issue a
+ * REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+ struct lpfc_mbx_init_vfi *init_vfi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ mbox->vport = vport;
+ init_vfi = &mbox->u.mqe.un.init_vfi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+ bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+ bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+ bf_set(lpfc_init_vfi_vp, init_vfi, 1);
+ bf_set(lpfc_init_vfi_vfi, init_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_init_vfi_vpi, init_vfi,
+ vport->phba->vpi_ids[vport->vpi]);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi,
+ vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
+ * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
+ * fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+ struct lpfc_mbx_reg_vfi *reg_vfi;
+ struct lpfc_hba *phba = vport->phba;
+
+ memset(mbox, 0, sizeof(*mbox));
+ reg_vfi = &mbox->u.mqe.un.reg_vfi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+ bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+ phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
+ memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
+ reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
+ reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+ reg_vfi->e_d_tov = phba->fc_edtov;
+ reg_vfi->r_a_tov = phba->fc_ratov;
+ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+ reg_vfi->bde.addrLow = putPaddrLow(phys);
+ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+
+ /* Only FC supports upd bit */
+ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
+ (vport->fc_flag & FC_VFI_REGISTERED) &&
+ (!phba->fc_topology_changed)) {
+ bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
+ bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+ "3134 Register VFI, mydid:x%x, fcfi:%d, "
+ " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
+ " port_state:x%x topology chg:%d\n",
+ vport->fc_myDID,
+ phba->fcf.fcfi,
+ phba->sli4_hba.vfi_ids[vport->vfi],
+ phba->vpi_ids[vport->vpi],
+ reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+ vport->port_state, phba->fc_topology_changed);
+}
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @phba: pointer to the hba structure to init the VPI for.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N Port. The SLI Host issues this command before issuing a
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual NPort login.
+ **/
+void
+lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
+{
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+ bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
+ phba->vpi_ids[vpi]);
+ bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
+ phba->sli4_hba.vfi_ids[phba->pport->vfi]);
+}
+
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+ bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+}
+
+/**
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
+ * @phba: pointer to the hba structure containing.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function create a SLI4 dump mailbox command to dump configure
+ * region 23.
+ **/
+int
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_dmabuf *mp = NULL;
+ MAILBOX_t *mb;
+
+ memset(mbox, 0, sizeof(*mbox));
+ mb = &mbox->u.mb;
+
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ /* dump config region 23 failed to allocate memory */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "2569 lpfc dump config region 23: memory"
+ " allocation failed\n");
+ return 1;
+ }
+
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+ INIT_LIST_HEAD(&mp->list);
+
+ /* save address for completion */
+ mbox->context1 = (uint8_t *) mp;
+
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.region_id = DMP_REGION_23;
+ mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+ mb->un.varWords[3] = putPaddrLow(mp->phys);
+ mb->un.varWords[4] = putPaddrHigh(mp->phys);
+ return 0;
+}
+
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command also is used
+ * to indicate where received unsolicited frames from this FCF will be sent. By
+ * default this routine will set up the FCF to forward all unsolicited frames
+ * the the RQ ID passed in the @phba. This can be overridden by the caller for
+ * more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+ bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
+ bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
+ phba->fcf.current_rec.fcf_indx);
+ /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
+ if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
+ bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+ bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
+ phba->fcf.current_rec.vlan_id);
+ }
+}
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+ bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = ndlp->phba;
+ struct lpfc_mbx_resume_rpi *resume_rpi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ resume_rpi = &mbox->u.mqe.un.resume_rpi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+ bf_set(lpfc_resume_rpi_index, resume_rpi,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
+ resume_rpi->event_tag = ndlp->phba->fc_eventTag;
+}
+
+/**
+ * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+ * mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES supported pages mailbox command is issued to
+ * retrieve the particular feature pages supported by the port.
+ **/
+void
+lpfc_supported_pages(struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_supp_pages *supp_pages;
+
+ memset(mbox, 0, sizeof(*mbox));
+ supp_pages = &mbox->u.mqe.un.supp_pages;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+ bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+}
+
+/**
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+ * retrieve the particular SLI4 features supported by the port.
+ **/
+void
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_pc_sli4_params *sli4_params;
+
+ memset(mbox, 0, sizeof(*mbox));
+ sli4_params = &mbox->u.mqe.un.sli4_params;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+ bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
new file mode 100644
index 000000000..3fa65338d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -0,0 +1,588 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/scsi.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+
+#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
+#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
+
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
+ size_t bytes;
+ int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+ if (max_xri <= 0)
+ return -ENOMEM;
+ bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+ sizeof(unsigned long);
+ phba->cfg_rrq_xri_bitmap_sz = bytes;
+ phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ bytes);
+ if (!phba->active_rrq_pool)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_mem_alloc - create and allocate all PCI and memory pools
+ * @phba: HBA to allocate pools for
+ *
+ * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
+ * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
+ * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held. If any
+ * allocation fails, frees all successfully allocated memory before returning.
+ *
+ * Returns:
+ * 0 on success
+ * -ENOMEM on failure (if any memory allocations fail)
+ **/
+int
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ int i;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* Calculate alignment */
+ if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+ i = phba->cfg_sg_dma_buf_size;
+ else
+ i = SLI4_PAGE_SIZE;
+
+ phba->lpfc_scsi_dma_buf_pool =
+ pci_pool_create("lpfc_scsi_dma_buf_pool",
+ phba->pcidev,
+ phba->cfg_sg_dma_buf_size,
+ i,
+ 0);
+ } else {
+ phba->lpfc_scsi_dma_buf_pool =
+ pci_pool_create("lpfc_scsi_dma_buf_pool",
+ phba->pcidev, phba->cfg_sg_dma_buf_size,
+ align, 0);
+ }
+
+ if (!phba->lpfc_scsi_dma_buf_pool)
+ goto fail;
+
+ phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+ LPFC_BPL_SIZE,
+ align, 0);
+ if (!phba->lpfc_mbuf_pool)
+ goto fail_free_dma_buf_pool;
+
+ pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
+ LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
+ if (!pool->elements)
+ goto fail_free_lpfc_mbuf_pool;
+
+ pool->max_count = 0;
+ pool->current_count = 0;
+ for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
+ pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+ GFP_KERNEL, &pool->elements[i].phys);
+ if (!pool->elements[i].virt)
+ goto fail_free_mbuf_pool;
+ pool->max_count++;
+ pool->current_count++;
+ }
+
+ phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ sizeof(LPFC_MBOXQ_t));
+ if (!phba->mbox_mem_pool)
+ goto fail_free_mbuf_pool;
+
+ phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ sizeof(struct lpfc_nodelist));
+ if (!phba->nlp_mem_pool)
+ goto fail_free_mbox_pool;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ phba->rrq_pool =
+ mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ sizeof(struct lpfc_node_rrq));
+ if (!phba->rrq_pool)
+ goto fail_free_nlp_mem_pool;
+ phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+ phba->pcidev,
+ LPFC_HDR_BUF_SIZE, align, 0);
+ if (!phba->lpfc_hrb_pool)
+ goto fail_free_rrq_mem_pool;
+
+ phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+ phba->pcidev,
+ LPFC_DATA_BUF_SIZE, align, 0);
+ if (!phba->lpfc_drb_pool)
+ goto fail_free_hrb_pool;
+ phba->lpfc_hbq_pool = NULL;
+ } else {
+ phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
+ phba->pcidev, LPFC_BPL_SIZE, align, 0);
+ if (!phba->lpfc_hbq_pool)
+ goto fail_free_nlp_mem_pool;
+ phba->lpfc_hrb_pool = NULL;
+ phba->lpfc_drb_pool = NULL;
+ }
+
+ if (phba->cfg_EnableXLane) {
+ phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+ LPFC_DEVICE_DATA_POOL_SIZE,
+ sizeof(struct lpfc_device_data));
+ if (!phba->device_data_mem_pool)
+ goto fail_free_hrb_pool;
+ } else {
+ phba->device_data_mem_pool = NULL;
+ }
+
+ return 0;
+ fail_free_hrb_pool:
+ pci_pool_destroy(phba->lpfc_hrb_pool);
+ phba->lpfc_hrb_pool = NULL;
+ fail_free_rrq_mem_pool:
+ mempool_destroy(phba->rrq_pool);
+ phba->rrq_pool = NULL;
+ fail_free_nlp_mem_pool:
+ mempool_destroy(phba->nlp_mem_pool);
+ phba->nlp_mem_pool = NULL;
+ fail_free_mbox_pool:
+ mempool_destroy(phba->mbox_mem_pool);
+ phba->mbox_mem_pool = NULL;
+ fail_free_mbuf_pool:
+ while (i--)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+ fail_free_lpfc_mbuf_pool:
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+ phba->lpfc_mbuf_pool = NULL;
+ fail_free_dma_buf_pool:
+ pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ phba->lpfc_scsi_dma_buf_pool = NULL;
+ fail:
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
+ * @phba: HBA to free memory for
+ *
+ * Description: Free the memory allocated by lpfc_mem_alloc routine. This
+ * routine is a the counterpart of lpfc_mem_alloc.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free(struct lpfc_hba *phba)
+{
+ int i;
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ struct lpfc_device_data *device_data;
+
+ /* Free HBQ pools */
+ lpfc_sli_hbqbuf_free_all(phba);
+ if (phba->lpfc_drb_pool)
+ pci_pool_destroy(phba->lpfc_drb_pool);
+ phba->lpfc_drb_pool = NULL;
+ if (phba->lpfc_hrb_pool)
+ pci_pool_destroy(phba->lpfc_hrb_pool);
+ phba->lpfc_hrb_pool = NULL;
+
+ if (phba->lpfc_hbq_pool)
+ pci_pool_destroy(phba->lpfc_hbq_pool);
+ phba->lpfc_hbq_pool = NULL;
+
+ if (phba->rrq_pool)
+ mempool_destroy(phba->rrq_pool);
+ phba->rrq_pool = NULL;
+
+ /* Free NLP memory pool */
+ mempool_destroy(phba->nlp_mem_pool);
+ phba->nlp_mem_pool = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+ mempool_destroy(phba->active_rrq_pool);
+ phba->active_rrq_pool = NULL;
+ }
+
+ /* Free mbox memory pool */
+ mempool_destroy(phba->mbox_mem_pool);
+ phba->mbox_mem_pool = NULL;
+
+ /* Free MBUF memory pool */
+ for (i = 0; i < pool->current_count; i++)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+ phba->lpfc_mbuf_pool = NULL;
+
+ /* Free DMA buffer memory pool */
+ pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ phba->lpfc_scsi_dma_buf_pool = NULL;
+
+ /* Free Device Data memory pool */
+ if (phba->device_data_mem_pool) {
+ /* Ensure all objects have been returned to the pool */
+ while (!list_empty(&phba->luns)) {
+ device_data = list_first_entry(&phba->luns,
+ struct lpfc_device_data,
+ listentry);
+ list_del(&device_data->listentry);
+ mempool_free(device_data, phba->device_data_mem_pool);
+ }
+ mempool_destroy(phba->device_data_mem_pool);
+ }
+ phba->device_data_mem_pool = NULL;
+ return;
+}
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Free memory from PCI and driver memory pools and also those
+ * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
+ * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
+ * the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mbox, *next_mbox;
+ struct lpfc_dmabuf *mp;
+
+ /* Free memory used in mailbox queue back to mailbox memory pool */
+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mbox->list);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ /* Free memory used in mailbox cmpl list back to mailbox memory pool */
+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mbox->list);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ /* Free the active mailbox command back to the mailbox memory pool */
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ if (psli->mbox_active) {
+ mbox = psli->mbox_active;
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ psli->mbox_active = NULL;
+ }
+
+ /* Free and destroy all the allocated memory pools */
+ lpfc_mem_free(phba);
+
+ /* Free the iocb lookup array */
+ kfree(psli->iocbq_lookup);
+ psli->iocbq_lookup = NULL;
+
+ return;
+}
+
+/**
+ * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the mbuf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
+ * Allocates from generic pci_pool_alloc function first and if that fails and
+ * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
+ * HBA's pool.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held. Takes
+ * phba->hbalock.
+ *
+ * Returns:
+ * pointer to the allocated mbuf on success
+ * NULL on failure
+ **/
+void *
+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ unsigned long iflags;
+ void *ret;
+
+ ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
+ pool->current_count--;
+ ret = pool->elements[pool->current_count].virt;
+ *handle = pool->elements[pool->current_count].phys;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return ret;
+}
+
+/**
+ * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
+ * it is below its max_count, frees the mbuf otherwise.
+ *
+ * Notes: Must be called with phba->hbalock held to synchronize access to
+ * lpfc_mbuf_safety_pool.
+ *
+ * Returns: None
+ **/
+void
+__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+ if (pool->current_count < pool->max_count) {
+ pool->elements[pool->current_count].virt = virt;
+ pool->elements[pool->current_count].phys = dma;
+ pool->current_count++;
+ } else {
+ pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+ }
+ return;
+}
+
+/**
+ * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
+ * it is below its max_count, frees the mbuf otherwise.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_mbuf_free(phba, virt, dma);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+}
+
+/**
+ * lpfc_els_hbq_alloc - Allocate an HBQ buffer
+ * @phba: HBA to allocate HBQ buffer for
+ *
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
+ * pool along a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held.
+ *
+ * Returns:
+ * pointer to HBQ on success
+ * NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_els_hbq_alloc(struct lpfc_hba *phba)
+{
+ struct hbq_dmabuf *hbqbp;
+
+ hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ if (!hbqbp)
+ return NULL;
+
+ hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+ &hbqbp->dbuf.phys);
+ if (!hbqbp->dbuf.virt) {
+ kfree(hbqbp);
+ return NULL;
+ }
+ hbqbp->size = LPFC_BPL_SIZE;
+ return hbqbp;
+}
+
+/**
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * @phba: HBA buffer was allocated for
+ * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffer returned by
+ * lpfc_els_hbq_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
+{
+ pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+ kfree(hbqbp);
+ return;
+}
+
+/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
+ * pool along a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held.
+ *
+ * Returns:
+ * pointer to HBQ on success
+ * NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+ struct hbq_dmabuf *dma_buf;
+
+ dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ if (!dma_buf)
+ return NULL;
+
+ dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+ &dma_buf->hbuf.phys);
+ if (!dma_buf->hbuf.virt) {
+ kfree(dma_buf);
+ return NULL;
+ }
+ dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+ &dma_buf->dbuf.phys);
+ if (!dma_buf->dbuf.virt) {
+ pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_buf->hbuf.phys);
+ kfree(dma_buf);
+ return NULL;
+ }
+ dma_buf->size = LPFC_BPL_SIZE;
+ return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+ pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+ pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+ kfree(dmab);
+ return;
+}
+
+/**
+ * lpfc_in_buf_free - Free a DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer in the appropriate way given if the
+ * HBA is running in SLI3 mode with HBQs enabled.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+ struct hbq_dmabuf *hbq_entry;
+ unsigned long flags;
+
+ if (!mp)
+ return;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ /* Check whether HBQ is still in use */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!phba->hbq_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+ hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_entry->dbuf.list);
+ if (hbq_entry->tag == -1) {
+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+ (phba, hbq_entry);
+ } else {
+ lpfc_sli_free_hbq(phba, hbq_entry);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ } else {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
new file mode 100644
index 000000000..f2b1bbcb1
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -0,0 +1,179 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2010 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/* Event definitions for RegisterForEvent */
+#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
+#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
+#define FC_REG_CT_EVENT 0x0004 /* CT request events */
+#define FC_REG_DUMP_EVENT 0x0010 /* Dump events */
+#define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */
+#define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */
+#define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */
+#define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */
+#define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */
+#define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */
+#define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */
+#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
+ FC_REG_RSCN_EVENT | \
+ FC_REG_CT_EVENT | \
+ FC_REG_DUMP_EVENT | \
+ FC_REG_TEMPERATURE_EVENT | \
+ FC_REG_VPORTRSCN_EVENT | \
+ FC_REG_ELS_EVENT | \
+ FC_REG_FABRIC_EVENT | \
+ FC_REG_SCSI_EVENT | \
+ FC_REG_BOARD_EVENT | \
+ FC_REG_ADAPTER_EVENT)
+/* Temperature events */
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
+/*
+ * All net link event payloads will begin with and event type
+ * and subcategory. The event type must come first.
+ * The subcategory further defines the data that follows in the rest
+ * of the payload. Each category will have its own unique header plus
+ * any additional data unique to the subcategory.
+ * The payload sent via the fc transport is one-way driver->application.
+ */
+
+/* RSCN event header */
+struct lpfc_rscn_event_header {
+ uint32_t event_type;
+ uint32_t payload_length; /* RSCN data length in bytes */
+ uint32_t rscn_payload[];
+};
+
+/* els event header */
+struct lpfc_els_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_ELS_EVENT */
+#define LPFC_EVENT_PLOGI_RCV 0x01
+#define LPFC_EVENT_PRLO_RCV 0x02
+#define LPFC_EVENT_ADISC_RCV 0x04
+#define LPFC_EVENT_LSRJT_RCV 0x08
+#define LPFC_EVENT_LOGO_RCV 0x10
+
+/* special els lsrjt event */
+struct lpfc_lsrjt_event {
+ struct lpfc_els_event_header header;
+ uint32_t command;
+ uint32_t reason_code;
+ uint32_t explanation;
+};
+
+/* special els logo event */
+struct lpfc_logo_event {
+ struct lpfc_els_event_header header;
+ uint8_t logo_wwpn[8];
+};
+
+/* fabric event header */
+struct lpfc_fabric_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_FABRIC_EVENT */
+#define LPFC_EVENT_FABRIC_BUSY 0x01
+#define LPFC_EVENT_PORT_BUSY 0x02
+#define LPFC_EVENT_FCPRDCHKERR 0x04
+
+/* special case fabric fcprdchkerr event */
+struct lpfc_fcprdchkerr_event {
+ struct lpfc_fabric_event_header header;
+ uint32_t lun;
+ uint32_t opcode;
+ uint32_t fcpiparam;
+};
+
+
+/* scsi event header */
+struct lpfc_scsi_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint32_t lun;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_SCSI_EVENT */
+#define LPFC_EVENT_QFULL 0x0001
+#define LPFC_EVENT_DEVBSY 0x0002
+#define LPFC_EVENT_CHECK_COND 0x0004
+#define LPFC_EVENT_LUNRESET 0x0008
+#define LPFC_EVENT_TGTRESET 0x0010
+#define LPFC_EVENT_BUSRESET 0x0020
+#define LPFC_EVENT_VARQUEDEPTH 0x0040
+
+/* special case scsi varqueuedepth event */
+struct lpfc_scsi_varqueuedepth_event {
+ struct lpfc_scsi_event_header scsi_event;
+ uint32_t oldval;
+ uint32_t newval;
+};
+
+/* special case scsi check condition event */
+struct lpfc_scsi_check_condition_event {
+ struct lpfc_scsi_event_header scsi_event;
+ uint8_t opcode;
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+};
+
+/* event codes for FC_REG_BOARD_EVENT */
+#define LPFC_EVENT_PORTINTERR 0x01
+
+/* board event header */
+struct lpfc_board_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+};
+
+
+/* event codes for FC_REG_ADAPTER_EVENT */
+#define LPFC_EVENT_ARRIVAL 0x01
+
+/* adapter event header */
+struct lpfc_adapter_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+};
+
+
+/* event codes for temp_event */
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
+
+struct temp_event {
+ uint32_t event_type;
+ uint32_t event_code;
+ uint32_t data;
+};
+
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
new file mode 100644
index 000000000..4cb9882af
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -0,0 +1,2577 @@
+ /*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+
+/* Called to verify a rcv'ed ADISC was intended for us. */
+static int
+lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_name *nn, struct lpfc_name *pn)
+{
+ /* First, we MUST have a RPI registered */
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+ return 0;
+
+ /* Compare the ADISC rsp WWNN / WWPN matches our internal node
+ * table entry for that node.
+ */
+ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
+ return 0;
+
+ if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
+ return 0;
+
+ /* we match, return success */
+ return 1;
+}
+
+int
+lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp, uint32_t class, int flogi)
+{
+ volatile struct serv_parm *hsp = &vport->fc_sparam;
+ uint16_t hsp_value, ssp_value = 0;
+
+ /*
+ * The receive data field size and buffer-to-buffer receive data field
+ * size entries are 16 bits but are represented as two 8-bit fields in
+ * the driver data structure to account for rsvd bits and other control
+ * bits. Reconstruct and compare the fields as a 16-bit values before
+ * correcting the byte values.
+ */
+ if (sp->cls1.classValid) {
+ if (!flogi) {
+ hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
+ hsp->cls1.rcvDataSizeLsb);
+ ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
+ sp->cls1.rcvDataSizeLsb);
+ if (!ssp_value)
+ goto bad_service_param;
+ if (ssp_value > hsp_value) {
+ sp->cls1.rcvDataSizeLsb =
+ hsp->cls1.rcvDataSizeLsb;
+ sp->cls1.rcvDataSizeMsb =
+ hsp->cls1.rcvDataSizeMsb;
+ }
+ }
+ } else if (class == CLASS1)
+ goto bad_service_param;
+ if (sp->cls2.classValid) {
+ if (!flogi) {
+ hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
+ hsp->cls2.rcvDataSizeLsb);
+ ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
+ sp->cls2.rcvDataSizeLsb);
+ if (!ssp_value)
+ goto bad_service_param;
+ if (ssp_value > hsp_value) {
+ sp->cls2.rcvDataSizeLsb =
+ hsp->cls2.rcvDataSizeLsb;
+ sp->cls2.rcvDataSizeMsb =
+ hsp->cls2.rcvDataSizeMsb;
+ }
+ }
+ } else if (class == CLASS2)
+ goto bad_service_param;
+ if (sp->cls3.classValid) {
+ if (!flogi) {
+ hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
+ hsp->cls3.rcvDataSizeLsb);
+ ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
+ sp->cls3.rcvDataSizeLsb);
+ if (!ssp_value)
+ goto bad_service_param;
+ if (ssp_value > hsp_value) {
+ sp->cls3.rcvDataSizeLsb =
+ hsp->cls3.rcvDataSizeLsb;
+ sp->cls3.rcvDataSizeMsb =
+ hsp->cls3.rcvDataSizeMsb;
+ }
+ }
+ } else if (class == CLASS3)
+ goto bad_service_param;
+
+ /*
+ * Preserve the upper four bits of the MSB from the PLOGI response.
+ * These bits contain the Buffer-to-Buffer State Change Number
+ * from the target and need to be passed to the FW.
+ */
+ hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
+ ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
+ if (ssp_value > hsp_value) {
+ sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
+ sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
+ (hsp->cmn.bbRcvSizeMsb & 0x0F);
+ }
+
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+ memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
+ return 1;
+bad_service_param:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0207 Device %x "
+ "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
+ "invalid service parameters. Ignoring device.\n",
+ ndlp->nlp_DID,
+ sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
+ sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
+ sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
+ sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
+ return 0;
+}
+
+static void *
+lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_dmabuf *pcmd, *prsp;
+ uint32_t *lp;
+ void *ptr = NULL;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ /* For lpfc_els_abort, context2 could be zero'ed to delay
+ * freeing associated memory till after ABTS completes.
+ */
+ if (pcmd) {
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
+ list);
+ if (prsp) {
+ lp = (uint32_t *) prsp->virt;
+ ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
+ }
+ } else {
+ /* Force ulpStatus error since we are returning NULL ptr */
+ if (!(irsp->ulpStatus)) {
+ irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
+ irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
+ ptr = NULL;
+ }
+ return ptr;
+}
+
+
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with a LPFC_NODELIST entry. This
+ * routine effectively results in a "software abort".
+ */
+int
+lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ LIST_HEAD(abort_list);
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ /* Abort outstanding I/O on NPort <nlp_DID> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
+ "2819 Abort outstanding I/O on NPort x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ /* Clean up all fabric IOs first.*/
+ lpfc_fabric_abort_nport(ndlp);
+
+ /*
+ * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+ * of all ELS IOs that need an ABTS. The IOs need to stay on the
+ * txcmplq so that the abort operation completes them successfully.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ /* Add to abort_list on on NDLP match. */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+ list_add_tail(&iocb->dlist, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Abort the targeted IOs and remove them from the abort list. */
+ list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&iocb->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ INIT_LIST_HEAD(&abort_list);
+
+ /* Now process the txq */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport we are looking for */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+ list_del_init(&iocb->list);
+ list_add_tail(&iocb->list, &abort_list);
+ }
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+
+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+ return 0;
+}
+
+static int
+lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *pcmd;
+ uint64_t nlp_portwwn = 0;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ int rc;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ if (vport->port_state <= LPFC_FDISC) {
+ /* Before responding to PLOGI, check for pt2pt mode.
+ * If we are pt2pt, with an outstanding FLOGI, abort
+ * the FLOGI and resend it first.
+ */
+ if (vport->fc_flag & FC_PT2PT) {
+ lpfc_els_abort_flogi(phba);
+ if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ /* If the other side is supposed to initiate
+ * the PLOGI anyway, just ACC it now and
+ * move on with discovery.
+ */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ /* Start discovery - this should just do
+ CLEAR_LA */
+ lpfc_disc_start(vport);
+ } else
+ lpfc_initial_flogi(vport);
+ } else {
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ return 0;
+ }
+ }
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ if (wwn_to_u64(sp->portName.u.wwn) == 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0140 PLOGI Reject: invalid nname\n");
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ return 0;
+ }
+ if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0141 PLOGI Reject: invalid pname\n");
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ return 0;
+ }
+
+ nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
+ /* Reject this request because invalid parameters */
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ return 0;
+ }
+ icmd = &cmdiocb->iocb;
+
+ /* PLOGI chkparm OK */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+ "x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+ ndlp->nlp_rpi, vport->port_state,
+ vport->fc_flag);
+
+ if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
+ ndlp->nlp_fcp_info |= CLASS2;
+ else
+ ndlp->nlp_fcp_info |= CLASS3;
+
+ ndlp->nlp_class_sup = 0;
+ if (sp->cls1.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS1;
+ if (sp->cls2.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS2;
+ if (sp->cls3.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS3;
+ if (sp->cls4.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS4;
+ ndlp->nlp_maxframe =
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+ /* if already logged in, do implicit logout */
+ switch (ndlp->nlp_state) {
+ case NLP_STE_NPR_NODE:
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+ break;
+ case NLP_STE_REG_LOGIN_ISSUE:
+ case NLP_STE_PRLI_ISSUE:
+ case NLP_STE_UNMAPPED_NODE:
+ case NLP_STE_MAPPED_NODE:
+ /* lpfc_plogi_confirm_nport skips fabric did, handle it here */
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+ ndlp, NULL);
+ return 1;
+ }
+ if (nlp_portwwn != 0 &&
+ nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0143 PLOGI recv'd from DID: x%x "
+ "WWPN changed: old %llx new %llx\n",
+ ndlp->nlp_DID,
+ (unsigned long long)nlp_portwwn,
+ (unsigned long long)
+ wwn_to_u64(sp->portName.u.wwn));
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ /* rport needs to be unregistered first */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ break;
+ }
+
+ /* Check for Nport to NPort pt2pt protocol */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+
+ /* rcv'ed PLOGI decides what our NPortId will be */
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ goto out;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ /*
+ * For SLI4, the VFI/VPI are registered AFTER the
+ * Nport with the higher WWPN sends us a PLOGI with
+ * our assigned NPortId.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_issue_reg_vfi(vport);
+
+ lpfc_can_disctmo(vport);
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto out;
+
+ /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_unreg_rpi(vport, ndlp);
+
+ rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ (uint8_t *) sp, mbox, ndlp->nlp_rpi);
+ if (rc) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ /* ACC PLOGI rsp command needs to execute first,
+ * queue this mbox command to be processed later.
+ */
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ /*
+ * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
+ * command issued in lpfc_cmpl_els_acc().
+ */
+ mbox->vport = vport;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * If there is an outstanding PLOGI issued, abort it before
+ * sending ACC rsp for received PLOGI. If pending plogi
+ * is not canceled here, the plogi will be rejected by
+ * remote port and will be retried. On a configuration with
+ * single discovery thread, this will cause a huge delay in
+ * discovery. Also this will cause multiple state machines
+ * running in parallel for this node.
+ */
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp);
+ }
+
+ if ((vport->port_type == LPFC_NPIV_PORT &&
+ vport->cfg_restrict_login)) {
+
+ /* In order to preserve RPIs, we want to cleanup
+ * the default RPI the firmware created to rcv
+ * this ELS request. The only way to do this is
+ * to register, then unregister the RPI.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+ spin_unlock_irq(shost->host_lock);
+ stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, mbox);
+ if (rc)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+ if (rc)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+out:
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object
+ *
+ * This routine is invoked to issue a completion to a rcv'ed
+ * ADISC or PDISC after the paused RPI has been resumed.
+ **/
+static void
+lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ uint32_t cmd;
+
+ elsiocb = (struct lpfc_iocbq *)mboxq->context1;
+ ndlp = (struct lpfc_nodelist *) mboxq->context2;
+ vport = mboxq->vport;
+ cmd = elsiocb->drvrTimeout;
+
+ if (cmd == ELS_CMD_ADISC) {
+ lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
+ } else {
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
+ ndlp, NULL);
+ }
+ kfree(elsiocb);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
+static int
+lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_dmabuf *pcmd;
+ struct serv_parm *sp;
+ struct lpfc_name *pnn, *ppn;
+ struct ls_rjt stat;
+ ADISC *ap;
+ IOCB_t *icmd;
+ uint32_t *lp;
+ uint32_t cmd;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ if (cmd == ELS_CMD_ADISC) {
+ ap = (ADISC *) lp;
+ pnn = (struct lpfc_name *) & ap->nodeName;
+ ppn = (struct lpfc_name *) & ap->portName;
+ } else {
+ sp = (struct serv_parm *) lp;
+ pnn = (struct lpfc_name *) & sp->nodeName;
+ ppn = (struct lpfc_name *) & sp->portName;
+ }
+
+ icmd = &cmdiocb->iocb;
+ if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+
+ /*
+ * As soon as we send ACC, the remote NPort can
+ * start sending us data. Thus, for SLI4 we must
+ * resume the RPI before the ACC goes out.
+ */
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
+ GFP_KERNEL);
+ if (elsiocb) {
+
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
+
+ /* Save the ELS cmd */
+ elsiocb->drvrTimeout = cmd;
+
+ lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi, elsiocb);
+ goto out;
+ }
+ }
+
+ if (cmd == ELS_CMD_ADISC) {
+ lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
+ } else {
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+ ndlp, NULL);
+ }
+out:
+ /* If we are authenticated, move to the proper state */
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ else
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+ return 1;
+ }
+ /* Reject this request because invalid parameters */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+
+ /* 1 sec timeout */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ return 0;
+}
+
+static int
+lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vport **vports;
+ int i, active_vlink_present = 0 ;
+
+ /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
+ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
+ * PLOGIs during LOGO storms from a device.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+ if (els_cmd == ELS_CMD_PRLO)
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+ else
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (vport->port_state <= LPFC_FDISC)
+ goto out;
+ lpfc_linkdown_port(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_LOGO_RCVD;
+ spin_unlock_irq(shost->host_lock);
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ if ((!(vports[i]->fc_flag &
+ FC_VPORT_LOGO_RCVD)) &&
+ (vports[i]->port_state > LPFC_FDISC)) {
+ active_vlink_present = 1;
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+
+ if (active_vlink_present) {
+ /*
+ * If there are other active VLinks present,
+ * re-instantiate the Vlink using FDISC.
+ */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ vport->port_state = LPFC_FDISC;
+ } else {
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_retry_pport_discovery(phba);
+ }
+ } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+ ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+ !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+ (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+ /* Only try to re-login if this is NOT a Fabric Node */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ }
+out:
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ /* The driver has to wait until the ACC completes before it continues
+ * processing the LOGO. The action will resume in
+ * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+ * unreg_login, the driver waits so the ACC does not get aborted.
+ */
+ return 0;
+}
+
+static void
+lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ PRLI *npr;
+ struct fc_rport *rport = ndlp->rport;
+ u32 roles;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ if (npr->prliType == PRLI_FCP_TYPE) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc) {
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
+ if (rport) {
+ /* We need to update the rport role values */
+ roles = FC_RPORT_ROLE_UNKNOWN;
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport rolechg: role:x%x did:x%x flg:x%x",
+ roles, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ fc_remote_port_rolechg(rport, roles);
+ }
+}
+
+static uint32_t
+lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ return 0;
+ }
+
+ if (!(vport->fc_flag & FC_PT2PT)) {
+ /* Check config parameter use-adisc or FCP-2 */
+ if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
+ (ndlp->nlp_type & NLP_FCP_TARGET))) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ return 1;
+ }
+ }
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ lpfc_unreg_rpi(vport, ndlp);
+ return 0;
+}
+
+/**
+ * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
+ * @phba : Pointer to lpfc_hba structure.
+ * @vport: Pointer to lpfc_vport structure.
+ * @rpi : rpi to be release.
+ *
+ * This function will send a unreg_login mailbox command to the firmware
+ * to release a rpi.
+ **/
+void
+lpfc_release_rpi(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ uint16_t rpi)
+{
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!pmb)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2796 mailbox memory allocation failed \n");
+ else {
+ lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+}
+
+static uint32_t
+lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI if reglogin completing */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ (evt == NLP_EVT_CMPL_REG_LOGIN) &&
+ (!pmb->u.mb.mbxStatus)) {
+ mb = &pmb->u.mb;
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0271 Illegal State Transition: node x%x "
+ "event x%x, state x%x Data: x%x x%x\n",
+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ /* This transition is only legal if we previously
+ * rcv'ed a PLOGI. Since we don't want 2 discovery threads
+ * working on the same NPortID, do nothing for this thread
+ * to stop it.
+ */
+ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0272 Illegal State Transition: node x%x "
+ "event x%x, state x%x Data: x%x x%x\n",
+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ }
+ return ndlp->nlp_state;
+}
+
+/* Start of Discovery State Machine routines */
+
+static uint32_t
+lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ return ndlp->nlp_state;
+ }
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = arg;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *lp = (uint32_t *) pcmd->virt;
+ struct serv_parm *sp = (struct serv_parm *) (lp + 1);
+ struct ls_rjt stat;
+ int port_cmp;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+
+ /* For a PLOGI, we only accept if our portname is less
+ * than the remote portname.
+ */
+ phba->fc_stat.elsLogiCol++;
+ port_cmp = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+
+ if (port_cmp >= 0) {
+ /* Reject this request because the remote node will accept
+ ours */
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ } else {
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+ (vport->num_disc_nodes)) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ /* Check if there are more PLOGIs to be sent */
+ lpfc_more_plogi(vport);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ } /* If our portname was less */
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(vport->phba, ndlp);
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp);
+
+ if (evt == NLP_EVT_RCV_LOGO) {
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ } else {
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ }
+
+ /* Put ndlp in npr state set plogi timer for 1 sec */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_dmabuf *pcmd, *prsp, *mp;
+ uint32_t *lp;
+ IOCB_t *irsp;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ /* Recovery from PLOGI collision logic */
+ return ndlp->nlp_state;
+ }
+
+ irsp = &rspiocb->iocb;
+
+ if (irsp->ulpStatus)
+ goto out;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ lp = (uint32_t *) prsp->virt;
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+
+ /* Some switches have FDMI servers returning 0 for WWN */
+ if ((ndlp->nlp_DID != FDMI_DID) &&
+ (wwn_to_u64(sp->portName.u.wwn) == 0 ||
+ wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0142 PLOGI RSP: Invalid WWN.\n");
+ goto out;
+ }
+ if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
+ goto out;
+ /* PLOGI chkparm OK */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+ if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
+ ndlp->nlp_fcp_info |= CLASS2;
+ else
+ ndlp->nlp_fcp_info |= CLASS3;
+
+ ndlp->nlp_class_sup = 0;
+ if (sp->cls1.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS1;
+ if (sp->cls2.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS2;
+ if (sp->cls3.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS3;
+ if (sp->cls4.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS4;
+ ndlp->nlp_maxframe =
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0133 PLOGI: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+ goto out;
+ }
+
+ lpfc_unreg_rpi(vport, ndlp);
+
+ if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
+ switch (ndlp->nlp_DID) {
+ case NameServer_DID:
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
+ break;
+ case FDMI_DID:
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
+ break;
+ default:
+ ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ }
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ return ndlp->nlp_state;
+ }
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ /* decrement node reference count to the failed mbox
+ * command
+ */
+ lpfc_nlp_put(ndlp);
+ mp = (struct lpfc_dmabuf *) mbox->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0134 PLOGI: cannot issue reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+ } else {
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0135 PLOGI: cannot format reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+ }
+
+
+out:
+ if (ndlp->nlp_DID == NameServer_DID) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0261 Cannot Register NameServer login\n");
+ }
+
+ /*
+ ** In case the node reference counter does not go to zero, ensure that
+ ** the stale state for the node is not processed.
+ */
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DEFER_RM;
+ spin_unlock_irq(shost->host_lock);
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ !mb->mbxStatus) {
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp->nlp_state;
+ } else {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(vport->phba, ndlp);
+
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+}
+
+static uint32_t
+lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp);
+
+ ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb;
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp);
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ if (vport->num_disc_nodes)
+ lpfc_more_adisc(vport);
+ }
+ return ndlp->nlp_state;
+ }
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp);
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
+ ADISC *ap;
+ int rc;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+ irsp = &rspiocb->iocb;
+
+ if ((irsp->ulpStatus) ||
+ (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
+ /* 1 sec timeout */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+
+ memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
+ memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
+
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_unreg_rpi(vport, ndlp);
+ return ndlp->nlp_state;
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
+ if (rc) {
+ /* Stay in state and retry. */
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ return ndlp->nlp_state;
+ }
+ }
+
+ if (ndlp->nlp_type & NLP_FCP_TARGET) {
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ } else {
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp->nlp_state;
+ } else {
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(vport->phba, ndlp);
+
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+}
+
+static uint32_t
+lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp);
+
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ LPFC_MBOXQ_t *mb;
+ LPFC_MBOXQ_t *nextmb;
+ struct lpfc_dmabuf *mp;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+ if ((mb = phba->sli.mbox_active)) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ }
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ lpfc_nlp_put(ndlp);
+ list_del(&mb->list);
+ phba->sli.mboxq_cnt--;
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint32_t did = mb->un.varWords[1];
+
+ if (mb->mbxStatus) {
+ /* RegLogin failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ "x%x\n",
+ did, mb->mbxStatus, vport->port_state,
+ mb->un.varRegLogin.vpi,
+ mb->un.varRegLogin.rpi);
+ /*
+ * If RegLogin failed due to lack of HBA resources do not
+ * retry discovery.
+ */
+ if (mb->mbxStatus == MBXERR_RPI_FULL) {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ return ndlp->nlp_state;
+ }
+
+ /* Put ndlp in npr state set plogi timer for 1 sec */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ return ndlp->nlp_state;
+ }
+
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+
+ /* Only if we are not a fabric nport do we issue PRLI */
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ } else {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp->nlp_state;
+ } else {
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+}
+
+static uint32_t
+lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Software abort outstanding PRLI before sending acc */
+ lpfc_els_abort(vport->phba, ndlp);
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+/* This routine is envoked when we rcv a PRLO request from a nport
+ * we are logged into. We should send back a PRLO rsp setting the
+ * appropriate bits.
+ * NEXT STATE = PRLI_ISSUE
+ */
+static uint32_t
+lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *irsp;
+ PRLI *npr;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+ npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus) {
+ if ((vport->port_type == LPFC_NPIV_PORT) &&
+ vport->cfg_restrict_login) {
+ goto out;
+ }
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ return ndlp->nlp_state;
+ }
+
+ /* Check out PRLI rsp */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+ (npr->prliType == PRLI_FCP_TYPE)) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc) {
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
+ if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (vport->port_type == LPFC_NPIV_PORT) &&
+ vport->cfg_restrict_login) {
+out:
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ return ndlp->nlp_state;
+ }
+
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ else
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ return ndlp->nlp_state;
+}
+
+/*! lpfc_device_rm_prli_issue
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * This routine is envoked when we a request to remove a nport we are in the
+ * process of PRLIing. We should software abort outstanding prli, unreg
+ * login, send a logout. We will change node state to UNUSED_NODE, put it
+ * on plogi list so it can be freed when LOGO completes.
+ *
+ */
+
+static uint32_t
+lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp->nlp_state;
+ } else {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(vport->phba, ndlp);
+
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+}
+
+
+/*! lpfc_device_recov_prli_issue
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * The routine is envoked when the state of a device is unknown, like
+ * during a link down. We should remove the nodelist entry from the
+ * unmapped list, issue a UNREG_LOGIN, do a software abort of the
+ * outstanding PRLI command, then free the node entry.
+ */
+static uint32_t
+lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ /* software abort outstanding PRLI */
+ lpfc_els_abort(phba, ndlp);
+
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ /*
+ * Take no action. If a LOGO is outstanding, then possibly DevLoss has
+ * timed out and is calling for Device Remove. In this case, the LOGO
+ * must be allowed to complete in state LOGO_ISSUE so that the rpi
+ * and other NLP flags are correctly cleaned up.
+ */
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ /*
+ * Device Recovery events have no meaning for a node with a LOGO
+ * outstanding. The LOGO has to complete first and handle the
+ * node from that point.
+ */
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_prli(vport, ndlp, cmdiocb);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* flush the target */
+ lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Ignore PLOGI if we have an outstanding LOGO */
+ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
+ return ndlp->nlp_state;
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ /* send PLOGI immediately, move to PLOGI issue state */
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
+ } else {
+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+ /*
+ * Do not start discovery if discovery is about to start
+ * or discovery in progress for this node. Starting discovery
+ * here will affect the counting of discovery threads.
+ */
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
+ } else {
+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ } else {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DEFER_RM;
+ spin_unlock_irq(shost->host_lock);
+ return NLP_STE_FREED_NODE;
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* For the fabric port just clear the fc flags. */
+ if (ndlp->nlp_DID == Fabric_DID) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_unreg_rpi(vport, ndlp);
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->u.mb;
+
+ if (!mb->mbxStatus) {
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ } else {
+ if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+ }
+ }
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp->nlp_state;
+ }
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp->nlp_state;
+}
+
+
+/* This next section defines the NPort Discovery State Machine */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The plogi list and adisc list are used when Link Up discovery or RSCN
+ * processing is needed. Each list holds the nodes that we will send PLOGI
+ * or ADISC on. These lists will keep track of what nodes will be effected
+ * by an RSCN, or a Link Up (Typically, all nodes are effected on Link Up).
+ * The unmapped_list will contain all nodes that we have successfully logged
+ * into at the Fibre Channel level. The mapped_list will contain all nodes
+ * that are mapped FCP targets.
+ */
+/*
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+/* For UNUSED_NODE state, the node has just been allocated .
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
+ * expire, all effected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list. 32 entries are processed initially and
+ * ADISC is initited for each one. Completions / Events for each node are
+ * funnelled thru the state machine. As each node finishes ADISC processing, it
+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list. 32 entries are processed initially and PLOGI is initited for each one.
+ * Completions / Events for each node are funnelled thru the state machine. As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * indentically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
+ (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
+ /* Action routine Event Current State */
+ lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
+ lpfc_rcv_els_unused_node, /* RCV_PRLI */
+ lpfc_rcv_logo_unused_node, /* RCV_LOGO */
+ lpfc_rcv_els_unused_node, /* RCV_ADISC */
+ lpfc_rcv_els_unused_node, /* RCV_PDISC */
+ lpfc_rcv_els_unused_node, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_unused_node, /* DEVICE_RM */
+ lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
+ lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
+ lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
+ lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
+ lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
+ lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */
+ lpfc_device_rm_plogi_issue, /* DEVICE_RM */
+ lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
+ lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_adisc_issue, /* DEVICE_RM */
+ lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
+ lpfc_rcv_prli_reglogin_issue, /* RCV_PLOGI */
+ lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
+ lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
+ lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
+ lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
+ lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_prli_issue, /* DEVICE_RM */
+ lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */
+ lpfc_rcv_prli_logo_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_logo_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */
+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_logo_issue, /* DEVICE_RM */
+ lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
+ lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
+ lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
+ lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
+ lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
+ lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_disc_illegal, /* DEVICE_RM */
+ lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
+ lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
+ lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
+ lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
+ lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
+ lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_disc_illegal, /* DEVICE_RM */
+ lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
+ lpfc_rcv_prli_npr_node, /* RCV_PRLI */
+ lpfc_rcv_logo_npr_node, /* RCV_LOGO */
+ lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
+ lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
+ lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
+ lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */
+ lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */
+ lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
+ lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */
+ lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_npr_node, /* DEVICE_RM */
+ lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
+};
+
+int
+lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ uint32_t cur_state, rc;
+ uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
+ uint32_t);
+ uint32_t got_ndlp = 0;
+
+ if (lpfc_nlp_get(ndlp))
+ got_ndlp = 1;
+
+ cur_state = ndlp->nlp_state;
+
+ /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0211 DSM in event x%x on NPort x%x in "
+ "state %d Data: x%x\n",
+ evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM in: evt:%d ste:%d did:x%x",
+ evt, cur_state, ndlp->nlp_DID);
+
+ func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
+ rc = (func) (vport, ndlp, arg, evt);
+
+ /* DSM out state <rc> on NPort <nlp_DID> */
+ if (got_ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0212 DSM out state %d on NPort x%x Data: x%x\n",
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM out: ste:%d did:x%x flg:x%x",
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ /* Decrement the ndlp reference count held for this function */
+ lpfc_nlp_put(ndlp);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0213 DSM out state %d on NPort free\n", rc);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM out: ste:%d did:x%x flg:x%x",
+ rc, 0, 0);
+ }
+
+ return rc;
+}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
new file mode 100644
index 000000000..c140f9977
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -0,0 +1,5936 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+
+#define LPFC_RESET_WAIT 2
+#define LPFC_ABORT_WAIT 2
+
+int _dump_buf_done = 1;
+
+static char *dif_op_str[] = {
+ "PROT_NORMAL",
+ "PROT_READ_INSERT",
+ "PROT_WRITE_STRIP",
+ "PROT_READ_STRIP",
+ "PROT_WRITE_INSERT",
+ "PROT_READ_PASS",
+ "PROT_WRITE_PASS",
+};
+
+struct scsi_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+ if (vport->phba->cfg_fof)
+ return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+ else
+ return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
+
+static void
+lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
+{
+ void *src, *dst;
+ struct scatterlist *sgde = scsi_sglist(cmnd);
+
+ if (!_dump_buf_data) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
+ __func__);
+ return;
+ }
+
+
+ if (!sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9051 BLKGRD: ERROR: data scatterlist is null\n");
+ return;
+ }
+
+ dst = (void *) _dump_buf_data;
+ while (sgde) {
+ src = sg_virt(sgde);
+ memcpy(dst, src, sgde->length);
+ dst += sgde->length;
+ sgde = sg_next(sgde);
+ }
+}
+
+static void
+lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
+{
+ void *src, *dst;
+ struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+ if (!_dump_buf_dif) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (!sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9053 BLKGRD: ERROR: prot scatterlist is null\n");
+ return;
+ }
+
+ dst = _dump_buf_dif;
+ while (sgde) {
+ src = sg_virt(sgde);
+ memcpy(dst, src, sgde->length);
+ dst += sgde->length;
+ sgde = sg_next(sgde);
+ }
+}
+
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+ return sc->device->sector_size;
+}
+
+#define LPFC_CHECK_PROTECT_GUARD 1
+#define LPFC_CHECK_PROTECT_REF 2
+static inline unsigned
+lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
+{
+ return 1;
+}
+
+static inline unsigned
+lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
+{
+ if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
+ return 0;
+ if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
+ return 1;
+ return 0;
+}
+
+/**
+ * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called from the lpfc_prep_task_mgmt_cmd function to
+ * set the last bit in the response sge entry.
+ **/
+static void
+lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ if (sgl) {
+ sgl += 1;
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ }
+}
+
+/**
+ * lpfc_update_stats - Update statistical data for the command completion
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called when there is a command completion and this
+ * function updates the statistical data for the command completion.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ unsigned long flags;
+ struct Scsi_Host *shost = cmd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ unsigned long latency;
+ int i;
+
+ if (cmd->result)
+ return;
+
+ latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ !pnode ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return;
+ }
+
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+ i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+ phba->bucket_step;
+ /* check array subscript bounds */
+ if (i < 0)
+ i = 0;
+ else if (i >= LPFC_MAX_BUCKET_COUNT)
+ i = LPFC_MAX_BUCKET_COUNT - 1;
+ } else {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+ if (latency <= (phba->bucket_base +
+ ((1<<i)*phba->bucket_step)))
+ break;
+ }
+
+ pnode->lat_data[i].cmd_count++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is resource error in driver or firmware.
+ * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
+ * posts at most 1 event each second. This routine wakes up worker thread of
+ * @phba to process WORKER_RAM_DOWN_EVENT event.
+ *
+ * This routine should be called with no lock held.
+ **/
+void
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+{
+ unsigned long flags;
+ uint32_t evt_posted;
+ unsigned long expires;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ atomic_inc(&phba->num_rsrc_err);
+ phba->last_rsrc_error_time = jiffies;
+
+ expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
+ if (time_after(expires, jiffies)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ phba->last_ramp_down_time = jiffies;
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+ evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+ if (!evt_posted)
+ phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+ if (!evt_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
+ * thread.This routine reduces queue depth for all scsi device on each vport
+ * associated with @phba.
+ **/
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ unsigned long new_queue_depth;
+ unsigned long num_rsrc_err, num_cmd_success;
+ int i;
+
+ num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+ num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+ /*
+ * The error and success command counters are global per
+ * driver instance. If another handler has already
+ * operated on this error event, just exit.
+ */
+ if (num_rsrc_err == 0)
+ return;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ shost_for_each_device(sdev, shost) {
+ new_queue_depth =
+ sdev->queue_depth * num_rsrc_err /
+ (num_rsrc_err + num_cmd_success);
+ if (!new_queue_depth)
+ new_queue_depth = sdev->queue_depth - 1;
+ else
+ new_queue_depth = sdev->queue_depth -
+ new_queue_depth;
+ scsi_change_queue_depth(sdev, new_queue_depth);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ atomic_set(&phba->num_rsrc_err, 0);
+ atomic_set(&phba->num_cmd_success, 0);
+}
+
+/**
+ * lpfc_scsi_dev_block - set all scsi hosts to block state
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks vport list and set each SCSI host to block state
+ * by invoking fc_remote_port_delete() routine. This function is invoked
+ * with EEH when device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct fc_rport *rport;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ shost_for_each_device(sdev, shost) {
+ rport = starget_to_rport(scsi_target(sdev));
+ fc_remote_port_delete(rport);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
+ * @vport: The virtual port for which this call being executed.
+ * @num_to_allocate: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for device with SLI-3 interface spec,
+ * the scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. The non-DMAable buffer region contains information to build
+ * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
+ * and the initial BPL. In addition to allocating memory, the FCP CMND and
+ * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
+ *
+ * Return codes:
+ * int - number of scsi buffers that were allocated.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb;
+ struct ulp_bde64 *bpl;
+ IOCB_t *iocb;
+ dma_addr_t pdma_phys_fcp_cmd;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_bpl;
+ uint16_t iotag;
+ int bcnt, bpl_size;
+
+ bpl_size = phba->cfg_sg_dma_buf_size -
+ (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+ num_to_alloc, phba->cfg_sg_dma_buf_size,
+ (int)sizeof(struct fcp_cmnd),
+ (int)sizeof(struct fcp_rsp), bpl_size);
+
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+ psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+ if (!psb)
+ break;
+
+ /*
+ * Get memory from the pci pool to map the virt space to pci
+ * bus space for an I/O. The DMA buffer includes space for the
+ * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+ * necessary to support the sg_tablesize.
+ */
+ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ GFP_KERNEL, &psb->dma_handle);
+ if (!psb->data) {
+ kfree(psb);
+ break;
+ }
+
+ /* Initialize virtual ptrs to dma_buf region. */
+ memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ break;
+ }
+ psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+ psb->fcp_cmnd = psb->data;
+ psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+ psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
+
+ /* Initialize local short-hand pointers. */
+ bpl = psb->fcp_bpl;
+ pdma_phys_fcp_cmd = psb->dma_handle;
+ pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+ pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
+
+ /*
+ * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+ * are sg list bdes. Initialize the first two and leave the
+ * rest for queuecommand.
+ */
+ bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+ bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+ bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
+
+ /* Setup the physical region for the FCP RSP */
+ bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+ bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+ bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+ bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+
+ /*
+ * Since the IOCB for the FCP I/O is built into this
+ * lpfc_scsi_buf, initialize it with all known data now.
+ */
+ iocb = &psb->cur_iocbq.iocb;
+ iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+ if ((phba->sli_rev == 3) &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+ /* fill in immediate fcp command BDE */
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+ unsli3.fcp_ext.icd);
+ iocb->un.fcpi64.bdl.addrHigh = 0;
+ iocb->ulpBdeCount = 0;
+ iocb->ulpLe = 0;
+ /* fill in response BDE */
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_64;
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+ sizeof(struct fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrLow =
+ putPaddrLow(pdma_phys_fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrHigh =
+ putPaddrHigh(pdma_phys_fcp_rsp);
+ } else {
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ iocb->un.fcpi64.bdl.bdeSize =
+ (2 * sizeof(struct ulp_bde64));
+ iocb->un.fcpi64.bdl.addrLow =
+ putPaddrLow(pdma_phys_bpl);
+ iocb->un.fcpi64.bdl.addrHigh =
+ putPaddrHigh(pdma_phys_bpl);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ }
+ iocb->ulpClass = CLASS3;
+ psb->status = IOSTAT_SUCCESS;
+ /* Put it back into the SCSI buffer list */
+ psb->cur_iocbq.context1 = psb;
+ lpfc_release_scsi_buf_s3(phba, psb);
+
+ }
+
+ return bcnt;
+}
+
+/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->rdata && psb->rdata->pnode
+ && psb->rdata->pnode->vport == vport)
+ psb->rdata = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+ struct lpfc_iocbq *iocbq;
+ int i;
+ struct lpfc_nodelist *ndlp;
+ int rrq_empty = 0;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->cur_iocbq.sli4_xritag == xri) {
+ list_del(&psb->list);
+ psb->exch_busy = 0;
+ psb->status = IOSTAT_SUCCESS;
+ spin_unlock(
+ &phba->sli4_hba.abts_scsi_buf_list_lock);
+ if (psb->rdata && psb->rdata->pnode)
+ ndlp = psb->rdata->pnode;
+ else
+ ndlp = NULL;
+
+ rrq_empty = list_empty(&phba->active_rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (ndlp) {
+ lpfc_set_rrq_active(phba, ndlp,
+ psb->cur_iocbq.sli4_lxritag, rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
+ lpfc_release_scsi_buf_s4(phba, psb);
+ if (rrq_empty)
+ lpfc_worker_wake_up(phba);
+ return;
+ }
+ }
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ continue;
+ if (iocbq->sli4_xritag != xri)
+ continue;
+ psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+ psb->exch_busy = 0;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (!list_empty(&pring->txq))
+ lpfc_worker_wake_up(phba);
+ return;
+
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_sblist: pointer to the scsi buffer list.
+ *
+ * This routine walks a list of scsi buffers that was passed in. It attempts
+ * to construct blocks of scsi buffer sgls which contains contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post to the port.
+ * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
+ * embedded SGL post mailbox command for posting. The @post_sblist passed in
+ * must be local list, thus no lock is needed when manipulate the list.
+ *
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
+ **/
+static int
+lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
+ struct list_head *post_sblist, int sb_count)
+{
+ struct lpfc_scsi_buf *psb, *psb_next;
+ int status, sgl_size;
+ int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+ dma_addr_t pdma_phys_bpl1;
+ int last_xritag = NO_XRI;
+ LIST_HEAD(prep_sblist);
+ LIST_HEAD(blck_sblist);
+ LIST_HEAD(scsi_sblist);
+
+ /* sanity check */
+ if (sb_count <= 0)
+ return -EINVAL;
+
+ sgl_size = phba->cfg_sg_dma_buf_size -
+ (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
+ list_del_init(&psb->list);
+ block_cnt++;
+ if ((last_xritag != NO_XRI) &&
+ (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+ /* a hole in xri block, form a sgl posting block */
+ list_splice_init(&prep_sblist, &blck_sblist);
+ post_cnt = block_cnt - 1;
+ /* prepare list for next posting block */
+ list_add_tail(&psb->list, &prep_sblist);
+ block_cnt = 1;
+ } else {
+ /* prepare list for next posting block */
+ list_add_tail(&psb->list, &prep_sblist);
+ /* enough sgls for non-embed sgl mbox command */
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ list_splice_init(&prep_sblist, &blck_sblist);
+ post_cnt = block_cnt;
+ block_cnt = 0;
+ }
+ }
+ num_posting++;
+ last_xritag = psb->cur_iocbq.sli4_xritag;
+
+ /* end of repost sgl list condition for SCSI buffers */
+ if (num_posting == sb_count) {
+ if (post_cnt == 0) {
+ /* last sgl posting block */
+ list_splice_init(&prep_sblist, &blck_sblist);
+ post_cnt = block_cnt;
+ } else if (block_cnt == 1) {
+ /* last single sgl with non-contiguous xri */
+ if (sgl_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ status = lpfc_sli4_post_sgl(phba,
+ psb->dma_phys_bpl,
+ pdma_phys_bpl1,
+ psb->cur_iocbq.sli4_xritag);
+ if (status) {
+ /* failure, put on abort scsi list */
+ psb->exch_busy = 1;
+ } else {
+ /* success, put on SCSI buffer list */
+ psb->exch_busy = 0;
+ psb->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ /* success, put on SCSI buffer sgl list */
+ list_add_tail(&psb->list, &scsi_sblist);
+ }
+ }
+
+ /* continue until a nembed page worth of sgls */
+ if (post_cnt == 0)
+ continue;
+
+ /* post block of SCSI buffer list sgls */
+ status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
+ post_cnt);
+
+ /* don't reset xirtag due to hole in xri block */
+ if (block_cnt == 0)
+ last_xritag = NO_XRI;
+
+ /* reset SCSI buffer post count for next round of posting */
+ post_cnt = 0;
+
+ /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
+ while (!list_empty(&blck_sblist)) {
+ list_remove_head(&blck_sblist, psb,
+ struct lpfc_scsi_buf, list);
+ if (status) {
+ /* failure, put on abort scsi list */
+ psb->exch_busy = 1;
+ } else {
+ /* success, put on SCSI buffer list */
+ psb->exch_busy = 0;
+ psb->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ list_add_tail(&psb->list, &scsi_sblist);
+ }
+ }
+ /* Push SCSI buffers with sgl posted to the availble list */
+ while (!list_empty(&scsi_sblist)) {
+ list_remove_head(&scsi_sblist, psb,
+ struct lpfc_scsi_buf, list);
+ lpfc_release_scsi_buf_s4(phba, psb);
+ }
+ return num_posted;
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * repost them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
+ * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+ LIST_HEAD(post_sblist);
+ int num_posted, rc = 0;
+
+ /* get all SCSI buffers need to repost to a local list */
+ spin_lock_irq(&phba->scsi_buf_list_get_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
+ list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+ list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
+ spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+
+ /* post the list of scsi buffer sgls to port if available */
+ if (!list_empty(&post_sblist)) {
+ num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
+ phba->sli4_hba.scsi_xri_cnt);
+ /* failed to post any scsi buffer, return error */
+ if (num_posted == 0)
+ rc = -EIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call being executed.
+ * @num_to_allocate: The requested number of buffers to allocate.
+ *
+ * This routine allocates scsi buffers for device with SLI-4 interface spec,
+ * the scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
+ * them on a list, it post them to the port by using SGL block post.
+ *
+ * Return codes:
+ * int - number of scsi buffers that were allocated and posted.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb;
+ struct sli4_sge *sgl;
+ IOCB_t *iocb;
+ dma_addr_t pdma_phys_fcp_cmd;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_bpl;
+ uint16_t iotag, lxri = 0;
+ int bcnt, num_posted, sgl_size;
+ LIST_HEAD(prep_sblist);
+ LIST_HEAD(post_sblist);
+ LIST_HEAD(scsi_sblist);
+
+ sgl_size = phba->cfg_sg_dma_buf_size -
+ (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+ num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+ (int)sizeof(struct fcp_cmnd),
+ (int)sizeof(struct fcp_rsp));
+
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+ psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+ if (!psb)
+ break;
+ /*
+ * Get memory from the pci pool to map the virt space to
+ * pci bus space for an I/O. The DMA buffer includes space
+ * for the struct fcp_cmnd, struct fcp_rsp and the number
+ * of bde's necessary to support the sg_tablesize.
+ */
+ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ GFP_KERNEL, &psb->dma_handle);
+ if (!psb->data) {
+ kfree(psb);
+ break;
+ }
+ memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double check
+ * to be sure.
+ */
+ if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ break;
+ }
+
+
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ break;
+ }
+
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3368 Failed to allocated IOTAG for"
+ " XRI:0x%x\n", lxri);
+ lpfc_sli4_free_xri(phba, lxri);
+ break;
+ }
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+ psb->fcp_bpl = psb->data;
+ psb->fcp_cmnd = (psb->data + sgl_size);
+ psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+ sizeof(struct fcp_cmnd));
+
+ /* Initialize local short-hand pointers. */
+ sgl = (struct sli4_sge *)psb->fcp_bpl;
+ pdma_phys_bpl = psb->dma_handle;
+ pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
+ pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+
+ /*
+ * The first two bdes are the FCP_CMD and FCP_RSP.
+ * The balance are sg list bdes. Initialize the
+ * first two and leave the rest for queuecommand.
+ */
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+ sgl++;
+
+ /* Setup the physical region for the FCP RSP */
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
+
+ /*
+ * Since the IOCB for the FCP I/O is built into this
+ * lpfc_scsi_buf, initialize it with all known data now.
+ */
+ iocb = &psb->cur_iocbq.iocb;
+ iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+ /* setting the BLP size to 2 * sizeof BDE may not be correct.
+ * We are setting the bpl to point to out sgl. An sgl's
+ * entries are 16 bytes, a bpl entries are 12 bytes.
+ */
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ iocb->ulpClass = CLASS3;
+ psb->cur_iocbq.context1 = psb;
+ psb->dma_phys_bpl = pdma_phys_bpl;
+
+ /* add the scsi buffer to a post list */
+ list_add_tail(&psb->list, &post_sblist);
+ spin_lock_irq(&phba->scsi_buf_list_get_lock);
+ phba->sli4_hba.scsi_xri_cnt++;
+ spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_BG,
+ "3021 Allocate %d out of %d requested new SCSI "
+ "buffers\n", bcnt, num_to_alloc);
+
+ /* post the list of scsi buffer sgls to port if available */
+ if (!list_empty(&post_sblist))
+ num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
+ &post_sblist, bcnt);
+ else
+ num_posted = 0;
+
+ return num_posted;
+}
+
+/**
+ * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
+ * @vport: The virtual port for which this call being executed.
+ * @num_to_allocate: The requested number of buffers to allocate.
+ *
+ * This routine wraps the actual SCSI buffer allocator function pointer from
+ * the lpfc_hba struct.
+ *
+ * Return codes:
+ * int - number of scsi buffers that were allocated.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static inline int
+lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+ return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
+}
+
+/**
+ * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_scsi_buf * lpfc_cmd = NULL;
+ struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
+ list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+ list);
+ if (!lpfc_cmd) {
+ spin_lock(&phba->scsi_buf_list_put_lock);
+ list_splice(&phba->lpfc_scsi_buf_list_put,
+ &phba->lpfc_scsi_buf_list_get);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+ list_remove_head(scsi_buf_list_get, lpfc_cmd,
+ struct lpfc_scsi_buf, list);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
+ }
+ spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+ return lpfc_cmd;
+}
+/**
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
+ unsigned long iflag = 0;
+ int found = 0;
+
+ spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
+ list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+ &phba->lpfc_scsi_buf_list_get, list) {
+ if (lpfc_test_rrq_active(phba, ndlp,
+ lpfc_cmd->cur_iocbq.sli4_lxritag))
+ continue;
+ list_del(&lpfc_cmd->list);
+ found = 1;
+ break;
+ }
+ if (!found) {
+ spin_lock(&phba->scsi_buf_list_put_lock);
+ list_splice(&phba->lpfc_scsi_buf_list_put,
+ &phba->lpfc_scsi_buf_list_get);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
+ list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+ &phba->lpfc_scsi_buf_list_get, list) {
+ if (lpfc_test_rrq_active(
+ phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+ continue;
+ list_del(&lpfc_cmd->list);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+ if (!found)
+ return NULL;
+ return lpfc_cmd;
+}
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ return phba->lpfc_get_scsi_buf(phba, ndlp);
+}
+
+/**
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list.
+ **/
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+ unsigned long iflag = 0;
+
+ psb->seg_cnt = 0;
+ psb->nonsg_phys = 0;
+ psb->prot_seg_cnt = 0;
+
+ spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+ psb->pCmd = NULL;
+ psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+}
+
+/**
+ * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * and cannot be reused for at least RA_TOV amount of time if it was
+ * aborted.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+ unsigned long iflag = 0;
+
+ psb->seg_cnt = 0;
+ psb->nonsg_phys = 0;
+ psb->prot_seg_cnt = 0;
+
+ if (psb->exch_busy) {
+ spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+ iflag);
+ psb->pCmd = NULL;
+ list_add_tail(&psb->list,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+ iflag);
+ } else {
+ psb->pCmd = NULL;
+ psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+ list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+ }
+}
+
+/**
+ * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list.
+ **/
+static void
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+
+ phba->lpfc_release_scsi_buf(phba, psb);
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
+ * through sg elements and format the bdea. This routine also initializes all
+ * IOCB fields which are dependent on scsi command request buffer.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct scatterlist *sgel = NULL;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
+ dma_addr_t physaddr;
+ uint32_t num_bde = 0;
+ int nseg, datadir = scsi_cmnd->sc_data_direction;
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither. Start the lpfc command prep by
+ * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+ * data bde entry.
+ */
+ bpl += 2;
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+
+ nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!nseg))
+ return 1;
+
+ lpfc_cmd->seg_cnt = nseg;
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9064 BLKGRD: %s: Too many sg segments from "
+ "dma_map_sg. Config %d, seg_cnt %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ lpfc_cmd->seg_cnt = 0;
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ /*
+ * The driver established a maximum scatter-gather segment count
+ * during probe that limits the number of sg elements in any
+ * single scsi command. Just run through the seg_cnt and format
+ * the bde's.
+ * When using SLI-3 the driver will try to fit all the BDEs into
+ * the IOCB. If it can't then the BDEs get added to a BPL as it
+ * does for SLI-2 mode.
+ */
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+ physaddr = sg_dma_address(sgel);
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+ nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde++;
+ } else {
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(physaddr));
+ bpl++;
+ }
+ }
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+ * explicitly reinitialized and for SLI-3 the extended bde count is
+ * explicitly reinitialized since all iocb memory resources are reused.
+ */
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+ if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+ /*
+ * The extended IOCB format can only fit 3 BDE or a BPL.
+ * This I/O has more than 3 BDE so the 1st data bde will
+ * be a BPL that is filled in here.
+ */
+ physaddr = lpfc_cmd->dma_handle;
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ data_bde->tus.f.bdeSize = (num_bde *
+ sizeof(struct ulp_bde64));
+ physaddr += (sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ (2 * sizeof(struct ulp_bde64)));
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ /* ebde count includes the response bde and data bpl */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+ } else {
+ /* ebde count includes the response bde and data bdes */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+ }
+ } else {
+ iocb_cmd->un.fcpi64.bdl.bdeSize =
+ ((num_bde + 2) * sizeof(struct ulp_bde64));
+ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+ }
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* Return if if error injection is detected by Initiator */
+#define BG_ERR_INIT 0x1
+/* Return if if error injection is detected by Target */
+#define BG_ERR_TGT 0x2
+/* Return if if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP 0x10
+/* Return if disabling Guard/Ref/App checking is required for error injection */
+#define BG_ERR_CHECK 0x20
+
+/**
+ * lpfc_bg_err_inject - Determine if we should inject an error
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @reftag: (out) BlockGuard reference tag for transmitted data
+ * @apptag: (out) BlockGuard application tag for transmitted data
+ * @new_guard (in) Value to replace CRC with if needed
+ *
+ * Returns BG_ERR_* bit mask or 0 if request ignored
+ **/
+static int
+lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
+{
+ struct scatterlist *sgpe; /* s/g prot entry */
+ struct scatterlist *sgde; /* s/g data entry */
+ struct lpfc_scsi_buf *lpfc_cmd = NULL;
+ struct scsi_dif_tuple *src = NULL;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_rport_data *rdata;
+ uint32_t op = scsi_get_prot_op(sc);
+ uint32_t blksize;
+ uint32_t numblks;
+ sector_t lba;
+ int rc = 0;
+ int blockoff = 0;
+
+ if (op == SCSI_PROT_NORMAL)
+ return 0;
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+ lba = scsi_get_lba(sc);
+
+ /* First check if we need to match the LBA */
+ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
+ blksize = lpfc_cmd_blksize(sc);
+ numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
+
+ /* Make sure we have the right LBA if one is specified */
+ if ((phba->lpfc_injerr_lba < lba) ||
+ (phba->lpfc_injerr_lba >= (lba + numblks)))
+ return 0;
+ if (sgpe) {
+ blockoff = phba->lpfc_injerr_lba - lba;
+ numblks = sg_dma_len(sgpe) /
+ sizeof(struct scsi_dif_tuple);
+ if (numblks < blockoff)
+ blockoff = numblks;
+ }
+ }
+
+ /* Next check if we need to match the remote NPortID or WWPN */
+ rdata = lpfc_rport_data_from_scsi_device(sc->device);
+ if (rdata && rdata->pnode) {
+ ndlp = rdata->pnode;
+
+ /* Make sure we have the right NPortID if one is specified */
+ if (phba->lpfc_injerr_nportid &&
+ (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+ return 0;
+
+ /*
+ * Make sure we have the right WWPN if one is specified.
+ * wwn[0] should be a non-zero NAA in a good WWPN.
+ */
+ if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
+ (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+ sizeof(struct lpfc_name)) != 0))
+ return 0;
+ }
+
+ /* Setup a ptr to the protection data if the SCSI host provides it */
+ if (sgpe) {
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ src += blockoff;
+ lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+ }
+
+ /* Should we change the Reference Tag */
+ if (reftag) {
+ if (phba->lpfc_injerr_wref_cnt) {
+ switch (op) {
+ case SCSI_PROT_WRITE_PASS:
+ if (src) {
+ /*
+ * For WRITE_PASS, force the error
+ * to be sent on the wire. It should
+ * be detected by the Target.
+ * If blockoff != 0 error will be
+ * inserted in middle of the IO.
+ */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9076 BLKGRD: Injecting reftag error: "
+ "write lba x%lx + x%x oldrefTag x%x\n",
+ (unsigned long)lba, blockoff,
+ be32_to_cpu(src->ref_tag));
+
+ /*
+ * Save the old ref_tag so we can
+ * restore it on completion.
+ */
+ if (lpfc_cmd) {
+ lpfc_cmd->prot_data_type =
+ LPFC_INJERR_REFTAG;
+ lpfc_cmd->prot_data_segment =
+ src;
+ lpfc_cmd->prot_data =
+ src->ref_tag;
+ }
+ src->ref_tag = cpu_to_be32(0xDEADBEEF);
+ phba->lpfc_injerr_wref_cnt--;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+ break;
+ }
+ /* Drop thru */
+ case SCSI_PROT_WRITE_INSERT:
+ /*
+ * For WRITE_INSERT, force the error
+ * to be sent on the wire. It should be
+ * detected by the Target.
+ */
+ /* DEADBEEF will be the reftag on the wire */
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_wref_cnt--;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9078 BLKGRD: Injecting reftag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ /*
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
+ */
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_wref_cnt--;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9077 BLKGRD: Injecting reftag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ }
+ }
+ if (phba->lpfc_injerr_rref_cnt) {
+ switch (op) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_READ_PASS:
+ /*
+ * For READ_STRIP and READ_PASS, force the
+ * error on data being read off the wire. It
+ * should force an IO error to the driver.
+ */
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_rref_cnt--;
+ if (phba->lpfc_injerr_rref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9079 BLKGRD: Injecting reftag error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ break;
+ }
+ }
+ }
+
+ /* Should we change the Application Tag */
+ if (apptag) {
+ if (phba->lpfc_injerr_wapp_cnt) {
+ switch (op) {
+ case SCSI_PROT_WRITE_PASS:
+ if (src) {
+ /*
+ * For WRITE_PASS, force the error
+ * to be sent on the wire. It should
+ * be detected by the Target.
+ * If blockoff != 0 error will be
+ * inserted in middle of the IO.
+ */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9080 BLKGRD: Injecting apptag error: "
+ "write lba x%lx + x%x oldappTag x%x\n",
+ (unsigned long)lba, blockoff,
+ be16_to_cpu(src->app_tag));
+
+ /*
+ * Save the old app_tag so we can
+ * restore it on completion.
+ */
+ if (lpfc_cmd) {
+ lpfc_cmd->prot_data_type =
+ LPFC_INJERR_APPTAG;
+ lpfc_cmd->prot_data_segment =
+ src;
+ lpfc_cmd->prot_data =
+ src->app_tag;
+ }
+ src->app_tag = cpu_to_be16(0xDEAD);
+ phba->lpfc_injerr_wapp_cnt--;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+ break;
+ }
+ /* Drop thru */
+ case SCSI_PROT_WRITE_INSERT:
+ /*
+ * For WRITE_INSERT, force the
+ * error to be sent on the wire. It should be
+ * detected by the Target.
+ */
+ /* DEAD will be the apptag on the wire */
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_wapp_cnt--;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0813 BLKGRD: Injecting apptag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ /*
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
+ */
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_wapp_cnt--;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0812 BLKGRD: Injecting apptag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ }
+ }
+ if (phba->lpfc_injerr_rapp_cnt) {
+ switch (op) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_READ_PASS:
+ /*
+ * For READ_STRIP and READ_PASS, force the
+ * error on data being read off the wire. It
+ * should force an IO error to the driver.
+ */
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_rapp_cnt--;
+ if (phba->lpfc_injerr_rapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0814 BLKGRD: Injecting apptag error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ break;
+ }
+ }
+ }
+
+
+ /* Should we change the Guard Tag */
+ if (new_guard) {
+ if (phba->lpfc_injerr_wgrd_cnt) {
+ switch (op) {
+ case SCSI_PROT_WRITE_PASS:
+ rc = BG_ERR_CHECK;
+ /* Drop thru */
+
+ case SCSI_PROT_WRITE_INSERT:
+ /*
+ * For WRITE_INSERT, force the
+ * error to be sent on the wire. It should be
+ * detected by the Target.
+ */
+ phba->lpfc_injerr_wgrd_cnt--;
+ if (phba->lpfc_injerr_wgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+
+ rc |= BG_ERR_TGT | BG_ERR_SWAP;
+ /* Signals the caller to swap CRC->CSUM */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0817 BLKGRD: Injecting guard error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ /*
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
+ */
+ phba->lpfc_injerr_wgrd_cnt--;
+ if (phba->lpfc_injerr_wgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+
+ rc = BG_ERR_INIT | BG_ERR_SWAP;
+ /* Signals the caller to swap CRC->CSUM */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0816 BLKGRD: Injecting guard error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ }
+ }
+ if (phba->lpfc_injerr_rgrd_cnt) {
+ switch (op) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_READ_PASS:
+ /*
+ * For READ_STRIP and READ_PASS, force the
+ * error on data being read off the wire. It
+ * should force an IO error to the driver.
+ */
+ phba->lpfc_injerr_rgrd_cnt--;
+ if (phba->lpfc_injerr_rgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+
+ rc = BG_ERR_INIT | BG_ERR_SWAP;
+ /* Signals the caller to swap CRC->CSUM */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0818 BLKGRD: Injecting guard error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ }
+ }
+ }
+
+ return rc;
+}
+#endif
+
+/**
+ * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
+ * the specified SCSI command.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txopt: (out) BlockGuard operation for transmitted data
+ * @rxopt: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint8_t *txop, uint8_t *rxop)
+{
+ uint8_t ret = 0;
+
+ if (lpfc_cmd_guard_csum(sc)) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ *rxop = BG_OP_IN_NODIF_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ *rxop = BG_OP_IN_CRC_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_CRC;
+ break;
+
+ case SCSI_PROT_NORMAL:
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
+ scsi_get_prot_op(sc));
+ ret = 1;
+ break;
+
+ }
+ } else {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ *rxop = BG_OP_IN_CRC_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_CRC;
+ break;
+
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ *rxop = BG_OP_IN_NODIF_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
+ break;
+
+ case SCSI_PROT_NORMAL:
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
+ scsi_get_prot_op(sc));
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
+ * the specified SCSI command in order to force a guard tag error.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txopt: (out) BlockGuard operation for transmitted data
+ * @rxopt: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint8_t *txop, uint8_t *rxop)
+{
+ uint8_t ret = 0;
+
+ if (lpfc_cmd_guard_csum(sc)) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ *rxop = BG_OP_IN_NODIF_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ *rxop = BG_OP_IN_CSUM_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ *rxop = BG_OP_IN_CSUM_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_NORMAL:
+ default:
+ break;
+
+ }
+ } else {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ *rxop = BG_OP_IN_CSUM_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ *rxop = BG_OP_IN_CSUM_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ *rxop = BG_OP_IN_NODIF_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
+ break;
+
+ case SCSI_PROT_NORMAL:
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif
+
+/**
+ * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into data stream (or strip DIF from
+ * incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ * +-------------------------+
+ * start of prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
+ * +-------------------------+
+ * | Data BDE |
+ * +-------------------------+
+ * |more Data BDE's ... (opt)|
+ * +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct ulp_bde64 *bpl, int datasegcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
+ dma_addr_t physaddr;
+ int i = 0, num_bde = 0, status;
+ int datadir = sc->sc_data_direction;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t rc;
+#endif
+ uint32_t checking = 1;
+ uint32_t reftag;
+ unsigned blksize;
+ uint8_t txop, rxop;
+
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
+ goto out;
+
+ /* extract some info from the scsi command for pde*/
+ blksize = lpfc_cmd_blksize(sc);
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
+#endif
+
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+
+ /* Endianness conversion if necessary for PDE5 */
+ pde5->word0 = cpu_to_le32(pde5->word0);
+ pde5->reftag = cpu_to_le32(reftag);
+
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+
+ /*
+ * We only need to check the data on READs, for WRITEs
+ * protection data is automatically generated, not checked.
+ */
+ if (datadir == DMA_FROM_DEVICE) {
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ bf_set(pde6_ce, pde6, checking);
+ else
+ bf_set(pde6_ce, pde6, 0);
+
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ bf_set(pde6_re, pde6, checking);
+ else
+ bf_set(pde6_re, pde6, 0);
+ }
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_ae, pde6, 0);
+ bf_set(pde6_apptagval, pde6, 0);
+
+ /* Endianness conversion if necessary for PDE6 */
+ pde6->word0 = cpu_to_le32(pde6->word0);
+ pde6->word1 = cpu_to_le32(pde6->word1);
+ pde6->word2 = cpu_to_le32(pde6->word2);
+
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+
+ /* assumption: caller has already run dma_map_sg on command data */
+ scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+ physaddr = sg_dma_address(sgde);
+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ bpl->tus.f.bdeSize = sg_dma_len(sgde);
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+ num_bde++;
+ }
+
+out:
+ return num_bde;
+}
+
+/**
+ * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segment of protection data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then by instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * The HBA could extract the DIFs and place it in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ * +-------------------------+
+ * start of first prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
+ * +-------------------------+
+ * | PDE_7 (Prot BDE) |
+ * +-------------------------+
+ * | Data BDE |
+ * +-------------------------+
+ * |more Data BDE's ... (opt)|
+ * +-------------------------+
+ * start of new prot group --> | PDE_5 |
+ * +-------------------------+
+ * | ... |
+ * +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ * mapped for DMA
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct scatterlist *sgpe = NULL; /* s/g prot entry */
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
+ struct lpfc_pde7 *pde7 = NULL;
+ dma_addr_t dataphysaddr, protphysaddr;
+ unsigned short curr_data = 0, curr_prot = 0;
+ unsigned int split_offset;
+ unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+ unsigned int protgrp_blks, protgrp_bytes;
+ unsigned int remainder, subtotal;
+ int status;
+ int datadir = sc->sc_data_direction;
+ unsigned char pgdone = 0, alldone = 0;
+ unsigned blksize;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t rc;
+#endif
+ uint32_t checking = 1;
+ uint32_t reftag;
+ uint8_t txop, rxop;
+ int num_bde = 0;
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ if (!sgpe || !sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ sgpe, sgde);
+ return 0;
+ }
+
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
+ goto out;
+
+ /* extract some info from the scsi command */
+ blksize = lpfc_cmd_blksize(sc);
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
+#endif
+
+ split_offset = 0;
+ do {
+ /* Check to see if we ran out of space */
+ if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+ return num_bde + 3;
+
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+
+ /* Endianness conversion if necessary for PDE5 */
+ pde5->word0 = cpu_to_le32(pde5->word0);
+ pde5->reftag = cpu_to_le32(reftag);
+
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ bf_set(pde6_ce, pde6, checking);
+ else
+ bf_set(pde6_ce, pde6, 0);
+
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ bf_set(pde6_re, pde6, checking);
+ else
+ bf_set(pde6_re, pde6, 0);
+
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_ae, pde6, 0);
+ bf_set(pde6_apptagval, pde6, 0);
+
+ /* Endianness conversion if necessary for PDE6 */
+ pde6->word0 = cpu_to_le32(pde6->word0);
+ pde6->word1 = cpu_to_le32(pde6->word1);
+ pde6->word2 = cpu_to_le32(pde6->word2);
+
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+
+ /* setup the first BDE that points to protection buffer */
+ protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+ protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+ /* must be integer multiple of the DIF block length */
+ BUG_ON(protgroup_len % 8);
+
+ pde7 = (struct lpfc_pde7 *) bpl;
+ memset(pde7, 0, sizeof(struct lpfc_pde7));
+ bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
+
+ pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+
+ protgrp_blks = protgroup_len / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+
+ /* check if this pde is crossing the 4K boundary; if so split */
+ if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
+ protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
+ protgroup_offset += protgroup_remainder;
+ protgrp_blks = protgroup_remainder / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+ } else {
+ protgroup_offset = 0;
+ curr_prot++;
+ }
+
+ num_bde++;
+
+ /* setup BDE's for data blocks associated with DIF data */
+ pgdone = 0;
+ subtotal = 0; /* total bytes processed for current prot grp */
+ while (!pgdone) {
+ /* Check to see if we ran out of space */
+ if (num_bde >= phba->cfg_total_seg_cnt)
+ return num_bde + 1;
+
+ if (!sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9065 BLKGRD:%s Invalid data segment\n",
+ __func__);
+ return 0;
+ }
+ bpl++;
+ dataphysaddr = sg_dma_address(sgde) + split_offset;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+ remainder = sg_dma_len(sgde) - split_offset;
+
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ bpl->tus.f.bdeSize = remainder;
+ split_offset = 0;
+
+ if ((subtotal + remainder) == protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next prot grp */
+ bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+ split_offset += bpl->tus.f.bdeSize;
+ }
+
+ subtotal += bpl->tus.f.bdeSize;
+
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ num_bde++;
+ curr_data++;
+
+ if (split_offset)
+ break;
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+
+ }
+
+ if (protgroup_offset) {
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ bpl++;
+ continue;
+ }
+
+ /* are we done ? */
+ if (curr_prot == protcnt) {
+ alldone = 1;
+ } else if (curr_prot < protcnt) {
+ /* advance to next prot buffer */
+ sgpe = sg_next(sgpe);
+ bpl++;
+
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ } else {
+ /* if we're here, we have a bug */
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9054 BLKGRD: bug in %s\n", __func__);
+ }
+
+ } while (!alldone);
+out:
+
+ return num_bde;
+}
+
+/**
+ * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into data stream (or strip DIF from
+ * incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ * +-------------------------+
+ * start of prot group --> | DI_SEED |
+ * +-------------------------+
+ * | Data SGE |
+ * +-------------------------+
+ * |more Data SGE's ... (opt)|
+ * +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datasegcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct sli4_sge_diseed *diseed = NULL;
+ dma_addr_t physaddr;
+ int i = 0, num_sge = 0, status;
+ uint32_t reftag;
+ unsigned blksize;
+ uint8_t txop, rxop;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t rc;
+#endif
+ uint32_t checking = 1;
+ uint32_t dma_len;
+ uint32_t dma_offset = 0;
+
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
+ goto out;
+
+ /* extract some info from the scsi command for pde*/
+ blksize = lpfc_cmd_blksize(sc);
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
+#endif
+
+ /* setup DISEED with what we have */
+ diseed = (struct sli4_sge_diseed *) sgl;
+ memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+ /* Endianness conversion if necessary */
+ diseed->ref_tag = cpu_to_le32(reftag);
+ diseed->ref_tag_tran = diseed->ref_tag;
+
+ /*
+ * We only need to check the data on READs, for WRITEs
+ * protection data is automatically generated, not checked.
+ */
+ if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+ bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+ else
+ bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+ else
+ bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+ }
+
+ /* setup DISEED with the rest of the info */
+ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+ bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+
+ bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+ bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+ /* Endianness conversion if necessary for DISEED */
+ diseed->word2 = cpu_to_le32(diseed->word2);
+ diseed->word3 = cpu_to_le32(diseed->word3);
+
+ /* advance bpl and increment sge count */
+ num_sge++;
+ sgl++;
+
+ /* assumption: caller has already run dma_map_sg on command data */
+ scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+ physaddr = sg_dma_address(sgde);
+ dma_len = sg_dma_len(sgde);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ if ((i + 1) == datasegcnt)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ sgl++;
+ num_sge++;
+ }
+
+out:
+ return num_sge;
+}
+
+/**
+ * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segment of protection data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then by instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * The HBA could extract the DIFs and place it in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ * +-------------------------+
+ * start of first prot group --> | DISEED |
+ * +-------------------------+
+ * | DIF (Prot SGE) |
+ * +-------------------------+
+ * | Data SGE |
+ * +-------------------------+
+ * |more Data SGE's ... (opt)|
+ * +-------------------------+
+ * start of new prot group --> | DISEED |
+ * +-------------------------+
+ * | ... |
+ * +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ * mapped for DMA
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datacnt, int protcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct scatterlist *sgpe = NULL; /* s/g prot entry */
+ struct sli4_sge_diseed *diseed = NULL;
+ dma_addr_t dataphysaddr, protphysaddr;
+ unsigned short curr_data = 0, curr_prot = 0;
+ unsigned int split_offset;
+ unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+ unsigned int protgrp_blks, protgrp_bytes;
+ unsigned int remainder, subtotal;
+ int status;
+ unsigned char pgdone = 0, alldone = 0;
+ unsigned blksize;
+ uint32_t reftag;
+ uint8_t txop, rxop;
+ uint32_t dma_len;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t rc;
+#endif
+ uint32_t checking = 1;
+ uint32_t dma_offset = 0;
+ int num_sge = 0;
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ if (!sgpe || !sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ sgpe, sgde);
+ return 0;
+ }
+
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
+ goto out;
+
+ /* extract some info from the scsi command */
+ blksize = lpfc_cmd_blksize(sc);
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
+#endif
+
+ split_offset = 0;
+ do {
+ /* Check to see if we ran out of space */
+ if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+ return num_sge + 3;
+
+ /* setup DISEED with what we have */
+ diseed = (struct sli4_sge_diseed *) sgl;
+ memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+ /* Endianness conversion if necessary */
+ diseed->ref_tag = cpu_to_le32(reftag);
+ diseed->ref_tag_tran = diseed->ref_tag;
+
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
+ bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+ } else {
+ bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+ /*
+ * When in this mode, the hardware will replace
+ * the guard tag from the host with a
+ * newly generated good CRC for the wire.
+ * Switch to raw mode here to avoid this
+ * behavior. What the host sends gets put on the wire.
+ */
+ if (txop == BG_OP_IN_CRC_OUT_CRC) {
+ txop = BG_OP_RAW_MODE;
+ rxop = BG_OP_RAW_MODE;
+ }
+ }
+
+
+ if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+ bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+ else
+ bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
+ /* setup DISEED with the rest of the info */
+ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+ bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+
+ bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+ bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+ /* Endianness conversion if necessary for DISEED */
+ diseed->word2 = cpu_to_le32(diseed->word2);
+ diseed->word3 = cpu_to_le32(diseed->word3);
+
+ /* advance sgl and increment bde count */
+ num_sge++;
+ sgl++;
+
+ /* setup the first BDE that points to protection buffer */
+ protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+ protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+ /* must be integer multiple of the DIF block length */
+ BUG_ON(protgroup_len % 8);
+
+ /* Now setup DIF SGE */
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
+ sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
+ sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ protgrp_blks = protgroup_len / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+
+ /* check if DIF SGE is crossing the 4K boundary; if so split */
+ if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
+ protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
+ protgroup_offset += protgroup_remainder;
+ protgrp_blks = protgroup_remainder / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+ } else {
+ protgroup_offset = 0;
+ curr_prot++;
+ }
+
+ num_sge++;
+
+ /* setup SGE's for data blocks associated with DIF data */
+ pgdone = 0;
+ subtotal = 0; /* total bytes processed for current prot grp */
+ while (!pgdone) {
+ /* Check to see if we ran out of space */
+ if (num_sge >= phba->cfg_total_seg_cnt)
+ return num_sge + 1;
+
+ if (!sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9086 BLKGRD:%s Invalid data segment\n",
+ __func__);
+ return 0;
+ }
+ sgl++;
+ dataphysaddr = sg_dma_address(sgde) + split_offset;
+
+ remainder = sg_dma_len(sgde) - split_offset;
+
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ dma_len = remainder;
+ split_offset = 0;
+
+ if ((subtotal + remainder) == protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next prot grp */
+ dma_len = protgrp_bytes - subtotal;
+ split_offset += dma_len;
+ }
+
+ subtotal += dma_len;
+
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ num_sge++;
+ curr_data++;
+
+ if (split_offset)
+ break;
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+ }
+
+ if (protgroup_offset) {
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ sgl++;
+ continue;
+ }
+
+ /* are we done ? */
+ if (curr_prot == protcnt) {
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ alldone = 1;
+ } else if (curr_prot < protcnt) {
+ /* advance to next prot buffer */
+ sgpe = sg_next(sgpe);
+ sgl++;
+
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ } else {
+ /* if we're here, we have a bug */
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9085 BLKGRD: bug in %s\n", __func__);
+ }
+
+ } while (!alldone);
+
+out:
+
+ return num_sge;
+}
+
+/**
+ * lpfc_prot_group_type - Get prtotection group type of SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ *
+ * Given a SCSI command that supports DIF, determine composition of protection
+ * groups involved in setting up buffer lists
+ *
+ * Returns: Protection group type (with or without DIF)
+ *
+ **/
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+ int ret = LPFC_PG_TYPE_INVALID;
+ unsigned char op = scsi_get_prot_op(sc);
+
+ switch (op) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret = LPFC_PG_TYPE_NO_DIF;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ ret = LPFC_PG_TYPE_DIF_BUF;
+ break;
+ default:
+ if (phba)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9021 Unsupported protection op:%d\n",
+ op);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+ int fcpdl;
+
+ fcpdl = scsi_bufflen(sc);
+
+ /* Check if there is protection data on the wire */
+ if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+ /* Read check for protection data */
+ if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
+ return fcpdl;
+
+ } else {
+ /* Write check for protection data */
+ if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
+ return fcpdl;
+ }
+
+ /*
+ * If we are in DIF Type 1 mode every data block has a 8 byte
+ * DIF (trailer) attached to it. Must ajust FCP data length
+ * to account for the protection data.
+ */
+ fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+
+ return fcpdl;
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ uint32_t num_bde = 0;
+ int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+ int prot_group_type = 0;
+ int fcpdl;
+
+ /*
+ * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
+ * fcp_rsp regions to the first data bde entry
+ */
+ bpl += 2;
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+ datasegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!datasegcnt))
+ return 1;
+
+ lpfc_cmd->seg_cnt = datasegcnt;
+
+ /* First check if data segment count from SCSI Layer is good */
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ goto err;
+
+ prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+ switch (prot_group_type) {
+ case LPFC_PG_TYPE_NO_DIF:
+
+ /* Here we need to add a PDE5 and PDE6 to the count */
+ if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+ goto err;
+
+ num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+ datasegcnt);
+ /* we should have 2 or more entries in buffer list */
+ if (num_bde < 2)
+ goto err;
+ break;
+
+ case LPFC_PG_TYPE_DIF_BUF:
+ /*
+ * This type indicates that protection buffers are
+ * passed to the driver, so that needs to be prepared
+ * for DMA
+ */
+ protsegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!protsegcnt)) {
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ lpfc_cmd->prot_seg_cnt = protsegcnt;
+
+ /*
+ * There is a minimun of 4 BPLs used for every
+ * protection data segment.
+ */
+ if ((lpfc_cmd->prot_seg_cnt * 4) >
+ (phba->cfg_total_seg_cnt - 2))
+ goto err;
+
+ num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+ datasegcnt, protsegcnt);
+ /* we should have 3 or more entries in buffer list */
+ if ((num_bde < 3) ||
+ (num_bde > phba->cfg_total_seg_cnt))
+ goto err;
+ break;
+
+ case LPFC_PG_TYPE_INVALID:
+ default:
+ scsi_dma_unmap(scsi_cmnd);
+ lpfc_cmd->seg_cnt = 0;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9022 Unexpected protection group %i\n",
+ prot_group_type);
+ return 1;
+ }
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
+ * reinitialized since all iocb memory resources are used many times
+ * for transmit, receive, and continuation bpl's.
+ */
+ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
+ iocb_cmd->ulpBdeCount = 1;
+ iocb_cmd->ulpLe = 1;
+
+ fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+ fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
+ return 0;
+err:
+ if (lpfc_cmd->seg_cnt)
+ scsi_dma_unmap(scsi_cmnd);
+ if (lpfc_cmd->prot_seg_cnt)
+ dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ scsi_cmnd->sc_data_direction);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9023 Cannot setup S/G List for HBA"
+ "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+ lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+ phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+ prot_group_type, num_bde);
+
+ lpfc_cmd->seg_cnt = 0;
+ lpfc_cmd->prot_seg_cnt = 0;
+ return 1;
+}
+
+/*
+ * This function calcuates the T10 DIF guard tag
+ * on the specified data using a CRC algorithmn
+ * using crc_t10dif.
+ */
+static uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+ uint16_t crc = 0;
+ uint16_t x;
+
+ crc = crc_t10dif(data, count);
+ x = cpu_to_be16(crc);
+ return x;
+}
+
+/*
+ * This function calcuates the T10 DIF guard tag
+ * on the specified data using a CSUM algorithmn
+ * using ip_compute_csum.
+ */
+static uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+ uint16_t ret;
+
+ ret = ip_compute_csum(data, count);
+ return ret;
+}
+
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
+static void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scatterlist *sgpe; /* s/g prot entry */
+ struct scatterlist *sgde; /* s/g data entry */
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct scsi_dif_tuple *src = NULL;
+ uint8_t *data_src = NULL;
+ uint16_t guard_tag, guard_type;
+ uint16_t start_app_tag, app_tag;
+ uint32_t start_ref_tag, ref_tag;
+ int prot, protsegcnt;
+ int err_type, len, data_len;
+ int chk_ref, chk_app, chk_guard;
+ uint16_t sum;
+ unsigned blksize;
+
+ err_type = BGS_GUARD_ERR_MASK;
+ sum = 0;
+ guard_tag = 0;
+
+ /* First check to see if there is protection data to examine */
+ prot = scsi_get_prot_op(cmd);
+ if ((prot == SCSI_PROT_READ_STRIP) ||
+ (prot == SCSI_PROT_WRITE_INSERT) ||
+ (prot == SCSI_PROT_NORMAL))
+ goto out;
+
+ /* Currently the driver just supports ref_tag and guard_tag checking */
+ chk_ref = 1;
+ chk_app = 0;
+ chk_guard = 0;
+
+ /* Setup a ptr to the protection data provided by the SCSI host */
+ sgpe = scsi_prot_sglist(cmd);
+ protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+ if (sgpe && protsegcnt) {
+
+ /*
+ * We will only try to verify guard tag if the segment
+ * data length is a multiple of the blksize.
+ */
+ sgde = scsi_sglist(cmd);
+ blksize = lpfc_cmd_blksize(cmd);
+ data_src = (uint8_t *)sg_virt(sgde);
+ data_len = sgde->length;
+ if ((data_len & (blksize - 1)) == 0)
+ chk_guard = 1;
+ guard_type = scsi_host_get_guard(cmd->device->host);
+
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+ start_app_tag = src->app_tag;
+ len = sgpe->length;
+ while (src && protsegcnt) {
+ while (len) {
+
+ /*
+ * First check to see if a protection data
+ * check is valid
+ */
+ if ((src->ref_tag == 0xffffffff) ||
+ (src->app_tag == 0xffff)) {
+ start_ref_tag++;
+ goto skipit;
+ }
+
+ /* First Guard Tag checking */
+ if (chk_guard) {
+ guard_tag = src->guard_tag;
+ if (lpfc_cmd_guard_csum(cmd))
+ sum = lpfc_bg_csum(data_src,
+ blksize);
+ else
+ sum = lpfc_bg_crc(data_src,
+ blksize);
+ if ((guard_tag != sum)) {
+ err_type = BGS_GUARD_ERR_MASK;
+ goto out;
+ }
+ }
+
+ /* Reference Tag checking */
+ ref_tag = be32_to_cpu(src->ref_tag);
+ if (chk_ref && (ref_tag != start_ref_tag)) {
+ err_type = BGS_REFTAG_ERR_MASK;
+ goto out;
+ }
+ start_ref_tag++;
+
+ /* App Tag checking */
+ app_tag = src->app_tag;
+ if (chk_app && (app_tag != start_app_tag)) {
+ err_type = BGS_APPTAG_ERR_MASK;
+ goto out;
+ }
+skipit:
+ len -= sizeof(struct scsi_dif_tuple);
+ if (len < 0)
+ len = 0;
+ src++;
+
+ data_src += blksize;
+ data_len -= blksize;
+
+ /*
+ * Are we at the end of the Data segment?
+ * The data segment is only used for Guard
+ * tag checking.
+ */
+ if (chk_guard && (data_len == 0)) {
+ chk_guard = 0;
+ sgde = sg_next(sgde);
+ if (!sgde)
+ goto out;
+
+ data_src = (uint8_t *)sg_virt(sgde);
+ data_len = sgde->length;
+ if ((data_len & (blksize - 1)) == 0)
+ chk_guard = 1;
+ }
+ }
+
+ /* Goto the next Protection data segment */
+ sgpe = sg_next(sgpe);
+ if (sgpe) {
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ len = sgpe->length;
+ } else {
+ src = NULL;
+ }
+ protsegcnt--;
+ }
+ }
+out:
+ if (err_type == BGS_GUARD_ERR_MASK) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ cmd->result = DRIVER_SENSE << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ phba->bg_guard_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+ (unsigned long)scsi_get_lba(cmd),
+ sum, guard_tag);
+
+ } else if (err_type == BGS_REFTAG_ERR_MASK) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ cmd->result = DRIVER_SENSE << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_reftag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+ (unsigned long)scsi_get_lba(cmd),
+ ref_tag, start_ref_tag);
+
+ } else if (err_type == BGS_APPTAG_ERR_MASK) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ cmd->result = DRIVER_SENSE << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_apptag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+ (unsigned long)scsi_get_lba(cmd),
+ app_tag, start_app_tag);
+ }
+}
+
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA. In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ * 0 - No error found
+ * 1 - BlockGuard error found
+ * -1 - Internal error (bad profile, ...etc)
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
+{
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ int ret = 0;
+ uint32_t bghm = bgf->bghm;
+ uint32_t bgstat = bgf->bgstat;
+ uint64_t failing_sector = 0;
+
+ spin_lock(&_dump_buf_lock);
+ if (!_dump_buf_done) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
+ " Data for %u blocks to debugfs\n",
+ (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+ lpfc_debug_save_data(phba, cmd);
+
+ /* If we have a prot sgl, save the DIF buffer */
+ if (lpfc_prot_group_type(phba, cmd) ==
+ LPFC_PG_TYPE_DIF_BUF) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
+ "Saving DIF for %u blocks to debugfs\n",
+ (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+ lpfc_debug_save_dif(phba, cmd);
+ }
+
+ _dump_buf_done = 1;
+ }
+ spin_unlock(&_dump_buf_lock);
+
+ if (lpfc_bgs_get_invalid_prof(bgstat)) {
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9072 BLKGRD: Invalid BG Profile in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ ret = (-1);
+ goto out;
+ }
+
+ if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9073 BLKGRD: Invalid BG PDIF Block in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ ret = (-1);
+ goto out;
+ }
+
+ if (lpfc_bgs_get_guard_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ cmd->result = DRIVER_SENSE << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ phba->bg_guard_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9055 BLKGRD: Guard Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_reftag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ cmd->result = DRIVER_SENSE << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_reftag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9056 BLKGRD: Ref Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_apptag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ cmd->result = DRIVER_SENSE << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_apptag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9061 BLKGRD: App Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+ /*
+ * setup sense data descriptor 0 per SPC-4 as an information
+ * field, and put the failing LBA in it.
+ * This code assumes there was also a guard/app/ref tag error
+ * indication.
+ */
+ cmd->sense_buffer[7] = 0xc; /* Additional sense length */
+ cmd->sense_buffer[8] = 0; /* Information descriptor type */
+ cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
+ cmd->sense_buffer[10] = 0x80; /* Validity bit */
+
+ /* bghm is a "on the wire" FC frame based count */
+ switch (scsi_get_prot_op(cmd)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ bghm /= cmd->device->sector_size;
+ break;
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ bghm /= (cmd->device->sector_size +
+ sizeof(struct scsi_dif_tuple));
+ break;
+ }
+
+ failing_sector = scsi_get_lba(cmd);
+ failing_sector += bghm;
+
+ /* Descriptor Information */
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
+ }
+
+ if (!ret) {
+ /* No error was reported - problem in FW? */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9057 BLKGRD: Unknown error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+
+ /* Calcuate what type of error it was */
+ lpfc_calc_bg_err(phba, lpfc_cmd);
+ }
+out:
+ return ret;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct scatterlist *sgel = NULL;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *first_data_sgl;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ dma_addr_t physaddr;
+ uint32_t num_bde = 0;
+ uint32_t dma_len;
+ uint32_t dma_offset = 0;
+ int nseg;
+ struct ulp_bde64 *bde;
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither. Start the lpfc command prep by
+ * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+ * data bde entry.
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+
+ nseg = scsi_dma_map(scsi_cmnd);
+ if (unlikely(!nseg))
+ return 1;
+ sgl += 1;
+ /* clear the last flag in the fcp_rsp map entry */
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl += 1;
+ first_data_sgl = sgl;
+ lpfc_cmd->seg_cnt = nseg;
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
+ " %s: Too many sg segments from "
+ "dma_map_sg. Config %d, seg_cnt %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ lpfc_cmd->seg_cnt = 0;
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ /*
+ * The driver established a maximum scatter-gather segment count
+ * during probe that limits the number of sg elements in any
+ * single scsi command. Just run through the seg_cnt and format
+ * the sge's.
+ * When using SLI-3 the driver will try to fit all the BDEs into
+ * the IOCB. If it can't then the BDEs get added to a BPL as it
+ * does for SLI-2 mode.
+ */
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+ physaddr = sg_dma_address(sgel);
+ dma_len = sg_dma_len(sgel);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ if ((num_bde + 1) == nseg)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+ sgl++;
+ }
+ /* setup the performance hint (first data BDE) if enabled */
+ if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ bde->addrLow = first_data_sgl->addr_lo;
+ bde->addrHigh = first_data_sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(first_data_sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ }
+ } else {
+ sgl += 1;
+ /* clear the last flag in the fcp_rsp map entry */
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+ * explicitly reinitialized.
+ * all iocb memory resources are reused.
+ */
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+ /*
+ * If the OAS driver feature is enabled and the lun is enabled for
+ * OAS, set the oas iocb related flags.
+ */
+ if ((phba->cfg_fof) && ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->oas_enabled)
+ lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ return 0;
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ uint32_t num_sge = 0;
+ int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+ int prot_group_type = 0;
+ int fcpdl;
+
+ /*
+ * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
+ * fcp_rsp regions to the first data sge entry
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+ datasegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!datasegcnt))
+ return 1;
+
+ sgl += 1;
+ /* clear the last flag in the fcp_rsp map entry */
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ sgl += 1;
+ lpfc_cmd->seg_cnt = datasegcnt;
+
+ /* First check if data segment count from SCSI Layer is good */
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ goto err;
+
+ prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+ switch (prot_group_type) {
+ case LPFC_PG_TYPE_NO_DIF:
+ /* Here we need to add a DISEED to the count */
+ if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+ goto err;
+
+ num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+ datasegcnt);
+
+ /* we should have 2 or more entries in buffer list */
+ if (num_sge < 2)
+ goto err;
+ break;
+
+ case LPFC_PG_TYPE_DIF_BUF:
+ /*
+ * This type indicates that protection buffers are
+ * passed to the driver, so that needs to be prepared
+ * for DMA
+ */
+ protsegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!protsegcnt)) {
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ lpfc_cmd->prot_seg_cnt = protsegcnt;
+ /*
+ * There is a minimun of 3 SGEs used for every
+ * protection data segment.
+ */
+ if ((lpfc_cmd->prot_seg_cnt * 3) >
+ (phba->cfg_total_seg_cnt - 2))
+ goto err;
+
+ num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+ datasegcnt, protsegcnt);
+
+ /* we should have 3 or more entries in buffer list */
+ if ((num_sge < 3) ||
+ (num_sge > phba->cfg_total_seg_cnt))
+ goto err;
+ break;
+
+ case LPFC_PG_TYPE_INVALID:
+ default:
+ scsi_dma_unmap(scsi_cmnd);
+ lpfc_cmd->seg_cnt = 0;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9083 Unexpected protection group %i\n",
+ prot_group_type);
+ return 1;
+ }
+ }
+
+ switch (scsi_get_prot_op(scsi_cmnd)) {
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_STRIP:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_INSERT:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_READ_PASS:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ break;
+ }
+
+ fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+ fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
+ /*
+ * If the OAS driver feature is enabled and the lun is enabled for
+ * OAS, set the oas iocb related flags.
+ */
+ if ((phba->cfg_fof) && ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->oas_enabled)
+ lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+
+ return 0;
+err:
+ if (lpfc_cmd->seg_cnt)
+ scsi_dma_unmap(scsi_cmnd);
+ if (lpfc_cmd->prot_seg_cnt)
+ dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ scsi_cmnd->sc_data_direction);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9084 Cannot setup S/G List for HBA"
+ "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+ lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+ phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+ prot_group_type, num_sge);
+
+ lpfc_cmd->seg_cnt = 0;
+ lpfc_cmd->prot_seg_cnt = 0;
+ return 1;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * using BlockGuard.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when there is a SCSI command reporting
+ * error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t resp_info = fcprsp->rspStatus2;
+ uint32_t scsi_status = fcprsp->rspStatus3;
+ uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ struct lpfc_fast_path_event *fast_path_evt = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+ unsigned long flags;
+
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return;
+
+ /* If there is queuefull or busy condition send a scsi event */
+ if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+ (cmnd->result == SAM_STAT_BUSY)) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.scsi_evt.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.scsi_evt.subcategory =
+ (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+ LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+ fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+ memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+ ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+ LPFC_EVENT_CHECK_COND;
+ fast_path_evt->un.check_cond_evt.scsi_event.lun =
+ cmnd->device->lun;
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.check_cond_evt.sense_key =
+ cmnd->sense_buffer[2] & 0xf;
+ fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+ fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+ } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+ fcpi_parm &&
+ ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+ ((scsi_status == SAM_STAT_GOOD) &&
+ !(resp_info & (RESID_UNDER | RESID_OVER))))) {
+ /*
+ * If status is good or resid does not match with fcp_param and
+ * there is valid fcpi_parm, then there is a read_check error
+ */
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.read_check_error.header.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.read_check_error.header.subcategory =
+ LPFC_EVENT_FCPRDCHKERR;
+ memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+ fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+ fast_path_evt->un.read_check_error.fcpiparam =
+ fcpi_parm;
+ } else
+ return;
+
+ fast_path_evt->vport = vport;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
+ * @phba: The HBA for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of scatter gather list of scsi command
+ * field of @lpfc_cmd for device with SLI-3 interface spec.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+ /*
+ * There are only two special cases to consider. (1) the scsi command
+ * requested scatter-gather usage or (2) the scsi command allocated
+ * a request buffer, but did not request use_sg. There is a third
+ * case, but it does not require resource deallocation.
+ */
+ if (psb->seg_cnt > 0)
+ scsi_dma_unmap(psb->pCmd);
+ if (psb->prot_seg_cnt > 0)
+ dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+ scsi_prot_sg_count(psb->pCmd),
+ psb->pCmd->sc_data_direction);
+}
+
+/**
+ * lpfc_handler_fcp_err - FCP response handler
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains FCP error.
+ *
+ * This routine is called to process response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
+ * based upon SCSI and FCP error.
+ **/
+static void
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_iocbq *rsp_iocb)
+{
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ uint32_t resp_info = fcprsp->rspStatus2;
+ uint32_t scsi_status = fcprsp->rspStatus3;
+ uint32_t *lp;
+ uint32_t host_status = DID_OK;
+ uint32_t rsplen = 0;
+ uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
+
+
+ /*
+ * If this is a task management command, there is no
+ * scsi packet associated with this lpfc_cmd. The driver
+ * consumes it.
+ */
+ if (fcpcmd->fcpCntl2) {
+ scsi_status = 0;
+ goto out;
+ }
+
+ if (resp_info & RSP_LEN_VALID) {
+ rsplen = be32_to_cpu(fcprsp->rspRspLen);
+ if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "2719 Invalid response length: "
+ "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
+ cmnd->device->id,
+ cmnd->device->lun, cmnd->cmnd[0],
+ rsplen);
+ host_status = DID_ERROR;
+ goto out;
+ }
+ if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "2757 Protocol failure detected during "
+ "processing of FCP I/O op: "
+ "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
+ cmnd->device->id,
+ cmnd->device->lun, cmnd->cmnd[0],
+ fcprsp->rspInfo3);
+ host_status = DID_ERROR;
+ goto out;
+ }
+ }
+
+ if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+ uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+ if (snslen > SCSI_SENSE_BUFFERSIZE)
+ snslen = SCSI_SENSE_BUFFERSIZE;
+
+ if (resp_info & RSP_LEN_VALID)
+ rsplen = be32_to_cpu(fcprsp->rspRspLen);
+ memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+ }
+ lp = (uint32_t *)cmnd->sense_buffer;
+
+ /* special handling for under run conditions */
+ if (!scsi_status && (resp_info & RESID_UNDER)) {
+ /* don't log under runs if fcp set... */
+ if (vport->cfg_log_verbose & LOG_FCP)
+ logit = LOG_FCP_ERROR;
+ /* unless operator says so */
+ if (vport->cfg_log_verbose & LOG_FCP_UNDER)
+ logit = LOG_FCP_UNDER;
+ }
+
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9024 FCP command x%x failed: x%x SNS x%x x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ cmnd->cmnd[0], scsi_status,
+ be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
+ be32_to_cpu(fcprsp->rspResId),
+ be32_to_cpu(fcprsp->rspSnsLen),
+ be32_to_cpu(fcprsp->rspRspLen),
+ fcprsp->rspInfo3);
+
+ scsi_set_resid(cmnd, 0);
+ if (resp_info & RESID_UNDER) {
+ scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
+ "9025 FCP Read Underrun, expected %d, "
+ "residual %d Data: x%x x%x x%x\n",
+ be32_to_cpu(fcpcmd->fcpDl),
+ scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+ cmnd->underflow);
+
+ /*
+ * If there is an under run check if under run reported by
+ * storage array is same as the under run reported by HBA.
+ * If this is not same, there is a dropped frame.
+ */
+ if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+ fcpi_parm &&
+ (scsi_get_resid(cmnd) != fcpi_parm)) {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_FCP | LOG_FCP_ERROR,
+ "9026 FCP Read Check Error "
+ "and Underrun Data: x%x x%x x%x x%x\n",
+ be32_to_cpu(fcpcmd->fcpDl),
+ scsi_get_resid(cmnd), fcpi_parm,
+ cmnd->cmnd[0]);
+ scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+ host_status = DID_ERROR;
+ }
+ /*
+ * The cmnd->underflow is the minimum number of bytes that must
+ * be transferred for this command. Provided a sense condition
+ * is not present, make sure the actual amount transferred is at
+ * least the underflow value or fail.
+ */
+ if (!(resp_info & SNS_LEN_VALID) &&
+ (scsi_status == SAM_STAT_GOOD) &&
+ (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+ < cmnd->underflow)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "9027 FCP command x%x residual "
+ "underrun converted to error "
+ "Data: x%x x%x x%x\n",
+ cmnd->cmnd[0], scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->underflow);
+ host_status = DID_ERROR;
+ }
+ } else if (resp_info & RESID_OVER) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "9028 FCP command x%x residual overrun error. "
+ "Data: x%x x%x\n", cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+ host_status = DID_ERROR;
+
+ /*
+ * Check SLI validation that all the transfer was actually done
+ * (fcpi_parm should be zero). Apply check only to reads.
+ */
+ } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
+ "9029 FCP Read Check Error Data: "
+ "x%x x%x x%x x%x x%x\n",
+ be32_to_cpu(fcpcmd->fcpDl),
+ be32_to_cpu(fcprsp->rspResId),
+ fcpi_parm, cmnd->cmnd[0], scsi_status);
+ switch (scsi_status) {
+ case SAM_STAT_GOOD:
+ case SAM_STAT_CHECK_CONDITION:
+ /* Fabric dropped a data frame. Fail any successful
+ * command in which we detected dropped frames.
+ * A status of good or some check conditions could
+ * be considered a successful command.
+ */
+ host_status = DID_ERROR;
+ break;
+ }
+ scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+ }
+
+ out:
+ cmnd->result = ScsiResult(host_status, scsi_status);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
+}
+
+/**
+ * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns scsi command result by looking into response IOCB
+ * status field appropriately. This routine handles QUEUE FULL condition as
+ * well by ramping down device queue depth.
+ **/
+static void
+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ struct lpfc_iocbq *pIocbOut)
+{
+ struct lpfc_scsi_buf *lpfc_cmd =
+ (struct lpfc_scsi_buf *) pIocbIn->context1;
+ struct lpfc_vport *vport = pIocbIn->vport;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ struct scsi_cmnd *cmd;
+ int result;
+ int depth;
+ unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
+ struct Scsi_Host *shost;
+ uint32_t queue_depth, scsi_id;
+ uint32_t logit = LOG_FCP;
+
+ /* Sanity check on return of outstanding command */
+ if (!(lpfc_cmd->pCmd))
+ return;
+ cmd = lpfc_cmd->pCmd;
+ shost = cmd->device->host;
+
+ lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
+ lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+ /* pick up SLI4 exhange busy status from HBA */
+ lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->prot_data_type) {
+ struct scsi_dif_tuple *src = NULL;
+
+ src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+ /*
+ * Used to restore any changes to protection
+ * data for error injection.
+ */
+ switch (lpfc_cmd->prot_data_type) {
+ case LPFC_INJERR_REFTAG:
+ src->ref_tag =
+ lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_APPTAG:
+ src->app_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_GUARD:
+ src->guard_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ default:
+ break;
+ }
+
+ lpfc_cmd->prot_data = 0;
+ lpfc_cmd->prot_data_type = 0;
+ lpfc_cmd->prot_data_segment = NULL;
+ }
+#endif
+ if (pnode && NLP_CHK_NODE_ACT(pnode))
+ atomic_dec(&pnode->cmd_pending);
+
+ if (lpfc_cmd->status) {
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+ (lpfc_cmd->result & IOERR_DRVR_MASK))
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ else if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+ !lpfc_cmd->fcp_rsp->rspStatus3 &&
+ (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+ !(vport->cfg_log_verbose & LOG_FCP_UNDER))
+ logit = 0;
+ else
+ logit = LOG_FCP | LOG_FCP_UNDER;
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9030 FCP cmd x%x failed <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (pnode) ? pnode->nlp_DID : 0,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ pIocbOut->iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+
+ switch (lpfc_cmd->status) {
+ case IOSTAT_FCP_RSP_ERROR:
+ /* Call FCP RSP handler to determine result */
+ lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
+ break;
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &pnode->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &pnode->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ break;
+ case IOSTAT_LOCAL_REJECT:
+ case IOSTAT_REMOTE_STOP:
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ break;
+ }
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+ lpfc_cmd->result == IOERR_NO_RESOURCES ||
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
+ cmd->result = ScsiResult(DID_REQUEUE, 0);
+ break;
+ }
+ if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+ lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+ pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+ if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ /*
+ * This is a response for a BG enabled
+ * cmd. Parse BG error
+ */
+ lpfc_parse_bg_err(phba, lpfc_cmd,
+ pIocbOut);
+ break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9031 non-zero BGSTAT "
+ "on unprotected cmd\n");
+ }
+ }
+ if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+ && (phba->sli_rev == LPFC_SLI_REV4)
+ && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ /* This IO was aborted by the target, we don't
+ * know the rxid and because we did not send the
+ * ABTS we cannot generate and RRQ.
+ */
+ lpfc_set_rrq_active(phba, pnode,
+ lpfc_cmd->cur_iocbq.sli4_lxritag,
+ 0, 0);
+ }
+ /* else: fall through */
+ default:
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ break;
+ }
+
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode)
+ || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+ SAM_STAT_BUSY);
+ } else
+ cmd->result = ScsiResult(DID_OK, 0);
+
+ if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+ uint32_t *lp = (uint32_t *)cmd->sense_buffer;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0710 Iodone <%d/%llu> cmd %p, error "
+ "x%x SNS x%x x%x Data: x%x x%x\n",
+ cmd->device->id, cmd->device->lun, cmd,
+ cmd->result, *lp, *(lp + 3), cmd->retries,
+ scsi_get_resid(cmd));
+ }
+
+ lpfc_update_stats(phba, lpfc_cmd);
+ result = cmd->result;
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode->cmd_qdepth >
+ atomic_read(&pnode->cmd_pending) &&
+ (atomic_read(&pnode->cmd_pending) >
+ LPFC_MIN_TGT_QDEPTH) &&
+ ((cmd->cmnd[0] == READ_10) ||
+ (cmd->cmnd[0] == WRITE_10)))
+ pnode->cmd_qdepth =
+ atomic_read(&pnode->cmd_pending);
+
+ pnode->last_change_time = jiffies;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
+ time_after(jiffies, pnode->last_change_time +
+ msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
+ / 100;
+ depth = depth ? depth : 1;
+ pnode->cmd_qdepth += depth;
+ if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
+ pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ }
+
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ queue_depth = cmd->device->queue_depth;
+ scsi_id = cmd->device->id;
+ cmd->scsi_done(cmd);
+
+ if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ /*
+ * If there is a thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return;
+ }
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ /*
+ * If there is a thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI3 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+ int datadir = scsi_cmnd->sc_data_direction;
+ uint8_t *ptr;
+ bool sli4;
+ uint32_t fcpdl;
+
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return;
+
+ lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+ /* clear task management bits */
+ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
+
+ int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+ &lpfc_cmd->fcp_cmnd->fcp_lun);
+
+ ptr = &fcp_cmnd->fcpCdb[0];
+ memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+ ptr += scsi_cmnd->cmd_len;
+ memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+ }
+
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+
+ sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+ piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither. Start the lpfc command prep by
+ * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+ * data bde entry.
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ if (datadir == DMA_TO_DEVICE) {
+ iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ if (vport->cfg_first_burst_size &&
+ (pnode->nlp_flag & NLP_FIRSTBURST)) {
+ fcpdl = scsi_bufflen(scsi_cmnd);
+ if (fcpdl < vport->cfg_first_burst_size)
+ piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+ else
+ piocbq->iocb.un.fcpi.fcpi_XRdy =
+ vport->cfg_first_burst_size;
+ }
+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
+ phba->fc4OutputRequests++;
+ } else {
+ iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ fcp_cmnd->fcpCntl3 = READ_DATA;
+ phba->fc4InputRequests++;
+ }
+ } else {
+ iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
+ iocb_cmd->un.fcpi.fcpi_parm = 0;
+ iocb_cmd->ulpPU = 0;
+ fcp_cmnd->fcpCntl3 = 0;
+ phba->fc4ControlRequests++;
+ }
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+ /*
+ * Finish initializing those IOCB fields that are independent
+ * of the scsi_cmnd request_buffer
+ */
+ piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (sli4)
+ piocbq->iocb.ulpContext =
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
+ if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+ piocbq->iocb.ulpFCP2Rcvy = 1;
+ else
+ piocbq->iocb.ulpFCP2Rcvy = 0;
+
+ piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
+ piocbq->context1 = lpfc_cmd;
+ piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+ piocbq->vport = vport;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-3 interface spec.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd,
+ uint64_t lun,
+ uint8_t task_mgmt_cmd)
+{
+ struct lpfc_iocbq *piocbq;
+ IOCB_t *piocb;
+ struct fcp_cmnd *fcp_cmnd;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ return 0;
+
+ piocbq = &(lpfc_cmd->cur_iocbq);
+ piocbq->vport = vport;
+
+ piocb = &piocbq->iocb;
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+ if (vport->phba->sli_rev == 3 &&
+ !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
+ piocb->ulpCommand = CMD_FCP_ICMND64_CR;
+ piocb->ulpContext = ndlp->nlp_rpi;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ piocb->ulpContext =
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ }
+ piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
+ piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+ piocb->ulpPU = 0;
+ piocb->un.fcpi.fcpi_parm = 0;
+
+ /* ulpTimeout is only one byte */
+ if (lpfc_cmd->timeout > 0xff) {
+ /*
+ * Do not timeout the command at the firmware level.
+ * The driver will provide the timeout mechanism.
+ */
+ piocb->ulpTimeout = 0;
+ } else
+ piocb->ulpTimeout = lpfc_cmd->timeout;
+
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+
+ return 1;
+}
+
+/**
+ * lpfc_scsi_api_table_setup - Set up scsi api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SCSI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
+ phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+ phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+ phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
+ phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+ phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+ phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+ phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
+ phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+ phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1418 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+ phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ return 0;
+}
+
+/**
+ * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is IOCB completion routine for device reset and target reset
+ * routine. This routine release scsi buffer associated with lpfc_cmd.
+ **/
+static void
+lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct lpfc_scsi_buf *lpfc_cmd =
+ (struct lpfc_scsi_buf *) cmdiocbq->context1;
+ if (lpfc_cmd)
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return;
+}
+
+/**
+ * lpfc_info - Info entry point of scsi_host_template data structure
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about hba.
+ *
+ * Reutrn code:
+ * Pointer to char - Success.
+ **/
+const char *
+lpfc_info(struct Scsi_Host *host)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len, link_speed = 0;
+ static char lpfcinfobuf[384];
+
+ memset(lpfcinfobuf,0,384);
+ if (phba && phba->pcidev){
+ strncpy(lpfcinfobuf, phba->ModelDesc, 256);
+ len = strlen(lpfcinfobuf);
+ snprintf(lpfcinfobuf + len,
+ 384-len,
+ " on PCI bus %02x device %02x irq %d",
+ phba->pcidev->bus->number,
+ phba->pcidev->devfn,
+ phba->pcidev->irq);
+ len = strlen(lpfcinfobuf);
+ if (phba->Port[0]) {
+ snprintf(lpfcinfobuf + len,
+ 384-len,
+ " port %s",
+ phba->Port);
+ }
+ len = strlen(lpfcinfobuf);
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ link_speed = lpfc_sli_port_speed_get(phba);
+ } else {
+ if (phba->sli4_hba.link_state.logical_speed)
+ link_speed =
+ phba->sli4_hba.link_state.logical_speed;
+ else
+ link_speed = phba->sli4_hba.link_state.speed;
+ }
+ if (link_speed != 0)
+ snprintf(lpfcinfobuf + len, 384-len,
+ " Logical Link Speed: %d Mbps", link_speed);
+ }
+ return lpfcinfobuf;
+}
+
+/**
+ * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
+ * The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
+static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
+{
+ unsigned long poll_tmo_expires =
+ (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
+
+ if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+ mod_timer(&phba->fcp_poll_timer,
+ poll_tmo_expires);
+}
+
+/**
+ * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
+void lpfc_poll_start_timer(struct lpfc_hba * phba)
+{
+ lpfc_poll_rearm_timer(phba);
+}
+
+/**
+ * lpfc_poll_timeout - Restart polling timer
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine restarts fcp_poll timer, when FCP ring polling is enable
+ * and FCP Ring interrupt is disable.
+ **/
+
+void lpfc_poll_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+
+ if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+ lpfc_poll_rearm_timer(phba);
+ }
+}
+
+/**
+ * lpfc_queuecommand - scsi_host_template queuecommand entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @done: Pointer to done routine.
+ *
+ * Driver registers this routine to scsi midlayer to submit a @cmd to process.
+ * This routine prepares an IOCB from scsi command and provides to firmware.
+ * The @done callback is invoked after driver finished processing the command.
+ *
+ * Return value :
+ * 0 - Success
+ * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
+ **/
+static int
+lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ int err;
+
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ err = fc_remote_port_chkready(rport);
+ if (err) {
+ cmnd->result = err;
+ goto out_fail_command;
+ }
+ ndlp = rdata->pnode;
+
+ if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
+ (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
+ " op:%02x str=%s without registering for"
+ " BlockGuard - Rejecting command\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ goto out_fail_command;
+ }
+
+ /*
+ * Catch race where our node has transitioned, but the
+ * transport is still transitioning.
+ */
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto out_tgt_busy;
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+ goto out_tgt_busy;
+
+ lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
+ if (lpfc_cmd == NULL) {
+ lpfc_rampdown_queue_depth(phba);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0707 driver's buffer pool is empty, "
+ "IO busied\n");
+ goto out_host_busy;
+ }
+
+ /*
+ * Store the midlayer's command structure for the completion phase
+ * and complete the command initialization.
+ */
+ lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->rdata = rdata;
+ lpfc_cmd->timeout = 0;
+ lpfc_cmd->start_time = jiffies;
+ cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+
+ if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+ if (vport->phba->cfg_enable_bg) {
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_SCSI_CMD,
+ "9033 BLKGRD: rcvd %s cmd:x%x "
+ "sector x%llx cnt %u pt %x\n",
+ dif_op_str[scsi_get_prot_op(cmnd)],
+ cmnd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
+ }
+ err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+ } else {
+ if (vport->phba->cfg_enable_bg) {
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_SCSI_CMD,
+ "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
+ "x%x sector x%llx cnt %u pt %x\n",
+ cmnd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
+ }
+ err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+ }
+
+ if (err)
+ goto out_host_busy_free_buf;
+
+ lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+
+ atomic_inc(&ndlp->cmd_pending);
+ err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+ &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+ if (err) {
+ atomic_dec(&ndlp->cmd_pending);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "3376 FCP could not issue IOCB err %x"
+ "FCP cmd x%x <%d/%llu> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64) -1,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ lpfc_cmd->cur_iocbq.iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+ lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+ (uint32_t)
+ (cmnd->request->timeout / 1000));
+
+
+ goto out_host_busy_free_buf;
+ }
+ if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+ lpfc_poll_rearm_timer(phba);
+ }
+
+ return 0;
+
+ out_host_busy_free_buf:
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ out_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ out_tgt_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+ out_fail_command:
+ cmnd->scsi_done(cmnd);
+ return 0;
+}
+
+
+/**
+ * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine aborts @cmnd pending in base driver.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *iocb;
+ struct lpfc_iocbq *abtsiocb;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ IOCB_t *cmd, *icmd;
+ int ret = SUCCESS, status = 0;
+ struct lpfc_sli_ring *pring_s4;
+ int ring_number, ret_val;
+ unsigned long flags, iflags;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ status = fc_block_scsi_eh(cmnd);
+ if (status != 0 && status != SUCCESS)
+ return status;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3168 SCSI Layer abort requested I/O has been "
+ "flushed by LLD.\n");
+ return FAILED;
+ }
+
+ lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
+ if (!lpfc_cmd || !lpfc_cmd->pCmd) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "2873 SCSI Layer I/O Abort Request IO CMPL Status "
+ "x%x ID %d LUN %llu\n",
+ SUCCESS, cmnd->device->id, cmnd->device->lun);
+ return SUCCESS;
+ }
+
+ iocb = &lpfc_cmd->cur_iocbq;
+ /* the command is in process of being cancelled */
+ if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3169 SCSI Layer abort requested I/O has been "
+ "cancelled by LLD.\n");
+ return FAILED;
+ }
+ /*
+ * If pCmd field of the corresponding lpfc_scsi_buf structure
+ * points to a different SCSI command, then the driver has
+ * already completed this command, but the midlayer did not
+ * see the completion before the eh fired. Just return SUCCESS.
+ */
+ if (lpfc_cmd->pCmd != cmnd) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3170 SCSI Layer abort requested I/O has been "
+ "completed by LLD.\n");
+ goto out_unlock;
+ }
+
+ BUG_ON(iocb->context1 != lpfc_cmd);
+
+ /* abort issued in recovery is still in progress */
+ if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "3389 SCSI Layer I/O Abort Request is pending\n");
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto wait_for_cmpl;
+ }
+
+ abtsiocb = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocb == NULL) {
+ ret = FAILED;
+ goto out_unlock;
+ }
+
+ /* Indicate the IO is being aborted by the driver. */
+ iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ /*
+ * The scsi command can not be in txq and it is in flight because the
+ * pCmd is still pointig at the SCSI command we have to abort. There
+ * is no need to search the txcmplq. Just send an abort to the FW.
+ */
+
+ cmd = &iocb->iocb;
+ icmd = &abtsiocb->iocb;
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
+ else
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+ icmd->ulpLe = 1;
+ icmd->ulpClass = cmd->ulpClass;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (iocb->iocb_flag & LPFC_IO_FOF)
+ abtsiocb->iocb_flag |= LPFC_IO_FOF;
+
+ if (lpfc_is_link_up(phba))
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+
+ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocb->vport = vport;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
+ pring_s4 = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocb, 0);
+ spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ } else {
+ ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+ abtsiocb, 0);
+ }
+ /* no longer need the lock after this point */
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+
+ if (ret_val == IOCB_ERROR) {
+ lpfc_sli_release_iocbq(phba, abtsiocb);
+ ret = FAILED;
+ goto out;
+ }
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+wait_for_cmpl:
+ lpfc_cmd->waitq = &waitq;
+ /* Wait for abort to complete */
+ wait_event_timeout(waitq,
+ (lpfc_cmd->pCmd != cmnd),
+ msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ lpfc_cmd->waitq = NULL;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (lpfc_cmd->pCmd == cmnd) {
+ ret = FAILED;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0748 abort handler timed out waiting "
+ "for abortng I/O (xri:x%x) to complete: "
+ "ret %#x, ID %d, LUN %llu\n",
+ iocb->sli4_xritag, ret,
+ cmnd->device->id, cmnd->device->lun);
+ }
+ goto out;
+
+out_unlock:
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+out:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
+ "LUN %llu\n", ret, cmnd->device->id,
+ cmnd->device->lun);
+ return ret;
+}
+
+static char *
+lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
+{
+ switch (task_mgmt_cmd) {
+ case FCP_ABORT_TASK_SET:
+ return "ABORT_TASK_SET";
+ case FCP_CLEAR_TASK_SET:
+ return "FCP_CLEAR_TASK_SET";
+ case FCP_BUS_RESET:
+ return "FCP_BUS_RESET";
+ case FCP_LUN_RESET:
+ return "FCP_LUN_RESET";
+ case FCP_TARGET_RESET:
+ return "FCP_TARGET_RESET";
+ case FCP_CLEAR_ACA:
+ return "FCP_CLEAR_ACA";
+ case FCP_TERMINATE_TASK:
+ return "FCP_TERMINATE_TASK";
+ default:
+ return "unknown";
+ }
+}
+
+
+/**
+ * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ *
+ * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t rsp_info;
+ uint32_t rsp_len;
+ uint8_t rsp_info_code;
+ int ret = FAILED;
+
+
+ if (fcprsp == NULL)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0703 fcp_rsp is missing\n");
+ else {
+ rsp_info = fcprsp->rspStatus2;
+ rsp_len = be32_to_cpu(fcprsp->rspRspLen);
+ rsp_info_code = fcprsp->rspInfo3;
+
+
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_FCP,
+ "0706 fcp_rsp valid 0x%x,"
+ " rsp len=%d code 0x%x\n",
+ rsp_info,
+ rsp_len, rsp_info_code);
+
+ if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+ switch (rsp_info_code) {
+ case RSP_NO_FAILURE:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0715 Task Mgmt No Failure\n");
+ ret = SUCCESS;
+ break;
+ case RSP_TM_NOT_SUPPORTED: /* TM rejected */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0716 Task Mgmt Target "
+ "reject\n");
+ break;
+ case RSP_TM_NOT_COMPLETED: /* TM failed */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0717 Task Mgmt Target "
+ "failed TM\n");
+ break;
+ case RSP_TM_INVALID_LU: /* TM to invalid LU! */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0718 Task Mgmt to invalid "
+ "LUN\n");
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+
+/**
+ * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
+ * @vport: The virtual port for which this call is being executed.
+ * @rdata: Pointer to remote port local data
+ * @tgt_id: Target ID of remote device.
+ * @lun_id: Lun number for the TMF
+ * @task_mgmt_cmd: type of TMF to send
+ *
+ * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
+ * a remote port.
+ *
+ * Return Code:
+ * 0x2003 - Error
+ * 0x2002 - Success.
+ **/
+static int
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
+ unsigned tgt_id, uint64_t lun_id,
+ uint8_t task_mgmt_cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *iocbqrsp;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ int ret;
+ int status;
+
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return FAILED;
+
+ lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+ if (lpfc_cmd == NULL)
+ return FAILED;
+ lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
+ lpfc_cmd->rdata = rdata;
+
+ status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+ task_mgmt_cmd);
+ if (!status) {
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return FAILED;
+ }
+
+ iocbq = &lpfc_cmd->cur_iocbq;
+ iocbqrsp = lpfc_sli_get_iocbq(phba);
+ if (iocbqrsp == NULL) {
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return FAILED;
+ }
+ iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0702 Issue %s to TGT %d LUN %llu "
+ "rpi x%x nlp_flag x%x Data: x%x x%x\n",
+ lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
+ pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ iocbq->iocb_flag);
+
+ status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
+ iocbq, iocbqrsp, lpfc_cmd->timeout);
+ if ((status != IOCB_SUCCESS) ||
+ (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
+ "iocb_flag x%x\n",
+ lpfc_taskmgmt_name(task_mgmt_cmd),
+ tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
+ /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
+ if (status == IOCB_SUCCESS) {
+ if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ /* Something in the FCP_RSP was invalid.
+ * Check conditions */
+ ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
+ else
+ ret = FAILED;
+ } else if (status == IOCB_TIMEDOUT) {
+ ret = TIMEOUT_ERROR;
+ } else {
+ ret = FAILED;
+ }
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ } else
+ ret = SUCCESS;
+
+ lpfc_sli_release_iocbq(phba, iocbqrsp);
+
+ if (ret != TIMEOUT_ERROR)
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+
+ return ret;
+}
+
+/**
+ * lpfc_chk_tgt_mapped -
+ * @vport: The virtual port to check on
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine delays until the scsi target (aka rport) for the
+ * command exists (is present and logged in) or we declare it non-existent.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
+ unsigned long later;
+
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0797 Tgt Map rport failure: rdata x%p\n", rdata);
+ return FAILED;
+ }
+ pnode = rdata->pnode;
+ /*
+ * If target is not in a MAPPED state, delay until
+ * target is rediscovered or devloss timeout expires.
+ */
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies)) {
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return FAILED;
+ if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
+ return SUCCESS;
+ schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata)
+ return FAILED;
+ pnode = rdata->pnode;
+ }
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
+ (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ return FAILED;
+ return SUCCESS;
+}
+
+/**
+ * lpfc_reset_flush_io_context -
+ * @vport: The virtual port (scsi_host) for the flush context
+ * @tgt_id: If aborting by Target contect - specifies the target id
+ * @lun_id: If aborting by Lun context - specifies the lun id
+ * @context: specifies the context level to flush at.
+ *
+ * After a reset condition via TMF, we need to flush orphaned i/o
+ * contexts from the adapter. This routine aborts any contexts
+ * outstanding, then waits for their completions. The wait is
+ * bounded by devloss_tmo though.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+ uint64_t lun_id, lpfc_ctx_cmd context)
+{
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long later;
+ int cnt;
+
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+ if (cnt)
+ lpfc_sli_abort_taskmgmt(vport,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ tgt_id, lun_id, context);
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies) && cnt) {
+ schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+ }
+ if (cnt) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0724 I/O flush failure for context %s : cnt x%x\n",
+ ((context == LPFC_CTX_LUN) ? "LUN" :
+ ((context == LPFC_CTX_TGT) ? "TGT" :
+ ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
+ cnt);
+ return FAILED;
+ }
+ return SUCCESS;
+}
+
+/**
+ * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a LUN_RESET task management
+ * command.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
+ unsigned tgt_id = cmnd->device->id;
+ uint64_t lun_id = cmnd->device->lun;
+ struct lpfc_scsi_event_header scsi_event;
+ int status;
+
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata || !rdata->pnode) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0798 Device Reset rport failure: rdata x%p\n",
+ rdata);
+ return FAILED;
+ }
+ pnode = rdata->pnode;
+ status = fc_block_scsi_eh(cmnd);
+ if (status != 0 && status != SUCCESS)
+ return status;
+
+ status = lpfc_chk_tgt_mapped(vport, cmnd);
+ if (status == FAILED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0721 Device Reset rport failure: rdata x%p\n", rdata);
+ return FAILED;
+ }
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_LUNRESET;
+ scsi_event.lun = lun_id;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+ status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ FCP_LUN_RESET);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0713 SCSI layer issued Device Reset (%d, %llu) "
+ "return x%x\n", tgt_id, lun_id, status);
+
+ /*
+ * We have to clean up i/o as : they may be orphaned by the TMF;
+ * or if the TMF failed, they may be in an indeterminate state.
+ * So, continue on.
+ * We will report success if all the i/o aborts successfully.
+ */
+ if (status == SUCCESS)
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_LUN);
+
+ return status;
+}
+
+/**
+ * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
+ unsigned tgt_id = cmnd->device->id;
+ uint64_t lun_id = cmnd->device->lun;
+ struct lpfc_scsi_event_header scsi_event;
+ int status;
+
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rdata) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0799 Target Reset rport failure: rdata x%p\n", rdata);
+ return FAILED;
+ }
+ pnode = rdata->pnode;
+ status = fc_block_scsi_eh(cmnd);
+ if (status != 0 && status != SUCCESS)
+ return status;
+
+ status = lpfc_chk_tgt_mapped(vport, cmnd);
+ if (status == FAILED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0722 Target Reset rport failure: rdata x%p\n", rdata);
+ if (pnode) {
+ spin_lock_irq(shost->host_lock);
+ pnode->nlp_flag &= ~NLP_NPR_ADISC;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ return FAST_IO_FAIL;
+ }
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+ status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ FCP_TARGET_RESET);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0723 SCSI layer issued Target Reset (%d, %llu) "
+ "return x%x\n", tgt_id, lun_id, status);
+
+ /*
+ * We have to clean up i/o as : they may be orphaned by the TMF;
+ * or if the TMF failed, they may be in an indeterminate state.
+ * So, continue on.
+ * We will report success if all the i/o aborts successfully.
+ */
+ if (status == SUCCESS)
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ return status;
+}
+
+/**
+ * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does target reset to all targets on @cmnd->device->host.
+ * This emulates Parallel SCSI Bus Reset Semantics.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_scsi_event_header scsi_event;
+ int match;
+ int ret = SUCCESS, status, i;
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+ status = fc_block_scsi_eh(cmnd);
+ if (status != 0 && status != SUCCESS)
+ return status;
+
+ /*
+ * Since the driver manages a single bus device, reset all
+ * targets known to the driver. Should any target reset
+ * fail, this routine returns failure to the midlayer.
+ */
+ for (i = 0; i < LPFC_MAX_TARGET; i++) {
+ /* Search for mapped node by target ID */
+ match = 0;
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (vport->phba->cfg_fcp2_no_tgt_reset &&
+ (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+ ndlp->nlp_sid == i &&
+ ndlp->rport) {
+ match = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+ if (!match)
+ continue;
+
+ status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+ i, 0, FCP_TARGET_RESET);
+
+ if (status != SUCCESS) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0700 Bus Reset on target %d failed\n",
+ i);
+ ret = FAILED;
+ }
+ }
+ /*
+ * We have to clean up i/o as : they may be orphaned by the TMFs
+ * above; or if any of the TMFs failed, they may be in an
+ * indeterminate state.
+ * We will report success if all the i/o aborts successfully.
+ */
+
+ status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
+ if (status != SUCCESS)
+ ret = FAILED;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
+ return ret;
+}
+
+/**
+ * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does host reset to the adaptor port. It brings the HBA
+ * offline, performs a board restart, and then brings the board back online.
+ * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
+ * reject all outstanding SCSI commands to the host and error returned
+ * back to SCSI mid-level. As this will be SCSI mid-level's last resort
+ * of error handling, it will only return error if resetting of the adapter
+ * is not successful; in all other cases, will return success.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int rc, ret = SUCCESS;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "3172 SCSI layer issued Host Reset Data:\n");
+
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ lpfc_offline(phba);
+ rc = lpfc_sli_brdrestart(phba);
+ if (rc)
+ ret = FAILED;
+ rc = lpfc_online(phba);
+ if (rc)
+ ret = FAILED;
+ lpfc_unblock_mgmt_io(phba);
+
+ if (ret == FAILED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "3323 Failed host reset, bring it offline\n");
+ lpfc_sli4_offline_eratt(phba);
+ }
+ return ret;
+}
+
+/**
+ * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
+ * globally available list of scsi buffers. This routine also makes sure scsi
+ * buffer is not allocated more than HBA limit conveyed to midlayer. This list
+ * of scsi buffer exists for the lifetime of the driver.
+ *
+ * Return codes:
+ * non-0 - Error
+ * 0 - Success
+ **/
+static int
+lpfc_slave_alloc(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ uint32_t total = 0;
+ uint32_t num_to_alloc = 0;
+ int num_allocated = 0;
+ uint32_t sdev_cnt;
+ struct lpfc_device_data *device_data;
+ unsigned long flags;
+ struct lpfc_name target_wwpn;
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ if (phba->cfg_fof) {
+
+ /*
+ * Check to see if the device data structure for the lun
+ * exists. If not, create one.
+ */
+
+ u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data = __lpfc_get_device_data(phba,
+ &phba->luns,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun);
+ if (!device_data) {
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ device_data = lpfc_create_device_data(phba,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun, true);
+ if (!device_data)
+ return -ENOMEM;
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_add_tail(&device_data->listentry, &phba->luns);
+ }
+ device_data->rport_data = rport->dd_data;
+ device_data->available = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ sdev->hostdata = device_data;
+ } else {
+ sdev->hostdata = rport->dd_data;
+ }
+ sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
+
+ /*
+ * Populate the cmds_per_lun count scsi_bufs into this host's globally
+ * available list of scsi buffers. Don't allocate more than the
+ * HBA limit conveyed to the midlayer via the host structure. The
+ * formula accounts for the lun_queue_depth + error handlers + 1
+ * extra. This list of scsi bufs exists for the lifetime of the driver.
+ */
+ total = phba->total_scsi_bufs;
+ num_to_alloc = vport->cfg_lun_queue_depth + 2;
+
+ /* If allocated buffers are enough do nothing */
+ if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
+ return 0;
+
+ /* Allow some exchanges to be available always to complete discovery */
+ if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0704 At limitation of %d preallocated "
+ "command buffers\n", total);
+ return 0;
+ /* Allow some exchanges to be available always to complete discovery */
+ } else if (total + num_to_alloc >
+ phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0705 Allocation request of %d "
+ "command buffers will exceed max of %d. "
+ "Reducing allocation request to %d.\n",
+ num_to_alloc, phba->cfg_hba_queue_depth,
+ (phba->cfg_hba_queue_depth - total));
+ num_to_alloc = phba->cfg_hba_queue_depth - total;
+ }
+ num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+ if (num_to_alloc != num_allocated) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0708 Allocation request of %d "
+ "command buffers did not succeed. "
+ "Allocated %d buffers.\n",
+ num_to_alloc, num_allocated);
+ }
+ if (num_allocated > 0)
+ phba->total_scsi_bufs += num_allocated;
+ return 0;
+}
+
+/**
+ * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine configures following items
+ * - Tag command queuing support for @sdev if supported.
+ * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
+ *
+ * Return codes:
+ * 0 - Success
+ **/
+static int
+lpfc_slave_configure(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
+
+ if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+ lpfc_poll_rearm_timer(phba);
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine sets @sdev hostatdata filed to null.
+ **/
+static void
+lpfc_slave_destroy(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_device_data *device_data = sdev->hostdata;
+
+ atomic_dec(&phba->sdev_cnt);
+ if ((phba->cfg_fof) && (device_data)) {
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data->available = false;
+ if (!device_data->oas_enabled)
+ lpfc_delete_device_data(phba, device_data);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ }
+ sdev->hostdata = NULL;
+ return;
+}
+
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @pha: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ * GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), state of OAS,
+ * whether or not the corresponding lun is available by the system,
+ * and pointer to the rport data.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun,
+ bool atomic_create)
+{
+
+ struct lpfc_device_data *lun_info;
+ int memory_flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !(phba->cfg_fof))
+ return NULL;
+
+ /* Attempt to create the device data to contain lun info */
+
+ if (atomic_create)
+ memory_flags = GFP_ATOMIC;
+ else
+ memory_flags = GFP_KERNEL;
+ lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+ if (!lun_info)
+ return NULL;
+ INIT_LIST_HEAD(&lun_info->listentry);
+ lun_info->rport_data = NULL;
+ memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name));
+ lun_info->device_id.lun = lun;
+ lun_info->oas_enabled = false;
+ lun_info->available = false;
+ return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @pha: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+ struct lpfc_device_data *lun_info)
+{
+
+ if (unlikely(!phba) || !lun_info ||
+ !(phba->cfg_fof))
+ return;
+
+ if (!list_empty(&lun_info->listentry))
+ list_del(&lun_info->listentry);
+ mempool_free(lun_info, phba->device_data_mem_pool);
+ return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @pha: Pointer to host bus adapter structure.
+ * @list: Point to list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not hold locks, it is the responsibility of the caller
+ * to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+ struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+
+ if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_fof)
+ return NULL;
+
+ /* Check to see if the lun is already enabled for OAS. */
+
+ list_for_each_entry(lun_info, list, listentry) {
+ if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (lun_info->device_id.lun == lun))
+ return lun_info;
+ }
+
+ return NULL;
+}
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @pha: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target. If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match. If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned. The function will also return the next lun if available.
+ * If the next lun is not found, starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ * non-0 - Error
+ * 0 - Success
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+ struct lpfc_name *found_vport_wwpn,
+ struct lpfc_name *found_target_wwpn,
+ uint64_t *found_lun,
+ uint32_t *found_lun_status)
+{
+
+ unsigned long flags;
+ struct lpfc_device_data *lun_info;
+ struct lpfc_device_id *device_id;
+ uint64_t lun;
+ bool found = false;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !starting_lun || !found_vport_wwpn ||
+ !found_target_wwpn || !found_lun || !found_lun_status ||
+ (*starting_lun == NO_MORE_OAS_LUN) ||
+ !phba->cfg_fof)
+ return false;
+
+ lun = *starting_lun;
+ *found_lun = NO_MORE_OAS_LUN;
+ *starting_lun = NO_MORE_OAS_LUN;
+
+ /* Search for lun or the lun closet in value */
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_for_each_entry(lun_info, &phba->luns, listentry) {
+ if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ (lun_info->oas_enabled)) {
+ device_id = &lun_info->device_id;
+ if ((!found) &&
+ ((lun == FIND_FIRST_OAS_LUN) ||
+ (device_id->lun == lun))) {
+ *found_lun = device_id->lun;
+ memcpy(found_vport_wwpn,
+ &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(found_target_wwpn,
+ &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ if (lun_info->available)
+ *found_lun_status =
+ OAS_LUN_STATUS_EXISTS;
+ else
+ *found_lun_status = 0;
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+ memset(vport_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+ memset(target_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ found = true;
+ } else if (found) {
+ *starting_lun = device_id->lun;
+ memcpy(vport_wwpn, &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(target_wwpn, &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return found;
+}
+
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @pha: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for oas operations. The routines does so by
+ * doing the following :
+ *
+ * 1) Checks to see if the device data for the lun has been created.
+ * 2) If found, sets the OAS enabled flag if not set and returns.
+ * 3) Otherwise, creates a device data structure.
+ * 4) If successfully created, indicates the device data is for an OAS lun,
+ * indicates the lun is not available and add to the list of luns.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_fof)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Check to see if the device data for the lun has been created */
+ lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ if (!lun_info->oas_enabled)
+ lun_info->oas_enabled = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ /* Create an lun info structure and add to list of luns */
+ lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+ false);
+ if (lun_info) {
+ lun_info->oas_enabled = true;
+ lun_info->available = false;
+ list_add_tail(&lun_info->listentry, &phba->luns);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @pha: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for oas operations. The routines does so by
+ * doing the following :
+ *
+ * 1) Checks to see if the device data for the lun is created.
+ * 2) If present, clears the flag indicating this lun is for OAS.
+ * 3) If the lun is not available by the system, the device data is
+ * freed.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_fof)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Check to see if the lun is available. */
+ lun_info = __lpfc_get_device_data(phba,
+ &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ lun_info->oas_enabled = false;
+ if (!lun_info->available)
+ lpfc_delete_device_data(phba, lun_info);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
+
+struct scsi_host_template lpfc_template_s3 = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = lpfc_bus_reset_handler,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_hba_attrs,
+ .max_sectors = 0xFFFF,
+ .vendor_id = LPFC_NL_VENDOR_ID,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+struct scsi_host_template lpfc_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = lpfc_bus_reset_handler,
+ .eh_host_reset_handler = lpfc_host_reset_handler,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_hba_attrs,
+ .max_sectors = 0xFFFF,
+ .vendor_id = LPFC_NL_VENDOR_ID,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+struct scsi_host_template lpfc_vport_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = lpfc_bus_reset_handler,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_vport_attrs,
+ .max_sectors = 0xFFFF,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
new file mode 100644
index 000000000..474e30cde
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -0,0 +1,186 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <asm/byteorder.h>
+
+struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
+
+#define list_remove_head(list, entry, type, member) \
+ do { \
+ entry = NULL; \
+ if (!list_empty(list)) { \
+ entry = list_entry((list)->next, type, member); \
+ list_del_init(&entry->member); \
+ } \
+ } while(0)
+
+#define list_get_first(list, type, member) \
+ (list_empty(list)) ? NULL : \
+ list_entry((list)->next, type, member)
+
+/* per-port data that is allocated in the FC transport for us */
+struct lpfc_rport_data {
+ struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
+};
+
+struct lpfc_device_id {
+ struct lpfc_name vport_wwpn;
+ struct lpfc_name target_wwpn;
+ uint64_t lun;
+};
+
+struct lpfc_device_data {
+ struct list_head listentry;
+ struct lpfc_rport_data *rport_data;
+ struct lpfc_device_id device_id;
+ bool oas_enabled;
+ bool available;
+};
+
+struct fcp_rsp {
+ uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
+ uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
+
+ uint8_t rspStatus0; /* FCP_STATUS byte 0 (reserved) */
+ uint8_t rspStatus1; /* FCP_STATUS byte 1 (reserved) */
+ uint8_t rspStatus2; /* FCP_STATUS byte 2 field validity */
+#define RSP_LEN_VALID 0x01 /* bit 0 */
+#define SNS_LEN_VALID 0x02 /* bit 1 */
+#define RESID_OVER 0x04 /* bit 2 */
+#define RESID_UNDER 0x08 /* bit 3 */
+ uint8_t rspStatus3; /* FCP_STATUS byte 3 SCSI status byte */
+
+ uint32_t rspResId; /* Residual xfer if residual count field set in
+ fcpStatus2 */
+ /* Received in Big Endian format */
+ uint32_t rspSnsLen; /* Length of sense data in fcpSnsInfo */
+ /* Received in Big Endian format */
+ uint32_t rspRspLen; /* Length of FCP response data in fcpRspInfo */
+ /* Received in Big Endian format */
+
+ uint8_t rspInfo0; /* FCP_RSP_INFO byte 0 (reserved) */
+ uint8_t rspInfo1; /* FCP_RSP_INFO byte 1 (reserved) */
+ uint8_t rspInfo2; /* FCP_RSP_INFO byte 2 (reserved) */
+ uint8_t rspInfo3; /* FCP_RSP_INFO RSP_CODE byte 3 */
+
+#define RSP_NO_FAILURE 0x00
+#define RSP_DATA_BURST_ERR 0x01
+#define RSP_CMD_FIELD_ERR 0x02
+#define RSP_RO_MISMATCH_ERR 0x03
+#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
+#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
+#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */
+
+ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */
+
+ uint8_t rspSnsInfo[128];
+#define SNS_ILLEGAL_REQ 0x05 /* sense key is byte 3 ([2]) */
+#define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */
+};
+
+struct fcp_cmnd {
+ struct scsi_lun fcp_lun;
+
+ uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
+ uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
+#define SIMPLE_Q 0x00
+#define HEAD_OF_Q 0x01
+#define ORDERED_Q 0x02
+#define ACA_Q 0x04
+#define UNTAGGED 0x05
+ uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
+#define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */
+#define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */
+#define FCP_BUS_RESET 0x08 /* bit 3 */
+#define FCP_LUN_RESET 0x10 /* bit 4 */
+#define FCP_TARGET_RESET 0x20 /* bit 5 */
+#define FCP_CLEAR_ACA 0x40 /* bit 6 */
+#define FCP_TERMINATE_TASK 0x80 /* bit 7 */
+ uint8_t fcpCntl3;
+#define WRITE_DATA 0x01 /* Bit 0 */
+#define READ_DATA 0x02 /* Bit 1 */
+
+ uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
+ uint32_t fcpDl; /* Total transfer length */
+
+};
+
+struct lpfc_scsicmd_bkt {
+ uint32_t cmd_count;
+};
+
+struct lpfc_scsi_buf {
+ struct list_head list;
+ struct scsi_cmnd *pCmd;
+ struct lpfc_rport_data *rdata;
+
+ uint32_t timeout;
+
+ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
+ uint16_t status; /* From IOCB Word 7- ulpStatus */
+ uint32_t result; /* From IOCB Word 4. */
+
+ uint32_t seg_cnt; /* Number of scatter-gather segments returned by
+ * dma_map_sg. The driver needs this for calls
+ * to dma_unmap_sg. */
+ uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */
+
+ dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
+
+ /*
+ * data and dma_handle are the kernel virtual and bus address of the
+ * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+ * gather bde list that supports the sg_tablesize value.
+ */
+ void *data;
+ dma_addr_t dma_handle;
+
+ struct fcp_cmnd *fcp_cmnd;
+ struct fcp_rsp *fcp_rsp;
+ struct ulp_bde64 *fcp_bpl;
+
+ dma_addr_t dma_phys_bpl;
+
+ /* cur_iocbq has phys of the dma-able buffer.
+ * Iotag is in here
+ */
+ struct lpfc_iocbq cur_iocbq;
+ wait_queue_head_t *waitq;
+ unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ /* Used to restore any changes to protection data for error injection */
+ void *prot_data_segment;
+ uint32_t prot_data;
+ uint32_t prot_data_type;
+#define LPFC_INJERR_REFTAG 1
+#define LPFC_INJERR_APPTAG 2
+#define LPFC_INJERR_GUARD 3
+#endif
+};
+
+#define LPFC_SCSI_DMA_EXT_SIZE 264
+#define LPFC_BPL_SIZE 1024
+#define MDAC_DIRECT_CMD 0x22
+
+#define FIND_FIRST_OAS_LUN 0
+#define NO_MORE_OAS_LUN -1
+#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
new file mode 100644
index 000000000..56f73682d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -0,0 +1,17060 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+#include <linux/aer.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_vport.h"
+
+/* There are only four IOCB completion types. */
+typedef enum _lpfc_iocb_type {
+ LPFC_UNKNOWN_IOCB,
+ LPFC_UNSOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_ABORT_IOCB
+} lpfc_iocb_type;
+
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+ uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+ uint8_t *, uint32_t *);
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+ struct lpfc_iocbq *);
+static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
+ struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *);
+static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
+ int);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
+ uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+ return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The work Queue Entry to put on the Work queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+ union lpfc_wqe *temp_wqe;
+ struct lpfc_register doorbell;
+ uint32_t host_index;
+ uint32_t idx;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_wqe = q->qe[q->host_index].wqe;
+
+ /* If the host has not yet processed the next entry then we are done */
+ idx = ((q->host_index + 1) % q->entry_count);
+ if (idx == q->hba_index) {
+ q->WQ_overflow++;
+ return -ENOMEM;
+ }
+ q->WQ_posted++;
+ /* set consumption flag every once in a while */
+ if (!((q->host_index + 1) % q->entry_repost))
+ bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
+ if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
+ lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+ /* Update the host index before invoking device */
+ host_index = q->host_index;
+
+ q->host_index = idx;
+
+ /* Ring Doorbell */
+ doorbell.word0 = 0;
+ if (q->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+ bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+ } else if (q->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, q->db_regaddr);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+ uint32_t released = 0;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
+ if (q->hba_index == index)
+ return 0;
+ do {
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ released++;
+ } while (q->hba_index != index);
+ return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @wqe: The Mailbox Queue Entry to put on the Work queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+ struct lpfc_mqe *temp_mqe;
+ struct lpfc_register doorbell;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_mqe = q->qe[q->host_index].mqe;
+
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+ return -ENOMEM;
+ lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+ /* Save off the mailbox pointer for completion */
+ q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+ /* Update the host index before invoking device */
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+
+ /* Ring Doorbell */
+ doorbell.word0 = 0;
+ bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+ bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+ return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
+ /* Clear the mailbox pointer for completion */
+ q->phba->mbox = NULL;
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+ struct lpfc_eqe *eqe;
+ uint32_t idx;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+ eqe = q->qe[q->hba_index].eqe;
+
+ /* If the next EQE is not valid then we are done */
+ if (!bf_get_le32(lpfc_eqe_valid, eqe))
+ return NULL;
+ /* If the host has not yet processed the next entry then we are done */
+ idx = ((q->hba_index + 1) % q->entry_count);
+ if (idx == q->host_index)
+ return NULL;
+
+ q->hba_index = idx;
+
+ /*
+ * insert barrier for instruction interlock : data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+ * instructions allowing action on content before valid bit checked,
+ * add barrier here as well. May not be needed as "content" is a
+ * single 32-bit entity here (vs multi word structure for cq's).
+ */
+ mb();
+ return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
+ * @q: The Event Queue to disable interrupts
+ *
+ **/
+static inline void
+lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
+{
+ struct lpfc_register doorbell;
+
+ doorbell.word0 = 0;
+ bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+ bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+ (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+ bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arms this CQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+ uint32_t released = 0;
+ struct lpfc_eqe *temp_eqe;
+ struct lpfc_register doorbell;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
+ /* while there are valid entries */
+ while (q->hba_index != q->host_index) {
+ temp_eqe = q->qe[q->host_index].eqe;
+ bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
+ released++;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+ }
+ if (unlikely(released == 0 && !arm))
+ return 0;
+
+ /* ring doorbell for number popped */
+ doorbell.word0 = 0;
+ if (arm) {
+ bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+ }
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+ bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+ (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+ bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ /* PCI read to flush PCI pipeline on re-arming for INTx mode */
+ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
+ readl(q->phba->sli4_hba.EQCQDBregaddr);
+ return released;
+}
+
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+ struct lpfc_cqe *cqe;
+ uint32_t idx;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+
+ /* If the next CQE is not valid then we are done */
+ if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ return NULL;
+ /* If the host has not yet processed the next entry then we are done */
+ idx = ((q->hba_index + 1) % q->entry_count);
+ if (idx == q->host_index)
+ return NULL;
+
+ cqe = q->qe[q->hba_index].cqe;
+ q->hba_index = idx;
+
+ /*
+ * insert barrier for instruction interlock : data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Speculative instructions were allowing a bcopy at the start
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+ * after our return, to copy data before the valid bit check above
+ * was done. As such, some of the copied data was stale. The barrier
+ * ensures the check is before any data is copied.
+ */
+ mb();
+ return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arms this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+ uint32_t released = 0;
+ struct lpfc_cqe *temp_qe;
+ struct lpfc_register doorbell;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+ /* while there are valid entries */
+ while (q->hba_index != q->host_index) {
+ temp_qe = q->qe[q->host_index].cqe;
+ bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
+ released++;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+ }
+ if (unlikely(released == 0 && !arm))
+ return 0;
+
+ /* ring doorbell for number popped */
+ doorbell.word0 = 0;
+ if (arm)
+ bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+ bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
+ (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
+ bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @q: The Header Receive Queue to operate on.
+ * @wqe: The Receive Queue Entry to put on the Receive queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Receive Queue Doorbell to signal the
+ * HBA to start processing the Receive Queue Entry. This function returns the
+ * index that the rqe was copied to if successful. If no entries are available
+ * on @q then this function will return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+ struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+ struct lpfc_rqe *temp_hrqe;
+ struct lpfc_rqe *temp_drqe;
+ struct lpfc_register doorbell;
+ int put_index;
+
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return -ENOMEM;
+ put_index = hq->host_index;
+ temp_hrqe = hq->qe[hq->host_index].rqe;
+ temp_drqe = dq->qe[dq->host_index].rqe;
+
+ if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+ return -EINVAL;
+ if (hq->host_index != dq->host_index)
+ return -EINVAL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+ return -EBUSY;
+ lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+ lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+ /* Update the host index to point to the next slot */
+ hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+ dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+ /* Ring The Header Receive Queue Doorbell */
+ if (!(hq->host_index % hq->entry_repost)) {
+ doorbell.word0 = 0;
+ if (hq->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+ } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+ hq->host_index);
+ bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, hq->db_regaddr);
+ }
+ return put_index;
+}
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @q: The Header Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return 0;
+
+ if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+ return 0;
+ hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+ dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+ return 1;
+}
+
+/**
+ * lpfc_cmd_iocb - Get next command iocb entry in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next command iocb entry
+ * in the command ring. The caller must hold hbalock to prevent
+ * other threads consume the next command iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
+static inline IOCB_t *
+lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
+ pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
+}
+
+/**
+ * lpfc_resp_iocb - Get next response iocb entry in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next response iocb entry
+ * in the response ring. The caller must hold hbalock to make sure
+ * that no other thread consume the next response iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
+static inline IOCB_t *
+lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
+ pring->sli.sli3.rspidx * phba->iocb_rsp_size);
+}
+
+/**
+ * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
+struct lpfc_iocbq *
+__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ struct lpfc_iocbq * iocbq = NULL;
+
+ list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+ if (iocbq)
+ phba->iocb_cnt++;
+ if (phba->iocb_cnt > phba->iocb_max)
+ phba->iocb_max = phba->iocb_cnt;
+ return iocbq;
+}
+
+/**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of acive
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq ponter = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+ struct lpfc_sglq *sglq;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
+ return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of acive
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq ponter = success, NULL = Failure.
+ **/
+struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+ struct lpfc_sglq *sglq;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ return sglq;
+}
+
+/**
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ **/
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+ uint16_t xritag,
+ struct lpfc_node_rrq *rrq)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+
+ if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+
+ /* The target DID could have been swapped (cable swap)
+ * we should use the ndlp from the findnode if it is
+ * available.
+ */
+ if ((!ndlp) && rrq->ndlp)
+ ndlp = rrq->ndlp;
+
+ if (!ndlp)
+ goto out;
+
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
+ rrq->send_rrq = 0;
+ rrq->xritag = 0;
+ rrq->rrq_stop_time = 0;
+ }
+out:
+ mempool_free(rrq, phba->rrq_pool);
+}
+
+/**
+ * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * Checks if stop_time (ratov from setting rrq active) has
+ * been reached, if it has and the send_rrq flag is set then
+ * it will call lpfc_send_rrq. If the send_rrq flag is not set
+ * then it will just call the routine to clear the rrq and
+ * free the rrq resource.
+ * The timer is set to the next rrq that is going to expire before
+ * leaving the routine.
+ *
+ **/
+void
+lpfc_handle_rrq_active(struct lpfc_hba *phba)
+{
+ struct lpfc_node_rrq *rrq;
+ struct lpfc_node_rrq *nextrrq;
+ unsigned long next_time;
+ unsigned long iflags;
+ LIST_HEAD(send_rrq);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ list_for_each_entry_safe(rrq, nextrrq,
+ &phba->active_rrq_list, list) {
+ if (time_after(jiffies, rrq->rrq_stop_time))
+ list_move(&rrq->list, &send_rrq);
+ else if (time_before(rrq->rrq_stop_time, next_time))
+ next_time = rrq->rrq_stop_time;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
+ mod_timer(&phba->rrq_tmr, next_time);
+ list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+ list_del(&rrq->list);
+ if (!rrq->send_rrq)
+ /* this call will free the rrq */
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ else if (lpfc_send_rrq(phba, rrq)) {
+ /* if we send the rrq then the completion handler
+ * will clear the bit in the xribitmap.
+ */
+ lpfc_clr_rrq_active(phba, rrq->xritag,
+ rrq);
+ }
+ }
+}
+
+/**
+ * lpfc_get_active_rrq - Get the active RRQ for this exchange.
+ * @vport: Pointer to vport context object.
+ * @xri: The xri used in the exchange.
+ * @did: The targets DID for this exchange.
+ *
+ * returns NULL = rrq not found in the phba->active_rrq_list.
+ * rrq = rrq for this xri and target.
+ **/
+struct lpfc_node_rrq *
+lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_node_rrq *rrq;
+ struct lpfc_node_rrq *nextrrq;
+ unsigned long iflags;
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return NULL;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ if (rrq->vport == vport && rrq->xritag == xri &&
+ rrq->nlp_DID == did){
+ list_del(&rrq->list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return rrq;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return NULL;
+}
+
+/**
+ * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
+ * @vport: Pointer to vport context object.
+ * @ndlp: Pointer to the lpfc_node_list structure.
+ * If ndlp is NULL Remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
+ **/
+void
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_node_rrq *rrq;
+ struct lpfc_node_rrq *nextrrq;
+ unsigned long iflags;
+ LIST_HEAD(rrq_list);
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+ if (!ndlp) {
+ lpfc_sli4_vport_delete_els_xri_aborted(vport);
+ lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
+ }
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+ if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_move(&rrq->list, &rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+ list_del(&rrq->list);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ }
+}
+
+/**
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Targets nodelist pointer for this exchange.
+ * @xritag the xri in the bitmap to test.
+ *
+ * This function is called with hbalock held. This function
+ * returns 0 = rrq not active for this xri
+ * 1 = rrq is valid for this xri.
+ **/
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ uint16_t xritag)
+{
+ if (!ndlp)
+ return 0;
+ if (!ndlp->active_rrqs_xri_bitmap)
+ return 0;
+ if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function takes the hbalock.
+ * The active bit is always set in the active rrq xri_bitmap even
+ * if there is no slot avaiable for the other rrq information.
+ *
+ * returns 0 rrq actived for this xri
+ * < 0 No memory or invalid ndlp.
+ **/
+int
+lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+ unsigned long iflags;
+ struct lpfc_node_rrq *rrq;
+ int empty;
+
+ if (!ndlp)
+ return -EINVAL;
+
+ if (!phba->cfg_enable_rrq)
+ return -EINVAL;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ goto out;
+ }
+
+ /*
+ * set the active bit even if there is no mem available.
+ */
+ if (NLP_CHK_FREE_REQ(ndlp))
+ goto out;
+
+ if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ goto out;
+
+ if (!ndlp->active_rrqs_xri_bitmap)
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
+ goto out;
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+ if (!rrq) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, ndlp->nlp_DID, send_rrq);
+ return -EINVAL;
+ }
+ if (phba->cfg_enable_rrq == 1)
+ rrq->send_rrq = send_rrq;
+ else
+ rrq->send_rrq = 0;
+ rrq->xritag = xritag;
+ rrq->rrq_stop_time = jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ rrq->ndlp = ndlp;
+ rrq->nlp_DID = ndlp->nlp_DID;
+ rrq->vport = ndlp->vport;
+ rrq->rxid = rxid;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ empty = list_empty(&phba->active_rrq_list);
+ list_add_tail(&rrq->list, &phba->active_rrq_list);
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
+ if (empty)
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return 0;
+out:
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2921 Can't set rrq active xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, ndlp->nlp_DID, send_rrq);
+ return -EINVAL;
+}
+
+/**
+ * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to the iocbq.
+ *
+ * This function is called with the ring lock held. This function
+ * gets a new driver sglq object from the sglq list. If the
+ * list is not empty then it is successful, it returns pointer to the newly
+ * allocated sglq object else it returns NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+{
+ struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+ struct lpfc_sglq *sglq = NULL;
+ struct lpfc_sglq *start_sglq = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_nodelist *ndlp;
+ int found = 0;
+
+ if (piocbq->iocb_flag & LPFC_IO_FCP) {
+ lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+ ndlp = lpfc_cmd->rdata->pnode;
+ } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
+ !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ ndlp = piocbq->context_un.ndlp;
+ } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ ndlp = NULL;
+ else
+ ndlp = piocbq->context_un.ndlp;
+ } else {
+ ndlp = piocbq->context1;
+ }
+
+ list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+ start_sglq = sglq;
+ while (!found) {
+ if (!sglq)
+ return NULL;
+ if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
+ /* This xri has an rrq outstanding for this DID.
+ * put it back in the list and get another xri.
+ */
+ list_add_tail(&sglq->list, lpfc_sgl_list);
+ sglq = NULL;
+ list_remove_head(lpfc_sgl_list, sglq,
+ struct lpfc_sglq, list);
+ if (sglq == start_sglq) {
+ sglq = NULL;
+ break;
+ } else
+ continue;
+ }
+ sglq->ndlp = ndlp;
+ found = 1;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
+ sglq->state = SGL_ALLOCATED;
+ }
+ return sglq;
+}
+
+/**
+ * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
+struct lpfc_iocbq *
+lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq * iocbq = NULL;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ iocbq = __lpfc_sli_get_iocbq(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return iocbq;
+}
+
+/**
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ * The sqlq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indiactes that
+ * this IO was aborted then the sglq entry it put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
+ **/
+static void
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_sglq *sglq;
+ size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ unsigned long iflag = 0;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ if (iocbq->sli4_xritag == NO_XRI)
+ sglq = NULL;
+ else
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
+
+ if (sglq) {
+ if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ (sglq->state != SGL_XRI_ABORTED)) {
+ spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+ iflag);
+ list_add(&sglq->list,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.abts_sgl_list_lock, iflag);
+ } else {
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ sglq->state = SGL_FREED;
+ sglq->ndlp = NULL;
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+
+ /* Check if TXQ queue needs to be serviced */
+ if (!list_empty(&pring->txq))
+ lpfc_worker_wake_up(phba);
+ }
+ }
+
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_lxritag = NO_XRI;
+ iocbq->sli4_xritag = NO_XRI;
+ list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+
+/**
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_xritag = NO_XRI;
+ list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+/**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ phba->__lpfc_sli_release_iocbq(phba, iocbq);
+ phba->iocb_cnt--;
+}
+
+/**
+ * lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with no lock held to release the iocb to
+ * iocb pool.
+ **/
+void
+lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ unsigned long iflags;
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_sli_release_iocbq(phba, iocbq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
+ * @phba: Pointer to HBA context object.
+ * @iocblist: List of IOCBs.
+ * @ulpstatus: ULP status in IOCB command field.
+ * @ulpWord4: ULP word-4 in IOCB command field.
+ *
+ * This function is called with a list of IOCBs to cancel. It cancels the IOCB
+ * on the list by invoking the complete callback function associated with the
+ * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
+ * fields.
+ **/
+void
+lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
+ uint32_t ulpstatus, uint32_t ulpWord4)
+{
+ struct lpfc_iocbq *piocb;
+
+ while (!list_empty(iocblist)) {
+ list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
+ if (!piocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, piocb);
+ else {
+ piocb->iocb.ulpStatus = ulpstatus;
+ piocb->iocb.un.ulpWord[4] = ulpWord4;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+ }
+ return;
+}
+
+/**
+ * lpfc_sli_iocb_cmd_type - Get the iocb type
+ * @iocb_cmnd: iocb command code.
+ *
+ * This function is called by ring event handler function to get the iocb type.
+ * This function translates the iocb command to an iocb command type used to
+ * decide the final disposition of each completed IOCB.
+ * The function returns
+ * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
+ * LPFC_SOL_IOCB if it is a solicited iocb completion
+ * LPFC_ABORT_IOCB if it is an abort iocb
+ * LPFC_UNSOL_IOCB if it is an unsolicited iocb
+ *
+ * The caller is not required to hold any lock.
+ **/
+static lpfc_iocb_type
+lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
+{
+ lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
+
+ if (iocb_cmnd > CMD_MAX_IOCB_CMD)
+ return 0;
+
+ switch (iocb_cmnd) {
+ case CMD_XMIT_SEQUENCE_CR:
+ case CMD_XMIT_SEQUENCE_CX:
+ case CMD_XMIT_BCAST_CN:
+ case CMD_XMIT_BCAST_CX:
+ case CMD_ELS_REQUEST_CR:
+ case CMD_ELS_REQUEST_CX:
+ case CMD_CREATE_XRI_CR:
+ case CMD_CREATE_XRI_CX:
+ case CMD_GET_RPI_CN:
+ case CMD_XMIT_ELS_RSP_CX:
+ case CMD_GET_RPI_CR:
+ case CMD_FCP_IWRITE_CR:
+ case CMD_FCP_IWRITE_CX:
+ case CMD_FCP_IREAD_CR:
+ case CMD_FCP_IREAD_CX:
+ case CMD_FCP_ICMND_CR:
+ case CMD_FCP_ICMND_CX:
+ case CMD_FCP_TSEND_CX:
+ case CMD_FCP_TRSP_CX:
+ case CMD_FCP_TRECEIVE_CX:
+ case CMD_FCP_AUTO_TRSP_CX:
+ case CMD_ADAPTER_MSG:
+ case CMD_ADAPTER_DUMP:
+ case CMD_XMIT_SEQUENCE64_CR:
+ case CMD_XMIT_SEQUENCE64_CX:
+ case CMD_XMIT_BCAST64_CN:
+ case CMD_XMIT_BCAST64_CX:
+ case CMD_ELS_REQUEST64_CR:
+ case CMD_ELS_REQUEST64_CX:
+ case CMD_FCP_IWRITE64_CR:
+ case CMD_FCP_IWRITE64_CX:
+ case CMD_FCP_IREAD64_CR:
+ case CMD_FCP_IREAD64_CX:
+ case CMD_FCP_ICMND64_CR:
+ case CMD_FCP_ICMND64_CX:
+ case CMD_FCP_TSEND64_CX:
+ case CMD_FCP_TRSP64_CX:
+ case CMD_FCP_TRECEIVE64_CX:
+ case CMD_GEN_REQUEST64_CR:
+ case CMD_GEN_REQUEST64_CX:
+ case CMD_XMIT_ELS_RSP64_CX:
+ case DSSCMD_IWRITE64_CR:
+ case DSSCMD_IWRITE64_CX:
+ case DSSCMD_IREAD64_CR:
+ case DSSCMD_IREAD64_CX:
+ type = LPFC_SOL_IOCB;
+ break;
+ case CMD_ABORT_XRI_CN:
+ case CMD_ABORT_XRI_CX:
+ case CMD_CLOSE_XRI_CN:
+ case CMD_CLOSE_XRI_CX:
+ case CMD_XRI_ABORTED_CX:
+ case CMD_ABORT_MXRI64_CN:
+ case CMD_XMIT_BLS_RSP64_CX:
+ type = LPFC_ABORT_IOCB;
+ break;
+ case CMD_RCV_SEQUENCE_CX:
+ case CMD_RCV_ELS_REQ_CX:
+ case CMD_RCV_SEQUENCE64_CX:
+ case CMD_RCV_ELS_REQ64_CX:
+ case CMD_ASYNC_STATUS:
+ case CMD_IOCB_RCV_SEQ64_CX:
+ case CMD_IOCB_RCV_ELS64_CX:
+ case CMD_IOCB_RCV_CONT64_CX:
+ case CMD_IOCB_RET_XRI64_CX:
+ type = LPFC_UNSOL_IOCB;
+ break;
+ case CMD_IOCB_XMIT_MSEQ64_CR:
+ case CMD_IOCB_XMIT_MSEQ64_CX:
+ case CMD_IOCB_RCV_SEQ_LIST64_CX:
+ case CMD_IOCB_RCV_ELS_LIST64_CX:
+ case CMD_IOCB_CLOSE_EXTENDED_CN:
+ case CMD_IOCB_ABORT_EXTENDED_CN:
+ case CMD_IOCB_RET_HBQE64_CN:
+ case CMD_IOCB_FCP_IBIDIR64_CR:
+ case CMD_IOCB_FCP_IBIDIR64_CX:
+ case CMD_IOCB_FCP_ITASKMGT64_CX:
+ case CMD_IOCB_LOGENTRY_CN:
+ case CMD_IOCB_LOGENTRY_ASYNC_CN:
+ printk("%s - Unhandled SLI-3 Command x%x\n",
+ __func__, iocb_cmnd);
+ type = LPFC_UNKNOWN_IOCB;
+ break;
+ default:
+ type = LPFC_UNKNOWN_IOCB;
+ break;
+ }
+
+ return type;
+}
+
+/**
+ * lpfc_sli_ring_map - Issue config_ring mbox for all rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from SLI initialization code
+ * to configure every ring of the HBA's SLI interface. The
+ * caller is not required to hold any lock. This function issues
+ * a config_ring mailbox command for each ring.
+ * This function returns zero if successful else returns a negative
+ * error code.
+ **/
+static int
+lpfc_sli_ring_map(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *pmbox;
+ int i, rc, ret = 0;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb)
+ return -ENOMEM;
+ pmbox = &pmb->u.mb;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
+ for (i = 0; i < psli->num_rings; i++) {
+ lpfc_config_ring(phba, i, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0446 Adapter failed to init (%d), "
+ "mbxCmd x%x CFG_RING, mbxStatus x%x, "
+ "ring %d\n",
+ rc, pmbox->mbxCommand,
+ pmbox->mbxStatus, i);
+ phba->link_state = LPFC_HBA_ERROR;
+ ret = -ENXIO;
+ break;
+ }
+ }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return ret;
+}
+
+/**
+ * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to the driver iocb object.
+ *
+ * This function is called with hbalock held. The function adds the
+ * new iocb to txcmplq of the given ring. This function always returns
+ * 0. If this function is called for ELS ring, this function checks if
+ * there is a vport associated with the ELS command. This function also
+ * starts els_tmofunc timer if this is an ELS command.
+ **/
+static int
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
+{
+ list_add_tail(&piocb->list, &pring->txcmplq);
+ piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+
+ if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
+ (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+ (!(piocb->vport->load_flag & FC_UNLOADING))) {
+ if (!piocb->vport)
+ BUG();
+ else
+ mod_timer(&piocb->vport->els_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+ }
+
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_ringtx_get - Get first element of the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to get next
+ * iocb in txq of the given ring. If there is any iocb in
+ * the txq, the function returns first iocb in the list after
+ * removing the iocb from the list, else it returns NULL.
+ **/
+struct lpfc_iocbq *
+lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ struct lpfc_iocbq *cmd_iocb;
+
+ list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
+ return cmd_iocb;
+}
+
+/**
+ * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held and the caller must post the
+ * iocb without releasing the lock. If the caller releases the lock,
+ * iocb slot returned by the function is not guaranteed to be available.
+ * The function returns pointer to the next available iocb slot if there
+ * is available slot in the ring, else it returns NULL.
+ * If the get index of the ring is ahead of the put index, the function
+ * will post an error attention event to the worker thread to take the
+ * HBA to offline state.
+ **/
+static IOCB_t *
+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+ uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
+ if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
+ (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
+ pring->sli.sli3.next_cmdidx = 0;
+
+ if (unlikely(pring->sli.sli3.local_getidx ==
+ pring->sli.sli3.next_cmdidx)) {
+
+ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
+
+ if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0315 Ring %d issue: portCmdGet %d "
+ "is bigger than cmd ring %d\n",
+ pring->ringno,
+ pring->sli.sli3.local_getidx,
+ max_cmd_idx);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ /*
+ * All error attention handlers are posted to
+ * worker thread
+ */
+ phba->work_ha |= HA_ERATT;
+ phba->work_hs = HS_FFER3;
+
+ lpfc_worker_wake_up(phba);
+
+ return NULL;
+ }
+
+ if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
+ return NULL;
+ }
+
+ return lpfc_cmd_iocb(phba, pring);
+}
+
+/**
+ * lpfc_sli_next_iotag - Get an iotag for the iocb
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function gets an iotag for the iocb. If there is no unused iotag and
+ * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
+ * array and assigns a new iotag.
+ * The function returns the allocated iotag if successful, else returns zero.
+ * Zero is not a valid iotag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_iocbq **new_arr;
+ struct lpfc_iocbq **old_arr;
+ size_t new_len;
+ struct lpfc_sli *psli = &phba->sli;
+ uint16_t iotag;
+
+ spin_lock_irq(&phba->hbalock);
+ iotag = psli->last_iotag;
+ if(++iotag < psli->iocbq_lookup_len) {
+ psli->last_iotag = iotag;
+ psli->iocbq_lookup[iotag] = iocbq;
+ spin_unlock_irq(&phba->hbalock);
+ iocbq->iotag = iotag;
+ return iotag;
+ } else if (psli->iocbq_lookup_len < (0xffff
+ - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
+ new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
+ spin_unlock_irq(&phba->hbalock);
+ new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
+ GFP_KERNEL);
+ if (new_arr) {
+ spin_lock_irq(&phba->hbalock);
+ old_arr = psli->iocbq_lookup;
+ if (new_len <= psli->iocbq_lookup_len) {
+ /* highly unprobable case */
+ kfree(new_arr);
+ iotag = psli->last_iotag;
+ if(++iotag < psli->iocbq_lookup_len) {
+ psli->last_iotag = iotag;
+ psli->iocbq_lookup[iotag] = iocbq;
+ spin_unlock_irq(&phba->hbalock);
+ iocbq->iotag = iotag;
+ return iotag;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+ if (psli->iocbq_lookup)
+ memcpy(new_arr, old_arr,
+ ((psli->last_iotag + 1) *
+ sizeof (struct lpfc_iocbq *)));
+ psli->iocbq_lookup = new_arr;
+ psli->iocbq_lookup_len = new_len;
+ psli->last_iotag = iotag;
+ psli->iocbq_lookup[iotag] = iocbq;
+ spin_unlock_irq(&phba->hbalock);
+ iocbq->iotag = iotag;
+ kfree(old_arr);
+ return iotag;
+ }
+ } else
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
+ psli->last_iotag);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_submit_iocb - Submit an iocb to the firmware
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocb: Pointer to iocb slot in the ring.
+ * @nextiocb: Pointer to driver iocb object which need to be
+ * posted to firmware.
+ *
+ * This function is called with hbalock held to post a new iocb to
+ * the firmware. This function copies the new iocb to ring iocb slot and
+ * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * a completion call back for this iocb else the function will free the
+ * iocb object.
+ **/
+static void
+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
+{
+ /*
+ * Set up an iotag
+ */
+ nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+
+
+ if (pring->ringno == LPFC_ELS_RING) {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
+ *(((uint32_t *) &nextiocb->iocb) + 4),
+ *(((uint32_t *) &nextiocb->iocb) + 6),
+ *(((uint32_t *) &nextiocb->iocb) + 7));
+ }
+
+ /*
+ * Issue iocb command to adapter
+ */
+ lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
+ wmb();
+ pring->stats.iocb_cmd++;
+
+ /*
+ * If there is no completion routine to call, we can release the
+ * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
+ * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ */
+ if (nextiocb->iocb_cmpl)
+ lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
+ else
+ __lpfc_sli_release_iocbq(phba, nextiocb);
+
+ /*
+ * Let the HBA know what IOCB slot will be the next one the
+ * driver will put a command into.
+ */
+ pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
+ writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+}
+
+/**
+ * lpfc_sli_update_full_ring - Update the chip attention register
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * The caller is not required to hold any lock for calling this function.
+ * This function updates the chip attention bits for the ring to inform firmware
+ * that there are pending work to be done for this ring and requests an
+ * interrupt when there is space available in the ring. This function is
+ * called when the driver is unable to post more iocbs to the ring due
+ * to unavailability of space in the ring.
+ **/
+static void
+lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ int ringno = pring->ringno;
+
+ pring->flag |= LPFC_CALL_RING_AVAILABLE;
+
+ wmb();
+
+ /*
+ * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
+ * The HBA will tell us when an IOCB entry is available.
+ */
+ writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
+ pring->stats.iocb_cmd_full++;
+}
+
+/**
+ * lpfc_sli_update_ring - Update chip attention register
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function updates the chip attention register bit for the
+ * given ring to inform HBA that there is more work to be done
+ * in this ring. The caller is not required to hold any lock.
+ **/
+static void
+lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ int ringno = pring->ringno;
+
+ /*
+ * Tell the HBA that there is work to do in this ring.
+ */
+ if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
+ wmb();
+ writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ }
+}
+
+/**
+ * lpfc_sli_resume_iocb - Process iocbs in the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to post pending iocbs
+ * in the txq to the firmware. This function is called when driver
+ * detects space available in the ring.
+ **/
+static void
+lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ IOCB_t *iocb;
+ struct lpfc_iocbq *nextiocb;
+
+ /*
+ * Check to see if:
+ * (a) there is anything on the txq to send
+ * (b) link is up
+ * (c) link attention events can be processed (fcp ring only)
+ * (d) IOCB processing is not blocked by the outstanding mbox command.
+ */
+
+ if (lpfc_is_link_up(phba) &&
+ (!list_empty(&pring->txq)) &&
+ (pring->ringno != phba->sli.fcp_ring ||
+ phba->sli.sli_flag & LPFC_PROCESS_LA)) {
+
+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+ (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
+ lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+ if (iocb)
+ lpfc_sli_update_ring(phba, pring);
+ else
+ lpfc_sli_update_full_ring(phba, pring);
+ }
+
+ return;
+}
+
+/**
+ * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function is called with hbalock held to get the next
+ * available slot for the given HBQ. If there is free slot
+ * available for the HBQ it will return pointer to the next available
+ * HBQ entry else it will return NULL.
+ **/
+static struct lpfc_hbq_entry *
+lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
+{
+ struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+ if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
+ ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
+ hbqp->next_hbqPutIdx = 0;
+
+ if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
+ uint32_t raw_index = phba->hbq_get[hbqno];
+ uint32_t getidx = le32_to_cpu(raw_index);
+
+ hbqp->local_hbqGetIdx = getidx;
+
+ if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "1802 HBQ %d: local_hbqGetIdx "
+ "%u is > than hbqp->entry_count %u\n",
+ hbqno, hbqp->local_hbqGetIdx,
+ hbqp->entry_count);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ return NULL;
+ }
+
+ if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
+ return NULL;
+ }
+
+ return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
+ hbqp->hbqPutIdx;
+}
+
+/**
+ * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held to free all the
+ * hbq buffers while uninitializing the SLI interface. It also
+ * frees the HBQ buffers returned by the firmware but not yet
+ * processed by the upper layers.
+ **/
+void
+lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+ struct hbq_dmabuf *hbq_buf;
+ unsigned long flags;
+ int i, hbq_count;
+ uint32_t hbqno;
+
+ hbq_count = lpfc_sli_hbq_count();
+ /* Return all memory used by all HBQs */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ for (i = 0; i < hbq_count; ++i) {
+ list_for_each_entry_safe(dmabuf, next_dmabuf,
+ &phba->hbqs[i].hbq_buffer_list, list) {
+ hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_buf->dbuf.list);
+ (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
+ }
+ phba->hbqs[i].buffer_count = 0;
+ }
+ /* Return all HBQ buffer that are in-fly */
+ list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
+ list) {
+ hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_buf->dbuf.list);
+ if (hbq_buf->tag == -1) {
+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+ (phba, hbq_buf);
+ } else {
+ hbqno = hbq_buf->tag >> 16;
+ if (hbqno >= LPFC_MAX_HBQS)
+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+ (phba, hbq_buf);
+ else
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba,
+ hbq_buf);
+ }
+ }
+
+ /* Mark the HBQs not in use */
+ phba->hbq_in_use = 0;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post a
+ * hbq buffer to the firmware. If the function finds an empty
+ * slot in the HBQ, it will post the buffer. The function will return
+ * pointer to the hbq entry if it successfully post the buffer
+ * else it will return NULL.
+ **/
+static int
+lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
+ return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post a hbq buffer to the
+ * firmware. If the function finds an empty slot in the HBQ, it will post the
+ * buffer and place it on the hbq_buffer_list. The function will return zero if
+ * it successfully post the buffer else it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
+ struct lpfc_hbq_entry *hbqe;
+ dma_addr_t physaddr = hbq_buf->dbuf.phys;
+
+ /* Get next HBQ entry slot to use */
+ hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
+ if (hbqe) {
+ struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+ hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ hbqe->bde.tus.f.bdeSize = hbq_buf->size;
+ hbqe->bde.tus.f.bdeFlags = 0;
+ hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
+ hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
+ /* Sync SLIM */
+ hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
+ writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
+ /* flush */
+ readl(phba->hbq_put + hbqno);
+ list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
+ return 0;
+ } else
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
+ int rc;
+ struct lpfc_rqe hrqe;
+ struct lpfc_rqe drqe;
+
+ hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+ hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+ drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+ drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+ rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+ &hrqe, &drqe);
+ if (rc < 0)
+ return rc;
+ hbq_buf->tag = rc;
+ list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+ return 0;
+}
+
+/* HBQ for ELS and CT traffic. */
+static struct lpfc_hbq_init lpfc_els_hbq = {
+ .rn = 1,
+ .entry_count = 256,
+ .mask_count = 0,
+ .profile = 0,
+ .ring_mask = (1 << LPFC_ELS_RING),
+ .buffer_count = 0,
+ .init_count = 40,
+ .add_count = 40,
+};
+
+/* HBQ for the extra ring if needed */
+static struct lpfc_hbq_init lpfc_extra_hbq = {
+ .rn = 1,
+ .entry_count = 200,
+ .mask_count = 0,
+ .profile = 0,
+ .ring_mask = (1 << LPFC_EXTRA_RING),
+ .buffer_count = 0,
+ .init_count = 0,
+ .add_count = 5,
+};
+
+/* Array of HBQs */
+struct lpfc_hbq_init *lpfc_hbq_defs[] = {
+ &lpfc_els_hbq,
+ &lpfc_extra_hbq,
+};
+
+/**
+ * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @count: Number of HBQ buffers to be posted.
+ *
+ * This function is called with no lock held to post more hbq buffers to the
+ * given HBQ. The function returns the number of HBQ buffers successfully
+ * posted.
+ **/
+static int
+lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
+{
+ uint32_t i, posted = 0;
+ unsigned long flags;
+ struct hbq_dmabuf *hbq_buffer;
+ LIST_HEAD(hbq_buf_list);
+ if (!phba->hbqs[hbqno].hbq_alloc_buffer)
+ return 0;
+
+ if ((phba->hbqs[hbqno].buffer_count + count) >
+ lpfc_hbq_defs[hbqno]->entry_count)
+ count = lpfc_hbq_defs[hbqno]->entry_count -
+ phba->hbqs[hbqno].buffer_count;
+ if (!count)
+ return 0;
+ /* Allocate HBQ entries */
+ for (i = 0; i < count; i++) {
+ hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
+ if (!hbq_buffer)
+ break;
+ list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
+ }
+ /* Check whether HBQ is still in use */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!phba->hbq_in_use)
+ goto err;
+ while (!list_empty(&hbq_buf_list)) {
+ list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+ dbuf.list);
+ hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
+ (hbqno << 16));
+ if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+ phba->hbqs[hbqno].buffer_count++;
+ posted++;
+ } else
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return posted;
+err:
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ while (!list_empty(&hbq_buf_list)) {
+ list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+ dbuf.list);
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ number.
+ *
+ * This function posts more buffers to the HBQ. This function
+ * is called with no lock held. The function returns the number of HBQ entries
+ * successfully allocated.
+ **/
+int
+lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return 0;
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count);
+}
+
+/**
+ * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ queue number.
+ *
+ * This function is called from SLI initialization code path with
+ * no lock held to post initial HBQ buffers to firmware. The
+ * function returns the number of HBQ entries successfully allocated.
+ **/
+static int
+lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->entry_count);
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count);
+}
+
+/**
+ * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function removes the first hbq buffer on an hbq list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_get(struct list_head *rb_list)
+{
+ struct lpfc_dmabuf *d_buf;
+
+ list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
+ if (!d_buf)
+ return NULL;
+ return container_of(d_buf, struct hbq_dmabuf, dbuf);
+}
+
+/**
+ * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
+ * @phba: Pointer to HBA context object.
+ * @tag: Tag of the hbq buffer.
+ *
+ * This function is called with hbalock held. This function searches
+ * for the hbq buffer associated with the given tag in the hbq buffer
+ * list. If it finds the hbq buffer, it returns the hbq_buffer other wise
+ * it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
+{
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *hbq_buf;
+ uint32_t hbqno;
+
+ hbqno = tag >> 16;
+ if (hbqno >= LPFC_MAX_HBQS)
+ return NULL;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if (hbq_buf->tag == tag) {
+ spin_unlock_irq(&phba->hbalock);
+ return hbq_buf;
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
+ "1803 Bad hbq tag. Data: x%x x%x\n",
+ tag, phba->hbqs[tag >> 16].buffer_count);
+ return NULL;
+}
+
+/**
+ * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
+ * @phba: Pointer to HBA context object.
+ * @hbq_buffer: Pointer to HBQ buffer.
+ *
+ * This function is called with hbalock. This function gives back
+ * the hbq buffer to firmware. If the HBQ does not have space to
+ * post the buffer, it will free the buffer.
+ **/
+void
+lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
+{
+ uint32_t hbqno;
+
+ if (hbq_buffer) {
+ hbqno = hbq_buffer->tag >> 16;
+ if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+ }
+}
+
+/**
+ * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
+ * @mbxCommand: mailbox command code.
+ *
+ * This function is called by the mailbox event handler function to verify
+ * that the completed mailbox command is a legitimate mailbox command. If the
+ * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
+ * and the mailbox event handler will take the HBA offline.
+ **/
+static int
+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
+{
+ uint8_t ret;
+
+ switch (mbxCommand) {
+ case MBX_LOAD_SM:
+ case MBX_READ_NV:
+ case MBX_WRITE_NV:
+ case MBX_WRITE_VPARMS:
+ case MBX_RUN_BIU_DIAG:
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+ case MBX_CONFIG_LINK:
+ case MBX_CONFIG_RING:
+ case MBX_RESET_RING:
+ case MBX_READ_CONFIG:
+ case MBX_READ_RCONFIG:
+ case MBX_READ_SPARM:
+ case MBX_READ_STATUS:
+ case MBX_READ_RPI:
+ case MBX_READ_XRI:
+ case MBX_READ_REV:
+ case MBX_READ_LNK_STAT:
+ case MBX_REG_LOGIN:
+ case MBX_UNREG_LOGIN:
+ case MBX_CLEAR_LA:
+ case MBX_DUMP_MEMORY:
+ case MBX_DUMP_CONTEXT:
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_UPDATE_CFG:
+ case MBX_DOWN_LOAD:
+ case MBX_DEL_LD_ENTRY:
+ case MBX_RUN_PROGRAM:
+ case MBX_SET_MASK:
+ case MBX_SET_VARIABLE:
+ case MBX_UNREG_D_ID:
+ case MBX_KILL_BOARD:
+ case MBX_CONFIG_FARP:
+ case MBX_BEACON:
+ case MBX_LOAD_AREA:
+ case MBX_RUN_BIU_DIAG64:
+ case MBX_CONFIG_PORT:
+ case MBX_READ_SPARM64:
+ case MBX_READ_RPI64:
+ case MBX_REG_LOGIN64:
+ case MBX_READ_TOPOLOGY:
+ case MBX_WRITE_WWN:
+ case MBX_SET_DEBUG:
+ case MBX_LOAD_EXP_ROM:
+ case MBX_ASYNCEVT_ENABLE:
+ case MBX_REG_VPI:
+ case MBX_UNREG_VPI:
+ case MBX_HEARTBEAT:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
+ case MBX_SLI4_CONFIG:
+ case MBX_SLI4_REQ_FTRS:
+ case MBX_REG_FCFI:
+ case MBX_UNREG_FCFI:
+ case MBX_REG_VFI:
+ case MBX_UNREG_VFI:
+ case MBX_INIT_VPI:
+ case MBX_INIT_VFI:
+ case MBX_RESUME_RPI:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_READ_EVENT_LOG:
+ case MBX_SECURITY_MGMT:
+ case MBX_AUTH_PORT:
+ case MBX_ACCESS_VDATA:
+ ret = mbxCommand;
+ break;
+ default:
+ ret = MBX_SHUTDOWN;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox commands issued from
+ * lpfc_sli_issue_mbox_wait function. This function is called by the
+ * mailbox event handler function with no lock held. This function
+ * will wake up thread waiting on the wait queue pointed by context1
+ * of the mailbox.
+ **/
+void
+lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ wait_queue_head_t *pdone_q;
+ unsigned long drvr_flag;
+
+ /*
+ * If pdone_q is empty, the driver thread gave up waiting and
+ * continued running.
+ */
+ pmboxq->mbox_flag |= LPFC_MBX_WAKE;
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ pdone_q = (wait_queue_head_t *) pmboxq->context1;
+ if (pdone_q)
+ wake_up_interruptible(pdone_q);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return;
+}
+
+
+/**
+ * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the default mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. If the completed command is a REG_LOGIN mailbox command,
+ * this function will issue a UREG_LOGIN to re-claim the RPI.
+ **/
+void
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ uint16_t rpi, vpi;
+ int rc;
+
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+
+ /*
+ * If a REG_LOGIN succeeded after node is destroyed or node
+ * is in re-discovery driver need to cleanup the RPI.
+ */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+ !pmb->u.mb.mbxStatus) {
+ rpi = pmb->u.mb.un.varWords[0];
+ vpi = pmb->u.mb.un.varRegLogin.vpi;
+ lpfc_unreg_login(phba, vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_NOT_FINISHED)
+ return;
+ }
+
+ if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+ !(phba->pport->load_flag & FC_UNLOADING) &&
+ !pmb->u.mb.mbxStatus) {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ ndlp = (struct lpfc_nodelist *)pmb->context2;
+ lpfc_nlp_put(ndlp);
+ pmb->context2 = NULL;
+ }
+
+ /* Check security permission status on INIT_LINK mailbox command */
+ if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
+ (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2860 SLI authentication is required "
+ "for INIT_LINK but has not done yet\n");
+
+ if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, pmb);
+ else
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+ /**
+ * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the unreg rpi mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. An additional refrenece is put on the ndlp to prevent
+ * lpfc_nlp_release from freeing the rpi bit in the bitmask before
+ * the unreg mailbox command completes, this routine puts the
+ * reference back.
+ *
+ **/
+void
+lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = pmb->context1;
+ if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)) {
+ if (ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0010 UNREG_LOGIN vpi:%x "
+ "rpi:%x DID:%x map:%x %p\n",
+ vport->vpi, ndlp->nlp_rpi,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map, ndlp);
+
+ lpfc_nlp_put(ndlp);
+ }
+ }
+ }
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the completed mailbox commands and gives it to upper layers. The interrupt
+ * service routine processes mailbox completion interrupt and adds completed
+ * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
+ * Worker thread call lpfc_sli_handle_mb_event, which will return the
+ * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
+ * function returns the mailbox commands to the upper layer by calling the
+ * completion handler function of each mailbox.
+ **/
+int
+lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
+{
+ MAILBOX_t *pmbox;
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+ LIST_HEAD(cmplq);
+
+ phba->sli.slistat.mbox_event++;
+
+ /* Get all completed mailboxe buffers into the cmplq */
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Get a Mailbox buffer to setup mailbox commands for callback */
+ do {
+ list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
+ if (pmb == NULL)
+ break;
+
+ pmbox = &pmb->u.mb;
+
+ if (pmbox->mbxCommand != MBX_HEARTBEAT) {
+ if (pmb->vport) {
+ lpfc_debugfs_disc_trc(pmb->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)pmbox->mbxCommand,
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX cmpl: cmd:x%x mb:x%x x%x",
+ (uint32_t)pmbox->mbxCommand,
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1]);
+ }
+ }
+
+ /*
+ * It is a fatal error if unknown mbox command completion.
+ */
+ if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
+ MBX_SHUTDOWN) {
+ /* Unknown mailbox command compl */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):0323 Unknown Mailbox command "
+ "x%x (x%x/x%x) Cmpl\n",
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ pmb),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ pmb));
+ phba->link_state = LPFC_HBA_ERROR;
+ phba->work_hs = HS_FFER3;
+ lpfc_handle_eratt(phba);
+ continue;
+ }
+
+ if (pmbox->mbxStatus) {
+ phba->sli.slistat.mbox_stat_err++;
+ if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
+ /* Mbox cmd cmpl error - RETRYing */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "(%d):0305 Mbox cmd cmpl "
+ "error - RETRYing Data: x%x "
+ "(x%x/x%x) x%x x%x x%x\n",
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ pmb),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ pmb),
+ pmbox->mbxStatus,
+ pmbox->un.varWords[0],
+ pmb->vport->port_state);
+ pmbox->mbxStatus = 0;
+ pmbox->mbxOwner = OWN_HOST;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_NOT_FINISHED)
+ continue;
+ }
+ }
+
+ /* Mailbox cmd <cmd> Cmpl <cmpl> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "x%x x%x x%x\n",
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, pmb),
+ lpfc_sli_config_mbox_opcode_get(phba, pmb),
+ pmb->mbox_cmpl,
+ *((uint32_t *) pmbox),
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1],
+ pmbox->un.varWords[2],
+ pmbox->un.varWords[3],
+ pmbox->un.varWords[4],
+ pmbox->un.varWords[5],
+ pmbox->un.varWords[6],
+ pmbox->un.varWords[7],
+ pmbox->un.varWords[8],
+ pmbox->un.varWords[9],
+ pmbox->un.varWords[10]);
+
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba,pmb);
+ } while (1);
+ return 0;
+}
+
+/**
+ * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: buffer tag.
+ *
+ * This function is called with no lock held. When QUE_BUFTAG_BIT bit
+ * is set in the tag the buffer is posted for a particular exchange,
+ * the function will return the buffer without replacing the buffer.
+ * If the buffer is for unsolicited ELS or CT traffic, this function
+ * returns the buffer and also posts another buffer to the firmware.
+ **/
+static struct lpfc_dmabuf *
+lpfc_sli_get_buff(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ uint32_t tag)
+{
+ struct hbq_dmabuf *hbq_entry;
+
+ if (tag & QUE_BUFTAG_BIT)
+ return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
+ hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+ if (!hbq_entry)
+ return NULL;
+ return &hbq_entry->dbuf;
+}
+
+/**
+ * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
+ * @fch_r_ctl: the r_ctl for the first frame of the sequence.
+ * @fch_type: the type for the first frame of the sequence.
+ *
+ * This function is called with no lock held. This function uses the r_ctl and
+ * type of the received sequence to find the correct callback function to call
+ * to process the sequence.
+ **/
+static int
+lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
+ uint32_t fch_type)
+{
+ int i;
+
+ /* unSolicited Responses */
+ if (pring->prt[0].profile) {
+ if (pring->prt[0].lpfc_sli_rcv_unsol_event)
+ (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
+ saveq);
+ return 1;
+ }
+ /* We must search, based on rctl / type
+ for the right routine */
+ for (i = 0; i < pring->num_mask; i++) {
+ if ((pring->prt[i].rctl == fch_r_ctl) &&
+ (pring->prt[i].type == fch_type)) {
+ if (pring->prt[i].lpfc_sli_rcv_unsol_event)
+ (pring->prt[i].lpfc_sli_rcv_unsol_event)
+ (phba, pring, saveq);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the unsolicited iocb.
+ *
+ * This function is called with no lock held by the ring event handler
+ * when there is an unsolicited iocb posted to the response ring by the
+ * firmware. This function gets the buffer associated with the iocbs
+ * and calls the event handler for the ring. This function handles both
+ * qring buffers and hbq buffers.
+ * When the function returns 1 the caller can free the iocb object otherwise
+ * upper layer functions will free the iocb objects.
+ **/
+static int
+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *saveq)
+{
+ IOCB_t * irsp;
+ WORD5 * w5p;
+ uint32_t Rctl, Type;
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_dmabuf *dmzbuf;
+
+ irsp = &(saveq->iocb);
+
+ if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
+ if (pring->lpfc_sli_rcv_async_status)
+ pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
+ else
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_SLI,
+ "0316 Ring %d handler: unexpected "
+ "ASYNC_STATUS iocb received evt_code "
+ "0x%x\n",
+ pring->ringno,
+ irsp->un.asyncstat.evt_code);
+ return 1;
+ }
+
+ if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+ if (irsp->ulpBdeCount > 0) {
+ dmzbuf = lpfc_sli_get_buff(phba, pring,
+ irsp->un.ulpWord[3]);
+ lpfc_in_buf_free(phba, dmzbuf);
+ }
+
+ if (irsp->ulpBdeCount > 1) {
+ dmzbuf = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[3]);
+ lpfc_in_buf_free(phba, dmzbuf);
+ }
+
+ if (irsp->ulpBdeCount > 2) {
+ dmzbuf = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[7]);
+ lpfc_in_buf_free(phba, dmzbuf);
+ }
+
+ return 1;
+ }
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (irsp->ulpBdeCount != 0) {
+ saveq->context2 = lpfc_sli_get_buff(phba, pring,
+ irsp->un.ulpWord[3]);
+ if (!saveq->context2)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0341 Ring %d Cannot find buffer for "
+ "an unsolicited iocb. tag 0x%x\n",
+ pring->ringno,
+ irsp->un.ulpWord[3]);
+ }
+ if (irsp->ulpBdeCount == 2) {
+ saveq->context3 = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[7]);
+ if (!saveq->context3)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0342 Ring %d Cannot find buffer for an"
+ " unsolicited iocb. tag 0x%x\n",
+ pring->ringno,
+ irsp->unsli3.sli3Words[7]);
+ }
+ list_for_each_entry(iocbq, &saveq->list, list) {
+ irsp = &(iocbq->iocb);
+ if (irsp->ulpBdeCount != 0) {
+ iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+ irsp->un.ulpWord[3]);
+ if (!iocbq->context2)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0343 Ring %d Cannot find "
+ "buffer for an unsolicited iocb"
+ ". tag 0x%x\n", pring->ringno,
+ irsp->un.ulpWord[3]);
+ }
+ if (irsp->ulpBdeCount == 2) {
+ iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[7]);
+ if (!iocbq->context3)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0344 Ring %d Cannot find "
+ "buffer for an unsolicited "
+ "iocb. tag 0x%x\n",
+ pring->ringno,
+ irsp->unsli3.sli3Words[7]);
+ }
+ }
+ }
+ if (irsp->ulpBdeCount != 0 &&
+ (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
+ irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
+ int found = 0;
+
+ /* search continue save q for same XRI */
+ list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
+ if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
+ saveq->iocb.unsli3.rcvsli3.ox_id) {
+ list_add_tail(&saveq->list, &iocbq->list);
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ list_add_tail(&saveq->clist,
+ &pring->iocb_continue_saveq);
+ if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
+ list_del_init(&iocbq->clist);
+ saveq = iocbq;
+ irsp = &(saveq->iocb);
+ } else
+ return 0;
+ }
+ if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
+ (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
+ Rctl = FC_RCTL_ELS_REQ;
+ Type = FC_TYPE_ELS;
+ } else {
+ w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
+ Rctl = w5p->hcsw.Rctl;
+ Type = w5p->hcsw.Type;
+
+ /* Firmware Workaround */
+ if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
+ (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ Rctl = FC_RCTL_ELS_REQ;
+ Type = FC_TYPE_ELS;
+ w5p->hcsw.Rctl = Rctl;
+ w5p->hcsw.Type = Type;
+ }
+ }
+
+ if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0313 Ring %d handler: unexpected Rctl x%x "
+ "Type x%x received\n",
+ pring->ringno, Rctl, Type);
+
+ return 1;
+}
+
+/**
+ * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @prspiocb: Pointer to response iocb object.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given response iocb using the iotag of the
+ * response iocb. This function is called with the hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *prspiocb)
+{
+ struct lpfc_iocbq *cmd_iocb = NULL;
+ uint16_t iotag;
+
+ iotag = prspiocb->iocb.ulpIoTag;
+
+ if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+ cmd_iocb = phba->sli.iocbq_lookup[iotag];
+ list_del_init(&cmd_iocb->list);
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ }
+ return cmd_iocb;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0317 iotag x%x is out off "
+ "range: max iotag x%x wd0 x%x\n",
+ iotag, phba->sli.last_iotag,
+ *(((uint32_t *) &prspiocb->iocb) + 7));
+ return NULL;
+}
+
+/**
+ * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iotag: IOCB tag.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given iotag. This function is called with the
+ * hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint16_t iotag)
+{
+ struct lpfc_iocbq *cmd_iocb;
+
+ if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+ cmd_iocb = phba->sli.iocbq_lookup[iotag];
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ /* remove from txcmpl queue list */
+ list_del_init(&cmd_iocb->list);
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ return cmd_iocb;
+ }
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0372 iotag x%x is out off range: max iotag (x%x)\n",
+ iotag, phba->sli.last_iotag);
+ return NULL;
+}
+
+/**
+ * lpfc_sli_process_sol_iocb - process solicited iocb completion
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the response iocb to be processed.
+ *
+ * This function is called by the ring event handler for non-fcp
+ * rings when there is a new response iocb in the response ring.
+ * The caller is not required to hold any locks. This function
+ * gets the command iocb associated with the response iocb and
+ * calls the completion handler for the command iocb. If there
+ * is no completion handler, the function will free the resources
+ * associated with command iocb. If the response iocb is for
+ * an already aborted command iocb, the status of the completion
+ * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
+ * This function always returns 1.
+ **/
+static int
+lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *saveq)
+{
+ struct lpfc_iocbq *cmdiocbp;
+ int rc = 1;
+ unsigned long iflag;
+
+ /* Based on the iotag field, get the cmd IOCB from the txcmplq */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ if (cmdiocbp) {
+ if (cmdiocbp->iocb_cmpl) {
+ /*
+ * If an ELS command failed send an event to mgmt
+ * application.
+ */
+ if (saveq->iocb.ulpStatus &&
+ (pring->ringno == LPFC_ELS_RING) &&
+ (cmdiocbp->iocb.ulpCommand ==
+ CMD_ELS_REQUEST64_CR))
+ lpfc_send_els_failure_event(phba,
+ cmdiocbp, saveq);
+
+ /*
+ * Post all ELS completions to the worker thread.
+ * All other are passed to the completion callback.
+ */
+ if (pring->ringno == LPFC_ELS_RING) {
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED)) {
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
+ cmdiocbp->iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ saveq->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ saveq->iocb.un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+
+ /* Firmware could still be in progress
+ * of DMAing payload, so don't free data
+ * buffer till after a hbeat.
+ */
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
+ saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (saveq->iocb_flag &
+ LPFC_EXCHANGE_BUSY) {
+ /* Set cmdiocb flag for the
+ * exchange busy so sgl (xri)
+ * will not be released until
+ * the abort xri is received
+ * from hba.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag |=
+ LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
+ if (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED) {
+ /*
+ * Clear LPFC_DRIVER_ABORTED
+ * bit in case it was driver
+ * initiated abort.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ cmdiocbp->iocb.un.ulpWord[4] =
+ IOERR_ABORT_REQUESTED;
+ /*
+ * For SLI4, irsiocb contains
+ * NO_XRI in sli_xritag, it
+ * shall not affect releasing
+ * sgl (xri) process.
+ */
+ saveq->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ saveq->iocb.un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ saveq->iocb_flag |=
+ LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
+ }
+ }
+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ } else
+ lpfc_sli_release_iocbq(phba, cmdiocbp);
+ } else {
+ /*
+ * Unknown initiating command based on the response iotag.
+ * This could be the case on the ELS ring because of
+ * lpfc_els_abort().
+ */
+ if (pring->ringno != LPFC_ELS_RING) {
+ /*
+ * Ring <ringno> handler: unexpected completion IoTag
+ * <IoTag>
+ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0322 Ring %d handler: "
+ "unexpected completion IoTag x%x "
+ "Data: x%x x%x x%x x%x\n",
+ pring->ringno,
+ saveq->iocb.ulpIoTag,
+ saveq->iocb.ulpStatus,
+ saveq->iocb.un.ulpWord[4],
+ saveq->iocb.ulpCommand,
+ saveq->iocb.ulpContext);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called from the iocb ring event handlers when
+ * put pointer is ahead of the get pointer for a ring. This function signal
+ * an error attention condition to the worker thread and the worker
+ * thread will transition the HBA to offline state.
+ **/
+static void
+lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+ /*
+ * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+ * rsp ring <portRspMax>
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0312 Ring %d handler: portRspPut %d "
+ "is bigger than rsp ring %d\n",
+ pring->ringno, le32_to_cpu(pgp->rspPutInx),
+ pring->sli.sli3.numRiocb);
+
+ phba->link_state = LPFC_HBA_ERROR;
+
+ /*
+ * All error attention handlers are posted to
+ * worker thread
+ */
+ phba->work_ha |= HA_ERATT;
+ phba->work_hs = HS_FFER3;
+
+ lpfc_worker_wake_up(phba);
+
+ return;
+}
+
+/**
+ * lpfc_poll_eratt - Error attention polling timer timeout handler
+ * @ptr: Pointer to address of HBA context object.
+ *
+ * This function is invoked by the Error Attention polling timer when the
+ * timer times out. It will check the SLI Error Attention register for
+ * possible attention events. If so, it will post an Error Attention event
+ * and wake up worker thread to process it. Otherwise, it will set up the
+ * Error Attention polling timer for the next poll.
+ **/
+void lpfc_poll_eratt(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ uint32_t eratt = 0;
+ uint64_t sli_intr, cnt;
+
+ phba = (struct lpfc_hba *)ptr;
+
+ /* Here we will also keep track of interrupts per sec of the hba */
+ sli_intr = phba->sli.slistat.sli_intr;
+
+ if (phba->sli.slistat.sli_prev_intr > sli_intr)
+ cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
+ sli_intr);
+ else
+ cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
+
+ /* 64-bit integer division not supporte on 32-bit x86 - use do_div */
+ do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+ phba->sli.slistat.sli_ips = cnt;
+
+ phba->sli.slistat.sli_prev_intr = sli_intr;
+
+ /* Check chip HA register for error event */
+ eratt = lpfc_sli_check_eratt(phba);
+
+ if (eratt)
+ /* Tell the worker thread there is work to do */
+ lpfc_worker_wake_up(phba);
+ else
+ /* Restart the timer for next eratt poll */
+ mod_timer(&phba->eratt_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
+ return;
+}
+
+
+/**
+ * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the interrupt context when there is a ring
+ * event for the fcp ring. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
+ * LE bit set. The function will call the completion handler of the command iocb
+ * if the response iocb indicates a completion for a command iocb or it is
+ * an abort completion. The function will call lpfc_sli_process_unsol_iocb
+ * function if this is an unsolicited iocb.
+ * This routine presumes LPFC_FCP_RING handling and doesn't bother
+ * to check it explicitly.
+ */
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
+{
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+ IOCB_t *irsp = NULL;
+ IOCB_t *entry = NULL;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ struct lpfc_iocbq rspiocbq;
+ uint32_t status;
+ uint32_t portRspPut, portRspMax;
+ int rc = 1;
+ lpfc_iocb_type type;
+ unsigned long iflag;
+ uint32_t rsp_cmpl = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ pring->stats.iocb_event++;
+
+ /*
+ * The next available response entry should never exceed the maximum
+ * entries. If it does, treat it as an adapter hardware error.
+ */
+ portRspMax = pring->sli.sli3.numRiocb;
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ if (unlikely(portRspPut >= portRspMax)) {
+ lpfc_sli_rsp_pointers_error(phba, pring);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return 1;
+ }
+ if (phba->fcp_ring_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return 1;
+ } else
+ phba->fcp_ring_in_use = 1;
+
+ rmb();
+ while (pring->sli.sli3.rspidx != portRspPut) {
+ /*
+ * Fetch an entry off the ring and copy it into a local data
+ * structure. The copy involves a byte-swap since the
+ * network byte order and pci byte orders are different.
+ */
+ entry = lpfc_resp_iocb(phba, pring);
+ phba->last_completion_time = jiffies;
+
+ if (++pring->sli.sli3.rspidx >= portRspMax)
+ pring->sli.sli3.rspidx = 0;
+
+ lpfc_sli_pcimem_bcopy((uint32_t *) entry,
+ (uint32_t *) &rspiocbq.iocb,
+ phba->iocb_rsp_size);
+ INIT_LIST_HEAD(&(rspiocbq.list));
+ irsp = &rspiocbq.iocb;
+
+ type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
+ pring->stats.iocb_rsp++;
+ rsp_cmpl++;
+
+ if (unlikely(irsp->ulpStatus)) {
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
+ /* Rsp ring <ringno> error: IOCB */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0336 Rsp Ring %d error: IOCB Data: "
+ "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(uint32_t *)&irsp->un1,
+ *((uint32_t *)&irsp->un1 + 1));
+ }
+
+ switch (type) {
+ case LPFC_ABORT_IOCB:
+ case LPFC_SOL_IOCB:
+ /*
+ * Idle exchange closed via ABTS from port. No iocb
+ * resources need to be recovered.
+ */
+ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0333 IOCB cmd 0x%x"
+ " processed. Skipping"
+ " completion\n",
+ irsp->ulpCommand);
+ break;
+ }
+
+ cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
+ &rspiocbq);
+ if (unlikely(!cmdiocbq))
+ break;
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->iocb_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+ &rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+ break;
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+ default:
+ if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *) irsp,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown IOCB command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0334 Unknown IOCB command "
+ "Data: x%x, x%x x%x x%x x%x\n",
+ type, irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
+ }
+ break;
+ }
+
+ /*
+ * The response IOCB has been processed. Update the ring
+ * pointer in SLIM. If the port response put pointer has not
+ * been updated, sync the pgp->rspPutInx and fetch the new port
+ * response put pointer.
+ */
+ writel(pring->sli.sli3.rspidx,
+ &phba->host_gp[pring->ringno].rspGetInx);
+
+ if (pring->sli.sli3.rspidx == portRspPut)
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ }
+
+ if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
+ pring->stats.iocb_rsp_full++;
+ status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+ writel(status, phba->CAregaddr);
+ readl(phba->CAregaddr);
+ }
+ if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+ pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+ pring->stats.iocb_cmd_empty++;
+
+ /* Force update of the local copy of cmdGetInx */
+ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ lpfc_sli_resume_iocb(phba, pring);
+
+ if ((pring->lpfc_sli_cmd_available))
+ (pring->lpfc_sli_cmd_available) (phba, pring);
+
+ }
+
+ phba->fcp_ring_in_use = 0;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rc;
+}
+
+/**
+ * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @rspiocbp: Pointer to driver response IOCB object.
+ *
+ * This function is called from the worker thread when there is a slow-path
+ * response IOCB to process. This function chains all the response iocbs until
+ * seeing the iocb with the LE bit set. The function will call
+ * lpfc_sli_process_sol_iocb function if the response iocb indicates a
+ * completion of a command iocb. The function will call the
+ * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
+ * The function frees the resources or calls the completion handler if this
+ * iocb is an abort completion. The function returns NULL when the response
+ * iocb has the LE bit set and all the chained iocbs are processed, otherwise
+ * this function shall chain the iocb on to the iocb_continueq and return the
+ * response iocb passed in.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *rspiocbp)
+{
+ struct lpfc_iocbq *saveq;
+ struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *next_iocb;
+ IOCB_t *irsp = NULL;
+ uint32_t free_saveq;
+ uint8_t iocb_cmd_type;
+ lpfc_iocb_type type;
+ unsigned long iflag;
+ int rc;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ /* First add the response iocb to the countinueq list */
+ list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+ pring->iocb_continueq_cnt++;
+
+ /* Now, determine whether the list is completed for processing */
+ irsp = &rspiocbp->iocb;
+ if (irsp->ulpLe) {
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ irsp = &(saveq->iocb);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
+
+ pring->stats.iocb_rsp++;
+
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
+ if (irsp->ulpStatus) {
+ /* Rsp ring <ringno> error: IOCB */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0328 Rsp Ring %d error: "
+ "IOCB Data: "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x\n",
+ pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7),
+ *(((uint32_t *) irsp) + 8),
+ *(((uint32_t *) irsp) + 9),
+ *(((uint32_t *) irsp) + 10),
+ *(((uint32_t *) irsp) + 11),
+ *(((uint32_t *) irsp) + 12),
+ *(((uint32_t *) irsp) + 13),
+ *(((uint32_t *) irsp) + 14),
+ *(((uint32_t *) irsp) + 15));
+ }
+
+ /*
+ * Fetch the IOCB command type and call the correct completion
+ * routine. Solicited and Unsolicited IOCBs on the ELS ring
+ * get freed back to the lpfc_iocb_list by the discovery
+ * kernel thread.
+ */
+ iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+ switch (type) {
+ case LPFC_SOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
+ break;
+
+ case LPFC_ABORT_IOCB:
+ cmdiocbp = NULL;
+ if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+ cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
+ saveq);
+ if (cmdiocbp) {
+ /* Call the specified completion routine */
+ if (cmdiocbp->iocb_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
+ saveq);
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
+ } else
+ __lpfc_sli_release_iocbq(phba,
+ cmdiocbp);
+ }
+ break;
+
+ case LPFC_UNKNOWN_IOCB:
+ if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *)irsp,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown IOCB command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
+ }
+ break;
+ }
+
+ if (free_saveq) {
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del_init(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba, rspiocbp);
+ }
+ __lpfc_sli_release_iocbq(phba, saveq);
+ }
+ rspiocbp = NULL;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rspiocbp;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This routine wraps the actual slow_ring event process routine from the
+ * API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
+{
+ phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring event
+ * for non-fcp rings. The caller does not hold any lock. The function will
+ * remove each response iocb in the response ring and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
+{
+ struct lpfc_pgp *pgp;
+ IOCB_t *entry;
+ IOCB_t *irsp = NULL;
+ struct lpfc_iocbq *rspiocbp = NULL;
+ uint32_t portRspPut, portRspMax;
+ unsigned long iflag;
+ uint32_t status;
+
+ pgp = &phba->port_gp[pring->ringno];
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ pring->stats.iocb_event++;
+
+ /*
+ * The next available response entry should never exceed the maximum
+ * entries. If it does, treat it as an adapter hardware error.
+ */
+ portRspMax = pring->sli.sli3.numRiocb;
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ if (portRspPut >= portRspMax) {
+ /*
+ * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+ * rsp ring <portRspMax>
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0303 Ring %d handler: portRspPut %d "
+ "is bigger than rsp ring %d\n",
+ pring->ringno, portRspPut, portRspMax);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ phba->work_hs = HS_FFER3;
+ lpfc_handle_eratt(phba);
+
+ return;
+ }
+
+ rmb();
+ while (pring->sli.sli3.rspidx != portRspPut) {
+ /*
+ * Build a completion list and call the appropriate handler.
+ * The process is to get the next available response iocb, get
+ * a free iocb from the list, copy the response data into the
+ * free iocb, insert to the continuation list, and update the
+ * next response index to slim. This process makes response
+ * iocb's in the ring available to DMA as fast as possible but
+ * pays a penalty for a copy operation. Since the iocb is
+ * only 32 bytes, this penalty is considered small relative to
+ * the PCI reads for register values and a slim write. When
+ * the ulpLe field is set, the entire Command has been
+ * received.
+ */
+ entry = lpfc_resp_iocb(phba, pring);
+
+ phba->last_completion_time = jiffies;
+ rspiocbp = __lpfc_sli_get_iocbq(phba);
+ if (rspiocbp == NULL) {
+ printk(KERN_ERR "%s: out of buffers! Failing "
+ "completion.\n", __func__);
+ break;
+ }
+
+ lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
+ phba->iocb_rsp_size);
+ irsp = &rspiocbp->iocb;
+
+ if (++pring->sli.sli3.rspidx >= portRspMax)
+ pring->sli.sli3.rspidx = 0;
+
+ if (pring->ringno == LPFC_ELS_RING) {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
+ *(((uint32_t *) irsp) + 4),
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7));
+ }
+
+ writel(pring->sli.sli3.rspidx,
+ &phba->host_gp[pring->ringno].rspGetInx);
+
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /* Handle the response IOCB */
+ rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+
+ /*
+ * If the port response put pointer has not been updated, sync
+ * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
+ * response put pointer.
+ */
+ if (pring->sli.sli3.rspidx == portRspPut) {
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ }
+ } /* while (pring->sli.sli3.rspidx != portRspPut) */
+
+ if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
+ /* At least one response entry has been freed */
+ pring->stats.iocb_rsp_full++;
+ /* SET RxRE_RSP in Chip Att register */
+ status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+ writel(status, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ }
+ if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+ pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+ pring->stats.iocb_cmd_empty++;
+
+ /* Force update of the local copy of cmdGetInx */
+ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ lpfc_sli_resume_iocb(phba, pring);
+
+ if ((pring->lpfc_sli_cmd_available))
+ (pring->lpfc_sli_cmd_available) (phba, pring);
+
+ }
+
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function will remove each
+ * response iocb from the response worker queue and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
+{
+ struct lpfc_iocbq *irspiocbq;
+ struct hbq_dmabuf *dmabuf;
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+ /* Get the response iocb from the head of work queue */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ list_remove_head(&phba->sli4_hba.sp_queue_event,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+ case CQE_CODE_COMPL_WQE:
+ irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+ cq_event);
+ /* Translate ELS WCQE to response IOCBQ */
+ irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+ irspiocbq);
+ if (irspiocbq)
+ lpfc_sli_sp_handle_rspiocb(phba, pring,
+ irspiocbq);
+ break;
+ case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
+ dmabuf = container_of(cq_event, struct hbq_dmabuf,
+ cq_event);
+ lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ if (pring->ringno == LPFC_ELS_RING) {
+ lpfc_fabric_abort_hba(phba);
+ }
+
+ /* Error everything on txq and txcmplq
+ * First do the txq.
+ */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
+
+ spin_lock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in FCP rings and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+}
+
+
+/**
+ * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * objects in txq and txcmplq. This function will not issue abort iocbs
+ * for all the iocb commands in txcmplq, they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+{
+ LIST_HEAD(txq);
+ LIST_HEAD(txcmplq);
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Indicate the I/O queues are flushed */
+ phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Look on all the FCP Rings for the iotag */
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+
+ spin_lock_irq(&pring->ring_lock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ /* Flush the txcmpq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
+ } else {
+ pring = &psli->ring[psli->fcp_ring];
+
+ spin_lock_irq(&phba->hbalock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txq_cnt = 0;
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Flush the txq */
+ lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ /* Flush the txcmpq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
+}
+
+/**
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares
+ * with the provided bit mask to check if HBA completed
+ * the restart. This function will wait in a loop for the
+ * HBA to complete restart. If the HBA does not restart within
+ * 15 iterations, the function will reset the HBA again. The
+ * function returns 1 when HBA fail to restart otherwise returns
+ * zero.
+ **/
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
+{
+ uint32_t status;
+ int i = 0;
+ int retval = 0;
+
+ /* Read the HBA Host Status Register */
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return 1;
+
+ /*
+ * Check status register every 100ms for 5 retries, then every
+ * 500ms for 5, then every 2.5 sec for 5, then reset board and
+ * every 2.5 sec for 4.
+ * Break our of the loop if errors occurred during init.
+ */
+ while (((status & mask) != mask) &&
+ !(status & HS_FFERM) &&
+ i++ < 20) {
+
+ if (i <= 5)
+ msleep(10);
+ else if (i <= 10)
+ msleep(500);
+ else
+ msleep(2500);
+
+ if (i == 15) {
+ /* Do post */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ lpfc_sli_brdrestart(phba);
+ }
+ /* Read the HBA Host Status Register */
+ if (lpfc_readl(phba->HSregaddr, &status)) {
+ retval = 1;
+ break;
+ }
+ }
+
+ /* Check to see if any errors occurred during init */
+ if ((status & HS_FFERM) || (i >= 20)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2751 Adapter failed to restart, "
+ "status reg x%x, FW Data: A8 x%x AC x%x\n",
+ status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
+ phba->link_state = LPFC_HBA_ERROR;
+ retval = 1;
+ }
+
+ return retval;
+}
+
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to check if HBA is
+ * ready. This function will wait in a loop for the HBA to be ready
+ * If the HBA is not ready , the function will will reset the HBA PCI
+ * function again. The function returns 1 when HBA fail to be ready
+ * otherwise returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+ uint32_t status;
+ int retval = 0;
+
+ /* Read the HBA Host Status Register */
+ status = lpfc_sli4_post_status_check(phba);
+
+ if (status) {
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ lpfc_sli_brdrestart(phba);
+ status = lpfc_sli4_post_status_check(phba);
+ }
+
+ /* Check to see if any errors occurred during init */
+ if (status) {
+ phba->link_state = LPFC_HBA_ERROR;
+ retval = 1;
+ } else
+ phba->sli4_hba.intr_enable = 0;
+
+ return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking the hba readyness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
+ * from the API jump table function pointer from the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+ return phba->lpfc_sli_brdready(phba, mask);
+}
+
+#define BARRIER_TEST_PATTERN (0xdeadbeef)
+
+/**
+ * lpfc_reset_barrier - Make HBA ready for HBA reset
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called before resetting an HBA. This function is called
+ * with hbalock held and requests HBA to quiesce DMAs before a reset.
+ **/
+void lpfc_reset_barrier(struct lpfc_hba *phba)
+{
+ uint32_t __iomem *resp_buf;
+ uint32_t __iomem *mbox_buf;
+ volatile uint32_t mbox;
+ uint32_t hc_copy, ha_copy, resp_data;
+ int i;
+ uint8_t hdrtype;
+
+ pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
+ if (hdrtype != 0x80 ||
+ (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
+ FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
+ return;
+
+ /*
+ * Tell the other part of the chip to suspend temporarily all
+ * its DMA activity.
+ */
+ resp_buf = phba->MBslimaddr;
+
+ /* Disable the error attention */
+ if (lpfc_readl(phba->HCregaddr, &hc_copy))
+ return;
+ writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ phba->link_flag |= LS_IGNORE_ERATT;
+
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return;
+ if (ha_copy & HA_ERATT) {
+ /* Clear Chip error bit */
+ writel(HA_ERATT, phba->HAregaddr);
+ phba->pport->stopped = 1;
+ }
+
+ mbox = 0;
+ ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
+ ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+
+ writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
+ mbox_buf = phba->MBslimaddr;
+ writel(mbox, mbox_buf);
+
+ for (i = 0; i < 50; i++) {
+ if (lpfc_readl((resp_buf + 1), &resp_data))
+ return;
+ if (resp_data != ~(BARRIER_TEST_PATTERN))
+ mdelay(1);
+ else
+ break;
+ }
+ resp_data = 0;
+ if (lpfc_readl((resp_buf + 1), &resp_data))
+ return;
+ if (resp_data != ~(BARRIER_TEST_PATTERN)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
+ phba->pport->stopped)
+ goto restore_hc;
+ else
+ goto clear_errat;
+ }
+
+ ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+ resp_data = 0;
+ for (i = 0; i < 500; i++) {
+ if (lpfc_readl(resp_buf, &resp_data))
+ return;
+ if (resp_data != mbox)
+ mdelay(1);
+ else
+ break;
+ }
+
+clear_errat:
+
+ while (++i < 500) {
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return;
+ if (!(ha_copy & HA_ERATT))
+ mdelay(1);
+ else
+ break;
+ }
+
+ if (readl(phba->HAregaddr) & HA_ERATT) {
+ writel(HA_ERATT, phba->HAregaddr);
+ phba->pport->stopped = 1;
+ }
+
+restore_hc:
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ writel(hc_copy, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+}
+
+/**
+ * lpfc_sli_brdkill - Issue a kill_board mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a kill_board mailbox command and waits for
+ * the error attention interrupt. This function is called for stopping
+ * the firmware processing. The caller is not required to hold any
+ * locks. This function calls lpfc_hba_down_post function to free
+ * any pending commands after the kill. The function will return 1 when it
+ * fails to kill the board else will return 0.
+ **/
+int
+lpfc_sli_brdkill(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *pmb;
+ uint32_t status;
+ uint32_t ha_copy;
+ int retval;
+ int i = 0;
+
+ psli = &phba->sli;
+
+ /* Kill HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0329 Kill HBA Data: x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag);
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb)
+ return 1;
+
+ /* Disable the error attention */
+ spin_lock_irq(&phba->hbalock);
+ if (lpfc_readl(phba->HCregaddr, &status)) {
+ spin_unlock_irq(&phba->hbalock);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return 1;
+ }
+ status &= ~HC_ERINT_ENA;
+ writel(status, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ phba->link_flag |= LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_kill_board(phba, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+ if (retval != MBX_SUCCESS) {
+ if (retval != MBX_BUSY)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2752 KILL_BOARD command failed retval %d\n",
+ retval);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
+ }
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
+ * attention every 100ms for 3 seconds. If we don't get ERATT after
+ * 3 seconds we still set HBA_ERROR state because the status of the
+ * board is now undefined.
+ */
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
+ while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
+ mdelay(100);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
+ }
+
+ del_timer_sync(&psli->mbox_tmo);
+ if (ha_copy & HA_ERATT) {
+ writel(HA_ERATT, phba->HAregaddr);
+ phba->pport->stopped = 1;
+ }
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ psli->mbox_active = NULL;
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_hba_down_post(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+
+ return ha_copy & HA_ERATT ? 0 : 1;
+}
+
+/**
+ * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets the HBA by writing HC_INITFF to the control
+ * register. After the HBA resets, this function resets all the iocb ring
+ * indices. This function disables PCI layer parity checking during
+ * the reset.
+ * This function returns 0 always.
+ * The caller is not required to hold any locks.
+ **/
+int
+lpfc_sli_brdreset(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ uint16_t cfg_value;
+ int i;
+
+ psli = &phba->sli;
+
+ /* Reset HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0325 Reset HBA Data: x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag);
+
+ /* perform board reset */
+ phba->fc_eventTag = 0;
+ phba->link_events = 0;
+ phba->pport->fc_myDID = 0;
+ phba->pport->fc_prevDID = 0;
+
+ /* Turn off parity checking and serr during the physical reset */
+ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+ pci_write_config_word(phba->pcidev, PCI_COMMAND,
+ (cfg_value &
+ ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+ psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
+
+ /* Now toggle INITFF bit in the Host Control Register */
+ writel(HC_INITFF, phba->HCregaddr);
+ mdelay(1);
+ readl(phba->HCregaddr); /* flush */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Restore PCI cmd register */
+ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
+ /* Initialize relevant SLI info */
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ pring->flag = 0;
+ pring->sli.sli3.rspidx = 0;
+ pring->sli.sli3.next_cmdidx = 0;
+ pring->sli.sli3.local_getidx = 0;
+ pring->sli.sli3.cmdidx = 0;
+ pring->missbufcnt = 0;
+ }
+
+ phba->link_state = LPFC_WARM_START;
+ return 0;
+}
+
+/**
+ * lpfc_sli4_brdreset - Reset a sli-4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets a SLI4 HBA. This function disables PCI layer parity
+ * checking during resets the device. The caller is not required to hold
+ * any locks.
+ *
+ * This function returns 0 always.
+ **/
+int
+lpfc_sli4_brdreset(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint16_t cfg_value;
+ int rc = 0;
+
+ /* Reset HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0295 Reset HBA Data: x%x x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag,
+ phba->hba_flag);
+
+ /* perform board reset */
+ phba->fc_eventTag = 0;
+ phba->link_events = 0;
+ phba->pport->fc_myDID = 0;
+ phba->pport->fc_prevDID = 0;
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~(LPFC_PROCESS_LA);
+ phba->fcf.fcf_flag = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
+ if (phba->hba_flag & HBA_FW_DUMP_OP) {
+ phba->hba_flag &= ~HBA_FW_DUMP_OP;
+ return rc;
+ }
+
+ /* Now physically reset the device */
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0389 Performing PCI function reset!\n");
+
+ /* Turn off parity checking and serr during the physical reset */
+ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+ pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
+ ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+ /* Perform FCoE PCI function reset before freeing queue memory */
+ rc = lpfc_pci_function_reset(phba);
+ lpfc_sli4_queue_destroy(phba);
+
+ /* Restore PCI cmd register */
+ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to
+ * restart the HBA. The caller is not required to hold any lock.
+ * This function writes MBX_RESTART mailbox command to the SLIM and
+ * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
+ * function to free any pending commands. The function enables
+ * POST only during the first initialization. The function returns zero.
+ * The function does not guarantee completion of MBX_RESTART mailbox
+ * command before the return of this function.
+ **/
+static int
+lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
+{
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+ volatile uint32_t word0;
+ void __iomem *to_slim;
+ uint32_t hba_aer_enabled;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* Take PCIe device Advanced Error Reporting (AER) state */
+ hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
+ psli = &phba->sli;
+
+ /* Restart HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0337 Restart HBA Data: x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag);
+
+ word0 = 0;
+ mb = (MAILBOX_t *) &word0;
+ mb->mbxCommand = MBX_RESTART;
+ mb->mbxHc = 1;
+
+ lpfc_reset_barrier(phba);
+
+ to_slim = phba->MBslimaddr;
+ writel(*(uint32_t *) mb, to_slim);
+ readl(to_slim); /* flush */
+
+ /* Only skip post after fc_ffinit is completed */
+ if (phba->pport->port_state)
+ word0 = 1; /* This is really setting up word1 */
+ else
+ word0 = 0; /* This is really setting up word1 */
+ to_slim = phba->MBslimaddr + sizeof (uint32_t);
+ writel(*(uint32_t *) mb, to_slim);
+ readl(to_slim); /* flush */
+
+ lpfc_sli_brdreset(phba);
+ phba->pport->stopped = 0;
+ phba->link_state = LPFC_INIT_START;
+ phba->hba_flag = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+ psli->stats_start = get_seconds();
+
+ /* Give the INITFF and Post time to settle. */
+ mdelay(100);
+
+ /* Reset HBA AER if it was enabled, note hba_flag was reset above */
+ if (hba_aer_enabled)
+ pci_disable_pcie_error_reporting(phba->pcidev);
+
+ lpfc_hba_down_post(phba);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to restart
+ * a SLI4 HBA. The caller is not required to hold any lock.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static int
+lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t hba_aer_enabled;
+ int rc;
+
+ /* Restart HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0296 Restart HBA Data: x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag);
+
+ /* Take PCIe device Advanced Error Reporting (AER) state */
+ hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
+ rc = lpfc_sli4_brdreset(phba);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->pport->stopped = 0;
+ phba->link_state = LPFC_INIT_START;
+ phba->hba_flag = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+ psli->stats_start = get_seconds();
+
+ /* Reset HBA AER if it was enabled, note hba_flag was reset above */
+ if (hba_aer_enabled)
+ pci_disable_pcie_error_reporting(phba->pcidev);
+
+ lpfc_hba_down_post(phba);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli_brdrestart - Wrapper func for restarting hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
+ * API jump table function pointer from the lpfc_hba struct.
+**/
+int
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
+{
+ return phba->lpfc_sli_brdrestart(phba);
+}
+
+/**
+ * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called after a HBA restart to wait for successful
+ * restart of the HBA. Successful restart of the HBA is indicated by
+ * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
+ * iteration, the function will restart the HBA again. The function returns
+ * zero if HBA successfully restarted else returns negative error code.
+ **/
+static int
+lpfc_sli_chipset_init(struct lpfc_hba *phba)
+{
+ uint32_t status, i = 0;
+
+ /* Read the HBA Host Status Register */
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return -EIO;
+
+ /* Check status register to see what current state is */
+ i = 0;
+ while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
+
+ /* Check every 10ms for 10 retries, then every 100ms for 90
+ * retries, then every 1 sec for 50 retires for a total of
+ * ~60 seconds before reset the board again and check every
+ * 1 sec for 50 retries. The up to 60 seconds before the
+ * board ready is required by the Falcon FIPS zeroization
+ * complete, and any reset the board in between shall cause
+ * restart of zeroization, further delay the board ready.
+ */
+ if (i++ >= 200) {
+ /* Adapter failed to init, timeout, status reg
+ <status> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0436 Adapter failed to init, "
+ "timeout, status reg x%x, "
+ "FW Data: A8 x%x AC x%x\n", status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ETIMEDOUT;
+ }
+
+ /* Check to see if any errors occurred during init */
+ if (status & HS_FFERM) {
+ /* ERROR: During chipset initialization */
+ /* Adapter failed to init, chipset, status reg
+ <status> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0437 Adapter failed to init, "
+ "chipset, status reg x%x, "
+ "FW Data: A8 x%x AC x%x\n", status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
+ phba->link_state = LPFC_HBA_ERROR;
+ return -EIO;
+ }
+
+ if (i <= 10)
+ msleep(10);
+ else if (i <= 100)
+ msleep(100);
+ else
+ msleep(1000);
+
+ if (i == 150) {
+ /* Do post */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ lpfc_sli_brdrestart(phba);
+ }
+ /* Read the HBA Host Status Register */
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return -EIO;
+ }
+
+ /* Check to see if any errors occurred during init */
+ if (status & HS_FFERM) {
+ /* ERROR: During chipset initialization */
+ /* Adapter failed to init, chipset, status reg <status> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0438 Adapter failed to init, chipset, "
+ "status reg x%x, "
+ "FW Data: A8 x%x AC x%x\n", status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
+ phba->link_state = LPFC_HBA_ERROR;
+ return -EIO;
+ }
+
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* setup host attn register */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ return 0;
+}
+
+/**
+ * lpfc_sli_hbq_count - Get the number of HBQs to be configured
+ *
+ * This function calculates and returns the number of HBQs required to be
+ * configured.
+ **/
+int
+lpfc_sli_hbq_count(void)
+{
+ return ARRAY_SIZE(lpfc_hbq_defs);
+}
+
+/**
+ * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
+ *
+ * This function adds the number of hbq entries in every HBQ to get
+ * the total number of hbq entries required for the HBA and returns
+ * the total count.
+ **/
+static int
+lpfc_sli_hbq_entry_count(void)
+{
+ int hbq_count = lpfc_sli_hbq_count();
+ int count = 0;
+ int i;
+
+ for (i = 0; i < hbq_count; ++i)
+ count += lpfc_hbq_defs[i]->entry_count;
+ return count;
+}
+
+/**
+ * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
+ *
+ * This function calculates amount of memory required for all hbq entries
+ * to be configured and returns the total memory required.
+ **/
+int
+lpfc_sli_hbq_size(void)
+{
+ return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
+}
+
+/**
+ * lpfc_sli_hbq_setup - configure and initialize HBQs
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli_hbq_setup(struct lpfc_hba *phba)
+{
+ int hbq_count = lpfc_sli_hbq_count();
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *pmbox;
+ uint32_t hbqno;
+ uint32_t hbq_entry_index;
+
+ /* Get a Mailbox buffer to setup mailbox
+ * commands for HBA initialization
+ */
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!pmb)
+ return -ENOMEM;
+
+ pmbox = &pmb->u.mb;
+
+ /* Initialize the struct lpfc_sli_hbq structure for each hbq */
+ phba->link_state = LPFC_INIT_MBX_CMDS;
+ phba->hbq_in_use = 1;
+
+ hbq_entry_index = 0;
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+ phba->hbqs[hbqno].next_hbqPutIdx = 0;
+ phba->hbqs[hbqno].hbqPutIdx = 0;
+ phba->hbqs[hbqno].local_hbqGetIdx = 0;
+ phba->hbqs[hbqno].entry_count =
+ lpfc_hbq_defs[hbqno]->entry_count;
+ lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
+ hbq_entry_index, pmb);
+ hbq_entry_index += phba->hbqs[hbqno].entry_count;
+
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
+ mbxStatus <status>, ring <num> */
+
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "1805 Adapter failed to init. "
+ "Data: x%x x%x x%x\n",
+ pmbox->mbxCommand,
+ pmbox->mbxStatus, hbqno);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ENXIO;
+ }
+ }
+ phba->hbq_count = hbq_count;
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Initially populate or replenish the HBQs */
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno)
+ lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
+ return 0;
+}
+
+/**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+ phba->hbq_in_use = 1;
+ phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+ phba->hbq_count = 1;
+ /* Initially populate or replenish the HBQs */
+ lpfc_sli_hbqbuf_init_hbqs(phba, 0);
+ return 0;
+}
+
+/**
+ * lpfc_sli_config_port - Issue config port mailbox command
+ * @phba: Pointer to HBA context object.
+ * @sli_mode: sli mode - 2/3
+ *
+ * This function is called by the sli intialization code path
+ * to issue config_port mailbox command. This function restarts the
+ * HBA firmware and issues a config_port mailbox command to configure
+ * the SLI interface in the sli mode specified by sli_mode
+ * variable. The caller is not required to hold any locks.
+ * The function returns 0 if successful, else returns negative error
+ * code.
+ **/
+int
+lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
+{
+ LPFC_MBOXQ_t *pmb;
+ uint32_t resetcount = 0, rc = 0, done = 0;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ phba->sli_rev = sli_mode;
+ while (resetcount < 2 && !done) {
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ lpfc_sli_brdrestart(phba);
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ break;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ resetcount++;
+
+ /* Call pre CONFIG_PORT mailbox command initialization. A
+ * value of 0 means the call was successful. Any other
+ * nonzero value is a failure, but if ERESTART is returned,
+ * the driver may reset the HBA and try again.
+ */
+ rc = lpfc_config_port_prep(phba);
+ if (rc == -ERESTART) {
+ phba->link_state = LPFC_LINK_UNKNOWN;
+ continue;
+ } else if (rc)
+ break;
+
+ phba->link_state = LPFC_INIT_MBX_CMDS;
+ lpfc_config_port(phba, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
+ LPFC_SLI3_HBQ_ENABLED |
+ LPFC_SLI3_CRP_ENABLED |
+ LPFC_SLI3_BG_ENABLED |
+ LPFC_SLI3_DSS_ENABLED);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0442 Adapter failed to init, mbxCmd x%x "
+ "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
+ pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ rc = -ENXIO;
+ } else {
+ /* Allow asynchronous mailbox command to go through */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+ done = 1;
+
+ if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+ (pmb->u.mb.un.varCfgPort.gasabt == 0))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3110 Port did not grant ASABT\n");
+ }
+ }
+ if (!done) {
+ rc = -EINVAL;
+ goto do_prep_failed;
+ }
+ if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+ if (!pmb->u.mb.un.varCfgPort.cMA) {
+ rc = -ENXIO;
+ goto do_prep_failed;
+ }
+ if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+ phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+ phba->max_vpi : phba->max_vports;
+
+ } else
+ phba->max_vpi = 0;
+ phba->fips_level = 0;
+ phba->fips_spec_rev = 0;
+ if (pmb->u.mb.un.varCfgPort.gdss) {
+ phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+ phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
+ phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2850 Security Crypto Active. FIPS x%d "
+ "(Spec Rev: x%d)",
+ phba->fips_level, phba->fips_spec_rev);
+ }
+ if (pmb->u.mb.un.varCfgPort.sec_err) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2856 Config Port Security Crypto "
+ "Error: x%x ",
+ pmb->u.mb.un.varCfgPort.sec_err);
+ }
+ if (pmb->u.mb.un.varCfgPort.gerbm)
+ phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+ if (pmb->u.mb.un.varCfgPort.gcrp)
+ phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
+
+ phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+ phba->port_gp = phba->mbox->us.s3_pgp.port;
+
+ if (phba->cfg_enable_bg) {
+ if (pmb->u.mb.un.varCfgPort.gbg)
+ phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0443 Adapter did not grant "
+ "BlockGuard\n");
+ }
+ } else {
+ phba->hbq_get = NULL;
+ phba->port_gp = phba->mbox->us.s2.port;
+ phba->max_vpi = 0;
+ }
+do_prep_failed:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+}
+
+
+/**
+ * lpfc_sli_hba_setup - SLI intialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI intialization function. This function
+ * is called by the HBA intialization code, HBA reset code and HBA
+ * error attention handler code. Caller is not required to hold any
+ * locks. This function issues config_port mailbox command to configure
+ * the SLI, setup iocb rings and HBQ rings. In the end the function
+ * calls the config_port_post function to issue init_link mailbox
+ * command and to start the discovery. The function will return zero
+ * if successful, else it will return negative error code.
+ **/
+int
+lpfc_sli_hba_setup(struct lpfc_hba *phba)
+{
+ uint32_t rc;
+ int mode = 3, i;
+ int longs;
+
+ switch (lpfc_sli_mode) {
+ case 2:
+ if (phba->cfg_enable_npiv) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1824 NPIV enabled: Override lpfc_sli_mode "
+ "parameter (%d) to auto (0).\n",
+ lpfc_sli_mode);
+ break;
+ }
+ mode = 2;
+ break;
+ case 0:
+ case 3:
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1819 Unrecognized lpfc_sli_mode "
+ "parameter: %d.\n", lpfc_sli_mode);
+
+ break;
+ }
+
+ rc = lpfc_sli_config_port(phba, mode);
+
+ if (rc && lpfc_sli_mode == 3)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1820 Unable to select SLI-3. "
+ "Not supported by adapter.\n");
+ if (rc && mode != 2)
+ rc = lpfc_sli_config_port(phba, 2);
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+
+ /* Enable PCIe device Advanced Error Reporting (AER) if configured */
+ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+ rc = pci_enable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2709 This device supports "
+ "Advanced Error Reporting (AER)\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2708 This device does not support "
+ "Advanced Error Reporting (AER): %d\n",
+ rc);
+ phba->cfg_aer_support = 0;
+ }
+ }
+
+ if (phba->sli_rev == 3) {
+ phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
+ phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
+ } else {
+ phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
+ phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
+ phba->sli3_options = 0;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0444 Firmware in SLI %x mode. Max_vpi %d\n",
+ phba->sli_rev, phba->max_vpi);
+ rc = lpfc_sli_ring_map(phba);
+
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+
+ /* Initialize VPIs. */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ /*
+ * The VPI bitmask and physical ID array are allocated
+ * and initialized once only - at driver load. A port
+ * reset doesn't need to reinitialize this memory.
+ */
+ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+ longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->vpi_bmask) {
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+
+ phba->vpi_ids = kzalloc(
+ (phba->max_vpi+1) * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->vpi_ids) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+ for (i = 0; i < phba->max_vpi; i++)
+ phba->vpi_ids[i] = i;
+ }
+ }
+
+ /* Init HBQs */
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ rc = lpfc_sli_hbq_setup(phba);
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+ }
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_PROCESS_LA;
+ spin_unlock_irq(&phba->hbalock);
+
+ rc = lpfc_config_port_post(phba);
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+
+ return rc;
+
+lpfc_sli_hba_setup_error:
+ phba->link_state = LPFC_HBA_ERROR;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0445 Firmware initialization failed\n");
+ return rc;
+}
+
+/**
+ * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
+ * @phba: Pointer to HBA context object.
+ * @mboxq: mailbox pointer.
+ * This function issue a dump mailbox command to read config region
+ * 23 and parse the records in the region and populate driver
+ * data structure.
+ **/
+static int
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_mqe *mqe;
+ uint32_t data_length;
+ int rc;
+
+ /* Program the default value of vlan_id and fc_map */
+ phba->valid_vlan = 0;
+ phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+ phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+ phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ mqe = &mboxq->u.mqe;
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):2571 Mailbox cmd x%x Status x%x "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "CQ: x%x x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ bf_get(lpfc_mqe_command, mqe),
+ bf_get(lpfc_mqe_status, mqe),
+ mqe->un.mb_words[0], mqe->un.mb_words[1],
+ mqe->un.mb_words[2], mqe->un.mb_words[3],
+ mqe->un.mb_words[4], mqe->un.mb_words[5],
+ mqe->un.mb_words[6], mqe->un.mb_words[7],
+ mqe->un.mb_words[8], mqe->un.mb_words[9],
+ mqe->un.mb_words[10], mqe->un.mb_words[11],
+ mqe->un.mb_words[12], mqe->un.mb_words[13],
+ mqe->un.mb_words[14], mqe->un.mb_words[15],
+ mqe->un.mb_words[16], mqe->un.mb_words[50],
+ mboxq->mcqe.word0,
+ mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+ mboxq->mcqe.trailer);
+
+ if (rc) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ rc = -EIO;
+ goto out_free_mboxq;
+ }
+ data_length = mqe->un.mb_words[5];
+ if (data_length > DMP_RGN23_SIZE) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ rc = -EIO;
+ goto out_free_mboxq;
+ }
+
+ lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ rc = 0;
+
+out_free_mboxq:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the LPFC_MBOXQ_t structure.
+ * @vpd: pointer to the memory to hold resulting port vpd data.
+ * @vpd_size: On input, the number of bytes allocated to @vpd.
+ * On output, the number of data bytes in @vpd.
+ *
+ * This routine executes a READ_REV SLI4 mailbox command. In
+ * addition, this routine gets the port vpd data.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - could not allocated memory.
+ **/
+static int
+lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint8_t *vpd, uint32_t *vpd_size)
+{
+ int rc = 0;
+ uint32_t dma_size;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_mqe *mqe;
+
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ /*
+ * Get a DMA buffer for the vpd data resulting from the READ_REV
+ * mailbox command.
+ */
+ dma_size = *vpd_size;
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
+ &dmabuf->phys, GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return -ENOMEM;
+ }
+
+ /*
+ * The SLI4 implementation of READ_REV conflicts at word1,
+ * bits 31:16 and SLI4 adds vpd functionality not present
+ * in SLI3. This code corrects the conflicts.
+ */
+ lpfc_read_rev(phba, mboxq);
+ mqe = &mboxq->u.mqe;
+ mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
+ mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
+ mqe->un.read_rev.word1 &= 0x0000FFFF;
+ bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
+ bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc) {
+ dma_free_coherent(&phba->pcidev->dev, dma_size,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return -EIO;
+ }
+
+ /*
+ * The available vpd length cannot be bigger than the
+ * DMA buffer passed to the port. Catch the less than
+ * case and update the caller's size.
+ */
+ if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
+ *vpd_size = mqe->un.read_rev.avail_vpd_len;
+
+ memcpy(vpd, dmabuf->virt, *vpd_size);
+
+ dma_free_coherent(&phba->pcidev->dev, dma_size,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return 0;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves SLI4 device physical port name this PCI function
+ * is attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
+ struct lpfc_controller_attribute *cntl_attr;
+ struct lpfc_mbx_get_port_name *get_port_name;
+ void *virtaddr = NULL;
+ uint32_t alloclen, reqlen;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ char cport_name = 0;
+ int rc;
+
+ /* We assume nothing at this point */
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+ /* obtain link type and link number via READ_CONFIG */
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ lpfc_sli4_read_config(phba);
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+ goto retrieve_ppname;
+
+ /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+ reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
+ alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3084 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory size "
+ "(%d)\n", alloclen, reqlen);
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ virtaddr = mboxq->sge_array->addr[0];
+ mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
+ shdr = &mbx_cntl_attr->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3085 Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x, status:x%x, add_status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ rc, shdr_status, shdr_add_status);
+ rc = -ENXIO;
+ goto out_free_mboxq;
+ }
+ cntl_attr = &mbx_cntl_attr->cntl_attr;
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3086 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
+retrieve_ppname:
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PORT_NAME,
+ sizeof(struct lpfc_mbx_get_port_name) -
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
+ get_port_name = &mboxq->u.mqe.un.get_port_name;
+ shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
+ bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
+ phba->sli4_hba.lnk_info.lnk_tp);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3087 Mailbox x%x (x%x/x%x) failed: "
+ "rc:x%x, status:x%x, add_status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ rc, shdr_status, shdr_add_status);
+ rc = -ENXIO;
+ goto out_free_mboxq;
+ }
+ switch (phba->sli4_hba.lnk_info.lnk_no) {
+ case LPFC_LINK_NUMBER_0:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name0,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ case LPFC_LINK_NUMBER_1:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name1,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ case LPFC_LINK_NUMBER_2:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name2,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ case LPFC_LINK_NUMBER_3:
+ cport_name = bf_get(lpfc_mbx_get_port_name_name3,
+ &get_port_name->u.response);
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+ break;
+ default:
+ break;
+ }
+
+ if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
+ phba->Port[0] = cport_name;
+ phba->Port[1] = '\0';
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3091 SLI get port name: %s\n", phba->Port);
+ }
+
+out_free_mboxq:
+ if (rc != MBX_TIMEOUT) {
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to explicitly arm the SLI4 device's completion and
+ * event queues
+ **/
+static void
+lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
+{
+ int fcp_eqidx;
+
+ lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
+ lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
+ fcp_eqidx = 0;
+ if (phba->sli4_hba.fcp_cq) {
+ do {
+ lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
+ }
+
+ if (phba->cfg_fof)
+ lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
+ if (phba->sli4_hba.hba_eq) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
+ fcp_eqidx++)
+ lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ }
+
+ if (phba->cfg_fof)
+ lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold port available extent count.
+ * @extnt_size: buffer to hold element count per extent.
+ *
+ * This function calls the port and retrievs the number of available
+ * extents and their size for a particular extent type.
+ *
+ * Returns: 0 if successful. Nonzero otherwise.
+ **/
+int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_count, uint16_t *extnt_size)
+{
+ int rc = 0;
+ uint32_t length;
+ uint32_t mbox_tmo;
+ struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Find out how many extents are available for this resource type */
+ length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the GET doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2930 Failed to get resource extents "
+ "Status 0x%x Add'l Status 0x%x\n",
+ bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &rsrc_info->header.cfg_shdr.response));
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+ &rsrc_info->u.rsp);
+ *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+ &rsrc_info->u.rsp);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3162 Retrieved extents type-%d from port: count:%d, "
+ "size:%d\n", type, *extnt_count, *extnt_size);
+
+err_exit:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine post port reset to understand if there is a
+ * extent reprovisioning requirement.
+ *
+ * Returns:
+ * -Error: error indicates problem.
+ * 1: Extent count or size has changed.
+ * 0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+ uint16_t curr_ext_cnt, rsrc_ext_cnt;
+ uint16_t size_diff, rsrc_ext_size;
+ int rc = 0;
+ struct lpfc_rsrc_blks *rsrc_entry;
+ struct list_head *rsrc_blk_list = NULL;
+
+ size_diff = 0;
+ curr_ext_cnt = 0;
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ break;
+ }
+
+ list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+ curr_ext_cnt++;
+ if (rsrc_entry->rsrc_size != rsrc_ext_size)
+ size_diff++;
+ }
+
+ if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+ rc = 1;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_cfg_post_extnts -
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt - number of available extents.
+ * @type - the extent type (rpi, xri, vfi, vpi).
+ * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox - pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request. It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ * -Error: Error value describes the condition found.
+ * 0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
+ uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+ int rc = 0;
+ uint32_t req_len;
+ uint32_t emb_len;
+ uint32_t alloc_len, mbox_tmo;
+
+ /* Calculate the total requested length of the dma memory */
+ req_len = extnt_cnt * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ *emb = LPFC_SLI4_MBX_EMBED;
+ if (req_len > emb_len) {
+ req_len = extnt_cnt * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ *emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+ req_len, *emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2982 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
+ if (unlikely(rc))
+ return -EIO;
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ rc = -EIO;
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ bool emb = false;
+ uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+ uint16_t rsrc_id, rsrc_start, j, k;
+ uint16_t *ids;
+ int i, rc;
+ unsigned long longs;
+ unsigned long *bmask;
+ struct lpfc_rsrc_blks *rsrc_blks;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t length;
+ struct lpfc_id_range *id_array = NULL;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ struct list_head *ext_blk_list;
+
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_cnt,
+ &rsrc_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "3009 No available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
+ "2903 Post resource extents type-0x%x: "
+ "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port does not guarantee to respond to
+ * all extents counts request so update the local variable with the
+ * allocated count from the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ id_array = &rsrc_ext->u.rsp.id[0];
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ id_array = &n_rsrc->id;
+ }
+
+ longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+ /*
+ * Based on the resource size and count, correct the base and max
+ * resource values.
+ */
+ length = sizeof(struct lpfc_rsrc_blks);
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ kfree(phba->sli4_hba.rpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /*
+ * The next_rpi was initialized with the maximum available
+ * count but the port may allocate a smaller number. Catch
+ * that case and update the next_rpi.
+ */
+ phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.rpi_bmask;
+ ids = phba->sli4_hba.rpi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->vpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->vpi_bmask;
+ ids = phba->vpi_ids;
+ ext_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.max_cfg_param.xri_used = 0;
+ phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ kfree(phba->sli4_hba.xri_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.xri_bmask;
+ ids = phba->sli4_hba.xri_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ kfree(phba->sli4_hba.vfi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.vfi_bmask;
+ ids = phba->sli4_hba.vfi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ /* Unsupported Opcode. Fail call. */
+ id_array = NULL;
+ bmask = NULL;
+ ids = NULL;
+ ext_blk_list = NULL;
+ goto err_exit;
+ }
+
+ /*
+ * Complete initializing the extent configuration with the
+ * allocated ids assigned to this function. The bitmask serves
+ * as an index into the array and manages the available ids. The
+ * array just stores the ids communicated to the port via the wqes.
+ */
+ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+ if ((i % 2) == 0)
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ &id_array[k]);
+ else
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ &id_array[k]);
+
+ rsrc_blks = kzalloc(length, GFP_KERNEL);
+ if (unlikely(!rsrc_blks)) {
+ rc = -ENOMEM;
+ kfree(bmask);
+ kfree(ids);
+ goto err_exit;
+ }
+ rsrc_blks->rsrc_start = rsrc_id;
+ rsrc_blks->rsrc_size = rsrc_size;
+ list_add_tail(&rsrc_blks->list, ext_blk_list);
+ rsrc_start = rsrc_id;
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ phba->sli4_hba.scsi_xri_start = rsrc_start +
+ lpfc_sli4_get_els_iocb_cnt(phba);
+
+ while (rsrc_id < (rsrc_start + rsrc_size)) {
+ ids[j] = rsrc_id;
+ rsrc_id++;
+ j++;
+ }
+ /* Entire word processed. Get next word.*/
+ if ((i % 2) == 1)
+ k++;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range. It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ int rc;
+ uint32_t length, mbox_tmo = 0;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+ struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /*
+ * This function sends an embedded mailbox because it only sends the
+ * the resource type. All extents of this type are released by the
+ * port.
+ */
+ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the dealloc doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2919 Failed to release resource extents "
+ "for type %d - Status 0x%x Add'l Status 0x%x. "
+ "Resource memory not released.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &dealloc_rsrc->header.cfg_shdr.response));
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /* Release kernel memory resources for the specific type. */
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->lpfc_vpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ phba->sli4_hba.max_cfg_param.vpi_used = 0;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_xri_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ /* RPI bitmask and physical id array are cleaned up earlier. */
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ int i, rc, error = 0;
+ uint16_t count, base;
+ unsigned long longs;
+
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (phba->sli4_hba.extents_in_use) {
+ /*
+ * The port supports resource extents. The XRI, VPI, VFI, RPI
+ * resource extent count must be read and allocated before
+ * provisioning the resource id arrays.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ /*
+ * Extent-based resources are set - the driver could
+ * be in a port reset. Figure out if any corrective
+ * actions need to be taken.
+ */
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ if (rc != 0)
+ error++;
+
+ /*
+ * It's possible that the number of resources
+ * provided to this port instance changed between
+ * resets. Detect this condition and reallocate
+ * resources. Otherwise, there is no action.
+ */
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_INIT,
+ "2931 Detected extent resource "
+ "change. Reallocating all "
+ "extents.\n");
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ } else
+ return 0;
+ }
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ if (unlikely(rc))
+ goto err_exit;
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return rc;
+ } else {
+ /*
+ * The port does not support resource extents. The XRI, VPI,
+ * VFI, RPI resource ids were determined from READ_CONFIG.
+ * Just allocate the bitmasks and provision the resource id
+ * arrays. If a port reset is active, the resources don't
+ * need any action - just exit.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+ lpfc_sli4_remove_rpis(phba);
+ }
+ /* RPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3279 Invalid provisioning of "
+ "rpi:%d\n", count);
+ rc = -EINVAL;
+ goto err_exit;
+ }
+ base = phba->sli4_hba.max_cfg_param.rpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ rc = -ENOMEM;
+ goto free_rpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.rpi_ids[i] = base + i;
+
+ /* VPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3280 Invalid provisioning of "
+ "vpi:%d\n", count);
+ rc = -EINVAL;
+ goto free_rpi_ids;
+ }
+ base = phba->sli4_hba.max_cfg_param.vpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto free_rpi_ids;
+ }
+ phba->vpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ rc = -ENOMEM;
+ goto free_vpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->vpi_ids[i] = base + i;
+
+ /* XRIs. */
+ count = phba->sli4_hba.max_cfg_param.max_xri;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3281 Invalid provisioning of "
+ "xri:%d\n", count);
+ rc = -EINVAL;
+ goto free_vpi_ids;
+ }
+ base = phba->sli4_hba.max_cfg_param.xri_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto free_vpi_ids;
+ }
+ phba->sli4_hba.max_cfg_param.xri_used = 0;
+ phba->sli4_hba.xri_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ rc = -ENOMEM;
+ goto free_xri_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.xri_ids[i] = base + i;
+
+ /* VFIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vfi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3282 Invalid provisioning of "
+ "vfi:%d\n", count);
+ rc = -EINVAL;
+ goto free_xri_ids;
+ }
+ base = phba->sli4_hba.max_cfg_param.vfi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto free_xri_ids;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ rc = -ENOMEM;
+ goto free_vfi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.vfi_ids[i] = base + i;
+
+ /*
+ * Mark all resources ready. An HBA reset doesn't need
+ * to reset the initialization.
+ */
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return 0;
+ }
+
+ free_vfi_bmask:
+ kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+ kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+ kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+ kfree(phba->vpi_ids);
+ free_vpi_bmask:
+ kfree(phba->vpi_bmask);
+ free_rpi_ids:
+ kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+ kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+ return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ if (phba->sli4_hba.extents_in_use) {
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ } else {
+ kfree(phba->vpi_bmask);
+ phba->sli4_hba.max_cfg_param.vpi_used = 0;
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold port extent count response
+ * @extnt_size: buffer to hold port extent size response.
+ *
+ * This function calls the port to read the host allocated extents
+ * for a particular type.
+ **/
+int
+lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_cnt, uint16_t *extnt_size)
+{
+ bool emb;
+ int rc = 0;
+ uint16_t curr_blks = 0;
+ uint32_t req_len, emb_len;
+ uint32_t alloc_len, mbox_tmo;
+ struct list_head *blk_list_head;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ LPFC_MBOXQ_t *mbox;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ blk_list_head = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* Count the number of extents currently allocatd for this type. */
+ list_for_each_entry(rsrc_blk, blk_list_head, list) {
+ if (curr_blks == 0) {
+ /*
+ * The GET_ALLOCATED mailbox does not return the size,
+ * just the count. The size should be just the size
+ * stored in the current allocated block and all sizes
+ * for an extent type are the same so set the return
+ * value now.
+ */
+ *extnt_size = rsrc_blk->rsrc_size;
+ }
+ curr_blks++;
+ }
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ emb = LPFC_SLI4_MBX_EMBED;
+ req_len = emb_len;
+ if (req_len > emb_len) {
+ req_len = curr_blks * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
+ req_len, emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2983 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port does not guarantee to respond to
+ * all extents counts request so update the local variable with the
+ * allocated count from the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ shdr = &rsrc_ext->header.cfg_shdr;
+ *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ shdr = &n_rsrc->cfg_shdr;
+ *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ }
+
+ if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2984 Failed to read allocated resources "
+ "for type %d - Status 0x%x Add'l Status 0x%x.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status, &shdr->response),
+ bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
+ rc = -EIO;
+ goto err_exit;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of els buffers that have been allocated and
+ * repost them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. It attempts to construct blocks
+ * of els buffer sgls which contains contiguous xris and uses the non-embedded
+ * SGL block post mailbox commands to post them to the port. For single els
+ * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
+ * mailbox command for posting.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+static int
+lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL;
+ struct lpfc_sglq *sglq_entry_next = NULL;
+ struct lpfc_sglq *sglq_entry_first = NULL;
+ int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
+ int last_xritag = NO_XRI;
+ struct lpfc_sli_ring *pring;
+ LIST_HEAD(prep_sgl_list);
+ LIST_HEAD(blck_sgl_list);
+ LIST_HEAD(allc_sgl_list);
+ LIST_HEAD(post_sgl_list);
+ LIST_HEAD(free_sgl_list);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ total_cnt = phba->sli4_hba.els_xri_cnt;
+ list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+ &allc_sgl_list, list) {
+ list_del_init(&sglq_entry->list);
+ block_cnt++;
+ if ((last_xritag != NO_XRI) &&
+ (sglq_entry->sli4_xritag != last_xritag + 1)) {
+ /* a hole in xri block, form a sgl posting block */
+ list_splice_init(&prep_sgl_list, &blck_sgl_list);
+ post_cnt = block_cnt - 1;
+ /* prepare list for next posting block */
+ list_add_tail(&sglq_entry->list, &prep_sgl_list);
+ block_cnt = 1;
+ } else {
+ /* prepare list for next posting block */
+ list_add_tail(&sglq_entry->list, &prep_sgl_list);
+ /* enough sgls for non-embed sgl mbox command */
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ list_splice_init(&prep_sgl_list,
+ &blck_sgl_list);
+ post_cnt = block_cnt;
+ block_cnt = 0;
+ }
+ }
+ num_posted++;
+
+ /* keep track of last sgl's xritag */
+ last_xritag = sglq_entry->sli4_xritag;
+
+ /* end of repost sgl list condition for els buffers */
+ if (num_posted == phba->sli4_hba.els_xri_cnt) {
+ if (post_cnt == 0) {
+ list_splice_init(&prep_sgl_list,
+ &blck_sgl_list);
+ post_cnt = block_cnt;
+ } else if (block_cnt == 1) {
+ status = lpfc_sli4_post_sgl(phba,
+ sglq_entry->phys, 0,
+ sglq_entry->sli4_xritag);
+ if (!status) {
+ /* successful, put sgl to posted list */
+ list_add_tail(&sglq_entry->list,
+ &post_sgl_list);
+ } else {
+ /* Failure, put sgl to free list */
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_SLI,
+ "3159 Failed to post els "
+ "sgl, xritag:x%x\n",
+ sglq_entry->sli4_xritag);
+ list_add_tail(&sglq_entry->list,
+ &free_sgl_list);
+ total_cnt--;
+ }
+ }
+ }
+
+ /* continue until a nembed page worth of sgls */
+ if (post_cnt == 0)
+ continue;
+
+ /* post the els buffer list sgls as a block */
+ status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
+ post_cnt);
+
+ if (!status) {
+ /* success, put sgl list to posted sgl list */
+ list_splice_init(&blck_sgl_list, &post_sgl_list);
+ } else {
+ /* Failure, put sgl list to free sgl list */
+ sglq_entry_first = list_first_entry(&blck_sgl_list,
+ struct lpfc_sglq,
+ list);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3160 Failed to post els sgl-list, "
+ "xritag:x%x-x%x\n",
+ sglq_entry_first->sli4_xritag,
+ (sglq_entry_first->sli4_xritag +
+ post_cnt - 1));
+ list_splice_init(&blck_sgl_list, &free_sgl_list);
+ total_cnt -= post_cnt;
+ }
+
+ /* don't reset xirtag due to hole in xri block */
+ if (block_cnt == 0)
+ last_xritag = NO_XRI;
+
+ /* reset els sgl post count for next round of posting */
+ post_cnt = 0;
+ }
+ /* update the number of XRIs posted for ELS */
+ phba->sli4_hba.els_xri_cnt = total_cnt;
+
+ /* free the els sgls failed to post */
+ lpfc_free_sgl_list(phba, &free_sgl_list);
+
+ /* push els sgls posted to the availble list */
+ if (!list_empty(&post_sgl_list)) {
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_splice_init(&post_sgl_list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3161 Failure to post els sgl to port.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device intialization PCI function. This
+ * function is called by the HBA intialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+ int rc;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mqe *mqe;
+ uint8_t *vpd;
+ uint32_t vpd_size;
+ uint32_t ftr_rsp = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_dmabuf *mp;
+
+ /* Perform a PCI function reset to start from clean */
+ rc = lpfc_pci_function_reset(phba);
+ if (unlikely(rc))
+ return -ENODEV;
+
+ /* Check the HBA Host Status Register for readyness */
+ rc = lpfc_sli4_post_status_check(phba);
+ if (unlikely(rc))
+ return -ENODEV;
+ else {
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /*
+ * Allocate a single mailbox container for initializing the
+ * port.
+ */
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* Issue READ_REV to collect vpd and FW information. */
+ vpd_size = SLI4_PAGE_SIZE;
+ vpd = kzalloc(vpd_size, GFP_KERNEL);
+ if (!vpd) {
+ rc = -ENOMEM;
+ goto out_free_mbox;
+ }
+
+ rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+ if (unlikely(rc)) {
+ kfree(vpd);
+ goto out_free_mbox;
+ }
+
+ mqe = &mboxq->u.mqe;
+ phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
+ if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+ phba->hba_flag |= HBA_FCOE_MODE;
+ else
+ phba->hba_flag &= ~HBA_FCOE_MODE;
+
+ if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+ LPFC_DCBX_CEE_MODE)
+ phba->hba_flag |= HBA_FIP_SUPPORT;
+ else
+ phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
+ phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0376 READ_REV Error. SLI Level %d "
+ "FCoE enabled %d\n",
+ phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+ rc = -EIO;
+ kfree(vpd);
+ goto out_free_mbox;
+ }
+
+ /*
+ * Continue initialization with default values even if driver failed
+ * to read FCoE param config regions, only read parameters if the
+ * board is FCoE
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE &&
+ lpfc_sli4_read_fcoe_params(phba))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+ "2570 Failed to read FCoE parameters\n");
+
+ /*
+ * Retrieve sli4 device physical port name, failure of doing it
+ * is considered as non-fatal.
+ */
+ rc = lpfc_sli4_retrieve_pport_name(phba);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3080 Successful retrieving SLI4 device "
+ "physical port name: %s.\n", phba->Port);
+
+ /*
+ * Evaluate the read rev and vpd data. Populate the driver
+ * state with the results. If this routine fails, the failure
+ * is not fatal as the driver will use generic values.
+ */
+ rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+ if (unlikely(!rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0377 Error %d parsing vpd. "
+ "Using defaults.\n", rc);
+ rc = 0;
+ }
+ kfree(vpd);
+
+ /* Save information as VPD data */
+ phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
+ phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+ phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
+ phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
+ &mqe->un.read_rev);
+ phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
+ &mqe->un.read_rev);
+ phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
+ &mqe->un.read_rev);
+ phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
+ &mqe->un.read_rev);
+ phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
+ memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
+ phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
+ memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
+ phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
+ memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0380 READ_REV Status x%x "
+ "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ bf_get(lpfc_mqe_status, mqe),
+ phba->vpd.rev.opFwName,
+ phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
+ phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3362 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, rc);
+ phba->pport->cfg_lun_queue_depth = rc;
+ }
+
+
+ /*
+ * Discover the port's supported feature set and match it against the
+ * hosts requests.
+ */
+ lpfc_request_features(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /*
+ * The port must support FCP initiator mode as this is the
+ * only mode running in the host.
+ */
+ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "0378 No support for fcpi mode.\n");
+ ftr_rsp++;
+ }
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
+ /*
+ * If the port cannot support the host's requested features
+ * then turn off the global config parameters to disable the
+ * feature in the driver. This is not a fatal error.
+ */
+ phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
+ if (phba->cfg_enable_bg) {
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ else
+ ftr_rsp++;
+ }
+
+ if (phba->max_vpi && phba->cfg_enable_npiv &&
+ !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+ ftr_rsp++;
+
+ if (ftr_rsp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "0379 Feature Mismatch Data: x%08x %08x "
+ "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
+ mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
+ phba->cfg_enable_npiv, phba->max_vpi);
+ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+ phba->cfg_enable_bg = 0;
+ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+ phba->cfg_enable_npiv = 0;
+ }
+
+ /* These SLI3 features are assumed in SLI4 */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
+ spin_unlock_irq(&phba->hbalock);
+
+ /*
+ * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
+ * calls depends on these resources to complete port setup.
+ */
+ rc = lpfc_sli4_alloc_resource_identifiers(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2920 Failed to alloc Resource IDs "
+ "rc = x%x\n", rc);
+ goto out_free_mbox;
+ }
+
+ /* Read the port's service parameters. */
+ rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ rc = -ENOMEM;
+ goto out_free_mbox;
+ }
+
+ mboxq->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ if (rc == MBX_SUCCESS) {
+ memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
+ rc = 0;
+ }
+
+ /*
+ * This memory was allocated by the lpfc_read_sparam routine. Release
+ * it to the mbuf pool.
+ */
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mboxq->context1 = NULL;
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0382 READ_SPARAM command failed "
+ "status %d, mbxStatus x%x\n",
+ rc, bf_get(lpfc_mqe_status, mqe));
+ phba->link_state = LPFC_HBA_ERROR;
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ lpfc_update_vport_wwn(vport);
+
+ /* Update the fc_host data structures with new wwn. */
+ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+
+ /* update host els and scsi xri-sgl sizes and mappings */
+ rc = lpfc_sli4_xri_sgl_update(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "1400 Failed to update xri-sgl size and "
+ "mapping: %d\n", rc);
+ goto out_free_mbox;
+ }
+
+ /* register the els sgl pool to the port */
+ rc = lpfc_sli4_repost_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+
+ /* register the allocated scsi sgl pool to the port */
+ rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0383 Error %d during scsi sgl post "
+ "operation\n", rc);
+ /* Some Scsi buffers were moved to the abort scsi list */
+ /* A pci function reset will repost them */
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+
+ /* Post the rpi header region to the device. */
+ rc = lpfc_sli4_post_all_rpi_hdrs(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0393 Error %d during rpi post operation\n",
+ rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ lpfc_sli4_node_prep(phba);
+
+ /* Create all the SLI4 queues */
+ rc = lpfc_sli4_queue_create(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3089 Failed to allocate queues\n");
+ rc = -ENODEV;
+ goto out_stop_timers;
+ }
+ /* Set up all the queues to the device */
+ rc = lpfc_sli4_queue_setup(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0381 Error %d during queue setup.\n ", rc);
+ goto out_destroy_queue;
+ }
+
+ /* Arm the CQs and then EQs on device */
+ lpfc_sli4_arm_cqeq_intr(phba);
+
+ /* Indicate device interrupt mode */
+ phba->sli4_hba.intr_enable = 1;
+
+ /* Allow asynchronous mailbox command to go through */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Post receive buffers to the device */
+ lpfc_sli4_rb_setup(phba);
+
+ /* Reset HBA FCF states after HBA reset */
+ phba->fcf.fcf_flag = 0;
+ phba->fcf.current_rec.flag = 0;
+
+ /* Start the ELS watchdog timer */
+ mod_timer(&vport->els_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+
+ /* Start heart beat timer */
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ phba->hb_outstanding = 0;
+ phba->last_completion_time = jiffies;
+
+ /* Start error attention (ERATT) polling timer */
+ mod_timer(&phba->eratt_poll,
+ jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
+
+ /* Enable PCIe device Advanced Error Reporting (AER) if configured */
+ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+ rc = pci_enable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2829 This device supports "
+ "Advanced Error Reporting (AER)\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2830 This device does not support "
+ "Advanced Error Reporting (AER)\n");
+ phba->cfg_aer_support = 0;
+ }
+ rc = 0;
+ }
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ /*
+ * The FC Port needs to register FCFI (index 0)
+ */
+ lpfc_reg_fcfi(phba, mboxq);
+ mboxq->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS)
+ goto out_unset_queue;
+ rc = 0;
+ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
+ &mboxq->u.mqe.un.reg_fcfi);
+
+ /* Check if the port is configured to be disabled */
+ lpfc_sli_read_link_ste(phba);
+ }
+
+ /*
+ * The port is ready, set the host's link state to LINK_DOWN
+ * in preparation for link interrupts.
+ */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_LINK_DOWN;
+ spin_unlock_irq(&phba->hbalock);
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->hba_flag & LINK_DISABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3103 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3104 Adapter failed to issue "
+ "DOWN_LINK mbox cmd, rc:x%x\n", rc);
+ goto out_unset_queue;
+ }
+ } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+ /* don't perform init_link on SLI4 FC port loopback test */
+ if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ goto out_unset_queue;
+ }
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+out_unset_queue:
+ /* Unset all the queues set up in this routine when error out */
+ lpfc_sli4_queue_unset(phba);
+out_destroy_queue:
+ lpfc_sli4_queue_destroy(phba);
+out_stop_timers:
+ lpfc_stop_hba_timers(phba);
+out_free_mbox:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_mbox_timeout - Timeout call back function for mbox timer
+ * @ptr: context object - pointer to hba structure.
+ *
+ * This is the callback function for mailbox timer. The mailbox
+ * timer is armed when a new mailbox command is issued and the timer
+ * is deleted when the mailbox complete. The function is called by
+ * the kernel timer code when a mailbox does not complete within
+ * expected time. This function wakes up the worker thread to
+ * process the mailbox timeout and returns. All the processing is
+ * done by the worker thread function lpfc_mbox_timeout_handler.
+ **/
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ unsigned long iflag;
+ uint32_t tmo_posted;
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ * are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+ uint32_t idx;
+ struct lpfc_queue *mcq;
+ struct lpfc_mcqe *mcqe;
+ bool pending_completions = false;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Check for completions on mailbox completion queue */
+
+ mcq = phba->sli4_hba.mbx_cq;
+ idx = mcq->hba_index;
+ while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+ mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+ if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+ (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+ pending_completions = true;
+ break;
+ }
+ idx = (idx + 1) % mcq->entry_count;
+ if (mcq->hba_index == idx)
+ break;
+ }
+ return pending_completions;
+
+}
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ * that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For sli4, it is possible to miss an interrupt. As such mbox completions
+ * maybe missed causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+ uint32_t eqidx;
+ struct lpfc_queue *fpeq = NULL;
+ struct lpfc_eqe *eqe;
+ bool mbox_pending;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Find the eq associated with the mcq */
+
+ if (phba->sli4_hba.hba_eq)
+ for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+ if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+ phba->sli4_hba.mbx_cq->assoc_qid) {
+ fpeq = phba->sli4_hba.hba_eq[eqidx];
+ break;
+ }
+ if (!fpeq)
+ return false;
+
+ /* Turn off interrupts from this EQ */
+
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /* Check to see if a mbox completion is pending */
+
+ mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+ /*
+ * If a mbox completion is pending, process all the events on EQ
+ * associated with the mbox completion queue (this could include
+ * mailbox commands, async events, els commands, receive queue data
+ * and fcp commands)
+ */
+
+ if (mbox_pending)
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ return mbox_pending;
+
+}
+
+/**
+ * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from worker thread when a mailbox command times out.
+ * The caller is not required to hold any locks. This function will reset the
+ * HBA and recover all the pending commands.
+ **/
+void
+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
+ MAILBOX_t *mb = NULL;
+
+ struct lpfc_sli *psli = &phba->sli;
+
+ /* If the mailbox completed, process the completion and return */
+ if (lpfc_sli4_process_missed_mbox_completions(phba))
+ return;
+
+ if (pmbox != NULL)
+ mb = &pmbox->u.mb;
+ /* Check the pmbox pointer first. There is a race condition
+ * between the mbox timeout handler getting executed in the
+ * worklist and the mailbox actually completing. When this
+ * race condition occurs, the mbox_active will be NULL.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (pmbox == NULL) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_MBOX | LOG_SLI,
+ "0353 Active Mailbox cleared - mailbox timeout "
+ "exiting\n");
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ /* Mbox cmd <mbxCommand> timeout */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+ mb->mbxCommand,
+ phba->pport->port_state,
+ phba->sli.sli_flag,
+ phba->sli.mbox_active);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Setting state unknown so lpfc_sli_abort_iocb_ring
+ * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
+ * it to fail all outstanding SCSI IO.
+ */
+ spin_lock_irq(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_LINK_UNKNOWN;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_sli_abort_fcp_rings(phba);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0345 Resetting board due to mailbox timeout\n");
+
+ /* Reset the HBA device */
+ lpfc_reset_hba(phba);
+}
+
+/**
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
+ * The mailbox command can be submitted in polling mode, in which case
+ * this function will wait in a polling loop for the completion of the
+ * mailbox.
+ * If the mailbox is submitted in no_wait mode (not polling) the
+ * function will submit the command and returns immediately without waiting
+ * for the mailbox completion. The no_wait is supported only when HBA
+ * is in SLI2/SLI3 mode - interrupts are enabled.
+ * The SLI interface allows only one mailbox pending at a time. If the
+ * mailbox is issued in polling mode and there is already a mailbox
+ * pending, then the function will return an error. If the mailbox is issued
+ * in NO_WAIT mode and there is a mailbox pending already, the function
+ * will return MBX_BUSY after queuing the mailbox into mailbox queue.
+ * The sli layer owns the mailbox object until the completion of mailbox
+ * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
+ * return codes the caller owns the mailbox command after the return of
+ * the function.
+ **/
+static int
+lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
+ uint32_t flag)
+{
+ MAILBOX_t *mbx;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t status, evtctr;
+ uint32_t ha_copy, hc_copy;
+ int i;
+ unsigned long timeout;
+ unsigned long drvr_flag = 0;
+ uint32_t word0, ldata;
+ void __iomem *to_slim;
+ int processing_queue = 0;
+
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ if (!pmbox) {
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ /* processing mbox queue from intr_handler */
+ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return MBX_SUCCESS;
+ }
+ processing_queue = 1;
+ pmbox = lpfc_mbox_get(phba);
+ if (!pmbox) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return MBX_SUCCESS;
+ }
+ }
+
+ if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+ pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+ if(!pmbox->vport) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_VPORT,
+ "1806 Mbox x%x failed. No vport\n",
+ pmbox->u.mb.mbxCommand);
+ dump_stack();
+ goto out_not_finished;
+ }
+ }
+
+ /* If the PCI channel is in offline state, do not post mbox. */
+ if (unlikely(pci_channel_offline(phba->pcidev))) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ goto out_not_finished;
+ }
+
+ /* If HBA has a deferred error attention, fail the iocb. */
+ if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ goto out_not_finished;
+ }
+
+ psli = &phba->sli;
+
+ mbx = &pmbox->u.mb;
+ status = MBX_SUCCESS;
+
+ if (phba->link_state == LPFC_HBA_ERROR) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ /* Mbox command <mbxCommand> cannot issue */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):0311 Mailbox command x%x cannot "
+ "issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+ !(hc_copy & HC_MBINT_ENA)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2528 Mailbox command x%x cannot "
+ "issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+ }
+
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ /* Polling for a mbox command when another one is already active
+ * is not allowed in SLI. Also, the driver must have established
+ * SLI2 mode to queue and process multiple mbox commands.
+ */
+
+ if (flag & MBX_POLL) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ /* Mbox command <mbxCommand> cannot issue */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2529 Mailbox command x%x "
+ "cannot issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand,
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ /* Mbox command <mbxCommand> cannot issue */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2530 Mailbox command x%x "
+ "cannot issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand,
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ /* Another mailbox command is still being processed, queue this
+ * command to be processed later.
+ */
+ lpfc_mbox_put(phba, pmbox);
+
+ /* Mbox cmd issue - BUSY */
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0308 Mbox cmd issue - BUSY Data: "
+ "x%x x%x x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0xffffff,
+ mbx->mbxCommand, phba->pport->port_state,
+ psli->sli_flag, flag);
+
+ psli->slistat.mbox_busy++;
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ if (pmbox->vport) {
+ lpfc_debugfs_disc_trc(pmbox->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Bsy: cmd:x%x mb:x%x x%x",
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
+ }
+
+ return MBX_BUSY;
+ }
+
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+ /* If we are not polling, we MUST be in SLI2 mode */
+ if (flag != MBX_POLL) {
+ if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ /* Mbox command <mbxCommand> cannot issue */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2531 Mailbox command x%x "
+ "cannot issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand,
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+ /* timeout active mbox command */
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+ 1000);
+ mod_timer(&psli->mbox_tmo, jiffies + timeout);
+ }
+
+ /* Mailbox cmd <cmd> issue */
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
+ "x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ mbx->mbxCommand, phba->pport->port_state,
+ psli->sli_flag, flag);
+
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
+ if (pmbox->vport) {
+ lpfc_debugfs_disc_trc(pmbox->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Send vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Send: cmd:x%x mb:x%x x%x",
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
+ }
+ }
+
+ psli->slistat.mbox_cmd++;
+ evtctr = psli->slistat.mbox_event;
+
+ /* next set own bit for the adapter and copy over command word */
+ mbx->mbxOwner = OWN_CHIP;
+
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
+ = (uint8_t *)phba->mbox_ext
+ - (uint8_t *)phba->mbox;
+ }
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(pmbox->context2,
+ (uint8_t *)phba->mbox_ext,
+ pmbox->in_ext_byte_len);
+ }
+ /* Copy command data to host SLIM area */
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
+ } else {
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
+ = MAILBOX_HBA_EXT_OFFSET;
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_to_slim(phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->context2, pmbox->in_ext_byte_len);
+
+ }
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
+ /* copy command data into host mbox for cmpl */
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
+ }
+
+ /* First copy mbox command data to HBA SLIM, skip past first
+ word */
+ to_slim = phba->MBslimaddr + sizeof (uint32_t);
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
+ MAILBOX_CMD_SIZE - sizeof (uint32_t));
+
+ /* Next copy over first word, with mbxOwner set */
+ ldata = *((uint32_t *)mbx);
+ to_slim = phba->MBslimaddr;
+ writel(ldata, to_slim);
+ readl(to_slim); /* flush */
+
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
+ /* switch over to host mailbox */
+ psli->sli_flag |= LPFC_SLI_ACTIVE;
+ }
+ }
+
+ wmb();
+
+ switch (flag) {
+ case MBX_NOWAIT:
+ /* Set up reference to mailbox command */
+ psli->mbox_active = pmbox;
+ /* Interrupt board to do it */
+ writel(CA_MBATT, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ /* Don't wait for it to finish, just return */
+ break;
+
+ case MBX_POLL:
+ /* Set up null reference to mailbox command */
+ psli->mbox_active = NULL;
+ /* Interrupt board to do it */
+ writel(CA_MBATT, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+ /* First read mbox status word */
+ word0 = *((uint32_t *)phba->mbox);
+ word0 = le32_to_cpu(word0);
+ } else {
+ /* First read mbox status word */
+ if (lpfc_readl(phba->MBslimaddr, &word0)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
+ }
+
+ /* Read the HBA Host Attention Register */
+ if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+ 1000) + jiffies;
+ i = 0;
+ /* Wait for command to complete */
+ while (((word0 & OWN_CHIP) == OWN_CHIP) ||
+ (!(ha_copy & HA_MBATT) &&
+ (phba->link_state > LPFC_WARM_START))) {
+ if (time_after(jiffies, timeout)) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
+
+ /* Check if we took a mbox interrupt while we were
+ polling */
+ if (((word0 & OWN_CHIP) != OWN_CHIP)
+ && (evtctr != psli->slistat.mbox_event))
+ break;
+
+ if (i++ > 10) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ msleep(1);
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ }
+
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+ /* First copy command data */
+ word0 = *((uint32_t *)phba->mbox);
+ word0 = le32_to_cpu(word0);
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
+ MAILBOX_t *slimmb;
+ uint32_t slimword0;
+ /* Check real SLIM for any errors */
+ slimword0 = readl(phba->MBslimaddr);
+ slimmb = (MAILBOX_t *) & slimword0;
+ if (((slimword0 & OWN_CHIP) != OWN_CHIP)
+ && slimmb->mbxStatus) {
+ psli->sli_flag &=
+ ~LPFC_SLI_ACTIVE;
+ word0 = slimword0;
+ }
+ }
+ } else {
+ /* First copy command data */
+ word0 = readl(phba->MBslimaddr);
+ }
+ /* Read the HBA Host Attention Register */
+ if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
+ }
+
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+ /* copy results back to user */
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+ pmbox->context2,
+ pmbox->out_ext_byte_len);
+ }
+ } else {
+ /* First copy command data */
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
+ MAILBOX_CMD_SIZE);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_from_slim(pmbox->context2,
+ phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->out_ext_byte_len);
+ }
+ }
+
+ writel(HA_MBATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ status = mbx->mbxStatus;
+ }
+
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return status;
+
+out_not_finished:
+ if (processing_queue) {
+ pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ lpfc_mbox_cmpl_put(phba, pmbox);
+ }
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function blocks the posting of SLI4 asynchronous mailbox commands from
+ * the driver internal pending mailbox queue. It will then try to wait out the
+ * possible outstanding mailbox command before return.
+ *
+ * Returns:
+ * 0 - the outstanding mailbox command completed; otherwise, the wait for
+ * the outstanding mailbox command timed out.
+ **/
+static int
+lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int rc = 0;
+ unsigned long timeout = 0;
+
+ /* Mark the asynchronous mailbox command posting as blocked */
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ if (phba->sli.mbox_active)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) *
+ 1000) + jiffies;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Make sure the mailbox is really active */
+ if (timeout)
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
+ /* Wait for the outstnading mailbox command to complete */
+ while (phba->sli.mbox_active) {
+ /* Check active mailbox complete status every 2ms */
+ msleep(2);
+ if (time_after(jiffies, timeout)) {
+ /* Timeout, marked the outstanding cmd not complete */
+ rc = 1;
+ break;
+ }
+ }
+
+ /* Can not cleanly block async mailbox command, fails it */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function unblocks and resume posting of SLI4 asynchronous mailbox
+ * commands from the driver internal pending mailbox queue. It makes sure
+ * that there is no outstanding mailbox command before resuming posting
+ * asynchronous mailbox commands. If, for any reason, there is outstanding
+ * mailbox command, it will try to wait it out before resuming asynchronous
+ * mailbox command posting.
+ **/
+static void
+lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ spin_lock_irq(&phba->hbalock);
+ if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ /* Asynchronous mailbox posting is not blocked, do nothing */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ /* Outstanding synchronous mailbox command is guaranteed to be done,
+ * successful or timeout, after timing-out the outstanding mailbox
+ * command shall always be removed, so just unblock posting async
+ * mailbox command and resume
+ */
+ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* wake up worker thread to post asynchronlous mailbox command */
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function waits for the bootstrap mailbox register ready bit from
+ * port for twice the regular mailbox command timeout value.
+ *
+ * 0 - no timeout on waiting for bootstrap mailbox register ready.
+ * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
+ **/
+static int
+lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ uint32_t db_ready;
+ unsigned long timeout;
+ struct lpfc_register bmbx_reg;
+
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
+ * 1000) + jiffies;
+
+ do {
+ bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+ db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+ if (!db_ready)
+ msleep(2);
+
+ if (time_after(jiffies, timeout))
+ return MBXERR_ERROR;
+ } while (!db_ready);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function posts a mailbox to the port. The mailbox is expected
+ * to be comletely filled in and ready for the port to operate on it.
+ * This routine executes a synchronous completion operation on the
+ * mailbox by polling for its completion.
+ *
+ * The caller must not be holding any locks when calling this routine.
+ *
+ * Returns:
+ * MBX_SUCCESS - mailbox posted successfully
+ * Any of the MBX error values.
+ **/
+static int
+lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc = MBX_SUCCESS;
+ unsigned long iflag;
+ uint32_t mcqe_status;
+ uint32_t mbx_cmnd;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_mqe *mb = &mboxq->u.mqe;
+ struct lpfc_bmbx_create *mbox_rgn;
+ struct dma_address *dma_address;
+
+ /*
+ * Only one mailbox can be active to the bootstrap mailbox region
+ * at a time and there is no queueing provided.
+ */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2532 Mailbox command x%x (x%x/x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, MBX_POLL);
+ return MBXERR_ERROR;
+ }
+ /* The server grabs the token and owns it until release */
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = mboxq;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ /* wait for bootstrap mbox register for readyness */
+ rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+ if (rc)
+ goto exit;
+
+ /*
+ * Initialize the bootstrap memory region to avoid stale data areas
+ * in the mailbox post. Then copy the caller's mailbox contents to
+ * the bmbx mailbox region.
+ */
+ mbx_cmnd = bf_get(lpfc_mqe_command, mb);
+ memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+ lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+ sizeof(struct lpfc_mqe));
+
+ /* Post the high mailbox dma address to the port and wait for ready. */
+ dma_address = &phba->sli4_hba.bmbx.dma_address;
+ writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
+
+ /* wait for bootstrap mbox register for hi-address write done */
+ rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+ if (rc)
+ goto exit;
+
+ /* Post the low mailbox dma address to the port. */
+ writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
+
+ /* wait for bootstrap mbox register for low address write done */
+ rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+ if (rc)
+ goto exit;
+
+ /*
+ * Read the CQ to ensure the mailbox has completed.
+ * If so, update the mailbox status so that the upper layers
+ * can complete the request normally.
+ */
+ lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+ sizeof(struct lpfc_mqe));
+ mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
+ lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+ sizeof(struct lpfc_mcqe));
+ mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
+ /*
+ * When the CQE status indicates a failure and the mailbox status
+ * indicates success then copy the CQE status into the mailbox status
+ * (and prefix it with x4000).
+ */
+ if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+ if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mb,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
+ rc = MBXERR_ERROR;
+ } else
+ lpfc_sli4_swap_str(phba, mboxq);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
+ " x%x x%x CQ: x%x x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ bf_get(lpfc_mqe_status, mb),
+ mb->un.mb_words[0], mb->un.mb_words[1],
+ mb->un.mb_words[2], mb->un.mb_words[3],
+ mb->un.mb_words[4], mb->un.mb_words[5],
+ mb->un.mb_words[6], mb->un.mb_words[7],
+ mb->un.mb_words[8], mb->un.mb_words[9],
+ mb->un.mb_words[10], mb->un.mb_words[11],
+ mb->un.mb_words[12], mboxq->mcqe.word0,
+ mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+ mboxq->mcqe.trailer);
+exit:
+ /* We are holding the token, no needed for lock when release */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rc;
+}
+
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * Return codes the caller owns the mailbox command after the return of the
+ * function.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint32_t flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ unsigned long iflags;
+ int rc;
+
+ /* dump from issue mailbox command if setup */
+ lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
+
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2544 Mailbox command x%x (x%x/x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ /* Detect polling mode and jump to a handler */
+ if (!phba->sli4_hba.intr_enable) {
+ if (flag == MBX_POLL)
+ rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ else
+ rc = -EIO;
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "(%d):2541 Mailbox command x%x "
+ "(x%x/x%x) failure: "
+ "mqe_sta: x%x mcqe_sta: x%x/x%x "
+ "Data: x%x x%x\n,",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+ bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+ bf_get(lpfc_mcqe_ext_status,
+ &mboxq->mcqe),
+ psli->sli_flag, flag);
+ return rc;
+ } else if (flag == MBX_POLL) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "(%d):2542 Try to issue mailbox command "
+ "x%x (x%x/x%x) synchronously ahead of async"
+ "mailbox command queue: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ /* Try to block the asynchronous mailbox posting */
+ rc = lpfc_sli4_async_mbox_block(phba);
+ if (!rc) {
+ /* Successfully blocked, now issue sync mbox cmd */
+ rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_MBOX | LOG_SLI,
+ "(%d):2597 Sync Mailbox command "
+ "x%x (x%x/x%x) failure: "
+ "mqe_sta: x%x mcqe_sta: x%x/x%x "
+ "Data: x%x x%x\n,",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+ bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+ bf_get(lpfc_mcqe_ext_status,
+ &mboxq->mcqe),
+ psli->sli_flag, flag);
+ /* Unblock the async mailbox posting afterward */
+ lpfc_sli4_async_mbox_unblock(phba);
+ }
+ return rc;
+ }
+
+ /* Now, interrupt mode asynchrous mailbox command */
+ rc = lpfc_mbox_cmd_check(phba, mboxq);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2543 Mailbox command x%x (x%x/x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ /* Put the mailbox command to the driver internal FIFO */
+ psli->slistat.mbox_busy++;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_mbox_put(phba, mboxq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0354 Mbox cmd issue - Enqueue Data: "
+ "x%x (x%x/x%x) x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0xffffff,
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ phba->pport->port_state,
+ psli->sli_flag, MBX_NOWAIT);
+ /* Wake up worker thread to transport mailbox command from head */
+ lpfc_worker_wake_up(phba);
+
+ return MBX_BUSY;
+
+out_not_finished:
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by worker thread to send a mailbox command to
+ * SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mboxq;
+ int rc = MBX_SUCCESS;
+ unsigned long iflags;
+ struct lpfc_mqe *mqe;
+ uint32_t mbx_cmnd;
+
+ /* Check interrupt mode before post async mailbox command */
+ if (unlikely(!phba->sli4_hba.intr_enable))
+ return MBX_NOT_FINISHED;
+
+ /* Check for mailbox command service token */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_NOT_FINISHED;
+ }
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_NOT_FINISHED;
+ }
+ if (unlikely(phba->sli.mbox_active)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0384 There is pending active mailbox cmd\n");
+ return MBX_NOT_FINISHED;
+ }
+ /* Take the mailbox command service token */
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+ /* Get the next mailbox command from head of queue */
+ mboxq = lpfc_mbox_get(phba);
+
+ /* If no more mailbox command waiting for post, we're done */
+ if (!mboxq) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_SUCCESS;
+ }
+ phba->sli.mbox_active = mboxq;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Check device readiness for posting mailbox command */
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc))
+ /* Driver clean routine will clean up pending mailbox */
+ goto out_not_finished;
+
+ /* Prepare the mbox command to be posted */
+ mqe = &mboxq->u.mqe;
+ mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+ /* Start timer for the mbox_tmo and log some mailbox post messages */
+ mod_timer(&psli->mbox_tmo, (jiffies +
+ msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
+ "x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ phba->pport->port_state, psli->sli_flag);
+
+ if (mbx_cmnd != MBX_HEARTBEAT) {
+ if (mboxq->vport) {
+ lpfc_debugfs_disc_trc(mboxq->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Send vport: cmd:x%x mb:x%x x%x",
+ mbx_cmnd, mqe->un.mb_words[0],
+ mqe->un.mb_words[1]);
+ } else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Send: cmd:x%x mb:x%x x%x",
+ mbx_cmnd, mqe->un.mb_words[0],
+ mqe->un.mb_words[1]);
+ }
+ }
+ psli->slistat.mbox_cmd++;
+
+ /* Post the mailbox command to the port */
+ rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2533 Mailbox command x%x (x%x/x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, MBX_NOWAIT);
+ goto out_not_finished;
+ }
+
+ return rc;
+
+out_not_finished:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->sli.mbox_active) {
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ /* Release the token */
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ *
+ * Return codes the caller owns the mailbox command after the return of the
+ * function.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+ return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+ phba->lpfc_sli_handle_slow_ring_event =
+ lpfc_sli_handle_slow_ring_event_s3;
+ phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+ phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+ phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+ phba->lpfc_sli_handle_slow_ring_event =
+ lpfc_sli_handle_slow_ring_event_s4;
+ phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+ phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+ phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1420 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * __lpfc_sli_ringtx_put - Add an iocb to the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held to add a command
+ * iocb to the txq when SLI layer cannot submit the command iocb
+ * to the ring.
+ **/
+void
+__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
+{
+ /* Insert the caller's iocb in the txq tail for later processing. */
+ list_add_tail(&piocb->list, &pring->txq);
+}
+
+/**
+ * lpfc_sli_next_iocb - Get the next iocb in the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held before a new
+ * iocb is submitted to the firmware. This function checks
+ * txq to flush the iocbs in txq to Firmware before
+ * submitting new iocbs to the Firmware.
+ * If there are iocbs in the txq which need to be submitted
+ * to firmware, lpfc_sli_next_iocb returns the first element
+ * of the txq after dequeuing it from txq.
+ * If there is no iocb in the txq then the function will return
+ * *piocb and *piocb is set to NULL. Caller needs to check
+ * *piocb to find if there are more commands in the txq.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq **piocb)
+{
+ struct lpfc_iocbq * nextiocb;
+
+ nextiocb = lpfc_sli_ringtx_get(phba, pring);
+ if (!nextiocb) {
+ nextiocb = *piocb;
+ *piocb = NULL;
+ }
+
+ return nextiocb;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
+ * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR. When the link is down,
+ * this function allows only iocbs for posting buffers. This function finds
+ * next available slot in the command ring and posts the command to the
+ * available slot and writes the port attention register to request HBA start
+ * processing new iocb. If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
+ * the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submit the iocb to firmware or after adding to the
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_iocbq *nextiocb;
+ IOCB_t *iocb;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+
+ if (piocb->iocb_cmpl && (!piocb->vport) &&
+ (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "1807 IOCB x%x failed. No vport\n",
+ piocb->iocb.ulpCommand);
+ dump_stack();
+ return IOCB_ERROR;
+ }
+
+
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
+ /* If HBA has a deferred error attention, fail the iocb. */
+ if (unlikely(phba->hba_flag & DEFER_ERATT))
+ return IOCB_ERROR;
+
+ /*
+ * We should never get an IOCB if we are in a < LINK_DOWN state
+ */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IOCB_ERROR;
+
+ /*
+ * Check to see if we are blocking IOCB processing because of a
+ * outstanding event.
+ */
+ if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
+ goto iocb_busy;
+
+ if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
+ /*
+ * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
+ * can be issued if the link is not up.
+ */
+ switch (piocb->iocb.ulpCommand) {
+ case CMD_GEN_REQUEST64_CR:
+ case CMD_GEN_REQUEST64_CX:
+ if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
+ (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
+ FC_RCTL_DD_UNSOL_CMD) ||
+ (piocb->iocb.un.genreq64.w5.hcsw.Type !=
+ MENLO_TRANSPORT_TYPE))
+
+ goto iocb_busy;
+ break;
+ case CMD_QUE_RING_BUF_CN:
+ case CMD_QUE_RING_BUF64_CN:
+ /*
+ * For IOCBs, like QUE_RING_BUF, that have no rsp ring
+ * completion, iocb_cmpl MUST be 0.
+ */
+ if (piocb->iocb_cmpl)
+ piocb->iocb_cmpl = NULL;
+ /*FALLTHROUGH*/
+ case CMD_CREATE_XRI_CR:
+ case CMD_CLOSE_XRI_CN:
+ case CMD_CLOSE_XRI_CX:
+ break;
+ default:
+ goto iocb_busy;
+ }
+
+ /*
+ * For FCP commands, we must be in a state where we can process link
+ * attention events.
+ */
+ } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
+ !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
+ goto iocb_busy;
+ }
+
+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+ (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+ lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+ if (iocb)
+ lpfc_sli_update_ring(phba, pring);
+ else
+ lpfc_sli_update_full_ring(phba, pring);
+
+ if (!piocb)
+ return IOCB_SUCCESS;
+
+ goto out_busy;
+
+ iocb_busy:
+ pring->stats.iocb_cmd_delay++;
+
+ out_busy:
+
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba, pring, piocb);
+ return IOCB_SUCCESS;
+ }
+
+ return IOCB_BUSY;
+}
+
+/**
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the IOCB contains a single
+ * BDE then it is converted to a single sli_sge.
+ * The IOCB is still in cpu endianess so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+**/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_sglq *sglq)
+{
+ uint16_t xritag = NO_XRI;
+ struct ulp_bde64 *bpl = NULL;
+ struct ulp_bde64 bde;
+ struct sli4_sge *sgl = NULL;
+ struct lpfc_dmabuf *dmabuf;
+ IOCB_t *icmd;
+ int numBdes = 0;
+ int i = 0;
+ uint32_t offset = 0; /* accumulated offset in the sg request list */
+ int inbound = 0; /* number of sg reply entries inbound from firmware */
+
+ if (!piocbq || !sglq)
+ return xritag;
+
+ sgl = (struct sli4_sge *)sglq->sgl;
+ icmd = &piocbq->iocb;
+ if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+ return sglq->sli4_xritag;
+ if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+ numBdes = icmd->un.genreq64.bdl.bdeSize /
+ sizeof(struct ulp_bde64);
+ /* The addrHigh and addrLow fields within the IOCB
+ * have not been byteswapped yet so there is no
+ * need to swap them back.
+ */
+ if (piocbq->context3)
+ dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+ else
+ return xritag;
+
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ if (!bpl)
+ return xritag;
+
+ for (i = 0; i < numBdes; i++) {
+ /* Should already be byte swapped. */
+ sgl->addr_hi = bpl->addrHigh;
+ sgl->addr_lo = bpl->addrLow;
+
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ if ((i+1) == numBdes)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ bde.tus.w = le32_to_cpu(bpl->tus.w);
+ sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+ /* The offsets in the sgl need to be accumulated
+ * separately for the request and reply lists.
+ * The request is always first, the reply follows.
+ */
+ if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+ /* add up the reply sg entries */
+ if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+ inbound++;
+ /* first inbound? reset the offset */
+ if (inbound == 1)
+ offset = 0;
+ bf_set(lpfc_sli4_sge_offset, sgl, offset);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ offset += bde.tus.f.bdeSize;
+ }
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ bpl++;
+ sgl++;
+ }
+ } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+ /* The addrHigh and addrLow fields of the BDE have not
+ * been byteswapped yet so they need to be swapped
+ * before putting them in the sgl.
+ */
+ sgl->addr_hi =
+ cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+ sgl->addr_lo =
+ cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len =
+ cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
+ }
+ return sglq->sli4_xritag;
+}
+
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
+ * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
+ *
+ * Return: index into SLI4 fast-path FCP queue index.
+ **/
+static inline int
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ int chann, cpu;
+
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+ && phba->cfg_fcp_io_channel > 1) {
+ cpu = smp_processor_id();
+ if (cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = phba->sli4_hba.cpu_map;
+ cpup += cpu;
+ return cpup->channel_id;
+ }
+ }
+ chann = atomic_add_return(1, &phba->fcp_qidx);
+ chann = (chann % phba->cfg_fcp_io_channel);
+ return chann;
+}
+
+/**
+ * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
+ *
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
+ *
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
+ **/
+static int
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+ union lpfc_wqe *wqe)
+{
+ uint32_t xmit_len = 0, total_len = 0;
+ uint8_t ct = 0;
+ uint32_t fip;
+ uint32_t abort_tag;
+ uint8_t command_type = ELS_COMMAND_NON_FIP;
+ uint8_t cmnd;
+ uint16_t xritag;
+ uint16_t abrt_iotag;
+ struct lpfc_iocbq *abrtiocbq;
+ struct ulp_bde64 *bpl = NULL;
+ uint32_t els_id = LPFC_ELS_ID_DEFAULT;
+ int numBdes, i;
+ struct ulp_bde64 bde;
+ struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+ uint32_t if_type;
+
+ fip = phba->hba_flag & HBA_FIP_SUPPORT;
+ /* The fcp commands will set command type */
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ command_type = FCP_COMMAND;
+ else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
+ /* Some of the fields are in the right position already */
+ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+ abort_tag = (uint32_t) iocbq->iotag;
+ xritag = iocbq->sli4_xritag;
+ wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word10 = 0;
+ /* words0-2 bpl convert bde */
+ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+ numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+ sizeof(struct ulp_bde64);
+ bpl = (struct ulp_bde64 *)
+ ((struct lpfc_dmabuf *)iocbq->context3)->virt;
+ if (!bpl)
+ return IOCB_ERROR;
+
+ /* Should already be byte swapped. */
+ wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
+ wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
+ xmit_len = wqe->generic.bde.tus.f.bdeSize;
+ total_len = 0;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ total_len += bde.tus.f.bdeSize;
+ }
+ } else
+ xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+
+ iocbq->iocb.ulpIoTag = iocbq->iotag;
+ cmnd = iocbq->iocb.ulpCommand;
+
+ switch (iocbq->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
+ ndlp = iocbq->context_un.ndlp;
+ else
+ ndlp = (struct lpfc_nodelist *)iocbq->context1;
+ if (!iocbq->iocb.ulpLe) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2007 Only Limited Edition cmd Format"
+ " supported 0x%x\n",
+ iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ }
+
+ wqe->els_req.payload_len = xmit_len;
+ /* Els_reguest64 has a TMO */
+ bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+ iocbq->iocb.ulpTimeout);
+ /* Need a VF for word 4 set the vf bit*/
+ bf_set(els_req64_vf, &wqe->els_req, 0);
+ /* And a VFID for word 12 */
+ bf_set(els_req64_vfid, &wqe->els_req, 0);
+ ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+ bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
+ /* CCP CCPE PV PRI in word10 were set in the memcpy */
+ if (command_type == ELS_COMMAND_FIP)
+ els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
+ >> LPFC_FIP_ELS_ID_SHIFT);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ if ((*pcmd == ELS_CMD_FLOGI) &&
+ !(phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP))
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[iocbq->vport->vpi]);
+ } else if (pcmd && iocbq->context1) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
+ }
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ wqe->els_req.max_response_payload_len = total_len - xmit_len;
+ break;
+ case CMD_XMIT_SEQUENCE64_CX:
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.un.ulpWord[3]);
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.unsli3.rcvsli3.ox_id);
+ /* The entire sequence is transmitted for this IOCB */
+ xmit_len = total_len;
+ cmnd = CMD_XMIT_SEQUENCE64_CR;
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+ case CMD_XMIT_SEQUENCE64_CR:
+ /* word3 iocb=io_tag32 wqe=reserved */
+ wqe->xmit_sequence.rsvd3 = 0;
+ /* word4 relative_offset memcpy */
+ /* word5 r_ctl/df_ctl memcpy */
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+ wqe->xmit_sequence.xmit_len = xmit_len;
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_BCAST64_CN:
+ /* word3 iocb=iotag32 wqe=seq_payload_len */
+ wqe->xmit_bcast64.seq_payload_len = xmit_len;
+ /* word4 iocb=rsvd wqe=rsvd */
+ /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+ /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+ bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
+ break;
+ case CMD_FCP_IWRITE64_CR:
+ command_type = FCP_COMMAND_DATA_OUT;
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_iwrite,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+ 0);
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
+ break;
+ case CMD_FCP_IREAD64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_iread,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iread,
+ 0);
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
+ break;
+ case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd,
+ 0);
+ /* word3 iocb=IO_TAG wqe=reserved */
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ /* For this command calculate the xmit length of the
+ * request bde.
+ */
+ xmit_len = 0;
+ numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+ sizeof(struct ulp_bde64);
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ break;
+ xmit_len += bde.tus.f.bdeSize;
+ }
+ /* word3 iocb=IO_TAG wqe=request_payload_len */
+ wqe->gen_req.request_payload_len = xmit_len;
+ /* word4 iocb=parameter wqe=relative_offset memcpy */
+ /* word5 [rctl, type, df_ctl, la] copied in memcpy */
+ /* word6 context tag copied in memcpy */
+ if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
+ ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2015 Invalid CT %x command 0x%x\n",
+ ct, iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ }
+ bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+ bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ wqe->gen_req.max_response_payload_len = total_len - xmit_len;
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_ELS_RSP64_CX:
+ ndlp = (struct lpfc_nodelist *)iocbq->context1;
+ /* words0-2 BDE memcpy */
+ /* word3 iocb=iotag32 wqe=response_payload_len */
+ wqe->xmit_els_rsp.response_payload_len = xmit_len;
+ /* word4 */
+ wqe->xmit_els_rsp.word4 = 0;
+ /* word5 iocb=rsvd wge=did */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+ iocbq->iocb.un.xseq64.xmit_els_remoteID);
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (iocbq->vport->fc_flag & FC_PT2PT) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ iocbq->vport->fc_myDID);
+ if (iocbq->vport->fc_myDID == Fabric_DID) {
+ bf_set(wqe_els_did,
+ &wqe->xmit_els_rsp.wqe_dest, 0);
+ }
+ }
+ }
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ iocbq->iocb.unsli3.rcvsli3.ox_id);
+ if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ phba->vpi_ids[iocbq->vport->vpi]);
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ }
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_CLOSE_XRI_CN:
+ case CMD_ABORT_XRI_CN:
+ case CMD_ABORT_XRI_CX:
+ /* words 0-2 memcpy should be 0 rserved */
+ /* port will send abts */
+ abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
+ if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
+ abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
+ fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
+ } else
+ fip = 0;
+
+ if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
+ /*
+ * The link is down, or the command was ELS_FIP
+ * so the fw does not need to send abts
+ * on the wire.
+ */
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+ /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+ wqe->abort_cmd.rsrvd5 = 0;
+ bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+ /*
+ * The abort handler will send us CMD_ABORT_XRI_CN or
+ * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+ */
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ cmnd = CMD_ABORT_XRI_CX;
+ command_type = OTHER_COMMAND;
+ xritag = 0;
+ break;
+ case CMD_XMIT_BLS_RSP64_CX:
+ ndlp = (struct lpfc_nodelist *)iocbq->context1;
+ /* As BLS ABTS RSP WQE is very different from other WQEs,
+ * we re-construct this WQE here based on information in
+ * iocbq from scratch.
+ */
+ memset(wqe, 0, sizeof(union lpfc_wqe));
+ /* OX_ID is invariable to who sent ABTS to CT exchange */
+ bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+ if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
+ LPFC_ABTS_UNSOL_INT) {
+ /* ABTS sent by initiator to CT exchange, the
+ * RX_ID field will be filled with the newly
+ * allocated responder XRI.
+ */
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ iocbq->sli4_xritag);
+ } else {
+ /* ABTS sent by responder to CT exchange, the
+ * RX_ID field will be filled with the responder
+ * RX_ID from ABTS.
+ */
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
+ }
+ bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+ bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+
+ /* Use CT=VPI */
+ bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
+ ndlp->nlp_DID);
+ bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ /* Overwrite the pre-set comnd type with OTHER_COMMAND */
+ command_type = OTHER_COMMAND;
+ if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+ bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+ bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+ }
+
+ break;
+ case CMD_XRI_ABORTED_CX:
+ case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+ case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+ case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+ case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+ case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2014 Invalid command 0x%x\n",
+ iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ break;
+ }
+
+ if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+ iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
+ LPFC_IO_DIF_INSERT);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+ bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submit the iocb to firmware or after adding to the
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_sglq *sglq;
+ union lpfc_wqe wqe;
+ struct lpfc_queue *wq;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+
+ if (piocb->sli4_xritag == NO_XRI) {
+ if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ sglq = NULL;
+ else {
+ if (!list_empty(&pring->txq)) {
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba,
+ pring, piocb);
+ return IOCB_SUCCESS;
+ } else {
+ return IOCB_BUSY;
+ }
+ } else {
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
+ if (!sglq) {
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba,
+ pring,
+ piocb);
+ return IOCB_SUCCESS;
+ } else
+ return IOCB_BUSY;
+ }
+ }
+ }
+ } else if (piocb->iocb_flag & LPFC_IO_FCP) {
+ /* These IO's already have an XRI and a mapped sgl. */
+ sglq = NULL;
+ } else {
+ /*
+ * This is a continuation of a commandi,(CX) so this
+ * sglq is on the active list
+ */
+ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
+ if (!sglq)
+ return IOCB_ERROR;
+ }
+
+ if (sglq) {
+ piocb->sli4_lxritag = sglq->sli4_lxritag;
+ piocb->sli4_xritag = sglq->sli4_xritag;
+ if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
+ return IOCB_ERROR;
+ }
+
+ if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ return IOCB_ERROR;
+
+ if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
+ wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+ } else {
+ wq = phba->sli4_hba.oas_wq;
+ }
+ if (lpfc_sli4_wq_put(wq, &wqe))
+ return IOCB_ERROR;
+ } else {
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return IOCB_ERROR;
+ if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ return IOCB_ERROR;
+ }
+ lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+ return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ *
+ * This routine wraps the actual lockless version for issusing IOCB function
+ * pointer from the lpfc_hba struct.
+ *
+ * Return codes:
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
+ **/
+int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+ phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+ phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1419 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+ return 0;
+}
+
+int
+lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb)
+{
+ uint32_t idx;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ /*
+ * fcp_wqidx should already be setup based on what
+ * completion queue we want to use.
+ */
+ if (!(phba->cfg_fof) ||
+ (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return LPFC_HBA_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return LPFC_HBA_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = idx;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
+ }
+ }
+ return ring_number;
+}
+
+/**
+ * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
+ * function. This function gets the hbalock and calls
+ * __lpfc_sli_issue_iocb function and will return the error returned
+ * by __lpfc_sli_issue_iocb function. This wrapper is used by
+ * functions which do not hold hbalock.
+ **/
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_queue *fpeq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflags;
+ int rc, idx;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
+ if (unlikely(ring_number == LPFC_HBA_ERROR))
+ return IOCB_ERROR;
+ idx = piocb->fcp_wqidx;
+
+ pring = &phba->sli.ring[ring_number];
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
+ fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+
+ if (atomic_dec_and_test(&fcp_eq_hdl->
+ fcp_eq_in_use)) {
+
+ /* Get associated EQ with this index */
+ fpeq = phba->sli4_hba.hba_eq[idx];
+
+ /* Turn off interrupts from this EQ */
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /*
+ * Process all the events on FCP EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba,
+ eqe, idx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+ lpfc_sli4_eq_release(fpeq,
+ LPFC_QUEUE_REARM);
+ }
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ }
+ } else {
+ /* For now, SLI2/3 will still use hbalock */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_extra_ring_setup - Extra ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while driver attaches with the
+ * HBA to setup the extra ring. The extra ring is used
+ * only when driver needs to support target mode functionality
+ * or IP over FC functionalities.
+ *
+ * This function is called with no lock held.
+ **/
+static int
+lpfc_extra_ring_setup( struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+
+ psli = &phba->sli;
+
+ /* Adjust cmd/rsp ring iocb entries more evenly */
+
+ /* Take some away from the FCP ring */
+ pring = &psli->ring[psli->fcp_ring];
+ pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+ /* and give them to the extra ring */
+ pring = &psli->ring[psli->extra_ring];
+
+ pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+ /* Setup default profile for this ring */
+ pring->iotag_max = 4096;
+ pring->num_mask = 1;
+ pring->prt[0].profile = 0; /* Mask 0 */
+ pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
+ pring->prt[0].type = phba->cfg_multi_ring_type;
+ pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
+ return 0;
+}
+
+/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port. The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession. The abort could be originated by the
+ * driver or by the port. The ABTS could have been for an ELS
+ * or FCP IO. The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ uint16_t rpi = 0, vpi = 0;
+ struct lpfc_vport *vport = NULL;
+
+ /* The rpi in the ulpContext is vport-sensitive. */
+ vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+ rpi = iocbq->iocb.ulpContext;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3092 Port generated ABTS async event "
+ "on vpi %d rpi %d status 0x%x\n",
+ vpi, rpi, iocbq->iocb.ulpStatus);
+
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ if (!vport)
+ goto err_exit;
+ ndlp = lpfc_findnode_rpi(vport, rpi);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto err_exit;
+
+ if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+ return;
+
+ err_exit:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3095 Event Context not found, no "
+ "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+ iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
+ vpi, rpi);
+}
+
+/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port. The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply. The abort could be originated
+ * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ struct lpfc_vport *vport;
+ uint32_t ext_status = 0;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3115 Node Context not found, driver "
+ "ignoring abts err event\n");
+ return;
+ }
+
+ vport = ndlp->vport;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3116 Port generated FCP XRI ABORT event on "
+ "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
+ ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ bf_get(lpfc_wcqe_xa_xri, axri),
+ bf_get(lpfc_wcqe_xa_status, axri),
+ axri->parameter);
+
+ /*
+ * Catch the ABTS protocol failure case. Older OCe FW releases returned
+ * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
+ * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+ */
+ ext_status = axri->parameter & IOERR_PARAM_MASK;
+ if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+ ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
+ lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
+/**
+ * lpfc_sli_async_event_handler - ASYNC iocb handler function
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * This function is called by the slow ring event handler
+ * function when there is an ASYNC event iocb in the ring.
+ * This function is called with no lock held.
+ * Currently this function handles only temperature related
+ * ASYNC events. The function decodes the temperature sensor
+ * event message and posts events for the management applications.
+ **/
+static void
+lpfc_sli_async_event_handler(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
+{
+ IOCB_t *icmd;
+ uint16_t evt_code;
+ struct temp_event temp_event_data;
+ struct Scsi_Host *shost;
+ uint32_t *iocb_w;
+
+ icmd = &iocbq->iocb;
+ evt_code = icmd->un.asyncstat.evt_code;
+
+ switch (evt_code) {
+ case ASYNC_TEMP_WARN:
+ case ASYNC_TEMP_SAFE:
+ temp_event_data.data = (uint32_t) icmd->ulpContext;
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ if (evt_code == ASYNC_TEMP_WARN) {
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0347 Adapter is very hot, please take "
+ "corrective action. temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ } else {
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0340 Adapter temperature is OK now. "
+ "temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ }
+
+ /* Send temperature change event to applications */
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data), (char *) &temp_event_data,
+ LPFC_NL_VENDOR_ID);
+ break;
+ case ASYNC_STATUS_CN:
+ lpfc_sli_abts_err_handler(phba, iocbq);
+ break;
+ default:
+ iocb_w = (uint32_t *) icmd;
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0346 Ring %d handler: unexpected ASYNC_STATUS"
+ " evt_code 0x%x\n"
+ "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
+ "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
+ "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
+ "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
+ pring->ringno, icmd->un.asyncstat.evt_code,
+ iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
+ iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
+ iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
+ iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
+
+ break;
+ }
+}
+
+
+/**
+ * lpfc_sli_setup - SLI ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_setup sets up rings of the SLI interface with
+ * number of iocbs per ring and iotags. This function is
+ * called while driver attach to the HBA and before the
+ * interrupts are enabled. So there is no need for locking.
+ *
+ * This function always returns 0.
+ **/
+int
+lpfc_sli_setup(struct lpfc_hba *phba)
+{
+ int i, totiocbsize = 0;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ psli->num_rings += phba->cfg_fcp_io_channel;
+ psli->sli_flag = 0;
+ psli->fcp_ring = LPFC_FCP_RING;
+ psli->next_ring = LPFC_FCP_NEXT_RING;
+ psli->extra_ring = LPFC_EXTRA_RING;
+
+ psli->iocbq_lookup = NULL;
+ psli->iocbq_lookup_len = 0;
+ psli->last_iotag = 0;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ switch (i) {
+ case LPFC_FCP_RING: /* ring 0 - FCP */
+ /* numCiocb and numRiocb are used in config_port */
+ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+ pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+ pring->sli.sli3.numCiocb +=
+ SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb +=
+ SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->sli.sli3.numCiocb +=
+ SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->sli.sli3.numRiocb +=
+ SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
+ pring->iotag_ctr = 0;
+ pring->iotag_max =
+ (phba->cfg_hba_queue_depth * 2);
+ pring->fast_iotag = pring->iotag_max;
+ pring->num_mask = 0;
+ break;
+ case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
+ /* numCiocb and numRiocb are used in config_port */
+ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+ pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+ pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
+ pring->iotag_max = phba->cfg_hba_queue_depth;
+ pring->num_mask = 0;
+ break;
+ case LPFC_ELS_RING: /* ring 2 - ELS / CT */
+ /* numCiocb and numRiocb are used in config_port */
+ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+ pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+ pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
+ pring->fast_iotag = 0;
+ pring->iotag_ctr = 0;
+ pring->iotag_max = 4096;
+ pring->lpfc_sli_rcv_async_status =
+ lpfc_sli_async_event_handler;
+ pring->num_mask = LPFC_MAX_RING_MASK;
+ pring->prt[0].profile = 0; /* Mask 0 */
+ pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+ pring->prt[0].type = FC_TYPE_ELS;
+ pring->prt[0].lpfc_sli_rcv_unsol_event =
+ lpfc_els_unsol_event;
+ pring->prt[1].profile = 0; /* Mask 1 */
+ pring->prt[1].rctl = FC_RCTL_ELS_REP;
+ pring->prt[1].type = FC_TYPE_ELS;
+ pring->prt[1].lpfc_sli_rcv_unsol_event =
+ lpfc_els_unsol_event;
+ pring->prt[2].profile = 0; /* Mask 2 */
+ /* NameServer Inquiry */
+ pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
+ /* NameServer */
+ pring->prt[2].type = FC_TYPE_CT;
+ pring->prt[2].lpfc_sli_rcv_unsol_event =
+ lpfc_ct_unsol_event;
+ pring->prt[3].profile = 0; /* Mask 3 */
+ /* NameServer response */
+ pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
+ /* NameServer */
+ pring->prt[3].type = FC_TYPE_CT;
+ pring->prt[3].lpfc_sli_rcv_unsol_event =
+ lpfc_ct_unsol_event;
+ break;
+ }
+ totiocbsize += (pring->sli.sli3.numCiocb *
+ pring->sli.sli3.sizeCiocb) +
+ (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
+ }
+ if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
+ /* Too many cmd / rsp ring entries in SLI2 SLIM */
+ printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
+ "SLI2 SLIM Data: x%x x%lx\n",
+ phba->brd_no, totiocbsize,
+ (unsigned long) MAX_SLIM_IOCB_SIZE);
+ }
+ if (phba->cfg_multi_ring_support == 2)
+ lpfc_extra_ring_setup(phba);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_queue_setup - Queue initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held and always returns
+ * 1.
+ **/
+int
+lpfc_sli_queue_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i;
+
+ psli = &phba->sli;
+ spin_lock_irq(&phba->hbalock);
+ INIT_LIST_HEAD(&psli->mboxq);
+ INIT_LIST_HEAD(&psli->mboxq_cmpl);
+ /* Initialize list headers for txq and txcmplq as double linked lists */
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ pring->ringno = i;
+ pring->sli.sli3.next_cmdidx = 0;
+ pring->sli.sli3.local_getidx = 0;
+ pring->sli.sli3.cmdidx = 0;
+ pring->flag = 0;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ INIT_LIST_HEAD(&pring->iocb_continue_saveq);
+ INIT_LIST_HEAD(&pring->postbufq);
+ spin_lock_init(&pring->ring_lock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
+}
+
+/**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command subsystem. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: pending mailbox command queue; the outstanding mailbox
+ * command; and completed mailbox command queue. It is caller's responsibility
+ * to make sure that the driver is in the proper state to flush the mailbox
+ * command sub-system. Namely, the posting of mailbox commands into the
+ * pending mailbox command queue from the various clients must be stopped;
+ * either the HBA is in a state that it will never works on the outstanding
+ * mailbox command (such as in EEH or ERATT conditions) or the outstanding
+ * mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmb;
+ unsigned long iflag;
+
+ /* Flush all the mailbox commands in the mbox system */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ /* The pending mailbox command queue */
+ list_splice_init(&phba->sli.mboxq, &completions);
+ /* The outstanding active mailbox command */
+ if (psli->mbox_active) {
+ list_add_tail(&psli->mbox_active->list, &completions);
+ psli->mbox_active = NULL;
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ }
+ /* The completed mailbox command queue */
+ list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+ pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba, pmb);
+ }
+}
+
+/**
+ * lpfc_sli_host_down - Vport cleanup function
+ * @vport: Pointer to virtual port object.
+ *
+ * lpfc_sli_host_down is called to clean up the resources
+ * associated with a vport before destroying virtual
+ * port data structures.
+ * This function does following operations:
+ * - Free discovery resources associated with this virtual
+ * port.
+ * - Free iocbs associated with this virtual port in
+ * the txq.
+ * - Send abort for all iocb commands associated with this
+ * vport in txcmplq.
+ *
+ * This function is called with no lock held and always returns 1.
+ **/
+int
+lpfc_sli_host_down(struct lpfc_vport *vport)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ int i;
+ unsigned long flags = 0;
+ uint16_t prev_pring_flag;
+
+ lpfc_cleanup_discovery_resources(vport);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ prev_pring_flag = pring->flag;
+ /* Only slow rings */
+ if (pring->ringno == LPFC_ELS_RING) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
+ /*
+ * Error everything on the txq since these iocbs have not been
+ * given to the FW yet.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ if (iocb->vport != vport)
+ continue;
+ list_move_tail(&iocb->list, &completions);
+ }
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ if (iocb->vport != vport)
+ continue;
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+
+ pring->flag = prev_pring_flag;
+ }
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ return 1;
+}
+
+/**
+ * lpfc_sli_hba_down - Resource cleanup function for the HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all iocb, buffers, mailbox commands
+ * while shutting down the HBA. This function is called with no
+ * lock held and always returns 1.
+ * This function does the following to cleanup driver resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry
+ * in the list.
+ * - Free up any buffer posted to the HBA
+ * - Free mailbox commands in the mailbox queue.
+ **/
+int
+lpfc_sli_hba_down(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_dmabuf *buf_ptr;
+ unsigned long flags = 0;
+ int i;
+
+ /* Shutdown the mailbox command sub-system */
+ lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
+
+ lpfc_hba_down_prep(phba);
+
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ /* Only slow rings */
+ if (pring->ringno == LPFC_ELS_RING) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
+
+ /*
+ * Error everything on the txq since these iocbs have not been
+ * given to the FW yet.
+ */
+ list_splice_init(&pring->txq, &completions);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_splice_init(&phba->elsbuf, &completions);
+ phba->elsbuf_cnt = 0;
+ phba->elsbuf_prev_cnt = 0;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, buf_ptr,
+ struct lpfc_dmabuf, list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+
+ /* Return any active mbox cmds */
+ del_timer_sync(&psli->mbox_tmo);
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+ return 1;
+}
+
+/**
+ * lpfc_sli_pcimem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of words required to be copied.
+ *
+ * This function is used for copying data between driver memory
+ * and the SLI memory. This function also changes the endianness
+ * of each word if native endianness is different from SLI
+ * endianness. This function can be called with or without
+ * lock.
+ **/
+void
+lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+ uint32_t *src = srcp;
+ uint32_t *dest = destp;
+ uint32_t ldata;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
+ ldata = *src;
+ ldata = le32_to_cpu(ldata);
+ *dest = ldata;
+ src++;
+ dest++;
+ }
+}
+
+
+/**
+ * lpfc_sli_bemem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of words required to be copied.
+ *
+ * This function is used for copying data between a data structure
+ * with big endian representation to local endianness.
+ * This function can be called with or without lock.
+ **/
+void
+lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+ uint32_t *src = srcp;
+ uint32_t *dest = destp;
+ uint32_t ldata;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+ ldata = *src;
+ ldata = be32_to_cpu(ldata);
+ *dest = ldata;
+ src++;
+ dest++;
+ }
+}
+
+/**
+ * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mp: Pointer to driver buffer object.
+ *
+ * This function is called with no lock held.
+ * It always return zero after adding the buffer to the postbufq
+ * buffer list.
+ **/
+int
+lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_dmabuf *mp)
+{
+ /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
+ later */
+ spin_lock_irq(&phba->hbalock);
+ list_add_tail(&mp->list, &pring->postbufq);
+ pring->postbufq_cnt++;
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+}
+
+/**
+ * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
+ * @phba: Pointer to HBA context object.
+ *
+ * When HBQ is enabled, buffers are searched based on tags. This function
+ * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
+ * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
+ * does not conflict with tags of buffer posted for unsolicited events.
+ * The function returns the allocated tag. The function is called with
+ * no locks held.
+ **/
+uint32_t
+lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
+{
+ spin_lock_irq(&phba->hbalock);
+ phba->buffer_tag_count++;
+ /*
+ * Always set the QUE_BUFTAG_BIT to distiguish between
+ * a tag assigned by HBQ.
+ */
+ phba->buffer_tag_count |= QUE_BUFTAG_BIT;
+ spin_unlock_irq(&phba->hbalock);
+ return phba->buffer_tag_count;
+}
+
+/**
+ * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: Buffer tag.
+ *
+ * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
+ * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
+ * iocb is posted to the response ring with the tag of the buffer.
+ * This function searches the pring->postbufq list using the tag
+ * to find buffer associated with CMD_IOCB_RET_XRI64_CX
+ * iocb. If the buffer is found then lpfc_dmabuf object of the
+ * buffer is returned to the caller else NULL is returned.
+ * This function is called with no lock held.
+ **/
+struct lpfc_dmabuf *
+lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ uint32_t tag)
+{
+ struct lpfc_dmabuf *mp, *next_mp;
+ struct list_head *slp = &pring->postbufq;
+
+ /* Search postbufq, from the beginning, looking for a match on tag */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ if (mp->buffer_tag == tag) {
+ list_del_init(&mp->list);
+ pring->postbufq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
+ return mp;
+ }
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0402 Cannot find virtual addr for buffer tag on "
+ "ring %d Data x%lx x%p x%p x%x\n",
+ pring->ringno, (unsigned long) tag,
+ slp->next, slp->prev, pring->postbufq_cnt);
+
+ return NULL;
+}
+
+/**
+ * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @phys: DMA address of the buffer.
+ *
+ * This function searches the buffer list using the dma_address
+ * of unsolicited event to find the driver's lpfc_dmabuf object
+ * corresponding to the dma_address. The function returns the
+ * lpfc_dmabuf object if a buffer is found else it returns NULL.
+ * This function is called by the ct and els unsolicited event
+ * handlers to get the buffer associated with the unsolicited
+ * event.
+ *
+ * This function is called with no lock held.
+ **/
+struct lpfc_dmabuf *
+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ dma_addr_t phys)
+{
+ struct lpfc_dmabuf *mp, *next_mp;
+ struct list_head *slp = &pring->postbufq;
+
+ /* Search postbufq, from the beginning, looking for a match on phys */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ if (mp->phys == phys) {
+ list_del_init(&mp->list);
+ pring->postbufq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
+ return mp;
+ }
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0410 Cannot find virtual addr for mapped buf on "
+ "ring %d Data x%llx x%p x%p x%x\n",
+ pring->ringno, (unsigned long long)phys,
+ slp->next, slp->prev, pring->postbufq_cnt);
+ return NULL;
+}
+
+/**
+ * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * This function is the completion handler for the abort iocbs for
+ * ELS commands. This function is called from the ELS ring event
+ * handler with no lock held. This function frees memory resources
+ * associated with the abort iocb.
+ **/
+static void
+lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ uint16_t abort_iotag, abort_context;
+ struct lpfc_iocbq *abort_iocb = NULL;
+
+ if (irsp->ulpStatus) {
+
+ /*
+ * Assume that the port already completed and returned, or
+ * will return the iocb. Just Log the message.
+ */
+ abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
+ abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (abort_iotag != 0 &&
+ abort_iotag <= phba->sli.last_iotag)
+ abort_iocb =
+ phba->sli.iocbq_lookup[abort_iotag];
+ } else
+ /* For sli4 the abort_tag is the XRI,
+ * so the abort routine puts the iotag of the iocb
+ * being aborted in the context field of the abort
+ * IOCB.
+ */
+ abort_iocb = phba->sli.iocbq_lookup[abort_context];
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+ "0327 Cannot abort els iocb %p "
+ "with tag %x context %x, abort status %x, "
+ "abort code %x\n",
+ abort_iocb, abort_iotag, abort_context,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ spin_unlock_irq(&phba->hbalock);
+ }
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for ELS commands
+ * which are aborted. The function frees memory resources used for
+ * the aborted ELS commands.
+ **/
+static void
+lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0139 Ignoring ELS cmd tag x%x completion Data: "
+ "x%x x%x x%x\n",
+ irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ else
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Other than the case the outstanding command iocb is an abort
+ * request, this function issues abort out unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
+ **/
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_iocbq *abtsiocbp;
+ IOCB_t *icmd = NULL;
+ IOCB_t *iabt = NULL;
+ int ring_number;
+ int retval;
+ unsigned long iflags;
+
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
+ */
+ icmd = &cmdiocb->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ return 0;
+
+ /* issue ABTS for this IOCB based on iotag */
+ abtsiocbp = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocbp == NULL)
+ return 0;
+
+ /* This signals the response to set the correct status
+ * before calling the completion handler
+ */
+ cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ iabt = &abtsiocbp->iocb;
+ iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
+ iabt->un.acxri.abortContextTag = icmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+ iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ }
+ else
+ iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ iabt->ulpLe = 1;
+ iabt->ulpClass = icmd->ulpClass;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->iocb_flag & LPFC_IO_FOF)
+ abtsiocbp->iocb_flag |= LPFC_IO_FOF;
+
+ if (phba->link_state >= LPFC_LINK_UP)
+ iabt->ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+
+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0339 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x\n",
+ iabt->un.acxri.abortIoTag,
+ iabt->un.acxri.abortContextTag,
+ abtsiocbp->iotag);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number =
+ lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
+ if (unlikely(ring_number == LPFC_HBA_ERROR))
+ return 0;
+ pring = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock need to be set here */
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbp, 0);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ } else {
+ retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbp, 0);
+ }
+
+ if (retval)
+ __lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. This routine no longer removes
+ * iocb off txcmplq and call compl in case of IOCB_ERROR.
+ */
+ return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the callback function shall be changed to those commands
+ * so that nothing happens when them finishes. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ int retval = IOCB_ERROR;
+ IOCB_t *icmd = NULL;
+
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
+ */
+ icmd = &cmdiocb->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ return 0;
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ (pring->ringno == LPFC_ELS_RING)) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
+ /* Now, we try to issue the abort to the cmdiocb out */
+ retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
+abort_iotag_exit:
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. This routine no longer removes
+ * iocb off txcmplq and call compl in case of IOCB_ERROR.
+ */
+ return retval;
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ int i;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+}
+
+/**
+ * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
+ * @iocbq: Pointer to driver iocb object.
+ * @vport: Pointer to driver virtual port object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
+ *
+ * This function acts as an iocb filter for functions which abort or count
+ * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * 0 if the filtering criteria is met for the given iocb and will return
+ * 1 if the filtering criteria is not met.
+ * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
+ * given iocb is for the SCSI device specified by vport, tgt_id and
+ * lun_id parameter.
+ * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
+ * given iocb is for the SCSI target specified by vport and tgt_id
+ * parameters.
+ * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
+ * given iocb is for the SCSI host associated with the given vport.
+ * This function is called with no locks held.
+ **/
+static int
+lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
+ uint16_t tgt_id, uint64_t lun_id,
+ lpfc_ctx_cmd ctx_cmd)
+{
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int rc = 1;
+
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+ return rc;
+
+ if (iocbq->vport != vport)
+ return rc;
+
+ lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+
+ if (lpfc_cmd->pCmd == NULL)
+ return rc;
+
+ switch (ctx_cmd) {
+ case LPFC_CTX_LUN:
+ if ((lpfc_cmd->rdata->pnode) &&
+ (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+ (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
+ rc = 0;
+ break;
+ case LPFC_CTX_TGT:
+ if ((lpfc_cmd->rdata->pnode) &&
+ (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
+ rc = 0;
+ break;
+ case LPFC_CTX_HOST:
+ rc = 0;
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
+ __func__, ctx_cmd);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
+ * @vport: Pointer to virtual port.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function returns number of FCP commands pending for the vport.
+ * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
+ * commands pending on the vport associated with SCSI device specified
+ * by tgt_id and lun_id parameters.
+ * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
+ * commands pending on the vport associated with SCSI target specified
+ * by tgt_id parameter.
+ * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
+ * commands pending on the vport.
+ * This function returns the number of iocbs which satisfy the filter.
+ * This function is called without any lock held.
+ **/
+int
+lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
+ lpfc_ctx_cmd ctx_cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *iocbq;
+ int sum, i;
+
+ for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
+ ctx_cmd) == 0)
+ sum++;
+ }
+
+ return sum;
+}
+
+/**
+ * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @rspiocb: Pointer to response iocb object.
+ *
+ * This function is called when an aborted FCP iocb completes. This
+ * function is called by the ring event handler with no lock held.
+ * This function frees the iocb.
+ **/
+void
+lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3096 ABORT_XRI_CN completing on rpi x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "status 0x%x, reason 0x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4]);
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+ return;
+}
+
+/**
+ * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters
+ * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns number of iocbs it failed to abort.
+ * This function is called with no locks held.
+ **/
+int
+lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *abtsiocb;
+ IOCB_t *cmd = NULL;
+ int errcnt = 0, ret_val = 0;
+ int i;
+
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ abort_cmd) != 0)
+ continue;
+
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
+ /* issue ABTS for this IOCB based on iotag */
+ abtsiocb = lpfc_sli_get_iocbq(phba);
+ if (abtsiocb == NULL) {
+ errcnt++;
+ continue;
+ }
+
+ /* indicate the IO is being aborted by the driver. */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ cmd = &iocbq->iocb;
+ abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+ abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
+ else
+ abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+ abtsiocb->iocb.ulpLe = 1;
+ abtsiocb->iocb.ulpClass = cmd->ulpClass;
+ abtsiocb->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->iocb_flag & LPFC_IO_FOF)
+ abtsiocb->iocb_flag |= LPFC_IO_FOF;
+
+ if (lpfc_is_link_up(phba))
+ abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+ /* Setup callback routine and issue the command. */
+ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocb, 0);
+ if (ret_val == IOCB_ERROR) {
+ lpfc_sli_release_iocbq(phba, abtsiocb);
+ errcnt++;
+ continue;
+ }
+ }
+
+ return errcnt;
+}
+
+/**
+ * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters
+ * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns number of iocbs it aborted .
+ * This function is called with no locks held right after a taskmgmt
+ * command is sent.
+ **/
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_iocbq *abtsiocbq;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *iocbq;
+ IOCB_t *icmd;
+ int sum, i, ret_val;
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring_s4;
+ uint32_t ring_number;
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* all I/Os are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+ sum = 0;
+
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ cmd) != 0)
+ continue;
+
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
+ /* issue ABTS for this IOCB based on iotag */
+ abtsiocbq = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocbq == NULL)
+ continue;
+
+ icmd = &iocbq->iocb;
+ abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+ abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ abtsiocbq->iocb.un.acxri.abortIoTag =
+ iocbq->sli4_xritag;
+ else
+ abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
+ abtsiocbq->iocb.ulpLe = 1;
+ abtsiocbq->iocb.ulpClass = icmd->ulpClass;
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->iocb_flag & LPFC_IO_FOF)
+ abtsiocbq->iocb_flag |= LPFC_IO_FOF;
+
+ lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+ ndlp = lpfc_cmd->rdata->pnode;
+
+ if (lpfc_is_link_up(phba) &&
+ (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+ abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+ /* Setup callback routine and issue the command. */
+ abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+
+ /*
+ * Indicate the IO is being aborted by the driver and set
+ * the caller's flag into the aborted IO.
+ */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ ring_number = MAX_SLI3_CONFIGURED_RINGS +
+ iocbq->fcp_wqidx;
+ pring_s4 = &phba->sli.ring[ring_number];
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+ ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocbq, 0);
+ spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+ } else {
+ ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocbq, 0);
+ }
+
+
+ if (ret_val == IOCB_ERROR)
+ __lpfc_sli_release_iocbq(phba, abtsiocbq);
+ else
+ sum++;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return sum;
+}
+
+/**
+ * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_sli_issue_iocb_wait function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from other thread which
+ * cleans up the SLI layer objects.
+ * This function copy the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ wait_queue_head_t *pdone_q;
+ unsigned long iflags;
+ struct lpfc_scsi_buf *lpfc_cmd;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+ /*
+ * A time out has occurred for the iocb. If a time out
+ * completion handler has been supplied, call it. Otherwise,
+ * just free the iocbq.
+ */
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+ cmdiocbq->wait_iocb_cmpl = NULL;
+ if (cmdiocbq->iocb_cmpl)
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ else
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return;
+ }
+
+ cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->context2 && rspiocbq)
+ memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+ &rspiocbq->iocb, sizeof(IOCB_t));
+
+ /* Set the exchange busy flag for task management commands */
+ if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+ cur_iocbq);
+ lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ }
+
+ pdone_q = cmdiocbq->context_un.wait_queue;
+ if (pdone_q)
+ wake_up(pdone_q);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+}
+
+/**
+ * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
+ * @phba: Pointer to HBA context object..
+ * @piocbq: Pointer to command iocb.
+ * @flag: Flag to test.
+ *
+ * This routine grabs the hbalock and then test the iocb_flag to
+ * see if the passed in flag is set.
+ * Returns:
+ * 1 if flag is set.
+ * 0 if flag is not set.
+ **/
+static int
+lpfc_chk_iocb_flg(struct lpfc_hba *phba,
+ struct lpfc_iocbq *piocbq, uint32_t flag)
+{
+ unsigned long iflags;
+ int ret;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ret = piocbq->iocb_flag & flag;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return ret;
+
+}
+
+/**
+ * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
+ * @phba: Pointer to HBA context object..
+ * @pring: Pointer to sli ring.
+ * @piocb: Pointer to command iocb.
+ * @prspiocbq: Pointer to response iocb.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the iocb to firmware and waits for the
+ * iocb to complete. The iocb_cmpl field of the shall be used
+ * to handle iocbs which time out. If the field is NULL, the
+ * function shall free the iocbq structure. If more clean up is
+ * needed, the caller is expected to provide a completion function
+ * that will provide the needed clean up. If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT. The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
+ * The function waits for the iocb completion using an
+ * non-interruptible wait.
+ * This function will sleep while waiting for iocb completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. Due to the same reason, this function
+ * cannot be called with interrupt disabled.
+ * This function assumes that the iocb completions occur while
+ * this function sleep. So, this function cannot be called from
+ * the thread which process iocb completion for this ring.
+ * This function clears the iocb_flag of the iocb object before
+ * issuing the iocb and the iocb completion handler sets this
+ * flag and wakes this thread when the iocb completes.
+ * The contents of the response iocb will be copied to prspiocbq
+ * by the completion handler when the command completes.
+ * This function returns IOCB_SUCCESS when success.
+ * This function is called with no lock held.
+ **/
+int
+lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
+ uint32_t ring_number,
+ struct lpfc_iocbq *piocb,
+ struct lpfc_iocbq *prspiocbq,
+ uint32_t timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+ long timeleft, timeout_req = 0;
+ int retval = IOCB_SUCCESS;
+ uint32_t creg_val;
+ struct lpfc_iocbq *iocb;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ unsigned long iflags;
+ bool iocb_completed = true;
+
+ /*
+ * If the caller has provided a response iocbq buffer, then context2
+ * is NULL or its an error.
+ */
+ if (prspiocbq) {
+ if (piocb->context2)
+ return IOCB_ERROR;
+ piocb->context2 = prspiocbq;
+ }
+
+ piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
+ piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+ piocb->context_un.wait_queue = &done_q;
+ piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ if (lpfc_readl(phba->HCregaddr, &creg_val))
+ return IOCB_ERROR;
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+ SLI_IOCB_RET_IOCB);
+ if (retval == IOCB_SUCCESS) {
+ timeout_req = msecs_to_jiffies(timeout * 1000);
+ timeleft = wait_event_timeout(done_q,
+ lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
+ timeout_req);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+
+ /*
+ * IOCB timed out. Inform the wake iocb wait
+ * completion function and set local status
+ */
+
+ iocb_completed = false;
+ piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb_completed) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0331 IOCB wake signaled\n");
+ /* Note: we are not indicating if the IOCB has a success
+ * status or not - that's for the caller to check.
+ * IOCB_SUCCESS means just that the command was sent and
+ * completed. Not that it completed successfully.
+ * */
+ } else if (timeleft == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0338 IOCB wait timeout error - no "
+ "wake response Data x%x\n", timeout);
+ retval = IOCB_TIMEDOUT;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0330 IOCB wake NOT set, "
+ "Data x%x x%lx\n",
+ timeout, (timeleft / jiffies));
+ retval = IOCB_TIMEDOUT;
+ }
+ } else if (retval == IOCB_BUSY) {
+ if (phba->cfg_log_verbose & LOG_SLI) {
+ list_for_each_entry(iocb, &pring->txq, list) {
+ txq_cnt++;
+ }
+ list_for_each_entry(iocb, &pring->txcmplq, list) {
+ txcmplq_cnt++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+ phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+ }
+ return retval;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0332 IOCB wait issue failed, Data x%x\n",
+ retval);
+ retval = IOCB_ERROR;
+ }
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ if (lpfc_readl(phba->HCregaddr, &creg_val))
+ return IOCB_ERROR;
+ creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ if (prspiocbq)
+ piocb->context2 = NULL;
+
+ piocb->context_un.wait_queue = NULL;
+ piocb->iocb_cmpl = NULL;
+ return retval;
+}
+
+/**
+ * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to driver mailbox object.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the mailbox to firmware and waits for the
+ * mailbox command to complete. If the mailbox command is not
+ * completed within timeout seconds, it returns MBX_TIMEOUT.
+ * The function waits for the mailbox completion using an
+ * interruptible wait. If the thread is woken up due to a
+ * signal, MBX_TIMEOUT error is returned to the caller. Caller
+ * should not free the mailbox resources, if this function returns
+ * MBX_TIMEOUT.
+ * This function will sleep while waiting for mailbox completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. Due to the same reason, this function
+ * cannot be called with interrupt disabled.
+ * This function assumes that the mailbox completion occurs while
+ * this function sleep. So, this function cannot be called from
+ * the worker thread which processes mailbox completion.
+ * This function is called in the context of HBA management
+ * applications.
+ * This function returns MBX_SUCCESS when successful.
+ * This function is called with no lock held.
+ **/
+int
+lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
+ uint32_t timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+ MAILBOX_t *mb = NULL;
+ int retval;
+ unsigned long flag;
+
+ /* The caller might set context1 for extended buffer */
+ if (pmboxq->context1)
+ mb = (MAILBOX_t *)pmboxq->context1;
+
+ pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
+ /* setup wake call as IOCB callback */
+ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+ /* setup context field to pass wait_queue pointer to wake function */
+ pmboxq->context1 = &done_q;
+
+ /* now issue the command */
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
+ wait_event_interruptible_timeout(done_q,
+ pmboxq->mbox_flag & LPFC_MBX_WAKE,
+ msecs_to_jiffies(timeout * 1000));
+
+ spin_lock_irqsave(&phba->hbalock, flag);
+ /* restore the possible extended buffer for free resource */
+ pmboxq->context1 = (uint8_t *)mb;
+ /*
+ * if LPFC_MBX_WAKE flag is set the mailbox is completed
+ * else do not free the resources.
+ */
+ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
+ retval = MBX_SUCCESS;
+ } else {
+ retval = MBX_TIMEOUT;
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flag);
+ } else {
+ /* restore the possible extended buffer for free resource */
+ pmboxq->context1 = (uint8_t *)mb;
+ }
+
+ return retval;
+}
+
+/**
+ * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to shutdown the driver's mailbox sub-system.
+ * It first marks the mailbox sub-system is in a block state to prevent
+ * the asynchronous mailbox command from issued off the pending mailbox
+ * command queue. If the mailbox command sub-system shutdown is due to
+ * HBA error conditions such as EEH or ERATT, this routine shall invoke
+ * the mailbox sub-system flush routine to forcefully bring down the
+ * mailbox sub-system. Otherwise, if it is due to normal condition (such
+ * as with offline or HBA function reset), this routine will wait for the
+ * outstanding mailbox command to complete before invoking the mailbox
+ * sub-system flush routine to gracefully bring down mailbox sub-system.
+ **/
+void
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ unsigned long timeout;
+
+ if (mbx_action == LPFC_MBX_NO_WAIT) {
+ /* delay 100ms for port state */
+ msleep(100);
+ lpfc_sli_mbox_sys_flush(phba);
+ return;
+ }
+ timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ if (phba->sli.mbox_active)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) *
+ 1000) + jiffies;
+ spin_unlock_irq(&phba->hbalock);
+
+ while (phba->sli.mbox_active) {
+ /* Check active mailbox complete status every 2ms */
+ msleep(2);
+ if (time_after(jiffies, timeout))
+ /* Timeout, let the mailbox flush routine to
+ * forcefully release active mailbox command
+ */
+ break;
+ }
+ } else
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_sli_mbox_sys_flush(phba);
+}
+
+/**
+ * lpfc_sli_eratt_read - read sli-3 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI3 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli_eratt_read(struct lpfc_hba *phba)
+{
+ uint32_t ha_copy;
+
+ /* Read chip Host Attention (HA) register */
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ goto unplug_err;
+
+ if (ha_copy & HA_ERATT) {
+ /* Read host status register to retrieve error event */
+ if (lpfc_sli_read_hs(phba))
+ goto unplug_err;
+
+ /* Check if there is a deferred error condition is active */
+ if ((HS_FFER1 & phba->work_hs) &&
+ ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+ HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
+ phba->hba_flag |= DEFER_ERATT;
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr);
+ }
+
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
+ return 0;
+
+unplug_err:
+ /* Set the driver HS work bitmap */
+ phba->work_hs |= UNPLUG_ERR;
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+}
+
+/**
+ * lpfc_sli4_eratt_read - read sli-4 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+ uint32_t uerr_sta_hi, uerr_sta_lo;
+ uint32_t if_type, portsmphr;
+ struct lpfc_register portstat_reg;
+
+ /*
+ * For now, use the SLI4 device internal unrecoverable error
+ * registers for error attention. This can be changed later.
+ */
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerr_sta_lo) ||
+ lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+ &uerr_sta_hi)) {
+ phba->work_hs |= UNPLUG_ERR;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
+ if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+ (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1423 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "ue_mask_lo_reg=0x%x, "
+ "ue_mask_hi_reg=0x%x\n",
+ uerr_sta_lo, uerr_sta_hi,
+ phba->sli4_hba.ue_mask_lo,
+ phba->sli4_hba.ue_mask_hi);
+ phba->work_status[0] = uerr_sta_lo;
+ phba->work_status[1] = uerr_sta_hi;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0) ||
+ lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr)){
+ phba->work_hs |= UNPLUG_ERR;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
+ if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
+ phba->work_status[0] =
+ readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+ phba->work_status[1] =
+ readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2885 Port Status Event: "
+ "port status reg 0x%x, "
+ "port smphr reg 0x%x, "
+ "error 1=0x%x, error 2=0x%x\n",
+ portstat_reg.word0,
+ portsmphr,
+ phba->work_status[0],
+ phba->work_status[1]);
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2886 HBA Error Attention on unsupported "
+ "if type %d.", if_type);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_check_eratt - check error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called from timer soft interrupt context to check HBA's
+ * error attention register bit for error attention events.
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+int
+lpfc_sli_check_eratt(struct lpfc_hba *phba)
+{
+ uint32_t ha_copy;
+
+ /* If somebody is waiting to handle an eratt, don't process it
+ * here. The brdkill function will do this.
+ */
+ if (phba->link_flag & LS_IGNORE_ERATT)
+ return 0;
+
+ /* Check if interrupt handler handles this ERATT */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_ERATT_HANDLED) {
+ /* Interrupt handler has handled ERATT */
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+
+ /*
+ * If there is deferred error attention, do not check for error
+ * attention
+ */
+ if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+
+ /* If PCI channel is offline, don't process it */
+ if (unlikely(pci_channel_offline(phba->pcidev))) {
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+
+ switch (phba->sli_rev) {
+ case LPFC_SLI_REV2:
+ case LPFC_SLI_REV3:
+ /* Read chip Host Attention (HA) register */
+ ha_copy = lpfc_sli_eratt_read(phba);
+ break;
+ case LPFC_SLI_REV4:
+ /* Read device Uncoverable Error (UERR) registers */
+ ha_copy = lpfc_sli4_eratt_read(phba);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0299 Invalid SLI revision (%d)\n",
+ phba->sli_rev);
+ ha_copy = 0;
+ break;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ return ha_copy;
+}
+
+/**
+ * lpfc_intr_state_check - Check device state for interrupt handling
+ * @phba: Pointer to HBA context.
+ *
+ * This inline routine checks whether a device or its PCI slot is in a state
+ * that the interrupt should be handled.
+ *
+ * This function returns 0 if the device or the PCI slot is in a state that
+ * interrupt should be handled, otherwise -EIO.
+ */
+static inline int
+lpfc_intr_state_check(struct lpfc_hba *phba)
+{
+ /* If the pci channel is offline, ignore all the interrupts */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return -EIO;
+
+ /* Update device level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The link attention and ELS ring attention events are
+ * handled by the worker thread. The interrupt handler signals the worker
+ * thread and returns for these events. This function is called without
+ * any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_sp_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ uint32_t ha_copy, hc_copy;
+ uint32_t work_ha_copy;
+ unsigned long status;
+ unsigned long iflag;
+ uint32_t control;
+
+ MAILBOX_t *mbox, *pmbox;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ /*
+ * Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *)dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /*
+ * Stuff needs to be attented to when this function is invoked as an
+ * individual interrupt handler in MSI-X multi-message interrupt mode
+ */
+ if (phba->intr_type == MSIX) {
+ /* Check device state for handling interrupt */
+ if (lpfc_intr_state_check(phba))
+ return IRQ_NONE;
+ /* Need to read HA REG for slow-path events */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ goto unplug_error;
+ /* If somebody is waiting to handle an eratt don't process it
+ * here. The brdkill function will do this.
+ */
+ if (phba->link_flag & LS_IGNORE_ERATT)
+ ha_copy &= ~HA_ERATT;
+ /* Check the need for handling ERATT in interrupt handler */
+ if (ha_copy & HA_ERATT) {
+ if (phba->hba_flag & HBA_ERATT_HANDLED)
+ /* ERATT polling has handled ERATT */
+ ha_copy &= ~HA_ERATT;
+ else
+ /* Indicate interrupt handler handles ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ }
+
+ /*
+ * If there is deferred error attention, do not check for any
+ * interrupt.
+ */
+ if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /* Clear up only attention source related to slow-path */
+ if (lpfc_readl(phba->HCregaddr, &hc_copy))
+ goto unplug_error;
+
+ writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
+ HC_LAINT_ENA | HC_ERINT_ENA),
+ phba->HCregaddr);
+ writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
+ phba->HAregaddr);
+ writel(hc_copy, phba->HCregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ } else
+ ha_copy = phba->ha_copy;
+
+ work_ha_copy = ha_copy & phba->work_ha_mask;
+
+ if (work_ha_copy) {
+ if (work_ha_copy & HA_LATT) {
+ if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
+ /*
+ * Turn off Link Attention interrupts
+ * until CLEAR_LA done
+ */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
+ if (lpfc_readl(phba->HCregaddr, &control))
+ goto unplug_error;
+ control &= ~HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ }
+ else
+ work_ha_copy &= ~HA_LATT;
+ }
+
+ if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
+ /*
+ * Turn off Slow Rings interrupts, LPFC_ELS_RING is
+ * the only slow ring.
+ */
+ status = (work_ha_copy &
+ (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status >>= (4*LPFC_ELS_RING);
+ if (status & HA_RXMASK) {
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (lpfc_readl(phba->HCregaddr, &control))
+ goto unplug_error;
+
+ lpfc_debugfs_slow_ring_trc(phba,
+ "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
+ control, status,
+ (uint32_t)phba->sli.slistat.sli_intr);
+
+ if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "ISR Disable ring:"
+ "pwork:x%x hawork:x%x wait:x%x",
+ phba->work_ha, work_ha_copy,
+ (uint32_t)((unsigned long)
+ &phba->work_waitq));
+
+ control &=
+ ~(HC_R0INT_ENA << LPFC_ELS_RING);
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+ else {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "ISR slow ring: pwork:"
+ "x%x hawork:x%x wait:x%x",
+ phba->work_ha, work_ha_copy,
+ (uint32_t)((unsigned long)
+ &phba->work_waitq));
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ }
+ }
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (work_ha_copy & HA_ERATT) {
+ if (lpfc_sli_read_hs(phba))
+ goto unplug_error;
+ /*
+ * Check if there is a deferred error condition
+ * is active
+ */
+ if ((HS_FFER1 & phba->work_hs) &&
+ ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+ HS_FFER6 | HS_FFER7 | HS_FFER8) &
+ phba->work_hs)) {
+ phba->hba_flag |= DEFER_ERATT;
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr);
+ }
+ }
+
+ if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
+ pmb = phba->sli.mbox_active;
+ pmbox = &pmb->u.mb;
+ mbox = phba->mbox;
+ vport = pmb->vport;
+
+ /* First check out the status word */
+ lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
+ if (pmbox->mbxOwner != OWN_HOST) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /*
+ * Stray Mailbox Interrupt, mbxCommand <cmd>
+ * mbxStatus <status>
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+ LOG_SLI,
+ "(%d):0304 Stray Mailbox "
+ "Interrupt mbxCommand x%x "
+ "mbxStatus x%x\n",
+ (vport ? vport->vpi : 0),
+ pmbox->mbxCommand,
+ pmbox->mbxStatus);
+ /* clear mailbox attention bit */
+ work_ha_copy &= ~HA_MBATT;
+ } else {
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->last_completion_time = jiffies;
+ del_timer(&phba->sli.mbox_tmo);
+ if (pmb->mbox_cmpl) {
+ lpfc_sli_pcimem_bcopy(mbox, pmbox,
+ MAILBOX_CMD_SIZE);
+ if (pmb->out_ext_byte_len &&
+ pmb->context2)
+ lpfc_sli_pcimem_bcopy(
+ phba->mbox_ext,
+ pmb->context2,
+ pmb->out_ext_byte_len);
+ }
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX dflt rpi: : "
+ "status:x%x rpi:x%x",
+ (uint32_t)pmbox->mbxStatus,
+ pmbox->un.varWords[0], 0);
+
+ if (!pmbox->mbxStatus) {
+ mp = (struct lpfc_dmabuf *)
+ (pmb->context1);
+ ndlp = (struct lpfc_nodelist *)
+ pmb->context2;
+
+ /* Reg_LOGIN of dflt RPI was
+ * successful. new lets get
+ * rid of the RPI using the
+ * same mbox buffer.
+ */
+ lpfc_unreg_login(phba,
+ vport->vpi,
+ pmbox->un.varWords[0],
+ pmb);
+ pmb->mbox_cmpl =
+ lpfc_mbx_cmpl_dflt_rpi;
+ pmb->context1 = mp;
+ pmb->context2 = ndlp;
+ pmb->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba,
+ pmb,
+ MBX_NOWAIT);
+ if (rc != MBX_BUSY)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "0350 rc should have"
+ "been MBX_BUSY\n");
+ if (rc != MBX_NOT_FINISHED)
+ goto send_current_mbox;
+ }
+ }
+ spin_lock_irqsave(
+ &phba->pport->work_port_lock,
+ iflag);
+ phba->pport->work_port_events &=
+ ~WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(
+ &phba->pport->work_port_lock,
+ iflag);
+ lpfc_mbox_cmpl_put(phba, pmb);
+ }
+ } else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ if ((work_ha_copy & HA_MBATT) &&
+ (phba->sli.mbox_active == NULL)) {
+send_current_mbox:
+ /* Process next mailbox command if there is one */
+ do {
+ rc = lpfc_sli_issue_mbox(phba, NULL,
+ MBX_NOWAIT);
+ } while (rc == MBX_NOT_FINISHED);
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+ LOG_SLI, "0349 rc should be "
+ "MBX_SUCCESS\n");
+ }
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->work_ha |= work_ha_copy;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_worker_wake_up(phba);
+ }
+ return IRQ_HANDLED;
+unplug_error:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_HANDLED;
+
+} /* lpfc_sli_sp_intr_handler */
+
+/**
+ * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring event are handled in
+ * the intrrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_fp_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ uint32_t ha_copy;
+ unsigned long status;
+ unsigned long iflag;
+
+ /* Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /*
+ * Stuff needs to be attented to when this function is invoked as an
+ * individual interrupt handler in MSI-X multi-message interrupt mode
+ */
+ if (phba->intr_type == MSIX) {
+ /* Check device state for handling interrupt */
+ if (lpfc_intr_state_check(phba))
+ return IRQ_NONE;
+ /* Need to read HA REG for FCP ring and other ring events */
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return IRQ_HANDLED;
+ /* Clear up only attention source related to fast-path */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ /*
+ * If there is deferred error attention, do not check for
+ * any interrupt.
+ */
+ if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+ writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
+ phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ } else
+ ha_copy = phba->ha_copy;
+
+ /*
+ * Process all events on FCP ring. Take the optimized path for FCP IO.
+ */
+ ha_copy &= ~(phba->work_ha_mask);
+
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ status >>= (4*LPFC_FCP_RING);
+ if (status & HA_RXMASK)
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_FCP_RING],
+ status);
+
+ if (phba->cfg_multi_ring_support == 2) {
+ /*
+ * Process all events on extra ring. Take the optimized path
+ * for extra ring IO.
+ */
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+ status >>= (4*LPFC_EXTRA_RING);
+ if (status & HA_RXMASK) {
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_EXTRA_RING],
+ status);
+ }
+ }
+ return IRQ_HANDLED;
+} /* lpfc_sli_fp_intr_handler */
+
+/**
+ * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the HBA device-level interrupt handler to device with
+ * SLI-3 interface spec, called from the PCI layer when either MSI or
+ * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
+ * requires driver attention. This function invokes the slow-path interrupt
+ * attention handling function and fast-path interrupt attention handling
+ * function in turn to process the relevant HBA attention events. This
+ * function is called without any lock held. It gets the hbalock to access
+ * and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ irqreturn_t sp_irq_rc, fp_irq_rc;
+ unsigned long status1, status2;
+ uint32_t hc_copy;
+
+ /*
+ * Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (lpfc_intr_state_check(phba))
+ return IRQ_NONE;
+
+ spin_lock(&phba->hbalock);
+ if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(!phba->ha_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_NONE;
+ } else if (phba->ha_copy & HA_ERATT) {
+ if (phba->hba_flag & HBA_ERATT_HANDLED)
+ /* ERATT polling has handled ERATT */
+ phba->ha_copy &= ~HA_ERATT;
+ else
+ /* Indicate interrupt handler handles ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ }
+
+ /*
+ * If there is deferred error attention, do not check for any interrupt.
+ */
+ if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_NONE;
+ }
+
+ /* Clear attention sources except link and error attentions */
+ if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_HANDLED;
+ }
+ writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
+ | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
+ phba->HCregaddr);
+ writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+ writel(hc_copy, phba->HCregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock(&phba->hbalock);
+
+ /*
+ * Invokes slow-path host attention interrupt handling as appropriate.
+ */
+
+ /* status of events with mailbox and link attention */
+ status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
+
+ /* status of events with ELS ring */
+ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status2 >>= (4*LPFC_ELS_RING);
+
+ if (status1 || (status2 & HA_RXMASK))
+ sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
+ else
+ sp_irq_rc = IRQ_NONE;
+
+ /*
+ * Invoke fast-path host attention interrupt handling as appropriate.
+ */
+
+ /* status of events with FCP ring */
+ status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ status1 >>= (4*LPFC_FCP_RING);
+
+ /* status of events with extra ring */
+ if (phba->cfg_multi_ring_support == 2) {
+ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+ status2 >>= (4*LPFC_EXTRA_RING);
+ } else
+ status2 = 0;
+
+ if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
+ fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
+ else
+ fp_irq_rc = IRQ_NONE;
+
+ /* Return device-level interrupt handling status */
+ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
+} /* lpfc_sli_intr_handler */
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the fcp xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the fcp xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for FCP work queue */
+ lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the els xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the els xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for ELS work queue */
+ lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure
+ * @pIocbIn: pointer to the rspiocbq
+ * @pIocbOut: pointer to the cmdiocbq
+ * @wcqe: pointer to the complete wcqe
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from command iocbq and transferring the
+ * completion status information from the complete wcqe.
+ **/
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pIocbIn,
+ struct lpfc_iocbq *pIocbOut,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ int numBdes, i;
+ unsigned long iflags;
+ uint32_t status, max_response;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl, bde;
+ size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
+ memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+ sizeof(struct lpfc_iocbq) - offset);
+ /* Map WCQE parameters into irspiocb parameters */
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
+ if (pIocbOut->iocb_flag & LPFC_IO_FCP)
+ if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ pIocbIn->iocb.un.fcpi.fcpi_parm =
+ pIocbOut->iocb.un.fcpi.fcpi_parm -
+ wcqe->total_data_placed;
+ else
+ pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+ else {
+ pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+ switch (pIocbOut->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+ max_response = bde.tus.f.bdeSize;
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ max_response = 0;
+ if (!pIocbOut->context3)
+ break;
+ numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+ sizeof(struct ulp_bde64);
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ max_response += bde.tus.f.bdeSize;
+ }
+ break;
+ default:
+ max_response = wcqe->total_data_placed;
+ break;
+ }
+ if (max_response < wcqe->total_data_placed)
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+ else
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+ wcqe->total_data_placed;
+ }
+
+ /* Convert BG errors for completion status */
+ if (status == CQE_STATUS_DI_ERROR) {
+ pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+
+ if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+ pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
+ else
+ pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
+
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_GUARD_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_APPTAG_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_REFTAG_ERR_MASK;
+
+ /* Check to see if there was any good data before the error */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_HI_WATER_MARK_PRESENT_MASK;
+ pIocbIn->iocb.unsli3.sli3_bg.bghm =
+ wcqe->total_data_placed;
+ }
+
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
+ }
+
+ /* Pick up HBA exchange busy condition */
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+}
+
+/**
+ * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event and construct
+ * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the receive IOCBQ, NULL otherwise.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *irspiocbq)
+{
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_wcqe_complete *wcqe;
+ unsigned long iflags;
+
+ wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ pring->stats.iocb_event++;
+ /* Look up the ELS command IOCB and create pseudo response IOCB */
+ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ if (unlikely(!cmdiocbq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0386 ELS complete with no corresponding "
+ "cmdiocb: iotag (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ lpfc_sli_release_iocbq(phba, irspiocbq);
+ return NULL;
+ }
+
+ /* Fake the irspiocbq and copy necessary response information */
+ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+
+ return irspiocbq;
+}
+
+/**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine process a mailbox completion queue entry with asynchrous
+ * event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0392 Async Event: word0:x%x, word1:x%x, "
+ "word2:x%x, word3:x%x\n", mcqe->word0,
+ mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0394 Failed to allocate CQ_EVENT entry\n");
+ return false;
+ }
+
+ /* Move the CQE into an asynchronous event entry */
+ memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+ /* Set the async event flag */
+ phba->hba_flag |= ASYNC_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine process a mailbox completion queue entry with mailbox
+ * completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+ uint32_t mcqe_status;
+ MAILBOX_t *mbox, *pmbox;
+ struct lpfc_mqe *mqe;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ unsigned long iflags;
+ LPFC_MBOXQ_t *pmb;
+ bool workposted = false;
+ int rc;
+
+ /* If not a mailbox complete MCQE, out by checking mailbox consume */
+ if (!bf_get(lpfc_trailer_completed, mcqe))
+ goto out_no_mqe_complete;
+
+ /* Get the reference to the active mbox command */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pmb = phba->sli.mbox_active;
+ if (unlikely(!pmb)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "1832 No pending MBOX command to handle\n");
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ goto out_no_mqe_complete;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ mqe = &pmb->u.mqe;
+ pmbox = (MAILBOX_t *)&pmb->u.mqe;
+ mbox = phba->mbox;
+ vport = pmb->vport;
+
+ /* Reset heartbeat timer */
+ phba->last_completion_time = jiffies;
+ del_timer(&phba->sli.mbox_tmo);
+
+ /* Move mbox data to caller's mailbox region, do endian swapping */
+ if (pmb->mbox_cmpl && mbox)
+ lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+
+ /*
+ * For mcqe errors, conditionally move a modified error code to
+ * the mbox so that the error will not be missed.
+ */
+ mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+ if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+ if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mqe,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
+ }
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX dflt rpi: status:x%x rpi:x%x",
+ mcqe_status,
+ pmbox->un.varWords[0], 0);
+ if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+ mp = (struct lpfc_dmabuf *)(pmb->context1);
+ ndlp = (struct lpfc_nodelist *)pmb->context2;
+ /* Reg_LOGIN of dflt RPI was successful. Now lets get
+ * RID of the PPI using the same mbox buffer.
+ */
+ lpfc_unreg_login(phba, vport->vpi,
+ pmbox->un.varWords[0], pmb);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ pmb->context1 = mp;
+ pmb->context2 = ndlp;
+ pmb->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_BUSY)
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+ LOG_SLI, "0385 rc should "
+ "have been MBX_BUSY\n");
+ if (rc != MBX_NOT_FINISHED)
+ goto send_current_mbox;
+ }
+ }
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+ /* There is mailbox completion work to do */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_mbox_cmpl_put(phba, pmb);
+ phba->work_ha |= HA_MBATT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+
+send_current_mbox:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Release the mailbox command posting token */
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ /* Setting active mailbox pointer need to be in sync to flag clear */
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ /* Wake up worker thread to post the next pending mailbox command */
+ lpfc_worker_wake_up(phba);
+out_no_mqe_complete:
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine process a mailbox completion queue entry, it invokes the
+ * proper mailbox complete handling or asynchrous event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+ struct lpfc_mcqe mcqe;
+ bool workposted;
+
+ /* Copy the mailbox MCQE and convert endian order as needed */
+ lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+ /* Invoke the proper event handling routine */
+ if (!bf_get(lpfc_trailer_async, &mcqe))
+ workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+ else
+ workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_iocbq *irspiocbq;
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring = cq->pring;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
+ int fcp_txcmplq_cnt = 0;
+
+ /* Get an irspiocbq for later ELS response processing use */
+ irspiocbq = lpfc_sli_get_iocbq(phba);
+ if (!irspiocbq) {
+ if (!list_empty(&pring->txq))
+ txq_cnt++;
+ if (!list_empty(&pring->txcmplq))
+ txcmplq_cnt++;
+ if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+ fcp_txcmplq_cnt++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+ "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ txq_cnt, phba->iocb_cnt,
+ fcp_txcmplq_cnt,
+ txcmplq_cnt);
+ return false;
+ }
+
+ /* Save off the slow-path queue event for work thread to process */
+ memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&irspiocbq->cq_event.list,
+ &phba->sli4_hba.sp_queue_event);
+ phba->hba_flag |= HBA_SP_QUEUE_EVT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles slow-path WQ entry comsumed event by invoking the
+ * proper WQ release routine to the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+ struct lpfc_wcqe_release *wcqe)
+{
+ /* sanity check on queue memory */
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return;
+ /* Check for the slow-path ELS work queue */
+ if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+ lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+ bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+ else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2579 Slow-path wqe consume event carries "
+ "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+ bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+ phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct sli4_wcqe_xri_aborted *wcqe)
+{
+ bool workposted = false;
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to allocate CQ_EVENT entry\n");
+ return false;
+ }
+
+ /* Move the CQE into the proper xri abort event list */
+ memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ switch (cq->subtype) {
+ case LPFC_FCP:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+ /* Set the fcp xri abort event flag */
+ phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ case LPFC_ELS:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+ /* Set the els xri abort event flag */
+ phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0603 Invalid work queue CQE subtype (x%x)\n",
+ cq->subtype);
+ workposted = false;
+ break;
+ }
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @rcqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine process a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
+{
+ bool workposted = false;
+ struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+ struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+ struct hbq_dmabuf *dma_buf;
+ uint32_t status, rq_id;
+ unsigned long iflags;
+
+ /* sanity check on queue memory */
+ if (unlikely(!hrq) || unlikely(!drq))
+ return workposted;
+
+ if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+ rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+ else
+ rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+ if (rq_id != hrq->queue_id)
+ goto out;
+
+ status = bf_get(lpfc_rcqe_status, rcqe);
+ switch (status) {
+ case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2537 Receive Frame Truncated!!\n");
+ hrq->RQ_buf_trunc++;
+ case FC_STATUS_RQ_SUCCESS:
+ lpfc_sli4_rq_release(hrq, drq);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ goto out;
+ }
+ hrq->RQ_rcv_buf++;
+ memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+ /* save off the frame for the word thread to process */
+ list_add_tail(&dma_buf->cq_event.list,
+ &phba->sli4_hba.sp_queue_event);
+ /* Frame received */
+ phba->hba_flag |= HBA_SP_QUEUE_EVT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ case FC_STATUS_INSUFF_BUF_NEED_BUF:
+ case FC_STATUS_INSUFF_BUF_FRM_DISC:
+ hrq->RQ_no_posted_buf++;
+ /* Post more buffers if possible */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ }
+out:
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @wcqe: Pointer to a completion queue entry.
+ *
+ * This routine process a slow-path work-queue or receive queue completion queue
+ * entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ struct lpfc_cqe cqevt;
+ bool workposted = false;
+
+ /* Copy the work queue CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
+
+ /* Check and process for different type of WCQE and dispatch */
+ switch (bf_get(lpfc_cqe_code, &cqevt)) {
+ case CQE_CODE_COMPL_WQE:
+ /* Process the WQ/RQ complete event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
+ (struct lpfc_wcqe_complete *)&cqevt);
+ break;
+ case CQE_CODE_RELEASE_WQE:
+ /* Process the WQ release event */
+ lpfc_sli4_sp_handle_rel_wcqe(phba,
+ (struct lpfc_wcqe_release *)&cqevt);
+ break;
+ case CQE_CODE_XRI_ABORTED:
+ /* Process the WQ XRI abort event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+ (struct sli4_wcqe_xri_aborted *)&cqevt);
+ break;
+ case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
+ /* Process the RQ event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_rcqe(phba,
+ (struct lpfc_rcqe *)&cqevt);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0388 Not a valid WCQE code: x%x\n",
+ bf_get(lpfc_cqe_code, &cqevt));
+ break;
+ }
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine process a event queue entry from the slow-path event queue.
+ * It will check the MajorCode and MinorCode to determine this is for a
+ * completion event on a completion queue, if not, an error shall be logged
+ * and just return. Otherwise, it will get to the corresponding completion
+ * queue and process all the entries on that completion queue, rearm the
+ * completion queue, and then return.
+ *
+ **/
+static void
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+ struct lpfc_queue *speq)
+{
+ struct lpfc_queue *cq = NULL, *childq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ecount = 0;
+ uint16_t cqid;
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ list_for_each_entry(childq, &speq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0365 Slow-path CQ identifier "
+ "(%d) does not exist\n", cqid);
+ return;
+ }
+
+ /* Process all the entries to the CQ */
+ switch (cq->type) {
+ case LPFC_MCQ:
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ cq->CQ_mbox++;
+ }
+ break;
+ case LPFC_WCQ:
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ if (cq->subtype == LPFC_FCP)
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ cqe);
+ else
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+ cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0370 Invalid completion queue type (%d)\n",
+ cq->type);
+ return;
+ }
+
+ /* Catch the no cq entry condition, log an error */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0371 No entry from the CQ: identifier "
+ "(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+ /* In any case, flash and re-arm the RCQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine process a fast-path work queue completion entry from fast-path
+ * event queue for FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_sli_ring *pring = cq->pring;
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_iocbq irspiocbq;
+ unsigned long iflags;
+
+ /* Check for response status */
+ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+ /* If resource errors reported from HBA, reduce queue
+ * depth of the SCSI device.
+ */
+ if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
+ IOSTAT_LOCAL_REJECT)) &&
+ ((wcqe->parameter & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES))
+ phba->lpfc_rampdown_queue_depth(phba);
+
+ /* Log the error status */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0373 FCP complete error: status=x%x, "
+ "hw_status=x%x, total_data_specified=%d, "
+ "parameter=x%x, word3=x%x\n",
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ bf_get(lpfc_wcqe_c_hw_status, wcqe),
+ wcqe->total_data_placed, wcqe->parameter,
+ wcqe->word3);
+ }
+
+ /* Look up the FCP command IOCB and create pseudo response IOCB */
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ pring->stats.iocb_event++;
+ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ if (unlikely(!cmdiocbq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0374 FCP complete with no corresponding "
+ "cmdiocb: iotag (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ return;
+ }
+ if (unlikely(!cmdiocbq->iocb_cmpl)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0375 FCP cmdiocb not callback function "
+ "iotag: (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ return;
+ }
+
+ /* Fake the irspiocb and copy necessary response information */
+ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
+
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+ /* Pass the cmd_iocb and the rsp state to the upper layer */
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an fast-path WQ entry comsumed event by invoking the
+ * proper WQ release routine to the slow-path WQ.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_wcqe_release *wcqe)
+{
+ struct lpfc_queue *childwq;
+ bool wqid_matched = false;
+ uint16_t fcp_wqid;
+
+ /* Check for fast-path FCP work queue release */
+ fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+ list_for_each_entry(childwq, &cq->child_list, list) {
+ if (childwq->queue_id == fcp_wqid) {
+ lpfc_sli4_wq_release(childwq,
+ bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+ wqid_matched = true;
+ break;
+ }
+ }
+ /* Report warning log message if no match found */
+ if (wqid_matched != true)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2580 Fast-path wqe consume event carries "
+ "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
+}
+
+/**
+ * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * @cq: Pointer to the completion queue.
+ * @eqe: Pointer to fast-path completion queue entry.
+ *
+ * This routine process a fast-path work queue completion entry from fast-path
+ * event queue for FCP command response completion.
+ **/
+static int
+lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ struct lpfc_wcqe_release wcqe;
+ bool workposted = false;
+
+ /* Copy the work queue CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+ /* Check and process for different type of WCQE and dispatch */
+ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+ case CQE_CODE_COMPL_WQE:
+ cq->CQ_wq++;
+ /* Process the WQ complete event */
+ phba->last_completion_time = jiffies;
+ lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
+ (struct lpfc_wcqe_complete *)&wcqe);
+ break;
+ case CQE_CODE_RELEASE_WQE:
+ cq->CQ_release_wqe++;
+ /* Process the WQ release event */
+ lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+ (struct lpfc_wcqe_release *)&wcqe);
+ break;
+ case CQE_CODE_XRI_ABORTED:
+ cq->CQ_xri_aborted++;
+ /* Process the WQ XRI abort event */
+ phba->last_completion_time = jiffies;
+ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+ (struct sli4_wcqe_xri_aborted *)&wcqe);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0144 Not a valid WCQE code: x%x\n",
+ bf_get(lpfc_wcqe_c_code, &wcqe));
+ break;
+ }
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine process a event queue entry from the fast-path event queue.
+ * It will check the MajorCode and MinorCode to determine this is for a
+ * completion event on a completion queue, if not, an error shall be logged
+ * and just return. Otherwise, it will get to the corresponding completion
+ * queue and process all the entries on the completion queue, rearm the
+ * completion queue, and then return.
+ **/
+static void
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+ uint32_t qidx)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0366 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Check if this is a Slow path event */
+ if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
+ lpfc_sli4_sp_handle_eqe(phba, eqe,
+ phba->sli4_hba.hba_eq[qidx]);
+ return;
+ }
+
+ if (unlikely(!phba->sli4_hba.fcp_cq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3146 Fast-path completion queues "
+ "does not exist\n");
+ return;
+ }
+ cq = phba->sli4_hba.fcp_cq[qidx];
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0367 Fast-path completion queue "
+ "(%d) does not exist\n", qidx);
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0368 Miss-matched fast-path completion "
+ "queue identifier: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0369 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flash and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+ struct lpfc_eqe *eqe;
+
+ /* walk all the EQ entries and drop on the floor */
+ while ((eqe = lpfc_sli4_eq_get(eq)))
+ ;
+
+ /* Clear and re-arm the EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ * entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine process a event queue entry from the Flash Optimized Fabric
+ * event queue. It will check the MajorCode and MinorCode to determine this
+ * is for a completion event on a completion queue, if not, an error shall be
+ * logged and just return. Otherwise, it will get to the corresponding
+ * completion queue and process all the entries on the completion queue, rearm
+ * the completion queue, and then return.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9147 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Next check for OAS */
+ cq = phba->sli4_hba.oas_cq;
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9148 OAS completion queue "
+ "does not exist\n");
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9149 Miss-matched fast-path compl "
+ "queue id: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the OAS CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9153 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flash and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The Flash Optimized Fabric ring event are handled in
+ * the intrrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that,
+ * the EQ to CQ are one-to-one map such that the EQ index is
+ * equal to that of CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *eq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ uint32_t eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ eq = phba->sli4_hba.fof_eq;
+ if (unlikely(!eq))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ eq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, eq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the event on FCP fast-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(eq))) {
+ lpfc_sli4_fof_handle_eqe(phba, eqe);
+ if (!(++ecount % eq->entry_repost))
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ eq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = ecount;
+
+
+ if (unlikely(ecount == 0)) {
+ eq->EQ_no_entry++;
+
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "9145 MSI-X interrupt with no EQE\n");
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9146 ISR interrupt with no EQE\n");
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
+ }
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ return IRQ_HANDLED;
+}
+
+/**
+ * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring event are handled in
+ * the intrrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that,
+ * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
+ * equal to that of FCP CQ index.
+ *
+ * The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *fpeq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ int fcp_eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ fcp_eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+ if (unlikely(!phba->sli4_hba.hba_eq))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
+
+ if (lpfc_fcp_look_ahead) {
+ if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
+ lpfc_sli4_eq_clr_intr(fpeq);
+ else {
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ return IRQ_NONE;
+ }
+ }
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ fpeq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, fpeq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (lpfc_fcp_look_ahead)
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the event on FCP fast-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ if (eqe == NULL)
+ break;
+
+ lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
+ if (!(++ecount % fpeq->entry_repost))
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+ fpeq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > fpeq->EQ_max_eqe)
+ fpeq->EQ_max_eqe = ecount;
+
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+
+ if (lpfc_fcp_look_ahead) {
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ return IRQ_NONE;
+ }
+
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
+
+ if (lpfc_fcp_look_ahead)
+ atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ return IRQ_HANDLED;
+} /* lpfc_sli4_fp_intr_handler */
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ irqreturn_t hba_irq_rc;
+ bool hba_handled = false;
+ int fcp_eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ phba = (struct lpfc_hba *)dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /*
+ * Invoke fast-path host attention interrupt handling as appropriate.
+ */
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+ hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
+ if (phba->cfg_fof) {
+ hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[0]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
+ return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
+} /* lpfc_sli4_intr_handler */
+
+/**
+ * lpfc_sli4_queue_free - free a queue structure and associated memory
+ * @queue: The queue structure to free.
+ *
+ * This function frees a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called after destroying the
+ * queue on the HBA.
+ **/
+void
+lpfc_sli4_queue_free(struct lpfc_queue *queue)
+{
+ struct lpfc_dmabuf *dmabuf;
+
+ if (!queue)
+ return;
+
+ while (!list_empty(&queue->page_list)) {
+ list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
+ list);
+ dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ kfree(queue);
+ return;
+}
+
+/**
+ * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
+ * @phba: The HBA that this queue is being created on.
+ * @entry_size: The size of each queue entry for this queue.
+ * @entry count: The number of entries that this queue will handle.
+ *
+ * This function allocates a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called before creating the
+ * queue on the HBA.
+ **/
+struct lpfc_queue *
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+ uint32_t entry_count)
+{
+ struct lpfc_queue *queue;
+ struct lpfc_dmabuf *dmabuf;
+ int x, total_qe_count;
+ void *dma_pointer;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ queue = kzalloc(sizeof(struct lpfc_queue) +
+ (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
+ if (!queue)
+ return NULL;
+ queue->page_count = (ALIGN(entry_size * entry_count,
+ hw_page_size))/hw_page_size;
+ INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->page_list);
+ INIT_LIST_HEAD(&queue->child_list);
+ for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ goto out_fail;
+ dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ hw_page_size, &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ goto out_fail;
+ }
+ dmabuf->buffer_tag = x;
+ list_add_tail(&dmabuf->list, &queue->page_list);
+ /* initialize queue's entry array */
+ dma_pointer = dmabuf->virt;
+ for (; total_qe_count < entry_count &&
+ dma_pointer < (hw_page_size + dmabuf->virt);
+ total_qe_count++, dma_pointer += entry_size) {
+ queue->qe[total_qe_count].address = dma_pointer;
+ }
+ }
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+
+ /*
+ * entry_repost is calculated based on the number of entries in the
+ * queue. This works out except for RQs. If buffers are NOT initially
+ * posted for every RQE, entry_repost should be adjusted accordingly.
+ */
+ queue->entry_repost = (entry_count >> 3);
+ if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
+ queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
+ queue->phba = phba;
+
+ return queue;
+out_fail:
+ lpfc_sli4_queue_free(queue);
+ return NULL;
+}
+
+/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function shall perform iomap of the specified PCI BAR address to host
+ * memory address if not already done so and return it. The returned host
+ * memory address can be NULL.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+ struct pci_dev *pdev;
+
+ if (!phba->pcidev)
+ return NULL;
+ else
+ pdev = phba->pcidev;
+
+ switch (pci_barset) {
+ case WQ_PCI_BAR_0_AND_1:
+ return phba->pci_bar0_memmap_p;
+ case WQ_PCI_BAR_2_AND_3:
+ return phba->pci_bar2_memmap_p;
+ case WQ_PCI_BAR_4_AND_5:
+ return phba->pci_bar4_memmap_p;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @startq: The starting FCP EQ to modify
+ *
+ * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @startq
+ * is used to get the starting FCP EQ to change.
+ * This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
+{
+ struct lpfc_mbx_modify_eq_delay *eq_delay;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_queue *eq;
+ int cnt, rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t result;
+ int fcp_eqidx;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint16_t dmult;
+
+ if (startq >= phba->cfg_fcp_io_channel)
+ return 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
+ length, LPFC_SLI4_MBX_EMBED);
+ eq_delay = &mbox->u.mqe.un.eq_delay;
+
+ /* Calculate delay multiper from maximum interrupt per second */
+ result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
+ if (result > LPFC_DMULT_CONST)
+ dmult = 0;
+ else
+ dmult = LPFC_DMULT_CONST/result - 1;
+
+ cnt = 0;
+ for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
+ fcp_eqidx++) {
+ eq = phba->sli4_hba.hba_eq[fcp_eqidx];
+ if (!eq)
+ continue;
+ eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
+ eq_delay->u.request.eq[cnt].phase = 0;
+ eq_delay->u.request.eq[cnt].delay_multi = dmult;
+ cnt++;
+ if (cnt >= LPFC_MAX_EQ_DELAY)
+ break;
+ }
+ eq_delay->u.request.num_eq = cnt;
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2512 MODIFY_EQ_DELAY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_eq_create - Create an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @eq: The queue structure to use to create the event queue.
+ * @imax: The maximum interrupt per second limit.
+ *
+ * This function creates an event queue, as detailed in @eq, on a port,
+ * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @eq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the EQ_CREATE mailbox command to the HBA to setup the
+ * event queue. This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
+{
+ struct lpfc_mbx_eq_create *eq_create;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint16_t dmult;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ /* sanity check on queue memory */
+ if (!eq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_eq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_EQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ eq_create = &mbox->u.mqe.un.eq_create;
+ bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
+ eq->page_count);
+ bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
+ LPFC_EQE_SIZE);
+ bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
+ /* don't setup delay multiplier using EQ_CREATE */
+ dmult = 0;
+ bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
+ dmult);
+ switch (eq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0360 Unsupported EQ count. (%d)\n",
+ eq->entry_count);
+ if (eq->entry_count < 256)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 256:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_256);
+ break;
+ case 512:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_512);
+ break;
+ case 1024:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_4096);
+ break;
+ }
+ list_for_each_entry(dmabuf, &eq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2500 EQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ eq->type = LPFC_EQ;
+ eq->subtype = LPFC_NONE;
+ eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
+ if (eq->queue_id == 0xFFFF)
+ status = -ENXIO;
+ eq->host_index = 0;
+ eq->hba_index = 0;
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_cq_create - Create a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cq: The queue structure to use to create the completion queue.
+ * @eq: The event queue to bind this completion queue to.
+ *
+ * This function creates a completion queue, as detailed in @wq, on a port,
+ * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @eq
+ * is used to indicate which event queue to bind this completion queue to. This
+ * function will send the CQ_CREATE mailbox command to the HBA to setup the
+ * completion queue. This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
+{
+ struct lpfc_mbx_cq_create *cq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ /* sanity check on queue memory */
+ if (!cq || !eq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_cq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_CQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ cq_create = &mbox->u.mqe.un.cq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+ bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
+ cq->page_count);
+ bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
+ bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.cqv);
+ if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+ /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+ eq->queue_id);
+ } else {
+ bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+ eq->queue_id);
+ }
+ switch (cq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0361 Unsupported CQ count. (%d)\n",
+ cq->entry_count);
+ if (cq->entry_count < 256) {
+ status = -EINVAL;
+ goto out;
+ }
+ /* otherwise default to smallest count (drop through) */
+ case 256:
+ bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+ LPFC_CQ_CNT_256);
+ break;
+ case 512:
+ bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+ LPFC_CQ_CNT_512);
+ break;
+ case 1024:
+ bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+ LPFC_CQ_CNT_1024);
+ break;
+ }
+ list_for_each_entry(dmabuf, &cq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2501 CQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ if (cq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ /* link the cq onto the parent eq child list */
+ list_add_tail(&cq->list, &eq->child_list);
+ /* Set up completion queue's type and subtype */
+ cq->type = type;
+ cq->subtype = subtype;
+ cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ cq->assoc_qid = eq->queue_id;
+ cq->host_index = 0;
+ cq->hba_index = 0;
+
+out:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this cq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. It's purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_dmabuf *dmabuf;
+ int length;
+
+ length = (sizeof(struct lpfc_mbx_mq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+ mq->page_count);
+ bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ switch (mq->entry_count) {
+ case 16:
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+}
+
+/**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this cq.
+ * @subtype: The queue's subtype.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to setup the
+ * mailbox queue. This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ struct lpfc_queue *cq, uint32_t subtype)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_mbx_mq_create_ext *mq_create_ext;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ /* sanity check on queue memory */
+ if (!mq || !cq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_mq_create_ext) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+ bf_set(lpfc_mbx_mq_create_ext_num_pages,
+ &mq_create_ext->u.request, mq->page_count);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.mqv);
+ if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+ bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+ cq->queue_id);
+ else
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
+ switch (mq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0362 Unsupported MQ count. (%d)\n",
+ mq->entry_count);
+ if (mq->entry_count < 16) {
+ status = -EINVAL;
+ goto out;
+ }
+ /* otherwise default to smallest count (drop through) */
+ case 16:
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create_ext->u.response);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2795 MQ_CREATE_EXT failed with "
+ "status x%x. Failback to MQ_CREATE.\n",
+ rc);
+ lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create->u.response);
+ }
+
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2502 MQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ if (mq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ mq->type = LPFC_MQ;
+ mq->assoc_qid = cq->queue_id;
+ mq->subtype = subtype;
+ mq->host_index = 0;
+ mq->hba_index = 0;
+
+ /* link the mq onto the parent cq child list */
+ list_add_tail(&mq->list, &cq->child_list);
+out:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to setup the
+ * work queue. This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ struct lpfc_queue *cq, uint32_t subtype)
+{
+ struct lpfc_mbx_wq_create *wq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ struct dma_address *page;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
+
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_wq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ wq_create = &mbox->u.mqe.un.wq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+ bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+ wq->page_count);
+ bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+ cq->queue_id);
+
+ /* wqv is the earliest version supported, NOT the latest */
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.wqv);
+
+ switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ case LPFC_Q_CREATE_VERSION_0:
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ /* Nothing to do, version 0 ONLY supports 64 byte */
+ page = wq_create->u.request.page;
+ break;
+ case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
+ /* If we get here the HBA MUST also support V1 and
+ * we MUST use it
+ */
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
+ bf_set(lpfc_mbx_wq_create_wqe_count,
+ &wq_create->u.request_1, wq->entry_count);
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ bf_set(lpfc_mbx_wq_create_page_size,
+ &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ break;
+ }
+ break;
+ case LPFC_Q_CREATE_VERSION_1:
+ bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+ wq->entry_count);
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_64);
+ break;
+ case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ break;
+ }
+ bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ break;
+ default:
+ status = -ERANGE;
+ goto out;
+ }
+
+ list_for_each_entry(dmabuf, &wq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+ page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
+ }
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2503 WQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+ if (wq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+ &wq_create->u.response);
+ if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (wq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3265 WQ[%d] doorbell format not "
+ "supported: x%x\n", wq->queue_id,
+ wq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+ pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+ &wq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3263 WQ[%d] failed to memmap pci "
+ "barset:x%x\n", wq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+ db_offset = wq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3252 WQ[%d] doorbell offset not "
+ "supported: x%x\n", wq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ wq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3264 WQ[%d]: barset:x%x, offset:x%x, "
+ "format:x%x\n", wq->queue_id, pci_barset,
+ db_offset, wq->db_format);
+ } else {
+ wq->db_format = LPFC_DB_LIST_FORMAT;
+ wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+ }
+ wq->type = LPFC_WQ;
+ wq->assoc_qid = cq->queue_id;
+ wq->subtype = subtype;
+ wq->host_index = 0;
+ wq->hba_index = 0;
+ wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
+
+ /* link the wq onto the parent cq child list */
+ list_add_tail(&wq->list, &cq->child_list);
+out:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @rq: The queue structure to use for the receive queue.
+ * @qno: The associated HBQ number
+ *
+ *
+ * For SLI4 we need to adjust the RQ repost value based on
+ * the number of buffers that are initially posted to the RQ.
+ */
+void
+lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
+{
+ uint32_t cnt;
+
+ /* sanity check on queue memory */
+ if (!rq)
+ return;
+ cnt = lpfc_hbq_defs[qno]->entry_count;
+
+ /* Recalc repost for RQs based on buffers initially posted */
+ cnt = (cnt >> 3);
+ if (cnt < LPFC_QUEUE_MIN_REPOST)
+ cnt = LPFC_QUEUE_MIN_REPOST;
+
+ rq->entry_repost = cnt;
+}
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind this work queue to.
+ *
+ * This function creates a receive buffer queue pair , as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * struct is used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue to bind received buffers that are posted to these queues to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to setup the
+ * receive queue pair. This function is asynchronous and will wait for the
+ * mailbox command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+ struct lpfc_mbx_rq_create *rq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
+
+ /* sanity check on queue memory */
+ if (!hrq || !drq || !cq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ if (hrq->entry_count != drq->entry_count)
+ return -EINVAL;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_rq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ rq_create = &mbox->u.mqe.un.rq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.rqv);
+ if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context,
+ hrq->entry_count);
+ rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+ bf_set(lpfc_rq_context_rqe_size,
+ &rq_create->u.request.context,
+ LPFC_RQE_SIZE_8);
+ bf_set(lpfc_rq_context_page_size,
+ &rq_create->u.request.context,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ } else {
+ switch (hrq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2535 Unsupported RQ count. (%d)\n",
+ hrq->entry_count);
+ if (hrq->entry_count < 512) {
+ status = -EINVAL;
+ goto out;
+ }
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_HDR_BUF_SIZE);
+ }
+ bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+ hrq->page_count);
+ list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2504 RQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+ if (hrq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+ &rq_create->u.response);
+ if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3262 RQ [%d] doorbell format not "
+ "supported: x%x\n", hrq->queue_id,
+ hrq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+
+ pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+ &rq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3269 RQ[%d] failed to memmap pci "
+ "barset:x%x\n", hrq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ db_offset = rq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3270 RQ[%d] doorbell offset not "
+ "supported: x%x\n", hrq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ hrq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+ "format:x%x\n", hrq->queue_id, pci_barset,
+ db_offset, hrq->db_format);
+ } else {
+ hrq->db_format = LPFC_DB_RING_FORMAT;
+ hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ }
+ hrq->type = LPFC_HRQ;
+ hrq->assoc_qid = cq->queue_id;
+ hrq->subtype = subtype;
+ hrq->host_index = 0;
+ hrq->hba_index = 0;
+
+ /* now create the data queue */
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.rqv);
+ if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context, hrq->entry_count);
+ rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+ bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
+ LPFC_RQE_SIZE_8);
+ bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ } else {
+ switch (drq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2536 Unsupported RQ count. (%d)\n",
+ drq->entry_count);
+ if (drq->entry_count < 512) {
+ status = -EINVAL;
+ goto out;
+ }
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_DATA_BUF_SIZE);
+ }
+ bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+ drq->page_count);
+ list_for_each_entry(dmabuf, &drq->page_list, list) {
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ status = -ENXIO;
+ goto out;
+ }
+ drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+ if (drq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ drq->type = LPFC_DRQ;
+ drq->assoc_qid = cq->queue_id;
+ drq->subtype = subtype;
+ drq->host_index = 0;
+ drq->hba_index = 0;
+
+ /* link the header and data RQs onto the parent cq child list */
+ list_add_tail(&hrq->list, &cq->child_list);
+ list_add_tail(&drq->list, &cq->child_list);
+
+out:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_eq_destroy - Destroy an event Queue on the HBA
+ * @eq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @eq by sending an mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @eq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* sanity check on queue memory */
+ if (!eq)
+ return -ENODEV;
+ mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_eq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_EQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
+ eq->queue_id);
+ mbox->vport = eq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+ rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2505 EQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+
+ /* Remove eq from any list */
+ list_del_init(&eq->list);
+ mempool_free(mbox, eq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @cq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @cq by sending an mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @cq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* sanity check on queue memory */
+ if (!cq)
+ return -ENODEV;
+ mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_cq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_CQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
+ cq->queue_id);
+ mbox->vport = cq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.wq_create.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2506 CQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ /* Remove cq from any list */
+ list_del_init(&cq->list);
+ mempool_free(mbox, cq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @qm: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq by sending an mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* sanity check on queue memory */
+ if (!mq)
+ return -ENODEV;
+ mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_mq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+ mq->queue_id);
+ mbox->vport = mq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2507 MQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ /* Remove mq from any list */
+ list_del_init(&mq->list);
+ mempool_free(mbox, mq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @wq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @wq by sending an mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @wq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* sanity check on queue memory */
+ if (!wq)
+ return -ENODEV;
+ mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_wq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
+ wq->queue_id);
+ mbox->vport = wq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2508 WQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ /* Remove wq from any list */
+ list_del_init(&wq->list);
+ mempool_free(mbox, wq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
+ * @rq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @rq by sending an mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @rq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* sanity check on queue memory */
+ if (!hrq || !drq)
+ return -ENODEV;
+ mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_rq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+ hrq->queue_id);
+ mbox->vport = hrq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2509 RQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ return -ENXIO;
+ }
+ bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+ drq->queue_id);
+ rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2510 RQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ list_del_init(&hrq->list);
+ list_del_init(&drq->list);
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
+ * @phba: The virtual port for which this call being executed.
+ * @pdma_phys_addr0: Physical address of the 1st SGL page.
+ * @pdma_phys_addr1: Physical address of the 2nd SGL page.
+ * @xritag: the xritag that ties this io to the SGL pages.
+ *
+ * This routine will post the sgl pages for the IO that has the xritag
+ * that is in the iocbq structure. The xritag is assigned during iocbq
+ * creation and persists for as long as the driver is loaded.
+ * if the caller has fewer than 256 scatter gather segments to map then
+ * pdma_phys_addr1 should be 0.
+ * If the caller needs to map more than 256 scatter gather segment then
+ * pdma_phys_addr1 should be a valid physical address.
+ * physical address for SGLs must be 64 byte aligned.
+ * If you are going to map 2 SGL's then the first one must have 256 entries
+ * the second sgl can have between 1 and 256 entries.
+ *
+ * Return codes:
+ * 0 - Success
+ * -ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+ dma_addr_t pdma_phys_addr0,
+ dma_addr_t pdma_phys_addr1,
+ uint16_t xritag)
+{
+ struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (xritag == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0364 Invalid param:\n");
+ return -EINVAL;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ sizeof(struct lpfc_mbx_post_sgl_pages) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+ &mbox->u.mqe.un.post_sgl_pages;
+ bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+ bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2511 POST_SGL mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * Returns
+ * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+static uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+ unsigned long xri;
+
+ /*
+ * Fetch the next logical xri. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irq(&phba->hbalock);
+ xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri, 0);
+ if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+ spin_unlock_irq(&phba->hbalock);
+ return NO_XRI;
+ } else {
+ set_bit(xri, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return xri;
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available rpis maintained by the driver.
+ **/
+static void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+ phba->sli4_hba.max_cfg_param.xri_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_xri(phba, xri);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. If there is no unused xritag
+ * it will return 0xffff.
+ * The function returns the allocated xritag if successful, else returns zero.
+ * Zero is not a valid xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+ uint16_t xri_index;
+
+ xri_index = lpfc_sli4_alloc_xri(phba);
+ if (xri_index == NO_XRI)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2004 Failed to allocate XRI.last XRITAG is %d"
+ " Max XRI is %d, Used XRI is %d\n",
+ xri_index,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.max_cfg_param.xri_used);
+ return xri_index;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @post_sgl_list: pointer to els sgl entry list.
+ * @count: number of els sgl entries on the list.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+static int
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
+ struct list_head *post_sgl_list,
+ int post_cnt)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xritag_start = 0;
+ int rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2559 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0285 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ pg_pairs = 0;
+ list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+
+ /* Keep the first xritag on the list */
+ if (pg_pairs == 0)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ }
+
+ /* Complete initialization and perform endian conversion. */
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
+ sgl->word0 = cpu_to_le32(sgl->word0);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2513 POST_SGL_BLOCK mailbox command failed "
+ "status x%x add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @count: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @count scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
+ struct list_head *sblist,
+ int count)
+{
+ struct lpfc_scsi_buf *psb;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xritag_start = 0;
+ int rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* Calculate the requested length of the dma memory */
+ reqlen = count * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0217 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0283 Failed to allocate mbox cmd memory\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2561 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ pg_pairs = 0;
+ list_for_each_entry(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xritag on the list */
+ if (pg_pairs == 0)
+ xritag_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ }
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2564 POST_SGL_BLOCK mailbox command failed "
+ "status x%x add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
+ * @phba: pointer to lpfc_hba struct that the frame was received on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function checks the fields in the @fc_hdr to see if the FC frame is a
+ * valid type of frame that the LPFC driver will handle. This function will
+ * return a zero if the frame is a valid frame or a non zero value when the
+ * frame does not pass the check.
+ **/
+static int
+lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
+{
+ /* make rctl_names static to save stack space */
+ static char *rctl_names[] = FC_RCTL_NAMES_INIT;
+ char *type_names[] = FC_TYPE_NAMES_INIT;
+ struct fc_vft_header *fc_vft_hdr;
+ uint32_t *header = (uint32_t *) fc_hdr;
+
+ switch (fc_hdr->fh_r_ctl) {
+ case FC_RCTL_DD_UNCAT: /* uncategorized information */
+ case FC_RCTL_DD_SOL_DATA: /* solicited data */
+ case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
+ case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
+ case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
+ case FC_RCTL_DD_DATA_DESC: /* data descriptor */
+ case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
+ case FC_RCTL_DD_CMD_STATUS: /* command status */
+ case FC_RCTL_ELS_REQ: /* extended link services request */
+ case FC_RCTL_ELS_REP: /* extended link services reply */
+ case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
+ case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
+ case FC_RCTL_BA_ABTS: /* basic link service abort */
+ case FC_RCTL_BA_RMC: /* remove connection */
+ case FC_RCTL_BA_ACC: /* basic accept */
+ case FC_RCTL_BA_RJT: /* basic reject */
+ case FC_RCTL_BA_PRMT:
+ case FC_RCTL_ACK_1: /* acknowledge_1 */
+ case FC_RCTL_ACK_0: /* acknowledge_0 */
+ case FC_RCTL_P_RJT: /* port reject */
+ case FC_RCTL_F_RJT: /* fabric reject */
+ case FC_RCTL_P_BSY: /* port busy */
+ case FC_RCTL_F_BSY: /* fabric busy to data frame */
+ case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
+ case FC_RCTL_LCR: /* link credit reset */
+ case FC_RCTL_END: /* end */
+ break;
+ case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
+ fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+ fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
+ return lpfc_fc_frame_check(phba, fc_hdr);
+ default:
+ goto drop;
+ }
+ switch (fc_hdr->fh_type) {
+ case FC_TYPE_BLS:
+ case FC_TYPE_ELS:
+ case FC_TYPE_FCP:
+ case FC_TYPE_CT:
+ break;
+ case FC_TYPE_IP:
+ case FC_TYPE_ILS:
+ default:
+ goto drop;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+ "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+ rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+ type_names[fc_hdr->fh_type], fc_hdr->fh_type,
+ be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+ be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+ be32_to_cpu(header[6]));
+ return 0;
+drop:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2539 Dropped frame rctl:%s type:%s\n",
+ rctl_names[fc_hdr->fh_r_ctl],
+ type_names[fc_hdr->fh_type]);
+ return 1;
+}
+
+/**
+ * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function processes the FC header to retrieve the VFI from the VF
+ * header, if one exists. This function will return the VFI if one exists
+ * or 0 if no VSAN Header exists.
+ **/
+static uint32_t
+lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
+{
+ struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+
+ if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
+ return 0;
+ return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
+}
+
+/**
+ * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
+ * @phba: Pointer to the HBA structure to search for the vport on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ * @fcfi: The FC Fabric ID that the frame came from
+ *
+ * This function searches the @phba for a vport that matches the content of the
+ * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
+ * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
+ * returns the matching vport pointer or NULL if unable to match frame to a
+ * vport.
+ **/
+static struct lpfc_vport *
+lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
+ uint16_t fcfi)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_vport *vport = NULL;
+ int i;
+ uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
+ fc_hdr->fh_d_id[1] << 8 |
+ fc_hdr->fh_d_id[2]);
+
+ if (did == Fabric_DID)
+ return phba->pport;
+ if ((phba->pport->fc_flag & FC_PT2PT) &&
+ !(phba->link_state == LPFC_HBA_READY))
+ return phba->pport;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ if (phba->fcf.fcfi == fcfi &&
+ vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
+ vports[i]->fc_myDID == did) {
+ vport = vports[i];
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ return vport;
+}
+
+/**
+ * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
+ * @vport: The vport to work on.
+ *
+ * This function updates the receive sequence time stamp for this vport. The
+ * receive sequence time stamp indicates the time that the last frame of the
+ * the sequence that has been idle for the longest amount of time was received.
+ * the driver uses this time stamp to indicate if any received sequences have
+ * timed out.
+ **/
+static void
+lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
+{
+ struct lpfc_dmabuf *h_buf;
+ struct hbq_dmabuf *dmabuf = NULL;
+
+ /* get the oldest sequence on the rcv list */
+ h_buf = list_get_first(&vport->rcv_buffer_list,
+ struct lpfc_dmabuf, list);
+ if (!h_buf)
+ return;
+ dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
+}
+
+/**
+ * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function cleans up all outstanding received sequences. This is called
+ * by the driver when a link event or user action invalidates all the received
+ * sequences.
+ **/
+void
+lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
+{
+ struct lpfc_dmabuf *h_buf, *hnext;
+ struct lpfc_dmabuf *d_buf, *dnext;
+ struct hbq_dmabuf *dmabuf = NULL;
+
+ /* start with the oldest sequence on the rcv list */
+ list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+ dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ list_del_init(&dmabuf->hbuf.list);
+ list_for_each_entry_safe(d_buf, dnext,
+ &dmabuf->dbuf.list, list) {
+ list_del_init(&d_buf->list);
+ lpfc_in_buf_free(vport->phba, d_buf);
+ }
+ lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+ }
+}
+
+/**
+ * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function determines whether any received sequences have timed out by
+ * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
+ * indicates that there is at least one timed out sequence this routine will
+ * go through the received sequences one at a time from most inactive to most
+ * active to determine which ones need to be cleaned up. Once it has determined
+ * that a sequence needs to be cleaned up it will simply free up the resources
+ * without sending an abort.
+ **/
+void
+lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
+{
+ struct lpfc_dmabuf *h_buf, *hnext;
+ struct lpfc_dmabuf *d_buf, *dnext;
+ struct hbq_dmabuf *dmabuf = NULL;
+ unsigned long timeout;
+ int abort_count = 0;
+
+ timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+ vport->rcv_buffer_time_stamp);
+ if (list_empty(&vport->rcv_buffer_list) ||
+ time_before(jiffies, timeout))
+ return;
+ /* start with the oldest sequence on the rcv list */
+ list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+ dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+ dmabuf->time_stamp);
+ if (time_before(jiffies, timeout))
+ break;
+ abort_count++;
+ list_del_init(&dmabuf->hbuf.list);
+ list_for_each_entry_safe(d_buf, dnext,
+ &dmabuf->dbuf.list, list) {
+ list_del_init(&d_buf->list);
+ lpfc_in_buf_free(vport->phba, d_buf);
+ }
+ lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+ }
+ if (abort_count)
+ lpfc_update_rcv_time_stamp(vport);
+}
+
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
+ * This function returns a pointer to the first dmabuf in the sequence list that
+ * the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *new_hdr;
+ struct fc_frame_header *temp_hdr;
+ struct lpfc_dmabuf *d_buf;
+ struct lpfc_dmabuf *h_buf;
+ struct hbq_dmabuf *seq_dmabuf = NULL;
+ struct hbq_dmabuf *temp_dmabuf = NULL;
+
+ INIT_LIST_HEAD(&dmabuf->dbuf.list);
+ dmabuf->time_stamp = jiffies;
+ new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* Use the hdr_buf to find the sequence that this frame belongs to */
+ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+ temp_hdr = (struct fc_frame_header *)h_buf->virt;
+ if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+ (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+ (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+ continue;
+ /* found a pending sequence that matches this frame */
+ seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ break;
+ }
+ if (!seq_dmabuf) {
+ /*
+ * This indicates first frame received for this sequence.
+ * Queue the buffer on the vport's rcv_buffer_list.
+ */
+ list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ lpfc_update_rcv_time_stamp(vport);
+ return dmabuf;
+ }
+ temp_hdr = seq_dmabuf->hbuf.virt;
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+ list_del_init(&seq_dmabuf->hbuf.list);
+ list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ lpfc_update_rcv_time_stamp(vport);
+ return dmabuf;
+ }
+ /* move this sequence to the tail to indicate a young sequence */
+ list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ seq_dmabuf->time_stamp = jiffies;
+ lpfc_update_rcv_time_stamp(vport);
+ if (list_empty(&seq_dmabuf->dbuf.list)) {
+ temp_hdr = dmabuf->hbuf.virt;
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
+ /* find the correct place in the sequence to insert this frame */
+ list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+ temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+ /*
+ * If the frame's sequence count is greater than the frame on
+ * the list then insert the frame right after this frame
+ */
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+ list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
+ * @vport: pointer to a vitural port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort from the partially assembed sequence, described
+ * by the information from basic abbort @dmabuf. It checks to see whether such
+ * partially assembled sequence held by the driver. If so, it shall free up all
+ * the frames from the partially assembled sequence.
+ *
+ * Return
+ * true -- if there is matching partially assembled sequence present and all
+ * the frames freed with the sequence;
+ * false -- if there is no matching partially assembled sequence present so
+ * nothing got aborted in the lower layer driver
+ **/
+static bool
+lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
+ struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *new_hdr;
+ struct fc_frame_header *temp_hdr;
+ struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
+ struct hbq_dmabuf *seq_dmabuf = NULL;
+
+ /* Use the hdr_buf to find the sequence that matches this frame */
+ INIT_LIST_HEAD(&dmabuf->dbuf.list);
+ INIT_LIST_HEAD(&dmabuf->hbuf.list);
+ new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+ temp_hdr = (struct fc_frame_header *)h_buf->virt;
+ if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+ (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+ (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+ continue;
+ /* found a pending sequence that matches this frame */
+ seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ break;
+ }
+
+ /* Free up all the frames from the partially assembled sequence */
+ if (seq_dmabuf) {
+ list_for_each_entry_safe(d_buf, n_buf,
+ &seq_dmabuf->dbuf.list, list) {
+ list_del_init(&d_buf->list);
+ lpfc_in_buf_free(vport->phba, d_buf);
+ }
+ return true;
+ }
+ return false;
+}
+
+/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a vitural port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort from the assembed sequence from upper level
+ * protocol, described by the information from basic abbort @dmabuf. It
+ * checks to see whether such pending context exists at upper level protocol.
+ * If so, it shall clean up the pending context.
+ *
+ * Return
+ * true -- if there is matching pending context of the sequence cleaned
+ * at ulp;
+ * false -- if there is no matching pending context of the sequence present
+ * at ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int handled;
+
+ /* Accepting abort at ulp with SLI4 only */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return false;
+
+ /* Register all caring upper level protocols to attend abort */
+ handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+ if (handled)
+ return true;
+
+ return false;
+}
+
+/**
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
+ * @phba: Pointer to HBA context object.
+ * @cmd_iocbq: pointer to the command iocbq structure.
+ * @rsp_iocbq: pointer to the response iocbq structure.
+ *
+ * This function handles the sequence abort response iocb command complete
+ * event. It properly releases the memory allocated to the sequence abort
+ * accept iocb.
+ **/
+static void
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmd_iocbq,
+ struct lpfc_iocbq *rsp_iocbq)
+{
+ struct lpfc_nodelist *ndlp;
+
+ if (cmd_iocbq) {
+ ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
+ lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ }
+
+ /* Failure means BLS ABORT RSP did not get delivered to remote node*/
+ if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
+ rsp_iocbq->iocb.ulpStatus,
+ rsp_iocbq->iocb.un.ulpWord[4]);
+}
+
+/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates the xri maps to the known range of XRIs allocated an
+ * used by the driver.
+ **/
+uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+ uint16_t xri)
+{
+ uint16_t i;
+
+ for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+ if (xri == phba->sli4_hba.xri_ids[i])
+ return i;
+ }
+ return NO_XRI;
+}
+
+/**
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
+ * @phba: Pointer to HBA context object.
+ * @fc_hdr: pointer to a FC frame header.
+ *
+ * This function sends a basic response to a previous unsol sequence abort
+ * event after aborting the sequence handling.
+ **/
+static void
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr, bool aborted)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *ctiocb = NULL;
+ struct lpfc_nodelist *ndlp;
+ uint16_t oxid, rxid, xri, lxri;
+ uint32_t sid, fctl;
+ IOCB_t *icmd;
+ int rc;
+
+ if (!lpfc_is_link_up(phba))
+ return;
+
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ rxid = be16_to_cpu(fc_hdr->fh_rx_id);
+
+ ndlp = lpfc_findnode_did(vport, sid);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1268 Failed to allocate ndlp for "
+ "oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, sid);
+ /* Put ndlp onto pport node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "3275 Failed to active ndlp found "
+ "for oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
+ }
+
+ /* Allocate buffer for rsp iocb */
+ ctiocb = lpfc_sli_get_iocbq(phba);
+ if (!ctiocb)
+ return;
+
+ /* Extract the F_CTL field from FC_HDR */
+ fctl = sli4_fctl_from_fc_hdr(fc_hdr);
+
+ icmd = &ctiocb->iocb;
+ icmd->un.xseq64.bdl.bdeSize = 0;
+ icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
+
+ /* Fill in the rest of iocb fields */
+ icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
+ icmd->ulpBdeCount = 0;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ ctiocb->context1 = lpfc_nlp_get(ndlp);
+
+ ctiocb->iocb_cmpl = NULL;
+ ctiocb->vport = phba->pport;
+ ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_lxritag = NO_XRI;
+ ctiocb->sli4_xritag = NO_XRI;
+
+ if (fctl & FC_FC_EX_CTX)
+ /* Exchange responder sent the abort so we
+ * own the oxid.
+ */
+ xri = oxid;
+ else
+ xri = rxid;
+ lxri = lpfc_sli4_xri_inrange(phba, xri);
+ if (lxri != NO_XRI)
+ lpfc_set_rrq_active(phba, ndlp, lxri,
+ (xri == oxid) ? rxid : oxid, 0);
+ /* For BA_ABTS from exchange responder, if the logical xri with
+ * the oxid maps to the FCP XRI range, the port no longer has
+ * that exchange context, send a BLS_RJT. Override the IOCB for
+ * a BA_RJT.
+ */
+ if ((fctl & FC_FC_EX_CTX) &&
+ (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
+
+ /* If BA_ABTS failed to abort a partially assembled receive sequence,
+ * the driver no longer has that exchange, send a BLS_RJT. Override
+ * the IOCB for a BA_RJT.
+ */
+ if (aborted == false) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
+
+ if (fctl & FC_FC_EX_CTX) {
+ /* ABTS sent by responder to CT exchange, construction
+ * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
+ * field and RX_ID from ABTS for RX_ID field.
+ */
+ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ } else {
+ /* ABTS sent by initiator to CT exchange, construction
+ * of BA_ACC will need to allocate a new XRI as for the
+ * XRI_TAG field.
+ */
+ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ }
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
+ bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+ /* Xmit CT abts response on exchange <xid> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_nlp_put(ndlp);
+ ctiocb->context1 = NULL;
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ }
+}
+
+/**
+ * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an SLI-4 unsolicited abort event. If the unsolicited
+ * receive sequence is only partially assembed by the driver, it shall abort
+ * the partially assembled frames for the sequence. Otherwise, if the
+ * unsolicited receive sequence has been completely assembled and passed to
+ * the Upper Layer Protocol (UPL), it then mark the per oxid status for the
+ * unsolicited sequence has been aborted. After that, it will issue a basic
+ * accept to accept the abort.
+ **/
+static void
+lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
+ struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_frame_header fc_hdr;
+ uint32_t fctl;
+ bool aborted;
+
+ /* Make a copy of fc_hdr before the dmabuf being released */
+ memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+ fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
+
+ if (fctl & FC_FC_EX_CTX) {
+ /* ABTS by responder to exchange, no cleanup needed */
+ aborted = true;
+ } else {
+ /* ABTS by initiator to exchange, need to do cleanup */
+ aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+ if (aborted == false)
+ aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
+ }
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+ /* Respond with BA_ACC or BA_RJT accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
+}
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * the frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for two major things. 1) That the first frame
+ * has a sequence count of zero. 2) There is a frame with last frame of sequence
+ * set. 3) That there are no holes in the sequence count. The function will
+ * return 1 when the sequence is complete, otherwise it will return 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *hdr;
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *seq_dmabuf;
+ uint32_t fctl;
+ int seq_count = 0;
+
+ hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* make sure first fame of sequence has a sequence count of zero */
+ if (hdr->fh_seq_cnt != seq_count)
+ return 0;
+ fctl = (hdr->fh_f_ctl[0] << 16 |
+ hdr->fh_f_ctl[1] << 8 |
+ hdr->fh_f_ctl[2]);
+ /* If last frame of sequence we can return success. */
+ if (fctl & FC_FC_END_SEQ)
+ return 1;
+ list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+ seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ /* If there is a hole in the sequence count then fail. */
+ if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
+ return 0;
+ fctl = (hdr->fh_f_ctl[0] << 16 |
+ hdr->fh_f_ctl[1] << 8 |
+ hdr->fh_f_ctl[2]);
+ /* If last frame of sequence we can return success. */
+ if (fctl & FC_FC_END_SEQ)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list will be
+ * used to issue to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it throw out the received frames that were not
+ * able to be described and return a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+ struct hbq_dmabuf *hbq_buf;
+ struct lpfc_dmabuf *d_buf, *n_buf;
+ struct lpfc_iocbq *first_iocbq, *iocbq;
+ struct fc_frame_header *fc_hdr;
+ uint32_t sid;
+ uint32_t len, tot_len;
+ struct ulp_bde64 *pbde;
+
+ fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ /* remove from receive buffer list */
+ list_del_init(&seq_dmabuf->hbuf.list);
+ lpfc_update_rcv_time_stamp(vport);
+ /* get the Remote Port's SID */
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+ tot_len = 0;
+ /* Get an iocbq struct to fill in. */
+ first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+ if (first_iocbq) {
+ /* Initialize the first IOCB. */
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
+ first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+
+ /* Check FC Header to see what TYPE of frame we are rcv'ing */
+ if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
+ first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
+ first_iocbq->iocb.un.rcvels.parmRo =
+ sli4_did_from_fc_hdr(fc_hdr);
+ first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
+ } else
+ first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+ first_iocbq->iocb.ulpContext = NO_XRI;
+ first_iocbq->iocb.unsli3.rcvsli3.ox_id =
+ be16_to_cpu(fc_hdr->fh_ox_id);
+ /* iocbq is prepped for internal consumption. Physical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi =
+ vport->phba->vpi_ids[vport->vpi];
+ /* put the first buffer into the first IOCBq */
+ tot_len = bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ first_iocbq->context2 = &seq_dmabuf->dbuf;
+ first_iocbq->context3 = NULL;
+ first_iocbq->iocb.ulpBdeCount = 1;
+ if (tot_len > LPFC_DATA_BUF_SIZE)
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
+ else
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
+ first_iocbq->iocb.un.rcvels.remoteID = sid;
+
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+ }
+ iocbq = first_iocbq;
+ /*
+ * Each IOCBq can have two Buffers assigned, so go through the list
+ * of buffers for this sequence and save two buffers in each IOCBq
+ */
+ list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+ if (!iocbq) {
+ lpfc_in_buf_free(vport->phba, d_buf);
+ continue;
+ }
+ if (!iocbq->context3) {
+ iocbq->context3 = d_buf;
+ iocbq->iocb.ulpBdeCount++;
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ if (len > LPFC_DATA_BUF_SIZE)
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+ else
+ pbde->tus.f.bdeSize = len;
+
+ iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ tot_len += len;
+ } else {
+ iocbq = lpfc_sli_get_iocbq(vport->phba);
+ if (!iocbq) {
+ if (first_iocbq) {
+ first_iocbq->iocb.ulpStatus =
+ IOSTAT_FCP_RSP_ERROR;
+ first_iocbq->iocb.un.ulpWord[4] =
+ IOERR_NO_RESOURCES;
+ }
+ lpfc_in_buf_free(vport->phba, d_buf);
+ continue;
+ }
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
+ iocbq->context2 = d_buf;
+ iocbq->context3 = NULL;
+ iocbq->iocb.ulpBdeCount = 1;
+ if (len > LPFC_DATA_BUF_SIZE)
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
+ else
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
+
+ tot_len += len;
+ iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+
+ iocbq->iocb.un.rcvels.remoteID = sid;
+ list_add_tail(&iocbq->list, &first_iocbq->list);
+ }
+ }
+ return first_iocbq;
+}
+
+static void
+lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
+ struct hbq_dmabuf *seq_dmabuf)
+{
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
+ struct lpfc_hba *phba = vport->phba;
+
+ fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+ if (!iocbq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2707 Ring %d handler: Failed to allocate "
+ "iocb Rctl x%x Type x%x received\n",
+ LPFC_ELS_RING,
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ return;
+ }
+ if (!lpfc_complete_unsol_iocb(phba,
+ &phba->sli.ring[LPFC_ELS_RING],
+ iocbq, fc_hdr->fh_r_ctl,
+ fc_hdr->fh_type))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2540 Ring %d handler: unexpected Rctl "
+ "x%x Type x%x received\n",
+ LPFC_ELS_RING,
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+
+ /* Free iocb created in lpfc_prep_seq */
+ list_for_each_entry_safe(curr_iocb, next_iocb,
+ &iocbq->list, list) {
+ list_del_init(&curr_iocb->list);
+ lpfc_sli_release_iocbq(phba, curr_iocb);
+ }
+ lpfc_sli_release_iocbq(phba, iocbq);
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the received buffers and gives it to upper layers when a received buffer
+ * indicates that it is the final frame in the sequence. The interrupt
+ * service routine processes received buffers at interrupt contexts and adds
+ * received dma buffers to the rb_pend_list queue and signals the worker thread.
+ * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
+ * appropriate receive function when the final frame in a sequence is received.
+ **/
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+ struct hbq_dmabuf *dmabuf)
+{
+ struct hbq_dmabuf *seq_dmabuf;
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_vport *vport;
+ uint32_t fcfi;
+ uint32_t did;
+
+ /* Process each received buffer */
+ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* check to see if this a valid type of frame */
+ if (lpfc_fc_frame_check(phba, fc_hdr)) {
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ if ((bf_get(lpfc_cqe_code,
+ &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
+ fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
+ &dmabuf->cq_event.cqe.rcqe_cmpl);
+ else
+ fcfi = bf_get(lpfc_rcqe_fcf_id,
+ &dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+ if (!vport) {
+ /* throw out the frame */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+
+ /* d_id this frame is directed to */
+ did = sli4_did_from_fc_hdr(fc_hdr);
+
+ /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
+ (did != Fabric_DID)) {
+ /*
+ * Throw out the frame if we are not pt2pt.
+ * The pt2pt protocol allows for discovery frames
+ * to be received without a registered VPI.
+ */
+ if (!(vport->fc_flag & FC_PT2PT) ||
+ (phba->link_state == LPFC_HBA_READY)) {
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ }
+
+ /* Handle the basic abort sequence (BA_ABTS) event */
+ if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
+ lpfc_sli4_handle_unsol_abort(vport, dmabuf);
+ return;
+ }
+
+ /* Link this frame */
+ seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+ if (!seq_dmabuf) {
+ /* unable to add frame to vport - throw it out */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ return;
+ }
+ /* If not last frame in sequence continue processing frames. */
+ if (!lpfc_seq_complete(seq_dmabuf))
+ return;
+
+ /* Send the complete sequence to the upper layer protocol */
+ lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
+}
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * This routine does not require any locks. It's usage is expected
+ * to be driver load or reset recovery when the driver is
+ * sequential.
+ *
+ * Return codes
+ * 0 - successful
+ * -EIO - The mailbox failed to complete successfully.
+ * When this error occurs, the driver is not guaranteed
+ * to have any rpi regions posted to the device and
+ * must either attempt to repost the regions or take a
+ * fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+ struct lpfc_rpi_hdr *rpi_page;
+ uint32_t rc = 0;
+ uint16_t lrpi = 0;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
+ list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ /*
+ * Assign the rpi headers a physical rpi only if the driver
+ * has not initialized those resources. A port reset only
+ * needs the headers posted.
+ */
+ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_RPI_RSRC_RDY)
+ rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
+ rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2008 Error %d posting all rpi "
+ "headers\n", rc);
+ rc = -EIO;
+ break;
+ }
+ }
+
+ exit:
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_RPI_RSRC_RDY);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page: pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec. This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+ uint32_t rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
+ /* The port is notified of the header region via a mailbox command. */
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2001 Unable to allocate memory for issuing "
+ "SLI_CONFIG_SPECIAL mailbox command\n");
+ return -ENOMEM;
+ }
+
+ /* Post all rpi memory regions to the port. */
+ hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+ sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
+
+
+ /* Post the physical rpi to the port for this rpi header. */
+ bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+ rpi_page->start_rpi);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+
+ hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+ hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2514 POST_RPI_HDR mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * Returns
+ * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+ unsigned long rpi;
+ uint16_t max_rpi, rpi_limit;
+ uint16_t rpi_remaining, lrpi = 0;
+ struct lpfc_rpi_hdr *rpi_hdr;
+ unsigned long iflag;
+
+ /*
+ * Fetch the next logical rpi. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ rpi_limit = phba->sli4_hba.next_rpi;
+
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ if (rpi >= rpi_limit)
+ rpi = LPFC_RPI_ALLOC_ERROR;
+ else {
+ set_bit(rpi, phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.max_cfg_param.rpi_used++;
+ phba->sli4_hba.rpi_count++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0001 rpi:%x max:%x lim:%x\n",
+ (int) rpi, max_rpi, rpi_limit);
+
+ /*
+ * Don't try to allocate more rpi header regions if the device limit
+ * has been exhausted.
+ */
+ if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+ (phba->sli4_hba.rpi_count >= max_rpi)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rpi;
+ }
+
+ /*
+ * RPI header postings are not required for SLI4 ports capable of
+ * extents.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rpi;
+ }
+
+ /*
+ * If the driver is running low on rpi resources, allocate another
+ * page now. Note that the next_rpi value is used because
+ * it represents how many are actually in use whereas max_rpi notes
+ * how many are supported max by the device.
+ */
+ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+ rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+ if (!rpi_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2002 Error Could not grow rpi "
+ "count\n");
+ } else {
+ lrpi = rpi_hdr->start_rpi;
+ rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+ lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+ }
+ }
+
+ return rpi;
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+static void
+__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+ if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
+ phba->sli4_hba.rpi_count--;
+ phba->sli4_hba.max_cfg_param.rpi_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_rpi(phba, rpi);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the memory region that
+ * provided rpi via a bitmask.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+ kfree(phba->sli4_hba.rpi_bmask);
+ kfree(phba->sli4_hba.rpi_ids);
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the memory region that
+ * provided rpi via a bitmask.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_hba *phba = ndlp->phba;
+ int rc;
+
+ /* The port is notified of the header region via a mailbox command. */
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* Post all rpi memory regions to the port. */
+ lpfc_resume_rpi(mboxq, ndlp);
+ if (cmpl) {
+ mboxq->mbox_cmpl = cmpl;
+ mboxq->context1 = arg;
+ mboxq->context2 = ndlp;
+ } else
+ mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mboxq->vport = ndlp->vport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2010 Resume RPI Mailbox failed "
+ "status %d, mbxStatus x%x\n", rc,
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @vport: Pointer to the vport for which the vpi is being initialized
+ *
+ * This routine is invoked to activate a vpi with the port.
+ *
+ * Returns:
+ * 0 success
+ * -Evalue otherwise
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc = 0;
+ int retval = MBX_SUCCESS;
+ uint32_t mbox_tmo;
+ struct lpfc_hba *phba = vport->phba;
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+ lpfc_init_vpi(phba, mboxq, vport->vpi);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "2022 INIT VPI Mailbox failed "
+ "status %d, mbxStatus x%x\n", rc,
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ retval = -EIO;
+ }
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, vport->phba->mbox_mem_pool);
+
+ return retval;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record. This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ void *virt_addr;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+
+ virt_addr = mboxq->sge_array->addr[0];
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if ((shdr_status || shdr_add_status) &&
+ (shdr_status != STATUS_FCF_IN_USE))
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2558 ADD_FCF_RECORD mailbox failed with "
+ "status x%x add_status x%x\n",
+ shdr_status, shdr_add_status);
+
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record. This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+ int rc = 0;
+ LPFC_MBOXQ_t *mboxq;
+ uint8_t *bytep;
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ struct lpfc_mbx_sge sge;
+ uint32_t alloc_len, req_len;
+ uint32_t fcfindex;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2009 Failed to allocate mbox for ADD_FCF cmd\n");
+ return -ENOMEM;
+ }
+
+ req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+ req_len, LPFC_SLI4_MBX_NEMBED);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2523 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ virt_addr = mboxq->sge_array->addr[0];
+ /*
+ * Configure the FCF record for FCFI 0. This is the driver's
+ * hardcoded default and gets used in nonFIP mode.
+ */
+ fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+ lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+ /*
+ * Copy the fcf_index and the FCF Record Data. The data starts after
+ * the FCoE header plus word10. The data copy needs to be endian
+ * correct.
+ */
+ bytep += sizeof(uint32_t);
+ lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2515 ADD_FCF_RECORD mailbox failed with "
+ "status 0x%x\n", rc);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ rc = -EIO;
+ } else
+ rc = 0;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record. The
+ * values used are hardcoded. This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+ struct fcf_record *fcf_record,
+ uint16_t fcf_index)
+{
+ memset(fcf_record, 0, sizeof(struct fcf_record));
+ fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+ fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+ fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+ bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+ bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+ bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+ bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+ bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+ bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+ bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+ bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+ bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+ bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+ bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
+ bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+ bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+ LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+ /* Set the VLAN bit map */
+ if (phba->valid_vlan) {
+ fcf_record->vlan_bitmap[phba->vlan_id / 8]
+ = 1 << (phba->vlan_id % 8);
+ }
+}
+
+/**
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * record and processing it one at a time starting from the @fcf_index
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, none 0
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+ phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2000 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_scan;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_scan;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else {
+ /* Reset eligible FCF count for new scan */
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ phba->fcf.eligible_fcf_cnt = 0;
+ error = 0;
+ }
+fail_fcf_scan:
+ if (error) {
+ if (mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ /* FCF scan failed, clear FCF_TS_INPROG flag */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI roundrobin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, none 0
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2763 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for FLOGI roundrobin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, none 0
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2758 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_check_next_fcf_pri
+ * phba pointer to the lpfc_hba struct for this port.
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indecies are put into the
+ * rr_bmask based on their priority level. Starting from the highest priority
+ * to the lowest. The most likely FCF candidate will be in the highest
+ * priority group. When this routine is called it searches the fcf_pri list for
+ * next lowest priority group and repopulates the rr_bmask with only those
+ * fcf_indexes.
+ * returns:
+ * 1=success 0=failure
+ **/
+static int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ int rc;
+ int ret = 0;
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3060 Last IDX %d\n", last_index);
+
+ /* Verify the priority list has 2 or more entries */
+ spin_lock_irq(&phba->hbalock);
+ if (list_empty(&phba->fcf.fcf_pri_list) ||
+ list_is_singular(&phba->fcf.fcf_pri_list)) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "3061 Last IDX %d\n", last_index);
+ return 0; /* Empty rr list */
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ next_fcf_pri = 0;
+ /*
+ * Clear the rr_bmask and set all of the bits that are at this
+ * priority.
+ */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+ continue;
+ /*
+ * the 1st priority that has not FLOGI failed
+ * will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ /*
+ * if next_fcf_pri was not set above and the list is not empty then
+ * we have failed flogis on all of them. So reset flogi failed
+ * and start at the beginning.
+ */
+ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+ /*
+ * the 1st priority that has not FLOGI failed
+ * will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ } else
+ ret = 1;
+ spin_unlock_irq(&phba->hbalock);
+
+ return ret;
+}
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals to the
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_index;
+
+initial_priority:
+ /* Search start from next bit of currently registered FCF index */
+ next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+next_priority:
+ /* Determine the next fcf index to check */
+ next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX,
+ next_fcf_index);
+
+ /* Wrap around condition on phba->fcf.fcf_rr_bmask */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ /*
+ * If we have wrapped then we need to clear the bits that
+ * have been tested so that we can detect when we should
+ * change the priority level.
+ */
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ }
+
+
+ /* Check roundrobin failover list empty condition */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+ next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+ /*
+ * If next fcf index is not found check if there are lower
+ * Priority level fcf's in the fcf_priority list.
+ * Set up the rr_bmask with all of the avaiable fcf bits
+ * at that level and continue the selection process.
+ */
+ if (lpfc_check_next_fcf_pri_level(phba))
+ goto initial_priority;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2844 No roundrobin failover FCF available\n");
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ return LPFC_FCOE_FCF_NEXT_NONE;
+ else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3063 Only FCF available idx %d, flag %x\n",
+ next_fcf_index,
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
+ return next_fcf_index;
+ }
+ }
+
+ if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+ LPFC_FCF_FLOGI_FAILED)
+ goto next_priority;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2845 Get next roundrobin failover FCF (x%x)\n",
+ next_fcf_index);
+
+ return next_fcf_index;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets the FCF record index in to the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit successfully set, otherwise, it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2610 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return -EINVAL;
+ }
+ /* Set the eligible FCF record index bmask */
+ set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2790 Set FCF (x%x) to roundrobin FCF failover "
+ "bmask\n", fcf_index);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2762 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return;
+ }
+ /* Clear the eligible FCF record index bmask */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+ list) {
+ if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+ list_del_init(&fcf_pri->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+ clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2791 Clear FCF (x%x) from roundrobin failover "
+ "bmask\n", fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+static void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ uint32_t shdr_status, shdr_add_status;
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ if (shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2746 Requesting for FCF rediscovery failed "
+ "status x%x add_status x%x\n",
+ shdr_status, shdr_add_status);
+ if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * CVL event triggered FCF rediscover request failed,
+ * last resort to re-try current registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * DEAD FCF event triggered FCF rediscover request
+ * failed, last resort to fail over as a link down
+ * to FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2775 Start FCF rediscover quiescent timer\n");
+ /*
+ * Start FCF rediscovery wait timer for pending FCF
+ * before rescan FCF record table.
+ */
+ lpfc_fcf_redisc_wait_start_timer(phba);
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request for rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ int rc, length;
+
+ /* Cancel retry delay timers to all vports before FCF rediscover */
+ lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2745 Failed to allocate mbox for "
+ "requesting FCF rediscover.\n");
+ return -ENOMEM;
+ }
+
+ length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+ /* Set count to 0 for invalidating the entire FCF database */
+ bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+ /* Issue the mailbox command asynchronously */
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when driver failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+ uint32_t link_state;
+
+ /*
+ * Last resort as FCF DEAD event failover will treat this as
+ * a link down, but save the link state because we don't want
+ * it to be changed to Link Down unless it is already down.
+ */
+ link_state = phba->link_state;
+ lpfc_linkdown(phba);
+ phba->link_state = link_state;
+
+ /* Unregister FCF if no devices connected to it */
+ lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets SLI3 port configure region 23 data through memory dump
+ * mailbox command. When it successfully retrieves data, the size of the data
+ * will be returned, otherwise, 0 will be returned.
+ **/
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+ LPFC_MBOXQ_t *pmb = NULL;
+ MAILBOX_t *mb;
+ uint32_t offset = 0;
+ int rc;
+
+ if (!rgn23_data)
+ return 0;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2600 failed to allocate mailbox memory\n");
+ return 0;
+ }
+ mb = &pmb->u.mb;
+
+ do {
+ lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2601 failed to read config "
+ "region 23, rc 0x%x Status 0x%x\n",
+ rc, mb->mbxStatus);
+ mb->un.varDmp.word_cnt = 0;
+ }
+ /*
+ * dump mem may return a zero when finished or we got a
+ * mailbox error, either way we are done.
+ */
+ if (mb->un.varDmp.word_cnt == 0)
+ break;
+ if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
+ mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
+
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ rgn23_data + offset,
+ mb->un.varDmp.word_cnt);
+ offset += mb->un.varDmp.word_cnt;
+ } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return offset;
+}
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets SLI4 port configure region 23 data through memory dump
+ * mailbox command. When it successfully retrieves data, the size of the data
+ * will be returned, otherwise, 0 will be returned.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+ LPFC_MBOXQ_t *mboxq = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct lpfc_mqe *mqe;
+ uint32_t data_length = 0;
+ int rc;
+
+ if (!rgn23_data)
+ return 0;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3105 failed to allocate mailbox memory\n");
+ return 0;
+ }
+
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+ goto out;
+ mqe = &mboxq->u.mqe;
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc)
+ goto out;
+ data_length = mqe->un.mb_words[5];
+ if (data_length == 0)
+ goto out;
+ if (data_length > DMP_RGN23_SIZE) {
+ data_length = 0;
+ goto out;
+ }
+ lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function read region 23 and parse TLV for port status to
+ * decide if the user disaled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+ uint8_t *rgn23_data = NULL;
+ uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
+
+ /* Get adapter Region 23 data */
+ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+ if (!rgn23_data)
+ goto out;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+ else {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+ goto out;
+ data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+ }
+
+ if (!data_size)
+ goto out;
+
+ /* Check the region signature first */
+ if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2619 Config region 23 has bad signature\n");
+ goto out;
+ }
+ offset += 4;
+
+ /* Check the data structure version */
+ if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2620 Config region 23 has bad version\n");
+ goto out;
+ }
+ offset += 4;
+
+ /* Parse TLV entries in the region */
+ while (offset < data_size) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+ break;
+ /*
+ * If the TLV is not driver specific TLV or driver id is
+ * not linux driver id, skip the record.
+ */
+ if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
+ (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
+ (rgn23_data[offset + 3] != 0)) {
+ offset += rgn23_data[offset + 1] * 4 + 4;
+ continue;
+ }
+
+ /* Driver found a driver specific TLV in the config region */
+ sub_tlv_len = rgn23_data[offset + 1] * 4;
+ offset += 4;
+ tlv_offset = 0;
+
+ /*
+ * Search for configured port state sub-TLV.
+ */
+ while ((offset < data_size) &&
+ (tlv_offset < sub_tlv_len)) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
+ offset += 4;
+ tlv_offset += 4;
+ break;
+ }
+ if (rgn23_data[offset] != PORT_STE_TYPE) {
+ offset += rgn23_data[offset + 1] * 4 + 4;
+ tlv_offset += rgn23_data[offset + 1] * 4 + 4;
+ continue;
+ }
+
+ /* This HBA contains PORT_STE configured */
+ if (!rgn23_data[offset + 2])
+ phba->hba_flag |= LINK_DISABLED;
+
+ goto out;
+ }
+ }
+
+out:
+ kfree(rgn23_data);
+ return;
+}
+
+/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * the mailbox command will be constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs that the imbedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Return 0 is successful and offset will contain the the new offset to use
+ * for the next write.
+ * Return negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ uint32_t size, uint32_t *offset)
+{
+ struct lpfc_mbx_wr_object *wr_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc = 0, i = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t written = 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_WRITE_OBJECT,
+ sizeof(struct lpfc_mbx_wr_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+ wr_object->u.request.write_offset = *offset;
+ sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+ wr_object->u.request.object_name[0] =
+ cpu_to_le32(wr_object->u.request.object_name[0]);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+ list_for_each_entry(dmabuf, dmabuf_list, list) {
+ if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+ break;
+ wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+ wr_object->u.request.bde[i].addrHigh =
+ putPaddrHigh(dmabuf->phys);
+ if (written + SLI4_PAGE_SIZE >= size) {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ (size - written);
+ written += (size - written);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ } else {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ SLI4_PAGE_SIZE;
+ written += SLI4_PAGE_SIZE;
+ }
+ i++;
+ }
+ wr_object->u.request.bde_count = i;
+ bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3025 Write Object mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ } else
+ *offset += wr_object->u.response.actual_write_length;
+ return rc;
+}
+
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterate through the mailboxq and clean up all REG_LOGIN
+ * and REG_VPI mailbox commands associated with the vport. This function
+ * is called when driver want to restart discovery of the vport due to
+ * a Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *act_mbx_ndlp = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ LIST_HEAD(mbox_cmd_list);
+ uint8_t restart_loop;
+
+ /* Clean up internally queued mailbox commands with the vport */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if (mb->vport != vport)
+ continue;
+
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+ (mb->u.mb.mbxCommand != MBX_REG_VPI))
+ continue;
+
+ list_del(&mb->list);
+ list_add_tail(&mb->list, &mbox_cmd_list);
+ }
+ /* Clean up active mailbox command with the vport */
+ mb = phba->sli.mbox_active;
+ if (mb && (mb->vport == vport)) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+ (mb->u.mb.mbxCommand == MBX_REG_VPI))
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
+ /* Put reference count for delayed processing */
+ act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+ /* Unregister the RPI when mailbox complete */
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ }
+ }
+ /* Cleanup any mailbox completions which are not yet processed */
+ do {
+ restart_loop = 0;
+ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+ /*
+ * If this mailox is already processed or it is
+ * for another vport ignore it.
+ */
+ if ((mb->vport != vport) ||
+ (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
+ continue;
+
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+ (mb->u.mb.mbxCommand != MBX_REG_VPI))
+ continue;
+
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ ndlp = (struct lpfc_nodelist *)mb->context2;
+ /* Unregister the RPI when mailbox complete */
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ restart_loop = 1;
+ spin_unlock_irq(&phba->hbalock);
+ spin_lock(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock(shost->host_lock);
+ spin_lock_irq(&phba->hbalock);
+ break;
+ }
+ }
+ } while (restart_loop);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Release the cleaned-up mailbox commands */
+ while (!list_empty(&mbox_cmd_list)) {
+ list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ mb->context2 = NULL;
+ if (ndlp) {
+ spin_lock(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock(shost->host_lock);
+ lpfc_nlp_put(ndlp);
+ }
+ }
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+
+ /* Release the ndlp with the cleaned-up active mailbox command */
+ if (act_mbx_ndlp) {
+ spin_lock(shost->host_lock);
+ act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock(shost->host_lock);
+ lpfc_nlp_put(act_mbx_ndlp);
+ }
+}
+
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempt to submit IOCBs on the txq
+ * to the adapter. For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because the there
+ * are no SGLs. This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *piocbq = NULL;
+ unsigned long iflags = 0;
+ char *fail_msg = NULL;
+ struct lpfc_sglq *sglq;
+ union lpfc_wqe wqe;
+ uint32_t txq_cnt = 0;
+
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ list_for_each_entry(piocbq, &pring->txq, list) {
+ txq_cnt++;
+ }
+
+ if (txq_cnt > pring->txq_max)
+ pring->txq_max = txq_cnt;
+
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ while (!list_empty(&pring->txq)) {
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+
+ piocbq = lpfc_sli_ringtx_get(phba, pring);
+ if (!piocbq) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2823 txq empty and txq_cnt is %d\n ",
+ txq_cnt);
+ break;
+ }
+ sglq = __lpfc_sli_get_sglq(phba, piocbq);
+ if (!sglq) {
+ __lpfc_sli_ringtx_put(phba, pring, piocbq);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ break;
+ }
+ txq_cnt--;
+
+ /* The xri and iocb resources secured,
+ * attempt to issue request
+ */
+ piocbq->sli4_lxritag = sglq->sli4_lxritag;
+ piocbq->sli4_xritag = sglq->sli4_xritag;
+ if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+ fail_msg = "to convert bpl to sgl";
+ else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+ fail_msg = "to convert iocb to wqe";
+ else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ fail_msg = " - Wq is full";
+ else
+ lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+ if (fail_msg) {
+ /* Failed means we can't issue and need to cancel */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2822 IOCB failed %s iotag 0x%x "
+ "xri 0x%x\n",
+ fail_msg,
+ piocbq->iotag, piocbq->sli4_xritag);
+ list_add_tail(&piocbq->list, &completions);
+ }
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ }
+
+ /* Cancel all the IOCBs that cannot be issued */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+
+ return txq_cnt;
+}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
new file mode 100644
index 000000000..7fe99ff80
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -0,0 +1,331 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/* forward declaration for LPFC_IOCB_t's use */
+struct lpfc_hba;
+struct lpfc_vport;
+
+/* Define the context types that SLI handles for abort and sums. */
+typedef enum _lpfc_ctx_cmd {
+ LPFC_CTX_LUN,
+ LPFC_CTX_TGT,
+ LPFC_CTX_HOST
+} lpfc_ctx_cmd;
+
+struct lpfc_cq_event {
+ struct list_head list;
+ union {
+ struct lpfc_mcqe mcqe_cmpl;
+ struct lpfc_acqe_link acqe_link;
+ struct lpfc_acqe_fip acqe_fip;
+ struct lpfc_acqe_dcbx acqe_dcbx;
+ struct lpfc_acqe_grp5 acqe_grp5;
+ struct lpfc_acqe_fc_la acqe_fc;
+ struct lpfc_acqe_sli acqe_sli;
+ struct lpfc_rcqe rcqe_cmpl;
+ struct sli4_wcqe_xri_aborted wcqe_axri;
+ struct lpfc_wcqe_complete wcqe_cmpl;
+ } cqe;
+};
+
+/* This structure is used to handle IOCB requests / responses */
+struct lpfc_iocbq {
+ /* lpfc_iocbqs are used in double linked lists */
+ struct list_head list;
+ struct list_head clist;
+ struct list_head dlist;
+ uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
+ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
+ struct lpfc_cq_event cq_event;
+
+ IOCB_t iocb; /* IOCB cmd */
+ uint8_t retry; /* retry counter for IOCB cmd - if needed */
+ uint32_t iocb_flag;
+#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
+#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
+#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
+#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
+#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
+#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
+#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
+#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
+#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP 0x100 /* security IO */
+#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */
+#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
+#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
+#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */
+
+#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
+#define LPFC_FIP_ELS_ID_SHIFT 14
+
+#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
+#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
+#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */
+
+ uint32_t drvrTimeout; /* driver timeout in seconds */
+ uint32_t fcp_wqidx; /* index to FCP work queue */
+ struct lpfc_vport *vport;/* virtual port pointer */
+ void *context1; /* caller context information */
+ void *context2; /* caller context information */
+ void *context3; /* caller context information */
+ union {
+ wait_queue_head_t *wait_queue;
+ struct lpfc_iocbq *rsp_iocb;
+ struct lpfcMboxq *mbox;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_node_rrq *rrq;
+ } context_un;
+
+ void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+ void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+ void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+};
+
+#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
+
+#define IOCB_SUCCESS 0
+#define IOCB_BUSY 1
+#define IOCB_ERROR 2
+#define IOCB_TIMEDOUT 3
+
+#define LPFC_MBX_WAKE 1
+#define LPFC_MBX_IMED_UNREG 2
+
+typedef struct lpfcMboxq {
+ /* MBOXQs are used in single linked lists */
+ struct list_head list; /* ptr to next mailbox command */
+ union {
+ MAILBOX_t mb; /* Mailbox cmd */
+ struct lpfc_mqe mqe;
+ } u;
+ struct lpfc_vport *vport;/* virtual port pointer */
+ void *context1; /* caller context information */
+ void *context2; /* caller context information */
+
+ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
+ uint8_t mbox_flag;
+ uint16_t in_ext_byte_len;
+ uint16_t out_ext_byte_len;
+ uint8_t mbox_offset_word;
+ struct lpfc_mcqe mcqe;
+ struct lpfc_mbx_nembed_sge_virt *sge_array;
+} LPFC_MBOXQ_t;
+
+#define MBX_POLL 1 /* poll mailbox till command done, then
+ return */
+#define MBX_NOWAIT 2 /* issue command then return immediately */
+
+#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
+ ring */
+#define LPFC_SLI3_MAX_RING 4 /* Max num of SLI3 rings used by driver.
+ For SLI4, an additional ring for each
+ FCP WQ will be allocated. */
+
+struct lpfc_sli_ring;
+
+struct lpfc_sli_ring_mask {
+ uint8_t profile; /* profile associated with ring */
+ uint8_t rctl; /* rctl / type pair configured for ring */
+ uint8_t type; /* rctl / type pair configured for ring */
+ uint8_t rsvd;
+ /* rcv'd unsol event */
+ void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
+ struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+};
+
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_ring_stat {
+ uint64_t iocb_event; /* IOCB event counters */
+ uint64_t iocb_cmd; /* IOCB cmd issued */
+ uint64_t iocb_rsp; /* IOCB rsp received */
+ uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
+ uint64_t iocb_cmd_full; /* IOCB cmd ring full */
+ uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
+ uint64_t iocb_rsp_full; /* IOCB rsp ring full */
+};
+
+struct lpfc_sli3_ring {
+ uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
+ uint32_t next_cmdidx; /* next_cmd index */
+ uint32_t rspidx; /* current index in response ring */
+ uint32_t cmdidx; /* current index in command ring */
+ uint16_t numCiocb; /* number of command iocb's per ring */
+ uint16_t numRiocb; /* number of rsp iocb's per ring */
+ uint16_t sizeCiocb; /* Size of command iocb's in this ring */
+ uint16_t sizeRiocb; /* Size of response iocb's in this ring */
+ uint32_t *cmdringaddr; /* virtual address for cmd rings */
+ uint32_t *rspringaddr; /* virtual address for rsp rings */
+};
+
+struct lpfc_sli4_ring {
+ struct lpfc_queue *wqp; /* Pointer to associated WQ */
+};
+
+
+/* Structure used to hold SLI ring information */
+struct lpfc_sli_ring {
+ uint16_t flag; /* ring flags */
+#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
+#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
+#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
+ uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
+
+ uint8_t rsvd;
+ uint8_t ringno; /* ring number */
+
+ spinlock_t ring_lock; /* lock for issuing commands */
+
+ uint32_t fast_iotag; /* max fastlookup based iotag */
+ uint32_t iotag_ctr; /* keeps track of the next iotag to use */
+ uint32_t iotag_max; /* max iotag value to use */
+ struct list_head txq;
+ uint16_t txq_cnt; /* current length of queue */
+ uint16_t txq_max; /* max length */
+ struct list_head txcmplq;
+ uint16_t txcmplq_cnt; /* current length of queue */
+ uint16_t txcmplq_max; /* max length */
+ uint32_t missbufcnt; /* keep track of buffers to post */
+ struct list_head postbufq;
+ uint16_t postbufq_cnt; /* current length of queue */
+ uint16_t postbufq_max; /* max length */
+ struct list_head iocb_continueq;
+ uint16_t iocb_continueq_cnt; /* current length of queue */
+ uint16_t iocb_continueq_max; /* max length */
+ struct list_head iocb_continue_saveq;
+
+ struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
+ uint32_t num_mask; /* number of mask entries in prt array */
+ void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
+ struct lpfc_sli_ring *, struct lpfc_iocbq *);
+
+ struct lpfc_sli_ring_stat stats; /* SLI statistical info */
+
+ /* cmd ring available */
+ void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
+ struct lpfc_sli_ring *);
+ union {
+ struct lpfc_sli3_ring sli3;
+ struct lpfc_sli4_ring sli4;
+ } sli;
+};
+
+/* Structure used for configuring rings to a specific profile or rctl / type */
+struct lpfc_hbq_init {
+ uint32_t rn; /* Receive buffer notification */
+ uint32_t entry_count; /* max # of entries in HBQ */
+ uint32_t headerLen; /* 0 if not profile 4 or 5 */
+ uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
+ uint32_t profile; /* Selection profile 0=all, 7=logentry */
+ uint32_t ring_mask; /* Binds HBQ to a ring e.g. Ring0=b0001,
+ * ring2=b0100 */
+ uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */
+
+ uint32_t seqlenoff;
+ uint32_t maxlen;
+ uint32_t seqlenbcnt;
+ uint32_t cmdcodeoff;
+ uint32_t cmdmatch[8];
+ uint32_t mask_count; /* number of mask entries in prt array */
+ struct hbq_mask hbqMasks[6];
+
+ /* Non-config rings fields to keep track of buffer allocations */
+ uint32_t buffer_count; /* number of buffers allocated */
+ uint32_t init_count; /* number to allocate when initialized */
+ uint32_t add_count; /* number to allocate when starved */
+} ;
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_stat {
+ uint64_t mbox_stat_err; /* Mbox cmds completed status error */
+ uint64_t mbox_cmd; /* Mailbox commands issued */
+ uint64_t sli_intr; /* Count of Host Attention interrupts */
+ uint64_t sli_prev_intr; /* Previous cnt of Host Attention interrupts */
+ uint64_t sli_ips; /* Host Attention interrupts per sec */
+ uint32_t err_attn_event; /* Error Attn event counters */
+ uint32_t link_event; /* Link event counters */
+ uint32_t mbox_event; /* Mailbox event counters */
+ uint32_t mbox_busy; /* Mailbox cmd busy */
+};
+
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+ uint32_t link_failure_count;
+ uint32_t loss_of_sync_count;
+ uint32_t loss_of_signal_count;
+ uint32_t prim_seq_protocol_err_count;
+ uint32_t invalid_tx_word_count;
+ uint32_t invalid_crc_count;
+ uint32_t error_frames;
+ uint32_t link_events;
+};
+
+/* Structure used to hold SLI information */
+struct lpfc_sli {
+ uint32_t num_rings;
+ uint32_t sli_flag;
+
+ /* Additional sli_flags */
+#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
+#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
+#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
+#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
+
+ struct lpfc_sli_ring *ring;
+ int fcp_ring; /* ring used for FCP initiator commands */
+ int next_ring;
+
+ int extra_ring; /* extra ring used for other protocols */
+
+ struct lpfc_sli_stat slistat; /* SLI statistical info */
+ struct list_head mboxq;
+ uint16_t mboxq_cnt; /* current length of queue */
+ uint16_t mboxq_max; /* max length */
+ LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
+ struct list_head mboxq_cmpl;
+
+ struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
+ cmd */
+
+#define LPFC_IOCBQ_LOOKUP_INCREMENT 1024
+ struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
+ size_t iocbq_lookup_len; /* current lengs of the array */
+ uint16_t last_iotag; /* last allocated IOTAG */
+ unsigned long stats_start; /* in seconds */
+ struct lpfc_lnk_stat lnk_stat_offsets;
+};
+
+/* Timeout for normal outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO 30
+/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_TMO 60
+/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300
+/* Timeout for other flash-based outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO_FLASH_CMD 300
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 000000000..6eca3b812
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,739 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
+#define LPFC_RPI_LOW_WATER_MARK 10
+
+#define LPFC_UNREG_FCF 1
+#define LPFC_SKIP_UNREG_FCF 0
+
+/* Amount of time in seconds for waiting FCF rediscovery to complete */
+#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
+
+/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT 254
+
+/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
+#define LPFC_FCP_IO_CHAN_DEF 4
+#define LPFC_FCP_IO_CHAN_MIN 1
+#define LPFC_FCP_IO_CHAN_MAX 16
+
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM 1
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there is no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX 0
+#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
+
+#define LPFC_FCOE_NULL_VID 0xFFF
+#define LPFC_FCOE_IGNORE_VID 0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3 0xFF
+#define LPFC_FCOE_FCF_MAC4 0xFF
+#define LPFC_FCOE_FCF_MAC5 0xFE
+#define LPFC_FCOE_FCF_MAP0 0x0E
+#define LPFC_FCOE_FCF_MAP1 0xFC
+#define LPFC_FCOE_FCF_MAP2 0x00
+#define LPFC_FCOE_MAX_RCV_SIZE 0x800
+#define LPFC_FCOE_FKA_ADV_PER 0
+#define LPFC_FCOE_FIP_PRIORITY 0x80
+
+#define sli4_sid_from_fc_hdr(fc_hdr) \
+ ((fc_hdr)->fh_s_id[0] << 16 | \
+ (fc_hdr)->fh_s_id[1] << 8 | \
+ (fc_hdr)->fh_s_id[2])
+
+#define sli4_did_from_fc_hdr(fc_hdr) \
+ ((fc_hdr)->fh_d_id[0] << 16 | \
+ (fc_hdr)->fh_d_id[1] << 8 | \
+ (fc_hdr)->fh_d_id[2])
+
+#define sli4_fctl_from_fc_hdr(fc_hdr) \
+ ((fc_hdr)->fh_f_ctl[0] << 16 | \
+ (fc_hdr)->fh_f_ctl[1] << 8 | \
+ (fc_hdr)->fh_f_ctl[2])
+
+#define sli4_type_from_fc_hdr(fc_hdr) \
+ ((fc_hdr)->fh_type)
+
+#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+
+#define INT_FW_UPGRADE 0
+#define RUN_FW_UPGRADE 1
+
+enum lpfc_sli4_queue_type {
+ LPFC_EQ,
+ LPFC_GCQ,
+ LPFC_MCQ,
+ LPFC_WCQ,
+ LPFC_RCQ,
+ LPFC_MQ,
+ LPFC_WQ,
+ LPFC_HRQ,
+ LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+ LPFC_NONE,
+ LPFC_MBOX,
+ LPFC_FCP,
+ LPFC_ELS,
+ LPFC_USOL
+};
+
+union sli4_qe {
+ void *address;
+ struct lpfc_eqe *eqe;
+ struct lpfc_cqe *cqe;
+ struct lpfc_mcqe *mcqe;
+ struct lpfc_wcqe_complete *wcqe_complete;
+ struct lpfc_wcqe_release *wcqe_release;
+ struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+ struct lpfc_rcqe_complete *rcqe_complete;
+ struct lpfc_mqe *mqe;
+ union lpfc_wqe *wqe;
+ union lpfc_wqe128 *wqe128;
+ struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+ struct list_head list;
+ enum lpfc_sli4_queue_type type;
+ enum lpfc_sli4_queue_subtype subtype;
+ struct lpfc_hba *phba;
+ struct list_head child_list;
+ uint32_t entry_count; /* Number of entries to support on the queue */
+ uint32_t entry_size; /* Size of each queue entry. */
+ uint32_t entry_repost; /* Count of entries before doorbell is rung */
+#define LPFC_QUEUE_MIN_REPOST 8
+ uint32_t queue_id; /* Queue ID assigned by the hardware */
+ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
+ struct list_head page_list;
+ uint32_t page_count; /* Number of pages allocated for this queue */
+ uint32_t host_index; /* The host's index for putting or getting */
+ uint32_t hba_index; /* The last known hba index for get or put */
+
+ struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+
+ uint16_t db_format;
+#define LPFC_DB_RING_FORMAT 0x01
+#define LPFC_DB_LIST_FORMAT 0x02
+ void __iomem *db_regaddr;
+ /* For q stats */
+ uint32_t q_cnt_1;
+ uint32_t q_cnt_2;
+ uint32_t q_cnt_3;
+ uint64_t q_cnt_4;
+/* defines for EQ stats */
+#define EQ_max_eqe q_cnt_1
+#define EQ_no_entry q_cnt_2
+#define EQ_badstate q_cnt_3
+#define EQ_processed q_cnt_4
+
+/* defines for CQ stats */
+#define CQ_mbox q_cnt_1
+#define CQ_max_cqe q_cnt_1
+#define CQ_release_wqe q_cnt_2
+#define CQ_xri_aborted q_cnt_3
+#define CQ_wq q_cnt_4
+
+/* defines for WQ stats */
+#define WQ_overflow q_cnt_1
+#define WQ_posted q_cnt_4
+
+/* defines for RQ stats */
+#define RQ_no_posted_buf q_cnt_1
+#define RQ_no_buf_found q_cnt_2
+#define RQ_buf_trunc q_cnt_3
+#define RQ_rcv_buf q_cnt_4
+
+ union sli4_qe qe[1]; /* array to index entries (must be last) */
+};
+
+struct lpfc_sli4_link {
+ uint16_t speed;
+ uint8_t duplex;
+ uint8_t status;
+ uint8_t type;
+ uint8_t number;
+ uint8_t fault;
+ uint16_t logical_speed;
+ uint16_t topology;
+};
+
+struct lpfc_fcf_rec {
+ uint8_t fabric_name[8];
+ uint8_t switch_name[8];
+ uint8_t mac_addr[6];
+ uint16_t fcf_indx;
+ uint32_t priority;
+ uint16_t vlan_id;
+ uint32_t addr_mode;
+ uint32_t flag;
+#define BOOT_ENABLE 0x01
+#define RECORD_VALID 0x02
+};
+
+struct lpfc_fcf_pri_rec {
+ uint16_t fcf_index;
+#define LPFC_FCF_ON_PRI_LIST 0x0001
+#define LPFC_FCF_FLOGI_FAILED 0x0002
+ uint16_t flag;
+ uint32_t priority;
+};
+
+struct lpfc_fcf_pri {
+ struct list_head list;
+ struct lpfc_fcf_pri_rec fcf_rec;
+};
+
+/*
+ * Maximum FCF table index, it is for driver internal book keeping, it
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
+
+struct lpfc_fcf {
+ uint16_t fcfi;
+ uint32_t fcf_flag;
+#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
+#define FCF_REGISTERED 0x02 /* FCF registered with FW */
+#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
+#define FCF_IN_USE 0x08 /* Atleast one discovery completed */
+#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+ uint32_t addr_mode;
+ uint32_t eligible_fcf_cnt;
+ struct lpfc_fcf_rec current_rec;
+ struct lpfc_fcf_rec failover_rec;
+ struct list_head fcf_pri_list;
+ struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
+ uint32_t current_fcf_scan_pri;
+ struct timer_list redisc_wait;
+ unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
+};
+
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION 1
+#define LPFC_REGION23_LAST_REC 0xff
+#define DRIVER_SPECIFIC_TYPE 0xA2
+#define LINUX_DRIVER_ID 0x20
+#define PORT_STE_TYPE 0x1
+
+struct lpfc_fip_param_hdr {
+ uint8_t type;
+#define FCOE_PARAM_TYPE 0xA0
+ uint8_t length;
+#define FCOE_PARAM_LENGTH 2
+ uint8_t parm_version;
+#define FIPP_VERSION 0x01
+ uint8_t parm_flags;
+#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
+#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
+#define FIPP_MODE_ON 0x1
+#define FIPP_MODE_OFF 0x0
+#define FIPP_VLAN_VALID 0x1
+};
+
+struct lpfc_fcoe_params {
+ uint8_t fc_map[3];
+ uint8_t reserved1;
+ uint16_t vlan_tag;
+ uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+ uint8_t type;
+#define FCOE_CONN_TBL_TYPE 0xA1
+ uint8_t length; /* words */
+ uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+ uint16_t flags;
+#define FCFCNCT_VALID 0x0001
+#define FCFCNCT_BOOT 0x0002
+#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
+#define FCFCNCT_FBNM_VALID 0x0008
+#define FCFCNCT_SWNM_VALID 0x0010
+#define FCFCNCT_VLAN_VALID 0x0020
+#define FCFCNCT_AM_VALID 0x0040
+#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
+#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
+
+ uint16_t vlan_tag;
+ uint8_t fabric_name[8];
+ uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+ struct list_head list;
+ struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox. This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+ struct lpfc_dmabuf *dmabuf;
+ struct dma_address dma_address;
+ void *avirt;
+ dma_addr_t aphys;
+ uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
+
+#define LPFC_EQE_SIZE_4B 4
+#define LPFC_EQE_SIZE_16B 16
+#define LPFC_CQE_SIZE 16
+#define LPFC_WQE_SIZE 64
+#define LPFC_WQE128_SIZE 128
+#define LPFC_MQE_SIZE 256
+#define LPFC_RQE_SIZE 8
+
+#define LPFC_EQE_DEF_COUNT 1024
+#define LPFC_CQE_DEF_COUNT 1024
+#define LPFC_WQE_DEF_COUNT 256
+#define LPFC_WQE128_DEF_COUNT 128
+#define LPFC_MQE_DEF_COUNT 16
+#define LPFC_RQE_DEF_COUNT 512
+
+#define LPFC_QUEUE_NOARM false
+#define LPFC_QUEUE_REARM true
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+/*
+ * SLI4 specific data structures
+ */
+struct lpfc_max_cfg_param {
+ uint16_t max_xri;
+ uint16_t xri_base;
+ uint16_t xri_used;
+ uint16_t max_rpi;
+ uint16_t rpi_base;
+ uint16_t rpi_used;
+ uint16_t max_vpi;
+ uint16_t vpi_base;
+ uint16_t vpi_used;
+ uint16_t max_vfi;
+ uint16_t vfi_base;
+ uint16_t vfi_used;
+ uint16_t max_fcfi;
+ uint16_t fcfi_used;
+ uint16_t max_eq;
+ uint16_t max_rq;
+ uint16_t max_cq;
+ uint16_t max_wq;
+};
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+struct lpfc_fcp_eq_hdl {
+ uint32_t idx;
+ struct lpfc_hba *phba;
+ atomic_t fcp_eq_in_use;
+};
+
+/* Port Capabilities for SLI4 Parameters */
+struct lpfc_pc_sli4_params {
+ uint32_t supported;
+ uint32_t if_type;
+ uint32_t sli_rev;
+ uint32_t sli_family;
+ uint32_t featurelevel_1;
+ uint32_t featurelevel_2;
+ uint32_t proto_types;
+#define LPFC_SLI4_PROTO_FCOE 0x0000001
+#define LPFC_SLI4_PROTO_FC 0x0000002
+#define LPFC_SLI4_PROTO_NIC 0x0000004
+#define LPFC_SLI4_PROTO_ISCSI 0x0000008
+#define LPFC_SLI4_PROTO_RDMA 0x0000010
+ uint32_t sge_supp_len;
+ uint32_t if_page_sz;
+ uint32_t rq_db_window;
+ uint32_t loopbk_scope;
+ uint32_t oas_supported;
+ uint32_t eq_pages_max;
+ uint32_t eqe_size;
+ uint32_t cq_pages_max;
+ uint32_t cqe_size;
+ uint32_t mq_pages_max;
+ uint32_t mqe_size;
+ uint32_t mq_elem_cnt;
+ uint32_t wq_pages_max;
+ uint32_t wqe_size;
+ uint32_t rq_pages_max;
+ uint32_t rqe_size;
+ uint32_t hdr_pages_max;
+ uint32_t hdr_size;
+ uint32_t hdr_pp_align;
+ uint32_t sgl_pages_max;
+ uint32_t sgl_pp_align;
+ uint8_t cqv;
+ uint8_t mqv;
+ uint8_t wqv;
+ uint8_t rqv;
+ uint8_t wqsize;
+#define LPFC_WQ_SZ64_SUPPORT 1
+#define LPFC_WQ_SZ128_SUPPORT 2
+};
+
+struct lpfc_iov {
+ uint32_t pf_number;
+ uint32_t vf_number;
+};
+
+struct lpfc_sli4_lnk_info {
+ uint8_t lnk_dv;
+#define LPFC_LNK_DAT_INVAL 0
+#define LPFC_LNK_DAT_VAL 1
+ uint8_t lnk_tp;
+#define LPFC_LNK_GE 0x0 /* FCoE */
+#define LPFC_LNK_FC 0x1 /* FC */
+ uint8_t lnk_no;
+};
+
+#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
+ LPFC_FOF_IO_CHAN_NUM)
+#define LPFC_SLI4_HANDLER_NAME_SZ 16
+
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+ uint16_t phys_id;
+ uint16_t core_id;
+ uint16_t irq;
+ uint16_t channel_id;
+ struct cpumask maskbits;
+};
+#define LPFC_VECTOR_MAP_EMPTY 0xffff
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+ void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0, config space registers */
+ void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+ PCI BAR1, control registers */
+ void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+ PCI BAR2, doorbell registers */
+ union {
+ struct {
+ /* IF Type 0, BAR 0 PCI cfg space reg mem map */
+ void __iomem *UERRLOregaddr;
+ void __iomem *UERRHIregaddr;
+ void __iomem *UEMASKLOregaddr;
+ void __iomem *UEMASKHIregaddr;
+ } if_type0;
+ struct {
+ /* IF Type 2, BAR 0 PCI cfg space reg mem map. */
+ void __iomem *STATUSregaddr;
+ void __iomem *CTRLregaddr;
+ void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
+ void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART 0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
+ } if_type2;
+ } u;
+
+ /* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
+ void __iomem *PSMPHRregaddr;
+
+ /* Well-known SLI INTF register memory map. */
+ void __iomem *SLIINTFregaddr;
+
+ /* IF type 0, BAR 1 function CSR register memory map */
+ void __iomem *ISRregaddr; /* HST_ISR register */
+ void __iomem *IMRregaddr; /* HST_IMR register */
+ void __iomem *ISCRregaddr; /* HST_ISCR register */
+ /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
+ void __iomem *RQDBregaddr; /* RQ_DOORBELL register */
+ void __iomem *WQDBregaddr; /* WQ_DOORBELL register */
+ void __iomem *EQCQDBregaddr; /* EQCQ_DOORBELL register */
+ void __iomem *MQDBregaddr; /* MQ_DOORBELL register */
+ void __iomem *BMBXregaddr; /* BootStrap MBX register */
+
+ uint32_t ue_mask_lo;
+ uint32_t ue_mask_hi;
+ struct lpfc_register sli_intf;
+ struct lpfc_pc_sli4_params pc_sli4_params;
+ struct msix_entry *msix_entries;
+ uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+
+ /* Pointers to the constructed SLI4 queues */
+ struct lpfc_queue **hba_eq;/* Event queues for HBA */
+ struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
+ struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+ uint16_t *fcp_cq_map;
+
+ struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+ struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+ struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+ struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+
+ uint32_t fw_func_mode; /* FW function protocol mode */
+ uint32_t ulp0_mode; /* ULP0 protocol mode */
+ uint32_t ulp1_mode; /* ULP1 protocol mode */
+
+ struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+ /* Optimized Access Storage specific queues/structures */
+
+ struct lpfc_queue *oas_cq; /* OAS completion queue */
+ struct lpfc_queue *oas_wq; /* OAS Work queue */
+ struct lpfc_sli_ring *oas_ring;
+ uint64_t oas_next_lun;
+ uint8_t oas_next_tgt_wwpn[8];
+ uint8_t oas_next_vpt_wwpn[8];
+
+ /* Setup information for various queue parameters */
+ int eq_esize;
+ int eq_ecount;
+ int cq_esize;
+ int cq_ecount;
+ int wq_esize;
+ int wq_ecount;
+ int mq_esize;
+ int mq_ecount;
+ int rq_esize;
+ int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC 10000
+#define LPFC_FP_EQ_MAX_INTR_SEC 10000
+
+ uint32_t intr_enable;
+ struct lpfc_bmbx bmbx;
+ struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t extents_in_use; /* must allocate resource extents. */
+ uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
+ uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+ uint16_t next_rpi;
+ uint16_t scsi_xri_max;
+ uint16_t scsi_xri_cnt;
+ uint16_t els_xri_cnt;
+ uint16_t scsi_xri_start;
+ struct list_head lpfc_free_sgl_list;
+ struct list_head lpfc_sgl_list;
+ struct list_head lpfc_abts_els_sgl_list;
+ struct list_head lpfc_abts_scsi_buf_list;
+ struct lpfc_sglq **lpfc_sglq_active_list;
+ struct list_head lpfc_rpi_hdr_list;
+ unsigned long *rpi_bmask;
+ uint16_t *rpi_ids;
+ uint16_t rpi_count;
+ struct list_head lpfc_rpi_blk_list;
+ unsigned long *xri_bmask;
+ uint16_t *xri_ids;
+ struct list_head lpfc_xri_blk_list;
+ unsigned long *vfi_bmask;
+ uint16_t *vfi_ids;
+ uint16_t vfi_count;
+ struct list_head lpfc_vfi_blk_list;
+ struct lpfc_sli4_flags sli4_flags;
+ struct list_head sp_queue_event;
+ struct list_head sp_cqe_event_pool;
+ struct list_head sp_asynce_work_queue;
+ struct list_head sp_fcp_xri_aborted_work_queue;
+ struct list_head sp_els_xri_aborted_work_queue;
+ struct list_head sp_unsol_work_queue;
+ struct lpfc_sli4_link link_state;
+ struct lpfc_sli4_lnk_info lnk_info;
+ uint32_t pport_name_sta;
+#define LPFC_SLI4_PPNAME_NON 0
+#define LPFC_SLI4_PPNAME_GET 1
+ struct lpfc_iov iov;
+ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+
+ /* CPU to vector mapping information */
+ struct lpfc_vector_map_info *cpu_map;
+ uint16_t num_online_cpu;
+ uint16_t num_present_cpu;
+ uint16_t curr_disp_cpu;
+};
+
+enum lpfc_sge_type {
+ GEN_BUFF_TYPE,
+ SCSI_BUFF_TYPE
+};
+
+enum lpfc_sgl_state {
+ SGL_FREED,
+ SGL_ALLOCATED,
+ SGL_XRI_ABORTED
+};
+
+struct lpfc_sglq {
+ /* lpfc_sglqs are used in double linked lists */
+ struct list_head list;
+ struct list_head clist;
+ enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+ enum lpfc_sgl_state state;
+ struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
+ uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned xri. */
+ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
+ struct sli4_sge *sgl; /* pre-assigned SGL */
+ void *virt; /* virtual address. */
+ dma_addr_t phys; /* physical address */
+};
+
+struct lpfc_rpi_hdr {
+ struct list_head list;
+ uint32_t len;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t page_count;
+ uint32_t start_rpi;
+};
+
+struct lpfc_rsrc_blks {
+ struct list_head list;
+ uint16_t rsrc_start;
+ uint16_t rsrc_size;
+ uint16_t rsrc_used;
+};
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+ uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+ struct lpfc_mbx_sge *);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+ uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
+int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t);
+int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
+int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
+int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
+int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+void lpfc_sli4_free_xri(struct lpfc_hba *, int);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
+ void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+ struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+ struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_vport *);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
new file mode 100644
index 000000000..c37bb9f91
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -0,0 +1,33 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define LPFC_DRIVER_VERSION "10.5.0.0."
+#define LPFC_DRIVER_NAME "lpfc"
+
+/* Used for SLI 2/3 */
+#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
+#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
+
+/* Used for SLI4 */
+#define LPFC_DRIVER_HANDLER_NAME "lpfc:"
+
+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
+ LPFC_DRIVER_VERSION
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 000000000..a87ee33f4
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,903 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+
+inline void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state)
+{
+ struct fc_vport *fc_vport = vport->fc_vport;
+
+ if (fc_vport) {
+ /*
+ * When the transport defines fc_vport_set state we will replace
+ * this code with the following line
+ */
+ /* fc_vport_set_state(fc_vport, new_state); */
+ if (new_state != FC_VPORT_INITIALIZING)
+ fc_vport->vport_last_state = fc_vport->vport_state;
+ fc_vport->vport_state = new_state;
+ }
+
+ /* for all the error states we will set the invternal state to FAILED */
+ switch (new_state) {
+ case FC_VPORT_NO_FABRIC_SUPP:
+ case FC_VPORT_NO_FABRIC_RSCS:
+ case FC_VPORT_FABRIC_LOGOUT:
+ case FC_VPORT_FABRIC_REJ_WWN:
+ case FC_VPORT_FAILED:
+ vport->port_state = LPFC_VPORT_FAILED;
+ break;
+ case FC_VPORT_LINKDOWN:
+ vport->port_state = LPFC_VPORT_UNKNOWN;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+int
+lpfc_alloc_vpi(struct lpfc_hba *phba)
+{
+ unsigned long vpi;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Start at bit 1 because vpi zero is reserved for the physical port */
+ vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
+ if (vpi > phba->max_vpi)
+ vpi = 0;
+ else
+ set_bit(vpi, phba->vpi_bmask);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->sli4_hba.max_cfg_param.vpi_used++;
+ spin_unlock_irq(&phba->hbalock);
+ return vpi;
+}
+
+static void
+lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
+{
+ if (vpi == 0)
+ return;
+ spin_lock_irq(&phba->hbalock);
+ clear_bit(vpi, phba->vpi_bmask);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->sli4_hba.max_cfg_param.vpi_used--;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+static int
+lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ return -ENOMEM;
+ }
+ mb = &pmb->u.mb;
+
+ rc = lpfc_read_sparam(phba, pmb, vport->vpi);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+
+ /*
+ * Grab buffer pointer and clear context1 so we can use
+ * lpfc_sli_issue_box_wait
+ */
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+ pmb->context1 = NULL;
+
+ pmb->vport = vport;
+ rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (signal_pending(current)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1830 Signal aborted mbxCmd x%x\n",
+ mb->mbxCommand);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EINTR;
+ } else {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1818 VPort failed init, mbxCmd x%x "
+ "READ_SPARM mbxStatus x%x, rc = x%x\n",
+ mb->mbxCommand, mb->mbxStatus, rc);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ }
+
+ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof (struct lpfc_name));
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return 0;
+}
+
+static int
+lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
+ const char *name_type)
+{
+ /* ensure that IEEE format 1 addresses
+ * contain zeros in bits 59-48
+ */
+ if (!((wwn->u.wwn[0] >> 4) == 1 &&
+ ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
+ return 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1822 Invalid %s: %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x\n",
+ name_type,
+ wwn->u.wwn[0], wwn->u.wwn[1],
+ wwn->u.wwn[2], wwn->u.wwn[3],
+ wwn->u.wwn[4], wwn->u.wwn[5],
+ wwn->u.wwn[6], wwn->u.wwn[7]);
+ return 0;
+}
+
+static int
+lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
+{
+ struct lpfc_vport *vport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport == new_vport)
+ continue;
+ /* If they match, return not unique */
+ if (memcmp(&vport->fc_sparam.portName,
+ &new_vport->fc_sparam.portName,
+ sizeof(struct lpfc_name)) == 0) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 1;
+}
+
+/**
+ * lpfc_discovery_wait - Wait for driver discovery to quiesce
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This driver calls this routine specifically from lpfc_vport_delete
+ * to enforce a synchronous execution of vport
+ * delete relative to discovery activities. The
+ * lpfc_vport_delete routine should not return until it
+ * can reasonably guarantee that discovery has quiesced.
+ * Post FDISC LOGO, the driver must wait until its SAN teardown is
+ * complete and all resources recovered before allowing
+ * cleanup.
+ *
+ * This routine does not require any locks held.
+ **/
+static void lpfc_discovery_wait(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t wait_flags = 0;
+ unsigned long wait_time_max;
+ unsigned long start_time;
+
+ wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
+ FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
+
+ /*
+ * The time constraint on this loop is a balance between the
+ * fabric RA_TOV value and dev_loss tmo. The driver's
+ * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
+ */
+ wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max += jiffies;
+ start_time = jiffies;
+ while (time_before(jiffies, wait_time_max)) {
+ if ((vport->num_disc_nodes > 0) ||
+ (vport->fc_flag & wait_flags) ||
+ ((vport->port_state > LPFC_VPORT_FAILED) &&
+ (vport->port_state < LPFC_VPORT_READY))) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1833 Vport discovery quiesce Wait:"
+ " state x%x fc_flags x%x"
+ " num_nodes x%x, waiting 1000 msecs"
+ " total wait msecs x%x\n",
+ vport->port_state, vport->fc_flag,
+ vport->num_disc_nodes,
+ jiffies_to_msecs(jiffies - start_time));
+ msleep(1000);
+ } else {
+ /* Base case. Wait variants satisfied. Break out */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1834 Vport discovery quiesced:"
+ " state x%x fc_flags x%x"
+ " wait msecs x%x\n",
+ vport->port_state, vport->fc_flag,
+ jiffies_to_msecs(jiffies
+ - start_time));
+ break;
+ }
+ }
+
+ if (time_after(jiffies, wait_time_max))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1835 Vport discovery quiesce failed:"
+ " state x%x fc_flags x%x wait msecs x%x\n",
+ vport->port_state, vport->fc_flag,
+ jiffies_to_msecs(jiffies - start_time));
+}
+
+int
+lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = pport->phba;
+ struct lpfc_vport *vport = NULL;
+ int instance;
+ int vpi;
+ int rc = VPORT_ERROR;
+ int status;
+
+ if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1808 Create VPORT failed: "
+ "NPIV is not enabled: SLImode:%d\n",
+ phba->sli_rev);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ vpi = lpfc_alloc_vpi(phba);
+ if (vpi == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1809 Create VPORT failed: "
+ "Max VPORTs (%d) exceeded\n",
+ phba->max_vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ /* Assign an unused board number */
+ if ((instance = lpfc_get_instance()) < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1810 Create VPORT failed: Cannot get "
+ "instance number\n");
+ lpfc_free_vpi(phba, vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ vport = lpfc_create_port(phba, instance, &fc_vport->dev);
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1811 Create VPORT failed: vpi x%x\n", vpi);
+ lpfc_free_vpi(phba, vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ vport->vpi = vpi;
+ lpfc_debugfs_initialize(vport);
+
+ if ((status = lpfc_vport_sparm(phba, vport))) {
+ if (status == -EINTR) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1831 Create VPORT Interrupted.\n");
+ rc = VPORT_ERROR;
+ } else {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1813 Create VPORT failed. "
+ "Cannot get sparam\n");
+ rc = VPORT_NORESOURCES;
+ }
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ goto error_out;
+ }
+
+ u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+ u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+
+ memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
+ memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
+
+ if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
+ !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1821 Create VPORT failed. "
+ "Invalid WWN format\n");
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ if (!lpfc_unique_wwpn(phba, vport)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1823 Create VPORT failed. "
+ "Duplicate WWN on HBA\n");
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ /* Create binary sysfs attribute for vport */
+ lpfc_alloc_sysfs_attr(vport);
+
+ /* Set the DFT_LUN_Q_DEPTH accordingly */
+ vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;
+
+ *(struct lpfc_vport **)fc_vport->dd_data = vport;
+ vport->fc_vport = fc_vport;
+
+ /*
+ * In SLI4, the vpi must be activated before it can be used
+ * by the port.
+ */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (pport->fc_flag & FC_VFI_REGISTERED)) {
+ rc = lpfc_sli4_init_vpi(vport);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1838 Failed to INIT_VPI on vpi %d "
+ "status %d\n", vpi, rc);
+ rc = VPORT_NORESOURCES;
+ lpfc_free_vpi(phba, vpi);
+ goto error_out;
+ }
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ /*
+ * Driver cannot INIT_VPI now. Set the flags to
+ * init_vpi when reg_vfi complete.
+ */
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ if ((phba->link_state < LPFC_LINK_UP) ||
+ (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ if (disable) {
+ lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ /* Use the Physical nodes Fabric NDLP to determine if the link is
+ * up and ready to FDISC.
+ */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+ lpfc_set_disctmo(vport);
+ lpfc_initial_fdisc(vport);
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0262 No NPIV Fabric support\n");
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ }
+ rc = VPORT_OK;
+
+out:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1825 Vport Created.\n");
+ lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
+error_out:
+ return rc;
+}
+
+static int
+disable_vport(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+ long timeout;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+ && phba->link_state >= LPFC_LINK_UP) {
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);
+ }
+
+ lpfc_sli_host_down(vport);
+
+ /* Mark all nodes for discovery so we can remove them by
+ * calling lpfc_cleanup_rpis(vport, 1)
+ */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
+ lpfc_cleanup_rpis(vport, 1);
+
+ lpfc_stop_vport_timers(vport);
+ lpfc_unreg_all_rpis(vport);
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+ * scsi_host_put() to release the vport.
+ */
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1826 Vport Disabled.\n");
+ return VPORT_OK;
+}
+
+static int
+enable_vport(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if ((phba->link_state < LPFC_LINK_UP) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ return VPORT_OK;
+ }
+
+ spin_lock_irq(shost->host_lock);
+ vport->load_flag |= FC_LOADING;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+
+ /* Use the Physical nodes Fabric NDLP to determine if the link is
+ * up and ready to FDISC.
+ */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+ && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+ lpfc_set_disctmo(vport);
+ lpfc_initial_fdisc(vport);
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0264 No NPIV Fabric support\n");
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1827 Vport Enabled.\n");
+ return VPORT_OK;
+}
+
+int
+lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ if (disable)
+ return disable_vport(fc_vport);
+ else
+ return enable_vport(fc_vport);
+}
+
+
+int
+lpfc_vport_delete(struct fc_vport *fc_vport)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ long timeout;
+ bool ns_ndlp_referenced = false;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1812 vport_delete failed: Cannot delete "
+ "physical host\n");
+ return VPORT_ERROR;
+ }
+
+ /* If the vport is a static vport fail the deletion. */
+ if ((vport->vport_flag & STATIC_VPORT) &&
+ !(phba->pport->load_flag & FC_UNLOADING)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1837 vport_delete failed: Cannot delete "
+ "static vport.\n");
+ return VPORT_ERROR;
+ }
+ spin_lock_irq(&phba->hbalock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * If we are not unloading the driver then prevent the vport_delete
+ * from happening until after this vport's discovery is finished.
+ */
+ if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ int check_count = 0;
+ while (check_count < ((phba->fc_ratov * 3) + 3) &&
+ vport->port_state > LPFC_VPORT_FAILED &&
+ vport->port_state < LPFC_VPORT_READY) {
+ check_count++;
+ msleep(1000);
+ }
+ if (vport->port_state > LPFC_VPORT_FAILED &&
+ vport->port_state < LPFC_VPORT_READY)
+ return -EAGAIN;
+ }
+ /*
+ * This is a bit of a mess. We want to ensure the shost doesn't get
+ * torn down until we're done with the embedded lpfc_vport structure.
+ *
+ * Beyond holding a reference for this function, we also need a
+ * reference for outstanding I/O requests we schedule during delete
+ * processing. But once we scsi_remove_host() we can no longer obtain
+ * a reference through scsi_host_get().
+ *
+ * So we take two references here. We release one reference at the
+ * bottom of the function -- after delinking the vport. And we
+ * release the other at the completion of the unreg_vpi that get's
+ * initiated after we've disposed of all other resources associated
+ * with the port.
+ */
+ if (!scsi_host_get(shost))
+ return VPORT_INVAL;
+ if (!scsi_host_get(shost)) {
+ scsi_host_put(shost);
+ return VPORT_INVAL;
+ }
+ lpfc_free_sysfs_attr(vport);
+
+ lpfc_debugfs_terminate(vport);
+
+ /*
+ * The call to fc_remove_host might release the NameServer ndlp. Since
+ * we might need to use the ndlp to send the DA_ID CT command,
+ * increment the reference for the NameServer ndlp to prevent it from
+ * being released.
+ */
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_nlp_get(ndlp);
+ ns_ndlp_referenced = true;
+ }
+
+ /* Remove FC host and then SCSI host with the vport */
+ fc_remove_host(lpfc_shost_from_vport(vport));
+ scsi_remove_host(lpfc_shost_from_vport(vport));
+
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+
+ /* In case of driver unload, we shall not perform fabric logo as the
+ * worker thread already stopped at this stage and, in this case, we
+ * can safely skip the fabric logo.
+ */
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ phba->link_state >= LPFC_LINK_UP) {
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto skip_logo;
+ else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto skip_logo;
+ }
+ /* Remove ndlp from vport npld list */
+ lpfc_dequeue_node(vport, ndlp);
+
+ /* Indicate free memory when release */
+ spin_lock_irq(&phba->ndlp_lock);
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+ /* Kick off release ndlp when it can be safely done */
+ lpfc_nlp_put(ndlp);
+ }
+ goto skip_logo;
+ }
+
+ /* Otherwise, we will perform fabric logo as needed */
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ phba->link_state >= LPFC_LINK_UP &&
+ phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ if (vport->cfg_enable_da_id) {
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
+ while (vport->ct_flags && timeout)
+ timeout = schedule_timeout(timeout);
+ else
+ lpfc_printf_log(vport->phba, KERN_WARNING,
+ LOG_VPORT,
+ "1829 CT command failed to "
+ "delete objects on fabric\n");
+ }
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, allocate one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ goto skip_logo;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Indicate free memory when release */
+ NLP_SET_FREE_REQ(ndlp);
+ } else {
+ if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto skip_logo;
+ }
+
+ /* Remove ndlp from vport list */
+ lpfc_dequeue_node(vport, ndlp);
+ spin_lock_irq(&phba->ndlp_lock);
+ if (!NLP_CHK_FREE_REQ(ndlp))
+ /* Indicate free memory when release */
+ NLP_SET_FREE_REQ(ndlp);
+ else {
+ /* Skip this if ndlp is already in free mode */
+ spin_unlock_irq(&phba->ndlp_lock);
+ goto skip_logo;
+ }
+ spin_unlock_irq(&phba->ndlp_lock);
+ }
+
+ /*
+ * If the vpi is not registered, then a valid FDISC doesn't
+ * exist and there is no need for a ELS LOGO. Just cleanup
+ * the ndlp.
+ */
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ lpfc_nlp_put(ndlp);
+ goto skip_logo;
+ }
+
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);
+ }
+
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_discovery_wait(vport);
+
+skip_logo:
+
+ /*
+ * If the NameServer ndlp has been incremented to allow the DA_ID CT
+ * command to be sent, decrement the ndlp now.
+ */
+ if (ns_ndlp_referenced) {
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ lpfc_nlp_put(ndlp);
+ }
+
+ lpfc_cleanup(vport);
+ lpfc_sli_host_down(vport);
+
+ lpfc_stop_vport_timers(vport);
+
+ if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ lpfc_unreg_all_rpis(vport);
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
+ * does the scsi_host_put() to release the vport.
+ */
+ if (lpfc_mbx_unreg_vpi(vport))
+ scsi_host_put(shost);
+ } else
+ scsi_host_put(shost);
+
+ lpfc_free_vpi(phba, vport->vpi);
+ vport->work_port_events = 0;
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1828 Vport Deleted.\n");
+ scsi_host_put(shost);
+ return VPORT_OK;
+}
+
+struct lpfc_vport **
+lpfc_create_vport_work_array(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *port_iterator;
+ struct lpfc_vport **vports;
+ int index = 0;
+ vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
+ GFP_KERNEL);
+ if (vports == NULL)
+ return NULL;
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ if (port_iterator->load_flag & FC_UNLOADING)
+ continue;
+ if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
+ lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
+ "1801 Create vport work array FAILED: "
+ "cannot do scsi_host_get\n");
+ continue;
+ }
+ vports[index++] = port_iterator;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return vports;
+}
+
+void
+lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
+{
+ int i;
+ if (vports == NULL)
+ return;
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ scsi_host_put(lpfc_shost_from_vport(vports[i]));
+ kfree(vports);
+}
+
+
+/**
+ * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
+ * @vport: Pointer to vport object.
+ *
+ * This function resets the statistical data for the vport. This function
+ * is called with the host_lock held
+ **/
+void
+lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->lat_data)
+ memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
+ sizeof(struct lpfc_scsicmd_bkt));
+ }
+}
+
+
+/**
+ * lpfc_alloc_bucket - Allocate data buffer required for statistical data
+ * @vport: Pointer to vport object.
+ *
+ * This function allocates data buffer required for all the FC
+ * nodes of the vport to collect statistical data.
+ **/
+void
+lpfc_alloc_bucket(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+
+ kfree(ndlp->lat_data);
+ ndlp->lat_data = NULL;
+
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+ sizeof(struct lpfc_scsicmd_bkt),
+ GFP_ATOMIC);
+
+ if (!ndlp->lat_data)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0287 lpfc_alloc_bucket failed to "
+ "allocate statistical data buffer DID "
+ "0x%x\n", ndlp->nlp_DID);
+ }
+ }
+}
+
+/**
+ * lpfc_free_bucket - Free data buffer required for statistical data
+ * @vport: Pointer to vport object.
+ *
+ * Th function frees statistical data buffer of all the FC
+ * nodes of the vport.
+ **/
+void
+lpfc_free_bucket(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+
+ kfree(ndlp->lat_data);
+ ndlp->lat_data = NULL;
+ }
+}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 000000000..6b2c94eb8
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,120 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#ifndef _H_LPFC_VPORT
+#define _H_LPFC_VPORT
+
+/* API version values (each will be an individual bit) */
+#define VPORT_API_VERSION_1 0x01
+
+/* Values returned via lpfc_vport_getinfo() */
+struct vport_info {
+
+ uint32_t api_versions;
+ uint8_t linktype;
+#define VPORT_TYPE_PHYSICAL 0
+#define VPORT_TYPE_VIRTUAL 1
+
+ uint8_t state;
+#define VPORT_STATE_OFFLINE 0
+#define VPORT_STATE_ACTIVE 1
+#define VPORT_STATE_FAILED 2
+
+ uint8_t fail_reason;
+ uint8_t prev_fail_reason;
+#define VPORT_FAIL_UNKNOWN 0
+#define VPORT_FAIL_LINKDOWN 1
+#define VPORT_FAIL_FAB_UNSUPPORTED 2
+#define VPORT_FAIL_FAB_NORESOURCES 3
+#define VPORT_FAIL_FAB_LOGOUT 4
+#define VPORT_FAIL_ADAP_NORESOURCES 5
+
+ uint8_t node_name[8]; /* WWNN */
+ uint8_t port_name[8]; /* WWPN */
+
+ struct Scsi_Host *shost;
+
+/* Following values are valid only on physical links */
+ uint32_t vports_max;
+ uint32_t vports_inuse;
+ uint32_t rpi_max;
+ uint32_t rpi_inuse;
+#define VPORT_CNT_INVALID 0xFFFFFFFF
+};
+
+/* data used in link creation */
+struct vport_data {
+ uint32_t api_version;
+
+ uint32_t options;
+#define VPORT_OPT_AUTORETRY 0x01
+
+ uint8_t node_name[8]; /* WWNN */
+ uint8_t port_name[8]; /* WWPN */
+
+/*
+ * Upon successful creation, vport_shost will point to the new Scsi_Host
+ * structure for the new virtual link.
+ */
+ struct Scsi_Host *vport_shost;
+};
+
+/* API function return codes */
+#define VPORT_OK 0
+#define VPORT_ERROR -1
+#define VPORT_INVAL -2
+#define VPORT_NOMEM -3
+#define VPORT_NORESOURCES -4
+
+int lpfc_vport_create(struct fc_vport *, bool);
+int lpfc_vport_delete(struct fc_vport *);
+int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
+int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
+struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
+void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
+
+/*
+ * queuecommand VPORT-specific return codes. Specified in the host byte code.
+ * Returned when the virtual link has failed or is not active.
+ */
+#define DID_VPORT_ERROR 0x0f
+
+#define VPORT_INFO 0x1
+#define VPORT_CREATE 0x2
+#define VPORT_DELETE 0x4
+
+struct vport_cmd_tag {
+ uint32_t cmd;
+ struct vport_data cdata;
+ struct vport_info cinfo;
+ void *vport;
+ int vport_num;
+};
+
+void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state);
+
+void lpfc_vport_reset_stat_data(struct lpfc_vport *);
+void lpfc_alloc_bucket(struct lpfc_vport *);
+void lpfc_free_bucket(struct lpfc_vport *);
+
+#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
new file mode 100644
index 000000000..0adb2e015
--- /dev/null
+++ b/drivers/scsi/mac53c94.c
@@ -0,0 +1,572 @@
+/*
+ * SCSI low-level driver for the 53c94 SCSI bus adaptor found
+ * on Power Macintosh computers, controlling the external SCSI chain.
+ * We assume the 53c94 is connected to a DBDMA (descriptor-based DMA)
+ * controller.
+ *
+ * Paul Mackerras, August 1996.
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/macio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "mac53c94.h"
+
+enum fsc_phase {
+ idle,
+ selecting,
+ dataing,
+ completing,
+ busfreeing,
+};
+
+struct fsc_state {
+ struct mac53c94_regs __iomem *regs;
+ int intr;
+ struct dbdma_regs __iomem *dma;
+ int dmaintr;
+ int clk_freq;
+ struct Scsi_Host *host;
+ struct scsi_cmnd *request_q;
+ struct scsi_cmnd *request_qtail;
+ struct scsi_cmnd *current_req; /* req we're currently working on */
+ enum fsc_phase phase; /* what we're currently trying to do */
+ struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */
+ void *dma_cmd_space;
+ struct pci_dev *pdev;
+ dma_addr_t dma_addr;
+ struct macio_dev *mdev;
+};
+
+static void mac53c94_init(struct fsc_state *);
+static void mac53c94_start(struct fsc_state *);
+static void mac53c94_interrupt(int, void *);
+static irqreturn_t do_mac53c94_interrupt(int, void *);
+static void cmd_done(struct fsc_state *, int result);
+static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *);
+
+
+static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct fsc_state *state;
+
+#if 0
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ int i;
+ printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
+ for (i = 0; i < cmd->cmd_len; ++i)
+ printk(KERN_CONT " %.2x", cmd->cmnd[i]);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
+ scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
+ }
+#endif
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+
+ state = (struct fsc_state *) cmd->device->host->hostdata;
+
+ if (state->request_q == NULL)
+ state->request_q = cmd;
+ else
+ state->request_qtail->host_scribble = (void *) cmd;
+ state->request_qtail = cmd;
+
+ if (state->phase == idle)
+ mac53c94_start(state);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(mac53c94_queue)
+
+static int mac53c94_host_reset(struct scsi_cmnd *cmd)
+{
+ struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata;
+ struct mac53c94_regs __iomem *regs = state->regs;
+ struct dbdma_regs __iomem *dma = state->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(cmd->device->host->host_lock, flags);
+
+ writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
+ writeb(CMD_SCSI_RESET, &regs->command); /* assert RST */
+ udelay(100); /* leave it on for a while (>= 25us) */
+ writeb(CMD_RESET, &regs->command);
+ udelay(20);
+ mac53c94_init(state);
+ writeb(CMD_NOP, &regs->command);
+
+ spin_unlock_irqrestore(cmd->device->host->host_lock, flags);
+ return SUCCESS;
+}
+
+static void mac53c94_init(struct fsc_state *state)
+{
+ struct mac53c94_regs __iomem *regs = state->regs;
+ struct dbdma_regs __iomem *dma = state->dma;
+ int x;
+
+ writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1);
+ writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */
+ writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor);
+ writeb(CF2_FEATURE_EN, &regs->config2);
+ writeb(0, &regs->config3);
+ writeb(0, &regs->sync_period);
+ writeb(0, &regs->sync_offset);
+ x = readb(&regs->interrupt);
+ writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
+}
+
+/*
+ * Start the next command for a 53C94.
+ * Should be called with interrupts disabled.
+ */
+static void mac53c94_start(struct fsc_state *state)
+{
+ struct scsi_cmnd *cmd;
+ struct mac53c94_regs __iomem *regs = state->regs;
+ int i;
+
+ if (state->phase != idle || state->current_req != NULL)
+ panic("inappropriate mac53c94_start (state=%p)", state);
+ if (state->request_q == NULL)
+ return;
+ state->current_req = cmd = state->request_q;
+ state->request_q = (struct scsi_cmnd *) cmd->host_scribble;
+
+ /* Off we go */
+ writeb(0, &regs->count_lo);
+ writeb(0, &regs->count_mid);
+ writeb(0, &regs->count_hi);
+ writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
+ udelay(1);
+ writeb(CMD_FLUSH, &regs->command);
+ udelay(1);
+ writeb(cmd->device->id, &regs->dest_id);
+ writeb(0, &regs->sync_period);
+ writeb(0, &regs->sync_offset);
+
+ /* load the command into the FIFO */
+ for (i = 0; i < cmd->cmd_len; ++i)
+ writeb(cmd->cmnd[i], &regs->fifo);
+
+ /* do select without ATN XXX */
+ writeb(CMD_SELECT, &regs->command);
+ state->phase = selecting;
+
+ set_dma_cmds(state, cmd);
+}
+
+static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+ mac53c94_interrupt(irq, dev_id);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void mac53c94_interrupt(int irq, void *dev_id)
+{
+ struct fsc_state *state = (struct fsc_state *) dev_id;
+ struct mac53c94_regs __iomem *regs = state->regs;
+ struct dbdma_regs __iomem *dma = state->dma;
+ struct scsi_cmnd *cmd = state->current_req;
+ int nb, stat, seq, intr;
+ static int mac53c94_errors;
+
+ /*
+ * Apparently, reading the interrupt register unlatches
+ * the status and sequence step registers.
+ */
+ seq = readb(&regs->seqstep);
+ stat = readb(&regs->status);
+ intr = readb(&regs->interrupt);
+
+#if 0
+ printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n",
+ intr, stat, seq, state->phase);
+#endif
+
+ if (intr & INTR_RESET) {
+ /* SCSI bus was reset */
+ printk(KERN_INFO "external SCSI bus reset detected\n");
+ writeb(CMD_NOP, &regs->command);
+ writel(RUN << 16, &dma->control); /* stop dma */
+ cmd_done(state, DID_RESET << 16);
+ return;
+ }
+ if (intr & INTR_ILL_CMD) {
+ printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n",
+ intr, stat, seq, state->phase);
+ cmd_done(state, DID_ERROR << 16);
+ return;
+ }
+ if (stat & STAT_ERROR) {
+#if 0
+ /* XXX these seem to be harmless? */
+ printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n",
+ intr, stat, seq, state->phase);
+#endif
+ ++mac53c94_errors;
+ writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
+ }
+ if (cmd == 0) {
+ printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
+ return;
+ }
+ if (stat & STAT_PARITY) {
+ printk(KERN_ERR "mac53c94: parity error\n");
+ cmd_done(state, DID_PARITY << 16);
+ return;
+ }
+ switch (state->phase) {
+ case selecting:
+ if (intr & INTR_DISCONNECT) {
+ /* selection timed out */
+ cmd_done(state, DID_BAD_TARGET << 16);
+ return;
+ }
+ if (intr != INTR_BUS_SERV + INTR_DONE) {
+ printk(KERN_DEBUG "got intr %x during selection\n", intr);
+ cmd_done(state, DID_ERROR << 16);
+ return;
+ }
+ if ((seq & SS_MASK) != SS_DONE) {
+ printk(KERN_DEBUG "seq step %x after command\n", seq);
+ cmd_done(state, DID_ERROR << 16);
+ return;
+ }
+ writeb(CMD_NOP, &regs->command);
+ /* set DMA controller going if any data to transfer */
+ if ((stat & (STAT_MSG|STAT_CD)) == 0
+ && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
+ nb = cmd->SCp.this_residual;
+ if (nb > 0xfff0)
+ nb = 0xfff0;
+ cmd->SCp.this_residual -= nb;
+ writeb(nb, &regs->count_lo);
+ writeb(nb >> 8, &regs->count_mid);
+ writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
+ writel(virt_to_phys(state->dma_cmds), &dma->cmdptr);
+ writel((RUN << 16) | RUN, &dma->control);
+ writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command);
+ state->phase = dataing;
+ break;
+ } else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) {
+ /* up to status phase already */
+ writeb(CMD_I_COMPLETE, &regs->command);
+ state->phase = completing;
+ } else {
+ printk(KERN_DEBUG "in unexpected phase %x after cmd\n",
+ stat & STAT_PHASE);
+ cmd_done(state, DID_ERROR << 16);
+ return;
+ }
+ break;
+
+ case dataing:
+ if (intr != INTR_BUS_SERV) {
+ printk(KERN_DEBUG "got intr %x before status\n", intr);
+ cmd_done(state, DID_ERROR << 16);
+ return;
+ }
+ if (cmd->SCp.this_residual != 0
+ && (stat & (STAT_MSG|STAT_CD)) == 0) {
+ /* Set up the count regs to transfer more */
+ nb = cmd->SCp.this_residual;
+ if (nb > 0xfff0)
+ nb = 0xfff0;
+ cmd->SCp.this_residual -= nb;
+ writeb(nb, &regs->count_lo);
+ writeb(nb >> 8, &regs->count_mid);
+ writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
+ writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command);
+ break;
+ }
+ if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) {
+ printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
+ }
+ writel(RUN << 16, &dma->control); /* stop dma */
+ scsi_dma_unmap(cmd);
+ /* should check dma status */
+ writeb(CMD_I_COMPLETE, &regs->command);
+ state->phase = completing;
+ break;
+ case completing:
+ if (intr != INTR_DONE) {
+ printk(KERN_DEBUG "got intr %x on completion\n", intr);
+ cmd_done(state, DID_ERROR << 16);
+ return;
+ }
+ cmd->SCp.Status = readb(&regs->fifo);
+ cmd->SCp.Message = readb(&regs->fifo);
+ cmd->result = CMD_ACCEPT_MSG;
+ writeb(CMD_ACCEPT_MSG, &regs->command);
+ state->phase = busfreeing;
+ break;
+ case busfreeing:
+ if (intr != INTR_DISCONNECT) {
+ printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr);
+ }
+ cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8)
+ + cmd->SCp.Status);
+ break;
+ default:
+ printk(KERN_DEBUG "don't know about phase %d\n", state->phase);
+ }
+}
+
+static void cmd_done(struct fsc_state *state, int result)
+{
+ struct scsi_cmnd *cmd;
+
+ cmd = state->current_req;
+ if (cmd != 0) {
+ cmd->result = result;
+ (*cmd->scsi_done)(cmd);
+ state->current_req = NULL;
+ }
+ state->phase = idle;
+ mac53c94_start(state);
+}
+
+/*
+ * Set up DMA commands for transferring data.
+ */
+static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
+{
+ int i, dma_cmd, total, nseg;
+ struct scatterlist *scl;
+ struct dbdma_cmd *dcmds;
+ dma_addr_t dma_addr;
+ u32 dma_len;
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (!nseg)
+ return;
+
+ dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
+ OUTPUT_MORE : INPUT_MORE;
+ dcmds = state->dma_cmds;
+ total = 0;
+
+ scsi_for_each_sg(cmd, scl, nseg, i) {
+ dma_addr = sg_dma_address(scl);
+ dma_len = sg_dma_len(scl);
+ if (dma_len > 0xffff)
+ panic("mac53c94: scatterlist element >= 64k");
+ total += dma_len;
+ dcmds->req_count = cpu_to_le16(dma_len);
+ dcmds->command = cpu_to_le16(dma_cmd);
+ dcmds->phy_addr = cpu_to_le32(dma_addr);
+ dcmds->xfer_status = 0;
+ ++dcmds;
+ }
+
+ dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
+ dcmds[-1].command = cpu_to_le16(dma_cmd);
+ dcmds->command = cpu_to_le16(DBDMA_STOP);
+ cmd->SCp.this_residual = total;
+}
+
+static struct scsi_host_template mac53c94_template = {
+ .proc_name = "53c94",
+ .name = "53C94",
+ .queuecommand = mac53c94_queue,
+ .eh_host_reset_handler = mac53c94_host_reset,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
+{
+ struct device_node *node = macio_get_of_node(mdev);
+ struct pci_dev *pdev = macio_get_pci_dev(mdev);
+ struct fsc_state *state;
+ struct Scsi_Host *host;
+ void *dma_cmd_space;
+ const unsigned char *clkprop;
+ int proplen, rc = -ENODEV;
+
+ if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
+ printk(KERN_ERR "mac53c94: expected 2 addrs and intrs"
+ " (got %d/%d)\n",
+ macio_resource_count(mdev), macio_irq_count(mdev));
+ return -ENODEV;
+ }
+
+ if (macio_request_resources(mdev, "mac53c94") != 0) {
+ printk(KERN_ERR "mac53c94: unable to request memory resources");
+ return -EBUSY;
+ }
+
+ host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state));
+ if (host == NULL) {
+ printk(KERN_ERR "mac53c94: couldn't register host");
+ rc = -ENOMEM;
+ goto out_release;
+ }
+
+ state = (struct fsc_state *) host->hostdata;
+ macio_set_drvdata(mdev, state);
+ state->host = host;
+ state->pdev = pdev;
+ state->mdev = mdev;
+
+ state->regs = (struct mac53c94_regs __iomem *)
+ ioremap(macio_resource_start(mdev, 0), 0x1000);
+ state->intr = macio_irq(mdev, 0);
+ state->dma = (struct dbdma_regs __iomem *)
+ ioremap(macio_resource_start(mdev, 1), 0x1000);
+ state->dmaintr = macio_irq(mdev, 1);
+ if (state->regs == NULL || state->dma == NULL) {
+ printk(KERN_ERR "mac53c94: ioremap failed for %s\n",
+ node->full_name);
+ goto out_free;
+ }
+
+ clkprop = of_get_property(node, "clock-frequency", &proplen);
+ if (clkprop == NULL || proplen != sizeof(int)) {
+ printk(KERN_ERR "%s: can't get clock frequency, "
+ "assuming 25MHz\n", node->full_name);
+ state->clk_freq = 25000000;
+ } else
+ state->clk_freq = *(int *)clkprop;
+
+ /* Space for dma command list: +1 for stop command,
+ * +1 to allow for aligning.
+ * XXX FIXME: Use DMA consistent routines
+ */
+ dma_cmd_space = kmalloc((host->sg_tablesize + 2) *
+ sizeof(struct dbdma_cmd), GFP_KERNEL);
+ if (dma_cmd_space == 0) {
+ printk(KERN_ERR "mac53c94: couldn't allocate dma "
+ "command space for %s\n", node->full_name);
+ rc = -ENOMEM;
+ goto out_free;
+ }
+ state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
+ memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
+ * sizeof(struct dbdma_cmd));
+ state->dma_cmd_space = dma_cmd_space;
+
+ mac53c94_init(state);
+
+ if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) {
+ printk(KERN_ERR "mac53C94: can't get irq %d for %s\n",
+ state->intr, node->full_name);
+ goto out_free_dma;
+ }
+
+ rc = scsi_add_host(host, &mdev->ofdev.dev);
+ if (rc != 0)
+ goto out_release_irq;
+
+ scsi_scan_host(host);
+ return 0;
+
+ out_release_irq:
+ free_irq(state->intr, state);
+ out_free_dma:
+ kfree(state->dma_cmd_space);
+ out_free:
+ if (state->dma != NULL)
+ iounmap(state->dma);
+ if (state->regs != NULL)
+ iounmap(state->regs);
+ scsi_host_put(host);
+ out_release:
+ macio_release_resources(mdev);
+
+ return rc;
+}
+
+static int mac53c94_remove(struct macio_dev *mdev)
+{
+ struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev);
+ struct Scsi_Host *host = fp->host;
+
+ scsi_remove_host(host);
+
+ free_irq(fp->intr, fp);
+
+ if (fp->regs)
+ iounmap(fp->regs);
+ if (fp->dma)
+ iounmap(fp->dma);
+ kfree(fp->dma_cmd_space);
+
+ scsi_host_put(host);
+
+ macio_release_resources(mdev);
+
+ return 0;
+}
+
+
+static struct of_device_id mac53c94_match[] =
+{
+ {
+ .name = "53c94",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE (of, mac53c94_match);
+
+static struct macio_driver mac53c94_driver =
+{
+ .driver = {
+ .name = "mac53c94",
+ .owner = THIS_MODULE,
+ .of_match_table = mac53c94_match,
+ },
+ .probe = mac53c94_probe,
+ .remove = mac53c94_remove,
+};
+
+
+static int __init init_mac53c94(void)
+{
+ return macio_register_driver(&mac53c94_driver);
+}
+
+static void __exit exit_mac53c94(void)
+{
+ return macio_unregister_driver(&mac53c94_driver);
+}
+
+module_init(init_mac53c94);
+module_exit(exit_mac53c94);
+
+MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver");
+MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/mac53c94.h b/drivers/scsi/mac53c94.h
new file mode 100644
index 000000000..1ad24e4f0
--- /dev/null
+++ b/drivers/scsi/mac53c94.h
@@ -0,0 +1,214 @@
+/*
+ * mac53c94.h: definitions for the driver for the 53c94 SCSI bus adaptor
+ * found on Power Macintosh computers, controlling the external SCSI chain.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+#ifndef _MAC53C94_H
+#define _MAC53C94_H
+
+/*
+ * Registers in the 53C94 controller.
+ */
+
+struct mac53c94_regs {
+ unsigned char count_lo;
+ char pad0[15];
+ unsigned char count_mid;
+ char pad1[15];
+ unsigned char fifo;
+ char pad2[15];
+ unsigned char command;
+ char pad3[15];
+ unsigned char status;
+ char pad4[15];
+ unsigned char interrupt;
+ char pad5[15];
+ unsigned char seqstep;
+ char pad6[15];
+ unsigned char flags;
+ char pad7[15];
+ unsigned char config1;
+ char pad8[15];
+ unsigned char clk_factor;
+ char pad9[15];
+ unsigned char test;
+ char pad10[15];
+ unsigned char config2;
+ char pad11[15];
+ unsigned char config3;
+ char pad12[15];
+ unsigned char config4;
+ char pad13[15];
+ unsigned char count_hi;
+ char pad14[15];
+ unsigned char fifo_res;
+ char pad15[15];
+};
+
+/*
+ * Alternate functions for some registers.
+ */
+#define dest_id status
+#define sel_timeout interrupt
+#define sync_period seqstep
+#define sync_offset flags
+
+/*
+ * Bits in command register.
+ */
+#define CMD_DMA_MODE 0x80
+#define CMD_MODE_MASK 0x70
+#define CMD_MODE_INIT 0x10
+#define CMD_MODE_TARG 0x20
+#define CMD_MODE_DISC 0x40
+
+#define CMD_NOP 0
+#define CMD_FLUSH 1
+#define CMD_RESET 2
+#define CMD_SCSI_RESET 3
+
+#define CMD_XFER_DATA 0x10
+#define CMD_I_COMPLETE 0x11
+#define CMD_ACCEPT_MSG 0x12
+#define CMD_XFER_PAD 0x18
+#define CMD_SET_ATN 0x1a
+#define CMD_CLR_ATN 0x1b
+
+#define CMD_SEND_MSG 0x20
+#define CMD_SEND_STATUS 0x21
+#define CMD_SEND_DATA 0x22
+#define CMD_DISC_SEQ 0x23
+#define CMD_TERMINATE 0x24
+#define CMD_T_COMPLETE 0x25
+#define CMD_DISCONNECT 0x27
+#define CMD_RECV_MSG 0x28
+#define CMD_RECV_CDB 0x29
+#define CMD_RECV_DATA 0x2a
+#define CMD_RECV_CMD 0x2b
+#define CMD_ABORT_DMA 0x04
+
+#define CMD_RESELECT 0x40
+#define CMD_SELECT 0x41
+#define CMD_SELECT_ATN 0x42
+#define CMD_SELATN_STOP 0x43
+#define CMD_ENABLE_SEL 0x44
+#define CMD_DISABLE_SEL 0x45
+#define CMD_SEL_ATN3 0x46
+#define CMD_RESEL_ATN3 0x47
+
+/*
+ * Bits in status register.
+ */
+#define STAT_IRQ 0x80
+#define STAT_ERROR 0x40
+#define STAT_PARITY 0x20
+#define STAT_TC_ZERO 0x10
+#define STAT_DONE 0x08
+#define STAT_PHASE 0x07
+#define STAT_MSG 0x04
+#define STAT_CD 0x02
+#define STAT_IO 0x01
+
+/*
+ * Bits in interrupt register.
+ */
+#define INTR_RESET 0x80 /* SCSI bus was reset */
+#define INTR_ILL_CMD 0x40 /* illegal command */
+#define INTR_DISCONNECT 0x20 /* we got disconnected */
+#define INTR_BUS_SERV 0x10 /* bus service requested */
+#define INTR_DONE 0x08 /* function completed */
+#define INTR_RESELECTED 0x04 /* we were reselected */
+#define INTR_SEL_ATN 0x02 /* we were selected, ATN asserted */
+#define INTR_SELECT 0x01 /* we were selected, ATN negated */
+
+/*
+ * Encoding for the select timeout.
+ */
+#define TIMO_VAL(x) ((x) * 5000 / 7682)
+
+/*
+ * Bits in sequence step register.
+ */
+#define SS_MASK 7
+#define SS_ARB_SEL 0 /* Selection & arbitration complete */
+#define SS_MSG_SENT 1 /* One message byte sent */
+#define SS_NOT_CMD 2 /* Not in command phase */
+#define SS_PHASE_CHG 3 /* Early phase change, cmd bytes lost */
+#define SS_DONE 4 /* Command was sent OK */
+
+/*
+ * Encoding for sync transfer period.
+ */
+#define SYNCP_MASK 0x1f
+#define SYNCP_MIN 4
+#define SYNCP_MAX 31
+
+/*
+ * Bits in flags register.
+ */
+#define FLAGS_FIFO_LEV 0x1f
+#define FLAGS_SEQ_STEP 0xe0
+
+/*
+ * Encoding for sync offset.
+ */
+#define SYNCO_MASK 0x0f
+#define SYNCO_ASS_CTRL 0x30 /* REQ/ACK assertion control */
+#define SYNCO_NEG_CTRL 0xc0 /* REQ/ACK negation control */
+
+/*
+ * Bits in config1 register.
+ */
+#define CF1_SLOW_CABLE 0x80 /* Slow cable mode */
+#define CF1_NO_RES_REP 0x40 /* Disable SCSI reset reports */
+#define CF1_PAR_TEST 0x20 /* Parity test mode enable */
+#define CF1_PAR_ENABLE 0x10 /* Enable parity checks */
+#define CF1_TEST 0x08 /* Chip tests */
+#define CF1_MY_ID 0x07 /* Controller's address on bus */
+
+/*
+ * Encoding for clk_factor register.
+ */
+#define CLKF_MASK 7
+#define CLKF_VAL(freq) ((((freq) + 4999999) / 5000000) & CLKF_MASK)
+
+/*
+ * Bits in test mode register.
+ */
+#define TEST_TARGET 1 /* target test mode */
+#define TEST_INITIATOR 2 /* initiator test mode */
+#define TEST_TRISTATE 4 /* tristate (hi-z) test mode */
+
+/*
+ * Bits in config2 register.
+ */
+#define CF2_RFB 0x80
+#define CF2_FEATURE_EN 0x40 /* enable features / phase latch */
+#define CF2_BYTECTRL 0x20
+#define CF2_DREQ_HIZ 0x10
+#define CF2_SCSI2 0x08
+#define CF2_PAR_ABORT 0x04 /* bad parity target abort */
+#define CF2_REG_PARERR 0x02 /* register parity error */
+#define CF2_DMA_PARERR 0x01 /* DMA parity error */
+
+/*
+ * Bits in the config3 register.
+ */
+#define CF3_ID_MSG_CHK 0x80
+#define CF3_3B_MSGS 0x40
+#define CF3_CDB10 0x20
+#define CF3_FASTSCSI 0x10 /* enable fast SCSI support */
+#define CF3_FASTCLOCK 0x08
+#define CF3_SAVERESID 0x04
+#define CF3_ALT_DMA 0x02
+#define CF3_THRESH_8 0x01
+
+/*
+ * Bits in the config4 register.
+ */
+#define CF4_EAN 0x04
+#define CF4_TEST 0x02
+#define CF4_BBTE 0x01
+
+#endif /* _MAC53C94_H */
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
new file mode 100644
index 000000000..14c0334f4
--- /dev/null
+++ b/drivers/scsi/mac_esp.c
@@ -0,0 +1,640 @@
+/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
+ *
+ * Adapted from jazz_esp.c and the old mac_esp.c.
+ *
+ * The pseudo DMA algorithm is based on the one used in NetBSD.
+ * See sys/arch/mac68k/obio/esp.c for some background information.
+ *
+ * Copyright (C) 2007-2008 Finn Thain
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nubus.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/macints.h>
+#include <asm/macintosh.h>
+#include <asm/mac_via.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "mac_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "Sept 15, 2007"
+
+#define MAC_ESP_IO_BASE 0x50F00000
+#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
+#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
+#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
+#define MAC_ESP_REGS_SPACING 0x402
+#define MAC_ESP_PDMA_REG 0xF9800024
+#define MAC_ESP_PDMA_REG_SPACING 0x4
+#define MAC_ESP_PDMA_IO_OFFSET 0x100
+
+#define esp_read8(REG) mac_esp_read8(esp, REG)
+#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)
+
+struct mac_esp_priv {
+ struct esp *esp;
+ void __iomem *pdma_regs;
+ void __iomem *pdma_io;
+ int error;
+};
+static struct esp *esp_chips[2];
+
+#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
+ platform_get_drvdata((struct platform_device *) \
+ (esp->dev)))
+
+static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ nubus_writeb(val, esp->regs + reg * 16);
+}
+
+static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return nubus_readb(esp->regs + reg * 16);
+}
+
+/* For pseudo DMA and PIO we need the virtual address
+ * so this address mapping is the identity mapping.
+ */
+
+static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return (dma_addr_t)buf;
+}
+
+static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ int i;
+
+ for (i = 0; i < num_sg; i++)
+ sg[i].dma_address = (u32)sg_virt(&sg[i]);
+ return num_sg;
+}
+
+static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_reset_dma(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_dma_drain(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_dma_invalidate(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static int mac_esp_dma_error(struct esp *esp)
+{
+ return MAC_ESP_GET_PRIV(esp)->error;
+}
+
+static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
+ return 0;
+
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ mep->error = 1;
+ return 1;
+}
+
+static inline int mac_esp_wait_for_dreq(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ if (mep->pdma_regs == NULL) {
+ if (via2_scsi_drq_pending())
+ return 0;
+ } else {
+ if (nubus_readl(mep->pdma_regs) & 0x200)
+ return 0;
+ }
+
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ mep->error = 1;
+ return 1;
+}
+
+#define MAC_ESP_PDMA_LOOP(operands) \
+ asm volatile ( \
+ " tstw %1 \n" \
+ " jbeq 20f \n" \
+ "1: movew " operands " \n" \
+ "2: movew " operands " \n" \
+ "3: movew " operands " \n" \
+ "4: movew " operands " \n" \
+ "5: movew " operands " \n" \
+ "6: movew " operands " \n" \
+ "7: movew " operands " \n" \
+ "8: movew " operands " \n" \
+ "9: movew " operands " \n" \
+ "10: movew " operands " \n" \
+ "11: movew " operands " \n" \
+ "12: movew " operands " \n" \
+ "13: movew " operands " \n" \
+ "14: movew " operands " \n" \
+ "15: movew " operands " \n" \
+ "16: movew " operands " \n" \
+ " subqw #1,%1 \n" \
+ " jbne 1b \n" \
+ "20: tstw %2 \n" \
+ " jbeq 30f \n" \
+ "21: movew " operands " \n" \
+ " subqw #1,%2 \n" \
+ " jbne 21b \n" \
+ "30: tstw %3 \n" \
+ " jbeq 40f \n" \
+ "31: moveb " operands " \n" \
+ "32: nop \n" \
+ "40: \n" \
+ " \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .long 1b,40b \n" \
+ " .long 2b,40b \n" \
+ " .long 3b,40b \n" \
+ " .long 4b,40b \n" \
+ " .long 5b,40b \n" \
+ " .long 6b,40b \n" \
+ " .long 7b,40b \n" \
+ " .long 8b,40b \n" \
+ " .long 9b,40b \n" \
+ " .long 10b,40b \n" \
+ " .long 11b,40b \n" \
+ " .long 12b,40b \n" \
+ " .long 13b,40b \n" \
+ " .long 14b,40b \n" \
+ " .long 15b,40b \n" \
+ " .long 16b,40b \n" \
+ " .long 21b,40b \n" \
+ " .long 31b,40b \n" \
+ " .long 32b,40b \n" \
+ " .previous \n" \
+ : "+a" (addr), "+r" (count32), "+r" (count2) \
+ : "g" (count1), "a" (mep->pdma_io))
+
+static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+
+ mep->error = 0;
+
+ if (!write)
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
+ esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);
+
+ scsi_esp_cmd(esp, cmd);
+
+ do {
+ unsigned int count32 = esp_count >> 5;
+ unsigned int count2 = (esp_count & 0x1F) >> 1;
+ unsigned int count1 = esp_count & 1;
+ unsigned int start_addr = addr;
+
+ if (mac_esp_wait_for_dreq(esp))
+ break;
+
+ if (write) {
+ MAC_ESP_PDMA_LOOP("%4@,%0@+");
+
+ esp_count -= addr - start_addr;
+ } else {
+ unsigned int n;
+
+ MAC_ESP_PDMA_LOOP("%0@+,%4@");
+
+ if (mac_esp_wait_for_empty_fifo(esp))
+ break;
+
+ n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
+ addr = start_addr + esp_count - n;
+ esp_count = n;
+ }
+ } while (esp_count);
+}
+
+/*
+ * Programmed IO routines follow.
+ */
+
+static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (fbytes)
+ return fbytes;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ return 0;
+}
+
+static inline int mac_esp_wait_for_intr(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ esp->sreg = esp_read8(ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
+ mep->error = 1;
+ return 1;
+}
+
+#define MAC_ESP_PIO_LOOP(operands, reg1) \
+ asm volatile ( \
+ "1: moveb " operands " \n" \
+ " subqw #1,%1 \n" \
+ " jbne 1b \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo))
+
+#define MAC_ESP_PIO_FILL(operands, reg1) \
+ asm volatile ( \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " subqw #8,%1 \n" \
+ " subqw #8,%1 \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo))
+
+#define MAC_ESP_FIFO_SIZE 16
+
+static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ u8 *fifo = esp->regs + ESP_FDATA * 16;
+
+ cmd &= ~ESP_CMD_DMA;
+ mep->error = 0;
+
+ if (write) {
+ scsi_esp_cmd(esp, cmd);
+
+ while (1) {
+ unsigned int n;
+
+ n = mac_esp_wait_for_fifo(esp);
+ if (!n)
+ break;
+
+ if (n > esp_count)
+ n = esp_count;
+ esp_count -= n;
+
+ MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+
+ if (!esp_count)
+ break;
+
+ if (mac_esp_wait_for_intr(esp))
+ break;
+
+ if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
+ ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
+ ESP_INTR_BSERV)
+ break;
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ } else {
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (esp_count >= MAC_ESP_FIFO_SIZE)
+ MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+ else
+ MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (esp_count) {
+ unsigned int n;
+
+ if (mac_esp_wait_for_intr(esp))
+ break;
+
+ if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
+ ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
+ ESP_INTR_BSERV)
+ break;
+
+ n = MAC_ESP_FIFO_SIZE -
+ (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+ if (n > esp_count)
+ n = esp_count;
+
+ if (n == MAC_ESP_FIFO_SIZE) {
+ MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+ } else {
+ esp_count -= n;
+ MAC_ESP_PIO_LOOP("%0@+,%2@", n);
+ }
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ }
+}
+
+static int mac_esp_irq_pending(struct esp *esp)
+{
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+ return 0;
+}
+
+static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+}
+
+static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
+{
+ int got_intr;
+
+ /*
+ * This is an edge triggered IRQ, so we have to be careful to
+ * avoid missing a transition when it is shared by two ESP devices.
+ */
+
+ do {
+ got_intr = 0;
+ if (esp_chips[0] &&
+ (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
+ (void)scsi_esp_intr(irq, esp_chips[0]);
+ got_intr = 1;
+ }
+ if (esp_chips[1] &&
+ (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
+ (void)scsi_esp_intr(irq, esp_chips[1]);
+ got_intr = 1;
+ }
+ } while (got_intr);
+
+ return IRQ_HANDLED;
+}
+
+static struct esp_driver_ops mac_esp_ops = {
+ .esp_write8 = mac_esp_write8,
+ .esp_read8 = mac_esp_read8,
+ .map_single = mac_esp_map_single,
+ .map_sg = mac_esp_map_sg,
+ .unmap_single = mac_esp_unmap_single,
+ .unmap_sg = mac_esp_unmap_sg,
+ .irq_pending = mac_esp_irq_pending,
+ .dma_length_limit = mac_esp_dma_length_limit,
+ .reset_dma = mac_esp_reset_dma,
+ .dma_drain = mac_esp_dma_drain,
+ .dma_invalidate = mac_esp_dma_invalidate,
+ .send_dma_cmd = mac_esp_send_pdma_cmd,
+ .dma_error = mac_esp_dma_error,
+};
+
+static int esp_mac_probe(struct platform_device *dev)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ int err;
+ struct mac_esp_priv *mep;
+
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
+ if (dev->id > 1)
+ return -ENODEV;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ err = -ENOMEM;
+ if (!host)
+ goto fail;
+
+ host->max_id = 8;
+ host->use_clustering = DISABLE_CLUSTERING;
+ esp = shost_priv(host);
+
+ esp->host = host;
+ esp->dev = dev;
+
+ esp->command_block = kzalloc(16, GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unlink;
+ esp->command_block_dma = (dma_addr_t)esp->command_block;
+
+ esp->scsi_id = 7;
+ host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = 1 << esp->scsi_id;
+
+ mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
+ if (!mep)
+ goto fail_free_command_block;
+ mep->esp = esp;
+ platform_set_drvdata(dev, mep);
+
+ switch (macintosh_config->scsi_type) {
+ case MAC_SCSI_QUADRA:
+ esp->cfreq = 16500000;
+ esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
+ mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+ mep->pdma_regs = NULL;
+ break;
+ case MAC_SCSI_QUADRA2:
+ esp->cfreq = 25000000;
+ esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
+ dev->id * MAC_ESP_REGS_SPACING);
+ mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+ mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
+ dev->id * MAC_ESP_PDMA_REG_SPACING);
+ nubus_writel(0x1d1, mep->pdma_regs);
+ break;
+ case MAC_SCSI_QUADRA3:
+ /* These quadras have a real DMA controller (the PSC) but we
+ * don't know how to drive it so we must use PIO instead.
+ */
+ esp->cfreq = 25000000;
+ esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
+ mep->pdma_io = NULL;
+ mep->pdma_regs = NULL;
+ break;
+ }
+
+ esp->ops = &mac_esp_ops;
+ if (mep->pdma_io == NULL) {
+ printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
+ esp_write8(0, ESP_TCLOW);
+ esp_write8(0, ESP_TCMED);
+ esp->flags = ESP_FLAG_DISABLE_SYNC;
+ mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+ } else {
+ printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
+ }
+
+ host->irq = IRQ_MAC_SCSI;
+ esp_chips[dev->id] = esp;
+ mb();
+ if (esp_chips[!dev->id] == NULL) {
+ err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
+ if (err < 0) {
+ esp_chips[dev->id] = NULL;
+ goto fail_free_priv;
+ }
+ }
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ if (esp_chips[!dev->id] == NULL)
+ free_irq(host->irq, esp);
+fail_free_priv:
+ kfree(mep);
+fail_free_command_block:
+ kfree(esp->command_block);
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
+}
+
+static int esp_mac_remove(struct platform_device *dev)
+{
+ struct mac_esp_priv *mep = platform_get_drvdata(dev);
+ struct esp *esp = mep->esp;
+ unsigned int irq = esp->host->irq;
+
+ scsi_esp_unregister(esp);
+
+ esp_chips[dev->id] = NULL;
+ if (!(esp_chips[0] || esp_chips[1]))
+ free_irq(irq, NULL);
+
+ kfree(mep);
+
+ kfree(esp->command_block);
+
+ scsi_host_put(esp->host);
+
+ return 0;
+}
+
+static struct platform_driver esp_mac_driver = {
+ .probe = esp_mac_probe,
+ .remove = esp_mac_remove,
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+
+static int __init mac_esp_init(void)
+{
+ return platform_driver_register(&esp_mac_driver);
+}
+
+static void __exit mac_esp_exit(void)
+{
+ platform_driver_unregister(&esp_mac_driver);
+}
+
+MODULE_DESCRIPTION("Mac ESP SCSI driver");
+MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_MODULE_NAME);
+
+module_init(mac_esp_init);
+module_exit(mac_esp_exit);
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
new file mode 100644
index 000000000..d64a769b8
--- /dev/null
+++ b/drivers/scsi/mac_scsi.c
@@ -0,0 +1,492 @@
+/*
+ * Generic Macintosh NCR5380 driver
+ *
+ * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
+ *
+ * derived in part from:
+ */
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1995, Russell King
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/hwtest.h>
+#include <asm/io.h>
+#include <asm/macints.h>
+#include <asm/setup.h>
+
+#include <scsi/scsi_host.h>
+
+/* Definitions for the core NCR5380 driver. */
+
+#define PSEUDO_DMA
+
+#define NCR5380_implementation_fields unsigned char *pdma_base
+#define NCR5380_local_declare() struct Scsi_Host *_instance
+#define NCR5380_setup(instance) _instance = instance
+
+#define NCR5380_read(reg) macscsi_read(_instance, reg)
+#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value)
+
+#define NCR5380_pread macscsi_pread
+#define NCR5380_pwrite macscsi_pwrite
+
+#define NCR5380_intr macscsi_intr
+#define NCR5380_queue_command macscsi_queue_command
+#define NCR5380_abort macscsi_abort
+#define NCR5380_bus_reset macscsi_bus_reset
+#define NCR5380_info macscsi_info
+#define NCR5380_show_info macscsi_show_info
+#define NCR5380_write_info macscsi_write_info
+
+#include "NCR5380.h"
+
+#define RESET_BOOT
+
+static int setup_can_queue = -1;
+module_param(setup_can_queue, int, 0);
+static int setup_cmd_per_lun = -1;
+module_param(setup_cmd_per_lun, int, 0);
+static int setup_sg_tablesize = -1;
+module_param(setup_sg_tablesize, int, 0);
+static int setup_use_pdma = -1;
+module_param(setup_use_pdma, int, 0);
+static int setup_use_tagged_queuing = -1;
+module_param(setup_use_tagged_queuing, int, 0);
+static int setup_hostid = -1;
+module_param(setup_hostid, int, 0);
+
+/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms,
+ * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more
+ * need ten times the standard value... */
+#define TOSHIBA_DELAY
+
+#ifdef TOSHIBA_DELAY
+#define AFTER_RESET_DELAY (5*HZ/2)
+#else
+#define AFTER_RESET_DELAY (HZ/2)
+#endif
+
+/*
+ * NCR 5380 register access functions
+ */
+
+static inline char macscsi_read(struct Scsi_Host *instance, int reg)
+{
+ return in_8(instance->base + (reg << 4));
+}
+
+static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
+{
+ out_8(instance->base + (reg << 4), value);
+}
+
+#ifndef MODULE
+static int __init mac_scsi_setup(char *str)
+{
+ int ints[7];
+
+ (void)get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] < 1 || ints[0] > 6) {
+ pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>]]]]]\n");
+ return 0;
+ }
+ if (ints[0] >= 1)
+ setup_can_queue = ints[1];
+ if (ints[0] >= 2)
+ setup_cmd_per_lun = ints[2];
+ if (ints[0] >= 3)
+ setup_sg_tablesize = ints[3];
+ if (ints[0] >= 4)
+ setup_hostid = ints[4];
+ if (ints[0] >= 5)
+ setup_use_tagged_queuing = ints[5];
+ if (ints[0] >= 6)
+ setup_use_pdma = ints[6];
+ return 1;
+}
+
+__setup("mac5380=", mac_scsi_setup);
+#endif /* !MODULE */
+
+#ifdef RESET_BOOT
+/*
+ * Our 'bus reset on boot' function
+ */
+
+static void mac_scsi_reset_boot(struct Scsi_Host *instance)
+{
+ unsigned long end;
+
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ /*
+ * Do a SCSI reset to clean up the bus during initialization. No messing
+ * with the queues, interrupts, or locks necessary here.
+ */
+
+ printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );
+
+ /* get in phase */
+ NCR5380_write( TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
+
+ /* assert RST */
+ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
+ /* The min. reset hold time is 25us, so 40us should be enough */
+ udelay( 50 );
+ /* reset RST and interrupt */
+ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
+ NCR5380_read( RESET_PARITY_INTERRUPT_REG );
+
+ for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
+ barrier();
+
+ printk(KERN_INFO " done\n" );
+}
+#endif
+
+#ifdef PSEUDO_DMA
+/*
+ Pseudo-DMA: (Ove Edlund)
+ The code attempts to catch bus errors that occur if one for example
+ "trips over the cable".
+ XXX: Since bus errors in the PDMA routines never happen on my
+ computer, the bus error code is untested.
+ If the code works as intended, a bus error results in Pseudo-DMA
+ being disabled, meaning that the driver switches to slow handshake.
+ If bus errors are NOT extremely rare, this has to be changed.
+*/
+
+#define CP_IO_TO_MEM(s,d,len) \
+__asm__ __volatile__ \
+ (" cmp.w #4,%2\n" \
+ " bls 8f\n" \
+ " move.w %1,%%d0\n" \
+ " neg.b %%d0\n" \
+ " and.w #3,%%d0\n" \
+ " sub.w %%d0,%2\n" \
+ " bra 2f\n" \
+ " 1: move.b (%0),(%1)+\n" \
+ " 2: dbf %%d0,1b\n" \
+ " move.w %2,%%d0\n" \
+ " lsr.w #5,%%d0\n" \
+ " bra 4f\n" \
+ " 3: move.l (%0),(%1)+\n" \
+ "31: move.l (%0),(%1)+\n" \
+ "32: move.l (%0),(%1)+\n" \
+ "33: move.l (%0),(%1)+\n" \
+ "34: move.l (%0),(%1)+\n" \
+ "35: move.l (%0),(%1)+\n" \
+ "36: move.l (%0),(%1)+\n" \
+ "37: move.l (%0),(%1)+\n" \
+ " 4: dbf %%d0,3b\n" \
+ " move.w %2,%%d0\n" \
+ " lsr.w #2,%%d0\n" \
+ " and.w #7,%%d0\n" \
+ " bra 6f\n" \
+ " 5: move.l (%0),(%1)+\n" \
+ " 6: dbf %%d0,5b\n" \
+ " and.w #3,%2\n" \
+ " bra 8f\n" \
+ " 7: move.b (%0),(%1)+\n" \
+ " 8: dbf %2,7b\n" \
+ " moveq.l #0, %2\n" \
+ " 9: \n" \
+ ".section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "90: moveq.l #1, %2\n" \
+ " jra 9b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,90b\n" \
+ " .long 3b,90b\n" \
+ " .long 31b,90b\n" \
+ " .long 32b,90b\n" \
+ " .long 33b,90b\n" \
+ " .long 34b,90b\n" \
+ " .long 35b,90b\n" \
+ " .long 36b,90b\n" \
+ " .long 37b,90b\n" \
+ " .long 5b,90b\n" \
+ " .long 7b,90b\n" \
+ ".previous" \
+ : "=a"(s), "=a"(d), "=d"(len) \
+ : "0"(s), "1"(d), "2"(len) \
+ : "d0")
+
+static int macscsi_pread(struct Scsi_Host *instance,
+ unsigned char *dst, int len)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned char *d;
+ unsigned char *s;
+
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+ d = dst;
+
+ /* These conditions are derived from MacOS */
+
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
+ !(NCR5380_read(STATUS_REG) & SR_REQ))
+ ;
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
+ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+ pr_err("Error in macscsi_pread\n");
+ return -1;
+ }
+
+ CP_IO_TO_MEM(s, d, len);
+
+ if (len != 0) {
+ pr_notice("Bus error in macscsi_pread\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+#define CP_MEM_TO_IO(s,d,len) \
+__asm__ __volatile__ \
+ (" cmp.w #4,%2\n" \
+ " bls 8f\n" \
+ " move.w %0,%%d0\n" \
+ " neg.b %%d0\n" \
+ " and.w #3,%%d0\n" \
+ " sub.w %%d0,%2\n" \
+ " bra 2f\n" \
+ " 1: move.b (%0)+,(%1)\n" \
+ " 2: dbf %%d0,1b\n" \
+ " move.w %2,%%d0\n" \
+ " lsr.w #5,%%d0\n" \
+ " bra 4f\n" \
+ " 3: move.l (%0)+,(%1)\n" \
+ "31: move.l (%0)+,(%1)\n" \
+ "32: move.l (%0)+,(%1)\n" \
+ "33: move.l (%0)+,(%1)\n" \
+ "34: move.l (%0)+,(%1)\n" \
+ "35: move.l (%0)+,(%1)\n" \
+ "36: move.l (%0)+,(%1)\n" \
+ "37: move.l (%0)+,(%1)\n" \
+ " 4: dbf %%d0,3b\n" \
+ " move.w %2,%%d0\n" \
+ " lsr.w #2,%%d0\n" \
+ " and.w #7,%%d0\n" \
+ " bra 6f\n" \
+ " 5: move.l (%0)+,(%1)\n" \
+ " 6: dbf %%d0,5b\n" \
+ " and.w #3,%2\n" \
+ " bra 8f\n" \
+ " 7: move.b (%0)+,(%1)\n" \
+ " 8: dbf %2,7b\n" \
+ " moveq.l #0, %2\n" \
+ " 9: \n" \
+ ".section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "90: moveq.l #1, %2\n" \
+ " jra 9b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,90b\n" \
+ " .long 3b,90b\n" \
+ " .long 31b,90b\n" \
+ " .long 32b,90b\n" \
+ " .long 33b,90b\n" \
+ " .long 34b,90b\n" \
+ " .long 35b,90b\n" \
+ " .long 36b,90b\n" \
+ " .long 37b,90b\n" \
+ " .long 5b,90b\n" \
+ " .long 7b,90b\n" \
+ ".previous" \
+ : "=a"(s), "=a"(d), "=d"(len) \
+ : "0"(s), "1"(d), "2"(len) \
+ : "d0")
+
+static int macscsi_pwrite(struct Scsi_Host *instance,
+ unsigned char *src, int len)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned char *s;
+ unsigned char *d;
+
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ s = src;
+ d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+
+ /* These conditions are derived from MacOS */
+
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
+ (!(NCR5380_read(STATUS_REG) & SR_REQ) ||
+ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
+ ;
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
+ pr_err("Error in macscsi_pwrite\n");
+ return -1;
+ }
+
+ CP_MEM_TO_IO(s, d, len);
+
+ if (len != 0) {
+ pr_notice("Bus error in macscsi_pwrite\n");
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+#include "NCR5380.c"
+
+#define DRV_MODULE_NAME "mac_scsi"
+#define PFX DRV_MODULE_NAME ": "
+
+static struct scsi_host_template mac_scsi_template = {
+ .module = THIS_MODULE,
+ .proc_name = DRV_MODULE_NAME,
+ .show_info = macscsi_show_info,
+ .write_info = macscsi_write_info,
+ .name = "Macintosh NCR5380 SCSI",
+ .info = macscsi_info,
+ .queuecommand = macscsi_queue_command,
+ .eh_abort_handler = macscsi_abort,
+ .eh_bus_reset_handler = macscsi_bus_reset,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING
+};
+
+static int __init mac_scsi_probe(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance;
+ int error;
+ int host_flags = 0;
+ struct resource *irq, *pio_mem, *pdma_mem = NULL;
+
+ pio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pio_mem)
+ return -ENODEV;
+
+#ifdef PSEUDO_DMA
+ pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+#endif
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!hwreg_present((unsigned char *)pio_mem->start +
+ (STATUS_REG << 4))) {
+ pr_info(PFX "no device detected at %pap\n", &pio_mem->start);
+ return -ENODEV;
+ }
+
+ if (setup_can_queue > 0)
+ mac_scsi_template.can_queue = setup_can_queue;
+ if (setup_cmd_per_lun > 0)
+ mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
+ if (setup_sg_tablesize >= 0)
+ mac_scsi_template.sg_tablesize = setup_sg_tablesize;
+ if (setup_hostid >= 0)
+ mac_scsi_template.this_id = setup_hostid & 7;
+ if (setup_use_pdma < 0)
+ setup_use_pdma = 0;
+
+ instance = scsi_host_alloc(&mac_scsi_template,
+ sizeof(struct NCR5380_hostdata));
+ if (!instance)
+ return -ENOMEM;
+
+ instance->base = pio_mem->start;
+ if (irq)
+ instance->irq = irq->start;
+ else
+ instance->irq = NO_IRQ;
+
+ if (pdma_mem && setup_use_pdma) {
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ hostdata->pdma_base = (unsigned char *)pdma_mem->start;
+ } else
+ host_flags |= FLAG_NO_PSEUDO_DMA;
+
+#ifdef RESET_BOOT
+ mac_scsi_reset_boot(instance);
+#endif
+
+#ifdef SUPPORT_TAGS
+ host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
+#endif
+
+ NCR5380_init(instance, host_flags);
+
+ if (instance->irq != NO_IRQ) {
+ error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED,
+ "NCR5380", instance);
+ if (error)
+ goto fail_irq;
+ }
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ platform_set_drvdata(pdev, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+fail_irq:
+ NCR5380_exit(instance);
+ scsi_host_put(instance);
+ return error;
+}
+
+static int __exit mac_scsi_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance = platform_get_drvdata(pdev);
+
+ scsi_remove_host(instance);
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
+ scsi_host_put(instance);
+ return 0;
+}
+
+static struct platform_driver mac_scsi_driver = {
+ .remove = __exit_p(mac_scsi_remove),
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+
+module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe);
+
+MODULE_ALIAS("platform:" DRV_MODULE_NAME);
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
new file mode 100644
index 000000000..bc7b34c02
--- /dev/null
+++ b/drivers/scsi/megaraid.c
@@ -0,0 +1,4738 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2002 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ * - fixes
+ * - speed-ups (list handling fixes, issued_list, optimizations.)
+ * - lots of cleanups.
+ *
+ * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
+ * - new-style, hotplug-aware pci probing and scsi registration
+ *
+ * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
+ * <Seokmann.Ju@lsil.com>
+ *
+ * Description: Linux device driver for LSI Logic MegaRAID controller
+ *
+ * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
+ * 518, 520, 531, 532
+ *
+ * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
+ * and others. Please send updates to the mailing list
+ * linux-scsi@vger.kernel.org .
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <scsi/scsicam.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+#include "megaraid.h"
+
+#define MEGARAID_MODULE_VERSION "2.00.4"
+
+MODULE_AUTHOR ("sju@lsil.com");
+MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
+MODULE_LICENSE ("GPL");
+MODULE_VERSION(MEGARAID_MODULE_VERSION);
+
+static DEFINE_MUTEX(megadev_mutex);
+static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
+module_param(max_cmd_per_lun, uint, 0);
+MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
+
+static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
+module_param(max_sectors_per_io, ushort, 0);
+MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
+
+
+static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
+module_param(max_mbox_busy_wait, ushort, 0);
+MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
+
+#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
+#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
+#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
+#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
+
+/*
+ * Global variables
+ */
+
+static int hba_count;
+static adapter_t *hba_soft_state[MAX_CONTROLLERS];
+static struct proc_dir_entry *mega_proc_dir_entry;
+
+/* For controller re-ordering */
+static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
+
+static long
+megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/*
+ * The File Operations structure for the serial/ioctl interface of the driver
+ */
+static const struct file_operations megadev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = megadev_unlocked_ioctl,
+ .open = megadev_open,
+ .llseek = noop_llseek,
+};
+
+/*
+ * Array to structures for storing the information about the controllers. This
+ * information is sent to the user level applications, when they do an ioctl
+ * for this information.
+ */
+static struct mcontroller mcontroller[MAX_CONTROLLERS];
+
+/* The current driver version */
+static u32 driver_ver = 0x02000000;
+
+/* major number used by the device for character interface */
+static int major;
+
+#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
+
+
+/*
+ * Debug variable to print some diagnostic messages
+ */
+static int trace_level;
+
+/**
+ * mega_setup_mailbox()
+ * @adapter - pointer to our soft state
+ *
+ * Allocates a 8 byte aligned memory for the handshake mailbox.
+ */
+static int
+mega_setup_mailbox(adapter_t *adapter)
+{
+ unsigned long align;
+
+ adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
+ sizeof(mbox64_t), &adapter->una_mbox64_dma);
+
+ if( !adapter->una_mbox64 ) return -1;
+
+ adapter->mbox = &adapter->una_mbox64->mbox;
+
+ adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
+ (~0UL ^ 0xFUL));
+
+ adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
+
+ align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
+
+ adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
+
+ /*
+ * Register the mailbox if the controller is an io-mapped controller
+ */
+ if( adapter->flag & BOARD_IOMAP ) {
+
+ outb(adapter->mbox_dma & 0xFF,
+ adapter->host->io_port + MBOX_PORT0);
+
+ outb((adapter->mbox_dma >> 8) & 0xFF,
+ adapter->host->io_port + MBOX_PORT1);
+
+ outb((adapter->mbox_dma >> 16) & 0xFF,
+ adapter->host->io_port + MBOX_PORT2);
+
+ outb((adapter->mbox_dma >> 24) & 0xFF,
+ adapter->host->io_port + MBOX_PORT3);
+
+ outb(ENABLE_MBOX_BYTE,
+ adapter->host->io_port + ENABLE_MBOX_REGION);
+
+ irq_ack(adapter);
+
+ irq_enable(adapter);
+ }
+
+ return 0;
+}
+
+
+/*
+ * mega_query_adapter()
+ * @adapter - pointer to our soft state
+ *
+ * Issue the adapter inquiry commands to the controller and find out
+ * information and parameter about the devices attached
+ */
+static int
+mega_query_adapter(adapter_t *adapter)
+{
+ dma_addr_t prod_info_dma_handle;
+ mega_inquiry3 *inquiry3;
+ u8 raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+ int retval;
+
+ /* Initialize adapter inquiry mailbox */
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+
+ /*
+ * Try to issue Inquiry3 command
+ * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
+ * update enquiry3 structure
+ */
+ mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+
+ inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
+
+ raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
+ raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
+ raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
+
+ /* Issue a blocking command to the card */
+ if ((retval = issue_scb_block(adapter, raw_mbox))) {
+ /* the adapter does not support 40ld */
+
+ mraid_ext_inquiry *ext_inq;
+ mraid_inquiry *inq;
+ dma_addr_t dma_handle;
+
+ ext_inq = pci_alloc_consistent(adapter->dev,
+ sizeof(mraid_ext_inquiry), &dma_handle);
+
+ if( ext_inq == NULL ) return -1;
+
+ inq = &ext_inq->raid_inq;
+
+ mbox->m_out.xferaddr = (u32)dma_handle;
+
+ /*issue old 0x04 command to adapter */
+ mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
+
+ issue_scb_block(adapter, raw_mbox);
+
+ /*
+ * update Enquiry3 and ProductInfo structures with
+ * mraid_inquiry structure
+ */
+ mega_8_to_40ld(inq, inquiry3,
+ (mega_product_info *)&adapter->product_info);
+
+ pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
+ ext_inq, dma_handle);
+
+ } else { /*adapter supports 40ld */
+ adapter->flag |= BOARD_40LD;
+
+ /*
+ * get product_info, which is static information and will be
+ * unchanged
+ */
+ prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
+ &adapter->product_info,
+ sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+
+ mbox->m_out.xferaddr = prod_info_dma_handle;
+
+ raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
+ raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
+
+ if ((retval = issue_scb_block(adapter, raw_mbox)))
+ printk(KERN_WARNING
+ "megaraid: Product_info cmd failed with error: %d\n",
+ retval);
+
+ pci_unmap_single(adapter->dev, prod_info_dma_handle,
+ sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ }
+
+
+ /*
+ * kernel scans the channels from 0 to <= max_channel
+ */
+ adapter->host->max_channel =
+ adapter->product_info.nchannels + NVIRT_CHAN -1;
+
+ adapter->host->max_id = 16; /* max targets per channel */
+
+ adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */
+
+ adapter->host->cmd_per_lun = max_cmd_per_lun;
+
+ adapter->numldrv = inquiry3->num_ldrv;
+
+ adapter->max_cmds = adapter->product_info.max_commands;
+
+ if(adapter->max_cmds > MAX_COMMANDS)
+ adapter->max_cmds = MAX_COMMANDS;
+
+ adapter->host->can_queue = adapter->max_cmds - 1;
+
+ /*
+ * Get the maximum number of scatter-gather elements supported by this
+ * firmware
+ */
+ mega_get_max_sgl(adapter);
+
+ adapter->host->sg_tablesize = adapter->sglen;
+
+ /* use HP firmware and bios version encoding
+ Note: fw_version[0|1] and bios_version[0|1] were originally shifted
+ right 8 bits making them zero. This 0 value was hardcoded to fix
+ sparse warnings. */
+ if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
+ sprintf (adapter->fw_version, "%c%d%d.%d%d",
+ adapter->product_info.fw_version[2],
+ 0,
+ adapter->product_info.fw_version[1] & 0x0f,
+ 0,
+ adapter->product_info.fw_version[0] & 0x0f);
+ sprintf (adapter->bios_version, "%c%d%d.%d%d",
+ adapter->product_info.bios_version[2],
+ 0,
+ adapter->product_info.bios_version[1] & 0x0f,
+ 0,
+ adapter->product_info.bios_version[0] & 0x0f);
+ } else {
+ memcpy(adapter->fw_version,
+ (char *)adapter->product_info.fw_version, 4);
+ adapter->fw_version[4] = 0;
+
+ memcpy(adapter->bios_version,
+ (char *)adapter->product_info.bios_version, 4);
+
+ adapter->bios_version[4] = 0;
+ }
+
+ printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n",
+ adapter->fw_version, adapter->bios_version, adapter->numldrv);
+
+ /*
+ * Do we support extended (>10 bytes) cdbs
+ */
+ adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
+ if (adapter->support_ext_cdb)
+ printk(KERN_NOTICE "megaraid: supports extended CDBs.\n");
+
+
+ return 0;
+}
+
+/**
+ * mega_runpendq()
+ * @adapter - pointer to our soft state
+ *
+ * Runs through the list of pending requests.
+ */
+static inline void
+mega_runpendq(adapter_t *adapter)
+{
+ if(!list_empty(&adapter->pending_list))
+ __mega_runpendq(adapter);
+}
+
+/*
+ * megaraid_queue()
+ * @scmd - Issue this scsi command
+ * @done - the callback hook into the scsi mid-layer
+ *
+ * The command queuing entry point for the mid-layer.
+ */
+static int
+megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
+{
+ adapter_t *adapter;
+ scb_t *scb;
+ int busy=0;
+ unsigned long flags;
+
+ adapter = (adapter_t *)scmd->device->host->hostdata;
+
+ scmd->scsi_done = done;
+
+
+ /*
+ * Allocate and build a SCB request
+ * busy flag will be set if mega_build_cmd() command could not
+ * allocate scb. We will return non-zero status in that case.
+ * NOTE: scb can be null even though certain commands completed
+ * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, we would
+ * return 0 in that case.
+ */
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ scb = mega_build_cmd(adapter, scmd, &busy);
+ if (!scb)
+ goto out;
+
+ scb->state |= SCB_PENDQ;
+ list_add_tail(&scb->list, &adapter->pending_list);
+
+ /*
+ * Check if the HBA is in quiescent state, e.g., during a
+ * delete logical drive opertion. If it is, don't run
+ * the pending_list.
+ */
+ if (atomic_read(&adapter->quiescent) == 0)
+ mega_runpendq(adapter);
+
+ busy = 0;
+ out:
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ return busy;
+}
+
+static DEF_SCSI_QCMD(megaraid_queue)
+
+/**
+ * mega_allocate_scb()
+ * @adapter - pointer to our soft state
+ * @cmd - scsi command from the mid-layer
+ *
+ * Allocate a SCB structure. This is the central structure for controller
+ * commands.
+ */
+static inline scb_t *
+mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
+{
+ struct list_head *head = &adapter->free_list;
+ scb_t *scb;
+
+ /* Unlink command from Free List */
+ if( !list_empty(head) ) {
+
+ scb = list_entry(head->next, scb_t, list);
+
+ list_del_init(head->next);
+
+ scb->state = SCB_ACTIVE;
+ scb->cmd = cmd;
+ scb->dma_type = MEGA_DMA_TYPE_NONE;
+
+ return scb;
+ }
+
+ return NULL;
+}
+
+/**
+ * mega_get_ldrv_num()
+ * @adapter - pointer to our soft state
+ * @cmd - scsi mid layer command
+ * @channel - channel on the controller
+ *
+ * Calculate the logical drive number based on the information in scsi command
+ * and the channel number.
+ */
+static inline int
+mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
+{
+ int tgt;
+ int ldrv_num;
+
+ tgt = cmd->device->id;
+
+ if ( tgt > adapter->this_id )
+ tgt--; /* we do not get inquires for initiator id */
+
+ ldrv_num = (channel * 15) + tgt;
+
+
+ /*
+ * If we have a logical drive with boot enabled, project it first
+ */
+ if( adapter->boot_ldrv_enabled ) {
+ if( ldrv_num == 0 ) {
+ ldrv_num = adapter->boot_ldrv;
+ }
+ else {
+ if( ldrv_num <= adapter->boot_ldrv ) {
+ ldrv_num--;
+ }
+ }
+ }
+
+ /*
+ * If "delete logical drive" feature is enabled on this controller.
+ * Do only if at least one delete logical drive operation was done.
+ *
+ * Also, after logical drive deletion, instead of logical drive number,
+ * the value returned should be 0x80+logical drive id.
+ *
+ * These is valid only for IO commands.
+ */
+
+ if (adapter->support_random_del && adapter->read_ldidmap )
+ switch (cmd->cmnd[0]) {
+ case READ_6: /* fall through */
+ case WRITE_6: /* fall through */
+ case READ_10: /* fall through */
+ case WRITE_10:
+ ldrv_num += 0x80;
+ }
+
+ return ldrv_num;
+}
+
+/**
+ * mega_build_cmd()
+ * @adapter - pointer to our soft state
+ * @cmd - Prepare using this scsi command
+ * @busy - busy flag if no resources
+ *
+ * Prepares a command and scatter gather list for the controller. This routine
+ * also finds out if the commands is intended for a logical drive or a
+ * physical device and prepares the controller command accordingly.
+ *
+ * We also re-order the logical drives and physical devices based on their
+ * boot settings.
+ */
+static scb_t *
+mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
+{
+ mega_ext_passthru *epthru;
+ mega_passthru *pthru;
+ scb_t *scb;
+ mbox_t *mbox;
+ u32 seg;
+ char islogical;
+ int max_ldrv_num;
+ int channel = 0;
+ int target = 0;
+ int ldrv_num = 0; /* logical drive number */
+
+ /*
+ * We know what channels our logical drives are on - mega_find_card()
+ */
+ islogical = adapter->logdrv_chan[cmd->device->channel];
+
+ /*
+ * The theory: If physical drive is chosen for boot, all the physical
+ * devices are exported before the logical drives, otherwise physical
+ * devices are pushed after logical drives, in which case - Kernel sees
+ * the physical devices on virtual channel which is obviously converted
+ * to actual channel on the HBA.
+ */
+ if( adapter->boot_pdrv_enabled ) {
+ if( islogical ) {
+ /* logical channel */
+ channel = cmd->device->channel -
+ adapter->product_info.nchannels;
+ }
+ else {
+ /* this is physical channel */
+ channel = cmd->device->channel;
+ target = cmd->device->id;
+
+ /*
+ * boot from a physical disk, that disk needs to be
+ * exposed first IF both the channels are SCSI, then
+ * booting from the second channel is not allowed.
+ */
+ if( target == 0 ) {
+ target = adapter->boot_pdrv_tgt;
+ }
+ else if( target == adapter->boot_pdrv_tgt ) {
+ target = 0;
+ }
+ }
+ }
+ else {
+ if( islogical ) {
+ /* this is the logical channel */
+ channel = cmd->device->channel;
+ }
+ else {
+ /* physical channel */
+ channel = cmd->device->channel - NVIRT_CHAN;
+ target = cmd->device->id;
+ }
+ }
+
+
+ if(islogical) {
+
+ /* have just LUN 0 for each target on virtual channels */
+ if (cmd->device->lun) {
+ cmd->result = (DID_BAD_TARGET << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+
+ ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
+
+
+ max_ldrv_num = (adapter->flag & BOARD_40LD) ?
+ MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
+
+ /*
+ * max_ldrv_num increases by 0x80 if some logical drive was
+ * deleted.
+ */
+ if(adapter->read_ldidmap)
+ max_ldrv_num += 0x80;
+
+ if(ldrv_num > max_ldrv_num ) {
+ cmd->result = (DID_BAD_TARGET << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+
+ }
+ else {
+ if( cmd->device->lun > 7) {
+ /*
+ * Do not support lun >7 for physically accessed
+ * devices
+ */
+ cmd->result = (DID_BAD_TARGET << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+ }
+
+ /*
+ *
+ * Logical drive commands
+ *
+ */
+ if(islogical) {
+ switch (cmd->cmnd[0]) {
+ case TEST_UNIT_READY:
+#if MEGA_HAVE_CLUSTERING
+ /*
+ * Do we support clustering and is the support enabled
+ * If no, return success always
+ */
+ if( !adapter->has_cluster ) {
+ cmd->result = (DID_OK << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+
+ if(!(scb = mega_allocate_scb(adapter, cmd))) {
+ *busy = 1;
+ return NULL;
+ }
+
+ scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
+ scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
+ scb->raw_mbox[3] = ldrv_num;
+
+ scb->dma_direction = PCI_DMA_NONE;
+
+ return scb;
+#else
+ cmd->result = (DID_OK << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+#endif
+
+ case MODE_SENSE: {
+ char *buf;
+ struct scatterlist *sg;
+
+ sg = scsi_sglist(cmd);
+ buf = kmap_atomic(sg_page(sg)) + sg->offset;
+
+ memset(buf, 0, cmd->cmnd[4]);
+ kunmap_atomic(buf - sg->offset);
+
+ cmd->result = (DID_OK << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+
+ case READ_CAPACITY:
+ case INQUIRY:
+
+ if(!(adapter->flag & (1L << cmd->device->channel))) {
+
+ printk(KERN_NOTICE
+ "scsi%d: scanning scsi channel %d ",
+ adapter->host->host_no,
+ cmd->device->channel);
+ printk("for logical drives.\n");
+
+ adapter->flag |= (1L << cmd->device->channel);
+ }
+
+ /* Allocate a SCB and initialize passthru */
+ if(!(scb = mega_allocate_scb(adapter, cmd))) {
+ *busy = 1;
+ return NULL;
+ }
+ pthru = scb->pthru;
+
+ mbox = (mbox_t *)scb->raw_mbox;
+ memset(mbox, 0, sizeof(scb->raw_mbox));
+ memset(pthru, 0, sizeof(mega_passthru));
+
+ pthru->timeout = 0;
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 1;
+ pthru->logdrv = ldrv_num;
+ pthru->cdblen = cmd->cmd_len;
+ memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
+
+ if( adapter->has_64bit_addr ) {
+ mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
+ }
+ else {
+ mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
+ }
+
+ scb->dma_direction = PCI_DMA_FROMDEVICE;
+
+ pthru->numsgelements = mega_build_sglist(adapter, scb,
+ &pthru->dataxferaddr, &pthru->dataxferlen);
+
+ mbox->m_out.xferaddr = scb->pthru_dma_addr;
+
+ return scb;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+
+ /* Allocate a SCB and initialize mailbox */
+ if(!(scb = mega_allocate_scb(adapter, cmd))) {
+ *busy = 1;
+ return NULL;
+ }
+ mbox = (mbox_t *)scb->raw_mbox;
+
+ memset(mbox, 0, sizeof(scb->raw_mbox));
+ mbox->m_out.logdrv = ldrv_num;
+
+ /*
+ * A little hack: 2nd bit is zero for all scsi read
+ * commands and is set for all scsi write commands
+ */
+ if( adapter->has_64bit_addr ) {
+ mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
+ MEGA_MBOXCMD_LWRITE64:
+ MEGA_MBOXCMD_LREAD64 ;
+ }
+ else {
+ mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
+ MEGA_MBOXCMD_LWRITE:
+ MEGA_MBOXCMD_LREAD ;
+ }
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if( cmd->cmd_len == 6 ) {
+ mbox->m_out.numsectors = (u32) cmd->cmnd[4];
+ mbox->m_out.lba =
+ ((u32)cmd->cmnd[1] << 16) |
+ ((u32)cmd->cmnd[2] << 8) |
+ (u32)cmd->cmnd[3];
+
+ mbox->m_out.lba &= 0x1FFFFF;
+
+#if MEGA_HAVE_STATS
+ /*
+ * Take modulo 0x80, since the logical drive
+ * number increases by 0x80 when a logical
+ * drive was deleted
+ */
+ if (*cmd->cmnd == READ_6) {
+ adapter->nreads[ldrv_num%0x80]++;
+ adapter->nreadblocks[ldrv_num%0x80] +=
+ mbox->m_out.numsectors;
+ } else {
+ adapter->nwrites[ldrv_num%0x80]++;
+ adapter->nwriteblocks[ldrv_num%0x80] +=
+ mbox->m_out.numsectors;
+ }
+#endif
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ if( cmd->cmd_len == 10 ) {
+ mbox->m_out.numsectors =
+ (u32)cmd->cmnd[8] |
+ ((u32)cmd->cmnd[7] << 8);
+ mbox->m_out.lba =
+ ((u32)cmd->cmnd[2] << 24) |
+ ((u32)cmd->cmnd[3] << 16) |
+ ((u32)cmd->cmnd[4] << 8) |
+ (u32)cmd->cmnd[5];
+
+#if MEGA_HAVE_STATS
+ if (*cmd->cmnd == READ_10) {
+ adapter->nreads[ldrv_num%0x80]++;
+ adapter->nreadblocks[ldrv_num%0x80] +=
+ mbox->m_out.numsectors;
+ } else {
+ adapter->nwrites[ldrv_num%0x80]++;
+ adapter->nwriteblocks[ldrv_num%0x80] +=
+ mbox->m_out.numsectors;
+ }
+#endif
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ if( cmd->cmd_len == 12 ) {
+ mbox->m_out.lba =
+ ((u32)cmd->cmnd[2] << 24) |
+ ((u32)cmd->cmnd[3] << 16) |
+ ((u32)cmd->cmnd[4] << 8) |
+ (u32)cmd->cmnd[5];
+
+ mbox->m_out.numsectors =
+ ((u32)cmd->cmnd[6] << 24) |
+ ((u32)cmd->cmnd[7] << 16) |
+ ((u32)cmd->cmnd[8] << 8) |
+ (u32)cmd->cmnd[9];
+
+#if MEGA_HAVE_STATS
+ if (*cmd->cmnd == READ_12) {
+ adapter->nreads[ldrv_num%0x80]++;
+ adapter->nreadblocks[ldrv_num%0x80] +=
+ mbox->m_out.numsectors;
+ } else {
+ adapter->nwrites[ldrv_num%0x80]++;
+ adapter->nwriteblocks[ldrv_num%0x80] +=
+ mbox->m_out.numsectors;
+ }
+#endif
+ }
+
+ /*
+ * If it is a read command
+ */
+ if( (*cmd->cmnd & 0x0F) == 0x08 ) {
+ scb->dma_direction = PCI_DMA_FROMDEVICE;
+ }
+ else {
+ scb->dma_direction = PCI_DMA_TODEVICE;
+ }
+
+ /* Calculate Scatter-Gather info */
+ mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
+ (u32 *)&mbox->m_out.xferaddr, &seg);
+
+ return scb;
+
+#if MEGA_HAVE_CLUSTERING
+ case RESERVE: /* Fall through */
+ case RELEASE:
+
+ /*
+ * Do we support clustering and is the support enabled
+ */
+ if( ! adapter->has_cluster ) {
+
+ cmd->result = (DID_BAD_TARGET << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+
+ /* Allocate a SCB and initialize mailbox */
+ if(!(scb = mega_allocate_scb(adapter, cmd))) {
+ *busy = 1;
+ return NULL;
+ }
+
+ scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
+ scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
+ MEGA_RESERVE_LD : MEGA_RELEASE_LD;
+
+ scb->raw_mbox[3] = ldrv_num;
+
+ scb->dma_direction = PCI_DMA_NONE;
+
+ return scb;
+#endif
+
+ default:
+ cmd->result = (DID_BAD_TARGET << 16);
+ cmd->scsi_done(cmd);
+ return NULL;
+ }
+ }
+
+ /*
+ * Passthru drive commands
+ */
+ else {
+ /* Allocate a SCB and initialize passthru */
+ if(!(scb = mega_allocate_scb(adapter, cmd))) {
+ *busy = 1;
+ return NULL;
+ }
+
+ mbox = (mbox_t *)scb->raw_mbox;
+ memset(mbox, 0, sizeof(scb->raw_mbox));
+
+ if( adapter->support_ext_cdb ) {
+
+ epthru = mega_prepare_extpassthru(adapter, scb, cmd,
+ channel, target);
+
+ mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
+
+ mbox->m_out.xferaddr = scb->epthru_dma_addr;
+
+ }
+ else {
+
+ pthru = mega_prepare_passthru(adapter, scb, cmd,
+ channel, target);
+
+ /* Initialize mailbox */
+ if( adapter->has_64bit_addr ) {
+ mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
+ }
+ else {
+ mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
+ }
+
+ mbox->m_out.xferaddr = scb->pthru_dma_addr;
+
+ }
+ return scb;
+ }
+ return NULL;
+}
+
+
+/**
+ * mega_prepare_passthru()
+ * @adapter - pointer to our soft state
+ * @scb - our scsi control block
+ * @cmd - scsi command from the mid-layer
+ * @channel - actual channel on the controller
+ * @target - actual id on the controller.
+ *
+ * prepare a command for the scsi physical devices.
+ */
+static mega_passthru *
+mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
+ int channel, int target)
+{
+ mega_passthru *pthru;
+
+ pthru = scb->pthru;
+ memset(pthru, 0, sizeof (mega_passthru));
+
+ /* 0=6sec/1=60sec/2=10min/3=3hrs */
+ pthru->timeout = 2;
+
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 0;
+
+ pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
+
+ pthru->target = (adapter->flag & BOARD_40LD) ?
+ (channel << 4) | target : target;
+
+ pthru->cdblen = cmd->cmd_len;
+ pthru->logdrv = cmd->device->lun;
+
+ memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
+
+ /* Not sure about the direction */
+ scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+
+ /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
+ switch (cmd->cmnd[0]) {
+ case INQUIRY:
+ case READ_CAPACITY:
+ if(!(adapter->flag & (1L << cmd->device->channel))) {
+
+ printk(KERN_NOTICE
+ "scsi%d: scanning scsi channel %d [P%d] ",
+ adapter->host->host_no,
+ cmd->device->channel, channel);
+ printk("for physical devices.\n");
+
+ adapter->flag |= (1L << cmd->device->channel);
+ }
+ /* Fall through */
+ default:
+ pthru->numsgelements = mega_build_sglist(adapter, scb,
+ &pthru->dataxferaddr, &pthru->dataxferlen);
+ break;
+ }
+ return pthru;
+}
+
+
+/**
+ * mega_prepare_extpassthru()
+ * @adapter - pointer to our soft state
+ * @scb - our scsi control block
+ * @cmd - scsi command from the mid-layer
+ * @channel - actual channel on the controller
+ * @target - actual id on the controller.
+ *
+ * prepare a command for the scsi physical devices. This rountine prepares
+ * commands for devices which can take extended CDBs (>10 bytes)
+ */
+static mega_ext_passthru *
+mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
+ int channel, int target)
+{
+ mega_ext_passthru *epthru;
+
+ epthru = scb->epthru;
+ memset(epthru, 0, sizeof(mega_ext_passthru));
+
+ /* 0=6sec/1=60sec/2=10min/3=3hrs */
+ epthru->timeout = 2;
+
+ epthru->ars = 1;
+ epthru->reqsenselen = 14;
+ epthru->islogical = 0;
+
+ epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
+ epthru->target = (adapter->flag & BOARD_40LD) ?
+ (channel << 4) | target : target;
+
+ epthru->cdblen = cmd->cmd_len;
+ epthru->logdrv = cmd->device->lun;
+
+ memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
+
+ /* Not sure about the direction */
+ scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+
+ switch(cmd->cmnd[0]) {
+ case INQUIRY:
+ case READ_CAPACITY:
+ if(!(adapter->flag & (1L << cmd->device->channel))) {
+
+ printk(KERN_NOTICE
+ "scsi%d: scanning scsi channel %d [P%d] ",
+ adapter->host->host_no,
+ cmd->device->channel, channel);
+ printk("for physical devices.\n");
+
+ adapter->flag |= (1L << cmd->device->channel);
+ }
+ /* Fall through */
+ default:
+ epthru->numsgelements = mega_build_sglist(adapter, scb,
+ &epthru->dataxferaddr, &epthru->dataxferlen);
+ break;
+ }
+
+ return epthru;
+}
+
+static void
+__mega_runpendq(adapter_t *adapter)
+{
+ scb_t *scb;
+ struct list_head *pos, *next;
+
+ /* Issue any pending commands to the card */
+ list_for_each_safe(pos, next, &adapter->pending_list) {
+
+ scb = list_entry(pos, scb_t, list);
+
+ if( !(scb->state & SCB_ISSUED) ) {
+
+ if( issue_scb(adapter, scb) != 0 )
+ return;
+ }
+ }
+
+ return;
+}
+
+
+/**
+ * issue_scb()
+ * @adapter - pointer to our soft state
+ * @scb - scsi control block
+ *
+ * Post a command to the card if the mailbox is available, otherwise return
+ * busy. We also take the scb from the pending list if the mailbox is
+ * available.
+ */
+static int
+issue_scb(adapter_t *adapter, scb_t *scb)
+{
+ volatile mbox64_t *mbox64 = adapter->mbox64;
+ volatile mbox_t *mbox = adapter->mbox;
+ unsigned int i = 0;
+
+ if(unlikely(mbox->m_in.busy)) {
+ do {
+ udelay(1);
+ i++;
+ } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
+
+ if(mbox->m_in.busy) return -1;
+ }
+
+ /* Copy mailbox data into host structure */
+ memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
+ sizeof(struct mbox_out));
+
+ mbox->m_out.cmdid = scb->idx; /* Set cmdid */
+ mbox->m_in.busy = 1; /* Set busy */
+
+
+ /*
+ * Increment the pending queue counter
+ */
+ atomic_inc(&adapter->pend_cmds);
+
+ switch (mbox->m_out.cmd) {
+ case MEGA_MBOXCMD_LREAD64:
+ case MEGA_MBOXCMD_LWRITE64:
+ case MEGA_MBOXCMD_PASSTHRU64:
+ case MEGA_MBOXCMD_EXTPTHRU:
+ mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
+ mbox64->xfer_segment_hi = 0;
+ mbox->m_out.xferaddr = 0xFFFFFFFF;
+ break;
+ default:
+ mbox64->xfer_segment_lo = 0;
+ mbox64->xfer_segment_hi = 0;
+ }
+
+ /*
+ * post the command
+ */
+ scb->state |= SCB_ISSUED;
+
+ if( likely(adapter->flag & BOARD_MEMMAP) ) {
+ mbox->m_in.poll = 0;
+ mbox->m_in.ack = 0;
+ WRINDOOR(adapter, adapter->mbox_dma | 0x1);
+ }
+ else {
+ irq_enable(adapter);
+ issue_command(adapter);
+ }
+
+ return 0;
+}
+
+/*
+ * Wait until the controller's mailbox is available
+ */
+static inline int
+mega_busywait_mbox (adapter_t *adapter)
+{
+ if (adapter->mbox->m_in.busy)
+ return __mega_busywait_mbox(adapter);
+ return 0;
+}
+
+/**
+ * issue_scb_block()
+ * @adapter - pointer to our soft state
+ * @raw_mbox - the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode
+ */
+static int
+issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
+{
+ volatile mbox64_t *mbox64 = adapter->mbox64;
+ volatile mbox_t *mbox = adapter->mbox;
+ u8 byte;
+
+ /* Wait until mailbox is free */
+ if(mega_busywait_mbox (adapter))
+ goto bug_blocked_mailbox;
+
+ /* Copy mailbox data into host structure */
+ memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
+ mbox->m_out.cmdid = 0xFE;
+ mbox->m_in.busy = 1;
+
+ switch (raw_mbox[0]) {
+ case MEGA_MBOXCMD_LREAD64:
+ case MEGA_MBOXCMD_LWRITE64:
+ case MEGA_MBOXCMD_PASSTHRU64:
+ case MEGA_MBOXCMD_EXTPTHRU:
+ mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
+ mbox64->xfer_segment_hi = 0;
+ mbox->m_out.xferaddr = 0xFFFFFFFF;
+ break;
+ default:
+ mbox64->xfer_segment_lo = 0;
+ mbox64->xfer_segment_hi = 0;
+ }
+
+ if( likely(adapter->flag & BOARD_MEMMAP) ) {
+ mbox->m_in.poll = 0;
+ mbox->m_in.ack = 0;
+ mbox->m_in.numstatus = 0xFF;
+ mbox->m_in.status = 0xFF;
+ WRINDOOR(adapter, adapter->mbox_dma | 0x1);
+
+ while((volatile u8)mbox->m_in.numstatus == 0xFF)
+ cpu_relax();
+
+ mbox->m_in.numstatus = 0xFF;
+
+ while( (volatile u8)mbox->m_in.poll != 0x77 )
+ cpu_relax();
+
+ mbox->m_in.poll = 0;
+ mbox->m_in.ack = 0x77;
+
+ WRINDOOR(adapter, adapter->mbox_dma | 0x2);
+
+ while(RDINDOOR(adapter) & 0x2)
+ cpu_relax();
+ }
+ else {
+ irq_disable(adapter);
+ issue_command(adapter);
+
+ while (!((byte = irq_state(adapter)) & INTR_VALID))
+ cpu_relax();
+
+ set_irq_state(adapter, byte);
+ irq_enable(adapter);
+ irq_ack(adapter);
+ }
+
+ return mbox->m_in.status;
+
+bug_blocked_mailbox:
+ printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n");
+ udelay (1000);
+ return -1;
+}
+
+
+/**
+ * megaraid_isr_iomapped()
+ * @irq - irq
+ * @devp - pointer to our soft state
+ *
+ * Interrupt service routine for io-mapped controllers.
+ * Find out if our device is interrupting. If yes, acknowledge the interrupt
+ * and service the completed commands.
+ */
+static irqreturn_t
+megaraid_isr_iomapped(int irq, void *devp)
+{
+ adapter_t *adapter = devp;
+ unsigned long flags;
+ u8 status;
+ u8 nstatus;
+ u8 completed[MAX_FIRMWARE_STATUS];
+ u8 byte;
+ int handled = 0;
+
+
+ /*
+ * loop till F/W has more commands for us to complete.
+ */
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ do {
+ /* Check if a valid interrupt is pending */
+ byte = irq_state(adapter);
+ if( (byte & VALID_INTR_BYTE) == 0 ) {
+ /*
+ * No more pending commands
+ */
+ goto out_unlock;
+ }
+ set_irq_state(adapter, byte);
+
+ while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
+ == 0xFF)
+ cpu_relax();
+ adapter->mbox->m_in.numstatus = 0xFF;
+
+ status = adapter->mbox->m_in.status;
+
+ /*
+ * decrement the pending queue counter
+ */
+ atomic_sub(nstatus, &adapter->pend_cmds);
+
+ memcpy(completed, (void *)adapter->mbox->m_in.completed,
+ nstatus);
+
+ /* Acknowledge interrupt */
+ irq_ack(adapter);
+
+ mega_cmd_done(adapter, completed, nstatus, status);
+
+ mega_rundoneq(adapter);
+
+ handled = 1;
+
+ /* Loop through any pending requests */
+ if(atomic_read(&adapter->quiescent) == 0) {
+ mega_runpendq(adapter);
+ }
+
+ } while(1);
+
+ out_unlock:
+
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+
+/**
+ * megaraid_isr_memmapped()
+ * @irq - irq
+ * @devp - pointer to our soft state
+ *
+ * Interrupt service routine for memory-mapped controllers.
+ * Find out if our device is interrupting. If yes, acknowledge the interrupt
+ * and service the completed commands.
+ */
+static irqreturn_t
+megaraid_isr_memmapped(int irq, void *devp)
+{
+ adapter_t *adapter = devp;
+ unsigned long flags;
+ u8 status;
+ u32 dword = 0;
+ u8 nstatus;
+ u8 completed[MAX_FIRMWARE_STATUS];
+ int handled = 0;
+
+
+ /*
+ * loop till F/W has more commands for us to complete.
+ */
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ do {
+ /* Check if a valid interrupt is pending */
+ dword = RDOUTDOOR(adapter);
+ if(dword != 0x10001234) {
+ /*
+ * No more pending commands
+ */
+ goto out_unlock;
+ }
+ WROUTDOOR(adapter, 0x10001234);
+
+ while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
+ == 0xFF) {
+ cpu_relax();
+ }
+ adapter->mbox->m_in.numstatus = 0xFF;
+
+ status = adapter->mbox->m_in.status;
+
+ /*
+ * decrement the pending queue counter
+ */
+ atomic_sub(nstatus, &adapter->pend_cmds);
+
+ memcpy(completed, (void *)adapter->mbox->m_in.completed,
+ nstatus);
+
+ /* Acknowledge interrupt */
+ WRINDOOR(adapter, 0x2);
+
+ handled = 1;
+
+ while( RDINDOOR(adapter) & 0x02 )
+ cpu_relax();
+
+ mega_cmd_done(adapter, completed, nstatus, status);
+
+ mega_rundoneq(adapter);
+
+ /* Loop through any pending requests */
+ if(atomic_read(&adapter->quiescent) == 0) {
+ mega_runpendq(adapter);
+ }
+
+ } while(1);
+
+ out_unlock:
+
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+/**
+ * mega_cmd_done()
+ * @adapter - pointer to our soft state
+ * @completed - array of ids of completed commands
+ * @nstatus - number of completed commands
+ * @status - status of the last command completed
+ *
+ * Complete the commands and call the scsi mid-layer callback hooks.
+ */
+static void
+mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
+{
+ mega_ext_passthru *epthru = NULL;
+ struct scatterlist *sgl;
+ Scsi_Cmnd *cmd = NULL;
+ mega_passthru *pthru = NULL;
+ mbox_t *mbox = NULL;
+ u8 c;
+ scb_t *scb;
+ int islogical;
+ int cmdid;
+ int i;
+
+ /*
+ * for all the commands completed, call the mid-layer callback routine
+ * and free the scb.
+ */
+ for( i = 0; i < nstatus; i++ ) {
+
+ cmdid = completed[i];
+
+ /*
+ * Only free SCBs for the commands coming down from the
+ * mid-layer, not for which were issued internally
+ *
+ * For internal command, restore the status returned by the
+ * firmware so that user can interpret it.
+ */
+ if (cmdid == CMDID_INT_CMDS) {
+ scb = &adapter->int_scb;
+
+ list_del_init(&scb->list);
+ scb->state = SCB_FREE;
+
+ adapter->int_status = status;
+ complete(&adapter->int_waitq);
+ } else {
+ scb = &adapter->scb_list[cmdid];
+
+ /*
+ * Make sure f/w has completed a valid command
+ */
+ if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
+ printk(KERN_CRIT
+ "megaraid: invalid command ");
+ printk("Id %d, scb->state:%x, scsi cmd:%p\n",
+ cmdid, scb->state, scb->cmd);
+
+ continue;
+ }
+
+ /*
+ * Was a abort issued for this command
+ */
+ if( scb->state & SCB_ABORT ) {
+
+ printk(KERN_WARNING
+ "megaraid: aborted cmd [%x] complete.\n",
+ scb->idx);
+
+ scb->cmd->result = (DID_ABORT << 16);
+
+ list_add_tail(SCSI_LIST(scb->cmd),
+ &adapter->completed_list);
+
+ mega_free_scb(adapter, scb);
+
+ continue;
+ }
+
+ /*
+ * Was a reset issued for this command
+ */
+ if( scb->state & SCB_RESET ) {
+
+ printk(KERN_WARNING
+ "megaraid: reset cmd [%x] complete.\n",
+ scb->idx);
+
+ scb->cmd->result = (DID_RESET << 16);
+
+ list_add_tail(SCSI_LIST(scb->cmd),
+ &adapter->completed_list);
+
+ mega_free_scb (adapter, scb);
+
+ continue;
+ }
+
+ cmd = scb->cmd;
+ pthru = scb->pthru;
+ epthru = scb->epthru;
+ mbox = (mbox_t *)scb->raw_mbox;
+
+#if MEGA_HAVE_STATS
+ {
+
+ int logdrv = mbox->m_out.logdrv;
+
+ islogical = adapter->logdrv_chan[cmd->channel];
+ /*
+ * Maintain an error counter for the logical drive.
+ * Some application like SNMP agent need such
+ * statistics
+ */
+ if( status && islogical && (cmd->cmnd[0] == READ_6 ||
+ cmd->cmnd[0] == READ_10 ||
+ cmd->cmnd[0] == READ_12)) {
+ /*
+ * Logical drive number increases by 0x80 when
+ * a logical drive is deleted
+ */
+ adapter->rd_errors[logdrv%0x80]++;
+ }
+
+ if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
+ cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12)) {
+ /*
+ * Logical drive number increases by 0x80 when
+ * a logical drive is deleted
+ */
+ adapter->wr_errors[logdrv%0x80]++;
+ }
+
+ }
+#endif
+ }
+
+ /*
+ * Do not return the presence of hard disk on the channel so,
+ * inquiry sent, and returned data==hard disk or removable
+ * hard disk and not logical, request should return failure! -
+ * PJ
+ */
+ islogical = adapter->logdrv_chan[cmd->device->channel];
+ if( cmd->cmnd[0] == INQUIRY && !islogical ) {
+
+ sgl = scsi_sglist(cmd);
+ if( sg_page(sgl) ) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
+ } else {
+ printk(KERN_WARNING
+ "megaraid: invalid sg.\n");
+ c = 0;
+ }
+
+ if(IS_RAID_CH(adapter, cmd->device->channel) &&
+ ((c & 0x1F ) == TYPE_DISK)) {
+ status = 0xF0;
+ }
+ }
+
+ /* clear result; otherwise, success returns corrupt value */
+ cmd->result = 0;
+
+ /* Convert MegaRAID status to Linux error code */
+ switch (status) {
+ case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
+ cmd->result |= (DID_OK << 16);
+ break;
+
+ case 0x02: /* ERROR_ABORTED, i.e.
+ SCSI_STATUS_CHECK_CONDITION */
+
+ /* set sense_buffer and result fields */
+ if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
+ mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
+
+ memcpy(cmd->sense_buffer, pthru->reqsensearea,
+ 14);
+
+ cmd->result = (DRIVER_SENSE << 24) |
+ (DID_OK << 16) |
+ (CHECK_CONDITION << 1);
+ }
+ else {
+ if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
+
+ memcpy(cmd->sense_buffer,
+ epthru->reqsensearea, 14);
+
+ cmd->result = (DRIVER_SENSE << 24) |
+ (DID_OK << 16) |
+ (CHECK_CONDITION << 1);
+ } else {
+ cmd->sense_buffer[0] = 0x70;
+ cmd->sense_buffer[2] = ABORTED_COMMAND;
+ cmd->result |= (CHECK_CONDITION << 1);
+ }
+ }
+ break;
+
+ case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
+ SCSI_STATUS_BUSY */
+ cmd->result |= (DID_BUS_BUSY << 16) | status;
+ break;
+
+ default:
+#if MEGA_HAVE_CLUSTERING
+ /*
+ * If TEST_UNIT_READY fails, we know
+ * MEGA_RESERVATION_STATUS failed
+ */
+ if( cmd->cmnd[0] == TEST_UNIT_READY ) {
+ cmd->result |= (DID_ERROR << 16) |
+ (RESERVATION_CONFLICT << 1);
+ }
+ else
+ /*
+ * Error code returned is 1 if Reserve or Release
+ * failed or the input parameter is invalid
+ */
+ if( status == 1 &&
+ (cmd->cmnd[0] == RESERVE ||
+ cmd->cmnd[0] == RELEASE) ) {
+
+ cmd->result |= (DID_ERROR << 16) |
+ (RESERVATION_CONFLICT << 1);
+ }
+ else
+#endif
+ cmd->result |= (DID_BAD_TARGET << 16)|status;
+ }
+
+ mega_free_scb(adapter, scb);
+
+ /* Add Scsi_Command to end of completed queue */
+ list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
+ }
+}
+
+
+/*
+ * mega_runpendq()
+ *
+ * Run through the list of completed requests and finish it
+ */
+static void
+mega_rundoneq (adapter_t *adapter)
+{
+ Scsi_Cmnd *cmd;
+ struct list_head *pos;
+
+ list_for_each(pos, &adapter->completed_list) {
+
+ struct scsi_pointer* spos = (struct scsi_pointer *)pos;
+
+ cmd = list_entry(spos, Scsi_Cmnd, SCp);
+ cmd->scsi_done(cmd);
+ }
+
+ INIT_LIST_HEAD(&adapter->completed_list);
+}
+
+
+/*
+ * Free a SCB structure
+ * Note: We assume the scsi commands associated with this scb is not free yet.
+ */
+static void
+mega_free_scb(adapter_t *adapter, scb_t *scb)
+{
+ switch( scb->dma_type ) {
+
+ case MEGA_DMA_TYPE_NONE:
+ break;
+
+ case MEGA_SGLIST:
+ scsi_dma_unmap(scb->cmd);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Remove from the pending list
+ */
+ list_del_init(&scb->list);
+
+ /* Link the scb back into free list */
+ scb->state = SCB_FREE;
+ scb->cmd = NULL;
+
+ list_add(&scb->list, &adapter->free_list);
+}
+
+
+static int
+__mega_busywait_mbox (adapter_t *adapter)
+{
+ volatile mbox_t *mbox = adapter->mbox;
+ long counter;
+
+ for (counter = 0; counter < 10000; counter++) {
+ if (!mbox->m_in.busy)
+ return 0;
+ udelay(100);
+ cond_resched();
+ }
+ return -1; /* give up after 1 second */
+}
+
+/*
+ * Copies data to SGLIST
+ * Note: For 64 bit cards, we need a minimum of one SG element for read/write
+ */
+static int
+mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
+{
+ struct scatterlist *sg;
+ Scsi_Cmnd *cmd;
+ int sgcnt;
+ int idx;
+
+ cmd = scb->cmd;
+
+ /*
+ * Copy Scatter-Gather list info into controller structure.
+ *
+ * The number of sg elements returned must not exceed our limit
+ */
+ sgcnt = scsi_dma_map(cmd);
+
+ scb->dma_type = MEGA_SGLIST;
+
+ BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
+
+ *len = 0;
+
+ if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
+ sg = scsi_sglist(cmd);
+ scb->dma_h_bulkdata = sg_dma_address(sg);
+ *buf = (u32)scb->dma_h_bulkdata;
+ *len = sg_dma_len(sg);
+ return 0;
+ }
+
+ scsi_for_each_sg(cmd, sg, sgcnt, idx) {
+ if (adapter->has_64bit_addr) {
+ scb->sgl64[idx].address = sg_dma_address(sg);
+ *len += scb->sgl64[idx].length = sg_dma_len(sg);
+ } else {
+ scb->sgl[idx].address = sg_dma_address(sg);
+ *len += scb->sgl[idx].length = sg_dma_len(sg);
+ }
+ }
+
+ /* Reset pointer and length fields */
+ *buf = scb->sgl_dma_addr;
+
+ /* Return count of SG requests */
+ return sgcnt;
+}
+
+
+/*
+ * mega_8_to_40ld()
+ *
+ * takes all info in AdapterInquiry structure and puts it into ProductInfo and
+ * Enquiry3 structures for later use
+ */
+static void
+mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
+ mega_product_info *product_info)
+{
+ int i;
+
+ product_info->max_commands = inquiry->adapter_info.max_commands;
+ enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
+ product_info->nchannels = inquiry->adapter_info.nchannels;
+
+ for (i = 0; i < 4; i++) {
+ product_info->fw_version[i] =
+ inquiry->adapter_info.fw_version[i];
+
+ product_info->bios_version[i] =
+ inquiry->adapter_info.bios_version[i];
+ }
+ enquiry3->cache_flush_interval =
+ inquiry->adapter_info.cache_flush_interval;
+
+ product_info->dram_size = inquiry->adapter_info.dram_size;
+
+ enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
+
+ for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
+ enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
+ enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
+ enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
+ }
+
+ for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
+ enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
+}
+
+static inline void
+mega_free_sgl(adapter_t *adapter)
+{
+ scb_t *scb;
+ int i;
+
+ for(i = 0; i < adapter->max_cmds; i++) {
+
+ scb = &adapter->scb_list[i];
+
+ if( scb->sgl64 ) {
+ pci_free_consistent(adapter->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ scb->sgl64,
+ scb->sgl_dma_addr);
+
+ scb->sgl64 = NULL;
+ }
+
+ if( scb->pthru ) {
+ pci_free_consistent(adapter->dev, sizeof(mega_passthru),
+ scb->pthru, scb->pthru_dma_addr);
+
+ scb->pthru = NULL;
+ }
+
+ if( scb->epthru ) {
+ pci_free_consistent(adapter->dev,
+ sizeof(mega_ext_passthru),
+ scb->epthru, scb->epthru_dma_addr);
+
+ scb->epthru = NULL;
+ }
+
+ }
+}
+
+
+/*
+ * Get information about the card/driver
+ */
+const char *
+megaraid_info(struct Scsi_Host *host)
+{
+ static char buffer[512];
+ adapter_t *adapter;
+
+ adapter = (adapter_t *)host->hostdata;
+
+ sprintf (buffer,
+ "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
+ adapter->fw_version, adapter->product_info.max_commands,
+ adapter->host->max_id, adapter->host->max_channel,
+ (u32)adapter->host->max_lun);
+ return buffer;
+}
+
+/*
+ * Abort a previous SCSI request. Only commands on the pending list can be
+ * aborted. All the commands issued to the F/W must complete.
+ */
+static int
+megaraid_abort(Scsi_Cmnd *cmd)
+{
+ adapter_t *adapter;
+ int rval;
+
+ adapter = (adapter_t *)cmd->device->host->hostdata;
+
+ rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
+
+ /*
+ * This is required here to complete any completed requests
+ * to be communicated over to the mid layer.
+ */
+ mega_rundoneq(adapter);
+
+ return rval;
+}
+
+
+static int
+megaraid_reset(struct scsi_cmnd *cmd)
+{
+ adapter_t *adapter;
+ megacmd_t mc;
+ int rval;
+
+ adapter = (adapter_t *)cmd->device->host->hostdata;
+
+#if MEGA_HAVE_CLUSTERING
+ mc.cmd = MEGA_CLUSTER_CMD;
+ mc.opcode = MEGA_RESET_RESERVATIONS;
+
+ if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
+ printk(KERN_WARNING
+ "megaraid: reservation reset failed.\n");
+ }
+ else {
+ printk(KERN_INFO "megaraid: reservation reset.\n");
+ }
+#endif
+
+ spin_lock_irq(&adapter->lock);
+
+ rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
+
+ /*
+ * This is required here to complete any completed requests
+ * to be communicated over to the mid layer.
+ */
+ mega_rundoneq(adapter);
+ spin_unlock_irq(&adapter->lock);
+
+ return rval;
+}
+
+/**
+ * megaraid_abort_and_reset()
+ * @adapter - megaraid soft state
+ * @cmd - scsi command to be aborted or reset
+ * @aor - abort or reset flag
+ *
+ * Try to locate the scsi command in the pending queue. If found and is not
+ * issued to the controller, abort/reset it. Otherwise return failure
+ */
+static int
+megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+{
+ struct list_head *pos, *next;
+ scb_t *scb;
+
+ printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
+ (aor == SCB_ABORT)? "ABORTING":"RESET",
+ cmd->cmnd[0], cmd->device->channel,
+ cmd->device->id, (u32)cmd->device->lun);
+
+ if(list_empty(&adapter->pending_list))
+ return FAILED;
+
+ list_for_each_safe(pos, next, &adapter->pending_list) {
+
+ scb = list_entry(pos, scb_t, list);
+
+ if (scb->cmd == cmd) { /* Found command */
+
+ scb->state |= aor;
+
+ /*
+ * Check if this command has firmware ownership. If
+ * yes, we cannot reset this command. Whenever f/w
+ * completes this command, we will return appropriate
+ * status from ISR.
+ */
+ if( scb->state & SCB_ISSUED ) {
+
+ printk(KERN_WARNING
+ "megaraid: %s[%x], fw owner.\n",
+ (aor==SCB_ABORT) ? "ABORTING":"RESET",
+ scb->idx);
+
+ return FAILED;
+ }
+ else {
+
+ /*
+ * Not yet issued! Remove from the pending
+ * list
+ */
+ printk(KERN_WARNING
+ "megaraid: %s-[%x], driver owner.\n",
+ (aor==SCB_ABORT) ? "ABORTING":"RESET",
+ scb->idx);
+
+ mega_free_scb(adapter, scb);
+
+ if( aor == SCB_ABORT ) {
+ cmd->result = (DID_ABORT << 16);
+ }
+ else {
+ cmd->result = (DID_RESET << 16);
+ }
+
+ list_add_tail(SCSI_LIST(cmd),
+ &adapter->completed_list);
+
+ return SUCCESS;
+ }
+ }
+ }
+
+ return FAILED;
+}
+
+static inline int
+make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
+{
+ *pdev = pci_alloc_dev(NULL);
+
+ if( *pdev == NULL ) return -1;
+
+ memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
+
+ if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
+ kfree(*pdev);
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline void
+free_local_pdev(struct pci_dev *pdev)
+{
+ kfree(pdev);
+}
+
+/**
+ * mega_allocate_inquiry()
+ * @dma_handle - handle returned for dma address
+ * @pdev - handle to pci device
+ *
+ * allocates memory for inquiry structure
+ */
+static inline void *
+mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
+{
+ return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
+}
+
+
+static inline void
+mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
+{
+ pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
+}
+
+
+#ifdef CONFIG_PROC_FS
+/* Following code handles /proc fs */
+
+/**
+ * proc_show_config()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display configuration information about the controller.
+ */
+static int
+proc_show_config(struct seq_file *m, void *v)
+{
+
+ adapter_t *adapter = m->private;
+
+ seq_puts(m, MEGARAID_VERSION);
+ if(adapter->product_info.product_name[0])
+ seq_printf(m, "%s\n", adapter->product_info.product_name);
+
+ seq_puts(m, "Controller Type: ");
+
+ if( adapter->flag & BOARD_MEMMAP )
+ seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
+ else
+ seq_puts(m, "418/428/434\n");
+
+ if(adapter->flag & BOARD_40LD)
+ seq_puts(m, "Controller Supports 40 Logical Drives\n");
+
+ if(adapter->flag & BOARD_64BIT)
+ seq_puts(m, "Controller capable of 64-bit memory addressing\n");
+ if( adapter->has_64bit_addr )
+ seq_puts(m, "Controller using 64-bit memory addressing\n");
+ else
+ seq_puts(m, "Controller is not using 64-bit memory addressing\n");
+
+ seq_printf(m, "Base = %08lx, Irq = %d, ",
+ adapter->base, adapter->host->irq);
+
+ seq_printf(m, "Logical Drives = %d, Channels = %d\n",
+ adapter->numldrv, adapter->product_info.nchannels);
+
+ seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
+ adapter->fw_version, adapter->bios_version,
+ adapter->product_info.dram_size);
+
+ seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
+ adapter->product_info.max_commands, adapter->max_cmds);
+
+ seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
+ seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
+ seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
+ seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
+ seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
+ seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
+ seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
+ seq_printf(m, "quiescent = %d\n",
+ atomic_read(&adapter->quiescent));
+ seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
+
+ seq_puts(m, "\nModule Parameters:\n");
+ seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
+ seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
+ return 0;
+}
+
+/**
+ * proc_show_stat()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display statistical information about the I/O activity.
+ */
+static int
+proc_show_stat(struct seq_file *m, void *v)
+{
+ adapter_t *adapter = m->private;
+#if MEGA_HAVE_STATS
+ int i;
+#endif
+
+ seq_puts(m, "Statistical Information for this controller\n");
+ seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
+#if MEGA_HAVE_STATS
+ for(i = 0; i < adapter->numldrv; i++) {
+ seq_printf(m, "Logical Drive %d:\n", i);
+ seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
+ adapter->nreads[i], adapter->nwrites[i]);
+ seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
+ adapter->nreadblocks[i], adapter->nwriteblocks[i]);
+ seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
+ adapter->rd_errors[i], adapter->wr_errors[i]);
+ }
+#else
+ seq_puts(m, "IO and error counters not compiled in driver.\n");
+#endif
+ return 0;
+}
+
+
+/**
+ * proc_show_mbox()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display mailbox information for the last command issued. This information
+ * is good for debugging.
+ */
+static int
+proc_show_mbox(struct seq_file *m, void *v)
+{
+ adapter_t *adapter = m->private;
+ volatile mbox_t *mbox = adapter->mbox;
+
+ seq_puts(m, "Contents of Mail Box Structure\n");
+ seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
+ seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
+ seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
+ seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
+ seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
+ seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
+ seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
+ seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
+ seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
+ return 0;
+}
+
+
+/**
+ * proc_show_rebuild_rate()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display current rebuild rate
+ */
+static int
+proc_show_rebuild_rate(struct seq_file *m, void *v)
+{
+ adapter_t *adapter = m->private;
+ dma_addr_t dma_handle;
+ caddr_t inquiry;
+ struct pci_dev *pdev;
+
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
+
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
+
+ if( mega_adapinq(adapter, dma_handle) != 0 ) {
+ seq_puts(m, "Adapter inquiry failed.\n");
+ printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ goto free_inquiry;
+ }
+
+ if( adapter->flag & BOARD_40LD )
+ seq_printf(m, "Rebuild Rate: [%d%%]\n",
+ ((mega_inquiry3 *)inquiry)->rebuild_rate);
+ else
+ seq_printf(m, "Rebuild Rate: [%d%%]\n",
+ ((mraid_ext_inquiry *)
+ inquiry)->raid_inq.adapter_info.rebuild_rate);
+
+free_inquiry:
+ mega_free_inquiry(inquiry, dma_handle, pdev);
+free_pdev:
+ free_local_pdev(pdev);
+ return 0;
+}
+
+
+/**
+ * proc_show_battery()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display information about the battery module on the controller.
+ */
+static int
+proc_show_battery(struct seq_file *m, void *v)
+{
+ adapter_t *adapter = m->private;
+ dma_addr_t dma_handle;
+ caddr_t inquiry;
+ struct pci_dev *pdev;
+ u8 battery_status;
+
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
+
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
+
+ if( mega_adapinq(adapter, dma_handle) != 0 ) {
+ seq_puts(m, "Adapter inquiry failed.\n");
+ printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ goto free_inquiry;
+ }
+
+ if( adapter->flag & BOARD_40LD ) {
+ battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
+ }
+ else {
+ battery_status = ((mraid_ext_inquiry *)inquiry)->
+ raid_inq.adapter_info.battery_status;
+ }
+
+ /*
+ * Decode the battery status
+ */
+ seq_printf(m, "Battery Status:[%d]", battery_status);
+
+ if(battery_status == MEGA_BATT_CHARGE_DONE)
+ seq_puts(m, " Charge Done");
+
+ if(battery_status & MEGA_BATT_MODULE_MISSING)
+ seq_puts(m, " Module Missing");
+
+ if(battery_status & MEGA_BATT_LOW_VOLTAGE)
+ seq_puts(m, " Low Voltage");
+
+ if(battery_status & MEGA_BATT_TEMP_HIGH)
+ seq_puts(m, " Temperature High");
+
+ if(battery_status & MEGA_BATT_PACK_MISSING)
+ seq_puts(m, " Pack Missing");
+
+ if(battery_status & MEGA_BATT_CHARGE_INPROG)
+ seq_puts(m, " Charge In-progress");
+
+ if(battery_status & MEGA_BATT_CHARGE_FAIL)
+ seq_puts(m, " Charge Fail");
+
+ if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
+ seq_puts(m, " Cycles Exceeded");
+
+ seq_putc(m, '\n');
+
+free_inquiry:
+ mega_free_inquiry(inquiry, dma_handle, pdev);
+free_pdev:
+ free_local_pdev(pdev);
+ return 0;
+}
+
+
+/*
+ * Display scsi inquiry
+ */
+static void
+mega_print_inquiry(struct seq_file *m, char *scsi_inq)
+{
+ int i;
+
+ seq_puts(m, " Vendor: ");
+ seq_write(m, scsi_inq + 8, 8);
+ seq_puts(m, " Model: ");
+ seq_write(m, scsi_inq + 16, 16);
+ seq_puts(m, " Rev: ");
+ seq_write(m, scsi_inq + 32, 4);
+ seq_putc(m, '\n');
+
+ i = scsi_inq[0] & 0x1f;
+ seq_printf(m, " Type: %s ", scsi_device_type(i));
+
+ seq_printf(m, " ANSI SCSI revision: %02x",
+ scsi_inq[2] & 0x07);
+
+ if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
+ seq_puts(m, " CCS\n");
+ else
+ seq_putc(m, '\n');
+}
+
+/**
+ * proc_show_pdrv()
+ * @m - Synthetic file construction data
+ * @page - buffer to write the data in
+ * @adapter - pointer to our soft state
+ *
+ * Display information about the physical drives.
+ */
+static int
+proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
+{
+ dma_addr_t dma_handle;
+ char *scsi_inq;
+ dma_addr_t scsi_inq_dma_handle;
+ caddr_t inquiry;
+ struct pci_dev *pdev;
+ u8 *pdrv_state;
+ u8 state;
+ int tgt;
+ int max_channels;
+ int i;
+
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
+
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
+
+ if( mega_adapinq(adapter, dma_handle) != 0 ) {
+ seq_puts(m, "Adapter inquiry failed.\n");
+ printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ goto free_inquiry;
+ }
+
+
+ scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
+ if( scsi_inq == NULL ) {
+ seq_puts(m, "memory not available for scsi inq.\n");
+ goto free_inquiry;
+ }
+
+ if( adapter->flag & BOARD_40LD ) {
+ pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
+ }
+ else {
+ pdrv_state = ((mraid_ext_inquiry *)inquiry)->
+ raid_inq.pdrv_info.pdrv_state;
+ }
+
+ max_channels = adapter->product_info.nchannels;
+
+ if( channel >= max_channels ) {
+ goto free_pci;
+ }
+
+ for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
+
+ i = channel*16 + tgt;
+
+ state = *(pdrv_state + i);
+ switch( state & 0x0F ) {
+ case PDRV_ONLINE:
+ seq_printf(m, "Channel:%2d Id:%2d State: Online",
+ channel, tgt);
+ break;
+
+ case PDRV_FAILED:
+ seq_printf(m, "Channel:%2d Id:%2d State: Failed",
+ channel, tgt);
+ break;
+
+ case PDRV_RBLD:
+ seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
+ channel, tgt);
+ break;
+
+ case PDRV_HOTSPARE:
+ seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
+ channel, tgt);
+ break;
+
+ default:
+ seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
+ channel, tgt);
+ break;
+ }
+
+ /*
+ * This interface displays inquiries for disk drives
+ * only. Inquries for logical drives and non-disk
+ * devices are available through /proc/scsi/scsi
+ */
+ memset(scsi_inq, 0, 256);
+ if( mega_internal_dev_inquiry(adapter, channel, tgt,
+ scsi_inq_dma_handle) ||
+ (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
+ continue;
+ }
+
+ /*
+ * Check for overflow. We print less than 240
+ * characters for inquiry
+ */
+ seq_puts(m, ".\n");
+ mega_print_inquiry(m, scsi_inq);
+ }
+
+free_pci:
+ pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
+free_inquiry:
+ mega_free_inquiry(inquiry, dma_handle, pdev);
+free_pdev:
+ free_local_pdev(pdev);
+ return 0;
+}
+
+/**
+ * proc_show_pdrv_ch0()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display information about the physical drives on physical channel 0.
+ */
+static int
+proc_show_pdrv_ch0(struct seq_file *m, void *v)
+{
+ return proc_show_pdrv(m, m->private, 0);
+}
+
+
+/**
+ * proc_show_pdrv_ch1()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display information about the physical drives on physical channel 1.
+ */
+static int
+proc_show_pdrv_ch1(struct seq_file *m, void *v)
+{
+ return proc_show_pdrv(m, m->private, 1);
+}
+
+
+/**
+ * proc_show_pdrv_ch2()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display information about the physical drives on physical channel 2.
+ */
+static int
+proc_show_pdrv_ch2(struct seq_file *m, void *v)
+{
+ return proc_show_pdrv(m, m->private, 2);
+}
+
+
+/**
+ * proc_show_pdrv_ch3()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display information about the physical drives on physical channel 3.
+ */
+static int
+proc_show_pdrv_ch3(struct seq_file *m, void *v)
+{
+ return proc_show_pdrv(m, m->private, 3);
+}
+
+
+/**
+ * proc_show_rdrv()
+ * @m - Synthetic file construction data
+ * @adapter - pointer to our soft state
+ * @start - starting logical drive to display
+ * @end - ending logical drive to display
+ *
+ * We do not print the inquiry information since its already available through
+ * /proc/scsi/scsi interface
+ */
+static int
+proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
+{
+ dma_addr_t dma_handle;
+ logdrv_param *lparam;
+ megacmd_t mc;
+ char *disk_array;
+ dma_addr_t disk_array_dma_handle;
+ caddr_t inquiry;
+ struct pci_dev *pdev;
+ u8 *rdrv_state;
+ int num_ldrv;
+ u32 array_sz;
+ int i;
+
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return 0;
+
+ if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
+ goto free_pdev;
+
+ if( mega_adapinq(adapter, dma_handle) != 0 ) {
+ seq_puts(m, "Adapter inquiry failed.\n");
+ printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ goto free_inquiry;
+ }
+
+ memset(&mc, 0, sizeof(megacmd_t));
+
+ if( adapter->flag & BOARD_40LD ) {
+ array_sz = sizeof(disk_array_40ld);
+
+ rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
+
+ num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
+ }
+ else {
+ array_sz = sizeof(disk_array_8ld);
+
+ rdrv_state = ((mraid_ext_inquiry *)inquiry)->
+ raid_inq.logdrv_info.ldrv_state;
+
+ num_ldrv = ((mraid_ext_inquiry *)inquiry)->
+ raid_inq.logdrv_info.num_ldrv;
+ }
+
+ disk_array = pci_alloc_consistent(pdev, array_sz,
+ &disk_array_dma_handle);
+
+ if( disk_array == NULL ) {
+ seq_puts(m, "memory not available.\n");
+ goto free_inquiry;
+ }
+
+ mc.xferaddr = (u32)disk_array_dma_handle;
+
+ if( adapter->flag & BOARD_40LD ) {
+ mc.cmd = FC_NEW_CONFIG;
+ mc.opcode = OP_DCMD_READ_CONFIG;
+
+ if( mega_internal_command(adapter, &mc, NULL) ) {
+ seq_puts(m, "40LD read config failed.\n");
+ goto free_pci;
+ }
+
+ }
+ else {
+ mc.cmd = NEW_READ_CONFIG_8LD;
+
+ if( mega_internal_command(adapter, &mc, NULL) ) {
+ mc.cmd = READ_CONFIG_8LD;
+ if( mega_internal_command(adapter, &mc, NULL) ) {
+ seq_puts(m, "8LD read config failed.\n");
+ goto free_pci;
+ }
+ }
+ }
+
+ for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
+
+ if( adapter->flag & BOARD_40LD ) {
+ lparam =
+ &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
+ }
+ else {
+ lparam =
+ &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
+ }
+
+ /*
+ * Check for overflow. We print less than 240 characters for
+ * information about each logical drive.
+ */
+ seq_printf(m, "Logical drive:%2d:, ", i);
+
+ switch( rdrv_state[i] & 0x0F ) {
+ case RDRV_OFFLINE:
+ seq_puts(m, "state: offline");
+ break;
+ case RDRV_DEGRADED:
+ seq_puts(m, "state: degraded");
+ break;
+ case RDRV_OPTIMAL:
+ seq_puts(m, "state: optimal");
+ break;
+ case RDRV_DELETED:
+ seq_puts(m, "state: deleted");
+ break;
+ default:
+ seq_puts(m, "state: unknown");
+ break;
+ }
+
+ /*
+ * Check if check consistency or initialization is going on
+ * for this logical drive.
+ */
+ if( (rdrv_state[i] & 0xF0) == 0x20 )
+ seq_puts(m, ", check-consistency in progress");
+ else if( (rdrv_state[i] & 0xF0) == 0x10 )
+ seq_puts(m, ", initialization in progress");
+
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
+ seq_printf(m, "RAID level:%3d, ", lparam->level);
+ seq_printf(m, "Stripe size:%3d, ",
+ lparam->stripe_sz ? lparam->stripe_sz/2: 128);
+ seq_printf(m, "Row size:%3d\n", lparam->row_size);
+
+ seq_puts(m, "Read Policy: ");
+ switch(lparam->read_ahead) {
+ case NO_READ_AHEAD:
+ seq_puts(m, "No read ahead, ");
+ break;
+ case READ_AHEAD:
+ seq_puts(m, "Read ahead, ");
+ break;
+ case ADAP_READ_AHEAD:
+ seq_puts(m, "Adaptive, ");
+ break;
+
+ }
+
+ seq_puts(m, "Write Policy: ");
+ switch(lparam->write_mode) {
+ case WRMODE_WRITE_THRU:
+ seq_puts(m, "Write thru, ");
+ break;
+ case WRMODE_WRITE_BACK:
+ seq_puts(m, "Write back, ");
+ break;
+ }
+
+ seq_puts(m, "Cache Policy: ");
+ switch(lparam->direct_io) {
+ case CACHED_IO:
+ seq_puts(m, "Cached IO\n\n");
+ break;
+ case DIRECT_IO:
+ seq_puts(m, "Direct IO\n\n");
+ break;
+ }
+ }
+
+free_pci:
+ pci_free_consistent(pdev, array_sz, disk_array,
+ disk_array_dma_handle);
+free_inquiry:
+ mega_free_inquiry(inquiry, dma_handle, pdev);
+free_pdev:
+ free_local_pdev(pdev);
+ return 0;
+}
+
+/**
+ * proc_show_rdrv_10()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 0 through 9.
+ */
+static int
+proc_show_rdrv_10(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 0, 9);
+}
+
+
+/**
+ * proc_show_rdrv_20()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 0 through 9.
+ */
+static int
+proc_show_rdrv_20(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 10, 19);
+}
+
+
+/**
+ * proc_show_rdrv_30()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 0 through 9.
+ */
+static int
+proc_show_rdrv_30(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 20, 29);
+}
+
+
+/**
+ * proc_show_rdrv_40()
+ * @m - Synthetic file construction data
+ * @v - File iterator
+ *
+ * Display real time information about the logical drives 0 through 9.
+ */
+static int
+proc_show_rdrv_40(struct seq_file *m, void *v)
+{
+ return proc_show_rdrv(m, m->private, 30, 39);
+}
+
+
+/*
+ * seq_file wrappers for procfile show routines.
+ */
+static int mega_proc_open(struct inode *inode, struct file *file)
+{
+ adapter_t *adapter = proc_get_parent_data(inode);
+ int (*show)(struct seq_file *, void *) = PDE_DATA(inode);
+
+ return single_open(file, show, adapter);
+}
+
+static const struct file_operations mega_proc_fops = {
+ .open = mega_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Table of proc files we need to create.
+ */
+struct mega_proc_file {
+ const char *name;
+ unsigned short ptr_offset;
+ int (*show) (struct seq_file *m, void *v);
+};
+
+static const struct mega_proc_file mega_proc_files[] = {
+ { "config", offsetof(adapter_t, proc_read), proc_show_config },
+ { "stat", offsetof(adapter_t, proc_stat), proc_show_stat },
+ { "mailbox", offsetof(adapter_t, proc_mbox), proc_show_mbox },
+#if MEGA_HAVE_ENH_PROC
+ { "rebuild-rate", offsetof(adapter_t, proc_rr), proc_show_rebuild_rate },
+ { "battery-status", offsetof(adapter_t, proc_battery), proc_show_battery },
+ { "diskdrives-ch0", offsetof(adapter_t, proc_pdrvstat[0]), proc_show_pdrv_ch0 },
+ { "diskdrives-ch1", offsetof(adapter_t, proc_pdrvstat[1]), proc_show_pdrv_ch1 },
+ { "diskdrives-ch2", offsetof(adapter_t, proc_pdrvstat[2]), proc_show_pdrv_ch2 },
+ { "diskdrives-ch3", offsetof(adapter_t, proc_pdrvstat[3]), proc_show_pdrv_ch3 },
+ { "raiddrives-0-9", offsetof(adapter_t, proc_rdrvstat[0]), proc_show_rdrv_10 },
+ { "raiddrives-10-19", offsetof(adapter_t, proc_rdrvstat[1]), proc_show_rdrv_20 },
+ { "raiddrives-20-29", offsetof(adapter_t, proc_rdrvstat[2]), proc_show_rdrv_30 },
+ { "raiddrives-30-39", offsetof(adapter_t, proc_rdrvstat[3]), proc_show_rdrv_40 },
+#endif
+ { NULL }
+};
+
+/**
+ * mega_create_proc_entry()
+ * @index - index in soft state array
+ * @parent - parent node for this /proc entry
+ *
+ * Creates /proc entries for our controllers.
+ */
+static void
+mega_create_proc_entry(int index, struct proc_dir_entry *parent)
+{
+ const struct mega_proc_file *f;
+ adapter_t *adapter = hba_soft_state[index];
+ struct proc_dir_entry *dir, *de, **ppde;
+ u8 string[16];
+
+ sprintf(string, "hba%d", adapter->host->host_no);
+
+ dir = adapter->controller_proc_dir_entry =
+ proc_mkdir_data(string, 0, parent, adapter);
+ if(!dir) {
+ printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
+ return;
+ }
+
+ for (f = mega_proc_files; f->name; f++) {
+ de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops,
+ f->show);
+ if (!de) {
+ printk(KERN_WARNING "\nmegaraid: proc_create failed\n");
+ return;
+ }
+
+ ppde = (void *)adapter + f->ptr_offset;
+ *ppde = de;
+ }
+}
+
+#else
+static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
+{
+}
+#endif
+
+
+/**
+ * megaraid_biosparam()
+ *
+ * Return the disk geometry for a particular disk
+ */
+static int
+megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ adapter_t *adapter;
+ unsigned char *bh;
+ int heads;
+ int sectors;
+ int cylinders;
+ int rval;
+
+ /* Get pointer to host config structure */
+ adapter = (adapter_t *)sdev->host->hostdata;
+
+ if (IS_RAID_CH(adapter, sdev->channel)) {
+ /* Default heads (64) & sectors (32) */
+ heads = 64;
+ sectors = 32;
+ cylinders = (ulong)capacity / (heads * sectors);
+
+ /*
+ * Handle extended translation size for logical drives
+ * > 1Gb
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ cylinders = (ulong)capacity / (heads * sectors);
+ }
+
+ /* return result */
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ }
+ else {
+ bh = scsi_bios_ptable(bdev);
+
+ if( bh ) {
+ rval = scsi_partsize(bh, capacity,
+ &geom[2], &geom[0], &geom[1]);
+ kfree(bh);
+ if( rval != -1 )
+ return rval;
+ }
+
+ printk(KERN_INFO
+ "megaraid: invalid partition on this disk on channel %d\n",
+ sdev->channel);
+
+ /* Default heads (64) & sectors (32) */
+ heads = 64;
+ sectors = 32;
+ cylinders = (ulong)capacity / (heads * sectors);
+
+ /* Handle extended translation size for logical drives > 1Gb */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ cylinders = (ulong)capacity / (heads * sectors);
+ }
+
+ /* return result */
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ }
+
+ return 0;
+}
+
+/**
+ * mega_init_scb()
+ * @adapter - pointer to our soft state
+ *
+ * Allocate memory for the various pointers in the scb structures:
+ * scatter-gather list pointer, passthru and extended passthru structure
+ * pointers.
+ */
+static int
+mega_init_scb(adapter_t *adapter)
+{
+ scb_t *scb;
+ int i;
+
+ for( i = 0; i < adapter->max_cmds; i++ ) {
+
+ scb = &adapter->scb_list[i];
+
+ scb->sgl64 = NULL;
+ scb->sgl = NULL;
+ scb->pthru = NULL;
+ scb->epthru = NULL;
+ }
+
+ for( i = 0; i < adapter->max_cmds; i++ ) {
+
+ scb = &adapter->scb_list[i];
+
+ scb->idx = i;
+
+ scb->sgl64 = pci_alloc_consistent(adapter->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ &scb->sgl_dma_addr);
+
+ scb->sgl = (mega_sglist *)scb->sgl64;
+
+ if( !scb->sgl ) {
+ printk(KERN_WARNING "RAID: Can't allocate sglist.\n");
+ mega_free_sgl(adapter);
+ return -1;
+ }
+
+ scb->pthru = pci_alloc_consistent(adapter->dev,
+ sizeof(mega_passthru),
+ &scb->pthru_dma_addr);
+
+ if( !scb->pthru ) {
+ printk(KERN_WARNING "RAID: Can't allocate passthru.\n");
+ mega_free_sgl(adapter);
+ return -1;
+ }
+
+ scb->epthru = pci_alloc_consistent(adapter->dev,
+ sizeof(mega_ext_passthru),
+ &scb->epthru_dma_addr);
+
+ if( !scb->epthru ) {
+ printk(KERN_WARNING
+ "Can't allocate extended passthru.\n");
+ mega_free_sgl(adapter);
+ return -1;
+ }
+
+
+ scb->dma_type = MEGA_DMA_TYPE_NONE;
+
+ /*
+ * Link to free list
+ * lock not required since we are loading the driver, so no
+ * commands possible right now.
+ */
+ scb->state = SCB_FREE;
+ scb->cmd = NULL;
+ list_add(&scb->list, &adapter->free_list);
+ }
+
+ return 0;
+}
+
+
+/**
+ * megadev_open()
+ * @inode - unused
+ * @filep - unused
+ *
+ * Routines for the character/ioctl interface to the driver. Find out if this
+ * is a valid open.
+ */
+static int
+megadev_open (struct inode *inode, struct file *filep)
+{
+ /*
+ * Only allow superuser to access private ioctl interface
+ */
+ if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
+
+ return 0;
+}
+
+
+/**
+ * megadev_ioctl()
+ * @inode - Our device inode
+ * @filep - unused
+ * @cmd - ioctl command
+ * @arg - user buffer
+ *
+ * ioctl entry point for our private ioctl interface. We move the data in from
+ * the user space, prepare the command (if necessary, convert the old MIMD
+ * ioctl to new ioctl command), and issue a synchronous command to the
+ * controller.
+ */
+static int
+megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ adapter_t *adapter;
+ nitioctl_t uioc;
+ int adapno;
+ int rval;
+ mega_passthru __user *upthru; /* user address for passthru */
+ mega_passthru *pthru; /* copy user passthru here */
+ dma_addr_t pthru_dma_hndl;
+ void *data = NULL; /* data to be transferred */
+ dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
+ megacmd_t mc;
+ megastat_t __user *ustats;
+ int num_ldrv;
+ u32 uxferaddr = 0;
+ struct pci_dev *pdev;
+
+ ustats = NULL; /* avoid compilation warnings */
+ num_ldrv = 0;
+
+ /*
+ * Make sure only USCSICMD are issued through this interface.
+ * MIMD application would still fire different command.
+ */
+ if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
+ return -EINVAL;
+ }
+
+ /*
+ * Check and convert a possible MIMD command to NIT command.
+ * mega_m_to_n() copies the data from the user space, so we do not
+ * have to do it here.
+ * NOTE: We will need some user address to copyout the data, therefore
+ * the inteface layer will also provide us with the required user
+ * addresses.
+ */
+ memset(&uioc, 0, sizeof(nitioctl_t));
+ if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
+ return rval;
+
+
+ switch( uioc.opcode ) {
+
+ case GET_DRIVER_VER:
+ if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
+ return (-EFAULT);
+
+ break;
+
+ case GET_N_ADAP:
+ if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
+ return (-EFAULT);
+
+ /*
+ * Shucks. MIMD interface returns a positive value for number
+ * of adapters. TODO: Change it to return 0 when there is no
+ * applicatio using mimd interface.
+ */
+ return hba_count;
+
+ case GET_ADAP_INFO:
+
+ /*
+ * Which adapter
+ */
+ if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
+ return (-ENODEV);
+
+ if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
+ sizeof(struct mcontroller)) )
+ return (-EFAULT);
+ break;
+
+#if MEGA_HAVE_STATS
+
+ case GET_STATS:
+ /*
+ * Which adapter
+ */
+ if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
+ return (-ENODEV);
+
+ adapter = hba_soft_state[adapno];
+
+ ustats = uioc.uioc_uaddr;
+
+ if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
+ return (-EFAULT);
+
+ /*
+ * Check for the validity of the logical drive number
+ */
+ if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
+
+ if( copy_to_user(ustats->nreads, adapter->nreads,
+ num_ldrv*sizeof(u32)) )
+ return -EFAULT;
+
+ if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
+ num_ldrv*sizeof(u32)) )
+ return -EFAULT;
+
+ if( copy_to_user(ustats->nwrites, adapter->nwrites,
+ num_ldrv*sizeof(u32)) )
+ return -EFAULT;
+
+ if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
+ num_ldrv*sizeof(u32)) )
+ return -EFAULT;
+
+ if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
+ num_ldrv*sizeof(u32)) )
+ return -EFAULT;
+
+ if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
+ num_ldrv*sizeof(u32)) )
+ return -EFAULT;
+
+ return 0;
+
+#endif
+ case MBOX_CMD:
+
+ /*
+ * Which adapter
+ */
+ if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
+ return (-ENODEV);
+
+ adapter = hba_soft_state[adapno];
+
+ /*
+ * Deletion of logical drive is a special case. The adapter
+ * should be quiescent before this command is issued.
+ */
+ if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
+ uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
+
+ /*
+ * Do we support this feature
+ */
+ if( !adapter->support_random_del ) {
+ printk(KERN_WARNING "megaraid: logdrv ");
+ printk("delete on non-supporting F/W.\n");
+
+ return (-EINVAL);
+ }
+
+ rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
+
+ if( rval == 0 ) {
+ memset(&mc, 0, sizeof(megacmd_t));
+
+ mc.status = rval;
+
+ rval = mega_n_to_m((void __user *)arg, &mc);
+ }
+
+ return rval;
+ }
+ /*
+ * This interface only support the regular passthru commands.
+ * Reject extended passthru and 64-bit passthru
+ */
+ if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
+ uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
+
+ printk(KERN_WARNING "megaraid: rejected passthru.\n");
+
+ return (-EINVAL);
+ }
+
+ /*
+ * For all internal commands, the buffer must be allocated in
+ * <4GB address range
+ */
+ if( make_local_pdev(adapter, &pdev) != 0 )
+ return -EIO;
+
+ /* Is it a passthru command or a DCMD */
+ if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
+ /* Passthru commands */
+
+ pthru = pci_alloc_consistent(pdev,
+ sizeof(mega_passthru),
+ &pthru_dma_hndl);
+
+ if( pthru == NULL ) {
+ free_local_pdev(pdev);
+ return (-ENOMEM);
+ }
+
+ /*
+ * The user passthru structure
+ */
+ upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
+
+ /*
+ * Copy in the user passthru here.
+ */
+ if( copy_from_user(pthru, upthru,
+ sizeof(mega_passthru)) ) {
+
+ pci_free_consistent(pdev,
+ sizeof(mega_passthru), pthru,
+ pthru_dma_hndl);
+
+ free_local_pdev(pdev);
+
+ return (-EFAULT);
+ }
+
+ /*
+ * Is there a data transfer
+ */
+ if( pthru->dataxferlen ) {
+ data = pci_alloc_consistent(pdev,
+ pthru->dataxferlen,
+ &data_dma_hndl);
+
+ if( data == NULL ) {
+ pci_free_consistent(pdev,
+ sizeof(mega_passthru),
+ pthru,
+ pthru_dma_hndl);
+
+ free_local_pdev(pdev);
+
+ return (-ENOMEM);
+ }
+
+ /*
+ * Save the user address and point the kernel
+ * address at just allocated memory
+ */
+ uxferaddr = pthru->dataxferaddr;
+ pthru->dataxferaddr = data_dma_hndl;
+ }
+
+
+ /*
+ * Is data coming down-stream
+ */
+ if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
+ /*
+ * Get the user data
+ */
+ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
+ pthru->dataxferlen) ) {
+ rval = (-EFAULT);
+ goto freemem_and_return;
+ }
+ }
+
+ memset(&mc, 0, sizeof(megacmd_t));
+
+ mc.cmd = MEGA_MBOXCMD_PASSTHRU;
+ mc.xferaddr = (u32)pthru_dma_hndl;
+
+ /*
+ * Issue the command
+ */
+ mega_internal_command(adapter, &mc, pthru);
+
+ rval = mega_n_to_m((void __user *)arg, &mc);
+
+ if( rval ) goto freemem_and_return;
+
+
+ /*
+ * Is data going up-stream
+ */
+ if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
+ if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
+ pthru->dataxferlen) ) {
+ rval = (-EFAULT);
+ }
+ }
+
+ /*
+ * Send the request sense data also, irrespective of
+ * whether the user has asked for it or not.
+ */
+ if (copy_to_user(upthru->reqsensearea,
+ pthru->reqsensearea, 14))
+ rval = -EFAULT;
+
+freemem_and_return:
+ if( pthru->dataxferlen ) {
+ pci_free_consistent(pdev,
+ pthru->dataxferlen, data,
+ data_dma_hndl);
+ }
+
+ pci_free_consistent(pdev, sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
+
+ free_local_pdev(pdev);
+
+ return rval;
+ }
+ else {
+ /* DCMD commands */
+
+ /*
+ * Is there a data transfer
+ */
+ if( uioc.xferlen ) {
+ data = pci_alloc_consistent(pdev,
+ uioc.xferlen, &data_dma_hndl);
+
+ if( data == NULL ) {
+ free_local_pdev(pdev);
+ return (-ENOMEM);
+ }
+
+ uxferaddr = MBOX(uioc)->xferaddr;
+ }
+
+ /*
+ * Is data coming down-stream
+ */
+ if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
+ /*
+ * Get the user data
+ */
+ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
+ uioc.xferlen) ) {
+
+ pci_free_consistent(pdev,
+ uioc.xferlen,
+ data, data_dma_hndl);
+
+ free_local_pdev(pdev);
+
+ return (-EFAULT);
+ }
+ }
+
+ memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
+
+ mc.xferaddr = (u32)data_dma_hndl;
+
+ /*
+ * Issue the command
+ */
+ mega_internal_command(adapter, &mc, NULL);
+
+ rval = mega_n_to_m((void __user *)arg, &mc);
+
+ if( rval ) {
+ if( uioc.xferlen ) {
+ pci_free_consistent(pdev,
+ uioc.xferlen, data,
+ data_dma_hndl);
+ }
+
+ free_local_pdev(pdev);
+
+ return rval;
+ }
+
+ /*
+ * Is data going up-stream
+ */
+ if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
+ if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
+ uioc.xferlen) ) {
+
+ rval = (-EFAULT);
+ }
+ }
+
+ if( uioc.xferlen ) {
+ pci_free_consistent(pdev,
+ uioc.xferlen, data,
+ data_dma_hndl);
+ }
+
+ free_local_pdev(pdev);
+
+ return rval;
+ }
+
+ default:
+ return (-EINVAL);
+ }
+
+ return 0;
+}
+
+static long
+megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&megadev_mutex);
+ ret = megadev_ioctl(filep, cmd, arg);
+ mutex_unlock(&megadev_mutex);
+
+ return ret;
+}
+
+/**
+ * mega_m_to_n()
+ * @arg - user address
+ * @uioc - new ioctl structure
+ *
+ * A thin layer to convert older mimd interface ioctl structure to NIT ioctl
+ * structure
+ *
+ * Converts the older mimd ioctl structure to newer NIT structure
+ */
+static int
+mega_m_to_n(void __user *arg, nitioctl_t *uioc)
+{
+ struct uioctl_t uioc_mimd;
+ char signature[8] = {0};
+ u8 opcode;
+ u8 subopcode;
+
+
+ /*
+ * check is the application conforms to NIT. We do not have to do much
+ * in that case.
+ * We exploit the fact that the signature is stored in the very
+ * beginning of the structure.
+ */
+
+ if( copy_from_user(signature, arg, 7) )
+ return (-EFAULT);
+
+ if( memcmp(signature, "MEGANIT", 7) == 0 ) {
+
+ /*
+ * NOTE NOTE: The nit ioctl is still under flux because of
+ * change of mailbox definition, in HPE. No applications yet
+ * use this interface and let's not have applications use this
+ * interface till the new specifitions are in place.
+ */
+ return -EINVAL;
+#if 0
+ if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
+ return (-EFAULT);
+ return 0;
+#endif
+ }
+
+ /*
+ * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
+ *
+ * Get the user ioctl structure
+ */
+ if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
+ return (-EFAULT);
+
+
+ /*
+ * Get the opcode and subopcode for the commands
+ */
+ opcode = uioc_mimd.ui.fcs.opcode;
+ subopcode = uioc_mimd.ui.fcs.subopcode;
+
+ switch (opcode) {
+ case 0x82:
+
+ switch (subopcode) {
+
+ case MEGAIOC_QDRVRVER: /* Query driver version */
+ uioc->opcode = GET_DRIVER_VER;
+ uioc->uioc_uaddr = uioc_mimd.data;
+ break;
+
+ case MEGAIOC_QNADAP: /* Get # of adapters */
+ uioc->opcode = GET_N_ADAP;
+ uioc->uioc_uaddr = uioc_mimd.data;
+ break;
+
+ case MEGAIOC_QADAPINFO: /* Get adapter information */
+ uioc->opcode = GET_ADAP_INFO;
+ uioc->adapno = uioc_mimd.ui.fcs.adapno;
+ uioc->uioc_uaddr = uioc_mimd.data;
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+
+ break;
+
+
+ case 0x81:
+
+ uioc->opcode = MBOX_CMD;
+ uioc->adapno = uioc_mimd.ui.fcs.adapno;
+
+ memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
+
+ uioc->xferlen = uioc_mimd.ui.fcs.length;
+
+ if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
+ if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
+
+ break;
+
+ case 0x80:
+
+ uioc->opcode = MBOX_CMD;
+ uioc->adapno = uioc_mimd.ui.fcs.adapno;
+
+ memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
+
+ /*
+ * Choose the xferlen bigger of input and output data
+ */
+ uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
+ uioc_mimd.outlen : uioc_mimd.inlen;
+
+ if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
+ if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
+
+ break;
+
+ default:
+ return (-EINVAL);
+
+ }
+
+ return 0;
+}
+
+/*
+ * mega_n_to_m()
+ * @arg - user address
+ * @mc - mailbox command
+ *
+ * Updates the status information to the application, depending on application
+ * conforms to older mimd ioctl interface or newer NIT ioctl interface
+ */
+static int
+mega_n_to_m(void __user *arg, megacmd_t *mc)
+{
+ nitioctl_t __user *uiocp;
+ megacmd_t __user *umc;
+ mega_passthru __user *upthru;
+ struct uioctl_t __user *uioc_mimd;
+ char signature[8] = {0};
+
+ /*
+ * check is the application conforms to NIT.
+ */
+ if( copy_from_user(signature, arg, 7) )
+ return -EFAULT;
+
+ if( memcmp(signature, "MEGANIT", 7) == 0 ) {
+
+ uiocp = arg;
+
+ if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
+ return (-EFAULT);
+
+ if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
+
+ umc = MBOX_P(uiocp);
+
+ if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
+ return -EFAULT;
+
+ if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
+ return (-EFAULT);
+ }
+ }
+ else {
+ uioc_mimd = arg;
+
+ if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
+ return (-EFAULT);
+
+ if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
+
+ umc = (megacmd_t __user *)uioc_mimd->mbox;
+
+ if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
+ return (-EFAULT);
+
+ if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
+ return (-EFAULT);
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * MEGARAID 'FW' commands.
+ */
+
+/**
+ * mega_is_bios_enabled()
+ * @adapter - pointer to our soft state
+ *
+ * issue command to find out if the BIOS is enabled for this controller
+ */
+static int
+mega_is_bios_enabled(adapter_t *adapter)
+{
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+ int ret;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+
+ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+ mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+
+ raw_mbox[0] = IS_BIOS_ENABLED;
+ raw_mbox[2] = GET_BIOS;
+
+
+ ret = issue_scb_block(adapter, raw_mbox);
+
+ return *(char *)adapter->mega_buffer;
+}
+
+
+/**
+ * mega_enum_raid_scsi()
+ * @adapter - pointer to our soft state
+ *
+ * Find out what channels are RAID/SCSI. This information is used to
+ * differentiate the virtual channels and physical channels and to support
+ * ROMB feature and non-disk devices.
+ */
+static void
+mega_enum_raid_scsi(adapter_t *adapter)
+{
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+ int i;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+
+ /*
+ * issue command to find out what channels are raid/scsi
+ */
+ raw_mbox[0] = CHNL_CLASS;
+ raw_mbox[2] = GET_CHNL_CLASS;
+
+ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+ mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+
+ /*
+ * Non-ROMB firmware fail this command, so all channels
+ * must be shown RAID
+ */
+ adapter->mega_ch_class = 0xFF;
+
+ if(!issue_scb_block(adapter, raw_mbox)) {
+ adapter->mega_ch_class = *((char *)adapter->mega_buffer);
+
+ }
+
+ for( i = 0; i < adapter->product_info.nchannels; i++ ) {
+ if( (adapter->mega_ch_class >> i) & 0x01 ) {
+ printk(KERN_INFO "megaraid: channel[%d] is raid.\n",
+ i);
+ }
+ else {
+ printk(KERN_INFO "megaraid: channel[%d] is scsi.\n",
+ i);
+ }
+ }
+
+ return;
+}
+
+
+/**
+ * mega_get_boot_drv()
+ * @adapter - pointer to our soft state
+ *
+ * Find out which device is the boot device. Note, any logical drive or any
+ * phyical device (e.g., a CDROM) can be designated as a boot device.
+ */
+static void
+mega_get_boot_drv(adapter_t *adapter)
+{
+ struct private_bios_data *prv_bios_data;
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+ u16 cksum = 0;
+ u8 *cksum_p;
+ u8 boot_pdrv;
+ int i;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+
+ raw_mbox[0] = BIOS_PVT_DATA;
+ raw_mbox[2] = GET_BIOS_PVT_DATA;
+
+ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+ mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+
+ adapter->boot_ldrv_enabled = 0;
+ adapter->boot_ldrv = 0;
+
+ adapter->boot_pdrv_enabled = 0;
+ adapter->boot_pdrv_ch = 0;
+ adapter->boot_pdrv_tgt = 0;
+
+ if(issue_scb_block(adapter, raw_mbox) == 0) {
+ prv_bios_data =
+ (struct private_bios_data *)adapter->mega_buffer;
+
+ cksum = 0;
+ cksum_p = (char *)prv_bios_data;
+ for (i = 0; i < 14; i++ ) {
+ cksum += (u16)(*cksum_p++);
+ }
+
+ if (prv_bios_data->cksum == (u16)(0-cksum) ) {
+
+ /*
+ * If MSB is set, a physical drive is set as boot
+ * device
+ */
+ if( prv_bios_data->boot_drv & 0x80 ) {
+ adapter->boot_pdrv_enabled = 1;
+ boot_pdrv = prv_bios_data->boot_drv & 0x7F;
+ adapter->boot_pdrv_ch = boot_pdrv / 16;
+ adapter->boot_pdrv_tgt = boot_pdrv % 16;
+ }
+ else {
+ adapter->boot_ldrv_enabled = 1;
+ adapter->boot_ldrv = prv_bios_data->boot_drv;
+ }
+ }
+ }
+
+}
+
+/**
+ * mega_support_random_del()
+ * @adapter - pointer to our soft state
+ *
+ * Find out if this controller supports random deletion and addition of
+ * logical drives
+ */
+static int
+mega_support_random_del(adapter_t *adapter)
+{
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+ int rval;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+
+ /*
+ * issue command
+ */
+ raw_mbox[0] = FC_DEL_LOGDRV;
+ raw_mbox[2] = OP_SUP_DEL_LOGDRV;
+
+ rval = issue_scb_block(adapter, raw_mbox);
+
+ return !rval;
+}
+
+
+/**
+ * mega_support_ext_cdb()
+ * @adapter - pointer to our soft state
+ *
+ * Find out if this firmware support cdblen > 10
+ */
+static int
+mega_support_ext_cdb(adapter_t *adapter)
+{
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+ int rval;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ /*
+ * issue command to find out if controller supports extended CDBs.
+ */
+ raw_mbox[0] = 0xA4;
+ raw_mbox[2] = 0x16;
+
+ rval = issue_scb_block(adapter, raw_mbox);
+
+ return !rval;
+}
+
+
+/**
+ * mega_del_logdrv()
+ * @adapter - pointer to our soft state
+ * @logdrv - logical drive to be deleted
+ *
+ * Delete the specified logical drive. It is the responsibility of the user
+ * app to let the OS know about this operation.
+ */
+static int
+mega_del_logdrv(adapter_t *adapter, int logdrv)
+{
+ unsigned long flags;
+ scb_t *scb;
+ int rval;
+
+ /*
+ * Stop sending commands to the controller, queue them internally.
+ * When deletion is complete, ISR will flush the queue.
+ */
+ atomic_set(&adapter->quiescent, 1);
+
+ /*
+ * Wait till all the issued commands are complete and there are no
+ * commands in the pending queue
+ */
+ while (atomic_read(&adapter->pend_cmds) > 0 ||
+ !list_empty(&adapter->pending_list))
+ msleep(1000); /* sleep for 1s */
+
+ rval = mega_do_del_logdrv(adapter, logdrv);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ /*
+ * If delete operation was successful, add 0x80 to the logical drive
+ * ids for commands in the pending queue.
+ */
+ if (adapter->read_ldidmap) {
+ struct list_head *pos;
+ list_for_each(pos, &adapter->pending_list) {
+ scb = list_entry(pos, scb_t, list);
+ if (scb->pthru->logdrv < 0x80 )
+ scb->pthru->logdrv += 0x80;
+ }
+ }
+
+ atomic_set(&adapter->quiescent, 0);
+
+ mega_runpendq(adapter);
+
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ return rval;
+}
+
+
+static int
+mega_do_del_logdrv(adapter_t *adapter, int logdrv)
+{
+ megacmd_t mc;
+ int rval;
+
+ memset( &mc, 0, sizeof(megacmd_t));
+
+ mc.cmd = FC_DEL_LOGDRV;
+ mc.opcode = OP_DEL_LOGDRV;
+ mc.subopcode = logdrv;
+
+ rval = mega_internal_command(adapter, &mc, NULL);
+
+ /* log this event */
+ if(rval) {
+ printk(KERN_WARNING "megaraid: Delete LD-%d failed.", logdrv);
+ return rval;
+ }
+
+ /*
+ * After deleting first logical drive, the logical drives must be
+ * addressed by adding 0x80 to the logical drive id.
+ */
+ adapter->read_ldidmap = 1;
+
+ return rval;
+}
+
+
+/**
+ * mega_get_max_sgl()
+ * @adapter - pointer to our soft state
+ *
+ * Find out the maximum number of scatter-gather elements supported by this
+ * version of the firmware
+ */
+static void
+mega_get_max_sgl(adapter_t *adapter)
+{
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(mbox, 0, sizeof(raw_mbox));
+
+ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+ mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+
+ raw_mbox[0] = MAIN_MISC_OPCODE;
+ raw_mbox[2] = GET_MAX_SG_SUPPORT;
+
+
+ if( issue_scb_block(adapter, raw_mbox) ) {
+ /*
+ * f/w does not support this command. Choose the default value
+ */
+ adapter->sglen = MIN_SGLIST;
+ }
+ else {
+ adapter->sglen = *((char *)adapter->mega_buffer);
+
+ /*
+ * Make sure this is not more than the resources we are
+ * planning to allocate
+ */
+ if ( adapter->sglen > MAX_SGLIST )
+ adapter->sglen = MAX_SGLIST;
+ }
+
+ return;
+}
+
+
+/**
+ * mega_support_cluster()
+ * @adapter - pointer to our soft state
+ *
+ * Find out if this firmware support cluster calls.
+ */
+static int
+mega_support_cluster(adapter_t *adapter)
+{
+ unsigned char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset(mbox, 0, sizeof(raw_mbox));
+
+ memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
+
+ mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+
+ /*
+ * Try to get the initiator id. This command will succeed iff the
+ * clustering is available on this HBA.
+ */
+ raw_mbox[0] = MEGA_GET_TARGET_ID;
+
+ if( issue_scb_block(adapter, raw_mbox) == 0 ) {
+
+ /*
+ * Cluster support available. Get the initiator target id.
+ * Tell our id to mid-layer too.
+ */
+ adapter->this_id = *(u32 *)adapter->mega_buffer;
+ adapter->host->this_id = adapter->this_id;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+/**
+ * mega_adapinq()
+ * @adapter - pointer to our soft state
+ * @dma_handle - DMA address of the buffer
+ *
+ * Issue internal commands while interrupts are available.
+ * We only issue direct mailbox commands from within the driver. ioctl()
+ * interface using these routines can issue passthru commands.
+ */
+static int
+mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
+{
+ megacmd_t mc;
+
+ memset(&mc, 0, sizeof(megacmd_t));
+
+ if( adapter->flag & BOARD_40LD ) {
+ mc.cmd = FC_NEW_CONFIG;
+ mc.opcode = NC_SUBOP_ENQUIRY3;
+ mc.subopcode = ENQ3_GET_SOLICITED_FULL;
+ }
+ else {
+ mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
+ }
+
+ mc.xferaddr = (u32)dma_handle;
+
+ if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/** mega_internal_dev_inquiry()
+ * @adapter - pointer to our soft state
+ * @ch - channel for this device
+ * @tgt - ID of this device
+ * @buf_dma_handle - DMA address of the buffer
+ *
+ * Issue the scsi inquiry for the specified device.
+ */
+static int
+mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
+ dma_addr_t buf_dma_handle)
+{
+ mega_passthru *pthru;
+ dma_addr_t pthru_dma_handle;
+ megacmd_t mc;
+ int rval;
+ struct pci_dev *pdev;
+
+
+ /*
+ * For all internal commands, the buffer must be allocated in <4GB
+ * address range
+ */
+ if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
+
+ pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru),
+ &pthru_dma_handle);
+
+ if( pthru == NULL ) {
+ free_local_pdev(pdev);
+ return -1;
+ }
+
+ pthru->timeout = 2;
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 0;
+
+ pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
+
+ pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
+
+ pthru->cdblen = 6;
+
+ pthru->cdb[0] = INQUIRY;
+ pthru->cdb[1] = 0;
+ pthru->cdb[2] = 0;
+ pthru->cdb[3] = 0;
+ pthru->cdb[4] = 255;
+ pthru->cdb[5] = 0;
+
+
+ pthru->dataxferaddr = (u32)buf_dma_handle;
+ pthru->dataxferlen = 256;
+
+ memset(&mc, 0, sizeof(megacmd_t));
+
+ mc.cmd = MEGA_MBOXCMD_PASSTHRU;
+ mc.xferaddr = (u32)pthru_dma_handle;
+
+ rval = mega_internal_command(adapter, &mc, pthru);
+
+ pci_free_consistent(pdev, sizeof(mega_passthru), pthru,
+ pthru_dma_handle);
+
+ free_local_pdev(pdev);
+
+ return rval;
+}
+#endif
+
+/**
+ * mega_internal_command()
+ * @adapter - pointer to our soft state
+ * @mc - the mailbox command
+ * @pthru - Passthru structure for DCDB commands
+ *
+ * Issue the internal commands in interrupt mode.
+ * The last argument is the address of the passthru structure if the command
+ * to be fired is a passthru command
+ *
+ * Note: parameter 'pthru' is null for non-passthru commands.
+ */
+static int
+mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
+{
+ unsigned long flags;
+ scb_t *scb;
+ int rval;
+
+ /*
+ * The internal commands share one command id and hence are
+ * serialized. This is so because we want to reserve maximum number of
+ * available command ids for the I/O commands.
+ */
+ mutex_lock(&adapter->int_mtx);
+
+ scb = &adapter->int_scb;
+ memset(scb, 0, sizeof(scb_t));
+
+ scb->idx = CMDID_INT_CMDS;
+ scb->state |= SCB_ACTIVE | SCB_PENDQ;
+
+ memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
+
+ /*
+ * Is it a passthru command
+ */
+ if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
+ scb->pthru = pthru;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ list_add_tail(&scb->list, &adapter->pending_list);
+ /*
+ * Check if the HBA is in quiescent state, e.g., during a
+ * delete logical drive opertion. If it is, don't run
+ * the pending_list.
+ */
+ if (atomic_read(&adapter->quiescent) == 0)
+ mega_runpendq(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ wait_for_completion(&adapter->int_waitq);
+
+ mc->status = rval = adapter->int_status;
+
+ /*
+ * Print a debug message for all failed commands. Applications can use
+ * this information.
+ */
+ if (rval && trace_level) {
+ printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
+ mc->cmd, mc->opcode, mc->subopcode, rval);
+ }
+
+ mutex_unlock(&adapter->int_mtx);
+ return rval;
+}
+
+static struct scsi_host_template megaraid_template = {
+ .module = THIS_MODULE,
+ .name = "MegaRAID",
+ .proc_name = "megaraid_legacy",
+ .info = megaraid_info,
+ .queuecommand = megaraid_queue,
+ .bios_param = megaraid_biosparam,
+ .max_sectors = MAX_SECTORS_PER_IO,
+ .can_queue = MAX_COMMANDS,
+ .this_id = DEFAULT_INITIATOR_ID,
+ .sg_tablesize = MAX_SGLIST,
+ .cmd_per_lun = DEF_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = megaraid_abort,
+ .eh_device_reset_handler = megaraid_reset,
+ .eh_bus_reset_handler = megaraid_reset,
+ .eh_host_reset_handler = megaraid_reset,
+ .no_write_same = 1,
+};
+
+static int
+megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ adapter_t *adapter;
+ unsigned long mega_baseport, tbase, flag = 0;
+ u16 subsysid, subsysvid;
+ u8 pci_bus, pci_dev_func;
+ int irq, i, j;
+ int error = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ goto out;
+ pci_set_master(pdev);
+
+ pci_bus = pdev->bus->number;
+ pci_dev_func = pdev->devfn;
+
+ /*
+ * The megaraid3 stuff reports the ID of the Intel part which is not
+ * remotely specific to the megaraid
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ u16 magic;
+ /*
+ * Don't fall over the Compaq management cards using the same
+ * PCI identifier
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
+ pdev->subsystem_device == 0xC000)
+ return -ENODEV;
+ /* Now check the magic signature byte */
+ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
+ if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
+ return -ENODEV;
+ /* Ok it is probably a megaraid */
+ }
+
+ /*
+ * For these vendor and device ids, signature offsets are not
+ * valid and 64 bit is implicit
+ */
+ if (id->driver_data & BOARD_64BIT)
+ flag |= BOARD_64BIT;
+ else {
+ u32 magic64;
+
+ pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
+ if (magic64 == HBA_SIGNATURE_64BIT)
+ flag |= BOARD_64BIT;
+ }
+
+ subsysvid = pdev->subsystem_vendor;
+ subsysid = pdev->subsystem_device;
+
+ printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:",
+ id->vendor, id->device, pci_bus);
+
+ printk("slot %d:func %d\n",
+ PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func));
+
+ /* Read the base port and IRQ from PCI */
+ mega_baseport = pci_resource_start(pdev, 0);
+ irq = pdev->irq;
+
+ tbase = mega_baseport;
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
+ flag |= BOARD_MEMMAP;
+
+ if (!request_mem_region(mega_baseport, 128, "megaraid")) {
+ printk(KERN_WARNING "megaraid: mem region busy!\n");
+ goto out_disable_device;
+ }
+
+ mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
+ if (!mega_baseport) {
+ printk(KERN_WARNING
+ "megaraid: could not map hba memory\n");
+ goto out_release_region;
+ }
+ } else {
+ flag |= BOARD_IOMAP;
+ mega_baseport += 0x10;
+
+ if (!request_region(mega_baseport, 16, "megaraid"))
+ goto out_disable_device;
+ }
+
+ /* Initialize SCSI Host structure */
+ host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
+ if (!host)
+ goto out_iounmap;
+
+ adapter = (adapter_t *)host->hostdata;
+ memset(adapter, 0, sizeof(adapter_t));
+
+ printk(KERN_NOTICE
+ "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
+ host->host_no, mega_baseport, irq);
+
+ adapter->base = mega_baseport;
+ if (flag & BOARD_MEMMAP)
+ adapter->mmio_base = (void __iomem *) mega_baseport;
+
+ INIT_LIST_HEAD(&adapter->free_list);
+ INIT_LIST_HEAD(&adapter->pending_list);
+ INIT_LIST_HEAD(&adapter->completed_list);
+
+ adapter->flag = flag;
+ spin_lock_init(&adapter->lock);
+
+ host->cmd_per_lun = max_cmd_per_lun;
+ host->max_sectors = max_sectors_per_io;
+
+ adapter->dev = pdev;
+ adapter->host = host;
+
+ adapter->host->irq = irq;
+
+ if (flag & BOARD_MEMMAP)
+ adapter->host->base = tbase;
+ else {
+ adapter->host->io_port = tbase;
+ adapter->host->n_io_port = 16;
+ }
+
+ adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
+
+ /*
+ * Allocate buffer to issue internal commands.
+ */
+ adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
+ MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
+ if (!adapter->mega_buffer) {
+ printk(KERN_WARNING "megaraid: out of RAM.\n");
+ goto out_host_put;
+ }
+
+ adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL);
+ if (!adapter->scb_list) {
+ printk(KERN_WARNING "megaraid: out of RAM.\n");
+ goto out_free_cmd_buffer;
+ }
+
+ if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
+ megaraid_isr_memmapped : megaraid_isr_iomapped,
+ IRQF_SHARED, "megaraid", adapter)) {
+ printk(KERN_WARNING
+ "megaraid: Couldn't register IRQ %d!\n", irq);
+ goto out_free_scb_list;
+ }
+
+ if (mega_setup_mailbox(adapter))
+ goto out_free_irq;
+
+ if (mega_query_adapter(adapter))
+ goto out_free_mbox;
+
+ /*
+ * Have checks for some buggy f/w
+ */
+ if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
+ /*
+ * Which firmware
+ */
+ if (!strcmp(adapter->fw_version, "3.00") ||
+ !strcmp(adapter->fw_version, "3.01")) {
+
+ printk( KERN_WARNING
+ "megaraid: Your card is a Dell PERC "
+ "2/SC RAID controller with "
+ "firmware\nmegaraid: 3.00 or 3.01. "
+ "This driver is known to have "
+ "corruption issues\nmegaraid: with "
+ "those firmware versions on this "
+ "specific card. In order\nmegaraid: "
+ "to protect your data, please upgrade "
+ "your firmware to version\nmegaraid: "
+ "3.10 or later, available from the "
+ "Dell Technical Support web\n"
+ "megaraid: site at\nhttp://support."
+ "dell.com/us/en/filelib/download/"
+ "index.asp?fileid=2940\n"
+ );
+ }
+ }
+
+ /*
+ * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with
+ * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit
+ * support, since this firmware cannot handle 64 bit
+ * addressing
+ */
+ if ((subsysvid == PCI_VENDOR_ID_HP) &&
+ ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
+ /*
+ * which firmware
+ */
+ if (!strcmp(adapter->fw_version, "H01.07") ||
+ !strcmp(adapter->fw_version, "H01.08") ||
+ !strcmp(adapter->fw_version, "H01.09") ) {
+ printk(KERN_WARNING
+ "megaraid: Firmware H.01.07, "
+ "H.01.08, and H.01.09 on 1M/2M "
+ "controllers\n"
+ "megaraid: do not support 64 bit "
+ "addressing.\nmegaraid: DISABLING "
+ "64 bit support.\n");
+ adapter->flag &= ~BOARD_64BIT;
+ }
+ }
+
+ if (mega_is_bios_enabled(adapter))
+ mega_hbas[hba_count].is_bios_enabled = 1;
+ mega_hbas[hba_count].hostdata_addr = adapter;
+
+ /*
+ * Find out which channel is raid and which is scsi. This is
+ * for ROMB support.
+ */
+ mega_enum_raid_scsi(adapter);
+
+ /*
+ * Find out if a logical drive is set as the boot drive. If
+ * there is one, will make that as the first logical drive.
+ * ROMB: Do we have to boot from a physical drive. Then all
+ * the physical drives would appear before the logical disks.
+ * Else, all the physical drives would be exported to the mid
+ * layer after logical drives.
+ */
+ mega_get_boot_drv(adapter);
+
+ if (adapter->boot_pdrv_enabled) {
+ j = adapter->product_info.nchannels;
+ for( i = 0; i < j; i++ )
+ adapter->logdrv_chan[i] = 0;
+ for( i = j; i < NVIRT_CHAN + j; i++ )
+ adapter->logdrv_chan[i] = 1;
+ } else {
+ for (i = 0; i < NVIRT_CHAN; i++)
+ adapter->logdrv_chan[i] = 1;
+ for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
+ adapter->logdrv_chan[i] = 0;
+ adapter->mega_ch_class <<= NVIRT_CHAN;
+ }
+
+ /*
+ * Do we support random deletion and addition of logical
+ * drives
+ */
+ adapter->read_ldidmap = 0; /* set it after first logdrv
+ delete cmd */
+ adapter->support_random_del = mega_support_random_del(adapter);
+
+ /* Initialize SCBs */
+ if (mega_init_scb(adapter))
+ goto out_free_mbox;
+
+ /*
+ * Reset the pending commands counter
+ */
+ atomic_set(&adapter->pend_cmds, 0);
+
+ /*
+ * Reset the adapter quiescent flag
+ */
+ atomic_set(&adapter->quiescent, 0);
+
+ hba_soft_state[hba_count] = adapter;
+
+ /*
+ * Fill in the structure which needs to be passed back to the
+ * application when it does an ioctl() for controller related
+ * information.
+ */
+ i = hba_count;
+
+ mcontroller[i].base = mega_baseport;
+ mcontroller[i].irq = irq;
+ mcontroller[i].numldrv = adapter->numldrv;
+ mcontroller[i].pcibus = pci_bus;
+ mcontroller[i].pcidev = id->device;
+ mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
+ mcontroller[i].pciid = -1;
+ mcontroller[i].pcivendor = id->vendor;
+ mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
+ mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
+
+
+ /* Set the Mode of addressing to 64 bit if we can */
+ if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ adapter->has_64bit_addr = 1;
+ } else {
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ adapter->has_64bit_addr = 0;
+ }
+
+ mutex_init(&adapter->int_mtx);
+ init_completion(&adapter->int_waitq);
+
+ adapter->this_id = DEFAULT_INITIATOR_ID;
+ adapter->host->this_id = DEFAULT_INITIATOR_ID;
+
+#if MEGA_HAVE_CLUSTERING
+ /*
+ * Is cluster support enabled on this controller
+ * Note: In a cluster the HBAs ( the initiators ) will have
+ * different target IDs and we cannot assume it to be 7. Call
+ * to mega_support_cluster() will get the target ids also if
+ * the cluster support is available
+ */
+ adapter->has_cluster = mega_support_cluster(adapter);
+ if (adapter->has_cluster) {
+ printk(KERN_NOTICE
+ "megaraid: Cluster driver, initiator id:%d\n",
+ adapter->this_id);
+ }
+#endif
+
+ pci_set_drvdata(pdev, host);
+
+ mega_create_proc_entry(hba_count, mega_proc_dir_entry);
+
+ error = scsi_add_host(host, &pdev->dev);
+ if (error)
+ goto out_free_mbox;
+
+ scsi_scan_host(host);
+ hba_count++;
+ return 0;
+
+ out_free_mbox:
+ pci_free_consistent(adapter->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
+ out_free_irq:
+ free_irq(adapter->host->irq, adapter);
+ out_free_scb_list:
+ kfree(adapter->scb_list);
+ out_free_cmd_buffer:
+ pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
+ out_host_put:
+ scsi_host_put(host);
+ out_iounmap:
+ if (flag & BOARD_MEMMAP)
+ iounmap((void *)mega_baseport);
+ out_release_region:
+ if (flag & BOARD_MEMMAP)
+ release_mem_region(tbase, 128);
+ else
+ release_region(mega_baseport, 16);
+ out_disable_device:
+ pci_disable_device(pdev);
+ out:
+ return error;
+}
+
+static void
+__megaraid_shutdown(adapter_t *adapter)
+{
+ u_char raw_mbox[sizeof(struct mbox_out)];
+ mbox_t *mbox = (mbox_t *)raw_mbox;
+ int i;
+
+ /* Flush adapter cache */
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ raw_mbox[0] = FLUSH_ADAPTER;
+
+ free_irq(adapter->host->irq, adapter);
+
+ /* Issue a blocking (interrupts disabled) command to the card */
+ issue_scb_block(adapter, raw_mbox);
+
+ /* Flush disks cache */
+ memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ raw_mbox[0] = FLUSH_SYSTEM;
+
+ /* Issue a blocking (interrupts disabled) command to the card */
+ issue_scb_block(adapter, raw_mbox);
+
+ if (atomic_read(&adapter->pend_cmds) > 0)
+ printk(KERN_WARNING "megaraid: pending commands!!\n");
+
+ /*
+ * Have a delibrate delay to make sure all the caches are
+ * actually flushed.
+ */
+ for (i = 0; i <= 10; i++)
+ mdelay(1000);
+}
+
+static void
+megaraid_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ adapter_t *adapter = (adapter_t *)host->hostdata;
+
+ scsi_remove_host(host);
+
+ __megaraid_shutdown(adapter);
+
+ /* Free our resources */
+ if (adapter->flag & BOARD_MEMMAP) {
+ iounmap((void *)adapter->base);
+ release_mem_region(adapter->host->base, 128);
+ } else
+ release_region(adapter->base, 16);
+
+ mega_free_sgl(adapter);
+
+#ifdef CONFIG_PROC_FS
+ if (adapter->controller_proc_dir_entry) {
+ remove_proc_entry("stat", adapter->controller_proc_dir_entry);
+ remove_proc_entry("config",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("mailbox",
+ adapter->controller_proc_dir_entry);
+#if MEGA_HAVE_ENH_PROC
+ remove_proc_entry("rebuild-rate",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("battery-status",
+ adapter->controller_proc_dir_entry);
+
+ remove_proc_entry("diskdrives-ch0",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("diskdrives-ch1",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("diskdrives-ch2",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("diskdrives-ch3",
+ adapter->controller_proc_dir_entry);
+
+ remove_proc_entry("raiddrives-0-9",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("raiddrives-10-19",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("raiddrives-20-29",
+ adapter->controller_proc_dir_entry);
+ remove_proc_entry("raiddrives-30-39",
+ adapter->controller_proc_dir_entry);
+#endif
+ {
+ char buf[12] = { 0 };
+ sprintf(buf, "hba%d", adapter->host->host_no);
+ remove_proc_entry(buf, mega_proc_dir_entry);
+ }
+ }
+#endif
+
+ pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
+ kfree(adapter->scb_list);
+ pci_free_consistent(adapter->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
+
+ scsi_host_put(host);
+ pci_disable_device(pdev);
+
+ hba_count--;
+}
+
+static void
+megaraid_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ adapter_t *adapter = (adapter_t *)host->hostdata;
+
+ __megaraid_shutdown(adapter);
+}
+
+static struct pci_device_id megaraid_pci_tbl[] = {
+ {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
+
+static struct pci_driver megaraid_pci_driver = {
+ .name = "megaraid_legacy",
+ .id_table = megaraid_pci_tbl,
+ .probe = megaraid_probe_one,
+ .remove = megaraid_remove_one,
+ .shutdown = megaraid_shutdown,
+};
+
+static int __init megaraid_init(void)
+{
+ int error;
+
+ if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
+ max_cmd_per_lun = MAX_CMD_PER_LUN;
+ if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
+ max_mbox_busy_wait = MBOX_BUSY_WAIT;
+
+#ifdef CONFIG_PROC_FS
+ mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
+ if (!mega_proc_dir_entry) {
+ printk(KERN_WARNING
+ "megaraid: failed to create megaraid root\n");
+ }
+#endif
+ error = pci_register_driver(&megaraid_pci_driver);
+ if (error) {
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("megaraid", NULL);
+#endif
+ return error;
+ }
+
+ /*
+ * Register the driver as a character device, for applications
+ * to access it for ioctls.
+ * First argument (major) to register_chrdev implies a dynamic
+ * major number allocation.
+ */
+ major = register_chrdev(0, "megadev_legacy", &megadev_fops);
+ if (!major) {
+ printk(KERN_WARNING
+ "megaraid: failed to register char device\n");
+ }
+
+ return 0;
+}
+
+static void __exit megaraid_exit(void)
+{
+ /*
+ * Unregister the character device interface to the driver.
+ */
+ unregister_chrdev(major, "megadev_legacy");
+
+ pci_unregister_driver(&megaraid_pci_driver);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("megaraid", NULL);
+#endif
+}
+
+module_init(megaraid_init);
+module_exit(megaraid_exit);
+
+/* vi: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
new file mode 100644
index 000000000..508d65e5a
--- /dev/null
+++ b/drivers/scsi/megaraid.h
@@ -0,0 +1,1010 @@
+#ifndef __MEGARAID_H__
+#define __MEGARAID_H__
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+
+#define MEGARAID_VERSION \
+ "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
+
+/*
+ * Driver features - change the values to enable or disable features in the
+ * driver.
+ */
+
+/*
+ * Command coalescing - This feature allows the driver to be able to combine
+ * two or more commands and issue as one command in order to boost I/O
+ * performance. Useful if the nature of the I/O is sequential. It is not very
+ * useful for random natured I/Os.
+ */
+#define MEGA_HAVE_COALESCING 0
+
+/*
+ * Clustering support - Set this flag if you are planning to use the
+ * clustering services provided by the megaraid controllers and planning to
+ * setup a cluster
+ */
+#define MEGA_HAVE_CLUSTERING 1
+
+/*
+ * Driver statistics - Set this flag if you are interested in statics about
+ * number of I/O completed on each logical drive and how many interrupts
+ * generated. If enabled, this information is available through /proc
+ * interface and through the private ioctl. Setting this flag has a
+ * performance penalty.
+ */
+#define MEGA_HAVE_STATS 0
+
+/*
+ * Enhanced /proc interface - This feature will allow you to have a more
+ * detailed /proc interface for megaraid driver. E.g., a real time update of
+ * the status of the logical drives, battery status, physical drives etc.
+ */
+#define MEGA_HAVE_ENH_PROC 1
+
+#define MAX_DEV_TYPE 32
+
+#define PCI_DEVICE_ID_DISCOVERY 0x000E
+#define PCI_DEVICE_ID_PERC4_DI 0x000F
+#define PCI_DEVICE_ID_PERC4_QC_VERDE 0x0407
+
+#define HBA_SIGNATURE 0x3344
+#define HBA_SIGNATURE_471 0xCCCC
+#define HBA_SIGNATURE_64BIT 0x0299
+
+#define MBOX_BUSY_WAIT 10 /* wait for up to 10 usec for
+ mailbox to be free */
+#define DEFAULT_INITIATOR_ID 7
+
+#define MAX_SGLIST 64 /* max supported in f/w */
+#define MIN_SGLIST 26 /* guaranteed to support these many */
+#define MAX_COMMANDS 126
+#define CMDID_INT_CMDS MAX_COMMANDS+1 /* make sure CMDID_INT_CMDS
+ is less than max commands
+ supported by any f/w */
+
+#define MAX_CDB_LEN 10
+#define MAX_EXT_CDB_LEN 16 /* we support cdb length up to 16 */
+
+#define DEF_CMD_PER_LUN 63
+#define MAX_CMD_PER_LUN MAX_COMMANDS
+#define MAX_FIRMWARE_STATUS 46
+#define MAX_XFER_PER_CMD (64*1024)
+#define MAX_SECTORS_PER_IO 128
+
+#define MAX_LOGICAL_DRIVES_40LD 40
+#define FC_MAX_PHYSICAL_DEVICES 256
+#define MAX_LOGICAL_DRIVES_8LD 8
+#define MAX_CHANNELS 5
+#define MAX_TARGET 15
+#define MAX_PHYSICAL_DRIVES MAX_CHANNELS*MAX_TARGET
+#define MAX_ROW_SIZE_40LD 32
+#define MAX_ROW_SIZE_8LD 8
+#define MAX_SPAN_DEPTH 8
+
+#define NVIRT_CHAN 4 /* # of virtual channels to represent
+ up to 60 logical drives */
+struct mbox_out {
+ /* 0x0 */ u8 cmd;
+ /* 0x1 */ u8 cmdid;
+ /* 0x2 */ u16 numsectors;
+ /* 0x4 */ u32 lba;
+ /* 0x8 */ u32 xferaddr;
+ /* 0xC */ u8 logdrv;
+ /* 0xD */ u8 numsgelements;
+ /* 0xE */ u8 resvd;
+} __attribute__ ((packed));
+
+struct mbox_in {
+ /* 0xF */ volatile u8 busy;
+ /* 0x10 */ volatile u8 numstatus;
+ /* 0x11 */ volatile u8 status;
+ /* 0x12 */ volatile u8 completed[MAX_FIRMWARE_STATUS];
+ volatile u8 poll;
+ volatile u8 ack;
+} __attribute__ ((packed));
+
+typedef struct {
+ struct mbox_out m_out;
+ struct mbox_in m_in;
+} __attribute__ ((packed)) mbox_t;
+
+typedef struct {
+ u32 xfer_segment_lo;
+ u32 xfer_segment_hi;
+ mbox_t mbox;
+} __attribute__ ((packed)) mbox64_t;
+
+
+/*
+ * Passthru definitions
+ */
+#define MAX_REQ_SENSE_LEN 0x20
+
+typedef struct {
+ u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */
+ u8 ars:1;
+ u8 reserved:3;
+ u8 islogical:1;
+ u8 logdrv; /* if islogical == 1 */
+ u8 channel; /* if islogical == 0 */
+ u8 target; /* if islogical == 0 */
+ u8 queuetag; /* unused */
+ u8 queueaction; /* unused */
+ u8 cdb[MAX_CDB_LEN];
+ u8 cdblen;
+ u8 reqsenselen;
+ u8 reqsensearea[MAX_REQ_SENSE_LEN];
+ u8 numsgelements;
+ u8 scsistatus;
+ u32 dataxferaddr;
+ u32 dataxferlen;
+} __attribute__ ((packed)) mega_passthru;
+
+
+/*
+ * Extended passthru: support CDB > 10 bytes
+ */
+typedef struct {
+ u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */
+ u8 ars:1;
+ u8 rsvd1:1;
+ u8 cd_rom:1;
+ u8 rsvd2:1;
+ u8 islogical:1;
+ u8 logdrv; /* if islogical == 1 */
+ u8 channel; /* if islogical == 0 */
+ u8 target; /* if islogical == 0 */
+ u8 queuetag; /* unused */
+ u8 queueaction; /* unused */
+ u8 cdblen;
+ u8 rsvd3;
+ u8 cdb[MAX_EXT_CDB_LEN];
+ u8 numsgelements;
+ u8 status;
+ u8 reqsenselen;
+ u8 reqsensearea[MAX_REQ_SENSE_LEN];
+ u8 rsvd4;
+ u32 dataxferaddr;
+ u32 dataxferlen;
+} __attribute__ ((packed)) mega_ext_passthru;
+
+typedef struct {
+ u64 address;
+ u32 length;
+} __attribute__ ((packed)) mega_sgl64;
+
+typedef struct {
+ u32 address;
+ u32 length;
+} __attribute__ ((packed)) mega_sglist;
+
+
+/* Queued command data */
+typedef struct {
+ int idx;
+ u32 state;
+ struct list_head list;
+ u8 raw_mbox[66];
+ u32 dma_type;
+ u32 dma_direction;
+
+ Scsi_Cmnd *cmd;
+ dma_addr_t dma_h_bulkdata;
+ dma_addr_t dma_h_sgdata;
+
+ mega_sglist *sgl;
+ mega_sgl64 *sgl64;
+ dma_addr_t sgl_dma_addr;
+
+ mega_passthru *pthru;
+ dma_addr_t pthru_dma_addr;
+ mega_ext_passthru *epthru;
+ dma_addr_t epthru_dma_addr;
+} scb_t;
+
+/*
+ * Flags to follow the scb as it transitions between various stages
+ */
+#define SCB_FREE 0x0000 /* on the free list */
+#define SCB_ACTIVE 0x0001 /* off the free list */
+#define SCB_PENDQ 0x0002 /* on the pending queue */
+#define SCB_ISSUED 0x0004 /* issued - owner f/w */
+#define SCB_ABORT 0x0008 /* Got an abort for this one */
+#define SCB_RESET 0x0010 /* Got a reset for this one */
+
+/*
+ * Utilities declare this strcture size as 1024 bytes. So more fields can
+ * be added in future.
+ */
+typedef struct {
+ u32 data_size; /* current size in bytes (not including resvd) */
+
+ u32 config_signature;
+ /* Current value is 0x00282008
+ * 0x28=MAX_LOGICAL_DRIVES,
+ * 0x20=Number of stripes and
+ * 0x08=Number of spans */
+
+ u8 fw_version[16]; /* printable ASCI string */
+ u8 bios_version[16]; /* printable ASCI string */
+ u8 product_name[80]; /* printable ASCI string */
+
+ u8 max_commands; /* Max. concurrent commands supported */
+ u8 nchannels; /* Number of SCSI Channels detected */
+ u8 fc_loop_present; /* Number of Fibre Loops detected */
+ u8 mem_type; /* EDO, FPM, SDRAM etc */
+
+ u32 signature;
+ u16 dram_size; /* In terms of MB */
+ u16 subsysid;
+
+ u16 subsysvid;
+ u8 notify_counters;
+ u8 pad1k[889]; /* 135 + 889 resvd = 1024 total size */
+} __attribute__ ((packed)) mega_product_info;
+
+struct notify {
+ u32 global_counter; /* Any change increments this counter */
+
+ u8 param_counter; /* Indicates any params changed */
+ u8 param_id; /* Param modified - defined below */
+ u16 param_val; /* New val of last param modified */
+
+ u8 write_config_counter; /* write config occurred */
+ u8 write_config_rsvd[3];
+
+ u8 ldrv_op_counter; /* Indicates ldrv op started/completed */
+ u8 ldrv_opid; /* ldrv num */
+ u8 ldrv_opcmd; /* ldrv operation - defined below */
+ u8 ldrv_opstatus; /* status of the operation */
+
+ u8 ldrv_state_counter; /* Indicates change of ldrv state */
+ u8 ldrv_state_id; /* ldrv num */
+ u8 ldrv_state_new; /* New state */
+ u8 ldrv_state_old; /* old state */
+
+ u8 pdrv_state_counter; /* Indicates change of ldrv state */
+ u8 pdrv_state_id; /* pdrv id */
+ u8 pdrv_state_new; /* New state */
+ u8 pdrv_state_old; /* old state */
+
+ u8 pdrv_fmt_counter; /* Indicates pdrv format started/over */
+ u8 pdrv_fmt_id; /* pdrv id */
+ u8 pdrv_fmt_val; /* format started/over */
+ u8 pdrv_fmt_rsvd;
+
+ u8 targ_xfer_counter; /* Indicates SCSI-2 Xfer rate change */
+ u8 targ_xfer_id; /* pdrv Id */
+ u8 targ_xfer_val; /* new Xfer params of last pdrv */
+ u8 targ_xfer_rsvd;
+
+ u8 fcloop_id_chg_counter; /* Indicates loopid changed */
+ u8 fcloopid_pdrvid; /* pdrv id */
+ u8 fcloop_id0; /* loopid on fc loop 0 */
+ u8 fcloop_id1; /* loopid on fc loop 1 */
+
+ u8 fcloop_state_counter; /* Indicates loop state changed */
+ u8 fcloop_state0; /* state of fc loop 0 */
+ u8 fcloop_state1; /* state of fc loop 1 */
+ u8 fcloop_state_rsvd;
+} __attribute__ ((packed));
+
+#define MAX_NOTIFY_SIZE 0x80
+#define CUR_NOTIFY_SIZE sizeof(struct notify)
+
+typedef struct {
+ u32 data_size; /* current size in bytes (not including resvd) */
+
+ struct notify notify;
+
+ u8 notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
+
+ u8 rebuild_rate; /* Rebuild rate (0% - 100%) */
+ u8 cache_flush_interval; /* In terms of Seconds */
+ u8 sense_alert;
+ u8 drive_insert_count; /* drive insertion count */
+
+ u8 battery_status;
+ u8 num_ldrv; /* No. of Log Drives configured */
+ u8 recon_state[MAX_LOGICAL_DRIVES_40LD / 8]; /* State of
+ reconstruct */
+ u16 ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8]; /* logdrv
+ Status */
+
+ u32 ldrv_size[MAX_LOGICAL_DRIVES_40LD];/* Size of each log drv */
+ u8 ldrv_prop[MAX_LOGICAL_DRIVES_40LD];
+ u8 ldrv_state[MAX_LOGICAL_DRIVES_40LD];/* State of log drives */
+ u8 pdrv_state[FC_MAX_PHYSICAL_DEVICES];/* State of phys drvs. */
+ u16 pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16];
+
+ u8 targ_xfer[80]; /* phys device transfer rate */
+ u8 pad1k[263]; /* 761 + 263reserved = 1024 bytes total size */
+} __attribute__ ((packed)) mega_inquiry3;
+
+
+/* Structures */
+typedef struct {
+ u8 max_commands; /* Max concurrent commands supported */
+ u8 rebuild_rate; /* Rebuild rate - 0% thru 100% */
+ u8 max_targ_per_chan; /* Max targ per channel */
+ u8 nchannels; /* Number of channels on HBA */
+ u8 fw_version[4]; /* Firmware version */
+ u16 age_of_flash; /* Number of times FW has been flashed */
+ u8 chip_set_value; /* Contents of 0xC0000832 */
+ u8 dram_size; /* In MB */
+ u8 cache_flush_interval; /* in seconds */
+ u8 bios_version[4];
+ u8 board_type;
+ u8 sense_alert;
+ u8 write_config_count; /* Increase with every configuration
+ change */
+ u8 drive_inserted_count; /* Increase with every drive inserted
+ */
+ u8 inserted_drive; /* Channel:Id of inserted drive */
+ u8 battery_status; /*
+ * BIT 0: battery module missing
+ * BIT 1: VBAD
+ * BIT 2: temperature high
+ * BIT 3: battery pack missing
+ * BIT 4,5:
+ * 00 - charge complete
+ * 01 - fast charge in progress
+ * 10 - fast charge fail
+ * 11 - undefined
+ * Bit 6: counter > 1000
+ * Bit 7: Undefined
+ */
+ u8 dec_fault_bus_info;
+} __attribute__ ((packed)) mega_adp_info;
+
+
+typedef struct {
+ u8 num_ldrv; /* Number of logical drives configured */
+ u8 rsvd[3];
+ u32 ldrv_size[MAX_LOGICAL_DRIVES_8LD];
+ u8 ldrv_prop[MAX_LOGICAL_DRIVES_8LD];
+ u8 ldrv_state[MAX_LOGICAL_DRIVES_8LD];
+} __attribute__ ((packed)) mega_ldrv_info;
+
+typedef struct {
+ u8 pdrv_state[MAX_PHYSICAL_DRIVES];
+ u8 rsvd;
+} __attribute__ ((packed)) mega_pdrv_info;
+
+/* RAID inquiry: Mailbox command 0x05*/
+typedef struct {
+ mega_adp_info adapter_info;
+ mega_ldrv_info logdrv_info;
+ mega_pdrv_info pdrv_info;
+} __attribute__ ((packed)) mraid_inquiry;
+
+
+/* RAID extended inquiry: Mailbox command 0x04*/
+typedef struct {
+ mraid_inquiry raid_inq;
+ u16 phys_drv_format[MAX_CHANNELS];
+ u8 stack_attn;
+ u8 modem_status;
+ u8 rsvd[2];
+} __attribute__ ((packed)) mraid_ext_inquiry;
+
+
+typedef struct {
+ u8 channel;
+ u8 target;
+}__attribute__ ((packed)) adp_device;
+
+typedef struct {
+ u32 start_blk; /* starting block */
+ u32 num_blks; /* # of blocks */
+ adp_device device[MAX_ROW_SIZE_40LD];
+}__attribute__ ((packed)) adp_span_40ld;
+
+typedef struct {
+ u32 start_blk; /* starting block */
+ u32 num_blks; /* # of blocks */
+ adp_device device[MAX_ROW_SIZE_8LD];
+}__attribute__ ((packed)) adp_span_8ld;
+
+typedef struct {
+ u8 span_depth; /* Total # of spans */
+ u8 level; /* RAID level */
+ u8 read_ahead; /* read ahead, no read ahead, adaptive read
+ ahead */
+ u8 stripe_sz; /* Encoded stripe size */
+ u8 status; /* Status of the logical drive */
+ u8 write_mode; /* write mode, write_through/write_back */
+ u8 direct_io; /* direct io or through cache */
+ u8 row_size; /* Number of stripes in a row */
+} __attribute__ ((packed)) logdrv_param;
+
+typedef struct {
+ logdrv_param lparam;
+ adp_span_40ld span[MAX_SPAN_DEPTH];
+}__attribute__ ((packed)) logdrv_40ld;
+
+typedef struct {
+ logdrv_param lparam;
+ adp_span_8ld span[MAX_SPAN_DEPTH];
+}__attribute__ ((packed)) logdrv_8ld;
+
+typedef struct {
+ u8 type; /* Type of the device */
+ u8 cur_status; /* current status of the device */
+ u8 tag_depth; /* Level of tagging */
+ u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */
+ u32 size; /* configurable size in terms of 512 byte
+ blocks */
+}__attribute__ ((packed)) phys_drv;
+
+typedef struct {
+ u8 nlog_drives; /* number of logical drives */
+ u8 resvd[3];
+ logdrv_40ld ldrv[MAX_LOGICAL_DRIVES_40LD];
+ phys_drv pdrv[MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_40ld;
+
+typedef struct {
+ u8 nlog_drives; /* number of logical drives */
+ u8 resvd[3];
+ logdrv_8ld ldrv[MAX_LOGICAL_DRIVES_8LD];
+ phys_drv pdrv[MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld;
+
+
+/*
+ * User ioctl structure.
+ * This structure will be used for Traditional Method ioctl interface
+ * commands (0x80),Alternate Buffer Method (0x81) ioctl commands and the
+ * Driver ioctls.
+ * The Driver ioctl interface handles the commands at the driver level,
+ * without being sent to the card.
+ */
+/* system call imposed limit. Change accordingly */
+#define IOCTL_MAX_DATALEN 4096
+
+struct uioctl_t {
+ u32 inlen;
+ u32 outlen;
+ union {
+ u8 fca[16];
+ struct {
+ u8 opcode;
+ u8 subopcode;
+ u16 adapno;
+#if BITS_PER_LONG == 32
+ u8 *buffer;
+ u8 pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ u8 *buffer;
+#endif
+ u32 length;
+ } __attribute__ ((packed)) fcs;
+ } __attribute__ ((packed)) ui;
+ u8 mbox[18]; /* 16 bytes + 2 status bytes */
+ mega_passthru pthru;
+#if BITS_PER_LONG == 32
+ char __user *data; /* buffer <= 4096 for 0x80 commands */
+ char pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ char __user *data;
+#endif
+} __attribute__ ((packed));
+
+/*
+ * struct mcontroller is used to pass information about the controllers in the
+ * system. Its up to the application how to use the information. We are passing
+ * as much info about the cards as possible and useful. Before issuing the
+ * call to find information about the cards, the application needs to issue a
+ * ioctl first to find out the number of controllers in the system.
+ */
+#define MAX_CONTROLLERS 32
+
+struct mcontroller {
+ u64 base;
+ u8 irq;
+ u8 numldrv;
+ u8 pcibus;
+ u16 pcidev;
+ u8 pcifun;
+ u16 pciid;
+ u16 pcivendor;
+ u8 pcislot;
+ u32 uid;
+};
+
+/*
+ * mailbox structure used for internal commands
+ */
+typedef struct {
+ u8 cmd;
+ u8 cmdid;
+ u8 opcode;
+ u8 subopcode;
+ u32 lba;
+ u32 xferaddr;
+ u8 logdrv;
+ u8 rsvd[3];
+ u8 numstatus;
+ u8 status;
+} __attribute__ ((packed)) megacmd_t;
+
+/*
+ * Defines for Driver IOCTL interface
+ */
+#define MEGAIOC_MAGIC 'm'
+
+#define MEGAIOC_QNADAP 'm' /* Query # of adapters */
+#define MEGAIOC_QDRVRVER 'e' /* Query driver version */
+#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */
+#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) )
+#define GETADAP(mkadap) ( (mkadap) ^ MEGAIOC_MAGIC << 8 )
+
+/*
+ * Definition for the new ioctl interface (NIT)
+ */
+
+/*
+ * Vendor specific Group-7 commands
+ */
+#define VENDOR_SPECIFIC_COMMANDS 0xE0
+#define MEGA_INTERNAL_CMD VENDOR_SPECIFIC_COMMANDS + 0x01
+
+/*
+ * The ioctl command. No other command shall be used for this interface
+ */
+#define USCSICMD VENDOR_SPECIFIC_COMMANDS
+
+/*
+ * Data direction flags
+ */
+#define UIOC_RD 0x00001
+#define UIOC_WR 0x00002
+
+/*
+ * ioctl opcodes
+ */
+#define MBOX_CMD 0x00000 /* DCMD or passthru command */
+#define GET_DRIVER_VER 0x10000 /* Get driver version */
+#define GET_N_ADAP 0x20000 /* Get number of adapters */
+#define GET_ADAP_INFO 0x30000 /* Get information about a adapter */
+#define GET_CAP 0x40000 /* Get ioctl capabilities */
+#define GET_STATS 0x50000 /* Get statistics, including error info */
+
+
+/*
+ * The ioctl structure.
+ * MBOX macro converts a nitioctl_t structure to megacmd_t pointer and
+ * MBOX_P macro converts a nitioctl_t pointer to megacmd_t pointer.
+ */
+typedef struct {
+ char signature[8]; /* Must contain "MEGANIT" */
+ u32 opcode; /* opcode for the command */
+ u32 adapno; /* adapter number */
+ union {
+ u8 __raw_mbox[18];
+ void __user *__uaddr; /* xferaddr for non-mbox cmds */
+ }__ua;
+
+#define uioc_rmbox __ua.__raw_mbox
+#define MBOX(uioc) ((megacmd_t *)&((uioc).__ua.__raw_mbox[0]))
+#define MBOX_P(uioc) ((megacmd_t __user *)&((uioc)->__ua.__raw_mbox[0]))
+#define uioc_uaddr __ua.__uaddr
+
+ u32 xferlen; /* xferlen for DCMD and non-mbox
+ commands */
+ u32 flags; /* data direction flags */
+}nitioctl_t;
+
+
+/*
+ * I/O statistics for some applications like SNMP agent. The caller must
+ * provide the number of logical drives for which status should be reported.
+ */
+typedef struct {
+ int num_ldrv; /* Number for logical drives for which the
+ status should be reported. */
+ u32 nreads[MAX_LOGICAL_DRIVES_40LD]; /* number of reads for
+ each logical drive */
+ u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks
+ read for each logical
+ drive */
+ u32 nwrites[MAX_LOGICAL_DRIVES_40LD]; /* number of writes
+ for each logical
+ drive */
+ u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD]; /* number of blocks
+ writes for each
+ logical drive */
+ u32 rd_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of read
+ errors for each
+ logical drive */
+ u32 wr_errors[MAX_LOGICAL_DRIVES_40LD]; /* number of write
+ errors for each
+ logical drive */
+}megastat_t;
+
+
+struct private_bios_data {
+ u8 geometry:4; /*
+ * bits 0-3 - BIOS geometry
+ * 0x0001 - 1GB
+ * 0x0010 - 2GB
+ * 0x1000 - 8GB
+ * Others values are invalid
+ */
+ u8 unused:4; /* bits 4-7 are unused */
+ u8 boot_drv; /*
+ * logical drive set as boot drive
+ * 0..7 - for 8LD cards
+ * 0..39 - for 40LD cards
+ */
+ u8 rsvd[12];
+ u16 cksum; /* 0-(sum of first 13 bytes of this structure) */
+} __attribute__ ((packed));
+
+
+
+
+/*
+ * Mailbox and firmware commands and subopcodes used in this driver.
+ */
+
+#define MEGA_MBOXCMD_LREAD 0x01
+#define MEGA_MBOXCMD_LWRITE 0x02
+#define MEGA_MBOXCMD_PASSTHRU 0x03
+#define MEGA_MBOXCMD_ADPEXTINQ 0x04
+#define MEGA_MBOXCMD_ADAPTERINQ 0x05
+#define MEGA_MBOXCMD_LREAD64 0xA7
+#define MEGA_MBOXCMD_LWRITE64 0xA8
+#define MEGA_MBOXCMD_PASSTHRU64 0xC3
+#define MEGA_MBOXCMD_EXTPTHRU 0xE3
+
+#define MAIN_MISC_OPCODE 0xA4 /* f/w misc opcode */
+#define GET_MAX_SG_SUPPORT 0x01 /* get max sg len supported by f/w */
+
+#define FC_NEW_CONFIG 0xA1
+#define NC_SUBOP_PRODUCT_INFO 0x0E
+#define NC_SUBOP_ENQUIRY3 0x0F
+#define ENQ3_GET_SOLICITED_FULL 0x02
+#define OP_DCMD_READ_CONFIG 0x04
+#define NEW_READ_CONFIG_8LD 0x67
+#define READ_CONFIG_8LD 0x07
+#define FLUSH_ADAPTER 0x0A
+#define FLUSH_SYSTEM 0xFE
+
+/*
+ * Command for random deletion of logical drives
+ */
+#define FC_DEL_LOGDRV 0xA4 /* f/w command */
+#define OP_SUP_DEL_LOGDRV 0x2A /* is feature supported */
+#define OP_GET_LDID_MAP 0x18 /* get ldid and logdrv number map */
+#define OP_DEL_LOGDRV 0x1C /* delete logical drive */
+
+/*
+ * BIOS commands
+ */
+#define IS_BIOS_ENABLED 0x62
+#define GET_BIOS 0x01
+#define CHNL_CLASS 0xA9
+#define GET_CHNL_CLASS 0x00
+#define SET_CHNL_CLASS 0x01
+#define CH_RAID 0x01
+#define CH_SCSI 0x00
+#define BIOS_PVT_DATA 0x40
+#define GET_BIOS_PVT_DATA 0x00
+
+
+/*
+ * Commands to support clustering
+ */
+#define MEGA_GET_TARGET_ID 0x7D
+#define MEGA_CLUSTER_OP 0x70
+#define MEGA_GET_CLUSTER_MODE 0x02
+#define MEGA_CLUSTER_CMD 0x6E
+#define MEGA_RESERVE_LD 0x01
+#define MEGA_RELEASE_LD 0x02
+#define MEGA_RESET_RESERVATIONS 0x03
+#define MEGA_RESERVATION_STATUS 0x04
+#define MEGA_RESERVE_PD 0x05
+#define MEGA_RELEASE_PD 0x06
+
+
+/*
+ * Module battery status
+ */
+#define MEGA_BATT_MODULE_MISSING 0x01
+#define MEGA_BATT_LOW_VOLTAGE 0x02
+#define MEGA_BATT_TEMP_HIGH 0x04
+#define MEGA_BATT_PACK_MISSING 0x08
+#define MEGA_BATT_CHARGE_MASK 0x30
+#define MEGA_BATT_CHARGE_DONE 0x00
+#define MEGA_BATT_CHARGE_INPROG 0x10
+#define MEGA_BATT_CHARGE_FAIL 0x20
+#define MEGA_BATT_CYCLES_EXCEEDED 0x40
+
+/*
+ * Physical drive states.
+ */
+#define PDRV_UNCNF 0
+#define PDRV_ONLINE 3
+#define PDRV_FAILED 4
+#define PDRV_RBLD 5
+#define PDRV_HOTSPARE 6
+
+
+/*
+ * Raid logical drive states.
+ */
+#define RDRV_OFFLINE 0
+#define RDRV_DEGRADED 1
+#define RDRV_OPTIMAL 2
+#define RDRV_DELETED 3
+
+/*
+ * Read, write and cache policies
+ */
+#define NO_READ_AHEAD 0
+#define READ_AHEAD 1
+#define ADAP_READ_AHEAD 2
+#define WRMODE_WRITE_THRU 0
+#define WRMODE_WRITE_BACK 1
+#define CACHED_IO 0
+#define DIRECT_IO 1
+
+
+#define SCSI_LIST(scp) ((struct list_head *)(&(scp)->SCp))
+
+/*
+ * Each controller's soft state
+ */
+typedef struct {
+ int this_id; /* our id, may set to different than 7 if
+ clustering is available */
+ u32 flag;
+
+ unsigned long base;
+ void __iomem *mmio_base;
+
+ /* mbox64 with mbox not aligned on 16-byte boundary */
+ mbox64_t *una_mbox64;
+ dma_addr_t una_mbox64_dma;
+
+ volatile mbox64_t *mbox64;/* ptr to 64-bit mailbox */
+ volatile mbox_t *mbox; /* ptr to standard mailbox */
+ dma_addr_t mbox_dma;
+
+ struct pci_dev *dev;
+
+ struct list_head free_list;
+ struct list_head pending_list;
+ struct list_head completed_list;
+
+ struct Scsi_Host *host;
+
+#define MEGA_BUFFER_SIZE (2*1024)
+ u8 *mega_buffer;
+ dma_addr_t buf_dma_handle;
+
+ mega_product_info product_info;
+
+ u8 max_cmds;
+ scb_t *scb_list;
+
+ atomic_t pend_cmds; /* maintain a counter for pending
+ commands in firmware */
+
+#if MEGA_HAVE_STATS
+ u32 nreads[MAX_LOGICAL_DRIVES_40LD];
+ u32 nreadblocks[MAX_LOGICAL_DRIVES_40LD];
+ u32 nwrites[MAX_LOGICAL_DRIVES_40LD];
+ u32 nwriteblocks[MAX_LOGICAL_DRIVES_40LD];
+ u32 rd_errors[MAX_LOGICAL_DRIVES_40LD];
+ u32 wr_errors[MAX_LOGICAL_DRIVES_40LD];
+#endif
+
+ /* Host adapter parameters */
+ u8 numldrv;
+ u8 fw_version[7];
+ u8 bios_version[7];
+
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *controller_proc_dir_entry;
+ struct proc_dir_entry *proc_read;
+ struct proc_dir_entry *proc_stat;
+ struct proc_dir_entry *proc_mbox;
+
+#if MEGA_HAVE_ENH_PROC
+ struct proc_dir_entry *proc_rr;
+ struct proc_dir_entry *proc_battery;
+#define MAX_PROC_CHANNELS 4
+ struct proc_dir_entry *proc_pdrvstat[MAX_PROC_CHANNELS];
+ struct proc_dir_entry *proc_rdrvstat[MAX_PROC_CHANNELS];
+#endif
+
+#endif
+
+ int has_64bit_addr; /* are we using 64-bit addressing */
+ int support_ext_cdb;
+ int boot_ldrv_enabled;
+ int boot_ldrv;
+ int boot_pdrv_enabled; /* boot from physical drive */
+ int boot_pdrv_ch; /* boot physical drive channel */
+ int boot_pdrv_tgt; /* boot physical drive target */
+
+
+ int support_random_del; /* Do we support random deletion of
+ logdrvs */
+ int read_ldidmap; /* set after logical drive deltion. The
+ logical drive number must be read from the
+ map */
+ atomic_t quiescent; /* a stage reached when delete logical
+ drive needs to be done. Stop
+ sending requests to the hba till
+ delete operation is completed */
+ spinlock_t lock;
+
+ u8 logdrv_chan[MAX_CHANNELS+NVIRT_CHAN]; /* logical drive are on
+ what channels. */
+ int mega_ch_class;
+
+ u8 sglen; /* f/w supported scatter-gather list length */
+
+ scb_t int_scb;
+ struct mutex int_mtx; /* To synchronize the internal
+ commands */
+ int int_status; /* status of internal cmd */
+ struct completion int_waitq; /* wait queue for internal
+ cmds */
+
+ int has_cluster; /* cluster support on this HBA */
+}adapter_t;
+
+
+struct mega_hbas {
+ int is_bios_enabled;
+ adapter_t *hostdata_addr;
+};
+
+
+/*
+ * For state flag. Do not use LSB(8 bits) which are
+ * reserved for storing info about channels.
+ */
+#define IN_ABORT 0x80000000L
+#define IN_RESET 0x40000000L
+#define BOARD_MEMMAP 0x20000000L
+#define BOARD_IOMAP 0x10000000L
+#define BOARD_40LD 0x08000000L
+#define BOARD_64BIT 0x04000000L
+
+#define INTR_VALID 0x40
+
+#define PCI_CONF_AMISIG 0xa0
+#define PCI_CONF_AMISIG64 0xa4
+
+
+#define MEGA_DMA_TYPE_NONE 0xFFFF
+#define MEGA_BULK_DATA 0x0001
+#define MEGA_SGLIST 0x0002
+
+/*
+ * Parameters for the io-mapped controllers
+ */
+
+/* I/O Port offsets */
+#define CMD_PORT 0x00
+#define ACK_PORT 0x00
+#define TOGGLE_PORT 0x01
+#define INTR_PORT 0x0a
+
+#define MBOX_BUSY_PORT 0x00
+#define MBOX_PORT0 0x04
+#define MBOX_PORT1 0x05
+#define MBOX_PORT2 0x06
+#define MBOX_PORT3 0x07
+#define ENABLE_MBOX_REGION 0x0B
+
+/* I/O Port Values */
+#define ISSUE_BYTE 0x10
+#define ACK_BYTE 0x08
+#define ENABLE_INTR_BYTE 0xc0
+#define DISABLE_INTR_BYTE 0x00
+#define VALID_INTR_BYTE 0x40
+#define MBOX_BUSY_BYTE 0x10
+#define ENABLE_MBOX_BYTE 0x00
+
+
+/* Setup some port macros here */
+#define issue_command(adapter) \
+ outb_p(ISSUE_BYTE, (adapter)->base + CMD_PORT)
+
+#define irq_state(adapter) inb_p((adapter)->base + INTR_PORT)
+
+#define set_irq_state(adapter, value) \
+ outb_p((value), (adapter)->base + INTR_PORT)
+
+#define irq_ack(adapter) \
+ outb_p(ACK_BYTE, (adapter)->base + ACK_PORT)
+
+#define irq_enable(adapter) \
+ outb_p(ENABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT)
+
+#define irq_disable(adapter) \
+ outb_p(DISABLE_INTR_BYTE, (adapter)->base + TOGGLE_PORT)
+
+
+/*
+ * This is our SYSDEP area. All kernel specific detail should be placed here -
+ * as much as possible
+ */
+
+/*
+ * End of SYSDEP area
+ */
+
+const char *megaraid_info (struct Scsi_Host *);
+
+static int mega_query_adapter(adapter_t *);
+static int issue_scb(adapter_t *, scb_t *);
+static int mega_setup_mailbox(adapter_t *);
+
+static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *);
+static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *);
+static void __mega_runpendq(adapter_t *);
+static int issue_scb_block(adapter_t *, u_char *);
+
+static irqreturn_t megaraid_isr_memmapped(int, void *);
+static irqreturn_t megaraid_isr_iomapped(int, void *);
+
+static void mega_free_scb(adapter_t *, scb_t *);
+
+static int megaraid_abort(Scsi_Cmnd *);
+static int megaraid_reset(Scsi_Cmnd *);
+static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int);
+static int megaraid_biosparam(struct scsi_device *, struct block_device *,
+ sector_t, int []);
+
+static int mega_build_sglist (adapter_t *adapter, scb_t *scb,
+ u32 *buffer, u32 *length);
+static int __mega_busywait_mbox (adapter_t *);
+static void mega_rundoneq (adapter_t *);
+static void mega_cmd_done(adapter_t *, u8 [], int, int);
+static inline void mega_free_sgl (adapter_t *adapter);
+static void mega_8_to_40ld (mraid_inquiry *inquiry,
+ mega_inquiry3 *enquiry3, mega_product_info *);
+
+static int megadev_open (struct inode *, struct file *);
+static int megadev_ioctl (struct file *, unsigned int, unsigned long);
+static int mega_m_to_n(void __user *, nitioctl_t *);
+static int mega_n_to_m(void __user *, megacmd_t *);
+
+static int mega_init_scb (adapter_t *);
+
+static int mega_is_bios_enabled (adapter_t *);
+
+#ifdef CONFIG_PROC_FS
+static void mega_create_proc_entry(int, struct proc_dir_entry *);
+static int mega_adapinq(adapter_t *, dma_addr_t);
+static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
+#endif
+
+static int mega_support_ext_cdb(adapter_t *);
+static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *,
+ Scsi_Cmnd *, int, int);
+static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *,
+ scb_t *, Scsi_Cmnd *, int, int);
+static void mega_enum_raid_scsi(adapter_t *);
+static void mega_get_boot_drv(adapter_t *);
+static int mega_support_random_del(adapter_t *);
+static int mega_del_logdrv(adapter_t *, int);
+static int mega_do_del_logdrv(adapter_t *, int);
+static void mega_get_max_sgl(adapter_t *);
+static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *);
+static int mega_support_cluster(adapter_t *);
+#endif
+
+/* vi: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
new file mode 100644
index 000000000..17419e30f
--- /dev/null
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -0,0 +1,85 @@
+config MEGARAID_NEWGEN
+ bool "LSI Logic New Generation RAID Device Drivers"
+ depends on PCI && SCSI
+ help
+ LSI Logic RAID Device Drivers
+
+config MEGARAID_MM
+ tristate "LSI Logic Management Module (New Driver)"
+ depends on PCI && SCSI && MEGARAID_NEWGEN
+ help
+ Management Module provides ioctl, sysfs support for LSI Logic
+ RAID controllers.
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_mm
+
+
+config MEGARAID_MAILBOX
+ tristate "LSI Logic MegaRAID Driver (New Driver)"
+ depends on PCI && SCSI && MEGARAID_MM
+ help
+ List of supported controllers
+
+ OEM Product Name VID :DID :SVID:SSID
+ --- ------------ ---- ---- ---- ----
+ Dell PERC3/QC 101E:1960:1028:0471
+ Dell PERC3/DC 101E:1960:1028:0493
+ Dell PERC3/SC 101E:1960:1028:0475
+ Dell PERC3/Di 1028:000E:1028:0123
+ Dell PERC4/SC 1000:1960:1028:0520
+ Dell PERC4/DC 1000:1960:1028:0518
+ Dell PERC4/QC 1000:0407:1028:0531
+ Dell PERC4/Di 1028:000F:1028:014A
+ Dell PERC 4e/Si 1028:0013:1028:016c
+ Dell PERC 4e/Di 1028:0013:1028:016d
+ Dell PERC 4e/Di 1028:0013:1028:016e
+ Dell PERC 4e/Di 1028:0013:1028:016f
+ Dell PERC 4e/Di 1028:0013:1028:0170
+ Dell PERC 4e/DC 1000:0408:1028:0002
+ Dell PERC 4e/SC 1000:0408:1028:0001
+ LSI MegaRAID SCSI 320-0 1000:1960:1000:A520
+ LSI MegaRAID SCSI 320-1 1000:1960:1000:0520
+ LSI MegaRAID SCSI 320-2 1000:1960:1000:0518
+ LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530
+ LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532
+ LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531
+ LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001
+ LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002
+ LSI MegaRAID SATA 150-4 1000:1960:1000:4523
+ LSI MegaRAID SATA 150-6 1000:1960:1000:0523
+ LSI MegaRAID SATA 300-4X 1000:0409:1000:3004
+ LSI MegaRAID SATA 300-8X 1000:0409:1000:3008
+ INTEL RAID Controller SRCU42X 1000:0407:8086:0532
+ INTEL RAID Controller SRCS16 1000:1960:8086:0523
+ INTEL RAID Controller SRCU42E 1000:0408:8086:0002
+ INTEL RAID Controller SRCZCRX 1000:0407:8086:0530
+ INTEL RAID Controller SRCS28X 1000:0409:8086:3008
+ INTEL RAID Controller SROMBU42E 1000:0408:8086:3431
+ INTEL RAID Controller SROMBU42E 1000:0408:8086:3499
+ INTEL RAID Controller SRCU51L 1000:1960:8086:0520
+ FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065
+ ACER MegaRAID ROMB-2E 1000:0408:1025:004D
+ NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287
+
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_mbox
+
+config MEGARAID_LEGACY
+ tristate "LSI Logic Legacy MegaRAID Driver"
+ depends on PCI && SCSI
+ help
+ This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490
+ and 467 SCSI host adapters. This driver also support the all U320
+ RAID controllers
+
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid
+
+config MEGARAID_SAS
+ tristate "LSI Logic MegaRAID SAS RAID Module"
+ depends on PCI && SCSI
+ help
+ Module for LSI Logic's SAS based RAID controllers.
+ To compile this driver as a module, choose 'm' here.
+ Module will be called megaraid_sas
+
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
new file mode 100644
index 000000000..5826ed509
--- /dev/null
+++ b/drivers/scsi/megaraid/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
+obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
+obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
+megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
+ megaraid_sas_fp.o
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
new file mode 100644
index 000000000..e01c6f7c2
--- /dev/null
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -0,0 +1,790 @@
+/*
+ *
+ * Linux MegaRAID Unified device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : mbox_defs.h
+ *
+ */
+#ifndef _MRAID_MBOX_DEFS_H_
+#define _MRAID_MBOX_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Commands and states for mailbox based controllers
+ */
+
+#define MBOXCMD_LREAD 0x01
+#define MBOXCMD_LWRITE 0x02
+#define MBOXCMD_PASSTHRU 0x03
+#define MBOXCMD_ADPEXTINQ 0x04
+#define MBOXCMD_ADAPTERINQ 0x05
+#define MBOXCMD_LREAD64 0xA7
+#define MBOXCMD_LWRITE64 0xA8
+#define MBOXCMD_PASSTHRU64 0xC3
+#define MBOXCMD_EXTPTHRU 0xE3
+
+#define MAIN_MISC_OPCODE 0xA4
+#define GET_MAX_SG_SUPPORT 0x01
+#define SUPPORT_EXT_CDB 0x16
+
+#define FC_NEW_CONFIG 0xA1
+#define NC_SUBOP_PRODUCT_INFO 0x0E
+#define NC_SUBOP_ENQUIRY3 0x0F
+#define ENQ3_GET_SOLICITED_FULL 0x02
+#define OP_DCMD_READ_CONFIG 0x04
+#define NEW_READ_CONFIG_8LD 0x67
+#define READ_CONFIG_8LD 0x07
+#define FLUSH_ADAPTER 0x0A
+#define FLUSH_SYSTEM 0xFE
+
+/*
+ * Command for random deletion of logical drives
+ */
+#define FC_DEL_LOGDRV 0xA4
+#define OP_SUP_DEL_LOGDRV 0x2A
+#define OP_GET_LDID_MAP 0x18
+#define OP_DEL_LOGDRV 0x1C
+
+/*
+ * BIOS commands
+ */
+#define IS_BIOS_ENABLED 0x62
+#define GET_BIOS 0x01
+#define CHNL_CLASS 0xA9
+#define GET_CHNL_CLASS 0x00
+#define SET_CHNL_CLASS 0x01
+#define CH_RAID 0x01
+#define CH_SCSI 0x00
+#define BIOS_PVT_DATA 0x40
+#define GET_BIOS_PVT_DATA 0x00
+
+
+/*
+ * Commands to support clustering
+ */
+#define GET_TARGET_ID 0x7D
+#define CLUSTER_OP 0x70
+#define GET_CLUSTER_MODE 0x02
+#define CLUSTER_CMD 0x6E
+#define RESERVE_LD 0x01
+#define RELEASE_LD 0x02
+#define RESET_RESERVATIONS 0x03
+#define RESERVATION_STATUS 0x04
+#define RESERVE_PD 0x05
+#define RELEASE_PD 0x06
+
+
+/*
+ * Module battery status
+ */
+#define BATTERY_MODULE_MISSING 0x01
+#define BATTERY_LOW_VOLTAGE 0x02
+#define BATTERY_TEMP_HIGH 0x04
+#define BATTERY_PACK_MISSING 0x08
+#define BATTERY_CHARGE_MASK 0x30
+#define BATTERY_CHARGE_DONE 0x00
+#define BATTERY_CHARGE_INPROG 0x10
+#define BATTERY_CHARGE_FAIL 0x20
+#define BATTERY_CYCLES_EXCEEDED 0x40
+
+/*
+ * Physical drive states.
+ */
+#define PDRV_UNCNF 0
+#define PDRV_ONLINE 3
+#define PDRV_FAILED 4
+#define PDRV_RBLD 5
+#define PDRV_HOTSPARE 6
+
+
+/*
+ * Raid logical drive states.
+ */
+#define RDRV_OFFLINE 0
+#define RDRV_DEGRADED 1
+#define RDRV_OPTIMAL 2
+#define RDRV_DELETED 3
+
+/*
+ * Read, write and cache policies
+ */
+#define NO_READ_AHEAD 0
+#define READ_AHEAD 1
+#define ADAP_READ_AHEAD 2
+#define WRMODE_WRITE_THRU 0
+#define WRMODE_WRITE_BACK 1
+#define CACHED_IO 0
+#define DIRECT_IO 1
+
+#define MAX_LOGICAL_DRIVES_8LD 8
+#define MAX_LOGICAL_DRIVES_40LD 40
+#define FC_MAX_PHYSICAL_DEVICES 256
+#define MAX_MBOX_CHANNELS 5
+#define MAX_MBOX_TARGET 15
+#define MBOX_MAX_PHYSICAL_DRIVES MAX_MBOX_CHANNELS*MAX_MBOX_TARGET
+#define MAX_ROW_SIZE_40LD 32
+#define MAX_ROW_SIZE_8LD 8
+#define SPAN_DEPTH_8_SPANS 8
+#define SPAN_DEPTH_4_SPANS 4
+#define MAX_REQ_SENSE_LEN 0x20
+
+
+
+/**
+ * struct mbox_t - Driver and f/w handshake structure.
+ * @cmd : firmware command
+ * @cmdid : command id
+ * @numsectors : number of sectors to be transferred
+ * @lba : Logical Block Address on LD
+ * @xferaddr : DMA address for data transfer
+ * @logdrv : logical drive number
+ * @numsge : number of scatter gather elements in sg list
+ * @resvd : reserved
+ * @busy : f/w busy, must wait to issue more commands.
+ * @numstatus : number of commands completed.
+ * @status : status of the commands completed
+ * @completed : array of completed command ids.
+ * @poll : poll and ack sequence
+ * @ack : poll and ack sequence
+ *
+ * The central handshake structure between the driver and the firmware. This
+ * structure must be allocated by the driver and aligned at 8-byte boundary.
+ */
+#define MBOX_MAX_FIRMWARE_STATUS 46
+typedef struct {
+ uint8_t cmd;
+ uint8_t cmdid;
+ uint16_t numsectors;
+ uint32_t lba;
+ uint32_t xferaddr;
+ uint8_t logdrv;
+ uint8_t numsge;
+ uint8_t resvd;
+ uint8_t busy;
+ uint8_t numstatus;
+ uint8_t status;
+ uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
+ uint8_t poll;
+ uint8_t ack;
+} __attribute__ ((packed)) mbox_t;
+
+
+/**
+ * mbox64_t - 64-bit extension for the mailbox
+ * @segment_lo : the low 32-bits of the address of the scatter-gather list
+ * @segment_hi : the upper 32-bits of the address of the scatter-gather list
+ * @mbox : 32-bit mailbox, whose xferadder field must be set to
+ * 0xFFFFFFFF
+ *
+ * This is the extension of the 32-bit mailbox to be able to perform DMA
+ * beyond 4GB address range.
+ */
+typedef struct {
+ uint32_t xferaddr_lo;
+ uint32_t xferaddr_hi;
+ mbox_t mbox32;
+} __attribute__ ((packed)) mbox64_t;
+
+/*
+ * mailbox structure used for internal commands
+ */
+typedef struct {
+ u8 cmd;
+ u8 cmdid;
+ u8 opcode;
+ u8 subopcode;
+ u32 lba;
+ u32 xferaddr;
+ u8 logdrv;
+ u8 rsvd[3];
+ u8 numstatus;
+ u8 status;
+} __attribute__ ((packed)) int_mbox_t;
+
+/**
+ * mraid_passthru_t - passthru structure to issue commands to physical devices
+ * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
+ * @ars : set if ARS required after check condition
+ * @islogical : set if command meant for logical devices
+ * @logdrv : logical drive number if command for LD
+ * @channel : Channel on which physical device is located
+ * @target : SCSI target of the device
+ * @queuetag : unused
+ * @queueaction : unused
+ * @cdb : SCSI CDB
+ * @cdblen : length of the CDB
+ * @reqsenselen : amount of request sense data to be returned
+ * @reqsensearea : Sense information buffer
+ * @numsge : number of scatter-gather elements in the sg list
+ * @scsistatus : SCSI status of the command completed.
+ * @dataxferaddr : DMA data transfer address
+ * @dataxferlen : amount of the data to be transferred.
+ */
+typedef struct {
+ uint8_t timeout :3;
+ uint8_t ars :1;
+ uint8_t reserved :3;
+ uint8_t islogical :1;
+ uint8_t logdrv;
+ uint8_t channel;
+ uint8_t target;
+ uint8_t queuetag;
+ uint8_t queueaction;
+ uint8_t cdb[10];
+ uint8_t cdblen;
+ uint8_t reqsenselen;
+ uint8_t reqsensearea[MAX_REQ_SENSE_LEN];
+ uint8_t numsge;
+ uint8_t scsistatus;
+ uint32_t dataxferaddr;
+ uint32_t dataxferlen;
+} __attribute__ ((packed)) mraid_passthru_t;
+
+typedef struct {
+
+ uint32_t dataxferaddr_lo;
+ uint32_t dataxferaddr_hi;
+ mraid_passthru_t pthru32;
+
+} __attribute__ ((packed)) mega_passthru64_t;
+
+/**
+ * mraid_epassthru_t - passthru structure to issue commands to physical devices
+ * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
+ * @ars : set if ARS required after check condition
+ * @rsvd1 : reserved field
+ * @cd_rom : (?)
+ * @rsvd2 : reserved field
+ * @islogical : set if command meant for logical devices
+ * @logdrv : logical drive number if command for LD
+ * @channel : Channel on which physical device is located
+ * @target : SCSI target of the device
+ * @queuetag : unused
+ * @queueaction : unused
+ * @cdblen : length of the CDB
+ * @rsvd3 : reserved field
+ * @cdb : SCSI CDB
+ * @numsge : number of scatter-gather elements in the sg list
+ * @status : SCSI status of the command completed.
+ * @reqsenselen : amount of request sense data to be returned
+ * @reqsensearea : Sense information buffer
+ * @rsvd4 : reserved field
+ * @dataxferaddr : DMA data transfer address
+ * @dataxferlen : amount of the data to be transferred.
+ */
+typedef struct {
+ uint8_t timeout :3;
+ uint8_t ars :1;
+ uint8_t rsvd1 :1;
+ uint8_t cd_rom :1;
+ uint8_t rsvd2 :1;
+ uint8_t islogical :1;
+ uint8_t logdrv;
+ uint8_t channel;
+ uint8_t target;
+ uint8_t queuetag;
+ uint8_t queueaction;
+ uint8_t cdblen;
+ uint8_t rsvd3;
+ uint8_t cdb[16];
+ uint8_t numsge;
+ uint8_t status;
+ uint8_t reqsenselen;
+ uint8_t reqsensearea[MAX_REQ_SENSE_LEN];
+ uint8_t rsvd4;
+ uint32_t dataxferaddr;
+ uint32_t dataxferlen;
+} __attribute__ ((packed)) mraid_epassthru_t;
+
+
+/**
+ * mraid_pinfo_t - product info, static information about the controller
+ * @data_size : current size in bytes (not including resvd)
+ * @config_signature : Current value is 0x00282008
+ * @fw_version : Firmware version
+ * @bios_version : version of the BIOS
+ * @product_name : Name given to the controller
+ * @max_commands : Maximum concurrent commands supported
+ * @nchannels : Number of SCSI Channels detected
+ * @fc_loop_present : Number of Fibre Loops detected
+ * @mem_type : EDO, FPM, SDRAM etc
+ * @signature :
+ * @dram_size : In terms of MB
+ * @subsysid : device PCI subsystem ID
+ * @subsysvid : device PCI subsystem vendor ID
+ * @notify_counters :
+ * @pad1k : 135 + 889 resvd = 1024 total size
+ *
+ * This structures holds the information about the controller which is not
+ * expected to change dynamically.
+ *
+ * The current value of config signature is 0x00282008:
+ * 0x28 = MAX_LOGICAL_DRIVES,
+ * 0x20 = Number of stripes and
+ * 0x08 = Number of spans
+ */
+typedef struct {
+ uint32_t data_size;
+ uint32_t config_signature;
+ uint8_t fw_version[16];
+ uint8_t bios_version[16];
+ uint8_t product_name[80];
+ uint8_t max_commands;
+ uint8_t nchannels;
+ uint8_t fc_loop_present;
+ uint8_t mem_type;
+ uint32_t signature;
+ uint16_t dram_size;
+ uint16_t subsysid;
+ uint16_t subsysvid;
+ uint8_t notify_counters;
+ uint8_t pad1k[889];
+} __attribute__ ((packed)) mraid_pinfo_t;
+
+
+/**
+ * mraid_notify_t - the notification structure
+ * @global_counter : Any change increments this counter
+ * @param_counter : Indicates any params changed
+ * @param_id : Param modified - defined below
+ * @param_val : New val of last param modified
+ * @write_config_counter : write config occurred
+ * @write_config_rsvd :
+ * @ldrv_op_counter : Indicates ldrv op started/completed
+ * @ldrv_opid : ldrv num
+ * @ldrv_opcmd : ldrv operation - defined below
+ * @ldrv_opstatus : status of the operation
+ * @ldrv_state_counter : Indicates change of ldrv state
+ * @ldrv_state_id : ldrv num
+ * @ldrv_state_new : New state
+ * @ldrv_state_old : old state
+ * @pdrv_state_counter : Indicates change of ldrv state
+ * @pdrv_state_id : pdrv id
+ * @pdrv_state_new : New state
+ * @pdrv_state_old : old state
+ * @pdrv_fmt_counter : Indicates pdrv format started/over
+ * @pdrv_fmt_id : pdrv id
+ * @pdrv_fmt_val : format started/over
+ * @pdrv_fmt_rsvd :
+ * @targ_xfer_counter : Indicates SCSI-2 Xfer rate change
+ * @targ_xfer_id : pdrv Id
+ * @targ_xfer_val : new Xfer params of last pdrv
+ * @targ_xfer_rsvd :
+ * @fcloop_id_chg_counter : Indicates loopid changed
+ * @fcloopid_pdrvid : pdrv id
+ * @fcloop_id0 : loopid on fc loop 0
+ * @fcloop_id1 : loopid on fc loop 1
+ * @fcloop_state_counter : Indicates loop state changed
+ * @fcloop_state0 : state of fc loop 0
+ * @fcloop_state1 : state of fc loop 1
+ * @fcloop_state_rsvd :
+ */
+typedef struct {
+ uint32_t global_counter;
+ uint8_t param_counter;
+ uint8_t param_id;
+ uint16_t param_val;
+ uint8_t write_config_counter;
+ uint8_t write_config_rsvd[3];
+ uint8_t ldrv_op_counter;
+ uint8_t ldrv_opid;
+ uint8_t ldrv_opcmd;
+ uint8_t ldrv_opstatus;
+ uint8_t ldrv_state_counter;
+ uint8_t ldrv_state_id;
+ uint8_t ldrv_state_new;
+ uint8_t ldrv_state_old;
+ uint8_t pdrv_state_counter;
+ uint8_t pdrv_state_id;
+ uint8_t pdrv_state_new;
+ uint8_t pdrv_state_old;
+ uint8_t pdrv_fmt_counter;
+ uint8_t pdrv_fmt_id;
+ uint8_t pdrv_fmt_val;
+ uint8_t pdrv_fmt_rsvd;
+ uint8_t targ_xfer_counter;
+ uint8_t targ_xfer_id;
+ uint8_t targ_xfer_val;
+ uint8_t targ_xfer_rsvd;
+ uint8_t fcloop_id_chg_counter;
+ uint8_t fcloopid_pdrvid;
+ uint8_t fcloop_id0;
+ uint8_t fcloop_id1;
+ uint8_t fcloop_state_counter;
+ uint8_t fcloop_state0;
+ uint8_t fcloop_state1;
+ uint8_t fcloop_state_rsvd;
+} __attribute__ ((packed)) mraid_notify_t;
+
+
+/**
+ * mraid_inquiry3_t - enquiry for device information
+ *
+ * @data_size : current size in bytes (not including resvd)
+ * @notify :
+ * @notify_rsvd :
+ * @rebuild_rate : rebuild rate (0% - 100%)
+ * @cache_flush_int : cache flush interval in seconds
+ * @sense_alert :
+ * @drive_insert_count : drive insertion count
+ * @battery_status :
+ * @num_ldrv : no. of Log Drives configured
+ * @recon_state : state of reconstruct
+ * @ldrv_op_status : logdrv Status
+ * @ldrv_size : size of each log drv
+ * @ldrv_prop :
+ * @ldrv_state : state of log drives
+ * @pdrv_state : state of phys drvs.
+ * @pdrv_format :
+ * @targ_xfer : phys device transfer rate
+ * @pad1k : 761 + 263reserved = 1024 bytes total size
+ */
+#define MAX_NOTIFY_SIZE 0x80
+#define CUR_NOTIFY_SIZE sizeof(mraid_notify_t)
+
+typedef struct {
+ uint32_t data_size;
+
+ mraid_notify_t notify;
+
+ uint8_t notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
+
+ uint8_t rebuild_rate;
+ uint8_t cache_flush_int;
+ uint8_t sense_alert;
+ uint8_t drive_insert_count;
+
+ uint8_t battery_status;
+ uint8_t num_ldrv;
+ uint8_t recon_state[MAX_LOGICAL_DRIVES_40LD / 8];
+ uint16_t ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8];
+
+ uint32_t ldrv_size[MAX_LOGICAL_DRIVES_40LD];
+ uint8_t ldrv_prop[MAX_LOGICAL_DRIVES_40LD];
+ uint8_t ldrv_state[MAX_LOGICAL_DRIVES_40LD];
+ uint8_t pdrv_state[FC_MAX_PHYSICAL_DEVICES];
+ uint16_t pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16];
+
+ uint8_t targ_xfer[80];
+ uint8_t pad1k[263];
+} __attribute__ ((packed)) mraid_inquiry3_t;
+
+
+/**
+ * mraid_adapinfo_t - information about the adapter
+ * @max_commands : max concurrent commands supported
+ * @rebuild_rate : rebuild rate - 0% thru 100%
+ * @max_targ_per_chan : max targ per channel
+ * @nchannels : number of channels on HBA
+ * @fw_version : firmware version
+ * @age_of_flash : number of times FW has been flashed
+ * @chip_set_value : contents of 0xC0000832
+ * @dram_size : in MB
+ * @cache_flush_interval : in seconds
+ * @bios_version :
+ * @board_type :
+ * @sense_alert :
+ * @write_config_count : increase with every configuration change
+ * @drive_inserted_count : increase with every drive inserted
+ * @inserted_drive : channel:Id of inserted drive
+ * @battery_status : bit 0: battery module missing
+ * bit 1: VBAD
+ * bit 2: temperature high
+ * bit 3: battery pack missing
+ * bit 4,5:
+ * 00 - charge complete
+ * 01 - fast charge in progress
+ * 10 - fast charge fail
+ * 11 - undefined
+ * bit 6: counter > 1000
+ * bit 7: Undefined
+ * @dec_fault_bus_info :
+ */
+typedef struct {
+ uint8_t max_commands;
+ uint8_t rebuild_rate;
+ uint8_t max_targ_per_chan;
+ uint8_t nchannels;
+ uint8_t fw_version[4];
+ uint16_t age_of_flash;
+ uint8_t chip_set_value;
+ uint8_t dram_size;
+ uint8_t cache_flush_interval;
+ uint8_t bios_version[4];
+ uint8_t board_type;
+ uint8_t sense_alert;
+ uint8_t write_config_count;
+ uint8_t battery_status;
+ uint8_t dec_fault_bus_info;
+} __attribute__ ((packed)) mraid_adapinfo_t;
+
+
+/**
+ * mraid_ldrv_info_t - information about the logical drives
+ * @nldrv : Number of logical drives configured
+ * @rsvd :
+ * @size : size of each logical drive
+ * @prop :
+ * @state : state of each logical drive
+ */
+typedef struct {
+ uint8_t nldrv;
+ uint8_t rsvd[3];
+ uint32_t size[MAX_LOGICAL_DRIVES_8LD];
+ uint8_t prop[MAX_LOGICAL_DRIVES_8LD];
+ uint8_t state[MAX_LOGICAL_DRIVES_8LD];
+} __attribute__ ((packed)) mraid_ldrv_info_t;
+
+
+/**
+ * mraid_pdrv_info_t - information about the physical drives
+ * @pdrv_state : state of each physical drive
+ */
+typedef struct {
+ uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
+ uint8_t rsvd;
+} __attribute__ ((packed)) mraid_pdrv_info_t;
+
+
+/**
+ * mraid_inquiry_t - RAID inquiry, mailbox command 0x05
+ * @mraid_adapinfo_t : adapter information
+ * @mraid_ldrv_info_t : logical drives information
+ * @mraid_pdrv_info_t : physical drives information
+ */
+typedef struct {
+ mraid_adapinfo_t adapter_info;
+ mraid_ldrv_info_t logdrv_info;
+ mraid_pdrv_info_t pdrv_info;
+} __attribute__ ((packed)) mraid_inquiry_t;
+
+
+/**
+ * mraid_extinq_t - RAID extended inquiry, mailbox command 0x04
+ *
+ * @raid_inq : raid inquiry
+ * @phys_drv_format :
+ * @stack_attn :
+ * @modem_status :
+ * @rsvd :
+ */
+typedef struct {
+ mraid_inquiry_t raid_inq;
+ uint16_t phys_drv_format[MAX_MBOX_CHANNELS];
+ uint8_t stack_attn;
+ uint8_t modem_status;
+ uint8_t rsvd[2];
+} __attribute__ ((packed)) mraid_extinq_t;
+
+
+/**
+ * adap_device_t - device information
+ * @channel : channel fpor the device
+ * @target : target ID of the device
+ */
+typedef struct {
+ uint8_t channel;
+ uint8_t target;
+}__attribute__ ((packed)) adap_device_t;
+
+
+/**
+ * adap_span_40ld_t - 40LD span
+ * @start_blk : starting block
+ * @num_blks : number of blocks
+ */
+typedef struct {
+ uint32_t start_blk;
+ uint32_t num_blks;
+ adap_device_t device[MAX_ROW_SIZE_40LD];
+}__attribute__ ((packed)) adap_span_40ld_t;
+
+
+/**
+ * adap_span_8ld_t - 8LD span
+ * @start_blk : starting block
+ * @num_blks : number of blocks
+ */
+typedef struct {
+ uint32_t start_blk;
+ uint32_t num_blks;
+ adap_device_t device[MAX_ROW_SIZE_8LD];
+}__attribute__ ((packed)) adap_span_8ld_t;
+
+
+/**
+ * logdrv_param_t - logical drives parameters
+ *
+ * @span_depth : total number of spans
+ * @level : RAID level
+ * @read_ahead : read ahead, no read ahead, adaptive read ahead
+ * @stripe_sz : encoded stripe size
+ * @status : status of the logical drive
+ * @write_mode : write mode, write_through/write_back
+ * @direct_io : direct io or through cache
+ * @row_size : number of stripes in a row
+ */
+typedef struct {
+ uint8_t span_depth;
+ uint8_t level;
+ uint8_t read_ahead;
+ uint8_t stripe_sz;
+ uint8_t status;
+ uint8_t write_mode;
+ uint8_t direct_io;
+ uint8_t row_size;
+} __attribute__ ((packed)) logdrv_param_t;
+
+
+/**
+ * logdrv_40ld_t - logical drive definition for 40LD controllers
+ * @lparam : logical drives parameters
+ * @span : span
+ */
+typedef struct {
+ logdrv_param_t lparam;
+ adap_span_40ld_t span[SPAN_DEPTH_8_SPANS];
+}__attribute__ ((packed)) logdrv_40ld_t;
+
+
+/**
+ * logdrv_8ld_span8_t - logical drive definition for 8LD controllers
+ * @lparam : logical drives parameters
+ * @span : span
+ *
+ * 8-LD logical drive with up to 8 spans
+ */
+typedef struct {
+ logdrv_param_t lparam;
+ adap_span_8ld_t span[SPAN_DEPTH_8_SPANS];
+}__attribute__ ((packed)) logdrv_8ld_span8_t;
+
+
+/**
+ * logdrv_8ld_span4_t - logical drive definition for 8LD controllers
+ * @lparam : logical drives parameters
+ * @span : span
+ *
+ * 8-LD logical drive with up to 4 spans
+ */
+typedef struct {
+ logdrv_param_t lparam;
+ adap_span_8ld_t span[SPAN_DEPTH_4_SPANS];
+}__attribute__ ((packed)) logdrv_8ld_span4_t;
+
+
+/**
+ * phys_drive_t - physical device information
+ * @type : Type of the device
+ * @cur_status : current status of the device
+ * @tag_depth : Level of tagging
+ * @sync_neg : sync negotiation - ENABLE or DISABLE
+ * @size : configurable size in terms of 512 byte
+ */
+typedef struct {
+ uint8_t type;
+ uint8_t cur_status;
+ uint8_t tag_depth;
+ uint8_t sync_neg;
+ uint32_t size;
+}__attribute__ ((packed)) phys_drive_t;
+
+
+/**
+ * disk_array_40ld_t - disk array for 40LD controllers
+ * @numldrv : number of logical drives
+ * @resvd :
+ * @ldrv : logical drives information
+ * @pdrv : physical drives information
+ */
+typedef struct {
+ uint8_t numldrv;
+ uint8_t resvd[3];
+ logdrv_40ld_t ldrv[MAX_LOGICAL_DRIVES_40LD];
+ phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_40ld_t;
+
+
+/**
+ * disk_array_8ld_span8_t - disk array for 8LD controllers
+ * @numldrv : number of logical drives
+ * @resvd :
+ * @ldrv : logical drives information
+ * @pdrv : physical drives information
+ *
+ * Disk array for 8LD logical drives with up to 8 spans
+ */
+typedef struct {
+ uint8_t numldrv;
+ uint8_t resvd[3];
+ logdrv_8ld_span8_t ldrv[MAX_LOGICAL_DRIVES_8LD];
+ phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld_span8_t;
+
+
+/**
+ * disk_array_8ld_span4_t - disk array for 8LD controllers
+ * @numldrv : number of logical drives
+ * @resvd :
+ * @ldrv : logical drives information
+ * @pdrv : physical drives information
+ *
+ * Disk array for 8LD logical drives with up to 4 spans
+ */
+typedef struct {
+ uint8_t numldrv;
+ uint8_t resvd[3];
+ logdrv_8ld_span4_t ldrv[MAX_LOGICAL_DRIVES_8LD];
+ phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld_span4_t;
+
+
+/**
+ * struct private_bios_data - bios private data for boot devices
+ * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB,
+ * 0x1000 - 8GB, Others values are invalid
+ * @unused : bits 4-7 are unused
+ * @boot_drv : logical drive set as boot drive, 0..7 - for 8LD cards,
+ * 0..39 - for 40LD cards
+ * @cksum : 0-(sum of first 13 bytes of this structure)
+ */
+struct private_bios_data {
+ uint8_t geometry :4;
+ uint8_t unused :4;
+ uint8_t boot_drv;
+ uint8_t rsvd[12];
+ uint16_t cksum;
+} __attribute__ ((packed));
+
+
+/**
+ * mbox_sgl64 - 64-bit scatter list for mailbox based controllers
+ * @address : address of the buffer
+ * @length : data transfer length
+ */
+typedef struct {
+ uint64_t address;
+ uint32_t length;
+} __attribute__ ((packed)) mbox_sgl64;
+
+/**
+ * mbox_sgl32 - 32-bit scatter list for mailbox based controllers
+ * @address : address of the buffer
+ * @length : data transfer length
+ */
+typedef struct {
+ uint32_t address;
+ uint32_t length;
+} __attribute__ ((packed)) mbox_sgl32;
+
+#endif // _MRAID_MBOX_DEFS_H_
+
+/* vim: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
new file mode 100644
index 000000000..1d037ed52
--- /dev/null
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -0,0 +1,290 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : mega_common.h
+ *
+ * Libaray of common routine used by all low-level megaraid drivers
+ */
+
+#ifndef _MEGA_COMMON_H_
+#define _MEGA_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+
+#define LSI_MAX_CHANNELS 16
+#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
+
+#define HBA_SIGNATURE_64_BIT 0x299
+#define PCI_CONF_AMISIG64 0xa4
+
+#define MEGA_SCSI_INQ_EVPD 1
+#define MEGA_INVALID_FIELD_IN_CDB 0x24
+
+
+/**
+ * scb_t - scsi command control block
+ * @ccb : command control block for individual driver
+ * @list : list of control blocks
+ * @gp : general purpose field for LLDs
+ * @sno : all SCBs have a serial number
+ * @scp : associated scsi command
+ * @state : current state of scb
+ * @dma_dir : direction of data transfer
+ * @dma_type : transfer with sg list, buffer, or no data transfer
+ * @dev_channel : actual channel on the device
+ * @dev_target : actual target on the device
+ * @status : completion status
+ *
+ * This is our central data structure to issue commands the each driver.
+ * Driver specific data structures are maintained in the ccb field.
+ * scb provides a field 'gp', which can be used by LLD for its own purposes
+ *
+ * dev_channel and dev_target must be initialized with the actual channel and
+ * target on the controller.
+ */
+typedef struct {
+ caddr_t ccb;
+ struct list_head list;
+ unsigned long gp;
+ unsigned int sno;
+ struct scsi_cmnd *scp;
+ uint32_t state;
+ uint32_t dma_direction;
+ uint32_t dma_type;
+ uint16_t dev_channel;
+ uint16_t dev_target;
+ uint32_t status;
+} scb_t;
+
+/*
+ * SCB states as it transitions from one state to another
+ */
+#define SCB_FREE 0x0000 /* on the free list */
+#define SCB_ACTIVE 0x0001 /* off the free list */
+#define SCB_PENDQ 0x0002 /* on the pending queue */
+#define SCB_ISSUED 0x0004 /* issued - owner f/w */
+#define SCB_ABORT 0x0008 /* Got an abort for this one */
+#define SCB_RESET 0x0010 /* Got a reset for this one */
+
+/*
+ * DMA types for scb
+ */
+#define MRAID_DMA_NONE 0x0000 /* no data transfer for this command */
+#define MRAID_DMA_WSG 0x0001 /* data transfer using a sg list */
+#define MRAID_DMA_WBUF 0x0002 /* data transfer using a contiguous buffer */
+
+
+/**
+ * struct adapter_t - driver's initialization structure
+ * @aram dpc_h : tasklet handle
+ * @pdev : pci configuration pointer for kernel
+ * @host : pointer to host structure of mid-layer
+ * @lock : synchronization lock for mid-layer and driver
+ * @quiescent : driver is quiescent for now.
+ * @outstanding_cmds : number of commands pending in the driver
+ * @kscb_list : pointer to the bulk of SCBs pointers for IO
+ * @kscb_pool : pool of free scbs for IO
+ * @kscb_pool_lock : lock for pool of free scbs
+ * @pend_list : pending commands list
+ * @pend_list_lock : exclusion lock for pending commands list
+ * @completed_list : list of completed commands
+ * @completed_list_lock : exclusion lock for list of completed commands
+ * @sglen : max sg elements supported
+ * @device_ids : to convert kernel device addr to our devices.
+ * @raid_device : raid adapter specific pointer
+ * @max_channel : maximum channel number supported - inclusive
+ * @max_target : max target supported - inclusive
+ * @max_lun : max lun supported - inclusive
+ * @unique_id : unique identifier for each adapter
+ * @irq : IRQ for this adapter
+ * @ito : internal timeout value, (-1) means no timeout
+ * @ibuf : buffer to issue internal commands
+ * @ibuf_dma_h : dma handle for the above buffer
+ * @uscb_list : SCB pointers for user cmds, common mgmt module
+ * @uscb_pool : pool of SCBs for user commands
+ * @uscb_pool_lock : exclusion lock for these SCBs
+ * @max_cmds : max outstanding commands
+ * @fw_version : firmware version
+ * @bios_version : bios version
+ * @max_cdb_sz : biggest CDB size supported.
+ * @ha : is high availability present - clustering
+ * @init_id : initiator ID, the default value should be 7
+ * @max_sectors : max sectors per request
+ * @cmd_per_lun : max outstanding commands per LUN
+ * @being_detached : set when unloading, no more mgmt calls
+ *
+ *
+ * mraid_setup_device_map() can be called anytime after the device map is
+ * available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is
+ * required, usually from LLD's queue entry point. The formar API sets up the
+ * MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) to find out if the
+ * device in question is a logical drive.
+ *
+ * quiescent flag should be set by the driver if it is not accepting more
+ * commands
+ *
+ * NOTE: The fields of this structures are placed to minimize cache misses
+ */
+
+// amount of space required to store the bios and firmware version strings
+#define VERSION_SIZE 16
+
+typedef struct {
+ struct tasklet_struct dpc_h;
+ struct pci_dev *pdev;
+ struct Scsi_Host *host;
+ spinlock_t lock;
+ uint8_t quiescent;
+ int outstanding_cmds;
+ scb_t *kscb_list;
+ struct list_head kscb_pool;
+ spinlock_t kscb_pool_lock;
+ struct list_head pend_list;
+ spinlock_t pend_list_lock;
+ struct list_head completed_list;
+ spinlock_t completed_list_lock;
+ uint16_t sglen;
+ int device_ids[LSI_MAX_CHANNELS]
+ [LSI_MAX_LOGICAL_DRIVES_64LD];
+ caddr_t raid_device;
+ uint8_t max_channel;
+ uint16_t max_target;
+ uint8_t max_lun;
+
+ uint32_t unique_id;
+ int irq;
+ uint8_t ito;
+ caddr_t ibuf;
+ dma_addr_t ibuf_dma_h;
+ scb_t *uscb_list;
+ struct list_head uscb_pool;
+ spinlock_t uscb_pool_lock;
+ int max_cmds;
+ uint8_t fw_version[VERSION_SIZE];
+ uint8_t bios_version[VERSION_SIZE];
+ uint8_t max_cdb_sz;
+ uint8_t ha;
+ uint16_t init_id;
+ uint16_t max_sectors;
+ uint16_t cmd_per_lun;
+ atomic_t being_detached;
+} adapter_t;
+
+#define SCSI_FREE_LIST_LOCK(adapter) (&adapter->kscb_pool_lock)
+#define USER_FREE_LIST_LOCK(adapter) (&adapter->uscb_pool_lock)
+#define PENDING_LIST_LOCK(adapter) (&adapter->pend_list_lock)
+#define COMPLETED_LIST_LOCK(adapter) (&adapter->completed_list_lock)
+
+
+// conversion from scsi command
+#define SCP2HOST(scp) (scp)->device->host // to host
+#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state
+#define SCP2CHANNEL(scp) (scp)->device->channel // to channel
+#define SCP2TARGET(scp) (scp)->device->id // to target
+#define SCP2LUN(scp) (u32)(scp)->device->lun // to LUN
+
+// generic macro to convert scsi command and host to controller's soft state
+#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0])
+#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))
+
+
+#define MRAID_IS_LOGICAL(adp, scp) \
+ (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0
+
+#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \
+ (sdev->channel == (adp)->max_channel) ? 1 : 0
+
+/**
+ * MRAID_GET_DEVICE_MAP - device ids
+ * @adp : adapter's soft state
+ * @scp : mid-layer scsi command pointer
+ * @p_chan : physical channel on the controller
+ * @target : target id of the device or logical drive number
+ * @islogical : set if the command is for the logical drive
+ *
+ * Macro to retrieve information about device class, logical or physical and
+ * the corresponding physical channel and target or logical drive number
+ */
+#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \
+ /* \
+ * Is the request coming for the virtual channel \
+ */ \
+ islogical = MRAID_IS_LOGICAL(adp, scp); \
+ \
+ /* \
+ * Get an index into our table of drive ids mapping \
+ */ \
+ if (islogical) { \
+ p_chan = 0xFF; \
+ target = \
+ (adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)]; \
+ } \
+ else { \
+ p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)] \
+ [SCP2TARGET(scp)] >> 8) & 0xFF; \
+ target = ((adp)->device_ids[SCP2CHANNEL(scp)] \
+ [SCP2TARGET(scp)] & 0xFF); \
+ }
+
+/*
+ * ### Helper routines ###
+ */
+#define LSI_DBGLVL mraid_debug_level // each LLD must define a global
+ // mraid_debug_level
+
+#ifdef DEBUG
+#if defined (_ASSERT_PANIC)
+#define ASSERT_ACTION panic
+#else
+#define ASSERT_ACTION printk
+#endif
+
+#define ASSERT(expression) \
+ if (!(expression)) { \
+ ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
+ #expression, __FILE__, __LINE__, __func__); \
+ }
+#else
+#define ASSERT(expression)
+#endif
+
+/**
+ * struct mraid_pci_blk - structure holds DMA memory block info
+ * @vaddr : virtual address to a memory block
+ * @dma_addr : DMA handle to a memory block
+ *
+ * This structure is filled up for the caller. It is the responsibilty of the
+ * caller to allocate this array big enough to store addresses for all
+ * requested elements
+ */
+struct mraid_pci_blk {
+ caddr_t vaddr;
+ dma_addr_t dma_addr;
+};
+
+#endif // _MEGA_COMMON_H_
+
+// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
new file mode 100644
index 000000000..05f6e4ec3
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -0,0 +1,300 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_ioctl.h
+ *
+ * Definitions to interface with user level applications
+ */
+
+#ifndef _MEGARAID_IOCTL_H_
+#define _MEGARAID_IOCTL_H_
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "mbox_defs.h"
+
+/*
+ * console messages debug levels
+ */
+#define CL_ANN 0 /* print unconditionally, announcements */
+#define CL_DLEVEL1 1 /* debug level 1, informative */
+#define CL_DLEVEL2 2 /* debug level 2, verbose */
+#define CL_DLEVEL3 3 /* debug level 3, very verbose */
+
+/**
+ * con_log() - console log routine
+ * @level : indicates the severity of the message.
+ * @fmt : format string
+ *
+ * con_log displays the error messages on the console based on the current
+ * debug level. Also it attaches the appropriate kernel severity level with
+ * the message.
+ */
+#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt;
+
+/*
+ * Definitions & Declarations needed to use common management module
+ */
+
+#define MEGAIOC_MAGIC 'm'
+#define MEGAIOCCMD _IOWR(MEGAIOC_MAGIC, 0, mimd_t)
+
+#define MEGAIOC_QNADAP 'm' /* Query # of adapters */
+#define MEGAIOC_QDRVRVER 'e' /* Query driver version */
+#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */
+
+#define USCSICMD 0x80
+#define UIOC_RD 0x00001
+#define UIOC_WR 0x00002
+
+#define MBOX_CMD 0x00000
+#define GET_DRIVER_VER 0x10000
+#define GET_N_ADAP 0x20000
+#define GET_ADAP_INFO 0x30000
+#define GET_CAP 0x40000
+#define GET_STATS 0x50000
+#define GET_IOCTL_VERSION 0x01
+
+#define EXT_IOCTL_SIGN_SZ 16
+#define EXT_IOCTL_SIGN "$$_EXTD_IOCTL_$$"
+
+#define MBOX_LEGACY 0x00 /* ioctl has legacy mbox*/
+#define MBOX_HPE 0x01 /* ioctl has hpe mbox */
+
+#define APPTYPE_MIMD 0x00 /* old existing apps */
+#define APPTYPE_UIOC 0x01 /* new apps using uioc */
+
+#define IOCTL_ISSUE 0x00000001 /* Issue ioctl */
+#define IOCTL_ABORT 0x00000002 /* Abort previous ioctl */
+
+#define DRVRTYPE_MBOX 0x00000001 /* regular mbox driver */
+#define DRVRTYPE_HPE 0x00000002 /* new hpe driver */
+
+#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) )
+#define GETADAP(mkadap) ((mkadap) ^ MEGAIOC_MAGIC << 8)
+
+#define MAX_DMA_POOLS 5 /* 4k, 8k, 16k, 32k, 64k*/
+
+
+/**
+ * struct uioc_t - the common ioctl packet structure
+ *
+ * @signature : Must be "$$_EXTD_IOCTL_$$"
+ * @mb_type : Type of the mail box (MB_LEGACY or MB_HPE)
+ * @app_type : Type of the issuing application (existing or new)
+ * @opcode : Opcode of the command
+ * @adapno : Adapter number
+ * @cmdbuf : Pointer to buffer - can point to mbox or plain data buffer
+ * @xferlen : xferlen for DCMD and non mailbox commands
+ * @data_dir : Direction of the data transfer
+ * @status : Status from the driver
+ * @reserved : reserved bytes for future expansion
+ *
+ * @user_data : user data transfer address is saved in this
+ * @user_data_len: length of the data buffer sent by user app
+ * @user_pthru : user passthru address is saves in this (null if DCMD)
+ * @pthru32 : kernel address passthru (allocated per kioc)
+ * @pthru32_h : physicall address of @pthru32
+ * @list : for kioc free pool list maintenance
+ * @done : call back routine for llds to call when kioc is completed
+ * @buf_vaddr : dma pool buffer attached to kioc for data transfer
+ * @buf_paddr : physical address of the dma pool buffer
+ * @pool_index : index of the dma pool that @buf_vaddr is taken from
+ * @free_buf : indicates if buffer needs to be freed after kioc completes
+ *
+ * Note : All LSI drivers understand only this packet. Any other
+ * : format sent by applications would be converted to this.
+ */
+typedef struct uioc {
+
+/* User Apps: */
+
+ uint8_t signature[EXT_IOCTL_SIGN_SZ];
+ uint16_t mb_type;
+ uint16_t app_type;
+ uint32_t opcode;
+ uint32_t adapno;
+ uint64_t cmdbuf;
+ uint32_t xferlen;
+ uint32_t data_dir;
+ int32_t status;
+ uint8_t reserved[128];
+
+/* Driver Data: */
+ void __user * user_data;
+ uint32_t user_data_len;
+
+ /* 64bit alignment */
+ uint32_t pad_for_64bit_align;
+
+ mraid_passthru_t __user *user_pthru;
+
+ mraid_passthru_t *pthru32;
+ dma_addr_t pthru32_h;
+
+ struct list_head list;
+ void (*done)(struct uioc*);
+
+ caddr_t buf_vaddr;
+ dma_addr_t buf_paddr;
+ int8_t pool_index;
+ uint8_t free_buf;
+
+ uint8_t timedout;
+
+} __attribute__ ((aligned(1024),packed)) uioc_t;
+
+
+/**
+ * struct mraid_hba_info - information about the controller
+ *
+ * @pci_vendor_id : PCI vendor id
+ * @pci_device_id : PCI device id
+ * @subsystem_vendor_id : PCI subsystem vendor id
+ * @subsystem_device_id : PCI subsystem device id
+ * @baseport : base port of hba memory
+ * @pci_bus : PCI bus
+ * @pci_dev_fn : PCI device/function values
+ * @irq : interrupt vector for the device
+ *
+ * Extended information of 256 bytes about the controller. Align on the single
+ * byte boundary so that 32-bit applications can be run on 64-bit platform
+ * drivers withoug re-compilation.
+ * NOTE: reduce the number of reserved bytes whenever new field are added, so
+ * that total size of the structure remains 256 bytes.
+ */
+typedef struct mraid_hba_info {
+
+ uint16_t pci_vendor_id;
+ uint16_t pci_device_id;
+ uint16_t subsys_vendor_id;
+ uint16_t subsys_device_id;
+
+ uint64_t baseport;
+ uint8_t pci_bus;
+ uint8_t pci_dev_fn;
+ uint8_t pci_slot;
+ uint8_t irq;
+
+ uint32_t unique_id;
+ uint32_t host_no;
+
+ uint8_t num_ldrv;
+} __attribute__ ((aligned(256), packed)) mraid_hba_info_t;
+
+
+/**
+ * mcontroller : adapter info structure for old mimd_t apps
+ *
+ * @base : base address
+ * @irq : irq number
+ * @numldrv : number of logical drives
+ * @pcibus : pci bus
+ * @pcidev : pci device
+ * @pcifun : pci function
+ * @pciid : pci id
+ * @pcivendor : vendor id
+ * @pcislot : slot number
+ * @uid : unique id
+ */
+typedef struct mcontroller {
+
+ uint64_t base;
+ uint8_t irq;
+ uint8_t numldrv;
+ uint8_t pcibus;
+ uint16_t pcidev;
+ uint8_t pcifun;
+ uint16_t pciid;
+ uint16_t pcivendor;
+ uint8_t pcislot;
+ uint32_t uid;
+
+} __attribute__ ((packed)) mcontroller_t;
+
+
+/**
+ * mm_dmapool_t : Represents one dma pool with just one buffer
+ *
+ * @vaddr : Virtual address
+ * @paddr : DMA physicall address
+ * @bufsize : In KB - 4 = 4k, 8 = 8k etc.
+ * @handle : Handle to the dma pool
+ * @lock : lock to synchronize access to the pool
+ * @in_use : If pool already in use, attach new block
+ */
+typedef struct mm_dmapool {
+ caddr_t vaddr;
+ dma_addr_t paddr;
+ uint32_t buf_size;
+ struct dma_pool *handle;
+ spinlock_t lock;
+ uint8_t in_use;
+} mm_dmapool_t;
+
+
+/**
+ * mraid_mmadp_t: Structure that drivers pass during (un)registration
+ *
+ * @unique_id : Any unique id (usually PCI bus+dev+fn)
+ * @drvr_type : megaraid or hpe (DRVRTYPE_MBOX or DRVRTYPE_HPE)
+ * @drv_data : Driver specific; not touched by the common module
+ * @timeout : timeout for issued kiocs
+ * @max_kioc : Maximum ioctl packets acceptable by the lld
+ * @pdev : pci dev; used for allocating dma'ble memory
+ * @issue_uioc : Driver supplied routine to issue uioc_t commands
+ * : issue_uioc(drvr_data, kioc, ISSUE/ABORT, uioc_done)
+ * @quiescent : flag to indicate if ioctl can be issued to this adp
+ * @list : attach with the global list of adapters
+ * @kioc_list : block of mem for @max_kioc number of kiocs
+ * @kioc_pool : pool of free kiocs
+ * @kioc_pool_lock : protection for free pool
+ * @kioc_semaphore : so as not to exceed @max_kioc parallel ioctls
+ * @mbox_list : block of mem for @max_kioc number of mboxes
+ * @pthru_dma_pool : DMA pool to allocate passthru packets
+ * @dma_pool_list : array of dma pools
+ */
+
+typedef struct mraid_mmadp {
+
+/* Filled by driver */
+
+ uint32_t unique_id;
+ uint32_t drvr_type;
+ unsigned long drvr_data;
+ uint16_t timeout;
+ uint8_t max_kioc;
+
+ struct pci_dev *pdev;
+
+ int(*issue_uioc)(unsigned long, uioc_t *, uint32_t);
+
+/* Maintained by common module */
+ uint32_t quiescent;
+
+ struct list_head list;
+ uioc_t *kioc_list;
+ struct list_head kioc_pool;
+ spinlock_t kioc_pool_lock;
+ struct semaphore kioc_semaphore;
+
+ mbox64_t *mbox_list;
+ struct dma_pool *pthru_dma_pool;
+ mm_dmapool_t dma_pool_list[MAX_DMA_POOLS];
+
+} mraid_mmadp_t;
+
+int mraid_mm_register_adp(mraid_mmadp_t *);
+int mraid_mm_unregister_adp(uint32_t);
+uint32_t mraid_mm_adapter_app_handle(uint32_t);
+
+#endif /* _MEGARAID_IOCTL_H_ */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
new file mode 100644
index 000000000..f0987f22e
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -0,0 +1,4145 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mbox.c
+ * Version : v2.20.5.1 (Nov 16 2006)
+ *
+ * Authors:
+ * Atul Mukker <Atul.Mukker@lsi.com>
+ * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsi.com>
+ * Manoj Jose <Manoj.Jose@lsi.com>
+ * Seokmann Ju
+ *
+ * List of supported controllers
+ *
+ * OEM Product Name VID DID SSVID SSID
+ * --- ------------ --- --- ---- ----
+ * Dell PERC3/QC 101E 1960 1028 0471
+ * Dell PERC3/DC 101E 1960 1028 0493
+ * Dell PERC3/SC 101E 1960 1028 0475
+ * Dell PERC3/Di 1028 1960 1028 0123
+ * Dell PERC4/SC 1000 1960 1028 0520
+ * Dell PERC4/DC 1000 1960 1028 0518
+ * Dell PERC4/QC 1000 0407 1028 0531
+ * Dell PERC4/Di 1028 000F 1028 014A
+ * Dell PERC 4e/Si 1028 0013 1028 016c
+ * Dell PERC 4e/Di 1028 0013 1028 016d
+ * Dell PERC 4e/Di 1028 0013 1028 016e
+ * Dell PERC 4e/Di 1028 0013 1028 016f
+ * Dell PERC 4e/Di 1028 0013 1028 0170
+ * Dell PERC 4e/DC 1000 0408 1028 0002
+ * Dell PERC 4e/SC 1000 0408 1028 0001
+ *
+ *
+ * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520
+ * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520
+ * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518
+ * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530
+ * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532
+ * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531
+ * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001
+ * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002
+ * LSI MegaRAID SATA 150-4 1000 1960 1000 4523
+ * LSI MegaRAID SATA 150-6 1000 1960 1000 0523
+ * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004
+ * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008
+ *
+ * INTEL RAID Controller SRCU42X 1000 0407 8086 0532
+ * INTEL RAID Controller SRCS16 1000 1960 8086 0523
+ * INTEL RAID Controller SRCU42E 1000 0408 8086 0002
+ * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530
+ * INTEL RAID Controller SRCS28X 1000 0409 8086 3008
+ * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431
+ * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499
+ * INTEL RAID Controller SRCU51L 1000 1960 8086 0520
+ *
+ * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065
+ *
+ * ACER MegaRAID ROMB-2E 1000 0408 1025 004D
+ *
+ * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
+ *
+ * For history of changes, see Documentation/scsi/ChangeLog.megaraid
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "megaraid_mbox.h"
+
+static int megaraid_init(void);
+static void megaraid_exit(void);
+
+static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
+static void megaraid_detach_one(struct pci_dev *);
+static void megaraid_mbox_shutdown(struct pci_dev *);
+
+static int megaraid_io_attach(adapter_t *);
+static void megaraid_io_detach(adapter_t *);
+
+static int megaraid_init_mbox(adapter_t *);
+static void megaraid_fini_mbox(adapter_t *);
+
+static int megaraid_alloc_cmd_packets(adapter_t *);
+static void megaraid_free_cmd_packets(adapter_t *);
+
+static int megaraid_mbox_setup_dma_pools(adapter_t *);
+static void megaraid_mbox_teardown_dma_pools(adapter_t *);
+
+static int megaraid_sysfs_alloc_resources(adapter_t *);
+static void megaraid_sysfs_free_resources(adapter_t *);
+
+static int megaraid_abort_handler(struct scsi_cmnd *);
+static int megaraid_reset_handler(struct scsi_cmnd *);
+
+static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
+static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
+static int megaraid_busywait_mbox(mraid_device_t *);
+static int megaraid_mbox_product_info(adapter_t *);
+static int megaraid_mbox_extended_cdb(adapter_t *);
+static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
+static int megaraid_mbox_support_random_del(adapter_t *);
+static int megaraid_mbox_get_max_sg(adapter_t *);
+static void megaraid_mbox_enum_raid_scsi(adapter_t *);
+static void megaraid_mbox_flush_cache(adapter_t *);
+static int megaraid_mbox_fire_sync_cmd(adapter_t *);
+
+static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
+static void megaraid_mbox_setup_device_map(adapter_t *);
+
+static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
+static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
+static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
+static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
+ struct scsi_cmnd *);
+static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
+ struct scsi_cmnd *);
+
+static irqreturn_t megaraid_isr(int, void *);
+
+static void megaraid_mbox_dpc(unsigned long);
+
+static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
+
+static int megaraid_cmm_register(adapter_t *);
+static int megaraid_cmm_unregister(adapter_t *);
+static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
+static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
+static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
+static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
+static int wait_till_fw_empty(adapter_t *);
+
+
+
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGARAID_VERSION);
+
+/*
+ * ### modules parameters for driver ###
+ */
+
+/*
+ * Set to enable driver to expose unconfigured disk to kernel
+ */
+static int megaraid_expose_unconf_disks = 0;
+module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
+MODULE_PARM_DESC(unconf_disks,
+ "Set to expose unconfigured disks to kernel (default=0)");
+
+/*
+ * driver wait time if the adapter's mailbox is busy
+ */
+static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
+module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
+MODULE_PARM_DESC(busy_wait,
+ "Max wait for mailbox in microseconds if busy (default=10)");
+
+/*
+ * number of sectors per IO command
+ */
+static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
+module_param_named(max_sectors, megaraid_max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+ "Maximum number of sectors per IO command (default=128)");
+
+/*
+ * number of commands per logical unit
+ */
+static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
+module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+ "Maximum number of commands per logical unit (default=64)");
+
+
+/*
+ * Fast driver load option, skip scanning for physical devices during load.
+ * This would result in non-disk devices being skipped during driver load
+ * time. These can be later added though, using /proc/scsi/scsi
+ */
+static unsigned int megaraid_fast_load = 0;
+module_param_named(fast_load, megaraid_fast_load, int, 0);
+MODULE_PARM_DESC(fast_load,
+ "Faster loading of the driver, skips physical devices! (default=0)");
+
+
+/*
+ * mraid_debug level - threshold for amount of information to be displayed by
+ * the driver. This level can be changed through modules parameters, ioctl or
+ * sysfs/proc interface. By default, print the announcement messages only.
+ */
+int mraid_debug_level = CL_ANN;
+module_param_named(debug_level, mraid_debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
+
+/*
+ * ### global data ###
+ */
+static uint8_t megaraid_mbox_version[8] =
+ { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
+
+
+/*
+ * PCI table for all supported controllers.
+ */
+static struct pci_device_id pci_id_table_g[] = {
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_PERC4_SC,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_SC,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_PERC4_DC,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_DC,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_VERDE,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_KOBUK,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_DOBSON,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_AMI,
+ PCI_DEVICE_ID_AMI_MEGARAID3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_AMI_MEGARAID3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_LINDSAY,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, pci_id_table_g);
+
+
+static struct pci_driver megaraid_pci_driver = {
+ .name = "megaraid",
+ .id_table = pci_id_table_g,
+ .probe = megaraid_probe_one,
+ .remove = megaraid_detach_one,
+ .shutdown = megaraid_mbox_shutdown,
+};
+
+
+
+// definitions for the device attributes for exporting logical drive number
+// for a scsi address (Host, Channel, Id, Lun)
+
+DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+ NULL);
+
+// Host template initializer for megaraid mbox sysfs device attributes
+static struct device_attribute *megaraid_shost_attrs[] = {
+ &dev_attr_megaraid_mbox_app_hndl,
+ NULL,
+};
+
+
+DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+
+// Host template initializer for megaraid mbox sysfs device attributes
+static struct device_attribute *megaraid_sdev_attrs[] = {
+ &dev_attr_megaraid_mbox_ld,
+ NULL,
+};
+
+/*
+ * Scsi host template for megaraid unified driver
+ */
+static struct scsi_host_template megaraid_template_g = {
+ .module = THIS_MODULE,
+ .name = "LSI Logic MegaRAID driver",
+ .proc_name = "megaraid",
+ .queuecommand = megaraid_queue_command,
+ .eh_abort_handler = megaraid_abort_handler,
+ .eh_device_reset_handler = megaraid_reset_handler,
+ .eh_bus_reset_handler = megaraid_reset_handler,
+ .eh_host_reset_handler = megaraid_reset_handler,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
+ .sdev_attrs = megaraid_sdev_attrs,
+ .shost_attrs = megaraid_shost_attrs,
+};
+
+
+/**
+ * megaraid_init - module load hook
+ *
+ * We register ourselves as hotplug enabled module and let PCI subsystem
+ * discover our adapters.
+ */
+static int __init
+megaraid_init(void)
+{
+ int rval;
+
+ // Announce the driver version
+ con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
+ MEGARAID_EXT_VERSION));
+
+ // check validity of module parameters
+ if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: max commands per lun reset to %d\n",
+ MBOX_MAX_SCSI_CMDS));
+
+ megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
+ }
+
+
+ // register as a PCI hot-plug driver module
+ rval = pci_register_driver(&megaraid_pci_driver);
+ if (rval < 0) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: could not register hotplug support.\n"));
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_exit - driver unload entry point
+ *
+ * We simply unwrap the megaraid_init routine here.
+ */
+static void __exit
+megaraid_exit(void)
+{
+ con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
+
+ // unregister as PCI hotplug driver
+ pci_unregister_driver(&megaraid_pci_driver);
+
+ return;
+}
+
+
+/**
+ * megaraid_probe_one - PCI hotplug entry point
+ * @pdev : handle to this controller's PCI configuration space
+ * @id : pci device id of the class of controllers
+ *
+ * This routine should be called whenever a new adapter is detected by the
+ * PCI hotplug susbsystem.
+ */
+static int
+megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ adapter_t *adapter;
+
+
+ // detected a new controller
+ con_log(CL_ANN, (KERN_INFO
+ "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device));
+
+ con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
+
+ if (pci_enable_device(pdev)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: pci_enable_device failed\n"));
+
+ return -ENODEV;
+ }
+
+ // Enable bus-mastering on this controller
+ pci_set_master(pdev);
+
+ // Allocate the per driver initialization structure
+ adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
+
+ if (adapter == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
+
+ goto out_probe_one;
+ }
+
+
+ // set up PCI related soft state and other pre-known parameters
+ adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ adapter->irq = pdev->irq;
+ adapter->pdev = pdev;
+
+ atomic_set(&adapter->being_detached, 0);
+
+ // Setup the default DMA mask. This would be changed later on
+ // depending on hardware capabilities
+ if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+
+ goto out_free_adapter;
+ }
+
+
+ // Initialize the synchronization lock for kernel and LLD
+ spin_lock_init(&adapter->lock);
+
+ // Initialize the command queues: the list of free SCBs and the list
+ // of pending SCBs.
+ INIT_LIST_HEAD(&adapter->kscb_pool);
+ spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
+
+ INIT_LIST_HEAD(&adapter->pend_list);
+ spin_lock_init(PENDING_LIST_LOCK(adapter));
+
+ INIT_LIST_HEAD(&adapter->completed_list);
+ spin_lock_init(COMPLETED_LIST_LOCK(adapter));
+
+
+ // Start the mailbox based controller
+ if (megaraid_init_mbox(adapter) != 0) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: maibox adapter did not initialize\n"));
+
+ goto out_free_adapter;
+ }
+
+ // Register with LSI Common Management Module
+ if (megaraid_cmm_register(adapter) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: could not register with management module\n"));
+
+ goto out_fini_mbox;
+ }
+
+ // setup adapter handle in PCI soft state
+ pci_set_drvdata(pdev, adapter);
+
+ // attach with scsi mid-layer
+ if (megaraid_io_attach(adapter) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
+
+ goto out_cmm_unreg;
+ }
+
+ return 0;
+
+out_cmm_unreg:
+ megaraid_cmm_unregister(adapter);
+out_fini_mbox:
+ megaraid_fini_mbox(adapter);
+out_free_adapter:
+ kfree(adapter);
+out_probe_one:
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+
+/**
+ * megaraid_detach_one - release framework resources and call LLD release routine
+ * @pdev : handle for our PCI configuration space
+ *
+ * This routine is called during driver unload. We free all the allocated
+ * resources and call the corresponding LLD so that it can also release all
+ * its resources.
+ *
+ * This routine is also called from the PCI hotplug system.
+ */
+static void
+megaraid_detach_one(struct pci_dev *pdev)
+{
+ adapter_t *adapter;
+ struct Scsi_Host *host;
+
+
+ // Start a rollback on this adapter
+ adapter = pci_get_drvdata(pdev);
+
+ if (!adapter) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device));
+
+ return;
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device));
+ }
+
+
+ host = adapter->host;
+
+ // do not allow any more requests from the management module for this
+ // adapter.
+ // FIXME: How do we account for the request which might still be
+ // pending with us?
+ atomic_set(&adapter->being_detached, 1);
+
+ // detach from the IO sub-system
+ megaraid_io_detach(adapter);
+
+ // Unregister from common management module
+ //
+ // FIXME: this must return success or failure for conditions if there
+ // is a command pending with LLD or not.
+ megaraid_cmm_unregister(adapter);
+
+ // finalize the mailbox based controller and release all resources
+ megaraid_fini_mbox(adapter);
+
+ kfree(adapter);
+
+ scsi_host_put(host);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
+ * @pdev : generic driver model device
+ *
+ * Shutdown notification, perform flush cache.
+ */
+static void
+megaraid_mbox_shutdown(struct pci_dev *pdev)
+{
+ adapter_t *adapter = pci_get_drvdata(pdev);
+ static int counter;
+
+ if (!adapter) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: null device in shutdown\n"));
+ return;
+ }
+
+ // flush caches now
+ con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
+ counter++));
+
+ megaraid_mbox_flush_cache(adapter);
+
+ con_log(CL_ANN, ("done\n"));
+}
+
+
+/**
+ * megaraid_io_attach - attach a device with the IO subsystem
+ * @adapter : controller's soft state
+ *
+ * Attach this device with the IO subsystem.
+ */
+static int
+megaraid_io_attach(adapter_t *adapter)
+{
+ struct Scsi_Host *host;
+
+ // Initialize SCSI Host structure
+ host = scsi_host_alloc(&megaraid_template_g, 8);
+ if (!host) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: scsi_register failed\n"));
+
+ return -1;
+ }
+
+ SCSIHOST2ADAP(host) = (caddr_t)adapter;
+ adapter->host = host;
+
+ host->irq = adapter->irq;
+ host->unique_id = adapter->unique_id;
+ host->can_queue = adapter->max_cmds;
+ host->this_id = adapter->init_id;
+ host->sg_tablesize = adapter->sglen;
+ host->max_sectors = adapter->max_sectors;
+ host->cmd_per_lun = adapter->cmd_per_lun;
+ host->max_channel = adapter->max_channel;
+ host->max_id = adapter->max_target;
+ host->max_lun = adapter->max_lun;
+
+
+ // notify mid-layer about the new controller
+ if (scsi_add_host(host, &adapter->pdev->dev)) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: scsi_add_host failed\n"));
+
+ scsi_host_put(host);
+
+ return -1;
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+}
+
+
+/**
+ * megaraid_io_detach - detach a device from the IO subsystem
+ * @adapter : controller's soft state
+ *
+ * Detach this device from the IO subsystem.
+ */
+static void
+megaraid_io_detach(adapter_t *adapter)
+{
+ struct Scsi_Host *host;
+
+ con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
+
+ host = adapter->host;
+
+ scsi_remove_host(host);
+
+ return;
+}
+
+
+/*
+ * START: Mailbox Low Level Driver
+ *
+ * This is section specific to the single mailbox based controllers
+ */
+
+/**
+ * megaraid_init_mbox - initialize controller
+ * @adapter : our soft state
+ *
+ * - Allocate 16-byte aligned mailbox memory for firmware handshake
+ * - Allocate controller's memory resources
+ * - Find out all initialization data
+ * - Allocate memory required for all the commands
+ * - Use internal library of FW routines, build up complete soft state
+ */
+static int
+megaraid_init_mbox(adapter_t *adapter)
+{
+ struct pci_dev *pdev;
+ mraid_device_t *raid_dev;
+ int i;
+ uint32_t magic64;
+
+
+ adapter->ito = MBOX_TIMEOUT;
+ pdev = adapter->pdev;
+
+ /*
+ * Allocate and initialize the init data structure for mailbox
+ * controllers
+ */
+ raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
+ if (raid_dev == NULL) return -1;
+
+
+ /*
+ * Attach the adapter soft state to raid device soft state
+ */
+ adapter->raid_device = (caddr_t)raid_dev;
+ raid_dev->fast_load = megaraid_fast_load;
+
+
+ // our baseport
+ raid_dev->baseport = pci_resource_start(pdev, 0);
+
+ if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: mem region busy\n"));
+
+ goto out_free_raid_dev;
+ }
+
+ raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+
+ if (!raid_dev->baseaddr) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: could not map hba memory\n") );
+
+ goto out_release_regions;
+ }
+
+ /* initialize the mutual exclusion lock for the mailbox */
+ spin_lock_init(&raid_dev->mailbox_lock);
+
+ /* allocate memory required for commands */
+ if (megaraid_alloc_cmd_packets(adapter) != 0)
+ goto out_iounmap;
+
+ /*
+ * Issue SYNC cmd to flush the pending cmds in the adapter
+ * and initialize its internal state
+ */
+
+ if (megaraid_mbox_fire_sync_cmd(adapter))
+ con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
+
+ /*
+ * Setup the rest of the soft state using the library of
+ * FW routines
+ */
+
+ /* request IRQ and register the interrupt service routine */
+ if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
+ adapter)) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
+ goto out_alloc_cmds;
+
+ }
+
+ // Product info
+ if (megaraid_mbox_product_info(adapter) != 0)
+ goto out_free_irq;
+
+ // Do we support extended CDBs
+ adapter->max_cdb_sz = 10;
+ if (megaraid_mbox_extended_cdb(adapter) == 0) {
+ adapter->max_cdb_sz = 16;
+ }
+
+ /*
+ * Do we support cluster environment, if we do, what is the initiator
+ * id.
+ * NOTE: In a non-cluster aware firmware environment, the LLD should
+ * return 7 as initiator id.
+ */
+ adapter->ha = 0;
+ adapter->init_id = -1;
+ if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
+ adapter->ha = 1;
+ }
+
+ /*
+ * Prepare the device ids array to have the mapping between the kernel
+ * device address and megaraid device address.
+ * We export the physical devices on their actual addresses. The
+ * logical drives are exported on a virtual SCSI channel
+ */
+ megaraid_mbox_setup_device_map(adapter);
+
+ // If the firmware supports random deletion, update the device id map
+ if (megaraid_mbox_support_random_del(adapter)) {
+
+ // Change the logical drives numbers in device_ids array one
+ // slot in device_ids is reserved for target id, that's why
+ // "<=" below
+ for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
+ adapter->device_ids[adapter->max_channel][i] += 0x80;
+ }
+ adapter->device_ids[adapter->max_channel][adapter->init_id] =
+ 0xFF;
+
+ raid_dev->random_del_supported = 1;
+ }
+
+ /*
+ * find out the maximum number of scatter-gather elements supported by
+ * this firmware
+ */
+ adapter->sglen = megaraid_mbox_get_max_sg(adapter);
+
+ // enumerate RAID and SCSI channels so that all devices on SCSI
+ // channels can later be exported, including disk devices
+ megaraid_mbox_enum_raid_scsi(adapter);
+
+ /*
+ * Other parameters required by upper layer
+ *
+ * maximum number of sectors per IO command
+ */
+ adapter->max_sectors = megaraid_max_sectors;
+
+ /*
+ * number of queued commands per LUN.
+ */
+ adapter->cmd_per_lun = megaraid_cmd_per_lun;
+
+ /*
+ * Allocate resources required to issue FW calls, when sysfs is
+ * accessed
+ */
+ if (megaraid_sysfs_alloc_resources(adapter) != 0)
+ goto out_free_irq;
+
+ // Set the DMA mask to 64-bit. All supported controllers as capable of
+ // DMA in this range
+ pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
+
+ if (((magic64 == HBA_SIGNATURE_64_BIT) &&
+ ((adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
+ (adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
+ if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: DMA mask for 64-bit failed\n"));
+
+ if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: 32-bit DMA mask failed\n"));
+ goto out_free_sysfs_res;
+ }
+ }
+ }
+
+ // setup tasklet for DPC
+ tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
+ (unsigned long)adapter);
+
+ con_log(CL_DLEVEL1, (KERN_INFO
+ "megaraid mbox hba successfully initialized\n"));
+
+ return 0;
+
+out_free_sysfs_res:
+ megaraid_sysfs_free_resources(adapter);
+out_free_irq:
+ free_irq(adapter->irq, adapter);
+out_alloc_cmds:
+ megaraid_free_cmd_packets(adapter);
+out_iounmap:
+ iounmap(raid_dev->baseaddr);
+out_release_regions:
+ pci_release_regions(pdev);
+out_free_raid_dev:
+ kfree(raid_dev);
+
+ return -1;
+}
+
+
+/**
+ * megaraid_fini_mbox - undo controller initialization
+ * @adapter : our soft state
+ */
+static void
+megaraid_fini_mbox(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ // flush all caches
+ megaraid_mbox_flush_cache(adapter);
+
+ tasklet_kill(&adapter->dpc_h);
+
+ megaraid_sysfs_free_resources(adapter);
+
+ megaraid_free_cmd_packets(adapter);
+
+ free_irq(adapter->irq, adapter);
+
+ iounmap(raid_dev->baseaddr);
+
+ pci_release_regions(adapter->pdev);
+
+ kfree(raid_dev);
+
+ return;
+}
+
+
+/**
+ * megaraid_alloc_cmd_packets - allocate shared mailbox
+ * @adapter : soft state of the raid controller
+ *
+ * Allocate and align the shared mailbox. This maibox is used to issue
+ * all the commands. For IO based controllers, the mailbox is also registered
+ * with the FW. Allocate memory for all commands as well.
+ * This is our big allocator.
+ */
+static int
+megaraid_alloc_cmd_packets(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ struct pci_dev *pdev;
+ unsigned long align;
+ scb_t *scb;
+ mbox_ccb_t *ccb;
+ struct mraid_pci_blk *epthru_pci_blk;
+ struct mraid_pci_blk *sg_pci_blk;
+ struct mraid_pci_blk *mbox_pci_blk;
+ int i;
+
+ pdev = adapter->pdev;
+
+ /*
+ * Setup the mailbox
+ * Allocate the common 16-byte aligned memory for the handshake
+ * mailbox.
+ */
+ raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma);
+
+ if (!raid_dev->una_mbox64) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ return -1;
+ }
+
+ /*
+ * Align the mailbox at 16-byte boundary
+ */
+ raid_dev->mbox = &raid_dev->una_mbox64->mbox32;
+
+ raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
+ (~0UL ^ 0xFUL));
+
+ raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
+
+ align = ((void *)raid_dev->mbox -
+ ((void *)&raid_dev->una_mbox64->mbox32));
+
+ raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
+ align;
+
+ // Allocate memory for commands issued internally
+ adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h);
+ if (!adapter->ibuf) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ goto out_free_common_mbox;
+ }
+
+ // Allocate memory for our SCSI Command Blocks and their associated
+ // memory
+
+ /*
+ * Allocate memory for the base list of scb. Later allocate memory for
+ * CCBs and embedded components of each CCB and point the pointers in
+ * scb to the allocated components
+ * NOTE: The code to allocate SCB will be duplicated in all the LLD
+ * since the calling routine does not yet know the number of available
+ * commands.
+ */
+ adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+ if (adapter->kscb_list == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ goto out_free_ibuf;
+ }
+
+ // memory allocation for our command packets
+ if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ goto out_free_scb_list;
+ }
+
+ // Adjust the scb pointers and link in the free pool
+ epthru_pci_blk = raid_dev->epthru_pool;
+ sg_pci_blk = raid_dev->sg_pool;
+ mbox_pci_blk = raid_dev->mbox_pool;
+
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ scb = adapter->kscb_list + i;
+ ccb = raid_dev->ccb_list + i;
+
+ ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
+ ccb->raw_mbox = (uint8_t *)ccb->mbox;
+ ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
+ ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16;
+
+ // make sure the mailbox is aligned properly
+ if (ccb->mbox_dma_h & 0x0F) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid mbox: not aligned on 16-bytes\n"));
+
+ goto out_teardown_dma_pools;
+ }
+
+ ccb->epthru = (mraid_epassthru_t *)
+ epthru_pci_blk[i].vaddr;
+ ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr;
+ ccb->pthru = (mraid_passthru_t *)ccb->epthru;
+ ccb->pthru_dma_h = ccb->epthru_dma_h;
+
+
+ ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr;
+ ccb->sgl_dma_h = sg_pci_blk[i].dma_addr;
+ ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64;
+
+ scb->ccb = (caddr_t)ccb;
+ scb->gp = 0;
+
+ scb->sno = i; // command index
+
+ scb->scp = NULL;
+ scb->state = SCB_FREE;
+ scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_type = MRAID_DMA_NONE;
+ scb->dev_channel = -1;
+ scb->dev_target = -1;
+
+ // put scb in the free pool
+ list_add_tail(&scb->list, &adapter->kscb_pool);
+ }
+
+ return 0;
+
+out_teardown_dma_pools:
+ megaraid_mbox_teardown_dma_pools(adapter);
+out_free_scb_list:
+ kfree(adapter->kscb_list);
+out_free_ibuf:
+ pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+ adapter->ibuf_dma_h);
+out_free_common_mbox:
+ pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
+
+ return -1;
+}
+
+
+/**
+ * megaraid_free_cmd_packets - free memory
+ * @adapter : soft state of the raid controller
+ *
+ * Release memory resources allocated for commands.
+ */
+static void
+megaraid_free_cmd_packets(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ megaraid_mbox_teardown_dma_pools(adapter);
+
+ kfree(adapter->kscb_list);
+
+ pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+ (void *)adapter->ibuf, adapter->ibuf_dma_h);
+
+ pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
+ return;
+}
+
+
+/**
+ * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
+ * @adapter : HBA soft state
+ *
+ * Setup the dma pools for mailbox, passthru and extended passthru structures,
+ * and scatter-gather lists.
+ */
+static int
+megaraid_mbox_setup_dma_pools(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ struct mraid_pci_blk *epthru_pci_blk;
+ struct mraid_pci_blk *sg_pci_blk;
+ struct mraid_pci_blk *mbox_pci_blk;
+ int i;
+
+
+
+ // Allocate memory for 16-bytes aligned mailboxes
+ raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
+ adapter->pdev,
+ sizeof(mbox64_t) + 16,
+ 16, 0);
+
+ if (raid_dev->mbox_pool_handle == NULL) {
+ goto fail_setup_dma_pool;
+ }
+
+ mbox_pci_blk = raid_dev->mbox_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ mbox_pci_blk[i].vaddr = pci_pool_alloc(
+ raid_dev->mbox_pool_handle,
+ GFP_KERNEL,
+ &mbox_pci_blk[i].dma_addr);
+ if (!mbox_pci_blk[i].vaddr) {
+ goto fail_setup_dma_pool;
+ }
+ }
+
+ /*
+ * Allocate memory for each embedded passthru strucuture pointer
+ * Request for a 128 bytes aligned structure for each passthru command
+ * structure
+ * Since passthru and extended passthru commands are exclusive, they
+ * share common memory pool. Passthru structures piggyback on memory
+ * allocted to extended passthru since passthru is smaller of the two
+ */
+ raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
+ adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
+
+ if (raid_dev->epthru_pool_handle == NULL) {
+ goto fail_setup_dma_pool;
+ }
+
+ epthru_pci_blk = raid_dev->epthru_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ epthru_pci_blk[i].vaddr = pci_pool_alloc(
+ raid_dev->epthru_pool_handle,
+ GFP_KERNEL,
+ &epthru_pci_blk[i].dma_addr);
+ if (!epthru_pci_blk[i].vaddr) {
+ goto fail_setup_dma_pool;
+ }
+ }
+
+
+ // Allocate memory for each scatter-gather list. Request for 512 bytes
+ // alignment for each sg list
+ raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
+ adapter->pdev,
+ sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
+ 512, 0);
+
+ if (raid_dev->sg_pool_handle == NULL) {
+ goto fail_setup_dma_pool;
+ }
+
+ sg_pci_blk = raid_dev->sg_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ sg_pci_blk[i].vaddr = pci_pool_alloc(
+ raid_dev->sg_pool_handle,
+ GFP_KERNEL,
+ &sg_pci_blk[i].dma_addr);
+ if (!sg_pci_blk[i].vaddr) {
+ goto fail_setup_dma_pool;
+ }
+ }
+
+ return 0;
+
+fail_setup_dma_pool:
+ megaraid_mbox_teardown_dma_pools(adapter);
+ return -1;
+}
+
+
+/**
+ * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
+ * @adapter : HBA soft state
+ *
+ * Teardown the dma pool for mailbox, passthru and extended passthru
+ * structures, and scatter-gather lists.
+ */
+static void
+megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ struct mraid_pci_blk *epthru_pci_blk;
+ struct mraid_pci_blk *sg_pci_blk;
+ struct mraid_pci_blk *mbox_pci_blk;
+ int i;
+
+
+ sg_pci_blk = raid_dev->sg_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
+ pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
+ sg_pci_blk[i].dma_addr);
+ }
+ if (raid_dev->sg_pool_handle)
+ pci_pool_destroy(raid_dev->sg_pool_handle);
+
+
+ epthru_pci_blk = raid_dev->epthru_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
+ pci_pool_free(raid_dev->epthru_pool_handle,
+ epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
+ }
+ if (raid_dev->epthru_pool_handle)
+ pci_pool_destroy(raid_dev->epthru_pool_handle);
+
+
+ mbox_pci_blk = raid_dev->mbox_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
+ pci_pool_free(raid_dev->mbox_pool_handle,
+ mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
+ }
+ if (raid_dev->mbox_pool_handle)
+ pci_pool_destroy(raid_dev->mbox_pool_handle);
+
+ return;
+}
+
+
+/**
+ * megaraid_alloc_scb - detach and return a scb from the free list
+ * @adapter : controller's soft state
+ * @scp : pointer to the scsi command to be executed
+ *
+ * Return the scb from the head of the free list. %NULL if there are none
+ * available.
+ */
+static scb_t *
+megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
+{
+ struct list_head *head = &adapter->kscb_pool;
+ scb_t *scb = NULL;
+ unsigned long flags;
+
+ // detach scb from free pool
+ spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+ return NULL;
+ }
+
+ scb = list_entry(head->next, scb_t, list);
+ list_del_init(&scb->list);
+
+ spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ scb->state = SCB_ACTIVE;
+ scb->scp = scp;
+ scb->dma_type = MRAID_DMA_NONE;
+
+ return scb;
+}
+
+
+/**
+ * megaraid_dealloc_scb - return the scb to the free pool
+ * @adapter : controller's soft state
+ * @scb : scb to be freed
+ *
+ * Return the scb back to the free list of scbs. The caller must 'flush' the
+ * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc.
+ * NOTE NOTE: Make sure the scb is not on any list before calling this
+ * routine.
+ */
+static inline void
+megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
+{
+ unsigned long flags;
+
+ // put scb in the free pool
+ scb->state = SCB_FREE;
+ scb->scp = NULL;
+ spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ list_add(&scb->list, &adapter->kscb_pool);
+
+ spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_mksgl - make the scatter-gather list
+ * @adapter : controller's soft state
+ * @scb : scsi control block
+ *
+ * Prepare the scatter-gather list.
+ */
+static int
+megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
+{
+ struct scatterlist *sgl;
+ mbox_ccb_t *ccb;
+ struct scsi_cmnd *scp;
+ int sgcnt;
+ int i;
+
+
+ scp = scb->scp;
+ ccb = (mbox_ccb_t *)scb->ccb;
+
+ sgcnt = scsi_dma_map(scp);
+ BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
+
+ // no mapping required if no data to be transferred
+ if (!sgcnt)
+ return 0;
+
+ scb->dma_type = MRAID_DMA_WSG;
+
+ scsi_for_each_sg(scp, sgl, sgcnt, i) {
+ ccb->sgl64[i].address = sg_dma_address(sgl);
+ ccb->sgl64[i].length = sg_dma_len(sgl);
+ }
+
+ // Return count of SG nodes
+ return sgcnt;
+}
+
+
+/**
+ * mbox_post_cmd - issue a mailbox command
+ * @adapter : controller's soft state
+ * @scb : command to be issued
+ *
+ * Post the command to the controller if mailbox is available.
+ */
+static int
+mbox_post_cmd(adapter_t *adapter, scb_t *scb)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ mbox_ccb_t *ccb;
+ unsigned long flags;
+ unsigned int i = 0;
+
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ mbox = raid_dev->mbox;
+ mbox64 = raid_dev->mbox64;
+
+ /*
+ * Check for busy mailbox. If it is, return failure - the caller
+ * should retry later.
+ */
+ spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
+
+ if (unlikely(mbox->busy)) {
+ do {
+ udelay(1);
+ i++;
+ rmb();
+ } while(mbox->busy && (i < max_mbox_busy_wait));
+
+ if (mbox->busy) {
+
+ spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+ return -1;
+ }
+ }
+
+
+ // Copy this command's mailbox data into "adapter's" mailbox
+ memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
+ mbox->cmdid = scb->sno;
+
+ adapter->outstanding_cmds++;
+
+ if (scb->dma_direction == PCI_DMA_TODEVICE)
+ pci_dma_sync_sg_for_device(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
+ PCI_DMA_TODEVICE);
+
+ mbox->busy = 1; // Set busy
+ mbox->poll = 0;
+ mbox->ack = 0;
+ wmb();
+
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+ spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+ return 0;
+}
+
+
+/**
+ * megaraid_queue_command - generic queue entry point for all LLDs
+ * @scp : pointer to the scsi command to be executed
+ * @done : callback routine to be called after the cmd has be completed
+ *
+ * Queue entry point for mailbox based controllers.
+ */
+static int
+megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+{
+ adapter_t *adapter;
+ scb_t *scb;
+ int if_busy;
+
+ adapter = SCP2ADAPTER(scp);
+ scp->scsi_done = done;
+ scp->result = 0;
+
+ /*
+ * Allocate and build a SCB request
+ * if_busy flag will be set if megaraid_mbox_build_cmd() command could
+ * not allocate scb. We will return non-zero status in that case.
+ * NOTE: scb can be null even though certain commands completed
+ * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would
+ * return 0 in that case, and we would do the callback right away.
+ */
+ if_busy = 0;
+ scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
+ if (!scb) { // command already completed
+ done(scp);
+ return 0;
+ }
+
+ megaraid_mbox_runpendq(adapter, scb);
+ return if_busy;
+}
+
+static DEF_SCSI_QCMD(megaraid_queue_command)
+
+/**
+ * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
+ * @adapter : controller's soft state
+ * @scp : mid-layer scsi command pointer
+ * @busy : set if request could not be completed because of lack of
+ * resources
+ *
+ * Transform the mid-layer scsi command to megaraid firmware lingua.
+ * Convert the command issued by mid-layer to format understood by megaraid
+ * firmware. We also complete certain commands without sending them to firmware.
+ */
+static scb_t *
+megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
+{
+ mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
+ int channel;
+ int target;
+ int islogical;
+ mbox_ccb_t *ccb;
+ mraid_passthru_t *pthru;
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ scb_t *scb;
+ char skip[] = "skipping";
+ char scan[] = "scanning";
+ char *ss;
+
+
+ /*
+ * Get the appropriate device map for the device this command is
+ * intended for
+ */
+ MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
+
+ /*
+ * Logical drive commands
+ */
+ if (islogical) {
+ switch (scp->cmnd[0]) {
+ case TEST_UNIT_READY:
+ /*
+ * Do we support clustering and is the support enabled
+ * If no, return success always
+ */
+ if (!adapter->ha) {
+ scp->result = (DID_OK << 16);
+ return NULL;
+ }
+
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ scb->dma_direction = scp->sc_data_direction;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ ccb = (mbox_ccb_t *)scb->ccb;
+
+ /*
+ * The command id will be provided by the command
+ * issuance routine
+ */
+ ccb->raw_mbox[0] = CLUSTER_CMD;
+ ccb->raw_mbox[2] = RESERVATION_STATUS;
+ ccb->raw_mbox[3] = target;
+
+ return scb;
+
+ case MODE_SENSE:
+ {
+ struct scatterlist *sgl;
+ caddr_t vaddr;
+
+ sgl = scsi_sglist(scp);
+ if (sg_page(sgl)) {
+ vaddr = (caddr_t) sg_virt(&sgl[0]);
+
+ memset(vaddr, 0, scp->cmnd[4]);
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
+ }
+ }
+ scp->result = (DID_OK << 16);
+ return NULL;
+
+ case INQUIRY:
+ /*
+ * Display the channel scan for logical drives
+ * Do not display scan for a channel if already done.
+ */
+ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
+
+ con_log(CL_ANN, (KERN_INFO
+ "scsi[%d]: scanning scsi channel %d",
+ adapter->host->host_no,
+ SCP2CHANNEL(scp)));
+
+ con_log(CL_ANN, (
+ " [virtual] for logical drives\n"));
+
+ rdev->last_disp |= (1L << SCP2CHANNEL(scp));
+ }
+
+ if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = ILLEGAL_REQUEST;
+ scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
+ scp->result = CHECK_CONDITION << 1;
+ return NULL;
+ }
+
+ /* Fall through */
+
+ case READ_CAPACITY:
+ /*
+ * Do not allow LUN > 0 for logical drives and
+ * requests for more than 40 logical drives
+ */
+ if (SCP2LUN(scp)) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+ if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+
+ /* Allocate a SCB and initialize passthru */
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ pthru = ccb->pthru;
+ mbox = ccb->mbox;
+ mbox64 = ccb->mbox64;
+
+ pthru->timeout = 0;
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 1;
+ pthru->logdrv = target;
+ pthru->cdblen = scp->cmd_len;
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ mbox->cmd = MBOXCMD_PASSTHRU64;
+ scb->dma_direction = scp->sc_data_direction;
+
+ pthru->dataxferlen = scsi_bufflen(scp);
+ pthru->dataxferaddr = ccb->sgl_dma_h;
+ pthru->numsge = megaraid_mbox_mksgl(adapter,
+ scb);
+
+ mbox->xferaddr = 0xFFFFFFFF;
+ mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h;
+ mbox64->xferaddr_hi = 0;
+
+ return scb;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+
+ /*
+ * Allocate a SCB and initialize mailbox
+ */
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ mbox = ccb->mbox;
+ mbox64 = ccb->mbox64;
+ mbox->logdrv = target;
+
+ /*
+ * A little HACK: 2nd bit is zero for all scsi read
+ * commands and is set for all scsi write commands
+ */
+ mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
+ MBOXCMD_LREAD64 ;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ mbox->numsectors = (uint32_t)scp->cmnd[4];
+ mbox->lba =
+ ((uint32_t)scp->cmnd[1] << 16) |
+ ((uint32_t)scp->cmnd[2] << 8) |
+ (uint32_t)scp->cmnd[3];
+
+ mbox->lba &= 0x1FFFFF;
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ mbox->numsectors =
+ (uint32_t)scp->cmnd[8] |
+ ((uint32_t)scp->cmnd[7] << 8);
+ mbox->lba =
+ ((uint32_t)scp->cmnd[2] << 24) |
+ ((uint32_t)scp->cmnd[3] << 16) |
+ ((uint32_t)scp->cmnd[4] << 8) |
+ (uint32_t)scp->cmnd[5];
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ mbox->lba =
+ ((uint32_t)scp->cmnd[2] << 24) |
+ ((uint32_t)scp->cmnd[3] << 16) |
+ ((uint32_t)scp->cmnd[4] << 8) |
+ (uint32_t)scp->cmnd[5];
+
+ mbox->numsectors =
+ ((uint32_t)scp->cmnd[6] << 24) |
+ ((uint32_t)scp->cmnd[7] << 16) |
+ ((uint32_t)scp->cmnd[8] << 8) |
+ (uint32_t)scp->cmnd[9];
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: unsupported CDB length\n"));
+
+ megaraid_dealloc_scb(adapter, scb);
+
+ scp->result = (DID_ERROR << 16);
+ return NULL;
+ }
+
+ scb->dma_direction = scp->sc_data_direction;
+
+ // Calculate Scatter-Gather info
+ mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h;
+ mbox->numsge = megaraid_mbox_mksgl(adapter,
+ scb);
+ mbox->xferaddr = 0xFFFFFFFF;
+ mbox64->xferaddr_hi = 0;
+
+ return scb;
+
+ case RESERVE:
+ case RELEASE:
+ /*
+ * Do we support clustering and is the support enabled
+ */
+ if (!adapter->ha) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+ /*
+ * Allocate a SCB and initialize mailbox
+ */
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ ccb->raw_mbox[0] = CLUSTER_CMD;
+ ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ RESERVE_LD : RELEASE_LD;
+
+ ccb->raw_mbox[3] = target;
+ scb->dma_direction = scp->sc_data_direction;
+
+ return scb;
+
+ default:
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+ }
+ else { // Passthru device commands
+
+ // Do not allow access to target id > 15 or LUN > 7
+ if (target > 15 || SCP2LUN(scp) > 7) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+ // if fast load option was set and scan for last device is
+ // over, reset the fast_load flag so that during a possible
+ // next scan, devices can be made available
+ if (rdev->fast_load && (target == 15) &&
+ (SCP2CHANNEL(scp) == adapter->max_channel -1)) {
+
+ con_log(CL_ANN, (KERN_INFO
+ "megaraid[%d]: physical device scan re-enabled\n",
+ adapter->host->host_no));
+ rdev->fast_load = 0;
+ }
+
+ /*
+ * Display the channel scan for physical devices
+ */
+ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
+
+ ss = rdev->fast_load ? skip : scan;
+
+ con_log(CL_ANN, (KERN_INFO
+ "scsi[%d]: %s scsi channel %d [Phy %d]",
+ adapter->host->host_no, ss, SCP2CHANNEL(scp),
+ channel));
+
+ con_log(CL_ANN, (
+ " for non-raid devices\n"));
+
+ rdev->last_disp |= (1L << SCP2CHANNEL(scp));
+ }
+
+ // disable channel sweep if fast load option given
+ if (rdev->fast_load) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+ // Allocate a SCB and initialize passthru
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = channel;
+ scb->dev_target = target;
+ scb->dma_direction = scp->sc_data_direction;
+ mbox = ccb->mbox;
+ mbox64 = ccb->mbox64;
+
+ // Does this firmware support extended CDBs
+ if (adapter->max_cdb_sz == 16) {
+ mbox->cmd = MBOXCMD_EXTPTHRU;
+
+ megaraid_mbox_prepare_epthru(adapter, scb, scp);
+
+ mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h;
+ mbox64->xferaddr_hi = 0;
+ mbox->xferaddr = 0xFFFFFFFF;
+ }
+ else {
+ mbox->cmd = MBOXCMD_PASSTHRU64;
+
+ megaraid_mbox_prepare_pthru(adapter, scb, scp);
+
+ mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h;
+ mbox64->xferaddr_hi = 0;
+ mbox->xferaddr = 0xFFFFFFFF;
+ }
+ return scb;
+ }
+
+ // NOT REACHED
+}
+
+
+/**
+ * megaraid_mbox_runpendq - execute commands queued in the pending queue
+ * @adapter : controller's soft state
+ * @scb_q : SCB to be queued in the pending list
+ *
+ * Scan the pending list for commands which are not yet issued and try to
+ * post to the controller. The SCB can be a null pointer, which would indicate
+ * no SCB to be queue, just try to execute the ones in the pending list.
+ *
+ * NOTE: We do not actually traverse the pending list. The SCBs are plucked
+ * out from the head of the pending list. If it is successfully issued, the
+ * next SCB is at the head now.
+ */
+static void
+megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
+{
+ scb_t *scb;
+ unsigned long flags;
+
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+
+ if (scb_q) {
+ scb_q->state = SCB_PENDQ;
+ list_add_tail(&scb_q->list, &adapter->pend_list);
+ }
+
+ // if the adapter in not in quiescent mode, post the commands to FW
+ if (adapter->quiescent) {
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+ return;
+ }
+
+ while (!list_empty(&adapter->pend_list)) {
+
+ assert_spin_locked(PENDING_LIST_LOCK(adapter));
+
+ scb = list_entry(adapter->pend_list.next, scb_t, list);
+
+ // remove the scb from the pending list and try to
+ // issue. If we are unable to issue it, put back in
+ // the pending list and return
+
+ list_del_init(&scb->list);
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+ // if mailbox was busy, return SCB back to pending
+ // list. Make sure to add at the head, since that's
+ // where it would have been removed from
+
+ scb->state = SCB_ISSUED;
+
+ if (mbox_post_cmd(adapter, scb) != 0) {
+
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+
+ scb->state = SCB_PENDQ;
+
+ list_add(&scb->list, &adapter->pend_list);
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+ flags);
+
+ return;
+ }
+
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+ }
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_prepare_pthru - prepare a command for physical devices
+ * @adapter : pointer to controller's soft state
+ * @scb : scsi control block
+ * @scp : scsi command from the mid-layer
+ *
+ * Prepare a command for the scsi physical devices.
+ */
+static void
+megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *scp)
+{
+ mbox_ccb_t *ccb;
+ mraid_passthru_t *pthru;
+ uint8_t channel;
+ uint8_t target;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ pthru = ccb->pthru;
+ channel = scb->dev_channel;
+ target = scb->dev_target;
+
+ // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
+ pthru->timeout = 4;
+ pthru->ars = 1;
+ pthru->islogical = 0;
+ pthru->channel = 0;
+ pthru->target = (channel << 4) | target;
+ pthru->logdrv = SCP2LUN(scp);
+ pthru->reqsenselen = 14;
+ pthru->cdblen = scp->cmd_len;
+
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ if (scsi_bufflen(scp)) {
+ pthru->dataxferlen = scsi_bufflen(scp);
+ pthru->dataxferaddr = ccb->sgl_dma_h;
+ pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
+ }
+ else {
+ pthru->dataxferaddr = 0;
+ pthru->dataxferlen = 0;
+ pthru->numsge = 0;
+ }
+ return;
+}
+
+
+/**
+ * megaraid_mbox_prepare_epthru - prepare a command for physical devices
+ * @adapter : pointer to controller's soft state
+ * @scb : scsi control block
+ * @scp : scsi command from the mid-layer
+ *
+ * Prepare a command for the scsi physical devices. This routine prepares
+ * commands for devices which can take extended CDBs (>10 bytes).
+ */
+static void
+megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *scp)
+{
+ mbox_ccb_t *ccb;
+ mraid_epassthru_t *epthru;
+ uint8_t channel;
+ uint8_t target;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ epthru = ccb->epthru;
+ channel = scb->dev_channel;
+ target = scb->dev_target;
+
+ // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
+ epthru->timeout = 4;
+ epthru->ars = 1;
+ epthru->islogical = 0;
+ epthru->channel = 0;
+ epthru->target = (channel << 4) | target;
+ epthru->logdrv = SCP2LUN(scp);
+ epthru->reqsenselen = 14;
+ epthru->cdblen = scp->cmd_len;
+
+ memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
+
+ if (scsi_bufflen(scp)) {
+ epthru->dataxferlen = scsi_bufflen(scp);
+ epthru->dataxferaddr = ccb->sgl_dma_h;
+ epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
+ }
+ else {
+ epthru->dataxferaddr = 0;
+ epthru->dataxferlen = 0;
+ epthru->numsge = 0;
+ }
+ return;
+}
+
+
+/**
+ * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
+ * @adapter : controller's soft state
+ *
+ * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the
+ * completed command and put them on the completed list for later processing.
+ *
+ * Returns: 1 if the interrupt is valid, 0 otherwise
+ */
+static int
+megaraid_ack_sequence(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ scb_t *scb;
+ uint8_t nstatus;
+ uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
+ struct list_head clist;
+ int handled;
+ uint32_t dword;
+ unsigned long flags;
+ int i, j;
+
+
+ mbox = raid_dev->mbox;
+
+ // move the SCBs from the firmware completed array to our local list
+ INIT_LIST_HEAD(&clist);
+
+ // loop till F/W has more commands for us to complete
+ handled = 0;
+ spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
+ do {
+ /*
+ * Check if a valid interrupt is pending. If found, force the
+ * interrupt line low.
+ */
+ dword = RDOUTDOOR(raid_dev);
+ if (dword != 0x10001234) break;
+
+ handled = 1;
+
+ WROUTDOOR(raid_dev, 0x10001234);
+
+ nstatus = 0;
+ // wait for valid numstatus to post
+ for (i = 0; i < 0xFFFFF; i++) {
+ if (mbox->numstatus != 0xFF) {
+ nstatus = mbox->numstatus;
+ break;
+ }
+ rmb();
+ }
+ mbox->numstatus = 0xFF;
+
+ adapter->outstanding_cmds -= nstatus;
+
+ for (i = 0; i < nstatus; i++) {
+
+ // wait for valid command index to post
+ for (j = 0; j < 0xFFFFF; j++) {
+ if (mbox->completed[i] != 0xFF) break;
+ rmb();
+ }
+ completed[i] = mbox->completed[i];
+ mbox->completed[i] = 0xFF;
+
+ if (completed[i] == 0xFF) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid: command posting timed out\n"));
+
+ BUG();
+ continue;
+ }
+
+ // Get SCB associated with this command id
+ if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
+ // a cmm command
+ scb = adapter->uscb_list + (completed[i] -
+ MBOX_MAX_SCSI_CMDS);
+ }
+ else {
+ // an os command
+ scb = adapter->kscb_list + completed[i];
+ }
+
+ scb->status = mbox->status;
+ list_add_tail(&scb->list, &clist);
+ }
+
+ // Acknowledge interrupt
+ WRINDOOR(raid_dev, 0x02);
+
+ } while(1);
+
+ spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+
+ // put the completed commands in the completed list. DPC would
+ // complete these commands later
+ spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+
+ list_splice(&clist, &adapter->completed_list);
+
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+ // schedule the DPC if there is some work for it
+ if (handled)
+ tasklet_schedule(&adapter->dpc_h);
+
+ return handled;
+}
+
+
+/**
+ * megaraid_isr - isr for memory based mailbox based controllers
+ * @irq : irq
+ * @devp : pointer to our soft state
+ *
+ * Interrupt service routine for memory-mapped mailbox controllers.
+ */
+static irqreturn_t
+megaraid_isr(int irq, void *devp)
+{
+ adapter_t *adapter = devp;
+ int handled;
+
+ handled = megaraid_ack_sequence(adapter);
+
+ /* Loop through any pending requests */
+ if (!adapter->quiescent) {
+ megaraid_mbox_runpendq(adapter, NULL);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+
+/**
+ * megaraid_mbox_sync_scb - sync kernel buffers
+ * @adapter : controller's soft state
+ * @scb : pointer to the resource packet
+ *
+ * DMA sync if required.
+ */
+static void
+megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
+{
+ mbox_ccb_t *ccb;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+
+ if (scb->dma_direction == PCI_DMA_FROMDEVICE)
+ pci_dma_sync_sg_for_cpu(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
+ PCI_DMA_FROMDEVICE);
+
+ scsi_dma_unmap(scb->scp);
+ return;
+}
+
+
+/**
+ * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
+ * @devp : pointer to HBA soft state
+ *
+ * Pick up the commands from the completed list and send back to the owners.
+ * This is a reentrant function and does not assume any locks are held while
+ * it is being called.
+ */
+static void
+megaraid_mbox_dpc(unsigned long devp)
+{
+ adapter_t *adapter = (adapter_t *)devp;
+ mraid_device_t *raid_dev;
+ struct list_head clist;
+ struct scatterlist *sgl;
+ scb_t *scb;
+ scb_t *tmp;
+ struct scsi_cmnd *scp;
+ mraid_passthru_t *pthru;
+ mraid_epassthru_t *epthru;
+ mbox_ccb_t *ccb;
+ int islogical;
+ int pdev_index;
+ int pdev_state;
+ mbox_t *mbox;
+ unsigned long flags;
+ uint8_t c;
+ int status;
+ uioc_t *kioc;
+
+
+ if (!adapter) return;
+
+ raid_dev = ADAP2RAIDDEV(adapter);
+
+ // move the SCBs from the completed list to our local list
+ INIT_LIST_HEAD(&clist);
+
+ spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+
+ list_splice_init(&adapter->completed_list, &clist);
+
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+ list_for_each_entry_safe(scb, tmp, &clist, list) {
+
+ status = scb->status;
+ scp = scb->scp;
+ ccb = (mbox_ccb_t *)scb->ccb;
+ pthru = ccb->pthru;
+ epthru = ccb->epthru;
+ mbox = ccb->mbox;
+
+ // Make sure f/w has completed a valid command
+ if (scb->state != SCB_ISSUED) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid critical err: invalid command %d:%d:%p\n",
+ scb->sno, scb->state, scp));
+ BUG();
+ continue; // Must never happen!
+ }
+
+ // check for the management command and complete it right away
+ if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+ scb->state = SCB_FREE;
+ scb->status = status;
+
+ // remove from local clist
+ list_del_init(&scb->list);
+
+ kioc = (uioc_t *)scb->gp;
+ kioc->status = 0;
+
+ megaraid_mbox_mm_done(adapter, scb);
+
+ continue;
+ }
+
+ // Was an abort issued for this command earlier
+ if (scb->state & SCB_ABORT) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: aborted cmd [%x] completed\n",
+ scb->sno));
+ }
+
+ /*
+ * If the inquiry came of a disk drive which is not part of
+ * any RAID array, expose it to the kernel. For this to be
+ * enabled, user must set the "megaraid_expose_unconf_disks"
+ * flag to 1 by specifying it on module parameter list.
+ * This would enable data migration off drives from other
+ * configurations.
+ */
+ islogical = MRAID_IS_LOGICAL(adapter, scp);
+ if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
+ && IS_RAID_CH(raid_dev, scb->dev_channel)) {
+
+ sgl = scsi_sglist(scp);
+ if (sg_page(sgl)) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
+ c = 0;
+ }
+
+ if ((c & 0x1F ) == TYPE_DISK) {
+ pdev_index = (scb->dev_channel * 16) +
+ scb->dev_target;
+ pdev_state =
+ raid_dev->pdrv_state[pdev_index] & 0x0F;
+
+ if (pdev_state == PDRV_ONLINE ||
+ pdev_state == PDRV_FAILED ||
+ pdev_state == PDRV_RBLD ||
+ pdev_state == PDRV_HOTSPARE ||
+ megaraid_expose_unconf_disks == 0) {
+
+ status = 0xF0;
+ }
+ }
+ }
+
+ // Convert MegaRAID status to Linux error code
+ switch (status) {
+
+ case 0x00:
+
+ scp->result = (DID_OK << 16);
+ break;
+
+ case 0x02:
+
+ /* set sense_buffer and result fields */
+ if (mbox->cmd == MBOXCMD_PASSTHRU ||
+ mbox->cmd == MBOXCMD_PASSTHRU64) {
+
+ memcpy(scp->sense_buffer, pthru->reqsensearea,
+ 14);
+
+ scp->result = DRIVER_SENSE << 24 |
+ DID_OK << 16 | CHECK_CONDITION << 1;
+ }
+ else {
+ if (mbox->cmd == MBOXCMD_EXTPTHRU) {
+
+ memcpy(scp->sense_buffer,
+ epthru->reqsensearea, 14);
+
+ scp->result = DRIVER_SENSE << 24 |
+ DID_OK << 16 |
+ CHECK_CONDITION << 1;
+ } else {
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = ABORTED_COMMAND;
+ scp->result = CHECK_CONDITION << 1;
+ }
+ }
+ break;
+
+ case 0x08:
+
+ scp->result = DID_BUS_BUSY << 16 | status;
+ break;
+
+ default:
+
+ /*
+ * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
+ * failed
+ */
+ if (scp->cmnd[0] == TEST_UNIT_READY) {
+ scp->result = DID_ERROR << 16 |
+ RESERVATION_CONFLICT << 1;
+ }
+ else
+ /*
+ * Error code returned is 1 if Reserve or Release
+ * failed or the input parameter is invalid
+ */
+ if (status == 1 && (scp->cmnd[0] == RESERVE ||
+ scp->cmnd[0] == RELEASE)) {
+
+ scp->result = DID_ERROR << 16 |
+ RESERVATION_CONFLICT << 1;
+ }
+ else {
+ scp->result = DID_BAD_TARGET << 16 | status;
+ }
+ }
+
+ // print a debug message for all failed commands
+ if (status) {
+ megaraid_mbox_display_scb(adapter, scb);
+ }
+
+ // Free our internal resources and call the mid-layer callback
+ // routine
+ megaraid_mbox_sync_scb(adapter, scb);
+
+ // remove from local clist
+ list_del_init(&scb->list);
+
+ // put back in free list
+ megaraid_dealloc_scb(adapter, scb);
+
+ // send the scsi packet back to kernel
+ scp->scsi_done(scp);
+ }
+
+ return;
+}
+
+
+/**
+ * megaraid_abort_handler - abort the scsi command
+ * @scp : command to be aborted
+ *
+ * Abort a previous SCSI request. Only commands on the pending list can be
+ * aborted. All the commands issued to the F/W must complete.
+ **/
+static int
+megaraid_abort_handler(struct scsi_cmnd *scp)
+{
+ adapter_t *adapter;
+ mraid_device_t *raid_dev;
+ scb_t *scb;
+ scb_t *tmp;
+ int found;
+ unsigned long flags;
+ int i;
+
+
+ adapter = SCP2ADAPTER(scp);
+ raid_dev = ADAP2RAIDDEV(adapter);
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
+ scp->cmnd[0], SCP2CHANNEL(scp),
+ SCP2TARGET(scp), SCP2LUN(scp)));
+
+ // If FW has stopped responding, simply return failure
+ if (raid_dev->hw_error) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: hw error, not aborting\n"));
+ return FAILED;
+ }
+
+ // There might a race here, where the command was completed by the
+ // firmware and now it is on the completed list. Before we could
+ // complete the command to the kernel in dpc, the abort came.
+ // Find out if this is the case to avoid the race.
+ scb = NULL;
+ spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+ list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
+
+ if (scb->scp == scp) { // Found command
+
+ list_del_init(&scb->list); // from completed list
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: %d[%d:%d], abort from completed list\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+
+ scp->result = (DID_ABORT << 16);
+ scp->scsi_done(scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
+ flags);
+
+ return SUCCESS;
+ }
+ }
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+ // Find out if this command is still on the pending list. If it is and
+ // was never issued, abort and return success. If the command is owned
+ // by the firmware, we must wait for it to complete by the FW.
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+ list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
+
+ if (scb->scp == scp) { // Found command
+
+ list_del_init(&scb->list); // from pending list
+
+ ASSERT(!(scb->state & SCB_ISSUED));
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid abort: [%d:%d], driver owner\n",
+ scb->dev_channel, scb->dev_target));
+
+ scp->result = (DID_ABORT << 16);
+ scp->scsi_done(scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+ flags);
+
+ return SUCCESS;
+ }
+ }
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+
+ // Check do we even own this command, in which case this would be
+ // owned by the firmware. The only way to locate the FW scb is to
+ // traverse through the list of all SCB, since driver does not
+ // maintain these SCBs on any list
+ found = 0;
+ spin_lock_irq(&adapter->lock);
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ scb = adapter->kscb_list + i;
+
+ if (scb->scp == scp) {
+
+ found = 1;
+
+ if (!(scb->state & SCB_ISSUED)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid abort: %d[%d:%d], invalid state\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ BUG();
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid abort: %d[%d:%d], fw owner\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ }
+ }
+ }
+ spin_unlock_irq(&adapter->lock);
+
+ if (!found) {
+ con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n"));
+
+ // FIXME: Should there be a callback for this command?
+ return SUCCESS;
+ }
+
+ // We cannot actually abort a command owned by firmware, return
+ // failure and wait for reset. In host reset handler, we will find out
+ // if the HBA is still live
+ return FAILED;
+}
+
+/**
+ * megaraid_reset_handler - device reset handler for mailbox based driver
+ * @scp : reference command
+ *
+ * Reset handler for the mailbox based controller. First try to find out if
+ * the FW is still live, in which case the outstanding commands counter mut go
+ * down to 0. If that happens, also issue the reservation reset command to
+ * relinquish (possible) reservations on the logical drives connected to this
+ * host.
+ **/
+static int
+megaraid_reset_handler(struct scsi_cmnd *scp)
+{
+ adapter_t *adapter;
+ scb_t *scb;
+ scb_t *tmp;
+ mraid_device_t *raid_dev;
+ unsigned long flags;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+ int recovery_window;
+ int recovering;
+ int i;
+ uioc_t *kioc;
+
+ adapter = SCP2ADAPTER(scp);
+ raid_dev = ADAP2RAIDDEV(adapter);
+
+ // return failure if adapter is not responding
+ if (raid_dev->hw_error) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: hw error, cannot reset\n"));
+ return FAILED;
+ }
+
+
+ // Under exceptional conditions, FW can take up to 3 minutes to
+ // complete command processing. Wait for additional 2 minutes for the
+ // pending commands counter to go down to 0. If it doesn't, let the
+ // controller be marked offline
+ // Also, reset all the commands currently owned by the driver
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+ list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
+ list_del_init(&scb->list); // from pending list
+
+ if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+
+ scb->status = -1;
+
+ kioc = (uioc_t *)scb->gp;
+ kioc->status = -EFAULT;
+
+ megaraid_mbox_mm_done(adapter, scb);
+ } else {
+ if (scb->scp == scp) { // Found command
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: %d[%d:%d], reset from pending list\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: IO packet with %d[%d:%d] being reset\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ }
+
+ scb->scp->result = (DID_RESET << 16);
+ scb->scp->scsi_done(scb->scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+ }
+ }
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+ if (adapter->outstanding_cmds) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: %d outstanding commands. Max wait %d sec\n",
+ adapter->outstanding_cmds,
+ (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
+ }
+
+ recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
+
+ recovering = adapter->outstanding_cmds;
+
+ for (i = 0; i < recovery_window; i++) {
+
+ megaraid_ack_sequence(adapter);
+
+ // print a message once every 5 seconds only
+ if (!(i % 5)) {
+ con_log(CL_ANN, (
+ "megaraid mbox: Wait for %d commands to complete:%d\n",
+ adapter->outstanding_cmds,
+ (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
+ }
+
+ // bailout if no recovery happened in reset time
+ if (adapter->outstanding_cmds == 0) {
+ break;
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock(&adapter->lock);
+
+ // If still outstanding commands, bail out
+ if (adapter->outstanding_cmds) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: critical hardware error!\n"));
+
+ raid_dev->hw_error = 1;
+
+ rval = FAILED;
+ goto out;
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mbox: reset sequence completed successfully\n"));
+ }
+
+
+ // If the controller supports clustering, reset reservations
+ if (!adapter->ha) {
+ rval = SUCCESS;
+ goto out;
+ }
+
+ // clear reservations if any
+ raw_mbox[0] = CLUSTER_CMD;
+ raw_mbox[2] = RESET_RESERVATIONS;
+
+ rval = SUCCESS;
+ if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
+ con_log(CL_ANN,
+ (KERN_INFO "megaraid: reservation reset\n"));
+ }
+ else {
+ rval = FAILED;
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: reservation reset failed\n"));
+ }
+
+ out:
+ spin_unlock(&adapter->lock);
+ return rval;
+}
+
+/*
+ * START: internal commands library
+ *
+ * This section of the driver has the common routine used by the driver and
+ * also has all the FW routines
+ */
+
+/**
+ * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
+ * @adapter : controller's soft state
+ * @raw_mbox : the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode for mailbox based
+ * controllers.
+ */
+static int
+mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ uint8_t status;
+ int i;
+
+
+ mbox64 = raid_dev->mbox64;
+ mbox = raid_dev->mbox;
+
+ /*
+ * Wait until mailbox is free
+ */
+ if (megaraid_busywait_mbox(raid_dev) != 0)
+ goto blocked_mailbox;
+
+ /*
+ * Copy mailbox data into host structure
+ */
+ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
+ mbox->cmdid = 0xFE;
+ mbox->busy = 1;
+ mbox->poll = 0;
+ mbox->ack = 0;
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+
+ wmb();
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+ // wait for maximum 1 second for status to post. If the status is not
+ // available within 1 second, assume FW is initializing and wait
+ // for an extended amount of time
+ if (mbox->numstatus == 0xFF) { // status not yet available
+ udelay(25);
+
+ for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
+ rmb();
+ msleep(1);
+ }
+
+
+ if (i == 1000) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mailbox: wait for FW to boot "));
+
+ for (i = 0; (mbox->numstatus == 0xFF) &&
+ (i < MBOX_RESET_WAIT); i++) {
+ rmb();
+ con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
+ MBOX_RESET_WAIT - i));
+ msleep(1000);
+ }
+
+ if (i == MBOX_RESET_WAIT) {
+
+ con_log(CL_ANN, (
+ "\nmegaraid mailbox: status not available\n"));
+
+ return -1;
+ }
+ con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
+ }
+ }
+
+ // wait for maximum 1 second for poll semaphore
+ if (mbox->poll != 0x77) {
+ udelay(25);
+
+ for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
+ rmb();
+ msleep(1);
+ }
+
+ if (i == 1000) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: could not get poll semaphore\n"));
+ return -1;
+ }
+ }
+
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
+ wmb();
+
+ // wait for maximum 1 second for acknowledgement
+ if (RDINDOOR(raid_dev) & 0x2) {
+ udelay(25);
+
+ for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
+ rmb();
+ msleep(1);
+ }
+
+ if (i == 1000) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: could not acknowledge\n"));
+ return -1;
+ }
+ }
+ mbox->poll = 0;
+ mbox->ack = 0x77;
+
+ status = mbox->status;
+
+ // invalidate the completed command id array. After command
+ // completion, firmware would write the valid id.
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+ for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
+ mbox->completed[i] = 0xFF;
+ }
+
+ return status;
+
+blocked_mailbox:
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
+ return -1;
+}
+
+
+/**
+ * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
+ * @adapter : controller's soft state
+ * @raw_mbox : the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode for mailbox based
+ * controllers. This is a faster version of the synchronous command and
+ * therefore can be called in interrupt-context as well.
+ */
+static int
+mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ long i;
+
+
+ mbox = raid_dev->mbox;
+
+ // return immediately if the mailbox is busy
+ if (mbox->busy) return -1;
+
+ // Copy mailbox data into host structure
+ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
+ mbox->cmdid = 0xFE;
+ mbox->busy = 1;
+ mbox->poll = 0;
+ mbox->ack = 0;
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+
+ wmb();
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+ for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
+ if (mbox->numstatus != 0xFF) break;
+ rmb();
+ udelay(MBOX_SYNC_DELAY_200);
+ }
+
+ if (i == MBOX_SYNC_WAIT_CNT) {
+ // We may need to re-calibrate the counter
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid: fast sync command timed out\n"));
+ }
+
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
+ wmb();
+
+ return mbox->status;
+}
+
+
+/**
+ * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
+ * @raid_dev : RAID device (HBA) soft state
+ *
+ * Wait until the controller's mailbox is available to accept more commands.
+ * Wait for at most 1 second.
+ */
+static int
+megaraid_busywait_mbox(mraid_device_t *raid_dev)
+{
+ mbox_t *mbox = raid_dev->mbox;
+ int i = 0;
+
+ if (mbox->busy) {
+ udelay(25);
+ for (i = 0; mbox->busy && i < 1000; i++)
+ msleep(1);
+ }
+
+ if (i < 1000) return 0;
+ else return -1;
+}
+
+
+/**
+ * megaraid_mbox_product_info - some static information about the controller
+ * @adapter : our soft state
+ *
+ * Issue commands to the controller to grab some parameters required by our
+ * caller.
+ */
+static int
+megaraid_mbox_product_info(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ mraid_pinfo_t *pinfo;
+ dma_addr_t pinfo_dma_h;
+ mraid_inquiry3_t *mraid_inq3;
+ int i;
+
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox = (mbox_t *)raw_mbox;
+
+ /*
+ * Issue an ENQUIRY3 command to find out certain adapter parameters,
+ * e.g., max channels, max commands etc.
+ */
+ pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h);
+
+ if (pinfo == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ return -1;
+ }
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = FC_NEW_CONFIG;
+ raw_mbox[2] = NC_SUBOP_ENQUIRY3;
+ raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
+
+ // Issue the command
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ pinfo, pinfo_dma_h);
+
+ return -1;
+ }
+
+ /*
+ * Collect information about state of each physical drive
+ * attached to the controller. We will expose all the disks
+ * which are not part of RAID
+ */
+ mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
+ for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
+ raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
+ }
+
+ /*
+ * Get product info for information like number of channels,
+ * maximum commands supported.
+ */
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox->xferaddr = (uint32_t)pinfo_dma_h;
+
+ raw_mbox[0] = FC_NEW_CONFIG;
+ raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
+
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: product info failed\n"));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ pinfo, pinfo_dma_h);
+
+ return -1;
+ }
+
+ /*
+ * Setup some parameters for host, as required by our caller
+ */
+ adapter->max_channel = pinfo->nchannels;
+
+ /*
+ * we will export all the logical drives on a single channel.
+ * Add 1 since inquires do not come for inititor ID
+ */
+ adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;
+ adapter->max_lun = 8; // up to 8 LUNs for non-disk devices
+
+ /*
+ * These are the maximum outstanding commands for the scsi-layer
+ */
+ adapter->max_cmds = MBOX_MAX_SCSI_CMDS;
+
+ memset(adapter->fw_version, 0, VERSION_SIZE);
+ memset(adapter->bios_version, 0, VERSION_SIZE);
+
+ memcpy(adapter->fw_version, pinfo->fw_version, 4);
+ adapter->fw_version[4] = 0;
+
+ memcpy(adapter->bios_version, pinfo->bios_version, 4);
+ adapter->bios_version[4] = 0;
+
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: fw version:[%s] bios version:[%s]\n",
+ adapter->fw_version, adapter->bios_version));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ pinfo_dma_h);
+
+ return 0;
+}
+
+
+
+/**
+ * megaraid_mbox_extended_cdb - check for support for extended CDBs
+ * @adapter : soft state for the controller
+ *
+ * This routine check whether the controller in question supports extended
+ * ( > 10 bytes ) CDBs.
+ */
+static int
+megaraid_mbox_extended_cdb(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = MAIN_MISC_OPCODE;
+ raw_mbox[2] = SUPPORT_EXT_CDB;
+
+ /*
+ * Issue the command
+ */
+ rval = 0;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ rval = -1;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_ha - Do we support clustering
+ * @adapter : soft state for the controller
+ * @init_id : ID of the initiator
+ *
+ * Determine if the firmware supports clustering and the ID of the initiator.
+ */
+static int
+megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = GET_TARGET_ID;
+
+ // Issue the command
+ *init_id = 7;
+ rval = -1;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+
+ *init_id = *(uint8_t *)adapter->ibuf;
+
+ con_log(CL_ANN, (KERN_INFO
+ "megaraid: cluster firmware, initiator ID: %d\n",
+ *init_id));
+
+ rval = 0;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_random_del - Do we support random deletion
+ * @adapter : soft state for the controller
+ *
+ * Determine if the firmware supports random deletion.
+ * Return: 1 is operation supported, 0 otherwise
+ */
+static int
+megaraid_mbox_support_random_del(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+ /*
+ * Newer firmware on Dell CERC expect a different
+ * random deletion handling, so disable it.
+ */
+ if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
+ adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
+ adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
+ (adapter->fw_version[0] > '6' ||
+ (adapter->fw_version[0] == '6' &&
+ adapter->fw_version[2] > '6') ||
+ (adapter->fw_version[0] == '6'
+ && adapter->fw_version[2] == '6'
+ && adapter->fw_version[3] > '1'))) {
+ con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
+ return 0;
+ }
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ raw_mbox[0] = FC_DEL_LOGDRV;
+ raw_mbox[2] = OP_SUP_DEL_LOGDRV;
+
+ // Issue the command
+ rval = 0;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+
+ con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
+
+ rval = 1;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
+ * @adapter : soft state for the controller
+ *
+ * Find out the maximum number of scatter-gather elements supported by the
+ * firmware.
+ */
+static int
+megaraid_mbox_get_max_sg(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int nsg;
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = MAIN_MISC_OPCODE;
+ raw_mbox[2] = GET_MAX_SG_SUPPORT;
+
+ // Issue the command
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+ nsg = *(uint8_t *)adapter->ibuf;
+ }
+ else {
+ nsg = MBOX_DEFAULT_SG_SIZE;
+ }
+
+ if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
+
+ return nsg;
+}
+
+
+/**
+ * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
+ * @adapter : soft state for the controller
+ *
+ * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
+ * can be exported as regular SCSI channels.
+ */
+static void
+megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = CHNL_CLASS;
+ raw_mbox[2] = GET_CHNL_CLASS;
+
+ // Issue the command. If the command fails, all channels are RAID
+ // channels
+ raid_dev->channel_class = 0xFF;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+ raid_dev->channel_class = *(uint8_t *)adapter->ibuf;
+ }
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_flush_cache - flush adapter and disks cache
+ * @adapter : soft state for the controller
+ *
+ * Flush adapter cache followed by disks cache.
+ */
+static void
+megaraid_mbox_flush_cache(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ raw_mbox[0] = FLUSH_ADAPTER;
+
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
+ }
+
+ raw_mbox[0] = FLUSH_SYSTEM;
+
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
+ }
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_fire_sync_cmd - fire the sync cmd
+ * @adapter : soft state for the controller
+ *
+ * Clears the pending cmds in FW and reinits its RAID structs.
+ */
+static int
+megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox64_t *mbox64;
+ int status = 0;
+ int i;
+ uint32_t dword;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ raw_mbox[0] = 0xFF;
+
+ mbox64 = raid_dev->mbox64;
+ mbox = raid_dev->mbox;
+
+ /* Wait until mailbox is free */
+ if (megaraid_busywait_mbox(raid_dev) != 0) {
+ status = 1;
+ goto blocked_mailbox;
+ }
+
+ /* Copy mailbox data into host structure */
+ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
+ mbox->cmdid = 0xFE;
+ mbox->busy = 1;
+ mbox->poll = 0;
+ mbox->ack = 0;
+ mbox->numstatus = 0;
+ mbox->status = 0;
+
+ wmb();
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+ /* Wait for maximum 1 min for status to post.
+ * If the Firmware SUPPORTS the ABOVE COMMAND,
+ * mbox->cmd will be set to 0
+ * else
+ * the firmware will reject the command with
+ * mbox->numstatus set to 1
+ */
+
+ i = 0;
+ status = 0;
+ while (!mbox->numstatus && mbox->cmd == 0xFF) {
+ rmb();
+ msleep(1);
+ i++;
+ if (i > 1000 * 60) {
+ status = 1;
+ break;
+ }
+ }
+ if (mbox->numstatus == 1)
+ status = 1; /*cmd not supported*/
+
+ /* Check for interrupt line */
+ dword = RDOUTDOOR(raid_dev);
+ WROUTDOOR(raid_dev, dword);
+ WRINDOOR(raid_dev,2);
+
+ return status;
+
+blocked_mailbox:
+ con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
+ return status;
+}
+
+/**
+ * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
+ * @adapter : controller's soft state
+ * @scb : SCB to be displayed
+ * @level : debug level for console print
+ *
+ * Diplay information about the given SCB iff the current debug level is
+ * verbose.
+ */
+static void
+megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
+{
+ mbox_ccb_t *ccb;
+ struct scsi_cmnd *scp;
+ mbox_t *mbox;
+ int level;
+ int i;
+
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scp = scb->scp;
+ mbox = ccb->mbox;
+
+ level = CL_DLEVEL3;
+
+ con_log(level, (KERN_NOTICE
+ "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
+ mbox->cmd, scb->sno));
+
+ con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
+ mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
+ mbox->numsge));
+
+ if (!scp) return;
+
+ con_log(level, (KERN_NOTICE "scsi cmnd: "));
+
+ for (i = 0; i < scp->cmd_len; i++) {
+ con_log(level, ("%#2.02x ", scp->cmnd[i]));
+ }
+
+ con_log(level, ("\n"));
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_setup_device_map - manage device ids
+ * @adapter : Driver's soft state
+ *
+ * Manage the device ids to have an appropriate mapping between the kernel
+ * scsi addresses and megaraid scsi and logical drive addresses. We export
+ * scsi devices on their actual addresses, whereas the logical drives are
+ * exported on a virtual scsi channel.
+ */
+static void
+megaraid_mbox_setup_device_map(adapter_t *adapter)
+{
+ uint8_t c;
+ uint8_t t;
+
+ /*
+ * First fill the values on the logical drive channel
+ */
+ for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
+ adapter->device_ids[adapter->max_channel][t] =
+ (t < adapter->init_id) ? t : t - 1;
+
+ adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
+
+ /*
+ * Fill the values on the physical devices channels
+ */
+ for (c = 0; c < adapter->max_channel; c++)
+ for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
+ adapter->device_ids[c][t] = (c << 8) | t;
+}
+
+
+/*
+ * END: internal commands library
+ */
+
+/*
+ * START: Interface for the common management module
+ *
+ * This is the module, which interfaces with the common management module to
+ * provide support for ioctl and sysfs
+ */
+
+/**
+ * megaraid_cmm_register - register with the management module
+ * @adapter : HBA soft state
+ *
+ * Register with the management module, which allows applications to issue
+ * ioctl calls to the drivers. This interface is used by the management module
+ * to setup sysfs support as well.
+ */
+static int
+megaraid_cmm_register(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mraid_mmadp_t adp;
+ scb_t *scb;
+ mbox_ccb_t *ccb;
+ int rval;
+ int i;
+
+ // Allocate memory for the base list of scb for management module.
+ adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+ if (adapter->uscb_list == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ return -1;
+ }
+
+
+ // Initialize the synchronization parameters for resources for
+ // commands for management module
+ INIT_LIST_HEAD(&adapter->uscb_pool);
+
+ spin_lock_init(USER_FREE_LIST_LOCK(adapter));
+
+
+
+ // link all the packets. Note, CCB for commands, coming from the
+ // commom management module, mailbox physical address are already
+ // setup by it. We just need placeholder for that in our local command
+ // control blocks
+ for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
+
+ scb = adapter->uscb_list + i;
+ ccb = raid_dev->uccb_list + i;
+
+ scb->ccb = (caddr_t)ccb;
+ ccb->mbox64 = raid_dev->umbox64 + i;
+ ccb->mbox = &ccb->mbox64->mbox32;
+ ccb->raw_mbox = (uint8_t *)ccb->mbox;
+
+ scb->gp = 0;
+
+ // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
+ // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
+ scb->sno = i + MBOX_MAX_SCSI_CMDS;
+
+ scb->scp = NULL;
+ scb->state = SCB_FREE;
+ scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_type = MRAID_DMA_NONE;
+ scb->dev_channel = -1;
+ scb->dev_target = -1;
+
+ // put scb in the free pool
+ list_add_tail(&scb->list, &adapter->uscb_pool);
+ }
+
+ adp.unique_id = adapter->unique_id;
+ adp.drvr_type = DRVRTYPE_MBOX;
+ adp.drvr_data = (unsigned long)adapter;
+ adp.pdev = adapter->pdev;
+ adp.issue_uioc = megaraid_mbox_mm_handler;
+ adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
+ adp.max_kioc = MBOX_MAX_USER_CMDS;
+
+ if ((rval = mraid_mm_register_adp(&adp)) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: did not register with CMM\n"));
+
+ kfree(adapter->uscb_list);
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_cmm_unregister - un-register with the management module
+ * @adapter : HBA soft state
+ *
+ * Un-register with the management module.
+ * FIXME: mgmt module must return failure for unregister if it has pending
+ * commands in LLD.
+ */
+static int
+megaraid_cmm_unregister(adapter_t *adapter)
+{
+ kfree(adapter->uscb_list);
+ mraid_mm_unregister_adp(adapter->unique_id);
+ return 0;
+}
+
+
+/**
+ * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
+ * @drvr_data : LLD specific data
+ * @kioc : CMM interface packet
+ * @action : command action
+ *
+ * This routine is invoked whenever the Common Management Module (CMM) has a
+ * command for us. The 'action' parameter specifies if this is a new command
+ * or otherwise.
+ */
+static int
+megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
+{
+ adapter_t *adapter;
+
+ if (action != IOCTL_ISSUE) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: unsupported management action:%#2x\n",
+ action));
+ return (-ENOTSUPP);
+ }
+
+ adapter = (adapter_t *)drvr_data;
+
+ // make sure this adapter is not being detached right now.
+ if (atomic_read(&adapter->being_detached)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: reject management request, detaching\n"));
+ return (-ENODEV);
+ }
+
+ switch (kioc->opcode) {
+
+ case GET_ADAP_INFO:
+
+ kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *)
+ (unsigned long)kioc->buf_vaddr);
+
+ kioc->done(kioc);
+
+ return kioc->status;
+
+ case MBOX_CMD:
+
+ return megaraid_mbox_mm_command(adapter, kioc);
+
+ default:
+ kioc->status = (-EINVAL);
+ kioc->done(kioc);
+ return (-EINVAL);
+ }
+
+ return 0; // not reached
+}
+
+/**
+ * megaraid_mbox_mm_command - issues commands routed through CMM
+ * @adapter : HBA soft state
+ * @kioc : management command packet
+ *
+ * Issues commands, which are routed through the management module.
+ */
+static int
+megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
+{
+ struct list_head *head = &adapter->uscb_pool;
+ mbox64_t *mbox64;
+ uint8_t *raw_mbox;
+ scb_t *scb;
+ mbox_ccb_t *ccb;
+ unsigned long flags;
+
+ // detach one scb from free pool
+ spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
+
+ if (list_empty(head)) { // should never happen because of CMM
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: bug in cmm handler, lost resources\n"));
+
+ spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+ return (-EINVAL);
+ }
+
+ scb = list_entry(head->next, scb_t, list);
+ list_del_init(&scb->list);
+
+ spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+ scb->state = SCB_ACTIVE;
+ scb->dma_type = MRAID_DMA_NONE;
+ scb->dma_direction = PCI_DMA_NONE;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+ raw_mbox = (uint8_t *)&mbox64->mbox32;
+
+ memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
+
+ scb->gp = (unsigned long)kioc;
+
+ /*
+ * If it is a logdrv random delete operation, we have to wait till
+ * there are no outstanding cmds at the fw and then issue it directly
+ */
+ if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
+
+ if (wait_till_fw_empty(adapter)) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mbox: LD delete, timed out\n"));
+
+ kioc->status = -ETIME;
+
+ scb->status = -1;
+
+ megaraid_mbox_mm_done(adapter, scb);
+
+ return (-ETIME);
+ }
+
+ INIT_LIST_HEAD(&scb->list);
+
+ scb->state = SCB_ISSUED;
+ if (mbox_post_cmd(adapter, scb) != 0) {
+
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mbox: LD delete, mailbox busy\n"));
+
+ kioc->status = -EBUSY;
+
+ scb->status = -1;
+
+ megaraid_mbox_mm_done(adapter, scb);
+
+ return (-EBUSY);
+ }
+
+ return 0;
+ }
+
+ // put the command on the pending list and execute
+ megaraid_mbox_runpendq(adapter, scb);
+
+ return 0;
+}
+
+
+static int
+wait_till_fw_empty(adapter_t *adapter)
+{
+ unsigned long flags = 0;
+ int i;
+
+
+ /*
+ * Set the quiescent flag to stop issuing cmds to FW.
+ */
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->quiescent++;
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ /*
+ * Wait till there are no more cmds outstanding at FW. Try for at most
+ * 60 seconds
+ */
+ for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
+ con_log(CL_DLEVEL1, (KERN_INFO
+ "megaraid: FW has %d pending commands\n",
+ adapter->outstanding_cmds));
+
+ msleep(1000);
+ }
+
+ return adapter->outstanding_cmds;
+}
+
+
+/**
+ * megaraid_mbox_mm_done - callback for CMM commands
+ * @adapter : HBA soft state
+ * @scb : completed command
+ *
+ * Callback routine for internal commands originated from the management
+ * module.
+ */
+static void
+megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
+{
+ uioc_t *kioc;
+ mbox64_t *mbox64;
+ uint8_t *raw_mbox;
+ unsigned long flags;
+
+ kioc = (uioc_t *)scb->gp;
+ mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+ mbox64->mbox32.status = scb->status;
+ raw_mbox = (uint8_t *)&mbox64->mbox32;
+
+
+ // put scb in the free pool
+ scb->state = SCB_FREE;
+ scb->scp = NULL;
+
+ spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
+
+ list_add(&scb->list, &adapter->uscb_pool);
+
+ spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+ // if a delete logical drive operation succeeded, restart the
+ // controller
+ if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
+
+ adapter->quiescent--;
+
+ megaraid_mbox_runpendq(adapter, NULL);
+ }
+
+ kioc->done(kioc);
+
+ return;
+}
+
+
+/**
+ * gather_hbainfo - HBA characteristics for the applications
+ * @adapter : HBA soft state
+ * @hinfo : pointer to the caller's host info strucuture
+ */
+static int
+gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
+{
+ uint8_t dmajor;
+
+ dmajor = megaraid_mbox_version[0];
+
+ hinfo->pci_vendor_id = adapter->pdev->vendor;
+ hinfo->pci_device_id = adapter->pdev->device;
+ hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
+ hinfo->subsys_device_id = adapter->pdev->subsystem_device;
+
+ hinfo->pci_bus = adapter->pdev->bus->number;
+ hinfo->pci_dev_fn = adapter->pdev->devfn;
+ hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn);
+ hinfo->irq = adapter->host->irq;
+ hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport;
+
+ hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn;
+ hinfo->host_no = adapter->host->host_no;
+
+ return 0;
+}
+
+/*
+ * END: Interface for the common management module
+ */
+
+
+
+/**
+ * megaraid_sysfs_alloc_resources - allocate sysfs related resources
+ * @adapter : controller's soft state
+ *
+ * Allocate packets required to issue FW calls whenever the sysfs attributes
+ * are read. These attributes would require up-to-date information from the
+ * FW. Also set up resources for mutual exclusion to share these resources and
+ * the wait queue.
+ *
+ * Return 0 on success.
+ * Return -ERROR_CODE on failure.
+ */
+static int
+megaraid_sysfs_alloc_resources(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ int rval = 0;
+
+ raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
+
+ raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
+
+ raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
+ PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+
+ if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
+ !raid_dev->sysfs_buffer) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ rval = -ENOMEM;
+
+ megaraid_sysfs_free_resources(adapter);
+ }
+
+ mutex_init(&raid_dev->sysfs_mtx);
+
+ init_waitqueue_head(&raid_dev->sysfs_wait_q);
+
+ return rval;
+}
+
+
+/**
+ * megaraid_sysfs_free_resources - free sysfs related resources
+ * @adapter : controller's soft state
+ *
+ * Free packets allocated for sysfs FW commands
+ */
+static void
+megaraid_sysfs_free_resources(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ kfree(raid_dev->sysfs_uioc);
+ kfree(raid_dev->sysfs_mbox64);
+
+ if (raid_dev->sysfs_buffer) {
+ pci_free_consistent(adapter->pdev, PAGE_SIZE,
+ raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
+ }
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap_done - callback for get ldmap
+ * @uioc : completed packet
+ *
+ * Callback routine called in the ISR/tasklet context for get ldmap call
+ */
+static void
+megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
+{
+ adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ uioc->status = 0;
+
+ wake_up(&raid_dev->sysfs_wait_q);
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
+ * @data : timed out packet
+ *
+ * Timeout routine to recover and return to application, in case the adapter
+ * has stopped responding. A timeout of 60 seconds for this command seems like
+ * a good value.
+ */
+static void
+megaraid_sysfs_get_ldmap_timeout(unsigned long data)
+{
+ uioc_t *uioc = (uioc_t *)data;
+ adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ uioc->status = -ETIME;
+
+ wake_up(&raid_dev->sysfs_wait_q);
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap - get update logical drive map
+ * @adapter : controller's soft state
+ *
+ * This routine will be called whenever user reads the logical drive
+ * attributes, go get the current logical drive mapping table from the
+ * firmware. We use the management API's to issue commands to the controller.
+ *
+ * NOTE: The commands issuance functionality is not generalized and
+ * implemented in context of "get ld map" command only. If required, the
+ * command issuance logical can be trivially pulled out and implemented as a
+ * standalone library. For now, this should suffice since there is no other
+ * user of this interface.
+ *
+ * Return 0 on success.
+ * Return -1 on failure.
+ */
+static int
+megaraid_sysfs_get_ldmap(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ uioc_t *uioc;
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ char *raw_mbox;
+ struct timer_list sysfs_timer;
+ struct timer_list *timerp;
+ caddr_t ldmap;
+ int rval = 0;
+
+ /*
+ * Allow only one read at a time to go through the sysfs attributes
+ */
+ mutex_lock(&raid_dev->sysfs_mtx);
+
+ uioc = raid_dev->sysfs_uioc;
+ mbox64 = raid_dev->sysfs_mbox64;
+ ldmap = raid_dev->sysfs_buffer;
+
+ memset(uioc, 0, sizeof(uioc_t));
+ memset(mbox64, 0, sizeof(mbox64_t));
+ memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
+
+ mbox = &mbox64->mbox32;
+ raw_mbox = (char *)mbox;
+ uioc->cmdbuf = (uint64_t)(unsigned long)mbox64;
+ uioc->buf_vaddr = (caddr_t)adapter;
+ uioc->status = -ENODATA;
+ uioc->done = megaraid_sysfs_get_ldmap_done;
+
+ /*
+ * Prepare the mailbox packet to get the current logical drive mapping
+ * table
+ */
+ mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
+
+ raw_mbox[0] = FC_DEL_LOGDRV;
+ raw_mbox[2] = OP_GET_LDID_MAP;
+
+ /*
+ * Setup a timer to recover from a non-responding controller
+ */
+ timerp = &sysfs_timer;
+ init_timer(timerp);
+
+ timerp->function = megaraid_sysfs_get_ldmap_timeout;
+ timerp->data = (unsigned long)uioc;
+ timerp->expires = jiffies + 60 * HZ;
+
+ add_timer(timerp);
+
+ /*
+ * Send the command to the firmware
+ */
+ rval = megaraid_mbox_mm_command(adapter, uioc);
+
+ if (rval == 0) { // command successfully issued
+ wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
+
+ /*
+ * Check if the command timed out
+ */
+ if (uioc->status == -ETIME) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: sysfs get ld map timed out\n"));
+
+ rval = -ETIME;
+ }
+ else {
+ rval = mbox->status;
+ }
+
+ if (rval == 0) {
+ memcpy(raid_dev->curr_ldmap, ldmap,
+ sizeof(raid_dev->curr_ldmap));
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: get ld map failed with %x\n", rval));
+ }
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: could not issue ldmap command:%x\n", rval));
+ }
+
+
+ del_timer_sync(timerp);
+
+ mutex_unlock(&raid_dev->sysfs_mtx);
+
+ return rval;
+}
+
+
+/**
+ * megaraid_sysfs_show_app_hndl - display application handle for this adapter
+ * @cdev : class device object representation for the host
+ * @buf : buffer to send data to
+ *
+ * Display the handle used by the applications while executing management
+ * tasks on the adapter. We invoke a management module API to get the adapter
+ * handle, since we do not interface with applications directly.
+ */
+static ssize_t
+megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
+ uint32_t app_hndl;
+
+ app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
+
+ return snprintf(buf, 8, "%u\n", app_hndl);
+}
+
+
+/**
+ * megaraid_sysfs_show_ldnum - display the logical drive number for this device
+ * @dev : device object representation for the scsi device
+ * @attr : device attribute to show
+ * @buf : buffer to send data to
+ *
+ * Display the logical drive number for the device in question, if it a valid
+ * logical drive. For physical devices, "-1" is returned.
+ *
+ * The logical drive number is displayed in following format:
+ *
+ * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
+ *
+ * <int> <int> <int> <int>
+ */
+static ssize_t
+megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ int scsi_id = -1;
+ int logical_drv = -1;
+ int ldid_map = -1;
+ uint32_t app_hndl = 0;
+ int mapped_sdev_id;
+ int rval;
+ int i;
+
+ if (raid_dev->random_del_supported &&
+ MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
+
+ rval = megaraid_sysfs_get_ldmap(adapter);
+ if (rval == 0) {
+
+ for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
+
+ mapped_sdev_id = sdev->id;
+
+ if (sdev->id > adapter->init_id) {
+ mapped_sdev_id -= 1;
+ }
+
+ if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
+
+ scsi_id = sdev->id;
+
+ logical_drv = i;
+
+ ldid_map = raid_dev->curr_ldmap[i];
+
+ app_hndl = mraid_mm_adapter_app_handle(
+ adapter->unique_id);
+
+ break;
+ }
+ }
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: sysfs get ld map failed: %x\n",
+ rval));
+ }
+ }
+
+ return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+ ldid_map, app_hndl);
+}
+
+
+/*
+ * END: Mailbox Low Level Driver
+ */
+module_init(megaraid_init);
+module_exit(megaraid_exit);
+
+/* vim: set ts=8 sw=8 tw=78 ai si: */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
new file mode 100644
index 000000000..c1d86d961
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -0,0 +1,238 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mbox.h
+ */
+
+#ifndef _MEGARAID_H_
+#define _MEGARAID_H_
+
+
+#include "mega_common.h"
+#include "mbox_defs.h"
+#include "megaraid_ioctl.h"
+
+
+#define MEGARAID_VERSION "2.20.5.1"
+#define MEGARAID_EXT_VERSION "(Release Date: Thu Nov 16 15:32:35 EST 2006)"
+
+
+/*
+ * Define some PCI values here until they are put in the kernel
+ */
+#define PCI_DEVICE_ID_PERC4_DI_DISCOVERY 0x000E
+#define PCI_SUBSYS_ID_PERC4_DI_DISCOVERY 0x0123
+
+#define PCI_DEVICE_ID_PERC4_SC 0x1960
+#define PCI_SUBSYS_ID_PERC4_SC 0x0520
+
+#define PCI_DEVICE_ID_PERC4_DC 0x1960
+#define PCI_SUBSYS_ID_PERC4_DC 0x0518
+
+#define PCI_DEVICE_ID_VERDE 0x0407
+
+#define PCI_DEVICE_ID_PERC4_DI_EVERGLADES 0x000F
+#define PCI_SUBSYS_ID_PERC4_DI_EVERGLADES 0x014A
+
+#define PCI_DEVICE_ID_PERC4E_SI_BIGBEND 0x0013
+#define PCI_SUBSYS_ID_PERC4E_SI_BIGBEND 0x016c
+
+#define PCI_DEVICE_ID_PERC4E_DI_KOBUK 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_KOBUK 0x016d
+
+#define PCI_DEVICE_ID_PERC4E_DI_CORVETTE 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_CORVETTE 0x016e
+
+#define PCI_DEVICE_ID_PERC4E_DI_EXPEDITION 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION 0x016f
+
+#define PCI_DEVICE_ID_PERC4E_DI_GUADALUPE 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE 0x0170
+
+#define PCI_DEVICE_ID_DOBSON 0x0408
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0 0xA520
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1 0x0520
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2 0x0518
+
+#define PCI_DEVICE_ID_MEGARAID_I4_133_RAID 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_I4_133_RAID 0x0522
+
+#define PCI_DEVICE_ID_MEGARAID_SATA_150_4 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SATA_150_4 0x4523
+
+#define PCI_DEVICE_ID_MEGARAID_SATA_150_6 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SATA_150_6 0x0523
+
+#define PCI_DEVICE_ID_LINDSAY 0x0409
+
+#define PCI_DEVICE_ID_INTEL_RAID_SRCS16 0x1960
+#define PCI_SUBSYS_ID_INTEL_RAID_SRCS16 0x0523
+
+#define PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x1960
+#define PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x0520
+
+#define PCI_SUBSYS_ID_PERC3_QC 0x0471
+#define PCI_SUBSYS_ID_PERC3_DC 0x0493
+#define PCI_SUBSYS_ID_PERC3_SC 0x0475
+#define PCI_SUBSYS_ID_CERC_ATA100_4CH 0x0511
+
+
+#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel
+#define MBOX_MAX_USER_CMDS 32 // number of cmds for applications
+#define MBOX_DEF_CMD_PER_LUN 64 // default commands per lun
+#define MBOX_DEFAULT_SG_SIZE 26 // default sg size supported by all fw
+#define MBOX_MAX_SG_SIZE 32 // maximum scatter-gather list size
+#define MBOX_MAX_SECTORS 128 // maximum sectors per IO
+#define MBOX_TIMEOUT 30 // timeout value for internal cmds
+#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
+#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
+#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
+#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
+
+#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
+
+/*
+ * maximum transfer that can happen through the firmware commands issued
+ * internnaly from the driver.
+ */
+#define MBOX_IBUF_SIZE 4096
+
+
+/**
+ * mbox_ccb_t - command control block specific to mailbox based controllers
+ * @raw_mbox : raw mailbox pointer
+ * @mbox : mailbox
+ * @mbox64 : extended mailbox
+ * @mbox_dma_h : maibox dma address
+ * @sgl64 : 64-bit scatter-gather list
+ * @sgl32 : 32-bit scatter-gather list
+ * @sgl_dma_h : dma handle for the scatter-gather list
+ * @pthru : passthru structure
+ * @pthru_dma_h : dma handle for the passthru structure
+ * @epthru : extended passthru structure
+ * @epthru_dma_h : dma handle for extended passthru structure
+ * @buf_dma_h : dma handle for buffers w/o sg list
+ *
+ * command control block specific to the mailbox based controllers
+ */
+typedef struct {
+ uint8_t *raw_mbox;
+ mbox_t *mbox;
+ mbox64_t *mbox64;
+ dma_addr_t mbox_dma_h;
+ mbox_sgl64 *sgl64;
+ mbox_sgl32 *sgl32;
+ dma_addr_t sgl_dma_h;
+ mraid_passthru_t *pthru;
+ dma_addr_t pthru_dma_h;
+ mraid_epassthru_t *epthru;
+ dma_addr_t epthru_dma_h;
+ dma_addr_t buf_dma_h;
+} mbox_ccb_t;
+
+
+/**
+ * mraid_device_t - adapter soft state structure for mailbox controllers
+ * @una_mbox64 : 64-bit mbox - unaligned
+ * @una_mbox64_dma : mbox dma addr - unaligned
+ * @mbox : 32-bit mbox - aligned
+ * @mbox64 : 64-bit mbox - aligned
+ * @mbox_dma : mbox dma addr - aligned
+ * @mailbox_lock : exclusion lock for the mailbox
+ * @baseport : base port of hba memory
+ * @baseaddr : mapped addr of hba memory
+ * @mbox_pool : pool of mailboxes
+ * @mbox_pool_handle : handle for the mailbox pool memory
+ * @epthru_pool : a pool for extended passthru commands
+ * @epthru_pool_handle : handle to the pool above
+ * @sg_pool : pool of scatter-gather lists for this driver
+ * @sg_pool_handle : handle to the pool above
+ * @ccb_list : list of our command control blocks
+ * @uccb_list : list of cmd control blocks for mgmt module
+ * @umbox64 : array of mailbox for user commands (cmm)
+ * @pdrv_state : array for state of each physical drive.
+ * @last_disp : flag used to show device scanning
+ * @hw_error : set if FW not responding
+ * @fast_load : If set, skip physical device scanning
+ * @channel_class : channel class, RAID or SCSI
+ * @sysfs_mtx : mutex to serialize access to sysfs res.
+ * @sysfs_uioc : management packet to issue FW calls from sysfs
+ * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs
+ * @sysfs_buffer : data buffer for FW commands issued from sysfs
+ * @sysfs_buffer_dma : DMA buffer for FW commands issued from sysfs
+ * @sysfs_wait_q : wait queue for sysfs operations
+ * @random_del_supported : set if the random deletion is supported
+ * @curr_ldmap : current LDID map
+ *
+ * Initialization structure for mailbox controllers: memory based and IO based
+ * All the fields in this structure are LLD specific and may be discovered at
+ * init() or start() time.
+ *
+ * NOTE: The fields of this structures are placed to minimize cache misses
+ */
+#define MAX_LD_EXTENDED64 64
+typedef struct {
+ mbox64_t *una_mbox64;
+ dma_addr_t una_mbox64_dma;
+ mbox_t *mbox;
+ mbox64_t *mbox64;
+ dma_addr_t mbox_dma;
+ spinlock_t mailbox_lock;
+ unsigned long baseport;
+ void __iomem * baseaddr;
+ struct mraid_pci_blk mbox_pool[MBOX_MAX_SCSI_CMDS];
+ struct dma_pool *mbox_pool_handle;
+ struct mraid_pci_blk epthru_pool[MBOX_MAX_SCSI_CMDS];
+ struct dma_pool *epthru_pool_handle;
+ struct mraid_pci_blk sg_pool[MBOX_MAX_SCSI_CMDS];
+ struct dma_pool *sg_pool_handle;
+ mbox_ccb_t ccb_list[MBOX_MAX_SCSI_CMDS];
+ mbox_ccb_t uccb_list[MBOX_MAX_USER_CMDS];
+ mbox64_t umbox64[MBOX_MAX_USER_CMDS];
+
+ uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
+ uint32_t last_disp;
+ int hw_error;
+ int fast_load;
+ uint8_t channel_class;
+ struct mutex sysfs_mtx;
+ uioc_t *sysfs_uioc;
+ mbox64_t *sysfs_mbox64;
+ caddr_t sysfs_buffer;
+ dma_addr_t sysfs_buffer_dma;
+ wait_queue_head_t sysfs_wait_q;
+ int random_del_supported;
+ uint16_t curr_ldmap[MAX_LD_EXTENDED64];
+} mraid_device_t;
+
+// route to raid device from adapter
+#define ADAP2RAIDDEV(adp) ((mraid_device_t *)((adp)->raid_device))
+
+#define MAILBOX_LOCK(rdev) (&(rdev)->mailbox_lock)
+
+// Find out if this channel is a RAID or SCSI
+#define IS_RAID_CH(rdev, ch) (((rdev)->channel_class >> (ch)) & 0x01)
+
+
+#define RDINDOOR(rdev) readl((rdev)->baseaddr + 0x20)
+#define RDOUTDOOR(rdev) readl((rdev)->baseaddr + 0x2C)
+#define WRINDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x20)
+#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C)
+
+#endif // _MEGARAID_H_
+
+// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
new file mode 100644
index 000000000..a70692779
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -0,0 +1,1263 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mm.c
+ * Version : v2.20.2.7 (Jul 16 2006)
+ *
+ * Common management module
+ */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "megaraid_mm.h"
+
+
+// Entry points for char node driver
+static DEFINE_MUTEX(mraid_mm_mutex);
+static int mraid_mm_open(struct inode *, struct file *);
+static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
+
+
+// routines to convert to and from the old the format
+static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
+static int kioc_to_mimd(uioc_t *, mimd_t __user *);
+
+
+// Helper functions
+static int handle_drvrcmd(void __user *, uint8_t, int *);
+static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
+static void ioctl_done(uioc_t *);
+static void lld_timedout(unsigned long);
+static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
+static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
+static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
+static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
+static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
+static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
+static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
+static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
+
+#ifdef CONFIG_COMPAT
+static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+MODULE_AUTHOR("LSI Logic Corporation");
+MODULE_DESCRIPTION("LSI Logic Management Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LSI_COMMON_MOD_VERSION);
+
+static int dbglevel = CL_ANN;
+module_param_named(dlevel, dbglevel, int, 0);
+MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
+
+EXPORT_SYMBOL(mraid_mm_register_adp);
+EXPORT_SYMBOL(mraid_mm_unregister_adp);
+EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
+
+static uint32_t drvr_ver = 0x02200207;
+
+static int adapters_count_g;
+static struct list_head adapters_list_g;
+
+static wait_queue_head_t wait_q;
+
+static const struct file_operations lsi_fops = {
+ .open = mraid_mm_open,
+ .unlocked_ioctl = mraid_mm_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mraid_mm_compat_ioctl,
+#endif
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice megaraid_mm_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "megadev0",
+ .fops = &lsi_fops,
+};
+
+/**
+ * mraid_mm_open - open routine for char node interface
+ * @inode : unused
+ * @filep : unused
+ *
+ * Allow ioctl operations by apps only if they have superuser privilege.
+ */
+static int
+mraid_mm_open(struct inode *inode, struct file *filep)
+{
+ /*
+ * Only allow superuser to access private ioctl interface
+ */
+ if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
+
+ return 0;
+}
+
+/**
+ * mraid_mm_ioctl - module entry-point for ioctls
+ * @inode : inode (ignored)
+ * @filep : file operations pointer (ignored)
+ * @cmd : ioctl command
+ * @arg : user ioctl packet
+ */
+static int
+mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ uioc_t *kioc;
+ char signature[EXT_IOCTL_SIGN_SZ] = {0};
+ int rval;
+ mraid_mmadp_t *adp;
+ uint8_t old_ioctl;
+ int drvrcmd_rval;
+ void __user *argp = (void __user *)arg;
+
+ /*
+ * Make sure only USCSICMD are issued through this interface.
+ * MIMD application would still fire different command.
+ */
+
+ if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
+ return (-EINVAL);
+ }
+
+ /*
+ * Look for signature to see if this is the new or old ioctl format.
+ */
+ if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: copy from usr addr failed\n"));
+ return (-EFAULT);
+ }
+
+ if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
+ old_ioctl = 0;
+ else
+ old_ioctl = 1;
+
+ /*
+ * At present, we don't support the new ioctl packet
+ */
+ if (!old_ioctl )
+ return (-EINVAL);
+
+ /*
+ * If it is a driver ioctl (as opposed to fw ioctls), then we can
+ * handle the command locally. rval > 0 means it is not a drvr cmd
+ */
+ rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
+
+ if (rval < 0)
+ return rval;
+ else if (rval == 0)
+ return drvrcmd_rval;
+
+ rval = 0;
+ if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
+ return rval;
+ }
+
+ /*
+ * Check if adapter can accept ioctl. We may have marked it offline
+ * if any previous kioc had timedout on this controller.
+ */
+ if (!adp->quiescent) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: controller cannot accept cmds due to "
+ "earlier errors\n" ));
+ return -EFAULT;
+ }
+
+ /*
+ * The following call will block till a kioc is available
+ */
+ kioc = mraid_mm_alloc_kioc(adp);
+
+ /*
+ * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
+ */
+ if ((rval = mimd_to_kioc(argp, adp, kioc))) {
+ mraid_mm_dealloc_kioc(adp, kioc);
+ return rval;
+ }
+
+ kioc->done = ioctl_done;
+
+ /*
+ * Issue the IOCTL to the low level driver. After the IOCTL completes
+ * release the kioc if and only if it was _not_ timedout. If it was
+ * timedout, that means that resources are still with low level driver.
+ */
+ if ((rval = lld_ioctl(adp, kioc))) {
+
+ if (!kioc->timedout)
+ mraid_mm_dealloc_kioc(adp, kioc);
+
+ return rval;
+ }
+
+ /*
+ * Convert the kioc back to user space
+ */
+ rval = kioc_to_mimd(kioc, argp);
+
+ /*
+ * Return the kioc to free pool
+ */
+ mraid_mm_dealloc_kioc(adp, kioc);
+
+ return rval;
+}
+
+static long
+mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
+ mutex_lock(&mraid_mm_mutex);
+ err = mraid_mm_ioctl(filep, cmd, arg);
+ mutex_unlock(&mraid_mm_mutex);
+
+ return err;
+}
+
+/**
+ * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
+ * @umimd : User space mimd_t ioctl packet
+ * @rval : returned success/error status
+ *
+ * The function return value is a pointer to the located @adapter.
+ */
+static mraid_mmadp_t *
+mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
+{
+ mraid_mmadp_t *adapter;
+ mimd_t mimd;
+ uint32_t adapno;
+ int iterator;
+
+
+ if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
+ *rval = -EFAULT;
+ return NULL;
+ }
+
+ adapno = GETADAP(mimd.ui.fcs.adapno);
+
+ if (adapno >= adapters_count_g) {
+ *rval = -ENODEV;
+ return NULL;
+ }
+
+ adapter = NULL;
+ iterator = 0;
+
+ list_for_each_entry(adapter, &adapters_list_g, list) {
+ if (iterator++ == adapno) break;
+ }
+
+ if (!adapter) {
+ *rval = -ENODEV;
+ return NULL;
+ }
+
+ return adapter;
+}
+
+/**
+ * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
+ * @arg : packet sent by the user app
+ * @old_ioctl : mimd if 1; uioc otherwise
+ * @rval : pointer for command's returned value (not function status)
+ */
+static int
+handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
+{
+ mimd_t __user *umimd;
+ mimd_t kmimd;
+ uint8_t opcode;
+ uint8_t subopcode;
+
+ if (old_ioctl)
+ goto old_packet;
+ else
+ goto new_packet;
+
+new_packet:
+ return (-ENOTSUPP);
+
+old_packet:
+ *rval = 0;
+ umimd = arg;
+
+ if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
+ return (-EFAULT);
+
+ opcode = kmimd.ui.fcs.opcode;
+ subopcode = kmimd.ui.fcs.subopcode;
+
+ /*
+ * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
+ * GET_NUMADP, then we can handle. Otherwise we should return 1 to
+ * indicate that we cannot handle this.
+ */
+ if (opcode != 0x82)
+ return 1;
+
+ switch (subopcode) {
+
+ case MEGAIOC_QDRVRVER:
+
+ if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
+ return (-EFAULT);
+
+ return 0;
+
+ case MEGAIOC_QNADAP:
+
+ *rval = adapters_count_g;
+
+ if (copy_to_user(kmimd.data, &adapters_count_g,
+ sizeof(uint32_t)))
+ return (-EFAULT);
+
+ return 0;
+
+ default:
+ /* cannot handle */
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * mimd_to_kioc - Converter from old to new ioctl format
+ * @umimd : user space old MIMD IOCTL
+ * @adp : adapter softstate
+ * @kioc : kernel space new format IOCTL
+ *
+ * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
+ * new packet is in kernel space so that driver can perform operations on it
+ * freely.
+ */
+
+static int
+mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
+{
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ mraid_passthru_t *pthru32;
+ uint32_t adapno;
+ uint8_t opcode;
+ uint8_t subopcode;
+ mimd_t mimd;
+
+ if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
+ return (-EFAULT);
+
+ /*
+ * Applications are not allowed to send extd pthru
+ */
+ if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
+ (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
+ return (-EINVAL);
+
+ opcode = mimd.ui.fcs.opcode;
+ subopcode = mimd.ui.fcs.subopcode;
+ adapno = GETADAP(mimd.ui.fcs.adapno);
+
+ if (adapno >= adapters_count_g)
+ return (-ENODEV);
+
+ kioc->adapno = adapno;
+ kioc->mb_type = MBOX_LEGACY;
+ kioc->app_type = APPTYPE_MIMD;
+
+ switch (opcode) {
+
+ case 0x82:
+
+ if (subopcode == MEGAIOC_QADAPINFO) {
+
+ kioc->opcode = GET_ADAP_INFO;
+ kioc->data_dir = UIOC_RD;
+ kioc->xferlen = sizeof(mraid_hba_info_t);
+
+ if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+ return (-ENOMEM);
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: Invalid subop\n"));
+ return (-EINVAL);
+ }
+
+ break;
+
+ case 0x81:
+
+ kioc->opcode = MBOX_CMD;
+ kioc->xferlen = mimd.ui.fcs.length;
+ kioc->user_data_len = kioc->xferlen;
+ kioc->user_data = mimd.ui.fcs.buffer;
+
+ if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+ return (-ENOMEM);
+
+ if (mimd.outlen) kioc->data_dir = UIOC_RD;
+ if (mimd.inlen) kioc->data_dir |= UIOC_WR;
+
+ break;
+
+ case 0x80:
+
+ kioc->opcode = MBOX_CMD;
+ kioc->xferlen = (mimd.outlen > mimd.inlen) ?
+ mimd.outlen : mimd.inlen;
+ kioc->user_data_len = kioc->xferlen;
+ kioc->user_data = mimd.data;
+
+ if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+ return (-ENOMEM);
+
+ if (mimd.outlen) kioc->data_dir = UIOC_RD;
+ if (mimd.inlen) kioc->data_dir |= UIOC_WR;
+
+ break;
+
+ default:
+ return (-EINVAL);
+ }
+
+ /*
+ * If driver command, nothing else to do
+ */
+ if (opcode == 0x82)
+ return 0;
+
+ /*
+ * This is a mailbox cmd; copy the mailbox from mimd
+ */
+ mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
+ mbox = &mbox64->mbox32;
+ memcpy(mbox, mimd.mbox, 14);
+
+ if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD
+
+ mbox->xferaddr = (uint32_t)kioc->buf_paddr;
+
+ if (kioc->data_dir & UIOC_WR) {
+ if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+ kioc->xferlen)) {
+ return (-EFAULT);
+ }
+ }
+
+ return 0;
+ }
+
+ /*
+ * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
+ * Just like in above case, the beginning for memblk is treated as
+ * a mailbox. The passthru will begin at next 1K boundary. And the
+ * data will start 1K after that.
+ */
+ pthru32 = kioc->pthru32;
+ kioc->user_pthru = &umimd->pthru;
+ mbox->xferaddr = (uint32_t)kioc->pthru32_h;
+
+ if (copy_from_user(pthru32, kioc->user_pthru,
+ sizeof(mraid_passthru_t))) {
+ return (-EFAULT);
+ }
+
+ pthru32->dataxferaddr = kioc->buf_paddr;
+ if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
+ if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+ pthru32->dataxferlen)) {
+ return (-EFAULT);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mraid_mm_attch_buf - Attach a free dma buffer for required size
+ * @adp : Adapter softstate
+ * @kioc : kioc that the buffer needs to be attached to
+ * @xferlen : required length for buffer
+ *
+ * First we search for a pool with smallest buffer that is >= @xferlen. If
+ * that pool has no free buffer, we will try for the next bigger size. If none
+ * is available, we will try to allocate the smallest buffer that is >=
+ * @xferlen and attach it the pool.
+ */
+static int
+mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
+{
+ mm_dmapool_t *pool;
+ int right_pool = -1;
+ unsigned long flags;
+ int i;
+
+ kioc->pool_index = -1;
+ kioc->buf_vaddr = NULL;
+ kioc->buf_paddr = 0;
+ kioc->free_buf = 0;
+
+ /*
+ * We need xferlen amount of memory. See if we can get it from our
+ * dma pools. If we don't get exact size, we will try bigger buffer
+ */
+
+ for (i = 0; i < MAX_DMA_POOLS; i++) {
+
+ pool = &adp->dma_pool_list[i];
+
+ if (xferlen > pool->buf_size)
+ continue;
+
+ if (right_pool == -1)
+ right_pool = i;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!pool->in_use) {
+
+ pool->in_use = 1;
+ kioc->pool_index = i;
+ kioc->buf_vaddr = pool->vaddr;
+ kioc->buf_paddr = pool->paddr;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return 0;
+ }
+ else {
+ spin_unlock_irqrestore(&pool->lock, flags);
+ continue;
+ }
+ }
+
+ /*
+ * If xferlen doesn't match any of our pools, return error
+ */
+ if (right_pool == -1)
+ return -EINVAL;
+
+ /*
+ * We did not get any buffer from the preallocated pool. Let us try
+ * to allocate one new buffer. NOTE: This is a blocking call.
+ */
+ pool = &adp->dma_pool_list[right_pool];
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ kioc->pool_index = right_pool;
+ kioc->free_buf = 1;
+ kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+ &kioc->buf_paddr);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (!kioc->buf_vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * mraid_mm_alloc_kioc - Returns a uioc_t from free list
+ * @adp : Adapter softstate for this module
+ *
+ * The kioc_semaphore is initialized with number of kioc nodes in the
+ * free kioc pool. If the kioc pool is empty, this function blocks till
+ * a kioc becomes free.
+ */
+static uioc_t *
+mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
+{
+ uioc_t *kioc;
+ struct list_head* head;
+ unsigned long flags;
+
+ down(&adp->kioc_semaphore);
+
+ spin_lock_irqsave(&adp->kioc_pool_lock, flags);
+
+ head = &adp->kioc_pool;
+
+ if (list_empty(head)) {
+ up(&adp->kioc_semaphore);
+ spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+ con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
+ return NULL;
+ }
+
+ kioc = list_entry(head->next, uioc_t, list);
+ list_del_init(&kioc->list);
+
+ spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+ memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
+ memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
+
+ kioc->buf_vaddr = NULL;
+ kioc->buf_paddr = 0;
+ kioc->pool_index =-1;
+ kioc->free_buf = 0;
+ kioc->user_data = NULL;
+ kioc->user_data_len = 0;
+ kioc->user_pthru = NULL;
+ kioc->timedout = 0;
+
+ return kioc;
+}
+
+/**
+ * mraid_mm_dealloc_kioc - Return kioc to free pool
+ * @adp : Adapter softstate
+ * @kioc : uioc_t node to be returned to free pool
+ */
+static void
+mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
+{
+ mm_dmapool_t *pool;
+ unsigned long flags;
+
+ if (kioc->pool_index != -1) {
+ pool = &adp->dma_pool_list[kioc->pool_index];
+
+ /* This routine may be called in non-isr context also */
+ spin_lock_irqsave(&pool->lock, flags);
+
+ /*
+ * While attaching the dma buffer, if we didn't get the
+ * required buffer from the pool, we would have allocated
+ * it at the run time and set the free_buf flag. We must
+ * free that buffer. Otherwise, just mark that the buffer is
+ * not in use
+ */
+ if (kioc->free_buf == 1)
+ pci_pool_free(pool->handle, kioc->buf_vaddr,
+ kioc->buf_paddr);
+ else
+ pool->in_use = 0;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ }
+
+ /* Return the kioc to the free pool */
+ spin_lock_irqsave(&adp->kioc_pool_lock, flags);
+ list_add(&kioc->list, &adp->kioc_pool);
+ spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+ /* increment the free kioc count */
+ up(&adp->kioc_semaphore);
+
+ return;
+}
+
+/**
+ * lld_ioctl - Routine to issue ioctl to low level drvr
+ * @adp : The adapter handle
+ * @kioc : The ioctl packet with kernel addresses
+ */
+static int
+lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
+{
+ int rval;
+ struct timer_list timer;
+ struct timer_list *tp = NULL;
+
+ kioc->status = -ENODATA;
+ rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
+
+ if (rval) return rval;
+
+ /*
+ * Start the timer
+ */
+ if (adp->timeout > 0) {
+ tp = &timer;
+ init_timer(tp);
+
+ tp->function = lld_timedout;
+ tp->data = (unsigned long)kioc;
+ tp->expires = jiffies + adp->timeout * HZ;
+
+ add_timer(tp);
+ }
+
+ /*
+ * Wait till the low level driver completes the ioctl. After this
+ * call, the ioctl either completed successfully or timedout.
+ */
+ wait_event(wait_q, (kioc->status != -ENODATA));
+ if (tp) {
+ del_timer_sync(tp);
+ }
+
+ /*
+ * If the command had timedout, we mark the controller offline
+ * before returning
+ */
+ if (kioc->timedout) {
+ adp->quiescent = 0;
+ }
+
+ return kioc->status;
+}
+
+
+/**
+ * ioctl_done - callback from the low level driver
+ * @kioc : completed ioctl packet
+ */
+static void
+ioctl_done(uioc_t *kioc)
+{
+ uint32_t adapno;
+ int iterator;
+ mraid_mmadp_t* adapter;
+
+ /*
+ * When the kioc returns from driver, make sure it still doesn't
+ * have ENODATA in status. Otherwise, driver will hang on wait_event
+ * forever
+ */
+ if (kioc->status == -ENODATA) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: lld didn't change status!\n"));
+
+ kioc->status = -EINVAL;
+ }
+
+ /*
+ * Check if this kioc was timedout before. If so, nobody is waiting
+ * on this kioc. We don't have to wake up anybody. Instead, we just
+ * have to free the kioc
+ */
+ if (kioc->timedout) {
+ iterator = 0;
+ adapter = NULL;
+ adapno = kioc->adapno;
+
+ con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
+ "ioctl that was timedout before\n"));
+
+ list_for_each_entry(adapter, &adapters_list_g, list) {
+ if (iterator++ == adapno) break;
+ }
+
+ kioc->timedout = 0;
+
+ if (adapter) {
+ mraid_mm_dealloc_kioc( adapter, kioc );
+ }
+ }
+ else {
+ wake_up(&wait_q);
+ }
+}
+
+
+/**
+ * lld_timedout - callback from the expired timer
+ * @ptr : ioctl packet that timed out
+ */
+static void
+lld_timedout(unsigned long ptr)
+{
+ uioc_t *kioc = (uioc_t *)ptr;
+
+ kioc->status = -ETIME;
+ kioc->timedout = 1;
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
+
+ wake_up(&wait_q);
+}
+
+
+/**
+ * kioc_to_mimd - Converter from new back to old format
+ * @kioc : Kernel space IOCTL packet (successfully issued)
+ * @mimd : User space MIMD packet
+ */
+static int
+kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
+{
+ mimd_t kmimd;
+ uint8_t opcode;
+ uint8_t subopcode;
+
+ mbox64_t *mbox64;
+ mraid_passthru_t __user *upthru32;
+ mraid_passthru_t *kpthru32;
+ mcontroller_t cinfo;
+ mraid_hba_info_t *hinfo;
+
+
+ if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
+ return (-EFAULT);
+
+ opcode = kmimd.ui.fcs.opcode;
+ subopcode = kmimd.ui.fcs.subopcode;
+
+ if (opcode == 0x82) {
+ switch (subopcode) {
+
+ case MEGAIOC_QADAPINFO:
+
+ hinfo = (mraid_hba_info_t *)(unsigned long)
+ kioc->buf_vaddr;
+
+ hinfo_to_cinfo(hinfo, &cinfo);
+
+ if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
+ return (-EFAULT);
+
+ return 0;
+
+ default:
+ return (-EINVAL);
+ }
+
+ return 0;
+ }
+
+ mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+
+ if (kioc->user_pthru) {
+
+ upthru32 = kioc->user_pthru;
+ kpthru32 = kioc->pthru32;
+
+ if (copy_to_user(&upthru32->scsistatus,
+ &kpthru32->scsistatus,
+ sizeof(uint8_t))) {
+ return (-EFAULT);
+ }
+ }
+
+ if (kioc->user_data) {
+ if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
+ kioc->user_data_len)) {
+ return (-EFAULT);
+ }
+ }
+
+ if (copy_to_user(&mimd->mbox[17],
+ &mbox64->mbox32.status, sizeof(uint8_t))) {
+ return (-EFAULT);
+ }
+
+ return 0;
+}
+
+
+/**
+ * hinfo_to_cinfo - Convert new format hba info into old format
+ * @hinfo : New format, more comprehensive adapter info
+ * @cinfo : Old format adapter info to support mimd_t apps
+ */
+static void
+hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
+{
+ if (!hinfo || !cinfo)
+ return;
+
+ cinfo->base = hinfo->baseport;
+ cinfo->irq = hinfo->irq;
+ cinfo->numldrv = hinfo->num_ldrv;
+ cinfo->pcibus = hinfo->pci_bus;
+ cinfo->pcidev = hinfo->pci_slot;
+ cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
+ cinfo->pciid = hinfo->pci_device_id;
+ cinfo->pcivendor = hinfo->pci_vendor_id;
+ cinfo->pcislot = hinfo->pci_slot;
+ cinfo->uid = hinfo->unique_id;
+}
+
+
+/**
+ * mraid_mm_register_adp - Registration routine for low level drivers
+ * @lld_adp : Adapter object
+ */
+int
+mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
+{
+ mraid_mmadp_t *adapter;
+ mbox64_t *mbox_list;
+ uioc_t *kioc;
+ uint32_t rval;
+ int i;
+
+
+ if (lld_adp->drvr_type != DRVRTYPE_MBOX)
+ return (-EINVAL);
+
+ adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
+
+ if (!adapter)
+ return -ENOMEM;
+
+
+ adapter->unique_id = lld_adp->unique_id;
+ adapter->drvr_type = lld_adp->drvr_type;
+ adapter->drvr_data = lld_adp->drvr_data;
+ adapter->pdev = lld_adp->pdev;
+ adapter->issue_uioc = lld_adp->issue_uioc;
+ adapter->timeout = lld_adp->timeout;
+ adapter->max_kioc = lld_adp->max_kioc;
+ adapter->quiescent = 1;
+
+ /*
+ * Allocate single blocks of memory for all required kiocs,
+ * mailboxes and passthru structures.
+ */
+ adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
+ GFP_KERNEL);
+ adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
+ GFP_KERNEL);
+ adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
+ adapter->pdev,
+ sizeof(mraid_passthru_t),
+ 16, 0);
+
+ if (!adapter->kioc_list || !adapter->mbox_list ||
+ !adapter->pthru_dma_pool) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ rval = (-ENOMEM);
+
+ goto memalloc_error;
+ }
+
+ /*
+ * Slice kioc_list and make a kioc_pool with the individiual kiocs
+ */
+ INIT_LIST_HEAD(&adapter->kioc_pool);
+ spin_lock_init(&adapter->kioc_pool_lock);
+ sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
+
+ mbox_list = (mbox64_t *)adapter->mbox_list;
+
+ for (i = 0; i < lld_adp->max_kioc; i++) {
+
+ kioc = adapter->kioc_list + i;
+ kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
+ kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool,
+ GFP_KERNEL, &kioc->pthru32_h);
+
+ if (!kioc->pthru32) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: out of memory, %s %d\n",
+ __func__, __LINE__));
+
+ rval = (-ENOMEM);
+
+ goto pthru_dma_pool_error;
+ }
+
+ list_add_tail(&kioc->list, &adapter->kioc_pool);
+ }
+
+ // Setup the dma pools for data buffers
+ if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
+ goto dma_pool_error;
+ }
+
+ list_add_tail(&adapter->list, &adapters_list_g);
+
+ adapters_count_g++;
+
+ return 0;
+
+dma_pool_error:
+ /* Do nothing */
+
+pthru_dma_pool_error:
+
+ for (i = 0; i < lld_adp->max_kioc; i++) {
+ kioc = adapter->kioc_list + i;
+ if (kioc->pthru32) {
+ pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
+ kioc->pthru32_h);
+ }
+ }
+
+memalloc_error:
+
+ kfree(adapter->kioc_list);
+ kfree(adapter->mbox_list);
+
+ if (adapter->pthru_dma_pool)
+ pci_pool_destroy(adapter->pthru_dma_pool);
+
+ kfree(adapter);
+
+ return rval;
+}
+
+
+/**
+ * mraid_mm_adapter_app_handle - return the application handle for this adapter
+ * @unique_id : adapter unique identifier
+ *
+ * For the given driver data, locate the adapter in our global list and
+ * return the corresponding handle, which is also used by applications to
+ * uniquely identify an adapter.
+ *
+ * Return adapter handle if found in the list.
+ * Return 0 if adapter could not be located, should never happen though.
+ */
+uint32_t
+mraid_mm_adapter_app_handle(uint32_t unique_id)
+{
+ mraid_mmadp_t *adapter;
+ mraid_mmadp_t *tmp;
+ int index = 0;
+
+ list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
+
+ if (adapter->unique_id == unique_id) {
+
+ return MKADAP(index);
+ }
+
+ index++;
+ }
+
+ return 0;
+}
+
+
+/**
+ * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
+ * @adp : Adapter softstate
+ *
+ * We maintain a pool of dma buffers per each adapter. Each pool has one
+ * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
+ * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
+ * dont' want to waste too much memory by allocating more buffers per each
+ * pool.
+ */
+static int
+mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
+{
+ mm_dmapool_t *pool;
+ int bufsize;
+ int i;
+
+ /*
+ * Create MAX_DMA_POOLS number of pools
+ */
+ bufsize = MRAID_MM_INIT_BUFF_SIZE;
+
+ for (i = 0; i < MAX_DMA_POOLS; i++){
+
+ pool = &adp->dma_pool_list[i];
+
+ pool->buf_size = bufsize;
+ spin_lock_init(&pool->lock);
+
+ pool->handle = pci_pool_create("megaraid mm data buffer",
+ adp->pdev, bufsize, 16, 0);
+
+ if (!pool->handle) {
+ goto dma_pool_setup_error;
+ }
+
+ pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+ &pool->paddr);
+
+ if (!pool->vaddr)
+ goto dma_pool_setup_error;
+
+ bufsize = bufsize * 2;
+ }
+
+ return 0;
+
+dma_pool_setup_error:
+
+ mraid_mm_teardown_dma_pools(adp);
+ return (-ENOMEM);
+}
+
+
+/**
+ * mraid_mm_unregister_adp - Unregister routine for low level drivers
+ * @unique_id : UID of the adpater
+ *
+ * Assumes no outstanding ioctls to llds.
+ */
+int
+mraid_mm_unregister_adp(uint32_t unique_id)
+{
+ mraid_mmadp_t *adapter;
+ mraid_mmadp_t *tmp;
+
+ list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
+
+
+ if (adapter->unique_id == unique_id) {
+
+ adapters_count_g--;
+
+ list_del_init(&adapter->list);
+
+ mraid_mm_free_adp_resources(adapter);
+
+ kfree(adapter);
+
+ con_log(CL_ANN, (
+ "megaraid cmm: Unregistered one adapter:%#x\n",
+ unique_id));
+
+ return 0;
+ }
+ }
+
+ return (-ENODEV);
+}
+
+/**
+ * mraid_mm_free_adp_resources - Free adapter softstate
+ * @adp : Adapter softstate
+ */
+static void
+mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
+{
+ uioc_t *kioc;
+ int i;
+
+ mraid_mm_teardown_dma_pools(adp);
+
+ for (i = 0; i < adp->max_kioc; i++) {
+
+ kioc = adp->kioc_list + i;
+
+ pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
+ kioc->pthru32_h);
+ }
+
+ kfree(adp->kioc_list);
+ kfree(adp->mbox_list);
+
+ pci_pool_destroy(adp->pthru_dma_pool);
+
+
+ return;
+}
+
+
+/**
+ * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
+ * @adp : Adapter softstate
+ */
+static void
+mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
+{
+ int i;
+ mm_dmapool_t *pool;
+
+ for (i = 0; i < MAX_DMA_POOLS; i++) {
+
+ pool = &adp->dma_pool_list[i];
+
+ if (pool->handle) {
+
+ if (pool->vaddr)
+ pci_pool_free(pool->handle, pool->vaddr,
+ pool->paddr);
+
+ pci_pool_destroy(pool->handle);
+ pool->handle = NULL;
+ }
+ }
+
+ return;
+}
+
+/**
+ * mraid_mm_init - Module entry point
+ */
+static int __init
+mraid_mm_init(void)
+{
+ int err;
+
+ // Announce the driver version
+ con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
+ LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
+
+ err = misc_register(&megaraid_mm_dev);
+ if (err < 0) {
+ con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
+ return err;
+ }
+
+ init_waitqueue_head(&wait_q);
+
+ INIT_LIST_HEAD(&adapters_list_g);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
+ * @filep : file operations pointer (ignored)
+ * @cmd : ioctl command
+ * @arg : user ioctl packet
+ */
+static long
+mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ err = mraid_mm_ioctl(filep, cmd, arg);
+
+ return err;
+}
+#endif
+
+/**
+ * mraid_mm_exit - Module exit point
+ */
+static void __exit
+mraid_mm_exit(void)
+{
+ con_log(CL_DLEVEL1 , ("exiting common mod\n"));
+
+ misc_deregister(&megaraid_mm_dev);
+}
+
+module_init(mraid_mm_init);
+module_exit(mraid_mm_exit);
+
+/* vi: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
new file mode 100644
index 000000000..55b425c0a
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -0,0 +1,101 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mm.h
+ */
+
+#ifndef MEGARAID_MM_H
+#define MEGARAID_MM_H
+
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+
+#include "mbox_defs.h"
+#include "megaraid_ioctl.h"
+
+
+#define LSI_COMMON_MOD_VERSION "2.20.2.7"
+#define LSI_COMMON_MOD_EXT_VERSION \
+ "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
+
+
+#define LSI_DBGLVL dbglevel
+
+// The smallest dma pool
+#define MRAID_MM_INIT_BUFF_SIZE 4096
+
+/**
+ * mimd_t : Old style ioctl packet structure (deprecated)
+ *
+ * @inlen :
+ * @outlen :
+ * @fca :
+ * @opcode :
+ * @subopcode :
+ * @adapno :
+ * @buffer :
+ * @pad :
+ * @length :
+ * @mbox :
+ * @pthru :
+ * @data :
+ * @pad :
+ *
+ * Note : This structure is DEPRECATED. New applications must use
+ * : uioc_t structure instead. All new hba drivers use the new
+ * : format. If we get this mimd packet, we will convert it into
+ * : new uioc_t format and send it to the hba drivers.
+ */
+
+typedef struct mimd {
+
+ uint32_t inlen;
+ uint32_t outlen;
+
+ union {
+ uint8_t fca[16];
+ struct {
+ uint8_t opcode;
+ uint8_t subopcode;
+ uint16_t adapno;
+#if BITS_PER_LONG == 32
+ uint8_t __user *buffer;
+ uint8_t pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ uint8_t __user *buffer;
+#endif
+ uint32_t length;
+ } __attribute__ ((packed)) fcs;
+ } __attribute__ ((packed)) ui;
+
+ uint8_t mbox[18]; /* 16 bytes + 2 status bytes */
+ mraid_passthru_t pthru;
+
+#if BITS_PER_LONG == 32
+ char __user *data; /* buffer <= 4096 for 0x80 commands */
+ char pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ char __user *data;
+#endif
+
+} __attribute__ ((packed))mimd_t;
+
+#endif // MEGARAID_MM_H
+
+// vi: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 000000000..14e5c7cea
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,1990 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas.h
+ *
+ * Authors: Avago Technologies
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#ifndef LSI_MEGARAID_SAS_H
+#define LSI_MEGARAID_SAS_H
+
+/*
+ * MegaRAID SAS Driver meta data
+ */
+#define MEGASAS_VERSION "06.806.08.00-rc1"
+
+/*
+ * Device IDs
+ */
+#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
+#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C
+#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
+#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078
+#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
+#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
+#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
+#define PCI_DEVICE_ID_LSI_FUSION 0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA 0x002f
+#define PCI_DEVICE_ID_LSI_INVADER 0x005d
+#define PCI_DEVICE_ID_LSI_FURY 0x005f
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MEGARAID_INTEL_RS3DC080_SSDID 0x9360
+#define MEGARAID_INTEL_RS3DC040_SSDID 0x9362
+#define MEGARAID_INTEL_RS3SC008_SSDID 0x9380
+#define MEGARAID_INTEL_RS3MC044_SSDID 0x9381
+#define MEGARAID_INTEL_RS3WC080_SSDID 0x9341
+#define MEGARAID_INTEL_RS3WC040_SSDID 0x9343
+
+/*
+ * Intel HBA branding
+ */
+#define MEGARAID_INTEL_RS3DC080_BRANDING \
+ "Intel(R) RAID Controller RS3DC080"
+#define MEGARAID_INTEL_RS3DC040_BRANDING \
+ "Intel(R) RAID Controller RS3DC040"
+#define MEGARAID_INTEL_RS3SC008_BRANDING \
+ "Intel(R) RAID Controller RS3SC008"
+#define MEGARAID_INTEL_RS3MC044_BRANDING \
+ "Intel(R) RAID Controller RS3MC044"
+#define MEGARAID_INTEL_RS3WC080_BRANDING \
+ "Intel(R) RAID Controller RS3WC080"
+#define MEGARAID_INTEL_RS3WC040_BRANDING \
+ "Intel(R) RAID Controller RS3WC040"
+
+/*
+ * =====================================
+ * MegaRAID SAS MFI firmware definitions
+ * =====================================
+ */
+
+/*
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
+ * protocol between the software and firmware. Commands are issued using
+ * "message frames"
+ */
+
+/*
+ * FW posts its state in upper 4 bits of outbound_msg_0 register
+ */
+#define MFI_STATE_MASK 0xF0000000
+#define MFI_STATE_UNDEFINED 0x00000000
+#define MFI_STATE_BB_INIT 0x10000000
+#define MFI_STATE_FW_INIT 0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_STATE_FW_INIT_2 0x70000000
+#define MFI_STATE_DEVICE_SCAN 0x80000000
+#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
+#define MFI_STATE_FLUSH_CACHE 0xA0000000
+#define MFI_STATE_READY 0xB0000000
+#define MFI_STATE_OPERATIONAL 0xC0000000
+#define MFI_STATE_FAULT 0xF0000000
+#define MFI_STATE_FORCE_OCR 0x00000080
+#define MFI_STATE_DMADONE 0x00000008
+#define MFI_STATE_CRASH_DUMP_DONE 0x00000004
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
+#define MEGAMFI_FRAME_SIZE 64
+
+/*
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT : Abort all pending cmds
+ * READY : Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ * HOTPLUG : Resume from Hotplug
+ * MFI_STOP_ADP : Send signal to FW to stop processing
+ */
+#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
+#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
+#define DIAG_WRITE_ENABLE (0x00000080)
+#define DIAG_RESET_ADAPTER (0x00000004)
+
+#define MFI_ADP_RESET 0x00000040
+#define MFI_INIT_ABORT 0x00000001
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_INIT_HOTPLUG 0x00000010
+#define MFI_STOP_ADP 0x00000020
+#define MFI_RESET_FLAGS MFI_INIT_READY| \
+ MFI_INIT_MFIMODE| \
+ MFI_INIT_ABORT
+
+/*
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE 0x0020
+
+/*
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
+
+/*
+ * MFI command opcodes
+ */
+#define MFI_CMD_INIT 0x00
+#define MFI_CMD_LD_READ 0x01
+#define MFI_CMD_LD_WRITE 0x02
+#define MFI_CMD_LD_SCSI_IO 0x03
+#define MFI_CMD_PD_SCSI_IO 0x04
+#define MFI_CMD_DCMD 0x05
+#define MFI_CMD_ABORT 0x06
+#define MFI_CMD_SMP 0x07
+#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
+
+#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_LD_LIST_QUERY 0x03010100
+
+#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define MR_FLUSH_CTRL_CACHE 0x01
+#define MR_FLUSH_DISK_CACHE 0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
+#define MR_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define MR_DCMD_CTRL_EVENT_GET 0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
+
+#define MR_DCMD_CLUSTER 0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+#define MR_DCMD_PD_LIST_QUERY 0x02010100
+
+#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
+#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
+
+/*
+ * Global functions
+ */
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+
+
+/*
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+ MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+ MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+ MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+ MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
+
+ MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+/*
+ * Crash dump related defines
+ */
+#define MAX_CRASH_DUMP_SIZE 512
+#define CRASH_DMA_BUF_SIZE (1024 * 1024)
+
+enum MR_FW_CRASH_DUMP_STATE {
+ UNAVAILABLE = 0,
+ AVAILABLE = 1,
+ COPYING = 2,
+ COPIED = 3,
+ COPY_ERROR = 4,
+};
+
+enum _MR_CRASH_BUF_STATUS {
+ MR_CRASH_BUF_TURN_OFF = 0,
+ MR_CRASH_BUF_TURN_ON = 1,
+};
+
+/*
+ * Number of mailbox bytes in DCMD message frame
+ */
+#define MFI_MBOX_SIZE 12
+
+enum MR_EVT_CLASS {
+
+ MR_EVT_CLASS_DEBUG = -2,
+ MR_EVT_CLASS_PROGRESS = -1,
+ MR_EVT_CLASS_INFO = 0,
+ MR_EVT_CLASS_WARNING = 1,
+ MR_EVT_CLASS_CRITICAL = 2,
+ MR_EVT_CLASS_FATAL = 3,
+ MR_EVT_CLASS_DEAD = 4,
+
+};
+
+enum MR_EVT_LOCALE {
+
+ MR_EVT_LOCALE_LD = 0x0001,
+ MR_EVT_LOCALE_PD = 0x0002,
+ MR_EVT_LOCALE_ENCL = 0x0004,
+ MR_EVT_LOCALE_BBU = 0x0008,
+ MR_EVT_LOCALE_SAS = 0x0010,
+ MR_EVT_LOCALE_CTRL = 0x0020,
+ MR_EVT_LOCALE_CONFIG = 0x0040,
+ MR_EVT_LOCALE_CLUSTER = 0x0080,
+ MR_EVT_LOCALE_ALL = 0xffff,
+
+};
+
+enum MR_EVT_ARGS {
+
+ MR_EVT_ARGS_NONE,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
+};
+
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
+};
+
+enum MR_LD_QUERY_TYPE {
+ MR_LD_QUERY_TYPE_ALL = 0,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+ MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
+ MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
+ MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
+};
+
+
+#define MR_EVT_CFG_CLEARED 0x0004
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+
+enum MR_PD_STATE {
+ MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MR_PD_STATE_HOT_SPARE = 0x02,
+ MR_PD_STATE_OFFLINE = 0x10,
+ MR_PD_STATE_FAILED = 0x11,
+ MR_PD_STATE_REBUILD = 0x14,
+ MR_PD_STATE_ONLINE = 0x18,
+ MR_PD_STATE_COPYBACK = 0x20,
+ MR_PD_STATE_SYSTEM = 0x40,
+ };
+
+
+ /*
+ * defines the physical drive address structure
+ */
+struct MR_PD_ADDRESS {
+ u16 deviceId;
+ u16 enclDeviceId;
+
+ union {
+ struct {
+ u8 enclIndex;
+ u8 slotNumber;
+ } mrPdAddress;
+ struct {
+ u8 enclPosition;
+ u8 enclConnectorIndex;
+ } mrEnclAddress;
+ };
+ u8 scsiDevType;
+ union {
+ u8 connectedPortBitmap;
+ u8 connectedPortNumbers;
+ };
+ u64 sasAddr[2];
+} __packed;
+
+/*
+ * defines the physical drive list structure
+ */
+struct MR_PD_LIST {
+ u32 size;
+ u32 count;
+ struct MR_PD_ADDRESS addr[1];
+} __packed;
+
+struct megasas_pd_list {
+ u16 tid;
+ u8 driveType;
+ u8 driveState;
+} __packed;
+
+ /*
+ * defines the logical drive reference structure
+ */
+union MR_LD_REF {
+ struct {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+ };
+ u32 ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+ u32 ldCount;
+ u32 reserved;
+ struct {
+ union MR_LD_REF ref;
+ u8 state;
+ u8 reserved[3];
+ u64 size;
+ } ldList[MAX_LOGICAL_DRIVES_EXT];
+} __packed;
+
+struct MR_LD_TARGETID_LIST {
+ u32 size;
+ u32 count;
+ u8 pad[3];
+ u8 targetId[MAX_LOGICAL_DRIVES_EXT];
+};
+
+
+/*
+ * SAS controller properties
+ */
+struct megasas_ctrl_prop {
+
+ u16 seq_num;
+ u16 pred_fail_poll_interval;
+ u16 intr_throttle_count;
+ u16 intr_throttle_timeouts;
+ u8 rebuild_rate;
+ u8 patrol_read_rate;
+ u8 bgi_rate;
+ u8 cc_rate;
+ u8 recon_rate;
+ u8 cache_flush_interval;
+ u8 spinup_drv_count;
+ u8 spinup_delay;
+ u8 cluster_enable;
+ u8 coercion_mode;
+ u8 alarm_enable;
+ u8 disable_auto_rebuild;
+ u8 disable_battery_warn;
+ u8 ecc_bucket_size;
+ u16 ecc_bucket_leak_rate;
+ u8 restore_hotspare_on_insertion;
+ u8 expose_encl_devices;
+ u8 maintainPdFailHistory;
+ u8 disallowHostRequestReordering;
+ u8 abortCCOnError;
+ u8 loadBalanceMode;
+ u8 disableAutoDetectBackplane;
+
+ u8 snapVDSpace;
+
+ /*
+ * Add properties that can be controlled by
+ * a bit in the following structure.
+ */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:18;
+ u32 enableJBOD:1;
+ u32 disableSpinDownHS:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 enableSecretKeyControl:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 disableNCQ:1;
+ u32 useFdeOnly:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 SMARTerEnabled:1;
+ u32 copyBackDisabled:1;
+#else
+ u32 copyBackDisabled:1;
+ u32 SMARTerEnabled:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 useFdeOnly:1;
+ u32 disableNCQ:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSecretKeyControl:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableSpinDownHS:1;
+ u32 enableJBOD:1;
+ u32 reserved:18;
+#endif
+ } OnOffProperties;
+ u8 autoSnapVDSpace;
+ u8 viewSpace;
+ u16 spinDownTime;
+ u8 reserved[24];
+} __packed;
+
+/*
+ * SAS controller information
+ */
+struct megasas_ctrl_info {
+
+ /*
+ * PCI device information
+ */
+ struct {
+
+ u16 vendor_id;
+ u16 device_id;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ u8 reserved[24];
+
+ } __attribute__ ((packed)) pci;
+
+ /*
+ * Host interface information
+ */
+ struct {
+
+ u8 PCIX:1;
+ u8 PCIE:1;
+ u8 iSCSI:1;
+ u8 SAS_3G:1;
+ u8 SRIOV:1;
+ u8 reserved_0:3;
+ u8 reserved_1[6];
+ u8 port_count;
+ u64 port_addr[8];
+
+ } __attribute__ ((packed)) host_interface;
+
+ /*
+ * Device (backend) interface information
+ */
+ struct {
+
+ u8 SPI:1;
+ u8 SAS_3G:1;
+ u8 SATA_1_5G:1;
+ u8 SATA_3G:1;
+ u8 reserved_0:4;
+ u8 reserved_1[6];
+ u8 port_count;
+ u64 port_addr[8];
+
+ } __attribute__ ((packed)) device_interface;
+
+ /*
+ * List of components residing in flash. All str are null terminated
+ */
+ u32 image_check_word;
+ u32 image_component_count;
+
+ struct {
+
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+
+ } __attribute__ ((packed)) image_component[8];
+
+ /*
+ * List of flash components that have been flashed on the card, but
+ * are not in use, pending reset of the adapter. This list will be
+ * empty if a flash operation has not occurred. All stings are null
+ * terminated
+ */
+ u32 pending_image_component_count;
+
+ struct {
+
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+
+ } __attribute__ ((packed)) pending_image_component[8];
+
+ u8 max_arms;
+ u8 max_spans;
+ u8 max_arrays;
+ u8 max_lds;
+
+ char product_name[80];
+ char serial_no[32];
+
+ /*
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+
+ u32 bbu:1;
+ u32 alarm:1;
+ u32 nvram:1;
+ u32 uart:1;
+ u32 reserved:28;
+
+ } __attribute__ ((packed)) hw_present;
+
+ u32 current_fw_time;
+
+ /*
+ * Maximum data transfer sizes
+ */
+ u16 max_concurrent_cmds;
+ u16 max_sge_count;
+ u32 max_request_size;
+
+ /*
+ * Logical and physical device counts
+ */
+ u16 ld_present_count;
+ u16 ld_degraded_count;
+ u16 ld_offline_count;
+
+ u16 pd_present_count;
+ u16 pd_disk_present_count;
+ u16 pd_disk_pred_failure_count;
+ u16 pd_disk_failed_count;
+
+ /*
+ * Memory size information
+ */
+ u16 nvram_size;
+ u16 memory_size;
+ u16 flash_size;
+
+ /*
+ * Error counters
+ */
+ u16 mem_correctable_error_count;
+ u16 mem_uncorrectable_error_count;
+
+ /*
+ * Cluster information
+ */
+ u8 cluster_permitted;
+ u8 cluster_active;
+
+ /*
+ * Additional max data transfer sizes
+ */
+ u16 max_strips_per_io;
+
+ /*
+ * Controller capabilities structures
+ */
+ struct {
+
+ u32 raid_level_0:1;
+ u32 raid_level_1:1;
+ u32 raid_level_5:1;
+ u32 raid_level_1E:1;
+ u32 raid_level_6:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) raid_levels;
+
+ struct {
+
+ u32 rbld_rate:1;
+ u32 cc_rate:1;
+ u32 bgi_rate:1;
+ u32 recon_rate:1;
+ u32 patrol_rate:1;
+ u32 alarm_control:1;
+ u32 cluster_supported:1;
+ u32 bbu:1;
+ u32 spanning_allowed:1;
+ u32 dedicated_hotspares:1;
+ u32 revertible_hotspares:1;
+ u32 foreign_config_import:1;
+ u32 self_diagnostic:1;
+ u32 mixed_redundancy_arr:1;
+ u32 global_hot_spares:1;
+ u32 reserved:17;
+
+ } __attribute__ ((packed)) adapter_operations;
+
+ struct {
+
+ u32 read_policy:1;
+ u32 write_policy:1;
+ u32 io_policy:1;
+ u32 access_policy:1;
+ u32 disk_cache_policy:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) ld_operations;
+
+ struct {
+
+ u8 min;
+ u8 max;
+ u8 reserved[2];
+
+ } __attribute__ ((packed)) stripe_sz_ops;
+
+ struct {
+
+ u32 force_online:1;
+ u32 force_offline:1;
+ u32 force_rebuild:1;
+ u32 reserved:29;
+
+ } __attribute__ ((packed)) pd_operations;
+
+ struct {
+
+ u32 ctrl_supports_sas:1;
+ u32 ctrl_supports_sata:1;
+ u32 allow_mix_in_encl:1;
+ u32 allow_mix_in_ld:1;
+ u32 allow_sata_in_cluster:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) pd_mix_support;
+
+ /*
+ * Define ECC single-bit-error bucket information
+ */
+ u8 ecc_bucket_count;
+ u8 reserved_2[11];
+
+ /*
+ * Include the controller properties (changeable items)
+ */
+ struct megasas_ctrl_prop properties;
+
+ /*
+ * Define FW pkg version (set in envt v'bles on OEM basis)
+ */
+ char package_version[0x60];
+
+
+ /*
+ * If adapterOperations.supportMoreThan8Phys is set,
+ * and deviceInterface.portCount is greater than 8,
+ * SAS Addrs for first 8 ports shall be populated in
+ * deviceInterface.portAddr, and the rest shall be
+ * populated in deviceInterfacePortAddr2.
+ */
+ u64 deviceInterfacePortAddr2[8]; /*6a0h */
+ u8 reserved3[128]; /*6e0h */
+
+ struct { /*760h */
+ u16 minPdRaidLevel_0:4;
+ u16 maxPdRaidLevel_0:12;
+
+ u16 minPdRaidLevel_1:4;
+ u16 maxPdRaidLevel_1:12;
+
+ u16 minPdRaidLevel_5:4;
+ u16 maxPdRaidLevel_5:12;
+
+ u16 minPdRaidLevel_1E:4;
+ u16 maxPdRaidLevel_1E:12;
+
+ u16 minPdRaidLevel_6:4;
+ u16 maxPdRaidLevel_6:12;
+
+ u16 minPdRaidLevel_10:4;
+ u16 maxPdRaidLevel_10:12;
+
+ u16 minPdRaidLevel_50:4;
+ u16 maxPdRaidLevel_50:12;
+
+ u16 minPdRaidLevel_60:4;
+ u16 maxPdRaidLevel_60:12;
+
+ u16 minPdRaidLevel_1E_RLQ0:4;
+ u16 maxPdRaidLevel_1E_RLQ0:12;
+
+ u16 minPdRaidLevel_1E0_RLQ0:4;
+ u16 maxPdRaidLevel_1E0_RLQ0:12;
+
+ u16 reserved[6];
+ } pdsForRaidLevels;
+
+ u16 maxPds; /*780h */
+ u16 maxDedHSPs; /*782h */
+ u16 maxGlobalHSPs; /*784h */
+ u16 ddfSize; /*786h */
+ u8 maxLdsPerArray; /*788h */
+ u8 partitionsInDDF; /*789h */
+ u8 lockKeyBinding; /*78ah */
+ u8 maxPITsPerLd; /*78bh */
+ u8 maxViewsPerLd; /*78ch */
+ u8 maxTargetId; /*78dh */
+ u16 maxBvlVdSize; /*78eh */
+
+ u16 maxConfigurableSSCSize; /*790h */
+ u16 currentSSCsize; /*792h */
+
+ char expanderFwVersion[12]; /*794h */
+
+ u16 PFKTrialTimeRemaining; /*7A0h */
+
+ u16 cacheMemorySize; /*7A2h */
+
+ struct { /*7A4h */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:5;
+ u32 activePassive:2;
+ u32 supportConfigAutoBalance:1;
+ u32 mpio:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 supportPointInTimeProgress:1;
+ u32 supportUnevenSpans:1;
+ u32 dedicatedHotSparesLimited:1;
+ u32 headlessMode:1;
+ u32 supportEmulatedDrives:1;
+ u32 supportResetNow:1;
+ u32 realTimeScheduler:1;
+ u32 supportSSDPatrolRead:1;
+ u32 supportPerfTuning:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportJBOD:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportShieldState:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType1:1;
+ u32 supportPIcontroller:1;
+#else
+ u32 supportPIcontroller:1;
+ u32 supportLdPIType1:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportShieldState:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportJBOD:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportPerfTuning:1;
+ u32 supportSSDPatrolRead:1;
+ u32 realTimeScheduler:1;
+
+ u32 supportResetNow:1;
+ u32 supportEmulatedDrives:1;
+ u32 headlessMode:1;
+ u32 dedicatedHotSparesLimited:1;
+
+
+ u32 supportUnevenSpans:1;
+ u32 supportPointInTimeProgress:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 mpio:1;
+ u32 supportConfigAutoBalance:1;
+ u32 activePassive:2;
+ u32 reserved:5;
+#endif
+ } adapterOperations2;
+
+ u8 driverVersion[32]; /*7A8h */
+ u8 maxDAPdCountSpinup60; /*7C8h */
+ u8 temperatureROC; /*7C9h */
+ u8 temperatureCtrl; /*7CAh */
+ u8 reserved4; /*7CBh */
+ u16 maxConfigurablePds; /*7CCh */
+
+
+ u8 reserved5[2]; /*0x7CDh */
+
+ /*
+ * HA cluster information
+ */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:26;
+ u32 premiumFeatureMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 hwIncompatible:1;
+ u32 peerIsIncompatible:1;
+ u32 peerIsPresent:1;
+#else
+ u32 peerIsPresent:1;
+ u32 peerIsIncompatible:1;
+ u32 hwIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 premiumFeatureMismatch:1;
+ u32 reserved:26;
+#endif
+ } cluster;
+
+ char clusterId[16]; /*7D4h */
+ struct {
+ u8 maxVFsSupported; /*0x7E4*/
+ u8 numVFsEnabled; /*0x7E5*/
+ u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+ u8 reserved; /*0x7E7*/
+ } iov;
+
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:12;
+ u32 discardCacheDuringLDDelete:1;
+ u32 supportSecurityonJBOD:1;
+ u32 supportCacheBypassModes:1;
+ u32 supportDisableSESMonitoring:1;
+ u32 supportForceFlash:1;
+ u32 supportNVDRAM:1;
+ u32 supportDrvActivityLEDSetting:1;
+ u32 supportAllowedOpsforDrvRemoval:1;
+ u32 supportHOQRebuild:1;
+ u32 supportForceTo512e:1;
+ u32 supportNVCacheErase:1;
+ u32 supportDebugQueue:1;
+ u32 supportSwZone:1;
+ u32 supportCrashDump:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportThermalPollInterval:1;
+ u32 supportPersonalityChange:2;
+#else
+ u32 supportPersonalityChange:2;
+ u32 supportThermalPollInterval:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportCrashDump:1;
+ u32 supportSwZone:1;
+ u32 supportDebugQueue:1;
+ u32 supportNVCacheErase:1;
+ u32 supportForceTo512e:1;
+ u32 supportHOQRebuild:1;
+ u32 supportAllowedOpsforDrvRemoval:1;
+ u32 supportDrvActivityLEDSetting:1;
+ u32 supportNVDRAM:1;
+ u32 supportForceFlash:1;
+ u32 supportDisableSESMonitoring:1;
+ u32 supportCacheBypassModes:1;
+ u32 supportSecurityonJBOD:1;
+ u32 discardCacheDuringLDDelete:1;
+ u32 reserved:12;
+#endif
+ } adapterOperations3;
+
+ u8 pad[0x800-0x7EC];
+} __packed;
+
+/*
+ * ===============================
+ * MegaRAID SAS driver definitions
+ * ===============================
+ */
+#define MEGASAS_MAX_PD_CHANNELS 2
+#define MEGASAS_MAX_LD_CHANNELS 2
+#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
+ MEGASAS_MAX_LD_CHANNELS)
+#define MEGASAS_MAX_DEV_PER_CHANNEL 128
+#define MEGASAS_DEFAULT_INIT_ID -1
+#define MEGASAS_MAX_LUN 8
+#define MEGASAS_DEFAULT_CMD_PER_LUN 256
+#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
+
+#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
+#define MEGASAS_DBG_LVL 1
+
+#define MEGASAS_FW_BUSY 1
+
+#define VD_EXT_DEBUG 0
+
+enum MR_MFI_MPT_PTHR_FLAGS {
+ MFI_MPT_DETACHED = 0,
+ MFI_LIST_ADDED = 1,
+ MFI_MPT_ATTACHED = 2,
+};
+
+enum MR_SCSI_CMD_TYPE {
+ READ_WRITE_LDIO = 0,
+ NON_READ_WRITE_LDIO = 1,
+ READ_WRITE_SYSPDIO = 2,
+ NON_READ_WRITE_SYSPDIO = 3,
+};
+
+/* Frame Type */
+#define IO_FRAME 0
+#define PTHRU_FRAME 1
+
+/*
+ * When SCSI mid-layer calls driver's reset routine, driver waits for
+ * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
+ * that the driver cannot _actually_ abort or reset pending commands. While
+ * it is waiting for the commands to complete, it prints a diagnostic message
+ * every MEGASAS_RESET_NOTICE_INTERVAL seconds
+ */
+#define MEGASAS_RESET_WAIT_TIME 180
+#define MEGASAS_INTERNAL_CMD_WAIT_TIME 180
+#define MEGASAS_RESET_NOTICE_INTERVAL 5
+#define MEGASAS_IOCTL_CMD 0
+#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
+#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
+#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
+/*
+ * FW reports the maximum of number of commands that it can accept (maximum
+ * commands that can be outstanding) at any time. The driver must report a
+ * lower number to the mid layer because it can issue a few internal commands
+ * itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs
+ * is shown below
+ */
+#define MEGASAS_INT_CMDS 32
+#define MEGASAS_SKINNY_INT_CMDS 5
+#define MEGASAS_FUSION_INTERNAL_CMDS 5
+#define MEGASAS_FUSION_IOCTL_CMDS 3
+
+#define MEGASAS_MAX_MSIX_QUEUES 128
+/*
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+ * SGLs based on the size of dma_addr_t
+ */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001
+
+#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004
+
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+#define MFI_POLL_TIMEOUT_SECS 60
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
+#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
+
+#define MFI_1068_PCSR_OFFSET 0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
+#define MFI_1068_FW_READY 0xDDDD0000
+
+#define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
+#define MR_MAX_MSIX_REG_ARRAY 16
+/*
+* register set for both 1068 and 1078 controllers
+* structure extended for 1078 registers
+*/
+
+struct megasas_register_set {
+ u32 doorbell; /*0000h*/
+ u32 fusion_seq_offset; /*0004h*/
+ u32 fusion_host_diag; /*0008h*/
+ u32 reserved_01; /*000Ch*/
+
+ u32 inbound_msg_0; /*0010h*/
+ u32 inbound_msg_1; /*0014h*/
+ u32 outbound_msg_0; /*0018h*/
+ u32 outbound_msg_1; /*001Ch*/
+
+ u32 inbound_doorbell; /*0020h*/
+ u32 inbound_intr_status; /*0024h*/
+ u32 inbound_intr_mask; /*0028h*/
+
+ u32 outbound_doorbell; /*002Ch*/
+ u32 outbound_intr_status; /*0030h*/
+ u32 outbound_intr_mask; /*0034h*/
+
+ u32 reserved_1[2]; /*0038h*/
+
+ u32 inbound_queue_port; /*0040h*/
+ u32 outbound_queue_port; /*0044h*/
+
+ u32 reserved_2[9]; /*0048h*/
+ u32 reply_post_host_index; /*006Ch*/
+ u32 reserved_2_2[12]; /*0070h*/
+
+ u32 outbound_doorbell_clear; /*00A0h*/
+
+ u32 reserved_3[3]; /*00A4h*/
+
+ u32 outbound_scratch_pad ; /*00B0h*/
+ u32 outbound_scratch_pad_2; /*00B4h*/
+
+ u32 reserved_4[2]; /*00B8h*/
+
+ u32 inbound_low_queue_port ; /*00C0h*/
+
+ u32 inbound_high_queue_port ; /*00C4h*/
+
+ u32 reserved_5; /*00C8h*/
+ u32 res_6[11]; /*CCh*/
+ u32 host_diag;
+ u32 seq_offset;
+ u32 index_registers[807]; /*00CCh*/
+} __attribute__ ((packed));
+
+struct megasas_sge32 {
+
+ u32 phys_addr;
+ u32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge64 {
+
+ u64 phys_addr;
+ u32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge_skinny {
+ u64 phys_addr;
+ u32 length;
+ u32 flag;
+} __packed;
+
+union megasas_sgl {
+
+ struct megasas_sge32 sge32[1];
+ struct megasas_sge64 sge64[1];
+ struct megasas_sge_skinny sge_skinny[1];
+
+} __attribute__ ((packed));
+
+struct megasas_header {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 lun; /*05h */
+ u8 cdb_len; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 data_xferlen; /*14h */
+
+} __attribute__ ((packed));
+
+union megasas_sgl_frame {
+
+ struct megasas_sge32 sge32[8];
+ struct megasas_sge64 sge64[5];
+
+} __attribute__ ((packed));
+
+typedef union _MFI_CAPABILITIES {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:25;
+ u32 security_protocol_cmds_fw:1;
+ u32 support_core_affinity:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_max_255lds:1;
+ u32 support_fastpath_wb:1;
+ u32 support_additional_msix:1;
+ u32 support_fp_remote_lun:1;
+#else
+ u32 support_fp_remote_lun:1;
+ u32 support_additional_msix:1;
+ u32 support_fastpath_wb:1;
+ u32 support_max_255lds:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_core_affinity:1;
+ u32 security_protocol_cmds_fw:1;
+ u32 reserved:25;
+#endif
+ } mfi_capabilities;
+ u32 reg;
+} MFI_CAPABILITIES;
+
+struct megasas_init_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+
+ u8 reserved_1; /*03h */
+ MFI_CAPABILITIES driver_operations; /*04h*/
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 reserved_3; /*12h */
+ u32 data_xfer_len; /*14h */
+
+ u32 queue_info_new_phys_addr_lo; /*18h */
+ u32 queue_info_new_phys_addr_hi; /*1Ch */
+ u32 queue_info_old_phys_addr_lo; /*20h */
+ u32 queue_info_old_phys_addr_hi; /*24h */
+
+ u32 reserved_4[6]; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_init_queue_info {
+
+ u32 init_flags; /*00h */
+ u32 reply_queue_entries; /*04h */
+
+ u32 reply_queue_start_phys_addr_lo; /*08h */
+ u32 reply_queue_start_phys_addr_hi; /*0Ch */
+ u32 producer_index_phys_addr_lo; /*10h */
+ u32 producer_index_phys_addr_hi; /*14h */
+ u32 consumer_index_phys_addr_lo; /*18h */
+ u32 consumer_index_phys_addr_hi; /*1Ch */
+
+} __attribute__ ((packed));
+
+struct megasas_io_frame {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 access_byte; /*05h */
+ u8 reserved_0; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 lba_count; /*14h */
+
+ u32 sense_buf_phys_addr_lo; /*18h */
+ u32 sense_buf_phys_addr_hi; /*1Ch */
+
+ u32 start_lba_lo; /*20h */
+ u32 start_lba_hi; /*24h */
+
+ union megasas_sgl sgl; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_pthru_frame {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 lun; /*05h */
+ u8 cdb_len; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 data_xfer_len; /*14h */
+
+ u32 sense_buf_phys_addr_lo; /*18h */
+ u32 sense_buf_phys_addr_hi; /*1Ch */
+
+ u8 cdb[16]; /*20h */
+ union megasas_sgl sgl; /*30h */
+
+} __attribute__ ((packed));
+
+struct megasas_dcmd_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+ u8 reserved_1[4]; /*03h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+ u32 opcode; /*18h */
+
+ union { /*1Ch */
+ u8 b[12];
+ u16 s[6];
+ u32 w[3];
+ } mbox;
+
+ union megasas_sgl sgl; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_abort_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+
+ u8 reserved_1; /*03h */
+ u32 reserved_2; /*04h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 reserved_3; /*12h */
+ u32 reserved_4; /*14h */
+
+ u32 abort_context; /*18h */
+ u32 pad_1; /*1Ch */
+
+ u32 abort_mfi_phys_addr_lo; /*20h */
+ u32 abort_mfi_phys_addr_hi; /*24h */
+
+ u32 reserved_5[6]; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_smp_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_1; /*01h */
+ u8 cmd_status; /*02h */
+ u8 connection_status; /*03h */
+
+ u8 reserved_2[3]; /*04h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+ u64 sas_addr; /*18h */
+
+ union {
+ struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
+ struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */
+ } sgl;
+
+} __attribute__ ((packed));
+
+struct megasas_stp_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_1; /*01h */
+ u8 cmd_status; /*02h */
+ u8 reserved_2; /*03h */
+
+ u8 target_id; /*04h */
+ u8 reserved_3[2]; /*05h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+
+ u16 fis[10]; /*18h */
+ u32 stp_flags;
+
+ union {
+ struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
+ struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */
+ } sgl;
+
+} __attribute__ ((packed));
+
+union megasas_frame {
+
+ struct megasas_header hdr;
+ struct megasas_init_frame init;
+ struct megasas_io_frame io;
+ struct megasas_pthru_frame pthru;
+ struct megasas_dcmd_frame dcmd;
+ struct megasas_abort_frame abort;
+ struct megasas_smp_frame smp;
+ struct megasas_stp_frame stp;
+
+ u8 raw_bytes[64];
+};
+
+struct megasas_cmd;
+
+union megasas_evt_class_locale {
+
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u16 locale;
+ u8 reserved;
+ s8 class;
+#else
+ s8 class;
+ u8 reserved;
+ u16 locale;
+#endif
+ } __attribute__ ((packed)) members;
+
+ u32 word;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_log_info {
+ u32 newest_seq_num;
+ u32 oldest_seq_num;
+ u32 clear_seq_num;
+ u32 shutdown_seq_num;
+ u32 boot_seq_num;
+
+} __attribute__ ((packed));
+
+struct megasas_progress {
+
+ u16 progress;
+ u16 elapsed_seconds;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_ld {
+
+ u16 target_id;
+ u8 ld_index;
+ u8 reserved;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_pd {
+ u16 device_id;
+ u8 encl_index;
+ u8 slot_number;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_detail {
+
+ u32 seq_num;
+ u32 time_stamp;
+ u32 code;
+ union megasas_evt_class_locale cl;
+ u8 arg_type;
+ u8 reserved1[15];
+
+ union {
+ struct {
+ struct megasas_evtarg_pd pd;
+ u8 cdb_length;
+ u8 sense_length;
+ u8 reserved[2];
+ u8 cdb[16];
+ u8 sense[64];
+ } __attribute__ ((packed)) cdbSense;
+
+ struct megasas_evtarg_ld ld;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u64 count;
+ } __attribute__ ((packed)) ld_count;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) ld_lba;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u32 prevOwner;
+ u32 newOwner;
+ } __attribute__ ((packed)) ld_owner;
+
+ struct {
+ u64 ld_lba;
+ u64 pd_lba;
+ struct megasas_evtarg_ld ld;
+ struct megasas_evtarg_pd pd;
+ } __attribute__ ((packed)) ld_lba_pd_lba;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ struct megasas_progress prog;
+ } __attribute__ ((packed)) ld_prog;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u32 prev_state;
+ u32 new_state;
+ } __attribute__ ((packed)) ld_state;
+
+ struct {
+ u64 strip;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) ld_strip;
+
+ struct megasas_evtarg_pd pd;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ u32 err;
+ } __attribute__ ((packed)) pd_err;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_pd pd;
+ } __attribute__ ((packed)) pd_lba;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_pd pd;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) pd_lba_ld;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ struct megasas_progress prog;
+ } __attribute__ ((packed)) pd_prog;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ u32 prevState;
+ u32 newState;
+ } __attribute__ ((packed)) pd_state;
+
+ struct {
+ u16 vendorId;
+ u16 deviceId;
+ u16 subVendorId;
+ u16 subDeviceId;
+ } __attribute__ ((packed)) pci;
+
+ u32 rate;
+ char str[96];
+
+ struct {
+ u32 rtc;
+ u32 elapsedSeconds;
+ } __attribute__ ((packed)) time;
+
+ struct {
+ u32 ecar;
+ u32 elog;
+ char str[64];
+ } __attribute__ ((packed)) ecc;
+
+ u8 b[96];
+ u16 s[48];
+ u32 w[24];
+ u64 d[12];
+ } args;
+
+ char description[128];
+
+} __attribute__ ((packed));
+
+struct megasas_aen_event {
+ struct delayed_work hotplug_work;
+ struct megasas_instance *instance;
+};
+
+struct megasas_irq_context {
+ struct megasas_instance *instance;
+ u32 MSIxIndex;
+};
+
+struct megasas_instance {
+
+ u32 *producer;
+ dma_addr_t producer_h;
+ u32 *consumer;
+ dma_addr_t consumer_h;
+ struct MR_LD_VF_AFFILIATION *vf_affiliation;
+ dma_addr_t vf_affiliation_h;
+ struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+ dma_addr_t vf_affiliation_111_h;
+ struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+ dma_addr_t hb_host_mem_h;
+
+ u32 *reply_queue;
+ dma_addr_t reply_queue_h;
+
+ u32 *crash_dump_buf;
+ dma_addr_t crash_dump_h;
+ void *crash_buf[MAX_CRASH_DUMP_SIZE];
+ u32 crash_buf_pages;
+ unsigned int fw_crash_buffer_size;
+ unsigned int fw_crash_state;
+ unsigned int fw_crash_buffer_offset;
+ u32 drv_buf_index;
+ u32 drv_buf_alloc;
+ u32 crash_dump_fw_support;
+ u32 crash_dump_drv_support;
+ u32 crash_dump_app_support;
+ u32 secure_jbod_support;
+ spinlock_t crashdump_lock;
+
+ struct megasas_register_set __iomem *reg_set;
+ u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+ struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
+ struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
+ u8 ld_ids[MEGASAS_MAX_LD_IDS];
+ s8 init_id;
+
+ u16 max_num_sge;
+ u16 max_fw_cmds;
+ u16 max_mfi_cmds;
+ u16 max_scsi_cmds;
+ u32 max_sectors_per_req;
+ struct megasas_aen_event *ev;
+
+ struct megasas_cmd **cmd_list;
+ struct list_head cmd_pool;
+ /* used to sync fire the cmd to fw */
+ spinlock_t mfi_pool_lock;
+ /* used to sync fire the cmd to fw */
+ spinlock_t hba_lock;
+ /* used to synch producer, consumer ptrs in dpc */
+ spinlock_t completion_lock;
+ struct dma_pool *frame_dma_pool;
+ struct dma_pool *sense_dma_pool;
+
+ struct megasas_evt_detail *evt_detail;
+ dma_addr_t evt_detail_h;
+ struct megasas_cmd *aen_cmd;
+ struct mutex aen_mutex;
+ struct semaphore ioctl_sem;
+
+ struct Scsi_Host *host;
+
+ wait_queue_head_t int_cmd_wait_q;
+ wait_queue_head_t abort_cmd_wait_q;
+
+ struct pci_dev *pdev;
+ u32 unique_id;
+ u32 fw_support_ieee;
+
+ atomic_t fw_outstanding;
+ atomic_t fw_reset_no_pci_access;
+
+ struct megasas_instance_template *instancet;
+ struct tasklet_struct isr_tasklet;
+ struct work_struct work_init;
+ struct work_struct crash_init;
+
+ u8 flag;
+ u8 unload;
+ u8 flag_ieee;
+ u8 issuepend_done;
+ u8 disableOnlineCtrlReset;
+ u8 UnevenSpanSupport;
+
+ u8 supportmax256vd;
+ u16 fw_supported_vd_count;
+ u16 fw_supported_pd_count;
+
+ u16 drv_supported_vd_count;
+ u16 drv_supported_pd_count;
+
+ u8 adprecovery;
+ unsigned long last_time;
+ u32 mfiStatus;
+ u32 last_seq_num;
+
+ struct list_head internal_reset_pending_q;
+
+ /* Ptr to hba specific information */
+ void *ctrl_context;
+ u32 ctrl_context_pages;
+ struct megasas_ctrl_info *ctrl_info;
+ unsigned int msix_vectors;
+ struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
+ struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
+ u64 map_id;
+ struct megasas_cmd *map_update_cmd;
+ unsigned long bar;
+ long reset_flags;
+ struct mutex reset_mutex;
+ struct timer_list sriov_heartbeat_timer;
+ char skip_heartbeat_timer_del;
+ u8 requestorId;
+ char PlasmaFW111;
+ char mpio;
+ u16 throttlequeuedepth;
+ u8 mask_interrupts;
+ u8 is_imr;
+};
+struct MR_LD_VF_MAP {
+ u32 size;
+ union MR_LD_REF ref;
+ u8 ldVfCount;
+ u8 reserved[6];
+ u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+ u32 size;
+ u8 ldCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[9];
+ struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+#define MR_LD_ACCESS_HIDDEN 15
+
+struct IOV_111 {
+ u8 maxVFsSupported;
+ u8 numVFsEnabled;
+ u8 requestorId;
+ u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+ u8 targetId;
+ u8 reserved[3];
+ u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+ u8 vdCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[5];
+ struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+ struct {
+ u32 fwCounter; /* Firmware heart beat counter */
+ struct {
+ u32 debugmode:1; /* 1=Firmware is in debug mode.
+ Heart beat will not be updated. */
+ u32 reserved:31;
+ } debug;
+ u32 reserved_fw[6];
+ u32 driverCounter; /* Driver heart beat counter. 0x20 */
+ u32 reserved_driver[7];
+ } HB;
+ u8 pad[0x400-0x40];
+};
+
+enum {
+ MEGASAS_HBA_OPERATIONAL = 0,
+ MEGASAS_ADPRESET_SM_INFAULT = 1,
+ MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
+ MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
+ MEGASAS_HW_CRITICAL_ERROR = 4,
+ MEGASAS_ADPRESET_SM_POLLING = 5,
+ MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
+};
+
+struct megasas_instance_template {
+ void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \
+ u32, struct megasas_register_set __iomem *);
+
+ void (*enable_intr)(struct megasas_instance *);
+ void (*disable_intr)(struct megasas_instance *);
+
+ int (*clear_intr)(struct megasas_register_set __iomem *);
+
+ u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+ int (*adp_reset)(struct megasas_instance *, \
+ struct megasas_register_set __iomem *);
+ int (*check_reset)(struct megasas_instance *, \
+ struct megasas_register_set __iomem *);
+ irqreturn_t (*service_isr)(int irq, void *devp);
+ void (*tasklet)(unsigned long);
+ u32 (*init_adapter)(struct megasas_instance *);
+ u32 (*build_and_issue_cmd) (struct megasas_instance *,
+ struct scsi_cmnd *);
+ void (*issue_dcmd) (struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+};
+
+#define MEGASAS_IS_LOGICAL(scp) \
+ (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+
+#define MEGASAS_DEV_INDEX(inst, scp) \
+ ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+ scp->device->id
+
+struct megasas_cmd {
+
+ union megasas_frame *frame;
+ dma_addr_t frame_phys_addr;
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
+ u32 index;
+ u8 sync_cmd;
+ u8 cmd_status;
+ u8 abort_aen;
+ u8 retry_for_fw_reset;
+
+
+ struct list_head list;
+ struct scsi_cmnd *scmd;
+
+ void *mpt_pthr_cmd_blocked;
+ atomic_t mfi_mpt_pthr;
+ u8 is_wait_event;
+
+ struct megasas_instance *instance;
+ union {
+ struct {
+ u16 smid;
+ u16 resvd;
+ } context;
+ u32 frame_count;
+ };
+};
+
+#define MAX_MGMT_ADAPTERS 1024
+#define MAX_IOCTL_SGE 16
+
+struct megasas_iocpacket {
+
+ u16 host_no;
+ u16 __pad1;
+ u32 sgl_off;
+ u32 sge_count;
+ u32 sense_off;
+ u32 sense_len;
+ union {
+ u8 raw[128];
+ struct megasas_header hdr;
+ } frame;
+
+ struct iovec sgl[MAX_IOCTL_SGE];
+
+} __attribute__ ((packed));
+
+struct megasas_aen {
+ u16 host_no;
+ u16 __pad1;
+ u32 seq_num;
+ u32 class_locale_word;
+} __attribute__ ((packed));
+
+#ifdef CONFIG_COMPAT
+struct compat_megasas_iocpacket {
+ u16 host_no;
+ u16 __pad1;
+ u32 sgl_off;
+ u32 sge_count;
+ u32 sense_off;
+ u32 sense_len;
+ union {
+ u8 raw[128];
+ struct megasas_header hdr;
+ } frame;
+ struct compat_iovec sgl[MAX_IOCTL_SGE];
+} __attribute__ ((packed));
+
+#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
+#endif
+
+#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
+#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
+
+struct megasas_mgmt_info {
+
+ u16 count;
+ struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
+ int max_index;
+};
+
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_get_ctrl_info(struct megasas_instance *instance);
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state);
+void megasas_free_host_crash_buffer(struct megasas_instance *instance);
+void megasas_fusion_crash_dump_wq(struct work_struct *work);
+
+void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd);
+int megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout);
+void __megasas_return_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+
+void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
+int megasas_cmd_type(struct scsi_cmnd *cmd);
+
+#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
new file mode 100644
index 000000000..890637fdd
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -0,0 +1,6892 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Authors: Avago Technologies
+ * Sreenivas Bagalkote
+ * Sumant Patro
+ * Bo Yang
+ * Adam Radford
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if user does not provide
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+ "Maximum number of sectors per IO command");
+
+static int msix_disable;
+module_param(msix_disable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
+
+static unsigned int msix_vectors;
+module_param(msix_vectors, int, S_IRUGO);
+MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
+static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+module_param(throttlequeuedepth, int, S_IRUGO);
+MODULE_PARM_DESC(throttlequeuedepth,
+ "Adapter queue depth when throttled due to I/O timeout. Default: 16");
+
+int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+module_param(resetwaittime, int, S_IRUGO);
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
+ "before resetting adapter. Default: 180");
+
+int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGASAS_VERSION);
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+ u8 query_type);
+static int megasas_issue_init_mfi(struct megasas_instance *instance);
+static int megasas_register_aen(struct megasas_instance *instance,
+ u32 seq_num, u32 class_locale_word);
+/*
+ * PCI ID table for all supported controllers
+ */
+static struct pci_device_id megasas_pci_table[] = {
+
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
+ /* xscale IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
+ /* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
+ /* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
+ /* skinny*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
+ /* skinny*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
+ /* xscale IOP, vega */
+ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
+ /* xscale IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
+ /* Fusion */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+ /* Plasma */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
+ /* Invader */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
+ /* Fury */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, megasas_pci_table);
+
+static int megasas_mgmt_majorno;
+struct megasas_mgmt_info megasas_mgmt_info;
+static struct fasync_struct *megasas_async_queue;
+static DEFINE_MUTEX(megasas_async_queue_mutex);
+
+static int megasas_poll_wait_aen;
+static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
+static u32 support_poll_for_event;
+u32 megasas_dbg_lvl;
+static u32 support_device_change;
+
+/* define lock for aen poll */
+spinlock_t poll_aen_lock;
+
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status);
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
+static irqreturn_t megasas_isr(int irq, void *devp);
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance);
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+static void megasas_complete_cmd_dpc(unsigned long instance_addr);
+void
+megasas_release_fusion(struct megasas_instance *instance);
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance);
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance);
+u8
+megasas_get_map_info(struct megasas_instance *instance);
+int
+megasas_sync_map_info(struct megasas_instance *instance);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
+void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+
+void
+megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr, 0, instance->reg_set);
+}
+
+/**
+ * megasas_get_cmd - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+ *instance)
+{
+ unsigned long flags;
+ struct megasas_cmd *cmd = NULL;
+
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+
+ if (!list_empty(&instance->cmd_pool)) {
+ cmd = list_entry((&instance->cmd_pool)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
+ } else {
+ printk(KERN_ERR "megasas: Command pool empty!\n");
+ }
+
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * __megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ /*
+ * Don't go ahead and free the MFI frame, if corresponding
+ * MPT frame is not freed(valid for only fusion adapters).
+ * In case of MFI adapters, anyways for any allocated MFI
+ * frame will have cmd->mfi_mpt_mpthr set to MFI_MPT_DETACHED
+ */
+ if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
+ return;
+
+ cmd->scmd = NULL;
+ cmd->frame_count = 0;
+ cmd->is_wait_event = 0;
+ cmd->mpt_pthr_cmd_blocked = NULL;
+
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+ list_add(&cmd->list, (&instance->cmd_pool)->next);
+}
+
+/**
+ * megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ __megasas_return_cmd(instance, cmd);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+
+/**
+* The following functions are defined for xscale
+* (deviceid : 1064R, PERC5) controllers
+*/
+
+/**
+ * megasas_enable_intr_xscale - Enables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_enable_intr_xscale(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_xscale -Disables interrupt
+ * @regs: MFI register set
+ */
+static inline void
+megasas_disable_intr_xscale(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0x1f;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_xscale - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+{
+ return readl(&(regs)->outbound_msg_0);
+}
+/**
+ * megasas_clear_interrupt_xscale - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_OB_INTR_STATUS_MASK)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+ if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ if (mfiStatus)
+ writel(status, &regs->outbound_intr_status);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_xscale - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_xscale(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr >> 3)|(frame_count),
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_xscale - For controller reset
+ * @regs: MFI register set
+ */
+static int
+megasas_adp_reset_xscale(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ u32 i;
+ u32 pcidata;
+ writel(MFI_ADP_RESET, &regs->inbound_doorbell);
+
+ for (i = 0; i < 3; i++)
+ msleep(1000); /* sleep for 3 secs */
+ pcidata = 0;
+ pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
+ printk(KERN_NOTICE "pcidata = %x\n", pcidata);
+ if (pcidata & 0x2) {
+ printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
+ pcidata &= ~0x2;
+ pci_write_config_dword(instance->pdev,
+ MFI_1068_PCSR_OFFSET, pcidata);
+
+ for (i = 0; i < 2; i++)
+ msleep(1000); /* need to wait 2 secs again */
+
+ pcidata = 0;
+ pci_read_config_dword(instance->pdev,
+ MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
+ printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
+ if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
+ printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
+ pcidata = 0;
+ pci_write_config_dword(instance->pdev,
+ MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
+ }
+ }
+ return 0;
+}
+
+/**
+ * megasas_check_reset_xscale - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_xscale(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+
+ if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (le32_to_cpu(*instance->consumer) ==
+ MEGASAS_ADPRESET_INPROG_SIGN))
+ return 1;
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_xscale = {
+
+ .fire_cmd = megasas_fire_cmd_xscale,
+ .enable_intr = megasas_enable_intr_xscale,
+ .disable_intr = megasas_disable_intr_xscale,
+ .clear_intr = megasas_clear_intr_xscale,
+ .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
+ .adp_reset = megasas_adp_reset_xscale,
+ .check_reset = megasas_check_reset_xscale,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+* This is the end of set of functions & definitions specific
+* to xscale (deviceid : 1064R, PERC5) controllers
+*/
+
+/**
+* The following functions are defined for ppc (deviceid : 0x60)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_ppc - Enables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_enable_intr_ppc(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+ writel(~0x80000000, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_ppc - Disable interrupt
+ * @regs: MFI register set
+ */
+static inline void
+megasas_disable_intr_ppc(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_ppc - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_ppc - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+{
+ u32 status, mfiStatus = 0;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_doorbell_clear);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_ppc - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_ppc(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_ppc - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_ppc(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_ppc = {
+
+ .fire_cmd = megasas_fire_cmd_ppc,
+ .enable_intr = megasas_enable_intr_ppc,
+ .disable_intr = megasas_disable_intr_ppc,
+ .clear_intr = megasas_clear_intr_ppc,
+ .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
+ .check_reset = megasas_check_reset_ppc,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+ * megasas_enable_intr_skinny - Enables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_enable_intr_skinny(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
+
+ writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_skinny - Disables interrupt
+ * @regs: MFI register set
+ */
+static inline void
+megasas_disable_intr_skinny(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_skinny - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_skinny - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
+ return 0;
+ }
+
+ /*
+ * Check if it is our interrupt
+ */
+ if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_intr_status);
+
+ /*
+ * dummy read to flush PCI
+ */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_skinny - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_skinny(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel(upper_32_bits(frame_phys_addr),
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_skinny - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_skinny(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_skinny = {
+
+ .fire_cmd = megasas_fire_cmd_skinny,
+ .enable_intr = megasas_enable_intr_skinny,
+ .disable_intr = megasas_disable_intr_skinny,
+ .clear_intr = megasas_clear_intr_skinny,
+ .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
+ .check_reset = megasas_check_reset_skinny,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+
+/**
+* The following functions are defined for gen2 (deviceid : 0x78 0x79)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_gen2 - Enables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_enable_intr_gen2(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+ /* write ~0x00000005 (4 & 1) to the intr mask*/
+ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_gen2 - Disables interrupt
+ * @regs: MFI register set
+ */
+static inline void
+megasas_disable_intr_gen2(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_gen2 - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+ }
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ }
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ if (mfiStatus)
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+/**
+ * megasas_fire_cmd_gen2 - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_gen2(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_gen2 - For controller reset
+ * @regs: MFI register set
+ */
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set)
+{
+ u32 retry = 0 ;
+ u32 HostDiag;
+ u32 *seq_offset = &reg_set->seq_offset;
+ u32 *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
+
+ msleep(1000);
+
+ HostDiag = (u32)readl(hostdiag_offset);
+
+ while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
+ msleep(100);
+ HostDiag = (u32)readl(hostdiag_offset);
+ printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
+ retry, HostDiag);
+
+ if (retry++ >= 100)
+ return 1;
+
+ }
+
+ printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
+
+ ssleep(10);
+
+ HostDiag = (u32)readl(hostdiag_offset);
+ while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
+ msleep(100);
+ HostDiag = (u32)readl(hostdiag_offset);
+ printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
+ retry, HostDiag);
+
+ if (retry++ >= 1000)
+ return 1;
+
+ }
+ return 0;
+}
+
+/**
+ * megasas_check_reset_gen2 - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_gen2 = {
+
+ .fire_cmd = megasas_fire_cmd_gen2,
+ .enable_intr = megasas_enable_intr_gen2,
+ .disable_intr = megasas_disable_intr_gen2,
+ .clear_intr = megasas_clear_intr_gen2,
+ .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
+ .adp_reset = megasas_adp_reset_gen2,
+ .check_reset = megasas_check_reset_gen2,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+* This is the end of set of functions & definitions
+* specific to gen2 (deviceid : 0x78, 0x79) controllers
+*/
+
+/*
+ * Template added for TB (Fusion)
+ */
+extern struct megasas_instance_template megasas_instance_template_fusion;
+
+/**
+ * megasas_issue_polled - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ int seconds;
+
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+ frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+ /*
+ * Issue the frame using inbound queue port
+ */
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ /*
+ * Wait for cmd_status to change
+ */
+ if (instance->requestorId)
+ seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+ else
+ seconds = MFI_POLL_TIMEOUT_SECS;
+ return wait_and_poll(instance, cmd, seconds);
+}
+
+/**
+ * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
+ * @instance: Adapter soft state
+ * @cmd: Command to be issued
+ * @timeout: Timeout in seconds
+ *
+ * This function waits on an event for the command to be returned from ISR.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ * Used to issue ioctl commands.
+ */
+int
+megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout)
+{
+ int ret = 0;
+ cmd->cmd_status = ENODATA;
+
+ cmd->is_wait_event = 1;
+ instance->instancet->issue_dcmd(instance, cmd);
+ if (timeout) {
+ ret = wait_event_timeout(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret)
+ return 1;
+ } else
+ wait_event(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
+ return 0;
+}
+
+/**
+ * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
+ * @instance: Adapter soft state
+ * @cmd_to_abort: Previously issued cmd to be aborted
+ * @timeout: Timeout in seconds
+ *
+ * MFI firmware can abort previously issued AEN comamnd (automatic event
+ * notification). The megasas_issue_blocked_abort_cmd() issues such abort
+ * cmd and waits for return status.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ */
+static int
+megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_to_abort, int timeout)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_abort_frame *abort_fr;
+ int ret = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -1;
+
+ abort_fr = &cmd->frame->abort;
+
+ /*
+ * Prepare and issue the abort frame
+ */
+ abort_fr->cmd = MFI_CMD_ABORT;
+ abort_fr->cmd_status = 0xFF;
+ abort_fr->flags = cpu_to_le16(0);
+ abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+ abort_fr->abort_mfi_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+ abort_fr->abort_mfi_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
+
+ cmd->sync_cmd = 1;
+ cmd->cmd_status = ENODATA;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ if (timeout) {
+ ret = wait_event_timeout(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret) {
+ dev_err(&instance->pdev->dev, "Command timedout"
+ "from %s\n", __func__);
+ return 1;
+ }
+ } else
+ wait_event(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
+ cmd->sync_cmd = 0;
+
+ megasas_return_cmd(instance, cmd);
+ return 0;
+}
+
+/**
+ * megasas_make_sgl32 - Prepares 32-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returnes -1.
+ */
+static int
+megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl64 - Prepares 64-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returnes -1.
+ */
+static int
+megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl_skinny - Prepares IEEE SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returnes -1.
+ */
+static int
+megasas_make_sgl_skinny(struct megasas_instance *instance,
+ struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge_skinny[i].length =
+ cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge_skinny[i].phys_addr =
+ cpu_to_le64(sg_dma_address(os_sgl));
+ mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
+ }
+ }
+ return sge_count;
+}
+
+ /**
+ * megasas_get_frame_count - Computes the number of frames
+ * @frame_type : type of frame- io or pthru frame
+ * @sge_count : number of sg elements
+ *
+ * Returns the number of frames required for numnber of sge's (sge_count)
+ */
+
+static u32 megasas_get_frame_count(struct megasas_instance *instance,
+ u8 sge_count, u8 frame_type)
+{
+ int num_cnt;
+ int sge_bytes;
+ u32 sge_sz;
+ u32 frame_count=0;
+
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (instance->flag_ieee) {
+ sge_sz = sizeof(struct megasas_sge_skinny);
+ }
+
+ /*
+ * Main frame can contain 2 SGEs for 64-bit SGLs and
+ * 3 SGEs for 32-bit SGLs for ldio &
+ * 1 SGEs for 64-bit SGLs and
+ * 2 SGEs for 32-bit SGLs for pthru frame
+ */
+ if (unlikely(frame_type == PTHRU_FRAME)) {
+ if (instance->flag_ieee == 1) {
+ num_cnt = sge_count - 1;
+ } else if (IS_DMA64)
+ num_cnt = sge_count - 1;
+ else
+ num_cnt = sge_count - 2;
+ } else {
+ if (instance->flag_ieee == 1) {
+ num_cnt = sge_count - 1;
+ } else if (IS_DMA64)
+ num_cnt = sge_count - 2;
+ else
+ num_cnt = sge_count - 3;
+ }
+
+ if(num_cnt>0){
+ sge_bytes = sge_sz * num_cnt;
+
+ frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+ ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
+ }
+ /* Main frame */
+ frame_count +=1;
+
+ if (frame_count > 7)
+ frame_count = 8;
+ return frame_count;
+}
+
+/**
+ * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typcially pass-through
+ * commands to the devices.
+ */
+static int
+megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 is_logical;
+ u32 device_id;
+ u16 flags = 0;
+ struct megasas_pthru_frame *pthru;
+
+ is_logical = MEGASAS_IS_LOGICAL(scp);
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ pthru = (struct megasas_pthru_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+ else if (scp->sc_data_direction == PCI_DMA_NONE)
+ flags = MFI_FRAME_DIR_NONE;
+
+ if (instance->flag_ieee == 1) {
+ flags |= MFI_FRAME_IEEE;
+ }
+
+ /*
+ * Prepare the DCDB frame
+ */
+ pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
+ pthru->cmd_status = 0x0;
+ pthru->scsi_status = 0x0;
+ pthru->target_id = device_id;
+ pthru->lun = scp->device->lun;
+ pthru->cdb_len = scp->cmd_len;
+ pthru->timeout = 0;
+ pthru->pad_0 = 0;
+ pthru->flags = cpu_to_le16(flags);
+ pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
+
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ /*
+ * If the command is for the tape device, set the
+ * pthru timeout to the os layer timeout value.
+ */
+ if (scp->device->type == TYPE_TAPE) {
+ if ((scp->request->timeout / HZ) > 0xFFFF)
+ pthru->timeout = 0xFFFF;
+ else
+ pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+ }
+
+ /*
+ * Construct SGL
+ */
+ if (instance->flag_ieee == 1) {
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
+ &pthru->sgl);
+ } else if (IS_DMA64) {
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ pthru->sge_count = megasas_make_sgl64(instance, scp,
+ &pthru->sgl);
+ } else
+ pthru->sge_count = megasas_make_sgl32(instance, scp,
+ &pthru->sgl);
+
+ if (pthru->sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n",
+ pthru->sge_count);
+ return 0;
+ }
+
+ /*
+ * Sense info specific
+ */
+ pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
+ pthru->sense_buf_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+ pthru->sense_buf_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
+ PTHRU_FRAME);
+
+ return cmd->frame_count;
+}
+
+/**
+ * megasas_build_ldio - Prepares IOs to logical devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
+ */
+static int
+megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 device_id;
+ u8 sc = scp->cmnd[0];
+ u16 flags = 0;
+ struct megasas_io_frame *ldio;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ ldio = (struct megasas_io_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+
+ if (instance->flag_ieee == 1) {
+ flags |= MFI_FRAME_IEEE;
+ }
+
+ /*
+ * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+ */
+ ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
+ ldio->cmd_status = 0x0;
+ ldio->scsi_status = 0x0;
+ ldio->target_id = device_id;
+ ldio->timeout = 0;
+ ldio->reserved_0 = 0;
+ ldio->pad_0 = 0;
+ ldio->flags = cpu_to_le16(flags);
+ ldio->start_lba_hi = 0;
+ ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) |
+ (u32) scp->cmnd[3]);
+
+ ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8));
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
+
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) |
+ (u32) scp->cmnd[13]);
+
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
+
+ ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+
+ }
+
+ /*
+ * Construct SGL
+ */
+ if (instance->flag_ieee) {
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
+ &ldio->sgl);
+ } else if (IS_DMA64) {
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+ } else
+ ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+
+ if (ldio->sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+ ldio->sge_count);
+ return 0;
+ }
+
+ /*
+ * Sense info specific
+ */
+ ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
+ ldio->sense_buf_phys_addr_hi = 0;
+ ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = megasas_get_frame_count(instance,
+ ldio->sge_count, IO_FRAME);
+
+ return cmd->frame_count;
+}
+
+/**
+ * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
+ * and whether it's RW or non RW
+ * @scmd: SCSI command
+ *
+ */
+inline int megasas_cmd_type(struct scsi_cmnd *cmd)
+{
+ int ret;
+
+ switch (cmd->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
+ break;
+ default:
+ ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
+ }
+ return ret;
+}
+
+ /**
+ * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
+ * in FW
+ * @instance: Adapter soft state
+ */
+static inline void
+megasas_dump_pending_frames(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ int i,n;
+ union megasas_sgl *mfi_sgl;
+ struct megasas_io_frame *ldio;
+ struct megasas_pthru_frame *pthru;
+ u32 sgcount;
+ u32 max_cmd = instance->max_fw_cmds;
+
+ printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+ printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+ if (IS_DMA64)
+ printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+ else
+ printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+
+ printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ if(!cmd->scmd)
+ continue;
+ printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+ if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
+ ldio = (struct megasas_io_frame *)cmd->frame;
+ mfi_sgl = &ldio->sgl;
+ sgcount = ldio->sge_count;
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+ " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+ le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+ le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
+ }
+ else {
+ pthru = (struct megasas_pthru_frame *) cmd->frame;
+ mfi_sgl = &pthru->sgl;
+ sgcount = pthru->sge_count;
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+ "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+ pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+ le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
+ }
+ if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
+ for (n = 0; n < sgcount; n++){
+ if (IS_DMA64)
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+ le32_to_cpu(mfi_sgl->sge64[n].length),
+ le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
+ else
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+ le32_to_cpu(mfi_sgl->sge32[n].length),
+ le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
+ }
+ }
+ printk(KERN_ERR "\n");
+ } /*for max_cmd*/
+ printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if(cmd->sync_cmd == 1){
+ printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
+ }
+ }
+ printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
+}
+
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd *cmd;
+ u32 frame_count;
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Logical drive command
+ */
+ if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
+ frame_count = megasas_build_ldio(instance, scmd, cmd);
+ else
+ frame_count = megasas_build_dcdb(instance, scmd, cmd);
+
+ if (!frame_count)
+ goto out_return_cmd;
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *)cmd;
+
+ /*
+ * Issue the command to the FW
+ */
+ atomic_inc(&instance->fw_outstanding);
+
+ instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
+ cmd->frame_count-1, instance->reg_set);
+
+ return 0;
+out_return_cmd:
+ megasas_return_cmd(instance, cmd);
+ return 1;
+}
+
+
+/**
+ * megasas_queue_command - Queue entry point
+ * @scmd: SCSI command to be queued
+ * @done: Callback entry point
+ */
+static int
+megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ unsigned long flags;
+
+ instance = (struct megasas_instance *)
+ scmd->device->host->hostdata;
+
+ if (instance->unload == 1) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (instance->issuepend_done == 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+
+ /* Check for an mpio path and adjust behavior */
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (megasas_check_mpio_paths(instance, scmd) ==
+ (DID_RESET << 16)) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ }
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ scmd->result = 0;
+
+ if (MEGASAS_IS_LOGICAL(scmd) &&
+ (scmd->device->id >= instance->fw_supported_vd_count ||
+ scmd->device->lun)) {
+ scmd->result = DID_BAD_TARGET << 16;
+ goto out_done;
+ }
+
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ /*
+ * FW takes care of flush cache on its own
+ * No need to send it down
+ */
+ scmd->result = DID_OK << 16;
+ goto out_done;
+ default:
+ break;
+ }
+
+ if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+ printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ return 0;
+
+ out_done:
+ scmd->scsi_done(scmd);
+ return 0;
+}
+
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+ int i;
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+ if ((megasas_mgmt_info.instance[i]) &&
+ (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+ return megasas_mgmt_info.instance[i];
+ }
+
+ return NULL;
+}
+
+static int megasas_slave_configure(struct scsi_device *sdev)
+{
+ /*
+ * The RAID firmware may require extended timeouts.
+ */
+ blk_queue_rq_timeout(sdev->request_queue,
+ MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+
+ return 0;
+}
+
+static int megasas_slave_alloc(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+ struct megasas_instance *instance ;
+ instance = megasas_lookup_instance(sdev->host->host_no);
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ /*
+ * Open the OS scan to the SYSTEM PD
+ */
+ pd_index =
+ (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ return 0;
+ }
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/*
+* megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
+* kill adapter
+* @instance: Adapter soft state
+*
+*/
+void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+{
+ int i;
+ struct megasas_cmd *cmd_mfi;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ /* Find all outstanding ioctls */
+ if (fusion) {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->sync_cmd &&
+ cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ megasas_complete_cmd(instance,
+ cmd_mfi, DID_OK);
+ }
+ }
+ } else {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd_mfi = instance->cmd_list[i];
+ if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
+ MFI_CMD_ABORT)
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ }
+ }
+}
+
+
+void megaraid_sas_kill_hba(struct megasas_instance *instance)
+{
+ /* Set critical error to block I/O & ioctls in case caller didn't */
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ /* Wait 1 second to ensure IO or ioctls in build have posted */
+ msleep(1000);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ if (instance->mpio && instance->requestorId)
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ } else {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->inbound_doorbell);
+ }
+ /* Complete outstanding ioctls when adapter is killed */
+ megasas_complete_outstanding_ioctls(instance);
+}
+
+ /**
+ * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
+ * restored to max value
+ * @instance: Adapter soft state
+ *
+ */
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
+{
+ unsigned long flags;
+
+ if (instance->flag & MEGASAS_FW_BUSY
+ && time_after(jiffies, instance->last_time + 5 * HZ)
+ && atomic_read(&instance->fw_outstanding) <
+ instance->throttlequeuedepth + 1) {
+
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ instance->flag &= ~MEGASAS_FW_BUSY;
+
+ instance->host->can_queue = instance->max_scsi_cmds;
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+}
+
+/**
+ * megasas_complete_cmd_dpc - Returns FW's controller structure
+ * @instance_addr: Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+{
+ u32 producer;
+ u32 consumer;
+ u32 context;
+ struct megasas_cmd *cmd;
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+
+ /* If we have already declared adapter dead, donot complete cmds */
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
+ return;
+
+ spin_lock_irqsave(&instance->completion_lock, flags);
+
+ producer = le32_to_cpu(*instance->producer);
+ consumer = le32_to_cpu(*instance->consumer);
+
+ while (consumer != producer) {
+ context = le32_to_cpu(instance->reply_queue[consumer]);
+ if (context >= instance->max_fw_cmds) {
+ printk(KERN_ERR "Unexpected context value %x\n",
+ context);
+ BUG();
+ }
+
+ cmd = instance->cmd_list[context];
+
+ megasas_complete_cmd(instance, cmd, DID_OK);
+
+ consumer++;
+ if (consumer == (instance->max_fw_cmds + 1)) {
+ consumer = 0;
+ }
+ }
+
+ *instance->consumer = cpu_to_le32(producer);
+
+ spin_unlock_irqrestore(&instance->completion_lock, flags);
+
+ /*
+ * Check if we can restore can_queue
+ */
+ megasas_check_and_restore_queue_depth(instance);
+}
+
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance: Adapter soft state
+ * @timer: timer object to be initialized
+ * @fn: timer function
+ * @interval: time interval between timer function call
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval)
+{
+ init_timer(timer);
+ timer->expires = jiffies + interval;
+ timer->data = (unsigned long)instance;
+ timer->function = fn;
+ add_timer(timer);
+}
+
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+ *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+ }
+ instance->instancet->disable_intr(instance);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+ process_fw_state_change_wq(&instance->work_init);
+}
+
+static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+ dma_addr_t new_affiliation_111_h;
+ int ld, retval = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation_111) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ memset(instance->vf_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else {
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ if (!new_affiliation_111) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
+ new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ retval = 1;
+ goto out;
+ }
+ }
+out:
+ if (new_affiliation_111) {
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ }
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ int i, j, retval = 0, found = 0, doscan = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for passive path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
+ for (i = 0 ; i < new_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0; j < instance->vf_affiliation->ldCount;
+ j++) {
+ if (newmap->ref.targetId ==
+ savedmap->ref.targetId) {
+ found = 1;
+ if (newmap->policy[thisVf] !=
+ savedmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ }
+ if (!found && newmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap + newmap->size);
+ }
+
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+
+ for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0 ; j < new_affiliation->ldCount; j++) {
+ if (savedmap->ref.targetId ==
+ newmap->ref.targetId) {
+ found = 1;
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
+ }
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap +
+ newmap->size);
+ }
+ if (!found && savedmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ }
+ }
+out:
+ if (doscan) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ memcpy(instance->vf_affiliation, new_affiliation,
+ new_affiliation->size);
+ retval = 1;
+ }
+
+ if (new_affiliation)
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ int retval;
+
+ if (instance->PlasmaFW111)
+ retval = megasas_get_ld_vf_affiliation_111(instance, initial);
+ else
+ retval = megasas_get_ld_vf_affiliation_12(instance, initial);
+ return retval;
+}
+
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ int retval = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (initial) {
+ instance->hb_host_mem =
+ pci_zalloc_consistent(instance->pdev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h);
+ if (!instance->hb_host_mem) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+ " memory for heartbeat host memory for "
+ "scsi%d.\n", instance->host->host_no);
+ retval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+ dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+ instance->host->host_no);
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ retval = 0;
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD timed out for scsi%d\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+ dcmd->cmd_status,
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+out:
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ mod_timer(&instance->sriov_heartbeat_timer,
+ jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+ "completed for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+}
+
+/**
+ * megasas_wait_for_outstanding - Wait for all outstanding cmds
+ * @instance: Adapter soft state
+ *
+ * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
+ * complete all its outstanding commands. Returns error if one or more IOs
+ * are pending after this time period. It also marks the controller dead.
+ */
+static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+{
+ int i;
+ u32 reset_index;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+ u8 adprecovery;
+ unsigned long flags;
+ struct list_head clist_local;
+ struct megasas_cmd *reset_cmd;
+ u32 fw_state;
+ u8 kill_adapter_flag;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ adprecovery = instance->adprecovery;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+
+ INIT_LIST_HEAD(&clist_local);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ list_splice_init(&instance->internal_reset_pending_q,
+ &clist_local);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+ for (i = 0; i < wait_time; i++) {
+ msleep(1000);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ adprecovery = instance->adprecovery;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ }
+
+ if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return FAILED;
+ }
+
+ reset_index = 0;
+ while (!list_empty(&clist_local)) {
+ reset_cmd = list_entry((&clist_local)->next,
+ struct megasas_cmd, list);
+ list_del_init(&reset_cmd->list);
+ if (reset_cmd->scmd) {
+ reset_cmd->scmd->result = DID_RESET << 16;
+ printk(KERN_NOTICE "%d:%p reset [%02x]\n",
+ reset_index, reset_cmd,
+ reset_cmd->scmd->cmnd[0]);
+
+ reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+ megasas_return_cmd(instance, reset_cmd);
+ } else if (reset_cmd->sync_cmd) {
+ printk(KERN_NOTICE "megasas:%p synch cmds"
+ "reset queue\n",
+ reset_cmd);
+
+ reset_cmd->cmd_status = ENODATA;
+ instance->instancet->fire_cmd(instance,
+ reset_cmd->frame_phys_addr,
+ 0, instance->reg_set);
+ } else {
+ printk(KERN_NOTICE "megasas: %p unexpected"
+ "cmds lst\n",
+ reset_cmd);
+ }
+ reset_index++;
+ }
+
+ return SUCCESS;
+ }
+
+ for (i = 0; i < resetwaittime; i++) {
+
+ int outstanding = atomic_read(&instance->fw_outstanding);
+
+ if (!outstanding)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ "commands to complete\n",i,outstanding);
+ /*
+ * Call cmd completion routine. Cmd to be
+ * be completed directly without depending on isr.
+ */
+ megasas_complete_cmd_dpc((unsigned long)instance);
+ }
+
+ msleep(1000);
+ }
+
+ i = 0;
+ kill_adapter_flag = 0;
+ do {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ if (i == 3) {
+ kill_adapter_flag = 2;
+ break;
+ }
+ megasas_do_ocr(instance);
+ kill_adapter_flag = 1;
+
+ /* wait for 1 secs to let FW finish the pending cmds */
+ msleep(1000);
+ }
+ i++;
+ } while (i <= 3);
+
+ if (atomic_read(&instance->fw_outstanding) &&
+ !kill_adapter_flag) {
+ if (instance->disableOnlineCtrlReset == 0) {
+
+ megasas_do_ocr(instance);
+
+ /* wait for 5 secs to let FW finish the pending cmds */
+ for (i = 0; i < wait_time; i++) {
+ int outstanding =
+ atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ return SUCCESS;
+ msleep(1000);
+ }
+ }
+ }
+
+ if (atomic_read(&instance->fw_outstanding) ||
+ (kill_adapter_flag == 2)) {
+ printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+ /*
+ * Send signal to FW to stop processing any pending cmds.
+ * The controller will be taken offline by the OS now.
+ */
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->doorbell);
+ } else {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->inbound_doorbell);
+ }
+ megasas_dump_pending_frames(instance);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return FAILED;
+ }
+
+ printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+
+ return SUCCESS;
+}
+
+/**
+ * megasas_generic_reset - Generic reset routine
+ * @scmd: Mid-layer SCSI command
+ *
+ * This routine implements a generic reset handler for device, bus and host
+ * reset requests. Device, bus and host specific reset handlers can use this
+ * function after they do their specific tasks.
+ */
+static int megasas_generic_reset(struct scsi_cmnd *scmd)
+{
+ int ret_val;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
+ scmd->cmnd[0], scmd->retries);
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_ERR "megasas: cannot recover from previous reset "
+ "failures\n");
+ return FAILED;
+ }
+
+ ret_val = megasas_wait_for_outstanding(instance);
+ if (ret_val == SUCCESS)
+ printk(KERN_NOTICE "megasas: reset successful \n");
+ else
+ printk(KERN_ERR "megasas: failed to do reset\n");
+
+ return ret_val;
+}
+
+/**
+ * megasas_reset_timer - quiesce the adapter if required
+ * @scmd: scsi cmnd
+ *
+ * Sets the FW busy flag and reduces the host->can_queue if the
+ * cmd has not been completed within the timeout period.
+ */
+static enum
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ unsigned long flags;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc +
+ (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+ return BLK_EH_NOT_HANDLED;
+ }
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ if (!(instance->flag & MEGASAS_FW_BUSY)) {
+ /* FW is busy, throttle IO */
+ spin_lock_irqsave(instance->host->host_lock, flags);
+
+ instance->host->can_queue = instance->throttlequeuedepth;
+ instance->last_time = jiffies;
+ instance->flag |= MEGASAS_FW_BUSY;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+ return BLK_EH_RESET_TIMER;
+}
+
+/**
+ * megasas_reset_device - Device reset handler entry point
+ */
+static int megasas_reset_device(struct scsi_cmnd *scmd)
+{
+ int ret;
+
+ /*
+ * First wait for all commands to complete
+ */
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_reset_bus_host - Bus & host reset handler entry point
+ */
+static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
+{
+ int ret;
+ struct megasas_instance *instance;
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ /*
+ * First wait for all commands to complete
+ */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ ret = megasas_reset_fusion(scmd->device->host, 1);
+ else
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_bios_param - Returns disk geometry for a disk
+ * @sdev: device handle
+ * @bdev: block device
+ * @capacity: drive capacity
+ * @geom: geometry parameters
+ */
+static int
+megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ unsigned long tmp;
+ /* Default heads (64) & sectors (32) */
+ heads = 64;
+ sectors = 32;
+
+ tmp = heads * sectors;
+ cylinders = capacity;
+
+ sector_div(cylinders, tmp);
+
+ /*
+ * Handle extended translation size for logical drives > 1Gb
+ */
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ tmp = heads*sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+static void megasas_aen_polling(struct work_struct *work);
+
+/**
+ * megasas_service_aen - Processes an event notification
+ * @instance: Adapter soft state
+ * @cmd: AEN command completed by the ISR
+ *
+ * For AEN, driver sends a command down to FW that is held by the FW till an
+ * event occurs. When an event of interest occurs, FW completes the command
+ * that it was previously holding.
+ *
+ * This routines sends SIGIO signal to processes that have registered with the
+ * driver for AEN.
+ */
+static void
+megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+ /*
+ * Don't signal app if it is just an aborted previously registered aen
+ */
+ if ((!cmd->abort_aen) && (instance->unload == 0)) {
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ megasas_poll_wait_aen = 1;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ wake_up(&megasas_poll_wait);
+ kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+ }
+ else
+ cmd->abort_aen = 0;
+
+ instance->aen_cmd = NULL;
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ if ((instance->unload == 0) &&
+ ((instance->issuepend_done == 1))) {
+ struct megasas_aen_event *ev;
+ ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+ if (!ev) {
+ printk(KERN_ERR "megasas_service_aen: out of memory\n");
+ } else {
+ ev->instance = instance;
+ instance->ev = ev;
+ INIT_DELAYED_WORK(&ev->hotplug_work,
+ megasas_aen_polling);
+ schedule_delayed_work(&ev->hotplug_work, 0);
+ }
+ }
+}
+
+static ssize_t
+megasas_fw_crash_buffer_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ instance->fw_crash_buffer_offset = val;
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_buffer_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ buff_offset = instance->fw_crash_buffer_offset;
+ if (!instance->crash_dump_buf &&
+ !((instance->fw_crash_state == AVAILABLE) ||
+ (instance->fw_crash_state == COPYING))) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump is not available\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return -EINVAL;
+ }
+
+ buff_addr = (unsigned long) buf;
+
+ if (buff_offset >
+ (instance->fw_crash_buffer_size * dmachunk)) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump offset is out of range\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return 0;
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+ (buff_offset % dmachunk);
+ memcpy(buf, (void *)src_addr, size);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+
+ return size;
+}
+
+static ssize_t
+megasas_fw_crash_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
+ ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
+}
+
+static ssize_t
+megasas_fw_crash_state_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ if ((val <= AVAILABLE || val > COPY_ERROR)) {
+ dev_err(&instance->pdev->dev, "application updates invalid "
+ "firmware crash state\n");
+ return -EINVAL;
+ }
+
+ instance->fw_crash_state = val;
+
+ if ((val == COPIED) || (val == COPY_ERROR)) {
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ megasas_free_host_crash_buffer(instance);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ if (val == COPY_ERROR)
+ dev_info(&instance->pdev->dev, "application failed to "
+ "copy Firmware crash dump\n");
+ else
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "copied successfully\n");
+ }
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_state_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
+}
+
+static ssize_t
+megasas_page_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
+}
+
+static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
+static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
+ megasas_fw_crash_buffer_size_show, NULL);
+static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_state_show, megasas_fw_crash_state_store);
+static DEVICE_ATTR(page_size, S_IRUGO,
+ megasas_page_size_show, NULL);
+
+struct device_attribute *megaraid_host_attrs[] = {
+ &dev_attr_fw_crash_buffer_size,
+ &dev_attr_fw_crash_buffer,
+ &dev_attr_fw_crash_state,
+ &dev_attr_page_size,
+ NULL,
+};
+
+/*
+ * Scsi host template for megaraid_sas driver
+ */
+static struct scsi_host_template megasas_template = {
+
+ .module = THIS_MODULE,
+ .name = "LSI SAS based MegaRAID driver",
+ .proc_name = "megaraid_sas",
+ .slave_configure = megasas_slave_configure,
+ .slave_alloc = megasas_slave_alloc,
+ .queuecommand = megasas_queue_command,
+ .eh_device_reset_handler = megasas_reset_device,
+ .eh_bus_reset_handler = megasas_reset_bus_host,
+ .eh_host_reset_handler = megasas_reset_bus_host,
+ .eh_timed_out = megasas_reset_timer,
+ .shost_attrs = megaraid_host_attrs,
+ .bios_param = megasas_bios_param,
+ .use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = scsi_change_queue_depth,
+ .no_write_same = 1,
+};
+
+/**
+ * megasas_complete_int_cmd - Completes an internal command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ * The megasas_issue_blocked_cmd() function waits for a command to complete
+ * after it issues a command. This function wakes up that waiting routine by
+ * calling wake_up() on the wait queue.
+ */
+static void
+megasas_complete_int_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ cmd->cmd_status = cmd->frame->io.cmd_status;
+
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+ wake_up(&instance->int_cmd_wait_q);
+}
+
+/**
+ * megasas_complete_abort - Completes aborting a command
+ * @instance: Adapter soft state
+ * @cmd: Cmd that was issued to abort another cmd
+ *
+ * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
+ * after it issues an abort on a previously issued command. This function
+ * wakes up all functions waiting on the same wait queue.
+ */
+static void
+megasas_complete_abort(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ cmd->cmd_status = 0;
+ wake_up(&instance->abort_cmd_wait_q);
+ }
+
+ return;
+}
+
+/**
+ * megasas_complete_cmd - Completes a command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ * @alt_status: If non-zero, use this value as status to
+ * SCSI mid-layer instead of the value returned
+ * by the FW. This should be used if caller wants
+ * an alternate status (as in the case of aborted
+ * commands)
+ */
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status)
+{
+ int exception = 0;
+ struct megasas_header *hdr = &cmd->frame->hdr;
+ unsigned long flags;
+ struct fusion_context *fusion = instance->ctrl_context;
+ u32 opcode;
+
+ /* flag for the retry reset */
+ cmd->retry_for_fw_reset = 0;
+
+ if (cmd->scmd)
+ cmd->scmd->SCp.ptr = NULL;
+
+ switch (hdr->cmd) {
+ case MFI_CMD_INVALID:
+ /* Some older 1068 controller FW may keep a pended
+ MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
+ when booting the kdump kernel. Ignore this command to
+ prevent a kernel panic on shutdown of the kdump kernel. */
+ printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
+ "completed.\n");
+ printk(KERN_WARNING "megaraid_sas: If you have a controller "
+ "other than PERC5, please upgrade your firmware.\n");
+ break;
+ case MFI_CMD_PD_SCSI_IO:
+ case MFI_CMD_LD_SCSI_IO:
+
+ /*
+ * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+ * issued either through an IO path or an IOCTL path. If it
+ * was via IOCTL, we will send it to internal completion.
+ */
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+ }
+
+ case MFI_CMD_LD_READ:
+ case MFI_CMD_LD_WRITE:
+
+ if (alt_status) {
+ cmd->scmd->result = alt_status << 16;
+ exception = 1;
+ }
+
+ if (exception) {
+
+ atomic_dec(&instance->fw_outstanding);
+
+ scsi_dma_unmap(cmd->scmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+ }
+
+ switch (hdr->cmd_status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result =
+ (DID_ERROR << 16) | hdr->scsi_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
+
+ if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ hdr->sense_len);
+
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+ hdr->cmd_status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ atomic_dec(&instance->fw_outstanding);
+
+ scsi_dma_unmap(cmd->scmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_SMP:
+ case MFI_CMD_STP:
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+ /* Check for LD map update */
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ fusion->fast_path_io = 0;
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ if (cmd->frame->hdr.cmd_status != 0) {
+ if (cmd->frame->hdr.cmd_status !=
+ MFI_STAT_NOT_FOUND)
+ printk(KERN_WARNING "megasas: map sync"
+ "failed, status = 0x%x.\n",
+ cmd->frame->hdr.cmd_status);
+ else {
+ megasas_return_mfi_mpt_pthr(instance,
+ cmd, cmd->mpt_pthr_cmd_blocked);
+ spin_unlock_irqrestore(
+ instance->host->host_lock,
+ flags);
+ break;
+ }
+ } else
+ instance->map_id++;
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+
+ /*
+ * Set fast path IO to ZERO.
+ * Validate Map will set proper value.
+ * Meanwhile all IOs will go as LD IO.
+ */
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+ megasas_sync_map_info(instance);
+ spin_unlock_irqrestore(instance->host->host_lock,
+ flags);
+ break;
+ }
+ if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ opcode == MR_DCMD_CTRL_EVENT_GET) {
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ megasas_poll_wait_aen = 0;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ }
+
+ /*
+ * See if got an event notification
+ */
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ megasas_service_aen(instance, cmd);
+ else
+ megasas_complete_int_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_ABORT:
+ /*
+ * Cmd issued to abort another cmd returned
+ */
+ megasas_complete_abort(instance, cmd);
+ break;
+
+ default:
+ printk("megasas: Unknown command completed! [0x%X]\n",
+ hdr->cmd);
+ break;
+ }
+}
+
+/**
+ * megasas_issue_pending_cmds_again - issue all pending cmds
+ * in FW again because of the fw reset
+ * @instance: Adapter soft state
+ */
+static inline void
+megasas_issue_pending_cmds_again(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct list_head clist_local;
+ union megasas_evt_class_locale class_locale;
+ unsigned long flags;
+ u32 seq_num;
+
+ INIT_LIST_HEAD(&clist_local);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ list_splice_init(&instance->internal_reset_pending_q, &clist_local);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ while (!list_empty(&clist_local)) {
+ cmd = list_entry((&clist_local)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+
+ if (cmd->sync_cmd || cmd->scmd) {
+ printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
+ "detected to be pending while HBA reset.\n",
+ cmd, cmd->scmd, cmd->sync_cmd);
+
+ cmd->retry_for_fw_reset++;
+
+ if (cmd->retry_for_fw_reset == 3) {
+ printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
+ "was tried multiple times during reset."
+ "Shutting down the HBA\n",
+ cmd, cmd->scmd, cmd->sync_cmd);
+ instance->instancet->disable_intr(instance);
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ megaraid_sas_kill_hba(instance);
+ return;
+ }
+ }
+
+ if (cmd->sync_cmd == 1) {
+ if (cmd->scmd) {
+ printk(KERN_NOTICE "megaraid_sas: unexpected"
+ "cmd attached to internal command!\n");
+ }
+ printk(KERN_NOTICE "megasas: %p synchronous cmd"
+ "on the internal reset queue,"
+ "issue it again.\n", cmd);
+ cmd->cmd_status = ENODATA;
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr ,
+ 0, instance->reg_set);
+ } else if (cmd->scmd) {
+ printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
+ "detected on the internal queue, issue again.\n",
+ cmd, cmd->scmd->cmnd[0]);
+
+ atomic_inc(&instance->fw_outstanding);
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr,
+ cmd->frame_count-1, instance->reg_set);
+ } else {
+ printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
+ "internal reset defer list while re-issue!!\n",
+ cmd);
+ }
+ }
+
+ if (instance->aen_cmd) {
+ printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+ megasas_return_cmd(instance, instance->aen_cmd);
+
+ instance->aen_cmd = NULL;
+ }
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ seq_num = instance->last_seq_num;
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ megasas_register_aen(instance, seq_num, class_locale.word);
+}
+
+/**
+ * Move the internal reset pending commands to a deferred queue.
+ *
+ * We move the commands pending at internal reset time to a
+ * pending queue. This queue would be flushed after successful
+ * completion of the internal reset sequence. if the internal reset
+ * did not complete in time, the kernel reset handler would flush
+ * these commands.
+ **/
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ int i;
+ u32 max_cmd = instance->max_fw_cmds;
+ u32 defer_index;
+ unsigned long flags;
+
+ defer_index = 0;
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ if (cmd->sync_cmd == 1 || cmd->scmd) {
+ printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
+ "on the defer queue as internal\n",
+ defer_index, cmd, cmd->sync_cmd, cmd->scmd);
+
+ if (!list_empty(&cmd->list)) {
+ printk(KERN_NOTICE "megaraid_sas: ERROR while"
+ " moving this cmd:%p, %d %p, it was"
+ "discovered on some list?\n",
+ cmd, cmd->sync_cmd, cmd->scmd);
+
+ list_del_init(&cmd->list);
+ }
+ defer_index++;
+ list_add_tail(&cmd->list,
+ &instance->internal_reset_pending_q);
+ }
+ }
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+
+static void
+process_fw_state_change_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, work_init);
+ u32 wait;
+ unsigned long flags;
+
+ if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+ printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
+ instance->adprecovery);
+ return ;
+ }
+
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
+ "state, restarting it...\n");
+
+ instance->instancet->disable_intr(instance);
+ atomic_set(&instance->fw_outstanding, 0);
+
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset(instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0 );
+
+ printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
+ "initiating next stage...\n");
+
+ printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
+ "state 2 starting...\n");
+
+ /*waitting for about 20 second before start the second init*/
+ for (wait = 0; wait < 30; wait++) {
+ msleep(1000);
+ }
+
+ if (megasas_transition_to_ready(instance, 1)) {
+ printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ megaraid_sas_kill_hba(instance);
+ return ;
+ }
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
+ ) {
+ *instance->consumer = *instance->producer;
+ } else {
+ *instance->consumer = 0;
+ *instance->producer = 0;
+ }
+
+ megasas_issue_init_mfi(instance);
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ instance->instancet->enable_intr(instance);
+
+ megasas_issue_pending_cmds_again(instance);
+ instance->issuepend_done = 1;
+ }
+ return ;
+}
+
+/**
+ * megasas_deplete_reply_queue - Processes all completed commands
+ * @instance: Adapter soft state
+ * @alt_status: Alternate status to be returned to
+ * SCSI mid-layer instead of the status
+ * returned by the FW
+ * Note: this must be called with hba lock held
+ */
+static int
+megasas_deplete_reply_queue(struct megasas_instance *instance,
+ u8 alt_status)
+{
+ u32 mfiStatus;
+ u32 fw_state;
+
+ if ((mfiStatus = instance->instancet->check_reset(instance,
+ instance->reg_set)) == 1) {
+ return IRQ_HANDLED;
+ }
+
+ if ((mfiStatus = instance->instancet->clear_intr(
+ instance->reg_set)
+ ) == 0) {
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msix_vectors)
+ return IRQ_NONE;
+ }
+
+ instance->mfiStatus = mfiStatus;
+
+ if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+
+ if (fw_state != MFI_STATE_FAULT) {
+ printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+ fw_state);
+ }
+
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+
+ *instance->consumer =
+ cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+ }
+
+
+ instance->instancet->disable_intr(instance);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+
+ printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+ fw_state, instance->adprecovery);
+
+ schedule_work(&instance->work_init);
+ return IRQ_HANDLED;
+
+ } else {
+ printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+ fw_state, instance->disableOnlineCtrlReset);
+ }
+ }
+
+ tasklet_schedule(&instance->isr_tasklet);
+ return IRQ_HANDLED;
+}
+/**
+ * megasas_isr - isr entry point
+ */
+static irqreturn_t megasas_isr(int irq, void *devp)
+{
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
+ unsigned long flags;
+ irqreturn_t rc;
+
+ if (atomic_read(&instance->fw_reset_no_pci_access))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ rc = megasas_deplete_reply_queue(instance, DID_OK);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ return rc;
+}
+
+/**
+ * megasas_transition_to_ready - Move the FW to READY state
+ * @instance: Adapter soft state
+ *
+ * During the initialization, FW passes can potentially be in any one of
+ * several possible states. If the FW in operational, waiting-for-handshake
+ * states, driver must take steps to bring it to ready state. Otherwise, it
+ * has to wait for the ready state.
+ */
+int
+megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
+{
+ int i;
+ u8 max_wait;
+ u32 fw_state;
+ u32 cur_state;
+ u32 abs_state, curr_abs_state;
+
+ abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+ fw_state = abs_state & MFI_STATE_MASK;
+
+ if (fw_state != MFI_STATE_READY)
+ printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+ " state\n");
+
+ while (fw_state != MFI_STATE_READY) {
+
+ switch (fw_state) {
+
+ case MFI_STATE_FAULT:
+ printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+ if (ocr) {
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
+ } else
+ return -ENODEV;
+
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /*
+ * Set the CLR bit in inbound doorbell
+ */
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ writel(
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+ &instance->reg_set->doorbell);
+ } else {
+ writel(
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+ &instance->reg_set->inbound_doorbell);
+ }
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_INIT_HOTPLUG,
+ &instance->reg_set->doorbell);
+ } else
+ writel(MFI_INIT_HOTPLUG,
+ &instance->reg_set->inbound_doorbell);
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+
+ case MFI_STATE_OPERATIONAL:
+ /*
+ * Bring it to READY state; assuming max wait 10 secs
+ */
+ instance->instancet->disable_intr(instance);
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_RESET_FLAGS,
+ &instance->reg_set->doorbell);
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ for (i = 0; i < (10 * 1000); i += 20) {
+ if (readl(
+ &instance->
+ reg_set->
+ doorbell) & 1)
+ msleep(20);
+ else
+ break;
+ }
+ }
+ } else
+ writel(MFI_RESET_FLAGS,
+ &instance->reg_set->inbound_doorbell);
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+
+ case MFI_STATE_UNDEFINED:
+ /*
+ * This state should not last for more than 2 seconds
+ */
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+
+ case MFI_STATE_BB_INIT:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT_2:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+
+ case MFI_STATE_DEVICE_SCAN:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+
+ case MFI_STATE_FLUSH_CACHE:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+ fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ curr_abs_state = instance->instancet->
+ read_fw_status_reg(instance->reg_set);
+
+ if (abs_state == curr_abs_state) {
+ msleep(1);
+ } else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (curr_abs_state == abs_state) {
+ printk(KERN_DEBUG "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+
+ abs_state = curr_abs_state;
+ fw_state = curr_abs_state & MFI_STATE_MASK;
+ }
+ printk(KERN_INFO "megasas: FW now in Ready state\n");
+
+ return 0;
+}
+
+/**
+ * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd = instance->max_mfi_cmds;
+ struct megasas_cmd *cmd;
+
+ if (!instance->frame_dma_pool)
+ return;
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if (cmd->frame)
+ pci_pool_free(instance->frame_dma_pool, cmd->frame,
+ cmd->frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(instance->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(instance->frame_dma_pool);
+ pci_pool_destroy(instance->sense_dma_pool);
+
+ instance->frame_dma_pool = NULL;
+ instance->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_create_frame_pool - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling MFI frame and the SG list that immediately follows the frame. This
+ * function creates those DMA memory buffers for each command packet by using
+ * PCI pool facility.
+ */
+static int megasas_create_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ u32 sge_sz;
+ u32 total_sz;
+ u32 frame_count;
+ struct megasas_cmd *cmd;
+
+ max_cmd = instance->max_mfi_cmds;
+
+ /*
+ * Size of our frame is 64 bytes for MFI frame, followed by max SG
+ * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
+ */
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (instance->flag_ieee) {
+ sge_sz = sizeof(struct megasas_sge_skinny);
+ }
+
+ /*
+ * For MFI controllers.
+ * max_num_sge = 60
+ * max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
+ * Total 960 byte (15 MFI frame of 64 byte)
+ *
+ * Fusion adapter require only 3 extra frame.
+ * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+ * max_sge_sz = 12 byte (sizeof megasas_sge64)
+ * Total 192 byte (3 MFI frame of 64 byte)
+ */
+ frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+ instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+ instance->pdev, total_sz, 256, 0);
+
+ if (!instance->frame_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+ return -ENOMEM;
+ }
+
+ instance->sense_dma_pool = pci_pool_create("megasas sense pool",
+ instance->pdev, 128, 4, 0);
+
+ if (!instance->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+
+ pci_pool_destroy(instance->frame_dma_pool);
+ instance->frame_dma_pool = NULL;
+
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list.
+ * By making cmd->index as the context instead of the &cmd, we can
+ * always use 32bit context regardless of the architecture
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+ GFP_KERNEL, &cmd->frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+
+ /*
+ * megasas_teardown_frame_pool() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->frame || !cmd->sense) {
+ printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+ megasas_teardown_frame_pool(instance);
+ return -ENOMEM;
+ }
+
+ memset(cmd->frame, 0, total_sz);
+ cmd->frame->io.context = cpu_to_le32(cmd->index);
+ cmd->frame->io.pad_0 = 0;
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_free_cmds - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+void megasas_free_cmds(struct megasas_instance *instance)
+{
+ int i;
+ /* First free the MFI frame pool */
+ megasas_teardown_frame_pool(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < instance->max_mfi_cmds; i++)
+
+ kfree(instance->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&instance->cmd_pool);
+}
+
+/**
+ * megasas_alloc_cmds - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ * Each command that is issued to the FW, whether IO commands from the OS or
+ * internal commands like IOCTLs, are wrapped in local data structure called
+ * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
+ * the FW.
+ *
+ * Each frame has a 32-bit field called context (tag). This context is used
+ * to get back the megasas_cmd from the frame when a frame gets completed in
+ * the ISR. Typically the address of the megasas_cmd itself would be used as
+ * the context. But we wanted to keep the differences between 32 and 64 bit
+ * systems to the mininum. We always use 32 bit integers for the context. In
+ * this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd given the context. The
+ * free commands themselves are maintained in a linked list called cmd_pool.
+ */
+int megasas_alloc_cmds(struct megasas_instance *instance)
+{
+ int i;
+ int j;
+ u32 max_cmd;
+ struct megasas_cmd *cmd;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_mfi_cmds;
+
+ /*
+ * instance->cmd_list is an array of struct megasas_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
+
+ if (!instance->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory\n");
+ return -ENOMEM;
+ }
+
+ memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
+
+ for (i = 0; i < max_cmd; i++) {
+ instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
+ GFP_KERNEL);
+
+ if (!instance->cmd_list[i]) {
+
+ for (j = 0; j < i; j++)
+ kfree(instance->cmd_list[j]);
+
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ memset(cmd, 0, sizeof(struct megasas_cmd));
+ cmd->index = i;
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+ cmd->scmd = NULL;
+ cmd->instance = instance;
+
+ list_add_tail(&cmd->list, &instance->cmd_pool);
+ }
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds(instance);
+ }
+
+ return 0;
+}
+
+/*
+ * megasas_get_pd_list_info - Returns FW's pd_list structure
+ * @instance: Adapter soft state
+ * @pd_list: pd_list structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out SYSTEM
+ * supported by the FW.
+ */
+static int
+megasas_get_pd_list(struct megasas_instance *instance)
+{
+ int ret = 0, pd_index = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_PD_LIST *ci;
+ struct MR_PD_ADDRESS *pd_addr;
+ dma_addr_t ci_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+ dcmd->mbox.b[1] = 0;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ /*
+ * the following function will get the instance PD LIST.
+ */
+
+ pd_addr = ci->addr;
+
+ if ( ret == 0 &&
+ (le32_to_cpu(ci->count) <
+ (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+
+ memset(instance->local_pd_list, 0,
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+
+ for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
+
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
+ le16_to_cpu(pd_addr->deviceId);
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
+ pd_addr->scsiDevType;
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
+ MR_PD_STATE_SYSTEM;
+ pd_addr++;
+ }
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
+ }
+
+ pci_free_consistent(instance->pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ ci, ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+/*
+ * megasas_get_ld_list_info - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ * @ld_list: ld_list structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out SYSTEM
+ * supported by the FW.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 ld_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[0] = 1;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->pad_0 = 0;
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+
+ ld_count = le32_to_cpu(ci->ldCount);
+
+ /* the following function will get the instance PD LIST */
+
+ if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
+ memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+
+ for (ld_index = 0; ld_index < ld_count; ld_index++) {
+ if (ci->ldList[ld_index].state != 0) {
+ ids = ci->ldList[ld_index].ref.targetId;
+ instance->ld_ids[ids] =
+ ci->ldList[ld_index].ref.targetId;
+ }
+ }
+ }
+
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ ci,
+ ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_ld_list_query - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ * @ld_list: ld_list structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out SYSTEM
+ * supported by the FW.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_TARGETID_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 tgtid_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_WARNING
+ "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_WARNING
+ "megasas: Failed to alloc mem for ld_list_query\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = query_type;
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[2] = 1;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->pad_0 = 0;
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+ ids = ci->targetId[ld_index];
+ instance->ld_ids[ids] = ci->targetId[ld_index];
+ }
+
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ ci, ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+/*
+ * megasas_update_ext_vd_details : Update details w.r.t Extended VD
+ * instance : Controller's instance
+*/
+static void megasas_update_ext_vd_details(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ u32 old_map_sz;
+ u32 new_map_sz;
+
+ fusion = instance->ctrl_context;
+ /* For MFI based controllers return dummy success */
+ if (!fusion)
+ return;
+
+ instance->supportmax256vd =
+ instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ /* Below is additional check to address future FW enhancement */
+ if (instance->ctrl_info->max_lds > 64)
+ instance->supportmax256vd = 1;
+
+ instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ if (instance->supportmax256vd) {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ } else {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ }
+ dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n",
+ instance->fw_supported_vd_count,
+ instance->fw_supported_pd_count);
+ dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n",
+ instance->drv_supported_vd_count,
+ instance->drv_supported_pd_count);
+
+ old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->fw_supported_vd_count - 1));
+ new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
+ fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->drv_supported_vd_count - 1));
+
+ fusion->max_map_sz = max(old_map_sz, new_map_sz);
+
+
+ if (instance->supportmax256vd)
+ fusion->current_map_sz = new_map_sz;
+ else
+ fusion->current_map_sz = old_map_sz;
+
+}
+
+/**
+ * megasas_get_controller_info - Returns FW's controller structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+int
+megasas_get_ctrl_info(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_ctrl_info *ci;
+ struct megasas_ctrl_info *ctrl_info;
+ dma_addr_t ci_h = 0;
+
+ ctrl_info = instance->ctrl_info;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_ctrl_info), &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->mbox.b[0] = 1;
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (!ret) {
+ memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+ megasas_update_ext_vd_details(instance);
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
+ ci, ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/*
+ * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
+ * to firmware
+ *
+ * @instance: Adapter soft state
+ * @crash_buf_state - tell FW to turn ON/OFF crash dump feature
+ MR_CRASH_BUF_TURN_OFF = 0
+ MR_CRASH_BUF_TURN_ON = 1
+ * @return 0 on success non-zero on failure.
+ * Issues an internal command (DCMD) to set parameters for crash dump feature.
+ * Driver will send address of crash dump DMA buffer and set mbox to tell FW
+ * that driver supports crash dump feature. This DCMD will be sent only if
+ * crash dump feature is supported by the FW.
+ *
+ */
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->mbox.b[0] = crash_buf_state;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_issue_init_mfi - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * Issues the INIT MFI cmd
+ */
+static int
+megasas_issue_init_mfi(struct megasas_instance *instance)
+{
+ u32 context;
+
+ struct megasas_cmd *cmd;
+
+ struct megasas_init_frame *init_frame;
+ struct megasas_init_queue_info *initq_info;
+ dma_addr_t init_frame_h;
+ dma_addr_t initq_info_h;
+
+ /*
+ * Prepare a init frame. Note the init frame points to queue info
+ * structure. Each frame has SGL allocated after first 64 bytes. For
+ * this frame - since we don't need any SGL - we use SGL's space as
+ * queue info structure
+ *
+ * We will not get a NULL command below. We just created the pool.
+ */
+ cmd = megasas_get_cmd(instance);
+
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ initq_info = (struct megasas_init_queue_info *)
+ ((unsigned long)init_frame + 64);
+
+ init_frame_h = cmd->frame_phys_addr;
+ initq_info_h = init_frame_h + 64;
+
+ context = init_frame->context;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+ init_frame->context = context;
+
+ initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+ initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
+
+ initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+ initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(initq_info_h));
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(initq_info_h));
+
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
+
+ /*
+ * disable the intr before firing the init frame to FW
+ */
+ instance->instancet->disable_intr(instance);
+
+ /*
+ * Issue the init frame in polled mode
+ */
+
+ if (megasas_issue_polled(instance, cmd)) {
+ printk(KERN_ERR "megasas: Failed to init firmware\n");
+ megasas_return_cmd(instance, cmd);
+ goto fail_fw_init;
+ }
+
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+
+fail_fw_init:
+ return -EINVAL;
+}
+
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *reg_set;
+ u32 context_sz;
+ u32 reply_q_sz;
+
+ reg_set = instance->reg_set;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds = instance->max_fw_cmds-1;
+ instance->max_mfi_cmds = instance->max_fw_cmds;
+ instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+ 0x10;
+ /*
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_cmds;
+
+ /*
+ * Allocate memory for reply queue. Length of reply queue should
+ * be _one_ more than the maximum commands handled by the firmware.
+ *
+ * Note: When FW completes commands, it places corresponding contex
+ * values in this circular reply queue. This circular queue is a fairly
+ * typical producer-consumer queue. FW is the producer (of completed
+ * commands) and the driver is the consumer.
+ */
+ context_sz = sizeof(u32);
+ reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
+
+ instance->reply_queue = pci_alloc_consistent(instance->pdev,
+ reply_q_sz,
+ &instance->reply_queue_h);
+
+ if (!instance->reply_queue) {
+ printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+ goto fail_reply_queue;
+ }
+
+ if (megasas_issue_init_mfi(instance))
+ goto fail_fw_init;
+
+ if (megasas_get_ctrl_info(instance)) {
+ dev_err(&instance->pdev->dev, "(%d): Could get controller info "
+ "Fail from %s %d\n", instance->unique_id,
+ __func__, __LINE__);
+ goto fail_fw_init;
+ }
+
+ instance->fw_support_ieee = 0;
+ instance->fw_support_ieee =
+ (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x04000000);
+
+ printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
+ instance->fw_support_ieee);
+
+ if (instance->fw_support_ieee)
+ instance->flag_ieee = 1;
+
+ return 0;
+
+fail_fw_init:
+
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+fail_reply_queue:
+ megasas_free_cmds(instance);
+
+fail_alloc_cmds:
+ return 1;
+}
+
+/**
+ * megasas_init_fw - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing firmware
+ */
+
+static int megasas_init_fw(struct megasas_instance *instance)
+{
+ u32 max_sectors_1;
+ u32 max_sectors_2;
+ u32 tmp_sectors, msix_enable, scratch_pad_2;
+ resource_size_t base_addr;
+ struct megasas_register_set __iomem *reg_set;
+ struct megasas_ctrl_info *ctrl_info = NULL;
+ unsigned long bar_list;
+ int i, loop, fw_msix_count = 0;
+ struct IOV_111 *iovPtr;
+
+ /* Find first memory bar */
+ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+ instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+ if (pci_request_selected_regions(instance->pdev, instance->bar,
+ "megasas: LSI")) {
+ printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ return -EBUSY;
+ }
+
+ base_addr = pci_resource_start(instance->pdev, instance->bar);
+ instance->reg_set = ioremap_nocache(base_addr, 8192);
+
+ if (!instance->reg_set) {
+ printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+ goto fail_ioremap;
+ }
+
+ reg_set = instance->reg_set;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->instancet = &megasas_instance_template_fusion;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078R:
+ case PCI_DEVICE_ID_LSI_SAS1078DE:
+ instance->instancet = &megasas_instance_template_ppc;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+ case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+ instance->instancet = &megasas_instance_template_gen2;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+ case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+ instance->instancet = &megasas_instance_template_skinny;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1064R:
+ case PCI_DEVICE_ID_DELL_PERC5:
+ default:
+ instance->instancet = &megasas_instance_template_xscale;
+ break;
+ }
+
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+ /*waitting for about 30 second before retry*/
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
+
+ /*
+ * MSI-X host index 0 is common for all adapter.
+ * It is used for all MPT based Adapters.
+ */
+ instance->reply_post_host_index_addr[0] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable) {
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
+ /* Check max MSI-X vectors */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
+ instance->msix_vectors = (scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
+ fw_msix_count = instance->msix_vectors;
+ if (msix_vectors)
+ instance->msix_vectors =
+ min(msix_vectors,
+ instance->msix_vectors);
+ } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
+ || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ /* Invader/Fury supports more than 8 MSI-X */
+ instance->msix_vectors = ((scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
+ >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ fw_msix_count = instance->msix_vectors;
+ /* Save 1-15 reply post index address to local memory
+ * Index 0 is already saved from reg offset
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET
+ */
+ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
+ instance->reply_post_host_index_addr[loop] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
+ + (loop * 0x10));
+ }
+ if (msix_vectors)
+ instance->msix_vectors = min(msix_vectors,
+ instance->msix_vectors);
+ } else
+ instance->msix_vectors = 1;
+ /* Don't bother allocating more MSI-X vectors than cpus */
+ instance->msix_vectors = min(instance->msix_vectors,
+ (unsigned int)num_online_cpus());
+ for (i = 0; i < instance->msix_vectors; i++)
+ instance->msixentry[i].entry = i;
+ i = pci_enable_msix_range(instance->pdev, instance->msixentry,
+ 1, instance->msix_vectors);
+ if (i > 0)
+ instance->msix_vectors = i;
+ else
+ instance->msix_vectors = 0;
+
+ dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
+ "<%d> MSIX vector,Online CPUs: <%d>,"
+ "Current MSIX <%d>\n", instance->host->host_no,
+ fw_msix_count, (unsigned int)num_online_cpus(),
+ instance->msix_vectors);
+ }
+
+ instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
+ GFP_KERNEL);
+ if (instance->ctrl_info == NULL)
+ goto fail_init_adapter;
+
+ /*
+ * Below are default value for legacy Firmware.
+ * non-fusion based controllers
+ */
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ /* Get operational params, sge flags, send init cmd to controller */
+ if (instance->instancet->init_adapter(instance))
+ goto fail_init_adapter;
+
+ printk(KERN_ERR "megasas: INIT adapter done\n");
+
+ /** for passthrough
+ * the following function will get the PD LIST.
+ */
+
+ memset(instance->pd_list, 0 ,
+ (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+ if (megasas_get_pd_list(instance) < 0) {
+ printk(KERN_ERR "megasas: failed to get PD list\n");
+ goto fail_init_adapter;
+ }
+
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+
+ /*
+ * Compute the max allowed sectors per IO: The controller info has two
+ * limits on max sectors. Driver should use the minimum of these two.
+ *
+ * 1 << stripe_sz_ops.min = max sectors per strip
+ *
+ * Note that older firmwares ( < FW ver 30) didn't report information
+ * to calculate max_sectors_1. So the number ended up as zero always.
+ */
+ tmp_sectors = 0;
+ ctrl_info = instance->ctrl_info;
+
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+
+ tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+
+ /*Check whether controller is iMR or MR */
+ if (ctrl_info->memory_size) {
+ instance->is_imr = 0;
+ dev_info(&instance->pdev->dev, "Controller type: MR,"
+ "Memory size is: %dMB\n",
+ le16_to_cpu(ctrl_info->memory_size));
+ } else {
+ instance->is_imr = 1;
+ dev_info(&instance->pdev->dev,
+ "Controller type: iMR\n");
+ }
+ instance->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if (instance->UnevenSpanSupport) {
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ dev_info(&instance->pdev->dev, "FW supports: "
+ "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+
+ }
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
+ }
+ dev_warn(&instance->pdev->dev, "I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
+
+ instance->crash_dump_fw_support =
+ ctrl_info->adapterOperations3.supportCrashDump;
+ instance->crash_dump_drv_support =
+ (instance->crash_dump_fw_support &&
+ instance->crash_dump_buf);
+ if (instance->crash_dump_drv_support) {
+ dev_info(&instance->pdev->dev, "Firmware Crash dump "
+ "feature is supported\n");
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+
+ } else {
+ if (instance->crash_dump_buf)
+ pci_free_consistent(instance->pdev,
+ CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+ instance->crash_dump_buf = NULL;
+ }
+
+ instance->secure_jbod_support =
+ ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ if (instance->secure_jbod_support)
+ dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n");
+ instance->max_sectors_per_req = instance->max_num_sge *
+ PAGE_SIZE / 512;
+ if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+ instance->max_sectors_per_req = tmp_sectors;
+
+ /*
+ * 1. For fusion adapters, 3 commands for IOCTL and 5 commands
+ * for driver's internal DCMDs.
+ * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's
+ * internal DCMDs.
+ * 3. For rest of MFI adapters, 27 commands reserved for IOCTLs
+ * and 5 commands for drivers's internal DCMD.
+ */
+ if (instance->ctrl_context) {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ MEGASAS_SKINNY_INT_CMDS;
+ sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+ } else {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ MEGASAS_INT_CMDS;
+ sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
+ }
+
+ /* Check for valid throttlequeuedepth module parameter */
+ if (throttlequeuedepth &&
+ throttlequeuedepth <= instance->max_scsi_cmds)
+ instance->throttlequeuedepth = throttlequeuedepth;
+ else
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+
+ /*
+ * Setup tasklet for cmd completion
+ */
+
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ /* Launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 1))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ return 0;
+
+fail_init_adapter:
+fail_ready_state:
+ kfree(instance->ctrl_info);
+ instance->ctrl_info = NULL;
+ iounmap(instance->reg_set);
+
+ fail_ioremap:
+ pci_release_selected_regions(instance->pdev, instance->bar);
+
+ return -EINVAL;
+}
+
+/**
+ * megasas_release_mfi - Reverses the FW initialization
+ * @intance: Adapter soft state
+ */
+static void megasas_release_mfi(struct megasas_instance *instance)
+{
+ u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
+
+ if (instance->reply_queue)
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+
+ megasas_free_cmds(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_get_seq_num - Gets latest event sequence numbers
+ * @instance: Adapter soft state
+ * @eli: FW event log sequence numbers information
+ *
+ * FW maintains a log of all events in a non-volatile area. Upper layers would
+ * usually find out the latest sequence number of the events, the seq number at
+ * the boot etc. They would "read" all the events below the latest seq number
+ * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
+ * number), they would subsribe to AEN (asynchronous event notification) and
+ * wait for the events to happen.
+ */
+static int
+megasas_get_seq_num(struct megasas_instance *instance,
+ struct megasas_evt_log_info *eli)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_evt_log_info *el_info;
+ dma_addr_t el_info_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+ el_info = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h);
+
+ if (!el_info) {
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(el_info, 0, sizeof(*el_info));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timedout"
+ "from %s\n", __func__);
+ else {
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_register_aen - Registers for asynchronous event notification
+ * @instance: Adapter soft state
+ * @seq_num: The starting sequence number
+ * @class_locale: Class of the event
+ *
+ * This function subscribes for AEN for events beyond the @seq_num. It requests
+ * to be notified if and only if the event is of type @class_locale
+ */
+static int
+megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+ u32 class_locale_word)
+{
+ int ret_val;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ union megasas_evt_class_locale curr_aen;
+ union megasas_evt_class_locale prev_aen;
+
+ /*
+ * If there an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new
+ * AEN request we currently have. If it is, then we don't have
+ * to do anything. In other words, whichever events the current
+ * AEN request is subscribing to, have already been subscribed
+ * to.
+ *
+ * If the old_cmd is _not_ inclusive, then we have to abort
+ * that command, form a class_locale that is superset of both
+ * old and current and re-issue to the FW
+ */
+
+ curr_aen.word = class_locale_word;
+
+ if (instance->aen_cmd) {
+
+ prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+ prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
+
+ /*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+ * registered, then a new registration requests for higher
+ * classes need not be sent to FW. They are automatically
+ * included.
+ *
+ * Locale numbers don't have such hierarchy. They are bitmap
+ * values
+ */
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+ return 0;
+ } else {
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ instance->aen_cmd->abort_aen = 1;
+ ret_val = megasas_issue_blocked_abort_cmd(instance,
+ instance->
+ aen_cmd, 30);
+
+ if (ret_val) {
+ printk(KERN_DEBUG "megasas: Failed to abort "
+ "previous AEN command\n");
+ return ret_val;
+ }
+ }
+ }
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
+
+ /*
+ * Prepare DCMD for aen registration
+ */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+ dcmd->mbox.w[0] = cpu_to_le32(seq_num);
+ instance->last_seq_num = seq_num;
+ dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ if (instance->aen_cmd != NULL) {
+ megasas_return_cmd(instance, cmd);
+ return 0;
+ }
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ instance->aen_cmd = cmd;
+
+ /*
+ * Issue the aen registration frame
+ */
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_start_aen - Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
+ */
+static int megasas_start_aen(struct megasas_instance *instance)
+{
+ struct megasas_evt_log_info eli;
+ union megasas_evt_class_locale class_locale;
+
+ /*
+ * Get the latest sequence number from FW
+ */
+ memset(&eli, 0, sizeof(eli));
+
+ if (megasas_get_seq_num(instance, &eli))
+ return -1;
+
+ /*
+ * Register AEN with FW for latest sequence number plus 1
+ */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ return megasas_register_aen(instance,
+ eli.newest_seq_num + 1,
+ class_locale.word);
+}
+
+/**
+ * megasas_io_attach - Attaches this driver to SCSI mid-layer
+ * @instance: Adapter soft state
+ */
+static int megasas_io_attach(struct megasas_instance *instance)
+{
+ struct Scsi_Host *host = instance->host;
+
+ /*
+ * Export parameters required by SCSI mid-layer
+ */
+ host->irq = instance->pdev->irq;
+ host->unique_id = instance->unique_id;
+ host->can_queue = instance->max_scsi_cmds;
+ host->this_id = instance->init_id;
+ host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
+ /*
+ * Check if the module parameter value for max_sectors can be used
+ */
+ if (max_sectors && max_sectors < instance->max_sectors_per_req)
+ instance->max_sectors_per_req = max_sectors;
+ else {
+ if (max_sectors) {
+ if (((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+ (max_sectors <= MEGASAS_MAX_SECTORS)) {
+ instance->max_sectors_per_req = max_sectors;
+ } else {
+ printk(KERN_INFO "megasas: max_sectors should be > 0"
+ "and <= %d (or < 1MB for GEN2 controller)\n",
+ instance->max_sectors_per_req);
+ }
+ }
+ }
+
+ host->max_sectors = instance->max_sectors_per_req;
+ host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
+ host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+ host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+ host->max_lun = MEGASAS_MAX_LUN;
+ host->max_cmd_len = 16;
+
+ /* Fusion only supports host reset */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ host->hostt->eh_device_reset_handler = NULL;
+ host->hostt->eh_bus_reset_handler = NULL;
+ }
+
+ /*
+ * Notify the mid-layer about the new controller
+ */
+ if (scsi_add_host(host, &instance->pdev->dev)) {
+ printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+megasas_set_dma_mask(struct pci_dev *pdev)
+{
+ /*
+ * All our contollers are capable of performing 64-bit DMA
+ */
+ if (IS_DMA64) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ goto fail_set_dma_mask;
+ }
+ /*
+ * Ensure that all data structures are allocated in 32-bit
+ * memory.
+ */
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ /* Try 32bit DMA mask and 32 bit Consistent dma mask */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ dev_info(&pdev->dev, "set 32bit DMA mask"
+ "and 32 bit consistent mask\n");
+ else
+ goto fail_set_dma_mask;
+ }
+
+ return 0;
+
+fail_set_dma_mask:
+ return 1;
+}
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rval, pos, i, j, cpu;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+ struct fusion_context *fusion = NULL;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * Announce PCI information
+ */
+ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ printk("bus %d:slot %d:func %d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ /*
+ * PCI prepping: enable device set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+ atomic_set( &instance->fw_reset_no_pci_access, 0 );
+ instance->pdev = pdev;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ {
+ instance->ctrl_context_pages =
+ get_order(sizeof(struct fusion_context));
+ instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
+ instance->ctrl_context_pages);
+ if (!instance->ctrl_context) {
+ printk(KERN_DEBUG "megasas: Failed to allocate "
+ "memory for Fusion context info\n");
+ goto fail_alloc_dma_buf;
+ }
+ fusion = instance->ctrl_context;
+ memset(fusion, 0,
+ ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
+ INIT_LIST_HEAD(&fusion->cmd_pool);
+ spin_lock_init(&fusion->mpt_pool_lock);
+ }
+ break;
+ default: /* For all other supported controllers */
+
+ instance->producer =
+ pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer =
+ pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->consumer_h);
+
+ if (!instance->producer || !instance->consumer) {
+ printk(KERN_DEBUG "megasas: Failed to allocate"
+ "memory for producer, consumer\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ break;
+ }
+
+ /* Crash dump feature related initialisation*/
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->crash_dump_fw_support = 0;
+ instance->crash_dump_app_support = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ spin_lock_init(&instance->crashdump_lock);
+ instance->crash_dump_buf = NULL;
+
+ if (!reset_devices)
+ instance->crash_dump_buf = pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
+ if (!instance->crash_dump_buf)
+ dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+ "crash dump DMA buffer\n");
+
+ megasas_poll_wait_aen = 0;
+ instance->flag_ieee = 0;
+ instance->ev = NULL;
+ instance->issuepend_done = 1;
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ instance->is_imr = 0;
+
+ instance->evt_detail = pci_alloc_consistent(pdev,
+ sizeof(struct
+ megasas_evt_detail),
+ &instance->evt_detail_h);
+
+ if (!instance->evt_detail) {
+ printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+ "event detail structure\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ /*
+ * Initialize locks and queues
+ */
+ INIT_LIST_HEAD(&instance->cmd_pool);
+ INIT_LIST_HEAD(&instance->internal_reset_pending_q);
+
+ atomic_set(&instance->fw_outstanding,0);
+
+ init_waitqueue_head(&instance->int_cmd_wait_q);
+ init_waitqueue_head(&instance->abort_cmd_wait_q);
+
+ spin_lock_init(&instance->mfi_pool_lock);
+ spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->completion_lock);
+
+ mutex_init(&instance->aen_mutex);
+ mutex_init(&instance->reset_mutex);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+ instance->ctrl_info = NULL;
+
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+ instance->flag_ieee = 1;
+
+ megasas_dbg_lvl = 0;
+ instance->flag = 0;
+ instance->unload = 1;
+ instance->last_time = 0;
+ instance->disableOnlineCtrlReset = 1;
+ instance->UnevenSpanSupport = 0;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
+ INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
+ } else
+ INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+
+ /*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ if (instance->requestorId) {
+ if (instance->PlasmaFW111) {
+ instance->vf_affiliation_111 =
+ pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h);
+ if (!instance->vf_affiliation_111)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ } else {
+ instance->vf_affiliation =
+ pci_alloc_consistent(pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h);
+ if (!instance->vf_affiliation)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ }
+ }
+
+retry_irq_register:
+ /*
+ * Register IRQ
+ */
+ if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ /* Retry irq register for IO_APIC */
+ instance->msix_vectors = 0;
+ goto retry_irq_register;
+ }
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev,
+ "Error setting affinity hint "
+ "for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+ }
+
+ instance->instancet->enable_intr(instance);
+
+ /*
+ * Store instance in PCI softstate
+ */
+ pci_set_drvdata(pdev, instance);
+
+ /*
+ * Add this controller to megasas_mgmt_info structure so that it
+ * can be exported to management applications
+ */
+ megasas_mgmt_info.count++;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
+ megasas_mgmt_info.max_index++;
+
+ /*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+ /*
+ * Trigger SCSI to scan our drives
+ */
+ scsi_scan_host(host);
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance)) {
+ printk(KERN_DEBUG "megasas: start aen failed\n");
+ goto fail_start_aen;
+ }
+
+ /* Get current SR-IOV LD/VF affiliation */
+ if (instance->requestorId)
+ megasas_get_ld_vf_affiliation(instance, 1);
+
+ return 0;
+
+ fail_start_aen:
+ fail_io_attach:
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ megasas_mgmt_info.max_index--;
+
+ instance->instancet->disable_intr(instance);
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+fail_irq:
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+ fail_alloc_dma_buf:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+ fail_alloc_instance:
+ fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+/**
+ * megasas_flush_cache - Requests FW to flush all its caches
+ * @instance: Adapter soft state
+ */
+static void megasas_flush_cache(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
+ dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timedout"
+ " from %s\n", __func__);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+/**
+ * megasas_shutdown_controller - Instructs FW to shutdown the controller
+ * @instance: Adapter soft state
+ * @opcode: Shutdown/Hibernate
+ */
+static void megasas_shutdown_controller(struct megasas_instance *instance,
+ u32 opcode)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ if (instance->aen_cmd)
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->aen_cmd, 30);
+ if (instance->map_update_cmd)
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->map_update_cmd, 30);
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = cpu_to_le32(opcode);
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timedout"
+ "from %s\n", __func__);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * megasas_suspend - driver suspend entry point
+ * @pdev: PCI device structure
+ * @state: PCI power state to suspend routine
+ */
+static int
+megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ int i;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ instance->unload = 1;
+
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+
+ /* cancel the delayed work if this work still in queue */
+ if (instance->ev != NULL) {
+ struct megasas_aen_event *ev = instance->ev;
+ cancel_delayed_work_sync(&ev->hotplug_work);
+ instance->ev = NULL;
+ }
+
+ tasklet_kill(&instance->isr_tasklet);
+
+ pci_set_drvdata(instance->pdev, instance);
+ instance->instancet->disable_intr(instance);
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+/**
+ * megasas_resume- driver resume entry point
+ * @pdev: PCI device structure
+ */
+static int
+megasas_resume(struct pci_dev *pdev)
+{
+ int rval, i, j, cpu;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ /*
+ * PCI prepping: enable device set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ printk(KERN_ERR "megasas: Enable device failed\n");
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ /*
+ * Initialize MFI Firmware
+ */
+
+ atomic_set(&instance->fw_outstanding, 0);
+
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ /* Now re-enable MSI-X */
+ if (instance->msix_vectors &&
+ pci_enable_msix_exact(instance->pdev, instance->msixentry,
+ instance->msix_vectors))
+ goto fail_reenable_msix;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ {
+ megasas_reset_reply_desc(instance);
+ if (megasas_ioc_init_fusion(instance)) {
+ megasas_free_cmds(instance);
+ megasas_free_cmds_fusion(instance);
+ goto fail_init_mfi;
+ }
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+ }
+ break;
+ default:
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ if (megasas_issue_init_mfi(instance))
+ goto fail_init_mfi;
+ break;
+ }
+
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ /*
+ * Register IRQ
+ */
+ if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0 ; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ goto fail_irq;
+ }
+
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error "
+ "setting affinity hint for cpu "
+ "%d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+ }
+
+ /* Re-launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ instance->instancet->enable_intr(instance);
+ instance->unload = 0;
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
+ return 0;
+
+fail_irq:
+fail_init_mfi:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+fail_set_dma_mask:
+fail_ready_state:
+fail_reenable_msix:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+#else
+#define megasas_suspend NULL
+#define megasas_resume NULL
+#endif
+
+/**
+ * megasas_detach_one - PCI hot"un"plug entry point
+ * @pdev: PCI device structure
+ */
+static void megasas_detach_one(struct pci_dev *pdev)
+{
+ int i;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ struct fusion_context *fusion;
+
+ instance = pci_get_drvdata(pdev);
+ instance->unload = 1;
+ host = instance->host;
+ fusion = instance->ctrl_context;
+
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
+ scsi_remove_host(instance->host);
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+ /* cancel the delayed work if this work still in queue*/
+ if (instance->ev != NULL) {
+ struct megasas_aen_event *ev = instance->ev;
+ cancel_delayed_work_sync(&ev->hotplug_work);
+ instance->ev = NULL;
+ }
+
+ /* cancel all wait events */
+ wake_up_all(&instance->int_cmd_wait_q);
+
+ tasklet_kill(&instance->isr_tasklet);
+
+ /*
+ * Take the instance off the instance array. Note that we will not
+ * decrement the max_index. We let this array be sparse array
+ */
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ if (megasas_mgmt_info.instance[i] == instance) {
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[i] = NULL;
+
+ break;
+ }
+ }
+
+ instance->instancet->disable_intr(instance);
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ megasas_release_fusion(instance);
+ for (i = 0; i < 2 ; i++) {
+ if (fusion->ld_map[i])
+ dma_free_coherent(&instance->pdev->dev,
+ fusion->max_map_sz,
+ fusion->ld_map[i],
+ fusion->ld_map_phys[i]);
+ if (fusion->ld_drv_map[i])
+ free_pages((ulong)fusion->ld_drv_map[i],
+ fusion->drv_map_pages);
+ }
+ free_pages((ulong)instance->ctrl_context,
+ instance->ctrl_context_pages);
+ break;
+ default:
+ megasas_release_mfi(instance);
+ pci_free_consistent(pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ pci_free_consistent(pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ break;
+ }
+
+ kfree(instance->ctrl_info);
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail, instance->evt_detail_h);
+
+ if (instance->vf_affiliation)
+ pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ instance->vf_affiliation,
+ instance->vf_affiliation_h);
+
+ if (instance->vf_affiliation_111)
+ pci_free_consistent(pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ instance->vf_affiliation_111,
+ instance->vf_affiliation_111_h);
+
+ if (instance->hb_host_mem)
+ pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ instance->hb_host_mem,
+ instance->hb_host_mem_h);
+
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf, instance->crash_dump_h);
+
+ scsi_host_put(host);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+/**
+ * megasas_shutdown - Shutdown entry point
+ * @device: Generic device structure
+ */
+static void megasas_shutdown(struct pci_dev *pdev)
+{
+ int i;
+ struct megasas_instance *instance = pci_get_drvdata(pdev);
+
+ instance->unload = 1;
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance);
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+}
+
+/**
+ * megasas_mgmt_open - char node "open" entry point
+ */
+static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+{
+ /*
+ * Allow only those users with admin rights
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
+ * megasas_mgmt_fasync - Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
+{
+ int rc;
+
+ mutex_lock(&megasas_async_queue_mutex);
+
+ rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
+
+ mutex_unlock(&megasas_async_queue_mutex);
+
+ if (rc >= 0) {
+ /* For sanity check when we get ioctl */
+ filep->private_data = filep;
+ return 0;
+ }
+
+ printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
+
+ return rc;
+}
+
+/**
+ * megasas_mgmt_poll - char node "poll" entry point
+ * */
+static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask;
+ unsigned long flags;
+ poll_wait(file, &megasas_poll_wait, wait);
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ if (megasas_poll_wait_aen)
+ mask = (POLLIN | POLLRDNORM);
+
+ else
+ mask = 0;
+ megasas_poll_wait_aen = 0;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ return mask;
+}
+
+/*
+ * megasas_set_crash_dump_params_ioctl:
+ * Send CRASH_DUMP_MODE DCMD to all controllers
+ * @cmd: MFI command frame
+ */
+
+static int megasas_set_crash_dump_params_ioctl(
+ struct megasas_cmd *cmd)
+{
+ struct megasas_instance *local_instance;
+ int i, error = 0;
+ int crash_support;
+
+ crash_support = cmd->frame->dcmd.mbox.w[0];
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ local_instance = megasas_mgmt_info.instance[i];
+ if (local_instance && local_instance->crash_dump_drv_support) {
+ if ((local_instance->adprecovery ==
+ MEGASAS_HBA_OPERATIONAL) &&
+ !megasas_set_crash_dump_params(local_instance,
+ crash_support)) {
+ local_instance->crash_dump_app_support =
+ crash_support;
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set success\n");
+ error = 0;
+ } else {
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set failed\n");
+ error = -1;
+ }
+ }
+ }
+ return error;
+}
+
+/**
+ * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
+ * @instance: Adapter soft state
+ * @argp: User's ioctl packet
+ */
+static int
+megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ struct megasas_iocpacket __user * user_ioc,
+ struct megasas_iocpacket *ioc)
+{
+ struct megasas_sge32 *kern_sge32;
+ struct megasas_cmd *cmd;
+ void *kbuff_arr[MAX_IOCTL_SGE];
+ dma_addr_t buf_handle = 0;
+ int error = 0, i;
+ void *sense = NULL;
+ dma_addr_t sense_handle;
+ unsigned long *sense_ptr;
+
+ memset(kbuff_arr, 0, sizeof(kbuff_arr));
+
+ if (ioc->sge_count > MAX_IOCTL_SGE) {
+ printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
+ ioc->sge_count, MAX_IOCTL_SGE);
+ return -EINVAL;
+ }
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * User's IOCTL packet has 2 frames (maximum). Copy those two
+ * frames into our cmd's frames. cmd->frame's context will get
+ * overwritten when we copy from user's frames. So set that value
+ * alone separately
+ */
+ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ cmd->frame->hdr.context = cpu_to_le32(cmd->index);
+ cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+ MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
+
+ if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+ error = megasas_set_crash_dump_params_ioctl(cmd);
+ megasas_return_cmd(instance, cmd);
+ return error;
+ }
+
+ /*
+ * The management interface between applications and the fw uses
+ * MFI frames. E.g, RAID configuration changes, LD property changes
+ * etc are accomplishes through different kinds of MFI frames. The
+ * driver needs to care only about substituting user buffers with
+ * kernel buffers in SGLs. The location of SGL is embedded in the
+ * struct iocpacket itself.
+ */
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+
+ /*
+ * For each user buffer, create a mirror buffer and copy in
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
+ kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
+ ioc->sgl[i].iov_len,
+ &buf_handle, GFP_KERNEL);
+ if (!kbuff_arr[i]) {
+ printk(KERN_DEBUG "megasas: Failed to alloc "
+ "kernel SGL buffer for IOCTL \n");
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * We don't change the dma_coherent_mask, so
+ * pci_alloc_consistent only returns 32bit addresses
+ */
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+
+ /*
+ * We created a kernel buffer corresponding to the
+ * user buffer. Now copy in from the user buffer
+ */
+ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
+ (u32) (ioc->sgl[i].iov_len))) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (ioc->sense_len) {
+ sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
+ &sense_handle, GFP_KERNEL);
+ if (!sense) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ sense_ptr =
+ (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+ *sense_ptr = cpu_to_le32(sense_handle);
+ }
+
+ /*
+ * Set the sync_cmd flag so that the ISR knows not to complete this
+ * cmd to the SCSI mid-layer
+ */
+ cmd->sync_cmd = 1;
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+ cmd->sync_cmd = 0;
+
+ if (instance->unload == 1) {
+ dev_info(&instance->pdev->dev, "Driver unload is in progress "
+ "don't submit data to application\n");
+ goto out;
+ }
+ /*
+ * copy out the kernel buffers to user buffers
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
+ ioc->sgl[i].iov_len)) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy out the sense
+ */
+ if (ioc->sense_len) {
+ /*
+ * sense_ptr points to the location that has the user
+ * sense buffer address
+ */
+ sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
+ ioc->sense_off);
+
+ if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+ sense, ioc->sense_len)) {
+ printk(KERN_ERR "megasas: Failed to copy out to user "
+ "sense data\n");
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy the status codes returned by the fw
+ */
+ if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
+ &cmd->frame->hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+ error = -EFAULT;
+ }
+
+ out:
+ if (sense) {
+ dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
+ sense, sense_handle);
+ }
+
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (kbuff_arr[i])
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
+ kbuff_arr[i] = NULL;
+ }
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return error;
+}
+
+static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct megasas_iocpacket __user *user_ioc =
+ (struct megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket *ioc;
+ struct megasas_instance *instance;
+ int error;
+ int i;
+ unsigned long flags;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return -ENOMEM;
+
+ if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
+ error = -EFAULT;
+ goto out_kfree_ioc;
+ }
+
+ instance = megasas_lookup_instance(ioc->host_no);
+ if (!instance) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ /* Adjust ioctl wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Block ioctls in VF mode */
+ if (instance->requestorId && !allow_vf_ioctls) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_ERR "Controller in crit error\n");
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (instance->unload == 1) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (down_interruptible(&instance->ioctl_sem)) {
+ error = -ERESTARTSYS;
+ goto out_kfree_ioc;
+ }
+
+ for (i = 0; i < wait_time; i++) {
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: waiting"
+ "for controller reset to finish\n");
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ printk(KERN_ERR "megaraid_sas: timed out while"
+ "waiting for HBA to recover\n");
+ error = -ENODEV;
+ goto out_up;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+ out_up:
+ up(&instance->ioctl_sem);
+
+ out_kfree_ioc:
+ kfree(ioc);
+ return error;
+}
+
+static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+{
+ struct megasas_instance *instance;
+ struct megasas_aen aen;
+ int error;
+ int i;
+ unsigned long flags;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ if (file->private_data != file) {
+ printk(KERN_DEBUG "megasas: fasync_helper was not "
+ "called first\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
+ return -EFAULT;
+
+ instance = megasas_lookup_instance(aen.host_no);
+
+ if (!instance)
+ return -ENODEV;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ return -ENODEV;
+ }
+
+ if (instance->unload == 1) {
+ return -ENODEV;
+ }
+
+ for (i = 0; i < wait_time; i++) {
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock,
+ flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: waiting for"
+ "controller reset to finish\n");
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ printk(KERN_ERR "megaraid_sas: timed out while waiting"
+ "for HBA to recover.\n");
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ mutex_lock(&instance->aen_mutex);
+ error = megasas_register_aen(instance, aen.seq_num,
+ aen.class_locale_word);
+ mutex_unlock(&instance->aen_mutex);
+ return error;
+}
+
+/**
+ * megasas_mgmt_ioctl - char node ioctl entry point
+ */
+static long
+megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE:
+ return megasas_mgmt_ioctl_fw(file, arg);
+
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
+static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct compat_megasas_iocpacket __user *cioc =
+ (struct compat_megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket __user *ioc =
+ compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+ int i;
+ int error = 0;
+ compat_uptr_t ptr;
+
+ if (clear_user(ioc, sizeof(*ioc)))
+ return -EFAULT;
+
+ if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
+ copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
+ copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
+ copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+ return -EFAULT;
+
+ /*
+ * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+ * sense_len is not null, so prepare the 64bit value under
+ * the same condition.
+ */
+ if (ioc->sense_len) {
+ void __user **sense_ioc_ptr =
+ (void __user **)(ioc->frame.raw + ioc->sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+ (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
+ }
+
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+ put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+ copy_in_user(&ioc->sgl[i].iov_len,
+ &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
+ return -EFAULT;
+ }
+
+ error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
+
+ if (copy_in_user(&cioc->frame.hdr.cmd_status,
+ &ioc->frame.hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
+ return -EFAULT;
+ }
+ return error;
+}
+
+static long
+megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE32:
+ return megasas_mgmt_compat_ioctl_fw(file, arg);
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+#endif
+
+/*
+ * File operations structure for management interface
+ */
+static const struct file_operations megasas_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = megasas_mgmt_open,
+ .fasync = megasas_mgmt_fasync,
+ .unlocked_ioctl = megasas_mgmt_ioctl,
+ .poll = megasas_mgmt_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = megasas_mgmt_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+/*
+ * PCI hotplug support registration structure
+ */
+static struct pci_driver megasas_pci_driver = {
+
+ .name = "megaraid_sas",
+ .id_table = megasas_pci_table,
+ .probe = megasas_probe_one,
+ .remove = megasas_detach_one,
+ .suspend = megasas_suspend,
+ .resume = megasas_resume,
+ .shutdown = megasas_shutdown,
+};
+
+/*
+ * Sysfs driver attributes
+ */
+static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+ MEGASAS_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
+
+static ssize_t
+megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_poll_for_event);
+}
+
+static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
+ megasas_sysfs_show_support_poll_for_event, NULL);
+
+ static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+ megasas_sysfs_show_support_device_change, NULL);
+
+static ssize_t
+megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", megasas_dbg_lvl);
+}
+
+static ssize_t
+megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
+{
+ int retval = count;
+ if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
+ printk(KERN_ERR "megasas: could not set dbg_lvl\n");
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
+ megasas_sysfs_set_dbg_lvl);
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+ struct megasas_aen_event *ev =
+ container_of(work, struct megasas_aen_event, hotplug_work.work);
+ struct megasas_instance *instance = ev->instance;
+ union megasas_evt_class_locale class_locale;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev1;
+ u16 pd_index = 0;
+ u16 ld_index = 0;
+ int i, j, doscan = 0;
+ u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
+ int error;
+
+ if (!instance) {
+ printk(KERN_ERR "invalid instance!\n");
+ kfree(ev);
+ return;
+ }
+
+ /* Adjust event workqueue thread wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Don't run the event workqueue thread if OCR is running */
+ for (i = 0; i < wait_time; i++) {
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: %s waiting for "
+ "controller reset to finish for scsi%d\n",
+ __func__, instance->host->host_no);
+ }
+ msleep(1000);
+ }
+
+ instance->ev = NULL;
+ host = instance->host;
+ if (instance->evt_detail) {
+
+ switch (le32_to_cpu(instance->evt_detail->code)) {
+ case MR_EVT_PD_INSERTED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_PD_REMOVED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (sdev1) {
+ scsi_device_put(sdev1);
+ }
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ }
+ break;
+ case MR_EVT_LD_CREATED:
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ doscan = 0;
+ }
+ break;
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+ doscan = 1;
+ break;
+ default:
+ doscan = 0;
+ break;
+ }
+ } else {
+ printk(KERN_ERR "invalid evt_detail!\n");
+ kfree(ev);
+ return;
+ }
+
+ if (doscan) {
+ printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+ instance->host->host_no);
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if ( instance->aen_cmd != NULL ) {
+ kfree(ev);
+ return ;
+ }
+
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+
+ /* Register AEN with FW for latest sequence number plus 1 */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+ mutex_lock(&instance->aen_mutex);
+ error = megasas_register_aen(instance, seq_num,
+ class_locale.word);
+ mutex_unlock(&instance->aen_mutex);
+
+ if (error)
+ printk(KERN_ERR "register aen failed error %x\n", error);
+
+ kfree(ev);
+}
+
+/**
+ * megasas_init - Driver load entry point
+ */
+static int __init megasas_init(void)
+{
+ int rval;
+
+ /*
+ * Announce driver version and other information
+ */
+ pr_info("megasas: %s\n", MEGASAS_VERSION);
+
+ spin_lock_init(&poll_aen_lock);
+
+ support_poll_for_event = 2;
+ support_device_change = 1;
+
+ memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+
+ /*
+ * Register character device node
+ */
+ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
+
+ if (rval < 0) {
+ printk(KERN_DEBUG "megasas: failed to open device node\n");
+ return rval;
+ }
+
+ megasas_mgmt_majorno = rval;
+
+ /*
+ * Register ourselves as PCI hotplug module
+ */
+ rval = pci_register_driver(&megasas_pci_driver);
+
+ if (rval) {
+ printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
+ goto err_pcidrv;
+ }
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_version);
+ if (rval)
+ goto err_dcf_attr_ver;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ if (rval)
+ goto err_dcf_support_poll_for_event;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+ if (rval)
+ goto err_dcf_dbg_lvl;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ if (rval)
+ goto err_dcf_support_device_change;
+
+ return rval;
+
+err_dcf_support_device_change:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+err_dcf_dbg_lvl:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+err_dcf_support_poll_for_event:
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+err_dcf_attr_ver:
+ pci_unregister_driver(&megasas_pci_driver);
+err_pcidrv:
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+ return rval;
+}
+
+/**
+ * megasas_exit - Driver unload entry point
+ */
+static void __exit megasas_exit(void)
+{
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+
+ pci_unregister_driver(&megasas_pci_driver);
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+}
+
+module_init(megasas_init);
+module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
new file mode 100644
index 000000000..4f7228786
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -0,0 +1,1360 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas_fp.c
+ *
+ * Authors: Avago Technologies
+ * Sumant Patro
+ * Varad Talamacki
+ * Manoj Jose
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+#include <asm/div64.h>
+
+#define LB_PENDING_CMDS_DEFAULT 4
+static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+module_param(lb_pending_cmds, int, S_IRUGO);
+MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
+ "threshold. Valid Values are 1-128. Default: 4");
+
+
+#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
+
+#define SPAN_DEBUG 0
+#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
+#define SPAN_INVALID 0xff
+
+/* Prototypes */
+static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo);
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+ u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
+static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
+ u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
+
+u32 mega_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor)
+ printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+/**
+ * @param dividend : Dividend
+ * @param divisor : Divisor
+ *
+ * @return quotient
+ **/
+u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+{
+ u32 remainder;
+ u64 d;
+
+ if (!divisor)
+ printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n");
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+
+ return d;
+}
+
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].ldRaid;
+}
+
+static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
+ struct MR_DRV_RAID_MAP_ALL
+ *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
+}
+
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
+}
+
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+/*
+ * This function will Populate Driver Map using firmware raid map
+ */
+void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ int i;
+ u16 ld_count;
+
+
+ struct MR_DRV_RAID_MAP_ALL *drv_map =
+ fusion->ld_drv_map[(instance->map_id & 1)];
+ struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ if (instance->supportmax256vd) {
+ memcpy(fusion->ld_drv_map[instance->map_id & 1],
+ fusion->ld_map[instance->map_id & 1],
+ fusion->current_map_sz);
+ /* New Raid map will not set totalSize, so keep expected value
+ * for legacy code in ValidateMapInfo
+ */
+ pDrvRaidMap->totalSize =
+ cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
+ } else {
+ fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+ fusion->ld_map[(instance->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
+ ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+
+#if VD_EXT_DEBUG
+ for (i = 0; i < ld_count; i++) {
+ dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
+ "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
+ instance->unique_id, i,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
+ }
+#endif
+
+ memset(drv_map, 0, fusion->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+ for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u8)pFwRaidMap->ldTgtIdToLd[i];
+ for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
+ i < MAX_LOGICAL_DRIVES_EXT; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
+ for (i = 0; i < ld_count; i++) {
+ pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev,
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
+ "raid map %p LD RAID MAP %p/%p\n", drv_map,
+ pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
+ &pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+ }
+ memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+ sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+ memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
+ }
+}
+
+/*
+ * This function will validate Map info data provided by FW
+ */
+u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ struct MR_DRV_RAID_MAP *pDrvRaidMap;
+ struct LD_LOAD_BALANCE_INFO *lbInfo;
+ PLD_SPAN_INFO ldSpanInfo;
+ struct MR_LD_RAID *raid;
+ u16 ldCount, num_lds;
+ u16 ld;
+ u32 expected_size;
+
+
+ MR_PopulateDrvRaidMap(instance);
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ pDrvRaidMap = &drv_map->raidMap;
+
+ lbInfo = fusion->load_balance_info;
+ ldSpanInfo = fusion->log_to_span;
+
+ if (instance->supportmax256vd)
+ expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
+ else
+ expected_size =
+ (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+
+ if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
+ dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
+ (unsigned int) expected_size);
+ dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
+ (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+ le32_to_cpu(pDrvRaidMap->totalSize));
+ return 0;
+ }
+
+ if (instance->UnevenSpanSupport)
+ mr_update_span_set(drv_map, ldSpanInfo);
+
+ mr_update_load_balance_params(drv_map, lbInfo);
+
+ num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+
+ /*Convert Raid capability values to CPU arch */
+ for (ldCount = 0; ldCount < num_lds; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
+ le32_to_cpus((u32 *)&raid->capability);
+ }
+
+ return 1;
+}
+
+u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+ struct MR_QUAD_ELEMENT *quad;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 span, j;
+
+ for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+
+ for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
+ quad = &pSpanBlock->block_span_info.quad[j];
+
+ if (le32_to_cpu(quad->diff) == 0)
+ return SPAN_INVALID;
+ if (le64_to_cpu(quad->logStart) <= row && row <=
+ le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
+ if (span_blk != NULL) {
+ u64 blk, debugBlk;
+ blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
+ debugBlk = blk;
+
+ blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* Function to print info about span set created in driver from FW raid map
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*/
+#if SPAN_DEBUG
+static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
+{
+
+ u8 span;
+ u32 element;
+ struct MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u16 ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+ ld, raid->spanDepth);
+ for (span = 0; span < raid->spanDepth; span++)
+ dev_dbg(&instance->pdev->dev, "Span=%x,"
+ " number of quads=%x\n", span,
+ le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements));
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ if (span_set->span_row_data_width == 0)
+ break;
+
+ dev_dbg(&instance->pdev->dev, "Span Set %x:"
+ "width=%x, diff=%x\n", element,
+ (unsigned int)span_set->span_row_data_width,
+ (unsigned int)span_set->diff);
+ dev_dbg(&instance->pdev->dev, "logical LBA"
+ "start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->log_start_lba,
+ (long unsigned int)span_set->log_end_lba);
+ dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->span_row_start,
+ (long unsigned int)span_set->span_row_end);
+ dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->data_row_start,
+ (long unsigned int)span_set->data_row_end);
+ dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->data_strip_start,
+ (long unsigned int)span_set->data_strip_end);
+
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >=
+ element + 1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+ dev_dbg(&instance->pdev->dev, "Span=%x,"
+ "Quad=%x, diff=%x\n", span,
+ element, le32_to_cpu(quad->diff));
+ dev_dbg(&instance->pdev->dev,
+ "offset_in_span=0x%08lx\n",
+ (long unsigned int)le64_to_cpu(quad->offsetInSpan));
+ dev_dbg(&instance->pdev->dev,
+ "logical start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)le64_to_cpu(quad->logStart),
+ (long unsigned int)le64_to_cpu(quad->logEnd));
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+* div_error - Devide error code.
+*/
+
+u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+ u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ u32 span, info;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].
+ block_span_info.quad[info];
+ if (le32_to_cpu(quad->diff) == 0)
+ return SPAN_INVALID;
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
+ if (span_blk != NULL) {
+ u64 blk;
+ blk = mega_div64_32
+ ((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ blk = (blk + le64_to_cpu(quad->offsetInSpan))
+ << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* Strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* row - row associated with strip
+*/
+
+static u64 get_row_from_strip(struct megasas_instance *instance,
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 info, strip_offset, span, span_offset;
+ u64 span_set_Strip, span_set_Row, retval;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ span_set_Strip = strip - span_set->data_strip_start;
+ strip_offset = mega_mod64(span_set_Strip,
+ span_set->span_row_data_width);
+ span_set_Row = mega_div64_32(span_set_Strip,
+ span_set->span_row_data_width) * span_set->diff;
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset++;
+ else
+ break;
+ }
+#if SPAN_DEBUG
+ dev_info(&instance->pdev->dev, "Strip 0x%llx,"
+ "span_set_Strip 0x%llx, span_set_Row 0x%llx"
+ "data width 0x%llx span offset 0x%x\n", strip,
+ (unsigned long long)span_set_Strip,
+ (unsigned long long)span_set_Row,
+ (unsigned long long)span_set->span_row_data_width,
+ span_offset);
+ dev_info(&instance->pdev->dev, "For strip 0x%llx"
+ "row is 0x%llx\n", strip,
+ (unsigned long long) span_set->data_row_start +
+ (unsigned long long) span_set_Row + (span_offset - 1));
+#endif
+ retval = (span_set->data_row_start + span_set_Row +
+ (span_offset - 1));
+ return retval;
+ }
+ return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* Strip - Start strip associated with row
+*/
+
+static u64 get_strip_from_row(struct megasas_instance *instance,
+ u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 span, info;
+ u64 strip;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[info];
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ mega_mod64((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff)) == 0) {
+ strip = mega_div64_32
+ (((row - span_set->data_row_start)
+ - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ strip *= span_set->span_row_data_width;
+ strip += span_set->data_strip_start;
+ strip += span_set->strip_offset[span];
+ return strip;
+ }
+ }
+ }
+ dev_err(&instance->pdev->dev, "get_strip_from_row"
+ "returns invalid strip for ld=%x, row=%lx\n",
+ ld, (long unsigned int)row);
+ return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* Phys Arm - Phys Arm associated with strip
+*/
+
+static u32 get_arm_from_strip(struct megasas_instance *instance,
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 info, strip_offset, span, span_offset, retval;
+
+ for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ strip_offset = (uint)mega_mod64
+ ((strip - span_set->data_strip_start),
+ span_set->span_row_data_width);
+
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset =
+ span_set->strip_offset[span];
+ else
+ break;
+ }
+#if SPAN_DEBUG
+ dev_info(&instance->pdev->dev, "get_arm_from_strip:"
+ "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
+ (long unsigned int)strip, (strip_offset - span_offset));
+#endif
+ retval = (strip_offset - span_offset);
+ return retval;
+ }
+
+ dev_err(&instance->pdev->dev, "get_arm_from_strip"
+ "returns invalid arm for ld=%x strip=%lx\n",
+ ld, (long unsigned int)strip);
+
+ return -1;
+}
+
+/* This Function will return Phys arm */
+u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ /* Need to check correct default value */
+ u32 arm = 0;
+
+ switch (raid->level) {
+ case 0:
+ case 5:
+ case 6:
+ arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+ break;
+ case 1:
+ /* start with logical arm */
+ arm = get_arm_from_strip(instance, ld, stripe, map);
+ if (arm != -1U)
+ arm *= 2;
+ break;
+ }
+
+ return arm;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+ u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 pd, arRef;
+ u8 physArm, span;
+ u64 row;
+ u8 retval = TRUE;
+ u8 do_invader = 0;
+ u64 *pdBlock = &io_info->pdBlock;
+ u16 *pDevHandle = &io_info->devHandle;
+ u32 logArm, rowMod, armQ, arm;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ do_invader = 1;
+
+ /*Get row and span from io_info for Uneven Span IO.*/
+ row = io_info->start_row;
+ span = io_info->start_span;
+
+
+ if (raid->level == 6) {
+ logArm = get_arm_from_strip(instance, ld, stripRow, map);
+ if (logArm == -1U)
+ return FALSE;
+ rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+ armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+ arm = armQ + 1 + logArm;
+ if (arm >= SPAN_ROW_SIZE(map, ld, span))
+ arm -= SPAN_ROW_SIZE(map, ld, span);
+ physArm = (u8)arm;
+ } else
+ /* Calculate the arm */
+ physArm = get_arm(instance, ld, span, stripRow, map);
+ if (physArm == 0xFF)
+ return FALSE;
+
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map);
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID;
+ if ((raid->level >= 5) &&
+ (!do_invader || (do_invader &&
+ (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+ physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
+ return retval;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe.
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+ u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 pd, arRef;
+ u8 physArm, span;
+ u64 row;
+ u8 retval = TRUE;
+ u8 do_invader = 0;
+ u64 *pdBlock = &io_info->pdBlock;
+ u16 *pDevHandle = &io_info->devHandle;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ do_invader = 1;
+
+ row = mega_div64_32(stripRow, raid->rowDataSize);
+
+ if (raid->level == 6) {
+ /* logical arm within row */
+ u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
+ u32 rowMod, armQ, arm;
+
+ if (raid->rowSize == 0)
+ return FALSE;
+ /* get logical row mod */
+ rowMod = mega_mod64(row, raid->rowSize);
+ armQ = raid->rowSize-1-rowMod; /* index of Q drive */
+ arm = armQ+1+logArm; /* data always logically follows Q */
+ if (arm >= raid->rowSize) /* handle wrap condition */
+ arm -= raid->rowSize;
+ physArm = (u8)arm;
+ } else {
+ if (raid->modFactor == 0)
+ return FALSE;
+ physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
+ raid->modFactor),
+ map);
+ }
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ *pdBlock = row << raid->stripeShift;
+ } else {
+ span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
+ if (span == SPAN_INVALID)
+ return FALSE;
+ }
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
+
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd. */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+ if ((raid->level >= 5) &&
+ (!do_invader || (do_invader &&
+ (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ /* Get alternate Pd. */
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+ physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
+ return retval;
+}
+
+/*
+******************************************************************************
+*
+* MR_BuildRaidContext function
+*
+* This function will initiate command processing. The start/end row and strip
+* information is calculated then the lock is acquired.
+* This function will return 0 if region lock was acquired OR return num strips
+*/
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
+{
+ struct MR_LD_RAID *raid;
+ u32 ld, stripSize, stripe_mask;
+ u64 endLba, endStrip, endRow, start_row, start_strip;
+ u64 regStart;
+ u32 regSize;
+ u8 num_strips, numRows;
+ u16 ref_in_start_stripe, ref_in_end_stripe;
+ u64 ldStartBlock;
+ u32 numBlocks, ldTgtId;
+ u8 isRead;
+ u8 retval = 0;
+ u8 startlba_span = SPAN_INVALID;
+ u64 *pdBlock = &io_info->pdBlock;
+
+ ldStartBlock = io_info->ldStartBlock;
+ numBlocks = io_info->numBlocks;
+ ldTgtId = io_info->ldTgtId;
+ isRead = io_info->isRead;
+ io_info->IoforUnevenSpan = 0;
+ io_info->start_span = SPAN_INVALID;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+
+ /*
+ * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
+ * return FALSE
+ */
+ if (raid->rowDataSize == 0) {
+ if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+ return FALSE;
+ else if (instance->UnevenSpanSupport) {
+ io_info->IoforUnevenSpan = 1;
+ } else {
+ dev_info(&instance->pdev->dev,
+ "raid->rowDataSize is 0, but has SPAN[0]"
+ "rowDataSize = 0x%0x,"
+ "but there is _NO_ UnevenSpanSupport\n",
+ MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+ return FALSE;
+ }
+ }
+
+ stripSize = 1 << raid->stripeShift;
+ stripe_mask = stripSize-1;
+
+
+ /*
+ * calculate starting row and stripe, and number of strips and rows
+ */
+ start_strip = ldStartBlock >> raid->stripeShift;
+ ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
+ endLba = ldStartBlock + numBlocks - 1;
+ ref_in_end_stripe = (u16)(endLba & stripe_mask);
+ endStrip = endLba >> raid->stripeShift;
+ num_strips = (u8)(endStrip - start_strip + 1); /* End strip */
+
+ if (io_info->IoforUnevenSpan) {
+ start_row = get_row_from_strip(instance, ld, start_strip, map);
+ endRow = get_row_from_strip(instance, ld, endStrip, map);
+ if (start_row == -1ULL || endRow == -1ULL) {
+ dev_info(&instance->pdev->dev, "return from %s %d."
+ "Send IO w/o region lock.\n",
+ __func__, __LINE__);
+ return FALSE;
+ }
+
+ if (raid->spanDepth == 1) {
+ startlba_span = 0;
+ *pdBlock = start_row << raid->stripeShift;
+ } else
+ startlba_span = (u8)mr_spanset_get_span_block(instance,
+ ld, start_row, pdBlock, map);
+ if (startlba_span == SPAN_INVALID) {
+ dev_info(&instance->pdev->dev, "return from %s %d"
+ "for row 0x%llx,start strip %llx"
+ "endSrip %llx\n", __func__, __LINE__,
+ (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip);
+ return FALSE;
+ }
+ io_info->start_span = startlba_span;
+ io_info->start_row = start_row;
+#if SPAN_DEBUG
+ dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
+ "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
+ " span 0x%x\n", __func__, __LINE__,
+ (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip, startlba_span);
+ dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
+ "Start span 0x%x\n", (unsigned long long)start_row,
+ (unsigned long long)endRow, startlba_span);
+#endif
+ } else {
+ start_row = mega_div64_32(start_strip, raid->rowDataSize);
+ endRow = mega_div64_32(endStrip, raid->rowDataSize);
+ }
+ numRows = (u8)(endRow - start_row + 1);
+
+ /*
+ * calculate region info.
+ */
+
+ /* assume region is at the start of the first row */
+ regStart = start_row << raid->stripeShift;
+ /* assume this IO needs the full row - we'll adjust if not true */
+ regSize = stripSize;
+
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpWriteAcrossStripe));
+ } else
+ io_info->fpOkForIo = FALSE;
+
+ if (numRows == 1) {
+ /* single-strip IOs can always lock only the data needed */
+ if (num_strips == 1) {
+ regStart += ref_in_start_stripe;
+ regSize = numBlocks;
+ }
+ /* multi-strip IOs always need to full stripe locked */
+ } else if (io_info->IoforUnevenSpan == 0) {
+ /*
+ * For Even span region lock optimization.
+ * If the start strip is the last in the start row
+ */
+ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+ regStart += ref_in_start_stripe;
+ /* initialize count to sectors from startref to end
+ of strip */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+
+ /* add complete rows in the middle of the transfer */
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row*/
+ if (endStrip == endRow*raid->rowDataSize)
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ } else {
+ /*
+ * For Uneven span region lock optimization.
+ * If the start strip is the last in the start row
+ */
+ if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
+ SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+ regStart += ref_in_start_stripe;
+ /* initialize count to sectors from
+ * startRef to end of strip
+ */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ /* Add complete rows in the middle of the transfer*/
+
+ if (numRows > 2)
+ /* Add complete rows in the middle of the transfer*/
+ regSize += (numRows-2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row */
+ if (endStrip == get_strip_from_row(instance, ld, endRow, map))
+ regSize += ref_in_end_stripe + 1;
+ else
+ regSize += stripSize;
+ }
+
+ pRAID_Context->timeoutValue =
+ cpu_to_le16(raid->fpIoTimeoutForLd ?
+ raid->fpIoTimeoutForLd :
+ map->raidMap.fpPdIoTimeoutSec);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ pRAID_Context->regLockFlags = (isRead) ?
+ raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead) ?
+ REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+ pRAID_Context->VirtualDiskTgtId = raid->targetId;
+ pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
+ pRAID_Context->regLockLength = cpu_to_le32(regSize);
+ pRAID_Context->configSeqNum = raid->seqNum;
+ /* save pointer to raid->LUN array */
+ *raidLUN = raid->LUN;
+
+
+ /*Get Phy Params only if FP capable, or else leave it to MR firmware
+ to do the calculation.*/
+ if (io_info->fpOkForIo) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(instance, ld,
+ start_strip, ref_in_start_stripe,
+ io_info, pRAID_Context, map) :
+ MR_GetPhyParams(instance, ld, start_strip,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map);
+ /* If IO on an invalid Pd, then FP is not possible.*/
+ if (io_info->devHandle == MR_PD_INVALID)
+ io_info->fpOkForIo = FALSE;
+ return retval;
+ } else if (isRead) {
+ uint stripIdx;
+ for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(instance, ld,
+ start_strip + stripIdx,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(instance, ld,
+ start_strip + stripIdx, ref_in_start_stripe,
+ io_info, pRAID_Context, map);
+ if (!retval)
+ return TRUE;
+ }
+ }
+
+#if SPAN_DEBUG
+ /* Just for testing what arm we get for strip.*/
+ if (io_info->IoforUnevenSpan)
+ get_arm_from_strip(instance, ld, start_strip, map);
+#endif
+ return TRUE;
+}
+
+/*
+******************************************************************************
+*
+* This routine pepare spanset info from Valid Raid map and store it into
+* local copy of ldSpanInfo per instance data structure.
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
+{
+ u8 span, count;
+ u32 element, span_row_width;
+ u64 span_row;
+ struct MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set, *span_set_prev;
+ struct MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u16 ld;
+
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) <
+ element + 1)
+ continue;
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+
+ span_set->diff = le32_to_cpu(quad->diff);
+
+ for (count = 0, span_row_width = 0;
+ count < raid->spanDepth; count++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
+ spanBlock[count].
+ block_span_info.
+ noElements) >= element + 1) {
+ span_set->strip_offset[count] =
+ span_row_width;
+ span_row_width +=
+ MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize;
+ printk(KERN_INFO "megasas:"
+ "span %x rowDataSize %x\n",
+ count, MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize);
+ }
+ }
+
+ span_set->span_row_data_width = span_row_width;
+ span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+ le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+ le32_to_cpu(quad->diff));
+
+ if (element == 0) {
+ span_set->log_start_lba = 0;
+ span_set->log_end_lba =
+ ((span_row << raid->stripeShift)
+ * span_row_width) - 1;
+
+ span_set->span_row_start = 0;
+ span_set->span_row_end = span_row - 1;
+
+ span_set->data_strip_start = 0;
+ span_set->data_strip_end =
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = 0;
+ span_set->data_row_end =
+ (span_row * le32_to_cpu(quad->diff)) - 1;
+ } else {
+ span_set_prev = &(ldSpanInfo[ld].
+ span_set[element - 1]);
+ span_set->log_start_lba =
+ span_set_prev->log_end_lba + 1;
+ span_set->log_end_lba =
+ span_set->log_start_lba +
+ ((span_row << raid->stripeShift)
+ * span_row_width) - 1;
+
+ span_set->span_row_start =
+ span_set_prev->span_row_end + 1;
+ span_set->span_row_end =
+ span_set->span_row_start + span_row - 1;
+
+ span_set->data_strip_start =
+ span_set_prev->data_strip_end + 1;
+ span_set->data_strip_end =
+ span_set->data_strip_start +
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start =
+ span_set_prev->data_row_end + 1;
+ span_set->data_row_end =
+ span_set->data_row_start +
+ (span_row * le32_to_cpu(quad->diff)) - 1;
+ }
+ break;
+ }
+ if (span == raid->spanDepth)
+ break;
+ }
+ }
+#if SPAN_DEBUG
+ getSpanInfo(map, ldSpanInfo);
+#endif
+
+}
+
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo)
+{
+ int ldCount;
+ u16 ld;
+ struct MR_LD_RAID *raid;
+
+ if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
+ lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+
+ raid = MR_LdRaidGet(ld, drv_map);
+ if ((raid->level != 1) ||
+ (raid->ldState != MR_LD_STATE_OPTIMAL)) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+ lbInfo[ldCount].loadBalanceFlag = 1;
+ }
+}
+
+u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+{
+ struct fusion_context *fusion;
+ struct MR_LD_RAID *raid;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ u16 pend0, pend1, ld;
+ u64 diff0, diff1;
+ u8 bestArm, pd0, pd1, span, arm;
+ u32 arRef, span_row_size;
+
+ u64 block = io_info->ldStartBlock;
+ u32 count = io_info->numBlocks;
+
+ span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
+ >> RAID_CTX_SPANARM_SPAN_SHIFT);
+ arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
+
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
+ span_row_size = instance->UnevenSpanSupport ?
+ SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
+
+ arRef = MR_LdSpanArrayGet(ld, span, drv_map);
+ pd0 = MR_ArPdGet(arRef, arm, drv_map);
+ pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
+ (arm + 1 - span_row_size) : arm + 1, drv_map);
+
+ /* get the pending cmds for the data and mirror arms */
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+
+ /* Determine the disk whose head is nearer to the req. block */
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+ bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+
+ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+ (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+ bestArm ^= 1;
+
+ /* Update the last accessed block on the correct pd */
+ io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+ lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
+ io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+#if SPAN_DEBUG
+ if (arm != bestArm)
+ dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
+ "occur - span 0x%x arm 0x%x bestArm 0x%x "
+ "io_info->span_arm 0x%x\n",
+ span, arm, bestArm, io_info->span_arm);
+#endif
+ return io_info->pd_after_lb;
+}
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+{
+ u8 arm_pd;
+ u16 devHandle;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+
+ /* get best new arm (PD ID) */
+ arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+ devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
+ return devHandle;
+}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
new file mode 100644
index 000000000..5a0800d19
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -0,0 +1,2993 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas_fusion.c
+ *
+ * Authors: Avago Technologies
+ * Sumant Patro
+ * Adam Radford
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_dbg.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+extern void megasas_free_cmds(struct megasas_instance *instance);
+extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+ *instance);
+extern void
+megasas_complete_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, u8 alt_status);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
+
+void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
+int megasas_alloc_cmds(struct megasas_instance *instance);
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+int
+megasas_issue_polled(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+void megaraid_sas_kill_hba(struct megasas_instance *instance);
+
+extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
+extern int resetwaittime;
+
+
+
+/**
+ * megasas_enable_intr_fusion - Enables interrupts
+ * @regs: MFI register set
+ */
+void
+megasas_enable_intr_fusion(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+
+ instance->mask_interrupts = 0;
+ /* For Thunderbolt/Invader also clear intr on enable */
+ writel(~0, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+
+ writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_fusion - Disables interrupt
+ * @regs: MFI register set
+ */
+void
+megasas_disable_intr_fusion(struct megasas_instance *instance)
+{
+ u32 mask = 0xFFFFFFFF;
+ u32 status;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ instance->mask_interrupts = 1;
+
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ status = readl(&regs->outbound_intr_mask);
+}
+
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & 1) {
+ writel(status, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+ return 1;
+ }
+ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * megasas_get_cmd_fusion - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
+ *instance)
+{
+ unsigned long flags;
+ struct fusion_context *fusion =
+ (struct fusion_context *)instance->ctrl_context;
+ struct megasas_cmd_fusion *cmd = NULL;
+
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
+
+ if (!list_empty(&fusion->cmd_pool)) {
+ cmd = list_entry((&fusion->cmd_pool)->next,
+ struct megasas_cmd_fusion, list);
+ list_del_init(&cmd->list);
+ } else {
+ printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
+ }
+
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * megasas_return_cmd_fusion - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd)
+{
+ unsigned long flags;
+ struct fusion_context *fusion =
+ (struct fusion_context *)instance->ctrl_context;
+
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
+
+ cmd->scmd = NULL;
+ cmd->sync_cmd_idx = (u32)ULONG_MAX;
+ memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ list_add(&cmd->list, (&fusion->cmd_pool)->next);
+
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+}
+
+/**
+ * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
+ * @instance: Adapter soft state
+ * @cmd_mfi: MFI Command packet to be returned to free command pool
+ * @cmd_mpt: MPT Command packet to be returned to free command pool
+ */
+inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi,
+ struct megasas_cmd_fusion *cmd_fusion)
+{
+ unsigned long flags;
+
+ /*
+ * TO DO: optimize this code and use only one lock instead of two
+ * locks being used currently- mpt_pool_lock is acquired
+ * inside mfi_pool_lock
+ */
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
+ dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
+ __func__, __LINE__);
+ atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
+ __megasas_return_cmd(instance, cmd_mfi);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+/**
+ * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool_fusion(
+ struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ u16 max_cmd = instance->max_fw_cmds;
+
+ struct megasas_cmd_fusion *cmd;
+
+ if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
+ printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
+ "sense pool : %p\n", fusion->sg_dma_pool,
+ fusion->sense_dma_pool);
+ return;
+ }
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = fusion->cmd_list[i];
+
+ if (cmd->sg_frame)
+ pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(fusion->sg_dma_pool);
+ pci_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sg_dma_pool = NULL;
+ fusion->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+
+
+ req_sz = fusion->request_alloc_sz;
+ reply_sz = fusion->reply_alloc_sz;
+ io_frames_sz = fusion->io_frames_alloc_sz;
+
+ max_cmds = instance->max_fw_cmds;
+
+ /* Free descriptors and request Frames memory */
+ if (fusion->req_frames_desc)
+ dma_free_coherent(&instance->pdev->dev, req_sz,
+ fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+
+ if (fusion->reply_frames_desc) {
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+ }
+
+ if (fusion->io_request_frames) {
+ pci_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ pci_pool_destroy(fusion->io_request_frames_pool);
+ }
+
+ /* Free the Fusion frame pool */
+ megasas_teardown_frame_pool_fusion(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < max_cmds; i++)
+ kfree(fusion->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(fusion->cmd_list);
+ fusion->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&fusion->cmd_pool);
+}
+
+/**
+ * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ */
+static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ u32 total_sz_chain_frame;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME;
+
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+
+ fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion",
+ instance->pdev,
+ total_sz_chain_frame, 4,
+ 0);
+ if (!fusion->sg_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup request pool "
+ "fusion\n");
+ return -ENOMEM;
+ }
+ fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
+ instance->pdev,
+ SCSI_SENSE_BUFFERSIZE, 64, 0);
+
+ if (!fusion->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool "
+ "fusion\n");
+ pci_pool_destroy(fusion->sg_dma_pool);
+ fusion->sg_dma_pool = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = fusion->cmd_list[i];
+
+ cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
+ GFP_KERNEL,
+ &cmd->sg_frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+ /*
+ * megasas_teardown_frame_pool_fusion() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->sg_frame || !cmd->sense) {
+ printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+ megasas_teardown_frame_pool_fusion(instance);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * megasas_alloc_cmds_fusion - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed
+ * In this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * cmds are formed in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as SMID of the cmd.
+ * SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+ int i, j, count;
+ u32 max_cmd, io_frames_sz;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ u32 offset;
+ dma_addr_t io_req_base_phys;
+ u8 *io_req_base;
+
+ fusion = instance->ctrl_context;
+
+ max_cmd = instance->max_fw_cmds;
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+
+ if (!fusion->req_frames_desc) {
+ printk(KERN_ERR "megasas; Could not allocate memory for "
+ "request_frames\n");
+ goto fail_req_desc;
+ }
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ fusion->reply_frames_desc_pool =
+ pci_pool_create("reply_frames pool", instance->pdev,
+ fusion->reply_alloc_sz * count, 16, 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ printk(KERN_ERR "megasas; Could not allocate memory for "
+ "reply_frame pool\n");
+ goto fail_reply_desc;
+ }
+
+ fusion->reply_frames_desc =
+ pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
+ &fusion->reply_frames_desc_phys);
+ if (!fusion->reply_frames_desc) {
+ printk(KERN_ERR "megasas; Could not allocate memory for "
+ "reply_frame pool\n");
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+ goto fail_reply_desc;
+ }
+
+ reply_desc = fusion->reply_frames_desc;
+ for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+
+ io_frames_sz = fusion->io_frames_alloc_sz;
+
+ fusion->io_request_frames_pool =
+ pci_pool_create("io_request_frames pool", instance->pdev,
+ fusion->io_frames_alloc_sz, 16, 0);
+
+ if (!fusion->io_request_frames_pool) {
+ printk(KERN_ERR "megasas: Could not allocate memory for "
+ "io_request_frame pool\n");
+ goto fail_io_frames;
+ }
+
+ fusion->io_request_frames =
+ pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ printk(KERN_ERR "megasas: Could not allocate memory for "
+ "io_request_frames frames\n");
+ pci_pool_destroy(fusion->io_request_frames_pool);
+ goto fail_io_frames;
+ }
+
+ /*
+ * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
+ * max_cmd, GFP_KERNEL);
+
+ if (!fusion->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
+ "memory for cmd_list_fusion\n");
+ goto fail_cmd_list;
+ }
+
+ max_cmd = instance->max_fw_cmds;
+ for (i = 0; i < max_cmd; i++) {
+ fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
+ GFP_KERNEL);
+ if (!fusion->cmd_list[i]) {
+ printk(KERN_ERR "Could not alloc cmd list fusion\n");
+
+ for (j = 0; j < i; j++)
+ kfree(fusion->cmd_list[j]);
+
+ kfree(fusion->cmd_list);
+ fusion->cmd_list = NULL;
+ goto fail_cmd_list;
+ }
+ }
+
+ /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
+ io_req_base = fusion->io_request_frames +
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = fusion->io_request_frames_phys +
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+
+ /*
+ * Add all the commands to command pool (fusion->cmd_pool)
+ */
+
+ /* SMID 0 is reserved. Set SMID/index from 1 */
+ for (i = 0; i < max_cmd; i++) {
+ cmd = fusion->cmd_list[i];
+ offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+ memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
+ cmd->index = i + 1;
+ cmd->scmd = NULL;
+ cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */
+ cmd->instance = instance;
+ cmd->io_request =
+ (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ (io_req_base + offset);
+ memset(cmd->io_request, 0,
+ sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+
+ list_add_tail(&cmd->list, &fusion->cmd_pool);
+ }
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool_fusion(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds_fusion(instance);
+ goto fail_req_desc;
+ }
+
+ return 0;
+
+fail_cmd_list:
+ pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ pci_pool_destroy(fusion->io_request_frames_pool);
+fail_io_frames:
+ dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+fail_reply_desc:
+ dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
+ fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+fail_req_desc:
+ return -ENOMEM;
+}
+
+/**
+ * wait_and_poll - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds)
+{
+ int i;
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+ struct fusion_context *fusion;
+
+ u32 msecs = seconds * 1000;
+
+ fusion = instance->ctrl_context;
+ /*
+ * Wait for cmd_status to change
+ */
+ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
+ rmb();
+ msleep(20);
+ }
+
+ if (frame_hdr->cmd_status == 0xff) {
+ if (fusion)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_ioc_init_fusion - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * Issues the IOC Init cmd
+ */
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance)
+{
+ struct megasas_init_frame *init_frame;
+ struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+ dma_addr_t ioc_init_handle;
+ struct megasas_cmd *cmd;
+ u8 ret;
+ struct fusion_context *fusion;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
+ int i;
+ struct megasas_header *frame_hdr;
+
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
+ ret = 1;
+ goto fail_get_cmd;
+ }
+
+ IOCInitMessage =
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &ioc_init_handle, GFP_KERNEL);
+
+ if (!IOCInitMessage) {
+ printk(KERN_ERR "Could not allocate memory for "
+ "IOCInitMessage\n");
+ ret = 1;
+ goto fail_fw_init;
+ }
+
+ memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+ IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
+ IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+ IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+ IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+
+ frame_hdr = &cmd->frame->hdr;
+ frame_hdr->cmd_status = 0xFF;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+
+ /* driver support Extended MSIX */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ init_frame->driver_operations.
+ mfi_capabilities.support_additional_msix = 1;
+ /* driver supports HA / Remote LUN over Fast Path interface */
+ init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.support_max_255lds
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw
+ = 1;
+ /* Convert capability to LE32 */
+ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
+
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(ioc_init_handle));
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(ioc_init_handle));
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+ req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
+ req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
+ req_desc.MFAIo.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ /*
+ * disable the intr before firing the init frame
+ */
+ instance->instancet->disable_intr(instance);
+
+ for (i = 0; i < (10 * 1000); i += 20) {
+ if (readl(&instance->reg_set->doorbell) & 1)
+ msleep(20);
+ else
+ break;
+ }
+
+ instance->instancet->fire_cmd(instance, req_desc.u.low,
+ req_desc.u.high, instance->reg_set);
+
+ wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+
+ frame_hdr = &cmd->frame->hdr;
+ if (frame_hdr->cmd_status != 0) {
+ ret = 1;
+ goto fail_fw_init;
+ }
+ printk(KERN_ERR "megasas:IOC Init cmd success\n");
+
+ ret = 0;
+
+fail_fw_init:
+ megasas_return_cmd(instance, cmd);
+ if (IOCInitMessage)
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ IOCInitMessage, ioc_init_handle);
+fail_get_cmd:
+ return ret;
+}
+
+/*
+ * megasas_get_ld_map_info - Returns FW's ld_map structure
+ * @instance: Adapter soft state
+ * @pend: Pend the command or not
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out SYSTEM
+ * supported by the FW.
+ * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
+ * dcmd.mbox.b[0] - number of LDs being sync'd
+ * dcmd.mbox.b[1] - 0 - complete command immediately.
+ * - 1 - pend till config change
+ * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
+ * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
+ * uses extended struct MR_FW_RAID_MAP_EXT
+ */
+static int
+megasas_get_ld_map_info(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ void *ci;
+ dma_addr_t ci_h = 0;
+ u32 size_map_info;
+ struct fusion_context *fusion;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
+ return -ENOMEM;
+ }
+
+ fusion = instance->ctrl_context;
+
+ if (!fusion) {
+ megasas_return_cmd(instance, cmd);
+ return -ENXIO;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_map_info = fusion->current_map_sz;
+
+ ci = (void *) fusion->ld_map[(instance->map_id & 1)];
+ ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, fusion->max_map_sz);
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
+ __func__, cpu_to_le32(size_map_info));
+#endif
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+u8
+megasas_get_map_info(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ fusion->fast_path_io = 0;
+ if (!megasas_get_ld_map_info(instance)) {
+ if (MR_ValidateMapInfo(instance)) {
+ fusion->fast_path_io = 1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * megasas_sync_map_info - Returns FW's ld_map structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out SYSTEM
+ * supported by the FW.
+ */
+int
+megasas_sync_map_info(struct megasas_instance *instance)
+{
+ int ret = 0, i;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ u32 size_sync_info, num_lds;
+ struct fusion_context *fusion;
+ struct MR_LD_TARGET_SYNC *ci = NULL;
+ struct MR_DRV_RAID_MAP_ALL *map;
+ struct MR_LD_RAID *raid;
+ struct MR_LD_TARGET_SYNC *ld_sync;
+ dma_addr_t ci_h = 0;
+ u32 size_map_info;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
+ "info.\n");
+ return -ENOMEM;
+ }
+
+ fusion = instance->ctrl_context;
+
+ if (!fusion) {
+ megasas_return_cmd(instance, cmd);
+ return 1;
+ }
+
+ map = fusion->ld_drv_map[instance->map_id & 1];
+
+ num_lds = le16_to_cpu(map->raidMap.ldCount);
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ ci = (struct MR_LD_TARGET_SYNC *)
+ fusion->ld_map[(instance->map_id - 1) & 1];
+ memset(ci, 0, fusion->max_map_sz);
+
+ ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
+
+ ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
+
+ for (i = 0; i < num_lds; i++, ld_sync++) {
+ raid = MR_LdRaidGet(i, map);
+ ld_sync->targetId = MR_GetLDTgtId(i, map);
+ ld_sync->seqNum = raid->seqNum;
+ }
+
+ size_map_info = fusion->current_map_sz;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->mbox.b[0] = num_lds;
+ dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ instance->map_update_cmd = cmd;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ return ret;
+}
+
+/*
+ * meagasas_display_intel_branding - Display branding string
+ * @instance: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+megasas_display_intel_branding(struct megasas_instance *instance)
+{
+ if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_INVADER:
+ switch (instance->pdev->subsystem_device) {
+ case MEGARAID_INTEL_RS3DC080_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3DC080_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3DC040_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3DC040_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3SC008_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3SC008_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3MC044_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3MC044_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ case PCI_DEVICE_ID_LSI_FURY:
+ switch (instance->pdev->subsystem_device) {
+ case MEGARAID_INTEL_RS3WC080_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3WC080_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3WC040_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3WC040_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * megasas_init_adapter_fusion - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+u32
+megasas_init_adapter_fusion(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *reg_set;
+ struct fusion_context *fusion;
+ u32 max_cmd;
+ int i = 0, count;
+
+ fusion = instance->ctrl_context;
+
+ reg_set = instance->reg_set;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds =
+ instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds = instance->max_fw_cmds-1;
+
+ /*
+ * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
+ */
+ instance->max_mfi_cmds =
+ MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
+
+ max_cmd = instance->max_fw_cmds;
+
+ fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
+
+ fusion->request_alloc_sz =
+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
+ *(fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
+ (max_cmd + 1)); /* Extra 1 for SMID 0 */
+
+ fusion->max_sge_in_main_msg =
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+ fusion->max_sge_in_chain =
+ MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
+
+ instance->max_num_sge = rounddown_pow_of_two(
+ fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2);
+
+ /* Used for pass thru MFI frame (DCMD) */
+ fusion->chain_offset_mfi_pthru =
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+ fusion->chain_offset_io_request =
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ sizeof(union MPI2_SGE_IO_UNION))/16;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count; i++)
+ fusion->last_reply_idx[i] = 0;
+
+ /*
+ * Allocate memory for descriptors
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_mfi_cmds;
+ if (megasas_alloc_cmds_fusion(instance))
+ goto fail_alloc_cmds;
+
+ if (megasas_ioc_init_fusion(instance))
+ goto fail_ioc_init;
+
+ megasas_display_intel_branding(instance);
+ if (megasas_get_ctrl_info(instance)) {
+ dev_err(&instance->pdev->dev,
+ "Could not get controller info. Fail from %s %d\n",
+ __func__, __LINE__);
+ goto fail_ioc_init;
+ }
+
+ instance->flag_ieee = 1;
+ fusion->fast_path_io = 0;
+
+ fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = NULL;
+ fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
+ fusion->drv_map_pages);
+ if (!fusion->ld_drv_map[i]) {
+ dev_err(&instance->pdev->dev, "Could not allocate "
+ "memory for local map info for %d pages\n",
+ fusion->drv_map_pages);
+ if (i == 1)
+ free_pages((ulong)fusion->ld_drv_map[0],
+ fusion->drv_map_pages);
+ goto fail_ioc_init;
+ }
+ memset(fusion->ld_drv_map[i], 0,
+ ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
+ }
+
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+ fusion->max_map_sz,
+ &fusion->ld_map_phys[i],
+ GFP_KERNEL);
+ if (!fusion->ld_map[i]) {
+ printk(KERN_ERR "megasas: Could not allocate memory "
+ "for map info\n");
+ goto fail_map_info;
+ }
+ }
+
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+
+ return 0;
+
+fail_map_info:
+ if (i == 1)
+ dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
+ fusion->ld_map[0], fusion->ld_map_phys[0]);
+fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
+ return 1;
+}
+
+/**
+ * megasas_fire_cmd_fusion - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+ dma_addr_t req_desc_lo,
+ u32 req_desc_hi,
+ struct megasas_register_set __iomem *regs)
+{
+#if defined(writeq) && defined(CONFIG_64BIT)
+ u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
+ le32_to_cpu(req_desc_lo));
+
+ writeq(req_data, &(regs)->inbound_low_queue_port);
+#else
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+
+ writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
+}
+
+/**
+ * map_cmd_status - Maps FW cmd status to OS cmd status
+ * @cmd : Pointer to cmd
+ * @status : status of cmd returned by FW
+ * @ext_status : ext status of cmd returned by FW
+ */
+
+void
+map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+{
+
+ switch (status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | ext_status;
+ if (ext_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ cmd->scmd->result = DID_IMM_RETRY << 16;
+ break;
+ default:
+ printk(KERN_DEBUG "megasas: FW status %#x\n", status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+}
+
+/**
+ * megasas_make_sgl_fusion - Prepares 32-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @sgl_ptr: SGL to be filled in
+ * @cmd: cmd we are working on
+ *
+ * If successful, this function returns the number of SG elements.
+ */
+static int
+megasas_make_sgl_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+ struct megasas_cmd_fusion *cmd)
+{
+ int i, sg_processed, sge_count;
+ struct scatterlist *os_sgl;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ sge_count = scsi_dma_map(scp);
+
+ BUG_ON(sge_count < 0);
+
+ if (sge_count > instance->max_num_sge || !sge_count)
+ return sge_count;
+
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+ sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
+ sgl_ptr->Flags = 0;
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (i == sge_count - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
+ sgl_ptr++;
+
+ sg_processed = i + 1;
+
+ if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
+ (sge_count > fusion->max_sge_in_main_msg)) {
+
+ struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ if ((le16_to_cpu(cmd->io_request->IoFlags) &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset =
+ fusion->
+ chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset =
+ fusion->chain_offset_io_request;
+
+ sg_chain = sgl_ptr;
+ /* Prepare chain element */
+ sg_chain->NextChainOffset = 0;
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags =
+ (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+ sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
+
+ sgl_ptr =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+ memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
+ }
+ }
+
+ return sge_count;
+}
+
+/**
+ * megasas_set_pd_lba - Sets PD LBA
+ * @cdb: CDB
+ * @cdb_len: cdb length
+ * @start_blk: Start block of IO
+ *
+ * Used to set the PD LBA in CDB for FP IOs
+ */
+void
+megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
+ struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+{
+ struct MR_LD_RAID *raid;
+ u32 ld;
+ u64 start_blk = io_info->pdBlock;
+ u8 *cdb = io_request->CDB.CDB32;
+ u32 num_blocks = io_info->numBlocks;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+
+ /* Check if T10 PI (DIF) is enabled for this LD */
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
+ cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
+
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
+ else
+ cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
+ cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
+
+ /* LBA */
+ cdb[12] = (u8)((start_blk >> 56) & 0xff);
+ cdb[13] = (u8)((start_blk >> 48) & 0xff);
+ cdb[14] = (u8)((start_blk >> 40) & 0xff);
+ cdb[15] = (u8)((start_blk >> 32) & 0xff);
+ cdb[16] = (u8)((start_blk >> 24) & 0xff);
+ cdb[17] = (u8)((start_blk >> 16) & 0xff);
+ cdb[18] = (u8)((start_blk >> 8) & 0xff);
+ cdb[19] = (u8)(start_blk & 0xff);
+
+ /* Logical block reference tag */
+ io_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(ref_tag);
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
+ io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
+
+ /* Transfer length */
+ cdb[28] = (u8)((num_blocks >> 24) & 0xff);
+ cdb[29] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[30] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[31] = (u8)(num_blocks & 0xff);
+
+ /* set SCSI IO EEDPFlags */
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ io_request->EEDPFlags = cpu_to_le16(
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
+ } else {
+ io_request->EEDPFlags = cpu_to_le16(
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
+ }
+ io_request->Control |= cpu_to_le32((0x4 << 26));
+ io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
+ } else {
+ /* Some drives don't support 16/12 byte CDB's, convert to 10 */
+ if (((cdb_len == 12) || (cdb_len == 16)) &&
+ (start_blk <= 0xffffffff)) {
+ if (cdb_len == 16) {
+ opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[14];
+ control = cdb[15];
+ } else {
+ opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[6] = groupnum;
+ cdb[9] = control;
+
+ /* Transfer length */
+ cdb[8] = (u8)(num_blocks & 0xff);
+ cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+
+ io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
+ cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
+ cdb_len = 16;
+ }
+
+ /* Normal case, just load LBA here */
+ switch (cdb_len) {
+ case 6:
+ {
+ u8 val = cdb[1] & 0xE0;
+ cdb[3] = (u8)(start_blk & 0xff);
+ cdb[2] = (u8)((start_blk >> 8) & 0xff);
+ cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
+ break;
+ }
+ case 10:
+ cdb[5] = (u8)(start_blk & 0xff);
+ cdb[4] = (u8)((start_blk >> 8) & 0xff);
+ cdb[3] = (u8)((start_blk >> 16) & 0xff);
+ cdb[2] = (u8)((start_blk >> 24) & 0xff);
+ break;
+ case 12:
+ cdb[5] = (u8)(start_blk & 0xff);
+ cdb[4] = (u8)((start_blk >> 8) & 0xff);
+ cdb[3] = (u8)((start_blk >> 16) & 0xff);
+ cdb[2] = (u8)((start_blk >> 24) & 0xff);
+ break;
+ case 16:
+ cdb[9] = (u8)(start_blk & 0xff);
+ cdb[8] = (u8)((start_blk >> 8) & 0xff);
+ cdb[7] = (u8)((start_blk >> 16) & 0xff);
+ cdb[6] = (u8)((start_blk >> 24) & 0xff);
+ cdb[5] = (u8)((start_blk >> 32) & 0xff);
+ cdb[4] = (u8)((start_blk >> 40) & 0xff);
+ cdb[3] = (u8)((start_blk >> 48) & 0xff);
+ cdb[2] = (u8)((start_blk >> 56) & 0xff);
+ break;
+ }
+ }
+}
+
+/**
+ * megasas_build_ldio_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Prepares the io_request and chain elements (sg_frame) for IO
+ * The IO can be for PD (Fast Path) or LD
+ */
+void
+megasas_build_ldio_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ u8 fp_possible;
+ u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct IO_REQUEST_INFO io_info;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ u8 *raidLUN;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+ fusion = instance->ctrl_context;
+
+ io_request = cmd->io_request;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
+ io_request->RaidContext.status = 0;
+ io_request->RaidContext.exStatus = 0;
+
+ req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+ fp_possible = 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ datalength = (u32) scp->cmnd[4];
+ start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ datalength = (u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8);
+ start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ datalength = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ datalength = ((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+ start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+ start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = datalength;
+ io_info.ldTgtId = device_id;
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
+
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ io_info.isRead = 1;
+
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+
+ if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
+ instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
+ io_request->RaidContext.regLockFlags = 0;
+ fp_possible = 0;
+ } else {
+ if (MR_BuildRaidContext(instance, &io_info,
+ &io_request->RaidContext,
+ local_map_ptr, &raidLUN))
+ fp_possible = io_info.fpOkForIo;
+ }
+
+ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
+ id by default, not CPU group id, otherwise all MSI-X queues won't
+ be utilized */
+ cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
+ raw_smp_processor_id() % instance->msix_vectors : 0;
+
+ if (fp_possible) {
+ megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+ local_map_ptr, start_lba_lo);
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
+ if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
+ (io_info.isRead)) {
+ io_info.devHandle =
+ get_updated_dev_handle(instance,
+ &fusion->load_balance_info[device_id],
+ &io_info);
+ scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->pd_r1_lb = io_info.pd_after_lb;
+ } else
+ scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+ io_request->DevHandle = io_info.devHandle;
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raidLUN, 8);
+ } else {
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
+ << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ } /* Not FP */
+}
+
+/**
+ * megasas_build_dcdb_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Prepares the io_request frame for non-io cmds
+ */
+static void
+megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd,
+ struct megasas_cmd_fusion *cmd)
+{
+ u32 device_id;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ u16 pd_index = 0;
+ u16 os_timeout_value;
+ u16 timeout_limit;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ struct fusion_context *fusion = instance->ctrl_context;
+ u8 span, physArm;
+ u16 devHandle;
+ u32 ld, arRef, pd;
+ struct MR_LD_RAID *raid;
+ struct RAID_CONTEXT *pRAID_Context;
+
+ io_request = cmd->io_request;
+ device_id = MEGASAS_DEV_INDEX(instance, scmd);
+ pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+ +scmd->device->id;
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
+ instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ io_request->RaidContext.RAIDFlags =
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->msix_vectors ?
+ raw_smp_processor_id() %
+ instance->msix_vectors :
+ 0;
+ os_timeout_value = scmd->request->timeout / HZ;
+
+ if (instance->secure_jbod_support &&
+ (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) {
+ /* system pd firmware path */
+ io_request->Function =
+ MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16(os_timeout_value);
+ } else {
+ /* system pd Fast Path */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+ timeout_limit = (scmd->device->type == TYPE_DISK) ?
+ 255 : 0xFFFF;
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16((os_timeout_value > timeout_limit) ?
+ timeout_limit : os_timeout_value);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ io_request->IoFlags |=
+ cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+ } else {
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ goto NonFastPath;
+
+ /*
+ * For older firmware, Driver should not access ldTgtIdToLd
+ * beyond index 127 and for Extended VD firmware, ldTgtIdToLd
+ * should not go beyond 255.
+ */
+
+ if ((!fusion->fast_path_io) ||
+ (device_id >= instance->fw_supported_vd_count))
+ goto NonFastPath;
+
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+
+ if (ld >= instance->fw_supported_vd_count)
+ goto NonFastPath;
+
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+
+ /* check if this LD is FP capable */
+ if (!(raid->capability.fpNonRWCapable))
+ /* not FP capable, send as non-FP */
+ goto NonFastPath;
+
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+
+ /* set RAID context values */
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /* get the DevHandle for the PD (since this is
+ fpNonRWCapable, this is a single disk RAID0) */
+ span = physArm = 0;
+ arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+ pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+ devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+ /* build request descriptor */
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raid->LUN, 8);
+
+ /* build the raidScsiIO structure */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->DevHandle = devHandle;
+
+ return;
+
+NonFastPath:
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
+ int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN);
+}
+
+/**
+ * megasas_build_io_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Invokes helper functions to prepare request frames
+ * and sets flags appropriate for IO/Non-IO cmd
+ */
+int
+megasas_build_io_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ u32 device_id, sge_count;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+ /* Zero out some fields so they don't get reused */
+ memset(io_request->LUN, 0x0, 8);
+ io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
+ io_request->EEDPFlags = 0;
+ io_request->Control = 0;
+ io_request->EEDPBlockSize = 0;
+ io_request->ChainOffset = 0;
+ io_request->RaidContext.RAIDFlags = 0;
+ io_request->RaidContext.Type = 0;
+ io_request->RaidContext.nseg = 0;
+
+ memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
+ /*
+ * Just the CDB length,rest of the Flags are zero
+ * This will be modified for FP in build_ldio_fusion
+ */
+ io_request->IoFlags = cpu_to_le16(scp->cmd_len);
+
+ if (megasas_cmd_type(scp) == READ_WRITE_LDIO)
+ megasas_build_ldio_fusion(instance, scp, cmd);
+ else
+ megasas_build_dcdb_fusion(instance, scp, cmd);
+
+ /*
+ * Construct SGL
+ */
+
+ sge_count =
+ megasas_make_sgl_fusion(instance, scp,
+ (struct MPI25_IEEE_SGE_CHAIN64 *)
+ &io_request->SGL, cmd);
+
+ if (sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
+ "max (0x%x) allowed\n", sge_count,
+ instance->max_num_sge);
+ return 1;
+ }
+
+ io_request->RaidContext.numSGE = sge_count;
+
+ io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
+
+ io_request->SGLOffset0 =
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
+
+ io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+
+ cmd->scmd = scp;
+ scp->SCp.ptr = (char *)cmd;
+
+ return 0;
+}
+
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
+{
+ u8 *p;
+ struct fusion_context *fusion;
+
+ if (index >= instance->max_fw_cmds) {
+ printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
+ "descriptor for scsi%d\n", index,
+ instance->host->host_no);
+ return NULL;
+ }
+ fusion = instance->ctrl_context;
+ p = fusion->req_frames_desc
+ +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
+
+ return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
+
+/**
+ * megasas_build_and_issue_cmd_fusion -Main routine for building and
+ * issuing non IOCTL cmd
+ * @instance: Adapter soft state
+ * @scmd: pointer to scsi cmd from OS
+ */
+static u32
+megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd_fusion *cmd;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u32 index;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd_fusion(instance);
+ if (!cmd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ index = cmd->index;
+
+ req_desc = megasas_get_request_descriptor(instance, index-1);
+ if (!req_desc)
+ return 1;
+
+ req_desc->Words = 0;
+ cmd->request_desc = req_desc;
+
+ if (megasas_build_io_fusion(instance, scmd, cmd)) {
+ megasas_return_cmd_fusion(instance, cmd);
+ printk(KERN_ERR "megasas: Error building command.\n");
+ cmd->request_desc = NULL;
+ return 1;
+ }
+
+ req_desc = cmd->request_desc;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+ if (cmd->io_request->ChainOffset != 0 &&
+ cmd->io_request->ChainOffset != 0xF)
+ printk(KERN_ERR "megasas: The chain offset value is not "
+ "correct : %x\n", cmd->io_request->ChainOffset);
+
+ /*
+ * Issue the command to the FW
+ */
+ atomic_inc(&instance->fw_outstanding);
+
+ instance->instancet->fire_cmd(instance,
+ req_desc->u.low, req_desc->u.high,
+ instance->reg_set);
+
+ return 0;
+}
+
+/**
+ * complete_cmd_fusion - Completes command
+ * @instance: Adapter soft state
+ * Completes all commands that is in reply descriptor queue
+ */
+int
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+{
+ union MPI2_REPLY_DESCRIPTORS_UNION *desc;
+ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ struct megasas_cmd_fusion *cmd_fusion;
+ u16 smid, num_completed;
+ u8 reply_descript_type;
+ u32 status, extStatus, device_id;
+ union desc_value d_val;
+ struct LD_LOAD_BALANCE_INFO *lbinfo;
+ int threshold_reply_count = 0;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return IRQ_HANDLED;
+
+ desc = fusion->reply_frames_desc;
+ desc += ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
+ fusion->last_reply_idx[MSIxIndex];
+
+ reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ return IRQ_NONE;
+
+ num_completed = 0;
+
+ while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
+ smid = le16_to_cpu(reply_desc->SMID);
+
+ cmd_fusion = fusion->cmd_list[smid - 1];
+
+ scsi_io_req =
+ (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+
+ if (cmd_fusion->scmd)
+ cmd_fusion->scmd->SCp.ptr = NULL;
+
+ status = scsi_io_req->RaidContext.status;
+ extStatus = scsi_io_req->RaidContext.exStatus;
+
+ switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
+ /* Update load balancing info */
+ device_id = MEGASAS_DEV_INDEX(instance,
+ cmd_fusion->scmd);
+ lbinfo = &fusion->load_balance_info[device_id];
+ if (cmd_fusion->scmd->SCp.Status &
+ MEGASAS_LOAD_BALANCE_FLAG) {
+ atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
+ cmd_fusion->scmd->SCp.Status &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
+ }
+ if (reply_descript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ if (megasas_dbg_lvl == 5)
+ printk(KERN_ERR "\nmegasas: FAST Path "
+ "IO Success\n");
+ }
+ /* Fall thru and complete IO */
+ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
+ /* Map the FW Cmd Status */
+ map_cmd_status(cmd_fusion, status, extStatus);
+ scsi_dma_unmap(cmd_fusion->scmd);
+ cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ scsi_io_req->RaidContext.status = 0;
+ scsi_io_req->RaidContext.exStatus = 0;
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ atomic_dec(&instance->fw_outstanding);
+
+ break;
+ case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+ if (!cmd_mfi->mpt_pthr_cmd_blocked) {
+ if (megasas_dbg_lvl == 5)
+ dev_info(&instance->pdev->dev,
+ "freeing mfi/mpt pass-through "
+ "from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
+ cmd_fusion);
+ }
+
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ cmd_fusion->flags = 0;
+ break;
+ }
+
+ fusion->last_reply_idx[MSIxIndex]++;
+ if (fusion->last_reply_idx[MSIxIndex] >=
+ fusion->reply_q_depth)
+ fusion->last_reply_idx[MSIxIndex] = 0;
+
+ desc->Words = ULLONG_MAX;
+ num_completed++;
+ threshold_reply_count++;
+
+ /* Get the next reply descriptor */
+ if (!fusion->last_reply_idx[MSIxIndex])
+ desc = fusion->reply_frames_desc +
+ ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+ else
+ desc++;
+
+ reply_desc =
+ (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+ /*
+ * Write to reply post host index register after completing threshold
+ * number of reply counts and still there are more replies in reply queue
+ * pending to be completed
+ */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ threshold_reply_count = 0;
+ }
+ }
+
+ if (!num_completed)
+ return IRQ_NONE;
+
+ wmb();
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ megasas_check_and_restore_queue_depth(instance);
+ return IRQ_HANDLED;
+}
+
+/**
+ * megasas_complete_cmd_dpc_fusion - Completes command
+ * @instance: Adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+void
+megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+ u32 count, MSIxIndex;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+ /* If we have already declared adapter dead, donot complete cmds */
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
+ complete_cmd_fusion(instance, MSIxIndex);
+}
+
+/**
+ * megasas_isr_fusion - isr entry point
+ */
+irqreturn_t megasas_isr_fusion(int irq, void *devp)
+{
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
+ u32 mfiStatus, fw_state, dma_state;
+
+ if (instance->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!instance->msix_vectors) {
+ mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+ if (!mfiStatus)
+ return IRQ_NONE;
+ }
+
+ /* If we are resetting, bail */
+ if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
+ instance->instancet->clear_intr(instance->reg_set);
+ return IRQ_HANDLED;
+ }
+
+ if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
+ instance->instancet->clear_intr(instance->reg_set);
+ /* If we didn't complete any commands, check for FW fault */
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ dma_state = instance->instancet->read_fw_status_reg
+ (instance->reg_set) & MFI_STATE_DMADONE;
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support) {
+ /* Start collecting crash, if DMA bit is done */
+ if ((fw_state == MFI_STATE_FAULT) && dma_state)
+ schedule_work(&instance->crash_init);
+ else if (fw_state == MFI_STATE_FAULT)
+ schedule_work(&instance->work_init);
+ } else if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+ "for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
+ * @instance: Adapter soft state
+ * mfi_cmd: megasas_cmd pointer
+ *
+ */
+u8
+build_mpt_mfi_pass_thru(struct megasas_instance *instance,
+ struct megasas_cmd *mfi_cmd)
+{
+ struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
+ struct megasas_cmd_fusion *cmd;
+ struct fusion_context *fusion;
+ struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+ u32 opcode;
+
+ cmd = megasas_get_cmd_fusion(instance);
+ if (!cmd)
+ return 1;
+
+ /* Save the smid. To be used for returning the cmd */
+ mfi_cmd->context.smid = cmd->index;
+ cmd->sync_cmd_idx = mfi_cmd->index;
+
+ /* Set this only for Blocked commands */
+ opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
+ mfi_cmd->is_wait_event = 1;
+
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ mfi_cmd->is_wait_event = 1;
+
+ if (mfi_cmd->is_wait_event)
+ mfi_cmd->mpt_pthr_cmd_blocked = cmd;
+
+ /*
+ * For cmds where the flag is set, store the flag and check
+ * on completion. For cmds with this flag, don't call
+ * megasas_complete_cmd
+ */
+
+ if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
+ cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ fusion = instance->ctrl_context;
+ io_req = cmd->io_request;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ mpi25_ieee_chain =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+ io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+ io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
+ SGL) / 4;
+ io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
+
+ mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
+
+ mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+ mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
+
+ return 0;
+}
+
+/**
+ * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
+ * @instance: Adapter soft state
+ * @cmd: mfi cmd to build
+ *
+ */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u16 index;
+
+ if (build_mpt_mfi_pass_thru(instance, cmd)) {
+ printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
+ return NULL;
+ }
+
+ index = cmd->context.smid;
+
+ req_desc = megasas_get_request_descriptor(instance, index - 1);
+
+ if (!req_desc)
+ return NULL;
+
+ req_desc->Words = 0;
+ req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+ return req_desc;
+}
+
+/**
+ * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
+ * @instance: Adapter soft state
+ * @cmd: mfi cmd pointer
+ *
+ */
+void
+megasas_issue_dcmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ req_desc = build_mpt_cmd(instance, cmd);
+ if (!req_desc) {
+ printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
+ return;
+ }
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
+ instance->instancet->fire_cmd(instance, req_desc->u.low,
+ req_desc->u.high, instance->reg_set);
+}
+
+/**
+ * megasas_release_fusion - Reverses the FW initialization
+ * @intance: Adapter soft state
+ */
+void
+megasas_release_fusion(struct megasas_instance *instance)
+{
+ megasas_free_cmds(instance);
+ megasas_free_cmds_fusion(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_read_fw_status_reg_fusion - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
+ * @instance: Controller's soft instance
+ * return: Number of allocated host crash buffers
+ */
+static void
+megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
+{
+ unsigned int i;
+
+ instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
+ for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
+ instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
+ instance->crash_buf_pages);
+ if (!instance->crash_buf[i]) {
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "memory allocation failed at index %d\n", i);
+ break;
+ }
+ memset(instance->crash_buf[i], 0,
+ ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
+ }
+ instance->drv_buf_alloc = i;
+}
+
+/**
+ * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
+ * @instance: Controller's soft instance
+ */
+void
+megasas_free_host_crash_buffer(struct megasas_instance *instance)
+{
+ unsigned int i
+;
+ for (i = 0; i < instance->drv_buf_alloc; i++) {
+ if (instance->crash_buf[i])
+ free_pages((ulong)instance->crash_buf[i],
+ instance->crash_buf_pages);
+ }
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ instance->fw_crash_buffer_size = 0;
+}
+
+/**
+ * megasas_adp_reset_fusion - For controller reset
+ * @regs: MFI register set
+ */
+static int
+megasas_adp_reset_fusion(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ return 0;
+}
+
+/**
+ * megasas_check_reset_fusion - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_fusion(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ return 0;
+}
+
+/* This function waits for outstanding commands on fusion to complete */
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int iotimeout, int *convert)
+{
+ int i, outstanding, retval = 0, hb_seconds_missed = 0;
+ u32 fw_state;
+
+ for (i = 0; i < resetwaittime; i++) {
+ /* Check if firmware is in fault state */
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megasas: Found FW in FAULT state,"
+ " will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ /* If SR-IOV VF mode & heartbeat timeout, don't wait */
+ if (instance->requestorId && !iotimeout) {
+ retval = 1;
+ goto out;
+ }
+
+ /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+ if (instance->requestorId && iotimeout) {
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ hb_seconds_missed = 0;
+ } else {
+ hb_seconds_missed++;
+ if (hb_seconds_missed ==
+ (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ " Heartbeat never completed "
+ " while polling during I/O "
+ " timeout handling for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ *convert = 1;
+ retval = 1;
+ goto out;
+ }
+ }
+ }
+
+ outstanding = atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ goto out;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ "commands to complete for scsi%d\n", i,
+ outstanding, instance->host->host_no);
+ megasas_complete_cmd_dpc_fusion(
+ (unsigned long)instance);
+ }
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->fw_outstanding)) {
+ printk("megaraid_sas: pending commands remain after waiting, "
+ "will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ }
+out:
+ return retval;
+}
+
+void megasas_reset_reply_desc(struct megasas_instance *instance)
+{
+ int i, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+ fusion = instance->ctrl_context;
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count ; i++)
+ fusion->last_reply_idx[i] = 0;
+ reply_desc = fusion->reply_frames_desc;
+ for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+}
+
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ int i, j, retval = (DID_RESET << 16);
+
+ if (instance->mpio && instance->requestorId) {
+ for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+ for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->mpio &&
+ megasas_mgmt_info.instance[i]->requestorId
+ &&
+ (megasas_mgmt_info.instance[i]->ld_ids[j]
+ == scmd->device->id)) {
+ retval = (DID_NO_CONNECT << 16);
+ goto out;
+ }
+ }
+out:
+ return retval;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+{
+ int retval = SUCCESS, i, j, retry = 0, convert = 0;
+ struct megasas_instance *instance;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
+ u32 io_timeout_in_crash_mode = 0;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+ fusion = instance->ctrl_context;
+
+ mutex_lock(&instance->reset_mutex);
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
+ "returning FAILED for scsi%d.\n",
+ instance->host->host_no);
+ mutex_unlock(&instance->reset_mutex);
+ return FAILED;
+ }
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+
+ /* IO timeout detected, forcibly put FW in FAULT state */
+ if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
+ instance->crash_dump_app_support && iotimeout) {
+ dev_info(&instance->pdev->dev, "IO timeout is detected, "
+ "forcibly FAULT Firmware\n");
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ status_reg = readl(&instance->reg_set->doorbell);
+ writel(status_reg | MFI_STATE_FORCE_OCR,
+ &instance->reg_set->doorbell);
+ readl(&instance->reg_set->doorbell);
+ mutex_unlock(&instance->reset_mutex);
+ do {
+ ssleep(3);
+ io_timeout_in_crash_mode++;
+ dev_dbg(&instance->pdev->dev, "waiting for [%d] "
+ "seconds for crash dump collection and OCR "
+ "to be done\n", (io_timeout_in_crash_mode * 3));
+ } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (io_timeout_in_crash_mode < 80));
+
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "OCR done for IO "
+ "timeout case\n");
+ retval = SUCCESS;
+ } else {
+ dev_info(&instance->pdev->dev, "Controller is not "
+ "operational after 240 seconds wait for IO "
+ "timeout case in FW crash dump mode\n do "
+ "OCR/kill adapter\n");
+ retval = megasas_reset_fusion(shost, 0);
+ }
+ return retval;
+ }
+
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+
+ /* First try waiting for commands to complete */
+ if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ &convert)) {
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ printk(KERN_WARNING "megaraid_sas: resetting fusion "
+ "adapter scsi%d.\n", instance->host->host_no);
+ if (convert)
+ iotimeout = 0;
+
+ /* Now return commands back to the OS */
+ for (i = 0 ; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd) {
+ scsi_dma_unmap(cmd_fusion->scmd);
+ cmd_fusion->scmd->result =
+ megasas_check_mpio_paths(instance,
+ cmd_fusion->scmd);
+ cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ atomic_dec(&instance->fw_outstanding);
+ }
+ }
+
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+ /* Reset not supported, kill adapter */
+ printk(KERN_WARNING "megaraid_sas: Reset not supported"
+ ", killing adapter scsi%d.\n",
+ instance->host->host_no);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ retval = FAILED;
+ goto out;
+ }
+
+ /* Let SR-IOV VF & PF sync up if there was a HB failure */
+ if (instance->requestorId && !iotimeout) {
+ msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+ /* Look for a late HB update after VF settle time */
+ if (abs_state == MFI_STATE_OPERATIONAL &&
+ (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter)) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ "Late FW heartbeat update for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ } else {
+ /* In VF mode, first poll for FW ready */
+ for (i = 0;
+ i < (MEGASAS_RESET_WAIT_TIME * 1000);
+ i += 20) {
+ status_reg =
+ instance->instancet->
+ read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg &
+ MFI_STATE_MASK;
+ if (abs_state == MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas"
+ ": SR-IOV: FW was found"
+ "to be in ready state "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ break;
+ }
+ msleep(20);
+ }
+ if (abs_state != MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "FW not in ready state after %d"
+ " seconds for scsi%d, status_reg = "
+ "0x%x.\n",
+ MEGASAS_RESET_WAIT_TIME,
+ instance->host->host_no,
+ status_reg);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery =
+ MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ goto out;
+ }
+ }
+ }
+
+ /* Now try to reset the chip */
+ for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+
+ /* Check that the diag write enable (DRWE) bit is on */
+ host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
+ while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+ msleep(100);
+ host_diag =
+ readl(&instance->reg_set->fusion_host_diag);
+ if (retry++ == 100) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "Host diag unlock failed! "
+ "for scsi%d\n",
+ instance->host->host_no);
+ break;
+ }
+ }
+ if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+ continue;
+
+ /* Send chip reset command */
+ writel(host_diag | HOST_DIAG_RESET_ADAPTER,
+ &instance->reg_set->fusion_host_diag);
+ msleep(3000);
+
+ /* Make sure reset adapter bit is cleared */
+ host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
+ while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+ msleep(100);
+ host_diag =
+ readl(&instance->reg_set->fusion_host_diag);
+ if (retry++ == 1000) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "Diag reset adapter never "
+ "cleared for scsi%d!\n",
+ instance->host->host_no);
+ break;
+ }
+ }
+ if (host_diag & HOST_DIAG_RESET_ADAPTER)
+ continue;
+
+ abs_state =
+ instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ retry = 0;
+
+ while ((abs_state <= MFI_STATE_FW_INIT) &&
+ (retry++ < 1000)) {
+ msleep(100);
+ abs_state =
+ instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ printk(KERN_WARNING "megaraid_sas: firmware "
+ "state < MFI_STATE_FW_INIT, state = "
+ "0x%x for scsi%d\n", abs_state,
+ instance->host->host_no);
+ continue;
+ }
+
+ /* Wait for FW to become ready */
+ if (megasas_transition_to_ready(instance, 1)) {
+ printk(KERN_WARNING "megaraid_sas: Failed to "
+ "transition controller to ready "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ continue;
+ }
+
+ megasas_reset_reply_desc(instance);
+ if (megasas_ioc_init_fusion(instance)) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "megasas_ioc_init_fusion() failed!"
+ " for scsi%d\n",
+ instance->host->host_no);
+ continue;
+ }
+
+ /* Re-fire management commands */
+ for (j = 0 ; j < instance->max_fw_cmds; j++) {
+ cmd_fusion = fusion->cmd_list[j];
+ if (cmd_fusion->sync_cmd_idx !=
+ (u32)ULONG_MAX) {
+ cmd_mfi =
+ instance->
+ cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->frame->dcmd.opcode ==
+ cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
+ } else {
+ req_desc =
+ megasas_get_request_descriptor(
+ instance,
+ cmd_mfi->context.smid
+ -1);
+ if (!req_desc) {
+ printk(KERN_WARNING
+ "req_desc NULL"
+ " for scsi%d\n",
+ instance->host->host_no);
+ /* Return leaked MPT
+ frame */
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ } else {
+ instance->instancet->
+ fire_cmd(instance,
+ req_desc->
+ u.low,
+ req_desc->
+ u.high,
+ instance->
+ reg_set);
+ }
+ }
+ }
+ }
+
+ if (megasas_get_ctrl_info(instance)) {
+ dev_info(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ megaraid_sas_kill_hba(instance);
+ retval = FAILED;
+ }
+ /* Reset load balance info */
+ memset(fusion->load_balance_info, 0,
+ sizeof(struct LD_LOAD_BALANCE_INFO)
+ *MAX_LOGICAL_DRIVES_EXT);
+
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+
+ clear_bit(MEGASAS_FUSION_IN_RESET,
+ &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
+ /* Restart SR-IOV heartbeat */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ /* Adapter reset completed successfully */
+ printk(KERN_WARNING "megaraid_sas: Reset "
+ "successful for scsi%d.\n",
+ instance->host->host_no);
+
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support)
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_ON);
+ else
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+
+ retval = SUCCESS;
+ goto out;
+ }
+ /* Reset failed, kill the adapter */
+ printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
+ "adapter scsi%d.\n", instance->host->host_no);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ retval = FAILED;
+ } else {
+ /* For VF: Restart HB timer if we didn't OCR */
+ if (instance->requestorId) {
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ }
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ }
+out:
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ mutex_unlock(&instance->reset_mutex);
+ return retval;
+}
+
+/* Fusion Crash dump collection work queue */
+void megasas_fusion_crash_dump_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, crash_init);
+ u32 status_reg;
+ u8 partial_copy = 0;
+
+
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+
+ /*
+ * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
+ * to host crash buffers
+ */
+ if (instance->drv_buf_index == 0) {
+ /* Buffer is already allocated for old Crash dump.
+ * Do OCR and do not wait for crash dump collection
+ */
+ if (instance->drv_buf_alloc) {
+ dev_info(&instance->pdev->dev, "earlier crash dump is "
+ "not yet copied by application, ignoring this "
+ "crash dump and initiating OCR\n");
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ writel(status_reg,
+ &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ return;
+ }
+ megasas_alloc_host_crash_buffer(instance);
+ dev_info(&instance->pdev->dev, "Number of host crash buffers "
+ "allocated: %d\n", instance->drv_buf_alloc);
+ }
+
+ /*
+ * Driver has allocated max buffers, which can be allocated
+ * and FW has more crash dump data, then driver will
+ * ignore the data.
+ */
+ if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
+ dev_info(&instance->pdev->dev, "Driver is done copying "
+ "the buffer: %d\n", instance->drv_buf_alloc);
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ partial_copy = 1;
+ } else {
+ memcpy(instance->crash_buf[instance->drv_buf_index],
+ instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+ instance->drv_buf_index++;
+ status_reg &= ~MFI_STATE_DMADONE;
+ }
+
+ if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
+ dev_info(&instance->pdev->dev, "Crash Dump is available,number "
+ "of copied buffers: %d\n", instance->drv_buf_index);
+ instance->fw_crash_buffer_size = instance->drv_buf_index;
+ instance->fw_crash_state = AVAILABLE;
+ instance->drv_buf_index = 0;
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ if (!partial_copy)
+ megasas_reset_fusion(instance->host, 0);
+ } else {
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ }
+}
+
+
+/* Fusion OCR work queue */
+void megasas_fusion_ocr_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, work_init);
+
+ megasas_reset_fusion(instance->host, 0);
+}
+
+struct megasas_instance_template megasas_instance_template_fusion = {
+ .fire_cmd = megasas_fire_cmd_fusion,
+ .enable_intr = megasas_enable_intr_fusion,
+ .disable_intr = megasas_disable_intr_fusion,
+ .clear_intr = megasas_clear_intr_fusion,
+ .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
+ .adp_reset = megasas_adp_reset_fusion,
+ .check_reset = megasas_check_reset_fusion,
+ .service_isr = megasas_isr_fusion,
+ .tasklet = megasas_complete_cmd_dpc_fusion,
+ .init_adapter = megasas_init_adapter_fusion,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
+ .issue_dcmd = megasas_issue_dcmd_fusion,
+};
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
new file mode 100644
index 000000000..56e6db2d5
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -0,0 +1,850 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas_fusion.h
+ *
+ * Authors: Avago Technologies
+ * Manoj Jose
+ * Sumant Patro
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#ifndef _MEGARAID_SAS_FUSION_H_
+#define _MEGARAID_SAS_FUSION_H_
+
+/* Fusion defines */
+#define MEGASAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+#define MEGASAS_LOAD_BALANCE_FLAG 0x1
+#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
+#define HOST_DIAG_WRITE_ENABLE 0x80
+#define HOST_DIAG_RESET_ADAPTER 0x4
+#define MEGASAS_FUSION_MAX_RESET_TRIES 3
+#define MAX_MSIX_QUEUES_FUSION 128
+
+/* Invader defines */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+
+/* T10 PI defines */
+#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
+#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
+#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
+
+#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+
+/*
+ * Raid context flags
+ */
+
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
+enum MR_RAID_FLAGS_IO_SUB_TYPE {
+ MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+};
+
+/*
+ * Request descriptor types
+ */
+#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+
+#define MEGASAS_FP_CMD_LEN 16
+#define MEGASAS_FUSION_IN_RESET 0
+#define THRESHOLD_REPLY_COUNT 50
+
+/*
+ * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ */
+
+struct RAID_CONTEXT {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 nseg:4;
+ u8 Type:4;
+#else
+ u8 Type:4;
+ u8 nseg:4;
+#endif
+ u8 resvd0;
+ u16 timeoutValue;
+ u8 regLockFlags;
+ u8 resvd1;
+ u16 VirtualDiskTgtId;
+ u64 regLockRowLBA;
+ u32 regLockLength;
+ u16 nextLMId;
+ u8 exStatus;
+ u8 status;
+ u8 RAIDFlags;
+ u8 numSGE;
+ u16 configSeqNum;
+ u8 spanArm;
+ u8 resvd2[3];
+};
+
+#define RAID_CTX_SPANARM_ARM_SHIFT (0)
+#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
+
+#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
+#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
+
+/*
+ * define region lock types
+ */
+enum REGION_TYPE {
+ REGION_TYPE_UNUSED = 0,
+ REGION_TYPE_SHARED_READ = 1,
+ REGION_TYPE_SHARED_WRITE = 2,
+ REGION_TYPE_EXCLUSIVE = 3,
+};
+
+/* MPI2 defines */
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+struct MPI25_IEEE_SGE_CHAIN64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 NextChainOffset;
+ u8 Flags;
+};
+
+struct MPI2_SGE_SIMPLE_UNION {
+ u32 FlagsLength;
+ union {
+ u32 Address32;
+ u64 Address64;
+ } u;
+};
+
+struct MPI2_SCSI_IO_CDB_EEDP32 {
+ u8 CDB[20]; /* 0x00 */
+ u32 PrimaryReferenceTag; /* 0x14 */
+ u16 PrimaryApplicationTag; /* 0x18 */
+ u16 PrimaryApplicationTagMask; /* 0x1A */
+ u32 TransferLength; /* 0x1C */
+};
+
+struct MPI2_SGE_CHAIN_UNION {
+ u16 Length;
+ u8 NextChainOffset;
+ u8 Flags;
+ union {
+ u32 Address32;
+ u64 Address64;
+ } u;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE32 {
+ u32 Address;
+ u32 FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_CHAIN32 {
+ u32 Address;
+ u32 FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 Reserved2;
+ u8 Flags;
+};
+
+struct MPI2_IEEE_SGE_CHAIN64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 Reserved2;
+ u8 Flags;
+};
+
+union MPI2_IEEE_SGE_SIMPLE_UNION {
+ struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
+};
+
+union MPI2_IEEE_SGE_CHAIN_UNION {
+ struct MPI2_IEEE_SGE_CHAIN32 Chain32;
+ struct MPI2_IEEE_SGE_CHAIN64 Chain64;
+};
+
+union MPI2_SGE_IO_UNION {
+ struct MPI2_SGE_SIMPLE_UNION MpiSimple;
+ struct MPI2_SGE_CHAIN_UNION MpiChain;
+ union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+};
+
+union MPI2_SCSI_IO_CDB_UNION {
+ u8 CDB32[32];
+ struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ struct MPI2_SGE_SIMPLE_UNION SGE;
+};
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+struct MPI2_RAID_SCSI_IO_REQUEST {
+ u16 DevHandle; /* 0x00 */
+ u8 ChainOffset; /* 0x02 */
+ u8 Function; /* 0x03 */
+ u16 Reserved1; /* 0x04 */
+ u8 Reserved2; /* 0x06 */
+ u8 MsgFlags; /* 0x07 */
+ u8 VP_ID; /* 0x08 */
+ u8 VF_ID; /* 0x09 */
+ u16 Reserved3; /* 0x0A */
+ u32 SenseBufferLowAddress; /* 0x0C */
+ u16 SGLFlags; /* 0x10 */
+ u8 SenseBufferLength; /* 0x12 */
+ u8 Reserved4; /* 0x13 */
+ u8 SGLOffset0; /* 0x14 */
+ u8 SGLOffset1; /* 0x15 */
+ u8 SGLOffset2; /* 0x16 */
+ u8 SGLOffset3; /* 0x17 */
+ u32 SkipCount; /* 0x18 */
+ u32 DataLength; /* 0x1C */
+ u32 BidirectionalDataLength; /* 0x20 */
+ u16 IoFlags; /* 0x24 */
+ u16 EEDPFlags; /* 0x26 */
+ u32 EEDPBlockSize; /* 0x28 */
+ u32 SecondaryReferenceTag; /* 0x2C */
+ u16 SecondaryApplicationTag; /* 0x30 */
+ u16 ApplicationTagTranslationMask; /* 0x32 */
+ u8 LUN[8]; /* 0x34 */
+ u32 Control; /* 0x3C */
+ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ struct RAID_CONTEXT RaidContext; /* 0x60 */
+ union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+};
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+ u32 RequestFlags:8;
+ u32 MessageAddress1:24;
+ u32 MessageAddress2;
+};
+
+/* Default Request Descriptor */
+struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 DescriptorTypeDependent; /* 0x06 */
+};
+
+/* High Priority Request Descriptor */
+struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 Reserved1; /* 0x06 */
+};
+
+/* SCSI IO Request Descriptor */
+struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 DevHandle; /* 0x06 */
+};
+
+/* SCSI Target Request Descriptor */
+struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* RAID Accelerator Request Descriptor */
+struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 Reserved; /* 0x06 */
+};
+
+/* union of Request Descriptors */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION {
+ struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+ u64 Words;
+ };
+};
+
+/* Default Reply Descriptor */
+struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 DescriptorTypeDependent1; /* 0x02 */
+ u32 DescriptorTypeDependent2; /* 0x04 */
+};
+
+/* Address Reply Descriptor */
+struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u32 ReplyFrameAddress; /* 0x04 */
+};
+
+/* SCSI IO Success Reply Descriptor */
+struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 TaskTag; /* 0x04 */
+ u16 Reserved1; /* 0x06 */
+};
+
+/* TargetAssist Success Reply Descriptor */
+struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u8 SequenceNumber; /* 0x04 */
+ u8 Reserved1; /* 0x05 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* Target Command Buffer Reply Descriptor */
+struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u8 VP_ID; /* 0x02 */
+ u8 Flags; /* 0x03 */
+ u16 InitiatorDevHandle; /* 0x04 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* RAID Accelerator Success Reply Descriptor */
+struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u32 Reserved; /* 0x04 */
+};
+
+/* union of Reply Descriptors */
+union MPI2_REPLY_DESCRIPTORS_UNION {
+ struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+ RAIDAcceleratorSuccess;
+ u64 Words;
+};
+
+/* IOCInit Request message */
+struct MPI2_IOC_INIT_REQUEST {
+ u8 WhoInit; /* 0x00 */
+ u8 Reserved1; /* 0x01 */
+ u8 ChainOffset; /* 0x02 */
+ u8 Function; /* 0x03 */
+ u16 Reserved2; /* 0x04 */
+ u8 Reserved3; /* 0x06 */
+ u8 MsgFlags; /* 0x07 */
+ u8 VP_ID; /* 0x08 */
+ u8 VF_ID; /* 0x09 */
+ u16 Reserved4; /* 0x0A */
+ u16 MsgVersion; /* 0x0C */
+ u16 HeaderVersion; /* 0x0E */
+ u32 Reserved5; /* 0x10 */
+ u16 Reserved6; /* 0x14 */
+ u8 Reserved7; /* 0x16 */
+ u8 HostMSIxVectors; /* 0x17 */
+ u16 Reserved8; /* 0x18 */
+ u16 SystemRequestFrameSize; /* 0x1A */
+ u16 ReplyDescriptorPostQueueDepth; /* 0x1C */
+ u16 ReplyFreeQueueDepth; /* 0x1E */
+ u32 SenseBufferAddressHigh; /* 0x20 */
+ u32 SystemReplyAddressHigh; /* 0x24 */
+ u64 SystemRequestFrameBaseAddress; /* 0x28 */
+ u64 ReplyDescriptorPostQueueAddress;/* 0x30 */
+ u64 ReplyFreeQueueAddress; /* 0x38 */
+ u64 TimeStamp; /* 0x40 */
+};
+
+/* mrpriv defines */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_ARRAYS_EXT 256
+#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
+
+struct MR_DEV_HANDLE_INFO {
+ u16 curDevHdl;
+ u8 validHandles;
+ u8 reserved;
+ u16 devHandle[2];
+};
+
+struct MR_ARRAY_INFO {
+ u16 pd[MAX_RAIDMAP_ROW_SIZE];
+};
+
+struct MR_QUAD_ELEMENT {
+ u64 logStart;
+ u64 logEnd;
+ u64 offsetInSpan;
+ u32 diff;
+ u32 reserved1;
+};
+
+struct MR_SPAN_INFO {
+ u32 noElements;
+ u32 reserved1;
+ struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_LD_SPAN {
+ u64 startBlk;
+ u64 numBlks;
+ u16 arrayRef;
+ u8 spanRowSize;
+ u8 spanRowDataSize;
+ u8 reserved[4];
+};
+
+struct MR_SPAN_BLOCK_INFO {
+ u64 num_rows;
+ struct MR_LD_SPAN span;
+ struct MR_SPAN_INFO block_span_info;
+};
+
+struct MR_LD_RAID {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved4:7;
+ u32 fpNonRWCapable:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteCapable:1;
+ u32 encryptionType:8;
+ u32 pdPiMode:4;
+ u32 ldPiMode:4;
+ u32 reserved5:3;
+ u32 fpCapable:1;
+#else
+ u32 fpCapable:1;
+ u32 reserved5:3;
+ u32 ldPiMode:4;
+ u32 pdPiMode:4;
+ u32 encryptionType:8;
+ u32 fpWriteCapable:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpNonRWCapable:1;
+ u32 reserved4:7;
+#endif
+ } capability;
+ u32 reserved6;
+ u64 size;
+ u8 spanDepth;
+ u8 level;
+ u8 stripeShift;
+ u8 rowSize;
+ u8 rowDataSize;
+ u8 writeMode;
+ u8 PRL;
+ u8 SRL;
+ u16 targetId;
+ u8 ldState;
+ u8 regTypeReqOnWrite;
+ u8 modFactor;
+ u8 regTypeReqOnRead;
+ u16 seqNum;
+
+ struct {
+ u32 ldSyncRequired:1;
+ u32 reserved:31;
+ } flags;
+
+ u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+ u8 reserved3[0x80-0x2D]; /* 0x2D */
+};
+
+struct MR_LD_SPAN_MAP {
+ struct MR_LD_RAID ldRaid;
+ u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+ struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_FW_RAID_MAP {
+ u32 totalSize;
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ u32 ldCount;
+ u32 Reserved1;
+ u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
+ MAX_RAIDMAP_VIEWS];
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+ struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+};
+
+struct IO_REQUEST_INFO {
+ u64 ldStartBlock;
+ u32 numBlocks;
+ u16 ldTgtId;
+ u8 isRead;
+ u16 devHandle;
+ u64 pdBlock;
+ u8 fpOkForIo;
+ u8 IoforUnevenSpan;
+ u8 start_span;
+ u8 reserved;
+ u64 start_row;
+ u8 span_arm; /* span[7:5], arm[4:0] */
+ u8 pd_after_lb;
+};
+
+struct MR_LD_TARGET_SYNC {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+};
+
+#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+struct megasas_register_set;
+struct megasas_instance;
+
+union desc_word {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+struct megasas_cmd_fusion {
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ dma_addr_t io_request_phys_addr;
+
+ union MPI2_SGE_IO_UNION *sg_frame;
+ dma_addr_t sg_frame_phys_addr;
+
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
+ struct list_head list;
+ struct scsi_cmnd *scmd;
+ struct megasas_instance *instance;
+
+ u8 retry_for_fw_reset;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;
+
+ /*
+ * Context for a MFI frame.
+ * Used to get the mfi cmd from list when a MFI cmd is completed
+ */
+ u32 sync_cmd_idx;
+ u32 index;
+ u8 flags;
+ u8 pd_r1_lb;
+};
+
+struct LD_LOAD_BALANCE_INFO {
+ u8 loadBalanceFlag;
+ u8 reserved1;
+ atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
+ u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
+};
+
+/* SPAN_SET is info caclulated from span info from Raid map per LD */
+typedef struct _LD_SPAN_SET {
+ u64 log_start_lba;
+ u64 log_end_lba;
+ u64 span_row_start;
+ u64 span_row_end;
+ u64 data_strip_start;
+ u64 data_strip_end;
+ u64 data_row_start;
+ u64 data_row_end;
+ u8 strip_offset[MAX_SPAN_DEPTH];
+ u32 span_row_data_width;
+ u32 diff;
+ u32 reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+ LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
+
+struct MR_FW_RAID_MAP_ALL {
+ struct MR_FW_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} __attribute__ ((packed));
+
+struct MR_DRV_RAID_MAP {
+ /* total size of this structure, including this field.
+ * This feild will be manupulated by driver for ext raid map,
+ * else pick the value from firmware raid map.
+ */
+ u32 totalSize;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ /* timeout value used by driver in FP IOs*/
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+
+};
+
+/* Driver raid map size is same as raid map ext
+ * MR_DRV_RAID_MAP_ALL is created to sync with old raid.
+ * And it is mainly for code re-use purpose.
+ */
+struct MR_DRV_RAID_MAP_ALL {
+
+ struct MR_DRV_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+} __packed;
+
+
+
+struct MR_FW_RAID_MAP_EXT {
+ /* Not usred in new map */
+ u32 reserved;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+};
+
+struct fusion_context {
+ struct megasas_cmd_fusion **cmd_list;
+ struct list_head cmd_pool;
+
+ spinlock_t mpt_pool_lock;
+
+ dma_addr_t req_frames_desc_phys;
+ u8 *req_frames_desc;
+
+ struct dma_pool *io_request_frames_pool;
+ dma_addr_t io_request_frames_phys;
+ u8 *io_request_frames;
+
+ struct dma_pool *sg_dma_pool;
+ struct dma_pool *sense_dma_pool;
+
+ dma_addr_t reply_frames_desc_phys;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+ struct dma_pool *reply_frames_desc_pool;
+
+ u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
+
+ u32 reply_q_depth;
+ u32 request_alloc_sz;
+ u32 reply_alloc_sz;
+ u32 io_frames_alloc_sz;
+
+ u16 max_sge_in_main_msg;
+ u16 max_sge_in_chain;
+
+ u8 chain_offset_io_request;
+ u8 chain_offset_mfi_pthru;
+
+ struct MR_FW_RAID_MAP_ALL *ld_map[2];
+ dma_addr_t ld_map_phys[2];
+
+ /*Non dma-able memory. Driver local copy.*/
+ struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
+
+ u32 max_map_sz;
+ u32 current_map_sz;
+ u32 drv_map_sz;
+ u32 drv_map_pages;
+ u8 fast_path_io;
+ struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
+};
+
+union desc_value {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+
+#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
new file mode 100644
index 000000000..555367f00
--- /dev/null
+++ b/drivers/scsi/mesh.c
@@ -0,0 +1,2074 @@
+/*
+ * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware)
+ * bus adaptor found on Power Macintosh computers.
+ * We assume the MESH is connected to a DBDMA (descriptor-based DMA)
+ * controller.
+ *
+ * Paul Mackerras, August 1996.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * Apr. 21 2002 - BenH Rework bus reset code for new error handler
+ * Add delay after initial bus reset
+ * Add module parameters
+ *
+ * Sep. 27 2003 - BenH Move to new driver model, fix some write posting
+ * issues
+ * To do:
+ * - handle aborts correctly
+ * - retry arbitration if lost (unless higher levels do this for us)
+ * - power down the chip when no device is detected
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/irq.h>
+#include <asm/hydra.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/pci-bridge.h>
+#include <asm/macio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "mesh.h"
+
+#if 1
+#undef KERN_DEBUG
+#define KERN_DEBUG KERN_WARNING
+#endif
+
+MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
+MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
+MODULE_LICENSE("GPL");
+
+static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE;
+static int sync_targets = 0xff;
+static int resel_targets = 0xff;
+static int debug_targets = 0; /* print debug for these targets */
+static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS;
+
+module_param(sync_rate, int, 0);
+MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)");
+module_param(sync_targets, int, 0);
+MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous");
+module_param(resel_targets, int, 0);
+MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect");
+module_param(debug_targets, int, 0644);
+MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets");
+module_param(init_reset_delay, int, 0);
+MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)");
+
+static int mesh_sync_period = 100;
+static int mesh_sync_offset = 0;
+static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */
+
+#define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1)
+#define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1)
+#define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1)
+#define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id))
+
+#undef MESH_DBG
+#define N_DBG_LOG 50
+#define N_DBG_SLOG 20
+#define NUM_DBG_EVENTS 13
+#undef DBG_USE_TB /* bombs on 601 */
+
+struct dbglog {
+ char *fmt;
+ u32 tb;
+ u8 phase;
+ u8 bs0;
+ u8 bs1;
+ u8 tgt;
+ int d;
+};
+
+enum mesh_phase {
+ idle,
+ arbitrating,
+ selecting,
+ commanding,
+ dataing,
+ statusing,
+ busfreeing,
+ disconnecting,
+ reselecting,
+ sleeping
+};
+
+enum msg_phase {
+ msg_none,
+ msg_out,
+ msg_out_xxx,
+ msg_out_last,
+ msg_in,
+ msg_in_bad,
+};
+
+enum sdtr_phase {
+ do_sdtr,
+ sdtr_sent,
+ sdtr_done
+};
+
+struct mesh_target {
+ enum sdtr_phase sdtr_state;
+ int sync_params;
+ int data_goes_out; /* guess as to data direction */
+ struct scsi_cmnd *current_req;
+ u32 saved_ptr;
+#ifdef MESH_DBG
+ int log_ix;
+ int n_log;
+ struct dbglog log[N_DBG_LOG];
+#endif
+};
+
+struct mesh_state {
+ volatile struct mesh_regs __iomem *mesh;
+ int meshintr;
+ volatile struct dbdma_regs __iomem *dma;
+ int dmaintr;
+ struct Scsi_Host *host;
+ struct mesh_state *next;
+ struct scsi_cmnd *request_q;
+ struct scsi_cmnd *request_qtail;
+ enum mesh_phase phase; /* what we're currently trying to do */
+ enum msg_phase msgphase;
+ int conn_tgt; /* target we're connected to */
+ struct scsi_cmnd *current_req; /* req we're currently working on */
+ int data_ptr;
+ int dma_started;
+ int dma_count;
+ int stat;
+ int aborting;
+ int expect_reply;
+ int n_msgin;
+ u8 msgin[16];
+ int n_msgout;
+ int last_n_msgout;
+ u8 msgout[16];
+ struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */
+ dma_addr_t dma_cmd_bus;
+ void *dma_cmd_space;
+ int dma_cmd_size;
+ int clk_freq;
+ struct mesh_target tgts[8];
+ struct macio_dev *mdev;
+ struct pci_dev* pdev;
+#ifdef MESH_DBG
+ int log_ix;
+ int n_log;
+ struct dbglog log[N_DBG_SLOG];
+#endif
+};
+
+/*
+ * Driver is too messy, we need a few prototypes...
+ */
+static void mesh_done(struct mesh_state *ms, int start_next);
+static void mesh_interrupt(struct mesh_state *ms);
+static void cmd_complete(struct mesh_state *ms);
+static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
+static void halt_dma(struct mesh_state *ms);
+static void phase_mismatch(struct mesh_state *ms);
+
+
+/*
+ * Some debugging & logging routines
+ */
+
+#ifdef MESH_DBG
+
+static inline u32 readtb(void)
+{
+ u32 tb;
+
+#ifdef DBG_USE_TB
+ /* Beware: if you enable this, it will crash on 601s. */
+ asm ("mftb %0" : "=r" (tb) : );
+#else
+ tb = 0;
+#endif
+ return tb;
+}
+
+static void dlog(struct mesh_state *ms, char *fmt, int a)
+{
+ struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
+ struct dbglog *tlp, *slp;
+
+ tlp = &tp->log[tp->log_ix];
+ slp = &ms->log[ms->log_ix];
+ tlp->fmt = fmt;
+ tlp->tb = readtb();
+ tlp->phase = (ms->msgphase << 4) + ms->phase;
+ tlp->bs0 = ms->mesh->bus_status0;
+ tlp->bs1 = ms->mesh->bus_status1;
+ tlp->tgt = ms->conn_tgt;
+ tlp->d = a;
+ *slp = *tlp;
+ if (++tp->log_ix >= N_DBG_LOG)
+ tp->log_ix = 0;
+ if (tp->n_log < N_DBG_LOG)
+ ++tp->n_log;
+ if (++ms->log_ix >= N_DBG_SLOG)
+ ms->log_ix = 0;
+ if (ms->n_log < N_DBG_SLOG)
+ ++ms->n_log;
+}
+
+static void dumplog(struct mesh_state *ms, int t)
+{
+ struct mesh_target *tp = &ms->tgts[t];
+ struct dbglog *lp;
+ int i;
+
+ if (tp->n_log == 0)
+ return;
+ i = tp->log_ix - tp->n_log;
+ if (i < 0)
+ i += N_DBG_LOG;
+ tp->n_log = 0;
+ do {
+ lp = &tp->log[i];
+ printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ",
+ t, lp->bs1, lp->bs0, lp->phase);
+#ifdef DBG_USE_TB
+ printk("tb=%10u ", lp->tb);
+#endif
+ printk(lp->fmt, lp->d);
+ printk("\n");
+ if (++i >= N_DBG_LOG)
+ i = 0;
+ } while (i != tp->log_ix);
+}
+
+static void dumpslog(struct mesh_state *ms)
+{
+ struct dbglog *lp;
+ int i;
+
+ if (ms->n_log == 0)
+ return;
+ i = ms->log_ix - ms->n_log;
+ if (i < 0)
+ i += N_DBG_SLOG;
+ ms->n_log = 0;
+ do {
+ lp = &ms->log[i];
+ printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ",
+ lp->bs1, lp->bs0, lp->phase, lp->tgt);
+#ifdef DBG_USE_TB
+ printk("tb=%10u ", lp->tb);
+#endif
+ printk(lp->fmt, lp->d);
+ printk("\n");
+ if (++i >= N_DBG_SLOG)
+ i = 0;
+ } while (i != ms->log_ix);
+}
+
+#else
+
+static inline void dlog(struct mesh_state *ms, char *fmt, int a)
+{}
+static inline void dumplog(struct mesh_state *ms, int tgt)
+{}
+static inline void dumpslog(struct mesh_state *ms)
+{}
+
+#endif /* MESH_DBG */
+
+#define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
+
+static void
+mesh_dump_regs(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ volatile struct dbdma_regs __iomem *md = ms->dma;
+ int t;
+ struct mesh_target *tp;
+
+ printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n",
+ ms, mr, md);
+ printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x "
+ "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
+ (mr->count_hi << 8) + mr->count_lo, mr->sequence,
+ (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count,
+ mr->exception, mr->error, mr->intr_mask, mr->interrupt,
+ mr->sync_params);
+ while(in_8(&mr->fifo_count))
+ printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo));
+ printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n",
+ in_le32(&md->status), in_le32(&md->cmdptr));
+ printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n",
+ ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
+ printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n",
+ ms->dma_started, ms->dma_count, ms->n_msgout);
+ for (t = 0; t < 8; ++t) {
+ tp = &ms->tgts[t];
+ if (tp->current_req == NULL)
+ continue;
+ printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n",
+ t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
+ }
+}
+
+
+/*
+ * Flush write buffers on the bus path to the mesh
+ */
+static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
+{
+ (void)in_8(&mr->mesh_id);
+}
+
+
+/*
+ * Complete a SCSI command
+ */
+static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
+{
+ (*cmd->scsi_done)(cmd);
+}
+
+
+/* Called with meshinterrupt disabled, initialize the chipset
+ * and eventually do the initial bus reset. The lock must not be
+ * held since we can schedule.
+ */
+static void mesh_init(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ volatile struct dbdma_regs __iomem *md = ms->dma;
+
+ mesh_flush_io(mr);
+ udelay(100);
+
+ /* Reset controller */
+ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
+ out_8(&mr->exception, 0xff); /* clear all exception bits */
+ out_8(&mr->error, 0xff); /* clear all error bits */
+ out_8(&mr->sequence, SEQ_RESETMESH);
+ mesh_flush_io(mr);
+ udelay(10);
+ out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ out_8(&mr->source_id, ms->host->this_id);
+ out_8(&mr->sel_timeout, 25); /* 250ms */
+ out_8(&mr->sync_params, ASYNC_PARAMS);
+
+ if (init_reset_delay) {
+ printk(KERN_INFO "mesh: performing initial bus reset...\n");
+
+ /* Reset bus */
+ out_8(&mr->bus_status1, BS1_RST); /* assert RST */
+ mesh_flush_io(mr);
+ udelay(30); /* leave it on for >= 25us */
+ out_8(&mr->bus_status1, 0); /* negate RST */
+ mesh_flush_io(mr);
+
+ /* Wait for bus to come back */
+ msleep(init_reset_delay);
+ }
+
+ /* Reconfigure controller */
+ out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */
+ out_8(&mr->sequence, SEQ_FLUSHFIFO);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->sync_params, ASYNC_PARAMS);
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+
+ ms->phase = idle;
+ ms->msgphase = msg_none;
+}
+
+
+static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ int t, id;
+
+ id = cmd->device->id;
+ ms->current_req = cmd;
+ ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
+ ms->tgts[id].current_req = cmd;
+
+#if 1
+ if (DEBUG_TARGET(cmd)) {
+ int i;
+ printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
+ for (i = 0; i < cmd->cmd_len; ++i)
+ printk(" %x", cmd->cmnd[i]);
+ printk(" use_sg=%d buffer=%p bufflen=%u\n",
+ scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
+ }
+#endif
+ if (ms->dma_started)
+ panic("mesh: double DMA start !\n");
+
+ ms->phase = arbitrating;
+ ms->msgphase = msg_none;
+ ms->data_ptr = 0;
+ ms->dma_started = 0;
+ ms->n_msgout = 0;
+ ms->last_n_msgout = 0;
+ ms->expect_reply = 0;
+ ms->conn_tgt = id;
+ ms->tgts[id].saved_ptr = 0;
+ ms->stat = DID_OK;
+ ms->aborting = 0;
+#ifdef MESH_DBG
+ ms->tgts[id].n_log = 0;
+ dlog(ms, "start cmd=%x", (int) cmd);
+#endif
+
+ /* Off we go */
+ dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
+ out_8(&mr->interrupt, INT_CMDDONE);
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+ mesh_flush_io(mr);
+ udelay(1);
+
+ if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
+ /*
+ * Some other device has the bus or is arbitrating for it -
+ * probably a target which is about to reselect us.
+ */
+ dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception,
+ mr->error, mr->fifo_count));
+ for (t = 100; t > 0; --t) {
+ if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0)
+ break;
+ if (in_8(&mr->interrupt) != 0) {
+ dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception,
+ mr->error, mr->fifo_count));
+ mesh_interrupt(ms);
+ if (ms->phase != arbitrating)
+ return;
+ }
+ udelay(1);
+ }
+ if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
+ /* XXX should try again in a little while */
+ ms->stat = DID_BUS_BUSY;
+ ms->phase = idle;
+ mesh_done(ms, 0);
+ return;
+ }
+ }
+
+ /*
+ * Apparently the mesh has a bug where it will assert both its
+ * own bit and the target's bit on the bus during arbitration.
+ */
+ out_8(&mr->dest_id, mr->source_id);
+
+ /*
+ * There appears to be a race with reselection sometimes,
+ * where a target reselects us just as we issue the
+ * arbitrate command. It seems that then the arbitrate
+ * command just hangs waiting for the bus to be free
+ * without giving us a reselection exception.
+ * The only way I have found to get it to respond correctly
+ * is this: disable reselection before issuing the arbitrate
+ * command, then after issuing it, if it looks like a target
+ * is trying to reselect us, reset the mesh and then enable
+ * reselection.
+ */
+ out_8(&mr->sequence, SEQ_DISRESEL);
+ if (in_8(&mr->interrupt) != 0) {
+ dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception,
+ mr->error, mr->fifo_count));
+ mesh_interrupt(ms);
+ if (ms->phase != arbitrating)
+ return;
+ dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception,
+ mr->error, mr->fifo_count));
+ }
+
+ out_8(&mr->sequence, SEQ_ARBITRATE);
+
+ for (t = 230; t > 0; --t) {
+ if (in_8(&mr->interrupt) != 0)
+ break;
+ udelay(1);
+ }
+ dlog(ms, "after arb, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
+ if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
+ && (in_8(&mr->bus_status0) & BS0_IO)) {
+ /* looks like a reselection - try resetting the mesh */
+ dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
+ out_8(&mr->sequence, SEQ_RESETMESH);
+ mesh_flush_io(mr);
+ udelay(10);
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+ mesh_flush_io(mr);
+ for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
+ udelay(1);
+ dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
+#ifndef MESH_MULTIPLE_HOSTS
+ if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
+ && (in_8(&mr->bus_status0) & BS0_IO)) {
+ printk(KERN_ERR "mesh: controller not responding"
+ " to reselection!\n");
+ /*
+ * If this is a target reselecting us, and the
+ * mesh isn't responding, the higher levels of
+ * the scsi code will eventually time out and
+ * reset the bus.
+ */
+ }
+#endif
+ }
+}
+
+/*
+ * Start the next command for a MESH.
+ * Should be called with interrupts disabled.
+ */
+static void mesh_start(struct mesh_state *ms)
+{
+ struct scsi_cmnd *cmd, *prev, *next;
+
+ if (ms->phase != idle || ms->current_req != NULL) {
+ printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
+ ms->phase, ms);
+ return;
+ }
+
+ while (ms->phase == idle) {
+ prev = NULL;
+ for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
+ if (cmd == NULL)
+ return;
+ if (ms->tgts[cmd->device->id].current_req == NULL)
+ break;
+ prev = cmd;
+ }
+ next = (struct scsi_cmnd *) cmd->host_scribble;
+ if (prev == NULL)
+ ms->request_q = next;
+ else
+ prev->host_scribble = (void *) next;
+ if (next == NULL)
+ ms->request_qtail = prev;
+
+ mesh_start_cmd(ms, cmd);
+ }
+}
+
+static void mesh_done(struct mesh_state *ms, int start_next)
+{
+ struct scsi_cmnd *cmd;
+ struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
+
+ cmd = ms->current_req;
+ ms->current_req = NULL;
+ tp->current_req = NULL;
+ if (cmd) {
+ cmd->result = (ms->stat << 16) + cmd->SCp.Status;
+ if (ms->stat == DID_OK)
+ cmd->result += (cmd->SCp.Message << 8);
+ if (DEBUG_TARGET(cmd)) {
+ printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
+ cmd->result, ms->data_ptr, scsi_bufflen(cmd));
+#if 0
+ /* needs to use sg? */
+ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
+ && cmd->request_buffer != 0) {
+ unsigned char *b = cmd->request_buffer;
+ printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+ }
+#endif
+ }
+ cmd->SCp.this_residual -= ms->data_ptr;
+ mesh_completed(ms, cmd);
+ }
+ if (start_next) {
+ out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
+ mesh_flush_io(ms->mesh);
+ udelay(1);
+ ms->phase = idle;
+ mesh_start(ms);
+ }
+}
+
+static inline void add_sdtr_msg(struct mesh_state *ms)
+{
+ int i = ms->n_msgout;
+
+ ms->msgout[i] = EXTENDED_MESSAGE;
+ ms->msgout[i+1] = 3;
+ ms->msgout[i+2] = EXTENDED_SDTR;
+ ms->msgout[i+3] = mesh_sync_period/4;
+ ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
+ ms->n_msgout = i + 5;
+}
+
+static void set_sdtr(struct mesh_state *ms, int period, int offset)
+{
+ struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ int v, tr;
+
+ tp->sdtr_state = sdtr_done;
+ if (offset == 0) {
+ /* asynchronous */
+ if (SYNC_OFF(tp->sync_params))
+ printk(KERN_INFO "mesh: target %d now asynchronous\n",
+ ms->conn_tgt);
+ tp->sync_params = ASYNC_PARAMS;
+ out_8(&mr->sync_params, ASYNC_PARAMS);
+ return;
+ }
+ /*
+ * We need to compute ceil(clk_freq * period / 500e6) - 2
+ * without incurring overflow.
+ */
+ v = (ms->clk_freq / 5000) * period;
+ if (v <= 250000) {
+ /* special case: sync_period == 5 * clk_period */
+ v = 0;
+ /* units of tr are 100kB/s */
+ tr = (ms->clk_freq + 250000) / 500000;
+ } else {
+ /* sync_period == (v + 2) * 2 * clk_period */
+ v = (v + 99999) / 100000 - 2;
+ if (v > 15)
+ v = 15; /* oops */
+ tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
+ }
+ if (offset > 15)
+ offset = 15; /* can't happen */
+ tp->sync_params = SYNC_PARAMS(offset, v);
+ out_8(&mr->sync_params, tp->sync_params);
+ printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n",
+ ms->conn_tgt, tr/10, tr%10);
+}
+
+static void start_phase(struct mesh_state *ms)
+{
+ int i, seq, nb;
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ volatile struct dbdma_regs __iomem *md = ms->dma;
+ struct scsi_cmnd *cmd = ms->current_req;
+ struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
+
+ dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
+ MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
+ switch (ms->msgphase) {
+ case msg_none:
+ break;
+
+ case msg_in:
+ out_8(&mr->count_hi, 0);
+ out_8(&mr->count_lo, 1);
+ out_8(&mr->sequence, SEQ_MSGIN + seq);
+ ms->n_msgin = 0;
+ return;
+
+ case msg_out:
+ /*
+ * To make sure ATN drops before we assert ACK for
+ * the last byte of the message, we have to do the
+ * last byte specially.
+ */
+ if (ms->n_msgout <= 0) {
+ printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n",
+ ms->n_msgout);
+ mesh_dump_regs(ms);
+ ms->msgphase = msg_none;
+ break;
+ }
+ if (ALLOW_DEBUG(ms->conn_tgt)) {
+ printk(KERN_DEBUG "mesh: sending %d msg bytes:",
+ ms->n_msgout);
+ for (i = 0; i < ms->n_msgout; ++i)
+ printk(" %x", ms->msgout[i]);
+ printk("\n");
+ }
+ dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
+ ms->msgout[1], ms->msgout[2]));
+ out_8(&mr->count_hi, 0);
+ out_8(&mr->sequence, SEQ_FLUSHFIFO);
+ mesh_flush_io(mr);
+ udelay(1);
+ /*
+ * If ATN is not already asserted, we assert it, then
+ * issue a SEQ_MSGOUT to get the mesh to drop ACK.
+ */
+ if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
+ dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
+ out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->count_lo, 1);
+ out_8(&mr->sequence, SEQ_MSGOUT + seq);
+ out_8(&mr->bus_status0, 0); /* release explicit ATN */
+ dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0);
+ }
+ if (ms->n_msgout == 1) {
+ /*
+ * We can't issue the SEQ_MSGOUT without ATN
+ * until the target has asserted REQ. The logic
+ * in cmd_complete handles both situations:
+ * REQ already asserted or not.
+ */
+ cmd_complete(ms);
+ } else {
+ out_8(&mr->count_lo, ms->n_msgout - 1);
+ out_8(&mr->sequence, SEQ_MSGOUT + seq);
+ for (i = 0; i < ms->n_msgout - 1; ++i)
+ out_8(&mr->fifo, ms->msgout[i]);
+ }
+ return;
+
+ default:
+ printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n",
+ ms->msgphase);
+ }
+
+ switch (ms->phase) {
+ case selecting:
+ out_8(&mr->dest_id, ms->conn_tgt);
+ out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN);
+ break;
+ case commanding:
+ out_8(&mr->sync_params, tp->sync_params);
+ out_8(&mr->count_hi, 0);
+ if (cmd) {
+ out_8(&mr->count_lo, cmd->cmd_len);
+ out_8(&mr->sequence, SEQ_COMMAND + seq);
+ for (i = 0; i < cmd->cmd_len; ++i)
+ out_8(&mr->fifo, cmd->cmnd[i]);
+ } else {
+ out_8(&mr->count_lo, 6);
+ out_8(&mr->sequence, SEQ_COMMAND + seq);
+ for (i = 0; i < 6; ++i)
+ out_8(&mr->fifo, 0);
+ }
+ break;
+ case dataing:
+ /* transfer data, if any */
+ if (!ms->dma_started) {
+ set_dma_cmds(ms, cmd);
+ out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
+ out_le32(&md->control, (RUN << 16) | RUN);
+ ms->dma_started = 1;
+ }
+ nb = ms->dma_count;
+ if (nb > 0xfff0)
+ nb = 0xfff0;
+ ms->dma_count -= nb;
+ ms->data_ptr += nb;
+ out_8(&mr->count_lo, nb);
+ out_8(&mr->count_hi, nb >> 8);
+ out_8(&mr->sequence, (tp->data_goes_out?
+ SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq);
+ break;
+ case statusing:
+ out_8(&mr->count_hi, 0);
+ out_8(&mr->count_lo, 1);
+ out_8(&mr->sequence, SEQ_STATUS + seq);
+ break;
+ case busfreeing:
+ case disconnecting:
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+ mesh_flush_io(mr);
+ udelay(1);
+ dlog(ms, "enbresel intr/exc/err/fc=%.8x",
+ MKWORD(mr->interrupt, mr->exception, mr->error,
+ mr->fifo_count));
+ out_8(&mr->sequence, SEQ_BUSFREE);
+ break;
+ default:
+ printk(KERN_ERR "mesh: start_phase called with phase=%d\n",
+ ms->phase);
+ dumpslog(ms);
+ }
+
+}
+
+static inline void get_msgin(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ int i, n;
+
+ n = mr->fifo_count;
+ if (n != 0) {
+ i = ms->n_msgin;
+ ms->n_msgin = i + n;
+ for (; n > 0; --n)
+ ms->msgin[i++] = in_8(&mr->fifo);
+ }
+}
+
+static inline int msgin_length(struct mesh_state *ms)
+{
+ int b, n;
+
+ n = 1;
+ if (ms->n_msgin > 0) {
+ b = ms->msgin[0];
+ if (b == 1) {
+ /* extended message */
+ n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
+ } else if (0x20 <= b && b <= 0x2f) {
+ /* 2-byte message */
+ n = 2;
+ }
+ }
+ return n;
+}
+
+static void reselected(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ struct scsi_cmnd *cmd;
+ struct mesh_target *tp;
+ int b, t, prev;
+
+ switch (ms->phase) {
+ case idle:
+ break;
+ case arbitrating:
+ if ((cmd = ms->current_req) != NULL) {
+ /* put the command back on the queue */
+ cmd->host_scribble = (void *) ms->request_q;
+ if (ms->request_q == NULL)
+ ms->request_qtail = cmd;
+ ms->request_q = cmd;
+ tp = &ms->tgts[cmd->device->id];
+ tp->current_req = NULL;
+ }
+ break;
+ case busfreeing:
+ ms->phase = reselecting;
+ mesh_done(ms, 0);
+ break;
+ case disconnecting:
+ break;
+ default:
+ printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n",
+ ms->msgphase, ms->phase, ms->conn_tgt);
+ dumplog(ms, ms->conn_tgt);
+ dumpslog(ms);
+ }
+
+ if (ms->dma_started) {
+ printk(KERN_ERR "mesh: reselected with DMA started !\n");
+ halt_dma(ms);
+ }
+ ms->current_req = NULL;
+ ms->phase = dataing;
+ ms->msgphase = msg_in;
+ ms->n_msgout = 0;
+ ms->last_n_msgout = 0;
+ prev = ms->conn_tgt;
+
+ /*
+ * We seem to get abortive reselections sometimes.
+ */
+ while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) {
+ static int mesh_aborted_resels;
+ mesh_aborted_resels++;
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+ mesh_flush_io(mr);
+ udelay(5);
+ dlog(ms, "extra resel err/exc/fc = %.6x",
+ MKWORD(0, mr->error, mr->exception, mr->fifo_count));
+ }
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->sync_params, ASYNC_PARAMS);
+
+ /*
+ * Find out who reselected us.
+ */
+ if (in_8(&mr->fifo_count) == 0) {
+ printk(KERN_ERR "mesh: reselection but nothing in fifo?\n");
+ ms->conn_tgt = ms->host->this_id;
+ goto bogus;
+ }
+ /* get the last byte in the fifo */
+ do {
+ b = in_8(&mr->fifo);
+ dlog(ms, "reseldata %x", b);
+ } while (in_8(&mr->fifo_count));
+ for (t = 0; t < 8; ++t)
+ if ((b & (1 << t)) != 0 && t != ms->host->this_id)
+ break;
+ if (b != (1 << t) + (1 << ms->host->this_id)) {
+ printk(KERN_ERR "mesh: bad reselection data %x\n", b);
+ ms->conn_tgt = ms->host->this_id;
+ goto bogus;
+ }
+
+
+ /*
+ * Set up to continue with that target's transfer.
+ */
+ ms->conn_tgt = t;
+ tp = &ms->tgts[t];
+ out_8(&mr->sync_params, tp->sync_params);
+ if (ALLOW_DEBUG(t)) {
+ printk(KERN_DEBUG "mesh: reselected by target %d\n", t);
+ printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n",
+ tp->saved_ptr, tp->data_goes_out, tp->current_req);
+ }
+ ms->current_req = tp->current_req;
+ if (tp->current_req == NULL) {
+ printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t);
+ goto bogus;
+ }
+ ms->data_ptr = tp->saved_ptr;
+ dlog(ms, "resel prev tgt=%d", prev);
+ dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
+ start_phase(ms);
+ return;
+
+bogus:
+ dumplog(ms, ms->conn_tgt);
+ dumpslog(ms);
+ ms->data_ptr = 0;
+ ms->aborting = 1;
+ start_phase(ms);
+}
+
+static void do_abort(struct mesh_state *ms)
+{
+ ms->msgout[0] = ABORT;
+ ms->n_msgout = 1;
+ ms->aborting = 1;
+ ms->stat = DID_ABORT;
+ dlog(ms, "abort", 0);
+}
+
+static void handle_reset(struct mesh_state *ms)
+{
+ int tgt;
+ struct mesh_target *tp;
+ struct scsi_cmnd *cmd;
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+
+ for (tgt = 0; tgt < 8; ++tgt) {
+ tp = &ms->tgts[tgt];
+ if ((cmd = tp->current_req) != NULL) {
+ cmd->result = DID_RESET << 16;
+ tp->current_req = NULL;
+ mesh_completed(ms, cmd);
+ }
+ ms->tgts[tgt].sdtr_state = do_sdtr;
+ ms->tgts[tgt].sync_params = ASYNC_PARAMS;
+ }
+ ms->current_req = NULL;
+ while ((cmd = ms->request_q) != NULL) {
+ ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
+ cmd->result = DID_RESET << 16;
+ mesh_completed(ms, cmd);
+ }
+ ms->phase = idle;
+ ms->msgphase = msg_none;
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ out_8(&mr->sequence, SEQ_FLUSHFIFO);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->sync_params, ASYNC_PARAMS);
+ out_8(&mr->sequence, SEQ_ENBRESEL);
+}
+
+static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct mesh_state *ms = dev_id;
+ struct Scsi_Host *dev = ms->host;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+ mesh_interrupt(ms);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void handle_error(struct mesh_state *ms)
+{
+ int err, exc, count;
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+
+ err = in_8(&mr->error);
+ exc = in_8(&mr->exception);
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ dlog(ms, "error err/exc/fc/cl=%.8x",
+ MKWORD(err, exc, mr->fifo_count, mr->count_lo));
+ if (err & ERR_SCSIRESET) {
+ /* SCSI bus was reset */
+ printk(KERN_INFO "mesh: SCSI bus reset detected: "
+ "waiting for end...");
+ while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
+ udelay(1);
+ printk("done\n");
+ handle_reset(ms);
+ /* request_q is empty, no point in mesh_start() */
+ return;
+ }
+ if (err & ERR_UNEXPDISC) {
+ /* Unexpected disconnect */
+ if (exc & EXC_RESELECTED) {
+ reselected(ms);
+ return;
+ }
+ if (!ms->aborting) {
+ printk(KERN_WARNING "mesh: target %d aborted\n",
+ ms->conn_tgt);
+ dumplog(ms, ms->conn_tgt);
+ dumpslog(ms);
+ }
+ out_8(&mr->interrupt, INT_CMDDONE);
+ ms->stat = DID_ABORT;
+ mesh_done(ms, 1);
+ return;
+ }
+ if (err & ERR_PARITY) {
+ if (ms->msgphase == msg_in) {
+ printk(KERN_ERR "mesh: msg parity error, target %d\n",
+ ms->conn_tgt);
+ ms->msgout[0] = MSG_PARITY_ERROR;
+ ms->n_msgout = 1;
+ ms->msgphase = msg_in_bad;
+ cmd_complete(ms);
+ return;
+ }
+ if (ms->stat == DID_OK) {
+ printk(KERN_ERR "mesh: parity error, target %d\n",
+ ms->conn_tgt);
+ ms->stat = DID_PARITY;
+ }
+ count = (mr->count_hi << 8) + mr->count_lo;
+ if (count == 0) {
+ cmd_complete(ms);
+ } else {
+ /* reissue the data transfer command */
+ out_8(&mr->sequence, mr->sequence);
+ }
+ return;
+ }
+ if (err & ERR_SEQERR) {
+ if (exc & EXC_RESELECTED) {
+ /* This can happen if we issue a command to
+ get the bus just after the target reselects us. */
+ static int mesh_resel_seqerr;
+ mesh_resel_seqerr++;
+ reselected(ms);
+ return;
+ }
+ if (exc == EXC_PHASEMM) {
+ static int mesh_phasemm_seqerr;
+ mesh_phasemm_seqerr++;
+ phase_mismatch(ms);
+ return;
+ }
+ printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n",
+ err, exc);
+ } else {
+ printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc);
+ }
+ mesh_dump_regs(ms);
+ dumplog(ms, ms->conn_tgt);
+ if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
+ /* try to do what the target wants */
+ do_abort(ms);
+ phase_mismatch(ms);
+ return;
+ }
+ ms->stat = DID_ERROR;
+ mesh_done(ms, 1);
+}
+
+static void handle_exception(struct mesh_state *ms)
+{
+ int exc;
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+
+ exc = in_8(&mr->exception);
+ out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE);
+ if (exc & EXC_RESELECTED) {
+ static int mesh_resel_exc;
+ mesh_resel_exc++;
+ reselected(ms);
+ } else if (exc == EXC_ARBLOST) {
+ printk(KERN_DEBUG "mesh: lost arbitration\n");
+ ms->stat = DID_BUS_BUSY;
+ mesh_done(ms, 1);
+ } else if (exc == EXC_SELTO) {
+ /* selection timed out */
+ ms->stat = DID_BAD_TARGET;
+ mesh_done(ms, 1);
+ } else if (exc == EXC_PHASEMM) {
+ /* target wants to do something different:
+ find out what it wants and do it. */
+ phase_mismatch(ms);
+ } else {
+ printk(KERN_ERR "mesh: can't cope with exception %x\n", exc);
+ mesh_dump_regs(ms);
+ dumplog(ms, ms->conn_tgt);
+ do_abort(ms);
+ phase_mismatch(ms);
+ }
+}
+
+static void handle_msgin(struct mesh_state *ms)
+{
+ int i, code;
+ struct scsi_cmnd *cmd = ms->current_req;
+ struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
+
+ if (ms->n_msgin == 0)
+ return;
+ code = ms->msgin[0];
+ if (ALLOW_DEBUG(ms->conn_tgt)) {
+ printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
+ for (i = 0; i < ms->n_msgin; ++i)
+ printk(" %x", ms->msgin[i]);
+ printk("\n");
+ }
+ dlog(ms, "msgin msg=%.8x",
+ MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
+
+ ms->expect_reply = 0;
+ ms->n_msgout = 0;
+ if (ms->n_msgin < msgin_length(ms))
+ goto reject;
+ if (cmd)
+ cmd->SCp.Message = code;
+ switch (code) {
+ case COMMAND_COMPLETE:
+ break;
+ case EXTENDED_MESSAGE:
+ switch (ms->msgin[2]) {
+ case EXTENDED_MODIFY_DATA_POINTER:
+ ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
+ + (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
+ break;
+ case EXTENDED_SDTR:
+ if (tp->sdtr_state != sdtr_sent) {
+ /* reply with an SDTR */
+ add_sdtr_msg(ms);
+ /* limit period to at least his value,
+ offset to no more than his */
+ if (ms->msgout[3] < ms->msgin[3])
+ ms->msgout[3] = ms->msgin[3];
+ if (ms->msgout[4] > ms->msgin[4])
+ ms->msgout[4] = ms->msgin[4];
+ set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
+ ms->msgphase = msg_out;
+ } else {
+ set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
+ }
+ break;
+ default:
+ goto reject;
+ }
+ break;
+ case SAVE_POINTERS:
+ tp->saved_ptr = ms->data_ptr;
+ break;
+ case RESTORE_POINTERS:
+ ms->data_ptr = tp->saved_ptr;
+ break;
+ case DISCONNECT:
+ ms->phase = disconnecting;
+ break;
+ case ABORT:
+ break;
+ case MESSAGE_REJECT:
+ if (tp->sdtr_state == sdtr_sent)
+ set_sdtr(ms, 0, 0);
+ break;
+ case NOP:
+ break;
+ default:
+ if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) {
+ if (cmd == NULL) {
+ do_abort(ms);
+ ms->msgphase = msg_out;
+ } else if (code != cmd->device->lun + IDENTIFY_BASE) {
+ printk(KERN_WARNING "mesh: lun mismatch "
+ "(%d != %llu) on reselection from "
+ "target %d\n", code - IDENTIFY_BASE,
+ cmd->device->lun, ms->conn_tgt);
+ }
+ break;
+ }
+ goto reject;
+ }
+ return;
+
+ reject:
+ printk(KERN_WARNING "mesh: rejecting message from target %d:",
+ ms->conn_tgt);
+ for (i = 0; i < ms->n_msgin; ++i)
+ printk(" %x", ms->msgin[i]);
+ printk("\n");
+ ms->msgout[0] = MESSAGE_REJECT;
+ ms->n_msgout = 1;
+ ms->msgphase = msg_out;
+}
+
+/*
+ * Set up DMA commands for transferring data.
+ */
+static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
+{
+ int i, dma_cmd, total, off, dtot;
+ struct scatterlist *scl;
+ struct dbdma_cmd *dcmds;
+
+ dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
+ OUTPUT_MORE: INPUT_MORE;
+ dcmds = ms->dma_cmds;
+ dtot = 0;
+ if (cmd) {
+ int nseg;
+
+ cmd->SCp.this_residual = scsi_bufflen(cmd);
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+
+ if (nseg) {
+ total = 0;
+ off = ms->data_ptr;
+
+ scsi_for_each_sg(cmd, scl, nseg, i) {
+ u32 dma_addr = sg_dma_address(scl);
+ u32 dma_len = sg_dma_len(scl);
+
+ total += scl->length;
+ if (off >= dma_len) {
+ off -= dma_len;
+ continue;
+ }
+ if (dma_len > 0xffff)
+ panic("mesh: scatterlist element >= 64k");
+ dcmds->req_count = cpu_to_le16(dma_len - off);
+ dcmds->command = cpu_to_le16(dma_cmd);
+ dcmds->phy_addr = cpu_to_le32(dma_addr + off);
+ dcmds->xfer_status = 0;
+ ++dcmds;
+ dtot += dma_len - off;
+ off = 0;
+ }
+ }
+ }
+ if (dtot == 0) {
+ /* Either the target has overrun our buffer,
+ or the caller didn't provide a buffer. */
+ static char mesh_extra_buf[64];
+
+ dtot = sizeof(mesh_extra_buf);
+ dcmds->req_count = cpu_to_le16(dtot);
+ dcmds->phy_addr = cpu_to_le32(virt_to_phys(mesh_extra_buf));
+ dcmds->xfer_status = 0;
+ ++dcmds;
+ }
+ dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
+ dcmds[-1].command = cpu_to_le16(dma_cmd);
+ memset(dcmds, 0, sizeof(*dcmds));
+ dcmds->command = cpu_to_le16(DBDMA_STOP);
+ ms->dma_count = dtot;
+}
+
+static void halt_dma(struct mesh_state *ms)
+{
+ volatile struct dbdma_regs __iomem *md = ms->dma;
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ struct scsi_cmnd *cmd = ms->current_req;
+ int t, nb;
+
+ if (!ms->tgts[ms->conn_tgt].data_goes_out) {
+ /* wait a little while until the fifo drains */
+ t = 50;
+ while (t > 0 && in_8(&mr->fifo_count) != 0
+ && (in_le32(&md->status) & ACTIVE) != 0) {
+ --t;
+ udelay(1);
+ }
+ }
+ out_le32(&md->control, RUN << 16); /* turn off RUN bit */
+ nb = (mr->count_hi << 8) + mr->count_lo;
+ dlog(ms, "halt_dma fc/count=%.6x",
+ MKWORD(0, mr->fifo_count, 0, nb));
+ if (ms->tgts[ms->conn_tgt].data_goes_out)
+ nb += mr->fifo_count;
+ /* nb is the number of bytes not yet transferred
+ to/from the target. */
+ ms->data_ptr -= nb;
+ dlog(ms, "data_ptr %x", ms->data_ptr);
+ if (ms->data_ptr < 0) {
+ printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
+ ms->data_ptr, nb, ms);
+ ms->data_ptr = 0;
+#ifdef MESH_DBG
+ dumplog(ms, ms->conn_tgt);
+ dumpslog(ms);
+#endif /* MESH_DBG */
+ } else if (cmd && scsi_bufflen(cmd) &&
+ ms->data_ptr > scsi_bufflen(cmd)) {
+ printk(KERN_DEBUG "mesh: target %d overrun, "
+ "data_ptr=%x total=%x goes_out=%d\n",
+ ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
+ ms->tgts[ms->conn_tgt].data_goes_out);
+ }
+ scsi_dma_unmap(cmd);
+ ms->dma_started = 0;
+}
+
+static void phase_mismatch(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ int phase;
+
+ dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
+ MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count));
+ phase = in_8(&mr->bus_status0) & BS0_PHASE;
+ if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
+ /* output the last byte of the message, without ATN */
+ out_8(&mr->count_lo, 1);
+ out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
+ ms->msgphase = msg_out_last;
+ return;
+ }
+
+ if (ms->msgphase == msg_in) {
+ get_msgin(ms);
+ if (ms->n_msgin)
+ handle_msgin(ms);
+ }
+
+ if (ms->dma_started)
+ halt_dma(ms);
+ if (mr->fifo_count) {
+ out_8(&mr->sequence, SEQ_FLUSHFIFO);
+ mesh_flush_io(mr);
+ udelay(1);
+ }
+
+ ms->msgphase = msg_none;
+ switch (phase) {
+ case BP_DATAIN:
+ ms->tgts[ms->conn_tgt].data_goes_out = 0;
+ ms->phase = dataing;
+ break;
+ case BP_DATAOUT:
+ ms->tgts[ms->conn_tgt].data_goes_out = 1;
+ ms->phase = dataing;
+ break;
+ case BP_COMMAND:
+ ms->phase = commanding;
+ break;
+ case BP_STATUS:
+ ms->phase = statusing;
+ break;
+ case BP_MSGIN:
+ ms->msgphase = msg_in;
+ ms->n_msgin = 0;
+ break;
+ case BP_MSGOUT:
+ ms->msgphase = msg_out;
+ if (ms->n_msgout == 0) {
+ if (ms->aborting) {
+ do_abort(ms);
+ } else {
+ if (ms->last_n_msgout == 0) {
+ printk(KERN_DEBUG
+ "mesh: no msg to repeat\n");
+ ms->msgout[0] = NOP;
+ ms->last_n_msgout = 1;
+ }
+ ms->n_msgout = ms->last_n_msgout;
+ }
+ }
+ break;
+ default:
+ printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase);
+ ms->stat = DID_ERROR;
+ mesh_done(ms, 1);
+ return;
+ }
+
+ start_phase(ms);
+}
+
+static void cmd_complete(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ struct scsi_cmnd *cmd = ms->current_req;
+ struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
+ int seq, n, t;
+
+ dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
+ seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
+ switch (ms->msgphase) {
+ case msg_out_xxx:
+ /* huh? we expected a phase mismatch */
+ ms->n_msgin = 0;
+ ms->msgphase = msg_in;
+ /* fall through */
+
+ case msg_in:
+ /* should have some message bytes in fifo */
+ get_msgin(ms);
+ n = msgin_length(ms);
+ if (ms->n_msgin < n) {
+ out_8(&mr->count_lo, n - ms->n_msgin);
+ out_8(&mr->sequence, SEQ_MSGIN + seq);
+ } else {
+ ms->msgphase = msg_none;
+ handle_msgin(ms);
+ start_phase(ms);
+ }
+ break;
+
+ case msg_in_bad:
+ out_8(&mr->sequence, SEQ_FLUSHFIFO);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->count_lo, 1);
+ out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg);
+ break;
+
+ case msg_out:
+ /*
+ * To get the right timing on ATN wrt ACK, we have
+ * to get the MESH to drop ACK, wait until REQ gets
+ * asserted, then drop ATN. To do this we first
+ * issue a SEQ_MSGOUT with ATN and wait for REQ,
+ * then change the command to a SEQ_MSGOUT w/o ATN.
+ * If we don't see REQ in a reasonable time, we
+ * change the command to SEQ_MSGIN with ATN,
+ * wait for the phase mismatch interrupt, then
+ * issue the SEQ_MSGOUT without ATN.
+ */
+ out_8(&mr->count_lo, 1);
+ out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN);
+ t = 30; /* wait up to 30us */
+ while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
+ udelay(1);
+ dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
+ MKWORD(mr->error, mr->exception,
+ mr->fifo_count, mr->count_lo));
+ if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) {
+ /* whoops, target didn't do what we expected */
+ ms->last_n_msgout = ms->n_msgout;
+ ms->n_msgout = 0;
+ if (in_8(&mr->interrupt) & INT_ERROR) {
+ printk(KERN_ERR "mesh: error %x in msg_out\n",
+ in_8(&mr->error));
+ handle_error(ms);
+ return;
+ }
+ if (in_8(&mr->exception) != EXC_PHASEMM)
+ printk(KERN_ERR "mesh: exc %x in msg_out\n",
+ in_8(&mr->exception));
+ else
+ printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n",
+ in_8(&mr->bus_status0));
+ handle_exception(ms);
+ return;
+ }
+ if (in_8(&mr->bus_status0) & BS0_REQ) {
+ out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
+ ms->msgphase = msg_out_last;
+ } else {
+ out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN);
+ ms->msgphase = msg_out_xxx;
+ }
+ break;
+
+ case msg_out_last:
+ ms->last_n_msgout = ms->n_msgout;
+ ms->n_msgout = 0;
+ ms->msgphase = ms->expect_reply? msg_in: msg_none;
+ start_phase(ms);
+ break;
+
+ case msg_none:
+ switch (ms->phase) {
+ case idle:
+ printk(KERN_ERR "mesh: interrupt in idle phase?\n");
+ dumpslog(ms);
+ return;
+ case selecting:
+ dlog(ms, "Selecting phase at command completion",0);
+ ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
+ (cmd? cmd->device->lun: 0));
+ ms->n_msgout = 1;
+ ms->expect_reply = 0;
+ if (ms->aborting) {
+ ms->msgout[0] = ABORT;
+ ms->n_msgout++;
+ } else if (tp->sdtr_state == do_sdtr) {
+ /* add SDTR message */
+ add_sdtr_msg(ms);
+ ms->expect_reply = 1;
+ tp->sdtr_state = sdtr_sent;
+ }
+ ms->msgphase = msg_out;
+ /*
+ * We need to wait for REQ before dropping ATN.
+ * We wait for at most 30us, then fall back to
+ * a scheme where we issue a SEQ_COMMAND with ATN,
+ * which will give us a phase mismatch interrupt
+ * when REQ does come, and then we send the message.
+ */
+ t = 230; /* wait up to 230us */
+ while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) {
+ if (--t < 0) {
+ dlog(ms, "impatient for req", ms->n_msgout);
+ ms->msgphase = msg_none;
+ break;
+ }
+ udelay(1);
+ }
+ break;
+ case dataing:
+ if (ms->dma_count != 0) {
+ start_phase(ms);
+ return;
+ }
+ /*
+ * We can get a phase mismatch here if the target
+ * changes to the status phase, even though we have
+ * had a command complete interrupt. Then, if we
+ * issue the SEQ_STATUS command, we'll get a sequence
+ * error interrupt. Which isn't so bad except that
+ * occasionally the mesh actually executes the
+ * SEQ_STATUS *as well as* giving us the sequence
+ * error and phase mismatch exception.
+ */
+ out_8(&mr->sequence, 0);
+ out_8(&mr->interrupt,
+ INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ halt_dma(ms);
+ break;
+ case statusing:
+ if (cmd) {
+ cmd->SCp.Status = mr->fifo;
+ if (DEBUG_TARGET(cmd))
+ printk(KERN_DEBUG "mesh: status is %x\n",
+ cmd->SCp.Status);
+ }
+ ms->msgphase = msg_in;
+ break;
+ case busfreeing:
+ mesh_done(ms, 1);
+ return;
+ case disconnecting:
+ ms->current_req = NULL;
+ ms->phase = idle;
+ mesh_start(ms);
+ return;
+ default:
+ break;
+ }
+ ++ms->phase;
+ start_phase(ms);
+ break;
+ }
+}
+
+
+/*
+ * Called by midlayer with host locked to queue a new
+ * request
+ */
+static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct mesh_state *ms;
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+
+ ms = (struct mesh_state *) cmd->device->host->hostdata;
+
+ if (ms->request_q == NULL)
+ ms->request_q = cmd;
+ else
+ ms->request_qtail->host_scribble = (void *) cmd;
+ ms->request_qtail = cmd;
+
+ if (ms->phase == idle)
+ mesh_start(ms);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(mesh_queue)
+
+/*
+ * Called to handle interrupts, either call by the interrupt
+ * handler (do_mesh_interrupt) or by other functions in
+ * exceptional circumstances
+ */
+static void mesh_interrupt(struct mesh_state *ms)
+{
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ int intr;
+
+#if 0
+ if (ALLOW_DEBUG(ms->conn_tgt))
+ printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x "
+ "phase=%d msgphase=%d\n", mr->bus_status0,
+ mr->interrupt, mr->exception, mr->error,
+ ms->phase, ms->msgphase);
+#endif
+ while ((intr = in_8(&mr->interrupt)) != 0) {
+ dlog(ms, "interrupt intr/err/exc/seq=%.8x",
+ MKWORD(intr, mr->error, mr->exception, mr->sequence));
+ if (intr & INT_ERROR) {
+ handle_error(ms);
+ } else if (intr & INT_EXCEPTION) {
+ handle_exception(ms);
+ } else if (intr & INT_CMDDONE) {
+ out_8(&mr->interrupt, INT_CMDDONE);
+ cmd_complete(ms);
+ }
+ }
+}
+
+/* Todo: here we can at least try to remove the command from the
+ * queue if it isn't connected yet, and for pending command, assert
+ * ATN until the bus gets freed.
+ */
+static int mesh_abort(struct scsi_cmnd *cmd)
+{
+ struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
+
+ printk(KERN_DEBUG "mesh_abort(%p)\n", cmd);
+ mesh_dump_regs(ms);
+ dumplog(ms, cmd->device->id);
+ dumpslog(ms);
+ return FAILED;
+}
+
+/*
+ * Called by the midlayer with the lock held to reset the
+ * SCSI host and bus.
+ * The midlayer will wait for devices to come back, we don't need
+ * to do that ourselves
+ */
+static int mesh_host_reset(struct scsi_cmnd *cmd)
+{
+ struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
+ volatile struct mesh_regs __iomem *mr = ms->mesh;
+ volatile struct dbdma_regs __iomem *md = ms->dma;
+ unsigned long flags;
+
+ printk(KERN_DEBUG "mesh_host_reset\n");
+
+ spin_lock_irqsave(ms->host->host_lock, flags);
+
+ /* Reset the controller & dbdma channel */
+ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
+ out_8(&mr->exception, 0xff); /* clear all exception bits */
+ out_8(&mr->error, 0xff); /* clear all error bits */
+ out_8(&mr->sequence, SEQ_RESETMESH);
+ mesh_flush_io(mr);
+ udelay(1);
+ out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ out_8(&mr->source_id, ms->host->this_id);
+ out_8(&mr->sel_timeout, 25); /* 250ms */
+ out_8(&mr->sync_params, ASYNC_PARAMS);
+
+ /* Reset the bus */
+ out_8(&mr->bus_status1, BS1_RST); /* assert RST */
+ mesh_flush_io(mr);
+ udelay(30); /* leave it on for >= 25us */
+ out_8(&mr->bus_status1, 0); /* negate RST */
+
+ /* Complete pending commands */
+ handle_reset(ms);
+
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
+ return SUCCESS;
+}
+
+static void set_mesh_power(struct mesh_state *ms, int state)
+{
+ if (!machine_is(powermac))
+ return;
+ if (state) {
+ pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
+ msleep(200);
+ } else {
+ pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
+ msleep(10);
+ }
+}
+
+
+#ifdef CONFIG_PM
+static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
+{
+ struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
+ unsigned long flags;
+
+ switch (mesg.event) {
+ case PM_EVENT_SUSPEND:
+ case PM_EVENT_HIBERNATE:
+ case PM_EVENT_FREEZE:
+ break;
+ default:
+ return 0;
+ }
+ if (ms->phase == sleeping)
+ return 0;
+
+ scsi_block_requests(ms->host);
+ spin_lock_irqsave(ms->host->host_lock, flags);
+ while(ms->phase != idle) {
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(ms->host->host_lock, flags);
+ }
+ ms->phase = sleeping;
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
+ disable_irq(ms->meshintr);
+ set_mesh_power(ms, 0);
+
+ return 0;
+}
+
+static int mesh_resume(struct macio_dev *mdev)
+{
+ struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
+ unsigned long flags;
+
+ if (ms->phase != sleeping)
+ return 0;
+
+ set_mesh_power(ms, 1);
+ mesh_init(ms);
+ spin_lock_irqsave(ms->host->host_lock, flags);
+ mesh_start(ms);
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
+ enable_irq(ms->meshintr);
+ scsi_unblock_requests(ms->host);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/*
+ * If we leave drives set for synchronous transfers (especially
+ * CDROMs), and reboot to MacOS, it gets confused, poor thing.
+ * So, on reboot we reset the SCSI bus.
+ */
+static int mesh_shutdown(struct macio_dev *mdev)
+{
+ struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
+ volatile struct mesh_regs __iomem *mr;
+ unsigned long flags;
+
+ printk(KERN_INFO "resetting MESH scsi bus(es)\n");
+ spin_lock_irqsave(ms->host->host_lock, flags);
+ mr = ms->mesh;
+ out_8(&mr->intr_mask, 0);
+ out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
+ out_8(&mr->bus_status1, BS1_RST);
+ mesh_flush_io(mr);
+ udelay(30);
+ out_8(&mr->bus_status1, 0);
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
+
+ return 0;
+}
+
+static struct scsi_host_template mesh_template = {
+ .proc_name = "mesh",
+ .name = "MESH",
+ .queuecommand = mesh_queue,
+ .eh_abort_handler = mesh_abort,
+ .eh_host_reset_handler = mesh_host_reset,
+ .can_queue = 20,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
+{
+ struct device_node *mesh = macio_get_of_node(mdev);
+ struct pci_dev* pdev = macio_get_pci_dev(mdev);
+ int tgt, minper;
+ const int *cfp;
+ struct mesh_state *ms;
+ struct Scsi_Host *mesh_host;
+ void *dma_cmd_space;
+ dma_addr_t dma_cmd_bus;
+
+ switch (mdev->bus->chip->type) {
+ case macio_heathrow:
+ case macio_gatwick:
+ case macio_paddington:
+ use_active_neg = 0;
+ break;
+ default:
+ use_active_neg = SEQ_ACTIVE_NEG;
+ }
+
+ if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
+ printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
+ " (got %d,%d)\n", macio_resource_count(mdev),
+ macio_irq_count(mdev));
+ return -ENODEV;
+ }
+
+ if (macio_request_resources(mdev, "mesh") != 0) {
+ printk(KERN_ERR "mesh: unable to request memory resources");
+ return -EBUSY;
+ }
+ mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state));
+ if (mesh_host == NULL) {
+ printk(KERN_ERR "mesh: couldn't register host");
+ goto out_release;
+ }
+
+ /* Old junk for root discovery, that will die ultimately */
+#if !defined(MODULE)
+ note_scsi_host(mesh, mesh_host);
+#endif
+
+ mesh_host->base = macio_resource_start(mdev, 0);
+ mesh_host->irq = macio_irq(mdev, 0);
+ ms = (struct mesh_state *) mesh_host->hostdata;
+ macio_set_drvdata(mdev, ms);
+ ms->host = mesh_host;
+ ms->mdev = mdev;
+ ms->pdev = pdev;
+
+ ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
+ if (ms->mesh == NULL) {
+ printk(KERN_ERR "mesh: can't map registers\n");
+ goto out_free;
+ }
+ ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
+ if (ms->dma == NULL) {
+ printk(KERN_ERR "mesh: can't map registers\n");
+ iounmap(ms->mesh);
+ goto out_free;
+ }
+
+ ms->meshintr = macio_irq(mdev, 0);
+ ms->dmaintr = macio_irq(mdev, 1);
+
+ /* Space for dma command list: +1 for stop command,
+ * +1 to allow for aligning.
+ */
+ ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);
+
+ /* We use the PCI APIs for now until the generic one gets fixed
+ * enough or until we get some macio-specific versions
+ */
+ dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
+ ms->dma_cmd_size, &dma_cmd_bus);
+ if (dma_cmd_space == NULL) {
+ printk(KERN_ERR "mesh: can't allocate DMA table\n");
+ goto out_unmap;
+ }
+
+ ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
+ ms->dma_cmd_space = dma_cmd_space;
+ ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
+ - (unsigned long)dma_cmd_space;
+ ms->current_req = NULL;
+ for (tgt = 0; tgt < 8; ++tgt) {
+ ms->tgts[tgt].sdtr_state = do_sdtr;
+ ms->tgts[tgt].sync_params = ASYNC_PARAMS;
+ ms->tgts[tgt].current_req = NULL;
+ }
+
+ if ((cfp = of_get_property(mesh, "clock-frequency", NULL)))
+ ms->clk_freq = *cfp;
+ else {
+ printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
+ ms->clk_freq = 50000000;
+ }
+
+ /* The maximum sync rate is clock / 5; increase
+ * mesh_sync_period if necessary.
+ */
+ minper = 1000000000 / (ms->clk_freq / 5); /* ns */
+ if (mesh_sync_period < minper)
+ mesh_sync_period = minper;
+
+ /* Power up the chip */
+ set_mesh_power(ms, 1);
+
+ /* Set it up */
+ mesh_init(ms);
+
+ /* Request interrupt */
+ if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
+ printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
+ goto out_shutdown;
+ }
+
+ /* Add scsi host & scan */
+ if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
+ goto out_release_irq;
+ scsi_scan_host(mesh_host);
+
+ return 0;
+
+ out_release_irq:
+ free_irq(ms->meshintr, ms);
+ out_shutdown:
+ /* shutdown & reset bus in case of error or macos can be confused
+ * at reboot if the bus was set to synchronous mode already
+ */
+ mesh_shutdown(mdev);
+ set_mesh_power(ms, 0);
+ pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ ms->dma_cmd_space, ms->dma_cmd_bus);
+ out_unmap:
+ iounmap(ms->dma);
+ iounmap(ms->mesh);
+ out_free:
+ scsi_host_put(mesh_host);
+ out_release:
+ macio_release_resources(mdev);
+
+ return -ENODEV;
+}
+
+static int mesh_remove(struct macio_dev *mdev)
+{
+ struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
+ struct Scsi_Host *mesh_host = ms->host;
+
+ scsi_remove_host(mesh_host);
+
+ free_irq(ms->meshintr, ms);
+
+ /* Reset scsi bus */
+ mesh_shutdown(mdev);
+
+ /* Shut down chip & termination */
+ set_mesh_power(ms, 0);
+
+ /* Unmap registers & dma controller */
+ iounmap(ms->mesh);
+ iounmap(ms->dma);
+
+ /* Free DMA commands memory */
+ pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ ms->dma_cmd_space, ms->dma_cmd_bus);
+
+ /* Release memory resources */
+ macio_release_resources(mdev);
+
+ scsi_host_put(mesh_host);
+
+ return 0;
+}
+
+
+static struct of_device_id mesh_match[] =
+{
+ {
+ .name = "mesh",
+ },
+ {
+ .type = "scsi",
+ .compatible = "chrp,mesh0"
+ },
+ {},
+};
+MODULE_DEVICE_TABLE (of, mesh_match);
+
+static struct macio_driver mesh_driver =
+{
+ .driver = {
+ .name = "mesh",
+ .owner = THIS_MODULE,
+ .of_match_table = mesh_match,
+ },
+ .probe = mesh_probe,
+ .remove = mesh_remove,
+ .shutdown = mesh_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mesh_suspend,
+ .resume = mesh_resume,
+#endif
+};
+
+
+static int __init init_mesh(void)
+{
+
+ /* Calculate sync rate from module parameters */
+ if (sync_rate > 10)
+ sync_rate = 10;
+ if (sync_rate > 0) {
+ printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate);
+ mesh_sync_period = 1000 / sync_rate; /* ns */
+ mesh_sync_offset = 15;
+ } else
+ printk(KERN_INFO "mesh: configured for asynchronous\n");
+
+ return macio_register_driver(&mesh_driver);
+}
+
+static void __exit exit_mesh(void)
+{
+ return macio_unregister_driver(&mesh_driver);
+}
+
+module_init(init_mesh);
+module_exit(exit_mesh);
diff --git a/drivers/scsi/mesh.h b/drivers/scsi/mesh.h
new file mode 100644
index 000000000..4fdb81fa5
--- /dev/null
+++ b/drivers/scsi/mesh.h
@@ -0,0 +1,127 @@
+/*
+ * mesh.h: definitions for the driver for the MESH SCSI bus adaptor
+ * (Macintosh Enhanced SCSI Hardware) found on Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+#ifndef _MESH_H
+#define _MESH_H
+
+/*
+ * Registers in the MESH controller.
+ */
+
+struct mesh_regs {
+ unsigned char count_lo;
+ char pad0[15];
+ unsigned char count_hi;
+ char pad1[15];
+ unsigned char fifo;
+ char pad2[15];
+ unsigned char sequence;
+ char pad3[15];
+ unsigned char bus_status0;
+ char pad4[15];
+ unsigned char bus_status1;
+ char pad5[15];
+ unsigned char fifo_count;
+ char pad6[15];
+ unsigned char exception;
+ char pad7[15];
+ unsigned char error;
+ char pad8[15];
+ unsigned char intr_mask;
+ char pad9[15];
+ unsigned char interrupt;
+ char pad10[15];
+ unsigned char source_id;
+ char pad11[15];
+ unsigned char dest_id;
+ char pad12[15];
+ unsigned char sync_params;
+ char pad13[15];
+ unsigned char mesh_id;
+ char pad14[15];
+ unsigned char sel_timeout;
+ char pad15[15];
+};
+
+/* Bits in the sequence register. */
+#define SEQ_DMA_MODE 0x80 /* use DMA for data transfer */
+#define SEQ_TARGET 0x40 /* put the controller into target mode */
+#define SEQ_ATN 0x20 /* assert ATN signal */
+#define SEQ_ACTIVE_NEG 0x10 /* use active negation on REQ/ACK */
+#define SEQ_CMD 0x0f /* command bits: */
+#define SEQ_ARBITRATE 1 /* get the bus */
+#define SEQ_SELECT 2 /* select a target */
+#define SEQ_COMMAND 3 /* send a command */
+#define SEQ_STATUS 4 /* receive status */
+#define SEQ_DATAOUT 5 /* send data */
+#define SEQ_DATAIN 6 /* receive data */
+#define SEQ_MSGOUT 7 /* send a message */
+#define SEQ_MSGIN 8 /* receive a message */
+#define SEQ_BUSFREE 9 /* look for bus free */
+#define SEQ_ENBPARITY 0x0a /* enable parity checking */
+#define SEQ_DISPARITY 0x0b /* disable parity checking */
+#define SEQ_ENBRESEL 0x0c /* enable reselection */
+#define SEQ_DISRESEL 0x0d /* disable reselection */
+#define SEQ_RESETMESH 0x0e /* reset the controller */
+#define SEQ_FLUSHFIFO 0x0f /* clear out the FIFO */
+
+/* Bits in the bus_status0 and bus_status1 registers:
+ these correspond directly to the SCSI bus control signals. */
+#define BS0_REQ 0x20
+#define BS0_ACK 0x10
+#define BS0_ATN 0x08
+#define BS0_MSG 0x04
+#define BS0_CD 0x02
+#define BS0_IO 0x01
+#define BS1_RST 0x80
+#define BS1_BSY 0x40
+#define BS1_SEL 0x20
+
+/* Bus phases defined by the bits in bus_status0 */
+#define BS0_PHASE (BS0_MSG+BS0_CD+BS0_IO)
+#define BP_DATAOUT 0
+#define BP_DATAIN BS0_IO
+#define BP_COMMAND BS0_CD
+#define BP_STATUS (BS0_CD+BS0_IO)
+#define BP_MSGOUT (BS0_MSG+BS0_CD)
+#define BP_MSGIN (BS0_MSG+BS0_CD+BS0_IO)
+
+/* Bits in the exception register. */
+#define EXC_SELWATN 0x20 /* (as target) we were selected with ATN */
+#define EXC_SELECTED 0x10 /* (as target) we were selected w/o ATN */
+#define EXC_RESELECTED 0x08 /* (as initiator) we were reselected */
+#define EXC_ARBLOST 0x04 /* we lost arbitration */
+#define EXC_PHASEMM 0x02 /* SCSI phase mismatch */
+#define EXC_SELTO 0x01 /* selection timeout */
+
+/* Bits in the error register */
+#define ERR_UNEXPDISC 0x40 /* target unexpectedly disconnected */
+#define ERR_SCSIRESET 0x20 /* SCSI bus got reset on us */
+#define ERR_SEQERR 0x10 /* we did something the chip didn't like */
+#define ERR_PARITY 0x01 /* parity error was detected */
+
+/* Bits in the interrupt and intr_mask registers */
+#define INT_ERROR 0x04 /* error interrupt */
+#define INT_EXCEPTION 0x02 /* exception interrupt */
+#define INT_CMDDONE 0x01 /* command done interrupt */
+
+/* Fields in the sync_params register */
+#define SYNC_OFF(x) ((x) >> 4) /* offset field */
+#define SYNC_PER(x) ((x) & 0xf) /* period field */
+#define SYNC_PARAMS(o, p) (((o) << 4) | (p))
+#define ASYNC_PARAMS 2 /* sync_params value for async xfers */
+
+/*
+ * Assuming a clock frequency of 50MHz:
+ *
+ * The transfer period with SYNC_PER(sync_params) == x
+ * is (x + 2) * 40ns, except that x == 0 gives 100ns.
+ *
+ * The units of the sel_timeout register are 10ms.
+ */
+
+
+#endif /* _MESH_H */
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
new file mode 100644
index 000000000..657b45ca0
--- /dev/null
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -0,0 +1,67 @@
+#
+# Kernel configuration file for the MPT2SAS
+#
+# This code is based on drivers/scsi/mpt2sas/Kconfig
+# Copyright (C) 2007-2014 LSI Corporation
+# (mailto:DL-MPTFusionLinux@lsi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_MPT2SAS
+ tristate "LSI MPT Fusion SAS 2.0 Device Driver"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ select RAID_ATTRS
+ ---help---
+ This driver supports PCI-Express SAS 6Gb/s Host Adapters.
+
+config SCSI_MPT2SAS_MAX_SGE
+ int "LSI MPT Fusion Max number of SG Entries (16 - 128)"
+ depends on PCI && SCSI && SCSI_MPT2SAS
+ default "128"
+ range 16 128
+ ---help---
+ This option allows you to specify the maximum number of scatter-
+ gather entries per I/O. The driver default is 128, which matches
+ SAFE_PHYS_SEGMENTS. However, it may decreased down to 16.
+ Decreasing this parameter will reduce memory requirements
+ on a per controller instance.
+
+config SCSI_MPT2SAS_LOGGING
+ bool "LSI MPT Fusion logging facility"
+ depends on PCI && SCSI && SCSI_MPT2SAS
+ ---help---
+ This turns on a logging facility.
diff --git a/drivers/scsi/mpt2sas/Makefile b/drivers/scsi/mpt2sas/Makefile
new file mode 100644
index 000000000..728f04757
--- /dev/null
+++ b/drivers/scsi/mpt2sas/Makefile
@@ -0,0 +1,7 @@
+# mpt2sas makefile
+obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas.o
+mpt2sas-y += mpt2sas_base.o \
+ mpt2sas_config.o \
+ mpt2sas_scsih.o \
+ mpt2sas_transport.o \
+ mpt2sas_ctl.o
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
new file mode 100644
index 000000000..7fc6f23bd
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2.h
+ * Title: MPI Message independent structures and definitions
+ * including System Interface Register Set and
+ * scatter/gather formats.
+ * Creation Date: June 21, 2006
+ *
+ * mpi2.h Version: 02.00.35
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnsotic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_H
+#define MPI2_H
+
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+
+#define MPI2_VERSION_02_00 (0x0200)
+
+/* versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x23)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
+
+
+/*****************************************************************************
+*
+* IOC State Definitions
+*
+*****************************************************************************/
+
+#define MPI2_IOC_STATE_RESET (0x00000000)
+#define MPI2_IOC_STATE_READY (0x10000000)
+#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI2_IOC_STATE_FAULT (0x40000000)
+
+#define MPI2_IOC_STATE_MASK (0xF0000000)
+#define MPI2_IOC_STATE_SHIFT (28)
+
+/* Fault state range for prodcut specific codes */
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
+
+
+/*****************************************************************************
+*
+* System Interface Register Definitions
+*
+*****************************************************************************/
+
+typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
+{
+ U32 Doorbell; /* 0x00 */
+ U32 WriteSequence; /* 0x04 */
+ U32 HostDiagnostic; /* 0x08 */
+ U32 Reserved1; /* 0x0C */
+ U32 DiagRWData; /* 0x10 */
+ U32 DiagRWAddressLow; /* 0x14 */
+ U32 DiagRWAddressHigh; /* 0x18 */
+ U32 Reserved2[5]; /* 0x1C */
+ U32 HostInterruptStatus; /* 0x30 */
+ U32 HostInterruptMask; /* 0x34 */
+ U32 DCRData; /* 0x38 */
+ U32 DCRAddress; /* 0x3C */
+ U32 Reserved3[2]; /* 0x40 */
+ U32 ReplyFreeHostIndex; /* 0x48 */
+ U32 Reserved4[8]; /* 0x4C */
+ U32 ReplyPostHostIndex; /* 0x6C */
+ U32 Reserved5; /* 0x70 */
+ U32 HCBSize; /* 0x74 */
+ U32 HCBAddressLow; /* 0x78 */
+ U32 HCBAddressHigh; /* 0x7C */
+ U32 Reserved6[16]; /* 0x80 */
+ U32 RequestDescriptorPostLow; /* 0xC0 */
+ U32 RequestDescriptorPostHigh; /* 0xC4 */
+ U32 Reserved7[14]; /* 0xC8 */
+} MPI2_SYSTEM_INTERFACE_REGS, MPI2_POINTER PTR_MPI2_SYSTEM_INTERFACE_REGS,
+ Mpi2SystemInterfaceRegs_t, MPI2_POINTER pMpi2SystemInterfaceRegs_t;
+
+/*
+ * Defines for working with the Doorbell register.
+ */
+#define MPI2_DOORBELL_OFFSET (0x00000000)
+
+/* IOC --> System values */
+#define MPI2_DOORBELL_USED (0x08000000)
+#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
+
+/* System --> IOC values */
+#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
+
+
+/*
+ * Defines for the WriteSequence register
+ */
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+/*
+ * Defines for the HostDiagnostic register
+ */
+#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+
+#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
+#define MPI2_DIAG_HCB_MODE (0x00000100)
+#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
+#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI2_DIAG_RESET_HISTORY (0x00000020)
+#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
+#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
+
+/*
+ * Offsets for DiagRWData and address
+ */
+#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
+#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
+#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
+
+/*
+ * Defines for the HostInterruptStatus register
+ */
+#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
+#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
+#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
+#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
+#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
+#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
+
+/*
+ * Defines for the HostInterruptMask register
+ */
+#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
+#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
+#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
+#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
+#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
+
+/*
+ * Offsets for DCRData and address
+ */
+#define MPI2_DCR_DATA_OFFSET (0x00000038)
+#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
+
+/*
+ * Offset for the Reply Free Queue
+ */
+#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
+
+/*
+ * Defines for the Reply Descriptor Post Queue
+ */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
+#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */
+
+/*
+ * Defines for the HCBSize and address
+ */
+#define MPI2_HCB_SIZE_OFFSET (0x00000074)
+#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
+#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
+
+#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
+#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
+
+/*
+ * Offsets for the Request Queue
+ */
+#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+
+
+/* Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
+/*****************************************************************************
+*
+* Message Descriptors
+*
+*****************************************************************************/
+
+/* Request Descriptors */
+
+/* Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 DescriptorTypeDependent; /* 0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
+
+/* defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+
+/* High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
+
+
+/* SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 DevHandle; /* 0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
+
+
+/* SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 IoIndex; /* 0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
+
+
+/* RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 Reserved; /* 0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+
+/* union of Request Descriptors */
+typedef union _MPI2_REQUEST_DESCRIPTOR_UNION
+{
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ U64 Words;
+} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
+ Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t;
+
+
+/* Reply Descriptors */
+
+/* Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 DescriptorTypeDependent1; /* 0x02 */
+ U32 DescriptorTypeDependent2; /* 0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
+
+/* defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/* values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+/* Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U32 ReplyFrameAddress; /* 0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
+
+#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
+
+
+/* SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 TaskTag; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+
+/* TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U8 SequenceNumber; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 IoIndex; /* 0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+
+/* Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U8 VP_ID; /* 0x02 */
+ U8 Flags; /* 0x03 */
+ U16 InitiatorDevHandle; /* 0x04 */
+ U16 IoIndex; /* 0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/* defines for Flags field */
+#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
+
+
+/* RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U32 Reserved; /* 0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+
+/* union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION
+{
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+
+
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */
+#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */
+#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */
+#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */
+#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */
+#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */
+#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */
+#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic Buffer Post */
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/
+/* Host Based Discovery Action */
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
+/* Power Management Control */
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
+/* Send Host Message */
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+/* beginning of product-specific range */
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
+/* end of product-specific range */
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+
+
+
+/* Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
+
+
+/*****************************************************************************
+*
+* IOC Status Values
+*
+*****************************************************************************/
+
+/* mask for IOCStatus status value */
+#define MPI2_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SUCCESS (0x0000)
+#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI2_IOCSTATUS_BUSY (0x0002)
+#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
+#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+/****************************************************************************
+* RAID Accelerator values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
+
+/****************************************************************************
+* IOCStatus flag to indicate that log info is available
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+
+/****************************************************************************
+* IOCLogInfo Types
+****************************************************************************/
+
+#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+
+
+/*****************************************************************************
+*
+* Standard Message Structures
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Request Message Header for all request messages
+****************************************************************************/
+
+typedef struct _MPI2_REQUEST_HEADER
+{
+ U16 FunctionDependent1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 FunctionDependent2; /* 0x04 */
+ U8 FunctionDependent3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+} MPI2_REQUEST_HEADER, MPI2_POINTER PTR_MPI2_REQUEST_HEADER,
+ MPI2RequestHeader_t, MPI2_POINTER pMPI2RequestHeader_t;
+
+
+/****************************************************************************
+* Default Reply
+****************************************************************************/
+
+typedef struct _MPI2_DEFAULT_REPLY
+{
+ U16 FunctionDependent1; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 FunctionDependent2; /* 0x04 */
+ U8 FunctionDependent3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U16 FunctionDependent5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_DEFAULT_REPLY, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY,
+ MPI2DefaultReply_t, MPI2_POINTER pMPI2DefaultReply_t;
+
+
+/* common version structure/union used in messages and configuration pages */
+
+typedef struct _MPI2_VERSION_STRUCT
+{
+ U8 Dev; /* 0x00 */
+ U8 Unit; /* 0x01 */
+ U8 Minor; /* 0x02 */
+ U8 Major; /* 0x03 */
+} MPI2_VERSION_STRUCT;
+
+typedef union _MPI2_VERSION_UNION
+{
+ MPI2_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI2_VERSION_UNION;
+
+
+/* LUN field defines, common to many structures */
+#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* MPI Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_SIMPLE32
+{
+ U32 FlagsLength;
+ U32 Address;
+} MPI2_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_SGE_SIMPLE32,
+ Mpi2SGESimple32_t, MPI2_POINTER pMpi2SGESimple32_t;
+
+typedef struct _MPI2_SGE_SIMPLE64
+{
+ U32 FlagsLength;
+ U64 Address;
+} MPI2_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_SGE_SIMPLE64,
+ Mpi2SGESimple64_t, MPI2_POINTER pMpi2SGESimple64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION
+{
+ U32 FlagsLength;
+ union
+ {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
+
+
+/****************************************************************************
+* MPI Chain Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_CHAIN32
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} MPI2_SGE_CHAIN32, MPI2_POINTER PTR_MPI2_SGE_CHAIN32,
+ Mpi2SGEChain32_t, MPI2_POINTER pMpi2SGEChain32_t;
+
+typedef struct _MPI2_SGE_CHAIN64
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} MPI2_SGE_CHAIN64, MPI2_POINTER PTR_MPI2_SGE_CHAIN64,
+ Mpi2SGEChain64_t, MPI2_POINTER pMpi2SGEChain64_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union
+ {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
+
+
+/****************************************************************************
+* MPI Transaction Context Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANSACTION32
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION32, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION32,
+ Mpi2SGETransaction32_t, MPI2_POINTER pMpi2SGETransaction32_t;
+
+typedef struct _MPI2_SGE_TRANSACTION64
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION64, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION64,
+ Mpi2SGETransaction64_t, MPI2_POINTER pMpi2SGETransaction64_t;
+
+typedef struct _MPI2_SGE_TRANSACTION96
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION96, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION96,
+ Mpi2SGETransaction96_t, MPI2_POINTER pMpi2SGETransaction96_t;
+
+typedef struct _MPI2_SGE_TRANSACTION128
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION128, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION128,
+ Mpi2SGETransaction_t128, MPI2_POINTER pMpi2SGETransaction_t128;
+
+typedef struct _MPI2_SGE_TRANSACTION_UNION
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union
+ {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ } u;
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION_UNION,
+ Mpi2SGETransactionUnion_t, MPI2_POINTER pMpi2SGETransactionUnion_t;
+
+
+/****************************************************************************
+* MPI SGE union for IO SGL's
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_IO_UNION
+{
+ union
+ {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_MPI_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_IO_UNION,
+ Mpi2MpiSGEIOUnion_t, MPI2_POINTER pMpi2MpiSGEIOUnion_t;
+
+
+/****************************************************************************
+* MPI SGE union for SGL's with Simple and Transaction elements
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION
+{
+ union
+ {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_SGE_TRANS_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
+ Mpi2SGETransSimpleUnion_t, MPI2_POINTER pMpi2SGETransSimpleUnion_t;
+
+
+/****************************************************************************
+* All MPI SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_UNION
+{
+ union
+ {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_MPI_SGE_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_UNION,
+ Mpi2MpiSgeUnion_t, MPI2_POINTER pMpi2MpiSgeUnion_t;
+
+
+/****************************************************************************
+* MPI SGE field definition and masks
+****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI2_SGE_FLAGS_DIRECTION (0x04)
+#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI2_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/* Element Type */
+
+#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
+#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30)
+#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/* Address location */
+
+#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/* Direction */
+
+#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
+/* Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/* Context Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
+
+/****************************************************************************
+* MPI SGE operation Macros
+****************************************************************************/
+
+/* SIMPLE FlagsLength manipulations... */
+#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
+#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI2_SGE_SET_FLAGS_LENGTH(f,l) (MPI2_SGE_SET_FLAGS(f) | MPI2_SGE_LENGTH(l))
+
+#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(f,l)
+
+/* CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_SGE_SET_FLAGS(f)
+#define MPI2_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_SGE_LENGTH(l)
+
+#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> MPI2_SGE_CHAIN_OFFSET_SHIFT)
+
+
+/*****************************************************************************
+*
+* Fusion-MPT IEEE Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IEEE Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE32
+{
+ U32 Address;
+ U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64
+{
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 Reserved2;
+ U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
+{
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
+
+
+/****************************************************************************
+* IEEE Chain Element structures
+****************************************************************************/
+
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION
+{
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+
+
+/****************************************************************************
+* All IEEE SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_IEEE_SGE_UNION
+{
+ union
+ {
+ MPI2_IEEE_SGE_SIMPLE_UNION Simple;
+ MPI2_IEEE_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_IEEE_SGE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_UNION,
+ Mpi2IeeeSgeUnion_t, MPI2_POINTER pMpi2IeeeSgeUnion_t;
+
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/* Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/* Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+ /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+ /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+ /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
+ /* IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
+
+/****************************************************************************
+* IEEE SGE operation Macros
+****************************************************************************/
+
+/* SIMPLE FlagsLength manipulations... */
+#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
+
+#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) | MPI2_IEEE32_SGE_LENGTH(l))
+
+#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f,l)
+
+/* CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_IEEE32_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_IEEE32_SGE_SET_FLAGS(f)
+#define MPI2_IEEE32_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_IEEE32_SGE_LENGTH(l)
+
+
+
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI/IEEE Scatter Gather Unions
+*
+*****************************************************************************/
+
+typedef union _MPI2_SIMPLE_SGE_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION, MPI2_POINTER PTR_MPI2_SIMPLE_SGE_UNION,
+ Mpi2SimpleSgeUntion_t, MPI2_POINTER pMpi2SimpleSgeUntion_t;
+
+
+typedef union _MPI2_SGE_IO_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
+
+
+/****************************************************************************
+*
+* Values for SGLFlags field, used in many request messages with an SGL
+*
+****************************************************************************/
+
+/* values for MPI SGL Data Location Address Space subfield */
+#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+/* values for SGL Type subfield */
+#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
+#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
+
+
+#endif
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
new file mode 100644
index 000000000..ee8d2d695
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -0,0 +1,3068 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_cnfg.h
+ * Title: MPI Configuration messages and pages
+ * Creation Date: November 10, 2006
+ *
+ * mpi2_cnfg.h Version: 02.00.29
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up
+ * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ * SAS IO Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ * obsolete for MPI v2.5 and later.
+ * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ * MPI2_CONFIG_PAGE_MAN_7.
+ * Added EnclosureLevel and ConnectorName fields to
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added EnclosureLevel field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 01-08-14 02.00.28 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
+ * more defines for the BiosOptions field.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_CNFG_H
+#define MPI2_CNFG_H
+
+/*****************************************************************************
+* Configuration Page Header and defines
+*****************************************************************************/
+
+/* Config Page Header */
+typedef struct _MPI2_CONFIG_PAGE_HEADER
+{
+ U8 PageVersion; /* 0x00 */
+ U8 PageLength; /* 0x01 */
+ U8 PageNumber; /* 0x02 */
+ U8 PageType; /* 0x03 */
+} MPI2_CONFIG_PAGE_HEADER, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER,
+ Mpi2ConfigPageHeader_t, MPI2_POINTER pMpi2ConfigPageHeader_t;
+
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION
+{
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ U8 Bytes[4];
+ U16 Word16[2];
+ U32 Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+ Mpi2ConfigPageHeaderUnion, MPI2_POINTER pMpi2ConfigPageHeaderUnion;
+
+/* Extended Config Page Header */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER
+{
+ U8 PageVersion; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 PageNumber; /* 0x02 */
+ U8 PageType; /* 0x03 */
+ U16 ExtPageLength; /* 0x04 */
+ U8 ExtPageType; /* 0x06 */
+ U8 Reserved2; /* 0x07 */
+} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ MPI2_POINTER PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ Mpi2ConfigExtendedPageHeader_t, MPI2_POINTER pMpi2ConfigExtendedPageHeader_t;
+
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
+{
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+ U8 Bytes[8];
+ U16 Word16[4];
+ U32 Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ Mpi2ConfigPageExtendedHeaderUnion, MPI2_POINTER pMpi2ConfigPageExtendedHeaderUnion;
+
+
+/* PageType field values */
+#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI2_CONFIG_PAGEATTR_MASK (0xF0)
+
+#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI2_CONFIG_PAGETYPE_IOC (0x01)
+#define MPI2_CONFIG_PAGETYPE_BIOS (0x02)
+#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
+#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09)
+#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
+#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F)
+#define MPI2_CONFIG_PAGETYPE_MASK (0x0F)
+
+#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF)
+
+
+/* ExtPageType field values */
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
+#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14)
+#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
+#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+
+
+/*****************************************************************************
+* PageAddress defines
+*****************************************************************************/
+
+/* RAID Volume PageAddress format */
+#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/* RAID Physical Disk PageAddress format */
+#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
+#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
+#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000)
+
+#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
+#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF)
+
+
+/* SAS Expander PageAddress format */
+#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+
+
+/* SAS Device PageAddress format */
+#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/* SAS PHY PageAddress format */
+#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000)
+
+#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
+
+
+/* SAS Port PageAddress format */
+#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+
+#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF)
+
+
+/* SAS Enclosure PageAddress format */
+#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/* RAID Configuration PageAddress format */
+#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
+#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000)
+#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000)
+
+#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF)
+
+
+/* Driver Persistent Mapping PageAddress format */
+#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000)
+
+#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000)
+#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16)
+#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
+
+
+/* Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
+
+
+
+/****************************************************************************
+* Configuration messages
+****************************************************************************/
+
+/* Configuration Request Message */
+typedef struct _MPI2_CONFIG_REQUEST
+{
+ U8 Action; /* 0x00 */
+ U8 SGLFlags; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 ExtPageLength; /* 0x04 */
+ U8 ExtPageType; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U8 Reserved2; /* 0x0C */
+ U8 ProxyVF_ID; /* 0x0D */
+ U16 Reserved4; /* 0x0E */
+ U32 Reserved3; /* 0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */
+ U32 PageAddress; /* 0x18 */
+ MPI2_SGE_IO_UNION PageBufferSGE; /* 0x1C */
+} MPI2_CONFIG_REQUEST, MPI2_POINTER PTR_MPI2_CONFIG_REQUEST,
+ Mpi2ConfigRequest_t, MPI2_POINTER pMpi2ConfigRequest_t;
+
+/* values for the Action field */
+#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
+#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
+#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
+#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
+#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* Config Reply Message */
+typedef struct _MPI2_CONFIG_REPLY
+{
+ U8 Action; /* 0x00 */
+ U8 SGLFlags; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 ExtPageLength; /* 0x04 */
+ U8 ExtPageType; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U16 Reserved2; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */
+} MPI2_CONFIG_REPLY, MPI2_POINTER PTR_MPI2_CONFIG_REPLY,
+ Mpi2ConfigReply_t, MPI2_POINTER pMpi2ConfigReply_t;
+
+
+
+/*****************************************************************************
+*
+* C o n f i g u r a t i o n P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Manufacturing Config pages
+****************************************************************************/
+
+#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+
+/* SAS */
+#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
+#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072)
+#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074)
+#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076)
+#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
+#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
+#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
+
+#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
+
+#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
+
+
+
+
+/* Manufacturing Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 ChipName[16]; /* 0x04 */
+ U8 ChipRevision[8]; /* 0x14 */
+ U8 BoardName[16]; /* 0x1C */
+ U8 BoardAssembly[16]; /* 0x2C */
+ U8 BoardTracerNumber[16]; /* 0x3C */
+} MPI2_CONFIG_PAGE_MAN_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_0,
+ Mpi2ManufacturingPage0_t, MPI2_POINTER pMpi2ManufacturingPage0_t;
+
+#define MPI2_MANUFACTURING0_PAGEVERSION (0x00)
+
+
+/* Manufacturing Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 VPD[256]; /* 0x04 */
+} MPI2_CONFIG_PAGE_MAN_1,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_1,
+ Mpi2ManufacturingPage1_t, MPI2_POINTER pMpi2ManufacturingPage1_t;
+
+#define MPI2_MANUFACTURING1_PAGEVERSION (0x00)
+
+
+typedef struct _MPI2_CHIP_REVISION_ID
+{
+ U16 DeviceID; /* 0x00 */
+ U8 PCIRevisionID; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI2_CHIP_REVISION_ID, MPI2_POINTER PTR_MPI2_CHIP_REVISION_ID,
+ Mpi2ChipRevisionId_t, MPI2_POINTER pMpi2ChipRevisionId_t;
+
+
+/* Manufacturing Page 2 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_2
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */
+ U32 HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 0x08 */
+} MPI2_CONFIG_PAGE_MAN_2,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_2,
+ Mpi2ManufacturingPage2_t, MPI2_POINTER pMpi2ManufacturingPage2_t;
+
+#define MPI2_MANUFACTURING2_PAGEVERSION (0x00)
+
+
+/* Manufacturing Page 3 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
+#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_3
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */
+ U32 Info[MPI2_MAN_PAGE_3_INFO_WORDS];/* 0x08 */
+} MPI2_CONFIG_PAGE_MAN_3,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_3,
+ Mpi2ManufacturingPage3_t, MPI2_POINTER pMpi2ManufacturingPage3_t;
+
+#define MPI2_MANUFACTURING3_PAGEVERSION (0x00)
+
+
+/* Manufacturing Page 4 */
+
+typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS
+{
+ U8 PowerSaveFlags; /* 0x00 */
+ U8 InternalOperationsSleepTime; /* 0x01 */
+ U8 InternalOperationsRunTime; /* 0x02 */
+ U8 HostIdleTime; /* 0x03 */
+} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ MPI2_POINTER PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ Mpi2ManPage4PwrSaveSettings_t, MPI2_POINTER pMpi2ManPage4PwrSaveSettings_t;
+
+/* defines for the PowerSaveFlags field */
+#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03)
+#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00)
+#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01)
+#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02)
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_4
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Flags; /* 0x08 */
+ U8 InquirySize; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ U8 InquiryData[56]; /* 0x10 */
+ U32 RAID0VolumeSettings; /* 0x48 */
+ U32 RAID1EVolumeSettings; /* 0x4C */
+ U32 RAID1VolumeSettings; /* 0x50 */
+ U32 RAID10VolumeSettings; /* 0x54 */
+ U32 Reserved4; /* 0x58 */
+ U32 Reserved5; /* 0x5C */
+ MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /* 0x60 */
+ U8 MaxOCEDisks; /* 0x64 */
+ U8 ResyncRate; /* 0x65 */
+ U16 DataScrubDuration; /* 0x66 */
+ U8 MaxHotSpares; /* 0x68 */
+ U8 MaxPhysDisksPerVol; /* 0x69 */
+ U8 MaxPhysDisks; /* 0x6A */
+ U8 MaxVolumes; /* 0x6B */
+} MPI2_CONFIG_PAGE_MAN_4,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_4,
+ Mpi2ManufacturingPage4_t, MPI2_POINTER pMpi2ManufacturingPage4_t;
+
+#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A)
+
+/* Manufacturing Page 4 Flags field */
+#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000)
+#define MPI2_MANPAGE4_METADATA_512MB (0x00000000)
+
+#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000)
+#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000)
+#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000)
+
+#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00)
+#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000)
+#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400)
+#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800)
+#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00)
+
+#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300)
+#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000)
+#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100)
+#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200)
+
+#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080)
+#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
+
+
+/* Manufacturing Page 5 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_MANUFACTURING5_ENTRY
+{
+ U64 WWID; /* 0x00 */
+ U64 DeviceName; /* 0x08 */
+} MPI2_MANUFACTURING5_ENTRY, MPI2_POINTER PTR_MPI2_MANUFACTURING5_ENTRY,
+ Mpi2Manufacturing5Entry_t, MPI2_POINTER pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhys; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+ MPI2_MANUFACTURING5_ENTRY Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/* 0x08 */
+} MPI2_CONFIG_PAGE_MAN_5,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_5,
+ Mpi2ManufacturingPage5_t, MPI2_POINTER pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION (0x03)
+
+
+/* Manufacturing Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_6
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 ProductSpecificInfo;/* 0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_6,
+ Mpi2ManufacturingPage6_t, MPI2_POINTER pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+/* Manufacturing Page 7 */
+
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
+{
+ U32 Pinout; /* 0x00 */
+ U8 Connector[16]; /* 0x04 */
+ U8 Location; /* 0x14 */
+ U8 ReceptacleID; /* 0x15 */
+ U16 Slot; /* 0x16 */
+ U32 Reserved2; /* 0x18 */
+} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+ Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;
+
+/* defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+
+/* defines for the Location field */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_7
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Flags; /* 0x0C */
+ U8 EnclosureName[16]; /* 0x10 */
+ U8 NumPhys; /* 0x20 */
+ U8 Reserved3; /* 0x21 */
+ U16 Reserved4; /* 0x22 */
+ MPI2_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /* 0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
+ Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
+
+/* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008)
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+
+/*
+ * Generic structure to use for product-specific manufacturing pages
+ * (currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 ProductSpecificInfo;/* 0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_PS,
+ Mpi2ManufacturingPagePS_t, MPI2_POINTER pMpi2ManufacturingPagePS_t;
+
+#define MPI2_MANUFACTURING8_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+/* IO Unit Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U64 UniqueValue; /* 0x04 */
+ MPI2_VERSION_UNION NvdataVersionDefault; /* 0x08 */
+ MPI2_VERSION_UNION NvdataVersionPersistent; /* 0x0A */
+} MPI2_CONFIG_PAGE_IO_UNIT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+ Mpi2IOUnitPage0_t, MPI2_POINTER pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02)
+
+
+/* IO Unit Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Flags; /* 0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+ Mpi2IOUnitPage1_t, MPI2_POINTER pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
+
+/* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
+
+
+/* IO Unit Page 3 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 GPIOCount; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U16 GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/* 0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+ Mpi2IOUnitPage3_t, MPI2_POINTER pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01)
+
+/* defines for IO Unit Page 3 GPIOVal field */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
+
+
+/* IO Unit Page 5 */
+
+/*
+ * Upper layer code (drivers, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumDmaEngines at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
+#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U64 RaidAcceleratorBufferBaseAddress; /* 0x04 */
+ U64 RaidAcceleratorBufferSize; /* 0x0C */
+ U64 RaidAcceleratorControlBaseAddress; /* 0x14 */
+ U8 RAControlSize; /* 0x1C */
+ U8 NumDmaEngines; /* 0x1D */
+ U8 RAMinControlSize; /* 0x1E */
+ U8 RAMaxControlSize; /* 0x1F */
+ U32 Reserved1; /* 0x20 */
+ U32 Reserved2; /* 0x24 */
+ U32 Reserved3; /* 0x28 */
+ U32 DmaEngineCapabilities
+ [MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /* 0x2C */
+} MPI2_CONFIG_PAGE_IO_UNIT_5, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
+ Mpi2IOUnitPage5_t, MPI2_POINTER pMpi2IOUnitPage5_t;
+
+#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
+
+/* defines for IO Unit Page 5 DmaEngineCapabilities field */
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000)
+#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
+
+#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
+#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004)
+#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002)
+#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001)
+
+
+/* IO Unit Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 Flags; /* 0x04 */
+ U8 RAHostControlSize; /* 0x06 */
+ U8 Reserved0; /* 0x07 */
+ U64 RaidAcceleratorHostControlBaseAddress; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+ U32 Reserved3; /* 0x18 */
+} MPI2_CONFIG_PAGE_IO_UNIT_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
+ Mpi2IOUnitPage6_t, MPI2_POINTER pMpi2IOUnitPage6_t;
+
+#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00)
+
+/* defines for IO Unit Page 6 Flags field */
+#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
+
+
+/* IO Unit Page 7 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 Reserved1; /* 0x04 */
+ U8 PCIeWidth; /* 0x06 */
+ U8 PCIeSpeed; /* 0x07 */
+ U32 ProcessorState; /* 0x08 */
+ U32 PowerManagementCapabilities; /* 0x0C */
+ U16 IOCTemperature; /* 0x10 */
+ U8 IOCTemperatureUnits; /* 0x12 */
+ U8 IOCSpeed; /* 0x13 */
+ U16 BoardTemperature; /* 0x14 */
+ U8 BoardTemperatureUnits; /* 0x16 */
+ U8 Reserved3; /* 0x17 */
+ U32 Reserved4; /* 0x18 */
+ U32 Reserved5; /* 0x1C */
+ U32 Reserved6; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+ Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
+
+/* defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+
+/* defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+
+/* defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
+
+/* defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */
+
+/* defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02)
+
+/* defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+
+/* defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
+
+/* IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+ U16 Flags; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U16
+ Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR,
+Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t;
+
+/* defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U8 NumSensors; /* 0x0C */
+ U8 PollingInterval; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_IOUNIT8_SENSOR
+ Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00)
+
+
+/* IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+ U16 CurrentTemperature; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U8 Flags; /* 0x04 */
+ U8 Reserved2; /* 0x05 */
+ U16 Reserved3; /* 0x06 */
+ U32 Reserved4; /* 0x08 */
+ U32 Reserved5; /* 0x0C */
+} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR,
+Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t;
+
+/* defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U8 NumSensors; /* 0x0C */
+ U8 Reserved4; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_IOUNIT9_SENSOR
+ Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00)
+
+
+/* IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+ U8 CreditPercent; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION,
+Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumFunctions; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+ MPI2_IOUNIT10_FUNCTION
+ Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+/* IOC Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U16 VendorID; /* 0x0C */
+ U16 DeviceID; /* 0x0E */
+ U8 RevisionID; /* 0x10 */
+ U8 Reserved3; /* 0x11 */
+ U16 Reserved4; /* 0x12 */
+ U32 ClassCode; /* 0x14 */
+ U16 SubsystemVendorID; /* 0x18 */
+ U16 SubsystemID; /* 0x1A */
+} MPI2_CONFIG_PAGE_IOC_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_0,
+ Mpi2IOCPage0_t, MPI2_POINTER pMpi2IOCPage0_t;
+
+#define MPI2_IOCPAGE0_PAGEVERSION (0x02)
+
+
+/* IOC Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Flags; /* 0x04 */
+ U32 CoalescingTimeout; /* 0x08 */
+ U8 CoalescingDepth; /* 0x0C */
+ U8 PCISlotNum; /* 0x0D */
+ U8 PCIBusNum; /* 0x0E */
+ U8 PCIDomainSegment; /* 0x0F */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_CONFIG_PAGE_IOC_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_1,
+ Mpi2IOCPage1_t, MPI2_POINTER pMpi2IOCPage1_t;
+
+#define MPI2_IOCPAGE1_PAGEVERSION (0x05)
+
+/* defines for IOC Page 1 Flags field */
+#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF)
+
+/* IOC Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_6
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 CapabilitiesFlags; /* 0x04 */
+ U8 MaxDrivesRAID0; /* 0x08 */
+ U8 MaxDrivesRAID1; /* 0x09 */
+ U8 MaxDrivesRAID1E; /* 0x0A */
+ U8 MaxDrivesRAID10; /* 0x0B */
+ U8 MinDrivesRAID0; /* 0x0C */
+ U8 MinDrivesRAID1; /* 0x0D */
+ U8 MinDrivesRAID1E; /* 0x0E */
+ U8 MinDrivesRAID10; /* 0x0F */
+ U32 Reserved1; /* 0x10 */
+ U8 MaxGlobalHotSpares; /* 0x14 */
+ U8 MaxPhysDisks; /* 0x15 */
+ U8 MaxVolumes; /* 0x16 */
+ U8 MaxConfigs; /* 0x17 */
+ U8 MaxOCEDisks; /* 0x18 */
+ U8 Reserved2; /* 0x19 */
+ U16 Reserved3; /* 0x1A */
+ U32 SupportedStripeSizeMapRAID0; /* 0x1C */
+ U32 SupportedStripeSizeMapRAID1E; /* 0x20 */
+ U32 SupportedStripeSizeMapRAID10; /* 0x24 */
+ U32 Reserved4; /* 0x28 */
+ U32 Reserved5; /* 0x2C */
+ U16 DefaultMetadataSize; /* 0x30 */
+ U16 Reserved6; /* 0x32 */
+ U16 MaxBadBlockTableEntries; /* 0x34 */
+ U16 Reserved7; /* 0x36 */
+ U32 IRNvsramVersion; /* 0x38 */
+} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6,
+ Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t;
+
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
+
+/* defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002)
+#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/* IOC Page 7 */
+
+#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_7
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */
+ U16 SASBroadcastPrimitiveMasks; /* 0x18 */
+ U16 SASNotifyPrimitiveMasks; /* 0x1A */
+ U32 Reserved3; /* 0x1C */
+} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7,
+ Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t;
+
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
+
+
+/* IOC Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_8
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumDevsPerEnclosure; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U16 MaxPersistentEntries; /* 0x08 */
+ U16 MaxNumPhysicalMappedIDs; /* 0x0A */
+ U16 Flags; /* 0x0C */
+ U16 Reserved3; /* 0x0E */
+ U16 IRVolumeMappingFlags; /* 0x10 */
+ U16 Reserved4; /* 0x12 */
+ U32 Reserved5; /* 0x14 */
+} MPI2_CONFIG_PAGE_IOC_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_8,
+ Mpi2IOCPage8_t, MPI2_POINTER pMpi2IOCPage8_t;
+
+#define MPI2_IOCPAGE8_PAGEVERSION (0x00)
+
+/* defines for IOC Page 8 Flags field */
+#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020)
+#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010)
+
+#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E)
+#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002)
+
+#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001)
+#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000)
+
+/* defines for IOC Page 8 IRVolumeMappingFlags */
+#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
+#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001)
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+/* BIOS Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 BiosOptions; /* 0x04 */
+ U32 IOCSettings; /* 0x08 */
+ U8 SSUTimeout; /* 0x0C */
+ U8 Reserved1; /* 0x0D */
+ U16 Reserved2; /* 0x0E */
+ U32 DeviceSettings; /* 0x10 */
+ U16 NumberOfDevices; /* 0x14 */
+ U16 UEFIVersion; /* 0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */
+ U16 IOTimeoutSequential; /* 0x1A */
+ U16 IOTimeoutOther; /* 0x1C */
+ U16 IOTimeoutBlockDevicesRM; /* 0x1E */
+} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
+ Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
+
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x07)
+
+/* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300)
+#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100)
+#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
+#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/* values for BIOS Page 1 IOCSettings field */
+#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/* values for BIOS Page 1 DeviceSettings field */
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/* defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
+
+
+
+/* BIOS Page 2 */
+
+typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER
+{
+ U32 Reserved1; /* 0x00 */
+ U32 Reserved2; /* 0x04 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+ U32 Reserved5; /* 0x10 */
+ U32 Reserved6; /* 0x14 */
+} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ Mpi2BootDeviceAdapterOrder_t, MPI2_POINTER pMpi2BootDeviceAdapterOrder_t;
+
+typedef struct _MPI2_BOOT_DEVICE_SAS_WWID
+{
+ U64 SASAddress; /* 0x00 */
+ U8 LUN[8]; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_BOOT_DEVICE_SAS_WWID, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_SAS_WWID,
+ Mpi2BootDeviceSasWwid_t, MPI2_POINTER pMpi2BootDeviceSasWwid_t;
+
+typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT
+{
+ U64 EnclosureLogicalID; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U16 SlotNumber; /* 0x10 */
+ U16 Reserved3; /* 0x12 */
+ U32 Reserved4; /* 0x14 */
+} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t, MPI2_POINTER pMpi2BootDeviceEnclosureSlot_t;
+
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME
+{
+ U64 DeviceName; /* 0x00 */
+ U8 LUN[8]; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t, MPI2_POINTER pMpi2BootDeviceDeviceName_t;
+
+typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE
+{
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE, MPI2_POINTER PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t, MPI2_POINTER pMpi2BiosPage2BootDevice_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+ U32 Reserved5; /* 0x14 */
+ U32 Reserved6; /* 0x18 */
+ U8 ReqBootDeviceForm; /* 0x1C */
+ U8 Reserved7; /* 0x1D */
+ U16 Reserved8; /* 0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /* 0x20 */
+ U8 ReqAltBootDeviceForm; /* 0x38 */
+ U8 Reserved9; /* 0x39 */
+ U16 Reserved10; /* 0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /* 0x3C */
+ U8 CurrentBootDeviceForm; /* 0x58 */
+ U8 Reserved11; /* 0x59 */
+ U16 Reserved12; /* 0x5A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /* 0x58 */
+} MPI2_CONFIG_PAGE_BIOS_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, MPI2_POINTER pMpi2BiosPage2_t;
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/* values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
+
+
+/* BIOS Page 3 */
+
+typedef struct _MPI2_ADAPTER_INFO
+{
+ U8 PciBusNumber; /* 0x00 */
+ U8 PciDeviceAndFunctionNumber; /* 0x01 */
+ U16 AdapterFlags; /* 0x02 */
+} MPI2_ADAPTER_INFO, MPI2_POINTER PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, MPI2_POINTER pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 GlobalFlags; /* 0x04 */
+ U32 BiosVersion; /* 0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[4]; /* 0x0C */
+ U32 Reserved1; /* 0x1C */
+} MPI2_CONFIG_PAGE_BIOS_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, MPI2_POINTER pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+
+/* values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/* BIOS Page 4 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY
+{
+ U64 ReassignmentWWID; /* 0x00 */
+ U64 ReassignmentDeviceName; /* 0x08 */
+} MPI2_BIOS4_ENTRY, MPI2_POINTER PTR_MPI2_BIOS4_ENTRY,
+ Mpi2MBios4Entry_t, MPI2_POINTER pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhys; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ MPI2_BIOS4_ENTRY Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /* 0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, MPI2_POINTER pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/* RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK
+{
+ U8 RAIDSetNum; /* 0x00 */
+ U8 PhysDiskMap; /* 0x01 */
+ U8 PhysDiskNum; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, MPI2_POINTER PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, MPI2_POINTER pMpi2RaidVol0PhysDisk_t;
+
+/* defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS
+{
+ U16 Settings; /* 0x00 */
+ U8 HotSparePool; /* 0x01 */
+ U8 Reserved; /* 0x02 */
+} MPI2_RAIDVOL0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t, MPI2_POINTER pMpi2RaidVol0Settings_t;
+
+/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/* RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhysDisks at runtime.
+ */
+#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 DevHandle; /* 0x04 */
+ U8 VolumeState; /* 0x06 */
+ U8 VolumeType; /* 0x07 */
+ U32 VolumeStatusFlags; /* 0x08 */
+ MPI2_RAIDVOL0_SETTINGS VolumeSettings; /* 0x0C */
+ U64 MaxLBA; /* 0x10 */
+ U32 StripeSize; /* 0x18 */
+ U16 BlockSize; /* 0x1C */
+ U16 Reserved1; /* 0x1E */
+ U8 SupportedPhysDisks; /* 0x20 */
+ U8 ResyncRate; /* 0x21 */
+ U16 DataScrubDuration; /* 0x22 */
+ U8 NumPhysDisks; /* 0x24 */
+ U8 Reserved2; /* 0x25 */
+ U8 Reserved3; /* 0x26 */
+ U8 InactiveStatus; /* 0x27 */
+ MPI2_RAIDVOL0_PHYS_DISK PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /* 0x28 */
+} MPI2_CONFIG_PAGE_RAID_VOL_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
+ Mpi2RaidVolPage0_t, MPI2_POINTER pMpi2RaidVolPage0_t;
+
+#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A)
+
+/* values for RAID VolumeState */
+#define MPI2_RAID_VOL_STATE_MISSING (0x00)
+#define MPI2_RAID_VOL_STATE_FAILED (0x01)
+#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02)
+#define MPI2_RAID_VOL_STATE_ONLINE (0x03)
+#define MPI2_RAID_VOL_STATE_DEGRADED (0x04)
+#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05)
+
+/* values for RAID VolumeType */
+#define MPI2_RAID_VOL_TYPE_RAID0 (0x00)
+#define MPI2_RAID_VOL_TYPE_RAID1E (0x01)
+#define MPI2_RAID_VOL_TYPE_RAID1 (0x02)
+#define MPI2_RAID_VOL_TYPE_RAID10 (0x05)
+#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/* values for RAID Volume Page 0 VolumeStatusFlags field */
+#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
+#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004)
+#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001)
+
+/* values for RAID Volume Page 0 SupportedPhysDisks field */
+#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08)
+#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04)
+#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02)
+#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01)
+
+/* values for RAID Volume Page 0 InactiveStatus field */
+#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
+
+
+/* RAID Volume Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 DevHandle; /* 0x04 */
+ U16 Reserved0; /* 0x06 */
+ U8 GUID[24]; /* 0x08 */
+ U8 Name[16]; /* 0x20 */
+ U64 WWID; /* 0x30 */
+ U32 Reserved1; /* 0x38 */
+ U32 Reserved2; /* 0x3C */
+} MPI2_CONFIG_PAGE_RAID_VOL_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
+ Mpi2RaidVolPage1_t, MPI2_POINTER pMpi2RaidVolPage1_t;
+
+#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+/* RAID Physical Disk Page 0 */
+
+typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS
+{
+ U16 Reserved1; /* 0x00 */
+ U8 HotSparePool; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+} MPI2_RAIDPHYSDISK0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
+ Mpi2RaidPhysDisk0Settings_t, MPI2_POINTER pMpi2RaidPhysDisk0Settings_t;
+
+/* use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */
+
+typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA
+{
+ U8 VendorID[8]; /* 0x00 */
+ U8 ProductID[16]; /* 0x08 */
+ U8 ProductRevLevel[4]; /* 0x18 */
+ U8 SerialNum[32]; /* 0x1C */
+} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ Mpi2RaidPhysDisk0InquiryData_t, MPI2_POINTER pMpi2RaidPhysDisk0InquiryData_t;
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 DevHandle; /* 0x04 */
+ U8 Reserved1; /* 0x06 */
+ U8 PhysDiskNum; /* 0x07 */
+ MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /* 0x10 */
+ U32 Reserved3; /* 0x4C */
+ U8 PhysDiskState; /* 0x50 */
+ U8 OfflineReason; /* 0x51 */
+ U8 IncompatibleReason; /* 0x52 */
+ U8 PhysDiskAttributes; /* 0x53 */
+ U32 PhysDiskStatusFlags; /* 0x54 */
+ U64 DeviceMaxLBA; /* 0x58 */
+ U64 HostMaxLBA; /* 0x60 */
+ U64 CoercedMaxLBA; /* 0x68 */
+ U16 BlockSize; /* 0x70 */
+ U16 Reserved5; /* 0x72 */
+ U32 Reserved6; /* 0x74 */
+} MPI2_CONFIG_PAGE_RD_PDISK_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
+ Mpi2RaidPhysDiskPage0_t, MPI2_POINTER pMpi2RaidPhysDiskPage0_t;
+
+#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05)
+
+/* PhysDiskState defines */
+#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00)
+#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
+#define MPI2_RAID_PD_STATE_OFFLINE (0x02)
+#define MPI2_RAID_PD_STATE_ONLINE (0x03)
+#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04)
+#define MPI2_RAID_PD_STATE_DEGRADED (0x05)
+#define MPI2_RAID_PD_STATE_REBUILDING (0x06)
+#define MPI2_RAID_PD_STATE_OPTIMAL (0x07)
+
+/* OfflineReason defines */
+#define MPI2_PHYSDISK0_ONLINE (0x00)
+#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03)
+#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04)
+#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06)
+#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF)
+
+/* IncompatibleReason defines */
+#define MPI2_PHYSDISK0_COMPATIBLE (0x00)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
+
+/* PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
+#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
+#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
+#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
+#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
+
+/* PhysDiskStatusFlags defines */
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020)
+#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000)
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
+#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004)
+#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001)
+
+
+/* RAID Physical Disk Page 1 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
+#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+typedef struct _MPI2_RAIDPHYSDISK1_PATH
+{
+ U16 DevHandle; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U64 WWID; /* 0x04 */
+ U64 OwnerWWID; /* 0x0C */
+ U8 OwnerIdentifier; /* 0x14 */
+ U8 Reserved2; /* 0x15 */
+ U16 Flags; /* 0x16 */
+} MPI2_RAIDPHYSDISK1_PATH, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK1_PATH,
+ Mpi2RaidPhysDisk1Path_t, MPI2_POINTER pMpi2RaidPhysDisk1Path_t;
+
+/* RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
+#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004)
+#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhysDiskPaths; /* 0x04 */
+ U8 PhysDiskNum; /* 0x05 */
+ U16 Reserved1; /* 0x06 */
+ U32 Reserved2; /* 0x08 */
+ MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/* 0x0C */
+} MPI2_CONFIG_PAGE_RD_PDISK_1,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
+ Mpi2RaidPhysDiskPage1_t, MPI2_POINTER pMpi2RaidPhysDiskPage1_t;
+
+#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* values for fields used by several types of SAS Config Pages
+****************************************************************************/
+
+/* values for NegotiatedLinkRates fields */
+#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0)
+#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4)
+#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/* link rates used for Negotiated Physical and Logical Link Rate */
+#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
+
+
+/* values for AttachedPhyInfo fields */
+#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+
+#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+
+
+/* values for PhyInfo fields */
+#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
+
+#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
+#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
+#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000)
+#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+
+#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+
+#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020)
+
+
+/* values for SAS ProgrammedLinkRate fields */
+#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
+#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
+
+
+/* values for SAS HwLinkRate fields */
+#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
+#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+/* SAS IO Unit Page 0 */
+
+typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA
+{
+ U8 Port; /* 0x00 */
+ U8 PortFlags; /* 0x01 */
+ U8 PhyFlags; /* 0x02 */
+ U8 NegotiatedLinkRate; /* 0x03 */
+ U32 ControllerPhyDeviceInfo;/* 0x04 */
+ U16 AttachedDevHandle; /* 0x08 */
+ U16 ControllerDevHandle; /* 0x0A */
+ U32 DiscoveryStatus; /* 0x0C */
+ U32 Reserved; /* 0x10 */
+} MPI2_SAS_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
+ Mpi2SasIOUnit0PhyData_t, MPI2_POINTER pMpi2SasIOUnit0PhyData_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
+#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 NumPhys; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ Mpi2SasIOUnitPage0_t, MPI2_POINTER pMpi2SasIOUnitPage0_t;
+
+#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
+
+/* values for SAS IO Unit Page 0 PortFlags */
+#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
+#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/* see mpi2_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/* values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001)
+
+
+/* SAS IO Unit Page 1 */
+
+typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA
+{
+ U8 Port; /* 0x00 */
+ U8 PortFlags; /* 0x01 */
+ U8 PhyFlags; /* 0x02 */
+ U8 MaxMinLinkRate; /* 0x03 */
+ U32 ControllerPhyDeviceInfo; /* 0x04 */
+ U16 MaxTargetPortConnectTime; /* 0x08 */
+ U16 Reserved1; /* 0x0A */
+} MPI2_SAS_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
+ Mpi2SasIOUnit1PhyData_t, MPI2_POINTER pMpi2SasIOUnit1PhyData_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
+#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 ControlFlags; /* 0x08 */
+ U16 SASNarrowMaxQueueDepth; /* 0x0A */
+ U16 AdditionalControlFlags; /* 0x0C */
+ U16 SASWideMaxQueueDepth; /* 0x0E */
+ U8 NumPhys; /* 0x10 */
+ U8 SATAMaxQDepth; /* 0x11 */
+ U8 ReportDeviceMissingDelay; /* 0x12 */
+ U8 IODeviceMissingDelay; /* 0x13 */
+ MPI2_SAS_IO_UNIT1_PHY_DATA PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /* 0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ Mpi2SasIOUnitPage1_t, MPI2_POINTER pMpi2SasIOUnitPage1_t;
+
+#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
+
+/* values for SAS IO Unit Page 1 ControlFlags */
+#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+
+#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2)
+
+#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
+
+/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
+
+/* values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/* values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+
+/* see mpi2_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/* SAS IO Unit Page 4 */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
+{
+ U8 MaxTargetSpinup; /* 0x00 */
+ U8 SpinupDelay; /* 0x01 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
+
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */
+ U32 Reserved1; /* 0x18 */
+ U32 Reserved2; /* 0x1C */
+ U32 Reserved3; /* 0x20 */
+ U8 BootDeviceWaitTime; /* 0x24 */
+ U8 Reserved4; /* 0x25 */
+ U16 Reserved5; /* 0x26 */
+ U8 NumPhys; /* 0x28 */
+ U8 PEInitialSpinupDelay; /* 0x29 */
+ U8 PEReplyDelay; /* 0x2A */
+ U8 Flags; /* 0x2B */
+ U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /* 0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, MPI2_POINTER pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/* defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/* defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/* SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
+ U8 ControlFlags; /* 0x00 */
+ U8 PortWidthModGroup; /* 0x01 */
+ U16 InactivityTimerExponent; /* 0x02 */
+ U8 SATAPartialTimeout; /* 0x04 */
+ U8 Reserved2; /* 0x05 */
+ U8 SATASlumberTimeout; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+ U8 SASPartialTimeout; /* 0x08 */
+ U8 Reserved4; /* 0x09 */
+ U8 SASSlumberTimeout; /* 0x0A */
+ U8 Reserved5; /* 0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t;
+
+/* defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/* defines for PortWidthModeGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/* defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhys; /* 0x08 */
+ U8 Reserved1; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings
+ [MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/* SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
+ U8 CurrentStatus; /* 0x00 */
+ U8 CurrentModulation; /* 0x01 */
+ U8 CurrentUtilization; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/* defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/* defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U8 NumGroups; /* 0x10 */
+ U8 Reserved3; /* 0x11 */
+ U16 Reserved4; /* 0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
+
+
+/* SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
+ U8 Flags; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U8 Threshold75Pct; /* 0x04 */
+ U8 Threshold50Pct; /* 0x05 */
+ U8 Threshold25Pct; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/* defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 SamplingInterval; /* 0x08 */
+ U8 WindowLength; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U8 NumGroups; /* 0x14 */
+ U8 Reserved4; /* 0x15 */
+ U16 Reserved5; /* 0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/* SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 PowerManagementCapabilities;/* 0x0C */
+ U32 Reserved2; /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/* defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+
+
+
+/* SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U64 TimeStamp; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+ U32 FastPathPendedRequests; /* 0x18 */
+ U32 FastPathUnPendedRequests; /* 0x1C */
+ U32 FastPathHostRequestStarts; /* 0x20 */
+ U32 FastPathFirmwareRequestStarts; /* 0x24 */
+ U32 FastPathHostCompletions; /* 0x28 */
+ U32 FastPathFirmwareCompletions; /* 0x2C */
+ U32 NonFastPathRequestStarts; /* 0x30 */
+ U32 NonFastPathHostCompletions; /* 0x30 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/* SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 PhysicalPort; /* 0x08 */
+ U8 ReportGenLength; /* 0x09 */
+ U16 EnclosureHandle; /* 0x0A */
+ U64 SASAddress; /* 0x0C */
+ U32 DiscoveryStatus; /* 0x14 */
+ U16 DevHandle; /* 0x18 */
+ U16 ParentDevHandle; /* 0x1A */
+ U16 ExpanderChangeCount; /* 0x1C */
+ U16 ExpanderRouteIndexes; /* 0x1E */
+ U8 NumPhys; /* 0x20 */
+ U8 SASLevel; /* 0x21 */
+ U16 Flags; /* 0x22 */
+ U16 STPBusInactivityTimeLimit; /* 0x24 */
+ U16 STPMaxConnectTimeLimit; /* 0x26 */
+ U16 STP_SMP_NexusLossTime; /* 0x28 */
+ U16 MaxNumRoutedSasAddresses; /* 0x2A */
+ U64 ActiveZoneManagerSASAddress;/* 0x2C */
+ U16 ZoneLockInactivityLimit; /* 0x34 */
+ U16 Reserved1; /* 0x36 */
+ U8 TimeToReducedFunc; /* 0x38 */
+ U8 InitialTimeToReducedFunc; /* 0x39 */
+ U8 MaxReducedFuncTime; /* 0x3A */
+ U8 Reserved2; /* 0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+ Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/* values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+
+/* values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+
+
+/* SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 PhysicalPort; /* 0x08 */
+ U8 Reserved1; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U8 NumPhys; /* 0x0C */
+ U8 Phy; /* 0x0D */
+ U16 NumTableEntriesProgrammed; /* 0x0E */
+ U8 ProgrammedLinkRate; /* 0x10 */
+ U8 HwLinkRate; /* 0x11 */
+ U16 AttachedDevHandle; /* 0x12 */
+ U32 PhyInfo; /* 0x14 */
+ U32 AttachedDeviceInfo; /* 0x18 */
+ U16 ExpanderDevHandle; /* 0x1C */
+ U8 ChangeCount; /* 0x1E */
+ U8 NegotiatedLinkRate; /* 0x1F */
+ U8 PhyIdentifier; /* 0x20 */
+ U8 AttachedPhyIdentifier; /* 0x21 */
+ U8 Reserved3; /* 0x22 */
+ U8 DiscoveryInfo; /* 0x23 */
+ U32 AttachedPhyInfo; /* 0x24 */
+ U8 ZoneGroup; /* 0x28 */
+ U8 SelfConfigStatus; /* 0x29 */
+ U16 Reserved4; /* 0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+ Mpi2ExpanderPage1_t, MPI2_POINTER pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/* see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines used for the AttachedDeviceInfo field */
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/* values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/* SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 Slot; /* 0x08 */
+ U16 EnclosureHandle; /* 0x0A */
+ U64 SASAddress; /* 0x0C */
+ U16 ParentDevHandle; /* 0x14 */
+ U8 PhyNum; /* 0x16 */
+ U8 AccessStatus; /* 0x17 */
+ U16 DevHandle; /* 0x18 */
+ U8 AttachedPhyIdentifier; /* 0x1A */
+ U8 ZoneGroup; /* 0x1B */
+ U32 DeviceInfo; /* 0x1C */
+ U16 Flags; /* 0x20 */
+ U8 PhysicalPort; /* 0x22 */
+ U8 MaxPortConnections; /* 0x23 */
+ U64 DeviceName; /* 0x24 */
+ U8 PortGroups; /* 0x2C */
+ U8 DmaGroup; /* 0x2D */
+ U8 ControlGroup; /* 0x2E */
+ U8 EnclosureLevel; /* 0x2F */
+ U8 ConnectorName[4]; /* 0x30 */
+ U32 Reserved3; /* 0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+ Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x09)
+
+/* values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
+#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
+#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
+/* specific values for SATA Init failures */
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
+
+/* values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
+#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+
+
+/* SAS Device Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U64 SASAddress; /* 0x0C */
+ U32 Reserved2; /* 0x14 */
+ U16 DevHandle; /* 0x18 */
+ U16 Reserved3; /* 0x1A */
+ U8 InitialRegDeviceFIS[20];/* 0x1C */
+} MPI2_CONFIG_PAGE_SAS_DEV_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
+ Mpi2SasDevicePage1_t, MPI2_POINTER pMpi2SasDevicePage1_t;
+
+#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+/* SAS PHY Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 OwnerDevHandle; /* 0x08 */
+ U16 Reserved1; /* 0x0A */
+ U16 AttachedDevHandle; /* 0x0C */
+ U8 AttachedPhyIdentifier; /* 0x0E */
+ U8 Reserved2; /* 0x0F */
+ U32 AttachedPhyInfo; /* 0x10 */
+ U8 ProgrammedLinkRate; /* 0x14 */
+ U8 HwLinkRate; /* 0x15 */
+ U8 ChangeCount; /* 0x16 */
+ U8 Flags; /* 0x17 */
+ U32 PhyInfo; /* 0x18 */
+ U8 NegotiatedLinkRate; /* 0x1C */
+ U8 Reserved3; /* 0x1D */
+ U16 Reserved4; /* 0x1E */
+} MPI2_CONFIG_PAGE_SAS_PHY_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
+ Mpi2SasPhyPage0_t, MPI2_POINTER pMpi2SasPhyPage0_t;
+
+#define MPI2_SASPHY0_PAGEVERSION (0x03)
+
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/* values for SAS PHY Page 0 Flags field */
+#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/* SAS PHY Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 InvalidDwordCount; /* 0x0C */
+ U32 RunningDisparityErrorCount; /* 0x10 */
+ U32 LossDwordSynchCount; /* 0x14 */
+ U32 PhyResetProblemCount; /* 0x18 */
+} MPI2_CONFIG_PAGE_SAS_PHY_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
+ Mpi2SasPhyPage1_t, MPI2_POINTER pMpi2SasPhyPage1_t;
+
+#define MPI2_SASPHY1_PAGEVERSION (0x01)
+
+
+/* SAS PHY Page 2 */
+
+typedef struct _MPI2_SASPHY2_PHY_EVENT {
+ U8 PhyEventCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 PhyEventInfo; /* 0x04 */
+} MPI2_SASPHY2_PHY_EVENT, MPI2_POINTER PTR_MPI2_SASPHY2_PHY_EVENT,
+ Mpi2SasPhy2PhyEvent_t, MPI2_POINTER pMpi2SasPhy2PhyEvent_t;
+
+/* use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
+#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 NumPhyEvents; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_SASPHY2_PHY_EVENT PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX];
+ /* 0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
+ Mpi2SasPhyPage2_t, MPI2_POINTER pMpi2SasPhyPage2_t;
+
+#define MPI2_SASPHY2_PAGEVERSION (0x00)
+
+
+/* SAS PHY Page 3 */
+
+typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
+ U8 PhyEventCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U8 CounterType; /* 0x04 */
+ U8 ThresholdWindow; /* 0x05 */
+ U8 TimeUnits; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+ U32 EventThreshold; /* 0x08 */
+ U16 ThresholdFlags; /* 0x0C */
+ U16 Reserved4; /* 0x0E */
+} MPI2_SASPHY3_PHY_EVENT_CONFIG, MPI2_POINTER PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ Mpi2SasPhy3PhyEventConfig_t, MPI2_POINTER pMpi2SasPhy3PhyEventConfig_t;
+
+/* values for PhyEventCode field */
+#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
+#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+
+/* values for the CounterType field */
+#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/* values for the TimeUnits field */
+#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+
+/* values for the ThresholdFlags field */
+#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
+#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 NumPhyEvents; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_SASPHY3_PHY_EVENT_CONFIG PhyEventConfig
+ [MPI2_SASPHY3_PHY_EVENT_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
+ Mpi2SasPhyPage3_t, MPI2_POINTER pMpi2SasPhyPage3_t;
+
+#define MPI2_SASPHY3_PAGEVERSION (0x00)
+
+
+/* SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 Reserved1; /* 0x08 */
+ U8 Reserved2; /* 0x0A */
+ U8 Flags; /* 0x0B */
+ U8 InitialFrame[28]; /* 0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/* values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
+/****************************************************************************
+* SAS Port Config Pages
+****************************************************************************/
+
+/* SAS Port Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 PortNumber; /* 0x08 */
+ U8 PhysicalPort; /* 0x09 */
+ U8 PortWidth; /* 0x0A */
+ U8 PhysicalPortWidth; /* 0x0B */
+ U8 ZoneGroup; /* 0x0C */
+ U8 Reserved1; /* 0x0D */
+ U16 Reserved2; /* 0x0E */
+ U64 SASAddress; /* 0x10 */
+ U32 DeviceInfo; /* 0x18 */
+ U32 Reserved3; /* 0x1C */
+ U32 Reserved4; /* 0x20 */
+} MPI2_CONFIG_PAGE_SAS_PORT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
+ Mpi2SasPortPage0_t, MPI2_POINTER pMpi2SasPortPage0_t;
+
+#define MPI2_SASPORT0_PAGEVERSION (0x00)
+
+/* see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+/* SAS Enclosure Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U64 EnclosureLogicalID; /* 0x0C */
+ U16 Flags; /* 0x14 */
+ U16 EnclosureHandle; /* 0x16 */
+ U16 NumSlots; /* 0x18 */
+ U16 StartSlot; /* 0x1A */
+ U8 Reserved2; /* 0x1C */
+ U8 EnclosureLevel; /* 0x1D */
+ U16 SEPDevHandle; /* 0x1E */
+ U32 Reserved3; /* 0x20 */
+ U32 Reserved4; /* 0x24 */
+} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t;
+
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
+
+/* values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Page
+****************************************************************************/
+
+/* Log Page 0 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumLogEntries at runtime.
+ */
+#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
+#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_LOG_0_ENTRY
+{
+ U64 TimeStamp; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U16 LogSequence; /* 0x0C */
+ U16 LogEntryQualifier; /* 0x0E */
+ U8 VP_ID; /* 0x10 */
+ U8 VF_ID; /* 0x11 */
+ U16 Reserved2; /* 0x12 */
+ U8 LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/* 0x14 */
+} MPI2_LOG_0_ENTRY, MPI2_POINTER PTR_MPI2_LOG_0_ENTRY,
+ Mpi2Log0Entry_t, MPI2_POINTER pMpi2Log0Entry_t;
+
+/* values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
+#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
+#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
+
+typedef struct _MPI2_CONFIG_PAGE_LOG_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U16 NumLogEntries; /* 0x10 */
+ U16 Reserved3; /* 0x12 */
+ MPI2_LOG_0_ENTRY LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /* 0x14 */
+} MPI2_CONFIG_PAGE_LOG_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_LOG_0,
+ Mpi2LogPage0_t, MPI2_POINTER pMpi2LogPage0_t;
+
+#define MPI2_LOG_0_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* RAID Config Page
+****************************************************************************/
+
+/* RAID Page 0 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumElements at runtime.
+ */
+#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
+#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
+#endif
+
+typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT
+{
+ U16 ElementFlags; /* 0x00 */
+ U16 VolDevHandle; /* 0x02 */
+ U8 HotSparePool; /* 0x04 */
+ U8 PhysDiskNum; /* 0x05 */
+ U16 PhysDiskDevHandle; /* 0x06 */
+} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ MPI2_POINTER PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ Mpi2RaidConfig0ConfigElement_t, MPI2_POINTER pMpi2RaidConfig0ConfigElement_t;
+
+/* values for the ElementFlags field */
+#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
+#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
+#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
+
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 NumHotSpares; /* 0x08 */
+ U8 NumPhysDisks; /* 0x09 */
+ U8 NumVolumes; /* 0x0A */
+ U8 ConfigNum; /* 0x0B */
+ U32 Flags; /* 0x0C */
+ U8 ConfigGUID[24]; /* 0x10 */
+ U32 Reserved1; /* 0x28 */
+ U8 NumElements; /* 0x2C */
+ U8 Reserved2; /* 0x2D */
+ U16 Reserved3; /* 0x2E */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /* 0x30 */
+} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ Mpi2RaidConfigurationPage0_t, MPI2_POINTER pMpi2RaidConfigurationPage0_t;
+
+#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
+
+/* values for RAID Configuration Page 0 Flags field */
+#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
+
+
+/****************************************************************************
+* Driver Persistent Mapping Config Pages
+****************************************************************************/
+
+/* Driver Persistent Mapping Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY
+{
+ U64 PhysicalIdentifier; /* 0x00 */
+ U16 MappingInformation; /* 0x08 */
+ U16 DeviceIndex; /* 0x0A */
+ U32 PhysicalBitsMapping; /* 0x0C */
+ U32 Reserved1; /* 0x10 */
+} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ Mpi2DriverMap0Entry_t, MPI2_POINTER pMpi2DriverMap0Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /* 0x08 */
+} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ Mpi2DriverMappingPage0_t, MPI2_POINTER pMpi2DriverMappingPage0_t;
+
+#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
+
+/* values for Driver Persistent Mapping Page 0 MappingInformation field */
+#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
+#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
+#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
+
+
+/****************************************************************************
+* Ethernet Config Pages
+****************************************************************************/
+
+/* Ethernet Page 0 */
+
+/* IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR {
+ U32 IPv4Addr;
+ U32 IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR,
+ Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 NumInterfaces; /* 0x08 */
+ U8 Reserved0; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Status; /* 0x0C */
+ U8 MediaState; /* 0x10 */
+ U8 Reserved2; /* 0x11 */
+ U16 Reserved3; /* 0x12 */
+ U8 MacAddress[6]; /* 0x14 */
+ U8 Reserved4; /* 0x1A */
+ U8 Reserved5; /* 0x1B */
+ MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */
+ MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */
+ MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */
+ MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */
+ MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */
+ MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */
+ U8 HostName
+ [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+ Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
+
+/* values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
+
+/* values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
+#define MPI2_ETHPG0_MS_10MBIT (0x01)
+#define MPI2_ETHPG0_MS_100MBIT (0x02)
+#define MPI2_ETHPG0_MS_1GBIT (0x03)
+
+
+/* Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved0; /* 0x08 */
+ U32 Flags; /* 0x0C */
+ U8 MediaState; /* 0x10 */
+ U8 Reserved1; /* 0x11 */
+ U16 Reserved2; /* 0x12 */
+ U8 MacAddress[6]; /* 0x14 */
+ U8 Reserved3; /* 0x1A */
+ U8 Reserved4; /* 0x1B */
+ MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */
+ MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */
+ MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */
+ MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */
+ MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */
+ U32 Reserved5; /* 0x6C */
+ U32 Reserved6; /* 0x70 */
+ U32 Reserved7; /* 0x74 */
+ U32 Reserved8; /* 0x78 */
+ U8 HostName
+ [MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+ Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
+
+/* values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
+
+/* values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+
+
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ * Generic structure to use for product-specific extended manufacturing pages
+ * (currently Extended Manufacturing Page 40 through Extended Manufacturing
+ * Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 ProductSpecificInfo; /* 0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t,
+ MPI2_POINTER pMpi2ExtManufacturingPagePS_t;
+
+/* PageVersion should be provided by product-specific code */
+
+#endif
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
new file mode 100644
index 000000000..eea1a16b1
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.15
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_INIT_H
+#define MPI2_INIT_H
+
+/*****************************************************************************
+*
+* SCSI Initiator Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SCSI IO messages and associated structures
+****************************************************************************/
+
+typedef struct
+{
+ U8 CDB[20]; /* 0x00 */
+ U32 PrimaryReferenceTag; /* 0x14 */
+ U16 PrimaryApplicationTag; /* 0x18 */
+ U16 PrimaryApplicationTagMask; /* 0x1A */
+ U32 TransferLength; /* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
+
+typedef union
+{
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+
+/* SCSI IO Request Message */
+typedef struct _MPI2_SCSI_IO_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 SenseBufferLowAddress; /* 0x0C */
+ U16 SGLFlags; /* 0x10 */
+ U8 SenseBufferLength; /* 0x12 */
+ U8 Reserved4; /* 0x13 */
+ U8 SGLOffset0; /* 0x14 */
+ U8 SGLOffset1; /* 0x15 */
+ U8 SGLOffset2; /* 0x16 */
+ U8 SGLOffset3; /* 0x17 */
+ U32 SkipCount; /* 0x18 */
+ U32 DataLength; /* 0x1C */
+ U32 BidirectionalDataLength; /* 0x20 */
+ U16 IoFlags; /* 0x24 */
+ U16 EEDPFlags; /* 0x26 */
+ U32 EEDPBlockSize; /* 0x28 */
+ U32 SecondaryReferenceTag; /* 0x2C */
+ U16 SecondaryApplicationTag; /* 0x30 */
+ U16 ApplicationTagTranslationMask; /* 0x32 */
+ U8 LUN[8]; /* 0x34 */
+ U32 Control; /* 0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI2_SGE_IO_UNION SGL; /* 0x60 */
+
+} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST,
+ Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t;
+
+/* SCSI IO MsgFlags bits */
+
+/* MsgFlags for SenseBufferAddressSpace */
+#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
+#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
+#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+
+/* SCSI IO SGLFlags bits */
+
+/* base values for Data Location Address Space */
+#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+
+/* base values for Type */
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
+
+/* shift values for each sub-field */
+#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+
+/* number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
+
+/* SCSI IO IoFlags bits */
+
+/* Large CDB Address Space */
+#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
+#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
+
+#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
+#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/* SCSI IO EEDPFlags bits */
+
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
+
+/* SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
+
+/* SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/* alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
+
+
+/* SCSI IO Error Reply Message */
+typedef struct _MPI2_SCSI_IO_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 SCSIStatus; /* 0x0C */
+ U8 SCSIState; /* 0x0D */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TransferCount; /* 0x14 */
+ U32 SenseCount; /* 0x18 */
+ U32 ResponseInfo; /* 0x1C */
+ U16 TaskTag; /* 0x20 */
+ U16 SCSIStatusQualifier; /* 0x22 */
+ U32 BidirectionalTransferCount; /* 0x24 */
+ U32 Reserved5; /* 0x28 */
+ U32 Reserved6; /* 0x2C */
+} MPI2_SCSI_IO_REPLY, MPI2_POINTER PTR_MPI2_SCSI_IO_REPLY,
+ Mpi2SCSIIOReply_t, MPI2_POINTER pMpi2SCSIIOReply_t;
+
+/* SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI2_SCSI_STATUS_BUSY (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /* obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
+
+/* SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI2_SCSI_STATE_TERMINATED (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
+
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/* SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved1; /* 0x04 */
+ U8 TaskType; /* 0x05 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 LUN[8]; /* 0x0C */
+ U32 Reserved4[7]; /* 0x14 */
+ U16 TaskMID; /* 0x30 */
+ U16 Reserved5; /* 0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+ MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+ Mpi2SCSITaskManagementRequest_t,
+ MPI2_POINTER pMpi2SCSITaskManagementRequest_t;
+
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/* obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \
+ (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/* MsgFlags bits */
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
+
+
+/* SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 ResponseCode; /* 0x04 */
+ U8 TaskType; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TerminationCount; /* 0x14 */
+ U32 ResponseInfo; /* 0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/* SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Action; /* 0x04 */
+ U8 Flags; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 SlotStatus; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+ U32 Reserved5; /* 0x18 */
+ U16 Slot; /* 0x1C */
+ U16 EnclosureHandle; /* 0x1E */
+} MPI2_SEP_REQUEST, MPI2_POINTER PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, MPI2_POINTER pMpi2SepRequest_t;
+
+/* Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/* Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+
+/* SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
+
+
+/* SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Action; /* 0x04 */
+ U8 Flags; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 SlotStatus; /* 0x14 */
+ U32 Reserved4; /* 0x18 */
+ U16 Slot; /* 0x1C */
+ U16 EnclosureHandle; /* 0x1E */
+} MPI2_SEP_REPLY, MPI2_POINTER PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, MPI2_POINTER pMpi2SepReply_t;
+
+/* SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
+
+#endif
+
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
new file mode 100644
index 000000000..b02de48be
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -0,0 +1,1708 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
+ *
+ * mpi2_ioc.h Version: 02.00.24
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocalFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the indivdual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_IOC_H
+#define MPI2_IOC_H
+
+/*****************************************************************************
+*
+* IOC Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IOCInit message
+****************************************************************************/
+
+/* IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST
+{
+ U8 WhoInit; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 MsgVersion; /* 0x0C */
+ U16 HeaderVersion; /* 0x0E */
+ U32 Reserved5; /* 0x10 */
+ U16 Reserved6; /* 0x14 */
+ U8 Reserved7; /* 0x16 */
+ U8 HostMSIxVectors; /* 0x17 */
+ U16 Reserved8; /* 0x18 */
+ U16 SystemRequestFrameSize; /* 0x1A */
+ U16 ReplyDescriptorPostQueueDepth; /* 0x1C */
+ U16 ReplyFreeQueueDepth; /* 0x1E */
+ U32 SenseBufferAddressHigh; /* 0x20 */
+ U32 SystemReplyAddressHigh; /* 0x24 */
+ U64 SystemRequestFrameBaseAddress; /* 0x28 */
+ U64 ReplyDescriptorPostQueueAddress;/* 0x30 */
+ U64 ReplyFreeQueueAddress; /* 0x38 */
+ U64 TimeStamp; /* 0x40 */
+} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
+
+/* WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI2_WHOINIT_ROM_BIOS (0x02)
+#define MPI2_WHOINIT_PCI_PEER (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_WHOINIT_MANUFACTURER (0x05)
+
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
+
+/* MsgVersion */
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
+
+/* HeaderVersion */
+#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+
+/* minimum depth for a Reply Descriptor Post Queue */
+#define MPI2_RDPQ_DEPTH_MIN (16)
+
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+ U64 RDPQBaseAddress; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry;
+
+/* IOCInit Reply message */
+typedef struct _MPI2_IOC_INIT_REPLY
+{
+ U8 WhoInit; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_IOC_INIT_REPLY, MPI2_POINTER PTR_MPI2_IOC_INIT_REPLY,
+ Mpi2IOCInitReply_t, MPI2_POINTER pMpi2IOCInitReply_t;
+
+
+/****************************************************************************
+* IOCFacts message
+****************************************************************************/
+
+/* IOCFacts Request message */
+typedef struct _MPI2_IOC_FACTS_REQUEST
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+} MPI2_IOC_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_IOC_FACTS_REQUEST,
+ Mpi2IOCFactsRequest_t, MPI2_POINTER pMpi2IOCFactsRequest_t;
+
+
+/* IOCFacts Reply message */
+typedef struct _MPI2_IOC_FACTS_REPLY
+{
+ U16 MsgVersion; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 HeaderVersion; /* 0x04 */
+ U8 IOCNumber; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U16 IOCExceptions; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U8 MaxChainDepth; /* 0x14 */
+ U8 WhoInit; /* 0x15 */
+ U8 NumberOfPorts; /* 0x16 */
+ U8 MaxMSIxVectors; /* 0x17 */
+ U16 RequestCredit; /* 0x18 */
+ U16 ProductID; /* 0x1A */
+ U32 IOCCapabilities; /* 0x1C */
+ MPI2_VERSION_UNION FWVersion; /* 0x20 */
+ U16 IOCRequestFrameSize; /* 0x24 */
+ U16 Reserved3; /* 0x26 */
+ U16 MaxInitiators; /* 0x28 */
+ U16 MaxTargets; /* 0x2A */
+ U16 MaxSasExpanders; /* 0x2C */
+ U16 MaxEnclosures; /* 0x2E */
+ U16 ProtocolFlags; /* 0x30 */
+ U16 HighPriorityCredit; /* 0x32 */
+ U16 MaxReplyDescriptorPostQueueDepth; /* 0x34 */
+ U8 ReplyFrameSize; /* 0x36 */
+ U8 MaxVolumes; /* 0x37 */
+ U16 MaxDevHandle; /* 0x38 */
+ U16 MaxPersistentEntries; /* 0x3A */
+ U16 MinDevHandle; /* 0x3C */
+ U16 Reserved4; /* 0x3E */
+} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
+ Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
+
+/* MsgVersion */
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
+
+/* HeaderVersion */
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
+
+/* IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200)
+#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
+
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060)
+
+#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
+#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008)
+#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
+#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
+#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
+
+/* defines for WhoInit field are after the IOCInit Request */
+
+/* ProductID field uses MPI2_FW_HEADER_PID_ */
+
+/* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
+#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
+#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
+#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
+#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
+#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
+#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
+#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
+#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
+#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
+#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
+#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
+
+/* ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+
+
+/****************************************************************************
+* PortFacts message
+****************************************************************************/
+
+/* PortFacts Request message */
+typedef struct _MPI2_PORT_FACTS_REQUEST
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 PortNumber; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+} MPI2_PORT_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_PORT_FACTS_REQUEST,
+ Mpi2PortFactsRequest_t, MPI2_POINTER pMpi2PortFactsRequest_t;
+
+/* PortFacts Reply message */
+typedef struct _MPI2_PORT_FACTS_REPLY
+{
+ U16 Reserved1; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 PortNumber; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U8 Reserved5; /* 0x14 */
+ U8 PortType; /* 0x15 */
+ U16 Reserved6; /* 0x16 */
+ U16 MaxPostedCmdBuffers; /* 0x18 */
+ U16 Reserved7; /* 0x1A */
+} MPI2_PORT_FACTS_REPLY, MPI2_POINTER PTR_MPI2_PORT_FACTS_REPLY,
+ Mpi2PortFactsReply_t, MPI2_POINTER pMpi2PortFactsReply_t;
+
+/* PortType values */
+#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00)
+#define MPI2_PORTFACTS_PORTTYPE_FC (0x10)
+#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+
+
+/****************************************************************************
+* PortEnable message
+****************************************************************************/
+
+/* PortEnable Request message */
+typedef struct _MPI2_PORT_ENABLE_REQUEST
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2; /* 0x04 */
+ U8 PortFlags; /* 0x05 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+} MPI2_PORT_ENABLE_REQUEST, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REQUEST,
+ Mpi2PortEnableRequest_t, MPI2_POINTER pMpi2PortEnableRequest_t;
+
+
+/* PortEnable Reply message */
+typedef struct _MPI2_PORT_ENABLE_REPLY
+{
+ U16 Reserved1; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2; /* 0x04 */
+ U8 PortFlags; /* 0x05 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_PORT_ENABLE_REPLY, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REPLY,
+ Mpi2PortEnableReply_t, MPI2_POINTER pMpi2PortEnableReply_t;
+
+
+/****************************************************************************
+* EventNotification message
+****************************************************************************/
+
+/* EventNotification Request message */
+#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 Reserved6; /* 0x10 */
+ U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */
+ U16 SASBroadcastPrimitiveMasks; /* 0x24 */
+ U16 SASNotifyPrimitiveMasks; /* 0x26 */
+ U32 Reserved8; /* 0x28 */
+} MPI2_EVENT_NOTIFICATION_REQUEST,
+ MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
+ Mpi2EventNotificationRequest_t, MPI2_POINTER pMpi2EventNotificationRequest_t;
+
+
+/* EventNotification Reply message */
+typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
+{
+ U16 EventDataLength; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 AckRequired; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U16 Event; /* 0x14 */
+ U16 Reserved4; /* 0x16 */
+ U32 EventContext; /* 0x18 */
+ U32 EventData[1]; /* 0x1C */
+} MPI2_EVENT_NOTIFICATION_REPLY, MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REPLY,
+ Mpi2EventNotificationReply_t, MPI2_POINTER pMpi2EventNotificationReply_t;
+
+/* AckRequired */
+#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
+
+/* Event */
+#define MPI2_EVENT_LOG_DATA (0x0001)
+#define MPI2_EVENT_STATE_CHANGE (0x0002)
+#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
+#define MPI2_EVENT_EVENT_CHANGE (0x000A)
+#define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */
+#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
+#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
+#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
+#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
+#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
+#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
+#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
+#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_IR_VOLUME (0x001E)
+#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
+#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
+#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
+#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE (0x0028)
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
+
+/* Log Entry Added Event data */
+
+/* the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
+#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED
+{
+ U64 TimeStamp; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U16 LogSequence; /* 0x0C */
+ U16 LogEntryQualifier; /* 0x0E */
+ U8 VP_ID; /* 0x10 */
+ U8 VF_ID; /* 0x11 */
+ U16 Reserved2; /* 0x12 */
+ U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH];/* 0x14 */
+} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t;
+
+/* GPIO Interrupt Event data */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
+ U8 GPIONum; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
+
+/* Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+ U16 Status; /* 0x00 */
+ U8 SensorNum; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U16 CurrentTemperature; /* 0x04 */
+ U16 Reserved2; /* 0x06 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE,
+Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t;
+
+/* Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001)
+
+
+/* Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+ U8 SourceVF_ID; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+ U32 HostData[1]; /* 0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t;
+
+
+/* Hard Reset Received Event data */
+
+typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
+{
+ U8 Reserved1; /* 0x00 */
+ U8 Port; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ Mpi2EventDataHardResetReceived_t,
+ MPI2_POINTER pMpi2EventDataHardResetReceived_t;
+
+/* Task Set Full Event data */
+/* this event is obsolete */
+
+typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL
+{
+ U16 DevHandle; /* 0x00 */
+ U16 CurrentDepth; /* 0x02 */
+} MPI2_EVENT_DATA_TASK_SET_FULL, MPI2_POINTER PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
+ Mpi2EventDataTaskSetFull_t, MPI2_POINTER pMpi2EventDataTaskSetFull_t;
+
+
+/* SAS Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
+{
+ U16 TaskTag; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U8 ASC; /* 0x04 */
+ U8 ASCQ; /* 0x05 */
+ U16 DevHandle; /* 0x06 */
+ U32 Reserved2; /* 0x08 */
+ U64 SASAddress; /* 0x0C */
+ U8 LUN[8]; /* 0x14 */
+} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ Mpi2EventDataSasDeviceStatusChange_t,
+ MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t;
+
+/* SAS Device Status Change Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
+
+
+/* Integrated RAID Operation Status Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS
+{
+ U16 VolDevHandle; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U8 RAIDOperation; /* 0x04 */
+ U8 PercentComplete; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U32 ElapsedSeconds; /* 0x08 */
+} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ Mpi2EventDataIrOperationStatus_t,
+ MPI2_POINTER pMpi2EventDataIrOperationStatus_t;
+
+/* Integrated RAID Operation Status Event data RAIDOperation values */
+#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
+#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
+#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
+#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
+
+
+/* Integrated RAID Volume Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_VOLUME
+{
+ U16 VolDevHandle; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 NewValue; /* 0x04 */
+ U32 PreviousValue; /* 0x08 */
+} MPI2_EVENT_DATA_IR_VOLUME, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_VOLUME,
+ Mpi2EventDataIrVolume_t, MPI2_POINTER pMpi2EventDataIrVolume_t;
+
+/* Integrated RAID Volume Event data ReasonCode values */
+#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
+
+
+/* Integrated RAID Physical Disk Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 PhysDiskNum; /* 0x03 */
+ U16 PhysDiskDevHandle; /* 0x04 */
+ U16 Reserved2; /* 0x06 */
+ U16 Slot; /* 0x08 */
+ U16 EnclosureHandle; /* 0x0A */
+ U32 NewValue; /* 0x0C */
+ U32 PreviousValue; /* 0x10 */
+} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ Mpi2EventDataIrPhysicalDisk_t, MPI2_POINTER pMpi2EventDataIrPhysicalDisk_t;
+
+/* Integrated RAID Physical Disk Event data ReasonCode values */
+#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
+
+
+/* Integrated RAID Configuration Change List Event data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumElements at runtime.
+ */
+#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
+#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT
+{
+ U16 ElementFlags; /* 0x00 */
+ U16 VolDevHandle; /* 0x02 */
+ U8 ReasonCode; /* 0x04 */
+ U8 PhysDiskNum; /* 0x05 */
+ U16 PhysDiskDevHandle; /* 0x06 */
+} MPI2_EVENT_IR_CONFIG_ELEMENT, MPI2_POINTER PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
+ Mpi2EventIrConfigElement_t, MPI2_POINTER pMpi2EventIrConfigElement_t;
+
+/* IR Configuration Change List Event data ElementFlags values */
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
+
+/* IR Configuration Change List Event data ReasonCode values */
+#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01)
+#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02)
+#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
+#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04)
+#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
+
+typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST
+{
+ U8 NumElements; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 Reserved2; /* 0x02 */
+ U8 ConfigNum; /* 0x03 */
+ U32 Flags; /* 0x04 */
+ MPI2_EVENT_IR_CONFIG_ELEMENT ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT]; /* 0x08 */
+} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ Mpi2EventDataIrConfigChangeList_t,
+ MPI2_POINTER pMpi2EventDataIrConfigChangeList_t;
+
+/* IR Configuration Change List Event data Flags values */
+#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
+
+
+/* SAS Discovery Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY
+{
+ U8 Flags; /* 0x00 */
+ U8 ReasonCode; /* 0x01 */
+ U8 PhysicalPort; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 DiscoveryStatus; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_DISCOVERY,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
+ Mpi2EventDataSasDiscovery_t, MPI2_POINTER pMpi2EventDataSasDiscovery_t;
+
+/* SAS Discovery Event data Flags values */
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02)
+#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01)
+
+/* SAS Discovery Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+
+/* SAS Discovery Event data DiscoveryStatus values */
+#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400)
+#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001)
+
+
+/* SAS Broadcast Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE
+{
+ U8 PhyNum; /* 0x00 */
+ U8 Port; /* 0x01 */
+ U8 PortWidth; /* 0x02 */
+ U8 Primitive; /* 0x03 */
+} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ Mpi2EventDataSasBroadcastPrimitive_t,
+ MPI2_POINTER pMpi2EventDataSasBroadcastPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI2_EVENT_PRIMITIVE_SES (0x02)
+#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+/* SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+ U8 PhyNum; /* 0x00 */
+ U8 Port; /* 0x01 */
+ U8 Reserved1; /* 0x02 */
+ U8 Primitive; /* 0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+Mpi2EventDataSasNotifyPrimitive_t,
+MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
+
+/* SAS Initiator Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE
+{
+ U8 ReasonCode; /* 0x00 */
+ U8 PhysicalPort; /* 0x01 */
+ U16 DevHandle; /* 0x02 */
+ U64 SASAddress; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasInitDevStatusChange_t,
+ MPI2_POINTER pMpi2EventDataSasInitDevStatusChange_t;
+
+/* SAS Initiator Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+
+
+/* SAS Initiator Device Table Overflow Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
+{
+ U16 MaxInit; /* 0x00 */
+ U16 CurrentInit; /* 0x02 */
+ U64 SASAddress; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ Mpi2EventDataSasInitTableOverflow_t,
+ MPI2_POINTER pMpi2EventDataSasInitTableOverflow_t;
+
+
+/* SAS Topology Change List Event data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumEntries at runtime.
+ */
+#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY
+{
+ U16 AttachedDevHandle; /* 0x00 */
+ U8 LinkRate; /* 0x02 */
+ U8 PhyStatus; /* 0x03 */
+} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, MPI2_POINTER PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
+ Mpi2EventSasTopoPhyEntry_t, MPI2_POINTER pMpi2EventSasTopoPhyEntry_t;
+
+typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
+{
+ U16 EnclosureHandle; /* 0x00 */
+ U16 ExpanderDevHandle; /* 0x02 */
+ U8 NumPhys; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U8 NumEntries; /* 0x08 */
+ U8 StartPhyNum; /* 0x09 */
+ U8 ExpStatus; /* 0x0A */
+ U8 PhysicalPort; /* 0x0B */
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /* 0x0C*/
+} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ Mpi2EventDataSasTopologyChangeList_t,
+ MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
+
+/* values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+
+/* defines for the LinkRate field */
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0)
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+
+#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
+
+/* values for the PhyStatus field */
+#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
+#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10)
+/* values for the PhyStatus ReasonCode sub-field */
+#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
+#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
+#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
+
+
+/* SAS Enclosure Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE
+{
+ U16 EnclosureHandle; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U64 EnclosureLogicalID; /* 0x04 */
+ U16 NumSlots; /* 0x0C */
+ U16 StartSlot; /* 0x0E */
+ U32 PhyBits; /* 0x10 */
+} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasEnclDevStatusChange_t,
+ MPI2_POINTER pMpi2EventDataSasEnclDevStatusChange_t;
+
+/* SAS Enclosure Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+
+
+/* SAS PHY Counter Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
+ U64 TimeStamp; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 PhyEventCode; /* 0x0C */
+ U8 PhyNum; /* 0x0D */
+ U16 Reserved2; /* 0x0E */
+ U32 PhyEventInfo; /* 0x10 */
+ U8 CounterType; /* 0x14 */
+ U8 ThresholdWindow; /* 0x15 */
+ U8 TimeUnits; /* 0x16 */
+ U8 Reserved3; /* 0x17 */
+ U32 EventThreshold; /* 0x18 */
+ U16 ThresholdFlags; /* 0x1C */
+ U16 Reserved4; /* 0x1E */
+} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ Mpi2EventDataSasPhyCounter_t, MPI2_POINTER pMpi2EventDataSasPhyCounter_t;
+
+/* use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h for the
+ * PhyEventCode field
+ * use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h for the
+ * CounterType field
+ * use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h for the
+ * TimeUnits field
+ * use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the
+ * ThresholdFlags field
+ * */
+
+
+/* SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE {
+ U8 ReasonCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t;
+
+/* SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
+
+
+/* Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+ U8 Flags; /* 0x00 */
+ U8 NegotiatedLinkRate; /* 0x01 */
+ U8 PhyNum; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U32 Reserved1; /* 0x04 */
+ U8 InitialFrame[28]; /* 0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
+
+/* values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for
+ * the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+ U8 DescriptorType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t;
+
+/* values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+
+
+/****************************************************************************
+* EventAck message
+****************************************************************************/
+
+/* EventAck Request message */
+typedef struct _MPI2_EVENT_ACK_REQUEST
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Event; /* 0x0C */
+ U16 Reserved5; /* 0x0E */
+ U32 EventContext; /* 0x10 */
+} MPI2_EVENT_ACK_REQUEST, MPI2_POINTER PTR_MPI2_EVENT_ACK_REQUEST,
+ Mpi2EventAckRequest_t, MPI2_POINTER pMpi2EventAckRequest_t;
+
+
+/* EventAck Reply message */
+typedef struct _MPI2_EVENT_ACK_REPLY
+{
+ U16 Reserved1; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_EVENT_ACK_REPLY, MPI2_POINTER PTR_MPI2_EVENT_ACK_REPLY,
+ Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t;
+
+
+/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/* SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+ U16 HostDataLength; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 Reserved4; /* 0x0C */
+ U8 DestVF_ID; /* 0x0D */
+ U16 Reserved5; /* 0x0E */
+ U32 Reserved6; /* 0x10 */
+ U32 Reserved7; /* 0x14 */
+ U32 Reserved8; /* 0x18 */
+ U32 Reserved9; /* 0x1C */
+ U32 Reserved10; /* 0x20 */
+ U32 HostData[1]; /* 0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t;
+
+
+/* SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+ U16 HostDataLength; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t;
+
+
+/****************************************************************************
+* FWDownload message
+****************************************************************************/
+
+/* FWDownload Request message */
+typedef struct _MPI2_FW_DOWNLOAD_REQUEST
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 TotalImageSize; /* 0x0C */
+ U32 Reserved5; /* 0x10 */
+ MPI2_MPI_SGE_UNION SGL; /* 0x14 */
+} MPI2_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REQUEST,
+ Mpi2FWDownloadRequest, MPI2_POINTER pMpi2FWDownloadRequest;
+
+#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+
+/* FWDownload TransactionContext Element */
+typedef struct _MPI2_FW_DOWNLOAD_TCSGE
+{
+ U8 Reserved1; /* 0x00 */
+ U8 ContextSize; /* 0x01 */
+ U8 DetailsLength; /* 0x02 */
+ U8 Flags; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 ImageOffset; /* 0x08 */
+ U32 ImageSize; /* 0x0C */
+} MPI2_FW_DOWNLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_TCSGE,
+ Mpi2FWDownloadTCSGE_t, MPI2_POINTER pMpi2FWDownloadTCSGE_t;
+
+/* FWDownload Reply message */
+typedef struct _MPI2_FW_DOWNLOAD_REPLY
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_FW_DOWNLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REPLY,
+ Mpi2FWDownloadReply_t, MPI2_POINTER pMpi2FWDownloadReply_t;
+
+
+/****************************************************************************
+* FWUpload message
+****************************************************************************/
+
+/* FWUpload Request message */
+typedef struct _MPI2_FW_UPLOAD_REQUEST
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 Reserved6; /* 0x10 */
+ MPI2_MPI_SGE_UNION SGL; /* 0x14 */
+} MPI2_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REQUEST,
+ Mpi2FWUploadRequest_t, MPI2_POINTER pMpi2FWUploadRequest_t;
+
+#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00)
+#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+typedef struct _MPI2_FW_UPLOAD_TCSGE
+{
+ U8 Reserved1; /* 0x00 */
+ U8 ContextSize; /* 0x01 */
+ U8 DetailsLength; /* 0x02 */
+ U8 Flags; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 ImageOffset; /* 0x08 */
+ U32 ImageSize; /* 0x0C */
+} MPI2_FW_UPLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, MPI2_POINTER pMpi2FWUploadTCSGE_t;
+
+/* FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ActualImageSize; /* 0x14 */
+} MPI2_FW_UPLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REPLY,
+ Mpi2FWUploadReply_t, MPI2_POINTER pMPi2FWUploadReply_t;
+
+
+/* FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER
+{
+ U32 Signature; /* 0x00 */
+ U32 Signature0; /* 0x04 */
+ U32 Signature1; /* 0x08 */
+ U32 Signature2; /* 0x0C */
+ MPI2_VERSION_UNION MPIVersion; /* 0x10 */
+ MPI2_VERSION_UNION FWVersion; /* 0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /* 0x18 */
+ MPI2_VERSION_UNION PackageVersion; /* 0x1C */
+ U16 VendorID; /* 0x20 */
+ U16 ProductID; /* 0x22 */
+ U16 ProtocolFlags; /* 0x24 */
+ U16 Reserved26; /* 0x26 */
+ U32 IOCCapabilities; /* 0x28 */
+ U32 ImageSize; /* 0x2C */
+ U32 NextImageHeaderOffset; /* 0x30 */
+ U32 Checksum; /* 0x34 */
+ U32 Reserved38; /* 0x38 */
+ U32 Reserved3C; /* 0x3C */
+ U32 Reserved40; /* 0x40 */
+ U32 Reserved44; /* 0x44 */
+ U32 Reserved48; /* 0x48 */
+ U32 Reserved4C; /* 0x4C */
+ U32 Reserved50; /* 0x50 */
+ U32 Reserved54; /* 0x54 */
+ U32 Reserved58; /* 0x58 */
+ U32 Reserved5C; /* 0x5C */
+ U32 Reserved60; /* 0x60 */
+ U32 FirmwareVersionNameWhat; /* 0x64 */
+ U8 FirmwareVersionName[32]; /* 0x68 */
+ U32 VendorNameWhat; /* 0x88 */
+ U8 VendorName[32]; /* 0x8C */
+ U32 PackageNameWhat; /* 0x88 */
+ U8 PackageName[32]; /* 0x8C */
+ U32 ReservedD0; /* 0xD0 */
+ U32 ReservedD4; /* 0xD4 */
+ U32 ReservedD8; /* 0xD8 */
+ U32 ReservedDC; /* 0xDC */
+ U32 ReservedE0; /* 0xE0 */
+ U32 ReservedE4; /* 0xE4 */
+ U32 ReservedE8; /* 0xE8 */
+ U32 ReservedEC; /* 0xEC */
+ U32 ReservedF0; /* 0xF0 */
+ U32 ReservedF4; /* 0xF4 */
+ U32 ReservedF8; /* 0xF8 */
+ U32 ReservedFC; /* 0xFC */
+} MPI2_FW_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, MPI2_POINTER pMpi2FWImageHeader_t;
+
+/* Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+
+/* Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+
+/* Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+
+/* Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+
+
+/* defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/* SAS */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+
+/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/* use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
+
+/* Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER
+
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Checksum; /* 0x04 */
+ U32 ImageSize; /* 0x08 */
+ U32 NextImageHeaderOffset; /* 0x0C */
+ U32 PackageVersion; /* 0x10 */
+ U32 Reserved3; /* 0x14 */
+ U32 Reserved4; /* 0x18 */
+ U32 Reserved5; /* 0x1C */
+ U8 IdentifyString[32]; /* 0x20 */
+} MPI2_EXT_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, MPI2_POINTER pMpi2ExtImageHeader_t;
+
+/* useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/* defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+#define MPI2_EXT_IMAGE_TYPE_MAX \
+ (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */
+
+
+
+/* FLASH Layout Extended Image Data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION
+{
+ U8 RegionType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 RegionOffset; /* 0x04 */
+ U32 RegionSize; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+} MPI2_FLASH_REGION, MPI2_POINTER PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, MPI2_POINTER pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT
+{
+ U32 FlashSize; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];/* 0x10 */
+} MPI2_FLASH_LAYOUT, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, MPI2_POINTER pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA
+{
+ U8 ImageRevision; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 SizeOfRegion; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+ U16 NumberOfLayouts; /* 0x04 */
+ U16 RegionsPerLayout; /* 0x06 */
+ U16 MinimumSectorAlignment; /* 0x08 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];/* 0x10 */
+} MPI2_FLASH_LAYOUT_DATA, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, MPI2_POINTER pMpi2FlashLayoutData_t;
+
+/* defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_INIT (0x0A)
+
+/* ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
+
+
+
+/* Supported Devices Extended Image Data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE
+{
+ U16 DeviceID; /* 0x00 */
+ U16 VendorID; /* 0x02 */
+ U16 DeviceIDMask; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+ U8 LowPCIRev; /* 0x08 */
+ U8 HighPCIRev; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+} MPI2_SUPPORTED_DEVICE, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, MPI2_POINTER pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA
+{
+ U8 ImageRevision; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 NumberOfDevices; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+ U32 Reserved3; /* 0x04 */
+ MPI2_SUPPORTED_DEVICE SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES]; /* 0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, MPI2_POINTER pMpi2SupportedDevicesData_t;
+
+/* ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
+
+
+/* Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER
+
+{
+ U32 BootFlags; /* 0x00 */
+ U32 ImageSize; /* 0x04 */
+ U32 Signature0; /* 0x08 */
+ U32 Signature1; /* 0x0C */
+ U32 Signature2; /* 0x10 */
+ U32 ResetVector; /* 0x14 */
+} MPI2_INIT_IMAGE_FOOTER, MPI2_POINTER PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, MPI2_POINTER pMpi2InitImageFooter_t;
+
+/* defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/* defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/* defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/* defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/* defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/* Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/* defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+ U8 HashImageType; /* 0x00 */
+ U8 HashAlgorithm; /* 0x01 */
+ U8 EncryptionAlgorithm; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 EncryptedHash[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+ U8 ImageVersion; /* 0x00 */
+ U8 NumHash; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 Reserved2; /* 0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t;
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/* PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
+ U8 Feature; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Parameter1; /* 0x0C */
+ U8 Parameter2; /* 0x0D */
+ U8 Parameter3; /* 0x0E */
+ U8 Parameter4; /* 0x0F */
+ U32 Reserved5; /* 0x10 */
+ U32 Reserved6; /* 0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t;
+
+/* defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/* Parameter1 contains a PHY number */
+/* Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/* Parameter3 and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION
+ * Feature */
+/* Parameter1 contains SAS port width modulation group number */
+/* Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/* Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/* Parameter4 is reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/* Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */
+/* Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */
+/* Parameter3 and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/* Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/* Parameter2, Parameter3, and Parameter4 are reserved */
+
+
+/* PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
+ U8 Feature; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t;
+
+
+#endif
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
new file mode 100644
index 000000000..7efa58ff0
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_raid.h
+ * Title: MPI Integrated RAID messages and structures
+ * Creation Date: April 26, 2007
+ *
+ * mpi2_raid.h Version: 02.00.10
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
+ * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RAID_H
+#define MPI2_RAID_H
+
+/*****************************************************************************
+*
+* Integrated RAID Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* RAID Action messages
+****************************************************************************/
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000)
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
+#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
+
+/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
+#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
+
+/* ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
+typedef struct _MPI2_RAID_ACTION_RATE_DATA
+{
+ U8 RateToChange; /* 0x00 */
+ U8 RateOrMode; /* 0x01 */
+ U16 DataScrubDuration; /* 0x02 */
+} MPI2_RAID_ACTION_RATE_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_RATE_DATA,
+ Mpi2RaidActionRateData_t, MPI2_POINTER pMpi2RaidActionRateData_t;
+
+#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
+#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
+#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
+
+/* ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION
+{
+ U8 RAIDFunction; /* 0x00 */
+ U8 Flags; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+} MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ MPI2_POINTER PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ Mpi2RaidActionStartRaidFunction_t,
+ MPI2_POINTER pMpi2RaidActionStartRaidFunction_t;
+
+/* defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
+
+/* defines for the Flags field */
+#define MPI2_RAID_ACTION_START_NEW (0x00)
+#define MPI2_RAID_ACTION_START_RESUME (0x01)
+
+/* ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION
+{
+ U8 RAIDFunction; /* 0x00 */
+ U8 Flags; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ MPI2_POINTER PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ Mpi2RaidActionStopRaidFunction_t,
+ MPI2_POINTER pMpi2RaidActionStopRaidFunction_t;
+
+/* defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
+
+/* defines for the Flags field */
+#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
+#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
+
+/* ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
+typedef struct _MPI2_RAID_ACTION_HOT_SPARE
+{
+ U8 HotSparePool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 DevHandle; /* 0x02 */
+} MPI2_RAID_ACTION_HOT_SPARE, MPI2_POINTER PTR_MPI2_RAID_ACTION_HOT_SPARE,
+ Mpi2RaidActionHotSpare_t, MPI2_POINTER pMpi2RaidActionHotSpare_t;
+
+/* ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
+typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE
+{
+ U8 Flags; /* 0x00 */
+ U8 DeviceFirmwareUpdateModeTimeout; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+} MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ MPI2_POINTER PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ Mpi2RaidActionFwUpdateMode_t, MPI2_POINTER pMpi2RaidActionFwUpdateMode_t;
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
+#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
+
+typedef union _MPI2_RAID_ACTION_DATA
+{
+ U32 Word;
+ MPI2_RAID_ACTION_RATE_DATA Rates;
+ MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
+ MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
+ MPI2_RAID_ACTION_HOT_SPARE HotSpare;
+ MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
+} MPI2_RAID_ACTION_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_DATA,
+ Mpi2RaidActionData_t, MPI2_POINTER pMpi2RaidActionData_t;
+
+
+/* RAID Action Request Message */
+typedef struct _MPI2_RAID_ACTION_REQUEST
+{
+ U8 Action; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 VolDevHandle; /* 0x04 */
+ U8 PhysDiskNum; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ MPI2_RAID_ACTION_DATA ActionDataWord; /* 0x10 */
+ MPI2_SGE_SIMPLE_UNION ActionDataSGE; /* 0x14 */
+} MPI2_RAID_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACTION_REQUEST,
+ Mpi2RaidActionRequest_t, MPI2_POINTER pMpi2RaidActionRequest_t;
+
+/* RAID Action request Action values */
+
+#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
+#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
+#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
+#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
+#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
+#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
+#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
+#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
+#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
+#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
+#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/* RAID Volume Creation Structure */
+
+/*
+ * The following define can be customized for the targeted product.
+ */
+#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
+#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1)
+#endif
+
+typedef struct _MPI2_RAID_VOLUME_PHYSDISK
+{
+ U8 RAIDSetNum; /* 0x00 */
+ U8 PhysDiskMap; /* 0x01 */
+ U16 PhysDiskDevHandle; /* 0x02 */
+} MPI2_RAID_VOLUME_PHYSDISK, MPI2_POINTER PTR_MPI2_RAID_VOLUME_PHYSDISK,
+ Mpi2RaidVolumePhysDisk_t, MPI2_POINTER pMpi2RaidVolumePhysDisk_t;
+
+/* defines for the PhysDiskMap field */
+#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT
+{
+ U8 NumPhysDisks; /* 0x00 */
+ U8 VolumeType; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 VolumeCreationFlags; /* 0x04 */
+ U32 VolumeSettings; /* 0x08 */
+ U8 Reserved2; /* 0x0C */
+ U8 ResyncRate; /* 0x0D */
+ U16 DataScrubDuration; /* 0x0E */
+ U64 VolumeMaxLBA; /* 0x10 */
+ U32 StripeSize; /* 0x18 */
+ U8 Name[16]; /* 0x1C */
+ MPI2_RAID_VOLUME_PHYSDISK PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS];/* 0x2C */
+} MPI2_RAID_VOLUME_CREATION_STRUCT,
+ MPI2_POINTER PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
+ Mpi2RaidVolumeCreationStruct_t, MPI2_POINTER pMpi2RaidVolumeCreationStruct_t;
+
+/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
+
+/* defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004)
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
+/* The following is an obsolete define.
+ * It must be shifted left 24 bits in order to set the proper bit.
+ */
+#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
+
+
+/* RAID Online Capacity Expansion Structure */
+
+typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
+{
+ U32 Flags; /* 0x00 */
+ U16 DevHandle0; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+ U16 DevHandle1; /* 0x08 */
+ U16 Reserved2; /* 0x0A */
+} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ MPI2_POINTER PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ Mpi2RaidOnlineCapacityExpansion_t,
+ MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
+
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /* 0x00 */
+ U16 CandidateDevHandle; /* 0x02 */
+ U32 Flags; /* 0x04 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+Mpi2RaidCompatibilityInputStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
+
+/* RAID Volume Indicator Structure */
+
+typedef struct _MPI2_RAID_VOL_INDICATOR
+{
+ U64 TotalBlocks; /* 0x00 */
+ U64 BlocksRemaining; /* 0x08 */
+ U32 Flags; /* 0x10 */
+ U32 ElapsedSeconds; /* 0x14 */
+} MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR,
+ Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t;
+
+/* defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000)
+
+#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
+#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
+#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
+#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
+#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 GenericAttributes; /* 0x04 */
+ U32 OEMSpecificAttributes; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+Mpi2RaidCompatibilityResultStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
+
+/* RAID Action Reply ActionData union */
+typedef union _MPI2_RAID_ACTION_REPLY_DATA
+{
+ U32 Word[6];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
+} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
+ Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
+
+/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+
+/* RAID Action Reply Message */
+typedef struct _MPI2_RAID_ACTION_REPLY
+{
+ U8 Action; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 VolDevHandle; /* 0x04 */
+ U8 PhysDiskNum; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ MPI2_RAID_ACTION_REPLY_DATA ActionData; /* 0x14 */
+} MPI2_RAID_ACTION_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY,
+ Mpi2RaidActionReply_t, MPI2_POINTER pMpi2RaidActionReply_t;
+
+
+#endif
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
new file mode 100644
index 000000000..45b6fa10b
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: February 9, 2007
+ *
+ * mpi2_sas.h Version: 02.00.05
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_SAS_H
+#define MPI2_SAS_H
+
+/*
+ * Values for SASStatus.
+ */
+#define MPI2_SASSTATUS_SUCCESS (0x00)
+#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+
+/*
+ * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ * data and SAS Configuration pages.
+ */
+#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+
+/*****************************************************************************
+*
+* SAS Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SMP Passthrough messages
+****************************************************************************/
+
+/* SMP Passthrough Request Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST
+{
+ U8 PassthroughFlags; /* 0x00 */
+ U8 PhysicalPort; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 RequestDataLength; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U64 SASAddress; /* 0x10 */
+ U32 Reserved3; /* 0x18 */
+ U32 Reserved4; /* 0x1C */
+ MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */
+} MPI2_SMP_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
+ Mpi2SmpPassthroughRequest_t, MPI2_POINTER pMpi2SmpPassthroughRequest_t;
+
+/* values for PassthroughFlags field */
+#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* SMP Passthrough Reply Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REPLY
+{
+ U8 PassthroughFlags; /* 0x00 */
+ U8 PhysicalPort; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 ResponseDataLength; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U8 Reserved2; /* 0x0C */
+ U8 SASStatus; /* 0x0D */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 Reserved3; /* 0x14 */
+ U8 ResponseData[4]; /* 0x18 */
+} MPI2_SMP_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REPLY,
+ Mpi2SmpPassthroughReply_t, MPI2_POINTER pMpi2SmpPassthroughReply_t;
+
+/* values for PassthroughFlags field */
+#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+/* values for SASStatus field are at the top of this file */
+
+
+/****************************************************************************
+* SATA Passthrough messages
+****************************************************************************/
+
+/* SATA Passthrough Request Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 PassthroughFlags; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+ U32 DataLength; /* 0x18 */
+ U8 CommandFIS[20]; /* 0x1C */
+ MPI2_SGE_IO_UNION SGL; /* 0x30 */
+} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
+ Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
+
+/* values for PassthroughFlags field */
+#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* SATA Passthrough Reply Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 PassthroughFlags; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U8 Reserved2; /* 0x0C */
+ U8 SASStatus; /* 0x0D */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U8 StatusFIS[20]; /* 0x14 */
+ U32 StatusControlRegisters; /* 0x28 */
+ U32 TransferCount; /* 0x2C */
+} MPI2_SATA_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REPLY,
+ Mpi2SataPassthroughReply_t, MPI2_POINTER pMpi2SataPassthroughReply_t;
+
+/* values for SASStatus field are at the top of this file */
+
+
+/****************************************************************************
+* SAS IO Unit Control messages
+****************************************************************************/
+
+/* SAS IO Unit Control Request Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
+{
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U8 PhyNum; /* 0x0E */
+ U8 PrimFlags; /* 0x0F */
+ U32 Primitive; /* 0x10 */
+ U8 LookupMethod; /* 0x14 */
+ U8 Reserved5; /* 0x15 */
+ U16 SlotNumber; /* 0x16 */
+ U64 LookupAddress; /* 0x18 */
+ U32 IOCParameterValue; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
+ U32 Reserved8; /* 0x28 */
+} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ Mpi2SasIoUnitControlRequest_t, MPI2_POINTER pMpi2SasIoUnitControlRequest_t;
+
+/* values for the Operation field */
+#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
+#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/* values for the PrimFlags field */
+#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+/* values for the LookupMethod field */
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+
+
+/* SAS IO Unit Control Reply Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY
+{
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ Mpi2SasIoUnitControlReply_t, MPI2_POINTER pMpi2SasIoUnitControlReply_t;
+
+
+#endif
+
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
new file mode 100644
index 000000000..659b8ac83
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_tool.h
+ * Title: MPI diagnostic tool structures and definitions
+ * Creation Date: March 26, 2007
+ *
+ * mpi2_tool.h Version: 02.00.12
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
+ * it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TOOL_H
+#define MPI2_TOOL_H
+
+/*****************************************************************************
+*
+* Toolbox Messages
+*
+*****************************************************************************/
+
+/* defines for the Tools */
+#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
+#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
+
+
+/****************************************************************************
+* Toolbox reply
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_REPLY
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_TOOLBOX_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_REPLY,
+ Mpi2ToolboxReply_t, MPI2_POINTER pMpi2ToolboxReply_t;
+
+
+/****************************************************************************
+* Toolbox Clean Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Flags; /* 0x0C */
+ } MPI2_TOOLBOX_CLEAN_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
+ Mpi2ToolboxCleanRequest_t, MPI2_POINTER pMpi2ToolboxCleanRequest_t;
+
+/* values for the Flags field */
+#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
+#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
+#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+
+
+/****************************************************************************
+* Toolbox Memory Move request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x0C */
+} MPI2_TOOLBOX_MEM_MOVE_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
+ Mpi2ToolboxMemMoveRequest_t, MPI2_POINTER pMpi2ToolboxMemMoveRequest_t;
+
+
+/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 SGLFlags; /* 0x0C */
+ U8 Reserved5; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U32 Flags; /* 0x10 */
+ U32 DataLength; /* 0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+Mpi2ToolboxDiagDataUploadRequest_t,
+MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t;
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
+ U32 DiagDataLength; /* 00h */
+ U8 FormatCode; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t;
+
+
+/****************************************************************************
+* Toolbox ISTWI Read Write Tool
+****************************************************************************/
+
+/* Toolbox ISTWI Read Write Tool request message */
+typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 Reserved6; /* 0x10 */
+ U8 DevIndex; /* 0x14 */
+ U8 Action; /* 0x15 */
+ U8 SGLFlags; /* 0x16 */
+ U8 Flags; /* 0x17 */
+ U16 TxDataLength; /* 0x18 */
+ U16 RxDataLength; /* 0x1A */
+ U32 Reserved8; /* 0x1C */
+ U32 Reserved9; /* 0x20 */
+ U32 Reserved10; /* 0x24 */
+ U32 Reserved11; /* 0x28 */
+ U32 Reserved12; /* 0x2C */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x30 */
+} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ Mpi2ToolboxIstwiReadWriteRequest_t,
+ MPI2_POINTER pMpi2ToolboxIstwiReadWriteRequest_t;
+
+/* values for the Action field */
+#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
+#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
+#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
+#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
+#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
+#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+
+/* Toolbox ISTWI Read Write Tool reply message */
+typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U8 DevIndex; /* 0x14 */
+ U8 Action; /* 0x15 */
+ U8 IstwiStatus; /* 0x16 */
+ U8 Reserved6; /* 0x17 */
+ U16 TxDataCount; /* 0x18 */
+ U16 RxDataCount; /* 0x1A */
+} MPI2_TOOLBOX_ISTWI_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_REPLY,
+ Mpi2ToolboxIstwiReply_t, MPI2_POINTER pMpi2ToolboxIstwiReply_t;
+
+
+/****************************************************************************
+* Toolbox Beacon Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_BEACON_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Reserved5; /* 0x0C */
+ U8 PhysicalPort; /* 0x0D */
+ U8 Reserved6; /* 0x0E */
+ U8 Flags; /* 0x0F */
+} MPI2_TOOLBOX_BEACON_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_BEACON_REQUEST,
+ Mpi2ToolboxBeaconRequest_t, MPI2_POINTER pMpi2ToolboxBeaconRequest_t;
+
+/* values for the Flags field */
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
+
+
+/****************************************************************************
+* Toolbox Diagnostic CLI Tool
+****************************************************************************/
+
+#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
+
+/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 SGLFlags; /* 0x0C */
+ U8 Reserved5; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U32 DataLength; /* 0x10 */
+ U8 DiagnosticCliCommand
+ [MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */
+ MPI2_MPI_SGE_IO_UNION SGL; /* 0x70 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi2ToolboxDiagnosticCliRequest_t,
+ MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t;
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* Toolbox Diagnostic CLI Tool reply message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ReturnedDataLength; /* 0x14 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
+ Mpi2ToolboxDiagnosticCliReply_t,
+ MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t;
+
+
+/****************************************************************************
+* Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Console; /* 0x0C */
+ U8 Flags; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U8 TextToDisplay[4]; /* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+
+
+
+/*****************************************************************************
+*
+* Diagnostic Buffer Messages
+*
+*****************************************************************************/
+
+
+/****************************************************************************
+* Diagnostic Buffer Post request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
+{
+ U8 ExtendedType; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U64 BufferAddress; /* 0x0C */
+ U32 BufferLength; /* 0x14 */
+ U32 Reserved5; /* 0x18 */
+ U32 Reserved6; /* 0x1C */
+ U32 Flags; /* 0x20 */
+ U32 ProductSpecific[23]; /* 0x24 */
+} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
+ Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t;
+
+/* values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
+
+/* values for the BufferType field */
+#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
+/* count of the number of buffer types */
+#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY
+{
+ U8 ExtendedType; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TransferLength; /* 0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, MPI2_POINTER pMpi2DiagBufferPostReply_t;
+
+
+/****************************************************************************
+* Diagnostic Release request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REQUEST
+{
+ U8 Reserved1; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, MPI2_POINTER pMpi2DiagReleaseRequest_t;
+
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY
+{
+ U8 Reserved1; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_DIAG_RELEASE_REPLY, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, MPI2_POINTER pMpi2DiagReleaseReply_t;
+
+
+#endif
+
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
new file mode 100644
index 000000000..6b0dcdd02
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+
+/*******************************************************************************
+ * Define MPI2_POINTER if it hasn't already been defined. By default
+ * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined as
+ * a far pointer by defining MPI2_POINTER as "far *" before this header file is
+ * included.
+ */
+#ifndef MPI2_POINTER
+#define MPI2_POINTER *
+#endif
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
+typedef u8 U8;
+typedef __le16 U16;
+typedef __le32 U32;
+typedef __le64 U64 __attribute__((aligned(4)));
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef U8 *PU8;
+typedef U16 *PU16;
+typedef U32 *PU32;
+typedef U64 *PU64;
+
+#endif
+
+#endif
+
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
new file mode 100644
index 000000000..11248de92
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -0,0 +1,4891 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/sort.h>
+#include <linux/io.h>
+#include <linux/time.h>
+#include <linux/kthread.h>
+#include <linux/aer.h>
+
+#include "mpt2sas_base.h"
+
+static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+
+#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
+static int max_queue_depth = -1;
+module_param(max_queue_depth, int, 0);
+MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+
+static int max_sgl_entries = -1;
+module_param(max_sgl_entries, int, 0);
+MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
+
+static int msix_disable = -1;
+module_param(msix_disable, int, 0);
+MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+static int max_msix_vectors = -1;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors, " max msix vectors ");
+
+static int mpt2sas_fwfault_debug;
+MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+ "and halt firmware - (default=0)");
+
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+static int
+_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
+
+static int
+_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ *
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT2SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
+ list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
+ ioc->fwfault_debug = mpt2sas_fwfault_debug;
+ return 0;
+}
+
+module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
+ param_get_int, &mpt2sas_fwfault_debug, 0644);
+
+/**
+ * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt2sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if ((ioc == NULL))
+ return -1;
+
+ pdev = ioc->pdev;
+ if ((pdev == NULL))
+ return -1;
+ pci_stop_and_remove_bus_device_locked(pdev);
+ return 0;
+}
+
+
+/**
+ * _base_fault_reset_work - workq handling ioc fault conditions
+ * @work: input argument, used to derive ioc
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+static void
+_base_fault_reset_work(struct work_struct *work)
+{
+ struct MPT2SAS_ADAPTER *ioc =
+ container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
+ unsigned long flags;
+ u32 doorbell;
+ int rc;
+ struct task_struct *p;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
+ ioc->name, __func__);
+
+ /* It may be possible that EEH recovery can resolve some of
+ * pci bus failure issues rather removing the dead ioc function
+ * by considering controller is in a non-operational state. So
+ * here priority is given to the EEH recovery. If it doesn't
+ * not resolve this issue, mpt2sas driver will consider this
+ * controller to non-operational state and remove the dead ioc
+ * function.
+ */
+ if (ioc->non_operational_loop++ < 5) {
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ goto rearm_timer;
+ }
+
+ /*
+ * Call _scsih_flush_pending_cmds callback so that we flush all
+ * pending commands back to OS. This call is required to aovid
+ * deadlock at block layer. Dead IOC will fail to do diag reset,
+ * and this call is safe since dead ioc will never return any
+ * command back from HW.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
+ "mpt2sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p)) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ } else {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ }
+
+ return; /* don't rearm timer */
+ }
+
+ ioc->non_operational_loop = 0;
+
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
+ __func__, (rc == 0) ? "success" : "failed");
+ doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt2sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ rearm_timer:
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt2sas_base_start_watchdog - start the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ if (ioc->fault_reset_work_q)
+ return;
+
+ /* initialize fault polling */
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+ snprintf(ioc->fault_reset_work_q_name,
+ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+ ioc->fault_reset_work_q =
+ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ if (!ioc->fault_reset_work_q) {
+ printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ wq = ioc->fault_reset_work_q;
+ ioc->fault_reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
+{
+ printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
+ ioc->name, fault_code);
+}
+
+/**
+ * mpt2sas_halt_firmware - halt's mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues. Writing 0xCOFFEE00
+ * to the doorbell register will halt controller firmware. With
+ * the purpose to stop both driver and firmware, the enduser can
+ * obtain a ring buffer from controller UART.
+ */
+void
+mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
+{
+ u32 doorbell;
+
+ if (!ioc->fwfault_debug)
+ return;
+
+ dump_stack();
+
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt2sas_base_fault_info(ioc , doorbell);
+ else {
+ writel(0xC0FFEE00, &ioc->chip->Doorbell);
+ printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
+ "timeout\n", ioc->name);
+ }
+
+ panic("panic in %s\n", __func__);
+}
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+ MPI2RequestHeader_t *request_hdr)
+{
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ char *desc = NULL;
+ u16 frame_sz;
+ char *func_str = NULL;
+
+ /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
+ switch (ioc_status) {
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_BUSY:
+ desc = "busy";
+ break;
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ desc = "invalid sgl";
+ break;
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ desc = "internal error";
+ break;
+ case MPI2_IOCSTATUS_INVALID_VPID:
+ desc = "invalid vpid";
+ break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ desc = "insufficient resources";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ desc = "invalid field";
+ break;
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ desc = "invalid state";
+ break;
+ case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
+ desc = "op state not supported";
+ break;
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
+ desc = "config invalid action";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
+ desc = "config invalid type";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
+ desc = "config invalid page";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
+ desc = "config invalid data";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
+ desc = "config no defaults";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
+ desc = "config cant commit";
+ break;
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ break;
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc = "eedp app tag error";
+ break;
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
+ desc = "target invalid io index";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ABORTED:
+ desc = "target aborted";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
+ desc = "target no conn retryable";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
+ desc = "target no connection";
+ break;
+ case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
+ desc = "target xfer count mismatch";
+ break;
+ case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
+ desc = "target data offset error";
+ break;
+ case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
+ desc = "target too much write data";
+ break;
+ case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
+ desc = "target iu too short";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
+ desc = "target ack nak timeout";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
+ desc = "target nak received";
+ break;
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
+ desc = "smp request failed";
+ break;
+ case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
+ desc = "smp data overrun";
+ break;
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
+ desc = "diagnostic released";
+ break;
+ default:
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ switch (request_hdr->Function) {
+ case MPI2_FUNCTION_CONFIG:
+ frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
+ func_str = "config_page";
+ break;
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
+ func_str = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
+ func_str = "sas_iounit_ctl";
+ break;
+ case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ frame_sz = sizeof(Mpi2SepRequest_t);
+ func_str = "enclosure";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ frame_sz = sizeof(Mpi2IOCInitRequest_t);
+ func_str = "ioc_init";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ frame_sz = sizeof(Mpi2PortEnableRequest_t);
+ func_str = "port_enable";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
+ func_str = "smp_passthru";
+ break;
+ default:
+ frame_sz = 32;
+ func_str = "unknown";
+ break;
+ }
+
+ printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
+ " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
+
+ _debug_dump_mf(request_hdr, frame_sz/4);
+}
+
+/**
+ * _base_display_event_data - verbose translation of firmware asyn events
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * Return nothing.
+ */
+static void
+_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ case MPI2_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI2_EVENT_STATE_CHANGE:
+ desc = "Status Change";
+ break;
+ case MPI2_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI2_EVENT_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ desc = "Device Status Change";
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ if (!ioc->hide_ir_msg)
+ desc = "IR Operation Status";
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ printk("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ printk("\n");
+ return;
+ }
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "SAS Enclosure Device Status Change";
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ if (!ioc->hide_ir_msg)
+ desc = "IR Volume";
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ if (!ioc->hide_ir_msg)
+ desc = "IR Physical Disk";
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ if (!ioc->hide_ir_msg)
+ desc = "IR Configuration Change List";
+ break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ if (!ioc->hide_ir_msg)
+ desc = "Log Entry Added";
+ break;
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ desc = "Temperature Threshold";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
+}
+#endif
+
+/**
+ * _base_sas_log_info - verbose translation of firmware log info
+ * @ioc: per adapter object
+ * @log_info: log info
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
+{
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
+ union loginfo_type sas_loginfo;
+ char *originator_str = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
+ /* each nexus loss loginfo */
+ if (log_info == 0x31170000)
+ return;
+
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+ return;
+
+ switch (sas_loginfo.dw.originator) {
+ case 0:
+ originator_str = "IOP";
+ break;
+ case 1:
+ originator_str = "PL";
+ break;
+ case 2:
+ if (!ioc->hide_ir_msg)
+ originator_str = "IR";
+ else
+ originator_str = "WarpDrive";
+ break;
+ }
+
+ printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
+ "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
+ originator_str, sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
+
+/**
+ * _base_display_reply_info -
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return nothing.
+ */
+static void
+_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
+ (ioc->logging_level & MPT_DEBUG_REPLY)) {
+ _base_sas_ioc_info(ioc , mpi_reply,
+ mpt2sas_base_get_msg_frame(ioc, smid));
+ }
+#endif
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ _base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+
+/**
+ * mpt2sas_base_done - base internal command completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+
+ ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
+ memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ }
+ ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
+
+ complete(&ioc->base_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_async_event - main callback handler for firmware asyn events
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Returns void.
+ */
+static void
+_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ Mpi2EventAckRequest_t *ack_request;
+ u16 smid;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return;
+ if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ _base_display_event_data(ioc, mpi_reply);
+#endif
+ if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
+ goto out;
+ smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = mpi_reply->Event;
+ ack_request->EventContext = mpi_reply->EventContext;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt2sas_base_put_smid_default(ioc, smid);
+
+ out:
+
+ /* scsih callback handler */
+ mpt2sas_scsih_event_callback(ioc, msix_index, reply);
+
+ /* ctl callback handler */
+ mpt2sas_ctl_event_callback(ioc, msix_index, reply);
+
+ return;
+}
+
+/**
+ * _base_get_cb_idx - obtain the callback index
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return callback index.
+ */
+static u8
+_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ int i;
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
+ i = smid - 1;
+ cb_idx = ioc->scsi_lookup[i].cb_idx;
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
+ return cb_idx;
+}
+
+/**
+ * _base_mask_interrupts - disable interrupts
+ * @ioc: per adapter object
+ *
+ * Disabling ResetIRQ, Reply and Doorbell Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ ioc->mask_interrupts = 1;
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ readl(&ioc->chip->HostInterruptMask);
+}
+
+/**
+ * _base_unmask_interrupts - enable interrupts
+ * @ioc: per adapter object
+ *
+ * Enabling only Reply Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register &= ~MPI2_HIM_RIM;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ ioc->mask_interrupts = 0;
+}
+
+union reply_descriptor {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ * @r: pt_regs pointer (not used)
+ *
+ * Return IRQ_HANDLE if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+ struct adapter_reply_queue *reply_q = bus_id;
+ union reply_descriptor rd;
+ u32 completed_cmds;
+ u8 request_desript_type;
+ u16 smid;
+ u8 cb_idx;
+ u32 reply;
+ u8 msix_index = reply_q->msix_index;
+ struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
+ Mpi2ReplyDescriptorsUnion_t *rpf;
+ u8 rc;
+
+ if (ioc->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!atomic_add_unless(&reply_q->busy, 1, 1))
+ return IRQ_NONE;
+
+ rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
+ request_desript_type = rpf->Default.ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ completed_cmds = 0;
+ cb_idx = 0xFF;
+ do {
+ rd.word = le64_to_cpu(rpf->Words);
+ if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
+ goto out;
+ reply = 0;
+ smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
+ if (request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
+ reply = le32_to_cpu
+ (rpf->AddressReply.ReplyFrameAddress);
+ if (reply > ioc->reply_dma_max_address ||
+ reply < ioc->reply_dma_min_address)
+ reply = 0;
+ } else if (request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
+ goto next;
+ else if (request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
+ goto next;
+ if (smid) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS))
+ && (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, reply);
+ if (reply)
+ _base_display_reply_info(ioc, smid,
+ msix_index, reply);
+ if (rc)
+ mpt2sas_base_free_smid(ioc, smid);
+ }
+ }
+ if (!smid)
+ _base_async_event(ioc, msix_index, reply);
+
+ /* reply free queue handling */
+ if (reply) {
+ ioc->reply_free_host_index =
+ (ioc->reply_free_host_index ==
+ (ioc->reply_free_queue_depth - 1)) ?
+ 0 : ioc->reply_free_host_index + 1;
+ ioc->reply_free[ioc->reply_free_host_index] =
+ cpu_to_le32(reply);
+ wmb();
+ writel(ioc->reply_free_host_index,
+ &ioc->chip->ReplyFreeHostIndex);
+ }
+
+ next:
+
+ rpf->Words = cpu_to_le64(ULLONG_MAX);
+ reply_q->reply_post_host_index =
+ (reply_q->reply_post_host_index ==
+ (ioc->reply_post_queue_depth - 1)) ? 0 :
+ reply_q->reply_post_host_index + 1;
+ request_desript_type =
+ reply_q->reply_post_free[reply_q->reply_post_host_index].
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ completed_cmds++;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ goto out;
+ if (!reply_q->reply_post_host_index)
+ rpf = reply_q->reply_post_free;
+ else
+ rpf++;
+ } while (1);
+
+ out:
+
+ if (!completed_cmds) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+ wmb();
+ if (ioc->is_warpdrive) {
+ writel(reply_q->reply_post_host_index,
+ ioc->reply_post_host_index[msix_index]);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+ }
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+}
+
+/**
+ * _base_is_controller_msix_enabled - is controller support muli-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
+{
+ return (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR conext
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to OS before we process the TM completetion.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+
+ /* If MSIX capability is turned off
+ * then multi-queues are not enabled
+ */
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (ioc->shost_recovery)
+ return;
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
+ _base_interrupt(reply_q->vector, (void *)reply_q);
+ }
+}
+
+/**
+ * mpt2sas_base_release_callback_handler - clear interrupt callback handler
+ * @cb_idx: callback index
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_release_callback_handler(u8 cb_idx)
+{
+ mpt_callbacks[cb_idx] = NULL;
+}
+
+/**
+ * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * @cb_func: callback function
+ *
+ * Returns cb_func.
+ */
+u8
+mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
+{
+ u8 cb_idx;
+
+ for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
+ if (mpt_callbacks[cb_idx] == NULL)
+ break;
+
+ mpt_callbacks[cb_idx] = cb_func;
+ return cb_idx;
+}
+
+/**
+ * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_initialize_callback_handler(void)
+{
+ u8 cb_idx;
+
+ for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
+ mpt2sas_base_release_callback_handler(cb_idx);
+}
+
+/**
+ * mpt2sas_base_build_zero_len_sge - build zero length sg entry
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to insure the IOCs hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
+{
+ u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ ioc->base_add_sg_single(paddr, flags_length, -1);
+}
+
+/**
+ * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple32_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le32(dma_addr);
+}
+
+
+/**
+ * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple64_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
+
+/**
+ * _base_config_dma_addressing - set dma addressing
+ * @ioc: per adapter object
+ * @pdev: PCI device struct
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
+{
+ struct sysinfo s;
+ u64 consistent_dma_mask;
+
+ if (ioc->dma_mask)
+ consistent_dma_mask = DMA_BIT_MASK(64);
+ else
+ consistent_dma_mask = DMA_BIT_MASK(32);
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask =
+ dma_get_required_mask(&pdev->dev);
+ if ((required_mask > DMA_BIT_MASK(32)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ ioc->dma_mask = 64;
+ goto out;
+ }
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ ioc->dma_mask = 32;
+ } else
+ return -ENODEV;
+
+ out:
+ si_meminfo(&s);
+ printk(MPT2SAS_INFO_FMT
+ "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+
+ return 0;
+}
+
+static int
+_base_change_consistent_dma_mask(struct MPT2SAS_ADAPTER *ioc,
+ struct pci_dev *pdev)
+{
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+ }
+ return 0;
+}
+/**
+ * _base_check_enable_msix - checks MSIX capabable.
+ * @ioc: per adapter object
+ *
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
+ */
+static int
+_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+{
+ int base;
+ u16 message_control;
+
+
+ /* Check whether controller SAS2008 B0 controller,
+ if it is SAS2008 B0 controller use IO-APIC instead of MSIX */
+ if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
+ ioc->pdev->revision == 0x01) {
+ return -EINVAL;
+ }
+
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+ "supported\n", ioc->name));
+ return -EINVAL;
+ }
+
+ /* get msix vector count */
+ /* NUMA_IO not supported for older controllers */
+ if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
+ ioc->msix_vector_count = 1;
+ else {
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ }
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+ "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+
+ return 0;
+}
+
+/**
+ * _base_free_irq - free irq
+ * @ioc: per adapter object
+ *
+ * Freeing respective reply_queue from the list.
+ */
+static void
+_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q, *next;
+
+ if (list_empty(&ioc->reply_queue_list))
+ return;
+
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ list_del(&reply_q->list);
+ irq_set_affinity_hint(reply_q->vector, NULL);
+ free_cpumask_var(reply_q->affinity_hint);
+ synchronize_irq(reply_q->vector);
+ free_irq(reply_q->vector, reply_q);
+ kfree(reply_q);
+ }
+}
+
+/**
+ * _base_request_irq - request irq
+ * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
+ *
+ * Inserting respective reply_queue into the list.
+ */
+static int
+_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
+{
+ struct adapter_reply_queue *reply_q;
+ int r;
+
+ reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+ if (!reply_q) {
+ printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
+ ioc->name, (int)sizeof(struct adapter_reply_queue));
+ return -ENOMEM;
+ }
+ reply_q->ioc = ioc;
+ reply_q->msix_index = index;
+ reply_q->vector = vector;
+
+ if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_clear(reply_q->affinity_hint);
+
+ atomic_set(&reply_q->busy, 0);
+ if (ioc->msix_enable)
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+ MPT2SAS_DRIVER_NAME, ioc->id, index);
+ else
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+ MPT2SAS_DRIVER_NAME, ioc->id);
+ r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+ reply_q);
+ if (r) {
+ printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
+ reply_q->name, vector);
+ kfree(reply_q);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&reply_q->list);
+ list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+ return 0;
+}
+
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would nice if we could call irq_set_affinity, however it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+ unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ struct adapter_reply_queue *reply_q;
+
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+
+ nr_cpus = num_online_cpus();
+ nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
+ ioc->facts.MaxMSIxVectors);
+ if (!nr_msix)
+ return;
+
+ cpu = cpumask_first(cpu_online_mask);
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+
+ unsigned int i, group = nr_cpus / nr_msix;
+
+ if (cpu >= nr_cpus)
+ break;
+
+ if (index < nr_cpus % nr_msix)
+ group++;
+
+ for (i = 0 ; i < group ; i++) {
+ ioc->cpu_msix_table[cpu] = index;
+ cpumask_or(reply_q->affinity_hint,
+ reply_q->affinity_hint, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ if (irq_set_affinity_hint(reply_q->vector,
+ reply_q->affinity_hint))
+ dinitprintk(ioc, pr_info(MPT2SAS_FMT
+ "error setting affinity hint for irq vector %d\n",
+ ioc->name, reply_q->vector));
+ index++;
+ }
+}
+
+/**
+ * _base_disable_msix - disables msix
+ * @ioc: per adapter object
+ *
+ */
+static void
+_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->msix_enable) {
+ pci_disable_msix(ioc->pdev);
+ ioc->msix_enable = 0;
+ }
+}
+
+/**
+ * _base_enable_msix - enables msix, failback to io_apic
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct msix_entry *entries, *a;
+ int r;
+ int i;
+ u8 try_msix = 0;
+
+ if (msix_disable == -1 || msix_disable == 0)
+ try_msix = 1;
+
+ if (!try_msix)
+ goto try_ioapic;
+
+ if (_base_check_enable_msix(ioc) != 0)
+ goto try_ioapic;
+
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+ ioc->msix_vector_count);
+
+ if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
+ max_msix_vectors = 8;
+
+ if (max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ ioc->reply_queue_count);
+ ioc->msix_vector_count = ioc->reply_queue_count;
+ } else if (max_msix_vectors == 0)
+ goto try_ioapic;
+
+ printk(MPT2SAS_INFO_FMT
+ "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+ ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
+
+ entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!entries) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
+ "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ goto try_ioapic;
+ }
+
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+ a->entry = i;
+
+ r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
+ if (r) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r));
+ kfree(entries);
+ goto try_ioapic;
+ }
+
+ ioc->msix_enable = 1;
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+ r = _base_request_irq(ioc, i, a->vector);
+ if (r) {
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ kfree(entries);
+ goto try_ioapic;
+ }
+ }
+
+ kfree(entries);
+ return 0;
+
+/* failback to io_apic interrupt routing */
+ try_ioapic:
+
+ ioc->reply_queue_count = 1;
+ r = _base_request_irq(ioc, 0, ioc->pdev->irq);
+
+ return r;
+}
+
+/**
+ * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+ u32 memap_sz;
+ u32 pio_sz;
+ int i, r = 0;
+ u64 pio_chip = 0;
+ u64 chip_phys = 0;
+ struct adapter_reply_queue *reply_q;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
+ ioc->name, __func__));
+
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
+ "failed\n", ioc->name);
+ ioc->bars = 0;
+ return -ENODEV;
+ }
+
+
+ if (pci_request_selected_regions(pdev, ioc->bars,
+ MPT2SAS_DRIVER_NAME)) {
+ printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
+ "failed\n", ioc->name);
+ ioc->bars = 0;
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ if (_base_config_dma_addressing(ioc, pdev) != 0) {
+ printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+ for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pio_sz)
+ continue;
+ pio_chip = (u64)pci_resource_start(pdev, i);
+ pio_sz = pci_resource_len(pdev, i);
+ } else {
+ if (memap_sz)
+ continue;
+ /* verify memory resource is valid before using */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ printk(MPT2SAS_ERR_FMT "unable to map "
+ "adapter memory!\n", ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+ }
+ }
+ }
+
+ _base_mask_interrupts(ioc);
+
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_fail;
+
+ if (!ioc->rdpq_array_enable_assigned) {
+ ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+ ioc->rdpq_array_enable_assigned = 1;
+ }
+
+ r = _base_enable_msix(ioc);
+ if (r)
+ goto out_fail;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
+ reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+ "IO-APIC enabled"), reply_q->vector);
+
+ printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
+ ioc->name, (unsigned long long)pio_chip, pio_sz);
+
+ /* Save PCI configuration state for recovery from PCI AER/EEH errors */
+ pci_save_state(pdev);
+
+ return 0;
+
+ out_fail:
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return r;
+}
+
+/**
+ * mpt2sas_base_get_msg_frame - obtain request mf pointer
+ * @ioc: per adapter object
+ * @smid: system request message index(smid zero is invalid)
+ *
+ * Returns virt pointer to message frame.
+ */
+void *
+mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->request + (smid * ioc->request_sz));
+}
+
+/**
+ * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to sense buffer.
+ */
+void *
+mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the low 32bit address of the sense buffer.
+ */
+__le32
+mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ return cpu_to_le32(ioc->sense_dma +
+ ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
+ * @ioc: per adapter object
+ * @phys_addr: lower 32 physical addr of the reply
+ *
+ * Converts 32bit lower physical addr into a virt address.
+ */
+void *
+mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+ return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+}
+
+/**
+ * mpt2sas_base_get_smid - obtain a free smid from internal queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->internal_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->internal_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ * @scmd: pointer to scsi command object
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct scsiio_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->free_list.next,
+ struct scsiio_tracker, tracker_list);
+ request->scmd = scmd;
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->hpr_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return 0;
+ }
+
+ request = list_entry(ioc->hpr_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+
+/**
+ * mpt2sas_base_free_smid - put smid back on free_list
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ int i;
+ struct chain_tracker *chain_req, *next;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
+ }
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ ioc->scsi_lookup[i].direct_io = 0;
+ list_add(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
+ }
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * _base_writeq - 64 bit write to MMIO
+ * @ioc: per adapter object
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
+ * care of 32 bit environment where its not quarenteed to send the entire word
+ * in one transfer.
+ */
+#ifndef writeq
+static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
+ spinlock_t *writeq_lock)
+{
+ unsigned long flags;
+ __u64 data_out = cpu_to_le64(b);
+
+ spin_lock_irqsave(writeq_lock, flags);
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+ spin_unlock_irqrestore(writeq_lock, flags);
+}
+#else
+static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
+ spinlock_t *writeq_lock)
+{
+ writeq(cpu_to_le64(b), addr);
+}
+#endif
+
+static inline u8
+_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
+/**
+ * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+
+ descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+
+/**
+ * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.SMID = cpu_to_le16(smid);
+ descriptor.HighPriority.LMID = 0;
+ descriptor.HighPriority.Reserved1 = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt2sas_base_put_smid_default - Default, primarily used for config pages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @io_index: value used to track the IO
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u16 io_index)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.SCSITarget.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
+ descriptor.SCSITarget.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSITarget.SMID = cpu_to_le16(smid);
+ descriptor.SCSITarget.LMID = 0;
+ descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * _base_display_dell_branding - Disply branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
+
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
+ return;
+
+ memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
+ strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
+ strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
+ strncpy(dell_branding,
+ MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
+ strncpy(dell_branding,
+ MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
+ strncpy(dell_branding,
+ MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ case MPT2SAS_DELL_PERC_H200_SSDID:
+ strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ case MPT2SAS_DELL_6GBPS_SAS_SSDID:
+ strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
+ MPT2SAS_DELL_BRANDING_SIZE - 1);
+ break;
+ default:
+ sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
+ break;
+ }
+
+ printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
+ " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
+ ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
+ ioc->pdev->subsystem_device);
+}
+
+/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SAS2008:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RMS2LL080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS2LL040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_SSD910_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_SSD910_BRANDING);
+ break;
+ default:
+ break;
+ }
+ case MPI2_MFGPAGE_DEVID_SAS2308_2:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RS25GB008_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RS25GB008_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25JB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25JB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * _base_display_hp_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SAS2004:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+ break;
+ default:
+ break;
+ }
+ case MPI2_MFGPAGE_DEVID_SAS2308_2:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_HP_2_4_INTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+ break;
+ case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+ break;
+ case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+ break;
+ case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * _base_display_ioc_capabilities - Disply IOC's capabilities.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ char desc[16];
+ u32 iounit_pg1_flags;
+ u32 bios_version;
+
+ bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ strncpy(desc, ioc->manu_pg0.ChipName, 16);
+ printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
+ "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ ioc->name, desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
+
+ _base_display_dell_branding(ioc);
+ _base_display_intel_branding(ioc);
+ _base_display_hp_branding(ioc);
+
+ printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
+ printk("Initiator");
+ i++;
+ }
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
+ printk("%sTarget", i ? "," : "");
+ i++;
+ }
+
+ i = 0;
+ printk("), ");
+ printk("Capabilities=(");
+
+ if (!ioc->hide_ir_msg) {
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
+ printk("Raid");
+ i++;
+ }
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
+ printk("%sTLR", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
+ printk("%sMulticast", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
+ printk("%sBIDI Target", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
+ printk("%sEEDP", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
+ printk("%sSnapshot Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
+ printk("%sDiag Trace Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
+ printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
+ printk("%sTask Set Full", i ? "," : "");
+ i++;
+ }
+
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
+ printk("%sNCQ", i ? "," : "");
+ i++;
+ }
+
+ printk(")\n");
+}
+
+/**
+ * mpt2sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: interval IO is returned when there is a missing device
+ *
+ * Return nothing.
+ *
+ * Passed on the command line, this function will modify the device missing
+ * delay, as well as the io missing delay. This should be called at driver
+ * load time.
+ */
+void
+mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+{
+ u16 dmd, dmd_new, dmd_orignal;
+ u8 io_missing_delay_original;
+ u16 sz;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 num_phys = 0;
+ u16 ioc_status;
+
+ mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys)
+ return;
+
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* device missing delay */
+ dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ dmd_orignal = dmd;
+ if (device_missing_delay > 0x7F) {
+ dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+ device_missing_delay;
+ dmd = dmd / 16;
+ dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+ } else
+ dmd = device_missing_delay;
+ sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+ /* io missing delay */
+ io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+ sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+ if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd_new = (dmd &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd_new =
+ dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
+ "new(%d)\n", ioc->name, dmd_orignal, dmd_new);
+ printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
+ "new(%d)\n", ioc->name, io_missing_delay_original,
+ io_missing_delay);
+ ioc->device_missing_delay = dmd_new;
+ ioc->io_missing_delay = io_missing_delay;
+ }
+
+out:
+ kfree(sas_iounit_pg1);
+}
+
+/**
+ * _base_static_config_pages - static start of day config pages
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ u32 iounit_pg1_flags;
+
+ mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
+ if (ioc->ir_firmware)
+ mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+ mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ _base_display_ioc_capabilities(ioc);
+
+ /*
+ * Enable task_set_full handling in iounit_pg1 when the
+ * facts capabilities indicate that its supported.
+ */
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
+ iounit_pg1_flags &=
+ ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ else
+ iounit_pg1_flags |=
+ MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
+ mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+
+ if (ioc->iounit_pg8.NumSensors)
+ ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
+}
+
+/**
+ * _base_release_memory_pools - release memory
+ * @ioc: per adapter object
+ *
+ * Free memory allocated from _base_allocate_memory_pools.
+ *
+ * Return nothing.
+ */
+static void
+_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ struct reply_post_struct *rps;
+
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->request) {
+ pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ ioc->request, ioc->request_dma);
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
+ ": free\n", ioc->name, ioc->request));
+ ioc->request = NULL;
+ }
+
+ if (ioc->sense) {
+ pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+ if (ioc->sense_dma_pool)
+ pci_pool_destroy(ioc->sense_dma_pool);
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
+ ": free\n", ioc->name, ioc->sense));
+ ioc->sense = NULL;
+ }
+
+ if (ioc->reply) {
+ pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+ if (ioc->reply_dma_pool)
+ pci_pool_destroy(ioc->reply_dma_pool);
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
+ ": free\n", ioc->name, ioc->reply));
+ ioc->reply = NULL;
+ }
+
+ if (ioc->reply_free) {
+ pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+ ioc->reply_free_dma);
+ if (ioc->reply_free_dma_pool)
+ pci_pool_destroy(ioc->reply_free_dma_pool);
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
+ "(0x%p): free\n", ioc->name, ioc->reply_free));
+ ioc->reply_free = NULL;
+ }
+
+ if (ioc->reply_post) {
+ do {
+ rps = &ioc->reply_post[i];
+ if (rps->reply_post_free) {
+ pci_pool_free(
+ ioc->reply_post_free_dma_pool,
+ rps->reply_post_free,
+ rps->reply_post_free_dma);
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->name, rps->reply_post_free));
+ rps->reply_post_free = NULL;
+ }
+ } while (ioc->rdpq_array_enable &&
+ (++i < ioc->reply_queue_count));
+
+ if (ioc->reply_post_free_dma_pool)
+ pci_pool_destroy(ioc->reply_post_free_dma_pool);
+ kfree(ioc->reply_post);
+ }
+
+ if (ioc->config_page) {
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "config_page(0x%p): free\n", ioc->name,
+ ioc->config_page));
+ pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ ioc->config_page, ioc->config_page_dma);
+ }
+
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
+ kfree(ioc->hpr_lookup);
+ kfree(ioc->internal_lookup);
+ if (ioc->chain_lookup) {
+ for (i = 0; i < ioc->chain_depth; i++) {
+ if (ioc->chain_lookup[i].chain_buffer)
+ pci_pool_free(ioc->chain_dma_pool,
+ ioc->chain_lookup[i].chain_buffer,
+ ioc->chain_lookup[i].chain_buffer_dma);
+ }
+ if (ioc->chain_dma_pool)
+ pci_pool_destroy(ioc->chain_dma_pool);
+ free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ ioc->chain_lookup = NULL;
+ }
+}
+
+
+/**
+ * _base_allocate_memory_pools - allocate start of day memory pools
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 success, anything else error
+ */
+static int
+_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ struct mpt2sas_facts *facts;
+ u16 max_sge_elements;
+ u16 chains_needed_per_io;
+ u32 sz, total_sz, reply_post_free_sz;
+ u32 retry_sz;
+ u16 max_request_credit;
+ int i;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ retry_sz = 0;
+ facts = &ioc->facts;
+
+ /* command line tunables for max sgl entries */
+ if (max_sgl_entries != -1) {
+ ioc->shost->sg_tablesize = min_t(unsigned short,
+ max_sgl_entries, SCSI_MAX_SG_CHAIN_SEGMENTS);
+ if (ioc->shost->sg_tablesize > MPT2SAS_SG_DEPTH)
+ printk(MPT2SAS_WARN_FMT
+ "sg_tablesize(%u) is bigger than kernel defined"
+ " SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+ ioc->shost->sg_tablesize, MPT2SAS_SG_DEPTH);
+ } else {
+ ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
+ }
+
+ /* command line tunables for max controller queue depth */
+ if (max_queue_depth != -1 && max_queue_depth != 0) {
+ max_request_credit = min_t(u16, max_queue_depth +
+ ioc->hi_priority_depth + ioc->internal_depth,
+ facts->RequestCredit);
+ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+ max_request_credit = MAX_HBA_QUEUE_DEPTH;
+ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+ ioc->hba_queue_depth = max_request_credit;
+ ioc->hi_priority_depth = facts->HighPriorityCredit;
+ ioc->internal_depth = ioc->hi_priority_depth + 5;
+
+ /* request frame size */
+ ioc->request_sz = facts->IOCRequestFrameSize * 4;
+
+ /* reply frame size */
+ ioc->reply_sz = facts->ReplyFrameSize * 4;
+
+ retry_allocation:
+ total_sz = 0;
+ /* calculate number of sg elements left over in the 1st frame */
+ max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+ sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
+ ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
+
+ /* now do the same for a chain buffer */
+ max_sge_elements = ioc->request_sz - ioc->sge_size;
+ ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
+
+ ioc->chain_offset_value_for_main_message =
+ ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
+ (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
+
+ /*
+ * MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
+ */
+ chains_needed_per_io = ((ioc->shost->sg_tablesize -
+ ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
+ + 1;
+ if (chains_needed_per_io > facts->MaxChainDepth) {
+ chains_needed_per_io = facts->MaxChainDepth;
+ ioc->shost->sg_tablesize = min_t(u16,
+ ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
+ * chains_needed_per_io), ioc->shost->sg_tablesize);
+ }
+ ioc->chains_needed_per_io = chains_needed_per_io;
+
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+ /* calculate reply descriptor post queue depth */
+ ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+ ioc->reply_free_queue_depth + 1;
+ /* align the reply post queue on the next 16 count boundary */
+ if (ioc->reply_post_queue_depth % 16)
+ ioc->reply_post_queue_depth += 16 -
+ (ioc->reply_post_queue_depth % 16);
+
+
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth =
+ facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16);
+ ioc->hba_queue_depth =
+ ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ }
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
+ "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+ "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
+
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+
+ sz = reply_post_free_sz;
+ if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+ sz *= ioc->reply_queue_count;
+
+ ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+ (ioc->reply_queue_count):1,
+ sizeof(struct reply_post_struct), GFP_KERNEL);
+
+ if (!ioc->reply_post) {
+ printk(MPT2SAS_ERR_FMT "reply_post_free pool: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ printk(MPT2SAS_ERR_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ i = 0;
+ do {
+ ioc->reply_post[i].reply_post_free =
+ pci_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free) {
+ printk(MPT2SAS_ERR_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0, sz);
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "reply post free pool (0x%p): depth(%d),"
+ "element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth, 8, sz/1024));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "reply_post_free_dma = (0x%llx)\n", ioc->name,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ total_sz += sz;
+ } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
+ if (ioc->dma_mask == 64) {
+ if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+ printk(MPT2SAS_WARN_FMT
+ "no suitable consistent DMA mask for %s\n",
+ ioc->name, pci_name(ioc->pdev));
+ goto out;
+ }
+ }
+
+ ioc->scsiio_depth = ioc->hba_queue_depth -
+ ioc->hi_priority_depth - ioc->internal_depth;
+
+ /* set the scsi host can_queue depth
+ * with some internal commands that could be outstanding
+ */
+ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
+ "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
+
+ /* contiguous pool for request and chains, 16 byte align, one extra "
+ * "frame for smid=0
+ */
+ ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
+ sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
+
+ /* hi-priority queue */
+ sz += (ioc->hi_priority_depth * ioc->request_sz);
+
+ /* internal queue */
+ sz += (ioc->internal_depth * ioc->request_sz);
+
+ ioc->request_dma_sz = sz;
+ ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ if (!ioc->request) {
+ printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
+ "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
+ goto out;
+ retry_sz += 64;
+ ioc->hba_queue_depth = max_request_credit - retry_sz;
+ goto retry_allocation;
+ }
+
+ if (retry_sz)
+ printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
+ "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+
+
+ /* hi-priority queue */
+ ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+ ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+
+ /* internal queue */
+ ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
+ ioc->request_sz);
+ ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
+ ioc->request_sz);
+
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
+ "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->request, ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz)/1024));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
+ ioc->name, (unsigned long long) ioc->request_dma));
+ total_sz += sz;
+
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
+ if (!ioc->scsi_lookup) {
+ printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
+ "sz(%d)\n", ioc->name, (int)sz);
+ goto out;
+ }
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
+ "depth(%d)\n", ioc->name, ioc->request,
+ ioc->scsiio_depth));
+
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+ printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
+ "sz(%d)\n", ioc->name, (int)sz);
+ goto out;
+ }
+ ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ ioc->request_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+ printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
+ "failed\n", ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->chain_depth; i++) {
+ ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+ ioc->chain_dma_pool , GFP_KERNEL,
+ &ioc->chain_lookup[i].chain_buffer_dma);
+ if (!ioc->chain_lookup[i].chain_buffer) {
+ ioc->chain_depth = i;
+ goto chain_done;
+ }
+ total_sz += ioc->request_sz;
+ }
+chain_done:
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
+ "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
+ ioc->request_sz))/1024));
+
+ /* initialize hi-priority queue smid's */
+ ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->hpr_lookup) {
+ printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->hi_priority_smid = ioc->scsiio_depth + 1;
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
+ "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
+
+ /* initialize internal queue smid's */
+ ioc->internal_lookup = kcalloc(ioc->internal_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->internal_lookup) {
+ printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
+ "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
+
+ /* sense buffers, 4 byte align */
+ sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->sense_dma_pool) {
+ printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
+ &ioc->sense_dma);
+ if (!ioc->sense) {
+ printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
+ "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz/1024));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->sense_dma));
+ total_sz += sz;
+
+ /* reply pool, 4 byte align */
+ sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->reply_dma_pool) {
+ printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply) {
+ printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
+ "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_dma));
+ total_sz += sz;
+
+ /* reply free queue, 16 byte align */
+ sz = ioc->reply_free_queue_depth * 4;
+ ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool) {
+ printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
+ "failed\n", ioc->name);
+ goto out;
+ }
+ ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
+ &ioc->reply_free_dma);
+ if (!ioc->reply_free) {
+ printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
+ "failed\n", ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
+ "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
+ "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
+ total_sz += sz;
+
+ ioc->config_page_sz = 512;
+ ioc->config_page = pci_alloc_consistent(ioc->pdev,
+ ioc->config_page_sz, &ioc->config_page_dma);
+ if (!ioc->config_page) {
+ printk(MPT2SAS_ERR_FMT "config page: pci_pool_alloc "
+ "failed\n", ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
+ "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
+ "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
+ total_sz += ioc->config_page_sz;
+
+ printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
+ ioc->name, total_sz/1024);
+ printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
+ "Max Controller Queue Depth(%d)\n",
+ ioc->name, ioc->shost->can_queue, facts->RequestCredit);
+ printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
+ ioc->name, ioc->shost->sg_tablesize);
+ return 0;
+
+ out:
+ return -ENOMEM;
+}
+
+
+/**
+ * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
+{
+ u32 s, sc;
+
+ s = readl(&ioc->chip->Doorbell);
+ sc = s & MPI2_IOC_STATE_MASK;
+ return cooked ? sc : s;
+}
+
+/**
+ * _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
+ int sleep_flag)
+{
+ u32 count, cntdn;
+ u32 current_state;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ current_state = mpt2sas_base_get_iocstate(ioc, 1);
+ if (current_state == ioc_state)
+ return 0;
+ if (count && current_state == MPI2_IOC_STATE_FAULT)
+ break;
+ if (sleep_flag == CAN_SLEEP)
+ msleep(1);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ return current_state;
+}
+
+/**
+ * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
+ * a write to the doorbell)
+ * @ioc: per adapter object
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
+ */
+static int
+_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "successful count(%d), timeout(%d)\n", ioc->name,
+ __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ msleep(1);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
+ "int_status(%x)!\n", ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
+ * @ioc: per adapter object
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
+ * doorbell.
+ */
+static int
+_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+ u32 doorbell;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "successful count(%d), timeout(%d)\n", ioc->name,
+ __func__, count, timeout));
+ return 0;
+ } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt2sas_base_fault_info(ioc , doorbell);
+ return -EFAULT;
+ }
+ } else if (int_status == 0xFFFFFFFF)
+ goto out;
+
+ if (sleep_flag == CAN_SLEEP)
+ msleep(1);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ out:
+ printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
+ "int_status(%x)!\n", ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
+ * @ioc: per adapter object
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 doorbell_reg;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ doorbell_reg = readl(&ioc->chip->Doorbell);
+ if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "successful count(%d), timeout(%d)\n", ioc->name,
+ __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ msleep(1);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
+ "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
+ return -EFAULT;
+}
+
+/**
+ * _base_send_ioc_reset - send doorbell reset
+ * @ioc: per adapter object
+ * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state;
+ int r = 0;
+
+ if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
+ printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if (!(ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
+ return -EFAULT;
+
+ printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
+
+ writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
+ &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+ r = -EFAULT;
+ goto out;
+ }
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
+ " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
+ r = -EFAULT;
+ goto out;
+ }
+ out:
+ printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
+ ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * _base_handshake_req_reply_wait - send request thru doorbell interface
+ * @ioc: per adapter object
+ * @request_bytes: request length
+ * @request: pointer having request payload
+ * @reply_bytes: reply length
+ * @reply: pointer to reply payload
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
+ u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+{
+ MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
+ int i;
+ u8 failed;
+ u16 dummy;
+ __le32 *mfp;
+
+ /* make sure doorbell is not in use */
+ if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ printk(MPT2SAS_ERR_FMT "doorbell is in use "
+ " (line=%d)\n", ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* clear pending doorbell interrupts from previous state changes */
+ if (readl(&ioc->chip->HostInterruptStatus) &
+ MPI2_HIS_IOC2SYS_DB_STATUS)
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ /* send message to ioc */
+ writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
+ ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
+ &ioc->chip->Doorbell);
+
+ if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+ printk(MPT2SAS_ERR_FMT "doorbell handshake "
+ "int failed (line=%d)\n", ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+ printk(MPT2SAS_ERR_FMT "doorbell handshake "
+ "ack failed (line=%d)\n", ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* send message 32-bits at a time */
+ for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+ failed = 1;
+ }
+
+ if (failed) {
+ printk(MPT2SAS_ERR_FMT "doorbell handshake "
+ "sending request failed (line=%d)\n", ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* now wait for the reply */
+ if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+ printk(MPT2SAS_ERR_FMT "doorbell handshake "
+ "int failed (line=%d)\n", ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* read the first two 16-bits, it gives the total length of the reply */
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ printk(MPT2SAS_ERR_FMT "doorbell handshake "
+ "int failed (line=%d)\n", ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ for (i = 2; i < default_reply->MsgLength * 2; i++) {
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ printk(MPT2SAS_ERR_FMT "doorbell "
+ "handshake int failed (line=%d)\n", ioc->name,
+ __LINE__);
+ return -EFAULT;
+ }
+ if (i >= reply_bytes/2) /* overflow case */
+ dummy = readl(&ioc->chip->Doorbell);
+ else
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ }
+
+ _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
+ if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
+ " (line=%d)\n", ioc->name, __LINE__));
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ mfp = (__le32 *)reply;
+ printk(KERN_INFO "\toffset:data\n");
+ for (i = 0; i < reply_bytes/4; i++)
+ printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+ return 0;
+}
+
+/**
+ * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SAS IO Unit Control Request message allows the host to perform low-level
+ * operations, such as resets on the PHYs of the IO Unit, also allows the host
+ * to obtain the IOC assigned device handles for a device if it has other
+ * identifying information about the device, in addition allows the host to
+ * remove IOC resources associated with the device.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ bool issue_reset = false;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT2_CMD_PENDING;
+ request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
+ if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
+ ioc->ioc_link_reset_in_progress = 1;
+ init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
+ ioc->ioc_link_reset_in_progress)
+ ioc->ioc_link_reset_in_progress = 0;
+ if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
+ issue_reset = true;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SasIoUnitControlReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+
+/**
+ * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SCSI Enclosure Processor request message causes the IOC to
+ * communicate with SES devices to control LED status signals.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ bool issue_reset = false;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT2_CMD_PENDING;
+ request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
+ init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
+ issue_reset = true;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SepReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _base_get_port_facts - obtain port facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
+{
+ Mpi2PortFactsRequest_t mpi_request;
+ Mpi2PortFactsReply_t mpi_reply;
+ struct mpt2sas_port_facts *pfacts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
+ mpi_request.PortNumber = port;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ pfacts = &ioc->pfacts[port];
+ memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
+ pfacts->PortNumber = mpi_reply.PortNumber;
+ pfacts->VP_ID = mpi_reply.VP_ID;
+ pfacts->VF_ID = mpi_reply.VF_ID;
+ pfacts->MaxPostedCmdBuffers =
+ le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
+
+ return 0;
+}
+
+/**
+ * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
+ * @ioc: per adapter object
+ * @timeout:
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_for_iocstate(struct MPT2SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state, doorbell;
+ int rc;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ ioc_state = doorbell & MPI2_IOC_STATE_MASK;
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ switch (ioc_state) {
+ case MPI2_IOC_STATE_READY:
+ case MPI2_IOC_STATE_OPERATIONAL:
+ return 0;
+ }
+
+ if (doorbell & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "unexpected doorbell activ!e\n", ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if (ioc_state == MPI2_IOC_STATE_FAULT) {
+ mpt2sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ return -EFAULT;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, sleep_flag);
+ return rc;
+}
+
+/**
+ * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCFactsRequest_t mpi_request;
+ Mpi2IOCFactsReply_t mpi_reply;
+ struct mpt2sas_facts *facts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+ if (r) {
+ printk(MPT2SAS_ERR_FMT "%s: failed getting to correct state\n",
+ ioc->name, __func__);
+ return r;
+ }
+
+ mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ facts = &ioc->facts;
+ memset(facts, 0, sizeof(struct mpt2sas_facts));
+ facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
+ facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
+ facts->VP_ID = mpi_reply.VP_ID;
+ facts->VF_ID = mpi_reply.VF_ID;
+ facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
+ facts->MaxChainDepth = mpi_reply.MaxChainDepth;
+ facts->WhoInit = mpi_reply.WhoInit;
+ facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+ facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
+ facts->MaxReplyDescriptorPostQueueDepth =
+ le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
+ facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
+ facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
+ if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
+ ioc->ir_firmware = 1;
+ if ((facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+ ioc->rdpq_array_capable = 1;
+ facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
+ facts->IOCRequestFrameSize =
+ le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
+ facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
+ ioc->shost->max_id = -1;
+ facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
+ facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
+ facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
+ facts->HighPriorityCredit =
+ le16_to_cpu(mpi_reply.HighPriorityCredit);
+ facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
+ facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
+ "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
+ facts->MaxChainDepth));
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
+ "reply frame size(%d)\n", ioc->name,
+ facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ return 0;
+}
+
+/**
+ * _base_send_ioc_init - send ioc_init to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCInitRequest_t mpi_request;
+ Mpi2IOCInitReply_t mpi_reply;
+ int i, r = 0;
+ struct timeval current_time;
+ u16 ioc_status;
+ u32 reply_post_free_array_sz = 0;
+ Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+ dma_addr_t reply_post_free_array_dma;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
+ mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ mpi_request.VF_ID = 0; /* TODO */
+ mpi_request.VP_ID = 0;
+ mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+ mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+
+ if (_base_is_controller_msix_enabled(ioc))
+ mpi_request.HostMSIxVectors = ioc->reply_queue_count;
+ mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
+ mpi_request.ReplyDescriptorPostQueueDepth =
+ cpu_to_le16(ioc->reply_post_queue_depth);
+ mpi_request.ReplyFreeQueueDepth =
+ cpu_to_le16(ioc->reply_free_queue_depth);
+
+ mpi_request.SenseBufferAddressHigh =
+ cpu_to_le32((u64)ioc->sense_dma >> 32);
+ mpi_request.SystemReplyAddressHigh =
+ cpu_to_le32((u64)ioc->reply_dma >> 32);
+ mpi_request.SystemRequestFrameBaseAddress =
+ cpu_to_le64((u64)ioc->request_dma);
+ mpi_request.ReplyFreeQueueAddress =
+ cpu_to_le64((u64)ioc->reply_free_dma);
+
+ if (ioc->rdpq_array_enable) {
+ reply_post_free_array_sz = ioc->reply_queue_count *
+ sizeof(Mpi2IOCInitRDPQArrayEntry);
+ reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+ reply_post_free_array_sz, &reply_post_free_array_dma);
+ if (!reply_post_free_array) {
+ printk(MPT2SAS_ERR_FMT
+ "reply_post_free_array: pci_alloc_consistent failed\n",
+ ioc->name);
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(reply_post_free_array, 0, reply_post_free_array_sz);
+ for (i = 0; i < ioc->reply_queue_count; i++)
+ reply_post_free_array[i].RDPQBaseAddress =
+ cpu_to_le64(
+ (u64)ioc->reply_post[i].reply_post_free_dma);
+ mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)reply_post_free_array_dma);
+ } else {
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+ }
+
+ /* This time stamp specifies number of milliseconds
+ * since epoch ~ midnight January 1, 1970.
+ */
+ do_gettimeofday(&current_time);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ __le32 *mfp;
+ int i;
+
+ mfp = (__le32 *)&mpi_request;
+ printk(KERN_INFO "\toffset:data\n");
+ for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
+ printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+
+ r = _base_handshake_req_reply_wait(ioc,
+ sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
+ sleep_flag);
+
+ if (r != 0) {
+ printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ goto out;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
+ mpi_reply.IOCLogInfo) {
+ printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
+ r = -EIO;
+ }
+
+out:
+ if (reply_post_free_array)
+ pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+ reply_post_free_array,
+ reply_post_free_array_dma);
+ return r;
+}
+
+/**
+ * mpt2sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+
+ ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
+ memcpy(ioc->port_enable_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
+
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ ioc->port_enable_failed = 1;
+
+ if (ioc->is_driver_loading) {
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ mpt2sas_port_enable_complete(ioc);
+ return 1;
+ } else {
+ ioc->start_scan_failed = ioc_status;
+ ioc->start_scan = 0;
+ return 1;
+ }
+ }
+ complete(&ioc->port_enable_cmds.done);
+ return 1;
+}
+
+
+/**
+ * _base_send_port_enable - send port_enable(discovery stuff) to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ Mpi2PortEnableReply_t *mpi_reply;
+ unsigned long timeleft;
+ int r = 0;
+ u16 smid;
+ u16 ioc_status;
+
+ printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+ printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ init_completion(&ioc->port_enable_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
+ 300*HZ);
+ if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2PortEnableRequest_t)/4);
+ if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ goto out;
+ }
+ mpi_reply = ioc->port_enable_cmds.reply;
+
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
+ ioc->name, __func__, ioc_status);
+ r = -EFAULT;
+ goto out;
+ }
+ out:
+ ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+ printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+ "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ u16 smid;
+
+ printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+ printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ mpt2sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - desposition
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
+{
+ /* We wait for discovery to complete if IR firmware is loaded.
+ * The sas topology events arrive before PD events, so we need time to
+ * turn on the bit in ioc->pd_handles to indicate PD
+ * Also, it maybe required to report Volumes ahead of physical
+ * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+ */
+ if (ioc->ir_firmware)
+ return 1;
+
+ /* if no Bios, then we don't need to wait */
+ if (!ioc->bios_pg3.BiosVersion)
+ return 0;
+
+ /* Bios is present, then we drop down here.
+ *
+ * If there any entries in the Bios Page 2, then we wait
+ * for discovery to complete.
+ */
+
+ /* Current Boot Device */
+ if ((ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Request Boot Device */
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Alternate Request Boot Device */
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+ return 0;
+
+ return 1;
+}
+
+
+/**
+ * _base_unmask_events - turn on notification for this event
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The mask is stored in ioc->event_masks.
+ */
+static void
+_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
+{
+ u32 desired_event;
+
+ if (event >= 128)
+ return;
+
+ desired_event = (1 << (event % 32));
+
+ if (event < 32)
+ ioc->event_masks[0] &= ~desired_event;
+ else if (event < 64)
+ ioc->event_masks[1] &= ~desired_event;
+ else if (event < 96)
+ ioc->event_masks[2] &= ~desired_event;
+ else if (event < 128)
+ ioc->event_masks[3] &= ~desired_event;
+}
+
+/**
+ * _base_event_notification - send event notification
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2EventNotificationRequest_t *mpi_request;
+ unsigned long timeleft;
+ u16 smid;
+ int r = 0;
+ int i;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
+ printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+ ioc->base_cmds.status = MPT2_CMD_PENDING;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mpi_request->EventMasks[i] =
+ cpu_to_le32(ioc->event_masks[i]);
+ init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+ if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2EventNotificationRequest_t)/4);
+ if (ioc->base_cmds.status & MPT2_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ } else
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
+ ioc->name, __func__));
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ return r;
+}
+
+/**
+ * mpt2sas_base_validate_event_type - validating event types
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * This will turn on firmware event notification when application
+ * ask for that event. We don't mask events that are already enabled.
+ */
+void
+mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
+{
+ int i, j;
+ u32 event_mask, desired_event;
+ u8 send_update_to_fw;
+
+ for (i = 0, send_update_to_fw = 0; i <
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
+ event_mask = ~event_type[i];
+ desired_event = 1;
+ for (j = 0; j < 32; j++) {
+ if (!(event_mask & desired_event) &&
+ (ioc->event_masks[i] & desired_event)) {
+ ioc->event_masks[i] &= ~desired_event;
+ send_update_to_fw = 1;
+ }
+ desired_event = (desired_event << 1);
+ }
+ }
+
+ if (!send_update_to_fw)
+ return;
+
+ mutex_lock(&ioc->base_cmds.mutex);
+ _base_event_notification(ioc, CAN_SLEEP);
+ mutex_unlock(&ioc->base_cmds.mutex);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
+ printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
+ ioc->name));
+
+ count = 0;
+ do {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
+ "sequence\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ /* wait 100 msec */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+
+ if (count++ > 20)
+ goto out;
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
+ "sequence: count(%d), host_diagnostic(0x%08x)\n",
+ ioc->name, count, host_diagnostic));
+
+ } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+
+ hcb_size = readl(&ioc->chip->HCBSize);
+
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
+ ioc->name));
+ writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
+ &ioc->chip->HostDiagnostic);
+
+ /* This delay allows the chip PCIe hardware time to finish reset tasks*/
+ if (sleep_flag == CAN_SLEEP)
+ msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+ else
+ mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+
+ /* Approximately 300 second max wait */
+ for (count = 0; count < (300000000 /
+ MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic == 0xFFFFFFFF)
+ goto out;
+ if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
+ break;
+
+ /* Wait to pass the second read delay window */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
+ /1000);
+ else
+ mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
+ /1000);
+ }
+
+ if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
+
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
+ "assuming the HCB Address points to good F/W\n",
+ ioc->name));
+ host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
+ host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "re-enable the HCDW\n", ioc->name));
+ writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
+ &ioc->chip->HCBSize);
+ }
+
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
+ ioc->name));
+ writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
+ &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
+ "diagnostic register\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
+ "READY state\n", ioc->name));
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
+ sleep_flag);
+ if (ioc_state) {
+ printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
+ " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
+ goto out;
+ }
+
+ printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
+ return 0;
+
+ out:
+ printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
+ return -EFAULT;
+}
+
+/**
+ * _base_make_ioc_ready - put controller in READY state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ u32 ioc_state;
+ int rc;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
+ return 0;
+
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
+ "active!\n", ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt2sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ if (type == FORCE_BIG_HAMMER)
+ goto issue_diag_reset;
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+ if (!(_base_send_ioc_reset(ioc,
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+ ioc->ioc_reset_count++;
+ return 0;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, CAN_SLEEP);
+ ioc->ioc_reset_count++;
+ return rc;
+}
+
+/**
+ * _base_make_ioc_operational - put controller in OPERATIONAL state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ int r, i;
+ unsigned long flags;
+ u32 reply_address;
+ u16 smid;
+ struct _tr_list *delayed_tr, *delayed_tr_next;
+ u8 hide_flag;
+ struct adapter_reply_queue *reply_q;
+ long reply_post_free;
+ u32 reply_post_free_sz, index = 0;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* clean the delayed target reset list */
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_volume_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ /* initialize the scsi lookup free list */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ INIT_LIST_HEAD(&ioc->free_list);
+ smid = 1;
+ for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
+ INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].smid = smid;
+ ioc->scsi_lookup[i].scmd = NULL;
+ ioc->scsi_lookup[i].direct_io = 0;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
+ }
+
+ /* hi-priority queue */
+ INIT_LIST_HEAD(&ioc->hpr_free_list);
+ smid = ioc->hi_priority_smid;
+ for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ ioc->hpr_lookup[i].smid = smid;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ }
+
+ /* internal queue */
+ INIT_LIST_HEAD(&ioc->internal_free_list);
+ smid = ioc->internal_smid;
+ for (i = 0; i < ioc->internal_depth; i++, smid++) {
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ ioc->internal_lookup[i].smid = smid;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+
+ /* chain pool */
+ INIT_LIST_HEAD(&ioc->free_chain_list);
+ for (i = 0; i < ioc->chain_depth; i++)
+ list_add_tail(&ioc->chain_lookup[i].tracker_list,
+ &ioc->free_chain_list);
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /* initialize Reply Free Queue */
+ for (i = 0, reply_address = (u32)ioc->reply_dma ;
+ i < ioc->reply_free_queue_depth ; i++, reply_address +=
+ ioc->reply_sz)
+ ioc->reply_free[i] = cpu_to_le32(reply_address);
+
+ /* initialize reply queues */
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
+
+ /* initialize Reply Post Free Queue */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->reply_post_host_index = 0;
+ reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+ reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable)
+ reply_post_free = (long)
+ ioc->reply_post[++index].reply_post_free;
+ else
+ reply_post_free += reply_post_free_sz;
+ }
+ skip_init_reply_post_free_queue:
+
+ r = _base_send_ioc_init(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ /* initialize reply free host index */
+ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
+ writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
+
+ /* initialize reply post host index */
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_host_index;
+ }
+
+ skip_init_reply_post_host_index:
+
+ _base_unmask_interrupts(ioc);
+
+ r = _base_event_notification(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ if (sleep_flag == CAN_SLEEP)
+ _base_static_config_pages(ioc);
+
+
+ if (ioc->is_driver_loading) {
+ if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+ == 0x80) {
+ hide_flag = (u8) (
+ le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
+ MFG_PAGE10_HIDE_SSDS_MASK);
+ if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
+ ioc->mfg_pg10_hide_flag = hide_flag;
+ }
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+ return r; /* scan_start and scan_finished support */
+ }
+ r = _base_send_port_enable(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * mpt2sas_base_free_resources - free resources controller resources (io/irq/memap)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->chip_phys && ioc->chip) {
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ }
+
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+
+ if (ioc->chip_phys && ioc->chip)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ return;
+}
+
+/**
+ * mpt2sas_base_attach - attach controller instance
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+{
+ int r, i;
+ int cpu_id, last_cpu_id = 0;
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* setup cpu_msix_table */
+ ioc->cpu_count = num_online_cpus();
+ for_each_online_cpu(cpu_id)
+ last_cpu_id = cpu_id;
+ ioc->cpu_msix_table_sz = last_cpu_id + 1;
+ ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+ ioc->reply_queue_count = 1;
+ if (!ioc->cpu_msix_table) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
+ "cpu_msix_table failed!!!\n", ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
+ sizeof(resource_size_t *), GFP_KERNEL);
+ if (!ioc->reply_post_host_index) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
+ "for cpu_msix_table failed!!!\n", ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ }
+
+ ioc->rdpq_array_enable_assigned = 0;
+ ioc->dma_mask = 0;
+ r = mpt2sas_base_map_resources(ioc);
+ if (r)
+ goto out_free_resources;
+
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
+ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+
+ ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+ sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
+ r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+ }
+
+ r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ init_waitqueue_head(&ioc->reset_wq);
+ /* allocate memory pd handle bitmask list */
+ ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pd_handles_sz++;
+ ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->pd_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->blocking_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ ioc->fwfault_debug = mpt2sas_fwfault_debug;
+
+ /* base internal command bits */
+ mutex_init(&ioc->base_cmds.mutex);
+ ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+
+ /* port_enable command bits */
+ ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
+
+ /* transport internal command bits */
+ ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->transport_cmds.mutex);
+
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
+ /* task management internal command bits */
+ ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->tm_cmds.mutex);
+
+ /* config page internal command bits */
+ ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->config_cmds.mutex);
+
+ /* ctl module internal command bits */
+ ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->ctl_cmds.mutex);
+
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
+ !ioc->ctl_cmds.sense) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ ioc->event_masks[i] = -1;
+
+ /* here we enable the events we care about */
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
+ _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+ _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
+ r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ ioc->non_operational_loop = 0;
+
+ return 0;
+
+ out_free_resources:
+
+ ioc->remove_host = 1;
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ if (ioc->is_warpdrive)
+ kfree(ioc->reply_post_host_index);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->pfacts);
+ ioc->ctl_cmds.reply = NULL;
+ ioc->base_cmds.reply = NULL;
+ ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
+ ioc->transport_cmds.reply = NULL;
+ ioc->config_cmds.reply = NULL;
+ ioc->pfacts = NULL;
+ return r;
+}
+
+
+/**
+ * mpt2sas_base_detach - remove controller instance
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
+{
+
+ dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ if (ioc->is_warpdrive)
+ kfree(ioc->reply_post_host_index);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+}
+
+/**
+ * _base_reset_handler - reset callback handler (for base)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
+ * MPT2_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+static void
+_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
+{
+ mpt2sas_scsih_reset_handler(ioc, reset_phase);
+ mpt2sas_ctl_reset_handler(ioc, reset_phase);
+ switch (reset_phase) {
+ case MPT2_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT2_IOC_AFTER_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT2_CMD_NOT_USED;
+ } else
+ complete(&ioc->port_enable_cmds.done);
+
+ }
+ if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+ break;
+ case MPT2_IOC_DONE_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
+ break;
+ }
+}
+
+/**
+ * _wait_for_commands_to_complete - reset controller
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * This function waiting(3s) for all pending commands to complete
+ * prior to putting controller in reset.
+ */
+static void
+_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 ioc_state;
+ unsigned long flags;
+ u16 i;
+
+ ioc->pending_io_count = 0;
+ if (sleep_flag != CAN_SLEEP)
+ return;
+
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+ return;
+
+ /* pending command count */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = 0; i < ioc->scsiio_depth; i++)
+ if (ioc->scsi_lookup[i].cb_idx != 0xFF)
+ ioc->pending_io_count++;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!ioc->pending_io_count)
+ return;
+
+ /* wait for pending commands to complete */
+ wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * mpt2sas_base_hard_reset_handler - reset controller
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ int r;
+ unsigned long flags;
+
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery) {
+ printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
+ ioc->name, __func__);
+ r = 0;
+ goto out_unlocked;
+ }
+
+ if (mpt2sas_fwfault_debug)
+ mpt2sas_halt_firmware(ioc);
+
+ /* TODO - What we really should be doing is pulling
+ * out all the code associated with NO_SLEEP; its never used.
+ * That is legacy code from mpt fusion driver, ported over.
+ * I will leave this BUG_ON here for now till its been resolved.
+ */
+ BUG_ON(sleep_flag == NO_SLEEP);
+
+ /* wait for an active reset in progress to complete */
+ if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
+ do {
+ ssleep(1);
+ } while (ioc->shost_recovery == 1);
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return ioc->ioc_reset_in_progress_status;
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ _base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
+ _wait_for_commands_to_complete(ioc, sleep_flag);
+ _base_mask_interrupts(ioc);
+ r = _base_make_ioc_ready(ioc, sleep_flag, type);
+ if (r)
+ goto out;
+ _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (ioc->is_driver_loading && ioc->port_enable_failed) {
+ ioc->remove_host = 1;
+ r = -EFAULT;
+ goto out;
+ }
+
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out;
+
+ if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+ panic("%s: Issue occurred with flashing controller firmware."
+ "Please reboot the system and ensure that the correct"
+ " firmware version is running\n", ioc->name);
+
+ r = _base_make_ioc_operational(ioc, sleep_flag);
+ if (!r)
+ _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
+ out:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
+ ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_in_progress_status = r;
+ ioc->shost_recovery = 0;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ mutex_unlock(&ioc->reset_in_progress_mutex);
+
+ out_unlocked:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return r;
+}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
new file mode 100644
index 000000000..caff8d10c
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -0,0 +1,1198 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT2SAS_BASE_H_INCLUDED
+#define MPT2SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "mpt2sas_debug.h"
+
+/* driver versioning info */
+#define MPT2SAS_DRIVER_NAME "mpt2sas"
+#define MPT2SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
+#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
+#define MPT2SAS_DRIVER_VERSION "20.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 20
+#define MPT2SAS_MINOR_VERSION 100
+#define MPT2SAS_BUILD_VERSION 00
+#define MPT2SAS_RELEASE_VERSION 00
+
+/*
+ * Set MPT2SAS_SG_DEPTH value based on user input.
+ */
+#ifdef CONFIG_SCSI_MPT2SAS_MAX_SGE
+#if CONFIG_SCSI_MPT2SAS_MAX_SGE < 16
+#define MPT2SAS_SG_DEPTH 16
+#elif CONFIG_SCSI_MPT2SAS_MAX_SGE > 128
+#define MPT2SAS_SG_DEPTH 128
+#else
+#define MPT2SAS_SG_DEPTH CONFIG_SCSI_MPT2SAS_MAX_SGE
+#endif
+#else
+#define MPT2SAS_SG_DEPTH 128 /* MAX_HW_SEGMENTS */
+#endif
+
+
+/*
+ * Generic Defines
+ */
+#define MPT2SAS_SATA_QUEUE_DEPTH 32
+#define MPT2SAS_SAS_QUEUE_DEPTH 254
+#define MPT2SAS_RAID_QUEUE_DEPTH 128
+
+#define MPT_NAME_LENGTH 32 /* generic length of strings */
+#define MPT_STRING_LENGTH 64
+
+#define MPT_MAX_CALLBACKS 16
+
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+
+#define MPI2_HIM_MASK 0xFFFFFFFF /* mask every bit*/
+
+#define MPT2SAS_INVALID_DEVICE_HANDLE 0xFFFF
+
+
+/*
+ * reset phases
+ */
+#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
+#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
+#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
+
+/*
+ * logging format
+ */
+#define MPT2SAS_FMT "%s: "
+#define MPT2SAS_INFO_FMT KERN_INFO MPT2SAS_FMT
+#define MPT2SAS_NOTE_FMT KERN_NOTICE MPT2SAS_FMT
+#define MPT2SAS_WARN_FMT KERN_WARNING MPT2SAS_FMT
+#define MPT2SAS_ERR_FMT KERN_ERR MPT2SAS_FMT
+
+/*
+ * Dell HBA branding
+ */
+#define MPT2SAS_DELL_BRANDING_SIZE 32
+
+#define MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING "Dell 6Gbps SAS HBA"
+#define MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING "Dell PERC H200 Adapter"
+#define MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING "Dell PERC H200 Integrated"
+#define MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING "Dell PERC H200 Modular"
+#define MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING "Dell PERC H200 Embedded"
+#define MPT2SAS_DELL_PERC_H200_BRANDING "Dell PERC H200"
+#define MPT2SAS_DELL_6GBPS_SAS_BRANDING "Dell 6Gbps SAS"
+
+/*
+ * Dell HBA SSDIDs
+ */
+#define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C
+#define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D
+#define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E
+#define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F
+#define MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID 0x1F20
+#define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21
+#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22
+
+/*
+ * Intel HBA branding
+ */
+#define MPT2SAS_INTEL_RMS25JB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB080"
+#define MPT2SAS_INTEL_RMS25JB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB040"
+#define MPT2SAS_INTEL_RMS25KB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB080"
+#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB080"
+#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
+ "Intel Integrated RAID Module RMS2LL080"
+#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
+ "Intel Integrated RAID Module RMS2LL040"
+#define MPT2SAS_INTEL_RS25GB008_BRANDING \
+ "Intel(R) RAID Controller RS25GB008"
+#define MPT2SAS_INTEL_SSD910_BRANDING \
+ "Intel(R) SSD 910 Series"
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516
+#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
+#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
+#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
+#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
+#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
+#define MPT2SAS_INTEL_SSD910_SSDID 0x3700
+
+/*
+ * HP HBA branding
+ */
+#define MPT2SAS_HP_3PAR_SSVID 0x1590
+#define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter"
+#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter"
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter"
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter"
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter"
+
+/*
+ * HO HBA SSDIDs
+ */
+#define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041
+#define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046
+
+/*
+ * WarpDrive Specific Log codes
+ */
+
+#define MPT2_WARPDRIVE_LOGENTRY (0x8002)
+#define MPT2_WARPDRIVE_LC_SSDT (0x41)
+#define MPT2_WARPDRIVE_LC_SSDLW (0x43)
+#define MPT2_WARPDRIVE_LC_SSDLF (0x44)
+#define MPT2_WARPDRIVE_LC_BRMF (0x4D)
+
+/*
+ * per target private data
+ */
+#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
+#define MPT_TARGET_FLAGS_VOLUME 0x02
+#define MPT_TARGET_FLAGS_DELETED 0x04
+
+/**
+ * struct MPT2SAS_TARGET - starget private hostdata
+ * @starget: starget object
+ * @sas_address: target sas address
+ * @raid_device: raid_device pointer to access volume data
+ * @handle: device handle
+ * @num_luns: number luns
+ * @flags: MPT_TARGET_FLAGS_XXX flags
+ * @deleted: target flaged for deletion
+ * @tm_busy: target is busy with TM request.
+ */
+struct MPT2SAS_TARGET {
+ struct scsi_target *starget;
+ u64 sas_address;
+ struct _raid_device *raid_device;
+ u16 handle;
+ int num_luns;
+ u32 flags;
+ u8 deleted;
+ u8 tm_busy;
+};
+
+
+/*
+ * per device private data
+ */
+#define MPT_DEVICE_FLAGS_INIT 0x01
+#define MPT_DEVICE_TLR_ON 0x02
+
+/**
+ * struct MPT2SAS_DEVICE - sdev private hostdata
+ * @sas_target: starget private hostdata
+ * @lun: lun number
+ * @flags: MPT_DEVICE_XXX flags
+ * @configured_lun: lun is configured
+ * @block: device is in SDEV_BLOCK state
+ * @tlr_snoop_check: flag used in determining whether to disable TLR
+ */
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID (0x00000000)
+#define MFG10_OEM_ID_DELL (0x00000001)
+#define MFG10_OEM_ID_FSC (0x00000002)
+#define MFG10_OEM_ID_SUN (0x00000003)
+#define MFG10_OEM_ID_IBM (0x00000004)
+
+/* GENERIC Flags 0*/
+#define MFG10_GF0_OCE_DISABLED (0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
+#define MFG10_GF0_R10_DISPLAY (0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 OEMIdentifier; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 08h */
+ U32 Reserved3; /* 0Ch */
+ U32 GenericFlags0; /* 10h */
+ U32 GenericFlags1; /* 14h */
+ U32 Reserved4; /* 18h */
+ U32 OEMSpecificFlags0; /* 1Ch */
+ U32 OEMSpecificFlags1; /* 20h */
+ U32 Reserved5[18]; /* 24h-60h*/
+} MPI2_CONFIG_PAGE_MAN_10,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
+ Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
+
+#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
+#define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
+#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01)
+#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02)
+
+
+struct MPT2SAS_DEVICE {
+ struct MPT2SAS_TARGET *sas_target;
+ unsigned int lun;
+ u32 flags;
+ u8 configured_lun;
+ u8 block;
+ u8 tlr_snoop_check;
+};
+
+#define MPT2_CMD_NOT_USED 0x8000 /* free */
+#define MPT2_CMD_COMPLETE 0x0001 /* completed */
+#define MPT2_CMD_PENDING 0x0002 /* pending */
+#define MPT2_CMD_REPLY_VALID 0x0004 /* reply is valid */
+#define MPT2_CMD_RESET 0x0008 /* host reset dropped the command */
+
+/**
+ * struct _internal_cmd - internal commands struct
+ * @mutex: mutex
+ * @done: completion
+ * @reply: reply message pointer
+ * @sense: sense data
+ * @status: MPT2_CMD_XXX status
+ * @smid: system message id
+ */
+struct _internal_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ void *sense;
+ u16 status;
+ u16 smid;
+};
+
+
+/**
+ * struct _sas_device - attached device information
+ * @list: sas device list
+ * @starget: starget object
+ * @sas_address: device sas address
+ * @device_name: retrieved from the SAS IDENTIFY frame.
+ * @handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: number number
+ * @phy: phy identifier provided in sas device page 0
+ * @responding: used in _scsih_sas_device_mark_responding
+ * @pfa_led_on: flag for PFA LED status
+ */
+struct _sas_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 sas_address;
+ u64 device_name;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u16 volume_handle;
+ u64 volume_wwid;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 phy;
+ u8 responding;
+ u8 pfa_led_on;
+};
+
+/**
+ * struct _raid_device - raid volume link list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @block_size: Block size of the volume
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: bitfield provides detailed info about the hidden components
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ * @direct_io_enabled: Whether direct io to PDs are allowed or not
+ * @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
+ * @max_lba: Maximum number of LBA in the volume
+ * @stripe_sz: Stripe Size of the volume
+ * @device_info: Device info of the volume member disk
+ * @pd_handle: Array of handles of the physical drives for direct I/O in le16
+ */
+#define MPT_MAX_WARPDRIVE_PDS 8
+struct _raid_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ u64 wwid;
+ u16 handle;
+ u16 block_sz;
+ int id;
+ int channel;
+ u8 volume_type;
+ u8 num_pds;
+ u8 responding;
+ u8 percent_complete;
+ u8 direct_io_enabled;
+ u8 stripe_exponent;
+ u8 block_exponent;
+ u64 max_lba;
+ u32 stripe_sz;
+ u32 device_info;
+ u16 pd_handle[MPT_MAX_WARPDRIVE_PDS];
+};
+
+/**
+ * struct _boot_device - boot device info
+ * @is_raid: flag to indicate whether this is volume
+ * @device: holds pointer for either struct _sas_device or
+ * struct _raid_device
+ */
+struct _boot_device {
+ u8 is_raid;
+ void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ */
+struct _sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: handle for this a member of an enclosure
+ * @device_info: bitwise defining capabilities of this sas_host/expander
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ */
+struct _sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 responding;
+ struct _sas_phy *phy;
+ struct list_head sas_port_list;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset, if fails to to big hammer
+ */
+enum reset_type {
+ FORCE_BIG_HAMMER,
+ SOFT_RESET,
+};
+
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free request (ioc->free_chain_list)
+ */
+struct chain_tracker {
+ void *chain_buffer;
+ dma_addr_t chain_buffer_dma;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
+ * @chain_list: list of chains associated to this IO
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct scsiio_tracker {
+ u16 smid;
+ struct scsi_cmnd *scmd;
+ u8 cb_idx;
+ u8 direct_io;
+ struct list_head chain_list;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: this list
+*/
+struct adapter_reply_queue {
+ struct MPT2SAS_ADAPTER *ioc;
+ u8 msix_index;
+ unsigned int vector;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ cpumask_var_t affinity_hint;
+ struct list_head list;
+};
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi2_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt2sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi2_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 Reserved3;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+};
+
+struct mpt2sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+struct reply_post_struct {
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+ TM_MUTEX_OFF = 0,
+ TM_MUTEX_ON = 1,
+};
+
+typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
+/**
+ * struct MPT2SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @chip: memory mapped register space
+ * @chip_phys: physical addrss prior to mapping
+ * @logging_level: see mpt2sas_debug.h
+ * @fwfault_debug: debuging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BAR's that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_events_off: flag to turn off fw event handling
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt2sas_fw_work
+ * @start_scan_failed: means port enable failed, return's the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: clt internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: base internal commands
+ * @tm_tr_cb_idx : device removal target reset handshake
+ * @tm_tr_volume_cb_idx : volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id : used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles : bitmask for PD handles
+ * @pd_handles_sz : size of pd_handle bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserve memory for config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @chain: pool of chains
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain_dma:
+ * @max_sges_in_main_message: number sg elements in main message
+ * @max_sges_in_chain_message: number sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_offset_value_for_main_message: location 1st sg in main
+ * @chain_depth: total chains allocated
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth:
+ * @reply_sz: per reply frame size:
+ * @reply: pool of replys:
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replys
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_struct: struct for reply_post_free physical & virt address
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
+ * is assigned only ones
+ * @reply_queue_count: number of reply queue's
+ * @reply_queue_list: link list contaning the reply queue info
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @delayed_tr_list: target reset link list
+ * @delayed_tr_volume_list: volume target reset link list
+ * @@temp_sensors_count: flag to carry the number of temperature sensors
+ */
+struct MPT2SAS_ADAPTER {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ char name[MPT_NAME_LENGTH];
+ char tmp_string[MPT_STRING_LENGTH];
+ struct pci_dev *pdev;
+ Mpi2SystemInterfaceRegs_t __iomem *chip;
+ resource_size_t chip_phys;
+ int logging_level;
+ int fwfault_debug;
+ u8 ir_firmware;
+ int bars;
+ u8 mask_interrupts;
+ int dma_mask;
+
+ /* fw fault handler */
+ char fault_reset_work_q_name[20];
+ struct workqueue_struct *fault_reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ /* fw event handler */
+ char firmware_event_name[20];
+ struct workqueue_struct *firmware_event_thread;
+ spinlock_t fw_event_lock;
+ struct list_head fw_event_list;
+
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
+ u8 shost_recovery;
+
+ struct mutex reset_in_progress_mutex;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+ u8 ioc_reset_in_progress_status;
+
+ u8 ignore_loginfos;
+ u8 remove_host;
+ u8 pci_error_recovery;
+ u8 wait_for_discovery_to_complete;
+ struct completion port_enable_done;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+
+ u8 start_scan;
+ u16 start_scan_failed;
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+ u8 *cpu_msix_table;
+ resource_size_t __iomem **reply_post_host_index;
+ u16 cpu_msix_table_sz;
+ u32 ioc_reset_count;
+ MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+ u32 non_operational_loop;
+
+ /* internal commands, callback index */
+ u8 scsi_io_cb_idx;
+ u8 tm_cb_idx;
+ u8 transport_cb_idx;
+ u8 scsih_cb_idx;
+ u8 ctl_cb_idx;
+ u8 base_cb_idx;
+ u8 port_enable_cb_idx;
+ u8 config_cb_idx;
+ u8 tm_tr_cb_idx;
+ u8 tm_tr_volume_cb_idx;
+ u8 tm_sas_control_cb_idx;
+ struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
+ struct _internal_cmd transport_cmds;
+ struct _internal_cmd scsih_cmds;
+ struct _internal_cmd tm_cmds;
+ struct _internal_cmd ctl_cmds;
+ struct _internal_cmd config_cmds;
+
+ MPT_ADD_SGE base_add_sg_single;
+
+ /* event log */
+ u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u32 event_context;
+ void *event_log;
+ u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ /* static config pages */
+ struct mpt2sas_facts facts;
+ struct mpt2sas_port_facts *pfacts;
+ Mpi2ManufacturingPage0_t manu_pg0;
+ Mpi2BiosPage2_t bios_pg2;
+ Mpi2BiosPage3_t bios_pg3;
+ Mpi2IOCPage8_t ioc_pg8;
+ Mpi2IOUnitPage0_t iounit_pg0;
+ Mpi2IOUnitPage1_t iounit_pg1;
+ Mpi2IOUnitPage8_t iounit_pg8;
+
+ struct _boot_device req_boot_device;
+ struct _boot_device req_alt_boot_device;
+ struct _boot_device current_boot_device;
+
+ /* sas hba, expander, and device list */
+ struct _sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head sas_device_list;
+ struct list_head sas_device_init_list;
+ spinlock_t sas_device_lock;
+ struct list_head raid_device_list;
+ spinlock_t raid_device_lock;
+ u8 io_missing_delay;
+ u16 device_missing_delay;
+ int sas_id;
+ void *blocking_handles;
+ void *pd_handles;
+ u16 pd_handles_sz;
+
+ /* config page */
+ u16 config_page_sz;
+ void *config_page;
+ dma_addr_t config_page_dma;
+
+ /* scsiio request */
+ u16 hba_queue_depth;
+ u16 sge_size;
+ u16 scsiio_depth;
+ u16 request_sz;
+ u8 *request;
+ dma_addr_t request_dma;
+ u32 request_dma_sz;
+ struct scsiio_tracker *scsi_lookup;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
+ struct list_head free_list;
+ int pending_io_count;
+ wait_queue_head_t reset_wq;
+
+ /* chain */
+ struct chain_tracker *chain_lookup;
+ struct list_head free_chain_list;
+ struct dma_pool *chain_dma_pool;
+ ulong chain_pages;
+ u16 max_sges_in_main_message;
+ u16 max_sges_in_chain_message;
+ u16 chains_needed_per_io;
+ u16 chain_offset_value_for_main_message;
+ u32 chain_depth;
+
+ /* hi-priority queue */
+ u16 hi_priority_smid;
+ u8 *hi_priority;
+ dma_addr_t hi_priority_dma;
+ u16 hi_priority_depth;
+ struct request_tracker *hpr_lookup;
+ struct list_head hpr_free_list;
+
+ /* internal queue */
+ u16 internal_smid;
+ u8 *internal;
+ dma_addr_t internal_dma;
+ u16 internal_depth;
+ struct request_tracker *internal_lookup;
+ struct list_head internal_free_list;
+
+ /* sense */
+ u8 *sense;
+ dma_addr_t sense_dma;
+ struct dma_pool *sense_dma_pool;
+
+ /* reply */
+ u16 reply_sz;
+ u8 *reply;
+ dma_addr_t reply_dma;
+ u32 reply_dma_max_address;
+ u32 reply_dma_min_address;
+ struct dma_pool *reply_dma_pool;
+
+ /* reply free queue */
+ u16 reply_free_queue_depth;
+ __le32 *reply_free;
+ dma_addr_t reply_free_dma;
+ struct dma_pool *reply_free_dma_pool;
+ u32 reply_free_host_index;
+
+ /* reply post queue */
+ u16 reply_post_queue_depth;
+ struct reply_post_struct *reply_post;
+ u8 rdpq_array_capable;
+ u8 rdpq_array_enable;
+ u8 rdpq_array_enable_assigned;
+ struct dma_pool *reply_post_free_dma_pool;
+ u8 reply_queue_count;
+ struct list_head reply_queue_list;
+
+ struct list_head delayed_tr_list;
+ struct list_head delayed_tr_volume_list;
+ u8 temp_sensors_count;
+
+ /* diag buffer support */
+ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+ Mpi2ManufacturingPage10_t manu_pg10;
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 ring_buffer_offset;
+ u32 ring_buffer_sz;
+ u8 is_warpdrive;
+ u8 hide_ir_msg;
+ u8 mfg_pg10_hide_flag;
+ u8 hide_drives;
+
+};
+
+typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+
+
+/* base shared API */
+extern struct list_head mpt2sas_ioc_list;
+void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
+void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
+
+int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
+void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
+int mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc);
+void mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc);
+int mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type);
+
+void *mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
+__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
+ u16 smid);
+void mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc);
+
+/* hi-priority queue */
+u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
+u16 mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd);
+
+u16 mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
+void mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+void mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+void mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u16 io_index);
+void mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+void mpt2sas_base_initialize_callback_handler(void);
+u8 mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+void mpt2sas_base_release_callback_handler(u8 cb_idx);
+
+u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+u8 mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
+void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr);
+
+u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked);
+
+void mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code);
+int mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply, Mpi2SasIoUnitControlRequest_t
+ *mpi_request);
+int mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
+void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type);
+
+void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
+
+void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay);
+
+int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
+/* scsih shared API */
+void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply);
+int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, enum mutex_type m_type);
+void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
+void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
+void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+void mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address);
+struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
+ u16 handle);
+struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
+ *ioc, u64 sas_address);
+struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
+ struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+
+void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
+
+void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
+
+/* config shared API */
+u8 mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
+int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page);
+int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage2_t *config_page);
+int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage0_t *config_page);
+int mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle);
+int mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasDevicePage1_t *config_page, u32 form, u32 handle);
+int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz);
+int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page);
+int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz);
+int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
+int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
+int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage8_t *config_page);
+int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle);
+int mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number, u16 handle);
+int mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle);
+int mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number);
+int mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number);
+int mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u32 handle);
+int mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 *num_pds);
+int mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, u32 handle, u16 sz);
+int mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+ u32 form_specific);
+int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle);
+int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid);
+/* ctl shared API */
+extern struct device_attribute *mpt2sas_host_attrs[];
+extern struct device_attribute *mpt2sas_dev_attrs[];
+void mpt2sas_ctl_init(void);
+void mpt2sas_ctl_exit(void);
+u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
+void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply);
+void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply);
+
+void mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc,
+ u8 bits_to_regsiter);
+
+/* transport shared API */
+u8 mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+struct _sas_port *mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc,
+ u16 handle, u64 sas_address);
+void mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent);
+int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
+int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
+void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+extern struct sas_function_template mpt2sas_transport_functions;
+extern struct scsi_transport_template *mpt2sas_transport_template;
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc,
+ u8 msix_index, u32 reply);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+
+#endif /* MPT2SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
new file mode 100644
index 000000000..c43815b1a
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -0,0 +1,1527 @@
+/*
+ * This module provides common API for accessing firmware configuration pages
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "mpt2sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT2_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page. */
+#define MPT2_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
+ << MPI2_SGE_FLAGS_SHIFT)
+
+/**
+ * struct config_request - obtain dma memory via routine
+ * @sz: size
+ * @page: virt pointer
+ * @page_dma: phys pointer
+ *
+ */
+struct config_request{
+ u16 sz;
+ void *page;
+ dma_addr_t page_dma;
+};
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _config_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ return;
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
+ case MPI2_CONFIG_PAGETYPE_IO_UNIT:
+ desc = "io_unit";
+ break;
+ case MPI2_CONFIG_PAGETYPE_IOC:
+ desc = "ioc";
+ break;
+ case MPI2_CONFIG_PAGETYPE_BIOS:
+ desc = "bios";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
+ desc = "raid_volume";
+ break;
+ case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
+ desc = "manufaucturing";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
+ desc = "physdisk";
+ break;
+ case MPI2_CONFIG_PAGETYPE_EXTENDED:
+ switch (mpi_request->ExtPageType) {
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
+ desc = "sas_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
+ desc = "sas_expander";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
+ desc = "sas_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
+ desc = "sas_phy";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_LOG:
+ desc = "log";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
+ desc = "enclosure";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
+ desc = "raid_config";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
+ desc = "driver_mapping";
+ break;
+ }
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ printk(MPT2SAS_INFO_FMT "%s: %s(%d), action(%d), form(0x%08x), "
+ "smid(%d)\n", ioc->name, calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ printk(MPT2SAS_INFO_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+#endif
+
+/**
+ * _config_alloc_config_dma_memory - obtain physical memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ int r = 0;
+
+ if (mem->sz > ioc->config_page_sz) {
+ mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+ &mem->page_dma, GFP_KERNEL);
+ if (!mem->page) {
+ printk(MPT2SAS_ERR_FMT "%s: dma_alloc_coherent"
+ " failed asking for (%d) bytes!!\n",
+ ioc->name, __func__, mem->sz);
+ r = -ENOMEM;
+ }
+ } else { /* use tmp buffer if less than 512 bytes */
+ mem->page = ioc->config_page;
+ mem->page_dma = ioc->config_page_dma;
+ }
+ return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static void
+_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ if (mem->sz > ioc->config_page_sz)
+ dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+ mem->page_dma);
+}
+
+/**
+ * mpt2sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using _config_request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->config_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+ if (ioc->config_cmds.smid != smid)
+ return 1;
+ ioc->config_cmds.status |= MPT2_CMD_COMPLETE;
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ ioc->config_cmds.status |= MPT2_CMD_REPLY_VALID;
+ memcpy(ioc->config_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->config_cmds.status &= ~MPT2_CMD_PENDING;
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+#endif
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ return 1;
+}
+
+/**
+ * _config_request - main routine for sending config page requests
+ * @ioc: per adapter object
+ * @mpi_request: request message frame
+ * @mpi_reply: reply mf payload returned from firmware
+ * @timeout: timeout in seconds
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
+ *
+ * A generic API for config page requests to firmware.
+ *
+ * The ioc->config_cmds.status flag should be MPT2_CMD_NOT_USED before calling
+ * this API.
+ *
+ * The callback index is set inside `ioc->config_cb_idx.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+ void *config_page, u16 config_page_sz)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count, issue_host_reset = 0;
+ u16 wait_state_count;
+ struct config_request mem;
+
+ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+
+ retry_count = 0;
+ memset(&mem, 0, sizeof(struct config_request));
+
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ if (config_page) {
+ mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+ mpi_request->Header.PageType = mpi_reply->Header.PageType;
+ mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+ mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+ mpi_request->ExtPageType = mpi_reply->ExtPageType;
+ if (mpi_request->Header.PageLength)
+ mem.sz = mpi_request->Header.PageLength * 4;
+ else
+ mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+ r = _config_alloc_config_dma_memory(ioc, &mem);
+ if (r != 0)
+ goto out;
+ if (mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+ mem.page_dma);
+ memcpy(mem.page, config_page, min_t(u16, mem.sz,
+ config_page_sz));
+ } else {
+ memset(config_page, 0, config_page_sz);
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ }
+ }
+
+ retry_config:
+ if (retry_count) {
+ if (retry_count > 2) { /* attempt only 2 retries */
+ r = -EFAULT;
+ goto free_mem;
+ }
+ printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
+ ioc->name, __func__, retry_count);
+ }
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+ r = -EFAULT;
+ goto free_mem;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->config_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+ r = -EAGAIN;
+ goto free_mem;
+ }
+
+ r = 0;
+ memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ ioc->config_cmds.status = MPT2_CMD_PENDING;
+ config_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->config_cmds.smid = smid;
+ memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
+#endif
+ init_completion(&ioc->config_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
+ timeout*HZ);
+ if (!(ioc->config_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt2sas_base_free_smid(ioc, smid);
+ if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+ MPT2_CMD_RESET) || ioc->pci_error_recovery)
+ goto retry_config;
+ issue_host_reset = 1;
+ r = -EFAULT;
+ goto free_mem;
+ }
+
+ if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+ sizeof(Mpi2ConfigReply_t));
+ if (retry_count)
+ printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n",
+ ioc->name, __func__, retry_count);
+ if (config_page && mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_READ_CURRENT)
+ memcpy(config_page, mem.page, min_t(u16, mem.sz,
+ config_page_sz));
+ free_mem:
+ if (config_page)
+ _config_free_config_dma_memory(ioc, &mem);
+ out:
+ ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->config_cmds.mutex);
+
+ if (issue_host_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 10;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_bios_pg2 - obtain bios page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_bios_pg3 - obtain bios page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_iounit_pg0 - obtain iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_iounit_pg1 - obtain iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_set_iounit_pg1 - set iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_iounit_pg3 - obtain iounit page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_iounit_pg8 - obtain iounit page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_sas_device_pg0 - obtain sas device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_sas_device_pg1 - obtain sas device page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasDevicePage1_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_number_hba_phys - obtain number of phys on the host
+ * @ioc: per adapter object
+ * @num_phys: pointer returned with the number of phys
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ u16 ioc_status;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+ *num_phys = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2SasIOUnitPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_phys = config_page.NumPhys;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_expander_pg0 - obtain expander page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_expander_pg1 - obtain expander page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
+ u16 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
+ (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_enclosure_pg0 - obtain enclosure page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_phy_pg0 - obtain phy page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_phy_pg1 - obtain phy page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_number_pds - obtain number of phys disk assigned to volume
+ * @ioc: per adapter object
+ * @handle: volume handle
+ * @num_pds: returns pds count
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2RaidVolPage0_t config_page;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2RaidVolPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_pds = config_page.NumPhysDisks;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_phys_disk_pg0 - obtain phys disk page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
+ * @form_specific: specific to the form
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+ u32 form_specific)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | form_specific);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_volume_handle - returns volume handle for give hidden raid components
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle)
+{
+ Mpi2RaidConfigurationPage0_t *config_page = NULL;
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2ConfigReply_t mpi_reply;
+ int r, i, config_page_sz;
+ u16 ioc_status;
+ int config_num;
+ u16 element_type;
+ u16 phys_disk_dev_handle;
+
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
+ mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+ config_page = kmalloc(config_page_sz, GFP_KERNEL);
+ if (!config_page) {
+ r = -1;
+ goto out;
+ }
+ config_num = 0xff;
+ while (1) {
+ mpi_request.PageAddress = cpu_to_le32(config_num +
+ MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ config_page_sz);
+ if (r)
+ goto out;
+ r = -1;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < config_page->NumElements; i++) {
+ element_type = le16_to_cpu(config_page->
+ ConfigElement[i].ElementFlags) &
+ MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+ if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+ element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+ phys_disk_dev_handle =
+ le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle);
+ if (phys_disk_dev_handle == pd_handle) {
+ *volume_handle =
+ le16_to_cpu(config_page->
+ ConfigElement[i].VolDevHandle);
+ r = 0;
+ goto out;
+ }
+ } else if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+ *volume_handle = 0;
+ r = 0;
+ goto out;
+ }
+ }
+ config_num = config_page->ConfigNum;
+ }
+ out:
+ kfree(config_page);
+ return r;
+}
+
+/**
+ * mpt2sas_config_get_volume_wwid - returns wwid given the volume handle
+ * @ioc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2RaidVolPage1_t raid_vol_pg1;
+
+ *wwid = 0;
+ if (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
+ volume_handle))) {
+ *wwid = le64_to_cpu(raid_vol_pg1.WWID);
+ return 0;
+ } else
+ return -1;
+}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
new file mode 100644
index 000000000..4e509604b
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -0,0 +1,3077 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt2sas_base.h"
+#include "mpt2sas_ctl.h"
+
+static DEFINE_MUTEX(_ctl_mutex);
+static struct fasync_struct *async_queue;
+static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
+
+static int _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset);
+
+/**
+ * enum block_state - blocking state
+ * @NON_BLOCKING: non blocking
+ * @BLOCKING: blocking
+ *
+ * These states are for ioctls that need to wait for a response
+ * from firmware, so they probably require sleep.
+ */
+enum block_state {
+ NON_BLOCKING,
+ BLOCKING,
+};
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _ctl_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+static struct _sas_device *
+_ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->handle != handle)
+ continue;
+ r = sas_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _ctl_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
+ return;
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "scsi_io, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ desc = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ desc = "ioc_init";
+ break;
+ case MPI2_FUNCTION_IOC_FACTS:
+ desc = "ioc_facts";
+ break;
+ case MPI2_FUNCTION_CONFIG:
+ {
+ Mpi2ConfigRequest_t *config_request =
+ (Mpi2ConfigRequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "config, type(0x%02x), ext_type(0x%02x), number(%d)",
+ (config_request->Header.PageType &
+ MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
+ config_request->Header.PageNumber);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_PORT_FACTS:
+ desc = "port_facts";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ desc = "port_enable";
+ break;
+ case MPI2_FUNCTION_EVENT_NOTIFICATION:
+ desc = "event_notification";
+ break;
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ desc = "fw_download";
+ break;
+ case MPI2_FUNCTION_FW_UPLOAD:
+ desc = "fw_upload";
+ break;
+ case MPI2_FUNCTION_RAID_ACTION:
+ desc = "raid_action";
+ break;
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "raid_pass, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ desc = "sas_iounit_cntl";
+ break;
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ desc = "sata_pass";
+ break;
+ case MPI2_FUNCTION_DIAG_BUFFER_POST:
+ desc = "diag_buffer_post";
+ break;
+ case MPI2_FUNCTION_DIAG_RELEASE:
+ desc = "diag_release";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ desc = "smp_passthrough";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ printk(MPT2SAS_INFO_FMT "%s: %s, smid(%d)\n",
+ ioc->name, calling_function_name, desc, smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ printk(MPT2SAS_INFO_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ Mpi2SCSIIOReply_t *scsi_reply =
+ (Mpi2SCSIIOReply_t *)mpi_reply;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _ctl_sas_device_find_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (sas_device) {
+ printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
+ "phy(%d)\n", ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ printk(MPT2SAS_WARN_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+ printk(MPT2SAS_INFO_FMT
+ "\tscsi_state(0x%02x), scsi_status"
+ "(0x%02x)\n", ioc->name,
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
+ }
+}
+#endif
+
+/**
+ * mpt2sas_ctl_done - ctl module completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using ioc->ctl_cb_idx.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi2SCSIIOReply_t *scsiio_reply;
+ const void *sense_data;
+ u32 sz;
+
+ if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+ if (ioc->ctl_cmds.smid != smid)
+ return 1;
+ ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE;
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID;
+ /* get sense data */
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_reply->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+ if (scsiio_reply->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->SenseCount));
+ sense_data = mpt2sas_base_get_sense_buffer(ioc,
+ smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+ }
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+#endif
+ ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING;
+ complete(&ioc->ctl_cmds.done);
+ return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should be
+ * be saved in the driver event_log. This bitmask is set by application.
+ *
+ * Returns 1 when event should be captured, or zero means no match.
+ */
+static int
+_ctl_check_event_type(struct MPT2SAS_ADAPTER *ioc, u16 event)
+{
+ u16 i;
+ u32 desired_event;
+
+ if (event >= 128 || !event || !ioc->event_log)
+ return 0;
+
+ desired_event = (1 << (event % 32));
+ if (!desired_event)
+ desired_event = 1;
+ i = event / 32;
+ return desired_event & ioc->event_type[i];
+}
+
+/**
+ * mpt2sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ struct MPT2_IOCTL_EVENTS *event_log;
+ u16 event;
+ int i;
+ u32 sz, event_data_sz;
+ u8 send_aen = 0;
+
+ if (!ioc->event_log)
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (_ctl_check_event_type(ioc, event)) {
+
+ /* insert entry into circular event_log */
+ i = ioc->event_context % MPT2SAS_CTL_EVENT_LOG_SIZE;
+ event_log = ioc->event_log;
+ event_log[i].event = event;
+ event_log[i].context = ioc->event_context++;
+
+ event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+ sz = min_t(u32, event_data_sz, MPT2_EVENT_DATA_SIZE);
+ memset(event_log[i].data, 0, MPT2_EVENT_DATA_SIZE);
+ memcpy(event_log[i].data, mpi_reply->EventData, sz);
+ send_aen = 1;
+ }
+
+ /* This aen_event_read_flag flag is set until the
+ * application has read the event log.
+ * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+ (send_aen && !ioc->aen_event_read_flag)) {
+ ioc->aen_event_read_flag = 1;
+ wake_up_interruptible(&ctl_poll_wait);
+ if (async_queue)
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+}
+
+/**
+ * mpt2sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Returns void.
+ */
+void
+mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
+ return;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from application
+ * @ioc: per adapter object
+ * @iocpp: The ioc pointer is returned in this.
+ *
+ * Return (-1) means error, else ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
+{
+ struct MPT2SAS_ADAPTER *ioc;
+
+ list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
+ if (ioc->id != ioc_number)
+ continue;
+ *iocpp = ioc;
+ return ioc_number;
+ }
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt2sas_ctl_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
+ * MPT2_IOC_DONE_RESET
+ */
+void
+mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
+{
+ int i;
+ u8 issue_reset;
+
+ switch (reset_phase) {
+ case MPT2_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT2_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ _ctl_send_release(ioc, i, &issue_reset);
+ }
+ break;
+ case MPT2_IOC_AFTER_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+ break;
+ case MPT2_IOC_DONE_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
+
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT2_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT2_DIAG_BUFFER_IS_DIAG_RESET;
+ }
+ break;
+ }
+}
+
+/**
+ * _ctl_fasync -
+ * @fd -
+ * @filep -
+ * @mode -
+ *
+ * Called when application request fasyn callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+ return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_poll -
+ * @file -
+ * @wait -
+ *
+ */
+static unsigned int
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+ struct MPT2SAS_ADAPTER *ioc;
+
+ poll_wait(filep, &ctl_poll_wait, wait);
+
+ list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
+ if (ioc->aen_event_read_flag)
+ return POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg - (struct mpt2_ioctl_command)
+ * @tm_request - pointer to mf from user space
+ *
+ * Returns 0 when an smid if found, else fail.
+ * during failure, the reply frame is filled.
+ */
+static int
+_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
+ Mpi2SCSITaskManagementRequest_t *tm_request)
+{
+ u8 found = 0;
+ u16 i;
+ u16 handle;
+ struct scsi_cmnd *scmd;
+ struct MPT2SAS_DEVICE *priv_data;
+ unsigned long flags;
+ Mpi2SCSITaskManagementReply_t *tm_reply;
+ u32 sz;
+ u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
+
+ lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+
+ handle = le16_to_cpu(tm_request->DevHandle);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = ioc->scsiio_depth; i && !found; i--) {
+ scmd = ioc->scsi_lookup[i - 1].scmd;
+ if (scmd == NULL || scmd->device == NULL ||
+ scmd->device->hostdata == NULL)
+ continue;
+ if (lun != scmd->device->lun)
+ continue;
+ priv_data = scmd->device->hostdata;
+ if (priv_data->sas_target == NULL)
+ continue;
+ if (priv_data->sas_target->handle != handle)
+ continue;
+ tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!found) {
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
+ tm_reply = ioc->ctl_cmds.reply;
+ tm_reply->DevHandle = tm_request->DevHandle;
+ tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_reply->TaskType = tm_request->TaskType;
+ tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
+ tm_reply->VP_ID = tm_request->VP_ID;
+ tm_reply->VF_ID = tm_request->VF_ID;
+ sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz))
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 1;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
+ return 0;
+}
+
+/**
+ * _ctl_do_mpt_command - main handler for MPT2COMMAND opcode
+ * @ioc: per adapter object
+ * @karg - (struct mpt2_ioctl_command)
+ * @mf - pointer to mf in user space
+ */
+static long
+_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
+ void __user *mf)
+{
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ u32 ioc_state;
+ u16 ioc_status;
+ u16 smid;
+ unsigned long timeout, timeleft;
+ u8 issue_reset;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz = 0;
+ u32 sgl_flags;
+ long ret;
+ u16 wait_state_count;
+
+ issue_reset = 0;
+
+ if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+ if (!mpi_request) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a memory for "
+ "mpi_request\n", ioc->name, __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Check for overflow and wraparound */
+ if (karg.data_sge_offset * 4 > ioc->request_sz ||
+ karg.data_sge_offset > (UINT_MAX / 4)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* copy in request message frame from user */
+ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__,
+ __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ smid = mpt2sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+
+ smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ ret = 0;
+ ioc->ctl_cmds.status = MPT2_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt2sas_base_get_msg_frame(ioc, smid);
+ memcpy(request, mpi_request, karg.data_sge_offset*4);
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = karg.data_out_size;
+ data_in_sz = karg.data_in_size;
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
+ ret = -EINVAL;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
+ &data_out_dma);
+ if (!data_out) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ if (copy_from_user(data_out, karg.data_out_buf_ptr,
+ data_out_sz)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -EFAULT;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
+ &data_in_dma);
+ if (!data_in) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ /* add scatter gather elements */
+ psge = (void *)request + (karg.data_sge_offset*4);
+
+ if (!data_out_sz && !data_in_sz) {
+ mpt2sas_base_build_zero_len_sge(ioc, psge);
+ } else if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ }
+
+ /* send command to firmware */
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
+#endif
+
+ init_completion(&ioc->ctl_cmds.done);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsiio_request =
+ (Mpi2SCSIIORequest_t *)request;
+ scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->SenseBufferLowAddress =
+ mpt2sas_base_get_sense_buffer_dma(ioc, smid);
+ memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)request;
+
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
+ "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
+ if (tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ mpt2sas_base_put_smid_hi_priority(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ {
+ Mpi2SmpPassthroughRequest_t *smp_request =
+ (Mpi2SmpPassthroughRequest_t *)mpi_request;
+ u8 *data;
+
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ if (smp_request->PassthroughFlags &
+ MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
+ data = (u8 *)&smp_request->SGL;
+ else {
+ if (unlikely(data_out == NULL)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt2sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ data = data_out;
+ }
+
+ if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ mpt2sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ {
+ Mpi2SasIoUnitControlRequest_t *sasiounit_request =
+ (Mpi2SasIoUnitControlRequest_t *)mpi_request;
+
+ if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
+ || sasiounit_request->Operation ==
+ MPI2_SAS_OP_PHY_LINK_RESET) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ mpt2sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ default:
+ mpt2sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+
+ if (karg.timeout < MPT2_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
+ else
+ timeout = karg.timeout;
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ timeout*HZ);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
+ ioc->ioc_link_reset_in_progress) {
+ ioc->ioc_link_reset_in_progress = 0;
+ ioc->ignore_loginfos = 0;
+ }
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request, karg.data_sge_offset);
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
+ (ioc->logging_level & MPT_DEBUG_TM)) {
+ Mpi2SCSITaskManagementReply_t *tm_reply =
+ (Mpi2SCSITaskManagementReply_t *)mpi_reply;
+
+ printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
+ "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
+ "TerminationCount(0x%08x)\n", ioc->name,
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
+ }
+#endif
+ /* copy out xdata to user */
+ if (data_in_sz) {
+ if (copy_to_user(karg.data_in_buf_ptr, data_in,
+ data_in_sz)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out reply message frame to user */
+ if (karg.max_reply_bytes) {
+ sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out sense to user */
+ if (karg.max_sense_bytes && (mpi_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg.sense_data_ptr,
+ ioc->ctl_cmds.sense, sz)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
+ printk(MPT2SAS_INFO_FMT "issue target reset: handle "
+ "= (0x%04x)\n", ioc->name,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt2sas_halt_firmware(ioc);
+ mpt2sas_scsih_issue_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
+ TM_MUTEX_ON);
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ } else
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ }
+
+ out:
+
+ /* free memory associated with sg buffers */
+ if (data_in)
+ pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ data_out_dma);
+
+ kfree(mpi_request);
+ ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+ return ret;
+}
+
+/**
+ * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_getiocinfo(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_ioctl_iocinfo karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memset(&karg, 0 , sizeof(karg));
+ if (ioc->is_warpdrive)
+ karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
+ else
+ karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
+ if (ioc->pfacts)
+ karg.port_number = ioc->pfacts[0].PortNumber;
+ karg.hw_rev = ioc->pdev->revision;
+ karg.pci_id = ioc->pdev->device;
+ karg.subsystem_device = ioc->pdev->subsystem_device;
+ karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
+ karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
+ karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
+ karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
+ karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
+ karg.firmware_version = ioc->facts.FWVersion.Word;
+ strcpy(karg.driver_version, MPT2SAS_DRIVER_NAME);
+ strcat(karg.driver_version, "-");
+ strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
+ karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventquery(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_ioctl_eventquery karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ karg.event_entries = MPT2SAS_CTL_EVENT_LOG_SIZE;
+ memcpy(karg.event_types, ioc->event_type,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventenable(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_ioctl_eventenable karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ if (ioc->event_log)
+ return 0;
+ memcpy(ioc->event_type, karg.event_types,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+ mpt2sas_base_validate_event_type(ioc, ioc->event_type);
+
+ /* initialize event_log */
+ ioc->event_context = 0;
+ ioc->aen_event_read_flag = 0;
+ ioc->event_log = kcalloc(MPT2SAS_CTL_EVENT_LOG_SIZE,
+ sizeof(struct MPT2_IOCTL_EVENTS), GFP_KERNEL);
+ if (!ioc->event_log) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventreport(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_ioctl_eventreport karg;
+ u32 number_bytes, max_events, max;
+ struct mpt2_ioctl_eventreport __user *uarg = arg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ number_bytes = karg.hdr.max_data_size -
+ sizeof(struct mpt2_ioctl_header);
+ max_events = number_bytes/sizeof(struct MPT2_IOCTL_EVENTS);
+ max = min_t(u32, MPT2SAS_CTL_EVENT_LOG_SIZE, max_events);
+
+ /* If fewer than 1 event is requested, there must have
+ * been some type of error.
+ */
+ if (!max || !ioc->event_log)
+ return -ENODATA;
+
+ number_bytes = max * sizeof(struct MPT2_IOCTL_EVENTS);
+ if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ /* reset flag so SIGIO can restart */
+ ioc->aen_event_read_flag = 0;
+ return 0;
+}
+
+/**
+ * _ctl_do_reset - main handler for MPT2HARDRESET opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_do_reset(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_ioctl_diag_reset karg;
+ int retval;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ printk(MPT2SAS_INFO_FMT "host reset: %s\n",
+ ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ return 0;
+}
+
+/**
+ * _ctl_btdh_search_sas_device - searching for sas device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_sas_device(struct MPT2SAS_ADAPTER *ioc,
+ struct mpt2_ioctl_btdh_mapping *btdh)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->sas_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == sas_device->handle) {
+ btdh->bus = sas_device->channel;
+ btdh->id = sas_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == sas_device->channel && btdh->id ==
+ sas_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = sas_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_search_raid_device - searching for raid device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc,
+ struct mpt2_ioctl_btdh_mapping *btdh)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->raid_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == raid_device->handle) {
+ btdh->bus = raid_device->channel;
+ btdh->id = raid_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == raid_device->channel && btdh->id ==
+ raid_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = raid_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_btdh_mapping(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_ioctl_btdh_mapping karg;
+ int rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = _ctl_btdh_search_sas_device(ioc, &karg);
+ if (!rc)
+ _ctl_btdh_search_raid_device(ioc, &karg);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_diag_capability - return diag buffer capability
+ * @ioc: per adapter object
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ *
+ * returns 1 when diag buffer support is enabled in firmware
+ */
+static u8
+_ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type)
+{
+ u8 rc = 0;
+
+ switch (buffer_type) {
+ case MPI2_DIAG_BUF_TYPE_TRACE:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_EXTENDED:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+ rc = 1;
+ }
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_register_2 - wrapper for registering diag buffer support
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
+ *
+ */
+static long
+_ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
+ struct mpt2_diag_register *diag_register)
+{
+ int rc, i;
+ void *request_data = NULL;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz = 0;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ u8 buffer_type;
+ unsigned long timeleft;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ buffer_type = diag_register->buffer_type;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) {
+ printk(MPT2SAS_ERR_FMT "%s: already has a registered "
+ "buffer for buffer_type(0x%02x)\n", ioc->name, __func__,
+ buffer_type);
+ return -EINVAL;
+ }
+
+ if (diag_register->requested_buffer_size % 4) {
+ printk(MPT2SAS_ERR_FMT "%s: the requested_buffer_size "
+ "is not 4 byte aligned\n", ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT2_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ request_data = ioc->diag_buffer[buffer_type];
+ request_data_sz = diag_register->requested_buffer_size;
+ ioc->unique_id[buffer_type] = diag_register->unique_id;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ memcpy(ioc->product_specific[buffer_type],
+ diag_register->product_specific, MPT2_PRODUCT_SPECIFIC_DWORDS);
+ ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
+
+ if (request_data) {
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
+ request_data = NULL;
+ }
+ }
+
+ if (request_data == NULL) {
+ ioc->diag_buffer_sz[buffer_type] = 0;
+ ioc->diag_buffer_dma[buffer_type] = 0;
+ request_data = pci_alloc_consistent(
+ ioc->pdev, request_data_sz, &request_data_dma);
+ if (request_data == NULL) {
+ printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"
+ " for diag buffers, requested size(%d)\n",
+ ioc->name, __func__, request_data_sz);
+ mpt2sas_base_free_smid(ioc, smid);
+ return -ENOMEM;
+ }
+ ioc->diag_buffer[buffer_type] = request_data;
+ ioc->diag_buffer_sz[buffer_type] = request_data_sz;
+ ioc->diag_buffer_dma[buffer_type] = request_data_dma;
+ }
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = diag_register->buffer_type;
+ mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
+ mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
+ mpi_request->BufferLength = cpu_to_le32(request_data_sz);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(0x%p), "
+ "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
+
+ for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT2_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
+ "log_info(0x%08x)\n", ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ if (rc && request_data)
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+
+ ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * mpt2sas_enable_diag_buffer - enabling diag_buffers support driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
+ *
+ * This is called when command line option diag_buffer_enable is enabled
+ * at driver load time.
+ */
+void
+mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+ struct mpt2_diag_register diag_register;
+
+ memset(&diag_register, 0, sizeof(struct mpt2_diag_register));
+
+ if (bits_to_register & 1) {
+ printk(MPT2SAS_INFO_FMT "registering trace buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ /* register for 1MB buffers */
+ diag_register.requested_buffer_size = (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 2) {
+ printk(MPT2SAS_INFO_FMT "registering snapshot buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 4) {
+ printk(MPT2SAS_INFO_FMT "registering extended buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+}
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_diag_register karg;
+ long rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ rc = _ctl_diag_register_2(ioc, &karg);
+ return rc;
+}
+
+/**
+ * _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+static long
+_ctl_diag_unregister(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_diag_unregister karg;
+ void *request_data;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
+ "registered\n", ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) has not been "
+ "released\n", ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
+ "registered\n", ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ return 0;
+}
+
+/**
+ * _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * The application will send only buffer_type and unique_id. Driver will
+ * inspect unique_id first, if valid, fill in all the info. If unique_id is
+ * 0x00, the driver will return info specified by Buffer Type.
+ */
+static long
+_ctl_diag_query(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_diag_query karg;
+ void *request_data;
+ int i;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ karg.application_flags = 0;
+ buffer_type = karg.buffer_type;
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
+ "registered\n", ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
+ "registered\n", ioc->name, __func__,
+ karg.unique_id);
+ return -EINVAL;
+ }
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED)
+ karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
+ MPT2_APP_FLAGS_BUFFER_VALID);
+ else
+ karg.application_flags = (MPT2_APP_FLAGS_APP_OWNED |
+ MPT2_APP_FLAGS_BUFFER_VALID |
+ MPT2_APP_FLAGS_FW_BUFFER_ACCESS);
+
+ for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
+ karg.product_specific[i] =
+ ioc->product_specific[buffer_type][i];
+
+ karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
+ karg.driver_added_buffer_size = 0;
+ karg.unique_id = ioc->unique_id[buffer_type];
+ karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
+
+ if (copy_to_user(arg, &karg, sizeof(struct mpt2_diag_query))) {
+ printk(MPT2SAS_ERR_FMT "%s: unable to write mpt2_diag_query "
+ "data @ %p\n", ioc->name, __func__, arg);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_send_release - Diag Release Message
+ * @ioc: per adapter object
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset - specifies whether host reset is required.
+ *
+ */
+static int
+_ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
+{
+ Mpi2DiagReleaseRequest_t *mpi_request;
+ Mpi2DiagReleaseReply_t *mpi_reply;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ int rc;
+ unsigned long timeleft;
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = 0;
+ *issue_reset = 0;
+
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "skipping due to FAULT state\n", ioc->name,
+ __func__));
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ ioc->ctl_cmds.status = MPT2_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
+ *issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT2_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
+ "log_info(0x%08x)\n", ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ out:
+ ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @arg - user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwritting information in the buffer.
+ */
+static long
+_ctl_diag_release(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_diag_release karg;
+ void *request_data;
+ int rc;
+ u8 buffer_type;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) is not "
+ "registered\n", ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
+ "registered\n", ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_RELEASED) {
+ printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) "
+ "is already released\n", ioc->name, __func__,
+ buffer_type);
+ return 0;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+
+ if (!request_data) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have memory allocated for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ /* buffers were released by due to host reset */
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_DIAG_RESET)) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT2_DIAG_BUFFER_IS_RELEASED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT2_DIAG_BUFFER_IS_DIAG_RESET;
+ printk(MPT2SAS_ERR_FMT "%s: buffer_type(0x%02x) "
+ "was released due to host reset\n", ioc->name, __func__,
+ buffer_type);
+ return 0;
+ }
+
+ rc = _ctl_send_release(ioc, buffer_type, &issue_reset);
+
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt2_diag_read_buffer karg;
+ struct mpt2_diag_read_buffer __user *uarg = arg;
+ void *request_data, *diag_data;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ int rc, i;
+ u8 buffer_type;
+ unsigned long timeleft, request_size, copy_size;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have capability for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ printk(MPT2SAS_ERR_FMT "%s: unique_id(0x%08x) is not "
+ "registered\n", ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ printk(MPT2SAS_ERR_FMT "%s: doesn't have buffer for "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_size = ioc->diag_buffer_sz[buffer_type];
+
+ if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
+ printk(MPT2SAS_ERR_FMT "%s: either the starting_offset "
+ "or bytes_to_read are not 4 byte aligned\n", ioc->name,
+ __func__);
+ return -EINVAL;
+ }
+
+ if (karg.starting_offset > request_size)
+ return -EINVAL;
+
+ diag_data = (void *)(request_data + karg.starting_offset);
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), "
+ "offset(%d), sz(%d)\n", ioc->name, __func__,
+ diag_data, karg.starting_offset, karg.bytes_to_read));
+
+ /* Truncate data on requests that are too large */
+ if ((diag_data + karg.bytes_to_read < diag_data) ||
+ (diag_data + karg.bytes_to_read > request_data + request_size))
+ copy_size = request_size - karg.starting_offset;
+ else
+ copy_size = karg.bytes_to_read;
+
+ if (copy_to_user((void __user *)uarg->diagnostic_data,
+ diag_data, copy_size)) {
+ printk(MPT2SAS_ERR_FMT "%s: Unable to write "
+ "mpt_diag_read_buffer_t data @ %p\n", ioc->name,
+ __func__, diag_data);
+ return -EFAULT;
+ }
+
+ if ((karg.flags & MPT2_FLAGS_REREGISTER) == 0)
+ return 0;
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: Reregister "
+ "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type));
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT2_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "buffer_type(0x%02x) is still registered\n", ioc->name,
+ __func__, buffer_type));
+ return 0;
+ }
+ /* Get a free request frame and save the message context.
+ */
+
+ if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT2_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->BufferLength =
+ cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
+ mpi_request->BufferAddress =
+ cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
+ for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT2_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ printk(MPT2SAS_INFO_FMT "%s: ioc_status(0x%04x) "
+ "log_info(0x%08x)\n", ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
+ return rc;
+}
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
+ * @ioc: per adapter object
+ * @cmd - ioctl opcode
+ * @arg - (struct mpt2_ioctl_command32)
+ *
+ * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
+ */
+static long
+_ctl_compat_mpt_command(struct MPT2SAS_ADAPTER *ioc, unsigned cmd,
+ void __user *arg)
+{
+ struct mpt2_ioctl_command32 karg32;
+ struct mpt2_ioctl_command32 __user *uarg;
+ struct mpt2_ioctl_command karg;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
+ return -EINVAL;
+
+ uarg = (struct mpt2_ioctl_command32 __user *) arg;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+ karg.hdr.port_number = karg32.hdr.port_number;
+ karg.hdr.max_data_size = karg32.hdr.max_data_size;
+ karg.timeout = karg32.timeout;
+ karg.max_reply_bytes = karg32.max_reply_bytes;
+ karg.data_in_size = karg32.data_in_size;
+ karg.data_out_size = karg32.data_out_size;
+ karg.max_sense_bytes = karg32.max_sense_bytes;
+ karg.data_sge_offset = karg32.data_sge_offset;
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/**
+ * _ctl_ioctl_main - main ioctl entry point
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ * compat - handles 32 bit applications in 64bit os
+ */
+static long
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ u8 compat)
+{
+ struct MPT2SAS_ADAPTER *ioc;
+ struct mpt2_ioctl_header ioctl_header;
+ enum block_state state;
+ long ret = -EINVAL;
+
+ /* get IOCTL header */
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct mpt2_ioctl_header))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ if (state == NON_BLOCKING) {
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
+ return -ERESTARTSYS;
+ }
+
+ switch (cmd) {
+ case MPT2IOCINFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
+ ret = _ctl_getiocinfo(ioc, arg);
+ break;
+#ifdef CONFIG_COMPAT
+ case MPT2COMMAND32:
+#endif
+ case MPT2COMMAND:
+ {
+ struct mpt2_ioctl_command __user *uarg;
+ struct mpt2_ioctl_command karg;
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+ break;
+ }
+#endif
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
+ uarg = arg;
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+ }
+ break;
+ }
+ case MPT2EVENTQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
+ ret = _ctl_eventquery(ioc, arg);
+ break;
+ case MPT2EVENTENABLE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
+ ret = _ctl_eventenable(ioc, arg);
+ break;
+ case MPT2EVENTREPORT:
+ ret = _ctl_eventreport(ioc, arg);
+ break;
+ case MPT2HARDRESET:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
+ ret = _ctl_do_reset(ioc, arg);
+ break;
+ case MPT2BTDHMAPPING:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
+ ret = _ctl_btdh_mapping(ioc, arg);
+ break;
+ case MPT2DIAGREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
+ ret = _ctl_diag_register(ioc, arg);
+ break;
+ case MPT2DIAGUNREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
+ ret = _ctl_diag_unregister(ioc, arg);
+ break;
+ case MPT2DIAGQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
+ ret = _ctl_diag_query(ioc, arg);
+ break;
+ case MPT2DIAGRELEASE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
+ ret = _ctl_diag_release(ioc, arg);
+ break;
+ case MPT2DIAGREADBUFFER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
+ ret = _ctl_diag_read_buffer(ioc, arg);
+ break;
+ default:
+
+ dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ break;
+ }
+
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+ return ret;
+}
+
+/**
+ * _ctl_ioctl - main ioctl entry point (unlocked)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ */
+static long
+_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+ return ret;
+}
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
+ * @file -
+ * @cmd -
+ * @arg -
+ *
+ * This routine handles 32 bit applications in 64bit os.
+ */
+static long
+_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
+ return ret;
+}
+#endif
+
+/* scsi host attributes */
+
+/**
+ * _ctl_version_fw_show - firmware version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+
+/**
+ * _ctl_version_bios_show - bios version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (version & 0xFF000000) >> 24,
+ (version & 0x00FF0000) >> 16,
+ (version & 0x0000FF00) >> 8,
+ version & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+
+/**
+ * _ctl_version_mpi_show - MPI (message passing interface) version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+ ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+
+/**
+ * _ctl_version_product_show - product name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR(version_product, S_IRUGO,
+ _ctl_version_product_show, NULL);
+
+/**
+ * _ctl_version_nvdata_persistent_show - ndvata persistent version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_persistent_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ _ctl_version_nvdata_persistent_show, NULL);
+
+/**
+ * _ctl_version_nvdata_default_show - nvdata default version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_default_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ _ctl_version_nvdata_default_show, NULL);
+
+/**
+ * _ctl_board_name_show - board name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+
+/**
+ * _ctl_board_assembly_show - board assembly name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO,
+ _ctl_board_assembly_show, NULL);
+
+/**
+ * _ctl_board_tracer_show - board tracer number
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO,
+ _ctl_board_tracer_show, NULL);
+
+/**
+ * _ctl_io_delay_show - io missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for firmware implemention for deboucing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO,
+ _ctl_io_delay_show, NULL);
+
+/**
+ * _ctl_device_delay_show - device missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for firmware implemention for deboucing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO,
+ _ctl_device_delay_show, NULL);
+
+/**
+ * _ctl_fw_queue_depth_show - global credits
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO,
+ _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+ _ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ ioc->logging_level = val;
+ printk(MPT2SAS_INFO_FMT "logging_level=%08xh\n", ioc->name,
+ ioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
+ _ctl_logging_level_show, _ctl_logging_level_store);
+
+/* device attributes */
+/*
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt2sas_fwfault_debug is command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->fwfault_debug = val;
+ printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name,
+ ioc->fwfault_debug);
+ return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+ _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
+ _ctl_ioc_reset_count_show, NULL);
+
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO,
+ _ctl_ioc_reply_queue_count_show, NULL);
+
+/**
+ * _ctl_BRM_status_show - Backup Rail Monitor Status
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 backup_rail_monitor_status = 0;
+ u16 ioc_status;
+ int sz;
+ ssize_t rc = 0;
+
+ if (!ioc->is_warpdrive) {
+ printk(MPT2SAS_ERR_FMT "%s: BRM attribute is only for"\
+ "warpdrive\n", ioc->name, __func__);
+ goto out;
+ }
+
+ /* allocate upto GPIOVal 36 entries */
+ sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
+ io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
+ if (!io_unit_pg3) {
+ printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"\
+ "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+ goto out;
+ }
+
+ if (mpt2sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
+ 0) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed reading iounit_pg3\n", ioc->name,
+ __func__);
+ goto out;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "%s: iounit_pg3 failed with"\
+ "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+ goto out;
+ }
+
+ if (io_unit_pg3->GPIOCount < 25) {
+ printk(MPT2SAS_ERR_FMT "%s: iounit_pg3->GPIOCount less than"\
+ "25 entries, detected (%d) entries\n", ioc->name, __func__,
+ io_unit_pg3->GPIOCount);
+ goto out;
+ }
+
+ /* BRM status is in bit zero of GPIOVal[24] */
+ backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
+ rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
+
+ out:
+ kfree(io_unit_pg3);
+ return rc;
+}
+static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
+
+struct DIAG_BUFFER_START {
+ __le32 Size;
+ __le32 DiagVersion;
+ u8 BufferType;
+ u8 Reserved[3];
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
+};
+/**
+ * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_trace_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 size = 0;
+ struct DIAG_BUFFER_START *request_data;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
+ "registered\n", ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
+ "registered\n", ioc->name, __func__);
+ return 0;
+ }
+
+ request_data = (struct DIAG_BUFFER_START *)
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
+ if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01000000) &&
+ le32_to_cpu(request_data->Reserved3) == 0x4742444c)
+ size = le32_to_cpu(request_data->Size);
+
+ ioc->ring_buffer_sz = size;
+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
+ _ctl_host_trace_buffer_size_show, NULL);
+
+/**
+ * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * You will only be able to read 4k bytes of ring buffer at a time.
+ * In order to read beyond 4k bytes, you will have to write out the
+ * offset to the same attribute, it will move the pointer.
+ */
+static ssize_t
+_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ void *request_data;
+ u32 size;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
+ "registered\n", ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ printk(MPT2SAS_ERR_FMT "%s: host_trace_buffer is not "
+ "registered\n", ioc->name, __func__);
+ return 0;
+ }
+
+ if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
+ return 0;
+
+ size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
+ size = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+ request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
+ memcpy(buf, request_data, size);
+ return size;
+}
+
+static ssize_t
+_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->ring_buffer_offset = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
+
+/*****************************************/
+
+/**
+ * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * This is a mechnism to post/release host_trace_buffers
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+ else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_RELEASED))
+ return snprintf(buf, PAGE_SIZE, "release\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ char str[10] = "";
+ struct mpt2_diag_register diag_register;
+ u8 issue_reset = 0;
+
+ if (sscanf(buf, "%9s", str) != 1)
+ return -EINVAL;
+
+ if (!strcmp(str, "post")) {
+ /* exit out if host buffers are already posted */
+ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) &&
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_RELEASED) == 0))
+ goto out;
+ memset(&diag_register, 0, sizeof(struct mpt2_diag_register));
+ printk(MPT2SAS_INFO_FMT "posting host trace buffers\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ diag_register.requested_buffer_size = (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+ _ctl_diag_register_2(ioc, &diag_register);
+ } else if (!strcmp(str, "release")) {
+ /* exit out if host buffers are already released */
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_REGISTERED) == 0)
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT2_DIAG_BUFFER_IS_RELEASED))
+ goto out;
+ printk(MPT2SAS_INFO_FMT "releasing host trace buffer\n",
+ ioc->name);
+ _ctl_send_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset);
+ }
+
+ out:
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_enable_show, _ctl_host_trace_buffer_enable_store);
+
+struct device_attribute *mpt2sas_host_attrs[] = {
+ &dev_attr_version_fw,
+ &dev_attr_version_bios,
+ &dev_attr_version_mpi,
+ &dev_attr_version_product,
+ &dev_attr_version_nvdata_persistent,
+ &dev_attr_version_nvdata_default,
+ &dev_attr_board_name,
+ &dev_attr_board_assembly,
+ &dev_attr_board_tracer,
+ &dev_attr_io_delay,
+ &dev_attr_device_delay,
+ &dev_attr_logging_level,
+ &dev_attr_fwfault_debug,
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ &dev_attr_ioc_reset_count,
+ &dev_attr_host_trace_buffer_size,
+ &dev_attr_host_trace_buffer,
+ &dev_attr_host_trace_buffer_enable,
+ &dev_attr_reply_queue_count,
+ &dev_attr_BRM_status,
+ NULL,
+};
+
+/**
+ * _ctl_device_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the sas address for the target
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->sas_target->sas_address);
+}
+static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+
+/**
+ * _ctl_device_handle_show - device handle
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware assigned device handle
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+
+struct device_attribute *mpt2sas_dev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ NULL,
+};
+
+static const struct file_operations ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = _ctl_ioctl,
+ .poll = _ctl_poll,
+ .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = _ctl_ioctl_compat,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice ctl_dev = {
+ .minor = MPT2SAS_MINOR,
+ .name = MPT2SAS_DEV_NAME,
+ .fops = &ctl_fops,
+};
+
+/**
+ * mpt2sas_ctl_init - main entry point for ctl.
+ *
+ */
+void
+mpt2sas_ctl_init(void)
+{
+ async_queue = NULL;
+ if (misc_register(&ctl_dev) < 0)
+ printk(KERN_ERR "%s can't register misc device [minor=%d]\n",
+ MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
+
+ init_waitqueue_head(&ctl_poll_wait);
+}
+
+/**
+ * mpt2sas_ctl_exit - exit point for ctl
+ *
+ */
+void
+mpt2sas_ctl_exit(void)
+{
+ struct MPT2SAS_ADAPTER *ioc;
+ int i;
+
+ list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
+
+ /* free memory associated to diag buffers */
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!ioc->diag_buffer[i])
+ continue;
+ pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ ioc->diag_buffer[i] = NULL;
+ ioc->diag_buffer_status[i] = 0;
+ }
+
+ kfree(ioc->event_log);
+ }
+ misc_deregister(&ctl_dev);
+}
+
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
new file mode 100644
index 000000000..46b2fc5b7
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -0,0 +1,419 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT2SAS_CTL_H_INCLUDED
+#define MPT2SAS_CTL_H_INCLUDED
+
+#ifdef __KERNEL__
+#include <linux/miscdevice.h>
+#endif
+
+#define MPT2SAS_DEV_NAME "mpt2ctl"
+#define MPT2_MAGIC_NUMBER 'L'
+#define MPT2_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
+
+/**
+ * IOCTL opcodes
+ */
+#define MPT2IOCINFO _IOWR(MPT2_MAGIC_NUMBER, 17, \
+ struct mpt2_ioctl_iocinfo)
+#define MPT2COMMAND _IOWR(MPT2_MAGIC_NUMBER, 20, \
+ struct mpt2_ioctl_command)
+#ifdef CONFIG_COMPAT
+#define MPT2COMMAND32 _IOWR(MPT2_MAGIC_NUMBER, 20, \
+ struct mpt2_ioctl_command32)
+#endif
+#define MPT2EVENTQUERY _IOWR(MPT2_MAGIC_NUMBER, 21, \
+ struct mpt2_ioctl_eventquery)
+#define MPT2EVENTENABLE _IOWR(MPT2_MAGIC_NUMBER, 22, \
+ struct mpt2_ioctl_eventenable)
+#define MPT2EVENTREPORT _IOWR(MPT2_MAGIC_NUMBER, 23, \
+ struct mpt2_ioctl_eventreport)
+#define MPT2HARDRESET _IOWR(MPT2_MAGIC_NUMBER, 24, \
+ struct mpt2_ioctl_diag_reset)
+#define MPT2BTDHMAPPING _IOWR(MPT2_MAGIC_NUMBER, 31, \
+ struct mpt2_ioctl_btdh_mapping)
+
+/* diag buffer support */
+#define MPT2DIAGREGISTER _IOWR(MPT2_MAGIC_NUMBER, 26, \
+ struct mpt2_diag_register)
+#define MPT2DIAGRELEASE _IOWR(MPT2_MAGIC_NUMBER, 27, \
+ struct mpt2_diag_release)
+#define MPT2DIAGUNREGISTER _IOWR(MPT2_MAGIC_NUMBER, 28, \
+ struct mpt2_diag_unregister)
+#define MPT2DIAGQUERY _IOWR(MPT2_MAGIC_NUMBER, 29, \
+ struct mpt2_diag_query)
+#define MPT2DIAGREADBUFFER _IOWR(MPT2_MAGIC_NUMBER, 30, \
+ struct mpt2_diag_read_buffer)
+
+/**
+ * struct mpt2_ioctl_header - main header structure
+ * @ioc_number - IOC unit number
+ * @port_number - IOC port number
+ * @max_data_size - maximum number bytes to transfer on read
+ */
+struct mpt2_ioctl_header {
+ uint32_t ioc_number;
+ uint32_t port_number;
+ uint32_t max_data_size;
+};
+
+/**
+ * struct mpt2_ioctl_diag_reset - diagnostic reset
+ * @hdr - generic header
+ */
+struct mpt2_ioctl_diag_reset {
+ struct mpt2_ioctl_header hdr;
+};
+
+
+/**
+ * struct mpt2_ioctl_pci_info - pci device info
+ * @device - pci device id
+ * @function - pci function id
+ * @bus - pci bus id
+ * @segment_id - pci segment id
+ */
+struct mpt2_ioctl_pci_info {
+ union {
+ struct {
+ uint32_t device:5;
+ uint32_t function:3;
+ uint32_t bus:24;
+ } bits;
+ uint32_t word;
+ } u;
+ uint32_t segment_id;
+};
+
+
+#define MPT2_IOCTL_INTERFACE_SCSI (0x00)
+#define MPT2_IOCTL_INTERFACE_FC (0x01)
+#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
+#define MPT2_IOCTL_INTERFACE_SAS (0x03)
+#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
+#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05)
+#define MPT2_IOCTL_VERSION_LENGTH (32)
+
+/**
+ * struct mpt2_ioctl_iocinfo - generic controller info
+ * @hdr - generic header
+ * @adapter_type - type of adapter (spi, fc, sas)
+ * @port_number - port number
+ * @pci_id - PCI Id
+ * @hw_rev - hardware revision
+ * @sub_system_device - PCI subsystem Device ID
+ * @sub_system_vendor - PCI subsystem Vendor ID
+ * @rsvd0 - reserved
+ * @firmware_version - firmware version
+ * @bios_version - BIOS version
+ * @driver_version - driver version - 32 ASCII characters
+ * @rsvd1 - reserved
+ * @scsi_id - scsi id of adapter 0
+ * @rsvd2 - reserved
+ * @pci_information - pci info (2nd revision)
+ */
+struct mpt2_ioctl_iocinfo {
+ struct mpt2_ioctl_header hdr;
+ uint32_t adapter_type;
+ uint32_t port_number;
+ uint32_t pci_id;
+ uint32_t hw_rev;
+ uint32_t subsystem_device;
+ uint32_t subsystem_vendor;
+ uint32_t rsvd0;
+ uint32_t firmware_version;
+ uint32_t bios_version;
+ uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
+ uint8_t rsvd1;
+ uint8_t scsi_id;
+ uint16_t rsvd2;
+ struct mpt2_ioctl_pci_info pci_information;
+};
+
+
+/* number of event log entries */
+#define MPT2SAS_CTL_EVENT_LOG_SIZE (50)
+
+/**
+ * struct mpt2_ioctl_eventquery - query event count and type
+ * @hdr - generic header
+ * @event_entries - number of events returned by get_event_report
+ * @rsvd - reserved
+ * @event_types - type of events currently being captured
+ */
+struct mpt2_ioctl_eventquery {
+ struct mpt2_ioctl_header hdr;
+ uint16_t event_entries;
+ uint16_t rsvd;
+ uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+/**
+ * struct mpt2_ioctl_eventenable - enable/disable event capturing
+ * @hdr - generic header
+ * @event_types - toggle off/on type of events to be captured
+ */
+struct mpt2_ioctl_eventenable {
+ struct mpt2_ioctl_header hdr;
+ uint32_t event_types[4];
+};
+
+#define MPT2_EVENT_DATA_SIZE (192)
+/**
+ * struct MPT2_IOCTL_EVENTS -
+ * @event - the event that was reported
+ * @context - unique value for each event assigned by driver
+ * @data - event data returned in fw reply message
+ */
+struct MPT2_IOCTL_EVENTS {
+ uint32_t event;
+ uint32_t context;
+ uint8_t data[MPT2_EVENT_DATA_SIZE];
+};
+
+/**
+ * struct mpt2_ioctl_eventreport - returing event log
+ * @hdr - generic header
+ * @event_data - (see struct MPT2_IOCTL_EVENTS)
+ */
+struct mpt2_ioctl_eventreport {
+ struct mpt2_ioctl_header hdr;
+ struct MPT2_IOCTL_EVENTS event_data[1];
+};
+
+/**
+ * struct mpt2_ioctl_command - generic mpt firmware passthru ioctl
+ * @hdr - generic header
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @sense_data_ptr - sense data location
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number bytes for data transfer in (read)
+ * @data_out_size - number bytes for data transfer out (write)
+ * @max_sense_bytes - maximum number of bytes for auto sense buffers
+ * @data_sge_offset - offset in words from the start of the request message to
+ * the first SGL
+ * @mf[1];
+ */
+struct mpt2_ioctl_command {
+ struct mpt2_ioctl_header hdr;
+ uint32_t timeout;
+ void __user *reply_frame_buf_ptr;
+ void __user *data_in_buf_ptr;
+ void __user *data_out_buf_ptr;
+ void __user *sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+
+#ifdef CONFIG_COMPAT
+struct mpt2_ioctl_command32 {
+ struct mpt2_ioctl_header hdr;
+ uint32_t timeout;
+ uint32_t reply_frame_buf_ptr;
+ uint32_t data_in_buf_ptr;
+ uint32_t data_out_buf_ptr;
+ uint32_t sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+#endif
+
+/**
+ * struct mpt2_ioctl_btdh_mapping - mapping info
+ * @hdr - generic header
+ * @id - target device identification number
+ * @bus - SCSI bus number that the target device exists on
+ * @handle - device handle for the target device
+ * @rsvd - reserved
+ *
+ * To obtain a bus/id the application sets
+ * handle to valid handle, and bus/id to 0xFFFF.
+ *
+ * To obtain the device handle the application sets
+ * bus/id valid value, and the handle to 0xFFFF.
+ */
+struct mpt2_ioctl_btdh_mapping {
+ struct mpt2_ioctl_header hdr;
+ uint32_t id;
+ uint32_t bus;
+ uint16_t handle;
+ uint16_t rsvd;
+};
+
+
+/* status bits for ioc->diag_buffer_status */
+#define MPT2_DIAG_BUFFER_IS_REGISTERED (0x01)
+#define MPT2_DIAG_BUFFER_IS_RELEASED (0x02)
+#define MPT2_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+
+/* application flags for mpt2_diag_register, mpt2_diag_query */
+#define MPT2_APP_FLAGS_APP_OWNED (0x0001)
+#define MPT2_APP_FLAGS_BUFFER_VALID (0x0002)
+#define MPT2_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+
+/* flags for mpt2_diag_read_buffer */
+#define MPT2_FLAGS_REREGISTER (0x0001)
+
+#define MPT2_PRODUCT_SPECIFIC_DWORDS 23
+
+/**
+ * struct mpt2_diag_register - application register with driver
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @requested_buffer_size - buffers size in bytes
+ * @unique_id - tag specified by application that is used to signal ownership
+ * of the buffer.
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+struct mpt2_diag_register {
+ struct mpt2_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT2_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t requested_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt2_diag_unregister - application unregister with driver
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be unregistered
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+struct mpt2_diag_unregister {
+ struct mpt2_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt2_diag_query - query relevant info associated with diag buffers
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @total_buffer_size - diag buffer size in bytes
+ * @driver_added_buffer_size - size of extra space appended to end of buffer
+ * @unique_id - unique id associated with this buffer.
+ *
+ * The application will send only buffer_type and unique_id. Driver will
+ * inspect unique_id first, if valid, fill in all the info. If unique_id is
+ * 0x00, the driver will return info specified by Buffer Type.
+ */
+struct mpt2_diag_query {
+ struct mpt2_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT2_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t total_buffer_size;
+ uint32_t driver_added_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt2_diag_release - request to send Diag Release Message to firmware
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be released
+ *
+ * This allows ownership of the specified buffer to returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwritting information in the buffer.
+ */
+struct mpt2_diag_release {
+ struct mpt2_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt2_diag_read_buffer - request for copy of the diag buffer
+ * @hdr - generic header
+ * @status -
+ * @reserved -
+ * @flags - misc flags
+ * @starting_offset - starting offset within drivers buffer where to start
+ * reading data at into the specified application buffer
+ * @bytes_to_read - number of bytes to copy from the drivers buffer into the
+ * application buffer starting at starting_offset.
+ * @unique_id - unique id associated with this buffer.
+ * @diagnostic_data - data payload
+ */
+struct mpt2_diag_read_buffer {
+ struct mpt2_ioctl_header hdr;
+ uint8_t status;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t starting_offset;
+ uint32_t bytes_to_read;
+ uint32_t unique_id;
+ uint32_t diagnostic_data[1];
+};
+
+#endif /* MPT2SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
new file mode 100644
index 000000000..277120d45
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -0,0 +1,182 @@
+/*
+ * Logging Support for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT2SAS_DEBUG_H_INCLUDED
+#define MPT2SAS_DEBUG_H_INCLUDED
+
+#define MPT_DEBUG 0x00000001
+#define MPT_DEBUG_MSG_FRAME 0x00000002
+#define MPT_DEBUG_SG 0x00000004
+#define MPT_DEBUG_EVENTS 0x00000008
+#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPT_DEBUG_INIT 0x00000020
+#define MPT_DEBUG_EXIT 0x00000040
+#define MPT_DEBUG_FAIL 0x00000080
+#define MPT_DEBUG_TM 0x00000100
+#define MPT_DEBUG_REPLY 0x00000200
+#define MPT_DEBUG_HANDSHAKE 0x00000400
+#define MPT_DEBUG_CONFIG 0x00000800
+#define MPT_DEBUG_DL 0x00001000
+#define MPT_DEBUG_RESET 0x00002000
+#define MPT_DEBUG_SCSI 0x00004000
+#define MPT_DEBUG_IOCTL 0x00008000
+#define MPT_DEBUG_CSMISAS 0x00010000
+#define MPT_DEBUG_SAS 0x00020000
+#define MPT_DEBUG_TRANSPORT 0x00040000
+#define MPT_DEBUG_TASK_SET_FULL 0x00080000
+
+#define MPT_DEBUG_TARGET_MODE 0x00100000
+
+
+/*
+ * CONFIG_SCSI_MPT2SAS_LOGGING - enabled in Kconfig
+ */
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+ if (IOC->logging_level & BITS) \
+ CMD; \
+}
+#else
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#endif /* CONFIG_SCSI_MPT2SAS_LOGGING */
+
+
+/*
+ * debug macros
+ */
+
+#define dprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)
+
+#define dsgprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)
+
+#define devtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)
+
+#define dewtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK)
+
+#define dinitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)
+
+#define dexitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)
+
+#define dfailprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)
+
+#define dtmprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)
+
+#define dreplyprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)
+
+#define dhsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)
+
+#define dcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)
+
+#define ddlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)
+
+#define drsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)
+
+#define dsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)
+
+#define dctlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)
+
+#define dcsmisasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CSMISAS)
+
+#define dsasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)
+
+#define dsastransport(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#define dtsfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL)
+
+#define dtransportprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT)
+
+#define dTMprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TARGET_MODE)
+
+/* inline functions for dumping debug data*/
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _debug_dump_mf - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_mf(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ printk(KERN_INFO "mf:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ printk("\n\t");
+ printk("%08x ", le32_to_cpu(mfp[i]));
+ }
+ printk("\n");
+}
+#else
+#define _debug_dump_mf(mpi_request, sz)
+#endif /* CONFIG_SCSI_MPT2SAS_LOGGING */
+
+#endif /* MPT2SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
new file mode 100644
index 000000000..3f26147bb
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -0,0 +1,8592 @@
+/*
+ * Scsi Host Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/raid_class.h>
+#include <linux/slab.h>
+
+#include <asm/unaligned.h>
+
+#include "mpt2sas_base.h"
+
+MODULE_AUTHOR(MPT2SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT2SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT2SAS_DRIVER_VERSION);
+
+#define RAID_CHANNEL 1
+
+/* forward proto's */
+static void _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander);
+static void _firmware_event_work(struct work_struct *work);
+
+static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
+/* global parameters */
+LIST_HEAD(mpt2sas_ioc_list);
+
+/* local parameters */
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int mpt_ids;
+
+static u8 tm_tr_cb_idx = -1 ;
+static u8 tm_tr_volume_cb_idx = -1 ;
+static u8 tm_sas_control_cb_idx = -1;
+
+/* command line options */
+static u32 logging_level;
+MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
+ "(default=0)");
+
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
+
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+
+/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+#define MPT2SAS_MAX_LUN (16895)
+static int max_lun = MPT2SAS_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Either bit can be set, or both
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
+ "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = 0;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
+/**
+ * struct sense_info - common structure for obtaining sense keys
+ * @skey: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ */
+struct sense_info {
+ u8 skey;
+ u8 asc;
+ u8 ascq;
+};
+
+
+#define MPT2SAS_TURN_ON_PFA_LED (0xFFFC)
+#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+/**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+ * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
+ * @ioc: per adapter object
+ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+ * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
+ * @event_data: reply event data payload follows
+ *
+ * This object stored on ioc->fw_event_list.
+ */
+struct fw_event_work {
+ struct list_head list;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
+ struct MPT2SAS_ADAPTER *ioc;
+ u16 device_handle;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 ignore;
+ u16 event;
+ char event_data[0] __aligned(4);
+};
+
+/* raid transport support */
+static struct raid_template *mpt2sas_raid_template;
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi staus
+ * @log_info: log information
+ * @transfer_length: data length transfer when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+ u16 handle;
+ u8 is_raid;
+ enum dma_data_direction dir;
+ u32 data_length;
+ dma_addr_t data_dma;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u32 lun;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 timeout;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 valid_reply;
+ /* the following bits are only valid when 'valid_reply = 1' */
+ u32 sense_length;
+ u16 ioc_status;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ u32 transfer_length;
+};
+
+/*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+static struct pci_device_id scsih_pci_table[] = {
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Falcon ~ 2008*/
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Liberator ~ 2108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Meteor ~ 2116 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Thunderbolt ~ 2208 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Mustang ~ 2308 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* SSS6200 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, scsih_pci_table);
+
+/**
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
+ *
+ * Note: The logging levels are defined in mpt2sas_debug.h.
+ */
+static int
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT2SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level);
+ list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
+ ioc->logging_level = logging_level;
+ return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+ &logging_level, 0644);
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+ Mpi2BootDeviceSasWwid_t *boot_device)
+{
+ return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in INDENTIFY fram
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+ Mpi2BootDeviceDeviceName_t *boot_device)
+{
+ return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+ Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+ return (enclosure_logical_id == le64_to_cpu(boot_device->
+ EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
+ SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in INDENTIFY fram
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+ u64 enclosure_logical_id, u16 slot, u8 form,
+ Mpi2BiosPage2BootDevice_t *boot_device)
+{
+ int rc = 0;
+
+ switch (form) {
+ case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+ if (!sas_address)
+ break;
+ rc = _scsih_srch_boot_sas_address(
+ sas_address, &boot_device->SasWwid);
+ break;
+ case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+ if (!enclosure_logical_id)
+ break;
+ rc = _scsih_srch_boot_encl_slot(
+ enclosure_logical_id,
+ slot, &boot_device->EnclosureSlot);
+ break;
+ case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+ if (!device_name)
+ break;
+ rc = _scsih_srch_boot_device_name(
+ device_name, &boot_device->DeviceName);
+ break;
+ case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * _scsih_get_sas_address - set the sas_address for given device handle
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 success, non-zero when failure
+ */
+static int
+_scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ u64 *sas_address)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 ioc_status;
+ *sas_address = 0;
+
+ if (handle <= ioc->sas_hba.num_phys) {
+ *sas_address = ioc->sas_hba.sas_address;
+ return 0;
+ }
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ return 0;
+ }
+
+ /* we hit this becuase the given parent handle doesn't exist */
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return -ENXIO;
+ /* else error case */
+ printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), "
+ "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: either sas_device or raid_device object
+ * @is_raid: [flag] 1 = raid object, 0 = sas object
+ *
+ * Determines whether this device should be first reported device to
+ * to scsi-ml or sas transport, this purpose is for persistent boot device.
+ * There are primary, alternate, and current entries in bios page 2. The order
+ * priority is primary, alternate, then current. This routine saves
+ * the corresponding device object and is_raid flag in the ioc object.
+ * The saved data to be used later in _scsih_probe_boot_devices().
+ */
+static void
+_scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
+ void *device, u8 is_raid)
+{
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u64 sas_address;
+ u64 device_name;
+ u64 enclosure_logical_id;
+ u16 slot;
+
+ /* only process this function when driver loads */
+ if (!ioc->is_driver_loading)
+ return;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ if (!is_raid) {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
+ } else {
+ raid_device = device;
+ sas_address = raid_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ }
+
+ if (!ioc->req_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedBootDevice)) {
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s: req_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_boot_device.device = device;
+ ioc->req_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->req_alt_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedAltBootDevice)) {
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s: req_alt_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_alt_boot_device.device = device;
+ ioc->req_alt_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->current_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.CurrentBootDevice)) {
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s: current_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->current_boot_device.device = device;
+ ioc->current_boot_device.is_raid = is_raid;
+ }
+ }
+}
+
+/**
+ * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+struct _sas_device *
+mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+static struct _sas_device *
+_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_remove - remove sas_device from list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Removing object and freeing associated memory from the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ if (!sas_device)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+
+/**
+ * _scsih_sas_device_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object to the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
+ "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device->handle, (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ /* When asyn scanning is enabled, its not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading) {
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
+ */
+static void
+_scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
+ "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device->handle, (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+ _scsih_determine_boot_device(ioc, sas_device, 0);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: sas device target id
+ * @channel: sas device channel
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on target id, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_id(struct MPT2SAS_ADAPTER *ioc, int id, int channel)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->id == id && raid_device->channel == channel) {
+ r = raid_device;
+ goto out;
+ }
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on handle, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->handle != handle)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_wwid - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on wwid, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_wwid(struct MPT2SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid != wwid)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_add - add raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ * This is added to the raid_device_list link list.
+ */
+static void
+_scsih_raid_device_add(struct MPT2SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
+ "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ raid_device->handle, (unsigned long long)raid_device->wwid));
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_add_tail(&raid_device->list, &ioc->raid_device_list);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_remove - delete raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ */
+static void
+_scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * mpt2sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt2sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_add(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+ if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+ ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ */
+static struct scsi_cmnd *
+_scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then will derefrence the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->scsi_lookup[smid - 1].scmd;
+ ioc->scsi_lookup[smid - 1].scmd = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_scmd - scmd lookup
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @scmd: pointer to scsi command object
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a scmd pointer in the scsi_lookup array,
+ * returning the revelent smid. A returned value of zero means invalid.
+ */
+static u16
+_scsih_scsi_lookup_find_by_scmd(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd
+ *scmd)
+{
+ u16 smid;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ smid = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd == scmd) {
+ smid = ioc->scsi_lookup[i].smid;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel &&
+ ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_get_chain_buffer_tracker - obtain chain tracker
+ * @ioc: per adapter object
+ * @smid: smid associated to an IO request
+ *
+ * Returns chain tracker(from ioc->free_chain_list)
+ */
+static struct chain_tracker *
+_scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ struct chain_tracker *chain_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_chain_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+ "available\n", ioc->name));
+ return NULL;
+ }
+ chain_req = list_entry(ioc->free_chain_list.next,
+ struct chain_tracker, tracker_list);
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->scsi_lookup[smid - 1].chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return chain_req;
+}
+
+/**
+ * _scsih_build_scatter_gather - main sg creation routine
+ * @ioc: per adapter object
+ * @scmd: scsi command
+ * @smid: system request message index
+ * Context: none.
+ *
+ * The main routine that builds scatter gather table from a given
+ * scsi request sent via the .queuecommand main handler.
+ *
+ * Returns 0 success, anything else error
+ */
+static int
+_scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_offset;
+ u32 chain_length;
+ u32 chain_flags;
+ int sges_left;
+ u32 sges_in_segment;
+ u32 sgl_flags;
+ u32 sgl_flags_last_element;
+ u32 sgl_flags_end_buffer;
+ struct chain_tracker *chain_req;
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+
+ /* init scatter gather flags */
+ sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
+ if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
+ sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
+ << MPI2_SGE_FLAGS_SHIFT;
+ sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
+ << MPI2_SGE_FLAGS_SHIFT;
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
+ " failed: request for %d bytes!\n", scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+
+ sg_local = &mpi_request->SGL;
+ sges_in_segment = ioc->max_sges_in_main_message;
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
+ (sges_in_segment * ioc->sge_size))/4;
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment) {
+ if (sges_in_segment == 1)
+ ioc->base_add_sg_single(sg_local,
+ sgl_flags_last_element | sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ ioc->base_add_sg_single(sg_local, sgl_flags |
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ /* initializing the chain flags and pointers */
+ chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
+ chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ do {
+ sges_in_segment = (sges_left <=
+ ioc->max_sges_in_chain_message) ? sges_left :
+ ioc->max_sges_in_chain_message;
+ chain_offset = (sges_left == sges_in_segment) ?
+ 0 : (sges_in_segment * ioc->sge_size)/4;
+ chain_length = sges_in_segment * ioc->sge_size;
+ if (chain_offset) {
+ chain_offset = chain_offset <<
+ MPI2_SGE_CHAIN_OFFSET_SHIFT;
+ chain_length += ioc->sge_size;
+ }
+ ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
+ chain_length, chain_dma);
+ sg_local = chain;
+ if (!chain_offset)
+ goto fill_in_last_segment;
+
+ /* fill in chain segments */
+ while (sges_in_segment) {
+ if (sges_in_segment == 1)
+ ioc->base_add_sg_single(sg_local,
+ sgl_flags_last_element |
+ sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ ioc->base_add_sg_single(sg_local, sgl_flags |
+ sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ } while (1);
+
+
+ fill_in_last_segment:
+
+ /* fill the last segment */
+ while (sges_left) {
+ if (sges_left == 1)
+ ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ else
+ ioc->base_add_sg_single(sg_local, sgl_flags |
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size;
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Returns queue depth.
+ */
+static int
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth;
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ max_depth = shost->can_queue;
+
+ /* limit max device queue for SATA to 32 */
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ goto not_sata;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ if (!sas_target_priv_data)
+ goto not_sata;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+ goto not_sata;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (sas_device && sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ not_sata:
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+/**
+ * _scsih_target_alloc - target add routine
+ * @starget: scsi target struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
+ GFP_KERNEL);
+ if (!sas_target_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = sas_target_priv_data;
+ sas_target_priv_data->starget = starget;
+ sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
+
+ /* RAID volumes */
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
+ if (ioc->is_warpdrive)
+ sas_target_priv_data->raid_device = raid_device;
+ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+ }
+
+ /* sas/sata devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+
+ if (sas_device) {
+ sas_target_priv_data->handle = sas_device->handle;
+ sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_device->starget = starget;
+ sas_device->id = starget->id;
+ sas_device->channel = starget->channel;
+ if (test_bit(sas_device->handle, ioc->pd_handles))
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return 0;
+}
+
+/**
+ * _scsih_target_destroy - target destroy routine
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = starget->hostdata;
+ if (!sas_target_priv_data)
+ return;
+
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ raid_device->starget = NULL;
+ raid_device->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device && (sas_device->starget == starget) &&
+ (sas_device->id == starget->id) &&
+ (sas_device->channel == starget->channel))
+ sas_device->starget = NULL;
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ out:
+ kfree(sas_target_priv_data);
+ starget->hostdata = NULL;
+}
+
+/**
+ * _scsih_slave_alloc - device add routine
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct MPT2SAS_ADAPTER *ioc;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
+ GFP_KERNEL);
+ if (!sas_device_priv_data)
+ return -ENOMEM;
+
+ sas_device_priv_data->lun = sdev->lun;
+ sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns++;
+ sas_device_priv_data->sas_target = sas_target_priv_data;
+ sdev->hostdata = sas_device_priv_data;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
+ sdev->no_uld_attach = 1;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc,
+ starget->id, starget->channel);
+ if (raid_device)
+ raid_device->sdev = sdev; /* raid is single lun */
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && (sas_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : sas_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ sas_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_slave_destroy - device destroy routine
+ * @sdev: scsi device struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct MPT2SAS_ADAPTER *ioc;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns--;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && !sas_target_priv_data->num_luns)
+ sas_device->starget = NULL;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * _scsih_display_sata_capabilities - sata capabilities
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sdev: scsi device struct
+ */
+static void
+_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
+ u16 handle, struct scsi_device *sdev)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u16 flags;
+ u32 device_info;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ flags = le16_to_cpu(sas_device_pg0.Flags);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ sdev_printk(KERN_INFO, sdev,
+ "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
+ "sw_preserve(%s)\n",
+ (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
+ "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
+}
+
+/**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+
+ if (ioc->is_warpdrive)
+ return 0;
+ return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volume_status_flags;
+ u8 percent_complete;
+ u16 handle;
+
+ percent_complete = 0;
+ handle = 0;
+ if (ioc->is_warpdrive)
+ goto out;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device) {
+ handle = raid_device->handle;
+ percent_complete = raid_device->percent_complete;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!handle)
+ goto out;
+
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ percent_complete = 0;
+ goto out;
+ }
+
+ volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (!(volume_status_flags &
+ MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+ percent_complete = 0;
+
+ out:
+ raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume level
+ * @dev the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volstate;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ u16 handle = 0;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device)
+ handle = raid_device->handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+ state = RAID_STATE_RESYNCING;
+ goto out;
+ }
+
+ switch (vol_pg0.VolumeState) {
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MPI2_RAID_VOL_STATE_FAILED:
+ case MPI2_RAID_VOL_STATE_MISSING:
+ state = RAID_STATE_OFFLINE;
+ break;
+ }
+ out:
+ raid_set_state(mpt2sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @volume_type: volume type
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
+{
+ enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+ switch (volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ level = RAID_LEVEL_0;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ level = RAID_LEVEL_10;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ level = RAID_LEVEL_1E;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ level = RAID_LEVEL_1;
+ break;
+ }
+
+ raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+/**
+ * _scsih_get_volume_capabilities - volume capabilities
+ * @ioc: per adapter object
+ * @sas_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
+ */
+static int
+_scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ Mpi2RaidVolPage0_t *vol_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 sz;
+ u8 num_pds;
+
+ if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
+ &num_pds)) || !num_pds) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ raid_device->num_pds = num_pds;
+ sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+ sizeof(Mpi2RaidVol0PhysDisk_t));
+ vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!vol_pg0) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ kfree(vol_pg0);
+ return 1;
+ }
+
+ raid_device->volume_type = vol_pg0->VolumeType;
+
+ /* figure out what the underlying devices are by
+ * obtaining the device_info bits for the 1st device
+ */
+ if (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+ vol_pg0->PhysDisk[0].PhysDiskNum))) {
+ if (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ le16_to_cpu(pd_pg0.DevHandle)))) {
+ raid_device->device_info =
+ le32_to_cpu(sas_device_pg0.DeviceInfo);
+ }
+ }
+
+ kfree(vol_pg0);
+ return 0;
+}
+/**
+ * _scsih_disable_ddio - Disable direct I/O for all the volumes
+ * @ioc: per adapter object
+ */
+static void
+_scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t vol_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u16 ioc_status;
+ unsigned long flags;
+
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(vol_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->direct_io_enabled = 0;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+ return;
+}
+
+
+/**
+ * _scsih_get_num_volumes - Get number of volumes in the ioc
+ * @ioc: per adapter object
+ */
+static u8
+_scsih_get_num_volumes(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t vol_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 handle;
+ u8 vol_cnt = 0;
+ u16 ioc_status;
+
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ vol_cnt++;
+ handle = le16_to_cpu(vol_pg1.DevHandle);
+ }
+ return vol_cnt;
+}
+
+
+/**
+ * _scsih_init_warpdrive_properties - Set properties for warpdrive direct I/O.
+ * @ioc: per adapter object
+ * @raid_device: the raid_device object
+ */
+static void
+_scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ Mpi2RaidVolPage0_t *vol_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 sz;
+ u8 num_pds, count;
+ unsigned long stripe_sz, block_sz;
+ u8 stripe_exp, block_exp;
+ u64 dev_max_lba;
+
+ if (!ioc->is_warpdrive)
+ return;
+
+ if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "globally as drives are exposed\n", ioc->name);
+ return;
+ }
+ if (_scsih_get_num_volumes(ioc) > 1) {
+ _scsih_disable_ddio(ioc);
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "globally as number of drives > 1\n", ioc->name);
+ return;
+ }
+ if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
+ &num_pds)) || !num_pds) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "Failure in computing number of drives\n", ioc->name);
+ return;
+ }
+
+ sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+ sizeof(Mpi2RaidVol0PhysDisk_t));
+ vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!vol_pg0) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "Memory allocation failure for RVPG0\n", ioc->name);
+ return;
+ }
+
+ if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "Failure in retrieving RVPG0\n", ioc->name);
+ kfree(vol_pg0);
+ return;
+ }
+
+ /*
+ * WARPDRIVE:If number of physical disks in a volume exceeds the max pds
+ * assumed for WARPDRIVE, disable direct I/O
+ */
+ if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
+ printk(MPT2SAS_WARN_FMT "WarpDrive : Direct IO is disabled "
+ "for the drive with handle(0x%04x): num_mem=%d, "
+ "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
+ num_pds, MPT_MAX_WARPDRIVE_PDS);
+ kfree(vol_pg0);
+ return;
+ }
+ for (count = 0; count < num_pds; count++) {
+ if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+ vol_pg0->PhysDisk[count].PhysDiskNum) ||
+ le16_to_cpu(pd_pg0.DevHandle) ==
+ MPT2SAS_INVALID_DEVICE_HANDLE) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+ "disabled for the drive with handle(0x%04x) member"
+ "handle retrieval failed for member number=%d\n",
+ ioc->name, raid_device->handle,
+ vol_pg0->PhysDisk[count].PhysDiskNum);
+ goto out_error;
+ }
+ /* Disable direct I/O if member drive lba exceeds 4 bytes */
+ dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+ if (dev_max_lba >> 32) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+ "disabled for the drive with handle(0x%04x) member"
+ "handle (0x%04x) unsupported max lba 0x%016llx\n",
+ ioc->name, raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (unsigned long long)dev_max_lba);
+ goto out_error;
+ }
+
+ raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
+ }
+
+ /*
+ * Assumption for WD: Direct I/O is not supported if the volume is
+ * not RAID0
+ */
+ if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "for the drive with handle(0x%04x): type=%d, "
+ "s_sz=%uK, blk_size=%u\n", ioc->name,
+ raid_device->handle, raid_device->volume_type,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+ le16_to_cpu(vol_pg0->BlockSize));
+ goto out_error;
+ }
+
+ stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+ stripe_exp = find_first_bit(&stripe_sz, 32);
+ if (stripe_exp == 32) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ ioc->name, raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+ goto out_error;
+ }
+ raid_device->stripe_exponent = stripe_exp;
+ block_sz = le16_to_cpu(vol_pg0->BlockSize);
+ block_exp = find_first_bit(&block_sz, 16);
+ if (block_exp == 16) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
+ "for the drive with handle(0x%04x) invalid block sz %u\n",
+ ioc->name, raid_device->handle,
+ le16_to_cpu(vol_pg0->BlockSize));
+ goto out_error;
+ }
+ raid_device->block_exponent = block_exp;
+ raid_device->direct_io_enabled = 1;
+
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
+ " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+ /*
+ * WARPDRIVE: Though the following fields are not used for direct IO,
+ * stored for future purpose:
+ */
+ raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA);
+ raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+ raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize);
+
+
+ kfree(vol_pg0);
+ return;
+
+out_error:
+ raid_device->direct_io_enabled = 0;
+ for (count = 0; count < num_pds; count++)
+ raid_device->pd_handle[count] = 0;
+ kfree(vol_pg0);
+ return;
+}
+
+/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+ /* only for TAPE */
+ if (sdev->type != TYPE_TAPE)
+ return;
+
+ if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+ return;
+
+ sas_enable_tlr(sdev);
+ sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+ sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+ return;
+
+}
+
+/**
+ * _scsih_slave_configure - device configure routine.
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int qdepth;
+ u8 ssp_target = 0;
+ char *ds = "";
+ char *r_level = "";
+ u16 handle, volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ qdepth = 1;
+ sas_device_priv_data = sdev->hostdata;
+ sas_device_priv_data->configured_lun = 1;
+ sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ handle = sas_target_priv_data->handle;
+
+ /* raid volume handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (!raid_device) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+ /*
+ * WARPDRIVE: Initialize the required data for Direct IO
+ */
+ _scsih_init_warpdrive_properties(ioc, raid_device);
+
+ /* RAID Queue Depth Support
+ * IS volume = underlying qdepth of drive type, either
+ * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH
+ * IM/IME/R10 = 128 (MPT2SAS_RAID_QUEUE_DEPTH)
+ */
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
+ ds = "SSP";
+ } else {
+ qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ else
+ ds = "STP";
+ }
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ r_level = "RAID0";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
+ if (ioc->manu_pg10.OEMIdentifier &&
+ (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
+ MFG10_GF0_R10_DISPLAY) &&
+ !(raid_device->num_pds % 2))
+ r_level = "RAID10";
+ else
+ r_level = "RAID1E";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID1";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID10";
+ break;
+ case MPI2_RAID_VOL_TYPE_UNKNOWN:
+ default:
+ qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAIDX";
+ break;
+ }
+
+ if (!ioc->hide_ir_msg)
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
+ "wwid(0x%016llx), pd_count(%d), type(%s)\n",
+ r_level, raid_device->handle,
+ (unsigned long long)raid_device->wwid,
+ raid_device->num_pds, ds);
+ _scsih_change_queue_depth(sdev, qdepth);
+ /* raid transport support */
+ if (!ioc->is_warpdrive)
+ _scsih_set_level(sdev, raid_device->volume_type);
+ return 0;
+ }
+
+ /* non-raid handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ if (mpt2sas_config_get_volume_handle(ioc, handle,
+ &volume_handle)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ if (volume_handle && mpt2sas_config_get_volume_wwid(ioc,
+ volume_handle, &volume_wwid)) {
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
+ ssp_target = 1;
+ ds = "SSP";
+ } else {
+ qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "STP";
+ else if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ }
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
+ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+ ds, sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy,
+ (unsigned long long)sas_device->device_name);
+ sdev_printk(KERN_INFO, sdev, "%s: "
+ "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
+ (unsigned long long) sas_device->enclosure_logical_id,
+ sas_device->slot);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!ssp_target)
+ _scsih_display_sata_capabilities(ioc, handle, sdev);
+
+
+ _scsih_change_queue_depth(sdev, qdepth);
+
+ if (ssp_target) {
+ sas_read_port_mode_page(sdev);
+ _scsih_enable_tlr(ioc, sdev);
+ }
+ return 0;
+}
+
+/**
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * @sdev: scsi device struct
+ * @bdev: pointer to block device context
+ * @capacity: device size (in 512 byte sectors)
+ * @params: three element array to place output:
+ * params[0] number of heads (max 255)
+ * params[1] number of sectors (max 63)
+ * params[2] number of cylinders
+ *
+ * Return nothing.
+ */
+static int
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
+ /*
+ * Handle extended translation size for logical drives
+ * > 1Gb
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ /* return result */
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+
+ return 0;
+}
+
+/**
+ * _scsih_response_code - translation of device response code
+ * @ioc: per adapter object
+ * @response_code: response code returned by the device
+ *
+ * Return nothing.
+ */
+static void
+_scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
+{
+ char *desc;
+
+ switch (response_code) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ printk(MPT2SAS_WARN_FMT "response_code(0x%01x): %s\n",
+ ioc->name, response_code, desc);
+}
+
+/**
+ * _scsih_tm_done - tm completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->tm_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+ if (ioc->tm_cmds.smid != smid)
+ return 1;
+ mpt2sas_base_flush_reply_queues(ioc);
+ ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->tm_cmds.status |= MPT2_CMD_REPLY_VALID;
+ }
+ ioc->tm_cmds.status &= ~MPT2_CMD_PENDING;
+ complete(&ioc->tm_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt2sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During taskmangement request, we need to freeze the device queue.
+ */
+void
+mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 1;
+ skip = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ }
+}
+
+/**
+ * mpt2sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During taskmangement request, we need to freeze the device queue.
+ */
+void
+mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 0;
+ skip = 1;
+ ioc->ignore_loginfos = 0;
+ }
+ }
+}
+
+
+/**
+ * mpt2sas_scsih_issue_tm - main routine for sending tm requests
+ * @ioc: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ * @timeout: timeout in seconds
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * Context: user
+ *
+ * A generic API for sending task management requests to firmware.
+ *
+ * The callback index is set inside `ioc->tm_cb_idx`.
+ *
+ * Return SUCCESS or FAILED.
+ */
+int
+mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ enum mutex_type m_type)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ u16 smid = 0;
+ u32 ioc_state;
+ unsigned long timeleft;
+ struct scsiio_tracker *scsi_lookup = NULL;
+ int rc;
+
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
+ if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
+ "active!\n", ioc->name));
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt2sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
+ " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type,
+ smid_task));
+ ioc->tm_cmds.status = MPT2_CMD_PENDING;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->tm_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
+ mpt2sas_scsih_set_tm_flag(ioc, handle);
+ init_completion(&ioc->tm_cmds.done);
+ mpt2sas_base_put_smid_hi_priority(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
+ }
+
+ if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) {
+ mpi_reply = ioc->tm_cmds.reply;
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "complete tm: "
+ "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ _scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
+ }
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ if (scsi_lookup->scmd == NULL)
+ break;
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
+ */
+static void
+_scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
+{
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT2SAS_TARGET *priv_target = starget->hostdata;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ if (ioc->hide_ir_msg)
+ device_str = "WarpDrive";
+ else
+ device_str = "volume";
+
+ scsi_print_command(scmd);
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ starget_printk(KERN_INFO, starget, "%s handle(0x%04x), "
+ "%s wwid(0x%016llx)\n", device_str, priv_target->handle,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ if (priv_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ starget_printk(KERN_INFO, starget,
+ "volume handle(0x%04x), "
+ "volume wwid(0x%016llx)\n",
+ sas_device->volume_handle,
+ (unsigned long long)sas_device->volume_wwid);
+ }
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy);
+ starget_printk(KERN_INFO, starget,
+ "enclosure_logical_id(0x%016llx), slot(%d)\n",
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_abort - eh threads main abort routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_abort(struct scsi_cmnd *scmd)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ u16 smid;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device, "attempting task abort! "
+ "scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* search for the command */
+ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
+ if (!smid) {
+ scmd->result = DID_RESET << 16;
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components and volumes this is not supported */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ mpt2sas_halt_firmware(ioc);
+
+ handle = sas_device_priv_data->sas_target->handle;
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+
+ struct scsi_target *starget = scmd->device->sdev_target;
+
+ starget_printk(KERN_INFO, starget, "attempting device reset! "
+ "scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ starget_printk(KERN_INFO, starget, "device been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_target_reset - eh threads main target reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_target_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+ struct scsi_target *starget = scmd->device->sdev_target;
+
+ starget_printk(KERN_INFO, starget, "attempting target reset! "
+ "scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ starget_printk(KERN_INFO, starget, "target been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, TM_MUTEX_ON);
+
+ out:
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_host_reset - eh threads main host reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_host_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ int r, retval;
+
+ printk(MPT2SAS_INFO_FMT "attempting host reset! scmd(%p)\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+
+ if (ioc->is_driver_loading) {
+ printk(MPT2SAS_INFO_FMT "Blocking the host reset\n",
+ ioc->name);
+ r = FAILED;
+ goto out;
+ }
+
+ retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ r = (retval < 0) ? FAILED : SUCCESS;
+
+ out:
+ printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n",
+ ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return r;
+}
+
+/**
+ * _scsih_fw_event_add - insert and queue up fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This adds the firmware event object into link list, then queues it up to
+ * be processed from user context.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ if (ioc->firmware_event_thread == NULL)
+ return;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+ queue_delayed_work(ioc->firmware_event_thread,
+ &fw_event->delayed_work, 0);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_fw_event_free - delete fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This removes firmware event object from link list, frees associated memory.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
+ *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+
+/**
+ * _scsih_error_recovery_delete_devices - remove devices not responding
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+
+ fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt2sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
+ * @ioc: per adapter object
+ *
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
+
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work_sync(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ }
+}
+
+/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * change the device state from block to running
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 0;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
+ "handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+}
+/**
+ * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropiately set the sdev state.
+ */
+static void
+_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+ if (sas_device_priv_data->sas_target->sas_address ==
+ sas_address) {
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
+ MPT2SAS_INFO_FMT "SDEV_RUNNING: "
+ "sas address(0x%016llx)\n", ioc->name,
+ (unsigned long long)sas_address));
+ sas_device_priv_data->block = 0;
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropiately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_blocked, "
+ "handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_block(sdev);
+ }
+}
+
+
+/**
+ * _scsih_block_io_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropiately set the sdev state.
+ */
+static void
+_scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
+ MPT2SAS_INFO_FMT "SDEV_BLOCK: "
+ "handle(0x%04x)\n", ioc->name, handle));
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_to_ex
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ *
+ * This routine set sdev state to SDEV_BLOCK for all devices
+ * attached to this expander. This function called when expander is
+ * pulled.
+ */
+static void
+_scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt2sas_port;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_sibling;
+ unsigned long flags;
+
+ if (!sas_expander)
+ return;
+
+ list_for_each_entry(mpt2sas_port,
+ &sas_expander->sas_port_list, port_list) {
+ if (mpt2sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device =
+ mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ mpt2sas_port->remote_identify.sas_address);
+ if (sas_device)
+ set_bit(sas_device->handle,
+ ioc->blocking_handles);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+ }
+
+ list_for_each_entry(mpt2sas_port,
+ &sas_expander->sas_port_list, port_list) {
+
+ if (mpt2sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt2sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ expander_sibling =
+ mpt2sas_scsih_expander_find_by_sas_address(
+ ioc, mpt2sas_port->remote_identify.sas_address);
+ _scsih_block_io_to_children_attached_to_ex(ioc,
+ expander_sibling);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_directly
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine set sdev state to SDEV_BLOCK for all devices
+ * direct attached during device pull.
+ */
+static void
+_scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_tm_tr_send - send task management request
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This code is to initiate the device removal handshake protocol
+ * with controller firmware. This function will issue target reset
+ * using high priority request queue. It will send a sas iounit
+ * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
+ *
+ * This is designed to send muliple task management request at the same
+ * time to the fifo. If the fifo is full, we will append the request,
+ * and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _sas_device *sas_device;
+ struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ struct _tr_list *delayed_tr;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+ "removed: handle(0x%04x)\n", __func__, ioc->name, handle));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+ "error recovery: handle(0x%04x)\n", __func__, ioc->name,
+ handle));
+ return;
+ }
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+ "operational: handle(0x%04x)\n", __func__, ioc->name,
+ handle));
+ return;
+ }
+
+ /* if PD, then return */
+ if (test_bit(handle, ioc->pd_handles))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device && sas_device->starget &&
+ sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = sas_device->sas_address;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_target_priv_data) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long)sas_address));
+ _scsih_ublock_io_device(ioc, sas_address);
+ sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt2sas_base_put_smid_hi_priority(ioc, smid);
+}
+
+
+
+/**
+ * _scsih_sas_control_complete - completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the sas iounit control completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ Mpi2SasIoUnitControlReply_t *mpi_reply =
+ mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (likely(mpi_reply)) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ } else {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ return 1;
+}
+
+/**
+ * _scsih_tm_tr_volume_send - send target reset request for volumes
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This is designed to send muliple task management request at the same
+ * time to the fifo. If the fifo is full, we will append the request,
+ * and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_volume_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _tr_list *delayed_tr;
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
+ return;
+ }
+
+ smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
+ ioc->tm_tr_volume_cb_idx));
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt2sas_base_put_smid_hi_priority(ioc, smid);
+}
+
+/**
+ * _scsih_tm_volume_tr_complete - target reset completion
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_volume_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt2sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, printk("spurious interrupt: "
+ "handle(0x%04x:0x%04x), smid(%d)!!!\n", handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+/**
+ * _scsih_tm_tr_complete -
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the target reset completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u16 smid_sas_ctrl;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
+ "removed\n", __func__, ioc->name));
+ return 1;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
+ "error recovery\n", __func__, ioc->name));
+ return 1;
+ }
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
+ "operational\n", __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
+ "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
+ if (!smid_sas_ctrl) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return 1;
+ }
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
+ mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+/**
+ * _scsih_check_for_pending_tm - check for pending task management
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * This will check delayed target reset list, and feed the
+ * next reqeust.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _tr_list *delayed_tr;
+
+ if (!list_empty(&ioc->delayed_tr_volume_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
+ struct _tr_list, list);
+ mpt2sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt2sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * _scsih_check_topo_delete_events - sanity check on topo events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This routine added to better handle cable breaker.
+ *
+ * This handles the case where driver receives multiple expander
+ * add and delete events in a single shot. When there is a delete event
+ * the routine will void any pending add events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventDataSasTopologyChangeList_t *local_event_data;
+ u16 expander_handle;
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle;
+
+ for (i = 0 ; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ if (expander_handle < ioc->sas_hba.num_phys) {
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+ return;
+ }
+ if (event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+ /* put expander attached devices into blocking state */
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
+ expander_handle);
+ _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ do {
+ handle = find_first_bit(ioc->blocking_handles,
+ ioc->facts.MaxDevHandle);
+ if (handle < ioc->facts.MaxDevHandle)
+ _scsih_block_io_device(ioc, handle);
+ } while (test_and_clear_bit(handle, ioc->blocking_handles));
+ } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+
+ if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
+ if (local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
+ expander_handle) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "setting ignoring flag\n", ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_delete_flag - setting volume delete flag
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * This
+ * Return nothing.
+ */
+static void
+_scsih_set_volume_delete_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device && raid_device->starget &&
+ raid_device->starget->hostdata) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "setting delete flag: handle(0x%04x), "
+ "wwid(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long) raid_device->wwid));
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
+ * @handle: input handle
+ * @a: handle for volume a
+ * @b: handle for volume b
+ *
+ * IR firmware only supports two raid volumes. The purpose of this
+ * routine is to set the volume handle in either a or b. When the given
+ * input handle is non-zero, or when a and b have not been set before.
+ */
+static void
+_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
+{
+ if (!handle || handle == *a || handle == *b)
+ return;
+ if (!*a)
+ *a = handle;
+ else if (!*b)
+ *b = handle;
+}
+
+/**
+ * _scsih_check_ir_config_unhide_events - check for UNHIDE events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This routine will send target reset to volume, followed by target
+ * resets to the PDs. This is called when a PD has been removed, or
+ * volume has been deleted or removed. When the target reset is sent
+ * to volume, the PD target resets need to be queued to start upon
+ * completion of the volume target reset.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_ir_config_unhide_events(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u16 handle, volume_handle, a, b;
+ struct _tr_list *delayed_tr;
+
+ a = 0;
+ b = 0;
+
+ if (ioc->is_warpdrive)
+ return;
+
+ /* Volume Resets for Deleted or Removed */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_delete_flag(ioc, volume_handle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ /* Volume Resets for UNHIDE events */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ if (a)
+ _scsih_tm_tr_volume_send(ioc, a);
+ if (b)
+ _scsih_tm_tr_volume_send(ioc, b);
+
+ /* PD target resets */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
+ continue;
+ handle = le16_to_cpu(element->PhysDiskDevHandle);
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ clear_bit(handle, ioc->pd_handles);
+ if (!volume_handle)
+ _scsih_tm_tr_send(ioc, handle);
+ else if (volume_handle == a || volume_handle == b) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ BUG_ON(!delayed_tr);
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
+ handle));
+ } else
+ _scsih_tm_tr_send(ioc, handle);
+ }
+}
+
+
+/**
+ * _scsih_check_volume_delete_events - set delete flag for volumes
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This will handle the case when the cable connected to entire volume is
+ * pulled. We will take care of setting the deleted flag so normal IO will
+ * not be sent.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataIrVolume_t *event_data)
+{
+ u32 state;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+ state = le32_to_cpu(event_data->NewValue);
+ if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
+ MPI2_RAID_VOL_STATE_FAILED)
+ _scsih_set_volume_delete_flag(ioc,
+ le16_to_cpu(event_data->VolDevHandle));
+}
+
+/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataTemperature_t *event_data)
+{
+ if (ioc->temp_sensors_count >= event_data->SensorNum) {
+ printk(MPT2SAS_ERR_FMT "Temperature Threshold flags %s%s%s%s"
+ " exceeded for Sensor: %d !!!\n", ioc->name,
+ ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
+ event_data->SensorNum);
+ printk(MPT2SAS_ERR_FMT "Current Temp In Celsius: %d\n",
+ ioc->name, event_data->CurrentTemperature);
+ }
+}
+
+/**
+ * _scsih_flush_running_cmds - completing outstanding commands.
+ * @ioc: per adapter object
+ *
+ * The flushing out of all pending scmd commands following host reset,
+ * where all IO is dropped to the floor.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct scsi_cmnd *scmd;
+ u16 smid;
+ u16 count = 0;
+
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (!scmd)
+ continue;
+ count++;
+ mpt2sas_base_free_smid(ioc, smid);
+ scsi_dma_unmap(scmd);
+ if (ioc->pci_error_recovery)
+ scmd->result = DID_NO_CONNECT << 16;
+ else
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "completing %d cmds\n",
+ ioc->name, count));
+}
+
+/**
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO reqest message frame
+ *
+ * Supporting protection 1 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
+{
+ u16 eedp_flags;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+
+ if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
+ return;
+
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ else
+ return;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+
+ /*
+ * enable ref/guard checking
+ * auto increment ref tag
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_get_lba(scmd));
+ break;
+
+ case SCSI_PROT_DIF_TYPE3:
+
+ /*
+ * enable guard checking
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ break;
+ }
+ mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size);
+ mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+ u8 ascq;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, ascq);
+ scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
+ SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * _scsih_scsi_direct_io_get - returns direct io flag
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ */
+static inline u8
+_scsih_scsi_direct_io_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].direct_io;
+}
+
+/**
+ * _scsih_scsi_direct_io_set - sets direct io flag
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @direct_io: Zero or non-zero value to set in the direct_io flag
+ *
+ * Returns Nothing.
+ */
+static inline void
+_scsih_scsi_direct_io_set(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
+{
+ ioc->scsi_lookup[smid - 1].direct_io = direct_io;
+}
+
+
+/**
+ * _scsih_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @raid_device: pointer to raid device data structure
+ * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @smid: system request message index
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
+ u16 smid)
+{
+ sector_t v_lba, p_lba, stripe_off, column, io_size;
+ u32 stripe_sz, stripe_exp;
+ u8 num_pds, cmd = scmd->cmnd[0];
+
+ if (cmd != READ_10 && cmd != WRITE_10 &&
+ cmd != READ_16 && cmd != WRITE_16)
+ return;
+
+ if (cmd == READ_10 || cmd == WRITE_10)
+ v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]);
+ else
+ v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]);
+
+ io_size = scsi_bufflen(scmd) >> raid_device->block_exponent;
+
+ if (v_lba + io_size - 1 > raid_device->max_lba)
+ return;
+
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = v_lba & (stripe_sz - 1);
+
+ /* Return unless IO falls within a stripe */
+ if (stripe_off + io_size > stripe_sz)
+ return;
+
+ num_pds = raid_device->num_pds;
+ p_lba = v_lba >> stripe_exp;
+ column = sector_div(p_lba, num_pds);
+ p_lba = (p_lba << stripe_exp) + stripe_off;
+
+ mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]);
+
+ if (cmd == READ_10 || cmd == WRITE_10)
+ put_unaligned_be32(lower_32_bits(p_lba),
+ &mpi_request->CDB.CDB32[2]);
+ else
+ put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
+
+ _scsih_scsi_direct_io_set(ioc, smid, 1);
+}
+
+/**
+ * _scsih_qcmd - main scsi request entry point
+ * @scmd: pointer to scsi command object
+ * @done: function pointer to be invoked on completion
+ *
+ * The callback index is set inside `ioc->scsi_io_cb_idx`.
+ *
+ * Returns 0 on success. If there's a failure, return either:
+ * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
+ * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
+ */
+static int
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct _raid_device *raid_device;
+ Mpi2SCSIIORequest_t *mpi_request;
+ u32 mpi_control;
+ u16 smid;
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (ioc->pci_error_recovery || ioc->remove_host) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ /* invalid device handle */
+ if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ /* device busy with task management */
+ else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+ else
+ mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+
+ /* set tags */
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+
+ /* Make sure Device is not raid volume.
+ * We do not expose raid functionality to upper layer for warpdrive.
+ */
+ if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
+ sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
+ mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
+
+ smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ _scsih_setup_eedp(scmd, mpi_request);
+ if (scmd->cmd_len == 32)
+ mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ else
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ mpi_request->DevHandle =
+ cpu_to_le16(sas_device_priv_data->sas_target->handle);
+ mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ mpi_request->Control = cpu_to_le32(mpi_control);
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
+ mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+ mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ mpi_request->SenseBufferLowAddress =
+ mpt2sas_base_get_sense_buffer_dma(ioc, smid);
+ mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
+ MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
+ mpi_request->LUN);
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+
+ if (!mpi_request->DataLength) {
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request->SGL);
+ } else {
+ if (_scsih_build_scatter_gather(ioc, scmd, smid)) {
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ raid_device = sas_target_priv_data->raid_device;
+ if (raid_device && raid_device->direct_io_enabled)
+ _scsih_setup_direct_io(ioc, scmd, raid_device, mpi_request,
+ smid);
+
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->DevHandle));
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
+ return 0;
+
+ out:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/**
+ * _scsih_normalize_sense - normalize descriptor and fixed format sense data
+ * @sense_buffer: sense data returned by target
+ * @data: normalized skey/asc/ascq
+ *
+ * Return nothing.
+ */
+static void
+_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
+{
+ if ((sense_buffer[0] & 0x7F) >= 0x72) {
+ /* descriptor format */
+ data->skey = sense_buffer[1] & 0x0F;
+ data->asc = sense_buffer[2];
+ data->ascq = sense_buffer[3];
+ } else {
+ /* fixed format */
+ data->skey = sense_buffer[2] & 0x0F;
+ data->asc = sense_buffer[12];
+ data->ascq = sense_buffer[13];
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * scsi_status - SCSI Status code returned from target device
+ * scsi_state - state info associated with SCSI_IO determined by ioc
+ * ioc_status - ioc supplied status info
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
+{
+ u32 response_info;
+ u8 *response_bytes;
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ u8 scsi_state = mpi_reply->SCSIState;
+ u8 scsi_status = mpi_reply->SCSIStatus;
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
+ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT2SAS_TARGET *priv_target = starget->hostdata;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+
+ if (ioc->hide_ir_msg)
+ device_str = "WarpDrive";
+ else
+ device_str = "volume";
+
+ if (log_info == 0x31170000)
+ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
+ desc_ioc_state = "success";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc_ioc_state = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ desc_ioc_state = "scsi recovered error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ desc_ioc_state = "scsi invalid dev handle";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ desc_ioc_state = "scsi device not there";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ desc_ioc_state = "scsi data overrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ desc_ioc_state = "scsi data underrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ desc_ioc_state = "scsi io data error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ desc_ioc_state = "scsi protocol error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ desc_ioc_state = "scsi task terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ desc_ioc_state = "scsi residual mismatch";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ desc_ioc_state = "scsi task mgmt failed";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ desc_ioc_state = "scsi ioc terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ desc_ioc_state = "scsi ext terminated";
+ break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc_ioc_state = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc_ioc_state = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc_ioc_state = "eedp app tag error";
+ break;
+ default:
+ desc_ioc_state = "unknown";
+ break;
+ }
+
+ switch (scsi_status) {
+ case MPI2_SCSI_STATUS_GOOD:
+ desc_scsi_status = "good";
+ break;
+ case MPI2_SCSI_STATUS_CHECK_CONDITION:
+ desc_scsi_status = "check condition";
+ break;
+ case MPI2_SCSI_STATUS_CONDITION_MET:
+ desc_scsi_status = "condition met";
+ break;
+ case MPI2_SCSI_STATUS_BUSY:
+ desc_scsi_status = "busy";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE:
+ desc_scsi_status = "intermediate";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
+ desc_scsi_status = "intermediate condmet";
+ break;
+ case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
+ desc_scsi_status = "reservation conflict";
+ break;
+ case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
+ desc_scsi_status = "command terminated";
+ break;
+ case MPI2_SCSI_STATUS_TASK_SET_FULL:
+ desc_scsi_status = "task set full";
+ break;
+ case MPI2_SCSI_STATUS_ACA_ACTIVE:
+ desc_scsi_status = "aca active";
+ break;
+ case MPI2_SCSI_STATUS_TASK_ABORTED:
+ desc_scsi_status = "task aborted";
+ break;
+ default:
+ desc_scsi_status = "unknown";
+ break;
+ }
+
+ desc_scsi_state[0] = '\0';
+ if (!scsi_state)
+ desc_scsi_state = " ";
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ strcat(desc_scsi_state, "response info ");
+ if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ strcat(desc_scsi_state, "state terminated ");
+ if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
+ strcat(desc_scsi_state, "no status ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
+ strcat(desc_scsi_state, "autosense failed ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
+ strcat(desc_scsi_state, "autosense valid ");
+
+ scsi_print_command(scmd);
+
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
+ "phy(%d)\n", ioc->name, sas_device->sas_address,
+ sas_device->phy);
+ printk(MPT2SAS_WARN_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), "
+ "smid(%d)\n", ioc->name, le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ printk(MPT2SAS_WARN_FMT "\trequest_len(%d), underflow(%d), "
+ "resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow,
+ scsi_get_resid(scmd));
+ printk(MPT2SAS_WARN_FMT "\ttag(%d), transfer_count(%d), "
+ "sc->result(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ printk(MPT2SAS_WARN_FMT "\tscsi_status(%s)(0x%02x), "
+ "scsi_state(%s)(0x%02x)\n", ioc->name, desc_scsi_status,
+ scsi_status, desc_scsi_state, scsi_state);
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: "
+ "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+ response_info = le32_to_cpu(mpi_reply->ResponseInfo);
+ response_bytes = (u8 *)&response_info;
+ _scsih_response_code(ioc, response_bytes[0]);
+ }
+}
+#endif
+
+/**
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+ struct _sas_device *sas_device;
+
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device)
+ return;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ sas_device->pfa_led_on = 1;
+
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_turn_off_pfa_led - turn off PFA LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to turned off
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus = 0;
+ mpi_request.Slot = cpu_to_le16(sas_device->slot);
+ mpi_request.DevHandle = 0;
+ mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
+ "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct scsi_target *starget;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ Mpi2EventNotificationReply_t *event_reply;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data;
+ struct _sas_device *sas_device;
+ ssize_t sz;
+ unsigned long flags;
+
+ /* only handle non-raid devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
+
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
+ if (!event_reply) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ event_reply->Event =
+ cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ event_reply->MsgLength = sz/4;
+ event_reply->EventDataLength =
+ cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
+ event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
+ event_reply->EventData;
+ event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
+ event_data->ASC = 0x5D;
+ event_data->DevHandle = cpu_to_le16(handle);
+ event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
+ mpt2sas_ctl_add_to_event_log(ioc, event_reply);
+ kfree(event_reply);
+}
+
+/**
+ * _scsih_io_done - scsi request callback
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when using _scsih_qcmd.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ Mpi2SCSIIOReply_t *mpi_reply;
+ struct scsi_cmnd *scmd;
+ u16 ioc_status;
+ u32 xfer_cnt;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ u32 response_code = 0;
+ unsigned long flags;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (scmd == NULL)
+ return 1;
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+
+ if (mpi_reply == NULL) {
+ scmd->result = DID_OK << 16;
+ goto out;
+ }
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ sas_device_priv_data->sas_target->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ /*
+ * WARPDRIVE: If direct_io is set then it is directIO,
+ * the failed direct I/O should be redirected to volume
+ */
+ if (_scsih_scsi_direct_io_get(ioc, smid) &&
+ ((ioc_status & MPI2_IOCSTATUS_MASK)
+ != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->scsi_lookup[smid - 1].scmd = scmd;
+ _scsih_scsi_direct_io_set(ioc, smid, 0);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+ mpi_request->DevHandle =
+ cpu_to_le16(sas_device_priv_data->sas_target->handle);
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ sas_device_priv_data->sas_target->handle);
+ return 0;
+ }
+
+
+ /* turning off TLR */
+ scsi_state = mpi_reply->SCSIState;
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code =
+ le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
+ if (!sas_device_priv_data->tlr_snoop_check) {
+ sas_device_priv_data->tlr_snoop_check++;
+ /* Make sure Device is not raid volume.
+ * We do not expose raid functionality to upper layer for warpdrive.
+ */
+ if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
+ sas_is_tlr_enabled(scmd->device) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
+ sas_disable_tlr(scmd->device);
+ sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
+ }
+ }
+
+ xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ scsi_status = mpi_reply->SCSIStatus;
+
+ if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI2_SCSI_STATUS_BUSY ||
+ scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
+ ioc_status = MPI2_IOCSTATUS_SUCCESS;
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ const void *sense_data = mpt2sas_base_get_sense_buffer(ioc,
+ smid);
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(mpi_reply->SenseCount));
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ /* failure prediction threshold exceeded */
+ if (data.asc == 0x5D)
+ _scsih_smart_predicted_fault(ioc,
+ le16_to_cpu(mpi_reply->DevHandle));
+ }
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_BUSY:
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ if (sas_device_priv_data->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ goto out;
+ }
+ if (log_info == 0x32010081) {
+ scmd->result = DID_RESET << 16;
+ break;
+ }
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+
+ if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
+ break;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
+ mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
+ mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->sense_buffer[0] = 0x70;
+ scmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ scmd->sense_buffer[12] = 0x20;
+ scmd->sense_buffer[13] = 0;
+ }
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (response_code ==
+ MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+ (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ _scsih_eedp_error_handling(scmd, ioc_status);
+ break;
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+
+ }
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
+ _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
+#endif
+
+ out:
+ scsi_dma_unmap(scmd);
+ scmd->scsi_done(scmd);
+ return 1;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, fw will send topology events for every device. Its
+ * possible that the handles may change from the previous setting, so this
+ * code keeping handles updating if changed.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc)
+{
+ u16 sz;
+ u16 ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u8 link_rate;
+
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "updating handles for sas_host(0x%016llx)\n",
+ ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+ AttachedDevHandle);
+ if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
+ attached_handle, i, link_rate);
+ }
+ out:
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_sas_host_add - create sas host object
+ * @ioc: per adapter object
+ *
+ * Creating host side data object, stored in ioc->sas_hba
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc)
+{
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 ioc_status;
+ u16 sz;
+ u16 device_missing_delay;
+
+ mpt2sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
+ if (!ioc->sas_hba.num_phys) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ /* sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ ioc->io_missing_delay =
+ le16_to_cpu(sas_iounit_pg1->IODeviceMissingDelay);
+ device_missing_delay =
+ le16_to_cpu(sas_iounit_pg1->ReportDeviceMissingDelay);
+ if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ ioc->device_missing_delay = (device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ ioc->device_missing_delay = device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if ((mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
+ phy_pg0, ioc->sas_hba.parent_dev);
+ }
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ printk(MPT2SAS_INFO_FMT "host_add: handle(0x%04x), "
+ "sas_addr(0x%016llx), phys(%d)\n", ioc->name, ioc->sas_hba.handle,
+ (unsigned long long) ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys) ;
+
+ if (ioc->sas_hba.enclosure_handle) {
+ if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0,
+ MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ ioc->sas_hba.enclosure_handle))) {
+ ioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_expander_add - creating expander object
+ * @ioc: per adapter object
+ * @handle: expander handle
+ *
+ * Creating expander object, stored in ioc->sas_expander_list.
+ *
+ * Return 0 for success, else error.
+ */
+static int
+_scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u32 ioc_status;
+ u16 parent_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt2sas_port = NULL;
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return -1;
+
+ if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* handle out of order topology events */
+ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
+ if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
+ != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if (sas_address_parent != ioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address_parent);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = _scsih_expander_add(ioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct _sas_node),
+ GFP_KERNEL);
+ if (!sas_expander) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.NumPhys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+
+ printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x),"
+ " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys)
+ goto out_fail;
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mpt2sas_port = mpt2sas_transport_port_add(ioc, handle,
+ sas_address_parent);
+ if (!mpt2sas_port) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mpt2sas_port->rphy->dev;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+
+ if ((mpt2sas_transport_add_expander_phy(ioc,
+ &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_expander->enclosure_handle))) {
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+ }
+
+ _scsih_expander_node_add(ioc, sas_expander);
+ return 0;
+
+ out_fail:
+
+ if (mpt2sas_port)
+ mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_address_parent);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * _scsih_done - scsih callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated message frames.
+ * The callback index passed is `ioc->scsih_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= MPT2_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->scsih_cmds.status |= MPT2_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~MPT2_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt2sas_expander_remove - removing expander object
+ * @ioc: per adapter object
+ * @sas_address: expander sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_expander)
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (sas_expander)
+ _scsih_expander_node_remove(ioc, sas_expander);
+}
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_flags: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ printk(MPT2SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), "
+ "handle(0x%04x)\n", ioc->name, desc,
+ (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+static void
+_scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)"
+ " to (0x%04x)!!!\n", sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), flags!!!\n", ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ _scsih_ublock_io_device(ioc, sas_address);
+
+}
+
+/**
+ * _scsih_add_device - creating sas device object
+ * @ioc: per adapter object
+ * @handle: sas device handle
+ * @phy_num: phy number end device attached to
+ * @is_pd: is this hidden raid component
+ *
+ * Creating end device object, stored in ioc->sas_device_list.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ __le64 sas_address;
+ u32 device_info;
+ unsigned long flags;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ printk(MPT2SAS_ERR_FMT "Flags = 0x%04x\n",
+ ioc->name, le16_to_cpu(sas_device_pg0.Flags));
+ return -1;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return -1;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return 0;
+
+ sas_device = kzalloc(sizeof(struct _sas_device),
+ GFP_KERNEL);
+ if (!sas_device) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_device->handle = handle;
+ if (_scsih_get_sas_address(ioc, le16_to_cpu
+ (sas_device_pg0.ParentDevHandle),
+ &sas_device->sas_address_parent) != 0)
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
+ sas_device->device_info = device_info;
+ sas_device->sas_address = sas_address;
+ sas_device->phy = sas_device_pg0.PhyNum;
+
+ /* get enclosure_logical_id */
+ if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_sas_device_init_add(ioc, sas_device);
+ else
+ _scsih_sas_device_add(ioc, sas_device);
+
+ return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device_delete: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+
+ if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+ (sas_device->pfa_led_on)) {
+ _scsih_turn_off_pfa_led(ioc, sas_device);
+ sas_device->pfa_led_on = 0;
+ }
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ sas_target_priv_data->handle =
+ MPT2SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ if (!ioc->hide_drives)
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
+ "(0x%016llx)\n", ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+ kfree(sas_device);
+}
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * mpt2sas_device_remove_by_sas_address - removing device object by sas address
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _scsih_sas_topology_change_event_debug - debug for topology event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->ExpStatus) {
+ case MPI2_EVENT_SAS_TOPO_ES_ADDED:
+ status_str = "add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ printk(MPT2SAS_INFO_FMT "sas topology change: (%s)\n",
+ ioc->name, status_str);
+ printk(KERN_INFO "\thandle(0x%04x), enclosure_handle(0x%04x) "
+ "start_phy(%02d), count(%d)\n",
+ le16_to_cpu(event_data->ExpanderDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPhyNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ status_str = "target add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ printk(KERN_INFO "\tphy(%02d), attached_handle(0x%04x): %s:"
+ " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
+ handle, status_str, link_rate, prev_link_rate);
+
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_topology_change_event - handle topology changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static void
+_scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 parent_handle, handle;
+ u16 reason_code;
+ u8 phy_number, max_phys;
+ struct _sas_node *sas_expander;
+ u64 sas_address;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate;
+ Mpi2EventDataSasTopologyChangeList_t *event_data =
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_topology_change_event_debug(ioc, event_data);
+#endif
+
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return;
+
+ if (!ioc->sas_hba.num_phys)
+ _scsih_sas_host_add(ioc);
+ else
+ _scsih_sas_host_refresh(ioc);
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "ignoring expander "
+ "event\n", ioc->name));
+ return;
+ }
+
+ parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+
+ /* handle expander add */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+ if (_scsih_expander_add(ioc, parent_handle) != 0)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
+ parent_handle);
+ if (sas_expander) {
+ sas_address = sas_expander->sas_address;
+ max_phys = sas_expander->num_phys;
+ } else if (parent_handle < ioc->sas_hba.num_phys) {
+ sas_address = ioc->sas_hba.sas_address;
+ max_phys = ioc->sas_hba.num_phys;
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "ignoring "
+ "expander event\n", ioc->name));
+ return;
+ }
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery)
+ return;
+ phy_number = event_data->StartPhyNum + i;
+ if (phy_number >= max_phys)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if ((event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, handle);
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ _scsih_add_device(ioc, handle, phy_number, 0);
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+ _scsih_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+
+ /* handle expander removal */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+ sas_expander)
+ mpt2sas_expander_remove(ioc, sas_address);
+
+}
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
+ reason_str = "sata init failure";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality complete";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ printk(MPT2SAS_INFO_FMT "device status change: (%s)\n"
+ "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
+ printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ printk(KERN_INFO "\n");
+}
+#endif
+
+/**
+ * _scsih_sas_device_status_change_event - handle device status change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT2SAS_TARGET *target_priv_data;
+ struct _sas_device *sas_device;
+ u64 sas_address;
+ unsigned long flags;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data =
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ event_data);
+#endif
+
+ /* In MPI Revision K (0xC), the internal device reset complete was
+ * implemented, so avoid setting tm_busy flag for older firmware.
+ */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+ return;
+
+ if (event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(event_data->SASAddress);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device || !sas_device->starget) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ target_priv_data = sas_device->starget->hostdata;
+ if (!target_priv_data) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ reason_str = "enclosure add";
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ reason_str = "enclosure remove";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ printk(MPT2SAS_INFO_FMT "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx)"
+ " number slots(%d)\n", ioc->name, reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
+}
+#endif
+
+/**
+ * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ (Mpi2EventDataSasEnclDevStatusChange_t *)
+ fw_event->event_data);
+#endif
+}
+
+/**
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
+ u16 smid, handle;
+ u32 lun;
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ u32 termination_count;
+ u32 query_count;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ fw_event->event_data;
+ u16 ioc_status;
+ unsigned long flags;
+ int r;
+ u8 max_retries = 0;
+ u8 task_abort_retries;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ pr_info(MPT2SAS_FMT
+ "%s: enter: phy number(%d), width(%d)\n",
+ ioc->name, __func__, event_data->PhyNum,
+ event_data->PortWidth);
+
+ _scsih_block_io_all_device(ioc);
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ mpi_reply = ioc->tm_cmds.reply;
+broadcast_aen_retry:
+
+ /* sanity checks for retrying this loop */
+ if (max_retries++ == 5) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: giving up\n",
+ ioc->name, __func__));
+ goto out;
+ } else if (max_retries > 1)
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %d retry\n",
+ ioc->name, __func__, max_retries - 1));
+
+ termination_count = 0;
+ query_count = 0;
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ if (ioc->shost_recovery)
+ goto out;
+ scmd = _scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ sdev = scmd->device;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+ continue;
+ /* skip hidden raid components */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ /* skip volumes */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_VOLUME)
+ continue;
+
+ handle = sas_device_priv_data->sas_target->handle;
+ lun = sas_device_priv_data->lun;
+ query_count++;
+
+ if (ioc->shost_recovery)
+ goto out;
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt2sas_scsih_issue_tm: FAILED when sending "
+ "QUERY_TASK: scmd(%p)\n", scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "query task: FAILED "
+ "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status,
+ scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ /* see if IO is still owned by IOC and target */
+ if (mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ continue;
+ }
+ task_abort_retries = 0;
+ tm_retry:
+ if (task_abort_retries++ == 60) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s: ABORT_TASK: giving up\n", ioc->name,
+ __func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ if (ioc->shost_recovery)
+ goto out_no_lock;
+
+ r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
+ "scmd(%p)\n", scmd);
+ goto tm_retry;
+ }
+
+ if (task_abort_retries > 1)
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt2sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+ " scmd(%p)\n",
+ task_abort_retries - 1, scmd);
+
+ termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ }
+
+ if (ioc->broadcast_aen_pending) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: loop back due to"
+ " pending AEN\n", ioc->name, __func__));
+ ioc->broadcast_aen_pending = 0;
+ goto broadcast_aen_retry;
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ if (!ioc->shost_recovery)
+ _scsih_ublock_io_all_device(ioc);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+}
+
+/**
+ * _scsih_sas_discovery_event - handle discovery events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
+ printk(MPT2SAS_INFO_FMT "discovery event: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ printk("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ printk("\n");
+ }
+#endif
+
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
+ !ioc->sas_hba.num_phys) {
+ if (disable_discovery > 0 && ioc->shost_recovery) {
+ /* Wait for the reset to complete */
+ while (ioc->shost_recovery)
+ ssleep(1);
+ }
+ _scsih_sas_host_add(ioc);
+ }
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ **/
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ int rc;
+
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+ sdev->no_uld_attach ? "hidding" : "exposing");
+ rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ u64 wwid;
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+ mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ printk(MPT2SAS_ERR_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ return;
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ printk(MPT2SAS_ERR_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ if (!ioc->wait_for_discovery_to_complete) {
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ _scsih_determine_boot_device(ioc, raid_device, 1);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_sas_volume_delete - delete volume
+ * @ioc: per adapter object
+ * @handle: volume device handle
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget = NULL;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device) {
+ if (raid_device->starget) {
+ starget = raid_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+ "(0x%016llx)\n", ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+/**
+ * _scsih_sas_pd_expose - expose pd component to /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* exposing raid component */
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt2sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt2sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* hiding raid component */
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - remove pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u64 sas_address;
+ u16 parent_handle;
+
+ set_bit(handle, ioc->pd_handles);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ return;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+}
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ u8 element_type;
+ int i;
+ char *reason_str = NULL, *element_str = NULL;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+
+ printk(MPT2SAS_INFO_FMT "raid config change: (%s), elements(%d)\n",
+ ioc->name, (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
+ "foreign" : "native", event_data->NumElements);
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ reason_str = "add";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ reason_str = "remove";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
+ reason_str = "no change";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ reason_str = "hide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ reason_str = "unhide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ reason_str = "volume_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ reason_str = "volume_deleted";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ reason_str = "pd_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ reason_str = "pd_deleted";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ element_type = le16_to_cpu(element->ElementFlags) &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
+ switch (element_type) {
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
+ element_str = "volume";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
+ element_str = "phys disk";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
+ element_str = "hot spare";
+ break;
+ default:
+ element_str = "unknown element";
+ break;
+ }
+ printk(KERN_INFO "\t(%s:%s), vol handle(0x%04x), "
+ "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
+ reason_str, le16_to_cpu(element->VolDevHandle),
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_ir_config_change_event - handle ir configuration change events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u8 foreign_config;
+ Mpi2EventDataIrConfigChangeList_t *event_data =
+ (Mpi2EventDataIrConfigChangeList_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ && !ioc->hide_ir_msg)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+#endif
+
+ if (ioc->shost_recovery)
+ return;
+
+ foreign_config = (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ if (!foreign_config)
+ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ if (!foreign_config)
+ _scsih_sas_volume_delete(ioc,
+ le16_to_cpu(element->VolDevHandle));
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_hide(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_expose(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_delete(ioc, element);
+ break;
+ }
+ }
+}
+
+/**
+ * _scsih_sas_ir_volume_event - IR volume event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u64 wwid;
+ unsigned long flags;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u32 state;
+ int rc;
+ Mpi2EventDataIrVolume_t *event_data =
+ (Mpi2EventDataIrVolume_t *)
+ fw_event->event_data;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+ if (!ioc->hide_ir_msg)
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
+ "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+
+ switch (state) {
+ case MPI2_RAID_VOL_STATE_MISSING:
+ case MPI2_RAID_VOL_STATE_FAILED:
+ _scsih_sas_volume_delete(ioc, handle);
+ break;
+
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ break;
+
+ mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ printk(MPT2SAS_ERR_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ printk(MPT2SAS_ERR_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ break;
+
+ case MPI2_RAID_VOL_STATE_INITIALIZING:
+ default:
+ break;
+ }
+}
+
+/**
+ * _scsih_sas_ir_physical_disk_event - PD event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u16 handle, parent_handle;
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ Mpi2EventDataIrPhysicalDisk_t *event_data =
+ (Mpi2EventDataIrPhysicalDisk_t *)
+ fw_event->event_data;
+ u64 sas_address;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->PhysDiskDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+
+ if (!ioc->hide_ir_msg)
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), "
+ "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+
+ switch (state) {
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
+
+ if (!ioc->is_warpdrive)
+ set_bit(handle, ioc->pd_handles);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+
+ break;
+
+ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+/**
+ * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2EventDataIrOperationStatus_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->RAIDOperation) {
+ case MPI2_EVENT_IR_RAIDOP_RESYNC:
+ reason_str = "resync";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
+ reason_str = "online capacity expansion";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
+ reason_str = "consistency check";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
+ reason_str = "background init";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
+ reason_str = "make data consistent";
+ break;
+ }
+
+ if (!reason_str)
+ return;
+
+ printk(MPT2SAS_INFO_FMT "raid operational status: (%s)"
+ "\thandle(0x%04x), percent complete(%d)\n",
+ ioc->name, reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
+}
+#endif
+
+/**
+ * _scsih_sas_ir_operation_status_event - handle RAID operation events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataIrOperationStatus_t *event_data =
+ (Mpi2EventDataIrOperationStatus_t *)
+ fw_event->event_data;
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ && !ioc->hide_ir_msg)
+ _scsih_sas_ir_operation_status_event_debug(ioc,
+ event_data);
+#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->percent_complete =
+ event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_devices as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @slot: enclosure slot id
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsi_remove_unresponsive_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u16 slot, u16 handle)
+{
+ struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address == sas_address &&
+ sas_device->slot == slot) {
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget)
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx), "
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n", handle,
+ (unsigned long long)sas_device->sas_address,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->handle == handle)
+ goto out;
+ printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_sas_devices -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ __le64 sas_address;
+ u16 handle;
+ u32 device_info;
+ u16 slot;
+
+ printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ continue;
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ slot = le16_to_cpu(sas_device_pg0.Slot);
+ _scsih_mark_responding_sas_device(ioc, sas_address, slot,
+ handle);
+ }
+out:
+ printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_raid_device - mark a raid_device as responding
+ * @ioc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsi_remove_unresponsive_raid_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle)
+{
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ starget_printk(KERN_INFO, raid_device->starget,
+ "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ (unsigned long long)raid_device->wwid);
+ /*
+ * WARPDRIVE: The handles of the PDs might have changed
+ * across the host reset so re-initialize the
+ * required data for Direct IO
+ */
+ _scsih_init_warpdrive_properties(ioc, raid_device);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ if (raid_device->handle == handle) {
+ spin_unlock_irqrestore(&ioc->raid_device_lock,
+ flags);
+ return;
+ }
+ printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
+ raid_device->handle);
+ raid_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_raid_devices -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u8 phys_disk_num;
+
+ if (!ioc->ir_firmware)
+ return;
+
+ printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n",
+ ioc->name);
+
+ if (list_empty(&ioc->raid_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+ _scsih_mark_responding_raid_device(ioc,
+ le64_to_cpu(volume_pg1.WWID), handle);
+ }
+
+ /* refresh the pd_handles */
+ if (!ioc->is_warpdrive) {
+ phys_disk_num = 0xFF;
+ memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+ while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ set_bit(handle, ioc->pd_handles);
+ }
+ }
+out:
+ printk(MPT2SAS_INFO_FMT "search for responding raid volumes: "
+ "complete\n", ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_expander - mark a expander as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle:
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsi_remove_unresponsive_expanders.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ sas_expander->responding = 1;
+ if (sas_expander->handle == handle)
+ goto out;
+ printk(KERN_INFO "\texpander(0x%016llx): handle changed"
+ " from(0x%04x) to (0x%04x)!!!\n",
+ (unsigned long long)sas_expander->sas_address,
+ sas_expander->handle, handle);
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ goto out;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u64 sas_address;
+ u16 handle;
+
+ printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_expander_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ printk(KERN_INFO "\texpander present: handle(0x%04x), "
+ "sas_addr(0x%016llx)\n", handle,
+ (unsigned long long)sas_address);
+ _scsih_mark_responding_expander(ioc, sas_address, handle);
+ }
+
+ out:
+ printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name);
+}
+
+/**
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *sas_device_next;
+ struct _sas_node *sas_expander, *sas_expander_next;
+ struct _raid_device *raid_device, *raid_device_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
+ ioc->name);
+
+ /* removing unresponding end devices */
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
+ ioc->name);
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_list, list) {
+ if (!sas_device->responding)
+ mpt2sas_device_remove_by_sas_address(ioc,
+ sas_device->sas_address);
+ else
+ sas_device->responding = 0;
+ }
+
+ /* removing unresponding volumes */
+ if (ioc->ir_firmware) {
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: "
+ "volumes\n", ioc->name);
+ list_for_each_entry_safe(raid_device, raid_device_next,
+ &ioc->raid_device_list, list) {
+ if (!raid_device->responding)
+ _scsih_sas_volume_delete(ioc,
+ raid_device->handle);
+ else
+ raid_device->responding = 0;
+ }
+ }
+ /* removing unresponding expanders */
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: expanders\n",
+ ioc->name);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(sas_expander, sas_expander_next,
+ &ioc->sas_expander_list, list) {
+ if (!sas_expander->responding)
+ list_move_tail(&sas_expander->list, &tmp_list);
+ else
+ sas_expander->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+ list) {
+ list_del(&sas_expander->list);
+ _scsih_expander_node_remove(ioc, sas_expander);
+ }
+ printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
+ ioc->name);
+ /* unblock devices */
+ _scsih_ublock_io_all_device(ioc);
+}
+
+static void
+_scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander, u16 handle)
+{
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int i;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ mpt2sas_transport_update_links(ioc, sas_expander->sas_address,
+ le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+ expander_pg1.NegotiatedLinkRate >> 4);
+ }
+}
+
+/**
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2EventIrConfigElement_t element;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 phys_disk_num;
+ u16 ioc_status;
+ u16 handle, parent_handle;
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
+ u8 retry_count;
+ unsigned long flags;
+
+ printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders start\n",
+ ioc->name);
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+ else {
+ printk(MPT2SAS_INFO_FMT "\tBEFORE adding expander: "
+ "handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
+ printk(MPT2SAS_INFO_FMT "\tAFTER adding expander: "
+ "handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ }
+ }
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders complete\n",
+ ioc->name);
+
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices phys disk start\n", ioc->name);
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
+ printk(MPT2SAS_INFO_FMT "\tBEFORE adding phys disk: "
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 1)) {
+ ssleep(1);
+ }
+ printk(MPT2SAS_INFO_FMT "\tAFTER adding phys disk: "
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk complete\n",
+ ioc->name);
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name);
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (raid_device)
+ continue;
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
+ printk(MPT2SAS_INFO_FMT "\tBEFORE adding volume: "
+ " handle (0x%04x)\n", ioc->name,
+ volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
+ printk(MPT2SAS_INFO_FMT "\tAFTER adding volume: "
+ " handle (0x%04x)\n", ioc->name,
+ volume_pg1.DevHandle);
+ }
+ }
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes complete\n",
+ ioc->name);
+
+ skip_to_sas:
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices start\n",
+ ioc->name);
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
+ " ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+ continue;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+ printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
+ "handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 0)) {
+ ssleep(1);
+ }
+ printk(MPT2SAS_INFO_FMT "\tAFTER adding end device: "
+ "handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
+ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices complete\n",
+ ioc->name);
+
+ printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
+}
+
+
+/**
+ * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
+ * MPT2_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
+{
+ switch (reset_phase) {
+ case MPT2_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT2_IOC_AFTER_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+ break;
+ case MPT2_IOC_DONE_RESET:
+ dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
+ "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
+ _scsih_sas_host_refresh(ioc);
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
+ }
+ break;
+ }
+}
+
+/**
+ * _firmware_event_work - delayed task for processing firmware events
+ * @ioc: per adapter object
+ * @work: equal to the fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event = container_of(work,
+ struct fw_event_work, delayed_work.work);
+ struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
+
+ /* the queue is being flushed so ignore this event */
+ if (ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ _scsih_fw_event_free(ioc, fw_event);
+ return;
+ }
+
+ switch (fw_event->event) {
+ case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
+ while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ ssleep(1);
+ _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_scan_for_devices_after_reset(ioc);
+ break;
+ case MPT2SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
+ missing_delay[1]);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+ "from worker thread\n", ioc->name));
+ break;
+ case MPT2SAS_TURN_ON_PFA_LED:
+ _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_sas_topology_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ _scsih_sas_discovery_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ _scsih_sas_broadcast_primitive_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ _scsih_sas_enclosure_dev_status_change_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_sas_ir_config_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_sas_ir_volume_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ _scsih_sas_ir_physical_disk_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ _scsih_sas_ir_operation_status_event(ioc, fw_event);
+ break;
+ }
+ _scsih_fw_event_free(ioc, fw_event);
+}
+
+/**
+ * mpt2sas_scsih_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Returns void.
+ */
+void
+mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 event;
+ u16 sz;
+
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ /* handle these */
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ mpi_reply->EventData;
+
+ if (baen_data->Primitive !=
+ MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return;
+
+ if (ioc->broadcast_aen_busy) {
+ ioc->broadcast_aen_pending++;
+ return;
+ } else
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
+
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_topo_delete_events(ioc,
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_check_ir_config_unhide_events(ioc,
+ (Mpi2EventDataIrConfigChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_check_volume_delete_events(ioc,
+ (Mpi2EventDataIrVolume_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ {
+ Mpi2EventDataLogEntryAdded_t *log_entry;
+ __le32 *log_code;
+
+ if (!ioc->is_warpdrive)
+ break;
+
+ log_entry = (Mpi2EventDataLogEntryAdded_t *)
+ mpi_reply->EventData;
+ log_code = (__le32 *)log_entry->LogData;
+
+ if (le16_to_cpu(log_entry->LogEntryQualifier)
+ != MPT2_WARPDRIVE_LOGENTRY)
+ break;
+
+ switch (le32_to_cpu(*log_code)) {
+ case MPT2_WARPDRIVE_LC_SSDT:
+ printk(MPT2SAS_WARN_FMT "WarpDrive Warning: "
+ "IO Throttling has occurred in the WarpDrive "
+ "subsystem. Check WarpDrive documentation for "
+ "additional details.\n", ioc->name);
+ break;
+ case MPT2_WARPDRIVE_LC_SSDLW:
+ printk(MPT2SAS_WARN_FMT "WarpDrive Warning: "
+ "Program/Erase Cycles for the WarpDrive subsystem "
+ "in degraded range. Check WarpDrive documentation "
+ "for additional details.\n", ioc->name);
+ break;
+ case MPT2_WARPDRIVE_LC_SSDLF:
+ printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: "
+ "There are no Program/Erase Cycles for the "
+ "WarpDrive subsystem. The storage device will be "
+ "in read-only mode. Check WarpDrive documentation "
+ "for additional details.\n", ioc->name);
+ break;
+ case MPT2_WARPDRIVE_LC_BRMF:
+ printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: "
+ "The Backup Rail Monitor has failed on the "
+ "WarpDrive subsystem. Check WarpDrive "
+ "documentation for additional details.\n",
+ ioc->name);
+ break;
+ }
+
+ break;
+ }
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ break;
+
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ _scsih_temp_threshold_events(ioc,
+ (Mpi2EventDataTemperature_t *)
+ mpi_reply->EventData);
+ break;
+
+ default: /* ignore the rest */
+ return;
+ }
+
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fw_event->event_data, mpi_reply->EventData, sz);
+ fw_event->ioc = ioc;
+ fw_event->VF_ID = mpi_reply->VF_ID;
+ fw_event->VP_ID = mpi_reply->VP_ID;
+ fw_event->event = event;
+ _scsih_fw_event_add(ioc, fw_event);
+ return;
+}
+
+/* shost template */
+static struct scsi_host_template scsih_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT2SAS_DRIVER_NAME,
+ .queuecommand = _scsih_qcmd,
+ .target_alloc = _scsih_target_alloc,
+ .slave_alloc = _scsih_slave_alloc,
+ .slave_configure = _scsih_slave_configure,
+ .target_destroy = _scsih_target_destroy,
+ .slave_destroy = _scsih_slave_destroy,
+ .scan_finished = _scsih_scan_finished,
+ .scan_start = _scsih_scan_start,
+ .change_queue_depth = _scsih_change_queue_depth,
+ .eh_abort_handler = _scsih_abort,
+ .eh_device_reset_handler = _scsih_dev_reset,
+ .eh_target_reset_handler = _scsih_target_reset,
+ .eh_host_reset_handler = _scsih_host_reset,
+ .bios_param = _scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT2SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mpt2sas_host_attrs,
+ .sdev_attrs = mpt2sas_dev_attrs,
+ .track_queue_depth = 1,
+};
+
+/**
+ * _scsih_expander_node_remove - removing expander device from list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt2sas_port, *next;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mpt2sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (ioc->shost_recovery)
+ return;
+ if (mpt2sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt2sas_device_remove_by_sas_address(ioc,
+ mpt2sas_port->remote_identify.sas_address);
+ else if (mpt2sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt2sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt2sas_expander_remove(ioc,
+ mpt2sas_port->remote_identify.sas_address);
+ }
+
+ mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent);
+
+ printk(MPT2SAS_INFO_FMT "expander_remove: handle"
+ "(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * _scsih_ir_shutdown - IR shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+
+ /* is IR firmware build loaded ? */
+ if (!ioc->ir_firmware)
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT2_CMD_PENDING;
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+
+ if (!ioc->hide_ir_msg)
+ printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name);
+ init_completion(&ioc->scsih_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+
+ if (!ioc->hide_ir_msg)
+ printk(MPT2SAS_INFO_FMT "IR shutdown (complete): "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _scsih_shutdown - routine call during system shutdown
+ * @pdev: PCI device struct
+ *
+ * Return nothing.
+ */
+static void
+_scsih_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ _scsih_ir_shutdown(ioc);
+ mpt2sas_base_detach(ioc);
+}
+
+/**
+ * _scsih_remove - detach and remove add host
+ * @pdev: PCI device struct
+ *
+ * Routine called when unloading the driver.
+ * Return nothing.
+ */
+static void
+_scsih_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct _sas_port *mpt2sas_port, *next_port;
+ struct _raid_device *raid_device, *next;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ /* release all the volumes */
+ _scsih_ir_shutdown(ioc);
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+ "(0x%016llx)\n", ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
+ /* free ports attached to the sas_host */
+ list_for_each_entry_safe(mpt2sas_port, next_port,
+ &ioc->sas_hba.sas_port_list, port_list) {
+ if (mpt2sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt2sas_device_remove_by_sas_address(ioc,
+ mpt2sas_port->remote_identify.sas_address);
+ else if (mpt2sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt2sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt2sas_expander_remove(ioc,
+ mpt2sas_port->remote_identify.sas_address);
+ }
+
+ /* free phys attached to the sas_host */
+ if (ioc->sas_hba.num_phys) {
+ kfree(ioc->sas_hba.phy);
+ ioc->sas_hba.phy = NULL;
+ ioc->sas_hba.num_phys = 0;
+ }
+
+ sas_remove_host(shost);
+ scsi_remove_host(shost);
+ mpt2sas_base_detach(ioc);
+ list_del(&ioc->list);
+ scsi_host_put(shost);
+}
+
+/**
+ * _scsih_probe_boot_devices - reports 1st device
+ * @ioc: per adapter object
+ *
+ * If specified in bios page 2, this routine reports the 1st
+ * device scsi-ml or sas transport for persistent boot device
+ * purposes. Please refer to function _scsih_determine_boot_device()
+ */
+static void
+_scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
+{
+ u8 is_raid;
+ void *device;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u64 sas_address_parent;
+ u64 sas_address;
+ unsigned long flags;
+ int rc;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ device = NULL;
+ is_raid = 0;
+ if (ioc->req_boot_device.device) {
+ device = ioc->req_boot_device.device;
+ is_raid = ioc->req_boot_device.is_raid;
+ } else if (ioc->req_alt_boot_device.device) {
+ device = ioc->req_alt_boot_device.device;
+ is_raid = ioc->req_alt_boot_device.is_raid;
+ } else if (ioc->current_boot_device.device) {
+ device = ioc->current_boot_device.device;
+ is_raid = ioc->current_boot_device.is_raid;
+ }
+
+ if (!device)
+ return;
+
+ if (is_raid) {
+ raid_device = device;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = device;
+ handle = sas_device->handle;
+ sas_address_parent = sas_device->sas_address_parent;
+ sas_address = sas_device->sas_address;
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (ioc->hide_drives)
+ return;
+ if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading) {
+ mpt2sas_transport_port_remove(ioc,
+ sas_address,
+ sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+ }
+}
+
+/**
+ * _scsih_probe_raid - reporting raid volumes to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct _raid_device *raid_device, *raid_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_device, raid_next,
+ &ioc->raid_device_list, list) {
+ if (raid_device->starget)
+ continue;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+}
+
+/**
+ * _scsih_probe_sas - reporting sas devices to sas transport
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *next;
+ unsigned long flags;
+
+ /* SAS Device List */
+ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
+ list) {
+
+ if (ioc->hide_drives)
+ continue;
+
+ if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading) {
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
+ }
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_probe_devices - probing for devices
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
+{
+ u16 volume_mapping_flags;
+
+ if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
+ return; /* return when IOC doesn't support initiator mode */
+
+ _scsih_probe_boot_devices(ioc);
+
+ if (ioc->ir_firmware) {
+ volume_mapping_flags =
+ le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ _scsih_probe_raid(ioc);
+ _scsih_probe_sas(ioc);
+ } else {
+ _scsih_probe_sas(ioc);
+ _scsih_probe_raid(ioc);
+ }
+ } else
+ _scsih_probe_sas(ioc);
+}
+
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus. In our implemention, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+ mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+ if (disable_discovery > 0)
+ return;
+
+ ioc->start_scan = 1;
+ rc = mpt2sas_port_enable(ioc);
+
+ if (rc != 0)
+ printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodically until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (disable_discovery > 0) {
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ return 1;
+ }
+
+ if (time >= (300 * HZ)) {
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+ printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
+ "(timeout=300s)\n", ioc->name);
+ ioc->is_driver_loading = 0;
+ return 1;
+ }
+
+ if (ioc->start_scan)
+ return 0;
+
+ if (ioc->start_scan_failed) {
+ printk(MPT2SAS_INFO_FMT "port enable: FAILED with "
+ "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed);
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ ioc->remove_host = 1;
+ return 1;
+ }
+
+ printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+ mpt2sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+ return 1;
+}
+
+
+/**
+ * _scsih_probe - attach and add scsi host
+ * @pdev: PCI device struct
+ * @id: pci device id
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct MPT2SAS_ADAPTER *ioc;
+ struct Scsi_Host *shost;
+ int rv;
+
+ shost = scsi_host_alloc(&scsih_driver_template,
+ sizeof(struct MPT2SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+
+ /* init local params */
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER));
+ INIT_LIST_HEAD(&ioc->list);
+ list_add_tail(&ioc->list, &mpt2sas_ioc_list);
+ ioc->shost = shost;
+ ioc->id = mpt_ids++;
+ sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
+ ioc->pdev = pdev;
+ if (id->device == MPI2_MFGPAGE_DEVID_SSS6200) {
+ ioc->is_warpdrive = 1;
+ ioc->hide_ir_msg = 1;
+ } else
+ ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
+ ioc->scsi_io_cb_idx = scsi_io_cb_idx;
+ ioc->tm_cb_idx = tm_cb_idx;
+ ioc->ctl_cb_idx = ctl_cb_idx;
+ ioc->base_cb_idx = base_cb_idx;
+ ioc->port_enable_cb_idx = port_enable_cb_idx;
+ ioc->transport_cb_idx = transport_cb_idx;
+ ioc->scsih_cb_idx = scsih_cb_idx;
+ ioc->config_cb_idx = config_cb_idx;
+ ioc->tm_tr_cb_idx = tm_tr_cb_idx;
+ ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
+ ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
+ ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* misc semaphores and spin locks */
+ mutex_init(&ioc->reset_in_progress_mutex);
+ spin_lock_init(&ioc->ioc_reset_in_progress_lock);
+ spin_lock_init(&ioc->scsi_lookup_lock);
+ spin_lock_init(&ioc->sas_device_lock);
+ spin_lock_init(&ioc->sas_node_lock);
+ spin_lock_init(&ioc->fw_event_lock);
+ spin_lock_init(&ioc->raid_device_lock);
+
+ INIT_LIST_HEAD(&ioc->sas_device_list);
+ INIT_LIST_HEAD(&ioc->sas_device_init_list);
+ INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ INIT_LIST_HEAD(&ioc->raid_device_list);
+ INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+
+ /* init shost parameters */
+ shost->max_cmd_len = 32;
+ shost->max_lun = max_lun;
+ shost->transportt = mpt2sas_transport_template;
+ shost->unique_id = ioc->id;
+
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "value of 64.\n", ioc->name, max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+ "for max_sectors, range is 64 to 8192. Assigning "
+ "default value of 32767.\n", ioc->name,
+ max_sectors);
+ } else {
+ shost->max_sectors = max_sectors & 0xFFFE;
+ printk(MPT2SAS_INFO_FMT "The max_sectors value is "
+ "set to %d\n", ioc->name, shost->max_sectors);
+ }
+ }
+
+ /* register EEDP capabilities with SCSI layer */
+ if (prot_mask)
+ scsi_host_set_prot(shost, prot_mask);
+ else
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ /* event thread */
+ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
+ "fw_event%d", ioc->id);
+ ioc->firmware_event_thread = create_singlethread_workqueue(
+ ioc->firmware_event_name);
+ if (!ioc->firmware_event_thread) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
+ goto out_thread_fail;
+ }
+
+ ioc->is_driver_loading = 1;
+ if ((mpt2sas_base_attach(ioc))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
+ goto out_attach_fail;
+ }
+
+ if (ioc->is_warpdrive) {
+ if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
+ ioc->hide_drives = 0;
+ else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
+ ioc->hide_drives = 1;
+ else {
+ if (_scsih_get_num_volumes(ioc))
+ ioc->hide_drives = 1;
+ else
+ ioc->hide_drives = 0;
+ }
+ } else
+ ioc->hide_drives = 0;
+
+ rv = scsi_add_host(shost, &pdev->dev);
+ if (rv) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_add_shost_fail;
+ }
+
+ scsi_scan_host(shost);
+
+ return 0;
+
+ out_add_shost_fail:
+ mpt2sas_base_detach(ioc);
+ out_attach_fail:
+ destroy_workqueue(ioc->firmware_event_thread);
+ out_thread_fail:
+ list_del(&ioc->list);
+ scsi_host_put(shost);
+ return rv;
+}
+
+#ifdef CONFIG_PM
+/**
+ * _scsih_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state;
+
+ mpt2sas_base_stop_watchdog(ioc);
+ scsi_block_requests(shost);
+ _scsih_ir_shutdown(ioc);
+ device_state = pci_choose_state(pdev, state);
+ printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
+ "operating state [D%d]\n", ioc->name, pdev,
+ pci_name(pdev), device_state);
+
+ mpt2sas_base_free_resources(ioc);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/**
+ * _scsih_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous "
+ "operating state [D%d]\n", ioc->name, pdev,
+ pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ ioc->pdev = pdev;
+ r = mpt2sas_base_map_resources(ioc);
+ if (r)
+ return r;
+
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
+ mpt2sas_base_start_watchdog(ioc);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ ioc->pci_error_recovery = 1;
+ scsi_block_requests(ioc->shost);
+ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ ioc->pci_error_recovery = 1;
+ mpt2sas_base_stop_watchdog(ioc);
+ _scsih_flush_running_cmds(ioc);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pci_error_recovery = 0;
+ ioc->pdev = pdev;
+ pci_restore_state(pdev);
+ rc = mpt2sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ printk(MPT2SAS_WARN_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that its
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt2sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static const struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
+
+static struct pci_driver scsih_driver = {
+ .name = MPT2SAS_DRIVER_NAME,
+ .id_table = scsih_pci_table,
+ .probe = _scsih_probe,
+ .remove = _scsih_remove,
+ .shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
+#ifdef CONFIG_PM
+ .suspend = _scsih_suspend,
+ .resume = _scsih_resume,
+#endif
+};
+
+/* raid transport support */
+static struct raid_function_template mpt2sas_raid_functions = {
+ .cookie = &scsih_driver_template,
+ .is_raid = _scsih_is_raid,
+ .get_resync = _scsih_get_resync,
+ .get_state = _scsih_get_state,
+};
+
+/**
+ * _scsih_init - main entry point for this driver.
+ *
+ * Returns 0 success, anything else error.
+ */
+static int __init
+_scsih_init(void)
+{
+ int error;
+
+ mpt_ids = 0;
+ printk(KERN_INFO "%s version %s loaded\n", MPT2SAS_DRIVER_NAME,
+ MPT2SAS_DRIVER_VERSION);
+
+ mpt2sas_transport_template =
+ sas_attach_transport(&mpt2sas_transport_functions);
+ if (!mpt2sas_transport_template)
+ return -ENODEV;
+ /* raid transport support */
+ mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
+ if (!mpt2sas_raid_template) {
+ sas_release_transport(mpt2sas_transport_template);
+ return -ENODEV;
+ }
+
+ mpt2sas_base_initialize_callback_handler();
+
+ /* queuecommand callback hander */
+ scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
+
+ /* task management callback handler */
+ tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
+
+ /* base internal commands callback handler */
+ base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
+ port_enable_cb_idx = mpt2sas_base_register_callback_handler(
+ mpt2sas_port_enable_done);
+
+ /* transport internal commands callback handler */
+ transport_cb_idx = mpt2sas_base_register_callback_handler(
+ mpt2sas_transport_done);
+
+ /* scsih internal commands callback handler */
+ scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_done);
+
+ /* configuration page API internal commands callback handler */
+ config_cb_idx = mpt2sas_base_register_callback_handler(
+ mpt2sas_config_done);
+
+ /* ctl module callback handler */
+ ctl_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_ctl_done);
+
+ tm_tr_cb_idx = mpt2sas_base_register_callback_handler(
+ _scsih_tm_tr_complete);
+
+ tm_tr_volume_cb_idx = mpt2sas_base_register_callback_handler(
+ _scsih_tm_volume_tr_complete);
+
+ tm_sas_control_cb_idx = mpt2sas_base_register_callback_handler(
+ _scsih_sas_control_complete);
+
+ mpt2sas_ctl_init();
+
+ error = pci_register_driver(&scsih_driver);
+ if (error) {
+ /* raid transport support */
+ raid_class_release(mpt2sas_raid_template);
+ sas_release_transport(mpt2sas_transport_template);
+ }
+
+ return error;
+}
+
+/**
+ * _scsih_exit - exit point for this driver (when it is a module).
+ *
+ * Returns 0 success, anything else error.
+ */
+static void __exit
+_scsih_exit(void)
+{
+ printk(KERN_INFO "mpt2sas version %s unloading\n",
+ MPT2SAS_DRIVER_VERSION);
+
+ pci_unregister_driver(&scsih_driver);
+
+ mpt2sas_ctl_exit();
+
+ mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
+ mpt2sas_base_release_callback_handler(tm_cb_idx);
+ mpt2sas_base_release_callback_handler(base_cb_idx);
+ mpt2sas_base_release_callback_handler(port_enable_cb_idx);
+ mpt2sas_base_release_callback_handler(transport_cb_idx);
+ mpt2sas_base_release_callback_handler(scsih_cb_idx);
+ mpt2sas_base_release_callback_handler(config_cb_idx);
+ mpt2sas_base_release_callback_handler(ctl_cb_idx);
+
+ mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
+ mpt2sas_base_release_callback_handler(tm_tr_volume_cb_idx);
+ mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
+
+ /* raid transport support */
+ raid_class_release(mpt2sas_raid_template);
+ sas_release_transport(mpt2sas_transport_template);
+
+}
+
+module_init(_scsih_init);
+module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
new file mode 100644
index 000000000..ff2500ab9
--- /dev/null
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -0,0 +1,2169 @@
+/*
+ * SAS Transport Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
+ * Copyright (C) 2007-2014 LSI Corporation
+ * Copyright (C) 20013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mpt2sas_base.h"
+/**
+ * _transport_sas_node_find_by_sas_address - sas node search
+ * @ioc: per adapter object
+ * @sas_address: sas address of expander or sas host
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Search for either hba phys or expander device based on handle, then returns
+ * the sas_node object.
+ */
+static struct _sas_node *
+_transport_sas_node_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ if (ioc->sas_hba.sas_address == sas_address)
+ return &ioc->sas_hba;
+ else
+ return mpt2sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+}
+
+/**
+ * _transport_convert_phy_link_rate -
+ * @link_rate: link rate returned from mpt firmware
+ *
+ * Convert link_rate from mpi fusion into sas_transport form.
+ */
+static enum sas_linkrate
+_transport_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI2_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+ default:
+ case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * _transport_set_identify - set identify for phys and end devices
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @identify: sas identify info
+ *
+ * Populates sas identify info.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ struct sas_identify *identify)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 device_info;
+ u32 ioc_status;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
+ "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sas_device_pg0.PhyNum;
+
+ /* device_type */
+ switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI2_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /* target_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ return 0;
+}
+
+/**
+ * mpt2sas_transport_done - internal transport layer callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated transport cmds.
+ * The callback index passed is `ioc->transport_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt2sas_transport_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->transport_cmds.status == MPT2_CMD_NOT_USED)
+ return 1;
+ if (ioc->transport_cmds.smid != smid)
+ return 1;
+ ioc->transport_cmds.status |= MPT2_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->transport_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->transport_cmds.status |= MPT2_CMD_REPLY_VALID;
+ }
+ ioc->transport_cmds.status &= ~MPT2_CMD_PENDING;
+ complete(&ioc->transport_cmds.done);
+ return 1;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request{
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply{
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT2_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+
+ if (!data_out) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct rep_manu_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct rep_manu_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct rep_manu_reply), data_out_dma +
+ sizeof(struct rep_manu_request));
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
+ "send to sas_addr(0x%016llx)\n", ioc->name,
+ (unsigned long long)sas_address));
+ init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
+ "complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
+ u8 *tmp;
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "report_manufacture - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "report_manufacture - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_delete_port - helper function to removing a port
+ * @ioc: per adapter object
+ * @mpt2sas_port: mpt2sas per port object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_port(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_port *mpt2sas_port)
+{
+ u64 sas_address = mpt2sas_port->remote_identify.sas_address;
+ enum sas_device_type device_type =
+ mpt2sas_port->remote_identify.device_type;
+
+ dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
+ "remove: sas_addr(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ if (device_type == SAS_END_DEVICE)
+ mpt2sas_device_remove_by_sas_address(ioc, sas_address);
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpt2sas_expander_remove(ioc, sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_delete_phy - helper function to removing single phy from port
+ * @ioc: per adapter object
+ * @mpt2sas_port: mpt2sas per port object
+ * @mpt2sas_phy: mpt2sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_port *mpt2sas_port, struct _sas_phy *mpt2sas_phy)
+{
+ u64 sas_address = mpt2sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mpt2sas_phy->phy_id);
+
+ list_del(&mpt2sas_phy->port_siblings);
+ mpt2sas_port->num_phys--;
+ sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
+ mpt2sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function to adding single phy to port
+ * @ioc: per adapter object
+ * @mpt2sas_port: mpt2sas per port object
+ * @mpt2sas_phy: mpt2sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_port *mpt2sas_port,
+ struct _sas_phy *mpt2sas_phy)
+{
+ u64 sas_address = mpt2sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
+ "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mpt2sas_phy->phy_id);
+
+ list_add_tail(&mpt2sas_phy->port_siblings, &mpt2sas_port->phy_list);
+ mpt2sas_port->num_phys++;
+ sas_port_add_phy(mpt2sas_port->port, mpt2sas_phy->phy);
+ mpt2sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt2sas_phy: mpt2sas per phy object
+ * @sas_address: sas address of device/expander were phy needs to be added to
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
+struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy, u64 sas_address)
+{
+ struct _sas_port *mpt2sas_port;
+ struct _sas_phy *phy_srch;
+
+ if (mpt2sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt2sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ list_for_each_entry(phy_srch, &mpt2sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch == mpt2sas_phy)
+ return;
+ }
+ _transport_add_phy(ioc, mpt2sas_port, mpt2sas_phy);
+ return;
+ }
+
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt2sas_phy: mpt2sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_del_phy_from_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy)
+{
+ struct _sas_port *mpt2sas_port, *next;
+ struct _sas_phy *phy_srch;
+
+ if (mpt2sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(phy_srch, &mpt2sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch != mpt2sas_phy)
+ continue;
+ if (mpt2sas_port->num_phys == 1)
+ _transport_delete_port(ioc, mpt2sas_port);
+ else
+ _transport_delete_phy(ioc, mpt2sas_port,
+ mpt2sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * See the explanation above from _transport_delete_duplicate_port
+ */
+static void
+_transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address != sas_address)
+ continue;
+ if (sas_node->phy[i].phy_belongs_to_port == 1)
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ &sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpt2sas_transport_port_add - insert port to the list
+ * @ioc: per adapter object
+ * @handle: handle of attached device
+ * @sas_address: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new port object to the sas_node->sas_port_list.
+ *
+ * Returns mpt2sas_port.
+ */
+struct _sas_port *
+mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ u64 sas_address)
+{
+ struct _sas_phy *mpt2sas_phy, *next;
+ struct _sas_port *mpt2sas_port;
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct sas_rphy *rphy;
+ int i;
+ struct sas_port *port;
+
+ mpt2sas_port = kzalloc(sizeof(struct _sas_port),
+ GFP_KERNEL);
+ if (!mpt2sas_port) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mpt2sas_port->port_list);
+ INIT_LIST_HEAD(&mpt2sas_port->phy_list);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!sas_node) {
+ printk(MPT2SAS_ERR_FMT "%s: Could not find "
+ "parent sas_address(0x%016llx)!\n", ioc->name,
+ __func__, (unsigned long long)sas_address);
+ goto out_fail;
+ }
+
+ if ((_transport_set_identify(ioc, handle,
+ &mpt2sas_port->remote_identify))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mpt2sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ _transport_sanity_check(ioc, sas_node,
+ mpt2sas_port->remote_identify.sas_address);
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address !=
+ mpt2sas_port->remote_identify.sas_address)
+ continue;
+ list_add_tail(&sas_node->phy[i].port_siblings,
+ &mpt2sas_port->phy_list);
+ mpt2sas_port->num_phys++;
+ }
+
+ if (!mpt2sas_port->num_phys) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
+ port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &port->dev, "add: handle(0x%04x)"
+ ", sas_addr(0x%016llx), phy(%d)\n", handle,
+ (unsigned long long)
+ mpt2sas_port->remote_identify.sas_address,
+ mpt2sas_phy->phy_id);
+ sas_port_add_phy(port, mpt2sas_phy->phy);
+ mpt2sas_phy->phy_belongs_to_port = 1;
+ }
+
+ mpt2sas_port->port = port;
+ if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ rphy = sas_end_device_alloc(port);
+ else
+ rphy = sas_expander_alloc(port,
+ mpt2sas_port->remote_identify.device_type);
+
+ rphy->identify = mpt2sas_port->remote_identify;
+ if ((sas_rphy_add(rphy))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &rphy->dev, "add: handle(0x%04x), "
+ "sas_addr(0x%016llx)\n", handle,
+ (unsigned long long)
+ mpt2sas_port->remote_identify.sas_address);
+ mpt2sas_port->rphy = rphy;
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&mpt2sas_port->port_list, &sas_node->sas_port_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* fill in report manufacture */
+ if (mpt2sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+ mpt2sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
+ _transport_expander_report_manufacture(ioc,
+ mpt2sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy));
+
+ return mpt2sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mpt2sas_phy, next, &mpt2sas_port->phy_list,
+ port_siblings)
+ list_del(&mpt2sas_phy->port_siblings);
+ kfree(mpt2sas_port);
+ return NULL;
+}
+
+/**
+ * mpt2sas_transport_port_remove - remove port from the list
+ * @ioc: per adapter object
+ * @sas_address: sas address of attached device
+ * @sas_address_parent: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_port_list.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent)
+{
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt2sas_port, *next;
+ struct _sas_node *sas_node;
+ u8 found = 0;
+ struct _sas_phy *mpt2sas_phy, *next_phy;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address_parent);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt2sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ found = 1;
+ list_del(&mpt2sas_port->port_list);
+ goto out;
+ }
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ memset(&sas_node->phy[i].remote_identify, 0 ,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(mpt2sas_phy, next_phy,
+ &mpt2sas_port->phy_list, port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mpt2sas_port->remote_identify.sas_address,
+ mpt2sas_phy->phy_id);
+ mpt2sas_phy->phy_belongs_to_port = 0;
+ sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
+ list_del(&mpt2sas_phy->port_siblings);
+ }
+ sas_port_delete(mpt2sas_port->port);
+ kfree(mpt2sas_port);
+}
+
+/**
+ * mpt2sas_transport_add_host_phy - report sas_host phy to transport
+ * @ioc: per adapter object
+ * @mpt2sas_phy: mpt2sas per phy object
+ * @phy_pg0: sas phy page 0
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt2sas_phy->phy_id;
+
+
+ INIT_LIST_HEAD(&mpt2sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt2sas_phy->handle,
+ &mpt2sas_phy->identify))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ phy->identify = mpt2sas_phy->identify;
+ mpt2sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
+ if (mpt2sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt2sas_phy->attached_handle,
+ &mpt2sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt2sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt2sas_phy->handle, (unsigned long long)
+ mpt2sas_phy->identify.sas_address,
+ mpt2sas_phy->attached_handle,
+ (unsigned long long)
+ mpt2sas_phy->remote_identify.sas_address);
+ mpt2sas_phy->phy = phy;
+ return 0;
+}
+
+
+/**
+ * mpt2sas_transport_add_expander_phy - report expander phy to transport
+ * @ioc: per adapter object
+ * @mpt2sas_phy: mpt2sas per phy object
+ * @expander_pg1: expander page 1
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt2sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt2sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt2sas_phy->handle,
+ &mpt2sas_phy->identify))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ phy->identify = mpt2sas_phy->identify;
+ mpt2sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.AttachedDevHandle);
+ if (mpt2sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt2sas_phy->attached_handle,
+ &mpt2sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt2sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt2sas_phy->handle, (unsigned long long)
+ mpt2sas_phy->identify.sas_address,
+ mpt2sas_phy->attached_handle,
+ (unsigned long long)
+ mpt2sas_phy->remote_identify.sas_address);
+ mpt2sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt2sas_transport_update_links - refreshing phy link changes
+ * @ioc: per adapter object
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_numberv: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+void
+mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct _sas_phy *mpt2sas_phy;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ mpt2sas_phy = &sas_node->phy[phy_number];
+ mpt2sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ _transport_set_identify(ioc, handle,
+ &mpt2sas_phy->remote_identify);
+ _transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
+ } else
+ memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
+ sas_identify));
+
+ if (mpt2sas_phy->phy)
+ mpt2sas_phy->phy->negotiated_linkrate =
+ _transport_convert_phy_link_rate(link_rate);
+
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
+ "refresh: parent sas_addr(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ (unsigned long long)sas_address,
+ link_rate, phy_number, handle, (unsigned long long)
+ mpt2sas_phy->remote_identify.sas_address);
+}
+
+static inline void *
+phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return shost_priv(shost);
+}
+
+static inline void *
+rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return shost_priv(shost);
+}
+
+
+/* report phy error log structure */
+struct phy_error_log_request{
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply{
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
+
+/**
+ * _transport_get_expander_phy_error_log - return expander counters
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
+ struct sas_phy *phy)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT2_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_error_log_request) +
+ sizeof(struct phy_error_log_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_error_log_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_error_log_reply), data_out_dma +
+ sizeof(struct phy_error_log_request));
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
+ "send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
+ (unsigned long long)phy->identify.sas_address, phy->number));
+ init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
+ "complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "phy_error_log - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ phy_error_log_reply = data_out +
+ sizeof(struct phy_error_log_request);
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "phy_error_log - function_result(%d)\n",
+ ioc->name, phy_error_log_reply->function_result));
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ } else
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "phy_error_log - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_get_linkerrors - return phy counters for both hba and expanders
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasPhyPage1_t phy_pg1;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_get_expander_phy_error_log(ioc, phy);
+
+ /* get hba phy error logs */
+ if ((mpt2sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
+ phy->number))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
+ "(0x%04x), loginfo(0x%08x)\n", ioc->name,
+ phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.LossDwordSynchCount);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.PhyResetProblemCount);
+ return 0;
+}
+
+/**
+ * _transport_get_enclosure_identifier -
+ * @phy: The sas phy object
+ *
+ * Obtain the enclosure logical id for an expander.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+{
+ struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device) {
+ *identifier = sas_device->enclosure_logical_id;
+ rc = 0;
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _transport_get_bay_identifier -
+ * @phy: The sas phy object
+ *
+ * Returns the slot id for a device that resides inside an enclosure.
+ */
+static int
+_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device)
+ rc = sas_device->slot;
+ else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request{
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply{
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
+
+/**
+ * _transport_expander_phy_control - expander phy control
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT2_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_control_request) +
+ sizeof(struct phy_control_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt2sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_reply), data_out_dma +
+ sizeof(struct phy_control_request));
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_control - "
+ "send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ phy_operation));
+
+ init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_control - "
+ "complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "phy_control - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "phy_control - function_result(%d)\n",
+ ioc->name, phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_phy_reset -
+ * @phy: The sas phy object
+ * @hard_reset:
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+ mpi_request.PhyNum = phy->number;
+
+ if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ printk(MPT2SAS_INFO_FMT "phy(%d), ioc_status"
+ "(0x%04x), loginfo(0x%08x)\n", ioc->name,
+ phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+ unsigned long flags;
+ int i, discovery_active;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ printk(MPT2SAS_ERR_FMT "discovery is active on "
+ "port = %d, phy = %d: unable to enable/disable "
+ "phys, try again later!\n", ioc->name,
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+ /* copy Port/PortFlags/PhyFlags from page 0 */
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ sas_iounit_pg1->PhyData[i].Port =
+ sas_iounit_pg0->PhyData[i].Port;
+ sas_iounit_pg1->PhyData[i].PortFlags =
+ (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+ sas_iounit_pg1->PhyData[i].PhyFlags =
+ (sas_iounit_pg0->PhyData[i].PhyFlags &
+ (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
+ MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+ }
+ if (enable)
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ _transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+ return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return _transport_expander_phy_control(ioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ if (phy->number != i) {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+ (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+ } else {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (rates->minimum_linkrate +
+ (rates->maximum_linkrate << 4));
+ }
+ }
+
+ if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ _transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ phy->number)) {
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+
+/**
+ * _transport_smp_handler - transport portal for smp passthru
+ * @shost: shost object
+ * @rphy: sas transport rphy object
+ * @req:
+ *
+ * This used primarily for smp_utils.
+ * Example:
+ * smp_rep_general /sys/class/bsg/expander-5:0
+ */
+static int
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ dma_addr_t dma_addr_in = 0;
+ dma_addr_t dma_addr_out = 0;
+ dma_addr_t pci_dma_in = 0;
+ dma_addr_t pci_dma_out = 0;
+ void *pci_addr_in = NULL;
+ void *pci_addr_out = NULL;
+ u16 wait_state_count;
+ struct request *rsp = req->next_rq;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ if (!rsp) {
+ printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
+ "missing\n", ioc->name, __func__);
+ return -EINVAL;
+ }
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
+ if (rc)
+ return rc;
+
+ if (ioc->transport_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: transport_cmds in use\n", ioc->name,
+ __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT2_CMD_PENDING;
+
+ /* Check if the request is split across multiple segments */
+ if (bio_multiple_segments(req->bio)) {
+ u32 offset = 0;
+
+ /* Allocate memory and copy the request */
+ pci_addr_out = pci_alloc_consistent(ioc->pdev,
+ blk_rq_bytes(req), &pci_dma_out);
+ if (!pci_addr_out) {
+ printk(MPT2SAS_INFO_FMT "%s(): PCI Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ bio_for_each_segment(bvec, req->bio, iter) {
+ memcpy(pci_addr_out + offset,
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
+ }
+ } else {
+ dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out) {
+ printk(MPT2SAS_INFO_FMT "%s(): DMA Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto free_pci;
+ }
+ }
+
+ /* Check if the response needs to be populated across
+ * multiple segments */
+ if (bio_multiple_segments(rsp->bio)) {
+ pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+ &pci_dma_in);
+ if (!pci_addr_in) {
+ printk(MPT2SAS_INFO_FMT "%s(): PCI Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ } else {
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in) {
+ printk(MPT2SAS_INFO_FMT "%s(): DMA Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto unmap;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ printk(MPT2SAS_INFO_FMT "%s: waiting for "
+ "operational state(count=%d)\n", ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt2sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto unmap;
+ }
+
+ rc = 0;
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(ioc->sas_hba.sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ if (bio_multiple_segments(req->bio)) {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(req) - 4), pci_dma_out);
+ } else {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(req) - 4), dma_addr_out);
+ }
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ if (bio_multiple_segments(rsp->bio)) {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(rsp) + 4), pci_dma_in);
+ } else {
+ ioc->base_add_sg_single(psge, sgl_flags |
+ (blk_rq_bytes(rsp) + 4), dma_addr_in);
+ }
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
+ "sending smp request\n", ioc->name, __func__));
+
+ init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT2_CMD_COMPLETE)) {
+ printk(MPT2SAS_ERR_FMT "%s : timeout\n",
+ __func__, ioc->name);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT2_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
+ "complete\n", ioc->name, __func__));
+
+ if (ioc->transport_cmds.status & MPT2_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s - reply data transfer size(%d)\n",
+ ioc->name, __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
+ req->sense_len = sizeof(*mpi_reply);
+ req->resid_len = 0;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+ if (bio_multiple_segments(rsp->bio)) {
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
+ }
+ offset += bvec.bv_len;
+ }
+ }
+ } else {
+ dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s - no reply\n", ioc->name, __func__));
+ rc = -ENXIO;
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = -ETIMEDOUT;
+ }
+
+ unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
+ out:
+ ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+struct sas_function_template mpt2sas_transport_functions = {
+ .get_linkerrors = _transport_get_linkerrors,
+ .get_enclosure_identifier = _transport_get_enclosure_identifier,
+ .get_bay_identifier = _transport_get_bay_identifier,
+ .phy_reset = _transport_phy_reset,
+ .phy_enable = _transport_phy_enable,
+ .set_phy_speed = _transport_phy_speed,
+ .smp_handler = _transport_smp_handler,
+};
+
+struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
new file mode 100644
index 000000000..4d235dd74
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -0,0 +1,67 @@
+#
+# Kernel configuration file for the MPT3SAS
+#
+# This code is based on drivers/scsi/mpt3sas/Kconfig
+# Copyright (C) 2012-2014 LSI Corporation
+# (mailto:DL-MPTFusionLinux@lsi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_MPT3SAS
+ tristate "LSI MPT Fusion SAS 3.0 Device Driver"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ select RAID_ATTRS
+ ---help---
+ This driver supports PCI-Express SAS 12Gb/s Host Adapters.
+
+config SCSI_MPT3SAS_MAX_SGE
+ int "LSI MPT Fusion Max number of SG Entries (16 - 256)"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ default "128"
+ range 16 256
+ ---help---
+ This option allows you to specify the maximum number of scatter-
+ gather entries per I/O. The driver default is 128, which matches
+ MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
+ can be 256. However, it may decreased down to 16. Decreasing this
+ parameter will reduce memory requirements on a per controller instance.
+
+config SCSI_MPT3SAS_LOGGING
+ bool "LSI MPT Fusion logging facility"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ ---help---
+ This turns on a logging facility.
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
new file mode 100644
index 000000000..efb0c4c2e
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -0,0 +1,8 @@
+# mpt3sas makefile
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
+mpt3sas-y += mpt3sas_base.o \
+ mpt3sas_config.o \
+ mpt3sas_scsih.o \
+ mpt3sas_transport.o \
+ mpt3sas_ctl.o \
+ mpt3sas_trigger_diag.o
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
new file mode 100644
index 000000000..c34c11579
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -0,0 +1,1172 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2.h
+ * Title: MPI Message independent structures and definitions
+ * including System Interface Register Set and
+ * scatter/gather formats.
+ * Creation Date: June 21, 2006
+ *
+ * mpi2.h Version: 02.00.31
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnsotic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_H
+#define MPI2_H
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+
+/*major version for all MPI v2.x */
+#define MPI2_VERSION_MAJOR (0x02)
+
+/*minor version for MPI v2.0 compatible products */
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_VERSION_02_00 (0x0200)
+
+/*minor version for MPI v2.5 compatible products */
+#define MPI25_VERSION_MINOR (0x05)
+#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI25_VERSION_MINOR)
+#define MPI2_VERSION_02_05 (0x0205)
+
+/*Unit and Dev versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x1F)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
+
+/*****************************************************************************
+*
+* IOC State Definitions
+*
+*****************************************************************************/
+
+#define MPI2_IOC_STATE_RESET (0x00000000)
+#define MPI2_IOC_STATE_READY (0x10000000)
+#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI2_IOC_STATE_FAULT (0x40000000)
+
+#define MPI2_IOC_STATE_MASK (0xF0000000)
+#define MPI2_IOC_STATE_SHIFT (28)
+
+/*Fault state range for prodcut specific codes */
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
+
+/*****************************************************************************
+*
+* System Interface Register Definitions
+*
+*****************************************************************************/
+
+typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+ U32 Doorbell; /*0x00 */
+ U32 WriteSequence; /*0x04 */
+ U32 HostDiagnostic; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DiagRWData; /*0x10 */
+ U32 DiagRWAddressLow; /*0x14 */
+ U32 DiagRWAddressHigh; /*0x18 */
+ U32 Reserved2[5]; /*0x1C */
+ U32 HostInterruptStatus; /*0x30 */
+ U32 HostInterruptMask; /*0x34 */
+ U32 DCRData; /*0x38 */
+ U32 DCRAddress; /*0x3C */
+ U32 Reserved3[2]; /*0x40 */
+ U32 ReplyFreeHostIndex; /*0x48 */
+ U32 Reserved4[8]; /*0x4C */
+ U32 ReplyPostHostIndex; /*0x6C */
+ U32 Reserved5; /*0x70 */
+ U32 HCBSize; /*0x74 */
+ U32 HCBAddressLow; /*0x78 */
+ U32 HCBAddressHigh; /*0x7C */
+ U32 Reserved6[16]; /*0x80 */
+ U32 RequestDescriptorPostLow; /*0xC0 */
+ U32 RequestDescriptorPostHigh; /*0xC4 */
+ U32 Reserved7[14]; /*0xC8 */
+} MPI2_SYSTEM_INTERFACE_REGS,
+ *PTR_MPI2_SYSTEM_INTERFACE_REGS,
+ Mpi2SystemInterfaceRegs_t,
+ *pMpi2SystemInterfaceRegs_t;
+
+/*
+ *Defines for working with the Doorbell register.
+ */
+#define MPI2_DOORBELL_OFFSET (0x00000000)
+
+/*IOC --> System values */
+#define MPI2_DOORBELL_USED (0x08000000)
+#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
+
+/*System --> IOC values */
+#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
+
+/*
+ *Defines for the WriteSequence register
+ */
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+/*
+ *Defines for the HostDiagnostic register
+ */
+#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+
+#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
+#define MPI2_DIAG_HCB_MODE (0x00000100)
+#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
+#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI2_DIAG_RESET_HISTORY (0x00000020)
+#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
+#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
+
+/*
+ *Offsets for DiagRWData and address
+ */
+#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
+#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
+#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
+
+/*
+ *Defines for the HostInterruptStatus register
+ */
+#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
+#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
+#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
+#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
+#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
+#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
+
+/*
+ *Defines for the HostInterruptMask register
+ */
+#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
+#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
+#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
+#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
+#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
+
+/*
+ *Offsets for DCRData and address
+ */
+#define MPI2_DCR_DATA_OFFSET (0x00000038)
+#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
+
+/*
+ *Offset for the Reply Free Queue
+ */
+#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
+
+/*
+ *Defines for the Reply Descriptor Post Queue
+ */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
+#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /*MPI v2.5 only*/
+
+
+/*
+ *Defines for the HCBSize and address
+ */
+#define MPI2_HCB_SIZE_OFFSET (0x00000074)
+#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
+#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
+
+#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
+#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
+
+/*
+ *Offsets for the Request Queue
+ */
+#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+
+/*Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
+/*****************************************************************************
+*
+* Message Descriptors
+*
+*****************************************************************************/
+
+/*Request Descriptors */
+
+/*Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DescriptorTypeDependent; /*0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t,
+ *pMpi2DefaultRequestDescriptor_t;
+
+/*defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+/*High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ *pMpi2HighPriorityRequestDescriptor_t;
+
+/*SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DevHandle; /*0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t,
+ *pMpi2SCSIIORequestDescriptor_t;
+
+/*SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ *pMpi2SCSITargetRequestDescriptor_t;
+
+/*RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved; /*0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ *pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/*Fast Path SCSI IO Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi25FastPathSCSIIORequestDescriptor_t,
+ *pMpi25FastPathSCSIIORequestDescriptor_t;
+
+/*union of Request Descriptors */
+typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ U64 Words;
+} MPI2_REQUEST_DESCRIPTOR_UNION,
+ *PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
+ Mpi2RequestDescriptorUnion_t,
+ *pMpi2RequestDescriptorUnion_t;
+
+/*Reply Descriptors */
+
+/*Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 DescriptorTypeDependent1; /*0x02 */
+ U32 DescriptorTypeDependent2; /*0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t,
+ *pMpi2DefaultReplyDescriptor_t;
+
+/*defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/*values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+/*Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 ReplyFrameAddress; /*0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t,
+ *pMpi2AddressReplyDescriptor_t;
+
+#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
+
+/*SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 TaskTag; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ *pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/*TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U8 SequenceNumber; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ *pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/*Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U8 VP_ID; /*0x02 */
+ U8 Flags; /*0x03 */
+ U16 InitiatorDevHandle; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ *pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/*defines for Flags field */
+#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
+
+/*RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 Reserved; /*0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/*Fast Path SCSI IO Success Reply Descriptor */
+typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
+ *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+
+/*union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION,
+ *PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t,
+ *pMpi2ReplyDescriptorsUnion_t;
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_FUNCTION_IOC_INIT (0x02)
+#define MPI2_FUNCTION_IOC_FACTS (0x03)
+#define MPI2_FUNCTION_CONFIG (0x04)
+#define MPI2_FUNCTION_PORT_FACTS (0x05)
+#define MPI2_FUNCTION_PORT_ENABLE (0x06)
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07)
+#define MPI2_FUNCTION_EVENT_ACK (0x08)
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09)
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B)
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C)
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D)
+#define MPI2_FUNCTION_FW_UPLOAD (0x12)
+#define MPI2_FUNCTION_RAID_ACTION (0x15)
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16)
+#define MPI2_FUNCTION_TOOLBOX (0x17)
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A)
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C)
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D)
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C)
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
+
+/*****************************************************************************
+*
+* IOC Status Values
+*
+*****************************************************************************/
+
+/*mask for IOCStatus status value */
+#define MPI2_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SUCCESS (0x0000)
+#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI2_IOCSTATUS_BUSY (0x0002)
+#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
+#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+/****************************************************************************
+* RAID Accelerator values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
+
+/****************************************************************************
+* IOCStatus flag to indicate that log info is available
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+
+/****************************************************************************
+* IOCLogInfo Types
+****************************************************************************/
+
+#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+
+/*****************************************************************************
+*
+* Standard Message Structures
+*
+*****************************************************************************/
+
+/****************************************************************************
+*Request Message Header for all request messages
+****************************************************************************/
+
+typedef struct _MPI2_REQUEST_HEADER {
+ U16 FunctionDependent1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER,
+ MPI2RequestHeader_t, *pMPI2RequestHeader_t;
+
+/****************************************************************************
+* Default Reply
+****************************************************************************/
+
+typedef struct _MPI2_DEFAULT_REPLY {
+ U16 FunctionDependent1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 FunctionDependent5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY,
+ MPI2DefaultReply_t, *pMPI2DefaultReply_t;
+
+/*common version structure/union used in messages and configuration pages */
+
+typedef struct _MPI2_VERSION_STRUCT {
+ U8 Dev; /*0x00 */
+ U8 Unit; /*0x01 */
+ U8 Minor; /*0x02 */
+ U8 Major; /*0x03 */
+} MPI2_VERSION_STRUCT;
+
+typedef union _MPI2_VERSION_UNION {
+ MPI2_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI2_VERSION_UNION;
+
+/*LUN field defines, common to many structures */
+#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* MPI Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_SIMPLE32 {
+ U32 FlagsLength;
+ U32 Address;
+} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32,
+ Mpi2SGESimple32_t, *pMpi2SGESimple32_t;
+
+typedef struct _MPI2_SGE_SIMPLE64 {
+ U32 FlagsLength;
+ U64 Address;
+} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64,
+ Mpi2SGESimple64_t, *pMpi2SGESimple64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION {
+ U32 FlagsLength;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION,
+ *PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t,
+ *pMpi2SGESimpleUnion_t;
+
+/****************************************************************************
+* MPI Chain Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_CHAIN32 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32,
+ Mpi2SGEChain32_t, *pMpi2SGEChain32_t;
+
+typedef struct _MPI2_SGE_CHAIN64 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64,
+ Mpi2SGEChain64_t, *pMpi2SGEChain64_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION,
+ *PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t,
+ *pMpi2SGEChainUnion_t;
+
+/****************************************************************************
+* MPI Transaction Context Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANSACTION32 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION32,
+ *PTR_MPI2_SGE_TRANSACTION32,
+ Mpi2SGETransaction32_t,
+ *pMpi2SGETransaction32_t;
+
+typedef struct _MPI2_SGE_TRANSACTION64 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION64,
+ *PTR_MPI2_SGE_TRANSACTION64,
+ Mpi2SGETransaction64_t,
+ *pMpi2SGETransaction64_t;
+
+typedef struct _MPI2_SGE_TRANSACTION96 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96,
+ Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t;
+
+typedef struct _MPI2_SGE_TRANSACTION128 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128,
+ Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128;
+
+typedef struct _MPI2_SGE_TRANSACTION_UNION {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ } u;
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION_UNION,
+ *PTR_MPI2_SGE_TRANSACTION_UNION,
+ Mpi2SGETransactionUnion_t,
+ *pMpi2SGETransactionUnion_t;
+
+/****************************************************************************
+* MPI SGE union for IO SGL's - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_IO_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION,
+ Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t;
+
+/****************************************************************************
+* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_SGE_TRANS_SIMPLE_UNION,
+ *PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
+ Mpi2SGETransSimpleUnion_t,
+ *pMpi2SGETransSimpleUnion_t;
+
+/****************************************************************************
+* All MPI SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION,
+ Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t;
+
+/****************************************************************************
+* MPI SGE field definition and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI2_SGE_FLAGS_DIRECTION (0x04)
+#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI2_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/*Element Type */
+
+#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
+#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30)
+#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/*Address location */
+
+#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/*Direction */
+
+#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
+/*Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/*Context Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
+
+/****************************************************************************
+* MPI SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \
+ MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
+#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_SGE_SET_FLAGS(f))
+#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \
+ MPI2_SGE_CHAIN_OFFSET_SHIFT)
+
+/*****************************************************************************
+*
+* Fusion-MPT IEEE Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IEEE Simple Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_SIMPLE32 {
+ U32 Address;
+ U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 Reserved2;
+ U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION {
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION,
+ *PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t,
+ *pMpi2IeeeSgeSimpleUnion_t;
+
+/****************************************************************************
+* IEEE Chain Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+
+/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION,
+ *PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t,
+ *pMpi2IeeeSgeChainUnion_t;
+
+/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+typedef struct _MPI25_IEEE_SGE_CHAIN64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 NextChainOffset;
+ U8 Flags;
+} MPI25_IEEE_SGE_CHAIN64,
+ *PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t,
+ *pMpi25IeeeSgeChain64_t;
+
+/****************************************************************************
+* All IEEE SGE types union
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_UNION {
+ union {
+ MPI2_IEEE_SGE_SIMPLE_UNION Simple;
+ MPI2_IEEE_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION,
+ Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t;
+
+/****************************************************************************
+* IEEE SGE union for IO SGL's
+****************************************************************************/
+
+typedef union _MPI25_SGE_IO_UNION {
+ MPI2_IEEE_SGE_SIMPLE64 IeeeSimple;
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain;
+} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION,
+ Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t;
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/*Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/*Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)
+
+/****************************************************************************
+* IEEE SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \
+ >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
+
+#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \
+ MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \
+ MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_SET_FLAGS(f))
+#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI/IEEE Scatter Gather Unions
+*
+*****************************************************************************/
+
+typedef union _MPI2_SIMPLE_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION,
+ Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t;
+
+typedef union _MPI2_SGE_IO_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t;
+
+/****************************************************************************
+*
+* Values for SGLFlags field, used in many request messages with an SGL
+*
+****************************************************************************/
+
+/*values for MPI SGL Data Location Address Space subfield */
+#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+/*values for SGL Type subfield */
+#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
+#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
new file mode 100644
index 000000000..e261a3153
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -0,0 +1,3344 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_cnfg.h
+ * Title: MPI Configuration messages and pages
+ * Creation Date: November 10, 2006
+ *
+ * mpi2_cnfg.h Version: 02.00.26
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ * obsolete for MPI v2.5 and later.
+ * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ * future use.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_CNFG_H
+#define MPI2_CNFG_H
+
+/*****************************************************************************
+* Configuration Page Header and defines
+*****************************************************************************/
+
+/*Config Page Header */
+typedef struct _MPI2_CONFIG_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 PageLength; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER,
+ Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t;
+
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ U8 Bytes[4];
+ U16 Word16[2];
+ U32 Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+ Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion;
+
+/*Extended Config Page Header */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 Reserved2; /*0x07 */
+} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ Mpi2ConfigExtendedPageHeader_t,
+ *pMpi2ConfigExtendedPageHeader_t;
+
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+ U8 Bytes[8];
+ U16 Word16[4];
+ U32 Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ Mpi2ConfigPageExtendedHeaderUnion,
+ *pMpi2ConfigPageExtendedHeaderUnion;
+
+
+/*PageType field values */
+#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI2_CONFIG_PAGEATTR_MASK (0xF0)
+
+#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI2_CONFIG_PAGETYPE_IOC (0x01)
+#define MPI2_CONFIG_PAGETYPE_BIOS (0x02)
+#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
+#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09)
+#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
+#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F)
+#define MPI2_CONFIG_PAGETYPE_MASK (0x0F)
+
+#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF)
+
+
+/*ExtPageType field values */
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
+#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14)
+#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
+#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+
+
+/*****************************************************************************
+* PageAddress defines
+*****************************************************************************/
+
+/*RAID Volume PageAddress format */
+#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Physical Disk PageAddress format */
+#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
+#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
+#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000)
+
+#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
+#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF)
+
+
+/*SAS Expander PageAddress format */
+#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+
+
+/*SAS Device PageAddress format */
+#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*SAS PHY PageAddress format */
+#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000)
+
+#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
+
+
+/*SAS Port PageAddress format */
+#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+
+#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF)
+
+
+/*SAS Enclosure PageAddress format */
+#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Configuration PageAddress format */
+#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
+#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000)
+#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000)
+
+#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF)
+
+
+/*Driver Persistent Mapping PageAddress format */
+#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000)
+
+#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000)
+#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16)
+#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
+
+
+/*Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
+
+
+
+/****************************************************************************
+* Configuration messages
+****************************************************************************/
+
+/*Configuration Request Message */
+typedef struct _MPI2_CONFIG_REQUEST {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 ProxyVF_ID; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ U32 Reserved3; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+ U32 PageAddress; /*0x18 */
+ MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */
+} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST,
+ Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t;
+
+/*values for the Action field */
+#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
+#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
+#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
+#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
+#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/*Config Reply Message */
+typedef struct _MPI2_CONFIG_REPLY {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 Reserved2; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY,
+ Mpi2ConfigReply_t, *pMpi2ConfigReply_t;
+
+
+
+/*****************************************************************************
+*
+* C o n f i g u r a t i o n P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Manufacturing Config pages
+****************************************************************************/
+
+#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+
+/*MPI v2.0 SAS products */
+#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
+#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072)
+#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074)
+#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076)
+#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
+#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
+#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
+
+#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
+
+#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
+
+/*MPI v2.5 SAS products */
+#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096)
+#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097)
+#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090)
+#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091)
+#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094)
+#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095)
+
+
+
+
+/*Manufacturing Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 ChipName[16]; /*0x04 */
+ U8 ChipRevision[8]; /*0x14 */
+ U8 BoardName[16]; /*0x1C */
+ U8 BoardAssembly[16]; /*0x2C */
+ U8 BoardTracerNumber[16]; /*0x3C */
+} MPI2_CONFIG_PAGE_MAN_0,
+ *PTR_MPI2_CONFIG_PAGE_MAN_0,
+ Mpi2ManufacturingPage0_t,
+ *pMpi2ManufacturingPage0_t;
+
+#define MPI2_MANUFACTURING0_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 VPD[256]; /*0x04 */
+} MPI2_CONFIG_PAGE_MAN_1,
+ *PTR_MPI2_CONFIG_PAGE_MAN_1,
+ Mpi2ManufacturingPage1_t,
+ *pMpi2ManufacturingPage1_t;
+
+#define MPI2_MANUFACTURING1_PAGEVERSION (0x00)
+
+
+typedef struct _MPI2_CHIP_REVISION_ID {
+ U16 DeviceID; /*0x00 */
+ U8 PCIRevisionID; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID,
+ Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t;
+
+
+/*Manufacturing Page 2 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_2,
+ *PTR_MPI2_CONFIG_PAGE_MAN_2,
+ Mpi2ManufacturingPage2_t,
+ *pMpi2ManufacturingPage2_t;
+
+#define MPI2_MANUFACTURING2_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
+#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_3,
+ *PTR_MPI2_CONFIG_PAGE_MAN_3,
+ Mpi2ManufacturingPage3_t,
+ *pMpi2ManufacturingPage3_t;
+
+#define MPI2_MANUFACTURING3_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 4 */
+
+typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS {
+ U8 PowerSaveFlags; /*0x00 */
+ U8 InternalOperationsSleepTime; /*0x01 */
+ U8 InternalOperationsRunTime; /*0x02 */
+ U8 HostIdleTime; /*0x03 */
+} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ Mpi2ManPage4PwrSaveSettings_t,
+ *pMpi2ManPage4PwrSaveSettings_t;
+
+/*defines for the PowerSaveFlags field */
+#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03)
+#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00)
+#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01)
+#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02)
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Flags; /*0x08 */
+ U8 InquirySize; /*0x0C */
+ U8 Reserved2; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ U8 InquiryData[56]; /*0x10 */
+ U32 RAID0VolumeSettings; /*0x48 */
+ U32 RAID1EVolumeSettings; /*0x4C */
+ U32 RAID1VolumeSettings; /*0x50 */
+ U32 RAID10VolumeSettings; /*0x54 */
+ U32 Reserved4; /*0x58 */
+ U32 Reserved5; /*0x5C */
+ MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */
+ U8 MaxOCEDisks; /*0x64 */
+ U8 ResyncRate; /*0x65 */
+ U16 DataScrubDuration; /*0x66 */
+ U8 MaxHotSpares; /*0x68 */
+ U8 MaxPhysDisksPerVol; /*0x69 */
+ U8 MaxPhysDisks; /*0x6A */
+ U8 MaxVolumes; /*0x6B */
+} MPI2_CONFIG_PAGE_MAN_4,
+ *PTR_MPI2_CONFIG_PAGE_MAN_4,
+ Mpi2ManufacturingPage4_t,
+ *pMpi2ManufacturingPage4_t;
+
+#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A)
+
+/*Manufacturing Page 4 Flags field */
+#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000)
+#define MPI2_MANPAGE4_METADATA_512MB (0x00000000)
+
+#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000)
+#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000)
+#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000)
+
+#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00)
+#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000)
+#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400)
+#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800)
+#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00)
+
+#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300)
+#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000)
+#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100)
+#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200)
+
+#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080)
+#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
+
+
+/*Manufacturing Page 5 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_MANUFACTURING5_ENTRY {
+ U64 WWID; /*0x00 */
+ U64 DeviceName; /*0x08 */
+} MPI2_MANUFACTURING5_ENTRY,
+ *PTR_MPI2_MANUFACTURING5_ENTRY,
+ Mpi2Manufacturing5Entry_t,
+ *pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_MANUFACTURING5_ENTRY
+ Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_5,
+ *PTR_MPI2_CONFIG_PAGE_MAN_5,
+ Mpi2ManufacturingPage5_t,
+ *pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION (0x03)
+
+
+/*Manufacturing Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+ *PTR_MPI2_CONFIG_PAGE_MAN_6,
+ Mpi2ManufacturingPage6_t,
+ *pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 7 */
+
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
+ U32 Pinout; /*0x00 */
+ U8 Connector[16]; /*0x04 */
+ U8 Location; /*0x14 */
+ U8 ReceptacleID; /*0x15 */
+ U16 Slot; /*0x16 */
+ U32 Reserved2; /*0x18 */
+} MPI2_MANPAGE7_CONNECTOR_INFO,
+ *PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+ Mpi2ManPage7ConnectorInfo_t,
+ *pMpi2ManPage7ConnectorInfo_t;
+
+/*defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+
+/*defines for the Location field */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Flags; /*0x0C */
+ U8 EnclosureName[16]; /*0x10 */
+ U8 NumPhys; /*0x20 */
+ U8 Reserved3; /*0x21 */
+ U16 Reserved4; /*0x22 */
+ MPI2_MANPAGE7_CONNECTOR_INFO
+ ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+ *PTR_MPI2_CONFIG_PAGE_MAN_7,
+ Mpi2ManufacturingPage7_t,
+ *pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
+
+/*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+
+/*
+ *Generic structure to use for product-specific manufacturing pages
+ *(currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_MAN_PS,
+ Mpi2ManufacturingPagePS_t,
+ *pMpi2ManufacturingPagePS_t;
+
+#define MPI2_MANUFACTURING8_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+/*IO Unit Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64 UniqueValue; /*0x04 */
+ MPI2_VERSION_UNION NvdataVersionDefault; /*0x08 */
+ MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0A */
+} MPI2_CONFIG_PAGE_IO_UNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+ Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02)
+
+
+/*IO Unit Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+ Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
+
+/*IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
+#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
+#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
+
+
+/*IO Unit Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 GPIOCount; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16
+ GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+ Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01)
+
+/*defines for IO Unit Page 3 GPIOVal field */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
+
+
+/*IO Unit Page 5 */
+
+/*
+ *Upper layer code (drivers, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumDmaEngines at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
+#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64
+ RaidAcceleratorBufferBaseAddress; /*0x04 */
+ U64
+ RaidAcceleratorBufferSize; /*0x0C */
+ U64
+ RaidAcceleratorControlBaseAddress; /*0x14 */
+ U8 RAControlSize; /*0x1C */
+ U8 NumDmaEngines; /*0x1D */
+ U8 RAMinControlSize; /*0x1E */
+ U8 RAMaxControlSize; /*0x1F */
+ U32 Reserved1; /*0x20 */
+ U32 Reserved2; /*0x24 */
+ U32 Reserved3; /*0x28 */
+ U32
+ DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */
+} MPI2_CONFIG_PAGE_IO_UNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
+ Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t;
+
+#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 5 DmaEngineCapabilities field */
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000)
+#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
+
+#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
+#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004)
+#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002)
+#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001)
+
+
+/*IO Unit Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 Flags; /*0x04 */
+ U8 RAHostControlSize; /*0x06 */
+ U8 Reserved0; /*0x07 */
+ U64
+ RaidAcceleratorHostControlBaseAddress; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+ U32 Reserved3; /*0x18 */
+} MPI2_CONFIG_PAGE_IO_UNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
+ Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t;
+
+#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 6 Flags field */
+#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
+
+
+/*IO Unit Page 7 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 CurrentPowerMode; /*0x04 */
+ U8 PreviousPowerMode; /*0x05 */
+ U8 PCIeWidth; /*0x06 */
+ U8 PCIeSpeed; /*0x07 */
+ U32 ProcessorState; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U16 IOCTemperature; /*0x10 */
+ U8
+ IOCTemperatureUnits; /*0x12 */
+ U8 IOCSpeed; /*0x13 */
+ U16 BoardTemperature; /*0x14 */
+ U8
+ BoardTemperatureUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 Reserved4; /* 0x18 */
+ U32 Reserved5; /* 0x1C */
+ U32 Reserved6; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+ Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
+
+/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
+#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
+#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40)
+#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80)
+#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01)
+#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04)
+#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05)
+#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06)
+
+
+/*defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+
+/*defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+
+/*defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
+
+/*defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001)
+
+/*obsolete names for the PowerManagementCapabilities bits (above) */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */
+
+
+/*defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02)
+
+/*defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+
+/*defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
+
+
+/*IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+ U16 Flags; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U16
+ Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR,
+ Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t;
+
+/*defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 PollingInterval; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT8_SENSOR
+ Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+ Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+ U16 CurrentTemperature; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 Flags; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U16 Reserved3; /*0x06 */
+ U32 Reserved4; /*0x08 */
+ U32 Reserved5; /*0x0C */
+} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR,
+ Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t;
+
+/*defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 Reserved4; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT9_SENSOR
+ Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+ Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+ U8 CreditPercent; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_IOUNIT10_FUNCTION,
+ *PTR_MPI2_IOUNIT10_FUNCTION,
+ Mpi2IOUnit10Function_t,
+ *pMpi2IOUnit10Function_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumFunctions; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_IOUNIT10_FUNCTION
+ Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+ Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+/*IOC Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U16 VendorID; /*0x0C */
+ U16 DeviceID; /*0x0E */
+ U8 RevisionID; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ U32 ClassCode; /*0x14 */
+ U16 SubsystemVendorID; /*0x18 */
+ U16 SubsystemID; /*0x1A */
+} MPI2_CONFIG_PAGE_IOC_0,
+ *PTR_MPI2_CONFIG_PAGE_IOC_0,
+ Mpi2IOCPage0_t, *pMpi2IOCPage0_t;
+
+#define MPI2_IOCPAGE0_PAGEVERSION (0x02)
+
+
+/*IOC Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+ U32 CoalescingTimeout; /*0x08 */
+ U8 CoalescingDepth; /*0x0C */
+ U8 PCISlotNum; /*0x0D */
+ U8 PCIBusNum; /*0x0E */
+ U8 PCIDomainSegment; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_1,
+ *PTR_MPI2_CONFIG_PAGE_IOC_1,
+ Mpi2IOCPage1_t, *pMpi2IOCPage1_t;
+
+#define MPI2_IOCPAGE1_PAGEVERSION (0x05)
+
+/*defines for IOC Page 1 Flags field */
+#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF)
+
+/*IOC Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32
+ CapabilitiesFlags; /*0x04 */
+ U8 MaxDrivesRAID0; /*0x08 */
+ U8 MaxDrivesRAID1; /*0x09 */
+ U8
+ MaxDrivesRAID1E; /*0x0A */
+ U8
+ MaxDrivesRAID10; /*0x0B */
+ U8 MinDrivesRAID0; /*0x0C */
+ U8 MinDrivesRAID1; /*0x0D */
+ U8
+ MinDrivesRAID1E; /*0x0E */
+ U8
+ MinDrivesRAID10; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U8
+ MaxGlobalHotSpares; /*0x14 */
+ U8 MaxPhysDisks; /*0x15 */
+ U8 MaxVolumes; /*0x16 */
+ U8 MaxConfigs; /*0x17 */
+ U8 MaxOCEDisks; /*0x18 */
+ U8 Reserved2; /*0x19 */
+ U16 Reserved3; /*0x1A */
+ U32
+ SupportedStripeSizeMapRAID0; /*0x1C */
+ U32
+ SupportedStripeSizeMapRAID1E; /*0x20 */
+ U32
+ SupportedStripeSizeMapRAID10; /*0x24 */
+ U32 Reserved4; /*0x28 */
+ U32 Reserved5; /*0x2C */
+ U16
+ DefaultMetadataSize; /*0x30 */
+ U16 Reserved6; /*0x32 */
+ U16
+ MaxBadBlockTableEntries; /*0x34 */
+ U16 Reserved7; /*0x36 */
+ U32
+ IRNvsramVersion; /*0x38 */
+} MPI2_CONFIG_PAGE_IOC_6,
+ *PTR_MPI2_CONFIG_PAGE_IOC_6,
+ Mpi2IOCPage6_t, *pMpi2IOCPage6_t;
+
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
+
+/*defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002)
+#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/*IOC Page 7 */
+
+#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32
+ EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */
+ U16 SASBroadcastPrimitiveMasks; /*0x18 */
+ U16 SASNotifyPrimitiveMasks; /*0x1A */
+ U32 Reserved3; /*0x1C */
+} MPI2_CONFIG_PAGE_IOC_7,
+ *PTR_MPI2_CONFIG_PAGE_IOC_7,
+ Mpi2IOCPage7_t, *pMpi2IOCPage7_t;
+
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
+
+
+/*IOC Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumDevsPerEnclosure; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16 MaxPersistentEntries; /*0x08 */
+ U16 MaxNumPhysicalMappedIDs; /*0x0A */
+ U16 Flags; /*0x0C */
+ U16 Reserved3; /*0x0E */
+ U16 IRVolumeMappingFlags; /*0x10 */
+ U16 Reserved4; /*0x12 */
+ U32 Reserved5; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_8,
+ *PTR_MPI2_CONFIG_PAGE_IOC_8,
+ Mpi2IOCPage8_t, *pMpi2IOCPage8_t;
+
+#define MPI2_IOCPAGE8_PAGEVERSION (0x00)
+
+/*defines for IOC Page 8 Flags field */
+#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020)
+#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010)
+
+#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E)
+#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002)
+
+#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001)
+#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000)
+
+/*defines for IOC Page 8 IRVolumeMappingFlags */
+#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
+#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001)
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+/*BIOS Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 BiosOptions; /*0x04 */
+ U32 IOCSettings; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DeviceSettings; /*0x10 */
+ U16 NumberOfDevices; /*0x14 */
+ U16 UEFIVersion; /*0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /*0x18 */
+ U16 IOTimeoutSequential; /*0x1A */
+ U16 IOTimeoutOther; /*0x1C */
+ U16 IOTimeoutBlockDevicesRM; /*0x1E */
+} MPI2_CONFIG_PAGE_BIOS_1,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_1,
+ Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
+
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+
+/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
+#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/*values for BIOS Page 1 IOCSettings field */
+#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/*values for BIOS Page 1 DeviceSettings field */
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/*defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
+
+
+
+/*BIOS Page 2 */
+
+typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER {
+ U32 Reserved1; /*0x00 */
+ U32 Reserved2; /*0x04 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ Mpi2BootDeviceAdapterOrder_t,
+ *pMpi2BootDeviceAdapterOrder_t;
+
+typedef struct _MPI2_BOOT_DEVICE_SAS_WWID {
+ U64 SASAddress; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_SAS_WWID,
+ *PTR_MPI2_BOOT_DEVICE_SAS_WWID,
+ Mpi2BootDeviceSasWwid_t,
+ *pMpi2BootDeviceSasWwid_t;
+
+typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT {
+ U64 EnclosureLogicalID; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 SlotNumber; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t,
+ *pMpi2BootDeviceEnclosureSlot_t;
+
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME {
+ U64 DeviceName; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME,
+ *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t,
+ *pMpi2BootDeviceDeviceName_t;
+
+typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE {
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE,
+ *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t,
+ *pMpi2BiosPage2BootDevice_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+ U32 Reserved5; /*0x14 */
+ U32 Reserved6; /*0x18 */
+ U8 ReqBootDeviceForm; /*0x1C */
+ U8 Reserved7; /*0x1D */
+ U16 Reserved8; /*0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */
+ U8 ReqAltBootDeviceForm; /*0x38 */
+ U8 Reserved9; /*0x39 */
+ U16 Reserved10; /*0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */
+ U8 CurrentBootDeviceForm; /*0x58 */
+ U8 Reserved11; /*0x59 */
+ U16 Reserved12; /*0x5A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */
+} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, *pMpi2BiosPage2_t;
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/*values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
+
+
+/*BIOS Page 3 */
+
+typedef struct _MPI2_ADAPTER_INFO {
+ U8 PciBusNumber; /*0x00 */
+ U8 PciDeviceAndFunctionNumber; /*0x01 */
+ U16 AdapterFlags; /*0x02 */
+} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 GlobalFlags; /*0x04 */
+ U32 BiosVersion; /*0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */
+ U32 Reserved1; /*0x1C */
+} MPI2_CONFIG_PAGE_BIOS_3,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+
+/*values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/*BIOS Page 4 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY {
+ U64 ReassignmentWWID; /*0x00 */
+ U64 ReassignmentDeviceName; /*0x08 */
+} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY,
+ Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ MPI2_BIOS4_ENTRY
+ Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, *pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/*RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U8 PhysDiskNum; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS {
+ U16 Settings; /*0x00 */
+ U8 HotSparePool; /*0x01 */
+ U8 Reserved; /*0x02 */
+} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t,
+ *pMpi2RaidVol0Settings_t;
+
+/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/*RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDisks at runtime.
+ */
+#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 VolumeState; /*0x06 */
+ U8 VolumeType; /*0x07 */
+ U32 VolumeStatusFlags; /*0x08 */
+ MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */
+ U64 MaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U16 BlockSize; /*0x1C */
+ U16 Reserved1; /*0x1E */
+ U8 SupportedPhysDisks;/*0x20 */
+ U8 ResyncRate; /*0x21 */
+ U16 DataScrubDuration; /*0x22 */
+ U8 NumPhysDisks; /*0x24 */
+ U8 Reserved2; /*0x25 */
+ U8 Reserved3; /*0x26 */
+ U8 InactiveStatus; /*0x27 */
+ MPI2_RAIDVOL0_PHYS_DISK
+ PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */
+} MPI2_CONFIG_PAGE_RAID_VOL_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
+ Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t;
+
+#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A)
+
+/*values for RAID VolumeState */
+#define MPI2_RAID_VOL_STATE_MISSING (0x00)
+#define MPI2_RAID_VOL_STATE_FAILED (0x01)
+#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02)
+#define MPI2_RAID_VOL_STATE_ONLINE (0x03)
+#define MPI2_RAID_VOL_STATE_DEGRADED (0x04)
+#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05)
+
+/*values for RAID VolumeType */
+#define MPI2_RAID_VOL_TYPE_RAID0 (0x00)
+#define MPI2_RAID_VOL_TYPE_RAID1E (0x01)
+#define MPI2_RAID_VOL_TYPE_RAID1 (0x02)
+#define MPI2_RAID_VOL_TYPE_RAID10 (0x05)
+#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/*values for RAID Volume Page 0 VolumeStatusFlags field */
+#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
+#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004)
+#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001)
+
+/*values for RAID Volume Page 0 SupportedPhysDisks field */
+#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08)
+#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04)
+#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02)
+#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01)
+
+/*values for RAID Volume Page 0 InactiveStatus field */
+#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
+
+
+/*RAID Volume Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U16 Reserved0; /*0x06 */
+ U8 GUID[24]; /*0x08 */
+ U8 Name[16]; /*0x20 */
+ U64 WWID; /*0x30 */
+ U32 Reserved1; /*0x38 */
+ U32 Reserved2; /*0x3C */
+} MPI2_CONFIG_PAGE_RAID_VOL_1,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
+ Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t;
+
+#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+/*RAID Physical Disk Page 0 */
+
+typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS {
+ U16 Reserved1; /*0x00 */
+ U8 HotSparePool; /*0x02 */
+ U8 Reserved2; /*0x03 */
+} MPI2_RAIDPHYSDISK0_SETTINGS,
+ *PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
+ Mpi2RaidPhysDisk0Settings_t,
+ *pMpi2RaidPhysDisk0Settings_t;
+
+/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */
+
+typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA {
+ U8 VendorID[8]; /*0x00 */
+ U8 ProductID[16]; /*0x08 */
+ U8 ProductRevLevel[4]; /*0x18 */
+ U8 SerialNum[32]; /*0x1C */
+} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ Mpi2RaidPhysDisk0InquiryData_t,
+ *pMpi2RaidPhysDisk0InquiryData_t;
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 PhysDiskNum; /*0x07 */
+ MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */
+ U32 Reserved3; /*0x4C */
+ U8 PhysDiskState; /*0x50 */
+ U8 OfflineReason; /*0x51 */
+ U8 IncompatibleReason; /*0x52 */
+ U8 PhysDiskAttributes; /*0x53 */
+ U32 PhysDiskStatusFlags;/*0x54 */
+ U64 DeviceMaxLBA; /*0x58 */
+ U64 HostMaxLBA; /*0x60 */
+ U64 CoercedMaxLBA; /*0x68 */
+ U16 BlockSize; /*0x70 */
+ U16 Reserved5; /*0x72 */
+ U32 Reserved6; /*0x74 */
+} MPI2_CONFIG_PAGE_RD_PDISK_0,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
+ Mpi2RaidPhysDiskPage0_t,
+ *pMpi2RaidPhysDiskPage0_t;
+
+#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05)
+
+/*PhysDiskState defines */
+#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00)
+#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
+#define MPI2_RAID_PD_STATE_OFFLINE (0x02)
+#define MPI2_RAID_PD_STATE_ONLINE (0x03)
+#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04)
+#define MPI2_RAID_PD_STATE_DEGRADED (0x05)
+#define MPI2_RAID_PD_STATE_REBUILDING (0x06)
+#define MPI2_RAID_PD_STATE_OPTIMAL (0x07)
+
+/*OfflineReason defines */
+#define MPI2_PHYSDISK0_ONLINE (0x00)
+#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03)
+#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04)
+#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06)
+#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF)
+
+/*IncompatibleReason defines */
+#define MPI2_PHYSDISK0_COMPATIBLE (0x00)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
+
+/*PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
+#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
+#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
+#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
+#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
+
+/*PhysDiskStatusFlags defines */
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020)
+#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000)
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
+#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004)
+#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001)
+
+
+/*RAID Physical Disk Page 1 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
+#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+typedef struct _MPI2_RAIDPHYSDISK1_PATH {
+ U16 DevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U64 WWID; /*0x04 */
+ U64 OwnerWWID; /*0x0C */
+ U8 OwnerIdentifier; /*0x14 */
+ U8 Reserved2; /*0x15 */
+ U16 Flags; /*0x16 */
+} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH,
+ Mpi2RaidPhysDisk1Path_t,
+ *pMpi2RaidPhysDisk1Path_t;
+
+/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
+#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004)
+#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhysDiskPaths; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 Reserved1; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ MPI2_RAIDPHYSDISK1_PATH
+ PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */
+} MPI2_CONFIG_PAGE_RD_PDISK_1,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
+ Mpi2RaidPhysDiskPage1_t,
+ *pMpi2RaidPhysDiskPage1_t;
+
+#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* values for fields used by several types of SAS Config Pages
+****************************************************************************/
+
+/*values for NegotiatedLinkRates fields */
+#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0)
+#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4)
+#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/*link rates used for Negotiated Physical and Logical Link Rate */
+#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
+#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
+
+
+/*values for AttachedPhyInfo fields */
+#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+
+#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+
+
+/*values for PhyInfo fields */
+#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
+
+#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
+#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
+#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000)
+#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+
+#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+
+#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020)
+
+
+/*values for SAS ProgrammedLinkRate fields */
+#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
+#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+
+
+/*values for SAS HwLinkRate fields */
+#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
+#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+/*SAS IO Unit Page 0 */
+
+typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 DiscoveryStatus; /*0x0C */
+ U32 Reserved; /*0x10 */
+} MPI2_SAS_IO_UNIT0_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
+ Mpi2SasIOUnit0PhyData_t,
+ *pMpi2SasIOUnit0PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
+#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1;/*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 Reserved2;/*0x0D */
+ U16 Reserved3;/*0x0E */
+ MPI2_SAS_IO_UNIT0_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t;
+
+#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
+
+/*values for SAS IO Unit Page 0 PortFlags */
+#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
+#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*see mpi2_sas.h for values for
+ *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/*values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001)
+
+
+/*SAS IO Unit Page 1 */
+
+typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U16 MaxTargetPortConnectTime; /*0x08 */
+ U16 Reserved1; /*0x0A */
+} MPI2_SAS_IO_UNIT1_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
+ Mpi2SasIOUnit1PhyData_t,
+ *pMpi2SasIOUnit1PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
+#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16
+ ControlFlags; /*0x08 */
+ U16
+ SASNarrowMaxQueueDepth; /*0x0A */
+ U16
+ AdditionalControlFlags; /*0x0C */
+ U16
+ SASWideMaxQueueDepth; /*0x0E */
+ U8
+ NumPhys; /*0x10 */
+ U8
+ SATAMaxQDepth; /*0x11 */
+ U8
+ ReportDeviceMissingDelay; /*0x12 */
+ U8
+ IODeviceMissingDelay; /*0x13 */
+ MPI2_SAS_IO_UNIT1_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t;
+
+#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
+
+/*values for SAS IO Unit Page 1 ControlFlags */
+#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+
+#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2)
+
+#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
+
+/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
+
+/*values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/*values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
+
+/*see mpi2_sas.h for values for
+ *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/*SAS IO Unit Page 4 */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP {
+ U8 MaxTargetSpinup; /*0x00 */
+ U8 SpinupDelay; /*0x01 */
+ U8 SpinupFlags; /*0x02 */
+ U8 Reserved1; /*0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t,
+ *pMpi2SasIOUnit4SpinupGroup_t;
+/*defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP
+ SpinupGroupParameters[4]; /*0x08 */
+ U32
+ Reserved1; /*0x18 */
+ U32
+ Reserved2; /*0x1C */
+ U32
+ Reserved3; /*0x20 */
+ U8
+ BootDeviceWaitTime; /*0x24 */
+ U8
+ Reserved4; /*0x25 */
+ U16
+ Reserved5; /*0x26 */
+ U8
+ NumPhys; /*0x28 */
+ U8
+ PEInitialSpinupDelay; /*0x29 */
+ U8
+ PEReplyDelay; /*0x2A */
+ U8
+ Flags; /*0x2B */
+ U8
+ PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/*defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/*SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
+ U8 ControlFlags; /*0x00 */
+ U8 PortWidthModGroup; /*0x01 */
+ U16 InactivityTimerExponent; /*0x02 */
+ U8 SATAPartialTimeout; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 SATASlumberTimeout; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U8 SASPartialTimeout; /*0x08 */
+ U8 Reserved4; /*0x09 */
+ U8 SASSlumberTimeout; /*0x0A */
+ U8 Reserved5; /*0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t,
+ *pMpi2SasIOUnit5PhyPmSettings_t;
+
+/*defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/*defines for PortWidthModeGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/*defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x08 */
+ U8 Reserved1;/*0x09 */
+ U16 Reserved2;/*0x0A */
+ U32 Reserved3;/*0x0C */
+ MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
+ SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/*SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
+ U8 CurrentStatus; /*0x00 */
+ U8 CurrentModulation; /*0x01 */
+ U8 CurrentUtilization; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 Reserved2; /*0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ *pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/*defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/*defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U8 NumGroups; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
+ U8 Flags; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 Threshold75Pct; /*0x04 */
+ U8 Threshold50Pct; /*0x05 */
+ U8 Threshold25Pct; /*0x06 */
+ U8 Reserved3; /*0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ *pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 SamplingInterval; /*0x08 */
+ U8 WindowLength; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U8 NumGroups; /*0x14 */
+ U8 Reserved4; /*0x15 */
+ U16 Reserved5; /*0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U8
+ TxRxSleepStatus; /*0x10 */
+ U8
+ Reserved2; /*0x11 */
+ U16
+ Reserved3; /*0x12 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/*defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+
+/*defines for TxRxSleepStatus field */
+#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03)
+
+
+
+/*SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U64
+ TimeStamp; /*0x08 */
+ U32
+ Reserved1; /*0x10 */
+ U32
+ Reserved2; /*0x14 */
+ U32
+ FastPathPendedRequests; /*0x18 */
+ U32
+ FastPathUnPendedRequests; /*0x1C */
+ U32
+ FastPathHostRequestStarts; /*0x20 */
+ U32
+ FastPathFirmwareRequestStarts; /*0x24 */
+ U32
+ FastPathHostCompletions; /*0x28 */
+ U32
+ FastPathFirmwareCompletions; /*0x2C */
+ U32
+ NonFastPathRequestStarts; /*0x30 */
+ U32
+ NonFastPathHostCompletions; /*0x30 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+ Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/*SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PhysicalPort; /*0x08 */
+ U8
+ ReportGenLength; /*0x09 */
+ U16
+ EnclosureHandle; /*0x0A */
+ U64
+ SASAddress; /*0x0C */
+ U32
+ DiscoveryStatus; /*0x14 */
+ U16
+ DevHandle; /*0x18 */
+ U16
+ ParentDevHandle; /*0x1A */
+ U16
+ ExpanderChangeCount; /*0x1C */
+ U16
+ ExpanderRouteIndexes; /*0x1E */
+ U8
+ NumPhys; /*0x20 */
+ U8
+ SASLevel; /*0x21 */
+ U16
+ Flags; /*0x22 */
+ U16
+ STPBusInactivityTimeLimit; /*0x24 */
+ U16
+ STPMaxConnectTimeLimit; /*0x26 */
+ U16
+ STP_SMP_NexusLossTime; /*0x28 */
+ U16
+ MaxNumRoutedSasAddresses; /*0x2A */
+ U64
+ ActiveZoneManagerSASAddress;/*0x2C */
+ U16
+ ZoneLockInactivityLimit; /*0x34 */
+ U16
+ Reserved1; /*0x36 */
+ U8
+ TimeToReducedFunc; /*0x38 */
+ U8
+ InitialTimeToReducedFunc; /*0x39 */
+ U8
+ MaxReducedFuncTime; /*0x3A */
+ U8
+ Reserved2; /*0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+ Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/*values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+
+/*values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+
+
+/*SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PhysicalPort; /*0x08 */
+ U8
+ Reserved1; /*0x09 */
+ U16
+ Reserved2; /*0x0A */
+ U8
+ NumPhys; /*0x0C */
+ U8
+ Phy; /*0x0D */
+ U16
+ NumTableEntriesProgrammed; /*0x0E */
+ U8
+ ProgrammedLinkRate; /*0x10 */
+ U8
+ HwLinkRate; /*0x11 */
+ U16
+ AttachedDevHandle; /*0x12 */
+ U32
+ PhyInfo; /*0x14 */
+ U32
+ AttachedDeviceInfo; /*0x18 */
+ U16
+ ExpanderDevHandle; /*0x1C */
+ U8
+ ChangeCount; /*0x1E */
+ U8
+ NegotiatedLinkRate; /*0x1F */
+ U8
+ PhyIdentifier; /*0x20 */
+ U8
+ AttachedPhyIdentifier; /*0x21 */
+ U8
+ Reserved3; /*0x22 */
+ U8
+ DiscoveryInfo; /*0x23 */
+ U32
+ AttachedPhyInfo; /*0x24 */
+ U8
+ ZoneGroup; /*0x28 */
+ U8
+ SelfConfigStatus; /*0x29 */
+ U16
+ Reserved4; /*0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+ Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines
+ *used for the AttachedDeviceInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/*SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ Slot; /*0x08 */
+ U16
+ EnclosureHandle; /*0x0A */
+ U64
+ SASAddress; /*0x0C */
+ U16
+ ParentDevHandle; /*0x14 */
+ U8
+ PhyNum; /*0x16 */
+ U8
+ AccessStatus; /*0x17 */
+ U16
+ DevHandle; /*0x18 */
+ U8
+ AttachedPhyIdentifier; /*0x1A */
+ U8
+ ZoneGroup; /*0x1B */
+ U32
+ DeviceInfo; /*0x1C */
+ U16
+ Flags; /*0x20 */
+ U8
+ PhysicalPort; /*0x22 */
+ U8
+ MaxPortConnections; /*0x23 */
+ U64
+ DeviceName; /*0x24 */
+ U8
+ PortGroups; /*0x2C */
+ U8
+ DmaGroup; /*0x2D */
+ U8
+ ControlGroup; /*0x2E */
+ U8
+ Reserved1; /*0x2F */
+ U32
+ Reserved2; /*0x30 */
+ U32
+ Reserved3; /*0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+ Mpi2SasDevicePage0_t,
+ *pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
+
+/*values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
+#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
+#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
+/*specific values for SATA Init failures */
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
+
+/*values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+
+
+/*SAS Device Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ SASAddress; /*0x0C */
+ U32
+ Reserved2; /*0x14 */
+ U16
+ DevHandle; /*0x18 */
+ U16
+ Reserved3; /*0x1A */
+ U8
+ InitialRegDeviceFIS[20];/*0x1C */
+} MPI2_CONFIG_PAGE_SAS_DEV_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
+ Mpi2SasDevicePage1_t,
+ *pMpi2SasDevicePage1_t;
+
+#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+/*SAS PHY Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ OwnerDevHandle; /*0x08 */
+ U16
+ Reserved1; /*0x0A */
+ U16
+ AttachedDevHandle; /*0x0C */
+ U8
+ AttachedPhyIdentifier; /*0x0E */
+ U8
+ Reserved2; /*0x0F */
+ U32
+ AttachedPhyInfo; /*0x10 */
+ U8
+ ProgrammedLinkRate; /*0x14 */
+ U8
+ HwLinkRate; /*0x15 */
+ U8
+ ChangeCount; /*0x16 */
+ U8
+ Flags; /*0x17 */
+ U32
+ PhyInfo; /*0x18 */
+ U8
+ NegotiatedLinkRate; /*0x1C */
+ U8
+ Reserved3; /*0x1D */
+ U16
+ Reserved4; /*0x1E */
+} MPI2_CONFIG_PAGE_SAS_PHY_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
+ Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t;
+
+#define MPI2_SASPHY0_PAGEVERSION (0x03)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*values for SAS PHY Page 0 Flags field */
+#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/*SAS PHY Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U32
+ InvalidDwordCount; /*0x0C */
+ U32
+ RunningDisparityErrorCount; /*0x10 */
+ U32
+ LossDwordSynchCount; /*0x14 */
+ U32
+ PhyResetProblemCount; /*0x18 */
+} MPI2_CONFIG_PAGE_SAS_PHY_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
+ Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t;
+
+#define MPI2_SASPHY1_PAGEVERSION (0x01)
+
+
+/*SAS PHY Page 2 */
+
+typedef struct _MPI2_SASPHY2_PHY_EVENT {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 PhyEventInfo; /*0x04 */
+} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT,
+ Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
+#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U8
+ NumPhyEvents; /*0x0C */
+ U8
+ Reserved2; /*0x0D */
+ U16
+ Reserved3; /*0x0E */
+ MPI2_SASPHY2_PHY_EVENT
+ PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_2,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
+ Mpi2SasPhyPage2_t,
+ *pMpi2SasPhyPage2_t;
+
+#define MPI2_SASPHY2_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 3 */
+
+typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+} MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ Mpi2SasPhy3PhyEventConfig_t,
+ *pMpi2SasPhy3PhyEventConfig_t;
+
+/*values for PhyEventCode field */
+#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
+#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+
+/*values for the CounterType field */
+#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/*values for the TimeUnits field */
+#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+
+/*values for the ThresholdFlags field */
+#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
+#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U8
+ NumPhyEvents; /*0x0C */
+ U8
+ Reserved2; /*0x0D */
+ U16
+ Reserved3; /*0x0E */
+ MPI2_SASPHY3_PHY_EVENT_CONFIG
+ PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_3,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
+ Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t;
+
+#define MPI2_SASPHY3_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ Reserved1; /*0x08 */
+ U8
+ Reserved2; /*0x0A */
+ U8
+ Flags; /*0x0B */
+ U8
+ InitialFrame[28]; /*0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/*values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
+/****************************************************************************
+* SAS Port Config Pages
+****************************************************************************/
+
+/*SAS Port Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PortNumber; /*0x08 */
+ U8
+ PhysicalPort; /*0x09 */
+ U8
+ PortWidth; /*0x0A */
+ U8
+ PhysicalPortWidth; /*0x0B */
+ U8
+ ZoneGroup; /*0x0C */
+ U8
+ Reserved1; /*0x0D */
+ U16
+ Reserved2; /*0x0E */
+ U64
+ SASAddress; /*0x10 */
+ U32
+ DeviceInfo; /*0x18 */
+ U32
+ Reserved3; /*0x1C */
+ U32
+ Reserved4; /*0x20 */
+} MPI2_CONFIG_PAGE_SAS_PORT_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
+ Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t;
+
+#define MPI2_SASPORT0_PAGEVERSION (0x00)
+
+/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+/*SAS Enclosure Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ EnclosureLogicalID; /*0x0C */
+ U16
+ Flags; /*0x14 */
+ U16
+ EnclosureHandle; /*0x16 */
+ U16
+ NumSlots; /*0x18 */
+ U16
+ StartSlot; /*0x1A */
+ U16
+ Reserved2; /*0x1C */
+ U16
+ SEPDevHandle; /*0x1E */
+ U32
+ Reserved3; /*0x20 */
+ U32
+ Reserved4; /*0x24 */
+} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
+
+/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Page
+****************************************************************************/
+
+/*Log Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLogEntries at runtime.
+ */
+#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
+#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_LOG_0_ENTRY {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8
+ LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */
+} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY,
+ Mpi2Log0Entry_t, *pMpi2Log0Entry_t;
+
+/*values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
+#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
+#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
+
+typedef struct _MPI2_CONFIG_PAGE_LOG_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 NumLogEntries;/*0x10 */
+ U16 Reserved3; /*0x12 */
+ MPI2_LOG_0_ENTRY
+ LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */
+} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0,
+ Mpi2LogPage0_t, *pMpi2LogPage0_t;
+
+#define MPI2_LOG_0_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* RAID Config Page
+****************************************************************************/
+
+/*RAID Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumElements at runtime.
+ */
+#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
+#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
+#endif
+
+typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 HotSparePool; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ Mpi2RaidConfig0ConfigElement_t,
+ *pMpi2RaidConfig0ConfigElement_t;
+
+/*values for the ElementFlags field */
+#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
+#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
+#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
+
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumHotSpares; /*0x08 */
+ U8 NumPhysDisks; /*0x09 */
+ U8 NumVolumes; /*0x0A */
+ U8 ConfigNum; /*0x0B */
+ U32 Flags; /*0x0C */
+ U8 ConfigGUID[24]; /*0x10 */
+ U32 Reserved1; /*0x28 */
+ U8 NumElements; /*0x2C */
+ U8 Reserved2; /*0x2D */
+ U16 Reserved3; /*0x2E */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT
+ ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */
+} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ Mpi2RaidConfigurationPage0_t,
+ *pMpi2RaidConfigurationPage0_t;
+
+#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
+
+/*values for RAID Configuration Page 0 Flags field */
+#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
+
+
+/****************************************************************************
+* Driver Persistent Mapping Config Pages
+****************************************************************************/
+
+/*Driver Persistent Mapping Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY {
+ U64 PhysicalIdentifier; /*0x00 */
+ U16 MappingInformation; /*0x08 */
+ U16 DeviceIndex; /*0x0A */
+ U32 PhysicalBitsMapping; /*0x0C */
+ U32 Reserved1; /*0x10 */
+} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */
+} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t;
+
+#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
+
+/*values for Driver Persistent Mapping Page 0 MappingInformation field */
+#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
+#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
+#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
+
+
+/****************************************************************************
+* Ethernet Config Pages
+****************************************************************************/
+
+/*Ethernet Page 0 */
+
+/*IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR {
+ U32 IPv4Addr;
+ U32 IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR,
+ Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumInterfaces; /*0x08 */
+ U8 Reserved0; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Status; /*0x0C */
+ U8 MediaState; /*0x10 */
+ U8 Reserved2; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U8 MacAddress[6]; /*0x14 */
+ U8 Reserved4; /*0x1A */
+ U8 Reserved5; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */
+ MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */
+ MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+ Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
+
+/*values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
+#define MPI2_ETHPG0_MS_10MBIT (0x01)
+#define MPI2_ETHPG0_MS_100MBIT (0x02)
+#define MPI2_ETHPG0_MS_1GBIT (0x03)
+
+
+/*Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved0; /*0x08 */
+ U32
+ Flags; /*0x0C */
+ U8
+ MediaState; /*0x10 */
+ U8
+ Reserved1; /*0x11 */
+ U16
+ Reserved2; /*0x12 */
+ U8
+ MacAddress[6]; /*0x14 */
+ U8
+ Reserved3; /*0x1A */
+ U8
+ Reserved4; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR
+ StaticIpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticSubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticGatewayIpAddress; /*0x3C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS2IpAddress; /*0x5C */
+ U32
+ Reserved5; /*0x6C */
+ U32
+ Reserved6; /*0x70 */
+ U32
+ Reserved7; /*0x74 */
+ U32
+ Reserved8; /*0x78 */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+ Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
+
+/*values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+
+
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ *Generic structure to use for product-specific extended manufacturing pages
+ *(currently Extended Manufacturing Page 40 through Extended Manufacturing
+ *Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ ProductSpecificInfo; /*0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t,
+ *pMpi2ExtManufacturingPagePS_t;
+
+/*PageVersion should be provided by product-specific code */
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
new file mode 100644
index 000000000..068c98efd
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.15
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h becasue they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_INIT_H
+#define MPI2_INIT_H
+
+/*****************************************************************************
+*
+* SCSI Initiator Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SCSI IO messages and associated structures
+****************************************************************************/
+
+typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
+ U8 CDB[20]; /*0x00 */
+ U32 PrimaryReferenceTag; /*0x14 */
+ U16 PrimaryApplicationTag; /*0x18 */
+ U16 PrimaryApplicationTagMask; /*0x1A */
+ U32 TransferLength; /*0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t;
+
+/*MPI v2.0 CDB field */
+typedef union _MPI2_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t;
+
+/*MPI v2.0 SCSI IO Request Message */
+typedef struct _MPI2_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U16 SGLFlags; /*0x10 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U32 EEDPBlockSize; /*0x28 */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI2_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST,
+ Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t;
+
+/*SCSI IO MsgFlags bits */
+
+/*MsgFlags for SenseBufferAddressSpace */
+#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
+#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
+#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+
+/*SCSI IO SGLFlags bits */
+
+/*base values for Data Location Address Space */
+#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+
+/*base values for Type */
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
+
+/*shift values for each sub-field */
+#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+
+/*number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
+
+/*SCSI IO IoFlags bits */
+
+/*Large CDB Address Space */
+#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
+#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
+
+#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
+#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*SCSI IO EEDPFlags bits */
+
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
+
+/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
+
+/*SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/*alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
+
+/*MPI v2.5 CDB field */
+typedef union _MPI25_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_IEEE_SGE_SIMPLE64 SGE;
+} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION,
+ Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t;
+
+/*MPI v2.5 SCSI IO Request Message */
+typedef struct _MPI25_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U8 DMAFlags; /*0x10 */
+ U8 Reserved5; /*0x11 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U16 EEDPBlockSize; /*0x28 */
+ U16 Reserved6; /*0x2A */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI25_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST,
+ Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t;
+
+/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */
+
+/*Defines for the DMAFlags field
+ * Each setting affects 4 SGLS, from SGL0 to SGL3.
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/*number of SGLOffset fields */
+#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
+
+/*defines for the IoFlags field */
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+
+#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*MPI v2.5 defines for the EEDPFlags bits */
+/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */
+#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+/*use MPI2_LUN_ defines from mpi2.h for the LUN field */
+
+/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */
+
+/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so
+ * MPI2_SCSI_IO_REPLY is used for both.
+ */
+
+/*SCSI IO Error Reply Message */
+typedef struct _MPI2_SCSI_IO_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 SCSIStatus; /*0x0C */
+ U8 SCSIState; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferCount; /*0x14 */
+ U32 SenseCount; /*0x18 */
+ U32 ResponseInfo; /*0x1C */
+ U16 TaskTag; /*0x20 */
+ U16 SCSIStatusQualifier; /* 0x22 */
+ U32 BidirectionalTransferCount; /*0x24 */
+ U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
+ U32 Reserved6; /*0x2C */
+} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
+ Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+
+/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI2_SCSI_STATUS_BUSY (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
+
+/*SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI2_SCSI_STATE_TERMINATED (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved1; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 LUN[8]; /*0x0C */
+ U32 Reserved4[7]; /*0x14 */
+ U16 TaskMID; /*0x30 */
+ U16 Reserved5; /*0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+ Mpi2SCSITaskManagementRequest_t,
+ *pMpi2SCSITaskManagementRequest_t;
+
+/*TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/*obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \
+ (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/*MsgFlags bits */
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
+/*SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 ResponseCode; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TerminationCount; /*0x14 */
+ U32 ResponseInfo; /*0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t;
+
+/*ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/*SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 SlotStatus; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, *pMpi2SepRequest_t;
+
+/*Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/*Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+
+/*SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
+
+/*SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 SlotStatus; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, *pMpi2SepReply_t;
+
+/*SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
new file mode 100644
index 000000000..490830957
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -0,0 +1,1727 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
+ *
+ * mpi2_ioc.h Version: 02.00.23
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocalFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the indivdual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_IOC_H
+#define MPI2_IOC_H
+
+/*****************************************************************************
+*
+* IOC Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IOCInit message
+****************************************************************************/
+
+/*IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 MsgVersion; /*0x0C */
+ U16 HeaderVersion; /*0x0E */
+ U32 Reserved5; /*0x10 */
+ U16 Reserved6; /*0x14 */
+ U8 Reserved7; /*0x16 */
+ U8 HostMSIxVectors; /*0x17 */
+ U16 Reserved8; /*0x18 */
+ U16 SystemRequestFrameSize; /*0x1A */
+ U16 ReplyDescriptorPostQueueDepth; /*0x1C */
+ U16 ReplyFreeQueueDepth; /*0x1E */
+ U32 SenseBufferAddressHigh; /*0x20 */
+ U32 SystemReplyAddressHigh; /*0x24 */
+ U64 SystemRequestFrameBaseAddress; /*0x28 */
+ U64 ReplyDescriptorPostQueueAddress; /*0x30 */
+ U64 ReplyFreeQueueAddress; /*0x38 */
+ U64 TimeStamp; /*0x40 */
+} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t;
+
+/*WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI2_WHOINIT_ROM_BIOS (0x02)
+#define MPI2_WHOINIT_PCI_PEER (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_WHOINIT_MANUFACTURER (0x05)
+
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
+
+
+/*MsgVersion */
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+
+/*minimum depth for a Reply Descriptor Post Queue */
+#define MPI2_RDPQ_DEPTH_MIN (16)
+
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+ U64 RDPQBaseAddress; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry;
+
+
+/*IOCInit Reply message */
+typedef struct _MPI2_IOC_INIT_REPLY {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY,
+ Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t;
+
+/****************************************************************************
+* IOCFacts message
+****************************************************************************/
+
+/*IOCFacts Request message */
+typedef struct _MPI2_IOC_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST,
+ Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t;
+
+/*IOCFacts Reply message */
+typedef struct _MPI2_IOC_FACTS_REPLY {
+ U16 MsgVersion; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 HeaderVersion; /*0x04 */
+ U8 IOCNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 IOCExceptions; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 MaxChainDepth; /*0x14 */
+ U8 WhoInit; /*0x15 */
+ U8 NumberOfPorts; /*0x16 */
+ U8 MaxMSIxVectors; /*0x17 */
+ U16 RequestCredit; /*0x18 */
+ U16 ProductID; /*0x1A */
+ U32 IOCCapabilities; /*0x1C */
+ MPI2_VERSION_UNION FWVersion; /*0x20 */
+ U16 IOCRequestFrameSize; /*0x24 */
+ U16 IOCMaxChainSegmentSize; /*0x26 */
+ U16 MaxInitiators; /*0x28 */
+ U16 MaxTargets; /*0x2A */
+ U16 MaxSasExpanders; /*0x2C */
+ U16 MaxEnclosures; /*0x2E */
+ U16 ProtocolFlags; /*0x30 */
+ U16 HighPriorityCredit; /*0x32 */
+ U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */
+ U8 ReplyFrameSize; /*0x36 */
+ U8 MaxVolumes; /*0x37 */
+ U16 MaxDevHandle; /*0x38 */
+ U16 MaxPersistentEntries; /*0x3A */
+ U16 MinDevHandle; /*0x3C */
+ U16 Reserved4; /*0x3E */
+} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
+ Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
+
+/*MsgVersion */
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
+
+/*IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200)
+#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
+
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060)
+
+#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
+#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008)
+#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
+#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
+#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
+
+/*defines for WhoInit field are after the IOCInit Request */
+
+/*ProductID field uses MPI2_FW_HEADER_PID_ */
+
+/*IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
+#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
+#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
+#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
+#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
+#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
+#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
+#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
+#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
+#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
+#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
+#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
+#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
+
+/*ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+
+/****************************************************************************
+* PortFacts message
+****************************************************************************/
+
+/*PortFacts Request message */
+typedef struct _MPI2_PORT_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST,
+ Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t;
+
+/*PortFacts Reply message */
+typedef struct _MPI2_PORT_FACTS_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 Reserved5; /*0x14 */
+ U8 PortType; /*0x15 */
+ U16 Reserved6; /*0x16 */
+ U16 MaxPostedCmdBuffers; /*0x18 */
+ U16 Reserved7; /*0x1A */
+} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY,
+ Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t;
+
+/*PortType values */
+#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00)
+#define MPI2_PORTFACTS_PORTTYPE_FC (0x10)
+#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+
+/****************************************************************************
+* PortEnable message
+****************************************************************************/
+
+/*PortEnable Request message */
+typedef struct _MPI2_PORT_ENABLE_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST,
+ Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t;
+
+/*PortEnable Reply message */
+typedef struct _MPI2_PORT_ENABLE_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY,
+ Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t;
+
+/****************************************************************************
+* EventNotification message
+****************************************************************************/
+
+/*EventNotification Request message */
+#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */
+ U16 SASBroadcastPrimitiveMasks; /*0x24 */
+ U16 SASNotifyPrimitiveMasks; /*0x26 */
+ U32 Reserved8; /*0x28 */
+} MPI2_EVENT_NOTIFICATION_REQUEST,
+ *PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
+ Mpi2EventNotificationRequest_t,
+ *pMpi2EventNotificationRequest_t;
+
+/*EventNotification Reply message */
+typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
+ U16 EventDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 AckRequired; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 Event; /*0x14 */
+ U16 Reserved4; /*0x16 */
+ U32 EventContext; /*0x18 */
+ U32 EventData[1]; /*0x1C */
+} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
+ Mpi2EventNotificationReply_t,
+ *pMpi2EventNotificationReply_t;
+
+/*AckRequired */
+#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
+
+/*Event */
+#define MPI2_EVENT_LOG_DATA (0x0001)
+#define MPI2_EVENT_STATE_CHANGE (0x0002)
+#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
+#define MPI2_EVENT_EVENT_CHANGE (0x000A)
+#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */
+#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
+#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
+#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
+#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
+#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
+#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
+#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
+#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_IR_VOLUME (0x001E)
+#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
+#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
+#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
+#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE (0x0028)
+#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
+
+/*Log Entry Added Event data */
+
+/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
+#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */
+} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ Mpi2EventDataLogEntryAdded_t,
+ *pMpi2EventDataLogEntryAdded_t;
+
+/*GPIO Interrupt Event data */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
+ U8 GPIONum; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ Mpi2EventDataGpioInterrupt_t,
+ *pMpi2EventDataGpioInterrupt_t;
+
+/*Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+ U16 Status; /*0x00 */
+ U8 SensorNum; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U16 CurrentTemperature; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+ *PTR_MPI2_EVENT_DATA_TEMPERATURE,
+ Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t;
+
+/*Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001)
+
+/*Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+ U8 SourceVF_ID; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ U32 HostData[1]; /*0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+ Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
+
+/*Power Performance Change Event */
+
+typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
+ U8 CurrentPowerMode; /*0x00 */
+ U8 PreviousPowerMode; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ Mpi2EventDataPowerPerfChange_t,
+ *pMpi2EventDataPowerPerfChange_t;
+
+/*defines for CurrentPowerMode and PreviousPowerMode fields */
+#define MPI2_EVENT_PM_INIT_MASK (0xC0)
+#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_INIT_HOST (0x40)
+#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80)
+#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI2_EVENT_PM_MODE_MASK (0x07)
+#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01)
+#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04)
+#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05)
+#define MPI2_EVENT_PM_MODE_STANDBY (0x06)
+
+/*Hard Reset Received Event data */
+
+typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED {
+ U8 Reserved1; /*0x00 */
+ U8 Port; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ Mpi2EventDataHardResetReceived_t,
+ *pMpi2EventDataHardResetReceived_t;
+
+/*Task Set Full Event data */
+/* this event is obsolete */
+
+typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL {
+ U16 DevHandle; /*0x00 */
+ U16 CurrentDepth; /*0x02 */
+} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
+ Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t;
+
+/*SAS Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 SASAddress; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ Mpi2EventDataSasDeviceStatusChange_t,
+ *pMpi2EventDataSasDeviceStatusChange_t;
+
+/*SAS Device Status Change Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
+
+/*Integrated RAID Operation Status Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS {
+ U16 VolDevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 RAIDOperation; /*0x04 */
+ U8 PercentComplete; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 ElapsedSeconds; /*0x08 */
+} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ Mpi2EventDataIrOperationStatus_t,
+ *pMpi2EventDataIrOperationStatus_t;
+
+/*Integrated RAID Operation Status Event data RAIDOperation values */
+#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
+#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
+#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
+#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
+
+/*Integrated RAID Volume Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_VOLUME {
+ U16 VolDevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 NewValue; /*0x04 */
+ U32 PreviousValue; /*0x08 */
+} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME,
+ Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t;
+
+/*Integrated RAID Volume Event data ReasonCode values */
+#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Physical Disk Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK {
+ U16 Reserved1; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysDiskNum; /*0x03 */
+ U16 PhysDiskDevHandle; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U32 NewValue; /*0x0C */
+ U32 PreviousValue; /*0x10 */
+} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ Mpi2EventDataIrPhysicalDisk_t,
+ *pMpi2EventDataIrPhysicalDisk_t;
+
+/*Integrated RAID Physical Disk Event data ReasonCode values */
+#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Configuration Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumElements at runtime.
+ */
+#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
+#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 ReasonCode; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
+ Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t;
+
+/*IR Configuration Change List Event data ElementFlags values */
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
+
+/*IR Configuration Change List Event data ReasonCode values */
+#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01)
+#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02)
+#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
+#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04)
+#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
+
+typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST {
+ U8 NumElements; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 Reserved2; /*0x02 */
+ U8 ConfigNum; /*0x03 */
+ U32 Flags; /*0x04 */
+ MPI2_EVENT_IR_CONFIG_ELEMENT
+ ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */
+} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ Mpi2EventDataIrConfigChangeList_t,
+ *pMpi2EventDataIrConfigChangeList_t;
+
+/*IR Configuration Change List Event data Flags values */
+#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
+
+/*SAS Discovery Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 DiscoveryStatus; /*0x04 */
+} MPI2_EVENT_DATA_SAS_DISCOVERY,
+ *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
+ Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t;
+
+/*SAS Discovery Event data Flags values */
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02)
+#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01)
+
+/*SAS Discovery Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+
+/*SAS Discovery Event data DiscoveryStatus values */
+#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400)
+#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001)
+
+/*SAS Broadcast Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 PortWidth; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ Mpi2EventDataSasBroadcastPrimitive_t,
+ *pMpi2EventDataSasBroadcastPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI2_EVENT_PRIMITIVE_SES (0x02)
+#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+/*SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 Reserved1; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ Mpi2EventDataSasNotifyPrimitive_t,
+ *pMpi2EventDataSasNotifyPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
+/*SAS Initiator Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE {
+ U8 ReasonCode; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U16 DevHandle; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasInitDevStatusChange_t,
+ *pMpi2EventDataSasInitDevStatusChange_t;
+
+/*SAS Initiator Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+
+/*SAS Initiator Device Table Overflow Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW {
+ U16 MaxInit; /*0x00 */
+ U16 CurrentInit; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ Mpi2EventDataSasInitTableOverflow_t,
+ *pMpi2EventDataSasInitTableOverflow_t;
+
+/*SAS Topology Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 LinkRate; /*0x02 */
+ U8 PhyStatus; /*0x03 */
+} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
+ Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t;
+
+typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 ExpanderDevHandle; /*0x02 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPhyNum; /*0x09 */
+ U8 ExpStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY
+ PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */
+} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ Mpi2EventDataSasTopologyChangeList_t,
+ *pMpi2EventDataSasTopologyChangeList_t;
+
+/*values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+
+/*defines for the LinkRate field */
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0)
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+
+#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
+#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+
+/*values for the PhyStatus field */
+#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
+#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10)
+/*values for the PhyStatus ReasonCode sub-field */
+#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
+#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
+#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
+
+/*SAS Enclosure Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
+ U16 EnclosureHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U64 EnclosureLogicalID; /*0x04 */
+ U16 NumSlots; /*0x0C */
+ U16 StartSlot; /*0x0E */
+ U32 PhyBits; /*0x10 */
+} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasEnclDevStatusChange_t,
+ *pMpi2EventDataSasEnclDevStatusChange_t;
+
+/*SAS Enclosure Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+
+/*SAS PHY Counter Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 PhyEventCode; /*0x0C */
+ U8 PhyNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 PhyEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ Mpi2EventDataSasPhyCounter_t,
+ *pMpi2EventDataSasPhyCounter_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h
+ *for the PhyEventCode field */
+
+/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h
+ *for the CounterType field */
+
+/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h
+ *for the TimeUnits field */
+
+/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h
+ *for the ThresholdFlags field */
+
+/*SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE {
+ U8 ReasonCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ *PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t;
+
+/*SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
+
+/*Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+ U8 Flags; /*0x00 */
+ U8 NegotiatedLinkRate; /*0x01 */
+ U8 PhyNum; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1; /*0x04 */
+ U8 InitialFrame[28]; /*0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t;
+
+/*values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h
+ *for the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+ U8 DescriptorType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t,
+ *pMpi2EventDataMpi2EventDataHbdPhy_t;
+
+/*values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+/****************************************************************************
+* EventAck message
+****************************************************************************/
+
+/*EventAck Request message */
+typedef struct _MPI2_EVENT_ACK_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Event; /*0x0C */
+ U16 Reserved5; /*0x0E */
+ U32 EventContext; /*0x10 */
+} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST,
+ Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t;
+
+/*EventAck Reply message */
+typedef struct _MPI2_EVENT_ACK_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY,
+ Mpi2EventAckReply_t, *pMpi2EventAckReply_t;
+
+/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/*SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+ U16 HostDataLength; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 Reserved4; /*0x0C */
+ U8 DestVF_ID; /*0x0D */
+ U16 Reserved5; /*0x0E */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 Reserved8; /*0x18 */
+ U32 Reserved9; /*0x1C */
+ U32 Reserved10; /*0x20 */
+ U32 HostData[1]; /*0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+ *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+ Mpi2SendHostMessageRequest_t,
+ *pMpi2SendHostMessageRequest_t;
+
+/*SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+ U16 HostDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+ Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t;
+
+/****************************************************************************
+* FWDownload message
+****************************************************************************/
+
+/*MPI v2.0 FWDownload Request message */
+typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST,
+ Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest;
+
+#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+
+/*MPI v2.0 FWDownload TransactionContext Element */
+typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE,
+ Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t;
+
+/*MPI v2.5 FWDownload Request message */
+typedef struct _MPI25_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST,
+ Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest;
+
+/*FWDownload Reply message */
+typedef struct _MPI2_FW_DOWNLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY,
+ Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t;
+
+/****************************************************************************
+* FWUpload message
+****************************************************************************/
+
+/*MPI v2.0 FWUpload Request message */
+typedef struct _MPI2_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST,
+ Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t;
+
+#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00)
+#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+/*MPI v2.0 FWUpload TransactionContext Element */
+typedef struct _MPI2_FW_UPLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t;
+
+/*MPI v2.5 FWUpload Request message */
+typedef struct _MPI25_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST,
+ Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t;
+
+/*FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ActualImageSize; /*0x14 */
+} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
+ Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t;
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+ U32 Signature; /*0x00 */
+ U32 Signature0; /*0x04 */
+ U32 Signature1; /*0x08 */
+ U32 Signature2; /*0x0C */
+ MPI2_VERSION_UNION MPIVersion; /*0x10 */
+ MPI2_VERSION_UNION FWVersion; /*0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
+ MPI2_VERSION_UNION PackageVersion; /*0x1C */
+ U16 VendorID; /*0x20 */
+ U16 ProductID; /*0x22 */
+ U16 ProtocolFlags; /*0x24 */
+ U16 Reserved26; /*0x26 */
+ U32 IOCCapabilities; /*0x28 */
+ U32 ImageSize; /*0x2C */
+ U32 NextImageHeaderOffset; /*0x30 */
+ U32 Checksum; /*0x34 */
+ U32 Reserved38; /*0x38 */
+ U32 Reserved3C; /*0x3C */
+ U32 Reserved40; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ U32 Reserved4C; /*0x4C */
+ U32 Reserved50; /*0x50 */
+ U32 Reserved54; /*0x54 */
+ U32 Reserved58; /*0x58 */
+ U32 Reserved5C; /*0x5C */
+ U32 Reserved60; /*0x60 */
+ U32 FirmwareVersionNameWhat; /*0x64 */
+ U8 FirmwareVersionName[32]; /*0x68 */
+ U32 VendorNameWhat; /*0x88 */
+ U8 VendorName[32]; /*0x8C */
+ U32 PackageNameWhat; /*0x88 */
+ U8 PackageName[32]; /*0x8C */
+ U32 ReservedD0; /*0xD0 */
+ U32 ReservedD4; /*0xD4 */
+ U32 ReservedD8; /*0xD8 */
+ U32 ReservedDC; /*0xDC */
+ U32 ReservedE0; /*0xE0 */
+ U32 ReservedE4; /*0xE4 */
+ U32 ReservedE8; /*0xE8 */
+ U32 ReservedEC; /*0xEC */
+ U32 ReservedF0; /*0xF0 */
+ U32 ReservedF4; /*0xF4 */
+ U32 ReservedF8; /*0xF8 */
+ U32 ReservedFC; /*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
+/*Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Checksum; /*0x04 */
+ U32 ImageSize; /*0x08 */
+ U32 NextImageHeaderOffset; /*0x0C */
+ U32 PackageVersion; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U32 Reserved5; /*0x1C */
+ U8 IdentifyString[32]; /*0x20 */
+} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
+
+/*useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/*defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
+
+/*FLASH Layout Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION {
+ U8 RegionType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 RegionOffset; /*0x04 */
+ U32 RegionSize; /*0x08 */
+ U32 Reserved3; /*0x0C */
+} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT {
+ U32 FlashSize; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
+} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 SizeOfRegion; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U16 NumberOfLayouts; /*0x04 */
+ U16 RegionsPerLayout; /*0x06 */
+ U16 MinimumSectorAlignment; /*0x08 */
+ U16 Reserved3; /*0x0A */
+ U32 Reserved4; /*0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
+} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+
+/*defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_INIT (0x0A)
+
+/*ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
+
+/*Supported Devices Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE {
+ U16 DeviceID; /*0x00 */
+ U16 VendorID; /*0x02 */
+ U16 DeviceIDMask; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U8 LowPCIRev; /*0x08 */
+ U8 HighPCIRev; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 NumberOfDevices; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U32 Reserved3; /*0x04 */
+ MPI2_SUPPORTED_DEVICE
+ SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+
+/*ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
+
+/*Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER {
+ U32 BootFlags; /*0x00 */
+ U32 ImageSize; /*0x04 */
+ U32 Signature0; /*0x08 */
+ U32 Signature1; /*0x0C */
+ U32 Signature2; /*0x10 */
+ U32 ResetVector; /*0x14 */
+} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+
+/*defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/*defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/*defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/*defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/*defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/*Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/*defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+ U8 HashImageType; /* 0x00 */
+ U8 HashAlgorithm; /* 0x01 */
+ U8 EncryptionAlgorithm; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+ U8 ImageVersion; /* 0x00 */
+ U8 NumHash; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 Reserved2; /* 0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/*PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Parameter1; /*0x0C */
+ U8 Parameter2; /*0x0D */
+ U8 Parameter3; /*0x0E */
+ U8 Parameter4; /*0x0F */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t;
+
+/*defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/*Parameter1 contains a PHY number */
+/*Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION
+ * Feature */
+/*Parameter1 contains SAS port width modulation group number */
+/*Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/*Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/*Parameter4 is reserved */
+
+/*this next set (_PCIE_LINK) is obsolete */
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/*Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */
+/*Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/*Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/*Parameter2, Parameter3, and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/
+/*Parameter1 indicates host action regarding global power management mode */
+#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01)
+#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02)
+#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03)
+/*Parameter2 indicates the requested global power management mode */
+#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01)
+#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08)
+#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40)
+/*Parameter3 and Parameter4 are reserved */
+
+/*PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
new file mode 100644
index 000000000..13d93ca02
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_raid.h
+ * Title: MPI Integrated RAID messages and structures
+ * Creation Date: April 26, 2007
+ *
+ * mpi2_raid.h Version: 02.00.10
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
+ * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RAID_H
+#define MPI2_RAID_H
+
+/*****************************************************************************
+*
+* Integrated RAID Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* RAID Action messages
+****************************************************************************/
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000)
+
+/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
+#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
+#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
+
+/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
+typedef struct _MPI2_RAID_ACTION_RATE_DATA {
+ U8 RateToChange; /*0x00 */
+ U8 RateOrMode; /*0x01 */
+ U16 DataScrubDuration; /*0x02 */
+} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA,
+ Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t;
+
+#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
+#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
+#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
+
+/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ Mpi2RaidActionStartRaidFunction_t,
+ *pMpi2RaidActionStartRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_START_NEW (0x00)
+#define MPI2_RAID_ACTION_START_RESUME (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ Mpi2RaidActionStopRaidFunction_t,
+ *pMpi2RaidActionStopRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
+#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
+typedef struct _MPI2_RAID_ACTION_HOT_SPARE {
+ U8 HotSparePool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 DevHandle; /*0x02 */
+} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE,
+ Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t;
+
+/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
+typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE {
+ U8 Flags; /*0x00 */
+ U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ Mpi2RaidActionFwUpdateMode_t,
+ *pMpi2RaidActionFwUpdateMode_t;
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
+#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
+
+typedef union _MPI2_RAID_ACTION_DATA {
+ U32 Word;
+ MPI2_RAID_ACTION_RATE_DATA Rates;
+ MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
+ MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
+ MPI2_RAID_ACTION_HOT_SPARE HotSpare;
+ MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
+} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA,
+ Mpi2RaidActionData_t, *pMpi2RaidActionData_t;
+
+/*RAID Action Request Message */
+typedef struct _MPI2_RAID_ACTION_REQUEST {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */
+ MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */
+} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST,
+ Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t;
+
+/*RAID Action request Action values */
+
+#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
+#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
+#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
+#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
+#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
+#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
+#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
+#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
+#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
+#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
+#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*RAID Volume Creation Structure */
+
+/*
+ *The following define can be customized for the targeted product.
+ */
+#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
+#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1)
+#endif
+
+typedef struct _MPI2_RAID_VOLUME_PHYSDISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U16 PhysDiskDevHandle; /*0x02 */
+} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK,
+ Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT {
+ U8 NumPhysDisks; /*0x00 */
+ U8 VolumeType; /*0x01 */
+ U16 Reserved1; /*0x02 */
+ U32 VolumeCreationFlags; /*0x04 */
+ U32 VolumeSettings; /*0x08 */
+ U8 Reserved2; /*0x0C */
+ U8 ResyncRate; /*0x0D */
+ U16 DataScrubDuration; /*0x0E */
+ U64 VolumeMaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U8 Name[16]; /*0x1C */
+ MPI2_RAID_VOLUME_PHYSDISK
+ PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */
+} MPI2_RAID_VOLUME_CREATION_STRUCT,
+ *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
+ Mpi2RaidVolumeCreationStruct_t,
+ *pMpi2RaidVolumeCreationStruct_t;
+
+/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
+
+/*defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004)
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
+/*The following is an obsolete define.
+ *It must be shifted left 24 bits in order to set the proper bit.
+ */
+#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
+
+/*RAID Online Capacity Expansion Structure */
+
+typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION {
+ U32 Flags; /*0x00 */
+ U16 DevHandle0; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U16 DevHandle1; /*0x08 */
+ U16 Reserved2; /*0x0A */
+} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ Mpi2RaidOnlineCapacityExpansion_t,
+ *pMpi2RaidOnlineCapacityExpansion_t;
+
+/*RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /*0x00 */
+ U16 CandidateDevHandle; /*0x02 */
+ U32 Flags; /*0x04 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ Mpi2RaidCompatibilityInputStruct_t,
+ *pMpi2RaidCompatibilityInputStruct_t;
+
+/*defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
+/*RAID Volume Indicator Structure */
+
+typedef struct _MPI2_RAID_VOL_INDICATOR {
+ U64 TotalBlocks; /*0x00 */
+ U64 BlocksRemaining; /*0x08 */
+ U32 Flags; /*0x10 */
+ U32 ElapsedSeconds; /* 0x14 */
+} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR,
+ Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t;
+
+/*defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000)
+#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
+#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
+#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
+#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
+#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+
+/*RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 GenericAttributes; /*0x04 */
+ U32 OEMSpecificAttributes; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ Mpi2RaidCompatibilityResultStruct_t,
+ *pMpi2RaidCompatibilityResultStruct_t;
+
+/*defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/*defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
+
+/*RAID Action Reply ActionData union */
+typedef union _MPI2_RAID_ACTION_REPLY_DATA {
+ U32 Word[6];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
+} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA,
+ Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t;
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*RAID Action Reply Message */
+typedef struct _MPI2_RAID_ACTION_REPLY {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */
+} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY,
+ Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
new file mode 100644
index 000000000..156e30543
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: February 9, 2007
+ *
+ * mpi2_sas.h Version: 02.00.08
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
+ * Passthrough Request message.
+ * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
+ * for anything newer than MPI v2.0.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_SAS_H
+#define MPI2_SAS_H
+
+/*
+ *Values for SASStatus.
+ */
+#define MPI2_SASSTATUS_SUCCESS (0x00)
+#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+/*
+ *Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ *data and SAS Configuration pages.
+ */
+#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+/*****************************************************************************
+*
+* SAS Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SMP Passthrough messages
+****************************************************************************/
+
+/*SMP Passthrough Request Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 RequestDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U64 SASAddress; /*0x10 */
+ U32 Reserved3; /*0x18 */
+ U32 Reserved4; /*0x1C */
+ MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */
+} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
+ Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*SMP Passthrough Reply Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REPLY {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ResponseDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U8 ResponseData[4]; /*0x18 */
+} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY,
+ Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SATA Passthrough messages
+****************************************************************************/
+
+typedef union _MPI2_SATA_PT_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */
+ MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */
+} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION,
+ Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t;
+
+/*SATA Passthrough Request Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 DataLength; /*0x18 */
+ U8 CommandFIS[20]; /*0x1C */
+ MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/
+} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
+ Mpi2SataPassthroughRequest_t,
+ *pMpi2SataPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*SATA Passthrough Reply Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 StatusFIS[20]; /*0x14 */
+ U32 StatusControlRegisters; /*0x28 */
+ U32 TransferCount; /*0x2C */
+} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY,
+ Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t;
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SAS IO Unit Control messages
+****************************************************************************/
+
+/*SAS IO Unit Control Request Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U8 PhyNum; /*0x0E */
+ U8 PrimFlags; /*0x0F */
+ U32 Primitive; /*0x10 */
+ U8 LookupMethod; /*0x14 */
+ U8 Reserved5; /*0x15 */
+ U16 SlotNumber; /*0x16 */
+ U64 LookupAddress; /*0x18 */
+ U32 IOCParameterValue; /*0x20 */
+ U32 Reserved7; /*0x24 */
+ U32 Reserved8; /*0x28 */
+} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ Mpi2SasIoUnitControlRequest_t,
+ *pMpi2SasIoUnitControlRequest_t;
+
+/*values for the Operation field */
+#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */
+#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
+#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12)
+#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/*values for the PrimFlags field */
+#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+/*values for the LookupMethod field */
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+
+/*SAS IO Unit Control Reply Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
new file mode 100644
index 000000000..904910d8a
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_tool.h
+ * Title: MPI diagnostic tool structures and definitions
+ * Creation Date: March 26, 2007
+ *
+ * mpi2_tool.h Version: 02.00.11
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request
+ * message.
+ * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
+ * it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TOOL_H
+#define MPI2_TOOL_H
+
+/*****************************************************************************
+*
+* Toolbox Messages
+*
+*****************************************************************************/
+
+/*defines for the Tools */
+#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
+#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
+
+/****************************************************************************
+* Toolbox reply
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY,
+ Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t;
+
+/****************************************************************************
+* Toolbox Clean Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Flags; /*0x0C */
+} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
+ Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
+#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+
+/****************************************************************************
+* Toolbox Memory Move request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */
+} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
+ Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t;
+
+/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 Flags; /*0x10 */
+ U32 DataLength; /*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ Mpi2ToolboxDiagDataUploadRequest_t,
+ *pMpi2ToolboxDiagDataUploadRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
+ U32 DiagDataLength; /*00h */
+ U8 FormatCode; /*04h */
+ U8 Reserved1; /*05h */
+ U16 Reserved2; /*06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+ Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t;
+
+/****************************************************************************
+* Toolbox ISTWI Read Write Tool
+****************************************************************************/
+
+/*Toolbox ISTWI Read Write Tool request message */
+typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 SGLFlags; /*0x16 */
+ U8 Flags; /*0x17 */
+ U16 TxDataLength; /*0x18 */
+ U16 RxDataLength; /*0x1A */
+ U32 Reserved8; /*0x1C */
+ U32 Reserved9; /*0x20 */
+ U32 Reserved10; /*0x24 */
+ U32 Reserved11; /*0x28 */
+ U32 Reserved12; /*0x2C */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */
+} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ Mpi2ToolboxIstwiReadWriteRequest_t,
+ *pMpi2ToolboxIstwiReadWriteRequest_t;
+
+/*values for the Action field */
+#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
+#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
+#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
+#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
+#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
+#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+
+/*Toolbox ISTWI Read Write Tool reply message */
+typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 IstwiStatus; /*0x16 */
+ U8 Reserved6; /*0x17 */
+ U16 TxDataCount; /*0x18 */
+ U16 RxDataCount; /*0x1A */
+} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY,
+ Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t;
+
+/****************************************************************************
+* Toolbox Beacon Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_BEACON_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Reserved5; /*0x0C */
+ U8 PhysicalPort; /*0x0D */
+ U8 Reserved6; /*0x0E */
+ U8 Flags; /*0x0F */
+} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST,
+ Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
+
+/****************************************************************************
+* Toolbox Diagnostic CLI Tool
+****************************************************************************/
+
+#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
+
+/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI2_MPI_SGE_IO_UNION SGL; /*0x70 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi2ToolboxDiagnosticCliRequest_t,
+ *pMpi2ToolboxDiagnosticCliRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI25_SGE_IO_UNION SGL; /* 0x70 */
+} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi25ToolboxDiagnosticCliRequest_t,
+ *pMpi25ToolboxDiagnosticCliRequest_t;
+
+/*Toolbox Diagnostic CLI Tool reply message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ReturnedDataLength; /*0x14 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
+ *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
+ Mpi2ToolboxDiagnosticCliReply_t,
+ *pMpi2ToolboxDiagnosticCliReply_t;
+
+
+/****************************************************************************
+* Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Console; /* 0x0C */
+ U8 Flags; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U8 TextToDisplay[4]; /* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+*pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+
+
+
+/*****************************************************************************
+*
+* Diagnostic Buffer Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Diagnostic Buffer Post request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U64 BufferAddress; /*0x0C */
+ U32 BufferLength; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+ U32 Flags; /*0x20 */
+ U32 ProductSpecific[23]; /*0x24 */
+} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
+ Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t;
+
+/*values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
+
+/*values for the BufferType field */
+#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
+/*count of the number of buffer types */
+#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+
+/*values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferLength; /*0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t;
+
+/****************************************************************************
+* Diagnostic Release request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REQUEST {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t;
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
new file mode 100644
index 000000000..99ab09360
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2014 LSI Corporation.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+/*******************************************************************************
+ * Define * if it hasn't already been defined. By default
+ * * is defined to be a near pointer. MPI2_POINTER can be defined as
+ * a far pointer by defining * as "far *" before this header file is
+ * included.
+ */
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
+typedef u8 U8;
+typedef __le16 U16;
+typedef __le32 U32;
+typedef __le64 U64 __attribute__ ((aligned(4)));
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef U8 *PU8;
+typedef U16 *PU16;
+typedef U32 *PU32;
+typedef U64 *PU64;
+
+#endif
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
new file mode 100644
index 000000000..14a781b6b
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -0,0 +1,5033 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/time.h>
+#include <linux/kthread.h>
+#include <linux/aer.h>
+
+
+#include "mpt3sas_base.h"
+
+static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+
+
+#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+
+ /* maximum controller queue depth */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
+static int max_queue_depth = -1;
+module_param(max_queue_depth, int, 0);
+MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+
+static int max_sgl_entries = -1;
+module_param(max_sgl_entries, int, 0);
+MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
+
+static int msix_disable = -1;
+module_param(msix_disable, int, 0);
+MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+static int max_msix_vectors = 8;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors,
+ " max msix vectors - (default=8)");
+
+static int mpt3sas_fwfault_debug;
+MODULE_PARM_DESC(mpt3sas_fwfault_debug,
+ " enable detection of firmware fault and halt firmware - (default=0)");
+
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ *
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+ return 0;
+}
+module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
+ param_get_int, &mpt3sas_fwfault_debug, 0644);
+
+/**
+ * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt3sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if ((ioc == NULL))
+ return -1;
+
+ pdev = ioc->pdev;
+ if ((pdev == NULL))
+ return -1;
+ pci_stop_and_remove_bus_device_locked(pdev);
+ return 0;
+}
+
+/**
+ * _base_fault_reset_work - workq handling ioc fault conditions
+ * @work: input argument, used to derive ioc
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+static void
+_base_fault_reset_work(struct work_struct *work)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
+ unsigned long flags;
+ u32 doorbell;
+ int rc;
+ struct task_struct *p;
+
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
+ ioc->name);
+
+ /*
+ * Call _scsih_flush_pending_cmds callback so that we flush all
+ * pending commands back to OS. This call is required to aovid
+ * deadlock at block layer. Dead IOC will fail to do diag reset,
+ * and this call is safe since dead ioc will never return any
+ * command back from HW.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
+ "mpt3sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p))
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ else
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ return; /* don't rearm timer */
+ }
+
+ if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
+ __func__, (rc == 0) ? "success" : "failed");
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_OPERATIONAL)
+ return; /* don't rearm timer */
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ rearm_timer:
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_start_watchdog - start the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ if (ioc->fault_reset_work_q)
+ return;
+
+ /* initialize fault polling */
+
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+ snprintf(ioc->fault_reset_work_q_name,
+ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+ ioc->fault_reset_work_q =
+ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ if (!ioc->fault_reset_work_q) {
+ pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ wq = ioc->fault_reset_work_q;
+ ioc->fault_reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+{
+ pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
+ ioc->name, fault_code);
+}
+
+/**
+ * mpt3sas_halt_firmware - halt's mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues. Writing 0xCOFFEE00
+ * to the doorbell register will halt controller firmware. With
+ * the purpose to stop both driver and firmware, the enduser can
+ * obtain a ring buffer from controller UART.
+ */
+void
+mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 doorbell;
+
+ if (!ioc->fwfault_debug)
+ return;
+
+ dump_stack();
+
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc , doorbell);
+ else {
+ writel(0xC0FFEE00, &ioc->chip->Doorbell);
+ pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
+ ioc->name);
+ }
+
+ if (ioc->fwfault_debug == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+ MPI2RequestHeader_t *request_hdr)
+{
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ char *desc = NULL;
+ u16 frame_sz;
+ char *func_str = NULL;
+
+ /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
+ switch (ioc_status) {
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_BUSY:
+ desc = "busy";
+ break;
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ desc = "invalid sgl";
+ break;
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ desc = "internal error";
+ break;
+ case MPI2_IOCSTATUS_INVALID_VPID:
+ desc = "invalid vpid";
+ break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ desc = "insufficient resources";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ desc = "invalid field";
+ break;
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ desc = "invalid state";
+ break;
+ case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
+ desc = "op state not supported";
+ break;
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
+ desc = "config invalid action";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
+ desc = "config invalid type";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
+ desc = "config invalid page";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
+ desc = "config invalid data";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
+ desc = "config no defaults";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
+ desc = "config cant commit";
+ break;
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ break;
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc = "eedp app tag error";
+ break;
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
+ desc = "target invalid io index";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ABORTED:
+ desc = "target aborted";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
+ desc = "target no conn retryable";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
+ desc = "target no connection";
+ break;
+ case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
+ desc = "target xfer count mismatch";
+ break;
+ case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
+ desc = "target data offset error";
+ break;
+ case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
+ desc = "target too much write data";
+ break;
+ case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
+ desc = "target iu too short";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
+ desc = "target ack nak timeout";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
+ desc = "target nak received";
+ break;
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
+ desc = "smp request failed";
+ break;
+ case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
+ desc = "smp data overrun";
+ break;
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
+ desc = "diagnostic released";
+ break;
+ default:
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ switch (request_hdr->Function) {
+ case MPI2_FUNCTION_CONFIG:
+ frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
+ func_str = "config_page";
+ break;
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
+ func_str = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
+ func_str = "sas_iounit_ctl";
+ break;
+ case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ frame_sz = sizeof(Mpi2SepRequest_t);
+ func_str = "enclosure";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ frame_sz = sizeof(Mpi2IOCInitRequest_t);
+ func_str = "ioc_init";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ frame_sz = sizeof(Mpi2PortEnableRequest_t);
+ func_str = "port_enable";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
+ func_str = "smp_passthru";
+ break;
+ default:
+ frame_sz = 32;
+ func_str = "unknown";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ ioc->name, desc, ioc_status, request_hdr, func_str);
+
+ _debug_dump_mf(request_hdr, frame_sz/4);
+}
+
+/**
+ * _base_display_event_data - verbose translation of firmware asyn events
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * Return nothing.
+ */
+static void
+_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ case MPI2_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI2_EVENT_STATE_CHANGE:
+ desc = "Status Change";
+ break;
+ case MPI2_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI2_EVENT_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ desc = "Device Status Change";
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ desc = "IR Operation Status";
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ return;
+ }
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "SAS Enclosure Device Status Change";
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ desc = "IR Volume";
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ desc = "IR Physical Disk";
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ desc = "IR Configuration Change List";
+ break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ desc = "Log Entry Added";
+ break;
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ desc = "Temperature Threshold";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+}
+#endif
+
+/**
+ * _base_sas_log_info - verbose translation of firmware log info
+ * @ioc: per adapter object
+ * @log_info: log info
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+{
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
+ union loginfo_type sas_loginfo;
+ char *originator_str = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
+ /* each nexus loss loginfo */
+ if (log_info == 0x31170000)
+ return;
+
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+ return;
+
+ switch (sas_loginfo.dw.originator) {
+ case 0:
+ originator_str = "IOP";
+ break;
+ case 1:
+ originator_str = "PL";
+ break;
+ case 2:
+ originator_str = "IR";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ ioc->name, log_info,
+ originator_str, sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
+
+/**
+ * _base_display_reply_info -
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return nothing.
+ */
+static void
+_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+ u32 loginfo = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
+ (ioc->logging_level & MPT_DEBUG_REPLY)) {
+ _base_sas_ioc_info(ioc , mpi_reply,
+ mpt3sas_base_get_msg_frame(ioc, smid));
+ }
+#endif
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
+ _base_sas_log_info(ioc, loginfo);
+ }
+
+ if (ioc_status || loginfo) {
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
+ }
+}
+
+/**
+ * mpt3sas_base_done - base internal command completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ }
+ ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
+
+ complete(&ioc->base_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_async_event - main callback handler for firmware asyn events
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ Mpi2EventAckRequest_t *ack_request;
+ u16 smid;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+ if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return 1;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _base_display_event_data(ioc, mpi_reply);
+#endif
+ if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
+ goto out;
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = mpi_reply->Event;
+ ack_request->EventContext = mpi_reply->EventContext;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt3sas_base_put_smid_default(ioc, smid);
+
+ out:
+
+ /* scsih callback handler */
+ mpt3sas_scsih_event_callback(ioc, msix_index, reply);
+
+ /* ctl callback handler */
+ mpt3sas_ctl_event_callback(ioc, msix_index, reply);
+
+ return 1;
+}
+
+/**
+ * _base_get_cb_idx - obtain the callback index
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return callback index.
+ */
+static u8
+_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ int i;
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
+ i = smid - 1;
+ cb_idx = ioc->scsi_lookup[i].cb_idx;
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
+ return cb_idx;
+}
+
+/**
+ * _base_mask_interrupts - disable interrupts
+ * @ioc: per adapter object
+ *
+ * Disabling ResetIRQ, Reply and Doorbell Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ ioc->mask_interrupts = 1;
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ readl(&ioc->chip->HostInterruptMask);
+}
+
+/**
+ * _base_unmask_interrupts - enable interrupts
+ * @ioc: per adapter object
+ *
+ * Enabling only Reply Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register &= ~MPI2_HIM_RIM;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ ioc->mask_interrupts = 0;
+}
+
+union reply_descriptor {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ * @r: pt_regs pointer (not used)
+ *
+ * Return IRQ_HANDLE if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+ struct adapter_reply_queue *reply_q = bus_id;
+ union reply_descriptor rd;
+ u32 completed_cmds;
+ u8 request_desript_type;
+ u16 smid;
+ u8 cb_idx;
+ u32 reply;
+ u8 msix_index = reply_q->msix_index;
+ struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+ Mpi2ReplyDescriptorsUnion_t *rpf;
+ u8 rc;
+
+ if (ioc->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!atomic_add_unless(&reply_q->busy, 1, 1))
+ return IRQ_NONE;
+
+ rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
+ request_desript_type = rpf->Default.ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ completed_cmds = 0;
+ cb_idx = 0xFF;
+ do {
+ rd.word = le64_to_cpu(rpf->Words);
+ if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
+ goto out;
+ reply = 0;
+ smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
+ if (request_desript_type ==
+ MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
+ request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, 0);
+ if (rc)
+ mpt3sas_base_free_smid(ioc, smid);
+ }
+ } else if (request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
+ reply = le32_to_cpu(
+ rpf->AddressReply.ReplyFrameAddress);
+ if (reply > ioc->reply_dma_max_address ||
+ reply < ioc->reply_dma_min_address)
+ reply = 0;
+ if (smid) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, reply);
+ if (reply)
+ _base_display_reply_info(ioc,
+ smid, msix_index, reply);
+ if (rc)
+ mpt3sas_base_free_smid(ioc,
+ smid);
+ }
+ } else {
+ _base_async_event(ioc, msix_index, reply);
+ }
+
+ /* reply free queue handling */
+ if (reply) {
+ ioc->reply_free_host_index =
+ (ioc->reply_free_host_index ==
+ (ioc->reply_free_queue_depth - 1)) ?
+ 0 : ioc->reply_free_host_index + 1;
+ ioc->reply_free[ioc->reply_free_host_index] =
+ cpu_to_le32(reply);
+ wmb();
+ writel(ioc->reply_free_host_index,
+ &ioc->chip->ReplyFreeHostIndex);
+ }
+ }
+
+ rpf->Words = cpu_to_le64(ULLONG_MAX);
+ reply_q->reply_post_host_index =
+ (reply_q->reply_post_host_index ==
+ (ioc->reply_post_queue_depth - 1)) ? 0 :
+ reply_q->reply_post_host_index + 1;
+ request_desript_type =
+ reply_q->reply_post_free[reply_q->reply_post_host_index].
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ completed_cmds++;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ goto out;
+ if (!reply_q->reply_post_host_index)
+ rpf = reply_q->reply_post_free;
+ else
+ rpf++;
+ } while (1);
+
+ out:
+
+ if (!completed_cmds) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ wmb();
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+}
+
+/**
+ * _base_is_controller_msix_enabled - is controller support muli-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
+{
+ return (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR conext
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to OS before we process the TM completetion.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+
+ /* If MSIX capability is turned off
+ * then multi-queues are not enabled
+ */
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (ioc->shost_recovery)
+ return;
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
+ _base_interrupt(reply_q->vector, (void *)reply_q);
+ }
+}
+
+/**
+ * mpt3sas_base_release_callback_handler - clear interrupt callback handler
+ * @cb_idx: callback index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_release_callback_handler(u8 cb_idx)
+{
+ mpt_callbacks[cb_idx] = NULL;
+}
+
+/**
+ * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * @cb_func: callback function
+ *
+ * Returns cb_func.
+ */
+u8
+mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
+{
+ u8 cb_idx;
+
+ for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
+ if (mpt_callbacks[cb_idx] == NULL)
+ break;
+
+ mpt_callbacks[cb_idx] = cb_func;
+ return cb_idx;
+}
+
+/**
+ * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_initialize_callback_handler(void)
+{
+ u8 cb_idx;
+
+ for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
+ mpt3sas_base_release_callback_handler(cb_idx);
+}
+
+
+/**
+ * _base_build_zero_len_sge - build zero length sg entry
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to insure the IOCs hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ ioc->base_add_sg_single(paddr, flags_length, -1);
+}
+
+/**
+ * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple32_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le32(dma_addr);
+}
+
+
+/**
+ * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple64_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_get_chain_buffer_tracker - obtain chain tracker
+ * @ioc: per adapter object
+ * @smid: smid associated to an IO request
+ *
+ * Returns chain tracker(from ioc->free_chain_list)
+ */
+static struct chain_tracker *
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct chain_tracker *chain_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_chain_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "chain buffers not available\n", ioc->name));
+ return NULL;
+ }
+ chain_req = list_entry(ioc->free_chain_list.next,
+ struct chain_tracker, tracker_list);
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->scsi_lookup[smid - 1].chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return chain_req;
+}
+
+
+/**
+ * _base_build_sg - build generic sg
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u32 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ }
+}
+
+/* IEEE format sgls */
+
+/**
+ * _base_add_sg_single_ieee - add sg element for IEEE format
+ * @paddr: virtual address for SGE
+ * @flags: SGE flags
+ * @chain_offset: number of 128 byte elements from start of segment
+ * @length: data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
+ dma_addr_t dma_addr)
+{
+ Mpi25IeeeSgeChain64_t *sgel = paddr;
+
+ sgel->Flags = flags;
+ sgel->NextChainOffset = chain_offset;
+ sgel->Length = cpu_to_le32(length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to insure the IOCs hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+ _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
+}
+
+/**
+ * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
+ * @ioc: per adapter object
+ * @scmd: scsi command
+ * @smid: system request message index
+ * Context: none.
+ *
+ * The main routine that builds scatter gather table from a given
+ * scsi request sent via the .queuecommand main handler.
+ *
+ * Returns 0 success, anything else error
+ */
+static int
+_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_offset;
+ u32 chain_length;
+ int sges_left;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 chain_sgl_flags;
+ struct chain_tracker *chain_req;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /* init scatter gather flags */
+ simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (!sges_left) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "pci_map_sg failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+
+ sg_local = &mpi_request->SGL;
+ sges_in_segment = (ioc->request_sz -
+ offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
+ (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ /* initializing the pointers */
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ do {
+ sges_in_segment = (sges_left <=
+ ioc->max_sges_in_chain_message) ? sges_left :
+ ioc->max_sges_in_chain_message;
+ chain_offset = (sges_left == sges_in_segment) ?
+ 0 : sges_in_segment;
+ chain_length = sges_in_segment * ioc->sge_size_ieee;
+ if (chain_offset)
+ chain_length += ioc->sge_size_ieee;
+ _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
+ chain_offset, chain_length, chain_dma);
+
+ sg_local = chain;
+ if (!chain_offset)
+ goto fill_in_last_segment;
+
+ /* fill in chain segments */
+ while (sges_in_segment) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ } while (1);
+
+
+ fill_in_last_segment:
+
+ /* fill the last segment */
+ while (sges_left) {
+ if (sges_left == 1)
+ _base_add_sg_single_ieee(sg_local,
+ simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * _base_build_sg_ieee - build generic sg for IEEE format
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u8 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge_ieee(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size_ieee;
+
+ /* READ sgel last */
+ sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ }
+}
+
+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
+
+/**
+ * _base_config_dma_addressing - set dma addressing
+ * @ioc: per adapter object
+ * @pdev: PCI device struct
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+{
+ struct sysinfo s;
+ u64 consistent_dma_mask;
+
+ if (ioc->dma_mask)
+ consistent_dma_mask = DMA_BIT_MASK(64);
+ else
+ consistent_dma_mask = DMA_BIT_MASK(32);
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask =
+ dma_get_required_mask(&pdev->dev);
+ if ((required_mask > DMA_BIT_MASK(32)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ ioc->dma_mask = 64;
+ goto out;
+ }
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ ioc->dma_mask = 32;
+ } else
+ return -ENODEV;
+
+ out:
+ si_meminfo(&s);
+ pr_info(MPT3SAS_FMT
+ "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+
+ return 0;
+}
+
+static int
+_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
+ struct pci_dev *pdev)
+{
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * _base_check_enable_msix - checks MSIX capabable.
+ * @ioc: per adapter object
+ *
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
+ */
+static int
+_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ int base;
+ u16 message_control;
+
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
+ ioc->name));
+ return -EINVAL;
+ }
+
+ /* get msix vector count */
+
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ if (ioc->msix_vector_count > 8)
+ ioc->msix_vector_count = 8;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "msix is supported, vector_count(%d)\n",
+ ioc->name, ioc->msix_vector_count));
+ return 0;
+}
+
+/**
+ * _base_free_irq - free irq
+ * @ioc: per adapter object
+ *
+ * Freeing respective reply_queue from the list.
+ */
+static void
+_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q, *next;
+
+ if (list_empty(&ioc->reply_queue_list))
+ return;
+
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ list_del(&reply_q->list);
+ irq_set_affinity_hint(reply_q->vector, NULL);
+ free_cpumask_var(reply_q->affinity_hint);
+ synchronize_irq(reply_q->vector);
+ free_irq(reply_q->vector, reply_q);
+ kfree(reply_q);
+ }
+}
+
+/**
+ * _base_request_irq - request irq
+ * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
+ *
+ * Inserting respective reply_queue into the list.
+ */
+static int
+_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
+{
+ struct adapter_reply_queue *reply_q;
+ int r;
+
+ reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+ if (!reply_q) {
+ pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
+ ioc->name, (int)sizeof(struct adapter_reply_queue));
+ return -ENOMEM;
+ }
+ reply_q->ioc = ioc;
+ reply_q->msix_index = index;
+ reply_q->vector = vector;
+
+ if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_clear(reply_q->affinity_hint);
+
+ atomic_set(&reply_q->busy, 0);
+ if (ioc->msix_enable)
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+ MPT3SAS_DRIVER_NAME, ioc->id, index);
+ else
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+ MPT3SAS_DRIVER_NAME, ioc->id);
+ r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+ reply_q);
+ if (r) {
+ pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ reply_q->name, vector);
+ kfree(reply_q);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&reply_q->list);
+ list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+ return 0;
+}
+
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would nice if we could call irq_set_affinity, however it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ struct adapter_reply_queue *reply_q;
+
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+
+ nr_cpus = num_online_cpus();
+ nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
+ ioc->facts.MaxMSIxVectors);
+ if (!nr_msix)
+ return;
+
+ cpu = cpumask_first(cpu_online_mask);
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+
+ unsigned int i, group = nr_cpus / nr_msix;
+
+ if (cpu >= nr_cpus)
+ break;
+
+ if (index < nr_cpus % nr_msix)
+ group++;
+
+ for (i = 0 ; i < group ; i++) {
+ ioc->cpu_msix_table[cpu] = index;
+ cpumask_or(reply_q->affinity_hint,
+ reply_q->affinity_hint, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ if (irq_set_affinity_hint(reply_q->vector,
+ reply_q->affinity_hint))
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "error setting affinity hint for irq vector %d\n",
+ ioc->name, reply_q->vector));
+ index++;
+ }
+}
+
+/**
+ * _base_disable_msix - disables msix
+ * @ioc: per adapter object
+ *
+ */
+static void
+_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (!ioc->msix_enable)
+ return;
+ pci_disable_msix(ioc->pdev);
+ ioc->msix_enable = 0;
+}
+
+/**
+ * _base_enable_msix - enables msix, failback to io_apic
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct msix_entry *entries, *a;
+ int r;
+ int i;
+ u8 try_msix = 0;
+
+ if (msix_disable == -1 || msix_disable == 0)
+ try_msix = 1;
+
+ if (!try_msix)
+ goto try_ioapic;
+
+ if (_base_check_enable_msix(ioc) != 0)
+ goto try_ioapic;
+
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+ ioc->msix_vector_count);
+
+ printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
+ ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
+ ioc->cpu_count, max_msix_vectors);
+
+ if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
+ max_msix_vectors = 8;
+
+ if (max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ ioc->reply_queue_count);
+ ioc->msix_vector_count = ioc->reply_queue_count;
+ } else if (max_msix_vectors == 0)
+ goto try_ioapic;
+
+ entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!entries) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "kcalloc failed @ at %s:%d/%s() !!!\n",
+ ioc->name, __FILE__, __LINE__, __func__));
+ goto try_ioapic;
+ }
+
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+ a->entry = i;
+
+ r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
+ if (r) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "pci_enable_msix_exact failed (r=%d) !!!\n",
+ ioc->name, r));
+ kfree(entries);
+ goto try_ioapic;
+ }
+
+ ioc->msix_enable = 1;
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+ r = _base_request_irq(ioc, i, a->vector);
+ if (r) {
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ kfree(entries);
+ goto try_ioapic;
+ }
+ }
+
+ kfree(entries);
+ return 0;
+
+/* failback to io_apic interrupt routing */
+ try_ioapic:
+
+ ioc->reply_queue_count = 1;
+ r = _base_request_irq(ioc, 0, ioc->pdev->irq);
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+ u32 memap_sz;
+ u32 pio_sz;
+ int i, r = 0;
+ u64 pio_chip = 0;
+ u64 chip_phys = 0;
+ struct adapter_reply_queue *reply_q;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
+ ioc->name, __func__));
+
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
+ ioc->name);
+ ioc->bars = 0;
+ return -ENODEV;
+ }
+
+
+ if (pci_request_selected_regions(pdev, ioc->bars,
+ MPT3SAS_DRIVER_NAME)) {
+ pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
+ ioc->name);
+ ioc->bars = 0;
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+/* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+
+ if (_base_config_dma_addressing(ioc, pdev) != 0) {
+ pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+ for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pio_sz)
+ continue;
+ pio_chip = (u64)pci_resource_start(pdev, i);
+ pio_sz = pci_resource_len(pdev, i);
+ } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ if (memap_sz)
+ continue;
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
+ ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+ }
+ }
+
+ _base_mask_interrupts(ioc);
+
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_fail;
+
+ if (!ioc->rdpq_array_enable_assigned) {
+ ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+ ioc->rdpq_array_enable_assigned = 1;
+ }
+
+ r = _base_enable_msix(ioc);
+ if (r)
+ goto out_fail;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
+ reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+ "IO-APIC enabled"), reply_q->vector);
+
+ pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
+ ioc->name, (unsigned long long)pio_chip, pio_sz);
+
+ /* Save PCI configuration state for recovery from PCI AER/EEH errors */
+ pci_save_state(pdev);
+ return 0;
+
+ out_fail:
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return r;
+}
+
+/**
+ * mpt3sas_base_get_msg_frame - obtain request mf pointer
+ * @ioc: per adapter object
+ * @smid: system request message index(smid zero is invalid)
+ *
+ * Returns virt pointer to message frame.
+ */
+void *
+mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->request + (smid * ioc->request_sz));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to sense buffer.
+ */
+void *
+mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the low 32bit address of the sense buffer.
+ */
+__le32
+mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
+ SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
+ * @ioc: per adapter object
+ * @phys_addr: lower 32 physical addr of the reply
+ *
+ * Converts 32bit lower physical addr into a virt address.
+ */
+void *
+mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+ return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+}
+
+/**
+ * mpt3sas_base_get_smid - obtain a free smid from internal queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->internal_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->internal_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ * @scmd: pointer to scsi command object
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct scsiio_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->free_list.next,
+ struct scsiio_tracker, tracker_list);
+ request->scmd = scmd;
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->hpr_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return 0;
+ }
+
+ request = list_entry(ioc->hpr_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_free_smid - put smid back on free_list
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ int i;
+ struct chain_tracker *chain_req, *next;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
+ }
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
+ }
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * _base_writeq - 64 bit write to MMIO
+ * @ioc: per adapter object
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
+ * care of 32 bit environment where its not quarenteed to send the entire word
+ * in one transfer.
+ */
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ writeq(cpu_to_le64(b), addr);
+}
+#else
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ unsigned long flags;
+ __u64 data_out = cpu_to_le64(b);
+
+ spin_lock_irqsave(writeq_lock, flags);
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+ spin_unlock_irqrestore(writeq_lock, flags);
+}
+#endif
+
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
+/**
+ * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+
+ descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.SCSIIO.RequestFlags =
+ MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.SMID = cpu_to_le16(smid);
+ descriptor.HighPriority.LMID = 0;
+ descriptor.HighPriority.Reserved1 = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI25_MFGPAGE_DEVID_SAS3008:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT3SAS_INTEL_RMS3JC080_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RMS3JC080_BRANDING);
+ break;
+
+ case MPT3SAS_INTEL_RS3GC008_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RS3GC008_BRANDING);
+ break;
+ case MPT3SAS_INTEL_RS3FC044_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RS3FC044_BRANDING);
+ break;
+ case MPT3SAS_INTEL_RS3UC080_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_INTEL_RS3UC080_BRANDING);
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+}
+
+
+
+/**
+ * _base_display_ioc_capabilities - Disply IOC's capabilities.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ char desc[16];
+ u32 iounit_pg1_flags;
+ u32 bios_version;
+
+ bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ strncpy(desc, ioc->manu_pg0.ChipName, 16);
+ pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
+ "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ ioc->name, desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
+
+ _base_display_intel_branding(ioc);
+
+ pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
+ pr_info("Initiator");
+ i++;
+ }
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
+ pr_info("%sTarget", i ? "," : "");
+ i++;
+ }
+
+ i = 0;
+ pr_info("), ");
+ pr_info("Capabilities=(");
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
+ pr_info("Raid");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
+ pr_info("%sTLR", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
+ pr_info("%sMulticast", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
+ pr_info("%sBIDI Target", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
+ pr_info("%sEEDP", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
+ pr_info("%sSnapshot Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
+ pr_info("%sDiag Trace Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
+ pr_info("%sDiag Extended Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
+ pr_info("%sTask Set Full", i ? "," : "");
+ i++;
+ }
+
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
+ pr_info("%sNCQ", i ? "," : "");
+ i++;
+ }
+
+ pr_info(")\n");
+}
+
+/**
+ * mpt3sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: interval IO is returned when there is a missing device
+ *
+ * Return nothing.
+ *
+ * Passed on the command line, this function will modify the device missing
+ * delay, as well as the io missing delay. This should be called at driver
+ * load time.
+ */
+void
+mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+{
+ u16 dmd, dmd_new, dmd_orignal;
+ u8 io_missing_delay_original;
+ u16 sz;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 num_phys = 0;
+ u16 ioc_status;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys)
+ return;
+
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* device missing delay */
+ dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ dmd_orignal = dmd;
+ if (device_missing_delay > 0x7F) {
+ dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+ device_missing_delay;
+ dmd = dmd / 16;
+ dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+ } else
+ dmd = device_missing_delay;
+ sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+ /* io missing delay */
+ io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+ sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+ if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd_new = (dmd &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd_new =
+ dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
+ ioc->name, dmd_orignal, dmd_new);
+ pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
+ ioc->name, io_missing_delay_original,
+ io_missing_delay);
+ ioc->device_missing_delay = dmd_new;
+ ioc->io_missing_delay = io_missing_delay;
+ }
+
+out:
+ kfree(sas_iounit_pg1);
+}
+/**
+ * _base_static_config_pages - static start of day config pages
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ u32 iounit_pg1_flags;
+
+ mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
+ if (ioc->ir_firmware)
+ mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+
+ /*
+ * Ensure correct T10 PI operation if vendor left EEDPTagMode
+ * flag unset in NVDATA.
+ */
+ mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ if (ioc->manu_pg11.EEDPTagMode == 0) {
+ pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ ioc->name);
+ ioc->manu_pg11.EEDPTagMode &= ~0x3;
+ ioc->manu_pg11.EEDPTagMode |= 0x1;
+ mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ }
+
+ mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ _base_display_ioc_capabilities(ioc);
+
+ /*
+ * Enable task_set_full handling in iounit_pg1 when the
+ * facts capabilities indicate that its supported.
+ */
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
+ iounit_pg1_flags &=
+ ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ else
+ iounit_pg1_flags |=
+ MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
+ mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+
+ if (ioc->iounit_pg8.NumSensors)
+ ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
+}
+
+/**
+ * _base_release_memory_pools - release memory
+ * @ioc: per adapter object
+ *
+ * Free memory allocated from _base_allocate_memory_pools.
+ *
+ * Return nothing.
+ */
+static void
+_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ struct reply_post_struct *rps;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->request) {
+ pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ ioc->request, ioc->request_dma);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request_pool(0x%p): free\n",
+ ioc->name, ioc->request));
+ ioc->request = NULL;
+ }
+
+ if (ioc->sense) {
+ pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+ if (ioc->sense_dma_pool)
+ pci_pool_destroy(ioc->sense_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense_pool(0x%p): free\n",
+ ioc->name, ioc->sense));
+ ioc->sense = NULL;
+ }
+
+ if (ioc->reply) {
+ pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+ if (ioc->reply_dma_pool)
+ pci_pool_destroy(ioc->reply_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_pool(0x%p): free\n",
+ ioc->name, ioc->reply));
+ ioc->reply = NULL;
+ }
+
+ if (ioc->reply_free) {
+ pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+ ioc->reply_free_dma);
+ if (ioc->reply_free_dma_pool)
+ pci_pool_destroy(ioc->reply_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_pool(0x%p): free\n",
+ ioc->name, ioc->reply_free));
+ ioc->reply_free = NULL;
+ }
+
+ if (ioc->reply_post) {
+ do {
+ rps = &ioc->reply_post[i];
+ if (rps->reply_post_free) {
+ pci_pool_free(
+ ioc->reply_post_free_dma_pool,
+ rps->reply_post_free,
+ rps->reply_post_free_dma);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->name, rps->reply_post_free));
+ rps->reply_post_free = NULL;
+ }
+ } while (ioc->rdpq_array_enable &&
+ (++i < ioc->reply_queue_count));
+
+ if (ioc->reply_post_free_dma_pool)
+ pci_pool_destroy(ioc->reply_post_free_dma_pool);
+ kfree(ioc->reply_post);
+ }
+
+ if (ioc->config_page) {
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config_page(0x%p): free\n", ioc->name,
+ ioc->config_page));
+ pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ ioc->config_page, ioc->config_page_dma);
+ }
+
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
+ kfree(ioc->hpr_lookup);
+ kfree(ioc->internal_lookup);
+ if (ioc->chain_lookup) {
+ for (i = 0; i < ioc->chain_depth; i++) {
+ if (ioc->chain_lookup[i].chain_buffer)
+ pci_pool_free(ioc->chain_dma_pool,
+ ioc->chain_lookup[i].chain_buffer,
+ ioc->chain_lookup[i].chain_buffer_dma);
+ }
+ if (ioc->chain_dma_pool)
+ pci_pool_destroy(ioc->chain_dma_pool);
+ free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ ioc->chain_lookup = NULL;
+ }
+}
+
+/**
+ * _base_allocate_memory_pools - allocate start of day memory pools
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 success, anything else error
+ */
+static int
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ struct mpt3sas_facts *facts;
+ u16 max_sge_elements;
+ u16 chains_needed_per_io;
+ u32 sz, total_sz, reply_post_free_sz;
+ u32 retry_sz;
+ u16 max_request_credit;
+ unsigned short sg_tablesize;
+ u16 sge_size;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+
+ retry_sz = 0;
+ facts = &ioc->facts;
+
+ /* command line tunables for max sgl entries */
+ if (max_sgl_entries != -1)
+ sg_tablesize = max_sgl_entries;
+ else
+ sg_tablesize = MPT3SAS_SG_DEPTH;
+
+ if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
+ else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) {
+ sg_tablesize = min_t(unsigned short, sg_tablesize,
+ SCSI_MAX_SG_CHAIN_SEGMENTS);
+ pr_warn(MPT3SAS_FMT
+ "sg_tablesize(%u) is bigger than kernel"
+ " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+ sg_tablesize, MPT3SAS_MAX_PHYS_SEGMENTS);
+ }
+ ioc->shost->sg_tablesize = sg_tablesize;
+
+ ioc->hi_priority_depth = facts->HighPriorityCredit;
+ ioc->internal_depth = ioc->hi_priority_depth + (5);
+ /* command line tunables for max controller queue depth */
+ if (max_queue_depth != -1 && max_queue_depth != 0) {
+ max_request_credit = min_t(u16, max_queue_depth +
+ ioc->hi_priority_depth + ioc->internal_depth,
+ facts->RequestCredit);
+ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+ max_request_credit = MAX_HBA_QUEUE_DEPTH;
+ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+ ioc->hba_queue_depth = max_request_credit;
+
+ /* request frame size */
+ ioc->request_sz = facts->IOCRequestFrameSize * 4;
+
+ /* reply frame size */
+ ioc->reply_sz = facts->ReplyFrameSize * 4;
+
+ /* calculate the max scatter element size */
+ sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
+
+ retry_allocation:
+ total_sz = 0;
+ /* calculate number of sg elements left over in the 1st frame */
+ max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+ sizeof(Mpi2SGEIOUnion_t)) + sge_size);
+ ioc->max_sges_in_main_message = max_sge_elements/sge_size;
+
+ /* now do the same for a chain buffer */
+ max_sge_elements = ioc->request_sz - sge_size;
+ ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
+
+ /*
+ * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
+ */
+ chains_needed_per_io = ((ioc->shost->sg_tablesize -
+ ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
+ + 1;
+ if (chains_needed_per_io > facts->MaxChainDepth) {
+ chains_needed_per_io = facts->MaxChainDepth;
+ ioc->shost->sg_tablesize = min_t(u16,
+ ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
+ * chains_needed_per_io), ioc->shost->sg_tablesize);
+ }
+ ioc->chains_needed_per_io = chains_needed_per_io;
+
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+ /* calculate reply descriptor post queue depth */
+ ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+ ioc->reply_free_queue_depth + 1 ;
+ /* align the reply post queue on the next 16 count boundary */
+ if (ioc->reply_post_queue_depth % 16)
+ ioc->reply_post_queue_depth += 16 -
+ (ioc->reply_post_queue_depth % 16);
+
+
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth =
+ facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16);
+ ioc->hba_queue_depth =
+ ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
+ "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+ "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
+
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+
+ sz = reply_post_free_sz;
+ if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+ sz *= ioc->reply_queue_count;
+
+ ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+ (ioc->reply_queue_count):1,
+ sizeof(struct reply_post_struct), GFP_KERNEL);
+
+ if (!ioc->reply_post) {
+ pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ i = 0;
+ do {
+ ioc->reply_post[i].reply_post_free =
+ pci_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply post free pool (0x%p): depth(%d),"
+ "element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth, 8, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_dma = (0x%llx)\n", ioc->name,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ total_sz += sz;
+ } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
+ if (ioc->dma_mask == 64) {
+ if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+ pr_warn(MPT3SAS_FMT
+ "no suitable consistent DMA mask for %s\n",
+ ioc->name, pci_name(ioc->pdev));
+ goto out;
+ }
+ }
+
+ ioc->scsiio_depth = ioc->hba_queue_depth -
+ ioc->hi_priority_depth - ioc->internal_depth;
+
+ /* set the scsi host can_queue depth
+ * with some internal commands that could be outstanding
+ */
+ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "scsi host: can_queue depth (%d)\n",
+ ioc->name, ioc->shost->can_queue));
+
+
+ /* contiguous pool for request and chains, 16 byte align, one extra "
+ * "frame for smid=0
+ */
+ ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
+ sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
+
+ /* hi-priority queue */
+ sz += (ioc->hi_priority_depth * ioc->request_sz);
+
+ /* internal queue */
+ sz += (ioc->internal_depth * ioc->request_sz);
+
+ ioc->request_dma_sz = sz;
+ ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ if (!ioc->request) {
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
+ goto out;
+ retry_sz += 64;
+ ioc->hba_queue_depth = max_request_credit - retry_sz;
+ goto retry_allocation;
+ }
+
+ if (retry_sz)
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+
+ /* hi-priority queue */
+ ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+ ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+
+ /* internal queue */
+ ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
+ ioc->request_sz);
+ ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
+ ioc->request_sz);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz)/1024));
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
+ ioc->name, (unsigned long long) ioc->request_dma));
+ total_sz += sz;
+
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
+ if (!ioc->scsi_lookup) {
+ pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
+ ioc->name, (int)sz);
+ goto out;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
+ ioc->name, ioc->request, ioc->scsiio_depth));
+
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+ pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ ioc->request_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+ pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->chain_depth; i++) {
+ ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+ ioc->chain_dma_pool , GFP_KERNEL,
+ &ioc->chain_lookup[i].chain_buffer_dma);
+ if (!ioc->chain_lookup[i].chain_buffer) {
+ ioc->chain_depth = i;
+ goto chain_done;
+ }
+ total_sz += ioc->request_sz;
+ }
+ chain_done:
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->chain_depth, ioc->request_sz,
+ ((ioc->chain_depth * ioc->request_sz))/1024));
+
+ /* initialize hi-priority queue smid's */
+ ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->hpr_lookup) {
+ pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->hi_priority_smid = ioc->scsiio_depth + 1;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
+
+ /* initialize internal queue smid's */
+ ioc->internal_lookup = kcalloc(ioc->internal_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->internal_lookup) {
+ pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
+
+ /* sense buffers, 4 byte align */
+ sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->sense_dma_pool) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
+ &ioc->sense_dma);
+ if (!ioc->sense) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
+ "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->sense_dma));
+ total_sz += sz;
+
+ /* reply pool, 4 byte align */
+ sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->reply_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_dma));
+ total_sz += sz;
+
+ /* reply free queue, 16 byte align */
+ sz = ioc->reply_free_queue_depth * 4;
+ ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
+ &ioc->reply_free_dma);
+ if (!ioc->reply_free) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
+ "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_dma (0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_free_dma));
+ total_sz += sz;
+
+ ioc->config_page_sz = 512;
+ ioc->config_page = pci_alloc_consistent(ioc->pdev,
+ ioc->config_page_sz, &ioc->config_page_dma);
+ if (!ioc->config_page) {
+ pr_err(MPT3SAS_FMT
+ "config page: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config page(0x%p): size(%d)\n",
+ ioc->name, ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->config_page_dma));
+ total_sz += ioc->config_page_sz;
+
+ pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
+ ioc->name, total_sz/1024);
+ pr_info(MPT3SAS_FMT
+ "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->name, ioc->shost->can_queue, facts->RequestCredit);
+ pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
+ ioc->name, ioc->shost->sg_tablesize);
+ return 0;
+
+ out:
+ return -ENOMEM;
+}
+
+/**
+ * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+{
+ u32 s, sc;
+
+ s = readl(&ioc->chip->Doorbell);
+ sc = s & MPI2_IOC_STATE_MASK;
+ return cooked ? sc : s;
+}
+
+/**
+ * _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
+ int sleep_flag)
+{
+ u32 count, cntdn;
+ u32 current_state;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ current_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (current_state == ioc_state)
+ return 0;
+ if (count && current_state == MPI2_IOC_STATE_FAULT)
+ break;
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ return current_state;
+}
+
+/**
+ * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
+ * a write to the doorbell)
+ * @ioc: per adapter object
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
+ */
+static int
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
+ * @ioc: per adapter object
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
+ * doorbell.
+ */
+static int
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+ u32 doorbell;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc , doorbell);
+ return -EFAULT;
+ }
+ } else if (int_status == 0xFFFFFFFF)
+ goto out;
+
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ out:
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
+ * @ioc: per adapter object
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 doorbell_reg;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ doorbell_reg = readl(&ioc->chip->Doorbell);
+ if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ ioc->name, __func__, count, doorbell_reg);
+ return -EFAULT;
+}
+
+/**
+ * _base_send_ioc_reset - send doorbell reset
+ * @ioc: per adapter object
+ * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state;
+ int r = 0;
+
+ if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
+ pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if (!(ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
+ return -EFAULT;
+
+ pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+
+ writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
+ &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+ r = -EFAULT;
+ goto out;
+ }
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ r = -EFAULT;
+ goto out;
+ }
+ out:
+ pr_info(MPT3SAS_FMT "message unit reset: %s\n",
+ ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * _base_handshake_req_reply_wait - send request thru doorbell interface
+ * @ioc: per adapter object
+ * @request_bytes: request length
+ * @request: pointer having request payload
+ * @reply_bytes: reply length
+ * @reply: pointer to reply payload
+ * @timeout: timeout in second
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+{
+ MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
+ int i;
+ u8 failed;
+ u16 dummy;
+ __le32 *mfp;
+
+ /* make sure doorbell is not in use */
+ if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ pr_err(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* clear pending doorbell interrupts from previous state changes */
+ if (readl(&ioc->chip->HostInterruptStatus) &
+ MPI2_HIS_IOC2SYS_DB_STATUS)
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ /* send message to ioc */
+ writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
+ ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
+ &ioc->chip->Doorbell);
+
+ if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake ack failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* send message 32-bits at a time */
+ for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+ failed = 1;
+ }
+
+ if (failed) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake sending request failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* now wait for the reply */
+ if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* read the first two 16-bits, it gives the total length of the reply */
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ for (i = 2; i < default_reply->MsgLength * 2; i++) {
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ if (i >= reply_bytes/2) /* overflow case */
+ dummy = readl(&ioc->chip->Doorbell);
+ else
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ }
+
+ _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
+ if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ mfp = (__le32 *)reply;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < reply_bytes/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SAS IO Unit Control Request message allows the host to perform low-level
+ * operations, such as resets on the PHYs of the IO Unit, also allows the host
+ * to obtain the IOC assigned device handles for a device if it has other
+ * identifying information about the device, in addition allows the host to
+ * remove IOC resources associated with the device.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ bool issue_reset = false;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
+ if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
+ ioc->ioc_link_reset_in_progress = 1;
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
+ ioc->ioc_link_reset_in_progress)
+ ioc->ioc_link_reset_in_progress = 0;
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = true;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SasIoUnitControlReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SCSI Enclosure Processor request message causes the IOC to
+ * communicate with SES devices to control LED status signals.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ bool issue_reset = false;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = false;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SepReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _base_get_port_facts - obtain port facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+{
+ Mpi2PortFactsRequest_t mpi_request;
+ Mpi2PortFactsReply_t mpi_reply;
+ struct mpt3sas_port_facts *pfacts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
+ mpi_request.PortNumber = port;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ pfacts = &ioc->pfacts[port];
+ memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
+ pfacts->PortNumber = mpi_reply.PortNumber;
+ pfacts->VP_ID = mpi_reply.VP_ID;
+ pfacts->VF_ID = mpi_reply.VF_ID;
+ pfacts->MaxPostedCmdBuffers =
+ le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
+
+ return 0;
+}
+
+/**
+ * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCFactsRequest_t mpi_request;
+ Mpi2IOCFactsReply_t mpi_reply;
+ struct mpt3sas_facts *facts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ facts = &ioc->facts;
+ memset(facts, 0, sizeof(struct mpt3sas_facts));
+ facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
+ facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
+ facts->VP_ID = mpi_reply.VP_ID;
+ facts->VF_ID = mpi_reply.VF_ID;
+ facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
+ facts->MaxChainDepth = mpi_reply.MaxChainDepth;
+ facts->WhoInit = mpi_reply.WhoInit;
+ facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+ facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
+ facts->MaxReplyDescriptorPostQueueDepth =
+ le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
+ facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
+ facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
+ if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
+ ioc->ir_firmware = 1;
+ if ((facts->IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+ ioc->rdpq_array_capable = 1;
+ facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
+ facts->IOCRequestFrameSize =
+ le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
+ facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
+ ioc->shost->max_id = -1;
+ facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
+ facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
+ facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
+ facts->HighPriorityCredit =
+ le16_to_cpu(mpi_reply.HighPriorityCredit);
+ facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
+ facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hba queue depth(%d), max chains per io(%d)\n",
+ ioc->name, facts->RequestCredit,
+ facts->MaxChainDepth));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request frame size(%d), reply frame size(%d)\n", ioc->name,
+ facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ return 0;
+}
+
+/**
+ * _base_send_ioc_init - send ioc_init to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCInitRequest_t mpi_request;
+ Mpi2IOCInitReply_t mpi_reply;
+ int i, r = 0;
+ struct timeval current_time;
+ u16 ioc_status;
+ u32 reply_post_free_array_sz = 0;
+ Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+ dma_addr_t reply_post_free_array_dma;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
+ mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ mpi_request.VF_ID = 0; /* TODO */
+ mpi_request.VP_ID = 0;
+ mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+ mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+
+ if (_base_is_controller_msix_enabled(ioc))
+ mpi_request.HostMSIxVectors = ioc->reply_queue_count;
+ mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
+ mpi_request.ReplyDescriptorPostQueueDepth =
+ cpu_to_le16(ioc->reply_post_queue_depth);
+ mpi_request.ReplyFreeQueueDepth =
+ cpu_to_le16(ioc->reply_free_queue_depth);
+
+ mpi_request.SenseBufferAddressHigh =
+ cpu_to_le32((u64)ioc->sense_dma >> 32);
+ mpi_request.SystemReplyAddressHigh =
+ cpu_to_le32((u64)ioc->reply_dma >> 32);
+ mpi_request.SystemRequestFrameBaseAddress =
+ cpu_to_le64((u64)ioc->request_dma);
+ mpi_request.ReplyFreeQueueAddress =
+ cpu_to_le64((u64)ioc->reply_free_dma);
+
+ if (ioc->rdpq_array_enable) {
+ reply_post_free_array_sz = ioc->reply_queue_count *
+ sizeof(Mpi2IOCInitRDPQArrayEntry);
+ reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+ reply_post_free_array_sz, &reply_post_free_array_dma);
+ if (!reply_post_free_array) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free_array: pci_alloc_consistent failed\n",
+ ioc->name);
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(reply_post_free_array, 0, reply_post_free_array_sz);
+ for (i = 0; i < ioc->reply_queue_count; i++)
+ reply_post_free_array[i].RDPQBaseAddress =
+ cpu_to_le64(
+ (u64)ioc->reply_post[i].reply_post_free_dma);
+ mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)reply_post_free_array_dma);
+ } else {
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+ }
+
+ /* This time stamp specifies number of milliseconds
+ * since epoch ~ midnight January 1, 1970.
+ */
+ do_gettimeofday(&current_time);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ __le32 *mfp;
+ int i;
+
+ mfp = (__le32 *)&mpi_request;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+
+ r = _base_handshake_req_reply_wait(ioc,
+ sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
+ sleep_flag);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ goto out;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
+ mpi_reply.IOCLogInfo) {
+ pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ r = -EIO;
+ }
+
+out:
+ if (reply_post_free_array)
+ pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+ reply_post_free_array,
+ reply_post_free_array_dma);
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+
+ if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
+ return 1;
+
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
+ ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ ioc->port_enable_failed = 1;
+
+ if (ioc->is_driver_loading) {
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ mpt3sas_port_enable_complete(ioc);
+ return 1;
+ } else {
+ ioc->start_scan_failed = ioc_status;
+ ioc->start_scan = 0;
+ return 1;
+ }
+ }
+ complete(&ioc->port_enable_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_send_port_enable - send port_enable(discovery stuff) to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ Mpi2PortEnableReply_t *mpi_reply;
+ unsigned long timeleft;
+ int r = 0;
+ u16 smid;
+ u16 ioc_status;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ init_completion(&ioc->port_enable_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
+ 300*HZ);
+ if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2PortEnableRequest_t)/4);
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ goto out;
+ }
+
+ mpi_reply = ioc->port_enable_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
+ ioc->name, __func__, ioc_status);
+ r = -EFAULT;
+ goto out;
+ }
+
+ out:
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+ "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ u16 smid;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - desposition
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
+{
+ /* We wait for discovery to complete if IR firmware is loaded.
+ * The sas topology events arrive before PD events, so we need time to
+ * turn on the bit in ioc->pd_handles to indicate PD
+ * Also, it maybe required to report Volumes ahead of physical
+ * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+ */
+ if (ioc->ir_firmware)
+ return 1;
+
+ /* if no Bios, then we don't need to wait */
+ if (!ioc->bios_pg3.BiosVersion)
+ return 0;
+
+ /* Bios is present, then we drop down here.
+ *
+ * If there any entries in the Bios Page 2, then we wait
+ * for discovery to complete.
+ */
+
+ /* Current Boot Device */
+ if ((ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Request Boot Device */
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Alternate Request Boot Device */
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * _base_unmask_events - turn on notification for this event
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The mask is stored in ioc->event_masks.
+ */
+static void
+_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u32 desired_event;
+
+ if (event >= 128)
+ return;
+
+ desired_event = (1 << (event % 32));
+
+ if (event < 32)
+ ioc->event_masks[0] &= ~desired_event;
+ else if (event < 64)
+ ioc->event_masks[1] &= ~desired_event;
+ else if (event < 96)
+ ioc->event_masks[2] &= ~desired_event;
+ else if (event < 128)
+ ioc->event_masks[3] &= ~desired_event;
+}
+
+/**
+ * _base_event_notification - send event notification
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2EventNotificationRequest_t *mpi_request;
+ unsigned long timeleft;
+ u16 smid;
+ int r = 0;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mpi_request->EventMasks[i] =
+ cpu_to_le32(ioc->event_masks[i]);
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2EventNotificationRequest_t)/4);
+ if (ioc->base_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ } else
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
+ ioc->name, __func__));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ return r;
+}
+
+/**
+ * mpt3sas_base_validate_event_type - validating event types
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * This will turn on firmware event notification when application
+ * ask for that event. We don't mask events that are already enabled.
+ */
+void
+mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
+{
+ int i, j;
+ u32 event_mask, desired_event;
+ u8 send_update_to_fw;
+
+ for (i = 0, send_update_to_fw = 0; i <
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
+ event_mask = ~event_type[i];
+ desired_event = 1;
+ for (j = 0; j < 32; j++) {
+ if (!(event_mask & desired_event) &&
+ (ioc->event_masks[i] & desired_event)) {
+ ioc->event_masks[i] &= ~desired_event;
+ send_update_to_fw = 1;
+ }
+ desired_event = (desired_event << 1);
+ }
+ }
+
+ if (!send_update_to_fw)
+ return;
+
+ mutex_lock(&ioc->base_cmds.mutex);
+ _base_event_notification(ioc, CAN_SLEEP);
+ mutex_unlock(&ioc->base_cmds.mutex);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
+ pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
+ ioc->name));
+
+ count = 0;
+ do {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "write magic sequence\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ /* wait 100 msec */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+
+ if (count++ > 20)
+ goto out;
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ ioc->name, count, host_diagnostic));
+
+ } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+
+ hcb_size = readl(&ioc->chip->HCBSize);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
+ ioc->name));
+ writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
+ &ioc->chip->HostDiagnostic);
+
+ /*This delay allows the chip PCIe hardware time to finish reset tasks*/
+ if (sleep_flag == CAN_SLEEP)
+ msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+ else
+ mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+
+ /* Approximately 300 second max wait */
+ for (count = 0; count < (300000000 /
+ MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic == 0xFFFFFFFF)
+ goto out;
+ if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
+ break;
+
+ /* Wait to pass the second read delay window */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
+ / 1000);
+ else
+ mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
+ / 1000);
+ }
+
+ if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "restart the adapter assuming the HCB Address points to good F/W\n",
+ ioc->name));
+ host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
+ host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "re-enable the HCDW\n", ioc->name));
+ writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
+ &ioc->chip->HCBSize);
+ }
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
+ ioc->name));
+ writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
+ &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "disable writes to the diagnostic register\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "Wait for FW to go to the READY state\n", ioc->name));
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
+ sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ goto out;
+ }
+
+ pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ return 0;
+
+ out:
+ pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ return -EFAULT;
+}
+
+/**
+ * _base_make_ioc_ready - put controller in READY state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ u32 ioc_state;
+ int rc;
+ int count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ /* if in RESET state, it should move to READY state shortly */
+ count = 0;
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
+ while ((ioc_state & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_READY) {
+ if (count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ return -EFAULT;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ ssleep(1);
+ else
+ mdelay(1000);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ }
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
+ return 0;
+
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n",
+ ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ if (type == FORCE_BIG_HAMMER)
+ goto issue_diag_reset;
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+ if (!(_base_send_ioc_reset(ioc,
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+ return 0;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, CAN_SLEEP);
+ return rc;
+}
+
+/**
+ * _base_make_ioc_operational - put controller in OPERATIONAL state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ int r, i;
+ unsigned long flags;
+ u32 reply_address;
+ u16 smid;
+ struct _tr_list *delayed_tr, *delayed_tr_next;
+ struct adapter_reply_queue *reply_q;
+ long reply_post_free;
+ u32 reply_post_free_sz, index = 0;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* clean the delayed target reset list */
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_volume_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ /* initialize the scsi lookup free list */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ INIT_LIST_HEAD(&ioc->free_list);
+ smid = 1;
+ for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
+ INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].smid = smid;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
+ }
+
+ /* hi-priority queue */
+ INIT_LIST_HEAD(&ioc->hpr_free_list);
+ smid = ioc->hi_priority_smid;
+ for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ ioc->hpr_lookup[i].smid = smid;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ }
+
+ /* internal queue */
+ INIT_LIST_HEAD(&ioc->internal_free_list);
+ smid = ioc->internal_smid;
+ for (i = 0; i < ioc->internal_depth; i++, smid++) {
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ ioc->internal_lookup[i].smid = smid;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+
+ /* chain pool */
+ INIT_LIST_HEAD(&ioc->free_chain_list);
+ for (i = 0; i < ioc->chain_depth; i++)
+ list_add_tail(&ioc->chain_lookup[i].tracker_list,
+ &ioc->free_chain_list);
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /* initialize Reply Free Queue */
+ for (i = 0, reply_address = (u32)ioc->reply_dma ;
+ i < ioc->reply_free_queue_depth ; i++, reply_address +=
+ ioc->reply_sz)
+ ioc->reply_free[i] = cpu_to_le32(reply_address);
+
+ /* initialize reply queues */
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
+
+ /* initialize Reply Post Free Queue */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->reply_post_host_index = 0;
+ reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+ reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable)
+ reply_post_free = (long)
+ ioc->reply_post[++index].reply_post_free;
+ else
+ reply_post_free += reply_post_free_sz;
+ }
+ skip_init_reply_post_free_queue:
+
+ r = _base_send_ioc_init(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ /* initialize reply free host index */
+ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
+ writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
+
+ /* initialize reply post host index */
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_host_index;
+ }
+
+ skip_init_reply_post_host_index:
+
+ _base_unmask_interrupts(ioc);
+ r = _base_event_notification(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ if (sleep_flag == CAN_SLEEP)
+ _base_static_config_pages(ioc);
+
+
+ if (ioc->is_driver_loading) {
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+
+ return r; /* scan_start and scan_finished support */
+ }
+
+ r = _base_send_port_enable(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_free_resources - free resources controller resources
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->chip_phys && ioc->chip) {
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ }
+
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+
+ if (ioc->chip_phys && ioc->chip)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ return;
+}
+
+/**
+ * mpt3sas_base_attach - attach controller instance
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+{
+ int r, i;
+ int cpu_id, last_cpu_id = 0;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* setup cpu_msix_table */
+ ioc->cpu_count = num_online_cpus();
+ for_each_online_cpu(cpu_id)
+ last_cpu_id = cpu_id;
+ ioc->cpu_msix_table_sz = last_cpu_id + 1;
+ ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+ ioc->reply_queue_count = 1;
+ if (!ioc->cpu_msix_table) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "allocation for cpu_msix_table failed!!!\n",
+ ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ ioc->rdpq_array_enable_assigned = 0;
+ ioc->dma_mask = 0;
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ goto out_free_resources;
+
+
+ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ /*
+ * In SAS3.0,
+ * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
+ * Target Status - all require the IEEE formated scatter gather
+ * elements.
+ */
+
+ ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
+ ioc->build_sg = &_base_build_sg_ieee;
+ ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
+ ioc->mpi25 = 1;
+ ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
+ /*
+ * These function pointers for other requests that don't
+ * the require IEEE scatter gather elements.
+ *
+ * For example Configuration Pages and SAS IOUNIT Control don't.
+ */
+ ioc->build_sg_mpi = &_base_build_sg;
+ ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
+
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+
+ ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+ sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
+ r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+ }
+
+ r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ init_waitqueue_head(&ioc->reset_wq);
+
+ /* allocate memory pd handle bitmask list */
+ ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pd_handles_sz++;
+ ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->pd_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->blocking_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+
+ /* base internal command bits */
+ mutex_init(&ioc->base_cmds.mutex);
+ ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* port_enable command bits */
+ ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* transport internal command bits */
+ ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->transport_cmds.mutex);
+
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
+ /* task management internal command bits */
+ ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->tm_cmds.mutex);
+
+ /* config page internal command bits */
+ ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->config_cmds.mutex);
+
+ /* ctl module internal command bits */
+ ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->ctl_cmds.mutex);
+
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
+ !ioc->ctl_cmds.sense) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ ioc->event_masks[i] = -1;
+
+ /* here we enable the events we care about */
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
+ _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+ _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
+
+ r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ return 0;
+
+ out_free_resources:
+
+ ioc->remove_host = 1;
+
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->pfacts);
+ ioc->ctl_cmds.reply = NULL;
+ ioc->base_cmds.reply = NULL;
+ ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
+ ioc->transport_cmds.reply = NULL;
+ ioc->config_cmds.reply = NULL;
+ ioc->pfacts = NULL;
+ return r;
+}
+
+
+/**
+ * mpt3sas_base_detach - remove controller instance
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
+{
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+}
+
+/**
+ * _base_reset_handler - reset callback handler (for base)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+static void
+_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ mpt3sas_scsih_reset_handler(ioc, reset_phase);
+ mpt3sas_ctl_reset_handler(ioc, reset_phase);
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else
+ complete(&ioc->port_enable_cmds.done);
+ }
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ break;
+ }
+}
+
+/**
+ * _wait_for_commands_to_complete - reset controller
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * This function waiting(3s) for all pending commands to complete
+ * prior to putting controller in reset.
+ */
+static void
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 ioc_state;
+ unsigned long flags;
+ u16 i;
+
+ ioc->pending_io_count = 0;
+ if (sleep_flag != CAN_SLEEP)
+ return;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+ return;
+
+ /* pending command count */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = 0; i < ioc->scsiio_depth; i++)
+ if (ioc->scsi_lookup[i].cb_idx != 0xFF)
+ ioc->pending_io_count++;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!ioc->pending_io_count)
+ return;
+
+ /* wait for pending commands to complete */
+ wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * mpt3sas_base_hard_reset_handler - reset controller
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ int r;
+ unsigned long flags;
+ u32 ioc_state;
+ u8 is_fault = 0, is_trigger = 0;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery) {
+ pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
+ ioc->name, __func__);
+ r = 0;
+ goto out_unlocked;
+ }
+
+ if (mpt3sas_fwfault_debug)
+ mpt3sas_halt_firmware(ioc);
+
+ /* TODO - What we really should be doing is pulling
+ * out all the code associated with NO_SLEEP; its never used.
+ * That is legacy code from mpt fusion driver, ported over.
+ * I will leave this BUG_ON here for now till its been resolved.
+ */
+ BUG_ON(sleep_flag == NO_SLEEP);
+
+ /* wait for an active reset in progress to complete */
+ if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
+ do {
+ ssleep(1);
+ } while (ioc->shost_recovery == 1);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return ioc->ioc_reset_in_progress_status;
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))) {
+ is_trigger = 1;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ is_fault = 1;
+ }
+ _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _wait_for_commands_to_complete(ioc, sleep_flag);
+ _base_mask_interrupts(ioc);
+ r = _base_make_ioc_ready(ioc, sleep_flag, type);
+ if (r)
+ goto out;
+ _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (ioc->is_driver_loading && ioc->port_enable_failed) {
+ ioc->remove_host = 1;
+ r = -EFAULT;
+ goto out;
+ }
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out;
+
+ if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+ panic("%s: Issue occurred with flashing controller firmware."
+ "Please reboot the system and ensure that the correct"
+ " firmware version is running\n", ioc->name);
+
+ r = _base_make_ioc_operational(ioc, sleep_flag);
+ if (!r)
+ _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+
+ out:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
+ ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_in_progress_status = r;
+ ioc->shost_recovery = 0;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_count++;
+ mutex_unlock(&ioc->reset_in_progress_mutex);
+
+ out_unlocked:
+ if ((r == 0) && is_trigger) {
+ if (is_fault)
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
+ else
+ mpt3sas_trigger_master(ioc,
+ MASTER_TRIGGER_ADAPTER_RESET);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return r;
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
new file mode 100644
index 000000000..afa881682
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -0,0 +1,1180 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_BASE_H_INCLUDED
+#define MPT3SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "mpt3sas_debug.h"
+#include "mpt3sas_trigger_diag.h"
+
+/* driver versioning info */
+#define MPT3SAS_DRIVER_NAME "mpt3sas"
+#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
+#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
+#define MPT3SAS_DRIVER_VERSION "04.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 4
+#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_BUILD_VERSION 0
+#define MPT3SAS_RELEASE_VERSION 00
+
+/*
+ * Set MPT3SAS_SG_DEPTH value based on user input.
+ */
+#define MPT3SAS_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS
+#define MPT3SAS_MIN_PHYS_SEGMENTS 16
+#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
+#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
+#else
+#define MPT3SAS_SG_DEPTH MPT3SAS_MAX_PHYS_SEGMENTS
+#endif
+
+
+/*
+ * Generic Defines
+ */
+#define MPT3SAS_SATA_QUEUE_DEPTH 32
+#define MPT3SAS_SAS_QUEUE_DEPTH 254
+#define MPT3SAS_RAID_QUEUE_DEPTH 128
+
+#define MPT_NAME_LENGTH 32 /* generic length of strings */
+#define MPT_STRING_LENGTH 64
+
+#define MPT_MAX_CALLBACKS 32
+
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+
+#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/
+
+#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
+
+/*
+ * reset phases
+ */
+#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
+#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
+#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+
+/*
+ * logging format
+ */
+#define MPT3SAS_FMT "%s: "
+
+/*
+ * per target private data
+ */
+#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
+#define MPT_TARGET_FLAGS_VOLUME 0x02
+#define MPT_TARGET_FLAGS_DELETED 0x04
+#define MPT_TARGET_FASTPATH_IO 0x08
+
+/*
+ * Intel HBA branding
+ */
+#define MPT3SAS_INTEL_RMS3JC080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS3JC080"
+#define MPT3SAS_INTEL_RS3GC008_BRANDING \
+ "Intel(R) RAID Controller RS3GC008"
+#define MPT3SAS_INTEL_RS3FC044_BRANDING \
+ "Intel(R) RAID Controller RS3FC044"
+#define MPT3SAS_INTEL_RS3UC080_BRANDING \
+ "Intel(R) RAID Controller RS3UC080"
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521
+#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522
+#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523
+#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524
+
+/*
+ * status bits for ioc->diag_buffer_status
+ */
+#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
+#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
+#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID (0x00000000)
+#define MFG10_OEM_ID_DELL (0x00000001)
+#define MFG10_OEM_ID_FSC (0x00000002)
+#define MFG10_OEM_ID_SUN (0x00000003)
+#define MFG10_OEM_ID_IBM (0x00000004)
+
+/* GENERIC Flags 0*/
+#define MFG10_GF0_OCE_DISABLED (0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
+#define MFG10_GF0_R10_DISPLAY (0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+struct Mpi2ManufacturingPage10_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 OEMIdentifier; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 08h */
+ U32 Reserved3; /* 0Ch */
+ U32 GenericFlags0; /* 10h */
+ U32 GenericFlags1; /* 14h */
+ U32 Reserved4; /* 18h */
+ U32 OEMSpecificFlags0; /* 1Ch */
+ U32 OEMSpecificFlags1; /* 20h */
+ U32 Reserved5[18]; /* 24h - 60h*/
+};
+
+
+/* Miscellaneous options */
+struct Mpi2ManufacturingPage11_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ __le32 Reserved1; /* 04h */
+ u8 Reserved2; /* 08h */
+ u8 EEDPTagMode; /* 09h */
+ u8 Reserved3; /* 0Ah */
+ u8 Reserved4; /* 0Bh */
+ __le32 Reserved5[23]; /* 0Ch-60h*/
+};
+
+/**
+ * struct MPT3SAS_TARGET - starget private hostdata
+ * @starget: starget object
+ * @sas_address: target sas address
+ * @handle: device handle
+ * @num_luns: number luns
+ * @flags: MPT_TARGET_FLAGS_XXX flags
+ * @deleted: target flaged for deletion
+ * @tm_busy: target is busy with TM request.
+ */
+struct MPT3SAS_TARGET {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 handle;
+ int num_luns;
+ u32 flags;
+ u8 deleted;
+ u8 tm_busy;
+};
+
+
+/*
+ * per device private data
+ */
+#define MPT_DEVICE_FLAGS_INIT 0x01
+#define MPT_DEVICE_TLR_ON 0x02
+
+/**
+ * struct MPT3SAS_DEVICE - sdev private hostdata
+ * @sas_target: starget private hostdata
+ * @lun: lun number
+ * @flags: MPT_DEVICE_XXX flags
+ * @configured_lun: lun is configured
+ * @block: device is in SDEV_BLOCK state
+ * @tlr_snoop_check: flag used in determining whether to disable TLR
+ * @eedp_enable: eedp support enable bit
+ * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
+ * @eedp_block_length: block size
+ */
+struct MPT3SAS_DEVICE {
+ struct MPT3SAS_TARGET *sas_target;
+ unsigned int lun;
+ u32 flags;
+ u8 configured_lun;
+ u8 block;
+ u8 tlr_snoop_check;
+};
+
+#define MPT3_CMD_NOT_USED 0x8000 /* free */
+#define MPT3_CMD_COMPLETE 0x0001 /* completed */
+#define MPT3_CMD_PENDING 0x0002 /* pending */
+#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
+#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+
+/**
+ * struct _internal_cmd - internal commands struct
+ * @mutex: mutex
+ * @done: completion
+ * @reply: reply message pointer
+ * @sense: sense data
+ * @status: MPT3_CMD_XXX status
+ * @smid: system message id
+ */
+struct _internal_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ void *sense;
+ u16 status;
+ u16 smid;
+};
+
+
+
+/**
+ * struct _sas_device - attached device information
+ * @list: sas device list
+ * @starget: starget object
+ * @sas_address: device sas address
+ * @device_name: retrieved from the SAS IDENTIFY frame.
+ * @handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: number number
+ * @phy: phy identifier provided in sas device page 0
+ * @responding: used in _scsih_sas_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @pfa_led_on: flag for PFA LED status
+ *
+ */
+struct _sas_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 sas_address;
+ u64 device_name;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u16 volume_handle;
+ u64 volume_wwid;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 phy;
+ u8 responding;
+ u8 fast_path;
+ u8 pfa_led_on;
+};
+
+/**
+ * struct _raid_device - raid volume link list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: bitfield provides detailed info about the hidden components
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ */
+#define MPT_MAX_WARPDRIVE_PDS 8
+struct _raid_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ u64 wwid;
+ u16 handle;
+ int id;
+ int channel;
+ u8 volume_type;
+ u8 num_pds;
+ u8 responding;
+ u8 percent_complete;
+ u32 device_info;
+};
+
+/**
+ * struct _boot_device - boot device info
+ * @is_raid: flag to indicate whether this is volume
+ * @device: holds pointer for either struct _sas_device or
+ * struct _raid_device
+ */
+struct _boot_device {
+ u8 is_raid;
+ void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ */
+struct _sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: handle for this a member of an enclosure
+ * @device_info: bitwise defining capabilities of this sas_host/expander
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ */
+struct _sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 responding;
+ struct _sas_phy *phy;
+ struct list_head sas_port_list;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset, if fails to to big hammer
+ */
+enum reset_type {
+ FORCE_BIG_HAMMER,
+ SOFT_RESET,
+};
+
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free request (ioc->free_chain_list)
+ */
+struct chain_tracker {
+ void *chain_buffer;
+ dma_addr_t chain_buffer_dma;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct scsiio_tracker {
+ u16 smid;
+ struct scsi_cmnd *scmd;
+ u8 cb_idx;
+ struct list_head chain_list;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: this list
+*/
+struct adapter_reply_queue {
+ struct MPT3SAS_ADAPTER *ioc;
+ u8 msix_index;
+ unsigned int vector;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ cpumask_var_t affinity_hint;
+ struct list_head list;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/* SAS3.0 support */
+typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid);
+typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz,
+ dma_addr_t data_in_dma, size_t data_in_sz);
+typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
+ void *paddr);
+
+
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi3_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt3sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi3_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 Reserved3;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+};
+
+struct mpt3sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+struct reply_post_struct {
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+ TM_MUTEX_OFF = 0,
+ TM_MUTEX_ON = 1,
+};
+
+typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
+/**
+ * struct MPT3SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @pio_chip: physical io register space
+ * @chip: memory mapped register space
+ * @chip_phys: physical addrss prior to mapping
+ * @logging_level: see mpt3sas_debug.h
+ * @fwfault_debug: debuging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BAR's that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work
+ * @start_scan_failed: means port enable failed, return's the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: clt internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: base internal commands
+ * @tm_tr_cb_idx : device removal target reset handshake
+ * @tm_tr_volume_cb_idx : volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @manu_pg11: static manufacturing page 11
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id : used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles : bitmask for PD handles
+ * @pd_handles_sz : size of pd_handle bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserve memory for config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain: pool of chains
+ * @chain_dma:
+ * @max_sges_in_main_message: number sg elements in main message
+ * @max_sges_in_chain_message: number sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_depth: total chains allocated
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth:
+ * @reply_sz: per reply frame size:
+ * @reply: pool of replys:
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replys
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_struct: struct for reply_post_free physical & virt address
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
+ * is assigned only ones
+ * @reply_queue_count: number of reply queue's
+ * @reply_queue_list: link list contaning the reply queue info
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @delayed_tr_list: target reset link list
+ * @delayed_tr_volume_list: volume target reset link list
+ * @@temp_sensors_count: flag to carry the number of temperature sensors
+ */
+struct MPT3SAS_ADAPTER {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ char name[MPT_NAME_LENGTH];
+ char tmp_string[MPT_STRING_LENGTH];
+ struct pci_dev *pdev;
+ Mpi2SystemInterfaceRegs_t __iomem *chip;
+ resource_size_t chip_phys;
+ int logging_level;
+ int fwfault_debug;
+ u8 ir_firmware;
+ int bars;
+ u8 mask_interrupts;
+ int dma_mask;
+
+ /* fw fault handler */
+ char fault_reset_work_q_name[20];
+ struct workqueue_struct *fault_reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ /* fw event handler */
+ char firmware_event_name[20];
+ struct workqueue_struct *firmware_event_thread;
+ spinlock_t fw_event_lock;
+ struct list_head fw_event_list;
+
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
+ u8 shost_recovery;
+
+ struct mutex reset_in_progress_mutex;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+ u8 ioc_reset_in_progress_status;
+
+ u8 ignore_loginfos;
+ u8 remove_host;
+ u8 pci_error_recovery;
+ u8 wait_for_discovery_to_complete;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+ u8 start_scan;
+ u16 start_scan_failed;
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+ u8 *cpu_msix_table;
+ u16 cpu_msix_table_sz;
+ u32 ioc_reset_count;
+ MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+
+ /* internal commands, callback index */
+ u8 scsi_io_cb_idx;
+ u8 tm_cb_idx;
+ u8 transport_cb_idx;
+ u8 scsih_cb_idx;
+ u8 ctl_cb_idx;
+ u8 base_cb_idx;
+ u8 port_enable_cb_idx;
+ u8 config_cb_idx;
+ u8 tm_tr_cb_idx;
+ u8 tm_tr_volume_cb_idx;
+ u8 tm_sas_control_cb_idx;
+ struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
+ struct _internal_cmd transport_cmds;
+ struct _internal_cmd scsih_cmds;
+ struct _internal_cmd tm_cmds;
+ struct _internal_cmd ctl_cmds;
+ struct _internal_cmd config_cmds;
+
+ MPT_ADD_SGE base_add_sg_single;
+
+ /* function ptr for either IEEE or MPI sg elements */
+ MPT_BUILD_SG_SCMD build_sg_scmd;
+ MPT_BUILD_SG build_sg;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
+ u8 mpi25;
+ u16 sge_size_ieee;
+
+ /* function ptr for MPI sg elements only */
+ MPT_BUILD_SG build_sg_mpi;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+
+ /* event log */
+ u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u32 event_context;
+ void *event_log;
+ u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ /* static config pages */
+ struct mpt3sas_facts facts;
+ struct mpt3sas_port_facts *pfacts;
+ Mpi2ManufacturingPage0_t manu_pg0;
+ struct Mpi2ManufacturingPage10_t manu_pg10;
+ struct Mpi2ManufacturingPage11_t manu_pg11;
+ Mpi2BiosPage2_t bios_pg2;
+ Mpi2BiosPage3_t bios_pg3;
+ Mpi2IOCPage8_t ioc_pg8;
+ Mpi2IOUnitPage0_t iounit_pg0;
+ Mpi2IOUnitPage1_t iounit_pg1;
+ Mpi2IOUnitPage8_t iounit_pg8;
+
+ struct _boot_device req_boot_device;
+ struct _boot_device req_alt_boot_device;
+ struct _boot_device current_boot_device;
+
+ /* sas hba, expander, and device list */
+ struct _sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head sas_device_list;
+ struct list_head sas_device_init_list;
+ spinlock_t sas_device_lock;
+ struct list_head raid_device_list;
+ spinlock_t raid_device_lock;
+ u8 io_missing_delay;
+ u16 device_missing_delay;
+ int sas_id;
+
+ void *blocking_handles;
+ void *pd_handles;
+ u16 pd_handles_sz;
+
+ /* config page */
+ u16 config_page_sz;
+ void *config_page;
+ dma_addr_t config_page_dma;
+
+ /* scsiio request */
+ u16 hba_queue_depth;
+ u16 sge_size;
+ u16 scsiio_depth;
+ u16 request_sz;
+ u8 *request;
+ dma_addr_t request_dma;
+ u32 request_dma_sz;
+ struct scsiio_tracker *scsi_lookup;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
+ struct list_head free_list;
+ int pending_io_count;
+ wait_queue_head_t reset_wq;
+
+ /* chain */
+ struct chain_tracker *chain_lookup;
+ struct list_head free_chain_list;
+ struct dma_pool *chain_dma_pool;
+ ulong chain_pages;
+ u16 max_sges_in_main_message;
+ u16 max_sges_in_chain_message;
+ u16 chains_needed_per_io;
+ u32 chain_depth;
+
+ /* hi-priority queue */
+ u16 hi_priority_smid;
+ u8 *hi_priority;
+ dma_addr_t hi_priority_dma;
+ u16 hi_priority_depth;
+ struct request_tracker *hpr_lookup;
+ struct list_head hpr_free_list;
+
+ /* internal queue */
+ u16 internal_smid;
+ u8 *internal;
+ dma_addr_t internal_dma;
+ u16 internal_depth;
+ struct request_tracker *internal_lookup;
+ struct list_head internal_free_list;
+
+ /* sense */
+ u8 *sense;
+ dma_addr_t sense_dma;
+ struct dma_pool *sense_dma_pool;
+
+ /* reply */
+ u16 reply_sz;
+ u8 *reply;
+ dma_addr_t reply_dma;
+ u32 reply_dma_max_address;
+ u32 reply_dma_min_address;
+ struct dma_pool *reply_dma_pool;
+
+ /* reply free queue */
+ u16 reply_free_queue_depth;
+ __le32 *reply_free;
+ dma_addr_t reply_free_dma;
+ struct dma_pool *reply_free_dma_pool;
+ u32 reply_free_host_index;
+
+ /* reply post queue */
+ u16 reply_post_queue_depth;
+ struct reply_post_struct *reply_post;
+ u8 rdpq_array_capable;
+ u8 rdpq_array_enable;
+ u8 rdpq_array_enable_assigned;
+ struct dma_pool *reply_post_free_dma_pool;
+ u8 reply_queue_count;
+ struct list_head reply_queue_list;
+
+ struct list_head delayed_tr_list;
+ struct list_head delayed_tr_volume_list;
+ u8 temp_sensors_count;
+
+ /* diag buffer support */
+ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 ring_buffer_offset;
+ u32 ring_buffer_sz;
+ spinlock_t diag_trigger_lock;
+ u8 diag_trigger_active;
+ struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
+ struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+};
+
+typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+
+
+/* base shared API */
+extern struct list_head mpt3sas_ioc_list;
+void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc);
+
+int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type);
+
+void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
+void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc);
+
+/* hi-priority queue */
+u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd);
+
+u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_initialize_callback_handler(void);
+u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+void mpt3sas_base_release_callback_handler(u8 cb_idx);
+
+u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
+void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
+ u32 phys_addr);
+
+u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
+
+void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code);
+int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request);
+int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
+
+void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc,
+ u32 *event_type);
+
+void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay);
+
+int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
+
+
+/* scsih shared API */
+u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply);
+void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, enum mutex_type m_type);
+void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address);
+
+struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
+ struct MPT3SAS_ADAPTER *ioc, u16 handle);
+struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *mpt3sas_scsih_sas_device_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+
+void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
+
+/* config shared API */
+u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
+ u8 *num_phys);
+int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page);
+
+int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+
+int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage2_t *config_page);
+int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage0_t *config_page);
+int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage8_t *config_page);
+int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage8_t *config_page);
+int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page,
+ u32 phy_number, u16 handle);
+int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number);
+int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number);
+int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle);
+int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds);
+int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz);
+int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
+ u32 form, u32 form_specific);
+int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle);
+int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc,
+ u16 volume_handle, u64 *wwid);
+
+/* ctl shared API */
+extern struct device_attribute *mpt3sas_host_attrs[];
+extern struct device_attribute *mpt3sas_dev_attrs[];
+void mpt3sas_ctl_init(void);
+void mpt3sas_ctl_exit(void);
+u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
+ u8 msix_index, u32 reply);
+void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply);
+
+void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
+ u8 bits_to_regsiter);
+int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset);
+
+/* transport shared API */
+u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, u64 sas_address);
+void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent);
+int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
+int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev);
+void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+extern struct sas_function_template mpt3sas_transport_functions;
+extern struct scsi_transport_template *mpt3sas_transport_template;
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+/* trigger data externs */
+void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc,
+ u32 tigger_bitmask);
+void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier);
+void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key,
+ u8 asc, u8 ascq);
+void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
+ u32 loginfo);
+#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
new file mode 100644
index 000000000..e45c4613e
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -0,0 +1,1686 @@
+/*
+ * This module provides common API for accessing firmware configuration pages
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "mpt3sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page. */
+#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
+ << MPI2_SGE_FLAGS_SHIFT)
+
+/**
+ * struct config_request - obtain dma memory via routine
+ * @sz: size
+ * @page: virt pointer
+ * @page_dma: phys pointer
+ *
+ */
+struct config_request {
+ u16 sz;
+ void *page;
+ dma_addr_t page_dma;
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _config_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
+ case MPI2_CONFIG_PAGETYPE_IO_UNIT:
+ desc = "io_unit";
+ break;
+ case MPI2_CONFIG_PAGETYPE_IOC:
+ desc = "ioc";
+ break;
+ case MPI2_CONFIG_PAGETYPE_BIOS:
+ desc = "bios";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
+ desc = "raid_volume";
+ break;
+ case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
+ desc = "manufaucturing";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
+ desc = "physdisk";
+ break;
+ case MPI2_CONFIG_PAGETYPE_EXTENDED:
+ switch (mpi_request->ExtPageType) {
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
+ desc = "sas_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
+ desc = "sas_expander";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
+ desc = "sas_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
+ desc = "sas_phy";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_LOG:
+ desc = "log";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
+ desc = "enclosure";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
+ desc = "raid_config";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
+ desc = "driver_mapping";
+ break;
+ }
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT
+ "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ ioc->name, calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+#endif
+
+/**
+ * _config_alloc_config_dma_memory - obtain physical memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ int r = 0;
+
+ if (mem->sz > ioc->config_page_sz) {
+ mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+ &mem->page_dma, GFP_KERNEL);
+ if (!mem->page) {
+ pr_err(MPT3SAS_FMT
+ "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ ioc->name, __func__, mem->sz);
+ r = -ENOMEM;
+ }
+ } else { /* use tmp buffer if less than 512 bytes */
+ mem->page = ioc->config_page;
+ mem->page_dma = ioc->config_page_dma;
+ }
+ return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static void
+_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ if (mem->sz > ioc->config_page_sz)
+ dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+ mem->page_dma);
+}
+
+/**
+ * mpt3sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using _config_request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->config_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->config_cmds.smid != smid)
+ return 1;
+ ioc->config_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->config_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+#endif
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ return 1;
+}
+
+/**
+ * _config_request - main routine for sending config page requests
+ * @ioc: per adapter object
+ * @mpi_request: request message frame
+ * @mpi_reply: reply mf payload returned from firmware
+ * @timeout: timeout in seconds
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
+ *
+ * A generic API for config page requests to firmware.
+ *
+ * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling
+ * this API.
+ *
+ * The callback index is set inside `ioc->config_cb_idx.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+ void *config_page, u16 config_page_sz)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count, issue_host_reset = 0;
+ u16 wait_state_count;
+ struct config_request mem;
+ u32 ioc_status = UINT_MAX;
+
+ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+
+ retry_count = 0;
+ memset(&mem, 0, sizeof(struct config_request));
+
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ if (config_page) {
+ mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+ mpi_request->Header.PageType = mpi_reply->Header.PageType;
+ mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+ mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+ mpi_request->ExtPageType = mpi_reply->ExtPageType;
+ if (mpi_request->Header.PageLength)
+ mem.sz = mpi_request->Header.PageLength * 4;
+ else
+ mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+ r = _config_alloc_config_dma_memory(ioc, &mem);
+ if (r != 0)
+ goto out;
+ if (mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+ mem.page_dma);
+ memcpy(mem.page, config_page, min_t(u16, mem.sz,
+ config_page_sz));
+ } else {
+ memset(config_page, 0, config_page_sz);
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
+ }
+ }
+
+ retry_config:
+ if (retry_count) {
+ if (retry_count > 2) { /* attempt only 2 retries */
+ r = -EFAULT;
+ goto free_mem;
+ }
+ pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
+ ioc->name, __func__, retry_count);
+ }
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EFAULT;
+ goto free_mem;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EAGAIN;
+ goto free_mem;
+ }
+
+ r = 0;
+ memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ ioc->config_cmds.status = MPT3_CMD_PENDING;
+ config_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->config_cmds.smid = smid;
+ memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
+#endif
+ init_completion(&ioc->config_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
+ timeout*HZ);
+ if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt3sas_base_free_smid(ioc, smid);
+ if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+ MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ goto retry_config;
+ issue_host_reset = 1;
+ r = -EFAULT;
+ goto free_mem;
+ }
+
+ if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) {
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+ sizeof(Mpi2ConfigReply_t));
+
+ /* Reply Frame Sanity Checks to workaround FW issues */
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (mpi_reply->Header.PageType & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
+ " mpi_reply mismatch: Requested PageType(0x%02x)" \
+ " Reply PageType(0x%02x)\n", \
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (mpi_reply->Header.PageType & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
+ " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
+ " Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__, mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ }
+
+ if (retry_count)
+ pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
+ ioc->name, __func__, retry_count);
+
+ if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
+ config_page && mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) {
+ u8 *p = (u8 *)mem.page;
+
+ /* Config Page Sanity Checks to workaround FW issues */
+ if (p) {
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (p[3] & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG:" \
+ " config page mismatch:"
+ " Requested PageType(0x%02x)"
+ " Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (p[3] & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ (mpi_request->ExtPageType != p[6])) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG:" \
+ " config page mismatch:"
+ " Requested ExtPageType(0x%02x)"
+ " Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
+ }
+ }
+ memcpy(config_page, mem.page, min_t(u16, mem.sz,
+ config_page_sz));
+ }
+
+ free_mem:
+ if (config_page)
+ _config_free_config_dma_memory(ioc, &mem);
+ out:
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->config_cmds.mutex);
+
+ if (issue_host_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 7;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 10;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg2 - obtain bios page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg3 - obtain bios page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_iounit_pg1 - set iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
+ * @ioc: per adapter object
+ * @num_phys: pointer returned with the number of phys
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ u16 ioc_status;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+ *num_phys = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2SasIOUnitPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_phys = config_page.NumPhys;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg0 - obtain expander page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg1 - obtain expander page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
+ u16 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
+ (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg0 - obtain phy page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg1 - obtain phy page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_pds - obtain number of phys disk assigned to volume
+ * @ioc: per adapter object
+ * @handle: volume handle
+ * @num_pds: returns pds count
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2RaidVolPage0_t config_page;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2RaidVolPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_pds = config_page.NumPhysDisks;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
+ * @form_specific: specific to the form
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+ u32 form_specific)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | form_specific);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_handle - returns volume handle for give hidden
+ * raid components
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle)
+{
+ Mpi2RaidConfigurationPage0_t *config_page = NULL;
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2ConfigReply_t mpi_reply;
+ int r, i, config_page_sz;
+ u16 ioc_status;
+ int config_num;
+ u16 element_type;
+ u16 phys_disk_dev_handle;
+
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
+ mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+ config_page = kmalloc(config_page_sz, GFP_KERNEL);
+ if (!config_page) {
+ r = -1;
+ goto out;
+ }
+
+ config_num = 0xff;
+ while (1) {
+ mpi_request.PageAddress = cpu_to_le32(config_num +
+ MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ config_page_sz);
+ if (r)
+ goto out;
+ r = -1;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < config_page->NumElements; i++) {
+ element_type = le16_to_cpu(config_page->
+ ConfigElement[i].ElementFlags) &
+ MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+ if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+ element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+ phys_disk_dev_handle =
+ le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle);
+ if (phys_disk_dev_handle == pd_handle) {
+ *volume_handle =
+ le16_to_cpu(config_page->
+ ConfigElement[i].VolDevHandle);
+ r = 0;
+ goto out;
+ }
+ } else if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+ *volume_handle = 0;
+ r = 0;
+ goto out;
+ }
+ }
+ config_num = config_page->ConfigNum;
+ }
+ out:
+ kfree(config_page);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle
+ * @ioc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2RaidVolPage1_t raid_vol_pg1;
+
+ *wwid = 0;
+ if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
+ volume_handle))) {
+ *wwid = le64_to_cpu(raid_vol_pg1.WWID);
+ return 0;
+ } else
+ return -1;
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
new file mode 100644
index 000000000..080c8a76d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -0,0 +1,3283 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+#include "mpt3sas_ctl.h"
+
+
+static struct fasync_struct *async_queue;
+static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
+
+
+/**
+ * enum block_state - blocking state
+ * @NON_BLOCKING: non blocking
+ * @BLOCKING: blocking
+ *
+ * These states are for ioctls that need to wait for a response
+ * from firmware, so they probably require sleep.
+ */
+enum block_state {
+ NON_BLOCKING,
+ BLOCKING,
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _ctl_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+static struct _sas_device *
+_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->handle != handle)
+ continue;
+ r = sas_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _ctl_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "scsi_io, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ desc = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ desc = "ioc_init";
+ break;
+ case MPI2_FUNCTION_IOC_FACTS:
+ desc = "ioc_facts";
+ break;
+ case MPI2_FUNCTION_CONFIG:
+ {
+ Mpi2ConfigRequest_t *config_request =
+ (Mpi2ConfigRequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "config, type(0x%02x), ext_type(0x%02x), number(%d)",
+ (config_request->Header.PageType &
+ MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
+ config_request->Header.PageNumber);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_PORT_FACTS:
+ desc = "port_facts";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ desc = "port_enable";
+ break;
+ case MPI2_FUNCTION_EVENT_NOTIFICATION:
+ desc = "event_notification";
+ break;
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ desc = "fw_download";
+ break;
+ case MPI2_FUNCTION_FW_UPLOAD:
+ desc = "fw_upload";
+ break;
+ case MPI2_FUNCTION_RAID_ACTION:
+ desc = "raid_action";
+ break;
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "raid_pass, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ desc = "sas_iounit_cntl";
+ break;
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ desc = "sata_pass";
+ break;
+ case MPI2_FUNCTION_DIAG_BUFFER_POST:
+ desc = "diag_buffer_post";
+ break;
+ case MPI2_FUNCTION_DIAG_RELEASE:
+ desc = "diag_release";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ desc = "smp_passthrough";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
+ ioc->name, calling_function_name, desc, smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ Mpi2SCSIIOReply_t *scsi_reply =
+ (Mpi2SCSIIOReply_t *)mpi_reply;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _ctl_sas_device_find_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+ pr_info(MPT3SAS_FMT
+ "\tscsi_state(0x%02x), scsi_status"
+ "(0x%02x)\n", ioc->name,
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
+ }
+}
+
+#endif
+
+/**
+ * mpt3sas_ctl_done - ctl module completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using ioc->ctl_cb_idx.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi2SCSIIOReply_t *scsiio_reply;
+ const void *sense_data;
+ u32 sz;
+
+ if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->ctl_cmds.smid != smid)
+ return 1;
+ ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
+ /* get sense data */
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_reply->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+ if (scsiio_reply->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->SenseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+ }
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+#endif
+ ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->ctl_cmds.done);
+ return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should be
+ * be saved in the driver event_log. This bitmask is set by application.
+ *
+ * Returns 1 when event should be captured, or zero means no match.
+ */
+static int
+_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u16 i;
+ u32 desired_event;
+
+ if (event >= 128 || !event || !ioc->event_log)
+ return 0;
+
+ desired_event = (1 << (event % 32));
+ if (!desired_event)
+ desired_event = 1;
+ i = event / 32;
+ return desired_event & ioc->event_type[i];
+}
+
+/**
+ * mpt3sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ struct MPT3_IOCTL_EVENTS *event_log;
+ u16 event;
+ int i;
+ u32 sz, event_data_sz;
+ u8 send_aen = 0;
+
+ if (!ioc->event_log)
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (_ctl_check_event_type(ioc, event)) {
+
+ /* insert entry into circular event_log */
+ i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
+ event_log = ioc->event_log;
+ event_log[i].event = event;
+ event_log[i].context = ioc->event_context++;
+
+ event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+ sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
+ memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
+ memcpy(event_log[i].data, mpi_reply->EventData, sz);
+ send_aen = 1;
+ }
+
+ /* This aen_event_read_flag flag is set until the
+ * application has read the event log.
+ * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+ (send_aen && !ioc->aen_event_read_flag)) {
+ ioc->aen_event_read_flag = 1;
+ wake_up_interruptible(&ctl_poll_wait);
+ if (async_queue)
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+}
+
+/**
+ * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ return 1;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from application
+ * @ioc: per adapter object
+ * @iocpp: The ioc pointer is returned in this.
+ *
+ * Return (-1) means error, else ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->id != ioc_number)
+ continue;
+ *iocpp = ioc;
+ return ioc_number;
+ }
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ */
+void
+mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ int i;
+ u8 issue_reset;
+
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ }
+ break;
+ }
+}
+
+/**
+ * _ctl_fasync -
+ * @fd -
+ * @filep -
+ * @mode -
+ *
+ * Called when application request fasyn callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+ return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_poll -
+ * @file -
+ * @wait -
+ *
+ */
+static unsigned int
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ poll_wait(filep, &ctl_poll_wait, wait);
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->aen_event_read_flag)
+ return POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @tm_request - pointer to mf from user space
+ *
+ * Returns 0 when an smid if found, else fail.
+ * during failure, the reply frame is filled.
+ */
+static int
+_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
+ Mpi2SCSITaskManagementRequest_t *tm_request)
+{
+ u8 found = 0;
+ u16 i;
+ u16 handle;
+ struct scsi_cmnd *scmd;
+ struct MPT3SAS_DEVICE *priv_data;
+ unsigned long flags;
+ Mpi2SCSITaskManagementReply_t *tm_reply;
+ u32 sz;
+ u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
+
+ lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+
+ handle = le16_to_cpu(tm_request->DevHandle);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = ioc->scsiio_depth; i && !found; i--) {
+ scmd = ioc->scsi_lookup[i - 1].scmd;
+ if (scmd == NULL || scmd->device == NULL ||
+ scmd->device->hostdata == NULL)
+ continue;
+ if (lun != scmd->device->lun)
+ continue;
+ priv_data = scmd->device->hostdata;
+ if (priv_data->sas_target == NULL)
+ continue;
+ if (priv_data->sas_target->handle != handle)
+ continue;
+ tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!found) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
+ tm_reply = ioc->ctl_cmds.reply;
+ tm_reply->DevHandle = tm_request->DevHandle;
+ tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_reply->TaskType = tm_request->TaskType;
+ tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
+ tm_reply->VP_ID = tm_request->VP_ID;
+ tm_reply->VF_ID = tm_request->VF_ID;
+ sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz))
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 1;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
+ return 0;
+}
+
+/**
+ * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @mf - pointer to mf in user space
+ */
+static long
+_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ void __user *mf)
+{
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ u32 ioc_state;
+ u16 ioc_status;
+ u16 smid;
+ unsigned long timeout, timeleft;
+ u8 issue_reset;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+ u16 wait_state_count;
+
+ issue_reset = 0;
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+ if (!mpi_request) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed obtaining a memory for mpi_request\n",
+ ioc->name, __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Check for overflow and wraparound */
+ if (karg.data_sge_offset * 4 > ioc->request_sz ||
+ karg.data_sge_offset > (UINT_MAX / 4)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* copy in request message frame from user */
+ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
+ __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memcpy(request, mpi_request, karg.data_sge_offset*4);
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = karg.data_out_size;
+ data_in_sz = karg.data_in_size;
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
+ ret = -EINVAL;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
+ &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ if (copy_from_user(data_out, karg.data_out_buf_ptr,
+ data_out_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -EFAULT;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
+ &data_in_dma);
+ if (!data_in) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = (void *)request + (karg.data_sge_offset*4);
+
+ /* send command to firmware */
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
+#endif
+
+ init_completion(&ioc->ctl_cmds.done);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsiio_request =
+ (Mpi2SCSIIORequest_t *)request;
+ scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)request;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
+ if (tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ {
+ Mpi2SmpPassthroughRequest_t *smp_request =
+ (Mpi2SmpPassthroughRequest_t *)mpi_request;
+ u8 *data;
+
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ if (smp_request->PassthroughFlags &
+ MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
+ data = (u8 *)&smp_request->SGL;
+ else {
+ if (unlikely(data_out == NULL)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ data = data_out;
+ }
+
+ if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ case MPI2_FUNCTION_FW_UPLOAD:
+ {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_TOOLBOX:
+ {
+ Mpi2ToolboxCleanRequest_t *toolbox_request =
+ (Mpi2ToolboxCleanRequest_t *)mpi_request;
+
+ if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ } else {
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ }
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ {
+ Mpi2SasIoUnitControlRequest_t *sasiounit_request =
+ (Mpi2SasIoUnitControlRequest_t *)mpi_request;
+
+ if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
+ || sasiounit_request->Operation ==
+ MPI2_SAS_OP_PHY_LINK_RESET) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ /* drop to default case for posting the request */
+ }
+ default:
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+
+ if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+ else
+ timeout = karg.timeout;
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ timeout*HZ);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
+ ioc->ioc_link_reset_in_progress) {
+ ioc->ioc_link_reset_in_progress = 0;
+ ioc->ignore_loginfos = 0;
+ }
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request, karg.data_sge_offset);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
+ (ioc->logging_level & MPT_DEBUG_TM)) {
+ Mpi2SCSITaskManagementReply_t *tm_reply =
+ (Mpi2SCSITaskManagementReply_t *)mpi_reply;
+
+ pr_info(MPT3SAS_FMT "TASK_MGMT: " \
+ "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
+ "TerminationCount(0x%08x)\n", ioc->name,
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
+ }
+#endif
+ /* copy out xdata to user */
+ if (data_in_sz) {
+ if (copy_to_user(karg.data_in_buf_ptr, data_in,
+ data_in_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out reply message frame to user */
+ if (karg.max_reply_bytes) {
+ sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out sense to user */
+ if (karg.max_sense_bytes && (mpi_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
+ pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
+ ioc->name,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_halt_firmware(ioc);
+ mpt3sas_scsih_issue_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
+ TM_MUTEX_ON);
+ } else
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ }
+
+ out:
+
+ /* free memory associated with sg buffers */
+ if (data_in)
+ pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ data_out_dma);
+
+ kfree(mpi_request);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return ret;
+}
+
+/**
+ * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_iocinfo karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memset(&karg, 0 , sizeof(karg));
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->pfacts)
+ karg.port_number = ioc->pfacts[0].PortNumber;
+ karg.hw_rev = ioc->pdev->revision;
+ karg.pci_id = ioc->pdev->device;
+ karg.subsystem_device = ioc->pdev->subsystem_device;
+ karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
+ karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
+ karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
+ karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
+ karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
+ karg.firmware_version = ioc->facts.FWVersion.Word;
+ strcpy(karg.driver_version, MPT3SAS_DRIVER_NAME);
+ strcat(karg.driver_version, "-");
+ strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
+ karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventquery karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
+ memcpy(karg.event_types, ioc->event_type,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventenable karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memcpy(ioc->event_type, karg.event_types,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+ mpt3sas_base_validate_event_type(ioc, ioc->event_type);
+
+ if (ioc->event_log)
+ return 0;
+ /* initialize event_log */
+ ioc->event_context = 0;
+ ioc->aen_event_read_flag = 0;
+ ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
+ sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
+ if (!ioc->event_log) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventreport karg;
+ u32 number_bytes, max_events, max;
+ struct mpt3_ioctl_eventreport __user *uarg = arg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ number_bytes = karg.hdr.max_data_size -
+ sizeof(struct mpt3_ioctl_header);
+ max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
+ max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
+
+ /* If fewer than 1 event is requested, there must have
+ * been some type of error.
+ */
+ if (!max || !ioc->event_log)
+ return -ENODATA;
+
+ number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
+ if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ /* reset flag so SIGIO can restart */
+ ioc->aen_event_read_flag = 0;
+ return 0;
+}
+
+/**
+ * _ctl_do_reset - main handler for MPT3HARDRESET opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_diag_reset karg;
+ int retval;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_info(MPT3SAS_FMT "host reset: %s\n",
+ ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ return 0;
+}
+
+/**
+ * _ctl_btdh_search_sas_device - searching for sas device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->sas_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == sas_device->handle) {
+ btdh->bus = sas_device->channel;
+ btdh->id = sas_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == sas_device->channel && btdh->id ==
+ sas_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = sas_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_search_raid_device - searching for raid device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->raid_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == raid_device->handle) {
+ btdh->bus = raid_device->channel;
+ btdh->id = raid_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == raid_device->channel && btdh->id ==
+ raid_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = raid_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_btdh_mapping karg;
+ int rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = _ctl_btdh_search_sas_device(ioc, &karg);
+ if (!rc)
+ _ctl_btdh_search_raid_device(ioc, &karg);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_diag_capability - return diag buffer capability
+ * @ioc: per adapter object
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ *
+ * returns 1 when diag buffer support is enabled in firmware
+ */
+static u8
+_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
+{
+ u8 rc = 0;
+
+ switch (buffer_type) {
+ case MPI2_DIAG_BUF_TYPE_TRACE:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_EXTENDED:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+ rc = 1;
+ }
+
+ return rc;
+}
+
+
+/**
+ * _ctl_diag_register_2 - wrapper for registering diag buffer support
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
+ *
+ */
+static long
+_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_diag_register *diag_register)
+{
+ int rc, i;
+ void *request_data = NULL;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz = 0;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ u8 buffer_type;
+ unsigned long timeleft;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ u8 issue_reset = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ buffer_type = diag_register->buffer_type;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ pr_err(MPT3SAS_FMT
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__,
+ buffer_type);
+ return -EINVAL;
+ }
+
+ if (diag_register->requested_buffer_size % 4) {
+ pr_err(MPT3SAS_FMT
+ "%s: the requested_buffer_size is not 4 byte aligned\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ request_data = ioc->diag_buffer[buffer_type];
+ request_data_sz = diag_register->requested_buffer_size;
+ ioc->unique_id[buffer_type] = diag_register->unique_id;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ memcpy(ioc->product_specific[buffer_type],
+ diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
+ ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
+
+ if (request_data) {
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
+ request_data = NULL;
+ }
+ }
+
+ if (request_data == NULL) {
+ ioc->diag_buffer_sz[buffer_type] = 0;
+ ioc->diag_buffer_dma[buffer_type] = 0;
+ request_data = pci_alloc_consistent(
+ ioc->pdev, request_data_sz, &request_data_dma);
+ if (request_data == NULL) {
+ pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
+ " for diag buffers, requested size(%d)\n",
+ ioc->name, __func__, request_data_sz);
+ mpt3sas_base_free_smid(ioc, smid);
+ return -ENOMEM;
+ }
+ ioc->diag_buffer[buffer_type] = request_data;
+ ioc->diag_buffer_sz[buffer_type] = request_data_sz;
+ ioc->diag_buffer_dma[buffer_type] = request_data_dma;
+ }
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = diag_register->buffer_type;
+ mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
+ mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
+ mpi_request->BufferLength = cpu_to_le32(request_data_sz);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ ioc->name, __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ if (rc && request_data)
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
+ *
+ * This is called when command line option diag_buffer_enable is enabled
+ * at driver load time.
+ */
+void
+mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+ struct mpt3_diag_register diag_register;
+
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+
+ if (bits_to_register & 1) {
+ pr_info(MPT3SAS_FMT "registering trace buffer support\n",
+ ioc->name);
+ ioc->diag_trigger_master.MasterData =
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 2) {
+ pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 4) {
+ pr_info(MPT3SAS_FMT "registering extended buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+}
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_register karg;
+ long rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ rc = _ctl_diag_register_2(ioc, &karg);
+ return rc;
+}
+
+/**
+ * _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+static long
+_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_unregister karg;
+ void *request_data;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) has not been released\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ return 0;
+}
+
+/**
+ * _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * The application will send only buffer_type and unique_id. Driver will
+ * inspect unique_id first, if valid, fill in all the info. If unique_id is
+ * 0x00, the driver will return info specified by Buffer Type.
+ */
+static long
+_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_query karg;
+ void *request_data;
+ int i;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ karg.application_flags = 0;
+ buffer_type = karg.buffer_type;
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID);
+ else
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID |
+ MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ karg.product_specific[i] =
+ ioc->product_specific[buffer_type][i];
+
+ karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
+ karg.driver_added_buffer_size = 0;
+ karg.unique_id = ioc->unique_id[buffer_type];
+ karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
+
+ if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
+ pr_err(MPT3SAS_FMT
+ "%s: unable to write mpt3_diag_query data @ %p\n",
+ ioc->name, __func__, arg);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_send_diag_release - Diag Release Message
+ * @ioc: per adapter object
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset - specifies whether host reset is required.
+ *
+ */
+int
+mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset)
+{
+ Mpi2DiagReleaseRequest_t *mpi_request;
+ Mpi2DiagReleaseReply_t *mpi_reply;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ int rc;
+ unsigned long timeleft;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = 0;
+ *issue_reset = 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED)
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: skipping due to FAULT state\n", ioc->name,
+ __func__));
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ *issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ out:
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @arg - user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwritting information in the buffer.
+ */
+static long
+_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_release karg;
+ void *request_data;
+ int rc;
+ u8 buffer_type;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is already released\n",
+ ioc->name, __func__,
+ buffer_type);
+ return 0;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ /* buffers were released by due to host reset */
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) was released due to host reset\n",
+ ioc->name, __func__, buffer_type);
+ return 0;
+ }
+
+ rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_read_buffer karg;
+ struct mpt3_diag_read_buffer __user *uarg = arg;
+ void *request_data, *diag_data;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ int rc, i;
+ u8 buffer_type;
+ unsigned long timeleft, request_size, copy_size;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_size = ioc->diag_buffer_sz[buffer_type];
+
+ if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
+ pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
+ "or bytes_to_read are not 4 byte aligned\n", ioc->name,
+ __func__);
+ return -EINVAL;
+ }
+
+ if (karg.starting_offset > request_size)
+ return -EINVAL;
+
+ diag_data = (void *)(request_data + karg.starting_offset);
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ ioc->name, __func__,
+ diag_data, karg.starting_offset, karg.bytes_to_read));
+
+ /* Truncate data on requests that are too large */
+ if ((diag_data + karg.bytes_to_read < diag_data) ||
+ (diag_data + karg.bytes_to_read > request_data + request_size))
+ copy_size = request_size - karg.starting_offset;
+ else
+ copy_size = karg.bytes_to_read;
+
+ if (copy_to_user((void __user *)uarg->diagnostic_data,
+ diag_data, copy_size)) {
+ pr_err(MPT3SAS_FMT
+ "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ ioc->name, __func__, diag_data);
+ return -EFAULT;
+ }
+
+ if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
+ return 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: Reregister buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type));
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is still registered\n",
+ ioc->name, __func__, buffer_type));
+ return 0;
+ }
+ /* Get a free request frame and save the message context.
+ */
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->BufferLength =
+ cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
+ mpi_request->BufferAddress =
+ cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
+ * @ioc: per adapter object
+ * @cmd - ioctl opcode
+ * @arg - (struct mpt3_ioctl_command32)
+ *
+ * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
+ */
+static long
+_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
+ void __user *arg)
+{
+ struct mpt3_ioctl_command32 karg32;
+ struct mpt3_ioctl_command32 __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
+ return -EINVAL;
+
+ uarg = (struct mpt3_ioctl_command32 __user *) arg;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+ karg.hdr.port_number = karg32.hdr.port_number;
+ karg.hdr.max_data_size = karg32.hdr.max_data_size;
+ karg.timeout = karg32.timeout;
+ karg.max_reply_bytes = karg32.max_reply_bytes;
+ karg.data_in_size = karg32.data_in_size;
+ karg.data_out_size = karg32.data_out_size;
+ karg.max_sense_bytes = karg32.max_sense_bytes;
+ karg.data_sge_offset = karg32.data_sge_offset;
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/**
+ * _ctl_ioctl_main - main ioctl entry point
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ * compat - handles 32 bit applications in 64bit os
+ */
+static long
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ u8 compat)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct mpt3_ioctl_header ioctl_header;
+ enum block_state state;
+ long ret = -EINVAL;
+
+ /* get IOCTL header */
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct mpt3_ioctl_header))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ if (state == NON_BLOCKING) {
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+ return -ERESTARTSYS;
+
+
+ switch (cmd) {
+ case MPT3IOCINFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
+ ret = _ctl_getiocinfo(ioc, arg);
+ break;
+#ifdef CONFIG_COMPAT
+ case MPT3COMMAND32:
+#endif
+ case MPT3COMMAND:
+ {
+ struct mpt3_ioctl_command __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+ break;
+ }
+#endif
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
+ uarg = arg;
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+ }
+ break;
+ }
+ case MPT3EVENTQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
+ ret = _ctl_eventquery(ioc, arg);
+ break;
+ case MPT3EVENTENABLE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
+ ret = _ctl_eventenable(ioc, arg);
+ break;
+ case MPT3EVENTREPORT:
+ ret = _ctl_eventreport(ioc, arg);
+ break;
+ case MPT3HARDRESET:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
+ ret = _ctl_do_reset(ioc, arg);
+ break;
+ case MPT3BTDHMAPPING:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
+ ret = _ctl_btdh_mapping(ioc, arg);
+ break;
+ case MPT3DIAGREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
+ ret = _ctl_diag_register(ioc, arg);
+ break;
+ case MPT3DIAGUNREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
+ ret = _ctl_diag_unregister(ioc, arg);
+ break;
+ case MPT3DIAGQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
+ ret = _ctl_diag_query(ioc, arg);
+ break;
+ case MPT3DIAGRELEASE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
+ ret = _ctl_diag_release(ioc, arg);
+ break;
+ case MPT3DIAGREADBUFFER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
+ ret = _ctl_diag_read_buffer(ioc, arg);
+ break;
+ default:
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ break;
+ }
+
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+ return ret;
+}
+
+/**
+ * _ctl_ioctl - main ioctl entry point (unlocked)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ */
+static long
+_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
+ * @file -
+ * @cmd -
+ * @arg -
+ *
+ * This routine handles 32 bit applications in 64bit os.
+ */
+static long
+_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
+ return ret;
+}
+#endif
+
+/* scsi host attributes */
+/**
+ * _ctl_version_fw_show - firmware version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+
+/**
+ * _ctl_version_bios_show - bios version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (version & 0xFF000000) >> 24,
+ (version & 0x00FF0000) >> 16,
+ (version & 0x0000FF00) >> 8,
+ version & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+
+/**
+ * _ctl_version_mpi_show - MPI (message passing interface) version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+ ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+
+/**
+ * _ctl_version_product_show - product name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+
+/**
+ * _ctl_version_nvdata_persistent_show - ndvata persistent version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_persistent_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ _ctl_version_nvdata_persistent_show, NULL);
+
+/**
+ * _ctl_version_nvdata_default_show - nvdata default version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ _ctl_version_nvdata_default_show, NULL);
+
+/**
+ * _ctl_board_name_show - board name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+
+/**
+ * _ctl_board_assembly_show - board assembly name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+
+/**
+ * _ctl_board_tracer_show - board tracer number
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+
+/**
+ * _ctl_io_delay_show - io missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for firmware implemention for deboucing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+
+/**
+ * _ctl_device_delay_show - device missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for firmware implemention for deboucing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+
+/**
+ * _ctl_fw_queue_depth_show - global credits
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+ _ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ ioc->logging_level = val;
+ pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
+ ioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
+ _ctl_logging_level_store);
+
+/**
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt3sas_fwfault_debug is command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->fwfault_debug = val;
+ pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
+ ioc->fwfault_debug);
+ return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+ _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
+ NULL);
+
+struct DIAG_BUFFER_START {
+ __le32 Size;
+ __le32 DiagVersion;
+ u8 BufferType;
+ u8 Reserved[3];
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
+};
+
+/**
+ * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_trace_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 size = 0;
+ struct DIAG_BUFFER_START *request_data;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request_data = (struct DIAG_BUFFER_START *)
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
+ if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
+ le32_to_cpu(request_data->Reserved3) == 0x4742444c)
+ size = le32_to_cpu(request_data->Size);
+
+ ioc->ring_buffer_sz = size;
+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
+ _ctl_host_trace_buffer_size_show, NULL);
+
+/**
+ * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * You will only be able to read 4k bytes of ring buffer at a time.
+ * In order to read beyond 4k bytes, you will have to write out the
+ * offset to the same attribute, it will move the pointer.
+ */
+static ssize_t
+_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ void *request_data;
+ u32 size;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
+ return 0;
+
+ size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+ request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
+ memcpy(buf, request_data, size);
+ return size;
+}
+
+static ssize_t
+_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->ring_buffer_offset = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
+
+
+/*****************************************/
+
+/**
+ * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * This is a mechnism to post/release host_trace_buffers
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+ else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ return snprintf(buf, PAGE_SIZE, "release\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ char str[10] = "";
+ struct mpt3_diag_register diag_register;
+ u8 issue_reset = 0;
+
+ /* don't allow post/release occurr while recovery is active */
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery || ioc->is_driver_loading)
+ return -EBUSY;
+
+ if (sscanf(buf, "%9s", str) != 1)
+ return -EINVAL;
+
+ if (!strcmp(str, "post")) {
+ /* exit out if host buffers are already posted */
+ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+ goto out;
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+ pr_info(MPT3SAS_FMT "posting host trace buffers\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ diag_register.requested_buffer_size = (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+ _ctl_diag_register_2(ioc, &diag_register);
+ } else if (!strcmp(str, "release")) {
+ /* exit out if host buffers are already released */
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ goto out;
+ pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
+ ioc->name);
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ out:
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_enable_show,
+ _ctl_host_trace_buffer_enable_store);
+
+/*********** diagnostic trigger suppport *********************************/
+
+/**
+ * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
+ memcpy(buf, &ioc->diag_trigger_master, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+ memset(&ioc->diag_trigger_master, 0,
+ sizeof(struct SL_WH_MASTER_TRIGGER_T));
+ memcpy(&ioc->diag_trigger_master, buf, rc);
+ ioc->diag_trigger_master.MasterData |=
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
+
+
+/**
+ * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_event, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_event, 0,
+ sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_event, buf, sz);
+ if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+
+
+/**
+ * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_scsi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_scsi, 0,
+ sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_scsi, buf, sz);
+ if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+
+
+/**
+ * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_mpi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_mpi, 0,
+ sizeof(ioc->diag_trigger_mpi));
+ memcpy(&ioc->diag_trigger_mpi, buf, sz);
+ if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+
+static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
+
+/*********** diagnostic trigger suppport *** END ****************************/
+
+
+
+/*****************************************/
+
+struct device_attribute *mpt3sas_host_attrs[] = {
+ &dev_attr_version_fw,
+ &dev_attr_version_bios,
+ &dev_attr_version_mpi,
+ &dev_attr_version_product,
+ &dev_attr_version_nvdata_persistent,
+ &dev_attr_version_nvdata_default,
+ &dev_attr_board_name,
+ &dev_attr_board_assembly,
+ &dev_attr_board_tracer,
+ &dev_attr_io_delay,
+ &dev_attr_device_delay,
+ &dev_attr_logging_level,
+ &dev_attr_fwfault_debug,
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ &dev_attr_ioc_reset_count,
+ &dev_attr_host_trace_buffer_size,
+ &dev_attr_host_trace_buffer,
+ &dev_attr_host_trace_buffer_enable,
+ &dev_attr_reply_queue_count,
+ &dev_attr_diag_trigger_master,
+ &dev_attr_diag_trigger_event,
+ &dev_attr_diag_trigger_scsi,
+ &dev_attr_diag_trigger_mpi,
+ NULL,
+};
+
+/* device attributes */
+
+/**
+ * _ctl_device_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the sas address for the target
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->sas_target->sas_address);
+}
+static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+
+/**
+ * _ctl_device_handle_show - device handle
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware assigned device handle
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+
+struct device_attribute *mpt3sas_dev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ NULL,
+};
+
+static const struct file_operations ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = _ctl_ioctl,
+ .poll = _ctl_poll,
+ .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = _ctl_ioctl_compat,
+#endif
+};
+
+static struct miscdevice ctl_dev = {
+ .minor = MPT3SAS_MINOR,
+ .name = MPT3SAS_DEV_NAME,
+ .fops = &ctl_fops,
+};
+
+/**
+ * mpt3sas_ctl_init - main entry point for ctl.
+ *
+ */
+void
+mpt3sas_ctl_init(void)
+{
+ async_queue = NULL;
+ if (misc_register(&ctl_dev) < 0)
+ pr_err("%s can't register misc device [minor=%d]\n",
+ MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
+
+ init_waitqueue_head(&ctl_poll_wait);
+}
+
+/**
+ * mpt3sas_ctl_exit - exit point for ctl
+ *
+ */
+void
+mpt3sas_ctl_exit(void)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ int i;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+
+ /* free memory associated to diag buffers */
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!ioc->diag_buffer[i])
+ continue;
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ ioc->diag_buffer[i] = NULL;
+ ioc->diag_buffer_status[i] = 0;
+ }
+
+ kfree(ioc->event_log);
+ }
+ misc_deregister(&ctl_dev);
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
new file mode 100644
index 000000000..aee99ce67
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -0,0 +1,419 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_CTL_H_INCLUDED
+#define MPT3SAS_CTL_H_INCLUDED
+
+#ifdef __KERNEL__
+#include <linux/miscdevice.h>
+#endif
+
+
+#ifndef MPT3SAS_MINOR
+#define MPT3SAS_MINOR (MPT_MINOR + 2)
+#endif
+#define MPT3SAS_DEV_NAME "mpt3ctl"
+#define MPT3_MAGIC_NUMBER 'L'
+#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
+
+/**
+ * IOCTL opcodes
+ */
+#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \
+ struct mpt3_ioctl_iocinfo)
+#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command)
+#ifdef CONFIG_COMPAT
+#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command32)
+#endif
+#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \
+ struct mpt3_ioctl_eventquery)
+#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \
+ struct mpt3_ioctl_eventenable)
+#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \
+ struct mpt3_ioctl_eventreport)
+#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \
+ struct mpt3_ioctl_diag_reset)
+#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \
+ struct mpt3_ioctl_btdh_mapping)
+
+/* diag buffer support */
+#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \
+ struct mpt3_diag_register)
+#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \
+ struct mpt3_diag_release)
+#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \
+ struct mpt3_diag_unregister)
+#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \
+ struct mpt3_diag_query)
+#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
+ struct mpt3_diag_read_buffer)
+
+/**
+ * struct mpt3_ioctl_header - main header structure
+ * @ioc_number - IOC unit number
+ * @port_number - IOC port number
+ * @max_data_size - maximum number bytes to transfer on read
+ */
+struct mpt3_ioctl_header {
+ uint32_t ioc_number;
+ uint32_t port_number;
+ uint32_t max_data_size;
+};
+
+/**
+ * struct mpt3_ioctl_diag_reset - diagnostic reset
+ * @hdr - generic header
+ */
+struct mpt3_ioctl_diag_reset {
+ struct mpt3_ioctl_header hdr;
+};
+
+
+/**
+ * struct mpt3_ioctl_pci_info - pci device info
+ * @device - pci device id
+ * @function - pci function id
+ * @bus - pci bus id
+ * @segment_id - pci segment id
+ */
+struct mpt3_ioctl_pci_info {
+ union {
+ struct {
+ uint32_t device:5;
+ uint32_t function:3;
+ uint32_t bus:24;
+ } bits;
+ uint32_t word;
+ } u;
+ uint32_t segment_id;
+};
+
+
+#define MPT2_IOCTL_INTERFACE_SCSI (0x00)
+#define MPT2_IOCTL_INTERFACE_FC (0x01)
+#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
+#define MPT2_IOCTL_INTERFACE_SAS (0x03)
+#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
+#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT2_IOCTL_VERSION_LENGTH (32)
+
+/**
+ * struct mpt3_ioctl_iocinfo - generic controller info
+ * @hdr - generic header
+ * @adapter_type - type of adapter (spi, fc, sas)
+ * @port_number - port number
+ * @pci_id - PCI Id
+ * @hw_rev - hardware revision
+ * @sub_system_device - PCI subsystem Device ID
+ * @sub_system_vendor - PCI subsystem Vendor ID
+ * @rsvd0 - reserved
+ * @firmware_version - firmware version
+ * @bios_version - BIOS version
+ * @driver_version - driver version - 32 ASCII characters
+ * @rsvd1 - reserved
+ * @scsi_id - scsi id of adapter 0
+ * @rsvd2 - reserved
+ * @pci_information - pci info (2nd revision)
+ */
+struct mpt3_ioctl_iocinfo {
+ struct mpt3_ioctl_header hdr;
+ uint32_t adapter_type;
+ uint32_t port_number;
+ uint32_t pci_id;
+ uint32_t hw_rev;
+ uint32_t subsystem_device;
+ uint32_t subsystem_vendor;
+ uint32_t rsvd0;
+ uint32_t firmware_version;
+ uint32_t bios_version;
+ uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
+ uint8_t rsvd1;
+ uint8_t scsi_id;
+ uint16_t rsvd2;
+ struct mpt3_ioctl_pci_info pci_information;
+};
+
+
+/* number of event log entries */
+#define MPT3SAS_CTL_EVENT_LOG_SIZE (50)
+
+/**
+ * struct mpt3_ioctl_eventquery - query event count and type
+ * @hdr - generic header
+ * @event_entries - number of events returned by get_event_report
+ * @rsvd - reserved
+ * @event_types - type of events currently being captured
+ */
+struct mpt3_ioctl_eventquery {
+ struct mpt3_ioctl_header hdr;
+ uint16_t event_entries;
+ uint16_t rsvd;
+ uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+/**
+ * struct mpt3_ioctl_eventenable - enable/disable event capturing
+ * @hdr - generic header
+ * @event_types - toggle off/on type of events to be captured
+ */
+struct mpt3_ioctl_eventenable {
+ struct mpt3_ioctl_header hdr;
+ uint32_t event_types[4];
+};
+
+#define MPT3_EVENT_DATA_SIZE (192)
+/**
+ * struct MPT3_IOCTL_EVENTS -
+ * @event - the event that was reported
+ * @context - unique value for each event assigned by driver
+ * @data - event data returned in fw reply message
+ */
+struct MPT3_IOCTL_EVENTS {
+ uint32_t event;
+ uint32_t context;
+ uint8_t data[MPT3_EVENT_DATA_SIZE];
+};
+
+/**
+ * struct mpt3_ioctl_eventreport - returing event log
+ * @hdr - generic header
+ * @event_data - (see struct MPT3_IOCTL_EVENTS)
+ */
+struct mpt3_ioctl_eventreport {
+ struct mpt3_ioctl_header hdr;
+ struct MPT3_IOCTL_EVENTS event_data[1];
+};
+
+/**
+ * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl
+ * @hdr - generic header
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @sense_data_ptr - sense data location
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number bytes for data transfer in (read)
+ * @data_out_size - number bytes for data transfer out (write)
+ * @max_sense_bytes - maximum number of bytes for auto sense buffers
+ * @data_sge_offset - offset in words from the start of the request message to
+ * the first SGL
+ * @mf[1];
+ */
+struct mpt3_ioctl_command {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ void __user *reply_frame_buf_ptr;
+ void __user *data_in_buf_ptr;
+ void __user *data_out_buf_ptr;
+ void __user *sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+
+#ifdef CONFIG_COMPAT
+struct mpt3_ioctl_command32 {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ uint32_t reply_frame_buf_ptr;
+ uint32_t data_in_buf_ptr;
+ uint32_t data_out_buf_ptr;
+ uint32_t sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+#endif
+
+/**
+ * struct mpt3_ioctl_btdh_mapping - mapping info
+ * @hdr - generic header
+ * @id - target device identification number
+ * @bus - SCSI bus number that the target device exists on
+ * @handle - device handle for the target device
+ * @rsvd - reserved
+ *
+ * To obtain a bus/id the application sets
+ * handle to valid handle, and bus/id to 0xFFFF.
+ *
+ * To obtain the device handle the application sets
+ * bus/id valid value, and the handle to 0xFFFF.
+ */
+struct mpt3_ioctl_btdh_mapping {
+ struct mpt3_ioctl_header hdr;
+ uint32_t id;
+ uint32_t bus;
+ uint16_t handle;
+ uint16_t rsvd;
+};
+
+
+
+/* application flags for mpt3_diag_register, mpt3_diag_query */
+#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
+#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
+#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+
+/* flags for mpt3_diag_read_buffer */
+#define MPT3_FLAGS_REREGISTER (0x0001)
+
+#define MPT3_PRODUCT_SPECIFIC_DWORDS 23
+
+/**
+ * struct mpt3_diag_register - application register with driver
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @requested_buffer_size - buffers size in bytes
+ * @unique_id - tag specified by application that is used to signal ownership
+ * of the buffer.
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+struct mpt3_diag_register {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t requested_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_unregister - application unregister with driver
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be unregistered
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+struct mpt3_diag_unregister {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_query - query relevant info associated with diag buffers
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @total_buffer_size - diag buffer size in bytes
+ * @driver_added_buffer_size - size of extra space appended to end of buffer
+ * @unique_id - unique id associated with this buffer.
+ *
+ * The application will send only buffer_type and unique_id. Driver will
+ * inspect unique_id first, if valid, fill in all the info. If unique_id is
+ * 0x00, the driver will return info specified by Buffer Type.
+ */
+struct mpt3_diag_query {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t total_buffer_size;
+ uint32_t driver_added_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_release - request to send Diag Release Message to firmware
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be released
+ *
+ * This allows ownership of the specified buffer to returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwritting information in the buffer.
+ */
+struct mpt3_diag_release {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_read_buffer - request for copy of the diag buffer
+ * @hdr - generic header
+ * @status -
+ * @reserved -
+ * @flags - misc flags
+ * @starting_offset - starting offset within drivers buffer where to start
+ * reading data at into the specified application buffer
+ * @bytes_to_read - number of bytes to copy from the drivers buffer into the
+ * application buffer starting at starting_offset.
+ * @unique_id - unique id associated with this buffer.
+ * @diagnostic_data - data payload
+ */
+struct mpt3_diag_read_buffer {
+ struct mpt3_ioctl_header hdr;
+ uint8_t status;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t starting_offset;
+ uint32_t bytes_to_read;
+ uint32_t unique_id;
+ uint32_t diagnostic_data[1];
+};
+
+#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
new file mode 100644
index 000000000..4e8a63fdb
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -0,0 +1,220 @@
+/*
+ * Logging Support for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_DEBUG_H_INCLUDED
+#define MPT3SAS_DEBUG_H_INCLUDED
+
+#define MPT_DEBUG 0x00000001
+#define MPT_DEBUG_MSG_FRAME 0x00000002
+#define MPT_DEBUG_SG 0x00000004
+#define MPT_DEBUG_EVENTS 0x00000008
+#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPT_DEBUG_INIT 0x00000020
+#define MPT_DEBUG_EXIT 0x00000040
+#define MPT_DEBUG_FAIL 0x00000080
+#define MPT_DEBUG_TM 0x00000100
+#define MPT_DEBUG_REPLY 0x00000200
+#define MPT_DEBUG_HANDSHAKE 0x00000400
+#define MPT_DEBUG_CONFIG 0x00000800
+#define MPT_DEBUG_DL 0x00001000
+#define MPT_DEBUG_RESET 0x00002000
+#define MPT_DEBUG_SCSI 0x00004000
+#define MPT_DEBUG_IOCTL 0x00008000
+#define MPT_DEBUG_SAS 0x00020000
+#define MPT_DEBUG_TRANSPORT 0x00040000
+#define MPT_DEBUG_TASK_SET_FULL 0x00080000
+
+#define MPT_DEBUG_TRIGGER_DIAG 0x00200000
+
+
+/*
+ * CONFIG_SCSI_MPT3SAS_LOGGING - enabled in Kconfig
+ */
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+ if (IOC->logging_level & BITS) \
+ CMD; \
+}
+#else
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+
+/*
+ * debug macros
+ */
+
+#define dprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)
+
+#define dsgprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)
+
+#define devtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)
+
+#define dewtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK)
+
+#define dinitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)
+
+#define dexitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)
+
+#define dfailprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)
+
+#define dtmprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)
+
+#define dreplyprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)
+
+#define dhsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)
+
+#define dcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)
+
+#define ddlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)
+
+#define drsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)
+
+#define dsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)
+
+#define dctlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)
+
+#define dsasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)
+
+#define dsastransport(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#define dtsfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL)
+
+#define dtransportprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT)
+
+#define dTriggerDiagPrintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG)
+
+
+
+/* inline functions for dumping debug data*/
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _debug_dump_mf - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_mf(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("mf:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_reply - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_reply(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("reply:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_config - print config page contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_config(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("config:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+#else
+#define _debug_dump_mf(mpi_request, sz)
+#define _debug_dump_reply(mpi_request, sz)
+#define _debug_dump_config(mpi_request, sz)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+#endif /* MPT3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
new file mode 100644
index 000000000..5a97e3286
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -0,0 +1,8209 @@
+/*
+ * Scsi Host Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/raid_class.h>
+
+#include "mpt3sas_base.h"
+
+MODULE_AUTHOR(MPT3SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
+
+#define RAID_CHANNEL 1
+/* forward proto's */
+static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander);
+static void _firmware_event_work(struct work_struct *work);
+
+static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device);
+static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 retry_count, u8 is_pd);
+
+static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
+/* global parameters */
+LIST_HEAD(mpt3sas_ioc_list);
+
+/* local parameters */
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int mpt_ids;
+
+static u8 tm_tr_cb_idx = -1 ;
+static u8 tm_tr_volume_cb_idx = -1 ;
+static u8 tm_sas_control_cb_idx = -1;
+
+/* command line options */
+static u32 logging_level;
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
+
+
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+
+/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+#define MPT3SAS_MAX_LUN (16895)
+static u64 max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, ullong, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+
+
+
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Either bit can be set, or both
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable,
+ " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
+
+/* raid transport support */
+
+static struct raid_template *mpt3sas_raid_template;
+
+
+/**
+ * struct sense_info - common structure for obtaining sense keys
+ * @skey: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ */
+struct sense_info {
+ u8 skey;
+ u8 asc;
+ u8 ascq;
+};
+
+#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
+#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
+#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
+#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+/**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+ * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
+ * @ioc: per adapter object
+ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+ * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
+ * @event_data: reply event data payload follows
+ *
+ * This object stored on ioc->fw_event_list.
+ */
+struct fw_event_work {
+ struct list_head list;
+ struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
+
+ struct MPT3SAS_ADAPTER *ioc;
+ u16 device_handle;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 ignore;
+ u16 event;
+ char event_data[0] __aligned(4);
+};
+
+/* raid transport support */
+static struct raid_template *mpt3sas_raid_template;
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi staus
+ * @log_info: log information
+ * @transfer_length: data length transfer when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+ u16 handle;
+ u8 is_raid;
+ enum dma_data_direction dir;
+ u32 data_length;
+ dma_addr_t data_dma;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u32 lun;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 timeout;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 valid_reply;
+ /* the following bits are only valid when 'valid_reply = 1' */
+ u32 sense_length;
+ u16 ioc_status;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ u32 transfer_length;
+};
+
+/*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+static const struct pci_device_id scsih_pci_table[] = {
+ /* Fury ~ 3004 and 3008 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Invader ~ 3108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, scsih_pci_table);
+
+/**
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
+ *
+ * Note: The logging levels are defined in mpt3sas_debug.h.
+ */
+static int
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting logging_level(0x%08x)\n", logging_level);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->logging_level = logging_level;
+ return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+ &logging_level, 0644);
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+ Mpi2BootDeviceSasWwid_t *boot_device)
+{
+ return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in INDENTIFY fram
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+ Mpi2BootDeviceDeviceName_t *boot_device)
+{
+ return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+ Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+ return (enclosure_logical_id == le64_to_cpu(boot_device->
+ EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
+ SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in INDENTIFY fram
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+ u64 enclosure_logical_id, u16 slot, u8 form,
+ Mpi2BiosPage2BootDevice_t *boot_device)
+{
+ int rc = 0;
+
+ switch (form) {
+ case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+ if (!sas_address)
+ break;
+ rc = _scsih_srch_boot_sas_address(
+ sas_address, &boot_device->SasWwid);
+ break;
+ case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+ if (!enclosure_logical_id)
+ break;
+ rc = _scsih_srch_boot_encl_slot(
+ enclosure_logical_id,
+ slot, &boot_device->EnclosureSlot);
+ break;
+ case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+ if (!device_name)
+ break;
+ rc = _scsih_srch_boot_device_name(
+ device_name, &boot_device->DeviceName);
+ break;
+ case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * _scsih_get_sas_address - set the sas_address for given device handle
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 success, non-zero when failure
+ */
+static int
+_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 *sas_address)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 ioc_status;
+
+ *sas_address = 0;
+
+ if (handle <= ioc->sas_hba.num_phys) {
+ *sas_address = ioc->sas_hba.sas_address;
+ return 0;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ return 0;
+ }
+
+ /* we hit this becuase the given parent handle doesn't exist */
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return -ENXIO;
+
+ /* else error case */
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: either sas_device or raid_device object
+ * @is_raid: [flag] 1 = raid object, 0 = sas object
+ *
+ * Determines whether this device should be first reported device to
+ * to scsi-ml or sas transport, this purpose is for persistent boot device.
+ * There are primary, alternate, and current entries in bios page 2. The order
+ * priority is primary, alternate, then current. This routine saves
+ * the corresponding device object and is_raid flag in the ioc object.
+ * The saved data to be used later in _scsih_probe_boot_devices().
+ */
+static void
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
+ void *device, u8 is_raid)
+{
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u64 sas_address;
+ u64 device_name;
+ u64 enclosure_logical_id;
+ u16 slot;
+
+ /* only process this function when driver loads */
+ if (!ioc->is_driver_loading)
+ return;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ if (!is_raid) {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
+ } else {
+ raid_device = device;
+ sas_address = raid_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ }
+
+ if (!ioc->req_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_boot_device.device = device;
+ ioc->req_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->req_alt_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedAltBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_alt_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_alt_boot_device.device = device;
+ ioc->req_alt_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->current_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.CurrentBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: current_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->current_boot_device.device = device;
+ ioc->current_boot_device.is_raid = is_raid;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+struct _sas_device *
+mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+static struct _sas_device *
+_scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_remove - remove sas_device from list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Removing object and freeing associated memory from the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ if (!sas_device)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * _scsih_sas_device_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object to the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ /*
+ * When asyn scanning is enabled, its not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading) {
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
+ */
+static void
+_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+ _scsih_determine_boot_device(ioc, sas_device, 0);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: sas device target id
+ * @channel: sas device channel
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on target id, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->id == id && raid_device->channel == channel) {
+ r = raid_device;
+ goto out;
+ }
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on handle, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->handle != handle)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_wwid - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on wwid, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid != wwid)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_add - add raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ * This is added to the raid_device_list link list.
+ */
+static void
+_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ raid_device->handle, (unsigned long long)raid_device->wwid));
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_add_tail(&raid_device->list, &ioc->raid_device_list);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_remove - delete raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ */
+static void
+_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+ if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+ ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ */
+static struct scsi_cmnd *
+_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then will derefrence the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->scsi_lookup[smid - 1].scmd;
+ ioc->scsi_lookup[smid - 1].scmd = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_scmd - scmd lookup
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @scmd: pointer to scsi command object
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a scmd pointer in the scsi_lookup array,
+ * returning the revelent smid. A returned value of zero means invalid.
+ */
+static u16
+_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
+ *scmd)
+{
+ u16 smid;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ smid = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd == scmd) {
+ smid = ioc->scsi_lookup[i].smid;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel &&
+ ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Returns queue depth.
+ */
+static int
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ max_depth = shost->can_queue;
+
+ /* limit max device queue for SATA to 32 */
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ goto not_sata;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ if (!sas_target_priv_data)
+ goto not_sata;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+ goto not_sata;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (sas_device && sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ not_sata:
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+/**
+ * _scsih_target_alloc - target add routine
+ * @starget: scsi target struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
+ GFP_KERNEL);
+ if (!sas_target_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = sas_target_priv_data;
+ sas_target_priv_data->starget = starget;
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+
+ /* RAID volumes */
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
+ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+ }
+
+ /* sas/sata devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+
+ if (sas_device) {
+ sas_target_priv_data->handle = sas_device->handle;
+ sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_device->starget = starget;
+ sas_device->id = starget->id;
+ sas_device->channel = starget->channel;
+ if (test_bit(sas_device->handle, ioc->pd_handles))
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ if (sas_device->fast_path)
+ sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return 0;
+}
+
+/**
+ * _scsih_target_destroy - target destroy routine
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = starget->hostdata;
+ if (!sas_target_priv_data)
+ return;
+
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ raid_device->starget = NULL;
+ raid_device->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device && (sas_device->starget == starget) &&
+ (sas_device->id == starget->id) &&
+ (sas_device->channel == starget->channel))
+ sas_device->starget = NULL;
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ out:
+ kfree(sas_target_priv_data);
+ starget->hostdata = NULL;
+}
+
+/**
+ * _scsih_slave_alloc - device add routine
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
+ GFP_KERNEL);
+ if (!sas_device_priv_data)
+ return -ENOMEM;
+
+ sas_device_priv_data->lun = sdev->lun;
+ sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns++;
+ sas_device_priv_data->sas_target = sas_target_priv_data;
+ sdev->hostdata = sas_device_priv_data;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
+ sdev->no_uld_attach = 1;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc,
+ starget->id, starget->channel);
+ if (raid_device)
+ raid_device->sdev = sdev; /* raid is single lun */
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && (sas_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : sas_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ sas_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_slave_destroy - device destroy routine
+ * @sdev: scsi device struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns--;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && !sas_target_priv_data->num_luns)
+ sas_device->starget = NULL;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * _scsih_display_sata_capabilities - sata capabilities
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sdev: scsi device struct
+ */
+static void
+_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, struct scsi_device *sdev)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u16 flags;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ flags = le16_to_cpu(sas_device_pg0.Flags);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ sdev_printk(KERN_INFO, sdev,
+ "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
+ "sw_preserve(%s)\n",
+ (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
+ "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
+}
+
+/*
+ * raid transport support -
+ * Enabled for SLES11 and newer, in older kernels the driver will panic when
+ * unloading the driver followed by a load - I beleive that the subroutine
+ * raid_class_release() is not cleaning up properly.
+ */
+
+/**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volume_status_flags;
+ u8 percent_complete;
+ u16 handle;
+
+ percent_complete = 0;
+ handle = 0;
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device) {
+ handle = raid_device->handle;
+ percent_complete = raid_device->percent_complete;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!handle)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ percent_complete = 0;
+ goto out;
+ }
+
+ volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (!(volume_status_flags &
+ MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+ percent_complete = 0;
+
+ out:
+ raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume level
+ * @dev the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volstate;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ u16 handle = 0;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device)
+ handle = raid_device->handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+ state = RAID_STATE_RESYNCING;
+ goto out;
+ }
+
+ switch (vol_pg0.VolumeState) {
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MPI2_RAID_VOL_STATE_FAILED:
+ case MPI2_RAID_VOL_STATE_MISSING:
+ state = RAID_STATE_OFFLINE;
+ break;
+ }
+ out:
+ raid_set_state(mpt3sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @volume_type: volume type
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
+{
+ enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+ switch (volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ level = RAID_LEVEL_0;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ level = RAID_LEVEL_10;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ level = RAID_LEVEL_1E;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ level = RAID_LEVEL_1;
+ break;
+ }
+
+ raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+
+/**
+ * _scsih_get_volume_capabilities - volume capabilities
+ * @ioc: per adapter object
+ * @sas_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
+ */
+static int
+_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ Mpi2RaidVolPage0_t *vol_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 sz;
+ u8 num_pds;
+
+ if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
+ &num_pds)) || !num_pds) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ raid_device->num_pds = num_pds;
+ sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+ sizeof(Mpi2RaidVol0PhysDisk_t));
+ vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!vol_pg0) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ kfree(vol_pg0);
+ return 1;
+ }
+
+ raid_device->volume_type = vol_pg0->VolumeType;
+
+ /* figure out what the underlying devices are by
+ * obtaining the device_info bits for the 1st device
+ */
+ if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+ vol_pg0->PhysDisk[0].PhysDiskNum))) {
+ if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ le16_to_cpu(pd_pg0.DevHandle)))) {
+ raid_device->device_info =
+ le32_to_cpu(sas_device_pg0.DeviceInfo);
+ }
+ }
+
+ kfree(vol_pg0);
+ return 0;
+}
+
+
+
+/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+
+ /* only for TAPE */
+ if (sdev->type != TYPE_TAPE)
+ return;
+
+ if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+ return;
+
+ sas_enable_tlr(sdev);
+ sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+ sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+ return;
+
+}
+
+/**
+ * _scsih_slave_configure - device configure routine.
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int qdepth;
+ u8 ssp_target = 0;
+ char *ds = "";
+ char *r_level = "";
+ u16 handle, volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ qdepth = 1;
+ sas_device_priv_data = sdev->hostdata;
+ sas_device_priv_data->configured_lun = 1;
+ sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ handle = sas_target_priv_data->handle;
+
+ /* raid volume handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (!raid_device) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+
+ /* RAID Queue Depth Support
+ * IS volume = underlying qdepth of drive type, either
+ * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
+ * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
+ */
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ else
+ ds = "STP";
+ }
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ r_level = "RAID0";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ if (ioc->manu_pg10.OEMIdentifier &&
+ (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
+ MFG10_GF0_R10_DISPLAY) &&
+ !(raid_device->num_pds % 2))
+ r_level = "RAID10";
+ else
+ r_level = "RAID1E";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID1";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID10";
+ break;
+ case MPI2_RAID_VOL_TYPE_UNKNOWN:
+ default:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAIDX";
+ break;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
+ r_level, raid_device->handle,
+ (unsigned long long)raid_device->wwid,
+ raid_device->num_pds, ds);
+
+
+ _scsih_change_queue_depth(sdev, qdepth);
+
+/* raid transport support */
+ _scsih_set_level(sdev, raid_device->volume_type);
+ return 0;
+ }
+
+ /* non-raid handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ if (mpt3sas_config_get_volume_handle(ioc, handle,
+ &volume_handle)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
+ volume_handle, &volume_wwid)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ssp_target = 1;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "STP";
+ else if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ }
+
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
+ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+ ds, handle, (unsigned long long)sas_device->sas_address,
+ sas_device->phy, (unsigned long long)sas_device->device_name);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
+ ds, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!ssp_target)
+ _scsih_display_sata_capabilities(ioc, handle, sdev);
+
+
+ _scsih_change_queue_depth(sdev, qdepth);
+
+ if (ssp_target) {
+ sas_read_port_mode_page(sdev);
+ _scsih_enable_tlr(ioc, sdev);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * @sdev: scsi device struct
+ * @bdev: pointer to block device context
+ * @capacity: device size (in 512 byte sectors)
+ * @params: three element array to place output:
+ * params[0] number of heads (max 255)
+ * params[1] number of sectors (max 63)
+ * params[2] number of cylinders
+ *
+ * Return nothing.
+ */
+static int
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
+ /*
+ * Handle extended translation size for logical drives
+ * > 1Gb
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ /* return result */
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+
+ return 0;
+}
+
+/**
+ * _scsih_response_code - translation of device response code
+ * @ioc: per adapter object
+ * @response_code: response code returned by the device
+ *
+ * Return nothing.
+ */
+static void
+_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
+{
+ char *desc;
+
+ switch (response_code) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
+ ioc->name, response_code, desc);
+}
+
+/**
+ * _scsih_tm_done - tm completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->tm_cmds.smid != smid)
+ return 1;
+ mpt3sas_base_flush_reply_queues(ioc);
+ ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->tm_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During taskmangement request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 1;
+ skip = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During taskmangement request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 0;
+ skip = 1;
+ ioc->ignore_loginfos = 0;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_issue_tm - main routine for sending tm requests
+ * @ioc: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ * @timeout: timeout in seconds
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * Context: user
+ *
+ * A generic API for sending task management requests to firmware.
+ *
+ * The callback index is set inside `ioc->tm_cb_idx`.
+ *
+ * Return SUCCESS or FAILED.
+ */
+int
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ enum mutex_type m_type)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ u16 smid = 0;
+ u32 ioc_state;
+ unsigned long timeleft;
+ struct scsiio_tracker *scsi_lookup = NULL;
+ int rc;
+
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
+ if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n", ioc->name));
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
+ ioc->name, handle, type, smid_task));
+ ioc->tm_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->tm_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
+ mpt3sas_scsih_set_tm_flag(ioc, handle);
+ init_completion(&ioc->tm_cmds.done);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
+ }
+
+ if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ mpi_reply = ioc->tm_cmds.reply;
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
+ "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ _scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
+ }
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ if (scsi_lookup->scmd == NULL)
+ break;
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
+ */
+static void
+_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
+{
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ scsi_print_command(scmd);
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ starget_printk(KERN_INFO, starget,
+ "%s handle(0x%04x), %s wwid(0x%016llx)\n",
+ device_str, priv_target->handle,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ if (priv_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ starget_printk(KERN_INFO, starget,
+ "volume handle(0x%04x), "
+ "volume wwid(0x%016llx)\n",
+ sas_device->volume_handle,
+ (unsigned long long)sas_device->volume_wwid);
+ }
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy);
+ starget_printk(KERN_INFO, starget,
+ "enclosure_logical_id(0x%016llx), slot(%d)\n",
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_abort - eh threads main abort routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_abort(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u16 smid;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* search for the command */
+ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
+ if (!smid) {
+ scmd->result = DID_RESET << 16;
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components and volumes this is not supported */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ mpt3sas_halt_firmware(ioc);
+
+ handle = sas_device_priv_data->sas_target->handle;
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting device reset! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_target_reset - eh threads main target reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_target_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+ struct scsi_target *starget = scmd->device->sdev_target;
+
+ starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
+ scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
+ scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, TM_MUTEX_ON);
+
+ out:
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+
+/**
+ * _scsih_host_reset - eh threads main host reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_host_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ int r, retval;
+
+ pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+
+ if (ioc->is_driver_loading) {
+ pr_info(MPT3SAS_FMT "Blocking the host reset\n",
+ ioc->name);
+ r = FAILED;
+ goto out;
+ }
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ r = (retval < 0) ? FAILED : SUCCESS;
+out:
+ pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
+ ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return r;
+}
+
+/**
+ * _scsih_fw_event_add - insert and queue up fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This adds the firmware event object into link list, then queues it up to
+ * be processed from user context.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ if (ioc->firmware_event_thread == NULL)
+ return;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ INIT_LIST_HEAD(&fw_event->list);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_WORK(&fw_event->work, _firmware_event_work);
+ queue_work(ioc->firmware_event_thread, &fw_event->work);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_fw_event_free - delete fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This removes firmware event object from link list, frees associated memory.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
+ *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+
+ /**
+ * mpt3sas_send_trigger_data_event - send event for processing trigger data
+ * @ioc: per adapter object
+ * @event_data: trigger event data
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(*fw_event) + sizeof(*event_data),
+ GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
+ fw_event->ioc = ioc;
+ memcpy(fw_event->event_data, event_data, sizeof(*event_data));
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_error_recovery_delete_devices - remove devices not responding
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt3sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
+ * @ioc: per adapter object
+ *
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
+
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work_sync(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ }
+}
+
+/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * change the device state from block to running
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+
+ sas_device_priv_data->block = 0;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
+ "device_running, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+}
+
+
+/**
+ * _scsih_ublock_io_device - prepare device to be deleted
+ * @ioc: per adapter object
+ * @sas_addr: sas address
+ *
+ * unblock then put device in offline state
+ */
+static void
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->sas_address
+ != sas_address)
+ continue;
+ if (sas_device_priv_data->block) {
+ sas_device_priv_data->block = 0;
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropiately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ }
+}
+
+/**
+ * _scsih_block_io_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropiately set the sdev state.
+ */
+static void
+_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle != handle)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev,
+ "device_blocked, handle(0x%04x)\n", handle);
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_to_ex
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ *
+ * This routine set sdev state to SDEV_BLOCK for all devices
+ * attached to this expander. This function called when expander is
+ * pulled.
+ */
+static void
+_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_sibling;
+ unsigned long flags;
+
+ if (!sas_expander)
+ return;
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device =
+ mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ if (sas_device)
+ set_bit(sas_device->handle,
+ ioc->blocking_handles);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+ }
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ expander_sibling =
+ mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, mpt3sas_port->remote_identify.sas_address);
+ _scsih_block_io_to_children_attached_to_ex(ioc,
+ expander_sibling);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_directly
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine set sdev state to SDEV_BLOCK for all devices
+ * direct attached during device pull.
+ */
+static void
+_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_tm_tr_send - send task management request
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This code is to initiate the device removal handshake protocol
+ * with controller firmware. This function will issue target reset
+ * using high priority request queue. It will send a sas iounit
+ * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
+ *
+ * This is designed to send muliple task management request at the same
+ * time to the fifo. If the fifo is full, we will append the request,
+ * and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _sas_device *sas_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ struct _tr_list *delayed_tr;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed: handle(0x%04x)\n",
+ __func__, ioc->name, handle));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, ioc->name,
+ handle));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational: handle(0x%04x)\n",
+ __func__, ioc->name,
+ handle));
+ return;
+ }
+
+ /* if PD, then return */
+ if (test_bit(handle, ioc->pd_handles))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device && sas_device->starget &&
+ sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = sas_device->sas_address;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_target_priv_data) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle,
+ (unsigned long long)sas_address));
+ _scsih_ublock_io_device(ioc, sas_address);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+}
+
+/**
+ * _scsih_tm_tr_complete -
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the target reset completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u16 smid_sas_ctrl;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed\n", __func__, ioc->name));
+ return 1;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery\n", __func__,
+ ioc->name));
+ return 1;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational\n", __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
+ if (!smid_sas_ctrl) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return 1;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
+ mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_sas_control_complete - completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the sas iounit control completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ Mpi2SasIoUnitControlReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (likely(mpi_reply)) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ } else {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ return 1;
+}
+
+/**
+ * _scsih_tm_tr_volume_send - send target reset request for volumes
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This is designed to send muliple task management request at the same
+ * time to the fifo. If the fifo is full, we will append the request,
+ * and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _tr_list *delayed_tr;
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ __func__, ioc->name));
+ return;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_volume_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+}
+
+/**
+ * _scsih_tm_volume_tr_complete - target reset completion
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_check_for_pending_tm - check for pending task management
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * This will check delayed target reset list, and feed the
+ * next reqeust.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _tr_list *delayed_tr;
+
+ if (!list_empty(&ioc->delayed_tr_volume_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * _scsih_check_topo_delete_events - sanity check on topo events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This routine added to better handle cable breaker.
+ *
+ * This handles the case where driver receives multiple expander
+ * add and delete events in a single shot. When there is a delete event
+ * the routine will void any pending add events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventDataSasTopologyChangeList_t *local_event_data;
+ u16 expander_handle;
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle;
+
+ for (i = 0 ; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ if (expander_handle < ioc->sas_hba.num_phys) {
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+ return;
+ }
+ if (event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+ /* put expander attached devices into blocking state */
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ expander_handle);
+ _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ do {
+ handle = find_first_bit(ioc->blocking_handles,
+ ioc->facts.MaxDevHandle);
+ if (handle < ioc->facts.MaxDevHandle)
+ _scsih_block_io_device(ioc, handle);
+ } while (test_and_clear_bit(handle, ioc->blocking_handles));
+ } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+
+ if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
+ if (local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
+ expander_handle) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting ignoring flag\n", ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_delete_flag - setting volume delete flag
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * This returns nothing.
+ */
+static void
+_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device && raid_device->starget &&
+ raid_device->starget->hostdata) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), "
+ "wwid(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long) raid_device->wwid));
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
+ * @handle: input handle
+ * @a: handle for volume a
+ * @b: handle for volume b
+ *
+ * IR firmware only supports two raid volumes. The purpose of this
+ * routine is to set the volume handle in either a or b. When the given
+ * input handle is non-zero, or when a and b have not been set before.
+ */
+static void
+_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
+{
+ if (!handle || handle == *a || handle == *b)
+ return;
+ if (!*a)
+ *a = handle;
+ else if (!*b)
+ *b = handle;
+}
+
+/**
+ * _scsih_check_ir_config_unhide_events - check for UNHIDE events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This routine will send target reset to volume, followed by target
+ * resets to the PDs. This is called when a PD has been removed, or
+ * volume has been deleted or removed. When the target reset is sent
+ * to volume, the PD target resets need to be queued to start upon
+ * completion of the volume target reset.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u16 handle, volume_handle, a, b;
+ struct _tr_list *delayed_tr;
+
+ a = 0;
+ b = 0;
+
+ /* Volume Resets for Deleted or Removed */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_delete_flag(ioc, volume_handle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ /* Volume Resets for UNHIDE events */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ if (a)
+ _scsih_tm_tr_volume_send(ioc, a);
+ if (b)
+ _scsih_tm_tr_volume_send(ioc, b);
+
+ /* PD target resets */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
+ continue;
+ handle = le16_to_cpu(element->PhysDiskDevHandle);
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ clear_bit(handle, ioc->pd_handles);
+ if (!volume_handle)
+ _scsih_tm_tr_send(ioc, handle);
+ else if (volume_handle == a || volume_handle == b) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ BUG_ON(!delayed_tr);
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
+ handle));
+ } else
+ _scsih_tm_tr_send(ioc, handle);
+ }
+}
+
+
+/**
+ * _scsih_check_volume_delete_events - set delete flag for volumes
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This will handle the case when the cable connected to entire volume is
+ * pulled. We will take care of setting the deleted flag so normal IO will
+ * not be sent.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrVolume_t *event_data)
+{
+ u32 state;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+ state = le32_to_cpu(event_data->NewValue);
+ if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
+ MPI2_RAID_VOL_STATE_FAILED)
+ _scsih_set_volume_delete_flag(ioc,
+ le16_to_cpu(event_data->VolDevHandle));
+}
+
+/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataTemperature_t *event_data)
+{
+ if (ioc->temp_sensors_count >= event_data->SensorNum) {
+ pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
+ " exceeded for Sensor: %d !!!\n", ioc->name,
+ ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
+ ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
+ event_data->SensorNum);
+ pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
+ ioc->name, event_data->CurrentTemperature);
+ }
+}
+
+/**
+ * _scsih_flush_running_cmds - completing outstanding commands.
+ * @ioc: per adapter object
+ *
+ * The flushing out of all pending scmd commands following host reset,
+ * where all IO is dropped to the floor.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct scsi_cmnd *scmd;
+ u16 smid;
+ u16 count = 0;
+
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (!scmd)
+ continue;
+ count++;
+ mpt3sas_base_free_smid(ioc, smid);
+ scsi_dma_unmap(scmd);
+ if (ioc->pci_error_recovery)
+ scmd->result = DID_NO_CONNECT << 16;
+ else
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
+ ioc->name, count));
+}
+
+/**
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO reqest message frame
+ *
+ * Supporting protection 1 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIORequest_t *mpi_request)
+{
+ u16 eedp_flags;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+ Mpi25SCSIIORequest_t *mpi_request_3v =
+ (Mpi25SCSIIORequest_t *)mpi_request;
+
+ if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
+ return;
+
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ else
+ return;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+
+ /*
+ * enable ref/guard checking
+ * auto increment ref tag
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_get_lba(scmd));
+ break;
+
+ case SCSI_PROT_DIF_TYPE3:
+
+ /*
+ * enable guard checking
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+ break;
+ }
+
+ mpi_request_3v->EEDPBlockSize =
+ cpu_to_le16(scmd->device->sector_size);
+ mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+ u8 ascq;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
+ ascq);
+ scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
+ SAM_STAT_CHECK_CONDITION;
+}
+
+
+/**
+ * _scsih_qcmd - main scsi request entry point
+ * @scmd: pointer to scsi command object
+ * @done: function pointer to be invoked on completion
+ *
+ * The callback index is set inside `ioc->scsi_io_cb_idx`.
+ *
+ * Returns 0 on success. If there's a failure, return either:
+ * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
+ * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
+ */
+static int
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2SCSIIORequest_t *mpi_request;
+ u32 mpi_control;
+ u16 smid;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_SCSI)
+ scsi_print_command(scmd);
+#endif
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (ioc->pci_error_recovery || ioc->remove_host) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+
+ /* invalid device handle */
+ handle = sas_target_priv_data->handle;
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ /* device busy with task managment */
+ } else if (sas_target_priv_data->tm_busy ||
+ sas_device_priv_data->block)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+ else
+ mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+
+ /* set tags */
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ scmd->cmd_len != 32)
+ mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ _scsih_setup_eedp(ioc, scmd, mpi_request);
+
+ if (scmd->cmd_len == 32)
+ mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ else
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ mpi_request->Control = cpu_to_le32(mpi_control);
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
+ mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+ mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ mpi_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
+ mpi_request->LUN);
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+
+ if (mpi_request->DataLength) {
+ if (ioc->build_sg_scmd(ioc, scmd, smid)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ } else
+ ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
+ MPI25_SCSIIO_IOFLAGS_FAST_PATH);
+ mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+
+ out:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/**
+ * _scsih_normalize_sense - normalize descriptor and fixed format sense data
+ * @sense_buffer: sense data returned by target
+ * @data: normalized skey/asc/ascq
+ *
+ * Return nothing.
+ */
+static void
+_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
+{
+ if ((sense_buffer[0] & 0x7F) >= 0x72) {
+ /* descriptor format */
+ data->skey = sense_buffer[1] & 0x0F;
+ data->asc = sense_buffer[2];
+ data->ascq = sense_buffer[3];
+ } else {
+ /* fixed format */
+ data->skey = sense_buffer[2] & 0x0F;
+ data->asc = sense_buffer[12];
+ data->ascq = sense_buffer[13];
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * scsi_status - SCSI Status code returned from target device
+ * scsi_state - state info associated with SCSI_IO determined by ioc
+ * ioc_status - ioc supplied status info
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
+{
+ u32 response_info;
+ u8 *response_bytes;
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ u8 scsi_state = mpi_reply->SCSIState;
+ u8 scsi_status = mpi_reply->SCSIStatus;
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
+ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ if (log_info == 0x31170000)
+ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
+ desc_ioc_state = "success";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc_ioc_state = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ desc_ioc_state = "scsi recovered error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ desc_ioc_state = "scsi invalid dev handle";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ desc_ioc_state = "scsi device not there";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ desc_ioc_state = "scsi data overrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ desc_ioc_state = "scsi data underrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ desc_ioc_state = "scsi io data error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ desc_ioc_state = "scsi protocol error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ desc_ioc_state = "scsi task terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ desc_ioc_state = "scsi residual mismatch";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ desc_ioc_state = "scsi task mgmt failed";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ desc_ioc_state = "scsi ioc terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ desc_ioc_state = "scsi ext terminated";
+ break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc_ioc_state = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc_ioc_state = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc_ioc_state = "eedp app tag error";
+ break;
+ default:
+ desc_ioc_state = "unknown";
+ break;
+ }
+
+ switch (scsi_status) {
+ case MPI2_SCSI_STATUS_GOOD:
+ desc_scsi_status = "good";
+ break;
+ case MPI2_SCSI_STATUS_CHECK_CONDITION:
+ desc_scsi_status = "check condition";
+ break;
+ case MPI2_SCSI_STATUS_CONDITION_MET:
+ desc_scsi_status = "condition met";
+ break;
+ case MPI2_SCSI_STATUS_BUSY:
+ desc_scsi_status = "busy";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE:
+ desc_scsi_status = "intermediate";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
+ desc_scsi_status = "intermediate condmet";
+ break;
+ case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
+ desc_scsi_status = "reservation conflict";
+ break;
+ case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
+ desc_scsi_status = "command terminated";
+ break;
+ case MPI2_SCSI_STATUS_TASK_SET_FULL:
+ desc_scsi_status = "task set full";
+ break;
+ case MPI2_SCSI_STATUS_ACA_ACTIVE:
+ desc_scsi_status = "aca active";
+ break;
+ case MPI2_SCSI_STATUS_TASK_ABORTED:
+ desc_scsi_status = "task aborted";
+ break;
+ default:
+ desc_scsi_status = "unknown";
+ break;
+ }
+
+ desc_scsi_state[0] = '\0';
+ if (!scsi_state)
+ desc_scsi_state = " ";
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ strcat(desc_scsi_state, "response info ");
+ if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ strcat(desc_scsi_state, "state terminated ");
+ if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
+ strcat(desc_scsi_state, "no status ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
+ strcat(desc_scsi_state, "autosense failed ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
+ strcat(desc_scsi_state, "autosense valid ");
+
+ scsi_print_command(scmd);
+
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT
+ "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ pr_warn(MPT3SAS_FMT
+ "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ ioc->name, scsi_bufflen(scmd), scmd->underflow,
+ scsi_get_resid(scmd));
+ pr_warn(MPT3SAS_FMT
+ "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ pr_warn(MPT3SAS_FMT
+ "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ ioc->name, desc_scsi_status,
+ scsi_status, desc_scsi_state, scsi_state);
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ pr_warn(MPT3SAS_FMT
+ "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+ response_info = le32_to_cpu(mpi_reply->ResponseInfo);
+ response_bytes = (u8 *)&response_info;
+ _scsih_response_code(ioc, response_bytes[0]);
+ }
+}
+#endif
+
+/**
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+ struct _sas_device *sas_device;
+
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device)
+ return;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ sas_device->pfa_led_on = 1;
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+/**
+ * _scsih_turn_off_pfa_led - turn off Fault LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to turned off
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus = 0;
+ mpi_request.Slot = cpu_to_le16(sas_device->slot);
+ mpi_request.DevHandle = 0;
+ mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT3SAS_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+/**
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2EventNotificationReply_t *event_reply;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data;
+ struct _sas_device *sas_device;
+ ssize_t sz;
+ unsigned long flags;
+
+ /* only handle non-raid devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
+
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+ event_reply = kzalloc(sz, GFP_KERNEL);
+ if (!event_reply) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ event_reply->Event =
+ cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ event_reply->MsgLength = sz/4;
+ event_reply->EventDataLength =
+ cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
+ event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
+ event_reply->EventData;
+ event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
+ event_data->ASC = 0x5D;
+ event_data->DevHandle = cpu_to_le16(handle);
+ event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
+ mpt3sas_ctl_add_to_event_log(ioc, event_reply);
+ kfree(event_reply);
+}
+
+/**
+ * _scsih_io_done - scsi request callback
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when using _scsih_qcmd.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ Mpi2SCSIIOReply_t *mpi_reply;
+ struct scsi_cmnd *scmd;
+ u16 ioc_status;
+ u32 xfer_cnt;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 response_code = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (scmd == NULL)
+ return 1;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ if (mpi_reply == NULL) {
+ scmd->result = DID_OK << 16;
+ goto out;
+ }
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ sas_device_priv_data->sas_target->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+
+ /* turning off TLR */
+ scsi_state = mpi_reply->SCSIState;
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code =
+ le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
+ if (!sas_device_priv_data->tlr_snoop_check) {
+ sas_device_priv_data->tlr_snoop_check++;
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
+ sas_device_priv_data->flags &=
+ ~MPT_DEVICE_TLR_ON;
+ }
+
+ xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ scsi_status = mpi_reply->SCSIStatus;
+
+ if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI2_SCSI_STATUS_BUSY ||
+ scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
+ ioc_status = MPI2_IOCSTATUS_SUCCESS;
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(mpi_reply->SenseCount));
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ /* failure prediction threshold exceeded */
+ if (data.asc == 0x5D)
+ _scsih_smart_predicted_fault(ioc,
+ le16_to_cpu(mpi_reply->DevHandle));
+ mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
+ }
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_BUSY:
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ if (sas_device_priv_data->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ goto out;
+ }
+ if (log_info == 0x31110630) {
+ if (scmd->retries > 2) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_device_set_state(scmd->device,
+ SDEV_OFFLINE);
+ } else {
+ scmd->result = DID_SOFT_ERROR << 16;
+ scmd->device->expecting_cc_ua = 1;
+ }
+ break;
+ }
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+
+ if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
+ break;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
+ mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
+ mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->sense_buffer[0] = 0x70;
+ scmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ scmd->sense_buffer[12] = 0x20;
+ scmd->sense_buffer[13] = 0;
+ }
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (response_code ==
+ MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+ (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ _scsih_eedp_error_handling(scmd, ioc_status);
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+
+ }
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
+ _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
+#endif
+
+ out:
+
+ scsi_dma_unmap(scmd);
+
+ scmd->scsi_done(scmd);
+ return 1;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, fw will send topology events for every device. Its
+ * possible that the handles may change from the previous setting, so this
+ * code keeping handles updating if changed.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz;
+ u16 ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u8 link_rate;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "updating handles for sas_host(0x%016llx)\n",
+ ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+ AttachedDevHandle);
+ if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
+ attached_handle, i, link_rate);
+ }
+ out:
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_sas_host_add - create sas host object
+ * @ioc: per adapter object
+ *
+ * Creating host side data object, stored in ioc->sas_hba
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 ioc_status;
+ u16 sz;
+ u8 device_missing_delay;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
+ if (!ioc->sas_hba.num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ /* sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ ioc->io_missing_delay =
+ sas_iounit_pg1->IODeviceMissingDelay;
+ device_missing_delay =
+ sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ ioc->device_missing_delay = (device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ ioc->device_missing_delay = device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
+ phy_pg0, ioc->sas_hba.parent_dev);
+ }
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ pr_info(MPT3SAS_FMT
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->name, ioc->sas_hba.handle,
+ (unsigned long long) ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys) ;
+
+ if (ioc->sas_hba.enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ ioc->sas_hba.enclosure_handle)))
+ ioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_expander_add - creating expander object
+ * @ioc: per adapter object
+ * @handle: expander handle
+ *
+ * Creating expander object, stored in ioc->sas_expander_list.
+ *
+ * Return 0 for success, else error.
+ */
+static int
+_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u32 ioc_status;
+ u16 parent_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port = NULL;
+
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return -1;
+
+ if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* handle out of order topology events */
+ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
+ if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
+ != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if (sas_address_parent != ioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address_parent);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = _scsih_expander_add(ioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct _sas_node),
+ GFP_KERNEL);
+ if (!sas_expander) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.NumPhys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+
+ pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
+ " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys)
+ goto out_fail;
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+
+ if ((mpt3sas_transport_add_expander_phy(ioc,
+ &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_expander->enclosure_handle)))
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ _scsih_expander_node_add(ioc, sas_expander);
+ return 0;
+
+ out_fail:
+
+ if (mpt3sas_port)
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_address_parent);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpt3sas_expander_remove - removing expander object
+ * @ioc: per adapter object
+ * @sas_address: expander sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_expander)
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (sas_expander)
+ _scsih_expander_node_remove(ioc, sas_expander);
+}
+
+/**
+ * _scsih_done - internal SCSI_IO callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated SCSI_IO.
+ * The callback index passed is `ioc->scsih_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+
+
+
+#define MPT3_MAX_LUNS (255)
+
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_flags: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ pr_err(MPT3SAS_FMT
+ "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc, (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_numberv: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* wide port handling ~ we need only handle device once for the phy that
+ * is matched in sas device page zero
+ */
+ if (phy_number != sas_device_pg0.PhyNum)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT
+ "device is not present handle(0x%04x), flags!!!\n",
+ ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ _scsih_ublock_io_device(ioc, sas_address);
+
+}
+
+/**
+ * _scsih_add_device - creating sas device object
+ * @ioc: per adapter object
+ * @handle: sas device handle
+ * @phy_num: phy number end device attached to
+ * @is_pd: is this hidden raid component
+ *
+ * Creating end device object, stored in ioc->sas_device_list.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+ u8 is_pd)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ u64 sas_address;
+ u32 device_info;
+ unsigned long flags;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return -1;
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
+ ioc->name, handle);
+ return -1;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return -1;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return -1;
+
+ sas_device = kzalloc(sizeof(struct _sas_device),
+ GFP_KERNEL);
+ if (!sas_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
+ sas_device->handle = handle;
+ if (_scsih_get_sas_address(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ &sas_device->sas_address_parent) != 0)
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
+ sas_device->device_info = device_info;
+ sas_device->sas_address = sas_address;
+ sas_device->phy = sas_device_pg0.PhyNum;
+ sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+ MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+ /* get enclosure_logical_id */
+ if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_sas_device_init_add(ioc, sas_device);
+ else
+ _scsih_sas_device_add(ioc, sas_device);
+
+ return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device_delete: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+ if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+ (sas_device->pfa_led_on)) {
+ _scsih_turn_off_pfa_led(ioc, sas_device);
+ sas_device->pfa_led_on = 0;
+ }
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ sas_target_priv_data->handle =
+ MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ kfree(sas_device);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_topology_change_event_debug - debug for topology event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->ExpStatus) {
+ case MPI2_EVENT_SAS_TOPO_ES_ADDED:
+ status_str = "add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
+ ioc->name, status_str);
+ pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
+ "start_phy(%02d), count(%d)\n",
+ le16_to_cpu(event_data->ExpanderDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPhyNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ status_str = "target add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
+ " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
+ handle, status_str, link_rate, prev_link_rate);
+
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_topology_change_event - handle topology changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static int
+_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 parent_handle, handle;
+ u16 reason_code;
+ u8 phy_number, max_phys;
+ struct _sas_node *sas_expander;
+ u64 sas_address;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate;
+ Mpi2EventDataSasTopologyChangeList_t *event_data =
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_topology_change_event_debug(ioc, event_data);
+#endif
+
+ if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+
+ if (!ioc->sas_hba.num_phys)
+ _scsih_sas_host_add(ioc);
+ else
+ _scsih_sas_host_refresh(ioc);
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+
+ parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+
+ /* handle expander add */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+ if (_scsih_expander_add(ioc, parent_handle) != 0)
+ return 0;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ parent_handle);
+ if (sas_expander) {
+ sas_address = sas_expander->sas_address;
+ max_phys = sas_expander->num_phys;
+ } else if (parent_handle < ioc->sas_hba.num_phys) {
+ sas_address = ioc->sas_hba.sas_address;
+ max_phys = ioc->sas_hba.num_phys;
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ phy_number = event_data->StartPhyNum + i;
+ if (phy_number >= max_phys)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if ((event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, sas_address, handle,
+ phy_number, link_rate);
+
+
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ _scsih_add_device(ioc, handle, phy_number, 0);
+
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+ _scsih_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+
+ /* handle expander removal */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+ sas_expander)
+ mpt3sas_expander_remove(ioc, sas_address);
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
+ reason_str = "sata init failure";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality complete";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "device status change: (%s)\n"
+ "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
+ pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ pr_info("\n");
+}
+#endif
+
+/**
+ * _scsih_sas_device_status_change_event - handle device status change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _sas_device *sas_device;
+ u64 sas_address;
+ unsigned long flags;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data =
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ event_data);
+#endif
+
+ /* In MPI Revision K (0xC), the internal device reset complete was
+ * implemented, so avoid setting tm_busy flag for older firmware.
+ */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+ return;
+
+ if (event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(event_data->SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device || !sas_device->starget) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ target_priv_data = sas_device->starget->hostdata;
+ if (!target_priv_data) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ reason_str = "enclosure add";
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ reason_str = "enclosure remove";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx)"
+ " number slots(%d)\n", ioc->name, reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
+}
+#endif
+
+/**
+ * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ (Mpi2EventDataSasEnclDevStatusChange_t *)
+ fw_event->event_data);
+#endif
+}
+
+/**
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
+ u16 smid, handle;
+ u32 lun;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 termination_count;
+ u32 query_count;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ fw_event->event_data;
+ u16 ioc_status;
+ unsigned long flags;
+ int r;
+ u8 max_retries = 0;
+ u8 task_abort_retries;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ pr_info(MPT3SAS_FMT
+ "%s: enter: phy number(%d), width(%d)\n",
+ ioc->name, __func__, event_data->PhyNum,
+ event_data->PortWidth);
+
+ _scsih_block_io_all_device(ioc);
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ mpi_reply = ioc->tm_cmds.reply;
+ broadcast_aen_retry:
+
+ /* sanity checks for retrying this loop */
+ if (max_retries++ == 5) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
+ ioc->name, __func__));
+ goto out;
+ } else if (max_retries > 1)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
+ ioc->name, __func__, max_retries - 1));
+
+ termination_count = 0;
+ query_count = 0;
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ if (ioc->shost_recovery)
+ goto out;
+ scmd = _scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ sdev = scmd->device;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+ continue;
+ /* skip hidden raid components */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ /* skip volumes */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_VOLUME)
+ continue;
+
+ handle = sas_device_priv_data->sas_target->handle;
+ lun = sas_device_priv_data->lun;
+ query_count++;
+
+ if (ioc->shost_recovery)
+ goto out;
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: FAILED when sending "
+ "QUERY_TASK: scmd(%p)\n", scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev,
+ "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
+ ioc_status, scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ /* see if IO is still owned by IOC and target */
+ if (mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ continue;
+ }
+ task_abort_retries = 0;
+ tm_retry:
+ if (task_abort_retries++ == 60) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: ABORT_TASK: giving up\n", ioc->name,
+ __func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ if (ioc->shost_recovery)
+ goto out_no_lock;
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
+ "scmd(%p)\n", scmd);
+ goto tm_retry;
+ }
+
+ if (task_abort_retries > 1)
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+ " scmd(%p)\n",
+ task_abort_retries - 1, scmd);
+
+ termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ }
+
+ if (ioc->broadcast_aen_pending) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: loop back due to pending AEN\n",
+ ioc->name, __func__));
+ ioc->broadcast_aen_pending = 0;
+ goto broadcast_aen_retry;
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ if (!ioc->shost_recovery)
+ _scsih_ublock_io_all_device(ioc);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+}
+
+/**
+ * _scsih_sas_discovery_event - handle discovery events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
+ pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ }
+#endif
+
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
+ !ioc->sas_hba.num_phys) {
+ if (disable_discovery > 0 && ioc->shost_recovery) {
+ /* Wait for the reset to complete */
+ while (ioc->shost_recovery)
+ ssleep(1);
+ }
+ _scsih_sas_host_add(ioc);
+ }
+}
+
+/**
+ * _scsih_ir_fastpath - turn on fastpath for IR physdisk
+ * @ioc: per adapter object
+ * @handle: device handle for physical disk
+ * @phys_disk_num: physical disk number
+ *
+ * Return 0 for success, else failure.
+ */
+static int
+_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+ u8 issue_reset = 0;
+ int rc = 0;
+ u16 ioc_status;
+ u32 log_info;
+
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+ mpi_request->PhysDiskNum = phys_disk_num;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
+ "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
+ handle, phys_disk_num));
+
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: failed: ioc_status(0x%04x), "
+ "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
+ log_info));
+ rc = -EFAULT;
+ } else
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: completed successfully\n",
+ ioc->name));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return rc;
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ **/
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ int rc;
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+ sdev->no_uld_attach ? "hidding" : "exposing");
+ rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ u64 wwid;
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ return;
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ if (!ioc->wait_for_discovery_to_complete) {
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ _scsih_determine_boot_device(ioc, raid_device, 1);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_sas_volume_delete - delete volume
+ * @ioc: per adapter object
+ * @handle: volume device handle
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget = NULL;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device) {
+ if (raid_device->starget) {
+ starget = raid_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+/**
+ * _scsih_sas_pd_expose - expose pd component to /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* exposing raid component */
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* hiding raid component */
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - remove pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u64 sas_address;
+ u16 parent_handle;
+
+ set_bit(handle, ioc->pd_handles);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device) {
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ _scsih_add_device(ioc, handle, 0, 1);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ u8 element_type;
+ int i;
+ char *reason_str = NULL, *element_str = NULL;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+
+ pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
+ ioc->name, (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
+ "foreign" : "native", event_data->NumElements);
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ reason_str = "add";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ reason_str = "remove";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
+ reason_str = "no change";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ reason_str = "hide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ reason_str = "unhide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ reason_str = "volume_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ reason_str = "volume_deleted";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ reason_str = "pd_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ reason_str = "pd_deleted";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ element_type = le16_to_cpu(element->ElementFlags) &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
+ switch (element_type) {
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
+ element_str = "volume";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
+ element_str = "phys disk";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
+ element_str = "hot spare";
+ break;
+ default:
+ element_str = "unknown element";
+ break;
+ }
+ pr_info("\t(%s:%s), vol handle(0x%04x), " \
+ "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
+ reason_str, le16_to_cpu(element->VolDevHandle),
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_ir_config_change_event - handle ir configuration change events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u8 foreign_config;
+ Mpi2EventDataIrConfigChangeList_t *event_data =
+ (Mpi2EventDataIrConfigChangeList_t *)
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+#endif
+
+ foreign_config = (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ if (ioc->shost_recovery) {
+
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
+ _scsih_ir_fastpath(ioc,
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+ return;
+ }
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ if (!foreign_config)
+ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ if (!foreign_config)
+ _scsih_sas_volume_delete(ioc,
+ le16_to_cpu(element->VolDevHandle));
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ _scsih_sas_pd_hide(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ _scsih_sas_pd_expose(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ _scsih_sas_pd_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ _scsih_sas_pd_delete(ioc, element);
+ break;
+ }
+ }
+}
+
+/**
+ * _scsih_sas_ir_volume_event - IR volume event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u64 wwid;
+ unsigned long flags;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u32 state;
+ int rc;
+ Mpi2EventDataIrVolume_t *event_data =
+ (Mpi2EventDataIrVolume_t *) fw_event->event_data;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_VOL_STATE_MISSING:
+ case MPI2_RAID_VOL_STATE_FAILED:
+ _scsih_sas_volume_delete(ioc, handle);
+ break;
+
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ break;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ break;
+
+ case MPI2_RAID_VOL_STATE_INITIALIZING:
+ default:
+ break;
+ }
+}
+
+/**
+ * _scsih_sas_ir_physical_disk_event - PD event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u16 handle, parent_handle;
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ Mpi2EventDataIrPhysicalDisk_t *event_data =
+ (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
+ u64 sas_address;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->PhysDiskDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
+
+ set_bit(handle, ioc->pd_handles);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+
+ break;
+
+ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrOperationStatus_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->RAIDOperation) {
+ case MPI2_EVENT_IR_RAIDOP_RESYNC:
+ reason_str = "resync";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
+ reason_str = "online capacity expansion";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
+ reason_str = "consistency check";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
+ reason_str = "background init";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
+ reason_str = "make data consistent";
+ break;
+ }
+
+ if (!reason_str)
+ return;
+
+ pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
+ "\thandle(0x%04x), percent complete(%d)\n",
+ ioc->name, reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
+}
+#endif
+
+/**
+ * _scsih_sas_ir_operation_status_event - handle RAID operation events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataIrOperationStatus_t *event_data =
+ (Mpi2EventDataIrOperationStatus_t *)
+ fw_event->event_data;
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_operation_status_event_debug(ioc,
+ event_data);
+#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->percent_complete =
+ event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_devices as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @slot: enclosure slot id
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponsive_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 slot, u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address == sas_address &&
+ sas_device->slot == slot) {
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget)
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx), "
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n", handle,
+ (unsigned long long)sas_device->sas_address,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->handle == handle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_sas_devices -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ continue;
+ _scsih_mark_responding_sas_device(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ le16_to_cpu(sas_device_pg0.Slot), handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_raid_device - mark a raid_device as responding
+ * @ioc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponsive_raid_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ starget_printk(KERN_INFO, raid_device->starget,
+ "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ (unsigned long long)raid_device->wwid);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ if (raid_device->handle == handle) {
+ spin_unlock_irqrestore(&ioc->raid_device_lock,
+ flags);
+ return;
+ }
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ raid_device->handle);
+ raid_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_raid_devices -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u8 phys_disk_num;
+
+ if (!ioc->ir_firmware)
+ return;
+
+ pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
+ ioc->name);
+
+ if (list_empty(&ioc->raid_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+ _scsih_mark_responding_raid_device(ioc,
+ le64_to_cpu(volume_pg1.WWID), handle);
+ }
+
+ /* refresh the pd_handles */
+ phys_disk_num = 0xFF;
+ memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ set_bit(handle, ioc->pd_handles);
+ }
+ out:
+ pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_expander - mark a expander as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle:
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponsive_expanders.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ sas_expander->responding = 1;
+ if (sas_expander->handle == handle)
+ goto out;
+ pr_info("\texpander(0x%016llx): handle changed" \
+ " from(0x%04x) to (0x%04x)!!!\n",
+ (unsigned long long)sas_expander->sas_address,
+ sas_expander->handle, handle);
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ goto out;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u64 sas_address;
+ u16 handle;
+
+ pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_expander_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (unsigned long long)sas_address);
+ _scsih_mark_responding_expander(ioc, sas_address, handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+}
+
+/**
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *sas_device_next;
+ struct _sas_node *sas_expander, *sas_expander_next;
+ struct _raid_device *raid_device, *raid_device_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
+ ioc->name);
+
+ /* removing unresponding end devices */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
+ ioc->name);
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_list, list) {
+ if (!sas_device->responding)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ sas_device->sas_address);
+ else
+ sas_device->responding = 0;
+ }
+
+ /* removing unresponding volumes */
+ if (ioc->ir_firmware) {
+ pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
+ ioc->name);
+ list_for_each_entry_safe(raid_device, raid_device_next,
+ &ioc->raid_device_list, list) {
+ if (!raid_device->responding)
+ _scsih_sas_volume_delete(ioc,
+ raid_device->handle);
+ else
+ raid_device->responding = 0;
+ }
+ }
+
+ /* removing unresponding expanders */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
+ ioc->name);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(sas_expander, sas_expander_next,
+ &ioc->sas_expander_list, list) {
+ if (!sas_expander->responding)
+ list_move_tail(&sas_expander->list, &tmp_list);
+ else
+ sas_expander->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+ list) {
+ list_del(&sas_expander->list);
+ _scsih_expander_node_remove(ioc, sas_expander);
+ }
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
+ ioc->name);
+
+ /* unblock devices */
+ _scsih_ublock_io_all_device(ioc);
+}
+
+static void
+_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander, u16 handle)
+{
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int i;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
+ le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+ expander_pg1.NegotiatedLinkRate >> 4);
+ }
+}
+
+/**
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2EventIrConfigElement_t element;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 phys_disk_num;
+ u16 ioc_status;
+ u16 handle, parent_handle;
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
+ u8 retry_count;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ expander_device = mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+ else {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
+ pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
+ ioc->name);
+
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 1)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (raid_device)
+ continue;
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
+ pr_info(MPT3SAS_FMT
+ "\tBEFORE adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
+ pr_info(MPT3SAS_FMT
+ "\tAFTER adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
+ ioc->name);
+
+ skip_to_sas:
+
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
+ ioc->name);
+
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
+ " ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+ continue;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 0)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+}
+/**
+ * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
+
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
+ }
+ break;
+ }
+}
+
+/**
+ * _mpt3sas_fw_work - delayed task for processing firmware events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ /* the queue is being flushed so ignore this event */
+ if (ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ _scsih_fw_event_free(ioc, fw_event);
+ return;
+ }
+
+ switch (fw_event->event) {
+ case MPT3SAS_PROCESS_TRIGGER_DIAG:
+ mpt3sas_process_trigger_data(ioc,
+ (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
+ fw_event->event_data);
+ break;
+ case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
+ while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ ssleep(1);
+ _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_scan_for_devices_after_reset(ioc);
+ break;
+ case MPT3SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
+ missing_delay[1]);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "port enable: complete from worker thread\n",
+ ioc->name));
+ break;
+ case MPT3SAS_TURN_ON_PFA_LED:
+ _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_sas_topology_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ _scsih_sas_discovery_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ _scsih_sas_broadcast_primitive_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ _scsih_sas_enclosure_dev_status_change_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_sas_ir_config_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_sas_ir_volume_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ _scsih_sas_ir_physical_disk_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ _scsih_sas_ir_operation_status_event(ioc, fw_event);
+ break;
+ }
+ _scsih_fw_event_free(ioc, fw_event);
+}
+
+/**
+ * _firmware_event_work
+ * @ioc: per adapter object
+ * @work: The fw_event_work object
+ * Context: user.
+ *
+ * wrappers for the work thread handling firmware events
+ *
+ * Return nothing.
+ */
+
+static void
+_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event = container_of(work,
+ struct fw_event_work, work);
+
+ _mpt3sas_fw_work(fw_event->ioc, fw_event);
+}
+
+/**
+ * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 event;
+ u16 sz;
+
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
+ mpt3sas_trigger_event(ioc, event, 0);
+
+ switch (event) {
+ /* handle these */
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ mpi_reply->EventData;
+
+ if (baen_data->Primitive !=
+ MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return 1;
+
+ if (ioc->broadcast_aen_busy) {
+ ioc->broadcast_aen_pending++;
+ return 1;
+ } else
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
+
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_topo_delete_events(ioc,
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_check_ir_config_unhide_events(ioc,
+ (Mpi2EventDataIrConfigChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_check_volume_delete_events(ioc,
+ (Mpi2EventDataIrVolume_t *)
+ mpi_reply->EventData);
+ break;
+
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ break;
+
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ _scsih_temp_threshold_events(ioc,
+ (Mpi2EventDataTemperature_t *)
+ mpi_reply->EventData);
+ break;
+
+ default: /* ignore the rest */
+ return 1;
+ }
+
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);
+ if (!fw_event) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ memcpy(fw_event->event_data, mpi_reply->EventData, sz);
+ fw_event->ioc = ioc;
+ fw_event->VF_ID = mpi_reply->VF_ID;
+ fw_event->VP_ID = mpi_reply->VP_ID;
+ fw_event->event = event;
+ _scsih_fw_event_add(ioc, fw_event);
+ return 1;
+}
+
+/* shost template */
+static struct scsi_host_template scsih_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT3SAS_DRIVER_NAME,
+ .queuecommand = _scsih_qcmd,
+ .target_alloc = _scsih_target_alloc,
+ .slave_alloc = _scsih_slave_alloc,
+ .slave_configure = _scsih_slave_configure,
+ .target_destroy = _scsih_target_destroy,
+ .slave_destroy = _scsih_slave_destroy,
+ .scan_finished = _scsih_scan_finished,
+ .scan_start = _scsih_scan_start,
+ .change_queue_depth = _scsih_change_queue_depth,
+ .eh_abort_handler = _scsih_abort,
+ .eh_device_reset_handler = _scsih_dev_reset,
+ .eh_target_reset_handler = _scsih_target_reset,
+ .eh_host_reset_handler = _scsih_host_reset,
+ .bios_param = _scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT3SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mpt3sas_host_attrs,
+ .sdev_attrs = mpt3sas_dev_attrs,
+ .track_queue_depth = 1,
+};
+
+/**
+ * _scsih_expander_node_remove - removing expander device from list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port, *next;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mpt3sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (ioc->shost_recovery)
+ return;
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name,
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * _scsih_ir_shutdown - IR shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+
+ /* is IR firmware build loaded ? */
+ if (!ioc->ir_firmware)
+ return;
+
+ /* are there any volumes ? */
+ if (list_empty(&ioc->raid_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+
+ pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ pr_info(MPT3SAS_FMT
+ "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _scsih_remove - detach and remove add host
+ * @pdev: PCI device struct
+ *
+ * Routine called when unloading the driver.
+ * Return nothing.
+ */
+static void _scsih_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct _sas_port *mpt3sas_port, *next_port;
+ struct _raid_device *raid_device, *next;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ /* release all the volumes */
+ _scsih_ir_shutdown(ioc);
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
+ /* free ports attached to the sas_host */
+ list_for_each_entry_safe(mpt3sas_port, next_port,
+ &ioc->sas_hba.sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ /* free phys attached to the sas_host */
+ if (ioc->sas_hba.num_phys) {
+ kfree(ioc->sas_hba.phy);
+ ioc->sas_hba.phy = NULL;
+ ioc->sas_hba.num_phys = 0;
+ }
+
+ sas_remove_host(shost);
+ scsi_remove_host(shost);
+ mpt3sas_base_detach(ioc);
+ list_del(&ioc->list);
+ scsi_host_put(shost);
+}
+
+/**
+ * _scsih_shutdown - routine call during system shutdown
+ * @pdev: PCI device struct
+ *
+ * Return nothing.
+ */
+static void
+_scsih_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ _scsih_ir_shutdown(ioc);
+ mpt3sas_base_detach(ioc);
+}
+
+
+/**
+ * _scsih_probe_boot_devices - reports 1st device
+ * @ioc: per adapter object
+ *
+ * If specified in bios page 2, this routine reports the 1st
+ * device scsi-ml or sas transport for persistent boot device
+ * purposes. Please refer to function _scsih_determine_boot_device()
+ */
+static void
+_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u8 is_raid;
+ void *device;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u64 sas_address_parent;
+ u64 sas_address;
+ unsigned long flags;
+ int rc;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ device = NULL;
+ is_raid = 0;
+ if (ioc->req_boot_device.device) {
+ device = ioc->req_boot_device.device;
+ is_raid = ioc->req_boot_device.is_raid;
+ } else if (ioc->req_alt_boot_device.device) {
+ device = ioc->req_alt_boot_device.device;
+ is_raid = ioc->req_alt_boot_device.is_raid;
+ } else if (ioc->current_boot_device.device) {
+ device = ioc->current_boot_device.device;
+ is_raid = ioc->current_boot_device.is_raid;
+ }
+
+ if (!device)
+ return;
+
+ if (is_raid) {
+ raid_device = device;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = device;
+ handle = sas_device->handle;
+ sas_address_parent = sas_device->sas_address_parent;
+ sas_address = sas_device->sas_address;
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading) {
+ mpt3sas_transport_port_remove(ioc,
+ sas_address,
+ sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+ }
+}
+
+/**
+ * _scsih_probe_raid - reporting raid volumes to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _raid_device *raid_device, *raid_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_device, raid_next,
+ &ioc->raid_device_list, list) {
+ if (raid_device->starget)
+ continue;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+}
+
+/**
+ * _scsih_probe_sas - reporting sas devices to sas transport
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *next;
+ unsigned long flags;
+
+ /* SAS Device List */
+ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
+ list) {
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ } else if (!sas_device->starget) {
+ /*
+ * When asyn scanning is enabled, its not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading) {
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_probe_devices - probing for devices
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 volume_mapping_flags;
+
+ if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
+ return; /* return when IOC doesn't support initiator mode */
+
+ _scsih_probe_boot_devices(ioc);
+
+ if (ioc->ir_firmware) {
+ volume_mapping_flags =
+ le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ _scsih_probe_raid(ioc);
+ _scsih_probe_sas(ioc);
+ } else {
+ _scsih_probe_sas(ioc);
+ _scsih_probe_raid(ioc);
+ }
+ } else
+ _scsih_probe_sas(ioc);
+}
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus. In our implemention, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+ if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+ mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+ if (disable_discovery > 0)
+ return;
+
+ ioc->start_scan = 1;
+ rc = mpt3sas_port_enable(ioc);
+
+ if (rc != 0)
+ pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodicallyn until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (disable_discovery > 0) {
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ return 1;
+ }
+
+ if (time >= (300 * HZ)) {
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with timeout (timeout=300s)\n",
+ ioc->name);
+ ioc->is_driver_loading = 0;
+ return 1;
+ }
+
+ if (ioc->start_scan)
+ return 0;
+
+ if (ioc->start_scan_failed) {
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->name, ioc->start_scan_failed);
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ ioc->remove_host = 1;
+ return 1;
+ }
+
+ pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+ return 1;
+}
+
+/**
+ * _scsih_probe - attach and add scsi host
+ * @pdev: PCI device struct
+ * @id: pci device id
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct Scsi_Host *shost;
+ int rv;
+
+ shost = scsi_host_alloc(&scsih_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+
+ /* init local params */
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ INIT_LIST_HEAD(&ioc->list);
+ list_add_tail(&ioc->list, &mpt3sas_ioc_list);
+ ioc->shost = shost;
+ ioc->id = mpt_ids++;
+ sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id);
+ ioc->pdev = pdev;
+ ioc->scsi_io_cb_idx = scsi_io_cb_idx;
+ ioc->tm_cb_idx = tm_cb_idx;
+ ioc->ctl_cb_idx = ctl_cb_idx;
+ ioc->base_cb_idx = base_cb_idx;
+ ioc->port_enable_cb_idx = port_enable_cb_idx;
+ ioc->transport_cb_idx = transport_cb_idx;
+ ioc->scsih_cb_idx = scsih_cb_idx;
+ ioc->config_cb_idx = config_cb_idx;
+ ioc->tm_tr_cb_idx = tm_tr_cb_idx;
+ ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
+ ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
+ ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* misc semaphores and spin locks */
+ mutex_init(&ioc->reset_in_progress_mutex);
+ spin_lock_init(&ioc->ioc_reset_in_progress_lock);
+ spin_lock_init(&ioc->scsi_lookup_lock);
+ spin_lock_init(&ioc->sas_device_lock);
+ spin_lock_init(&ioc->sas_node_lock);
+ spin_lock_init(&ioc->fw_event_lock);
+ spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->diag_trigger_lock);
+
+ INIT_LIST_HEAD(&ioc->sas_device_list);
+ INIT_LIST_HEAD(&ioc->sas_device_init_list);
+ INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ INIT_LIST_HEAD(&ioc->raid_device_list);
+ INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+
+ /* init shost parameters */
+ shost->max_cmd_len = 32;
+ shost->max_lun = max_lun;
+ shost->transportt = mpt3sas_transport_template;
+ shost->unique_id = ioc->id;
+
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "value of 64.\n", ioc->name, max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "default value of 32767.\n", ioc->name,
+ max_sectors);
+ } else {
+ shost->max_sectors = max_sectors & 0xFFFE;
+ pr_info(MPT3SAS_FMT
+ "The max_sectors value is set to %d\n",
+ ioc->name, shost->max_sectors);
+ }
+ }
+
+ /* register EEDP capabilities with SCSI layer */
+ if (prot_mask > 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ /* event thread */
+ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
+ "fw_event%d", ioc->id);
+ ioc->firmware_event_thread = create_singlethread_workqueue(
+ ioc->firmware_event_name);
+ if (!ioc->firmware_event_thread) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
+ goto out_thread_fail;
+ }
+
+ ioc->is_driver_loading = 1;
+ if ((mpt3sas_base_attach(ioc))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
+ goto out_attach_fail;
+ }
+ rv = scsi_add_host(shost, &pdev->dev);
+ if (rv) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_add_shost_fail;
+ }
+
+ scsi_scan_host(shost);
+ return 0;
+out_add_shost_fail:
+ mpt3sas_base_detach(ioc);
+ out_attach_fail:
+ destroy_workqueue(ioc->firmware_event_thread);
+ out_thread_fail:
+ list_del(&ioc->list);
+ scsi_host_put(shost);
+ return rv;
+}
+
+#ifdef CONFIG_PM
+/**
+ * _scsih_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state;
+
+ mpt3sas_base_stop_watchdog(ioc);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_save_state(pdev);
+ mpt3sas_base_free_resources(ioc);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/**
+ * _scsih_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ ioc->pdev = pdev;
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ return r;
+
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
+ mpt3sas_base_start_watchdog(ioc);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ ioc->pci_error_recovery = 1;
+ scsi_block_requests(ioc->shost);
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ ioc->pci_error_recovery = 1;
+ mpt3sas_base_stop_watchdog(ioc);
+ _scsih_flush_running_cmds(ioc);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pci_error_recovery = 0;
+ ioc->pdev = pdev;
+ pci_restore_state(pdev);
+ rc = mpt3sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that its
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt3sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* raid transport support */
+static struct raid_function_template mpt3sas_raid_functions = {
+ .cookie = &scsih_driver_template,
+ .is_raid = _scsih_is_raid,
+ .get_resync = _scsih_get_resync,
+ .get_state = _scsih_get_state,
+};
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
+
+static struct pci_driver scsih_driver = {
+ .name = MPT3SAS_DRIVER_NAME,
+ .id_table = scsih_pci_table,
+ .probe = _scsih_probe,
+ .remove = _scsih_remove,
+ .shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
+#ifdef CONFIG_PM
+ .suspend = _scsih_suspend,
+ .resume = _scsih_resume,
+#endif
+};
+
+
+/**
+ * _scsih_init - main entry point for this driver.
+ *
+ * Returns 0 success, anything else error.
+ */
+static int __init
+_scsih_init(void)
+{
+ int error;
+
+ mpt_ids = 0;
+
+ pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_transport_template =
+ sas_attach_transport(&mpt3sas_transport_functions);
+ if (!mpt3sas_transport_template)
+ return -ENODEV;
+
+/* raid transport support */
+ mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions);
+ if (!mpt3sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+
+ mpt3sas_base_initialize_callback_handler();
+
+ /* queuecommand callback hander */
+ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
+
+ /* task managment callback handler */
+ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
+
+ /* base internal commands callback handler */
+ base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
+ port_enable_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_port_enable_done);
+
+ /* transport internal commands callback handler */
+ transport_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_transport_done);
+
+ /* scsih internal commands callback handler */
+ scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
+
+ /* configuration page API internal commands callback handler */
+ config_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_config_done);
+
+ /* ctl module callback handler */
+ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
+
+ tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_tr_complete);
+
+ tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_volume_tr_complete);
+
+ tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_sas_control_complete);
+
+ mpt3sas_ctl_init();
+
+ error = pci_register_driver(&scsih_driver);
+ if (error) {
+ /* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+ }
+
+ return error;
+}
+
+/**
+ * _scsih_exit - exit point for this driver (when it is a module).
+ *
+ * Returns 0 success, anything else error.
+ */
+static void __exit
+_scsih_exit(void)
+{
+ pr_info("mpt3sas version %s unloading\n",
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_ctl_exit();
+
+ pci_unregister_driver(&scsih_driver);
+
+
+ mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_cb_idx);
+ mpt3sas_base_release_callback_handler(base_cb_idx);
+ mpt3sas_base_release_callback_handler(port_enable_cb_idx);
+ mpt3sas_base_release_callback_handler(transport_cb_idx);
+ mpt3sas_base_release_callback_handler(scsih_cb_idx);
+ mpt3sas_base_release_callback_handler(config_cb_idx);
+ mpt3sas_base_release_callback_handler(ctl_cb_idx);
+
+ mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
+
+/* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+}
+
+module_init(_scsih_init);
+module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
new file mode 100644
index 000000000..efb98afc4
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -0,0 +1,2130 @@
+/*
+ * SAS Transport Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _transport_sas_node_find_by_sas_address - sas node search
+ * @ioc: per adapter object
+ * @sas_address: sas address of expander or sas host
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Search for either hba phys or expander device based on handle, then returns
+ * the sas_node object.
+ */
+static struct _sas_node *
+_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ if (ioc->sas_hba.sas_address == sas_address)
+ return &ioc->sas_hba;
+ else
+ return mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+}
+
+/**
+ * _transport_convert_phy_link_rate -
+ * @link_rate: link rate returned from mpt firmware
+ *
+ * Convert link_rate from mpi fusion into sas_transport form.
+ */
+static enum sas_linkrate
+_transport_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI2_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI25_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+
+ default:
+ case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * _transport_set_identify - set identify for phys and end devices
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @identify: sas identify info
+ *
+ * Populates sas identify info.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ struct sas_identify *identify)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 device_info;
+ u32 ioc_status;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sas_device_pg0.PhyNum;
+
+ /* device_type */
+ switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI2_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /* target_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_done - internal transport layer callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated transport cmds.
+ * The callback index passed is `ioc->transport_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->transport_cmds.smid != smid)
+ return 1;
+ ioc->transport_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->transport_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->transport_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->transport_cmds.done);
+ return 1;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ &data_out_dma);
+
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + sizeof(struct rep_manu_request);
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - send to sas_addr(0x%016llx)\n",
+ ioc->name, (unsigned long long)sas_address));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+ u8 *tmp;
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+
+/**
+ * _transport_delete_port - helper function to removing a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ enum sas_device_type device_type =
+ mpt3sas_port->remote_identify.device_type;
+
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ if (device_type == SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc, sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_delete_phy - helper function to removing single phy from port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mpt3sas_phy->phy_id);
+
+ list_del(&mpt3sas_phy->port_siblings);
+ mpt3sas_port->num_phys--;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function to adding single phy to port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
+ struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mpt3sas_phy->phy_id);
+
+ list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @sas_address: sas address of device/expander were phy needs to be added to
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch == mpt3sas_phy)
+ return;
+ }
+ _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
+ return;
+ }
+
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy)
+{
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch != mpt3sas_phy)
+ continue;
+
+ if (mpt3sas_port->num_phys == 1)
+ _transport_delete_port(ioc, mpt3sas_port);
+ else
+ _transport_delete_phy(ioc, mpt3sas_port,
+ mpt3sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * See the explanation above from _transport_delete_duplicate_port
+ */
+static void
+_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address != sas_address)
+ continue;
+ if (sas_node->phy[i].phy_belongs_to_port == 1)
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ &sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpt3sas_transport_port_add - insert port to the list
+ * @ioc: per adapter object
+ * @handle: handle of attached device
+ * @sas_address: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new port object to the sas_node->sas_port_list.
+ *
+ * Returns mpt3sas_port.
+ */
+struct _sas_port *
+mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 sas_address)
+{
+ struct _sas_phy *mpt3sas_phy, *next;
+ struct _sas_port *mpt3sas_port;
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct sas_rphy *rphy;
+ int i;
+ struct sas_port *port;
+
+ mpt3sas_port = kzalloc(sizeof(struct _sas_port),
+ GFP_KERNEL);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mpt3sas_port->port_list);
+ INIT_LIST_HEAD(&mpt3sas_port->phy_list);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!sas_node) {
+ pr_err(MPT3SAS_FMT
+ "%s: Could not find parent sas_address(0x%016llx)!\n",
+ ioc->name, __func__, (unsigned long long)sas_address);
+ goto out_fail;
+ }
+
+ if ((_transport_set_identify(ioc, handle,
+ &mpt3sas_port->remote_identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ _transport_sanity_check(ioc, sas_node,
+ mpt3sas_port->remote_identify.sas_address);
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address !=
+ mpt3sas_port->remote_identify.sas_address)
+ continue;
+ list_add_tail(&sas_node->phy[i].port_siblings,
+ &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ }
+
+ if (!mpt3sas_port->num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &port->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ sas_port_add_phy(port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+ }
+
+ mpt3sas_port->port = port;
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ rphy = sas_end_device_alloc(port);
+ else
+ rphy = sas_expander_alloc(port,
+ mpt3sas_port->remote_identify.device_type);
+
+ rphy->identify = mpt3sas_port->remote_identify;
+ if ((sas_rphy_add(rphy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &rphy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->rphy = rphy;
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* fill in report manufacture */
+ if (mpt3sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+ mpt3sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
+ _transport_expander_report_manufacture(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy));
+ return mpt3sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ port_siblings)
+ list_del(&mpt3sas_phy->port_siblings);
+ kfree(mpt3sas_port);
+ return NULL;
+}
+
+/**
+ * mpt3sas_transport_port_remove - remove port from the list
+ * @ioc: per adapter object
+ * @sas_address: sas address of attached device
+ * @sas_address_parent: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_port_list.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent)
+{
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_node *sas_node;
+ u8 found = 0;
+ struct _sas_phy *mpt3sas_phy, *next_phy;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address_parent);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ found = 1;
+ list_del(&mpt3sas_port->port_list);
+ goto out;
+ }
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ memset(&sas_node->phy[i].remote_identify, 0 ,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ list_for_each_entry_safe(mpt3sas_phy, next_phy,
+ &mpt3sas_port->phy_list, port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ list_del(&mpt3sas_phy->port_siblings);
+ }
+ sas_port_delete(mpt3sas_port->port);
+ kfree(mpt3sas_port);
+}
+
+/**
+ * mpt3sas_transport_add_host_phy - report sas_host phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @phy_pg0: sas phy page 0
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+
+/**
+ * mpt3sas_transport_add_expander_phy - report expander phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @expander_pg1: expander page 1
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_update_links - refreshing phy link changes
+ * @ioc: per adapter object
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_numberv: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct _sas_phy *mpt3sas_phy;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ mpt3sas_phy = &sas_node->phy[phy_number];
+ mpt3sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ _transport_set_identify(ioc, handle,
+ &mpt3sas_phy->remote_identify);
+ _transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+ } else
+ memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
+ sas_identify));
+
+ if (mpt3sas_phy->phy)
+ mpt3sas_phy->phy->negotiated_linkrate =
+ _transport_convert_phy_link_rate(link_rate);
+
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "refresh: parent sas_addr(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ (unsigned long long)sas_address,
+ link_rate, phy_number, handle, (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+}
+
+static inline void *
+phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return shost_priv(shost);
+}
+
+static inline void *
+rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
+
+/**
+ * _transport_get_expander_phy_error_log - return expander counters
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_error_log_request) +
+ sizeof(struct phy_error_log_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_error_log_request),
+ data_out_dma + sizeof(struct phy_error_log_request),
+ sizeof(struct phy_error_log_reply));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ phy_error_log_reply = data_out +
+ sizeof(struct phy_error_log_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - function_result(%d)\n",
+ ioc->name, phy_error_log_reply->function_result));
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_get_linkerrors - return phy counters for both hba and expanders
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasPhyPage1_t phy_pg1;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_get_expander_phy_error_log(ioc, phy);
+
+ /* get hba phy error logs */
+ if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
+ phy->number))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.LossDwordSynchCount);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.PhyResetProblemCount);
+ return 0;
+}
+
+/**
+ * _transport_get_enclosure_identifier -
+ * @phy: The sas phy object
+ *
+ * Obtain the enclosure logical id for an expander.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device) {
+ *identifier = sas_device->enclosure_logical_id;
+ rc = 0;
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _transport_get_bay_identifier -
+ * @phy: The sas phy object
+ *
+ * Returns the slot id for a device that resides inside an enclosure.
+ */
+static int
+_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device)
+ rc = sas_device->slot;
+ else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
+
+/**
+ * _transport_expander_phy_control - expander phy control
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_control_request) +
+ sizeof(struct phy_control_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_reply), data_out_dma +
+ sizeof(struct phy_control_request));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number, phy_operation));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - function_result(%d)\n",
+ ioc->name, phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_phy_reset -
+ * @phy: The sas phy object
+ * @hard_reset:
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+ mpi_request.PhyNum = phy->number;
+
+ if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+ unsigned long flags;
+ int i, discovery_active;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ pr_err(MPT3SAS_FMT "discovery is active on " \
+ "port = %d, phy = %d: unable to enable/disable "
+ "phys, try again later!\n", ioc->name,
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy Port/PortFlags/PhyFlags from page 0 */
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ sas_iounit_pg1->PhyData[i].Port =
+ sas_iounit_pg0->PhyData[i].Port;
+ sas_iounit_pg1->PhyData[i].PortFlags =
+ (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+ sas_iounit_pg1->PhyData[i].PhyFlags =
+ (sas_iounit_pg0->PhyData[i].PhyFlags &
+ (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
+ MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+ }
+
+ if (enable)
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ _transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+ return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return _transport_expander_phy_control(ioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ if (phy->number != i) {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+ (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+ } else {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (rates->minimum_linkrate +
+ (rates->maximum_linkrate << 4));
+ }
+ }
+
+ if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ _transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ phy->number)) {
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+/**
+ * _transport_smp_handler - transport portal for smp passthru
+ * @shost: shost object
+ * @rphy: sas transport rphy object
+ * @req:
+ *
+ * This used primarily for smp_utils.
+ * Example:
+ * smp_rep_general /sys/class/bsg/expander-5:0
+ */
+static int
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ dma_addr_t dma_addr_in = 0;
+ dma_addr_t dma_addr_out = 0;
+ dma_addr_t pci_dma_in = 0;
+ dma_addr_t pci_dma_out = 0;
+ void *pci_addr_in = NULL;
+ void *pci_addr_out = NULL;
+ u16 wait_state_count;
+ struct request *rsp = req->next_rq;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ if (!rsp) {
+ pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+
+ rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
+ if (rc)
+ return rc;
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
+ __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ /* Check if the request is split across multiple segments */
+ if (bio_multiple_segments(req->bio)) {
+ u32 offset = 0;
+
+ /* Allocate memory and copy the request */
+ pci_addr_out = pci_alloc_consistent(ioc->pdev,
+ blk_rq_bytes(req), &pci_dma_out);
+ if (!pci_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ bio_for_each_segment(bvec, req->bio, iter) {
+ memcpy(pci_addr_out + offset,
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
+ }
+ } else {
+ dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto free_pci;
+ }
+ }
+
+ /* Check if the response needs to be populated across
+ * multiple segments */
+ if (bio_multiple_segments(rsp->bio)) {
+ pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+ &pci_dma_in);
+ if (!pci_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ } else {
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto unmap;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto unmap;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(ioc->sas_hba.sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+ psge = &mpi_request->SGL;
+
+ if (bio_multiple_segments(req->bio))
+ ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
+ pci_dma_in, (blk_rq_bytes(rsp) + 4));
+ else
+ ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
+ dma_addr_in, (blk_rq_bytes(rsp) + 4));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - sending smp request\n", ioc->name, __func__));
+
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s : timeout\n",
+ __func__, ioc->name);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - complete\n", ioc->name, __func__));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - reply data transfer size(%d)\n",
+ ioc->name, __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
+ req->sense_len = sizeof(*mpi_reply);
+ req->resid_len = 0;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+ if (bio_multiple_segments(rsp->bio)) {
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
+ }
+ offset += bvec.bv_len;
+ }
+ }
+ } else {
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - no reply\n", ioc->name, __func__));
+ rc = -ENXIO;
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = -ETIMEDOUT;
+ }
+
+ unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+struct sas_function_template mpt3sas_transport_functions = {
+ .get_linkerrors = _transport_get_linkerrors,
+ .get_enclosure_identifier = _transport_get_enclosure_identifier,
+ .get_bay_identifier = _transport_get_bay_identifier,
+ .phy_reset = _transport_phy_reset,
+ .phy_enable = _transport_phy_enable,
+ .set_phy_speed = _transport_phy_speed,
+ .smp_handler = _transport_smp_handler,
+};
+
+struct scsi_transport_template *mpt3sas_transport_template;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
new file mode 100644
index 000000000..b60fd7a3b
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -0,0 +1,434 @@
+/*
+ * This module provides common API to set Diagnostic trigger for MPT
+ * (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _mpt3sas_raise_sigio - notifiy app
+ * @ioc: per adapter object
+ * @event_data:
+ */
+static void
+_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 sz, event_data_sz;
+ unsigned long flags;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
+ mpi_reply = kzalloc(sz, GFP_KERNEL);
+ if (!mpi_reply)
+ goto out;
+ mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED);
+ event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4;
+ mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
+ memcpy(&mpi_reply->EventData, event_data,
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: add to driver event log\n",
+ ioc->name, __func__));
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ kfree(mpi_reply);
+ out:
+
+ /* clearing the diag_trigger_active flag */
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: clearing diag_trigger_active flag\n",
+ ioc->name, __func__));
+ ioc->diag_trigger_active = 0;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_process_trigger_data - process the event data for the trigger
+ * @ioc: per adapter object
+ * @event_data:
+ */
+void
+mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ u8 issue_reset = 0;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ /* release the diag buffer trace */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: release trace diag buffer\n", ioc->name, __func__));
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ _mpt3sas_raise_sigio(ioc, event_data);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_master - Master trigger handler
+ * @ioc: per adapter object
+ * @trigger_bitmask:
+ *
+ */
+void
+mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ unsigned long flags;
+ u8 found_match = 0;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ goto by_pass_checks;
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ by_pass_checks:
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - trigger_bitmask = 0x%08x\n",
+ ioc->name, __func__, trigger_bitmask));
+
+ /* don't send trigger if an trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MASTER;
+ event_data.u.master.MasterData = trigger_bitmask;
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ _mpt3sas_raise_sigio(ioc, &event_data);
+ else
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_event - Event trigger handler
+ * @ioc: per adapter object
+ * @event:
+ * @log_entry_qualifier:
+ *
+ */
+void
+mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_EVENT_TRIGGER_T *event_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ ioc->name, __func__, event, log_entry_qualifier));
+
+ /* don't send trigger if an trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ event_trigger = ioc->diag_trigger_event.EventTriggerEntry;
+ for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries
+ && !found_match; i++, event_trigger++) {
+ if (event_trigger->EventValue != event)
+ continue;
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+ if (event_trigger->LogEntryQualifier ==
+ log_entry_qualifier)
+ found_match = 1;
+ continue;
+ }
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
+ event_data.u.event.EventValue = event;
+ event_data.u.event.LogEntryQualifier = log_entry_qualifier;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_scsi - SCSI trigger handler
+ * @ioc: per adapter object
+ * @sense_key:
+ * @asc:
+ * @ascq:
+ *
+ */
+void
+mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
+ u8 ascq)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_SCSI_TRIGGER_T *scsi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ ioc->name, __func__, sense_key, asc, ascq));
+
+ /* don't send trigger if an trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry;
+ for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries
+ && !found_match; i++, scsi_trigger++) {
+ if (scsi_trigger->SenseKey != sense_key)
+ continue;
+ if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc))
+ continue;
+ if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
+ event_data.u.scsi.SenseKey = sense_key;
+ event_data.u.scsi.ASC = asc;
+ event_data.u.scsi.ASCQ = ascq;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_mpi - MPI trigger handler
+ * @ioc: per adapter object
+ * @ioc_status:
+ * @loginfo:
+ *
+ */
+void
+mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_MPI_TRIGGER_T *mpi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ ioc->name, __func__, ioc_status, loginfo));
+
+ /* don't send trigger if an trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry;
+ for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries
+ && !found_match; i++, mpi_trigger++) {
+ if (mpi_trigger->IOCStatus != ioc_status)
+ continue;
+ if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF ||
+ mpi_trigger->IocLogInfo == loginfo))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
+ event_data.u.mpi.IOCStatus = ioc_status;
+ event_data.u.mpi.IocLogInfo = loginfo;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
new file mode 100644
index 000000000..6586a463b
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -0,0 +1,194 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to set Diagnostic triggers for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+ /* Diagnostic Trigger Configuration Data Structures */
+
+#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+
+/* limitation on number of entries */
+#define NUM_VALID_ENTRIES (20)
+
+/* trigger types */
+#define MPT3SAS_TRIGGER_MASTER (1)
+#define MPT3SAS_TRIGGER_EVENT (2)
+#define MPT3SAS_TRIGGER_SCSI (3)
+#define MPT3SAS_TRIGGER_MPI (4)
+
+/* trigger names */
+#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master"
+#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event"
+#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi"
+#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi"
+
+/* master trigger bitmask */
+#define MASTER_TRIGGER_FW_FAULT (0x00000001)
+#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002)
+#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004)
+#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008)
+
+/* fake firmware event for tigger */
+#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E)
+
+/**
+ * MasterTrigger is a single U32 passed to/from sysfs.
+ *
+ * Bit Flags (enables) include:
+ * 1. FW Faults
+ * 2. Adapter Reset issued by driver
+ * 3. TMs
+ * 4. Device Remove Event sent by FW
+ */
+
+struct SL_WH_MASTER_TRIGGER_T {
+ uint32_t MasterData;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element
+ * @EventValue: Event Code to trigger on
+ * @LogEntryQualifier: Type of FW event that logged (Log Entry Added Event only)
+ *
+ * Defines an event that should induce a DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_EVENT_TRIGGER_T {
+ uint16_t EventValue;
+ uint16_t LogEntryQualifier;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of Event Triggers to be monitored for.
+ * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this
+ * structure.
+ * @EventTriggerEntry: List of Event trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set Event Triggers
+ * in the Linux Driver.
+ */
+
+struct SL_WH_EVENT_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element
+ * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for
+ * wildcard.
+ * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard
+ * @SenseKey: SCSI Sense Key
+ *
+ * Defines a sense key (single or many variants) that should induce a
+ * DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_SCSI_TRIGGER_T {
+ U8 ASCQ;
+ U8 ASC;
+ U8 SenseKey;
+ U8 Reserved;
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of SCSI sense codes that should trigger a DIAG_SERVICE event when
+ * observed.
+ * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this
+ * structure.
+ * @SCSITriggerEntry: List of SCSI Sense Code trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set SCSI Sense Code
+ * Triggers in the Linux Driver.
+ */
+struct SL_WH_SCSI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element
+ * @IOCStatus: MPI IOCStatus
+ * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard
+ *
+ * Defines a MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER
+ * driver event if observed.
+ */
+struct SL_WH_MPI_TRIGGER_T {
+ uint16_t IOCStatus;
+ uint16_t Reserved;
+ uint32_t IocLogInfo;
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE
+ * event when observed.
+ * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this
+ * structure.
+ * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set MPI Error Triggers
+ * in the Linux Driver.
+ */
+struct SL_WH_MPI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger
+ * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX)
+ * @u: trigger condition that caused trigger to be sent
+ */
+struct SL_WH_TRIGGERS_EVENT_DATA_T {
+ uint32_t trigger_type;
+ union {
+ struct SL_WH_MASTER_TRIGGER_T master;
+ struct SL_WH_EVENT_TRIGGER_T event;
+ struct SL_WH_SCSI_TRIGGER_T scsi;
+ struct SL_WH_MPI_TRIGGER_T mpi;
+ } u;
+};
+#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
new file mode 100644
index 000000000..e7f6661a8
--- /dev/null
+++ b/drivers/scsi/mvme147.c
@@ -0,0 +1,166 @@
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mvme147hw.h>
+#include <asm/irq.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "wd33c93.h"
+#include "mvme147.h"
+
+#include <linux/stat.h>
+
+
+static irqreturn_t mvme147_intr(int irq, void *data)
+{
+ struct Scsi_Host *instance = data;
+
+ if (irq == MVME147_IRQ_SCSI_PORT)
+ wd33c93_intr(instance);
+ else
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ return IRQ_HANDLED;
+}
+
+static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ unsigned char flags = 0x01;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /* setup dma direction */
+ if (!dir_in)
+ flags |= 0x04;
+
+ /* remember direction */
+ hdata->dma_dir = dir_in;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+
+ /* start DMA */
+ m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_dadr = addr;
+ m147_pcc->dma_cntrl = flags;
+
+ /* return success */
+ return 0;
+}
+
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ int status)
+{
+ m147_pcc->dma_cntrl = 0;
+}
+
+int mvme147_detect(struct scsi_host_template *tpnt)
+{
+ static unsigned char called = 0;
+ struct Scsi_Host *instance;
+ wd33c93_regs regs;
+ struct WD33C93_hostdata *hdata;
+
+ if (!MACH_IS_MVME147 || called)
+ return 0;
+ called++;
+
+ tpnt->proc_name = "MVME147";
+ tpnt->show_info = wd33c93_show_info,
+ tpnt->write_info = wd33c93_write_info,
+
+ instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (!instance)
+ goto err_out;
+
+ instance->base = 0xfffe4000;
+ instance->irq = MVME147_IRQ_SCSI_PORT;
+ regs.SASR = (volatile unsigned char *)0xfffe4000;
+ regs.SCMD = (volatile unsigned char *)0xfffe4001;
+ hdata = shost_priv(instance);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
+
+ if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
+ "MVME147 SCSI PORT", instance))
+ goto err_unregister;
+ if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
+ "MVME147 SCSI DMA", instance))
+ goto err_free_irq;
+#if 0 /* Disabled; causes problems booting */
+ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
+ udelay(100);
+ m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
+ udelay(2000);
+ m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
+#endif
+ m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
+
+ m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+
+ return 1;
+
+err_free_irq:
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+err_unregister:
+ scsi_unregister(instance);
+err_out:
+ return 0;
+}
+
+static int mvme147_bus_reset(struct scsi_cmnd *cmd)
+{
+ /* FIXME perform bus-specific reset */
+
+ /* FIXME 2: kill this function, and let midlayer fallback to
+ the same result, calling wd33c93_host_reset() */
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ wd33c93_host_reset(cmd);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return SUCCESS;
+}
+
+
+static struct scsi_host_template driver_template = {
+ .proc_name = "MVME147",
+ .name = "MVME147 built-in SCSI",
+ .detect = mvme147_detect,
+ .release = mvme147_release,
+ .queuecommand = wd33c93_queuecommand,
+ .eh_abort_handler = wd33c93_abort,
+ .eh_bus_reset_handler = mvme147_bus_reset,
+ .eh_host_reset_handler = wd33c93_host_reset,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING
+};
+
+
+#include "scsi_module.c"
+
+int mvme147_release(struct Scsi_Host *instance)
+{
+#ifdef MODULE
+ /* XXX Make sure DMA is stopped! */
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+ free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
+#endif
+ return 1;
+}
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
new file mode 100644
index 000000000..bfd4566ef
--- /dev/null
+++ b/drivers/scsi/mvme147.h
@@ -0,0 +1,24 @@
+#ifndef MVME147_H
+
+/* $Id: mvme147.h,v 1.4 1997/01/19 23:07:10 davem Exp $
+ *
+ * Header file for the MVME147 built-in SCSI controller for Linux
+ *
+ * Written and (C) 1993, Hamish Macdonald, see mvme147.c for more info
+ *
+ */
+
+#include <linux/types.h>
+
+int mvme147_detect(struct scsi_host_template *);
+int mvme147_release(struct Scsi_Host *);
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+#endif /* MVME147_H */
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
new file mode 100644
index 000000000..050c8c39d
--- /dev/null
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -0,0 +1,159 @@
+/*
+ * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
+ *
+ * Based on work by Alan Hourihane
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/mvme16xhw.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("MVME16x NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template mvme16x_scsi_driver_template = {
+ .name = "MVME16x NCR53c710 SCSI",
+ .proc_name = "MVME16x",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *mvme16x_scsi_device;
+
+static int mvme16x_probe(struct platform_device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!MACH_IS_MVME16x)
+ goto out;
+
+ if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
+ printk(KERN_INFO "mvme16x-scsi: detection disabled, "
+ "SCSI chip not present\n");
+ goto out;
+ }
+
+ hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "mvme16x-scsi: "
+ "Failed to allocate host data\n");
+ goto out;
+ }
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)0xfff47000UL;
+ hostdata->clock = 50; /* XXX - depends on the CPU clock! */
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ /* and register the chip */
+ host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata,
+ &dev->dev);
+ if (!host) {
+ printk(KERN_ERR "mvme16x-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+ host->this_id = 7;
+ host->base = 0xfff47000UL;
+ host->irq = MVME16x_IRQ_SCSI;
+ if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) {
+ printk(KERN_ERR "mvme16x-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ /* Enable scsi chip ints */
+ {
+ volatile unsigned long v;
+
+ /* Enable scsi interrupts at level 4 in PCCchip2 */
+ v = in_be32(0xfff4202c);
+ v = (v & ~0xff) | 0x10 | 4;
+ out_be32(0xfff4202c, v);
+ }
+
+ platform_set_drvdata(dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static int mvme16x_device_remove(struct platform_device *dev)
+{
+ struct Scsi_Host *host = platform_get_drvdata(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ /* Disable scsi chip ints */
+ {
+ volatile unsigned long v;
+
+ v = in_be32(0xfff4202c);
+ v &= ~0x10;
+ out_be32(0xfff4202c, v);
+ }
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+
+ return 0;
+}
+
+static struct platform_driver mvme16x_scsi_driver = {
+ .driver = {
+ .name = "mvme16x-scsi",
+ },
+ .probe = mvme16x_probe,
+ .remove = mvme16x_device_remove,
+};
+
+static int __init mvme16x_scsi_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&mvme16x_scsi_driver);
+ if (err)
+ return err;
+
+ mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(mvme16x_scsi_device)) {
+ platform_driver_unregister(&mvme16x_scsi_driver);
+ return PTR_ERR(mvme16x_scsi_device);
+ }
+
+ return 0;
+}
+
+static void __exit mvme16x_scsi_exit(void)
+{
+ platform_device_unregister(mvme16x_scsi_device);
+ platform_driver_unregister(&mvme16x_scsi_driver);
+}
+
+module_init(mvme16x_scsi_init);
+module_exit(mvme16x_scsi_exit);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 000000000..78f7e20a0
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,50 @@
+#
+# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the 88SE64XX/88SE94XX driver.
+#
+# The 88SE64XX/88SE94XX driver is free software; you can redistribute
+# it and/or modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+#
+
+config SCSI_MVSAS
+ tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
+ depends on PCI
+ select SCSI_SAS_LIBSAS
+ select FW_LOADER
+ help
+ This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
+ PCI-E 88SE94XX chip based host adapters.
+
+config SCSI_MVSAS_DEBUG
+ bool "Compile in debug mode"
+ default y
+ depends on SCSI_MVSAS
+ help
+ Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
+ the driver prints some messages to the console.
+config SCSI_MVSAS_TASKLET
+ bool "Support for interrupt tasklet"
+ default n
+ depends on SCSI_MVSAS
+ help
+ Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode,
+ the interrupt will schedule a tasklet.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 000000000..87b231a5b
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,31 @@
+#
+# Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+ccflags-$(CONFIG_SCSI_MVSAS_DEBUG) := -DMV_DEBUG
+
+obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
+mvsas-y += mv_init.o \
+ mv_sas.o \
+ mv_64xx.o \
+ mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 000000000..8bb06995a
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,828 @@
+/*
+ * Marvell 88SE64xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_64xx.h"
+#include "mv_chips.h"
+
+static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+ void __iomem *regs = mvi->regs;
+ u32 reg;
+ struct mvs_phy *phy = &mvi->phy[i];
+
+ reg = mr32(MVS_GBL_PORT_TYPE);
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ if (reg & MODE_SAS_SATA & (1 << i))
+ phy->phy_type |= PORT_TYPE_SAS;
+ else
+ phy->phy_type |= PORT_TYPE_SATA;
+}
+
+static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_PCS);
+ if (mvi->chip->n_phy <= MVS_SOC_PORTS)
+ tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
+ else
+ tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_64xx_phy_hacks(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ int i;
+
+ mvs_phy_hacks(mvi);
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ for (i = 0; i < MVS_SOC_PORTS; i++) {
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
+ mvs_write_port_vsr_data(mvi, i, 0x2F0);
+ }
+ } else {
+ /* disable auto port detection */
+ mw32(MVS_GBL_PORT_TYPE, 0);
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
+ mvs_write_port_vsr_data(mvi, i, 0x90000000);
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
+ mvs_write_port_vsr_data(mvi, i, 0x50f2);
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
+ mvs_write_port_vsr_data(mvi, i, 0x0e);
+ }
+ }
+}
+
+static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 reg, tmp;
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ if (phy_id < MVS_SOC_PORTS)
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
+ else
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
+
+ } else
+ reg = mr32(MVS_PHY_CTL);
+
+ tmp = reg;
+ if (phy_id < MVS_SOC_PORTS)
+ tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
+ else
+ tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ if (phy_id < MVS_SOC_PORTS) {
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+ mdelay(10);
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
+ } else {
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+ mdelay(10);
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
+ }
+ } else {
+ mw32(MVS_PHY_CTL, tmp);
+ mdelay(10);
+ mw32(MVS_PHY_CTL, reg);
+ }
+}
+
+static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+ u32 tmp;
+ tmp = mvs_read_port_irq_stat(mvi, phy_id);
+ tmp &= ~PHYEV_RDY_CH;
+ mvs_write_port_irq_stat(mvi, phy_id, tmp);
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ if (hard == MVS_HARD_RESET)
+ tmp |= PHY_RST_HARD;
+ else if (hard == MVS_SOFT_RESET)
+ tmp |= PHY_RST;
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ if (hard) {
+ do {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ } while (tmp & PHY_RST_HARD);
+ }
+}
+
+void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ } else {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp & (1 << (reg_set % 32))) {
+ printk(KERN_DEBUG "register set 0x%x was stopped.\n",
+ reg_set);
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
+}
+
+static int mvs_64xx_chip_reset(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ int i;
+
+ /* make sure interrupts are masked immediately (paranoia) */
+ mw32(MVS_GBL_CTL, 0);
+ tmp = mr32(MVS_GBL_CTL);
+
+ /* Reset Controller */
+ if (!(tmp & HBA_RST)) {
+ if (mvi->flags & MVF_PHY_PWR_FIX) {
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+ }
+ }
+
+ /* make sure interrupts are masked immediately (paranoia) */
+ mw32(MVS_GBL_CTL, 0);
+ tmp = mr32(MVS_GBL_CTL);
+
+ /* Reset Controller */
+ if (!(tmp & HBA_RST)) {
+ /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
+ mw32_f(MVS_GBL_CTL, HBA_RST);
+ }
+
+ /* wait for reset to finish; timeout is just a guess */
+ i = 1000;
+ while (i-- > 0) {
+ msleep(10);
+
+ if (!(mr32(MVS_GBL_CTL) & HBA_RST))
+ break;
+ }
+ if (mr32(MVS_GBL_CTL) & HBA_RST) {
+ dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ u32 offs;
+ if (phy_id < 4)
+ offs = PCR_PHY_CTL;
+ else {
+ offs = PCR_PHY_CTL2;
+ phy_id -= 4;
+ }
+ pci_read_config_dword(mvi->pdev, offs, &tmp);
+ tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+ pci_write_config_dword(mvi->pdev, offs, tmp);
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+ mw32(MVS_PHY_CTL, tmp);
+ }
+}
+
+static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ u32 offs;
+ if (phy_id < 4)
+ offs = PCR_PHY_CTL;
+ else {
+ offs = PCR_PHY_CTL2;
+ phy_id -= 4;
+ }
+ pci_read_config_dword(mvi->pdev, offs, &tmp);
+ tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+ pci_write_config_dword(mvi->pdev, offs, tmp);
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+ mw32(MVS_PHY_CTL, tmp);
+ }
+}
+
+static int mvs_64xx_init(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ int i;
+ u32 tmp, cctl;
+
+ if (mvi->pdev && mvi->pdev->revision == 0)
+ mvi->flags |= MVF_PHY_PWR_FIX;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ mvs_show_pcie_usage(mvi);
+ tmp = mvs_64xx_chip_reset(mvi);
+ if (tmp)
+ return tmp;
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ mw32(MVS_PHY_CTL, tmp);
+ }
+
+ /* Init Chip */
+ /* make sure RST is set; HBA_RST /should/ have done that for us */
+ cctl = mr32(MVS_CTL) & 0xFFFF;
+ if (cctl & CCTL_RST)
+ cctl &= ~CCTL_RST;
+ else
+ mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ /* write to device control _AND_ device status register */
+ pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+ tmp &= ~PRD_REQ_MASK;
+ tmp |= PRD_REQ_SIZE;
+ pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp &= ~PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+ tmp &= PCTL_PWR_OFF;
+ tmp &= ~PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_COM_ON;
+ tmp &= ~PCTL_PHY_DSBL;
+ tmp |= PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ tmp &= ~PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ }
+
+ /* reset control */
+ mw32(MVS_PCS, 0); /* MVS_PCS */
+ /* init phys */
+ mvs_64xx_phy_hacks(mvi);
+
+ tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+ tmp &= 0x0000ffff;
+ tmp |= 0x00fa0000;
+ mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
+ /* enable auto port detection */
+ mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+
+ mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+ mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+ mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+ mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+ mw32(MVS_TX_LO, mvi->tx_dma);
+ mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+ mw32(MVS_RX_LO, mvi->rx_dma);
+ mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ /* set phy local SAS address */
+ /* should set little endian SAS address to 64xx chip */
+ mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
+ cpu_to_be64(mvi->phy[i].dev_sas_addr));
+
+ mvs_64xx_enable_xmt(mvi, i);
+
+ mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
+ msleep(500);
+ mvs_64xx_detect_porttype(mvi, i);
+ }
+ if (mvi->flags & MVF_FLAG_SOC) {
+ /* set select registers */
+ writel(0x0E008000, regs + 0x000);
+ writel(0x59000008, regs + 0x004);
+ writel(0x20, regs + 0x008);
+ writel(0x20, regs + 0x00c);
+ writel(0x20, regs + 0x010);
+ writel(0x20, regs + 0x014);
+ writel(0x20, regs + 0x018);
+ writel(0x20, regs + 0x01c);
+ }
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ /* clear phy int status */
+ tmp = mvs_read_port_irq_stat(mvi, i);
+ tmp &= ~PHYEV_SIG_FIS;
+ mvs_write_port_irq_stat(mvi, i, tmp);
+
+ /* set phy int mask */
+ tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+ PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
+ PHYEV_DEC_ERR;
+ mvs_write_port_irq_mask(mvi, i, tmp);
+
+ msleep(100);
+ mvs_update_phyinfo(mvi, i, 1);
+ }
+
+ /* little endian for open address and command table, etc. */
+ cctl = mr32(MVS_CTL);
+ cctl |= CCTL_ENDIAN_CMD;
+ cctl |= CCTL_ENDIAN_DATA;
+ cctl &= ~CCTL_ENDIAN_OPEN;
+ cctl |= CCTL_ENDIAN_RSP;
+ mw32_f(MVS_CTL, cctl);
+
+ /* reset CMD queue */
+ tmp = mr32(MVS_PCS);
+ tmp |= PCS_CMD_RST;
+ tmp &= ~PCS_SELF_CLEAR;
+ mw32(MVS_PCS, tmp);
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ tmp = 0;
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
+
+ tmp = 0x10000 | interrupt_coalescing;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+
+ /* ladies and gentlemen, start your engines */
+ mw32(MVS_TX_CFG, 0);
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+ /* enable CMD/CMPL_Q/RESP mode */
+ mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
+ PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+ /* enable completion queue interrupt */
+ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+ CINT_DMA_PCIE);
+
+ mw32(MVS_INT_MASK, tmp);
+
+ /* Enable SRS interrupt */
+ mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+ return 0;
+}
+
+static int mvs_64xx_ioremap(struct mvs_info *mvi)
+{
+ if (!mvs_ioremap(mvi, 4, 2))
+ return 0;
+ return -1;
+}
+
+static void mvs_64xx_iounmap(struct mvs_info *mvi)
+{
+ mvs_iounmap(mvi->regs);
+ mvs_iounmap(mvi->regs_ex);
+}
+
+static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+ mw32(MVS_GBL_CTL, tmp | INT_EN);
+}
+
+static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+ mw32(MVS_GBL_CTL, tmp & ~INT_EN);
+}
+
+static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
+{
+ void __iomem *regs = mvi->regs;
+ u32 stat;
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ stat = mr32(MVS_GBL_INT_STAT);
+
+ if (stat == 0 || stat == 0xffffffff)
+ return 0;
+ } else
+ stat = 1;
+ return stat;
+}
+
+static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+ void __iomem *regs = mvi->regs;
+
+ /* clear CMD_CMPLT ASAP */
+ mw32_f(MVS_INT_STAT, CINT_DONE);
+
+ spin_lock(&mvi->lock);
+ mvs_int_full(mvi);
+ spin_unlock(&mvi->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+ u32 tmp;
+ mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
+ mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+ u32 tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ if (type == PORT_TYPE_SATA) {
+ tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ mw32(MVS_INT_STAT, CINT_CI_STOP);
+ tmp = mr32(MVS_PCS) | 0xFF00;
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp, offs;
+
+ if (*tfs == MVS_ID_NOT_MAPPED)
+ return;
+
+ offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+ if (*tfs < 16) {
+ tmp = mr32(MVS_PCS);
+ mw32(MVS_PCS, tmp & ~offs);
+ } else {
+ tmp = mr32(MVS_CTL);
+ mw32(MVS_CTL, tmp & ~offs);
+ }
+
+ tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+
+ *tfs = MVS_ID_NOT_MAPPED;
+ return;
+}
+
+static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ int i;
+ u32 tmp, offs;
+ void __iomem *regs = mvi->regs;
+
+ if (*tfs != MVS_ID_NOT_MAPPED)
+ return 0;
+
+ tmp = mr32(MVS_PCS);
+
+ for (i = 0; i < mvi->chip->srs_sz; i++) {
+ if (i == 16)
+ tmp = mr32(MVS_CTL);
+ offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+ if (!(tmp & offs)) {
+ *tfs = i;
+
+ if (i < 16)
+ mw32(MVS_PCS, tmp | offs);
+ else
+ mw32(MVS_CTL, tmp | offs);
+ tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ return 0;
+ }
+ }
+ return MVS_ID_NOT_MAPPED;
+}
+
+void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+ int i;
+ struct scatterlist *sg;
+ struct mvs_prd *buf_prd = prd;
+ for_each_sg(scatter, sg, nr, i) {
+ buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+ buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+ buf_prd++;
+ }
+}
+
+static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
+{
+ u32 phy_st;
+ mvs_write_port_cfg_addr(mvi, i,
+ PHYR_PHY_STAT);
+ phy_st = mvs_read_port_cfg_data(mvi, i);
+ if (phy_st & PHY_OOB_DTCTD)
+ return 1;
+ return 0;
+}
+
+static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
+ struct sas_identify_frame *id)
+
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ sas_phy->linkrate =
+ (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+
+ phy->minimum_linkrate =
+ (phy->phy_status &
+ PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+ phy->maximum_linkrate =
+ (phy->phy_status &
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
+
+ mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+ phy->dev_info = mvs_read_port_cfg_data(mvi, i);
+
+ mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
+ phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
+
+ mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+ phy->att_dev_sas_addr =
+ (u64) mvs_read_port_cfg_data(mvi, i) << 32;
+ mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+ phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+ phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
+}
+
+static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
+{
+ u32 tmp;
+ struct mvs_phy *phy = &mvi->phy[i];
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
+ tmp = mvs_read_port_vsr_data(mvi, i);
+ if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
+ SAS_LINK_RATE_1_5_GBPS)
+ tmp &= ~PHY_MODE6_LATECLK;
+ else
+ tmp |= PHY_MODE6_LATECLK;
+ mvs_write_port_vsr_data(mvi, i, tmp);
+}
+
+void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates)
+{
+ u32 lrmin = 0, lrmax = 0;
+ u32 tmp;
+
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ lrmin = (rates->minimum_linkrate << 8);
+ lrmax = (rates->maximum_linkrate << 12);
+
+ if (lrmin) {
+ tmp &= ~(0xf << 8);
+ tmp |= lrmin;
+ }
+ if (lrmax) {
+ tmp &= ~(0xf << 12);
+ tmp |= lrmax;
+ }
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
+}
+
+static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
+{
+ u32 tmp;
+ void __iomem *regs = mvi->regs;
+ tmp = mr32(MVS_PCS);
+ mw32(MVS_PCS, tmp & 0xFFFF);
+ mw32(MVS_PCS, tmp);
+ tmp = mr32(MVS_CTL);
+ mw32(MVS_CTL, tmp & 0xFFFF);
+ mw32(MVS_CTL, tmp);
+}
+
+
+u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex;
+ return ior32(SPI_DATA_REG_64XX);
+}
+
+void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+ void __iomem *regs = mvi->regs_ex;
+ iow32(SPI_DATA_REG_64XX, data);
+}
+
+
+int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
+ )
+{
+ u32 dwTmp;
+
+ dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
+ if (read)
+ dwTmp |= 1U<<23;
+
+ if (addr != MV_MAX_U32) {
+ dwTmp |= 1U<<22;
+ dwTmp |= (addr & 0x0003FFFF);
+ }
+
+ *dwCmd = dwTmp;
+ return 0;
+}
+
+
+int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+ void __iomem *regs = mvi->regs_ex;
+ int retry;
+
+ for (retry = 0; retry < 1; retry++) {
+ iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
+ iow32(SPI_CMD_REG_64XX, cmd);
+ iow32(SPI_CTRL_REG_64XX,
+ SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
+ }
+
+ return 0;
+}
+
+int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 i, dwTmp;
+
+ for (i = 0; i < timeout; i++) {
+ dwTmp = ior32(SPI_CTRL_REG_64XX);
+ if (!(dwTmp & SPI_CTRL_SPISTART))
+ return 0;
+ msleep(10);
+ }
+
+ return -1;
+}
+
+void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
+{
+ int i;
+ struct mvs_prd *buf_prd = prd;
+ dma_addr_t buf_dma = mvi->bulk_buffer_dma;
+
+ buf_prd += from;
+ for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+ buf_prd->addr = cpu_to_le64(buf_dma);
+ buf_prd->len = cpu_to_le32(buf_len);
+ ++buf_prd;
+ }
+}
+
+static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp = 0;
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ if (time == 0) {
+ mw32(MVS_INT_COAL, 0);
+ mw32(MVS_INT_COAL_TMOUT, 0x10000);
+ } else {
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
+
+ tmp = 0x10000 | time;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+ }
+}
+
+const struct mvs_dispatch mvs_64xx_dispatch = {
+ "mv64xx",
+ mvs_64xx_init,
+ NULL,
+ mvs_64xx_ioremap,
+ mvs_64xx_iounmap,
+ mvs_64xx_isr,
+ mvs_64xx_isr_status,
+ mvs_64xx_interrupt_enable,
+ mvs_64xx_interrupt_disable,
+ mvs_read_phy_ctl,
+ mvs_write_phy_ctl,
+ mvs_read_port_cfg_data,
+ mvs_write_port_cfg_data,
+ mvs_write_port_cfg_addr,
+ mvs_read_port_vsr_data,
+ mvs_write_port_vsr_data,
+ mvs_write_port_vsr_addr,
+ mvs_read_port_irq_stat,
+ mvs_write_port_irq_stat,
+ mvs_read_port_irq_mask,
+ mvs_write_port_irq_mask,
+ mvs_64xx_command_active,
+ mvs_64xx_clear_srs_irq,
+ mvs_64xx_issue_stop,
+ mvs_start_delivery,
+ mvs_rx_update,
+ mvs_int_full,
+ mvs_64xx_assign_reg_set,
+ mvs_64xx_free_reg_set,
+ mvs_get_prd_size,
+ mvs_get_prd_count,
+ mvs_64xx_make_prd,
+ mvs_64xx_detect_porttype,
+ mvs_64xx_oob_done,
+ mvs_64xx_fix_phy_info,
+ mvs_64xx_phy_work_around,
+ mvs_64xx_phy_set_link_rate,
+ mvs_hw_max_link_rate,
+ mvs_64xx_phy_disable,
+ mvs_64xx_phy_enable,
+ mvs_64xx_phy_reset,
+ mvs_64xx_stp_reset,
+ mvs_64xx_clear_active_cmds,
+ mvs_64xx_spi_read_data,
+ mvs_64xx_spi_write_data,
+ mvs_64xx_spi_buildcmd,
+ mvs_64xx_spi_issuecmd,
+ mvs_64xx_spi_waitdataready,
+ mvs_64xx_fix_dma,
+ mvs_64xx_tune_interrupt,
+ NULL,
+};
+
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 000000000..545889bd9
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,152 @@
+/*
+ * Marvell 88SE64xx hardware specific head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+ MVS_GBL_CTL = 0x04, /* global control */
+ MVS_GBL_INT_STAT = 0x08, /* global irq status */
+ MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
+
+ MVS_PHY_CTL = 0x40, /* SOC PHY Control */
+ MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
+
+ MVS_GBL_PORT_TYPE = 0xa0, /* port type */
+
+ MVS_CTL = 0x100, /* SAS/SATA port configuration */
+ MVS_PCS = 0x104, /* SAS/SATA port control/status */
+ MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
+ MVS_CMD_LIST_HI = 0x10C,
+ MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
+ MVS_RX_FIS_HI = 0x114,
+
+ MVS_TX_CFG = 0x120, /* TX configuration */
+ MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
+ MVS_TX_HI = 0x128,
+
+ MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
+ MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
+ MVS_RX_CFG = 0x134, /* RX configuration */
+ MVS_RX_LO = 0x138, /* RX (completion) ring addr */
+ MVS_RX_HI = 0x13C,
+ MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
+
+ MVS_INT_COAL = 0x148, /* Int coalescing config */
+ MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
+ MVS_INT_STAT = 0x150, /* Central int status */
+ MVS_INT_MASK = 0x154, /* Central int enable */
+ MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
+ MVS_INT_MASK_SRS_0 = 0x15C,
+
+ /* ports 1-3 follow after this */
+ MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
+ MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
+ /* ports 5-7 follow after this */
+ MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
+ MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
+ /* ports 5-7 follow after this */
+ MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
+
+ MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
+ MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
+ MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
+ /* ports 5-7 follow after this */
+ MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
+ MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
+ MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
+ /* ports 5-7 follow after this */
+ MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
+ MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
+};
+
+enum pci_cfg_registers {
+ PCR_PHY_CTL = 0x40,
+ PCR_PHY_CTL2 = 0x90,
+ PCR_DEV_CTRL = 0xE8,
+ PCR_LINK_STAT = 0xF2,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+ VSR_PHY_STAT = 0x00, /* Phy Status */
+ VSR_PHY_MODE1 = 0x01, /* phy tx */
+ VSR_PHY_MODE2 = 0x02, /* tx scc */
+ VSR_PHY_MODE3 = 0x03, /* pll */
+ VSR_PHY_MODE4 = 0x04, /* VCO */
+ VSR_PHY_MODE5 = 0x05, /* Rx */
+ VSR_PHY_MODE6 = 0x06, /* CDR */
+ VSR_PHY_MODE7 = 0x07, /* Impedance */
+ VSR_PHY_MODE8 = 0x08, /* Voltage */
+ VSR_PHY_MODE9 = 0x09, /* Test */
+ VSR_PHY_MODE10 = 0x0A, /* Power */
+ VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
+ VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */
+ VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */
+};
+
+enum chip_register_bits {
+ PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+ (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+#define MAX_SG_ENTRY 64
+
+struct mvs_prd {
+ __le64 addr; /* 64-bit buffer address */
+ __le32 reserved;
+ __le32 len; /* 16-bit length */
+};
+
+#define SPI_CTRL_REG 0xc0
+#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
+#define SPI_CTRL_SPIRDY (1U<<22)
+#define SPI_CTRL_SPISTART (1U<<20)
+
+#define SPI_CMD_REG 0xc4
+#define SPI_DATA_REG 0xc8
+
+#define SPI_CTRL_REG_64XX 0x10
+#define SPI_CMD_REG_64XX 0x14
+#define SPI_DATA_REG_64XX 0x18
+
+#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 000000000..9270d15ff
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,1061 @@
+/*
+ * Marvell 88SE94xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_94xx.h"
+#include "mv_chips.h"
+
+static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+ u32 reg;
+ struct mvs_phy *phy = &mvi->phy[i];
+ u32 phy_status;
+
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
+ reg = mvs_read_port_vsr_data(mvi, i);
+ phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ switch (phy_status) {
+ case 0x10:
+ phy->phy_type |= PORT_TYPE_SAS;
+ break;
+ case 0x1d:
+ default:
+ phy->phy_type |= PORT_TYPE_SATA;
+ break;
+ }
+}
+
+void set_phy_tuning(struct mvs_info *mvi, int phy_id,
+ struct phy_tuning phy_tuning)
+{
+ u32 tmp, setting_0 = 0, setting_1 = 0;
+ u8 i;
+
+ /* Remap information for B0 chip:
+ *
+ * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
+ * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
+ * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
+ * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
+ * R10h -> R120h[15:0] (Generation 2 Setting 1)
+ * R11h -> R120h[31:16] (Generation 3 Setting 0)
+ * R12h -> R124h[15:0] (Generation 3 Setting 1)
+ * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
+ */
+
+ /* A0 has a different set of registers */
+ if (mvi->pdev->revision == VANIR_A0_REV)
+ return;
+
+ for (i = 0; i < 3; i++) {
+ /* loop 3 times, set Gen 1, Gen 2, Gen 3 */
+ switch (i) {
+ case 0:
+ setting_0 = GENERATION_1_SETTING;
+ setting_1 = GENERATION_1_2_SETTING;
+ break;
+ case 1:
+ setting_0 = GENERATION_1_2_SETTING;
+ setting_1 = GENERATION_2_3_SETTING;
+ break;
+ case 2:
+ setting_0 = GENERATION_2_3_SETTING;
+ setting_1 = GENERATION_3_4_SETTING;
+ break;
+ }
+
+ /* Set:
+ *
+ * Transmitter Emphasis Enable
+ * Transmitter Emphasis Amplitude
+ * Transmitter Amplitude
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~(0xFBE << 16);
+ tmp |= (((phy_tuning.trans_emp_en << 11) |
+ (phy_tuning.trans_emp_amp << 7) |
+ (phy_tuning.trans_amp << 1)) << 16);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* Set Transmitter Amplitude Adjust */
+ mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~(0xC000);
+ tmp |= (phy_tuning.trans_amp_adj << 14);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+ }
+}
+
+void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
+ struct ffe_control ffe)
+{
+ u32 tmp;
+
+ /* Don't run this if A0/B0 */
+ if ((mvi->pdev->revision == VANIR_A0_REV)
+ || (mvi->pdev->revision == VANIR_B0_REV))
+ return;
+
+ /* FFE Resistor and Capacitor */
+ /* R10Ch DFE Resolution Control/Squelch and FFE Setting
+ *
+ * FFE_FORCE [7]
+ * FFE_RES_SEL [6:4]
+ * FFE_CAP_SEL [3:0]
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0xFF;
+
+ /* Read from HBA_Info_Page */
+ tmp |= ((0x1 << 7) |
+ (ffe.ffe_rss_sel << 4) |
+ (ffe.ffe_cap_sel << 0));
+
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R064h PHY Mode Register 1
+ *
+ * DFE_DIS 18
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0x40001;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= (0 << 18);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R110h DFE F0-F1 Coefficient Control/DFE Update Control
+ *
+ * DFE_UPDATE_EN [11:6]
+ * DFE_FX_FORCE [5:0]
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0xFFF;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= ((0x3F << 6) | (0x0 << 0));
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
+ *
+ * FFE_TRAIN_EN 3
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0x8;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= (0 << 3);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+}
+
+/*Notice: this function must be called when phy is disabled*/
+void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
+{
+ union reg_phy_cfg phy_cfg, phy_cfg_tmp;
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
+ phy_cfg.v = 0;
+ phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
+ phy_cfg.u.sas_support = 1;
+ phy_cfg.u.sata_support = 1;
+ phy_cfg.u.sata_host_mode = 1;
+
+ switch (rate) {
+ case 0x0:
+ /* support 1.5 Gbps */
+ phy_cfg.u.speed_support = 1;
+ phy_cfg.u.snw_3_support = 0;
+ phy_cfg.u.tx_lnk_parity = 1;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
+ break;
+ case 0x1:
+
+ /* support 1.5, 3.0 Gbps */
+ phy_cfg.u.speed_support = 3;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
+ phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
+ break;
+ case 0x2:
+ default:
+ /* support 1.5, 3.0, 6.0 Gbps */
+ phy_cfg.u.speed_support = 7;
+ phy_cfg.u.snw_3_support = 1;
+ phy_cfg.u.tx_lnk_parity = 1;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
+ phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
+ break;
+ }
+ mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
+}
+
+static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
+{
+ u32 temp;
+ temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
+ if (temp == 0xFFFFFFFFL) {
+ mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
+ mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
+ mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
+ }
+
+ temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
+ if (temp == 0xFFL) {
+ switch (mvi->pdev->revision) {
+ case VANIR_A0_REV:
+ case VANIR_B0_REV:
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
+ break;
+ case VANIR_C0_REV:
+ case VANIR_C1_REV:
+ case VANIR_C2_REV:
+ default:
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
+ break;
+ }
+ }
+
+ temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
+ if (temp == 0xFFL)
+ /*set default phy_rate = 6Gbps*/
+ mvi->hba_info_param.phy_rate[phy_id] = 0x2;
+
+ set_phy_tuning(mvi, phy_id,
+ mvi->hba_info_param.phy_tuning[phy_id]);
+ set_phy_ffe_tuning(mvi, phy_id,
+ mvi->hba_info_param.ffe_ctl[phy_id]);
+ set_phy_rate(mvi, phy_id,
+ mvi->hba_info_param.phy_rate[phy_id]);
+}
+
+static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_PCS);
+ tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+ u32 tmp;
+ u32 delay = 5000;
+ if (hard == MVS_PHY_TUNE) {
+ mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
+ tmp = mvs_read_port_cfg_data(mvi, phy_id);
+ mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
+ mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
+ return;
+ }
+ tmp = mvs_read_port_irq_stat(mvi, phy_id);
+ tmp &= ~PHYEV_RDY_CH;
+ mvs_write_port_irq_stat(mvi, phy_id, tmp);
+ if (hard) {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ tmp |= PHY_RST_HARD;
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ do {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ udelay(10);
+ delay--;
+ } while ((tmp & PHY_RST_HARD) && delay);
+ if (!delay)
+ mv_dprintk("phy hard reset failed.\n");
+ } else {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ tmp |= PHY_RST;
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ }
+}
+
+static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+ u32 tmp;
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
+}
+
+static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+ u32 tmp;
+ u8 revision = 0;
+
+ revision = mvi->pdev->revision;
+ if (revision == VANIR_A0_REV) {
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+ }
+ if (revision == VANIR_B0_REV) {
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
+ }
+
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp |= bit(0);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
+}
+
+static int mvs_94xx_init(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ int i;
+ u32 tmp, cctl;
+ u8 revision;
+
+ revision = mvi->pdev->revision;
+ mvs_show_pcie_usage(mvi);
+ if (mvi->flags & MVF_FLAG_SOC) {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ mw32(MVS_PHY_CTL, tmp);
+ }
+
+ /* Init Chip */
+ /* make sure RST is set; HBA_RST /should/ have done that for us */
+ cctl = mr32(MVS_CTL) & 0xFFFF;
+ if (cctl & CCTL_RST)
+ cctl &= ~CCTL_RST;
+ else
+ mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+ if (mvi->flags & MVF_FLAG_SOC) {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_COM_ON;
+ tmp &= ~PCTL_PHY_DSBL;
+ tmp |= PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ tmp &= ~PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ }
+
+ /* disable Multiplexing, enable phy implemented */
+ mw32(MVS_PORTS_IMP, 0xFF);
+
+ if (revision == VANIR_A0_REV) {
+ mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
+ mw32(MVS_PA_VSR_PORT, 0x00018080);
+ }
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
+ if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
+ /* set 6G/3G/1.5G, multiplexing, without SSC */
+ mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
+ else
+ /* set 6G/3G/1.5G, multiplexing, with and without SSC */
+ mw32(MVS_PA_VSR_PORT, 0x0084fffe);
+
+ if (revision == VANIR_B0_REV) {
+ mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
+ mw32(MVS_PA_VSR_PORT, 0x08001006);
+ mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
+ mw32(MVS_PA_VSR_PORT, 0x0000705f);
+ }
+
+ /* reset control */
+ mw32(MVS_PCS, 0); /* MVS_PCS */
+ mw32(MVS_STP_REG_SET_0, 0);
+ mw32(MVS_STP_REG_SET_1, 0);
+
+ /* init phys */
+ mvs_phy_hacks(mvi);
+
+ /* disable non data frame retry */
+ tmp = mvs_cr32(mvi, CMD_SAS_CTL1);
+ if ((revision == VANIR_A0_REV) ||
+ (revision == VANIR_B0_REV) ||
+ (revision == VANIR_C0_REV)) {
+ tmp &= ~0xffff;
+ tmp |= 0x007f;
+ mvs_cw32(mvi, CMD_SAS_CTL1, tmp);
+ }
+
+ /* set LED blink when IO*/
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
+ tmp = mr32(MVS_PA_VSR_PORT);
+ tmp &= 0xFFFF00FF;
+ tmp |= 0x00003300;
+ mw32(MVS_PA_VSR_PORT, tmp);
+
+ mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+ mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+ mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+ mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+ mw32(MVS_TX_LO, mvi->tx_dma);
+ mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+ mw32(MVS_RX_LO, mvi->rx_dma);
+ mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvs_94xx_phy_disable(mvi, i);
+ /* set phy local SAS address */
+ mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
+ cpu_to_le64(mvi->phy[i].dev_sas_addr));
+
+ mvs_94xx_enable_xmt(mvi, i);
+ mvs_94xx_config_reg_from_hba(mvi, i);
+ mvs_94xx_phy_enable(mvi, i);
+
+ mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
+ msleep(500);
+ mvs_94xx_detect_porttype(mvi, i);
+ }
+
+ if (mvi->flags & MVF_FLAG_SOC) {
+ /* set select registers */
+ writel(0x0E008000, regs + 0x000);
+ writel(0x59000008, regs + 0x004);
+ writel(0x20, regs + 0x008);
+ writel(0x20, regs + 0x00c);
+ writel(0x20, regs + 0x010);
+ writel(0x20, regs + 0x014);
+ writel(0x20, regs + 0x018);
+ writel(0x20, regs + 0x01c);
+ }
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ /* clear phy int status */
+ tmp = mvs_read_port_irq_stat(mvi, i);
+ tmp &= ~PHYEV_SIG_FIS;
+ mvs_write_port_irq_stat(mvi, i, tmp);
+
+ /* set phy int mask */
+ tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
+ PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
+ mvs_write_port_irq_mask(mvi, i, tmp);
+
+ msleep(100);
+ mvs_update_phyinfo(mvi, i, 1);
+ }
+
+ /* little endian for open address and command table, etc. */
+ cctl = mr32(MVS_CTL);
+ cctl |= CCTL_ENDIAN_CMD;
+ cctl &= ~CCTL_ENDIAN_OPEN;
+ cctl |= CCTL_ENDIAN_RSP;
+ mw32_f(MVS_CTL, cctl);
+
+ /* reset CMD queue */
+ tmp = mr32(MVS_PCS);
+ tmp |= PCS_CMD_RST;
+ tmp &= ~PCS_SELF_CLEAR;
+ mw32(MVS_PCS, tmp);
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ tmp = 0;
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
+
+ /* default interrupt coalescing time is 128us */
+ tmp = 0x10000 | interrupt_coalescing;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+
+ /* ladies and gentlemen, start your engines */
+ mw32(MVS_TX_CFG, 0);
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+ /* enable CMD/CMPL_Q/RESP mode */
+ mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
+ PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+ /* enable completion queue interrupt */
+ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+ CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
+ tmp |= CINT_PHY_MASK;
+ mw32(MVS_INT_MASK, tmp);
+
+ tmp = mvs_cr32(mvi, CMD_LINK_TIMER);
+ tmp |= 0xFFFF0000;
+ mvs_cw32(mvi, CMD_LINK_TIMER, tmp);
+
+ /* tune STP performance */
+ tmp = 0x003F003F;
+ mvs_cw32(mvi, CMD_PL_TIMER, tmp);
+
+ /* This can improve expander large block size seq write performance */
+ tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1);
+ tmp |= 0xFFFF007F;
+ mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp);
+
+ /* change the connection open-close behavior (bit 9)
+ * set bit8 to 1 for performance tuning */
+ tmp = mvs_cr32(mvi, CMD_SL_MODE0);
+ tmp |= 0x00000300;
+ /* set bit0 to 0 to enable retry for no_dest reject case */
+ tmp &= 0xFFFFFFFE;
+ mvs_cw32(mvi, CMD_SL_MODE0, tmp);
+
+ /* Enable SRS interrupt */
+ mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+ return 0;
+}
+
+static int mvs_94xx_ioremap(struct mvs_info *mvi)
+{
+ if (!mvs_ioremap(mvi, 2, -1)) {
+ mvi->regs_ex = mvi->regs + 0x10200;
+ mvi->regs += 0x20000;
+ if (mvi->id == 1)
+ mvi->regs += 0x4000;
+ return 0;
+ }
+ return -1;
+}
+
+static void mvs_94xx_iounmap(struct mvs_info *mvi)
+{
+ if (mvi->regs) {
+ mvi->regs -= 0x20000;
+ if (mvi->id == 1)
+ mvi->regs -= 0x4000;
+ mvs_iounmap(mvi->regs);
+ }
+}
+
+static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+ tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
+ mw32(MVS_GBL_INT_STAT, tmp);
+ writel(tmp, regs + 0x0C);
+ writel(tmp, regs + 0x10);
+ writel(tmp, regs + 0x14);
+ writel(tmp, regs + 0x18);
+ mw32(MVS_GBL_CTL, tmp);
+}
+
+static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+
+ tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
+ mw32(MVS_GBL_INT_STAT, tmp);
+ writel(tmp, regs + 0x0C);
+ writel(tmp, regs + 0x10);
+ writel(tmp, regs + 0x14);
+ writel(tmp, regs + 0x18);
+ mw32(MVS_GBL_CTL, tmp);
+}
+
+static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 stat = 0;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ stat = mr32(MVS_GBL_INT_STAT);
+
+ if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))
+ return 0;
+ }
+ return stat;
+}
+
+static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+ void __iomem *regs = mvi->regs;
+
+ if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
+ ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {
+ mw32_f(MVS_INT_STAT, CINT_DONE);
+
+ spin_lock(&mvi->lock);
+ mvs_int_full(mvi);
+ spin_unlock(&mvi->lock);
+ }
+ return IRQ_HANDLED;
+}
+
+static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+ u32 tmp;
+ tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
+ if (tmp && 1 << (slot_idx % 32)) {
+ mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
+ mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
+ 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi,
+ MVS_COMMAND_ACTIVE + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+ }
+}
+
+void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ mv_dprintk("check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ tmp = mr32(MVS_INT_STAT_SRS_1);
+ if (tmp) {
+ mv_dprintk("check SRS 1 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_1, tmp);
+ }
+ } else {
+ if (reg_set > 31)
+ tmp = mr32(MVS_INT_STAT_SRS_1);
+ else
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+
+ if (tmp & (1 << (reg_set % 32))) {
+ mv_dprintk("register set 0x%x was stopped.\n", reg_set);
+ if (reg_set > 31)
+ mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
+ else
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
+}
+
+static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+ u32 tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ mvs_94xx_clear_srs_irq(mvi, 0, 1);
+
+ tmp = mr32(MVS_INT_STAT);
+ mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
+ tmp = mr32(MVS_PCS) | 0xFF00;
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 err_0, err_1;
+ u8 i;
+ struct mvs_device *device;
+
+ err_0 = mr32(MVS_NON_NCQ_ERR_0);
+ err_1 = mr32(MVS_NON_NCQ_ERR_1);
+
+ mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
+ err_0, err_1);
+ for (i = 0; i < 32; i++) {
+ if (err_0 & bit(i)) {
+ device = mvs_find_dev_by_reg_set(mvi, i);
+ if (device)
+ mvs_release_task(mvi, device->sas_device);
+ }
+ if (err_1 & bit(i)) {
+ device = mvs_find_dev_by_reg_set(mvi, i+32);
+ if (device)
+ mvs_release_task(mvi, device->sas_device);
+ }
+ }
+
+ mw32(MVS_NON_NCQ_ERR_0, err_0);
+ mw32(MVS_NON_NCQ_ERR_1, err_1);
+}
+
+static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u8 reg_set = *tfs;
+
+ if (*tfs == MVS_ID_NOT_MAPPED)
+ return;
+
+ mvi->sata_reg_set &= ~bit(reg_set);
+ if (reg_set < 32)
+ w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
+ else
+ w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
+
+ *tfs = MVS_ID_NOT_MAPPED;
+
+ return;
+}
+
+static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ int i;
+ void __iomem *regs = mvi->regs;
+
+ if (*tfs != MVS_ID_NOT_MAPPED)
+ return 0;
+
+ i = mv_ffc64(mvi->sata_reg_set);
+ if (i >= 32) {
+ mvi->sata_reg_set |= bit(i);
+ w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
+ *tfs = i;
+ return 0;
+ } else if (i >= 0) {
+ mvi->sata_reg_set |= bit(i);
+ w_reg_set_enable(i, (u32)mvi->sata_reg_set);
+ *tfs = i;
+ return 0;
+ }
+ return MVS_ID_NOT_MAPPED;
+}
+
+static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+ int i;
+ struct scatterlist *sg;
+ struct mvs_prd *buf_prd = prd;
+ struct mvs_prd_imt im_len;
+ *(u32 *)&im_len = 0;
+ for_each_sg(scatter, sg, nr, i) {
+ buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+ im_len.len = sg_dma_len(sg);
+ buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
+ buf_prd++;
+ }
+}
+
+static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
+{
+ u32 phy_st;
+ phy_st = mvs_read_phy_ctl(mvi, i);
+ if (phy_st & PHY_READY_MASK)
+ return 1;
+ return 0;
+}
+
+static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
+ struct sas_identify_frame *id)
+{
+ int i;
+ u32 id_frame[7];
+
+ for (i = 0; i < 7; i++) {
+ mvs_write_port_cfg_addr(mvi, port_id,
+ CONFIG_ID_FRAME0 + i * 4);
+ id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
+ }
+ memcpy(id, id_frame, 28);
+}
+
+static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
+ struct sas_identify_frame *id)
+{
+ int i;
+ u32 id_frame[7];
+
+ for (i = 0; i < 7; i++) {
+ mvs_write_port_cfg_addr(mvi, port_id,
+ CONFIG_ATT_ID_FRAME0 + i * 4);
+ id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
+ mv_dprintk("94xx phy %d atta frame %d %x.\n",
+ port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
+ }
+ memcpy(id, id_frame, 28);
+}
+
+static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
+{
+ u32 att_dev_info = 0;
+
+ att_dev_info |= id->dev_type;
+ if (id->stp_iport)
+ att_dev_info |= PORT_DEV_STP_INIT;
+ if (id->smp_iport)
+ att_dev_info |= PORT_DEV_SMP_INIT;
+ if (id->ssp_iport)
+ att_dev_info |= PORT_DEV_SSP_INIT;
+ if (id->stp_tport)
+ att_dev_info |= PORT_DEV_STP_TRGT;
+ if (id->smp_tport)
+ att_dev_info |= PORT_DEV_SMP_TRGT;
+ if (id->ssp_tport)
+ att_dev_info |= PORT_DEV_SSP_TRGT;
+
+ att_dev_info |= (u32)id->phy_id<<24;
+ return att_dev_info;
+}
+
+static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
+{
+ return mvs_94xx_make_dev_info(id);
+}
+
+static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
+ struct sas_identify_frame *id)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
+ sas_phy->linkrate =
+ (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+ sas_phy->linkrate += 0x8;
+ mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ mvs_94xx_get_dev_identify_frame(mvi, i, id);
+ phy->dev_info = mvs_94xx_make_dev_info(id);
+
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ mvs_94xx_get_att_identify_frame(mvi, i, id);
+ phy->att_dev_info = mvs_94xx_make_att_info(id);
+ phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
+ } else {
+ phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
+ }
+
+ /* enable spin up bit */
+ mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+ mvs_write_port_cfg_data(mvi, i, 0x04);
+
+}
+
+void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates)
+{
+ u32 lrmax = 0;
+ u32 tmp;
+
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
+
+ if (lrmax) {
+ tmp &= ~(0x3 << 12);
+ tmp |= lrmax;
+ }
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
+}
+
+static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
+{
+ u32 tmp;
+ void __iomem *regs = mvi->regs;
+ tmp = mr32(MVS_STP_REG_SET_0);
+ mw32(MVS_STP_REG_SET_0, 0);
+ mw32(MVS_STP_REG_SET_0, tmp);
+ tmp = mr32(MVS_STP_REG_SET_1);
+ mw32(MVS_STP_REG_SET_1, 0);
+ mw32(MVS_STP_REG_SET_1, tmp);
+}
+
+
+u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ return mr32(SPI_RD_DATA_REG_94XX);
+}
+
+void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ mw32(SPI_RD_DATA_REG_94XX, data);
+}
+
+
+int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
+ )
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ u32 dwTmp;
+
+ dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
+ if (read)
+ dwTmp |= SPI_CTRL_READ_94XX;
+
+ if (addr != MV_MAX_U32) {
+ mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
+ dwTmp |= SPI_ADDR_VLD_94XX;
+ }
+
+ *dwCmd = dwTmp;
+ return 0;
+}
+
+
+int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
+
+ return 0;
+}
+
+int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ u32 i, dwTmp;
+
+ for (i = 0; i < timeout; i++) {
+ dwTmp = mr32(SPI_CTRL_REG_94XX);
+ if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
+ return 0;
+ msleep(10);
+ }
+
+ return -1;
+}
+
+void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
+{
+ int i;
+ struct mvs_prd *buf_prd = prd;
+ dma_addr_t buf_dma;
+ struct mvs_prd_imt im_len;
+
+ *(u32 *)&im_len = 0;
+ buf_prd += from;
+
+#define PRD_CHAINED_ENTRY 0x01
+ if ((mvi->pdev->revision == VANIR_A0_REV) ||
+ (mvi->pdev->revision == VANIR_B0_REV))
+ buf_dma = (phy_mask <= 0x08) ?
+ mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
+ else
+ return;
+
+ for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
+ if (i == MAX_SG_ENTRY - 1) {
+ buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
+ im_len.len = 2;
+ im_len.misc_ctl = PRD_CHAINED_ENTRY;
+ } else {
+ buf_prd->addr = cpu_to_le64(buf_dma);
+ im_len.len = buf_len;
+ }
+ buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
+ }
+}
+
+static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp = 0;
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ if (time == 0) {
+ mw32(MVS_INT_COAL, 0);
+ mw32(MVS_INT_COAL_TMOUT, 0x10000);
+ } else {
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
+
+ tmp = 0x10000 | time;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+ }
+
+}
+
+const struct mvs_dispatch mvs_94xx_dispatch = {
+ "mv94xx",
+ mvs_94xx_init,
+ NULL,
+ mvs_94xx_ioremap,
+ mvs_94xx_iounmap,
+ mvs_94xx_isr,
+ mvs_94xx_isr_status,
+ mvs_94xx_interrupt_enable,
+ mvs_94xx_interrupt_disable,
+ mvs_read_phy_ctl,
+ mvs_write_phy_ctl,
+ mvs_read_port_cfg_data,
+ mvs_write_port_cfg_data,
+ mvs_write_port_cfg_addr,
+ mvs_read_port_vsr_data,
+ mvs_write_port_vsr_data,
+ mvs_write_port_vsr_addr,
+ mvs_read_port_irq_stat,
+ mvs_write_port_irq_stat,
+ mvs_read_port_irq_mask,
+ mvs_write_port_irq_mask,
+ mvs_94xx_command_active,
+ mvs_94xx_clear_srs_irq,
+ mvs_94xx_issue_stop,
+ mvs_start_delivery,
+ mvs_rx_update,
+ mvs_int_full,
+ mvs_94xx_assign_reg_set,
+ mvs_94xx_free_reg_set,
+ mvs_get_prd_size,
+ mvs_get_prd_count,
+ mvs_94xx_make_prd,
+ mvs_94xx_detect_porttype,
+ mvs_94xx_oob_done,
+ mvs_94xx_fix_phy_info,
+ NULL,
+ mvs_94xx_phy_set_link_rate,
+ mvs_hw_max_link_rate,
+ mvs_94xx_phy_disable,
+ mvs_94xx_phy_enable,
+ mvs_94xx_phy_reset,
+ NULL,
+ mvs_94xx_clear_active_cmds,
+ mvs_94xx_spi_read_data,
+ mvs_94xx_spi_write_data,
+ mvs_94xx_spi_buildcmd,
+ mvs_94xx_spi_issuecmd,
+ mvs_94xx_spi_waitdataready,
+ mvs_94xx_fix_dma,
+ mvs_94xx_tune_interrupt,
+ mvs_94xx_non_spec_ncq_error,
+};
+
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 000000000..14e197497
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,278 @@
+/*
+ * Marvell 88SE94xx hardware specific head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS94XX_REG_H_
+#define _MVS94XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
+
+enum VANIR_REVISION_ID {
+ VANIR_A0_REV = 0xA0,
+ VANIR_B0_REV = 0x01,
+ VANIR_C0_REV = 0x02,
+ VANIR_C1_REV = 0x03,
+ VANIR_C2_REV = 0xC2,
+};
+
+enum hw_registers {
+ MVS_GBL_CTL = 0x04, /* global control */
+ MVS_GBL_INT_STAT = 0x00, /* global irq status */
+ MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
+
+ MVS_PHY_CTL = 0x40, /* SOC PHY Control */
+ MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
+
+ MVS_GBL_PORT_TYPE = 0xa0, /* port type */
+
+ MVS_CTL = 0x100, /* SAS/SATA port configuration */
+ MVS_PCS = 0x104, /* SAS/SATA port control/status */
+ MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
+ MVS_CMD_LIST_HI = 0x10C,
+ MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
+ MVS_RX_FIS_HI = 0x114,
+ MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
+ MVS_STP_REG_SET_1 = 0x11C,
+ MVS_TX_CFG = 0x120, /* TX configuration */
+ MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
+ MVS_TX_HI = 0x128,
+
+ MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
+ MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
+ MVS_RX_CFG = 0x134, /* RX configuration */
+ MVS_RX_LO = 0x138, /* RX (completion) ring addr */
+ MVS_RX_HI = 0x13C,
+ MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
+
+ MVS_INT_COAL = 0x148, /* Int coalescing config */
+ MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
+ MVS_INT_STAT = 0x150, /* Central int status */
+ MVS_INT_MASK = 0x154, /* Central int enable */
+ MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
+ MVS_INT_MASK_SRS_0 = 0x15C,
+ MVS_INT_STAT_SRS_1 = 0x160,
+ MVS_INT_MASK_SRS_1 = 0x164,
+ MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
+ MVS_NON_NCQ_ERR_1 = 0x16C,
+ MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
+ MVS_CMD_DATA = 0x174, /* Command register port (data) */
+ MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
+ MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
+ /* ports 5-7 follow after this */
+ MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
+ MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
+ /* ports 5-7 follow after this */
+ MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
+ MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
+ /* ports 5-7 follow after this */
+ MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
+ MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
+
+ /* phys 1-3 follow after this */
+ MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
+ MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
+ /* phys 1-3 follow after this */
+ /* multiplexing */
+ MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
+ MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
+ MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
+ MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
+ MVS_COMMAND_ACTIVE = 0x300,
+};
+
+enum pci_cfg_registers {
+ PCR_PHY_CTL = 0x40,
+ PCR_PHY_CTL2 = 0x90,
+ PCR_DEV_CTRL = 0x78,
+ PCR_LINK_STAT = 0x82,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+ VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
+ VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
+ VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
+ VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
+ VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
+ VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
+ VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
+ VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
+ VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
+ VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
+ VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
+ VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
+ VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
+
+ VSR_PHY_FFE_CONTROL = 0x10C,
+ VSR_PHY_DFE_UPDATE_CRTL = 0x110,
+ VSR_REF_CLOCK_CRTL = 0x1A0,
+};
+
+enum chip_register_bits {
+ PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+ (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+enum pci_interrupt_cause {
+ /* MAIN_IRQ_CAUSE (R10200) Bits*/
+ MVS_IRQ_COM_IN_I2O_IOP0 = (1 << 0),
+ MVS_IRQ_COM_IN_I2O_IOP1 = (1 << 1),
+ MVS_IRQ_COM_IN_I2O_IOP2 = (1 << 2),
+ MVS_IRQ_COM_IN_I2O_IOP3 = (1 << 3),
+ MVS_IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
+ MVS_IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
+ MVS_IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
+ MVS_IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
+ MVS_IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
+ MVS_IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
+ MVS_IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
+ MVS_IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
+ MVS_IRQ_PCIF_DRBL0 = (1 << 12),
+ MVS_IRQ_PCIF_DRBL1 = (1 << 13),
+ MVS_IRQ_PCIF_DRBL2 = (1 << 14),
+ MVS_IRQ_PCIF_DRBL3 = (1 << 15),
+ MVS_IRQ_XOR_A = (1 << 16),
+ MVS_IRQ_XOR_B = (1 << 17),
+ MVS_IRQ_SAS_A = (1 << 18),
+ MVS_IRQ_SAS_B = (1 << 19),
+ MVS_IRQ_CPU_CNTRL = (1 << 20),
+ MVS_IRQ_GPIO = (1 << 21),
+ MVS_IRQ_UART = (1 << 22),
+ MVS_IRQ_SPI = (1 << 23),
+ MVS_IRQ_I2C = (1 << 24),
+ MVS_IRQ_SGPIO = (1 << 25),
+ MVS_IRQ_COM_ERR = (1 << 29),
+ MVS_IRQ_I2O_ERR = (1 << 30),
+ MVS_IRQ_PCIE_ERR = (1 << 31),
+};
+
+union reg_phy_cfg {
+ u32 v;
+ struct {
+ u32 phy_reset:1;
+ u32 sas_support:1;
+ u32 sata_support:1;
+ u32 sata_host_mode:1;
+ /*
+ * bit 2: 6Gbps support
+ * bit 1: 3Gbps support
+ * bit 0: 1.5Gbps support
+ */
+ u32 speed_support:3;
+ u32 snw_3_support:1;
+ u32 tx_lnk_parity:1;
+ /*
+ * bit 5: G1 (1.5Gbps) Without SSC
+ * bit 4: G1 (1.5Gbps) with SSC
+ * bit 3: G2 (3.0Gbps) Without SSC
+ * bit 2: G2 (3.0Gbps) with SSC
+ * bit 1: G3 (6.0Gbps) without SSC
+ * bit 0: G3 (6.0Gbps) with SSC
+ */
+ u32 tx_spt_phs_lnk_rate:6;
+ /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
+ u32 tx_lgcl_lnk_rate:4;
+ u32 tx_ssc_type:1;
+ u32 sata_spin_up_spt:1;
+ u32 sata_spin_up_en:1;
+ u32 bypass_oob:1;
+ u32 disable_phy:1;
+ u32 rsvd:8;
+ } u;
+};
+
+#define MAX_SG_ENTRY 255
+
+struct mvs_prd_imt {
+#ifndef __BIG_ENDIAN
+ __le32 len:22;
+ u8 _r_a:2;
+ u8 misc_ctl:4;
+ u8 inter_sel:4;
+#else
+ u32 inter_sel:4;
+ u32 misc_ctl:4;
+ u32 _r_a:2;
+ u32 len:22;
+#endif
+};
+
+struct mvs_prd {
+ /* 64-bit buffer address */
+ __le64 addr;
+ /* 22-bit length */
+ __le32 im_len;
+} __attribute__ ((packed));
+
+/*
+ * these registers are accessed through port vendor
+ * specific address/data registers
+ */
+enum sas_sata_phy_regs {
+ GENERATION_1_SETTING = 0x118,
+ GENERATION_1_2_SETTING = 0x11C,
+ GENERATION_2_3_SETTING = 0x120,
+ GENERATION_3_4_SETTING = 0x124,
+};
+
+#define SPI_CTRL_REG_94XX 0xc800
+#define SPI_ADDR_REG_94XX 0xc804
+#define SPI_WR_DATA_REG_94XX 0xc808
+#define SPI_RD_DATA_REG_94XX 0xc80c
+#define SPI_CTRL_READ_94XX (1U << 2)
+#define SPI_ADDR_VLD_94XX (1U << 1)
+#define SPI_CTRL_SpiStart_94XX (1U << 0)
+
+static inline int
+mv_ffc64(u64 v)
+{
+ u64 x = ~v;
+ return x ? __ffs64(x) : -1;
+}
+
+#define r_reg_set_enable(i) \
+ (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
+ mr32(MVS_STP_REG_SET_0))
+
+#define w_reg_set_enable(i, tmp) \
+ (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
+ mw32(MVS_STP_REG_SET_0, tmp))
+
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+#endif
+
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 000000000..8c4479ab4
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,270 @@
+/*
+ * Marvell 88SE64xx/88SE94xx register IO interface
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#ifndef _MV_CHIPS_H_
+#define _MV_CHIPS_H_
+
+#define mr32(reg) readl(regs + reg)
+#define mw32(reg, val) writel((val), regs + reg)
+#define mw32_f(reg, val) do { \
+ mw32(reg, val); \
+ mr32(reg); \
+ } while (0)
+
+#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
+#define ior32(reg) inl((unsigned long)(regs + reg))
+#define iow16(reg, val) outw((unsigned long)(val, regs + reg))
+#define ior16(reg) inw((unsigned long)(regs + reg))
+#define iow8(reg, val) outb((unsigned long)(val, regs + reg))
+#define ior8(reg) inb((unsigned long)(regs + reg))
+
+static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
+{
+ void __iomem *regs = mvi->regs;
+ mw32(MVS_CMD_ADDR, addr);
+ return mr32(MVS_CMD_DATA);
+}
+
+static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
+{
+ void __iomem *regs = mvi->regs;
+ mw32(MVS_CMD_ADDR, addr);
+ mw32(MVS_CMD_DATA, val);
+}
+
+static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+ void __iomem *regs = mvi->regs;
+ return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
+ mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
+}
+
+static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+ void __iomem *regs = mvi->regs;
+ if (port < 4)
+ mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
+ else
+ mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
+}
+
+static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
+ u32 off2, u32 port)
+{
+ void __iomem *regs = mvi->regs + off;
+ void __iomem *regs2 = mvi->regs + off2;
+ return (port < 4) ? readl(regs + port * 8) :
+ readl(regs2 + (port - 4) * 8);
+}
+
+static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
+ u32 port, u32 val)
+{
+ void __iomem *regs = mvi->regs + off;
+ void __iomem *regs2 = mvi->regs + off2;
+ if (port < 4)
+ writel(val, regs + port * 8);
+ else
+ writel(val, regs2 + (port - 4) * 8);
+}
+
+static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_CFG_DATA,
+ MVS_P4_CFG_DATA, port);
+}
+
+static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_CFG_DATA,
+ MVS_P4_CFG_DATA, port, val);
+}
+
+static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
+ u32 port, u32 addr)
+{
+ mvs_write_port(mvi, MVS_P0_CFG_ADDR,
+ MVS_P4_CFG_ADDR, port, addr);
+ mdelay(10);
+}
+
+static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_VSR_DATA,
+ MVS_P4_VSR_DATA, port);
+}
+
+static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_VSR_DATA,
+ MVS_P4_VSR_DATA, port, val);
+}
+
+static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
+ u32 port, u32 addr)
+{
+ mvs_write_port(mvi, MVS_P0_VSR_ADDR,
+ MVS_P4_VSR_ADDR, port, addr);
+ mdelay(10);
+}
+
+static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_INT_STAT,
+ MVS_P4_INT_STAT, port);
+}
+
+static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_INT_STAT,
+ MVS_P4_INT_STAT, port, val);
+}
+
+static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_INT_MASK,
+ MVS_P4_INT_MASK, port);
+
+}
+
+static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_INT_MASK,
+ MVS_P4_INT_MASK, port, val);
+}
+
+static inline void mvs_phy_hacks(struct mvs_info *mvi)
+{
+ u32 tmp;
+
+ tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+ tmp &= ~(1 << 9);
+ tmp |= (1 << 10);
+ mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+
+ /* enable retry 127 times */
+ mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
+
+ /* extend open frame timeout to max */
+ tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
+ tmp &= ~0xffff;
+ tmp |= 0x3fff;
+ mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
+
+ mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
+
+ /* not to halt for different port op during wideport link change */
+ mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+}
+
+static inline void mvs_int_sata(struct mvs_info *mvi)
+{
+ u32 tmp;
+ void __iomem *regs = mvi->regs;
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ MVS_CHIP_DISP->clear_active_cmds(mvi);
+}
+
+static inline void mvs_int_full(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp, stat;
+ int i;
+
+ stat = mr32(MVS_INT_STAT);
+ mvs_int_rx(mvi, false);
+
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
+ if (tmp)
+ mvs_int_port(mvi, i, tmp);
+ }
+
+ if (stat & CINT_NON_SPEC_NCQ_ERROR)
+ MVS_CHIP_DISP->non_spec_ncq_error(mvi);
+
+ if (stat & CINT_SRS)
+ mvs_int_sata(mvi);
+
+ mw32(MVS_INT_STAT, stat);
+}
+
+static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
+{
+ void __iomem *regs = mvi->regs;
+ mw32(MVS_TX_PROD_IDX, tx);
+}
+
+static inline u32 mvs_rx_update(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ return mr32(MVS_RX_CONS_IDX);
+}
+
+static inline u32 mvs_get_prd_size(void)
+{
+ return sizeof(struct mvs_prd);
+}
+
+static inline u32 mvs_get_prd_count(void)
+{
+ return MAX_SG_ENTRY;
+}
+
+static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
+{
+ u16 link_stat, link_spd;
+ const char *spd[] = {
+ "UnKnown",
+ "2.5",
+ "5.0",
+ };
+ if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
+ return;
+
+ pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
+ link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
+ if (link_spd >= 3)
+ link_spd = 0;
+ dev_printk(KERN_INFO, mvi->dev,
+ "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
+ (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
+ spd[link_spd]);
+}
+
+static inline u32 mvs_hw_max_link_rate(void)
+{
+ return MAX_LINK_RATE;
+}
+
+#endif /* _MV_CHIPS_H_ */
+
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 000000000..f5451940d
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,510 @@
+/*
+ * Marvell 88SE64xx/88SE94xx const head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_DEFS_H_
+#define _MV_DEFS_H_
+
+#define PCI_DEVICE_ID_ARECA_1300 0x1300
+#define PCI_DEVICE_ID_ARECA_1320 0x1320
+
+enum chip_flavors {
+ chip_6320,
+ chip_6440,
+ chip_6485,
+ chip_9480,
+ chip_9180,
+ chip_9445,
+ chip_9485,
+ chip_1300,
+ chip_1320
+};
+
+/* driver compile-time configuration */
+enum driver_configuration {
+ MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
+ MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
+ /* software requires power-of-2
+ ring size */
+ MVS_SOC_SLOTS = 64,
+ MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
+ MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
+
+ MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
+ MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
+ MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
+ MVS_OAF_SZ = 64, /* Open address frame buffer size */
+ MVS_QUEUE_SIZE = 64, /* Support Queue depth */
+ MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
+};
+
+/* unchangeable hardware details */
+enum hardware_details {
+ MVS_MAX_PHYS = 8, /* max. possible phys */
+ MVS_MAX_PORTS = 8, /* max. possible ports */
+ MVS_SOC_PHYS = 4, /* soc phys */
+ MVS_SOC_PORTS = 4, /* soc phys */
+ MVS_MAX_DEVICES = 1024, /* max supported device */
+};
+
+/* peripheral registers (BAR2) */
+enum peripheral_registers {
+ SPI_CTL = 0x10, /* EEPROM control */
+ SPI_CMD = 0x14, /* EEPROM command */
+ SPI_DATA = 0x18, /* EEPROM data */
+};
+
+enum peripheral_register_bits {
+ TWSI_RDY = (1U << 7), /* EEPROM interface ready */
+ TWSI_RD = (1U << 4), /* EEPROM read access */
+
+ SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
+};
+
+enum hw_register_bits {
+ /* MVS_GBL_CTL */
+ INT_EN = (1U << 1), /* Global int enable */
+ HBA_RST = (1U << 0), /* HBA reset */
+
+ /* MVS_GBL_INT_STAT */
+ INT_XOR = (1U << 4), /* XOR engine event */
+ INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
+
+ /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
+ SATA_TARGET = (1U << 16), /* port0 SATA target enable */
+ MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
+ MODE_AUTO_DET_PORT6 = (1U << 14),
+ MODE_AUTO_DET_PORT5 = (1U << 13),
+ MODE_AUTO_DET_PORT4 = (1U << 12),
+ MODE_AUTO_DET_PORT3 = (1U << 11),
+ MODE_AUTO_DET_PORT2 = (1U << 10),
+ MODE_AUTO_DET_PORT1 = (1U << 9),
+ MODE_AUTO_DET_PORT0 = (1U << 8),
+ MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+ MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+ MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+ MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+ MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
+ MODE_SAS_PORT6_MASK = (1U << 6),
+ MODE_SAS_PORT5_MASK = (1U << 5),
+ MODE_SAS_PORT4_MASK = (1U << 4),
+ MODE_SAS_PORT3_MASK = (1U << 3),
+ MODE_SAS_PORT2_MASK = (1U << 2),
+ MODE_SAS_PORT1_MASK = (1U << 1),
+ MODE_SAS_PORT0_MASK = (1U << 0),
+ MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+ MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+ MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+ MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+ /* SAS_MODE value may be
+ * dictated (in hw) by values
+ * of SATA_TARGET & AUTO_DET
+ */
+
+ /* MVS_TX_CFG */
+ TX_EN = (1U << 16), /* Enable TX */
+ TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
+
+ /* MVS_RX_CFG */
+ RX_EN = (1U << 16), /* Enable RX */
+ RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
+
+ /* MVS_INT_COAL */
+ COAL_EN = (1U << 16), /* Enable int coalescing */
+
+ /* MVS_INT_STAT, MVS_INT_MASK */
+ CINT_I2C = (1U << 31), /* I2C event */
+ CINT_SW0 = (1U << 30), /* software event 0 */
+ CINT_SW1 = (1U << 29), /* software event 1 */
+ CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
+ CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
+ CINT_MEM = (1U << 26), /* int mem parity err */
+ CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
+ CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
+ CINT_SRS = (1U << 3), /* SRS event */
+ CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
+ CINT_DONE = (1U << 0), /* cmd completion */
+
+ /* shl for ports 1-3 */
+ CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
+ CINT_PORT = (1U << 8), /* port0 event */
+ CINT_PORT_MASK_OFFSET = 8,
+ CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
+ CINT_PHY_MASK_OFFSET = 4,
+ CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
+
+ /* TX (delivery) ring bits */
+ TXQ_CMD_SHIFT = 29,
+ TXQ_CMD_SSP = 1, /* SSP protocol */
+ TXQ_CMD_SMP = 2, /* SMP protocol */
+ TXQ_CMD_STP = 3, /* STP/SATA protocol */
+ TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
+ TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
+ TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
+ TXQ_MODE_TARGET = 0,
+ TXQ_MODE_INITIATOR = 1,
+ TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
+ TXQ_PRI_NORMAL = 0,
+ TXQ_PRI_HIGH = 1,
+ TXQ_SRS_SHIFT = 20, /* SATA register set */
+ TXQ_SRS_MASK = 0x7f,
+ TXQ_PHY_SHIFT = 12, /* PHY bitmap */
+ TXQ_PHY_MASK = 0xff,
+ TXQ_SLOT_MASK = 0xfff, /* slot number */
+
+ /* RX (completion) ring bits */
+ RXQ_GOOD = (1U << 23), /* Response good */
+ RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
+ RXQ_CMD_RX = (1U << 20), /* target cmd received */
+ RXQ_ATTN = (1U << 19), /* attention */
+ RXQ_RSP = (1U << 18), /* response frame xfer'd */
+ RXQ_ERR = (1U << 17), /* err info rec xfer'd */
+ RXQ_DONE = (1U << 16), /* cmd complete */
+ RXQ_SLOT_MASK = 0xfff, /* slot number */
+
+ /* mvs_cmd_hdr bits */
+ MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
+ MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
+
+ /* SSP initiator only */
+ MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
+
+ /* SSP initiator or target */
+ MCH_SSP_FR_TASK = 0x1, /* TASK frame */
+
+ /* SSP target only */
+ MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
+ MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
+ MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
+ MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
+
+ MCH_SSP_MODE_PASSTHRU = 1,
+ MCH_SSP_MODE_NORMAL = 0,
+ MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
+ MCH_FBURST = (1U << 11), /* first burst (SSP) */
+ MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
+ MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
+ MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
+ MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
+ MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
+ MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
+ MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
+ MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
+
+ CCTL_RST = (1U << 5), /* port logic reset */
+
+ /* 0(LSB first), 1(MSB first) */
+ CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
+ CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
+ CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
+ CCTL_ENDIAN_CMD = (1U << 0), /* command table */
+
+ /* MVS_Px_SER_CTLSTAT (per-phy control) */
+ PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
+ PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
+ PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
+ PHY_RST = (1U << 0), /* phy reset */
+ PHY_READY_MASK = (1U << 20),
+
+ /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+ PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
+ PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */
+ PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
+ PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
+ PHYEV_AN = (1U << 18), /* SATA async notification */
+ PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
+ PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
+ PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
+ PHYEV_IU_BIG = (1U << 11), /* IU too long err */
+ PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
+ PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
+ PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
+ PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
+ PHYEV_PORT_SEL = (1U << 6), /* port selector present */
+ PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
+ PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
+ PHYEV_ID_FAIL = (1U << 3), /* identify failed */
+ PHYEV_ID_DONE = (1U << 2), /* identify done */
+ PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
+ PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
+
+ /* MVS_PCS */
+ PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
+ PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
+ PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
+ PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
+ PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
+ PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
+ PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
+ PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
+ PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
+ PCS_CMD_RST = (1U << 1), /* reset cmd issue */
+ PCS_CMD_EN = (1U << 0), /* enable cmd issue */
+
+ /* Port n Attached Device Info */
+ PORT_DEV_SSP_TRGT = (1U << 19),
+ PORT_DEV_SMP_TRGT = (1U << 18),
+ PORT_DEV_STP_TRGT = (1U << 17),
+ PORT_DEV_SSP_INIT = (1U << 11),
+ PORT_DEV_SMP_INIT = (1U << 10),
+ PORT_DEV_STP_INIT = (1U << 9),
+ PORT_PHY_ID_MASK = (0xFFU << 24),
+ PORT_SSP_TRGT_MASK = (0x1U << 19),
+ PORT_SSP_INIT_MASK = (0x1U << 11),
+ PORT_DEV_TRGT_MASK = (0x7U << 17),
+ PORT_DEV_INIT_MASK = (0x7U << 9),
+ PORT_DEV_TYPE_MASK = (0x7U << 0),
+
+ /* Port n PHY Status */
+ PHY_RDY = (1U << 2),
+ PHY_DW_SYNC = (1U << 1),
+ PHY_OOB_DTCTD = (1U << 0),
+
+ /* VSR */
+ /* PHYMODE 6 (CDB) */
+ PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
+ PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
+ PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
+ PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
+ PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
+ PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
+ PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
+ PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
+ PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
+ PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
+ PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
+ PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
+ PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
+ PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
+};
+
+/* SAS/SATA configuration port registers, aka phy registers */
+enum sas_sata_config_port_regs {
+ PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
+ PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
+ PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
+ PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
+ PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
+ PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
+ PHYR_SATA_CTL = 0x18, /* SATA control */
+ PHYR_PHY_STAT = 0x1C, /* PHY status */
+ PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
+ PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
+ PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
+ PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
+ PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
+ PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
+ PHYR_WIDE_PORT = 0x38, /* wide port participating */
+ PHYR_CURRENT0 = 0x80, /* current connection info 0 */
+ PHYR_CURRENT1 = 0x84, /* current connection info 1 */
+ PHYR_CURRENT2 = 0x88, /* current connection info 2 */
+ CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
+ CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
+ CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
+ CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
+ CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
+ CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
+ CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
+ CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
+ CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
+ CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
+ CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
+ CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
+ CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
+ CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
+};
+
+enum sas_cmd_port_registers {
+ CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
+ CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
+ CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
+ CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
+ CMD_OOB_SPACE = 0x110, /* OOB space control register */
+ CMD_OOB_BURST = 0x114, /* OOB burst control register */
+ CMD_PHY_TIMER = 0x118, /* PHY timer control register */
+ CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
+ CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
+ CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
+ CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
+ CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
+ CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
+ CMD_ID_TEST = 0x134, /* ID test register */
+ CMD_PL_TIMER = 0x138, /* PL timer register */
+ CMD_WD_TIMER = 0x13c, /* WD timer register */
+ CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
+ CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
+ CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
+ CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
+ CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
+ CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
+ CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
+ CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
+ CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
+ CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
+ CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
+ CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
+ CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
+ CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
+ CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
+ CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
+ CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
+ CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
+ CMD_RESET_COUNT = 0x188, /* Reset Count */
+ CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
+ CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
+ CMD_PHY_CTL = 0x194, /* PHY Control and Status */
+ CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
+ CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
+ CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
+ CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
+ CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
+ CMD_HOST_CTL = 0x1AC, /* Host Control Status */
+ CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
+ CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
+ CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
+ CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
+ CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
+ CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
+ CMD_PORT_LAYER_TIMER1 = 0x1E0, /* Port Layer Timer 1 */
+ CMD_LINK_TIMER = 0x1E4, /* Link Timer */
+};
+
+enum mvs_info_flags {
+ MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
+ MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
+};
+
+enum mvs_event_flags {
+ PHY_PLUG_EVENT = (3U),
+ PHY_PLUG_IN = (1U << 0), /* phy plug in */
+ PHY_PLUG_OUT = (1U << 1), /* phy plug out */
+ EXP_BRCT_CHG = (1U << 2), /* broadcast change */
+};
+
+enum mvs_port_type {
+ PORT_TGT_MASK = (1U << 5),
+ PORT_INIT_PORT = (1U << 4),
+ PORT_TGT_PORT = (1U << 3),
+ PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
+ PORT_TYPE_SAS = (1U << 1),
+ PORT_TYPE_SATA = (1U << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+ /* SSP */
+ SSP_F_H = 0x00,
+ SSP_F_IU = 0x18,
+ SSP_F_MAX = 0x4D,
+ /* STP */
+ STP_CMD_FIS = 0x00,
+ STP_ATAPI_CMD = 0x40,
+ STP_F_MAX = 0x10,
+ /* SMP */
+ SMP_F_T = 0x00,
+ SMP_F_DEP = 0x01,
+ SMP_F_MAX = 0x101,
+};
+
+enum status_buffer {
+ SB_EIR_OFF = 0x00, /* Error Information Record */
+ SB_RFB_OFF = 0x08, /* Response Frame Buffer */
+ SB_RFB_MAX = 0x400, /* RFB size*/
+};
+
+enum error_info_rec {
+ CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
+ CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
+ RSP_OVER = (1U << 29), /* rsp buffer overflow */
+ RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
+ UNK_FIS = (1U << 27), /* unknown FIS */
+ DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
+ SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
+ TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
+ R_ERR = (1U << 23), /* SATA returned R_ERR prim */
+ RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
+ XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
+ UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
+ DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
+ INTERLOCK = (1U << 15), /* interlock error */
+ NAK = (1U << 14), /* NAK rx'd */
+ ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
+ CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
+ OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
+ PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
+ NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
+ STP_RES_BSY = (1U << 8), /* STP resources busy */
+ BREAK = (1U << 7), /* break received */
+ BAD_DEST = (1U << 6), /* bad destination */
+ BAD_PROTO = (1U << 5), /* protocol not supported */
+ BAD_RATE = (1U << 4), /* cxn rate not supported */
+ WRONG_DEST = (1U << 3), /* wrong destination error */
+ CREDIT_TO = (1U << 2), /* credit timeout */
+ WDOG_TO = (1U << 1), /* watchdog timeout */
+ BUF_PAR = (1U << 0), /* buffer parity error */
+};
+
+enum error_info_rec_2 {
+ SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
+ GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
+ APP_CHK_ERR = (1U << 13), /* Application Check error */
+ REF_CHK_ERR = (1U << 12), /* Reference Check Error */
+ USR_BLK_NM = (1U << 0), /* User Block Number */
+};
+
+enum pci_cfg_register_bits {
+ PCTL_PWR_OFF = (0xFU << 24),
+ PCTL_COM_ON = (0xFU << 20),
+ PCTL_LINK_RST = (0xFU << 16),
+ PCTL_LINK_OFFS = (16),
+ PCTL_PHY_DSBL = (0xFU << 12),
+ PCTL_PHY_DSBL_OFFS = (12),
+ PRD_REQ_SIZE = (0x4000),
+ PRD_REQ_MASK = (0x00007000),
+ PLS_NEG_LINK_WD = (0x3FU << 4),
+ PLS_NEG_LINK_WD_OFFS = 4,
+ PLS_LINK_SPD = (0x0FU << 0),
+ PLS_LINK_SPD_OFFS = 0,
+};
+
+enum open_frame_protocol {
+ PROTOCOL_SMP = 0x0,
+ PROTOCOL_SSP = 0x1,
+ PROTOCOL_STP = 0x2,
+};
+
+/* define for response frame datapres field */
+enum datapres_field {
+ NO_DATA = 0,
+ RESPONSE_DATA = 1,
+ SENSE_DATA = 2,
+};
+
+/* define task management IU */
+struct mvs_tmf_task{
+ u8 tmf;
+ u16 tag_of_task_to_be_managed;
+};
+#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 000000000..53030b0e8
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,855 @@
+/*
+ * Marvell 88SE64xx/88SE94xx pci init
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#include "mv_sas.h"
+
+int interrupt_coalescing = 0x80;
+
+static struct scsi_transport_template *mvs_stt;
+static const struct mvs_chip_info mvs_chips[] = {
+ [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
+ [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+ [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+ [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
+ [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
+ [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+};
+
+struct device_attribute *mvst_host_attrs[];
+
+#define SOC_SAS_NUM 2
+
+static struct scsi_host_template mvs_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .scan_finished = mvs_scan_finished,
+ .scan_start = mvs_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = sas_eh_bus_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .shost_attrs = mvst_host_attrs,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static struct sas_domain_function_template mvs_transport_ops = {
+ .lldd_dev_found = mvs_dev_found,
+ .lldd_dev_gone = mvs_dev_gone,
+ .lldd_execute_task = mvs_queue_command,
+ .lldd_control_phy = mvs_phy_control,
+
+ .lldd_abort_task = mvs_abort_task,
+ .lldd_abort_task_set = mvs_abort_task_set,
+ .lldd_clear_aca = mvs_clear_aca,
+ .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
+ .lldd_lu_reset = mvs_lu_reset,
+ .lldd_query_task = mvs_query_task,
+ .lldd_port_formed = mvs_port_formed,
+ .lldd_port_deformed = mvs_port_deformed,
+
+};
+
+static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
+{
+ struct mvs_phy *phy = &mvi->phy[phy_id];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ phy->mvi = mvi;
+ phy->port = NULL;
+ init_timer(&phy->timer);
+ sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
+ sas_phy->class = SAS;
+ sas_phy->iproto = SAS_PROTOCOL_ALL;
+ sas_phy->tproto = 0;
+ sas_phy->type = PHY_TYPE_PHYSICAL;
+ sas_phy->role = PHY_ROLE_INITIATOR;
+ sas_phy->oob_mode = OOB_NOT_CONNECTED;
+ sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+ sas_phy->id = phy_id;
+ sas_phy->sas_addr = &mvi->sas_addr[0];
+ sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+ sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
+ sas_phy->lldd_phy = phy;
+}
+
+static void mvs_free(struct mvs_info *mvi)
+{
+ struct mvs_wq *mwq;
+ int slot_nr;
+
+ if (!mvi)
+ return;
+
+ if (mvi->flags & MVF_FLAG_SOC)
+ slot_nr = MVS_SOC_SLOTS;
+ else
+ slot_nr = MVS_CHIP_SLOT_SZ;
+
+ if (mvi->dma_pool)
+ pci_pool_destroy(mvi->dma_pool);
+
+ if (mvi->tx)
+ dma_free_coherent(mvi->dev,
+ sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+ mvi->tx, mvi->tx_dma);
+ if (mvi->rx_fis)
+ dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
+ mvi->rx_fis, mvi->rx_fis_dma);
+ if (mvi->rx)
+ dma_free_coherent(mvi->dev,
+ sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+ mvi->rx, mvi->rx_dma);
+ if (mvi->slot)
+ dma_free_coherent(mvi->dev,
+ sizeof(*mvi->slot) * slot_nr,
+ mvi->slot, mvi->slot_dma);
+
+ if (mvi->bulk_buffer)
+ dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+ mvi->bulk_buffer, mvi->bulk_buffer_dma);
+ if (mvi->bulk_buffer1)
+ dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+ mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
+
+ MVS_CHIP_DISP->chip_iounmap(mvi);
+ if (mvi->shost)
+ scsi_host_put(mvi->shost);
+ list_for_each_entry(mwq, &mvi->wq_list, entry)
+ cancel_delayed_work(&mwq->work_q);
+ kfree(mvi->tags);
+ kfree(mvi);
+}
+
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+static void mvs_tasklet(unsigned long opaque)
+{
+ u32 stat;
+ u16 core_nr, i = 0;
+
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+ if (unlikely(!mvi))
+ BUG_ON(1);
+
+ stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
+ if (!stat)
+ goto out;
+
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
+ }
+out:
+ MVS_CHIP_DISP->interrupt_enable(mvi);
+
+}
+#endif
+
+static irqreturn_t mvs_interrupt(int irq, void *opaque)
+{
+ u32 core_nr;
+ u32 stat;
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = opaque;
+#ifndef CONFIG_SCSI_MVSAS_TASKLET
+ u32 i;
+#endif
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+ if (unlikely(!mvi))
+ return IRQ_NONE;
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ MVS_CHIP_DISP->interrupt_disable(mvi);
+#endif
+
+ stat = MVS_CHIP_DISP->isr_status(mvi, irq);
+ if (!stat) {
+ #ifdef CONFIG_SCSI_MVSAS_TASKLET
+ MVS_CHIP_DISP->interrupt_enable(mvi);
+ #endif
+ return IRQ_NONE;
+ }
+
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
+#else
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ MVS_CHIP_DISP->isr(mvi, irq, stat);
+ }
+#endif
+ return IRQ_HANDLED;
+}
+
+static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+{
+ int i = 0, slot_nr;
+ char pool_name[32];
+
+ if (mvi->flags & MVF_FLAG_SOC)
+ slot_nr = MVS_SOC_SLOTS;
+ else
+ slot_nr = MVS_CHIP_SLOT_SZ;
+
+ spin_lock_init(&mvi->lock);
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvs_phy_init(mvi, i);
+ mvi->port[i].wide_port_phymap = 0;
+ mvi->port[i].port_attached = 0;
+ INIT_LIST_HEAD(&mvi->port[i].list);
+ }
+ for (i = 0; i < MVS_MAX_DEVICES; i++) {
+ mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
+ mvi->devices[i].dev_type = SAS_PHY_UNUSED;
+ mvi->devices[i].device_id = i;
+ mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+ init_timer(&mvi->devices[i].timer);
+ }
+
+ /*
+ * alloc and init our DMA areas
+ */
+ mvi->tx = dma_alloc_coherent(mvi->dev,
+ sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+ &mvi->tx_dma, GFP_KERNEL);
+ if (!mvi->tx)
+ goto err_out;
+ memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
+ mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
+ &mvi->rx_fis_dma, GFP_KERNEL);
+ if (!mvi->rx_fis)
+ goto err_out;
+ memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
+
+ mvi->rx = dma_alloc_coherent(mvi->dev,
+ sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+ &mvi->rx_dma, GFP_KERNEL);
+ if (!mvi->rx)
+ goto err_out;
+ memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
+ mvi->rx[0] = cpu_to_le32(0xfff);
+ mvi->rx_cons = 0xfff;
+
+ mvi->slot = dma_alloc_coherent(mvi->dev,
+ sizeof(*mvi->slot) * slot_nr,
+ &mvi->slot_dma, GFP_KERNEL);
+ if (!mvi->slot)
+ goto err_out;
+ memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
+
+ mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
+ TRASH_BUCKET_SIZE,
+ &mvi->bulk_buffer_dma, GFP_KERNEL);
+ if (!mvi->bulk_buffer)
+ goto err_out;
+
+ mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
+ TRASH_BUCKET_SIZE,
+ &mvi->bulk_buffer_dma1, GFP_KERNEL);
+ if (!mvi->bulk_buffer1)
+ goto err_out;
+
+ sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
+ mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
+ if (!mvi->dma_pool) {
+ printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
+ goto err_out;
+ }
+ mvi->tags_num = slot_nr;
+
+ /* Initialize tags */
+ mvs_tag_init(mvi);
+ return 0;
+err_out:
+ return 1;
+}
+
+
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
+{
+ unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
+ struct pci_dev *pdev = mvi->pdev;
+ if (bar_ex != -1) {
+ /*
+ * ioremap main and peripheral registers
+ */
+ res_start = pci_resource_start(pdev, bar_ex);
+ res_len = pci_resource_len(pdev, bar_ex);
+ if (!res_start || !res_len)
+ goto err_out;
+
+ res_flag_ex = pci_resource_flags(pdev, bar_ex);
+ if (res_flag_ex & IORESOURCE_MEM) {
+ if (res_flag_ex & IORESOURCE_CACHEABLE)
+ mvi->regs_ex = ioremap(res_start, res_len);
+ else
+ mvi->regs_ex = ioremap_nocache(res_start,
+ res_len);
+ } else
+ mvi->regs_ex = (void *)res_start;
+ if (!mvi->regs_ex)
+ goto err_out;
+ }
+
+ res_start = pci_resource_start(pdev, bar);
+ res_len = pci_resource_len(pdev, bar);
+ if (!res_start || !res_len)
+ goto err_out;
+
+ res_flag = pci_resource_flags(pdev, bar);
+ if (res_flag & IORESOURCE_CACHEABLE)
+ mvi->regs = ioremap(res_start, res_len);
+ else
+ mvi->regs = ioremap_nocache(res_start, res_len);
+
+ if (!mvi->regs) {
+ if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
+ iounmap(mvi->regs_ex);
+ mvi->regs_ex = NULL;
+ goto err_out;
+ }
+
+ return 0;
+err_out:
+ return -1;
+}
+
+void mvs_iounmap(void __iomem *regs)
+{
+ iounmap(regs);
+}
+
+static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct Scsi_Host *shost, unsigned int id)
+{
+ struct mvs_info *mvi = NULL;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ mvi = kzalloc(sizeof(*mvi) +
+ (1L << mvs_chips[ent->driver_data].slot_width) *
+ sizeof(struct mvs_slot_info), GFP_KERNEL);
+ if (!mvi)
+ return NULL;
+
+ mvi->pdev = pdev;
+ mvi->dev = &pdev->dev;
+ mvi->chip_id = ent->driver_data;
+ mvi->chip = &mvs_chips[mvi->chip_id];
+ INIT_LIST_HEAD(&mvi->wq_list);
+
+ ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
+ ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
+
+ mvi->id = id;
+ mvi->sas = sha;
+ mvi->shost = shost;
+
+ mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
+ if (!mvi->tags)
+ goto err_out;
+
+ if (MVS_CHIP_DISP->chip_ioremap(mvi))
+ goto err_out;
+ if (!mvs_alloc(mvi, shost))
+ return mvi;
+err_out:
+ mvs_free(mvi);
+ return NULL;
+}
+
+static int pci_go_64(struct pci_dev *pdev)
+{
+ int rc;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+ const struct mvs_chip_info *chip_info)
+{
+ int phy_nr, port_nr; unsigned short core_nr;
+ struct asd_sas_phy **arr_phy;
+ struct asd_sas_port **arr_port;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ core_nr = chip_info->n_host;
+ phy_nr = core_nr * chip_info->n_phy;
+ port_nr = phy_nr;
+
+ memset(sha, 0x00, sizeof(struct sas_ha_struct));
+ arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+ arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+ if (!arr_phy || !arr_port)
+ goto exit_free;
+
+ sha->sas_phy = arr_phy;
+ sha->sas_port = arr_port;
+ sha->core.shost = shost;
+
+ sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
+ if (!sha->lldd_ha)
+ goto exit_free;
+
+ ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
+
+ shost->transportt = mvs_stt;
+ shost->max_id = MVS_MAX_DEVICES;
+ shost->max_lun = ~0;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 16;
+
+ return 0;
+exit_free:
+ kfree(arr_phy);
+ kfree(arr_port);
+ return -1;
+
+}
+
+static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
+ const struct mvs_chip_info *chip_info)
+{
+ int can_queue, i = 0, j = 0;
+ struct mvs_info *mvi = NULL;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+ for (j = 0; j < nr_core; j++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+ for (i = 0; i < chip_info->n_phy; i++) {
+ sha->sas_phy[j * chip_info->n_phy + i] =
+ &mvi->phy[i].sas_phy;
+ sha->sas_port[j * chip_info->n_phy + i] =
+ &mvi->port[i].sas_port;
+ }
+ }
+
+ sha->sas_ha_name = DRV_NAME;
+ sha->dev = mvi->dev;
+ sha->lldd_module = THIS_MODULE;
+ sha->sas_addr = &mvi->sas_addr[0];
+
+ sha->num_phys = nr_core * chip_info->n_phy;
+
+ if (mvi->flags & MVF_FLAG_SOC)
+ can_queue = MVS_SOC_CAN_QUEUE;
+ else
+ can_queue = MVS_CHIP_SLOT_SZ;
+
+ shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
+ shost->can_queue = can_queue;
+ mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
+ sha->core.shost = mvi->shost;
+}
+
+static void mvs_init_sas_add(struct mvs_info *mvi)
+{
+ u8 i;
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
+ mvi->phy[i].dev_sas_addr =
+ cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
+ }
+
+ memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
+}
+
+static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int rc, nhost = 0;
+ struct mvs_info *mvi;
+ struct mvs_prv_info *mpi;
+ irq_handler_t irq_handler = mvs_interrupt;
+ struct Scsi_Host *shost = NULL;
+ const struct mvs_chip_info *chip;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "mvsas: driver version %s\n", DRV_VERSION);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_enable;
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable;
+
+ rc = pci_go_64(pdev);
+ if (rc)
+ goto err_out_regions;
+
+ shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
+ if (!shost) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ chip = &mvs_chips[ent->driver_data];
+ SHOST_TO_SAS_HA(shost) =
+ kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
+ if (!SHOST_TO_SAS_HA(shost)) {
+ kfree(shost);
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ rc = mvs_prep_sas_ha_init(shost, chip);
+ if (rc) {
+ kfree(shost);
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+
+ do {
+ mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
+ if (!mvi) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ memset(&mvi->hba_info_param, 0xFF,
+ sizeof(struct hba_info_page));
+
+ mvs_init_sas_add(mvi);
+
+ mvi->instance = nhost;
+ rc = MVS_CHIP_DISP->chip_init(mvi);
+ if (rc) {
+ mvs_free(mvi);
+ goto err_out_regions;
+ }
+ nhost++;
+ } while (nhost < chip->n_host);
+ mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
+ (unsigned long)SHOST_TO_SAS_HA(shost));
+#endif
+
+ mvs_post_sas_ha_init(shost, chip);
+
+ rc = scsi_add_host(shost, &pdev->dev);
+ if (rc)
+ goto err_out_shost;
+
+ rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+ if (rc)
+ goto err_out_shost;
+ rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
+ DRV_NAME, SHOST_TO_SAS_HA(shost));
+ if (rc)
+ goto err_not_sas;
+
+ MVS_CHIP_DISP->interrupt_enable(mvi);
+
+ scsi_scan_host(mvi->shost);
+
+ return 0;
+
+err_not_sas:
+ sas_unregister_ha(SHOST_TO_SAS_HA(shost));
+err_out_shost:
+ scsi_remove_host(mvi->shost);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_enable:
+ return rc;
+}
+
+static void mvs_pci_remove(struct pci_dev *pdev)
+{
+ unsigned short core_nr, i = 0;
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct mvs_info *mvi = NULL;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
+#endif
+
+ sas_unregister_ha(sha);
+ sas_remove_host(mvi->shost);
+ scsi_remove_host(mvi->shost);
+
+ MVS_CHIP_DISP->interrupt_disable(mvi);
+ free_irq(mvi->pdev->irq, sha);
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ mvs_free(mvi);
+ }
+ kfree(sha->sas_phy);
+ kfree(sha->sas_port);
+ kfree(sha);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ return;
+}
+
+static struct pci_device_id mvs_pci_table[] = {
+ { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+ { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL,
+ .device = 0x6440,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x6480,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_6485,
+ },
+ { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+ { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
+ { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
+ { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
+ { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
+ { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+ { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2722), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2740), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2744), chip_9480 },
+ { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
+ .device = 0x9480,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x9480,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_9480,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
+ .device = 0x9445,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x9480,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_9445,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
+ .device = 0x9485,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x9480,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_9485,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL_EXT,
+ .device = 0x9485,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x9485,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_9485,
+ },
+ { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
+ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+ { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+
+ { } /* terminate list */
+};
+
+static struct pci_driver mvs_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = mvs_pci_table,
+ .probe = mvs_pci_init,
+ .remove = mvs_pci_remove,
+};
+
+static ssize_t
+mvs_show_driver_version(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
+{
+ return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+static DEVICE_ATTR(driver_version,
+ S_IRUGO,
+ mvs_show_driver_version,
+ NULL);
+
+static ssize_t
+mvs_store_interrupt_coalescing(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buffer, size_t size)
+{
+ int val = 0;
+ struct mvs_info *mvi = NULL;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ u8 i, core_nr;
+ if (buffer == NULL)
+ return size;
+
+ if (sscanf(buffer, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val >= 0x10000) {
+ mv_dprintk("interrupt coalescing timer %d us is"
+ "too long\n", val);
+ return strlen(buffer);
+ }
+
+ interrupt_coalescing = val;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+ if (unlikely(!mvi))
+ return -EINVAL;
+
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ if (MVS_CHIP_DISP->tune_interrupt)
+ MVS_CHIP_DISP->tune_interrupt(mvi,
+ interrupt_coalescing);
+ }
+ mv_dprintk("set interrupt coalescing time to %d us\n",
+ interrupt_coalescing);
+ return strlen(buffer);
+}
+
+static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
+{
+ return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+}
+
+static DEVICE_ATTR(interrupt_coalescing,
+ S_IRUGO|S_IWUSR,
+ mvs_show_interrupt_coalescing,
+ mvs_store_interrupt_coalescing);
+
+/* task handler */
+struct task_struct *mvs_th;
+static int __init mvs_init(void)
+{
+ int rc;
+ mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+ if (!mvs_stt)
+ return -ENOMEM;
+
+ rc = pci_register_driver(&mvs_pci_driver);
+ if (rc)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ sas_release_transport(mvs_stt);
+ return rc;
+}
+
+static void __exit mvs_exit(void)
+{
+ pci_unregister_driver(&mvs_pci_driver);
+ sas_release_transport(mvs_stt);
+}
+
+struct device_attribute *mvst_host_attrs[] = {
+ &dev_attr_driver_version,
+ &dev_attr_interrupt_coalescing,
+ NULL,
+};
+
+module_init(mvs_init);
+module_exit(mvs_exit);
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+#ifdef CONFIG_PCI
+MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 000000000..454536c49
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2105 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+ if (task->lldd_task) {
+ struct mvs_slot_info *slot;
+ slot = task->lldd_task;
+ *tag = slot->slot_tag;
+ return 1;
+ }
+ return 0;
+}
+
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
+{
+ void *bitmap = mvi->tags;
+ clear_bit(tag, bitmap);
+}
+
+void mvs_tag_free(struct mvs_info *mvi, u32 tag)
+{
+ mvs_tag_clear(mvi, tag);
+}
+
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+ void *bitmap = mvi->tags;
+ set_bit(tag, bitmap);
+}
+
+inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
+{
+ unsigned int index, tag;
+ void *bitmap = mvi->tags;
+
+ index = find_first_zero_bit(bitmap, mvi->tags_num);
+ tag = index;
+ if (tag >= mvi->tags_num)
+ return -SAS_QUEUE_FULL;
+ mvs_tag_set(mvi, tag);
+ *tag_out = tag;
+ return 0;
+}
+
+void mvs_tag_init(struct mvs_info *mvi)
+{
+ int i;
+ for (i = 0; i < mvi->tags_num; ++i)
+ mvs_tag_clear(mvi, i);
+}
+
+struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+{
+ unsigned long i = 0, j = 0, hi = 0;
+ struct sas_ha_struct *sha = dev->port->ha;
+ struct mvs_info *mvi = NULL;
+ struct asd_sas_phy *phy;
+
+ while (sha->sas_port[i]) {
+ if (sha->sas_port[i] == dev->port) {
+ phy = container_of(sha->sas_port[i]->phy_list.next,
+ struct asd_sas_phy, port_phy_el);
+ j = 0;
+ while (sha->sas_phy[j]) {
+ if (sha->sas_phy[j] == phy)
+ break;
+ j++;
+ }
+ break;
+ }
+ i++;
+ }
+ hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+ return mvi;
+
+}
+
+int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+{
+ unsigned long i = 0, j = 0, n = 0, num = 0;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+ struct sas_ha_struct *sha = dev->port->ha;
+
+ while (sha->sas_port[i]) {
+ if (sha->sas_port[i] == dev->port) {
+ struct asd_sas_phy *phy;
+ list_for_each_entry(phy,
+ &sha->sas_port[i]->phy_list, port_phy_el) {
+ j = 0;
+ while (sha->sas_phy[j]) {
+ if (sha->sas_phy[j] == phy)
+ break;
+ j++;
+ }
+ phyno[n] = (j >= mvi->chip->n_phy) ?
+ (j - mvi->chip->n_phy) : j;
+ num++;
+ n++;
+ }
+ break;
+ }
+ i++;
+ }
+ return num;
+}
+
+struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
+ u8 reg_set)
+{
+ u32 dev_no;
+ for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
+ if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
+ continue;
+
+ if (mvi->devices[dev_no].taskfileset == reg_set)
+ return &mvi->devices[dev_no];
+ }
+ return NULL;
+}
+
+static inline void mvs_free_reg_set(struct mvs_info *mvi,
+ struct mvs_device *dev)
+{
+ if (!dev) {
+ mv_printk("device has been free.\n");
+ return;
+ }
+ if (dev->taskfileset == MVS_ID_NOT_MAPPED)
+ return;
+ MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
+}
+
+static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
+ struct mvs_device *dev)
+{
+ if (dev->taskfileset != MVS_ID_NOT_MAPPED)
+ return 0;
+ return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
+}
+
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
+{
+ u32 no;
+ for_each_phy(phy_mask, phy_mask, no) {
+ if (!(phy_mask & 1))
+ continue;
+ MVS_CHIP_DISP->phy_reset(mvi, no, hard);
+ }
+}
+
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata)
+{
+ int rc = 0, phy_id = sas_phy->id;
+ u32 tmp, i = 0, hi;
+ struct sas_ha_struct *sha = sas_phy->ha;
+ struct mvs_info *mvi = NULL;
+
+ while (sha->sas_phy[i]) {
+ if (sha->sas_phy[i] == sas_phy)
+ break;
+ i++;
+ }
+ hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+ switch (func) {
+ case PHY_FUNC_SET_LINK_RATE:
+ MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
+ break;
+
+ case PHY_FUNC_HARD_RESET:
+ tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
+ if (tmp & PHY_RST_HARD)
+ break;
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
+ break;
+
+ case PHY_FUNC_LINK_RESET:
+ MVS_CHIP_DISP->phy_enable(mvi, phy_id);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
+ break;
+
+ case PHY_FUNC_DISABLE:
+ MVS_CHIP_DISP->phy_disable(mvi, phy_id);
+ break;
+ case PHY_FUNC_RELEASE_SPINUP_HOLD:
+ default:
+ rc = -ENOSYS;
+ }
+ msleep(200);
+ return rc;
+}
+
+void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ u32 off_hi, u64 sas_addr)
+{
+ u32 lo = (u32)sas_addr;
+ u32 hi = (u32)(sas_addr>>32);
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
+}
+
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha;
+ if (!phy->phy_attached)
+ return;
+
+ if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
+ && phy->phy_type & PORT_TYPE_SAS) {
+ return;
+ }
+
+ sas_ha = mvi->sas;
+ sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+
+ if (sas_phy->phy) {
+ struct sas_phy *sphy = sas_phy->phy;
+
+ sphy->negotiated_linkrate = sas_phy->linkrate;
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+ sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
+ }
+
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ struct sas_identify_frame *id;
+
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+ id->dev_type = phy->identify.device_type;
+ id->initiator_bits = SAS_PROTOCOL_ALL;
+ id->target_bits = phy->identify.target_port_protocols;
+
+ /* direct attached SAS device */
+ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
+ }
+ } else if (phy->phy_type & PORT_TYPE_SATA) {
+ /*Nothing*/
+ }
+ mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);
+
+ sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+
+ mvi->sas->notify_port_event(sas_phy,
+ PORTE_BYTES_DMAED);
+}
+
+void mvs_scan_start(struct Scsi_Host *shost)
+{
+ int i, j;
+ unsigned short core_nr;
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct mvs_prv_info *mvs_prv = sha->lldd_ha;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+ for (j = 0; j < core_nr; j++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+ for (i = 0; i < mvi->chip->n_phy; ++i)
+ mvs_bytes_dmaed(mvi, i);
+ }
+ mvs_prv->scan_finished = 1;
+}
+
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct mvs_prv_info *mvs_prv = sha->lldd_ha;
+
+ if (mvs_prv->scan_finished == 0)
+ return 0;
+
+ sas_drain_work(sha);
+ return 1;
+}
+
+static int mvs_task_prep_smp(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+{
+ int elem, rc, i;
+ struct sas_ha_struct *sha = mvi->sas;
+ struct sas_task *task = tei->task;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct domain_device *dev = task->dev;
+ struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ struct scatterlist *sg_req, *sg_resp;
+ u32 req_len, resp_len, tag = tei->tag;
+ void *buf_tmp;
+ u8 *buf_oaf;
+ dma_addr_t buf_tmp_dma;
+ void *buf_prd;
+ struct mvs_slot_info *slot = &mvi->slot_info[tag];
+ u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ resp_len = SB_RFB_MAX;
+
+ /* must be in dwords */
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_2;
+ }
+
+ /*
+ * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+ */
+
+ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
+ buf_tmp = slot->buf;
+ buf_tmp_dma = slot->buf_dma;
+
+ hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
+
+ /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+ buf_oaf = buf_tmp;
+ hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_OAF_SZ;
+ buf_tmp_dma += MVS_OAF_SZ;
+
+ /* region 3: PRD table *********************************** */
+ buf_prd = buf_tmp;
+ if (tei->n_elem)
+ hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+ else
+ hdr->prd_tbl = 0;
+
+ i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+ buf_tmp += i;
+ buf_tmp_dma += i;
+
+ /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+ slot->response = buf_tmp;
+ hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+ if (mvi->flags & MVF_FLAG_SOC)
+ hdr->reserved[0] = 0;
+
+ /*
+ * Fill in TX ring and command slot header
+ */
+ slot->tx = mvi->tx_prod;
+ mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
+ TXQ_MODE_I | tag |
+ (MVS_PHY_ID << TXQ_PHY_SHIFT));
+
+ hdr->flags |= flags;
+ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
+ hdr->tags = cpu_to_le32(tag);
+ hdr->data_len = 0;
+
+ /* generate open address frame hdr (first 12 bytes) */
+ /* initiator, SMP, ftype 1h */
+ buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
+ buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
+ *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
+ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+ /* fill in PRD (scatter/gather) table, if any */
+ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+
+ return 0;
+
+err_out_2:
+ dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+err_out:
+ dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ return rc;
+}
+
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+{
+ struct sas_task *task = tei->task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct asd_sas_port *sas_port = dev->port;
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ u32 tag = tei->tag, hdr_tag;
+ u32 flags, del_q;
+ void *buf_tmp;
+ u8 *buf_cmd, *buf_oaf;
+ dma_addr_t buf_tmp_dma;
+ u32 i, req_len, resp_len;
+ const u32 max_resp_len = SB_RFB_MAX;
+
+ if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
+ mv_dprintk("Have not enough regiset for dev %d.\n",
+ mvi_dev->device_id);
+ return -EBUSY;
+ }
+ slot = &mvi->slot_info[tag];
+ slot->tx = mvi->tx_prod;
+ del_q = TXQ_MODE_I | tag |
+ (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+ if (task->data_dir == DMA_FROM_DEVICE)
+ flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
+ else
+ flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+
+ if (task->ata_task.use_ncq)
+ flags |= MCH_FPDMA;
+ if (dev->sata_dev.class == ATA_DEV_ATAPI) {
+ if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+ flags |= MCH_ATAPI;
+ }
+
+ hdr->flags = cpu_to_le32(flags);
+
+ if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ else
+ hdr_tag = tag;
+
+ hdr->tags = cpu_to_le32(hdr_tag);
+
+ hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+ /*
+ * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+ */
+
+ /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+ buf_cmd = buf_tmp = slot->buf;
+ buf_tmp_dma = slot->buf_dma;
+
+ hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_ATA_CMD_SZ;
+ buf_tmp_dma += MVS_ATA_CMD_SZ;
+
+ /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+ /* used for STP. unused for SATA? */
+ buf_oaf = buf_tmp;
+ hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_OAF_SZ;
+ buf_tmp_dma += MVS_OAF_SZ;
+
+ /* region 3: PRD table ********************************************* */
+ buf_prd = buf_tmp;
+
+ if (tei->n_elem)
+ hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+ else
+ hdr->prd_tbl = 0;
+ i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
+
+ buf_tmp += i;
+ buf_tmp_dma += i;
+
+ /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+ slot->response = buf_tmp;
+ hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+ if (mvi->flags & MVF_FLAG_SOC)
+ hdr->reserved[0] = 0;
+
+ req_len = sizeof(struct host_to_dev_fis);
+ resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
+ sizeof(struct mvs_err_info) - i;
+
+ /* request, response lengths */
+ resp_len = min(resp_len, max_resp_len);
+ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ /* fill in command FIS and ATAPI CDB */
+ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+ if (dev->sata_dev.class == ATA_DEV_ATAPI)
+ memcpy(buf_cmd + STP_ATAPI_CMD,
+ task->ata_task.atapi_packet, 16);
+
+ /* generate open address frame hdr (first 12 bytes) */
+ /* initiator, STP, ftype 1h */
+ buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
+ buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
+ *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+ /* fill in PRD (scatter/gather) table, if any */
+ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+
+ if (task->data_dir == DMA_FROM_DEVICE)
+ MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
+ TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
+
+ return 0;
+}
+
+static int mvs_task_prep_ssp(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei, int is_tmf,
+ struct mvs_tmf_task *tmf)
+{
+ struct sas_task *task = tei->task;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct mvs_port *port = tei->port;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct asd_sas_port *sas_port = dev->port;
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ struct ssp_frame_hdr *ssp_hdr;
+ void *buf_tmp;
+ u8 *buf_cmd, *buf_oaf, fburst = 0;
+ dma_addr_t buf_tmp_dma;
+ u32 flags;
+ u32 resp_len, req_len, i, tag = tei->tag;
+ const u32 max_resp_len = SB_RFB_MAX;
+ u32 phy_mask;
+
+ slot = &mvi->slot_info[tag];
+
+ phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
+ sas_port->phy_mask) & TXQ_PHY_MASK;
+
+ slot->tx = mvi->tx_prod;
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+ (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
+ (phy_mask << TXQ_PHY_SHIFT));
+
+ flags = MCH_RETRY;
+ if (task->ssp_task.enable_first_burst) {
+ flags |= MCH_FBURST;
+ fburst = (1 << 7);
+ }
+ if (is_tmf)
+ flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
+ else
+ flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
+
+ hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
+ hdr->tags = cpu_to_le32(tag);
+ hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+ /*
+ * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+ */
+
+ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+ buf_cmd = buf_tmp = slot->buf;
+ buf_tmp_dma = slot->buf_dma;
+
+ hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_SSP_CMD_SZ;
+ buf_tmp_dma += MVS_SSP_CMD_SZ;
+
+ /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+ buf_oaf = buf_tmp;
+ hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_OAF_SZ;
+ buf_tmp_dma += MVS_OAF_SZ;
+
+ /* region 3: PRD table ********************************************* */
+ buf_prd = buf_tmp;
+ if (tei->n_elem)
+ hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+ else
+ hdr->prd_tbl = 0;
+
+ i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+ buf_tmp += i;
+ buf_tmp_dma += i;
+
+ /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
+ slot->response = buf_tmp;
+ hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+ if (mvi->flags & MVF_FLAG_SOC)
+ hdr->reserved[0] = 0;
+
+ resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
+ sizeof(struct mvs_err_info) - i;
+ resp_len = min(resp_len, max_resp_len);
+
+ req_len = sizeof(struct ssp_frame_hdr) + 28;
+
+ /* request, response lengths */
+ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+ /* generate open address frame hdr (first 12 bytes) */
+ /* initiator, SSP, ftype 1h */
+ buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
+ buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
+ *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+ /* fill in SSP frame header (Command Table.SSP frame header) */
+ ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
+
+ if (is_tmf)
+ ssp_hdr->frame_type = SSP_TASK;
+ else
+ ssp_hdr->frame_type = SSP_COMMAND;
+
+ memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
+ HASHED_SAS_ADDR_SIZE);
+ memcpy(ssp_hdr->hashed_src_addr,
+ dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+ ssp_hdr->tag = cpu_to_be16(tag);
+
+ /* fill in IU for TASK and Command Frame */
+ buf_cmd += sizeof(*ssp_hdr);
+ memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+
+ if (ssp_hdr->frame_type != SSP_TASK) {
+ buf_cmd[9] = fburst | task->ssp_task.task_attr |
+ (task->ssp_task.task_prio << 3);
+ memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
+ task->ssp_task.cmd->cmd_len);
+ } else{
+ buf_cmd[10] = tmf->tmf;
+ switch (tmf->tmf) {
+ case TMF_ABORT_TASK:
+ case TMF_QUERY_TASK:
+ buf_cmd[12] =
+ (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+ buf_cmd[13] =
+ tmf->tag_of_task_to_be_managed & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+ /* fill in PRD (scatter/gather) table, if any */
+ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+ return 0;
+}
+
+#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
+static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
+ struct mvs_tmf_task *tmf, int *pass)
+{
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_task_exec_info tei;
+ struct mvs_slot_info *slot;
+ u32 tag = 0xdeadbeef, n_elem = 0;
+ int rc = 0;
+
+ if (!dev->port) {
+ struct task_status_struct *tsm = &task->task_status;
+
+ tsm->resp = SAS_TASK_UNDELIVERED;
+ tsm->stat = SAS_PHY_DOWN;
+ /*
+ * libsas will use dev->port, should
+ * not call task_done for sata
+ */
+ if (dev->dev_type != SAS_SATA_DEV)
+ task->task_done(task);
+ return rc;
+ }
+
+ if (DEV_IS_GONE(mvi_dev)) {
+ if (mvi_dev)
+ mv_dprintk("device %d not ready.\n",
+ mvi_dev->device_id);
+ else
+ mv_dprintk("device %016llx not ready.\n",
+ SAS_ADDR(dev->sas_addr));
+
+ rc = SAS_PHY_DOWN;
+ return rc;
+ }
+ tei.port = dev->port->lldd_port;
+ if (tei.port && !tei.port->port_attached && !tmf) {
+ if (sas_protocol_ata(task->task_proto)) {
+ struct task_status_struct *ts = &task->task_status;
+ mv_dprintk("SATA/STP port %d does not attach"
+ "device.\n", dev->port->id);
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PHY_DOWN;
+
+ task->task_done(task);
+
+ } else {
+ struct task_status_struct *ts = &task->task_status;
+ mv_dprintk("SAS port %d does not attach"
+ "device.\n", dev->port->id);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ task->task_done(task);
+ }
+ return rc;
+ }
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ if (task->num_scatter) {
+ n_elem = dma_map_sg(mvi->dev,
+ task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ if (!n_elem) {
+ rc = -ENOMEM;
+ goto prep_out;
+ }
+ }
+ } else {
+ n_elem = task->num_scatter;
+ }
+
+ rc = mvs_tag_alloc(mvi, &tag);
+ if (rc)
+ goto err_out;
+
+ slot = &mvi->slot_info[tag];
+
+ task->lldd_task = NULL;
+ slot->n_elem = n_elem;
+ slot->slot_tag = tag;
+
+ slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
+ if (!slot->buf)
+ goto err_out_tag;
+ memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+
+ tei.task = task;
+ tei.hdr = &mvi->slot[tag];
+ tei.tag = tag;
+ tei.n_elem = n_elem;
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ rc = mvs_task_prep_smp(mvi, &tei);
+ break;
+ case SAS_PROTOCOL_SSP:
+ rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ rc = mvs_task_prep_ata(mvi, &tei);
+ break;
+ default:
+ dev_printk(KERN_ERR, mvi->dev,
+ "unknown sas_task proto: 0x%x\n",
+ task->task_proto);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ mv_dprintk("rc is %x\n", rc);
+ goto err_out_slot_buf;
+ }
+ slot->task = task;
+ slot->port = tei.port;
+ task->lldd_task = slot;
+ list_add_tail(&slot->entry, &tei.port->list);
+ spin_lock(&task->task_state_lock);
+ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock(&task->task_state_lock);
+
+ mvi_dev->running_req++;
+ ++(*pass);
+ mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+
+ return rc;
+
+err_out_slot_buf:
+ pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
+err_out_tag:
+ mvs_tag_free(mvi, tag);
+err_out:
+
+ dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
+ if (!sas_protocol_ata(task->task_proto))
+ if (n_elem)
+ dma_unmap_sg(mvi->dev, task->scatter, n_elem,
+ task->data_dir);
+prep_out:
+ return rc;
+}
+
+static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
+ struct completion *completion, int is_tmf,
+ struct mvs_tmf_task *tmf)
+{
+ struct mvs_info *mvi = NULL;
+ u32 rc = 0;
+ u32 pass = 0;
+ unsigned long flags = 0;
+
+ mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
+
+ spin_lock_irqsave(&mvi->lock, flags);
+ rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
+ if (rc)
+ dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
+
+ if (likely(pass))
+ MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
+ (MVS_CHIP_SLOT_SZ - 1));
+ spin_unlock_irqrestore(&mvi->lock, flags);
+
+ return rc;
+}
+
+int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
+{
+ return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
+}
+
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+ u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+ mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+ struct mvs_slot_info *slot, u32 slot_idx)
+{
+ if (!slot->task)
+ return;
+ if (!sas_protocol_ata(task->task_proto))
+ if (slot->n_elem)
+ dma_unmap_sg(mvi->dev, task->scatter,
+ slot->n_elem, task->data_dir);
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SSP:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ if (slot->buf) {
+ pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
+ slot->buf = NULL;
+ }
+ list_del_init(&slot->entry);
+ task->lldd_task = NULL;
+ slot->task = NULL;
+ slot->port = NULL;
+ slot->slot_tag = 0xFFFFFFFF;
+ mvs_slot_free(mvi, slot_idx);
+}
+
+static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
+{
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct mvs_port *port = phy->port;
+ int j, no;
+
+ for_each_phy(port->wide_port_phymap, j, no) {
+ if (j & 1) {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+ PHYR_WIDE_PORT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+ port->wide_port_phymap);
+ } else {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+ PHYR_WIDE_PORT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+ 0);
+ }
+ }
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+ u32 tmp;
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct mvs_port *port = phy->port;
+
+ tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
+ if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+ if (!port)
+ phy->phy_attached = 1;
+ return tmp;
+ }
+
+ if (port) {
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ port->wide_port_phymap &= ~(1U << i);
+ if (!port->wide_port_phymap)
+ port->port_attached = 0;
+ mvs_update_wideport(mvi, i);
+ } else if (phy->phy_type & PORT_TYPE_SATA)
+ port->port_attached = 0;
+ phy->port = NULL;
+ phy->phy_attached = 0;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ }
+ return 0;
+}
+
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+ u32 *s = (u32 *) buf;
+
+ if (!s)
+ return NULL;
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+ s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+ s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+ s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+ s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
+
+ if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
+ s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
+
+ return s;
+}
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+ return irq_status & PHYEV_SIG_FIS;
+}
+
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+ if (phy->timer.function)
+ del_timer(&phy->timer);
+ phy->timer.function = NULL;
+}
+
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct sas_identify_frame *id;
+
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+
+ if (get_st) {
+ phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
+ phy->phy_status = mvs_is_phy_ready(mvi, i);
+ }
+
+ if (phy->phy_status) {
+ int oob_done = 0;
+ struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
+
+ oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
+
+ MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
+ if (phy->phy_type & PORT_TYPE_SATA) {
+ phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
+ if (mvs_is_sig_fis_received(phy->irq_status)) {
+ mvs_sig_remove_timer(phy);
+ phy->phy_attached = 1;
+ phy->att_dev_sas_addr =
+ i + mvi->id * mvi->chip->n_phy;
+ if (oob_done)
+ sas_phy->oob_mode = SATA_OOB_MODE;
+ phy->frame_rcvd_size =
+ sizeof(struct dev_to_host_fis);
+ mvs_get_d2h_reg(mvi, i, id);
+ } else {
+ u32 tmp;
+ dev_printk(KERN_DEBUG, mvi->dev,
+ "Phy%d : No sig fis\n", i);
+ tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
+ MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
+ tmp | PHYEV_SIG_FIS);
+ phy->phy_attached = 0;
+ phy->phy_type &= ~PORT_TYPE_SATA;
+ goto out_done;
+ }
+ } else if (phy->phy_type & PORT_TYPE_SAS
+ || phy->att_dev_info & PORT_SSP_INIT_MASK) {
+ phy->phy_attached = 1;
+ phy->identify.device_type =
+ phy->att_dev_info & PORT_DEV_TYPE_MASK;
+
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SMP;
+ if (oob_done)
+ sas_phy->oob_mode = SAS_OOB_MODE;
+ phy->frame_rcvd_size =
+ sizeof(struct sas_identify_frame);
+ }
+ memcpy(sas_phy->attached_sas_addr,
+ &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
+
+ if (MVS_CHIP_DISP->phy_work_around)
+ MVS_CHIP_DISP->phy_work_around(mvi, i);
+ }
+ mv_dprintk("phy %d attach dev info is %x\n",
+ i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
+ mv_dprintk("phy %d attach sas addr is %llx\n",
+ i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
+out_done:
+ if (get_st)
+ MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
+}
+
+static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
+{
+ struct sas_ha_struct *sas_ha = sas_phy->ha;
+ struct mvs_info *mvi = NULL; int i = 0, hi;
+ struct mvs_phy *phy = sas_phy->lldd_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct mvs_port *port;
+ unsigned long flags = 0;
+ if (!sas_port)
+ return;
+
+ while (sas_ha->sas_phy[i]) {
+ if (sas_ha->sas_phy[i] == sas_phy)
+ break;
+ i++;
+ }
+ hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
+ if (i >= mvi->chip->n_phy)
+ port = &mvi->port[i - mvi->chip->n_phy];
+ else
+ port = &mvi->port[i];
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
+ port->port_attached = 1;
+ phy->port = port;
+ sas_port->lldd_port = port;
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ port->wide_port_phymap = sas_port->phy_mask;
+ mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
+ mvs_update_wideport(mvi, sas_phy->id);
+
+ /* direct attached SAS device */
+ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
+ }
+ }
+ if (lock)
+ spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
+{
+ struct domain_device *dev;
+ struct mvs_phy *phy = sas_phy->lldd_phy;
+ struct mvs_info *mvi = phy->mvi;
+ struct asd_sas_port *port = sas_phy->port;
+ int phy_no = 0;
+
+ while (phy != &mvi->phy[phy_no]) {
+ phy_no++;
+ if (phy_no >= MVS_MAX_PHYS)
+ return;
+ }
+ list_for_each_entry(dev, &port->dev_list, dev_list_node)
+ mvs_do_release_task(phy->mvi, phy_no, dev);
+
+}
+
+
+void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+ mvs_port_notify_formed(sas_phy, 1);
+}
+
+void mvs_port_deformed(struct asd_sas_phy *sas_phy)
+{
+ mvs_port_notify_deformed(sas_phy, 1);
+}
+
+struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+{
+ u32 dev;
+ for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
+ if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
+ mvi->devices[dev].device_id = dev;
+ return &mvi->devices[dev];
+ }
+ }
+
+ if (dev == MVS_MAX_DEVICES)
+ mv_printk("max support %d devices, ignore ..\n",
+ MVS_MAX_DEVICES);
+
+ return NULL;
+}
+
+void mvs_free_dev(struct mvs_device *mvi_dev)
+{
+ u32 id = mvi_dev->device_id;
+ memset(mvi_dev, 0, sizeof(*mvi_dev));
+ mvi_dev->device_id = id;
+ mvi_dev->dev_type = SAS_PHY_UNUSED;
+ mvi_dev->dev_status = MVS_DEV_NORMAL;
+ mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
+}
+
+int mvs_dev_found_notify(struct domain_device *dev, int lock)
+{
+ unsigned long flags = 0;
+ int res = 0;
+ struct mvs_info *mvi = NULL;
+ struct domain_device *parent_dev = dev->parent;
+ struct mvs_device *mvi_device;
+
+ mvi = mvs_find_dev_mvi(dev);
+
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
+
+ mvi_device = mvs_alloc_dev(mvi);
+ if (!mvi_device) {
+ res = -1;
+ goto found_out;
+ }
+ dev->lldd_dev = mvi_device;
+ mvi_device->dev_status = MVS_DEV_NORMAL;
+ mvi_device->dev_type = dev->dev_type;
+ mvi_device->mvi_info = mvi;
+ mvi_device->sas_device = dev;
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+ int phy_id;
+ u8 phy_num = parent_dev->ex_dev.num_phys;
+ struct ex_phy *phy;
+ for (phy_id = 0; phy_id < phy_num; phy_id++) {
+ phy = &parent_dev->ex_dev.ex_phy[phy_id];
+ if (SAS_ADDR(phy->attached_sas_addr) ==
+ SAS_ADDR(dev->sas_addr)) {
+ mvi_device->attached_phy = phy_id;
+ break;
+ }
+ }
+
+ if (phy_id == phy_num) {
+ mv_printk("Error: no attached dev:%016llx"
+ "at ex:%016llx.\n",
+ SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(parent_dev->sas_addr));
+ res = -1;
+ }
+ }
+
+found_out:
+ if (lock)
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ return res;
+}
+
+int mvs_dev_found(struct domain_device *dev)
+{
+ return mvs_dev_found_notify(dev, 1);
+}
+
+void mvs_dev_gone_notify(struct domain_device *dev)
+{
+ unsigned long flags = 0;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_info *mvi;
+
+ if (!mvi_dev) {
+ mv_dprintk("found dev has gone.\n");
+ return;
+ }
+
+ mvi = mvi_dev->mvi_info;
+
+ spin_lock_irqsave(&mvi->lock, flags);
+
+ mv_dprintk("found dev[%d:%x] is gone.\n",
+ mvi_dev->device_id, mvi_dev->dev_type);
+ mvs_release_task(mvi, dev);
+ mvs_free_reg_set(mvi, mvi_dev);
+ mvs_free_dev(mvi_dev);
+
+ dev->lldd_dev = NULL;
+ mvi_dev->sas_device = NULL;
+
+ spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+
+void mvs_dev_gone(struct domain_device *dev)
+{
+ mvs_dev_gone_notify(dev);
+}
+
+static void mvs_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->slow_task->timer))
+ return;
+ complete(&task->slow_task->completion);
+}
+
+static void mvs_tmf_timedout(unsigned long data)
+{
+ struct sas_task *task = (struct sas_task *)data;
+
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+}
+
+#define MVS_TASK_TIMEOUT 20
+static int mvs_exec_internal_tmf_task(struct domain_device *dev,
+ void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+
+ for (retry = 0; retry < 3; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+
+ memcpy(&task->ssp_task, parameter, para_len);
+ task->task_done = mvs_task_done;
+
+ task->slow_task->timer.data = (unsigned long) task;
+ task->slow_task->timer.function = mvs_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
+ add_timer(&task->slow_task->timer);
+
+ res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
+
+ if (res) {
+ del_timer(&task->slow_task->timer);
+ mv_printk("executing internal task failed:%d\n", res);
+ goto ex_err;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+ res = TMF_RESP_FUNC_FAILED;
+ /* Even TMF timed out, return direct. */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
+ goto ex_err;
+ }
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ mv_dprintk("blocked task error.\n");
+ res = -EMSGSIZE;
+ break;
+ } else {
+ mv_dprintk(" task to dev %016llx response: 0x%x "
+ "status 0x%x\n",
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ sas_free_task(task);
+ task = NULL;
+
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
+ u8 *lun, struct mvs_tmf_task *tmf)
+{
+ struct sas_ssp_task ssp_task;
+ if (!(dev->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ memcpy(ssp_task.LUN, lun, 8);
+
+ return mvs_exec_internal_tmf_task(dev, &ssp_task,
+ sizeof(ssp_task), tmf);
+}
+
+
+/* Standard mandates link reset for ATA (type 0)
+ and hard reset for SSP (type 1) , only for RECOVERY */
+static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
+{
+ int rc;
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ int reset_type = (dev->dev_type == SAS_SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+ rc = sas_phy_reset(phy, reset_type);
+ sas_put_local_phy(phy);
+ msleep(2000);
+ return rc;
+}
+
+/* mandatory SAM-3 */
+int mvs_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ unsigned long flags;
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+ struct mvs_device * mvi_dev = dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ tmf_task.tmf = TMF_LU_RESET;
+ mvi_dev->dev_status = MVS_DEV_EH;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&mvi->lock, flags);
+ mvs_release_task(mvi, dev);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ }
+ /* If failed, fall-through I_T_Nexus reset */
+ mv_printk("%s for device[%x]:rc= %d\n", __func__,
+ mvi_dev->device_id, rc);
+ return rc;
+}
+
+int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+ unsigned long flags;
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ if (mvi_dev->dev_status != MVS_DEV_EH)
+ return TMF_RESP_FUNC_COMPLETE;
+ else
+ mvi_dev->dev_status = MVS_DEV_NORMAL;
+ rc = mvs_debug_I_T_nexus_reset(dev);
+ mv_printk("%s for device[%x]:rc= %d\n",
+ __func__, mvi_dev->device_id, rc);
+
+ spin_lock_irqsave(&mvi->lock, flags);
+ mvs_release_task(mvi, dev);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+
+ return rc;
+}
+/* optional SAM-3 */
+int mvs_query_task(struct sas_task *task)
+{
+ u32 tag;
+ struct scsi_lun lun;
+ struct mvs_tmf_task tmf_task;
+ int rc = TMF_RESP_FUNC_FAILED;
+
+ if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+ struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ rc = mvs_find_tag(mvi, task, &tag);
+ if (rc == 0) {
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+
+ tmf_task.tmf = TMF_QUERY_TASK;
+ tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+ rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ switch (rc) {
+ /* The task is still in Lun, release it then */
+ case TMF_RESP_FUNC_SUCC:
+ /* The task is not in Lun or failed, reset the phy */
+ case TMF_RESP_FUNC_FAILED:
+ case TMF_RESP_FUNC_COMPLETE:
+ break;
+ }
+ }
+ mv_printk("%s:rc= %d\n", __func__, rc);
+ return rc;
+}
+
+/* mandatory SAM-3, still need free task/slot info */
+int mvs_abort_task(struct sas_task *task)
+{
+ struct scsi_lun lun;
+ struct mvs_tmf_task tmf_task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi;
+ int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ u32 tag;
+
+ if (!mvi_dev) {
+ mv_printk("Device has removed\n");
+ return TMF_RESP_FUNC_FAILED;
+ }
+
+ mvi = mvi_dev->mvi_info;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ mvi_dev->dev_status = MVS_DEV_EH;
+ if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+ struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ rc = mvs_find_tag(mvi, task, &tag);
+ if (rc == 0) {
+ mv_printk("No such tag in %s\n", __func__);
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+
+ tmf_task.tmf = TMF_ABORT_TASK;
+ tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+ rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+
+ /* if successful, clear the task and callback forwards.*/
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ u32 slot_no;
+ struct mvs_slot_info *slot;
+
+ if (task->lldd_task) {
+ slot = task->lldd_task;
+ slot_no = (u32) (slot - mvi->slot_info);
+ spin_lock_irqsave(&mvi->lock, flags);
+ mvs_slot_complete(mvi, slot_no, 1);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ }
+ }
+
+ } else if (task->task_proto & SAS_PROTOCOL_SATA ||
+ task->task_proto & SAS_PROTOCOL_STP) {
+ if (SAS_SATA_DEV == dev->dev_type) {
+ struct mvs_slot_info *slot = task->lldd_task;
+ u32 slot_idx = (u32)(slot - mvi->slot_info);
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=x%x\n",
+ mvi, task, slot, slot_idx);
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ }
+out:
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ mv_printk("%s:rc= %d\n", __func__, rc);
+ return rc;
+}
+
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_ABORT_TASK_SET;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+int mvs_clear_aca(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_CLEAR_ACA;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_CLEAR_TASK_SET;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+ u32 slot_idx, int err)
+{
+ struct mvs_device *mvi_dev = task->dev->lldd_dev;
+ struct task_status_struct *tstat = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+ int stat = SAM_STAT_GOOD;
+
+
+ resp->frame_len = sizeof(struct dev_to_host_fis);
+ memcpy(&resp->ending_fis[0],
+ SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
+ sizeof(struct dev_to_host_fis));
+ tstat->buf_valid_size = sizeof(*resp);
+ if (unlikely(err)) {
+ if (unlikely(err & CMD_ISS_STPD))
+ stat = SAS_OPEN_REJECT;
+ else
+ stat = SAS_PROTO_RESPONSE;
+ }
+
+ return stat;
+}
+
+void mvs_set_sense(u8 *buffer, int len, int d_sense,
+ int key, int asc, int ascq)
+{
+ memset(buffer, 0, len);
+
+ if (d_sense) {
+ /* Descriptor format */
+ if (len < 4) {
+ mv_printk("Length %d of sense buffer too small to "
+ "fit sense %x:%x:%x", len, key, asc, ascq);
+ }
+
+ buffer[0] = 0x72; /* Response Code */
+ if (len > 1)
+ buffer[1] = key; /* Sense Key */
+ if (len > 2)
+ buffer[2] = asc; /* ASC */
+ if (len > 3)
+ buffer[3] = ascq; /* ASCQ */
+ } else {
+ if (len < 14) {
+ mv_printk("Length %d of sense buffer too small to "
+ "fit sense %x:%x:%x", len, key, asc, ascq);
+ }
+
+ buffer[0] = 0x70; /* Response Code */
+ if (len > 2)
+ buffer[2] = key; /* Sense Key */
+ if (len > 7)
+ buffer[7] = 0x0a; /* Additional Sense Length */
+ if (len > 12)
+ buffer[12] = asc; /* ASC */
+ if (len > 13)
+ buffer[13] = ascq; /* ASCQ */
+ }
+
+ return;
+}
+
+void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
+ u8 key, u8 asc, u8 asc_q)
+{
+ iu->datapres = 2;
+ iu->response_data_len = 0;
+ iu->sense_data_len = 17;
+ iu->status = 02;
+ mvs_set_sense(iu->sense_data, 17, 0,
+ key, asc, asc_q);
+}
+
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+ u32 slot_idx)
+{
+ struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+ int stat;
+ u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
+ u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
+ u32 tfs = 0;
+ enum mvs_port_type type = PORT_TYPE_SAS;
+
+ if (err_dw0 & CMD_ISS_STPD)
+ MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
+
+ MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+ stat = SAM_STAT_CHECK_CONDITION;
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ {
+ stat = SAS_ABORTED_TASK;
+ if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
+ struct ssp_response_iu *iu = slot->response +
+ sizeof(struct mvs_err_info);
+ mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
+ sas_ssp_task_response(mvi->dev, task, iu);
+ stat = SAM_STAT_CHECK_CONDITION;
+ }
+ if (err_dw1 & bit(31))
+ mv_printk("reuse same slot, retry command.\n");
+ break;
+ }
+ case SAS_PROTOCOL_SMP:
+ stat = SAM_STAT_CHECK_CONDITION;
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ {
+ task->ata_task.use_ncq = 0;
+ stat = SAS_PROTO_RESPONSE;
+ mvs_sata_done(mvi, task, slot_idx, err_dw0);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return stat;
+}
+
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+ u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+ struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+ struct sas_task *task = slot->task;
+ struct mvs_device *mvi_dev = NULL;
+ struct task_status_struct *tstat;
+ struct domain_device *dev;
+ u32 aborted;
+
+ void *to;
+ enum exec_status sts;
+
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return -1;
+
+ tstat = &task->task_status;
+ dev = task->dev;
+ mvi_dev = dev->lldd_dev;
+
+ spin_lock(&task->task_state_lock);
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ /* race condition*/
+ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+ spin_unlock(&task->task_state_lock);
+
+ memset(tstat, 0, sizeof(*tstat));
+ tstat->resp = SAS_TASK_COMPLETE;
+
+ if (unlikely(aborted)) {
+ tstat->stat = SAS_ABORTED_TASK;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto))
+ mvs_free_reg_set(mvi, mvi_dev);
+
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ return -1;
+ }
+
+ /* when no device attaching, go ahead and complete by error handling*/
+ if (unlikely(!mvi_dev || flags)) {
+ if (!mvi_dev)
+ mv_dprintk("port has not device.\n");
+ tstat->stat = SAS_PHY_DOWN;
+ goto out;
+ }
+
+ /*
+ * error info record present; slot->response is 32 bit aligned but may
+ * not be 64 bit aligned, so check for zero in two 32 bit reads
+ */
+ if (unlikely((rx_desc & RXQ_ERR)
+ && (*((u32 *)slot->response)
+ || *(((u32 *)slot->response) + 1)))) {
+ mv_dprintk("port %d slot %d rx_desc %X has error info"
+ "%016llX.\n", slot->port->sas_port.id, slot_idx,
+ rx_desc, get_unaligned_le64(slot->response));
+ tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+ tstat->resp = SAS_TASK_COMPLETE;
+ goto out;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ /* hw says status == 0, datapres == 0 */
+ if (rx_desc & RXQ_GOOD) {
+ tstat->stat = SAM_STAT_GOOD;
+ tstat->resp = SAS_TASK_COMPLETE;
+ }
+ /* response frame present */
+ else if (rx_desc & RXQ_RSP) {
+ struct ssp_response_iu *iu = slot->response +
+ sizeof(struct mvs_err_info);
+ sas_ssp_task_response(mvi->dev, task, iu);
+ } else
+ tstat->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ tstat->stat = SAM_STAT_GOOD;
+ to = kmap_atomic(sg_page(sg_resp));
+ memcpy(to + sg_resp->offset,
+ slot->response + sizeof(struct mvs_err_info),
+ sg_dma_len(sg_resp));
+ kunmap_atomic(to);
+ break;
+ }
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+ tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+ break;
+ }
+
+ default:
+ tstat->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+ if (!slot->port->port_attached) {
+ mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
+ tstat->stat = SAS_PHY_DOWN;
+ }
+
+
+out:
+ if (mvi_dev && mvi_dev->running_req) {
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
+ mvs_free_reg_set(mvi, mvi_dev);
+ }
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ sts = tstat->stat;
+
+ spin_unlock(&mvi->lock);
+ if (task->task_done)
+ task->task_done(task);
+
+ spin_lock(&mvi->lock);
+
+ return sts;
+}
+
+void mvs_do_release_task(struct mvs_info *mvi,
+ int phy_no, struct domain_device *dev)
+{
+ u32 slot_idx;
+ struct mvs_phy *phy;
+ struct mvs_port *port;
+ struct mvs_slot_info *slot, *slot2;
+
+ phy = &mvi->phy[phy_no];
+ port = phy->port;
+ if (!port)
+ return;
+ /* clean cmpl queue in case request is already finished */
+ mvs_int_rx(mvi, false);
+
+
+
+ list_for_each_entry_safe(slot, slot2, &port->list, entry) {
+ struct sas_task *task;
+ slot_idx = (u32) (slot - mvi->slot_info);
+ task = slot->task;
+
+ if (dev && task->dev != dev)
+ continue;
+
+ mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
+ slot_idx, slot->slot_tag, task);
+ MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+ mvs_slot_complete(mvi, slot_idx, 1);
+ }
+}
+
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev)
+{
+ int i, phyno[WIDE_PORT_MAX_PHY], num;
+ num = mvs_find_dev_phyno(dev, phyno);
+ for (i = 0; i < num; i++)
+ mvs_do_release_task(mvi, phyno[i], dev);
+}
+
+static void mvs_phy_disconnected(struct mvs_phy *phy)
+{
+ phy->phy_attached = 0;
+ phy->att_dev_info = 0;
+ phy->att_dev_sas_addr = 0;
+}
+
+static void mvs_work_queue(struct work_struct *work)
+{
+ struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
+ struct mvs_info *mvi = mwq->mvi;
+ unsigned long flags;
+ u32 phy_no = (unsigned long) mwq->data;
+ struct sas_ha_struct *sas_ha = mvi->sas;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ spin_lock_irqsave(&mvi->lock, flags);
+ if (mwq->handler & PHY_PLUG_EVENT) {
+
+ if (phy->phy_event & PHY_PLUG_OUT) {
+ u32 tmp;
+ struct sas_identify_frame *id;
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+ tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
+ phy->phy_event &= ~PHY_PLUG_OUT;
+ if (!(tmp & PHY_READY_MASK)) {
+ sas_phy_disconnected(sas_phy);
+ mvs_phy_disconnected(phy);
+ sas_ha->notify_phy_event(sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ mv_dprintk("phy%d Removed Device\n", phy_no);
+ } else {
+ MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ mvs_update_phyinfo(mvi, phy_no, 1);
+ mvs_bytes_dmaed(mvi, phy_no);
+ mvs_port_notify_formed(sas_phy, 0);
+ mv_dprintk("phy%d Attached Device\n", phy_no);
+ }
+ }
+ } else if (mwq->handler & EXP_BRCT_CHG) {
+ phy->phy_event &= ~EXP_BRCT_CHG;
+ sas_ha->notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD);
+ mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
+ }
+ list_del(&mwq->entry);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ kfree(mwq);
+}
+
+static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
+{
+ struct mvs_wq *mwq;
+ int ret = 0;
+
+ mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
+ if (mwq) {
+ mwq->mvi = mvi;
+ mwq->data = data;
+ mwq->handler = handler;
+ MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
+ list_add_tail(&mwq->entry, &mvi->wq_list);
+ schedule_delayed_work(&mwq->work_q, HZ * 2);
+ } else
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static void mvs_sig_time_out(unsigned long tphy)
+{
+ struct mvs_phy *phy = (struct mvs_phy *)tphy;
+ struct mvs_info *mvi = phy->mvi;
+ u8 phy_no;
+
+ for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
+ if (&mvi->phy[phy_no] == phy) {
+ mv_dprintk("Get signature time out, reset phy %d\n",
+ phy_no+mvi->id*mvi->chip->n_phy);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
+ }
+ }
+}
+
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+ u32 tmp;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+
+ phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
+ MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+ mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+ MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
+ mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
+ phy->irq_status);
+
+ /*
+ * events is port event now ,
+ * we need check the interrupt status which belongs to per port.
+ */
+
+ if (phy->irq_status & PHYEV_DCDR_ERR) {
+ mv_dprintk("phy %d STP decoding error.\n",
+ phy_no + mvi->id*mvi->chip->n_phy);
+ }
+
+ if (phy->irq_status & PHYEV_POOF) {
+ mdelay(500);
+ if (!(phy->phy_event & PHY_PLUG_OUT)) {
+ int dev_sata = phy->phy_type & PORT_TYPE_SATA;
+ int ready;
+ mvs_do_release_task(mvi, phy_no, NULL);
+ phy->phy_event |= PHY_PLUG_OUT;
+ MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
+ mvs_handle_event(mvi,
+ (void *)(unsigned long)phy_no,
+ PHY_PLUG_EVENT);
+ ready = mvs_is_phy_ready(mvi, phy_no);
+ if (ready || dev_sata) {
+ if (MVS_CHIP_DISP->stp_reset)
+ MVS_CHIP_DISP->stp_reset(mvi,
+ phy_no);
+ else
+ MVS_CHIP_DISP->phy_reset(mvi,
+ phy_no, MVS_SOFT_RESET);
+ return;
+ }
+ }
+ }
+
+ if (phy->irq_status & PHYEV_COMWAKE) {
+ tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
+ MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
+ tmp | PHYEV_SIG_FIS);
+ if (phy->timer.function == NULL) {
+ phy->timer.data = (unsigned long)phy;
+ phy->timer.function = mvs_sig_time_out;
+ phy->timer.expires = jiffies + 5*HZ;
+ add_timer(&phy->timer);
+ }
+ }
+ if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
+ phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
+ mv_dprintk("notify plug in on phy[%d]\n", phy_no);
+ if (phy->phy_status) {
+ mdelay(10);
+ MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ if (phy->phy_type & PORT_TYPE_SATA) {
+ tmp = MVS_CHIP_DISP->read_port_irq_mask(
+ mvi, phy_no);
+ tmp &= ~PHYEV_SIG_FIS;
+ MVS_CHIP_DISP->write_port_irq_mask(mvi,
+ phy_no, tmp);
+ }
+ mvs_update_phyinfo(mvi, phy_no, 0);
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
+ mdelay(10);
+ }
+
+ mvs_bytes_dmaed(mvi, phy_no);
+ /* whether driver is going to handle hot plug */
+ if (phy->phy_event & PHY_PLUG_OUT) {
+ mvs_port_notify_formed(&phy->sas_phy, 0);
+ phy->phy_event &= ~PHY_PLUG_OUT;
+ }
+ } else {
+ mv_dprintk("plugin interrupt but phy%d is gone\n",
+ phy_no + mvi->id*mvi->chip->n_phy);
+ }
+ } else if (phy->irq_status & PHYEV_BROAD_CH) {
+ mv_dprintk("phy %d broadcast change.\n",
+ phy_no + mvi->id*mvi->chip->n_phy);
+ mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
+ EXP_BRCT_CHG);
+ }
+}
+
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
+{
+ u32 rx_prod_idx, rx_desc;
+ bool attn = false;
+
+ /* the first dword in the RX ring is special: it contains
+ * a mirror of the hardware's RX producer index, so that
+ * we don't have to stall the CPU reading that register.
+ * The actual RX ring is offset by one dword, due to this.
+ */
+ rx_prod_idx = mvi->rx_cons;
+ mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+ if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
+ return 0;
+
+ /* The CMPL_Q may come late, read from register and try again
+ * note: if coalescing is enabled,
+ * it will need to read from register every time for sure
+ */
+ if (unlikely(mvi->rx_cons == rx_prod_idx))
+ mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
+
+ if (mvi->rx_cons == rx_prod_idx)
+ return 0;
+
+ while (mvi->rx_cons != rx_prod_idx) {
+ /* increment our internal RX consumer pointer */
+ rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
+ rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
+
+ if (likely(rx_desc & RXQ_DONE))
+ mvs_slot_complete(mvi, rx_desc, 0);
+ if (rx_desc & RXQ_ATTN) {
+ attn = true;
+ } else if (rx_desc & RXQ_ERR) {
+ if (!(rx_desc & RXQ_DONE))
+ mvs_slot_complete(mvi, rx_desc, 0);
+ } else if (rx_desc & RXQ_SLOT_RESET) {
+ mvs_slot_free(mvi, rx_desc);
+ }
+ }
+
+ if (attn && self_clear)
+ MVS_CHIP_DISP->int_full(mvi);
+ return 0;
+}
+
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 000000000..dc409c047
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,480 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_SAS_H_
+#define _MV_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/unaligned.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include "mv_defs.h"
+
+#define DRV_NAME "mvsas"
+#define DRV_VERSION "0.8.16"
+#define MVS_ID_NOT_MAPPED 0x7f
+#define WIDE_PORT_MAX_PHY 4
+#define mv_printk(fmt, arg ...) \
+ printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
+#ifdef MV_DEBUG
+#define mv_dprintk(format, arg...) \
+ printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
+#else
+#define mv_dprintk(format, arg...)
+#endif
+#define MV_MAX_U32 0xffffffff
+
+extern int interrupt_coalescing;
+extern struct mvs_tgt_initiator mvs_tgt;
+extern struct mvs_info *tgt_mvi;
+extern const struct mvs_dispatch mvs_64xx_dispatch;
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+
+#define DEV_IS_EXPANDER(type) \
+ ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
+
+#define bit(n) ((u64)1 << n)
+
+#define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc) = (__lseq_mask), (__lseq) = 0; \
+ (__mc) != 0 ; \
+ (++__lseq), (__mc) >>= 1)
+
+#define MVS_PHY_ID (1U << sas_phy->id)
+#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
+#define UNASSOC_D2H_FIS(id) \
+ ((void *) mvi->rx_fis + 0x100 * id)
+#define SATA_RECEIVED_FIS_LIST(reg_set) \
+ ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
+#define SATA_RECEIVED_SDB_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
+#define SATA_RECEIVED_D2H_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
+#define SATA_RECEIVED_PIO_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
+#define SATA_RECEIVED_DMA_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
+
+enum dev_status {
+ MVS_DEV_NORMAL = 0x0,
+ MVS_DEV_EH = 0x1,
+};
+
+enum dev_reset {
+ MVS_SOFT_RESET = 0,
+ MVS_HARD_RESET = 1,
+ MVS_PHY_TUNE = 2,
+};
+
+struct mvs_info;
+
+struct mvs_dispatch {
+ char *name;
+ int (*chip_init)(struct mvs_info *mvi);
+ int (*spi_init)(struct mvs_info *mvi);
+ int (*chip_ioremap)(struct mvs_info *mvi);
+ void (*chip_iounmap)(struct mvs_info *mvi);
+ irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
+ u32 (*isr_status)(struct mvs_info *mvi, int irq);
+ void (*interrupt_enable)(struct mvs_info *mvi);
+ void (*interrupt_disable)(struct mvs_info *mvi);
+
+ u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
+ void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
+
+ u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
+ void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
+ void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+ u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
+ void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
+ void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+ u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
+ void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
+
+ u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
+ void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
+
+ void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+ void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
+ void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
+ u32 tfs);
+ void (*start_delivery)(struct mvs_info *mvi, u32 tx);
+ u32 (*rx_update)(struct mvs_info *mvi);
+ void (*int_full)(struct mvs_info *mvi);
+ u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
+ void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
+ u32 (*prd_size)(void);
+ u32 (*prd_count)(void);
+ void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+ void (*detect_porttype)(struct mvs_info *mvi, int i);
+ int (*oob_done)(struct mvs_info *mvi, int i);
+ void (*fix_phy_info)(struct mvs_info *mvi, int i,
+ struct sas_identify_frame *id);
+ void (*phy_work_around)(struct mvs_info *mvi, int i);
+ void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates);
+ u32 (*phy_max_link_rate)(void);
+ void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
+ void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
+ void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
+ void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
+ void (*clear_active_cmds)(struct mvs_info *mvi);
+ u32 (*spi_read_data)(struct mvs_info *mvi);
+ void (*spi_write_data)(struct mvs_info *mvi, u32 data);
+ int (*spi_buildcmd)(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
+ );
+ int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
+ int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
+ void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd);
+ void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
+ void (*non_spec_ncq_error)(struct mvs_info *mvi);
+
+};
+
+struct mvs_chip_info {
+ u32 n_host;
+ u32 n_phy;
+ u32 fis_offs;
+ u32 fis_count;
+ u32 srs_sz;
+ u32 sg_width;
+ u32 slot_width;
+ const struct mvs_dispatch *dispatch;
+};
+#define MVS_MAX_SG (1U << mvi->chip->sg_width)
+#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
+#define MVS_RX_FISL_SZ \
+ (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
+#define MVS_CHIP_DISP (mvi->chip->dispatch)
+
+struct mvs_err_info {
+ __le32 flags;
+ __le32 flags2;
+};
+
+struct mvs_cmd_hdr {
+ __le32 flags; /* PRD tbl len; SAS, SATA ctl */
+ __le32 lens; /* cmd, max resp frame len */
+ __le32 tags; /* targ port xfer tag; tag */
+ __le32 data_len; /* data xfer len */
+ __le64 cmd_tbl; /* command table address */
+ __le64 open_frame; /* open addr frame address */
+ __le64 status_buf; /* status buffer address */
+ __le64 prd_tbl; /* PRD tbl address */
+ __le32 reserved[4];
+};
+
+struct mvs_port {
+ struct asd_sas_port sas_port;
+ u8 port_attached;
+ u8 wide_port_phymap;
+ struct list_head list;
+};
+
+struct mvs_phy {
+ struct mvs_info *mvi;
+ struct mvs_port *port;
+ struct asd_sas_phy sas_phy;
+ struct sas_identify identify;
+ struct scsi_device *sdev;
+ struct timer_list timer;
+ u64 dev_sas_addr;
+ u64 att_dev_sas_addr;
+ u32 att_dev_info;
+ u32 dev_info;
+ u32 phy_type;
+ u32 phy_status;
+ u32 irq_status;
+ u32 frame_rcvd_size;
+ u8 frame_rcvd[32];
+ u8 phy_attached;
+ u8 phy_mode;
+ u8 reserved[2];
+ u32 phy_event;
+ enum sas_linkrate minimum_linkrate;
+ enum sas_linkrate maximum_linkrate;
+};
+
+struct mvs_device {
+ struct list_head dev_entry;
+ enum sas_device_type dev_type;
+ struct mvs_info *mvi_info;
+ struct domain_device *sas_device;
+ struct timer_list timer;
+ u32 attached_phy;
+ u32 device_id;
+ u32 running_req;
+ u8 taskfileset;
+ u8 dev_status;
+ u16 reserved;
+};
+
+/* Generate PHY tunning parameters */
+struct phy_tuning {
+ /* 1 bit, transmitter emphasis enable */
+ u8 trans_emp_en:1;
+ /* 4 bits, transmitter emphasis amplitude */
+ u8 trans_emp_amp:4;
+ /* 3 bits, reserved space */
+ u8 Reserved_2bit_1:3;
+ /* 5 bits, transmitter amplitude */
+ u8 trans_amp:5;
+ /* 2 bits, transmitter amplitude adjust */
+ u8 trans_amp_adj:2;
+ /* 1 bit, reserved space */
+ u8 resv_2bit_2:1;
+ /* 2 bytes, reserved space */
+ u8 reserved[2];
+};
+
+struct ffe_control {
+ /* 4 bits, FFE Capacitor Select (value range 0~F) */
+ u8 ffe_cap_sel:4;
+ /* 3 bits, FFE Resistor Select (value range 0~7) */
+ u8 ffe_rss_sel:3;
+ /* 1 bit reserve*/
+ u8 reserved:1;
+};
+
+/*
+ * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
+ * The data area is valid only Signature="MRVL".
+ * If any member fills with 0xFF, the member is invalid.
+ */
+struct hba_info_page {
+ /* Dword 0 */
+ /* 4 bytes, structure signature,should be "MRVL" at first initial */
+ u8 signature[4];
+
+ /* Dword 1-13 */
+ u32 reserved1[13];
+
+ /* Dword 14-29 */
+ /* 64 bytes, SAS address for each port */
+ u64 sas_addr[8];
+
+ /* Dword 30-31 */
+ /* 8 bytes for vanir 8 port PHY FFE seeting
+ * BIT 0~3 : FFE Capacitor select(value range 0~F)
+ * BIT 4~6 : FFE Resistor select(value range 0~7)
+ * BIT 7: reserve.
+ */
+
+ struct ffe_control ffe_ctl[8];
+ /* Dword 32 -43 */
+ u32 reserved2[12];
+
+ /* Dword 44-45 */
+ /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */
+ u8 phy_rate[8];
+
+ /* Dword 46-53 */
+ /* 32 bytes, PHY tuning parameters for each PHY*/
+ struct phy_tuning phy_tuning[8];
+
+ /* Dword 54-63 */
+ u32 reserved3[10];
+}; /* total 256 bytes */
+
+struct mvs_slot_info {
+ struct list_head entry;
+ union {
+ struct sas_task *task;
+ void *tdata;
+ };
+ u32 n_elem;
+ u32 tx;
+ u32 slot_tag;
+
+ /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+ * and PRD table
+ */
+ void *buf;
+ dma_addr_t buf_dma;
+ void *response;
+ struct mvs_port *port;
+ struct mvs_device *device;
+ void *open_frame;
+};
+
+struct mvs_info {
+ unsigned long flags;
+
+ /* host-wide lock */
+ spinlock_t lock;
+
+ /* our device */
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* enhanced mode registers */
+ void __iomem *regs;
+
+ /* peripheral or soc registers */
+ void __iomem *regs_ex;
+ u8 sas_addr[SAS_ADDR_SIZE];
+
+ /* SCSI/SAS glue */
+ struct sas_ha_struct *sas;
+ struct Scsi_Host *shost;
+
+ /* TX (delivery) DMA ring */
+ __le32 *tx;
+ dma_addr_t tx_dma;
+
+ /* cached next-producer idx */
+ u32 tx_prod;
+
+ /* RX (completion) DMA ring */
+ __le32 *rx;
+ dma_addr_t rx_dma;
+
+ /* RX consumer idx */
+ u32 rx_cons;
+
+ /* RX'd FIS area */
+ __le32 *rx_fis;
+ dma_addr_t rx_fis_dma;
+
+ /* DMA command header slots */
+ struct mvs_cmd_hdr *slot;
+ dma_addr_t slot_dma;
+
+ u32 chip_id;
+ const struct mvs_chip_info *chip;
+
+ int tags_num;
+ unsigned long *tags;
+ /* further per-slot information */
+ struct mvs_phy phy[MVS_MAX_PHYS];
+ struct mvs_port port[MVS_MAX_PHYS];
+ u32 id;
+ u64 sata_reg_set;
+ struct list_head *hba_list;
+ struct list_head soc_entry;
+ struct list_head wq_list;
+ unsigned long instance;
+ u16 flashid;
+ u32 flashsize;
+ u32 flashsectSize;
+
+ void *addon;
+ struct hba_info_page hba_info_param;
+ struct mvs_device devices[MVS_MAX_DEVICES];
+ void *bulk_buffer;
+ dma_addr_t bulk_buffer_dma;
+ void *bulk_buffer1;
+ dma_addr_t bulk_buffer_dma1;
+#define TRASH_BUCKET_SIZE 0x20000
+ void *dma_pool;
+ struct mvs_slot_info slot_info[0];
+};
+
+struct mvs_prv_info{
+ u8 n_host;
+ u8 n_phy;
+ u8 scan_finished;
+ u8 reserve;
+ struct mvs_info *mvi[2];
+ struct tasklet_struct mv_tasklet;
+};
+
+struct mvs_wq {
+ struct delayed_work work_q;
+ struct mvs_info *mvi;
+ void *data;
+ int handler;
+ struct list_head entry;
+};
+
+struct mvs_task_exec_info {
+ struct sas_task *task;
+ struct mvs_cmd_hdr *hdr;
+ struct mvs_port *port;
+ u32 tag;
+ int n_elem;
+};
+
+/******************** function prototype *********************/
+void mvs_get_sas_addr(void *buf, u32 buflen);
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
+void mvs_tag_free(struct mvs_info *mvi, u32 tag);
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
+int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
+void mvs_tag_init(struct mvs_info *mvi);
+void mvs_iounmap(void __iomem *regs);
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
+void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ u32 off_hi, u64 sas_addr);
+void mvs_scan_start(struct Scsi_Host *shost);
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
+int mvs_abort_task(struct sas_task *task);
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
+int mvs_clear_aca(struct domain_device *dev, u8 *lun);
+int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
+void mvs_port_formed(struct asd_sas_phy *sas_phy);
+void mvs_port_deformed(struct asd_sas_phy *sas_phy);
+int mvs_dev_found(struct domain_device *dev);
+void mvs_dev_gone(struct domain_device *dev);
+int mvs_lu_reset(struct domain_device *dev, u8 *lun);
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
+int mvs_I_T_nexus_reset(struct domain_device *dev);
+int mvs_query_task(struct sas_task *task);
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev);
+void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
+ struct domain_device *dev);
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
+struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
+#endif
+
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
new file mode 100644
index 000000000..3e6b86675
--- /dev/null
+++ b/drivers/scsi/mvumi.c
@@ -0,0 +1,2751 @@
+/*
+ * Marvell UMI driver
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/io.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_eh.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+
+#include "mvumi.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("jyli@marvell.com");
+MODULE_DESCRIPTION("Marvell UMI Driver");
+
+static const struct pci_device_id mvumi_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
+
+static void tag_init(struct mvumi_tag *st, unsigned short size)
+{
+ unsigned short i;
+ BUG_ON(size != st->size);
+ st->top = size;
+ for (i = 0; i < size; i++)
+ st->stack[i] = size - 1 - i;
+}
+
+static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
+{
+ BUG_ON(st->top <= 0);
+ return st->stack[--st->top];
+}
+
+static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
+ unsigned short tag)
+{
+ BUG_ON(st->top >= st->size);
+ st->stack[st->top++] = tag;
+}
+
+static bool tag_is_empty(struct mvumi_tag *st)
+{
+ if (st->top == 0)
+ return 1;
+ else
+ return 0;
+}
+
+static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++)
+ if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
+ addr_array[i])
+ pci_iounmap(dev, addr_array[i]);
+}
+
+static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ addr_array[i] = pci_iomap(dev, i, 0);
+ if (!addr_array[i]) {
+ dev_err(&dev->dev, "failed to map Bar[%d]\n",
+ i);
+ mvumi_unmap_pci_addr(dev, addr_array);
+ return -ENOMEM;
+ }
+ } else
+ addr_array[i] = NULL;
+
+ dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
+ }
+
+ return 0;
+}
+
+static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
+ enum resource_type type, unsigned int size)
+{
+ struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
+
+ if (!res) {
+ dev_err(&mhba->pdev->dev,
+ "Failed to allocate memory for resource manager.\n");
+ return NULL;
+ }
+
+ switch (type) {
+ case RESOURCE_CACHED_MEMORY:
+ res->virt_addr = kzalloc(size, GFP_ATOMIC);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate memory,size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ break;
+
+ case RESOURCE_UNCACHED_MEMORY:
+ size = round_up(size, 8);
+ res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
+ &res->bus_addr);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate consistent mem,"
+ "size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
+ kfree(res);
+ return NULL;
+ }
+
+ res->type = type;
+ res->size = size;
+ INIT_LIST_HEAD(&res->entry);
+ list_add_tail(&res->entry, &mhba->res_list);
+
+ return res;
+}
+
+static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
+{
+ struct mvumi_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
+ switch (res->type) {
+ case RESOURCE_UNCACHED_MEMORY:
+ pci_free_consistent(mhba->pdev, res->size,
+ res->virt_addr, res->bus_addr);
+ break;
+ case RESOURCE_CACHED_MEMORY:
+ kfree(res->virt_addr);
+ break;
+ default:
+ dev_err(&mhba->pdev->dev,
+ "unknown resource type %d\n", res->type);
+ break;
+ }
+ list_del(&res->entry);
+ kfree(res);
+ }
+ mhba->fw_flag &= ~MVUMI_FW_ALLOC;
+}
+
+/**
+ * mvumi_make_sgl - Prepares SGL
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command from the mid-layer
+ * @sgl_p: SGL to be filled in
+ * @sg_count return the number of SG elements
+ *
+ * If successful, this function returns 0. otherwise, it returns -1.
+ */
+static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
+ void *sgl_p, unsigned char *sg_count)
+{
+ struct scatterlist *sg;
+ struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
+ unsigned int i;
+ unsigned int sgnum = scsi_sg_count(scmd);
+ dma_addr_t busaddr;
+
+ if (sgnum) {
+ sg = scsi_sglist(scmd);
+ *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
+ (int) scmd->sc_data_direction);
+ if (*sg_count > mhba->max_sge) {
+ dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
+ "than max sg[0x%x].\n",
+ *sg_count, mhba->max_sge);
+ return -1;
+ }
+ for (i = 0; i < *sg_count; i++) {
+ busaddr = sg_dma_address(&sg[i]);
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = 0;
+ sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+ if ((i + 1) == *sg_count)
+ m_sg->flags |= 1U << mhba->eot_flag;
+
+ sgd_inc(mhba, m_sg);
+ }
+ } else {
+ scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
+ pci_map_single(mhba->pdev, scsi_sglist(scmd),
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction)
+ : 0;
+ busaddr = scmd->SCp.dma_handle;
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = 1U << mhba->eot_flag;
+ sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
+ *sg_count = 1;
+ }
+
+ return 0;
+}
+
+static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ unsigned int size)
+{
+ struct mvumi_sgl *m_sg;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+
+ if (size == 0)
+ return 0;
+
+ virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+ if (!virt_addr)
+ return -1;
+
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ cmd->frame->sg_counts = 1;
+ cmd->data_buf = virt_addr;
+
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
+ m_sg->flags = 1U << mhba->eot_flag;
+ sgd_setsz(mhba, m_sg, cpu_to_le32(size));
+
+ return 0;
+}
+
+static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
+ unsigned int buf_size)
+{
+ struct mvumi_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+
+ cmd->frame = pci_alloc_consistent(mhba->pdev,
+ mhba->ib_max_size, &cmd->frame_phys);
+ if (!cmd->frame) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
+ " frame,size = %d.\n", mhba->ib_max_size);
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (buf_size) {
+ if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory"
+ " for internal frame\n");
+ pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ cmd->frame, cmd->frame_phys);
+ kfree(cmd);
+ return NULL;
+ }
+ } else
+ cmd->frame->sg_counts = 0;
+
+ return cmd;
+}
+
+static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ struct mvumi_sgl *m_sg;
+ unsigned int size;
+ dma_addr_t phy_addr;
+
+ if (cmd && cmd->frame) {
+ if (cmd->frame->sg_counts) {
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ sgd_getsz(mhba, m_sg, size);
+
+ phy_addr = (dma_addr_t) m_sg->baseaddr_l |
+ (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
+
+ pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ phy_addr);
+ }
+ pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ cmd->frame, cmd->frame_phys);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_get_cmd - Get a command from the free pool
+ * @mhba: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd = NULL;
+
+ if (likely(!list_empty(&mhba->cmd_pool))) {
+ cmd = list_entry((&mhba->cmd_pool)->next,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ } else
+ dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
+
+ return cmd;
+}
+
+/**
+ * mvumi_return_cmd - Return a cmd to free command pool
+ * @mhba: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ cmd->scmd = NULL;
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+}
+
+/**
+ * mvumi_free_cmds - Free all the cmds in the free cmd pool
+ * @mhba: Adapter soft state
+ */
+static void mvumi_free_cmds(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_alloc_cmds - Allocates the command packets
+ * @mhba: Adapter soft state
+ *
+ */
+static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
+{
+ int i;
+ struct mvumi_cmd *cmd;
+
+ for (i = 0; i < mhba->max_io; i++) {
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ goto err_exit;
+
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+ cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
+ cmd->frame_phys = mhba->ib_frame_phys
+ + i * mhba->ib_max_size;
+ } else
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame)
+ goto err_exit;
+ }
+ return 0;
+
+err_exit:
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for cmd[0x%x].\n", i);
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+ return -ENOMEM;
+}
+
+static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
+{
+ unsigned int ib_rp_reg;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
+
+ if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
+ (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
+ ((ib_rp_reg & regs->cl_pointer_toggle)
+ != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
+ dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
+ return 0;
+ }
+ if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
+ dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
+ return 0;
+ } else {
+ return mhba->max_io - atomic_read(&mhba->fw_outstanding);
+ }
+}
+
+static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
+{
+ unsigned int count;
+ if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
+ return 0;
+ count = ioread32(mhba->ib_shadow);
+ if (count == 0xffff)
+ return 0;
+ return count;
+}
+
+static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
+{
+ unsigned int cur_ib_entry;
+
+ cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
+ cur_ib_entry++;
+ if (cur_ib_entry >= mhba->list_num_io) {
+ cur_ib_entry -= mhba->list_num_io;
+ mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
+ }
+ mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
+ mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+ *ib_entry = mhba->ib_list + cur_ib_entry *
+ sizeof(struct mvumi_dyn_list_entry);
+ } else {
+ *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
+ }
+ atomic_inc(&mhba->fw_outstanding);
+}
+
+static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
+{
+ iowrite32(0xffff, mhba->ib_shadow);
+ iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
+}
+
+static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
+ unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
+{
+ unsigned short tag, request_id;
+
+ udelay(1);
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+ request_id = p_outb_frame->request_id;
+ tag = p_outb_frame->tag;
+ if (tag > mhba->tag_pool.size) {
+ dev_err(&mhba->pdev->dev, "ob frame data error\n");
+ return -1;
+ }
+ if (mhba->tag_cmd[tag] == NULL) {
+ dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
+ return -1;
+ } else if (mhba->tag_cmd[tag]->request_id != request_id &&
+ mhba->request_id_enabled) {
+ dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
+ "cmd request ID:0x%x\n", request_id,
+ mhba->tag_cmd[tag]->request_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
+ unsigned int *cur_obf, unsigned int *assign_obf_end)
+{
+ unsigned int ob_write, ob_write_shadow;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ do {
+ ob_write = ioread32(regs->outb_copy_pointer);
+ ob_write_shadow = ioread32(mhba->ob_shadow);
+ } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
+
+ *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
+ *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
+
+ if ((ob_write & regs->cl_pointer_toggle) !=
+ (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
+ *assign_obf_end += mhba->list_num_io;
+ }
+ return 0;
+}
+
+static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
+ unsigned int *cur_obf, unsigned int *assign_obf_end)
+{
+ unsigned int ob_write;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ ob_write = ioread32(regs->outb_read_pointer);
+ ob_write = ioread32(regs->outb_copy_pointer);
+ *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
+ *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
+ if (*assign_obf_end < *cur_obf)
+ *assign_obf_end += mhba->list_num_io;
+ else if (*assign_obf_end == *cur_obf)
+ return -1;
+ return 0;
+}
+
+static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
+{
+ unsigned int cur_obf, assign_obf_end, i;
+ struct mvumi_ob_data *ob_data;
+ struct mvumi_rsp_frame *p_outb_frame;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
+ return;
+
+ for (i = (assign_obf_end - cur_obf); i != 0; i--) {
+ cur_obf++;
+ if (cur_obf >= mhba->list_num_io) {
+ cur_obf -= mhba->list_num_io;
+ mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
+ }
+
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+
+ /* Copy pointer may point to entry in outbound list
+ * before entry has valid data
+ */
+ if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
+ mhba->tag_cmd[p_outb_frame->tag] == NULL ||
+ p_outb_frame->request_id !=
+ mhba->tag_cmd[p_outb_frame->tag]->request_id))
+ if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
+ continue;
+
+ if (!list_empty(&mhba->ob_data_list)) {
+ ob_data = (struct mvumi_ob_data *)
+ list_first_entry(&mhba->ob_data_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&ob_data->list);
+ } else {
+ ob_data = NULL;
+ if (cur_obf == 0) {
+ cur_obf = mhba->list_num_io - 1;
+ mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
+ } else
+ cur_obf -= 1;
+ break;
+ }
+
+ memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
+ p_outb_frame->tag = 0xff;
+
+ list_add_tail(&ob_data->list, &mhba->free_ob_list);
+ }
+ mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
+ mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
+ iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
+}
+
+static void mvumi_reset(struct mvumi_hba *mhba)
+{
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ iowrite32(0, regs->enpointa_mask_reg);
+ if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
+ return;
+
+ iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba);
+
+static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
+{
+ mhba->fw_state = FW_STATE_ABORT;
+ mvumi_reset(mhba);
+
+ if (mvumi_start(mhba))
+ return FAILED;
+ else
+ return SUCCESS;
+}
+
+static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
+{
+ struct mvumi_hw_regs *regs = mhba->regs;
+ u32 tmp;
+ unsigned long before;
+ before = jiffies;
+
+ iowrite32(0, regs->enpointa_mask_reg);
+ tmp = ioread32(regs->arm_to_pciea_msg1);
+ while (tmp != HANDSHAKE_READYSTATE) {
+ iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "FW reset failed [0x%x].\n", tmp);
+ return FAILED;
+ }
+
+ msleep(500);
+ rmb();
+ tmp = ioread32(regs->arm_to_pciea_msg1);
+ }
+
+ return SUCCESS;
+}
+
+static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
+{
+ unsigned char i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+ pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
+ &mhba->pci_base[i]);
+ }
+}
+
+static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
+{
+ unsigned char i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+ if (mhba->pci_base[i])
+ pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
+ mhba->pci_base[i]);
+ }
+}
+
+static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+{
+ unsigned int ret = 0;
+ pci_set_master(pdev);
+
+ if (IS_DMA64) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
+ return ret;
+}
+
+static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
+{
+ mhba->fw_state = FW_STATE_ABORT;
+
+ iowrite32(0, mhba->regs->reset_enable);
+ iowrite32(0xf, mhba->regs->reset_request);
+
+ iowrite32(0x10, mhba->regs->reset_enable);
+ iowrite32(0x10, mhba->regs->reset_request);
+ msleep(100);
+ pci_disable_device(mhba->pdev);
+
+ if (pci_enable_device(mhba->pdev)) {
+ dev_err(&mhba->pdev->dev, "enable device failed\n");
+ return FAILED;
+ }
+ if (mvumi_pci_set_master(mhba->pdev)) {
+ dev_err(&mhba->pdev->dev, "set master failed\n");
+ return FAILED;
+ }
+ mvumi_restore_bar_addr(mhba);
+ if (mvumi_wait_for_fw(mhba) == FAILED)
+ return FAILED;
+
+ return mvumi_wait_for_outstanding(mhba);
+}
+
+static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
+{
+ return mvumi_wait_for_outstanding(mhba);
+}
+
+static int mvumi_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mvumi_hba *mhba;
+
+ mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
+ scmd->serial_number, scmd->cmnd[0], scmd->retries);
+
+ return mhba->instancet->reset_host(mhba);
+}
+
+static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ unsigned long flags;
+
+ cmd->cmd_status = REQ_STATUS_PENDING;
+
+ if (atomic_read(&cmd->sync_cmd)) {
+ dev_err(&mhba->pdev->dev,
+ "last blocked cmd not finished, sync_cmd = %d\n",
+ atomic_read(&cmd->sync_cmd));
+ BUG_ON(1);
+ return -1;
+ }
+ atomic_inc(&cmd->sync_cmd);
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ wait_event_timeout(mhba->int_cmd_wait_q,
+ (cmd->cmd_status != REQ_STATUS_PENDING),
+ MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
+
+ /* command timeout */
+ if (atomic_read(&cmd->sync_cmd)) {
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ atomic_dec(&cmd->sync_cmd);
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = 0;
+ dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
+ cmd->frame->tag);
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer)) {
+ dev_warn(&mhba->pdev->dev,
+ "TIMEOUT:A internal command doesn't send!\n");
+ list_del_init(&cmd->queue_pointer);
+ } else
+ atomic_dec(&mhba->fw_outstanding);
+
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ }
+ return 0;
+}
+
+static void mvumi_release_fw(struct mvumi_hba *mhba)
+{
+ mvumi_free_cmds(mhba);
+ mvumi_release_mem_resource(mhba);
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+ pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ mhba->handshake_page, mhba->handshake_page_phys);
+ kfree(mhba->regs);
+ pci_release_regions(mhba->pdev);
+}
+
+static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+ unsigned char device_id, retry = 0;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
+ for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
+ if (!(mhba->target_map[device_id / bitcount] &
+ (1 << (device_id % bitcount))))
+ continue;
+get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
+ if (!cmd) {
+ if (retry++ >= 5) {
+ dev_err(&mhba->pdev->dev, "failed to get memory"
+ " for internal flush cache cmd for "
+ "device %d", device_id);
+ retry = 0;
+ continue;
+ } else
+ goto get_cmd;
+ }
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->device_id = device_id;
+ frame->cmd_flag = CMD_FLAG_NON_DATA;
+ frame->data_transfer_length = 0;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+ frame->cdb[1] = CDB_CORE_MODULE;
+ frame->cdb[2] = CDB_CORE_SHUTDOWN;
+
+ mvumi_issue_blocked_cmd(mhba, cmd);
+ if (cmd->cmd_status != SAM_STAT_GOOD) {
+ dev_err(&mhba->pdev->dev,
+ "device %d flush cache failed, status=0x%x.\n",
+ device_id, cmd->cmd_status);
+ }
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ }
+ return 0;
+}
+
+static unsigned char
+mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
+ unsigned short len)
+{
+ unsigned char *ptr;
+ unsigned char ret = 0, i;
+
+ ptr = (unsigned char *) p_header->frame_content;
+ for (i = 0; i < len; i++) {
+ ret ^= *ptr;
+ ptr++;
+ }
+
+ return ret;
+}
+
+static void mvumi_hs_build_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page2 *hs_page2;
+ struct mvumi_hs_page4 *hs_page4;
+ struct mvumi_hs_page3 *hs_page3;
+ struct timeval time;
+ unsigned int local_time;
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_HOST_INFO:
+ hs_page2 = (struct mvumi_hs_page2 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page2) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page2->host_type = 3; /* 3 mean linux*/
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
+ hs_page2->host_cap = 0x08;/* host dynamic source mode */
+ hs_page2->host_ver.ver_major = VER_MAJOR;
+ hs_page2->host_ver.ver_minor = VER_MINOR;
+ hs_page2->host_ver.ver_oem = VER_OEM;
+ hs_page2->host_ver.ver_build = VER_BUILD;
+ hs_page2->system_io_bus = 0;
+ hs_page2->slot_number = 0;
+ hs_page2->intr_level = 0;
+ hs_page2->intr_vector = 0;
+ do_gettimeofday(&time);
+ local_time = (unsigned int) (time.tv_sec -
+ (sys_tz.tz_minuteswest * 60));
+ hs_page2->seconds_since1970 = local_time;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_FIRM_CTL:
+ hs_page3 = (struct mvumi_hs_page3 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page3) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_CL_INFO:
+ hs_page4 = (struct mvumi_hs_page4 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page4) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
+ hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
+
+ hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
+ hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
+ hs_page4->ib_entry_size = mhba->ib_max_size_setting;
+ hs_page4->ob_entry_size = mhba->ob_max_size_setting;
+ if (mhba->hba_capability
+ & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
+ hs_page4->ob_depth = find_first_bit((unsigned long *)
+ &mhba->list_num_io,
+ BITS_PER_LONG);
+ hs_page4->ib_depth = find_first_bit((unsigned long *)
+ &mhba->list_num_io,
+ BITS_PER_LONG);
+ } else {
+ hs_page4->ob_depth = (u8) mhba->list_num_io;
+ hs_page4->ib_depth = (u8) mhba->list_num_io;
+ }
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
+ hs_header->page_code);
+ break;
+ }
+}
+
+/**
+ * mvumi_init_data - Initialize requested date for FW
+ * @mhba: Adapter soft state
+ */
+static int mvumi_init_data(struct mvumi_hba *mhba)
+{
+ struct mvumi_ob_data *ob_pool;
+ struct mvumi_res *res_mgnt;
+ unsigned int tmp_size, offset, i;
+ void *virmem, *v;
+ dma_addr_t p;
+
+ if (mhba->fw_flag & MVUMI_FW_ALLOC)
+ return 0;
+
+ tmp_size = mhba->ib_max_size * mhba->max_io;
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
+ tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
+
+ tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
+ tmp_size += 8 + sizeof(u32)*2 + 16;
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_UNCACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for inbound list\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ p = res_mgnt->bus_addr;
+ v = res_mgnt->virt_addr;
+ /* ib_list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_list = v;
+ mhba->ib_list_phys = p;
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+ v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
+ p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
+ mhba->ib_frame = v;
+ mhba->ib_frame_phys = p;
+ }
+ v += mhba->ib_max_size * mhba->max_io;
+ p += mhba->ib_max_size * mhba->max_io;
+
+ /* ib shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_shadow = v;
+ mhba->ib_shadow_phys = p;
+ p += sizeof(u32)*2;
+ v += sizeof(u32)*2;
+ /* ob shadow */
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ob_shadow = v;
+ mhba->ob_shadow_phys = p;
+ p += 8;
+ v += 8;
+ } else {
+ offset = round_up(p, 4) - p;
+ p += offset;
+ v += offset;
+ mhba->ob_shadow = v;
+ mhba->ob_shadow_phys = p;
+ p += 4;
+ v += 4;
+ }
+
+ /* ob list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+
+ mhba->ob_list = v;
+ mhba->ob_list_phys = p;
+
+ /* ob data pool */
+ tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
+ tmp_size = round_up(tmp_size, 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for outbound data buffer\n");
+ goto fail_alloc_dma_buf;
+ }
+ virmem = res_mgnt->virt_addr;
+
+ for (i = mhba->max_io; i != 0; i--) {
+ ob_pool = (struct mvumi_ob_data *) virmem;
+ list_add_tail(&ob_pool->list, &mhba->ob_data_list);
+ virmem += mhba->ob_max_size + sizeof(*ob_pool);
+ }
+
+ tmp_size = sizeof(unsigned short) * mhba->max_io +
+ sizeof(struct mvumi_cmd *) * mhba->max_io;
+ tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
+ (sizeof(unsigned char) * 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for tag and target map\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ virmem = res_mgnt->virt_addr;
+ mhba->tag_pool.stack = virmem;
+ mhba->tag_pool.size = mhba->max_io;
+ tag_init(&mhba->tag_pool, mhba->max_io);
+ virmem += sizeof(unsigned short) * mhba->max_io;
+
+ mhba->tag_cmd = virmem;
+ virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
+
+ mhba->target_map = virmem;
+
+ mhba->fw_flag |= MVUMI_FW_ALLOC;
+ return 0;
+
+fail_alloc_dma_buf:
+ mvumi_release_mem_resource(mhba);
+ return -1;
+}
+
+static int mvumi_hs_process_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page1 *hs_page1;
+ unsigned char page_checksum;
+
+ page_checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ if (page_checksum != hs_header->checksum) {
+ dev_err(&mhba->pdev->dev, "checksum error\n");
+ return -1;
+ }
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_FIRM_CAP:
+ hs_page1 = (struct mvumi_hs_page1 *) hs_header;
+
+ mhba->max_io = hs_page1->max_io_support;
+ mhba->list_num_io = hs_page1->cl_inout_list_depth;
+ mhba->max_transfer_size = hs_page1->max_transfer_size;
+ mhba->max_target_id = hs_page1->max_devices_support;
+ mhba->hba_capability = hs_page1->capability;
+ mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
+ mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
+
+ mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
+ mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
+
+ dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
+ hs_page1->fw_ver.ver_build);
+
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
+ mhba->eot_flag = 22;
+ else
+ mhba->eot_flag = 27;
+ if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
+ mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "handshake: page code error\n");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * mvumi_handshake - Move the FW to READY state
+ * @mhba: Adapter soft state
+ *
+ * During the initialization, FW passes can potentially be in any one of
+ * several possible states. If the FW in operational, waiting-for-handshake
+ * states, driver must take steps to bring it to ready state. Otherwise, it
+ * has to wait for the ready state.
+ */
+static int mvumi_handshake(struct mvumi_hba *mhba)
+{
+ unsigned int hs_state, tmp, hs_fun;
+ struct mvumi_hs_header *hs_header;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ if (mhba->fw_state == FW_STATE_STARTING)
+ hs_state = HS_S_START;
+ else {
+ tmp = ioread32(regs->arm_to_pciea_msg0);
+ hs_state = HS_GET_STATE(tmp);
+ dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
+ if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
+ mhba->fw_state = FW_STATE_STARTING;
+ return -1;
+ }
+ }
+
+ hs_fun = 0;
+ switch (hs_state) {
+ case HS_S_START:
+ mhba->fw_state = FW_STATE_HANDSHAKING;
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_RESET);
+ iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
+ iowrite32(hs_fun, regs->pciea_to_arm_msg0);
+ iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
+ break;
+
+ case HS_S_RESET:
+ iowrite32(lower_32_bits(mhba->handshake_page_phys),
+ regs->pciea_to_arm_msg1);
+ iowrite32(upper_32_bits(mhba->handshake_page_phys),
+ regs->arm_to_pciea_msg1);
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
+ iowrite32(hs_fun, regs->pciea_to_arm_msg0);
+ iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
+ break;
+
+ case HS_S_PAGE_ADDR:
+ case HS_S_QUERY_PAGE:
+ case HS_S_SEND_PAGE:
+ hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
+ if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
+ mhba->hba_total_pages =
+ ((struct mvumi_hs_page1 *) hs_header)->total_pages;
+
+ if (mhba->hba_total_pages == 0)
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if (hs_state == HS_S_QUERY_PAGE) {
+ if (mvumi_hs_process_page(mhba, hs_header)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ if (mvumi_init_data(mhba)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ } else if (hs_state == HS_S_PAGE_ADDR) {
+ hs_header->page_code = 0;
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
+ hs_header->page_code++;
+ if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
+ mvumi_hs_build_page(mhba, hs_header);
+ HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_END);
+
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ iowrite32(hs_fun, regs->pciea_to_arm_msg0);
+ iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
+ break;
+
+ case HS_S_END:
+ /* Set communication list ISR */
+ tmp = ioread32(regs->enpointa_mask_reg);
+ tmp |= regs->int_comaout | regs->int_comaerr;
+ iowrite32(tmp, regs->enpointa_mask_reg);
+ iowrite32(mhba->list_num_io, mhba->ib_shadow);
+ /* Set InBound List Available count shadow */
+ iowrite32(lower_32_bits(mhba->ib_shadow_phys),
+ regs->inb_aval_count_basel);
+ iowrite32(upper_32_bits(mhba->ib_shadow_phys),
+ regs->inb_aval_count_baseh);
+
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
+ /* Set OutBound List Available count shadow */
+ iowrite32((mhba->list_num_io-1) |
+ regs->cl_pointer_toggle,
+ mhba->ob_shadow);
+ iowrite32(lower_32_bits(mhba->ob_shadow_phys),
+ regs->outb_copy_basel);
+ iowrite32(upper_32_bits(mhba->ob_shadow_phys),
+ regs->outb_copy_baseh);
+ }
+
+ mhba->ib_cur_slot = (mhba->list_num_io - 1) |
+ regs->cl_pointer_toggle;
+ mhba->ob_cur_slot = (mhba->list_num_io - 1) |
+ regs->cl_pointer_toggle;
+ mhba->fw_state = FW_STATE_STARTED;
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
+ hs_state);
+ return -1;
+ }
+ return 0;
+}
+
+static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
+{
+ unsigned int isr_status;
+ unsigned long before;
+
+ before = jiffies;
+ mvumi_handshake(mhba);
+ do {
+ isr_status = mhba->instancet->read_fw_status_reg(mhba);
+
+ if (mhba->fw_state == FW_STATE_STARTED)
+ return 0;
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "no handshake response at state 0x%x.\n",
+ mhba->fw_state);
+ dev_err(&mhba->pdev->dev,
+ "isr : global=0x%x,status=0x%x.\n",
+ mhba->global_isr, isr_status);
+ return -1;
+ }
+ rmb();
+ usleep_range(1000, 2000);
+ } while (!(isr_status & DRBL_HANDSHAKE_ISR));
+
+ return 0;
+}
+
+static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
+{
+ unsigned int tmp;
+ unsigned long before;
+
+ before = jiffies;
+ tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
+ while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
+ if (tmp != HANDSHAKE_READYSTATE)
+ iowrite32(DRBL_MU_RESET,
+ mhba->regs->pciea_to_arm_drbl_reg);
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "invalid signature [0x%x].\n", tmp);
+ return -1;
+ }
+ usleep_range(1000, 2000);
+ rmb();
+ tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
+ }
+
+ mhba->fw_state = FW_STATE_STARTING;
+ dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
+ do {
+ if (mvumi_handshake_event(mhba)) {
+ dev_err(&mhba->pdev->dev,
+ "handshake failed at state 0x%x.\n",
+ mhba->fw_state);
+ return -1;
+ }
+ } while (mhba->fw_state != FW_STATE_STARTED);
+
+ dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
+
+ return 0;
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba)
+{
+ unsigned int tmp;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ /* clear Door bell */
+ tmp = ioread32(regs->arm_to_pciea_drbl_reg);
+ iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
+
+ iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
+ tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
+ iowrite32(tmp, regs->enpointa_mask_reg);
+ msleep(100);
+ if (mvumi_check_handshake(mhba))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * mvumi_complete_cmd - Completes a command
+ * @mhba: Adapter soft state
+ * @cmd: Command to be completed
+ */
+static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ struct mvumi_rsp_frame *ob_frame)
+{
+ struct scsi_cmnd *scmd = cmd->scmd;
+
+ cmd->scmd->SCp.ptr = NULL;
+ scmd->result = ob_frame->req_status;
+
+ switch (ob_frame->req_status) {
+ case SAM_STAT_GOOD:
+ scmd->result |= DID_OK << 16;
+ break;
+ case SAM_STAT_BUSY:
+ scmd->result |= DID_BUS_BUSY << 16;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ scmd->result |= (DID_OK << 16);
+ if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
+ memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
+ sizeof(struct mvumi_sense_data));
+ scmd->result |= (DRIVER_SENSE << 24);
+ }
+ break;
+ default:
+ scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ break;
+ }
+
+ if (scsi_bufflen(scmd)) {
+ if (scsi_sg_count(scmd)) {
+ pci_unmap_sg(mhba->pdev,
+ scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int) scmd->sc_data_direction);
+ } else {
+ pci_unmap_single(mhba->pdev,
+ scmd->SCp.dma_handle,
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction);
+
+ scmd->SCp.dma_handle = 0;
+ }
+ }
+ cmd->scmd->scsi_done(scmd);
+ mvumi_return_cmd(mhba, cmd);
+}
+
+static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd,
+ struct mvumi_rsp_frame *ob_frame)
+{
+ if (atomic_read(&cmd->sync_cmd)) {
+ cmd->cmd_status = ob_frame->req_status;
+
+ if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
+ (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
+ cmd->data_buf) {
+ memcpy(cmd->data_buf, ob_frame->payload,
+ sizeof(struct mvumi_sense_data));
+ }
+ atomic_dec(&cmd->sync_cmd);
+ wake_up(&mhba->int_cmd_wait_q);
+ }
+}
+
+static void mvumi_show_event(struct mvumi_hba *mhba,
+ struct mvumi_driver_event *ptr)
+{
+ unsigned int i;
+
+ dev_warn(&mhba->pdev->dev,
+ "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
+ ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
+ if (ptr->param_count) {
+ printk(KERN_WARNING "Event param(len 0x%x): ",
+ ptr->param_count);
+ for (i = 0; i < ptr->param_count; i++)
+ printk(KERN_WARNING "0x%x ", ptr->params[i]);
+
+ printk(KERN_WARNING "\n");
+ }
+
+ if (ptr->sense_data_length) {
+ printk(KERN_WARNING "Event sense data(len 0x%x): ",
+ ptr->sense_data_length);
+ for (i = 0; i < ptr->sense_data_length; i++)
+ printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
+ printk(KERN_WARNING "\n");
+ }
+}
+
+static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
+{
+ struct scsi_device *sdev;
+ int ret = -1;
+
+ if (status == DEVICE_OFFLINE) {
+ sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
+ if (sdev) {
+ dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
+ sdev->id, 0);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ ret = 0;
+ } else
+ dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
+ devid);
+ } else if (status == DEVICE_ONLINE) {
+ sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
+ if (!sdev) {
+ scsi_add_device(mhba->shost, 0, devid, 0);
+ dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
+ devid, 0);
+ ret = 0;
+ } else {
+ dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
+ 0, devid, 0);
+ scsi_device_put(sdev);
+ }
+ }
+ return ret;
+}
+
+static u64 mvumi_inquiry(struct mvumi_hba *mhba,
+ unsigned int id, struct mvumi_cmd *cmd)
+{
+ struct mvumi_msg_frame *frame;
+ u64 wwid = 0;
+ int cmd_alloc = 0;
+ int data_buf_len = 64;
+
+ if (!cmd) {
+ cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
+ if (cmd)
+ cmd_alloc = 1;
+ else
+ return 0;
+ } else {
+ memset(cmd->data_buf, 0, data_buf_len);
+ }
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->device_id = (u16) id;
+ frame->cmd_flag = CMD_FLAG_DATA_IN;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->cdb_length = 6;
+ frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
+ memset(frame->cdb, 0, frame->cdb_length);
+ frame->cdb[0] = INQUIRY;
+ frame->cdb[4] = frame->data_transfer_length;
+
+ mvumi_issue_blocked_cmd(mhba, cmd);
+
+ if (cmd->cmd_status == SAM_STAT_GOOD) {
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
+ wwid = id + 1;
+ else
+ memcpy((void *)&wwid,
+ (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
+ MVUMI_INQUIRY_UUID_LEN);
+ dev_dbg(&mhba->pdev->dev,
+ "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
+ } else {
+ wwid = 0;
+ }
+ if (cmd_alloc)
+ mvumi_delete_internal_cmd(mhba, cmd);
+
+ return wwid;
+}
+
+static void mvumi_detach_devices(struct mvumi_hba *mhba)
+{
+ struct mvumi_device *mv_dev = NULL , *dev_next;
+ struct scsi_device *sdev = NULL;
+
+ mutex_lock(&mhba->device_lock);
+
+ /* detach Hard Disk */
+ list_for_each_entry_safe(mv_dev, dev_next,
+ &mhba->shost_dev_list, list) {
+ mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
+ list_del_init(&mv_dev->list);
+ dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
+ mv_dev->id, mv_dev->wwid);
+ kfree(mv_dev);
+ }
+ list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
+ list_del_init(&mv_dev->list);
+ dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
+ mv_dev->id, mv_dev->wwid);
+ kfree(mv_dev);
+ }
+
+ /* detach virtual device */
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
+ sdev = scsi_device_lookup(mhba->shost, 0,
+ mhba->max_target_id - 1, 0);
+
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+
+ mutex_unlock(&mhba->device_lock);
+}
+
+static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
+{
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
+ if (sdev) {
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ }
+}
+
+static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
+{
+ struct mvumi_device *mv_dev = NULL;
+
+ list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
+ if (mv_dev->wwid == wwid) {
+ if (mv_dev->id != id) {
+ dev_err(&mhba->pdev->dev,
+ "%s has same wwid[%llx] ,"
+ " but different id[%d %d]\n",
+ __func__, mv_dev->wwid, mv_dev->id, id);
+ return -1;
+ } else {
+ if (mhba->pdev->device ==
+ PCI_DEVICE_ID_MARVELL_MV9143)
+ mvumi_rescan_devices(mhba, id);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
+{
+ struct mvumi_device *mv_dev = NULL, *dev_next;
+
+ list_for_each_entry_safe(mv_dev, dev_next,
+ &mhba->shost_dev_list, list) {
+ if (mv_dev->id == id) {
+ dev_dbg(&mhba->pdev->dev,
+ "detach device(0:%d:0) wwid(%llx) from HOST\n",
+ mv_dev->id, mv_dev->wwid);
+ mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
+ list_del_init(&mv_dev->list);
+ kfree(mv_dev);
+ }
+ }
+}
+
+static int mvumi_probe_devices(struct mvumi_hba *mhba)
+{
+ int id, maxid;
+ u64 wwid = 0;
+ struct mvumi_device *mv_dev = NULL;
+ struct mvumi_cmd *cmd = NULL;
+ int found = 0;
+
+ cmd = mvumi_create_internal_cmd(mhba, 64);
+ if (!cmd)
+ return -1;
+
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
+ maxid = mhba->max_target_id;
+ else
+ maxid = mhba->max_target_id - 1;
+
+ for (id = 0; id < maxid; id++) {
+ wwid = mvumi_inquiry(mhba, id, cmd);
+ if (!wwid) {
+ /* device no response, remove it */
+ mvumi_remove_devices(mhba, id);
+ } else {
+ /* device response, add it */
+ found = mvumi_match_devices(mhba, id, wwid);
+ if (!found) {
+ mvumi_remove_devices(mhba, id);
+ mv_dev = kzalloc(sizeof(struct mvumi_device),
+ GFP_KERNEL);
+ if (!mv_dev) {
+ dev_err(&mhba->pdev->dev,
+ "%s alloc mv_dev failed\n",
+ __func__);
+ continue;
+ }
+ mv_dev->id = id;
+ mv_dev->wwid = wwid;
+ mv_dev->sdev = NULL;
+ INIT_LIST_HEAD(&mv_dev->list);
+ list_add_tail(&mv_dev->list,
+ &mhba->mhba_dev_list);
+ dev_dbg(&mhba->pdev->dev,
+ "probe a new device(0:%d:0)"
+ " wwid(%llx)\n", id, mv_dev->wwid);
+ } else if (found == -1)
+ return -1;
+ else
+ continue;
+ }
+ }
+
+ if (cmd)
+ mvumi_delete_internal_cmd(mhba, cmd);
+
+ return 0;
+}
+
+static int mvumi_rescan_bus(void *data)
+{
+ int ret = 0;
+ struct mvumi_hba *mhba = (struct mvumi_hba *) data;
+ struct mvumi_device *mv_dev = NULL , *dev_next;
+
+ while (!kthread_should_stop()) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!atomic_read(&mhba->pnp_count))
+ schedule();
+ msleep(1000);
+ atomic_set(&mhba->pnp_count, 0);
+ __set_current_state(TASK_RUNNING);
+
+ mutex_lock(&mhba->device_lock);
+ ret = mvumi_probe_devices(mhba);
+ if (!ret) {
+ list_for_each_entry_safe(mv_dev, dev_next,
+ &mhba->mhba_dev_list, list) {
+ if (mvumi_handle_hotplug(mhba, mv_dev->id,
+ DEVICE_ONLINE)) {
+ dev_err(&mhba->pdev->dev,
+ "%s add device(0:%d:0) failed"
+ "wwid(%llx) has exist\n",
+ __func__,
+ mv_dev->id, mv_dev->wwid);
+ list_del_init(&mv_dev->list);
+ kfree(mv_dev);
+ } else {
+ list_move_tail(&mv_dev->list,
+ &mhba->shost_dev_list);
+ }
+ }
+ }
+ mutex_unlock(&mhba->device_lock);
+ }
+ return 0;
+}
+
+static void mvumi_proc_msg(struct mvumi_hba *mhba,
+ struct mvumi_hotplug_event *param)
+{
+ u16 size = param->size;
+ const unsigned long *ar_bitmap;
+ const unsigned long *re_bitmap;
+ int index;
+
+ if (mhba->fw_flag & MVUMI_FW_ATTACH) {
+ index = -1;
+ ar_bitmap = (const unsigned long *) param->bitmap;
+ re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
+
+ mutex_lock(&mhba->sas_discovery_mutex);
+ do {
+ index = find_next_zero_bit(ar_bitmap, size, index + 1);
+ if (index >= size)
+ break;
+ mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
+ } while (1);
+
+ index = -1;
+ do {
+ index = find_next_zero_bit(re_bitmap, size, index + 1);
+ if (index >= size)
+ break;
+ mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
+ } while (1);
+ mutex_unlock(&mhba->sas_discovery_mutex);
+ }
+}
+
+static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
+{
+ if (msg == APICDB1_EVENT_GETEVENT) {
+ int i, count;
+ struct mvumi_driver_event *param = NULL;
+ struct mvumi_event_req *er = buffer;
+ count = er->count;
+ if (count > MAX_EVENTS_RETURNED) {
+ dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
+ " than max event count[0x%x].\n",
+ count, MAX_EVENTS_RETURNED);
+ return;
+ }
+ for (i = 0; i < count; i++) {
+ param = &er->events[i];
+ mvumi_show_event(mhba, param);
+ }
+ } else if (msg == APICDB1_HOST_GETEVENT) {
+ mvumi_proc_msg(mhba, buffer);
+ }
+}
+
+static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+
+ cmd = mvumi_create_internal_cmd(mhba, 512);
+ if (!cmd)
+ return -1;
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->device_id = 0;
+ frame->cmd_flag = CMD_FLAG_DATA_IN;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ frame->data_transfer_length = sizeof(struct mvumi_event_req);
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = APICDB0_EVENT;
+ frame->cdb[1] = msg;
+ mvumi_issue_blocked_cmd(mhba, cmd);
+
+ if (cmd->cmd_status != SAM_STAT_GOOD)
+ dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
+ cmd->cmd_status);
+ else
+ mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ return 0;
+}
+
+static void mvumi_scan_events(struct work_struct *work)
+{
+ struct mvumi_events_wq *mu_ev =
+ container_of(work, struct mvumi_events_wq, work_q);
+
+ mvumi_get_event(mu_ev->mhba, mu_ev->event);
+ kfree(mu_ev);
+}
+
+static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
+{
+ struct mvumi_events_wq *mu_ev;
+
+ while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
+ if (isr_status & DRBL_BUS_CHANGE) {
+ atomic_inc(&mhba->pnp_count);
+ wake_up_process(mhba->dm_thread);
+ isr_status &= ~(DRBL_BUS_CHANGE);
+ continue;
+ }
+
+ mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
+ if (mu_ev) {
+ INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
+ mu_ev->mhba = mhba;
+ mu_ev->event = APICDB1_EVENT_GETEVENT;
+ isr_status &= ~(DRBL_EVENT_NOTIFY);
+ mu_ev->param = NULL;
+ schedule_work(&mu_ev->work_q);
+ }
+ }
+}
+
+static void mvumi_handle_clob(struct mvumi_hba *mhba)
+{
+ struct mvumi_rsp_frame *ob_frame;
+ struct mvumi_cmd *cmd;
+ struct mvumi_ob_data *pool;
+
+ while (!list_empty(&mhba->free_ob_list)) {
+ pool = list_first_entry(&mhba->free_ob_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&pool->list);
+ list_add_tail(&pool->list, &mhba->ob_data_list);
+
+ ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
+ cmd = mhba->tag_cmd[ob_frame->tag];
+
+ atomic_dec(&mhba->fw_outstanding);
+ mhba->tag_cmd[ob_frame->tag] = 0;
+ tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
+ if (cmd->scmd)
+ mvumi_complete_cmd(mhba, cmd, ob_frame);
+ else
+ mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
+ }
+ mhba->instancet->fire_cmd(mhba, NULL);
+}
+
+static irqreturn_t mvumi_isr_handler(int irq, void *devp)
+{
+ struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
+ unsigned long flags;
+
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
+ if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
+ mvumi_launch_events(mhba, mhba->isr_status);
+ if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
+ dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
+ mvumi_handshake(mhba);
+ }
+
+ }
+
+ if (mhba->global_isr & mhba->regs->int_comaout)
+ mvumi_receive_ob_list_entry(mhba);
+
+ mhba->global_isr = 0;
+ mhba->isr_status = 0;
+ if (mhba->fw_state == FW_STATE_STARTED)
+ mvumi_handle_clob(mhba);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ void *ib_entry;
+ struct mvumi_msg_frame *ib_frame;
+ unsigned int frame_len;
+
+ ib_frame = cmd->frame;
+ if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
+ dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+ }
+ if (tag_is_empty(&mhba->tag_pool)) {
+ dev_dbg(&mhba->pdev->dev, "no free tag.\n");
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+ }
+ mvumi_get_ib_list_entry(mhba, &ib_entry);
+
+ cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
+ cmd->frame->request_id = mhba->io_seq++;
+ cmd->request_id = cmd->frame->request_id;
+ mhba->tag_cmd[cmd->frame->tag] = cmd;
+ frame_len = sizeof(*ib_frame) - 4 +
+ ib_frame->sg_counts * sizeof(struct mvumi_sgl);
+ if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
+ struct mvumi_dyn_list_entry *dle;
+ dle = ib_entry;
+ dle->src_low_addr =
+ cpu_to_le32(lower_32_bits(cmd->frame_phys));
+ dle->src_high_addr =
+ cpu_to_le32(upper_32_bits(cmd->frame_phys));
+ dle->if_length = (frame_len >> 2) & 0xFFF;
+ } else {
+ memcpy(ib_entry, ib_frame, frame_len);
+ }
+ return MV_QUEUE_COMMAND_RESULT_SENT;
+}
+
+static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
+{
+ unsigned short num_of_cl_sent = 0;
+ unsigned int count;
+ enum mvumi_qc_result result;
+
+ if (cmd)
+ list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
+ count = mhba->instancet->check_ib_list(mhba);
+ if (list_empty(&mhba->waiting_req_list) || !count)
+ return;
+
+ do {
+ cmd = list_first_entry(&mhba->waiting_req_list,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ result = mvumi_send_command(mhba, cmd);
+ switch (result) {
+ case MV_QUEUE_COMMAND_RESULT_SENT:
+ num_of_cl_sent++;
+ break;
+ case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
+ list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
+ if (num_of_cl_sent > 0)
+ mvumi_send_ib_list_entry(mhba);
+
+ return;
+ }
+ } while (!list_empty(&mhba->waiting_req_list) && count--);
+
+ if (num_of_cl_sent > 0)
+ mvumi_send_ib_list_entry(mhba);
+}
+
+/**
+ * mvumi_enable_intr - Enables interrupts
+ * @mhba: Adapter soft state
+ */
+static void mvumi_enable_intr(struct mvumi_hba *mhba)
+{
+ unsigned int mask;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
+ mask = ioread32(regs->enpointa_mask_reg);
+ mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
+ iowrite32(mask, regs->enpointa_mask_reg);
+}
+
+/**
+ * mvumi_disable_intr -Disables interrupt
+ * @mhba: Adapter soft state
+ */
+static void mvumi_disable_intr(struct mvumi_hba *mhba)
+{
+ unsigned int mask;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ iowrite32(0, regs->arm_to_pciea_mask_reg);
+ mask = ioread32(regs->enpointa_mask_reg);
+ mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
+ regs->int_comaerr);
+ iowrite32(mask, regs->enpointa_mask_reg);
+}
+
+static int mvumi_clear_intr(void *extend)
+{
+ struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
+ unsigned int status, isr_status = 0, tmp = 0;
+ struct mvumi_hw_regs *regs = mhba->regs;
+
+ status = ioread32(regs->main_int_cause_reg);
+ if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
+ return 1;
+ if (unlikely(status & regs->int_comaerr)) {
+ tmp = ioread32(regs->outb_isr_cause);
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
+ if (tmp & regs->clic_out_err) {
+ iowrite32(tmp & regs->clic_out_err,
+ regs->outb_isr_cause);
+ }
+ } else {
+ if (tmp & (regs->clic_in_err | regs->clic_out_err))
+ iowrite32(tmp & (regs->clic_in_err |
+ regs->clic_out_err),
+ regs->outb_isr_cause);
+ }
+ status ^= mhba->regs->int_comaerr;
+ /* inbound or outbound parity error, command will timeout */
+ }
+ if (status & regs->int_comaout) {
+ tmp = ioread32(regs->outb_isr_cause);
+ if (tmp & regs->clic_irq)
+ iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
+ }
+ if (status & regs->int_dl_cpu2pciea) {
+ isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
+ if (isr_status)
+ iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
+ }
+
+ mhba->global_isr = status;
+ mhba->isr_status = isr_status;
+
+ return 0;
+}
+
+/**
+ * mvumi_read_fw_status_reg - returns the current FW status value
+ * @mhba: Adapter soft state
+ */
+static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
+{
+ unsigned int status;
+
+ status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
+ if (status)
+ iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
+ return status;
+}
+
+static struct mvumi_instance_template mvumi_instance_9143 = {
+ .fire_cmd = mvumi_fire_cmd,
+ .enable_intr = mvumi_enable_intr,
+ .disable_intr = mvumi_disable_intr,
+ .clear_intr = mvumi_clear_intr,
+ .read_fw_status_reg = mvumi_read_fw_status_reg,
+ .check_ib_list = mvumi_check_ib_list_9143,
+ .check_ob_list = mvumi_check_ob_list_9143,
+ .reset_host = mvumi_reset_host_9143,
+};
+
+static struct mvumi_instance_template mvumi_instance_9580 = {
+ .fire_cmd = mvumi_fire_cmd,
+ .enable_intr = mvumi_enable_intr,
+ .disable_intr = mvumi_disable_intr,
+ .clear_intr = mvumi_clear_intr,
+ .read_fw_status_reg = mvumi_read_fw_status_reg,
+ .check_ib_list = mvumi_check_ib_list_9580,
+ .check_ob_list = mvumi_check_ob_list_9580,
+ .reset_host = mvumi_reset_host_9580,
+};
+
+static int mvumi_slave_configure(struct scsi_device *sdev)
+{
+ struct mvumi_hba *mhba;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
+ mhba = (struct mvumi_hba *) sdev->host->hostdata;
+ if (sdev->id >= mhba->max_target_id)
+ return -EINVAL;
+
+ mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
+ return 0;
+}
+
+/**
+ * mvumi_build_frame - Prepares a direct cdb (DCDB) command
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command
+ * @cmd: Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typcially pass-through
+ * commands to the devices.
+ */
+static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
+ struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
+{
+ struct mvumi_msg_frame *pframe;
+
+ cmd->scmd = scmd;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ pframe = cmd->frame;
+ pframe->device_id = ((unsigned short) scmd->device->id) |
+ (((unsigned short) scmd->device->lun) << 8);
+ pframe->cmd_flag = 0;
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ pframe->cmd_flag |= CMD_FLAG_NON_DATA;
+ break;
+ case DMA_FROM_DEVICE:
+ pframe->cmd_flag |= CMD_FLAG_DATA_IN;
+ break;
+ case DMA_TO_DEVICE:
+ pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
+ break;
+ case DMA_BIDIRECTIONAL:
+ default:
+ dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
+ "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
+ goto error;
+ }
+
+ pframe->cdb_length = scmd->cmd_len;
+ memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
+ pframe->req_function = CL_FUN_SCSI_CMD;
+ if (scsi_bufflen(scmd)) {
+ if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
+ &pframe->sg_counts))
+ goto error;
+
+ pframe->data_transfer_length = scsi_bufflen(scmd);
+ } else {
+ pframe->sg_counts = 0;
+ pframe->data_transfer_length = 0;
+ }
+ return 0;
+
+error:
+ scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
+ 0);
+ return -1;
+}
+
+/**
+ * mvumi_queue_command - Queue entry point
+ * @scmd: SCSI command to be queued
+ * @done: Callback entry point
+ */
+static int mvumi_queue_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_hba *mhba;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(shost->host_lock, irq_flags);
+ scsi_cmd_get_serial(shost, scmd);
+
+ mhba = (struct mvumi_hba *) shost->hostdata;
+ scmd->result = 0;
+ cmd = mvumi_get_cmd(mhba);
+ if (unlikely(!cmd)) {
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
+ goto out_return_cmd;
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *) cmd;
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return 0;
+
+out_return_cmd:
+ mvumi_return_cmd(mhba, cmd);
+ scmd->scsi_done(scmd);
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return 0;
+}
+
+static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
+{
+ struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
+ struct Scsi_Host *host = scmd->device->host;
+ struct mvumi_hba *mhba = shost_priv(host);
+ unsigned long flags;
+
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = 0;
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer))
+ list_del_init(&cmd->queue_pointer);
+ else
+ atomic_dec(&mhba->fw_outstanding);
+
+ scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->SCp.ptr = NULL;
+ if (scsi_bufflen(scmd)) {
+ if (scsi_sg_count(scmd)) {
+ pci_unmap_sg(mhba->pdev,
+ scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int)scmd->sc_data_direction);
+ } else {
+ pci_unmap_single(mhba->pdev,
+ scmd->SCp.dma_handle,
+ scsi_bufflen(scmd),
+ (int)scmd->sc_data_direction);
+
+ scmd->SCp.dma_handle = 0;
+ }
+ }
+ mvumi_return_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ return BLK_EH_NOT_HANDLED;
+}
+
+static int
+mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads, sectors;
+ sector_t cylinders;
+ unsigned long tmp;
+
+ heads = 64;
+ sectors = 32;
+ tmp = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ tmp = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+static struct scsi_host_template mvumi_template = {
+
+ .module = THIS_MODULE,
+ .name = "Marvell Storage Controller",
+ .slave_configure = mvumi_slave_configure,
+ .queuecommand = mvumi_queue_command,
+ .eh_host_reset_handler = mvumi_host_reset,
+ .bios_param = mvumi_bios_param,
+ .this_id = -1,
+};
+
+static struct scsi_transport_template mvumi_transport_template = {
+ .eh_timed_out = mvumi_timed_out,
+};
+
+static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
+{
+ void *base = NULL;
+ struct mvumi_hw_regs *regs;
+
+ switch (mhba->pdev->device) {
+ case PCI_DEVICE_ID_MARVELL_MV9143:
+ mhba->mmio = mhba->base_addr[0];
+ base = mhba->mmio;
+ if (!mhba->regs) {
+ mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+ if (mhba->regs == NULL)
+ return -ENOMEM;
+ }
+ regs = mhba->regs;
+
+ /* For Arm */
+ regs->ctrl_sts_reg = base + 0x20104;
+ regs->rstoutn_mask_reg = base + 0x20108;
+ regs->sys_soft_rst_reg = base + 0x2010C;
+ regs->main_int_cause_reg = base + 0x20200;
+ regs->enpointa_mask_reg = base + 0x2020C;
+ regs->rstoutn_en_reg = base + 0xF1400;
+ /* For Doorbell */
+ regs->pciea_to_arm_drbl_reg = base + 0x20400;
+ regs->arm_to_pciea_drbl_reg = base + 0x20408;
+ regs->arm_to_pciea_mask_reg = base + 0x2040C;
+ regs->pciea_to_arm_msg0 = base + 0x20430;
+ regs->pciea_to_arm_msg1 = base + 0x20434;
+ regs->arm_to_pciea_msg0 = base + 0x20438;
+ regs->arm_to_pciea_msg1 = base + 0x2043C;
+
+ /* For Message Unit */
+
+ regs->inb_aval_count_basel = base + 0x508;
+ regs->inb_aval_count_baseh = base + 0x50C;
+ regs->inb_write_pointer = base + 0x518;
+ regs->inb_read_pointer = base + 0x51C;
+ regs->outb_coal_cfg = base + 0x568;
+ regs->outb_copy_basel = base + 0x5B0;
+ regs->outb_copy_baseh = base + 0x5B4;
+ regs->outb_copy_pointer = base + 0x544;
+ regs->outb_read_pointer = base + 0x548;
+ regs->outb_isr_cause = base + 0x560;
+ regs->outb_coal_cfg = base + 0x568;
+ /* Bit setting for HW */
+ regs->int_comaout = 1 << 8;
+ regs->int_comaerr = 1 << 6;
+ regs->int_dl_cpu2pciea = 1 << 1;
+ regs->cl_pointer_toggle = 1 << 12;
+ regs->clic_irq = 1 << 1;
+ regs->clic_in_err = 1 << 8;
+ regs->clic_out_err = 1 << 12;
+ regs->cl_slot_num_mask = 0xFFF;
+ regs->int_drbl_int_mask = 0x3FFFFFFF;
+ regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
+ regs->int_comaerr;
+ break;
+ case PCI_DEVICE_ID_MARVELL_MV9580:
+ mhba->mmio = mhba->base_addr[2];
+ base = mhba->mmio;
+ if (!mhba->regs) {
+ mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+ if (mhba->regs == NULL)
+ return -ENOMEM;
+ }
+ regs = mhba->regs;
+ /* For Arm */
+ regs->ctrl_sts_reg = base + 0x20104;
+ regs->rstoutn_mask_reg = base + 0x1010C;
+ regs->sys_soft_rst_reg = base + 0x10108;
+ regs->main_int_cause_reg = base + 0x10200;
+ regs->enpointa_mask_reg = base + 0x1020C;
+ regs->rstoutn_en_reg = base + 0xF1400;
+
+ /* For Doorbell */
+ regs->pciea_to_arm_drbl_reg = base + 0x10460;
+ regs->arm_to_pciea_drbl_reg = base + 0x10480;
+ regs->arm_to_pciea_mask_reg = base + 0x10484;
+ regs->pciea_to_arm_msg0 = base + 0x10400;
+ regs->pciea_to_arm_msg1 = base + 0x10404;
+ regs->arm_to_pciea_msg0 = base + 0x10420;
+ regs->arm_to_pciea_msg1 = base + 0x10424;
+
+ /* For reset*/
+ regs->reset_request = base + 0x10108;
+ regs->reset_enable = base + 0x1010c;
+
+ /* For Message Unit */
+ regs->inb_aval_count_basel = base + 0x4008;
+ regs->inb_aval_count_baseh = base + 0x400C;
+ regs->inb_write_pointer = base + 0x4018;
+ regs->inb_read_pointer = base + 0x401C;
+ regs->outb_copy_basel = base + 0x4058;
+ regs->outb_copy_baseh = base + 0x405C;
+ regs->outb_copy_pointer = base + 0x406C;
+ regs->outb_read_pointer = base + 0x4070;
+ regs->outb_coal_cfg = base + 0x4080;
+ regs->outb_isr_cause = base + 0x4088;
+ /* Bit setting for HW */
+ regs->int_comaout = 1 << 4;
+ regs->int_dl_cpu2pciea = 1 << 12;
+ regs->int_comaerr = 1 << 29;
+ regs->cl_pointer_toggle = 1 << 14;
+ regs->cl_slot_num_mask = 0x3FFF;
+ regs->clic_irq = 1 << 0;
+ regs->clic_out_err = 1 << 1;
+ regs->int_drbl_int_mask = 0x3FFFFFFF;
+ regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
+ break;
+ default:
+ return -1;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * mvumi_init_fw - Initializes the FW
+ * @mhba: Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+static int mvumi_init_fw(struct mvumi_hba *mhba)
+{
+ int ret = 0;
+
+ if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
+ dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
+ return -EBUSY;
+ }
+ ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
+ if (ret)
+ goto fail_ioremap;
+
+ switch (mhba->pdev->device) {
+ case PCI_DEVICE_ID_MARVELL_MV9143:
+ mhba->instancet = &mvumi_instance_9143;
+ mhba->io_seq = 0;
+ mhba->max_sge = MVUMI_MAX_SG_ENTRY;
+ mhba->request_id_enabled = 1;
+ break;
+ case PCI_DEVICE_ID_MARVELL_MV9580:
+ mhba->instancet = &mvumi_instance_9580;
+ mhba->io_seq = 0;
+ mhba->max_sge = MVUMI_MAX_SG_ENTRY;
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
+ mhba->pdev->device);
+ mhba->instancet = NULL;
+ ret = -EINVAL;
+ goto fail_alloc_mem;
+ }
+ dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
+ mhba->pdev->device);
+ ret = mvumi_cfg_hw_reg(mhba);
+ if (ret) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for reg\n");
+ ret = -ENOMEM;
+ goto fail_alloc_mem;
+ }
+ mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
+ &mhba->handshake_page_phys);
+ if (!mhba->handshake_page) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for handshake\n");
+ ret = -ENOMEM;
+ goto fail_alloc_page;
+ }
+
+ if (mvumi_start(mhba)) {
+ ret = -EINVAL;
+ goto fail_ready_state;
+ }
+ ret = mvumi_alloc_cmds(mhba);
+ if (ret)
+ goto fail_ready_state;
+
+ return 0;
+
+fail_ready_state:
+ mvumi_release_mem_resource(mhba);
+ pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ mhba->handshake_page, mhba->handshake_page_phys);
+fail_alloc_page:
+ kfree(mhba->regs);
+fail_alloc_mem:
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+fail_ioremap:
+ pci_release_regions(mhba->pdev);
+
+ return ret;
+}
+
+/**
+ * mvumi_io_attach - Attaches this driver to SCSI mid-layer
+ * @mhba: Adapter soft state
+ */
+static int mvumi_io_attach(struct mvumi_hba *mhba)
+{
+ struct Scsi_Host *host = mhba->shost;
+ struct scsi_device *sdev = NULL;
+ int ret;
+ unsigned int max_sg = (mhba->ib_max_size + 4 -
+ sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
+
+ host->irq = mhba->pdev->irq;
+ host->unique_id = mhba->unique_id;
+ host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+ host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
+ host->max_sectors = mhba->max_transfer_size / 512;
+ host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+ host->max_id = mhba->max_target_id;
+ host->max_cmd_len = MAX_COMMAND_SIZE;
+ host->transportt = &mvumi_transport_template;
+
+ ret = scsi_add_host(host, &mhba->pdev->dev);
+ if (ret) {
+ dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ mhba->fw_flag |= MVUMI_FW_ATTACH;
+
+ mutex_lock(&mhba->sas_discovery_mutex);
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
+ ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
+ else
+ ret = 0;
+ if (ret) {
+ dev_err(&mhba->pdev->dev, "add virtual device failed\n");
+ mutex_unlock(&mhba->sas_discovery_mutex);
+ goto fail_add_device;
+ }
+
+ mhba->dm_thread = kthread_create(mvumi_rescan_bus,
+ mhba, "mvumi_scanthread");
+ if (IS_ERR(mhba->dm_thread)) {
+ dev_err(&mhba->pdev->dev,
+ "failed to create device scan thread\n");
+ mutex_unlock(&mhba->sas_discovery_mutex);
+ goto fail_create_thread;
+ }
+ atomic_set(&mhba->pnp_count, 1);
+ wake_up_process(mhba->dm_thread);
+
+ mutex_unlock(&mhba->sas_discovery_mutex);
+ return 0;
+
+fail_create_thread:
+ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
+ sdev = scsi_device_lookup(mhba->shost, 0,
+ mhba->max_target_id - 1, 0);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+fail_add_device:
+ scsi_remove_host(mhba->shost);
+ return ret;
+}
+
+/**
+ * mvumi_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct mvumi_hba *mhba;
+ int ret;
+
+ dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ if (IS_DMA64) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_set_dma_mask;
+ }
+
+ host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
+ if (!host) {
+ dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_alloc_instance;
+ }
+ mhba = shost_priv(host);
+
+ INIT_LIST_HEAD(&mhba->cmd_pool);
+ INIT_LIST_HEAD(&mhba->ob_data_list);
+ INIT_LIST_HEAD(&mhba->free_ob_list);
+ INIT_LIST_HEAD(&mhba->res_list);
+ INIT_LIST_HEAD(&mhba->waiting_req_list);
+ mutex_init(&mhba->device_lock);
+ INIT_LIST_HEAD(&mhba->mhba_dev_list);
+ INIT_LIST_HEAD(&mhba->shost_dev_list);
+ atomic_set(&mhba->fw_outstanding, 0);
+ init_waitqueue_head(&mhba->int_cmd_wait_q);
+ mutex_init(&mhba->sas_discovery_mutex);
+
+ mhba->pdev = pdev;
+ mhba->shost = host;
+ mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
+
+ ret = mvumi_init_fw(mhba);
+ if (ret)
+ goto fail_init_fw;
+
+ ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
+ "mvumi", mhba);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IRQ\n");
+ goto fail_init_irq;
+ }
+
+ mhba->instancet->enable_intr(mhba);
+ pci_set_drvdata(pdev, mhba);
+
+ ret = mvumi_io_attach(mhba);
+ if (ret)
+ goto fail_io_attach;
+
+ mvumi_backup_bar_addr(mhba);
+ dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
+
+ return 0;
+
+fail_io_attach:
+ mhba->instancet->disable_intr(mhba);
+ free_irq(mhba->pdev->irq, mhba);
+fail_init_irq:
+ mvumi_release_fw(mhba);
+fail_init_fw:
+ scsi_host_put(host);
+
+fail_alloc_instance:
+fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void mvumi_detach_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ struct mvumi_hba *mhba;
+
+ mhba = pci_get_drvdata(pdev);
+ if (mhba->dm_thread) {
+ kthread_stop(mhba->dm_thread);
+ mhba->dm_thread = NULL;
+ }
+
+ mvumi_detach_devices(mhba);
+ host = mhba->shost;
+ scsi_remove_host(mhba->shost);
+ mvumi_flush_cache(mhba);
+
+ mhba->instancet->disable_intr(mhba);
+ free_irq(mhba->pdev->irq, mhba);
+ mvumi_release_fw(mhba);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
+ dev_dbg(&pdev->dev, "driver is removed!\n");
+}
+
+/**
+ * mvumi_shutdown - Shutdown entry point
+ * @device: Generic device structure
+ */
+static void mvumi_shutdown(struct pci_dev *pdev)
+{
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
+
+ mvumi_flush_cache(mhba);
+}
+
+static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mvumi_hba *mhba = NULL;
+
+ mhba = pci_get_drvdata(pdev);
+ mvumi_flush_cache(mhba);
+
+ pci_set_drvdata(pdev, mhba);
+ mhba->instancet->disable_intr(mhba);
+ free_irq(mhba->pdev->irq, mhba);
+ mvumi_unmap_pci_addr(pdev, mhba->base_addr);
+ pci_release_regions(pdev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int mvumi_resume(struct pci_dev *pdev)
+{
+ int ret;
+ struct mvumi_hba *mhba = NULL;
+
+ mhba = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "enable device failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+ if (IS_DMA64) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+ ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
+ if (ret)
+ goto fail;
+ ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
+ if (ret)
+ goto release_regions;
+
+ if (mvumi_cfg_hw_reg(mhba)) {
+ ret = -EINVAL;
+ goto unmap_pci_addr;
+ }
+
+ mhba->mmio = mhba->base_addr[0];
+ mvumi_reset(mhba);
+
+ if (mvumi_start(mhba)) {
+ ret = -EINVAL;
+ goto unmap_pci_addr;
+ }
+
+ ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
+ "mvumi", mhba);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IRQ\n");
+ goto unmap_pci_addr;
+ }
+ mhba->instancet->enable_intr(mhba);
+
+ return 0;
+
+unmap_pci_addr:
+ mvumi_unmap_pci_addr(pdev, mhba->base_addr);
+release_regions:
+ pci_release_regions(pdev);
+fail:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static struct pci_driver mvumi_pci_driver = {
+
+ .name = MV_DRIVER_NAME,
+ .id_table = mvumi_pci_table,
+ .probe = mvumi_probe_one,
+ .remove = mvumi_detach_one,
+ .shutdown = mvumi_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mvumi_suspend,
+ .resume = mvumi_resume,
+#endif
+};
+
+/**
+ * mvumi_init - Driver load entry point
+ */
+static int __init mvumi_init(void)
+{
+ return pci_register_driver(&mvumi_pci_driver);
+}
+
+/**
+ * mvumi_exit - Driver unload entry point
+ */
+static void __exit mvumi_exit(void)
+{
+
+ pci_unregister_driver(&mvumi_pci_driver);
+}
+
+module_init(mvumi_init);
+module_exit(mvumi_exit);
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
new file mode 100644
index 000000000..41f168702
--- /dev/null
+++ b/drivers/scsi/mvumi.h
@@ -0,0 +1,573 @@
+/*
+ * Marvell UMI head file
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#ifndef MVUMI_H
+#define MVUMI_H
+
+#define MAX_BASE_ADDRESS 6
+
+#define VER_MAJOR 1
+#define VER_MINOR 1
+#define VER_OEM 0
+#define VER_BUILD 1500
+
+#define MV_DRIVER_NAME "mvumi"
+#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
+#define PCI_DEVICE_ID_MARVELL_MV9580 0x9580
+
+#define MVUMI_INTERNAL_CMD_WAIT_TIME 45
+#define MVUMI_INQUIRY_LENGTH 44
+#define MVUMI_INQUIRY_UUID_OFF 36
+#define MVUMI_INQUIRY_UUID_LEN 8
+
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+
+enum mvumi_qc_result {
+ MV_QUEUE_COMMAND_RESULT_SENT = 0,
+ MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
+};
+
+struct mvumi_hw_regs {
+ /* For CPU */
+ void *main_int_cause_reg;
+ void *enpointa_mask_reg;
+ void *enpointb_mask_reg;
+ void *rstoutn_en_reg;
+ void *ctrl_sts_reg;
+ void *rstoutn_mask_reg;
+ void *sys_soft_rst_reg;
+
+ /* For Doorbell */
+ void *pciea_to_arm_drbl_reg;
+ void *arm_to_pciea_drbl_reg;
+ void *arm_to_pciea_mask_reg;
+ void *pciea_to_arm_msg0;
+ void *pciea_to_arm_msg1;
+ void *arm_to_pciea_msg0;
+ void *arm_to_pciea_msg1;
+
+ /* reset register */
+ void *reset_request;
+ void *reset_enable;
+
+ /* For Message Unit */
+ void *inb_list_basel;
+ void *inb_list_baseh;
+ void *inb_aval_count_basel;
+ void *inb_aval_count_baseh;
+ void *inb_write_pointer;
+ void *inb_read_pointer;
+ void *outb_list_basel;
+ void *outb_list_baseh;
+ void *outb_copy_basel;
+ void *outb_copy_baseh;
+ void *outb_copy_pointer;
+ void *outb_read_pointer;
+ void *inb_isr_cause;
+ void *outb_isr_cause;
+ void *outb_coal_cfg;
+ void *outb_coal_timeout;
+
+ /* Bit setting for HW */
+ u32 int_comaout;
+ u32 int_comaerr;
+ u32 int_dl_cpu2pciea;
+ u32 int_mu;
+ u32 int_drbl_int_mask;
+ u32 int_main_int_mask;
+ u32 cl_pointer_toggle;
+ u32 cl_slot_num_mask;
+ u32 clic_irq;
+ u32 clic_in_err;
+ u32 clic_out_err;
+};
+
+struct mvumi_dyn_list_entry {
+ u32 src_low_addr;
+ u32 src_high_addr;
+ u32 if_length;
+ u32 reserve;
+};
+
+#define SCSI_CMD_MARVELL_SPECIFIC 0xE1
+#define CDB_CORE_MODULE 0x1
+#define CDB_CORE_SHUTDOWN 0xB
+
+enum {
+ DRBL_HANDSHAKE = 1 << 0,
+ DRBL_SOFT_RESET = 1 << 1,
+ DRBL_BUS_CHANGE = 1 << 2,
+ DRBL_EVENT_NOTIFY = 1 << 3,
+ DRBL_MU_RESET = 1 << 4,
+ DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
+
+ /*
+ * Command flag is the flag for the CDB command itself
+ */
+ /* 1-non data; 0-data command */
+ CMD_FLAG_NON_DATA = 1 << 0,
+ CMD_FLAG_DMA = 1 << 1,
+ CMD_FLAG_PIO = 1 << 2,
+ /* 1-host read data */
+ CMD_FLAG_DATA_IN = 1 << 3,
+ /* 1-host write data */
+ CMD_FLAG_DATA_OUT = 1 << 4,
+ CMD_FLAG_PRDT_IN_HOST = 1 << 5,
+};
+
+#define APICDB0_EVENT 0xF4
+#define APICDB1_EVENT_GETEVENT 0
+#define APICDB1_HOST_GETEVENT 1
+#define MAX_EVENTS_RETURNED 6
+
+#define DEVICE_OFFLINE 0
+#define DEVICE_ONLINE 1
+
+struct mvumi_hotplug_event {
+ u16 size;
+ u8 dummy[2];
+ u8 bitmap[0];
+};
+
+struct mvumi_driver_event {
+ u32 time_stamp;
+ u32 sequence_no;
+ u32 event_id;
+ u8 severity;
+ u8 param_count;
+ u16 device_id;
+ u32 params[4];
+ u8 sense_data_length;
+ u8 Reserved1;
+ u8 sense_data[30];
+};
+
+struct mvumi_event_req {
+ unsigned char count;
+ unsigned char reserved[3];
+ struct mvumi_driver_event events[MAX_EVENTS_RETURNED];
+};
+
+struct mvumi_events_wq {
+ struct work_struct work_q;
+ struct mvumi_hba *mhba;
+ unsigned int event;
+ void *param;
+};
+
+#define HS_CAPABILITY_SUPPORT_COMPACT_SG (1U << 4)
+#define HS_CAPABILITY_SUPPORT_PRD_HOST (1U << 5)
+#define HS_CAPABILITY_SUPPORT_DYN_SRC (1U << 6)
+#define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF (1U << 14)
+
+#define MVUMI_MAX_SG_ENTRY 32
+#define SGD_EOT (1L << 27)
+#define SGD_EOT_CP (1L << 22)
+
+struct mvumi_sgl {
+ u32 baseaddr_l;
+ u32 baseaddr_h;
+ u32 flags;
+ u32 size;
+};
+struct mvumi_compact_sgl {
+ u32 baseaddr_l;
+ u32 baseaddr_h;
+ u32 flags;
+};
+
+#define GET_COMPACT_SGD_SIZE(sgd) \
+ ((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL)
+
+#define SET_COMPACT_SGD_SIZE(sgd, sz) do { \
+ (((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL; \
+ (((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz); \
+} while (0)
+#define sgd_getsz(_mhba, sgd, sz) do { \
+ if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
+ (sz) = GET_COMPACT_SGD_SIZE(sgd); \
+ else \
+ (sz) = (sgd)->size; \
+} while (0)
+
+#define sgd_setsz(_mhba, sgd, sz) do { \
+ if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
+ SET_COMPACT_SGD_SIZE(sgd, sz); \
+ else \
+ (sgd)->size = (sz); \
+} while (0)
+
+#define sgd_inc(_mhba, sgd) do { \
+ if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
+ sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \
+ else \
+ sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \
+} while (0)
+
+struct mvumi_res {
+ struct list_head entry;
+ dma_addr_t bus_addr;
+ void *virt_addr;
+ unsigned int size;
+ unsigned short type; /* enum Resource_Type */
+};
+
+/* Resource type */
+enum resource_type {
+ RESOURCE_CACHED_MEMORY = 0,
+ RESOURCE_UNCACHED_MEMORY
+};
+
+struct mvumi_sense_data {
+ u8 error_code:7;
+ u8 valid:1;
+ u8 segment_number;
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 incorrect_length:1;
+ u8 end_of_media:1;
+ u8 file_mark:1;
+ u8 information[4];
+ u8 additional_sense_length;
+ u8 command_specific_information[4];
+ u8 additional_sense_code;
+ u8 additional_sense_code_qualifier;
+ u8 field_replaceable_unit_code;
+ u8 sense_key_specific[3];
+};
+
+/* Request initiator must set the status to REQ_STATUS_PENDING. */
+#define REQ_STATUS_PENDING 0x80
+
+struct mvumi_cmd {
+ struct list_head queue_pointer;
+ struct mvumi_msg_frame *frame;
+ dma_addr_t frame_phys;
+ struct scsi_cmnd *scmd;
+ atomic_t sync_cmd;
+ void *data_buf;
+ unsigned short request_id;
+ unsigned char cmd_status;
+};
+
+/*
+ * the function type of the in bound frame
+ */
+#define CL_FUN_SCSI_CMD 0x1
+
+struct mvumi_msg_frame {
+ u16 device_id;
+ u16 tag;
+ u8 cmd_flag;
+ u8 req_function;
+ u8 cdb_length;
+ u8 sg_counts;
+ u32 data_transfer_length;
+ u16 request_id;
+ u16 reserved1;
+ u8 cdb[MAX_COMMAND_SIZE];
+ u32 payload[1];
+};
+
+/*
+ * the respond flag for data_payload of the out bound frame
+ */
+#define CL_RSP_FLAG_NODATA 0x0
+#define CL_RSP_FLAG_SENSEDATA 0x1
+
+struct mvumi_rsp_frame {
+ u16 device_id;
+ u16 tag;
+ u8 req_status;
+ u8 rsp_flag; /* Indicates the type of Data_Payload.*/
+ u16 request_id;
+ u32 payload[1];
+};
+
+struct mvumi_ob_data {
+ struct list_head list;
+ unsigned char data[0];
+};
+
+struct version_info {
+ u32 ver_major;
+ u32 ver_minor;
+ u32 ver_oem;
+ u32 ver_build;
+};
+
+#define FW_MAX_DELAY 30
+#define MVUMI_FW_BUSY (1U << 0)
+#define MVUMI_FW_ATTACH (1U << 1)
+#define MVUMI_FW_ALLOC (1U << 2)
+
+/*
+ * State is the state of the MU
+ */
+#define FW_STATE_IDLE 0
+#define FW_STATE_STARTING 1
+#define FW_STATE_HANDSHAKING 2
+#define FW_STATE_STARTED 3
+#define FW_STATE_ABORT 4
+
+#define HANDSHAKE_SIGNATURE 0x5A5A5A5AL
+#define HANDSHAKE_READYSTATE 0x55AA5AA5L
+#define HANDSHAKE_DONESTATE 0x55AAA55AL
+
+/* HandShake Status definition */
+#define HS_STATUS_OK 1
+#define HS_STATUS_ERR 2
+#define HS_STATUS_INVALID 3
+
+/* HandShake State/Cmd definition */
+#define HS_S_START 1
+#define HS_S_RESET 2
+#define HS_S_PAGE_ADDR 3
+#define HS_S_QUERY_PAGE 4
+#define HS_S_SEND_PAGE 5
+#define HS_S_END 6
+#define HS_S_ABORT 7
+#define HS_PAGE_VERIFY_SIZE 128
+
+#define HS_GET_STATE(a) (a & 0xFFFF)
+#define HS_GET_STATUS(a) ((a & 0xFFFF0000) >> 16)
+#define HS_SET_STATE(a, b) (a |= (b & 0xFFFF))
+#define HS_SET_STATUS(a, b) (a |= ((b & 0xFFFF) << 16))
+
+/* handshake frame */
+struct mvumi_hs_frame {
+ u16 size;
+ /* host information */
+ u8 host_type;
+ u8 reserved_1[1];
+ struct version_info host_ver; /* bios or driver version */
+
+ /* controller information */
+ u32 system_io_bus;
+ u32 slot_number;
+ u32 intr_level;
+ u32 intr_vector;
+
+ /* communication list configuration */
+ u32 ib_baseaddr_l;
+ u32 ib_baseaddr_h;
+ u32 ob_baseaddr_l;
+ u32 ob_baseaddr_h;
+
+ u8 ib_entry_size;
+ u8 ob_entry_size;
+ u8 ob_depth;
+ u8 ib_depth;
+
+ /* system time */
+ u64 seconds_since1970;
+};
+
+struct mvumi_hs_header {
+ u8 page_code;
+ u8 checksum;
+ u16 frame_length;
+ u32 frame_content[1];
+};
+
+/*
+ * the page code type of the handshake header
+ */
+#define HS_PAGE_FIRM_CAP 0x1
+#define HS_PAGE_HOST_INFO 0x2
+#define HS_PAGE_FIRM_CTL 0x3
+#define HS_PAGE_CL_INFO 0x4
+#define HS_PAGE_TOTAL 0x5
+
+#define HSP_SIZE(i) sizeof(struct mvumi_hs_page##i)
+
+#define HSP_MAX_SIZE ({ \
+ int size, m1, m2; \
+ m1 = max(HSP_SIZE(1), HSP_SIZE(3)); \
+ m2 = max(HSP_SIZE(2), HSP_SIZE(4)); \
+ size = max(m1, m2); \
+ size; \
+})
+
+/* The format of the page code for Firmware capability */
+struct mvumi_hs_page1 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+
+ u16 number_of_ports;
+ u16 max_devices_support;
+ u16 max_io_support;
+ u16 umi_ver;
+ u32 max_transfer_size;
+ struct version_info fw_ver;
+ u8 cl_in_max_entry_size;
+ u8 cl_out_max_entry_size;
+ u8 cl_inout_list_depth;
+ u8 total_pages;
+ u16 capability;
+ u16 reserved1;
+};
+
+/* The format of the page code for Host information */
+struct mvumi_hs_page2 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+
+ u8 host_type;
+ u8 host_cap;
+ u8 reserved[2];
+ struct version_info host_ver;
+ u32 system_io_bus;
+ u32 slot_number;
+ u32 intr_level;
+ u32 intr_vector;
+ u64 seconds_since1970;
+};
+
+/* The format of the page code for firmware control */
+struct mvumi_hs_page3 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+ u16 control;
+ u8 reserved[2];
+ u32 host_bufferaddr_l;
+ u32 host_bufferaddr_h;
+ u32 host_eventaddr_l;
+ u32 host_eventaddr_h;
+};
+
+struct mvumi_hs_page4 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+ u32 ib_baseaddr_l;
+ u32 ib_baseaddr_h;
+ u32 ob_baseaddr_l;
+ u32 ob_baseaddr_h;
+ u8 ib_entry_size;
+ u8 ob_entry_size;
+ u8 ob_depth;
+ u8 ib_depth;
+};
+
+struct mvumi_tag {
+ unsigned short *stack;
+ unsigned short top;
+ unsigned short size;
+};
+
+struct mvumi_device {
+ struct list_head list;
+ struct scsi_device *sdev;
+ u64 wwid;
+ u8 dev_type;
+ int id;
+};
+
+struct mvumi_hba {
+ void *base_addr[MAX_BASE_ADDRESS];
+ u32 pci_base[MAX_BASE_ADDRESS];
+ void *mmio;
+ struct list_head cmd_pool;
+ struct Scsi_Host *shost;
+ wait_queue_head_t int_cmd_wait_q;
+ struct pci_dev *pdev;
+ unsigned int unique_id;
+ atomic_t fw_outstanding;
+ struct mvumi_instance_template *instancet;
+
+ void *ib_list;
+ dma_addr_t ib_list_phys;
+
+ void *ib_frame;
+ dma_addr_t ib_frame_phys;
+
+ void *ob_list;
+ dma_addr_t ob_list_phys;
+
+ void *ib_shadow;
+ dma_addr_t ib_shadow_phys;
+
+ void *ob_shadow;
+ dma_addr_t ob_shadow_phys;
+
+ void *handshake_page;
+ dma_addr_t handshake_page_phys;
+
+ unsigned int global_isr;
+ unsigned int isr_status;
+
+ unsigned short max_sge;
+ unsigned short max_target_id;
+ unsigned char *target_map;
+ unsigned int max_io;
+ unsigned int list_num_io;
+ unsigned int ib_max_size;
+ unsigned int ob_max_size;
+ unsigned int ib_max_size_setting;
+ unsigned int ob_max_size_setting;
+ unsigned int max_transfer_size;
+ unsigned char hba_total_pages;
+ unsigned char fw_flag;
+ unsigned char request_id_enabled;
+ unsigned char eot_flag;
+ unsigned short hba_capability;
+ unsigned short io_seq;
+
+ unsigned int ib_cur_slot;
+ unsigned int ob_cur_slot;
+ unsigned int fw_state;
+ struct mutex sas_discovery_mutex;
+
+ struct list_head ob_data_list;
+ struct list_head free_ob_list;
+ struct list_head res_list;
+ struct list_head waiting_req_list;
+
+ struct mvumi_tag tag_pool;
+ struct mvumi_cmd **tag_cmd;
+ struct mvumi_hw_regs *regs;
+ struct mutex device_lock;
+ struct list_head mhba_dev_list;
+ struct list_head shost_dev_list;
+ struct task_struct *dm_thread;
+ atomic_t pnp_count;
+};
+
+struct mvumi_instance_template {
+ void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *);
+ void (*enable_intr) (struct mvumi_hba *);
+ void (*disable_intr) (struct mvumi_hba *);
+ int (*clear_intr) (void *);
+ unsigned int (*read_fw_status_reg) (struct mvumi_hba *);
+ unsigned int (*check_ib_list) (struct mvumi_hba *);
+ int (*check_ob_list) (struct mvumi_hba *, unsigned int *,
+ unsigned int *);
+ int (*reset_host) (struct mvumi_hba *);
+};
+
+extern struct timezone sys_tz;
+#endif
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
new file mode 100644
index 000000000..5b93ed810
--- /dev/null
+++ b/drivers/scsi/ncr53c8xx.c
@@ -0,0 +1,8626 @@
+/******************************************************************************
+** Device driver for the PCI-SCSI NCR538XX controller family.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
+** and is currently maintained by
+**
+** Gerard Roudier <groudier@free.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+** And has been ported to NetBSD by
+** Charles M. Hannum <mycroft@gnu.ai.mit.edu>
+**
+**-----------------------------------------------------------------------------
+**
+** Brief history
+**
+** December 10 1995 by Gerard Roudier:
+** Initial port to Linux.
+**
+** June 23 1996 by Gerard Roudier:
+** Support for 64 bits architectures (Alpha).
+**
+** November 30 1996 by Gerard Roudier:
+** Support for Fast-20 scsi.
+** Support for large DMA fifo and 128 dwords bursting.
+**
+** February 27 1997 by Gerard Roudier:
+** Support for Fast-40 scsi.
+** Support for on-Board RAM.
+**
+** May 3 1997 by Gerard Roudier:
+** Full support for scsi scripts instructions pre-fetching.
+**
+** May 19 1997 by Richard Waltham <dormouse@farsrobt.demon.co.uk>:
+** Support for NvRAM detection and reading.
+**
+** August 18 1997 by Cort <cort@cs.nmt.edu>:
+** Support for Power/PC (Big Endian).
+**
+** June 20 1998 by Gerard Roudier
+** Support for up to 64 tags per lun.
+** O(1) everywhere (C and SCRIPTS) for normal cases.
+** Low PCI traffic for command handling when on-chip RAM is present.
+** Aggressive SCSI SCRIPTS optimizations.
+**
+** 2005 by Matthew Wilcox and James Bottomley
+** PCI-ectomy. This driver now supports only the 720 chip (see the
+** NCR_Q720 and zalon drivers for the bus probe logic).
+**
+*******************************************************************************
+*/
+
+/*
+** Supported SCSI-II features:
+** Synchronous negotiation
+** Wide negotiation (depends on the NCR Chip)
+** Enable disconnection
+** Tagged command queuing
+** Parity checking
+** Etc...
+**
+** Supported NCR/SYMBIOS chips:
+** 53C720 (Wide, Fast SCSI-2, intfly problems)
+*/
+
+/* Name and version of the driver */
+#define SCSI_NCR_DRIVER_NAME "ncr53c8xx-3.4.3g"
+
+#define SCSI_NCR_DEBUG_FLAGS (0)
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "ncr53c8xx.h"
+
+#define NAME53C8XX "ncr53c8xx"
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_SCATTER (0x0800)
+#define DEBUG_IC (0x1000)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+static inline struct list_head *ncr_list_pop(struct list_head *head)
+{
+ if (!list_empty(head)) {
+ struct list_head *elem = head->next;
+
+ list_del(elem);
+ return elem;
+ }
+
+ return NULL;
+}
+
+/*==========================================================
+**
+** Simple power of two buddy-like allocator.
+**
+** This simple code is not intended to be fast, but to
+** provide power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit
+** arithmetic, this allocator allows simple and fast
+** address calculations from the SCRIPTS code.
+** In addition, cache line alignment is guaranteed for
+** power of 2 cache line size.
+** Enhanced in linux-2.3.44 to provide a memory pool
+** per pcidev to support dynamic dma mapping. (I would
+** have preferred a real bus abstraction, btw).
+**
+**==========================================================
+*/
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef struct device *m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+
+typedef struct m_pool { /* Memory pool of a given kind */
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ int j;
+ m_addr_t a;
+ m_link_s *h = mp->h;
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return NULL;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ j = i;
+ while (!h[j].next) {
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ h[j].next = (m_link_s *)mp->getp(mp);
+ if (h[j].next)
+ h[j].next->next = NULL;
+ break;
+ }
+ ++j;
+ s <<= 1;
+ }
+ a = (m_addr_t) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (m_link_s *) (a+s);
+ h[j].next->next = NULL;
+ }
+ }
+#ifdef DEBUG
+ printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+ return (void *) a;
+}
+
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ m_link_s *q;
+ m_addr_t a, b;
+ m_link_s *h = mp->h;
+
+#ifdef DEBUG
+ printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ a = (m_addr_t) ptr;
+
+ while (1) {
+#ifdef MEMO_FREE_UNUSED
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ mp->freep(mp, a);
+ break;
+ }
+#endif
+ b = a ^ s;
+ q = &h[i];
+ while (q->next && q->next != (m_link_s *) b) {
+ q = q->next;
+ }
+ if (!q->next) {
+ ((m_link_s *) a)->next = h[i].next;
+ h[i].next = (m_link_s *) a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ ++i;
+ }
+}
+
+static DEFINE_SPINLOCK(ncr53c8xx_lock);
+
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+ void *p;
+
+ p = ___m_alloc(mp, size);
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+ if (p)
+ memset(p, 0, size);
+ else if (uflags & MEMO_WARN)
+ printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+ return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+ ___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we donnot need to DMA from/to and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default not DMAable pool.
+ */
+
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+ m_addr_t m = __get_free_pages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER);
+ if (m)
+ ++mp->nump;
+ return m;
+}
+
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+ free_pages(m, MEMO_PAGE_ORDER);
+ --mp->nump;
+}
+
+static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
+
+/*
+ * DMAable pools.
+ */
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+ m_addr_t vp;
+ m_vtob_s *vbp;
+
+ vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+ if (vbp) {
+ dma_addr_t daddr;
+ vp = (m_addr_t) dma_alloc_coherent(mp->bush,
+ PAGE_SIZE<<MEMO_PAGE_ORDER,
+ &daddr, GFP_ATOMIC);
+ if (vp) {
+ int hc = VTOB_HASH_CODE(vp);
+ vbp->vaddr = vp;
+ vbp->baddr = daddr;
+ vbp->next = mp->vtob[hc];
+ mp->vtob[hc] = vbp;
+ ++mp->nump;
+ return vp;
+ }
+ }
+ if (vbp)
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ return 0;
+}
+
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+ m_vtob_s **vbpp, *vbp;
+ int hc = VTOB_HASH_CODE(m);
+
+ vbpp = &mp->vtob[hc];
+ while (*vbpp && (*vbpp)->vaddr != m)
+ vbpp = &(*vbpp)->next;
+ if (*vbpp) {
+ vbp = *vbpp;
+ *vbpp = (*vbpp)->next;
+ dma_free_coherent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+ (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ --mp->nump;
+ }
+}
+
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+ return mp;
+}
+
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+ if (mp) {
+ memset(mp, 0, sizeof(*mp));
+ mp->bush = bush;
+ mp->getp = ___dma_getp;
+ mp->freep = ___dma_freep;
+ mp->next = mp0.next;
+ mp0.next = mp;
+ }
+ return mp;
+}
+
+static void ___del_dma_pool(m_pool_s *p)
+{
+ struct m_pool **pp = &mp0.next;
+
+ while (*pp && *pp != p)
+ pp = &(*pp)->next;
+ if (*pp) {
+ *pp = (*pp)->next;
+ __m_free(&mp0, p, sizeof(*p), "MPOOL");
+ }
+}
+
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+ void *m = NULL;
+
+ spin_lock_irqsave(&ncr53c8xx_lock, flags);
+ mp = ___get_dma_pool(bush);
+ if (!mp)
+ mp = ___cre_dma_pool(bush);
+ if (mp)
+ m = __m_calloc(mp, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
+
+ return m;
+}
+
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+
+ spin_lock_irqsave(&ncr53c8xx_lock, flags);
+ mp = ___get_dma_pool(bush);
+ if (mp)
+ __m_free(mp, m, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
+}
+
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+ u_long flags;
+ m_pool_s *mp;
+ int hc = VTOB_HASH_CODE(m);
+ m_vtob_s *vp = NULL;
+ m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
+
+ spin_lock_irqsave(&ncr53c8xx_lock, flags);
+ mp = ___get_dma_pool(bush);
+ if (mp) {
+ vp = mp->vtob[hc];
+ while (vp && (m_addr_t) vp->vaddr != a)
+ vp = vp->next;
+ }
+ spin_unlock_irqrestore(&ncr53c8xx_lock, flags);
+ return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->dev, p)
+#define vtobus(p) _vtobus(np, p)
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
+{
+ switch(cmd->__data_mapped) {
+ case 2:
+ scsi_dma_unmap(cmd);
+ break;
+ }
+ cmd->__data_mapped = 0;
+}
+
+static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
+{
+ int use_sg;
+
+ use_sg = scsi_dma_map(cmd);
+ if (!use_sg)
+ return 0;
+
+ cmd->__data_mapped = 2;
+ cmd->__data_mapping = use_sg;
+
+ return use_sg;
+}
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
+
+/*==========================================================
+**
+** Driver setup.
+**
+** This structure is initialized from linux config
+** options. It can be overridden at boot-up by the boot
+** command line.
+**
+**==========================================================
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifndef MODULE
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+#endif
+#endif /* !MODULE */
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+
+/*===================================================================
+**
+** Driver setup from the boot command line
+**
+**===================================================================
+*/
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_UNUSED_1 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#ifndef MODULE
+static char setup_token[] __initdata =
+ "tags:" "mpar:"
+ "spar:" "disc:"
+ "specf:" "ultra:"
+ "fsn:" "revprob:"
+ "sync:" "verb:"
+ "debug:" "burst:"
+ "led:" "wide:"
+ "settle:" "diff:"
+ "irqm:" "pcifix:"
+ "buschk:" "optim:"
+ "recovery:"
+ "safe:" "nvram:"
+ "excl:" "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+ "iarb:"
+#endif
+ ; /* DONNOT REMOVE THIS ';' */
+
+static int __init get_setup_token(char *p)
+{
+ char *cur = setup_token;
+ char *pc;
+ int i = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ ++pc;
+ ++i;
+ if (!strncmp(p, cur, pc - cur))
+ return i;
+ cur = pc;
+ }
+ return 0;
+}
+
+static int __init sym53c8xx__setup(char *str)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ int i, val, c;
+ int xi = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ char *pe;
+
+ val = 0;
+ pv = pc;
+ c = *++pv;
+
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else
+ val = (int) simple_strtoul(pv, &pe, 0);
+
+ switch (get_setup_token(cur)) {
+ case OPT_TAGS:
+ driver_setup.default_tags = val;
+ if (pe && *pe == '/') {
+ i = 0;
+ while (*pe && *pe != ARG_SEP &&
+ i < sizeof(driver_setup.tag_ctrl)-1) {
+ driver_setup.tag_ctrl[i++] = *pe++;
+ }
+ driver_setup.tag_ctrl[i] = '\0';
+ }
+ break;
+ case OPT_MASTER_PARITY:
+ driver_setup.master_parity = val;
+ break;
+ case OPT_SCSI_PARITY:
+ driver_setup.scsi_parity = val;
+ break;
+ case OPT_DISCONNECTION:
+ driver_setup.disconnection = val;
+ break;
+ case OPT_SPECIAL_FEATURES:
+ driver_setup.special_features = val;
+ break;
+ case OPT_FORCE_SYNC_NEGO:
+ driver_setup.force_sync_nego = val;
+ break;
+ case OPT_REVERSE_PROBE:
+ driver_setup.reverse_probe = val;
+ break;
+ case OPT_DEFAULT_SYNC:
+ driver_setup.default_sync = val;
+ break;
+ case OPT_VERBOSE:
+ driver_setup.verbose = val;
+ break;
+ case OPT_DEBUG:
+ driver_setup.debug = val;
+ break;
+ case OPT_BURST_MAX:
+ driver_setup.burst_max = val;
+ break;
+ case OPT_LED_PIN:
+ driver_setup.led_pin = val;
+ break;
+ case OPT_MAX_WIDE:
+ driver_setup.max_wide = val? 1:0;
+ break;
+ case OPT_SETTLE_DELAY:
+ driver_setup.settle_delay = val;
+ break;
+ case OPT_DIFF_SUPPORT:
+ driver_setup.diff_support = val;
+ break;
+ case OPT_IRQM:
+ driver_setup.irqm = val;
+ break;
+ case OPT_PCI_FIX_UP:
+ driver_setup.pci_fix_up = val;
+ break;
+ case OPT_BUS_CHECK:
+ driver_setup.bus_check = val;
+ break;
+ case OPT_OPTIMIZE:
+ driver_setup.optimize = val;
+ break;
+ case OPT_RECOVERY:
+ driver_setup.recovery = val;
+ break;
+ case OPT_USE_NVRAM:
+ driver_setup.use_nvram = val;
+ break;
+ case OPT_SAFE_SETUP:
+ memcpy(&driver_setup, &driver_safe_setup,
+ sizeof(driver_setup));
+ break;
+ case OPT_EXCLUDE:
+ if (xi < SCSI_NCR_MAX_EXCLUDES)
+ driver_setup.excludes[xi++] = val;
+ break;
+ case OPT_HOST_ID:
+ driver_setup.host_id = val;
+ break;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ case OPT_IARB:
+ driver_setup.iarb = val;
+ break;
+#endif
+ default:
+ printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+ break;
+ }
+
+ if ((cur = strchr(cur, ARG_SEP)) != NULL)
+ ++cur;
+ }
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+ return 1;
+}
+#endif /* !MODULE */
+
+/*===================================================================
+**
+** Get device queue depth from boot command line.
+**
+**===================================================================
+*/
+#define DEF_DEPTH (driver_setup.default_tags)
+#define ALL_TARGETS -2
+#define NO_TARGET -1
+#define ALL_LUNS -2
+#define NO_LUN -1
+
+static int device_queue_depth(int unit, int target, int lun)
+{
+ int c, h, t, u, v;
+ char *p = driver_setup.tag_ctrl;
+ char *ep;
+
+ h = -1;
+ t = NO_TARGET;
+ u = NO_LUN;
+ while ((c = *p++) != 0) {
+ v = simple_strtoul(p, &ep, 0);
+ switch(c) {
+ case '/':
+ ++h;
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ case 't':
+ if (t != target)
+ t = (target == v) ? v : NO_TARGET;
+ u = ALL_LUNS;
+ break;
+ case 'u':
+ if (u != lun)
+ u = (lun == v) ? v : NO_LUN;
+ break;
+ case 'q':
+ if (h == unit &&
+ (t == ALL_TARGETS || t == target) &&
+ (u == ALL_LUNS || u == lun))
+ return v;
+ break;
+ case '-':
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ default:
+ break;
+ }
+ p = ep;
+ }
+ return DEF_DEPTH;
+}
+
+
+/*==========================================================
+**
+** The CCB done queue uses an array of CCB virtual
+** addresses. Empty entries are flagged using the bogus
+** virtual address 0xffffffff.
+**
+** Since PCI ensures that only aligned DWORDs are accessed
+** atomically, 64 bit little-endian architecture requires
+** to test the high order DWORD of the entry to determine
+** if it is empty or valid.
+**
+** BTW, I will make things differently as soon as I will
+** have a better idea, but this is simple and should work.
+**
+**==========================================================
+*/
+
+#define SCSI_NCR_CCB_DONE_SUPPORT
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+
+#define MAX_DONE 24
+#define CCB_DONE_EMPTY 0xffffffffUL
+
+/* All 32 bit architectures */
+#if BITS_PER_LONG == 32
+#define CCB_DONE_VALID(cp) (((u_long) cp) != CCB_DONE_EMPTY)
+
+/* All > 32 bit (64 bit) architectures regardless endian-ness */
+#else
+#define CCB_DONE_VALID(cp) \
+ ((((u_long) cp) & 0xffffffff00000000ul) && \
+ (((u_long) cp) & 0xfffffffful) != CCB_DONE_EMPTY)
+#endif
+
+#endif /* SCSI_NCR_CCB_DONE_SUPPORT */
+
+/*==========================================================
+**
+** Configuration and Debugging
+**
+**==========================================================
+*/
+
+/*
+** SCSI address of this device.
+** The boot routines should have set it.
+** If not, use this.
+*/
+
+#ifndef SCSI_NCR_MYADDR
+#define SCSI_NCR_MYADDR (7)
+#endif
+
+/*
+** The maximum number of tags per logic unit.
+** Used only for disk devices that support tags.
+*/
+
+#ifndef SCSI_NCR_MAX_TAGS
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+** TAGS are actually limited to 64 tags/lun.
+** We need to deal with power of 2, for alignment constraints.
+*/
+#if SCSI_NCR_MAX_TAGS > 64
+#define MAX_TAGS (64)
+#else
+#define MAX_TAGS SCSI_NCR_MAX_TAGS
+#endif
+
+#define NO_TAG (255)
+
+/*
+** Choose appropriate type for tag bitmap.
+*/
+#if MAX_TAGS > 32
+typedef u64 tagmap_t;
+#else
+typedef u32 tagmap_t;
+#endif
+
+/*
+** Number of targets supported by the driver.
+** n permits target numbers 0..n-1.
+** Default is 16, meaning targets #0..#15.
+** #7 .. is myself.
+*/
+
+#ifdef SCSI_NCR_MAX_TARGET
+#define MAX_TARGET (SCSI_NCR_MAX_TARGET)
+#else
+#define MAX_TARGET (16)
+#endif
+
+/*
+** Number of logic units supported by the driver.
+** n enables logic unit numbers 0..n-1.
+** The common SCSI devices require only
+** one lun, so take 1 as the default.
+*/
+
+#ifdef SCSI_NCR_MAX_LUN
+#define MAX_LUN SCSI_NCR_MAX_LUN
+#else
+#define MAX_LUN (1)
+#endif
+
+/*
+** Asynchronous pre-scaler (ns). Shall be 40
+*/
+
+#ifndef SCSI_NCR_MIN_ASYNC
+#define SCSI_NCR_MIN_ASYNC (40)
+#endif
+
+/*
+** The maximum number of jobs scheduled for starting.
+** There should be one slot per target, and one slot
+** for each tag of each target in use.
+** The calculation below is actually quite silly ...
+*/
+
+#ifdef SCSI_NCR_CAN_QUEUE
+#define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
+#else
+#define MAX_START (MAX_TARGET + 7 * MAX_TAGS)
+#endif
+
+/*
+** We limit the max number of pending IO to 250.
+** since we donnot want to allocate more than 1
+** PAGE for 'scripth'.
+*/
+#if MAX_START > 250
+#undef MAX_START
+#define MAX_START 250
+#endif
+
+/*
+** The maximum number of segments a transfer is split into.
+** We support up to 127 segments for both read and write.
+** The data scripts are broken into 2 sub-scripts.
+** 80 (MAX_SCATTERL) segments are moved from a sub-script
+** in on-chip RAM. This makes data transfers shorter than
+** 80k (assuming 1k fs) as fast as possible.
+*/
+
+#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
+
+#if (MAX_SCATTER > 80)
+#define MAX_SCATTERL 80
+#define MAX_SCATTERH (MAX_SCATTER - MAX_SCATTERL)
+#else
+#define MAX_SCATTERL (MAX_SCATTER-1)
+#define MAX_SCATTERH 1
+#endif
+
+/*
+** other
+*/
+
+#define NCR_SNOOP_TIMEOUT (1000000)
+
+/*
+** Other definitions
+*/
+
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) + ((scsi_code) & 0x7f))
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+/*==========================================================
+**
+** Command control block states.
+**
+**==========================================================
+*/
+
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+
+#define HS_DONEMASK (0x80)
+#define HS_COMPLETE (4|HS_DONEMASK)
+#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
+#define HS_RESET (6|HS_DONEMASK) /* SCSI reset */
+#define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */
+#define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */
+#define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */
+#define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */
+
+/*
+** Invalid host status values used by the SCRIPTS processor
+** when the nexus is not fully identified.
+** Shall never appear in a CCB.
+*/
+
+#define HS_INVALMASK (0x40)
+#define HS_SELECTING (0|HS_INVALMASK)
+#define HS_IN_RESELECT (1|HS_INVALMASK)
+#define HS_STARTING (2|HS_INVALMASK)
+
+/*
+** Flags set by the SCRIPT processor for commands
+** that have been skipped.
+*/
+#define HS_SKIPMASK (0x20)
+
+/*==========================================================
+**
+** Software Interrupt Codes
+**
+**==========================================================
+*/
+
+#define SIR_BAD_STATUS (1)
+#define SIR_XXXXXXXXXX (2)
+#define SIR_NEGO_SYNC (3)
+#define SIR_NEGO_WIDE (4)
+#define SIR_NEGO_FAILED (5)
+#define SIR_NEGO_PROTO (6)
+#define SIR_REJECT_RECEIVED (7)
+#define SIR_REJECT_SENT (8)
+#define SIR_IGN_RESIDUE (9)
+#define SIR_MISSING_SAVE (10)
+#define SIR_RESEL_NO_MSG_IN (11)
+#define SIR_RESEL_NO_IDENTIFY (12)
+#define SIR_RESEL_BAD_LUN (13)
+#define SIR_RESEL_BAD_TARGET (14)
+#define SIR_RESEL_BAD_I_T_L (15)
+#define SIR_RESEL_BAD_I_T_L_Q (16)
+#define SIR_DONE_OVERFLOW (17)
+#define SIR_INTFLY (18)
+#define SIR_MAX (18)
+
+/*==========================================================
+**
+** Extended error codes.
+** xerr_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define XE_OK (0)
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (2) /* illegal phase (4/5) */
+
+/*==========================================================
+**
+** Negotiation status.
+** nego_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define NS_NOCHANGE (0)
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+#define NS_PPR (4)
+
+/*==========================================================
+**
+** Misc.
+**
+**==========================================================
+*/
+
+#define CCB_MAGIC (0xf2691ad2)
+
+/*==========================================================
+**
+** Declaration of structs.
+**
+**==========================================================
+*/
+
+static struct scsi_transport_template *ncr53c8xx_transport_template = NULL;
+
+struct tcb;
+struct lcb;
+struct ccb;
+struct ncb;
+struct script;
+
+struct link {
+ ncrcmd l_cmd;
+ ncrcmd l_paddr;
+};
+
+struct usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETORDER 13
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_SETVERBOSE 17
+
+#define UF_TRACE (0x01)
+#define UF_NODISC (0x02)
+#define UF_NOSCAN (0x04)
+
+/*========================================================================
+**
+** Declaration of structs: target control block
+**
+**========================================================================
+*/
+struct tcb {
+ /*----------------------------------------------------------------
+ ** During reselection the ncr jumps to this point with SFBR
+ ** set to the encoded target number with bit 7 set.
+ ** if it's not this target, jump to the next.
+ **
+ ** JUMP IF (SFBR != #target#), @(next tcb)
+ **----------------------------------------------------------------
+ */
+ struct link jump_tcb;
+
+ /*----------------------------------------------------------------
+ ** Load the actual values for the sxfer and the scntl3
+ ** register (sync/wide mode).
+ **
+ ** SCR_COPY (1), @(sval field of this tcb), @(sxfer register)
+ ** SCR_COPY (1), @(wval field of this tcb), @(scntl3 register)
+ **----------------------------------------------------------------
+ */
+ ncrcmd getscr[6];
+
+ /*----------------------------------------------------------------
+ ** Get the IDENTIFY message and load the LUN to SFBR.
+ **
+ ** CALL, <RESEL_LUN>
+ **----------------------------------------------------------------
+ */
+ struct link call_lun;
+
+ /*----------------------------------------------------------------
+ ** Now look for the right lun.
+ **
+ ** For i = 0 to 3
+ ** SCR_JUMP ^ IFTRUE(MASK(i, 3)), @(first lcb mod. i)
+ **
+ ** Recent chips will prefetch the 4 JUMPS using only 1 burst.
+ ** It is kind of hashcoding.
+ **----------------------------------------------------------------
+ */
+ struct link jump_lcb[4]; /* JUMPs for reselection */
+ struct lcb * lp[MAX_LUN]; /* The lcb's of this tcb */
+
+ /*----------------------------------------------------------------
+ ** Pointer to the ccb used for negotiation.
+ ** Prevent from starting a negotiation for all queued commands
+ ** when tagged command queuing is enabled.
+ **----------------------------------------------------------------
+ */
+ struct ccb * nego_cp;
+
+ /*----------------------------------------------------------------
+ ** statistical data
+ **----------------------------------------------------------------
+ */
+ u_long transfers;
+ u_long bytes;
+
+ /*----------------------------------------------------------------
+ ** negotiation of wide and synch transfer and device quirks.
+ **----------------------------------------------------------------
+ */
+#ifdef SCSI_NCR_BIG_ENDIAN
+/*0*/ u16 period;
+/*2*/ u_char sval;
+/*3*/ u_char minsync;
+/*0*/ u_char wval;
+/*1*/ u_char widedone;
+/*2*/ u_char quirks;
+/*3*/ u_char maxoffs;
+#else
+/*0*/ u_char minsync;
+/*1*/ u_char sval;
+/*2*/ u16 period;
+/*0*/ u_char maxoffs;
+/*1*/ u_char quirks;
+/*2*/ u_char widedone;
+/*3*/ u_char wval;
+#endif
+
+ /* User settable limits and options. */
+ u_char usrsync;
+ u_char usrwide;
+ u_char usrtags;
+ u_char usrflag;
+ struct scsi_target *starget;
+};
+
+/*========================================================================
+**
+** Declaration of structs: lun control block
+**
+**========================================================================
+*/
+struct lcb {
+ /*----------------------------------------------------------------
+ ** During reselection the ncr jumps to this point
+ ** with SFBR set to the "Identify" message.
+ ** if it's not this lun, jump to the next.
+ **
+ ** JUMP IF (SFBR != #lun#), @(next lcb of this target)
+ **
+ ** It is this lun. Load TEMP with the nexus jumps table
+ ** address and jump to RESEL_TAG (or RESEL_NOTAG).
+ **
+ ** SCR_COPY (4), p_jump_ccb, TEMP,
+ ** SCR_JUMP, <RESEL_TAG>
+ **----------------------------------------------------------------
+ */
+ struct link jump_lcb;
+ ncrcmd load_jump_ccb[3];
+ struct link jump_tag;
+ ncrcmd p_jump_ccb; /* Jump table bus address */
+
+ /*----------------------------------------------------------------
+ ** Jump table used by the script processor to directly jump
+ ** to the CCB corresponding to the reselected nexus.
+ ** Address is allocated on 256 bytes boundary in order to
+ ** allow 8 bit calculation of the tag jump entry for up to
+ ** 64 possible tags.
+ **----------------------------------------------------------------
+ */
+ u32 jump_ccb_0; /* Default table if no tags */
+ u32 *jump_ccb; /* Virtual address */
+
+ /*----------------------------------------------------------------
+ ** CCB queue management.
+ **----------------------------------------------------------------
+ */
+ struct list_head free_ccbq; /* Queue of available CCBs */
+ struct list_head busy_ccbq; /* Queue of busy CCBs */
+ struct list_head wait_ccbq; /* Queue of waiting for IO CCBs */
+ struct list_head skip_ccbq; /* Queue of skipped CCBs */
+ u_char actccbs; /* Number of allocated CCBs */
+ u_char busyccbs; /* CCBs busy for this lun */
+ u_char queuedccbs; /* CCBs queued to the controller*/
+ u_char queuedepth; /* Queue depth for this lun */
+ u_char scdev_depth; /* SCSI device queue depth */
+ u_char maxnxs; /* Max possible nexuses */
+
+ /*----------------------------------------------------------------
+ ** Control of tagged command queuing.
+ ** Tags allocation is performed using a circular buffer.
+ ** This avoids using a loop for tag allocation.
+ **----------------------------------------------------------------
+ */
+ u_char ia_tag; /* Allocation index */
+ u_char if_tag; /* Freeing index */
+ u_char cb_tags[MAX_TAGS]; /* Circular tags buffer */
+ u_char usetags; /* Command queuing is active */
+ u_char maxtags; /* Max nr of tags asked by user */
+ u_char numtags; /* Current number of tags */
+
+ /*----------------------------------------------------------------
+ ** QUEUE FULL control and ORDERED tag control.
+ **----------------------------------------------------------------
+ */
+ /*----------------------------------------------------------------
+ ** QUEUE FULL and ORDERED tag control.
+ **----------------------------------------------------------------
+ */
+ u16 num_good; /* Nr of GOOD since QUEUE FULL */
+ tagmap_t tags_umap; /* Used tags bitmap */
+ tagmap_t tags_smap; /* Tags in use at 'tag_stime' */
+ u_long tags_stime; /* Last time we set smap=umap */
+ struct ccb * held_ccb; /* CCB held for QUEUE FULL */
+};
+
+/*========================================================================
+**
+** Declaration of structs: the launch script.
+**
+**========================================================================
+**
+** It is part of the CCB and is called by the scripts processor to
+** start or restart the data structure (nexus).
+** This 6 DWORDs mini script makes use of prefetching.
+**
+**------------------------------------------------------------------------
+*/
+struct launch {
+ /*----------------------------------------------------------------
+ ** SCR_COPY(4), @(p_phys), @(dsa register)
+ ** SCR_JUMP, @(scheduler_point)
+ **----------------------------------------------------------------
+ */
+ ncrcmd setup_dsa[3]; /* Copy 'phys' address to dsa */
+ struct link schedule; /* Jump to scheduler point */
+ ncrcmd p_phys; /* 'phys' header bus address */
+};
+
+/*========================================================================
+**
+** Declaration of structs: global HEADER.
+**
+**========================================================================
+**
+** This substructure is copied from the ccb to a global address after
+** selection (or reselection) and copied back before disconnect.
+**
+** These fields are accessible to the script processor.
+**
+**------------------------------------------------------------------------
+*/
+
+struct head {
+ /*----------------------------------------------------------------
+ ** Saved data pointer.
+ ** Points to the position in the script responsible for the
+ ** actual transfer transfer of data.
+ ** It's written after reception of a SAVE_DATA_POINTER message.
+ ** The goalpointer points after the last transfer command.
+ **----------------------------------------------------------------
+ */
+ u32 savep;
+ u32 lastp;
+ u32 goalp;
+
+ /*----------------------------------------------------------------
+ ** Alternate data pointer.
+ ** They are copied back to savep/lastp/goalp by the SCRIPTS
+ ** when the direction is unknown and the device claims data out.
+ **----------------------------------------------------------------
+ */
+ u32 wlastp;
+ u32 wgoalp;
+
+ /*----------------------------------------------------------------
+ ** The virtual address of the ccb containing this header.
+ **----------------------------------------------------------------
+ */
+ struct ccb * cp;
+
+ /*----------------------------------------------------------------
+ ** Status fields.
+ **----------------------------------------------------------------
+ */
+ u_char scr_st[4]; /* script status */
+ u_char status[4]; /* host status. must be the */
+ /* last DWORD of the header. */
+};
+
+/*
+** The status bytes are used by the host and the script processor.
+**
+** The byte corresponding to the host_status must be stored in the
+** last DWORD of the CCB header since it is used for command
+** completion (ncr_wakeup()). Doing so, we are sure that the header
+** has been entirely copied back to the CCB when the host_status is
+** seen complete by the CPU.
+**
+** The last four bytes (status[4]) are copied to the scratchb register
+** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
+** and copied back just after disconnecting.
+** Inside the script the XX_REG are used.
+**
+** The first four bytes (scr_st[4]) are used inside the script by
+** "COPY" commands.
+** Because source and destination must have the same alignment
+** in a DWORD, the fields HAVE to be at the chosen offsets.
+** xerr_st 0 (0x34) scratcha
+** sync_st 1 (0x05) sxfer
+** wide_st 3 (0x03) scntl3
+*/
+
+/*
+** Last four bytes (script)
+*/
+#define QU_REG scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define SS_PRT nc_scr2
+#define PS_REG scr3
+
+/*
+** Last four bytes (host)
+*/
+#ifdef SCSI_NCR_BIG_ENDIAN
+#define actualquirks phys.header.status[3]
+#define host_status phys.header.status[2]
+#define scsi_status phys.header.status[1]
+#define parity_status phys.header.status[0]
+#else
+#define actualquirks phys.header.status[0]
+#define host_status phys.header.status[1]
+#define scsi_status phys.header.status[2]
+#define parity_status phys.header.status[3]
+#endif
+
+/*
+** First four bytes (script)
+*/
+#define xerr_st header.scr_st[0]
+#define sync_st header.scr_st[1]
+#define nego_st header.scr_st[2]
+#define wide_st header.scr_st[3]
+
+/*
+** First four bytes (host)
+*/
+#define xerr_status phys.xerr_st
+#define nego_status phys.nego_st
+
+#if 0
+#define sync_status phys.sync_st
+#define wide_status phys.wide_st
+#endif
+
+/*==========================================================
+**
+** Declaration of structs: Data structure block
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changeable data and
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+struct dsb {
+
+ /*
+ ** Header.
+ */
+
+ struct head header;
+
+ /*
+ ** Table data for Script
+ */
+
+ struct scr_tblsel select;
+ struct scr_tblmove smsg ;
+ struct scr_tblmove cmd ;
+ struct scr_tblmove sense ;
+ struct scr_tblmove data[MAX_SCATTER];
+};
+
+
+/*========================================================================
+**
+** Declaration of structs: Command control block.
+**
+**========================================================================
+*/
+struct ccb {
+ /*----------------------------------------------------------------
+ ** This is the data structure which is pointed by the DSA
+ ** register when it is executed by the script processor.
+ ** It must be the first entry because it contains the header
+ ** as first entry that must be cache line aligned.
+ **----------------------------------------------------------------
+ */
+ struct dsb phys;
+
+ /*----------------------------------------------------------------
+ ** Mini-script used at CCB execution start-up.
+ ** Load the DSA with the data structure address (phys) and
+ ** jump to SELECT. Jump to CANCEL if CCB is to be canceled.
+ **----------------------------------------------------------------
+ */
+ struct launch start;
+
+ /*----------------------------------------------------------------
+ ** Mini-script used at CCB relection to restart the nexus.
+ ** Load the DSA with the data structure address (phys) and
+ ** jump to RESEL_DSA. Jump to ABORT if CCB is to be aborted.
+ **----------------------------------------------------------------
+ */
+ struct launch restart;
+
+ /*----------------------------------------------------------------
+ ** If a data transfer phase is terminated too early
+ ** (after reception of a message (i.e. DISCONNECT)),
+ ** we have to prepare a mini script to transfer
+ ** the rest of the data.
+ **----------------------------------------------------------------
+ */
+ ncrcmd patch[8];
+
+ /*----------------------------------------------------------------
+ ** The general SCSI driver provides a
+ ** pointer to a control block.
+ **----------------------------------------------------------------
+ */
+ struct scsi_cmnd *cmd; /* SCSI command */
+ u_char cdb_buf[16]; /* Copy of CDB */
+ u_char sense_buf[64];
+ int data_len; /* Total data length */
+
+ /*----------------------------------------------------------------
+ ** Message areas.
+ ** We prepare a message to be sent after selection.
+ ** We may use a second one if the command is rescheduled
+ ** due to GETCC or QFULL.
+ ** Contents are IDENTIFY and SIMPLE_TAG.
+ ** While negotiating sync or wide transfer,
+ ** a SDTR or WDTR message is appended.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_smsg [8];
+ u_char scsi_smsg2[8];
+
+ /*----------------------------------------------------------------
+ ** Other fields.
+ **----------------------------------------------------------------
+ */
+ u_long p_ccb; /* BUS address of this CCB */
+ u_char sensecmd[6]; /* Sense command */
+ u_char tag; /* Tag for this transfer */
+ /* 255 means no tag */
+ u_char target;
+ u_char lun;
+ u_char queued;
+ u_char auto_sense;
+ struct ccb * link_ccb; /* Host adapter CCB chain */
+ struct list_head link_ccbq; /* Link to unit CCB queue */
+ u32 startp; /* Initial data pointer */
+ u_long magic; /* Free / busy CCB flag */
+};
+
+#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
+
+
+/*========================================================================
+**
+** Declaration of structs: NCR device descriptor
+**
+**========================================================================
+*/
+struct ncb {
+ /*----------------------------------------------------------------
+ ** The global header.
+ ** It is accessible to both the host and the script processor.
+ ** Must be cache line size aligned (32 for x86) in order to
+ ** allow cache line bursting when it is copied to/from CCB.
+ **----------------------------------------------------------------
+ */
+ struct head header;
+
+ /*----------------------------------------------------------------
+ ** CCBs management queues.
+ **----------------------------------------------------------------
+ */
+ struct scsi_cmnd *waiting_list; /* Commands waiting for a CCB */
+ /* when lcb is not allocated. */
+ struct scsi_cmnd *done_list; /* Commands waiting for done() */
+ /* callback to be invoked. */
+ spinlock_t smp_lock; /* Lock for SMP threading */
+
+ /*----------------------------------------------------------------
+ ** Chip and controller identification.
+ **----------------------------------------------------------------
+ */
+ int unit; /* Unit number */
+ char inst_name[16]; /* ncb instance name */
+
+ /*----------------------------------------------------------------
+ ** Initial value of some IO register bits.
+ ** These values are assumed to have been set by BIOS, and may
+ ** be used for probing adapter implementation differences.
+ **----------------------------------------------------------------
+ */
+ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest0, sv_ctest3,
+ sv_ctest4, sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4;
+
+ /*----------------------------------------------------------------
+ ** Actual initial value of IO register bits used by the
+ ** driver. They are loaded at initialisation according to
+ ** features that are to be enabled.
+ **----------------------------------------------------------------
+ */
+ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest0, rv_ctest3,
+ rv_ctest4, rv_ctest5, rv_stest2;
+
+ /*----------------------------------------------------------------
+ ** Targets management.
+ ** During reselection the ncr jumps to jump_tcb.
+ ** The SFBR register is loaded with the encoded target id.
+ ** For i = 0 to 3
+ ** SCR_JUMP ^ IFTRUE(MASK(i, 3)), @(next tcb mod. i)
+ **
+ ** Recent chips will prefetch the 4 JUMPS using only 1 burst.
+ ** It is kind of hashcoding.
+ **----------------------------------------------------------------
+ */
+ struct link jump_tcb[4]; /* JUMPs for reselection */
+ struct tcb target[MAX_TARGET]; /* Target data */
+
+ /*----------------------------------------------------------------
+ ** Virtual and physical bus addresses of the chip.
+ **----------------------------------------------------------------
+ */
+ void __iomem *vaddr; /* Virtual and bus address of */
+ unsigned long paddr; /* chip's IO registers. */
+ unsigned long paddr2; /* On-chip RAM bus address. */
+ volatile /* Pointer to volatile for */
+ struct ncr_reg __iomem *reg; /* memory mapped IO. */
+
+ /*----------------------------------------------------------------
+ ** SCRIPTS virtual and physical bus addresses.
+ ** 'script' is loaded in the on-chip RAM if present.
+ ** 'scripth' stays in main memory.
+ **----------------------------------------------------------------
+ */
+ struct script *script0; /* Copies of script and scripth */
+ struct scripth *scripth0; /* relocated for this ncb. */
+ struct scripth *scripth; /* Actual scripth virt. address */
+ u_long p_script; /* Actual script and scripth */
+ u_long p_scripth; /* bus addresses. */
+
+ /*----------------------------------------------------------------
+ ** General controller parameters and configuration.
+ **----------------------------------------------------------------
+ */
+ struct device *dev;
+ u_char revision_id; /* PCI device revision id */
+ u32 irq; /* IRQ level */
+ u32 features; /* Chip features map */
+ u_char myaddr; /* SCSI id of the adapter */
+ u_char maxburst; /* log base 2 of dwords burst */
+ u_char maxwide; /* Maximum transfer width */
+ u_char minsync; /* Minimum sync period factor */
+ u_char maxsync; /* Maximum sync period factor */
+ u_char maxoffs; /* Max scsi offset */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u_long clock_khz; /* SCSI clock frequency in KHz */
+
+ /*----------------------------------------------------------------
+ ** Start queue management.
+ ** It is filled up by the host processor and accessed by the
+ ** SCRIPTS processor in order to start SCSI commands.
+ **----------------------------------------------------------------
+ */
+ u16 squeueput; /* Next free slot of the queue */
+ u16 actccbs; /* Number of allocated CCBs */
+ u16 queuedccbs; /* Number of CCBs in start queue*/
+ u16 queuedepth; /* Start queue depth */
+
+ /*----------------------------------------------------------------
+ ** Timeout handler.
+ **----------------------------------------------------------------
+ */
+ struct timer_list timer; /* Timer handler link header */
+ u_long lasttime;
+ u_long settle_time; /* Resetting the SCSI BUS */
+
+ /*----------------------------------------------------------------
+ ** Debugging and profiling.
+ **----------------------------------------------------------------
+ */
+ struct ncr_reg regdump; /* Register dump */
+ u_long regtime; /* Time it has been done */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous buffers accessed by the scripts-processor.
+ ** They shall be DWORD aligned, because they may be read or
+ ** written with a SCR_COPY script command.
+ **----------------------------------------------------------------
+ */
+ u_char msgout[8]; /* Buffer for MESSAGE OUT */
+ u_char msgin [8]; /* Buffer for MESSAGE IN */
+ u32 lastmsg; /* Last SCSI message sent */
+ u_char scratch; /* Scratch for SCSI receive */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous configuration and status parameters.
+ **----------------------------------------------------------------
+ */
+ u_char disc; /* Diconnection allowed */
+ u_char scsi_mode; /* Current SCSI BUS mode */
+ u_char order; /* Tag order to use */
+ u_char verbose; /* Verbosity for this controller*/
+ int ncr_cache; /* Used for cache test at init. */
+ u_long p_ncb; /* BUS address of this NCB */
+
+ /*----------------------------------------------------------------
+ ** Command completion handling.
+ **----------------------------------------------------------------
+ */
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+ struct ccb *(ccb_done[MAX_DONE]);
+ int ccb_done_ic;
+#endif
+ /*----------------------------------------------------------------
+ ** Fields that should be removed or changed.
+ **----------------------------------------------------------------
+ */
+ struct ccb *ccb; /* Global CCB */
+ struct usrcmd user; /* Command from user */
+ volatile u_char release_stage; /* Synchronisation stage on release */
+};
+
+#define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl))
+#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl))
+
+/*==========================================================
+**
+**
+** Script for NCR-Processor.
+**
+** Use ncr_script_fill() to create the variable parts.
+** Use ncr_script_copy_and_bind() to make a copy and
+** bind to physical addresses.
+**
+**
+**==========================================================
+**
+** We have to know the offsets of all labels before
+** we reach them (for forward jumps).
+** Therefore we declare a struct here.
+** If you make changes inside the script,
+** DONT FORGET TO CHANGE THE LENGTHS HERE!
+**
+**----------------------------------------------------------
+*/
+
+/*
+** For HP Zalon/53c720 systems, the Zalon interface
+** between CPU and 53c720 does prefetches, which causes
+** problems with self modifying scripts. The problem
+** is overcome by calling a dummy subroutine after each
+** modification, to force a refetch of the script on
+** return from the subroutine.
+*/
+
+#ifdef CONFIG_NCR53C8XX_PREFETCH
+#define PREFETCH_FLUSH_CNT 2
+#define PREFETCH_FLUSH SCR_CALL, PADDRH (wait_dma),
+#else
+#define PREFETCH_FLUSH_CNT 0
+#define PREFETCH_FLUSH
+#endif
+
+/*
+** Script fragments which are loaded into the on-chip RAM
+** of 825A, 875 and 895 chips.
+*/
+struct script {
+ ncrcmd start [ 5];
+ ncrcmd startpos [ 1];
+ ncrcmd select [ 6];
+ ncrcmd select2 [ 9 + PREFETCH_FLUSH_CNT];
+ ncrcmd loadpos [ 4];
+ ncrcmd send_ident [ 9];
+ ncrcmd prepare [ 6];
+ ncrcmd prepare2 [ 7];
+ ncrcmd command [ 6];
+ ncrcmd dispatch [ 32];
+ ncrcmd clrack [ 4];
+ ncrcmd no_data [ 17];
+ ncrcmd status [ 8];
+ ncrcmd msg_in [ 2];
+ ncrcmd msg_in2 [ 16];
+ ncrcmd msg_bad [ 4];
+ ncrcmd setmsg [ 7];
+ ncrcmd cleanup [ 6];
+ ncrcmd complete [ 9];
+ ncrcmd cleanup_ok [ 8 + PREFETCH_FLUSH_CNT];
+ ncrcmd cleanup0 [ 1];
+#ifndef SCSI_NCR_CCB_DONE_SUPPORT
+ ncrcmd signal [ 12];
+#else
+ ncrcmd signal [ 9];
+ ncrcmd done_pos [ 1];
+ ncrcmd done_plug [ 2];
+ ncrcmd done_end [ 7];
+#endif
+ ncrcmd save_dp [ 7];
+ ncrcmd restore_dp [ 5];
+ ncrcmd disconnect [ 10];
+ ncrcmd msg_out [ 9];
+ ncrcmd msg_out_done [ 7];
+ ncrcmd idle [ 2];
+ ncrcmd reselect [ 8];
+ ncrcmd reselected [ 8];
+ ncrcmd resel_dsa [ 6 + PREFETCH_FLUSH_CNT];
+ ncrcmd loadpos1 [ 4];
+ ncrcmd resel_lun [ 6];
+ ncrcmd resel_tag [ 6];
+ ncrcmd jump_to_nexus [ 4 + PREFETCH_FLUSH_CNT];
+ ncrcmd nexus_indirect [ 4];
+ ncrcmd resel_notag [ 4];
+ ncrcmd data_in [MAX_SCATTERL * 4];
+ ncrcmd data_in2 [ 4];
+ ncrcmd data_out [MAX_SCATTERL * 4];
+ ncrcmd data_out2 [ 4];
+};
+
+/*
+** Script fragments which stay in main memory for all chips.
+*/
+struct scripth {
+ ncrcmd tryloop [MAX_START*2];
+ ncrcmd tryloop2 [ 2];
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+ ncrcmd done_queue [MAX_DONE*5];
+ ncrcmd done_queue2 [ 2];
+#endif
+ ncrcmd select_no_atn [ 8];
+ ncrcmd cancel [ 4];
+ ncrcmd skip [ 9 + PREFETCH_FLUSH_CNT];
+ ncrcmd skip2 [ 19];
+ ncrcmd par_err_data_in [ 6];
+ ncrcmd par_err_other [ 4];
+ ncrcmd msg_reject [ 8];
+ ncrcmd msg_ign_residue [ 24];
+ ncrcmd msg_extended [ 10];
+ ncrcmd msg_ext_2 [ 10];
+ ncrcmd msg_wdtr [ 14];
+ ncrcmd send_wdtr [ 7];
+ ncrcmd msg_ext_3 [ 10];
+ ncrcmd msg_sdtr [ 14];
+ ncrcmd send_sdtr [ 7];
+ ncrcmd nego_bad_phase [ 4];
+ ncrcmd msg_out_abort [ 10];
+ ncrcmd hdata_in [MAX_SCATTERH * 4];
+ ncrcmd hdata_in2 [ 2];
+ ncrcmd hdata_out [MAX_SCATTERH * 4];
+ ncrcmd hdata_out2 [ 2];
+ ncrcmd reset [ 4];
+ ncrcmd aborttag [ 4];
+ ncrcmd abort [ 2];
+ ncrcmd abort_resel [ 20];
+ ncrcmd resend_ident [ 4];
+ ncrcmd clratn_go_on [ 3];
+ ncrcmd nxtdsp_go_on [ 1];
+ ncrcmd sdata_in [ 8];
+ ncrcmd data_io [ 18];
+ ncrcmd bad_identify [ 12];
+ ncrcmd bad_i_t_l [ 4];
+ ncrcmd bad_i_t_l_q [ 4];
+ ncrcmd bad_target [ 8];
+ ncrcmd bad_status [ 8];
+ ncrcmd start_ram [ 4 + PREFETCH_FLUSH_CNT];
+ ncrcmd start_ram0 [ 4];
+ ncrcmd sto_restart [ 5];
+ ncrcmd wait_dma [ 2];
+ ncrcmd snooptest [ 9];
+ ncrcmd snoopend [ 2];
+};
+
+/*==========================================================
+**
+**
+** Function headers.
+**
+**
+**==========================================================
+*/
+
+static void ncr_alloc_ccb (struct ncb *np, u_char tn, u_char ln);
+static void ncr_complete (struct ncb *np, struct ccb *cp);
+static void ncr_exception (struct ncb *np);
+static void ncr_free_ccb (struct ncb *np, struct ccb *cp);
+static void ncr_init_ccb (struct ncb *np, struct ccb *cp);
+static void ncr_init_tcb (struct ncb *np, u_char tn);
+static struct lcb * ncr_alloc_lcb (struct ncb *np, u_char tn, u_char ln);
+static struct lcb * ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev);
+static void ncr_getclock (struct ncb *np, int mult);
+static void ncr_selectclock (struct ncb *np, u_char scntl3);
+static struct ccb *ncr_get_ccb (struct ncb *np, struct scsi_cmnd *cmd);
+static void ncr_chip_reset (struct ncb *np, int delay);
+static void ncr_init (struct ncb *np, int reset, char * msg, u_long code);
+static int ncr_int_sbmc (struct ncb *np);
+static int ncr_int_par (struct ncb *np);
+static void ncr_int_ma (struct ncb *np);
+static void ncr_int_sir (struct ncb *np);
+static void ncr_int_sto (struct ncb *np);
+static void ncr_negotiate (struct ncb* np, struct tcb* tp);
+static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr);
+
+static void ncr_script_copy_and_bind
+ (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len);
+static void ncr_script_fill (struct script * scr, struct scripth * scripth);
+static int ncr_scatter (struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd);
+static void ncr_getsync (struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl3p);
+static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer);
+static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev);
+static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack);
+static int ncr_snooptest (struct ncb *np);
+static void ncr_timeout (struct ncb *np);
+static void ncr_wakeup (struct ncb *np, u_long code);
+static void ncr_wakeup_done (struct ncb *np);
+static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
+static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
+
+static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
+static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
+static void process_waiting_list(struct ncb *np, int sts);
+
+#define remove_from_waiting_list(np, cmd) \
+ retrieve_from_waiting_list(1, (np), (cmd))
+#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
+#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
+
+static inline char *ncr_name (struct ncb *np)
+{
+ return np->inst_name;
+}
+
+
+/*==========================================================
+**
+**
+** Scripts for NCR-Processor.
+**
+** Use ncr_script_bind for binding to physical addresses.
+**
+**
+**==========================================================
+**
+** NADDR generates a reference to a field of the controller data.
+** PADDR generates a reference to another part of the script.
+** RADDR generates a reference to a script processor register.
+** FADDR generates a reference to a script processor register
+** with offset.
+**
+**----------------------------------------------------------
+*/
+
+#define RELOC_SOFTC 0x40000000
+#define RELOC_LABEL 0x50000000
+#define RELOC_REGISTER 0x60000000
+#if 0
+#define RELOC_KVAR 0x70000000
+#endif
+#define RELOC_LABELH 0x80000000
+#define RELOC_MASK 0xf0000000
+
+#define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label))
+#define PADDR(label) (RELOC_LABEL | offsetof(struct script, label))
+#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
+#define RADDR(label) (RELOC_REGISTER | REG(label))
+#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
+#if 0
+#define KVAR(which) (RELOC_KVAR | (which))
+#endif
+
+#if 0
+#define SCRIPT_KVAR_JIFFIES (0)
+#define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES
+#define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES
+/*
+ * Kernel variables referenced in the scripts.
+ * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
+ */
+static void *script_kvars[] __initdata =
+ { (void *)&jiffies };
+#endif
+
+static struct script script0 __initdata = {
+/*--------------------------< START >-----------------------*/ {
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** Clear SIGP.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+ /*
+ ** Then jump to a certain point in tryloop.
+ ** Due to the lack of indirect addressing the code
+ ** is self modifying here.
+ */
+ SCR_JUMP,
+}/*-------------------------< STARTPOS >--------------------*/,{
+ PADDRH(tryloop),
+
+}/*-------------------------< SELECT >----------------------*/,{
+ /*
+ ** DSA contains the address of a scheduled
+ ** data structure.
+ **
+ ** SCRATCHA contains the address of the script,
+ ** which starts the next entry.
+ **
+ ** Set Initiator mode.
+ **
+ ** (Target mode is left as an exercise for the reader)
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_SELECTING),
+ 0,
+
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+ PADDR (reselect),
+
+}/*-------------------------< SELECT2 >----------------------*/,{
+ /*
+ ** Now there are 4 possibilities:
+ **
+ ** (1) The ncr loses arbitration.
+ ** This is ok, because it will try again,
+ ** when the bus becomes idle.
+ ** (But beware of the timeout function!)
+ **
+ ** (2) The ncr is reselected.
+ ** Then the script processor takes the jump
+ ** to the RESELECT label.
+ **
+ ** (3) The ncr wins arbitration.
+ ** Then it will execute SCRIPTS instruction until
+ ** the next instruction that checks SCSI phase.
+ ** Then will stop and wait for selection to be
+ ** complete or selection time-out to occur.
+ ** As a result the SCRIPTS instructions until
+ ** LOADPOS + 2 should be executed in parallel with
+ ** the SCSI core performing selection.
+ */
+
+ /*
+ ** The MESSAGE_REJECT problem seems to be due to a selection
+ ** timing problem.
+ ** Wait immediately for the selection to complete.
+ ** (2.5x behaves so)
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ 0,
+
+ /*
+ ** Next time use the next slot.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ PADDR (startpos),
+ /*
+ ** The ncr doesn't have an indirect load
+ ** or store command. So we have to
+ ** copy part of the control block to a
+ ** fixed place, where we can access it.
+ **
+ ** We patch the address part of a
+ ** COPY command with the DSA-register.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDR (loadpos),
+ /*
+ ** Flush script prefetch if required
+ */
+ PREFETCH_FLUSH
+ /*
+ ** then we do the actual copy.
+ */
+ SCR_COPY (sizeof (struct head)),
+ /*
+ ** continued after the next label ...
+ */
+}/*-------------------------< LOADPOS >---------------------*/,{
+ 0,
+ NADDR (header),
+ /*
+ ** Wait for the next phase or the selection
+ ** to complete or time-out.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR (prepare),
+
+}/*-------------------------< SEND_IDENT >----------------------*/,{
+ /*
+ ** Selection complete.
+ ** Send the IDENTIFY and SIMPLE_TAG messages
+ ** (and the EXTENDED_SDTR message)
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct dsb, smsg),
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (resend_ident),
+ SCR_LOAD_REG (scratcha, 0x80),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (lastmsg),
+}/*-------------------------< PREPARE >----------------------*/,{
+ /*
+ ** load the savep (saved pointer) into
+ ** the TEMP register (actual pointer)
+ */
+ SCR_COPY (4),
+ NADDR (header.savep),
+ RADDR (temp),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_COPY (4),
+ NADDR (header.status),
+ RADDR (scr0),
+}/*-------------------------< PREPARE2 >---------------------*/,{
+ /*
+ ** Initialize the msgout buffer with a NOOP message.
+ */
+ SCR_LOAD_REG (scratcha, NOP),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgout),
+#if 0
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgin),
+#endif
+ /*
+ ** Anticipate the COMMAND phase.
+ ** This is the normal case for initial selection.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+ PADDR (dispatch),
+
+}/*-------------------------< COMMAND >--------------------*/,{
+ /*
+ ** ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct dsb, cmd),
+ /*
+ ** If status is still HS_NEGOTIATE, negotiation failed.
+ ** We check this here, since we want to do that
+ ** only once.
+ */
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+
+}/*-----------------------< DISPATCH >----------------------*/,{
+ /*
+ ** MSG_IN is the only phase that shall be
+ ** entered at least once for each (re)selection.
+ ** So we test it first.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+
+ SCR_RETURN ^ IFTRUE (IF (SCR_DATA_OUT)),
+ 0,
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 4.
+ ** Possible data corruption during Memory Write and Invalidate.
+ ** This work-around resets the addressing logic prior to the
+ ** start of the first MOVE of a DATA IN phase.
+ ** (See Documentation/scsi/ncr53c8xx.txt for more information)
+ */
+ SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
+ 20,
+ SCR_COPY (4),
+ RADDR (scratcha),
+ RADDR (scratcha),
+ SCR_RETURN,
+ 0,
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDR (msg_out),
+ /*
+ ** Discard one illegal phase byte, if required.
+ */
+ SCR_LOAD_REG (scratcha, XE_BAD_PHASE),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (xerr_st),
+ SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< CLRACK >----------------------*/,{
+ /*
+ ** Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< NO_DATA >--------------------*/,{
+ /*
+ ** The target wants to tranfer too much data
+ ** or in the wrong direction.
+ ** Remember that in extended error.
+ */
+ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (xerr_st),
+ /*
+ ** Discard one data byte, if required.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_DATA_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_DATA_IN,
+ NADDR (scratch),
+ /*
+ ** .. and repeat as required.
+ */
+ SCR_CALL,
+ PADDR (dispatch),
+ SCR_JUMP,
+ PADDR (no_data),
+
+}/*-------------------------< STATUS >--------------------*/,{
+ /*
+ ** get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ NADDR (scratch),
+ /*
+ ** save status to scsi_status.
+ ** mark as complete.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< MSG_IN >--------------------*/,{
+ /*
+ ** Get the first byte of the message
+ ** and save it to SCRATCHA.
+ **
+ ** The script processor doesn't negate the
+ ** ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------*/,{
+ /*
+ ** Handle this message.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (COMMAND_COMPLETE)),
+ PADDR (complete),
+ SCR_JUMP ^ IFTRUE (DATA (DISCONNECT)),
+ PADDR (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (SAVE_POINTERS)),
+ PADDR (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (RESTORE_POINTERS)),
+ PADDR (restore_dp),
+ SCR_JUMP ^ IFTRUE (DATA (EXTENDED_MESSAGE)),
+ PADDRH (msg_extended),
+ SCR_JUMP ^ IFTRUE (DATA (NOP)),
+ PADDR (clrack),
+ SCR_JUMP ^ IFTRUE (DATA (MESSAGE_REJECT)),
+ PADDRH (msg_reject),
+ SCR_JUMP ^ IFTRUE (DATA (IGNORE_WIDE_RESIDUE)),
+ PADDRH (msg_ign_residue),
+ /*
+ ** Rest of the messages left as
+ ** an exercise ...
+ **
+ ** Unimplemented messages:
+ ** fall through to MSG_BAD.
+ */
+}/*-------------------------< MSG_BAD >------------------*/,{
+ /*
+ ** unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_SENT,
+ SCR_LOAD_REG (scratcha, MESSAGE_REJECT),
+ 0,
+}/*-------------------------< SETMSG >----------------------*/,{
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgout),
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (clrack),
+}/*-------------------------< CLEANUP >-------------------*/,{
+ /*
+ ** dsa: Pointer to ccb
+ ** or xxxxxxFF (no ccb)
+ **
+ ** HS_REG: Host-Status (<>0!)
+ */
+ SCR_FROM_REG (dsa),
+ 0,
+ SCR_JUMP ^ IFTRUE (DATA (0xff)),
+ PADDR (start),
+ /*
+ ** dsa is valid.
+ ** complete the cleanup.
+ */
+ SCR_JUMP,
+ PADDR (cleanup_ok),
+
+}/*-------------------------< COMPLETE >-----------------*/,{
+ /*
+ ** Complete message.
+ **
+ ** Copy TEMP register to LASTP in header.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (header.lastp),
+ /*
+ ** When we terminate the cycle by clearing ACK,
+ ** the target may disconnect immediately.
+ **
+ ** We don't want to be told of an
+ ** "unexpected disconnect",
+ ** so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ ** Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< CLEANUP_OK >----------------*/,{
+ /*
+ ** Save host status to header.
+ */
+ SCR_COPY (4),
+ RADDR (scr0),
+ NADDR (header.status),
+ /*
+ ** and copy back the header to the ccb.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDR (cleanup0),
+ /*
+ ** Flush script prefetch if required
+ */
+ PREFETCH_FLUSH
+ SCR_COPY (sizeof (struct head)),
+ NADDR (header),
+}/*-------------------------< CLEANUP0 >--------------------*/,{
+ 0,
+}/*-------------------------< SIGNAL >----------------------*/,{
+ /*
+ ** if job not completed ...
+ */
+ SCR_FROM_REG (HS_REG),
+ 0,
+ /*
+ ** ... start the next command.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HS_DONEMASK|HS_SKIPMASK))),
+ PADDR(start),
+ /*
+ ** If command resulted in not GOOD status,
+ ** call the C code if needed.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ PADDRH (bad_status),
+
+#ifndef SCSI_NCR_CCB_DONE_SUPPORT
+
+ /*
+ ** ... signal completion to the host
+ */
+ SCR_INT,
+ SIR_INTFLY,
+ /*
+ ** Auf zu neuen Schandtaten!
+ */
+ SCR_JUMP,
+ PADDR(start),
+
+#else /* defined SCSI_NCR_CCB_DONE_SUPPORT */
+
+ /*
+ ** ... signal completion to the host
+ */
+ SCR_JUMP,
+}/*------------------------< DONE_POS >---------------------*/,{
+ PADDRH (done_queue),
+}/*------------------------< DONE_PLUG >--------------------*/,{
+ SCR_INT,
+ SIR_DONE_OVERFLOW,
+}/*------------------------< DONE_END >---------------------*/,{
+ SCR_INT,
+ SIR_INTFLY,
+ SCR_COPY (4),
+ RADDR (temp),
+ PADDR (done_pos),
+ SCR_JUMP,
+ PADDR (start),
+
+#endif /* SCSI_NCR_CCB_DONE_SUPPORT */
+
+}/*-------------------------< SAVE_DP >------------------*/,{
+ /*
+ ** SAVE_DP message:
+ ** Copy TEMP register to SAVEP in header.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (header.savep),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< RESTORE_DP >---------------*/,{
+ /*
+ ** RESTORE_DP message:
+ ** Copy SAVEP in header to TEMP register.
+ */
+ SCR_COPY (4),
+ NADDR (header.savep),
+ RADDR (temp),
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DISCONNECT >---------------*/,{
+ /*
+ ** DISCONNECTing ...
+ **
+ ** disable the "unexpected disconnect" feature,
+ ** and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ SCR_JUMP,
+ PADDR (cleanup_ok),
+
+}/*-------------------------< MSG_OUT >-------------------*/,{
+ /*
+ ** The target requests a message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_COPY (1),
+ NADDR (msgout),
+ NADDR (lastmsg),
+ /*
+ ** If it was no ABORT message ...
+ */
+ SCR_JUMP ^ IFTRUE (DATA (ABORT_TASK_SET)),
+ PADDRH (msg_out_abort),
+ /*
+ ** ... wait for the next phase
+ ** if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDR (msg_out),
+}/*-------------------------< MSG_OUT_DONE >--------------*/,{
+ /*
+ ** ... else clear the message ...
+ */
+ SCR_LOAD_REG (scratcha, NOP),
+ 0,
+ SCR_COPY (4),
+ RADDR (scratcha),
+ NADDR (msgout),
+ /*
+ ** ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< IDLE >------------------------*/,{
+ /*
+ ** Nothing to do?
+ ** Wait for reselect.
+ ** This NOP will be patched with LED OFF
+ ** SCR_REG_REG (gpreg, SCR_OR, 0x01)
+ */
+ SCR_NO_OP,
+ 0,
+}/*-------------------------< RESELECT >--------------------*/,{
+ /*
+ ** make the DSA invalid.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_IN_RESELECT),
+ 0,
+ /*
+ ** Sleep waiting for a reselection.
+ ** If SIGP is set, special treatment.
+ **
+ ** Zu allem bereit ..
+ */
+ SCR_WAIT_RESEL,
+ PADDR(start),
+}/*-------------------------< RESELECTED >------------------*/,{
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** ... zu nichts zu gebrauchen ?
+ **
+ ** load the target id into the SFBR
+ ** and jump to the control block.
+ **
+ ** Look at the declarations of
+ ** - struct ncb
+ ** - struct tcb
+ ** - struct lcb
+ ** - struct ccb
+ ** to understand what's going on.
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (sdid),
+ 0,
+ SCR_JUMP,
+ NADDR (jump_tcb),
+
+}/*-------------------------< RESEL_DSA >-------------------*/,{
+ /*
+ ** Ack the IDENTIFY or TAG previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** The ncr doesn't have an indirect load
+ ** or store command. So we have to
+ ** copy part of the control block to a
+ ** fixed place, where we can access it.
+ **
+ ** We patch the address part of a
+ ** COPY command with the DSA-register.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDR (loadpos1),
+ /*
+ ** Flush script prefetch if required
+ */
+ PREFETCH_FLUSH
+ /*
+ ** then we do the actual copy.
+ */
+ SCR_COPY (sizeof (struct head)),
+ /*
+ ** continued after the next label ...
+ */
+
+}/*-------------------------< LOADPOS1 >-------------------*/,{
+ 0,
+ NADDR (header),
+ /*
+ ** The DSA contains the data structure address.
+ */
+ SCR_JUMP,
+ PADDR (prepare),
+
+}/*-------------------------< RESEL_LUN >-------------------*/,{
+ /*
+ ** come back to this point
+ ** to get an IDENTIFY message
+ ** Wait for a msg_in phase.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_RESEL_NO_MSG_IN,
+ /*
+ ** message phase.
+ ** Read the data directly from the BUS DATA lines.
+ ** This helps to support very old SCSI devices that
+ ** may reselect without sending an IDENTIFY.
+ */
+ SCR_FROM_REG (sbdl),
+ 0,
+ /*
+ ** It should be an Identify message.
+ */
+ SCR_RETURN,
+ 0,
+}/*-------------------------< RESEL_TAG >-------------------*/,{
+ /*
+ ** Read IDENTIFY + SIMPLE + TAG using a single MOVE.
+ ** Aggressive optimization, is'nt it?
+ ** No need to test the SIMPLE TAG message, since the
+ ** driver only supports conformant devices for tags. ;-)
+ */
+ SCR_MOVE_ABS (3) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ /*
+ ** Read the TAG from the SIDL.
+ ** Still an aggressive optimization. ;-)
+ ** Compute the CCB indirect jump address which
+ ** is (#TAG*2 & 0xfc) due to tag numbering using
+ ** 1,3,5..MAXTAGS*2+1 actual values.
+ */
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
+ 0,
+ SCR_SFBR_REG (temp, SCR_AND, 0xfc),
+ 0,
+}/*-------------------------< JUMP_TO_NEXUS >-------------------*/,{
+ SCR_COPY_F (4),
+ RADDR (temp),
+ PADDR (nexus_indirect),
+ /*
+ ** Flush script prefetch if required
+ */
+ PREFETCH_FLUSH
+ SCR_COPY (4),
+}/*-------------------------< NEXUS_INDIRECT >-------------------*/,{
+ 0,
+ RADDR (temp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< RESEL_NOTAG >-------------------*/,{
+ /*
+ ** No tag expected.
+ ** Read an throw away the IDENTIFY.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ SCR_JUMP,
+ PADDR (jump_to_nexus),
+}/*-------------------------< DATA_IN >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTERL parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTERL >=========
+** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)),
+** || PADDR (dispatch),
+** || SCR_MOVE_TBL ^ SCR_DATA_IN,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_IN2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (dispatch),
+ SCR_JUMP,
+ PADDR (no_data),
+}/*-------------------------< DATA_OUT >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTERL parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTERL >=========
+** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+** || PADDR (dispatch),
+** || SCR_MOVE_TBL ^ SCR_DATA_OUT,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_OUT2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (dispatch),
+ SCR_JUMP,
+ PADDR (no_data),
+}/*--------------------------------------------------------*/
+};
+
+static struct scripth scripth0 __initdata = {
+/*-------------------------< TRYLOOP >---------------------*/{
+/*
+** Start the next entry.
+** Called addresses point to the launch script in the CCB.
+** They are patched by the main processor.
+**
+** Because the size depends on the
+** #define MAX_START parameter, it is filled
+** in at runtime.
+**
+**-----------------------------------------------------------
+**
+** ##===========< I=0; i<MAX_START >===========
+** || SCR_CALL,
+** || PADDR (idle),
+** ##==========================================
+**
+**-----------------------------------------------------------
+*/
+0
+}/*------------------------< TRYLOOP2 >---------------------*/,{
+ SCR_JUMP,
+ PADDRH(tryloop),
+
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+
+}/*------------------------< DONE_QUEUE >-------------------*/,{
+/*
+** Copy the CCB address to the next done entry.
+** Because the size depends on the
+** #define MAX_DONE parameter, it is filled
+** in at runtime.
+**
+**-----------------------------------------------------------
+**
+** ##===========< I=0; i<MAX_DONE >===========
+** || SCR_COPY (sizeof(struct ccb *),
+** || NADDR (header.cp),
+** || NADDR (ccb_done[i]),
+** || SCR_CALL,
+** || PADDR (done_end),
+** ##==========================================
+**
+**-----------------------------------------------------------
+*/
+0
+}/*------------------------< DONE_QUEUE2 >------------------*/,{
+ SCR_JUMP,
+ PADDRH (done_queue),
+
+#endif /* SCSI_NCR_CCB_DONE_SUPPORT */
+}/*------------------------< SELECT_NO_ATN >-----------------*/,{
+ /*
+ ** Set Initiator mode.
+ ** And try to select this target without ATN.
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_SELECTING),
+ 0,
+ SCR_SEL_TBL ^ offsetof (struct dsb, select),
+ PADDR (reselect),
+ SCR_JUMP,
+ PADDR (select2),
+
+}/*-------------------------< CANCEL >------------------------*/,{
+
+ SCR_LOAD_REG (scratcha, HS_ABORTED),
+ 0,
+ SCR_JUMPR,
+ 8,
+}/*-------------------------< SKIP >------------------------*/,{
+ SCR_LOAD_REG (scratcha, 0),
+ 0,
+ /*
+ ** This entry has been canceled.
+ ** Next time use the next slot.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ PADDR (startpos),
+ /*
+ ** The ncr doesn't have an indirect load
+ ** or store command. So we have to
+ ** copy part of the control block to a
+ ** fixed place, where we can access it.
+ **
+ ** We patch the address part of a
+ ** COPY command with the DSA-register.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDRH (skip2),
+ /*
+ ** Flush script prefetch if required
+ */
+ PREFETCH_FLUSH
+ /*
+ ** then we do the actual copy.
+ */
+ SCR_COPY (sizeof (struct head)),
+ /*
+ ** continued after the next label ...
+ */
+}/*-------------------------< SKIP2 >---------------------*/,{
+ 0,
+ NADDR (header),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_COPY (4),
+ NADDR (header.status),
+ RADDR (scr0),
+ /*
+ ** Force host status.
+ */
+ SCR_FROM_REG (scratcha),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)),
+ 16,
+ SCR_REG_REG (HS_REG, SCR_OR, HS_SKIPMASK),
+ 0,
+ SCR_JUMPR,
+ 8,
+ SCR_TO_REG (HS_REG),
+ 0,
+ SCR_LOAD_REG (SS_REG, S_GOOD),
+ 0,
+ SCR_JUMP,
+ PADDR (cleanup_ok),
+
+},/*-------------------------< PAR_ERR_DATA_IN >---------------*/{
+ /*
+ ** Ignore all data in byte, until next phase
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDRH (par_err_other),
+ SCR_MOVE_ABS (1) ^ SCR_DATA_IN,
+ NADDR (scratch),
+ SCR_JUMPR,
+ -24,
+},/*-------------------------< PAR_ERR_OTHER >------------------*/{
+ /*
+ ** count it.
+ */
+ SCR_REG_REG (PS_REG, SCR_ADD, 0x01),
+ 0,
+ /*
+ ** jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< MSG_REJECT >---------------*/,{
+ /*
+ ** If a negotiation was in progress,
+ ** negotiation failed.
+ ** Otherwise, let the C code print
+ ** some message.
+ */
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)),
+ SIR_REJECT_RECEIVED,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_IGN_RESIDUE >----------*/,{
+ /*
+ ** Terminate cycle
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get residue size.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+ ** Size is 0 .. ignore message.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDR (clrack),
+ /*
+ ** Size is not 1 .. have to interrupt.
+ */
+ SCR_JUMPR ^ IFFALSE (DATA (1)),
+ 40,
+ /*
+ ** Check for residue byte in swide register
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ /*
+ ** There IS data in the swide register.
+ ** Discard it.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDR (clrack),
+ /*
+ ** Load again the size to the sfbr register.
+ */
+ SCR_FROM_REG (scratcha),
+ 0,
+ SCR_INT,
+ SIR_IGN_RESIDUE,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_EXTENDED >-------------*/,{
+ /*
+ ** Terminate cycle
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get length.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+ */
+ SCR_JUMP ^ IFTRUE (DATA (3)),
+ PADDRH (msg_ext_3),
+ SCR_JUMP ^ IFFALSE (DATA (2)),
+ PADDR (msg_bad),
+}/*-------------------------< MSG_EXT_2 >----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get extended message code.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[2]),
+ SCR_JUMP ^ IFTRUE (DATA (EXTENDED_WDTR)),
+ PADDRH (msg_wdtr),
+ /*
+ ** unknown extended message
+ */
+ SCR_JUMP,
+ PADDR (msg_bad)
+}/*-------------------------< MSG_WDTR >-----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get data bus width
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[3]),
+ /*
+ ** let the host do the real work.
+ */
+ SCR_INT,
+ SIR_NEGO_WIDE,
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_WDTR >----------------*/,{
+ /*
+ ** Send the EXTENDED_WDTR
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_COPY (1),
+ NADDR (msgout),
+ NADDR (lastmsg),
+ SCR_JUMP,
+ PADDR (msg_out_done),
+
+}/*-------------------------< MSG_EXT_3 >----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get extended message code.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[2]),
+ SCR_JUMP ^ IFTRUE (DATA (EXTENDED_SDTR)),
+ PADDRH (msg_sdtr),
+ /*
+ ** unknown extended message
+ */
+ SCR_JUMP,
+ PADDR (msg_bad)
+
+}/*-------------------------< MSG_SDTR >-----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get period and offset
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (msgin[3]),
+ /*
+ ** let the host do the real work.
+ */
+ SCR_INT,
+ SIR_NEGO_SYNC,
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_SDTR >-------------*/,{
+ /*
+ ** Send the EXTENDED_SDTR
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_COPY (1),
+ NADDR (msgout),
+ NADDR (lastmsg),
+ SCR_JUMP,
+ PADDR (msg_out_done),
+
+}/*-------------------------< NEGO_BAD_PHASE >------------*/,{
+ SCR_INT,
+ SIR_NEGO_PROTO,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< MSG_OUT_ABORT >-------------*/,{
+ /*
+ ** After ABORT message,
+ **
+ ** expect an immediate disconnect, ...
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** ... and set the status to "ABORTED"
+ */
+ SCR_LOAD_REG (HS_REG, HS_ABORTED),
+ 0,
+ SCR_JUMP,
+ PADDR (cleanup),
+
+}/*-------------------------< HDATA_IN >-------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTERH parameter,
+** it is filled in at runtime.
+**
+** ##==< i=MAX_SCATTERL; i<MAX_SCATTERL+MAX_SCATTERH >==
+** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)),
+** || PADDR (dispatch),
+** || SCR_MOVE_TBL ^ SCR_DATA_IN,
+** || offsetof (struct dsb, data[ i]),
+** ##===================================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< HDATA_IN2 >------------------*/,{
+ SCR_JUMP,
+ PADDR (data_in),
+
+}/*-------------------------< HDATA_OUT >-------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTERH parameter,
+** it is filled in at runtime.
+**
+** ##==< i=MAX_SCATTERL; i<MAX_SCATTERL+MAX_SCATTERH >==
+** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+** || PADDR (dispatch),
+** || SCR_MOVE_TBL ^ SCR_DATA_OUT,
+** || offsetof (struct dsb, data[ i]),
+** ##===================================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< HDATA_OUT2 >------------------*/,{
+ SCR_JUMP,
+ PADDR (data_out),
+
+}/*-------------------------< RESET >----------------------*/,{
+ /*
+ ** Send a TARGET_RESET message if bad IDENTIFY
+ ** received on reselection.
+ */
+ SCR_LOAD_REG (scratcha, ABORT_TASK),
+ 0,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< ABORTTAG >-------------------*/,{
+ /*
+ ** Abort a wrong tag received on reselection.
+ */
+ SCR_LOAD_REG (scratcha, ABORT_TASK),
+ 0,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< ABORT >----------------------*/,{
+ /*
+ ** Abort a reselection when no active CCB.
+ */
+ SCR_LOAD_REG (scratcha, ABORT_TASK_SET),
+ 0,
+}/*-------------------------< ABORT_RESEL >----------------*/,{
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgout),
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** and send it.
+ ** we expect an immediate disconnect
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_COPY (1),
+ NADDR (msgout),
+ NADDR (lastmsg),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< RESEND_IDENT >-------------------*/,{
+ /*
+ ** The target stays in MSG OUT phase after having acked
+ ** Identify [+ Tag [+ Extended message ]]. Targets shall
+ ** behave this way on parity error.
+ ** We must send it again all the messages.
+ */
+ SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
+ 0, /* 1rst ACK = 90 ns. Hope the NCR is'nt too fast */
+ SCR_JUMP,
+ PADDR (send_ident),
+}/*-------------------------< CLRATN_GO_ON >-------------------*/,{
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_JUMP,
+}/*-------------------------< NXTDSP_GO_ON >-------------------*/,{
+ 0,
+}/*-------------------------< SDATA_IN >-------------------*/,{
+ SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (dispatch),
+ SCR_MOVE_TBL ^ SCR_DATA_IN,
+ offsetof (struct dsb, sense),
+ SCR_CALL,
+ PADDR (dispatch),
+ SCR_JUMP,
+ PADDR (no_data),
+}/*-------------------------< DATA_IO >--------------------*/,{
+ /*
+ ** We jump here if the data direction was unknown at the
+ ** time we had to queue the command to the scripts processor.
+ ** Pointers had been set as follow in this situation:
+ ** savep --> DATA_IO
+ ** lastp --> start pointer when DATA_IN
+ ** goalp --> goal pointer when DATA_IN
+ ** wlastp --> start pointer when DATA_OUT
+ ** wgoalp --> goal pointer when DATA_OUT
+ ** This script sets savep/lastp/goalp according to the
+ ** direction chosen by the target.
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ 32,
+ /*
+ ** Direction is DATA IN.
+ ** Warning: we jump here, even when phase is DATA OUT.
+ */
+ SCR_COPY (4),
+ NADDR (header.lastp),
+ NADDR (header.savep),
+
+ /*
+ ** Jump to the SCRIPTS according to actual direction.
+ */
+ SCR_COPY (4),
+ NADDR (header.savep),
+ RADDR (temp),
+ SCR_RETURN,
+ 0,
+ /*
+ ** Direction is DATA OUT.
+ */
+ SCR_COPY (4),
+ NADDR (header.wlastp),
+ NADDR (header.lastp),
+ SCR_COPY (4),
+ NADDR (header.wgoalp),
+ NADDR (header.goalp),
+ SCR_JUMPR,
+ -64,
+}/*-------------------------< BAD_IDENTIFY >---------------*/,{
+ /*
+ ** If message phase but not an IDENTIFY,
+ ** get some help from the C code.
+ ** Old SCSI device may behave so.
+ */
+ SCR_JUMPR ^ IFTRUE (MASK (0x80, 0x80)),
+ 16,
+ SCR_INT,
+ SIR_RESEL_NO_IDENTIFY,
+ SCR_JUMP,
+ PADDRH (reset),
+ /*
+ ** Message is an IDENTIFY, but lun is unknown.
+ ** Read the message, since we got it directly
+ ** from the SCSI BUS data lines.
+ ** Signal problem to C code for logging the event.
+ ** Send an ABORT_TASK_SET to clear all pending tasks.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_LUN,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ SCR_JUMP,
+ PADDRH (abort),
+}/*-------------------------< BAD_I_T_L >------------------*/,{
+ /*
+ ** We donnot have a task for that I_T_L.
+ ** Signal problem to C code for logging the event.
+ ** Send an ABORT_TASK_SET message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L,
+ SCR_JUMP,
+ PADDRH (abort),
+}/*-------------------------< BAD_I_T_L_Q >----------------*/,{
+ /*
+ ** We donnot have a task that matches the tag.
+ ** Signal problem to C code for logging the event.
+ ** Send an ABORT_TASK message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L_Q,
+ SCR_JUMP,
+ PADDRH (aborttag),
+}/*-------------------------< BAD_TARGET >-----------------*/,{
+ /*
+ ** We donnot know the target that reselected us.
+ ** Grab the first message if any (IDENTIFY).
+ ** Signal problem to C code for logging the event.
+ ** TARGET_RESET message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_TARGET,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ SCR_JUMP,
+ PADDRH (reset),
+}/*-------------------------< BAD_STATUS >-----------------*/,{
+ /*
+ ** If command resulted in either QUEUE FULL,
+ ** CHECK CONDITION or COMMAND TERMINATED,
+ ** call the C code.
+ */
+ SCR_INT ^ IFTRUE (DATA (S_QUEUE_FULL)),
+ SIR_BAD_STATUS,
+ SCR_INT ^ IFTRUE (DATA (S_CHECK_COND)),
+ SIR_BAD_STATUS,
+ SCR_INT ^ IFTRUE (DATA (S_TERMINATED)),
+ SIR_BAD_STATUS,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< START_RAM >-------------------*/,{
+ /*
+ ** Load the script into on-chip RAM,
+ ** and jump to start point.
+ */
+ SCR_COPY_F (4),
+ RADDR (scratcha),
+ PADDRH (start_ram0),
+ /*
+ ** Flush script prefetch if required
+ */
+ PREFETCH_FLUSH
+ SCR_COPY (sizeof (struct script)),
+}/*-------------------------< START_RAM0 >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< STO_RESTART >-------------------*/,{
+ /*
+ **
+ ** Repair start queue (e.g. next time use the next slot)
+ ** and jump to start point.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ PADDR (startpos),
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< WAIT_DMA >-------------------*/,{
+ /*
+ ** For HP Zalon/53c720 systems, the Zalon interface
+ ** between CPU and 53c720 does prefetches, which causes
+ ** problems with self modifying scripts. The problem
+ ** is overcome by calling a dummy subroutine after each
+ ** modification, to force a refetch of the script on
+ ** return from the subroutine.
+ */
+ SCR_RETURN,
+ 0,
+}/*-------------------------< SNOOPTEST >-------------------*/,{
+ /*
+ ** Read the variable.
+ */
+ SCR_COPY (4),
+ NADDR(ncr_cache),
+ RADDR (scratcha),
+ /*
+ ** Write the variable.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR(ncr_cache),
+ /*
+ ** Read back the variable.
+ */
+ SCR_COPY (4),
+ NADDR(ncr_cache),
+ RADDR (temp),
+}/*-------------------------< SNOOPEND >-------------------*/,{
+ /*
+ ** And stop.
+ */
+ SCR_INT,
+ 99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+** Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+void __init ncr_script_fill (struct script * scr, struct scripth * scrh)
+{
+ int i;
+ ncrcmd *p;
+
+ p = scrh->tryloop;
+ for (i=0; i<MAX_START; i++) {
+ *p++ =SCR_CALL;
+ *p++ =PADDR (idle);
+ }
+
+ BUG_ON((u_long)p != (u_long)&scrh->tryloop + sizeof (scrh->tryloop));
+
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+
+ p = scrh->done_queue;
+ for (i = 0; i<MAX_DONE; i++) {
+ *p++ =SCR_COPY (sizeof(struct ccb *));
+ *p++ =NADDR (header.cp);
+ *p++ =NADDR (ccb_done[i]);
+ *p++ =SCR_CALL;
+ *p++ =PADDR (done_end);
+ }
+
+ BUG_ON((u_long)p != (u_long)&scrh->done_queue+sizeof(scrh->done_queue));
+
+#endif /* SCSI_NCR_CCB_DONE_SUPPORT */
+
+ p = scrh->hdata_in;
+ for (i=0; i<MAX_SCATTERH; i++) {
+ *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
+ *p++ =PADDR (dispatch);
+ *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
+ *p++ =offsetof (struct dsb, data[i]);
+ }
+
+ BUG_ON((u_long)p != (u_long)&scrh->hdata_in + sizeof (scrh->hdata_in));
+
+ p = scr->data_in;
+ for (i=MAX_SCATTERH; i<MAX_SCATTERH+MAX_SCATTERL; i++) {
+ *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
+ *p++ =PADDR (dispatch);
+ *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
+ *p++ =offsetof (struct dsb, data[i]);
+ }
+
+ BUG_ON((u_long)p != (u_long)&scr->data_in + sizeof (scr->data_in));
+
+ p = scrh->hdata_out;
+ for (i=0; i<MAX_SCATTERH; i++) {
+ *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
+ *p++ =PADDR (dispatch);
+ *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
+ *p++ =offsetof (struct dsb, data[i]);
+ }
+
+ BUG_ON((u_long)p != (u_long)&scrh->hdata_out + sizeof (scrh->hdata_out));
+
+ p = scr->data_out;
+ for (i=MAX_SCATTERH; i<MAX_SCATTERH+MAX_SCATTERL; i++) {
+ *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
+ *p++ =PADDR (dispatch);
+ *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
+ *p++ =offsetof (struct dsb, data[i]);
+ }
+
+ BUG_ON((u_long) p != (u_long)&scr->data_out + sizeof (scr->data_out));
+}
+
+/*==========================================================
+**
+**
+** Copy and rebind a script.
+**
+**
+**==========================================================
+*/
+
+static void __init
+ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len)
+{
+ ncrcmd opcode, new, old, tmp1, tmp2;
+ ncrcmd *start, *end;
+ int relocs;
+ int opchanged = 0;
+
+ start = src;
+ end = src + len/4;
+
+ while (src < end) {
+
+ opcode = *src++;
+ *dst++ = cpu_to_scr(opcode);
+
+ /*
+ ** If we forget to change the length
+ ** in struct script, a field will be
+ ** padded with 0. This is an illegal
+ ** command.
+ */
+
+ if (opcode == 0) {
+ printk (KERN_ERR "%s: ERROR0 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ mdelay(1000);
+ }
+
+ if (DEBUG_FLAGS & DEBUG_SCRIPT)
+ printk (KERN_DEBUG "%p: <%x>\n",
+ (src-1), (unsigned)opcode);
+
+ /*
+ ** We don't have to decode ALL commands
+ */
+ switch (opcode >> 28) {
+
+ case 0xc:
+ /*
+ ** COPY has TWO arguments.
+ */
+ relocs = 2;
+ tmp1 = src[0];
+#ifdef RELOC_KVAR
+ if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+ tmp1 = 0;
+#endif
+ tmp2 = src[1];
+#ifdef RELOC_KVAR
+ if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+ tmp2 = 0;
+#endif
+ if ((tmp1 ^ tmp2) & 3) {
+ printk (KERN_ERR"%s: ERROR1 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ mdelay(1000);
+ }
+ /*
+ ** If PREFETCH feature not enabled, remove
+ ** the NO FLUSH bit if present.
+ */
+ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) {
+ dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+ ++opchanged;
+ }
+ break;
+
+ case 0x0:
+ /*
+ ** MOVE (absolute address)
+ */
+ relocs = 1;
+ break;
+
+ case 0x8:
+ /*
+ ** JUMP / CALL
+ ** don't relocate if relative :-)
+ */
+ if (opcode & 0x00800000)
+ relocs = 0;
+ else
+ relocs = 1;
+ break;
+
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ relocs = 1;
+ break;
+
+ default:
+ relocs = 0;
+ break;
+ }
+
+ if (relocs) {
+ while (relocs--) {
+ old = *src++;
+
+ switch (old & RELOC_MASK) {
+ case RELOC_REGISTER:
+ new = (old & ~RELOC_MASK) + np->paddr;
+ break;
+ case RELOC_LABEL:
+ new = (old & ~RELOC_MASK) + np->p_script;
+ break;
+ case RELOC_LABELH:
+ new = (old & ~RELOC_MASK) + np->p_scripth;
+ break;
+ case RELOC_SOFTC:
+ new = (old & ~RELOC_MASK) + np->p_ncb;
+ break;
+#ifdef RELOC_KVAR
+ case RELOC_KVAR:
+ if (((old & ~RELOC_MASK) <
+ SCRIPT_KVAR_FIRST) ||
+ ((old & ~RELOC_MASK) >
+ SCRIPT_KVAR_LAST))
+ panic("ncr KVAR out of range");
+ new = vtophys(script_kvars[old &
+ ~RELOC_MASK]);
+ break;
+#endif
+ case 0:
+ /* Don't relocate a 0 address. */
+ if (old == 0) {
+ new = old;
+ break;
+ }
+ /* fall through */
+ default:
+ panic("ncr_script_copy_and_bind: weird relocation %x\n", old);
+ break;
+ }
+
+ *dst++ = cpu_to_scr(new);
+ }
+ } else
+ *dst++ = cpu_to_scr(*src++);
+
+ }
+}
+
+/*
+** Linux host data structure
+*/
+
+struct host_data {
+ struct ncb *ncb;
+};
+
+#define PRINT_ADDR(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg)
+
+static void ncr_print_msg(struct ccb *cp, char *label, u_char *msg)
+{
+ PRINT_ADDR(cp->cmd, "%s: ", label);
+
+ spi_print_msg(msg);
+ printk("\n");
+}
+
+/*==========================================================
+**
+** NCR chip clock divisor table.
+** Divisors are multiplied by 10,000,000 in order to make
+** calculations more simple.
+**
+**==========================================================
+*/
+
+#define _5M 5000000
+static u_long div_10M[] =
+ {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
+
+
+/*===============================================================
+**
+** Prepare io register values used by ncr_init() according
+** to selected and supported features.
+**
+** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128
+** transfers. 32,64,128 are only supported by 875 and 895 chips.
+** We use log base 2 (burst length) as internal code, with
+** value 0 meaning "burst disabled".
+**
+**===============================================================
+*/
+
+/*
+ * Burst length from burst code.
+ */
+#define burst_length(bc) (!(bc))? 0 : 1 << (bc)
+
+/*
+ * Burst code from io register bits. Burst enable is ctest0 for c720
+ */
+#define burst_code(dmode, ctest0) \
+ (ctest0) & 0x80 ? 0 : (((dmode) & 0xc0) >> 6) + 1
+
+/*
+ * Set initial io register bits from burst code.
+ */
+static inline void ncr_init_burst(struct ncb *np, u_char bc)
+{
+ u_char *be = &np->rv_ctest0;
+ *be &= ~0x80;
+ np->rv_dmode &= ~(0x3 << 6);
+ np->rv_ctest5 &= ~0x4;
+
+ if (!bc) {
+ *be |= 0x80;
+ } else {
+ --bc;
+ np->rv_dmode |= ((bc & 0x3) << 6);
+ np->rv_ctest5 |= (bc & 0x4);
+ }
+}
+
+static void __init ncr_prepare_setting(struct ncb *np)
+{
+ u_char burst_max;
+ u_long period;
+ int i;
+
+ /*
+ ** Save assumed BIOS setting
+ */
+
+ np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
+ np->sv_scntl3 = INB(nc_scntl3) & 0x07;
+ np->sv_dmode = INB(nc_dmode) & 0xce;
+ np->sv_dcntl = INB(nc_dcntl) & 0xa8;
+ np->sv_ctest0 = INB(nc_ctest0) & 0x84;
+ np->sv_ctest3 = INB(nc_ctest3) & 0x01;
+ np->sv_ctest4 = INB(nc_ctest4) & 0x80;
+ np->sv_ctest5 = INB(nc_ctest5) & 0x24;
+ np->sv_gpcntl = INB(nc_gpcntl);
+ np->sv_stest2 = INB(nc_stest2) & 0x20;
+ np->sv_stest4 = INB(nc_stest4);
+
+ /*
+ ** Wide ?
+ */
+
+ np->maxwide = (np->features & FE_WIDE)? 1 : 0;
+
+ /*
+ * Guess the frequency of the chip's clock.
+ */
+ if (np->features & FE_ULTRA)
+ np->clock_khz = 80000;
+ else
+ np->clock_khz = 40000;
+
+ /*
+ * Get the clock multiplier factor.
+ */
+ if (np->features & FE_QUAD)
+ np->multiplier = 4;
+ else if (np->features & FE_DBLR)
+ np->multiplier = 2;
+ else
+ np->multiplier = 1;
+
+ /*
+ * Measure SCSI clock frequency for chips
+ * it may vary from assumed one.
+ */
+ if (np->features & FE_VARCLK)
+ ncr_getclock(np, np->multiplier);
+
+ /*
+ * Divisor to be used for async (timer pre-scaler).
+ */
+ i = np->clock_divn - 1;
+ while (--i >= 0) {
+ if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) {
+ ++i;
+ break;
+ }
+ }
+ np->rv_scntl3 = i+1;
+
+ /*
+ * Minimum synchronous period factor supported by the chip.
+ * Btw, 'period' is in tenths of nanoseconds.
+ */
+
+ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+ if (period <= 250) np->minsync = 10;
+ else if (period <= 303) np->minsync = 11;
+ else if (period <= 500) np->minsync = 12;
+ else np->minsync = (period + 40 - 1) / 40;
+
+ /*
+ * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+ */
+
+ if (np->minsync < 25 && !(np->features & FE_ULTRA))
+ np->minsync = 25;
+
+ /*
+ * Maximum synchronous period factor supported by the chip.
+ */
+
+ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+ np->maxsync = period > 2540 ? 254 : period / 10;
+
+ /*
+ ** Prepare initial value of other IO registers
+ */
+#if defined SCSI_NCR_TRUST_BIOS_SETTING
+ np->rv_scntl0 = np->sv_scntl0;
+ np->rv_dmode = np->sv_dmode;
+ np->rv_dcntl = np->sv_dcntl;
+ np->rv_ctest0 = np->sv_ctest0;
+ np->rv_ctest3 = np->sv_ctest3;
+ np->rv_ctest4 = np->sv_ctest4;
+ np->rv_ctest5 = np->sv_ctest5;
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest0);
+#else
+
+ /*
+ ** Select burst length (dwords)
+ */
+ burst_max = driver_setup.burst_max;
+ if (burst_max == 255)
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest0);
+ if (burst_max > 7)
+ burst_max = 7;
+ if (burst_max > np->maxburst)
+ burst_max = np->maxburst;
+
+ /*
+ ** Select all supported special features
+ */
+ if (np->features & FE_ERL)
+ np->rv_dmode |= ERL; /* Enable Read Line */
+ if (np->features & FE_BOF)
+ np->rv_dmode |= BOF; /* Burst Opcode Fetch */
+ if (np->features & FE_ERMP)
+ np->rv_dmode |= ERMP; /* Enable Read Multiple */
+ if (np->features & FE_PFEN)
+ np->rv_dcntl |= PFEN; /* Prefetch Enable */
+ if (np->features & FE_CLSE)
+ np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
+ if (np->features & FE_WRIE)
+ np->rv_ctest3 |= WRIE; /* Write and Invalidate */
+ if (np->features & FE_DFS)
+ np->rv_ctest5 |= DFS; /* Dma Fifo Size */
+ if (np->features & FE_MUX)
+ np->rv_ctest4 |= MUX; /* Host bus multiplex mode */
+ if (np->features & FE_EA)
+ np->rv_dcntl |= EA; /* Enable ACK */
+ if (np->features & FE_EHP)
+ np->rv_ctest0 |= EHP; /* Even host parity */
+
+ /*
+ ** Select some other
+ */
+ if (driver_setup.master_parity)
+ np->rv_ctest4 |= MPEE; /* Master parity checking */
+ if (driver_setup.scsi_parity)
+ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
+
+ /*
+ ** Get SCSI addr of host adapter (set by bios?).
+ */
+ if (np->myaddr == 255) {
+ np->myaddr = INB(nc_scid) & 0x07;
+ if (!np->myaddr)
+ np->myaddr = SCSI_NCR_MYADDR;
+ }
+
+#endif /* SCSI_NCR_TRUST_BIOS_SETTING */
+
+ /*
+ * Prepare initial io register bits for burst length
+ */
+ ncr_init_burst(np, burst_max);
+
+ /*
+ ** Set SCSI BUS mode.
+ **
+ ** - ULTRA2 chips (895/895A/896) report the current
+ ** BUS mode through the STEST4 IO register.
+ ** - For previous generation chips (825/825A/875),
+ ** user has to tell us how to check against HVD,
+ ** since a 100% safe algorithm is not possible.
+ */
+ np->scsi_mode = SMODE_SE;
+ if (np->features & FE_DIFF) {
+ switch(driver_setup.diff_support) {
+ case 4: /* Trust previous settings if present, then GPIO3 */
+ if (np->sv_scntl3) {
+ if (np->sv_stest2 & 0x20)
+ np->scsi_mode = SMODE_HVD;
+ break;
+ }
+ case 3: /* SYMBIOS controllers report HVD through GPIO3 */
+ if (INB(nc_gpreg) & 0x08)
+ break;
+ case 2: /* Set HVD unconditionally */
+ np->scsi_mode = SMODE_HVD;
+ case 1: /* Trust previous settings for HVD */
+ if (np->sv_stest2 & 0x20)
+ np->scsi_mode = SMODE_HVD;
+ break;
+ default:/* Don't care about HVD */
+ break;
+ }
+ }
+ if (np->scsi_mode == SMODE_HVD)
+ np->rv_stest2 |= 0x20;
+
+ /*
+ ** Set LED support from SCRIPTS.
+ ** Ignore this feature for boards known to use a
+ ** specific GPIO wiring and for the 895A or 896
+ ** that drive the LED directly.
+ ** Also probe initial setting of GPIO0 as output.
+ */
+ if ((driver_setup.led_pin) &&
+ !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
+ np->features |= FE_LED0;
+
+ /*
+ ** Set irq mode.
+ */
+ switch(driver_setup.irqm & 3) {
+ case 2:
+ np->rv_dcntl |= IRQM;
+ break;
+ case 1:
+ np->rv_dcntl |= (np->sv_dcntl & IRQM);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ ** Configure targets according to driver setup.
+ ** Allow to override sync, wide and NOSCAN from
+ ** boot command line.
+ */
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ struct tcb *tp = &np->target[i];
+
+ tp->usrsync = driver_setup.default_sync;
+ tp->usrwide = driver_setup.max_wide;
+ tp->usrtags = MAX_TAGS;
+ tp->period = 0xffff;
+ if (!driver_setup.disconnection)
+ np->target[i].usrflag = UF_NODISC;
+ }
+
+ /*
+ ** Announce all that stuff to user.
+ */
+
+ printk(KERN_INFO "%s: ID %d, Fast-%d%s%s\n", ncr_name(np),
+ np->myaddr,
+ np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10),
+ (np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity",
+ (np->rv_stest2 & 0x20) ? ", Differential" : "");
+
+ if (bootverbose > 1) {
+ printk (KERN_INFO "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+ np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+ printk (KERN_INFO "%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+ np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+ }
+
+ if (bootverbose && np->paddr2)
+ printk (KERN_INFO "%s: on-chip RAM at 0x%lx\n",
+ ncr_name(np), np->paddr2);
+}
+
+/*==========================================================
+**
+**
+** Done SCSI commands list management.
+**
+** We donnot enter the scsi_done() callback immediately
+** after a command has been seen as completed but we
+** insert it into a list which is flushed outside any kind
+** of driver critical section.
+** This allows to do minimal stuff under interrupt and
+** inside critical sections and to also avoid locking up
+** on recursive calls to driver entry points under SMP.
+** In fact, the only kernel point which is entered by the
+** driver with a driver lock set is kmalloc(GFP_ATOMIC)
+** that shall not reenter the driver under any circumstances,
+** AFAIK.
+**
+**==========================================================
+*/
+static inline void ncr_queue_done_cmd(struct ncb *np, struct scsi_cmnd *cmd)
+{
+ unmap_scsi_data(np, cmd);
+ cmd->host_scribble = (char *) np->done_list;
+ np->done_list = cmd;
+}
+
+static inline void ncr_flush_done_cmds(struct scsi_cmnd *lcmd)
+{
+ struct scsi_cmnd *cmd;
+
+ while (lcmd) {
+ cmd = lcmd;
+ lcmd = (struct scsi_cmnd *) cmd->host_scribble;
+ cmd->scsi_done(cmd);
+ }
+}
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+**
+**==========================================================
+*/
+
+
+static int ncr_prepare_nego(struct ncb *np, struct ccb *cp, u_char *msgptr)
+{
+ struct tcb *tp = &np->target[cp->target];
+ int msglen = 0;
+ int nego = 0;
+ struct scsi_target *starget = tp->starget;
+
+ /* negotiate wide transfers ? */
+ if (!tp->widedone) {
+ if (spi_support_wide(starget)) {
+ nego = NS_WIDE;
+ } else
+ tp->widedone=1;
+ }
+
+ /* negotiate synchronous transfers? */
+ if (!nego && !tp->period) {
+ if (spi_support_sync(starget)) {
+ nego = NS_SYNC;
+ } else {
+ tp->period =0xffff;
+ dev_info(&starget->dev, "target did not report SYNC.\n");
+ }
+ }
+
+ switch (nego) {
+ case NS_SYNC:
+ msglen += spi_populate_sync_msg(msgptr + msglen,
+ tp->maxoffs ? tp->minsync : 0, tp->maxoffs);
+ break;
+ case NS_WIDE:
+ msglen += spi_populate_width_msg(msgptr + msglen, tp->usrwide);
+ break;
+ }
+
+ cp->nego_status = nego;
+
+ if (nego) {
+ tp->nego_cp = cp;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, nego == NS_WIDE ?
+ "wide msgout":"sync_msgout", msgptr);
+ }
+ }
+
+ return msglen;
+}
+
+
+
+/*==========================================================
+**
+**
+** Start execution of a SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct tcb *tp = &np->target[sdev->id];
+ struct lcb *lp = tp->lp[sdev->lun];
+ struct ccb *cp;
+
+ int segments;
+ u_char idmsg, *msgptr;
+ u32 msglen;
+ int direction;
+ u32 lastp, goalp;
+
+ /*---------------------------------------------
+ **
+ ** Some shortcuts ...
+ **
+ **---------------------------------------------
+ */
+ if ((sdev->id == np->myaddr ) ||
+ (sdev->id >= MAX_TARGET) ||
+ (sdev->lun >= MAX_LUN )) {
+ return(DID_BAD_TARGET);
+ }
+
+ /*---------------------------------------------
+ **
+ ** Complete the 1st TEST UNIT READY command
+ ** with error condition if the device is
+ ** flagged NOSCAN, in order to speed up
+ ** the boot.
+ **
+ **---------------------------------------------
+ */
+ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12) &&
+ (tp->usrflag & UF_NOSCAN)) {
+ tp->usrflag &= ~UF_NOSCAN;
+ return DID_BAD_TARGET;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_TINY) {
+ PRINT_ADDR(cmd, "CMD=%x ", cmd->cmnd[0]);
+ }
+
+ /*---------------------------------------------------
+ **
+ ** Assign a ccb / bind cmd.
+ ** If resetting, shorten settle_time if necessary
+ ** in order to avoid spurious timeouts.
+ ** If resetting or no free ccb,
+ ** insert cmd into the waiting list.
+ **
+ **----------------------------------------------------
+ */
+ if (np->settle_time && cmd->request->timeout >= HZ) {
+ u_long tlimit = jiffies + cmd->request->timeout - HZ;
+ if (time_after(np->settle_time, tlimit))
+ np->settle_time = tlimit;
+ }
+
+ if (np->settle_time || !(cp=ncr_get_ccb (np, cmd))) {
+ insert_into_waiting_list(np, cmd);
+ return(DID_OK);
+ }
+ cp->cmd = cmd;
+
+ /*----------------------------------------------------
+ **
+ ** Build the identify / tag / sdtr message
+ **
+ **----------------------------------------------------
+ */
+
+ idmsg = IDENTIFY(0, sdev->lun);
+
+ if (cp ->tag != NO_TAG ||
+ (cp != np->ccb && np->disc && !(tp->usrflag & UF_NODISC)))
+ idmsg |= 0x40;
+
+ msgptr = cp->scsi_smsg;
+ msglen = 0;
+ msgptr[msglen++] = idmsg;
+
+ if (cp->tag != NO_TAG) {
+ char order = np->order;
+
+ /*
+ ** Force ordered tag if necessary to avoid timeouts
+ ** and to preserve interactivity.
+ */
+ if (lp && time_after(jiffies, lp->tags_stime)) {
+ if (lp->tags_smap) {
+ order = ORDERED_QUEUE_TAG;
+ if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>2){
+ PRINT_ADDR(cmd,
+ "ordered tag forced.\n");
+ }
+ }
+ lp->tags_stime = jiffies + 3*HZ;
+ lp->tags_smap = lp->tags_umap;
+ }
+
+ if (order == 0) {
+ /*
+ ** Ordered write ops, unordered read ops.
+ */
+ switch (cmd->cmnd[0]) {
+ case 0x08: /* READ_SMALL (6) */
+ case 0x28: /* READ_BIG (10) */
+ case 0xa8: /* READ_HUGE (12) */
+ order = SIMPLE_QUEUE_TAG;
+ break;
+ default:
+ order = ORDERED_QUEUE_TAG;
+ }
+ }
+ msgptr[msglen++] = order;
+ /*
+ ** Actual tags are numbered 1,3,5,..2*MAXTAGS+1,
+ ** since we may have to deal with devices that have
+ ** problems with #TAG 0 or too great #TAG numbers.
+ */
+ msgptr[msglen++] = (cp->tag << 1) + 1;
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Build the data descriptors
+ **
+ **----------------------------------------------------
+ */
+
+ direction = cmd->sc_data_direction;
+ if (direction != DMA_NONE) {
+ segments = ncr_scatter(np, cp, cp->cmd);
+ if (segments < 0) {
+ ncr_free_ccb(np, cp);
+ return(DID_ERROR);
+ }
+ }
+ else {
+ cp->data_len = 0;
+ segments = 0;
+ }
+
+ /*---------------------------------------------------
+ **
+ ** negotiation required?
+ **
+ ** (nego_status is filled by ncr_prepare_nego())
+ **
+ **---------------------------------------------------
+ */
+
+ cp->nego_status = 0;
+
+ if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+ msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Determine xfer direction.
+ **
+ **----------------------------------------------------
+ */
+ if (!cp->data_len)
+ direction = DMA_NONE;
+
+ /*
+ ** If data direction is BIDIRECTIONAL, speculate FROM_DEVICE
+ ** but prepare alternate pointers for TO_DEVICE in case
+ ** of our speculation will be just wrong.
+ ** SCRIPTS will swap values if needed.
+ */
+ switch(direction) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_TO_DEVICE:
+ goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8;
+ if (segments <= MAX_SCATTERL)
+ lastp = goalp - 8 - (segments * 16);
+ else {
+ lastp = NCB_SCRIPTH_PHYS (np, hdata_out2);
+ lastp -= (segments - MAX_SCATTERL) * 16;
+ }
+ if (direction != DMA_BIDIRECTIONAL)
+ break;
+ cp->phys.header.wgoalp = cpu_to_scr(goalp);
+ cp->phys.header.wlastp = cpu_to_scr(lastp);
+ /* fall through */
+ case DMA_FROM_DEVICE:
+ goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
+ if (segments <= MAX_SCATTERL)
+ lastp = goalp - 8 - (segments * 16);
+ else {
+ lastp = NCB_SCRIPTH_PHYS (np, hdata_in2);
+ lastp -= (segments - MAX_SCATTERL) * 16;
+ }
+ break;
+ default:
+ case DMA_NONE:
+ lastp = goalp = NCB_SCRIPT_PHYS (np, no_data);
+ break;
+ }
+
+ /*
+ ** Set all pointers values needed by SCRIPTS.
+ ** If direction is unknown, start at data_io.
+ */
+ cp->phys.header.lastp = cpu_to_scr(lastp);
+ cp->phys.header.goalp = cpu_to_scr(goalp);
+
+ if (direction == DMA_BIDIRECTIONAL)
+ cp->phys.header.savep =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io));
+ else
+ cp->phys.header.savep= cpu_to_scr(lastp);
+
+ /*
+ ** Save the initial data pointer in order to be able
+ ** to redo the command.
+ */
+ cp->startp = cp->phys.header.savep;
+
+ /*----------------------------------------------------
+ **
+ ** fill in ccb
+ **
+ **----------------------------------------------------
+ **
+ **
+ ** physical -> virtual backlink
+ ** Generic SCSI command
+ */
+
+ /*
+ ** Startqueue
+ */
+ cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+ cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_dsa));
+ /*
+ ** select
+ */
+ cp->phys.select.sel_id = sdev_id(sdev);
+ cp->phys.select.sel_scntl3 = tp->wval;
+ cp->phys.select.sel_sxfer = tp->sval;
+ /*
+ ** message
+ */
+ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ /*
+ ** command
+ */
+ memcpy(cp->cdb_buf, cmd->cmnd, min_t(int, cmd->cmd_len, sizeof(cp->cdb_buf)));
+ cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, cdb_buf[0]));
+ cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
+
+ /*
+ ** status
+ */
+ cp->actualquirks = 0;
+ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->parity_status = 0;
+
+ cp->xerr_status = XE_OK;
+#if 0
+ cp->sync_status = tp->sval;
+ cp->wide_status = tp->wval;
+#endif
+
+ /*----------------------------------------------------
+ **
+ ** Critical region: start this job.
+ **
+ **----------------------------------------------------
+ */
+
+ /* activate this job. */
+ cp->magic = CCB_MAGIC;
+
+ /*
+ ** insert next CCBs into start queue.
+ ** 2 max at a time is enough to flush the CCB wait queue.
+ */
+ cp->auto_sense = 0;
+ if (lp)
+ ncr_start_next_ccb(np, lp, 2);
+ else
+ ncr_put_start_queue(np, cp);
+
+ /* Command is successfully queued. */
+
+ return DID_OK;
+}
+
+
+/*==========================================================
+**
+**
+** Insert a CCB into the start queue and wake up the
+** SCRIPTS processor.
+**
+**
+**==========================================================
+*/
+
+static void ncr_start_next_ccb(struct ncb *np, struct lcb *lp, int maxn)
+{
+ struct list_head *qp;
+ struct ccb *cp;
+
+ if (lp->held_ccb)
+ return;
+
+ while (maxn-- && lp->queuedccbs < lp->queuedepth) {
+ qp = ncr_list_pop(&lp->wait_ccbq);
+ if (!qp)
+ break;
+ ++lp->queuedccbs;
+ cp = list_entry(qp, struct ccb, link_ccbq);
+ list_add_tail(qp, &lp->busy_ccbq);
+ lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag] =
+ cpu_to_scr(CCB_PHYS (cp, restart));
+ ncr_put_start_queue(np, cp);
+ }
+}
+
+static void ncr_put_start_queue(struct ncb *np, struct ccb *cp)
+{
+ u16 qidx;
+
+ /*
+ ** insert into start queue.
+ */
+ if (!np->squeueput) np->squeueput = 1;
+ qidx = np->squeueput + 2;
+ if (qidx >= MAX_START + MAX_START) qidx = 1;
+
+ np->scripth->tryloop [qidx] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ MEMORY_BARRIER();
+ np->scripth->tryloop [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, start));
+
+ np->squeueput = qidx;
+ ++np->queuedccbs;
+ cp->queued = 1;
+
+ if (DEBUG_FLAGS & DEBUG_QUEUE)
+ printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput);
+
+ /*
+ ** Script processor may be waiting for reselect.
+ ** Wake it up.
+ */
+ MEMORY_BARRIER();
+ OUTB (nc_istat, SIGP);
+}
+
+
+static int ncr_reset_scsi_bus(struct ncb *np, int enab_int, int settle_delay)
+{
+ u32 term;
+ int retv = 0;
+
+ np->settle_time = jiffies + settle_delay * HZ;
+
+ if (bootverbose > 1)
+ printk("%s: resetting, "
+ "command processing suspended for %d seconds\n",
+ ncr_name(np), settle_delay);
+
+ ncr_chip_reset(np, 100);
+ udelay(2000); /* The 895 needs time for the bus mode to settle */
+ if (enab_int)
+ OUTW (nc_sien, RST);
+ /*
+ ** Enable Tolerant, reset IRQD if present and
+ ** properly set IRQ mode, prior to resetting the bus.
+ */
+ OUTB (nc_stest3, TE);
+ OUTB (nc_scntl1, CRST);
+ udelay(200);
+
+ if (!driver_setup.bus_check)
+ goto out;
+ /*
+ ** Check for no terminators or SCSI bus shorts to ground.
+ ** Read SCSI data bus, data parity bits and control signals.
+ ** We are expecting RESET to be TRUE and other signals to be
+ ** FALSE.
+ */
+
+ term = INB(nc_sstat0);
+ term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
+ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */
+ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */
+ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */
+ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */
+
+ if (!(np->features & FE_WIDE))
+ term &= 0x3ffff;
+
+ if (term != (2<<7)) {
+ printk("%s: suspicious SCSI data while resetting the BUS.\n",
+ ncr_name(np));
+ printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+ "0x%lx, expecting 0x%lx\n",
+ ncr_name(np),
+ (np->features & FE_WIDE) ? "dp1,d15-8," : "",
+ (u_long)term, (u_long)(2<<7));
+ if (driver_setup.bus_check == 1)
+ retv = 1;
+ }
+out:
+ OUTB (nc_scntl1, 0);
+ return retv;
+}
+
+/*
+ * Start reset process.
+ * If reset in progress do nothing.
+ * The interrupt handler will reinitialize the chip.
+ * The timeout handler will wait for settle_time before
+ * clearing it and so resuming command processing.
+ */
+static void ncr_start_reset(struct ncb *np)
+{
+ if (!np->settle_time) {
+ ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay);
+ }
+}
+
+/*==========================================================
+**
+**
+** Reset the SCSI BUS.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
+{
+/* struct scsi_device *device = cmd->device; */
+ struct ccb *cp;
+ int found;
+
+/*
+ * Return immediately if reset is in progress.
+ */
+ if (np->settle_time) {
+ return FAILED;
+ }
+/*
+ * Start the reset process.
+ * The script processor is then assumed to be stopped.
+ * Commands will now be queued in the waiting list until a settle
+ * delay of 2 seconds will be completed.
+ */
+ ncr_start_reset(np);
+/*
+ * First, look in the wakeup list
+ */
+ for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+/*
+ * Then, look in the waiting list
+ */
+ if (!found && retrieve_from_waiting_list(0, np, cmd))
+ found = 1;
+/*
+ * Wake-up all awaiting commands with DID_RESET.
+ */
+ reset_waiting_list(np);
+/*
+ * Wake-up all pending commands with HS_RESET -> DID_RESET.
+ */
+ ncr_wakeup(np, HS_RESET);
+/*
+ * If the involved command was not in a driver queue, and the
+ * scsi driver told us reset is synchronous, and the command is not
+ * currently in the waiting list, complete it with DID_RESET status,
+ * in order to keep it alive.
+ */
+ if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
+ cmd->result = ScsiResult(DID_RESET, 0);
+ ncr_queue_done_cmd(np, cmd);
+ }
+
+ return SUCCESS;
+}
+
+#if 0 /* unused and broken.. */
+/*==========================================================
+**
+**
+** Abort an SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_abort_command (struct ncb *np, struct scsi_cmnd *cmd)
+{
+/* struct scsi_device *device = cmd->device; */
+ struct ccb *cp;
+ int found;
+ int retv;
+
+/*
+ * First, look for the scsi command in the waiting list
+ */
+ if (remove_from_waiting_list(np, cmd)) {
+ cmd->result = ScsiResult(DID_ABORT, 0);
+ ncr_queue_done_cmd(np, cmd);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * Then, look in the wakeup list
+ */
+ for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (np->settle_time) {
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ /*
+ ** If the CCB is active, patch schedule jumps for the
+ ** script to abort the command.
+ */
+
+ switch(cp->host_status) {
+ case HS_BUSY:
+ case HS_NEGOTIATE:
+ printk ("%s: abort ccb=%p (cancel)\n", ncr_name (np), cp);
+ cp->start.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, cancel));
+ retv = SCSI_ABORT_PENDING;
+ break;
+ case HS_DISCONNECT:
+ cp->restart.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
+ retv = SCSI_ABORT_PENDING;
+ break;
+ default:
+ retv = SCSI_ABORT_NOT_RUNNING;
+ break;
+
+ }
+
+ /*
+ ** If there are no requests, the script
+ ** processor will sleep on SEL_WAIT_RESEL.
+ ** Let's wake it up, since it may have to work.
+ */
+ OUTB (nc_istat, SIGP);
+
+ return retv;
+}
+#endif
+
+static void ncr_detach(struct ncb *np)
+{
+ struct ccb *cp;
+ struct tcb *tp;
+ struct lcb *lp;
+ int target, lun;
+ int i;
+ char inst_name[16];
+
+ /* Local copy so we don't access np after freeing it! */
+ strlcpy(inst_name, ncr_name(np), sizeof(inst_name));
+
+ printk("%s: releasing host resources\n", ncr_name(np));
+
+/*
+** Stop the ncr_timeout process
+** Set release_stage to 1 and wait that ncr_timeout() set it to 2.
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printk("%s: stopping the timer\n", ncr_name(np));
+#endif
+ np->release_stage = 1;
+ for (i = 50 ; i && np->release_stage != 2 ; i--)
+ mdelay(100);
+ if (np->release_stage != 2)
+ printk("%s: the timer seems to be already stopped\n", ncr_name(np));
+ else np->release_stage = 2;
+
+/*
+** Disable chip interrupts
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printk("%s: disabling chip interrupts\n", ncr_name(np));
+#endif
+ OUTW (nc_sien , 0);
+ OUTB (nc_dien , 0);
+
+ /*
+ ** Reset NCR chip
+ ** Restore bios setting for automatic clock detection.
+ */
+
+ printk("%s: resetting chip\n", ncr_name(np));
+ ncr_chip_reset(np, 100);
+
+ OUTB(nc_dmode, np->sv_dmode);
+ OUTB(nc_dcntl, np->sv_dcntl);
+ OUTB(nc_ctest0, np->sv_ctest0);
+ OUTB(nc_ctest3, np->sv_ctest3);
+ OUTB(nc_ctest4, np->sv_ctest4);
+ OUTB(nc_ctest5, np->sv_ctest5);
+ OUTB(nc_gpcntl, np->sv_gpcntl);
+ OUTB(nc_stest2, np->sv_stest2);
+
+ ncr_selectclock(np, np->sv_scntl3);
+
+ /*
+ ** Free allocated ccb(s)
+ */
+
+ while ((cp=np->ccb->link_ccb) != NULL) {
+ np->ccb->link_ccb = cp->link_ccb;
+ if (cp->host_status) {
+ printk("%s: shall free an active ccb (host_status=%d)\n",
+ ncr_name(np), cp->host_status);
+ }
+#ifdef DEBUG_NCR53C8XX
+ printk("%s: freeing ccb (%lx)\n", ncr_name(np), (u_long) cp);
+#endif
+ m_free_dma(cp, sizeof(*cp), "CCB");
+ }
+
+ /* Free allocated tp(s) */
+
+ for (target = 0; target < MAX_TARGET ; target++) {
+ tp=&np->target[target];
+ for (lun = 0 ; lun < MAX_LUN ; lun++) {
+ lp = tp->lp[lun];
+ if (lp) {
+#ifdef DEBUG_NCR53C8XX
+ printk("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp);
+#endif
+ if (lp->jump_ccb != &lp->jump_ccb_0)
+ m_free_dma(lp->jump_ccb,256,"JUMP_CCB");
+ m_free_dma(lp, sizeof(*lp), "LCB");
+ }
+ }
+ }
+
+ if (np->scripth0)
+ m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH");
+ if (np->script0)
+ m_free_dma(np->script0, sizeof(struct script), "SCRIPT");
+ if (np->ccb)
+ m_free_dma(np->ccb, sizeof(struct ccb), "CCB");
+ m_free_dma(np, sizeof(struct ncb), "NCB");
+
+ printk("%s: host resources successfully released\n", inst_name);
+}
+
+/*==========================================================
+**
+**
+** Complete execution of a SCSI command.
+** Signal completion to the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+
+void ncr_complete (struct ncb *np, struct ccb *cp)
+{
+ struct scsi_cmnd *cmd;
+ struct tcb *tp;
+ struct lcb *lp;
+
+ /*
+ ** Sanity check
+ */
+
+ if (!cp || cp->magic != CCB_MAGIC || !cp->cmd)
+ return;
+
+ /*
+ ** Print minimal debug information.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
+ cp->host_status,cp->scsi_status);
+
+ /*
+ ** Get command, target and lun pointers.
+ */
+
+ cmd = cp->cmd;
+ cp->cmd = NULL;
+ tp = &np->target[cmd->device->id];
+ lp = tp->lp[cmd->device->lun];
+
+ /*
+ ** We donnot queue more than 1 ccb per target
+ ** with negotiation at any time. If this ccb was
+ ** used for negotiation, clear this info in the tcb.
+ */
+
+ if (cp == tp->nego_cp)
+ tp->nego_cp = NULL;
+
+ /*
+ ** If auto-sense performed, change scsi status.
+ */
+ if (cp->auto_sense) {
+ cp->scsi_status = cp->auto_sense;
+ }
+
+ /*
+ ** If we were recovering from queue full or performing
+ ** auto-sense, requeue skipped CCBs to the wait queue.
+ */
+
+ if (lp && lp->held_ccb) {
+ if (cp == lp->held_ccb) {
+ list_splice_init(&lp->skip_ccbq, &lp->wait_ccbq);
+ lp->held_ccb = NULL;
+ }
+ }
+
+ /*
+ ** Check for parity errors.
+ */
+
+ if (cp->parity_status > 1) {
+ PRINT_ADDR(cmd, "%d parity error(s).\n",cp->parity_status);
+ }
+
+ /*
+ ** Check for extended errors.
+ */
+
+ if (cp->xerr_status != XE_OK) {
+ switch (cp->xerr_status) {
+ case XE_EXTRA_DATA:
+ PRINT_ADDR(cmd, "extraneous data discarded.\n");
+ break;
+ case XE_BAD_PHASE:
+ PRINT_ADDR(cmd, "invalid scsi phase (4/5).\n");
+ break;
+ default:
+ PRINT_ADDR(cmd, "extended error %d.\n",
+ cp->xerr_status);
+ break;
+ }
+ if (cp->host_status==HS_COMPLETE)
+ cp->host_status = HS_FAIL;
+ }
+
+ /*
+ ** Print out any error for debugging purpose.
+ */
+ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+ if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD) {
+ PRINT_ADDR(cmd, "ERROR: cmd=%x host_status=%x "
+ "scsi_status=%x\n", cmd->cmnd[0],
+ cp->host_status, cp->scsi_status);
+ }
+ }
+
+ /*
+ ** Check the status.
+ */
+ if ( (cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_GOOD ||
+ cp->scsi_status == S_COND_MET)) {
+ /*
+ * All went well (GOOD status).
+ * CONDITION MET status is returned on
+ * `Pre-Fetch' or `Search data' success.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ /*
+ ** @RESID@
+ ** Could dig out the correct value for resid,
+ ** but it would be quite complicated.
+ */
+ /* if (cp->phys.header.lastp != cp->phys.header.goalp) */
+
+ /*
+ ** Allocate the lcb if not yet.
+ */
+ if (!lp)
+ ncr_alloc_lcb (np, cmd->device->id, cmd->device->lun);
+
+ tp->bytes += cp->data_len;
+ tp->transfers ++;
+
+ /*
+ ** If tags was reduced due to queue full,
+ ** increase tags if 1000 good status received.
+ */
+ if (lp && lp->usetags && lp->numtags < lp->maxtags) {
+ ++lp->num_good;
+ if (lp->num_good >= 1000) {
+ lp->num_good = 0;
+ ++lp->numtags;
+ ncr_setup_tags (np, cmd->device);
+ }
+ }
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_CHECK_COND)) {
+ /*
+ ** Check condition code
+ */
+ cmd->result = ScsiResult(DID_OK, S_CHECK_COND);
+
+ /*
+ ** Copy back sense data to caller's buffer.
+ */
+ memcpy(cmd->sense_buffer, cp->sense_buf,
+ min_t(size_t, SCSI_SENSE_BUFFERSIZE,
+ sizeof(cp->sense_buf)));
+
+ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+ u_char *p = cmd->sense_buffer;
+ int i;
+ PRINT_ADDR(cmd, "sense data:");
+ for (i=0; i<14; i++) printk (" %x", *p++);
+ printk (".\n");
+ }
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_CONFLICT)) {
+ /*
+ ** Reservation Conflict condition code
+ */
+ cmd->result = ScsiResult(DID_OK, S_CONFLICT);
+
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_BUSY ||
+ cp->scsi_status == S_QUEUE_FULL)) {
+
+ /*
+ ** Target is busy.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ } else if ((cp->host_status == HS_SEL_TIMEOUT)
+ || (cp->host_status == HS_TIMEOUT)) {
+
+ /*
+ ** No response
+ */
+ cmd->result = ScsiResult(DID_TIME_OUT, cp->scsi_status);
+
+ } else if (cp->host_status == HS_RESET) {
+
+ /*
+ ** SCSI bus reset
+ */
+ cmd->result = ScsiResult(DID_RESET, cp->scsi_status);
+
+ } else if (cp->host_status == HS_ABORTED) {
+
+ /*
+ ** Transfer aborted
+ */
+ cmd->result = ScsiResult(DID_ABORT, cp->scsi_status);
+
+ } else {
+
+ /*
+ ** Other protocol messes
+ */
+ PRINT_ADDR(cmd, "COMMAND FAILED (%x %x) @%p.\n",
+ cp->host_status, cp->scsi_status, cp);
+
+ cmd->result = ScsiResult(DID_ERROR, cp->scsi_status);
+ }
+
+ /*
+ ** trace output
+ */
+
+ if (tp->usrflag & UF_TRACE) {
+ u_char * p;
+ int i;
+ PRINT_ADDR(cmd, " CMD:");
+ p = (u_char*) &cmd->cmnd[0];
+ for (i=0; i<cmd->cmd_len; i++) printk (" %x", *p++);
+
+ if (cp->host_status==HS_COMPLETE) {
+ switch (cp->scsi_status) {
+ case S_GOOD:
+ printk (" GOOD");
+ break;
+ case S_CHECK_COND:
+ printk (" SENSE:");
+ p = (u_char*) &cmd->sense_buffer;
+ for (i=0; i<14; i++)
+ printk (" %x", *p++);
+ break;
+ default:
+ printk (" STAT: %x\n", cp->scsi_status);
+ break;
+ }
+ } else printk (" HOSTERROR: %x", cp->host_status);
+ printk ("\n");
+ }
+
+ /*
+ ** Free this ccb
+ */
+ ncr_free_ccb (np, cp);
+
+ /*
+ ** requeue awaiting scsi commands for this lun.
+ */
+ if (lp && lp->queuedccbs < lp->queuedepth &&
+ !list_empty(&lp->wait_ccbq))
+ ncr_start_next_ccb(np, lp, 2);
+
+ /*
+ ** requeue awaiting scsi commands for this controller.
+ */
+ if (np->waiting_list)
+ requeue_waiting_list(np);
+
+ /*
+ ** signal completion to generic driver.
+ */
+ ncr_queue_done_cmd(np, cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+/*
+** This CCB has been skipped by the NCR.
+** Queue it in the corresponding unit queue.
+*/
+static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp)
+{
+ struct tcb *tp = &np->target[cp->target];
+ struct lcb *lp = tp->lp[cp->lun];
+
+ if (lp && cp != np->ccb) {
+ cp->host_status &= ~HS_SKIPMASK;
+ cp->start.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+ list_move_tail(&cp->link_ccbq, &lp->skip_ccbq);
+ if (cp->queued) {
+ --lp->queuedccbs;
+ }
+ }
+ if (cp->queued) {
+ --np->queuedccbs;
+ cp->queued = 0;
+ }
+}
+
+/*
+** The NCR has completed CCBs.
+** Look at the DONE QUEUE if enabled, otherwise scan all CCBs
+*/
+void ncr_wakeup_done (struct ncb *np)
+{
+ struct ccb *cp;
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+ int i, j;
+
+ i = np->ccb_done_ic;
+ while (1) {
+ j = i+1;
+ if (j >= MAX_DONE)
+ j = 0;
+
+ cp = np->ccb_done[j];
+ if (!CCB_DONE_VALID(cp))
+ break;
+
+ np->ccb_done[j] = (struct ccb *)CCB_DONE_EMPTY;
+ np->scripth->done_queue[5*j + 4] =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, done_plug));
+ MEMORY_BARRIER();
+ np->scripth->done_queue[5*i + 4] =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, done_end));
+
+ if (cp->host_status & HS_DONEMASK)
+ ncr_complete (np, cp);
+ else if (cp->host_status & HS_SKIPMASK)
+ ncr_ccb_skipped (np, cp);
+
+ i = j;
+ }
+ np->ccb_done_ic = i;
+#else
+ cp = np->ccb;
+ while (cp) {
+ if (cp->host_status & HS_DONEMASK)
+ ncr_complete (np, cp);
+ else if (cp->host_status & HS_SKIPMASK)
+ ncr_ccb_skipped (np, cp);
+ cp = cp->link_ccb;
+ }
+#endif
+}
+
+/*
+** Complete all active CCBs.
+*/
+void ncr_wakeup (struct ncb *np, u_long code)
+{
+ struct ccb *cp = np->ccb;
+
+ while (cp) {
+ if (cp->host_status != HS_IDLE) {
+ cp->host_status = code;
+ ncr_complete (np, cp);
+ }
+ cp = cp->link_ccb;
+ }
+}
+
+/*
+** Reset ncr chip.
+*/
+
+/* Some initialisation must be done immediately following reset, for 53c720,
+ * at least. EA (dcntl bit 5) isn't set here as it is set once only in
+ * the _detect function.
+ */
+static void ncr_chip_reset(struct ncb *np, int delay)
+{
+ OUTB (nc_istat, SRST);
+ udelay(delay);
+ OUTB (nc_istat, 0 );
+
+ if (np->features & FE_EHP)
+ OUTB (nc_ctest0, EHP);
+ if (np->features & FE_MUX)
+ OUTB (nc_ctest4, MUX);
+}
+
+
+/*==========================================================
+**
+**
+** Start NCR chip.
+**
+**
+**==========================================================
+*/
+
+void ncr_init (struct ncb *np, int reset, char * msg, u_long code)
+{
+ int i;
+
+ /*
+ ** Reset chip if asked, otherwise just clear fifos.
+ */
+
+ if (reset) {
+ OUTB (nc_istat, SRST);
+ udelay(100);
+ }
+ else {
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+ }
+
+ /*
+ ** Message.
+ */
+
+ if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
+
+ /*
+ ** Clear Start Queue
+ */
+ np->queuedepth = MAX_START - 1; /* 1 entry needed as end marker */
+ for (i = 1; i < MAX_START + MAX_START; i += 2)
+ np->scripth0->tryloop[i] =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+
+ /*
+ ** Start at first entry.
+ */
+ np->squeueput = 0;
+ np->script0->startpos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np, tryloop));
+
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+ /*
+ ** Clear Done Queue
+ */
+ for (i = 0; i < MAX_DONE; i++) {
+ np->ccb_done[i] = (struct ccb *)CCB_DONE_EMPTY;
+ np->scripth0->done_queue[5*i + 4] =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, done_end));
+ }
+#endif
+
+ /*
+ ** Start at first entry.
+ */
+ np->script0->done_pos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np,done_queue));
+ np->ccb_done_ic = MAX_DONE-1;
+ np->scripth0->done_queue[5*(MAX_DONE-1) + 4] =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, done_plug));
+
+ /*
+ ** Wakeup all pending jobs.
+ */
+ ncr_wakeup (np, code);
+
+ /*
+ ** Init chip.
+ */
+
+ /*
+ ** Remove reset; big delay because the 895 needs time for the
+ ** bus mode to settle
+ */
+ ncr_chip_reset(np, 2000);
+
+ OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
+ /* full arb., ena parity, par->ATN */
+ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
+
+ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB (nc_istat , SIGP ); /* Signal Process */
+ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB (nc_ctest0, np->rv_ctest0); /* 720: CDIS and EHP */
+ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ OUTB (nc_stest2, EXT|np->rv_stest2); /* Extended Sreq/Sack filtering */
+ OUTB (nc_stest3, TE); /* TolerANT enable */
+ OUTB (nc_stime0, 0x0c ); /* HTH disabled STO 0.25 sec */
+
+ /*
+ ** Disable disconnects.
+ */
+
+ np->disc = 0;
+
+ /*
+ ** Enable GPIO0 pin for writing if LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ OUTOFFB (nc_gpcntl, 0x01);
+ }
+
+ /*
+ ** enable ints
+ */
+
+ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+ OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID);
+
+ /*
+ ** Fill in target structure.
+ ** Reinitialize usrsync.
+ ** Reinitialize usrwide.
+ ** Prepare sync negotiation according to actual SCSI bus mode.
+ */
+
+ for (i=0;i<MAX_TARGET;i++) {
+ struct tcb *tp = &np->target[i];
+
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+
+ if (tp->usrsync != 255) {
+ if (tp->usrsync <= np->maxsync) {
+ if (tp->usrsync < np->minsync) {
+ tp->usrsync = np->minsync;
+ }
+ }
+ else
+ tp->usrsync = 255;
+ }
+
+ if (tp->usrwide > np->maxwide)
+ tp->usrwide = np->maxwide;
+
+ }
+
+ /*
+ ** Start script processor.
+ */
+ if (np->paddr2) {
+ if (bootverbose)
+ printk ("%s: Downloading SCSI SCRIPTS.\n",
+ ncr_name(np));
+ OUTL (nc_scratcha, vtobus(np->script0));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, start_ram));
+ }
+ else
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+ /*
+ ** minsync unit is 4ns !
+ */
+
+ u_long minsync = tp->usrsync;
+
+ /*
+ ** SCSI bus mode limit
+ */
+
+ if (np->scsi_mode && np->scsi_mode == SMODE_SE) {
+ if (minsync < 12) minsync = 12;
+ }
+
+ /*
+ ** our limit ..
+ */
+
+ if (minsync < np->minsync)
+ minsync = np->minsync;
+
+ /*
+ ** divider limit
+ */
+
+ if (minsync > np->maxsync)
+ minsync = 255;
+
+ if (tp->maxoffs > np->maxoffs)
+ tp->maxoffs = np->maxoffs;
+
+ tp->minsync = minsync;
+ tp->maxoffs = (minsync<255 ? tp->maxoffs : 0);
+
+ /*
+ ** period=0: has to negotiate sync transfer
+ */
+
+ tp->period=0;
+
+ /*
+ ** widedone=0: has to negotiate wide transfer
+ */
+ tp->widedone=0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
+**
+**==========================================================
+*/
+
+static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl3p)
+{
+ u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */
+ int div = np->clock_divn; /* Number of divisors supported */
+ u_long fak; /* Sync factor in sxfer */
+ u_long per; /* Period in tenths of ns */
+ u_long kpc; /* (per * clk) */
+
+ /*
+ ** Compute the synchronous period in tenths of nano-seconds
+ */
+ if (sfac <= 10) per = 250;
+ else if (sfac == 11) per = 303;
+ else if (sfac == 12) per = 500;
+ else per = 40 * sfac;
+
+ /*
+ ** Look for the greatest clock divisor that allows an
+ ** input speed faster than the period.
+ */
+ kpc = per * clk;
+ while (--div > 0)
+ if (kpc >= (div_10M[div] << 2)) break;
+
+ /*
+ ** Calculate the lowest clock factor that allows an output
+ ** speed not faster than the period.
+ */
+ fak = (kpc - 1) / div_10M[div] + 1;
+
+#if 0 /* This optimization does not seem very useful */
+
+ per = (fak * div_10M[div]) / clk;
+
+ /*
+ ** Why not to try the immediate lower divisor and to choose
+ ** the one that allows the fastest output speed ?
+ ** We don't want input speed too much greater than output speed.
+ */
+ if (div >= 1 && fak < 8) {
+ u_long fak2, per2;
+ fak2 = (kpc - 1) / div_10M[div-1] + 1;
+ per2 = (fak2 * div_10M[div-1]) / clk;
+ if (per2 < per && fak2 <= 8) {
+ fak = fak2;
+ per = per2;
+ --div;
+ }
+ }
+#endif
+
+ if (fak < 4) fak = 4; /* Should never happen, too bad ... */
+
+ /*
+ ** Compute and return sync parameters for the ncr
+ */
+ *fakp = fak - 4;
+ *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
+}
+
+
+/*==========================================================
+**
+** Set actual values, sync status and patch all ccbs of
+** a target according to new sync/wide agreement.
+**
+**==========================================================
+*/
+
+static void ncr_set_sync_wide_status (struct ncb *np, u_char target)
+{
+ struct ccb *cp;
+ struct tcb *tp = &np->target[target];
+
+ /*
+ ** set actual value and sync_status
+ */
+ OUTB (nc_sxfer, tp->sval);
+ np->sync_st = tp->sval;
+ OUTB (nc_scntl3, tp->wval);
+ np->wide_st = tp->wval;
+
+ /*
+ ** patch ALL ccbs of this target.
+ */
+ for (cp = np->ccb; cp; cp = cp->link_ccb) {
+ if (!cp->cmd) continue;
+ if (scmd_id(cp->cmd) != target) continue;
+#if 0
+ cp->sync_status = tp->sval;
+ cp->wide_status = tp->wval;
+#endif
+ cp->phys.select.sel_scntl3 = tp->wval;
+ cp->phys.select.sel_sxfer = tp->sval;
+ }
+}
+
+/*==========================================================
+**
+** Switch sync mode for current job and it's target
+**
+**==========================================================
+*/
+
+static void ncr_setsync (struct ncb *np, struct ccb *cp, u_char scntl3, u_char sxfer)
+{
+ struct scsi_cmnd *cmd = cp->cmd;
+ struct tcb *tp;
+ u_char target = INB (nc_sdid) & 0x0f;
+ u_char idiv;
+
+ BUG_ON(target != (scmd_id(cmd) & 0xf));
+
+ tp = &np->target[target];
+
+ if (!scntl3 || !(sxfer & 0x1f))
+ scntl3 = np->rv_scntl3;
+ scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) | (np->rv_scntl3 & 0x07);
+
+ /*
+ ** Deduce the value of controller sync period from scntl3.
+ ** period is in tenths of nano-seconds.
+ */
+
+ idiv = ((scntl3 >> 4) & 0x7);
+ if ((sxfer & 0x1f) && idiv)
+ tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+ else
+ tp->period = 0xffff;
+
+ /* Stop there if sync parameters are unchanged */
+ if (tp->sval == sxfer && tp->wval == scntl3)
+ return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ if (sxfer & 0x01f) {
+ /* Disable extended Sreq/Sack filtering */
+ if (tp->period <= 2000)
+ OUTOFFB(nc_stest2, EXT);
+ }
+
+ spi_display_xfer_agreement(tp->starget);
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+/*==========================================================
+**
+** Switch wide mode for current job and it's target
+** SCSI specs say: a SCSI device that accepts a WDTR
+** message shall reset the synchronous agreement to
+** asynchronous mode.
+**
+**==========================================================
+*/
+
+static void ncr_setwide (struct ncb *np, struct ccb *cp, u_char wide, u_char ack)
+{
+ struct scsi_cmnd *cmd = cp->cmd;
+ u16 target = INB (nc_sdid) & 0x0f;
+ struct tcb *tp;
+ u_char scntl3;
+ u_char sxfer;
+
+ BUG_ON(target != (scmd_id(cmd) & 0xf));
+
+ tp = &np->target[target];
+ tp->widedone = wide+1;
+ scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0);
+
+ sxfer = ack ? 0 : tp->sval;
+
+ /*
+ ** Stop there if sync/wide parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (bootverbose >= 2) {
+ dev_info(&cmd->device->sdev_target->dev, "WIDE SCSI %sabled.\n",
+ (scntl3 & EWS) ? "en" : "dis");
+ }
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+/*==========================================================
+**
+** Switch tagged mode for a target.
+**
+**==========================================================
+*/
+
+static void ncr_setup_tags (struct ncb *np, struct scsi_device *sdev)
+{
+ unsigned char tn = sdev->id, ln = sdev->lun;
+ struct tcb *tp = &np->target[tn];
+ struct lcb *lp = tp->lp[ln];
+ u_char reqtags, maxdepth;
+
+ /*
+ ** Just in case ...
+ */
+ if ((!tp) || (!lp) || !sdev)
+ return;
+
+ /*
+ ** If SCSI device queue depth is not yet set, leave here.
+ */
+ if (!lp->scdev_depth)
+ return;
+
+ /*
+ ** Donnot allow more tags than the SCSI driver can queue
+ ** for this device.
+ ** Donnot allow more tags than we can handle.
+ */
+ maxdepth = lp->scdev_depth;
+ if (maxdepth > lp->maxnxs) maxdepth = lp->maxnxs;
+ if (lp->maxtags > maxdepth) lp->maxtags = maxdepth;
+ if (lp->numtags > maxdepth) lp->numtags = maxdepth;
+
+ /*
+ ** only devices conformant to ANSI Version >= 2
+ ** only devices capable of tagged commands
+ ** only if enabled by user ..
+ */
+ if (sdev->tagged_supported && lp->numtags > 1) {
+ reqtags = lp->numtags;
+ } else {
+ reqtags = 1;
+ }
+
+ /*
+ ** Update max number of tags
+ */
+ lp->numtags = reqtags;
+ if (lp->numtags > lp->maxtags)
+ lp->maxtags = lp->numtags;
+
+ /*
+ ** If we want to switch tag mode, we must wait
+ ** for no CCB to be active.
+ */
+ if (reqtags > 1 && lp->usetags) { /* Stay in tagged mode */
+ if (lp->queuedepth == reqtags) /* Already announced */
+ return;
+ lp->queuedepth = reqtags;
+ }
+ else if (reqtags <= 1 && !lp->usetags) { /* Stay in untagged mode */
+ lp->queuedepth = reqtags;
+ return;
+ }
+ else { /* Want to switch tag mode */
+ if (lp->busyccbs) /* If not yet safe, return */
+ return;
+ lp->queuedepth = reqtags;
+ lp->usetags = reqtags > 1 ? 1 : 0;
+ }
+
+ /*
+ ** Patch the lun mini-script, according to tag mode.
+ */
+ lp->jump_tag.l_paddr = lp->usetags?
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag)) :
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+ /*
+ ** Announce change to user.
+ */
+ if (bootverbose) {
+ if (lp->usetags) {
+ dev_info(&sdev->sdev_gendev,
+ "tagged command queue depth set to %d\n",
+ reqtags);
+ } else {
+ dev_info(&sdev->sdev_gendev,
+ "tagged command queueing disabled\n");
+ }
+ }
+}
+
+/*==========================================================
+**
+**
+** ncr timeout handler.
+**
+**
+**==========================================================
+**
+** Misused to keep the driver running when
+** interrupts are not configured correctly.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_timeout (struct ncb *np)
+{
+ u_long thistime = jiffies;
+
+ /*
+ ** If release process in progress, let's go
+ ** Set the release stage from 1 to 2 to synchronize
+ ** with the release process.
+ */
+
+ if (np->release_stage) {
+ if (np->release_stage == 1) np->release_stage = 2;
+ return;
+ }
+
+ np->timer.expires = jiffies + SCSI_NCR_TIMER_INTERVAL;
+ add_timer(&np->timer);
+
+ /*
+ ** If we are resetting the ncr, wait for settle_time before
+ ** clearing it. Then command processing will be resumed.
+ */
+ if (np->settle_time) {
+ if (np->settle_time <= thistime) {
+ if (bootverbose > 1)
+ printk("%s: command processing resumed\n", ncr_name(np));
+ np->settle_time = 0;
+ np->disc = 1;
+ requeue_waiting_list(np);
+ }
+ return;
+ }
+
+ /*
+ ** Since the generic scsi driver only allows us 0.5 second
+ ** to perform abort of a command, we must look at ccbs about
+ ** every 0.25 second.
+ */
+ if (np->lasttime + 4*HZ < thistime) {
+ /*
+ ** block ncr interrupts
+ */
+ np->lasttime = thistime;
+ }
+
+#ifdef SCSI_NCR_BROKEN_INTR
+ if (INB(nc_istat) & (INTF|SIP|DIP)) {
+
+ /*
+ ** Process pending interrupts.
+ */
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("{");
+ ncr_exception (np);
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("}");
+ }
+#endif /* SCSI_NCR_BROKEN_INTR */
+}
+
+/*==========================================================
+**
+** log message for real hard errors
+**
+** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)."
+** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf."
+**
+** exception register:
+** ds: dstat
+** si: sist
+**
+** SCSI bus lines:
+** so: control lines as driver by NCR.
+** si: control lines as seen by NCR.
+** sd: scsi data lines as seen by NCR.
+**
+** wide/fastmode:
+** sxfer: (see the manual)
+** scntl3: (see the manual)
+**
+** current script command:
+** dsp: script address (relative to start of script).
+** dbc: first word of script command.
+**
+** First 16 register of the chip:
+** r0..rf
+**
+**==========================================================
+*/
+
+static void ncr_log_hard_error(struct ncb *np, u16 sist, u_char dstat)
+{
+ u32 dsp;
+ int script_ofs;
+ int script_size;
+ char *script_name;
+ u_char *script_base;
+ int i;
+
+ dsp = INL (nc_dsp);
+
+ if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+ script_ofs = dsp - np->p_script;
+ script_size = sizeof(struct script);
+ script_base = (u_char *) np->script0;
+ script_name = "script";
+ }
+ else if (np->p_scripth < dsp &&
+ dsp <= np->p_scripth + sizeof(struct scripth)) {
+ script_ofs = dsp - np->p_scripth;
+ script_size = sizeof(struct scripth);
+ script_base = (u_char *) np->scripth0;
+ script_name = "scripth";
+ } else {
+ script_ofs = dsp;
+ script_size = 0;
+ script_base = NULL;
+ script_name = "mem";
+ }
+
+ printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
+ ncr_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
+ (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl),
+ (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs,
+ (unsigned)INL (nc_dbc));
+
+ if (((script_ofs & 3) == 0) &&
+ (unsigned)script_ofs < script_size) {
+ printk ("%s: script cmd = %08x\n", ncr_name(np),
+ scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs)));
+ }
+
+ printk ("%s: regdump:", ncr_name(np));
+ for (i=0; i<16;i++)
+ printk (" %02x", (unsigned)INB_OFF(i));
+ printk (".\n");
+}
+
+/*============================================================
+**
+** ncr chip exception handler.
+**
+**============================================================
+**
+** In normal cases, interrupt conditions occur one at a
+** time. The ncr is able to stack in some extra registers
+** other interrupts that will occur after the first one.
+** But, several interrupts may occur at the same time.
+**
+** We probably should only try to deal with the normal
+** case, but it seems that multiple interrupts occur in
+** some cases that are not abnormal at all.
+**
+** The most frequent interrupt condition is Phase Mismatch.
+** We should want to service this interrupt quickly.
+** A SCSI parity error may be delivered at the same time.
+** The SIR interrupt is not very frequent in this driver,
+** since the INTFLY is likely used for command completion
+** signaling.
+** The Selection Timeout interrupt may be triggered with
+** IID and/or UDC.
+** The SBMC interrupt (SCSI Bus Mode Change) may probably
+** occur at any time.
+**
+** This handler try to deal as cleverly as possible with all
+** the above.
+**
+**============================================================
+*/
+
+void ncr_exception (struct ncb *np)
+{
+ u_char istat, dstat;
+ u16 sist;
+ int i;
+
+ /*
+ ** interrupt on the fly ?
+ ** Since the global header may be copied back to a CCB
+ ** using a posted PCI memory write, the last operation on
+ ** the istat register is a READ in order to flush posted
+ ** PCI write commands.
+ */
+ istat = INB (nc_istat);
+ if (istat & INTF) {
+ OUTB (nc_istat, (istat & SIGP) | INTF);
+ istat = INB (nc_istat);
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("F ");
+ ncr_wakeup_done (np);
+ }
+
+ if (!(istat & (SIP|DIP)))
+ return;
+
+ if (istat & CABRT)
+ OUTB (nc_istat, CABRT);
+
+ /*
+ ** Steinbach's Guideline for Systems Programming:
+ ** Never test for an error condition you don't know how to handle.
+ */
+
+ sist = (istat & SIP) ? INW (nc_sist) : 0;
+ dstat = (istat & DIP) ? INB (nc_dstat) : 0;
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printk ("<%d|%x:%x|%x:%x>",
+ (int)INB(nc_scr0),
+ dstat,sist,
+ (unsigned)INL(nc_dsp),
+ (unsigned)INL(nc_dbc));
+
+ /*========================================================
+ ** First, interrupts we want to service cleanly.
+ **
+ ** Phase mismatch is the most frequent interrupt, and
+ ** so we have to service it as quickly and as cleanly
+ ** as possible.
+ ** Programmed interrupts are rarely used in this driver,
+ ** but we must handle them cleanly anyway.
+ ** We try to deal with PAR and SBMC combined with
+ ** some other interrupt(s).
+ **=========================================================
+ */
+
+ if (!(sist & (STO|GEN|HTH|SGE|UDC|RST)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if ((sist & SBMC) && ncr_int_sbmc (np))
+ return;
+ if ((sist & PAR) && ncr_int_par (np))
+ return;
+ if (sist & MA) {
+ ncr_int_ma (np);
+ return;
+ }
+ if (dstat & SIR) {
+ ncr_int_sir (np);
+ return;
+ }
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 2.
+ */
+ if (!(sist & (SBMC|PAR)) && !(dstat & SSI)) {
+ printk( "%s: unknown interrupt(s) ignored, "
+ "ISTAT=%x DSTAT=%x SIST=%x\n",
+ ncr_name(np), istat, dstat, sist);
+ return;
+ }
+ OUTONB_STD ();
+ return;
+ }
+
+ /*========================================================
+ ** Now, interrupts that need some fixing up.
+ ** Order and multiple interrupts is so less important.
+ **
+ ** If SRST has been asserted, we just reset the chip.
+ **
+ ** Selection is intirely handled by the chip. If the
+ ** chip says STO, we trust it. Seems some other
+ ** interrupts may occur at the same time (UDC, IID), so
+ ** we ignore them. In any case we do enough fix-up
+ ** in the service routine.
+ ** We just exclude some fatal dma errors.
+ **=========================================================
+ */
+
+ if (sist & RST) {
+ ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+ return;
+ }
+
+ if ((sist & STO) &&
+ !(dstat & (MDPE|BF|ABRT))) {
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 1.
+ */
+ OUTONB (nc_ctest3, CLF);
+
+ ncr_int_sto (np);
+ return;
+ }
+
+ /*=========================================================
+ ** Now, interrupts we are not able to recover cleanly.
+ ** (At least for the moment).
+ **
+ ** Do the register dump.
+ ** Log message for real hard errors.
+ ** Clear all fifos.
+ ** For MDPE, BF, ABORT, IID, SGE and HTH we reset the
+ ** BUS and the chip.
+ ** We are more soft for UDC.
+ **=========================================================
+ */
+
+ if (time_after(jiffies, np->regtime)) {
+ np->regtime = jiffies + 10*HZ;
+ for (i = 0; i<sizeof(np->regdump); i++)
+ ((char*)&np->regdump)[i] = INB_OFF(i);
+ np->regdump.nc_dstat = dstat;
+ np->regdump.nc_sist = sist;
+ }
+
+ ncr_log_hard_error(np, sist, dstat);
+
+ printk ("%s: have to clear fifos.\n", ncr_name (np));
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+
+ if ((sist & (SGE)) ||
+ (dstat & (MDPE|BF|ABRT|IID))) {
+ ncr_start_reset(np);
+ return;
+ }
+
+ if (sist & HTH) {
+ printk ("%s: handshake timeout\n", ncr_name(np));
+ ncr_start_reset(np);
+ return;
+ }
+
+ if (sist & UDC) {
+ printk ("%s: unexpected disconnect\n", ncr_name(np));
+ OUTB (HS_PRT, HS_UNEXPECTED);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, cleanup));
+ return;
+ }
+
+ /*=========================================================
+ ** We just miss the cause of the interrupt. :(
+ ** Print a message. The timeout will do the real work.
+ **=========================================================
+ */
+ printk ("%s: unknown interrupt\n", ncr_name(np));
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for selection timeout
+**
+**==========================================================
+**
+** There seems to be a bug in the 53c810.
+** Although a STO-Interrupt is pending,
+** it continues executing script commands.
+** But it will fail and interrupt (IID) on
+** the next instruction where it's looking
+** for a valid phase.
+**
+**----------------------------------------------------------
+*/
+
+void ncr_int_sto (struct ncb *np)
+{
+ u_long dsa;
+ struct ccb *cp;
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("T");
+
+ /*
+ ** look for ccb and set the status.
+ */
+
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ if (cp) {
+ cp-> host_status = HS_SEL_TIMEOUT;
+ ncr_complete (np, cp);
+ }
+
+ /*
+ ** repair start queue and jump to start point.
+ */
+
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sto_restart));
+ return;
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI bus mode change
+**
+**==========================================================
+**
+** spi2-r12 11.2.3 says a transceiver mode change must
+** generate a reset event and a device that detects a reset
+** event shall initiate a hard reset. It says also that a
+** device that detects a mode change shall set data transfer
+** mode to eight bit asynchronous, etc...
+** So, just resetting should be enough.
+**
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_int_sbmc (struct ncb *np)
+{
+ u_char scsi_mode = INB (nc_stest4) & SMODE;
+
+ if (scsi_mode != np->scsi_mode) {
+ printk("%s: SCSI bus mode change from %x to %x.\n",
+ ncr_name(np), np->scsi_mode, scsi_mode);
+
+ np->scsi_mode = scsi_mode;
+
+
+ /*
+ ** Suspend command processing for 1 second and
+ ** reinitialize all except the chip.
+ */
+ np->settle_time = jiffies + HZ;
+ ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET);
+ return 1;
+ }
+ return 0;
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI parity error.
+**
+**==========================================================
+**
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_int_par (struct ncb *np)
+{
+ u_char hsts = INB (HS_PRT);
+ u32 dbc = INL (nc_dbc);
+ u_char sstat1 = INB (nc_sstat1);
+ int phase = -1;
+ int msg = -1;
+ u32 jmp;
+
+ printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SSTAT1=%x\n",
+ ncr_name(np), hsts, dbc, sstat1);
+
+ /*
+ * Ignore the interrupt if the NCR is not connected
+ * to the SCSI bus, since the right work should have
+ * been done on unexpected disconnection handling.
+ */
+ if (!(INB (nc_scntl1) & ISCON))
+ return 0;
+
+ /*
+ * If the nexus is not clearly identified, reset the bus.
+ * We will try to do better later.
+ */
+ if (hsts & HS_INVALMASK)
+ goto reset_all;
+
+ /*
+ * If the SCSI parity error occurs in MSG IN phase, prepare a
+ * MSG PARITY message. Otherwise, prepare a INITIATOR DETECTED
+ * ERROR message and let the device decide to retry the command
+ * or to terminate with check condition. If we were in MSG IN
+ * phase waiting for the response of a negotiation, we will
+ * get SIR_NEGO_FAILED at dispatch.
+ */
+ if (!(dbc & 0xc0000000))
+ phase = (dbc >> 24) & 7;
+ if (phase == 7)
+ msg = MSG_PARITY_ERROR;
+ else
+ msg = INITIATOR_ERROR;
+
+
+ /*
+ * If the NCR stopped on a MOVE ^ DATA_IN, we jump to a
+ * script that will ignore all data in bytes until phase
+ * change, since we are not sure the chip will wait the phase
+ * change prior to delivering the interrupt.
+ */
+ if (phase == 1)
+ jmp = NCB_SCRIPTH_PHYS (np, par_err_data_in);
+ else
+ jmp = NCB_SCRIPTH_PHYS (np, par_err_other);
+
+ OUTONB (nc_ctest3, CLF ); /* clear dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
+
+ np->msgout[0] = msg;
+ OUTL_DSP (jmp);
+ return 1;
+
+reset_all:
+ ncr_start_reset(np);
+ return 1;
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for phase errors.
+**
+**
+**==========================================================
+**
+** We have to construct a new transfer descriptor,
+** to transfer the rest of the current block.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_int_ma (struct ncb *np)
+{
+ u32 dbc;
+ u32 rest;
+ u32 dsp;
+ u32 dsa;
+ u32 nxtdsp;
+ u32 newtmp;
+ u32 *vdsp;
+ u32 oadr, olen;
+ u32 *tblp;
+ ncrcmd *newcmd;
+ u_char cmd, sbcl;
+ struct ccb *cp;
+
+ dsp = INL (nc_dsp);
+ dbc = INL (nc_dbc);
+ sbcl = INB (nc_sbcl);
+
+ cmd = dbc >> 24;
+ rest = dbc & 0xffffff;
+
+ /*
+ ** Take into account dma fifo and various buffers and latches,
+ ** only if the interrupted phase is an OUTPUT phase.
+ */
+
+ if ((cmd & 1) == 0) {
+ u_char ctest5, ss0, ss2;
+ u16 delta;
+
+ ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0;
+ if (ctest5 & DFS)
+ delta=(((ctest5 << 8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff;
+ else
+ delta=(INB (nc_dfifo) - rest) & 0x7f;
+
+ /*
+ ** The data in the dma fifo has not been transferred to
+ ** the target -> add the amount to the rest
+ ** and clear the data.
+ ** Check the sstat2 register in case of wide transfer.
+ */
+
+ rest += delta;
+ ss0 = INB (nc_sstat0);
+ if (ss0 & OLF) rest++;
+ if (ss0 & ORF) rest++;
+ if (INB(nc_scntl3) & EWS) {
+ ss2 = INB (nc_sstat2);
+ if (ss2 & OLF1) rest++;
+ if (ss2 & ORF1) rest++;
+ }
+
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printk ("P%x%x RL=%d D=%d SS0=%x ", cmd&7, sbcl&7,
+ (unsigned) rest, (unsigned) delta, ss0);
+
+ } else {
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printk ("P%x%x RL=%d ", cmd&7, sbcl&7, rest);
+ }
+
+ /*
+ ** Clear fifos.
+ */
+ OUTONB (nc_ctest3, CLF ); /* clear dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
+
+ /*
+ ** locate matching cp.
+ ** if the interrupted phase is DATA IN or DATA OUT,
+ ** trust the global header.
+ */
+ dsa = INL (nc_dsa);
+ if (!(cmd & 6)) {
+ cp = np->header.cp;
+ if (CCB_PHYS(cp, phys) != dsa)
+ cp = NULL;
+ } else {
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+ }
+
+ /*
+ ** try to find the interrupted script command,
+ ** and the address at which to continue.
+ */
+ vdsp = NULL;
+ nxtdsp = 0;
+ if (dsp > np->p_script &&
+ dsp <= np->p_script + sizeof(struct script)) {
+ vdsp = (u32 *)((char*)np->script0 + (dsp-np->p_script-8));
+ nxtdsp = dsp;
+ }
+ else if (dsp > np->p_scripth &&
+ dsp <= np->p_scripth + sizeof(struct scripth)) {
+ vdsp = (u32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8));
+ nxtdsp = dsp;
+ }
+ else if (cp) {
+ if (dsp == CCB_PHYS (cp, patch[2])) {
+ vdsp = &cp->patch[0];
+ nxtdsp = scr_to_cpu(vdsp[3]);
+ }
+ else if (dsp == CCB_PHYS (cp, patch[6])) {
+ vdsp = &cp->patch[4];
+ nxtdsp = scr_to_cpu(vdsp[3]);
+ }
+ }
+
+ /*
+ ** log the information
+ */
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printk ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+ cp, np->header.cp,
+ (unsigned)dsp,
+ (unsigned)nxtdsp, vdsp, cmd);
+ }
+
+ /*
+ ** cp=0 means that the DSA does not point to a valid control
+ ** block. This should not happen since we donnot use multi-byte
+ ** move while we are being reselected ot after command complete.
+ ** We are not able to recover from such a phase error.
+ */
+ if (!cp) {
+ printk ("%s: SCSI phase error fixup: "
+ "CCB already dequeued (0x%08lx)\n",
+ ncr_name (np), (u_long) np->header.cp);
+ goto reset_all;
+ }
+
+ /*
+ ** get old startaddress and old length.
+ */
+
+ oadr = scr_to_cpu(vdsp[1]);
+
+ if (cmd & 0x10) { /* Table indirect */
+ tblp = (u32 *) ((char*) &cp->phys + oadr);
+ olen = scr_to_cpu(tblp[0]);
+ oadr = scr_to_cpu(tblp[1]);
+ } else {
+ tblp = (u32 *) 0;
+ olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+ (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+ tblp,
+ (unsigned) olen,
+ (unsigned) oadr);
+ }
+
+ /*
+ ** check cmd against assumed interrupted script command.
+ */
+
+ if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) {
+ PRINT_ADDR(cp->cmd, "internal error: cmd=%02x != %02x=(vdsp[0] "
+ ">> 24)\n", cmd, scr_to_cpu(vdsp[0]) >> 24);
+
+ goto reset_all;
+ }
+
+ /*
+ ** cp != np->header.cp means that the header of the CCB
+ ** currently being processed has not yet been copied to
+ ** the global header area. That may happen if the device did
+ ** not accept all our messages after having been selected.
+ */
+ if (cp != np->header.cp) {
+ printk ("%s: SCSI phase error fixup: "
+ "CCB address mismatch (0x%08lx != 0x%08lx)\n",
+ ncr_name (np), (u_long) cp, (u_long) np->header.cp);
+ }
+
+ /*
+ ** if old phase not dataphase, leave here.
+ */
+
+ if (cmd & 0x06) {
+ PRINT_ADDR(cp->cmd, "phase change %x-%x %d@%08x resid=%d.\n",
+ cmd&7, sbcl&7, (unsigned)olen,
+ (unsigned)oadr, (unsigned)rest);
+ goto unexpected_phase;
+ }
+
+ /*
+ ** choose the correct patch area.
+ ** if savep points to one, choose the other.
+ */
+
+ newcmd = cp->patch;
+ newtmp = CCB_PHYS (cp, patch);
+ if (newtmp == scr_to_cpu(cp->phys.header.savep)) {
+ newcmd = &cp->patch[4];
+ newtmp = CCB_PHYS (cp, patch[4]);
+ }
+
+ /*
+ ** fillin the commands
+ */
+
+ newcmd[0] = cpu_to_scr(((cmd & 0x0f) << 24) | rest);
+ newcmd[1] = cpu_to_scr(oadr + olen - rest);
+ newcmd[2] = cpu_to_scr(SCR_JUMP);
+ newcmd[3] = cpu_to_scr(nxtdsp);
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ PRINT_ADDR(cp->cmd, "newcmd[%d] %x %x %x %x.\n",
+ (int) (newcmd - cp->patch),
+ (unsigned)scr_to_cpu(newcmd[0]),
+ (unsigned)scr_to_cpu(newcmd[1]),
+ (unsigned)scr_to_cpu(newcmd[2]),
+ (unsigned)scr_to_cpu(newcmd[3]));
+ }
+ /*
+ ** fake the return address (to the patch).
+ ** and restart script processor at dispatcher.
+ */
+ OUTL (nc_temp, newtmp);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
+ return;
+
+ /*
+ ** Unexpected phase changes that occurs when the current phase
+ ** is not a DATA IN or DATA OUT phase are due to error conditions.
+ ** Such event may only happen when the SCRIPTS is using a
+ ** multibyte SCSI MOVE.
+ **
+ ** Phase change Some possible cause
+ **
+ ** COMMAND --> MSG IN SCSI parity error detected by target.
+ ** COMMAND --> STATUS Bad command or refused by target.
+ ** MSG OUT --> MSG IN Message rejected by target.
+ ** MSG OUT --> COMMAND Bogus target that discards extended
+ ** negotiation messages.
+ **
+ ** The code below does not care of the new phase and so
+ ** trusts the target. Why to annoy it ?
+ ** If the interrupted phase is COMMAND phase, we restart at
+ ** dispatcher.
+ ** If a target does not get all the messages after selection,
+ ** the code assumes blindly that the target discards extended
+ ** messages and clears the negotiation status.
+ ** If the target does not want all our response to negotiation,
+ ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
+ ** bloat for such a should_not_happen situation).
+ ** In all other situation, we reset the BUS.
+ ** Are these assumptions reasonable ? (Wait and see ...)
+ */
+unexpected_phase:
+ dsp -= 8;
+ nxtdsp = 0;
+
+ switch (cmd & 7) {
+ case 2: /* COMMAND phase */
+ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+ break;
+#if 0
+ case 3: /* STATUS phase */
+ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+ break;
+#endif
+ case 6: /* MSG OUT phase */
+ np->scripth->nxtdsp_go_on[0] = cpu_to_scr(dsp + 8);
+ if (dsp == NCB_SCRIPT_PHYS (np, send_ident)) {
+ cp->host_status = HS_BUSY;
+ nxtdsp = NCB_SCRIPTH_PHYS (np, clratn_go_on);
+ }
+ else if (dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) ||
+ dsp == NCB_SCRIPTH_PHYS (np, send_sdtr)) {
+ nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase);
+ }
+ break;
+#if 0
+ case 7: /* MSG IN phase */
+ nxtdsp = NCB_SCRIPT_PHYS (np, clrack);
+ break;
+#endif
+ }
+
+ if (nxtdsp) {
+ OUTL_DSP (nxtdsp);
+ return;
+ }
+
+reset_all:
+ ncr_start_reset(np);
+}
+
+
+static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
+{
+ struct scsi_cmnd *cmd = cp->cmd;
+ struct tcb *tp = &np->target[cmd->device->id];
+ struct lcb *lp = tp->lp[cmd->device->lun];
+ struct list_head *qp;
+ struct ccb * cp2;
+ int disc_cnt = 0;
+ int busy_cnt = 0;
+ u32 startp;
+ u_char s_status = INB (SS_PRT);
+
+ /*
+ ** Let the SCRIPTS processor skip all not yet started CCBs,
+ ** and count disconnected CCBs. Since the busy queue is in
+ ** the same order as the chip start queue, disconnected CCBs
+ ** are before cp and busy ones after.
+ */
+ if (lp) {
+ qp = lp->busy_ccbq.prev;
+ while (qp != &lp->busy_ccbq) {
+ cp2 = list_entry(qp, struct ccb, link_ccbq);
+ qp = qp->prev;
+ ++busy_cnt;
+ if (cp2 == cp)
+ break;
+ cp2->start.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, skip));
+ }
+ lp->held_ccb = cp; /* Requeue when this one completes */
+ disc_cnt = lp->queuedccbs - busy_cnt;
+ }
+
+ switch(s_status) {
+ default: /* Just for safety, should never happen */
+ case S_QUEUE_FULL:
+ /*
+ ** Decrease number of tags to the number of
+ ** disconnected commands.
+ */
+ if (!lp)
+ goto out;
+ if (bootverbose >= 1) {
+ PRINT_ADDR(cmd, "QUEUE FULL! %d busy, %d disconnected "
+ "CCBs\n", busy_cnt, disc_cnt);
+ }
+ if (disc_cnt < lp->numtags) {
+ lp->numtags = disc_cnt > 2 ? disc_cnt : 2;
+ lp->num_good = 0;
+ ncr_setup_tags (np, cmd->device);
+ }
+ /*
+ ** Requeue the command to the start queue.
+ ** If any disconnected commands,
+ ** Clear SIGP.
+ ** Jump to reselect.
+ */
+ cp->phys.header.savep = cp->startp;
+ cp->host_status = HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+
+ ncr_put_start_queue(np, cp);
+ if (disc_cnt)
+ INB (nc_ctest2); /* Clear SIGP */
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, reselect));
+ return;
+ case S_TERMINATED:
+ case S_CHECK_COND:
+ /*
+ ** If we were requesting sense, give up.
+ */
+ if (cp->auto_sense)
+ goto out;
+
+ /*
+ ** Device returned CHECK CONDITION status.
+ ** Prepare all needed data strutures for getting
+ ** sense data.
+ **
+ ** identify message
+ */
+ cp->scsi_smsg2[0] = IDENTIFY(0, cmd->device->lun);
+ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
+ cp->phys.smsg.size = cpu_to_scr(1);
+
+ /*
+ ** sense command
+ */
+ cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd));
+ cp->phys.cmd.size = cpu_to_scr(6);
+
+ /*
+ ** patch requested size into sense command
+ */
+ cp->sensecmd[0] = 0x03;
+ cp->sensecmd[1] = (cmd->device->lun & 0x7) << 5;
+ cp->sensecmd[4] = sizeof(cp->sense_buf);
+
+ /*
+ ** sense data
+ */
+ memset(cp->sense_buf, 0, sizeof(cp->sense_buf));
+ cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0]));
+ cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf));
+
+ /*
+ ** requeue the command.
+ */
+ startp = cpu_to_scr(NCB_SCRIPTH_PHYS (np, sdata_in));
+
+ cp->phys.header.savep = startp;
+ cp->phys.header.goalp = startp + 24;
+ cp->phys.header.lastp = startp;
+ cp->phys.header.wgoalp = startp + 24;
+ cp->phys.header.wlastp = startp;
+
+ cp->host_status = HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->auto_sense = s_status;
+
+ cp->start.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+
+ /*
+ ** Select without ATN for quirky devices.
+ */
+ if (cmd->device->select_no_atn)
+ cp->start.schedule.l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, select_no_atn));
+
+ ncr_put_start_queue(np, cp);
+
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+ return;
+ }
+
+out:
+ OUTONB_STD ();
+ return;
+}
+
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for programmed interrupts.
+**
+**
+**==========================================================
+*/
+
+void ncr_int_sir (struct ncb *np)
+{
+ u_char scntl3;
+ u_char chg, ofs, per, fak, wide;
+ u_char num = INB (nc_dsps);
+ struct ccb *cp=NULL;
+ u_long dsa = INL (nc_dsa);
+ u_char target = INB (nc_sdid) & 0x0f;
+ struct tcb *tp = &np->target[target];
+ struct scsi_target *starget = tp->starget;
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("I#%d", num);
+
+ switch (num) {
+ case SIR_INTFLY:
+ /*
+ ** This is used for HP Zalon/53c720 where INTFLY
+ ** operation is currently broken.
+ */
+ ncr_wakeup_done(np);
+#ifdef SCSI_NCR_CCB_DONE_SUPPORT
+ OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, done_end) + 8);
+#else
+ OUTL(nc_dsp, NCB_SCRIPT_PHYS (np, start));
+#endif
+ return;
+ case SIR_RESEL_NO_MSG_IN:
+ case SIR_RESEL_NO_IDENTIFY:
+ /*
+ ** If devices reselecting without sending an IDENTIFY
+ ** message still exist, this should help.
+ ** We just assume lun=0, 1 CCB, no tag.
+ */
+ if (tp->lp[0]) {
+ OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
+ return;
+ }
+ case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
+ case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */
+ case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */
+ case SIR_RESEL_BAD_I_T_L: /* Will send an ABORT message */
+ printk ("%s:%d: SIR %d, "
+ "incorrect nexus identification on reselection\n",
+ ncr_name (np), target, num);
+ goto out;
+ case SIR_DONE_OVERFLOW:
+ printk ("%s:%d: SIR %d, "
+ "CCB done queue overflow\n",
+ ncr_name (np), target, num);
+ goto out;
+ case SIR_BAD_STATUS:
+ cp = np->header.cp;
+ if (!cp || CCB_PHYS (cp, phys) != dsa)
+ goto out;
+ ncr_sir_to_redo(np, num, cp);
+ return;
+ default:
+ /*
+ ** lookup the ccb
+ */
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ BUG_ON(!cp);
+ BUG_ON(cp != np->header.cp);
+
+ if (!cp || cp != np->header.cp)
+ goto out;
+ }
+
+ switch (num) {
+/*-----------------------------------------------------------------------------
+**
+** Was Sie schon immer ueber transfermode negotiation wissen wollten ...
+** ("Everything you've always wanted to know about transfer mode
+** negotiation")
+**
+** We try to negotiate sync and wide transfer only after
+** a successful inquire command. We look at byte 7 of the
+** inquire data to determine the capabilities of the target.
+**
+** When we try to negotiate, we append the negotiation message
+** to the identify and (maybe) simple tag message.
+** The host status field is set to HS_NEGOTIATE to mark this
+** situation.
+**
+** If the target doesn't answer this message immediately
+** (as required by the standard), the SIR_NEGO_FAIL interrupt
+** will be raised eventually.
+** The handler removes the HS_NEGOTIATE status, and sets the
+** negotiated value to the default (async / nowide).
+**
+** If we receive a matching answer immediately, we check it
+** for validity, and set the values.
+**
+** If we receive a Reject message immediately, we assume the
+** negotiation has failed, and fall back to standard values.
+**
+** If we receive a negotiation message while not in HS_NEGOTIATE
+** state, it's a target initiated negotiation. We prepare a
+** (hopefully) valid answer, set our parameters, and send back
+** this answer to the target.
+**
+** If the target doesn't fetch the answer (no message out phase),
+** we assume the negotiation has failed, and fall back to default
+** settings.
+**
+** When we set the values, we adjust them in all ccbs belonging
+** to this target, in the controller's register, and in the "phys"
+** field of the controller's struct ncb.
+**
+** Possible cases: hs sir msg_in value send goto
+** We try to negotiate:
+** -> target doesn't msgin NEG FAIL noop defa. - dispatch
+** -> target rejected our msg NEG FAIL reject defa. - dispatch
+** -> target answered (ok) NEG SYNC sdtr set - clrack
+** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad
+** -> target answered (ok) NEG WIDE wdtr set - clrack
+** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad
+** -> any other msgin NEG FAIL noop defa. - dispatch
+**
+** Target tries to negotiate:
+** -> incoming message --- SYNC sdtr set SDTR -
+** -> incoming message --- WIDE wdtr set WDTR -
+** We sent our answer:
+** -> target doesn't msgout --- PROTO ? defa. - dispatch
+**
+**-----------------------------------------------------------------------------
+*/
+
+ case SIR_NEGO_FAILED:
+ /*-------------------------------------------------------
+ **
+ ** Negotiation failed.
+ ** Target doesn't send an answer message,
+ ** or target rejected our message.
+ **
+ ** Remove negotiation request.
+ **
+ **-------------------------------------------------------
+ */
+ OUTB (HS_PRT, HS_BUSY);
+
+ /* fall through */
+
+ case SIR_NEGO_PROTO:
+ /*-------------------------------------------------------
+ **
+ ** Negotiation failed.
+ ** Target doesn't fetch the answer message.
+ **
+ **-------------------------------------------------------
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd, "negotiation failed sir=%x "
+ "status=%x.\n", num, cp->nego_status);
+ }
+
+ /*
+ ** any error in negotiation:
+ ** fall back to default mode.
+ */
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ spi_period(starget) = 0;
+ spi_offset(starget) = 0;
+ ncr_setsync (np, cp, 0, 0xe0);
+ break;
+
+ case NS_WIDE:
+ spi_width(starget) = 0;
+ ncr_setwide (np, cp, 0, 0);
+ break;
+
+ }
+ np->msgin [0] = NOP;
+ np->msgout[0] = NOP;
+ cp->nego_status = 0;
+ break;
+
+ case SIR_NEGO_SYNC:
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "sync msgin", np->msgin);
+ }
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[4];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends SDTR message,
+ ** it CAN transfer synch.
+ */
+
+ if (ofs && starget)
+ spi_support_sync(starget) = 1;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ if (ofs != 0) {
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ tp->minsync = 0;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd, "sync: per=%d scntl3=0x%x ofs=%d "
+ "fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ /* This was an answer message */
+ if (chg) {
+ /* Answer wasn't acceptable. */
+ spi_period(starget) = 0;
+ spi_offset(starget) = 0;
+ ncr_setsync(np, cp, 0, 0xe0);
+ OUTL_DSP(NCB_SCRIPT_PHYS (np, msg_bad));
+ } else {
+ /* Answer is ok. */
+ spi_period(starget) = per;
+ spi_offset(starget) = ofs;
+ ncr_setsync(np, cp, scntl3, (fak<<5)|ofs);
+ OUTL_DSP(NCB_SCRIPT_PHYS (np, clrack));
+ }
+ return;
+
+ case NS_WIDE:
+ spi_width(starget) = 0;
+ ncr_setwide(np, cp, 0, 0);
+ break;
+ }
+ }
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ */
+
+ spi_period(starget) = per;
+ spi_offset(starget) = ofs;
+ ncr_setsync(np, cp, scntl3, (fak<<5)|ofs);
+
+ spi_populate_sync_msg(np->msgout, per, ofs);
+ cp->nego_status = NS_SYNC;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "sync msgout", np->msgout);
+ }
+
+ if (!ofs) {
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad));
+ return;
+ }
+ np->msgin [0] = NOP;
+
+ break;
+
+ case SIR_NEGO_WIDE:
+ /*
+ ** Wide request message received.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "wide msgin", np->msgin);
+ }
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ wide = np->msgin[3];
+
+ /*
+ ** if target sends WDTR message,
+ ** it CAN transfer wide.
+ */
+
+ if (wide && starget)
+ spi_support_wide(starget) = 1;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wide > tp->usrwide)
+ {chg = 1; wide = tp->usrwide;}
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd, "wide: wide=%d chg=%d.\n", wide,
+ chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+
+ case NS_WIDE:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /* Answer wasn't acceptable. */
+ spi_width(starget) = 0;
+ ncr_setwide(np, cp, 0, 1);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad));
+ } else {
+ /* Answer is ok. */
+ spi_width(starget) = wide;
+ ncr_setwide(np, cp, wide, 1);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ }
+ return;
+
+ case NS_SYNC:
+ spi_period(starget) = 0;
+ spi_offset(starget) = 0;
+ ncr_setsync(np, cp, 0, 0xe0);
+ break;
+ }
+ }
+
+ /*
+ ** It was a request, set value and
+ ** prepare an answer message
+ */
+
+ spi_width(starget) = wide;
+ ncr_setwide(np, cp, wide, 1);
+ spi_populate_width_msg(np->msgout, wide);
+
+ np->msgin [0] = NOP;
+
+ cp->nego_status = NS_WIDE;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "wide msgout", np->msgin);
+ }
+ break;
+
+/*--------------------------------------------------------------------
+**
+** Processing of special messages
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_REJECT_RECEIVED:
+ /*-----------------------------------------------
+ **
+ ** We received a MESSAGE_REJECT.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd, "MESSAGE_REJECT received (%x:%x).\n",
+ (unsigned)scr_to_cpu(np->lastmsg), np->msgout[0]);
+ break;
+
+ case SIR_REJECT_SENT:
+ /*-----------------------------------------------
+ **
+ ** We received an unknown message
+ **
+ **-----------------------------------------------
+ */
+
+ ncr_print_msg(cp, "MESSAGE_REJECT sent for", np->msgin);
+ break;
+
+/*--------------------------------------------------------------------
+**
+** Processing of special messages
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_IGN_RESIDUE:
+ /*-----------------------------------------------
+ **
+ ** We received an IGNORE RESIDUE message,
+ ** which couldn't be handled by the script.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd, "IGNORE_WIDE_RESIDUE received, but not yet "
+ "implemented.\n");
+ break;
+#if 0
+ case SIR_MISSING_SAVE:
+ /*-----------------------------------------------
+ **
+ ** We received an DISCONNECT message,
+ ** but the datapointer wasn't saved before.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd, "DISCONNECT received, but datapointer "
+ "not saved: data=%x save=%x goal=%x.\n",
+ (unsigned) INL (nc_temp),
+ (unsigned) scr_to_cpu(np->header.savep),
+ (unsigned) scr_to_cpu(np->header.goalp));
+ break;
+#endif
+ }
+
+out:
+ OUTONB_STD ();
+}
+
+/*==========================================================
+**
+**
+** Acquire a control block
+**
+**
+**==========================================================
+*/
+
+static struct ccb *ncr_get_ccb(struct ncb *np, struct scsi_cmnd *cmd)
+{
+ u_char tn = cmd->device->id;
+ u_char ln = cmd->device->lun;
+ struct tcb *tp = &np->target[tn];
+ struct lcb *lp = tp->lp[ln];
+ u_char tag = NO_TAG;
+ struct ccb *cp = NULL;
+
+ /*
+ ** Lun structure available ?
+ */
+ if (lp) {
+ struct list_head *qp;
+ /*
+ ** Keep from using more tags than we can handle.
+ */
+ if (lp->usetags && lp->busyccbs >= lp->maxnxs)
+ return NULL;
+
+ /*
+ ** Allocate a new CCB if needed.
+ */
+ if (list_empty(&lp->free_ccbq))
+ ncr_alloc_ccb(np, tn, ln);
+
+ /*
+ ** Look for free CCB
+ */
+ qp = ncr_list_pop(&lp->free_ccbq);
+ if (qp) {
+ cp = list_entry(qp, struct ccb, link_ccbq);
+ if (cp->magic) {
+ PRINT_ADDR(cmd, "ccb free list corrupted "
+ "(@%p)\n", cp);
+ cp = NULL;
+ } else {
+ list_add_tail(qp, &lp->wait_ccbq);
+ ++lp->busyccbs;
+ }
+ }
+
+ /*
+ ** If a CCB is available,
+ ** Get a tag for this nexus if required.
+ */
+ if (cp) {
+ if (lp->usetags)
+ tag = lp->cb_tags[lp->ia_tag];
+ }
+ else if (lp->actccbs > 0)
+ return NULL;
+ }
+
+ /*
+ ** if nothing available, take the default.
+ */
+ if (!cp)
+ cp = np->ccb;
+
+ /*
+ ** Wait until available.
+ */
+#if 0
+ while (cp->magic) {
+ if (flags & SCSI_NOSLEEP) break;
+ if (tsleep ((caddr_t)cp, PRIBIO|PCATCH, "ncr", 0))
+ break;
+ }
+#endif
+
+ if (cp->magic)
+ return NULL;
+
+ cp->magic = 1;
+
+ /*
+ ** Move to next available tag if tag used.
+ */
+ if (lp) {
+ if (tag != NO_TAG) {
+ ++lp->ia_tag;
+ if (lp->ia_tag == MAX_TAGS)
+ lp->ia_tag = 0;
+ lp->tags_umap |= (((tagmap_t) 1) << tag);
+ }
+ }
+
+ /*
+ ** Remember all informations needed to free this CCB.
+ */
+ cp->tag = tag;
+ cp->target = tn;
+ cp->lun = ln;
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_ADDR(cmd, "ccb @%p using tag %d.\n", cp, tag);
+ }
+
+ return cp;
+}
+
+/*==========================================================
+**
+**
+** Release one control block
+**
+**
+**==========================================================
+*/
+
+static void ncr_free_ccb (struct ncb *np, struct ccb *cp)
+{
+ struct tcb *tp = &np->target[cp->target];
+ struct lcb *lp = tp->lp[cp->lun];
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_ADDR(cp->cmd, "ccb @%p freeing tag %d.\n", cp, cp->tag);
+ }
+
+ /*
+ ** If lun control block available,
+ ** decrement active commands and increment credit,
+ ** free the tag if any and remove the JUMP for reselect.
+ */
+ if (lp) {
+ if (cp->tag != NO_TAG) {
+ lp->cb_tags[lp->if_tag++] = cp->tag;
+ if (lp->if_tag == MAX_TAGS)
+ lp->if_tag = 0;
+ lp->tags_umap &= ~(((tagmap_t) 1) << cp->tag);
+ lp->tags_smap &= lp->tags_umap;
+ lp->jump_ccb[cp->tag] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l_q));
+ } else {
+ lp->jump_ccb[0] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l));
+ }
+ }
+
+ /*
+ ** Make this CCB available.
+ */
+
+ if (lp) {
+ if (cp != np->ccb)
+ list_move(&cp->link_ccbq, &lp->free_ccbq);
+ --lp->busyccbs;
+ if (cp->queued) {
+ --lp->queuedccbs;
+ }
+ }
+ cp -> host_status = HS_IDLE;
+ cp -> magic = 0;
+ if (cp->queued) {
+ --np->queuedccbs;
+ cp->queued = 0;
+ }
+
+#if 0
+ if (cp == np->ccb)
+ wakeup ((caddr_t) cp);
+#endif
+}
+
+
+#define ncr_reg_bus_addr(r) (np->paddr + offsetof (struct ncr_reg, r))
+
+/*------------------------------------------------------------------------
+** Initialize the fixed part of a CCB structure.
+**------------------------------------------------------------------------
+**------------------------------------------------------------------------
+*/
+static void ncr_init_ccb(struct ncb *np, struct ccb *cp)
+{
+ ncrcmd copy_4 = np->features & FE_PFEN ? SCR_COPY(4) : SCR_COPY_F(4);
+
+ /*
+ ** Remember virtual and bus address of this ccb.
+ */
+ cp->p_ccb = vtobus(cp);
+ cp->phys.header.cp = cp;
+
+ /*
+ ** This allows list_del to work for the default ccb.
+ */
+ INIT_LIST_HEAD(&cp->link_ccbq);
+
+ /*
+ ** Initialyze the start and restart launch script.
+ **
+ ** COPY(4) @(...p_phys), @(dsa)
+ ** JUMP @(sched_point)
+ */
+ cp->start.setup_dsa[0] = cpu_to_scr(copy_4);
+ cp->start.setup_dsa[1] = cpu_to_scr(CCB_PHYS(cp, start.p_phys));
+ cp->start.setup_dsa[2] = cpu_to_scr(ncr_reg_bus_addr(nc_dsa));
+ cp->start.schedule.l_cmd = cpu_to_scr(SCR_JUMP);
+ cp->start.p_phys = cpu_to_scr(CCB_PHYS(cp, phys));
+
+ memcpy(&cp->restart, &cp->start, sizeof(cp->restart));
+
+ cp->start.schedule.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ cp->restart.schedule.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
+}
+
+
+/*------------------------------------------------------------------------
+** Allocate a CCB and initialize its fixed part.
+**------------------------------------------------------------------------
+**------------------------------------------------------------------------
+*/
+static void ncr_alloc_ccb(struct ncb *np, u_char tn, u_char ln)
+{
+ struct tcb *tp = &np->target[tn];
+ struct lcb *lp = tp->lp[ln];
+ struct ccb *cp = NULL;
+
+ /*
+ ** Allocate memory for this CCB.
+ */
+ cp = m_calloc_dma(sizeof(struct ccb), "CCB");
+ if (!cp)
+ return;
+
+ /*
+ ** Count it and initialyze it.
+ */
+ lp->actccbs++;
+ np->actccbs++;
+ memset(cp, 0, sizeof (*cp));
+ ncr_init_ccb(np, cp);
+
+ /*
+ ** Chain into wakeup list and free ccb queue and take it
+ ** into account for tagged commands.
+ */
+ cp->link_ccb = np->ccb->link_ccb;
+ np->ccb->link_ccb = cp;
+
+ list_add(&cp->link_ccbq, &lp->free_ccbq);
+}
+
+/*==========================================================
+**
+**
+** Allocation of resources for Targets/Luns/Tags.
+**
+**
+**==========================================================
+*/
+
+
+/*------------------------------------------------------------------------
+** Target control block initialisation.
+**------------------------------------------------------------------------
+** This data structure is fully initialized after a SCSI command
+** has been successfully completed for this target.
+** It contains a SCRIPT that is called on target reselection.
+**------------------------------------------------------------------------
+*/
+static void ncr_init_tcb (struct ncb *np, u_char tn)
+{
+ struct tcb *tp = &np->target[tn];
+ ncrcmd copy_1 = np->features & FE_PFEN ? SCR_COPY(1) : SCR_COPY_F(1);
+ int th = tn & 3;
+ int i;
+
+ /*
+ ** Jump to next tcb if SFBR does not match this target.
+ ** JUMP IF (SFBR != #target#), @(next tcb)
+ */
+ tp->jump_tcb.l_cmd =
+ cpu_to_scr((SCR_JUMP ^ IFFALSE (DATA (0x80 + tn))));
+ tp->jump_tcb.l_paddr = np->jump_tcb[th].l_paddr;
+
+ /*
+ ** Load the synchronous transfer register.
+ ** COPY @(tp->sval), @(sxfer)
+ */
+ tp->getscr[0] = cpu_to_scr(copy_1);
+ tp->getscr[1] = cpu_to_scr(vtobus (&tp->sval));
+#ifdef SCSI_NCR_BIG_ENDIAN
+ tp->getscr[2] = cpu_to_scr(ncr_reg_bus_addr(nc_sxfer) ^ 3);
+#else
+ tp->getscr[2] = cpu_to_scr(ncr_reg_bus_addr(nc_sxfer));
+#endif
+
+ /*
+ ** Load the timing register.
+ ** COPY @(tp->wval), @(scntl3)
+ */
+ tp->getscr[3] = cpu_to_scr(copy_1);
+ tp->getscr[4] = cpu_to_scr(vtobus (&tp->wval));
+#ifdef SCSI_NCR_BIG_ENDIAN
+ tp->getscr[5] = cpu_to_scr(ncr_reg_bus_addr(nc_scntl3) ^ 3);
+#else
+ tp->getscr[5] = cpu_to_scr(ncr_reg_bus_addr(nc_scntl3));
+#endif
+
+ /*
+ ** Get the IDENTIFY message and the lun.
+ ** CALL @script(resel_lun)
+ */
+ tp->call_lun.l_cmd = cpu_to_scr(SCR_CALL);
+ tp->call_lun.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_lun));
+
+ /*
+ ** Look for the lun control block of this nexus.
+ ** For i = 0 to 3
+ ** JUMP ^ IFTRUE (MASK (i, 3)), @(next_lcb)
+ */
+ for (i = 0 ; i < 4 ; i++) {
+ tp->jump_lcb[i].l_cmd =
+ cpu_to_scr((SCR_JUMP ^ IFTRUE (MASK (i, 3))));
+ tp->jump_lcb[i].l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_identify));
+ }
+
+ /*
+ ** Link this target control block to the JUMP chain.
+ */
+ np->jump_tcb[th].l_paddr = cpu_to_scr(vtobus (&tp->jump_tcb));
+
+ /*
+ ** These assert's should be moved at driver initialisations.
+ */
+#ifdef SCSI_NCR_BIG_ENDIAN
+ BUG_ON(((offsetof(struct ncr_reg, nc_sxfer) ^
+ offsetof(struct tcb , sval )) &3) != 3);
+ BUG_ON(((offsetof(struct ncr_reg, nc_scntl3) ^
+ offsetof(struct tcb , wval )) &3) != 3);
+#else
+ BUG_ON(((offsetof(struct ncr_reg, nc_sxfer) ^
+ offsetof(struct tcb , sval )) &3) != 0);
+ BUG_ON(((offsetof(struct ncr_reg, nc_scntl3) ^
+ offsetof(struct tcb , wval )) &3) != 0);
+#endif
+}
+
+
+/*------------------------------------------------------------------------
+** Lun control block allocation and initialization.
+**------------------------------------------------------------------------
+** This data structure is allocated and initialized after a SCSI
+** command has been successfully completed for this target/lun.
+**------------------------------------------------------------------------
+*/
+static struct lcb *ncr_alloc_lcb (struct ncb *np, u_char tn, u_char ln)
+{
+ struct tcb *tp = &np->target[tn];
+ struct lcb *lp = tp->lp[ln];
+ ncrcmd copy_4 = np->features & FE_PFEN ? SCR_COPY(4) : SCR_COPY_F(4);
+ int lh = ln & 3;
+
+ /*
+ ** Already done, return.
+ */
+ if (lp)
+ return lp;
+
+ /*
+ ** Allocate the lcb.
+ */
+ lp = m_calloc_dma(sizeof(struct lcb), "LCB");
+ if (!lp)
+ goto fail;
+ memset(lp, 0, sizeof(*lp));
+ tp->lp[ln] = lp;
+
+ /*
+ ** Initialize the target control block if not yet.
+ */
+ if (!tp->jump_tcb.l_cmd)
+ ncr_init_tcb(np, tn);
+
+ /*
+ ** Initialize the CCB queue headers.
+ */
+ INIT_LIST_HEAD(&lp->free_ccbq);
+ INIT_LIST_HEAD(&lp->busy_ccbq);
+ INIT_LIST_HEAD(&lp->wait_ccbq);
+ INIT_LIST_HEAD(&lp->skip_ccbq);
+
+ /*
+ ** Set max CCBs to 1 and use the default 1 entry
+ ** jump table by default.
+ */
+ lp->maxnxs = 1;
+ lp->jump_ccb = &lp->jump_ccb_0;
+ lp->p_jump_ccb = cpu_to_scr(vtobus(lp->jump_ccb));
+
+ /*
+ ** Initilialyze the reselect script:
+ **
+ ** Jump to next lcb if SFBR does not match this lun.
+ ** Load TEMP with the CCB direct jump table bus address.
+ ** Get the SIMPLE TAG message and the tag.
+ **
+ ** JUMP IF (SFBR != #lun#), @(next lcb)
+ ** COPY @(lp->p_jump_ccb), @(temp)
+ ** JUMP @script(resel_notag)
+ */
+ lp->jump_lcb.l_cmd =
+ cpu_to_scr((SCR_JUMP ^ IFFALSE (MASK (0x80+ln, 0xff))));
+ lp->jump_lcb.l_paddr = tp->jump_lcb[lh].l_paddr;
+
+ lp->load_jump_ccb[0] = cpu_to_scr(copy_4);
+ lp->load_jump_ccb[1] = cpu_to_scr(vtobus (&lp->p_jump_ccb));
+ lp->load_jump_ccb[2] = cpu_to_scr(ncr_reg_bus_addr(nc_temp));
+
+ lp->jump_tag.l_cmd = cpu_to_scr(SCR_JUMP);
+ lp->jump_tag.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_notag));
+
+ /*
+ ** Link this lun control block to the JUMP chain.
+ */
+ tp->jump_lcb[lh].l_paddr = cpu_to_scr(vtobus (&lp->jump_lcb));
+
+ /*
+ ** Initialize command queuing control.
+ */
+ lp->busyccbs = 1;
+ lp->queuedccbs = 1;
+ lp->queuedepth = 1;
+fail:
+ return lp;
+}
+
+
+/*------------------------------------------------------------------------
+** Lun control block setup on INQUIRY data received.
+**------------------------------------------------------------------------
+** We only support WIDE, SYNC for targets and CMDQ for logical units.
+** This setup is done on each INQUIRY since we are expecting user
+** will play with CHANGE DEFINITION commands. :-)
+**------------------------------------------------------------------------
+*/
+static struct lcb *ncr_setup_lcb (struct ncb *np, struct scsi_device *sdev)
+{
+ unsigned char tn = sdev->id, ln = sdev->lun;
+ struct tcb *tp = &np->target[tn];
+ struct lcb *lp = tp->lp[ln];
+
+ /* If no lcb, try to allocate it. */
+ if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln)))
+ goto fail;
+
+ /*
+ ** If unit supports tagged commands, allocate the
+ ** CCB JUMP table if not yet.
+ */
+ if (sdev->tagged_supported && lp->jump_ccb == &lp->jump_ccb_0) {
+ int i;
+ lp->jump_ccb = m_calloc_dma(256, "JUMP_CCB");
+ if (!lp->jump_ccb) {
+ lp->jump_ccb = &lp->jump_ccb_0;
+ goto fail;
+ }
+ lp->p_jump_ccb = cpu_to_scr(vtobus(lp->jump_ccb));
+ for (i = 0 ; i < 64 ; i++)
+ lp->jump_ccb[i] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q));
+ for (i = 0 ; i < MAX_TAGS ; i++)
+ lp->cb_tags[i] = i;
+ lp->maxnxs = MAX_TAGS;
+ lp->tags_stime = jiffies + 3*HZ;
+ ncr_setup_tags (np, sdev);
+ }
+
+
+fail:
+ return lp;
+}
+
+/*==========================================================
+**
+**
+** Build Scatter Gather Block
+**
+**
+**==========================================================
+**
+** The transfer area may be scattered among
+** several non adjacent physical pages.
+**
+** We may use MAX_SCATTER blocks.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** We try to reduce the number of interrupts caused
+** by unexpected phase changes due to disconnects.
+** A typical harddisk may disconnect before ANY block.
+** If we wanted to avoid unexpected phase changes at all
+** we had to use a break point every 512 bytes.
+** Of course the number of scatter/gather blocks is
+** limited.
+** Under Linux, the scatter/gatter blocks are provided by
+** the generic driver. We just have to copy addresses and
+** sizes to the data segment array.
+*/
+
+static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
+{
+ int segment = 0;
+ int use_sg = scsi_sg_count(cmd);
+
+ cp->data_len = 0;
+
+ use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > 0) {
+ struct scatterlist *sg;
+ struct scr_tblmove *data;
+
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
+
+ data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+ scsi_for_each_sg(cmd, sg, use_sg, segment) {
+ dma_addr_t baddr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ ncr_build_sge(np, &data[segment], baddr, len);
+ cp->data_len += len;
+ }
+ } else
+ segment = -2;
+
+ return segment;
+}
+
+/*==========================================================
+**
+**
+** Test the bus snoop logic :-(
+**
+** Has to be called with interrupts disabled.
+**
+**
+**==========================================================
+*/
+
+static int __init ncr_regtest (struct ncb* np)
+{
+ register volatile u32 data;
+ /*
+ ** ncr registers may NOT be cached.
+ ** write 0xffffffff to a read only register area,
+ ** and try to read it back.
+ */
+ data = 0xffffffff;
+ OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+ data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+#if 1
+ if (data == 0xffffffff) {
+#else
+ if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+ printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+ (unsigned) data);
+ return (0x10);
+ }
+ return (0);
+}
+
+static int __init ncr_snooptest (struct ncb* np)
+{
+ u32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
+ int i, err=0;
+ if (np->reg) {
+ err |= ncr_regtest (np);
+ if (err)
+ return (err);
+ }
+
+ /* init */
+ pc = NCB_SCRIPTH_PHYS (np, snooptest);
+ host_wr = 1;
+ ncr_wr = 2;
+ /*
+ ** Set memory and register.
+ */
+ np->ncr_cache = cpu_to_scr(host_wr);
+ OUTL (nc_temp, ncr_wr);
+ /*
+ ** Start script (exchange values)
+ */
+ OUTL_DSP (pc);
+ /*
+ ** Wait 'til done (with timeout)
+ */
+ for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+ if (INB(nc_istat) & (INTF|SIP|DIP))
+ break;
+ /*
+ ** Save termination position.
+ */
+ pc = INL (nc_dsp);
+ /*
+ ** Read memory and register.
+ */
+ host_rd = scr_to_cpu(np->ncr_cache);
+ ncr_rd = INL (nc_scratcha);
+ ncr_bk = INL (nc_temp);
+ /*
+ ** Reset ncr chip
+ */
+ ncr_chip_reset(np, 100);
+ /*
+ ** check for timeout
+ */
+ if (i>=NCR_SNOOP_TIMEOUT) {
+ printk ("CACHE TEST FAILED: timeout.\n");
+ return (0x20);
+ }
+ /*
+ ** Check termination position.
+ */
+ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) {
+ printk ("CACHE TEST FAILED: script execution failed.\n");
+ printk ("start=%08lx, pc=%08lx, end=%08lx\n",
+ (u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc,
+ (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8);
+ return (0x40);
+ }
+ /*
+ ** Show results.
+ */
+ if (host_wr != ncr_rd) {
+ printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+ (int) host_wr, (int) ncr_rd);
+ err |= 1;
+ }
+ if (host_rd != ncr_wr) {
+ printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+ (int) ncr_wr, (int) host_rd);
+ err |= 2;
+ }
+ if (ncr_bk != ncr_wr) {
+ printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+ (int) ncr_wr, (int) ncr_bk);
+ err |= 4;
+ }
+ return (err);
+}
+
+/*==========================================================
+**
+** Determine the ncr's clock frequency.
+** This is essential for the negotiation
+** of the synchronous transfer rate.
+**
+**==========================================================
+**
+** Note: we have to return the correct value.
+** THERE IS NO SAFE DEFAULT VALUE.
+**
+** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
+** 53C860 and 53C875 rev. 1 support fast20 transfers but
+** do not have a clock doubler and so are provided with a
+** 80 MHz clock. All other fast20 boards incorporate a doubler
+** and so should be delivered with a 40 MHz clock.
+** The future fast40 chips (895/895) use a 40 Mhz base clock
+** and provide a clock quadrupler (160 Mhz). The code below
+** tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ * Select NCR SCSI clock frequency
+ */
+static void ncr_selectclock(struct ncb *np, u_char scntl3)
+{
+ if (np->multiplier < 2) {
+ OUTB(nc_scntl3, scntl3);
+ return;
+ }
+
+ if (bootverbose >= 2)
+ printk ("%s: enabling clock multiplier\n", ncr_name(np));
+
+ OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
+ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */
+ int i = 20;
+ while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+ udelay(20);
+ if (!i)
+ printk("%s: the chip cannot lock the frequency\n", ncr_name(np));
+ } else /* Wait 20 micro-seconds for doubler */
+ udelay(20);
+ OUTB(nc_stest3, HSC); /* Halt the scsi clock */
+ OUTB(nc_scntl3, scntl3);
+ OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
+ OUTB(nc_stest3, 0x00); /* Restart scsi clock */
+}
+
+
+/*
+ * calculate NCR SCSI clock frequency (in KHz)
+ */
+static unsigned __init ncrgetfreq (struct ncb *np, int gen)
+{
+ unsigned ms = 0;
+ char count = 0;
+
+ /*
+ * Measure GEN timer delay in order
+ * to calculate SCSI clock frequency
+ *
+ * This code will never execute too
+ * many loop iterations (if DELAY is
+ * reasonably correct). It could get
+ * too low a delay (too high a freq.)
+ * if the CPU is slow executing the
+ * loop for some reason (an NMI, for
+ * example). For this reason we will
+ * if multiple measurements are to be
+ * performed trust the higher delay
+ * (lower frequency returned).
+ */
+ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */
+ OUTW (nc_sien , 0); /* mask all scsi interrupts */
+ (void) INW (nc_sist); /* clear pending scsi interrupt */
+ OUTB (nc_dien , 0); /* mask all dma interrupts */
+ (void) INW (nc_sist); /* another one, just to be sure :) */
+ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+ while (!(INW(nc_sist) & GEN) && ms++ < 100000) {
+ for (count = 0; count < 10; count ++)
+ udelay(100); /* count ms */
+ }
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ /*
+ * set prescaler to divide by whatever 0 means
+ * 0 ought to choose divide by 2, but appears
+ * to set divide by 3.5 mode in my 53c810 ...
+ */
+ OUTB (nc_scntl3, 0);
+
+ if (bootverbose >= 2)
+ printk ("%s: Delay (GEN=%d): %u msec\n", ncr_name(np), gen, ms);
+ /*
+ * adjust for prescaler, and convert into KHz
+ */
+ return ms ? ((1 << gen) * 4340) / ms : 0;
+}
+
+/*
+ * Get/probe NCR SCSI clock frequency
+ */
+static void __init ncr_getclock (struct ncb *np, int mult)
+{
+ unsigned char scntl3 = INB(nc_scntl3);
+ unsigned char stest1 = INB(nc_stest1);
+ unsigned f1;
+
+ np->multiplier = 1;
+ f1 = 40000;
+
+ /*
+ ** True with 875 or 895 with clock multiplier selected
+ */
+ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier found\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+
+ /*
+ ** If multiplier not found or scntl3 not 7,5,3,
+ ** reset chip and get frequency from general purpose timer.
+ ** Otherwise trust scntl3 BIOS setting.
+ */
+ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+ unsigned f2;
+
+ ncr_chip_reset(np, 5);
+
+ (void) ncrgetfreq (np, 11); /* throw away first result */
+ f1 = ncrgetfreq (np, 11);
+ f2 = ncrgetfreq (np, 11);
+
+ if(bootverbose)
+ printk ("%s: NCR clock is %uKHz, %uKHz\n", ncr_name(np), f1, f2);
+
+ if (f1 > f2) f1 = f2; /* trust lower result */
+
+ if (f1 < 45000) f1 = 40000;
+ else if (f1 < 55000) f1 = 50000;
+ else f1 = 80000;
+
+ if (f1 < 80000 && mult > 1) {
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier assumed\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+ } else {
+ if ((scntl3 & 7) == 3) f1 = 40000;
+ else if ((scntl3 & 7) == 5) f1 = 80000;
+ else f1 = 160000;
+
+ f1 /= np->multiplier;
+ }
+
+ /*
+ ** Compute controller synchronous parameters.
+ */
+ f1 *= np->multiplier;
+ np->clock_khz = f1;
+}
+
+/*===================== LINUX ENTRY POINTS SECTION ==========================*/
+
+static int ncr53c8xx_slave_alloc(struct scsi_device *device)
+{
+ struct Scsi_Host *host = device->host;
+ struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
+ struct tcb *tp = &np->target[device->id];
+ tp->starget = device->sdev_target;
+
+ return 0;
+}
+
+static int ncr53c8xx_slave_configure(struct scsi_device *device)
+{
+ struct Scsi_Host *host = device->host;
+ struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
+ struct tcb *tp = &np->target[device->id];
+ struct lcb *lp = tp->lp[device->lun];
+ int numtags, depth_to_use;
+
+ ncr_setup_lcb(np, device);
+
+ /*
+ ** Select queue depth from driver setup.
+ ** Donnot use more than configured by user.
+ ** Use at least 2.
+ ** Donnot use more than our maximum.
+ */
+ numtags = device_queue_depth(np->unit, device->id, device->lun);
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ if (!device->tagged_supported)
+ numtags = 1;
+ depth_to_use = numtags;
+ if (depth_to_use < 2)
+ depth_to_use = 2;
+ if (depth_to_use > MAX_TAGS)
+ depth_to_use = MAX_TAGS;
+
+ scsi_change_queue_depth(device, depth_to_use);
+
+ /*
+ ** Since the queue depth is not tunable under Linux,
+ ** we need to know this value in order not to
+ ** announce stupid things to user.
+ **
+ ** XXX(hch): As of Linux 2.6 it certainly _is_ tunable..
+ ** In fact we just tuned it, or did I miss
+ ** something important? :)
+ */
+ if (lp) {
+ lp->numtags = lp->maxtags = numtags;
+ lp->scdev_depth = depth_to_use;
+ }
+ ncr_setup_tags (np, device);
+
+#ifdef DEBUG_NCR53C8XX
+ printk("ncr53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
+ np->unit, device->id, device->lun, depth_to_use);
+#endif
+
+ if (spi_support_sync(device->sdev_target) &&
+ !spi_initial_dv(device->sdev_target))
+ spi_dv_device(device);
+ return 0;
+}
+
+static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
+ unsigned long flags;
+ int sts;
+
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx_queue_command\n");
+#endif
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->__data_mapped = 0;
+ cmd->__data_mapping = 0;
+
+ spin_lock_irqsave(&np->smp_lock, flags);
+
+ if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
+ cmd->result = ScsiResult(sts, 0);
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : command not queued - result=%d\n", sts);
+#endif
+ }
+#ifdef DEBUG_NCR53C8XX
+ else
+printk("ncr53c8xx : command successfully queued\n");
+#endif
+
+ spin_unlock_irqrestore(&np->smp_lock, flags);
+
+ if (sts != DID_OK) {
+ unmap_scsi_data(np, cmd);
+ done(cmd);
+ sts = 0;
+ }
+
+ return sts;
+}
+
+static DEF_SCSI_QCMD(ncr53c8xx_queue_command)
+
+irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *shost = (struct Scsi_Host *)dev_id;
+ struct host_data *host_data = (struct host_data *)shost->hostdata;
+ struct ncb *np = host_data->ncb;
+ struct scsi_cmnd *done_list;
+
+#ifdef DEBUG_NCR53C8XX
+ printk("ncr53c8xx : interrupt received\n");
+#endif
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("[");
+
+ spin_lock_irqsave(&np->smp_lock, flags);
+ ncr_exception(np);
+ done_list = np->done_list;
+ np->done_list = NULL;
+ spin_unlock_irqrestore(&np->smp_lock, flags);
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
+
+ if (done_list)
+ ncr_flush_done_cmds(done_list);
+ return IRQ_HANDLED;
+}
+
+static void ncr53c8xx_timeout(unsigned long npref)
+{
+ struct ncb *np = (struct ncb *) npref;
+ unsigned long flags;
+ struct scsi_cmnd *done_list;
+
+ spin_lock_irqsave(&np->smp_lock, flags);
+ ncr_timeout(np);
+ done_list = np->done_list;
+ np->done_list = NULL;
+ spin_unlock_irqrestore(&np->smp_lock, flags);
+
+ if (done_list)
+ ncr_flush_done_cmds(done_list);
+}
+
+static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
+ int sts;
+ unsigned long flags;
+ struct scsi_cmnd *done_list;
+
+ /*
+ * If the mid-level driver told us reset is synchronous, it seems
+ * that we must call the done() callback for the involved command,
+ * even if this command was not queued to the low-level driver,
+ * before returning SUCCESS.
+ */
+
+ spin_lock_irqsave(&np->smp_lock, flags);
+ sts = ncr_reset_bus(np, cmd, 1);
+
+ done_list = np->done_list;
+ np->done_list = NULL;
+ spin_unlock_irqrestore(&np->smp_lock, flags);
+
+ ncr_flush_done_cmds(done_list);
+
+ return sts;
+}
+
+#if 0 /* unused and broken */
+static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
+{
+ struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
+ int sts;
+ unsigned long flags;
+ struct scsi_cmnd *done_list;
+
+ printk("ncr53c8xx_abort\n");
+
+ NCR_LOCK_NCB(np, flags);
+
+ sts = ncr_abort_command(np, cmd);
+out:
+ done_list = np->done_list;
+ np->done_list = NULL;
+ NCR_UNLOCK_NCB(np, flags);
+
+ ncr_flush_done_cmds(done_list);
+
+ return sts;
+}
+#endif
+
+
+/*
+** Scsi command waiting list management.
+**
+** It may happen that we cannot insert a scsi command into the start queue,
+** in the following circumstances.
+** Too few preallocated ccb(s),
+** maxtags < cmd_per_lun of the Linux host control block,
+** etc...
+** Such scsi commands are inserted into a waiting list.
+** When a scsi command complete, we try to requeue the commands of the
+** waiting list.
+*/
+
+#define next_wcmd host_scribble
+
+static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
+{
+ struct scsi_cmnd *wcmd;
+
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ cmd->next_wcmd = NULL;
+ if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
+ else {
+ while (wcmd->next_wcmd)
+ wcmd = (struct scsi_cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = (char *) cmd;
+ }
+}
+
+static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
+{
+ struct scsi_cmnd **pcmd = &np->waiting_list;
+
+ while (*pcmd) {
+ if (cmd == *pcmd) {
+ if (to_remove) {
+ *pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
+ cmd->next_wcmd = NULL;
+ }
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ return cmd;
+ }
+ pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
+ }
+ return NULL;
+}
+
+static void process_waiting_list(struct ncb *np, int sts)
+{
+ struct scsi_cmnd *waiting_list, *wcmd;
+
+ waiting_list = np->waiting_list;
+ np->waiting_list = NULL;
+
+#ifdef DEBUG_WAITING_LIST
+ if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
+#endif
+ while ((wcmd = waiting_list) != NULL) {
+ waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = NULL;
+ if (sts == DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
+#endif
+ sts = ncr_queue_command(np, wcmd);
+ }
+ if (sts != DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
+#endif
+ wcmd->result = ScsiResult(sts, 0);
+ ncr_queue_done_cmd(np, wcmd);
+ }
+ }
+}
+
+#undef next_wcmd
+
+static ssize_t show_ncr53c8xx_revision(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ struct host_data *host_data = (struct host_data *)host->hostdata;
+
+ return snprintf(buf, 20, "0x%x\n", host_data->ncb->revision_id);
+}
+
+static struct device_attribute ncr53c8xx_revision_attr = {
+ .attr = { .name = "revision", .mode = S_IRUGO, },
+ .show = show_ncr53c8xx_revision,
+};
+
+static struct device_attribute *ncr53c8xx_host_attrs[] = {
+ &ncr53c8xx_revision_attr,
+ NULL
+};
+
+/*==========================================================
+**
+** Boot command line.
+**
+**==========================================================
+*/
+#ifdef MODULE
+char *ncr53c8xx; /* command line passed by insmod */
+module_param(ncr53c8xx, charp, 0);
+#endif
+
+#ifndef MODULE
+static int __init ncr53c8xx_setup(char *str)
+{
+ return sym53c8xx__setup(str);
+}
+
+__setup("ncr53c8xx=", ncr53c8xx_setup);
+#endif
+
+
+/*
+ * Host attach and initialisations.
+ *
+ * Allocate host data and ncb structure.
+ * Request IO region and remap MMIO region.
+ * Do chip initialization.
+ * If all is OK, install interrupt handling and
+ * start the timer daemon.
+ */
+struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
+ int unit, struct ncr_device *device)
+{
+ struct host_data *host_data;
+ struct ncb *np = NULL;
+ struct Scsi_Host *instance = NULL;
+ u_long flags = 0;
+ int i;
+
+ if (!tpnt->name)
+ tpnt->name = SCSI_NCR_DRIVER_NAME;
+ if (!tpnt->shost_attrs)
+ tpnt->shost_attrs = ncr53c8xx_host_attrs;
+
+ tpnt->queuecommand = ncr53c8xx_queue_command;
+ tpnt->slave_configure = ncr53c8xx_slave_configure;
+ tpnt->slave_alloc = ncr53c8xx_slave_alloc;
+ tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset;
+ tpnt->can_queue = SCSI_NCR_CAN_QUEUE;
+ tpnt->this_id = 7;
+ tpnt->sg_tablesize = SCSI_NCR_SG_TABLESIZE;
+ tpnt->cmd_per_lun = SCSI_NCR_CMD_PER_LUN;
+ tpnt->use_clustering = ENABLE_CLUSTERING;
+
+ if (device->differential)
+ driver_setup.diff_support = device->differential;
+
+ printk(KERN_INFO "ncr53c720-%d: rev 0x%x irq %d\n",
+ unit, device->chip.revision_id, device->slot.irq);
+
+ instance = scsi_host_alloc(tpnt, sizeof(*host_data));
+ if (!instance)
+ goto attach_error;
+ host_data = (struct host_data *) instance->hostdata;
+
+ np = __m_calloc_dma(device->dev, sizeof(struct ncb), "NCB");
+ if (!np)
+ goto attach_error;
+ spin_lock_init(&np->smp_lock);
+ np->dev = device->dev;
+ np->p_ncb = vtobus(np);
+ host_data->ncb = np;
+
+ np->ccb = m_calloc_dma(sizeof(struct ccb), "CCB");
+ if (!np->ccb)
+ goto attach_error;
+
+ /* Store input information in the host data structure. */
+ np->unit = unit;
+ np->verbose = driver_setup.verbose;
+ sprintf(np->inst_name, "ncr53c720-%d", np->unit);
+ np->revision_id = device->chip.revision_id;
+ np->features = device->chip.features;
+ np->clock_divn = device->chip.nr_divisor;
+ np->maxoffs = device->chip.offset_max;
+ np->maxburst = device->chip.burst_max;
+ np->myaddr = device->host_id;
+
+ /* Allocate SCRIPTS areas. */
+ np->script0 = m_calloc_dma(sizeof(struct script), "SCRIPT");
+ if (!np->script0)
+ goto attach_error;
+ np->scripth0 = m_calloc_dma(sizeof(struct scripth), "SCRIPTH");
+ if (!np->scripth0)
+ goto attach_error;
+
+ init_timer(&np->timer);
+ np->timer.data = (unsigned long) np;
+ np->timer.function = ncr53c8xx_timeout;
+
+ /* Try to map the controller chip to virtual and physical memory. */
+
+ np->paddr = device->slot.base;
+ np->paddr2 = (np->features & FE_RAM) ? device->slot.base_2 : 0;
+
+ if (device->slot.base_v)
+ np->vaddr = device->slot.base_v;
+ else
+ np->vaddr = ioremap(device->slot.base_c, 128);
+
+ if (!np->vaddr) {
+ printk(KERN_ERR
+ "%s: can't map memory mapped IO region\n",ncr_name(np));
+ goto attach_error;
+ } else {
+ if (bootverbose > 1)
+ printk(KERN_INFO
+ "%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr);
+ }
+
+ /* Make the controller's registers available. Now the INB INW INL
+ * OUTB OUTW OUTL macros can be used safely.
+ */
+
+ np->reg = (struct ncr_reg __iomem *)np->vaddr;
+
+ /* Do chip dependent initialization. */
+ ncr_prepare_setting(np);
+
+ if (np->paddr2 && sizeof(struct script) > 4096) {
+ np->paddr2 = 0;
+ printk(KERN_WARNING "%s: script too large, NOT using on chip RAM.\n",
+ ncr_name(np));
+ }
+
+ instance->max_channel = 0;
+ instance->this_id = np->myaddr;
+ instance->max_id = np->maxwide ? 16 : 8;
+ instance->max_lun = SCSI_NCR_MAX_LUN;
+ instance->base = (unsigned long) np->reg;
+ instance->irq = device->slot.irq;
+ instance->unique_id = device->slot.base;
+ instance->dma_channel = 0;
+ instance->cmd_per_lun = MAX_TAGS;
+ instance->can_queue = (MAX_START-4);
+ /* This can happen if you forget to call ncr53c8xx_init from
+ * your module_init */
+ BUG_ON(!ncr53c8xx_transport_template);
+ instance->transportt = ncr53c8xx_transport_template;
+
+ /* Patch script to physical addresses */
+ ncr_script_fill(&script0, &scripth0);
+
+ np->scripth = np->scripth0;
+ np->p_scripth = vtobus(np->scripth);
+ np->p_script = (np->paddr2) ? np->paddr2 : vtobus(np->script0);
+
+ ncr_script_copy_and_bind(np, (ncrcmd *) &script0,
+ (ncrcmd *) np->script0, sizeof(struct script));
+ ncr_script_copy_and_bind(np, (ncrcmd *) &scripth0,
+ (ncrcmd *) np->scripth0, sizeof(struct scripth));
+ np->ccb->p_ccb = vtobus (np->ccb);
+
+ /* Patch the script for LED support. */
+
+ if (np->features & FE_LED0) {
+ np->script0->idle[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01));
+ np->script0->reselected[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ np->script0->start[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ }
+
+ /*
+ * Look for the target control block of this nexus.
+ * For i = 0 to 3
+ * JUMP ^ IFTRUE (MASK (i, 3)), @(next_lcb)
+ */
+ for (i = 0 ; i < 4 ; i++) {
+ np->jump_tcb[i].l_cmd =
+ cpu_to_scr((SCR_JUMP ^ IFTRUE (MASK (i, 3))));
+ np->jump_tcb[i].l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_target));
+ }
+
+ ncr_chip_reset(np, 100);
+
+ /* Now check the cache handling of the chipset. */
+
+ if (ncr_snooptest(np)) {
+ printk(KERN_ERR "CACHE INCORRECTLY CONFIGURED.\n");
+ goto attach_error;
+ }
+
+ /* Install the interrupt handler. */
+ np->irq = device->slot.irq;
+
+ /* Initialize the fixed part of the default ccb. */
+ ncr_init_ccb(np, np->ccb);
+
+ /*
+ * After SCSI devices have been opened, we cannot reset the bus
+ * safely, so we do it here. Interrupt handler does the real work.
+ * Process the reset exception if interrupts are not enabled yet.
+ * Then enable disconnects.
+ */
+ spin_lock_irqsave(&np->smp_lock, flags);
+ if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) {
+ printk(KERN_ERR "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np));
+
+ spin_unlock_irqrestore(&np->smp_lock, flags);
+ goto attach_error;
+ }
+ ncr_exception(np);
+
+ np->disc = 1;
+
+ /*
+ * The middle-level SCSI driver does not wait for devices to settle.
+ * Wait synchronously if more than 2 seconds.
+ */
+ if (driver_setup.settle_delay > 2) {
+ printk(KERN_INFO "%s: waiting %d seconds for scsi devices to settle...\n",
+ ncr_name(np), driver_setup.settle_delay);
+ mdelay(1000 * driver_setup.settle_delay);
+ }
+
+ /* start the timeout daemon */
+ np->lasttime=0;
+ ncr_timeout (np);
+
+ /* use SIMPLE TAG messages by default */
+#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG
+ np->order = SIMPLE_QUEUE_TAG;
+#endif
+
+ spin_unlock_irqrestore(&np->smp_lock, flags);
+
+ return instance;
+
+ attach_error:
+ if (!instance)
+ return NULL;
+ printk(KERN_INFO "%s: detaching...\n", ncr_name(np));
+ if (!np)
+ goto unregister;
+ if (np->scripth0)
+ m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH");
+ if (np->script0)
+ m_free_dma(np->script0, sizeof(struct script), "SCRIPT");
+ if (np->ccb)
+ m_free_dma(np->ccb, sizeof(struct ccb), "CCB");
+ m_free_dma(np, sizeof(struct ncb), "NCB");
+ host_data->ncb = NULL;
+
+ unregister:
+ scsi_host_put(instance);
+
+ return NULL;
+}
+
+
+void ncr53c8xx_release(struct Scsi_Host *host)
+{
+ struct host_data *host_data = shost_priv(host);
+#ifdef DEBUG_NCR53C8XX
+ printk("ncr53c8xx: release\n");
+#endif
+ if (host_data->ncb)
+ ncr_detach(host_data->ncb);
+ scsi_host_put(host);
+}
+
+static void ncr53c8xx_set_period(struct scsi_target *starget, int period)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ncb *np = ((struct host_data *)shost->hostdata)->ncb;
+ struct tcb *tp = &np->target[starget->id];
+
+ if (period > np->maxsync)
+ period = np->maxsync;
+ else if (period < np->minsync)
+ period = np->minsync;
+
+ tp->usrsync = period;
+
+ ncr_negotiate(np, tp);
+}
+
+static void ncr53c8xx_set_offset(struct scsi_target *starget, int offset)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ncb *np = ((struct host_data *)shost->hostdata)->ncb;
+ struct tcb *tp = &np->target[starget->id];
+
+ if (offset > np->maxoffs)
+ offset = np->maxoffs;
+ else if (offset < 0)
+ offset = 0;
+
+ tp->maxoffs = offset;
+
+ ncr_negotiate(np, tp);
+}
+
+static void ncr53c8xx_set_width(struct scsi_target *starget, int width)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ncb *np = ((struct host_data *)shost->hostdata)->ncb;
+ struct tcb *tp = &np->target[starget->id];
+
+ if (width > np->maxwide)
+ width = np->maxwide;
+ else if (width < 0)
+ width = 0;
+
+ tp->usrwide = width;
+
+ ncr_negotiate(np, tp);
+}
+
+static void ncr53c8xx_get_signalling(struct Scsi_Host *shost)
+{
+ struct ncb *np = ((struct host_data *)shost->hostdata)->ncb;
+ enum spi_signal_type type;
+
+ switch (np->scsi_mode) {
+ case SMODE_SE:
+ type = SPI_SIGNAL_SE;
+ break;
+ case SMODE_HVD:
+ type = SPI_SIGNAL_HVD;
+ break;
+ default:
+ type = SPI_SIGNAL_UNKNOWN;
+ break;
+ }
+ spi_signalling(shost) = type;
+}
+
+static struct spi_function_template ncr53c8xx_transport_functions = {
+ .set_period = ncr53c8xx_set_period,
+ .show_period = 1,
+ .set_offset = ncr53c8xx_set_offset,
+ .show_offset = 1,
+ .set_width = ncr53c8xx_set_width,
+ .show_width = 1,
+ .get_signalling = ncr53c8xx_get_signalling,
+};
+
+int __init ncr53c8xx_init(void)
+{
+ ncr53c8xx_transport_template = spi_attach_transport(&ncr53c8xx_transport_functions);
+ if (!ncr53c8xx_transport_template)
+ return -ENODEV;
+ return 0;
+}
+
+void ncr53c8xx_exit(void)
+{
+ spi_release_transport(ncr53c8xx_transport_template);
+}
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
new file mode 100644
index 000000000..02901c54b
--- /dev/null
+++ b/drivers/scsi/ncr53c8xx.h
@@ -0,0 +1,1325 @@
+/******************************************************************************
+** Device driver for the PCI-SCSI NCR538XX controller family.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
+** and is currently maintained by
+**
+** Gerard Roudier <groudier@free.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+** And has been ported to NetBSD by
+** Charles M. Hannum <mycroft@gnu.ai.mit.edu>
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+** Added support for MIPS big endian systems.
+** Carsten Langgaard, carstenl@mips.com
+** Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+**
+** Added support for HP PARISC big endian systems.
+** Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+**
+*******************************************************************************
+*/
+
+#ifndef NCR53C8XX_H
+#define NCR53C8XX_H
+
+#include <scsi/scsi_host.h>
+
+
+/*
+** If you want a driver as small as possible, donnot define the
+** following options.
+*/
+#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+#define SCSI_NCR_DEBUG_INFO_SUPPORT
+
+/*
+** To disable integrity checking, do not define the
+** following option.
+*/
+#ifdef CONFIG_SCSI_NCR53C8XX_INTEGRITY_CHECK
+# define SCSI_NCR_ENABLE_INTEGRITY_CHECK
+#endif
+
+/* ---------------------------------------------------------------------
+** Take into account kernel configured parameters.
+** Most of these options can be overridden at startup by a command line.
+** ---------------------------------------------------------------------
+*/
+
+/*
+ * For Ultra2 and Ultra3 SCSI support option, use special features.
+ *
+ * Value (default) means:
+ * bit 0 : all features enabled, except:
+ * bit 1 : PCI Write And Invalidate.
+ * bit 2 : Data Phase Mismatch handling from SCRIPTS.
+ *
+ * Use boot options ncr53c8xx=specf:1 if you want all chip features to be
+ * enabled by the driver.
+ */
+#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
+
+#define SCSI_NCR_MAX_SYNC (80)
+
+/*
+ * Allow tags from 2 to 256, default 8
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2
+#define SCSI_NCR_MAX_TAGS (2)
+#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 256
+#define SCSI_NCR_MAX_TAGS (256)
+#else
+#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#endif
+#else
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+ * Allow tagged command queuing support if configured with default number
+ * of tags set to max (see above).
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#define SCSI_NCR_SETUP_DEFAULT_TAGS CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#elif defined CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE
+#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS
+#else
+#define SCSI_NCR_SETUP_DEFAULT_TAGS (0)
+#endif
+
+/*
+ * Immediate arbitration
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IARB)
+#define SCSI_NCR_IARB_SUPPORT
+#endif
+
+/*
+ * Sync transfer frequency at startup.
+ * Allow from 5Mhz to 80Mhz default 20 Mhz.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC (20)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC
+#undef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC SCSI_NCR_MAX_SYNC
+#endif
+
+#if CONFIG_SCSI_NCR53C8XX_SYNC == 0
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (255)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (50)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC))
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (11)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 40
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (10)
+#else
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (9)
+#endif
+
+/*
+ * Disallow disconnections at boot-up
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT
+#define SCSI_NCR_SETUP_DISCONNECTION (0)
+#else
+#define SCSI_NCR_SETUP_DISCONNECTION (1)
+#endif
+
+/*
+ * Force synchronous negotiation for all targets
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1)
+#else
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0)
+#endif
+
+/*
+ * Disable master parity checking (flawed hardwares need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK
+#define SCSI_NCR_SETUP_MASTER_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_MASTER_PARITY (1)
+#endif
+
+/*
+ * Disable scsi parity checking (flawed devices may need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK
+#define SCSI_NCR_SETUP_SCSI_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_SCSI_PARITY (1)
+#endif
+
+/*
+ * Settle time after reset at boot-up
+ */
+#define SCSI_NCR_SETUP_SETTLE_TIME (2)
+
+/*
+** Bridge quirks work-around option defaulted to 1.
+*/
+#ifndef SCSI_NCR_PCIQ_WORK_AROUND_OPT
+#define SCSI_NCR_PCIQ_WORK_AROUND_OPT 1
+#endif
+
+/*
+** Work-around common bridge misbehaviour.
+**
+** - Do not flush posted writes in the opposite
+** direction on read.
+** - May reorder DMA writes to memory.
+**
+** This option should not affect performances
+** significantly, so it is the default.
+*/
+#if SCSI_NCR_PCIQ_WORK_AROUND_OPT == 1
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+
+/*
+** Same as option 1, but also deal with
+** misconfigured interrupts.
+**
+** - Edge triggered instead of level sensitive.
+** - No interrupt line connected.
+** - IRQ number misconfigured.
+**
+** If no interrupt is delivered, the driver will
+** catch the interrupt conditions 10 times per
+** second. No need to say that this option is
+** not recommended.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 2
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+#define SCSI_NCR_PCIQ_BROKEN_INTR
+
+/*
+** Some bridge designers decided to flush
+** everything prior to deliver the interrupt.
+** This option tries to deal with such a
+** behaviour.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 3
+#define SCSI_NCR_PCIQ_SYNC_ON_INTR
+#endif
+
+/*
+** Other parameters not configurable with "make config"
+** Avoid to change these constants, unless you know what you are doing.
+*/
+
+#define SCSI_NCR_ALWAYS_SIMPLE_TAG
+#define SCSI_NCR_MAX_SCATTER (127)
+#define SCSI_NCR_MAX_TARGET (16)
+
+/*
+** Compute some desirable value for CAN_QUEUE
+** and CMD_PER_LUN.
+** The driver will use lower values if these
+** ones appear to be too large.
+*/
+#define SCSI_NCR_CAN_QUEUE (8*SCSI_NCR_MAX_TAGS + 2*SCSI_NCR_MAX_TARGET)
+#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS)
+
+#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
+#define SCSI_NCR_TIMER_INTERVAL (HZ)
+
+#define SCSI_NCR_MAX_LUN (16)
+
+/*
+ * IO functions definition for big/little endian CPU support.
+ * For now, the NCR is only supported in little endian addressing mode,
+ */
+
+#ifdef __BIG_ENDIAN
+
+#define inw_l2b inw
+#define inl_l2b inl
+#define outw_b2l outw
+#define outl_b2l outl
+
+#define readb_raw readb
+#define writeb_raw writeb
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+#define readw_l2b __raw_readw
+#define readl_l2b __raw_readl
+#define writew_b2l __raw_writew
+#define writel_b2l __raw_writel
+#define readw_raw __raw_readw
+#define readl_raw __raw_readl
+#define writew_raw __raw_writew
+#define writel_raw __raw_writel
+#else /* Other big-endian */
+#define readw_l2b readw
+#define readl_l2b readl
+#define writew_b2l writew
+#define writel_b2l writel
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+#endif
+
+#else /* little endian */
+
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+
+#define readb_raw readb
+#define readw_raw readw
+#define readl_raw readl
+#define writeb_raw writeb
+#define writew_raw writew
+#define writel_raw writel
+
+#endif
+
+#if !defined(__hppa__) && !defined(__mips__)
+#ifdef SCSI_NCR_BIG_ENDIAN
+#error "The NCR in BIG ENDIAN addressing mode is not (yet) supported"
+#endif
+#endif
+
+#define MEMORY_BARRIER() mb()
+
+
+/*
+ * If the NCR uses big endian addressing mode over the
+ * PCI, actual io register addresses for byte and word
+ * accesses must be changed according to lane routing.
+ * Btw, ncr_offb() and ncr_offw() macros only apply to
+ * constants and so donnot generate bloated code.
+ */
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
+
+/*
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for script patching.
+ * Macro cpu_to_scr() is to be used for script patching.
+ * Macro scr_to_cpu() is to be used for getting a DWORD
+ * from the script.
+ */
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*
+ * Access to the controller chip.
+ *
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for accessing chip io
+ * registers. Functions suffixed by '_raw' are assumed
+ * to access the chip over the PCI without doing byte
+ * reordering. Functions suffixed by '_l2b' are
+ * assumed to perform little-endian to big-endian byte
+ * reordering, those suffixed by '_b2l' blah, blah,
+ * blah, ...
+ */
+
+/*
+ * MEMORY mapped IO input / output
+ */
+
+#define INB_OFF(o) readb_raw((char __iomem *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb_raw((val), (char __iomem *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char __iomem *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char __iomem *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char __iomem *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char __iomem *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char __iomem *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char __iomem *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char __iomem *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char __iomem *)np->reg + (o))
+
+#else
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
+/* Only 8 or 32 bit transfers allowed */
+#define INW_OFF(o) (readb((char __iomem *)np->reg + ncr_offw(o)) << 8 | readb((char __iomem *)np->reg + ncr_offw(o) + 1))
+#else
+#define INW_OFF(o) readw_raw((char __iomem *)np->reg + ncr_offw(o))
+#endif
+#define INL_OFF(o) readl_raw((char __iomem *)np->reg + (o))
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
+/* Only 8 or 32 bit transfers allowed */
+#define OUTW_OFF(o, val) do { writeb((char)((val) >> 8), (char __iomem *)np->reg + ncr_offw(o)); writeb((char)(val), (char __iomem *)np->reg + ncr_offw(o) + 1); } while (0)
+#else
+#define OUTW_OFF(o, val) writew_raw((val), (char __iomem *)np->reg + ncr_offw(o))
+#endif
+#define OUTL_OFF(o, val) writel_raw((val), (char __iomem *)np->reg + (o))
+
+#endif
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+ * Set bit field ON, OFF
+ */
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
+
+/*
+ * We normally want the chip to have a consistent view
+ * of driver internal data structures when we restart it.
+ * Thus these macros.
+ */
+#define OUTL_DSP(v) \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTL (nc_dsp, (v)); \
+ } while (0)
+
+#define OUTONB_STD() \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTONB (nc_dcntl, (STD|NOCOM)); \
+ } while (0)
+
+
+/*
+** NCR53C8XX devices features table.
+*/
+struct ncr_chip {
+ unsigned short revision_id;
+ unsigned char burst_max; /* log-base-2 of max burst */
+ unsigned char offset_max;
+ unsigned char nr_divisor;
+ unsigned int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1) /* Wide data transfers */
+#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */
+#define FE_DBLR (1<<4) /* Clock doubler present */
+#define FE_QUAD (1<<5) /* Clock quadrupler present */
+#define FE_ERL (1<<6) /* Enable read line */
+#define FE_CLSE (1<<7) /* Cache line size enable */
+#define FE_WRIE (1<<8) /* Write & Invalidate enable */
+#define FE_ERMP (1<<9) /* Enable read multiple */
+#define FE_BOF (1<<10) /* Burst opcode fetch */
+#define FE_DFS (1<<11) /* DMA fifo size */
+#define FE_PFEN (1<<12) /* Prefetch enable */
+#define FE_LDSTR (1<<13) /* Load/Store supported */
+#define FE_RAM (1<<14) /* On chip RAM present */
+#define FE_VARCLK (1<<15) /* SCSI clock may vary */
+#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
+#define FE_64BIT (1<<17) /* Have a 64-bit PCI interface */
+#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
+#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
+#define FE_LEDC (1<<20) /* Hardware control of LED */
+#define FE_DIFF (1<<21) /* Support Differential SCSI */
+#define FE_66MHZ (1<<23) /* 66MHz PCI Support */
+#define FE_DAC (1<<24) /* Support DAC cycles (64 bit addressing) */
+#define FE_ISTAT1 (1<<25) /* Have ISTAT1, MBOX0, MBOX1 registers */
+#define FE_DAC_IN_USE (1<<26) /* Platform does DAC cycles */
+#define FE_EHP (1<<27) /* 720: Even host parity */
+#define FE_MUX (1<<28) /* 720: Multiplexed bus */
+#define FE_EA (1<<29) /* 720: Enable Ack */
+
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_DBLR|FE_QUAD|F_CLK80)
+#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM)
+};
+
+
+/*
+** Driver setup structure.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+#define SCSI_NCR_MAX_EXCLUDES 8
+struct ncr_driver_setup {
+ u8 master_parity;
+ u8 scsi_parity;
+ u8 disconnection;
+ u8 special_features;
+ u8 force_sync_nego;
+ u8 reverse_probe;
+ u8 pci_fix_up;
+ u8 use_nvram;
+ u8 verbose;
+ u8 default_tags;
+ u16 default_sync;
+ u16 debug;
+ u8 burst_max;
+ u8 led_pin;
+ u8 max_wide;
+ u8 settle_delay;
+ u8 diff_support;
+ u8 irqm;
+ u8 bus_check;
+ u8 optimize;
+ u8 recovery;
+ u8 host_id;
+ u16 iarb;
+ u32 excludes[SCSI_NCR_MAX_EXCLUDES];
+ char tag_ctrl[100];
+};
+
+/*
+** Initial setup.
+** Can be overridden at startup by a command line.
+*/
+#define SCSI_NCR_DRIVER_SETUP \
+{ \
+ SCSI_NCR_SETUP_MASTER_PARITY, \
+ SCSI_NCR_SETUP_SCSI_PARITY, \
+ SCSI_NCR_SETUP_DISCONNECTION, \
+ SCSI_NCR_SETUP_SPECIAL_FEATURES, \
+ SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
+ 0, \
+ 0, \
+ 1, \
+ 0, \
+ SCSI_NCR_SETUP_DEFAULT_TAGS, \
+ SCSI_NCR_SETUP_DEFAULT_SYNC, \
+ 0x00, \
+ 7, \
+ 0, \
+ 1, \
+ SCSI_NCR_SETUP_SETTLE_TIME, \
+ 0, \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 255, \
+ 0x00 \
+}
+
+/*
+** Boot fail safe setup.
+** Override initial setup from boot command line:
+** ncr53c8xx=safe:y
+*/
+#define SCSI_NCR_DRIVER_SAFE_SETUP \
+{ \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 1, \
+ 2, \
+ 0, \
+ 255, \
+ 0x00, \
+ 255, \
+ 0, \
+ 0, \
+ 10, \
+ 1, \
+ 1, \
+ 1, \
+ 0, \
+ 0, \
+ 255 \
+}
+
+/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/
+
+/*-----------------------------------------------------------------
+**
+** The ncr 53c810 register structure.
+**
+**-----------------------------------------------------------------
+*/
+
+struct ncr_reg {
+/*00*/ u8 nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u8 nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+ #define IARB 0x02 /* immediate arbitration */
+
+/*02*/ u8 nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u8 nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+ /* bits 0-2, 7 rsvd for C1010 */
+
+/*04*/ u8 nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u8 nc_sxfer; /* ### Sync speed and count */
+ /* bits 6-7 rsvd for C1010 */
+
+/*06*/ u8 nc_sdid; /* ### Destination-ID */
+
+/*07*/ u8 nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u8 nc_sfbr; /* ### First byte in phase */
+
+/*09*/ u8 nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u8 nc_ssid;
+
+/*0b*/ u8 nc_sbcl;
+
+/*0c*/ u8 nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u8 nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u8 nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u8 nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u8 nc_dsa; /* --> Base page */
+/*11*/ u8 nc_dsa1;
+/*12*/ u8 nc_dsa2;
+/*13*/ u8 nc_dsa3;
+
+/*14*/ u8 nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to ncr */
+ #define SEM 0x10 /* r/w: message between host + ncr */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u8 nc_istat1; /* 896 and later cores only */
+ #define FLSH 0x04 /* sta: chip is flushing */
+ #define SRUN 0x02 /* sta: scripts are running */
+ #define SIRQD 0x01 /* r/w: disable INT pin */
+
+/*16*/ u8 nc_mbox0; /* 896 and later cores only */
+/*17*/ u8 nc_mbox1; /* 896 and later cores only */
+
+/*18*/ u8 nc_ctest0;
+ #define EHP 0x04 /* 720 even host parity */
+/*19*/ u8 nc_ctest1;
+
+/*1a*/ u8 nc_ctest2;
+ #define CSIGP 0x40
+ /* bits 0-2,7 rsvd for C1010 */
+
+/*1b*/ u8 nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+ /* bits 4-7 rsvd for C1010 */
+
+/*1c*/ u32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u8 nc_dfifo;
+/*21*/ u8 nc_ctest4;
+ #define MUX 0x80 /* 720 host bus multiplex mode */
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u8 nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+ /* bits 0-1, 3-7 rsvd for C1010 */
+/*23*/ u8 nc_ctest6;
+
+/*24*/ u32 nc_dbc; /* ### Byte count and command */
+/*28*/ u32 nc_dnad; /* ### Next command register */
+/*2c*/ u32 nc_dsp; /* --> Script Pointer */
+/*30*/ u32 nc_dsps; /* --> Script pointer save/opcode#2 */
+
+/*34*/ u8 nc_scratcha; /* Temporary register a */
+/*35*/ u8 nc_scratcha1;
+/*36*/ u8 nc_scratcha2;
+/*37*/ u8 nc_scratcha3;
+
+/*38*/ u8 nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u8 nc_dien;
+/*3a*/ u8 nc_sbr;
+
+/*3b*/ u8 nc_dcntl; /* --> Script execution control */
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define EA 0x20 /* mod: 720 enable-ack */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+ /* bits 0-1 rsvd for C1010 */
+
+/*3c*/ u32 nc_adder;
+
+/*40*/ u16 nc_sien; /* -->: interrupt enable */
+/*42*/ u16 nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u8 nc_slpar;
+/*45*/ u8 nc_swide;
+/*46*/ u8 nc_macntl;
+/*47*/ u8 nc_gpcntl;
+/*48*/ u8 nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u8 nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u16 nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u8 nc_stest0;
+
+/*4d*/ u8 nc_stest1;
+ #define SCLK 0x80 /* Use the PCI clock as SCSI clock */
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u8 nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define DIF 0x20 /* 720 SCSI differential mode */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u8 nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u16 nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u8 nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+ /* bits 0-5 rsvd for C1010 */
+
+/*53*/ u8 nc_53_;
+/*54*/ u16 nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u8 nc_ccntl0; /* Chip Control 0 (896) */
+ #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */
+ #define PMJCTL 0x40 /* Phase Mismatch Jump Control */
+ #define ENNDJ 0x20 /* Enable Non Data PM Jump */
+ #define DISFC 0x10 /* Disable Auto FIFO Clear */
+ #define DILS 0x02 /* Disable Internal Load/Store */
+ #define DPR 0x01 /* Disable Pipe Req */
+
+/*57*/ u8 nc_ccntl1; /* Chip Control 1 (896) */
+ #define ZMOD 0x80 /* High Impedance Mode */
+ #define DIC 0x10 /* Disable Internal Cycles */
+ #define DDAC 0x08 /* Disable Dual Address Cycle */
+ #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */
+ #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. BMOV */
+ #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */
+
+/*58*/ u16 nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u16 nc_5a_;
+
+/*5c*/ u8 nc_scr0; /* Working register B */
+/*5d*/ u8 nc_scr1; /* */
+/*5e*/ u8 nc_scr2; /* */
+/*5f*/ u8 nc_scr3; /* */
+
+/*60*/ u8 nc_scrx[64]; /* Working register C-R */
+/*a0*/ u32 nc_mmrs; /* Memory Move Read Selector */
+/*a4*/ u32 nc_mmws; /* Memory Move Write Selector */
+/*a8*/ u32 nc_sfs; /* Script Fetch Selector */
+/*ac*/ u32 nc_drs; /* DSA Relative Selector */
+/*b0*/ u32 nc_sbms; /* Static Block Move Selector */
+/*b4*/ u32 nc_dbms; /* Dynamic Block Move Selector */
+/*b8*/ u32 nc_dnad64; /* DMA Next Address 64 */
+/*bc*/ u16 nc_scntl4; /* C1010 only */
+ #define U3EN 0x80 /* Enable Ultra 3 */
+ #define AIPEN 0x40 /* Allow check upper byte lanes */
+ #define XCLKH_DT 0x08 /* Extra clock of data hold on DT
+ transfer edge */
+ #define XCLKH_ST 0x04 /* Extra clock of data hold on ST
+ transfer edge */
+
+/*be*/ u8 nc_aipcntl0; /* Epat Control 1 C1010 only */
+/*bf*/ u8 nc_aipcntl1; /* AIP Control C1010_66 Only */
+
+/*c0*/ u32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */
+/*c4*/ u32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */
+/*c8*/ u8 nc_rbc; /* Remaining Byte Count */
+/*c9*/ u8 nc_rbc1; /* */
+/*ca*/ u8 nc_rbc2; /* */
+/*cb*/ u8 nc_rbc3; /* */
+
+/*cc*/ u8 nc_ua; /* Updated Address */
+/*cd*/ u8 nc_ua1; /* */
+/*ce*/ u8 nc_ua2; /* */
+/*cf*/ u8 nc_ua3; /* */
+/*d0*/ u32 nc_esa; /* Entry Storage Address */
+/*d4*/ u8 nc_ia; /* Instruction Address */
+/*d5*/ u8 nc_ia1;
+/*d6*/ u8 nc_ia2;
+/*d7*/ u8 nc_ia3;
+/*d8*/ u32 nc_sbc; /* SCSI Byte Count (3 bytes only) */
+/*dc*/ u32 nc_csbc; /* Cumulative SCSI Byte Count */
+
+ /* Following for C1010 only */
+/*e0*/ u16 nc_crcpad; /* CRC Value */
+/*e2*/ u8 nc_crccntl0; /* CRC control register */
+ #define SNDCRC 0x10 /* Send CRC Request */
+/*e3*/ u8 nc_crccntl1; /* CRC control register */
+/*e4*/ u32 nc_crcdata; /* CRC data register */
+/*e8*/ u32 nc_e8_; /* rsvd */
+/*ec*/ u32 nc_ec_; /* rsvd */
+/*f0*/ u16 nc_dfbc; /* DMA FIFO byte count */
+
+};
+
+/*-----------------------------------------------------------
+**
+** Utility macros for the script.
+**
+**-----------------------------------------------------------
+*/
+
+#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+typedef u32 ncrcmd;
+
+/*-----------------------------------------------------------
+**
+** SCSI phases
+**
+** DT phases illegal for ncr driver.
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_DT_DATA_OUT 0x04000000
+#define SCR_DT_DATA_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+
+/*-----------------------------------------------------------
+**
+** Data transfer via SCSI.
+**
+**-----------------------------------------------------------
+**
+** MOVE_ABS (LEN)
+** <<start address>>
+**
+** MOVE_IND (LEN)
+** <<dnad_offset>>
+**
+** MOVE_TBL
+** <<dnad_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define OPC_MOVE 0x08000000
+
+#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE)
+
+#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
+#define SCR_CHMOV_IND(l) ((0x20000000) | (l))
+#define SCR_CHMOV_TBL (0x10000000)
+
+struct scr_tblmove {
+ u32 size;
+ u32 addr;
+};
+
+/*-----------------------------------------------------------
+**
+** Selection
+**
+**-----------------------------------------------------------
+**
+** SEL_ABS | SCR_ID (0..15) [ | REL_JMP]
+** <<alternate_address>>
+**
+** SEL_TBL | << dnad_offset>> [ | REL_JMP]
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+
+#ifdef SCSI_NCR_BIG_ENDIAN
+struct scr_tblsel {
+ u8 sel_scntl3;
+ u8 sel_id;
+ u8 sel_sxfer;
+ u8 sel_scntl4;
+};
+#else
+struct scr_tblsel {
+ u8 sel_scntl4;
+ u8 sel_sxfer;
+ u8 sel_id;
+ u8 sel_scntl3;
+};
+#endif
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u32)(id)) << 16)
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** WAIT_DISC
+** dummy: <<alternate_address>>
+**
+** WAIT_RESEL
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+/*-----------------------------------------------------------
+**
+** Bit Set / Reset
+**
+**-----------------------------------------------------------
+**
+** SET (flags {|.. })
+**
+** CLR (flags {|.. })
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+
+
+/*-----------------------------------------------------------
+**
+** Memory to memory move
+**
+**-----------------------------------------------------------
+**
+** COPY (bytecount)
+** << source_address >>
+** << destination_address >>
+**
+** SCR_COPY sets the NO FLUSH option by default.
+** SCR_COPY_F does not set this option.
+**
+** For chips which do not support this option,
+** ncr_copy_and_bind() will remove this bit.
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+**
+** Register move and binary operations
+**
+**-----------------------------------------------------------
+**
+** SFBR_REG (reg, op, data) reg = SFBR op data
+** << 0 >>
+**
+** REG_SFBR (reg, op, data) SFBR = reg op data
+** << 0 >>
+**
+** REG_REG (reg, op, data) reg = reg op data
+** << 0 >>
+**
+**-----------------------------------------------------------
+** On 810A, 860, 825A, 875, 895 and 896 chips the content
+** of SFBR register can be used as data (SCR_SFBR_DATA).
+** The 896 has additional IO registers starting at
+** offset 0x80. Bit 7 of register offset is stored in
+** bit 7 of the SCRIPTS instruction first DWORD.
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */
+
+/*-----------------------------------------------------------
+**
+** FROM_REG (reg) SFBR = reg
+** << 0 >>
+**
+** TO_REG (reg) reg = SFBR
+** << 0 >>
+**
+** LOAD_REG (reg, data) reg = <data>
+** << 0 >>
+**
+** LOAD_SFBR(data) SFBR = <data>
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+**
+** LOAD from memory to register.
+** STORE from register to memory.
+**
+** Only supported by 810A, 860, 825A, 875, 895 and 896.
+**
+**-----------------------------------------------------------
+**
+** LOAD_ABS (LEN)
+** <<start address>>
+**
+** LOAD_REL (LEN) (DSA relative)
+** <<dsa_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
+#define SCR_NO_FLUSH2 0x02000000
+#define SCR_DSA_REL2 0x10000000
+
+#define SCR_LOAD_R(reg, how, n) \
+ (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_STORE_R(reg, how, n) \
+ (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
+#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n)
+#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n)
+
+#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
+#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n)
+#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n)
+
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** CALL [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+** <<dummy>>
+**
+** INT [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** Conditions:
+** WHEN (phase)
+** IF (phase)
+** CARRYSET
+** DATA (data, mask)
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMP64 0x80480000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
+
+/*-----------------------------------------------------------
+**
+** SCSI constants.
+**
+**-----------------------------------------------------------
+*/
+
+/*
+** Status
+*/
+
+#define S_GOOD (0x00)
+#define S_CHECK_COND (0x02)
+#define S_COND_MET (0x04)
+#define S_BUSY (0x08)
+#define S_INT (0x10)
+#define S_INT_COND_MET (0x14)
+#define S_CONFLICT (0x18)
+#define S_TERMINATED (0x20)
+#define S_QUEUE_FULL (0x28)
+#define S_ILLEGAL (0xff)
+#define S_SENSE (0x80)
+
+/*
+ * End of ncrreg from FreeBSD
+ */
+
+/*
+ Build a scatter/gather entry.
+ see sym53c8xx_2/sym_hipd.h for more detailed sym_build_sge()
+ implementation ;)
+ */
+
+#define ncr_build_sge(np, data, badd, len) \
+do { \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr(len); \
+} while (0)
+
+/*==========================================================
+**
+** Structures used by the detection routine to transmit
+** device configuration to the attach function.
+**
+**==========================================================
+*/
+struct ncr_slot {
+ u_long base;
+ u_long base_2;
+ u_long base_c;
+ u_long base_2_c;
+ void __iomem *base_v;
+ void __iomem *base_2_v;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ volatile struct ncr_reg __iomem *reg;
+};
+
+/*==========================================================
+**
+** Structure used by detection routine to save data on
+** each detected board for attach.
+**
+**==========================================================
+*/
+struct ncr_device {
+ struct device *dev;
+ struct ncr_slot slot;
+ struct ncr_chip chip;
+ u_char host_id;
+ u8 differential;
+};
+
+extern struct Scsi_Host *ncr_attach(struct scsi_host_template *tpnt, int unit, struct ncr_device *device);
+extern void ncr53c8xx_release(struct Scsi_Host *host);
+irqreturn_t ncr53c8xx_intr(int irq, void *dev_id);
+extern int ncr53c8xx_init(void);
+extern void ncr53c8xx_exit(void);
+
+#endif /* NCR53C8XX_H */
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
new file mode 100644
index 000000000..c6077cefb
--- /dev/null
+++ b/drivers/scsi/nsp32.c
@@ -0,0 +1,3431 @@
+/*
+ * NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver
+ * Copyright (C) 2001, 2002, 2003
+ * YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
+ * GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Revision History:
+ * 1.0: Initial Release.
+ * 1.1: Add /proc SDTR status.
+ * Remove obsolete error handler nsp32_reset.
+ * Some clean up.
+ * 1.2: PowerPC (big endian) support.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+
+#include "nsp32.h"
+
+
+/***********************************************************************
+ * Module parameters
+ */
+static int trans_mode = 0; /* default: BIOS */
+module_param (trans_mode, int, 0);
+MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M");
+#define ASYNC_MODE 1
+#define ULTRA20M_MODE 2
+
+static bool auto_param = 0; /* default: ON */
+module_param (auto_param, bool, 0);
+MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)");
+
+static bool disc_priv = 1; /* default: OFF */
+module_param (disc_priv, bool, 0);
+MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))");
+
+MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>");
+MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module");
+MODULE_LICENSE("GPL");
+
+static const char *nsp32_release_version = "1.2";
+
+
+/****************************************************************************
+ * Supported hardware
+ */
+static struct pci_device_id nsp32_pci_table[] = {
+ {
+ .vendor = PCI_VENDOR_ID_IODATA,
+ .device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_IODATA,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_NINJASCSI_32BI_KME,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_KME,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_WORKBIT,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_WORKBIT_STANDARD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_PCI_WORKBIT,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_LOGITEC,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_PCI_LOGITEC,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_PCI_MELCO,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_WORKBIT,
+ .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = MODEL_PCI_MELCO,
+ },
+ {0,0,},
+};
+MODULE_DEVICE_TABLE(pci, nsp32_pci_table);
+
+static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */
+
+
+/*
+ * Period/AckWidth speed conversion table
+ *
+ * Note: This period/ackwidth speed table must be in descending order.
+ */
+static nsp32_sync_table nsp32_sync_table_40M[] = {
+ /* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */
+ {0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */
+ {0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */
+ {0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */
+ {0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */
+ {0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */
+ {0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */
+ {0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */
+ {0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */
+ {0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */
+};
+
+static nsp32_sync_table nsp32_sync_table_20M[] = {
+ {0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */
+ {0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */
+ {0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */
+ {0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */
+ {0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */
+ {0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */
+ {0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */
+ {0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */
+ {0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */
+};
+
+static nsp32_sync_table nsp32_sync_table_pci[] = {
+ {0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */
+ {0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */
+ {0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */
+ {0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */
+ {0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */
+ {0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */
+ {0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */
+ {0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */
+ {0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */
+};
+
+/*
+ * function declaration
+ */
+/* module entry point */
+static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
+static void nsp32_remove(struct pci_dev *);
+static int __init init_nsp32 (void);
+static void __exit exit_nsp32 (void);
+
+/* struct struct scsi_host_template */
+static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
+
+static int nsp32_detect (struct pci_dev *pdev);
+static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static const char *nsp32_info (struct Scsi_Host *);
+static int nsp32_release (struct Scsi_Host *);
+
+/* SCSI error handler */
+static int nsp32_eh_abort (struct scsi_cmnd *);
+static int nsp32_eh_bus_reset (struct scsi_cmnd *);
+static int nsp32_eh_host_reset(struct scsi_cmnd *);
+
+/* generate SCSI message */
+static void nsp32_build_identify(struct scsi_cmnd *);
+static void nsp32_build_nop (struct scsi_cmnd *);
+static void nsp32_build_reject (struct scsi_cmnd *);
+static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
+
+/* SCSI message handler */
+static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
+static void nsp32_msgout_occur (struct scsi_cmnd *);
+static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
+
+static int nsp32_setup_sg_table (struct scsi_cmnd *);
+static int nsp32_selection_autopara(struct scsi_cmnd *);
+static int nsp32_selection_autoscsi(struct scsi_cmnd *);
+static void nsp32_scsi_done (struct scsi_cmnd *);
+static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
+static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
+static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
+static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
+
+/* SCSI SDTR */
+static void nsp32_analyze_sdtr (struct scsi_cmnd *);
+static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
+static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
+static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
+static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
+
+/* SCSI bus status handler */
+static void nsp32_wait_req (nsp32_hw_data *, int);
+static void nsp32_wait_sack (nsp32_hw_data *, int);
+static void nsp32_sack_assert (nsp32_hw_data *);
+static void nsp32_sack_negate (nsp32_hw_data *);
+static void nsp32_do_bus_reset(nsp32_hw_data *);
+
+/* hardware interrupt handler */
+static irqreturn_t do_nsp32_isr(int, void *);
+
+/* initialize hardware */
+static int nsp32hw_init(nsp32_hw_data *);
+
+/* EEPROM handler */
+static int nsp32_getprom_param (nsp32_hw_data *);
+static int nsp32_getprom_at24 (nsp32_hw_data *);
+static int nsp32_getprom_c16 (nsp32_hw_data *);
+static void nsp32_prom_start (nsp32_hw_data *);
+static void nsp32_prom_stop (nsp32_hw_data *);
+static int nsp32_prom_read (nsp32_hw_data *, int);
+static int nsp32_prom_read_bit (nsp32_hw_data *);
+static void nsp32_prom_write_bit(nsp32_hw_data *, int);
+static void nsp32_prom_set (nsp32_hw_data *, int, int);
+static int nsp32_prom_get (nsp32_hw_data *, int);
+
+/* debug/warning/info message */
+static void nsp32_message (const char *, int, char *, char *, ...);
+#ifdef NSP32_DEBUG
+static void nsp32_dmessage(const char *, int, int, char *, ...);
+#endif
+
+/*
+ * max_sectors is currently limited up to 128.
+ */
+static struct scsi_host_template nsp32_template = {
+ .proc_name = "nsp32",
+ .name = "Workbit NinjaSCSI-32Bi/UDE",
+ .show_info = nsp32_show_info,
+ .info = nsp32_info,
+ .queuecommand = nsp32_queuecommand,
+ .can_queue = 1,
+ .sg_tablesize = NSP32_SG_SIZE,
+ .max_sectors = 128,
+ .cmd_per_lun = 1,
+ .this_id = NSP32_HOST_SCSIID,
+ .use_clustering = DISABLE_CLUSTERING,
+ .eh_abort_handler = nsp32_eh_abort,
+ .eh_bus_reset_handler = nsp32_eh_bus_reset,
+ .eh_host_reset_handler = nsp32_eh_host_reset,
+/* .highmem_io = 1, */
+};
+
+#include "nsp32_io.h"
+
+/***********************************************************************
+ * debug, error print
+ */
+#ifndef NSP32_DEBUG
+# define NSP32_DEBUG_MASK 0x000000
+# define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args)
+# define nsp32_dbg(mask, args...) /* */
+#else
+# define NSP32_DEBUG_MASK 0xffffff
+# define nsp32_msg(type, args...) \
+ nsp32_message (__func__, __LINE__, (type), args)
+# define nsp32_dbg(mask, args...) \
+ nsp32_dmessage(__func__, __LINE__, (mask), args)
+#endif
+
+#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
+#define NSP32_DEBUG_REGISTER BIT(1)
+#define NSP32_DEBUG_AUTOSCSI BIT(2)
+#define NSP32_DEBUG_INTR BIT(3)
+#define NSP32_DEBUG_SGLIST BIT(4)
+#define NSP32_DEBUG_BUSFREE BIT(5)
+#define NSP32_DEBUG_CDB_CONTENTS BIT(6)
+#define NSP32_DEBUG_RESELECTION BIT(7)
+#define NSP32_DEBUG_MSGINOCCUR BIT(8)
+#define NSP32_DEBUG_EEPROM BIT(9)
+#define NSP32_DEBUG_MSGOUTOCCUR BIT(10)
+#define NSP32_DEBUG_BUSRESET BIT(11)
+#define NSP32_DEBUG_RESTART BIT(12)
+#define NSP32_DEBUG_SYNC BIT(13)
+#define NSP32_DEBUG_WAIT BIT(14)
+#define NSP32_DEBUG_TARGETFLAG BIT(15)
+#define NSP32_DEBUG_PROC BIT(16)
+#define NSP32_DEBUG_INIT BIT(17)
+#define NSP32_SPECIAL_PRINT_REGISTER BIT(20)
+
+#define NSP32_DEBUG_BUF_LEN 100
+
+static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
+{
+ va_list args;
+ char buf[NSP32_DEBUG_BUF_LEN];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+#ifndef NSP32_DEBUG
+ printk("%snsp32: %s\n", type, buf);
+#else
+ printk("%snsp32: %s (%d): %s\n", type, func, line, buf);
+#endif
+}
+
+#ifdef NSP32_DEBUG
+static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
+{
+ va_list args;
+ char buf[NSP32_DEBUG_BUF_LEN];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (mask & NSP32_DEBUG_MASK) {
+ printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf);
+ }
+}
+#endif
+
+#ifdef NSP32_DEBUG
+# include "nsp32_debug.c"
+#else
+# define show_command(arg) /* */
+# define show_busphase(arg) /* */
+# define show_autophase(arg) /* */
+#endif
+
+/*
+ * IDENTIFY Message
+ */
+static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ int pos = data->msgout_len;
+ int mode = FALSE;
+
+ /* XXX: Auto DiscPriv detection is progressing... */
+ if (disc_priv == 0) {
+ /* mode = TRUE; */
+ }
+
+ data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++;
+
+ data->msgout_len = pos;
+}
+
+/*
+ * SDTR Message Routine
+ */
+static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
+ unsigned char period,
+ unsigned char offset)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ int pos = data->msgout_len;
+
+ data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++;
+ data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++;
+ data->msgoutbuf[pos] = EXTENDED_SDTR; pos++;
+ data->msgoutbuf[pos] = period; pos++;
+ data->msgoutbuf[pos] = offset; pos++;
+
+ data->msgout_len = pos;
+}
+
+/*
+ * No Operation Message
+ */
+static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ int pos = data->msgout_len;
+
+ if (pos != 0) {
+ nsp32_msg(KERN_WARNING,
+ "Some messages are already contained!");
+ return;
+ }
+
+ data->msgoutbuf[pos] = NOP; pos++;
+ data->msgout_len = pos;
+}
+
+/*
+ * Reject Message
+ */
+static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ int pos = data->msgout_len;
+
+ data->msgoutbuf[pos] = MESSAGE_REJECT; pos++;
+ data->msgout_len = pos;
+}
+
+/*
+ * timer
+ */
+#if 0
+static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time)
+{
+ unsigned int base = SCpnt->host->io_port;
+
+ nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time);
+
+ if (time & (~TIMER_CNT_MASK)) {
+ nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow");
+ }
+
+ nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK);
+}
+#endif
+
+
+/*
+ * set SCSI command and other parameter to asic, and start selection phase
+ */
+static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned int host_id = SCpnt->device->host->this_id;
+ unsigned char target = scmd_id(SCpnt);
+ nsp32_autoparam *param = data->autoparam;
+ unsigned char phase;
+ int i, ret;
+ unsigned int msgout;
+ u16_le s;
+
+ nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
+
+ /*
+ * check bus free
+ */
+ phase = nsp32_read1(base, SCSI_BUS_MONITOR);
+ if (phase != BUSMON_BUS_FREE) {
+ nsp32_msg(KERN_WARNING, "bus busy");
+ show_busphase(phase & BUSMON_PHASE_MASK);
+ SCpnt->result = DID_BUS_BUSY << 16;
+ return FALSE;
+ }
+
+ /*
+ * message out
+ *
+ * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
+ * over 3 messages needs another routine.
+ */
+ if (data->msgout_len == 0) {
+ nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
+ SCpnt->result = DID_ERROR << 16;
+ return FALSE;
+ } else if (data->msgout_len > 0 && data->msgout_len <= 3) {
+ msgout = 0;
+ for (i = 0; i < data->msgout_len; i++) {
+ /*
+ * the sending order of the message is:
+ * MCNT 3: MSG#0 -> MSG#1 -> MSG#2
+ * MCNT 2: MSG#1 -> MSG#2
+ * MCNT 1: MSG#2
+ */
+ msgout >>= 8;
+ msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
+ }
+ msgout |= MV_VALID; /* MV valid */
+ msgout |= (unsigned int)data->msgout_len; /* len */
+ } else {
+ /* data->msgout_len > 3 */
+ msgout = 0;
+ }
+
+ // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
+ // nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
+
+ /*
+ * setup asic parameter
+ */
+ memset(param, 0, sizeof(nsp32_autoparam));
+
+ /* cdb */
+ for (i = 0; i < SCpnt->cmd_len; i++) {
+ param->cdb[4 * i] = SCpnt->cmnd[i];
+ }
+
+ /* outgoing messages */
+ param->msgout = cpu_to_le32(msgout);
+
+ /* syncreg, ackwidth, target id, SREQ sampling rate */
+ param->syncreg = data->cur_target->syncreg;
+ param->ackwidth = data->cur_target->ackwidth;
+ param->target_id = BIT(host_id) | BIT(target);
+ param->sample_reg = data->cur_target->sample_reg;
+
+ // nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg);
+
+ /* command control */
+ param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
+ AUTOSCSI_START |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 |
+ AUTO_ATN );
+
+
+ /* transfer control */
+ s = 0;
+ switch (data->trans_method) {
+ case NSP32_TRANSFER_BUSMASTER:
+ s |= BM_START;
+ break;
+ case NSP32_TRANSFER_MMIO:
+ s |= CB_MMIO_MODE;
+ break;
+ case NSP32_TRANSFER_PIO:
+ s |= CB_IO_MODE;
+ break;
+ default:
+ nsp32_msg(KERN_ERR, "unknown trans_method");
+ break;
+ }
+ /*
+ * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits.
+ * For bus master transfer, it's taken off.
+ */
+ s |= (TRANSFER_GO | ALL_COUNTER_CLR);
+ param->transfer_control = cpu_to_le16(s);
+
+ /* sg table addr */
+ param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr);
+
+ /*
+ * transfer parameter to ASIC
+ */
+ nsp32_write4(base, SGT_ADR, data->auto_paddr);
+ nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
+ AUTO_PARAMETER );
+
+ /*
+ * Check arbitration
+ */
+ ret = nsp32_arbitration(SCpnt, base);
+
+ return ret;
+}
+
+
+/*
+ * Selection with AUTO SCSI (without AUTO PARAMETER)
+ */
+static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned int host_id = SCpnt->device->host->this_id;
+ unsigned char target = scmd_id(SCpnt);
+ unsigned char phase;
+ int status;
+ unsigned short command = 0;
+ unsigned int msgout = 0;
+ unsigned short execph;
+ int i;
+
+ nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
+
+ /*
+ * IRQ disable
+ */
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+
+ /*
+ * check bus line
+ */
+ phase = nsp32_read1(base, SCSI_BUS_MONITOR);
+ if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) {
+ nsp32_msg(KERN_WARNING, "bus busy");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ status = 1;
+ goto out;
+ }
+
+ /*
+ * clear execph
+ */
+ execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);
+
+ /*
+ * clear FIFO counter to set CDBs
+ */
+ nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER);
+
+ /*
+ * set CDB0 - CDB15
+ */
+ for (i = 0; i < SCpnt->cmd_len; i++) {
+ nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
+ }
+ nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);
+
+ /*
+ * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
+ */
+ nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));
+
+ /*
+ * set SCSI MSGOUT REG
+ *
+ * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
+ * over 3 messages needs another routine.
+ */
+ if (data->msgout_len == 0) {
+ nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
+ SCpnt->result = DID_ERROR << 16;
+ status = 1;
+ goto out;
+ } else if (data->msgout_len > 0 && data->msgout_len <= 3) {
+ msgout = 0;
+ for (i = 0; i < data->msgout_len; i++) {
+ /*
+ * the sending order of the message is:
+ * MCNT 3: MSG#0 -> MSG#1 -> MSG#2
+ * MCNT 2: MSG#1 -> MSG#2
+ * MCNT 1: MSG#2
+ */
+ msgout >>= 8;
+ msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
+ }
+ msgout |= MV_VALID; /* MV valid */
+ msgout |= (unsigned int)data->msgout_len; /* len */
+ nsp32_write4(base, SCSI_MSG_OUT, msgout);
+ } else {
+ /* data->msgout_len > 3 */
+ nsp32_write4(base, SCSI_MSG_OUT, 0);
+ }
+
+ /*
+ * set selection timeout(= 250ms)
+ */
+ nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
+
+ /*
+ * set SREQ hazard killer sampling rate
+ *
+ * TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
+ * check other internal clock!
+ */
+ nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);
+
+ /*
+ * clear Arbit
+ */
+ nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);
+
+ /*
+ * set SYNCREG
+ * Don't set BM_START_ADR before setting this register.
+ */
+ nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);
+
+ /*
+ * set ACKWIDTH
+ */
+ nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);
+
+ nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
+ "syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
+ nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
+ nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
+ nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
+ data->msgout_len, msgout);
+
+ /*
+ * set SGT ADDR (physical address)
+ */
+ nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);
+
+ /*
+ * set TRANSFER CONTROL REG
+ */
+ command = 0;
+ command |= (TRANSFER_GO | ALL_COUNTER_CLR);
+ if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
+ if (scsi_bufflen(SCpnt) > 0) {
+ command |= BM_START;
+ }
+ } else if (data->trans_method & NSP32_TRANSFER_MMIO) {
+ command |= CB_MMIO_MODE;
+ } else if (data->trans_method & NSP32_TRANSFER_PIO) {
+ command |= CB_IO_MODE;
+ }
+ nsp32_write2(base, TRANSFER_CONTROL, command);
+
+ /*
+ * start AUTO SCSI, kick off arbitration
+ */
+ command = (CLEAR_CDB_FIFO_POINTER |
+ AUTOSCSI_START |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 |
+ AUTO_ATN );
+ nsp32_write2(base, COMMAND_CONTROL, command);
+
+ /*
+ * Check arbitration
+ */
+ status = nsp32_arbitration(SCpnt, base);
+
+ out:
+ /*
+ * IRQ enable
+ */
+ nsp32_write2(base, IRQ_CONTROL, 0);
+
+ return status;
+}
+
+
+/*
+ * Arbitration Status Check
+ *
+ * Note: Arbitration counter is waited during ARBIT_GO is not lifting.
+ * Using udelay(1) consumes CPU time and system time, but
+ * arbitration delay time is defined minimal 2.4us in SCSI
+ * specification, thus udelay works as coarse grained wait timer.
+ */
+static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
+{
+ unsigned char arbit;
+ int status = TRUE;
+ int time = 0;
+
+ do {
+ arbit = nsp32_read1(base, ARBIT_STATUS);
+ time++;
+ } while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
+ (time <= ARBIT_TIMEOUT_TIME));
+
+ nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
+ "arbit: 0x%x, delay time: %d", arbit, time);
+
+ if (arbit & ARBIT_WIN) {
+ /* Arbitration succeeded */
+ SCpnt->result = DID_OK << 16;
+ nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
+ } else if (arbit & ARBIT_FAIL) {
+ /* Arbitration failed */
+ SCpnt->result = DID_BUS_BUSY << 16;
+ status = FALSE;
+ } else {
+ /*
+ * unknown error or ARBIT_GO timeout,
+ * something lock up! guess no connection.
+ */
+ nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
+ SCpnt->result = DID_NO_CONNECT << 16;
+ status = FALSE;
+ }
+
+ /*
+ * clear Arbit
+ */
+ nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);
+
+ return status;
+}
+
+
+/*
+ * reselection
+ *
+ * Note: This reselection routine is called from msgin_occur,
+ * reselection target id&lun must be already set.
+ * SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
+ */
+static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int host_id = SCpnt->device->host->this_id;
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned char tmpid, newid;
+
+ nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");
+
+ /*
+ * calculate reselected SCSI ID
+ */
+ tmpid = nsp32_read1(base, RESELECT_ID);
+ tmpid &= (~BIT(host_id));
+ newid = 0;
+ while (tmpid) {
+ if (tmpid & 1) {
+ break;
+ }
+ tmpid >>= 1;
+ newid++;
+ }
+
+ /*
+ * If reselected New ID:LUN is not existed
+ * or current nexus is not existed, unexpected
+ * reselection is occurred. Send reject message.
+ */
+ if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
+ nsp32_msg(KERN_WARNING, "unknown id/lun");
+ return FALSE;
+ } else if(data->lunt[newid][newlun].SCpnt == NULL) {
+ nsp32_msg(KERN_WARNING, "no SCSI command is processing");
+ return FALSE;
+ }
+
+ data->cur_id = newid;
+ data->cur_lun = newlun;
+ data->cur_target = &(data->target[newid]);
+ data->cur_lunt = &(data->lunt[newid][newlun]);
+
+ /* reset SACK/SavedACK counter (or ALL clear?) */
+ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
+
+ return TRUE;
+}
+
+
+/*
+ * nsp32_setup_sg_table - build scatter gather list for transfer data
+ * with bus master.
+ *
+ * Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time.
+ */
+static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ struct scatterlist *sg;
+ nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
+ int num, i;
+ u32_le l;
+
+ if (sgt == NULL) {
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
+ return FALSE;
+ }
+
+ num = scsi_dma_map(SCpnt);
+ if (!num)
+ return TRUE;
+ else if (num < 0)
+ return FALSE;
+ else {
+ scsi_for_each_sg(SCpnt, sg, num, i) {
+ /*
+ * Build nsp32_sglist, substitute sg dma addresses.
+ */
+ sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
+ sgt[i].len = cpu_to_le32(sg_dma_len(sg));
+
+ if (le32_to_cpu(sgt[i].len) > 0x10000) {
+ nsp32_msg(KERN_ERR,
+ "can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
+ return FALSE;
+ }
+ nsp32_dbg(NSP32_DEBUG_SGLIST,
+ "num 0x%x : addr 0x%lx len 0x%lx",
+ i,
+ le32_to_cpu(sgt[i].addr),
+ le32_to_cpu(sgt[i].len ));
+ }
+
+ /* set end mark */
+ l = le32_to_cpu(sgt[num-1].len);
+ sgt[num-1].len = cpu_to_le32(l | SGTEND);
+ }
+
+ return TRUE;
+}
+
+static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ nsp32_target *target;
+ nsp32_lunt *cur_lunt;
+ int ret;
+
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
+ "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
+ "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
+ SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
+ scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
+
+ if (data->CurrentSC != NULL) {
+ nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
+ data->CurrentSC = NULL;
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ /* check target ID is not same as this initiator ID */
+ if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "target==host???");
+ SCpnt->result = DID_BAD_TARGET << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ /* check target LUN is allowable value */
+ if (SCpnt->device->lun >= MAX_LUN) {
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun");
+ SCpnt->result = DID_BAD_TARGET << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ show_command(SCpnt);
+
+ SCpnt->scsi_done = done;
+ data->CurrentSC = SCpnt;
+ SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Message = 0;
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+
+ SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
+ SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
+ SCpnt->SCp.buffer = NULL;
+ SCpnt->SCp.buffers_residual = 0;
+
+ /* initialize data */
+ data->msgout_len = 0;
+ data->msgin_len = 0;
+ cur_lunt = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]);
+ cur_lunt->SCpnt = SCpnt;
+ cur_lunt->save_datp = 0;
+ cur_lunt->msgin03 = FALSE;
+ data->cur_lunt = cur_lunt;
+ data->cur_id = SCpnt->device->id;
+ data->cur_lun = SCpnt->device->lun;
+
+ ret = nsp32_setup_sg_table(SCpnt);
+ if (ret == FALSE) {
+ nsp32_msg(KERN_ERR, "SGT fail");
+ SCpnt->result = DID_ERROR << 16;
+ nsp32_scsi_done(SCpnt);
+ return 0;
+ }
+
+ /* Build IDENTIFY */
+ nsp32_build_identify(SCpnt);
+
+ /*
+ * If target is the first time to transfer after the reset
+ * (target don't have SDTR_DONE and SDTR_INITIATOR), sync
+ * message SDTR is needed to do synchronous transfer.
+ */
+ target = &data->target[scmd_id(SCpnt)];
+ data->cur_target = target;
+
+ if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) {
+ unsigned char period, offset;
+
+ if (trans_mode != ASYNC_MODE) {
+ nsp32_set_max_sync(data, target, &period, &offset);
+ nsp32_build_sdtr(SCpnt, period, offset);
+ target->sync_flag |= SDTR_INITIATOR;
+ } else {
+ nsp32_set_async(data, target);
+ target->sync_flag |= SDTR_DONE;
+ }
+
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
+ "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n",
+ target->limit_entry, period, offset);
+ } else if (target->sync_flag & SDTR_INITIATOR) {
+ /*
+ * It was negotiating SDTR with target, sending from the
+ * initiator, but there are no chance to remove this flag.
+ * Set async because we don't get proper negotiation.
+ */
+ nsp32_set_async(data, target);
+ target->sync_flag &= ~SDTR_INITIATOR;
+ target->sync_flag |= SDTR_DONE;
+
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
+ "SDTR_INITIATOR: fall back to async");
+ } else if (target->sync_flag & SDTR_TARGET) {
+ /*
+ * It was negotiating SDTR with target, sending from target,
+ * but there are no chance to remove this flag. Set async
+ * because we don't get proper negotiation.
+ */
+ nsp32_set_async(data, target);
+ target->sync_flag &= ~SDTR_TARGET;
+ target->sync_flag |= SDTR_DONE;
+
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
+ "Unknown SDTR from target is reached, fall back to async.");
+ }
+
+ nsp32_dbg(NSP32_DEBUG_TARGETFLAG,
+ "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x",
+ SCpnt->device->id, target->sync_flag, target->syncreg,
+ target->ackwidth);
+
+ /* Selection */
+ if (auto_param == 0) {
+ ret = nsp32_selection_autopara(SCpnt);
+ } else {
+ ret = nsp32_selection_autoscsi(SCpnt);
+ }
+
+ if (ret != TRUE) {
+ nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail");
+ nsp32_scsi_done(SCpnt);
+ }
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(nsp32_queuecommand)
+
+/* initialize asic */
+static int nsp32hw_init(nsp32_hw_data *data)
+{
+ unsigned int base = data->BaseAddress;
+ unsigned short irq_stat;
+ unsigned long lc_reg;
+ unsigned char power;
+
+ lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE);
+ if ((lc_reg & 0xff00) == 0) {
+ lc_reg |= (0x20 << 8);
+ nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
+ }
+
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write4(base, BM_CNT, 0);
+ nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
+
+ do {
+ irq_stat = nsp32_read2(base, IRQ_STATUS);
+ nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat);
+ } while (irq_stat & IRQSTATUS_ANY_IRQ);
+
+ /*
+ * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is
+ * designated by specification.
+ */
+ if ((data->trans_method & NSP32_TRANSFER_PIO) ||
+ (data->trans_method & NSP32_TRANSFER_MMIO)) {
+ nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x40);
+ nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40);
+ } else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
+ nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x10);
+ nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60);
+ } else {
+ nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode");
+ }
+
+ nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x",
+ nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT),
+ nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));
+
+ nsp32_index_write1(base, CLOCK_DIV, data->clock);
+ nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
+ nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */
+
+ /*
+ * initialize MISC_WRRD register
+ *
+ * Note: Designated parameters is obeyed as following:
+ * MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
+ * MISC_MASTER_TERMINATION_SELECT: It must be set.
+ * MISC_BMREQ_NEGATE_TIMING_SEL: It should be set.
+ * MISC_AUTOSEL_TIMING_SEL: It should be set.
+ * MISC_BMSTOP_CHANGE2_NONDATA_PHASE: It should be set.
+ * MISC_DELAYED_BMSTART: It's selected for safety.
+ *
+ * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then
+ * we have to set TRANSFERCONTROL_BM_START as 0 and set
+ * appropriate value before restarting bus master transfer.
+ */
+ nsp32_index_write2(base, MISC_WR,
+ (SCSI_DIRECTION_DETECTOR_SELECT |
+ DELAYED_BMSTART |
+ MASTER_TERMINATION_SELECT |
+ BMREQ_NEGATE_TIMING_SEL |
+ AUTOSEL_TIMING_SEL |
+ BMSTOP_CHANGE2_NONDATA_PHASE));
+
+ nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
+ power = nsp32_index_read1(base, TERM_PWR_CONTROL);
+ if (!(power & SENSE)) {
+ nsp32_msg(KERN_INFO, "term power on");
+ nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR);
+ }
+
+ nsp32_write2(base, TIMER_SET, TIMER_STOP);
+ nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */
+
+ nsp32_write1(base, SYNC_REG, 0);
+ nsp32_write1(base, ACK_WIDTH, 0);
+ nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
+
+ /*
+ * enable to select designated IRQ (except for
+ * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
+ */
+ nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ |
+ IRQSELECT_SCSIRESET_IRQ |
+ IRQSELECT_FIFO_SHLD_IRQ |
+ IRQSELECT_RESELECT_IRQ |
+ IRQSELECT_PHASE_CHANGE_IRQ |
+ IRQSELECT_AUTO_SCSI_SEQ_IRQ |
+ // IRQSELECT_BMCNTERR_IRQ |
+ IRQSELECT_TARGET_ABORT_IRQ |
+ IRQSELECT_MASTER_ABORT_IRQ );
+ nsp32_write2(base, IRQ_CONTROL, 0);
+
+ /* PCI LED off */
+ nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF);
+ nsp32_index_write1(base, EXT_PORT, LED_OFF);
+
+ return TRUE;
+}
+
+
+/* interrupt routine */
+static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
+{
+ nsp32_hw_data *data = dev_id;
+ unsigned int base = data->BaseAddress;
+ struct scsi_cmnd *SCpnt = data->CurrentSC;
+ unsigned short auto_stat, irq_stat, trans_stat;
+ unsigned char busmon, busphase;
+ unsigned long flags;
+ int ret;
+ int handled = 0;
+ struct Scsi_Host *host = data->Host;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /*
+ * IRQ check, then enable IRQ mask
+ */
+ irq_stat = nsp32_read2(base, IRQ_STATUS);
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
+ /* is this interrupt comes from Ninja asic? */
+ if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
+ nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
+ goto out2;
+ }
+ handled = 1;
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+
+ busmon = nsp32_read1(base, SCSI_BUS_MONITOR);
+ busphase = busmon & BUSMON_PHASE_MASK;
+
+ trans_stat = nsp32_read2(base, TRANSFER_STATUS);
+ if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) {
+ nsp32_msg(KERN_INFO, "card disconnect");
+ if (data->CurrentSC != NULL) {
+ nsp32_msg(KERN_INFO, "clean up current SCSI command");
+ SCpnt->result = DID_BAD_TARGET << 16;
+ nsp32_scsi_done(SCpnt);
+ }
+ goto out;
+ }
+
+ /* Timer IRQ */
+ if (irq_stat & IRQSTATUS_TIMER_IRQ) {
+ nsp32_dbg(NSP32_DEBUG_INTR, "timer stop");
+ nsp32_write2(base, TIMER_SET, TIMER_STOP);
+ goto out;
+ }
+
+ /* SCSI reset */
+ if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) {
+ nsp32_msg(KERN_INFO, "detected someone do bus reset");
+ nsp32_do_bus_reset(data);
+ if (SCpnt != NULL) {
+ SCpnt->result = DID_RESET << 16;
+ nsp32_scsi_done(SCpnt);
+ }
+ goto out;
+ }
+
+ if (SCpnt == NULL) {
+ nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
+ nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ goto out;
+ }
+
+ /*
+ * AutoSCSI Interrupt.
+ * Note: This interrupt is occurred when AutoSCSI is finished. Then
+ * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are
+ * recorded when AutoSCSI sequencer has been processed.
+ */
+ if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) {
+ /* getting SCSI executed phase */
+ auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE);
+ nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
+
+ /* Selection Timeout, go busfree phase. */
+ if (auto_stat & SELECTION_TIMEOUT) {
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "selection timeout occurred");
+
+ SCpnt->result = DID_TIME_OUT << 16;
+ nsp32_scsi_done(SCpnt);
+ goto out;
+ }
+
+ if (auto_stat & MSGOUT_PHASE) {
+ /*
+ * MsgOut phase was processed.
+ * If MSG_IN_OCCUER is not set, then MsgOut phase is
+ * completed. Thus, msgout_len must reset. Otherwise,
+ * nothing to do here. If MSG_OUT_OCCUER is occurred,
+ * then we will encounter the condition and check.
+ */
+ if (!(auto_stat & MSG_IN_OCCUER) &&
+ (data->msgout_len <= 3)) {
+ /*
+ * !MSG_IN_OCCUER && msgout_len <=3
+ * ---> AutoSCSI with MSGOUTreg is processed.
+ */
+ data->msgout_len = 0;
+ };
+
+ nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
+ }
+
+ if ((auto_stat & DATA_IN_PHASE) &&
+ (scsi_get_resid(SCpnt) > 0) &&
+ ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
+ printk( "auto+fifo\n");
+ //nsp32_pio_read(SCpnt);
+ }
+
+ if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) {
+ /* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "Data in/out phase processed");
+
+ /* read BMCNT, SGT pointer addr */
+ nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
+ nsp32_read4(base, BM_CNT));
+ nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
+ nsp32_read4(base, SGT_ADR));
+ nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
+ nsp32_read4(base, SACK_CNT));
+ nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
+ nsp32_read4(base, SAVED_SACK_CNT));
+
+ scsi_set_resid(SCpnt, 0); /* all data transferred! */
+ }
+
+ /*
+ * MsgIn Occur
+ */
+ if (auto_stat & MSG_IN_OCCUER) {
+ nsp32_msgin_occur(SCpnt, irq_stat, auto_stat);
+ }
+
+ /*
+ * MsgOut Occur
+ */
+ if (auto_stat & MSG_OUT_OCCUER) {
+ nsp32_msgout_occur(SCpnt);
+ }
+
+ /*
+ * Bus Free Occur
+ */
+ if (auto_stat & BUS_FREE_OCCUER) {
+ ret = nsp32_busfree_occur(SCpnt, auto_stat);
+ if (ret == TRUE) {
+ goto out;
+ }
+ }
+
+ if (auto_stat & STATUS_PHASE) {
+ /*
+ * Read CSB and substitute CSB for SCpnt->result
+ * to save status phase stutas byte.
+ * scsi error handler checks host_byte (DID_*:
+ * low level driver to indicate status), then checks
+ * status_byte (SCSI status byte).
+ */
+ SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
+ }
+
+ if (auto_stat & ILLEGAL_PHASE) {
+ /* Illegal phase is detected. SACK is not back. */
+ nsp32_msg(KERN_WARNING,
+ "AUTO SCSI ILLEGAL PHASE OCCUR!!!!");
+
+ /* TODO: currently we don't have any action... bus reset? */
+
+ /*
+ * To send back SACK, assert, wait, and negate.
+ */
+ nsp32_sack_assert(data);
+ nsp32_wait_req(data, NEGATE);
+ nsp32_sack_negate(data);
+
+ }
+
+ if (auto_stat & COMMAND_PHASE) {
+ /* nothing to do */
+ nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed");
+ }
+
+ if (auto_stat & AUTOSCSI_BUSY) {
+ /* AutoSCSI is running */
+ }
+
+ show_autophase(auto_stat);
+ }
+
+ /* FIFO_SHLD_IRQ */
+ if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) {
+ nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ");
+
+ switch(busphase) {
+ case BUSPHASE_DATA_OUT:
+ nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write");
+
+ //nsp32_pio_write(SCpnt);
+
+ break;
+
+ case BUSPHASE_DATA_IN:
+ nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read");
+
+ //nsp32_pio_read(SCpnt);
+
+ break;
+
+ case BUSPHASE_STATUS:
+ nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");
+
+ SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+
+ break;
+ default:
+ nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
+ nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ show_busphase(busphase);
+ break;
+ }
+
+ goto out;
+ }
+
+ /* Phase Change IRQ */
+ if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) {
+ nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ");
+
+ switch(busphase) {
+ case BUSPHASE_MESSAGE_IN:
+ nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in");
+ nsp32_msgin_occur(SCpnt, irq_stat, 0);
+ break;
+ default:
+ nsp32_msg(KERN_WARNING, "phase chg/other phase?");
+ nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n",
+ irq_stat, trans_stat);
+ show_busphase(busphase);
+ break;
+ }
+ goto out;
+ }
+
+ /* PCI_IRQ */
+ if (irq_stat & IRQSTATUS_PCI_IRQ) {
+ nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred");
+ /* Do nothing */
+ }
+
+ /* BMCNTERR_IRQ */
+ if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) {
+ nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
+ /*
+ * TODO: To be implemented improving bus master
+ * transfer reliability when BMCNTERR is occurred in
+ * AutoSCSI phase described in specification.
+ */
+ }
+
+#if 0
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ show_busphase(busphase);
+#endif
+
+ out:
+ /* disable IRQ mask */
+ nsp32_write2(base, IRQ_CONTROL, 0);
+
+ out2:
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ nsp32_dbg(NSP32_DEBUG_INTR, "exit");
+
+ return IRQ_RETVAL(handled);
+}
+
+
+static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ unsigned long flags;
+ nsp32_hw_data *data;
+ int hostno;
+ unsigned int base;
+ unsigned char mode_reg;
+ int id, speed;
+ long model;
+
+ hostno = host->host_no;
+ data = (nsp32_hw_data *)host->hostdata;
+ base = host->io_port;
+
+ seq_puts(m, "NinjaSCSI-32 status\n\n");
+ seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
+ seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
+
+ mode_reg = nsp32_index_read1(base, CHIP_MODE);
+ model = data->pci_devid->driver_data;
+
+#ifdef CONFIG_PM
+ seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
+#endif
+ seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
+
+ spin_lock_irqsave(&(data->Lock), flags);
+ seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
+ spin_unlock_irqrestore(&(data->Lock), flags);
+
+
+ seq_puts(m, "SDTR status\n");
+ for (id = 0; id < ARRAY_SIZE(data->target); id++) {
+
+ seq_printf(m, "id %d: ", id);
+
+ if (id == host->this_id) {
+ seq_puts(m, "----- NinjaSCSI-32 host adapter\n");
+ continue;
+ }
+
+ if (data->target[id].sync_flag == SDTR_DONE) {
+ if (data->target[id].period == 0 &&
+ data->target[id].offset == ASYNC_OFFSET ) {
+ seq_puts(m, "async");
+ } else {
+ seq_puts(m, " sync");
+ }
+ } else {
+ seq_puts(m, " none");
+ }
+
+ if (data->target[id].period != 0) {
+
+ speed = 1000000 / (data->target[id].period * 4);
+
+ seq_printf(m, " transfer %d.%dMB/s, offset %d",
+ speed / 1000,
+ speed % 1000,
+ data->target[id].offset
+ );
+ }
+ seq_putc(m, '\n');
+ }
+ return 0;
+}
+
+
+
+/*
+ * Reset parameters and call scsi_done for data->cur_lunt.
+ * Be careful setting SCpnt->result = DID_* before calling this function.
+ */
+static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+
+ scsi_dma_unmap(SCpnt);
+
+ /*
+ * clear TRANSFERCONTROL_BM_START
+ */
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write4(base, BM_CNT, 0);
+
+ /*
+ * call scsi_done
+ */
+ (*SCpnt->scsi_done)(SCpnt);
+
+ /*
+ * reset parameters
+ */
+ data->cur_lunt->SCpnt = NULL;
+ data->cur_lunt = NULL;
+ data->cur_target = NULL;
+ data->CurrentSC = NULL;
+}
+
+
+/*
+ * Bus Free Occur
+ *
+ * Current Phase is BUSFREE. AutoSCSI is automatically execute BUSFREE phase
+ * with ACK reply when below condition is matched:
+ * MsgIn 00: Command Complete.
+ * MsgIn 02: Save Data Pointer.
+ * MsgIn 04: Diconnect.
+ * In other case, unexpected BUSFREE is detected.
+ */
+static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+
+ nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
+ show_autophase(execph);
+
+ nsp32_write4(base, BM_CNT, 0);
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+
+ /*
+ * MsgIn 02: Save Data Pointer
+ *
+ * VALID:
+ * Save Data Pointer is received. Adjust pointer.
+ *
+ * NO-VALID:
+ * SCSI-3 says if Save Data Pointer is not received, then we restart
+ * processing and we can't adjust any SCSI data pointer in next data
+ * phase.
+ */
+ if (execph & MSGIN_02_VALID) {
+ nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid");
+
+ /*
+ * Check sack_cnt/saved_sack_cnt, then adjust sg table if
+ * needed.
+ */
+ if (!(execph & MSGIN_00_VALID) &&
+ ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
+ unsigned int sacklen, s_sacklen;
+
+ /*
+ * Read SACK count and SAVEDSACK count, then compare.
+ */
+ sacklen = nsp32_read4(base, SACK_CNT );
+ s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
+
+ /*
+ * If SAVEDSACKCNT == 0, it means SavedDataPointer is
+ * come after data transferring.
+ */
+ if (s_sacklen > 0) {
+ /*
+ * Comparing between sack and savedsack to
+ * check the condition of AutoMsgIn03.
+ *
+ * If they are same, set msgin03 == TRUE,
+ * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at
+ * reselection. On the other hand, if they
+ * aren't same, set msgin03 == FALSE, and
+ * COMMANDCONTROL_AUTO_MSGIN_03 is disabled at
+ * reselection.
+ */
+ if (sacklen != s_sacklen) {
+ data->cur_lunt->msgin03 = FALSE;
+ } else {
+ data->cur_lunt->msgin03 = TRUE;
+ }
+
+ nsp32_adjust_busfree(SCpnt, s_sacklen);
+ }
+ }
+
+ /* This value has not substitude with valid value yet... */
+ //data->cur_lunt->save_datp = data->cur_datp;
+ } else {
+ /*
+ * no processing.
+ */
+ }
+
+ if (execph & MSGIN_03_VALID) {
+ /* MsgIn03 was valid to be processed. No need processing. */
+ }
+
+ /*
+ * target SDTR check
+ */
+ if (data->cur_target->sync_flag & SDTR_INITIATOR) {
+ /*
+ * SDTR negotiation pulled by the initiator has not
+ * finished yet. Fall back to ASYNC mode.
+ */
+ nsp32_set_async(data, data->cur_target);
+ data->cur_target->sync_flag &= ~SDTR_INITIATOR;
+ data->cur_target->sync_flag |= SDTR_DONE;
+ } else if (data->cur_target->sync_flag & SDTR_TARGET) {
+ /*
+ * SDTR negotiation pulled by the target has been
+ * negotiating.
+ */
+ if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
+ /*
+ * If valid message is received, then
+ * negotiation is succeeded.
+ */
+ } else {
+ /*
+ * On the contrary, if unexpected bus free is
+ * occurred, then negotiation is failed. Fall
+ * back to ASYNC mode.
+ */
+ nsp32_set_async(data, data->cur_target);
+ }
+ data->cur_target->sync_flag &= ~SDTR_TARGET;
+ data->cur_target->sync_flag |= SDTR_DONE;
+ }
+
+ /*
+ * It is always ensured by SCSI standard that initiator
+ * switches into Bus Free Phase after
+ * receiving message 00 (Command Complete), 04 (Disconnect).
+ * It's the reason that processing here is valid.
+ */
+ if (execph & MSGIN_00_VALID) {
+ /* MsgIn 00: Command Complete */
+ nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
+
+ SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ SCpnt->SCp.Message = 0;
+ nsp32_dbg(NSP32_DEBUG_BUSFREE,
+ "normal end stat=0x%x resid=0x%x\n",
+ SCpnt->SCp.Status, scsi_get_resid(SCpnt));
+ SCpnt->result = (DID_OK << 16) |
+ (SCpnt->SCp.Message << 8) |
+ (SCpnt->SCp.Status << 0);
+ nsp32_scsi_done(SCpnt);
+ /* All operation is done */
+ return TRUE;
+ } else if (execph & MSGIN_04_VALID) {
+ /* MsgIn 04: Disconnect */
+ SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ SCpnt->SCp.Message = 4;
+
+ nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
+ return TRUE;
+ } else {
+ /* Unexpected bus free */
+ nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
+
+ /* DID_ERROR? */
+ //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
+ SCpnt->result = DID_ERROR << 16;
+ nsp32_scsi_done(SCpnt);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+/*
+ * nsp32_adjust_busfree - adjusting SG table
+ *
+ * Note: This driver adjust the SG table using SCSI ACK
+ * counter instead of BMCNT counter!
+ */
+static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ int old_entry = data->cur_entry;
+ int new_entry;
+ int sg_num = data->cur_lunt->sg_num;
+ nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
+ unsigned int restlen, sentlen;
+ u32_le len, addr;
+
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
+
+ /* adjust saved SACK count with 4 byte start address boundary */
+ s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
+
+ /*
+ * calculate new_entry from sack count and each sgt[].len
+ * calculate the byte which is intent to send
+ */
+ sentlen = 0;
+ for (new_entry = old_entry; new_entry < sg_num; new_entry++) {
+ sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND);
+ if (sentlen > s_sacklen) {
+ break;
+ }
+ }
+
+ /* all sgt is processed */
+ if (new_entry == sg_num) {
+ goto last;
+ }
+
+ if (sentlen == s_sacklen) {
+ /* XXX: confirm it's ok or not */
+ /* In this case, it's ok because we are at
+ the head element of the sg. restlen is correctly calculated. */
+ }
+
+ /* calculate the rest length for transferring */
+ restlen = sentlen - s_sacklen;
+
+ /* update adjusting current SG table entry */
+ len = le32_to_cpu(sgt[new_entry].len);
+ addr = le32_to_cpu(sgt[new_entry].addr);
+ addr += (len - restlen);
+ sgt[new_entry].addr = cpu_to_le32(addr);
+ sgt[new_entry].len = cpu_to_le32(restlen);
+
+ /* set cur_entry with new_entry */
+ data->cur_entry = new_entry;
+
+ return;
+
+ last:
+ if (scsi_get_resid(SCpnt) < sentlen) {
+ nsp32_msg(KERN_ERR, "resid underflow");
+ }
+
+ scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
+
+ /* update hostdata and lun */
+
+ return;
+}
+
+
+/*
+ * It's called MsgOut phase occur.
+ * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in
+ * message out phase. It, however, has more than 3 messages,
+ * HBA creates the interrupt and we have to process by hand.
+ */
+static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+ //unsigned short command;
+ long new_sgtp;
+ int i;
+
+ nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
+ "enter: msgout_len: 0x%x", data->msgout_len);
+
+ /*
+ * If MsgOut phase is occurred without having any
+ * message, then No_Operation is sent (SCSI-2).
+ */
+ if (data->msgout_len == 0) {
+ nsp32_build_nop(SCpnt);
+ }
+
+ /*
+ * Set SGTP ADDR current entry for restarting AUTOSCSI,
+ * because SGTP is incremented next point.
+ * There is few statement in the specification...
+ */
+ new_sgtp = data->cur_lunt->sglun_paddr +
+ (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
+
+ /*
+ * send messages
+ */
+ for (i = 0; i < data->msgout_len; i++) {
+ nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
+ "%d : 0x%x", i, data->msgoutbuf[i]);
+
+ /*
+ * Check REQ is asserted.
+ */
+ nsp32_wait_req(data, ASSERT);
+
+ if (i == (data->msgout_len - 1)) {
+ /*
+ * If the last message, set the AutoSCSI restart
+ * before send back the ack message. AutoSCSI
+ * restart automatically negate ATN signal.
+ */
+ //command = (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
+ //nsp32_restart_autoscsi(SCpnt, command);
+ nsp32_write2(base, COMMAND_CONTROL,
+ (CLEAR_CDB_FIFO_POINTER |
+ AUTO_COMMAND_PHASE |
+ AUTOSCSI_RESTART |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 ));
+ }
+ /*
+ * Write data with SACK, then wait sack is
+ * automatically negated.
+ */
+ nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]);
+ nsp32_wait_sack(data, NEGATE);
+
+ nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
+ nsp32_read1(base, SCSI_BUS_MONITOR));
+ };
+
+ data->msgout_len = 0;
+
+ nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit");
+}
+
+/*
+ * Restart AutoSCSI
+ *
+ * Note: Restarting AutoSCSI needs set:
+ * SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL
+ */
+static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = data->BaseAddress;
+ unsigned short transfer = 0;
+
+ nsp32_dbg(NSP32_DEBUG_RESTART, "enter");
+
+ if (data->cur_target == NULL || data->cur_lunt == NULL) {
+ nsp32_msg(KERN_ERR, "Target or Lun is invalid");
+ }
+
+ /*
+ * set SYNC_REG
+ * Don't set BM_START_ADR before setting this register.
+ */
+ nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);
+
+ /*
+ * set ACKWIDTH
+ */
+ nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);
+
+ /*
+ * set SREQ hazard killer sampling rate
+ */
+ nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);
+
+ /*
+ * set SGT ADDR (physical address)
+ */
+ nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);
+
+ /*
+ * set TRANSFER CONTROL REG
+ */
+ transfer = 0;
+ transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
+ if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
+ if (scsi_bufflen(SCpnt) > 0) {
+ transfer |= BM_START;
+ }
+ } else if (data->trans_method & NSP32_TRANSFER_MMIO) {
+ transfer |= CB_MMIO_MODE;
+ } else if (data->trans_method & NSP32_TRANSFER_PIO) {
+ transfer |= CB_IO_MODE;
+ }
+ nsp32_write2(base, TRANSFER_CONTROL, transfer);
+
+ /*
+ * restart AutoSCSI
+ *
+ * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ?
+ */
+ command |= (CLEAR_CDB_FIFO_POINTER |
+ AUTO_COMMAND_PHASE |
+ AUTOSCSI_RESTART );
+ nsp32_write2(base, COMMAND_CONTROL, command);
+
+ nsp32_dbg(NSP32_DEBUG_RESTART, "exit");
+}
+
+
+/*
+ * cannot run automatically message in occur
+ */
+static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
+ unsigned long irq_status,
+ unsigned short execph)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned char msg;
+ unsigned char msgtype;
+ unsigned char newlun;
+ unsigned short command = 0;
+ int msgclear = TRUE;
+ long new_sgtp;
+ int ret;
+
+ /*
+ * read first message
+ * Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure
+ * of Message-In have to be processed before sending back SCSI ACK.
+ */
+ msg = nsp32_read1(base, SCSI_DATA_IN);
+ data->msginbuf[(unsigned char)data->msgin_len] = msg;
+ msgtype = data->msginbuf[0];
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR,
+ "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x",
+ data->msgin_len, msg, msgtype);
+
+ /*
+ * TODO: We need checking whether bus phase is message in?
+ */
+
+ /*
+ * assert SCSI ACK
+ */
+ nsp32_sack_assert(data);
+
+ /*
+ * processing IDENTIFY
+ */
+ if (msgtype & 0x80) {
+ if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) {
+ /* Invalid (non reselect) phase */
+ goto reject;
+ }
+
+ newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */
+ ret = nsp32_reselection(SCpnt, newlun);
+ if (ret == TRUE) {
+ goto restart;
+ } else {
+ goto reject;
+ }
+ }
+
+ /*
+ * processing messages except for IDENTIFY
+ *
+ * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO.
+ */
+ switch (msgtype) {
+ /*
+ * 1-byte message
+ */
+ case COMMAND_COMPLETE:
+ case DISCONNECT:
+ /*
+ * These messages should not be occurred.
+ * They should be processed on AutoSCSI sequencer.
+ */
+ nsp32_msg(KERN_WARNING,
+ "unexpected message of AutoSCSI MsgIn: 0x%x", msg);
+ break;
+
+ case RESTORE_POINTERS:
+ /*
+ * AutoMsgIn03 is disabled, and HBA gets this message.
+ */
+
+ if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) {
+ unsigned int s_sacklen;
+
+ s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
+ if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) {
+ nsp32_adjust_busfree(SCpnt, s_sacklen);
+ } else {
+ /* No need to rewrite SGT */
+ }
+ }
+ data->cur_lunt->msgin03 = FALSE;
+
+ /* Update with the new value */
+
+ /* reset SACK/SavedACK counter (or ALL clear?) */
+ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
+
+ /*
+ * set new sg pointer
+ */
+ new_sgtp = data->cur_lunt->sglun_paddr +
+ (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
+ nsp32_write4(base, SGT_ADR, new_sgtp);
+
+ break;
+
+ case SAVE_POINTERS:
+ /*
+ * These messages should not be occurred.
+ * They should be processed on AutoSCSI sequencer.
+ */
+ nsp32_msg (KERN_WARNING,
+ "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
+
+ break;
+
+ case MESSAGE_REJECT:
+ /* If previous message_out is sending SDTR, and get
+ message_reject from target, SDTR negotiation is failed */
+ if (data->cur_target->sync_flag &
+ (SDTR_INITIATOR | SDTR_TARGET)) {
+ /*
+ * Current target is negotiating SDTR, but it's
+ * failed. Fall back to async transfer mode, and set
+ * SDTR_DONE.
+ */
+ nsp32_set_async(data, data->cur_target);
+ data->cur_target->sync_flag &= ~SDTR_INITIATOR;
+ data->cur_target->sync_flag |= SDTR_DONE;
+
+ }
+ break;
+
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* queue tag is not supported currently */
+ nsp32_msg (KERN_WARNING,
+ "unsupported message: 0x%x", msgtype);
+ break;
+
+ case INITIATE_RECOVERY:
+ /* staring ECA (Extended Contingent Allegiance) state. */
+ /* This message is declined in SPI2 or later. */
+
+ goto reject;
+
+ /*
+ * 2-byte message
+ */
+ case SIMPLE_QUEUE_TAG:
+ case 0x23:
+ /*
+ * 0x23: Ignore_Wide_Residue is not declared in scsi.h.
+ * No support is needed.
+ */
+ if (data->msgin_len >= 1) {
+ goto reject;
+ }
+
+ /* current position is 1-byte of 2 byte */
+ msgclear = FALSE;
+
+ break;
+
+ /*
+ * extended message
+ */
+ case EXTENDED_MESSAGE:
+ if (data->msgin_len < 1) {
+ /*
+ * Current position does not reach 2-byte
+ * (2-byte is extended message length).
+ */
+ msgclear = FALSE;
+ break;
+ }
+
+ if ((data->msginbuf[1] + 1) > data->msgin_len) {
+ /*
+ * Current extended message has msginbuf[1] + 2
+ * (msgin_len starts counting from 0, so buf[1] + 1).
+ * If current message position is not finished,
+ * continue receiving message.
+ */
+ msgclear = FALSE;
+ break;
+ }
+
+ /*
+ * Reach here means regular length of each type of
+ * extended messages.
+ */
+ switch (data->msginbuf[2]) {
+ case EXTENDED_MODIFY_DATA_POINTER:
+ /* TODO */
+ goto reject; /* not implemented yet */
+ break;
+
+ case EXTENDED_SDTR:
+ /*
+ * Exchange this message between initiator and target.
+ */
+ if (data->msgin_len != EXTENDED_SDTR_LEN + 1) {
+ /*
+ * received inappropriate message.
+ */
+ goto reject;
+ break;
+ }
+
+ nsp32_analyze_sdtr(SCpnt);
+
+ break;
+
+ case EXTENDED_EXTENDED_IDENTIFY:
+ /* SCSI-I only, not supported. */
+ goto reject; /* not implemented yet */
+
+ break;
+
+ case EXTENDED_WDTR:
+ goto reject; /* not implemented yet */
+
+ break;
+
+ default:
+ goto reject;
+ }
+ break;
+
+ default:
+ goto reject;
+ }
+
+ restart:
+ if (msgclear == TRUE) {
+ data->msgin_len = 0;
+
+ /*
+ * If restarting AutoSCSI, but there are some message to out
+ * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0
+ * (MV_VALID = 0). When commandcontrol is written with
+ * AutoSCSI restart, at the same time MsgOutOccur should be
+ * happened (however, such situation is really possible...?).
+ */
+ if (data->msgout_len > 0) {
+ nsp32_write4(base, SCSI_MSG_OUT, 0);
+ command |= AUTO_ATN;
+ }
+
+ /*
+ * restart AutoSCSI
+ * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed.
+ */
+ command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
+
+ /*
+ * If current msgin03 is TRUE, then flag on.
+ */
+ if (data->cur_lunt->msgin03 == TRUE) {
+ command |= AUTO_MSGIN_03;
+ }
+ data->cur_lunt->msgin03 = FALSE;
+ } else {
+ data->msgin_len++;
+ }
+
+ /*
+ * restart AutoSCSI
+ */
+ nsp32_restart_autoscsi(SCpnt, command);
+
+ /*
+ * wait SCSI REQ negate for REQ-ACK handshake
+ */
+ nsp32_wait_req(data, NEGATE);
+
+ /*
+ * negate SCSI ACK
+ */
+ nsp32_sack_negate(data);
+
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
+
+ return;
+
+ reject:
+ nsp32_msg(KERN_WARNING,
+ "invalid or unsupported MessageIn, rejected. "
+ "current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
+ msg, data->msgin_len, msgtype);
+ nsp32_build_reject(SCpnt);
+ data->msgin_len = 0;
+
+ goto restart;
+}
+
+/*
+ *
+ */
+static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ nsp32_target *target = data->cur_target;
+ nsp32_sync_table *synct;
+ unsigned char get_period = data->msginbuf[3];
+ unsigned char get_offset = data->msginbuf[4];
+ int entry;
+ int syncnum;
+
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
+
+ synct = data->synct;
+ syncnum = data->syncnum;
+
+ /*
+ * If this inititor sent the SDTR message, then target responds SDTR,
+ * initiator SYNCREG, ACKWIDTH from SDTR parameter.
+ * Messages are not appropriate, then send back reject message.
+ * If initiator did not send the SDTR, but target sends SDTR,
+ * initiator calculator the appropriate parameter and send back SDTR.
+ */
+ if (target->sync_flag & SDTR_INITIATOR) {
+ /*
+ * Initiator sent SDTR, the target responds and
+ * send back negotiation SDTR.
+ */
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");
+
+ target->sync_flag &= ~SDTR_INITIATOR;
+ target->sync_flag |= SDTR_DONE;
+
+ /*
+ * offset:
+ */
+ if (get_offset > SYNC_OFFSET) {
+ /*
+ * Negotiation is failed, the target send back
+ * unexpected offset value.
+ */
+ goto reject;
+ }
+
+ if (get_offset == ASYNC_OFFSET) {
+ /*
+ * Negotiation is succeeded, the target want
+ * to fall back into asynchronous transfer mode.
+ */
+ goto async;
+ }
+
+ /*
+ * period:
+ * Check whether sync period is too short. If too short,
+ * fall back to async mode. If it's ok, then investigate
+ * the received sync period. If sync period is acceptable
+ * between sync table start_period and end_period, then
+ * set this I_T nexus as sent offset and period.
+ * If it's not acceptable, send back reject and fall back
+ * to async mode.
+ */
+ if (get_period < data->synct[0].period_num) {
+ /*
+ * Negotiation is failed, the target send back
+ * unexpected period value.
+ */
+ goto reject;
+ }
+
+ entry = nsp32_search_period_entry(data, target, get_period);
+
+ if (entry < 0) {
+ /*
+ * Target want to use long period which is not
+ * acceptable NinjaSCSI-32Bi/UDE.
+ */
+ goto reject;
+ }
+
+ /*
+ * Set new sync table and offset in this I_T nexus.
+ */
+ nsp32_set_sync_entry(data, target, entry, get_offset);
+ } else {
+ /* Target send SDTR to initiator. */
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");
+
+ target->sync_flag |= SDTR_INITIATOR;
+
+ /* offset: */
+ if (get_offset > SYNC_OFFSET) {
+ /* send back as SYNC_OFFSET */
+ get_offset = SYNC_OFFSET;
+ }
+
+ /* period: */
+ if (get_period < data->synct[0].period_num) {
+ get_period = data->synct[0].period_num;
+ }
+
+ entry = nsp32_search_period_entry(data, target, get_period);
+
+ if (get_offset == ASYNC_OFFSET || entry < 0) {
+ nsp32_set_async(data, target);
+ nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET);
+ } else {
+ nsp32_set_sync_entry(data, target, entry, get_offset);
+ nsp32_build_sdtr(SCpnt, get_period, get_offset);
+ }
+ }
+
+ target->period = get_period;
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
+ return;
+
+ reject:
+ /*
+ * If the current message is unacceptable, send back to the target
+ * with reject message.
+ */
+ nsp32_build_reject(SCpnt);
+
+ async:
+ nsp32_set_async(data, target); /* set as ASYNC transfer mode */
+
+ target->period = 0;
+ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async");
+ return;
+}
+
+
+/*
+ * Search config entry number matched in sync_table from given
+ * target and speed period value. If failed to search, return negative value.
+ */
+static int nsp32_search_period_entry(nsp32_hw_data *data,
+ nsp32_target *target,
+ unsigned char period)
+{
+ int i;
+
+ if (target->limit_entry >= data->syncnum) {
+ nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!");
+ target->limit_entry = 0;
+ }
+
+ for (i = target->limit_entry; i < data->syncnum; i++) {
+ if (period >= data->synct[i].start_period &&
+ period <= data->synct[i].end_period) {
+ break;
+ }
+ }
+
+ /*
+ * Check given period value is over the sync_table value.
+ * If so, return max value.
+ */
+ if (i == data->syncnum) {
+ i = -1;
+ }
+
+ return i;
+}
+
+
+/*
+ * target <-> initiator use ASYNC transfer
+ */
+static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target)
+{
+ unsigned char period = data->synct[target->limit_entry].period_num;
+
+ target->offset = ASYNC_OFFSET;
+ target->period = 0;
+ target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET);
+ target->ackwidth = 0;
+ target->sample_reg = 0;
+
+ nsp32_dbg(NSP32_DEBUG_SYNC, "set async");
+}
+
+
+/*
+ * target <-> initiator use maximum SYNC transfer
+ */
+static void nsp32_set_max_sync(nsp32_hw_data *data,
+ nsp32_target *target,
+ unsigned char *period,
+ unsigned char *offset)
+{
+ unsigned char period_num, ackwidth;
+
+ period_num = data->synct[target->limit_entry].period_num;
+ *period = data->synct[target->limit_entry].start_period;
+ ackwidth = data->synct[target->limit_entry].ackwidth;
+ *offset = SYNC_OFFSET;
+
+ target->syncreg = TO_SYNCREG(period_num, *offset);
+ target->ackwidth = ackwidth;
+ target->offset = *offset;
+ target->sample_reg = 0; /* disable SREQ sampling */
+}
+
+
+/*
+ * target <-> initiator use entry number speed
+ */
+static void nsp32_set_sync_entry(nsp32_hw_data *data,
+ nsp32_target *target,
+ int entry,
+ unsigned char offset)
+{
+ unsigned char period, ackwidth, sample_rate;
+
+ period = data->synct[entry].period_num;
+ ackwidth = data->synct[entry].ackwidth;
+ offset = offset;
+ sample_rate = data->synct[entry].sample_rate;
+
+ target->syncreg = TO_SYNCREG(period, offset);
+ target->ackwidth = ackwidth;
+ target->offset = offset;
+ target->sample_reg = sample_rate | SAMPLING_ENABLE;
+
+ nsp32_dbg(NSP32_DEBUG_SYNC, "set sync");
+}
+
+
+/*
+ * It waits until SCSI REQ becomes assertion or negation state.
+ *
+ * Note: If nsp32_msgin_occur is called, we asserts SCSI ACK. Then
+ * connected target responds SCSI REQ negation. We have to wait
+ * SCSI REQ becomes negation in order to negate SCSI ACK signal for
+ * REQ-ACK handshake.
+ */
+static void nsp32_wait_req(nsp32_hw_data *data, int state)
+{
+ unsigned int base = data->BaseAddress;
+ int wait_time = 0;
+ unsigned char bus, req_bit;
+
+ if (!((state == ASSERT) || (state == NEGATE))) {
+ nsp32_msg(KERN_ERR, "unknown state designation");
+ }
+ /* REQ is BIT(5) */
+ req_bit = (state == ASSERT ? BUSMON_REQ : 0);
+
+ do {
+ bus = nsp32_read1(base, SCSI_BUS_MONITOR);
+ if ((bus & BUSMON_REQ) == req_bit) {
+ nsp32_dbg(NSP32_DEBUG_WAIT,
+ "wait_time: %d", wait_time);
+ return;
+ }
+ udelay(1);
+ wait_time++;
+ } while (wait_time < REQSACK_TIMEOUT_TIME);
+
+ nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", req_bit);
+}
+
+/*
+ * It waits until SCSI SACK becomes assertion or negation state.
+ */
+static void nsp32_wait_sack(nsp32_hw_data *data, int state)
+{
+ unsigned int base = data->BaseAddress;
+ int wait_time = 0;
+ unsigned char bus, ack_bit;
+
+ if (!((state == ASSERT) || (state == NEGATE))) {
+ nsp32_msg(KERN_ERR, "unknown state designation");
+ }
+ /* ACK is BIT(4) */
+ ack_bit = (state == ASSERT ? BUSMON_ACK : 0);
+
+ do {
+ bus = nsp32_read1(base, SCSI_BUS_MONITOR);
+ if ((bus & BUSMON_ACK) == ack_bit) {
+ nsp32_dbg(NSP32_DEBUG_WAIT,
+ "wait_time: %d", wait_time);
+ return;
+ }
+ udelay(1);
+ wait_time++;
+ } while (wait_time < REQSACK_TIMEOUT_TIME);
+
+ nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", ack_bit);
+}
+
+/*
+ * assert SCSI ACK
+ *
+ * Note: SCSI ACK assertion needs with ACKENB=1, AUTODIRECTION=1.
+ */
+static void nsp32_sack_assert(nsp32_hw_data *data)
+{
+ unsigned int base = data->BaseAddress;
+ unsigned char busctrl;
+
+ busctrl = nsp32_read1(base, SCSI_BUS_CONTROL);
+ busctrl |= (BUSCTL_ACK | AUTODIRECTION | ACKENB);
+ nsp32_write1(base, SCSI_BUS_CONTROL, busctrl);
+}
+
+/*
+ * negate SCSI ACK
+ */
+static void nsp32_sack_negate(nsp32_hw_data *data)
+{
+ unsigned int base = data->BaseAddress;
+ unsigned char busctrl;
+
+ busctrl = nsp32_read1(base, SCSI_BUS_CONTROL);
+ busctrl &= ~BUSCTL_ACK;
+ nsp32_write1(base, SCSI_BUS_CONTROL, busctrl);
+}
+
+
+
+/*
+ * Note: n_io_port is defined as 0x7f because I/O register port is
+ * assigned as:
+ * 0x800-0x8ff: memory mapped I/O port
+ * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
+ * 0xc00-0xfff: CardBus status registers
+ */
+static int nsp32_detect(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host; /* registered host structure */
+ struct resource *res;
+ nsp32_hw_data *data;
+ int ret;
+ int i, j;
+
+ nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
+
+ /*
+ * register this HBA as SCSI device
+ */
+ host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
+ if (host == NULL) {
+ nsp32_msg (KERN_ERR, "failed to scsi register");
+ goto err;
+ }
+
+ /*
+ * set nsp32_hw_data
+ */
+ data = (nsp32_hw_data *)host->hostdata;
+
+ memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data));
+
+ host->irq = data->IrqNumber;
+ host->io_port = data->BaseAddress;
+ host->unique_id = data->BaseAddress;
+ host->n_io_port = data->NumAddress;
+ host->base = (unsigned long)data->MmioAddress;
+
+ data->Host = host;
+ spin_lock_init(&(data->Lock));
+
+ data->cur_lunt = NULL;
+ data->cur_target = NULL;
+
+ /*
+ * Bus master transfer mode is supported currently.
+ */
+ data->trans_method = NSP32_TRANSFER_BUSMASTER;
+
+ /*
+ * Set clock div, CLOCK_4 (HBA has own external clock, and
+ * dividing * 100ns/4).
+ * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet.
+ */
+ data->clock = CLOCK_4;
+
+ /*
+ * Select appropriate nsp32_sync_table and set I_CLOCKDIV.
+ */
+ switch (data->clock) {
+ case CLOCK_4:
+ /* If data->clock is CLOCK_4, then select 40M sync table. */
+ data->synct = nsp32_sync_table_40M;
+ data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
+ break;
+ case CLOCK_2:
+ /* If data->clock is CLOCK_2, then select 20M sync table. */
+ data->synct = nsp32_sync_table_20M;
+ data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M);
+ break;
+ case PCICLK:
+ /* If data->clock is PCICLK, then select pci sync table. */
+ data->synct = nsp32_sync_table_pci;
+ data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci);
+ break;
+ default:
+ nsp32_msg(KERN_WARNING,
+ "Invalid clock div is selected, set CLOCK_4.");
+ /* Use default value CLOCK_4 */
+ data->clock = CLOCK_4;
+ data->synct = nsp32_sync_table_40M;
+ data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
+ }
+
+ /*
+ * setup nsp32_lunt
+ */
+
+ /*
+ * setup DMA
+ */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
+ goto scsi_unregister;
+ }
+
+ /*
+ * allocate autoparam DMA resource.
+ */
+ data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+ if (data->autoparam == NULL) {
+ nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
+ goto scsi_unregister;
+ }
+
+ /*
+ * allocate scatter-gather DMA resource.
+ */
+ data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
+ &(data->sg_paddr));
+ if (data->sg_list == NULL) {
+ nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
+ goto free_autoparam;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data->lunt); i++) {
+ for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) {
+ int offset = i * ARRAY_SIZE(data->lunt[0]) + j;
+ nsp32_lunt tmp = {
+ .SCpnt = NULL,
+ .save_datp = 0,
+ .msgin03 = FALSE,
+ .sg_num = 0,
+ .cur_entry = 0,
+ .sglun = &(data->sg_list[offset]),
+ .sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)),
+ };
+
+ data->lunt[i][j] = tmp;
+ }
+ }
+
+ /*
+ * setup target
+ */
+ for (i = 0; i < ARRAY_SIZE(data->target); i++) {
+ nsp32_target *target = &(data->target[i]);
+
+ target->limit_entry = 0;
+ target->sync_flag = 0;
+ nsp32_set_async(data, target);
+ }
+
+ /*
+ * EEPROM check
+ */
+ ret = nsp32_getprom_param(data);
+ if (ret == FALSE) {
+ data->resettime = 3; /* default 3 */
+ }
+
+ /*
+ * setup HBA
+ */
+ nsp32hw_init(data);
+
+ snprintf(data->info_str, sizeof(data->info_str),
+ "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x",
+ host->irq, host->io_port, host->n_io_port);
+
+ /*
+ * SCSI bus reset
+ *
+ * Note: It's important to reset SCSI bus in initialization phase.
+ * NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when
+ * system is coming up, so SCSI devices connected to HBA is set as
+ * un-asynchronous mode. It brings the merit that this HBA is
+ * ready to start synchronous transfer without any preparation,
+ * but we are difficult to control transfer speed. In addition,
+ * it prevents device transfer speed from effecting EEPROM start-up
+ * SDTR. NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as
+ * Auto Mode, then FAST-10M is selected when SCSI devices are
+ * connected same or more than 4 devices. It should be avoided
+ * depending on this specification. Thus, resetting the SCSI bus
+ * restores all connected SCSI devices to asynchronous mode, then
+ * this driver set SDTR safely later, and we can control all SCSI
+ * device transfer mode.
+ */
+ nsp32_do_bus_reset(data);
+
+ ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
+ if (ret < 0) {
+ nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
+ "SCSI PCI controller. Interrupt: %d", host->irq);
+ goto free_sg_list;
+ }
+
+ /*
+ * PCI IO register
+ */
+ res = request_region(host->io_port, host->n_io_port, "nsp32");
+ if (res == NULL) {
+ nsp32_msg(KERN_ERR,
+ "I/O region 0x%lx+0x%lx is already used",
+ data->BaseAddress, data->NumAddress);
+ goto free_irq;
+ }
+
+ ret = scsi_add_host(host, &pdev->dev);
+ if (ret) {
+ nsp32_msg(KERN_ERR, "failed to add scsi host");
+ goto free_region;
+ }
+ scsi_scan_host(host);
+ pci_set_drvdata(pdev, host);
+ return 0;
+
+ free_region:
+ release_region(host->io_port, host->n_io_port);
+
+ free_irq:
+ free_irq(host->irq, data);
+
+ free_sg_list:
+ pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
+ data->sg_list, data->sg_paddr);
+
+ free_autoparam:
+ pci_free_consistent(pdev, sizeof(nsp32_autoparam),
+ data->autoparam, data->auto_paddr);
+
+ scsi_unregister:
+ scsi_host_put(host);
+
+ err:
+ return 1;
+}
+
+static int nsp32_release(struct Scsi_Host *host)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
+
+ if (data->autoparam) {
+ pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
+ data->autoparam, data->auto_paddr);
+ }
+
+ if (data->sg_list) {
+ pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
+ data->sg_list, data->sg_paddr);
+ }
+
+ if (host->irq) {
+ free_irq(host->irq, data);
+ }
+
+ if (host->io_port && host->n_io_port) {
+ release_region(host->io_port, host->n_io_port);
+ }
+
+ if (data->MmioAddress) {
+ iounmap(data->MmioAddress);
+ }
+
+ return 0;
+}
+
+static const char *nsp32_info(struct Scsi_Host *shpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)shpnt->hostdata;
+
+ return data->info_str;
+}
+
+
+/****************************************************************************
+ * error handler
+ */
+static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+
+ nsp32_msg(KERN_WARNING, "abort");
+
+ if (data->cur_lunt->SCpnt == NULL) {
+ nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed");
+ return FAILED;
+ }
+
+ if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) {
+ /* reset SDTR negotiation */
+ data->cur_target->sync_flag = 0;
+ nsp32_set_async(data, data->cur_target);
+ }
+
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write2(base, BM_CNT, 0);
+
+ SCpnt->result = DID_ABORT << 16;
+ nsp32_scsi_done(SCpnt);
+
+ nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success");
+ return SUCCESS;
+}
+
+static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
+{
+ nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int base = SCpnt->device->host->io_port;
+
+ spin_lock_irq(SCpnt->device->host->host_lock);
+
+ nsp32_msg(KERN_INFO, "Bus Reset");
+ nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
+
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+ nsp32_do_bus_reset(data);
+ nsp32_write2(base, IRQ_CONTROL, 0);
+
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+ return SUCCESS; /* SCSI bus reset is succeeded at any time. */
+}
+
+static void nsp32_do_bus_reset(nsp32_hw_data *data)
+{
+ unsigned int base = data->BaseAddress;
+ unsigned short intrdat;
+ int i;
+
+ nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");
+
+ /*
+ * stop all transfer
+ * clear TRANSFERCONTROL_BM_START
+ * clear counter
+ */
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
+
+ /*
+ * fall back to asynchronous transfer mode
+ * initialize SDTR negotiation flag
+ */
+ for (i = 0; i < ARRAY_SIZE(data->target); i++) {
+ nsp32_target *target = &data->target[i];
+
+ target->sync_flag = 0;
+ nsp32_set_async(data, target);
+ }
+
+ /*
+ * reset SCSI bus
+ */
+ nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
+ mdelay(RESET_HOLD_TIME / 1000);
+ nsp32_write1(base, SCSI_BUS_CONTROL, 0);
+ for(i = 0; i < 5; i++) {
+ intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
+ nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
+ }
+
+ data->CurrentSC = NULL;
+}
+
+static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
+{
+ struct Scsi_Host *host = SCpnt->device->host;
+ unsigned int base = SCpnt->device->host->io_port;
+ nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
+
+ nsp32_msg(KERN_INFO, "Host Reset");
+ nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
+
+ spin_lock_irq(SCpnt->device->host->host_lock);
+
+ nsp32hw_init(data);
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+ nsp32_do_bus_reset(data);
+ nsp32_write2(base, IRQ_CONTROL, 0);
+
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+ return SUCCESS; /* Host reset is succeeded at any time. */
+}
+
+
+/**************************************************************************
+ * EEPROM handler
+ */
+
+/*
+ * getting EEPROM parameter
+ */
+static int nsp32_getprom_param(nsp32_hw_data *data)
+{
+ int vendor = data->pci_devid->vendor;
+ int device = data->pci_devid->device;
+ int ret, val, i;
+
+ /*
+ * EEPROM checking.
+ */
+ ret = nsp32_prom_read(data, 0x7e);
+ if (ret != 0x55) {
+ nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret);
+ return FALSE;
+ }
+ ret = nsp32_prom_read(data, 0x7f);
+ if (ret != 0xaa) {
+ nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret);
+ return FALSE;
+ }
+
+ /*
+ * check EEPROM type
+ */
+ if (vendor == PCI_VENDOR_ID_WORKBIT &&
+ device == PCI_DEVICE_ID_WORKBIT_STANDARD) {
+ ret = nsp32_getprom_c16(data);
+ } else if (vendor == PCI_VENDOR_ID_WORKBIT &&
+ device == PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC) {
+ ret = nsp32_getprom_at24(data);
+ } else if (vendor == PCI_VENDOR_ID_WORKBIT &&
+ device == PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO ) {
+ ret = nsp32_getprom_at24(data);
+ } else {
+ nsp32_msg(KERN_WARNING, "Unknown EEPROM");
+ ret = FALSE;
+ }
+
+ /* for debug : SPROM data full checking */
+ for (i = 0; i <= 0x1f; i++) {
+ val = nsp32_prom_read(data, i);
+ nsp32_dbg(NSP32_DEBUG_EEPROM,
+ "rom address 0x%x : 0x%x", i, val);
+ }
+
+ return ret;
+}
+
+
+/*
+ * AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
+ *
+ * ROMADDR
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
+ * 0x07 : HBA Synchronous Transfer Period
+ * Value 0: AutoSync, 1: Manual Setting
+ * 0x08 - 0x0f : Not Used? (0x0)
+ * 0x10 : Bus Termination
+ * Value 0: Auto[ON], 1: ON, 2: OFF
+ * 0x11 : Not Used? (0)
+ * 0x12 : Bus Reset Delay Time (0x03)
+ * 0x13 : Bootable CD Support
+ * Value 0: Disable, 1: Enable
+ * 0x14 : Device Scan
+ * Bit 7 6 5 4 3 2 1 0
+ * | <----------------->
+ * | SCSI ID: Value 0: Skip, 1: YES
+ * |-> Value 0: ALL scan, Value 1: Manual
+ * 0x15 - 0x1b : Not Used? (0)
+ * 0x1c : Constant? (0x01) (clock div?)
+ * 0x1d - 0x7c : Not Used (0xff)
+ * 0x7d : Not Used? (0xff)
+ * 0x7e : Constant (0x55), Validity signature
+ * 0x7f : Constant (0xaa), Validity signature
+ */
+static int nsp32_getprom_at24(nsp32_hw_data *data)
+{
+ int ret, i;
+ int auto_sync;
+ nsp32_target *target;
+ int entry;
+
+ /*
+ * Reset time which is designated by EEPROM.
+ *
+ * TODO: Not used yet.
+ */
+ data->resettime = nsp32_prom_read(data, 0x12);
+
+ /*
+ * HBA Synchronous Transfer Period
+ *
+ * Note: auto_sync = 0: auto, 1: manual. Ninja SCSI HBA spec says
+ * that if auto_sync is 0 (auto), and connected SCSI devices are
+ * same or lower than 3, then transfer speed is set as ULTRA-20M.
+ * On the contrary if connected SCSI devices are same or higher
+ * than 4, then transfer speed is set as FAST-10M.
+ *
+ * I break this rule. The number of connected SCSI devices are
+ * only ignored. If auto_sync is 0 (auto), then transfer speed is
+ * forced as ULTRA-20M.
+ */
+ ret = nsp32_prom_read(data, 0x07);
+ switch (ret) {
+ case 0:
+ auto_sync = TRUE;
+ break;
+ case 1:
+ auto_sync = FALSE;
+ break;
+ default:
+ nsp32_msg(KERN_WARNING,
+ "Unsupported Auto Sync mode. Fall back to manual mode.");
+ auto_sync = TRUE;
+ }
+
+ if (trans_mode == ULTRA20M_MODE) {
+ auto_sync = TRUE;
+ }
+
+ /*
+ * each device Synchronous Transfer Period
+ */
+ for (i = 0; i < NSP32_HOST_SCSIID; i++) {
+ target = &data->target[i];
+ if (auto_sync == TRUE) {
+ target->limit_entry = 0; /* set as ULTRA20M */
+ } else {
+ ret = nsp32_prom_read(data, i);
+ entry = nsp32_search_period_entry(data, target, ret);
+ if (entry < 0) {
+ /* search failed... set maximum speed */
+ entry = 0;
+ }
+ target->limit_entry = entry;
+ }
+ }
+
+ return TRUE;
+}
+
+
+/*
+ * C16 110 (I-O Data: SC-NBD) data map:
+ *
+ * ROMADDR
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
+ * 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
+ * 0x08 - 0x0f : Not Used? (0x0)
+ * 0x10 : Transfer Mode
+ * Value 0: PIO, 1: Busmater
+ * 0x11 : Bus Reset Delay Time (0x00-0x20)
+ * 0x12 : Bus Termination
+ * Value 0: Disable, 1: Enable
+ * 0x13 - 0x19 : Disconnection
+ * Value 0: Disable, 1: Enable
+ * 0x1a - 0x7c : Not Used? (0)
+ * 0x7d : Not Used? (0xf8)
+ * 0x7e : Constant (0x55), Validity signature
+ * 0x7f : Constant (0xaa), Validity signature
+ */
+static int nsp32_getprom_c16(nsp32_hw_data *data)
+{
+ int ret, i;
+ nsp32_target *target;
+ int entry, val;
+
+ /*
+ * Reset time which is designated by EEPROM.
+ *
+ * TODO: Not used yet.
+ */
+ data->resettime = nsp32_prom_read(data, 0x11);
+
+ /*
+ * each device Synchronous Transfer Period
+ */
+ for (i = 0; i < NSP32_HOST_SCSIID; i++) {
+ target = &data->target[i];
+ ret = nsp32_prom_read(data, i);
+ switch (ret) {
+ case 0: /* 20MB/s */
+ val = 0x0c;
+ break;
+ case 1: /* 10MB/s */
+ val = 0x19;
+ break;
+ case 2: /* 5MB/s */
+ val = 0x32;
+ break;
+ case 3: /* ASYNC */
+ val = 0x00;
+ break;
+ default: /* default 20MB/s */
+ val = 0x0c;
+ break;
+ }
+ entry = nsp32_search_period_entry(data, target, val);
+ if (entry < 0 || trans_mode == ULTRA20M_MODE) {
+ /* search failed... set maximum speed */
+ entry = 0;
+ }
+ target->limit_entry = entry;
+ }
+
+ return TRUE;
+}
+
+
+/*
+ * Atmel AT24C01A (drived in 5V) serial EEPROM routines
+ */
+static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
+{
+ int i, val;
+
+ /* start condition */
+ nsp32_prom_start(data);
+
+ /* device address */
+ nsp32_prom_write_bit(data, 1); /* 1 */
+ nsp32_prom_write_bit(data, 0); /* 0 */
+ nsp32_prom_write_bit(data, 1); /* 1 */
+ nsp32_prom_write_bit(data, 0); /* 0 */
+ nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */
+ nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */
+ nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */
+
+ /* R/W: W for dummy write */
+ nsp32_prom_write_bit(data, 0);
+
+ /* ack */
+ nsp32_prom_write_bit(data, 0);
+
+ /* word address */
+ for (i = 7; i >= 0; i--) {
+ nsp32_prom_write_bit(data, ((romaddr >> i) & 1));
+ }
+
+ /* ack */
+ nsp32_prom_write_bit(data, 0);
+
+ /* start condition */
+ nsp32_prom_start(data);
+
+ /* device address */
+ nsp32_prom_write_bit(data, 1); /* 1 */
+ nsp32_prom_write_bit(data, 0); /* 0 */
+ nsp32_prom_write_bit(data, 1); /* 1 */
+ nsp32_prom_write_bit(data, 0); /* 0 */
+ nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */
+ nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */
+ nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */
+
+ /* R/W: R */
+ nsp32_prom_write_bit(data, 1);
+
+ /* ack */
+ nsp32_prom_write_bit(data, 0);
+
+ /* data... */
+ val = 0;
+ for (i = 7; i >= 0; i--) {
+ val += (nsp32_prom_read_bit(data) << i);
+ }
+
+ /* no ack */
+ nsp32_prom_write_bit(data, 1);
+
+ /* stop condition */
+ nsp32_prom_stop(data);
+
+ return val;
+}
+
+static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val)
+{
+ int base = data->BaseAddress;
+ int tmp;
+
+ tmp = nsp32_index_read1(base, SERIAL_ROM_CTL);
+
+ if (val == 0) {
+ tmp &= ~bit;
+ } else {
+ tmp |= bit;
+ }
+
+ nsp32_index_write1(base, SERIAL_ROM_CTL, tmp);
+
+ udelay(10);
+}
+
+static int nsp32_prom_get(nsp32_hw_data *data, int bit)
+{
+ int base = data->BaseAddress;
+ int tmp, ret;
+
+ if (bit != SDA) {
+ nsp32_msg(KERN_ERR, "return value is not appropriate");
+ return 0;
+ }
+
+
+ tmp = nsp32_index_read1(base, SERIAL_ROM_CTL) & bit;
+
+ if (tmp == 0) {
+ ret = 0;
+ } else {
+ ret = 1;
+ }
+
+ udelay(10);
+
+ return ret;
+}
+
+static void nsp32_prom_start (nsp32_hw_data *data)
+{
+ /* start condition */
+ nsp32_prom_set(data, SCL, 1);
+ nsp32_prom_set(data, SDA, 1);
+ nsp32_prom_set(data, ENA, 1); /* output mode */
+ nsp32_prom_set(data, SDA, 0); /* keeping SCL=1 and transiting
+ * SDA 1->0 is start condition */
+ nsp32_prom_set(data, SCL, 0);
+}
+
+static void nsp32_prom_stop (nsp32_hw_data *data)
+{
+ /* stop condition */
+ nsp32_prom_set(data, SCL, 1);
+ nsp32_prom_set(data, SDA, 0);
+ nsp32_prom_set(data, ENA, 1); /* output mode */
+ nsp32_prom_set(data, SDA, 1);
+ nsp32_prom_set(data, SCL, 0);
+}
+
+static void nsp32_prom_write_bit(nsp32_hw_data *data, int val)
+{
+ /* write */
+ nsp32_prom_set(data, SDA, val);
+ nsp32_prom_set(data, SCL, 1 );
+ nsp32_prom_set(data, SCL, 0 );
+}
+
+static int nsp32_prom_read_bit(nsp32_hw_data *data)
+{
+ int val;
+
+ /* read */
+ nsp32_prom_set(data, ENA, 0); /* input mode */
+ nsp32_prom_set(data, SCL, 1);
+
+ val = nsp32_prom_get(data, SDA);
+
+ nsp32_prom_set(data, SCL, 0);
+ nsp32_prom_set(data, ENA, 1); /* output mode */
+
+ return val;
+}
+
+
+/**************************************************************************
+ * Power Management
+ */
+#ifdef CONFIG_PM
+
+/* Device suspended */
+static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+
+ nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host);
+
+ pci_save_state (pdev);
+ pci_disable_device (pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+/* Device woken up */
+static int nsp32_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
+ unsigned short reg;
+
+ nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake (pdev, PCI_D0, 0);
+ pci_restore_state (pdev);
+
+ reg = nsp32_read2(data->BaseAddress, INDEX_REG);
+
+ nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg);
+
+ if (reg == 0xffff) {
+ nsp32_msg(KERN_INFO, "missing device. abort resume.");
+ return 0;
+ }
+
+ nsp32hw_init (data);
+ nsp32_do_bus_reset(data);
+
+ nsp32_msg(KERN_INFO, "resume success");
+
+ return 0;
+}
+
+#endif
+
+/************************************************************************
+ * PCI/Cardbus probe/remove routine
+ */
+static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ nsp32_hw_data *data = &nsp32_data_base;
+
+ nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ nsp32_msg(KERN_ERR, "failed to enable pci device");
+ return ret;
+ }
+
+ data->Pci = pdev;
+ data->pci_devid = id;
+ data->IrqNumber = pdev->irq;
+ data->BaseAddress = pci_resource_start(pdev, 0);
+ data->NumAddress = pci_resource_len (pdev, 0);
+ data->MmioAddress = pci_ioremap_bar(pdev, 1);
+ data->MmioLength = pci_resource_len (pdev, 1);
+
+ pci_set_master(pdev);
+
+ ret = nsp32_detect(pdev);
+
+ nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
+ pdev->irq,
+ data->MmioAddress, data->MmioLength,
+ pci_name(pdev),
+ nsp32_model[id->driver_data]);
+
+ nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret);
+
+ return ret;
+}
+
+static void nsp32_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+
+ nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
+
+ scsi_remove_host(host);
+
+ nsp32_release(host);
+
+ scsi_host_put(host);
+}
+
+static struct pci_driver nsp32_driver = {
+ .name = "nsp32",
+ .id_table = nsp32_pci_table,
+ .probe = nsp32_probe,
+ .remove = nsp32_remove,
+#ifdef CONFIG_PM
+ .suspend = nsp32_suspend,
+ .resume = nsp32_resume,
+#endif
+};
+
+/*********************************************************************
+ * Moule entry point
+ */
+static int __init init_nsp32(void) {
+ nsp32_msg(KERN_INFO, "loading...");
+ return pci_register_driver(&nsp32_driver);
+}
+
+static void __exit exit_nsp32(void) {
+ nsp32_msg(KERN_INFO, "unloading...");
+ pci_unregister_driver(&nsp32_driver);
+}
+
+module_init(init_nsp32);
+module_exit(exit_nsp32);
+
+/* end */
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
new file mode 100644
index 000000000..c02218290
--- /dev/null
+++ b/drivers/scsi/nsp32.h
@@ -0,0 +1,617 @@
+/*
+ * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver
+ * Basic data header
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#ifndef _NSP32_H
+#define _NSP32_H
+
+//#define NSP32_DEBUG 9
+
+/*
+ * VENDOR/DEVICE ID
+ */
+#define PCI_VENDOR_ID_IODATA 0x10fc
+#define PCI_VENDOR_ID_WORKBIT 0x1145
+
+#define PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II 0x0005
+#define PCI_DEVICE_ID_NINJASCSI_32BI_KME 0xf007
+#define PCI_DEVICE_ID_NINJASCSI_32BI_WBT 0x8007
+#define PCI_DEVICE_ID_WORKBIT_STANDARD 0xf010
+#define PCI_DEVICE_ID_WORKBIT_DUALEDGE 0xf011
+#define PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC 0xf012
+#define PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC 0xf013
+#define PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO 0xf015
+#define PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II 0x8009
+
+/*
+ * MODEL
+ */
+enum {
+ MODEL_IODATA = 0,
+ MODEL_KME = 1,
+ MODEL_WORKBIT = 2,
+ MODEL_LOGITEC = 3,
+ MODEL_PCI_WORKBIT = 4,
+ MODEL_PCI_LOGITEC = 5,
+ MODEL_PCI_MELCO = 6,
+};
+
+static char * nsp32_model[] = {
+ "I-O DATA CBSC-II CardBus card",
+ "KME SCSI CardBus card",
+ "Workbit duo SCSI CardBus card",
+ "Logitec CardBus card with external ROM",
+ "Workbit / I-O DATA PCI card",
+ "Logitec PCI card with external ROM",
+ "Melco CardBus/PCI card with external ROM",
+};
+
+
+/*
+ * SCSI Generic Definitions
+ */
+#define EXTENDED_SDTR_LEN 0x03
+
+/* Little Endian */
+typedef u32 u32_le;
+typedef u16 u16_le;
+
+/*
+ * BASIC Definitions
+ */
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+#define ASSERT 1
+#define NEGATE 0
+
+
+/*******************/
+/* normal register */
+/*******************/
+/*
+ * Don't access below register with Double Word:
+ * +00, +04, +08, +0c, +64, +80, +84, +88, +90, +c4, +c8, +cc, +d0.
+ */
+#define IRQ_CONTROL 0x00 /* BASE+00, W, W */
+#define IRQ_STATUS 0x00 /* BASE+00, W, R */
+# define IRQSTATUS_LATCHED_MSG BIT(0)
+# define IRQSTATUS_LATCHED_IO BIT(1)
+# define IRQSTATUS_LATCHED_CD BIT(2)
+# define IRQSTATUS_LATCHED_BUS_FREE BIT(3)
+# define IRQSTATUS_RESELECT_OCCUER BIT(4)
+# define IRQSTATUS_PHASE_CHANGE_IRQ BIT(5)
+# define IRQSTATUS_SCSIRESET_IRQ BIT(6)
+# define IRQSTATUS_TIMER_IRQ BIT(7)
+# define IRQSTATUS_FIFO_SHLD_IRQ BIT(8)
+# define IRQSTATUS_PCI_IRQ BIT(9)
+# define IRQSTATUS_BMCNTERR_IRQ BIT(10)
+# define IRQSTATUS_AUTOSCSI_IRQ BIT(11)
+# define PCI_IRQ_MASK BIT(12)
+# define TIMER_IRQ_MASK BIT(13)
+# define FIFO_IRQ_MASK BIT(14)
+# define SCSI_IRQ_MASK BIT(15)
+# define IRQ_CONTROL_ALL_IRQ_MASK (PCI_IRQ_MASK | \
+ TIMER_IRQ_MASK | \
+ FIFO_IRQ_MASK | \
+ SCSI_IRQ_MASK )
+# define IRQSTATUS_ANY_IRQ (IRQSTATUS_RESELECT_OCCUER | \
+ IRQSTATUS_PHASE_CHANGE_IRQ | \
+ IRQSTATUS_SCSIRESET_IRQ | \
+ IRQSTATUS_TIMER_IRQ | \
+ IRQSTATUS_FIFO_SHLD_IRQ | \
+ IRQSTATUS_PCI_IRQ | \
+ IRQSTATUS_BMCNTERR_IRQ | \
+ IRQSTATUS_AUTOSCSI_IRQ )
+
+#define TRANSFER_CONTROL 0x02 /* BASE+02, W, W */
+#define TRANSFER_STATUS 0x02 /* BASE+02, W, R */
+# define CB_MMIO_MODE BIT(0)
+# define CB_IO_MODE BIT(1)
+# define BM_TEST BIT(2)
+# define BM_TEST_DIR BIT(3)
+# define DUAL_EDGE_ENABLE BIT(4)
+# define NO_TRANSFER_TO_HOST BIT(5)
+# define TRANSFER_GO BIT(7)
+# define BLIEND_MODE BIT(8)
+# define BM_START BIT(9)
+# define ADVANCED_BM_WRITE BIT(10)
+# define BM_SINGLE_MODE BIT(11)
+# define FIFO_TRUE_FULL BIT(12)
+# define FIFO_TRUE_EMPTY BIT(13)
+# define ALL_COUNTER_CLR BIT(14)
+# define FIFOTEST BIT(15)
+
+#define INDEX_REG 0x04 /* BASE+04, Byte(R/W), Word(R) */
+
+#define TIMER_SET 0x06 /* BASE+06, W, R/W */
+# define TIMER_CNT_MASK (0xff)
+# define TIMER_STOP BIT(8)
+
+#define DATA_REG_LOW 0x08 /* BASE+08, LowW, R/W */
+#define DATA_REG_HI 0x0a /* BASE+0a, Hi-W, R/W */
+
+#define FIFO_REST_CNT 0x0c /* BASE+0c, W, R/W */
+# define FIFO_REST_MASK 0x1ff
+# define FIFO_EMPTY_SHLD_FLAG BIT(14)
+# define FIFO_FULL_SHLD_FLAG BIT(15)
+
+#define SREQ_SMPL_RATE 0x0f /* BASE+0f, B, R/W */
+# define SREQSMPLRATE_RATE0 BIT(0)
+# define SREQSMPLRATE_RATE1 BIT(1)
+# define SAMPLING_ENABLE BIT(2)
+# define SMPL_40M (0) /* 40MHz: 0-100ns/period */
+# define SMPL_20M (SREQSMPLRATE_RATE0) /* 20MHz: 100-200ns/period */
+# define SMPL_10M (SREQSMPLRATE_RATE1) /* 10Mhz: 200- ns/period */
+
+#define SCSI_BUS_CONTROL 0x10 /* BASE+10, B, R/W */
+# define BUSCTL_SEL BIT(0)
+# define BUSCTL_RST BIT(1)
+# define BUSCTL_DATAOUT_ENB BIT(2)
+# define BUSCTL_ATN BIT(3)
+# define BUSCTL_ACK BIT(4)
+# define BUSCTL_BSY BIT(5)
+# define AUTODIRECTION BIT(6)
+# define ACKENB BIT(7)
+
+#define CLR_COUNTER 0x12 /* BASE+12, B, W */
+# define ACK_COUNTER_CLR BIT(0)
+# define SREQ_COUNTER_CLR BIT(1)
+# define FIFO_HOST_POINTER_CLR BIT(2)
+# define FIFO_REST_COUNT_CLR BIT(3)
+# define BM_COUNTER_CLR BIT(4)
+# define SAVED_ACK_CLR BIT(5)
+# define CLRCOUNTER_ALLMASK (ACK_COUNTER_CLR | \
+ SREQ_COUNTER_CLR | \
+ FIFO_HOST_POINTER_CLR | \
+ FIFO_REST_COUNT_CLR | \
+ BM_COUNTER_CLR | \
+ SAVED_ACK_CLR )
+
+#define SCSI_BUS_MONITOR 0x12 /* BASE+12, B, R */
+# define BUSMON_MSG BIT(0)
+# define BUSMON_IO BIT(1)
+# define BUSMON_CD BIT(2)
+# define BUSMON_BSY BIT(3)
+# define BUSMON_ACK BIT(4)
+# define BUSMON_REQ BIT(5)
+# define BUSMON_SEL BIT(6)
+# define BUSMON_ATN BIT(7)
+
+#define COMMAND_DATA 0x14 /* BASE+14, B, R/W */
+
+#define PARITY_CONTROL 0x16 /* BASE+16, B, W */
+# define PARITY_CHECK_ENABLE BIT(0)
+# define PARITY_ERROR_CLEAR BIT(1)
+#define PARITY_STATUS 0x16 /* BASE+16, B, R */
+//# define PARITY_CHECK_ENABLE BIT(0)
+# define PARITY_ERROR_NORMAL BIT(1)
+# define PARITY_ERROR_LSB BIT(1)
+# define PARITY_ERROR_MSB BIT(2)
+
+#define RESELECT_ID 0x18 /* BASE+18, B, R */
+
+#define COMMAND_CONTROL 0x18 /* BASE+18, W, W */
+# define CLEAR_CDB_FIFO_POINTER BIT(0)
+# define AUTO_COMMAND_PHASE BIT(1)
+# define AUTOSCSI_START BIT(2)
+# define AUTOSCSI_RESTART BIT(3)
+# define AUTO_PARAMETER BIT(4)
+# define AUTO_ATN BIT(5)
+# define AUTO_MSGIN_00_OR_04 BIT(6)
+# define AUTO_MSGIN_02 BIT(7)
+# define AUTO_MSGIN_03 BIT(8)
+
+#define SET_ARBIT 0x1a /* BASE+1a, B, W */
+# define ARBIT_GO BIT(0)
+# define ARBIT_CLEAR BIT(1)
+
+#define ARBIT_STATUS 0x1a /* BASE+1a, B, R */
+//# define ARBIT_GO BIT(0)
+# define ARBIT_WIN BIT(1)
+# define ARBIT_FAIL BIT(2)
+# define AUTO_PARAMETER_VALID BIT(3)
+# define SGT_VALID BIT(4)
+
+#define SYNC_REG 0x1c /* BASE+1c, B, R/W */
+
+#define ACK_WIDTH 0x1d /* BASE+1d, B, R/W */
+
+#define SCSI_DATA_WITH_ACK 0x20 /* BASE+20, B, R/W */
+#define SCSI_OUT_LATCH_TARGET_ID 0x22 /* BASE+22, B, W */
+#define SCSI_DATA_IN 0x22 /* BASE+22, B, R */
+
+#define SCAM_CONTROL 0x24 /* BASE+24, B, W */
+#define SCAM_STATUS 0x24 /* BASE+24, B, R */
+# define SCAM_MSG BIT(0)
+# define SCAM_IO BIT(1)
+# define SCAM_CD BIT(2)
+# define SCAM_BSY BIT(3)
+# define SCAM_SEL BIT(4)
+# define SCAM_XFEROK BIT(5)
+
+#define SCAM_DATA 0x26 /* BASE+26, B, R/W */
+# define SD0 BIT(0)
+# define SD1 BIT(1)
+# define SD2 BIT(2)
+# define SD3 BIT(3)
+# define SD4 BIT(4)
+# define SD5 BIT(5)
+# define SD6 BIT(6)
+# define SD7 BIT(7)
+
+#define SACK_CNT 0x28 /* BASE+28, DW, R/W */
+#define SREQ_CNT 0x2c /* BASE+2c, DW, R/W */
+
+#define FIFO_DATA_LOW 0x30 /* BASE+30, B/W/DW, R/W */
+#define FIFO_DATA_HIGH 0x32 /* BASE+32, B/W, R/W */
+#define BM_START_ADR 0x34 /* BASE+34, DW, R/W */
+
+#define BM_CNT 0x38 /* BASE+38, DW, R/W */
+# define BM_COUNT_MASK 0x0001ffffUL
+# define SGTEND BIT(31) /* Last SGT marker */
+
+#define SGT_ADR 0x3c /* BASE+3c, DW, R/W */
+#define WAIT_REG 0x40 /* Bi only */
+
+#define SCSI_EXECUTE_PHASE 0x40 /* BASE+40, W, R */
+# define COMMAND_PHASE BIT(0)
+# define DATA_IN_PHASE BIT(1)
+# define DATA_OUT_PHASE BIT(2)
+# define MSGOUT_PHASE BIT(3)
+# define STATUS_PHASE BIT(4)
+# define ILLEGAL_PHASE BIT(5)
+# define BUS_FREE_OCCUER BIT(6)
+# define MSG_IN_OCCUER BIT(7)
+# define MSG_OUT_OCCUER BIT(8)
+# define SELECTION_TIMEOUT BIT(9)
+# define MSGIN_00_VALID BIT(10)
+# define MSGIN_02_VALID BIT(11)
+# define MSGIN_03_VALID BIT(12)
+# define MSGIN_04_VALID BIT(13)
+# define AUTOSCSI_BUSY BIT(15)
+
+#define SCSI_CSB_IN 0x42 /* BASE+42, B, R */
+
+#define SCSI_MSG_OUT 0x44 /* BASE+44, DW, R/W */
+# define MSGOUT_COUNT_MASK (BIT(0)|BIT(1))
+# define MV_VALID BIT(7)
+
+#define SEL_TIME_OUT 0x48 /* BASE+48, W, R/W */
+#define SAVED_SACK_CNT 0x4c /* BASE+4c, DW, R */
+
+#define HTOSDATADELAY 0x50 /* BASE+50, B, R/W */
+#define STOHDATADELAY 0x54 /* BASE+54, B, R/W */
+#define ACKSUMCHECKRD 0x58 /* BASE+58, W, R */
+#define REQSUMCHECKRD 0x5c /* BASE+5c, W, R */
+
+
+/********************/
+/* indexed register */
+/********************/
+
+#define CLOCK_DIV 0x00 /* BASE+08, IDX+00, B, R/W */
+# define CLOCK_2 BIT(0) /* MCLK/2 */
+# define CLOCK_4 BIT(1) /* MCLK/4 */
+# define PCICLK BIT(7) /* PCICLK (33MHz) */
+
+#define TERM_PWR_CONTROL 0x01 /* BASE+08, IDX+01, B, R/W */
+# define BPWR BIT(0)
+# define SENSE BIT(1) /* Read Only */
+
+#define EXT_PORT_DDR 0x02 /* BASE+08, IDX+02, B, R/W */
+#define EXT_PORT 0x03 /* BASE+08, IDX+03, B, R/W */
+# define LED_ON (0)
+# define LED_OFF BIT(0)
+
+#define IRQ_SELECT 0x04 /* BASE+08, IDX+04, W, R/W */
+# define IRQSELECT_RESELECT_IRQ BIT(0)
+# define IRQSELECT_PHASE_CHANGE_IRQ BIT(1)
+# define IRQSELECT_SCSIRESET_IRQ BIT(2)
+# define IRQSELECT_TIMER_IRQ BIT(3)
+# define IRQSELECT_FIFO_SHLD_IRQ BIT(4)
+# define IRQSELECT_TARGET_ABORT_IRQ BIT(5)
+# define IRQSELECT_MASTER_ABORT_IRQ BIT(6)
+# define IRQSELECT_SERR_IRQ BIT(7)
+# define IRQSELECT_PERR_IRQ BIT(8)
+# define IRQSELECT_BMCNTERR_IRQ BIT(9)
+# define IRQSELECT_AUTO_SCSI_SEQ_IRQ BIT(10)
+
+#define OLD_SCSI_PHASE 0x05 /* BASE+08, IDX+05, B, R */
+# define OLD_MSG BIT(0)
+# define OLD_IO BIT(1)
+# define OLD_CD BIT(2)
+# define OLD_BUSY BIT(3)
+
+#define FIFO_FULL_SHLD_COUNT 0x06 /* BASE+08, IDX+06, B, R/W */
+#define FIFO_EMPTY_SHLD_COUNT 0x07 /* BASE+08, IDX+07, B, R/W */
+
+#define EXP_ROM_CONTROL 0x08 /* BASE+08, IDX+08, B, R/W */ /* external ROM control */
+# define ROM_WRITE_ENB BIT(0)
+# define IO_ACCESS_ENB BIT(1)
+# define ROM_ADR_CLEAR BIT(2)
+
+#define EXP_ROM_ADR 0x09 /* BASE+08, IDX+09, W, R/W */
+
+#define EXP_ROM_DATA 0x0a /* BASE+08, IDX+0a, B, R/W */
+
+#define CHIP_MODE 0x0b /* BASE+08, IDX+0b, B, R */ /* NinjaSCSI-32Bi only */
+# define OEM0 BIT(1) /* OEM select */ /* 00=I-O DATA, 01=KME, 10=Workbit, 11=Ext ROM */
+# define OEM1 BIT(2) /* OEM select */
+# define OPTB BIT(3) /* KME mode select */
+# define OPTC BIT(4) /* KME mode select */
+# define OPTD BIT(5) /* KME mode select */
+# define OPTE BIT(6) /* KME mode select */
+# define OPTF BIT(7) /* Power management */
+
+#define MISC_WR 0x0c /* BASE+08, IDX+0c, W, R/W */
+#define MISC_RD 0x0c
+# define SCSI_DIRECTION_DETECTOR_SELECT BIT(0)
+# define SCSI2_HOST_DIRECTION_VALID BIT(1) /* Read only */
+# define HOST2_SCSI_DIRECTION_VALID BIT(2) /* Read only */
+# define DELAYED_BMSTART BIT(3)
+# define MASTER_TERMINATION_SELECT BIT(4)
+# define BMREQ_NEGATE_TIMING_SEL BIT(5)
+# define AUTOSEL_TIMING_SEL BIT(6)
+# define MISC_MABORT_MASK BIT(7)
+# define BMSTOP_CHANGE2_NONDATA_PHASE BIT(8)
+
+#define BM_CYCLE 0x0d /* BASE+08, IDX+0d, B, R/W */
+# define BM_CYCLE0 BIT(0)
+# define BM_CYCLE1 BIT(1)
+# define BM_FRAME_ASSERT_TIMING BIT(2)
+# define BM_IRDY_ASSERT_TIMING BIT(3)
+# define BM_SINGLE_BUS_MASTER BIT(4)
+# define MEMRD_CMD0 BIT(5)
+# define SGT_AUTO_PARA_MEMED_CMD BIT(6)
+# define MEMRD_CMD1 BIT(7)
+
+
+#define SREQ_EDGH 0x0e /* BASE+08, IDX+0e, B, W */
+# define SREQ_EDGH_SELECT BIT(0)
+
+#define UP_CNT 0x0f /* BASE+08, IDX+0f, B, W */
+# define REQCNT_UP BIT(0)
+# define ACKCNT_UP BIT(1)
+# define BMADR_UP BIT(4)
+# define BMCNT_UP BIT(5)
+# define SGT_CNT_UP BIT(7)
+
+#define CFG_CMD_STR 0x10 /* BASE+08, IDX+10, W, R */
+#define CFG_LATE_CACHE 0x11 /* BASE+08, IDX+11, W, R/W */
+#define CFG_BASE_ADR_1 0x12 /* BASE+08, IDX+12, W, R */
+#define CFG_BASE_ADR_2 0x13 /* BASE+08, IDX+13, W, R */
+#define CFG_INLINE 0x14 /* BASE+08, IDX+14, W, R */
+
+#define SERIAL_ROM_CTL 0x15 /* BASE+08, IDX+15, B, R */
+# define SCL BIT(0)
+# define ENA BIT(1)
+# define SDA BIT(2)
+
+#define FIFO_HST_POINTER 0x16 /* BASE+08, IDX+16, B, R/W */
+#define SREQ_DELAY 0x17 /* BASE+08, IDX+17, B, R/W */
+#define SACK_DELAY 0x18 /* BASE+08, IDX+18, B, R/W */
+#define SREQ_NOISE_CANCEL 0x19 /* BASE+08, IDX+19, B, R/W */
+#define SDP_NOISE_CANCEL 0x1a /* BASE+08, IDX+1a, B, R/W */
+#define DELAY_TEST 0x1b /* BASE+08, IDX+1b, B, R/W */
+#define SD0_NOISE_CANCEL 0x20 /* BASE+08, IDX+20, B, R/W */
+#define SD1_NOISE_CANCEL 0x21 /* BASE+08, IDX+21, B, R/W */
+#define SD2_NOISE_CANCEL 0x22 /* BASE+08, IDX+22, B, R/W */
+#define SD3_NOISE_CANCEL 0x23 /* BASE+08, IDX+23, B, R/W */
+#define SD4_NOISE_CANCEL 0x24 /* BASE+08, IDX+24, B, R/W */
+#define SD5_NOISE_CANCEL 0x25 /* BASE+08, IDX+25, B, R/W */
+#define SD6_NOISE_CANCEL 0x26 /* BASE+08, IDX+26, B, R/W */
+#define SD7_NOISE_CANCEL 0x27 /* BASE+08, IDX+27, B, R/W */
+
+
+/*
+ * Useful Bus Monitor status combinations.
+ */
+#define BUSMON_BUS_FREE 0
+#define BUSMON_COMMAND ( BUSMON_BSY | BUSMON_CD | BUSMON_REQ )
+#define BUSMON_MESSAGE_IN ( BUSMON_BSY | BUSMON_MSG | BUSMON_IO | BUSMON_CD | BUSMON_REQ )
+#define BUSMON_MESSAGE_OUT ( BUSMON_BSY | BUSMON_MSG | BUSMON_CD | BUSMON_REQ )
+#define BUSMON_DATA_IN ( BUSMON_BSY | BUSMON_IO | BUSMON_REQ )
+#define BUSMON_DATA_OUT ( BUSMON_BSY | BUSMON_REQ )
+#define BUSMON_STATUS ( BUSMON_BSY | BUSMON_IO | BUSMON_CD | BUSMON_REQ )
+#define BUSMON_RESELECT ( BUSMON_IO | BUSMON_SEL)
+#define BUSMON_PHASE_MASK ( BUSMON_MSG | BUSMON_IO | BUSMON_CD | BUSMON_SEL)
+
+#define BUSPHASE_COMMAND ( BUSMON_COMMAND & BUSMON_PHASE_MASK )
+#define BUSPHASE_MESSAGE_IN ( BUSMON_MESSAGE_IN & BUSMON_PHASE_MASK )
+#define BUSPHASE_MESSAGE_OUT ( BUSMON_MESSAGE_OUT & BUSMON_PHASE_MASK )
+#define BUSPHASE_DATA_IN ( BUSMON_DATA_IN & BUSMON_PHASE_MASK )
+#define BUSPHASE_DATA_OUT ( BUSMON_DATA_OUT & BUSMON_PHASE_MASK )
+#define BUSPHASE_STATUS ( BUSMON_STATUS & BUSMON_PHASE_MASK )
+#define BUSPHASE_SELECT ( BUSMON_SEL | BUSMON_IO )
+
+
+/************************************************************************
+ * structure for DMA/Scatter Gather list
+ */
+#define NSP32_SG_SIZE SG_ALL
+
+typedef struct _nsp32_sgtable {
+ /* values must be little endian */
+ u32_le addr; /* transfer address */
+ u32_le len; /* transfer length. BIT(31) is for SGT_END mark */
+} __attribute__ ((packed)) nsp32_sgtable;
+
+typedef struct _nsp32_sglun {
+ nsp32_sgtable sgt[NSP32_SG_SIZE+1]; /* SG table */
+} __attribute__ ((packed)) nsp32_sglun;
+#define NSP32_SG_TABLE_SIZE (sizeof(nsp32_sgtable) * NSP32_SG_SIZE * MAX_TARGET * MAX_LUN)
+
+/* Auto parameter mode memory map. */
+/* All values must be little endian. */
+typedef struct _nsp32_autoparam {
+ u8 cdb[4 * 0x10]; /* SCSI Command */
+ u32_le msgout; /* outgoing messages */
+ u8 syncreg; /* sync register value */
+ u8 ackwidth; /* ack width register value */
+ u8 target_id; /* target/host device id */
+ u8 sample_reg; /* hazard killer sampling rate */
+ u16_le command_control; /* command control register */
+ u16_le transfer_control; /* transfer control register */
+ u32_le sgt_pointer; /* SG table physical address for DMA */
+ u32_le dummy[2];
+} __attribute__ ((packed)) nsp32_autoparam; /* must be packed struct */
+
+/*
+ * host data structure
+ */
+/* message in/out buffer */
+#define MSGOUTBUF_MAX 20
+#define MSGINBUF_MAX 20
+
+/* flag for trans_method */
+#define NSP32_TRANSFER_BUSMASTER BIT(0)
+#define NSP32_TRANSFER_MMIO BIT(1) /* Not supported yet */
+#define NSP32_TRANSFER_PIO BIT(2) /* Not supported yet */
+
+
+/*
+ * structure for connected LUN dynamic data
+ *
+ * Note: Currently tagged queuing is disabled, each nsp32_lunt holds
+ * one SCSI command and one state.
+ */
+#define DISCPRIV_OK BIT(0) /* DISCPRIV Enable mode */
+#define MSGIN03 BIT(1) /* Auto Msg In 03 Flag */
+
+typedef struct _nsp32_lunt {
+ struct scsi_cmnd *SCpnt; /* Current Handling struct scsi_cmnd */
+ unsigned long save_datp; /* Save Data Pointer - saved position from initial address */
+ int msgin03; /* auto msg in 03 flag */
+ unsigned int sg_num; /* Total number of SG entries */
+ int cur_entry; /* Current SG entry number */
+ nsp32_sglun *sglun; /* sg table per lun */
+ dma_addr_t sglun_paddr; /* sglun physical address */
+} nsp32_lunt;
+
+
+/*
+ * SCSI TARGET/LUN definition
+ */
+#define NSP32_HOST_SCSIID 7 /* SCSI initiator is every time defined as 7 */
+#define MAX_TARGET 8
+#define MAX_LUN 8 /* XXX: In SPI3, max number of LUN is 64. */
+
+
+typedef struct _nsp32_sync_table {
+ unsigned char period_num; /* period number */
+ unsigned char ackwidth; /* ack width designated by period */
+ unsigned char start_period; /* search range - start period */
+ unsigned char end_period; /* search range - end period */
+ unsigned char sample_rate; /* hazard killer parameter */
+} nsp32_sync_table;
+
+
+/*
+ * structure for target device static data
+ */
+/* flag for nsp32_target.sync_flag */
+#define SDTR_INITIATOR BIT(0) /* sending SDTR from initiator */
+#define SDTR_TARGET BIT(1) /* sending SDTR from target */
+#define SDTR_DONE BIT(2) /* exchanging SDTR has been processed */
+
+/* syncronous period value for nsp32_target.config_max */
+#define FAST5M 0x32
+#define FAST10M 0x19
+#define ULTRA20M 0x0c
+
+/* flag for nsp32_target.{sync_offset}, period */
+#define ASYNC_OFFSET 0 /* asynchronous transfer */
+#define SYNC_OFFSET 0xf /* synchronous transfer max offset */
+
+/* syncreg:
+ bit:07 06 05 04 03 02 01 00
+ ---PERIOD-- ---OFFSET-- */
+#define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f))
+
+typedef struct _nsp32_target {
+ unsigned char syncreg; /* value for SYNCREG */
+ unsigned char ackwidth; /* value for ACKWIDTH */
+ unsigned char period; /* sync period (0-255) */
+ unsigned char offset; /* sync offset (0-15) */
+ int sync_flag; /* SDTR_*, 0 */
+ int limit_entry; /* max speed limit entry designated
+ by EEPROM configuration */
+ unsigned char sample_reg; /* SREQ hazard killer register */
+} nsp32_target;
+
+typedef struct _nsp32_hw_data {
+ int IrqNumber;
+ int BaseAddress;
+ int NumAddress;
+ void __iomem *MmioAddress;
+#define NSP32_MMIO_OFFSET 0x0800
+ unsigned long MmioLength;
+
+ struct scsi_cmnd *CurrentSC;
+
+ struct pci_dev *Pci;
+ const struct pci_device_id *pci_devid;
+ struct Scsi_Host *Host;
+ spinlock_t Lock;
+
+ char info_str[100];
+
+ /* allocated memory region */
+ nsp32_sglun *sg_list; /* sglist virtuxal address */
+ dma_addr_t sg_paddr; /* physical address of hw_sg_table */
+ nsp32_autoparam *autoparam; /* auto parameter transfer region */
+ dma_addr_t auto_paddr; /* physical address of autoparam */
+ int cur_entry; /* current sgt entry */
+
+ /* target/LUN */
+ nsp32_lunt *cur_lunt; /* Current connected LUN table */
+ nsp32_lunt lunt[MAX_TARGET][MAX_LUN]; /* All LUN table */
+
+ nsp32_target *cur_target; /* Current connected SCSI ID */
+ nsp32_target target[MAX_TARGET]; /* SCSI ID */
+ int cur_id; /* Current connected target ID */
+ int cur_lun; /* Current connected target LUN */
+
+ /* behavior setting parameters */
+ int trans_method; /* transfer method flag */
+ int resettime; /* Reset time */
+ int clock; /* clock dividing flag */
+ nsp32_sync_table *synct; /* sync_table determined by clock */
+ int syncnum; /* the max number of synct element */
+
+ /* message buffer */
+ unsigned char msgoutbuf[MSGOUTBUF_MAX]; /* msgout buffer */
+ char msgout_len; /* msgoutbuf length */
+ unsigned char msginbuf [MSGINBUF_MAX]; /* megin buffer */
+ char msgin_len; /* msginbuf length */
+
+} nsp32_hw_data;
+
+/*
+ * TIME definition
+ */
+#define RESET_HOLD_TIME 10000 /* reset time in us (SCSI-2 says the
+ minimum is 25us) */
+#define SEL_TIMEOUT_TIME 10000 /* 250ms defined in SCSI specification
+ (25.6us/1unit) */
+#define ARBIT_TIMEOUT_TIME 100 /* 100us */
+#define REQSACK_TIMEOUT_TIME 10000 /* max wait time for REQ/SACK assertion
+ or negation, 10000us == 10ms */
+
+#endif /* _NSP32_H */
+/* end */
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
new file mode 100644
index 000000000..58806f432
--- /dev/null
+++ b/drivers/scsi/nsp32_debug.c
@@ -0,0 +1,263 @@
+/*
+ * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver
+ * Debug routine
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License.
+ */
+
+/*
+ * Show the command data of a command
+ */
+static const char unknown[] = "UNKNOWN";
+
+static const char * group_0_commands[] = {
+/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense",
+/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks",
+/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown,
+/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
+/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve",
+/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit",
+/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic",
+/* 1e-1f */ "Prevent/Allow Medium Removal", unknown,
+};
+
+
+static const char *group_1_commands[] = {
+/* 20-22 */ unknown, unknown, unknown,
+/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)",
+/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown,
+/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal",
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position",
+/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data",
+/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer",
+/* 3d-3f */ "Update Block", "Read Long", "Write Long",
+};
+
+
+static const char *group_2_commands[] = {
+/* 40-41 */ "Change Definition", "Write Same",
+/* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)",
+/* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown,
+/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
+/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
+/* 5c-5f */ unknown, unknown, unknown,
+};
+
+#define group(opcode) (((opcode) >> 5) & 7)
+
+#define RESERVED_GROUP 0
+#define VENDOR_GROUP 1
+#define NOTEXT_GROUP 2
+
+static const char **commands[] = {
+ group_0_commands, group_1_commands, group_2_commands,
+ (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP,
+ (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP,
+ (const char **) VENDOR_GROUP
+};
+
+static const char reserved[] = "RESERVED";
+static const char vendor[] = "VENDOR SPECIFIC";
+
+static void print_opcodek(unsigned char opcode)
+{
+ const char **table = commands[ group(opcode) ];
+
+ switch ((unsigned long) table) {
+ case RESERVED_GROUP:
+ printk("%s[%02x] ", reserved, opcode);
+ break;
+ case NOTEXT_GROUP:
+ printk("%s(notext)[%02x] ", unknown, opcode);
+ break;
+ case VENDOR_GROUP:
+ printk("%s[%02x] ", vendor, opcode);
+ break;
+ default:
+ if (table[opcode & 0x1f] != unknown)
+ printk("%s[%02x] ", table[opcode & 0x1f], opcode);
+ else
+ printk("%s[%02x] ", unknown, opcode);
+ break;
+ }
+}
+
+static void print_commandk (unsigned char *command)
+{
+ int i,s;
+// printk(KERN_DEBUG);
+ print_opcodek(command[0]);
+ /*printk(KERN_DEBUG "%s ", __func__);*/
+ if ((command[0] >> 5) == 6 ||
+ (command[0] >> 5) == 7 ) {
+ s = 12; /* vender specific */
+ } else {
+ s = COMMAND_SIZE(command[0]);
+ }
+
+ for ( i = 1; i < s; ++i) {
+ printk("%02x ", command[i]);
+ }
+
+ switch (s) {
+ case 6:
+ printk("LBA=%d len=%d",
+ (((unsigned int)command[1] & 0x0f) << 16) |
+ ( (unsigned int)command[2] << 8) |
+ ( (unsigned int)command[3] ),
+ (unsigned int)command[4]
+ );
+ break;
+ case 10:
+ printk("LBA=%d len=%d",
+ ((unsigned int)command[2] << 24) |
+ ((unsigned int)command[3] << 16) |
+ ((unsigned int)command[4] << 8) |
+ ((unsigned int)command[5] ),
+ ((unsigned int)command[7] << 8) |
+ ((unsigned int)command[8] )
+ );
+ break;
+ case 12:
+ printk("LBA=%d len=%d",
+ ((unsigned int)command[2] << 24) |
+ ((unsigned int)command[3] << 16) |
+ ((unsigned int)command[4] << 8) |
+ ((unsigned int)command[5] ),
+ ((unsigned int)command[6] << 24) |
+ ((unsigned int)command[7] << 16) |
+ ((unsigned int)command[8] << 8) |
+ ((unsigned int)command[9] )
+ );
+ break;
+ default:
+ break;
+ }
+ printk("\n");
+}
+
+static void show_command(Scsi_Cmnd *SCpnt)
+{
+ print_commandk(SCpnt->cmnd);
+}
+
+static void show_busphase(unsigned char stat)
+{
+ switch(stat) {
+ case BUSPHASE_COMMAND:
+ printk( "BUSPHASE_COMMAND\n");
+ break;
+ case BUSPHASE_MESSAGE_IN:
+ printk( "BUSPHASE_MESSAGE_IN\n");
+ break;
+ case BUSPHASE_MESSAGE_OUT:
+ printk( "BUSPHASE_MESSAGE_OUT\n");
+ break;
+ case BUSPHASE_DATA_IN:
+ printk( "BUSPHASE_DATA_IN\n");
+ break;
+ case BUSPHASE_DATA_OUT:
+ printk( "BUSPHASE_DATA_OUT\n");
+ break;
+ case BUSPHASE_STATUS:
+ printk( "BUSPHASE_STATUS\n");
+ break;
+ case BUSPHASE_SELECT:
+ printk( "BUSPHASE_SELECT\n");
+ break;
+ default:
+ printk( "BUSPHASE_other: 0x%x\n", stat);
+ break;
+ }
+}
+
+static void show_autophase(unsigned short i)
+{
+ printk("auto: 0x%x,", i);
+
+ if(i & COMMAND_PHASE) {
+ printk(" cmd");
+ }
+ if(i & DATA_IN_PHASE) {
+ printk(" din");
+ }
+ if(i & DATA_OUT_PHASE) {
+ printk(" dout");
+ }
+ if(i & MSGOUT_PHASE) {
+ printk(" mout");
+ }
+ if(i & STATUS_PHASE) {
+ printk(" stat");
+ }
+ if(i & ILLEGAL_PHASE) {
+ printk(" ill");
+ }
+ if(i & BUS_FREE_OCCUER) {
+ printk(" bfree-o");
+ }
+ if(i & MSG_IN_OCCUER) {
+ printk(" min-o");
+ }
+ if(i & MSG_OUT_OCCUER) {
+ printk(" mout-o");
+ }
+ if(i & SELECTION_TIMEOUT) {
+ printk(" sel");
+ }
+ if(i & MSGIN_00_VALID) {
+ printk(" m0");
+ }
+ if(i & MSGIN_02_VALID) {
+ printk(" m2");
+ }
+ if(i & MSGIN_03_VALID) {
+ printk(" m3");
+ }
+ if(i & MSGIN_04_VALID) {
+ printk(" m4");
+ }
+ if(i & AUTOSCSI_BUSY) {
+ printk(" busy");
+ }
+
+ printk("\n");
+}
+
+static void nsp32_print_register(int base)
+{
+ if (!(NSP32_DEBUG_MASK & NSP32_SPECIAL_PRINT_REGISTER))
+ return;
+
+ printk("Phase=0x%x, ", nsp32_read1(base, SCSI_BUS_MONITOR));
+ printk("OldPhase=0x%x, ", nsp32_index_read1(base, OLD_SCSI_PHASE));
+ printk("syncreg=0x%x, ", nsp32_read1(base, SYNC_REG));
+ printk("ackwidth=0x%x, ", nsp32_read1(base, ACK_WIDTH));
+ printk("sgtpaddr=0x%lx, ", nsp32_read4(base, SGT_ADR));
+ printk("scsioutlatch=0x%x, ", nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
+ printk("msgout=0x%lx, ", nsp32_read4(base, SCSI_MSG_OUT));
+ printk("miscrd=0x%x, ", nsp32_index_read2(base, MISC_WR));
+ printk("seltimeout=0x%x, ", nsp32_read2(base, SEL_TIME_OUT));
+ printk("sreqrate=0x%x, ", nsp32_read1(base, SREQ_SMPL_RATE));
+ printk("transStatus=0x%x, ", nsp32_read2(base, TRANSFER_STATUS));
+ printk("reselectid=0x%x, ", nsp32_read2(base, COMMAND_CONTROL));
+ printk("arbit=0x%x, ", nsp32_read1(base, ARBIT_STATUS));
+ printk("BmStart=0x%lx, ", nsp32_read4(base, BM_START_ADR));
+ printk("BmCount=0x%lx, ", nsp32_read4(base, BM_CNT));
+ printk("SackCnt=0x%lx, ", nsp32_read4(base, SACK_CNT));
+ printk("SReqCnt=0x%lx, ", nsp32_read4(base, SREQ_CNT));
+ printk("SavedSackCnt=0x%lx, ", nsp32_read4(base, SAVED_SACK_CNT));
+ printk("ScsiBusControl=0x%x, ", nsp32_read1(base, SCSI_BUS_CONTROL));
+ printk("FifoRestCnt=0x%x, ", nsp32_read2(base, FIFO_REST_CNT));
+ printk("CdbIn=0x%x, ", nsp32_read1(base, SCSI_CSB_IN));
+ printk("\n");
+
+ if (0) {
+ printk("execph=0x%x, ", nsp32_read2(base, SCSI_EXECUTE_PHASE));
+ printk("IrqStatus=0x%x, ", nsp32_read2(base, IRQ_STATUS));
+ printk("\n");
+ }
+}
+
+/* end */
diff --git a/drivers/scsi/nsp32_io.h b/drivers/scsi/nsp32_io.h
new file mode 100644
index 000000000..e3f3c27b0
--- /dev/null
+++ b/drivers/scsi/nsp32_io.h
@@ -0,0 +1,259 @@
+/*
+ * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver
+ * I/O routine
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License.
+ */
+
+#ifndef _NSP32_IO_H
+#define _NSP32_IO_H
+
+static inline void nsp32_write1(unsigned int base,
+ unsigned int index,
+ unsigned char val)
+{
+ outb(val, (base + index));
+}
+
+static inline unsigned char nsp32_read1(unsigned int base,
+ unsigned int index)
+{
+ return inb(base + index);
+}
+
+static inline void nsp32_write2(unsigned int base,
+ unsigned int index,
+ unsigned short val)
+{
+ outw(val, (base + index));
+}
+
+static inline unsigned short nsp32_read2(unsigned int base,
+ unsigned int index)
+{
+ return inw(base + index);
+}
+
+static inline void nsp32_write4(unsigned int base,
+ unsigned int index,
+ unsigned long val)
+{
+ outl(val, (base + index));
+}
+
+static inline unsigned long nsp32_read4(unsigned int base,
+ unsigned int index)
+{
+ return inl(base + index);
+}
+
+/*==============================================*/
+
+static inline void nsp32_mmio_write1(unsigned long base,
+ unsigned int index,
+ unsigned char val)
+{
+ volatile unsigned char *ptr;
+
+ ptr = (unsigned char *)(base + NSP32_MMIO_OFFSET + index);
+
+ writeb(val, ptr);
+}
+
+static inline unsigned char nsp32_mmio_read1(unsigned long base,
+ unsigned int index)
+{
+ volatile unsigned char *ptr;
+
+ ptr = (unsigned char *)(base + NSP32_MMIO_OFFSET + index);
+
+ return readb(ptr);
+}
+
+static inline void nsp32_mmio_write2(unsigned long base,
+ unsigned int index,
+ unsigned short val)
+{
+ volatile unsigned short *ptr;
+
+ ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + index);
+
+ writew(cpu_to_le16(val), ptr);
+}
+
+static inline unsigned short nsp32_mmio_read2(unsigned long base,
+ unsigned int index)
+{
+ volatile unsigned short *ptr;
+
+ ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + index);
+
+ return le16_to_cpu(readw(ptr));
+}
+
+static inline void nsp32_mmio_write4(unsigned long base,
+ unsigned int index,
+ unsigned long val)
+{
+ volatile unsigned long *ptr;
+
+ ptr = (unsigned long *)(base + NSP32_MMIO_OFFSET + index);
+
+ writel(cpu_to_le32(val), ptr);
+}
+
+static inline unsigned long nsp32_mmio_read4(unsigned long base,
+ unsigned int index)
+{
+ volatile unsigned long *ptr;
+
+ ptr = (unsigned long *)(base + NSP32_MMIO_OFFSET + index);
+
+ return le32_to_cpu(readl(ptr));
+}
+
+/*==============================================*/
+
+static inline unsigned char nsp32_index_read1(unsigned int base,
+ unsigned int reg)
+{
+ outb(reg, base + INDEX_REG);
+ return inb(base + DATA_REG_LOW);
+}
+
+static inline void nsp32_index_write1(unsigned int base,
+ unsigned int reg,
+ unsigned char val)
+{
+ outb(reg, base + INDEX_REG );
+ outb(val, base + DATA_REG_LOW);
+}
+
+static inline unsigned short nsp32_index_read2(unsigned int base,
+ unsigned int reg)
+{
+ outb(reg, base + INDEX_REG);
+ return inw(base + DATA_REG_LOW);
+}
+
+static inline void nsp32_index_write2(unsigned int base,
+ unsigned int reg,
+ unsigned short val)
+{
+ outb(reg, base + INDEX_REG );
+ outw(val, base + DATA_REG_LOW);
+}
+
+static inline unsigned long nsp32_index_read4(unsigned int base,
+ unsigned int reg)
+{
+ unsigned long h,l;
+
+ outb(reg, base + INDEX_REG);
+ l = inw(base + DATA_REG_LOW);
+ h = inw(base + DATA_REG_HI );
+
+ return ((h << 16) | l);
+}
+
+static inline void nsp32_index_write4(unsigned int base,
+ unsigned int reg,
+ unsigned long val)
+{
+ unsigned long h,l;
+
+ h = (val & 0xffff0000) >> 16;
+ l = (val & 0x0000ffff) >> 0;
+
+ outb(reg, base + INDEX_REG );
+ outw(l, base + DATA_REG_LOW);
+ outw(h, base + DATA_REG_HI );
+}
+
+/*==============================================*/
+
+static inline unsigned char nsp32_mmio_index_read1(unsigned long base,
+ unsigned int reg)
+{
+ volatile unsigned short *index_ptr, *data_ptr;
+
+ index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG);
+ data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW);
+
+ writeb(reg, index_ptr);
+ return readb(data_ptr);
+}
+
+static inline void nsp32_mmio_index_write1(unsigned long base,
+ unsigned int reg,
+ unsigned char val)
+{
+ volatile unsigned short *index_ptr, *data_ptr;
+
+ index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG);
+ data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW);
+
+ writeb(reg, index_ptr);
+ writeb(val, data_ptr );
+}
+
+static inline unsigned short nsp32_mmio_index_read2(unsigned long base,
+ unsigned int reg)
+{
+ volatile unsigned short *index_ptr, *data_ptr;
+
+ index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG);
+ data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW);
+
+ writeb(reg, index_ptr);
+ return le16_to_cpu(readw(data_ptr));
+}
+
+static inline void nsp32_mmio_index_write2(unsigned long base,
+ unsigned int reg,
+ unsigned short val)
+{
+ volatile unsigned short *index_ptr, *data_ptr;
+
+ index_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + INDEX_REG);
+ data_ptr = (unsigned short *)(base + NSP32_MMIO_OFFSET + DATA_REG_LOW);
+
+ writeb(reg, index_ptr);
+ writew(cpu_to_le16(val), data_ptr );
+}
+
+/*==============================================*/
+
+static inline void nsp32_multi_read4(unsigned int base,
+ unsigned int reg,
+ void *buf,
+ unsigned long count)
+{
+ insl(base + reg, buf, count);
+}
+
+static inline void nsp32_fifo_read(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ nsp32_multi_read4(base, FIFO_DATA_LOW, buf, count);
+}
+
+static inline void nsp32_multi_write4(unsigned int base,
+ unsigned int reg,
+ void *buf,
+ unsigned long count)
+{
+ outsl(base + reg, buf, count);
+}
+
+static inline void nsp32_fifo_write(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ nsp32_multi_write4(base, FIFO_DATA_LOW, buf, count);
+}
+
+#endif /* _NSP32_IO_H */
+/* end */
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
new file mode 100644
index 000000000..58cecd45b
--- /dev/null
+++ b/drivers/scsi/osd/Kbuild
@@ -0,0 +1,20 @@
+#
+# Kbuild for the OSD modules
+#
+# Copyright (C) 2008 Panasas Inc. All rights reserved.
+#
+# Authors:
+# Boaz Harrosh <ooo@electrozaur.com>
+# Benny Halevy <bhalevy@panasas.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2
+#
+
+# libosd.ko - osd-initiator library
+libosd-y := osd_initiator.o
+obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
+
+# osd.ko - SCSI ULD and char-device
+osd-y := osd_uld.o
+obj-$(CONFIG_SCSI_OSD_ULD) += osd.o
diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig
new file mode 100644
index 000000000..347cc5e33
--- /dev/null
+++ b/drivers/scsi/osd/Kconfig
@@ -0,0 +1,49 @@
+#
+# Kernel configuration file for the OSD scsi protocol
+#
+# Copyright (C) 2008 Panasas Inc. All rights reserved.
+#
+# Authors:
+# Boaz Harrosh <ooo@electrozaur.com>
+# Benny Halevy <bhalevy@panasas.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public version 2 License as
+# published by the Free Software Foundation
+#
+config SCSI_OSD_INITIATOR
+ tristate "OSD-Initiator library"
+ depends on SCSI
+ help
+ Enable the OSD-Initiator library (libosd.ko).
+ NOTE: You must also select CRYPTO_SHA1 + CRYPTO_HMAC and their
+ dependencies
+
+config SCSI_OSD_ULD
+ tristate "OSD Upper Level driver"
+ depends on SCSI_OSD_INITIATOR
+ help
+ Build a SCSI upper layer driver that exports /dev/osdX devices
+ to user-mode for testing and controlling OSD devices. It is also
+ needed by exofs, for mounting an OSD based file system.
+
+config SCSI_OSD_DPRINT_SENSE
+ int "(0-2) When sense is returned, DEBUG print all sense descriptors"
+ default 1
+ depends on SCSI_OSD_INITIATOR
+ help
+ When a CHECK_CONDITION status is returned from a target, and a
+ sense-buffer is retrieved, turning this on will dump a full
+ sense-decoding message. Setting to 2 will also print recoverable
+ errors that might be regularly returned for some filesystem
+ operations.
+
+config SCSI_OSD_DEBUG
+ bool "Compile All OSD modules with lots of DEBUG prints"
+ default n
+ depends on SCSI_OSD_INITIATOR
+ help
+ OSD Code is populated with lots of OSD_DEBUG(..) printouts to
+ dmesg. Enable this if you found a bug and you want to help us
+ track the problem (see also MAINTAINERS). Setting this will also
+ force SCSI_OSD_DPRINT_SENSE=2.
diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h
new file mode 100644
index 000000000..26341261b
--- /dev/null
+++ b/drivers/scsi/osd/osd_debug.h
@@ -0,0 +1,30 @@
+/*
+ * osd_debug.h - Some kprintf macros
+ *
+ * Copyright (C) 2008 Panasas Inc. All rights reserved.
+ *
+ * Authors:
+ * Boaz Harrosh <ooo@electrozaur.com>
+ * Benny Halevy <bhalevy@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ */
+#ifndef __OSD_DEBUG_H__
+#define __OSD_DEBUG_H__
+
+#define OSD_ERR(fmt, a...) printk(KERN_ERR "osd: " fmt, ##a)
+#define OSD_INFO(fmt, a...) printk(KERN_NOTICE "osd: " fmt, ##a)
+
+#ifdef CONFIG_SCSI_OSD_DEBUG
+#define OSD_DEBUG(fmt, a...) \
+ printk(KERN_NOTICE "osd @%s:%d: " fmt, __func__, __LINE__, ##a)
+#else
+#define OSD_DEBUG(fmt, a...) do {} while (0)
+#endif
+
+/* u64 has problems with printk this will cast it to unsigned long long */
+#define _LLU(x) (unsigned long long)(x)
+
+#endif /* ndef __OSD_DEBUG_H__ */
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
new file mode 100644
index 000000000..0cccd6033
--- /dev/null
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -0,0 +1,2071 @@
+/*
+ * osd_initiator - Main body of the osd initiator library.
+ *
+ * Note: The file does not contain the advanced security functionality which
+ * is only needed by the security_manager's initiators.
+ *
+ * Copyright (C) 2008 Panasas Inc. All rights reserved.
+ *
+ * Authors:
+ * Boaz Harrosh <ooo@electrozaur.com>
+ * Benny Halevy <bhalevy@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <scsi/osd_initiator.h>
+#include <scsi/osd_sec.h>
+#include <scsi/osd_attributes.h>
+#include <scsi/osd_sense.h>
+
+#include <scsi/scsi_device.h>
+
+#include "osd_debug.h"
+
+#ifndef __unused
+# define __unused __attribute__((unused))
+#endif
+
+enum { OSD_REQ_RETRIES = 1 };
+
+MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
+MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
+MODULE_LICENSE("GPL");
+
+static inline void build_test(void)
+{
+ /* structures were not packed */
+ BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
+ BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
+ BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
+}
+
+static const char *_osd_ver_desc(struct osd_request *or)
+{
+ return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
+}
+
+#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
+
+static int _osd_get_print_system_info(struct osd_dev *od,
+ void *caps, struct osd_dev_info *odi)
+{
+ struct osd_request *or;
+ struct osd_attr get_attrs[] = {
+ ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
+ ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
+ ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
+ ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
+ ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
+ ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
+ ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
+ ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
+ ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
+ ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
+ /* IBM-OSD-SIM Has a bug with this one put it last */
+ ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
+ };
+ void *iter = NULL, *pFirst;
+ int nelem = ARRAY_SIZE(get_attrs), a = 0;
+ int ret;
+
+ or = osd_start_request(od, GFP_KERNEL);
+ if (!or)
+ return -ENOMEM;
+
+ /* get attrs */
+ osd_req_get_attributes(or, &osd_root_object);
+ osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
+
+ ret = osd_finalize_request(or, 0, caps, NULL);
+ if (ret)
+ goto out;
+
+ ret = osd_execute_request(or);
+ if (ret) {
+ OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
+ goto out;
+ }
+
+ osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
+
+ OSD_INFO("Detected %s device\n",
+ _osd_ver_desc(or));
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
+ (char *)pFirst);
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
+ (char *)pFirst);
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("PRODUCT_MODEL [%s]\n",
+ (char *)pFirst);
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
+ pFirst ? get_unaligned_be32(pFirst) : ~0U);
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
+ (char *)pFirst);
+
+ odi->osdname_len = get_attrs[a].len;
+ /* Avoid NULL for memcmp optimization 0-length is good enough */
+ odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+ if (!odi->osdname) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (odi->osdname_len)
+ memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
+ OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
+ a++;
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
+ pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("USED_CAPACITY [0x%llx]\n",
+ pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
+
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
+ pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
+
+ if (a >= nelem)
+ goto out;
+
+ /* FIXME: Where are the time utilities */
+ pFirst = get_attrs[a++].val_ptr;
+ OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
+ ((char *)pFirst)[0], ((char *)pFirst)[1],
+ ((char *)pFirst)[2], ((char *)pFirst)[3],
+ ((char *)pFirst)[4], ((char *)pFirst)[5]);
+
+ if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
+ unsigned len = get_attrs[a].len;
+ char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
+
+ hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
+ sid_dump, sizeof(sid_dump), true);
+ OSD_INFO("OSD_SYSTEM_ID(%d)\n"
+ " [%s]\n", len, sid_dump);
+
+ if (unlikely(len > sizeof(odi->systemid))) {
+ OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
+ "device identification might not work\n", len);
+ len = sizeof(odi->systemid);
+ }
+ odi->systemid_len = len;
+ memcpy(odi->systemid, get_attrs[a].val_ptr, len);
+ a++;
+ }
+out:
+ osd_end_request(or);
+ return ret;
+}
+
+int osd_auto_detect_ver(struct osd_dev *od,
+ void *caps, struct osd_dev_info *odi)
+{
+ int ret;
+
+ /* Auto-detect the osd version */
+ ret = _osd_get_print_system_info(od, caps, odi);
+ if (ret) {
+ osd_dev_set_ver(od, OSD_VER1);
+ OSD_DEBUG("converting to OSD1\n");
+ ret = _osd_get_print_system_info(od, caps, odi);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(osd_auto_detect_ver);
+
+static unsigned _osd_req_cdb_len(struct osd_request *or)
+{
+ return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
+}
+
+static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
+{
+ return osd_req_is_ver1(or) ?
+ osdv1_attr_list_elem_size(len) :
+ osdv2_attr_list_elem_size(len);
+}
+
+static void _osd_req_alist_elem_encode(struct osd_request *or,
+ void *attr_last, const struct osd_attr *oa)
+{
+ if (osd_req_is_ver1(or)) {
+ struct osdv1_attributes_list_element *attr = attr_last;
+
+ attr->attr_page = cpu_to_be32(oa->attr_page);
+ attr->attr_id = cpu_to_be32(oa->attr_id);
+ attr->attr_bytes = cpu_to_be16(oa->len);
+ memcpy(attr->attr_val, oa->val_ptr, oa->len);
+ } else {
+ struct osdv2_attributes_list_element *attr = attr_last;
+
+ attr->attr_page = cpu_to_be32(oa->attr_page);
+ attr->attr_id = cpu_to_be32(oa->attr_id);
+ attr->attr_bytes = cpu_to_be16(oa->len);
+ memcpy(attr->attr_val, oa->val_ptr, oa->len);
+ }
+}
+
+static int _osd_req_alist_elem_decode(struct osd_request *or,
+ void *cur_p, struct osd_attr *oa, unsigned max_bytes)
+{
+ unsigned inc;
+ if (osd_req_is_ver1(or)) {
+ struct osdv1_attributes_list_element *attr = cur_p;
+
+ if (max_bytes < sizeof(*attr))
+ return -1;
+
+ oa->len = be16_to_cpu(attr->attr_bytes);
+ inc = _osd_req_alist_elem_size(or, oa->len);
+ if (inc > max_bytes)
+ return -1;
+
+ oa->attr_page = be32_to_cpu(attr->attr_page);
+ oa->attr_id = be32_to_cpu(attr->attr_id);
+
+ /* OSD1: On empty attributes we return a pointer to 2 bytes
+ * of zeros. This keeps similar behaviour with OSD2.
+ * (See below)
+ */
+ oa->val_ptr = likely(oa->len) ? attr->attr_val :
+ (u8 *)&attr->attr_bytes;
+ } else {
+ struct osdv2_attributes_list_element *attr = cur_p;
+
+ if (max_bytes < sizeof(*attr))
+ return -1;
+
+ oa->len = be16_to_cpu(attr->attr_bytes);
+ inc = _osd_req_alist_elem_size(or, oa->len);
+ if (inc > max_bytes)
+ return -1;
+
+ oa->attr_page = be32_to_cpu(attr->attr_page);
+ oa->attr_id = be32_to_cpu(attr->attr_id);
+
+ /* OSD2: For convenience, on empty attributes, we return 8 bytes
+ * of zeros here. This keeps the same behaviour with OSD2r04,
+ * and is nice with null terminating ASCII fields.
+ * oa->val_ptr == NULL marks the end-of-list, or error.
+ */
+ oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
+ }
+ return inc;
+}
+
+static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
+{
+ return osd_req_is_ver1(or) ?
+ osdv1_list_size(list_head) :
+ osdv2_list_size(list_head);
+}
+
+static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
+{
+ return osd_req_is_ver1(or) ?
+ sizeof(struct osdv1_attributes_list_header) :
+ sizeof(struct osdv2_attributes_list_header);
+}
+
+static void _osd_req_set_alist_type(struct osd_request *or,
+ void *list, int list_type)
+{
+ if (osd_req_is_ver1(or)) {
+ struct osdv1_attributes_list_header *attr_list = list;
+
+ memset(attr_list, 0, sizeof(*attr_list));
+ attr_list->type = list_type;
+ } else {
+ struct osdv2_attributes_list_header *attr_list = list;
+
+ memset(attr_list, 0, sizeof(*attr_list));
+ attr_list->type = list_type;
+ }
+}
+
+static bool _osd_req_is_alist_type(struct osd_request *or,
+ void *list, int list_type)
+{
+ if (!list)
+ return false;
+
+ if (osd_req_is_ver1(or)) {
+ struct osdv1_attributes_list_header *attr_list = list;
+
+ return attr_list->type == list_type;
+ } else {
+ struct osdv2_attributes_list_header *attr_list = list;
+
+ return attr_list->type == list_type;
+ }
+}
+
+/* This is for List-objects not Attributes-Lists */
+static void _osd_req_encode_olist(struct osd_request *or,
+ struct osd_obj_id_list *list)
+{
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+
+ if (osd_req_is_ver1(or)) {
+ cdbh->v1.list_identifier = list->list_identifier;
+ cdbh->v1.start_address = list->continuation_id;
+ } else {
+ cdbh->v2.list_identifier = list->list_identifier;
+ cdbh->v2.start_address = list->continuation_id;
+ }
+}
+
+static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
+ u64 offset, unsigned *padding)
+{
+ return __osd_encode_offset(offset, padding,
+ osd_req_is_ver1(or) ?
+ OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
+ OSD_OFFSET_MAX_SHIFT);
+}
+
+static struct osd_security_parameters *
+_osd_req_sec_params(struct osd_request *or)
+{
+ struct osd_cdb *ocdb = &or->cdb;
+
+ if (osd_req_is_ver1(or))
+ return (struct osd_security_parameters *)&ocdb->v1.sec_params;
+ else
+ return (struct osd_security_parameters *)&ocdb->v2.sec_params;
+}
+
+void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
+{
+ memset(osdd, 0, sizeof(*osdd));
+ osdd->scsi_device = scsi_device;
+ osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
+#ifdef OSD_VER1_SUPPORT
+ osdd->version = OSD_VER2;
+#endif
+ /* TODO: Allocate pools for osd_request attributes ... */
+}
+EXPORT_SYMBOL(osd_dev_init);
+
+void osd_dev_fini(struct osd_dev *osdd)
+{
+ /* TODO: De-allocate pools */
+
+ osdd->scsi_device = NULL;
+}
+EXPORT_SYMBOL(osd_dev_fini);
+
+static struct osd_request *_osd_request_alloc(gfp_t gfp)
+{
+ struct osd_request *or;
+
+ /* TODO: Use mempool with one saved request */
+ or = kzalloc(sizeof(*or), gfp);
+ return or;
+}
+
+static void _osd_request_free(struct osd_request *or)
+{
+ kfree(or);
+}
+
+struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
+{
+ struct osd_request *or;
+
+ or = _osd_request_alloc(gfp);
+ if (!or)
+ return NULL;
+
+ or->osd_dev = dev;
+ or->alloc_flags = gfp;
+ or->timeout = dev->def_timeout;
+ or->retries = OSD_REQ_RETRIES;
+
+ return or;
+}
+EXPORT_SYMBOL(osd_start_request);
+
+static void _osd_free_seg(struct osd_request *or __unused,
+ struct _osd_req_data_segment *seg)
+{
+ if (!seg->buff || !seg->alloc_size)
+ return;
+
+ kfree(seg->buff);
+ seg->buff = NULL;
+ seg->alloc_size = 0;
+}
+
+static void _put_request(struct request *rq)
+{
+ /*
+ * If osd_finalize_request() was called but the request was not
+ * executed through the block layer, then we must release BIOs.
+ * TODO: Keep error code in or->async_error. Need to audit all
+ * code paths.
+ */
+ if (unlikely(rq->bio))
+ blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+ else
+ blk_put_request(rq);
+}
+
+void osd_end_request(struct osd_request *or)
+{
+ struct request *rq = or->request;
+
+ if (rq) {
+ if (rq->next_rq) {
+ _put_request(rq->next_rq);
+ rq->next_rq = NULL;
+ }
+
+ _put_request(rq);
+ }
+
+ _osd_free_seg(or, &or->get_attr);
+ _osd_free_seg(or, &or->enc_get_attr);
+ _osd_free_seg(or, &or->set_attr);
+ _osd_free_seg(or, &or->cdb_cont);
+
+ _osd_request_free(or);
+}
+EXPORT_SYMBOL(osd_end_request);
+
+static void _set_error_resid(struct osd_request *or, struct request *req,
+ int error)
+{
+ or->async_error = error;
+ or->req_errors = req->errors ? : error;
+ or->sense_len = req->sense_len;
+ if (or->out.req)
+ or->out.residual = or->out.req->resid_len;
+ if (or->in.req)
+ or->in.residual = or->in.req->resid_len;
+}
+
+int osd_execute_request(struct osd_request *or)
+{
+ int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
+
+ _set_error_resid(or, or->request, error);
+ return error;
+}
+EXPORT_SYMBOL(osd_execute_request);
+
+static void osd_request_async_done(struct request *req, int error)
+{
+ struct osd_request *or = req->end_io_data;
+
+ _set_error_resid(or, req, error);
+ if (req->next_rq) {
+ __blk_put_request(req->q, req->next_rq);
+ req->next_rq = NULL;
+ }
+
+ __blk_put_request(req->q, req);
+ or->request = NULL;
+ or->in.req = NULL;
+ or->out.req = NULL;
+
+ if (or->async_done)
+ or->async_done(or, or->async_private);
+ else
+ osd_end_request(or);
+}
+
+int osd_execute_request_async(struct osd_request *or,
+ osd_req_done_fn *done, void *private)
+{
+ or->request->end_io_data = or;
+ or->async_private = private;
+ or->async_done = done;
+
+ blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
+ osd_request_async_done);
+ return 0;
+}
+EXPORT_SYMBOL(osd_execute_request_async);
+
+u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
+u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
+
+static int _osd_realloc_seg(struct osd_request *or,
+ struct _osd_req_data_segment *seg, unsigned max_bytes)
+{
+ void *buff;
+
+ if (seg->alloc_size >= max_bytes)
+ return 0;
+
+ buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
+ if (!buff) {
+ OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
+ seg->alloc_size);
+ return -ENOMEM;
+ }
+
+ memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
+ seg->buff = buff;
+ seg->alloc_size = max_bytes;
+ return 0;
+}
+
+static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
+{
+ OSD_DEBUG("total_bytes=%d\n", total_bytes);
+ return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
+}
+
+static int _alloc_set_attr_list(struct osd_request *or,
+ const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
+{
+ unsigned total_bytes = add_bytes;
+
+ for (; nelem; --nelem, ++oa)
+ total_bytes += _osd_req_alist_elem_size(or, oa->len);
+
+ OSD_DEBUG("total_bytes=%d\n", total_bytes);
+ return _osd_realloc_seg(or, &or->set_attr, total_bytes);
+}
+
+static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
+{
+ OSD_DEBUG("total_bytes=%d\n", max_bytes);
+ return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
+}
+
+static int _alloc_get_attr_list(struct osd_request *or)
+{
+ OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
+ return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
+}
+
+/*
+ * Common to all OSD commands
+ */
+
+static void _osdv1_req_encode_common(struct osd_request *or,
+ __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
+{
+ struct osdv1_cdb *ocdb = &or->cdb.v1;
+
+ /*
+ * For speed, the commands
+ * OSD_ACT_PERFORM_SCSI_COMMAND , V1 0x8F7E, V2 0x8F7C
+ * OSD_ACT_SCSI_TASK_MANAGEMENT , V1 0x8F7F, V2 0x8F7D
+ * are not supported here. Should pass zero and set after the call
+ */
+ act &= cpu_to_be16(~0x0080); /* V1 action code */
+
+ OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
+
+ ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
+ ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
+ ocdb->h.varlen_cdb.service_action = act;
+
+ ocdb->h.partition = cpu_to_be64(obj->partition);
+ ocdb->h.object = cpu_to_be64(obj->id);
+ ocdb->h.v1.length = cpu_to_be64(len);
+ ocdb->h.v1.start_address = cpu_to_be64(offset);
+}
+
+static void _osdv2_req_encode_common(struct osd_request *or,
+ __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
+{
+ struct osdv2_cdb *ocdb = &or->cdb.v2;
+
+ OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
+
+ ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
+ ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
+ ocdb->h.varlen_cdb.service_action = act;
+
+ ocdb->h.partition = cpu_to_be64(obj->partition);
+ ocdb->h.object = cpu_to_be64(obj->id);
+ ocdb->h.v2.length = cpu_to_be64(len);
+ ocdb->h.v2.start_address = cpu_to_be64(offset);
+}
+
+static void _osd_req_encode_common(struct osd_request *or,
+ __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
+{
+ if (osd_req_is_ver1(or))
+ _osdv1_req_encode_common(or, act, obj, offset, len);
+ else
+ _osdv2_req_encode_common(or, act, obj, offset, len);
+}
+
+/*
+ * Device commands
+ */
+/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
+/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
+
+void osd_req_format(struct osd_request *or, u64 tot_capacity)
+{
+ _osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
+ tot_capacity);
+}
+EXPORT_SYMBOL(osd_req_format);
+
+int osd_req_list_dev_partitions(struct osd_request *or,
+ osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
+{
+ return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
+}
+EXPORT_SYMBOL(osd_req_list_dev_partitions);
+
+static void _osd_req_encode_flush(struct osd_request *or,
+ enum osd_options_flush_scope_values op)
+{
+ struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
+
+ ocdb->command_specific_options = op;
+}
+
+void osd_req_flush_obsd(struct osd_request *or,
+ enum osd_options_flush_scope_values op)
+{
+ _osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
+ _osd_req_encode_flush(or, op);
+}
+EXPORT_SYMBOL(osd_req_flush_obsd);
+
+/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
+ const u8 *cdb, ...); */
+/*TODO: void osd_req_task_management(struct osd_request *, ...); */
+
+/*
+ * Partition commands
+ */
+static void _osd_req_encode_partition(struct osd_request *or,
+ __be16 act, osd_id partition)
+{
+ struct osd_obj_id par = {
+ .partition = partition,
+ .id = 0,
+ };
+
+ _osd_req_encode_common(or, act, &par, 0, 0);
+}
+
+void osd_req_create_partition(struct osd_request *or, osd_id partition)
+{
+ _osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
+}
+EXPORT_SYMBOL(osd_req_create_partition);
+
+void osd_req_remove_partition(struct osd_request *or, osd_id partition)
+{
+ _osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
+}
+EXPORT_SYMBOL(osd_req_remove_partition);
+
+/*TODO: void osd_req_set_partition_key(struct osd_request *,
+ osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
+ u8 seed[OSD_CRYPTO_SEED_SIZE]); */
+
+static int _osd_req_list_objects(struct osd_request *or,
+ __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
+ struct osd_obj_id_list *list, unsigned nelem)
+{
+ struct request_queue *q = osd_request_queue(or->osd_dev);
+ u64 len = nelem * sizeof(osd_id) + sizeof(*list);
+ struct bio *bio;
+
+ _osd_req_encode_common(or, action, obj, (u64)initial_id, len);
+
+ if (list->list_identifier)
+ _osd_req_encode_olist(or, list);
+
+ WARN_ON(or->in.bio);
+ bio = bio_map_kern(q, list, len, or->alloc_flags);
+ if (IS_ERR(bio)) {
+ OSD_ERR("!!! Failed to allocate list_objects BIO\n");
+ return PTR_ERR(bio);
+ }
+
+ bio->bi_rw &= ~REQ_WRITE;
+ or->in.bio = bio;
+ or->in.total_bytes = bio->bi_iter.bi_size;
+ return 0;
+}
+
+int osd_req_list_partition_collections(struct osd_request *or,
+ osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
+ unsigned nelem)
+{
+ struct osd_obj_id par = {
+ .partition = partition,
+ .id = 0,
+ };
+
+ return osd_req_list_collection_objects(or, &par, initial_id, list,
+ nelem);
+}
+EXPORT_SYMBOL(osd_req_list_partition_collections);
+
+int osd_req_list_partition_objects(struct osd_request *or,
+ osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
+ unsigned nelem)
+{
+ struct osd_obj_id par = {
+ .partition = partition,
+ .id = 0,
+ };
+
+ return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
+ nelem);
+}
+EXPORT_SYMBOL(osd_req_list_partition_objects);
+
+void osd_req_flush_partition(struct osd_request *or,
+ osd_id partition, enum osd_options_flush_scope_values op)
+{
+ _osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
+ _osd_req_encode_flush(or, op);
+}
+EXPORT_SYMBOL(osd_req_flush_partition);
+
+/*
+ * Collection commands
+ */
+/*TODO: void osd_req_create_collection(struct osd_request *,
+ const struct osd_obj_id *); */
+/*TODO: void osd_req_remove_collection(struct osd_request *,
+ const struct osd_obj_id *); */
+
+int osd_req_list_collection_objects(struct osd_request *or,
+ const struct osd_obj_id *obj, osd_id initial_id,
+ struct osd_obj_id_list *list, unsigned nelem)
+{
+ return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
+ initial_id, list, nelem);
+}
+EXPORT_SYMBOL(osd_req_list_collection_objects);
+
+/*TODO: void query(struct osd_request *, ...); V2 */
+
+void osd_req_flush_collection(struct osd_request *or,
+ const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
+{
+ _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
+ _osd_req_encode_flush(or, op);
+}
+EXPORT_SYMBOL(osd_req_flush_collection);
+
+/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
+/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
+
+/*
+ * Object commands
+ */
+void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
+{
+ _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
+}
+EXPORT_SYMBOL(osd_req_create_object);
+
+void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
+{
+ _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
+}
+EXPORT_SYMBOL(osd_req_remove_object);
+
+
+/*TODO: void osd_req_create_multi(struct osd_request *or,
+ struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
+*/
+
+void osd_req_write(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset,
+ struct bio *bio, u64 len)
+{
+ _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
+ WARN_ON(or->out.bio || or->out.total_bytes);
+ WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
+ or->out.bio = bio;
+ or->out.total_bytes = len;
+}
+EXPORT_SYMBOL(osd_req_write);
+
+int osd_req_write_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
+{
+ struct request_queue *req_q = osd_request_queue(or->osd_dev);
+ struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
+ osd_req_write(or, obj, offset, bio, len);
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_write_kern);
+
+/*TODO: void osd_req_append(struct osd_request *,
+ const struct osd_obj_id *, struct bio *data_out); */
+/*TODO: void osd_req_create_write(struct osd_request *,
+ const struct osd_obj_id *, struct bio *data_out, u64 offset); */
+/*TODO: void osd_req_clear(struct osd_request *,
+ const struct osd_obj_id *, u64 offset, u64 len); */
+/*TODO: void osd_req_punch(struct osd_request *,
+ const struct osd_obj_id *, u64 offset, u64 len); V2 */
+
+void osd_req_flush_object(struct osd_request *or,
+ const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
+ /*V2*/ u64 offset, /*V2*/ u64 len)
+{
+ if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
+ OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
+ offset = 0;
+ len = 0;
+ }
+
+ _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
+ _osd_req_encode_flush(or, op);
+}
+EXPORT_SYMBOL(osd_req_flush_object);
+
+void osd_req_read(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset,
+ struct bio *bio, u64 len)
+{
+ _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
+ WARN_ON(or->in.bio || or->in.total_bytes);
+ WARN_ON(bio->bi_rw & REQ_WRITE);
+ or->in.bio = bio;
+ or->in.total_bytes = len;
+}
+EXPORT_SYMBOL(osd_req_read);
+
+int osd_req_read_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
+{
+ struct request_queue *req_q = osd_request_queue(or->osd_dev);
+ struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ osd_req_read(or, obj, offset, bio, len);
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_read_kern);
+
+static int _add_sg_continuation_descriptor(struct osd_request *or,
+ const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
+{
+ struct osd_sg_continuation_descriptor *oscd;
+ u32 oscd_size;
+ unsigned i;
+ int ret;
+
+ oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
+
+ if (!or->cdb_cont.total_bytes) {
+ /* First time, jump over the header, we will write to:
+ * cdb_cont.buff + cdb_cont.total_bytes
+ */
+ or->cdb_cont.total_bytes =
+ sizeof(struct osd_continuation_segment_header);
+ }
+
+ ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
+ if (unlikely(ret))
+ return ret;
+
+ oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
+ oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
+ oscd->hdr.pad_length = 0;
+ oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
+
+ *len = 0;
+ /* copy the sg entries and convert to network byte order */
+ for (i = 0; i < numentries; i++) {
+ oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
+ oscd->entries[i].len = cpu_to_be64(sglist[i].len);
+ *len += sglist[i].len;
+ }
+
+ or->cdb_cont.total_bytes += oscd_size;
+ OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
+ or->cdb_cont.total_bytes, oscd_size, numentries);
+ return 0;
+}
+
+static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
+{
+ struct request_queue *req_q = osd_request_queue(or->osd_dev);
+ struct bio *bio;
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+ struct osd_continuation_segment_header *cont_seg_hdr;
+
+ if (!or->cdb_cont.total_bytes)
+ return 0;
+
+ cont_seg_hdr = or->cdb_cont.buff;
+ cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
+ cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
+
+ /* create a bio for continuation segment */
+ bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
+ GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ bio->bi_rw |= REQ_WRITE;
+
+ /* integrity check the continuation before the bio is linked
+ * with the other data segments since the continuation
+ * integrity is separate from the other data segments.
+ */
+ osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
+
+ cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
+
+ /* we can't use _req_append_segment, because we need to link in the
+ * continuation bio to the head of the bio list - the
+ * continuation segment (if it exists) is always the first segment in
+ * the out data buffer.
+ */
+ bio->bi_next = or->out.bio;
+ or->out.bio = bio;
+ or->out.total_bytes += or->cdb_cont.total_bytes;
+
+ return 0;
+}
+
+/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
+ * @sglist that has the scatter gather entries. Scatter-gather enables a write
+ * of multiple none-contiguous areas of an object, in a single call. The extents
+ * may overlap and/or be in any order. The only constrain is that:
+ * total_bytes(sglist) >= total_bytes(bio)
+ */
+int osd_req_write_sg(struct osd_request *or,
+ const struct osd_obj_id *obj, struct bio *bio,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ u64 len;
+ int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+ if (ret)
+ return ret;
+ osd_req_write(or, obj, 0, bio, len);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg);
+
+/* osd_req_read_sg: Read multiple extents of an object into @bio
+ * See osd_req_write_sg
+ */
+int osd_req_read_sg(struct osd_request *or,
+ const struct osd_obj_id *obj, struct bio *bio,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ u64 len;
+ u64 off;
+ int ret;
+
+ if (numentries > 1) {
+ off = 0;
+ ret = _add_sg_continuation_descriptor(or, sglist, numentries,
+ &len);
+ if (ret)
+ return ret;
+ } else {
+ /* Optimize the case of single segment, read_sg is a
+ * bidi operation.
+ */
+ len = sglist->len;
+ off = sglist->offset;
+ }
+ osd_req_read(or, obj, off, bio, len);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg);
+
+/* SG-list write/read Kern API
+ *
+ * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
+ * of sg_entries. @numentries indicates how many pointers and sg_entries there
+ * are. By requiring an array of buff pointers. This allows a caller to do a
+ * single write/read and scatter into multiple buffers.
+ * NOTE: Each buffer + len should not cross a page boundary.
+ */
+static struct bio *_create_sg_bios(struct osd_request *or,
+ void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ struct request_queue *q = osd_request_queue(or->osd_dev);
+ struct bio *bio;
+ unsigned i;
+
+ bio = bio_kmalloc(GFP_KERNEL, numentries);
+ if (unlikely(!bio)) {
+ OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < numentries; i++) {
+ unsigned offset = offset_in_page(buff[i]);
+ struct page *page = virt_to_page(buff[i]);
+ unsigned len = sglist[i].len;
+ unsigned added_len;
+
+ BUG_ON(offset + len > PAGE_SIZE);
+ added_len = bio_add_pc_page(q, bio, page, len, offset);
+ if (unlikely(len != added_len)) {
+ OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
+ len, added_len);
+ bio_put(bio);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ return bio;
+}
+
+int osd_req_write_sg_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, void **buff,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ bio->bi_rw |= REQ_WRITE;
+ osd_req_write_sg(or, obj, bio, sglist, numentries);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg_kern);
+
+int osd_req_read_sg_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, void **buff,
+ const struct osd_sg_entry *sglist, unsigned numentries)
+{
+ struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ osd_req_read_sg(or, obj, bio, sglist, numentries);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg_kern);
+
+
+
+void osd_req_get_attributes(struct osd_request *or,
+ const struct osd_obj_id *obj)
+{
+ _osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
+}
+EXPORT_SYMBOL(osd_req_get_attributes);
+
+void osd_req_set_attributes(struct osd_request *or,
+ const struct osd_obj_id *obj)
+{
+ _osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
+}
+EXPORT_SYMBOL(osd_req_set_attributes);
+
+/*
+ * Attributes List-mode
+ */
+
+int osd_req_add_set_attr_list(struct osd_request *or,
+ const struct osd_attr *oa, unsigned nelem)
+{
+ unsigned total_bytes = or->set_attr.total_bytes;
+ void *attr_last;
+ int ret;
+
+ if (or->attributes_mode &&
+ or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
+
+ if (!total_bytes) { /* first-time: allocate and put list header */
+ total_bytes = _osd_req_sizeof_alist_header(or);
+ ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
+ if (ret)
+ return ret;
+ _osd_req_set_alist_type(or, or->set_attr.buff,
+ OSD_ATTR_LIST_SET_RETRIEVE);
+ }
+ attr_last = or->set_attr.buff + total_bytes;
+
+ for (; nelem; --nelem) {
+ unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
+
+ total_bytes += elem_size;
+ if (unlikely(or->set_attr.alloc_size < total_bytes)) {
+ or->set_attr.total_bytes = total_bytes - elem_size;
+ ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
+ if (ret)
+ return ret;
+ attr_last =
+ or->set_attr.buff + or->set_attr.total_bytes;
+ }
+
+ _osd_req_alist_elem_encode(or, attr_last, oa);
+
+ attr_last += elem_size;
+ ++oa;
+ }
+
+ or->set_attr.total_bytes = total_bytes;
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_add_set_attr_list);
+
+static int _req_append_segment(struct osd_request *or,
+ unsigned padding, struct _osd_req_data_segment *seg,
+ struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
+{
+ void *pad_buff;
+ int ret;
+
+ if (padding) {
+ /* check if we can just add it to last buffer */
+ if (last_seg &&
+ (padding <= last_seg->alloc_size - last_seg->total_bytes))
+ pad_buff = last_seg->buff + last_seg->total_bytes;
+ else
+ pad_buff = io->pad_buff;
+
+ ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
+ or->alloc_flags);
+ if (ret)
+ return ret;
+ io->total_bytes += padding;
+ }
+
+ ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
+ or->alloc_flags);
+ if (ret)
+ return ret;
+
+ io->total_bytes += seg->total_bytes;
+ OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
+ seg->total_bytes);
+ return 0;
+}
+
+static int _osd_req_finalize_set_attr_list(struct osd_request *or)
+{
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+ unsigned padding;
+ int ret;
+
+ if (!or->set_attr.total_bytes) {
+ cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
+ return 0;
+ }
+
+ cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
+ cdbh->attrs_list.set_attr_offset =
+ osd_req_encode_offset(or, or->out.total_bytes, &padding);
+
+ ret = _req_append_segment(or, padding, &or->set_attr,
+ or->out.last_seg, &or->out);
+ if (ret)
+ return ret;
+
+ or->out.last_seg = &or->set_attr;
+ return 0;
+}
+
+int osd_req_add_get_attr_list(struct osd_request *or,
+ const struct osd_attr *oa, unsigned nelem)
+{
+ unsigned total_bytes = or->enc_get_attr.total_bytes;
+ void *attr_last;
+ int ret;
+
+ if (or->attributes_mode &&
+ or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
+
+ /* first time calc data-in list header size */
+ if (!or->get_attr.total_bytes)
+ or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
+
+ /* calc data-out info */
+ if (!total_bytes) { /* first-time: allocate and put list header */
+ unsigned max_bytes;
+
+ total_bytes = _osd_req_sizeof_alist_header(or);
+ max_bytes = total_bytes +
+ nelem * sizeof(struct osd_attributes_list_attrid);
+ ret = _alloc_get_attr_desc(or, max_bytes);
+ if (ret)
+ return ret;
+
+ _osd_req_set_alist_type(or, or->enc_get_attr.buff,
+ OSD_ATTR_LIST_GET);
+ }
+ attr_last = or->enc_get_attr.buff + total_bytes;
+
+ for (; nelem; --nelem) {
+ struct osd_attributes_list_attrid *attrid;
+ const unsigned cur_size = sizeof(*attrid);
+
+ total_bytes += cur_size;
+ if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
+ or->enc_get_attr.total_bytes = total_bytes - cur_size;
+ ret = _alloc_get_attr_desc(or,
+ total_bytes + nelem * sizeof(*attrid));
+ if (ret)
+ return ret;
+ attr_last = or->enc_get_attr.buff +
+ or->enc_get_attr.total_bytes;
+ }
+
+ attrid = attr_last;
+ attrid->attr_page = cpu_to_be32(oa->attr_page);
+ attrid->attr_id = cpu_to_be32(oa->attr_id);
+
+ attr_last += cur_size;
+
+ /* calc data-in size */
+ or->get_attr.total_bytes +=
+ _osd_req_alist_elem_size(or, oa->len);
+ ++oa;
+ }
+
+ or->enc_get_attr.total_bytes = total_bytes;
+
+ OSD_DEBUG(
+ "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
+ or->get_attr.total_bytes,
+ or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
+ or->enc_get_attr.total_bytes,
+ (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
+ / sizeof(struct osd_attributes_list_attrid));
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_add_get_attr_list);
+
+static int _osd_req_finalize_get_attr_list(struct osd_request *or)
+{
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+ unsigned out_padding;
+ unsigned in_padding;
+ int ret;
+
+ if (!or->enc_get_attr.total_bytes) {
+ cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
+ cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
+ return 0;
+ }
+
+ ret = _alloc_get_attr_list(or);
+ if (ret)
+ return ret;
+
+ /* The out-going buffer info update */
+ OSD_DEBUG("out-going\n");
+ cdbh->attrs_list.get_attr_desc_bytes =
+ cpu_to_be32(or->enc_get_attr.total_bytes);
+
+ cdbh->attrs_list.get_attr_desc_offset =
+ osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
+
+ ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
+ or->out.last_seg, &or->out);
+ if (ret)
+ return ret;
+ or->out.last_seg = &or->enc_get_attr;
+
+ /* The incoming buffer info update */
+ OSD_DEBUG("in-coming\n");
+ cdbh->attrs_list.get_attr_alloc_length =
+ cpu_to_be32(or->get_attr.total_bytes);
+
+ cdbh->attrs_list.get_attr_offset =
+ osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
+
+ ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
+ &or->in);
+ if (ret)
+ return ret;
+ or->in.last_seg = &or->get_attr;
+
+ return 0;
+}
+
+int osd_req_decode_get_attr_list(struct osd_request *or,
+ struct osd_attr *oa, int *nelem, void **iterator)
+{
+ unsigned cur_bytes, returned_bytes;
+ int n;
+ const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
+ void *cur_p;
+
+ if (!_osd_req_is_alist_type(or, or->get_attr.buff,
+ OSD_ATTR_LIST_SET_RETRIEVE)) {
+ oa->attr_page = 0;
+ oa->attr_id = 0;
+ oa->val_ptr = NULL;
+ oa->len = 0;
+ *iterator = NULL;
+ return 0;
+ }
+
+ if (*iterator) {
+ BUG_ON((*iterator < or->get_attr.buff) ||
+ (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
+ cur_p = *iterator;
+ cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
+ returned_bytes = or->get_attr.total_bytes;
+ } else { /* first time decode the list header */
+ cur_bytes = sizeof_attr_list;
+ returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
+ sizeof_attr_list;
+
+ cur_p = or->get_attr.buff + sizeof_attr_list;
+
+ if (returned_bytes > or->get_attr.alloc_size) {
+ OSD_DEBUG("target report: space was not big enough! "
+ "Allocate=%u Needed=%u\n",
+ or->get_attr.alloc_size,
+ returned_bytes + sizeof_attr_list);
+
+ returned_bytes =
+ or->get_attr.alloc_size - sizeof_attr_list;
+ }
+ or->get_attr.total_bytes = returned_bytes;
+ }
+
+ for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
+ int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
+ returned_bytes - cur_bytes);
+
+ if (inc < 0) {
+ OSD_ERR("BAD FOOD from target. list not valid!"
+ "c=%d r=%d n=%d\n",
+ cur_bytes, returned_bytes, n);
+ oa->val_ptr = NULL;
+ cur_bytes = returned_bytes; /* break the caller loop */
+ break;
+ }
+
+ cur_bytes += inc;
+ cur_p += inc;
+ ++oa;
+ }
+
+ *iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
+ *nelem = n;
+ return returned_bytes - cur_bytes;
+}
+EXPORT_SYMBOL(osd_req_decode_get_attr_list);
+
+/*
+ * Attributes Page-mode
+ */
+
+int osd_req_add_get_attr_page(struct osd_request *or,
+ u32 page_id, void *attar_page, unsigned max_page_len,
+ const struct osd_attr *set_one_attr)
+{
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+
+ if (or->attributes_mode &&
+ or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
+
+ or->get_attr.buff = attar_page;
+ or->get_attr.total_bytes = max_page_len;
+
+ cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
+ cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
+
+ if (!set_one_attr || !set_one_attr->attr_page)
+ return 0; /* The set is optional */
+
+ or->set_attr.buff = set_one_attr->val_ptr;
+ or->set_attr.total_bytes = set_one_attr->len;
+
+ cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
+ cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
+ cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_add_get_attr_page);
+
+static int _osd_req_finalize_attr_page(struct osd_request *or)
+{
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+ unsigned in_padding, out_padding;
+ int ret;
+
+ /* returned page */
+ cdbh->attrs_page.get_attr_offset =
+ osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
+
+ ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
+ &or->in);
+ if (ret)
+ return ret;
+
+ if (or->set_attr.total_bytes == 0)
+ return 0;
+
+ /* set one value */
+ cdbh->attrs_page.set_attr_offset =
+ osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
+
+ ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
+ &or->out);
+ return ret;
+}
+
+static inline void osd_sec_parms_set_out_offset(bool is_v1,
+ struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
+{
+ if (is_v1)
+ sec_parms->v1.data_out_integrity_check_offset = offset;
+ else
+ sec_parms->v2.data_out_integrity_check_offset = offset;
+}
+
+static inline void osd_sec_parms_set_in_offset(bool is_v1,
+ struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
+{
+ if (is_v1)
+ sec_parms->v1.data_in_integrity_check_offset = offset;
+ else
+ sec_parms->v2.data_in_integrity_check_offset = offset;
+}
+
+static int _osd_req_finalize_data_integrity(struct osd_request *or,
+ bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
+ const u8 *cap_key)
+{
+ struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
+ int ret;
+
+ if (!osd_is_sec_alldata(sec_parms))
+ return 0;
+
+ if (has_out) {
+ struct _osd_req_data_segment seg = {
+ .buff = &or->out_data_integ,
+ .total_bytes = sizeof(or->out_data_integ),
+ };
+ unsigned pad;
+
+ or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
+ or->out_data_integ.set_attributes_bytes = cpu_to_be64(
+ or->set_attr.total_bytes);
+ or->out_data_integ.get_attributes_bytes = cpu_to_be64(
+ or->enc_get_attr.total_bytes);
+
+ osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
+ osd_req_encode_offset(or, or->out.total_bytes, &pad));
+
+ ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
+ &or->out);
+ if (ret)
+ return ret;
+ or->out.last_seg = NULL;
+
+ /* they are now all chained to request sign them all together */
+ osd_sec_sign_data(&or->out_data_integ, out_data_bio,
+ cap_key);
+ }
+
+ if (has_in) {
+ struct _osd_req_data_segment seg = {
+ .buff = &or->in_data_integ,
+ .total_bytes = sizeof(or->in_data_integ),
+ };
+ unsigned pad;
+
+ osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
+ osd_req_encode_offset(or, or->in.total_bytes, &pad));
+
+ ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
+ &or->in);
+ if (ret)
+ return ret;
+
+ or->in.last_seg = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * osd_finalize_request and helpers
+ */
+static struct request *_make_request(struct request_queue *q, bool has_write,
+ struct _osd_io_info *oii, gfp_t flags)
+{
+ if (oii->bio)
+ return blk_make_request(q, oii->bio, flags);
+ else {
+ struct request *req;
+
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (IS_ERR(req))
+ return req;
+
+ blk_rq_set_block_pc(req);
+ return req;
+ }
+}
+
+static int _init_blk_request(struct osd_request *or,
+ bool has_in, bool has_out)
+{
+ gfp_t flags = or->alloc_flags;
+ struct scsi_device *scsi_device = or->osd_dev->scsi_device;
+ struct request_queue *q = scsi_device->request_queue;
+ struct request *req;
+ int ret;
+
+ req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto out;
+ }
+
+ or->request = req;
+ req->cmd_flags |= REQ_QUIET;
+
+ req->timeout = or->timeout;
+ req->retries = or->retries;
+ req->sense = or->sense;
+ req->sense_len = 0;
+
+ if (has_out) {
+ or->out.req = req;
+ if (has_in) {
+ /* allocate bidi request */
+ req = _make_request(q, false, &or->in, flags);
+ if (IS_ERR(req)) {
+ OSD_DEBUG("blk_get_request for bidi failed\n");
+ ret = PTR_ERR(req);
+ goto out;
+ }
+ blk_rq_set_block_pc(req);
+ or->in.req = or->request->next_rq = req;
+ }
+ } else if (has_in)
+ or->in.req = req;
+
+ ret = 0;
+out:
+ OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
+ or, has_in, has_out, ret, or->request);
+ return ret;
+}
+
+int osd_finalize_request(struct osd_request *or,
+ u8 options, const void *cap, const u8 *cap_key)
+{
+ struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+ bool has_in, has_out;
+ /* Save for data_integrity without the cdb_continuation */
+ struct bio *out_data_bio = or->out.bio;
+ u64 out_data_bytes = or->out.total_bytes;
+ int ret;
+
+ if (options & OSD_REQ_FUA)
+ cdbh->options |= OSD_CDB_FUA;
+
+ if (options & OSD_REQ_DPO)
+ cdbh->options |= OSD_CDB_DPO;
+
+ if (options & OSD_REQ_BYPASS_TIMESTAMPS)
+ cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
+
+ osd_set_caps(&or->cdb, cap);
+
+ has_in = or->in.bio || or->get_attr.total_bytes;
+ has_out = or->out.bio || or->cdb_cont.total_bytes ||
+ or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
+
+ ret = _osd_req_finalize_cdb_cont(or, cap_key);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
+ return ret;
+ }
+ ret = _init_blk_request(or, has_in, has_out);
+ if (ret) {
+ OSD_DEBUG("_init_blk_request failed\n");
+ return ret;
+ }
+
+ or->out.pad_buff = sg_out_pad_buffer;
+ or->in.pad_buff = sg_in_pad_buffer;
+
+ if (!or->attributes_mode)
+ or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
+ cdbh->command_specific_options |= or->attributes_mode;
+ if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
+ ret = _osd_req_finalize_attr_page(or);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
+ return ret;
+ }
+ } else {
+ /* TODO: I think that for the GET_ATTR command these 2 should
+ * be reversed to keep them in execution order (for embeded
+ * targets with low memory footprint)
+ */
+ ret = _osd_req_finalize_set_attr_list(or);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
+ return ret;
+ }
+
+ ret = _osd_req_finalize_get_attr_list(or);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
+ return ret;
+ }
+ }
+
+ ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
+ out_data_bio, out_data_bytes,
+ cap_key);
+ if (ret)
+ return ret;
+
+ osd_sec_sign_cdb(&or->cdb, cap_key);
+
+ or->request->cmd = or->cdb.buff;
+ or->request->cmd_len = _osd_req_cdb_len(or);
+
+ return 0;
+}
+EXPORT_SYMBOL(osd_finalize_request);
+
+static bool _is_osd_security_code(int code)
+{
+ return (code == osd_security_audit_value_frozen) ||
+ (code == osd_security_working_key_frozen) ||
+ (code == osd_nonce_not_unique) ||
+ (code == osd_nonce_timestamp_out_of_range) ||
+ (code == osd_invalid_dataout_buffer_integrity_check_value);
+}
+
+#define OSD_SENSE_PRINT1(fmt, a...) \
+ do { \
+ if (__cur_sense_need_output) \
+ OSD_ERR(fmt, ##a); \
+ } while (0)
+
+#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a)
+
+int osd_req_decode_sense_full(struct osd_request *or,
+ struct osd_sense_info *osi, bool silent,
+ struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
+ struct osd_attr *bad_attr_list, int max_attr)
+{
+ int sense_len, original_sense_len;
+ struct osd_sense_info local_osi;
+ struct scsi_sense_descriptor_based *ssdb;
+ void *cur_descriptor;
+#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
+ const bool __cur_sense_need_output = false;
+#else
+ bool __cur_sense_need_output = !silent;
+#endif
+ int ret;
+
+ if (likely(!or->req_errors))
+ return 0;
+
+ osi = osi ? : &local_osi;
+ memset(osi, 0, sizeof(*osi));
+
+ ssdb = (typeof(ssdb))or->sense;
+ sense_len = or->sense_len;
+ if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
+ OSD_ERR("Block-layer returned error(0x%x) but "
+ "sense_len(%u) || key(%d) is empty\n",
+ or->req_errors, sense_len, ssdb->sense_key);
+ goto analyze;
+ }
+
+ if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
+ OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
+ ssdb->response_code, sense_len);
+ goto analyze;
+ }
+
+ osi->key = ssdb->sense_key;
+ osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
+ original_sense_len = ssdb->additional_sense_length + 8;
+
+#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
+ if (__cur_sense_need_output)
+ __cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
+#endif
+ OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
+ "additional_code=0x%x async_error=%d errors=0x%x\n",
+ osi->key, original_sense_len, sense_len,
+ osi->additional_code, or->async_error,
+ or->req_errors);
+
+ if (original_sense_len < sense_len)
+ sense_len = original_sense_len;
+
+ cur_descriptor = ssdb->ssd;
+ sense_len -= sizeof(*ssdb);
+ while (sense_len > 0) {
+ struct scsi_sense_descriptor *ssd = cur_descriptor;
+ int cur_len = ssd->additional_length + 2;
+
+ sense_len -= cur_len;
+
+ if (sense_len < 0)
+ break; /* sense was truncated */
+
+ switch (ssd->descriptor_type) {
+ case scsi_sense_information:
+ case scsi_sense_command_specific_information:
+ {
+ struct scsi_sense_command_specific_data_descriptor
+ *sscd = cur_descriptor;
+
+ osi->command_info =
+ get_unaligned_be64(&sscd->information) ;
+ OSD_SENSE_PRINT2(
+ "command_specific_information 0x%llx \n",
+ _LLU(osi->command_info));
+ break;
+ }
+ case scsi_sense_key_specific:
+ {
+ struct scsi_sense_key_specific_data_descriptor
+ *ssks = cur_descriptor;
+
+ osi->sense_info = get_unaligned_be16(&ssks->value);
+ OSD_SENSE_PRINT2(
+ "sense_key_specific_information %u"
+ "sksv_cd_bpv_bp (0x%x)\n",
+ osi->sense_info, ssks->sksv_cd_bpv_bp);
+ break;
+ }
+ case osd_sense_object_identification:
+ { /*FIXME: Keep first not last, Store in array*/
+ struct osd_sense_identification_data_descriptor
+ *osidd = cur_descriptor;
+
+ osi->not_initiated_command_functions =
+ le32_to_cpu(osidd->not_initiated_functions);
+ osi->completed_command_functions =
+ le32_to_cpu(osidd->completed_functions);
+ osi->obj.partition = be64_to_cpu(osidd->partition_id);
+ osi->obj.id = be64_to_cpu(osidd->object_id);
+ OSD_SENSE_PRINT2(
+ "object_identification pid=0x%llx oid=0x%llx\n",
+ _LLU(osi->obj.partition), _LLU(osi->obj.id));
+ OSD_SENSE_PRINT2(
+ "not_initiated_bits(%x) "
+ "completed_command_bits(%x)\n",
+ osi->not_initiated_command_functions,
+ osi->completed_command_functions);
+ break;
+ }
+ case osd_sense_response_integrity_check:
+ {
+ struct osd_sense_response_integrity_check_descriptor
+ *osricd = cur_descriptor;
+ const unsigned len =
+ sizeof(osricd->integrity_check_value);
+ char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
+
+ hex_dump_to_buffer(osricd->integrity_check_value, len,
+ 32, 1, key_dump, sizeof(key_dump), true);
+ OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
+ }
+ case osd_sense_attribute_identification:
+ {
+ struct osd_sense_attributes_data_descriptor
+ *osadd = cur_descriptor;
+ unsigned len = min(cur_len, sense_len);
+ struct osd_sense_attr *pattr = osadd->sense_attrs;
+
+ while (len >= sizeof(*pattr)) {
+ u32 attr_page = be32_to_cpu(pattr->attr_page);
+ u32 attr_id = be32_to_cpu(pattr->attr_id);
+
+ if (!osi->attr.attr_page) {
+ osi->attr.attr_page = attr_page;
+ osi->attr.attr_id = attr_id;
+ }
+
+ if (bad_attr_list && max_attr) {
+ bad_attr_list->attr_page = attr_page;
+ bad_attr_list->attr_id = attr_id;
+ bad_attr_list++;
+ max_attr--;
+ }
+
+ len -= sizeof(*pattr);
+ OSD_SENSE_PRINT2(
+ "osd_sense_attribute_identification"
+ "attr_page=0x%x attr_id=0x%x\n",
+ attr_page, attr_id);
+ }
+ }
+ /*These are not legal for OSD*/
+ case scsi_sense_field_replaceable_unit:
+ OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
+ break;
+ case scsi_sense_stream_commands:
+ OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
+ break;
+ case scsi_sense_block_commands:
+ OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
+ break;
+ case scsi_sense_ata_return:
+ OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
+ break;
+ default:
+ if (ssd->descriptor_type <= scsi_sense_Reserved_last)
+ OSD_SENSE_PRINT2(
+ "scsi_sense Reserved descriptor (0x%x)",
+ ssd->descriptor_type);
+ else
+ OSD_SENSE_PRINT2(
+ "scsi_sense Vendor descriptor (0x%x)",
+ ssd->descriptor_type);
+ }
+
+ cur_descriptor += cur_len;
+ }
+
+analyze:
+ if (!osi->key) {
+ /* scsi sense is Empty, the request was never issued to target
+ * linux return code might tell us what happened.
+ */
+ if (or->async_error == -ENOMEM)
+ osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
+ else
+ osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
+ ret = or->async_error;
+ } else if (osi->key <= scsi_sk_recovered_error) {
+ osi->osd_err_pri = 0;
+ ret = 0;
+ } else if (osi->additional_code == scsi_invalid_field_in_cdb) {
+ if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
+ osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
+ ret = -EFAULT; /* caller should recover from this */
+ } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
+ osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
+ ret = -ENOENT;
+ } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
+ osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
+ ret = -EACCES;
+ } else {
+ osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
+ ret = -EINVAL;
+ }
+ } else if (osi->additional_code == osd_quota_error) {
+ osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
+ ret = -ENOSPC;
+ } else if (_is_osd_security_code(osi->additional_code)) {
+ osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
+ ret = -EINVAL;
+ } else {
+ osi->osd_err_pri = OSD_ERR_PRI_EIO;
+ ret = -EIO;
+ }
+
+ if (!or->out.residual)
+ or->out.residual = or->out.total_bytes;
+ if (!or->in.residual)
+ or->in.residual = or->in.total_bytes;
+
+ return ret;
+}
+EXPORT_SYMBOL(osd_req_decode_sense_full);
+
+/*
+ * Implementation of osd_sec.h API
+ * TODO: Move to a separate osd_sec.c file at a later stage.
+ */
+
+enum { OSD_SEC_CAP_V1_ALL_CAPS =
+ OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
+ OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
+ OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
+ OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
+};
+
+enum { OSD_SEC_CAP_V2_ALL_CAPS =
+ OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
+};
+
+void osd_sec_init_nosec_doall_caps(void *caps,
+ const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
+{
+ struct osd_capability *cap = caps;
+ u8 type;
+ u8 descriptor_type;
+
+ if (likely(obj->id)) {
+ if (unlikely(is_collection)) {
+ type = OSD_SEC_OBJ_COLLECTION;
+ descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
+ OSD_SEC_OBJ_DESC_COL;
+ } else {
+ type = OSD_SEC_OBJ_USER;
+ descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
+ }
+ WARN_ON(!obj->partition);
+ } else {
+ type = obj->partition ? OSD_SEC_OBJ_PARTITION :
+ OSD_SEC_OBJ_ROOT;
+ descriptor_type = OSD_SEC_OBJ_DESC_PAR;
+ }
+
+ memset(cap, 0, sizeof(*cap));
+
+ cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
+ cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
+ cap->h.security_method = OSD_SEC_NOSEC;
+/* cap->expiration_time;
+ cap->AUDIT[30-10];
+ cap->discriminator[42-30];
+ cap->object_created_time; */
+ cap->h.object_type = type;
+ osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
+ cap->h.object_descriptor_type = descriptor_type;
+ cap->od.obj_desc.policy_access_tag = 0;
+ cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
+ cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
+}
+EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
+
+/* FIXME: Extract version from caps pointer.
+ * Also Pete's target only supports caps from OSDv1 for now
+ */
+void osd_set_caps(struct osd_cdb *cdb, const void *caps)
+{
+ bool is_ver1 = true;
+ /* NOTE: They start at same address */
+ memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
+}
+
+bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
+{
+ return false;
+}
+
+void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
+{
+}
+
+void osd_sec_sign_data(void *data_integ __unused,
+ struct bio *bio __unused, const u8 *cap_key __unused)
+{
+}
+
+/*
+ * Declared in osd_protocol.h
+ * 4.12.5 Data-In and Data-Out buffer offsets
+ * byte offset = mantissa * (2^(exponent+8))
+ * Returns the smallest allowed encoded offset that contains given @offset
+ * The actual encoded offset returned is @offset + *@padding.
+ */
+osd_cdb_offset __osd_encode_offset(
+ u64 offset, unsigned *padding, int min_shift, int max_shift)
+{
+ u64 try_offset = -1, mod, align;
+ osd_cdb_offset be32_offset;
+ int shift;
+
+ *padding = 0;
+ if (!offset)
+ return 0;
+
+ for (shift = min_shift; shift < max_shift; ++shift) {
+ try_offset = offset >> shift;
+ if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
+ break;
+ }
+
+ BUG_ON(shift == max_shift);
+
+ align = 1 << shift;
+ mod = offset & (align - 1);
+ if (mod) {
+ *padding = align - mod;
+ try_offset += 1;
+ }
+
+ try_offset |= ((shift - 8) & 0xf) << 28;
+ be32_offset = cpu_to_be32((u32)try_offset);
+
+ OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
+ _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
+ be32_offset, *padding);
+ return be32_offset;
+}
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
new file mode 100644
index 000000000..243eab3d1
--- /dev/null
+++ b/drivers/scsi/osd/osd_uld.c
@@ -0,0 +1,594 @@
+/*
+ * osd_uld.c - OSD Upper Layer Driver
+ *
+ * A Linux driver module that registers as a SCSI ULD and probes
+ * for OSD type SCSI devices.
+ * It's main function is to export osd devices to in-kernel users like
+ * osdfs and pNFS-objects-LD. It also provides one ioctl for running
+ * in Kernel tests.
+ *
+ * Copyright (C) 2008 Panasas Inc. All rights reserved.
+ *
+ * Authors:
+ * Boaz Harrosh <ooo@electrozaur.com>
+ * Benny Halevy <bhalevy@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/namei.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/major.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_ioctl.h>
+
+#include <scsi/osd_initiator.h>
+#include <scsi/osd_sec.h>
+
+#include "osd_debug.h"
+
+#ifndef TYPE_OSD
+# define TYPE_OSD 0x11
+#endif
+
+#ifndef SCSI_OSD_MAJOR
+# define SCSI_OSD_MAJOR 260
+#endif
+#define SCSI_OSD_MAX_MINOR MINORMASK
+
+static const char osd_name[] = "osd";
+static const char *osd_version_string = "open-osd 0.2.1";
+
+MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
+MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD);
+
+struct osd_uld_device {
+ int minor;
+ struct device class_dev;
+ struct cdev cdev;
+ struct osd_dev od;
+ struct osd_dev_info odi;
+ struct gendisk *disk;
+};
+
+struct osd_dev_handle {
+ struct osd_dev od;
+ struct file *file;
+ struct osd_uld_device *oud;
+} ;
+
+static DEFINE_IDA(osd_minor_ida);
+
+/*
+ * scsi sysfs attribute operations
+ */
+static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+ return sprintf(buf, "%s\n", ould->odi.osdname);
+}
+static DEVICE_ATTR_RO(osdname);
+
+static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+
+ memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
+ return ould->odi.systemid_len;
+}
+static DEVICE_ATTR_RO(systemid);
+
+static struct attribute *osd_uld_attrs[] = {
+ &dev_attr_osdname.attr,
+ &dev_attr_systemid.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(osd_uld);
+
+static struct class osd_uld_class = {
+ .owner = THIS_MODULE,
+ .name = "scsi_osd",
+ .dev_groups = osd_uld_groups,
+};
+
+/*
+ * Char Device operations
+ */
+
+static int osd_uld_open(struct inode *inode, struct file *file)
+{
+ struct osd_uld_device *oud = container_of(inode->i_cdev,
+ struct osd_uld_device, cdev);
+
+ get_device(&oud->class_dev);
+ /* cache osd_uld_device on file handle */
+ file->private_data = oud;
+ OSD_DEBUG("osd_uld_open %p\n", oud);
+ return 0;
+}
+
+static int osd_uld_release(struct inode *inode, struct file *file)
+{
+ struct osd_uld_device *oud = file->private_data;
+
+ OSD_DEBUG("osd_uld_release %p\n", file->private_data);
+ file->private_data = NULL;
+ put_device(&oud->class_dev);
+ return 0;
+}
+
+/* FIXME: Only one vector for now */
+unsigned g_test_ioctl;
+do_test_fn *g_do_test;
+
+int osduld_register_test(unsigned ioctl, do_test_fn *do_test)
+{
+ if (g_test_ioctl)
+ return -EINVAL;
+
+ g_test_ioctl = ioctl;
+ g_do_test = do_test;
+ return 0;
+}
+EXPORT_SYMBOL(osduld_register_test);
+
+void osduld_unregister_test(unsigned ioctl)
+{
+ if (ioctl == g_test_ioctl) {
+ g_test_ioctl = 0;
+ g_do_test = NULL;
+ }
+}
+EXPORT_SYMBOL(osduld_unregister_test);
+
+static do_test_fn *_find_ioctl(unsigned cmd)
+{
+ if (g_test_ioctl == cmd)
+ return g_do_test;
+ else
+ return NULL;
+}
+
+static long osd_uld_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct osd_uld_device *oud = file->private_data;
+ int ret;
+ do_test_fn *do_test;
+
+ do_test = _find_ioctl(cmd);
+ if (do_test)
+ ret = do_test(&oud->od, cmd, arg);
+ else {
+ OSD_ERR("Unknown ioctl %d: osd_uld_device=%p\n", cmd, oud);
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static const struct file_operations osd_fops = {
+ .owner = THIS_MODULE,
+ .open = osd_uld_open,
+ .release = osd_uld_release,
+ .unlocked_ioctl = osd_uld_ioctl,
+ .llseek = noop_llseek,
+};
+
+struct osd_dev *osduld_path_lookup(const char *name)
+{
+ struct osd_uld_device *oud;
+ struct osd_dev_handle *odh;
+ struct file *file;
+ int error;
+
+ if (!name || !*name) {
+ OSD_ERR("Mount with !path || !*path\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ odh = kzalloc(sizeof(*odh), GFP_KERNEL);
+ if (unlikely(!odh))
+ return ERR_PTR(-ENOMEM);
+
+ file = filp_open(name, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto free_od;
+ }
+
+ if (file->f_op != &osd_fops){
+ error = -EINVAL;
+ goto close_file;
+ }
+
+ oud = file->private_data;
+
+ odh->od = oud->od;
+ odh->file = file;
+ odh->oud = oud;
+
+ return &odh->od;
+
+close_file:
+ fput(file);
+free_od:
+ kfree(odh);
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL(osduld_path_lookup);
+
+static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len,
+ const u8 *a2, unsigned a2_len)
+{
+ if (!a2_len) /* User string is Empty means don't care */
+ return true;
+
+ if (a1_len != a2_len)
+ return false;
+
+ return 0 == memcmp(a1, a2, a1_len);
+}
+
+static int _match_odi(struct device *dev, const void *find_data)
+{
+ struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
+ class_dev);
+ const struct osd_dev_info *odi = find_data;
+
+ if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
+ odi->systemid, odi->systemid_len) &&
+ _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
+ odi->osdname, odi->osdname_len)) {
+ OSD_DEBUG("found device sysid_len=%d osdname=%d\n",
+ odi->systemid_len, odi->osdname_len);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* osduld_info_lookup - Loop through all devices, return the requested osd_dev.
+ *
+ * if @odi->systemid_len and/or @odi->osdname_len are zero, they act as a don't
+ * care. .e.g if they're both zero /dev/osd0 is returned.
+ */
+struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi)
+{
+ struct device *dev = class_find_device(&osd_uld_class, NULL, odi, _match_odi);
+ if (likely(dev)) {
+ struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL);
+ struct osd_uld_device *oud = container_of(dev,
+ struct osd_uld_device, class_dev);
+
+ if (unlikely(!odh)) {
+ put_device(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ odh->od = oud->od;
+ odh->oud = oud;
+
+ return &odh->od;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(osduld_info_lookup);
+
+void osduld_put_device(struct osd_dev *od)
+{
+ if (od && !IS_ERR(od)) {
+ struct osd_dev_handle *odh =
+ container_of(od, struct osd_dev_handle, od);
+ struct osd_uld_device *oud = odh->oud;
+
+ BUG_ON(od->scsi_device != oud->od.scsi_device);
+
+ /* If scsi has released the device (logout), and exofs has last
+ * reference on oud it will be freed by above osd_uld_release
+ * within fput below. But this will oops in cdev_release which
+ * is called after the fops->release. A get_/put_ pair makes
+ * sure we have a cdev for the duration of fput
+ */
+ if (odh->file) {
+ get_device(&oud->class_dev);
+ fput(odh->file);
+ }
+ put_device(&oud->class_dev);
+ kfree(odh);
+ }
+}
+EXPORT_SYMBOL(osduld_put_device);
+
+const struct osd_dev_info *osduld_device_info(struct osd_dev *od)
+{
+ struct osd_dev_handle *odh =
+ container_of(od, struct osd_dev_handle, od);
+ return &odh->oud->odi;
+}
+EXPORT_SYMBOL(osduld_device_info);
+
+bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi)
+{
+ struct osd_dev_handle *odh =
+ container_of(od, struct osd_dev_handle, od);
+ struct osd_uld_device *oud = odh->oud;
+
+ return (oud->odi.systemid_len == odi->systemid_len) &&
+ _the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
+ odi->systemid, odi->systemid_len) &&
+ (oud->odi.osdname_len == odi->osdname_len) &&
+ _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
+ odi->osdname, odi->osdname_len);
+}
+EXPORT_SYMBOL(osduld_device_same);
+
+/*
+ * Scsi Device operations
+ */
+
+static int __detect_osd(struct osd_uld_device *oud)
+{
+ struct scsi_device *scsi_device = oud->od.scsi_device;
+ char caps[OSD_CAP_LEN];
+ int error;
+
+ /* sending a test_unit_ready as first command seems to be needed
+ * by some targets
+ */
+ OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n",
+ oud, scsi_device, scsi_device->request_queue);
+ error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, NULL);
+ if (error)
+ OSD_ERR("warning: scsi_test_unit_ready failed\n");
+
+ osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
+ if (osd_auto_detect_ver(&oud->od, caps, &oud->odi))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __remove(struct device *dev)
+{
+ struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
+ class_dev);
+ struct scsi_device *scsi_device = oud->od.scsi_device;
+
+ kfree(oud->odi.osdname);
+
+ if (oud->cdev.owner)
+ cdev_del(&oud->cdev);
+
+ osd_dev_fini(&oud->od);
+ scsi_device_put(scsi_device);
+
+ OSD_INFO("osd_remove %s\n",
+ oud->disk ? oud->disk->disk_name : NULL);
+
+ if (oud->disk)
+ put_disk(oud->disk);
+ ida_remove(&osd_minor_ida, oud->minor);
+
+ kfree(oud);
+}
+
+static int osd_probe(struct device *dev)
+{
+ struct scsi_device *scsi_device = to_scsi_device(dev);
+ struct gendisk *disk;
+ struct osd_uld_device *oud;
+ int minor;
+ int error;
+
+ if (scsi_device->type != TYPE_OSD)
+ return -ENODEV;
+
+ do {
+ if (!ida_pre_get(&osd_minor_ida, GFP_KERNEL))
+ return -ENODEV;
+
+ error = ida_get_new(&osd_minor_ida, &minor);
+ } while (error == -EAGAIN);
+
+ if (error)
+ return error;
+ if (minor >= SCSI_OSD_MAX_MINOR) {
+ error = -EBUSY;
+ goto err_retract_minor;
+ }
+
+ error = -ENOMEM;
+ oud = kzalloc(sizeof(*oud), GFP_KERNEL);
+ if (NULL == oud)
+ goto err_retract_minor;
+
+ dev_set_drvdata(dev, oud);
+ oud->minor = minor;
+
+ /* allocate a disk and set it up */
+ /* FIXME: do we need this since sg has already done that */
+ disk = alloc_disk(1);
+ if (!disk) {
+ OSD_ERR("alloc_disk failed\n");
+ goto err_free_osd;
+ }
+ disk->major = SCSI_OSD_MAJOR;
+ disk->first_minor = oud->minor;
+ sprintf(disk->disk_name, "osd%d", oud->minor);
+ oud->disk = disk;
+
+ /* hold one more reference to the scsi_device that will get released
+ * in __release, in case a logout is happening while fs is mounted
+ */
+ scsi_device_get(scsi_device);
+ osd_dev_init(&oud->od, scsi_device);
+
+ /* Detect the OSD Version */
+ error = __detect_osd(oud);
+ if (error) {
+ OSD_ERR("osd detection failed, non-compatible OSD device\n");
+ goto err_put_disk;
+ }
+
+ /* init the char-device for communication with user-mode */
+ cdev_init(&oud->cdev, &osd_fops);
+ oud->cdev.owner = THIS_MODULE;
+ error = cdev_add(&oud->cdev,
+ MKDEV(SCSI_OSD_MAJOR, oud->minor), 1);
+ if (error) {
+ OSD_ERR("cdev_add failed\n");
+ goto err_put_disk;
+ }
+
+ /* class device member */
+ oud->class_dev.devt = oud->cdev.dev;
+ oud->class_dev.class = &osd_uld_class;
+ oud->class_dev.parent = dev;
+ oud->class_dev.release = __remove;
+ error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
+ if (error) {
+ OSD_ERR("dev_set_name failed => %d\n", error);
+ goto err_put_cdev;
+ }
+
+ error = device_register(&oud->class_dev);
+ if (error) {
+ OSD_ERR("device_register failed => %d\n", error);
+ goto err_put_cdev;
+ }
+
+ get_device(&oud->class_dev);
+
+ OSD_INFO("osd_probe %s\n", disk->disk_name);
+ return 0;
+
+err_put_cdev:
+ cdev_del(&oud->cdev);
+err_put_disk:
+ scsi_device_put(scsi_device);
+ put_disk(disk);
+err_free_osd:
+ dev_set_drvdata(dev, NULL);
+ kfree(oud);
+err_retract_minor:
+ ida_remove(&osd_minor_ida, minor);
+ return error;
+}
+
+static int osd_remove(struct device *dev)
+{
+ struct scsi_device *scsi_device = to_scsi_device(dev);
+ struct osd_uld_device *oud = dev_get_drvdata(dev);
+
+ if (!oud || (oud->od.scsi_device != scsi_device)) {
+ OSD_ERR("Half cooked osd-device %p,%p || %p!=%p",
+ dev, oud, oud ? oud->od.scsi_device : NULL,
+ scsi_device);
+ }
+
+ device_unregister(&oud->class_dev);
+
+ put_device(&oud->class_dev);
+ return 0;
+}
+
+/*
+ * Global driver and scsi registration
+ */
+
+static struct scsi_driver osd_driver = {
+ .gendrv = {
+ .name = osd_name,
+ .owner = THIS_MODULE,
+ .probe = osd_probe,
+ .remove = osd_remove,
+ }
+};
+
+static int __init osd_uld_init(void)
+{
+ int err;
+
+ err = class_register(&osd_uld_class);
+ if (err) {
+ OSD_ERR("Unable to register sysfs class => %d\n", err);
+ return err;
+ }
+
+ err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0),
+ SCSI_OSD_MAX_MINOR, osd_name);
+ if (err) {
+ OSD_ERR("Unable to register major %d for osd ULD => %d\n",
+ SCSI_OSD_MAJOR, err);
+ goto err_out;
+ }
+
+ err = scsi_register_driver(&osd_driver.gendrv);
+ if (err) {
+ OSD_ERR("scsi_register_driver failed => %d\n", err);
+ goto err_out_chrdev;
+ }
+
+ OSD_INFO("LOADED %s\n", osd_version_string);
+ return 0;
+
+err_out_chrdev:
+ unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
+err_out:
+ class_unregister(&osd_uld_class);
+ return err;
+}
+
+static void __exit osd_uld_exit(void)
+{
+ scsi_unregister_driver(&osd_driver.gendrv);
+ unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
+ class_unregister(&osd_uld_class);
+ OSD_INFO("UNLOADED %s\n", osd_version_string);
+}
+
+module_init(osd_uld_init);
+module_exit(osd_uld_exit);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
new file mode 100644
index 000000000..5033223f6
--- /dev/null
+++ b/drivers/scsi/osst.c
@@ -0,0 +1,6096 @@
+/*
+ SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
+ file Documentation/scsi/st.txt for more information.
+
+ History:
+
+ OnStream SCSI Tape support (osst) cloned from st.c by
+ Willem Riede (osst@riede.org) Feb 2000
+ Fixes ... Kurt Garloff <garloff@suse.de> Mar 2000
+
+ Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
+ Contribution and ideas from several people including (in alphabetical
+ order) Klaus Ehrenfried, Wolfgang Denk, Steve Hirsch, Andreas Koppenh"ofer,
+ Michael Leodolter, Eyal Lebedinsky, J"org Weule, and Eric Youngdale.
+
+ Copyright 1992 - 2002 Kai Makisara / 2000 - 2006 Willem Riede
+ email osst@riede.org
+
+ $Header: /cvsroot/osst/Driver/osst.c,v 1.73 2005/01/01 21:13:34 wriede Exp $
+
+ Microscopic alterations - Rik Ling, 2000/12/21
+ Last st.c sync: Tue Oct 15 22:01:04 2002 by makisara
+ Some small formal changes - aeb, 950809
+*/
+
+static const char * cvsid = "$Id: osst.c,v 1.73 2005/01/01 21:13:34 wriede Exp $";
+static const char * osst_version = "0.99.4";
+
+/* The "failure to reconnect" firmware bug */
+#define OSST_FW_NEED_POLL_MIN 10601 /*(107A)*/
+#define OSST_FW_NEED_POLL_MAX 10704 /*(108D)*/
+#define OSST_FW_NEED_POLL(x,d) ((x) >= OSST_FW_NEED_POLL_MIN && (x) <= OSST_FW_NEED_POLL_MAX && d->host->this_id != 7)
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mtio.h>
+#include <linux/ioctl.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+#include <asm/dma.h>
+
+/* The driver prints some debugging information on the console if DEBUG
+ is defined and non-zero. */
+#define DEBUG 0
+
+/* The message level for the debug messages is currently set to KERN_NOTICE
+ so that people can easily see the messages. Later when the debugging messages
+ in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
+#define OSST_DEB_MSG KERN_NOTICE
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+
+#define ST_KILOBYTE 1024
+
+#include "st.h"
+#include "osst.h"
+#include "osst_options.h"
+#include "osst_detect.h"
+
+static DEFINE_MUTEX(osst_int_mutex);
+static int max_dev = 0;
+static int write_threshold_kbs = 0;
+static int max_sg_segs = 0;
+
+#ifdef MODULE
+MODULE_AUTHOR("Willem Riede");
+MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
+
+module_param(max_dev, int, 0444);
+MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)");
+
+module_param(write_threshold_kbs, int, 0644);
+MODULE_PARM_DESC(write_threshold_kbs, "Asynchronous write threshold (KB; 32)");
+
+module_param(max_sg_segs, int, 0644);
+MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (9)");
+#else
+static struct osst_dev_parm {
+ char *name;
+ int *val;
+} parms[] __initdata = {
+ { "max_dev", &max_dev },
+ { "write_threshold_kbs", &write_threshold_kbs },
+ { "max_sg_segs", &max_sg_segs }
+};
+#endif
+
+/* Some default definitions have been moved to osst_options.h */
+#define OSST_BUFFER_SIZE (OSST_BUFFER_BLOCKS * ST_KILOBYTE)
+#define OSST_WRITE_THRESHOLD (OSST_WRITE_THRESHOLD_BLOCKS * ST_KILOBYTE)
+
+/* The buffer size should fit into the 24 bits for length in the
+ 6-byte SCSI read and write commands. */
+#if OSST_BUFFER_SIZE >= (2 << 24 - 1)
+#error "Buffer size should not exceed (2 << 24 - 1) bytes!"
+#endif
+
+#if DEBUG
+static int debugging = 1;
+/* uncomment define below to test error recovery */
+// #define OSST_INJECT_ERRORS 1
+#endif
+
+/* Do not retry! The drive firmware already retries when appropriate,
+ and when it tries to tell us something, we had better listen... */
+#define MAX_RETRIES 0
+
+#define NO_TAPE NOT_READY
+
+#define OSST_WAIT_POSITION_COMPLETE (HZ > 200 ? HZ / 200 : 1)
+#define OSST_WAIT_WRITE_COMPLETE (HZ / 12)
+#define OSST_WAIT_LONG_WRITE_COMPLETE (HZ / 2)
+
+#define OSST_TIMEOUT (200 * HZ)
+#define OSST_LONG_TIMEOUT (1800 * HZ)
+
+#define TAPE_NR(x) (iminor(x) & ~(-1 << ST_MODE_SHIFT))
+#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
+#define TAPE_REWIND(x) ((iminor(x) & 0x80) == 0)
+#define TAPE_IS_RAW(x) (TAPE_MODE(x) & (ST_NBR_MODES >> 1))
+
+/* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower
+ 24 bits) */
+#define SET_DENS_AND_BLK 0x10001
+
+static int osst_buffer_size = OSST_BUFFER_SIZE;
+static int osst_write_threshold = OSST_WRITE_THRESHOLD;
+static int osst_max_sg_segs = OSST_MAX_SG;
+static int osst_max_dev = OSST_MAX_TAPES;
+static int osst_nr_dev;
+
+static struct osst_tape **os_scsi_tapes = NULL;
+static DEFINE_RWLOCK(os_scsi_tapes_lock);
+
+static int modes_defined = 0;
+
+static struct osst_buffer *new_tape_buffer(int, int, int);
+static int enlarge_buffer(struct osst_buffer *, int);
+static void normalize_buffer(struct osst_buffer *);
+static int append_to_buffer(const char __user *, struct osst_buffer *, int);
+static int from_buffer(struct osst_buffer *, char __user *, int);
+static int osst_zero_buffer_tail(struct osst_buffer *);
+static int osst_copy_to_buffer(struct osst_buffer *, unsigned char *);
+static int osst_copy_from_buffer(struct osst_buffer *, unsigned char *);
+
+static int osst_probe(struct device *);
+static int osst_remove(struct device *);
+
+static struct scsi_driver osst_template = {
+ .gendrv = {
+ .name = "osst",
+ .owner = THIS_MODULE,
+ .probe = osst_probe,
+ .remove = osst_remove,
+ }
+};
+
+static int osst_int_ioctl(struct osst_tape *STp, struct osst_request ** aSRpnt,
+ unsigned int cmd_in, unsigned long arg);
+
+static int osst_set_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt, int frame, int skip);
+
+static int osst_get_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt);
+
+static int osst_flush_write_buffer(struct osst_tape *STp, struct osst_request ** aSRpnt);
+
+static int osst_write_error_recovery(struct osst_tape * STp, struct osst_request ** aSRpnt, int pending);
+
+static inline char *tape_name(struct osst_tape *tape)
+{
+ return tape->drive->disk_name;
+}
+
+/* Routines that handle the interaction with mid-layer SCSI routines */
+
+
+/* Normalize Sense */
+static void osst_analyze_sense(struct osst_request *SRpnt, struct st_cmdstatus *s)
+{
+ const u8 *ucp;
+ const u8 *sense = SRpnt->sense;
+
+ s->have_sense = scsi_normalize_sense(SRpnt->sense,
+ SCSI_SENSE_BUFFERSIZE, &s->sense_hdr);
+ s->flags = 0;
+
+ if (s->have_sense) {
+ s->deferred = 0;
+ s->remainder_valid =
+ scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64);
+ switch (sense[0] & 0x7f) {
+ case 0x71:
+ s->deferred = 1;
+ case 0x70:
+ s->fixed_format = 1;
+ s->flags = sense[2] & 0xe0;
+ break;
+ case 0x73:
+ s->deferred = 1;
+ case 0x72:
+ s->fixed_format = 0;
+ ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
+ s->flags = ucp ? (ucp[3] & 0xe0) : 0;
+ break;
+ }
+ }
+}
+
+/* Convert the result to success code */
+static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
+{
+ char *name = tape_name(STp);
+ int result = SRpnt->result;
+ u8 * sense = SRpnt->sense, scode;
+#if DEBUG
+ const char *stp;
+#endif
+ struct st_cmdstatus *cmdstatp;
+
+ if (!result)
+ return 0;
+
+ cmdstatp = &STp->buffer->cmdstat;
+ osst_analyze_sense(SRpnt, cmdstatp);
+
+ if (cmdstatp->have_sense)
+ scode = STp->buffer->cmdstat.sense_hdr.sense_key;
+ else
+ scode = 0;
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: Error: %x, cmd: %x %x %x %x %x %x\n",
+ name, result,
+ SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
+ SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
+ if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n",
+ name, scode, sense[12], sense[13]);
+ if (cmdstatp->have_sense)
+ __scsi_print_sense(STp->device, name,
+ SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
+ }
+ else
+#endif
+ if (cmdstatp->have_sense && (
+ scode != NO_SENSE &&
+ scode != RECOVERED_ERROR &&
+/* scode != UNIT_ATTENTION && */
+ scode != BLANK_CHECK &&
+ scode != VOLUME_OVERFLOW &&
+ SRpnt->cmd[0] != MODE_SENSE &&
+ SRpnt->cmd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */
+ if (cmdstatp->have_sense) {
+ printk(KERN_WARNING "%s:W: Command with sense data:\n", name);
+ __scsi_print_sense(STp->device, name,
+ SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
+ }
+ else {
+ static int notyetprinted = 1;
+
+ printk(KERN_WARNING
+ "%s:W: Warning %x (driver bt 0x%x, host bt 0x%x).\n",
+ name, result, driver_byte(result),
+ host_byte(result));
+ if (notyetprinted) {
+ notyetprinted = 0;
+ printk(KERN_INFO
+ "%s:I: This warning may be caused by your scsi controller,\n", name);
+ printk(KERN_INFO
+ "%s:I: it has been reported with some Buslogic cards.\n", name);
+ }
+ }
+ }
+ STp->pos_unknown |= STp->device->was_reset;
+
+ if (cmdstatp->have_sense && scode == RECOVERED_ERROR) {
+ STp->recover_count++;
+ STp->recover_erreg++;
+#if DEBUG
+ if (debugging) {
+ if (SRpnt->cmd[0] == READ_6)
+ stp = "read";
+ else if (SRpnt->cmd[0] == WRITE_6)
+ stp = "write";
+ else
+ stp = "ioctl";
+ printk(OSST_DEB_MSG "%s:D: Recovered %s error (%d).\n", name, stp,
+ STp->recover_count);
+ }
+#endif
+ if ((sense[2] & 0xe0) == 0)
+ return 0;
+ }
+ return (-EIO);
+}
+
+
+/* Wakeup from interrupt */
+static void osst_end_async(struct request *req, int update)
+{
+ struct osst_request *SRpnt = req->end_io_data;
+ struct osst_tape *STp = SRpnt->stp;
+ struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
+
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
+#if DEBUG
+ STp->write_pending = 0;
+#endif
+ if (SRpnt->waiting)
+ complete(SRpnt->waiting);
+
+ if (SRpnt->bio) {
+ kfree(mdata->pages);
+ blk_rq_unmap_user(SRpnt->bio);
+ }
+
+ __blk_put_request(req->q, req);
+}
+
+/* osst_request memory management */
+static struct osst_request *osst_allocate_request(void)
+{
+ return kzalloc(sizeof(struct osst_request), GFP_KERNEL);
+}
+
+static void osst_release_request(struct osst_request *streq)
+{
+ kfree(streq);
+}
+
+static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
+ int cmd_len, int data_direction, void *buffer, unsigned bufflen,
+ int use_sg, int timeout, int retries)
+{
+ struct request *req;
+ struct page **pages = NULL;
+ struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
+
+ int err = 0;
+ int write = (data_direction == DMA_TO_DEVICE);
+
+ req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
+ if (IS_ERR(req))
+ return DRIVER_ERROR << 24;
+
+ blk_rq_set_block_pc(req);
+ req->cmd_flags |= REQ_QUIET;
+
+ SRpnt->bio = NULL;
+
+ if (use_sg) {
+ struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
+ int i;
+
+ pages = kzalloc(use_sg * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto free_req;
+
+ for_each_sg(sgl, sg, use_sg, i)
+ pages[i] = sg_page(sg);
+
+ mdata->null_mapped = 1;
+
+ mdata->page_order = get_order(sgl[0].length);
+ mdata->nr_entries =
+ DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
+ mdata->offset = 0;
+
+ err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL);
+ if (err) {
+ kfree(pages);
+ goto free_req;
+ }
+ SRpnt->bio = req->bio;
+ mdata->pages = pages;
+
+ } else if (bufflen) {
+ err = blk_rq_map_kern(req->q, req, buffer, bufflen, GFP_KERNEL);
+ if (err)
+ goto free_req;
+ }
+
+ req->cmd_len = cmd_len;
+ memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ memcpy(req->cmd, cmd, req->cmd_len);
+ req->sense = SRpnt->sense;
+ req->sense_len = 0;
+ req->timeout = timeout;
+ req->retries = retries;
+ req->end_io_data = SRpnt;
+
+ blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
+ return 0;
+free_req:
+ blk_put_request(req);
+ return DRIVER_ERROR << 24;
+}
+
+/* Do the scsi command. Waits until command performed if do_wait is true.
+ Otherwise osst_write_behind_check() is used to check that the command
+ has finished. */
+static struct osst_request * osst_do_scsi(struct osst_request *SRpnt, struct osst_tape *STp,
+ unsigned char *cmd, int bytes, int direction, int timeout, int retries, int do_wait)
+{
+ unsigned char *bp;
+ unsigned short use_sg;
+#ifdef OSST_INJECT_ERRORS
+ static int inject = 0;
+ static int repeat = 0;
+#endif
+ struct completion *waiting;
+
+ /* if async, make sure there's no command outstanding */
+ if (!do_wait && ((STp->buffer)->last_SRpnt)) {
+ printk(KERN_ERR "%s: Async command already active.\n",
+ tape_name(STp));
+ if (signal_pending(current))
+ (STp->buffer)->syscall_result = (-EINTR);
+ else
+ (STp->buffer)->syscall_result = (-EBUSY);
+ return NULL;
+ }
+
+ if (SRpnt == NULL) {
+ SRpnt = osst_allocate_request();
+ if (SRpnt == NULL) {
+ printk(KERN_ERR "%s: Can't allocate SCSI request.\n",
+ tape_name(STp));
+ if (signal_pending(current))
+ (STp->buffer)->syscall_result = (-EINTR);
+ else
+ (STp->buffer)->syscall_result = (-EBUSY);
+ return NULL;
+ }
+ SRpnt->stp = STp;
+ }
+
+ /* If async IO, set last_SRpnt. This ptr tells write_behind_check
+ which IO is outstanding. It's nulled out when the IO completes. */
+ if (!do_wait)
+ (STp->buffer)->last_SRpnt = SRpnt;
+
+ waiting = &STp->wait;
+ init_completion(waiting);
+ SRpnt->waiting = waiting;
+
+ use_sg = (bytes > STp->buffer->sg[0].length) ? STp->buffer->use_sg : 0;
+ if (use_sg) {
+ bp = (char *)&(STp->buffer->sg[0]);
+ if (STp->buffer->sg_segs < use_sg)
+ use_sg = STp->buffer->sg_segs;
+ }
+ else
+ bp = (STp->buffer)->b_data;
+
+ memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
+ STp->buffer->cmdstat.have_sense = 0;
+ STp->buffer->syscall_result = 0;
+
+ if (osst_execute(SRpnt, cmd, COMMAND_SIZE(cmd[0]), direction, bp, bytes,
+ use_sg, timeout, retries))
+ /* could not allocate the buffer or request was too large */
+ (STp->buffer)->syscall_result = (-EBUSY);
+ else if (do_wait) {
+ wait_for_completion(waiting);
+ SRpnt->waiting = NULL;
+ STp->buffer->syscall_result = osst_chk_result(STp, SRpnt);
+#ifdef OSST_INJECT_ERRORS
+ if (STp->buffer->syscall_result == 0 &&
+ cmd[0] == READ_6 &&
+ cmd[4] &&
+ ( (++ inject % 83) == 29 ||
+ (STp->first_frame_position == 240
+ /* or STp->read_error_frame to fail again on the block calculated above */ &&
+ ++repeat < 3))) {
+ printk(OSST_DEB_MSG "%s:D: Injecting read error\n", tape_name(STp));
+ STp->buffer->last_result_fatal = 1;
+ }
+#endif
+ }
+ return SRpnt;
+}
+
+
+/* Handle the write-behind checking (downs the semaphore) */
+static void osst_write_behind_check(struct osst_tape *STp)
+{
+ struct osst_buffer * STbuffer;
+
+ STbuffer = STp->buffer;
+
+#if DEBUG
+ if (STp->write_pending)
+ STp->nbr_waits++;
+ else
+ STp->nbr_finished++;
+#endif
+ wait_for_completion(&(STp->wait));
+ STp->buffer->last_SRpnt->waiting = NULL;
+
+ STp->buffer->syscall_result = osst_chk_result(STp, STp->buffer->last_SRpnt);
+
+ if (STp->buffer->syscall_result)
+ STp->buffer->syscall_result =
+ osst_write_error_recovery(STp, &(STp->buffer->last_SRpnt), 1);
+ else
+ STp->first_frame_position++;
+
+ osst_release_request(STp->buffer->last_SRpnt);
+
+ if (STbuffer->writing < STbuffer->buffer_bytes)
+ printk(KERN_WARNING "osst :A: write_behind_check: something left in buffer!\n");
+
+ STbuffer->last_SRpnt = NULL;
+ STbuffer->buffer_bytes -= STbuffer->writing;
+ STbuffer->writing = 0;
+
+ return;
+}
+
+
+
+/* Onstream specific Routines */
+/*
+ * Initialize the OnStream AUX
+ */
+static void osst_init_aux(struct osst_tape * STp, int frame_type, int frame_seq_number,
+ int logical_blk_num, int blk_sz, int blk_cnt)
+{
+ os_aux_t *aux = STp->buffer->aux;
+ os_partition_t *par = &aux->partition;
+ os_dat_t *dat = &aux->dat;
+
+ if (STp->raw) return;
+
+ memset(aux, 0, sizeof(*aux));
+ aux->format_id = htonl(0);
+ memcpy(aux->application_sig, "LIN4", 4);
+ aux->hdwr = htonl(0);
+ aux->frame_type = frame_type;
+
+ switch (frame_type) {
+ case OS_FRAME_TYPE_HEADER:
+ aux->update_frame_cntr = htonl(STp->update_frame_cntr);
+ par->partition_num = OS_CONFIG_PARTITION;
+ par->par_desc_ver = OS_PARTITION_VERSION;
+ par->wrt_pass_cntr = htons(0xffff);
+ /* 0-4 = reserved, 5-9 = header, 2990-2994 = header, 2995-2999 = reserved */
+ par->first_frame_ppos = htonl(0);
+ par->last_frame_ppos = htonl(0xbb7);
+ aux->frame_seq_num = htonl(0);
+ aux->logical_blk_num_high = htonl(0);
+ aux->logical_blk_num = htonl(0);
+ aux->next_mark_ppos = htonl(STp->first_mark_ppos);
+ break;
+ case OS_FRAME_TYPE_DATA:
+ case OS_FRAME_TYPE_MARKER:
+ dat->dat_sz = 8;
+ dat->reserved1 = 0;
+ dat->entry_cnt = 1;
+ dat->reserved3 = 0;
+ dat->dat_list[0].blk_sz = htonl(blk_sz);
+ dat->dat_list[0].blk_cnt = htons(blk_cnt);
+ dat->dat_list[0].flags = frame_type==OS_FRAME_TYPE_MARKER?
+ OS_DAT_FLAGS_MARK:OS_DAT_FLAGS_DATA;
+ dat->dat_list[0].reserved = 0;
+ case OS_FRAME_TYPE_EOD:
+ aux->update_frame_cntr = htonl(0);
+ par->partition_num = OS_DATA_PARTITION;
+ par->par_desc_ver = OS_PARTITION_VERSION;
+ par->wrt_pass_cntr = htons(STp->wrt_pass_cntr);
+ par->first_frame_ppos = htonl(STp->first_data_ppos);
+ par->last_frame_ppos = htonl(STp->capacity);
+ aux->frame_seq_num = htonl(frame_seq_number);
+ aux->logical_blk_num_high = htonl(0);
+ aux->logical_blk_num = htonl(logical_blk_num);
+ break;
+ default: ; /* probably FILL */
+ }
+ aux->filemark_cnt = htonl(STp->filemark_cnt);
+ aux->phys_fm = htonl(0xffffffff);
+ aux->last_mark_ppos = htonl(STp->last_mark_ppos);
+ aux->last_mark_lbn = htonl(STp->last_mark_lbn);
+}
+
+/*
+ * Verify that we have the correct tape frame
+ */
+static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int quiet)
+{
+ char * name = tape_name(STp);
+ os_aux_t * aux = STp->buffer->aux;
+ os_partition_t * par = &(aux->partition);
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ int blk_cnt, blk_sz, i;
+
+ if (STp->raw) {
+ if (STp->buffer->syscall_result) {
+ for (i=0; i < STp->buffer->sg_segs; i++)
+ memset(page_address(sg_page(&STp->buffer->sg[i])),
+ 0, STp->buffer->sg[i].length);
+ strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
+ } else
+ STp->buffer->buffer_bytes = OS_FRAME_SIZE;
+ return 1;
+ }
+ if (STp->buffer->syscall_result) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, read error\n", name);
+#endif
+ return 0;
+ }
+ if (ntohl(aux->format_id) != 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, format_id %u\n", name, ntohl(aux->format_id));
+#endif
+ goto err_out;
+ }
+ if (memcmp(aux->application_sig, STp->application_sig, 4) != 0 &&
+ (memcmp(aux->application_sig, "LIN3", 4) != 0 || STp->linux_media_version != 4)) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, incorrect application signature\n", name);
+#endif
+ goto err_out;
+ }
+ if (par->partition_num != OS_DATA_PARTITION) {
+ if (!STp->linux_media || STp->linux_media_version != 2) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, partition num %d\n",
+ name, par->partition_num);
+#endif
+ goto err_out;
+ }
+ }
+ if (par->par_desc_ver != OS_PARTITION_VERSION) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, partition version %d\n", name, par->par_desc_ver);
+#endif
+ goto err_out;
+ }
+ if (ntohs(par->wrt_pass_cntr) != STp->wrt_pass_cntr) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, wrt_pass_cntr %d (expected %d)\n",
+ name, ntohs(par->wrt_pass_cntr), STp->wrt_pass_cntr);
+#endif
+ goto err_out;
+ }
+ if (aux->frame_type != OS_FRAME_TYPE_DATA &&
+ aux->frame_type != OS_FRAME_TYPE_EOD &&
+ aux->frame_type != OS_FRAME_TYPE_MARKER) {
+ if (!quiet) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, frame type %x\n", name, aux->frame_type);
+#endif
+ }
+ goto err_out;
+ }
+ if (aux->frame_type == OS_FRAME_TYPE_EOD &&
+ STp->first_frame_position < STp->eod_frame_ppos) {
+ printk(KERN_INFO "%s:I: Skipping premature EOD frame %d\n", name,
+ STp->first_frame_position);
+ goto err_out;
+ }
+ if (frame_seq_number != -1 && ntohl(aux->frame_seq_num) != frame_seq_number) {
+ if (!quiet) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame, sequence number %u (expected %d)\n",
+ name, ntohl(aux->frame_seq_num), frame_seq_number);
+#endif
+ }
+ goto err_out;
+ }
+ if (aux->frame_type == OS_FRAME_TYPE_MARKER) {
+ STps->eof = ST_FM_HIT;
+
+ i = ntohl(aux->filemark_cnt);
+ if (STp->header_cache != NULL && i < OS_FM_TAB_MAX && (i > STp->filemark_cnt ||
+ STp->first_frame_position - 1 != ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[i]))) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: %s filemark %d at frame pos %d\n", name,
+ STp->header_cache->dat_fm_tab.fm_tab_ent[i] == 0?"Learned":"Corrected",
+ i, STp->first_frame_position - 1);
+#endif
+ STp->header_cache->dat_fm_tab.fm_tab_ent[i] = htonl(STp->first_frame_position - 1);
+ if (i >= STp->filemark_cnt)
+ STp->filemark_cnt = i+1;
+ }
+ }
+ if (aux->frame_type == OS_FRAME_TYPE_EOD) {
+ STps->eof = ST_EOD_1;
+ STp->frame_in_buffer = 1;
+ }
+ if (aux->frame_type == OS_FRAME_TYPE_DATA) {
+ blk_cnt = ntohs(aux->dat.dat_list[0].blk_cnt);
+ blk_sz = ntohl(aux->dat.dat_list[0].blk_sz);
+ STp->buffer->buffer_bytes = blk_cnt * blk_sz;
+ STp->buffer->read_pointer = 0;
+ STp->frame_in_buffer = 1;
+
+ /* See what block size was used to write file */
+ if (STp->block_size != blk_sz && blk_sz > 0) {
+ printk(KERN_INFO
+ "%s:I: File was written with block size %d%c, currently %d%c, adjusted to match.\n",
+ name, blk_sz<1024?blk_sz:blk_sz/1024,blk_sz<1024?'b':'k',
+ STp->block_size<1024?STp->block_size:STp->block_size/1024,
+ STp->block_size<1024?'b':'k');
+ STp->block_size = blk_sz;
+ STp->buffer->buffer_blocks = OS_DATA_SIZE / blk_sz;
+ }
+ STps->eof = ST_NOEOF;
+ }
+ STp->frame_seq_number = ntohl(aux->frame_seq_num);
+ STp->logical_blk_num = ntohl(aux->logical_blk_num);
+ return 1;
+
+err_out:
+ if (STp->read_error_frame == 0)
+ STp->read_error_frame = STp->first_frame_position - 1;
+ return 0;
+}
+
+/*
+ * Wait for the unit to become Ready
+ */
+static int osst_wait_ready(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ unsigned timeout, int initial_delay)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ unsigned long startwait = jiffies;
+#if DEBUG
+ int dbg = debugging;
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG "%s:D: Reached onstream wait ready\n", name);
+#endif
+
+ if (initial_delay > 0)
+ msleep(jiffies_to_msecs(initial_delay));
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+ if (!SRpnt) return (-EBUSY);
+
+ while ( STp->buffer->syscall_result && time_before(jiffies, startwait + timeout*HZ) &&
+ (( SRpnt->sense[2] == 2 && SRpnt->sense[12] == 4 &&
+ (SRpnt->sense[13] == 1 || SRpnt->sense[13] == 8) ) ||
+ ( SRpnt->sense[2] == 6 && SRpnt->sense[12] == 0x28 &&
+ SRpnt->sense[13] == 0 ) )) {
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: Sleeping in onstream wait ready\n", name);
+ printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
+ debugging = 0;
+ }
+#endif
+ msleep(100);
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ }
+ *aSRpnt = SRpnt;
+#if DEBUG
+ debugging = dbg;
+#endif
+ if ( STp->buffer->syscall_result &&
+ osst_write_error_recovery(STp, aSRpnt, 0) ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Abnormal exit from onstream wait ready\n", name);
+ printk(OSST_DEB_MSG "%s:D: Result = %d, Sense: 0=%02x, 2=%02x, 12=%02x, 13=%02x\n", name,
+ STp->buffer->syscall_result, SRpnt->sense[0], SRpnt->sense[2],
+ SRpnt->sense[12], SRpnt->sense[13]);
+#endif
+ return (-EIO);
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Normal exit from onstream wait ready\n", name);
+#endif
+ return 0;
+}
+
+/*
+ * Wait for a tape to be inserted in the unit
+ */
+static int osst_wait_for_medium(struct osst_tape * STp, struct osst_request ** aSRpnt, unsigned timeout)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ unsigned long startwait = jiffies;
+#if DEBUG
+ int dbg = debugging;
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG "%s:D: Reached onstream wait for medium\n", name);
+#endif
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+ if (!SRpnt) return (-EBUSY);
+
+ while ( STp->buffer->syscall_result && time_before(jiffies, startwait + timeout*HZ) &&
+ SRpnt->sense[2] == 2 && SRpnt->sense[12] == 0x3a && SRpnt->sense[13] == 0 ) {
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: Sleeping in onstream wait medium\n", name);
+ printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
+ debugging = 0;
+ }
+#endif
+ msleep(100);
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ }
+ *aSRpnt = SRpnt;
+#if DEBUG
+ debugging = dbg;
+#endif
+ if ( STp->buffer->syscall_result && SRpnt->sense[2] != 2 &&
+ SRpnt->sense[12] != 4 && SRpnt->sense[13] == 1) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Abnormal exit from onstream wait medium\n", name);
+ printk(OSST_DEB_MSG "%s:D: Result = %d, Sense: 0=%02x, 2=%02x, 12=%02x, 13=%02x\n", name,
+ STp->buffer->syscall_result, SRpnt->sense[0], SRpnt->sense[2],
+ SRpnt->sense[12], SRpnt->sense[13]);
+#endif
+ return 0;
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Normal exit from onstream wait medium\n", name);
+#endif
+ return 1;
+}
+
+static int osst_position_tape_and_confirm(struct osst_tape * STp, struct osst_request ** aSRpnt, int frame)
+{
+ int retval;
+
+ osst_wait_ready(STp, aSRpnt, 15 * 60, 0); /* TODO - can this catch a write error? */
+ retval = osst_set_frame_position(STp, aSRpnt, frame, 0);
+ if (retval) return (retval);
+ osst_wait_ready(STp, aSRpnt, 15 * 60, OSST_WAIT_POSITION_COMPLETE);
+ return (osst_get_frame_position(STp, aSRpnt));
+}
+
+/*
+ * Wait for write(s) to complete
+ */
+static int osst_flush_drive_buffer(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ int result = 0;
+ int delay = OSST_WAIT_WRITE_COMPLETE;
+#if DEBUG
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG "%s:D: Reached onstream flush drive buffer (write filemark)\n", name);
+#endif
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_FILEMARKS;
+ cmd[1] = 1;
+
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+ if (!SRpnt) return (-EBUSY);
+ if (STp->buffer->syscall_result) {
+ if ((SRpnt->sense[2] & 0x0f) == 2 && SRpnt->sense[12] == 4) {
+ if (SRpnt->sense[13] == 8) {
+ delay = OSST_WAIT_LONG_WRITE_COMPLETE;
+ }
+ } else
+ result = osst_write_error_recovery(STp, aSRpnt, 0);
+ }
+ result |= osst_wait_ready(STp, aSRpnt, 5 * 60, delay);
+ STp->ps[STp->partition].rw = OS_WRITING_COMPLETE;
+
+ return (result);
+}
+
+#define OSST_POLL_PER_SEC 10
+static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int curr, int minlast, int to)
+{
+ unsigned long startwait = jiffies;
+ char * name = tape_name(STp);
+#if DEBUG
+ char notyetprinted = 1;
+#endif
+ if (minlast >= 0 && STp->ps[STp->partition].rw != ST_READING)
+ printk(KERN_ERR "%s:A: Waiting for frame without having initialized read!\n", name);
+
+ while (time_before (jiffies, startwait + to*HZ))
+ {
+ int result;
+ result = osst_get_frame_position(STp, aSRpnt);
+ if (result == -EIO)
+ if ((result = osst_write_error_recovery(STp, aSRpnt, 0)) == 0)
+ return 0; /* successful recovery leaves drive ready for frame */
+ if (result < 0) break;
+ if (STp->first_frame_position == curr &&
+ ((minlast < 0 &&
+ (signed)STp->last_frame_position > (signed)curr + minlast) ||
+ (minlast >= 0 && STp->cur_frames > minlast)
+ ) && result >= 0)
+ {
+#if DEBUG
+ if (debugging || time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC))
+ printk (OSST_DEB_MSG
+ "%s:D: Succ wait f fr %i (>%i): %i-%i %i (%i): %3li.%li s\n",
+ name, curr, curr+minlast, STp->first_frame_position,
+ STp->last_frame_position, STp->cur_frames,
+ result, (jiffies-startwait)/HZ,
+ (((jiffies-startwait)%HZ)*10)/HZ);
+#endif
+ return 0;
+ }
+#if DEBUG
+ if (time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC) && notyetprinted)
+ {
+ printk (OSST_DEB_MSG "%s:D: Wait for frame %i (>%i): %i-%i %i (%i)\n",
+ name, curr, curr+minlast, STp->first_frame_position,
+ STp->last_frame_position, STp->cur_frames, result);
+ notyetprinted--;
+ }
+#endif
+ msleep(1000 / OSST_POLL_PER_SEC);
+ }
+#if DEBUG
+ printk (OSST_DEB_MSG "%s:D: Fail wait f fr %i (>%i): %i-%i %i: %3li.%li s\n",
+ name, curr, curr+minlast, STp->first_frame_position,
+ STp->last_frame_position, STp->cur_frames,
+ (jiffies-startwait)/HZ, (((jiffies-startwait)%HZ)*10)/HZ);
+#endif
+ return -EBUSY;
+}
+
+static int osst_recover_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int writing)
+{
+ struct osst_request * SRpnt;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ unsigned long startwait = jiffies;
+ int retval = 1;
+ char * name = tape_name(STp);
+
+ if (writing) {
+ char mybuf[24];
+ char * olddata = STp->buffer->b_data;
+ int oldsize = STp->buffer->buffer_size;
+
+ /* write zero fm then read pos - if shows write error, try to recover - if no progress, wait */
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_FILEMARKS;
+ cmd[1] = 1;
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
+ MAX_RETRIES, 1);
+
+ while (retval && time_before (jiffies, startwait + 5*60*HZ)) {
+
+ if (STp->buffer->syscall_result && (SRpnt->sense[2] & 0x0f) != 2) {
+
+ /* some failure - not just not-ready */
+ retval = osst_write_error_recovery(STp, aSRpnt, 0);
+ break;
+ }
+ schedule_timeout_interruptible(HZ / OSST_POLL_PER_SEC);
+
+ STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = READ_POSITION;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 20, DMA_FROM_DEVICE, STp->timeout,
+ MAX_RETRIES, 1);
+
+ retval = ( STp->buffer->syscall_result || (STp->buffer)->b_data[15] > 25 );
+ STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
+ }
+ if (retval)
+ printk(KERN_ERR "%s:E: Device did not succeed to write buffered data\n", name);
+ } else
+ /* TODO - figure out which error conditions can be handled */
+ if (STp->buffer->syscall_result)
+ printk(KERN_WARNING
+ "%s:W: Recover_wait_frame(read) cannot handle %02x:%02x:%02x\n", name,
+ (*aSRpnt)->sense[ 2] & 0x0f,
+ (*aSRpnt)->sense[12],
+ (*aSRpnt)->sense[13]);
+
+ return retval;
+}
+
+/*
+ * Read the next OnStream tape frame at the current location
+ */
+static int osst_read_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int timeout)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ int retval = 0;
+#if DEBUG
+ os_aux_t * aux = STp->buffer->aux;
+ char * name = tape_name(STp);
+#endif
+
+ if (STp->poll)
+ if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, 0, timeout))
+ retval = osst_recover_wait_frame(STp, aSRpnt, 0);
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = READ_6;
+ cmd[1] = 1;
+ cmd[4] = 1;
+
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Reading frame from OnStream tape\n", name);
+#endif
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+ if (!SRpnt)
+ return (-EBUSY);
+
+ if ((STp->buffer)->syscall_result) {
+ retval = 1;
+ if (STp->read_error_frame == 0) {
+ STp->read_error_frame = STp->first_frame_position;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Recording read error at %d\n", name, STp->read_error_frame);
+#endif
+ }
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
+ name,
+ SRpnt->sense[0], SRpnt->sense[1],
+ SRpnt->sense[2], SRpnt->sense[3],
+ SRpnt->sense[4], SRpnt->sense[5],
+ SRpnt->sense[6], SRpnt->sense[7]);
+#endif
+ }
+ else
+ STp->first_frame_position++;
+#if DEBUG
+ if (debugging) {
+ char sig[8]; int i;
+ for (i=0;i<4;i++)
+ sig[i] = aux->application_sig[i]<32?'^':aux->application_sig[i];
+ sig[4] = '\0';
+ printk(OSST_DEB_MSG
+ "%s:D: AUX: %s UpdFrCt#%d Wpass#%d %s FrSeq#%d LogBlk#%d Qty=%d Sz=%d\n", name, sig,
+ ntohl(aux->update_frame_cntr), ntohs(aux->partition.wrt_pass_cntr),
+ aux->frame_type==1?"EOD":aux->frame_type==2?"MARK":
+ aux->frame_type==8?"HEADR":aux->frame_type==0x80?"DATA":"FILL",
+ ntohl(aux->frame_seq_num), ntohl(aux->logical_blk_num),
+ ntohs(aux->dat.dat_list[0].blk_cnt), ntohl(aux->dat.dat_list[0].blk_sz) );
+ if (aux->frame_type==2)
+ printk(OSST_DEB_MSG "%s:D: mark_cnt=%d, last_mark_ppos=%d, last_mark_lbn=%d\n", name,
+ ntohl(aux->filemark_cnt), ntohl(aux->last_mark_ppos), ntohl(aux->last_mark_lbn));
+ printk(OSST_DEB_MSG "%s:D: Exit read frame from OnStream tape with code %d\n", name, retval);
+ }
+#endif
+ return (retval);
+}
+
+static int osst_initiate_read(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ struct osst_request * SRpnt ;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ int retval = 0;
+ char * name = tape_name(STp);
+
+ if (STps->rw != ST_READING) { /* Initialize read operation */
+ if (STps->rw == ST_WRITING || STp->dirty) {
+ STp->write_type = OS_WRITE_DATA;
+ osst_flush_write_buffer(STp, aSRpnt);
+ osst_flush_drive_buffer(STp, aSRpnt);
+ }
+ STps->rw = ST_READING;
+ STp->frame_in_buffer = 0;
+
+ /*
+ * Issue a read 0 command to get the OnStream drive
+ * read frames into its buffer.
+ */
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = READ_6;
+ cmd[1] = 1;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Start Read Ahead on OnStream tape\n", name);
+#endif
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+ if ((retval = STp->buffer->syscall_result))
+ printk(KERN_WARNING "%s:W: Error starting read ahead\n", name);
+ }
+
+ return retval;
+}
+
+static int osst_get_logical_frame(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ int frame_seq_number, int quiet)
+{
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ char * name = tape_name(STp);
+ int cnt = 0,
+ bad = 0,
+ past = 0,
+ x,
+ position;
+
+ /*
+ * If we want just any frame (-1) and there is a frame in the buffer, return it
+ */
+ if (frame_seq_number == -1 && STp->frame_in_buffer) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Frame %d still in buffer\n", name, STp->frame_seq_number);
+#endif
+ return (STps->eof);
+ }
+ /*
+ * Search and wait for the next logical tape frame
+ */
+ while (1) {
+ if (cnt++ > 400) {
+ printk(KERN_ERR "%s:E: Couldn't find logical frame %d, aborting\n",
+ name, frame_seq_number);
+ if (STp->read_error_frame) {
+ osst_set_frame_position(STp, aSRpnt, STp->read_error_frame, 0);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Repositioning tape to bad frame %d\n",
+ name, STp->read_error_frame);
+#endif
+ STp->read_error_frame = 0;
+ STp->abort_count++;
+ }
+ return (-EIO);
+ }
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Looking for frame %d, attempt %d\n",
+ name, frame_seq_number, cnt);
+#endif
+ if ( osst_initiate_read(STp, aSRpnt)
+ || ( (!STp->frame_in_buffer) && osst_read_frame(STp, aSRpnt, 30) ) ) {
+ if (STp->raw)
+ return (-EIO);
+ position = osst_get_frame_position(STp, aSRpnt);
+ if (position >= 0xbae && position < 0xbb8)
+ position = 0xbb8;
+ else if (position > STp->eod_frame_ppos || ++bad == 10) {
+ position = STp->read_error_frame - 1;
+ bad = 0;
+ }
+ else {
+ position += 29;
+ cnt += 19;
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Bad frame detected, positioning tape to block %d\n",
+ name, position);
+#endif
+ osst_set_frame_position(STp, aSRpnt, position, 0);
+ continue;
+ }
+ if (osst_verify_frame(STp, frame_seq_number, quiet))
+ break;
+ if (osst_verify_frame(STp, -1, quiet)) {
+ x = ntohl(STp->buffer->aux->frame_seq_num);
+ if (STp->fast_open) {
+ printk(KERN_WARNING
+ "%s:W: Found logical frame %d instead of %d after fast open\n",
+ name, x, frame_seq_number);
+ STp->header_ok = 0;
+ STp->read_error_frame = 0;
+ return (-EIO);
+ }
+ if (x > frame_seq_number) {
+ if (++past > 3) {
+ /* positioning backwards did not bring us to the desired frame */
+ position = STp->read_error_frame - 1;
+ }
+ else {
+ position = osst_get_frame_position(STp, aSRpnt)
+ + frame_seq_number - x - 1;
+
+ if (STp->first_frame_position >= 3000 && position < 3000)
+ position -= 10;
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Found logical frame %d while looking for %d: back up %d\n",
+ name, x, frame_seq_number,
+ STp->first_frame_position - position);
+#endif
+ osst_set_frame_position(STp, aSRpnt, position, 0);
+ cnt += 10;
+ }
+ else
+ past = 0;
+ }
+ if (osst_get_frame_position(STp, aSRpnt) == 0xbaf) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping config partition\n", name);
+#endif
+ osst_set_frame_position(STp, aSRpnt, 0xbb8, 0);
+ cnt--;
+ }
+ STp->frame_in_buffer = 0;
+ }
+ if (cnt > 1) {
+ STp->recover_count++;
+ STp->recover_erreg++;
+ printk(KERN_WARNING "%s:I: Don't worry, Read error at position %d recovered\n",
+ name, STp->read_error_frame);
+ }
+ STp->read_count++;
+
+#if DEBUG
+ if (debugging || STps->eof)
+ printk(OSST_DEB_MSG
+ "%s:D: Exit get logical frame (%d=>%d) from OnStream tape with code %d\n",
+ name, frame_seq_number, STp->frame_seq_number, STps->eof);
+#endif
+ STp->fast_open = 0;
+ STp->read_error_frame = 0;
+ return (STps->eof);
+}
+
+static int osst_seek_logical_blk(struct osst_tape * STp, struct osst_request ** aSRpnt, int logical_blk_num)
+{
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ char * name = tape_name(STp);
+ int retries = 0;
+ int frame_seq_estimate, ppos_estimate, move;
+
+ if (logical_blk_num < 0) logical_blk_num = 0;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Seeking logical block %d (now at %d, size %d%c)\n",
+ name, logical_blk_num, STp->logical_blk_num,
+ STp->block_size<1024?STp->block_size:STp->block_size/1024,
+ STp->block_size<1024?'b':'k');
+#endif
+ /* Do we know where we are? */
+ if (STps->drv_block >= 0) {
+ move = logical_blk_num - STp->logical_blk_num;
+ if (move < 0) move -= (OS_DATA_SIZE / STp->block_size) - 1;
+ move /= (OS_DATA_SIZE / STp->block_size);
+ frame_seq_estimate = STp->frame_seq_number + move;
+ } else
+ frame_seq_estimate = logical_blk_num * STp->block_size / OS_DATA_SIZE;
+
+ if (frame_seq_estimate < 2980) ppos_estimate = frame_seq_estimate + 10;
+ else ppos_estimate = frame_seq_estimate + 20;
+ while (++retries < 10) {
+ if (ppos_estimate > STp->eod_frame_ppos-2) {
+ frame_seq_estimate += STp->eod_frame_ppos - 2 - ppos_estimate;
+ ppos_estimate = STp->eod_frame_ppos - 2;
+ }
+ if (frame_seq_estimate < 0) {
+ frame_seq_estimate = 0;
+ ppos_estimate = 10;
+ }
+ osst_set_frame_position(STp, aSRpnt, ppos_estimate, 0);
+ if (osst_get_logical_frame(STp, aSRpnt, frame_seq_estimate, 1) >= 0) {
+ /* we've located the estimated frame, now does it have our block? */
+ if (logical_blk_num < STp->logical_blk_num ||
+ logical_blk_num >= STp->logical_blk_num + ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt)) {
+ if (STps->eof == ST_FM_HIT)
+ move = logical_blk_num < STp->logical_blk_num? -2 : 1;
+ else {
+ move = logical_blk_num - STp->logical_blk_num;
+ if (move < 0) move -= (OS_DATA_SIZE / STp->block_size) - 1;
+ move /= (OS_DATA_SIZE / STp->block_size);
+ }
+ if (!move) move = logical_blk_num > STp->logical_blk_num ? 1 : -1;
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Seek retry %d at ppos %d fsq %d (est %d) lbn %d (need %d) move %d\n",
+ name, retries, ppos_estimate, STp->frame_seq_number, frame_seq_estimate,
+ STp->logical_blk_num, logical_blk_num, move);
+#endif
+ frame_seq_estimate += move;
+ ppos_estimate += move;
+ continue;
+ } else {
+ STp->buffer->read_pointer = (logical_blk_num - STp->logical_blk_num) * STp->block_size;
+ STp->buffer->buffer_bytes -= STp->buffer->read_pointer;
+ STp->logical_blk_num = logical_blk_num;
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Seek success at ppos %d fsq %d in_buf %d, bytes %d, ptr %d*%d\n",
+ name, ppos_estimate, STp->frame_seq_number, STp->frame_in_buffer,
+ STp->buffer->buffer_bytes, STp->buffer->read_pointer / STp->block_size,
+ STp->block_size);
+#endif
+ STps->drv_file = ntohl(STp->buffer->aux->filemark_cnt);
+ if (STps->eof == ST_FM_HIT) {
+ STps->drv_file++;
+ STps->drv_block = 0;
+ } else {
+ STps->drv_block = ntohl(STp->buffer->aux->last_mark_lbn)?
+ STp->logical_blk_num -
+ (STps->drv_file ? ntohl(STp->buffer->aux->last_mark_lbn) + 1 : 0):
+ -1;
+ }
+ STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_NOEOF;
+ return 0;
+ }
+ }
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 1) < 0)
+ goto error;
+ /* we are not yet at the estimated frame, adjust our estimate of its physical position */
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Seek retry %d at ppos %d fsq %d (est %d) lbn %d (need %d)\n",
+ name, retries, ppos_estimate, STp->frame_seq_number, frame_seq_estimate,
+ STp->logical_blk_num, logical_blk_num);
+#endif
+ if (frame_seq_estimate != STp->frame_seq_number)
+ ppos_estimate += frame_seq_estimate - STp->frame_seq_number;
+ else
+ break;
+ }
+error:
+ printk(KERN_ERR "%s:E: Couldn't seek to logical block %d (at %d), %d retries\n",
+ name, logical_blk_num, STp->logical_blk_num, retries);
+ return (-EIO);
+}
+
+/* The values below are based on the OnStream frame payload size of 32K == 2**15,
+ * that is, OSST_FRAME_SHIFT + OSST_SECTOR_SHIFT must be 15. With a minimum block
+ * size of 512 bytes, we need to be able to resolve 32K/512 == 64 == 2**6 positions
+ * inside each frame. Finally, OSST_SECTOR_MASK == 2**OSST_FRAME_SHIFT - 1.
+ */
+#define OSST_FRAME_SHIFT 6
+#define OSST_SECTOR_SHIFT 9
+#define OSST_SECTOR_MASK 0x03F
+
+static int osst_get_sector(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ int sector;
+#if DEBUG
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG
+ "%s:D: Positioned at ppos %d, frame %d, lbn %d, file %d, blk %d, %cptr %d, eof %d\n",
+ name, STp->first_frame_position, STp->frame_seq_number, STp->logical_blk_num,
+ STp->ps[STp->partition].drv_file, STp->ps[STp->partition].drv_block,
+ STp->ps[STp->partition].rw == ST_WRITING?'w':'r',
+ STp->ps[STp->partition].rw == ST_WRITING?STp->buffer->buffer_bytes:
+ STp->buffer->read_pointer, STp->ps[STp->partition].eof);
+#endif
+ /* do we know where we are inside a file? */
+ if (STp->ps[STp->partition].drv_block >= 0) {
+ sector = (STp->frame_in_buffer ? STp->first_frame_position-1 :
+ STp->first_frame_position) << OSST_FRAME_SHIFT;
+ if (STp->ps[STp->partition].rw == ST_WRITING)
+ sector |= (STp->buffer->buffer_bytes >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK;
+ else
+ sector |= (STp->buffer->read_pointer >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK;
+ } else {
+ sector = osst_get_frame_position(STp, aSRpnt);
+ if (sector > 0)
+ sector <<= OSST_FRAME_SHIFT;
+ }
+ return sector;
+}
+
+static int osst_seek_sector(struct osst_tape * STp, struct osst_request ** aSRpnt, int sector)
+{
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ int frame = sector >> OSST_FRAME_SHIFT,
+ offset = (sector & OSST_SECTOR_MASK) << OSST_SECTOR_SHIFT,
+ r;
+#if DEBUG
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG "%s:D: Seeking sector %d in frame %d at offset %d\n",
+ name, sector, frame, offset);
+#endif
+ if (frame < 0 || frame >= STp->capacity) return (-ENXIO);
+
+ if (frame <= STp->first_data_ppos) {
+ STp->frame_seq_number = STp->logical_blk_num = STps->drv_file = STps->drv_block = 0;
+ return (osst_set_frame_position(STp, aSRpnt, frame, 0));
+ }
+ r = osst_set_frame_position(STp, aSRpnt, offset?frame:frame-1, 0);
+ if (r < 0) return r;
+
+ r = osst_get_logical_frame(STp, aSRpnt, -1, 1);
+ if (r < 0) return r;
+
+ if (osst_get_frame_position(STp, aSRpnt) != (offset?frame+1:frame)) return (-EIO);
+
+ if (offset) {
+ STp->logical_blk_num += offset / STp->block_size;
+ STp->buffer->read_pointer = offset;
+ STp->buffer->buffer_bytes -= offset;
+ } else {
+ STp->frame_seq_number++;
+ STp->frame_in_buffer = 0;
+ STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
+ STp->buffer->buffer_bytes = STp->buffer->read_pointer = 0;
+ }
+ STps->drv_file = ntohl(STp->buffer->aux->filemark_cnt);
+ if (STps->eof == ST_FM_HIT) {
+ STps->drv_file++;
+ STps->drv_block = 0;
+ } else {
+ STps->drv_block = ntohl(STp->buffer->aux->last_mark_lbn)?
+ STp->logical_blk_num -
+ (STps->drv_file ? ntohl(STp->buffer->aux->last_mark_lbn) + 1 : 0):
+ -1;
+ }
+ STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_NOEOF;
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Now positioned at ppos %d, frame %d, lbn %d, file %d, blk %d, rptr %d, eof %d\n",
+ name, STp->first_frame_position, STp->frame_seq_number, STp->logical_blk_num,
+ STps->drv_file, STps->drv_block, STp->buffer->read_pointer, STps->eof);
+#endif
+ return 0;
+}
+
+/*
+ * Read back the drive's internal buffer contents, as a part
+ * of the write error recovery mechanism for old OnStream
+ * firmware revisions.
+ * Precondition for this function to work: all frames in the
+ * drive's buffer must be of one type (DATA, MARK or EOD)!
+ */
+static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ unsigned int frame, unsigned int skip, int pending)
+{
+ struct osst_request * SRpnt = * aSRpnt;
+ unsigned char * buffer, * p;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ int flag, new_frame, i;
+ int nframes = STp->cur_frames;
+ int blks_per_frame = ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
+ int frame_seq_number = ntohl(STp->buffer->aux->frame_seq_num)
+ - (nframes + pending - 1);
+ int logical_blk_num = ntohl(STp->buffer->aux->logical_blk_num)
+ - (nframes + pending - 1) * blks_per_frame;
+ char * name = tape_name(STp);
+ unsigned long startwait = jiffies;
+#if DEBUG
+ int dbg = debugging;
+#endif
+
+ if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+ return (-EIO);
+
+ printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
+ name, nframes, pending?" and one that was pending":"");
+
+ osst_copy_from_buffer(STp->buffer, (p = &buffer[nframes * OS_DATA_SIZE]));
+#if DEBUG
+ if (pending && debugging)
+ printk(OSST_DEB_MSG "%s:D: Pending frame %d (lblk %d), data %02x %02x %02x %02x\n",
+ name, frame_seq_number + nframes,
+ logical_blk_num + nframes * blks_per_frame,
+ p[0], p[1], p[2], p[3]);
+#endif
+ for (i = 0, p = buffer; i < nframes; i++, p += OS_DATA_SIZE) {
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = 0x3C; /* Buffer Read */
+ cmd[1] = 6; /* Retrieve Faulty Block */
+ cmd[7] = 32768 >> 8;
+ cmd[8] = 32768 & 0xff;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+
+ if ((STp->buffer)->syscall_result || !SRpnt) {
+ printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name);
+ vfree(buffer);
+ *aSRpnt = SRpnt;
+ return (-EIO);
+ }
+ osst_copy_from_buffer(STp->buffer, p);
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Read back logical frame %d, data %02x %02x %02x %02x\n",
+ name, frame_seq_number + i, p[0], p[1], p[2], p[3]);
+#endif
+ }
+ *aSRpnt = SRpnt;
+ osst_get_frame_position(STp, aSRpnt);
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Frames left in buffer: %d\n", name, STp->cur_frames);
+#endif
+ /* Write synchronously so we can be sure we're OK again and don't have to recover recursively */
+ /* In the header we don't actually re-write the frames that fail, just the ones after them */
+
+ for (flag=1, new_frame=frame, p=buffer, i=0; i < nframes + pending; ) {
+
+ if (flag) {
+ if (STp->write_type == OS_WRITE_HEADER) {
+ i += skip;
+ p += skip * OS_DATA_SIZE;
+ }
+ else if (new_frame < 2990 && new_frame+skip+nframes+pending >= 2990)
+ new_frame = 3000-i;
+ else
+ new_frame += skip;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Position to frame %d, write fseq %d\n",
+ name, new_frame+i, frame_seq_number+i);
+#endif
+ osst_set_frame_position(STp, aSRpnt, new_frame + i, 0);
+ osst_wait_ready(STp, aSRpnt, 60, OSST_WAIT_POSITION_COMPLETE);
+ osst_get_frame_position(STp, aSRpnt);
+ SRpnt = * aSRpnt;
+
+ if (new_frame > frame + 1000) {
+ printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name);
+ vfree(buffer);
+ return (-EIO);
+ }
+ if ( i >= nframes + pending ) break;
+ flag = 0;
+ }
+ osst_copy_to_buffer(STp->buffer, p);
+ /*
+ * IMPORTANT: for error recovery to work, _never_ queue frames with mixed frame type!
+ */
+ osst_init_aux(STp, STp->buffer->aux->frame_type, frame_seq_number+i,
+ logical_blk_num + i*blks_per_frame,
+ ntohl(STp->buffer->aux->dat.dat_list[0].blk_sz), blks_per_frame);
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_6;
+ cmd[1] = 1;
+ cmd[4] = 1;
+
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG
+ "%s:D: About to write frame %d, seq %d, lbn %d, data %02x %02x %02x %02x\n",
+ name, new_frame+i, frame_seq_number+i, logical_blk_num + i*blks_per_frame,
+ p[0], p[1], p[2], p[3]);
+#endif
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+
+ if (STp->buffer->syscall_result)
+ flag = 1;
+ else {
+ p += OS_DATA_SIZE; i++;
+
+ /* if we just sent the last frame, wait till all successfully written */
+ if ( i == nframes + pending ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Check re-write successful\n", name);
+#endif
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_FILEMARKS;
+ cmd[1] = 1;
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
+ STp->timeout, MAX_RETRIES, 1);
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
+ printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
+ debugging = 0;
+ }
+#endif
+ flag = STp->buffer->syscall_result;
+ while ( !flag && time_before(jiffies, startwait + 60*HZ) ) {
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
+ MAX_RETRIES, 1);
+
+ if (SRpnt->sense[2] == 2 && SRpnt->sense[12] == 4 &&
+ (SRpnt->sense[13] == 1 || SRpnt->sense[13] == 8)) {
+ /* in the process of becoming ready */
+ msleep(100);
+ continue;
+ }
+ if (STp->buffer->syscall_result)
+ flag = 1;
+ break;
+ }
+#if DEBUG
+ debugging = dbg;
+ printk(OSST_DEB_MSG "%s:D: Wait re-write finished\n", name);
+#endif
+ }
+ }
+ *aSRpnt = SRpnt;
+ if (flag) {
+ if ((SRpnt->sense[ 2] & 0x0f) == 13 &&
+ SRpnt->sense[12] == 0 &&
+ SRpnt->sense[13] == 2) {
+ printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name);
+ vfree(buffer);
+ return (-EIO); /* hit end of tape = fail */
+ }
+ i = ((SRpnt->sense[3] << 24) |
+ (SRpnt->sense[4] << 16) |
+ (SRpnt->sense[5] << 8) |
+ SRpnt->sense[6] ) - new_frame;
+ p = &buffer[i * OS_DATA_SIZE];
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Additional write error at %d\n", name, new_frame+i);
+#endif
+ osst_get_frame_position(STp, aSRpnt);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: reported frame positions: host = %d, tape = %d, buffer = %d\n",
+ name, STp->first_frame_position, STp->last_frame_position, STp->cur_frames);
+#endif
+ }
+ }
+ if (flag) {
+ /* error recovery did not successfully complete */
+ printk(KERN_ERR "%s:D: Write error recovery failed in %s\n", name,
+ STp->write_type == OS_WRITE_HEADER?"header":"body");
+ }
+ if (!pending)
+ osst_copy_to_buffer(STp->buffer, p); /* so buffer content == at entry in all cases */
+ vfree(buffer);
+ return 0;
+}
+
+static int osst_reposition_and_retry(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ unsigned int frame, unsigned int skip, int pending)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ char * name = tape_name(STp);
+ int expected = 0;
+ int attempts = 1000 / skip;
+ int flag = 1;
+ unsigned long startwait = jiffies;
+#if DEBUG
+ int dbg = debugging;
+#endif
+
+ while (attempts && time_before(jiffies, startwait + 60*HZ)) {
+ if (flag) {
+#if DEBUG
+ debugging = dbg;
+#endif
+ if (frame < 2990 && frame+skip+STp->cur_frames+pending >= 2990)
+ frame = 3000-skip;
+ expected = frame+skip+STp->cur_frames+pending;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Position to fppos %d, re-write from fseq %d\n",
+ name, frame+skip, STp->frame_seq_number-STp->cur_frames-pending);
+#endif
+ osst_set_frame_position(STp, aSRpnt, frame + skip, 1);
+ flag = 0;
+ attempts--;
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+ }
+ if (osst_get_frame_position(STp, aSRpnt) < 0) { /* additional write error */
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Addl error, host %d, tape %d, buffer %d\n",
+ name, STp->first_frame_position,
+ STp->last_frame_position, STp->cur_frames);
+#endif
+ frame = STp->last_frame_position;
+ flag = 1;
+ continue;
+ }
+ if (pending && STp->cur_frames < 50) {
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_6;
+ cmd[1] = 1;
+ cmd[4] = 1;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: About to write pending fseq %d at fppos %d\n",
+ name, STp->frame_seq_number-1, STp->first_frame_position);
+#endif
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+
+ if (STp->buffer->syscall_result) { /* additional write error */
+ if ((SRpnt->sense[ 2] & 0x0f) == 13 &&
+ SRpnt->sense[12] == 0 &&
+ SRpnt->sense[13] == 2) {
+ printk(KERN_ERR
+ "%s:E: Volume overflow in write error recovery\n",
+ name);
+ break; /* hit end of tape = fail */
+ }
+ flag = 1;
+ }
+ else
+ pending = 0;
+
+ continue;
+ }
+ if (STp->cur_frames == 0) {
+#if DEBUG
+ debugging = dbg;
+ printk(OSST_DEB_MSG "%s:D: Wait re-write finished\n", name);
+#endif
+ if (STp->first_frame_position != expected) {
+ printk(KERN_ERR "%s:A: Actual position %d - expected %d\n",
+ name, STp->first_frame_position, expected);
+ return (-EIO);
+ }
+ return 0;
+ }
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
+ printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
+ debugging = 0;
+ }
+#endif
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+ }
+ printk(KERN_ERR "%s:E: Failed to find valid tape media\n", name);
+#if DEBUG
+ debugging = dbg;
+#endif
+ return (-EIO);
+}
+
+/*
+ * Error recovery algorithm for the OnStream tape.
+ */
+
+static int osst_write_error_recovery(struct osst_tape * STp, struct osst_request ** aSRpnt, int pending)
+{
+ struct osst_request * SRpnt = * aSRpnt;
+ struct st_partstat * STps = & STp->ps[STp->partition];
+ char * name = tape_name(STp);
+ int retval = 0;
+ int rw_state;
+ unsigned int frame, skip;
+
+ rw_state = STps->rw;
+
+ if ((SRpnt->sense[ 2] & 0x0f) != 3
+ || SRpnt->sense[12] != 12
+ || SRpnt->sense[13] != 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Write error recovery cannot handle %02x:%02x:%02x\n", name,
+ SRpnt->sense[2], SRpnt->sense[12], SRpnt->sense[13]);
+#endif
+ return (-EIO);
+ }
+ frame = (SRpnt->sense[3] << 24) |
+ (SRpnt->sense[4] << 16) |
+ (SRpnt->sense[5] << 8) |
+ SRpnt->sense[6];
+ skip = SRpnt->sense[9];
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Detected physical bad frame at %u, advised to skip %d\n", name, frame, skip);
+#endif
+ osst_get_frame_position(STp, aSRpnt);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: reported frame positions: host = %d, tape = %d\n",
+ name, STp->first_frame_position, STp->last_frame_position);
+#endif
+ switch (STp->write_type) {
+ case OS_WRITE_DATA:
+ case OS_WRITE_EOD:
+ case OS_WRITE_NEW_MARK:
+ printk(KERN_WARNING
+ "%s:I: Relocating %d buffered logical frames from position %u to %u\n",
+ name, STp->cur_frames, frame, (frame + skip > 3000 && frame < 3000)?3000:frame + skip);
+ if (STp->os_fw_rev >= 10600)
+ retval = osst_reposition_and_retry(STp, aSRpnt, frame, skip, pending);
+ else
+ retval = osst_read_back_buffer_and_rewrite(STp, aSRpnt, frame, skip, pending);
+ printk(KERN_WARNING "%s:%s: %sWrite error%srecovered\n", name,
+ retval?"E" :"I",
+ retval?"" :"Don't worry, ",
+ retval?" not ":" ");
+ break;
+ case OS_WRITE_LAST_MARK:
+ printk(KERN_ERR "%s:E: Bad frame in update last marker, fatal\n", name);
+ osst_set_frame_position(STp, aSRpnt, frame + STp->cur_frames + pending, 0);
+ retval = -EIO;
+ break;
+ case OS_WRITE_HEADER:
+ printk(KERN_WARNING "%s:I: Bad frame in header partition, skipped\n", name);
+ retval = osst_read_back_buffer_and_rewrite(STp, aSRpnt, frame, 1, pending);
+ break;
+ default:
+ printk(KERN_INFO "%s:I: Bad frame in filler, ignored\n", name);
+ osst_set_frame_position(STp, aSRpnt, frame + STp->cur_frames + pending, 0);
+ }
+ osst_get_frame_position(STp, aSRpnt);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Positioning complete, cur_frames %d, pos %d, tape pos %d\n",
+ name, STp->cur_frames, STp->first_frame_position, STp->last_frame_position);
+ printk(OSST_DEB_MSG "%s:D: next logical frame to write: %d\n", name, STp->logical_blk_num);
+#endif
+ if (retval == 0) {
+ STp->recover_count++;
+ STp->recover_erreg++;
+ } else
+ STp->abort_count++;
+
+ STps->rw = rw_state;
+ return retval;
+}
+
+static int osst_space_over_filemarks_backward(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ int mt_op, int mt_count)
+{
+ char * name = tape_name(STp);
+ int cnt;
+ int last_mark_ppos = -1;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_backwards %d %d\n", name, mt_op, mt_count);
+#endif
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_bwd\n", name);
+#endif
+ return -EIO;
+ }
+ if (STp->linux_media_version >= 4) {
+ /*
+ * direct lookup in header filemark list
+ */
+ cnt = ntohl(STp->buffer->aux->filemark_cnt);
+ if (STp->header_ok &&
+ STp->header_cache != NULL &&
+ (cnt - mt_count) >= 0 &&
+ (cnt - mt_count) < OS_FM_TAB_MAX &&
+ (cnt - mt_count) < STp->filemark_cnt &&
+ STp->header_cache->dat_fm_tab.fm_tab_ent[cnt-1] == STp->buffer->aux->last_mark_ppos)
+
+ last_mark_ppos = ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt - mt_count]);
+#if DEBUG
+ if (STp->header_cache == NULL || (cnt - mt_count) < 0 || (cnt - mt_count) >= OS_FM_TAB_MAX)
+ printk(OSST_DEB_MSG "%s:D: Filemark lookup fail due to %s\n", name,
+ STp->header_cache == NULL?"lack of header cache":"count out of range");
+ else
+ printk(OSST_DEB_MSG "%s:D: Filemark lookup: prev mark %d (%s), skip %d to %d\n",
+ name, cnt,
+ ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
+ (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt-1] ==
+ STp->buffer->aux->last_mark_ppos))?"match":"error",
+ mt_count, last_mark_ppos);
+#endif
+ if (last_mark_ppos > 10 && last_mark_ppos < STp->eod_frame_ppos) {
+ osst_position_tape_and_confirm(STp, aSRpnt, last_mark_ppos);
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
+#endif
+ return (-EIO);
+ }
+ if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
+ printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
+ name, last_mark_ppos);
+ return (-EIO);
+ }
+ goto found;
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reverting to scan filemark backwards\n", name);
+#endif
+ }
+ cnt = 0;
+ while (cnt != mt_count) {
+ last_mark_ppos = ntohl(STp->buffer->aux->last_mark_ppos);
+ if (last_mark_ppos == -1)
+ return (-EIO);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Positioning to last mark at %d\n", name, last_mark_ppos);
+#endif
+ osst_position_tape_and_confirm(STp, aSRpnt, last_mark_ppos);
+ cnt++;
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
+#endif
+ return (-EIO);
+ }
+ if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
+ printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
+ name, last_mark_ppos);
+ return (-EIO);
+ }
+ }
+found:
+ if (mt_op == MTBSFM) {
+ STp->frame_seq_number++;
+ STp->frame_in_buffer = 0;
+ STp->buffer->buffer_bytes = 0;
+ STp->buffer->read_pointer = 0;
+ STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
+ }
+ return 0;
+}
+
+/*
+ * ADRL 1.1 compatible "slow" space filemarks fwd version
+ *
+ * Just scans for the filemark sequentially.
+ */
+static int osst_space_over_filemarks_forward_slow(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ int mt_op, int mt_count)
+{
+ int cnt = 0;
+#if DEBUG
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_forward_slow %d %d\n", name, mt_op, mt_count);
+#endif
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_fwd\n", name);
+#endif
+ return (-EIO);
+ }
+ while (1) {
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
+#endif
+ return (-EIO);
+ }
+ if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER)
+ cnt++;
+ if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_EOD) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: space_fwd: EOD reached\n", name);
+#endif
+ if (STp->first_frame_position > STp->eod_frame_ppos+1) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: EOD position corrected (%d=>%d)\n",
+ name, STp->eod_frame_ppos, STp->first_frame_position-1);
+#endif
+ STp->eod_frame_ppos = STp->first_frame_position-1;
+ }
+ return (-EIO);
+ }
+ if (cnt == mt_count)
+ break;
+ STp->frame_in_buffer = 0;
+ }
+ if (mt_op == MTFSF) {
+ STp->frame_seq_number++;
+ STp->frame_in_buffer = 0;
+ STp->buffer->buffer_bytes = 0;
+ STp->buffer->read_pointer = 0;
+ STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
+ }
+ return 0;
+}
+
+/*
+ * Fast linux specific version of OnStream FSF
+ */
+static int osst_space_over_filemarks_forward_fast(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ int mt_op, int mt_count)
+{
+ char * name = tape_name(STp);
+ int cnt = 0,
+ next_mark_ppos = -1;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_forward_fast %d %d\n", name, mt_op, mt_count);
+#endif
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_fwd\n", name);
+#endif
+ return (-EIO);
+ }
+
+ if (STp->linux_media_version >= 4) {
+ /*
+ * direct lookup in header filemark list
+ */
+ cnt = ntohl(STp->buffer->aux->filemark_cnt) - 1;
+ if (STp->header_ok &&
+ STp->header_cache != NULL &&
+ (cnt + mt_count) < OS_FM_TAB_MAX &&
+ (cnt + mt_count) < STp->filemark_cnt &&
+ ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
+ (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt] == STp->buffer->aux->last_mark_ppos)))
+
+ next_mark_ppos = ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt + mt_count]);
+#if DEBUG
+ if (STp->header_cache == NULL || (cnt + mt_count) >= OS_FM_TAB_MAX)
+ printk(OSST_DEB_MSG "%s:D: Filemark lookup fail due to %s\n", name,
+ STp->header_cache == NULL?"lack of header cache":"count out of range");
+ else
+ printk(OSST_DEB_MSG "%s:D: Filemark lookup: prev mark %d (%s), skip %d to %d\n",
+ name, cnt,
+ ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
+ (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt] ==
+ STp->buffer->aux->last_mark_ppos))?"match":"error",
+ mt_count, next_mark_ppos);
+#endif
+ if (next_mark_ppos <= 10 || next_mark_ppos > STp->eod_frame_ppos) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
+#endif
+ return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count);
+ } else {
+ osst_position_tape_and_confirm(STp, aSRpnt, next_mark_ppos);
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n",
+ name);
+#endif
+ return (-EIO);
+ }
+ if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
+ printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
+ name, next_mark_ppos);
+ return (-EIO);
+ }
+ if (ntohl(STp->buffer->aux->filemark_cnt) != cnt + mt_count) {
+ printk(KERN_WARNING "%s:W: Expected to find marker %d at ppos %d, not %d\n",
+ name, cnt+mt_count, next_mark_ppos,
+ ntohl(STp->buffer->aux->filemark_cnt));
+ return (-EIO);
+ }
+ }
+ } else {
+ /*
+ * Find nearest (usually previous) marker, then jump from marker to marker
+ */
+ while (1) {
+ if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER)
+ break;
+ if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_EOD) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: space_fwd: EOD reached\n", name);
+#endif
+ return (-EIO);
+ }
+ if (ntohl(STp->buffer->aux->filemark_cnt) == 0) {
+ if (STp->first_mark_ppos == -1) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
+#endif
+ return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count);
+ }
+ osst_position_tape_and_confirm(STp, aSRpnt, STp->first_mark_ppos);
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Couldn't get logical blk num in space_filemarks_fwd_fast\n",
+ name);
+#endif
+ return (-EIO);
+ }
+ if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
+ printk(KERN_WARNING "%s:W: Expected to find filemark at %d\n",
+ name, STp->first_mark_ppos);
+ return (-EIO);
+ }
+ } else {
+ if (osst_space_over_filemarks_backward(STp, aSRpnt, MTBSF, 1) < 0)
+ return (-EIO);
+ mt_count++;
+ }
+ }
+ cnt++;
+ while (cnt != mt_count) {
+ next_mark_ppos = ntohl(STp->buffer->aux->next_mark_ppos);
+ if (!next_mark_ppos || next_mark_ppos > STp->eod_frame_ppos) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
+#endif
+ return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count - cnt);
+ }
+#if DEBUG
+ else printk(OSST_DEB_MSG "%s:D: Positioning to next mark at %d\n", name, next_mark_ppos);
+#endif
+ osst_position_tape_and_confirm(STp, aSRpnt, next_mark_ppos);
+ cnt++;
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n",
+ name);
+#endif
+ return (-EIO);
+ }
+ if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
+ printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
+ name, next_mark_ppos);
+ return (-EIO);
+ }
+ }
+ }
+ if (mt_op == MTFSF) {
+ STp->frame_seq_number++;
+ STp->frame_in_buffer = 0;
+ STp->buffer->buffer_bytes = 0;
+ STp->buffer->read_pointer = 0;
+ STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
+ }
+ return 0;
+}
+
+/*
+ * In debug mode, we want to see as many errors as possible
+ * to test the error recovery mechanism.
+ */
+#if DEBUG
+static void osst_set_retries(struct osst_tape * STp, struct osst_request ** aSRpnt, int retries)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt = * aSRpnt;
+ char * name = tape_name(STp);
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SELECT;
+ cmd[1] = 0x10;
+ cmd[4] = NUMBER_RETRIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ (STp->buffer)->b_data[0] = cmd[4] - 1;
+ (STp->buffer)->b_data[1] = 0; /* Medium Type - ignoring */
+ (STp->buffer)->b_data[2] = 0; /* Reserved */
+ (STp->buffer)->b_data[3] = 0; /* Block Descriptor Length */
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = NUMBER_RETRIES_PAGE | (1 << 7);
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 2;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 4;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = retries;
+
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Setting number of retries on OnStream tape to %d\n", name, retries);
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
+ *aSRpnt = SRpnt;
+
+ if ((STp->buffer)->syscall_result)
+ printk (KERN_ERR "%s:D: Couldn't set retries to %d\n", name, retries);
+}
+#endif
+
+
+static int osst_write_filemark(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ int result;
+ int this_mark_ppos = STp->first_frame_position;
+ int this_mark_lbn = STp->logical_blk_num;
+#if DEBUG
+ char * name = tape_name(STp);
+#endif
+
+ if (STp->raw) return 0;
+
+ STp->write_type = OS_WRITE_NEW_MARK;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Writing Filemark %i at fppos %d (fseq %d, lblk %d)\n",
+ name, STp->filemark_cnt, this_mark_ppos, STp->frame_seq_number, this_mark_lbn);
+#endif
+ STp->dirty = 1;
+ result = osst_flush_write_buffer(STp, aSRpnt);
+ result |= osst_flush_drive_buffer(STp, aSRpnt);
+ STp->last_mark_ppos = this_mark_ppos;
+ STp->last_mark_lbn = this_mark_lbn;
+ if (STp->header_cache != NULL && STp->filemark_cnt < OS_FM_TAB_MAX)
+ STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt] = htonl(this_mark_ppos);
+ if (STp->filemark_cnt++ == 0)
+ STp->first_mark_ppos = this_mark_ppos;
+ return result;
+}
+
+static int osst_write_eod(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ int result;
+#if DEBUG
+ char * name = tape_name(STp);
+#endif
+
+ if (STp->raw) return 0;
+
+ STp->write_type = OS_WRITE_EOD;
+ STp->eod_frame_ppos = STp->first_frame_position;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Writing EOD at fppos %d (fseq %d, lblk %d)\n", name,
+ STp->eod_frame_ppos, STp->frame_seq_number, STp->logical_blk_num);
+#endif
+ STp->dirty = 1;
+
+ result = osst_flush_write_buffer(STp, aSRpnt);
+ result |= osst_flush_drive_buffer(STp, aSRpnt);
+ STp->eod_frame_lfa = --(STp->frame_seq_number);
+ return result;
+}
+
+static int osst_write_filler(struct osst_tape * STp, struct osst_request ** aSRpnt, int where, int count)
+{
+ char * name = tape_name(STp);
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reached onstream write filler group %d\n", name, where);
+#endif
+ osst_wait_ready(STp, aSRpnt, 60 * 5, 0);
+ osst_set_frame_position(STp, aSRpnt, where, 0);
+ STp->write_type = OS_WRITE_FILLER;
+ while (count--) {
+ memcpy(STp->buffer->b_data, "Filler", 6);
+ STp->buffer->buffer_bytes = 6;
+ STp->dirty = 1;
+ if (osst_flush_write_buffer(STp, aSRpnt)) {
+ printk(KERN_INFO "%s:I: Couldn't write filler frame\n", name);
+ return (-EIO);
+ }
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Exiting onstream write filler group\n", name);
+#endif
+ return osst_flush_drive_buffer(STp, aSRpnt);
+}
+
+static int __osst_write_header(struct osst_tape * STp, struct osst_request ** aSRpnt, int where, int count)
+{
+ char * name = tape_name(STp);
+ int result;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reached onstream write header group %d\n", name, where);
+#endif
+ osst_wait_ready(STp, aSRpnt, 60 * 5, 0);
+ osst_set_frame_position(STp, aSRpnt, where, 0);
+ STp->write_type = OS_WRITE_HEADER;
+ while (count--) {
+ osst_copy_to_buffer(STp->buffer, (unsigned char *)STp->header_cache);
+ STp->buffer->buffer_bytes = sizeof(os_header_t);
+ STp->dirty = 1;
+ if (osst_flush_write_buffer(STp, aSRpnt)) {
+ printk(KERN_INFO "%s:I: Couldn't write header frame\n", name);
+ return (-EIO);
+ }
+ }
+ result = osst_flush_drive_buffer(STp, aSRpnt);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Write onstream header group %s\n", name, result?"failed":"done");
+#endif
+ return result;
+}
+
+static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRpnt, int locate_eod)
+{
+ os_header_t * header;
+ int result;
+ char * name = tape_name(STp);
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Writing tape header\n", name);
+#endif
+ if (STp->raw) return 0;
+
+ if (STp->header_cache == NULL) {
+ if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
+ printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
+ return (-ENOMEM);
+ }
+ memset(STp->header_cache, 0, sizeof(os_header_t));
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Allocated and cleared memory for header cache\n", name);
+#endif
+ }
+ if (STp->header_ok) STp->update_frame_cntr++;
+ else STp->update_frame_cntr = 0;
+
+ header = STp->header_cache;
+ strcpy(header->ident_str, "ADR_SEQ");
+ header->major_rev = 1;
+ header->minor_rev = 4;
+ header->ext_trk_tb_off = htons(17192);
+ header->pt_par_num = 1;
+ header->partition[0].partition_num = OS_DATA_PARTITION;
+ header->partition[0].par_desc_ver = OS_PARTITION_VERSION;
+ header->partition[0].wrt_pass_cntr = htons(STp->wrt_pass_cntr);
+ header->partition[0].first_frame_ppos = htonl(STp->first_data_ppos);
+ header->partition[0].last_frame_ppos = htonl(STp->capacity);
+ header->partition[0].eod_frame_ppos = htonl(STp->eod_frame_ppos);
+ header->cfg_col_width = htonl(20);
+ header->dat_col_width = htonl(1500);
+ header->qfa_col_width = htonl(0);
+ header->ext_track_tb.nr_stream_part = 1;
+ header->ext_track_tb.et_ent_sz = 32;
+ header->ext_track_tb.dat_ext_trk_ey.et_part_num = 0;
+ header->ext_track_tb.dat_ext_trk_ey.fmt = 1;
+ header->ext_track_tb.dat_ext_trk_ey.fm_tab_off = htons(17736);
+ header->ext_track_tb.dat_ext_trk_ey.last_hlb_hi = 0;
+ header->ext_track_tb.dat_ext_trk_ey.last_hlb = htonl(STp->eod_frame_lfa);
+ header->ext_track_tb.dat_ext_trk_ey.last_pp = htonl(STp->eod_frame_ppos);
+ header->dat_fm_tab.fm_part_num = 0;
+ header->dat_fm_tab.fm_tab_ent_sz = 4;
+ header->dat_fm_tab.fm_tab_ent_cnt = htons(STp->filemark_cnt<OS_FM_TAB_MAX?
+ STp->filemark_cnt:OS_FM_TAB_MAX);
+
+ result = __osst_write_header(STp, aSRpnt, 0xbae, 5);
+ if (STp->update_frame_cntr == 0)
+ osst_write_filler(STp, aSRpnt, 0xbb3, 5);
+ result &= __osst_write_header(STp, aSRpnt, 5, 5);
+
+ if (locate_eod) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Locating back to eod frame addr %d\n", name, STp->eod_frame_ppos);
+#endif
+ osst_set_frame_position(STp, aSRpnt, STp->eod_frame_ppos, 0);
+ }
+ if (result)
+ printk(KERN_ERR "%s:E: Write header failed\n", name);
+ else {
+ memcpy(STp->application_sig, "LIN4", 4);
+ STp->linux_media = 1;
+ STp->linux_media_version = 4;
+ STp->header_ok = 1;
+ }
+ return result;
+}
+
+static int osst_reset_header(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ if (STp->header_cache != NULL)
+ memset(STp->header_cache, 0, sizeof(os_header_t));
+
+ STp->logical_blk_num = STp->frame_seq_number = 0;
+ STp->frame_in_buffer = 0;
+ STp->eod_frame_ppos = STp->first_data_ppos = 0x0000000A;
+ STp->filemark_cnt = 0;
+ STp->first_mark_ppos = STp->last_mark_ppos = STp->last_mark_lbn = -1;
+ return osst_write_header(STp, aSRpnt, 1);
+}
+
+static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request ** aSRpnt, int ppos)
+{
+ char * name = tape_name(STp);
+ os_header_t * header;
+ os_aux_t * aux;
+ char id_string[8];
+ int linux_media_version,
+ update_frame_cntr;
+
+ if (STp->raw)
+ return 1;
+
+ if (ppos == 5 || ppos == 0xbae || STp->buffer->syscall_result) {
+ if (osst_set_frame_position(STp, aSRpnt, ppos, 0))
+ printk(KERN_WARNING "%s:W: Couldn't position tape\n", name);
+ osst_wait_ready(STp, aSRpnt, 60 * 15, 0);
+ if (osst_initiate_read (STp, aSRpnt)) {
+ printk(KERN_WARNING "%s:W: Couldn't initiate read\n", name);
+ return 0;
+ }
+ }
+ if (osst_read_frame(STp, aSRpnt, 180)) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't read header frame\n", name);
+#endif
+ return 0;
+ }
+ header = (os_header_t *) STp->buffer->b_data; /* warning: only first segment addressable */
+ aux = STp->buffer->aux;
+ if (aux->frame_type != OS_FRAME_TYPE_HEADER) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping non-header frame (%d)\n", name, ppos);
+#endif
+ return 0;
+ }
+ if (ntohl(aux->frame_seq_num) != 0 ||
+ ntohl(aux->logical_blk_num) != 0 ||
+ aux->partition.partition_num != OS_CONFIG_PARTITION ||
+ ntohl(aux->partition.first_frame_ppos) != 0 ||
+ ntohl(aux->partition.last_frame_ppos) != 0xbb7 ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Invalid header frame (%d,%d,%d,%d,%d)\n", name,
+ ntohl(aux->frame_seq_num), ntohl(aux->logical_blk_num),
+ aux->partition.partition_num, ntohl(aux->partition.first_frame_ppos),
+ ntohl(aux->partition.last_frame_ppos));
+#endif
+ return 0;
+ }
+ if (strncmp(header->ident_str, "ADR_SEQ", 7) != 0 &&
+ strncmp(header->ident_str, "ADR-SEQ", 7) != 0) {
+ strlcpy(id_string, header->ident_str, 8);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Invalid header identification string %s\n", name, id_string);
+#endif
+ return 0;
+ }
+ update_frame_cntr = ntohl(aux->update_frame_cntr);
+ if (update_frame_cntr < STp->update_frame_cntr) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame %d with update_frame_counter %d<%d\n",
+ name, ppos, update_frame_cntr, STp->update_frame_cntr);
+#endif
+ return 0;
+ }
+ if (header->major_rev != 1 || header->minor_rev != 4 ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: %s revision %d.%d detected (1.4 supported)\n",
+ name, (header->major_rev != 1 || header->minor_rev < 2 ||
+ header->minor_rev > 4 )? "Invalid" : "Warning:",
+ header->major_rev, header->minor_rev);
+#endif
+ if (header->major_rev != 1 || header->minor_rev < 2 || header->minor_rev > 4)
+ return 0;
+ }
+#if DEBUG
+ if (header->pt_par_num != 1)
+ printk(KERN_INFO "%s:W: %d partitions defined, only one supported\n",
+ name, header->pt_par_num);
+#endif
+ memcpy(id_string, aux->application_sig, 4);
+ id_string[4] = 0;
+ if (memcmp(id_string, "LIN", 3) == 0) {
+ STp->linux_media = 1;
+ linux_media_version = id_string[3] - '0';
+ if (linux_media_version != 4)
+ printk(KERN_INFO "%s:I: Linux media version %d detected (current 4)\n",
+ name, linux_media_version);
+ } else {
+ printk(KERN_WARNING "%s:W: Non Linux media detected (%s)\n", name, id_string);
+ return 0;
+ }
+ if (linux_media_version < STp->linux_media_version) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping frame %d with linux_media_version %d\n",
+ name, ppos, linux_media_version);
+#endif
+ return 0;
+ }
+ if (linux_media_version > STp->linux_media_version) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Frame %d sets linux_media_version to %d\n",
+ name, ppos, linux_media_version);
+#endif
+ memcpy(STp->application_sig, id_string, 5);
+ STp->linux_media_version = linux_media_version;
+ STp->update_frame_cntr = -1;
+ }
+ if (update_frame_cntr > STp->update_frame_cntr) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Frame %d sets update_frame_counter to %d\n",
+ name, ppos, update_frame_cntr);
+#endif
+ if (STp->header_cache == NULL) {
+ if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
+ printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
+ return 0;
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Allocated memory for header cache\n", name);
+#endif
+ }
+ osst_copy_from_buffer(STp->buffer, (unsigned char *)STp->header_cache);
+ header = STp->header_cache; /* further accesses from cached (full) copy */
+
+ STp->wrt_pass_cntr = ntohs(header->partition[0].wrt_pass_cntr);
+ STp->first_data_ppos = ntohl(header->partition[0].first_frame_ppos);
+ STp->eod_frame_ppos = ntohl(header->partition[0].eod_frame_ppos);
+ STp->eod_frame_lfa = ntohl(header->ext_track_tb.dat_ext_trk_ey.last_hlb);
+ STp->filemark_cnt = ntohl(aux->filemark_cnt);
+ STp->first_mark_ppos = ntohl(aux->next_mark_ppos);
+ STp->last_mark_ppos = ntohl(aux->last_mark_ppos);
+ STp->last_mark_lbn = ntohl(aux->last_mark_lbn);
+ STp->update_frame_cntr = update_frame_cntr;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Detected write pass %d, update frame counter %d, filemark counter %d\n",
+ name, STp->wrt_pass_cntr, STp->update_frame_cntr, STp->filemark_cnt);
+ printk(OSST_DEB_MSG "%s:D: first data frame on tape = %d, last = %d, eod frame = %d\n", name,
+ STp->first_data_ppos,
+ ntohl(header->partition[0].last_frame_ppos),
+ ntohl(header->partition[0].eod_frame_ppos));
+ printk(OSST_DEB_MSG "%s:D: first mark on tape = %d, last = %d, eod frame = %d\n",
+ name, STp->first_mark_ppos, STp->last_mark_ppos, STp->eod_frame_ppos);
+#endif
+ if (header->minor_rev < 4 && STp->linux_media_version == 4) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Moving filemark list to ADR 1.4 location\n", name);
+#endif
+ memcpy((void *)header->dat_fm_tab.fm_tab_ent,
+ (void *)header->old_filemark_list, sizeof(header->dat_fm_tab.fm_tab_ent));
+ memset((void *)header->old_filemark_list, 0, sizeof(header->old_filemark_list));
+ }
+ if (header->minor_rev == 4 &&
+ (header->ext_trk_tb_off != htons(17192) ||
+ header->partition[0].partition_num != OS_DATA_PARTITION ||
+ header->partition[0].par_desc_ver != OS_PARTITION_VERSION ||
+ header->partition[0].last_frame_ppos != htonl(STp->capacity) ||
+ header->cfg_col_width != htonl(20) ||
+ header->dat_col_width != htonl(1500) ||
+ header->qfa_col_width != htonl(0) ||
+ header->ext_track_tb.nr_stream_part != 1 ||
+ header->ext_track_tb.et_ent_sz != 32 ||
+ header->ext_track_tb.dat_ext_trk_ey.et_part_num != OS_DATA_PARTITION ||
+ header->ext_track_tb.dat_ext_trk_ey.fmt != 1 ||
+ header->ext_track_tb.dat_ext_trk_ey.fm_tab_off != htons(17736) ||
+ header->ext_track_tb.dat_ext_trk_ey.last_hlb_hi != 0 ||
+ header->ext_track_tb.dat_ext_trk_ey.last_pp != htonl(STp->eod_frame_ppos) ||
+ header->dat_fm_tab.fm_part_num != OS_DATA_PARTITION ||
+ header->dat_fm_tab.fm_tab_ent_sz != 4 ||
+ header->dat_fm_tab.fm_tab_ent_cnt !=
+ htons(STp->filemark_cnt<OS_FM_TAB_MAX?STp->filemark_cnt:OS_FM_TAB_MAX)))
+ printk(KERN_WARNING "%s:W: Failed consistency check ADR 1.4 format\n", name);
+
+ }
+
+ return 1;
+}
+
+static int osst_analyze_headers(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ int position, ppos;
+ int first, last;
+ int valid = 0;
+ char * name = tape_name(STp);
+
+ position = osst_get_frame_position(STp, aSRpnt);
+
+ if (STp->raw) {
+ STp->header_ok = STp->linux_media = 1;
+ STp->linux_media_version = 0;
+ return 1;
+ }
+ STp->header_ok = STp->linux_media = STp->linux_media_version = 0;
+ STp->wrt_pass_cntr = STp->update_frame_cntr = -1;
+ STp->eod_frame_ppos = STp->first_data_ppos = -1;
+ STp->first_mark_ppos = STp->last_mark_ppos = STp->last_mark_lbn = -1;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reading header\n", name);
+#endif
+
+ /* optimization for speed - if we are positioned at ppos 10, read second group first */
+ /* TODO try the ADR 1.1 locations for the second group if we have no valid one yet... */
+
+ first = position==10?0xbae: 5;
+ last = position==10?0xbb3:10;
+
+ for (ppos = first; ppos < last; ppos++)
+ if (__osst_analyze_headers(STp, aSRpnt, ppos))
+ valid = 1;
+
+ first = position==10? 5:0xbae;
+ last = position==10?10:0xbb3;
+
+ for (ppos = first; ppos < last; ppos++)
+ if (__osst_analyze_headers(STp, aSRpnt, ppos))
+ valid = 1;
+
+ if (!valid) {
+ printk(KERN_ERR "%s:E: Failed to find valid ADRL header, new media?\n", name);
+ STp->eod_frame_ppos = STp->first_data_ppos = 0;
+ osst_set_frame_position(STp, aSRpnt, 10, 0);
+ return 0;
+ }
+ if (position <= STp->first_data_ppos) {
+ position = STp->first_data_ppos;
+ STp->ps[0].drv_file = STp->ps[0].drv_block = STp->frame_seq_number = STp->logical_blk_num = 0;
+ }
+ osst_set_frame_position(STp, aSRpnt, position, 0);
+ STp->header_ok = 1;
+
+ return 1;
+}
+
+static int osst_verify_position(struct osst_tape * STp, struct osst_request ** aSRpnt)
+{
+ int frame_position = STp->first_frame_position;
+ int frame_seq_numbr = STp->frame_seq_number;
+ int logical_blk_num = STp->logical_blk_num;
+ int halfway_frame = STp->frame_in_buffer;
+ int read_pointer = STp->buffer->read_pointer;
+ int prev_mark_ppos = -1;
+ int actual_mark_ppos, i, n;
+#if DEBUG
+ char * name = tape_name(STp);
+
+ printk(OSST_DEB_MSG "%s:D: Verify that the tape is really the one we think before writing\n", name);
+#endif
+ osst_set_frame_position(STp, aSRpnt, frame_position - 1, 0);
+ if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in verify_position\n", name);
+#endif
+ return (-EIO);
+ }
+ if (STp->linux_media_version >= 4) {
+ for (i=0; i<STp->filemark_cnt; i++)
+ if ((n=ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[i])) < frame_position)
+ prev_mark_ppos = n;
+ } else
+ prev_mark_ppos = frame_position - 1; /* usually - we don't really know */
+ actual_mark_ppos = STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER ?
+ frame_position - 1 : ntohl(STp->buffer->aux->last_mark_ppos);
+ if (frame_position != STp->first_frame_position ||
+ frame_seq_numbr != STp->frame_seq_number + (halfway_frame?0:1) ||
+ prev_mark_ppos != actual_mark_ppos ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Block mismatch: fppos %d-%d, fseq %d-%d, mark %d-%d\n", name,
+ STp->first_frame_position, frame_position,
+ STp->frame_seq_number + (halfway_frame?0:1),
+ frame_seq_numbr, actual_mark_ppos, prev_mark_ppos);
+#endif
+ return (-EIO);
+ }
+ if (halfway_frame) {
+ /* prepare buffer for append and rewrite on top of original */
+ osst_set_frame_position(STp, aSRpnt, frame_position - 1, 0);
+ STp->buffer->buffer_bytes = read_pointer;
+ STp->ps[STp->partition].rw = ST_WRITING;
+ STp->dirty = 1;
+ }
+ STp->frame_in_buffer = halfway_frame;
+ STp->frame_seq_number = frame_seq_numbr;
+ STp->logical_blk_num = logical_blk_num;
+ return 0;
+}
+
+/* Acc. to OnStream, the vers. numbering is the following:
+ * X.XX for released versions (X=digit),
+ * XXXY for unreleased versions (Y=letter)
+ * Ordering 1.05 < 106A < 106B < ... < 106a < ... < 1.06
+ * This fn makes monoton numbers out of this scheme ...
+ */
+static unsigned int osst_parse_firmware_rev (const char * str)
+{
+ if (str[1] == '.') {
+ return (str[0]-'0')*10000
+ +(str[2]-'0')*1000
+ +(str[3]-'0')*100;
+ } else {
+ return (str[0]-'0')*10000
+ +(str[1]-'0')*1000
+ +(str[2]-'0')*100 - 100
+ +(str[3]-'@');
+ }
+}
+
+/*
+ * Configure the OnStream SCII tape drive for default operation
+ */
+static int osst_configure_onstream(struct osst_tape *STp, struct osst_request ** aSRpnt)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ char * name = tape_name(STp);
+ struct osst_request * SRpnt = * aSRpnt;
+ osst_mode_parameter_header_t * header;
+ osst_block_size_page_t * bs;
+ osst_capabilities_page_t * cp;
+ osst_tape_paramtr_page_t * prm;
+ int drive_buffer_size;
+
+ if (STp->ready != ST_READY) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Not Ready\n", name);
+#endif
+ return (-EIO);
+ }
+
+ if (STp->os_fw_rev < 10600) {
+ printk(KERN_INFO "%s:I: Old OnStream firmware revision detected (%s),\n", name, STp->device->rev);
+ printk(KERN_INFO "%s:I: an upgrade to version 1.06 or above is recommended\n", name);
+ }
+
+ /*
+ * Configure 32.5KB (data+aux) frame size.
+ * Get the current frame size from the block size mode page
+ */
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = 8;
+ cmd[2] = BLOCK_SIZE_PAGE;
+ cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
+ if (SRpnt == NULL) {
+#if DEBUG
+ printk(OSST_DEB_MSG "osst :D: Busy\n");
+#endif
+ return (-EBUSY);
+ }
+ *aSRpnt = SRpnt;
+ if ((STp->buffer)->syscall_result != 0) {
+ printk (KERN_ERR "%s:E: Can't get tape block size mode page\n", name);
+ return (-EIO);
+ }
+
+ header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
+ bs = (osst_block_size_page_t *) ((STp->buffer)->b_data + sizeof(osst_mode_parameter_header_t) + header->bdl);
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: 32KB play back: %s\n", name, bs->play32 ? "Yes" : "No");
+ printk(OSST_DEB_MSG "%s:D: 32.5KB play back: %s\n", name, bs->play32_5 ? "Yes" : "No");
+ printk(OSST_DEB_MSG "%s:D: 32KB record: %s\n", name, bs->record32 ? "Yes" : "No");
+ printk(OSST_DEB_MSG "%s:D: 32.5KB record: %s\n", name, bs->record32_5 ? "Yes" : "No");
+#endif
+
+ /*
+ * Configure default auto columns mode, 32.5KB transfer mode
+ */
+ bs->one = 1;
+ bs->play32 = 0;
+ bs->play32_5 = 1;
+ bs->record32 = 0;
+ bs->record32_5 = 1;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SELECT;
+ cmd[1] = 0x10;
+ cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
+ *aSRpnt = SRpnt;
+ if ((STp->buffer)->syscall_result != 0) {
+ printk (KERN_ERR "%s:E: Couldn't set tape block size mode page\n", name);
+ return (-EIO);
+ }
+
+#if DEBUG
+ printk(KERN_INFO "%s:D: Drive Block Size changed to 32.5K\n", name);
+ /*
+ * In debug mode, we want to see as many errors as possible
+ * to test the error recovery mechanism.
+ */
+ osst_set_retries(STp, aSRpnt, 0);
+ SRpnt = * aSRpnt;
+#endif
+
+ /*
+ * Set vendor name to 'LIN4' for "Linux support version 4".
+ */
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SELECT;
+ cmd[1] = 0x10;
+ cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ header->mode_data_length = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH - 1;
+ header->medium_type = 0; /* Medium Type - ignoring */
+ header->dsp = 0; /* Reserved */
+ header->bdl = 0; /* Block Descriptor Length */
+
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = VENDOR_IDENT_PAGE | (1 << 7);
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 6;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 'L';
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = 'I';
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 4] = 'N';
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 5] = '4';
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 6] = 0;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 7] = 0;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
+ *aSRpnt = SRpnt;
+
+ if ((STp->buffer)->syscall_result != 0) {
+ printk (KERN_ERR "%s:E: Couldn't set vendor name to %s\n", name,
+ (char *) ((STp->buffer)->b_data + MODE_HEADER_LENGTH + 2));
+ return (-EIO);
+ }
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = 8;
+ cmd[2] = CAPABILITIES_PAGE;
+ cmd[4] = CAPABILITIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
+ *aSRpnt = SRpnt;
+
+ if ((STp->buffer)->syscall_result != 0) {
+ printk (KERN_ERR "%s:E: Can't get capabilities page\n", name);
+ return (-EIO);
+ }
+
+ header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
+ cp = (osst_capabilities_page_t *) ((STp->buffer)->b_data +
+ sizeof(osst_mode_parameter_header_t) + header->bdl);
+
+ drive_buffer_size = ntohs(cp->buffer_size) / 2;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = 8;
+ cmd[2] = TAPE_PARAMTR_PAGE;
+ cmd[4] = TAPE_PARAMTR_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
+ *aSRpnt = SRpnt;
+
+ if ((STp->buffer)->syscall_result != 0) {
+ printk (KERN_ERR "%s:E: Can't get tape parameter page\n", name);
+ return (-EIO);
+ }
+
+ header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
+ prm = (osst_tape_paramtr_page_t *) ((STp->buffer)->b_data +
+ sizeof(osst_mode_parameter_header_t) + header->bdl);
+
+ STp->density = prm->density;
+ STp->capacity = ntohs(prm->segtrk) * ntohs(prm->trks);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Density %d, tape length: %dMB, drive buffer size: %dKB\n",
+ name, STp->density, STp->capacity / 32, drive_buffer_size);
+#endif
+
+ return 0;
+
+}
+
+
+/* Step over EOF if it has been inadvertently crossed (ioctl not used because
+ it messes up the block number). */
+static int cross_eof(struct osst_tape *STp, struct osst_request ** aSRpnt, int forward)
+{
+ int result;
+ char * name = tape_name(STp);
+
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Stepping over filemark %s.\n",
+ name, forward ? "forward" : "backward");
+#endif
+
+ if (forward) {
+ /* assumes that the filemark is already read by the drive, so this is low cost */
+ result = osst_space_over_filemarks_forward_slow(STp, aSRpnt, MTFSF, 1);
+ }
+ else
+ /* assumes this is only called if we just read the filemark! */
+ result = osst_seek_logical_blk(STp, aSRpnt, STp->logical_blk_num - 1);
+
+ if (result < 0)
+ printk(KERN_WARNING "%s:W: Stepping over filemark %s failed.\n",
+ name, forward ? "forward" : "backward");
+
+ return result;
+}
+
+
+/* Get the tape position. */
+
+static int osst_get_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt)
+{
+ unsigned char scmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ int result = 0;
+ char * name = tape_name(STp);
+
+ /* KG: We want to be able to use it for checking Write Buffer availability
+ * and thus don't want to risk to overwrite anything. Exchange buffers ... */
+ char mybuf[24];
+ char * olddata = STp->buffer->b_data;
+ int oldsize = STp->buffer->buffer_size;
+
+ if (STp->ready != ST_READY) return (-EIO);
+
+ memset (scmd, 0, MAX_COMMAND_SIZE);
+ scmd[0] = READ_POSITION;
+
+ STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
+ SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+ if (!SRpnt) {
+ STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
+ return (-EBUSY);
+ }
+ *aSRpnt = SRpnt;
+
+ if (STp->buffer->syscall_result)
+ result = ((SRpnt->sense[2] & 0x0f) == 3) ? -EIO : -EINVAL; /* 3: Write Error */
+
+ if (result == -EINVAL)
+ printk(KERN_ERR "%s:E: Can't read tape position.\n", name);
+ else {
+ if (result == -EIO) { /* re-read position - this needs to preserve media errors */
+ unsigned char mysense[16];
+ memcpy (mysense, SRpnt->sense, 16);
+ memset (scmd, 0, MAX_COMMAND_SIZE);
+ scmd[0] = READ_POSITION;
+ STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
+ SRpnt = osst_do_scsi(SRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reread position, reason=[%02x:%02x:%02x], result=[%s%02x:%02x:%02x]\n",
+ name, mysense[2], mysense[12], mysense[13], STp->buffer->syscall_result?"":"ok:",
+ SRpnt->sense[2],SRpnt->sense[12],SRpnt->sense[13]);
+#endif
+ if (!STp->buffer->syscall_result)
+ memcpy (SRpnt->sense, mysense, 16);
+ else
+ printk(KERN_WARNING "%s:W: Double error in get position\n", name);
+ }
+ STp->first_frame_position = ((STp->buffer)->b_data[4] << 24)
+ + ((STp->buffer)->b_data[5] << 16)
+ + ((STp->buffer)->b_data[6] << 8)
+ + (STp->buffer)->b_data[7];
+ STp->last_frame_position = ((STp->buffer)->b_data[ 8] << 24)
+ + ((STp->buffer)->b_data[ 9] << 16)
+ + ((STp->buffer)->b_data[10] << 8)
+ + (STp->buffer)->b_data[11];
+ STp->cur_frames = (STp->buffer)->b_data[15];
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: Drive Positions: host %d, tape %d%s, buffer %d\n", name,
+ STp->first_frame_position, STp->last_frame_position,
+ ((STp->buffer)->b_data[0]&0x80)?" (BOP)":
+ ((STp->buffer)->b_data[0]&0x40)?" (EOP)":"",
+ STp->cur_frames);
+ }
+#endif
+ if (STp->cur_frames == 0 && STp->first_frame_position != STp->last_frame_position) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Correcting read position %d, %d, %d\n", name,
+ STp->first_frame_position, STp->last_frame_position, STp->cur_frames);
+#endif
+ STp->first_frame_position = STp->last_frame_position;
+ }
+ }
+ STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
+
+ return (result == 0 ? STp->first_frame_position : result);
+}
+
+
+/* Set the tape block */
+static int osst_set_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt, int ppos, int skip)
+{
+ unsigned char scmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ struct st_partstat * STps;
+ int result = 0;
+ int pp = (ppos == 3000 && !skip)? 0 : ppos;
+ char * name = tape_name(STp);
+
+ if (STp->ready != ST_READY) return (-EIO);
+
+ STps = &(STp->ps[STp->partition]);
+
+ if (ppos < 0 || ppos > STp->capacity) {
+ printk(KERN_WARNING "%s:W: Reposition request %d out of range\n", name, ppos);
+ pp = ppos = ppos < 0 ? 0 : (STp->capacity - 1);
+ result = (-EINVAL);
+ }
+
+ do {
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Setting ppos to %d.\n", name, pp);
+#endif
+ memset (scmd, 0, MAX_COMMAND_SIZE);
+ scmd[0] = SEEK_10;
+ scmd[1] = 1;
+ scmd[3] = (pp >> 24);
+ scmd[4] = (pp >> 16);
+ scmd[5] = (pp >> 8);
+ scmd[6] = pp;
+ if (skip)
+ scmd[9] = 0x80;
+
+ SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 0, DMA_NONE, STp->long_timeout,
+ MAX_RETRIES, 1);
+ if (!SRpnt)
+ return (-EBUSY);
+ *aSRpnt = SRpnt;
+
+ if ((STp->buffer)->syscall_result != 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: SEEK command from %d to %d failed.\n",
+ name, STp->first_frame_position, pp);
+#endif
+ result = (-EIO);
+ }
+ if (pp != ppos)
+ osst_wait_ready(STp, aSRpnt, 5 * 60, OSST_WAIT_POSITION_COMPLETE);
+ } while ((pp != ppos) && (pp = ppos));
+ STp->first_frame_position = STp->last_frame_position = ppos;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->rw = ST_IDLE;
+ STp->frame_in_buffer = 0;
+ return result;
+}
+
+static int osst_write_trailer(struct osst_tape *STp, struct osst_request ** aSRpnt, int leave_at_EOT)
+{
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ int result = 0;
+
+ if (STp->write_type != OS_WRITE_NEW_MARK) {
+ /* true unless the user wrote the filemark for us */
+ result = osst_flush_drive_buffer(STp, aSRpnt);
+ if (result < 0) goto out;
+ result = osst_write_filemark(STp, aSRpnt);
+ if (result < 0) goto out;
+
+ if (STps->drv_file >= 0)
+ STps->drv_file++ ;
+ STps->drv_block = 0;
+ }
+ result = osst_write_eod(STp, aSRpnt);
+ osst_write_header(STp, aSRpnt, leave_at_EOT);
+
+ STps->eof = ST_FM;
+out:
+ return result;
+}
+
+/* osst versions of st functions - augmented and stripped to suit OnStream only */
+
+/* Flush the write buffer (never need to write if variable blocksize). */
+static int osst_flush_write_buffer(struct osst_tape *STp, struct osst_request ** aSRpnt)
+{
+ int offset, transfer, blks = 0;
+ int result = 0;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt = *aSRpnt;
+ struct st_partstat * STps;
+ char * name = tape_name(STp);
+
+ if ((STp->buffer)->writing) {
+ if (SRpnt == (STp->buffer)->last_SRpnt)
+#if DEBUG
+ { printk(OSST_DEB_MSG
+ "%s:D: aSRpnt points to osst_request that write_behind_check will release -- cleared\n", name);
+#endif
+ *aSRpnt = SRpnt = NULL;
+#if DEBUG
+ } else if (SRpnt)
+ printk(OSST_DEB_MSG
+ "%s:D: aSRpnt does not point to osst_request that write_behind_check will release -- strange\n", name);
+#endif
+ osst_write_behind_check(STp);
+ if ((STp->buffer)->syscall_result) {
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Async write error (flush) %x.\n",
+ name, (STp->buffer)->midlevel_result);
+#endif
+ if ((STp->buffer)->midlevel_result == INT_MAX)
+ return (-ENOSPC);
+ return (-EIO);
+ }
+ }
+
+ result = 0;
+ if (STp->dirty == 1) {
+
+ STp->write_count++;
+ STps = &(STp->ps[STp->partition]);
+ STps->rw = ST_WRITING;
+ offset = STp->buffer->buffer_bytes;
+ blks = (offset + STp->block_size - 1) / STp->block_size;
+ transfer = OS_FRAME_SIZE;
+
+ if (offset < OS_DATA_SIZE)
+ osst_zero_buffer_tail(STp->buffer);
+
+ if (STp->poll)
+ if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, -50, 120))
+ result = osst_recover_wait_frame(STp, aSRpnt, 1);
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_6;
+ cmd[1] = 1;
+ cmd[4] = 1;
+
+ switch (STp->write_type) {
+ case OS_WRITE_DATA:
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Writing %d blocks to frame %d, lblks %d-%d\n",
+ name, blks, STp->frame_seq_number,
+ STp->logical_blk_num - blks, STp->logical_blk_num - 1);
+#endif
+ osst_init_aux(STp, OS_FRAME_TYPE_DATA, STp->frame_seq_number++,
+ STp->logical_blk_num - blks, STp->block_size, blks);
+ break;
+ case OS_WRITE_EOD:
+ osst_init_aux(STp, OS_FRAME_TYPE_EOD, STp->frame_seq_number++,
+ STp->logical_blk_num, 0, 0);
+ break;
+ case OS_WRITE_NEW_MARK:
+ osst_init_aux(STp, OS_FRAME_TYPE_MARKER, STp->frame_seq_number++,
+ STp->logical_blk_num++, 0, blks=1);
+ break;
+ case OS_WRITE_HEADER:
+ osst_init_aux(STp, OS_FRAME_TYPE_HEADER, 0, 0, 0, blks=0);
+ break;
+ default: /* probably FILLER */
+ osst_init_aux(STp, OS_FRAME_TYPE_FILL, 0, 0, 0, 0);
+ }
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Flushing %d bytes, Transferring %d bytes in %d lblocks.\n",
+ name, offset, transfer, blks);
+#endif
+
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
+ STp->timeout, MAX_RETRIES, 1);
+ *aSRpnt = SRpnt;
+ if (!SRpnt)
+ return (-EBUSY);
+
+ if ((STp->buffer)->syscall_result != 0) {
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: write sense [0]=0x%02x [2]=%02x [12]=%02x [13]=%02x\n",
+ name, SRpnt->sense[0], SRpnt->sense[2],
+ SRpnt->sense[12], SRpnt->sense[13]);
+#endif
+ if ((SRpnt->sense[0] & 0x70) == 0x70 &&
+ (SRpnt->sense[2] & 0x40) && /* FIXME - SC-30 drive doesn't assert EOM bit */
+ (SRpnt->sense[2] & 0x0f) == NO_SENSE) {
+ STp->dirty = 0;
+ (STp->buffer)->buffer_bytes = 0;
+ result = (-ENOSPC);
+ }
+ else {
+ if (osst_write_error_recovery(STp, aSRpnt, 1)) {
+ printk(KERN_ERR "%s:E: Error on flush write.\n", name);
+ result = (-EIO);
+ }
+ }
+ STps->drv_block = (-1); /* FIXME - even if write recovery succeeds? */
+ }
+ else {
+ STp->first_frame_position++;
+ STp->dirty = 0;
+ (STp->buffer)->buffer_bytes = 0;
+ }
+ }
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Exit flush write buffer with code %d\n", name, result);
+#endif
+ return result;
+}
+
+
+/* Flush the tape buffer. The tape will be positioned correctly unless
+ seek_next is true. */
+static int osst_flush_buffer(struct osst_tape * STp, struct osst_request ** aSRpnt, int seek_next)
+{
+ struct st_partstat * STps;
+ int backspace = 0, result = 0;
+#if DEBUG
+ char * name = tape_name(STp);
+#endif
+
+ /*
+ * If there was a bus reset, block further access
+ * to this device.
+ */
+ if( STp->pos_unknown)
+ return (-EIO);
+
+ if (STp->ready != ST_READY)
+ return 0;
+
+ STps = &(STp->ps[STp->partition]);
+ if (STps->rw == ST_WRITING || STp->dirty) { /* Writing */
+ STp->write_type = OS_WRITE_DATA;
+ return osst_flush_write_buffer(STp, aSRpnt);
+ }
+ if (STp->block_size == 0)
+ return 0;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reached flush (read) buffer\n", name);
+#endif
+
+ if (!STp->can_bsr) {
+ backspace = ((STp->buffer)->buffer_bytes + (STp->buffer)->read_pointer) / STp->block_size -
+ ((STp->buffer)->read_pointer + STp->block_size - 1 ) / STp->block_size ;
+ (STp->buffer)->buffer_bytes = 0;
+ (STp->buffer)->read_pointer = 0;
+ STp->frame_in_buffer = 0; /* FIXME is this relevant w. OSST? */
+ }
+
+ if (!seek_next) {
+ if (STps->eof == ST_FM_HIT) {
+ result = cross_eof(STp, aSRpnt, 0); /* Back over the EOF hit */
+ if (!result)
+ STps->eof = ST_NOEOF;
+ else {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ }
+ }
+ if (!result && backspace > 0) /* TODO -- design and run a test case for this */
+ result = osst_seek_logical_blk(STp, aSRpnt, STp->logical_blk_num - backspace);
+ }
+ else if (STps->eof == ST_FM_HIT) {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_NOEOF;
+ }
+
+ return result;
+}
+
+static int osst_write_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int synchronous)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt;
+ int blks;
+#if DEBUG
+ char * name = tape_name(STp);
+#endif
+
+ if ((!STp-> raw) && (STp->first_frame_position == 0xbae)) { /* _must_ preserve buffer! */
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Reaching config partition.\n", name);
+#endif
+ if (osst_flush_drive_buffer(STp, aSRpnt) < 0) {
+ return (-EIO);
+ }
+ /* error recovery may have bumped us past the header partition */
+ if (osst_get_frame_position(STp, aSRpnt) < 0xbb8) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Skipping over config partition.\n", name);
+#endif
+ osst_position_tape_and_confirm(STp, aSRpnt, 0xbb8);
+ }
+ }
+
+ if (STp->poll)
+ if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, -48, 120))
+ if (osst_recover_wait_frame(STp, aSRpnt, 1))
+ return (-EIO);
+
+// osst_build_stats(STp, &SRpnt);
+
+ STp->ps[STp->partition].rw = ST_WRITING;
+ STp->write_type = OS_WRITE_DATA;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_6;
+ cmd[1] = 1;
+ cmd[4] = 1; /* one frame at a time... */
+ blks = STp->buffer->buffer_bytes / STp->block_size;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Writing %d blocks to frame %d, lblks %d-%d\n", name, blks,
+ STp->frame_seq_number, STp->logical_blk_num - blks, STp->logical_blk_num - 1);
+#endif
+ osst_init_aux(STp, OS_FRAME_TYPE_DATA, STp->frame_seq_number++,
+ STp->logical_blk_num - blks, STp->block_size, blks);
+
+#if DEBUG
+ if (!synchronous)
+ STp->write_pending = 1;
+#endif
+ SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE, STp->timeout,
+ MAX_RETRIES, synchronous);
+ if (!SRpnt)
+ return (-EBUSY);
+ *aSRpnt = SRpnt;
+
+ if (synchronous) {
+ if (STp->buffer->syscall_result != 0) {
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Error on write:\n", name);
+#endif
+ if ((SRpnt->sense[0] & 0x70) == 0x70 &&
+ (SRpnt->sense[2] & 0x40)) {
+ if ((SRpnt->sense[2] & 0x0f) == VOLUME_OVERFLOW)
+ return (-ENOSPC);
+ }
+ else {
+ if (osst_write_error_recovery(STp, aSRpnt, 1))
+ return (-EIO);
+ }
+ }
+ else
+ STp->first_frame_position++;
+ }
+
+ STp->write_count++;
+
+ return 0;
+}
+
+/* Lock or unlock the drive door. Don't use when struct osst_request allocated. */
+static int do_door_lock(struct osst_tape * STp, int do_lock)
+{
+ int retval;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: %socking drive door.\n", tape_name(STp), do_lock ? "L" : "Unl");
+#endif
+
+ retval = scsi_set_medium_removal(STp->device,
+ do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
+ if (!retval)
+ STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
+ else
+ STp->door_locked = ST_LOCK_FAILS;
+ return retval;
+}
+
+/* Set the internal state after reset */
+static void reset_state(struct osst_tape *STp)
+{
+ int i;
+ struct st_partstat *STps;
+
+ STp->pos_unknown = 0;
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = -1;
+ STps->drv_file = -1;
+ }
+}
+
+
+/* Entry points to osst */
+
+/* Write command */
+static ssize_t osst_write(struct file * filp, const char __user * buf, size_t count, loff_t *ppos)
+{
+ ssize_t total, retval = 0;
+ ssize_t i, do_count, blks, transfer;
+ int write_threshold;
+ int doing_write = 0;
+ const char __user * b_point;
+ struct osst_request * SRpnt = NULL;
+ struct st_modedef * STm;
+ struct st_partstat * STps;
+ struct osst_tape * STp = filp->private_data;
+ char * name = tape_name(STp);
+
+
+ if (mutex_lock_interruptible(&STp->lock))
+ return (-ERESTARTSYS);
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited.
+ */
+ if( !scsi_block_when_processing_errors(STp->device) ) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if (STp->ready != ST_READY) {
+ if (STp->ready == ST_NO_TAPE)
+ retval = (-ENOMEDIUM);
+ else
+ retval = (-EIO);
+ goto out;
+ }
+ STm = &(STp->modes[STp->current_mode]);
+ if (!STm->defined) {
+ retval = (-ENXIO);
+ goto out;
+ }
+ if (count == 0)
+ goto out;
+
+ /*
+ * If there was a bus reset, block further access
+ * to this device.
+ */
+ if (STp->pos_unknown) {
+ retval = (-EIO);
+ goto out;
+ }
+
+#if DEBUG
+ if (!STp->in_use) {
+ printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
+ retval = (-EIO);
+ goto out;
+ }
+#endif
+
+ if (STp->write_prot) {
+ retval = (-EACCES);
+ goto out;
+ }
+
+ /* Write must be integral number of blocks */
+ if (STp->block_size != 0 && (count % STp->block_size) != 0) {
+ printk(KERN_ERR "%s:E: Write (%Zd bytes) not multiple of tape block size (%d%c).\n",
+ name, count, STp->block_size<1024?
+ STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k');
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ if (STp->first_frame_position >= STp->capacity - OSST_EOM_RESERVE) {
+ printk(KERN_ERR "%s:E: Write truncated at EOM early warning (frame %d).\n",
+ name, STp->first_frame_position);
+ retval = (-ENOSPC);
+ goto out;
+ }
+
+ if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1))
+ STp->door_locked = ST_LOCKED_AUTO;
+
+ STps = &(STp->ps[STp->partition]);
+
+ if (STps->rw == ST_READING) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Switching from read to write at file %d, block %d\n", name,
+ STps->drv_file, STps->drv_block);
+#endif
+ retval = osst_flush_buffer(STp, &SRpnt, 0);
+ if (retval)
+ goto out;
+ STps->rw = ST_IDLE;
+ }
+ if (STps->rw != ST_WRITING) {
+ /* Are we totally rewriting this tape? */
+ if (!STp->header_ok ||
+ (STp->first_frame_position == STp->first_data_ppos && STps->drv_block < 0) ||
+ (STps->drv_file == 0 && STps->drv_block == 0)) {
+ STp->wrt_pass_cntr++;
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Allocating next write pass counter: %d\n",
+ name, STp->wrt_pass_cntr);
+#endif
+ osst_reset_header(STp, &SRpnt);
+ STps->drv_file = STps->drv_block = 0;
+ }
+ /* Do we know where we'll be writing on the tape? */
+ else {
+ if ((STp->fast_open && osst_verify_position(STp, &SRpnt)) ||
+ STps->drv_file < 0 || STps->drv_block < 0) {
+ if (STp->first_frame_position == STp->eod_frame_ppos) { /* at EOD */
+ STps->drv_file = STp->filemark_cnt;
+ STps->drv_block = 0;
+ }
+ else {
+ /* We have no idea where the tape is positioned - give up */
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: Cannot write at indeterminate position.\n", name);
+#endif
+ retval = (-EIO);
+ goto out;
+ }
+ }
+ if ((STps->drv_file + STps->drv_block) > 0 && STps->drv_file < STp->filemark_cnt) {
+ STp->filemark_cnt = STps->drv_file;
+ STp->last_mark_ppos =
+ ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt-1]);
+ printk(KERN_WARNING
+ "%s:W: Overwriting file %d with old write pass counter %d\n",
+ name, STps->drv_file, STp->wrt_pass_cntr);
+ printk(KERN_WARNING
+ "%s:W: may lead to stale data being accepted on reading back!\n",
+ name);
+#if DEBUG
+ printk(OSST_DEB_MSG
+ "%s:D: resetting filemark count to %d and last mark ppos,lbn to %d,%d\n",
+ name, STp->filemark_cnt, STp->last_mark_ppos, STp->last_mark_lbn);
+#endif
+ }
+ }
+ STp->fast_open = 0;
+ }
+ if (!STp->header_ok) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Write cannot proceed without valid headers\n", name);
+#endif
+ retval = (-EIO);
+ goto out;
+ }
+
+ if ((STp->buffer)->writing) {
+if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name, __LINE__);
+ osst_write_behind_check(STp);
+ if ((STp->buffer)->syscall_result) {
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Async write error (write) %x.\n", name,
+ (STp->buffer)->midlevel_result);
+#endif
+ if ((STp->buffer)->midlevel_result == INT_MAX)
+ STps->eof = ST_EOM_OK;
+ else
+ STps->eof = ST_EOM_ERROR;
+ }
+ }
+ if (STps->eof == ST_EOM_OK) {
+ retval = (-ENOSPC);
+ goto out;
+ }
+ else if (STps->eof == ST_EOM_ERROR) {
+ retval = (-EIO);
+ goto out;
+ }
+
+ /* Check the buffer readability in cases where copy_user might catch
+ the problems after some tape movement. */
+ if ((copy_from_user(&i, buf, 1) != 0 ||
+ copy_from_user(&i, buf + count - 1, 1) != 0)) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ if (!STm->do_buffer_writes) {
+ write_threshold = 1;
+ }
+ else
+ write_threshold = (STp->buffer)->buffer_blocks * STp->block_size;
+ if (!STm->do_async_writes)
+ write_threshold--;
+
+ total = count;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Writing %d bytes to file %d block %d lblk %d fseq %d fppos %d\n",
+ name, (int) count, STps->drv_file, STps->drv_block,
+ STp->logical_blk_num, STp->frame_seq_number, STp->first_frame_position);
+#endif
+ b_point = buf;
+ while ((STp->buffer)->buffer_bytes + count > write_threshold)
+ {
+ doing_write = 1;
+ do_count = (STp->buffer)->buffer_blocks * STp->block_size -
+ (STp->buffer)->buffer_bytes;
+ if (do_count > count)
+ do_count = count;
+
+ i = append_to_buffer(b_point, STp->buffer, do_count);
+ if (i) {
+ retval = i;
+ goto out;
+ }
+
+ blks = do_count / STp->block_size;
+ STp->logical_blk_num += blks; /* logical_blk_num is incremented as data is moved from user */
+
+ i = osst_write_frame(STp, &SRpnt, 1);
+
+ if (i == (-ENOSPC)) {
+ transfer = STp->buffer->writing; /* FIXME -- check this logic */
+ if (transfer <= do_count) {
+ *ppos += do_count - transfer;
+ count -= do_count - transfer;
+ if (STps->drv_block >= 0) {
+ STps->drv_block += (do_count - transfer) / STp->block_size;
+ }
+ STps->eof = ST_EOM_OK;
+ retval = (-ENOSPC); /* EOM within current request */
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: EOM with %d bytes unwritten.\n",
+ name, (int) transfer);
+#endif
+ }
+ else {
+ STps->eof = ST_EOM_ERROR;
+ STps->drv_block = (-1); /* Too cautious? */
+ retval = (-EIO); /* EOM for old data */
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: EOM with lost data.\n", name);
+#endif
+ }
+ }
+ else
+ retval = i;
+
+ if (retval < 0) {
+ if (SRpnt != NULL) {
+ osst_release_request(SRpnt);
+ SRpnt = NULL;
+ }
+ STp->buffer->buffer_bytes = 0;
+ STp->dirty = 0;
+ if (count < total)
+ retval = total - count;
+ goto out;
+ }
+
+ *ppos += do_count;
+ b_point += do_count;
+ count -= do_count;
+ if (STps->drv_block >= 0) {
+ STps->drv_block += blks;
+ }
+ STp->buffer->buffer_bytes = 0;
+ STp->dirty = 0;
+ } /* end while write threshold exceeded */
+
+ if (count != 0) {
+ STp->dirty = 1;
+ i = append_to_buffer(b_point, STp->buffer, count);
+ if (i) {
+ retval = i;
+ goto out;
+ }
+ blks = count / STp->block_size;
+ STp->logical_blk_num += blks;
+ if (STps->drv_block >= 0) {
+ STps->drv_block += blks;
+ }
+ *ppos += count;
+ count = 0;
+ }
+
+ if (doing_write && (STp->buffer)->syscall_result != 0) {
+ retval = (STp->buffer)->syscall_result;
+ goto out;
+ }
+
+ if (STm->do_async_writes && ((STp->buffer)->buffer_bytes >= STp->write_threshold)) {
+ /* Schedule an asynchronous write */
+ (STp->buffer)->writing = ((STp->buffer)->buffer_bytes /
+ STp->block_size) * STp->block_size;
+ STp->dirty = !((STp->buffer)->writing ==
+ (STp->buffer)->buffer_bytes);
+
+ i = osst_write_frame(STp, &SRpnt, 0);
+ if (i < 0) {
+ retval = (-EIO);
+ goto out;
+ }
+ SRpnt = NULL; /* Prevent releasing this request! */
+ }
+ STps->at_sm &= (total == 0);
+ if (total > 0)
+ STps->eof = ST_NOEOF;
+
+ retval = total;
+
+out:
+ if (SRpnt != NULL) osst_release_request(SRpnt);
+
+ mutex_unlock(&STp->lock);
+
+ return retval;
+}
+
+
+/* Read command */
+static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, loff_t *ppos)
+{
+ ssize_t total, retval = 0;
+ ssize_t i, transfer;
+ int special;
+ struct st_modedef * STm;
+ struct st_partstat * STps;
+ struct osst_request * SRpnt = NULL;
+ struct osst_tape * STp = filp->private_data;
+ char * name = tape_name(STp);
+
+
+ if (mutex_lock_interruptible(&STp->lock))
+ return (-ERESTARTSYS);
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited.
+ */
+ if( !scsi_block_when_processing_errors(STp->device) ) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if (STp->ready != ST_READY) {
+ if (STp->ready == ST_NO_TAPE)
+ retval = (-ENOMEDIUM);
+ else
+ retval = (-EIO);
+ goto out;
+ }
+ STm = &(STp->modes[STp->current_mode]);
+ if (!STm->defined) {
+ retval = (-ENXIO);
+ goto out;
+ }
+#if DEBUG
+ if (!STp->in_use) {
+ printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
+ retval = (-EIO);
+ goto out;
+ }
+#endif
+ /* Must have initialized medium */
+ if (!STp->header_ok) {
+ retval = (-EIO);
+ goto out;
+ }
+
+ if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1))
+ STp->door_locked = ST_LOCKED_AUTO;
+
+ STps = &(STp->ps[STp->partition]);
+ if (STps->rw == ST_WRITING) {
+ retval = osst_flush_buffer(STp, &SRpnt, 0);
+ if (retval)
+ goto out;
+ STps->rw = ST_IDLE;
+ /* FIXME -- this may leave the tape without EOD and up2date headers */
+ }
+
+ if ((count % STp->block_size) != 0) {
+ printk(KERN_WARNING
+ "%s:W: Read (%Zd bytes) not multiple of tape block size (%d%c).\n", name, count,
+ STp->block_size<1024?STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k');
+ }
+
+#if DEBUG
+ if (debugging && STps->eof != ST_NOEOF)
+ printk(OSST_DEB_MSG "%s:D: EOF/EOM flag up (%d). Bytes %d\n", name,
+ STps->eof, (STp->buffer)->buffer_bytes);
+#endif
+ if ((STp->buffer)->buffer_bytes == 0 &&
+ STps->eof >= ST_EOD_1) {
+ if (STps->eof < ST_EOD) {
+ STps->eof += 1;
+ retval = 0;
+ goto out;
+ }
+ retval = (-EIO); /* EOM or Blank Check */
+ goto out;
+ }
+
+ /* Check the buffer writability before any tape movement. Don't alter
+ buffer data. */
+ if (copy_from_user(&i, buf, 1) != 0 ||
+ copy_to_user (buf, &i, 1) != 0 ||
+ copy_from_user(&i, buf + count - 1, 1) != 0 ||
+ copy_to_user (buf + count - 1, &i, 1) != 0) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ /* Loop until enough data in buffer or a special condition found */
+ for (total = 0, special = 0; total < count - STp->block_size + 1 && !special; ) {
+
+ /* Get new data if the buffer is empty */
+ if ((STp->buffer)->buffer_bytes == 0) {
+ if (STps->eof == ST_FM_HIT)
+ break;
+ special = osst_get_logical_frame(STp, &SRpnt, STp->frame_seq_number, 0);
+ if (special < 0) { /* No need to continue read */
+ STp->frame_in_buffer = 0;
+ retval = special;
+ goto out;
+ }
+ }
+
+ /* Move the data from driver buffer to user buffer */
+ if ((STp->buffer)->buffer_bytes > 0) {
+#if DEBUG
+ if (debugging && STps->eof != ST_NOEOF)
+ printk(OSST_DEB_MSG "%s:D: EOF up (%d). Left %d, needed %d.\n", name,
+ STps->eof, (STp->buffer)->buffer_bytes, (int) (count - total));
+#endif
+ /* force multiple of block size, note block_size may have been adjusted */
+ transfer = (((STp->buffer)->buffer_bytes < count - total ?
+ (STp->buffer)->buffer_bytes : count - total)/
+ STp->block_size) * STp->block_size;
+
+ if (transfer == 0) {
+ printk(KERN_WARNING
+ "%s:W: Nothing can be transferred, requested %Zd, tape block size (%d%c).\n",
+ name, count, STp->block_size < 1024?
+ STp->block_size:STp->block_size/1024,
+ STp->block_size<1024?'b':'k');
+ break;
+ }
+ i = from_buffer(STp->buffer, buf, transfer);
+ if (i) {
+ retval = i;
+ goto out;
+ }
+ STp->logical_blk_num += transfer / STp->block_size;
+ STps->drv_block += transfer / STp->block_size;
+ *ppos += transfer;
+ buf += transfer;
+ total += transfer;
+ }
+
+ if ((STp->buffer)->buffer_bytes == 0) {
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Finished with frame %d\n",
+ name, STp->frame_seq_number);
+#endif
+ STp->frame_in_buffer = 0;
+ STp->frame_seq_number++; /* frame to look for next time */
+ }
+ } /* for (total = 0, special = 0; total < count && !special; ) */
+
+ /* Change the eof state if no data from tape or buffer */
+ if (total == 0) {
+ if (STps->eof == ST_FM_HIT) {
+ STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD_2:ST_FM;
+ STps->drv_block = 0;
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ }
+ else if (STps->eof == ST_EOD_1) {
+ STps->eof = ST_EOD_2;
+ if (STps->drv_block > 0 && STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ }
+ else if (STps->eof == ST_EOD_2)
+ STps->eof = ST_EOD;
+ }
+ else if (STps->eof == ST_FM)
+ STps->eof = ST_NOEOF;
+
+ retval = total;
+
+out:
+ if (SRpnt != NULL) osst_release_request(SRpnt);
+
+ mutex_unlock(&STp->lock);
+
+ return retval;
+}
+
+
+/* Set the driver options */
+static void osst_log_options(struct osst_tape *STp, struct st_modedef *STm, char *name)
+{
+ printk(KERN_INFO
+"%s:I: Mode %d options: buffer writes: %d, async writes: %d, read ahead: %d\n",
+ name, STp->current_mode, STm->do_buffer_writes, STm->do_async_writes,
+ STm->do_read_ahead);
+ printk(KERN_INFO
+"%s:I: can bsr: %d, two FMs: %d, fast mteom: %d, auto lock: %d,\n",
+ name, STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock);
+ printk(KERN_INFO
+"%s:I: defs for wr: %d, no block limits: %d, partitions: %d, s2 log: %d\n",
+ name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
+ STp->scsi2_logical);
+ printk(KERN_INFO
+"%s:I: sysv: %d\n", name, STm->sysv);
+#if DEBUG
+ printk(KERN_INFO
+ "%s:D: debugging: %d\n",
+ name, debugging);
+#endif
+}
+
+
+static int osst_set_options(struct osst_tape *STp, long options)
+{
+ int value;
+ long code;
+ struct st_modedef * STm;
+ char * name = tape_name(STp);
+
+ STm = &(STp->modes[STp->current_mode]);
+ if (!STm->defined) {
+ memcpy(STm, &(STp->modes[0]), sizeof(*STm));
+ modes_defined = 1;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Initialized mode %d definition from mode 0\n",
+ name, STp->current_mode);
+#endif
+ }
+
+ code = options & MT_ST_OPTIONS;
+ if (code == MT_ST_BOOLEANS) {
+ STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0;
+ STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0;
+ STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0;
+ STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0;
+ STp->two_fm = (options & MT_ST_TWO_FM) != 0;
+ STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0;
+ STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0;
+ STp->can_bsr = (options & MT_ST_CAN_BSR) != 0;
+ STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0;
+ if ((STp->device)->scsi_level >= SCSI_2)
+ STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
+ STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
+ STm->sysv = (options & MT_ST_SYSV) != 0;
+#if DEBUG
+ debugging = (options & MT_ST_DEBUGGING) != 0;
+#endif
+ osst_log_options(STp, STm, name);
+ }
+ else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
+ value = (code == MT_ST_SETBOOLEANS);
+ if ((options & MT_ST_BUFFER_WRITES) != 0)
+ STm->do_buffer_writes = value;
+ if ((options & MT_ST_ASYNC_WRITES) != 0)
+ STm->do_async_writes = value;
+ if ((options & MT_ST_DEF_WRITES) != 0)
+ STm->defaults_for_writes = value;
+ if ((options & MT_ST_READ_AHEAD) != 0)
+ STm->do_read_ahead = value;
+ if ((options & MT_ST_TWO_FM) != 0)
+ STp->two_fm = value;
+ if ((options & MT_ST_FAST_MTEOM) != 0)
+ STp->fast_mteom = value;
+ if ((options & MT_ST_AUTO_LOCK) != 0)
+ STp->do_auto_lock = value;
+ if ((options & MT_ST_CAN_BSR) != 0)
+ STp->can_bsr = value;
+ if ((options & MT_ST_NO_BLKLIMS) != 0)
+ STp->omit_blklims = value;
+ if ((STp->device)->scsi_level >= SCSI_2 &&
+ (options & MT_ST_CAN_PARTITIONS) != 0)
+ STp->can_partitions = value;
+ if ((options & MT_ST_SCSI2LOGICAL) != 0)
+ STp->scsi2_logical = value;
+ if ((options & MT_ST_SYSV) != 0)
+ STm->sysv = value;
+#if DEBUG
+ if ((options & MT_ST_DEBUGGING) != 0)
+ debugging = value;
+#endif
+ osst_log_options(STp, STm, name);
+ }
+ else if (code == MT_ST_WRITE_THRESHOLD) {
+ value = (options & ~MT_ST_OPTIONS) * ST_KILOBYTE;
+ if (value < 1 || value > osst_buffer_size) {
+ printk(KERN_WARNING "%s:W: Write threshold %d too small or too large.\n",
+ name, value);
+ return (-EIO);
+ }
+ STp->write_threshold = value;
+ printk(KERN_INFO "%s:I: Write threshold set to %d bytes.\n",
+ name, value);
+ }
+ else if (code == MT_ST_DEF_BLKSIZE) {
+ value = (options & ~MT_ST_OPTIONS);
+ if (value == ~MT_ST_OPTIONS) {
+ STm->default_blksize = (-1);
+ printk(KERN_INFO "%s:I: Default block size disabled.\n", name);
+ }
+ else {
+ if (value < 512 || value > OS_DATA_SIZE || OS_DATA_SIZE % value) {
+ printk(KERN_WARNING "%s:W: Default block size cannot be set to %d.\n",
+ name, value);
+ return (-EINVAL);
+ }
+ STm->default_blksize = value;
+ printk(KERN_INFO "%s:I: Default block size set to %d bytes.\n",
+ name, STm->default_blksize);
+ }
+ }
+ else if (code == MT_ST_TIMEOUTS) {
+ value = (options & ~MT_ST_OPTIONS);
+ if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) {
+ STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ;
+ printk(KERN_INFO "%s:I: Long timeout set to %d seconds.\n", name,
+ (value & ~MT_ST_SET_LONG_TIMEOUT));
+ }
+ else {
+ STp->timeout = value * HZ;
+ printk(KERN_INFO "%s:I: Normal timeout set to %d seconds.\n", name, value);
+ }
+ }
+ else if (code == MT_ST_DEF_OPTIONS) {
+ code = (options & ~MT_ST_CLEAR_DEFAULT);
+ value = (options & MT_ST_CLEAR_DEFAULT);
+ if (code == MT_ST_DEF_DENSITY) {
+ if (value == MT_ST_CLEAR_DEFAULT) {
+ STm->default_density = (-1);
+ printk(KERN_INFO "%s:I: Density default disabled.\n", name);
+ }
+ else {
+ STm->default_density = value & 0xff;
+ printk(KERN_INFO "%s:I: Density default set to %x\n",
+ name, STm->default_density);
+ }
+ }
+ else if (code == MT_ST_DEF_DRVBUFFER) {
+ if (value == MT_ST_CLEAR_DEFAULT) {
+ STp->default_drvbuffer = 0xff;
+ printk(KERN_INFO "%s:I: Drive buffer default disabled.\n", name);
+ }
+ else {
+ STp->default_drvbuffer = value & 7;
+ printk(KERN_INFO "%s:I: Drive buffer default set to %x\n",
+ name, STp->default_drvbuffer);
+ }
+ }
+ else if (code == MT_ST_DEF_COMPRESSION) {
+ if (value == MT_ST_CLEAR_DEFAULT) {
+ STm->default_compression = ST_DONT_TOUCH;
+ printk(KERN_INFO "%s:I: Compression default disabled.\n", name);
+ }
+ else {
+ STm->default_compression = (value & 1 ? ST_YES : ST_NO);
+ printk(KERN_INFO "%s:I: Compression default set to %x\n",
+ name, (value & 1));
+ }
+ }
+ }
+ else
+ return (-EIO);
+
+ return 0;
+}
+
+
+/* Internal ioctl function */
+static int osst_int_ioctl(struct osst_tape * STp, struct osst_request ** aSRpnt,
+ unsigned int cmd_in, unsigned long arg)
+{
+ int timeout;
+ long ltmp;
+ int i, ioctl_result;
+ int chg_eof = 1;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt = * aSRpnt;
+ struct st_partstat * STps;
+ int fileno, blkno, at_sm, frame_seq_numbr, logical_blk_num;
+ int datalen = 0, direction = DMA_NONE;
+ char * name = tape_name(STp);
+
+ if (STp->ready != ST_READY && cmd_in != MTLOAD) {
+ if (STp->ready == ST_NO_TAPE)
+ return (-ENOMEDIUM);
+ else
+ return (-EIO);
+ }
+ timeout = STp->long_timeout;
+ STps = &(STp->ps[STp->partition]);
+ fileno = STps->drv_file;
+ blkno = STps->drv_block;
+ at_sm = STps->at_sm;
+ frame_seq_numbr = STp->frame_seq_number;
+ logical_blk_num = STp->logical_blk_num;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ switch (cmd_in) {
+ case MTFSFM:
+ chg_eof = 0; /* Changed from the FSF after this */
+ case MTFSF:
+ if (STp->raw)
+ return (-EIO);
+ if (STp->linux_media)
+ ioctl_result = osst_space_over_filemarks_forward_fast(STp, &SRpnt, cmd_in, arg);
+ else
+ ioctl_result = osst_space_over_filemarks_forward_slow(STp, &SRpnt, cmd_in, arg);
+ if (fileno >= 0)
+ fileno += arg;
+ blkno = 0;
+ at_sm &= (arg == 0);
+ goto os_bypass;
+
+ case MTBSF:
+ chg_eof = 0; /* Changed from the FSF after this */
+ case MTBSFM:
+ if (STp->raw)
+ return (-EIO);
+ ioctl_result = osst_space_over_filemarks_backward(STp, &SRpnt, cmd_in, arg);
+ if (fileno >= 0)
+ fileno -= arg;
+ blkno = (-1); /* We can't know the block number */
+ at_sm &= (arg == 0);
+ goto os_bypass;
+
+ case MTFSR:
+ case MTBSR:
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Skipping %lu blocks %s from logical block %d\n",
+ name, arg, cmd_in==MTFSR?"forward":"backward", logical_blk_num);
+#endif
+ if (cmd_in == MTFSR) {
+ logical_blk_num += arg;
+ if (blkno >= 0) blkno += arg;
+ }
+ else {
+ logical_blk_num -= arg;
+ if (blkno >= 0) blkno -= arg;
+ }
+ ioctl_result = osst_seek_logical_blk(STp, &SRpnt, logical_blk_num);
+ fileno = STps->drv_file;
+ blkno = STps->drv_block;
+ at_sm &= (arg == 0);
+ goto os_bypass;
+
+ case MTFSS:
+ cmd[0] = SPACE;
+ cmd[1] = 0x04; /* Space Setmarks */ /* FIXME -- OS can't do this? */
+ cmd[2] = (arg >> 16);
+ cmd[3] = (arg >> 8);
+ cmd[4] = arg;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Spacing tape forward %d setmarks.\n", name,
+ cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
+#endif
+ if (arg != 0) {
+ blkno = fileno = (-1);
+ at_sm = 1;
+ }
+ break;
+ case MTBSS:
+ cmd[0] = SPACE;
+ cmd[1] = 0x04; /* Space Setmarks */ /* FIXME -- OS can't do this? */
+ ltmp = (-arg);
+ cmd[2] = (ltmp >> 16);
+ cmd[3] = (ltmp >> 8);
+ cmd[4] = ltmp;
+#if DEBUG
+ if (debugging) {
+ if (cmd[2] & 0x80)
+ ltmp = 0xff000000;
+ ltmp = ltmp | (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
+ printk(OSST_DEB_MSG "%s:D: Spacing tape backward %ld setmarks.\n",
+ name, (-ltmp));
+ }
+#endif
+ if (arg != 0) {
+ blkno = fileno = (-1);
+ at_sm = 1;
+ }
+ break;
+ case MTWEOF:
+ if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) {
+ STp->write_type = OS_WRITE_DATA;
+ ioctl_result = osst_flush_write_buffer(STp, &SRpnt);
+ } else
+ ioctl_result = 0;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Writing %ld filemark(s).\n", name, arg);
+#endif
+ for (i=0; i<arg; i++)
+ ioctl_result |= osst_write_filemark(STp, &SRpnt);
+ if (fileno >= 0) fileno += arg;
+ if (blkno >= 0) blkno = 0;
+ goto os_bypass;
+
+ case MTWSM:
+ if (STp->write_prot)
+ return (-EACCES);
+ if (!STp->raw)
+ return 0;
+ cmd[0] = WRITE_FILEMARKS; /* FIXME -- need OS version */
+ if (cmd_in == MTWSM)
+ cmd[1] = 2;
+ cmd[2] = (arg >> 16);
+ cmd[3] = (arg >> 8);
+ cmd[4] = arg;
+ timeout = STp->timeout;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Writing %d setmark(s).\n", name,
+ cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
+#endif
+ if (fileno >= 0)
+ fileno += arg;
+ blkno = 0;
+ at_sm = (cmd_in == MTWSM);
+ break;
+ case MTOFFL:
+ case MTLOAD:
+ case MTUNLOAD:
+ case MTRETEN:
+ cmd[0] = START_STOP;
+ cmd[1] = 1; /* Don't wait for completion */
+ if (cmd_in == MTLOAD) {
+ if (STp->ready == ST_NO_TAPE)
+ cmd[4] = 4; /* open tray */
+ else
+ cmd[4] = 1; /* load */
+ }
+ if (cmd_in == MTRETEN)
+ cmd[4] = 3; /* retension then mount */
+ if (cmd_in == MTOFFL)
+ cmd[4] = 4; /* rewind then eject */
+ timeout = STp->timeout;
+#if DEBUG
+ if (debugging) {
+ switch (cmd_in) {
+ case MTUNLOAD:
+ printk(OSST_DEB_MSG "%s:D: Unloading tape.\n", name);
+ break;
+ case MTLOAD:
+ printk(OSST_DEB_MSG "%s:D: Loading tape.\n", name);
+ break;
+ case MTRETEN:
+ printk(OSST_DEB_MSG "%s:D: Retensioning tape.\n", name);
+ break;
+ case MTOFFL:
+ printk(OSST_DEB_MSG "%s:D: Ejecting tape.\n", name);
+ break;
+ }
+ }
+#endif
+ fileno = blkno = at_sm = frame_seq_numbr = logical_blk_num = 0 ;
+ break;
+ case MTNOP:
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: No-op on tape.\n", name);
+#endif
+ return 0; /* Should do something ? */
+ break;
+ case MTEOM:
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Spacing to end of recorded medium.\n", name);
+#endif
+ if ((osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos) < 0) ||
+ (osst_get_logical_frame(STp, &SRpnt, -1, 0) < 0)) {
+ ioctl_result = -EIO;
+ goto os_bypass;
+ }
+ if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_EOD) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: No EOD frame found where expected.\n", name);
+#endif
+ ioctl_result = -EIO;
+ goto os_bypass;
+ }
+ ioctl_result = osst_set_frame_position(STp, &SRpnt, STp->eod_frame_ppos, 0);
+ fileno = STp->filemark_cnt;
+ blkno = at_sm = 0;
+ goto os_bypass;
+
+ case MTERASE:
+ if (STp->write_prot)
+ return (-EACCES);
+ ioctl_result = osst_reset_header(STp, &SRpnt);
+ i = osst_write_eod(STp, &SRpnt);
+ if (i < ioctl_result) ioctl_result = i;
+ i = osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos);
+ if (i < ioctl_result) ioctl_result = i;
+ fileno = blkno = at_sm = 0 ;
+ goto os_bypass;
+
+ case MTREW:
+ cmd[0] = REZERO_UNIT; /* rewind */
+ cmd[1] = 1;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Rewinding tape, Immed=%d.\n", name, cmd[1]);
+#endif
+ fileno = blkno = at_sm = frame_seq_numbr = logical_blk_num = 0 ;
+ break;
+
+ case MTSETBLK: /* Set block length */
+ if ((STps->drv_block == 0 ) &&
+ !STp->dirty &&
+ ((STp->buffer)->buffer_bytes == 0) &&
+ ((arg & MT_ST_BLKSIZE_MASK) >= 512 ) &&
+ ((arg & MT_ST_BLKSIZE_MASK) <= OS_DATA_SIZE) &&
+ !(OS_DATA_SIZE % (arg & MT_ST_BLKSIZE_MASK)) ) {
+ /*
+ * Only allowed to change the block size if you opened the
+ * device at the beginning of a file before writing anything.
+ * Note, that when reading, changing block_size is futile,
+ * as the size used when writing overrides it.
+ */
+ STp->block_size = (arg & MT_ST_BLKSIZE_MASK);
+ printk(KERN_INFO "%s:I: Block size set to %d bytes.\n",
+ name, STp->block_size);
+ return 0;
+ }
+ case MTSETDENSITY: /* Set tape density */
+ case MTSETDRVBUFFER: /* Set drive buffering */
+ case SET_DENS_AND_BLK: /* Set density and block size */
+ chg_eof = 0;
+ if (STp->dirty || (STp->buffer)->buffer_bytes != 0)
+ return (-EIO); /* Not allowed if data in buffer */
+ if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) &&
+ (arg & MT_ST_BLKSIZE_MASK) != 0 &&
+ (arg & MT_ST_BLKSIZE_MASK) != STp->block_size ) {
+ printk(KERN_WARNING "%s:W: Illegal to set block size to %d%s.\n",
+ name, (int)(arg & MT_ST_BLKSIZE_MASK),
+ (OS_DATA_SIZE % (arg & MT_ST_BLKSIZE_MASK))?"":" now");
+ return (-EINVAL);
+ }
+ return 0; /* FIXME silently ignore if block size didn't change */
+
+ default:
+ return (-ENOSYS);
+ }
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, datalen, direction, timeout, MAX_RETRIES, 1);
+
+ ioctl_result = (STp->buffer)->syscall_result;
+
+ if (!SRpnt) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Couldn't exec scsi cmd for IOCTL\n", name);
+#endif
+ return ioctl_result;
+ }
+
+ if (!ioctl_result) { /* SCSI command successful */
+ STp->frame_seq_number = frame_seq_numbr;
+ STp->logical_blk_num = logical_blk_num;
+ }
+
+os_bypass:
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: IOCTL (%d) Result=%d\n", name, cmd_in, ioctl_result);
+#endif
+
+ if (!ioctl_result) { /* success */
+
+ if (cmd_in == MTFSFM) {
+ fileno--;
+ blkno--;
+ }
+ if (cmd_in == MTBSFM) {
+ fileno++;
+ blkno++;
+ }
+ STps->drv_block = blkno;
+ STps->drv_file = fileno;
+ STps->at_sm = at_sm;
+
+ if (cmd_in == MTEOM)
+ STps->eof = ST_EOD;
+ else if ((cmd_in == MTFSFM || cmd_in == MTBSF) && STps->eof == ST_FM_HIT) {
+ ioctl_result = osst_seek_logical_blk(STp, &SRpnt, STp->logical_blk_num-1);
+ STps->drv_block++;
+ STp->logical_blk_num++;
+ STp->frame_seq_number++;
+ STp->frame_in_buffer = 0;
+ STp->buffer->read_pointer = 0;
+ }
+ else if (cmd_in == MTFSF)
+ STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_FM;
+ else if (chg_eof)
+ STps->eof = ST_NOEOF;
+
+ if (cmd_in == MTOFFL || cmd_in == MTUNLOAD)
+ STp->rew_at_close = 0;
+ else if (cmd_in == MTLOAD) {
+ for (i=0; i < ST_NBR_PARTITIONS; i++) {
+ STp->ps[i].rw = ST_IDLE;
+ STp->ps[i].last_block_valid = 0;/* FIXME - where else is this field maintained? */
+ }
+ STp->partition = 0;
+ }
+
+ if (cmd_in == MTREW) {
+ ioctl_result = osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos);
+ if (ioctl_result > 0)
+ ioctl_result = 0;
+ }
+
+ } else if (cmd_in == MTBSF || cmd_in == MTBSFM ) {
+ if (osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos) < 0)
+ STps->drv_file = STps->drv_block = -1;
+ else
+ STps->drv_file = STps->drv_block = 0;
+ STps->eof = ST_NOEOF;
+ } else if (cmd_in == MTFSF || cmd_in == MTFSFM) {
+ if (osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos) < 0)
+ STps->drv_file = STps->drv_block = -1;
+ else {
+ STps->drv_file = STp->filemark_cnt;
+ STps->drv_block = 0;
+ }
+ STps->eof = ST_EOD;
+ } else if (cmd_in == MTBSR || cmd_in == MTFSR || cmd_in == MTWEOF || cmd_in == MTEOM) {
+ STps->drv_file = STps->drv_block = (-1);
+ STps->eof = ST_NOEOF;
+ STp->header_ok = 0;
+ } else if (cmd_in == MTERASE) {
+ STp->header_ok = 0;
+ } else if (SRpnt) { /* SCSI command was not completely successful. */
+ if (SRpnt->sense[2] & 0x40) {
+ STps->eof = ST_EOM_OK;
+ STps->drv_block = 0;
+ }
+ if (chg_eof)
+ STps->eof = ST_NOEOF;
+
+ if ((SRpnt->sense[2] & 0x0f) == BLANK_CHECK)
+ STps->eof = ST_EOD;
+
+ if (cmd_in == MTLOAD && osst_wait_for_medium(STp, &SRpnt, 60))
+ ioctl_result = osst_wait_ready(STp, &SRpnt, 5 * 60, OSST_WAIT_POSITION_COMPLETE);
+ }
+ *aSRpnt = SRpnt;
+
+ return ioctl_result;
+}
+
+
+/* Open the device */
+static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
+{
+ unsigned short flags;
+ int i, b_size, new_session = 0, retval = 0;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct osst_request * SRpnt = NULL;
+ struct osst_tape * STp;
+ struct st_modedef * STm;
+ struct st_partstat * STps;
+ char * name;
+ int dev = TAPE_NR(inode);
+ int mode = TAPE_MODE(inode);
+
+ /*
+ * We really want to do nonseekable_open(inode, filp); here, but some
+ * versions of tar incorrectly call lseek on tapes and bail out if that
+ * fails. So we disallow pread() and pwrite(), but permit lseeks.
+ */
+ filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
+
+ write_lock(&os_scsi_tapes_lock);
+ if (dev >= osst_max_dev || os_scsi_tapes == NULL ||
+ (STp = os_scsi_tapes[dev]) == NULL || !STp->device) {
+ write_unlock(&os_scsi_tapes_lock);
+ return (-ENXIO);
+ }
+
+ name = tape_name(STp);
+
+ if (STp->in_use) {
+ write_unlock(&os_scsi_tapes_lock);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Device already in use.\n", name);
+#endif
+ return (-EBUSY);
+ }
+ if (scsi_device_get(STp->device)) {
+ write_unlock(&os_scsi_tapes_lock);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Failed scsi_device_get.\n", name);
+#endif
+ return (-ENXIO);
+ }
+ filp->private_data = STp;
+ STp->in_use = 1;
+ write_unlock(&os_scsi_tapes_lock);
+ STp->rew_at_close = TAPE_REWIND(inode);
+
+ if( !scsi_block_when_processing_errors(STp->device) ) {
+ return -ENXIO;
+ }
+
+ if (mode != STp->current_mode) {
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Mode change from %d to %d.\n",
+ name, STp->current_mode, mode);
+#endif
+ new_session = 1;
+ STp->current_mode = mode;
+ }
+ STm = &(STp->modes[STp->current_mode]);
+
+ flags = filp->f_flags;
+ STp->write_prot = ((flags & O_ACCMODE) == O_RDONLY);
+
+ STp->raw = TAPE_IS_RAW(inode);
+ if (STp->raw)
+ STp->header_ok = 0;
+
+ /* Allocate data segments for this device's tape buffer */
+ if (!enlarge_buffer(STp->buffer, STp->restr_dma)) {
+ printk(KERN_ERR "%s:E: Unable to allocate memory segments for tape buffer.\n", name);
+ retval = (-EOVERFLOW);
+ goto err_out;
+ }
+ if (STp->buffer->buffer_size >= OS_FRAME_SIZE) {
+ for (i = 0, b_size = 0;
+ (i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE);
+ b_size += STp->buffer->sg[i++].length);
+ STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
+ STp->buffer->b_data, page_address(STp->buffer->sg[0].page));
+ printk(OSST_DEB_MSG "%s:D: AUX points to %p in segment %d at %p\n", name,
+ STp->buffer->aux, i, page_address(STp->buffer->sg[i].page));
+#endif
+ } else {
+ STp->buffer->aux = NULL; /* this had better never happen! */
+ printk(KERN_NOTICE "%s:A: Framesize %d too large for buffer.\n", name, OS_FRAME_SIZE);
+ retval = (-EIO);
+ goto err_out;
+ }
+ STp->buffer->writing = 0;
+ STp->buffer->syscall_result = 0;
+ STp->dirty = 0;
+ for (i=0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE;
+ }
+ STp->ready = ST_READY;
+#if DEBUG
+ STp->nbr_waits = STp->nbr_finished = 0;
+#endif
+
+ memset (cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(NULL, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
+ if (!SRpnt) {
+ retval = (STp->buffer)->syscall_result; /* FIXME - valid? */
+ goto err_out;
+ }
+ if ((SRpnt->sense[0] & 0x70) == 0x70 &&
+ (SRpnt->sense[2] & 0x0f) == NOT_READY &&
+ SRpnt->sense[12] == 4 ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Unit not ready, cause %x\n", name, SRpnt->sense[13]);
+#endif
+ if (filp->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ goto err_out;
+ }
+ if (SRpnt->sense[13] == 2) { /* initialize command required (LOAD) */
+ memset (cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = START_STOP;
+ cmd[1] = 1;
+ cmd[4] = 1;
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
+ STp->timeout, MAX_RETRIES, 1);
+ }
+ osst_wait_ready(STp, &SRpnt, (SRpnt->sense[13]==1?15:3) * 60, 0);
+ }
+ if ((SRpnt->sense[0] & 0x70) == 0x70 &&
+ (SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) { /* New media? */
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Unit wants attention\n", name);
+#endif
+ STp->header_ok = 0;
+
+ for (i=0; i < 10; i++) {
+
+ memset (cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
+ STp->timeout, MAX_RETRIES, 1);
+ if ((SRpnt->sense[0] & 0x70) != 0x70 ||
+ (SRpnt->sense[2] & 0x0f) != UNIT_ATTENTION)
+ break;
+ }
+
+ STp->pos_unknown = 0;
+ STp->partition = STp->new_partition = 0;
+ if (STp->can_partitions)
+ STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
+ for (i=0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE; /* FIXME - seems to be redundant... */
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = 0;
+ STps->drv_file = 0 ;
+ }
+ new_session = 1;
+ STp->recover_count = 0;
+ STp->abort_count = 0;
+ }
+ /*
+ * if we have valid headers from before, and the drive/tape seem untouched,
+ * open without reconfiguring and re-reading the headers
+ */
+ if (!STp->buffer->syscall_result && STp->header_ok &&
+ !SRpnt->result && SRpnt->sense[0] == 0) {
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = 8;
+ cmd[2] = VENDOR_IDENT_PAGE;
+ cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
+
+ if (STp->buffer->syscall_result ||
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 2] != 'L' ||
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 3] != 'I' ||
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 4] != 'N' ||
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 5] != '4' ) {
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Signature was changed to %c%c%c%c\n", name,
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 2],
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 3],
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 4],
+ STp->buffer->b_data[MODE_HEADER_LENGTH + 5]);
+#endif
+ STp->header_ok = 0;
+ }
+ i = STp->first_frame_position;
+ if (STp->header_ok && i == osst_get_frame_position(STp, &SRpnt)) {
+ if (STp->door_locked == ST_UNLOCKED) {
+ if (do_door_lock(STp, 1))
+ printk(KERN_INFO "%s:I: Can't lock drive door\n", name);
+ else
+ STp->door_locked = ST_LOCKED_AUTO;
+ }
+ if (!STp->frame_in_buffer) {
+ STp->block_size = (STm->default_blksize > 0) ?
+ STm->default_blksize : OS_DATA_SIZE;
+ STp->buffer->buffer_bytes = STp->buffer->read_pointer = 0;
+ }
+ STp->buffer->buffer_blocks = OS_DATA_SIZE / STp->block_size;
+ STp->fast_open = 1;
+ osst_release_request(SRpnt);
+ return 0;
+ }
+#if DEBUG
+ if (i != STp->first_frame_position)
+ printk(OSST_DEB_MSG "%s:D: Tape position changed from %d to %d\n",
+ name, i, STp->first_frame_position);
+#endif
+ STp->header_ok = 0;
+ }
+ STp->fast_open = 0;
+
+ if ((STp->buffer)->syscall_result != 0 && /* in all error conditions except no medium */
+ (SRpnt->sense[2] != 2 || SRpnt->sense[12] != 0x3A) ) {
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SELECT;
+ cmd[1] = 0x10;
+ cmd[4] = 4 + MODE_HEADER_LENGTH;
+
+ (STp->buffer)->b_data[0] = cmd[4] - 1;
+ (STp->buffer)->b_data[1] = 0; /* Medium Type - ignoring */
+ (STp->buffer)->b_data[2] = 0; /* Reserved */
+ (STp->buffer)->b_data[3] = 0; /* Block Descriptor Length */
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = 0x3f;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 1;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 2;
+ (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = 3;
+
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Applying soft reset\n", name);
+#endif
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
+
+ STp->header_ok = 0;
+
+ for (i=0; i < 10; i++) {
+
+ memset (cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+
+ SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
+ STp->timeout, MAX_RETRIES, 1);
+ if ((SRpnt->sense[0] & 0x70) != 0x70 ||
+ (SRpnt->sense[2] & 0x0f) == NOT_READY)
+ break;
+
+ if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
+ int j;
+
+ STp->pos_unknown = 0;
+ STp->partition = STp->new_partition = 0;
+ if (STp->can_partitions)
+ STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
+ for (j = 0; j < ST_NBR_PARTITIONS; j++) {
+ STps = &(STp->ps[j]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = 0;
+ STps->drv_file = 0 ;
+ }
+ new_session = 1;
+ }
+ }
+ }
+
+ if (osst_wait_ready(STp, &SRpnt, 15 * 60, 0)) /* FIXME - not allowed with NOBLOCK */
+ printk(KERN_INFO "%s:I: Device did not become Ready in open\n", name);
+
+ if ((STp->buffer)->syscall_result != 0) {
+ if ((STp->device)->scsi_level >= SCSI_2 &&
+ (SRpnt->sense[0] & 0x70) == 0x70 &&
+ (SRpnt->sense[2] & 0x0f) == NOT_READY &&
+ SRpnt->sense[12] == 0x3a) { /* Check ASC */
+ STp->ready = ST_NO_TAPE;
+ } else
+ STp->ready = ST_NOT_READY;
+ osst_release_request(SRpnt);
+ SRpnt = NULL;
+ STp->density = 0; /* Clear the erroneous "residue" */
+ STp->write_prot = 0;
+ STp->block_size = 0;
+ STp->ps[0].drv_file = STp->ps[0].drv_block = (-1);
+ STp->partition = STp->new_partition = 0;
+ STp->door_locked = ST_UNLOCKED;
+ return 0;
+ }
+
+ osst_configure_onstream(STp, &SRpnt);
+
+ STp->block_size = STp->raw ? OS_FRAME_SIZE : (
+ (STm->default_blksize > 0) ? STm->default_blksize : OS_DATA_SIZE);
+ STp->buffer->buffer_blocks = STp->raw ? 1 : OS_DATA_SIZE / STp->block_size;
+ STp->buffer->buffer_bytes =
+ STp->buffer->read_pointer =
+ STp->frame_in_buffer = 0;
+
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Block size: %d, frame size: %d, buffer size: %d (%d blocks).\n",
+ name, STp->block_size, OS_FRAME_SIZE, (STp->buffer)->buffer_size,
+ (STp->buffer)->buffer_blocks);
+#endif
+
+ if (STp->drv_write_prot) {
+ STp->write_prot = 1;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Write protected\n", name);
+#endif
+ if ((flags & O_ACCMODE) == O_WRONLY || (flags & O_ACCMODE) == O_RDWR) {
+ retval = (-EROFS);
+ goto err_out;
+ }
+ }
+
+ if (new_session) { /* Change the drive parameters for the new mode */
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: New Session\n", name);
+#endif
+ STp->density_changed = STp->blksize_changed = 0;
+ STp->compression_changed = 0;
+ }
+
+ /*
+ * properly position the tape and check the ADR headers
+ */
+ if (STp->door_locked == ST_UNLOCKED) {
+ if (do_door_lock(STp, 1))
+ printk(KERN_INFO "%s:I: Can't lock drive door\n", name);
+ else
+ STp->door_locked = ST_LOCKED_AUTO;
+ }
+
+ osst_analyze_headers(STp, &SRpnt);
+
+ osst_release_request(SRpnt);
+ SRpnt = NULL;
+
+ return 0;
+
+err_out:
+ if (SRpnt != NULL)
+ osst_release_request(SRpnt);
+ normalize_buffer(STp->buffer);
+ STp->header_ok = 0;
+ STp->in_use = 0;
+ scsi_device_put(STp->device);
+
+ return retval;
+}
+
+/* BKL pushdown: spaghetti avoidance wrapper */
+static int os_scsi_tape_open(struct inode * inode, struct file * filp)
+{
+ int ret;
+
+ mutex_lock(&osst_int_mutex);
+ ret = __os_scsi_tape_open(inode, filp);
+ mutex_unlock(&osst_int_mutex);
+ return ret;
+}
+
+
+
+/* Flush the tape buffer before close */
+static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
+{
+ int result = 0, result2;
+ struct osst_tape * STp = filp->private_data;
+ struct st_modedef * STm = &(STp->modes[STp->current_mode]);
+ struct st_partstat * STps = &(STp->ps[STp->partition]);
+ struct osst_request * SRpnt = NULL;
+ char * name = tape_name(STp);
+
+ if (file_count(filp) > 1)
+ return 0;
+
+ if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) {
+ STp->write_type = OS_WRITE_DATA;
+ result = osst_flush_write_buffer(STp, &SRpnt);
+ if (result != 0 && result != (-ENOSPC))
+ goto out;
+ }
+ if ( STps->rw >= ST_WRITING && !STp->pos_unknown) {
+
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG "%s:D: File length %ld bytes.\n",
+ name, (long)(filp->f_pos));
+ printk(OSST_DEB_MSG "%s:D: Async write waits %d, finished %d.\n",
+ name, STp->nbr_waits, STp->nbr_finished);
+ }
+#endif
+ result = osst_write_trailer(STp, &SRpnt, !(STp->rew_at_close));
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG "%s:D: Buffer flushed, %d EOF(s) written\n",
+ name, 1+STp->two_fm);
+#endif
+ }
+ else if (!STp->rew_at_close) {
+ STps = &(STp->ps[STp->partition]);
+ if (!STm->sysv || STps->rw != ST_READING) {
+ if (STp->can_bsr)
+ result = osst_flush_buffer(STp, &SRpnt, 0); /* this is the default path */
+ else if (STps->eof == ST_FM_HIT) {
+ result = cross_eof(STp, &SRpnt, 0);
+ if (result) {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_FM;
+ }
+ else
+ STps->eof = ST_NOEOF;
+ }
+ }
+ else if ((STps->eof == ST_NOEOF &&
+ !(result = cross_eof(STp, &SRpnt, 1))) ||
+ STps->eof == ST_FM_HIT) {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_FM;
+ }
+ }
+
+out:
+ if (STp->rew_at_close) {
+ result2 = osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos);
+ STps->drv_file = STps->drv_block = STp->frame_seq_number = STp->logical_blk_num = 0;
+ if (result == 0 && result2 < 0)
+ result = result2;
+ }
+ if (SRpnt) osst_release_request(SRpnt);
+
+ if (STp->abort_count || STp->recover_count) {
+ printk(KERN_INFO "%s:I:", name);
+ if (STp->abort_count)
+ printk(" %d unrecovered errors", STp->abort_count);
+ if (STp->recover_count)
+ printk(" %d recovered errors", STp->recover_count);
+ if (STp->write_count)
+ printk(" in %d frames written", STp->write_count);
+ if (STp->read_count)
+ printk(" in %d frames read", STp->read_count);
+ printk("\n");
+ STp->recover_count = 0;
+ STp->abort_count = 0;
+ }
+ STp->write_count = 0;
+ STp->read_count = 0;
+
+ return result;
+}
+
+
+/* Close the device and release it */
+static int os_scsi_tape_close(struct inode * inode, struct file * filp)
+{
+ int result = 0;
+ struct osst_tape * STp = filp->private_data;
+
+ if (STp->door_locked == ST_LOCKED_AUTO)
+ do_door_lock(STp, 0);
+
+ if (STp->raw)
+ STp->header_ok = 0;
+
+ normalize_buffer(STp->buffer);
+ write_lock(&os_scsi_tapes_lock);
+ STp->in_use = 0;
+ write_unlock(&os_scsi_tapes_lock);
+
+ scsi_device_put(STp->device);
+
+ return result;
+}
+
+
+/* The ioctl command */
+static long osst_ioctl(struct file * file,
+ unsigned int cmd_in, unsigned long arg)
+{
+ int i, cmd_nr, cmd_type, blk, retval = 0;
+ struct st_modedef * STm;
+ struct st_partstat * STps;
+ struct osst_request * SRpnt = NULL;
+ struct osst_tape * STp = file->private_data;
+ char * name = tape_name(STp);
+ void __user * p = (void __user *)arg;
+
+ mutex_lock(&osst_int_mutex);
+ if (mutex_lock_interruptible(&STp->lock)) {
+ mutex_unlock(&osst_int_mutex);
+ return -ERESTARTSYS;
+ }
+
+#if DEBUG
+ if (debugging && !STp->in_use) {
+ printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
+ retval = (-EIO);
+ goto out;
+ }
+#endif
+ STm = &(STp->modes[STp->current_mode]);
+ STps = &(STp->ps[STp->partition]);
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited.
+ */
+ retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in,
+ file->f_flags & O_NDELAY);
+ if (retval)
+ goto out;
+
+ cmd_type = _IOC_TYPE(cmd_in);
+ cmd_nr = _IOC_NR(cmd_in);
+#if DEBUG
+ printk(OSST_DEB_MSG "%s:D: Ioctl %d,%d in %s mode\n", name,
+ cmd_type, cmd_nr, STp->raw?"raw":"normal");
+#endif
+ if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) {
+ struct mtop mtc;
+ int auto_weof = 0;
+
+ if (_IOC_SIZE(cmd_in) != sizeof(mtc)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ i = copy_from_user((char *) &mtc, p, sizeof(struct mtop));
+ if (i) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
+ printk(KERN_WARNING "%s:W: MTSETDRVBUFFER only allowed for root.\n", name);
+ retval = (-EPERM);
+ goto out;
+ }
+
+ if (!STm->defined && (mtc.mt_op != MTSETDRVBUFFER && (mtc.mt_count & MT_ST_OPTIONS) == 0)) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if (!STp->pos_unknown) {
+
+ if (STps->eof == ST_FM_HIT) {
+ if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM|| mtc.mt_op == MTEOM) {
+ mtc.mt_count -= 1;
+ if (STps->drv_file >= 0)
+ STps->drv_file += 1;
+ }
+ else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) {
+ mtc.mt_count += 1;
+ if (STps->drv_file >= 0)
+ STps->drv_file += 1;
+ }
+ }
+
+ if (mtc.mt_op == MTSEEK) {
+ /* Old position must be restored if partition will be changed */
+ i = !STp->can_partitions || (STp->new_partition != STp->partition);
+ }
+ else {
+ i = mtc.mt_op == MTREW || mtc.mt_op == MTOFFL ||
+ mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM ||
+ mtc.mt_op == MTLOCK || mtc.mt_op == MTLOAD ||
+ mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
+ mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM ||
+ mtc.mt_op == MTCOMPRESSION;
+ }
+ i = osst_flush_buffer(STp, &SRpnt, i);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ }
+ else {
+ /*
+ * If there was a bus reset, block further access
+ * to this device. If the user wants to rewind the tape,
+ * then reset the flag and allow access again.
+ */
+ if(mtc.mt_op != MTREW &&
+ mtc.mt_op != MTOFFL &&
+ mtc.mt_op != MTRETEN &&
+ mtc.mt_op != MTERASE &&
+ mtc.mt_op != MTSEEK &&
+ mtc.mt_op != MTEOM) {
+ retval = (-EIO);
+ goto out;
+ }
+ reset_state(STp);
+ /* remove this when the midlevel properly clears was_reset */
+ STp->device->was_reset = 0;
+ }
+
+ if (mtc.mt_op != MTCOMPRESSION && mtc.mt_op != MTLOCK &&
+ mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
+ mtc.mt_op != MTSETDENSITY && mtc.mt_op != MTSETDRVBUFFER &&
+ mtc.mt_op != MTMKPART && mtc.mt_op != MTSETPART &&
+ mtc.mt_op != MTWEOF && mtc.mt_op != MTWSM ) {
+
+ /*
+ * The user tells us to move to another position on the tape.
+ * If we were appending to the tape content, that would leave
+ * the tape without proper end, in that case write EOD and
+ * update the header to reflect its position.
+ */
+#if DEBUG
+ printk(KERN_WARNING "%s:D: auto_weod %s at ffp=%d,efp=%d,fsn=%d,lbn=%d,fn=%d,bn=%d\n", name,
+ STps->rw >= ST_WRITING ? "write" : STps->rw == ST_READING ? "read" : "idle",
+ STp->first_frame_position, STp->eod_frame_ppos, STp->frame_seq_number,
+ STp->logical_blk_num, STps->drv_file, STps->drv_block );
+#endif
+ if (STps->rw >= ST_WRITING && STp->first_frame_position >= STp->eod_frame_ppos) {
+ auto_weof = ((STp->write_type != OS_WRITE_NEW_MARK) &&
+ !(mtc.mt_op == MTREW || mtc.mt_op == MTOFFL));
+ i = osst_write_trailer(STp, &SRpnt,
+ !(mtc.mt_op == MTREW || mtc.mt_op == MTOFFL));
+#if DEBUG
+ printk(KERN_WARNING "%s:D: post trailer xeof=%d,ffp=%d,efp=%d,fsn=%d,lbn=%d,fn=%d,bn=%d\n",
+ name, auto_weof, STp->first_frame_position, STp->eod_frame_ppos,
+ STp->frame_seq_number, STp->logical_blk_num, STps->drv_file, STps->drv_block );
+#endif
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ }
+ STps->rw = ST_IDLE;
+ }
+
+ if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED)
+ do_door_lock(STp, 0); /* Ignore result! */
+
+ if (mtc.mt_op == MTSETDRVBUFFER &&
+ (mtc.mt_count & MT_ST_OPTIONS) != 0) {
+ retval = osst_set_options(STp, mtc.mt_count);
+ goto out;
+ }
+
+ if (mtc.mt_op == MTSETPART) {
+ if (mtc.mt_count >= STp->nbr_partitions)
+ retval = -EINVAL;
+ else {
+ STp->new_partition = mtc.mt_count;
+ retval = 0;
+ }
+ goto out;
+ }
+
+ if (mtc.mt_op == MTMKPART) {
+ if (!STp->can_partitions) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ if ((i = osst_int_ioctl(STp, &SRpnt, MTREW, 0)) < 0 /*||
+ (i = partition_tape(inode, mtc.mt_count)) < 0*/) {
+ retval = i;
+ goto out;
+ }
+ for (i=0; i < ST_NBR_PARTITIONS; i++) {
+ STp->ps[i].rw = ST_IDLE;
+ STp->ps[i].at_sm = 0;
+ STp->ps[i].last_block_valid = 0;
+ }
+ STp->partition = STp->new_partition = 0;
+ STp->nbr_partitions = 1; /* Bad guess ?-) */
+ STps->drv_block = STps->drv_file = 0;
+ retval = 0;
+ goto out;
+ }
+
+ if (mtc.mt_op == MTSEEK) {
+ if (STp->raw)
+ i = osst_set_frame_position(STp, &SRpnt, mtc.mt_count, 0);
+ else
+ i = osst_seek_sector(STp, &SRpnt, mtc.mt_count);
+ if (!STp->can_partitions)
+ STp->ps[0].rw = ST_IDLE;
+ retval = i;
+ goto out;
+ }
+
+ if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) {
+ retval = do_door_lock(STp, (mtc.mt_op == MTLOCK));
+ goto out;
+ }
+
+ if (auto_weof)
+ cross_eof(STp, &SRpnt, 0);
+
+ if (mtc.mt_op == MTCOMPRESSION)
+ retval = -EINVAL; /* OnStream drives don't have compression hardware */
+ else
+ /* MTBSF MTBSFM MTBSR MTBSS MTEOM MTERASE MTFSF MTFSFB MTFSR MTFSS
+ * MTLOAD MTOFFL MTRESET MTRETEN MTREW MTUNLOAD MTWEOF MTWSM */
+ retval = osst_int_ioctl(STp, &SRpnt, mtc.mt_op, mtc.mt_count);
+ goto out;
+ }
+
+ if (!STm->defined) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if ((i = osst_flush_buffer(STp, &SRpnt, 0)) < 0) {
+ retval = i;
+ goto out;
+ }
+
+ if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
+ struct mtget mt_status;
+
+ if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ mt_status.mt_type = MT_ISONSTREAM_SC;
+ mt_status.mt_erreg = STp->recover_erreg << MT_ST_SOFTERR_SHIFT;
+ mt_status.mt_dsreg =
+ ((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) |
+ ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
+ mt_status.mt_blkno = STps->drv_block;
+ mt_status.mt_fileno = STps->drv_file;
+ if (STp->block_size != 0) {
+ if (STps->rw == ST_WRITING)
+ mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size;
+ else if (STps->rw == ST_READING)
+ mt_status.mt_blkno -= ((STp->buffer)->buffer_bytes +
+ STp->block_size - 1) / STp->block_size;
+ }
+
+ mt_status.mt_gstat = 0;
+ if (STp->drv_write_prot)
+ mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff);
+ if (mt_status.mt_blkno == 0) {
+ if (mt_status.mt_fileno == 0)
+ mt_status.mt_gstat |= GMT_BOT(0xffffffff);
+ else
+ mt_status.mt_gstat |= GMT_EOF(0xffffffff);
+ }
+ mt_status.mt_resid = STp->partition;
+ if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR)
+ mt_status.mt_gstat |= GMT_EOT(0xffffffff);
+ else if (STps->eof >= ST_EOM_OK)
+ mt_status.mt_gstat |= GMT_EOD(0xffffffff);
+ if (STp->density == 1)
+ mt_status.mt_gstat |= GMT_D_800(0xffffffff);
+ else if (STp->density == 2)
+ mt_status.mt_gstat |= GMT_D_1600(0xffffffff);
+ else if (STp->density == 3)
+ mt_status.mt_gstat |= GMT_D_6250(0xffffffff);
+ if (STp->ready == ST_READY)
+ mt_status.mt_gstat |= GMT_ONLINE(0xffffffff);
+ if (STp->ready == ST_NO_TAPE)
+ mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff);
+ if (STps->at_sm)
+ mt_status.mt_gstat |= GMT_SM(0xffffffff);
+ if (STm->do_async_writes || (STm->do_buffer_writes && STp->block_size != 0) ||
+ STp->drv_buffer != 0)
+ mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff);
+
+ i = copy_to_user(p, &mt_status, sizeof(struct mtget));
+ if (i) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ STp->recover_erreg = 0; /* Clear after read */
+ retval = 0;
+ goto out;
+ } /* End of MTIOCGET */
+
+ if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
+ struct mtpos mt_pos;
+
+ if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ if (STp->raw)
+ blk = osst_get_frame_position(STp, &SRpnt);
+ else
+ blk = osst_get_sector(STp, &SRpnt);
+ if (blk < 0) {
+ retval = blk;
+ goto out;
+ }
+ mt_pos.mt_blkno = blk;
+ i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
+ if (i)
+ retval = -EFAULT;
+ goto out;
+ }
+ if (SRpnt) osst_release_request(SRpnt);
+
+ mutex_unlock(&STp->lock);
+
+ retval = scsi_ioctl(STp->device, cmd_in, p);
+ mutex_unlock(&osst_int_mutex);
+ return retval;
+
+out:
+ if (SRpnt) osst_release_request(SRpnt);
+
+ mutex_unlock(&STp->lock);
+ mutex_unlock(&osst_int_mutex);
+
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned long arg)
+{
+ struct osst_tape *STp = file->private_data;
+ struct scsi_device *sdev = STp->device;
+ int ret = -ENOIOCTLCMD;
+ if (sdev->host->hostt->compat_ioctl) {
+
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
+
+ }
+ return ret;
+}
+#endif
+
+
+
+/* Memory handling routines */
+
+/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
+static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
+{
+ int i;
+ gfp_t priority;
+ struct osst_buffer *tb;
+
+ if (from_initialization)
+ priority = GFP_ATOMIC;
+ else
+ priority = GFP_KERNEL;
+
+ i = sizeof(struct osst_buffer) + (osst_max_sg_segs - 1) * sizeof(struct scatterlist);
+ tb = kzalloc(i, priority);
+ if (!tb) {
+ printk(KERN_NOTICE "osst :I: Can't allocate new tape buffer.\n");
+ return NULL;
+ }
+
+ tb->sg_segs = tb->orig_sg_segs = 0;
+ tb->use_sg = max_sg;
+ tb->in_use = 1;
+ tb->dma = need_dma;
+ tb->buffer_size = 0;
+#if DEBUG
+ if (debugging)
+ printk(OSST_DEB_MSG
+ "osst :D: Allocated tape buffer skeleton (%d bytes, %d segments, dma: %d).\n",
+ i, max_sg, need_dma);
+#endif
+ return tb;
+}
+
+/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
+static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
+{
+ int segs, nbr, max_segs, b_size, order, got;
+ gfp_t priority;
+
+ if (STbuffer->buffer_size >= OS_FRAME_SIZE)
+ return 1;
+
+ if (STbuffer->sg_segs) {
+ printk(KERN_WARNING "osst :A: Buffer not previously normalized.\n");
+ normalize_buffer(STbuffer);
+ }
+ /* See how many segments we can use -- need at least two */
+ nbr = max_segs = STbuffer->use_sg;
+ if (nbr <= 2)
+ return 0;
+
+ priority = GFP_KERNEL /* | __GFP_NOWARN */;
+ if (need_dma)
+ priority |= GFP_DMA;
+
+ /* Try to allocate the first segment up to OS_DATA_SIZE and the others
+ big enough to reach the goal (code assumes no segments in place) */
+ for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
+ struct page *page = alloc_pages(priority, order);
+
+ STbuffer->sg[0].offset = 0;
+ if (page != NULL) {
+ sg_set_page(&STbuffer->sg[0], page, b_size, 0);
+ STbuffer->b_data = page_address(page);
+ break;
+ }
+ }
+ if (sg_page(&STbuffer->sg[0]) == NULL) {
+ printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
+ return 0;
+ }
+ /* Got initial segment of 'bsize,order', continue with same size if possible, except for AUX */
+ for (segs=STbuffer->sg_segs=1, got=b_size;
+ segs < max_segs && got < OS_FRAME_SIZE; ) {
+ struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
+ STbuffer->sg[segs].offset = 0;
+ if (page == NULL) {
+ printk(KERN_WARNING "osst :W: Failed to enlarge buffer to %d bytes.\n",
+ OS_FRAME_SIZE);
+#if DEBUG
+ STbuffer->buffer_size = got;
+#endif
+ normalize_buffer(STbuffer);
+ return 0;
+ }
+ sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size, 0);
+ got += STbuffer->sg[segs].length;
+ STbuffer->buffer_size = got;
+ STbuffer->sg_segs = ++segs;
+ }
+#if DEBUG
+ if (debugging) {
+ printk(OSST_DEB_MSG
+ "osst :D: Expanded tape buffer (%d bytes, %d->%d segments, dma: %d, at: %p).\n",
+ got, STbuffer->orig_sg_segs, STbuffer->sg_segs, need_dma, STbuffer->b_data);
+ printk(OSST_DEB_MSG
+ "osst :D: segment sizes: first %d at %p, last %d bytes at %p.\n",
+ STbuffer->sg[0].length, page_address(STbuffer->sg[0].page),
+ STbuffer->sg[segs-1].length, page_address(STbuffer->sg[segs-1].page));
+ }
+#endif
+
+ return 1;
+}
+
+
+/* Release the segments */
+static void normalize_buffer(struct osst_buffer *STbuffer)
+{
+ int i, order, b_size;
+
+ for (i=0; i < STbuffer->sg_segs; i++) {
+
+ for (b_size = PAGE_SIZE, order = 0;
+ b_size < STbuffer->sg[i].length;
+ b_size *= 2, order++);
+
+ __free_pages(sg_page(&STbuffer->sg[i]), order);
+ STbuffer->buffer_size -= STbuffer->sg[i].length;
+ }
+#if DEBUG
+ if (debugging && STbuffer->orig_sg_segs < STbuffer->sg_segs)
+ printk(OSST_DEB_MSG "osst :D: Buffer at %p normalized to %d bytes (segs %d).\n",
+ STbuffer->b_data, STbuffer->buffer_size, STbuffer->sg_segs);
+#endif
+ STbuffer->sg_segs = STbuffer->orig_sg_segs = 0;
+}
+
+
+/* Move data from the user buffer to the tape buffer. Returns zero (success) or
+ negative error code. */
+static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, int do_count)
+{
+ int i, cnt, res, offset;
+
+ for (i=0, offset=st_bp->buffer_bytes;
+ i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
+ offset -= st_bp->sg[i].length;
+ if (i == st_bp->sg_segs) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: Append_to_buffer offset overflow.\n");
+ return (-EIO);
+ }
+ for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
+ cnt = st_bp->sg[i].length - offset < do_count ?
+ st_bp->sg[i].length - offset : do_count;
+ res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
+ if (res)
+ return (-EFAULT);
+ do_count -= cnt;
+ st_bp->buffer_bytes += cnt;
+ ubp += cnt;
+ offset = 0;
+ }
+ if (do_count) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: Append_to_buffer overflow (left %d).\n",
+ do_count);
+ return (-EIO);
+ }
+ return 0;
+}
+
+
+/* Move data from the tape buffer to the user buffer. Returns zero (success) or
+ negative error code. */
+static int from_buffer(struct osst_buffer *st_bp, char __user *ubp, int do_count)
+{
+ int i, cnt, res, offset;
+
+ for (i=0, offset=st_bp->read_pointer;
+ i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
+ offset -= st_bp->sg[i].length;
+ if (i == st_bp->sg_segs) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: From_buffer offset overflow.\n");
+ return (-EIO);
+ }
+ for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
+ cnt = st_bp->sg[i].length - offset < do_count ?
+ st_bp->sg[i].length - offset : do_count;
+ res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
+ if (res)
+ return (-EFAULT);
+ do_count -= cnt;
+ st_bp->buffer_bytes -= cnt;
+ st_bp->read_pointer += cnt;
+ ubp += cnt;
+ offset = 0;
+ }
+ if (do_count) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: From_buffer overflow (left %d).\n", do_count);
+ return (-EIO);
+ }
+ return 0;
+}
+
+/* Sets the tail of the buffer after fill point to zero.
+ Returns zero (success) or negative error code. */
+static int osst_zero_buffer_tail(struct osst_buffer *st_bp)
+{
+ int i, offset, do_count, cnt;
+
+ for (i = 0, offset = st_bp->buffer_bytes;
+ i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
+ offset -= st_bp->sg[i].length;
+ if (i == st_bp->sg_segs) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: Zero_buffer offset overflow.\n");
+ return (-EIO);
+ }
+ for (do_count = OS_DATA_SIZE - st_bp->buffer_bytes;
+ i < st_bp->sg_segs && do_count > 0; i++) {
+ cnt = st_bp->sg[i].length - offset < do_count ?
+ st_bp->sg[i].length - offset : do_count ;
+ memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
+ do_count -= cnt;
+ offset = 0;
+ }
+ if (do_count) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: Zero_buffer overflow (left %d).\n", do_count);
+ return (-EIO);
+ }
+ return 0;
+}
+
+/* Copy a osst 32K chunk of memory into the buffer.
+ Returns zero (success) or negative error code. */
+static int osst_copy_to_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
+{
+ int i, cnt, do_count = OS_DATA_SIZE;
+
+ for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
+ cnt = st_bp->sg[i].length < do_count ?
+ st_bp->sg[i].length : do_count ;
+ memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
+ do_count -= cnt;
+ ptr += cnt;
+ }
+ if (do_count || i != st_bp->sg_segs-1) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: Copy_to_buffer overflow (left %d at sg %d).\n",
+ do_count, i);
+ return (-EIO);
+ }
+ return 0;
+}
+
+/* Copy a osst 32K chunk of memory from the buffer.
+ Returns zero (success) or negative error code. */
+static int osst_copy_from_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
+{
+ int i, cnt, do_count = OS_DATA_SIZE;
+
+ for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
+ cnt = st_bp->sg[i].length < do_count ?
+ st_bp->sg[i].length : do_count ;
+ memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
+ do_count -= cnt;
+ ptr += cnt;
+ }
+ if (do_count || i != st_bp->sg_segs-1) { /* Should never happen */
+ printk(KERN_WARNING "osst :A: Copy_from_buffer overflow (left %d at sg %d).\n",
+ do_count, i);
+ return (-EIO);
+ }
+ return 0;
+}
+
+
+/* Module housekeeping */
+
+static void validate_options (void)
+{
+ if (max_dev > 0)
+ osst_max_dev = max_dev;
+ if (write_threshold_kbs > 0)
+ osst_write_threshold = write_threshold_kbs * ST_KILOBYTE;
+ if (osst_write_threshold > osst_buffer_size)
+ osst_write_threshold = osst_buffer_size;
+ if (max_sg_segs >= OSST_FIRST_SG)
+ osst_max_sg_segs = max_sg_segs;
+#if DEBUG
+ printk(OSST_DEB_MSG "osst :D: max tapes %d, write threshold %d, max s/g segs %d.\n",
+ osst_max_dev, osst_write_threshold, osst_max_sg_segs);
+#endif
+}
+
+#ifndef MODULE
+/* Set the boot options. Syntax: osst=xxx,yyy,...
+ where xxx is write threshold in 1024 byte blocks,
+ and yyy is number of s/g segments to use. */
+static int __init osst_setup (char *str)
+{
+ int i, ints[5];
+ char *stp;
+
+ stp = get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] > 0) {
+ for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++)
+ *parms[i].val = ints[i + 1];
+ } else {
+ while (stp != NULL) {
+ for (i = 0; i < ARRAY_SIZE(parms); i++) {
+ int len = strlen(parms[i].name);
+ if (!strncmp(stp, parms[i].name, len) &&
+ (*(stp + len) == ':' || *(stp + len) == '=')) {
+ *parms[i].val =
+ simple_strtoul(stp + len + 1, NULL, 0);
+ break;
+ }
+ }
+ if (i >= ARRAY_SIZE(parms))
+ printk(KERN_INFO "osst :I: Illegal parameter in '%s'\n",
+ stp);
+ stp = strchr(stp, ',');
+ if (stp)
+ stp++;
+ }
+ }
+
+ return 1;
+}
+
+__setup("osst=", osst_setup);
+
+#endif
+
+static const struct file_operations osst_fops = {
+ .owner = THIS_MODULE,
+ .read = osst_read,
+ .write = osst_write,
+ .unlocked_ioctl = osst_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = osst_compat_ioctl,
+#endif
+ .open = os_scsi_tape_open,
+ .flush = os_scsi_tape_flush,
+ .release = os_scsi_tape_close,
+ .llseek = noop_llseek,
+};
+
+static int osst_supports(struct scsi_device * SDp)
+{
+ struct osst_support_data {
+ char *vendor;
+ char *model;
+ char *rev;
+ char *driver_hint; /* Name of the correct driver, NULL if unknown */
+ };
+
+static struct osst_support_data support_list[] = {
+ /* {"XXX", "Yy-", "", NULL}, example */
+ SIGS_FROM_OSST,
+ {NULL, }};
+
+ struct osst_support_data *rp;
+
+ /* We are willing to drive OnStream SC-x0 as well as the
+ * * IDE, ParPort, FireWire, USB variants, if accessible by
+ * * emulation layer (ide-scsi, usb-storage, ...) */
+
+ for (rp=&(support_list[0]); rp->vendor != NULL; rp++)
+ if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) &&
+ !strncmp(rp->model, SDp->model, strlen(rp->model)) &&
+ !strncmp(rp->rev, SDp->rev, strlen(rp->rev)))
+ return 1;
+ return 0;
+}
+
+/*
+ * sysfs support for osst driver parameter information
+ */
+
+static ssize_t osst_version_show(struct device_driver *ddd, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", osst_version);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, osst_version_show, NULL);
+
+static int osst_create_sysfs_files(struct device_driver *sysfs)
+{
+ return driver_create_file(sysfs, &driver_attr_version);
+}
+
+static void osst_remove_sysfs_files(struct device_driver *sysfs)
+{
+ driver_remove_file(sysfs, &driver_attr_version);
+}
+
+/*
+ * sysfs support for accessing ADR header information
+ */
+
+static ssize_t osst_adr_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
+ ssize_t l = 0;
+
+ if (STp && STp->header_ok && STp->linux_media)
+ l = snprintf(buf, PAGE_SIZE, "%d.%d\n", STp->header_cache->major_rev, STp->header_cache->minor_rev);
+ return l;
+}
+
+DEVICE_ATTR(ADR_rev, S_IRUGO, osst_adr_rev_show, NULL);
+
+static ssize_t osst_linux_media_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
+ ssize_t l = 0;
+
+ if (STp && STp->header_ok && STp->linux_media)
+ l = snprintf(buf, PAGE_SIZE, "LIN%d\n", STp->linux_media_version);
+ return l;
+}
+
+DEVICE_ATTR(media_version, S_IRUGO, osst_linux_media_version_show, NULL);
+
+static ssize_t osst_capacity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
+ ssize_t l = 0;
+
+ if (STp && STp->header_ok && STp->linux_media)
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STp->capacity);
+ return l;
+}
+
+DEVICE_ATTR(capacity, S_IRUGO, osst_capacity_show, NULL);
+
+static ssize_t osst_first_data_ppos_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
+ ssize_t l = 0;
+
+ if (STp && STp->header_ok && STp->linux_media)
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STp->first_data_ppos);
+ return l;
+}
+
+DEVICE_ATTR(BOT_frame, S_IRUGO, osst_first_data_ppos_show, NULL);
+
+static ssize_t osst_eod_frame_ppos_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
+ ssize_t l = 0;
+
+ if (STp && STp->header_ok && STp->linux_media)
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STp->eod_frame_ppos);
+ return l;
+}
+
+DEVICE_ATTR(EOD_frame, S_IRUGO, osst_eod_frame_ppos_show, NULL);
+
+static ssize_t osst_filemark_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
+ ssize_t l = 0;
+
+ if (STp && STp->header_ok && STp->linux_media)
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STp->filemark_cnt);
+ return l;
+}
+
+DEVICE_ATTR(file_count, S_IRUGO, osst_filemark_cnt_show, NULL);
+
+static struct class *osst_sysfs_class;
+
+static int osst_sysfs_init(void)
+{
+ osst_sysfs_class = class_create(THIS_MODULE, "onstream_tape");
+ if (IS_ERR(osst_sysfs_class)) {
+ printk(KERN_ERR "osst :W: Unable to register sysfs class\n");
+ return PTR_ERR(osst_sysfs_class);
+ }
+
+ return 0;
+}
+
+static void osst_sysfs_destroy(dev_t dev)
+{
+ device_destroy(osst_sysfs_class, dev);
+}
+
+static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * STp, char * name)
+{
+ struct device *osst_member;
+ int err;
+
+ osst_member = device_create(osst_sysfs_class, device, dev, STp,
+ "%s", name);
+ if (IS_ERR(osst_member)) {
+ printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
+ return PTR_ERR(osst_member);
+ }
+
+ err = device_create_file(osst_member, &dev_attr_ADR_rev);
+ if (err)
+ goto err_out;
+ err = device_create_file(osst_member, &dev_attr_media_version);
+ if (err)
+ goto err_out;
+ err = device_create_file(osst_member, &dev_attr_capacity);
+ if (err)
+ goto err_out;
+ err = device_create_file(osst_member, &dev_attr_BOT_frame);
+ if (err)
+ goto err_out;
+ err = device_create_file(osst_member, &dev_attr_EOD_frame);
+ if (err)
+ goto err_out;
+ err = device_create_file(osst_member, &dev_attr_file_count);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ osst_sysfs_destroy(dev);
+ return err;
+}
+
+static void osst_sysfs_cleanup(void)
+{
+ class_destroy(osst_sysfs_class);
+}
+
+/*
+ * osst startup / cleanup code
+ */
+
+static int osst_probe(struct device *dev)
+{
+ struct scsi_device * SDp = to_scsi_device(dev);
+ struct osst_tape * tpnt;
+ struct st_modedef * STm;
+ struct st_partstat * STps;
+ struct osst_buffer * buffer;
+ struct gendisk * drive;
+ int i, dev_num, err = -ENODEV;
+
+ if (SDp->type != TYPE_TAPE || !osst_supports(SDp))
+ return -ENODEV;
+
+ drive = alloc_disk(1);
+ if (!drive) {
+ printk(KERN_ERR "osst :E: Out of memory. Device not attached.\n");
+ return -ENODEV;
+ }
+
+ /* if this is the first attach, build the infrastructure */
+ write_lock(&os_scsi_tapes_lock);
+ if (os_scsi_tapes == NULL) {
+ os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC);
+ if (os_scsi_tapes == NULL) {
+ write_unlock(&os_scsi_tapes_lock);
+ printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
+ goto out_put_disk;
+ }
+ for (i=0; i < osst_max_dev; ++i) os_scsi_tapes[i] = NULL;
+ }
+
+ if (osst_nr_dev >= osst_max_dev) {
+ write_unlock(&os_scsi_tapes_lock);
+ printk(KERN_ERR "osst :E: Too many tape devices (max. %d).\n", osst_max_dev);
+ goto out_put_disk;
+ }
+
+ /* find a free minor number */
+ for (i = 0; i < osst_max_dev && os_scsi_tapes[i]; i++)
+ ;
+ if(i >= osst_max_dev) panic ("Scsi_devices corrupt (osst)");
+ dev_num = i;
+
+ /* allocate a struct osst_tape for this device */
+ tpnt = kzalloc(sizeof(struct osst_tape), GFP_ATOMIC);
+ if (!tpnt) {
+ write_unlock(&os_scsi_tapes_lock);
+ printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n");
+ goto out_put_disk;
+ }
+
+ /* allocate a buffer for this device */
+ i = SDp->host->sg_tablesize;
+ if (osst_max_sg_segs < i)
+ i = osst_max_sg_segs;
+ buffer = new_tape_buffer(1, SDp->host->unchecked_isa_dma, i);
+ if (buffer == NULL) {
+ write_unlock(&os_scsi_tapes_lock);
+ printk(KERN_ERR "osst :E: Unable to allocate a tape buffer, device not attached.\n");
+ kfree(tpnt);
+ goto out_put_disk;
+ }
+ os_scsi_tapes[dev_num] = tpnt;
+ tpnt->buffer = buffer;
+ tpnt->device = SDp;
+ drive->private_data = &tpnt->driver;
+ sprintf(drive->disk_name, "osst%d", dev_num);
+ tpnt->driver = &osst_template;
+ tpnt->drive = drive;
+ tpnt->in_use = 0;
+ tpnt->capacity = 0xfffff;
+ tpnt->dirty = 0;
+ tpnt->drv_buffer = 1; /* Try buffering if no mode sense */
+ tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
+ tpnt->density = 0;
+ tpnt->do_auto_lock = OSST_AUTO_LOCK;
+ tpnt->can_bsr = OSST_IN_FILE_POS;
+ tpnt->can_partitions = 0;
+ tpnt->two_fm = OSST_TWO_FM;
+ tpnt->fast_mteom = OSST_FAST_MTEOM;
+ tpnt->scsi2_logical = OSST_SCSI2LOGICAL; /* FIXME */
+ tpnt->write_threshold = osst_write_threshold;
+ tpnt->default_drvbuffer = 0xff; /* No forced buffering */
+ tpnt->partition = 0;
+ tpnt->new_partition = 0;
+ tpnt->nbr_partitions = 0;
+ tpnt->min_block = 512;
+ tpnt->max_block = OS_DATA_SIZE;
+ tpnt->timeout = OSST_TIMEOUT;
+ tpnt->long_timeout = OSST_LONG_TIMEOUT;
+
+ /* Recognize OnStream tapes */
+ /* We don't need to test for OnStream, as this has been done in detect () */
+ tpnt->os_fw_rev = osst_parse_firmware_rev (SDp->rev);
+ tpnt->omit_blklims = 1;
+
+ tpnt->poll = (strncmp(SDp->model, "DI-", 3) == 0) ||
+ (strncmp(SDp->model, "FW-", 3) == 0) || OSST_FW_NEED_POLL(tpnt->os_fw_rev,SDp);
+ tpnt->frame_in_buffer = 0;
+ tpnt->header_ok = 0;
+ tpnt->linux_media = 0;
+ tpnt->header_cache = NULL;
+
+ for (i=0; i < ST_NBR_MODES; i++) {
+ STm = &(tpnt->modes[i]);
+ STm->defined = 0;
+ STm->sysv = OSST_SYSV;
+ STm->defaults_for_writes = 0;
+ STm->do_async_writes = OSST_ASYNC_WRITES;
+ STm->do_buffer_writes = OSST_BUFFER_WRITES;
+ STm->do_read_ahead = OSST_READ_AHEAD;
+ STm->default_compression = ST_DONT_TOUCH;
+ STm->default_blksize = 512;
+ STm->default_density = (-1); /* No forced density */
+ }
+
+ for (i=0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(tpnt->ps[i]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = (-1);
+ STps->drv_file = (-1);
+ }
+
+ tpnt->current_mode = 0;
+ tpnt->modes[0].defined = 1;
+ tpnt->modes[2].defined = 1;
+ tpnt->density_changed = tpnt->compression_changed = tpnt->blksize_changed = 0;
+
+ mutex_init(&tpnt->lock);
+ osst_nr_dev++;
+ write_unlock(&os_scsi_tapes_lock);
+
+ {
+ char name[8];
+
+ /* Rewind entry */
+ err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num), dev, tpnt, tape_name(tpnt));
+ if (err)
+ goto out_free_buffer;
+
+ /* No-rewind entry */
+ snprintf(name, 8, "%s%s", "n", tape_name(tpnt));
+ err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num + 128), dev, tpnt, name);
+ if (err)
+ goto out_free_sysfs1;
+ }
+
+ sdev_printk(KERN_INFO, SDp,
+ "osst :I: Attached OnStream %.5s tape as %s\n",
+ SDp->model, tape_name(tpnt));
+
+ return 0;
+
+out_free_sysfs1:
+ osst_sysfs_destroy(MKDEV(OSST_MAJOR, dev_num));
+out_free_buffer:
+ kfree(buffer);
+out_put_disk:
+ put_disk(drive);
+ return err;
+};
+
+static int osst_remove(struct device *dev)
+{
+ struct scsi_device * SDp = to_scsi_device(dev);
+ struct osst_tape * tpnt;
+ int i;
+
+ if ((SDp->type != TYPE_TAPE) || (osst_nr_dev <= 0))
+ return 0;
+
+ write_lock(&os_scsi_tapes_lock);
+ for(i=0; i < osst_max_dev; i++) {
+ if((tpnt = os_scsi_tapes[i]) && (tpnt->device == SDp)) {
+ osst_sysfs_destroy(MKDEV(OSST_MAJOR, i));
+ osst_sysfs_destroy(MKDEV(OSST_MAJOR, i+128));
+ tpnt->device = NULL;
+ put_disk(tpnt->drive);
+ os_scsi_tapes[i] = NULL;
+ osst_nr_dev--;
+ write_unlock(&os_scsi_tapes_lock);
+ vfree(tpnt->header_cache);
+ if (tpnt->buffer) {
+ normalize_buffer(tpnt->buffer);
+ kfree(tpnt->buffer);
+ }
+ kfree(tpnt);
+ return 0;
+ }
+ }
+ write_unlock(&os_scsi_tapes_lock);
+ return 0;
+}
+
+static int __init init_osst(void)
+{
+ int err;
+
+ printk(KERN_INFO "osst :I: Tape driver with OnStream support version %s\nosst :I: %s\n", osst_version, cvsid);
+
+ validate_options();
+
+ err = osst_sysfs_init();
+ if (err)
+ return err;
+
+ err = register_chrdev(OSST_MAJOR, "osst", &osst_fops);
+ if (err < 0) {
+ printk(KERN_ERR "osst :E: Unable to register major %d for OnStream tapes\n", OSST_MAJOR);
+ goto err_out;
+ }
+
+ err = scsi_register_driver(&osst_template.gendrv);
+ if (err)
+ goto err_out_chrdev;
+
+ err = osst_create_sysfs_files(&osst_template.gendrv);
+ if (err)
+ goto err_out_scsidrv;
+
+ return 0;
+
+err_out_scsidrv:
+ scsi_unregister_driver(&osst_template.gendrv);
+err_out_chrdev:
+ unregister_chrdev(OSST_MAJOR, "osst");
+err_out:
+ osst_sysfs_cleanup();
+ return err;
+}
+
+static void __exit exit_osst (void)
+{
+ int i;
+ struct osst_tape * STp;
+
+ osst_remove_sysfs_files(&osst_template.gendrv);
+ scsi_unregister_driver(&osst_template.gendrv);
+ unregister_chrdev(OSST_MAJOR, "osst");
+ osst_sysfs_cleanup();
+
+ if (os_scsi_tapes) {
+ for (i=0; i < osst_max_dev; ++i) {
+ if (!(STp = os_scsi_tapes[i])) continue;
+ /* This is defensive, supposed to happen during detach */
+ vfree(STp->header_cache);
+ if (STp->buffer) {
+ normalize_buffer(STp->buffer);
+ kfree(STp->buffer);
+ }
+ put_disk(STp->drive);
+ kfree(STp);
+ }
+ kfree(os_scsi_tapes);
+ }
+ printk(KERN_INFO "osst :I: Unloaded.\n");
+}
+
+module_init(init_osst);
+module_exit(exit_osst);
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
new file mode 100644
index 000000000..b4fea98ba
--- /dev/null
+++ b/drivers/scsi/osst.h
@@ -0,0 +1,650 @@
+/*
+ * $Header: /cvsroot/osst/Driver/osst.h,v 1.16 2005/01/01 21:13:35 wriede Exp $
+ */
+
+#include <asm/byteorder.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+/* FIXME - rename and use the following two types or delete them!
+ * and the types really should go to st.h anyway...
+ * INQUIRY packet command - Data Format (From Table 6-8 of QIC-157C)
+ */
+typedef struct {
+ unsigned device_type :5; /* Peripheral Device Type */
+ unsigned reserved0_765 :3; /* Peripheral Qualifier - Reserved */
+ unsigned reserved1_6t0 :7; /* Reserved */
+ unsigned rmb :1; /* Removable Medium Bit */
+ unsigned ansi_version :3; /* ANSI Version */
+ unsigned ecma_version :3; /* ECMA Version */
+ unsigned iso_version :2; /* ISO Version */
+ unsigned response_format :4; /* Response Data Format */
+ unsigned reserved3_45 :2; /* Reserved */
+ unsigned reserved3_6 :1; /* TrmIOP - Reserved */
+ unsigned reserved3_7 :1; /* AENC - Reserved */
+ u8 additional_length; /* Additional Length (total_length-4) */
+ u8 rsv5, rsv6, rsv7; /* Reserved */
+ u8 vendor_id[8]; /* Vendor Identification */
+ u8 product_id[16]; /* Product Identification */
+ u8 revision_level[4]; /* Revision Level */
+ u8 vendor_specific[20]; /* Vendor Specific - Optional */
+ u8 reserved56t95[40]; /* Reserved - Optional */
+ /* Additional information may be returned */
+} idetape_inquiry_result_t;
+
+/*
+ * READ POSITION packet command - Data Format (From Table 6-57)
+ */
+typedef struct {
+ unsigned reserved0_10 :2; /* Reserved */
+ unsigned bpu :1; /* Block Position Unknown */
+ unsigned reserved0_543 :3; /* Reserved */
+ unsigned eop :1; /* End Of Partition */
+ unsigned bop :1; /* Beginning Of Partition */
+ u8 partition; /* Partition Number */
+ u8 reserved2, reserved3; /* Reserved */
+ u32 first_block; /* First Block Location */
+ u32 last_block; /* Last Block Location (Optional) */
+ u8 reserved12; /* Reserved */
+ u8 blocks_in_buffer[3]; /* Blocks In Buffer - (Optional) */
+ u32 bytes_in_buffer; /* Bytes In Buffer (Optional) */
+} idetape_read_position_result_t;
+
+/*
+ * Follows structures which are related to the SELECT SENSE / MODE SENSE
+ * packet commands.
+ */
+#define COMPRESSION_PAGE 0x0f
+#define COMPRESSION_PAGE_LENGTH 16
+
+#define CAPABILITIES_PAGE 0x2a
+#define CAPABILITIES_PAGE_LENGTH 20
+
+#define TAPE_PARAMTR_PAGE 0x2b
+#define TAPE_PARAMTR_PAGE_LENGTH 16
+
+#define NUMBER_RETRIES_PAGE 0x2f
+#define NUMBER_RETRIES_PAGE_LENGTH 4
+
+#define BLOCK_SIZE_PAGE 0x30
+#define BLOCK_SIZE_PAGE_LENGTH 4
+
+#define BUFFER_FILLING_PAGE 0x33
+#define BUFFER_FILLING_PAGE_LENGTH 4
+
+#define VENDOR_IDENT_PAGE 0x36
+#define VENDOR_IDENT_PAGE_LENGTH 8
+
+#define LOCATE_STATUS_PAGE 0x37
+#define LOCATE_STATUS_PAGE_LENGTH 0
+
+#define MODE_HEADER_LENGTH 4
+
+
+/*
+ * REQUEST SENSE packet command result - Data Format.
+ */
+typedef struct {
+ unsigned error_code :7; /* Current of deferred errors */
+ unsigned valid :1; /* The information field conforms to QIC-157C */
+ u8 reserved1 :8; /* Segment Number - Reserved */
+ unsigned sense_key :4; /* Sense Key */
+ unsigned reserved2_4 :1; /* Reserved */
+ unsigned ili :1; /* Incorrect Length Indicator */
+ unsigned eom :1; /* End Of Medium */
+ unsigned filemark :1; /* Filemark */
+ u32 information __attribute__ ((packed));
+ u8 asl; /* Additional sense length (n-7) */
+ u32 command_specific; /* Additional command specific information */
+ u8 asc; /* Additional Sense Code */
+ u8 ascq; /* Additional Sense Code Qualifier */
+ u8 replaceable_unit_code; /* Field Replaceable Unit Code */
+ unsigned sk_specific1 :7; /* Sense Key Specific */
+ unsigned sksv :1; /* Sense Key Specific information is valid */
+ u8 sk_specific2; /* Sense Key Specific */
+ u8 sk_specific3; /* Sense Key Specific */
+ u8 pad[2]; /* Padding to 20 bytes */
+} idetape_request_sense_result_t;
+
+/*
+ * Mode Parameter Header for the MODE SENSE packet command
+ */
+typedef struct {
+ u8 mode_data_length; /* Length of the following data transfer */
+ u8 medium_type; /* Medium Type */
+ u8 dsp; /* Device Specific Parameter */
+ u8 bdl; /* Block Descriptor Length */
+} osst_mode_parameter_header_t;
+
+/*
+ * Mode Parameter Block Descriptor the MODE SENSE packet command
+ *
+ * Support for block descriptors is optional.
+ */
+typedef struct {
+ u8 density_code; /* Medium density code */
+ u8 blocks[3]; /* Number of blocks */
+ u8 reserved4; /* Reserved */
+ u8 length[3]; /* Block Length */
+} osst_parameter_block_descriptor_t;
+
+/*
+ * The Data Compression Page, as returned by the MODE SENSE packet command.
+ */
+typedef struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned ps :1;
+ unsigned reserved0 :1; /* Reserved */
+ unsigned page_code :6; /* Page Code - Should be 0xf */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned page_code :6; /* Page Code - Should be 0xf */
+ unsigned reserved0 :1; /* Reserved */
+ unsigned ps :1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 page_length; /* Page Length - Should be 14 */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned dce :1; /* Data Compression Enable */
+ unsigned dcc :1; /* Data Compression Capable */
+ unsigned reserved2 :6; /* Reserved */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned reserved2 :6; /* Reserved */
+ unsigned dcc :1; /* Data Compression Capable */
+ unsigned dce :1; /* Data Compression Enable */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned dde :1; /* Data Decompression Enable */
+ unsigned red :2; /* Report Exception on Decompression */
+ unsigned reserved3 :5; /* Reserved */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned reserved3 :5; /* Reserved */
+ unsigned red :2; /* Report Exception on Decompression */
+ unsigned dde :1; /* Data Decompression Enable */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u32 ca; /* Compression Algorithm */
+ u32 da; /* Decompression Algorithm */
+ u8 reserved[4]; /* Reserved */
+} osst_data_compression_page_t;
+
+/*
+ * The Medium Partition Page, as returned by the MODE SENSE packet command.
+ */
+typedef struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned ps :1;
+ unsigned reserved1_6 :1; /* Reserved */
+ unsigned page_code :6; /* Page Code - Should be 0x11 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned page_code :6; /* Page Code - Should be 0x11 */
+ unsigned reserved1_6 :1; /* Reserved */
+ unsigned ps :1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 page_length; /* Page Length - Should be 6 */
+ u8 map; /* Maximum Additional Partitions - Should be 0 */
+ u8 apd; /* Additional Partitions Defined - Should be 0 */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned fdp :1; /* Fixed Data Partitions */
+ unsigned sdp :1; /* Should be 0 */
+ unsigned idp :1; /* Should be 0 */
+ unsigned psum :2; /* Should be 0 */
+ unsigned reserved4_012 :3; /* Reserved */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned reserved4_012 :3; /* Reserved */
+ unsigned psum :2; /* Should be 0 */
+ unsigned idp :1; /* Should be 0 */
+ unsigned sdp :1; /* Should be 0 */
+ unsigned fdp :1; /* Fixed Data Partitions */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 mfr; /* Medium Format Recognition */
+ u8 reserved[2]; /* Reserved */
+} osst_medium_partition_page_t;
+
+/*
+ * Capabilities and Mechanical Status Page
+ */
+typedef struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned reserved1_67 :2;
+ unsigned page_code :6; /* Page code - Should be 0x2a */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned page_code :6; /* Page code - Should be 0x2a */
+ unsigned reserved1_67 :2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 page_length; /* Page Length - Should be 0x12 */
+ u8 reserved2, reserved3;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned reserved4_67 :2;
+ unsigned sprev :1; /* Supports SPACE in the reverse direction */
+ unsigned reserved4_1234 :4;
+ unsigned ro :1; /* Read Only Mode */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned ro :1; /* Read Only Mode */
+ unsigned reserved4_1234 :4;
+ unsigned sprev :1; /* Supports SPACE in the reverse direction */
+ unsigned reserved4_67 :2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned reserved5_67 :2;
+ unsigned qfa :1; /* Supports the QFA two partition formats */
+ unsigned reserved5_4 :1;
+ unsigned efmt :1; /* Supports ERASE command initiated formatting */
+ unsigned reserved5_012 :3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned reserved5_012 :3;
+ unsigned efmt :1; /* Supports ERASE command initiated formatting */
+ unsigned reserved5_4 :1;
+ unsigned qfa :1; /* Supports the QFA two partition formats */
+ unsigned reserved5_67 :2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned cmprs :1; /* Supports data compression */
+ unsigned ecc :1; /* Supports error correction */
+ unsigned reserved6_45 :2; /* Reserved */
+ unsigned eject :1; /* The device can eject the volume */
+ unsigned prevent :1; /* The device defaults in the prevent state after power up */
+ unsigned locked :1; /* The volume is locked */
+ unsigned lock :1; /* Supports locking the volume */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned lock :1; /* Supports locking the volume */
+ unsigned locked :1; /* The volume is locked */
+ unsigned prevent :1; /* The device defaults in the prevent state after power up */
+ unsigned eject :1; /* The device can eject the volume */
+ unsigned reserved6_45 :2; /* Reserved */
+ unsigned ecc :1; /* Supports error correction */
+ unsigned cmprs :1; /* Supports data compression */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned blk32768 :1; /* slowb - the device restricts the byte count for PIO */
+ /* transfers for slow buffer memory ??? */
+ /* Also 32768 block size in some cases */
+ unsigned reserved7_3_6 :4;
+ unsigned blk1024 :1; /* Supports 1024 bytes block size */
+ unsigned blk512 :1; /* Supports 512 bytes block size */
+ unsigned reserved7_0 :1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned reserved7_0 :1;
+ unsigned blk512 :1; /* Supports 512 bytes block size */
+ unsigned blk1024 :1; /* Supports 1024 bytes block size */
+ unsigned reserved7_3_6 :4;
+ unsigned blk32768 :1; /* slowb - the device restricts the byte count for PIO */
+ /* transfers for slow buffer memory ??? */
+ /* Also 32768 block size in some cases */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 max_speed; /* Maximum speed supported in KBps */
+ u8 reserved10, reserved11;
+ __be16 ctl; /* Continuous Transfer Limit in blocks */
+ __be16 speed; /* Current Speed, in KBps */
+ __be16 buffer_size; /* Buffer Size, in 512 bytes */
+ u8 reserved18, reserved19;
+} osst_capabilities_page_t;
+
+/*
+ * Block Size Page
+ */
+typedef struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned ps :1;
+ unsigned reserved1_6 :1;
+ unsigned page_code :6; /* Page code - Should be 0x30 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned page_code :6; /* Page code - Should be 0x30 */
+ unsigned reserved1_6 :1;
+ unsigned ps :1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 page_length; /* Page Length - Should be 2 */
+ u8 reserved2;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned one :1;
+ unsigned reserved2_6 :1;
+ unsigned record32_5 :1;
+ unsigned record32 :1;
+ unsigned reserved2_23 :2;
+ unsigned play32_5 :1;
+ unsigned play32 :1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned play32 :1;
+ unsigned play32_5 :1;
+ unsigned reserved2_23 :2;
+ unsigned record32 :1;
+ unsigned record32_5 :1;
+ unsigned reserved2_6 :1;
+ unsigned one :1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} osst_block_size_page_t;
+
+/*
+ * Tape Parameters Page
+ */
+typedef struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned ps :1;
+ unsigned reserved1_6 :1;
+ unsigned page_code :6; /* Page code - Should be 0x2b */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned page_code :6; /* Page code - Should be 0x2b */
+ unsigned reserved1_6 :1;
+ unsigned ps :1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 reserved2;
+ u8 density;
+ u8 reserved3,reserved4;
+ __be16 segtrk;
+ __be16 trks;
+ u8 reserved5,reserved6,reserved7,reserved8,reserved9,reserved10;
+} osst_tape_paramtr_page_t;
+
+/* OnStream definitions */
+
+#define OS_CONFIG_PARTITION (0xff)
+#define OS_DATA_PARTITION (0)
+#define OS_PARTITION_VERSION (1)
+
+/*
+ * partition
+ */
+typedef struct os_partition_s {
+ __u8 partition_num;
+ __u8 par_desc_ver;
+ __be16 wrt_pass_cntr;
+ __be32 first_frame_ppos;
+ __be32 last_frame_ppos;
+ __be32 eod_frame_ppos;
+} os_partition_t;
+
+/*
+ * DAT entry
+ */
+typedef struct os_dat_entry_s {
+ __be32 blk_sz;
+ __be16 blk_cnt;
+ __u8 flags;
+ __u8 reserved;
+} os_dat_entry_t;
+
+/*
+ * DAT
+ */
+#define OS_DAT_FLAGS_DATA (0xc)
+#define OS_DAT_FLAGS_MARK (0x1)
+
+typedef struct os_dat_s {
+ __u8 dat_sz;
+ __u8 reserved1;
+ __u8 entry_cnt;
+ __u8 reserved3;
+ os_dat_entry_t dat_list[16];
+} os_dat_t;
+
+/*
+ * Frame types
+ */
+#define OS_FRAME_TYPE_FILL (0)
+#define OS_FRAME_TYPE_EOD (1 << 0)
+#define OS_FRAME_TYPE_MARKER (1 << 1)
+#define OS_FRAME_TYPE_HEADER (1 << 3)
+#define OS_FRAME_TYPE_DATA (1 << 7)
+
+/*
+ * AUX
+ */
+typedef struct os_aux_s {
+ __be32 format_id; /* hardware compatibility AUX is based on */
+ char application_sig[4]; /* driver used to write this media */
+ __be32 hdwr; /* reserved */
+ __be32 update_frame_cntr; /* for configuration frame */
+ __u8 frame_type;
+ __u8 frame_type_reserved;
+ __u8 reserved_18_19[2];
+ os_partition_t partition;
+ __u8 reserved_36_43[8];
+ __be32 frame_seq_num;
+ __be32 logical_blk_num_high;
+ __be32 logical_blk_num;
+ os_dat_t dat;
+ __u8 reserved188_191[4];
+ __be32 filemark_cnt;
+ __be32 phys_fm;
+ __be32 last_mark_ppos;
+ __u8 reserved204_223[20];
+
+ /*
+ * __u8 app_specific[32];
+ *
+ * Linux specific fields:
+ */
+ __be32 next_mark_ppos; /* when known, points to next marker */
+ __be32 last_mark_lbn; /* storing log_blk_num of last mark is extends ADR spec */
+ __u8 linux_specific[24];
+
+ __u8 reserved_256_511[256];
+} os_aux_t;
+
+#define OS_FM_TAB_MAX 1024
+
+typedef struct os_fm_tab_s {
+ __u8 fm_part_num;
+ __u8 reserved_1;
+ __u8 fm_tab_ent_sz;
+ __u8 reserved_3;
+ __be16 fm_tab_ent_cnt;
+ __u8 reserved6_15[10];
+ __be32 fm_tab_ent[OS_FM_TAB_MAX];
+} os_fm_tab_t;
+
+typedef struct os_ext_trk_ey_s {
+ __u8 et_part_num;
+ __u8 fmt;
+ __be16 fm_tab_off;
+ __u8 reserved4_7[4];
+ __be32 last_hlb_hi;
+ __be32 last_hlb;
+ __be32 last_pp;
+ __u8 reserved20_31[12];
+} os_ext_trk_ey_t;
+
+typedef struct os_ext_trk_tb_s {
+ __u8 nr_stream_part;
+ __u8 reserved_1;
+ __u8 et_ent_sz;
+ __u8 reserved3_15[13];
+ os_ext_trk_ey_t dat_ext_trk_ey;
+ os_ext_trk_ey_t qfa_ext_trk_ey;
+} os_ext_trk_tb_t;
+
+typedef struct os_header_s {
+ char ident_str[8];
+ __u8 major_rev;
+ __u8 minor_rev;
+ __be16 ext_trk_tb_off;
+ __u8 reserved12_15[4];
+ __u8 pt_par_num;
+ __u8 pt_reserved1_3[3];
+ os_partition_t partition[16];
+ __be32 cfg_col_width;
+ __be32 dat_col_width;
+ __be32 qfa_col_width;
+ __u8 cartridge[16];
+ __u8 reserved304_511[208];
+ __be32 old_filemark_list[16680/4]; /* in ADR 1.4 __u8 track_table[16680] */
+ os_ext_trk_tb_t ext_track_tb;
+ __u8 reserved17272_17735[464];
+ os_fm_tab_t dat_fm_tab;
+ os_fm_tab_t qfa_fm_tab;
+ __u8 reserved25960_32767[6808];
+} os_header_t;
+
+
+/*
+ * OnStream ADRL frame
+ */
+#define OS_FRAME_SIZE (32 * 1024 + 512)
+#define OS_DATA_SIZE (32 * 1024)
+#define OS_AUX_SIZE (512)
+//#define OSST_MAX_SG 2
+
+/* The OnStream tape buffer descriptor. */
+struct osst_buffer {
+ unsigned char in_use;
+ unsigned char dma; /* DMA-able buffer */
+ int buffer_size;
+ int buffer_blocks;
+ int buffer_bytes;
+ int read_pointer;
+ int writing;
+ int midlevel_result;
+ int syscall_result;
+ struct osst_request *last_SRpnt;
+ struct st_cmdstatus cmdstat;
+ struct rq_map_data map_data;
+ unsigned char *b_data;
+ os_aux_t *aux; /* onstream AUX structure at end of each block */
+ unsigned short use_sg; /* zero or number of s/g segments for this adapter */
+ unsigned short sg_segs; /* number of segments in s/g list */
+ unsigned short orig_sg_segs; /* number of segments allocated at first try */
+ struct scatterlist sg[1]; /* MUST BE last item */
+} ;
+
+/* The OnStream tape drive descriptor */
+struct osst_tape {
+ struct scsi_driver *driver;
+ unsigned capacity;
+ struct scsi_device *device;
+ struct mutex lock; /* for serialization */
+ struct completion wait; /* for SCSI commands */
+ struct osst_buffer * buffer;
+
+ /* Drive characteristics */
+ unsigned char omit_blklims;
+ unsigned char do_auto_lock;
+ unsigned char can_bsr;
+ unsigned char can_partitions;
+ unsigned char two_fm;
+ unsigned char fast_mteom;
+ unsigned char restr_dma;
+ unsigned char scsi2_logical;
+ unsigned char default_drvbuffer; /* 0xff = don't touch, value 3 bits */
+ unsigned char pos_unknown; /* after reset position unknown */
+ int write_threshold;
+ int timeout; /* timeout for normal commands */
+ int long_timeout; /* timeout for commands known to take long time*/
+
+ /* Mode characteristics */
+ struct st_modedef modes[ST_NBR_MODES];
+ int current_mode;
+
+ /* Status variables */
+ int partition;
+ int new_partition;
+ int nbr_partitions; /* zero until partition support enabled */
+ struct st_partstat ps[ST_NBR_PARTITIONS];
+ unsigned char dirty;
+ unsigned char ready;
+ unsigned char write_prot;
+ unsigned char drv_write_prot;
+ unsigned char in_use;
+ unsigned char blksize_changed;
+ unsigned char density_changed;
+ unsigned char compression_changed;
+ unsigned char drv_buffer;
+ unsigned char density;
+ unsigned char door_locked;
+ unsigned char rew_at_close;
+ unsigned char inited;
+ int block_size;
+ int min_block;
+ int max_block;
+ int recover_count; /* from tape opening */
+ int abort_count;
+ int write_count;
+ int read_count;
+ int recover_erreg; /* from last status call */
+ /*
+ * OnStream specific data
+ */
+ int os_fw_rev; /* the firmware revision * 10000 */
+ unsigned char raw; /* flag OnStream raw access (32.5KB block size) */
+ unsigned char poll; /* flag that this drive needs polling (IDE|firmware) */
+ unsigned char frame_in_buffer; /* flag that the frame as per frame_seq_number
+ * has been read into STp->buffer and is valid */
+ int frame_seq_number; /* logical frame number */
+ int logical_blk_num; /* logical block number */
+ unsigned first_frame_position; /* physical frame to be transferred to/from host */
+ unsigned last_frame_position; /* physical frame to be transferd to/from tape */
+ int cur_frames; /* current number of frames in internal buffer */
+ int max_frames; /* max number of frames in internal buffer */
+ char application_sig[5]; /* application signature */
+ unsigned char fast_open; /* flag that reminds us we didn't check headers at open */
+ unsigned short wrt_pass_cntr; /* write pass counter */
+ int update_frame_cntr; /* update frame counter */
+ int onstream_write_error; /* write error recovery active */
+ int header_ok; /* header frame verified ok */
+ int linux_media; /* reading linux-specifc media */
+ int linux_media_version;
+ os_header_t * header_cache; /* cache is kept for filemark positions */
+ int filemark_cnt;
+ int first_mark_ppos;
+ int last_mark_ppos;
+ int last_mark_lbn; /* storing log_blk_num of last mark is extends ADR spec */
+ int first_data_ppos;
+ int eod_frame_ppos;
+ int eod_frame_lfa;
+ int write_type; /* used in write error recovery */
+ int read_error_frame; /* used in read error recovery */
+ unsigned long cmd_start_time;
+ unsigned long max_cmd_time;
+
+#if DEBUG
+ unsigned char write_pending;
+ int nbr_finished;
+ int nbr_waits;
+ unsigned char last_cmnd[6];
+ unsigned char last_sense[16];
+#endif
+ struct gendisk *drive;
+} ;
+
+/* scsi tape command */
+struct osst_request {
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int result;
+ struct osst_tape *stp;
+ struct completion *waiting;
+ struct bio *bio;
+};
+
+/* Values of write_type */
+#define OS_WRITE_DATA 0
+#define OS_WRITE_EOD 1
+#define OS_WRITE_NEW_MARK 2
+#define OS_WRITE_LAST_MARK 3
+#define OS_WRITE_HEADER 4
+#define OS_WRITE_FILLER 5
+
+/* Additional rw state */
+#define OS_WRITING_COMPLETE 3
diff --git a/drivers/scsi/osst_detect.h b/drivers/scsi/osst_detect.h
new file mode 100644
index 000000000..21717d0e6
--- /dev/null
+++ b/drivers/scsi/osst_detect.h
@@ -0,0 +1,6 @@
+#define SIGS_FROM_OSST \
+ {"OnStream", "SC-", "", "osst"}, \
+ {"OnStream", "DI-", "", "osst"}, \
+ {"OnStream", "DP-", "", "osst"}, \
+ {"OnStream", "FW-", "", "osst"}, \
+ {"OnStream", "USB", "", "osst"}
diff --git a/drivers/scsi/osst_options.h b/drivers/scsi/osst_options.h
new file mode 100644
index 000000000..ff1e61094
--- /dev/null
+++ b/drivers/scsi/osst_options.h
@@ -0,0 +1,106 @@
+/*
+ The compile-time configurable defaults for the Linux SCSI tape driver.
+
+ Copyright 1995 Kai Makisara.
+
+ Last modified: Wed Sep 2 21:24:07 1998 by root@home
+
+ Changed (and renamed) for OnStream SCSI drives garloff@suse.de
+ 2000-06-21
+
+ $Header: /cvsroot/osst/Driver/osst_options.h,v 1.6 2003/12/23 14:22:12 wriede Exp $
+*/
+
+#ifndef _OSST_OPTIONS_H
+#define _OSST_OPTIONS_H
+
+/* The minimum limit for the number of SCSI tape devices is determined by
+ OSST_MAX_TAPES. If the number of tape devices and the "slack" defined by
+ OSST_EXTRA_DEVS exceeds OSST_MAX_TAPES, the large number is used. */
+#define OSST_MAX_TAPES 4
+
+/* If OSST_IN_FILE_POS is nonzero, the driver positions the tape after the
+ record been read by the user program even if the tape has moved further
+ because of buffered reads. Should be set to zero to support also drives
+ that can't space backwards over records. NOTE: The tape will be
+ spaced backwards over an "accidentally" crossed filemark in any case. */
+#define OSST_IN_FILE_POS 1
+
+/* The tape driver buffer size in kilobytes. */
+/* Don't change, as this is the HW blocksize */
+#define OSST_BUFFER_BLOCKS 32
+
+/* The number of kilobytes of data in the buffer that triggers an
+ asynchronous write in fixed block mode. See also OSST_ASYNC_WRITES
+ below. */
+#define OSST_WRITE_THRESHOLD_BLOCKS 32
+
+/* OSST_EOM_RESERVE defines the number of frames are kept in reserve for
+ * * write error recovery when writing near end of medium. ENOSPC is returned
+ * * when write() is called and the tape write position is within this number
+ * * of blocks from the tape capacity. */
+#define OSST_EOM_RESERVE 300
+
+/* The maximum number of tape buffers the driver allocates. The number
+ is also constrained by the number of drives detected. Determines the
+ maximum number of concurrently active tape drives. */
+#define OSST_MAX_BUFFERS OSST_MAX_TAPES
+
+/* Maximum number of scatter/gather segments */
+/* Fit one buffer in pages and add one for the AUX header */
+#define OSST_MAX_SG (((OSST_BUFFER_BLOCKS*1024) / PAGE_SIZE) + 1)
+
+/* The number of scatter/gather segments to allocate at first try (must be
+ smaller or equal to the maximum). */
+#define OSST_FIRST_SG ((OSST_BUFFER_BLOCKS*1024) / PAGE_SIZE)
+
+/* The size of the first scatter/gather segments (determines the maximum block
+ size for SCSI adapters not supporting scatter/gather). The default is set
+ to try to allocate the buffer as one chunk. */
+#define OSST_FIRST_ORDER (15-PAGE_SHIFT)
+
+
+/* The following lines define defaults for properties that can be set
+ separately for each drive using the MTSTOPTIONS ioctl. */
+
+/* If OSST_TWO_FM is non-zero, the driver writes two filemarks after a
+ file being written. Some drives can't handle two filemarks at the
+ end of data. */
+#define OSST_TWO_FM 0
+
+/* If OSST_BUFFER_WRITES is non-zero, writes in fixed block mode are
+ buffered until the driver buffer is full or asynchronous write is
+ triggered. */
+#define OSST_BUFFER_WRITES 1
+
+/* If OSST_ASYNC_WRITES is non-zero, the SCSI write command may be started
+ without waiting for it to finish. May cause problems in multiple
+ tape backups. */
+#define OSST_ASYNC_WRITES 1
+
+/* If OSST_READ_AHEAD is non-zero, blocks are read ahead in fixed block
+ mode. */
+#define OSST_READ_AHEAD 1
+
+/* If OSST_AUTO_LOCK is non-zero, the drive door is locked at the first
+ read or write command after the device is opened. The door is opened
+ when the device is closed. */
+#define OSST_AUTO_LOCK 0
+
+/* If OSST_FAST_MTEOM is non-zero, the MTEOM ioctl is done using the
+ direct SCSI command. The file number status is lost but this method
+ is fast with some drives. Otherwise MTEOM is done by spacing over
+ files and the file number status is retained. */
+#define OSST_FAST_MTEOM 0
+
+/* If OSST_SCSI2LOGICAL is nonzero, the logical block addresses are used for
+ MTIOCPOS and MTSEEK by default. Vendor addresses are used if OSST_SCSI2LOGICAL
+ is zero. */
+#define OSST_SCSI2LOGICAL 0
+
+/* If OSST_SYSV is non-zero, the tape behaves according to the SYS V semantics.
+ The default is BSD semantics. */
+#define OSST_SYSV 0
+
+
+#endif
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
new file mode 100644
index 000000000..e81eadd08
--- /dev/null
+++ b/drivers/scsi/pas16.c
@@ -0,0 +1,594 @@
+#define PSEUDO_DMA
+#define UNSAFE /* Not unsafe for PAS16 -- use it */
+#define PDEBUG 0
+
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ */
+
+/*
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - There are many different models of
+ * the Pro Audio Spectrum/Studio 16, and I only have one of
+ * them, so this may require a little tweaking. An interrupt
+ * is triggered to autoprobe for the interrupt line. Note:
+ * with the newer model boards, the interrupt is set via
+ * software after reset using the default_irq for the
+ * current board number.
+ *
+ * 2. With command line overrides - pas16=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the PAS16_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0x388 address, IRQ10, I could say
+ * -DPAS16_OVERRIDE={{0x388, 10}}
+ * NOTE: Untested.
+ *
+ * 4. When included as a module, with arguments passed on the command line:
+ * pas16_irq=xx the interrupt
+ * pas16_addr=xx the port
+ * e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5"
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ *
+ * Configuration notes :
+ * The current driver does not support interrupt sharing with the
+ * sound portion of the card. If you use the same irq for the
+ * scsi port and sound you will have problems. Either use
+ * a different irq for the scsi port or don't use interrupts
+ * for the scsi port.
+ *
+ * If you have problems with your card not being recognized, use
+ * the LILO command line override. Try to get it recognized without
+ * interrupts. Ie, for a board at the default 0x388 base port,
+ * boot: linux pas16=0x388,0
+ *
+ * NO_IRQ (0) should be specified for no interrupt,
+ * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden
+ * on the command line.
+ */
+
+#include <linux/module.h>
+
+#include <linux/signal.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+
+#include <scsi/scsi_host.h>
+#include "pas16.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+
+
+static unsigned short pas16_addr = 0;
+static int pas16_irq = 0;
+
+
+static const int scsi_irq_translate[] =
+ { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 };
+
+/* The default_irqs array contains values used to set the irq into the
+ * board via software (as must be done on newer model boards without
+ * irq jumpers on the board). The first value in the array will be
+ * assigned to logical board 0, the next to board 1, etc.
+ */
+static int default_irqs[] __initdata =
+ { PAS16_DEFAULT_BOARD_1_IRQ,
+ PAS16_DEFAULT_BOARD_2_IRQ,
+ PAS16_DEFAULT_BOARD_3_IRQ,
+ PAS16_DEFAULT_BOARD_4_IRQ
+ };
+
+static struct override {
+ unsigned short io_port;
+ int irq;
+} overrides
+#ifdef PAS16_OVERRIDE
+ [] __initdata = PAS16_OVERRIDE;
+#else
+ [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
+ {0,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES ARRAY_SIZE(overrides)
+
+static struct base {
+ unsigned short io_port;
+ int noauto;
+} bases[] __initdata =
+ { {PAS16_DEFAULT_BASE_1, 0},
+ {PAS16_DEFAULT_BASE_2, 0},
+ {PAS16_DEFAULT_BASE_3, 0},
+ {PAS16_DEFAULT_BASE_4, 0}
+ };
+
+#define NO_BASES ARRAY_SIZE(bases)
+
+static const unsigned short pas16_offset[ 8 ] =
+ {
+ 0x1c00, /* OUTPUT_DATA_REG */
+ 0x1c01, /* INITIATOR_COMMAND_REG */
+ 0x1c02, /* MODE_REG */
+ 0x1c03, /* TARGET_COMMAND_REG */
+ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */
+ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
+ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
+ * START_DMA_TARGET_RECEIVE_REG wo
+ */
+ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro,
+ * START_DMA_INITIATOR_RECEIVE_REG wo
+ */
+ };
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+#if 1
+#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+
+/*
+ * Function : enable_board( int board_num, unsigned short port )
+ *
+ * Purpose : set address in new model board
+ *
+ * Inputs : board_num - logical board number 0-3, port - base address
+ *
+ */
+
+static void __init
+ enable_board( int board_num, unsigned short port )
+{
+ outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
+ outb( port >> 2, MASTER_ADDRESS_PTR );
+}
+
+
+
+/*
+ * Function : init_board( unsigned short port, int irq )
+ *
+ * Purpose : Set the board up to handle the SCSI interface
+ *
+ * Inputs : port - base address of the board,
+ * irq - irq to assign to the SCSI port
+ * force_irq - set it even if it conflicts with sound driver
+ *
+ */
+
+static void __init
+ init_board( unsigned short io_port, int irq, int force_irq )
+{
+ unsigned int tmp;
+ unsigned int pas_irq_code;
+
+ /* Initialize the SCSI part of the board */
+
+ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */
+ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+
+ NCR5380_read( RESET_PARITY_INTERRUPT_REG );
+
+ /* Set the SCSI interrupt pointer without mucking up the sound
+ * interrupt pointer in the same byte.
+ */
+ pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
+ tmp = inb( io_port + IO_CONFIG_3 );
+
+ if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0
+ && !force_irq )
+ {
+ printk( "pas16: WARNING: Can't use same irq as sound "
+ "driver -- interrupts disabled\n" );
+ /* Set up the drive parameters, disable 5380 interrupts */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ }
+ else
+ {
+ tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 );
+ outb( tmp, io_port + IO_CONFIG_3 );
+
+ /* Set up the drive parameters and enable 5380 interrupts */
+ outb( 0x6d, io_port + SYS_CONFIG_4 );
+ }
+}
+
+
+/*
+ * Function : pas16_hw_detect( unsigned short board_num )
+ *
+ * Purpose : determine if a pas16 board is present
+ *
+ * Inputs : board_num - logical board number ( 0 - 3 )
+ *
+ * Returns : 0 if board not found, 1 if found.
+ */
+
+static int __init
+ pas16_hw_detect( unsigned short board_num )
+{
+ unsigned char board_rev, tmp;
+ unsigned short io_port = bases[ board_num ].io_port;
+
+ /* See if we can find a PAS16 board at the address associated
+ * with this logical board number.
+ */
+
+ /* First, attempt to take a newer model board out of reset and
+ * give it a base address. This shouldn't affect older boards.
+ */
+ enable_board( board_num, io_port );
+
+ /* Now see if it looks like a PAS16 board */
+ board_rev = inb( io_port + PCB_CONFIG );
+
+ if( board_rev == 0xff )
+ return 0;
+
+ tmp = board_rev ^ 0xe0;
+
+ outb( tmp, io_port + PCB_CONFIG );
+ tmp = inb( io_port + PCB_CONFIG );
+ outb( board_rev, io_port + PCB_CONFIG );
+
+ if( board_rev != tmp ) /* Not a PAS-16 */
+ return 0;
+
+ if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 )
+ return 0; /* return if no SCSI interface found */
+
+ /* Mediavision has some new model boards that return ID bits
+ * that indicate a SCSI interface, but they're not (LMS). We'll
+ * put in an additional test to try to weed them out.
+ */
+
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */
+ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */
+ return 0; /* and try to read */
+ NCR5380_write( MODE_REG, 0x00 ); /* it back. */
+ if( NCR5380_read( MODE_REG ) != 0x00 )
+ return 0;
+
+ return 1;
+}
+
+
+#ifndef MODULE
+/*
+ * Function : pas16_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+static int __init pas16_setup(char *str)
+{
+ static int commandline_current = 0;
+ int i;
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ if (ints[0] != 2)
+ printk("pas16_setup : usage pas16=io_port,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].io_port = (unsigned short) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].io_port == (unsigned short) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+ return 1;
+}
+
+__setup("pas16=", pas16_setup);
+#endif
+
+/*
+ * Function : int pas16_detect(struct scsi_host_template * tpnt)
+ *
+ * Purpose : detects and initializes PAS16 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+static int __init pas16_detect(struct scsi_host_template *tpnt)
+{
+ static int current_override = 0;
+ static unsigned short current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned short io_port;
+ int count;
+
+ if (pas16_addr != 0) {
+ overrides[0].io_port = pas16_addr;
+ /*
+ * This is how we avoid seeing more than
+ * one host adapter at the same I/O port.
+ * Cribbed shamelessly from pas16_setup().
+ */
+ for (count = 0; count < NO_BASES; ++count)
+ if (bases[count].io_port == pas16_addr) {
+ bases[count].noauto = 1;
+ break;
+ }
+ }
+ if (pas16_irq != 0)
+ overrides[0].irq = pas16_irq;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ io_port = 0;
+
+ if (overrides[current_override].io_port)
+ {
+ io_port = overrides[current_override].io_port;
+ enable_board( current_override, io_port );
+ init_board( io_port, overrides[current_override].irq, 1 );
+ }
+ else
+ for (; !io_port && (current_base < NO_BASES); ++current_base) {
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port);
+#endif
+ if ( !bases[current_base].noauto &&
+ pas16_hw_detect( current_base ) ){
+ io_port = bases[current_base].io_port;
+ init_board( io_port, default_irqs[ current_base ], 0 );
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : detected board.\n");
+#endif
+ }
+ }
+
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port);
+#endif
+
+ if (!io_port)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ if(instance == NULL)
+ break;
+
+ instance->io_port = io_port;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
+
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
+
+ if (instance->irq != NO_IRQ)
+ if (request_irq(instance->irq, pas16_intr, 0,
+ "pas16", instance)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
+ }
+
+ if (instance->irq == NO_IRQ) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ /* Disable 5380 interrupts, leave drive params the same */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
+ }
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int *ip)
+{
+ int size = capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11; /* I think I have it as /(32*64) */
+ if( ip[2] > 1024 ) { /* yes, >, not >= */
+ ip[0]=255;
+ ip[1]=63;
+ ip[2]=size/(63*255);
+ if( ip[2] > 1023 ) /* yes >1023... */
+ ip[2] = 1023;
+ }
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *d = dst;
+ register unsigned short reg = (unsigned short) (instance->io_port +
+ P_DATA_REG_OFFSET);
+ register int i = len;
+ int ii = 0;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
+ ++ii;
+
+ insb( reg, d, i );
+
+ if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ }
+ if (ii > hostdata->spin_max_r)
+ hostdata->spin_max_r = ii;
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ register unsigned char *s = src;
+ register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
+ register int i = len;
+ int ii = 0;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
+ ++ii;
+
+ outsb( reg, s, i );
+
+ if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ }
+ if (ii > hostdata->spin_max_w)
+ hostdata->spin_max_w = ii;
+ return 0;
+}
+
+#include "NCR5380.c"
+
+static int pas16_release(struct Scsi_Host *shost)
+{
+ if (shost->irq != NO_IRQ)
+ free_irq(shost->irq, shost);
+ NCR5380_exit(shost);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_unregister(shost);
+ return 0;
+}
+
+static struct scsi_host_template driver_template = {
+ .name = "Pro Audio Spectrum-16 SCSI",
+ .detect = pas16_detect,
+ .release = pas16_release,
+ .proc_name = "pas16",
+ .show_info = pas16_show_info,
+ .write_info = pas16_write_info,
+ .info = pas16_info,
+ .queuecommand = pas16_queue_command,
+ .eh_abort_handler = pas16_abort,
+ .eh_bus_reset_handler = pas16_bus_reset,
+ .bios_param = pas16_biosparam,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+#include "scsi_module.c"
+
+#ifdef MODULE
+module_param(pas16_addr, ushort, 0);
+module_param(pas16_irq, int, 0);
+#endif
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
new file mode 100644
index 000000000..c6109c800
--- /dev/null
+++ b/drivers/scsi/pas16.h
@@ -0,0 +1,154 @@
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ */
+
+
+#ifndef PAS16_H
+#define PAS16_H
+
+#define PDEBUG_INIT 0x1
+#define PDEBUG_TRANSFER 0x2
+
+#define PAS16_DEFAULT_BASE_1 0x388
+#define PAS16_DEFAULT_BASE_2 0x384
+#define PAS16_DEFAULT_BASE_3 0x38c
+#define PAS16_DEFAULT_BASE_4 0x288
+
+#define PAS16_DEFAULT_BOARD_1_IRQ 10
+#define PAS16_DEFAULT_BOARD_2_IRQ 12
+#define PAS16_DEFAULT_BOARD_3_IRQ 14
+#define PAS16_DEFAULT_BOARD_4_IRQ 15
+
+
+/*
+ * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
+ * SCSI controller, which is the equivalent of NCR's 5380. "Pseudo-DMA"
+ * architecture is used, where a PAL drives the DMA signals on the 5380
+ * allowing fast, blind transfers with proper handshaking.
+ */
+
+
+/* The Time-out Counter register is used to safe-guard against a stuck
+ * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
+ * DMA conversion is used). The counter uses a 28.224MHz clock
+ * divided by 14 as its clock source. In the case of a stuck byte in
+ * the holding register, an interrupt is generated (and mixed with the
+ * one with the drive) using the CD-ROM interrupt pointer.
+ */
+
+#define P_TIMEOUT_COUNTER_REG 0x4000
+#define P_TC_DISABLE 0x80 /* Set to 0 to enable timeout int. */
+ /* Bits D6-D0 contain timeout count */
+
+
+#define P_TIMEOUT_STATUS_REG_OFFSET 0x4001
+#define P_TS_TIM 0x80 /* check timeout status */
+ /* Bits D6-D4 N/U */
+#define P_TS_ARM_DRQ_INT 0x08 /* Arm DRQ Int. When set high,
+ * the next rising edge will
+ * cause a CD-ROM interrupt.
+ * When set low, the interrupt
+ * will be cleared. There is
+ * no status available for
+ * this interrupt.
+ */
+#define P_TS_ENABLE_TO_ERR_INTERRUPT /* Enable timeout error int. */
+#define P_TS_ENABLE_WAIT /* Enable Wait */
+
+#define P_TS_CT 0x01 /* clear timeout. Note: writing
+ * to this register clears the
+ * timeout error int. or status
+ */
+
+
+/*
+ * The data register reads/writes to/from the 5380 in pseudo-DMA mode
+ */
+
+#define P_DATA_REG_OFFSET 0x5c00 /* rw */
+
+#define P_STATUS_REG_OFFSET 0x5c01 /* ro */
+#define P_ST_RDY 0x80 /* 5380 DDRQ Status */
+
+#define P_IRQ_STATUS 0x5c03
+#define P_IS_IRQ 0x80 /* DIRQ status */
+
+#define PCB_CONFIG 0x803
+#define MASTER_ADDRESS_PTR 0x9a01 /* Fixed position - no relo */
+#define SYS_CONFIG_4 0x8003
+#define WAIT_STATE 0xbc00
+#define OPERATION_MODE_1 0xec03
+#define IO_CONFIG_3 0xf002
+
+
+#ifndef ASM
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+#define NCR5380_implementation_fields \
+ volatile unsigned short io_port
+
+#define NCR5380_local_declare() \
+ volatile unsigned short io_port
+
+#define NCR5380_setup(instance) \
+ io_port = (instance)->io_port
+
+#define PAS16_io_port(reg) ( io_port + pas16_offset[(reg)] )
+
+#if !(PDEBUG & PDEBUG_TRANSFER)
+#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
+#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at io_port %04x\n"\
+ , instance->hostno, (reg), PAS16_io_port(reg))), inb( PAS16_io_port(reg)) )
+
+#define NCR5380_write(reg, value) \
+ (printk("scsi%d : write %02x to register %d at io_port %04x\n", \
+ instance->hostno, (value), (reg), PAS16_io_port(reg)), \
+ outb( (value),PAS16_io_port(reg) ) )
+
+#endif
+
+
+#define NCR5380_intr pas16_intr
+#define do_NCR5380_intr do_pas16_intr
+#define NCR5380_queue_command pas16_queue_command
+#define NCR5380_abort pas16_abort
+#define NCR5380_bus_reset pas16_bus_reset
+#define NCR5380_info pas16_info
+#define NCR5380_show_info pas16_show_info
+#define NCR5380_write_info pas16_write_info
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define PAS16_IRQS 0xd4a8
+
+#endif /* ndef ASM */
+#endif /* PAS16_H */
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
new file mode 100644
index 000000000..ecc855c55
--- /dev/null
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -0,0 +1,83 @@
+#
+# PCMCIA SCSI adapter configuration
+#
+
+menuconfig SCSI_LOWLEVEL_PCMCIA
+ bool "PCMCIA SCSI adapter support"
+ depends on SCSI!=n && PCMCIA!=n
+
+# drivers have problems when build in, so require modules
+if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m
+
+config PCMCIA_AHA152X
+ tristate "Adaptec AHA152X PCMCIA support"
+ select SCSI_SPI_ATTRS
+ help
+ Say Y here if you intend to attach this type of PCMCIA SCSI host
+ adapter to your computer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aha152x_cs.
+
+config PCMCIA_FDOMAIN
+ tristate "Future Domain PCMCIA support"
+ help
+ Say Y here if you intend to attach this type of PCMCIA SCSI host
+ adapter to your computer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fdomain_cs.
+
+config PCMCIA_NINJA_SCSI
+ tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
+ depends on !64BIT
+ help
+ If you intend to attach this type of PCMCIA SCSI host adapter to
+ your computer, say Y here and read
+ <file:Documentation/scsi/NinjaSCSI.txt>.
+
+ Supported cards:
+
+ NinjaSCSI-3: (version string: "WBT","NinjaSCSI-3","R1.0")
+ IO-DATA PCSC-FP
+ ALPHA DATA AD-PCS201
+ CyQ've SFC-201
+ LOGITECH LPM-SCSI2E
+ Pioneer PCR-PR24's card
+ I-O DATA CDPS-PX24's card (PCSC-F)
+ Panasonic KXL-RW10AN CD-RW's card
+ etc.
+
+ NinjaSCSI-32Bit (in 16bit mode):
+ [Workbit (version string: "WORKBIT","UltraNinja-16","1")]
+ Jazz SCP050
+ [I-O DATA (OEM) (version string: "IO DATA","CBSC16 ","1")]
+ I-O DATA CBSC-II
+ [Kyusyu Matsushita Kotobuki (OEM)
+ (version string: "KME ","SCSI-CARD-001","1")]
+ KME KXL-820AN's card
+ HP M820e CDRW's card
+ etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nsp_cs.
+
+config PCMCIA_QLOGIC
+ tristate "Qlogic PCMCIA support"
+ help
+ Say Y here if you intend to attach this type of PCMCIA SCSI host
+ adapter to your computer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qlogic_cs.
+
+config PCMCIA_SYM53C500
+ tristate "Symbios 53c500 PCMCIA support"
+ help
+ Say Y here if you have a New Media Bus Toaster or other PCMCIA
+ SCSI adapter based on the Symbios 53c500 controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sym53c500_cs.
+
+endif # SCSI_LOWLEVEL_PCMCIA
diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile
new file mode 100644
index 000000000..683bf148b
--- /dev/null
+++ b/drivers/scsi/pcmcia/Makefile
@@ -0,0 +1,13 @@
+
+ccflags-y := -Idrivers/scsi
+
+# 16-bit client drivers
+obj-$(CONFIG_PCMCIA_QLOGIC) += qlogic_cs.o
+obj-$(CONFIG_PCMCIA_FDOMAIN) += fdomain_cs.o
+obj-$(CONFIG_PCMCIA_AHA152X) += aha152x_cs.o
+obj-$(CONFIG_PCMCIA_NINJA_SCSI) += nsp_cs.o
+obj-$(CONFIG_PCMCIA_SYM53C500) += sym53c500_cs.o
+
+aha152x_cs-objs := aha152x_stub.o aha152x_core.o
+fdomain_cs-objs := fdomain_stub.o fdomain_core.o
+qlogic_cs-objs := qlogic_stub.o
diff --git a/drivers/scsi/pcmcia/aha152x_core.c b/drivers/scsi/pcmcia/aha152x_core.c
new file mode 100644
index 000000000..dba371651
--- /dev/null
+++ b/drivers/scsi/pcmcia/aha152x_core.c
@@ -0,0 +1,3 @@
+#define PCMCIA 1
+#define AHA152X_STAT 1
+#include "aha152x.c"
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
new file mode 100644
index 000000000..7d1609fa2
--- /dev/null
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -0,0 +1,235 @@
+/*======================================================================
+
+ A driver for Adaptec AHA152X-compatible PCMCIA SCSI cards.
+
+ This driver supports the Adaptec AHA-1460, the New Media Bus
+ Toaster, and the New Media Toast & Jam.
+
+ aha152x_cs.c 1.54 2000/06/12 21:27:25
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in which
+ case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <scsi/scsi.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_ioctl.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "aha152x.h"
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+/* SCSI bus setup options */
+static int host_id = 7;
+static int reconnect = 1;
+static int parity = 1;
+static int synchronous = 1;
+static int reset_delay = 100;
+static int ext_trans = 0;
+
+module_param(host_id, int, 0);
+module_param(reconnect, int, 0);
+module_param(parity, int, 0);
+module_param(synchronous, int, 0);
+module_param(reset_delay, int, 0);
+module_param(ext_trans, int, 0);
+
+MODULE_LICENSE("Dual MPL/GPL");
+
+/*====================================================================*/
+
+typedef struct scsi_info_t {
+ struct pcmcia_device *p_dev;
+ struct Scsi_Host *host;
+} scsi_info_t;
+
+static void aha152x_release_cs(struct pcmcia_device *link);
+static void aha152x_detach(struct pcmcia_device *p_dev);
+static int aha152x_config_cs(struct pcmcia_device *link);
+
+static int aha152x_probe(struct pcmcia_device *link)
+{
+ scsi_info_t *info;
+
+ dev_dbg(&link->dev, "aha152x_attach()\n");
+
+ /* Create new SCSI device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return -ENOMEM;
+ info->p_dev = link;
+ link->priv = info;
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_regs = PRESENT_OPTION;
+
+ return aha152x_config_cs(link);
+} /* aha152x_attach */
+
+/*====================================================================*/
+
+static void aha152x_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "aha152x_detach\n");
+
+ aha152x_release_cs(link);
+
+ /* Unlink device structure, free bits */
+ kfree(link->priv);
+} /* aha152x_detach */
+
+/*====================================================================*/
+
+static int aha152x_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->io_lines = 10;
+
+ /* For New Media T&J, look for a SCSI window */
+ if ((p_dev->resource[0]->end < 0x20) &&
+ (p_dev->resource[1]->end >= 0x20))
+ p_dev->resource[0]->start = p_dev->resource[1]->start;
+
+ if (p_dev->resource[0]->start >= 0xffff)
+ return -EINVAL;
+
+ p_dev->resource[1]->start = p_dev->resource[1]->end = 0;
+ p_dev->resource[0]->end = 0x20;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int aha152x_config_cs(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+ struct aha152x_setup s;
+ int ret;
+ struct Scsi_Host *host;
+
+ dev_dbg(&link->dev, "aha152x_config\n");
+
+ ret = pcmcia_loop_config(link, aha152x_config_check, NULL);
+ if (ret)
+ goto failed;
+
+ if (!link->irq)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ /* Set configuration options for the aha152x driver */
+ memset(&s, 0, sizeof(s));
+ s.conf = "PCMCIA setup";
+ s.io_port = link->resource[0]->start;
+ s.irq = link->irq;
+ s.scsiid = host_id;
+ s.reconnect = reconnect;
+ s.parity = parity;
+ s.synchronous = synchronous;
+ s.delay = reset_delay;
+ if (ext_trans)
+ s.ext_trans = ext_trans;
+
+ host = aha152x_probe_one(&s);
+ if (host == NULL) {
+ printk(KERN_INFO "aha152x_cs: no SCSI devices found\n");
+ goto failed;
+ }
+
+ info->host = host;
+
+ return 0;
+
+failed:
+ aha152x_release_cs(link);
+ return -ENODEV;
+}
+
+static void aha152x_release_cs(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+
+ aha152x_release(info->host);
+ pcmcia_disable_device(link);
+}
+
+static int aha152x_resume(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+
+ aha152x_host_reset_host(info->host);
+
+ return 0;
+}
+
+static const struct pcmcia_device_id aha152x_ids[] = {
+ PCMCIA_DEVICE_PROD_ID123("New Media", "SCSI", "Bus Toaster", 0xcdf7e4cc, 0x35f26476, 0xa8851d6e),
+ PCMCIA_DEVICE_PROD_ID123("NOTEWORTHY", "SCSI", "Bus Toaster", 0xad89c6e8, 0x35f26476, 0xa8851d6e),
+ PCMCIA_DEVICE_PROD_ID12("Adaptec, Inc.", "APA-1460 SCSI Host Adapter", 0x24ba9738, 0x3a3c3d20),
+ PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Multimedia Sound/SCSI", 0x085a850b, 0x80a6535c),
+ PCMCIA_DEVICE_PROD_ID12("NOTEWORTHY", "NWCOMB02 SCSI/AUDIO COMBO CARD", 0xad89c6e8, 0x5f9a615b),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, aha152x_ids);
+
+static struct pcmcia_driver aha152x_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "aha152x_cs",
+ .probe = aha152x_probe,
+ .remove = aha152x_detach,
+ .id_table = aha152x_ids,
+ .resume = aha152x_resume,
+};
+
+static int __init init_aha152x_cs(void)
+{
+ return pcmcia_register_driver(&aha152x_cs_driver);
+}
+
+static void __exit exit_aha152x_cs(void)
+{
+ pcmcia_unregister_driver(&aha152x_cs_driver);
+}
+
+module_init(init_aha152x_cs);
+module_exit(exit_aha152x_cs);
diff --git a/drivers/scsi/pcmcia/fdomain_core.c b/drivers/scsi/pcmcia/fdomain_core.c
new file mode 100644
index 000000000..a48913791
--- /dev/null
+++ b/drivers/scsi/pcmcia/fdomain_core.c
@@ -0,0 +1,2 @@
+#define PCMCIA 1
+#include "fdomain.c"
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
new file mode 100644
index 000000000..714b248f5
--- /dev/null
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -0,0 +1,209 @@
+/*======================================================================
+
+ A driver for Future Domain-compatible PCMCIA SCSI cards
+
+ fdomain_cs.c 1.47 2001/10/13 00:08:52
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <scsi/scsi.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_ioctl.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "fdomain.h"
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+/*====================================================================*/
+
+typedef struct scsi_info_t {
+ struct pcmcia_device *p_dev;
+ struct Scsi_Host *host;
+} scsi_info_t;
+
+
+static void fdomain_release(struct pcmcia_device *link);
+static void fdomain_detach(struct pcmcia_device *p_dev);
+static int fdomain_config(struct pcmcia_device *link);
+
+static int fdomain_probe(struct pcmcia_device *link)
+{
+ scsi_info_t *info;
+
+ dev_dbg(&link->dev, "fdomain_attach()\n");
+
+ /* Create new SCSI device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->p_dev = link;
+ link->priv = info;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_regs = PRESENT_OPTION;
+
+ return fdomain_config(link);
+} /* fdomain_attach */
+
+/*====================================================================*/
+
+static void fdomain_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "fdomain_detach\n");
+
+ fdomain_release(link);
+
+ kfree(link->priv);
+} /* fdomain_detach */
+
+/*====================================================================*/
+
+static int fdomain_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->end = 0x10;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ return pcmcia_request_io(p_dev);
+}
+
+
+static int fdomain_config(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+ int ret;
+ char str[22];
+ struct Scsi_Host *host;
+
+ dev_dbg(&link->dev, "fdomain_config\n");
+
+ ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
+ if (ret)
+ goto failed;
+
+ if (!link->irq)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ /* A bad hack... */
+ release_region(link->resource[0]->start, resource_size(link->resource[0]));
+
+ /* Set configuration options for the fdomain driver */
+ sprintf(str, "%d,%d", (unsigned int) link->resource[0]->start, link->irq);
+ fdomain_setup(str);
+
+ host = __fdomain_16x0_detect(&fdomain_driver_template);
+ if (!host) {
+ printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
+ goto failed;
+ }
+
+ if (scsi_add_host(host, NULL))
+ goto failed;
+ scsi_scan_host(host);
+
+ info->host = host;
+
+ return 0;
+
+failed:
+ fdomain_release(link);
+ return -ENODEV;
+} /* fdomain_config */
+
+/*====================================================================*/
+
+static void fdomain_release(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+
+ dev_dbg(&link->dev, "fdomain_release\n");
+
+ scsi_remove_host(info->host);
+ pcmcia_disable_device(link);
+ scsi_unregister(info->host);
+}
+
+/*====================================================================*/
+
+static int fdomain_resume(struct pcmcia_device *link)
+{
+ fdomain_16x0_bus_reset(NULL);
+
+ return 0;
+}
+
+static const struct pcmcia_device_id fdomain_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88, 0x859cad20),
+ PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e),
+ PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation", "SCSI PCMCIA Credit Card Controller", 0x182bdafe, 0xc80d106f),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, fdomain_ids);
+
+static struct pcmcia_driver fdomain_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "fdomain_cs",
+ .probe = fdomain_probe,
+ .remove = fdomain_detach,
+ .id_table = fdomain_ids,
+ .resume = fdomain_resume,
+};
+
+static int __init init_fdomain_cs(void)
+{
+ return pcmcia_register_driver(&fdomain_cs_driver);
+}
+
+static void __exit exit_fdomain_cs(void)
+{
+ pcmcia_unregister_driver(&fdomain_cs_driver);
+}
+
+module_init(init_fdomain_cs);
+module_exit(exit_fdomain_cs);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
new file mode 100644
index 000000000..1b6c8833a
--- /dev/null
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -0,0 +1,1761 @@
+/*======================================================================
+
+ NinjaSCSI-3 / NinjaSCSI-32Bi PCMCIA SCSI host adapter card driver
+ By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
+
+ Ver.2.8 Support 32bit MMIO mode
+ Support Synchronous Data Transfer Request (SDTR) mode
+ Ver.2.0 Support 32bit PIO mode
+ Ver.1.1.2 Fix for scatter list buffer exceeds
+ Ver.1.1 Support scatter list
+ Ver.0.1 Initial version
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+======================================================================*/
+
+/***********************************************************************
+ This driver is for these PCcards.
+
+ I-O DATA PCSC-F (Workbit NinjaSCSI-3)
+ "WBT", "NinjaSCSI-3", "R1.0"
+ I-O DATA CBSC-II (Workbit NinjaSCSI-32Bi in 16bit mode)
+ "IO DATA", "CBSC16 ", "1"
+
+***********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/stat.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <../drivers/scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include "nsp_cs.h"
+
+MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
+MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
+MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL");
+#endif
+
+#include "nsp_io.h"
+
+/*====================================================================*/
+/* Parameters that can be set with 'insmod' */
+
+static int nsp_burst_mode = BURST_MEM32;
+module_param(nsp_burst_mode, int, 0);
+MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode (0=io8, 1=io32, 2=mem32(default))");
+
+/* Release IO ports after configuration? */
+static bool free_ports = 0;
+module_param(free_ports, bool, 0);
+MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
+
+static struct scsi_host_template nsp_driver_template = {
+ .proc_name = "nsp_cs",
+ .show_info = nsp_show_info,
+ .name = "WorkBit NinjaSCSI-3/32Bi(16bit)",
+ .info = nsp_info,
+ .queuecommand = nsp_queuecommand,
+/* .eh_abort_handler = nsp_eh_abort,*/
+ .eh_bus_reset_handler = nsp_eh_bus_reset,
+ .eh_host_reset_handler = nsp_eh_host_reset,
+ .can_queue = 1,
+ .this_id = NSP_INITIATOR_ID,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
+
+
+
+/*
+ * debug, error print
+ */
+#ifndef NSP_DEBUG
+# define NSP_DEBUG_MASK 0x000000
+# define nsp_msg(type, args...) nsp_cs_message("", 0, (type), args)
+# define nsp_dbg(mask, args...) /* */
+#else
+# define NSP_DEBUG_MASK 0xffffff
+# define nsp_msg(type, args...) \
+ nsp_cs_message (__func__, __LINE__, (type), args)
+# define nsp_dbg(mask, args...) \
+ nsp_cs_dmessage(__func__, __LINE__, (mask), args)
+#endif
+
+#define NSP_DEBUG_QUEUECOMMAND BIT(0)
+#define NSP_DEBUG_REGISTER BIT(1)
+#define NSP_DEBUG_AUTOSCSI BIT(2)
+#define NSP_DEBUG_INTR BIT(3)
+#define NSP_DEBUG_SGLIST BIT(4)
+#define NSP_DEBUG_BUSFREE BIT(5)
+#define NSP_DEBUG_CDB_CONTENTS BIT(6)
+#define NSP_DEBUG_RESELECTION BIT(7)
+#define NSP_DEBUG_MSGINOCCUR BIT(8)
+#define NSP_DEBUG_EEPROM BIT(9)
+#define NSP_DEBUG_MSGOUTOCCUR BIT(10)
+#define NSP_DEBUG_BUSRESET BIT(11)
+#define NSP_DEBUG_RESTART BIT(12)
+#define NSP_DEBUG_SYNC BIT(13)
+#define NSP_DEBUG_WAIT BIT(14)
+#define NSP_DEBUG_TARGETFLAG BIT(15)
+#define NSP_DEBUG_PROC BIT(16)
+#define NSP_DEBUG_INIT BIT(17)
+#define NSP_DEBUG_DATA_IO BIT(18)
+#define NSP_SPECIAL_PRINT_REGISTER BIT(20)
+
+#define NSP_DEBUG_BUF_LEN 150
+
+static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc)
+{
+ scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc);
+}
+
+static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...)
+{
+ va_list args;
+ char buf[NSP_DEBUG_BUF_LEN];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+#ifndef NSP_DEBUG
+ printk("%snsp_cs: %s\n", type, buf);
+#else
+ printk("%snsp_cs: %s (%d): %s\n", type, func, line, buf);
+#endif
+}
+
+#ifdef NSP_DEBUG
+static void nsp_cs_dmessage(const char *func, int line, int mask, char *fmt, ...)
+{
+ va_list args;
+ char buf[NSP_DEBUG_BUF_LEN];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (mask & NSP_DEBUG_MASK) {
+ printk("nsp_cs-debug: 0x%x %s (%d): %s\n", mask, func, line, buf);
+ }
+}
+#endif
+
+/***********************************************************/
+
+/*====================================================
+ * Clenaup parameters and call done() functions.
+ * You must be set SCpnt->result before call this function.
+ */
+static void nsp_scsi_done(struct scsi_cmnd *SCpnt)
+{
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+
+ data->CurrentSC = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+}
+
+static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
+{
+#ifdef NSP_DEBUG
+ /*unsigned int host_id = SCpnt->device->host->this_id;*/
+ /*unsigned int base = SCpnt->device->host->io_port;*/
+ unsigned char target = scmd_id(SCpnt);
+#endif
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+
+ nsp_dbg(NSP_DEBUG_QUEUECOMMAND,
+ "SCpnt=0x%p target=%d lun=%llu sglist=0x%p bufflen=%d sg_count=%d",
+ SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt),
+ scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
+ //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);
+
+ SCpnt->scsi_done = done;
+
+ if (data->CurrentSC != NULL) {
+ nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen");
+ SCpnt->result = DID_BAD_TARGET << 16;
+ nsp_scsi_done(SCpnt);
+ return 0;
+ }
+
+#if 0
+ /* XXX: pcmcia-cs generates SCSI command with "scsi_info" utility.
+ This makes kernel crash when suspending... */
+ if (data->ScsiInfo->stop != 0) {
+ nsp_msg(KERN_INFO, "suspending device. reject command.");
+ SCpnt->result = DID_BAD_TARGET << 16;
+ nsp_scsi_done(SCpnt);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+#endif
+
+ show_command(SCpnt);
+
+ data->CurrentSC = SCpnt;
+
+ SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Message = 0;
+ SCpnt->SCp.have_data_in = IO_UNKNOWN;
+ SCpnt->SCp.sent_command = 0;
+ SCpnt->SCp.phase = PH_UNDETERMINED;
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+
+ /* setup scratch area
+ SCp.ptr : buffer pointer
+ SCp.this_residual : buffer length
+ SCp.buffer : next buffer
+ SCp.buffers_residual : left buffers in list
+ SCp.phase : current state of the command */
+ if (scsi_bufflen(SCpnt)) {
+ SCpnt->SCp.buffer = scsi_sglist(SCpnt);
+ SCpnt->SCp.ptr = BUFFER_ADDR;
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
+ } else {
+ SCpnt->SCp.ptr = NULL;
+ SCpnt->SCp.this_residual = 0;
+ SCpnt->SCp.buffer = NULL;
+ SCpnt->SCp.buffers_residual = 0;
+ }
+
+ if (nsphw_start_selection(SCpnt) == FALSE) {
+ nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ nsp_scsi_done(SCpnt);
+ return 0;
+ }
+
+
+ //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out");
+#ifdef NSP_DEBUG
+ data->CmdId++;
+#endif
+ return 0;
+}
+
+static DEF_SCSI_QCMD(nsp_queuecommand)
+
+/*
+ * setup PIO FIFO transfer mode and enable/disable to data out
+ */
+static void nsp_setup_fifo(nsp_hw_data *data, int enabled)
+{
+ unsigned int base = data->BaseAddress;
+ unsigned char transfer_mode_reg;
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled);
+
+ if (enabled != FALSE) {
+ transfer_mode_reg = TRANSFER_GO | BRAIND;
+ } else {
+ transfer_mode_reg = 0;
+ }
+
+ transfer_mode_reg |= data->TransferMode;
+
+ nsp_index_write(base, TRANSFERMODE, transfer_mode_reg);
+}
+
+static void nsphw_init_sync(nsp_hw_data *data)
+{
+ sync_data tmp_sync = { .SyncNegotiation = SYNC_NOT_YET,
+ .SyncPeriod = 0,
+ .SyncOffset = 0
+ };
+ int i;
+
+ /* setup sync data */
+ for ( i = 0; i < ARRAY_SIZE(data->Sync); i++ ) {
+ data->Sync[i] = tmp_sync;
+ }
+}
+
+/*
+ * Initialize Ninja hardware
+ */
+static int nsphw_init(nsp_hw_data *data)
+{
+ unsigned int base = data->BaseAddress;
+
+ nsp_dbg(NSP_DEBUG_INIT, "in base=0x%x", base);
+
+ data->ScsiClockDiv = CLOCK_40M | FAST_20;
+ data->CurrentSC = NULL;
+ data->FifoCount = 0;
+ data->TransferMode = MODE_IO8;
+
+ nsphw_init_sync(data);
+
+ /* block all interrupts */
+ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK);
+
+ /* setup SCSI interface */
+ nsp_write(base, IFSELECT, IF_IFSEL);
+
+ nsp_index_write(base, SCSIIRQMODE, 0);
+
+ nsp_index_write(base, TRANSFERMODE, MODE_IO8);
+ nsp_index_write(base, CLOCKDIV, data->ScsiClockDiv);
+
+ nsp_index_write(base, PARITYCTRL, 0);
+ nsp_index_write(base, POINTERCLR, POINTER_CLEAR |
+ ACK_COUNTER_CLEAR |
+ REQ_COUNTER_CLEAR |
+ HOST_COUNTER_CLEAR);
+
+ /* setup fifo asic */
+ nsp_write(base, IFSELECT, IF_REGSEL);
+ nsp_index_write(base, TERMPWRCTRL, 0);
+ if ((nsp_index_read(base, OTHERCONTROL) & TPWR_SENSE) == 0) {
+ nsp_msg(KERN_INFO, "terminator power on");
+ nsp_index_write(base, TERMPWRCTRL, POWER_ON);
+ }
+
+ nsp_index_write(base, TIMERCOUNT, 0);
+ nsp_index_write(base, TIMERCOUNT, 0); /* requires 2 times!! */
+
+ nsp_index_write(base, SYNCREG, 0);
+ nsp_index_write(base, ACKWIDTH, 0);
+
+ /* enable interrupts and ack them */
+ nsp_index_write(base, SCSIIRQMODE, SCSI_PHASE_CHANGE_EI |
+ RESELECT_EI |
+ SCSI_RESET_IRQ_EI );
+ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR);
+
+ nsp_setup_fifo(data, FALSE);
+
+ return TRUE;
+}
+
+/*
+ * Start selection phase
+ */
+static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
+{
+ unsigned int host_id = SCpnt->device->host->this_id;
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned char target = scmd_id(SCpnt);
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ int time_out;
+ unsigned char phase, arbit;
+
+ //nsp_dbg(NSP_DEBUG_RESELECTION, "in");
+
+ phase = nsp_index_read(base, SCSIBUSMON);
+ if(phase != BUSMON_BUS_FREE) {
+ //nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy");
+ return FALSE;
+ }
+
+ /* start arbitration */
+ //nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit");
+ SCpnt->SCp.phase = PH_ARBSTART;
+ nsp_index_write(base, SETARBIT, ARBIT_GO);
+
+ time_out = 1000;
+ do {
+ /* XXX: what a stupid chip! */
+ arbit = nsp_index_read(base, ARBITSTATUS);
+ //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit=%d, wait_count=%d", arbit, wait_count);
+ udelay(1); /* hold 1.2us */
+ } while((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
+ (time_out-- != 0));
+
+ if (!(arbit & ARBIT_WIN)) {
+ //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail");
+ nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR);
+ return FALSE;
+ }
+
+ /* assert select line */
+ //nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line");
+ SCpnt->SCp.phase = PH_SELSTART;
+ udelay(3); /* wait 2.4us */
+ nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target));
+ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN);
+ udelay(2); /* wait >1.2us */
+ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_DATAOUT_ENB | SCSI_ATN);
+ nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR);
+ /*udelay(1);*/ /* wait >90ns */
+ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_DATAOUT_ENB | SCSI_ATN);
+
+ /* check selection timeout */
+ nsp_start_timer(SCpnt, 1000/51);
+ data->SelectionTimeOut = 1;
+
+ return TRUE;
+}
+
+struct nsp_sync_table {
+ unsigned int min_period;
+ unsigned int max_period;
+ unsigned int chip_period;
+ unsigned int ack_width;
+};
+
+static struct nsp_sync_table nsp_sync_table_40M[] = {
+ {0x0c, 0x0c, 0x1, 0}, /* 20MB 50ns*/
+ {0x19, 0x19, 0x3, 1}, /* 10MB 100ns*/
+ {0x1a, 0x25, 0x5, 2}, /* 7.5MB 150ns*/
+ {0x26, 0x32, 0x7, 3}, /* 5MB 200ns*/
+ { 0, 0, 0, 0},
+};
+
+static struct nsp_sync_table nsp_sync_table_20M[] = {
+ {0x19, 0x19, 0x1, 0}, /* 10MB 100ns*/
+ {0x1a, 0x25, 0x2, 0}, /* 7.5MB 150ns*/
+ {0x26, 0x32, 0x3, 1}, /* 5MB 200ns*/
+ { 0, 0, 0, 0},
+};
+
+/*
+ * setup synchronous data transfer mode
+ */
+static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt)
+{
+ unsigned char target = scmd_id(SCpnt);
+// unsigned char lun = SCpnt->device->lun;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ sync_data *sync = &(data->Sync[target]);
+ struct nsp_sync_table *sync_table;
+ unsigned int period, offset;
+ int i;
+
+
+ nsp_dbg(NSP_DEBUG_SYNC, "in");
+
+ period = sync->SyncPeriod;
+ offset = sync->SyncOffset;
+
+ nsp_dbg(NSP_DEBUG_SYNC, "period=0x%x, offset=0x%x", period, offset);
+
+ if ((data->ScsiClockDiv & (BIT(0)|BIT(1))) == CLOCK_20M) {
+ sync_table = nsp_sync_table_20M;
+ } else {
+ sync_table = nsp_sync_table_40M;
+ }
+
+ for ( i = 0; sync_table->max_period != 0; i++, sync_table++) {
+ if ( period >= sync_table->min_period &&
+ period <= sync_table->max_period ) {
+ break;
+ }
+ }
+
+ if (period != 0 && sync_table->max_period == 0) {
+ /*
+ * No proper period/offset found
+ */
+ nsp_dbg(NSP_DEBUG_SYNC, "no proper period/offset");
+
+ sync->SyncPeriod = 0;
+ sync->SyncOffset = 0;
+ sync->SyncRegister = 0;
+ sync->AckWidth = 0;
+
+ return FALSE;
+ }
+
+ sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) |
+ (offset & SYNCREG_OFFSET_MASK);
+ sync->AckWidth = sync_table->ack_width;
+
+ nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth);
+
+ return TRUE;
+}
+
+
+/*
+ * start ninja hardware timer
+ */
+static void nsp_start_timer(struct scsi_cmnd *SCpnt, int time)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+
+ //nsp_dbg(NSP_DEBUG_INTR, "in SCpnt=0x%p, time=%d", SCpnt, time);
+ data->TimerCount = time;
+ nsp_index_write(base, TIMERCOUNT, time);
+}
+
+/*
+ * wait for bus phase change
+ */
+static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask,
+ char *str)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned char reg;
+ int time_out;
+
+ //nsp_dbg(NSP_DEBUG_INTR, "in");
+
+ time_out = 100;
+
+ do {
+ reg = nsp_index_read(base, SCSIBUSMON);
+ if (reg == 0xff) {
+ break;
+ }
+ } while ((--time_out != 0) && (reg & mask) != 0);
+
+ if (time_out == 0) {
+ nsp_msg(KERN_DEBUG, " %s signal off timeout", str);
+ }
+
+ return 0;
+}
+
+/*
+ * expect Ninja Irq
+ */
+static int nsp_expect_signal(struct scsi_cmnd *SCpnt,
+ unsigned char current_phase,
+ unsigned char mask)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ int time_out;
+ unsigned char phase, i_src;
+
+ //nsp_dbg(NSP_DEBUG_INTR, "current_phase=0x%x, mask=0x%x", current_phase, mask);
+
+ time_out = 100;
+ do {
+ phase = nsp_index_read(base, SCSIBUSMON);
+ if (phase == 0xff) {
+ //nsp_dbg(NSP_DEBUG_INTR, "ret -1");
+ return -1;
+ }
+ i_src = nsp_read(base, IRQSTATUS);
+ if (i_src & IRQSTATUS_SCSI) {
+ //nsp_dbg(NSP_DEBUG_INTR, "ret 0 found scsi signal");
+ return 0;
+ }
+ if ((phase & mask) != 0 && (phase & BUSMON_PHASE_MASK) == current_phase) {
+ //nsp_dbg(NSP_DEBUG_INTR, "ret 1 phase=0x%x", phase);
+ return 1;
+ }
+ } while(time_out-- != 0);
+
+ //nsp_dbg(NSP_DEBUG_INTR, "timeout");
+ return -1;
+}
+
+/*
+ * transfer SCSI message
+ */
+static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ char *buf = data->MsgBuffer;
+ int len = min(MSGBUF_SIZE, data->MsgLen);
+ int ptr;
+ int ret;
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "in");
+ for (ptr = 0; len > 0; len--, ptr++) {
+
+ ret = nsp_expect_signal(SCpnt, phase, BUSMON_REQ);
+ if (ret <= 0) {
+ nsp_dbg(NSP_DEBUG_DATA_IO, "xfer quit");
+ return 0;
+ }
+
+ /* if last byte, negate ATN */
+ if (len == 1 && SCpnt->SCp.phase == PH_MSG_OUT) {
+ nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB);
+ }
+
+ /* read & write message */
+ if (phase & BUSMON_IO) {
+ nsp_dbg(NSP_DEBUG_DATA_IO, "read msg");
+ buf[ptr] = nsp_index_read(base, SCSIDATAWITHACK);
+ } else {
+ nsp_dbg(NSP_DEBUG_DATA_IO, "write msg");
+ nsp_index_write(base, SCSIDATAWITHACK, buf[ptr]);
+ }
+ nsp_negate_signal(SCpnt, BUSMON_ACK, "xfer<ack>");
+
+ }
+ return len;
+}
+
+/*
+ * get extra SCSI data from fifo
+ */
+static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
+{
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ unsigned int count;
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "in");
+
+ if (SCpnt->SCp.have_data_in != IO_IN) {
+ return 0;
+ }
+
+ count = nsp_fifo_count(SCpnt);
+ if (data->FifoCount == count) {
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "not use bypass quirk");
+ return 0;
+ }
+
+ /*
+ * XXX: NSP_QUIRK
+ * data phase skip only occures in case of SCSI_LOW_READ
+ */
+ nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk");
+ SCpnt->SCp.phase = PH_DATA;
+ nsp_pio_read(SCpnt);
+ nsp_setup_fifo(data, FALSE);
+
+ return 0;
+}
+
+/*
+ * accept reselection
+ */
+static int nsp_reselected(struct scsi_cmnd *SCpnt)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned int host_id = SCpnt->device->host->this_id;
+ //nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ unsigned char bus_reg;
+ unsigned char id_reg, tmp;
+ int target;
+
+ nsp_dbg(NSP_DEBUG_RESELECTION, "in");
+
+ id_reg = nsp_index_read(base, RESELECTID);
+ tmp = id_reg & (~BIT(host_id));
+ target = 0;
+ while(tmp != 0) {
+ if (tmp & BIT(0)) {
+ break;
+ }
+ tmp >>= 1;
+ target++;
+ }
+
+ if (scmd_id(SCpnt) != target) {
+ nsp_msg(KERN_ERR, "XXX: reselect ID must be %d in this implementation.", target);
+ }
+
+ nsp_negate_signal(SCpnt, BUSMON_SEL, "reselect<SEL>");
+
+ nsp_nexus(SCpnt);
+ bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN);
+ nsp_index_write(base, SCSIBUSCTRL, bus_reg);
+ nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB);
+
+ return TRUE;
+}
+
+/*
+ * count how many data transferd
+ */
+static int nsp_fifo_count(struct scsi_cmnd *SCpnt)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned int count;
+ unsigned int l, m, h, dummy;
+
+ nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER);
+
+ l = nsp_index_read(base, TRANSFERCOUNT);
+ m = nsp_index_read(base, TRANSFERCOUNT);
+ h = nsp_index_read(base, TRANSFERCOUNT);
+ dummy = nsp_index_read(base, TRANSFERCOUNT); /* required this! */
+
+ count = (h << 16) | (m << 8) | (l << 0);
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "count=0x%x", count);
+
+ return count;
+}
+
+/* fifo size */
+#define RFIFO_CRIT 64
+#define WFIFO_CRIT 64
+
+/*
+ * read data in DATA IN phase
+ */
+static void nsp_pio_read(struct scsi_cmnd *SCpnt)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned long mmio_base = SCpnt->device->host->base;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ long time_out;
+ int ocount, res;
+ unsigned char stat, fifo_stat;
+
+ ocount = data->FifoCount;
+
+ nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d",
+ SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr,
+ SCpnt->SCp.this_residual, SCpnt->SCp.buffer,
+ SCpnt->SCp.buffers_residual);
+
+ time_out = 1000;
+
+ while ((time_out-- != 0) &&
+ (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0 ) ) {
+
+ stat = nsp_index_read(base, SCSIBUSMON);
+ stat &= BUSMON_PHASE_MASK;
+
+
+ res = nsp_fifo_count(SCpnt) - ocount;
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount, res);
+ if (res == 0) { /* if some data available ? */
+ if (stat == BUSPHASE_DATA_IN) { /* phase changed? */
+ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", SCpnt->SCp.this_residual);
+ continue;
+ } else {
+ nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat);
+ break;
+ }
+ }
+
+ fifo_stat = nsp_read(base, FIFOSTATUS);
+ if ((fifo_stat & FIFOSTATUS_FULL_EMPTY) == 0 &&
+ stat == BUSPHASE_DATA_IN) {
+ continue;
+ }
+
+ res = min(res, SCpnt->SCp.this_residual);
+
+ switch (data->TransferMode) {
+ case MODE_IO32:
+ res &= ~(BIT(1)|BIT(0)); /* align 4 */
+ nsp_fifo32_read(base, SCpnt->SCp.ptr, res >> 2);
+ break;
+ case MODE_IO8:
+ nsp_fifo8_read (base, SCpnt->SCp.ptr, res );
+ break;
+
+ case MODE_MEM32:
+ res &= ~(BIT(1)|BIT(0)); /* align 4 */
+ nsp_mmio_fifo32_read(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ break;
+
+ default:
+ nsp_dbg(NSP_DEBUG_DATA_IO, "unknown read mode");
+ return;
+ }
+
+ nsp_inc_resid(SCpnt, -res);
+ SCpnt->SCp.ptr += res;
+ SCpnt->SCp.this_residual -= res;
+ ocount += res;
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount);
+
+ /* go to next scatter list if available */
+ if (SCpnt->SCp.this_residual == 0 &&
+ SCpnt->SCp.buffers_residual != 0 ) {
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
+ SCpnt->SCp.buffers_residual--;
+ SCpnt->SCp.buffer++;
+ SCpnt->SCp.ptr = BUFFER_ADDR;
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ time_out = 1000;
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", SCpnt->SCp.buffer->page, SCpnt->SCp.buffer->offset);
+ }
+ }
+
+ data->FifoCount = ocount;
+
+ if (time_out < 0) {
+ nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
+ scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
+ SCpnt->SCp.buffers_residual);
+ }
+ nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount);
+ nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId,
+ scsi_get_resid(SCpnt));
+}
+
+/*
+ * write data in DATA OUT phase
+ */
+static void nsp_pio_write(struct scsi_cmnd *SCpnt)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned long mmio_base = SCpnt->device->host->base;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ int time_out;
+ int ocount, res;
+ unsigned char stat;
+
+ ocount = data->FifoCount;
+
+ nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x",
+ data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual,
+ SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual,
+ scsi_get_resid(SCpnt));
+
+ time_out = 1000;
+
+ while ((time_out-- != 0) &&
+ (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0)) {
+ stat = nsp_index_read(base, SCSIBUSMON);
+ stat &= BUSMON_PHASE_MASK;
+
+ if (stat != BUSPHASE_DATA_OUT) {
+ res = ocount - nsp_fifo_count(SCpnt);
+
+ nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res);
+ /* Put back pointer */
+ nsp_inc_resid(SCpnt, res);
+ SCpnt->SCp.ptr -= res;
+ SCpnt->SCp.this_residual += res;
+ ocount -= res;
+
+ break;
+ }
+
+ res = ocount - nsp_fifo_count(SCpnt);
+ if (res > 0) { /* write all data? */
+ nsp_dbg(NSP_DEBUG_DATA_IO, "wait for all data out. ocount=0x%x res=%d", ocount, res);
+ continue;
+ }
+
+ res = min(SCpnt->SCp.this_residual, WFIFO_CRIT);
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, res);
+ switch (data->TransferMode) {
+ case MODE_IO32:
+ res &= ~(BIT(1)|BIT(0)); /* align 4 */
+ nsp_fifo32_write(base, SCpnt->SCp.ptr, res >> 2);
+ break;
+ case MODE_IO8:
+ nsp_fifo8_write (base, SCpnt->SCp.ptr, res );
+ break;
+
+ case MODE_MEM32:
+ res &= ~(BIT(1)|BIT(0)); /* align 4 */
+ nsp_mmio_fifo32_write(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ break;
+
+ default:
+ nsp_dbg(NSP_DEBUG_DATA_IO, "unknown write mode");
+ break;
+ }
+
+ nsp_inc_resid(SCpnt, -res);
+ SCpnt->SCp.ptr += res;
+ SCpnt->SCp.this_residual -= res;
+ ocount += res;
+
+ /* go to next scatter list if available */
+ if (SCpnt->SCp.this_residual == 0 &&
+ SCpnt->SCp.buffers_residual != 0 ) {
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
+ SCpnt->SCp.buffers_residual--;
+ SCpnt->SCp.buffer++;
+ SCpnt->SCp.ptr = BUFFER_ADDR;
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ time_out = 1000;
+ }
+ }
+
+ data->FifoCount = ocount;
+
+ if (time_out < 0) {
+ nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
+ scsi_get_resid(SCpnt));
+ }
+ nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount);
+ nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId,
+ scsi_get_resid(SCpnt));
+}
+#undef RFIFO_CRIT
+#undef WFIFO_CRIT
+
+/*
+ * setup synchronous/asynchronous data transfer mode
+ */
+static int nsp_nexus(struct scsi_cmnd *SCpnt)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ unsigned char target = scmd_id(SCpnt);
+// unsigned char lun = SCpnt->device->lun;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ sync_data *sync = &(data->Sync[target]);
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p", SCpnt);
+
+ /* setup synch transfer registers */
+ nsp_index_write(base, SYNCREG, sync->SyncRegister);
+ nsp_index_write(base, ACKWIDTH, sync->AckWidth);
+
+ if (scsi_get_resid(SCpnt) % 4 != 0 ||
+ scsi_get_resid(SCpnt) <= PAGE_SIZE ) {
+ data->TransferMode = MODE_IO8;
+ } else if (nsp_burst_mode == BURST_MEM32) {
+ data->TransferMode = MODE_MEM32;
+ } else if (nsp_burst_mode == BURST_IO32) {
+ data->TransferMode = MODE_IO32;
+ } else {
+ data->TransferMode = MODE_IO8;
+ }
+
+ /* setup pdma fifo */
+ nsp_setup_fifo(data, TRUE);
+
+ /* clear ack counter */
+ data->FifoCount = 0;
+ nsp_index_write(base, POINTERCLR, POINTER_CLEAR |
+ ACK_COUNTER_CLEAR |
+ REQ_COUNTER_CLEAR |
+ HOST_COUNTER_CLEAR);
+
+ return 0;
+}
+
+#include "nsp_message.c"
+/*
+ * interrupt handler
+ */
+static irqreturn_t nspintr(int irq, void *dev_id)
+{
+ unsigned int base;
+ unsigned char irq_status, irq_phase, phase;
+ struct scsi_cmnd *tmpSC;
+ unsigned char target, lun;
+ unsigned int *sync_neg;
+ int i, tmp;
+ nsp_hw_data *data;
+
+
+ //nsp_dbg(NSP_DEBUG_INTR, "dev_id=0x%p", dev_id);
+ //nsp_dbg(NSP_DEBUG_INTR, "host=0x%p", ((scsi_info_t *)dev_id)->host);
+
+ if ( dev_id != NULL &&
+ ((scsi_info_t *)dev_id)->host != NULL ) {
+ scsi_info_t *info = (scsi_info_t *)dev_id;
+
+ data = (nsp_hw_data *)info->host->hostdata;
+ } else {
+ nsp_dbg(NSP_DEBUG_INTR, "host data wrong");
+ return IRQ_NONE;
+ }
+
+ //nsp_dbg(NSP_DEBUG_INTR, "&nsp_data_base=0x%p, dev_id=0x%p", &nsp_data_base, dev_id);
+
+ base = data->BaseAddress;
+ //nsp_dbg(NSP_DEBUG_INTR, "base=0x%x", base);
+
+ /*
+ * interrupt check
+ */
+ nsp_write(base, IRQCONTROL, IRQCONTROL_IRQDISABLE);
+ irq_status = nsp_read(base, IRQSTATUS);
+ //nsp_dbg(NSP_DEBUG_INTR, "irq_status=0x%x", irq_status);
+ if ((irq_status == 0xff) || ((irq_status & IRQSTATUS_MASK) == 0)) {
+ nsp_write(base, IRQCONTROL, 0);
+ //nsp_dbg(NSP_DEBUG_INTR, "no irq/shared irq");
+ return IRQ_NONE;
+ }
+
+ /* XXX: IMPORTANT
+ * Do not read an irq_phase register if no scsi phase interrupt.
+ * Unless, you should lose a scsi phase interrupt.
+ */
+ phase = nsp_index_read(base, SCSIBUSMON);
+ if((irq_status & IRQSTATUS_SCSI) != 0) {
+ irq_phase = nsp_index_read(base, IRQPHASESENCE);
+ } else {
+ irq_phase = 0;
+ }
+
+ //nsp_dbg(NSP_DEBUG_INTR, "irq_phase=0x%x", irq_phase);
+
+ /*
+ * timer interrupt handler (scsi vs timer interrupts)
+ */
+ //nsp_dbg(NSP_DEBUG_INTR, "timercount=%d", data->TimerCount);
+ if (data->TimerCount != 0) {
+ //nsp_dbg(NSP_DEBUG_INTR, "stop timer");
+ nsp_index_write(base, TIMERCOUNT, 0);
+ nsp_index_write(base, TIMERCOUNT, 0);
+ data->TimerCount = 0;
+ }
+
+ if ((irq_status & IRQSTATUS_MASK) == IRQSTATUS_TIMER &&
+ data->SelectionTimeOut == 0) {
+ //nsp_dbg(NSP_DEBUG_INTR, "timer start");
+ nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR);
+ return IRQ_HANDLED;
+ }
+
+ nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR | IRQCONTROL_FIFO_CLEAR);
+
+ if ((irq_status & IRQSTATUS_SCSI) &&
+ (irq_phase & SCSI_RESET_IRQ)) {
+ nsp_msg(KERN_ERR, "bus reset (power off?)");
+
+ nsphw_init(data);
+ nsp_bus_reset(data);
+
+ if(data->CurrentSC != NULL) {
+ tmpSC = data->CurrentSC;
+ tmpSC->result = (DID_RESET << 16) |
+ ((tmpSC->SCp.Message & 0xff) << 8) |
+ ((tmpSC->SCp.Status & 0xff) << 0);
+ nsp_scsi_done(tmpSC);
+ }
+ return IRQ_HANDLED;
+ }
+
+ if (data->CurrentSC == NULL) {
+ nsp_msg(KERN_ERR, "CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen. reset everything", irq_status, phase, irq_phase);
+ nsphw_init(data);
+ nsp_bus_reset(data);
+ return IRQ_HANDLED;
+ }
+
+ tmpSC = data->CurrentSC;
+ target = tmpSC->device->id;
+ lun = tmpSC->device->lun;
+ sync_neg = &(data->Sync[target].SyncNegotiation);
+
+ /*
+ * parse hardware SCSI irq reasons register
+ */
+ if (irq_status & IRQSTATUS_SCSI) {
+ if (irq_phase & RESELECT_IRQ) {
+ nsp_dbg(NSP_DEBUG_INTR, "reselect");
+ nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR);
+ if (nsp_reselected(tmpSC) != FALSE) {
+ return IRQ_HANDLED;
+ }
+ }
+
+ if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) {
+ return IRQ_HANDLED;
+ }
+ }
+
+ //show_phase(tmpSC);
+
+ switch(tmpSC->SCp.phase) {
+ case PH_SELSTART:
+ // *sync_neg = SYNC_NOT_YET;
+ if ((phase & BUSMON_BSY) == 0) {
+ //nsp_dbg(NSP_DEBUG_INTR, "selection count=%d", data->SelectionTimeOut);
+ if (data->SelectionTimeOut >= NSP_SELTIMEOUT) {
+ nsp_dbg(NSP_DEBUG_INTR, "selection time out");
+ data->SelectionTimeOut = 0;
+ nsp_index_write(base, SCSIBUSCTRL, 0);
+
+ tmpSC->result = DID_TIME_OUT << 16;
+ nsp_scsi_done(tmpSC);
+
+ return IRQ_HANDLED;
+ }
+ data->SelectionTimeOut += 1;
+ nsp_start_timer(tmpSC, 1000/51);
+ return IRQ_HANDLED;
+ }
+
+ /* attention assert */
+ //nsp_dbg(NSP_DEBUG_INTR, "attention assert");
+ data->SelectionTimeOut = 0;
+ tmpSC->SCp.phase = PH_SELECTED;
+ nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN);
+ udelay(1);
+ nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB);
+ return IRQ_HANDLED;
+
+ break;
+
+ case PH_RESELECT:
+ //nsp_dbg(NSP_DEBUG_INTR, "phase reselect");
+ // *sync_neg = SYNC_NOT_YET;
+ if ((phase & BUSMON_PHASE_MASK) != BUSPHASE_MESSAGE_IN) {
+
+ tmpSC->result = DID_ABORT << 16;
+ nsp_scsi_done(tmpSC);
+ return IRQ_HANDLED;
+ }
+ /* fall thru */
+ default:
+ if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) {
+ return IRQ_HANDLED;
+ }
+ break;
+ }
+
+ /*
+ * SCSI sequencer
+ */
+ //nsp_dbg(NSP_DEBUG_INTR, "start scsi seq");
+
+ /* normal disconnect */
+ if (((tmpSC->SCp.phase == PH_MSG_IN) || (tmpSC->SCp.phase == PH_MSG_OUT)) &&
+ (irq_phase & LATCHED_BUS_FREE) != 0 ) {
+ nsp_dbg(NSP_DEBUG_INTR, "normal disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase);
+
+ //*sync_neg = SYNC_NOT_YET;
+
+ if ((tmpSC->SCp.Message == MSG_COMMAND_COMPLETE)) { /* all command complete and return status */
+ tmpSC->result = (DID_OK << 16) |
+ ((tmpSC->SCp.Message & 0xff) << 8) |
+ ((tmpSC->SCp.Status & 0xff) << 0);
+ nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result);
+ nsp_scsi_done(tmpSC);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+ }
+
+
+ /* check unexpected bus free state */
+ if (phase == 0) {
+ nsp_msg(KERN_DEBUG, "unexpected bus free. irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase);
+
+ *sync_neg = SYNC_NG;
+ tmpSC->result = DID_ERROR << 16;
+ nsp_scsi_done(tmpSC);
+ return IRQ_HANDLED;
+ }
+
+ switch (phase & BUSMON_PHASE_MASK) {
+ case BUSPHASE_COMMAND:
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_COMMAND");
+ if ((phase & BUSMON_REQ) == 0) {
+ nsp_dbg(NSP_DEBUG_INTR, "REQ == 0");
+ return IRQ_HANDLED;
+ }
+
+ tmpSC->SCp.phase = PH_COMMAND;
+
+ nsp_nexus(tmpSC);
+
+ /* write scsi command */
+ nsp_dbg(NSP_DEBUG_INTR, "cmd_len=%d", tmpSC->cmd_len);
+ nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER);
+ for (i = 0; i < tmpSC->cmd_len; i++) {
+ nsp_index_write(base, COMMANDDATA, tmpSC->cmnd[i]);
+ }
+ nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER | AUTO_COMMAND_GO);
+ break;
+
+ case BUSPHASE_DATA_OUT:
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT");
+
+ tmpSC->SCp.phase = PH_DATA;
+ tmpSC->SCp.have_data_in = IO_OUT;
+
+ nsp_pio_write(tmpSC);
+
+ break;
+
+ case BUSPHASE_DATA_IN:
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN");
+
+ tmpSC->SCp.phase = PH_DATA;
+ tmpSC->SCp.have_data_in = IO_IN;
+
+ nsp_pio_read(tmpSC);
+
+ break;
+
+ case BUSPHASE_STATUS:
+ nsp_dataphase_bypass(tmpSC);
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS");
+
+ tmpSC->SCp.phase = PH_STATUS;
+
+ tmpSC->SCp.Status = nsp_index_read(base, SCSIDATAWITHACK);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", tmpSC->SCp.Message, tmpSC->SCp.Status);
+
+ break;
+
+ case BUSPHASE_MESSAGE_OUT:
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_OUT");
+ if ((phase & BUSMON_REQ) == 0) {
+ goto timer_out;
+ }
+
+ tmpSC->SCp.phase = PH_MSG_OUT;
+
+ //*sync_neg = SYNC_NOT_YET;
+
+ data->MsgLen = i = 0;
+ data->MsgBuffer[i] = IDENTIFY(TRUE, lun); i++;
+
+ if (*sync_neg == SYNC_NOT_YET) {
+ data->Sync[target].SyncPeriod = 0;
+ data->Sync[target].SyncOffset = 0;
+
+ /**/
+ data->MsgBuffer[i] = MSG_EXTENDED; i++;
+ data->MsgBuffer[i] = 3; i++;
+ data->MsgBuffer[i] = MSG_EXT_SDTR; i++;
+ data->MsgBuffer[i] = 0x0c; i++;
+ data->MsgBuffer[i] = 15; i++;
+ /**/
+ }
+ data->MsgLen = i;
+
+ nsp_analyze_sdtr(tmpSC);
+ show_message(data);
+ nsp_message_out(tmpSC);
+ break;
+
+ case BUSPHASE_MESSAGE_IN:
+ nsp_dataphase_bypass(tmpSC);
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_IN");
+ if ((phase & BUSMON_REQ) == 0) {
+ goto timer_out;
+ }
+
+ tmpSC->SCp.phase = PH_MSG_IN;
+ nsp_message_in(tmpSC);
+
+ /**/
+ if (*sync_neg == SYNC_NOT_YET) {
+ //nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun);
+
+ if (data->MsgLen >= 5 &&
+ data->MsgBuffer[0] == MSG_EXTENDED &&
+ data->MsgBuffer[1] == 3 &&
+ data->MsgBuffer[2] == MSG_EXT_SDTR ) {
+ data->Sync[target].SyncPeriod = data->MsgBuffer[3];
+ data->Sync[target].SyncOffset = data->MsgBuffer[4];
+ //nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]);
+ *sync_neg = SYNC_OK;
+ } else {
+ data->Sync[target].SyncPeriod = 0;
+ data->Sync[target].SyncOffset = 0;
+ *sync_neg = SYNC_NG;
+ }
+ nsp_analyze_sdtr(tmpSC);
+ }
+ /**/
+
+ /* search last messeage byte */
+ tmp = -1;
+ for (i = 0; i < data->MsgLen; i++) {
+ tmp = data->MsgBuffer[i];
+ if (data->MsgBuffer[i] == MSG_EXTENDED) {
+ i += (1 + data->MsgBuffer[i+1]);
+ }
+ }
+ tmpSC->SCp.Message = tmp;
+
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", tmpSC->SCp.Message, data->MsgLen);
+ show_message(data);
+
+ break;
+
+ case BUSPHASE_SELECT:
+ default:
+ nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE other");
+
+ break;
+ }
+
+ //nsp_dbg(NSP_DEBUG_INTR, "out");
+ return IRQ_HANDLED;
+
+timer_out:
+ nsp_start_timer(tmpSC, 1000/102);
+ return IRQ_HANDLED;
+}
+
+#ifdef NSP_DEBUG
+#include "nsp_debug.c"
+#endif /* NSP_DEBUG */
+
+/*----------------------------------------------------------------*/
+/* look for ninja3 card and init if found */
+/*----------------------------------------------------------------*/
+static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht)
+{
+ struct Scsi_Host *host; /* registered host structure */
+ nsp_hw_data *data_b = &nsp_data_base, *data;
+
+ nsp_dbg(NSP_DEBUG_INIT, "this_id=%d", sht->this_id);
+ host = scsi_host_alloc(&nsp_driver_template, sizeof(nsp_hw_data));
+ if (host == NULL) {
+ nsp_dbg(NSP_DEBUG_INIT, "host failed");
+ return NULL;
+ }
+
+ memcpy(host->hostdata, data_b, sizeof(nsp_hw_data));
+ data = (nsp_hw_data *)host->hostdata;
+ data->ScsiInfo->host = host;
+#ifdef NSP_DEBUG
+ data->CmdId = 0;
+#endif
+
+ nsp_dbg(NSP_DEBUG_INIT, "irq=%d,%d", data_b->IrqNumber, ((nsp_hw_data *)host->hostdata)->IrqNumber);
+
+ host->unique_id = data->BaseAddress;
+ host->io_port = data->BaseAddress;
+ host->n_io_port = data->NumAddress;
+ host->irq = data->IrqNumber;
+ host->base = data->MmioAddress;
+
+ spin_lock_init(&(data->Lock));
+
+ snprintf(data->nspinfo,
+ sizeof(data->nspinfo),
+ "NinjaSCSI-3/32Bi Driver $Revision: 1.23 $ IO:0x%04lx-0x%04lx MMIO(virt addr):0x%04lx IRQ:%02d",
+ host->io_port, host->io_port + host->n_io_port - 1,
+ host->base,
+ host->irq);
+ sht->name = data->nspinfo;
+
+ nsp_dbg(NSP_DEBUG_INIT, "end");
+
+
+ return host; /* detect done. */
+}
+
+/*----------------------------------------------------------------*/
+/* return info string */
+/*----------------------------------------------------------------*/
+static const char *nsp_info(struct Scsi_Host *shpnt)
+{
+ nsp_hw_data *data = (nsp_hw_data *)shpnt->hostdata;
+
+ return data->nspinfo;
+}
+
+static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ int id;
+ int speed;
+ unsigned long flags;
+ nsp_hw_data *data;
+ int hostno;
+
+ hostno = host->host_no;
+ data = (nsp_hw_data *)host->hostdata;
+
+ seq_puts(m, "NinjaSCSI status\n\n"
+ "Driver version: $Revision: 1.23 $\n");
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
+
+ seq_puts(m, "burst transfer mode: ");
+ switch (nsp_burst_mode) {
+ case BURST_IO8:
+ seq_puts(m, "io8");
+ break;
+ case BURST_IO32:
+ seq_puts(m, "io32");
+ break;
+ case BURST_MEM32:
+ seq_puts(m, "mem32");
+ break;
+ default:
+ seq_puts(m, "???");
+ break;
+ }
+ seq_putc(m, '\n');
+
+
+ spin_lock_irqsave(&(data->Lock), flags);
+ seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
+ spin_unlock_irqrestore(&(data->Lock), flags);
+
+ seq_puts(m, "SDTR status\n");
+ for(id = 0; id < ARRAY_SIZE(data->Sync); id++) {
+
+ seq_printf(m, "id %d: ", id);
+
+ if (id == host->this_id) {
+ seq_puts(m, "----- NinjaSCSI-3 host adapter\n");
+ continue;
+ }
+
+ switch(data->Sync[id].SyncNegotiation) {
+ case SYNC_OK:
+ seq_puts(m, " sync");
+ break;
+ case SYNC_NG:
+ seq_puts(m, "async");
+ break;
+ case SYNC_NOT_YET:
+ seq_puts(m, " none");
+ break;
+ default:
+ seq_puts(m, "?????");
+ break;
+ }
+
+ if (data->Sync[id].SyncPeriod != 0) {
+ speed = 1000000 / (data->Sync[id].SyncPeriod * 4);
+
+ seq_printf(m, " transfer %d.%dMB/s, offset %d",
+ speed / 1000,
+ speed % 1000,
+ data->Sync[id].SyncOffset
+ );
+ }
+ seq_putc(m, '\n');
+ }
+ return 0;
+}
+
+/*---------------------------------------------------------------*/
+/* error handler */
+/*---------------------------------------------------------------*/
+
+/*
+static int nsp_eh_abort(struct scsi_cmnd *SCpnt)
+{
+ nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt);
+
+ return nsp_eh_bus_reset(SCpnt);
+}*/
+
+static int nsp_bus_reset(nsp_hw_data *data)
+{
+ unsigned int base = data->BaseAddress;
+ int i;
+
+ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK);
+
+ nsp_index_write(base, SCSIBUSCTRL, SCSI_RST);
+ mdelay(100); /* 100ms */
+ nsp_index_write(base, SCSIBUSCTRL, 0);
+ for(i = 0; i < 5; i++) {
+ nsp_index_read(base, IRQPHASESENCE); /* dummy read */
+ }
+
+ nsphw_init_sync(data);
+
+ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR);
+
+ return SUCCESS;
+}
+
+static int nsp_eh_bus_reset(struct scsi_cmnd *SCpnt)
+{
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+
+ nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt);
+
+ return nsp_bus_reset(data);
+}
+
+static int nsp_eh_host_reset(struct scsi_cmnd *SCpnt)
+{
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+
+ nsp_dbg(NSP_DEBUG_BUSRESET, "in");
+
+ nsphw_init(data);
+
+ return SUCCESS;
+}
+
+
+/**********************************************************************
+ PCMCIA functions
+**********************************************************************/
+
+static int nsp_cs_probe(struct pcmcia_device *link)
+{
+ scsi_info_t *info;
+ nsp_hw_data *data = &nsp_data_base;
+ int ret;
+
+ nsp_dbg(NSP_DEBUG_INIT, "in");
+
+ /* Create new SCSI device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) { return -ENOMEM; }
+ info->p_dev = link;
+ link->priv = info;
+ data->ScsiInfo = info;
+
+ nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info);
+
+ ret = nsp_cs_config(link);
+
+ nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link);
+ return ret;
+} /* nsp_cs_attach */
+
+
+static void nsp_cs_detach(struct pcmcia_device *link)
+{
+ nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link);
+
+ ((scsi_info_t *)link->priv)->stop = 1;
+ nsp_cs_release(link);
+
+ kfree(link->priv);
+ link->priv = NULL;
+} /* nsp_cs_detach */
+
+
+static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ nsp_hw_data *data = priv_data;
+
+ if (p_dev->config_index == 0)
+ return -ENODEV;
+
+ /* This reserves IO space but doesn't actually enable it */
+ if (pcmcia_request_io(p_dev) != 0)
+ goto next_entry;
+
+ if (resource_size(p_dev->resource[2])) {
+ p_dev->resource[2]->flags |= (WIN_DATA_WIDTH_16 |
+ WIN_MEMORY_TYPE_CM |
+ WIN_ENABLE);
+ if (p_dev->resource[2]->end < 0x1000)
+ p_dev->resource[2]->end = 0x1000;
+ if (pcmcia_request_window(p_dev, p_dev->resource[2], 0) != 0)
+ goto next_entry;
+ if (pcmcia_map_mem_page(p_dev, p_dev->resource[2],
+ p_dev->card_addr) != 0)
+ goto next_entry;
+
+ data->MmioAddress = (unsigned long)
+ ioremap_nocache(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
+ data->MmioLength = resource_size(p_dev->resource[2]);
+ }
+ /* If we got this far, we're cool! */
+ return 0;
+
+next_entry:
+ nsp_dbg(NSP_DEBUG_INIT, "next");
+ pcmcia_disable_device(p_dev);
+ return -ENODEV;
+}
+
+static int nsp_cs_config(struct pcmcia_device *link)
+{
+ int ret;
+ scsi_info_t *info = link->priv;
+ struct Scsi_Host *host;
+ nsp_hw_data *data = &nsp_data_base;
+
+ nsp_dbg(NSP_DEBUG_INIT, "in");
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC |
+ CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IOMEM |
+ CONF_AUTO_SET_IO;
+
+ ret = pcmcia_loop_config(link, nsp_cs_config_check, data);
+ if (ret)
+ goto cs_failed;
+
+ if (pcmcia_request_irq(link, nspintr))
+ goto cs_failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto cs_failed;
+
+ if (free_ports) {
+ if (link->resource[0]) {
+ release_region(link->resource[0]->start,
+ resource_size(link->resource[0]));
+ }
+ if (link->resource[1]) {
+ release_region(link->resource[1]->start,
+ resource_size(link->resource[1]));
+ }
+ }
+
+ /* Set port and IRQ */
+ data->BaseAddress = link->resource[0]->start;
+ data->NumAddress = resource_size(link->resource[0]);
+ data->IrqNumber = link->irq;
+
+ nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
+ data->BaseAddress, data->NumAddress, data->IrqNumber);
+
+ if(nsphw_init(data) == FALSE) {
+ goto cs_failed;
+ }
+
+ host = nsp_detect(&nsp_driver_template);
+
+ if (host == NULL) {
+ nsp_dbg(NSP_DEBUG_INIT, "detect failed");
+ goto cs_failed;
+ }
+
+
+ ret = scsi_add_host (host, NULL);
+ if (ret)
+ goto cs_failed;
+
+ scsi_scan_host(host);
+
+ info->host = host;
+
+ return 0;
+
+ cs_failed:
+ nsp_dbg(NSP_DEBUG_INIT, "config fail");
+ nsp_cs_release(link);
+
+ return -ENODEV;
+} /* nsp_cs_config */
+
+
+static void nsp_cs_release(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+ nsp_hw_data *data = NULL;
+
+ if (info->host == NULL) {
+ nsp_msg(KERN_DEBUG, "unexpected card release call.");
+ } else {
+ data = (nsp_hw_data *)info->host->hostdata;
+ }
+
+ nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link);
+
+ /* Unlink the device chain */
+ if (info->host != NULL) {
+ scsi_remove_host(info->host);
+ }
+
+ if (resource_size(link->resource[2])) {
+ if (data != NULL) {
+ iounmap((void *)(data->MmioAddress));
+ }
+ }
+ pcmcia_disable_device(link);
+
+ if (info->host != NULL) {
+ scsi_host_put(info->host);
+ }
+} /* nsp_cs_release */
+
+static int nsp_cs_suspend(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+ nsp_hw_data *data;
+
+ nsp_dbg(NSP_DEBUG_INIT, "event: suspend");
+
+ if (info->host != NULL) {
+ nsp_msg(KERN_INFO, "clear SDTR status");
+
+ data = (nsp_hw_data *)info->host->hostdata;
+
+ nsphw_init_sync(data);
+ }
+
+ info->stop = 1;
+
+ return 0;
+}
+
+static int nsp_cs_resume(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+ nsp_hw_data *data;
+
+ nsp_dbg(NSP_DEBUG_INIT, "event: resume");
+
+ info->stop = 0;
+
+ if (info->host != NULL) {
+ nsp_msg(KERN_INFO, "reset host and bus");
+
+ data = (nsp_hw_data *)info->host->hostdata;
+
+ nsphw_init (data);
+ nsp_bus_reset(data);
+ }
+
+ return 0;
+}
+
+/*======================================================================*
+ * module entry point
+ *====================================================================*/
+static const struct pcmcia_device_id nsp_cs_ids[] = {
+ PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a),
+ PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 0x534c02bc, 0x52008408, 0x51de003a),
+ PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a),
+ PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-003", "1", 0x534c02bc, 0xbc0ee524, 0x51de003a),
+ PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-004", "1", 0x534c02bc, 0x226a7087, 0x51de003a),
+ PCMCIA_DEVICE_PROD_ID123("WBT", "NinjaSCSI-3", "R1.0", 0xc7ba805f, 0xfdc7c97d, 0x6973710e),
+ PCMCIA_DEVICE_PROD_ID123("WORKBIT", "UltraNinja-16", "1", 0x28191418, 0xb70f4b09, 0x51de003a),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, nsp_cs_ids);
+
+static struct pcmcia_driver nsp_driver = {
+ .owner = THIS_MODULE,
+ .name = "nsp_cs",
+ .probe = nsp_cs_probe,
+ .remove = nsp_cs_detach,
+ .id_table = nsp_cs_ids,
+ .suspend = nsp_cs_suspend,
+ .resume = nsp_cs_resume,
+};
+
+static int __init nsp_cs_init(void)
+{
+ return pcmcia_register_driver(&nsp_driver);
+}
+
+static void __exit nsp_cs_exit(void)
+{
+ pcmcia_unregister_driver(&nsp_driver);
+}
+
+
+module_init(nsp_cs_init)
+module_exit(nsp_cs_exit)
+
+/* end */
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
new file mode 100644
index 000000000..afd64f0ad
--- /dev/null
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -0,0 +1,392 @@
+/*=======================================================/
+ Header file for nsp_cs.c
+ By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
+
+ Ver.1.0 : Cut unused lines.
+ Ver 0.1 : Initial version.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+=========================================================*/
+
+#ifndef __nsp_cs__
+#define __nsp_cs__
+
+/* for debugging */
+//#define NSP_DEBUG 9
+
+/*
+#define static
+#define inline
+*/
+
+/************************************
+ * Some useful macros...
+ */
+
+/* SCSI initiator must be ID 7 */
+#define NSP_INITIATOR_ID 7
+
+#define NSP_SELTIMEOUT 200
+
+/***************************************************************************
+ * register definitions
+ ***************************************************************************/
+/*========================================================================
+ * base register
+ ========================================================================*/
+#define IRQCONTROL 0x00 /* R */
+# define IRQCONTROL_RESELECT_CLEAR BIT(0)
+# define IRQCONTROL_PHASE_CHANGE_CLEAR BIT(1)
+# define IRQCONTROL_TIMER_CLEAR BIT(2)
+# define IRQCONTROL_FIFO_CLEAR BIT(3)
+# define IRQCONTROL_ALLMASK 0xff
+# define IRQCONTROL_ALLCLEAR (IRQCONTROL_RESELECT_CLEAR | \
+ IRQCONTROL_PHASE_CHANGE_CLEAR | \
+ IRQCONTROL_TIMER_CLEAR | \
+ IRQCONTROL_FIFO_CLEAR )
+# define IRQCONTROL_IRQDISABLE 0xf0
+
+#define IRQSTATUS 0x00 /* W */
+# define IRQSTATUS_SCSI BIT(0)
+# define IRQSTATUS_TIMER BIT(2)
+# define IRQSTATUS_FIFO BIT(3)
+# define IRQSTATUS_MASK 0x0f
+
+#define IFSELECT 0x01 /* W */
+# define IF_IFSEL BIT(0)
+# define IF_REGSEL BIT(2)
+
+#define FIFOSTATUS 0x01 /* R */
+# define FIFOSTATUS_CHIP_REVISION_MASK 0x0f
+# define FIFOSTATUS_CHIP_ID_MASK 0x70
+# define FIFOSTATUS_FULL_EMPTY BIT(7)
+
+#define INDEXREG 0x02 /* R/W */
+#define DATAREG 0x03 /* R/W */
+#define FIFODATA 0x04 /* R/W */
+#define FIFODATA1 0x05 /* R/W */
+#define FIFODATA2 0x06 /* R/W */
+#define FIFODATA3 0x07 /* R/W */
+
+/*====================================================================
+ * indexed register
+ ====================================================================*/
+#define EXTBUSCTRL 0x10 /* R/W,deleted */
+
+#define CLOCKDIV 0x11 /* R/W */
+# define CLOCK_40M 0x02
+# define CLOCK_20M 0x01
+# define FAST_20 BIT(2)
+
+#define TERMPWRCTRL 0x13 /* R/W */
+# define POWER_ON BIT(0)
+
+#define SCSIIRQMODE 0x15 /* R/W */
+# define SCSI_PHASE_CHANGE_EI BIT(0)
+# define RESELECT_EI BIT(4)
+# define FIFO_IRQ_EI BIT(5)
+# define SCSI_RESET_IRQ_EI BIT(6)
+
+#define IRQPHASESENCE 0x16 /* R */
+# define LATCHED_MSG BIT(0)
+# define LATCHED_IO BIT(1)
+# define LATCHED_CD BIT(2)
+# define LATCHED_BUS_FREE BIT(3)
+# define PHASE_CHANGE_IRQ BIT(4)
+# define RESELECT_IRQ BIT(5)
+# define FIFO_IRQ BIT(6)
+# define SCSI_RESET_IRQ BIT(7)
+
+#define TIMERCOUNT 0x17 /* R/W */
+
+#define SCSIBUSCTRL 0x18 /* R/W */
+# define SCSI_SEL BIT(0)
+# define SCSI_RST BIT(1)
+# define SCSI_DATAOUT_ENB BIT(2)
+# define SCSI_ATN BIT(3)
+# define SCSI_ACK BIT(4)
+# define SCSI_BSY BIT(5)
+# define AUTODIRECTION BIT(6)
+# define ACKENB BIT(7)
+
+#define SCSIBUSMON 0x19 /* R */
+
+#define SETARBIT 0x1A /* W */
+# define ARBIT_GO BIT(0)
+# define ARBIT_FLAG_CLEAR BIT(1)
+
+#define ARBITSTATUS 0x1A /* R */
+/*# define ARBIT_GO BIT(0)*/
+# define ARBIT_WIN BIT(1)
+# define ARBIT_FAIL BIT(2)
+# define RESELECT_FLAG BIT(3)
+
+#define PARITYCTRL 0x1B /* W */
+#define PARITYSTATUS 0x1B /* R */
+
+#define COMMANDCTRL 0x1C /* W */
+# define CLEAR_COMMAND_POINTER BIT(0)
+# define AUTO_COMMAND_GO BIT(1)
+
+#define RESELECTID 0x1C /* R */
+#define COMMANDDATA 0x1D /* R/W */
+
+#define POINTERCLR 0x1E /* W */
+# define POINTER_CLEAR BIT(0)
+# define ACK_COUNTER_CLEAR BIT(1)
+# define REQ_COUNTER_CLEAR BIT(2)
+# define HOST_COUNTER_CLEAR BIT(3)
+# define READ_SOURCE (BIT(4) | BIT(5))
+# define ACK_COUNTER (0)
+# define REQ_COUNTER (BIT(4))
+# define HOST_COUNTER (BIT(5))
+
+#define TRANSFERCOUNT 0x1E /* R */
+
+#define TRANSFERMODE 0x20 /* R/W */
+# define MODE_MEM8 BIT(0)
+# define MODE_MEM32 BIT(1)
+# define MODE_ADR24 BIT(2)
+# define MODE_ADR32 BIT(3)
+# define MODE_IO8 BIT(4)
+# define MODE_IO32 BIT(5)
+# define TRANSFER_GO BIT(6)
+# define BRAIND BIT(7)
+
+#define SYNCREG 0x21 /* R/W */
+# define SYNCREG_OFFSET_MASK 0x0f
+# define SYNCREG_PERIOD_MASK 0xf0
+# define SYNCREG_PERIOD_SHIFT 4
+
+#define SCSIDATALATCH 0x22 /* W */
+#define SCSIDATAIN 0x22 /* R */
+#define SCSIDATAWITHACK 0x23 /* R/W */
+#define SCAMCONTROL 0x24 /* W */
+#define SCAMSTATUS 0x24 /* R */
+#define SCAMDATA 0x25 /* R/W */
+
+#define OTHERCONTROL 0x26 /* R/W */
+# define TPL_ROM_WRITE_EN BIT(0)
+# define TPWR_OUT BIT(1)
+# define TPWR_SENSE BIT(2)
+# define RA8_CONTROL BIT(3)
+
+#define ACKWIDTH 0x27 /* R/W */
+#define CLRTESTPNT 0x28 /* W */
+#define ACKCNTLD 0x29 /* W */
+#define REQCNTLD 0x2A /* W */
+#define HSTCNTLD 0x2B /* W */
+#define CHECKSUM 0x2C /* R/W */
+
+/************************************************************************
+ * Input status bit definitions.
+ ************************************************************************/
+#define S_MESSAGE BIT(0) /* Message line from SCSI bus */
+#define S_IO BIT(1) /* Input/Output line from SCSI bus */
+#define S_CD BIT(2) /* Command/Data line from SCSI bus */
+#define S_BUSY BIT(3) /* Busy line from SCSI bus */
+#define S_ACK BIT(4) /* Acknowledge line from SCSI bus */
+#define S_REQUEST BIT(5) /* Request line from SCSI bus */
+#define S_SELECT BIT(6) /* */
+#define S_ATN BIT(7) /* */
+
+/***********************************************************************
+ * Useful Bus Monitor status combinations.
+ ***********************************************************************/
+#define BUSMON_SEL S_SELECT
+#define BUSMON_BSY S_BUSY
+#define BUSMON_REQ S_REQUEST
+#define BUSMON_IO S_IO
+#define BUSMON_ACK S_ACK
+#define BUSMON_BUS_FREE 0
+#define BUSMON_COMMAND ( S_BUSY | S_CD | S_REQUEST )
+#define BUSMON_MESSAGE_IN ( S_BUSY | S_CD | S_IO | S_MESSAGE | S_REQUEST )
+#define BUSMON_MESSAGE_OUT ( S_BUSY | S_CD | S_MESSAGE | S_REQUEST )
+#define BUSMON_DATA_IN ( S_BUSY | S_IO | S_REQUEST )
+#define BUSMON_DATA_OUT ( S_BUSY | S_REQUEST )
+#define BUSMON_STATUS ( S_BUSY | S_CD | S_IO | S_REQUEST )
+#define BUSMON_SELECT ( S_IO | S_SELECT )
+#define BUSMON_RESELECT ( S_IO | S_SELECT )
+#define BUSMON_PHASE_MASK ( S_CD | S_IO | S_MESSAGE | S_SELECT )
+
+#define BUSPHASE_SELECT ( BUSMON_SELECT & BUSMON_PHASE_MASK )
+#define BUSPHASE_COMMAND ( BUSMON_COMMAND & BUSMON_PHASE_MASK )
+#define BUSPHASE_MESSAGE_IN ( BUSMON_MESSAGE_IN & BUSMON_PHASE_MASK )
+#define BUSPHASE_MESSAGE_OUT ( BUSMON_MESSAGE_OUT & BUSMON_PHASE_MASK )
+#define BUSPHASE_DATA_IN ( BUSMON_DATA_IN & BUSMON_PHASE_MASK )
+#define BUSPHASE_DATA_OUT ( BUSMON_DATA_OUT & BUSMON_PHASE_MASK )
+#define BUSPHASE_STATUS ( BUSMON_STATUS & BUSMON_PHASE_MASK )
+
+/*====================================================================*/
+
+typedef struct scsi_info_t {
+ struct pcmcia_device *p_dev;
+ struct Scsi_Host *host;
+ int stop;
+} scsi_info_t;
+
+
+/* synchronous transfer negotiation data */
+typedef struct _sync_data {
+ unsigned int SyncNegotiation;
+#define SYNC_NOT_YET 0
+#define SYNC_OK 1
+#define SYNC_NG 2
+
+ unsigned int SyncPeriod;
+ unsigned int SyncOffset;
+ unsigned char SyncRegister;
+ unsigned char AckWidth;
+} sync_data;
+
+typedef struct _nsp_hw_data {
+ unsigned int BaseAddress;
+ unsigned int NumAddress;
+ unsigned int IrqNumber;
+
+ unsigned long MmioAddress;
+#define NSP_MMIO_OFFSET 0x0800
+ unsigned long MmioLength;
+
+ unsigned char ScsiClockDiv;
+
+ unsigned char TransferMode;
+
+ int TimerCount;
+ int SelectionTimeOut;
+ struct scsi_cmnd *CurrentSC;
+ //int CurrnetTarget;
+
+ int FifoCount;
+
+#define MSGBUF_SIZE 20
+ unsigned char MsgBuffer[MSGBUF_SIZE];
+ int MsgLen;
+
+#define N_TARGET 8
+ sync_data Sync[N_TARGET];
+
+ char nspinfo[110]; /* description */
+ spinlock_t Lock;
+
+ scsi_info_t *ScsiInfo; /* attach <-> detect glue */
+
+
+#ifdef NSP_DEBUG
+ int CmdId; /* Accepted command serial number.
+ Used for debugging. */
+#endif
+} nsp_hw_data;
+
+/****************************************************************************
+ *
+ */
+
+/* Card service functions */
+static void nsp_cs_detach (struct pcmcia_device *p_dev);
+static void nsp_cs_release(struct pcmcia_device *link);
+static int nsp_cs_config (struct pcmcia_device *link);
+
+/* Linux SCSI subsystem specific functions */
+static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht);
+static const char *nsp_info (struct Scsi_Host *shpnt);
+static int nsp_show_info (struct seq_file *m,
+ struct Scsi_Host *host);
+static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
+
+/* Error handler */
+/*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/
+/*static int nsp_eh_device_reset(struct scsi_cmnd *SCpnt);*/
+static int nsp_eh_bus_reset (struct scsi_cmnd *SCpnt);
+static int nsp_eh_host_reset (struct scsi_cmnd *SCpnt);
+static int nsp_bus_reset (nsp_hw_data *data);
+
+/* */
+static int nsphw_init (nsp_hw_data *data);
+static int nsphw_start_selection(struct scsi_cmnd *SCpnt);
+static void nsp_start_timer (struct scsi_cmnd *SCpnt, int time);
+static int nsp_fifo_count (struct scsi_cmnd *SCpnt);
+static void nsp_pio_read (struct scsi_cmnd *SCpnt);
+static void nsp_pio_write (struct scsi_cmnd *SCpnt);
+static int nsp_nexus (struct scsi_cmnd *SCpnt);
+static void nsp_scsi_done (struct scsi_cmnd *SCpnt);
+static int nsp_analyze_sdtr (struct scsi_cmnd *SCpnt);
+static int nsp_negate_signal (struct scsi_cmnd *SCpnt,
+ unsigned char mask, char *str);
+static int nsp_expect_signal (struct scsi_cmnd *SCpnt,
+ unsigned char current_phase,
+ unsigned char mask);
+static int nsp_xfer (struct scsi_cmnd *SCpnt, int phase);
+static int nsp_dataphase_bypass (struct scsi_cmnd *SCpnt);
+static int nsp_reselected (struct scsi_cmnd *SCpnt);
+static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht);
+
+/* Interrupt handler */
+//static irqreturn_t nspintr(int irq, void *dev_id);
+
+/* Module entry point*/
+static int __init nsp_cs_init(void);
+static void __exit nsp_cs_exit(void);
+
+/* Debug */
+#ifdef NSP_DEBUG
+static void show_command (struct scsi_cmnd *SCpnt);
+static void show_phase (struct scsi_cmnd *SCpnt);
+static void show_busphase(unsigned char stat);
+static void show_message (nsp_hw_data *data);
+#else
+# define show_command(ptr) /* */
+# define show_phase(SCpnt) /* */
+# define show_busphase(stat) /* */
+# define show_message(data) /* */
+#endif
+
+/*
+ * SCSI phase
+ */
+enum _scsi_phase {
+ PH_UNDETERMINED ,
+ PH_ARBSTART ,
+ PH_SELSTART ,
+ PH_SELECTED ,
+ PH_COMMAND ,
+ PH_DATA ,
+ PH_STATUS ,
+ PH_MSG_IN ,
+ PH_MSG_OUT ,
+ PH_DISCONNECT ,
+ PH_RESELECT ,
+ PH_ABORT ,
+ PH_RESET
+};
+
+enum _data_in_out {
+ IO_UNKNOWN,
+ IO_IN,
+ IO_OUT
+};
+
+enum _burst_mode {
+ BURST_IO8 = 0,
+ BURST_IO32 = 1,
+ BURST_MEM32 = 2,
+};
+
+/**************************************************************************
+ * SCSI messaage
+ */
+#define MSG_COMMAND_COMPLETE 0x00
+#define MSG_EXTENDED 0x01
+#define MSG_ABORT 0x06
+#define MSG_NO_OPERATION 0x08
+#define MSG_BUS_DEVICE_RESET 0x0c
+
+#define MSG_EXT_SDTR 0x01
+
+/* scatter-gather table */
+# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
+
+#endif /*__nsp_cs__*/
+/* end */
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
new file mode 100644
index 000000000..6aa7d269d
--- /dev/null
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -0,0 +1,215 @@
+/*========================================================================
+ Debug routines for nsp_cs
+ By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+=========================================================================*/
+
+/* $Id: nsp_debug.c,v 1.3 2003/07/26 14:21:09 elca Exp $ */
+
+/*
+ * Show the command data of a command
+ */
+static const char unknown[] = "UNKNOWN";
+
+static const char * group_0_commands[] = {
+/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense",
+/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks",
+/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown,
+/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
+/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve",
+/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit",
+/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic",
+/* 1e-1f */ "Prevent/Allow Medium Removal", unknown,
+};
+
+
+static const char *group_1_commands[] = {
+/* 20-22 */ unknown, unknown, unknown,
+/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)",
+/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown,
+/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal",
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position",
+/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data",
+/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer",
+/* 3d-3f */ "Update Block", "Read Long", "Write Long",
+};
+
+
+static const char *group_2_commands[] = {
+/* 40-41 */ "Change Definition", "Write Same",
+/* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)",
+/* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown,
+/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
+/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
+/* 5c-5f */ unknown, unknown, unknown,
+};
+
+#define group(opcode) (((opcode) >> 5) & 7)
+
+#define RESERVED_GROUP 0
+#define VENDOR_GROUP 1
+#define NOTEXT_GROUP 2
+
+static const char **commands[] = {
+ group_0_commands, group_1_commands, group_2_commands,
+ (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP,
+ (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP,
+ (const char **) VENDOR_GROUP
+};
+
+static const char reserved[] = "RESERVED";
+static const char vendor[] = "VENDOR SPECIFIC";
+
+static void print_opcodek(unsigned char opcode)
+{
+ const char **table = commands[ group(opcode) ];
+
+ switch ((unsigned long) table) {
+ case RESERVED_GROUP:
+ printk("%s[%02x] ", reserved, opcode);
+ break;
+ case NOTEXT_GROUP:
+ printk("%s(notext)[%02x] ", unknown, opcode);
+ break;
+ case VENDOR_GROUP:
+ printk("%s[%02x] ", vendor, opcode);
+ break;
+ default:
+ if (table[opcode & 0x1f] != unknown)
+ printk("%s[%02x] ", table[opcode & 0x1f], opcode);
+ else
+ printk("%s[%02x] ", unknown, opcode);
+ break;
+ }
+}
+
+static void print_commandk (unsigned char *command)
+{
+ int i, s;
+ printk(KERN_DEBUG);
+ print_opcodek(command[0]);
+ /*printk(KERN_DEBUG "%s ", __func__);*/
+ if ((command[0] >> 5) == 6 ||
+ (command[0] >> 5) == 7 ) {
+ s = 12; /* vender specific */
+ } else {
+ s = COMMAND_SIZE(command[0]);
+ }
+ for ( i = 1; i < s; ++i) {
+ printk("%02x ", command[i]);
+ }
+
+ switch (s) {
+ case 6:
+ printk("LBA=%d len=%d",
+ (((unsigned int)command[1] & 0x0f) << 16) |
+ ( (unsigned int)command[2] << 8) |
+ ( (unsigned int)command[3] ),
+ (unsigned int)command[4]
+ );
+ break;
+ case 10:
+ printk("LBA=%d len=%d",
+ ((unsigned int)command[2] << 24) |
+ ((unsigned int)command[3] << 16) |
+ ((unsigned int)command[4] << 8) |
+ ((unsigned int)command[5] ),
+ ((unsigned int)command[7] << 8) |
+ ((unsigned int)command[8] )
+ );
+ break;
+ case 12:
+ printk("LBA=%d len=%d",
+ ((unsigned int)command[2] << 24) |
+ ((unsigned int)command[3] << 16) |
+ ((unsigned int)command[4] << 8) |
+ ((unsigned int)command[5] ),
+ ((unsigned int)command[6] << 24) |
+ ((unsigned int)command[7] << 16) |
+ ((unsigned int)command[8] << 8) |
+ ((unsigned int)command[9] )
+ );
+ break;
+ default:
+ break;
+ }
+ printk("\n");
+}
+
+static void show_command(struct scsi_cmnd *SCpnt)
+{
+ print_commandk(SCpnt->cmnd);
+}
+
+static void show_phase(struct scsi_cmnd *SCpnt)
+{
+ int i = SCpnt->SCp.phase;
+
+ char *ph[] = {
+ "PH_UNDETERMINED",
+ "PH_ARBSTART",
+ "PH_SELSTART",
+ "PH_SELECTED",
+ "PH_COMMAND",
+ "PH_DATA",
+ "PH_STATUS",
+ "PH_MSG_IN",
+ "PH_MSG_OUT",
+ "PH_DISCONNECT",
+ "PH_RESELECT"
+ };
+
+ if ( i < PH_UNDETERMINED || i > PH_RESELECT ) {
+ printk(KERN_DEBUG "scsi phase: unknown(%d)\n", i);
+ return;
+ }
+
+ printk(KERN_DEBUG "scsi phase: %s\n", ph[i]);
+
+ return;
+}
+
+static void show_busphase(unsigned char stat)
+{
+ switch(stat) {
+ case BUSPHASE_COMMAND:
+ printk(KERN_DEBUG "BUSPHASE_COMMAND\n");
+ break;
+ case BUSPHASE_MESSAGE_IN:
+ printk(KERN_DEBUG "BUSPHASE_MESSAGE_IN\n");
+ break;
+ case BUSPHASE_MESSAGE_OUT:
+ printk(KERN_DEBUG "BUSPHASE_MESSAGE_OUT\n");
+ break;
+ case BUSPHASE_DATA_IN:
+ printk(KERN_DEBUG "BUSPHASE_DATA_IN\n");
+ break;
+ case BUSPHASE_DATA_OUT:
+ printk(KERN_DEBUG "BUSPHASE_DATA_OUT\n");
+ break;
+ case BUSPHASE_STATUS:
+ printk(KERN_DEBUG "BUSPHASE_STATUS\n");
+ break;
+ case BUSPHASE_SELECT:
+ printk(KERN_DEBUG "BUSPHASE_SELECT\n");
+ break;
+ default:
+ printk(KERN_DEBUG "BUSPHASE_other\n");
+ break;
+ }
+}
+
+static void show_message(nsp_hw_data *data)
+{
+ int i;
+
+ printk(KERN_DEBUG "msg:");
+ for(i=0; i < data->MsgLen; i++) {
+ printk(" %02x", data->MsgBuffer[i]);
+ }
+ printk("\n");
+}
+
+/* end */
diff --git a/drivers/scsi/pcmcia/nsp_io.h b/drivers/scsi/pcmcia/nsp_io.h
new file mode 100644
index 000000000..3b8746f85
--- /dev/null
+++ b/drivers/scsi/pcmcia/nsp_io.h
@@ -0,0 +1,274 @@
+/*
+ NinjaSCSI I/O funtions
+ By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+ */
+
+/* $Id: nsp_io.h,v 1.3 2003/08/04 21:15:26 elca Exp $ */
+
+#ifndef __NSP_IO_H__
+#define __NSP_IO_H__
+
+static inline void nsp_write(unsigned int base,
+ unsigned int index,
+ unsigned char val);
+static inline unsigned char nsp_read(unsigned int base,
+ unsigned int index);
+static inline void nsp_index_write(unsigned int BaseAddr,
+ unsigned int Register,
+ unsigned char Value);
+static inline unsigned char nsp_index_read(unsigned int BaseAddr,
+ unsigned int Register);
+
+/*******************************************************************
+ * Basic IO
+ */
+
+static inline void nsp_write(unsigned int base,
+ unsigned int index,
+ unsigned char val)
+{
+ outb(val, (base + index));
+}
+
+static inline unsigned char nsp_read(unsigned int base,
+ unsigned int index)
+{
+ return inb(base + index);
+}
+
+
+/**********************************************************************
+ * Indexed IO
+ */
+static inline unsigned char nsp_index_read(unsigned int BaseAddr,
+ unsigned int Register)
+{
+ outb(Register, BaseAddr + INDEXREG);
+ return inb(BaseAddr + DATAREG);
+}
+
+static inline void nsp_index_write(unsigned int BaseAddr,
+ unsigned int Register,
+ unsigned char Value)
+{
+ outb(Register, BaseAddr + INDEXREG);
+ outb(Value, BaseAddr + DATAREG);
+}
+
+/*********************************************************************
+ * fifo func
+ */
+
+/* read 8 bit FIFO */
+static inline void nsp_multi_read_1(unsigned int BaseAddr,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ insb(BaseAddr + Register, buf, count);
+}
+
+static inline void nsp_fifo8_read(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ /*nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx", buf, count);*/
+ nsp_multi_read_1(base, FIFODATA, buf, count);
+}
+
+/*--------------------------------------------------------------*/
+
+/* read 16 bit FIFO */
+static inline void nsp_multi_read_2(unsigned int BaseAddr,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ insw(BaseAddr + Register, buf, count);
+}
+
+static inline void nsp_fifo16_read(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*2", buf, count);
+ nsp_multi_read_2(base, FIFODATA, buf, count);
+}
+
+/*--------------------------------------------------------------*/
+
+/* read 32bit FIFO */
+static inline void nsp_multi_read_4(unsigned int BaseAddr,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ insl(BaseAddr + Register, buf, count);
+}
+
+static inline void nsp_fifo32_read(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*4", buf, count);
+ nsp_multi_read_4(base, FIFODATA, buf, count);
+}
+
+/*----------------------------------------------------------*/
+
+/* write 8bit FIFO */
+static inline void nsp_multi_write_1(unsigned int BaseAddr,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ outsb(BaseAddr + Register, buf, count);
+}
+
+static inline void nsp_fifo8_write(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ nsp_multi_write_1(base, FIFODATA, buf, count);
+}
+
+/*---------------------------------------------------------*/
+
+/* write 16bit FIFO */
+static inline void nsp_multi_write_2(unsigned int BaseAddr,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ outsw(BaseAddr + Register, buf, count);
+}
+
+static inline void nsp_fifo16_write(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ nsp_multi_write_2(base, FIFODATA, buf, count);
+}
+
+/*---------------------------------------------------------*/
+
+/* write 32bit FIFO */
+static inline void nsp_multi_write_4(unsigned int BaseAddr,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ outsl(BaseAddr + Register, buf, count);
+}
+
+static inline void nsp_fifo32_write(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ nsp_multi_write_4(base, FIFODATA, buf, count);
+}
+
+
+/*====================================================================*/
+
+static inline void nsp_mmio_write(unsigned long base,
+ unsigned int index,
+ unsigned char val)
+{
+ unsigned char *ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + index);
+
+ writeb(val, ptr);
+}
+
+static inline unsigned char nsp_mmio_read(unsigned long base,
+ unsigned int index)
+{
+ unsigned char *ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + index);
+
+ return readb(ptr);
+}
+
+/*-----------*/
+
+static inline unsigned char nsp_mmio_index_read(unsigned long base,
+ unsigned int reg)
+{
+ unsigned char *index_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + INDEXREG);
+ unsigned char *data_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + DATAREG);
+
+ writeb((unsigned char)reg, index_ptr);
+ return readb(data_ptr);
+}
+
+static inline void nsp_mmio_index_write(unsigned long base,
+ unsigned int reg,
+ unsigned char val)
+{
+ unsigned char *index_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + INDEXREG);
+ unsigned char *data_ptr = (unsigned char *)(base + NSP_MMIO_OFFSET + DATAREG);
+
+ writeb((unsigned char)reg, index_ptr);
+ writeb(val, data_ptr);
+}
+
+/* read 32bit FIFO */
+static inline void nsp_mmio_multi_read_4(unsigned long base,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ unsigned long *ptr = (unsigned long *)(base + Register);
+ unsigned long *tmp = (unsigned long *)buf;
+ int i;
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "base 0x%0lx ptr 0x%p",base,ptr);
+
+ for (i = 0; i < count; i++) {
+ *tmp = readl(ptr);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "<%d,%p,%p,%lx>", i, ptr, tmp, *tmp);
+ tmp++;
+ }
+}
+
+static inline void nsp_mmio_fifo32_read(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*4", buf, count);
+ nsp_mmio_multi_read_4(base, FIFODATA, buf, count);
+}
+
+static inline void nsp_mmio_multi_write_4(unsigned long base,
+ unsigned int Register,
+ void *buf,
+ unsigned long count)
+{
+ unsigned long *ptr = (unsigned long *)(base + Register);
+ unsigned long *tmp = (unsigned long *)buf;
+ int i;
+
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "base 0x%0lx ptr 0x%p",base,ptr);
+
+ for (i = 0; i < count; i++) {
+ writel(*tmp, ptr);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "<%d,%p,%p,%lx>", i, ptr, tmp, *tmp);
+ tmp++;
+ }
+}
+
+static inline void nsp_mmio_fifo32_write(unsigned int base,
+ void *buf,
+ unsigned long count)
+{
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "buf=0x%p, count=0x%lx*4", buf, count);
+ nsp_mmio_multi_write_4(base, FIFODATA, buf, count);
+}
+
+
+
+#endif
+/* end */
diff --git a/drivers/scsi/pcmcia/nsp_message.c b/drivers/scsi/pcmcia/nsp_message.c
new file mode 100644
index 000000000..ef593b70d
--- /dev/null
+++ b/drivers/scsi/pcmcia/nsp_message.c
@@ -0,0 +1,78 @@
+/*==========================================================================
+ NinjaSCSI-3 message handler
+ By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+ */
+
+/* $Id: nsp_message.c,v 1.6 2003/07/26 14:21:09 elca Exp $ */
+
+static void nsp_message_in(struct scsi_cmnd *SCpnt)
+{
+ unsigned int base = SCpnt->device->host->io_port;
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ unsigned char data_reg, control_reg;
+ int ret, len;
+
+ /*
+ * XXX: NSP QUIRK
+ * NSP invoke interrupts only in the case of scsi phase changes,
+ * therefore we should poll the scsi phase here to catch
+ * the next "msg in" if exists (no scsi phase changes).
+ */
+ ret = 16;
+ len = 0;
+
+ nsp_dbg(NSP_DEBUG_MSGINOCCUR, "msgin loop");
+ do {
+ /* read data */
+ data_reg = nsp_index_read(base, SCSIDATAIN);
+
+ /* assert ACK */
+ control_reg = nsp_index_read(base, SCSIBUSCTRL);
+ control_reg |= SCSI_ACK;
+ nsp_index_write(base, SCSIBUSCTRL, control_reg);
+ nsp_negate_signal(SCpnt, BUSMON_REQ, "msgin<REQ>");
+
+ data->MsgBuffer[len] = data_reg; len++;
+
+ /* deassert ACK */
+ control_reg = nsp_index_read(base, SCSIBUSCTRL);
+ control_reg &= ~SCSI_ACK;
+ nsp_index_write(base, SCSIBUSCTRL, control_reg);
+
+ /* catch a next signal */
+ ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_IN, BUSMON_REQ);
+ } while (ret > 0 && MSGBUF_SIZE > len);
+
+ data->MsgLen = len;
+
+}
+
+static void nsp_message_out(struct scsi_cmnd *SCpnt)
+{
+ nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
+ int ret = 1;
+ int len = data->MsgLen;
+
+ /*
+ * XXX: NSP QUIRK
+ * NSP invoke interrupts only in the case of scsi phase changes,
+ * therefore we should poll the scsi phase here to catch
+ * the next "msg out" if exists (no scsi phase changes).
+ */
+
+ nsp_dbg(NSP_DEBUG_MSGOUTOCCUR, "msgout loop");
+ do {
+ if (nsp_xfer(SCpnt, BUSPHASE_MESSAGE_OUT)) {
+ nsp_msg(KERN_DEBUG, "msgout: xfer short");
+ }
+
+ /* catch a next signal */
+ ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_OUT, BUSMON_REQ);
+ } while (ret > 0 && len-- > 0);
+
+}
+
+/* end */
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
new file mode 100644
index 000000000..bcaf89fe0
--- /dev/null
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -0,0 +1,318 @@
+/*======================================================================
+
+ A driver for the Qlogic SCSI card
+
+ qlogic_cs.c 1.79 2000/06/12 21:27:26
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in which
+ case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <scsi/scsi.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_ioctl.h>
+#include <linux/interrupt.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "../qlogicfas408.h"
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/ciscode.h>
+
+/* Set the following to 2 to use normal interrupt (active high/totempole-
+ * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open
+ * drain
+ */
+#define INT_TYPE 0
+
+static char qlogic_name[] = "qlogic_cs";
+
+static struct scsi_host_template qlogicfas_driver_template = {
+ .module = THIS_MODULE,
+ .name = qlogic_name,
+ .proc_name = qlogic_name,
+ .info = qlogicfas408_info,
+ .queuecommand = qlogicfas408_queuecommand,
+ .eh_abort_handler = qlogicfas408_abort,
+ .eh_bus_reset_handler = qlogicfas408_bus_reset,
+ .bios_param = qlogicfas408_biosparam,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+/*====================================================================*/
+
+typedef struct scsi_info_t {
+ struct pcmcia_device *p_dev;
+ struct Scsi_Host *host;
+ unsigned short manf_id;
+} scsi_info_t;
+
+static void qlogic_release(struct pcmcia_device *link);
+static void qlogic_detach(struct pcmcia_device *p_dev);
+static int qlogic_config(struct pcmcia_device * link);
+
+static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host,
+ struct pcmcia_device *link, int qbase, int qlirq)
+{
+ int qltyp; /* type of chip */
+ int qinitid;
+ struct Scsi_Host *shost; /* registered host structure */
+ struct qlogicfas408_priv *priv;
+
+ qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
+ qinitid = host->this_id;
+ if (qinitid < 0)
+ qinitid = 7; /* if no ID, use 7 */
+
+ qlogicfas408_setup(qbase, qinitid, INT_TYPE);
+
+ host->name = qlogic_name;
+ shost = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
+ if (!shost)
+ goto err;
+ shost->io_port = qbase;
+ shost->n_io_port = 16;
+ shost->dma_channel = -1;
+ if (qlirq != -1)
+ shost->irq = qlirq;
+
+ priv = get_priv_by_host(shost);
+ priv->qlirq = qlirq;
+ priv->qbase = qbase;
+ priv->qinitid = qinitid;
+ priv->shost = shost;
+ priv->int_type = INT_TYPE;
+
+ if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogic_name, shost))
+ goto free_scsi_host;
+
+ sprintf(priv->qinfo,
+ "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
+ qltyp, qbase, qlirq, QL_TURBO_PDMA);
+
+ if (scsi_add_host(shost, NULL))
+ goto free_interrupt;
+
+ scsi_scan_host(shost);
+
+ return shost;
+
+free_interrupt:
+ free_irq(qlirq, shost);
+
+free_scsi_host:
+ scsi_host_put(shost);
+
+err:
+ return NULL;
+}
+static int qlogic_probe(struct pcmcia_device *link)
+{
+ scsi_info_t *info;
+
+ dev_dbg(&link->dev, "qlogic_attach()\n");
+
+ /* Create new SCSI device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->p_dev = link;
+ link->priv = info;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_regs = PRESENT_OPTION;
+
+ return qlogic_config(link);
+} /* qlogic_attach */
+
+/*====================================================================*/
+
+static void qlogic_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "qlogic_detach\n");
+
+ qlogic_release(link);
+ kfree(link->priv);
+
+} /* qlogic_detach */
+
+/*====================================================================*/
+
+static int qlogic_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+
+ if (p_dev->resource[0]->start == 0)
+ return -ENODEV;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int qlogic_config(struct pcmcia_device * link)
+{
+ scsi_info_t *info = link->priv;
+ int ret;
+ struct Scsi_Host *host;
+
+ dev_dbg(&link->dev, "qlogic_config\n");
+
+ ret = pcmcia_loop_config(link, qlogic_config_check, NULL);
+ if (ret)
+ goto failed;
+
+ if (!link->irq)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
+ /* set ATAcmd */
+ outb(0xb4, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
+ }
+
+ /* The KXL-810AN has a bigger IO port window */
+ if (resource_size(link->resource[0]) == 32)
+ host = qlogic_detect(&qlogicfas_driver_template, link,
+ link->resource[0]->start + 16, link->irq);
+ else
+ host = qlogic_detect(&qlogicfas_driver_template, link,
+ link->resource[0]->start, link->irq);
+
+ if (!host) {
+ printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
+ goto failed;
+ }
+
+ info->host = host;
+
+ return 0;
+
+failed:
+ pcmcia_disable_device(link);
+ return -ENODEV;
+} /* qlogic_config */
+
+/*====================================================================*/
+
+static void qlogic_release(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+
+ dev_dbg(&link->dev, "qlogic_release\n");
+
+ scsi_remove_host(info->host);
+
+ free_irq(link->irq, info->host);
+ pcmcia_disable_device(link);
+
+ scsi_host_put(info->host);
+}
+
+/*====================================================================*/
+
+static int qlogic_resume(struct pcmcia_device *link)
+{
+ scsi_info_t *info = link->priv;
+
+ pcmcia_enable_device(link);
+ if ((info->manf_id == MANFID_MACNICA) ||
+ (info->manf_id == MANFID_PIONEER) ||
+ (info->manf_id == 0x0098)) {
+ outb(0x80, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
+ }
+ /* Ugggglllyyyy!!! */
+ qlogicfas408_bus_reset(NULL);
+
+ return 0;
+}
+
+static const struct pcmcia_device_id qlogic_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("EIger Labs", "PCMCIA-to-SCSI Adapter", 0x88395fa7, 0x33b7a5e6),
+ PCMCIA_DEVICE_PROD_ID12("EPSON", "SCSI-2 PC Card SC200", 0xd361772f, 0x299d1751),
+ PCMCIA_DEVICE_PROD_ID12("MACNICA", "MIRACLE SCSI-II mPS110", 0x20841b68, 0xab3c3b6d),
+ PCMCIA_DEVICE_PROD_ID12("MIDORI ELECTRONICS ", "CN-SC43", 0x6534382a, 0xd67eee79),
+ PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J03R", 0x18df0ba0, 0x24662e8a),
+ PCMCIA_DEVICE_PROD_ID12("KME ", "KXLC003", 0x82375a27, 0xf68e5bf7),
+ PCMCIA_DEVICE_PROD_ID12("KME ", "KXLC004", 0x82375a27, 0x68eace54),
+ PCMCIA_DEVICE_PROD_ID12("KME", "KXLC101", 0x3faee676, 0x194250ec),
+ PCMCIA_DEVICE_PROD_ID12("QLOGIC CORPORATION", "pc05", 0xd77b2930, 0xa85b2735),
+ PCMCIA_DEVICE_PROD_ID12("QLOGIC CORPORATION", "pc05 rev 1.10", 0xd77b2930, 0x70f8b5f8),
+ PCMCIA_DEVICE_PROD_ID123("KME", "KXLC002", "00", 0x3faee676, 0x81896b61, 0xf99f065f),
+ PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "SCSI2 CARD 37", 0x85c10e17, 0x1a2640c1),
+ PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "SCSC200A PC CARD SCSI", 0xb4585a1a, 0xa6f06ebe),
+ PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "SCSC200B PC CARD SCSI-10", 0xb4585a1a, 0x0a88dea0),
+ /* these conflict with other cards! */
+ /* PCMCIA_DEVICE_PROD_ID123("MACNICA", "MIRACLE SCSI", "mPS100", 0x20841b68, 0xf8dedaeb, 0x89f7fafb), */
+ /* PCMCIA_DEVICE_PROD_ID123("MACNICA", "MIRACLE SCSI", "mPS100", 0x20841b68, 0xf8dedaeb, 0x89f7fafb), */
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, qlogic_ids);
+
+static struct pcmcia_driver qlogic_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "qlogic_cs",
+ .probe = qlogic_probe,
+ .remove = qlogic_detach,
+ .id_table = qlogic_ids,
+ .resume = qlogic_resume,
+};
+
+static int __init init_qlogic_cs(void)
+{
+ return pcmcia_register_driver(&qlogic_cs_driver);
+}
+
+static void __exit exit_qlogic_cs(void)
+{
+ pcmcia_unregister_driver(&qlogic_cs_driver);
+}
+
+MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
+MODULE_DESCRIPTION("Driver for the PCMCIA Qlogic FAS SCSI controllers");
+MODULE_LICENSE("GPL");
+module_init(init_qlogic_cs);
+module_exit(exit_qlogic_cs);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
new file mode 100644
index 000000000..155f95730
--- /dev/null
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -0,0 +1,898 @@
+/*
+* sym53c500_cs.c Bob Tracy (rct@frus.com)
+*
+* A rewrite of the pcmcia-cs add-on driver for newer (circa 1997)
+* New Media Bus Toaster PCMCIA SCSI cards using the Symbios Logic
+* 53c500 controller: intended for use with 2.6 and later kernels.
+* The pcmcia-cs add-on version of this driver is not supported
+* beyond 2.4. It consisted of three files with history/copyright
+* information as follows:
+*
+* SYM53C500.h
+* Bob Tracy (rct@frus.com)
+* Original by Tom Corner (tcorner@via.at).
+* Adapted from NCR53c406a.h which is Copyrighted (C) 1994
+* Normunds Saumanis (normunds@rx.tech.swh.lv)
+*
+* SYM53C500.c
+* Bob Tracy (rct@frus.com)
+* Original driver by Tom Corner (tcorner@via.at) was adapted
+* from NCR53c406a.c which is Copyrighted (C) 1994, 1995, 1996
+* Normunds Saumanis (normunds@fi.ibm.com)
+*
+* sym53c500.c
+* Bob Tracy (rct@frus.com)
+* Original by Tom Corner (tcorner@via.at) was adapted from a
+* driver for the Qlogic SCSI card written by
+* David Hinds (dhinds@allegro.stanford.edu).
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2, or (at your option) any
+* later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*/
+
+#define SYM53C500_DEBUG 0
+#define VERBOSE_SYM53C500_DEBUG 0
+
+/*
+* Set this to 0 if you encounter kernel lockups while transferring
+* data in PIO mode. Note this can be changed via "sysfs".
+*/
+#define USE_FAST_PIO 1
+
+/* =============== End of user configurable parameters ============== */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/ciscode.h>
+
+
+/* ================================================================== */
+
+#define SYNC_MODE 0 /* Synchronous transfer mode */
+
+/* Default configuration */
+#define C1_IMG 0x07 /* ID=7 */
+#define C2_IMG 0x48 /* FE SCSI2 */
+#define C3_IMG 0x20 /* CDB */
+#define C4_IMG 0x04 /* ANE */
+#define C5_IMG 0xa4 /* ? changed from b6= AA PI SIE POL */
+#define C7_IMG 0x80 /* added for SYM53C500 t. corner */
+
+/* Hardware Registers: offsets from io_port (base) */
+
+/* Control Register Set 0 */
+#define TC_LSB 0x00 /* transfer counter lsb */
+#define TC_MSB 0x01 /* transfer counter msb */
+#define SCSI_FIFO 0x02 /* scsi fifo register */
+#define CMD_REG 0x03 /* command register */
+#define STAT_REG 0x04 /* status register */
+#define DEST_ID 0x04 /* selection/reselection bus id */
+#define INT_REG 0x05 /* interrupt status register */
+#define SRTIMOUT 0x05 /* select/reselect timeout reg */
+#define SEQ_REG 0x06 /* sequence step register */
+#define SYNCPRD 0x06 /* synchronous transfer period */
+#define FIFO_FLAGS 0x07 /* indicates # of bytes in fifo */
+#define SYNCOFF 0x07 /* synchronous offset register */
+#define CONFIG1 0x08 /* configuration register */
+#define CLKCONV 0x09 /* clock conversion register */
+/* #define TESTREG 0x0A */ /* test mode register */
+#define CONFIG2 0x0B /* configuration 2 register */
+#define CONFIG3 0x0C /* configuration 3 register */
+#define CONFIG4 0x0D /* configuration 4 register */
+#define TC_HIGH 0x0E /* transfer counter high */
+/* #define FIFO_BOTTOM 0x0F */ /* reserve FIFO byte register */
+
+/* Control Register Set 1 */
+/* #define JUMPER_SENSE 0x00 */ /* jumper sense port reg (r/w) */
+/* #define SRAM_PTR 0x01 */ /* SRAM address pointer reg (r/w) */
+/* #define SRAM_DATA 0x02 */ /* SRAM data register (r/w) */
+#define PIO_FIFO 0x04 /* PIO FIFO registers (r/w) */
+/* #define PIO_FIFO1 0x05 */ /* */
+/* #define PIO_FIFO2 0x06 */ /* */
+/* #define PIO_FIFO3 0x07 */ /* */
+#define PIO_STATUS 0x08 /* PIO status (r/w) */
+/* #define ATA_CMD 0x09 */ /* ATA command/status reg (r/w) */
+/* #define ATA_ERR 0x0A */ /* ATA features/error reg (r/w) */
+#define PIO_FLAG 0x0B /* PIO flag interrupt enable (r/w) */
+#define CONFIG5 0x09 /* configuration 5 register */
+/* #define SIGNATURE 0x0E */ /* signature register (r) */
+/* #define CONFIG6 0x0F */ /* configuration 6 register (r) */
+#define CONFIG7 0x0d
+
+/* select register set 0 */
+#define REG0(x) (outb(C4_IMG, (x) + CONFIG4))
+/* select register set 1 */
+#define REG1(x) outb(C7_IMG, (x) + CONFIG7); outb(C5_IMG, (x) + CONFIG5)
+
+#if SYM53C500_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_SYM53C500_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(x, count) \
+ outb(count & 0xff, (x) + TC_LSB); \
+ outb((count >> 8) & 0xff, (x) + TC_MSB); \
+ outb((count >> 16) & 0xff, (x) + TC_HIGH);
+
+/* Chip commands */
+#define DMA_OP 0x80
+
+#define SCSI_NOP 0x00
+#define FLUSH_FIFO 0x01
+#define CHIP_RESET 0x02
+#define SCSI_RESET 0x03
+#define RESELECT 0x40
+#define SELECT_NO_ATN 0x41
+#define SELECT_ATN 0x42
+#define SELECT_ATN_STOP 0x43
+#define ENABLE_SEL 0x44
+#define DISABLE_SEL 0x45
+#define SELECT_ATN3 0x46
+#define RESELECT3 0x47
+#define TRANSFER_INFO 0x10
+#define INIT_CMD_COMPLETE 0x11
+#define MSG_ACCEPT 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1a
+#define RESET_ATN 0x1b
+#define SEND_MSG 0x20
+#define SEND_STATUS 0x21
+#define SEND_DATA 0x22
+#define DISCONN_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARG_CMD_COMPLETE 0x25
+#define DISCONN 0x27
+#define RECV_MSG 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2a
+#define RECV_CMD_SEQ 0x2b
+#define TARGET_ABORT_DMA 0x04
+
+/* ================================================================== */
+
+struct scsi_info_t {
+ struct pcmcia_device *p_dev;
+ struct Scsi_Host *host;
+ unsigned short manf_id;
+};
+
+/*
+* Repository for per-instance host data.
+*/
+struct sym53c500_data {
+ struct scsi_cmnd *current_SC;
+ int fast_pio;
+};
+
+enum Phase {
+ idle,
+ data_out,
+ data_in,
+ command_ph,
+ status_ph,
+ message_out,
+ message_in
+};
+
+/* ================================================================== */
+
+static void
+chip_init(int io_port)
+{
+ REG1(io_port);
+ outb(0x01, io_port + PIO_STATUS);
+ outb(0x00, io_port + PIO_FLAG);
+
+ outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */
+ outb(C3_IMG, io_port + CONFIG3);
+ outb(C2_IMG, io_port + CONFIG2);
+ outb(C1_IMG, io_port + CONFIG1);
+
+ outb(0x05, io_port + CLKCONV); /* clock conversion factor */
+ outb(0x9C, io_port + SRTIMOUT); /* Selection timeout */
+ outb(0x05, io_port + SYNCPRD); /* Synchronous transfer period */
+ outb(SYNC_MODE, io_port + SYNCOFF); /* synchronous mode */
+}
+
+static void
+SYM53C500_int_host_reset(int io_port)
+{
+ outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */
+ outb(CHIP_RESET, io_port + CMD_REG);
+ outb(SCSI_NOP, io_port + CMD_REG); /* required after reset */
+ outb(SCSI_RESET, io_port + CMD_REG);
+ chip_init(io_port);
+}
+
+static __inline__ int
+SYM53C500_pio_read(int fast_pio, int base, unsigned char *request, unsigned int reqlen)
+{
+ int i;
+ int len; /* current scsi fifo size */
+
+ REG1(base);
+ while (reqlen) {
+ i = inb(base + PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80)
+ return 0;
+
+ switch (i & 0x1e) {
+ default:
+ case 0x10: /* fifo empty */
+ len = 0;
+ break;
+ case 0x0:
+ len = 1;
+ break;
+ case 0x8: /* fifo 1/3 full */
+ len = 42;
+ break;
+ case 0xc: /* fifo 2/3 full */
+ len = 84;
+ break;
+ case 0xe: /* fifo full */
+ len = 128;
+ break;
+ }
+
+ if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+ return 0;
+ }
+
+ if (len) {
+ if (len > reqlen)
+ len = reqlen;
+
+ if (fast_pio && len > 3) {
+ insl(base + PIO_FIFO, request, len >> 2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ } else {
+ while (len--) {
+ *request++ = inb(base + PIO_FIFO);
+ reqlen--;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static __inline__ int
+SYM53C500_pio_write(int fast_pio, int base, unsigned char *request, unsigned int reqlen)
+{
+ int i = 0;
+ int len; /* current scsi fifo size */
+
+ REG1(base);
+ while (reqlen && !(i & 0x40)) {
+ i = inb(base + PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80) /* error */
+ return 0;
+
+ switch (i & 0x1e) {
+ case 0x10:
+ len = 128;
+ break;
+ case 0x0:
+ len = 84;
+ break;
+ case 0x8:
+ len = 42;
+ break;
+ case 0xc:
+ len = 1;
+ break;
+ default:
+ case 0xe:
+ len = 0;
+ break;
+ }
+
+ if (len) {
+ if (len > reqlen)
+ len = reqlen;
+
+ if (fast_pio && len > 3) {
+ outsl(base + PIO_FIFO, request, len >> 2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ } else {
+ while (len--) {
+ outb(*request++, base + PIO_FIFO);
+ reqlen--;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static irqreturn_t
+SYM53C500_intr(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *dev = dev_id;
+ DEB(unsigned char fifo_size;)
+ DEB(unsigned char seq_reg;)
+ unsigned char status, int_reg;
+ unsigned char pio_status;
+ int port_base = dev->io_port;
+ struct sym53c500_data *data =
+ (struct sym53c500_data *)dev->hostdata;
+ struct scsi_cmnd *curSC = data->current_SC;
+ int fast_pio = data->fast_pio;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+
+ VDEB(printk("SYM53C500_intr called\n"));
+
+ REG1(port_base);
+ pio_status = inb(port_base + PIO_STATUS);
+ REG0(port_base);
+ status = inb(port_base + STAT_REG);
+ DEB(seq_reg = inb(port_base + SEQ_REG));
+ int_reg = inb(port_base + INT_REG);
+ DEB(fifo_size = inb(port_base + FIFO_FLAGS) & 0x1f);
+
+#if SYM53C500_DEBUG
+ printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x",
+ status, seq_reg, int_reg, fifo_size);
+ printk(", pio=%02x\n", pio_status);
+#endif /* SYM53C500_DEBUG */
+
+ if (int_reg & 0x80) { /* SCSI reset intr */
+ DEB(printk("SYM53C500: reset intr received\n"));
+ curSC->result = DID_RESET << 16;
+ goto idle_out;
+ }
+
+ if (pio_status & 0x80) {
+ printk("SYM53C500: Warning: PIO error!\n");
+ curSC->result = DID_ERROR << 16;
+ goto idle_out;
+ }
+
+ if (status & 0x20) { /* Parity error */
+ printk("SYM53C500: Warning: parity error!\n");
+ curSC->result = DID_PARITY << 16;
+ goto idle_out;
+ }
+
+ if (status & 0x40) { /* Gross error */
+ printk("SYM53C500: Warning: gross error!\n");
+ curSC->result = DID_ERROR << 16;
+ goto idle_out;
+ }
+
+ if (int_reg & 0x20) { /* Disconnect */
+ DEB(printk("SYM53C500: disconnect intr received\n"));
+ if (curSC->SCp.phase != message_in) { /* Unexpected disconnect */
+ curSC->result = DID_NO_CONNECT << 16;
+ } else { /* Command complete, return status and message */
+ curSC->result = (curSC->SCp.Status & 0xff)
+ | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ }
+ goto idle_out;
+ }
+
+ switch (status & 0x07) { /* scsi phase */
+ case 0x00: /* DATA-OUT */
+ if (int_reg & 0x10) { /* Target requesting info transfer */
+ struct scatterlist *sg;
+ int i;
+
+ curSC->SCp.phase = data_out;
+ VDEB(printk("SYM53C500: Data-Out phase\n"));
+ outb(FLUSH_FIFO, port_base + CMD_REG);
+ LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
+ outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
+
+ scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
+ SYM53C500_pio_write(fast_pio, port_base,
+ sg_virt(sg), sg->length);
+ }
+ REG0(port_base);
+ }
+ break;
+
+ case 0x01: /* DATA-IN */
+ if (int_reg & 0x10) { /* Target requesting info transfer */
+ struct scatterlist *sg;
+ int i;
+
+ curSC->SCp.phase = data_in;
+ VDEB(printk("SYM53C500: Data-In phase\n"));
+ outb(FLUSH_FIFO, port_base + CMD_REG);
+ LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
+ outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
+
+ scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
+ SYM53C500_pio_read(fast_pio, port_base,
+ sg_virt(sg), sg->length);
+ }
+ REG0(port_base);
+ }
+ break;
+
+ case 0x02: /* COMMAND */
+ curSC->SCp.phase = command_ph;
+ printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
+ break;
+
+ case 0x03: /* STATUS */
+ curSC->SCp.phase = status_ph;
+ VDEB(printk("SYM53C500: Status phase\n"));
+ outb(FLUSH_FIFO, port_base + CMD_REG);
+ outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
+ break;
+
+ case 0x04: /* Reserved */
+ case 0x05: /* Reserved */
+ printk("SYM53C500: WARNING: Reserved phase!!!\n");
+ break;
+
+ case 0x06: /* MESSAGE-OUT */
+ DEB(printk("SYM53C500: Message-Out phase\n"));
+ curSC->SCp.phase = message_out;
+ outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
+ outb(MSG_ACCEPT, port_base + CMD_REG);
+ break;
+
+ case 0x07: /* MESSAGE-IN */
+ VDEB(printk("SYM53C500: Message-In phase\n"));
+ curSC->SCp.phase = message_in;
+
+ curSC->SCp.Status = inb(port_base + SCSI_FIFO);
+ curSC->SCp.Message = inb(port_base + SCSI_FIFO);
+
+ VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
+ DEB(printk("Status = %02x Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message));
+
+ if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) {
+ outb(SET_ATN, port_base + CMD_REG); /* Reject message */
+ DEB(printk("Discarding SAVE_POINTERS message\n"));
+ }
+ outb(MSG_ACCEPT, port_base + CMD_REG);
+ break;
+ }
+out:
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ return IRQ_HANDLED;
+
+idle_out:
+ curSC->SCp.phase = idle;
+ curSC->scsi_done(curSC);
+ goto out;
+}
+
+static void
+SYM53C500_release(struct pcmcia_device *link)
+{
+ struct scsi_info_t *info = link->priv;
+ struct Scsi_Host *shost = info->host;
+
+ dev_dbg(&link->dev, "SYM53C500_release\n");
+
+ /*
+ * Do this before releasing/freeing resources.
+ */
+ scsi_remove_host(shost);
+
+ /*
+ * Interrupts getting hosed on card removal. Try
+ * the following code, mostly from qlogicfas.c.
+ */
+ if (shost->irq)
+ free_irq(shost->irq, shost);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+
+ pcmcia_disable_device(link);
+
+ scsi_host_put(shost);
+} /* SYM53C500_release */
+
+static const char*
+SYM53C500_info(struct Scsi_Host *SChost)
+{
+ static char info_msg[256];
+ struct sym53c500_data *data =
+ (struct sym53c500_data *)SChost->hostdata;
+
+ DEB(printk("SYM53C500_info called\n"));
+ (void)snprintf(info_msg, sizeof(info_msg),
+ "SYM53C500 at 0x%lx, IRQ %d, %s PIO mode.",
+ SChost->io_port, SChost->irq, data->fast_pio ? "fast" : "slow");
+ return (info_msg);
+}
+
+static int
+SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ int i;
+ int port_base = SCpnt->device->host->io_port;
+ struct sym53c500_data *data =
+ (struct sym53c500_data *)SCpnt->device->host->hostdata;
+
+ VDEB(printk("SYM53C500_queue called\n"));
+
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
+ SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id,
+ (u8)SCpnt->device->lun, scsi_bufflen(SCpnt)));
+
+ VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
+ printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
+ VDEB(printk("\n"));
+
+ data->current_SC = SCpnt;
+ data->current_SC->scsi_done = done;
+ data->current_SC->SCp.phase = command_ph;
+ data->current_SC->SCp.Status = 0;
+ data->current_SC->SCp.Message = 0;
+
+ /* We are locked here already by the mid layer */
+ REG0(port_base);
+ outb(scmd_id(SCpnt), port_base + DEST_ID); /* set destination */
+ outb(FLUSH_FIFO, port_base + CMD_REG); /* reset the fifos */
+
+ for (i = 0; i < SCpnt->cmd_len; i++) {
+ outb(SCpnt->cmnd[i], port_base + SCSI_FIFO);
+ }
+ outb(SELECT_NO_ATN, port_base + CMD_REG);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(SYM53C500_queue)
+
+static int
+SYM53C500_host_reset(struct scsi_cmnd *SCpnt)
+{
+ int port_base = SCpnt->device->host->io_port;
+
+ DEB(printk("SYM53C500_host_reset called\n"));
+ spin_lock_irq(SCpnt->device->host->host_lock);
+ SYM53C500_int_host_reset(port_base);
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+
+ return SUCCESS;
+}
+
+static int
+SYM53C500_biosparm(struct scsi_device *disk,
+ struct block_device *dev,
+ sector_t capacity, int *info_array)
+{
+ int size;
+
+ DEB(printk("SYM53C500_biosparm called\n"));
+
+ size = capacity;
+ info_array[0] = 64; /* heads */
+ info_array[1] = 32; /* sectors */
+ info_array[2] = size >> 11; /* cylinders */
+ if (info_array[2] > 1024) { /* big disk */
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = size / (255 * 63);
+ }
+ return 0;
+}
+
+static ssize_t
+SYM53C500_show_pio(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *SHp = class_to_shost(dev);
+ struct sym53c500_data *data =
+ (struct sym53c500_data *)SHp->hostdata;
+
+ return snprintf(buf, 4, "%d\n", data->fast_pio);
+}
+
+static ssize_t
+SYM53C500_store_pio(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int pio;
+ struct Scsi_Host *SHp = class_to_shost(dev);
+ struct sym53c500_data *data =
+ (struct sym53c500_data *)SHp->hostdata;
+
+ pio = simple_strtoul(buf, NULL, 0);
+ if (pio == 0 || pio == 1) {
+ data->fast_pio = pio;
+ return count;
+ }
+ else
+ return -EINVAL;
+}
+
+/*
+* SCSI HBA device attributes we want to
+* make available via sysfs.
+*/
+static struct device_attribute SYM53C500_pio_attr = {
+ .attr = {
+ .name = "fast_pio",
+ .mode = (S_IRUGO | S_IWUSR),
+ },
+ .show = SYM53C500_show_pio,
+ .store = SYM53C500_store_pio,
+};
+
+static struct device_attribute *SYM53C500_shost_attrs[] = {
+ &SYM53C500_pio_attr,
+ NULL,
+};
+
+/*
+* scsi_host_template initializer
+*/
+static struct scsi_host_template sym53c500_driver_template = {
+ .module = THIS_MODULE,
+ .name = "SYM53C500",
+ .info = SYM53C500_info,
+ .queuecommand = SYM53C500_queue,
+ .eh_host_reset_handler = SYM53C500_host_reset,
+ .bios_param = SYM53C500_biosparm,
+ .proc_name = "SYM53C500",
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = 32,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = SYM53C500_shost_attrs
+};
+
+static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+
+ if (p_dev->resource[0]->start == 0)
+ return -ENODEV;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int
+SYM53C500_config(struct pcmcia_device *link)
+{
+ struct scsi_info_t *info = link->priv;
+ int ret;
+ int irq_level, port_base;
+ struct Scsi_Host *host;
+ struct scsi_host_template *tpnt = &sym53c500_driver_template;
+ struct sym53c500_data *data;
+
+ dev_dbg(&link->dev, "SYM53C500_config\n");
+
+ info->manf_id = link->manf_id;
+
+ ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL);
+ if (ret)
+ goto failed;
+
+ if (!link->irq)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ /*
+ * That's the trouble with copying liberally from another driver.
+ * Some things probably aren't relevant, and I suspect this entire
+ * section dealing with manufacturer IDs can be scrapped. --rct
+ */
+ if ((info->manf_id == MANFID_MACNICA) ||
+ (info->manf_id == MANFID_PIONEER) ||
+ (info->manf_id == 0x0098)) {
+ /* set ATAcmd */
+ outb(0xb4, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
+ }
+
+ /*
+ * irq_level == 0 implies tpnt->can_queue == 0, which
+ * is not supported in 2.6. Thus, only irq_level > 0
+ * will be allowed.
+ *
+ * Possible port_base values are as follows:
+ *
+ * 0x130, 0x230, 0x280, 0x290,
+ * 0x320, 0x330, 0x340, 0x350
+ */
+ port_base = link->resource[0]->start;
+ irq_level = link->irq;
+
+ DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n",
+ port_base, irq_level, USE_FAST_PIO);)
+
+ chip_init(port_base);
+
+ host = scsi_host_alloc(tpnt, sizeof(struct sym53c500_data));
+ if (!host) {
+ printk("SYM53C500: Unable to register host, giving up.\n");
+ goto err_release;
+ }
+
+ data = (struct sym53c500_data *)host->hostdata;
+
+ if (irq_level > 0) {
+ if (request_irq(irq_level, SYM53C500_intr, IRQF_SHARED, "SYM53C500", host)) {
+ printk("SYM53C500: unable to allocate IRQ %d\n", irq_level);
+ goto err_free_scsi;
+ }
+ DEB(printk("SYM53C500: allocated IRQ %d\n", irq_level));
+ } else if (irq_level == 0) {
+ DEB(printk("SYM53C500: No interrupts detected\n"));
+ goto err_free_scsi;
+ } else {
+ DEB(printk("SYM53C500: Shouldn't get here!\n"));
+ goto err_free_scsi;
+ }
+
+ host->unique_id = port_base;
+ host->irq = irq_level;
+ host->io_port = port_base;
+ host->n_io_port = 0x10;
+ host->dma_channel = -1;
+
+ /*
+ * Note fast_pio is set to USE_FAST_PIO by
+ * default, but can be changed via "sysfs".
+ */
+ data->fast_pio = USE_FAST_PIO;
+
+ info->host = host;
+
+ if (scsi_add_host(host, NULL))
+ goto err_free_irq;
+
+ scsi_scan_host(host);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq_level, host);
+err_free_scsi:
+ scsi_host_put(host);
+err_release:
+ release_region(port_base, 0x10);
+ printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
+ return -ENODEV;
+
+failed:
+ SYM53C500_release(link);
+ return -ENODEV;
+} /* SYM53C500_config */
+
+static int sym53c500_resume(struct pcmcia_device *link)
+{
+ struct scsi_info_t *info = link->priv;
+
+ /* See earlier comment about manufacturer IDs. */
+ if ((info->manf_id == MANFID_MACNICA) ||
+ (info->manf_id == MANFID_PIONEER) ||
+ (info->manf_id == 0x0098)) {
+ outb(0x80, link->resource[0]->start + 0xd);
+ outb(0x24, link->resource[0]->start + 0x9);
+ outb(0x04, link->resource[0]->start + 0xd);
+ }
+ /*
+ * If things don't work after a "resume",
+ * this is a good place to start looking.
+ */
+ SYM53C500_int_host_reset(link->resource[0]->start);
+
+ return 0;
+}
+
+static void
+SYM53C500_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "SYM53C500_detach\n");
+
+ SYM53C500_release(link);
+
+ kfree(link->priv);
+ link->priv = NULL;
+} /* SYM53C500_detach */
+
+static int
+SYM53C500_probe(struct pcmcia_device *link)
+{
+ struct scsi_info_t *info;
+
+ dev_dbg(&link->dev, "SYM53C500_attach()\n");
+
+ /* Create new SCSI device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->p_dev = link;
+ link->priv = info;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ return SYM53C500_config(link);
+} /* SYM53C500_attach */
+
+MODULE_AUTHOR("Bob Tracy <rct@frus.com>");
+MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver");
+MODULE_LICENSE("GPL");
+
+static const struct pcmcia_device_id sym53c500_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("BASICS by New Media Corporation", "SCSI Sym53C500", 0x23c78a9d, 0x0099e7f7),
+ PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "SCSI Bus Toaster Sym53C500", 0x085a850b, 0x45432eb8),
+ PCMCIA_DEVICE_PROD_ID2("SCSI9000", 0x21648f44),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, sym53c500_ids);
+
+static struct pcmcia_driver sym53c500_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "sym53c500_cs",
+ .probe = SYM53C500_probe,
+ .remove = SYM53C500_detach,
+ .id_table = sym53c500_ids,
+ .resume = sym53c500_resume,
+};
+
+static int __init
+init_sym53c500_cs(void)
+{
+ return pcmcia_register_driver(&sym53c500_cs_driver);
+}
+
+static void __exit
+exit_sym53c500_cs(void)
+{
+ pcmcia_unregister_driver(&sym53c500_cs_driver);
+}
+
+module_init(init_sym53c500_cs);
+module_exit(exit_sym53c500_cs);
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
new file mode 100644
index 000000000..ce4cd87c7
--- /dev/null
+++ b/drivers/scsi/pm8001/Makefile
@@ -0,0 +1,13 @@
+#
+# Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver
+#
+# Copyright (C) 2008-2009 USI Co., Ltd.
+
+
+obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+pm80xx-y += pm8001_init.o \
+ pm8001_sas.o \
+ pm8001_ctl.o \
+ pm8001_hwi.o \
+ pm80xx_hwi.o
+
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
new file mode 100644
index 000000000..9241c7826
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_chips.h
@@ -0,0 +1,89 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PM8001_CHIPS_H_
+#define _PM8001_CHIPS_H_
+
+static inline u32 pm8001_read_32(void *virt_addr)
+{
+ return *((u32 *)virt_addr);
+}
+
+static inline void pm8001_write_32(void *addr, u32 offset, __le32 val)
+{
+ *((__le32 *)(addr + offset)) = val;
+}
+
+static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
+ u32 offset)
+{
+ return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset);
+}
+
+static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar,
+ u32 addr, u32 val)
+{
+ writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr);
+}
+static inline u32 pm8001_mr32(void __iomem *addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val)
+{
+ writel(val, addr + offset);
+}
+static inline u32 get_pci_bar_index(u32 pcibar)
+{
+ switch (pcibar) {
+ case 0x18:
+ case 0x1C:
+ return 1;
+ case 0x20:
+ return 2;
+ case 0x24:
+ return 3;
+ default:
+ return 0;
+ }
+}
+
+#endif /* _PM8001_CHIPS_H_ */
+
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
new file mode 100644
index 000000000..be8269c8d
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -0,0 +1,753 @@
+/*
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include "pm8001_sas.h"
+#include "pm8001_ctl.h"
+
+/* scsi host attributes */
+
+/**
+ * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
+ } else {
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
+ }
+}
+static
+DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
+
+/**
+ * pm8001_ctl_fw_version_show - firmware version
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
+ (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
+ (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
+ (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
+ } else {
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+ }
+}
+static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
+/**
+ * pm8001_ctl_max_out_io_show - max outstanding io supported
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
+ } else {
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
+ }
+}
+static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
+/**
+ * pm8001_ctl_max_devices_show - max devices support
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%04d\n",
+ (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
+ );
+ } else {
+ return snprintf(buf, PAGE_SIZE, "%04d\n",
+ (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
+ );
+ }
+}
+static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
+/**
+ * pm8001_ctl_max_sg_list_show - max sg list supported iff not 0.0 for no
+ * hardware limitation
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%04d\n",
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
+ );
+ } else {
+ return snprintf(buf, PAGE_SIZE, "%04d\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
+ );
+ }
+}
+static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
+
+#define SAS_1_0 0x1
+#define SAS_1_1 0x2
+#define SAS_2_0 0x4
+
+static ssize_t
+show_sas_spec_support_status(unsigned int mode, char *buf)
+{
+ ssize_t len = 0;
+
+ if (mode & SAS_1_1)
+ len = sprintf(buf, "%s", "SAS1.1");
+ if (mode & SAS_2_0)
+ len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0");
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+/**
+ * pm8001_ctl_sas_spec_support_show - sas spec supported
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int mode;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ /* fe000000 means supports SAS2.1 */
+ if (pm8001_ha->chip_id == chip_8001)
+ mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
+ 0xfe000000)>>25;
+ else
+ /* fe000000 means supports SAS2.1 */
+ mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
+ 0xfe000000)>>25;
+ return show_sas_spec_support_status(mode, buf);
+}
+static DEVICE_ATTR(sas_spec_support, S_IRUGO,
+ pm8001_ctl_sas_spec_support_show, NULL);
+
+/**
+ * pm8001_ctl_sas_address_show - sas address
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+ pm8001_ctl_host_sas_address_show, NULL);
+
+/**
+ * pm8001_ctl_logging_level_show - logging level
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
+}
+static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ pm8001_ha->logging_level = val;
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
+ pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store);
+/**
+ * pm8001_ctl_aap_log_show - aap1 event log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int i;
+#define AAP1_MEMMAP(r, c) \
+ (*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \
+ + (c)))
+
+ char *str = buf;
+ int max = 2;
+ for (i = 0; i < max; i++) {
+ str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
+ "0x%08x 0x%08x\n",
+ AAP1_MEMMAP(i, 0),
+ AAP1_MEMMAP(i, 4),
+ AAP1_MEMMAP(i, 8),
+ AAP1_MEMMAP(i, 12),
+ AAP1_MEMMAP(i, 16),
+ AAP1_MEMMAP(i, 20),
+ AAP1_MEMMAP(i, 24),
+ AAP1_MEMMAP(i, 28));
+ }
+
+ return str - buf;
+}
+static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
+/**
+ * pm8001_ctl_ib_queue_log_show - Out bound Queue log
+ * @cdev:pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int offset;
+ char *str = buf;
+ int start = 0;
+#define IB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[IB].virt_ptr + \
+ pm8001_ha->evtlog_ib_offset + (c)))
+
+ for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+ str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+ start = start + 4;
+ }
+ pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
+ if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ pm8001_ha->evtlog_ib_offset = 0;
+
+ return str - buf;
+}
+
+static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
+/**
+ * pm8001_ctl_ob_queue_log_show - Out bound Queue log
+ * @cdev:pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int offset;
+ char *str = buf;
+ int start = 0;
+#define OB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[OB].virt_ptr + \
+ pm8001_ha->evtlog_ob_offset + (c)))
+
+ for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+ str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+ start = start + 4;
+ }
+ pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
+ if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ pm8001_ha->evtlog_ob_offset = 0;
+
+ return str - buf;
+}
+static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
+/**
+ * pm8001_ctl_bios_version_show - Bios version Display
+ * @cdev:pointer to embedded class device
+ * @buf:the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ char *str = buf;
+ int bios_index;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+
+ pm8001_ha->nvmd_completion = &completion;
+ payload.minor_function = 7;
+ payload.offset = 0;
+ payload.length = 4096;
+ payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ if (!payload.func_specific)
+ return -ENOMEM;
+ if (PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload)) {
+ kfree(payload.func_specific);
+ return -ENOMEM;
+ }
+ wait_for_completion(&completion);
+ for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
+ bios_index++)
+ str += sprintf(str, "%c",
+ *(payload.func_specific+bios_index));
+ kfree(payload.func_specific);
+ return str - buf;
+}
+static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
+/**
+ * pm8001_ctl_aap_log_show - IOP event log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+#define IOP_MEMMAP(r, c) \
+ (*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \
+ + (c)))
+ int i;
+ char *str = buf;
+ int max = 2;
+ for (i = 0; i < max; i++) {
+ str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
+ "0x%08x 0x%08x\n",
+ IOP_MEMMAP(i, 0),
+ IOP_MEMMAP(i, 4),
+ IOP_MEMMAP(i, 8),
+ IOP_MEMMAP(i, 12),
+ IOP_MEMMAP(i, 16),
+ IOP_MEMMAP(i, 20),
+ IOP_MEMMAP(i, 24),
+ IOP_MEMMAP(i, 28));
+ }
+
+ return str - buf;
+}
+static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
+
+/**
+ ** pm8001_ctl_fatal_log_show - fatal error logging
+ ** @cdev:pointer to embedded class device
+ ** @buf: the buffer returned
+ **
+ ** A sysfs 'read-only' shost attribute.
+ **/
+
+static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t count;
+
+ count = pm80xx_get_fatal_dump(cdev, attr, buf);
+ return count;
+}
+
+static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+
+
+/**
+ ** pm8001_ctl_gsm_log_show - gsm dump collection
+ ** @cdev:pointer to embedded class device
+ ** @buf: the buffer returned
+ **A sysfs 'read-only' shost attribute.
+ **/
+static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t count;
+
+ count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
+ return count;
+}
+
+static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
+
+#define FLASH_CMD_NONE 0x00
+#define FLASH_CMD_UPDATE 0x01
+#define FLASH_CMD_SET_NVMD 0x02
+
+struct flash_command {
+ u8 command[8];
+ int code;
+};
+
+static struct flash_command flash_command_table[] =
+{
+ {"set_nvmd", FLASH_CMD_SET_NVMD},
+ {"update", FLASH_CMD_UPDATE},
+ {"", FLASH_CMD_NONE} /* Last entry should be NULL. */
+};
+
+struct error_fw {
+ char *reason;
+ int err_code;
+};
+
+static struct error_fw flash_error_table[] =
+{
+ {"Failed to open fw image file", FAIL_OPEN_BIOS_FILE},
+ {"image header mismatch", FLASH_UPDATE_HDR_ERR},
+ {"image offset mismatch", FLASH_UPDATE_OFFSET_ERR},
+ {"image CRC Error", FLASH_UPDATE_CRC_ERR},
+ {"image length Error.", FLASH_UPDATE_LENGTH_ERR},
+ {"Failed to program flash chip", FLASH_UPDATE_HW_ERR},
+ {"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED},
+ {"Flash update disabled.", FLASH_UPDATE_DISABLED},
+ {"Flash in progress", FLASH_IN_PROGRESS},
+ {"Image file size Error", FAIL_FILE_SIZE},
+ {"Input parameter error", FAIL_PARAMETERS},
+ {"Out of memory", FAIL_OUT_MEMORY},
+ {"OK", 0} /* Last entry err_code = 0. */
+};
+
+static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pm8001_ioctl_payload *payload;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ u8 *ioctlbuffer;
+ u32 ret;
+ u32 length = 1024 * 5 + sizeof(*payload) - 1;
+
+ if (pm8001_ha->fw_image->size > 4096) {
+ pm8001_ha->fw_status = FAIL_FILE_SIZE;
+ return -EFAULT;
+ }
+
+ ioctlbuffer = kzalloc(length, GFP_KERNEL);
+ if (!ioctlbuffer) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ return -ENOMEM;
+ }
+ payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
+ memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+ pm8001_ha->fw_image->size);
+ payload->length = pm8001_ha->fw_image->size;
+ payload->id = 0;
+ payload->minor_function = 0x1;
+ pm8001_ha->nvmd_completion = &completion;
+ ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
+ wait_for_completion(&completion);
+out:
+ kfree(ioctlbuffer);
+ return ret;
+}
+
+static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pm8001_ioctl_payload *payload;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ u8 *ioctlbuffer;
+ struct fw_control_info *fwControl;
+ u32 partitionSize, partitionSizeTmp;
+ u32 loopNumber, loopcount;
+ struct pm8001_fw_image_header *image_hdr;
+ u32 sizeRead = 0;
+ u32 ret = 0;
+ u32 length = 1024 * 16 + sizeof(*payload) - 1;
+
+ if (pm8001_ha->fw_image->size < 28) {
+ pm8001_ha->fw_status = FAIL_FILE_SIZE;
+ return -EFAULT;
+ }
+ ioctlbuffer = kzalloc(length, GFP_KERNEL);
+ if (!ioctlbuffer) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ return -ENOMEM;
+ }
+ image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
+ while (sizeRead < pm8001_ha->fw_image->size) {
+ partitionSizeTmp =
+ *(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
+ partitionSize = be32_to_cpu(partitionSizeTmp);
+ loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN,
+ IOCTL_BUF_SIZE);
+ for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
+ payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
+ payload->length = 1024*16;
+ payload->id = 0;
+ fwControl =
+ (struct fw_control_info *)&payload->func_specific;
+ fwControl->len = IOCTL_BUF_SIZE; /* IN */
+ fwControl->size = partitionSize + HEADER_LEN;/* IN */
+ fwControl->retcode = 0;/* OUT */
+ fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
+
+ /* for the last chunk of data in case file size is not even with
+ 4k, load only the rest*/
+ if (((loopcount-loopNumber) == 1) &&
+ ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
+ fwControl->len =
+ (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+ memcpy((u8 *)fwControl->buffer,
+ (u8 *)pm8001_ha->fw_image->data + sizeRead,
+ (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
+ sizeRead +=
+ (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+ } else {
+ memcpy((u8 *)fwControl->buffer,
+ (u8 *)pm8001_ha->fw_image->data + sizeRead,
+ IOCTL_BUF_SIZE);
+ sizeRead += IOCTL_BUF_SIZE;
+ }
+
+ pm8001_ha->nvmd_completion = &completion;
+ ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
+ wait_for_completion(&completion);
+ if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
+ pm8001_ha->fw_status = fwControl->retcode;
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ }
+out:
+ kfree(ioctlbuffer);
+ return ret;
+}
+static ssize_t pm8001_store_update_fw(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ char *cmd_ptr, *filename_ptr;
+ int res, i;
+ int flash_command = FLASH_CMD_NONE;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ /* this test protects us from running two flash processes at once,
+ * so we should start with this test */
+ if (pm8001_ha->fw_status == FLASH_IN_PROGRESS)
+ return -EINPROGRESS;
+ pm8001_ha->fw_status = FLASH_IN_PROGRESS;
+
+ cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+ if (!cmd_ptr) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ return -ENOMEM;
+ }
+
+ filename_ptr = cmd_ptr + count;
+ res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
+ if (res != 2) {
+ pm8001_ha->fw_status = FAIL_PARAMETERS;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
+ if (!memcmp(flash_command_table[i].command,
+ cmd_ptr, strlen(cmd_ptr))) {
+ flash_command = flash_command_table[i].code;
+ break;
+ }
+ }
+ if (flash_command == FLASH_CMD_NONE) {
+ pm8001_ha->fw_status = FAIL_PARAMETERS;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = request_firmware(&pm8001_ha->fw_image,
+ filename_ptr,
+ pm8001_ha->dev);
+
+ if (ret) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk(
+ "Failed to load firmware image file %s, error %d\n",
+ filename_ptr, ret));
+ pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE;
+ goto out;
+ }
+
+ if (FLASH_CMD_UPDATE == flash_command)
+ ret = pm8001_update_flash(pm8001_ha);
+ else
+ ret = pm8001_set_nvmd(pm8001_ha);
+
+ release_firmware(pm8001_ha->fw_image);
+out:
+ kfree(cmd_ptr);
+
+ if (ret)
+ return ret;
+
+ pm8001_ha->fw_status = FLASH_OK;
+ return count;
+}
+
+static ssize_t pm8001_show_update_fw(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ for (i = 0; flash_error_table[i].err_code != 0; i++) {
+ if (flash_error_table[i].err_code == pm8001_ha->fw_status)
+ break;
+ }
+ if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
+ pm8001_ha->fw_status = FLASH_OK;
+
+ return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
+ flash_error_table[i].err_code,
+ flash_error_table[i].reason);
+}
+
+static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
+ pm8001_show_update_fw, pm8001_store_update_fw);
+struct device_attribute *pm8001_host_attrs[] = {
+ &dev_attr_interface_rev,
+ &dev_attr_fw_version,
+ &dev_attr_update_fw,
+ &dev_attr_aap_log,
+ &dev_attr_iop_log,
+ &dev_attr_fatal_log,
+ &dev_attr_gsm_log,
+ &dev_attr_max_out_io,
+ &dev_attr_max_devices,
+ &dev_attr_max_sg_list,
+ &dev_attr_sas_spec_support,
+ &dev_attr_logging_level,
+ &dev_attr_host_sas_address,
+ &dev_attr_bios_version,
+ &dev_attr_ib_log,
+ &dev_attr_ob_log,
+ NULL,
+};
+
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
new file mode 100644
index 000000000..d0d43a250
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -0,0 +1,63 @@
+ /*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef PM8001_CTL_H_INCLUDED
+#define PM8001_CTL_H_INCLUDED
+
+#define IOCTL_BUF_SIZE 4096
+#define HEADER_LEN 28
+#define SIZE_OFFSET 16
+
+#define BIOSOFFSET 56
+#define BIOS_OFFSET_LIMIT 61
+
+#define FLASH_OK 0x000000
+#define FAIL_OPEN_BIOS_FILE 0x000100
+#define FAIL_FILE_SIZE 0x000a00
+#define FAIL_PARAMETERS 0x000b00
+#define FAIL_OUT_MEMORY 0x000c00
+#define FLASH_IN_PROGRESS 0x001000
+
+#define IB_OB_READ_TIMES 256
+#define SYSFS_OFFSET 1024
+#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024)
+#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024)
+#endif /* PM8001_CTL_H_INCLUDED */
+
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
new file mode 100644
index 000000000..74a4bb9af
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -0,0 +1,131 @@
+/*
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PM8001_DEFS_H_
+#define _PM8001_DEFS_H_
+
+enum chip_flavors {
+ chip_8001,
+ chip_8008,
+ chip_8009,
+ chip_8018,
+ chip_8019,
+ chip_8074,
+ chip_8076,
+ chip_8077
+};
+
+enum phy_speed {
+ PHY_SPEED_15 = 0x01,
+ PHY_SPEED_30 = 0x02,
+ PHY_SPEED_60 = 0x04,
+};
+
+enum data_direction {
+ DATA_DIR_NONE = 0x0, /* NO TRANSFER */
+ DATA_DIR_IN = 0x01, /* INBOUND */
+ DATA_DIR_OUT = 0x02, /* OUTBOUND */
+ DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */
+};
+
+enum port_type {
+ PORT_TYPE_SAS = (1L << 1),
+ PORT_TYPE_SATA = (1L << 0),
+};
+
+/* driver compile-time configuration */
+#define PM8001_MAX_CCB 512 /* max ccbs supported */
+#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
+#define PM8001_MAX_INB_NUM 1
+#define PM8001_MAX_OUTB_NUM 1
+#define PM8001_MAX_SPCV_INB_NUM 1
+#define PM8001_MAX_SPCV_OUTB_NUM 4
+#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
+
+/* Inbound/Outbound queue size */
+#define IOMB_SIZE_SPC 64
+#define IOMB_SIZE_SPCV 128
+
+/* unchangeable hardware details */
+#define PM8001_MAX_PHYS 16 /* max. possible phys */
+#define PM8001_MAX_PORTS 16 /* max. possible ports */
+#define PM8001_MAX_DEVICES 2048 /* max supported device */
+#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
+
+#define USI_MAX_MEMCNT_BASE 5
+#define IB (USI_MAX_MEMCNT_BASE + 1)
+#define CI (IB + PM8001_MAX_SPCV_INB_NUM)
+#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
+#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
+#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
+#define PM8001_MAX_DMA_SG SG_ALL
+enum memory_region_num {
+ AAP1 = 0x0, /* application acceleration processor */
+ IOP, /* IO processor */
+ NVMD, /* NVM device */
+ DEV_MEM, /* memory for devices */
+ CCB_MEM, /* memory for command control block */
+ FW_FLASH, /* memory for fw flash update */
+ FORENSIC_MEM /* memory for fw forensic data */
+};
+#define PM8001_EVENT_LOG_SIZE (128 * 1024)
+
+/*error code*/
+enum mpi_err {
+ MPI_IO_STATUS_SUCCESS = 0x0,
+ MPI_IO_STATUS_BUSY = 0x01,
+ MPI_IO_STATUS_FAIL = 0x02,
+};
+
+/**
+ * Phy Control constants
+ */
+enum phy_control_type {
+ PHY_LINK_RESET = 0x01,
+ PHY_HARD_RESET = 0x02,
+ PHY_NOTIFY_ENABLE_SPINUP = 0x10,
+};
+
+enum pm8001_hba_info_flags {
+ PM8001F_INIT_TIME = (1U << 0),
+ PM8001F_RUN_TIME = (1U << 1),
+};
+
+#endif
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
new file mode 100644
index 000000000..96dcc097a
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -0,0 +1,5125 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+ #include <linux/slab.h>
+ #include "pm8001_sas.h"
+ #include "pm8001_hwi.h"
+ #include "pm8001_chips.h"
+ #include "pm8001_ctl.h"
+
+/**
+ * read_main_config_table - read the configure table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.signature =
+ pm8001_mr32(address, 0x00);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
+ pm8001_mr32(address, 0x04);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
+ pm8001_mr32(address, 0x08);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io =
+ pm8001_mr32(address, 0x0C);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl =
+ pm8001_mr32(address, 0x10);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
+ pm8001_mr32(address, 0x14);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset =
+ pm8001_mr32(address, 0x18);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
+ pm8001_mr32(address, MAIN_IBQ_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
+ pm8001_mr32(address, MAIN_OBQ_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag =
+ pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
+
+ /* read analog Setting offset from the configuration table */
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
+ pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+ /* read Error Dump Offset and Length */
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+ pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate =
+ pm8001_mr32(address, 0x00);
+ pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 =
+ pm8001_mr32(address, 0x04);
+ pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 =
+ pm8001_mr32(address, 0x08);
+ pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt =
+ pm8001_mr32(address, 0x0C);
+ pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt =
+ pm8001_mr32(address, 0x10);
+ pm8001_ha->gs_tbl.pm8001_tbl.rsvd =
+ pm8001_mr32(address, 0x14);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] =
+ pm8001_mr32(address, 0x18);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] =
+ pm8001_mr32(address, 0x1C);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] =
+ pm8001_mr32(address, 0x20);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] =
+ pm8001_mr32(address, 0x24);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] =
+ pm8001_mr32(address, 0x28);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] =
+ pm8001_mr32(address, 0x2C);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] =
+ pm8001_mr32(address, 0x30);
+ pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] =
+ pm8001_mr32(address, 0x34);
+ pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val =
+ pm8001_mr32(address, 0x38);
+ pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] =
+ pm8001_mr32(address, 0x3C);
+ pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] =
+ pm8001_mr32(address, 0x40);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] =
+ pm8001_mr32(address, 0x44);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] =
+ pm8001_mr32(address, 0x48);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] =
+ pm8001_mr32(address, 0x4C);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] =
+ pm8001_mr32(address, 0x50);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] =
+ pm8001_mr32(address, 0x54);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] =
+ pm8001_mr32(address, 0x58);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] =
+ pm8001_mr32(address, 0x5C);
+ pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] =
+ pm8001_mr32(address, 0x60);
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+ for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+ u32 offset = i * 0x20;
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+ get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
+ pm8001_ha->inbnd_q_tbl[i].pi_offset =
+ pm8001_mr32(address, (offset + 0x18));
+ }
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+ for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+ u32 offset = i * 0x24;
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+ get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
+ pm8001_ha->outbnd_q_tbl[i].ci_offset =
+ pm8001_mr32(address, (offset + 0x18));
+ }
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ u32 offsetib, offsetob;
+ void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+ void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
+ 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
+ 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
+
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr =
+ pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr =
+ pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size =
+ PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr =
+ pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr =
+ pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size =
+ PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
+ for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+ pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
+ pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
+ pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+ pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
+ pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+ pm8001_ha->inbnd_q_tbl[i].base_virt =
+ (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+ pm8001_ha->inbnd_q_tbl[i].total_length =
+ pm8001_ha->memoryMap.region[IB + i].total_len;
+ pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
+ pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+ pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
+ pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+ pm8001_ha->inbnd_q_tbl[i].ci_virt =
+ pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+ offsetib = i * 0x20;
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+ get_pci_bar_index(pm8001_mr32(addressib,
+ (offsetib + 0x14)));
+ pm8001_ha->inbnd_q_tbl[i].pi_offset =
+ pm8001_mr32(addressib, (offsetib + 0x18));
+ pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
+ pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
+ }
+ for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+ pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
+ pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
+ pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+ pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
+ pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+ pm8001_ha->outbnd_q_tbl[i].base_virt =
+ (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+ pm8001_ha->outbnd_q_tbl[i].total_length =
+ pm8001_ha->memoryMap.region[OB + i].total_len;
+ pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
+ pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+ pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
+ pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+ pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
+ 0 | (10 << 16) | (i << 24);
+ pm8001_ha->outbnd_q_tbl[i].pi_virt =
+ pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+ offsetob = i * 0x24;
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+ get_pci_bar_index(pm8001_mr32(addressob,
+ offsetob + 0x14));
+ pm8001_ha->outbnd_q_tbl[i].ci_offset =
+ pm8001_mr32(addressob, (offsetob + 0x18));
+ pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
+ pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
+ }
+}
+
+/**
+ * update_main_config_table - update the main default table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+ pm8001_mw32(address, 0x24,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
+ pm8001_mw32(address, 0x28,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
+ pm8001_mw32(address, 0x2C,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
+ pm8001_mw32(address, 0x30,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
+ pm8001_mw32(address, 0x34,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
+ pm8001_mw32(address, 0x38,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.
+ outbound_tgt_ITNexus_event_pid0_3);
+ pm8001_mw32(address, 0x3C,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.
+ outbound_tgt_ITNexus_event_pid4_7);
+ pm8001_mw32(address, 0x40,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.
+ outbound_tgt_ssp_event_pid0_3);
+ pm8001_mw32(address, 0x44,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.
+ outbound_tgt_ssp_event_pid4_7);
+ pm8001_mw32(address, 0x48,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.
+ outbound_tgt_smp_event_pid0_3);
+ pm8001_mw32(address, 0x4C,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.
+ outbound_tgt_smp_event_pid4_7);
+ pm8001_mw32(address, 0x50,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
+ pm8001_mw32(address, 0x54,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
+ pm8001_mw32(address, 0x58,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
+ pm8001_mw32(address, 0x5C,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
+ pm8001_mw32(address, 0x60,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
+ pm8001_mw32(address, 0x64,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
+ pm8001_mw32(address, 0x68,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
+ pm8001_mw32(address, 0x6C,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
+ pm8001_mw32(address, 0x70,
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
+{
+ void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+ u16 offset = number * 0x20;
+ pm8001_mw32(address, offset + 0x00,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+ pm8001_mw32(address, offset + 0x04,
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+ pm8001_mw32(address, offset + 0x08,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+ pm8001_mw32(address, offset + 0x0C,
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+ pm8001_mw32(address, offset + 0x10,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
+{
+ void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+ u16 offset = number * 0x24;
+ pm8001_mw32(address, offset + 0x00,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+ pm8001_mw32(address, offset + 0x04,
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+ pm8001_mw32(address, offset + 0x08,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+ pm8001_mw32(address, offset + 0x0C,
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+ pm8001_mw32(address, offset + 0x10,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+ pm8001_mw32(address, offset + 0x1C,
+ pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * pm8001_bar4_shift - function is called to shift BAR base address
+ * @pm8001_ha : our hba card infomation
+ * @shiftValue : shifting value in memory bar.
+ */
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+{
+ u32 regVal;
+ unsigned long start;
+
+ /* program the inbound AXI translation Lower Address */
+ pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
+
+ /* confirm the setting is written */
+ start = jiffies + HZ; /* 1 sec */
+ do {
+ regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
+ } while ((regVal != shiftValue) && time_before(jiffies, start));
+
+ if (regVal != shiftValue) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
+ " = 0x%x\n", regVal));
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * mpi_set_phys_g3_with_ssc
+ * @pm8001_ha: our hba card information
+ * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
+ */
+static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
+ u32 SSCbit)
+{
+ u32 value, offset, i;
+ unsigned long flags;
+
+#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
+#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
+#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
+#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
+#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
+#define PHY_G3_WITH_SSC_BIT_SHIFT 13
+#define SNW3_PHY_CAPABILITIES_PARITY 31
+
+ /*
+ * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
+ * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
+ */
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
+ pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
+ }
+ /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return;
+ }
+ for (i = 4; i < 8; i++) {
+ offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
+ pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
+ }
+ /*************************************************************
+ Change the SSC upspreading value to 0x0 so that upspreading is disabled.
+ Device MABC SMOD0 Controls
+ Address: (via MEMBASE-III):
+ Using shifted destination address 0x0_0000: with Offset 0xD8
+
+ 31:28 R/W Reserved Do not change
+ 27:24 R/W SAS_SMOD_SPRDUP 0000
+ 23:20 R/W SAS_SMOD_SPRDDN 0000
+ 19:0 R/W Reserved Do not change
+ Upon power-up this register will read as 0x8990c016,
+ and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
+ so that the written value will be 0x8090c016.
+ This will ensure only down-spreading SSC is enabled on the SPC.
+ *************************************************************/
+ value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+ pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
+
+ /*set the shifted destination address to 0x0 to avoid error operation */
+ pm8001_bar4_shift(pm8001_ha, 0x0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return;
+}
+
+/**
+ * mpi_set_open_retry_interval_reg
+ * @pm8001_ha: our hba card information
+ * @interval - interval time for each OPEN_REJECT (RETRY). The units are in 1us.
+ */
+static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
+ u32 interval)
+{
+ u32 offset;
+ u32 value;
+ u32 i;
+ unsigned long flags;
+
+#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
+#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
+#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
+#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
+#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
+
+ value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ /* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return;
+ }
+ for (i = 0; i < 4; i++) {
+ offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
+ pm8001_cw32(pm8001_ha, 2, offset, value);
+ }
+
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return;
+ }
+ for (i = 4; i < 8; i++) {
+ offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
+ pm8001_cw32(pm8001_ha, 2, offset, value);
+ }
+ /*set the shifted destination address to 0x0 to avoid error operation */
+ pm8001_bar4_shift(pm8001_ha, 0x0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return;
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 max_wait_count;
+ u32 value;
+ u32 gst_len_mpistate;
+ /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
+ table is updated */
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
+ /* wait until Inbound DoorBell Clear Register toggled */
+ max_wait_count = 1 * 1000 * 1000;/* 1 sec */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+ value &= SPC_MSGU_CFG_TABLE_UPDATE;
+ } while ((value != 0) && (--max_wait_count));
+
+ if (!max_wait_count)
+ return -1;
+ /* check the MPI-State for initialization */
+ gst_len_mpistate =
+ pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+ GST_GSTLEN_MPIS_OFFSET);
+ if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
+ return -1;
+ /* check MPI Initialization error */
+ gst_len_mpistate = gst_len_mpistate >> 16;
+ if (0x0000 != gst_len_mpistate)
+ return -1;
+ return 0;
+}
+
+/**
+ * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 value, value1;
+ u32 max_wait_count;
+ /* check error state */
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ /* check AAP error */
+ if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
+ /* error state */
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+ return -1;
+ }
+
+ /* check IOP error */
+ if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
+ /* error state */
+ value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+ return -1;
+ }
+
+ /* bit 4-31 of scratch pad1 should be zeros if it is not
+ in error state*/
+ if (value & SCRATCH_PAD1_STATE_MASK) {
+ /* error case */
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+ return -1;
+ }
+
+ /* bit 2, 4-31 of scratch pad2 should be zeros if it is not
+ in error state */
+ if (value1 & SCRATCH_PAD2_STATE_MASK) {
+ /* error case */
+ return -1;
+ }
+
+ max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */
+
+ /* wait until scratch pad 1 and 2 registers in ready state */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
+ & SCRATCH_PAD1_RDY;
+ value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
+ & SCRATCH_PAD2_RDY;
+ if ((--max_wait_count) == 0)
+ return -1;
+ } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
+ return 0;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *base_addr;
+ u32 value;
+ u32 offset;
+ u32 pcibar;
+ u32 pcilogic;
+
+ value = pm8001_cr32(pm8001_ha, 0, 0x44);
+ offset = value & 0x03FFFFFF;
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
+ pcilogic = (value & 0xFC000000) >> 26;
+ pcibar = get_pci_bar_index(pcilogic);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+ pm8001_ha->main_cfg_tbl_addr = base_addr =
+ pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+ pm8001_ha->general_stat_tbl_addr =
+ base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
+ pm8001_ha->inbnd_q_tbl_addr =
+ base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
+ pm8001_ha->outbnd_q_tbl_addr =
+ base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
+}
+
+/**
+ * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+ u8 i = 0;
+ u16 deviceid;
+ pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+ /* 8081 controllers need BAR shift to access MPI space
+ * as this is shared with BIOS data */
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ GSM_SM_BASE));
+ return -1;
+ }
+ }
+ /* check the firmware status */
+ if (-1 == check_fw_ready(pm8001_ha)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Firmware is not ready!\n"));
+ return -EBUSY;
+ }
+
+ /* Initialize pci space address eg: mpi offset */
+ init_pci_device_addresses(pm8001_ha);
+ init_default_table_values(pm8001_ha);
+ read_main_config_table(pm8001_ha);
+ read_general_status_table(pm8001_ha);
+ read_inbnd_queue_table(pm8001_ha);
+ read_outbnd_queue_table(pm8001_ha);
+ /* update main config table ,inbound table and outbound table */
+ update_main_config_table(pm8001_ha);
+ for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+ update_inbnd_queue_table(pm8001_ha, i);
+ for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+ update_outbnd_queue_table(pm8001_ha, i);
+ /* 8081 controller donot require these operations */
+ if (deviceid != 0x8081 && deviceid != 0x0042) {
+ mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+ /* 7->130ms, 34->500ms, 119->1.5s */
+ mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+ }
+ /* notify firmware update finished and check initialization status */
+ if (0 == mpi_init_check(pm8001_ha)) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MPI initialize successful!\n"));
+ } else
+ return -EBUSY;
+ /*This register is a 16-bit timer with a resolution of 1us. This is the
+ timer used for interrupt delay/coalescing in the PCIe Application Layer.
+ Zero is not a valid value. A value of 1 in the register will cause the
+ interrupts to be normal. A value greater than 1 will cause coalescing
+ delays.*/
+ pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
+ pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
+ return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 max_wait_count;
+ u32 value;
+ u32 gst_len_mpistate;
+ u16 deviceid;
+ pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ GSM_SM_BASE));
+ return -1;
+ }
+ }
+ init_pci_device_addresses(pm8001_ha);
+ /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
+ table is stop */
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);
+
+ /* wait until Inbound DoorBell Clear Register toggled */
+ max_wait_count = 1 * 1000 * 1000;/* 1 sec */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+ value &= SPC_MSGU_CFG_TABLE_RESET;
+ } while ((value != 0) && (--max_wait_count));
+
+ if (!max_wait_count) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value));
+ return -1;
+ }
+
+ /* check the MPI-State for termination in progress */
+ /* wait until Inbound DoorBell Clear Register toggled */
+ max_wait_count = 1 * 1000 * 1000; /* 1 sec */
+ do {
+ udelay(1);
+ gst_len_mpistate =
+ pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+ GST_GSTLEN_MPIS_OFFSET);
+ if (GST_MPI_STATE_UNINIT ==
+ (gst_len_mpistate & GST_MPI_STATE_MASK))
+ break;
+ } while (--max_wait_count);
+ if (!max_wait_count) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+ gst_len_mpistate & GST_MPI_STATE_MASK));
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * soft_reset_ready_check - Function to check FW is ready for soft reset.
+ * @pm8001_ha: our hba card information
+ */
+static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 regVal, regVal1, regVal2;
+ if (mpi_uninit_check(pm8001_ha) != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MPI state is not ready\n"));
+ return -1;
+ }
+ /* read the scratch pad 2 register bit 2 */
+ regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
+ & SCRATCH_PAD2_FWRDY_RST;
+ if (regVal == SCRATCH_PAD2_FWRDY_RST) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Firmware is ready for reset .\n"));
+ } else {
+ unsigned long flags;
+ /* Trigger NMI twice via RB6 */
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ RB6_ACCESS_REG));
+ return -1;
+ }
+ pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
+ RB6_MAGIC_NUMBER_RST);
+ pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
+ /* wait for 100 ms */
+ mdelay(100);
+ regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
+ SCRATCH_PAD2_FWRDY_RST;
+ if (regVal != SCRATCH_PAD2_FWRDY_RST) {
+ regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
+ "=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
+ regVal1, regVal2));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
+ * the FW register status to the originated status.
+ * @pm8001_ha: our hba card information
+ */
+static int
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 regVal, toggleVal;
+ u32 max_wait_count;
+ u32 regVal1, regVal2, regVal3;
+ u32 signature = 0x252acbcd; /* for host scratch pad0 */
+ unsigned long flags;
+
+ /* step1: Check FW is ready for soft reset */
+ if (soft_reset_ready_check(pm8001_ha) != 0) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
+ return -1;
+ }
+
+ /* step 2: clear NMI status register on AAP1 and IOP, write the same
+ value to clear */
+ /* map 0x60000 to BAR4(0x20), BAR2(win) */
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ MBIC_AAP1_ADDR_BASE));
+ return -1;
+ }
+ regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
+ /* map 0x70000 to BAR4(0x20), BAR2(win) */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ MBIC_IOP_ADDR_BASE));
+ return -1;
+ }
+ regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);
+
+ regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);
+
+ regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);
+
+ regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);
+
+ regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);
+
+ /* read the scratch pad 1 register bit 2 */
+ regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
+ & SCRATCH_PAD1_RST;
+ toggleVal = regVal ^ SCRATCH_PAD1_RST;
+
+ /* set signature in host scratch pad0 register to tell SPC that the
+ host performs the soft reset */
+ pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);
+
+ /* read required registers for confirmming */
+ /* map 0x0700000 to BAR4(0x20), BAR2(win) */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ GSM_ADDR_BASE));
+ return -1;
+ }
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
+ " Reset = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+
+ /* step 3: host read GSM Configuration and Reset register */
+ regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
+ /* Put those bits to low */
+ /* GSM XCBI offset = 0x70 0000
+ 0x00 Bit 13 COM_SLV_SW_RSTB 1
+ 0x00 Bit 12 QSSP_SW_RSTB 1
+ 0x00 Bit 11 RAAE_SW_RSTB 1
+ 0x00 Bit 9 RB_1_SW_RSTB 1
+ 0x00 Bit 8 SM_SW_RSTB 1
+ */
+ regVal &= ~(0x00003b00);
+ /* host write GSM Configuration and Reset register */
+ pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
+ "Configuration and Reset is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+
+ /* step 4: */
+ /* disable GSM - Read Address Parity Check */
+ regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700038 - Read Address Parity Check "
+ "Enable = 0x%x\n", regVal1));
+ pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
+ "is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+
+ /* disable GSM - Write Address Parity Check */
+ regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700040 - Write Address Parity Check"
+ " Enable = 0x%x\n", regVal2));
+ pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700040 - Write Address Parity Check "
+ "Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+
+ /* disable GSM - Write Data Parity Check */
+ regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x300048 - Write Data Parity Check"
+ " Enable = 0x%x\n", regVal3));
+ pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
+ "is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+
+ /* step 5: delay 10 usec */
+ udelay(10);
+ /* step 5-b: set GPIO-0 output control to tristate anyway */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ GPIO_ADDR_BASE));
+ return -1;
+ }
+ regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GPIO Output Control Register:"
+ " = 0x%x\n", regVal));
+ /* set GPIO-0 output control to tri-state */
+ regVal &= 0xFFFFFFFC;
+ pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
+
+ /* Step 6: Reset the IOP and AAP1 */
+ /* map 0x00000 to BAR4(0x20), BAR2(win) */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
+ SPC_TOP_LEVEL_ADDR_BASE));
+ return -1;
+ }
+ regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Top Register before resetting IOP/AAP1"
+ ":= 0x%x\n", regVal));
+ regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
+ pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+ /* step 7: Reset the BDMA/OSSP */
+ regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Top Register before resetting BDMA/OSSP"
+ ": = 0x%x\n", regVal));
+ regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
+ pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+ /* step 8: delay 10 usec */
+ udelay(10);
+
+ /* step 9: bring the BDMA and OSSP out of reset */
+ regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Top Register before bringing up BDMA/OSSP"
+ ":= 0x%x\n", regVal));
+ regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
+ pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+ /* step 10: delay 10 usec */
+ udelay(10);
+
+ /* step 11: reads and sets the GSM Configuration and Reset Register */
+ /* map 0x0700000 to BAR4(0x20), BAR2(win) */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
+ GSM_ADDR_BASE));
+ return -1;
+ }
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
+ "Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+ regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
+ /* Put those bits to high */
+ /* GSM XCBI offset = 0x70 0000
+ 0x00 Bit 13 COM_SLV_SW_RSTB 1
+ 0x00 Bit 12 QSSP_SW_RSTB 1
+ 0x00 Bit 11 RAAE_SW_RSTB 1
+ 0x00 Bit 9 RB_1_SW_RSTB 1
+ 0x00 Bit 8 SM_SW_RSTB 1
+ */
+ regVal |= (GSM_CONFIG_RESET_VALUE);
+ pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
+ " Configuration and Reset is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
+
+ /* step 12: Restore GSM - Read Address Parity Check */
+ regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
+ /* just for debugging */
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
+ " = 0x%x\n", regVal));
+ pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700038 - Read Address Parity"
+ " Check Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
+ /* Restore GSM - Write Address Parity Check */
+ regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
+ pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700040 - Write Address Parity Check"
+ " Enable is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
+ /* Restore GSM - Write Data Parity Check */
+ regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
+ pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
+ "is set to = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
+
+ /* step 13: bring the IOP and AAP1 out of reset */
+ /* map 0x00000 to BAR4(0x20), BAR2(win) */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Shift Bar4 to 0x%x failed\n",
+ SPC_TOP_LEVEL_ADDR_BASE));
+ return -1;
+ }
+ regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
+ regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
+ pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
+
+ /* step 14: delay 10 usec - Normal Mode */
+ udelay(10);
+ /* check Soft Reset Normal mode or Soft Reset HDA mode */
+ if (signature == SPC_SOFT_RESET_SIGNATURE) {
+ /* step 15 (Normal Mode): wait until scratch pad1 register
+ bit 2 toggled */
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ do {
+ udelay(1);
+ regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+ SCRATCH_PAD1_RST;
+ } while ((regVal != toggleVal) && (--max_wait_count));
+
+ if (!max_wait_count) {
+ regVal = pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_1);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
+ "MSGU_SCRATCH_PAD1 = 0x%x\n",
+ toggleVal, regVal));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_3)));
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return -1;
+ }
+
+ /* step 16 (Normal) - Clear ODMR and ODCR */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+
+ /* step 17 (Normal Mode): wait for the FW and IOP to get
+ ready - 1 sec timeout */
+ /* Wait for the SPC Configuration Table to be ready */
+ if (check_fw_ready(pm8001_ha) == -1) {
+ regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ /* return error if MPI Configuration Table not ready */
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("FW not ready SCRATCH_PAD1"
+ " = 0x%x\n", regVal));
+ regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ /* return error if MPI Configuration Table not ready */
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("FW not ready SCRATCH_PAD2"
+ " = 0x%x\n", regVal));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_0)));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_3)));
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return -1;
+ }
+ }
+ pm8001_bar4_shift(pm8001_ha, 0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SPC soft reset Complete\n"));
+ return 0;
+}
+
+static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 i;
+ u32 regVal;
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("chip reset start\n"));
+
+ /* do SPC chip reset. */
+ regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
+ regVal &= ~(SPC_REG_RESET_DEVICE);
+ pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
+
+ /* delay 10 usec */
+ udelay(10);
+
+ /* bring chip reset out of reset */
+ regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
+ regVal |= SPC_REG_RESET_DEVICE;
+ pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
+
+ /* delay 10 usec */
+ udelay(10);
+
+ /* wait for 20 msec until the firmware gets reloaded */
+ i = 20;
+ do {
+ mdelay(1);
+ } while ((--i) != 0);
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm8001_chip_iounmap - which maped when initialized.
+ * @pm8001_ha: our hba card information
+ */
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+{
+ s8 bar, logical = 0;
+ for (bar = 0; bar < 6; bar++) {
+ /*
+ ** logical BARs for SPC:
+ ** bar 0 and 1 - logical BAR0
+ ** bar 2 and 3 - logical BAR1
+ ** bar4 - logical BAR2
+ ** bar5 - logical BAR3
+ ** Skip the appropriate assignments:
+ */
+ if ((bar == 1) || (bar == 3))
+ continue;
+ if (pm8001_ha->io_mem[logical].memvirtaddr) {
+ iounmap(pm8001_ha->io_mem[logical].memvirtaddr);
+ logical++;
+ }
+ }
+}
+
+/**
+ * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+ /**
+ * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
+ u32 int_vec_idx)
+{
+ u32 msi_index;
+ u32 value;
+ msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
+ msi_index += MSIX_TABLE_BASE;
+ pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
+ value = (1 << int_vec_idx);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value);
+
+}
+
+/**
+ * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
+ u32 int_vec_idx)
+{
+ u32 msi_index;
+ msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
+ msi_index += MSIX_TABLE_BASE;
+ pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
+}
+
+/**
+ * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+ pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
+ return;
+#endif
+ pm8001_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+ pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
+ return;
+#endif
+ pm8001_chip_intx_interrupt_disable(pm8001_ha);
+
+}
+
+/**
+ * pm8001_mpi_msg_free_get - get the free message buffer for transfer
+ * inbound queue.
+ * @circularQ: the inbound queue we want to transfer to HBA.
+ * @messageSize: the message size of this transfer, normally it is 64 bytes
+ * @messagePtr: the pointer to message.
+ */
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+ u16 messageSize, void **messagePtr)
+{
+ u32 offset, consumer_index;
+ struct mpi_msg_hdr *msgHeader;
+ u8 bcCount = 1; /* only support single buffer */
+
+ /* Checks is the requested message size can be allocated in this queue*/
+ if (messageSize > IOMB_SIZE_SPCV) {
+ *messagePtr = NULL;
+ return -1;
+ }
+
+ /* Stores the new consumer index */
+ consumer_index = pm8001_read_32(circularQ->ci_virt);
+ circularQ->consumer_index = cpu_to_le32(consumer_index);
+ if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) ==
+ le32_to_cpu(circularQ->consumer_index)) {
+ *messagePtr = NULL;
+ return -1;
+ }
+ /* get memory IOMB buffer address */
+ offset = circularQ->producer_idx * messageSize;
+ /* increment to next bcCount element */
+ circularQ->producer_idx = (circularQ->producer_idx + bcCount)
+ % PM8001_MPI_QUEUE;
+ /* Adds that distance to the base of the region virtual address plus
+ the message header size*/
+ msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
+ *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr);
+ return 0;
+}
+
+/**
+ * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
+ * FW to tell the fw to get this message from IOMB.
+ * @pm8001_ha: our hba card information
+ * @circularQ: the inbound queue we want to transfer to HBA.
+ * @opCode: the operation code represents commands which LLDD and fw recognized.
+ * @payload: the command payload of each operation command.
+ */
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+ struct inbound_queue_table *circularQ,
+ u32 opCode, void *payload, u32 responseQueue)
+{
+ u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
+ void *pMessage;
+
+ if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+ &pMessage) < 0) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("No free mpi buffer\n"));
+ return -ENOMEM;
+ }
+ BUG_ON(!payload);
+ /*Copy to the payload*/
+ memcpy(pMessage, payload, (pm8001_ha->iomb_size -
+ sizeof(struct mpi_msg_hdr)));
+
+ /*Build the header*/
+ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
+ | ((responseQueue & 0x3F) << 16)
+ | ((category & 0xF) << 12) | (opCode & 0xFFF));
+
+ pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
+ /*Update the PI to the firmware*/
+ pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
+ circularQ->pi_offset, circularQ->producer_idx);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+ responseQueue, opCode, circularQ->producer_idx,
+ circularQ->consumer_index));
+ return 0;
+}
+
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+ struct outbound_queue_table *circularQ, u8 bc)
+{
+ u32 producer_index;
+ struct mpi_msg_hdr *msgHeader;
+ struct mpi_msg_hdr *pOutBoundMsgHeader;
+
+ msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
+ pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
+ circularQ->consumer_idx * pm8001_ha->iomb_size);
+ if (pOutBoundMsgHeader != msgHeader) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("consumer_idx = %d msgHeader = %p\n",
+ circularQ->consumer_idx, msgHeader));
+
+ /* Update the producer index from SPC */
+ producer_index = pm8001_read_32(circularQ->pi_virt);
+ circularQ->producer_index = cpu_to_le32(producer_index);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("consumer_idx = %d producer_index = %d"
+ "msgHeader = %p\n", circularQ->consumer_idx,
+ circularQ->producer_index, msgHeader));
+ return 0;
+ }
+ /* free the circular queue buffer elements associated with the message*/
+ circularQ->consumer_idx = (circularQ->consumer_idx + bc)
+ % PM8001_MPI_QUEUE;
+ /* update the CI of outbound queue */
+ pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
+ circularQ->consumer_idx);
+ /* Update the producer index from SPC*/
+ producer_index = pm8001_read_32(circularQ->pi_virt);
+ circularQ->producer_index = cpu_to_le32(producer_index);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
+ circularQ->producer_index));
+ return 0;
+}
+
+/**
+ * pm8001_mpi_msg_consume- get the MPI message from outbound queue
+ * message table.
+ * @pm8001_ha: our hba card information
+ * @circularQ: the outbound queue table.
+ * @messagePtr1: the message contents of this outbound message.
+ * @pBC: the message size.
+ */
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ,
+ void **messagePtr1, u8 *pBC)
+{
+ struct mpi_msg_hdr *msgHeader;
+ __le32 msgHeader_tmp;
+ u32 header_tmp;
+ do {
+ /* If there are not-yet-delivered messages ... */
+ if (le32_to_cpu(circularQ->producer_index)
+ != circularQ->consumer_idx) {
+ /*Get the pointer to the circular queue buffer element*/
+ msgHeader = (struct mpi_msg_hdr *)
+ (circularQ->base_virt +
+ circularQ->consumer_idx * pm8001_ha->iomb_size);
+ /* read header */
+ header_tmp = pm8001_read_32(msgHeader);
+ msgHeader_tmp = cpu_to_le32(header_tmp);
+ if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
+ if (OPC_OUB_SKIP_ENTRY !=
+ (le32_to_cpu(msgHeader_tmp) & 0xfff)) {
+ *messagePtr1 =
+ ((u8 *)msgHeader) +
+ sizeof(struct mpi_msg_hdr);
+ *pBC = (u8)((le32_to_cpu(msgHeader_tmp)
+ >> 24) & 0x1f);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk(": CI=%d PI=%d "
+ "msgHeader=%x\n",
+ circularQ->consumer_idx,
+ circularQ->producer_index,
+ msgHeader_tmp));
+ return MPI_IO_STATUS_SUCCESS;
+ } else {
+ circularQ->consumer_idx =
+ (circularQ->consumer_idx +
+ ((le32_to_cpu(msgHeader_tmp)
+ >> 24) & 0x1f))
+ % PM8001_MPI_QUEUE;
+ msgHeader_tmp = 0;
+ pm8001_write_32(msgHeader, 0, 0);
+ /* update the CI of outbound queue */
+ pm8001_cw32(pm8001_ha,
+ circularQ->ci_pci_bar,
+ circularQ->ci_offset,
+ circularQ->consumer_idx);
+ }
+ } else {
+ circularQ->consumer_idx =
+ (circularQ->consumer_idx +
+ ((le32_to_cpu(msgHeader_tmp) >> 24) &
+ 0x1f)) % PM8001_MPI_QUEUE;
+ msgHeader_tmp = 0;
+ pm8001_write_32(msgHeader, 0, 0);
+ /* update the CI of outbound queue */
+ pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
+ circularQ->ci_offset,
+ circularQ->consumer_idx);
+ return MPI_IO_STATUS_FAIL;
+ }
+ } else {
+ u32 producer_index;
+ void *pi_virt = circularQ->pi_virt;
+ /* Update the producer index from SPC */
+ producer_index = pm8001_read_32(pi_virt);
+ circularQ->producer_index = cpu_to_le32(producer_index);
+ }
+ } while (le32_to_cpu(circularQ->producer_index) !=
+ circularQ->consumer_idx);
+ /* while we don't have any more not-yet-delivered message */
+ /* report empty */
+ return MPI_IO_STATUS_BUSY;
+}
+
+void pm8001_work_fn(struct work_struct *work)
+{
+ struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
+ struct pm8001_device *pm8001_dev;
+ struct domain_device *dev;
+
+ /*
+ * So far, all users of this stash an associated structure here.
+ * If we get here, and this pointer is null, then the action
+ * was cancelled. This nullification happens when the device
+ * goes away.
+ */
+ pm8001_dev = pw->data; /* Most stash device structure */
+ if ((pm8001_dev == NULL)
+ || ((pw->handler != IO_XFER_ERROR_BREAK)
+ && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
+ kfree(pw);
+ return;
+ }
+
+ switch (pw->handler) {
+ case IO_XFER_ERROR_BREAK:
+ { /* This one stashes the sas_task instead */
+ struct sas_task *t = (struct sas_task *)pm8001_dev;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ unsigned long flags, flags1;
+ struct task_status_struct *ts;
+ int i;
+
+ if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
+ break; /* Task still on lu */
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ spin_lock_irqsave(&t->task_state_lock, flags1);
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ break; /* Task got completed by another */
+ }
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+ /* Search for a possible ccb that matches the task */
+ for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ tag = ccb->ccb_tag;
+ if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ break;
+ }
+ if (!ccb) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ break; /* Task got freed by another */
+ }
+ ts = &t->task_status;
+ ts->resp = SAS_TASK_COMPLETE;
+ /* Force the midlayer to retry */
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_dev = ccb->device;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ spin_lock_irqsave(&t->task_state_lock, flags1);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
+ " done with event 0x%x resp 0x%x stat 0x%x but"
+ " aborted by upper layer!\n",
+ t, pw->handler, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ }
+ } break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ { /* This one stashes the sas_task instead */
+ struct sas_task *t = (struct sas_task *)pm8001_dev;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ unsigned long flags, flags1;
+ int i, ret = 0;
+
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+
+ ret = pm8001_query_task(t);
+
+ PM8001_IO_DBG(pm8001_ha,
+ switch (ret) {
+ case TMF_RESP_FUNC_SUCC:
+ pm8001_printk("...Task on lu\n");
+ break;
+
+ case TMF_RESP_FUNC_COMPLETE:
+ pm8001_printk("...Task NOT on lu\n");
+ break;
+
+ default:
+ pm8001_printk("...query task failed!!!\n");
+ break;
+ });
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ spin_lock_irqsave(&t->task_state_lock, flags1);
+
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+ (void)pm8001_abort_task(t);
+ break; /* Task got completed by another */
+ }
+
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+ /* Search for a possible ccb that matches the task */
+ for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ tag = ccb->ccb_tag;
+ if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ break;
+ }
+ if (!ccb) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+ (void)pm8001_abort_task(t);
+ break; /* Task got freed by another */
+ }
+
+ pm8001_dev = ccb->device;
+ dev = pm8001_dev->sas_device;
+
+ switch (ret) {
+ case TMF_RESP_FUNC_SUCC: /* task on lu */
+ ccb->open_retry = 1; /* Snub completion */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ ret = pm8001_abort_task(t);
+ ccb->open_retry = 0;
+ switch (ret) {
+ case TMF_RESP_FUNC_SUCC:
+ case TMF_RESP_FUNC_COMPLETE:
+ break;
+ default: /* device misbehavior */
+ ret = TMF_RESP_FUNC_FAILED;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("...Reset phy\n"));
+ pm8001_I_T_nexus_reset(dev);
+ break;
+ }
+ break;
+
+ case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ /* Do we need to abort the task locally? */
+ break;
+
+ default: /* device misbehavior */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ ret = TMF_RESP_FUNC_FAILED;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("...Reset phy\n"));
+ pm8001_I_T_nexus_reset(dev);
+ }
+
+ if (ret == TMF_RESP_FUNC_FAILED)
+ t = NULL;
+ pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
+ } break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ dev = pm8001_dev->sas_device;
+ pm8001_I_T_nexus_event_handler(dev);
+ break;
+ case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+ dev = pm8001_dev->sas_device;
+ pm8001_I_T_nexus_reset(dev);
+ break;
+ case IO_DS_IN_ERROR:
+ dev = pm8001_dev->sas_device;
+ pm8001_I_T_nexus_reset(dev);
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ dev = pm8001_dev->sas_device;
+ pm8001_I_T_nexus_reset(dev);
+ break;
+ }
+ kfree(pw);
+}
+
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+ int handler)
+{
+ struct pm8001_work *pw;
+ int ret = 0;
+
+ pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
+ if (pw) {
+ pw->pm8001_ha = pm8001_ha;
+ pw->data = data;
+ pw->handler = handler;
+ INIT_WORK(&pw->work, pm8001_work_fn);
+ queue_work(pm8001_wq, &pw->work);
+ } else
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_ha_dev)
+{
+ int res;
+ u32 ccb_tag;
+ struct pm8001_ccb_info *ccb;
+ struct sas_task *task = NULL;
+ struct task_abort_req task_abort;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SATA_ABORT;
+ int ret;
+
+ if (!pm8001_ha_dev) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+ return;
+ }
+
+ task = sas_alloc_slow_task(GFP_ATOMIC);
+
+ if (!task) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+ "allocate task\n"));
+ return;
+ }
+
+ task->task_done = pm8001_task_done;
+
+ res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ if (res)
+ return;
+
+ ccb = &pm8001_ha->ccb_info[ccb_tag];
+ ccb->device = pm8001_ha_dev;
+ ccb->ccb_tag = ccb_tag;
+ ccb->task = task;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ memset(&task_abort, 0, sizeof(task_abort));
+ task_abort.abort_all = cpu_to_le32(1);
+ task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ task_abort.tag = cpu_to_le32(ccb_tag);
+
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ if (ret)
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+
+}
+
+static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_ha_dev)
+{
+ struct sata_start_req sata_cmd;
+ int res;
+ u32 ccb_tag;
+ struct pm8001_ccb_info *ccb;
+ struct sas_task *task = NULL;
+ struct host_to_dev_fis fis;
+ struct domain_device *dev;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+ task = sas_alloc_slow_task(GFP_ATOMIC);
+
+ if (!task) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("cannot allocate task !!!\n"));
+ return;
+ }
+ task->task_done = pm8001_task_done;
+
+ res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ if (res) {
+ sas_free_task(task);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("cannot allocate tag !!!\n"));
+ return;
+ }
+
+ /* allocate domain device by ourselves as libsas
+ * is not going to provide any
+ */
+ dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+ if (!dev) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Domain device cannot be allocated\n"));
+ return;
+ }
+ task->dev = dev;
+ task->dev->lldd_dev = pm8001_ha_dev;
+
+ ccb = &pm8001_ha->ccb_info[ccb_tag];
+ ccb->device = pm8001_ha_dev;
+ ccb->ccb_tag = ccb_tag;
+ ccb->task = task;
+ pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+ pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ /* construct read log FIS */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.fis_type = 0x27;
+ fis.flags = 0x80;
+ fis.command = ATA_CMD_READ_LOG_EXT;
+ fis.lbal = 0x10;
+ fis.sector_count = 0x1;
+
+ sata_cmd.tag = cpu_to_le32(ccb_tag);
+ sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+ memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ if (res) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ kfree(dev);
+ }
+}
+
+/**
+ * mpi_ssp_completion- process the event that FW response to the SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When FW has completed a ssp request for example a IO request, after it has
+ * filled the SG data with the data, it will trigger this event represent
+ * that he has finished the job,please check the coresponding buffer.
+ * So we will tell the caller who maybe waiting the result to tell upper layer
+ * that the task has been finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ unsigned long flags;
+ u32 status;
+ u32 param;
+ u32 tag;
+ struct ssp_completion_resp *psspPayload;
+ struct task_status_struct *ts;
+ struct ssp_response_iu *iu;
+ struct pm8001_device *pm8001_dev;
+ psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+ status = le32_to_cpu(psspPayload->status);
+ tag = le32_to_cpu(psspPayload->tag);
+ ccb = &pm8001_ha->ccb_info[tag];
+ if ((status == IO_ABORTED) && ccb->open_retry) {
+ /* Being completed by another */
+ ccb->open_retry = 0;
+ return;
+ }
+ pm8001_dev = ccb->device;
+ param = le32_to_cpu(psspPayload->param);
+
+ t = ccb->task;
+
+ if (status && status != IO_UNDERFLOW)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("sas IO status 0x%x\n", status));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+ ts = &t->task_status;
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW))
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+
+ switch (status) {
+ case IO_SUCCESS:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
+ ",param = %d\n", param));
+ if (param == 0) {
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ } else {
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->residual = param;
+ iu = &psspPayload->ssp_resp_iu;
+ sas_ssp_task_response(pm8001_ha->dev, t, iu);
+ }
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ break;
+ case IO_UNDERFLOW:
+ /* SSP Completion with error */
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
+ ",param = %d\n", param));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ ts->residual = param;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_NO_DEVICE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_NO_DEVICE\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ /* Force the midlayer to retry */
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (!t->uldd_task)
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+ "NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_DMA:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_OFFSET_MISMATCH:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_PORT_IN_RESET:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_PORT_IN_RESET\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ if (!t->uldd_task)
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_DS_NON_OPERATIONAL);
+ break;
+ case IO_DS_IN_RECOVERY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_TM_TAG_NOT_FOUND:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", status));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ }
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("scsi_status = %x\n ",
+ psspPayload->ssp_resp_iu.status));
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
+ " io_status 0x%x resp 0x%x "
+ "stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ t->task_done(t);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ struct sas_task *t;
+ unsigned long flags;
+ struct task_status_struct *ts;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_device *pm8001_dev;
+ struct ssp_event_resp *psspPayload =
+ (struct ssp_event_resp *)(piomb + 4);
+ u32 event = le32_to_cpu(psspPayload->event);
+ u32 tag = le32_to_cpu(psspPayload->tag);
+ u32 port_id = le32_to_cpu(psspPayload->port_id);
+ u32 dev_id = le32_to_cpu(psspPayload->device_id);
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ if (event)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("sas IO status 0x%x\n", event));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+ ts = &t->task_status;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("port_id = %x,device_id = %x\n",
+ port_id, dev_id));
+ switch (event) {
+ case IO_OVERFLOW:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+ return;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
+ "_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (!t->uldd_task)
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+ "NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+ return;
+ case IO_XFER_ERROR_UNEXPECTED_PHASE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_OFFSET_MISMATCH:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_CMD_FRAME_ISSUED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
+ return;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", event));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
+ " event 0x%x resp 0x%x "
+ "stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ t->task_done(t);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ u32 param;
+ u32 status;
+ u32 tag;
+ int i, j;
+ u8 sata_addr_low[4];
+ u32 temp_sata_addr_low;
+ u8 sata_addr_hi[4];
+ u32 temp_sata_addr_hi;
+ struct sata_completion_resp *psataPayload;
+ struct task_status_struct *ts;
+ struct ata_task_resp *resp ;
+ u32 *sata_resp;
+ struct pm8001_device *pm8001_dev;
+ unsigned long flags;
+
+ psataPayload = (struct sata_completion_resp *)(piomb + 4);
+ status = le32_to_cpu(psataPayload->status);
+ tag = le32_to_cpu(psataPayload->tag);
+
+ if (!tag) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("tag null\n"));
+ return;
+ }
+ ccb = &pm8001_ha->ccb_info[tag];
+ param = le32_to_cpu(psataPayload->param);
+ if (ccb) {
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("ccb null\n"));
+ return;
+ }
+
+ if (t) {
+ if (t->dev && (t->dev->lldd_dev))
+ pm8001_dev = t->dev->lldd_dev;
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task null\n"));
+ return;
+ }
+
+ if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+ && unlikely(!t || !t->lldd_task || !t->dev)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task or dev null\n"));
+ return;
+ }
+
+ ts = &t->task_status;
+ if (!ts) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("ts null\n"));
+ return;
+ }
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW)) {
+ if (!((t->dev->parent) &&
+ (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
+ sata_addr_low[i] = pm8001_ha->sas_addr[j];
+ for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
+ sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+ memcpy(&temp_sata_addr_low, sata_addr_low,
+ sizeof(sata_addr_low));
+ memcpy(&temp_sata_addr_hi, sata_addr_hi,
+ sizeof(sata_addr_hi));
+ temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+ |((temp_sata_addr_hi << 8) &
+ 0xff0000) |
+ ((temp_sata_addr_hi >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_hi << 24) &
+ 0xff000000));
+ temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+ & 0xff) |
+ ((temp_sata_addr_low << 8)
+ & 0xff0000) |
+ ((temp_sata_addr_low >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_low << 24)
+ & 0xff000000)) +
+ pm8001_dev->attached_phy +
+ 0x10);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%08x%08x", temp_sata_addr_hi,
+ temp_sata_addr_low));
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ }
+ }
+ switch (status) {
+ case IO_SUCCESS:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ if (param == 0) {
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ /* check if response is for SEND READ LOG */
+ if (pm8001_dev &&
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+ /* set new bit for abort_all */
+ pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+ /* clear bit for read log */
+ pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ pm8001_send_abort_all(pm8001_ha, pm8001_dev);
+ /* Free the tag */
+ pm8001_tag_free(pm8001_ha, tag);
+ sas_free_task(t);
+ return;
+ }
+ } else {
+ u8 len;
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->residual = param;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+ param));
+ sata_resp = &psataPayload->sata_resp[0];
+ resp = (struct ata_task_resp *)ts->buf;
+ if (t->ata_task.dma_xfer == 0 &&
+ t->data_dir == PCI_DMA_FROMDEVICE) {
+ len = sizeof(struct pio_setup_fis);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("PIO read len = %d\n", len));
+ } else if (t->ata_task.use_ncq) {
+ len = sizeof(struct set_dev_bits_fis);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("FPDMA len = %d\n", len));
+ } else {
+ len = sizeof(struct dev_to_host_fis);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("other len = %d\n", len));
+ }
+ if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+ resp->frame_len = len;
+ memcpy(&resp->ending_fis[0], sata_resp, len);
+ ts->buf_valid_size = sizeof(*resp);
+ } else
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("response to large\n"));
+ }
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ /* following cases are to do cases */
+ case IO_UNDERFLOW:
+ /* SATA Completion with error */
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ ts->residual = param;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_NO_DEVICE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_NO_DEVICE\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_INTERRUPTED;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
+ "_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+ "NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES"
+ "_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_DMA:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ break;
+ case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_PORT_IN_RESET:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_PORT_IN_RESET\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha, pm8001_dev,
+ IO_DS_NON_OPERATIONAL);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_DS_IN_RECOVERY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk(" IO_DS_IN_RECOVERY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_DS_IN_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_ERROR\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha, pm8001_dev,
+ IO_DS_IN_ERROR);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", status));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p done with io_status 0x%x"
+ " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ struct sas_task *t;
+ struct task_status_struct *ts;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_device *pm8001_dev;
+ struct sata_event_resp *psataPayload =
+ (struct sata_event_resp *)(piomb + 4);
+ u32 event = le32_to_cpu(psataPayload->event);
+ u32 tag = le32_to_cpu(psataPayload->tag);
+ u32 port_id = le32_to_cpu(psataPayload->port_id);
+ u32 dev_id = le32_to_cpu(psataPayload->device_id);
+ unsigned long flags;
+
+ ccb = &pm8001_ha->ccb_info[tag];
+
+ if (ccb) {
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("No CCB !!!. returning\n"));
+ }
+ if (event)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SATA EVENT 0x%x\n", event));
+
+ /* Check if this is NCQ error */
+ if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+ /* find device using device id */
+ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+ /* send read log extension */
+ if (pm8001_dev)
+ pm8001_send_read_log(pm8001_ha, pm8001_dev);
+ return;
+ }
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ if (event)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("sata IO status 0x%x\n", event));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+ ts = &t->task_status;
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, dev_id, tag, event));
+ switch (event) {
+ case IO_OVERFLOW:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_INTERRUPTED;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
+ "_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+ "NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_PEER_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_UNEXPECTED_PHASE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_OFFSET_MISMATCH:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_CMD_FRAME_ISSUED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ break;
+ case IO_XFER_PIO_SETUP_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", event));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p done with io_status 0x%x"
+ " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ u32 param;
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ unsigned long flags;
+ u32 status;
+ u32 tag;
+ struct smp_completion_resp *psmpPayload;
+ struct task_status_struct *ts;
+ struct pm8001_device *pm8001_dev;
+
+ psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+ status = le32_to_cpu(psmpPayload->status);
+ tag = le32_to_cpu(psmpPayload->tag);
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ param = le32_to_cpu(psmpPayload->param);
+ t = ccb->task;
+ ts = &t->task_status;
+ pm8001_dev = ccb->device;
+ if (status)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("smp IO status 0x%x\n", status));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+
+ switch (status) {
+ case IO_SUCCESS:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ABORTED IOMB\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_OVERFLOW:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_NO_DEVICE:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case IO_ERROR_HW_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_BUSY;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_BUSY;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_BUSY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
+ "NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_RX_FRAME:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_ERROR_INTERNAL_SMP_RESOURCE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_QUEUE_FULL;
+ break;
+ case IO_PORT_IN_RESET:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_PORT_IN_RESET\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_DS_IN_RECOVERY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", status));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ /* not allowed case. Therefore, return failed status */
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
+ " io_status 0x%x resp 0x%x "
+ "stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ t->task_done(t);
+ }
+}
+
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ struct set_dev_state_resp *pPayload =
+ (struct set_dev_state_resp *)(piomb + 4);
+ u32 tag = le32_to_cpu(pPayload->tag);
+ struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+ struct pm8001_device *pm8001_dev = ccb->device;
+ u32 status = le32_to_cpu(pPayload->status);
+ u32 device_id = le32_to_cpu(pPayload->device_id);
+ u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
+ u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
+ "from 0x%x to 0x%x status = 0x%x!\n",
+ device_id, pds, nds, status));
+ complete(pm8001_dev->setds_completion);
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ pm8001_tag_free(pm8001_ha, tag);
+}
+
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct get_nvm_data_resp *pPayload =
+ (struct get_nvm_data_resp *)(piomb + 4);
+ u32 tag = le32_to_cpu(pPayload->tag);
+ struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+ u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+ complete(pm8001_ha->nvmd_completion);
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
+ if ((dlen_status & NVMD_STAT) != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Set nvm data error!\n"));
+ return;
+ }
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ pm8001_tag_free(pm8001_ha, tag);
+}
+
+void
+pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct fw_control_ex *fw_control_context;
+ struct get_nvm_data_resp *pPayload =
+ (struct get_nvm_data_resp *)(piomb + 4);
+ u32 tag = le32_to_cpu(pPayload->tag);
+ struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+ u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+ u32 ir_tds_bn_dps_das_nvm =
+ le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
+ void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+ fw_control_context = ccb->fw_control_context;
+
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
+ if ((dlen_status & NVMD_STAT) != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Get nvm data error!\n"));
+ complete(pm8001_ha->nvmd_completion);
+ return;
+ }
+
+ if (ir_tds_bn_dps_das_nvm & IPMode) {
+ /* indirect mode - IR bit set */
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Get NVMD success, IR=1\n"));
+ if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
+ if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
+ memcpy(pm8001_ha->sas_addr,
+ ((u8 *)virt_addr + 4),
+ SAS_ADDR_SIZE);
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Get SAS address"
+ " from VPD successfully!\n"));
+ }
+ } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
+ || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
+ ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
+ ;
+ } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
+ || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
+ ;
+ } else {
+ /* Should not be happened*/
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
+ ir_tds_bn_dps_das_nvm));
+ }
+ } else /* direct mode */{
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
+ (dlen_status & NVMD_LEN) >> 24));
+ }
+ /* Though fw_control_context is freed below, usrAddr still needs
+ * to be updated as this holds the response to the request function
+ */
+ memcpy(fw_control_context->usrAddr,
+ pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+ fw_control_context->len);
+ kfree(ccb->fw_control_context);
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ pm8001_tag_free(pm8001_ha, tag);
+ complete(pm8001_ha->nvmd_completion);
+}
+
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct local_phy_ctl_resp *pPayload =
+ (struct local_phy_ctl_resp *)(piomb + 4);
+ u32 status = le32_to_cpu(pPayload->status);
+ u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
+ u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
+ if (status != 0) {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("%x phy execute %x phy op failed!\n",
+ phy_id, phy_op));
+ } else
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("%x phy execute %x phy op success!\n",
+ phy_id, phy_op));
+ return 0;
+}
+
+/**
+ * pm8001_bytes_dmaed - one of the interface function communication with libsas
+ * @pm8001_ha: our hba card information
+ * @i: which phy that received the event.
+ *
+ * when HBA driver received the identify done event or initiate FIS received
+ * event(for SATA), it will invoke this function to notify the sas layer that
+ * the sas toplogy has formed, please discover the the whole sas domain,
+ * while receive a broadcast(change) primitive just tell the sas
+ * layer to discover the changed domain rather than the whole domain.
+ */
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+{
+ struct pm8001_phy *phy = &pm8001_ha->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha;
+ if (!phy->phy_attached)
+ return;
+
+ sas_ha = pm8001_ha->sas;
+ if (sas_phy->phy) {
+ struct sas_phy *sphy = sas_phy->phy;
+ sphy->negotiated_linkrate = sas_phy->linkrate;
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+ sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw = phy->maximum_linkrate;
+ }
+
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ struct sas_identify_frame *id;
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+ id->dev_type = phy->identify.device_type;
+ id->initiator_bits = SAS_PROTOCOL_ALL;
+ id->target_bits = phy->identify.target_port_protocols;
+ } else if (phy->phy_type & PORT_TYPE_SATA) {
+ /*Nothing*/
+ }
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d byte dmaded.\n", i));
+
+ sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+ pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+}
+
+/* Get the link rate speed */
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+{
+ struct sas_phy *sas_phy = phy->sas_phy.phy;
+
+ switch (link_rate) {
+ case PHY_SPEED_60:
+ phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case PHY_SPEED_30:
+ phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case PHY_SPEED_15:
+ phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ }
+ sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
+ sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+ sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
+/**
+ * asd_get_attached_sas_addr -- extract/generate attached SAS address
+ * @phy: pointer to asd_phy
+ * @sas_addr: pointer to buffer where the SAS address is to be written
+ *
+ * This function extracts the SAS address from an IDENTIFY frame
+ * received. If OOB is SATA, then a SAS address is generated from the
+ * HA tables.
+ *
+ * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
+ * buffer.
+ */
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+ u8 *sas_addr)
+{
+ if (phy->sas_phy.frame_rcvd[0] == 0x34
+ && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
+ struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
+ /* FIS device-to-host */
+ u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
+ addr += phy->sas_phy.id;
+ *(__be64 *)sas_addr = cpu_to_be64(addr);
+ } else {
+ struct sas_identify_frame *idframe =
+ (void *) phy->sas_phy.frame_rcvd;
+ memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
+ }
+}
+
+/**
+ * pm8001_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the outbound queue message number.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+ u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+ struct hw_event_ack_req payload;
+ u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+ struct inbound_queue_table *circularQ;
+
+ memset((u8 *)&payload, 0, sizeof(payload));
+ circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+ payload.tag = cpu_to_le32(1);
+ payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+ ((phyId & 0x0F) << 4) | (port_id & 0x0F));
+ payload.param0 = cpu_to_le32(param0);
+ payload.param1 = cpu_to_le32(param1);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up -FW tells me a SAS phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 lr_evt_status_phyid_portid =
+ le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+ u8 link_rate =
+ (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+ u8 phy_id =
+ (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ u8 deviceType = pPayload->sas_identify.dev_type;
+ port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPC;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id));
+
+ switch (deviceType) {
+ case SAS_PHY_UNUSED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("device type no device.\n"));
+ break;
+ case SAS_END_DEVICE:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
+ PHY_NOTIFY_ENABLE_SPINUP);
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("expander device.\n"));
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("fanout expander device.\n"));
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("unknown device type(%x)\n", deviceType));
+ break;
+ }
+ phy->phy_type |= PORT_TYPE_SAS;
+ phy->identify.device_type = deviceType;
+ phy->phy_attached = 1;
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+ phy->sas_phy.oob_mode = SAS_OOB_MODE;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+ sizeof(struct sas_identify_frame)-4);
+ phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+ pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ mdelay(200);/*delay a moment to wait disk to spinup*/
+ pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up -FW tells me a SATA phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 lr_evt_status_phyid_portid =
+ le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+ u8 link_rate =
+ (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+ u8 phy_id =
+ (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
+ " phy id = %d\n", port_id, phy_id));
+ port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPC;
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->sas_phy.oob_mode = SATA_OOB_MODE;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ sizeof(struct dev_to_host_fis));
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ phy->identify.device_type = SAS_SATA_DEV;
+ pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down -we should notify the libsas the phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 lr_evt_status_phyid_portid =
+ le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+ u8 phy_id =
+ (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ port->port_state = portstate;
+ phy->phy_type = 0;
+ phy->identify.device_type = 0;
+ phy->phy_attached = 0;
+ memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
+ switch (portstate) {
+ case PORT_VALID:
+ break;
+ case PORT_INVALID:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" PortInvalid portID %d\n", port_id));
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
+ pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ break;
+ case PORT_IN_RESET:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" Port In Reset portID %d\n", port_id));
+ break;
+ case PORT_NOT_ESTABLISHED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ port->port_attached = 0;
+ break;
+ case PORT_LOSTCOMM:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
+ pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ break;
+ default:
+ port->port_attached = 0;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" phy Down and(default) = %x\n",
+ portstate));
+ break;
+
+ }
+}
+
+/**
+ * pm8001_mpi_reg_resp -process register device ID response.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ *
+ * when sas layer find a device it will notify LLDD, then the driver register
+ * the domain device to FW, this event is the return device ID which the FW
+ * has assigned, from now,inter-communication with FW is no longer using the
+ * SAS address, use device ID which FW assigned.
+ */
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ u32 status;
+ u32 device_id;
+ u32 htag;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_device *pm8001_dev;
+ struct dev_reg_resp *registerRespPayload =
+ (struct dev_reg_resp *)(piomb + 4);
+
+ htag = le32_to_cpu(registerRespPayload->tag);
+ ccb = &pm8001_ha->ccb_info[htag];
+ pm8001_dev = ccb->device;
+ status = le32_to_cpu(registerRespPayload->status);
+ device_id = le32_to_cpu(registerRespPayload->device_id);
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" register device is status = %d\n", status));
+ switch (status) {
+ case DEVREG_SUCCESS:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
+ pm8001_dev->device_id = device_id;
+ break;
+ case DEVREG_FAILURE_OUT_OF_RESOURCE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
+ break;
+ case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
+ break;
+ case DEVREG_FAILURE_INVALID_PHY_ID:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
+ break;
+ case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
+ break;
+ case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
+ break;
+ case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"));
+ break;
+ case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"));
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n"));
+ break;
+ }
+ complete(pm8001_dev->dcompletion);
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ pm8001_tag_free(pm8001_ha, htag);
+ return 0;
+}
+
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ u32 status;
+ u32 device_id;
+ struct dev_reg_resp *registerRespPayload =
+ (struct dev_reg_resp *)(piomb + 4);
+
+ status = le32_to_cpu(registerRespPayload->status);
+ device_id = le32_to_cpu(registerRespPayload->device_id);
+ if (status != 0)
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" deregister device failed ,status = %x"
+ ", device_id = %x\n", status, device_id));
+ return 0;
+}
+
+/**
+ * fw_flash_update_resp - Response from FW for flash update command.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ u32 status;
+ struct fw_flash_Update_resp *ppayload =
+ (struct fw_flash_Update_resp *)(piomb + 4);
+ u32 tag = le32_to_cpu(ppayload->tag);
+ struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+ status = le32_to_cpu(ppayload->status);
+ switch (status) {
+ case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
+ break;
+ case FLASH_UPDATE_IN_PROGRESS:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
+ break;
+ case FLASH_UPDATE_HDR_ERR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
+ break;
+ case FLASH_UPDATE_OFFSET_ERR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
+ break;
+ case FLASH_UPDATE_CRC_ERR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
+ break;
+ case FLASH_UPDATE_LENGTH_ERR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
+ break;
+ case FLASH_UPDATE_HW_ERR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
+ break;
+ case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
+ break;
+ case FLASH_UPDATE_DISABLED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("No matched status = %d\n", status));
+ break;
+ }
+ kfree(ccb->fw_control_context);
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ pm8001_tag_free(pm8001_ha, tag);
+ complete(pm8001_ha->nvmd_completion);
+ return 0;
+}
+
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ u32 status;
+ int i;
+ struct general_event_resp *pPayload =
+ (struct general_event_resp *)(piomb + 4);
+ status = le32_to_cpu(pPayload->status);
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" status = 0x%x\n", status));
+ for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i,
+ pPayload->inb_IOMB_payload[i]));
+ return 0;
+}
+
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ unsigned long flags;
+ u32 status ;
+ u32 tag, scp;
+ struct task_status_struct *ts;
+ struct pm8001_device *pm8001_dev;
+
+ struct task_abort_resp *pPayload =
+ (struct task_abort_resp *)(piomb + 4);
+
+ status = le32_to_cpu(pPayload->status);
+ tag = le32_to_cpu(pPayload->tag);
+ if (!tag) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk(" TAG NULL. RETURNING !!!"));
+ return -1;
+ }
+
+ scp = le32_to_cpu(pPayload->scp);
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device; /* retrieve device */
+
+ if (!t) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk(" TASK NULL. RETURNING !!!"));
+ return -1;
+ }
+ ts = &t->task_status;
+ if (status != 0)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task abort failed status 0x%x ,"
+ "tag = 0x%x, scp= 0x%x\n", status, tag, scp));
+ switch (status) {
+ case IO_SUCCESS:
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ break;
+ case IO_NOT_VALID:
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
+ ts->resp = TMF_RESP_FUNC_FAILED;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();
+
+ if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
+ pm8001_tag_free(pm8001_ha, tag);
+ sas_free_task(t);
+ /* clear the flag */
+ pm8001_dev->id &= 0xBFFFFFFF;
+ } else
+ t->task_done(t);
+
+ return 0;
+}
+
+/**
+ * mpi_hw_event -The hw event has come.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+{
+ unsigned long flags;
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 lr_evt_status_phyid_portid =
+ le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+ u8 phy_id =
+ (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u16 eventType =
+ (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8);
+ u8 status =
+ (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24);
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("outbound queue HW event & event type : "));
+ switch (eventType) {
+ case HW_EVENT_PHY_START_STATUS:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_START_STATUS"
+ " status = %x\n", status));
+ if (status == 0) {
+ phy->phy_state = 1;
+ if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ complete(phy->enable_completion);
+ }
+ break;
+ case HW_EVENT_SAS_PHY_UP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
+ hw_event_sas_phy_up(pm8001_ha, piomb);
+ break;
+ case HW_EVENT_SATA_PHY_UP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+ hw_event_sata_phy_up(pm8001_ha, piomb);
+ break;
+ case HW_EVENT_PHY_STOP_STATUS:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_STOP_STATUS "
+ "status = %x\n", status));
+ if (status == 0)
+ phy->phy_state = 0;
+ break;
+ case HW_EVENT_SATA_SPINUP_HOLD:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ break;
+ case HW_EVENT_PHY_DOWN:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ phy->phy_state = 0;
+ hw_event_phy_down(pm8001_ha, piomb);
+ break;
+ case HW_EVENT_PORT_INVALID:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ /* the broadcast change primitive received, tell the LIBSAS this event
+ to revalidate the sas domain*/
+ case HW_EVENT_BROADCAST_CHANGE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+ port_id, phy_id, 1, 0);
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_PHY_ERROR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+ sas_phy_disconnected(&phy->sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ break;
+ case HW_EVENT_BROADCAST_EXP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_LINK_ERR_INVALID_DWORD:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_CODE_VIOLATION,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_MALFUNCTION:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+ break;
+ case HW_EVENT_BROADCAST_SES:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_INBOUND_CRC_ERROR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_INBOUND_CRC_ERROR,
+ port_id, phy_id, 0, 0);
+ break;
+ case HW_EVENT_HARD_RESET_RECEIVED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+ sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ break;
+ case HW_EVENT_ID_FRAME_TIMEOUT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+ pm8001_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RESET_TIMER_TMO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RECOVER:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ break;
+ case HW_EVENT_PORT_RESET_COMPLETE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ break;
+ case EVENT_BROADCAST_ASYNCH_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Unknown event type = %x\n", eventType));
+ break;
+ }
+ return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ __le32 pHeader = *(__le32 *)piomb;
+ u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
+
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
+
+ switch (opc) {
+ case OPC_OUB_ECHO:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+ break;
+ case OPC_OUB_HW_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_HW_EVENT\n"));
+ mpi_hw_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_COMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ mpi_ssp_completion(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SMP_COMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SMP_COMP\n"));
+ mpi_smp_completion(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_LOCAL_PHY_CNTRL:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+ pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEV_REGIST:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+ pm8001_mpi_reg_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEREG_DEV:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("unregister the device\n"));
+ pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_DEV_HANDLE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+ break;
+ case OPC_OUB_SATA_COMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SATA_COMP\n"));
+ mpi_sata_completion(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SATA_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+ mpi_sata_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+ mpi_ssp_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEV_HANDLE_ARRIV:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+ /*This is for target*/
+ break;
+ case OPC_OUB_SSP_RECV_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+ /*This is for target*/
+ break;
+ case OPC_OUB_DEV_INFO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEV_INFO\n"));
+ break;
+ case OPC_OUB_FW_FLASH_UPDATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+ pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GPIO_RESPONSE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+ break;
+ case OPC_OUB_GPIO_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+ break;
+ case OPC_OUB_GENERAL_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+ pm8001_mpi_general_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_ABORT_RSP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+ pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SATA_ABORT_RSP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+ pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SAS_DIAG_MODE_START_END:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+ break;
+ case OPC_OUB_SAS_DIAG_EXECUTE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+ break;
+ case OPC_OUB_GET_TIME_STAMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+ break;
+ case OPC_OUB_SAS_HW_EVENT_ACK:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+ break;
+ case OPC_OUB_PORT_CONTROL:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+ break;
+ case OPC_OUB_SMP_ABORT_RSP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+ pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_NVMD_DATA:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+ pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SET_NVMD_DATA:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+ pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+ break;
+ case OPC_OUB_SET_DEVICE_STATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+ pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_DEVICE_STATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+ break;
+ case OPC_OUB_SET_DEV_INFO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+ break;
+ case OPC_OUB_SAS_RE_INITIALIZE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
+ opc));
+ break;
+ }
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+ struct outbound_queue_table *circularQ;
+ void *pMsg1 = NULL;
+ u8 uninitialized_var(bc);
+ u32 ret = MPI_IO_STATUS_FAIL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+ do {
+ ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+ if (MPI_IO_STATUS_SUCCESS == ret) {
+ /* process the outbound message */
+ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ /* free the message from the outbound circular buffer */
+ pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+ circularQ, bc);
+ }
+ if (MPI_IO_STATUS_BUSY == ret) {
+ /* Update the producer index from SPC */
+ circularQ->producer_index =
+ cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+ if (le32_to_cpu(circularQ->producer_index) ==
+ circularQ->consumer_idx)
+ /* OQ is empty */
+ break;
+ }
+ } while (1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+ [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+ [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
+ [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
+ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+};
+void
+pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
+{
+ int i;
+ struct scatterlist *sg;
+ struct pm8001_prd *buf_prd = prd;
+
+ for_each_sg(scatter, sg, nr, i) {
+ buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+ buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+ buf_prd->im_len.e = 0;
+ buf_prd++;
+ }
+}
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
+{
+ psmp_cmd->tag = hTag;
+ psmp_cmd->device_id = cpu_to_le32(deviceID);
+ psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+}
+
+/**
+ * pm8001_chip_smp_req - send a SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ int elem, rc;
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct scatterlist *sg_req, *sg_resp;
+ u32 req_len, resp_len;
+ struct smp_req smp_cmd;
+ u32 opc;
+ struct inbound_queue_table *circularQ;
+
+ memset(&smp_cmd, 0, sizeof(smp_cmd));
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ resp_len = sg_dma_len(sg_resp);
+ /* must be in dwords */
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_2;
+ }
+
+ opc = OPC_INB_SMP_REQUEST;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+ smp_cmd.long_smp_req.long_req_addr =
+ cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+ smp_cmd.long_smp_req.long_req_size =
+ cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+ smp_cmd.long_smp_req.long_resp_addr =
+ cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
+ smp_cmd.long_smp_req.long_resp_size =
+ cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+ build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ (u32 *)&smp_cmd, 0);
+ if (rc)
+ goto err_out_2;
+
+ return 0;
+
+err_out_2:
+ dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+err_out:
+ dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ return rc;
+}
+
+/**
+ * pm8001_chip_ssp_io_req - send a SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct ssp_ini_io_start_req ssp_cmd;
+ u32 tag = ccb->ccb_tag;
+ int ret;
+ u64 phys_addr;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SSPINIIOSTART;
+ memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+ memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+ ssp_cmd.dir_m_tlr =
+ cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
+ SAS 1.1 compatible TLR*/
+ ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+ ssp_cmd.tag = cpu_to_le32(tag);
+ if (task->ssp_task.enable_first_burst)
+ ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+ ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+ ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+ memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
+ task->ssp_task.cmd->cmd_len);
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ /* fill in PRD (scatter/gather) table, if any */
+ if (task->num_scatter > 1) {
+ pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.esgl = cpu_to_le32(1<<31);
+ } else if (task->num_scatter == 1) {
+ u64 dma_addr = sg_dma_address(task->scatter);
+ ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+ ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.esgl = 0;
+ } else if (task->num_scatter == 0) {
+ ssp_cmd.addr_low = 0;
+ ssp_cmd.addr_high = 0;
+ ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.esgl = 0;
+ }
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
+ return ret;
+}
+
+static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+ u32 tag = ccb->ccb_tag;
+ int ret;
+ struct sata_start_req sata_cmd;
+ u32 hdr_tag, ncg_tag = 0;
+ u64 phys_addr;
+ u32 ATAP = 0x0;
+ u32 dir;
+ struct inbound_queue_table *circularQ;
+ unsigned long flags;
+ u32 opc = OPC_INB_SATA_HOST_OPSTART;
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ if (task->data_dir == PCI_DMA_NONE) {
+ ATAP = 0x04; /* no data*/
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+ } else if (likely(!task->ata_task.device_control_reg_update)) {
+ if (task->ata_task.dma_xfer) {
+ ATAP = 0x06; /* DMA */
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+ } else {
+ ATAP = 0x05; /* PIO*/
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+ }
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+ }
+ }
+ if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ ncg_tag = hdr_tag;
+ }
+ dir = data_dir_flags[task->data_dir] << 8;
+ sata_cmd.tag = cpu_to_le32(tag);
+ sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.ncqtag_atap_dir_m =
+ cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
+ sata_cmd.sata_fis = task->ata_task.fis;
+ if (likely(!task->ata_task.device_control_reg_update))
+ sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+ sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+ /* fill in PRD (scatter/gather) table, if any */
+ if (task->num_scatter > 1) {
+ pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ sata_cmd.addr_low = lower_32_bits(phys_addr);
+ sata_cmd.addr_high = upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1 << 31);
+ } else if (task->num_scatter == 1) {
+ u64 dma_addr = sg_dma_address(task->scatter);
+ sata_cmd.addr_low = lower_32_bits(dma_addr);
+ sata_cmd.addr_high = upper_32_bits(dma_addr);
+ sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.esgl = 0;
+ } else if (task->num_scatter == 0) {
+ sata_cmd.addr_low = 0;
+ sata_cmd.addr_high = 0;
+ sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.esgl = 0;
+ }
+
+ /* Check for read log for failed drive and return */
+ if (sata_cmd.sata_fis.command == 0x2f) {
+ if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+ (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+ (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+ struct task_status_struct *ts;
+
+ pm8001_ha_dev->id &= 0xDFFFFFFF;
+ ts = &task->task_status;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((task->task_state_flags &
+ SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p resp 0x%x "
+ " stat 0x%x but aborted by upper layer "
+ "\n", task, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
+ return 0;
+ }
+ }
+ }
+
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ return ret;
+}
+
+/**
+ * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @num: the inbound queue number
+ * @phy_id: the phy id which we wanted to start up.
+ */
+static int
+pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+ struct phy_start_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 tag = 0x01;
+ u32 opcode = OPC_INB_PHYSTART;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(tag);
+ /*
+ ** [0:7] PHY Identifier
+ ** [8:11] link rate 1.5G, 3G, 6G
+ ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
+ ** [14] 0b disable spin up hold; 1b enable spin up hold
+ */
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | phy_id);
+ payload.sas_identify.dev_type = SAS_END_DEVICE;
+ payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ memcpy(payload.sas_identify.sas_addr,
+ pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ payload.sas_identify.phy_id = phy_id;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ return ret;
+}
+
+/**
+ * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @num: the inbound queue number
+ * @phy_id: the phy id which we wanted to start up.
+ */
+int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+ u8 phy_id)
+{
+ struct phy_stop_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 tag = 0x01;
+ u32 opcode = OPC_INB_PHYSTOP;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(tag);
+ payload.phy_id = cpu_to_le32(phy_id);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ return ret;
+}
+
+/**
+ * see comments on pm8001_mpi_reg_resp.
+ */
+static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u32 flag)
+{
+ struct reg_dev_req payload;
+ u32 opc;
+ u32 stp_sspsmp_sata = 0x4;
+ struct inbound_queue_table *circularQ;
+ u32 linkrate, phy_id;
+ int rc, tag = 0xdeadbeef;
+ struct pm8001_ccb_info *ccb;
+ u8 retryFlag = 0x1;
+ u16 firstBurstSize = 0;
+ u16 ITNT = 2000;
+ struct domain_device *dev = pm8001_dev->sas_device;
+ struct domain_device *parent_dev = dev->parent;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return rc;
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->device = pm8001_dev;
+ ccb->ccb_tag = tag;
+ payload.tag = cpu_to_le32(tag);
+ if (flag == 1)
+ stp_sspsmp_sata = 0x02; /*direct attached sata */
+ else {
+ if (pm8001_dev->dev_type == SAS_SATA_DEV)
+ stp_sspsmp_sata = 0x00; /* stp*/
+ else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+ pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ stp_sspsmp_sata = 0x01; /*ssp or smp*/
+ }
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+ else
+ phy_id = pm8001_dev->attached_phy;
+ opc = OPC_INB_REG_DEV;
+ linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+ pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+ payload.phyid_portid =
+ cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
+ ((phy_id & 0x0F) << 4));
+ payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
+ ((linkrate & 0x0F) * 0x1000000) |
+ ((stp_sspsmp_sata & 0x03) * 0x10000000));
+ payload.firstburstsize_ITNexustimeout =
+ cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+ memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+ SAS_ADDR_SIZE);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return rc;
+}
+
+/**
+ * see comments on pm8001_mpi_reg_resp.
+ */
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ u32 device_id)
+{
+ struct dereg_dev_req payload;
+ u32 opc = OPC_INB_DEREG_DEV_HANDLE;
+ int ret;
+ struct inbound_queue_table *circularQ;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(1);
+ payload.device_id = cpu_to_le32(device_id);
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("unregister device device_id = %d\n", device_id));
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return ret;
+}
+
+/**
+ * pm8001_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @num: the inbound queue number
+ * @phy_id: the phy id which we wanted to operate
+ * @phy_op:
+ */
+static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ u32 phyId, u32 phy_op)
+{
+ struct local_phy_ctl_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+ memset(&payload, 0, sizeof(payload));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(1);
+ payload.phyop_phyid =
+ cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return ret;
+}
+
+static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 value;
+#ifdef PM8001_USE_MSIX
+ return 1;
+#endif
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+ if (value)
+ return 1;
+ return 0;
+
+}
+
+/**
+ * pm8001_chip_isr - PM8001 isr handler.
+ * @pm8001_ha: our hba card information.
+ * @irq: irq number.
+ * @stat: stat.
+ */
+static irqreturn_t
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+ pm8001_chip_interrupt_disable(pm8001_ha, vec);
+ process_oq(pm8001_ha, vec);
+ pm8001_chip_interrupt_enable(pm8001_ha, vec);
+ return IRQ_HANDLED;
+}
+
+static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
+ u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
+{
+ struct task_abort_req task_abort;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&task_abort, 0, sizeof(task_abort));
+ if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+ task_abort.abort_all = 0;
+ task_abort.device_id = cpu_to_le32(dev_id);
+ task_abort.tag_to_abort = cpu_to_le32(task_tag);
+ task_abort.tag = cpu_to_le32(cmd_tag);
+ } else if (ABORT_ALL == (flag & ABORT_MASK)) {
+ task_abort.abort_all = cpu_to_le32(1);
+ task_abort.device_id = cpu_to_le32(dev_id);
+ task_abort.tag = cpu_to_le32(cmd_tag);
+ }
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ return ret;
+}
+
+/**
+ * pm8001_chip_abort_task - SAS abort task when error or exception happened.
+ * @task: the task we wanted to aborted.
+ * @flag: the abort flag.
+ */
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
+{
+ u32 opc, device_id;
+ int rc = TMF_RESP_FUNC_FAILED;
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
+ cmd_tag, task_tag));
+ if (pm8001_dev->dev_type == SAS_END_DEVICE)
+ opc = OPC_INB_SSP_ABORT;
+ else if (pm8001_dev->dev_type == SAS_SATA_DEV)
+ opc = OPC_INB_SATA_ABORT;
+ else
+ opc = OPC_INB_SMP_ABORT;/* SMP */
+ device_id = pm8001_dev->device_id;
+ rc = send_task_abort(pm8001_ha, opc, device_id, flag,
+ task_tag, cmd_tag);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
+ return rc;
+}
+
+/**
+ * pm8001_chip_ssp_tm_req - built the task management command.
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information.
+ * @tmf: task management function.
+ */
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+{
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ u32 opc = OPC_INB_SSPINITMSTART;
+ struct inbound_queue_table *circularQ;
+ struct ssp_ini_tm_start_req sspTMCmd;
+ int ret;
+
+ memset(&sspTMCmd, 0, sizeof(sspTMCmd));
+ sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+ sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
+ sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
+ memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
+ sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+ if (pm8001_ha->chip_id != chip_8001)
+ sspTMCmd.ds_ads_m = 0x08;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
+ return ret;
+}
+
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ void *payload)
+{
+ u32 opc = OPC_INB_GET_NVMD_DATA;
+ u32 nvmd_type;
+ int rc;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ struct inbound_queue_table *circularQ;
+ struct get_nvm_data_req nvmd_req;
+ struct fw_control_ex *fw_control_context;
+ struct pm8001_ioctl_payload *ioctl_payload = payload;
+
+ nvmd_type = ioctl_payload->minor_function;
+ fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
+ if (!fw_control_context)
+ return -ENOMEM;
+ fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
+ fw_control_context->len = ioctl_payload->length;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&nvmd_req, 0, sizeof(nvmd_req));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc) {
+ kfree(fw_control_context);
+ return rc;
+ }
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->ccb_tag = tag;
+ ccb->fw_control_context = fw_control_context;
+ nvmd_req.tag = cpu_to_le32(tag);
+
+ switch (nvmd_type) {
+ case TWI_DEVICE: {
+ u32 twi_addr, twi_page_size;
+ twi_addr = 0xa8;
+ twi_page_size = 2;
+
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
+ twi_page_size << 8 | TWI_DEVICE);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
+ case C_SEEPROM: {
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
+ case VPD_FLASH: {
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
+ case EXPAN_ROM: {
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
+ case IOP_RDUMP: {
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
+ default:
+ break;
+ }
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_tag_free(pm8001_ha, tag);
+ }
+ return rc;
+}
+
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ void *payload)
+{
+ u32 opc = OPC_INB_SET_NVMD_DATA;
+ u32 nvmd_type;
+ int rc;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ struct inbound_queue_table *circularQ;
+ struct set_nvm_data_req nvmd_req;
+ struct fw_control_ex *fw_control_context;
+ struct pm8001_ioctl_payload *ioctl_payload = payload;
+
+ nvmd_type = ioctl_payload->minor_function;
+ fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
+ if (!fw_control_context)
+ return -ENOMEM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+ &ioctl_payload->func_specific,
+ ioctl_payload->length);
+ memset(&nvmd_req, 0, sizeof(nvmd_req));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc) {
+ kfree(fw_control_context);
+ return -EBUSY;
+ }
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->fw_control_context = fw_control_context;
+ ccb->ccb_tag = tag;
+ nvmd_req.tag = cpu_to_le32(tag);
+ switch (nvmd_type) {
+ case TWI_DEVICE: {
+ u32 twi_addr, twi_page_size;
+ twi_addr = 0xa8;
+ twi_page_size = 2;
+ nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
+ twi_page_size << 8 | TWI_DEVICE);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
+ case C_SEEPROM:
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ case VPD_FLASH:
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ case EXPAN_ROM:
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ default:
+ break;
+ }
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_tag_free(pm8001_ha, tag);
+ }
+ return rc;
+}
+
+/**
+ * pm8001_chip_fw_flash_update_build - support the firmware update operation
+ * @pm8001_ha: our hba card information.
+ * @fw_flash_updata_info: firmware flash update param
+ */
+int
+pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+ void *fw_flash_updata_info, u32 tag)
+{
+ struct fw_flash_Update_req payload;
+ struct fw_flash_updata_info *info;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 opc = OPC_INB_FW_FLASH_UPDATE;
+
+ memset(&payload, 0, sizeof(struct fw_flash_Update_req));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ info = fw_flash_updata_info;
+ payload.tag = cpu_to_le32(tag);
+ payload.cur_image_len = cpu_to_le32(info->cur_image_len);
+ payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
+ payload.total_image_len = cpu_to_le32(info->total_image_len);
+ payload.len = info->sgl.im_len.len ;
+ payload.sgl_addr_lo =
+ cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
+ payload.sgl_addr_hi =
+ cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return ret;
+}
+
+int
+pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ void *payload)
+{
+ struct fw_flash_updata_info flash_update_info;
+ struct fw_control_info *fw_control;
+ struct fw_control_ex *fw_control_context;
+ int rc;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
+ dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
+ struct pm8001_ioctl_payload *ioctl_payload = payload;
+
+ fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
+ if (!fw_control_context)
+ return -ENOMEM;
+ fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
+ memcpy(buffer, fw_control->buffer, fw_control->len);
+ flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
+ flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
+ flash_update_info.sgl.im_len.e = 0;
+ flash_update_info.cur_image_offset = fw_control->offset;
+ flash_update_info.cur_image_len = fw_control->len;
+ flash_update_info.total_image_len = fw_control->size;
+ fw_control_context->fw_control = fw_control;
+ fw_control_context->virtAddr = buffer;
+ fw_control_context->phys_addr = phys_addr;
+ fw_control_context->len = fw_control->len;
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc) {
+ kfree(fw_control_context);
+ return -EBUSY;
+ }
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->fw_control_context = fw_control_context;
+ ccb->ccb_tag = tag;
+ rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
+ tag);
+ return rc;
+}
+
+ssize_t
+pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
+{
+ u32 value, rem, offset = 0, bar = 0;
+ u32 index, work_offset, dw_length;
+ u32 shift_value, gsm_base, gsm_dump_offset;
+ char *direct_data;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ direct_data = buf;
+ gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;
+
+ /* check max is 1 Mbytes */
+ if ((length > 0x100000) || (gsm_dump_offset & 3) ||
+ ((gsm_dump_offset + length) > 0x1000000))
+ return -EINVAL;
+
+ if (pm8001_ha->chip_id == chip_8001)
+ bar = 2;
+ else
+ bar = 1;
+
+ work_offset = gsm_dump_offset & 0xFFFF0000;
+ offset = gsm_dump_offset & 0x0000FFFF;
+ gsm_dump_offset = work_offset;
+ /* adjust length to dword boundary */
+ rem = length & 3;
+ dw_length = length >> 2;
+
+ for (index = 0; index < dw_length; index++) {
+ if ((work_offset + offset) & 0xFFFF0000) {
+ if (pm8001_ha->chip_id == chip_8001)
+ shift_value = ((gsm_dump_offset + offset) &
+ SHIFT_REG_64K_MASK);
+ else
+ shift_value = (((gsm_dump_offset + offset) &
+ SHIFT_REG_64K_MASK) >>
+ SHIFT_REG_BIT_SHIFT);
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ gsm_base = GSM_BASE;
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ (gsm_base + shift_value)))
+ return -EIO;
+ } else {
+ gsm_base = 0;
+ if (-1 == pm80xx_bar4_shift(pm8001_ha,
+ (gsm_base + shift_value)))
+ return -EIO;
+ }
+ gsm_dump_offset = (gsm_dump_offset + offset) &
+ 0xFFFF0000;
+ work_offset = 0;
+ offset = offset & 0x0000FFFF;
+ }
+ value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+ 0x0000FFFF);
+ direct_data += sprintf(direct_data, "%08x ", value);
+ offset += 4;
+ }
+ if (rem != 0) {
+ value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+ 0x0000FFFF);
+ /* xfr for non_dw */
+ direct_data += sprintf(direct_data, "%08x ", value);
+ }
+ /* Shift back to BAR4 original address */
+ if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+ return -EIO;
+ pm8001_ha->fatal_forensic_shift_offset += 1024;
+
+ if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ return direct_data - buf;
+}
+
+int
+pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u32 state)
+{
+ struct set_dev_state_req payload;
+ struct inbound_queue_table *circularQ;
+ struct pm8001_ccb_info *ccb;
+ int rc;
+ u32 tag;
+ u32 opc = OPC_INB_SET_DEVICE_STATE;
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return -1;
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->ccb_tag = tag;
+ ccb->device = pm8001_dev;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ payload.device_id = cpu_to_le32(pm8001_dev->device_id);
+ payload.nds = cpu_to_le32(state);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return rc;
+
+}
+
+static int
+pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
+{
+ struct sas_re_initialization_req payload;
+ struct inbound_queue_table *circularQ;
+ struct pm8001_ccb_info *ccb;
+ int rc;
+ u32 tag;
+ u32 opc = OPC_INB_SAS_RE_INITIALIZE;
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return -ENOMEM;
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->ccb_tag = tag;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ payload.SSAHOLT = cpu_to_le32(0xd << 25);
+ payload.sata_hol_tmo = cpu_to_le32(80);
+ payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+ return rc;
+
+}
+
+const struct pm8001_dispatch pm8001_8001_dispatch = {
+ .name = "pmc8001",
+ .chip_init = pm8001_chip_init,
+ .chip_soft_rst = pm8001_chip_soft_rst,
+ .chip_rst = pm8001_hw_chip_rst,
+ .chip_iounmap = pm8001_chip_iounmap,
+ .isr = pm8001_chip_isr,
+ .is_our_interupt = pm8001_chip_is_our_interupt,
+ .isr_process_oq = process_oq,
+ .interrupt_enable = pm8001_chip_interrupt_enable,
+ .interrupt_disable = pm8001_chip_interrupt_disable,
+ .make_prd = pm8001_chip_make_sg,
+ .smp_req = pm8001_chip_smp_req,
+ .ssp_io_req = pm8001_chip_ssp_io_req,
+ .sata_req = pm8001_chip_sata_req,
+ .phy_start_req = pm8001_chip_phy_start_req,
+ .phy_stop_req = pm8001_chip_phy_stop_req,
+ .reg_dev_req = pm8001_chip_reg_dev_req,
+ .dereg_dev_req = pm8001_chip_dereg_dev_req,
+ .phy_ctl_req = pm8001_chip_phy_ctl_req,
+ .task_abort = pm8001_chip_abort_task,
+ .ssp_tm_req = pm8001_chip_ssp_tm_req,
+ .get_nvmd_req = pm8001_chip_get_nvmd_req,
+ .set_nvmd_req = pm8001_chip_set_nvmd_req,
+ .fw_flash_update_req = pm8001_chip_fw_flash_update_req,
+ .set_dev_state_req = pm8001_chip_set_dev_state_req,
+ .sas_re_init_req = pm8001_chip_sas_re_initialization,
+};
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
new file mode 100644
index 000000000..e4867e690
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -0,0 +1,1038 @@
+/*
+ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO 1 /* 0x000 */
+#define OPC_INB_PHYSTART 4 /* 0x004 */
+#define OPC_INB_PHYSTOP 5 /* 0x005 */
+#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */
+#define OPC_INB_SSPINITMSTART 7 /* 0x007 */
+#define OPC_INB_SSPINIEXTIOSTART 8 /* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */
+#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */
+#define OPC_INB_SSPINIEDCIOSTART 12 /* 0x00C */
+#define OPC_INB_SSPINIEXTEDCIOSTART 13 /* 0x00D */
+#define OPC_INB_SSPTGTEDCIOSTART 14 /* 0x00E */
+#define OPC_INB_SSP_ABORT 15 /* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */
+#define OPC_INB_SMP_REQUEST 18 /* 0x012 */
+/* SMP_RESPONSE is removed */
+#define OPC_INB_SMP_RESPONSE 19 /* 0x013 */
+#define OPC_INB_SMP_ABORT 20 /* 0x014 */
+#define OPC_INB_REG_DEV 22 /* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */
+#define OPC_INB_SATA_ABORT 24 /* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */
+#define OPC_INB_GET_DEV_INFO 26 /* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */
+#define OPC_INB_GPIO 34 /* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */
+#define OPC_INB_SAS_HW_EVENT_ACK 37 /* 0x025 */
+#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */
+#define OPC_INB_PORT_CONTROL 39 /* 0x027 */
+#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */
+#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */
+#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */
+#define OPC_INB_SAS_RE_INITIALIZE 45 /* 0x02D */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO 1 /* 0x001 */
+#define OPC_OUB_HW_EVENT 4 /* 0x004 */
+#define OPC_OUB_SSP_COMP 5 /* 0x005 */
+#define OPC_OUB_SMP_COMP 6 /* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */
+#define OPC_OUB_DEV_REGIST 10 /* 0x00A */
+#define OPC_OUB_DEREG_DEV 11 /* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */
+#define OPC_OUB_SATA_COMP 13 /* 0x00D */
+#define OPC_OUB_SATA_EVENT 14 /* 0x00E */
+#define OPC_OUB_SSP_EVENT 15 /* 0x00F */
+#define OPC_OUB_DEV_HANDLE_ARRIV 16 /* 0x010 */
+/* SMP_RECEIVED Notification is removed */
+#define OPC_OUB_SMP_RECV_EVENT 17 /* 0x011 */
+#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */
+#define OPC_OUB_DEV_INFO 19 /* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */
+#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */
+#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */
+#define OPC_OUB_SAS_HW_EVENT_ACK 31 /* 0x01F */
+#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */
+#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */
+#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */
+#define OPC_OUB_SAS_RE_INITIALIZE 41 /* 0x029 */
+
+/* for phy start*/
+#define SPINHOLD_DISABLE (0x00 << 14)
+#define SPINHOLD_ENABLE (0x01 << 14)
+#define LINKMODE_SAS (0x01 << 12)
+#define LINKMODE_DSATA (0x02 << 12)
+#define LINKMODE_AUTO (0x03 << 12)
+#define LINKRATE_15 (0x01 << 8)
+#define LINKRATE_30 (0x02 << 8)
+#define LINKRATE_60 (0x04 << 8)
+
+/* for phy state */
+
+#define PHY_STATE_LINK_UP_SPC 0x1
+
+/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
+#define GSM_SM_BASE 0x4F0000
+struct mpi_msg_hdr{
+ __le32 header; /* Bits [11:0] - Message operation code */
+ /* Bits [15:12] - Message Category */
+ /* Bits [21:16] - Outboundqueue ID for the
+ operation completion message */
+ /* Bits [23:22] - Reserved */
+ /* Bits [28:24] - Buffer Count, indicates how
+ many buffer are allocated for the massage */
+ /* Bits [30:29] - Reserved */
+ /* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to describe enable the phy (64 bytes)
+ */
+struct phy_start_req {
+ __le32 tag;
+ __le32 ase_sh_lm_slr_phyid;
+ struct sas_identify_frame sas_identify;
+ u32 reserved[5];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to disable the phy (64 bytes)
+ */
+struct phy_stop_req {
+ __le32 tag;
+ __le32 phy_id;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+ u8 fis_type; /* 0xA1*/
+ u8 n_i_pmport;
+ /* b7 : n Bit. Notification bit. If set device needs attention. */
+ /* b6 : i Bit. Interrupt Bit */
+ /* b5-b4: reserved2 */
+ /* b3-b0: PM Port */
+ u8 status;
+ u8 error;
+ u32 _r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+ u8 fis_type; /* 0x5f */
+ u8 i_d_pmPort;
+ /* b7 : reserved */
+ /* b6 : i bit. Interrupt bit */
+ /* b5 : d bit. data transfer direction. set to 1 for device to host
+ xfer */
+ /* b4 : reserved */
+ /* b3-b0: PM Port */
+ u8 status;
+ u8 error;
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+ u8 device;
+ u8 lbal_exp;
+ u8 lbam_exp;
+ u8 lbah_exp;
+ u8 _r_a;
+ u8 sector_count;
+ u8 sector_count_exp;
+ u8 _r_b;
+ u8 e_status;
+ u8 _r_c[2];
+ u8 transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ u32 sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * use to alert the host about the hardware event(64 bytes)
+ */
+struct hw_event_resp {
+ __le32 lr_evt_status_phyid_portid;
+ __le32 evt_param;
+ __le32 npip_portstate;
+ struct sas_identify_frame sas_identify;
+ struct dev_to_host_fis sata_fis;
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+ __le32 tag;
+ __le32 phyid_portid;
+ __le32 dtype_dlr_retry;
+ __le32 firstburstsize_ITNexustimeout;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ __le32 upper_device_id;
+ u32 reserved[8];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+ __le32 tag;
+ __le32 device_id;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+
+struct dev_reg_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 device_id;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+ __le32 tag;
+ __le32 phyop_phyid;
+ u32 reserved1[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+struct local_phy_ctl_resp {
+ __le32 tag;
+ __le32 phyop_phyid;
+ __le32 status;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+ __le32 tag;
+ __le32 portop_portid;
+ __le32 param0;
+ __le32 param1;
+ u32 reserved1[11];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+
+struct hw_event_ack_req {
+ __le32 tag;
+ __le32 sea_phyid_portid;
+ __le32 param0;
+ __le32 param1;
+ u32 reserved1[11];
+} __attribute__((packed, aligned(4)));
+
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ __le32 ssptag_rescv_rescpad;
+ struct ssp_response_iu ssp_resp_iu;
+ __le32 residual_count;
+} __attribute__((packed, aligned(4)));
+
+
+#define SSP_RESCV_BIT 0x00010000
+
+/*
+ * brief the data structure of SATA EVNET esponse
+ * use to indicate a SATA Completion (64 bytes)
+ */
+
+struct sata_event_resp {
+ __le32 tag;
+ __le32 event;
+ __le32 port_id;
+ __le32 device_id;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVNET esponse
+ * use to indicate a SSP Completion (64 bytes)
+ */
+
+struct ssp_event_resp {
+ __le32 tag;
+ __le32 event;
+ __le32 port_id;
+ __le32 device_id;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+ __le32 status;
+ __le32 inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+
+#define GENERAL_EVENT_PAYLOAD 14
+#define OPCODE_BITS 0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 len_ip_ir;
+ /* Bits [0] - Indirect response */
+ /* Bits [1] - Indirect Payload */
+ /* Bits [15:2] - Reserved */
+ /* Bits [23:16] - direct payload Len */
+ /* Bits [31:24] - Reserved */
+ u8 smp_req16[16];
+ union {
+ u8 smp_req[32];
+ struct {
+ __le64 long_req_addr;/* sg dma address, LE */
+ __le32 long_req_size;/* LE */
+ u32 _r_a;
+ __le64 long_resp_addr;/* sg dma address, LE */
+ __le32 long_resp_size;/* LE */
+ u32 _r_b;
+ } long_smp_req;/* sequencer extension */
+ };
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ __le32 _r_a[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ *brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 tag_to_abort;
+ __le32 abort_all;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK 0x3
+#define ABORT_SINGLE 0x0
+#define ABORT_ALL 0x1
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
+ */
+struct task_abort_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 scp;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+ __le32 tag;
+ __le32 operation_phyid;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req{
+ __le32 tag;
+ __le32 cmdtype_cmddesc_phyid;
+ __le32 pat1_pat2;
+ __le32 threshold;
+ __le32 codepat_errmsk;
+ __le32 pmon;
+ __le32 pERF1CTL;
+ u32 reserved[8];
+} __attribute__((packed, aligned(4)));
+
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * use to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 nds;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of sas_re_initialization
+ */
+struct sas_re_initialization_req {
+
+ __le32 tag;
+ __le32 SSAHOLT;/* bit29-set max port;
+ ** bit28-set open reject cmd retries.
+ ** bit27-set open reject data retries.
+ ** bit26-set open reject option, remap:1 or not:0.
+ ** bit25-set sata head of line time out.
+ */
+ __le32 reserved_maxPorts;
+ __le32 open_reject_cmdretries_data_retries;/* cmd retries: 31-bit16;
+ * data retries: bit15-bit0.
+ */
+ __le32 sata_hol_tmo;
+ u32 reserved1[10];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * use to describe MPI SATA IO Start Command (64 bytes)
+ */
+
+struct sata_start_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 data_len;
+ __le32 ncqtag_atap_dir_m;
+ struct host_to_dev_fis sata_fis;
+ u32 reserved1;
+ u32 reserved2;
+ u32 addr_low;
+ u32 addr_high;
+ __le32 len;
+ __le32 esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * use to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 relate_tag;
+ __le32 tmf;
+ u8 lun[8];
+ __le32 ds_ads_m;
+ u32 reserved[8];
+} __attribute__((packed, aligned(4)));
+
+
+struct ssp_info_unit {
+ u8 lun[8];/* SCSI Logical Unit Number */
+ u8 reserved1;/* reserved */
+ u8 efb_prio_attr;
+ /* B7 : enabledFirstBurst */
+ /* B6-3 : taskPriority */
+ /* B2-0 : taskAttribute */
+ u8 reserved2; /* reserved */
+ u8 additional_cdb_len;
+ /* B7-2 : additional_cdb_len */
+ /* B1-0 : reserved */
+ u8 cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * use to describe MPI SSP INI IO Start Command (64 bytes)
+ */
+struct ssp_ini_io_start_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 data_len;
+ __le32 dir_m_tlr;
+ struct ssp_info_unit ssp_iu;
+ __le32 addr_low;
+ __le32 addr_high;
+ __le32 len;
+ __le32 esgl;
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Firmware download
+ * use to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+ __le32 tag;
+ __le32 cur_image_offset;
+ __le32 cur_image_len;
+ __le32 total_image_len;
+ u32 reserved0[7];
+ __le32 sgl_addr_lo;
+ __le32 sgl_addr_hi;
+ __le32 len;
+ __le32 ext_reserved;
+} __attribute__((packed, aligned(4)));
+
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+struct fw_flash_Update_resp {
+ __le32 tag;
+ __le32 status;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * use to get data from NVM in HBA(64 bytes)
+ */
+struct get_nvm_data_req {
+ __le32 tag;
+ __le32 len_ir_vpdd;
+ __le32 vpd_offset;
+ u32 reserved[8];
+ __le32 resp_addr_lo;
+ __le32 resp_addr_hi;
+ __le32 resp_len;
+ u32 reserved1;
+} __attribute__((packed, aligned(4)));
+
+
+struct set_nvm_data_req {
+ __le32 tag;
+ __le32 len_ir_vpdd;
+ __le32 vpd_offset;
+ __le32 reserved[8];
+ __le32 resp_addr_lo;
+ __le32 resp_addr_hi;
+ __le32 resp_len;
+ u32 reserved1;
+} __attribute__((packed, aligned(4)));
+
+
+#define TWI_DEVICE 0x0
+#define C_SEEPROM 0x1
+#define VPD_FLASH 0x4
+#define AAP1_RDUMP 0x5
+#define IOP_RDUMP 0x6
+#define EXPAN_ROM 0x7
+
+#define IPMode 0x80000000
+#define NVMD_TYPE 0x0000000F
+#define NVMD_STAT 0x0000FFFF
+#define NVMD_LEN 0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+ __le32 tag;
+ __le32 ir_tda_bn_dps_das_nvm;
+ __le32 dlen_status;
+ __le32 nvm_data[12];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+ __le32 tag;
+ __le32 status;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+ __le32 tag;
+ __le32 cmdtype_cmddesc_phyid;
+ __le32 Status;
+ __le32 ReportData;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 device_id;
+ __le32 pds_nds;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START 0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE 0x02
+#define HW_EVENT_PHY_STOP_STATUS 0x03
+#define HW_EVENT_SAS_PHY_UP 0x04
+#define HW_EVENT_SATA_PHY_UP 0x05
+#define HW_EVENT_SATA_SPINUP_HOLD 0x06
+#define HW_EVENT_PHY_DOWN 0x07
+#define HW_EVENT_PORT_INVALID 0x08
+#define HW_EVENT_BROADCAST_CHANGE 0x09
+#define HW_EVENT_PHY_ERROR 0x0A
+#define HW_EVENT_BROADCAST_SES 0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR 0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED 0x0D
+#define HW_EVENT_MALFUNCTION 0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F
+#define HW_EVENT_BROADCAST_EXP 0x10
+#define HW_EVENT_PHY_START_STATUS 0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17
+#define HW_EVENT_PORT_RECOVER 0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19
+#define HW_EVENT_PORT_RESET_COMPLETE 0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT 0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED 0x00
+#define PORT_VALID 0x01
+#define PORT_LOSTCOMM 0x02
+#define PORT_IN_RESET 0x04
+#define PORT_INVALID 0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS 0x00
+#define IO_ABORTED 0x01
+#define IO_OVERFLOW 0x02
+#define IO_UNDERFLOW 0x03
+#define IO_FAILED 0x04
+#define IO_ABORT_RESET 0x05
+#define IO_NOT_VALID 0x06
+#define IO_NO_DEVICE 0x07
+#define IO_ILLEGAL_PARAMETER 0x08
+#define IO_LINK_FAILURE 0x09
+#define IO_PROG_ERROR 0x0A
+#define IO_EDC_IN_ERROR 0x0B
+#define IO_EDC_OUT_ERROR 0x0C
+#define IO_ERROR_HW_TIMEOUT 0x0D
+#define IO_XFER_ERROR_BREAK 0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY 0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11
+#define IO_OPEN_CNX_ERROR_BREAK 0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18
+#define IO_XFER_ERROR_NAK_RECEIVED 0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A
+#define IO_XFER_ERROR_PEER_ABORTED 0x1B
+#define IO_XFER_ERROR_RX_FRAME 0x1C
+#define IO_XFER_ERROR_DMA 0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F
+#define IO_XFER_ERROR_SATA 0x20
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28
+
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35
+#define IO_XFER_CMD_FRAME_ISSUED 0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37
+#define IO_PORT_IN_RESET 0x38
+#define IO_DS_NON_OPERATIONAL 0x39
+#define IO_DS_IN_RECOVERY 0x3A
+#define IO_TM_TAG_NOT_FOUND 0x3B
+#define IO_XFER_PIO_SETUP_ERROR 0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D
+#define IO_DS_IN_ERROR 0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F
+#define IO_ABORT_IN_PROGRESS 0x40
+#define IO_ABORT_DELAYED 0x41
+#define IO_INVALID_LENGTH 0x42
+
+/* WARNING: This error code must always be the last number.
+ * If you add error code, modify this code also
+ * It is used as an index
+ */
+#define IO_ERROR_UNKNOWN_GENERIC 0x43
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPC_MSGU_CFG_TABLE_UPDATE 0x01/* Inbound doorbell bit0 */
+#define SPC_MSGU_CFG_TABLE_RESET 0x02/* Inbound doorbell bit1 */
+#define SPC_MSGU_CFG_TABLE_FREEZE 0x04/* Inbound doorbell bit2 */
+#define SPC_MSGU_CFG_TABLE_UNFREEZE 0x08/* Inbound doorbell bit4 */
+#define MSGU_IBDB_SET 0x04
+#define MSGU_HOST_INT_STATUS 0x08
+#define MSGU_HOST_INT_MASK 0x0C
+#define MSGU_IOPIB_INT_STATUS 0x18
+#define MSGU_IOPIB_INT_MASK 0x1C
+#define MSGU_IBDB_CLEAR 0x20/* RevB - Host not use */
+#define MSGU_MSGU_CONTROL 0x24
+#define MSGU_ODR 0x3C/* RevB */
+#define MSGU_ODCR 0x40/* RevB */
+#define MSGU_SCRATCH_PAD_0 0x44
+#define MSGU_SCRATCH_PAD_1 0x48
+#define MSGU_SCRATCH_PAD_2 0x4C
+#define MSGU_SCRATCH_PAD_3 0x50
+#define MSGU_HOST_SCRATCH_PAD_0 0x54
+#define MSGU_HOST_SCRATCH_PAD_1 0x58
+#define MSGU_HOST_SCRATCH_PAD_2 0x5C
+#define MSGU_HOST_SCRATCH_PAD_3 0x60
+#define MSGU_HOST_SCRATCH_PAD_4 0x64
+#define MSGU_HOST_SCRATCH_PAD_5 0x68
+#define MSGU_HOST_SCRATCH_PAD_6 0x6C
+#define MSGU_HOST_SCRATCH_PAD_7 0x70
+#define MSGU_ODMR 0x74/* RevB */
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all
+ interrupt vector */
+#define ODMR_CLEAR_ALL 0/* clear all
+ interrupt vector */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all
+ interrupt vector*/
+/* MSIX Interupts */
+#define MSIX_TABLE_OFFSET 0x2000
+#define MSIX_TABLE_ELEMENT_SIZE 0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC
+#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE 0x1
+#define MSIX_INTERRUPT_ENABLE 0x0
+
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD1_POR 0x00 /* power on reset state */
+#define SCRATCH_PAD1_SFR 0x01 /* soft reset state */
+#define SCRATCH_PAD1_ERR 0x02 /* error state */
+#define SCRATCH_PAD1_RDY 0x03 /* ready state */
+#define SCRATCH_PAD1_RST 0x04 /* soft reset toggle flag */
+#define SCRATCH_PAD1_AAP1RDY_RST 0x08 /* AAP1 ready for soft reset */
+#define SCRATCH_PAD1_STATE_MASK 0xFFFFFFF0 /* ScratchPad1
+ Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */
+#define SCRATCH_PAD1_RESERVED 0x000003F8 /* Scratch Pad1
+ Reserved bit 3 to 9 */
+
+ /* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR 0x00 /* power on state */
+#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */
+#define SCRATCH_PAD2_ERR 0x02 /* error state */
+#define SCRATCH_PAD2_RDY 0x03 /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW ready for soft reset flag*/
+#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2
+ Mask, bit1-0 State */
+#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad1
+ Reserved bit 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET 0x00/* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION 0x04/* DWORD 0x01 */
+#define MAIN_FW_REVISION 0x08/* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C/* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET 0x10/* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET 0x14/* DWORD 0x05 */
+#define MAIN_GST_OFFSET 0x18/* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET 0x1C/* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET 0x20/* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET 0x24/* DWORD 0x09 */
+#define MAIN_OB_HW_EVENT_PID03_OFFSET 0x28/* DWORD 0x0A */
+#define MAIN_OB_HW_EVENT_PID47_OFFSET 0x2C/* DWORD 0x0B */
+#define MAIN_OB_NCQ_EVENT_PID03_OFFSET 0x30/* DWORD 0x0C */
+#define MAIN_OB_NCQ_EVENT_PID47_OFFSET 0x34/* DWORD 0x0D */
+#define MAIN_TITNX_EVENT_PID03_OFFSET 0x38/* DWORD 0x0E */
+#define MAIN_TITNX_EVENT_PID47_OFFSET 0x3C/* DWORD 0x0F */
+#define MAIN_OB_SSP_EVENT_PID03_OFFSET 0x40/* DWORD 0x10 */
+#define MAIN_OB_SSP_EVENT_PID47_OFFSET 0x44/* DWORD 0x11 */
+#define MAIN_OB_SMP_EVENT_PID03_OFFSET 0x48/* DWORD 0x12 */
+#define MAIN_OB_SMP_EVENT_PID47_OFFSET 0x4C/* DWORD 0x13 */
+#define MAIN_EVENT_LOG_ADDR_HI 0x50/* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO 0x54/* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE 0x58/* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION 0x5C/* DWORD 0x17 */
+#define MAIN_IOP_EVENT_LOG_ADDR_HI 0x60/* DWORD 0x18 */
+#define MAIN_IOP_EVENT_LOG_ADDR_LO 0x64/* DWORD 0x19 */
+#define MAIN_IOP_EVENT_LOG_BUFF_SIZE 0x68/* DWORD 0x1A */
+#define MAIN_IOP_EVENT_LOG_OPTION 0x6C/* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT 0x70/* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74/* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78/* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C/* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80/* DWORD 0x20 */
+#define MAIN_HDA_FLAGS_OFFSET 0x84/* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET 0x88/* DWORD 0x22 */
+
+/* Gereral Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET 0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET 0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET 0x08
+#define GST_MSGUTCNT_OFFSET 0x0C
+#define GST_IOPTCNT_OFFSET 0x10
+#define GST_PHYSTATE_OFFSET 0x18
+#define GST_PHYSTATE0_OFFSET 0x18
+#define GST_PHYSTATE1_OFFSET 0x1C
+#define GST_PHYSTATE2_OFFSET 0x20
+#define GST_PHYSTATE3_OFFSET 0x24
+#define GST_PHYSTATE4_OFFSET 0x28
+#define GST_PHYSTATE5_OFFSET 0x2C
+#define GST_PHYSTATE6_OFFSET 0x30
+#define GST_PHYSTATE7_OFFSET 0x34
+#define GST_RERRINFO_OFFSET 0x44
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT 0x00
+#define GST_MPI_STATE_INIT 0x01
+#define GST_MPI_STATE_TERMINATION 0x02
+#define GST_MPI_STATE_ERROR 0x03
+#define GST_MPI_STATE_MASK 0x07
+
+#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040
+#define PCIE_EVENT_INTERRUPT 0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048
+#define PCIE_ERROR_INTERRUPT 0x00304C
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET 0x000000/* reset register */
+
+/* bit difination for SPC_RESET register */
+#define SPC_REG_RESET_OSSP 0x00000001
+#define SPC_REG_RESET_RAAE 0x00000002
+#define SPC_REG_RESET_PCS_SPBC 0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS 0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020
+#define SPC_REG_RESET_PCS_LM 0x00000040
+#define SPC_REG_RESET_PCS 0x00000080
+#define SPC_REG_RESET_GSM 0x00000100
+#define SPC_REG_RESET_DDR2 0x00010000
+#define SPC_REG_RESET_BDMA_CORE 0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI 0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000
+#define SPC_REG_RESET_PCIE_PWR 0x00100000
+#define SPC_REG_RESET_PCIE_SFT 0x00200000
+#define SPC_REG_RESET_PCS_SXCBI 0x00400000
+#define SPC_REG_RESET_LMS_SXCBI 0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI 0x01000000
+#define SPC_REG_RESET_PMIC_CORE 0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000
+#define SPC_REG_RESET_DEVICE 0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPC_IBW_AXI_TRANSLATION_LOW 0x003258
+
+#define MBIC_AAP1_ADDR_BASE 0x060000
+#define MBIC_IOP_ADDR_BASE 0x070000
+#define GSM_ADDR_BASE 0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET 0x00000000
+#define RAM_ECC_DB_ERR 0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC 0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK 0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048
+
+#define RB6_ACCESS_REG 0x6A0000
+#define HDAC_EXEC_CMD 0x0002
+#define HDA_C_PA 0xcb
+#define HDA_SEQ_ID_BITS 0x00ff0000
+#define HDA_GSM_OFFSET_BITS 0x00FFFFFF
+#define MBIC_AAP1_ADDR_BASE 0x060000
+#define MBIC_IOP_ADDR_BASE 0x070000
+#define GSM_ADDR_BASE 0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE 0x000000
+#define GSM_CONFIG_RESET_VALUE 0x00003b00
+#define GPIO_ADDR_BASE 0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET 0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST 0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS 0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID 0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
+
+#define GSM_BASE 0x4F0000
+#define SHIFT_REG_64K_MASK 0xffff0000
+#define SHIFT_REG_BIT_SHIFT 8
+#endif
+
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
new file mode 100644
index 000000000..65555916d
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -0,0 +1,1226 @@
+/*
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#include <linux/slab.h>
+#include "pm8001_sas.h"
+#include "pm8001_chips.h"
+
+static struct scsi_transport_template *pm8001_stt;
+
+/**
+ * chip info structure to identify chip key functionality as
+ * encryption available/not, no of ports, hw specific function ref
+ */
+static const struct pm8001_chip_info pm8001_chips[] = {
+ [chip_8001] = {0, 8, &pm8001_8001_dispatch,},
+ [chip_8008] = {0, 8, &pm8001_80xx_dispatch,},
+ [chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
+ [chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
+ [chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
+ [chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
+ [chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
+ [chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
+};
+static int pm8001_id;
+
+LIST_HEAD(hba_list);
+
+struct workqueue_struct *pm8001_wq;
+
+/**
+ * The main structure which LLDD must register for scsi core.
+ */
+static struct scsi_host_template pm8001_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .scan_finished = pm8001_scan_finished,
+ .scan_start = pm8001_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = sas_eh_bus_reset_handler,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+ .shost_attrs = pm8001_host_attrs,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+/**
+ * Sas layer call this function to execute specific task.
+ */
+static struct sas_domain_function_template pm8001_transport_ops = {
+ .lldd_dev_found = pm8001_dev_found,
+ .lldd_dev_gone = pm8001_dev_gone,
+
+ .lldd_execute_task = pm8001_queue_command,
+ .lldd_control_phy = pm8001_phy_control,
+
+ .lldd_abort_task = pm8001_abort_task,
+ .lldd_abort_task_set = pm8001_abort_task_set,
+ .lldd_clear_aca = pm8001_clear_aca,
+ .lldd_clear_task_set = pm8001_clear_task_set,
+ .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
+ .lldd_lu_reset = pm8001_lu_reset,
+ .lldd_query_task = pm8001_query_task,
+};
+
+/**
+ *pm8001_phy_init - initiate our adapter phys
+ *@pm8001_ha: our hba structure.
+ *@phy_id: phy id.
+ */
+static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
+{
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ phy->phy_state = 0;
+ phy->pm8001_ha = pm8001_ha;
+ sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
+ sas_phy->class = SAS;
+ sas_phy->iproto = SAS_PROTOCOL_ALL;
+ sas_phy->tproto = 0;
+ sas_phy->type = PHY_TYPE_PHYSICAL;
+ sas_phy->role = PHY_ROLE_INITIATOR;
+ sas_phy->oob_mode = OOB_NOT_CONNECTED;
+ sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+ sas_phy->id = phy_id;
+ sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
+ sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+ sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
+ sas_phy->lldd_phy = phy;
+}
+
+/**
+ *pm8001_free - free hba
+ *@pm8001_ha: our hba structure.
+ *
+ */
+static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+
+ if (!pm8001_ha)
+ return;
+
+ for (i = 0; i < USI_MAX_MEMCNT; i++) {
+ if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
+ pci_free_consistent(pm8001_ha->pdev,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
+ pm8001_ha->memoryMap.region[i].virt_ptr,
+ pm8001_ha->memoryMap.region[i].phys_addr);
+ }
+ }
+ PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
+ if (pm8001_ha->shost)
+ scsi_host_put(pm8001_ha->shost);
+ flush_workqueue(pm8001_wq);
+ kfree(pm8001_ha->tags);
+ kfree(pm8001_ha);
+}
+
+#ifdef PM8001_USE_TASKLET
+
+/**
+ * tasklet for 64 msi-x interrupt handler
+ * @opaque: the passed general host adapter struct
+ * Note: pm8001_tasklet is common for pm8001 & pm80xx
+ */
+static void pm8001_tasklet(unsigned long opaque)
+{
+ struct pm8001_hba_info *pm8001_ha;
+ struct isr_param *irq_vector;
+
+ irq_vector = (struct isr_param *)opaque;
+ pm8001_ha = irq_vector->drv_inst;
+ if (unlikely(!pm8001_ha))
+ BUG_ON(1);
+ PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
+}
+#endif
+
+/**
+ * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
+ * It obtains the vector number and calls the equivalent bottom
+ * half or services directly.
+ * @opaque: the passed outbound queue/vector. Host structure is
+ * retrieved from the same.
+ */
+static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
+{
+ struct isr_param *irq_vector;
+ struct pm8001_hba_info *pm8001_ha;
+ irqreturn_t ret = IRQ_HANDLED;
+ irq_vector = (struct isr_param *)opaque;
+ pm8001_ha = irq_vector->drv_inst;
+
+ if (unlikely(!pm8001_ha))
+ return IRQ_NONE;
+ if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+ return IRQ_NONE;
+#ifdef PM8001_USE_TASKLET
+ tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
+#else
+ ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
+#endif
+ return ret;
+}
+
+/**
+ * pm8001_interrupt_handler_intx - main INTx interrupt handler.
+ * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
+ */
+
+static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
+{
+ struct pm8001_hba_info *pm8001_ha;
+ irqreturn_t ret = IRQ_HANDLED;
+ struct sas_ha_struct *sha = dev_id;
+ pm8001_ha = sha->lldd_ha;
+ if (unlikely(!pm8001_ha))
+ return IRQ_NONE;
+ if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+ return IRQ_NONE;
+
+#ifdef PM8001_USE_TASKLET
+ tasklet_schedule(&pm8001_ha->tasklet[0]);
+#else
+ ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
+#endif
+ return ret;
+}
+
+/**
+ * pm8001_alloc - initiate our hba structure and 6 DMAs area.
+ * @pm8001_ha:our hba structure.
+ *
+ */
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+ const struct pci_device_id *ent)
+{
+ int i;
+ spin_lock_init(&pm8001_ha->lock);
+ spin_lock_init(&pm8001_ha->bitmap_lock);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("pm8001_alloc: PHY:%x\n",
+ pm8001_ha->chip->n_phy));
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ pm8001_phy_init(pm8001_ha, i);
+ pm8001_ha->port[i].wide_port_phymap = 0;
+ pm8001_ha->port[i].port_attached = 0;
+ pm8001_ha->port[i].port_state = 0;
+ INIT_LIST_HEAD(&pm8001_ha->port[i].list);
+ }
+
+ pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
+ if (!pm8001_ha->tags)
+ goto err_out;
+ /* MPI Memory region 1 for AAP Event Log for fw */
+ pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
+ pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->memoryMap.region[AAP1].alignment = 32;
+
+ /* MPI Memory region 2 for IOP Event Log for fw */
+ pm8001_ha->memoryMap.region[IOP].num_elements = 1;
+ pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->memoryMap.region[IOP].alignment = 32;
+
+ for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ /* MPI Memory region 3 for consumer Index of inbound queues */
+ pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
+ pm8001_ha->memoryMap.region[CI+i].element_size = 4;
+ pm8001_ha->memoryMap.region[CI+i].total_len = 4;
+ pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+
+ if ((ent->driver_data) != chip_8001) {
+ /* MPI Memory region 5 inbound queues */
+ pm8001_ha->memoryMap.region[IB+i].num_elements =
+ PM8001_MPI_QUEUE;
+ pm8001_ha->memoryMap.region[IB+i].element_size = 128;
+ pm8001_ha->memoryMap.region[IB+i].total_len =
+ PM8001_MPI_QUEUE * 128;
+ pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+ } else {
+ pm8001_ha->memoryMap.region[IB+i].num_elements =
+ PM8001_MPI_QUEUE;
+ pm8001_ha->memoryMap.region[IB+i].element_size = 64;
+ pm8001_ha->memoryMap.region[IB+i].total_len =
+ PM8001_MPI_QUEUE * 64;
+ pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+ }
+ }
+
+ for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ /* MPI Memory region 4 for producer Index of outbound queues */
+ pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
+ pm8001_ha->memoryMap.region[PI+i].element_size = 4;
+ pm8001_ha->memoryMap.region[PI+i].total_len = 4;
+ pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+
+ if (ent->driver_data != chip_8001) {
+ /* MPI Memory region 6 Outbound queues */
+ pm8001_ha->memoryMap.region[OB+i].num_elements =
+ PM8001_MPI_QUEUE;
+ pm8001_ha->memoryMap.region[OB+i].element_size = 128;
+ pm8001_ha->memoryMap.region[OB+i].total_len =
+ PM8001_MPI_QUEUE * 128;
+ pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+ } else {
+ /* MPI Memory region 6 Outbound queues */
+ pm8001_ha->memoryMap.region[OB+i].num_elements =
+ PM8001_MPI_QUEUE;
+ pm8001_ha->memoryMap.region[OB+i].element_size = 64;
+ pm8001_ha->memoryMap.region[OB+i].total_len =
+ PM8001_MPI_QUEUE * 64;
+ pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+ }
+
+ }
+ /* Memory region write DMA*/
+ pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
+ pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
+ pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
+ /* Memory region for devices*/
+ pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
+ pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
+ sizeof(struct pm8001_device);
+ pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
+ sizeof(struct pm8001_device);
+
+ /* Memory region for ccb_info*/
+ pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
+ pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
+ sizeof(struct pm8001_ccb_info);
+ pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
+ sizeof(struct pm8001_ccb_info);
+
+ /* Memory region for fw flash */
+ pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
+ for (i = 0; i < USI_MAX_MEMCNT; i++) {
+ if (pm8001_mem_alloc(pm8001_ha->pdev,
+ &pm8001_ha->memoryMap.region[i].virt_ptr,
+ &pm8001_ha->memoryMap.region[i].phys_addr,
+ &pm8001_ha->memoryMap.region[i].phys_addr_hi,
+ &pm8001_ha->memoryMap.region[i].phys_addr_lo,
+ pm8001_ha->memoryMap.region[i].total_len,
+ pm8001_ha->memoryMap.region[i].alignment) != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Mem%d alloc failed\n",
+ i));
+ goto err_out;
+ }
+ }
+
+ pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
+ for (i = 0; i < PM8001_MAX_DEVICES; i++) {
+ pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
+ pm8001_ha->devices[i].id = i;
+ pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
+ pm8001_ha->devices[i].running_req = 0;
+ }
+ pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
+ for (i = 0; i < PM8001_MAX_CCB; i++) {
+ pm8001_ha->ccb_info[i].ccb_dma_handle =
+ pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
+ i * sizeof(struct pm8001_ccb_info);
+ pm8001_ha->ccb_info[i].task = NULL;
+ pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
+ pm8001_ha->ccb_info[i].device = NULL;
+ ++pm8001_ha->tags_num;
+ }
+ pm8001_ha->flags = PM8001F_INIT_TIME;
+ /* Initialize tags */
+ pm8001_tag_init(pm8001_ha);
+ return 0;
+err_out:
+ return 1;
+}
+
+/**
+ * pm8001_ioremap - remap the pci high physical address to kernal virtual
+ * address so that we can access them.
+ * @pm8001_ha:our hba structure.
+ */
+static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 bar;
+ u32 logicalBar = 0;
+ struct pci_dev *pdev;
+
+ pdev = pm8001_ha->pdev;
+ /* map pci mem (PMC pci base 0-3)*/
+ for (bar = 0; bar < 6; bar++) {
+ /*
+ ** logical BARs for SPC:
+ ** bar 0 and 1 - logical BAR0
+ ** bar 2 and 3 - logical BAR1
+ ** bar4 - logical BAR2
+ ** bar5 - logical BAR3
+ ** Skip the appropriate assignments:
+ */
+ if ((bar == 1) || (bar == 3))
+ continue;
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+ pm8001_ha->io_mem[logicalBar].membase =
+ pci_resource_start(pdev, bar);
+ pm8001_ha->io_mem[logicalBar].membase &=
+ (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ pm8001_ha->io_mem[logicalBar].memsize =
+ pci_resource_len(pdev, bar);
+ pm8001_ha->io_mem[logicalBar].memvirtaddr =
+ ioremap(pm8001_ha->io_mem[logicalBar].membase,
+ pm8001_ha->io_mem[logicalBar].memsize);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PCI: bar %d, logicalBar %d ",
+ bar, logicalBar));
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "base addr %llx virt_addr=%llx len=%d\n",
+ (u64)pm8001_ha->io_mem[logicalBar].membase,
+ (u64)(unsigned long)
+ pm8001_ha->io_mem[logicalBar].memvirtaddr,
+ pm8001_ha->io_mem[logicalBar].memsize));
+ } else {
+ pm8001_ha->io_mem[logicalBar].membase = 0;
+ pm8001_ha->io_mem[logicalBar].memsize = 0;
+ pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+ }
+ logicalBar++;
+ }
+ return 0;
+}
+
+/**
+ * pm8001_pci_alloc - initialize our ha card structure
+ * @pdev: pci device.
+ * @ent: ent
+ * @shost: scsi host struct which has been initialized before.
+ */
+static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct Scsi_Host *shost)
+
+{
+ struct pm8001_hba_info *pm8001_ha;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ int j;
+
+ pm8001_ha = sha->lldd_ha;
+ if (!pm8001_ha)
+ return NULL;
+
+ pm8001_ha->pdev = pdev;
+ pm8001_ha->dev = &pdev->dev;
+ pm8001_ha->chip_id = ent->driver_data;
+ pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
+ pm8001_ha->irq = pdev->irq;
+ pm8001_ha->sas = sha;
+ pm8001_ha->shost = shost;
+ pm8001_ha->id = pm8001_id++;
+ pm8001_ha->logging_level = 0x01;
+ sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+ /* IOMB size is 128 for 8088/89 controllers */
+ if (pm8001_ha->chip_id != chip_8001)
+ pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
+ else
+ pm8001_ha->iomb_size = IOMB_SIZE_SPC;
+
+#ifdef PM8001_USE_TASKLET
+ /* Tasklet for non msi-x interrupt handler */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[0]));
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[j]));
+#endif
+ pm8001_ioremap(pm8001_ha);
+ if (!pm8001_alloc(pm8001_ha, ent))
+ return pm8001_ha;
+ pm8001_free(pm8001_ha);
+ return NULL;
+}
+
+/**
+ * pci_go_44 - pm8001 specified, its DMA is 44 bit rather than 64 bit
+ * @pdev: pci device.
+ */
+static int pci_go_44(struct pci_dev *pdev)
+{
+ int rc;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "44-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/**
+ * pm8001_prep_sas_ha_init - allocate memory in general hba struct && init them.
+ * @shost: scsi host which has been allocated outside.
+ * @chip_info: our ha struct.
+ */
+static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
+ const struct pm8001_chip_info *chip_info)
+{
+ int phy_nr, port_nr;
+ struct asd_sas_phy **arr_phy;
+ struct asd_sas_port **arr_port;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ phy_nr = chip_info->n_phy;
+ port_nr = phy_nr;
+ memset(sha, 0x00, sizeof(*sha));
+ arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+ if (!arr_phy)
+ goto exit;
+ arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+ if (!arr_port)
+ goto exit_free2;
+
+ sha->sas_phy = arr_phy;
+ sha->sas_port = arr_port;
+ sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
+ if (!sha->lldd_ha)
+ goto exit_free1;
+
+ shost->transportt = pm8001_stt;
+ shost->max_id = PM8001_MAX_DEVICES;
+ shost->max_lun = 8;
+ shost->max_channel = 0;
+ shost->unique_id = pm8001_id;
+ shost->max_cmd_len = 16;
+ shost->can_queue = PM8001_CAN_QUEUE;
+ shost->cmd_per_lun = 32;
+ return 0;
+exit_free1:
+ kfree(arr_port);
+exit_free2:
+ kfree(arr_phy);
+exit:
+ return -1;
+}
+
+/**
+ * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
+ * @shost: scsi host which has been allocated outside
+ * @chip_info: our ha struct.
+ */
+static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
+ const struct pm8001_chip_info *chip_info)
+{
+ int i = 0;
+ struct pm8001_hba_info *pm8001_ha;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ pm8001_ha = sha->lldd_ha;
+ for (i = 0; i < chip_info->n_phy; i++) {
+ sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
+ sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
+ }
+ sha->sas_ha_name = DRV_NAME;
+ sha->dev = pm8001_ha->dev;
+
+ sha->lldd_module = THIS_MODULE;
+ sha->sas_addr = &pm8001_ha->sas_addr[0];
+ sha->num_phys = chip_info->n_phy;
+ sha->core.shost = shost;
+}
+
+/**
+ * pm8001_init_sas_add - initialize sas address
+ * @chip_info: our ha struct.
+ *
+ * Currently we just set the fixed SAS address to our HBA,for manufacture,
+ * it should read from the EEPROM
+ */
+static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
+{
+ u8 i, j;
+#ifdef PM8001_READ_VPD
+ /* For new SPC controllers WWN is stored in flash vpd
+ * For SPC/SPCve controllers WWN is stored in EEPROM
+ * For Older SPC WWN is stored in NVMD
+ */
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+ u16 deviceid;
+ int rc;
+
+ pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+ pm8001_ha->nvmd_completion = &completion;
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
+ payload.minor_function = 4;
+ payload.length = 4096;
+ } else {
+ payload.minor_function = 0;
+ payload.length = 128;
+ }
+ } else {
+ payload.minor_function = 1;
+ payload.length = 4096;
+ }
+ payload.offset = 0;
+ payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
+ if (!payload.func_specific) {
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
+ return;
+ }
+ rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ if (rc) {
+ kfree(payload.func_specific);
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ return;
+ }
+ wait_for_completion(&completion);
+
+ for (i = 0, j = 0; i <= 7; i++, j++) {
+ if (pm8001_ha->chip_id == chip_8001) {
+ if (deviceid == 0x8081)
+ pm8001_ha->sas_addr[j] =
+ payload.func_specific[0x704 + i];
+ else if (deviceid == 0x0042)
+ pm8001_ha->sas_addr[j] =
+ payload.func_specific[0x010 + i];
+ } else
+ pm8001_ha->sas_addr[j] =
+ payload.func_specific[0x804 + i];
+ }
+
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ memcpy(&pm8001_ha->phy[i].dev_sas_addr,
+ pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("phy %d sas_addr = %016llx\n", i,
+ pm8001_ha->phy[i].dev_sas_addr));
+ }
+ kfree(payload.func_specific);
+#else
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
+ pm8001_ha->phy[i].dev_sas_addr =
+ cpu_to_be64((u64)
+ (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
+ }
+ memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
+ SAS_ADDR_SIZE);
+#endif
+}
+
+/*
+ * pm8001_get_phy_settings_info : Read phy setting values.
+ * @pm8001_ha : our hba.
+ */
+static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+{
+
+#ifdef PM8001_READ_VPD
+ /*OPTION ROM FLASH read for the SPC cards */
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+ int rc;
+
+ pm8001_ha->nvmd_completion = &completion;
+ /* SAS ADDRESS read from flash / EEPROM */
+ payload.minor_function = 6;
+ payload.offset = 0;
+ payload.length = 4096;
+ payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ if (!payload.func_specific)
+ return -ENOMEM;
+ /* Read phy setting values from flash */
+ rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ if (rc) {
+ kfree(payload.func_specific);
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ return -ENOMEM;
+ }
+ wait_for_completion(&completion);
+ pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+ kfree(payload.func_specific);
+#endif
+ return 0;
+}
+
+#ifdef PM8001_USE_MSIX
+/**
+ * pm8001_setup_msix - enable MSI-X interrupt
+ * @chip_info: our ha struct.
+ * @irq_handler: irq_handler
+ */
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 i = 0, j = 0;
+ u32 number_of_intr;
+ int flag = 0;
+ u32 max_entry;
+ int rc;
+ static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
+
+ /* SPCv controllers supports 64 msi-x */
+ if (pm8001_ha->chip_id == chip_8001) {
+ number_of_intr = 1;
+ } else {
+ number_of_intr = PM8001_MAX_MSIX_VEC;
+ flag &= ~IRQF_SHARED;
+ }
+
+ max_entry = sizeof(pm8001_ha->msix_entries) /
+ sizeof(pm8001_ha->msix_entries[0]);
+ for (i = 0; i < max_entry ; i++)
+ pm8001_ha->msix_entries[i].entry = i;
+ rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries,
+ number_of_intr);
+ pm8001_ha->number_of_intr = number_of_intr;
+ if (rc)
+ return rc;
+
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "pci_enable_msix_exact request ret:%d no of intr %d\n",
+ rc, pm8001_ha->number_of_intr));
+
+ for (i = 0; i < number_of_intr; i++) {
+ snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+ DRV_NAME"%d", i);
+ pm8001_ha->irq_vector[i].irq_id = i;
+ pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
+
+ rc = request_irq(pm8001_ha->msix_entries[i].vector,
+ pm8001_interrupt_handler_msix, flag,
+ intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ if (rc) {
+ for (j = 0; j < i; j++) {
+ free_irq(pm8001_ha->msix_entries[j].vector,
+ &(pm8001_ha->irq_vector[i]));
+ }
+ pci_disable_msix(pm8001_ha->pdev);
+ break;
+ }
+ }
+
+ return rc;
+}
+#endif
+
+/**
+ * pm8001_request_irq - register interrupt
+ * @chip_info: our ha struct.
+ */
+static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = pm8001_ha->pdev;
+
+#ifdef PM8001_USE_MSIX
+ if (pdev->msix_cap)
+ return pm8001_setup_msix(pm8001_ha);
+ else {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MSIX not supported!!!\n"));
+ goto intx;
+ }
+#endif
+
+intx:
+ /* initialize the INT-X interrupt */
+ rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+ DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
+ return rc;
+}
+
+/**
+ * pm8001_pci_probe - probe supported device
+ * @pdev: pci device which kernel has been prepared for.
+ * @ent: pci device id
+ *
+ * This function is the main initialization function, when register a new
+ * pci driver it is invoked, all struct an hardware initilization should be done
+ * here, also, register interrupt
+ */
+static int pm8001_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned int rc;
+ u32 pci_reg;
+ u8 i = 0;
+ struct pm8001_hba_info *pm8001_ha;
+ struct Scsi_Host *shost = NULL;
+ const struct pm8001_chip_info *chip;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pm80xx: driver version %s\n", DRV_VERSION);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_enable;
+ pci_set_master(pdev);
+ /*
+ * Enable pci slot busmaster by setting pci command register.
+ * This is required by FW for Cyclone card.
+ */
+
+ pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
+ pci_reg |= 0x157;
+ pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable;
+ rc = pci_go_44(pdev);
+ if (rc)
+ goto err_out_regions;
+
+ shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
+ if (!shost) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+ chip = &pm8001_chips[ent->driver_data];
+ SHOST_TO_SAS_HA(shost) =
+ kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+ if (!SHOST_TO_SAS_HA(shost)) {
+ rc = -ENOMEM;
+ goto err_out_free_host;
+ }
+
+ rc = pm8001_prep_sas_ha_init(shost, chip);
+ if (rc) {
+ rc = -ENOMEM;
+ goto err_out_free;
+ }
+ pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+ /* ent->driver variable is used to differentiate between controllers */
+ pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
+ if (!pm8001_ha) {
+ rc = -ENOMEM;
+ goto err_out_free;
+ }
+ list_add_tail(&pm8001_ha->list, &hba_list);
+ PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+ rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "chip_init failed [ret: %d]\n", rc));
+ goto err_out_ha_free;
+ }
+
+ rc = scsi_add_host(shost, &pdev->dev);
+ if (rc)
+ goto err_out_ha_free;
+ rc = pm8001_request_irq(pm8001_ha);
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "pm8001_request_irq failed [ret: %d]\n", rc));
+ goto err_out_shost;
+ }
+
+ PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+ if (pm8001_ha->chip_id != chip_8001) {
+ for (i = 1; i < pm8001_ha->number_of_intr; i++)
+ PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+ /* setup thermal configuration. */
+ pm80xx_set_thermal_config(pm8001_ha);
+ }
+
+ pm8001_init_sas_add(pm8001_ha);
+ /* phy setting support for motherboard controller */
+ if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
+ pdev->subsystem_vendor != 0) {
+ rc = pm8001_get_phy_settings_info(pm8001_ha);
+ if (rc)
+ goto err_out_shost;
+ }
+ pm8001_post_sas_ha_init(shost, chip);
+ rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+ if (rc)
+ goto err_out_shost;
+ scsi_scan_host(pm8001_ha->shost);
+ return 0;
+
+err_out_shost:
+ scsi_remove_host(pm8001_ha->shost);
+err_out_ha_free:
+ pm8001_free(pm8001_ha);
+err_out_free:
+ kfree(SHOST_TO_SAS_HA(shost));
+err_out_free_host:
+ kfree(shost);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_enable:
+ return rc;
+}
+
+static void pm8001_pci_remove(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct pm8001_hba_info *pm8001_ha;
+ int i, j;
+ pm8001_ha = sha->lldd_ha;
+ sas_unregister_ha(sha);
+ sas_remove_host(pm8001_ha->shost);
+ list_del(&pm8001_ha->list);
+ scsi_remove_host(pm8001_ha->shost);
+ PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+ PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+
+#ifdef PM8001_USE_MSIX
+ for (i = 0; i < pm8001_ha->number_of_intr; i++)
+ synchronize_irq(pm8001_ha->msix_entries[i].vector);
+ for (i = 0; i < pm8001_ha->number_of_intr; i++)
+ free_irq(pm8001_ha->msix_entries[i].vector,
+ &(pm8001_ha->irq_vector[i]));
+ pci_disable_msix(pdev);
+#else
+ free_irq(pm8001_ha->irq, sha);
+#endif
+#ifdef PM8001_USE_TASKLET
+ /* For non-msix and msix interrupts */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_kill(&pm8001_ha->tasklet[0]);
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_kill(&pm8001_ha->tasklet[j]);
+#endif
+ pm8001_free(pm8001_ha);
+ kfree(sha->sas_phy);
+ kfree(sha->sas_port);
+ kfree(sha);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * pm8001_pci_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct pm8001_hba_info *pm8001_ha;
+ int i, j;
+ u32 device_state;
+ pm8001_ha = sha->lldd_ha;
+ sas_suspend_ha(sha);
+ flush_workqueue(pm8001_wq);
+ scsi_block_requests(pm8001_ha->shost);
+ if (!pdev->pm_cap) {
+ dev_err(&pdev->dev, " PCI PM not supported\n");
+ return -ENODEV;
+ }
+ PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+ PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+#ifdef PM8001_USE_MSIX
+ for (i = 0; i < pm8001_ha->number_of_intr; i++)
+ synchronize_irq(pm8001_ha->msix_entries[i].vector);
+ for (i = 0; i < pm8001_ha->number_of_intr; i++)
+ free_irq(pm8001_ha->msix_entries[i].vector,
+ &(pm8001_ha->irq_vector[i]));
+ pci_disable_msix(pdev);
+#else
+ free_irq(pm8001_ha->irq, sha);
+#endif
+#ifdef PM8001_USE_TASKLET
+ /* For non-msix and msix interrupts */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_kill(&pm8001_ha->tasklet[0]);
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_kill(&pm8001_ha->tasklet[j]);
+#endif
+ device_state = pci_choose_state(pdev, state);
+ pm8001_printk("pdev=0x%p, slot=%s, entering "
+ "operating state [D%d]\n", pdev,
+ pm8001_ha->name, device_state);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/**
+ * pm8001_pci_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int pm8001_pci_resume(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct pm8001_hba_info *pm8001_ha;
+ int rc;
+ u8 i = 0, j;
+ u32 device_state;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ pm8001_ha = sha->lldd_ha;
+ device_state = pdev->current_state;
+
+ pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
+ "operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ pm8001_printk("slot=%s Enable device failed during resume\n",
+ pm8001_ha->name);
+ goto err_out_enable;
+ }
+
+ pci_set_master(pdev);
+ rc = pci_go_44(pdev);
+ if (rc)
+ goto err_out_disable;
+ sas_prep_resume_ha(sha);
+ /* chip soft rst only for spc */
+ if (pm8001_ha->chip_id == chip_8001) {
+ PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("chip soft reset successful\n"));
+ }
+ rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
+ if (rc)
+ goto err_out_disable;
+
+ /* disable all the interrupt bits */
+ PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+
+ rc = pm8001_request_irq(pm8001_ha);
+ if (rc)
+ goto err_out_disable;
+#ifdef PM8001_USE_TASKLET
+ /* Tasklet for non msi-x interrupt handler */
+ if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001))
+ tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[0]));
+ else
+ for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
+ tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[j]));
+#endif
+ PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+ if (pm8001_ha->chip_id != chip_8001) {
+ for (i = 1; i < pm8001_ha->number_of_intr; i++)
+ PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+ }
+ pm8001_ha->flags = PM8001F_RUN_TIME;
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ pm8001_ha->phy[i].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ wait_for_completion(&completion);
+ }
+ sas_resume_ha(sha);
+ return 0;
+
+err_out_disable:
+ scsi_remove_host(pm8001_ha->shost);
+ pci_disable_device(pdev);
+err_out_enable:
+ return rc;
+}
+
+/* update of pci device, vendor id and driver data with
+ * unique value for each of the controller
+ */
+static struct pci_device_id pm8001_pci_table[] = {
+ { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
+ { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
+ /* Support for SPC/SPCv/SPCve controllers */
+ { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
+ {} /* terminate list */
+};
+
+static struct pci_driver pm8001_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pm8001_pci_table,
+ .probe = pm8001_pci_probe,
+ .remove = pm8001_pci_remove,
+ .suspend = pm8001_pci_suspend,
+ .resume = pm8001_pci_resume,
+};
+
+/**
+ * pm8001_init - initialize scsi transport template
+ */
+static int __init pm8001_init(void)
+{
+ int rc = -ENOMEM;
+
+ pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
+ if (!pm8001_wq)
+ goto err;
+
+ pm8001_id = 0;
+ pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
+ if (!pm8001_stt)
+ goto err_wq;
+ rc = pci_register_driver(&pm8001_pci_driver);
+ if (rc)
+ goto err_tp;
+ return 0;
+
+err_tp:
+ sas_release_transport(pm8001_stt);
+err_wq:
+ destroy_workqueue(pm8001_wq);
+err:
+ return rc;
+}
+
+static void __exit pm8001_exit(void)
+{
+ pci_unregister_driver(&pm8001_pci_driver);
+ sas_release_transport(pm8001_stt);
+ destroy_workqueue(pm8001_wq);
+}
+
+module_init(pm8001_init);
+module_exit(pm8001_exit);
+
+MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
+MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
+MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
+MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
+MODULE_DESCRIPTION(
+ "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+ "SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
+
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
new file mode 100644
index 000000000..b93f289b4
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -0,0 +1,1258 @@
+/*
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#include <linux/slab.h>
+#include "pm8001_sas.h"
+
+/**
+ * pm8001_find_tag - from sas task to find out tag that belongs to this task
+ * @task: the task sent to the LLDD
+ * @tag: the found tag associated with the task
+ */
+static int pm8001_find_tag(struct sas_task *task, u32 *tag)
+{
+ if (task->lldd_task) {
+ struct pm8001_ccb_info *ccb;
+ ccb = task->lldd_task;
+ *tag = ccb->ccb_tag;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * pm8001_tag_free - free the no more needed tag
+ * @pm8001_ha: our hba struct
+ * @tag: the found tag associated with the task
+ */
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+{
+ void *bitmap = pm8001_ha->tags;
+ clear_bit(tag, bitmap);
+}
+
+/**
+ * pm8001_tag_alloc - allocate a empty tag for task used.
+ * @pm8001_ha: our hba struct
+ * @tag_out: the found empty tag .
+ */
+inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
+{
+ unsigned int tag;
+ void *bitmap = pm8001_ha->tags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
+ if (tag >= pm8001_ha->tags_num) {
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
+ return -SAS_QUEUE_FULL;
+ }
+ set_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
+ *tag_out = tag;
+ return 0;
+}
+
+void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ for (i = 0; i < pm8001_ha->tags_num; ++i)
+ pm8001_tag_free(pm8001_ha, i);
+}
+
+ /**
+ * pm8001_mem_alloc - allocate memory for pm8001.
+ * @pdev: pci device.
+ * @virt_addr: the allocated virtual address
+ * @pphys_addr_hi: the physical address high byte address.
+ * @pphys_addr_lo: the physical address low byte address.
+ * @mem_size: memory size.
+ */
+int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
+ dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
+ u32 *pphys_addr_lo, u32 mem_size, u32 align)
+{
+ caddr_t mem_virt_alloc;
+ dma_addr_t mem_dma_handle;
+ u64 phys_align;
+ u64 align_offset = 0;
+ if (align)
+ align_offset = (dma_addr_t)align - 1;
+ mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
+ &mem_dma_handle);
+ if (!mem_virt_alloc) {
+ pm8001_printk("memory allocation error\n");
+ return -1;
+ }
+ *pphys_addr = mem_dma_handle;
+ phys_align = (*pphys_addr + align_offset) & ~align_offset;
+ *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
+ *pphys_addr_hi = upper_32_bits(phys_align);
+ *pphys_addr_lo = lower_32_bits(phys_align);
+ return 0;
+}
+/**
+ * pm8001_find_ha_by_dev - from domain device which come from sas layer to
+ * find out our hba struct.
+ * @dev: the domain device which from sas layer.
+ */
+static
+struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
+{
+ struct sas_ha_struct *sha = dev->port->ha;
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ return pm8001_ha;
+}
+
+/**
+ * pm8001_phy_control - this function should be registered to
+ * sas_domain_function_template to provide libsas used, note: this is just
+ * control the HBA phy rather than other expander phy if you want control
+ * other phy, you should use SMP command.
+ * @sas_phy: which phy in HBA phys.
+ * @func: the operation.
+ * @funcdata: always NULL.
+ */
+int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata)
+{
+ int rc = 0, phy_id = sas_phy->id;
+ struct pm8001_hba_info *pm8001_ha = NULL;
+ struct sas_phy_linkrates *rates;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ unsigned long flags;
+ pm8001_ha = sas_phy->ha->lldd_ha;
+ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ switch (func) {
+ case PHY_FUNC_SET_LINK_RATE:
+ rates = funcdata;
+ if (rates->minimum_linkrate) {
+ pm8001_ha->phy[phy_id].minimum_linkrate =
+ rates->minimum_linkrate;
+ }
+ if (rates->maximum_linkrate) {
+ pm8001_ha->phy[phy_id].maximum_linkrate =
+ rates->maximum_linkrate;
+ }
+ if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+ PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_LINK_RESET);
+ break;
+ case PHY_FUNC_HARD_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+ PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_HARD_RESET);
+ break;
+ case PHY_FUNC_LINK_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+ PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_LINK_RESET);
+ break;
+ case PHY_FUNC_RELEASE_SPINUP_HOLD:
+ PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_LINK_RESET);
+ break;
+ case PHY_FUNC_DISABLE:
+ PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
+ break;
+ case PHY_FUNC_GET_EVENTS:
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (pm8001_ha->chip_id == chip_8001) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ (phy_id < 4) ? 0x30000 : 0x40000)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return -EINVAL;
+ }
+ }
+ {
+ struct sas_phy *phy = sas_phy->phy;
+ uint32_t *qp = (uint32_t *)(((char *)
+ pm8001_ha->io_mem[2].memvirtaddr)
+ + 0x1034 + (0x4000 * (phy_id & 3)));
+
+ phy->invalid_dword_count = qp[0];
+ phy->running_disparity_error_count = qp[1];
+ phy->loss_of_dword_sync_count = qp[3];
+ phy->phy_reset_problem_count = qp[4];
+ }
+ if (pm8001_ha->chip_id == chip_8001)
+ pm8001_bar4_shift(pm8001_ha, 0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return 0;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+ msleep(300);
+ return rc;
+}
+
+/**
+ * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
+ * command to HBA.
+ * @shost: the scsi host data.
+ */
+void pm8001_scan_start(struct Scsi_Host *shost)
+{
+ int i;
+ struct pm8001_hba_info *pm8001_ha;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ pm8001_ha = sha->lldd_ha;
+ /* SAS_RE_INITIALIZATION not available in SPCv/ve */
+ if (pm8001_ha->chip_id == chip_8001)
+ PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+ for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+}
+
+int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+
+ /* give the phy enabling interrupt event time to come in (1s
+ * is empirically about all it takes) */
+ if (time < HZ)
+ return 0;
+ /* Wait for discovery to finish */
+ sas_drain_work(ha);
+ return 1;
+}
+
+/**
+ * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to smp task
+ */
+static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
+}
+
+u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to sata task
+ */
+static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
+}
+
+/**
+ * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to TM
+ * @tmf: the task management IU
+ */
+static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+{
+ return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
+}
+
+/**
+ * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to ssp task
+ */
+static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
+}
+
+ /* Find the local port id that's attached to this device */
+static int sas_find_local_port_id(struct domain_device *dev)
+{
+ struct domain_device *pdev = dev->parent;
+
+ /* Directly attached device */
+ if (!pdev)
+ return dev->port->id;
+ while (pdev) {
+ struct domain_device *pdev_p = pdev->parent;
+ if (!pdev_p)
+ return pdev->port->id;
+ pdev = pdev->parent;
+ }
+ return 0;
+}
+
+/**
+ * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
+ * @task: the task to be execute.
+ * @num: if can_queue great than 1, the task can be queued up. for SMP task,
+ * we always execute one one time.
+ * @gfp_flags: gfp_flags.
+ * @is_tmf: if it is task management task.
+ * @tmf: the task management IU
+ */
+#define DEV_IS_GONE(pm8001_dev) \
+ ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
+static int pm8001_task_exec(struct sas_task *task,
+ gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
+{
+ struct domain_device *dev = task->dev;
+ struct pm8001_hba_info *pm8001_ha;
+ struct pm8001_device *pm8001_dev;
+ struct pm8001_port *port = NULL;
+ struct sas_task *t = task;
+ struct pm8001_ccb_info *ccb;
+ u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ unsigned long flags = 0;
+
+ if (!dev->port) {
+ struct task_status_struct *tsm = &t->task_status;
+ tsm->resp = SAS_TASK_UNDELIVERED;
+ tsm->stat = SAS_PHY_DOWN;
+ if (dev->dev_type != SAS_SATA_DEV)
+ t->task_done(t);
+ return 0;
+ }
+ pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ do {
+ dev = t->dev;
+ pm8001_dev = dev->lldd_dev;
+ port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+ if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
+ if (sas_protocol_ata(t->task_proto)) {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ continue;
+ } else {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ t->task_done(t);
+ continue;
+ }
+ }
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ goto err_out;
+ ccb = &pm8001_ha->ccb_info[tag];
+
+ if (!sas_protocol_ata(t->task_proto)) {
+ if (t->num_scatter) {
+ n_elem = dma_map_sg(pm8001_ha->dev,
+ t->scatter,
+ t->num_scatter,
+ t->data_dir);
+ if (!n_elem) {
+ rc = -ENOMEM;
+ goto err_out_tag;
+ }
+ }
+ } else {
+ n_elem = t->num_scatter;
+ }
+
+ t->lldd_task = ccb;
+ ccb->n_elem = n_elem;
+ ccb->ccb_tag = tag;
+ ccb->task = t;
+ ccb->device = pm8001_dev;
+ switch (t->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ rc = pm8001_task_prep_smp(pm8001_ha, ccb);
+ break;
+ case SAS_PROTOCOL_SSP:
+ if (is_tmf)
+ rc = pm8001_task_prep_ssp_tm(pm8001_ha,
+ ccb, tmf);
+ else
+ rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ rc = pm8001_task_prep_ata(pm8001_ha, ccb);
+ break;
+ default:
+ dev_printk(KERN_ERR, pm8001_ha->dev,
+ "unknown sas_task proto: 0x%x\n",
+ t->task_proto);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("rc is %x\n", rc));
+ goto err_out_tag;
+ }
+ /* TODO: select normal or high priority */
+ spin_lock(&t->task_state_lock);
+ t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock(&t->task_state_lock);
+ pm8001_dev->running_req++;
+ } while (0);
+ rc = 0;
+ goto out_done;
+
+err_out_tag:
+ pm8001_tag_free(pm8001_ha, tag);
+err_out:
+ dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
+ if (!sas_protocol_ata(t->task_proto))
+ if (n_elem)
+ dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+ t->data_dir);
+out_done:
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return rc;
+}
+
+/**
+ * pm8001_queue_command - register for upper layer used, all IO commands sent
+ * to HBA are from this interface.
+ * @task: the task to be execute.
+ * @gfp_flags: gfp_flags
+ */
+int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
+{
+ return pm8001_task_exec(task, gfp_flags, 0, NULL);
+}
+
+/**
+ * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to ssp task
+ * @task: the task to be free.
+ * @ccb_idx: ccb index.
+ */
+void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
+{
+ if (!ccb->task)
+ return;
+ if (!sas_protocol_ata(task->task_proto))
+ if (ccb->n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SSP:
+ default:
+ /* do nothing */
+ break;
+ }
+ task->lldd_task = NULL;
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ ccb->open_retry = 0;
+ pm8001_tag_free(pm8001_ha, ccb_idx);
+}
+
+ /**
+ * pm8001_alloc_dev - find a empty pm8001_device
+ * @pm8001_ha: our hba card information
+ */
+struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 dev;
+ for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+ if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
+ pm8001_ha->devices[dev].id = dev;
+ return &pm8001_ha->devices[dev];
+ }
+ }
+ if (dev == PM8001_MAX_DEVICES) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("max support %d devices, ignore ..\n",
+ PM8001_MAX_DEVICES));
+ }
+ return NULL;
+}
+/**
+ * pm8001_find_dev - find a matching pm8001_device
+ * @pm8001_ha: our hba card information
+ */
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+ u32 device_id)
+{
+ u32 dev;
+ for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+ if (pm8001_ha->devices[dev].device_id == device_id)
+ return &pm8001_ha->devices[dev];
+ }
+ if (dev == PM8001_MAX_DEVICES) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
+ "DEVICE FOUND !!!\n"));
+ }
+ return NULL;
+}
+
+static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
+{
+ u32 id = pm8001_dev->id;
+ memset(pm8001_dev, 0, sizeof(*pm8001_dev));
+ pm8001_dev->id = id;
+ pm8001_dev->dev_type = SAS_PHY_UNUSED;
+ pm8001_dev->device_id = PM8001_MAX_DEVICES;
+ pm8001_dev->sas_device = NULL;
+}
+
+/**
+ * pm8001_dev_found_notify - libsas notify a device is found.
+ * @dev: the device structure which sas layer used.
+ *
+ * when libsas find a sas domain device, it should tell the LLDD that
+ * device is found, and then LLDD register this device to HBA firmware
+ * by the command "OPC_INB_REG_DEV", after that the HBA will assign a
+ * device ID(according to device's sas address) and returned it to LLDD. From
+ * now on, we communicate with HBA FW with the device ID which HBA assigned
+ * rather than sas address. it is the necessary step for our HBA but it is
+ * the optional for other HBA driver.
+ */
+static int pm8001_dev_found_notify(struct domain_device *dev)
+{
+ unsigned long flags = 0;
+ int res = 0;
+ struct pm8001_hba_info *pm8001_ha = NULL;
+ struct domain_device *parent_dev = dev->parent;
+ struct pm8001_device *pm8001_device;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ u32 flag = 0;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ pm8001_device = pm8001_alloc_dev(pm8001_ha);
+ if (!pm8001_device) {
+ res = -1;
+ goto found_out;
+ }
+ pm8001_device->sas_device = dev;
+ dev->lldd_dev = pm8001_device;
+ pm8001_device->dev_type = dev->dev_type;
+ pm8001_device->dcompletion = &completion;
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+ int phy_id;
+ struct ex_phy *phy;
+ for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
+ phy_id++) {
+ phy = &parent_dev->ex_dev.ex_phy[phy_id];
+ if (SAS_ADDR(phy->attached_sas_addr)
+ == SAS_ADDR(dev->sas_addr)) {
+ pm8001_device->attached_phy = phy_id;
+ break;
+ }
+ }
+ if (phy_id == parent_dev->ex_dev.num_phys) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Error: no attached dev:%016llx"
+ " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(parent_dev->sas_addr)));
+ res = -1;
+ }
+ } else {
+ if (dev->dev_type == SAS_SATA_DEV) {
+ pm8001_device->attached_phy =
+ dev->rphy->identify.phy_identifier;
+ flag = 1; /* directly sata*/
+ }
+ } /*register this device to HBA*/
+ PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
+ PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ wait_for_completion(&completion);
+ if (dev->dev_type == SAS_END_DEVICE)
+ msleep(50);
+ pm8001_ha->flags = PM8001F_RUN_TIME;
+ return 0;
+found_out:
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return res;
+}
+
+int pm8001_dev_found(struct domain_device *dev)
+{
+ return pm8001_dev_found_notify(dev);
+}
+
+void pm8001_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->slow_task->timer))
+ return;
+ complete(&task->slow_task->completion);
+}
+
+static void pm8001_tmf_timedout(unsigned long data)
+{
+ struct sas_task *task = (struct sas_task *)data;
+
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+}
+
+#define PM8001_TASK_TIMEOUT 20
+/**
+ * pm8001_exec_internal_tmf_task - execute some task management commands.
+ * @dev: the wanted device.
+ * @tmf: which task management wanted to be take.
+ * @para_len: para_len.
+ * @parameter: ssp task parameter.
+ *
+ * when errors or exception happened, we may want to do something, for example
+ * abort the issued task which result in this execption, it is done by calling
+ * this function, note it is also with the task execute interface.
+ */
+static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
+ void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+ struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
+
+ for (retry = 0; retry < 3; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+ memcpy(&task->ssp_task, parameter, para_len);
+ task->task_done = pm8001_task_done;
+ task->slow_task->timer.data = (unsigned long)task;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+ add_timer(&task->slow_task->timer);
+
+ res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
+
+ if (res) {
+ del_timer(&task->slow_task->timer);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Executing internal task "
+ "failed\n"));
+ goto ex_err;
+ }
+ wait_for_completion(&task->slow_task->completion);
+ if (pm8001_ha->chip_id != chip_8001) {
+ pm8001_dev->setds_completion = &completion_setstate;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion_setstate);
+ }
+ res = -TMF_RESP_FUNC_FAILED;
+ /* Even TMF timed out, return direct. */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TMF task[%x]timeout.\n",
+ tmf->tmf));
+ goto ex_err;
+ }
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Blocked task error.\n"));
+ res = -EMSGSIZE;
+ break;
+ } else {
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk(" Task to dev %016llx response:"
+ "0x%x status 0x%x\n",
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat));
+ sas_free_task(task);
+ task = NULL;
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+static int
+pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
+ u32 task_tag)
+{
+ int res, retry;
+ u32 ccb_tag;
+ struct pm8001_ccb_info *ccb;
+ struct sas_task *task = NULL;
+
+ for (retry = 0; retry < 3; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+ task->task_done = pm8001_task_done;
+ task->slow_task->timer.data = (unsigned long)task;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
+ add_timer(&task->slow_task->timer);
+
+ res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ if (res)
+ return res;
+ ccb = &pm8001_ha->ccb_info[ccb_tag];
+ ccb->device = pm8001_dev;
+ ccb->ccb_tag = ccb_tag;
+ ccb->task = task;
+
+ res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
+ pm8001_dev, flag, task_tag, ccb_tag);
+
+ if (res) {
+ del_timer(&task->slow_task->timer);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Executing internal task "
+ "failed\n"));
+ goto ex_err;
+ }
+ wait_for_completion(&task->slow_task->completion);
+ res = TMF_RESP_FUNC_FAILED;
+ /* Even TMF timed out, return direct. */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TMF task timeout.\n"));
+ goto ex_err;
+ }
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+
+ } else {
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk(" Task to dev %016llx response: "
+ "0x%x status 0x%x\n",
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat));
+ sas_free_task(task);
+ task = NULL;
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+/**
+ * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
+ * @dev: the device structure which sas layer used.
+ */
+static void pm8001_dev_gone_notify(struct domain_device *dev)
+{
+ unsigned long flags = 0;
+ struct pm8001_hba_info *pm8001_ha;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (pm8001_dev) {
+ u32 device_id = pm8001_dev->device_id;
+
+ PM8001_DISC_DBG(pm8001_ha,
+ pm8001_printk("found dev[%d:%x] is gone.\n",
+ pm8001_dev->device_id, pm8001_dev->dev_type));
+ if (pm8001_dev->running_req) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+ dev, 1, 0);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ }
+ PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_free_dev(pm8001_dev);
+ } else {
+ PM8001_DISC_DBG(pm8001_ha,
+ pm8001_printk("Found dev has gone.\n"));
+ }
+ dev->lldd_dev = NULL;
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+}
+
+void pm8001_dev_gone(struct domain_device *dev)
+{
+ pm8001_dev_gone_notify(dev);
+}
+
+static int pm8001_issue_ssp_tmf(struct domain_device *dev,
+ u8 *lun, struct pm8001_tmf_task *tmf)
+{
+ struct sas_ssp_task ssp_task;
+ if (!(dev->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ strncpy((u8 *)&ssp_task.LUN, lun, 8);
+ return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
+ tmf);
+}
+
+/* retry commands by ha, by task and/or by device */
+void pm8001_open_reject_retry(
+ struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task_to_close,
+ struct pm8001_device *device_to_close)
+{
+ int i;
+ unsigned long flags;
+
+ if (pm8001_ha == NULL)
+ return;
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ for (i = 0; i < PM8001_MAX_CCB; i++) {
+ struct sas_task *task;
+ struct task_status_struct *ts;
+ struct pm8001_device *pm8001_dev;
+ unsigned long flags1;
+ u32 tag;
+ struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+
+ pm8001_dev = ccb->device;
+ if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
+ continue;
+ if (!device_to_close) {
+ uintptr_t d = (uintptr_t)pm8001_dev
+ - (uintptr_t)&pm8001_ha->devices;
+ if (((d % sizeof(*pm8001_dev)) != 0)
+ || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
+ continue;
+ } else if (pm8001_dev != device_to_close)
+ continue;
+ tag = ccb->ccb_tag;
+ if (!tag || (tag == 0xFFFFFFFF))
+ continue;
+ task = ccb->task;
+ if (!task || !task->task_done)
+ continue;
+ if (task_to_close && (task != task_to_close))
+ continue;
+ ts = &task->task_status;
+ ts->resp = SAS_TASK_COMPLETE;
+ /* Force the midlayer to retry */
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ spin_lock_irqsave(&task->task_state_lock, flags1);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((task->task_state_flags
+ & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags1);
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags1);
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ task->task_done(task);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+}
+
+/**
+ * Standard mandates link reset for ATA (type 0) and hard reset for
+ * SSP (type 1) , only for RECOVERY
+ */
+int pm8001_I_T_nexus_reset(struct domain_device *dev)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct pm8001_device *pm8001_dev;
+ struct pm8001_hba_info *pm8001_ha;
+ struct sas_phy *phy;
+
+ if (!dev || !dev->lldd_dev)
+ return -ENODEV;
+
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ phy = sas_get_local_phy(dev);
+
+ if (dev_is_sata(dev)) {
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
+ if (scsi_is_sas_phy_local(phy)) {
+ rc = 0;
+ goto out;
+ }
+ rc = sas_phy_reset(phy, 1);
+ msleep(2000);
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+ dev, 1, 0);
+ pm8001_dev->setds_completion = &completion_setstate;
+ rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion_setstate);
+ } else {
+ rc = sas_phy_reset(phy, 1);
+ msleep(2000);
+ }
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc));
+ out:
+ sas_put_local_phy(phy);
+ return rc;
+}
+
+/*
+* This function handle the IT_NEXUS_XXX event or completion
+* status code for SSP/SATA/SMP I/O request.
+*/
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct pm8001_device *pm8001_dev;
+ struct pm8001_hba_info *pm8001_ha;
+ struct sas_phy *phy;
+ u32 device_id = 0;
+
+ if (!dev || !dev->lldd_dev)
+ return -1;
+
+ pm8001_dev = dev->lldd_dev;
+ device_id = pm8001_dev->device_id;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk("I_T_Nexus handler invoked !!"));
+
+ phy = sas_get_local_phy(dev);
+
+ if (dev_is_sata(dev)) {
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
+ if (scsi_is_sas_phy_local(phy)) {
+ rc = 0;
+ goto out;
+ }
+ /* send internal ssp/sata/smp abort command to FW */
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+ dev, 1, 0);
+ msleep(100);
+
+ /* deregister the target device */
+ pm8001_dev_gone_notify(dev);
+ msleep(200);
+
+ /*send phy reset to hard reset target */
+ rc = sas_phy_reset(phy, 1);
+ msleep(2000);
+ pm8001_dev->setds_completion = &completion_setstate;
+
+ wait_for_completion(&completion_setstate);
+ } else {
+ /* send internal ssp/sata/smp abort command to FW */
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+ dev, 1, 0);
+ msleep(100);
+
+ /* deregister the target device */
+ pm8001_dev_gone_notify(dev);
+ msleep(200);
+
+ /*send phy reset to hard reset target */
+ rc = sas_phy_reset(phy, 1);
+ msleep(2000);
+ }
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc));
+out:
+ sas_put_local_phy(phy);
+
+ return rc;
+}
+/* mandatory SAM-3, the task reset the specified LUN*/
+int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct pm8001_tmf_task tmf_task;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
+ if (dev_is_sata(dev)) {
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+ dev, 1, 0);
+ rc = sas_phy_reset(phy, 1);
+ sas_put_local_phy(phy);
+ pm8001_dev->setds_completion = &completion_setstate;
+ rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion_setstate);
+ } else {
+ tmf_task.tmf = TMF_LU_RESET;
+ rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ }
+ /* If failed, fall-through I_T_Nexus reset */
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
+ pm8001_dev->device_id, rc));
+ return rc;
+}
+
+/* optional SAM-3 */
+int pm8001_query_task(struct sas_task *task)
+{
+ u32 tag = 0xdeadbeef;
+ int i = 0;
+ struct scsi_lun lun;
+ struct pm8001_tmf_task tmf_task;
+ int rc = TMF_RESP_FUNC_FAILED;
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return rc;
+
+ if (task->task_proto & SAS_PROTOCOL_SSP) {
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_hba_info *pm8001_ha =
+ pm8001_find_ha_by_dev(dev);
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+ PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
+ for (i = 0; i < 16; i++)
+ printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
+ printk(KERN_INFO "]\n");
+ tmf_task.tmf = TMF_QUERY_TASK;
+ tmf_task.tag_of_task_to_be_managed = tag;
+
+ rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ switch (rc) {
+ /* The task is still in Lun, release it then */
+ case TMF_RESP_FUNC_SUCC:
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk("The task is still in Lun\n"));
+ break;
+ /* The task is not in Lun or failed, reset the phy */
+ case TMF_RESP_FUNC_FAILED:
+ case TMF_RESP_FUNC_COMPLETE:
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk("The task is not in Lun or failed,"
+ " reset the phy\n"));
+ break;
+ }
+ }
+ pm8001_printk(":rc= %d\n", rc);
+ return rc;
+}
+
+/* mandatory SAM-3, still need free task/ccb info, abord the specified task */
+int pm8001_abort_task(struct sas_task *task)
+{
+ unsigned long flags;
+ u32 tag = 0xdeadbeef;
+ u32 device_id;
+ struct domain_device *dev ;
+ struct pm8001_hba_info *pm8001_ha = NULL;
+ struct pm8001_ccb_info *ccb;
+ struct scsi_lun lun;
+ struct pm8001_device *pm8001_dev;
+ struct pm8001_tmf_task tmf_task;
+ int rc = TMF_RESP_FUNC_FAILED;
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return rc;
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ if (task->task_proto & SAS_PROTOCOL_SSP) {
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ dev = task->dev;
+ ccb = task->lldd_task;
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ int_to_scsilun(cmnd->device->lun, &lun);
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ printk(KERN_INFO "No such tag in %s\n", __func__);
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+ device_id = pm8001_dev->device_id;
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk("abort io to deviceid= %d\n", device_id));
+ tmf_task.tmf = TMF_ABORT_TASK;
+ tmf_task.tag_of_task_to_be_managed = tag;
+ rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ pm8001_dev->sas_device, 0, tag);
+ } else if (task->task_proto & SAS_PROTOCOL_SATA ||
+ task->task_proto & SAS_PROTOCOL_STP) {
+ dev = task->dev;
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ printk(KERN_INFO "No such tag in %s\n", __func__);
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ pm8001_dev->sas_device, 0, tag);
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ /* SMP */
+ dev = task->dev;
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ printk(KERN_INFO "No such tag in %s\n", __func__);
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ pm8001_dev->sas_device, 0, tag);
+
+ }
+out:
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ pm8001_printk("rc= %d\n", rc);
+ return rc;
+}
+
+int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct pm8001_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_ABORT_TASK_SET;
+ rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ return rc;
+}
+
+int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct pm8001_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_CLEAR_ACA;
+ rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct pm8001_tmf_task tmf_task;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+ PM8001_EH_DBG(pm8001_ha,
+ pm8001_printk("I_T_L_Q clear task set[%x]\n",
+ pm8001_dev->device_id));
+ tmf_task.tmf = TMF_CLEAR_TASK_SET;
+ rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ return rc;
+}
+
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
new file mode 100644
index 000000000..8dd8b7840
--- /dev/null
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -0,0 +1,723 @@
+/*
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PM8001_SAS_H_
+#define _PM8001_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <linux/atomic.h>
+#include "pm8001_defs.h"
+
+#define DRV_NAME "pm80xx"
+#define DRV_VERSION "0.1.37"
+#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
+#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
+#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
+#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
+#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
+#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
+#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
+#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
+ format, __func__, __LINE__, ## arg)
+#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
+do { \
+ if (unlikely(HBA->logging_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0);
+
+#define PM8001_EH_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)
+
+#define PM8001_INIT_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)
+
+#define PM8001_DISC_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)
+
+#define PM8001_IO_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)
+
+#define PM8001_FAIL_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)
+
+#define PM8001_IOCTL_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)
+
+#define PM8001_MSG_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
+
+
+#define PM8001_USE_TASKLET
+#define PM8001_USE_MSIX
+#define PM8001_READ_VPD
+
+
+#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
+#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \
+ || (dev->device == 0X8076) \
+ || (dev->device == 0X8077))
+
+#define PM8001_NAME_LENGTH 32/* generic length of strings */
+extern struct list_head hba_list;
+extern const struct pm8001_dispatch pm8001_8001_dispatch;
+extern const struct pm8001_dispatch pm8001_80xx_dispatch;
+
+struct pm8001_hba_info;
+struct pm8001_ccb_info;
+struct pm8001_device;
+/* define task management IU */
+struct pm8001_tmf_task {
+ u8 tmf;
+ u32 tag_of_task_to_be_managed;
+};
+struct pm8001_ioctl_payload {
+ u32 signature;
+ u16 major_function;
+ u16 minor_function;
+ u16 length;
+ u16 status;
+ u16 offset;
+ u16 id;
+ u8 *func_specific;
+};
+
+#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
+#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24)
+#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */
+#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */
+#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */
+#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
+#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
+#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
+#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3
+#define TYPE_GSM_SPACE 1
+#define TYPE_QUEUE 2
+#define TYPE_FATAL 3
+#define TYPE_NON_FATAL 4
+#define TYPE_INBOUND 1
+#define TYPE_OUTBOUND 2
+struct forensic_data {
+ u32 data_type;
+ union {
+ struct {
+ u32 direct_len;
+ u32 direct_offset;
+ void *direct_data;
+ } gsm_buf;
+ struct {
+ u16 queue_type;
+ u16 queue_index;
+ u32 direct_len;
+ void *direct_data;
+ } queue_buf;
+ struct {
+ u32 direct_len;
+ u32 direct_offset;
+ u32 read_len;
+ void *direct_data;
+ } data_buf;
+ };
+};
+
+/* bit31-26 - mask bar */
+#define SCRATCH_PAD0_BAR_MASK 0xFC000000
+/* bit25-0 - offset mask */
+#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF
+/* if AAP error state */
+#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF
+/* Inbound doorbell bit7 */
+#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80
+/* Inbound doorbell bit7 SPCV */
+#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80
+#define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */
+
+struct pm8001_dispatch {
+ char *name;
+ int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+ int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
+ int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
+ irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+ u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
+ int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+ void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+ void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+ void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+ int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb);
+ int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb);
+ int (*sata_req)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb);
+ int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
+ int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
+ int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u32 flag);
+ int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+ int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 phy_id, u32 phy_op);
+ int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
+ u32 cmd_tag);
+ int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
+ int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
+ int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
+ int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
+ void *payload);
+ int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u32 state);
+ int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 state);
+ int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 state);
+ int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
+};
+
+struct pm8001_chip_info {
+ u32 encrypt;
+ u32 n_phy;
+ const struct pm8001_dispatch *dispatch;
+};
+#define PM8001_CHIP_DISP (pm8001_ha->chip->dispatch)
+
+struct pm8001_port {
+ struct asd_sas_port sas_port;
+ u8 port_attached;
+ u8 wide_port_phymap;
+ u8 port_state;
+ struct list_head list;
+};
+
+struct pm8001_phy {
+ struct pm8001_hba_info *pm8001_ha;
+ struct pm8001_port *port;
+ struct asd_sas_phy sas_phy;
+ struct sas_identify identify;
+ struct scsi_device *sdev;
+ u64 dev_sas_addr;
+ u32 phy_type;
+ struct completion *enable_completion;
+ u32 frame_rcvd_size;
+ u8 frame_rcvd[32];
+ u8 phy_attached;
+ u8 phy_state;
+ enum sas_linkrate minimum_linkrate;
+ enum sas_linkrate maximum_linkrate;
+};
+
+struct pm8001_device {
+ enum sas_device_type dev_type;
+ struct domain_device *sas_device;
+ u32 attached_phy;
+ u32 id;
+ struct completion *dcompletion;
+ struct completion *setds_completion;
+ u32 device_id;
+ u32 running_req;
+};
+
+struct pm8001_prd_imt {
+ __le32 len;
+ __le32 e;
+};
+
+struct pm8001_prd {
+ __le64 addr; /* 64-bit buffer address */
+ struct pm8001_prd_imt im_len; /* 64-bit length */
+} __attribute__ ((packed));
+/*
+ * CCB(Command Control Block)
+ */
+struct pm8001_ccb_info {
+ struct list_head entry;
+ struct sas_task *task;
+ u32 n_elem;
+ u32 ccb_tag;
+ dma_addr_t ccb_dma_handle;
+ struct pm8001_device *device;
+ struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
+ struct fw_control_ex *fw_control_context;
+ u8 open_retry;
+};
+
+struct mpi_mem {
+ void *virt_ptr;
+ dma_addr_t phys_addr;
+ u32 phys_addr_hi;
+ u32 phys_addr_lo;
+ u32 total_len;
+ u32 num_elements;
+ u32 element_size;
+ u32 alignment;
+};
+
+struct mpi_mem_req {
+ /* The number of element in the mpiMemory array */
+ u32 count;
+ /* The array of structures that define memroy regions*/
+ struct mpi_mem region[USI_MAX_MEMCNT];
+};
+
+struct encrypt {
+ u32 cipher_mode;
+ u32 sec_mode;
+ u32 status;
+ u32 flag;
+};
+
+struct sas_phy_attribute_table {
+ u32 phystart1_16[16];
+ u32 outbound_hw_event_pid1_16[16];
+};
+
+union main_cfg_table {
+ struct {
+ u32 signature;
+ u32 interface_rev;
+ u32 firmware_rev;
+ u32 max_out_io;
+ u32 max_sgl;
+ u32 ctrl_cap_flag;
+ u32 gst_offset;
+ u32 inbound_queue_offset;
+ u32 outbound_queue_offset;
+ u32 inbound_q_nppd_hppd;
+ u32 outbound_hw_event_pid0_3;
+ u32 outbound_hw_event_pid4_7;
+ u32 outbound_ncq_event_pid0_3;
+ u32 outbound_ncq_event_pid4_7;
+ u32 outbound_tgt_ITNexus_event_pid0_3;
+ u32 outbound_tgt_ITNexus_event_pid4_7;
+ u32 outbound_tgt_ssp_event_pid0_3;
+ u32 outbound_tgt_ssp_event_pid4_7;
+ u32 outbound_tgt_smp_event_pid0_3;
+ u32 outbound_tgt_smp_event_pid4_7;
+ u32 upper_event_log_addr;
+ u32 lower_event_log_addr;
+ u32 event_log_size;
+ u32 event_log_option;
+ u32 upper_iop_event_log_addr;
+ u32 lower_iop_event_log_addr;
+ u32 iop_event_log_size;
+ u32 iop_event_log_option;
+ u32 fatal_err_interrupt;
+ u32 fatal_err_dump_offset0;
+ u32 fatal_err_dump_length0;
+ u32 fatal_err_dump_offset1;
+ u32 fatal_err_dump_length1;
+ u32 hda_mode_flag;
+ u32 anolog_setup_table_offset;
+ u32 rsvd[4];
+ } pm8001_tbl;
+
+ struct {
+ u32 signature;
+ u32 interface_rev;
+ u32 firmware_rev;
+ u32 max_out_io;
+ u32 max_sgl;
+ u32 ctrl_cap_flag;
+ u32 gst_offset;
+ u32 inbound_queue_offset;
+ u32 outbound_queue_offset;
+ u32 inbound_q_nppd_hppd;
+ u32 rsvd[8];
+ u32 crc_core_dump;
+ u32 rsvd1;
+ u32 upper_event_log_addr;
+ u32 lower_event_log_addr;
+ u32 event_log_size;
+ u32 event_log_severity;
+ u32 upper_pcs_event_log_addr;
+ u32 lower_pcs_event_log_addr;
+ u32 pcs_event_log_size;
+ u32 pcs_event_log_severity;
+ u32 fatal_err_interrupt;
+ u32 fatal_err_dump_offset0;
+ u32 fatal_err_dump_length0;
+ u32 fatal_err_dump_offset1;
+ u32 fatal_err_dump_length1;
+ u32 gpio_led_mapping;
+ u32 analog_setup_table_offset;
+ u32 int_vec_table_offset;
+ u32 phy_attr_table_offset;
+ u32 port_recovery_timer;
+ u32 interrupt_reassertion_delay;
+ u32 fatal_n_non_fatal_dump; /* 0x28 */
+ } pm80xx_tbl;
+};
+
+union general_status_table {
+ struct {
+ u32 gst_len_mpistate;
+ u32 iq_freeze_state0;
+ u32 iq_freeze_state1;
+ u32 msgu_tcnt;
+ u32 iop_tcnt;
+ u32 rsvd;
+ u32 phy_state[8];
+ u32 gpio_input_val;
+ u32 rsvd1[2];
+ u32 recover_err_info[8];
+ } pm8001_tbl;
+ struct {
+ u32 gst_len_mpistate;
+ u32 iq_freeze_state0;
+ u32 iq_freeze_state1;
+ u32 msgu_tcnt;
+ u32 iop_tcnt;
+ u32 rsvd[9];
+ u32 gpio_input_val;
+ u32 rsvd1[2];
+ u32 recover_err_info[8];
+ } pm80xx_tbl;
+};
+struct inbound_queue_table {
+ u32 element_pri_size_cnt;
+ u32 upper_base_addr;
+ u32 lower_base_addr;
+ u32 ci_upper_base_addr;
+ u32 ci_lower_base_addr;
+ u32 pi_pci_bar;
+ u32 pi_offset;
+ u32 total_length;
+ void *base_virt;
+ void *ci_virt;
+ u32 reserved;
+ __le32 consumer_index;
+ u32 producer_idx;
+};
+struct outbound_queue_table {
+ u32 element_size_cnt;
+ u32 upper_base_addr;
+ u32 lower_base_addr;
+ void *base_virt;
+ u32 pi_upper_base_addr;
+ u32 pi_lower_base_addr;
+ u32 ci_pci_bar;
+ u32 ci_offset;
+ u32 total_length;
+ void *pi_virt;
+ u32 interrup_vec_cnt_delay;
+ u32 dinterrup_to_pci_offset;
+ __le32 producer_index;
+ u32 consumer_idx;
+};
+struct pm8001_hba_memspace {
+ void __iomem *memvirtaddr;
+ u64 membase;
+ u32 memsize;
+};
+struct isr_param {
+ struct pm8001_hba_info *drv_inst;
+ u32 irq_id;
+};
+struct pm8001_hba_info {
+ char name[PM8001_NAME_LENGTH];
+ struct list_head list;
+ unsigned long flags;
+ spinlock_t lock;/* host-wide lock */
+ spinlock_t bitmap_lock;
+ struct pci_dev *pdev;/* our device */
+ struct device *dev;
+ struct pm8001_hba_memspace io_mem[6];
+ struct mpi_mem_req memoryMap;
+ struct encrypt encrypt_info; /* support encryption */
+ struct forensic_data forensic_info;
+ u32 fatal_bar_loc;
+ u32 forensic_last_offset;
+ u32 fatal_forensic_shift_offset;
+ u32 forensic_fatal_step;
+ u32 evtlog_ib_offset;
+ u32 evtlog_ob_offset;
+ void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
+ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
+ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
+ void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
+ void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
+ void __iomem *pspa_q_tbl_addr;
+ /*MPI SAS PHY attributes Queue Config Table Addr*/
+ void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
+ void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */
+ union main_cfg_table main_cfg_tbl;
+ union general_status_table gs_tbl;
+ struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
+ struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+ struct sas_phy_attribute_table phy_attr_table;
+ /* MPI SAS PHY attributes */
+ u8 sas_addr[SAS_ADDR_SIZE];
+ struct sas_ha_struct *sas;/* SCSI/SAS glue */
+ struct Scsi_Host *shost;
+ u32 chip_id;
+ const struct pm8001_chip_info *chip;
+ struct completion *nvmd_completion;
+ int tags_num;
+ unsigned long *tags;
+ struct pm8001_phy phy[PM8001_MAX_PHYS];
+ struct pm8001_port port[PM8001_MAX_PHYS];
+ u32 id;
+ u32 irq;
+ u32 iomb_size; /* SPC and SPCV IOMB size */
+ struct pm8001_device *devices;
+ struct pm8001_ccb_info *ccb_info;
+#ifdef PM8001_USE_MSIX
+ struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC];
+ /*for msi-x interrupt*/
+ int number_of_intr;/*will be used in remove()*/
+#endif
+#ifdef PM8001_USE_TASKLET
+ struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
+#endif
+ u32 logging_level;
+ u32 fw_status;
+ u32 smp_exp_mode;
+ const struct firmware *fw_image;
+ struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
+};
+
+struct pm8001_work {
+ struct work_struct work;
+ struct pm8001_hba_info *pm8001_ha;
+ void *data;
+ int handler;
+};
+
+struct pm8001_fw_image_header {
+ u8 vender_id[8];
+ u8 product_id;
+ u8 hardware_rev;
+ u8 dest_partition;
+ u8 reserved;
+ u8 fw_rev[4];
+ __be32 image_length;
+ __be32 image_crc;
+ __be32 startup_entry;
+} __attribute__((packed, aligned(4)));
+
+
+/**
+ * FW Flash Update status values
+ */
+#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT 0x00
+#define FLASH_UPDATE_IN_PROGRESS 0x01
+#define FLASH_UPDATE_HDR_ERR 0x02
+#define FLASH_UPDATE_OFFSET_ERR 0x03
+#define FLASH_UPDATE_CRC_ERR 0x04
+#define FLASH_UPDATE_LENGTH_ERR 0x05
+#define FLASH_UPDATE_HW_ERR 0x06
+#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10
+#define FLASH_UPDATE_DISABLED 0x11
+
+#define NCQ_READ_LOG_FLAG 0x80000000
+#define NCQ_ABORT_ALL_FLAG 0x40000000
+#define NCQ_2ND_RLE_FLAG 0x20000000
+/**
+ * brief param structure for firmware flash update.
+ */
+struct fw_flash_updata_info {
+ u32 cur_image_offset;
+ u32 cur_image_len;
+ u32 total_image_len;
+ struct pm8001_prd sgl;
+};
+
+struct fw_control_info {
+ u32 retcode;/*ret code (status)*/
+ u32 phase;/*ret code phase*/
+ u32 phaseCmplt;/*percent complete for the current
+ update phase */
+ u32 version;/*Hex encoded firmware version number*/
+ u32 offset;/*Used for downloading firmware */
+ u32 len; /*len of buffer*/
+ u32 size;/* Used in OS VPD and Trace get size
+ operations.*/
+ u32 reserved;/* padding required for 64 bit
+ alignment */
+ u8 buffer[1];/* Start of buffer */
+};
+struct fw_control_ex {
+ struct fw_control_info *fw_control;
+ void *buffer;/* keep buffer pointer to be
+ freed when the response comes*/
+ void *virtAddr;/* keep virtual address of the data */
+ void *usrAddr;/* keep virtual address of the
+ user data */
+ dma_addr_t phys_addr;
+ u32 len; /* len of buffer */
+ void *payload; /* pointer to IOCTL Payload */
+ u8 inProgress;/*if 1 - the IOCTL request is in
+ progress */
+ void *param1;
+ void *param2;
+ void *param3;
+};
+
+/* pm8001 workqueue */
+extern struct workqueue_struct *pm8001_wq;
+
+/******************** function prototype *********************/
+int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
+void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
+u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
+void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
+int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
+void pm8001_scan_start(struct Scsi_Host *shost);
+int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
+int pm8001_abort_task(struct sas_task *task);
+int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
+int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
+int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
+int pm8001_dev_found(struct domain_device *dev);
+void pm8001_dev_gone(struct domain_device *dev);
+int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
+int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
+int pm8001_query_task(struct sas_task *task);
+void pm8001_open_reject_retry(
+ struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task_to_close,
+ struct pm8001_device *device_to_close);
+int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
+ dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
+ u32 mem_size, u32 align);
+
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+ struct inbound_queue_table *circularQ,
+ u32 opCode, void *payload, u32 responseQueue);
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+ u16 messageSize, void **messagePtr);
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+ struct outbound_queue_table *circularQ, u8 bc);
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ,
+ void **messagePtr1, u8 *pBC);
+int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u32 state);
+int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ void *payload);
+int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+ void *fw_flash_updata_info, u32 tag);
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb,
+ struct pm8001_tmf_task *tmf);
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev,
+ u8 flag, u32 task_tag, u32 cmd_tag);
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
+void pm8001_work_fn(struct work_struct *work);
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
+ void *data, int handler);
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb);
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb);
+void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb);
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
+ void *piomb);
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+struct sas_task *pm8001_alloc_task(void);
+void pm8001_task_done(struct sas_task *task);
+void pm8001_free_task(struct sas_task *task);
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+ u32 device_id);
+int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
+
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+ u32 length, u8 *buf);
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf);
+ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
+/* ctl shared API */
+extern struct device_attribute *pm8001_host_attrs[];
+
+static inline void
+pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task, struct pm8001_ccb_info *ccb,
+ u32 ccb_idx)
+{
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+ smp_mb(); /*in order to force CPU ordering*/
+ spin_unlock(&pm8001_ha->lock);
+ task->task_done(task);
+ spin_lock(&pm8001_ha->lock);
+}
+
+#endif
+
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644
index 000000000..05cce463a
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -0,0 +1,4551 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+ #include <linux/slab.h>
+ #include "pm8001_sas.h"
+ #include "pm80xx_hwi.h"
+ #include "pm8001_chips.h"
+ #include "pm8001_ctl.h"
+
+#define SMP_DIRECT 1
+#define SMP_INDIRECT 2
+
+
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
+{
+ u32 reg_val;
+ unsigned long start;
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
+ /* confirm the setting is written */
+ start = jiffies + HZ; /* 1 sec */
+ do {
+ reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
+ } while ((reg_val != shift_value) && time_before(jiffies, start));
+ if (reg_val != shift_value) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+ " = 0x%x\n", reg_val));
+ return -1;
+ }
+ return 0;
+}
+
+void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
+ const void *destination,
+ u32 dw_count, u32 bus_base_number)
+{
+ u32 index, value, offset;
+ u32 *destination1;
+ destination1 = (u32 *)destination;
+
+ for (index = 0; index < dw_count; index += 4, destination1++) {
+ offset = (soffset + index / 4);
+ if (offset < (64 * 1024)) {
+ value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
+ *destination1 = cpu_to_le32(value);
+ }
+ }
+ return;
+}
+
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 accum_len , reg_val, index, *temp;
+ unsigned long start;
+ u8 *direct_data;
+ char *fatal_error_data = buf;
+
+ pm8001_ha->forensic_info.data_buf.direct_data = buf;
+ if (pm8001_ha->chip_id == chip_8001) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "Not supported for SPC controller");
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+ direct_data = (u8 *)fatal_error_data;
+ pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
+ pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+
+ /* start to get data */
+ /* Program the MEMBASE II Shifting Register with 0x00.*/
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_ha->forensic_last_offset = 0;
+ pm8001_ha->forensic_fatal_step = 0;
+ pm8001_ha->fatal_bar_loc = 0;
+ }
+
+ /* Read until accum_len is retrived */
+ accum_len = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
+ accum_len));
+ if (accum_len == 0xFFFFFFFF) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Possible PCI issue 0x%x not expected\n",
+ accum_len));
+ return -EIO;
+ }
+ if (accum_len == 0 || accum_len >= 0x100000) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (pm8001_ha->forensic_fatal_step == 0) {
+moreData:
+ if (pm8001_ha->forensic_info.data_buf.direct_data) {
+ /* Data is in bar, copy to host memory */
+ pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len ,
+ 1);
+ }
+ pm8001_ha->fatal_bar_loc +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_info.data_buf.direct_offset +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_last_offset +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_info.data_buf.read_len =
+ pm8001_ha->forensic_info.data_buf.direct_len;
+
+ if (pm8001_ha->forensic_last_offset >= accum_len) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 3);
+ for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+
+ pm8001_ha->fatal_bar_loc = 0;
+ pm8001_ha->forensic_fatal_step = 1;
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ pm8001_ha->forensic_last_offset = 0;
+ return (char *)pm8001_ha->
+ forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", 2);
+ for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+ return (char *)pm8001_ha->
+ forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+
+ /* Increment the MEMBASE II Shifting Register value by 0x100.*/
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 2);
+ for (index = 0; index < 256; index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+ pm8001_ha->fatal_forensic_shift_offset += 0x100;
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_ha->fatal_bar_loc = 0;
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->forensic_fatal_step == 1) {
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ /* Read 64K of the debug data. */
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /* Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
+ } while ((reg_val) && time_before(jiffies, start));
+
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+ " = 0x%x\n", reg_val));
+ return -EIO;
+ }
+
+ /* Read the next 64K of the debug data. */
+ pm8001_ha->forensic_fatal_step = 0;
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) !=
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ }
+ }
+
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+}
+
+/**
+ * read_main_config_table - read the configure table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature =
+ pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
+ pm8001_mr32(address, MAIN_INTERFACE_REVISION);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
+ pm8001_mr32(address, MAIN_FW_REVISION);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io =
+ pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl =
+ pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
+ pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset =
+ pm8001_mr32(address, MAIN_GST_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
+ pm8001_mr32(address, MAIN_IBQ_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
+ pm8001_mr32(address, MAIN_OBQ_OFFSET);
+
+ /* read Error Dump Offset and Length */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
+ pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+
+ /* read GPIO LED settings from the configuration table */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
+
+ /* read analog Setting offset from the configuration table */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
+ pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
+ pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
+ pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+ pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate =
+ pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
+ pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 =
+ pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
+ pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 =
+ pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
+ pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt =
+ pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
+ pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt =
+ pm8001_mr32(address, GST_IOPTCNT_OFFSET);
+ pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val =
+ pm8001_mr32(address, GST_GPIO_INPUT_VAL);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET0);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET1);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET2);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET3);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET4);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET5);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET6);
+ pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
+ pm8001_mr32(address, GST_RERRINFO_OFFSET7);
+}
+/**
+ * read_phy_attr_table - read the phy attribute table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
+ pm8001_ha->phy_attr_table.phystart1_16[0] =
+ pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[1] =
+ pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[2] =
+ pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[3] =
+ pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[4] =
+ pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[5] =
+ pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[6] =
+ pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[7] =
+ pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[8] =
+ pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[9] =
+ pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[10] =
+ pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[11] =
+ pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[12] =
+ pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[13] =
+ pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[14] =
+ pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
+ pm8001_ha->phy_attr_table.phystart1_16[15] =
+ pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
+
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
+ pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
+ pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
+
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+ for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ u32 offset = i * 0x20;
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+ get_pci_bar_index(pm8001_mr32(address,
+ (offset + IB_PIPCI_BAR)));
+ pm8001_ha->inbnd_q_tbl[i].pi_offset =
+ pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
+ }
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+ for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ u32 offset = i * 0x24;
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+ get_pci_bar_index(pm8001_mr32(address,
+ (offset + OB_CIPCI_BAR)));
+ pm8001_ha->outbnd_q_tbl[i].ci_offset =
+ pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
+ }
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+ u32 offsetib, offsetob;
+ void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+ void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
+ pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr =
+ pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size =
+ PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr =
+ pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr =
+ pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size =
+ PM8001_EVENT_LOG_SIZE;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+
+ /* Disable end to end CRC checking */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+
+ for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
+ pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
+ pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+ pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
+ pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+ pm8001_ha->inbnd_q_tbl[i].base_virt =
+ (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+ pm8001_ha->inbnd_q_tbl[i].total_length =
+ pm8001_ha->memoryMap.region[IB + i].total_len;
+ pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
+ pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+ pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
+ pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+ pm8001_ha->inbnd_q_tbl[i].ci_virt =
+ pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+ offsetib = i * 0x20;
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+ get_pci_bar_index(pm8001_mr32(addressib,
+ (offsetib + 0x14)));
+ pm8001_ha->inbnd_q_tbl[i].pi_offset =
+ pm8001_mr32(addressib, (offsetib + 0x18));
+ pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
+ pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
+ }
+ for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
+ pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
+ pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+ pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
+ pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+ pm8001_ha->outbnd_q_tbl[i].base_virt =
+ (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+ pm8001_ha->outbnd_q_tbl[i].total_length =
+ pm8001_ha->memoryMap.region[OB + i].total_len;
+ pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
+ pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+ pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
+ pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+ /* interrupt vector based on oq */
+ pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
+ pm8001_ha->outbnd_q_tbl[i].pi_virt =
+ pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+ offsetob = i * 0x24;
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+ get_pci_bar_index(pm8001_mr32(addressob,
+ offsetob + 0x14));
+ pm8001_ha->outbnd_q_tbl[i].ci_offset =
+ pm8001_mr32(addressob, (offsetob + 0x18));
+ pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
+ pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
+ }
+}
+
+/**
+ * update_main_config_table - update the main default table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+ pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
+ pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
+ pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
+ pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+ pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
+ pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
+ pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
+ pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
+ pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+ pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+ pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
+
+ /* SPCv specific */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
+ /* Set GPIOLED to 0x2 for LED indicator */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
+ pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+
+ pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+ pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
+{
+ void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+ u16 offset = number * 0x20;
+ pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+ pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+ pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+ pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+ pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
+{
+ void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+ u16 offset = number * 0x24;
+ pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+ pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+ pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+ pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+ pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+ pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
+ pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 max_wait_count;
+ u32 value;
+ u32 gst_len_mpistate;
+
+ /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
+ table is updated */
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
+ /* wait until Inbound DoorBell Clear Register toggled */
+ if (IS_SPCV_12G(pm8001_ha->pdev)) {
+ max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ } else {
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ }
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+ value &= SPCv_MSGU_CFG_TABLE_UPDATE;
+ } while ((value != 0) && (--max_wait_count));
+
+ if (!max_wait_count)
+ return -1;
+ /* check the MPI-State for initialization upto 100ms*/
+ max_wait_count = 100 * 1000;/* 100 msec */
+ do {
+ udelay(1);
+ gst_len_mpistate =
+ pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+ GST_GSTLEN_MPIS_OFFSET);
+ } while ((GST_MPI_STATE_INIT !=
+ (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
+ if (!max_wait_count)
+ return -1;
+
+ /* check MPI Initialization error */
+ gst_len_mpistate = gst_len_mpistate >> 16;
+ if (0x0000 != gst_len_mpistate)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 value;
+ u32 max_wait_count;
+ u32 max_wait_time;
+ int ret = 0;
+
+ /* reset / PCIe ready */
+ max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ } while ((value == 0xFFFFFFFF) && (--max_wait_count));
+
+ /* check ila status */
+ max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ } while (((value & SCRATCH_PAD_ILA_READY) !=
+ SCRATCH_PAD_ILA_READY) && (--max_wait_count));
+ if (!max_wait_count)
+ ret = -1;
+ else {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" ila ready status in %d millisec\n",
+ (max_wait_time - max_wait_count)));
+ }
+
+ /* check RAAE status */
+ max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ } while (((value & SCRATCH_PAD_RAAE_READY) !=
+ SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
+ if (!max_wait_count)
+ ret = -1;
+ else {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" raae ready status in %d millisec\n",
+ (max_wait_time - max_wait_count)));
+ }
+
+ /* check iop0 status */
+ max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
+ (--max_wait_count));
+ if (!max_wait_count)
+ ret = -1;
+ else {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" iop0 ready status in %d millisec\n",
+ (max_wait_time - max_wait_count)));
+ }
+
+ /* check iop1 status only for 16 port controllers */
+ if ((pm8001_ha->chip_id != chip_8008) &&
+ (pm8001_ha->chip_id != chip_8009)) {
+ /* 200 milli sec */
+ max_wait_time = max_wait_count = 200 * 1000;
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ } while (((value & SCRATCH_PAD_IOP1_READY) !=
+ SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
+ if (!max_wait_count)
+ ret = -1;
+ else {
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "iop1 ready status in %d millisec\n",
+ (max_wait_time - max_wait_count)));
+ }
+ }
+
+ return ret;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+ void __iomem *base_addr;
+ u32 value;
+ u32 offset;
+ u32 pcibar;
+ u32 pcilogic;
+
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+ offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
+ offset, value));
+ pcilogic = (value & 0xFC000000) >> 26;
+ pcibar = get_pci_bar_index(pcilogic);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+ pm8001_ha->main_cfg_tbl_addr = base_addr =
+ pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+ pm8001_ha->general_stat_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
+ 0xFFFFFF);
+ pm8001_ha->inbnd_q_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
+ 0xFFFFFF);
+ pm8001_ha->outbnd_q_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
+ 0xFFFFFF);
+ pm8001_ha->ivt_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
+ 0xFFFFFF);
+ pm8001_ha->pspa_q_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
+ 0xFFFFFF);
+ pm8001_ha->fatal_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
+ 0xFFFFFF);
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("GST OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("INBND OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("OBND OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("IVT OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PSPA OFFSET 0x%x\n",
+ pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("addr - main cfg %p general status %p\n",
+ pm8001_ha->main_cfg_tbl_addr,
+ pm8001_ha->general_stat_tbl_addr));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("addr - inbnd %p obnd %p\n",
+ pm8001_ha->inbnd_q_tbl_addr,
+ pm8001_ha->outbnd_q_tbl_addr));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("addr - pspa %p ivt %p\n",
+ pm8001_ha->pspa_q_tbl_addr,
+ pm8001_ha->ivt_tbl_addr));
+}
+
+/**
+ * pm80xx_set_thermal_config - support the thermal configuration
+ * @pm8001_ha: our hba card information.
+ */
+int
+pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
+{
+ struct set_ctrl_cfg_req payload;
+ struct inbound_queue_table *circularQ;
+ int rc;
+ u32 tag;
+ u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+ memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return -1;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
+ (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+ payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+ return rc;
+
+}
+
+/**
+* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
+* Timer configuration page
+* @pm8001_ha: our hba card information.
+*/
+static int
+pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
+{
+ struct set_ctrl_cfg_req payload;
+ struct inbound_queue_table *circularQ;
+ SASProtocolTimerConfig_t SASConfigPage;
+ int rc;
+ u32 tag;
+ u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+ memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+ memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
+
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+
+ if (rc)
+ return -1;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+
+ SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
+ SASConfigPage.MST_MSI = 3 << 15;
+ SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
+ SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
+ (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
+ SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
+
+ if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
+ SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
+
+
+ SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
+ SAS_OPNRJT_RTRY_INTVL;
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
+ | SAS_COPNRJT_RTRY_TMO;
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
+ | SAS_COPNRJT_RTRY_THR;
+ SASConfigPage.MAX_AIP = SAS_MAX_AIP;
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.pageCode "
+ "0x%08x\n", SASConfigPage.pageCode));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.MST_MSI "
+ " 0x%08x\n", SASConfigPage.MST_MSI));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
+ " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.STP_FRM_TMO "
+ " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.STP_IDLE_TMO "
+ " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
+ " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
+ " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
+ " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
+ " 0x%08x\n", SASConfigPage.MAX_AIP));
+
+ memcpy(&payload.cfg_pg, &SASConfigPage,
+ sizeof(SASProtocolTimerConfig_t));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
+}
+
+/**
+ * pm80xx_get_encrypt_info - Check for encryption
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 scratch3_value;
+ int ret = -1;
+
+ /* Read encryption status from SCRATCH PAD 3 */
+ scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
+ if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+ SCRATCH_PAD3_ENC_READY) {
+ if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+ pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMF_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMA_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMB_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+ pm8001_ha->encrypt_info.status = 0;
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
+ "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+ scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status));
+ ret = 0;
+ } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
+ SCRATCH_PAD3_ENC_DISABLED) {
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+ scratch3_value));
+ pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
+ pm8001_ha->encrypt_info.cipher_mode = 0;
+ pm8001_ha->encrypt_info.sec_mode = 0;
+ ret = 0;
+ } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+ SCRATCH_PAD3_ENC_DIS_ERR) {
+ pm8001_ha->encrypt_info.status =
+ (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+ if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+ pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMF_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMA_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMB_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
+ "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+ scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status));
+ } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+ SCRATCH_PAD3_ENC_ENA_ERR) {
+
+ pm8001_ha->encrypt_info.status =
+ (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+ if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+ pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMF_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMA_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+ if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+ SCRATCH_PAD3_SMB_ENABLED)
+ pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
+ "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+ scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+ pm8001_ha->encrypt_info.sec_mode,
+ pm8001_ha->encrypt_info.status));
+ }
+ return ret;
+}
+
+/**
+ * pm80xx_encrypt_update - update flash with encryption informtion
+ * @pm8001_ha: our hba card information.
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+ struct kek_mgmt_req payload;
+ struct inbound_queue_table *circularQ;
+ int rc;
+ u32 tag;
+ u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+ memset(&payload, 0, sizeof(struct kek_mgmt_req));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return -1;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ /* Currently only one key is used. New KEK index is 1.
+ * Current KEK index is 1. Store KEK to NVRAM is 1.
+ */
+ payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+ KEK_MGMT_SUBOP_KEYCARDUPDATE);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
+}
+
+/**
+ * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+ int ret;
+ u8 i = 0;
+
+ /* check the firmware status */
+ if (-1 == check_fw_ready(pm8001_ha)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Firmware is not ready!\n"));
+ return -EBUSY;
+ }
+
+ /* Initialize pci space address eg: mpi offset */
+ init_pci_device_addresses(pm8001_ha);
+ init_default_table_values(pm8001_ha);
+ read_main_config_table(pm8001_ha);
+ read_general_status_table(pm8001_ha);
+ read_inbnd_queue_table(pm8001_ha);
+ read_outbnd_queue_table(pm8001_ha);
+ read_phy_attr_table(pm8001_ha);
+
+ /* update main config table ,inbound table and outbound table */
+ update_main_config_table(pm8001_ha);
+ for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+ update_inbnd_queue_table(pm8001_ha, i);
+ for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+ update_outbnd_queue_table(pm8001_ha, i);
+
+ /* notify firmware update finished and check initialization status */
+ if (0 == mpi_init_check(pm8001_ha)) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MPI initialize successful!\n"));
+ } else
+ return -EBUSY;
+
+ /* send SAS protocol timer configuration page to FW */
+ ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+ /* Check for encryption */
+ if (pm8001_ha->chip->encrypt) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Checking for encryption\n"));
+ ret = pm80xx_get_encrypt_info(pm8001_ha);
+ if (ret == -1) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Encryption error !!\n"));
+ if (pm8001_ha->encrypt_info.status == 0x81) {
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "Encryption enabled with error."
+ "Saving encryption key to flash\n"));
+ pm80xx_encrypt_update(pm8001_ha);
+ }
+ }
+ }
+ return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 max_wait_count;
+ u32 value;
+ u32 gst_len_mpistate;
+ init_pci_device_addresses(pm8001_ha);
+ /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
+ table is stop */
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
+
+ /* wait until Inbound DoorBell Clear Register toggled */
+ if (IS_SPCV_12G(pm8001_ha->pdev)) {
+ max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ } else {
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ }
+ do {
+ udelay(1);
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+ value &= SPCv_MSGU_CFG_TABLE_RESET;
+ } while ((value != 0) && (--max_wait_count));
+
+ if (!max_wait_count) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+ return -1;
+ }
+
+ /* check the MPI-State for termination in progress */
+ /* wait until Inbound DoorBell Clear Register toggled */
+ max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
+ do {
+ udelay(1);
+ gst_len_mpistate =
+ pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+ GST_GSTLEN_MPIS_OFFSET);
+ if (GST_MPI_STATE_UNINIT ==
+ (gst_len_mpistate & GST_MPI_STATE_MASK))
+ break;
+ } while (--max_wait_count);
+ if (!max_wait_count) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+ gst_len_mpistate & GST_MPI_STATE_MASK));
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
+ * the FW register status to the originated status.
+ * @pm8001_ha: our hba card information
+ */
+
+static int
+pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 regval;
+ u32 bootloader_state;
+ u32 ibutton0, ibutton1;
+
+ /* Check if MPI is in ready state to reset */
+ if (mpi_uninit_check(pm8001_ha) != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MPI state is not ready\n"));
+ return -1;
+ }
+
+ /* checked for reset register normal state; 0x0 */
+ regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("reset register before write : 0x%x\n", regval));
+
+ pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
+ mdelay(500);
+
+ regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("reset register after write 0x%x\n", regval));
+
+ if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
+ SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" soft reset successful [regval: 0x%x]\n",
+ regval));
+ } else {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" soft reset failed [regval: 0x%x]\n",
+ regval));
+
+ /* check bootloader is successfully executed or in HDA mode */
+ bootloader_state =
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+ SCRATCH_PAD1_BOOTSTATE_MASK;
+
+ if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "Bootloader state - HDA mode SEEPROM\n"));
+ } else if (bootloader_state ==
+ SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "Bootloader state - HDA mode Bootstrap Pin\n"));
+ } else if (bootloader_state ==
+ SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "Bootloader state - HDA mode soft reset\n"));
+ } else if (bootloader_state ==
+ SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "Bootloader state-HDA mode critical error\n"));
+ }
+ return -EBUSY;
+ }
+
+ /* check the firmware status after reset */
+ if (-1 == check_fw_ready(pm8001_ha)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Firmware is not ready!\n"));
+ /* check iButton feature support for motherboard controller */
+ if (pm8001_ha->pdev->subsystem_vendor !=
+ PCI_VENDOR_ID_ADAPTEC2 &&
+ pm8001_ha->pdev->subsystem_vendor != 0) {
+ ibutton0 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_6);
+ ibutton1 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_7);
+ if (!ibutton0 && !ibutton1) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("iButton Feature is"
+ " not Available!!!\n"));
+ return -EBUSY;
+ }
+ if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("CRC Check for iButton"
+ " Feature Failed!!!\n"));
+ return -EBUSY;
+ }
+ }
+ }
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SPCv soft reset Complete\n"));
+ return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 i;
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("chip reset start\n"));
+
+ /* do SPCv chip reset. */
+ pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("SPC soft reset Complete\n"));
+
+ /* Check this ..whether delay is required or no */
+ /* delay 10 usec */
+ udelay(10);
+
+ /* wait for 20 msec until the firmware gets reloaded */
+ i = 20;
+ do {
+ mdelay(1);
+ } while ((--i) != 0);
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+ u32 mask;
+ mask = (u32)(1 << vec);
+
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ return;
+#endif
+ pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+ u32 mask;
+ if (vec == 0xFF)
+ mask = 0xFFFFFFFF;
+ else
+ mask = (u32)(1 << vec);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ return;
+#endif
+ pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_ha_dev)
+{
+ int res;
+ u32 ccb_tag;
+ struct pm8001_ccb_info *ccb;
+ struct sas_task *task = NULL;
+ struct task_abort_req task_abort;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SATA_ABORT;
+ int ret;
+
+ if (!pm8001_ha_dev) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+ return;
+ }
+
+ task = sas_alloc_slow_task(GFP_ATOMIC);
+
+ if (!task) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+ "allocate task\n"));
+ return;
+ }
+
+ task->task_done = pm8001_task_done;
+
+ res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ if (res) {
+ sas_free_task(task);
+ return;
+ }
+
+ ccb = &pm8001_ha->ccb_info[ccb_tag];
+ ccb->device = pm8001_ha_dev;
+ ccb->ccb_tag = ccb_tag;
+ ccb->task = task;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ memset(&task_abort, 0, sizeof(task_abort));
+ task_abort.abort_all = cpu_to_le32(1);
+ task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ task_abort.tag = cpu_to_le32(ccb_tag);
+
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ if (ret) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ }
+}
+
+static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_ha_dev)
+{
+ struct sata_start_req sata_cmd;
+ int res;
+ u32 ccb_tag;
+ struct pm8001_ccb_info *ccb;
+ struct sas_task *task = NULL;
+ struct host_to_dev_fis fis;
+ struct domain_device *dev;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+ task = sas_alloc_slow_task(GFP_ATOMIC);
+
+ if (!task) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("cannot allocate task !!!\n"));
+ return;
+ }
+ task->task_done = pm8001_task_done;
+
+ res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ if (res) {
+ sas_free_task(task);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("cannot allocate tag !!!\n"));
+ return;
+ }
+
+ /* allocate domain device by ourselves as libsas
+ * is not going to provide any
+ */
+ dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+ if (!dev) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Domain device cannot be allocated\n"));
+ return;
+ }
+
+ task->dev = dev;
+ task->dev->lldd_dev = pm8001_ha_dev;
+
+ ccb = &pm8001_ha->ccb_info[ccb_tag];
+ ccb->device = pm8001_ha_dev;
+ ccb->ccb_tag = ccb_tag;
+ ccb->task = task;
+ pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+ pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ /* construct read log FIS */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.fis_type = 0x27;
+ fis.flags = 0x80;
+ fis.command = ATA_CMD_READ_LOG_EXT;
+ fis.lbal = 0x10;
+ fis.sector_count = 0x1;
+
+ sata_cmd.tag = cpu_to_le32(ccb_tag);
+ sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+ memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ if (res) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ kfree(dev);
+ }
+}
+
+/**
+ * mpi_ssp_completion- process the event that FW response to the SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When FW has completed a ssp request for example a IO request, after it has
+ * filled the SG data with the data, it will trigger this event represent
+ * that he has finished the job,please check the coresponding buffer.
+ * So we will tell the caller who maybe waiting the result to tell upper layer
+ * that the task has been finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ unsigned long flags;
+ u32 status;
+ u32 param;
+ u32 tag;
+ struct ssp_completion_resp *psspPayload;
+ struct task_status_struct *ts;
+ struct ssp_response_iu *iu;
+ struct pm8001_device *pm8001_dev;
+ psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+ status = le32_to_cpu(psspPayload->status);
+ tag = le32_to_cpu(psspPayload->tag);
+ ccb = &pm8001_ha->ccb_info[tag];
+ if ((status == IO_ABORTED) && ccb->open_retry) {
+ /* Being completed by another */
+ ccb->open_retry = 0;
+ return;
+ }
+ pm8001_dev = ccb->device;
+ param = le32_to_cpu(psspPayload->param);
+ t = ccb->task;
+
+ if (status && status != IO_UNDERFLOW)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("sas IO status 0x%x\n", status));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+ ts = &t->task_status;
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW))
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive"
+ ":%016llx", SAS_ADDR(t->dev->sas_addr)));
+
+ switch (status) {
+ case IO_SUCCESS:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
+ param));
+ if (param == 0) {
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ } else {
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->residual = param;
+ iu = &psspPayload->ssp_resp_iu;
+ sas_ssp_task_response(pm8001_ha->dev, t, iu);
+ }
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ break;
+ case IO_UNDERFLOW:
+ /* SSP Completion with error */
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
+ param));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ ts->residual = param;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_NO_DEVICE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_NO_DEVICE\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ /* Force the midlayer to retry */
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (!t->uldd_task)
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_DMA:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_OFFSET_MISMATCH:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_PORT_IN_RESET:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_PORT_IN_RESET\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ if (!t->uldd_task)
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_DS_NON_OPERATIONAL);
+ break;
+ case IO_DS_IN_RECOVERY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_TM_TAG_NOT_FOUND:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", status));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ }
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("scsi_status = 0x%x\n ",
+ psspPayload->ssp_resp_iu.status));
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "task 0x%p done with io_status 0x%x resp 0x%x "
+ "stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ t->task_done(t);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ struct sas_task *t;
+ unsigned long flags;
+ struct task_status_struct *ts;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_device *pm8001_dev;
+ struct ssp_event_resp *psspPayload =
+ (struct ssp_event_resp *)(piomb + 4);
+ u32 event = le32_to_cpu(psspPayload->event);
+ u32 tag = le32_to_cpu(psspPayload->tag);
+ u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ if (event)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("sas IO status 0x%x\n", event));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+ ts = &t->task_status;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, tag, event));
+ switch (event) {
+ case IO_OVERFLOW:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+ return;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ if (!t->uldd_task)
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+ return;
+ case IO_XFER_ERROR_UNEXPECTED_PHASE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_OFFSET_MISMATCH:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+ /* TBC: used default set values */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ case IO_XFER_CMD_FRAME_ISSUED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ return;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", event));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "task 0x%p done with event 0x%x resp 0x%x "
+ "stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ t->task_done(t);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ u32 param;
+ u32 status;
+ u32 tag;
+ int i, j;
+ u8 sata_addr_low[4];
+ u32 temp_sata_addr_low, temp_sata_addr_hi;
+ u8 sata_addr_hi[4];
+ struct sata_completion_resp *psataPayload;
+ struct task_status_struct *ts;
+ struct ata_task_resp *resp ;
+ u32 *sata_resp;
+ struct pm8001_device *pm8001_dev;
+ unsigned long flags;
+
+ psataPayload = (struct sata_completion_resp *)(piomb + 4);
+ status = le32_to_cpu(psataPayload->status);
+ tag = le32_to_cpu(psataPayload->tag);
+
+ if (!tag) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("tag null\n"));
+ return;
+ }
+ ccb = &pm8001_ha->ccb_info[tag];
+ param = le32_to_cpu(psataPayload->param);
+ if (ccb) {
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("ccb null\n"));
+ return;
+ }
+
+ if (t) {
+ if (t->dev && (t->dev->lldd_dev))
+ pm8001_dev = t->dev->lldd_dev;
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task null\n"));
+ return;
+ }
+
+ if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+ && unlikely(!t || !t->lldd_task || !t->dev)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task or dev null\n"));
+ return;
+ }
+
+ ts = &t->task_status;
+ if (!ts) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("ts null\n"));
+ return;
+ }
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW)) {
+ if (!((t->dev->parent) &&
+ (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
+ sata_addr_low[i] = pm8001_ha->sas_addr[j];
+ for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
+ sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+ memcpy(&temp_sata_addr_low, sata_addr_low,
+ sizeof(sata_addr_low));
+ memcpy(&temp_sata_addr_hi, sata_addr_hi,
+ sizeof(sata_addr_hi));
+ temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+ |((temp_sata_addr_hi << 8) &
+ 0xff0000) |
+ ((temp_sata_addr_hi >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_hi << 24) &
+ 0xff000000));
+ temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+ & 0xff) |
+ ((temp_sata_addr_low << 8)
+ & 0xff0000) |
+ ((temp_sata_addr_low >> 8)
+ & 0xff00) |
+ ((temp_sata_addr_low << 24)
+ & 0xff000000)) +
+ pm8001_dev->attached_phy +
+ 0x10);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%08x%08x", temp_sata_addr_hi,
+ temp_sata_addr_low));
+
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ }
+ }
+ switch (status) {
+ case IO_SUCCESS:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ if (param == 0) {
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ /* check if response is for SEND READ LOG */
+ if (pm8001_dev &&
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+ /* set new bit for abort_all */
+ pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+ /* clear bit for read log */
+ pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+ /* Free the tag */
+ pm8001_tag_free(pm8001_ha, tag);
+ sas_free_task(t);
+ return;
+ }
+ } else {
+ u8 len;
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->residual = param;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+ param));
+ sata_resp = &psataPayload->sata_resp[0];
+ resp = (struct ata_task_resp *)ts->buf;
+ if (t->ata_task.dma_xfer == 0 &&
+ t->data_dir == PCI_DMA_FROMDEVICE) {
+ len = sizeof(struct pio_setup_fis);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("PIO read len = %d\n", len));
+ } else if (t->ata_task.use_ncq) {
+ len = sizeof(struct set_dev_bits_fis);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("FPDMA len = %d\n", len));
+ } else {
+ len = sizeof(struct dev_to_host_fis);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("other len = %d\n", len));
+ }
+ if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+ resp->frame_len = len;
+ memcpy(&resp->ending_fis[0], sata_resp, len);
+ ts->buf_valid_size = sizeof(*resp);
+ } else
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("response to large\n"));
+ }
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ABORTED IOMB Tag\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ /* following cases are to do cases */
+ case IO_UNDERFLOW:
+ /* SATA Completion with error */
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ ts->residual = param;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_NO_DEVICE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_NO_DEVICE\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_INTERRUPTED;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_DMA:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_DMA\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ break;
+ case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_PORT_IN_RESET:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_PORT_IN_RESET\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha, pm8001_dev,
+ IO_DS_NON_OPERATIONAL);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_DS_IN_RECOVERY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_DS_IN_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_ERROR\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha, pm8001_dev,
+ IO_DS_IN_ERROR);
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", status));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p done with io_status 0x%x"
+ " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+ struct sas_task *t;
+ struct task_status_struct *ts;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_device *pm8001_dev;
+ struct sata_event_resp *psataPayload =
+ (struct sata_event_resp *)(piomb + 4);
+ u32 event = le32_to_cpu(psataPayload->event);
+ u32 tag = le32_to_cpu(psataPayload->tag);
+ u32 port_id = le32_to_cpu(psataPayload->port_id);
+ u32 dev_id = le32_to_cpu(psataPayload->device_id);
+ unsigned long flags;
+
+ ccb = &pm8001_ha->ccb_info[tag];
+
+ if (ccb) {
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("No CCB !!!. returning\n"));
+ return;
+ }
+ if (event)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SATA EVENT 0x%x\n", event));
+
+ /* Check if this is NCQ error */
+ if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+ /* find device using device id */
+ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+ /* send read log extension */
+ if (pm8001_dev)
+ pm80xx_send_read_log(pm8001_ha, pm8001_dev);
+ return;
+ }
+
+ if (unlikely(!t || !t->lldd_task || !t->dev)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task or dev null\n"));
+ return;
+ }
+
+ ts = &t->task_status;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+ port_id, tag, event));
+ switch (event) {
+ case IO_OVERFLOW:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_INTERRUPTED;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ if (!t->uldd_task) {
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ return;
+ }
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_NAK_RECEIVED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_PEER_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_NAK_R_ERR;
+ break;
+ case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_UNDERRUN;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_UNEXPECTED_PHASE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_OFFSET_MISMATCH:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_CMD_FRAME_ISSUED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+ break;
+ case IO_XFER_PIO_SETUP_ERROR:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+ /* TBC: used default set values */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ case IO_XFER_DMA_ACTIVATE_TIMEOUT:
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n"));
+ /* TBC: used default set values */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", event));
+ /* not allowed case. Therefore, return failed status */
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_TO;
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p done with io_status 0x%x"
+ " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ t, event, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ u32 param, i;
+ struct sas_task *t;
+ struct pm8001_ccb_info *ccb;
+ unsigned long flags;
+ u32 status;
+ u32 tag;
+ struct smp_completion_resp *psmpPayload;
+ struct task_status_struct *ts;
+ struct pm8001_device *pm8001_dev;
+ char *pdma_respaddr = NULL;
+
+ psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+ status = le32_to_cpu(psmpPayload->status);
+ tag = le32_to_cpu(psmpPayload->tag);
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ param = le32_to_cpu(psmpPayload->param);
+ t = ccb->task;
+ ts = &t->task_status;
+ pm8001_dev = ccb->device;
+ if (status)
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("smp IO status 0x%x\n", status));
+ if (unlikely(!t || !t->lldd_task || !t->dev))
+ return;
+
+ switch (status) {
+
+ case IO_SUCCESS:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("DIRECT RESPONSE Length:%d\n",
+ param));
+ pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+ ((u64)sg_dma_address
+ (&t->smp_task.smp_resp))));
+ for (i = 0; i < param; i++) {
+ *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+ i, *(pdma_respaddr+i),
+ psmpPayload->_r_a[i]));
+ }
+ }
+ break;
+ case IO_ABORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ABORTED IOMB\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_OVERFLOW:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ break;
+ case IO_NO_DEVICE:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ case IO_ERROR_HW_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_BUSY;
+ break;
+ case IO_XFER_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_BUSY;
+ break;
+ case IO_XFER_ERROR_PHY_NOT_READY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_BUSY;
+ break;
+ case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ case IO_OPEN_CNX_ERROR_BREAK:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+ break;
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+ case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ pm8001_handle_event(pm8001_ha,
+ pm8001_dev,
+ IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+ break;
+ case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(\
+ "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ case IO_XFER_ERROR_RX_FRAME:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_ERROR_INTERNAL_SMP_RESOURCE:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_QUEUE_FULL;
+ break;
+ case IO_PORT_IN_RESET:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_PORT_IN_RESET\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_DS_NON_OPERATIONAL:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ case IO_DS_IN_RECOVERY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_DS_IN_RECOVERY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ default:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Unknown status 0x%x\n", status));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ /* not allowed case. Therefore, return failed status */
+ break;
+ }
+ spin_lock_irqsave(&t->task_state_lock, flags);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "task 0x%p done with io_status 0x%x resp 0x%x"
+ "stat 0x%x but aborted by upper layer!\n",
+ t, status, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ t->task_done(t);
+ }
+}
+
+/**
+ * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the outbound queue message number.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+ u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+ struct hw_event_ack_req payload;
+ u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+ struct inbound_queue_table *circularQ;
+
+ memset((u8 *)&payload, 0, sizeof(payload));
+ circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+ payload.tag = cpu_to_le32(1);
+ payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+ ((phyId & 0xFF) << 24) | (port_id & 0xFF));
+ payload.param0 = cpu_to_le32(param0);
+ payload.param1 = cpu_to_le32(param1);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up -FW tells me a SAS phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 lr_status_evt_portid =
+ le32_to_cpu(pPayload->lr_status_evt_portid);
+ u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
+ u8 link_rate =
+ (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+ u8 phy_id =
+ (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ u8 deviceType = pPayload->sas_identify.dev_type;
+ port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "portid:%d; phyid:%d; linkrate:%d; "
+ "portstate:%x; devicetype:%x\n",
+ port_id, phy_id, link_rate, portstate, deviceType));
+
+ switch (deviceType) {
+ case SAS_PHY_UNUSED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("device type no device.\n"));
+ break;
+ case SAS_END_DEVICE:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+ PHY_NOTIFY_ENABLE_SPINUP);
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("expander device.\n"));
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ break;
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("fanout expander device.\n"));
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("unknown device type(%x)\n", deviceType));
+ break;
+ }
+ phy->phy_type |= PORT_TYPE_SAS;
+ phy->identify.device_type = deviceType;
+ phy->phy_attached = 1;
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+ phy->sas_phy.oob_mode = SAS_OOB_MODE;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+ sizeof(struct sas_identify_frame)-4);
+ phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+ pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ mdelay(200);/*delay a moment to wait disk to spinup*/
+ pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up -FW tells me a SATA phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+ u32 lr_status_evt_portid =
+ le32_to_cpu(pPayload->lr_status_evt_portid);
+ u8 link_rate =
+ (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+ u8 phy_id =
+ (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+ port_id, phy_id, link_rate, portstate));
+
+ port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->sas_phy.oob_mode = SATA_OOB_MODE;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ sizeof(struct dev_to_host_fis));
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ phy->identify.device_type = SAS_SATA_DEV;
+ pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down -we should notify the libsas the phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+
+ u32 lr_status_evt_portid =
+ le32_to_cpu(pPayload->lr_status_evt_portid);
+ u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+ u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+ u8 phy_id =
+ (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ port->port_state = portstate;
+ phy->phy_type = 0;
+ phy->identify.device_type = 0;
+ phy->phy_attached = 0;
+ memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
+ switch (portstate) {
+ case PORT_VALID:
+ break;
+ case PORT_INVALID:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" PortInvalid portID %d\n", port_id));
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ break;
+ case PORT_IN_RESET:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" Port In Reset portID %d\n", port_id));
+ break;
+ case PORT_NOT_ESTABLISHED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ port->port_attached = 0;
+ break;
+ case PORT_LOSTCOMM:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ break;
+ default:
+ port->port_attached = 0;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" phy Down and(default) = 0x%x\n",
+ portstate));
+ break;
+
+ }
+}
+
+static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct phy_start_resp *pPayload =
+ (struct phy_start_resp *)(piomb + 4);
+ u32 status =
+ le32_to_cpu(pPayload->status);
+ u32 phy_id =
+ le32_to_cpu(pPayload->phyid);
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
+ status, phy_id));
+ if (status == 0) {
+ phy->phy_state = 1;
+ if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ complete(phy->enable_completion);
+ }
+ return 0;
+
+}
+
+/**
+ * mpi_thermal_hw_event -The hw event has come.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct thermal_hw_event *pPayload =
+ (struct thermal_hw_event *)(piomb + 4);
+
+ u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
+ u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
+
+ if (thermal_event & 0x40) {
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Thermal Event: Local high temperature violated!\n"));
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Thermal Event: Measured local high temperature %d\n",
+ ((rht_lht & 0xFF00) >> 8)));
+ }
+ if (thermal_event & 0x10) {
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Thermal Event: Remote high temperature violated!\n"));
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Thermal Event: Measured remote high temperature %d\n",
+ ((rht_lht & 0xFF000000) >> 24)));
+ }
+ return 0;
+}
+
+/**
+ * mpi_hw_event -The hw event has come.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ unsigned long flags;
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 lr_status_evt_portid =
+ le32_to_cpu(pPayload->lr_status_evt_portid);
+ u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+ u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+ u8 phy_id =
+ (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u16 eventType =
+ (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
+ u8 status =
+ (u8)((lr_status_evt_portid & 0x0F000000) >> 24);
+
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
+ port_id, phy_id, eventType, status));
+
+ switch (eventType) {
+
+ case HW_EVENT_SAS_PHY_UP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
+ hw_event_sas_phy_up(pm8001_ha, piomb);
+ break;
+ case HW_EVENT_SATA_PHY_UP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+ hw_event_sata_phy_up(pm8001_ha, piomb);
+ break;
+ case HW_EVENT_SATA_SPINUP_HOLD:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ break;
+ case HW_EVENT_PHY_DOWN:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ phy->phy_state = 0;
+ hw_event_phy_down(pm8001_ha, piomb);
+ break;
+ case HW_EVENT_PORT_INVALID:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ /* the broadcast change primitive received, tell the LIBSAS this event
+ to revalidate the sas domain*/
+ case HW_EVENT_BROADCAST_CHANGE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+ port_id, phy_id, 1, 0);
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_PHY_ERROR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+ sas_phy_disconnected(&phy->sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ break;
+ case HW_EVENT_BROADCAST_EXP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_LINK_ERR_INVALID_DWORD:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_CODE_VIOLATION,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_MALFUNCTION:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+ break;
+ case HW_EVENT_BROADCAST_SES:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_INBOUND_CRC_ERROR:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_INBOUND_CRC_ERROR,
+ port_id, phy_id, 0, 0);
+ break;
+ case HW_EVENT_HARD_RESET_RECEIVED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+ sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ break;
+ case HW_EVENT_ID_FRAME_TIMEOUT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RESET_TIMER_TMO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0,
+ HW_EVENT_PORT_RECOVERY_TIMER_TMO,
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+ sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RECOVER:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ break;
+ case HW_EVENT_PORT_RESET_COMPLETE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ break;
+ case EVENT_BROADCAST_ASYNCH_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Unknown event type 0x%x\n", eventType));
+ break;
+ }
+ return 0;
+}
+
+/**
+ * mpi_phy_stop_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct phy_stop_resp *pPayload =
+ (struct phy_stop_resp *)(piomb + 4);
+ u32 status =
+ le32_to_cpu(pPayload->status);
+ u32 phyid =
+ le32_to_cpu(pPayload->phyid);
+ struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("phy:0x%x status:0x%x\n",
+ phyid, status));
+ if (status == 0)
+ phy->phy_state = 0;
+ return 0;
+}
+
+/**
+ * mpi_set_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ struct set_ctrl_cfg_resp *pPayload =
+ (struct set_ctrl_cfg_resp *)(piomb + 4);
+ u32 status = le32_to_cpu(pPayload->status);
+ u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+ status, err_qlfr_pgcd));
+
+ return 0;
+}
+
+/**
+ * mpi_get_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" pm80xx_addition_functionality\n"));
+
+ return 0;
+}
+
+/**
+ * mpi_get_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" pm80xx_addition_functionality\n"));
+
+ return 0;
+}
+
+/**
+ * mpi_flash_op_ext_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" pm80xx_addition_functionality\n"));
+
+ return 0;
+}
+
+/**
+ * mpi_set_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ u8 page_code;
+ struct set_phy_profile_resp *pPayload =
+ (struct set_phy_profile_resp *)(piomb + 4);
+ u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
+ u32 status = le32_to_cpu(pPayload->status);
+
+ page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
+ if (status) {
+ /* status is FAILED */
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("PhyProfile command failed with status "
+ "0x%08X \n", status));
+ return -1;
+ } else {
+ if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Invalid page code 0x%X\n",
+ page_code));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mpi_kek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
+
+ u32 status = le32_to_cpu(pPayload->status);
+ u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
+ u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
+
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+ status, kidx_new_curr_ksop, err_qlfr));
+
+ return 0;
+}
+
+/**
+ * mpi_dek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" pm80xx_addition_functionality\n"));
+
+ return 0;
+}
+
+/**
+ * ssp_coalesced_comp_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk(" pm80xx_addition_functionality\n"));
+
+ return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ __le32 pHeader = *(__le32 *)piomb;
+ u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
+
+ switch (opc) {
+ case OPC_OUB_ECHO:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+ break;
+ case OPC_OUB_HW_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_HW_EVENT\n"));
+ mpi_hw_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_THERM_HW_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
+ mpi_thermal_hw_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_COMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ mpi_ssp_completion(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SMP_COMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SMP_COMP\n"));
+ mpi_smp_completion(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_LOCAL_PHY_CNTRL:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+ pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEV_REGIST:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+ pm8001_mpi_reg_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEREG_DEV:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("unregister the device\n"));
+ pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_DEV_HANDLE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+ break;
+ case OPC_OUB_SATA_COMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SATA_COMP\n"));
+ mpi_sata_completion(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SATA_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+ mpi_sata_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+ mpi_ssp_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEV_HANDLE_ARRIV:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+ /*This is for target*/
+ break;
+ case OPC_OUB_SSP_RECV_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+ /*This is for target*/
+ break;
+ case OPC_OUB_FW_FLASH_UPDATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+ pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GPIO_RESPONSE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+ break;
+ case OPC_OUB_GPIO_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+ break;
+ case OPC_OUB_GENERAL_EVENT:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+ pm8001_mpi_general_event(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_ABORT_RSP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+ pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SATA_ABORT_RSP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+ pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SAS_DIAG_MODE_START_END:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+ break;
+ case OPC_OUB_SAS_DIAG_EXECUTE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+ break;
+ case OPC_OUB_GET_TIME_STAMP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+ break;
+ case OPC_OUB_SAS_HW_EVENT_ACK:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+ break;
+ case OPC_OUB_PORT_CONTROL:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+ break;
+ case OPC_OUB_SMP_ABORT_RSP:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+ pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_NVMD_DATA:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+ pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SET_NVMD_DATA:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+ pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+ break;
+ case OPC_OUB_SET_DEVICE_STATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+ pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_DEVICE_STATE:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+ break;
+ case OPC_OUB_SET_DEV_INFO:
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+ break;
+ /* spcv specifc commands */
+ case OPC_OUB_PHY_START_RESP:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+ mpi_phy_start_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_PHY_STOP_RESP:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+ mpi_phy_stop_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SET_CONTROLLER_CONFIG:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+ mpi_set_controller_config_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_CONTROLLER_CONFIG:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+ mpi_get_controller_config_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_GET_PHY_PROFILE:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+ mpi_get_phy_profile_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_FLASH_OP_EXT:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+ mpi_flash_op_ext_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SET_PHY_PROFILE:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+ mpi_set_phy_profile_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_KEK_MANAGEMENT_RESP:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+ mpi_kek_management_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_DEK_MANAGEMENT_RESP:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+ mpi_dek_management_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_COALESCED_COMP_RESP:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+ ssp_coalesced_comp_resp(pm8001_ha, piomb);
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+ break;
+ }
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+ struct outbound_queue_table *circularQ;
+ void *pMsg1 = NULL;
+ u8 uninitialized_var(bc);
+ u32 ret = MPI_IO_STATUS_FAIL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+ do {
+ ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+ if (MPI_IO_STATUS_SUCCESS == ret) {
+ /* process the outbound message */
+ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ /* free the message from the outbound circular buffer */
+ pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+ circularQ, bc);
+ }
+ if (MPI_IO_STATUS_BUSY == ret) {
+ /* Update the producer index from SPC */
+ circularQ->producer_index =
+ cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+ if (le32_to_cpu(circularQ->producer_index) ==
+ circularQ->consumer_idx)
+ /* OQ is empty */
+ break;
+ }
+ } while (1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+ [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+ [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
+ [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
+ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+};
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+ struct smp_req *psmp_cmd, int mode, int length)
+{
+ psmp_cmd->tag = hTag;
+ psmp_cmd->device_id = cpu_to_le32(deviceID);
+ if (mode == SMP_DIRECT) {
+ length = length - 4; /* subtract crc */
+ psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+ } else {
+ psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+ }
+}
+
+/**
+ * pm8001_chip_smp_req - send a SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ int elem, rc;
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct scatterlist *sg_req, *sg_resp;
+ u32 req_len, resp_len;
+ struct smp_req smp_cmd;
+ u32 opc;
+ struct inbound_queue_table *circularQ;
+ char *preq_dma_addr = NULL;
+ __le64 tmp_addr;
+ u32 i, length;
+
+ memset(&smp_cmd, 0, sizeof(smp_cmd));
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ resp_len = sg_dma_len(sg_resp);
+ /* must be in dwords */
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_2;
+ }
+
+ opc = OPC_INB_SMP_REQUEST;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+
+ length = sg_req->length;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("SMP Frame Length %d\n", sg_req->length));
+ if (!(length - 8))
+ pm8001_ha->smp_exp_mode = SMP_DIRECT;
+ else
+ pm8001_ha->smp_exp_mode = SMP_INDIRECT;
+
+
+ tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+ preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+
+ /* INDIRECT MODE command settings. Use DMA */
+ if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+ /* for SPCv indirect mode. Place the top 4 bytes of
+ * SMP Request header here. */
+ for (i = 0; i < 4; i++)
+ smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+ /* exclude top 4 bytes for SMP req header */
+ smp_cmd.long_smp_req.long_req_addr =
+ cpu_to_le64((u64)sg_dma_address
+ (&task->smp_task.smp_req) + 4);
+ /* exclude 4 bytes for SMP req header and CRC */
+ smp_cmd.long_smp_req.long_req_size =
+ cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
+ smp_cmd.long_smp_req.long_resp_addr =
+ cpu_to_le64((u64)sg_dma_address
+ (&task->smp_task.smp_resp));
+ smp_cmd.long_smp_req.long_resp_size =
+ cpu_to_le32((u32)sg_dma_len
+ (&task->smp_task.smp_resp)-4);
+ } else { /* DIRECT MODE */
+ smp_cmd.long_smp_req.long_req_addr =
+ cpu_to_le64((u64)sg_dma_address
+ (&task->smp_task.smp_req));
+ smp_cmd.long_smp_req.long_req_size =
+ cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+ smp_cmd.long_smp_req.long_resp_addr =
+ cpu_to_le64((u64)sg_dma_address
+ (&task->smp_task.smp_resp));
+ smp_cmd.long_smp_req.long_resp_size =
+ cpu_to_le32
+ ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+ }
+ if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+ for (i = 0; i < length; i++)
+ if (i < 16) {
+ smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Byte[%d]:%x (DMA data:%x)\n",
+ i, smp_cmd.smp_req16[i],
+ *(preq_dma_addr)));
+ } else {
+ smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Byte[%d]:%x (DMA data:%x)\n",
+ i, smp_cmd.smp_req[i],
+ *(preq_dma_addr)));
+ }
+ }
+
+ build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
+ &smp_cmd, pm8001_ha->smp_exp_mode, length);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ (u32 *)&smp_cmd, 0);
+ if (rc)
+ goto err_out_2;
+ return 0;
+
+err_out_2:
+ dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+err_out:
+ dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ return rc;
+}
+
+static int check_enc_sas_cmd(struct sas_task *task)
+{
+ u8 cmd = task->ssp_task.cmd->cmnd[0];
+
+ if (cmd == READ_10 || cmd == WRITE_10 || cmd == WRITE_VERIFY)
+ return 1;
+ else
+ return 0;
+}
+
+static int check_enc_sat_cmd(struct sas_task *task)
+{
+ int ret = 0;
+ switch (task->ata_task.fis.command) {
+ case ATA_CMD_FPDMA_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_READ:
+ case ATA_CMD_FPDMA_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_PIO_READ:
+ case ATA_CMD_PIO_READ_EXT:
+ case ATA_CMD_PIO_WRITE:
+ case ATA_CMD_PIO_WRITE_EXT:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * pm80xx_chip_ssp_io_req - send a SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ struct ssp_ini_io_start_req ssp_cmd;
+ u32 tag = ccb->ccb_tag;
+ int ret;
+ u64 phys_addr, start_addr, end_addr;
+ u32 end_addr_high, end_addr_low;
+ struct inbound_queue_table *circularQ;
+ u32 q_index;
+ u32 opc = OPC_INB_SSPINIIOSTART;
+ memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+ memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+ /* data address domain added for spcv; set to 0 by host,
+ * used internally by controller
+ * 0 for SAS 1.1 and SAS 2.0 compatible TLR
+ */
+ ssp_cmd.dad_dir_m_tlr =
+ cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
+ ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+ ssp_cmd.tag = cpu_to_le32(tag);
+ if (task->ssp_task.enable_first_burst)
+ ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+ ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+ ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+ memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
+ task->ssp_task.cmd->cmd_len);
+ q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
+
+ /* Check if encryption is set */
+ if (pm8001_ha->chip->encrypt &&
+ !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Encryption enabled.Sending Encrypt SAS command 0x%x\n",
+ task->ssp_task.cmd->cmnd[0]));
+ opc = OPC_INB_SSP_INI_DIF_ENC_IO;
+ /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
+ ssp_cmd.dad_dir_m_tlr = cpu_to_le32
+ ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
+
+ /* fill in PRD (scatter/gather) table, if any */
+ if (task->num_scatter > 1) {
+ pm8001_chip_make_sg(task->scatter,
+ ccb->n_elem, ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ ssp_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ } else if (task->num_scatter == 1) {
+ u64 dma_addr = sg_dma_address(task->scatter);
+ ssp_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ ssp_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(dma_addr));
+ ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.enc_esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + ssp_cmd.enc_len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != ssp_cmd.enc_addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.enc_len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ ssp_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ }
+ } else if (task->num_scatter == 0) {
+ ssp_cmd.enc_addr_low = 0;
+ ssp_cmd.enc_addr_high = 0;
+ ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.enc_esgl = 0;
+ }
+ /* XTS mode. All other fields are 0 */
+ ssp_cmd.key_cmode = 0x6 << 4;
+ /* set tweak values. Should be the start lba */
+ ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
+ (task->ssp_task.cmd->cmnd[3] << 16) |
+ (task->ssp_task.cmd->cmnd[4] << 8) |
+ (task->ssp_task.cmd->cmnd[5]));
+ } else {
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Sending Normal SAS command 0x%x inb q %x\n",
+ task->ssp_task.cmd->cmnd[0], q_index));
+ /* fill in PRD (scatter/gather) table, if any */
+ if (task->num_scatter > 1) {
+ pm8001_chip_make_sg(task->scatter, ccb->n_elem,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ ssp_cmd.addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.esgl = cpu_to_le32(1<<31);
+ } else if (task->num_scatter == 1) {
+ u64 dma_addr = sg_dma_address(task->scatter);
+ ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ ssp_cmd.addr_high =
+ cpu_to_le32(upper_32_bits(dma_addr));
+ ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + ssp_cmd.len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != ssp_cmd.addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ ssp_cmd.addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.esgl = cpu_to_le32(1<<31);
+ }
+ } else if (task->num_scatter == 0) {
+ ssp_cmd.addr_low = 0;
+ ssp_cmd.addr_high = 0;
+ ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+ ssp_cmd.esgl = 0;
+ }
+ }
+ q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ &ssp_cmd, q_index);
+ return ret;
+}
+
+static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+ u32 tag = ccb->ccb_tag;
+ int ret;
+ u32 q_index;
+ struct sata_start_req sata_cmd;
+ u32 hdr_tag, ncg_tag = 0;
+ u64 phys_addr, start_addr, end_addr;
+ u32 end_addr_high, end_addr_low;
+ u32 ATAP = 0x0;
+ u32 dir;
+ struct inbound_queue_table *circularQ;
+ unsigned long flags;
+ u32 opc = OPC_INB_SATA_HOST_OPSTART;
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
+
+ if (task->data_dir == PCI_DMA_NONE) {
+ ATAP = 0x04; /* no data*/
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+ } else if (likely(!task->ata_task.device_control_reg_update)) {
+ if (task->ata_task.dma_xfer) {
+ ATAP = 0x06; /* DMA */
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+ } else {
+ ATAP = 0x05; /* PIO*/
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+ }
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+ }
+ }
+ if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ ncg_tag = hdr_tag;
+ }
+ dir = data_dir_flags[task->data_dir] << 8;
+ sata_cmd.tag = cpu_to_le32(tag);
+ sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+
+ sata_cmd.sata_fis = task->ata_task.fis;
+ if (likely(!task->ata_task.device_control_reg_update))
+ sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+ sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+
+ /* Check if encryption is set */
+ if (pm8001_ha->chip->encrypt &&
+ !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
+ sata_cmd.sata_fis.command));
+ opc = OPC_INB_SATA_DIF_ENC_IO;
+
+ /* set encryption bit */
+ sata_cmd.ncqtag_atap_dir_m_dad =
+ cpu_to_le32(((ncg_tag & 0xff)<<16)|
+ ((ATAP & 0x3f) << 10) | 0x20 | dir);
+ /* dad (bit 0-1) is 0 */
+ /* fill in PRD (scatter/gather) table, if any */
+ if (task->num_scatter > 1) {
+ pm8001_chip_make_sg(task->scatter,
+ ccb->n_elem, ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
+ sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+ sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
+ } else if (task->num_scatter == 1) {
+ u64 dma_addr = sg_dma_address(task->scatter);
+ sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
+ sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+ sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.enc_esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + sata_cmd.enc_len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != sata_cmd.enc_addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low"
+ "=0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.enc_len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ sata_cmd.enc_addr_low =
+ lower_32_bits(phys_addr);
+ sata_cmd.enc_addr_high =
+ upper_32_bits(phys_addr);
+ sata_cmd.enc_esgl =
+ cpu_to_le32(1 << 31);
+ }
+ } else if (task->num_scatter == 0) {
+ sata_cmd.enc_addr_low = 0;
+ sata_cmd.enc_addr_high = 0;
+ sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.enc_esgl = 0;
+ }
+ /* XTS mode. All other fields are 0 */
+ sata_cmd.key_index_mode = 0x6 << 4;
+ /* set tweak values. Should be the start lba */
+ sata_cmd.twk_val0 =
+ cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
+ (sata_cmd.sata_fis.lbah << 16) |
+ (sata_cmd.sata_fis.lbam << 8) |
+ (sata_cmd.sata_fis.lbal));
+ sata_cmd.twk_val1 =
+ cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
+ (sata_cmd.sata_fis.lbam_exp));
+ } else {
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ "Sending Normal SATA command 0x%x inb %x\n",
+ sata_cmd.sata_fis.command, q_index));
+ /* dad (bit 0-1) is 0 */
+ sata_cmd.ncqtag_atap_dir_m_dad =
+ cpu_to_le32(((ncg_tag & 0xff)<<16) |
+ ((ATAP & 0x3f) << 10) | dir);
+
+ /* fill in PRD (scatter/gather) table, if any */
+ if (task->num_scatter > 1) {
+ pm8001_chip_make_sg(task->scatter,
+ ccb->n_elem, ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ sata_cmd.addr_low = lower_32_bits(phys_addr);
+ sata_cmd.addr_high = upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1 << 31);
+ } else if (task->num_scatter == 1) {
+ u64 dma_addr = sg_dma_address(task->scatter);
+ sata_cmd.addr_low = lower_32_bits(dma_addr);
+ sata_cmd.addr_high = upper_32_bits(dma_addr);
+ sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + sata_cmd.len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != sata_cmd.addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x"
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ sata_cmd.addr_low =
+ lower_32_bits(phys_addr);
+ sata_cmd.addr_high =
+ upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1 << 31);
+ }
+ } else if (task->num_scatter == 0) {
+ sata_cmd.addr_low = 0;
+ sata_cmd.addr_high = 0;
+ sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+ sata_cmd.esgl = 0;
+ }
+ /* scsi cdb */
+ sata_cmd.atapi_scsi_cdb[0] =
+ cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+ (task->ata_task.atapi_packet[1] << 8) |
+ (task->ata_task.atapi_packet[2] << 16) |
+ (task->ata_task.atapi_packet[3] << 24)));
+ sata_cmd.atapi_scsi_cdb[1] =
+ cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+ (task->ata_task.atapi_packet[5] << 8) |
+ (task->ata_task.atapi_packet[6] << 16) |
+ (task->ata_task.atapi_packet[7] << 24)));
+ sata_cmd.atapi_scsi_cdb[2] =
+ cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+ (task->ata_task.atapi_packet[9] << 8) |
+ (task->ata_task.atapi_packet[10] << 16) |
+ (task->ata_task.atapi_packet[11] << 24)));
+ sata_cmd.atapi_scsi_cdb[3] =
+ cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+ (task->ata_task.atapi_packet[13] << 8) |
+ (task->ata_task.atapi_packet[14] << 16) |
+ (task->ata_task.atapi_packet[15] << 24)));
+ }
+
+ /* Check for read log for failed drive and return */
+ if (sata_cmd.sata_fis.command == 0x2f) {
+ if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+ (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+ (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+ struct task_status_struct *ts;
+
+ pm8001_ha_dev->id &= 0xDFFFFFFF;
+ ts = &task->task_status;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAM_STAT_GOOD;
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((task->task_state_flags &
+ SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p resp 0x%x "
+ " stat 0x%x but aborted by upper layer "
+ "\n", task, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ return 0;
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
+ return 0;
+ }
+ }
+ }
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ &sata_cmd, q_index);
+ return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @num: the inbound queue number
+ * @phy_id: the phy id which we wanted to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+ struct phy_start_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 tag = 0x01;
+ u32 opcode = OPC_INB_PHYSTART;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(tag);
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+ /*
+ ** [0:7] PHY Identifier
+ ** [8:11] link rate 1.5G, 3G, 6G
+ ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+ ** [14] 0b disable spin up hold; 1b enable spin up hold
+ ** [15] ob no change in current PHY analig setup 1b enable using SPAST
+ */
+ if (!IS_SPCV_12G(pm8001_ha->pdev))
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | phy_id);
+ else
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
+ phy_id);
+
+ /* SSC Disable and SAS Analog ST configuration */
+ /**
+ payload.ase_sh_lm_slr_phyid =
+ cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+ phy_id);
+ Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
+ **/
+
+ payload.sas_identify.dev_type = SAS_END_DEVICE;
+ payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ memcpy(payload.sas_identify.sas_addr,
+ pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ payload.sas_identify.phy_id = phy_id;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ return ret;
+}
+
+/**
+ * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @num: the inbound queue number
+ * @phy_id: the phy id which we wanted to start up.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+ u8 phy_id)
+{
+ struct phy_stop_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 tag = 0x01;
+ u32 opcode = OPC_INB_PHYSTOP;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(tag);
+ payload.phy_id = cpu_to_le32(phy_id);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ return ret;
+}
+
+/**
+ * see comments on pm8001_mpi_reg_resp.
+ */
+static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *pm8001_dev, u32 flag)
+{
+ struct reg_dev_req payload;
+ u32 opc;
+ u32 stp_sspsmp_sata = 0x4;
+ struct inbound_queue_table *circularQ;
+ u32 linkrate, phy_id;
+ int rc, tag = 0xdeadbeef;
+ struct pm8001_ccb_info *ccb;
+ u8 retryFlag = 0x1;
+ u16 firstBurstSize = 0;
+ u16 ITNT = 2000;
+ struct domain_device *dev = pm8001_dev->sas_device;
+ struct domain_device *parent_dev = dev->parent;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return rc;
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->device = pm8001_dev;
+ ccb->ccb_tag = tag;
+ payload.tag = cpu_to_le32(tag);
+
+ if (flag == 1) {
+ stp_sspsmp_sata = 0x02; /*direct attached sata */
+ } else {
+ if (pm8001_dev->dev_type == SAS_SATA_DEV)
+ stp_sspsmp_sata = 0x00; /* stp*/
+ else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+ pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+ pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ stp_sspsmp_sata = 0x01; /*ssp or smp*/
+ }
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+ else
+ phy_id = pm8001_dev->attached_phy;
+
+ opc = OPC_INB_REG_DEV;
+
+ linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+ pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+
+ payload.phyid_portid =
+ cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+ ((phy_id & 0xFF) << 8));
+
+ payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+ ((linkrate & 0x0F) << 24) |
+ ((stp_sspsmp_sata & 0x03) << 28));
+ payload.firstburstsize_ITNexustimeout =
+ cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+
+ memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+ SAS_ADDR_SIZE);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
+}
+
+/**
+ * pm80xx_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @num: the inbound queue number
+ * @phy_id: the phy id which we wanted to operate
+ * @phy_op:
+ */
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ u32 phyId, u32 phy_op)
+{
+ struct local_phy_ctl_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+ memset(&payload, 0, sizeof(payload));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(1);
+ payload.phyop_phyid =
+ cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return ret;
+}
+
+static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 value;
+#ifdef PM8001_USE_MSIX
+ return 1;
+#endif
+ value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+ if (value)
+ return 1;
+ return 0;
+
+}
+
+/**
+ * pm8001_chip_isr - PM8001 isr handler.
+ * @pm8001_ha: our hba card information.
+ * @irq: irq number.
+ * @stat: stat.
+ */
+static irqreturn_t
+pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+ pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+ process_oq(pm8001_ha, vec);
+ pm80xx_chip_interrupt_enable(pm8001_ha, vec);
+ return IRQ_HANDLED;
+}
+
+void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
+ u32 operation, u32 phyid, u32 length, u32 *buf)
+{
+ u32 tag , i, j = 0;
+ int rc;
+ struct set_phy_profile_req payload;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SET_PHY_PROFILE;
+
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk(" phy profile command for phy %x ,length is %d\n",
+ payload.ppc_phyid, length));
+ for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
+ payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
+ j++;
+ }
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+}
+
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+ u32 length, u8 *buf)
+{
+ u32 page_code, i;
+
+ page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ mpi_set_phy_profile_req(pm8001_ha,
+ SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
+ length = length + PHY_DWORD_LENGTH;
+ }
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+}
+const struct pm8001_dispatch pm8001_80xx_dispatch = {
+ .name = "pmc80xx",
+ .chip_init = pm80xx_chip_init,
+ .chip_soft_rst = pm80xx_chip_soft_rst,
+ .chip_rst = pm80xx_hw_chip_rst,
+ .chip_iounmap = pm8001_chip_iounmap,
+ .isr = pm80xx_chip_isr,
+ .is_our_interupt = pm80xx_chip_is_our_interupt,
+ .isr_process_oq = process_oq,
+ .interrupt_enable = pm80xx_chip_interrupt_enable,
+ .interrupt_disable = pm80xx_chip_interrupt_disable,
+ .make_prd = pm8001_chip_make_sg,
+ .smp_req = pm80xx_chip_smp_req,
+ .ssp_io_req = pm80xx_chip_ssp_io_req,
+ .sata_req = pm80xx_chip_sata_req,
+ .phy_start_req = pm80xx_chip_phy_start_req,
+ .phy_stop_req = pm80xx_chip_phy_stop_req,
+ .reg_dev_req = pm80xx_chip_reg_dev_req,
+ .dereg_dev_req = pm8001_chip_dereg_dev_req,
+ .phy_ctl_req = pm80xx_chip_phy_ctl_req,
+ .task_abort = pm8001_chip_abort_task,
+ .ssp_tm_req = pm8001_chip_ssp_tm_req,
+ .get_nvmd_req = pm8001_chip_get_nvmd_req,
+ .set_nvmd_req = pm8001_chip_set_nvmd_req,
+ .fw_flash_update_req = pm8001_chip_fw_flash_update_req,
+ .set_dev_state_req = pm8001_chip_set_dev_state_req,
+};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644
index 000000000..9970a3857
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -0,0 +1,1532 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO 1 /* 0x000 */
+#define OPC_INB_PHYSTART 4 /* 0x004 */
+#define OPC_INB_PHYSTOP 5 /* 0x005 */
+#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */
+#define OPC_INB_SSPINITMSTART 7 /* 0x007 */
+/* 0x8 RESV IN SPCv */
+#define OPC_INB_RSVD 8 /* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */
+#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */
+/* 0xC, 0xD, 0xE removed in SPCv */
+#define OPC_INB_SSP_ABORT 15 /* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */
+#define OPC_INB_SMP_REQUEST 18 /* 0x012 */
+/* 0x13 SMP_RESPONSE is removed in SPCv */
+#define OPC_INB_SMP_ABORT 20 /* 0x014 */
+/* 0x16 RESV IN SPCv */
+#define OPC_INB_RSVD1 22 /* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */
+#define OPC_INB_SATA_ABORT 24 /* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */
+/* 0x1A RESV IN SPCv */
+#define OPC_INB_RSVD2 26 /* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */
+#define OPC_INB_GPIO 34 /* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */
+/* 0x25 RESV IN SPCv */
+#define OPC_INB_RSVD3 37 /* 0x025 */
+#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */
+#define OPC_INB_PORT_CONTROL 39 /* 0x027 */
+#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */
+#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */
+#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */
+/* 0x2D RESV IN SPCv */
+#define OPC_INB_RSVD4 45 /* 0x02D */
+#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */
+#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */
+#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */
+#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */
+#define OPC_INB_REG_DEV 50 /* 0x032 */
+#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */
+#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */
+#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */
+#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */
+#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */
+#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */
+#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */
+#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */
+#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO 1 /* 0x001 */
+#define OPC_OUB_RSVD 4 /* 0x004 */
+#define OPC_OUB_SSP_COMP 5 /* 0x005 */
+#define OPC_OUB_SMP_COMP 6 /* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */
+#define OPC_OUB_RSVD1 10 /* 0x00A */
+#define OPC_OUB_DEREG_DEV 11 /* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */
+#define OPC_OUB_SATA_COMP 13 /* 0x00D */
+#define OPC_OUB_SATA_EVENT 14 /* 0x00E */
+#define OPC_OUB_SSP_EVENT 15 /* 0x00F */
+#define OPC_OUB_RSVD2 16 /* 0x010 */
+/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
+#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */
+#define OPC_OUB_RSVD3 19 /* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */
+#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */
+#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */
+#define OPC_OUB_RSVD4 31 /* 0x01F */
+#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */
+#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */
+#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */
+#define OPC_OUB_RSVD5 41 /* 0x029 */
+#define OPC_OUB_HW_EVENT 1792 /* 0x700 */
+#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */
+#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */
+#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */
+#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */
+#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */
+#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */
+#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */
+/* spcv specific commands */
+#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */
+#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */
+#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */
+#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */
+#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */
+#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */
+#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */
+#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */
+#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */
+#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */
+
+/* for phy start*/
+#define SSC_DISABLE_15 (0x01 << 16)
+#define SSC_DISABLE_30 (0x02 << 16)
+#define SSC_DISABLE_60 (0x04 << 16)
+#define SAS_ASE (0x01 << 15)
+#define SPINHOLD_DISABLE (0x00 << 14)
+#define SPINHOLD_ENABLE (0x01 << 14)
+#define LINKMODE_SAS (0x01 << 12)
+#define LINKMODE_DSATA (0x02 << 12)
+#define LINKMODE_AUTO (0x03 << 12)
+#define LINKRATE_15 (0x01 << 8)
+#define LINKRATE_30 (0x02 << 8)
+#define LINKRATE_60 (0x06 << 8)
+#define LINKRATE_120 (0x08 << 8)
+
+/* phy_profile */
+#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
+#define PHY_DWORD_LENGTH 0xC
+
+/* Thermal related */
+#define THERMAL_ENABLE 0x1
+#define THERMAL_LOG_ENABLE 0x1
+#define THERMAL_OP_CODE 0x6
+#define LTEMPHIL 70
+#define RTEMPHIL 100
+
+/* Encryption info */
+#define SCRATCH_PAD3_ENC_DISABLED 0x00000000
+#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001
+#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002
+#define SCRATCH_PAD3_ENC_READY 0x00000003
+#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY
+
+#define SCRATCH_PAD3_XTS_ENABLED (1 << 14)
+#define SCRATCH_PAD3_SMA_ENABLED (1 << 4)
+#define SCRATCH_PAD3_SMB_ENABLED (1 << 5)
+#define SCRATCH_PAD3_SMF_ENABLED 0
+#define SCRATCH_PAD3_SM_MASK 0x000000F0
+#define SCRATCH_PAD3_ERR_CODE 0x00FF0000
+
+#define SEC_MODE_SMF 0x0
+#define SEC_MODE_SMA 0x100
+#define SEC_MODE_SMB 0x200
+#define CIPHER_MODE_ECB 0x00000001
+#define CIPHER_MODE_XTS 0x00000002
+#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4
+
+/* SAS protocol timer configuration page */
+#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04
+#define STP_MCT_TMO 32
+#define SSP_MCT_TMO 32
+#define SAS_MAX_OPEN_TIME 5
+#define SMP_MAX_CONN_TIMER 0xFF
+#define STP_FRM_TIMER 0
+#define STP_IDLE_TIME 5 /* 5 us; controller default */
+#define SAS_MFD 0
+#define SAS_OPNRJT_RTRY_INTVL 2
+#define SAS_DOPNRJT_RTRY_TMO 128
+#define SAS_COPNRJT_RTRY_TMO 128
+
+/* for phy state */
+#define PHY_STATE_LINK_UP_SPCV 0x2
+/*
+ Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
+ Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
+ is DOPNRJT_RTRY_TMO
+*/
+#define SAS_DOPNRJT_RTRY_THR 23438
+#define SAS_COPNRJT_RTRY_THR 23438
+#define SAS_MAX_AIP 0x200000
+#define IT_NEXUS_TIMEOUT 0x7D0
+#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
+
+struct mpi_msg_hdr {
+ __le32 header; /* Bits [11:0] - Message operation code */
+ /* Bits [15:12] - Message Category */
+ /* Bits [21:16] - Outboundqueue ID for the
+ operation completion message */
+ /* Bits [23:22] - Reserved */
+ /* Bits [28:24] - Buffer Count, indicates how
+ many buffer are allocated for the massage */
+ /* Bits [30:29] - Reserved */
+ /* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to describe enable the phy (128 bytes)
+ */
+struct phy_start_req {
+ __le32 tag;
+ __le32 ase_sh_lm_slr_phyid;
+ struct sas_identify_frame sas_identify; /* 28 Bytes */
+ __le32 spasti;
+ u32 reserved[21];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to disable the phy (128 bytes)
+ */
+struct phy_stop_req {
+ __le32 tag;
+ __le32 phy_id;
+ u32 reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+ u8 fis_type; /* 0xA1*/
+ u8 n_i_pmport;
+ /* b7 : n Bit. Notification bit. If set device needs attention. */
+ /* b6 : i Bit. Interrupt Bit */
+ /* b5-b4: reserved2 */
+ /* b3-b0: PM Port */
+ u8 status;
+ u8 error;
+ u32 _r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+ u8 fis_type; /* 0x5f */
+ u8 i_d_pmPort;
+ /* b7 : reserved */
+ /* b6 : i bit. Interrupt bit */
+ /* b5 : d bit. data transfer direction. set to 1 for device to host
+ xfer */
+ /* b4 : reserved */
+ /* b3-b0: PM Port */
+ u8 status;
+ u8 error;
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+ u8 device;
+ u8 lbal_exp;
+ u8 lbam_exp;
+ u8 lbah_exp;
+ u8 _r_a;
+ u8 sector_count;
+ u8 sector_count_exp;
+ u8 _r_b;
+ u8 e_status;
+ u8 _r_c[2];
+ u8 transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ u32 sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * use to alert the host about the hardware event(64 bytes)
+ */
+/* updated outbound struct for spcv */
+
+struct hw_event_resp {
+ __le32 lr_status_evt_portid;
+ __le32 evt_param;
+ __le32 phyid_npip_portstate;
+ struct sas_identify_frame sas_identify;
+ struct dev_to_host_fis sata_fis;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure for thermal event notification
+ */
+
+struct thermal_hw_event {
+ __le32 thermal_event;
+ __le32 rht_lht;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+ __le32 tag;
+ __le32 phyid_portid;
+ __le32 dtype_dlr_mcn_ir_retry;
+ __le32 firstburstsize_ITNexustimeout;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ __le32 upper_device_id;
+ u32 reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+ __le32 tag;
+ __le32 device_id;
+ u32 reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+struct dev_reg_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 device_id;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+ __le32 tag;
+ __le32 phyop_phyid;
+ u32 reserved1[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+ struct local_phy_ctl_resp {
+ __le32 tag;
+ __le32 phyop_phyid;
+ __le32 status;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+ __le32 tag;
+ __le32 portop_portid;
+ __le32 param0;
+ __le32 param1;
+ u32 reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+struct hw_event_ack_req {
+ __le32 tag;
+ __le32 phyid_sea_portid;
+ __le32 param0;
+ __le32 param1;
+ u32 reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_START Response Command
+ * indicates the completion of PHY_START command (64 bytes)
+ */
+struct phy_start_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 phyid;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_STOP Response Command
+ * indicates the completion of PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 phyid;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ __le32 ssptag_rescv_rescpad;
+ struct ssp_response_iu ssp_resp_iu;
+ __le32 residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT 0x00010000
+
+/*
+ * brief the data structure of SATA EVNET response
+ * use to indicate a SATA Completion (64 bytes)
+ */
+struct sata_event_resp {
+ __le32 tag;
+ __le32 event;
+ __le32 port_id;
+ __le32 device_id;
+ u32 reserved;
+ __le32 event_param0;
+ __le32 event_param1;
+ __le32 sata_addr_h32;
+ __le32 sata_addr_l32;
+ __le32 e_udt1_udt0_crc;
+ __le32 e_udt5_udt4_udt3_udt2;
+ __le32 a_udt1_udt0_crc;
+ __le32 a_udt5_udt4_udt3_udt2;
+ __le32 hwdevid_diferr;
+ __le32 err_framelen_byteoffset;
+ __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVNET esponse
+ * use to indicate a SSP Completion (64 bytes)
+ */
+struct ssp_event_resp {
+ __le32 tag;
+ __le32 event;
+ __le32 port_id;
+ __le32 device_id;
+ __le32 ssp_tag;
+ __le32 event_param0;
+ __le32 event_param1;
+ __le32 sas_addr_h32;
+ __le32 sas_addr_l32;
+ __le32 e_udt1_udt0_crc;
+ __le32 e_udt5_udt4_udt3_udt2;
+ __le32 a_udt1_udt0_crc;
+ __le32 a_udt5_udt4_udt3_udt2;
+ __le32 hwdevid_diferr;
+ __le32 err_framelen_byteoffset;
+ __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+ __le32 status;
+ __le32 inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD 14
+#define OPCODE_BITS 0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 len_ip_ir;
+ /* Bits [0] - Indirect response */
+ /* Bits [1] - Indirect Payload */
+ /* Bits [15:2] - Reserved */
+ /* Bits [23:16] - direct payload Len */
+ /* Bits [31:24] - Reserved */
+ u8 smp_req16[16];
+ union {
+ u8 smp_req[32];
+ struct {
+ __le64 long_req_addr;/* sg dma address, LE */
+ __le32 long_req_size;/* LE */
+ u32 _r_a;
+ __le64 long_resp_addr;/* sg dma address, LE */
+ __le32 long_resp_size;/* LE */
+ u32 _r_b;
+ } long_smp_req;/* sequencer extension */
+ };
+ __le32 rsvd[16];
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 param;
+ u8 _r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ *brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 tag_to_abort;
+ __le32 abort_all;
+ u32 reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK 0x3
+#define ABORT_SINGLE 0x0
+#define ABORT_ALL 0x1
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
+ */
+struct task_abort_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 scp;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+ __le32 tag;
+ __le32 operation_phyid;
+ u32 reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+ __le32 tag;
+ __le32 cmdtype_cmddesc_phyid;
+ __le32 pat1_pat2;
+ __le32 threshold;
+ __le32 codepat_errmsk;
+ __le32 pmon;
+ __le32 pERF1CTL;
+ u32 reserved[24];
+} __attribute__((packed, aligned(4)));
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * use to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 nds;
+ u32 reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * use to describe MPI SATA IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+
+struct sata_start_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 data_len;
+ __le32 ncqtag_atap_dir_m_dad;
+ struct host_to_dev_fis sata_fis;
+ u32 reserved1;
+ u32 reserved2; /* dword 11. rsvd for normal I/O. */
+ /* EPLE Descl for enc I/O */
+ u32 addr_low; /* dword 12. rsvd for enc I/O */
+ u32 addr_high; /* dword 13. reserved for enc I/O */
+ __le32 len; /* dword 14: length for normal I/O. */
+ /* EPLE Desch for enc I/O */
+ __le32 esgl; /* dword 15. rsvd for enc I/O */
+ __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */
+ /* The below fields are reserved for normal I/O */
+ __le32 key_index_mode; /* dword 20 */
+ __le32 sector_cnt_enss;/* dword 21 */
+ __le32 keytagl; /* dword 22 */
+ __le32 keytagh; /* dword 23 */
+ __le32 twk_val0; /* dword 24 */
+ __le32 twk_val1; /* dword 25 */
+ __le32 twk_val2; /* dword 26 */
+ __le32 twk_val3; /* dword 27 */
+ __le32 enc_addr_low; /* dword 28. Encryption SGL address high */
+ __le32 enc_addr_high; /* dword 29. Encryption SGL address low */
+ __le32 enc_len; /* dword 30. Encryption length */
+ __le32 enc_esgl; /* dword 31. Encryption esgl bit */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * use to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 relate_tag;
+ __le32 tmf;
+ u8 lun[8];
+ __le32 ds_ads_m;
+ u32 reserved[24];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_info_unit {
+ u8 lun[8];/* SCSI Logical Unit Number */
+ u8 reserved1;/* reserved */
+ u8 efb_prio_attr;
+ /* B7 : enabledFirstBurst */
+ /* B6-3 : taskPriority */
+ /* B2-0 : taskAttribute */
+ u8 reserved2; /* reserved */
+ u8 additional_cdb_len;
+ /* B7-2 : additional_cdb_len */
+ /* B1-0 : reserved */
+ u8 cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * use to describe MPI SSP INI IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+struct ssp_ini_io_start_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 data_len;
+ __le32 dad_dir_m_tlr;
+ struct ssp_info_unit ssp_iu;
+ __le32 addr_low; /* dword 12: sgl low for normal I/O. */
+ /* epl_descl for encryption I/O */
+ __le32 addr_high; /* dword 13: sgl hi for normal I/O */
+ /* dpl_descl for encryption I/O */
+ __le32 len; /* dword 14: len for normal I/O. */
+ /* edpl_desch for encryption I/O */
+ __le32 esgl; /* dword 15: ESGL bit for normal I/O. */
+ /* user defined tag mask for enc I/O */
+ /* The below fields are reserved for normal I/O */
+ u8 udt[12]; /* dword 16-18 */
+ __le32 sectcnt_ios; /* dword 19 */
+ __le32 key_cmode; /* dword 20 */
+ __le32 ks_enss; /* dword 21 */
+ __le32 keytagl; /* dword 22 */
+ __le32 keytagh; /* dword 23 */
+ __le32 twk_val0; /* dword 24 */
+ __le32 twk_val1; /* dword 25 */
+ __le32 twk_val2; /* dword 26 */
+ __le32 twk_val3; /* dword 27 */
+ __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */
+ __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */
+ __le32 enc_len; /* dword 30: Encryption length */
+ __le32 enc_esgl; /* dword 31: ESGL bit for encryption */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
+ * use to initiate SSP I/O operation with optional DIF/ENC
+ */
+struct ssp_dif_enc_io_req {
+ __le32 tag;
+ __le32 device_id;
+ __le32 data_len;
+ __le32 dirMTlr;
+ __le32 sspiu0;
+ __le32 sspiu1;
+ __le32 sspiu2;
+ __le32 sspiu3;
+ __le32 sspiu4;
+ __le32 sspiu5;
+ __le32 sspiu6;
+ __le32 epl_des;
+ __le32 dpl_desl_ndplr;
+ __le32 dpl_desh;
+ __le32 uum_uuv_bss_difbits;
+ u8 udt[12];
+ __le32 sectcnt_ios;
+ __le32 key_cmode;
+ __le32 ks_enss;
+ __le32 keytagl;
+ __le32 keytagh;
+ __le32 twk_val0;
+ __le32 twk_val1;
+ __le32 twk_val2;
+ __le32 twk_val3;
+ __le32 addr_low;
+ __le32 addr_high;
+ __le32 len;
+ __le32 esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Firmware download
+ * use to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+ __le32 tag;
+ __le32 cur_image_offset;
+ __le32 cur_image_len;
+ __le32 total_image_len;
+ u32 reserved0[7];
+ __le32 sgl_addr_lo;
+ __le32 sgl_addr_hi;
+ __le32 len;
+ __le32 ext_reserved;
+ u32 reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+ struct fw_flash_Update_resp {
+ __le32 tag;
+ __le32 status;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * use to get data from NVM in HBA(64 bytes)
+ */
+struct get_nvm_data_req {
+ __le32 tag;
+ __le32 len_ir_vpdd;
+ __le32 vpd_offset;
+ u32 reserved[8];
+ __le32 resp_addr_lo;
+ __le32 resp_addr_hi;
+ __le32 resp_len;
+ u32 reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+struct set_nvm_data_req {
+ __le32 tag;
+ __le32 len_ir_vpdd;
+ __le32 vpd_offset;
+ u32 reserved[8];
+ __le32 resp_addr_lo;
+ __le32 resp_addr_hi;
+ __le32 resp_len;
+ u32 reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_req {
+ __le32 tag;
+ __le32 cfg_pg[14];
+ u32 reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET CONTROLLER CONFIG COMMAND
+ * use to get controller configuration page
+ */
+struct get_ctrl_cfg_req {
+ __le32 tag;
+ __le32 pgcd;
+ __le32 int_vec;
+ u32 reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for KEK_MANAGEMENT COMMAND
+ * use for KEK management
+ */
+struct kek_mgmt_req {
+ __le32 tag;
+ __le32 new_curidx_ksop;
+ u32 reserved;
+ __le32 kblob[12];
+ u32 reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * use for DEK management
+ */
+struct dek_mgmt_req {
+ __le32 tag;
+ __le32 kidx_dsop;
+ __le32 dekidx;
+ __le32 addr_l;
+ __le32 addr_h;
+ __le32 nent;
+ __le32 dbf_tblsize;
+ u32 reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * use to retrive phy specific information
+ */
+struct set_phy_profile_req {
+ __le32 tag;
+ __le32 ppc_phyid;
+ u32 reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * use to retrive phy specific information
+ */
+struct get_phy_profile_req {
+ __le32 tag;
+ __le32 ppc_phyid;
+ __le32 profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * use to manage ext flash partition
+ */
+struct ext_flash_partition_req {
+ __le32 tag;
+ __le32 cmd;
+ __le32 offset;
+ __le32 len;
+ u32 reserved[7];
+ __le32 addr_low;
+ __le32 addr_high;
+ __le32 len1;
+ __le32 ext;
+ u32 reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE 0x0
+#define C_SEEPROM 0x1
+#define VPD_FLASH 0x4
+#define AAP1_RDUMP 0x5
+#define IOP_RDUMP 0x6
+#define EXPAN_ROM 0x7
+
+#define IPMode 0x80000000
+#define NVMD_TYPE 0x0000000F
+#define NVMD_STAT 0x0000FFFF
+#define NVMD_LEN 0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+ __le32 tag;
+ __le32 ir_tda_bn_dps_das_nvm;
+ __le32 dlen_status;
+ __le32 nvm_data[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+ __le32 tag;
+ __le32 status;
+ u32 reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+ __le32 tag;
+ __le32 cmdtype_cmddesc_phyid;
+ __le32 Status;
+ __le32 ReportData;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 device_id;
+ __le32 pds_nds;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 err_qlfr_pgcd;
+ u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 err_qlfr;
+ __le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 kidx_new_curr_ksop;
+ __le32 err_qlfr;
+ u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 kekidx_tbls_dsop;
+ __le32 dekidx;
+ __le32 err_qlfr;
+ u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct get_phy_profile_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 ppc_phyid;
+ __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct flash_op_ext_resp {
+ __le32 tag;
+ __le32 cmd;
+ __le32 status;
+ __le32 epart_size;
+ __le32 epart_sect_size;
+ u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct set_phy_profile_resp {
+ __le32 tag;
+ __le32 status;
+ __le32 ppc_phyid;
+ __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_coalesced_comp_resp {
+ __le32 coal_cnt;
+ __le32 tag0;
+ __le32 ssp_tag0;
+ __le32 tag1;
+ __le32 ssp_tag1;
+ __le32 add_tag_ssp_tag[10];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - ends */
+
+/* brief data structure for SAS protocol timer configuration page.
+ *
+ */
+struct SASProtocolTimerConfig {
+ __le32 pageCode; /* 0 */
+ __le32 MST_MSI; /* 1 */
+ __le32 STP_SSP_MCT_TMO; /* 2 */
+ __le32 STP_FRM_TMO; /* 3 */
+ __le32 STP_IDLE_TMO; /* 4 */
+ __le32 OPNRJT_RTRY_INTVL; /* 5 */
+ __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */
+ __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */
+ __le32 MAX_AIP; /* 8 */
+} __attribute__((packed, aligned(4)));
+
+typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START 0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE 0x02
+#define HW_EVENT_PHY_STOP_STATUS 0x03
+#define HW_EVENT_SAS_PHY_UP 0x04
+#define HW_EVENT_SATA_PHY_UP 0x05
+#define HW_EVENT_SATA_SPINUP_HOLD 0x06
+#define HW_EVENT_PHY_DOWN 0x07
+#define HW_EVENT_PORT_INVALID 0x08
+#define HW_EVENT_BROADCAST_CHANGE 0x09
+#define HW_EVENT_PHY_ERROR 0x0A
+#define HW_EVENT_BROADCAST_SES 0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR 0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED 0x0D
+#define HW_EVENT_MALFUNCTION 0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F
+#define HW_EVENT_BROADCAST_EXP 0x10
+#define HW_EVENT_PHY_START_STATUS 0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17
+#define HW_EVENT_PORT_RECOVER 0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19
+#define HW_EVENT_PORT_RESET_COMPLETE 0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT 0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED 0x00
+#define PORT_VALID 0x01
+#define PORT_LOSTCOMM 0x02
+#define PORT_IN_RESET 0x04
+#define PORT_3RD_PARTY_RESET 0x07
+#define PORT_INVALID 0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS 0x00
+#define IO_ABORTED 0x01
+#define IO_OVERFLOW 0x02
+#define IO_UNDERFLOW 0x03
+#define IO_FAILED 0x04
+#define IO_ABORT_RESET 0x05
+#define IO_NOT_VALID 0x06
+#define IO_NO_DEVICE 0x07
+#define IO_ILLEGAL_PARAMETER 0x08
+#define IO_LINK_FAILURE 0x09
+#define IO_PROG_ERROR 0x0A
+
+#define IO_EDC_IN_ERROR 0x0B
+#define IO_EDC_OUT_ERROR 0x0C
+#define IO_ERROR_HW_TIMEOUT 0x0D
+#define IO_XFER_ERROR_BREAK 0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY 0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11
+#define IO_OPEN_CNX_ERROR_BREAK 0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17
+/* This error code 0x18 is not used on SPCv */
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18
+#define IO_XFER_ERROR_NAK_RECEIVED 0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A
+#define IO_XFER_ERROR_PEER_ABORTED 0x1B
+#define IO_XFER_ERROR_RX_FRAME 0x1C
+#define IO_XFER_ERROR_DMA 0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F
+#define IO_XFER_ERROR_SATA 0x20
+
+/* This error code 0x22 is not used on SPCv */
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24
+/* This error code 0x25 is not used on SPCv */
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30
+
+/* The following error code 0x31 and 0x32 are not using (obsolete) */
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35
+#define IO_XFER_CMD_FRAME_ISSUED 0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37
+#define IO_PORT_IN_RESET 0x38
+#define IO_DS_NON_OPERATIONAL 0x39
+#define IO_DS_IN_RECOVERY 0x3A
+#define IO_TM_TAG_NOT_FOUND 0x3B
+#define IO_XFER_PIO_SETUP_ERROR 0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D
+#define IO_DS_IN_ERROR 0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F
+#define IO_ABORT_IN_PROGRESS 0x40
+#define IO_ABORT_DELAYED 0x41
+#define IO_INVALID_LENGTH 0x42
+
+/********** additional response event values *****************/
+
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43
+#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48
+#define IO_DS_INVALID 0x49
+/* WARNING: the value is not contiguous from here */
+#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52
+#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53
+#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54
+#define MPI_IO_RQE_BUSY_FULL 0x55
+#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56
+#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57
+#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58
+
+#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004
+#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024
+
+#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040
+/*
+ * An encryption IO request failed due to DEK Key Tag mismatch.
+ * The key tag supplied in the encryption IOMB does not match with
+ * the Key Tag in the referenced DEK Entry.
+ */
+#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041
+#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042
+/*
+ * An encryption I/O request failed because the initial value (IV)
+ * in the unwrapped DEK blob didn't match the IV used to unwrap it.
+ */
+#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_INTERNAL_RAM 0x2045
+/*
+ * An encryption I/O request failed
+ * because the DEK index specified in the I/O was outside the bounds of
+ * the total number of entries in the host DEK table.
+ */
+#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046
+
+/* define DIF IO response error status code */
+#define IO_XFR_ERROR_DIF_MISMATCH 0x3000
+#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001
+#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002
+#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003
+
+/* define operator management response status and error qualifier code */
+#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060
+#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063
+#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064
+#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022
+#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023
+/***************** additional response event values ***************/
+
+/* WARNING: This error code must always be the last number.
+ * If you add error code, modify this code also
+ * It is used as an index
+ */
+#define IO_ERROR_UNKNOWN_GENERIC 0x2023
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001
+#define SPCv_MSGU_CFG_TABLE_RESET 0x002
+#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008
+#define MSGU_IBDB_SET 0x00
+#define MSGU_HOST_INT_STATUS 0x08
+#define MSGU_HOST_INT_MASK 0x0C
+#define MSGU_IOPIB_INT_STATUS 0x18
+#define MSGU_IOPIB_INT_MASK 0x1C
+#define MSGU_IBDB_CLEAR 0x20
+
+#define MSGU_MSGU_CONTROL 0x24
+#define MSGU_ODR 0x20
+#define MSGU_ODCR 0x28
+
+#define MSGU_ODMR 0x30
+#define MSGU_ODMR_U 0x34
+#define MSGU_ODMR_CLR 0x38
+#define MSGU_ODMR_CLR_U 0x3C
+#define MSGU_OD_RSVD 0x40
+
+#define MSGU_SCRATCH_PAD_0 0x44
+#define MSGU_SCRATCH_PAD_1 0x48
+#define MSGU_SCRATCH_PAD_2 0x4C
+#define MSGU_SCRATCH_PAD_3 0x50
+#define MSGU_HOST_SCRATCH_PAD_0 0x54
+#define MSGU_HOST_SCRATCH_PAD_1 0x58
+#define MSGU_HOST_SCRATCH_PAD_2 0x5C
+#define MSGU_HOST_SCRATCH_PAD_3 0x60
+#define MSGU_HOST_SCRATCH_PAD_4 0x64
+#define MSGU_HOST_SCRATCH_PAD_5 0x68
+#define MSGU_HOST_SCRATCH_PAD_6 0x6C
+#define MSGU_HOST_SCRATCH_PAD_7 0x70
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all
+ interrupt vector */
+#define ODMR_CLEAR_ALL 0 /* clear all
+ interrupt vector */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all
+ interrupt vector*/
+/* MSIX Interupts */
+#define MSIX_TABLE_OFFSET 0x2000
+#define MSIX_TABLE_ELEMENT_SIZE 0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC
+#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \
+ MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE 0x1
+#define MSIX_INTERRUPT_ENABLE 0x0
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY 0x3
+#define SCRATCH_PAD_ILA_READY 0xC
+#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
+#define SCRATCH_PAD_IOP0_READY 0xC00
+#define SCRATCH_PAD_IOP1_READY 0x3000
+
+/* boot loader state */
+#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
+#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */
+#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */
+#define SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */
+
+ /* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR 0x00 /* power on state */
+#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */
+#define SCRATCH_PAD2_ERR 0x02 /* error state */
+#define SCRATCH_PAD2_RDY 0x03 /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */
+#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2
+ Mask, bit1-0 State */
+#define SCRATCH_PAD2_RESERVED 0x000003FC/* Scratch Pad1
+ Reserved bit 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */
+#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */
+#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */
+
+/* 0x28 - 0x4C - RSVD */
+#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */
+#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */
+#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */
+#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */
+#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */
+#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
+#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */
+
+#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
+#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */
+#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */
+
+/* Gereral Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET 0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET 0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET 0x08
+#define GST_MSGUTCNT_OFFSET 0x0C
+#define GST_IOPTCNT_OFFSET 0x10
+/* 0x14 - 0x34 - RSVD */
+#define GST_GPIO_INPUT_VAL 0x38
+/* 0x3c - 0x40 - RSVD */
+#define GST_RERRINFO_OFFSET0 0x44
+#define GST_RERRINFO_OFFSET1 0x48
+#define GST_RERRINFO_OFFSET2 0x4c
+#define GST_RERRINFO_OFFSET3 0x50
+#define GST_RERRINFO_OFFSET4 0x54
+#define GST_RERRINFO_OFFSET5 0x58
+#define GST_RERRINFO_OFFSET6 0x5c
+#define GST_RERRINFO_OFFSET7 0x60
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT 0x00
+#define GST_MPI_STATE_INIT 0x01
+#define GST_MPI_STATE_TERMINATION 0x02
+#define GST_MPI_STATE_ERROR 0x03
+#define GST_MPI_STATE_MASK 0x07
+
+/* Per SAS PHY Attributes */
+
+#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */
+#define PSPA_OB_HW_EVENT_PID0_OFFSET 0x04 /* DWORD V+1 */
+#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */
+#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */
+#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */
+#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */
+#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */
+#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */
+#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */
+#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */
+#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */
+#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */
+#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */
+#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */
+#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */
+#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */
+#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */
+#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */
+#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */
+#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */
+#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */
+#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */
+#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */
+#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */
+#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */
+#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */
+#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */
+#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */
+#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */
+#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */
+#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */
+#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */
+/* end PSPA */
+
+/* inbound queue configuration offset - byte offset */
+#define IB_PROPERITY_OFFSET 0x00
+#define IB_BASE_ADDR_HI_OFFSET 0x04
+#define IB_BASE_ADDR_LO_OFFSET 0x08
+#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C
+#define IB_CI_BASE_ADDR_LO_OFFSET 0x10
+#define IB_PIPCI_BAR 0x14
+#define IB_PIPCI_BAR_OFFSET 0x18
+#define IB_RESERVED_OFFSET 0x1C
+
+/* outbound queue configuration offset - byte offset */
+#define OB_PROPERITY_OFFSET 0x00
+#define OB_BASE_ADDR_HI_OFFSET 0x04
+#define OB_BASE_ADDR_LO_OFFSET 0x08
+#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C
+#define OB_PI_BASE_ADDR_LO_OFFSET 0x10
+#define OB_CIPCI_BAR 0x14
+#define OB_CIPCI_BAR_OFFSET 0x18
+#define OB_INTERRUPT_COALES_OFFSET 0x1C
+#define OB_DYNAMIC_COALES_OFFSET 0x20
+#define OB_PROPERTY_INT_ENABLE 0x40000000
+
+#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040
+#define PCIE_EVENT_INTERRUPT 0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048
+#define PCIE_ERROR_INTERRUPT 0x00304C
+
+/* SPCV soft reset */
+#define SPC_REG_SOFT_RESET 0x00001000
+#define SPCv_NORMAL_RESET_VALUE 0x1
+
+#define SPCv_SOFT_RESET_READ_MASK 0xC0
+#define SPCv_SOFT_RESET_NO_RESET 0x0
+#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40
+#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80
+#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0
+
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET 0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define SPC_REG_RESET_OSSP 0x00000001
+#define SPC_REG_RESET_RAAE 0x00000002
+#define SPC_REG_RESET_PCS_SPBC 0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS 0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020
+#define SPC_REG_RESET_PCS_LM 0x00000040
+#define SPC_REG_RESET_PCS 0x00000080
+#define SPC_REG_RESET_GSM 0x00000100
+#define SPC_REG_RESET_DDR2 0x00010000
+#define SPC_REG_RESET_BDMA_CORE 0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI 0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000
+#define SPC_REG_RESET_PCIE_PWR 0x00100000
+#define SPC_REG_RESET_PCIE_SFT 0x00200000
+#define SPC_REG_RESET_PCS_SXCBI 0x00400000
+#define SPC_REG_RESET_LMS_SXCBI 0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI 0x01000000
+#define SPC_REG_RESET_PMIC_CORE 0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000
+#define SPC_REG_RESET_DEVICE 0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010
+
+#define MBIC_AAP1_ADDR_BASE 0x060000
+#define MBIC_IOP_ADDR_BASE 0x070000
+#define GSM_ADDR_BASE 0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET 0x00000000
+#define RAM_ECC_DB_ERR 0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC 0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK 0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048
+
+#define RB6_ACCESS_REG 0x6A0000
+#define HDAC_EXEC_CMD 0x0002
+#define HDA_C_PA 0xcb
+#define HDA_SEQ_ID_BITS 0x00ff0000
+#define HDA_GSM_OFFSET_BITS 0x00FFFFFF
+#define HDA_GSM_CMD_OFFSET_BITS 0x42C0
+#define HDA_GSM_RSP_OFFSET_BITS 0x42E0
+
+#define MBIC_AAP1_ADDR_BASE 0x060000
+#define MBIC_IOP_ADDR_BASE 0x070000
+#define GSM_ADDR_BASE 0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE 0x000000
+#define GSM_CONFIG_RESET_VALUE 0x00003b00
+#define GPIO_ADDR_BASE 0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET 0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST 0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS 0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID 0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
+
+
+#define MEMBASE_II_SHIFT_REGISTER 0x1010
+#endif
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
new file mode 100644
index 000000000..ed31d8cc6
--- /dev/null
+++ b/drivers/scsi/pmcraid.c
@@ -0,0 +1,6061 @@
+/*
+ * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
+ *
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ * PMC-Sierra Inc
+ *
+ * Copyright (C) 2008, 2009 PMC Sierra Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
+ * USA
+ *
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/hdreg.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+#include <linux/libata.h>
+#include <linux/mutex.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsicam.h>
+
+#include "pmcraid.h"
+
+/*
+ * Module configuration parameters
+ */
+static unsigned int pmcraid_debug_log;
+static unsigned int pmcraid_disable_aen;
+static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
+static unsigned int pmcraid_enable_msix;
+
+/*
+ * Data structures to support multiple adapters by the LLD.
+ * pmcraid_adapter_count - count of configured adapters
+ */
+static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
+
+/*
+ * Supporting user-level control interface through IOCTL commands.
+ * pmcraid_major - major number to use
+ * pmcraid_minor - minor number(s) to use
+ */
+static unsigned int pmcraid_major;
+static struct class *pmcraid_class;
+DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
+
+/*
+ * Module parameters
+ */
+MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
+MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PMCRAID_DRIVER_VERSION);
+
+module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(log_level,
+ "Enables firmware error code logging, default :1 high-severity"
+ " errors, 2: all errors including high-severity errors,"
+ " 0: disables logging");
+
+module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(debug,
+ "Enable driver verbose message logging. Set 1 to enable."
+ "(default: 0)");
+
+module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(disable_aen,
+ "Disable driver aen notifications to apps. Set 1 to disable."
+ "(default: 0)");
+
+/* chip specific constants for PMC MaxRAID controllers (same for
+ * 0x5220 and 0x8010
+ */
+static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
+ {
+ .ioastatus = 0x0,
+ .ioarrin = 0x00040,
+ .mailbox = 0x7FC30,
+ .global_intr_mask = 0x00034,
+ .ioa_host_intr = 0x0009C,
+ .ioa_host_intr_clr = 0x000A0,
+ .ioa_host_msix_intr = 0x7FC40,
+ .ioa_host_mask = 0x7FC28,
+ .ioa_host_mask_clr = 0x7FC28,
+ .host_ioa_intr = 0x00020,
+ .host_ioa_intr_clr = 0x00020,
+ .transop_timeout = 300
+ }
+};
+
+/*
+ * PCI device ids supported by pmcraid driver
+ */
+static struct pci_device_id pmcraid_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
+ 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
+
+
+
+/**
+ * pmcraid_slave_alloc - Prepare for commands to a device
+ * @scsi_dev: scsi device struct
+ *
+ * This function is called by mid-layer prior to sending any command to the new
+ * device. Stores resource entry details of the device in scsi_device struct.
+ * Queuecommand uses the resource handle and other details to fill up IOARCB
+ * while sending commands to the device.
+ *
+ * Return value:
+ * 0 on success / -ENXIO if device does not exist
+ */
+static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+{
+ struct pmcraid_resource_entry *temp, *res = NULL;
+ struct pmcraid_instance *pinstance;
+ u8 target, bus, lun;
+ unsigned long lock_flags;
+ int rc = -ENXIO;
+ u16 fw_version;
+
+ pinstance = shost_priv(scsi_dev->host);
+
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+
+ /* Driver exposes VSET and GSCSI resources only; all other device types
+ * are not exposed. Resource list is synchronized using resource lock
+ * so any traversal or modifications to the list should be done inside
+ * this lock
+ */
+ spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+ list_for_each_entry(temp, &pinstance->used_res_q, queue) {
+
+ /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
+ if (RES_IS_VSET(temp->cfg_entry)) {
+ if (fw_version <= PMCRAID_FW_VERSION_1)
+ target = temp->cfg_entry.unique_flags1;
+ else
+ target = temp->cfg_entry.array_id & 0xFF;
+
+ if (target > PMCRAID_MAX_VSET_TARGETS)
+ continue;
+ bus = PMCRAID_VSET_BUS_ID;
+ lun = 0;
+ } else if (RES_IS_GSCSI(temp->cfg_entry)) {
+ target = RES_TARGET(temp->cfg_entry.resource_address);
+ bus = PMCRAID_PHYS_BUS_ID;
+ lun = RES_LUN(temp->cfg_entry.resource_address);
+ } else {
+ continue;
+ }
+
+ if (bus == scsi_dev->channel &&
+ target == scsi_dev->id &&
+ lun == scsi_dev->lun) {
+ res = temp;
+ break;
+ }
+ }
+
+ if (res) {
+ res->scsi_dev = scsi_dev;
+ scsi_dev->hostdata = res;
+ res->change_detected = 0;
+ atomic_set(&res->read_failures, 0);
+ atomic_set(&res->write_failures, 0);
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * pmcraid_slave_configure - Configures a SCSI device
+ * @scsi_dev: scsi device struct
+ *
+ * This function is executed by SCSI mid layer just after a device is first
+ * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
+ * timeout value (default 30s) will be over-written to a higher value (60s)
+ * and max_sectors value will be over-written to 512. It also sets queue depth
+ * to host->cmd_per_lun value
+ *
+ * Return value:
+ * 0 on success
+ */
+static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
+{
+ struct pmcraid_resource_entry *res = scsi_dev->hostdata;
+
+ if (!res)
+ return 0;
+
+ /* LLD exposes VSETs and Enclosure devices only */
+ if (RES_IS_GSCSI(res->cfg_entry) &&
+ scsi_dev->type != TYPE_ENCLOSURE)
+ return -ENXIO;
+
+ pmcraid_info("configuring %x:%x:%x:%x\n",
+ scsi_dev->host->unique_id,
+ scsi_dev->channel,
+ scsi_dev->id,
+ (u8)scsi_dev->lun);
+
+ if (RES_IS_GSCSI(res->cfg_entry)) {
+ scsi_dev->allow_restart = 1;
+ } else if (RES_IS_VSET(res->cfg_entry)) {
+ scsi_dev->allow_restart = 1;
+ blk_queue_rq_timeout(scsi_dev->request_queue,
+ PMCRAID_VSET_IO_TIMEOUT);
+ blk_queue_max_hw_sectors(scsi_dev->request_queue,
+ PMCRAID_VSET_MAX_SECTORS);
+ }
+
+ /*
+ * We never want to report TCQ support for these types of devices.
+ */
+ if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry))
+ scsi_dev->tagged_supported = 0;
+
+ return 0;
+}
+
+/**
+ * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
+ *
+ * @scsi_dev: scsi device struct
+ *
+ * This is called by mid-layer before removing a device. Pointer assignments
+ * done in pmcraid_slave_alloc will be reset to NULL here.
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
+{
+ struct pmcraid_resource_entry *res;
+
+ res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
+
+ if (res)
+ res->scsi_dev = NULL;
+
+ scsi_dev->hostdata = NULL;
+}
+
+/**
+ * pmcraid_change_queue_depth - Change the device's queue depth
+ * @scsi_dev: scsi device struct
+ * @depth: depth to set
+ *
+ * Return value
+ * actual depth set
+ */
+static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
+{
+ if (depth > PMCRAID_MAX_CMD_PER_LUN)
+ depth = PMCRAID_MAX_CMD_PER_LUN;
+ return scsi_change_queue_depth(scsi_dev, depth);
+}
+
+/**
+ * pmcraid_init_cmdblk - initializes a command block
+ *
+ * @cmd: pointer to struct pmcraid_cmd to be initialized
+ * @index: if >=0 first time initialization; otherwise reinitialization
+ *
+ * Return Value
+ * None
+ */
+void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
+{
+ struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
+ dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
+
+ if (index >= 0) {
+ /* first time initialization (called from probe) */
+ u32 ioasa_offset =
+ offsetof(struct pmcraid_control_block, ioasa);
+
+ cmd->index = index;
+ ioarcb->response_handle = cpu_to_le32(index << 2);
+ ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
+ ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
+ ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
+ } else {
+ /* re-initialization of various lengths, called once command is
+ * processed by IOA
+ */
+ memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
+ ioarcb->hrrq_id = 0;
+ ioarcb->request_flags0 = 0;
+ ioarcb->request_flags1 = 0;
+ ioarcb->cmd_timeout = 0;
+ ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+ ioarcb->ioadl_bus_addr = 0;
+ ioarcb->ioadl_length = 0;
+ ioarcb->data_transfer_length = 0;
+ ioarcb->add_cmd_param_length = 0;
+ ioarcb->add_cmd_param_offset = 0;
+ cmd->ioa_cb->ioasa.ioasc = 0;
+ cmd->ioa_cb->ioasa.residual_data_length = 0;
+ cmd->time_left = 0;
+ }
+
+ cmd->cmd_done = NULL;
+ cmd->scsi_cmd = NULL;
+ cmd->release = 0;
+ cmd->completion_req = 0;
+ cmd->sense_buffer = 0;
+ cmd->sense_buffer_dma = 0;
+ cmd->dma_handle = 0;
+ init_timer(&cmd->timer);
+}
+
+/**
+ * pmcraid_reinit_cmdblk - reinitialize a command block
+ *
+ * @cmd: pointer to struct pmcraid_cmd to be reinitialized
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
+{
+ pmcraid_init_cmdblk(cmd, -1);
+}
+
+/**
+ * pmcraid_get_free_cmd - get a free cmd block from command block pool
+ * @pinstance: adapter instance structure
+ *
+ * Return Value:
+ * returns pointer to cmd block or NULL if no blocks are available
+ */
+static struct pmcraid_cmd *pmcraid_get_free_cmd(
+ struct pmcraid_instance *pinstance
+)
+{
+ struct pmcraid_cmd *cmd = NULL;
+ unsigned long lock_flags;
+
+ /* free cmd block list is protected by free_pool_lock */
+ spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
+
+ if (!list_empty(&pinstance->free_cmd_pool)) {
+ cmd = list_entry(pinstance->free_cmd_pool.next,
+ struct pmcraid_cmd, free_list);
+ list_del(&cmd->free_list);
+ }
+ spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
+
+ /* Initialize the command block before giving it the caller */
+ if (cmd != NULL)
+ pmcraid_reinit_cmdblk(cmd);
+ return cmd;
+}
+
+/**
+ * pmcraid_return_cmd - return a completed command block back into free pool
+ * @cmd: pointer to the command block
+ *
+ * Return Value:
+ * nothing
+ */
+void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
+ list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
+ spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
+}
+
+/**
+ * pmcraid_read_interrupts - reads IOA interrupts
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ * interrupts read from IOA
+ */
+static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
+{
+ return (pinstance->interrupt_mode) ?
+ ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
+ ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+}
+
+/**
+ * pmcraid_disable_interrupts - Masks and clears all specified interrupts
+ *
+ * @pinstance: pointer to per adapter instance structure
+ * @intrs: interrupts to disable
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_disable_interrupts(
+ struct pmcraid_instance *pinstance,
+ u32 intrs
+)
+{
+ u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+ u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
+
+ iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
+ ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+
+ if (!pinstance->interrupt_mode) {
+ iowrite32(intrs,
+ pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ }
+}
+
+/**
+ * pmcraid_enable_interrupts - Enables specified interrupts
+ *
+ * @pinstance: pointer to per adapter instance structure
+ * @intr: interrupts to enable
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_enable_interrupts(
+ struct pmcraid_instance *pinstance,
+ u32 intrs
+)
+{
+ u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+ u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
+
+ iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
+
+ if (!pinstance->interrupt_mode) {
+ iowrite32(~intrs,
+ pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ }
+
+ pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
+ ioread32(pinstance->int_regs.global_interrupt_mask_reg),
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
+}
+
+/**
+ * pmcraid_clr_trans_op - clear trans to op interrupt
+ *
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_clr_trans_op(
+ struct pmcraid_instance *pinstance
+)
+{
+ unsigned long lock_flags;
+
+ if (!pinstance->interrupt_mode) {
+ iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+ pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+ pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ }
+
+ if (pinstance->reset_cmd != NULL) {
+ del_timer(&pinstance->reset_cmd->timer);
+ spin_lock_irqsave(
+ pinstance->host->host_lock, lock_flags);
+ pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
+ spin_unlock_irqrestore(
+ pinstance->host->host_lock, lock_flags);
+ }
+}
+
+/**
+ * pmcraid_reset_type - Determine the required reset type
+ * @pinstance: pointer to adapter instance structure
+ *
+ * IOA requires hard reset if any of the following conditions is true.
+ * 1. If HRRQ valid interrupt is not masked
+ * 2. IOA reset alert doorbell is set
+ * 3. If there are any error interrupts
+ */
+static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
+{
+ u32 mask;
+ u32 intrs;
+ u32 alerts;
+
+ mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+ alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+
+ if ((mask & INTRS_HRRQ_VALID) == 0 ||
+ (alerts & DOORBELL_IOA_RESET_ALERT) ||
+ (intrs & PMCRAID_ERROR_INTERRUPTS)) {
+ pmcraid_info("IOA requires hard reset\n");
+ pinstance->ioa_hard_reset = 1;
+ }
+
+ /* If unit check is active, trigger the dump */
+ if (intrs & INTRS_IOA_UNIT_CHECK)
+ pinstance->ioa_unit_check = 1;
+}
+
+/**
+ * pmcraid_bist_done - completion function for PCI BIST
+ * @cmd: pointer to reset command
+ * Return Value
+ * none
+ */
+
+static void pmcraid_ioa_reset(struct pmcraid_cmd *);
+
+static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+ int rc;
+ u16 pci_reg;
+
+ rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
+
+ /* If PCI config space can't be accessed wait for another two secs */
+ if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
+ cmd->time_left > 0) {
+ pmcraid_info("BIST not complete, waiting another 2 secs\n");
+ cmd->timer.expires = jiffies + cmd->time_left;
+ cmd->time_left = 0;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.function =
+ (void (*)(unsigned long))pmcraid_bist_done;
+ add_timer(&cmd->timer);
+ } else {
+ cmd->time_left = 0;
+ pmcraid_info("BIST is complete, proceeding with reset\n");
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_ioa_reset(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ }
+}
+
+/**
+ * pmcraid_start_bist - starts BIST
+ * @cmd: pointer to reset cmd
+ * Return Value
+ * none
+ */
+static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 doorbells, intrs;
+
+ /* proceed with bist and wait for 2 seconds */
+ iowrite32(DOORBELL_IOA_START_BIST,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+ intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+ pmcraid_info("doorbells after start bist: %x intrs: %x\n",
+ doorbells, intrs);
+
+ cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+ cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
+ add_timer(&cmd->timer);
+}
+
+/**
+ * pmcraid_reset_alert_done - completion routine for reset_alert
+ * @cmd: pointer to command block used in reset sequence
+ * Return value
+ * None
+ */
+static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 status = ioread32(pinstance->ioa_status);
+ unsigned long lock_flags;
+
+ /* if the critical operation in progress bit is set or the wait times
+ * out, invoke reset engine to proceed with hard reset. If there is
+ * some more time to wait, restart the timer
+ */
+ if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
+ cmd->time_left <= 0) {
+ pmcraid_info("critical op is reset proceeding with reset\n");
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_ioa_reset(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ } else {
+ pmcraid_info("critical op is not yet reset waiting again\n");
+ /* restart timer if some more time is available to wait */
+ cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+ cmd->timer.function =
+ (void (*)(unsigned long))pmcraid_reset_alert_done;
+ add_timer(&cmd->timer);
+ }
+}
+
+/**
+ * pmcraid_reset_alert - alerts IOA for a possible reset
+ * @cmd : command block to be used for reset sequence.
+ *
+ * Return Value
+ * returns 0 if pci config-space is accessible and RESET_DOORBELL is
+ * successfully written to IOA. Returns non-zero in case pci_config_space
+ * is not accessible
+ */
+static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
+static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 doorbells;
+ int rc;
+ u16 pci_reg;
+
+ /* If we are able to access IOA PCI config space, alert IOA that we are
+ * going to reset it soon. This enables IOA to preserv persistent error
+ * data if any. In case memory space is not accessible, proceed with
+ * BIST or slot_reset
+ */
+ rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
+ if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
+
+ /* wait for IOA permission i.e until CRITICAL_OPERATION bit is
+ * reset IOA doesn't generate any interrupts when CRITICAL
+ * OPERATION bit is reset. A timer is started to wait for this
+ * bit to be reset.
+ */
+ cmd->time_left = PMCRAID_RESET_TIMEOUT;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+ cmd->timer.function =
+ (void (*)(unsigned long))pmcraid_reset_alert_done;
+ add_timer(&cmd->timer);
+
+ iowrite32(DOORBELL_IOA_RESET_ALERT,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ doorbells =
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+ pmcraid_info("doorbells after reset alert: %x\n", doorbells);
+ } else {
+ pmcraid_info("PCI config is not accessible starting BIST\n");
+ pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
+ pmcraid_start_bist(cmd);
+ }
+}
+
+/**
+ * pmcraid_timeout_handler - Timeout handler for internally generated ops
+ *
+ * @cmd : pointer to command structure, that got timedout
+ *
+ * This function blocks host requests and initiates an adapter reset.
+ *
+ * Return value:
+ * None
+ */
+static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ dev_info(&pinstance->pdev->dev,
+ "Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
+ cmd->ioa_cb->ioarcb.cdb[0]);
+
+ /* Command timeouts result in hard reset sequence. The command that got
+ * timed out may be the one used as part of reset sequence. In this
+ * case restart reset sequence using the same command block even if
+ * reset is in progress. Otherwise fail this command and get a free
+ * command block to restart the reset sequence.
+ */
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ if (!pinstance->ioa_reset_in_progress) {
+ pinstance->ioa_reset_attempts = 0;
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ /* If we are out of command blocks, just return here itself.
+ * Some other command's timeout handler can do the reset job
+ */
+ if (cmd == NULL) {
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ lock_flags);
+ pmcraid_err("no free cmnd block for timeout handler\n");
+ return;
+ }
+
+ pinstance->reset_cmd = cmd;
+ pinstance->ioa_reset_in_progress = 1;
+ } else {
+ pmcraid_info("reset is already in progress\n");
+
+ if (pinstance->reset_cmd != cmd) {
+ /* This command should have been given to IOA, this
+ * command will be completed by fail_outstanding_cmds
+ * anyway
+ */
+ pmcraid_err("cmd is pending but reset in progress\n");
+ }
+
+ /* If this command was being used as part of the reset
+ * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
+ * causes fail_outstanding_commands not to return the command
+ * block back to free pool
+ */
+ if (cmd == pinstance->reset_cmd)
+ cmd->cmd_done = pmcraid_ioa_reset;
+ }
+
+ /* Notify apps of important IOA bringup/bringdown sequences */
+ if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
+ pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
+ pmcraid_notify_ioastate(pinstance,
+ PMC_DEVICE_EVENT_RESET_START);
+
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ scsi_block_requests(pinstance->host);
+ pmcraid_reset_alert(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+}
+
+/**
+ * pmcraid_internal_done - completion routine for internally generated cmds
+ *
+ * @cmd: command that got response from IOA
+ *
+ * Return Value:
+ * none
+ */
+static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ /* Some of the internal commands are sent with callers blocking for the
+ * response. Same will be indicated as part of cmd->completion_req
+ * field. Response path needs to wake up any waiters waiting for cmd
+ * completion if this flag is set.
+ */
+ if (cmd->completion_req) {
+ cmd->completion_req = 0;
+ complete(&cmd->wait_for_completion);
+ }
+
+ /* most of the internal commands are completed by caller itself, so
+ * no need to return the command block back to free pool until we are
+ * required to do so (e.g once done with initialization).
+ */
+ if (cmd->release) {
+ cmd->release = 0;
+ pmcraid_return_cmd(cmd);
+ }
+}
+
+/**
+ * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
+ *
+ * @cmd: command that got response from IOA
+ *
+ * This routine is called after driver re-reads configuration table due to a
+ * lost CCN. It returns the command block back to free pool and schedules
+ * worker thread to add/delete devices into the system.
+ *
+ * Return Value:
+ * none
+ */
+static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ if (cmd->release) {
+ cmd->release = 0;
+ pmcraid_return_cmd(cmd);
+ }
+ pmcraid_info("scheduling worker for config table reinitialization\n");
+ schedule_work(&cmd->drv_inst->worker_q);
+}
+
+/**
+ * pmcraid_erp_done - Process completion of SCSI error response from device
+ * @cmd: pmcraid_command
+ *
+ * This function copies the sense buffer into the scsi_cmd struct and completes
+ * scsi_cmd by calling scsi_done function.
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
+{
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+
+ if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
+ scsi_cmd->result |= (DID_ERROR << 16);
+ scmd_printk(KERN_INFO, scsi_cmd,
+ "command CDB[0] = %x failed with IOASC: 0x%08X\n",
+ cmd->ioa_cb->ioarcb.cdb[0], ioasc);
+ }
+
+ /* if we had allocated sense buffers for request sense, copy the sense
+ * release the buffers
+ */
+ if (cmd->sense_buffer != NULL) {
+ memcpy(scsi_cmd->sense_buffer,
+ cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+ pci_free_consistent(pinstance->pdev,
+ SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer, cmd->sense_buffer_dma);
+ cmd->sense_buffer = NULL;
+ cmd->sense_buffer_dma = 0;
+ }
+
+ scsi_dma_unmap(scsi_cmd);
+ pmcraid_return_cmd(cmd);
+ scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * pmcraid_fire_command - sends an IOA command to adapter
+ *
+ * This function adds the given block into pending command list
+ * and returns without waiting
+ *
+ * @cmd : command to be sent to the device
+ *
+ * Return Value
+ * None
+ */
+static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ /* Add this command block to pending cmd pool. We do this prior to
+ * writting IOARCB to ioarrin because IOA might complete the command
+ * by the time we are about to add it to the list. Response handler
+ * (isr/tasklet) looks for cmd block in the pending pending list.
+ */
+ spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+ list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
+ spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
+ atomic_inc(&pinstance->outstanding_cmds);
+
+ /* driver writes lower 32-bit value of IOARCB address only */
+ mb();
+ iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
+ pinstance->ioarrin);
+}
+
+/**
+ * pmcraid_send_cmd - fires a command to IOA
+ *
+ * This function also sets up timeout function, and command completion
+ * function
+ *
+ * @cmd: pointer to the command block to be fired to IOA
+ * @cmd_done: command completion function, called once IOA responds
+ * @timeout: timeout to wait for this command completion
+ * @timeout_func: timeout handler
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_send_cmd(
+ struct pmcraid_cmd *cmd,
+ void (*cmd_done) (struct pmcraid_cmd *),
+ unsigned long timeout,
+ void (*timeout_func) (struct pmcraid_cmd *)
+)
+{
+ /* initialize done function */
+ cmd->cmd_done = cmd_done;
+
+ if (timeout_func) {
+ /* setup timeout handler */
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + timeout;
+ cmd->timer.function = (void (*)(unsigned long))timeout_func;
+ add_timer(&cmd->timer);
+ }
+
+ /* fire the command to IOA */
+ _pmcraid_fire_command(cmd);
+}
+
+/**
+ * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
+ * @cmd: pointer to the command block used for sending IOA shutdown command
+ *
+ * Return value
+ * None
+ */
+static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_ioa_reset(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+}
+
+/**
+ * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
+ *
+ * @cmd: pointer to the command block used as part of reset sequence
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ /* Note that commands sent during reset require next command to be sent
+ * to IOA. Hence reinit the done function as well as timeout function
+ */
+ pmcraid_reinit_cmdblk(cmd);
+ cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
+ cmd->ioa_cb->ioarcb.resource_handle =
+ cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
+ cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
+
+ /* fire shutdown command to hardware. */
+ pmcraid_info("firing normal shutdown command (%d) to IOA\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
+
+ pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
+
+ pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
+ PMCRAID_SHUTDOWN_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_get_fwversion_done - completion function for get_fwversion
+ *
+ * @cmd: pointer to command block used to send INQUIRY command
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_querycfg(struct pmcraid_cmd *);
+
+static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+ unsigned long lock_flags;
+
+ /* configuration table entry size depends on firmware version. If fw
+ * version is not known, it is not possible to interpret IOA config
+ * table
+ */
+ if (ioasc) {
+ pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ pmcraid_reset_alert(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ } else {
+ pmcraid_querycfg(cmd);
+ }
+}
+
+/**
+ * pmcraid_get_fwversion - reads firmware version information
+ *
+ * @cmd: pointer to command block used to send INQUIRY command
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u16 data_size = sizeof(struct pmcraid_inquiry_data);
+
+ pmcraid_reinit_cmdblk(cmd);
+ ioarcb->request_type = REQ_TYPE_SCSI;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ ioarcb->cdb[0] = INQUIRY;
+ ioarcb->cdb[1] = 1;
+ ioarcb->cdb[2] = 0xD0;
+ ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
+ ioarcb->cdb[4] = data_size & 0xFF;
+
+ /* Since entire inquiry data it can be part of IOARCB itself
+ */
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+ ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+ ioarcb->data_transfer_length = cpu_to_le32(data_size);
+ ioadl = &(ioarcb->add_data.u.ioadl[0]);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
+ ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
+ ioadl->data_len = cpu_to_le32(data_size);
+
+ pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
+ PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_identify_hrrq - registers host rrq buffers with IOA
+ * @cmd: pointer to command block to be used for identify hrrq
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ int index = cmd->hrrq_index;
+ __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
+ u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
+ void (*done_function)(struct pmcraid_cmd *);
+
+ pmcraid_reinit_cmdblk(cmd);
+ cmd->hrrq_index = index + 1;
+
+ if (cmd->hrrq_index < pinstance->num_hrrq) {
+ done_function = pmcraid_identify_hrrq;
+ } else {
+ cmd->hrrq_index = 0;
+ done_function = pmcraid_get_fwversion;
+ }
+
+ /* Initialize ioarcb */
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+
+ /* initialize the hrrq number where IOA will respond to this command */
+ ioarcb->hrrq_id = index;
+ ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
+ ioarcb->cdb[1] = index;
+
+ /* IOA expects 64-bit pci address to be written in B.E format
+ * (i.e cdb[2]=MSByte..cdb[9]=LSB.
+ */
+ pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
+ hrrq_addr, ioarcb->ioarcb_bus_addr, index);
+
+ memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
+ memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
+
+ /* Subsequent commands require HRRQ identification to be successful.
+ * Note that this gets called even during reset from SCSI mid-layer
+ * or tasklet
+ */
+ pmcraid_send_cmd(cmd, done_function,
+ PMCRAID_INTERNAL_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
+static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
+
+/**
+ * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
+ *
+ * @cmd: initialized command block pointer
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
+{
+ if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
+ atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
+ else
+ atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
+
+ pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
+}
+
+/**
+ * pmcraid_init_hcam - send an initialized command block(HCAM) to IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @type: HCAM type
+ *
+ * Return Value
+ * pointer to initialized pmcraid_cmd structure or NULL
+ */
+static struct pmcraid_cmd *pmcraid_init_hcam
+(
+ struct pmcraid_instance *pinstance,
+ u8 type
+)
+{
+ struct pmcraid_cmd *cmd;
+ struct pmcraid_ioarcb *ioarcb;
+ struct pmcraid_ioadl_desc *ioadl;
+ struct pmcraid_hostrcb *hcam;
+ void (*cmd_done) (struct pmcraid_cmd *);
+ dma_addr_t dma;
+ int rcb_size;
+
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (!cmd) {
+ pmcraid_err("no free command blocks for hcam\n");
+ return cmd;
+ }
+
+ if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
+ rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
+ cmd_done = pmcraid_process_ccn;
+ dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
+ hcam = &pinstance->ccn;
+ } else {
+ rcb_size = sizeof(struct pmcraid_hcam_ldn);
+ cmd_done = pmcraid_process_ldn;
+ dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
+ hcam = &pinstance->ldn;
+ }
+
+ /* initialize command pointer used for HCAM registration */
+ hcam->cmd = cmd;
+
+ ioarcb = &cmd->ioa_cb->ioarcb;
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+ ioadl = ioarcb->add_data.u.ioadl;
+
+ /* Initialize ioarcb */
+ ioarcb->request_type = REQ_TYPE_HCAM;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
+ ioarcb->cdb[1] = type;
+ ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
+ ioarcb->cdb[8] = (rcb_size) & 0xFF;
+
+ ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
+
+ ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
+ ioadl[0].data_len = cpu_to_le32(rcb_size);
+ ioadl[0].address = cpu_to_le32(dma);
+
+ cmd->cmd_done = cmd_done;
+ return cmd;
+}
+
+/**
+ * pmcraid_send_hcam - Send an HCAM to IOA
+ * @pinstance: ioa config struct
+ * @type: HCAM type
+ *
+ * This function will send a Host Controlled Async command to IOA.
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
+{
+ struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
+ pmcraid_send_hcam_cmd(cmd);
+}
+
+
+/**
+ * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
+ *
+ * @cmd: pointer to cmd that is used as cancelling command
+ * @cmd_to_cancel: pointer to the command that needs to be cancelled
+ */
+static void pmcraid_prepare_cancel_cmd(
+ struct pmcraid_cmd *cmd,
+ struct pmcraid_cmd *cmd_to_cancel
+)
+{
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
+
+ /* Get the resource handle to where the command to be aborted has been
+ * sent.
+ */
+ ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+ ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
+
+ /* IOARCB address of the command to be cancelled is given in
+ * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in
+ * IOARCB address are not masked.
+ */
+ ioarcb_addr = cpu_to_be64(ioarcb_addr);
+ memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
+}
+
+/**
+ * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
+ *
+ * @cmd: command to be used as cancelling command
+ * @type: HCAM type
+ * @cmd_done: op done function for the cancelling command
+ */
+static void pmcraid_cancel_hcam(
+ struct pmcraid_cmd *cmd,
+ u8 type,
+ void (*cmd_done) (struct pmcraid_cmd *)
+)
+{
+ struct pmcraid_instance *pinstance;
+ struct pmcraid_hostrcb *hcam;
+
+ pinstance = cmd->drv_inst;
+ hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
+ &pinstance->ldn : &pinstance->ccn;
+
+ /* prepare for cancelling previous hcam command. If the HCAM is
+ * currently not pending with IOA, we would have hcam->cmd as non-null
+ */
+ if (hcam->cmd == NULL)
+ return;
+
+ pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
+
+ /* writing to IOARRIN must be protected by host_lock, as mid-layer
+ * schedule queuecommand while we are doing this
+ */
+ pmcraid_send_cmd(cmd, cmd_done,
+ PMCRAID_INTERNAL_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
+ *
+ * @cmd: command block to be used for cancelling the HCAM
+ */
+static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ pmcraid_reinit_cmdblk(cmd);
+
+ pmcraid_cancel_hcam(cmd,
+ PMCRAID_HCAM_CODE_CONFIG_CHANGE,
+ pmcraid_ioa_shutdown);
+}
+
+/**
+ * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
+ *
+ * @cmd: command block to be used for cancelling the HCAM
+ */
+static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
+{
+ pmcraid_cancel_hcam(cmd,
+ PMCRAID_HCAM_CODE_LOG_DATA,
+ pmcraid_cancel_ccn);
+}
+
+/**
+ * pmcraid_expose_resource - check if the resource can be exposed to OS
+ *
+ * @fw_version: firmware version code
+ * @cfgte: pointer to configuration table entry of the resource
+ *
+ * Return value:
+ * true if resource can be added to midlayer, false(0) otherwise
+ */
+static int pmcraid_expose_resource(u16 fw_version,
+ struct pmcraid_config_table_entry *cfgte)
+{
+ int retval = 0;
+
+ if (cfgte->resource_type == RES_TYPE_VSET) {
+ if (fw_version <= PMCRAID_FW_VERSION_1)
+ retval = ((cfgte->unique_flags1 & 0x80) == 0);
+ else
+ retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
+ (cfgte->unique_flags1 & 0x80) == 0);
+
+ } else if (cfgte->resource_type == RES_TYPE_GSCSI)
+ retval = (RES_BUS(cfgte->resource_address) !=
+ PMCRAID_VIRTUAL_ENCL_BUS_ID);
+ return retval;
+}
+
+/* attributes supported by pmcraid_event_family */
+enum {
+ PMCRAID_AEN_ATTR_UNSPEC,
+ PMCRAID_AEN_ATTR_EVENT,
+ __PMCRAID_AEN_ATTR_MAX,
+};
+#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
+
+/* commands supported by pmcraid_event_family */
+enum {
+ PMCRAID_AEN_CMD_UNSPEC,
+ PMCRAID_AEN_CMD_EVENT,
+ __PMCRAID_AEN_CMD_MAX,
+};
+#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
+
+static struct genl_multicast_group pmcraid_mcgrps[] = {
+ { .name = "events", /* not really used - see ID discussion below */ },
+};
+
+static struct genl_family pmcraid_event_family = {
+ /*
+ * Due to prior multicast group abuse (the code having assumed that
+ * the family ID can be used as a multicast group ID) we need to
+ * statically allocate a family (and thus group) ID.
+ */
+ .id = GENL_ID_PMCRAID,
+ .name = "pmcraid",
+ .version = 1,
+ .maxattr = PMCRAID_AEN_ATTR_MAX,
+ .mcgrps = pmcraid_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
+};
+
+/**
+ * pmcraid_netlink_init - registers pmcraid_event_family
+ *
+ * Return value:
+ * 0 if the pmcraid_event_family is successfully registered
+ * with netlink generic, non-zero otherwise
+ */
+static int pmcraid_netlink_init(void)
+{
+ int result;
+
+ result = genl_register_family(&pmcraid_event_family);
+
+ if (result)
+ return result;
+
+ pmcraid_info("registered NETLINK GENERIC group: %d\n",
+ pmcraid_event_family.id);
+
+ return result;
+}
+
+/**
+ * pmcraid_netlink_release - unregisters pmcraid_event_family
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_netlink_release(void)
+{
+ genl_unregister_family(&pmcraid_event_family);
+}
+
+/**
+ * pmcraid_notify_aen - sends event msg to user space application
+ * @pinstance: pointer to adapter instance structure
+ * @type: HCAM type
+ *
+ * Return value:
+ * 0 if success, error value in case of any failure.
+ */
+static int pmcraid_notify_aen(
+ struct pmcraid_instance *pinstance,
+ struct pmcraid_aen_msg *aen_msg,
+ u32 data_size
+)
+{
+ struct sk_buff *skb;
+ void *msg_header;
+ u32 total_size, nla_genl_hdr_total_size;
+ int result;
+
+ aen_msg->hostno = (pinstance->host->unique_id << 16 |
+ MINOR(pinstance->cdev.dev));
+ aen_msg->length = data_size;
+
+ data_size += sizeof(*aen_msg);
+
+ total_size = nla_total_size(data_size);
+ /* Add GENL_HDR to total_size */
+ nla_genl_hdr_total_size =
+ (total_size + (GENL_HDRLEN +
+ ((struct genl_family *)&pmcraid_event_family)->hdrsize)
+ + NLMSG_HDRLEN);
+ skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
+
+
+ if (!skb) {
+ pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
+ total_size);
+ return -ENOMEM;
+ }
+
+ /* add the genetlink message header */
+ msg_header = genlmsg_put(skb, 0, 0,
+ &pmcraid_event_family, 0,
+ PMCRAID_AEN_CMD_EVENT);
+ if (!msg_header) {
+ pmcraid_err("failed to copy command details\n");
+ nlmsg_free(skb);
+ return -ENOMEM;
+ }
+
+ result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
+
+ if (result) {
+ pmcraid_err("failed to copy AEN attribute data\n");
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ /* send genetlink multicast message to notify appplications */
+ genlmsg_end(skb, msg_header);
+
+ result = genlmsg_multicast(&pmcraid_event_family, skb,
+ 0, 0, GFP_ATOMIC);
+
+ /* If there are no listeners, genlmsg_multicast may return non-zero
+ * value.
+ */
+ if (result)
+ pmcraid_info("error (%x) sending aen event message\n", result);
+ return result;
+}
+
+/**
+ * pmcraid_notify_ccn - notifies about CCN event msg to user space
+ * @pinstance: pointer adapter instance structure
+ *
+ * Return value:
+ * 0 if success, error value in case of any failure
+ */
+static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
+{
+ return pmcraid_notify_aen(pinstance,
+ pinstance->ccn.msg,
+ pinstance->ccn.hcam->data_len +
+ sizeof(struct pmcraid_hcam_hdr));
+}
+
+/**
+ * pmcraid_notify_ldn - notifies about CCN event msg to user space
+ * @pinstance: pointer adapter instance structure
+ *
+ * Return value:
+ * 0 if success, error value in case of any failure
+ */
+static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
+{
+ return pmcraid_notify_aen(pinstance,
+ pinstance->ldn.msg,
+ pinstance->ldn.hcam->data_len +
+ sizeof(struct pmcraid_hcam_hdr));
+}
+
+/**
+ * pmcraid_notify_ioastate - sends IOA state event msg to user space
+ * @pinstance: pointer adapter instance structure
+ * @evt: controller state event to be sent
+ *
+ * Return value:
+ * 0 if success, error value in case of any failure
+ */
+static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
+{
+ pinstance->scn.ioa_state = evt;
+ pmcraid_notify_aen(pinstance,
+ &pinstance->scn.msg,
+ sizeof(u32));
+}
+
+/**
+ * pmcraid_handle_config_change - Handle a config change from the adapter
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value:
+ * none
+ */
+
+static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
+{
+ struct pmcraid_config_table_entry *cfg_entry;
+ struct pmcraid_hcam_ccn *ccn_hcam;
+ struct pmcraid_cmd *cmd;
+ struct pmcraid_cmd *cfgcmd;
+ struct pmcraid_resource_entry *res = NULL;
+ unsigned long lock_flags;
+ unsigned long host_lock_flags;
+ u32 new_entry = 1;
+ u32 hidden_entry = 0;
+ u16 fw_version;
+ int rc;
+
+ ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
+ cfg_entry = &ccn_hcam->cfg_entry;
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+
+ pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
+ res: %x:%x:%x:%x\n",
+ pinstance->ccn.hcam->ilid,
+ pinstance->ccn.hcam->op_code,
+ ((pinstance->ccn.hcam->timestamp1) |
+ ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
+ pinstance->ccn.hcam->notification_type,
+ pinstance->ccn.hcam->notification_lost,
+ pinstance->ccn.hcam->flags,
+ pinstance->host->unique_id,
+ RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
+ (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
+ RES_BUS(cfg_entry->resource_address)),
+ RES_IS_VSET(*cfg_entry) ?
+ (fw_version <= PMCRAID_FW_VERSION_1 ?
+ cfg_entry->unique_flags1 :
+ cfg_entry->array_id & 0xFF) :
+ RES_TARGET(cfg_entry->resource_address),
+ RES_LUN(cfg_entry->resource_address));
+
+
+ /* If this HCAM indicates a lost notification, read the config table */
+ if (pinstance->ccn.hcam->notification_lost) {
+ cfgcmd = pmcraid_get_free_cmd(pinstance);
+ if (cfgcmd) {
+ pmcraid_info("lost CCN, reading config table\b");
+ pinstance->reinit_cfg_table = 1;
+ pmcraid_querycfg(cfgcmd);
+ } else {
+ pmcraid_err("lost CCN, no free cmd for querycfg\n");
+ }
+ goto out_notify_apps;
+ }
+
+ /* If this resource is not going to be added to mid-layer, just notify
+ * applications and return. If this notification is about hiding a VSET
+ * resource, check if it was exposed already.
+ */
+ if (pinstance->ccn.hcam->notification_type ==
+ NOTIFICATION_TYPE_ENTRY_CHANGED &&
+ cfg_entry->resource_type == RES_TYPE_VSET) {
+
+ if (fw_version <= PMCRAID_FW_VERSION_1)
+ hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
+ else
+ hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
+
+ } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
+ goto out_notify_apps;
+ }
+
+ spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+ list_for_each_entry(res, &pinstance->used_res_q, queue) {
+ rc = memcmp(&res->cfg_entry.resource_address,
+ &cfg_entry->resource_address,
+ sizeof(cfg_entry->resource_address));
+ if (!rc) {
+ new_entry = 0;
+ break;
+ }
+ }
+
+ if (new_entry) {
+
+ if (hidden_entry) {
+ spin_unlock_irqrestore(&pinstance->resource_lock,
+ lock_flags);
+ goto out_notify_apps;
+ }
+
+ /* If there are more number of resources than what driver can
+ * manage, do not notify the applications about the CCN. Just
+ * ignore this notifications and re-register the same HCAM
+ */
+ if (list_empty(&pinstance->free_res_q)) {
+ spin_unlock_irqrestore(&pinstance->resource_lock,
+ lock_flags);
+ pmcraid_err("too many resources attached\n");
+ spin_lock_irqsave(pinstance->host->host_lock,
+ host_lock_flags);
+ pmcraid_send_hcam(pinstance,
+ PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ host_lock_flags);
+ return;
+ }
+
+ res = list_entry(pinstance->free_res_q.next,
+ struct pmcraid_resource_entry, queue);
+
+ list_del(&res->queue);
+ res->scsi_dev = NULL;
+ res->reset_progress = 0;
+ list_add_tail(&res->queue, &pinstance->used_res_q);
+ }
+
+ memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
+
+ if (pinstance->ccn.hcam->notification_type ==
+ NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
+ if (res->scsi_dev) {
+ if (fw_version <= PMCRAID_FW_VERSION_1)
+ res->cfg_entry.unique_flags1 &= 0x7F;
+ else
+ res->cfg_entry.array_id &= 0xFF;
+ res->change_detected = RES_CHANGE_DEL;
+ res->cfg_entry.resource_handle =
+ PMCRAID_INVALID_RES_HANDLE;
+ schedule_work(&pinstance->worker_q);
+ } else {
+ /* This may be one of the non-exposed resources */
+ list_move_tail(&res->queue, &pinstance->free_res_q);
+ }
+ } else if (!res->scsi_dev) {
+ res->change_detected = RES_CHANGE_ADD;
+ schedule_work(&pinstance->worker_q);
+ }
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+
+out_notify_apps:
+
+ /* Notify configuration changes to registered applications.*/
+ if (!pmcraid_disable_aen)
+ pmcraid_notify_ccn(pinstance);
+
+ cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+ if (cmd)
+ pmcraid_send_hcam_cmd(cmd);
+}
+
+/**
+ * pmcraid_get_error_info - return error string for an ioasc
+ * @ioasc: ioasc code
+ * Return Value
+ * none
+ */
+static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
+ if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
+ return &pmcraid_ioasc_error_table[i];
+ }
+ return NULL;
+}
+
+/**
+ * pmcraid_ioasc_logger - log IOASC information based user-settings
+ * @ioasc: ioasc code
+ * @cmd: pointer to command that resulted in 'ioasc'
+ */
+void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
+
+ if (error_info == NULL ||
+ cmd->drv_inst->current_log_level < error_info->log_level)
+ return;
+
+ /* log the error string */
+ pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ cmd->ioa_cb->ioarcb.resource_handle,
+ le32_to_cpu(ioasc), error_info->error_string);
+}
+
+/**
+ * pmcraid_handle_error_log - Handle a config change (error log) from the IOA
+ *
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
+{
+ struct pmcraid_hcam_ldn *hcam_ldn;
+ u32 ioasc;
+
+ hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
+
+ pmcraid_info
+ ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
+ pinstance->ldn.hcam->ilid,
+ pinstance->ldn.hcam->op_code,
+ pinstance->ldn.hcam->notification_type,
+ pinstance->ldn.hcam->notification_lost,
+ pinstance->ldn.hcam->flags,
+ pinstance->ldn.hcam->overlay_id);
+
+ /* log only the errors, no need to log informational log entries */
+ if (pinstance->ldn.hcam->notification_type !=
+ NOTIFICATION_TYPE_ERROR_LOG)
+ return;
+
+ if (pinstance->ldn.hcam->notification_lost ==
+ HOSTRCB_NOTIFICATIONS_LOST)
+ dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
+
+ ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
+
+ if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
+ ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
+ dev_info(&pinstance->pdev->dev,
+ "UnitAttention due to IOA Bus Reset\n");
+ scsi_report_bus_reset(
+ pinstance->host,
+ RES_BUS(hcam_ldn->error_log.fd_ra));
+ }
+
+ return;
+}
+
+/**
+ * pmcraid_process_ccn - Op done function for a CCN.
+ * @cmd: pointer to command struct
+ *
+ * This function is the op done function for a configuration
+ * change notification
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+ unsigned long lock_flags;
+
+ pinstance->ccn.cmd = NULL;
+ pmcraid_return_cmd(cmd);
+
+ /* If driver initiated IOA reset happened while this hcam was pending
+ * with IOA, or IOA bringdown sequence is in progress, no need to
+ * re-register the hcam
+ */
+ if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
+ atomic_read(&pinstance->ccn.ignore) == 1) {
+ return;
+ } else if (ioasc) {
+ dev_info(&pinstance->pdev->dev,
+ "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ } else {
+ pmcraid_handle_config_change(pinstance);
+ }
+}
+
+/**
+ * pmcraid_process_ldn - op done function for an LDN
+ * @cmd: pointer to command block
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
+
+static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_hcam_ldn *ldn_hcam =
+ (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
+ u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+ u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
+ unsigned long lock_flags;
+
+ /* return the command block back to freepool */
+ pinstance->ldn.cmd = NULL;
+ pmcraid_return_cmd(cmd);
+
+ /* If driver initiated IOA reset happened while this hcam was pending
+ * with IOA, no need to re-register the hcam as reset engine will do it
+ * once reset sequence is complete
+ */
+ if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
+ atomic_read(&pinstance->ccn.ignore) == 1) {
+ return;
+ } else if (!ioasc) {
+ pmcraid_handle_error_log(pinstance);
+ if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
+ spin_lock_irqsave(pinstance->host->host_lock,
+ lock_flags);
+ pmcraid_initiate_reset(pinstance);
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ lock_flags);
+ return;
+ }
+ if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
+ pinstance->timestamp_error = 1;
+ pmcraid_set_timestamp(cmd);
+ }
+ } else {
+ dev_info(&pinstance->pdev->dev,
+ "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
+ }
+ /* send netlink message for HCAM notification if enabled */
+ if (!pmcraid_disable_aen)
+ pmcraid_notify_ldn(pinstance);
+
+ cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
+ if (cmd)
+ pmcraid_send_hcam_cmd(cmd);
+}
+
+/**
+ * pmcraid_register_hcams - register HCAMs for CCN and LDN
+ *
+ * @pinstance: pointer per adapter instance structure
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
+{
+ pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+ pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
+}
+
+/**
+ * pmcraid_unregister_hcams - cancel HCAMs registered already
+ * @cmd: pointer to command used as part of reset sequence
+ */
+static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+
+ /* During IOA bringdown, HCAM gets fired and tasklet proceeds with
+ * handling hcam response though it is not necessary. In order to
+ * prevent this, set 'ignore', so that bring-down sequence doesn't
+ * re-send any more hcams
+ */
+ atomic_set(&pinstance->ccn.ignore, 1);
+ atomic_set(&pinstance->ldn.ignore, 1);
+
+ /* If adapter reset was forced as part of runtime reset sequence,
+ * start the reset sequence. Reset will be triggered even in case
+ * IOA unit_check.
+ */
+ if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
+ pinstance->ioa_unit_check) {
+ pinstance->force_ioa_reset = 0;
+ pinstance->ioa_unit_check = 0;
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ pmcraid_reset_alert(cmd);
+ return;
+ }
+
+ /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
+ * one after the other. So CCN cancellation will be triggered by
+ * pmcraid_cancel_ldn itself.
+ */
+ pmcraid_cancel_ldn(cmd);
+}
+
+/**
+ * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
+ * @pinstance: pointer to adapter instance structure
+ * Return Value
+ * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
+ */
+static void pmcraid_reinit_buffers(struct pmcraid_instance *);
+
+static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
+{
+ u32 intrs;
+
+ pmcraid_reinit_buffers(pinstance);
+ intrs = pmcraid_read_interrupts(pinstance);
+
+ pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
+
+ if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
+ if (!pinstance->interrupt_mode) {
+ iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+ pinstance->int_regs.
+ ioa_host_interrupt_mask_reg);
+ iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+ pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ }
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/**
+ * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
+ * @cmd : pointer to reset command block
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 int_reg;
+ u32 doorbell;
+
+ /* There will be an interrupt when Transition to Operational bit is
+ * set so tasklet would execute next reset task. The timeout handler
+ * would re-initiate a reset
+ */
+ cmd->cmd_done = pmcraid_ioa_reset;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies +
+ msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
+ cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
+
+ if (!timer_pending(&cmd->timer))
+ add_timer(&cmd->timer);
+
+ /* Enable destructive diagnostics on IOA if it is not yet in
+ * operational state
+ */
+ doorbell = DOORBELL_RUNTIME_RESET |
+ DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
+
+ /* Since we do RESET_ALERT and Start BIST we have to again write
+ * MSIX Doorbell to indicate the interrupt mode
+ */
+ if (pinstance->interrupt_mode) {
+ iowrite32(DOORBELL_INTR_MODE_MSIX,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+ }
+
+ iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
+ int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+
+ pmcraid_info("Waiting for IOA to become operational %x:%x\n",
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
+ int_reg);
+}
+
+/**
+ * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
+{
+ pmcraid_info("%s is not yet implemented\n", __func__);
+}
+
+/**
+ * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
+ * @pinstance: pointer to adapter instance structure
+ *
+ * This function fails all outstanding ops. If they are submitted to IOA
+ * already, it sends cancel all messages if IOA is still accepting IOARCBs,
+ * otherwise just completes the commands and returns the cmd blocks to free
+ * pool.
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
+{
+ struct pmcraid_cmd *cmd, *temp;
+ unsigned long lock_flags;
+
+ /* pending command list is protected by pending_pool_lock. Its
+ * traversal must be done as within this lock
+ */
+ spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+ list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
+ free_list) {
+ list_del(&cmd->free_list);
+ spin_unlock_irqrestore(&pinstance->pending_pool_lock,
+ lock_flags);
+ cmd->ioa_cb->ioasa.ioasc =
+ cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
+ cmd->ioa_cb->ioasa.ilid =
+ cpu_to_be32(PMCRAID_DRIVER_ILID);
+
+ /* In case the command timer is still running */
+ del_timer(&cmd->timer);
+
+ /* If this is an IO command, complete it by invoking scsi_done
+ * function. If this is one of the internal commands other
+ * than pmcraid_ioa_reset and HCAM commands invoke cmd_done to
+ * complete it
+ */
+ if (cmd->scsi_cmd) {
+
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
+
+ scsi_cmd->result |= DID_ERROR << 16;
+
+ scsi_dma_unmap(scsi_cmd);
+ pmcraid_return_cmd(cmd);
+
+ pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
+ le32_to_cpu(resp) >> 2,
+ cmd->ioa_cb->ioarcb.cdb[0],
+ scsi_cmd->result);
+ scsi_cmd->scsi_done(scsi_cmd);
+ } else if (cmd->cmd_done == pmcraid_internal_done ||
+ cmd->cmd_done == pmcraid_erp_done) {
+ cmd->cmd_done(cmd);
+ } else if (cmd->cmd_done != pmcraid_ioa_reset &&
+ cmd->cmd_done != pmcraid_ioa_shutdown_done) {
+ pmcraid_return_cmd(cmd);
+ }
+
+ atomic_dec(&pinstance->outstanding_cmds);
+ spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+ }
+
+ spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
+}
+
+/**
+ * pmcraid_ioa_reset - Implementation of IOA reset logic
+ *
+ * @cmd: pointer to the cmd block to be used for entire reset process
+ *
+ * This function executes most of the steps required for IOA reset. This gets
+ * called by user threads (modprobe/insmod/rmmod) timer, tasklet and midlayer's
+ * 'eh_' thread. Access to variables used for controlling the reset sequence is
+ * synchronized using host lock. Various functions called during reset process
+ * would make use of a single command block, pointer to which is also stored in
+ * adapter instance structure.
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u8 reset_complete = 0;
+
+ pinstance->ioa_reset_in_progress = 1;
+
+ if (pinstance->reset_cmd != cmd) {
+ pmcraid_err("reset is called with different command block\n");
+ pinstance->reset_cmd = cmd;
+ }
+
+ pmcraid_info("reset_engine: state = %d, command = %p\n",
+ pinstance->ioa_state, cmd);
+
+ switch (pinstance->ioa_state) {
+
+ case IOA_STATE_DEAD:
+ /* If IOA is offline, whatever may be the reset reason, just
+ * return. callers might be waiting on the reset wait_q, wake
+ * up them
+ */
+ pmcraid_err("IOA is offline no reset is possible\n");
+ reset_complete = 1;
+ break;
+
+ case IOA_STATE_IN_BRINGDOWN:
+ /* we enter here, once ioa shutdown command is processed by IOA
+ * Alert IOA for a possible reset. If reset alert fails, IOA
+ * goes through hard-reset
+ */
+ pmcraid_disable_interrupts(pinstance, ~0);
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ pmcraid_reset_alert(cmd);
+ break;
+
+ case IOA_STATE_UNKNOWN:
+ /* We may be called during probe or resume. Some pre-processing
+ * is required for prior to reset
+ */
+ scsi_block_requests(pinstance->host);
+
+ /* If asked to reset while IOA was processing responses or
+ * there are any error responses then IOA may require
+ * hard-reset.
+ */
+ if (pinstance->ioa_hard_reset == 0) {
+ if (ioread32(pinstance->ioa_status) &
+ INTRS_TRANSITION_TO_OPERATIONAL) {
+ pmcraid_info("sticky bit set, bring-up\n");
+ pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
+ pmcraid_reinit_cmdblk(cmd);
+ pmcraid_identify_hrrq(cmd);
+ } else {
+ pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
+ pmcraid_soft_reset(cmd);
+ }
+ } else {
+ /* Alert IOA of a possible reset and wait for critical
+ * operation in progress bit to reset
+ */
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ pmcraid_reset_alert(cmd);
+ }
+ break;
+
+ case IOA_STATE_IN_RESET_ALERT:
+ /* If critical operation in progress bit is reset or wait gets
+ * timed out, reset proceeds with starting BIST on the IOA.
+ * pmcraid_ioa_hard_reset keeps a count of reset attempts. If
+ * they are 3 or more, reset engine marks IOA dead and returns
+ */
+ pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
+ pmcraid_start_bist(cmd);
+ break;
+
+ case IOA_STATE_IN_HARD_RESET:
+ pinstance->ioa_reset_attempts++;
+
+ /* retry reset if we haven't reached maximum allowed limit */
+ if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
+ pinstance->ioa_reset_attempts = 0;
+ pmcraid_err("IOA didn't respond marking it as dead\n");
+ pinstance->ioa_state = IOA_STATE_DEAD;
+
+ if (pinstance->ioa_bringdown)
+ pmcraid_notify_ioastate(pinstance,
+ PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
+ else
+ pmcraid_notify_ioastate(pinstance,
+ PMC_DEVICE_EVENT_RESET_FAILED);
+ reset_complete = 1;
+ break;
+ }
+
+ /* Once either bist or pci reset is done, restore PCI config
+ * space. If this fails, proceed with hard reset again
+ */
+ pci_restore_state(pinstance->pdev);
+
+ /* fail all pending commands */
+ pmcraid_fail_outstanding_cmds(pinstance);
+
+ /* check if unit check is active, if so extract dump */
+ if (pinstance->ioa_unit_check) {
+ pmcraid_info("unit check is active\n");
+ pinstance->ioa_unit_check = 0;
+ pmcraid_get_dump(pinstance);
+ pinstance->ioa_reset_attempts--;
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ pmcraid_reset_alert(cmd);
+ break;
+ }
+
+ /* if the reset reason is to bring-down the ioa, we might be
+ * done with the reset restore pci_config_space and complete
+ * the reset
+ */
+ if (pinstance->ioa_bringdown) {
+ pmcraid_info("bringing down the adapter\n");
+ pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
+ pinstance->ioa_bringdown = 0;
+ pinstance->ioa_state = IOA_STATE_UNKNOWN;
+ pmcraid_notify_ioastate(pinstance,
+ PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
+ reset_complete = 1;
+ } else {
+ /* bring-up IOA, so proceed with soft reset
+ * Reinitialize hrrq_buffers and their indices also
+ * enable interrupts after a pci_restore_state
+ */
+ if (pmcraid_reset_enable_ioa(pinstance)) {
+ pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
+ pmcraid_info("bringing up the adapter\n");
+ pmcraid_reinit_cmdblk(cmd);
+ pmcraid_identify_hrrq(cmd);
+ } else {
+ pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
+ pmcraid_soft_reset(cmd);
+ }
+ }
+ break;
+
+ case IOA_STATE_IN_SOFT_RESET:
+ /* TRANSITION TO OPERATIONAL is on so start initialization
+ * sequence
+ */
+ pmcraid_info("In softreset proceeding with bring-up\n");
+ pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
+
+ /* Initialization commands start with HRRQ identification. From
+ * now on tasklet completes most of the commands as IOA is up
+ * and intrs are enabled
+ */
+ pmcraid_identify_hrrq(cmd);
+ break;
+
+ case IOA_STATE_IN_BRINGUP:
+ /* we are done with bringing up of IOA, change the ioa_state to
+ * operational and wake up any waiters
+ */
+ pinstance->ioa_state = IOA_STATE_OPERATIONAL;
+ reset_complete = 1;
+ break;
+
+ case IOA_STATE_OPERATIONAL:
+ default:
+ /* When IOA is operational and a reset is requested, check for
+ * the reset reason. If reset is to bring down IOA, unregister
+ * HCAMs and initiate shutdown; if adapter reset is forced then
+ * restart reset sequence again
+ */
+ if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
+ pinstance->force_ioa_reset == 0) {
+ pmcraid_notify_ioastate(pinstance,
+ PMC_DEVICE_EVENT_RESET_SUCCESS);
+ reset_complete = 1;
+ } else {
+ if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
+ pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
+ pmcraid_reinit_cmdblk(cmd);
+ pmcraid_unregister_hcams(cmd);
+ }
+ break;
+ }
+
+ /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
+ * OPERATIONAL. Reset all control variables used during reset, wake up
+ * any waiting threads and let the SCSI mid-layer send commands. Note
+ * that host_lock must be held before invoking scsi_report_bus_reset.
+ */
+ if (reset_complete) {
+ pinstance->ioa_reset_in_progress = 0;
+ pinstance->ioa_reset_attempts = 0;
+ pinstance->reset_cmd = NULL;
+ pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
+ pinstance->ioa_bringdown = 0;
+ pmcraid_return_cmd(cmd);
+
+ /* If target state is to bring up the adapter, proceed with
+ * hcam registration and resource exposure to mid-layer.
+ */
+ if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
+ pmcraid_register_hcams(pinstance);
+
+ wake_up_all(&pinstance->reset_wait_q);
+ }
+
+ return;
+}
+
+/**
+ * pmcraid_initiate_reset - initiates reset sequence. This is called from
+ * ISR/tasklet during error interrupts including IOA unit check. If reset
+ * is already in progress, it just returns, otherwise initiates IOA reset
+ * to bring IOA up to operational state.
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
+{
+ struct pmcraid_cmd *cmd;
+
+ /* If the reset is already in progress, just return, otherwise start
+ * reset sequence and return
+ */
+ if (!pinstance->ioa_reset_in_progress) {
+ scsi_block_requests(pinstance->host);
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (cmd == NULL) {
+ pmcraid_err("no cmnd blocks for initiate_reset\n");
+ return;
+ }
+
+ pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
+ pinstance->reset_cmd = cmd;
+ pinstance->force_ioa_reset = 1;
+ pmcraid_notify_ioastate(pinstance,
+ PMC_DEVICE_EVENT_RESET_START);
+ pmcraid_ioa_reset(cmd);
+ }
+}
+
+/**
+ * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
+ * or bringdown IOA
+ * @pinstance: pointer adapter instance structure
+ * @shutdown_type: shutdown type to be used NONE, NORMAL or ABRREV
+ * @target_state: expected target state after reset
+ *
+ * Note: This command initiates reset and waits for its completion. Hence this
+ * should not be called from isr/timer/tasklet functions (timeout handlers,
+ * error response handlers and interrupt handlers).
+ *
+ * Return Value
+ * 1 in case ioa_state is not target_state, 0 otherwise.
+ */
+static int pmcraid_reset_reload(
+ struct pmcraid_instance *pinstance,
+ u8 shutdown_type,
+ u8 target_state
+)
+{
+ struct pmcraid_cmd *reset_cmd = NULL;
+ unsigned long lock_flags;
+ int reset = 1;
+
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+
+ if (pinstance->ioa_reset_in_progress) {
+ pmcraid_info("reset_reload: reset is already in progress\n");
+
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+ wait_event(pinstance->reset_wait_q,
+ !pinstance->ioa_reset_in_progress);
+
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+
+ if (pinstance->ioa_state == IOA_STATE_DEAD) {
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ lock_flags);
+ pmcraid_info("reset_reload: IOA is dead\n");
+ return reset;
+ } else if (pinstance->ioa_state == target_state) {
+ reset = 0;
+ }
+ }
+
+ if (reset) {
+ pmcraid_info("reset_reload: proceeding with reset\n");
+ scsi_block_requests(pinstance->host);
+ reset_cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (reset_cmd == NULL) {
+ pmcraid_err("no free cmnd for reset_reload\n");
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ lock_flags);
+ return reset;
+ }
+
+ if (shutdown_type == SHUTDOWN_NORMAL)
+ pinstance->ioa_bringdown = 1;
+
+ pinstance->ioa_shutdown_type = shutdown_type;
+ pinstance->reset_cmd = reset_cmd;
+ pinstance->force_ioa_reset = reset;
+ pmcraid_info("reset_reload: initiating reset\n");
+ pmcraid_ioa_reset(reset_cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ pmcraid_info("reset_reload: waiting for reset to complete\n");
+ wait_event(pinstance->reset_wait_q,
+ !pinstance->ioa_reset_in_progress);
+
+ pmcraid_info("reset_reload: reset is complete !!\n");
+ scsi_unblock_requests(pinstance->host);
+ if (pinstance->ioa_state == target_state)
+ reset = 0;
+ }
+
+ return reset;
+}
+
+/**
+ * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ * whatever is returned from pmcraid_reset_reload
+ */
+static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
+{
+ return pmcraid_reset_reload(pinstance,
+ SHUTDOWN_NORMAL,
+ IOA_STATE_UNKNOWN);
+}
+
+/**
+ * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ * whatever is returned from pmcraid_reset_reload
+ */
+static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
+{
+ pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
+
+ return pmcraid_reset_reload(pinstance,
+ SHUTDOWN_NONE,
+ IOA_STATE_OPERATIONAL);
+}
+
+/**
+ * pmcraid_request_sense - Send request sense to a device
+ * @cmd: pmcraid command struct
+ *
+ * This function sends a request sense to a device as a result of a check
+ * condition. This method re-uses the same command block that failed earlier.
+ */
+static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+ /* allocate DMAable memory for sense buffers */
+ cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma);
+
+ if (cmd->sense_buffer == NULL) {
+ pmcraid_err
+ ("couldn't allocate sense buffer for request sense\n");
+ pmcraid_erp_done(cmd);
+ return;
+ }
+
+ /* re-use the command block */
+ memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
+ memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+ ioarcb->request_flags0 = (SYNC_COMPLETE |
+ NO_LINK_DESCS |
+ INHIBIT_UL_CHECK);
+ ioarcb->request_type = REQ_TYPE_SCSI;
+ ioarcb->cdb[0] = REQUEST_SENSE;
+ ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+
+ ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+
+ ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
+ ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
+
+ /* request sense might be called as part of error response processing
+ * which runs in tasklets context. It is possible that mid-layer might
+ * schedule queuecommand during this time, hence, writting to IOARRIN
+ * must be protect by host_lock
+ */
+ pmcraid_send_cmd(cmd, pmcraid_erp_done,
+ PMCRAID_REQUEST_SENSE_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
+ * @cmd: command that failed
+ * @sense: true if request_sense is required after cancel all
+ *
+ * This function sends a cancel all to a device to clear the queue.
+ */
+static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
+{
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
+ void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
+ : pmcraid_request_sense;
+
+ memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+ ioarcb->request_flags0 = SYNC_OVERRIDE;
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
+
+ if (RES_IS_GSCSI(res->cfg_entry))
+ ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
+
+ ioarcb->ioadl_bus_addr = 0;
+ ioarcb->ioadl_length = 0;
+ ioarcb->data_transfer_length = 0;
+ ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+
+ /* writing to IOARRIN must be protected by host_lock, as mid-layer
+ * schedule queuecommand while we are doing this
+ */
+ pmcraid_send_cmd(cmd, cmd_done,
+ PMCRAID_REQUEST_SENSE_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_frame_auto_sense: frame fixed format sense information
+ *
+ * @cmd: pointer to failing command block
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
+{
+ u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
+ struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
+ struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
+ u32 ioasc = le32_to_cpu(ioasa->ioasc);
+ u32 failing_lba = 0;
+
+ memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+ cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
+
+ if (RES_IS_VSET(res->cfg_entry) &&
+ ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
+ ioasa->u.vset.failing_lba_hi != 0) {
+
+ sense_buf[0] = 0x72;
+ sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
+ sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
+ sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
+
+ sense_buf[7] = 12;
+ sense_buf[8] = 0;
+ sense_buf[9] = 0x0A;
+ sense_buf[10] = 0x80;
+
+ failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
+
+ sense_buf[12] = (failing_lba & 0xff000000) >> 24;
+ sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
+ sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
+ sense_buf[15] = failing_lba & 0x000000ff;
+
+ failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
+
+ sense_buf[16] = (failing_lba & 0xff000000) >> 24;
+ sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
+ sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
+ sense_buf[19] = failing_lba & 0x000000ff;
+ } else {
+ sense_buf[0] = 0x70;
+ sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
+ sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
+ sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
+
+ if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
+ if (RES_IS_VSET(res->cfg_entry))
+ failing_lba =
+ le32_to_cpu(ioasa->u.
+ vset.failing_lba_lo);
+ sense_buf[0] |= 0x80;
+ sense_buf[3] = (failing_lba >> 24) & 0xff;
+ sense_buf[4] = (failing_lba >> 16) & 0xff;
+ sense_buf[5] = (failing_lba >> 8) & 0xff;
+ sense_buf[6] = failing_lba & 0xff;
+ }
+
+ sense_buf[7] = 6; /* additional length */
+ }
+}
+
+/**
+ * pmcraid_error_handler - Error response handlers for a SCSI op
+ * @cmd: pointer to pmcraid_cmd that has failed
+ *
+ * This function determines whether or not to initiate ERP on the affected
+ * device. This is called from a tasklet, which doesn't hold any locks.
+ *
+ * Return value:
+ * 0 it caller can complete the request, otherwise 1 where in error
+ * handler itself completes the request and returns the command block
+ * back to free-pool
+ */
+static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
+{
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
+ u32 ioasc = le32_to_cpu(ioasa->ioasc);
+ u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
+ u32 sense_copied = 0;
+
+ if (!res) {
+ pmcraid_info("resource pointer is NULL\n");
+ return 0;
+ }
+
+ /* If this was a SCSI read/write command keep count of errors */
+ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+ atomic_inc(&res->read_failures);
+ else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+ atomic_inc(&res->write_failures);
+
+ if (!RES_IS_GSCSI(res->cfg_entry) &&
+ masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+ pmcraid_frame_auto_sense(cmd);
+ }
+
+ /* Log IOASC/IOASA information based on user settings */
+ pmcraid_ioasc_logger(ioasc, cmd);
+
+ switch (masked_ioasc) {
+
+ case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
+ scsi_cmd->result |= (DID_ABORT << 16);
+ break;
+
+ case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
+ case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
+ scsi_cmd->result |= (DID_NO_CONNECT << 16);
+ break;
+
+ case PMCRAID_IOASC_NR_SYNC_REQUIRED:
+ res->sync_reqd = 1;
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ break;
+
+ case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
+ scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+ break;
+
+ case PMCRAID_IOASC_UA_BUS_WAS_RESET:
+ case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
+ if (!res->reset_progress)
+ scsi_report_bus_reset(pinstance->host,
+ scsi_cmd->device->channel);
+ scsi_cmd->result |= (DID_ERROR << 16);
+ break;
+
+ case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
+ scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
+ res->sync_reqd = 1;
+
+ /* if check_condition is not active return with error otherwise
+ * get/frame the sense buffer
+ */
+ if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
+ SAM_STAT_CHECK_CONDITION &&
+ PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
+ return 0;
+
+ /* If we have auto sense data as part of IOASA pass it to
+ * mid-layer
+ */
+ if (ioasa->auto_sense_length != 0) {
+ short sense_len = ioasa->auto_sense_length;
+ int data_size = min_t(u16, le16_to_cpu(sense_len),
+ SCSI_SENSE_BUFFERSIZE);
+
+ memcpy(scsi_cmd->sense_buffer,
+ ioasa->sense_data,
+ data_size);
+ sense_copied = 1;
+ }
+
+ if (RES_IS_GSCSI(res->cfg_entry))
+ pmcraid_cancel_all(cmd, sense_copied);
+ else if (sense_copied)
+ pmcraid_erp_done(cmd);
+ else
+ pmcraid_request_sense(cmd);
+
+ return 1;
+
+ case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
+ break;
+
+ default:
+ if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
+ scsi_cmd->result |= (DID_ERROR << 16);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * pmcraid_reset_device - device reset handler functions
+ *
+ * @scsi_cmd: scsi command struct
+ * @modifier: reset modifier indicating the reset sequence to be performed
+ *
+ * This function issues a device reset to the affected device.
+ * A LUN reset will be sent to the device first. If that does
+ * not work, a target reset will be sent.
+ *
+ * Return value:
+ * SUCCESS / FAILED
+ */
+static int pmcraid_reset_device(
+ struct scsi_cmnd *scsi_cmd,
+ unsigned long timeout,
+ u8 modifier
+)
+{
+ struct pmcraid_cmd *cmd;
+ struct pmcraid_instance *pinstance;
+ struct pmcraid_resource_entry *res;
+ struct pmcraid_ioarcb *ioarcb;
+ unsigned long lock_flags;
+ u32 ioasc;
+
+ pinstance =
+ (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+ res = scsi_cmd->device->hostdata;
+
+ if (!res) {
+ sdev_printk(KERN_ERR, scsi_cmd->device,
+ "reset_device: NULL resource pointer\n");
+ return FAILED;
+ }
+
+ /* If adapter is currently going through reset/reload, return failed.
+ * This will force the mid-layer to call _eh_bus/host reset, which
+ * will then go to sleep and wait for the reset to complete
+ */
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ if (pinstance->ioa_reset_in_progress ||
+ pinstance->ioa_state == IOA_STATE_DEAD) {
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ return FAILED;
+ }
+
+ res->reset_progress = 1;
+ pmcraid_info("Resetting %s resource with addr %x\n",
+ ((modifier & RESET_DEVICE_LUN) ? "LUN" :
+ ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
+ le32_to_cpu(res->cfg_entry.resource_address));
+
+ /* get a free cmd block */
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (cmd == NULL) {
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ pmcraid_err("%s: no cmd blocks are available\n", __func__);
+ return FAILED;
+ }
+
+ ioarcb = &cmd->ioa_cb->ioarcb;
+ ioarcb->resource_handle = res->cfg_entry.resource_handle;
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
+
+ /* Initialize reset modifier bits */
+ if (modifier)
+ modifier = ENABLE_RESET_MODIFIER | modifier;
+
+ ioarcb->cdb[1] = modifier;
+
+ init_completion(&cmd->wait_for_completion);
+ cmd->completion_req = 1;
+
+ pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
+
+ pmcraid_send_cmd(cmd,
+ pmcraid_internal_done,
+ timeout,
+ pmcraid_timeout_handler);
+
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+ /* RESET_DEVICE command completes after all pending IOARCBs are
+ * completed. Once this command is completed, pmcraind_internal_done
+ * will wake up the 'completion' queue.
+ */
+ wait_for_completion(&cmd->wait_for_completion);
+
+ /* complete the command here itself and return the command block
+ * to free list
+ */
+ pmcraid_return_cmd(cmd);
+ res->reset_progress = 0;
+ ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+
+ /* set the return value based on the returned ioasc */
+ return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
+}
+
+/**
+ * _pmcraid_io_done - helper for pmcraid_io_done function
+ *
+ * @cmd: pointer to pmcraid command struct
+ * @reslen: residual data length to be set in the ioasa
+ * @ioasc: ioasc either returned by IOA or set by driver itself.
+ *
+ * This function is invoked by pmcraid_io_done to complete mid-layer
+ * scsi ops.
+ *
+ * Return value:
+ * 0 if caller is required to return it to free_pool. Returns 1 if
+ * caller need not worry about freeing command block as error handler
+ * will take care of that.
+ */
+
+static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
+{
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ int rc = 0;
+
+ scsi_set_resid(scsi_cmd, reslen);
+
+ pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+ cmd->ioa_cb->ioarcb.cdb[0],
+ ioasc, scsi_cmd->result);
+
+ if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
+ rc = pmcraid_error_handler(cmd);
+
+ if (rc == 0) {
+ scsi_dma_unmap(scsi_cmd);
+ scsi_cmd->scsi_done(scsi_cmd);
+ }
+
+ return rc;
+}
+
+/**
+ * pmcraid_io_done - SCSI completion function
+ *
+ * @cmd: pointer to pmcraid command struct
+ *
+ * This function is invoked by tasklet/mid-layer error handler to completing
+ * the SCSI ops sent from mid-layer.
+ *
+ * Return value
+ * none
+ */
+
+static void pmcraid_io_done(struct pmcraid_cmd *cmd)
+{
+ u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+ u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
+
+ if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
+ pmcraid_return_cmd(cmd);
+}
+
+/**
+ * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
+ *
+ * @cmd: command block of the command to be aborted
+ *
+ * Return Value:
+ * returns pointer to command structure used as cancelling cmd
+ */
+static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_cmd *cancel_cmd;
+ struct pmcraid_instance *pinstance;
+ struct pmcraid_resource_entry *res;
+
+ pinstance = (struct pmcraid_instance *)cmd->drv_inst;
+ res = cmd->scsi_cmd->device->hostdata;
+
+ cancel_cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (cancel_cmd == NULL) {
+ pmcraid_err("%s: no cmd blocks are available\n", __func__);
+ return NULL;
+ }
+
+ pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
+
+ pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ cmd->ioa_cb->ioarcb.response_handle >> 2);
+
+ init_completion(&cancel_cmd->wait_for_completion);
+ cancel_cmd->completion_req = 1;
+
+ pmcraid_info("command (%d) CDB[0] = %x for %x\n",
+ le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
+ cancel_cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
+
+ pmcraid_send_cmd(cancel_cmd,
+ pmcraid_internal_done,
+ PMCRAID_INTERNAL_TIMEOUT,
+ pmcraid_timeout_handler);
+ return cancel_cmd;
+}
+
+/**
+ * pmcraid_abort_complete - Waits for ABORT TASK completion
+ *
+ * @cancel_cmd: command block use as cancelling command
+ *
+ * Return Value:
+ * returns SUCCESS if ABORT TASK has good completion
+ * otherwise FAILED
+ */
+static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
+{
+ struct pmcraid_resource_entry *res;
+ u32 ioasc;
+
+ wait_for_completion(&cancel_cmd->wait_for_completion);
+ res = cancel_cmd->res;
+ cancel_cmd->res = NULL;
+ ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
+
+ /* If the abort task is not timed out we will get a Good completion
+ * as sense_key, otherwise we may get one the following responses
+ * due to subsequent bus reset or device reset. In case IOASC is
+ * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource
+ */
+ if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
+ ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
+ if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
+ res->sync_reqd = 1;
+ ioasc = 0;
+ }
+
+ /* complete the command here itself */
+ pmcraid_return_cmd(cancel_cmd);
+ return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
+}
+
+/**
+ * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
+ *
+ * @scsi_cmd: scsi command struct given by mid-layer. When this is called
+ * mid-layer ensures that no other commands are queued. This
+ * never gets called under interrupt, but a separate eh thread.
+ *
+ * Return value:
+ * SUCCESS / FAILED
+ */
+static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
+{
+ struct pmcraid_instance *pinstance;
+ struct pmcraid_cmd *cmd;
+ struct pmcraid_resource_entry *res;
+ unsigned long host_lock_flags;
+ unsigned long pending_lock_flags;
+ struct pmcraid_cmd *cancel_cmd = NULL;
+ int cmd_found = 0;
+ int rc = FAILED;
+
+ pinstance =
+ (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+
+ scmd_printk(KERN_INFO, scsi_cmd,
+ "I/O command timed out, aborting it.\n");
+
+ res = scsi_cmd->device->hostdata;
+
+ if (res == NULL)
+ return rc;
+
+ /* If we are currently going through reset/reload, return failed.
+ * This will force the mid-layer to eventually call
+ * pmcraid_eh_host_reset which will then go to sleep and wait for the
+ * reset to complete
+ */
+ spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
+
+ if (pinstance->ioa_reset_in_progress ||
+ pinstance->ioa_state == IOA_STATE_DEAD) {
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ host_lock_flags);
+ return rc;
+ }
+
+ /* loop over pending cmd list to find cmd corresponding to this
+ * scsi_cmd. Note that this command might not have been completed
+ * already. locking: all pending commands are protected with
+ * pending_pool_lock.
+ */
+ spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
+ list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
+
+ if (cmd->scsi_cmd == scsi_cmd) {
+ cmd_found = 1;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pinstance->pending_pool_lock,
+ pending_lock_flags);
+
+ /* If the command to be aborted was given to IOA and still pending with
+ * it, send ABORT_TASK to abort this and wait for its completion
+ */
+ if (cmd_found)
+ cancel_cmd = pmcraid_abort_cmd(cmd);
+
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ host_lock_flags);
+
+ if (cancel_cmd) {
+ cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
+ rc = pmcraid_abort_complete(cancel_cmd);
+ }
+
+ return cmd_found ? rc : SUCCESS;
+}
+
+/**
+ * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
+ *
+ * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
+ *
+ * All these routines invokve pmcraid_reset_device with appropriate parameters.
+ * Since these are called from mid-layer EH thread, no other IO will be queued
+ * to the resource being reset. However, control path (IOCTL) may be active so
+ * it is necessary to synchronize IOARRIN writes which pmcraid_reset_device
+ * takes care by locking/unlocking host_lock.
+ *
+ * Return value
+ * SUCCESS or FAILED
+ */
+static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ scmd_printk(KERN_INFO, scmd,
+ "resetting device due to an I/O command timeout.\n");
+ return pmcraid_reset_device(scmd,
+ PMCRAID_INTERNAL_TIMEOUT,
+ RESET_DEVICE_LUN);
+}
+
+static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ scmd_printk(KERN_INFO, scmd,
+ "Doing bus reset due to an I/O command timeout.\n");
+ return pmcraid_reset_device(scmd,
+ PMCRAID_RESET_BUS_TIMEOUT,
+ RESET_DEVICE_BUS);
+}
+
+static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
+{
+ scmd_printk(KERN_INFO, scmd,
+ "Doing target reset due to an I/O command timeout.\n");
+ return pmcraid_reset_device(scmd,
+ PMCRAID_INTERNAL_TIMEOUT,
+ RESET_DEVICE_TARGET);
+}
+
+/**
+ * pmcraid_eh_host_reset_handler - adapter reset handler callback
+ *
+ * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
+ *
+ * Initiates adapter reset to bring it up to operational state
+ *
+ * Return value
+ * SUCCESS or FAILED
+ */
+static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
+{
+ unsigned long interval = 10000; /* 10 seconds interval */
+ int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
+ struct pmcraid_instance *pinstance =
+ (struct pmcraid_instance *)(scmd->device->host->hostdata);
+
+
+ /* wait for an additional 150 seconds just in case firmware could come
+ * up and if it could complete all the pending commands excluding the
+ * two HCAM (CCN and LDN).
+ */
+ while (waits--) {
+ if (atomic_read(&pinstance->outstanding_cmds) <=
+ PMCRAID_MAX_HCAM_CMD)
+ return SUCCESS;
+ msleep(interval);
+ }
+
+ dev_err(&pinstance->pdev->dev,
+ "Adapter being reset due to an I/O command timeout.\n");
+ return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
+}
+
+/**
+ * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
+ * @cmd: pmcraid command struct
+ * @sgcount: count of scatter-gather elements
+ *
+ * Return value
+ * returns pointer pmcraid_ioadl_desc, initialized to point to internal
+ * or external IOADLs
+ */
+struct pmcraid_ioadl_desc *
+pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
+{
+ struct pmcraid_ioadl_desc *ioadl;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ int ioadl_count = 0;
+
+ if (ioarcb->add_cmd_param_length)
+ ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
+ ioarcb->ioadl_length =
+ sizeof(struct pmcraid_ioadl_desc) * sgcount;
+
+ if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
+ /* external ioadls start at offset 0x80 from control_block
+ * structure, re-using 24 out of 27 ioadls part of IOARCB.
+ * It is necessary to indicate to firmware that driver is
+ * using ioadls to be treated as external to IOARCB.
+ */
+ ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+ ioarcb->ioadl_bus_addr =
+ cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[3]));
+ ioadl = &ioarcb->add_data.u.ioadl[3];
+ } else {
+ ioarcb->ioadl_bus_addr =
+ cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[ioadl_count]));
+
+ ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
+ ioarcb->ioarcb_bus_addr |=
+ DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
+ }
+
+ return ioadl;
+}
+
+/**
+ * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
+ * @pinstance: pointer to adapter instance structure
+ * @cmd: pmcraid command struct
+ *
+ * This function is invoked by queuecommand entry point while sending a command
+ * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
+ *
+ * Return value:
+ * 0 on success or -1 on failure
+ */
+static int pmcraid_build_ioadl(
+ struct pmcraid_instance *pinstance,
+ struct pmcraid_cmd *cmd
+)
+{
+ int i, nseg;
+ struct scatterlist *sglist;
+
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+ u32 length = scsi_bufflen(scsi_cmd);
+
+ if (!length)
+ return 0;
+
+ nseg = scsi_dma_map(scsi_cmd);
+
+ if (nseg < 0) {
+ scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n");
+ return -1;
+ } else if (nseg > PMCRAID_MAX_IOADLS) {
+ scsi_dma_unmap(scsi_cmd);
+ scmd_printk(KERN_ERR, scsi_cmd,
+ "sg count is (%d) more than allowed!\n", nseg);
+ return -1;
+ }
+
+ /* Initialize IOARCB data transfer length fields */
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
+ ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
+
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+ ioarcb->data_transfer_length = cpu_to_le32(length);
+ ioadl = pmcraid_init_ioadls(cmd, nseg);
+
+ /* Initialize IOADL descriptor addresses */
+ scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
+ ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
+ ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
+ ioadl[i].flags = 0;
+ }
+ /* setup last descriptor */
+ ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
+
+ return 0;
+}
+
+/**
+ * pmcraid_free_sglist - Frees an allocated SG buffer list
+ * @sglist: scatter/gather list pointer
+ *
+ * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
+{
+ int i;
+
+ for (i = 0; i < sglist->num_sg; i++)
+ __free_pages(sg_page(&(sglist->scatterlist[i])),
+ sglist->order);
+
+ kfree(sglist);
+}
+
+/**
+ * pmcraid_alloc_sglist - Allocates memory for a SG list
+ * @buflen: buffer length
+ *
+ * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
+ * list.
+ *
+ * Return value
+ * pointer to sglist / NULL on failure
+ */
+static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
+{
+ struct pmcraid_sglist *sglist;
+ struct scatterlist *scatterlist;
+ struct page *page;
+ int num_elem, i, j;
+ int sg_size;
+ int order;
+ int bsize_elem;
+
+ sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
+ order = (sg_size > 0) ? get_order(sg_size) : 0;
+ bsize_elem = PAGE_SIZE * (1 << order);
+
+ /* Determine the actual number of sg entries needed */
+ if (buflen % bsize_elem)
+ num_elem = (buflen / bsize_elem) + 1;
+ else
+ num_elem = buflen / bsize_elem;
+
+ /* Allocate a scatter/gather list for the DMA */
+ sglist = kzalloc(sizeof(struct pmcraid_sglist) +
+ (sizeof(struct scatterlist) * (num_elem - 1)),
+ GFP_KERNEL);
+
+ if (sglist == NULL)
+ return NULL;
+
+ scatterlist = sglist->scatterlist;
+ sg_init_table(scatterlist, num_elem);
+ sglist->order = order;
+ sglist->num_sg = num_elem;
+ sg_size = buflen;
+
+ for (i = 0; i < num_elem; i++) {
+ page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
+ if (!page) {
+ for (j = i - 1; j >= 0; j--)
+ __free_pages(sg_page(&scatterlist[j]), order);
+ kfree(sglist);
+ return NULL;
+ }
+
+ sg_set_page(&scatterlist[i], page,
+ sg_size < bsize_elem ? sg_size : bsize_elem, 0);
+ sg_size -= bsize_elem;
+ }
+
+ return sglist;
+}
+
+/**
+ * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
+ * @sglist: scatter/gather list pointer
+ * @buffer: buffer pointer
+ * @len: buffer length
+ * @direction: data transfer direction
+ *
+ * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
+ *
+ * Return value:
+ * 0 on success / other on failure
+ */
+static int pmcraid_copy_sglist(
+ struct pmcraid_sglist *sglist,
+ unsigned long buffer,
+ u32 len,
+ int direction
+)
+{
+ struct scatterlist *scatterlist;
+ void *kaddr;
+ int bsize_elem;
+ int i;
+ int rc = 0;
+
+ /* Determine the actual number of bytes per element */
+ bsize_elem = PAGE_SIZE * (1 << sglist->order);
+
+ scatterlist = sglist->scatterlist;
+
+ for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
+ if (direction == DMA_TO_DEVICE)
+ rc = __copy_from_user(kaddr,
+ (void *)buffer,
+ bsize_elem);
+ else
+ rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
+
+ kunmap(page);
+
+ if (rc) {
+ pmcraid_err("failed to copy user data into sg list\n");
+ return -EFAULT;
+ }
+
+ scatterlist[i].length = bsize_elem;
+ }
+
+ if (len % bsize_elem) {
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
+
+ if (direction == DMA_TO_DEVICE)
+ rc = __copy_from_user(kaddr,
+ (void *)buffer,
+ len % bsize_elem);
+ else
+ rc = __copy_to_user((void *)buffer,
+ kaddr,
+ len % bsize_elem);
+
+ kunmap(page);
+
+ scatterlist[i].length = len % bsize_elem;
+ }
+
+ if (rc) {
+ pmcraid_err("failed to copy user data into sg list\n");
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+/**
+ * pmcraid_queuecommand - Queue a mid-layer request
+ * @scsi_cmd: scsi command struct
+ * @done: done function
+ *
+ * This function queues a request generated by the mid-layer. Midlayer calls
+ * this routine within host->lock. Some of the functions called by queuecommand
+ * would use cmd block queue locks (free_pool_lock and pending_pool_lock)
+ *
+ * Return value:
+ * 0 on success
+ * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
+ * SCSI_MLQUEUE_HOST_BUSY if host is busy
+ */
+static int pmcraid_queuecommand_lck(
+ struct scsi_cmnd *scsi_cmd,
+ void (*done) (struct scsi_cmnd *)
+)
+{
+ struct pmcraid_instance *pinstance;
+ struct pmcraid_resource_entry *res;
+ struct pmcraid_ioarcb *ioarcb;
+ struct pmcraid_cmd *cmd;
+ u32 fw_version;
+ int rc = 0;
+
+ pinstance =
+ (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+ scsi_cmd->scsi_done = done;
+ res = scsi_cmd->device->hostdata;
+ scsi_cmd->result = (DID_OK << 16);
+
+ /* if adapter is marked as dead, set result to DID_NO_CONNECT complete
+ * the command
+ */
+ if (pinstance->ioa_state == IOA_STATE_DEAD) {
+ pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
+ scsi_cmd->result = (DID_NO_CONNECT << 16);
+ scsi_cmd->scsi_done(scsi_cmd);
+ return 0;
+ }
+
+ /* If IOA reset is in progress, can't queue the commands */
+ if (pinstance->ioa_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete
+ * the command here itself with success return
+ */
+ if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
+ pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
+ scsi_cmd->scsi_done(scsi_cmd);
+ return 0;
+ }
+
+ /* initialize the command and IOARCB to be sent to IOA */
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (cmd == NULL) {
+ pmcraid_err("free command block is not available\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ cmd->scsi_cmd = scsi_cmd;
+ ioarcb = &(cmd->ioa_cb->ioarcb);
+ memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
+ ioarcb->resource_handle = res->cfg_entry.resource_handle;
+ ioarcb->request_type = REQ_TYPE_SCSI;
+
+ /* set hrrq number where the IOA should respond to. Note that all cmds
+ * generated internally uses hrrq_id 0, exception to this is the cmd
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+ ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+ cmd->cmd_done = pmcraid_io_done;
+
+ if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
+ if (scsi_cmd->underflow == 0)
+ ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
+
+ if (res->sync_reqd) {
+ ioarcb->request_flags0 |= SYNC_COMPLETE;
+ res->sync_reqd = 0;
+ }
+
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+
+ if (scsi_cmd->flags & SCMD_TAGGED)
+ ioarcb->request_flags1 |= TASK_TAG_SIMPLE;
+
+ if (RES_IS_GSCSI(res->cfg_entry))
+ ioarcb->request_flags1 |= DELAY_AFTER_RESET;
+ }
+
+ rc = pmcraid_build_ioadl(pinstance, cmd);
+
+ pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
+ le32_to_cpu(ioarcb->response_handle) >> 2,
+ scsi_cmd->cmnd[0], pinstance->host->unique_id,
+ RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
+ PMCRAID_PHYS_BUS_ID,
+ RES_IS_VSET(res->cfg_entry) ?
+ (fw_version <= PMCRAID_FW_VERSION_1 ?
+ res->cfg_entry.unique_flags1 :
+ res->cfg_entry.array_id & 0xFF) :
+ RES_TARGET(res->cfg_entry.resource_address),
+ RES_LUN(res->cfg_entry.resource_address));
+
+ if (likely(rc == 0)) {
+ _pmcraid_fire_command(cmd);
+ } else {
+ pmcraid_err("queuecommand could not build ioadl\n");
+ pmcraid_return_cmd(cmd);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ return rc;
+}
+
+static DEF_SCSI_QCMD(pmcraid_queuecommand)
+
+/**
+ * pmcraid_open -char node "open" entry, allowed only users with admin access
+ */
+static int pmcraid_chr_open(struct inode *inode, struct file *filep)
+{
+ struct pmcraid_instance *pinstance;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ /* Populate adapter instance * pointer for use by ioctl */
+ pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
+ filep->private_data = pinstance;
+
+ return 0;
+}
+
+/**
+ * pmcraid_fasync - Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
+{
+ struct pmcraid_instance *pinstance;
+ int rc;
+
+ pinstance = filep->private_data;
+ mutex_lock(&pinstance->aen_queue_lock);
+ rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
+ mutex_unlock(&pinstance->aen_queue_lock);
+
+ return rc;
+}
+
+
+/**
+ * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
+ * commands sent over IOCTL interface
+ *
+ * @cmd : pointer to struct pmcraid_cmd
+ * @buflen : length of the request buffer
+ * @direction : data transfer direction
+ *
+ * Return value
+ * 0 on success, non-zero error code on failure
+ */
+static int pmcraid_build_passthrough_ioadls(
+ struct pmcraid_cmd *cmd,
+ int buflen,
+ int direction
+)
+{
+ struct pmcraid_sglist *sglist = NULL;
+ struct scatterlist *sg = NULL;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_ioadl_desc *ioadl;
+ int i;
+
+ sglist = pmcraid_alloc_sglist(buflen);
+
+ if (!sglist) {
+ pmcraid_err("can't allocate memory for passthrough SGls\n");
+ return -ENOMEM;
+ }
+
+ sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
+ sglist->scatterlist,
+ sglist->num_sg, direction);
+
+ if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
+ dev_err(&cmd->drv_inst->pdev->dev,
+ "Failed to map passthrough buffer!\n");
+ pmcraid_free_sglist(sglist);
+ return -EIO;
+ }
+
+ cmd->sglist = sglist;
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+
+ ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
+
+ /* Initialize IOADL descriptor addresses */
+ for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
+ ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
+ ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
+ ioadl[i].flags = 0;
+ }
+
+ /* setup the last descriptor */
+ ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
+
+ return 0;
+}
+
+
+/**
+ * pmcraid_release_passthrough_ioadls - release passthrough ioadls
+ *
+ * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
+ * @buflen: size of the request buffer
+ * @direction: data transfer direction
+ *
+ * Return value
+ * 0 on success, non-zero error code on failure
+ */
+static void pmcraid_release_passthrough_ioadls(
+ struct pmcraid_cmd *cmd,
+ int buflen,
+ int direction
+)
+{
+ struct pmcraid_sglist *sglist = cmd->sglist;
+
+ if (buflen > 0) {
+ pci_unmap_sg(cmd->drv_inst->pdev,
+ sglist->scatterlist,
+ sglist->num_sg,
+ direction);
+ pmcraid_free_sglist(sglist);
+ cmd->sglist = NULL;
+ }
+}
+
+/**
+ * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @cmd: ioctl code
+ * @arg: pointer to pmcraid_passthrough_buffer user buffer
+ *
+ * Return value
+ * 0 on success, non-zero error code on failure
+ */
+static long pmcraid_ioctl_passthrough(
+ struct pmcraid_instance *pinstance,
+ unsigned int ioctl_cmd,
+ unsigned int buflen,
+ unsigned long arg
+)
+{
+ struct pmcraid_passthrough_ioctl_buffer *buffer;
+ struct pmcraid_ioarcb *ioarcb;
+ struct pmcraid_cmd *cmd;
+ struct pmcraid_cmd *cancel_cmd;
+ unsigned long request_buffer;
+ unsigned long request_offset;
+ unsigned long lock_flags;
+ void *ioasa;
+ u32 ioasc;
+ int request_size;
+ int buffer_size;
+ u8 access, direction;
+ int rc = 0;
+
+ /* If IOA reset is in progress, wait 10 secs for reset to complete */
+ if (pinstance->ioa_reset_in_progress) {
+ rc = wait_event_interruptible_timeout(
+ pinstance->reset_wait_q,
+ !pinstance->ioa_reset_in_progress,
+ msecs_to_jiffies(10000));
+
+ if (!rc)
+ return -ETIMEDOUT;
+ else if (rc < 0)
+ return -ERESTARTSYS;
+ }
+
+ /* If adapter is not in operational state, return error */
+ if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
+ pmcraid_err("IOA is not operational\n");
+ return -ENOTTY;
+ }
+
+ buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+
+ if (!buffer) {
+ pmcraid_err("no memory for passthrough buffer\n");
+ return -ENOMEM;
+ }
+
+ request_offset =
+ offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
+
+ request_buffer = arg + request_offset;
+
+ rc = __copy_from_user(buffer,
+ (struct pmcraid_passthrough_ioctl_buffer *) arg,
+ sizeof(struct pmcraid_passthrough_ioctl_buffer));
+
+ ioasa =
+ (void *)(arg +
+ offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
+
+ if (rc) {
+ pmcraid_err("ioctl: can't copy passthrough buffer\n");
+ rc = -EFAULT;
+ goto out_free_buffer;
+ }
+
+ request_size = buffer->ioarcb.data_transfer_length;
+
+ if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
+ access = VERIFY_READ;
+ direction = DMA_TO_DEVICE;
+ } else {
+ access = VERIFY_WRITE;
+ direction = DMA_FROM_DEVICE;
+ }
+
+ if (request_size > 0) {
+ rc = access_ok(access, arg, request_offset + request_size);
+
+ if (!rc) {
+ rc = -EFAULT;
+ goto out_free_buffer;
+ }
+ } else if (request_size < 0) {
+ rc = -EINVAL;
+ goto out_free_buffer;
+ }
+
+ /* check if we have any additional command parameters */
+ if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
+ rc = -EINVAL;
+ goto out_free_buffer;
+ }
+
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (!cmd) {
+ pmcraid_err("free command block is not available\n");
+ rc = -ENOMEM;
+ goto out_free_buffer;
+ }
+
+ cmd->scsi_cmd = NULL;
+ ioarcb = &(cmd->ioa_cb->ioarcb);
+
+ /* Copy the user-provided IOARCB stuff field by field */
+ ioarcb->resource_handle = buffer->ioarcb.resource_handle;
+ ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
+ ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
+ ioarcb->request_type = buffer->ioarcb.request_type;
+ ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
+ ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
+ memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
+
+ if (buffer->ioarcb.add_cmd_param_length) {
+ ioarcb->add_cmd_param_length =
+ buffer->ioarcb.add_cmd_param_length;
+ ioarcb->add_cmd_param_offset =
+ buffer->ioarcb.add_cmd_param_offset;
+ memcpy(ioarcb->add_data.u.add_cmd_params,
+ buffer->ioarcb.add_data.u.add_cmd_params,
+ buffer->ioarcb.add_cmd_param_length);
+ }
+
+ /* set hrrq number where the IOA should respond to. Note that all cmds
+ * generated internally uses hrrq_id 0, exception to this is the cmd
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+ ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+
+ if (request_size) {
+ rc = pmcraid_build_passthrough_ioadls(cmd,
+ request_size,
+ direction);
+ if (rc) {
+ pmcraid_err("couldn't build passthrough ioadls\n");
+ goto out_free_buffer;
+ }
+ } else if (request_size < 0) {
+ rc = -EINVAL;
+ goto out_free_buffer;
+ }
+
+ /* If data is being written into the device, copy the data from user
+ * buffers
+ */
+ if (direction == DMA_TO_DEVICE && request_size > 0) {
+ rc = pmcraid_copy_sglist(cmd->sglist,
+ request_buffer,
+ request_size,
+ direction);
+ if (rc) {
+ pmcraid_err("failed to copy user buffer\n");
+ goto out_free_sglist;
+ }
+ }
+
+ /* passthrough ioctl is a blocking command so, put the user to sleep
+ * until timeout. Note that a timeout value of 0 means, do timeout.
+ */
+ cmd->cmd_done = pmcraid_internal_done;
+ init_completion(&cmd->wait_for_completion);
+ cmd->completion_req = 1;
+
+ pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
+
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ _pmcraid_fire_command(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+ /* NOTE ! Remove the below line once abort_task is implemented
+ * in firmware. This line disables ioctl command timeout handling logic
+ * similar to IO command timeout handling, making ioctl commands to wait
+ * until the command completion regardless of timeout value specified in
+ * ioarcb
+ */
+ buffer->ioarcb.cmd_timeout = 0;
+
+ /* If command timeout is specified put caller to wait till that time,
+ * otherwise it would be blocking wait. If command gets timed out, it
+ * will be aborted.
+ */
+ if (buffer->ioarcb.cmd_timeout == 0) {
+ wait_for_completion(&cmd->wait_for_completion);
+ } else if (!wait_for_completion_timeout(
+ &cmd->wait_for_completion,
+ msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
+
+ pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
+ cmd->ioa_cb->ioarcb.cdb[0]);
+
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ cancel_cmd = pmcraid_abort_cmd(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+ if (cancel_cmd) {
+ wait_for_completion(&cancel_cmd->wait_for_completion);
+ ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
+ pmcraid_return_cmd(cancel_cmd);
+
+ /* if abort task couldn't find the command i.e it got
+ * completed prior to aborting, return good completion.
+ * if command got aborted successfully or there was IOA
+ * reset due to abort task itself getting timedout then
+ * return -ETIMEDOUT
+ */
+ if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
+ PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
+ if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
+ rc = -ETIMEDOUT;
+ goto out_handle_response;
+ }
+ }
+
+ /* no command block for abort task or abort task failed to abort
+ * the IOARCB, then wait for 150 more seconds and initiate reset
+ * sequence after timeout
+ */
+ if (!wait_for_completion_timeout(
+ &cmd->wait_for_completion,
+ msecs_to_jiffies(150 * 1000))) {
+ pmcraid_reset_bringup(cmd->drv_inst);
+ rc = -ETIMEDOUT;
+ }
+ }
+
+out_handle_response:
+ /* copy entire IOASA buffer and return IOCTL success.
+ * If copying IOASA to user-buffer fails, return
+ * EFAULT
+ */
+ if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
+ sizeof(struct pmcraid_ioasa))) {
+ pmcraid_err("failed to copy ioasa buffer to user\n");
+ rc = -EFAULT;
+ }
+
+ /* If the data transfer was from device, copy the data onto user
+ * buffers
+ */
+ else if (direction == DMA_FROM_DEVICE && request_size > 0) {
+ rc = pmcraid_copy_sglist(cmd->sglist,
+ request_buffer,
+ request_size,
+ direction);
+ if (rc) {
+ pmcraid_err("failed to copy user buffer\n");
+ rc = -EFAULT;
+ }
+ }
+
+out_free_sglist:
+ pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+ pmcraid_return_cmd(cmd);
+
+out_free_buffer:
+ kfree(buffer);
+
+ return rc;
+}
+
+
+
+
+/**
+ * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @cmd: ioctl command passed in
+ * @buflen: length of user_buffer
+ * @user_buffer: user buffer pointer
+ *
+ * Return Value
+ * 0 in case of success, otherwise appropriate error code
+ */
+static long pmcraid_ioctl_driver(
+ struct pmcraid_instance *pinstance,
+ unsigned int cmd,
+ unsigned int buflen,
+ void __user *user_buffer
+)
+{
+ int rc = -ENOSYS;
+
+ if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
+ pmcraid_err("ioctl_driver: access fault in request buffer\n");
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case PMCRAID_IOCTL_RESET_ADAPTER:
+ pmcraid_reset_bringup(pinstance);
+ rc = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * pmcraid_check_ioctl_buffer - check for proper access to user buffer
+ *
+ * @cmd: ioctl command
+ * @arg: user buffer
+ * @hdr: pointer to kernel memory for pmcraid_ioctl_header
+ *
+ * Return Value
+ * negetive error code if there are access issues, otherwise zero.
+ * Upon success, returns ioctl header copied out of user buffer.
+ */
+
+static int pmcraid_check_ioctl_buffer(
+ int cmd,
+ void __user *arg,
+ struct pmcraid_ioctl_header *hdr
+)
+{
+ int rc = 0;
+ int access = VERIFY_READ;
+
+ if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
+ pmcraid_err("couldn't copy ioctl header from user buffer\n");
+ return -EFAULT;
+ }
+
+ /* check for valid driver signature */
+ rc = memcmp(hdr->signature,
+ PMCRAID_IOCTL_SIGNATURE,
+ sizeof(hdr->signature));
+ if (rc) {
+ pmcraid_err("signature verification failed\n");
+ return -EINVAL;
+ }
+
+ /* check for appropriate buffer access */
+ if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
+ access = VERIFY_WRITE;
+
+ rc = access_ok(access,
+ (arg + sizeof(struct pmcraid_ioctl_header)),
+ hdr->buffer_length);
+ if (!rc) {
+ pmcraid_err("access failed for user buffer of size %d\n",
+ hdr->buffer_length);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pmcraid_ioctl - char node ioctl entry point
+ */
+static long pmcraid_chr_ioctl(
+ struct file *filep,
+ unsigned int cmd,
+ unsigned long arg
+)
+{
+ struct pmcraid_instance *pinstance = NULL;
+ struct pmcraid_ioctl_header *hdr = NULL;
+ int retval = -ENOTTY;
+
+ hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
+
+ if (!hdr) {
+ pmcraid_err("failed to allocate memory for ioctl header\n");
+ return -ENOMEM;
+ }
+
+ retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr);
+
+ if (retval) {
+ pmcraid_info("chr_ioctl: header check failed\n");
+ kfree(hdr);
+ return retval;
+ }
+
+ pinstance = filep->private_data;
+
+ if (!pinstance) {
+ pmcraid_info("adapter instance is not found\n");
+ kfree(hdr);
+ return -ENOTTY;
+ }
+
+ switch (_IOC_TYPE(cmd)) {
+
+ case PMCRAID_PASSTHROUGH_IOCTL:
+ /* If ioctl code is to download microcode, we need to block
+ * mid-layer requests.
+ */
+ if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
+ scsi_block_requests(pinstance->host);
+
+ retval = pmcraid_ioctl_passthrough(pinstance,
+ cmd,
+ hdr->buffer_length,
+ arg);
+
+ if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
+ scsi_unblock_requests(pinstance->host);
+ break;
+
+ case PMCRAID_DRIVER_IOCTL:
+ arg += sizeof(struct pmcraid_ioctl_header);
+ retval = pmcraid_ioctl_driver(pinstance,
+ cmd,
+ hdr->buffer_length,
+ (void __user *)arg);
+ break;
+
+ default:
+ retval = -ENOTTY;
+ break;
+ }
+
+ kfree(hdr);
+
+ return retval;
+}
+
+/**
+ * File operations structure for management interface
+ */
+static const struct file_operations pmcraid_fops = {
+ .owner = THIS_MODULE,
+ .open = pmcraid_chr_open,
+ .fasync = pmcraid_chr_fasync,
+ .unlocked_ioctl = pmcraid_chr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pmcraid_chr_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+
+
+
+/**
+ * pmcraid_show_log_level - Display adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ */
+static ssize_t pmcraid_show_log_level(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pmcraid_instance *pinstance =
+ (struct pmcraid_instance *)shost->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
+}
+
+/**
+ * pmcraid_store_log_level - Change the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ * @count: not used
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ */
+static ssize_t pmcraid_store_log_level(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count
+)
+{
+ struct Scsi_Host *shost;
+ struct pmcraid_instance *pinstance;
+ u8 val;
+
+ if (kstrtou8(buf, 10, &val))
+ return -EINVAL;
+ /* log-level should be from 0 to 2 */
+ if (val > 2)
+ return -EINVAL;
+
+ shost = class_to_shost(dev);
+ pinstance = (struct pmcraid_instance *)shost->hostdata;
+ pinstance->current_log_level = val;
+
+ return strlen(buf);
+}
+
+static struct device_attribute pmcraid_log_level_attr = {
+ .attr = {
+ .name = "log_level",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = pmcraid_show_log_level,
+ .store = pmcraid_store_log_level,
+};
+
+/**
+ * pmcraid_show_drv_version - Display driver version
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ */
+static ssize_t pmcraid_show_drv_version(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf
+)
+{
+ return snprintf(buf, PAGE_SIZE, "version: %s\n",
+ PMCRAID_DRIVER_VERSION);
+}
+
+static struct device_attribute pmcraid_driver_version_attr = {
+ .attr = {
+ .name = "drv_version",
+ .mode = S_IRUGO,
+ },
+ .show = pmcraid_show_drv_version,
+};
+
+/**
+ * pmcraid_show_io_adapter_id - Display driver assigned adapter id
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ */
+static ssize_t pmcraid_show_adapter_id(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf
+)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pmcraid_instance *pinstance =
+ (struct pmcraid_instance *)shost->hostdata;
+ u32 adapter_id = (pinstance->pdev->bus->number << 8) |
+ pinstance->pdev->devfn;
+ u32 aen_group = pmcraid_event_family.id;
+
+ return snprintf(buf, PAGE_SIZE,
+ "adapter id: %d\nminor: %d\naen group: %d\n",
+ adapter_id, MINOR(pinstance->cdev.dev), aen_group);
+}
+
+static struct device_attribute pmcraid_adapter_id_attr = {
+ .attr = {
+ .name = "adapter_id",
+ .mode = S_IRUGO,
+ },
+ .show = pmcraid_show_adapter_id,
+};
+
+static struct device_attribute *pmcraid_host_attrs[] = {
+ &pmcraid_log_level_attr,
+ &pmcraid_driver_version_attr,
+ &pmcraid_adapter_id_attr,
+ NULL,
+};
+
+
+/* host template structure for pmcraid driver */
+static struct scsi_host_template pmcraid_host_template = {
+ .module = THIS_MODULE,
+ .name = PMCRAID_DRIVER_NAME,
+ .queuecommand = pmcraid_queuecommand,
+ .eh_abort_handler = pmcraid_eh_abort_handler,
+ .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
+ .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
+ .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
+ .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
+
+ .slave_alloc = pmcraid_slave_alloc,
+ .slave_configure = pmcraid_slave_configure,
+ .slave_destroy = pmcraid_slave_destroy,
+ .change_queue_depth = pmcraid_change_queue_depth,
+ .can_queue = PMCRAID_MAX_IO_CMD,
+ .this_id = -1,
+ .sg_tablesize = PMCRAID_MAX_IOADLS,
+ .max_sectors = PMCRAID_IOA_MAX_SECTORS,
+ .no_write_same = 1,
+ .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = pmcraid_host_attrs,
+ .proc_name = PMCRAID_DRIVER_NAME,
+ .use_blk_tags = 1,
+};
+
+/*
+ * pmcraid_isr_msix - implements MSI-X interrupt handling routine
+ * @irq: interrupt vector number
+ * @dev_id: pointer hrrq_vector
+ *
+ * Return Value
+ * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
+ */
+
+static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
+{
+ struct pmcraid_isr_param *hrrq_vector;
+ struct pmcraid_instance *pinstance;
+ unsigned long lock_flags;
+ u32 intrs_val;
+ int hrrq_id;
+
+ hrrq_vector = (struct pmcraid_isr_param *)dev_id;
+ hrrq_id = hrrq_vector->hrrq_id;
+ pinstance = hrrq_vector->drv_inst;
+
+ if (!hrrq_id) {
+ /* Read the interrupt */
+ intrs_val = pmcraid_read_interrupts(pinstance);
+ if (intrs_val &&
+ ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
+ & DOORBELL_INTR_MSIX_CLR) == 0)) {
+ /* Any error interrupts including unit_check,
+ * initiate IOA reset.In case of unit check indicate
+ * to reset_sequence that IOA unit checked and prepare
+ * for a dump during reset sequence
+ */
+ if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
+ if (intrs_val & INTRS_IOA_UNIT_CHECK)
+ pinstance->ioa_unit_check = 1;
+
+ pmcraid_err("ISR: error interrupts: %x \
+ initiating reset\n", intrs_val);
+ spin_lock_irqsave(pinstance->host->host_lock,
+ lock_flags);
+ pmcraid_initiate_reset(pinstance);
+ spin_unlock_irqrestore(
+ pinstance->host->host_lock,
+ lock_flags);
+ }
+ /* If interrupt was as part of the ioa initialization,
+ * clear it. Delete the timer and wakeup the
+ * reset engine to proceed with reset sequence
+ */
+ if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
+ pmcraid_clr_trans_op(pinstance);
+
+ /* Clear the interrupt register by writing
+ * to host to ioa doorbell. Once done
+ * FW will clear the interrupt.
+ */
+ iowrite32(DOORBELL_INTR_MSIX_CLR,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+
+
+ }
+ }
+
+ tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pmcraid_isr - implements legacy interrupt handling routine
+ *
+ * @irq: interrupt vector number
+ * @dev_id: pointer hrrq_vector
+ *
+ * Return Value
+ * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
+ */
+static irqreturn_t pmcraid_isr(int irq, void *dev_id)
+{
+ struct pmcraid_isr_param *hrrq_vector;
+ struct pmcraid_instance *pinstance;
+ u32 intrs;
+ unsigned long lock_flags;
+ int hrrq_id = 0;
+
+ /* In case of legacy interrupt mode where interrupts are shared across
+ * isrs, it may be possible that the current interrupt is not from IOA
+ */
+ if (!dev_id) {
+ printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ hrrq_vector = (struct pmcraid_isr_param *)dev_id;
+ pinstance = hrrq_vector->drv_inst;
+
+ intrs = pmcraid_read_interrupts(pinstance);
+
+ if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
+ return IRQ_NONE;
+
+ /* Any error interrupts including unit_check, initiate IOA reset.
+ * In case of unit check indicate to reset_sequence that IOA unit
+ * checked and prepare for a dump during reset sequence
+ */
+ if (intrs & PMCRAID_ERROR_INTERRUPTS) {
+
+ if (intrs & INTRS_IOA_UNIT_CHECK)
+ pinstance->ioa_unit_check = 1;
+
+ iowrite32(intrs,
+ pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ pmcraid_err("ISR: error interrupts: %x initiating reset\n",
+ intrs);
+ intrs = ioread32(
+ pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_initiate_reset(pinstance);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ } else {
+ /* If interrupt was as part of the ioa initialization,
+ * clear. Delete the timer and wakeup the
+ * reset engine to proceed with reset sequence
+ */
+ if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
+ pmcraid_clr_trans_op(pinstance);
+ } else {
+ iowrite32(intrs,
+ pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ ioread32(
+ pinstance->int_regs.ioa_host_interrupt_clr_reg);
+
+ tasklet_schedule(
+ &(pinstance->isr_tasklet[hrrq_id]));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * pmcraid_worker_function - worker thread function
+ *
+ * @workp: pointer to struct work queue
+ *
+ * Return Value
+ * None
+ */
+
+static void pmcraid_worker_function(struct work_struct *workp)
+{
+ struct pmcraid_instance *pinstance;
+ struct pmcraid_resource_entry *res;
+ struct pmcraid_resource_entry *temp;
+ struct scsi_device *sdev;
+ unsigned long lock_flags;
+ unsigned long host_lock_flags;
+ u16 fw_version;
+ u8 bus, target, lun;
+
+ pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+ /* add resources only after host is added into system */
+ if (!atomic_read(&pinstance->expose_resources))
+ return;
+
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+
+ spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+ list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
+
+ if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
+ sdev = res->scsi_dev;
+
+ /* host_lock must be held before calling
+ * scsi_device_get
+ */
+ spin_lock_irqsave(pinstance->host->host_lock,
+ host_lock_flags);
+ if (!scsi_device_get(sdev)) {
+ spin_unlock_irqrestore(
+ pinstance->host->host_lock,
+ host_lock_flags);
+ pmcraid_info("deleting %x from midlayer\n",
+ res->cfg_entry.resource_address);
+ list_move_tail(&res->queue,
+ &pinstance->free_res_q);
+ spin_unlock_irqrestore(
+ &pinstance->resource_lock,
+ lock_flags);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(&pinstance->resource_lock,
+ lock_flags);
+ res->change_detected = 0;
+ } else {
+ spin_unlock_irqrestore(
+ pinstance->host->host_lock,
+ host_lock_flags);
+ }
+ }
+ }
+
+ list_for_each_entry(res, &pinstance->used_res_q, queue) {
+
+ if (res->change_detected == RES_CHANGE_ADD) {
+
+ if (!pmcraid_expose_resource(fw_version,
+ &res->cfg_entry))
+ continue;
+
+ if (RES_IS_VSET(res->cfg_entry)) {
+ bus = PMCRAID_VSET_BUS_ID;
+ if (fw_version <= PMCRAID_FW_VERSION_1)
+ target = res->cfg_entry.unique_flags1;
+ else
+ target = res->cfg_entry.array_id & 0xFF;
+ lun = PMCRAID_VSET_LUN_ID;
+ } else {
+ bus = PMCRAID_PHYS_BUS_ID;
+ target =
+ RES_TARGET(
+ res->cfg_entry.resource_address);
+ lun = RES_LUN(res->cfg_entry.resource_address);
+ }
+
+ res->change_detected = 0;
+ spin_unlock_irqrestore(&pinstance->resource_lock,
+ lock_flags);
+ scsi_add_device(pinstance->host, bus, target, lun);
+ spin_lock_irqsave(&pinstance->resource_lock,
+ lock_flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+}
+
+/**
+ * pmcraid_tasklet_function - Tasklet function
+ *
+ * @instance: pointer to msix param structure
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_tasklet_function(unsigned long instance)
+{
+ struct pmcraid_isr_param *hrrq_vector;
+ struct pmcraid_instance *pinstance;
+ unsigned long hrrq_lock_flags;
+ unsigned long pending_lock_flags;
+ unsigned long host_lock_flags;
+ spinlock_t *lockp; /* hrrq buffer lock */
+ int id;
+ __le32 resp;
+
+ hrrq_vector = (struct pmcraid_isr_param *)instance;
+ pinstance = hrrq_vector->drv_inst;
+ id = hrrq_vector->hrrq_id;
+ lockp = &(pinstance->hrrq_lock[id]);
+
+ /* loop through each of the commands responded by IOA. Each HRRQ buf is
+ * protected by its own lock. Traversals must be done within this lock
+ * as there may be multiple tasklets running on multiple CPUs. Note
+ * that the lock is held just for picking up the response handle and
+ * manipulating hrrq_curr/toggle_bit values.
+ */
+ spin_lock_irqsave(lockp, hrrq_lock_flags);
+
+ resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
+
+ while ((resp & HRRQ_TOGGLE_BIT) ==
+ pinstance->host_toggle_bit[id]) {
+
+ int cmd_index = resp >> 2;
+ struct pmcraid_cmd *cmd = NULL;
+
+ if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
+ pinstance->hrrq_curr[id]++;
+ } else {
+ pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
+ pinstance->host_toggle_bit[id] ^= 1u;
+ }
+
+ if (cmd_index >= PMCRAID_MAX_CMD) {
+ /* In case of invalid response handle, log message */
+ pmcraid_err("Invalid response handle %d\n", cmd_index);
+ resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
+ continue;
+ }
+
+ cmd = pinstance->cmd_list[cmd_index];
+ spin_unlock_irqrestore(lockp, hrrq_lock_flags);
+
+ spin_lock_irqsave(&pinstance->pending_pool_lock,
+ pending_lock_flags);
+ list_del(&cmd->free_list);
+ spin_unlock_irqrestore(&pinstance->pending_pool_lock,
+ pending_lock_flags);
+ del_timer(&cmd->timer);
+ atomic_dec(&pinstance->outstanding_cmds);
+
+ if (cmd->cmd_done == pmcraid_ioa_reset) {
+ spin_lock_irqsave(pinstance->host->host_lock,
+ host_lock_flags);
+ cmd->cmd_done(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ host_lock_flags);
+ } else if (cmd->cmd_done != NULL) {
+ cmd->cmd_done(cmd);
+ }
+ /* loop over until we are done with all responses */
+ spin_lock_irqsave(lockp, hrrq_lock_flags);
+ resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
+ }
+
+ spin_unlock_irqrestore(lockp, hrrq_lock_flags);
+}
+
+/**
+ * pmcraid_unregister_interrupt_handler - de-register interrupts handlers
+ * @pinstance: pointer to adapter instance structure
+ *
+ * This routine un-registers registered interrupt handler and
+ * also frees irqs/vectors.
+ *
+ * Retun Value
+ * None
+ */
+static
+void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
+{
+ int i;
+
+ for (i = 0; i < pinstance->num_hrrq; i++)
+ free_irq(pinstance->hrrq_vector[i].vector,
+ &(pinstance->hrrq_vector[i]));
+
+ if (pinstance->interrupt_mode) {
+ pci_disable_msix(pinstance->pdev);
+ pinstance->interrupt_mode = 0;
+ }
+}
+
+/**
+ * pmcraid_register_interrupt_handler - registers interrupt handler
+ * @pinstance: pointer to per-adapter instance structure
+ *
+ * Return Value
+ * 0 on success, non-zero error code otherwise.
+ */
+static int
+pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
+{
+ int rc;
+ struct pci_dev *pdev = pinstance->pdev;
+
+ if ((pmcraid_enable_msix) &&
+ (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) {
+ int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
+ struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
+ int i;
+ for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
+ entries[i].entry = i;
+
+ num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
+ if (num_hrrq < 0)
+ goto pmcraid_isr_legacy;
+
+ for (i = 0; i < num_hrrq; i++) {
+ pinstance->hrrq_vector[i].hrrq_id = i;
+ pinstance->hrrq_vector[i].drv_inst = pinstance;
+ pinstance->hrrq_vector[i].vector = entries[i].vector;
+ rc = request_irq(pinstance->hrrq_vector[i].vector,
+ pmcraid_isr_msix, 0,
+ PMCRAID_DRIVER_NAME,
+ &(pinstance->hrrq_vector[i]));
+
+ if (rc) {
+ int j;
+ for (j = 0; j < i; j++)
+ free_irq(entries[j].vector,
+ &(pinstance->hrrq_vector[j]));
+ pci_disable_msix(pdev);
+ goto pmcraid_isr_legacy;
+ }
+ }
+
+ pinstance->num_hrrq = num_hrrq;
+ pinstance->interrupt_mode = 1;
+ iowrite32(DOORBELL_INTR_MODE_MSIX,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+ goto pmcraid_isr_out;
+ }
+
+pmcraid_isr_legacy:
+ /* If MSI-X registration failed fallback to legacy mode, where
+ * only one hrrq entry will be used
+ */
+ pinstance->hrrq_vector[0].hrrq_id = 0;
+ pinstance->hrrq_vector[0].drv_inst = pinstance;
+ pinstance->hrrq_vector[0].vector = pdev->irq;
+ pinstance->num_hrrq = 1;
+
+ rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
+ PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
+pmcraid_isr_out:
+ return rc;
+}
+
+/**
+ * pmcraid_release_cmd_blocks - release buufers allocated for command blocks
+ * @pinstance: per adapter instance structure pointer
+ * @max_index: number of buffer blocks to release
+ *
+ * Return Value
+ * None
+ */
+static void
+pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
+{
+ int i;
+ for (i = 0; i < max_index; i++) {
+ kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
+ pinstance->cmd_list[i] = NULL;
+ }
+ kmem_cache_destroy(pinstance->cmd_cachep);
+ pinstance->cmd_cachep = NULL;
+}
+
+/**
+ * pmcraid_release_control_blocks - releases buffers alloced for control blocks
+ * @pinstance: pointer to per adapter instance structure
+ * @max_index: number of buffers (from 0 onwards) to release
+ *
+ * This function assumes that the command blocks for which control blocks are
+ * linked are not released.
+ *
+ * Return Value
+ * None
+ */
+static void
+pmcraid_release_control_blocks(
+ struct pmcraid_instance *pinstance,
+ int max_index
+)
+{
+ int i;
+
+ if (pinstance->control_pool == NULL)
+ return;
+
+ for (i = 0; i < max_index; i++) {
+ pci_pool_free(pinstance->control_pool,
+ pinstance->cmd_list[i]->ioa_cb,
+ pinstance->cmd_list[i]->ioa_cb_bus_addr);
+ pinstance->cmd_list[i]->ioa_cb = NULL;
+ pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
+ }
+ pci_pool_destroy(pinstance->control_pool);
+ pinstance->control_pool = NULL;
+}
+
+/**
+ * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
+ * @pinstance - pointer to per adapter instance structure
+ *
+ * Allocates memory for command blocks using kernel slab allocator.
+ *
+ * Return Value
+ * 0 in case of success; -ENOMEM in case of failure
+ */
+static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
+{
+ int i;
+
+ sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
+ pinstance->host->unique_id);
+
+
+ pinstance->cmd_cachep = kmem_cache_create(
+ pinstance->cmd_pool_name,
+ sizeof(struct pmcraid_cmd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pinstance->cmd_cachep)
+ return -ENOMEM;
+
+ for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+ pinstance->cmd_list[i] =
+ kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
+ if (!pinstance->cmd_list[i]) {
+ pmcraid_release_cmd_blocks(pinstance, i);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * pmcraid_allocate_control_blocks - allocates memory control blocks
+ * @pinstance : pointer to per adapter instance structure
+ *
+ * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
+ * and IOASAs. This is called after command blocks are already allocated.
+ *
+ * Return Value
+ * 0 in case it can allocate all control blocks, otherwise -ENOMEM
+ */
+static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
+{
+ int i;
+
+ sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
+ pinstance->host->unique_id);
+
+ pinstance->control_pool =
+ pci_pool_create(pinstance->ctl_pool_name,
+ pinstance->pdev,
+ sizeof(struct pmcraid_control_block),
+ PMCRAID_IOARCB_ALIGNMENT, 0);
+
+ if (!pinstance->control_pool)
+ return -ENOMEM;
+
+ for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+ pinstance->cmd_list[i]->ioa_cb =
+ pci_pool_alloc(
+ pinstance->control_pool,
+ GFP_KERNEL,
+ &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
+
+ if (!pinstance->cmd_list[i]->ioa_cb) {
+ pmcraid_release_control_blocks(pinstance, i);
+ return -ENOMEM;
+ }
+ memset(pinstance->cmd_list[i]->ioa_cb, 0,
+ sizeof(struct pmcraid_control_block));
+ }
+ return 0;
+}
+
+/**
+ * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
+ * @pinstance: pointer to per adapter instance structure
+ * @maxindex: size of hrrq buffer pointer array
+ *
+ * Return Value
+ * None
+ */
+static void
+pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
+{
+ int i;
+ for (i = 0; i < maxindex; i++) {
+
+ pci_free_consistent(pinstance->pdev,
+ HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
+ pinstance->hrrq_start[i],
+ pinstance->hrrq_start_bus_addr[i]);
+
+ /* reset pointers and toggle bit to zeros */
+ pinstance->hrrq_start[i] = NULL;
+ pinstance->hrrq_start_bus_addr[i] = 0;
+ pinstance->host_toggle_bit[i] = 0;
+ }
+}
+
+/**
+ * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value
+ * 0 hrrq buffers are allocated, -ENOMEM otherwise.
+ */
+static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
+{
+ int i, buffer_size;
+
+ buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
+
+ for (i = 0; i < pinstance->num_hrrq; i++) {
+ pinstance->hrrq_start[i] =
+ pci_alloc_consistent(
+ pinstance->pdev,
+ buffer_size,
+ &(pinstance->hrrq_start_bus_addr[i]));
+
+ if (pinstance->hrrq_start[i] == 0) {
+ pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
+ i);
+ pmcraid_release_host_rrqs(pinstance, i);
+ return -ENOMEM;
+ }
+
+ memset(pinstance->hrrq_start[i], 0, buffer_size);
+ pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
+ pinstance->hrrq_end[i] =
+ pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
+ pinstance->host_toggle_bit[i] = 1;
+ spin_lock_init(&pinstance->hrrq_lock[i]);
+ }
+ return 0;
+}
+
+/**
+ * pmcraid_release_hcams - release HCAM buffers
+ *
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
+{
+ if (pinstance->ccn.msg != NULL) {
+ pci_free_consistent(pinstance->pdev,
+ PMCRAID_AEN_HDR_SIZE +
+ sizeof(struct pmcraid_hcam_ccn_ext),
+ pinstance->ccn.msg,
+ pinstance->ccn.baddr);
+
+ pinstance->ccn.msg = NULL;
+ pinstance->ccn.hcam = NULL;
+ pinstance->ccn.baddr = 0;
+ }
+
+ if (pinstance->ldn.msg != NULL) {
+ pci_free_consistent(pinstance->pdev,
+ PMCRAID_AEN_HDR_SIZE +
+ sizeof(struct pmcraid_hcam_ldn),
+ pinstance->ldn.msg,
+ pinstance->ldn.baddr);
+
+ pinstance->ldn.msg = NULL;
+ pinstance->ldn.hcam = NULL;
+ pinstance->ldn.baddr = 0;
+ }
+}
+
+/**
+ * pmcraid_allocate_hcams - allocates HCAM buffers
+ * @pinstance : pointer to per adapter instance structure
+ *
+ * Return Value:
+ * 0 in case of successful allocation, non-zero otherwise
+ */
+static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
+{
+ pinstance->ccn.msg = pci_alloc_consistent(
+ pinstance->pdev,
+ PMCRAID_AEN_HDR_SIZE +
+ sizeof(struct pmcraid_hcam_ccn_ext),
+ &(pinstance->ccn.baddr));
+
+ pinstance->ldn.msg = pci_alloc_consistent(
+ pinstance->pdev,
+ PMCRAID_AEN_HDR_SIZE +
+ sizeof(struct pmcraid_hcam_ldn),
+ &(pinstance->ldn.baddr));
+
+ if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
+ pmcraid_release_hcams(pinstance);
+ } else {
+ pinstance->ccn.hcam =
+ (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
+ pinstance->ldn.hcam =
+ (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
+
+ atomic_set(&pinstance->ccn.ignore, 0);
+ atomic_set(&pinstance->ldn.ignore, 0);
+ }
+
+ return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * pmcraid_release_config_buffers - release config.table buffers
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
+{
+ if (pinstance->cfg_table != NULL &&
+ pinstance->cfg_table_bus_addr != 0) {
+ pci_free_consistent(pinstance->pdev,
+ sizeof(struct pmcraid_config_table),
+ pinstance->cfg_table,
+ pinstance->cfg_table_bus_addr);
+ pinstance->cfg_table = NULL;
+ pinstance->cfg_table_bus_addr = 0;
+ }
+
+ if (pinstance->res_entries != NULL) {
+ int i;
+
+ for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
+ list_del(&pinstance->res_entries[i].queue);
+ kfree(pinstance->res_entries);
+ pinstance->res_entries = NULL;
+ }
+
+ pmcraid_release_hcams(pinstance);
+}
+
+/**
+ * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
+ * @pinstance : pointer to per adapter instance structure
+ *
+ * Return Value
+ * 0 for successful allocation, -ENOMEM for any failure
+ */
+static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
+{
+ int i;
+
+ pinstance->res_entries =
+ kzalloc(sizeof(struct pmcraid_resource_entry) *
+ PMCRAID_MAX_RESOURCES, GFP_KERNEL);
+
+ if (NULL == pinstance->res_entries) {
+ pmcraid_err("failed to allocate memory for resource table\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
+ list_add_tail(&pinstance->res_entries[i].queue,
+ &pinstance->free_res_q);
+
+ pinstance->cfg_table =
+ pci_alloc_consistent(pinstance->pdev,
+ sizeof(struct pmcraid_config_table),
+ &pinstance->cfg_table_bus_addr);
+
+ if (NULL == pinstance->cfg_table) {
+ pmcraid_err("couldn't alloc DMA memory for config table\n");
+ pmcraid_release_config_buffers(pinstance);
+ return -ENOMEM;
+ }
+
+ if (pmcraid_allocate_hcams(pinstance)) {
+ pmcraid_err("could not alloc DMA memory for HCAMS\n");
+ pmcraid_release_config_buffers(pinstance);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * pmcraid_init_tasklets - registers tasklets for response handling
+ *
+ * @pinstance: pointer adapter instance structure
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
+{
+ int i;
+ for (i = 0; i < pinstance->num_hrrq; i++)
+ tasklet_init(&pinstance->isr_tasklet[i],
+ pmcraid_tasklet_function,
+ (unsigned long)&pinstance->hrrq_vector[i]);
+}
+
+/**
+ * pmcraid_kill_tasklets - destroys tasklets registered for response handling
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
+{
+ int i;
+ for (i = 0; i < pinstance->num_hrrq; i++)
+ tasklet_kill(&pinstance->isr_tasklet[i]);
+}
+
+/**
+ * pmcraid_release_buffers - release per-adapter buffers allocated
+ *
+ * @pinstance: pointer to adapter soft state
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
+{
+ pmcraid_release_config_buffers(pinstance);
+ pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
+ pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
+ pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+
+ if (pinstance->inq_data != NULL) {
+ pci_free_consistent(pinstance->pdev,
+ sizeof(struct pmcraid_inquiry_data),
+ pinstance->inq_data,
+ pinstance->inq_data_baddr);
+
+ pinstance->inq_data = NULL;
+ pinstance->inq_data_baddr = 0;
+ }
+
+ if (pinstance->timestamp_data != NULL) {
+ pci_free_consistent(pinstance->pdev,
+ sizeof(struct pmcraid_timestamp_data),
+ pinstance->timestamp_data,
+ pinstance->timestamp_data_baddr);
+
+ pinstance->timestamp_data = NULL;
+ pinstance->timestamp_data_baddr = 0;
+ }
+}
+
+/**
+ * pmcraid_init_buffers - allocates memory and initializes various structures
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * This routine pre-allocates memory based on the type of block as below:
+ * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
+ * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
+ * config-table entries : DMAable memory using pci_alloc_consistent
+ * HostRRQs : DMAable memory, using pci_alloc_consistent
+ *
+ * Return Value
+ * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
+ */
+static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
+{
+ int i;
+
+ if (pmcraid_allocate_host_rrqs(pinstance)) {
+ pmcraid_err("couldn't allocate memory for %d host rrqs\n",
+ pinstance->num_hrrq);
+ return -ENOMEM;
+ }
+
+ if (pmcraid_allocate_config_buffers(pinstance)) {
+ pmcraid_err("couldn't allocate memory for config buffers\n");
+ pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+ return -ENOMEM;
+ }
+
+ if (pmcraid_allocate_cmd_blocks(pinstance)) {
+ pmcraid_err("couldn't allocate memory for cmd blocks\n");
+ pmcraid_release_config_buffers(pinstance);
+ pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+ return -ENOMEM;
+ }
+
+ if (pmcraid_allocate_control_blocks(pinstance)) {
+ pmcraid_err("couldn't allocate memory control blocks\n");
+ pmcraid_release_config_buffers(pinstance);
+ pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
+ pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+ return -ENOMEM;
+ }
+
+ /* allocate DMAable memory for page D0 INQUIRY buffer */
+ pinstance->inq_data = pci_alloc_consistent(
+ pinstance->pdev,
+ sizeof(struct pmcraid_inquiry_data),
+ &pinstance->inq_data_baddr);
+
+ if (pinstance->inq_data == NULL) {
+ pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
+ pmcraid_release_buffers(pinstance);
+ return -ENOMEM;
+ }
+
+ /* allocate DMAable memory for set timestamp data buffer */
+ pinstance->timestamp_data = pci_alloc_consistent(
+ pinstance->pdev,
+ sizeof(struct pmcraid_timestamp_data),
+ &pinstance->timestamp_data_baddr);
+
+ if (pinstance->timestamp_data == NULL) {
+ pmcraid_err("couldn't allocate DMA memory for \
+ set time_stamp \n");
+ pmcraid_release_buffers(pinstance);
+ return -ENOMEM;
+ }
+
+
+ /* Initialize all the command blocks and add them to free pool. No
+ * need to lock (free_pool_lock) as this is done in initialization
+ * itself
+ */
+ for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+ struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
+ pmcraid_init_cmdblk(cmdp, i);
+ cmdp->drv_inst = pinstance;
+ list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
+ }
+
+ return 0;
+}
+
+/**
+ * pmcraid_reinit_buffers - resets various buffer pointers
+ * @pinstance: pointer to adapter instance
+ * Return value
+ * none
+ */
+static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
+{
+ int i;
+ int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
+
+ for (i = 0; i < pinstance->num_hrrq; i++) {
+ memset(pinstance->hrrq_start[i], 0, buffer_size);
+ pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
+ pinstance->hrrq_end[i] =
+ pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
+ pinstance->host_toggle_bit[i] = 1;
+ }
+}
+
+/**
+ * pmcraid_init_instance - initialize per instance data structure
+ * @pdev: pointer to pci device structure
+ * @host: pointer to Scsi_Host structure
+ * @mapped_pci_addr: memory mapped IOA configuration registers
+ *
+ * Return Value
+ * 0 on success, non-zero in case of any failure
+ */
+static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
+ void __iomem *mapped_pci_addr)
+{
+ struct pmcraid_instance *pinstance =
+ (struct pmcraid_instance *)host->hostdata;
+
+ pinstance->host = host;
+ pinstance->pdev = pdev;
+
+ /* Initialize register addresses */
+ pinstance->mapped_dma_addr = mapped_pci_addr;
+
+ /* Initialize chip-specific details */
+ {
+ struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
+ struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
+
+ pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
+
+ pint_regs->ioa_host_interrupt_reg =
+ mapped_pci_addr + chip_cfg->ioa_host_intr;
+ pint_regs->ioa_host_interrupt_clr_reg =
+ mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
+ pint_regs->ioa_host_msix_interrupt_reg =
+ mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
+ pint_regs->host_ioa_interrupt_reg =
+ mapped_pci_addr + chip_cfg->host_ioa_intr;
+ pint_regs->host_ioa_interrupt_clr_reg =
+ mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
+
+ /* Current version of firmware exposes interrupt mask set
+ * and mask clr registers through memory mapped bar0.
+ */
+ pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
+ pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
+ pint_regs->ioa_host_interrupt_mask_reg =
+ mapped_pci_addr + chip_cfg->ioa_host_mask;
+ pint_regs->ioa_host_interrupt_mask_clr_reg =
+ mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
+ pint_regs->global_interrupt_mask_reg =
+ mapped_pci_addr + chip_cfg->global_intr_mask;
+ };
+
+ pinstance->ioa_reset_attempts = 0;
+ init_waitqueue_head(&pinstance->reset_wait_q);
+
+ atomic_set(&pinstance->outstanding_cmds, 0);
+ atomic_set(&pinstance->last_message_id, 0);
+ atomic_set(&pinstance->expose_resources, 0);
+
+ INIT_LIST_HEAD(&pinstance->free_res_q);
+ INIT_LIST_HEAD(&pinstance->used_res_q);
+ INIT_LIST_HEAD(&pinstance->free_cmd_pool);
+ INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
+
+ spin_lock_init(&pinstance->free_pool_lock);
+ spin_lock_init(&pinstance->pending_pool_lock);
+ spin_lock_init(&pinstance->resource_lock);
+ mutex_init(&pinstance->aen_queue_lock);
+
+ /* Work-queue (Shared) for deferred processing error handling */
+ INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
+
+ /* Initialize the default log_level */
+ pinstance->current_log_level = pmcraid_log_level;
+
+ /* Setup variables required for reset engine */
+ pinstance->ioa_state = IOA_STATE_UNKNOWN;
+ pinstance->reset_cmd = NULL;
+ return 0;
+}
+
+/**
+ * pmcraid_shutdown - shutdown adapter controller.
+ * @pdev: pci device struct
+ *
+ * Issues an adapter shutdown to the card waits for its completion
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_shutdown(struct pci_dev *pdev)
+{
+ struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+ pmcraid_reset_bringdown(pinstance);
+}
+
+
+/**
+ * pmcraid_get_minor - returns unused minor number from minor number bitmap
+ */
+static unsigned short pmcraid_get_minor(void)
+{
+ int minor;
+
+ minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+ __set_bit(minor, pmcraid_minor);
+ return minor;
+}
+
+/**
+ * pmcraid_release_minor - releases given minor back to minor number bitmap
+ */
+static void pmcraid_release_minor(unsigned short minor)
+{
+ __clear_bit(minor, pmcraid_minor);
+}
+
+/**
+ * pmcraid_setup_chrdev - allocates a minor number and registers a char device
+ *
+ * @pinstance: pointer to adapter instance for which to register device
+ *
+ * Return value
+ * 0 in case of success, otherwise non-zero
+ */
+static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
+{
+ int minor;
+ int error;
+
+ minor = pmcraid_get_minor();
+ cdev_init(&pinstance->cdev, &pmcraid_fops);
+ pinstance->cdev.owner = THIS_MODULE;
+
+ error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
+
+ if (error)
+ pmcraid_release_minor(minor);
+ else
+ device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
+ NULL, "%s%u", PMCRAID_DEVFILE, minor);
+ return error;
+}
+
+/**
+ * pmcraid_release_chrdev - unregisters per-adapter management interface
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
+{
+ pmcraid_release_minor(MINOR(pinstance->cdev.dev));
+ device_destroy(pmcraid_class,
+ MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
+ cdev_del(&pinstance->cdev);
+}
+
+/**
+ * pmcraid_remove - IOA hot plug remove entry point
+ * @pdev: pci device struct
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_remove(struct pci_dev *pdev)
+{
+ struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+
+ /* remove the management interface (/dev file) for this device */
+ pmcraid_release_chrdev(pinstance);
+
+ /* remove host template from scsi midlayer */
+ scsi_remove_host(pinstance->host);
+
+ /* block requests from mid-layer */
+ scsi_block_requests(pinstance->host);
+
+ /* initiate shutdown adapter */
+ pmcraid_shutdown(pdev);
+
+ pmcraid_disable_interrupts(pinstance, ~0);
+ flush_work(&pinstance->worker_q);
+
+ pmcraid_kill_tasklets(pinstance);
+ pmcraid_unregister_interrupt_handler(pinstance);
+ pmcraid_release_buffers(pinstance);
+ iounmap(pinstance->mapped_dma_addr);
+ pci_release_regions(pdev);
+ scsi_host_put(pinstance->host);
+ pci_disable_device(pdev);
+
+ return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * pmcraid_suspend - driver suspend entry point for power management
+ * @pdev: PCI device structure
+ * @state: PCI power state to suspend routine
+ *
+ * Return Value - 0 always
+ */
+static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+
+ pmcraid_shutdown(pdev);
+ pmcraid_disable_interrupts(pinstance, ~0);
+ pmcraid_kill_tasklets(pinstance);
+ pci_set_drvdata(pinstance->pdev, pinstance);
+ pmcraid_unregister_interrupt_handler(pinstance);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+/**
+ * pmcraid_resume - driver resume entry point PCI power management
+ * @pdev: PCI device structure
+ *
+ * Return Value - 0 in case of success. Error code in case of any failure
+ */
+static int pmcraid_resume(struct pci_dev *pdev)
+{
+ struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+ struct Scsi_Host *host = pinstance->host;
+ int rc;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ rc = pci_enable_device(pdev);
+
+ if (rc) {
+ dev_err(&pdev->dev, "resume: Enable device failed\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ if ((sizeof(dma_addr_t) == 4) ||
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
+ if (rc == 0)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+
+ if (rc != 0) {
+ dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
+ goto disable_device;
+ }
+
+ pmcraid_disable_interrupts(pinstance, ~0);
+ atomic_set(&pinstance->outstanding_cmds, 0);
+ rc = pmcraid_register_interrupt_handler(pinstance);
+
+ if (rc) {
+ dev_err(&pdev->dev,
+ "resume: couldn't register interrupt handlers\n");
+ rc = -ENODEV;
+ goto release_host;
+ }
+
+ pmcraid_init_tasklets(pinstance);
+ pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
+
+ /* Start with hard reset sequence which brings up IOA to operational
+ * state as well as completes the reset sequence.
+ */
+ pinstance->ioa_hard_reset = 1;
+
+ /* Start IOA firmware initialization and bring card to Operational
+ * state.
+ */
+ if (pmcraid_reset_bringup(pinstance)) {
+ dev_err(&pdev->dev, "couldn't initialize IOA\n");
+ rc = -ENODEV;
+ goto release_tasklets;
+ }
+
+ return 0;
+
+release_tasklets:
+ pmcraid_disable_interrupts(pinstance, ~0);
+ pmcraid_kill_tasklets(pinstance);
+ pmcraid_unregister_interrupt_handler(pinstance);
+
+release_host:
+ scsi_host_put(host);
+
+disable_device:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+#else
+
+#define pmcraid_suspend NULL
+#define pmcraid_resume NULL
+
+#endif /* CONFIG_PM */
+
+/**
+ * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
+ * completion of the ioa reset
+ * @cmd: pointer to reset command block
+ */
+static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long flags;
+
+ spin_lock_irqsave(pinstance->host->host_lock, flags);
+ pmcraid_ioa_reset(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, flags);
+ scsi_unblock_requests(pinstance->host);
+ schedule_work(&pinstance->worker_q);
+}
+
+/**
+ * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
+ *
+ * @cmd: pointer to pmcraid_cmd structure
+ *
+ * Return Value
+ * 0 for success or non-zero for failure cases
+ */
+static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
+
+ pmcraid_reinit_cmdblk(cmd);
+
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
+ ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
+
+ /* If this was called as part of resource table reinitialization due to
+ * lost CCN, it is enough to return the command block back to free pool
+ * as part of set_supported_devs completion function.
+ */
+ if (cmd->drv_inst->reinit_cfg_table) {
+ cmd->drv_inst->reinit_cfg_table = 0;
+ cmd->release = 1;
+ cmd_done = pmcraid_reinit_cfgtable_done;
+ }
+
+ /* we will be done with the reset sequence after set supported devices,
+ * setup the done function to return the command block back to free
+ * pool
+ */
+ pmcraid_send_cmd(cmd,
+ cmd_done,
+ PMCRAID_SET_SUP_DEV_TIMEOUT,
+ pmcraid_timeout_handler);
+ return;
+}
+
+/**
+ * pmcraid_set_timestamp - set the timestamp to IOAFP
+ *
+ * @cmd: pointer to pmcraid_cmd structure
+ *
+ * Return Value
+ * 0 for success or non-zero for failure cases
+ */
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+ struct timeval tv;
+ __le64 timestamp;
+
+ do_gettimeofday(&tv);
+ timestamp = tv.tv_sec * 1000;
+
+ pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
+ pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
+ pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
+ pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
+ pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
+ pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
+
+ pmcraid_reinit_cmdblk(cmd);
+ ioarcb->request_type = REQ_TYPE_SCSI;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
+ ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
+ memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
+
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+ ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+ ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
+ ioarcb->data_transfer_length =
+ cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+ ioadl = &(ioarcb->add_data.u.ioadl[0]);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
+ ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
+ ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+
+ if (!pinstance->timestamp_error) {
+ pinstance->timestamp_error = 0;
+ pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
+ PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+ } else {
+ pmcraid_send_cmd(cmd, pmcraid_return_cmd,
+ PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+ return;
+ }
+}
+
+
+/**
+ * pmcraid_init_res_table - Initialize the resource table
+ * @cmd: pointer to pmcraid command struct
+ *
+ * This function looks through the existing resource table, comparing
+ * it with the config table. This function will take care of old/new
+ * devices and schedule adding/removing them from the mid-layer
+ * as appropriate.
+ *
+ * Return value
+ * None
+ */
+static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_resource_entry *res, *temp;
+ struct pmcraid_config_table_entry *cfgte;
+ unsigned long lock_flags;
+ int found, rc, i;
+ u16 fw_version;
+ LIST_HEAD(old_res);
+
+ if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
+ pmcraid_err("IOA requires microcode download\n");
+
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+
+ /* resource list is protected by pinstance->resource_lock.
+ * init_res_table can be called from probe (user-thread) or runtime
+ * reset (timer/tasklet)
+ */
+ spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+
+ list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
+ list_move_tail(&res->queue, &old_res);
+
+ for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
+ if (be16_to_cpu(pinstance->inq_data->fw_version) <=
+ PMCRAID_FW_VERSION_1)
+ cfgte = &pinstance->cfg_table->entries[i];
+ else
+ cfgte = (struct pmcraid_config_table_entry *)
+ &pinstance->cfg_table->entries_ext[i];
+
+ if (!pmcraid_expose_resource(fw_version, cfgte))
+ continue;
+
+ found = 0;
+
+ /* If this entry was already detected and initialized */
+ list_for_each_entry_safe(res, temp, &old_res, queue) {
+
+ rc = memcmp(&res->cfg_entry.resource_address,
+ &cfgte->resource_address,
+ sizeof(cfgte->resource_address));
+ if (!rc) {
+ list_move_tail(&res->queue,
+ &pinstance->used_res_q);
+ found = 1;
+ break;
+ }
+ }
+
+ /* If this is new entry, initialize it and add it the queue */
+ if (!found) {
+
+ if (list_empty(&pinstance->free_res_q)) {
+ pmcraid_err("Too many devices attached\n");
+ break;
+ }
+
+ found = 1;
+ res = list_entry(pinstance->free_res_q.next,
+ struct pmcraid_resource_entry, queue);
+
+ res->scsi_dev = NULL;
+ res->change_detected = RES_CHANGE_ADD;
+ res->reset_progress = 0;
+ list_move_tail(&res->queue, &pinstance->used_res_q);
+ }
+
+ /* copy new configuration table entry details into driver
+ * maintained resource entry
+ */
+ if (found) {
+ memcpy(&res->cfg_entry, cfgte,
+ pinstance->config_table_entry_size);
+ pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
+ res->cfg_entry.resource_type,
+ (fw_version <= PMCRAID_FW_VERSION_1 ?
+ res->cfg_entry.unique_flags1 :
+ res->cfg_entry.array_id & 0xFF),
+ le32_to_cpu(res->cfg_entry.resource_address));
+ }
+ }
+
+ /* Detect any deleted entries, mark them for deletion from mid-layer */
+ list_for_each_entry_safe(res, temp, &old_res, queue) {
+
+ if (res->scsi_dev) {
+ res->change_detected = RES_CHANGE_DEL;
+ res->cfg_entry.resource_handle =
+ PMCRAID_INVALID_RES_HANDLE;
+ list_move_tail(&res->queue, &pinstance->used_res_q);
+ } else {
+ list_move_tail(&res->queue, &pinstance->free_res_q);
+ }
+ }
+
+ /* release the resource list lock */
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+ pmcraid_set_timestamp(cmd);
+}
+
+/**
+ * pmcraid_querycfg - Send a Query IOA Config to the adapter.
+ * @cmd: pointer pmcraid_cmd struct
+ *
+ * This function sends a Query IOA Configuration command to the adapter to
+ * retrieve the IOA configuration table.
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
+
+ if (be16_to_cpu(pinstance->inq_data->fw_version) <=
+ PMCRAID_FW_VERSION_1)
+ pinstance->config_table_entry_size =
+ sizeof(struct pmcraid_config_table_entry);
+ else
+ pinstance->config_table_entry_size =
+ sizeof(struct pmcraid_config_table_entry_ext);
+
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+
+ ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
+
+ /* firmware requires 4-byte length field, specified in B.E format */
+ memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
+
+ /* Since entire config table can be described by single IOADL, it can
+ * be part of IOARCB itself
+ */
+ ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+ offsetof(struct pmcraid_ioarcb,
+ add_data.u.ioadl[0]));
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+ ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+ ioarcb->request_flags0 |= NO_LINK_DESCS;
+ ioarcb->data_transfer_length =
+ cpu_to_le32(sizeof(struct pmcraid_config_table));
+
+ ioadl = &(ioarcb->add_data.u.ioadl[0]);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
+ ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
+ ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
+
+ pmcraid_send_cmd(cmd, pmcraid_init_res_table,
+ PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+}
+
+
+/**
+ * pmcraid_probe - PCI probe entry pointer for PMC MaxRAID controller driver
+ * @pdev: pointer to pci device structure
+ * @dev_id: pointer to device ids structure
+ *
+ * Return Value
+ * returns 0 if the device is claimed and successfully configured.
+ * returns non-zero error code in case of any failure
+ */
+static int pmcraid_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct pmcraid_instance *pinstance;
+ struct Scsi_Host *host;
+ void __iomem *mapped_pci_addr;
+ int rc = PCIBIOS_SUCCESSFUL;
+
+ if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
+ pmcraid_err
+ ("maximum number(%d) of supported adapters reached\n",
+ atomic_read(&pmcraid_adapter_count));
+ return -ENOMEM;
+ }
+
+ atomic_inc(&pmcraid_adapter_count);
+ rc = pci_enable_device(pdev);
+
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable adapter\n");
+ atomic_dec(&pmcraid_adapter_count);
+ return rc;
+ }
+
+ dev_info(&pdev->dev,
+ "Found new IOA(%x:%x), Total IOA count: %d\n",
+ pdev->vendor, pdev->device,
+ atomic_read(&pmcraid_adapter_count));
+
+ rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
+
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't register memory range of registers\n");
+ goto out_disable_device;
+ }
+
+ mapped_pci_addr = pci_iomap(pdev, 0, 0);
+
+ if (!mapped_pci_addr) {
+ dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
+ rc = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /* Firmware requires the system bus address of IOARCB to be within
+ * 32-bit addressable range though it has 64-bit IOARRIN register.
+ * However, firmware supports 64-bit streaming DMA buffers, whereas
+ * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
+ * returns memory within 4GB (if not, change this logic), coherent
+ * buffers are within firmware acceptable address ranges.
+ */
+ if ((sizeof(dma_addr_t) == 4) ||
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
+ /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
+ * bit mask for pci_alloc_consistent to return addresses within 4GB
+ */
+ if (rc == 0)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+
+ if (rc != 0) {
+ dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
+ goto cleanup_nomem;
+ }
+
+ host = scsi_host_alloc(&pmcraid_host_template,
+ sizeof(struct pmcraid_instance));
+
+ if (!host) {
+ dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
+ rc = -ENOMEM;
+ goto cleanup_nomem;
+ }
+
+ host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
+ host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
+ host->unique_id = host->host_no;
+ host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
+ host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
+
+ /* zero out entire instance structure */
+ pinstance = (struct pmcraid_instance *)host->hostdata;
+ memset(pinstance, 0, sizeof(*pinstance));
+
+ pinstance->chip_cfg =
+ (struct pmcraid_chip_details *)(dev_id->driver_data);
+
+ rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
+
+ if (rc < 0) {
+ dev_err(&pdev->dev, "failed to initialize adapter instance\n");
+ goto out_scsi_host_put;
+ }
+
+ pci_set_drvdata(pdev, pinstance);
+
+ /* Save PCI config-space for use following the reset */
+ rc = pci_save_state(pinstance->pdev);
+
+ if (rc != 0) {
+ dev_err(&pdev->dev, "Failed to save PCI config space\n");
+ goto out_scsi_host_put;
+ }
+
+ pmcraid_disable_interrupts(pinstance, ~0);
+
+ rc = pmcraid_register_interrupt_handler(pinstance);
+
+ if (rc) {
+ dev_err(&pdev->dev, "couldn't register interrupt handler\n");
+ goto out_scsi_host_put;
+ }
+
+ pmcraid_init_tasklets(pinstance);
+
+ /* allocate verious buffers used by LLD.*/
+ rc = pmcraid_init_buffers(pinstance);
+
+ if (rc) {
+ pmcraid_err("couldn't allocate memory blocks\n");
+ goto out_unregister_isr;
+ }
+
+ /* check the reset type required */
+ pmcraid_reset_type(pinstance);
+
+ pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
+
+ /* Start IOA firmware initialization and bring card to Operational
+ * state.
+ */
+ pmcraid_info("starting IOA initialization sequence\n");
+ if (pmcraid_reset_bringup(pinstance)) {
+ dev_err(&pdev->dev, "couldn't initialize IOA\n");
+ rc = 1;
+ goto out_release_bufs;
+ }
+
+ /* Add adapter instance into mid-layer list */
+ rc = scsi_add_host(pinstance->host, &pdev->dev);
+ if (rc != 0) {
+ pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
+ goto out_release_bufs;
+ }
+
+ scsi_scan_host(pinstance->host);
+
+ rc = pmcraid_setup_chrdev(pinstance);
+
+ if (rc != 0) {
+ pmcraid_err("couldn't create mgmt interface, error: %x\n",
+ rc);
+ goto out_remove_host;
+ }
+
+ /* Schedule worker thread to handle CCN and take care of adding and
+ * removing devices to OS
+ */
+ atomic_set(&pinstance->expose_resources, 1);
+ schedule_work(&pinstance->worker_q);
+ return rc;
+
+out_remove_host:
+ scsi_remove_host(host);
+
+out_release_bufs:
+ pmcraid_release_buffers(pinstance);
+
+out_unregister_isr:
+ pmcraid_kill_tasklets(pinstance);
+ pmcraid_unregister_interrupt_handler(pinstance);
+
+out_scsi_host_put:
+ scsi_host_put(host);
+
+cleanup_nomem:
+ iounmap(mapped_pci_addr);
+
+out_release_regions:
+ pci_release_regions(pdev);
+
+out_disable_device:
+ atomic_dec(&pmcraid_adapter_count);
+ pci_disable_device(pdev);
+ return -ENODEV;
+}
+
+/*
+ * PCI driver structure of pcmraid driver
+ */
+static struct pci_driver pmcraid_driver = {
+ .name = PMCRAID_DRIVER_NAME,
+ .id_table = pmcraid_pci_table,
+ .probe = pmcraid_probe,
+ .remove = pmcraid_remove,
+ .suspend = pmcraid_suspend,
+ .resume = pmcraid_resume,
+ .shutdown = pmcraid_shutdown
+};
+
+/**
+ * pmcraid_init - module load entry point
+ */
+static int __init pmcraid_init(void)
+{
+ dev_t dev;
+ int error;
+
+ pmcraid_info("%s Device Driver version: %s\n",
+ PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
+
+ error = alloc_chrdev_region(&dev, 0,
+ PMCRAID_MAX_ADAPTERS,
+ PMCRAID_DEVFILE);
+
+ if (error) {
+ pmcraid_err("failed to get a major number for adapters\n");
+ goto out_init;
+ }
+
+ pmcraid_major = MAJOR(dev);
+ pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
+
+ if (IS_ERR(pmcraid_class)) {
+ error = PTR_ERR(pmcraid_class);
+ pmcraid_err("failed to register with sysfs, error = %x\n",
+ error);
+ goto out_unreg_chrdev;
+ }
+
+ error = pmcraid_netlink_init();
+
+ if (error)
+ goto out_unreg_chrdev;
+
+ error = pci_register_driver(&pmcraid_driver);
+
+ if (error == 0)
+ goto out_init;
+
+ pmcraid_err("failed to register pmcraid driver, error = %x\n",
+ error);
+ class_destroy(pmcraid_class);
+ pmcraid_netlink_release();
+
+out_unreg_chrdev:
+ unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
+
+out_init:
+ return error;
+}
+
+/**
+ * pmcraid_exit - module unload entry point
+ */
+static void __exit pmcraid_exit(void)
+{
+ pmcraid_netlink_release();
+ unregister_chrdev_region(MKDEV(pmcraid_major, 0),
+ PMCRAID_MAX_ADAPTERS);
+ pci_unregister_driver(&pmcraid_driver);
+ class_destroy(pmcraid_class);
+}
+
+module_init(pmcraid_init);
+module_exit(pmcraid_exit);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
new file mode 100644
index 000000000..e1d150f3f
--- /dev/null
+++ b/drivers/scsi/pmcraid.h
@@ -0,0 +1,1095 @@
+/*
+ * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
+ *
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ * PMC-Sierra Inc
+ *
+ * Copyright (C) 2008, 2009 PMC Sierra Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PMCRAID_H
+#define _PMCRAID_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/cdev.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <linux/connector.h>
+/*
+ * Driver name : string representing the driver name
+ * Device file : /dev file to be used for management interfaces
+ * Driver version: version string in major_version.minor_version.patch format
+ * Driver date : date information in "Mon dd yyyy" format
+ */
+#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
+#define PMCRAID_DEVFILE "pmcsas"
+#define PMCRAID_DRIVER_VERSION "1.0.3"
+
+#define PMCRAID_FW_VERSION_1 0x002
+
+/* Maximum number of adapters supported by current version of the driver */
+#define PMCRAID_MAX_ADAPTERS 1024
+
+/* Bit definitions as per firmware, bit position [0][1][2].....[31] */
+#define PMC_BIT8(n) (1 << (7-n))
+#define PMC_BIT16(n) (1 << (15-n))
+#define PMC_BIT32(n) (1 << (31-n))
+
+/* PMC PCI vendor ID and device ID values */
+#define PCI_VENDOR_ID_PMC 0x11F8
+#define PCI_DEVICE_ID_PMC_MAXRAID 0x5220
+
+/*
+ * MAX_CMD : maximum commands that can be outstanding with IOA
+ * MAX_IO_CMD : command blocks available for IO commands
+ * MAX_HCAM_CMD : command blocks avaibale for HCAMS
+ * MAX_INTERNAL_CMD : command blocks avaible for internal commands like reset
+ */
+#define PMCRAID_MAX_CMD 1024
+#define PMCRAID_MAX_IO_CMD 1020
+#define PMCRAID_MAX_HCAM_CMD 2
+#define PMCRAID_MAX_INTERNAL_CMD 2
+
+/* MAX_IOADLS : max number of scatter-gather lists supported by IOA
+ * IOADLS_INTERNAL : number of ioadls included as part of IOARCB.
+ * IOADLS_EXTERNAL : number of ioadls allocated external to IOARCB
+ */
+#define PMCRAID_IOADLS_INTERNAL 27
+#define PMCRAID_IOADLS_EXTERNAL 37
+#define PMCRAID_MAX_IOADLS PMCRAID_IOADLS_INTERNAL
+
+/* HRRQ_ENTRY_SIZE : size of hrrq buffer
+ * IOARCB_ALIGNMENT : alignment required for IOARCB
+ * IOADL_ALIGNMENT : alignment requirement for IOADLs
+ * MSIX_VECTORS : number of MSIX vectors supported
+ */
+#define HRRQ_ENTRY_SIZE sizeof(__le32)
+#define PMCRAID_IOARCB_ALIGNMENT 32
+#define PMCRAID_IOADL_ALIGNMENT 16
+#define PMCRAID_IOASA_ALIGNMENT 4
+#define PMCRAID_NUM_MSIX_VECTORS 16
+
+/* various other limits */
+#define PMCRAID_VENDOR_ID_LEN 8
+#define PMCRAID_PRODUCT_ID_LEN 16
+#define PMCRAID_SERIAL_NUM_LEN 8
+#define PMCRAID_LUN_LEN 8
+#define PMCRAID_MAX_CDB_LEN 16
+#define PMCRAID_DEVICE_ID_LEN 8
+#define PMCRAID_SENSE_DATA_LEN 256
+#define PMCRAID_ADD_CMD_PARAM_LEN 48
+
+#define PMCRAID_MAX_BUS_TO_SCAN 1
+#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256
+#define PMCRAID_MAX_NUM_LUNS_PER_TARGET 8
+
+/* IOA bus/target/lun number of IOA resources */
+#define PMCRAID_IOA_BUS_ID 0xfe
+#define PMCRAID_IOA_TARGET_ID 0xff
+#define PMCRAID_IOA_LUN_ID 0xff
+#define PMCRAID_VSET_BUS_ID 0x1
+#define PMCRAID_VSET_LUN_ID 0x0
+#define PMCRAID_PHYS_BUS_ID 0x0
+#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
+#define PMCRAID_MAX_VSET_TARGETS 0x7F
+#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
+
+#define PMCRAID_IOA_MAX_SECTORS 32767
+#define PMCRAID_VSET_MAX_SECTORS 512
+#define PMCRAID_MAX_CMD_PER_LUN 254
+
+/* Number of configuration table entries (resources), includes 1 FP,
+ * 1 Enclosure device
+ */
+#define PMCRAID_MAX_RESOURCES 256
+
+/* Adapter Commands used by driver */
+#define PMCRAID_QUERY_RESOURCE_STATE 0xC2
+#define PMCRAID_RESET_DEVICE 0xC3
+/* options to select reset target */
+#define ENABLE_RESET_MODIFIER 0x80
+#define RESET_DEVICE_LUN 0x40
+#define RESET_DEVICE_TARGET 0x20
+#define RESET_DEVICE_BUS 0x10
+
+#define PMCRAID_IDENTIFY_HRRQ 0xC4
+#define PMCRAID_QUERY_IOA_CONFIG 0xC5
+#define PMCRAID_QUERY_CMD_STATUS 0xCB
+#define PMCRAID_ABORT_CMD 0xC7
+
+/* CANCEL ALL command, provides option for setting SYNC_COMPLETE
+ * on the target resources for which commands got cancelled
+ */
+#define PMCRAID_CANCEL_ALL_REQUESTS 0xCE
+#define PMCRAID_SYNC_COMPLETE_AFTER_CANCEL PMC_BIT8(0)
+
+/* HCAM command and types of HCAM supported by IOA */
+#define PMCRAID_HOST_CONTROLLED_ASYNC 0xCF
+#define PMCRAID_HCAM_CODE_CONFIG_CHANGE 0x01
+#define PMCRAID_HCAM_CODE_LOG_DATA 0x02
+
+/* IOA shutdown command and various shutdown types */
+#define PMCRAID_IOA_SHUTDOWN 0xF7
+#define PMCRAID_SHUTDOWN_NORMAL 0x00
+#define PMCRAID_SHUTDOWN_PREPARE_FOR_NORMAL 0x40
+#define PMCRAID_SHUTDOWN_NONE 0x100
+#define PMCRAID_SHUTDOWN_ABBREV 0x80
+
+/* SET SUPPORTED DEVICES command and the option to select all the
+ * devices to be supported
+ */
+#define PMCRAID_SET_SUPPORTED_DEVICES 0xFB
+#define ALL_DEVICES_SUPPORTED PMC_BIT8(0)
+
+/* This option is used with SCSI WRITE_BUFFER command */
+#define PMCRAID_WR_BUF_DOWNLOAD_AND_SAVE 0x05
+
+/* IOASC Codes used by driver */
+#define PMCRAID_IOASC_SENSE_MASK 0xFFFFFF00
+#define PMCRAID_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
+#define PMCRAID_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
+#define PMCRAID_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8)
+#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff)
+
+#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000
+#define PMCRAID_IOASC_GC_IOARCB_NOTFOUND 0x005A0000
+#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
+#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
+#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000
+#define PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC 0x03110C00
+#define PMCRAID_IOASC_HW_CANNOT_COMMUNICATE 0x04050000
+#define PMCRAID_IOASC_HW_DEVICE_TIMEOUT 0x04080100
+#define PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR 0x04448500
+#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600
+#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
+#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
+#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
+#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00
+#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
+
+/* Driver defined IOASCs */
+#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001
+#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002
+
+/* Various timeout values (in milliseconds) used. If any of these are chip
+ * specific, move them to pmcraid_chip_details structure.
+ */
+#define PMCRAID_PCI_DEASSERT_TIMEOUT 2000
+#define PMCRAID_BIST_TIMEOUT 2000
+#define PMCRAID_AENWAIT_TIMEOUT 5000
+#define PMCRAID_TRANSOP_TIMEOUT 60000
+
+#define PMCRAID_RESET_TIMEOUT (2 * HZ)
+#define PMCRAID_CHECK_FOR_RESET_TIMEOUT ((HZ / 10))
+#define PMCRAID_VSET_IO_TIMEOUT (60 * HZ)
+#define PMCRAID_INTERNAL_TIMEOUT (60 * HZ)
+#define PMCRAID_SHUTDOWN_TIMEOUT (150 * HZ)
+#define PMCRAID_RESET_BUS_TIMEOUT (60 * HZ)
+#define PMCRAID_RESET_HOST_TIMEOUT (150 * HZ)
+#define PMCRAID_REQUEST_SENSE_TIMEOUT (30 * HZ)
+#define PMCRAID_SET_SUP_DEV_TIMEOUT (2 * 60 * HZ)
+
+/* structure to represent a scatter-gather element (IOADL descriptor) */
+struct pmcraid_ioadl_desc {
+ __le64 address;
+ __le32 data_len;
+ __u8 reserved[3];
+ __u8 flags;
+} __attribute__((packed, aligned(PMCRAID_IOADL_ALIGNMENT)));
+
+/* pmcraid_ioadl_desc.flags values */
+#define IOADL_FLAGS_CHAINED PMC_BIT8(0)
+#define IOADL_FLAGS_LAST_DESC PMC_BIT8(1)
+#define IOADL_FLAGS_READ_LAST PMC_BIT8(1)
+#define IOADL_FLAGS_WRITE_LAST PMC_BIT8(1)
+
+
+/* additional IOARCB data which can be CDB or additional request parameters
+ * or list of IOADLs. Firmware supports max of 512 bytes for IOARCB, hence then
+ * number of IOADLs are limted to 27. In case they are more than 27, they will
+ * be used in chained form
+ */
+struct pmcraid_ioarcb_add_data {
+ union {
+ struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_INTERNAL];
+ __u8 add_cmd_params[PMCRAID_ADD_CMD_PARAM_LEN];
+ } u;
+};
+
+/*
+ * IOA Request Control Block
+ */
+struct pmcraid_ioarcb {
+ __le64 ioarcb_bus_addr;
+ __le32 resource_handle;
+ __le32 response_handle;
+ __le64 ioadl_bus_addr;
+ __le32 ioadl_length;
+ __le32 data_transfer_length;
+ __le64 ioasa_bus_addr;
+ __le16 ioasa_len;
+ __le16 cmd_timeout;
+ __le16 add_cmd_param_offset;
+ __le16 add_cmd_param_length;
+ __le32 reserved1[2];
+ __le32 reserved2;
+ __u8 request_type;
+ __u8 request_flags0;
+ __u8 request_flags1;
+ __u8 hrrq_id;
+ __u8 cdb[PMCRAID_MAX_CDB_LEN];
+ struct pmcraid_ioarcb_add_data add_data;
+} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
+
+/* well known resource handle values */
+#define PMCRAID_IOA_RES_HANDLE 0xffffffff
+#define PMCRAID_INVALID_RES_HANDLE 0
+
+/* pmcraid_ioarcb.request_type values */
+#define REQ_TYPE_SCSI 0x00
+#define REQ_TYPE_IOACMD 0x01
+#define REQ_TYPE_HCAM 0x02
+
+/* pmcraid_ioarcb.flags0 values */
+#define TRANSFER_DIR_WRITE PMC_BIT8(0)
+#define INHIBIT_UL_CHECK PMC_BIT8(2)
+#define SYNC_OVERRIDE PMC_BIT8(3)
+#define SYNC_COMPLETE PMC_BIT8(4)
+#define NO_LINK_DESCS PMC_BIT8(5)
+
+/* pmcraid_ioarcb.flags1 values */
+#define DELAY_AFTER_RESET PMC_BIT8(0)
+#define TASK_TAG_SIMPLE 0x10
+#define TASK_TAG_ORDERED 0x20
+#define TASK_TAG_QUEUE_HEAD 0x30
+
+/* toggle bit offset in response handle */
+#define HRRQ_TOGGLE_BIT 0x01
+#define HRRQ_RESPONSE_BIT 0x02
+
+/* IOA Status Area */
+struct pmcraid_ioasa_vset {
+ __le32 failing_lba_hi;
+ __le32 failing_lba_lo;
+ __le32 reserved;
+} __attribute__((packed, aligned(4)));
+
+struct pmcraid_ioasa {
+ __le32 ioasc;
+ __le16 returned_status_length;
+ __le16 available_status_length;
+ __le32 residual_data_length;
+ __le32 ilid;
+ __le32 fd_ioasc;
+ __le32 fd_res_address;
+ __le32 fd_res_handle;
+ __le32 reserved;
+
+ /* resource specific sense information */
+ union {
+ struct pmcraid_ioasa_vset vset;
+ } u;
+
+ /* IOA autosense data */
+ __le16 auto_sense_length;
+ __le16 error_data_length;
+ __u8 sense_data[PMCRAID_SENSE_DATA_LEN];
+} __attribute__((packed, aligned(4)));
+
+#define PMCRAID_DRIVER_ILID 0xffffffff
+
+/* Config Table Entry per Resource */
+struct pmcraid_config_table_entry {
+ __u8 resource_type;
+ __u8 bus_protocol;
+ __le16 array_id;
+ __u8 common_flags0;
+ __u8 common_flags1;
+ __u8 unique_flags0;
+ __u8 unique_flags1; /*also used as vset target_id */
+ __le32 resource_handle;
+ __le32 resource_address;
+ __u8 device_id[PMCRAID_DEVICE_ID_LEN];
+ __u8 lun[PMCRAID_LUN_LEN];
+} __attribute__((packed, aligned(4)));
+
+/* extended configuration table sizes are also of 32 bytes in size */
+struct pmcraid_config_table_entry_ext {
+ struct pmcraid_config_table_entry cfgte;
+};
+
+/* resource types (config_table_entry.resource_type values) */
+#define RES_TYPE_AF_DASD 0x00
+#define RES_TYPE_GSCSI 0x01
+#define RES_TYPE_VSET 0x02
+#define RES_TYPE_IOA_FP 0xFF
+
+#define RES_IS_IOA(res) ((res).resource_type == RES_TYPE_IOA_FP)
+#define RES_IS_GSCSI(res) ((res).resource_type == RES_TYPE_GSCSI)
+#define RES_IS_VSET(res) ((res).resource_type == RES_TYPE_VSET)
+#define RES_IS_AFDASD(res) ((res).resource_type == RES_TYPE_AF_DASD)
+
+/* bus_protocol values used by driver */
+#define RES_TYPE_VENCLOSURE 0x8
+
+/* config_table_entry.common_flags0 */
+#define MULTIPATH_RESOURCE PMC_BIT32(0)
+
+/* unique_flags1 */
+#define IMPORT_MODE_MANUAL PMC_BIT8(0)
+
+/* well known resource handle values */
+#define RES_HANDLE_IOA 0xFFFFFFFF
+#define RES_HANDLE_NONE 0x00000000
+
+/* well known resource address values */
+#define RES_ADDRESS_IOAFP 0xFEFFFFFF
+#define RES_ADDRESS_INVALID 0xFFFFFFFF
+
+/* BUS/TARGET/LUN values from resource_addrr */
+#define RES_BUS(res_addr) (le32_to_cpu(res_addr) & 0xFF)
+#define RES_TARGET(res_addr) ((le32_to_cpu(res_addr) >> 16) & 0xFF)
+#define RES_LUN(res_addr) 0x0
+
+/* configuration table structure */
+struct pmcraid_config_table {
+ __le16 num_entries;
+ __u8 table_format;
+ __u8 reserved1;
+ __u8 flags;
+ __u8 reserved2[11];
+ union {
+ struct pmcraid_config_table_entry
+ entries[PMCRAID_MAX_RESOURCES];
+ struct pmcraid_config_table_entry_ext
+ entries_ext[PMCRAID_MAX_RESOURCES];
+ };
+} __attribute__((packed, aligned(4)));
+
+/* config_table.flags value */
+#define MICROCODE_UPDATE_REQUIRED PMC_BIT32(0)
+
+/*
+ * HCAM format
+ */
+#define PMCRAID_HOSTRCB_LDNSIZE 4056
+
+/* Error log notification format */
+struct pmcraid_hostrcb_error {
+ __le32 fd_ioasc;
+ __le32 fd_ra;
+ __le32 fd_rh;
+ __le32 prc;
+ union {
+ __u8 data[PMCRAID_HOSTRCB_LDNSIZE];
+ } u;
+} __attribute__ ((packed, aligned(4)));
+
+struct pmcraid_hcam_hdr {
+ __u8 op_code;
+ __u8 notification_type;
+ __u8 notification_lost;
+ __u8 flags;
+ __u8 overlay_id;
+ __u8 reserved1[3];
+ __le32 ilid;
+ __le32 timestamp1;
+ __le32 timestamp2;
+ __le32 data_len;
+} __attribute__((packed, aligned(4)));
+
+#define PMCRAID_AEN_GROUP 0x3
+
+struct pmcraid_hcam_ccn {
+ struct pmcraid_hcam_hdr header;
+ struct pmcraid_config_table_entry cfg_entry;
+ struct pmcraid_config_table_entry cfg_entry_old;
+} __attribute__((packed, aligned(4)));
+
+#define PMCRAID_CCN_EXT_SIZE 3944
+struct pmcraid_hcam_ccn_ext {
+ struct pmcraid_hcam_hdr header;
+ struct pmcraid_config_table_entry_ext cfg_entry;
+ struct pmcraid_config_table_entry_ext cfg_entry_old;
+ __u8 reserved[PMCRAID_CCN_EXT_SIZE];
+} __attribute__((packed, aligned(4)));
+
+struct pmcraid_hcam_ldn {
+ struct pmcraid_hcam_hdr header;
+ struct pmcraid_hostrcb_error error_log;
+} __attribute__((packed, aligned(4)));
+
+/* pmcraid_hcam.op_code values */
+#define HOSTRCB_TYPE_CCN 0xE1
+#define HOSTRCB_TYPE_LDN 0xE2
+
+/* pmcraid_hcam.notification_type values */
+#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0
+#define NOTIFICATION_TYPE_ENTRY_NEW 0x1
+#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2
+#define NOTIFICATION_TYPE_STATE_CHANGE 0x3
+#define NOTIFICATION_TYPE_ENTRY_STATECHANGED 0x4
+#define NOTIFICATION_TYPE_ERROR_LOG 0x10
+#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11
+
+#define HOSTRCB_NOTIFICATIONS_LOST PMC_BIT8(0)
+
+/* pmcraid_hcam.flags values */
+#define HOSTRCB_INTERNAL_OP_ERROR PMC_BIT8(0)
+#define HOSTRCB_ERROR_RESPONSE_SENT PMC_BIT8(1)
+
+/* pmcraid_hcam.overlay_id values */
+#define HOSTRCB_OVERLAY_ID_08 0x08
+#define HOSTRCB_OVERLAY_ID_09 0x09
+#define HOSTRCB_OVERLAY_ID_11 0x11
+#define HOSTRCB_OVERLAY_ID_12 0x12
+#define HOSTRCB_OVERLAY_ID_13 0x13
+#define HOSTRCB_OVERLAY_ID_14 0x14
+#define HOSTRCB_OVERLAY_ID_16 0x16
+#define HOSTRCB_OVERLAY_ID_17 0x17
+#define HOSTRCB_OVERLAY_ID_20 0x20
+#define HOSTRCB_OVERLAY_ID_FF 0xFF
+
+/* Implementation specific card details */
+struct pmcraid_chip_details {
+ /* hardware register offsets */
+ unsigned long ioastatus;
+ unsigned long ioarrin;
+ unsigned long mailbox;
+ unsigned long global_intr_mask;
+ unsigned long ioa_host_intr;
+ unsigned long ioa_host_msix_intr;
+ unsigned long ioa_host_intr_clr;
+ unsigned long ioa_host_mask;
+ unsigned long ioa_host_mask_clr;
+ unsigned long host_ioa_intr;
+ unsigned long host_ioa_intr_clr;
+
+ /* timeout used during transitional to operational state */
+ unsigned long transop_timeout;
+};
+
+/* IOA to HOST doorbells (interrupts) */
+#define INTRS_TRANSITION_TO_OPERATIONAL PMC_BIT32(0)
+#define INTRS_IOARCB_TRANSFER_FAILED PMC_BIT32(3)
+#define INTRS_IOA_UNIT_CHECK PMC_BIT32(4)
+#define INTRS_NO_HRRQ_FOR_CMD_RESPONSE PMC_BIT32(5)
+#define INTRS_CRITICAL_OP_IN_PROGRESS PMC_BIT32(6)
+#define INTRS_IO_DEBUG_ACK PMC_BIT32(7)
+#define INTRS_IOARRIN_LOST PMC_BIT32(27)
+#define INTRS_SYSTEM_BUS_MMIO_ERROR PMC_BIT32(28)
+#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29)
+#define INTRS_HRRQ_VALID PMC_BIT32(30)
+#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0)
+#define INTRS_ALLOW_MSIX_VECTOR0 PMC_BIT32(31)
+
+/* Host to IOA Doorbells */
+#define DOORBELL_RUNTIME_RESET PMC_BIT32(1)
+#define DOORBELL_IOA_RESET_ALERT PMC_BIT32(7)
+#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9)
+#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8)
+#define DOORBELL_IOA_START_BIST PMC_BIT32(23)
+#define DOORBELL_INTR_MODE_MSIX PMC_BIT32(25)
+#define DOORBELL_INTR_MSIX_CLR PMC_BIT32(26)
+#define DOORBELL_RESET_IOA PMC_BIT32(31)
+
+/* Global interrupt mask register value */
+#define GLOBAL_INTERRUPT_MASK 0x5ULL
+
+#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \
+ INTRS_IOA_UNIT_CHECK | \
+ INTRS_NO_HRRQ_FOR_CMD_RESPONSE | \
+ INTRS_IOARRIN_LOST | \
+ INTRS_SYSTEM_BUS_MMIO_ERROR | \
+ INTRS_IOA_PROCESSOR_ERROR)
+
+#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \
+ INTRS_HRRQ_VALID | \
+ INTRS_TRANSITION_TO_OPERATIONAL |\
+ INTRS_ALLOW_MSIX_VECTOR0)
+
+/* control_block, associated with each of the commands contains IOARCB, IOADLs
+ * memory for IOASA. Additional 3 * 16 bytes are allocated in order to support
+ * additional request parameters (of max size 48) any command.
+ */
+struct pmcraid_control_block {
+ struct pmcraid_ioarcb ioarcb;
+ struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_EXTERNAL + 3];
+ struct pmcraid_ioasa ioasa;
+} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
+
+/* pmcraid_sglist - Scatter-gather list allocated for passthrough ioctls
+ */
+struct pmcraid_sglist {
+ u32 order;
+ u32 num_sg;
+ u32 num_dma_sg;
+ u32 buffer_len;
+ struct scatterlist scatterlist[1];
+};
+
+/* page D0 inquiry data of focal point resource */
+struct pmcraid_inquiry_data {
+ __u8 ph_dev_type;
+ __u8 page_code;
+ __u8 reserved1;
+ __u8 add_page_len;
+ __u8 length;
+ __u8 reserved2;
+ __le16 fw_version;
+ __u8 reserved3[16];
+};
+
+#define PMCRAID_TIMESTAMP_LEN 12
+#define PMCRAID_REQ_TM_STR_LEN 6
+#define PMCRAID_SCSI_SET_TIMESTAMP 0xA4
+#define PMCRAID_SCSI_SERVICE_ACTION 0x0F
+
+struct pmcraid_timestamp_data {
+ __u8 reserved1[4];
+ __u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */
+ __u8 reserved2[2];
+};
+
+/* pmcraid_cmd - LLD representation of SCSI command */
+struct pmcraid_cmd {
+
+ /* Ptr and bus address of DMA.able control block for this command */
+ struct pmcraid_control_block *ioa_cb;
+ dma_addr_t ioa_cb_bus_addr;
+ dma_addr_t dma_handle;
+
+ /* pointer to mid layer structure of SCSI commands */
+ struct scsi_cmnd *scsi_cmd;
+
+ struct list_head free_list;
+ struct completion wait_for_completion;
+ struct timer_list timer; /* needed for internal commands */
+ u32 timeout; /* current timeout value */
+ u32 index; /* index into the command list */
+ u8 completion_req; /* for handling internal commands */
+ u8 release; /* for handling completions */
+
+ void (*cmd_done) (struct pmcraid_cmd *);
+ struct pmcraid_instance *drv_inst;
+
+ struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */
+
+ /* scratch used */
+ union {
+ /* during reset sequence */
+ unsigned long time_left;
+ struct pmcraid_resource_entry *res;
+ int hrrq_index;
+
+ /* used during IO command error handling. Sense buffer
+ * for REQUEST SENSE command if firmware is not sending
+ * auto sense data
+ */
+ struct {
+ u8 *sense_buffer;
+ dma_addr_t sense_buffer_dma;
+ };
+ };
+};
+
+/*
+ * Interrupt registers of IOA
+ */
+struct pmcraid_interrupts {
+ void __iomem *ioa_host_interrupt_reg;
+ void __iomem *ioa_host_msix_interrupt_reg;
+ void __iomem *ioa_host_interrupt_clr_reg;
+ void __iomem *ioa_host_interrupt_mask_reg;
+ void __iomem *ioa_host_interrupt_mask_clr_reg;
+ void __iomem *global_interrupt_mask_reg;
+ void __iomem *host_ioa_interrupt_reg;
+ void __iomem *host_ioa_interrupt_clr_reg;
+};
+
+/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */
+struct pmcraid_isr_param {
+ struct pmcraid_instance *drv_inst;
+ u16 vector; /* allocated msi-x vector */
+ u8 hrrq_id; /* hrrq entry index */
+};
+
+
+/* AEN message header sent as part of event data to applications */
+struct pmcraid_aen_msg {
+ u32 hostno;
+ u32 length;
+ u8 reserved[8];
+ u8 data[0];
+};
+
+/* Controller state event message type */
+struct pmcraid_state_msg {
+ struct pmcraid_aen_msg msg;
+ u32 ioa_state;
+};
+
+#define PMC_DEVICE_EVENT_RESET_START 0x11000000
+#define PMC_DEVICE_EVENT_RESET_SUCCESS 0x11000001
+#define PMC_DEVICE_EVENT_RESET_FAILED 0x11000002
+#define PMC_DEVICE_EVENT_SHUTDOWN_START 0x11000003
+#define PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS 0x11000004
+#define PMC_DEVICE_EVENT_SHUTDOWN_FAILED 0x11000005
+
+struct pmcraid_hostrcb {
+ struct pmcraid_instance *drv_inst;
+ struct pmcraid_aen_msg *msg;
+ struct pmcraid_hcam_hdr *hcam; /* pointer to hcam buffer */
+ struct pmcraid_cmd *cmd; /* pointer to command block used */
+ dma_addr_t baddr; /* system address of hcam buffer */
+ atomic_t ignore; /* process HCAM response ? */
+};
+
+#define PMCRAID_AEN_HDR_SIZE sizeof(struct pmcraid_aen_msg)
+
+
+
+/*
+ * Per adapter structure maintained by LLD
+ */
+struct pmcraid_instance {
+ /* Array of allowed-to-be-exposed resources, initialized from
+ * Configutation Table, later updated with CCNs
+ */
+ struct pmcraid_resource_entry *res_entries;
+
+ struct list_head free_res_q; /* res_entries lists for easy lookup */
+ struct list_head used_res_q; /* List of to be exposed resources */
+ spinlock_t resource_lock; /* spinlock to protect resource list */
+
+ void __iomem *mapped_dma_addr;
+ void __iomem *ioa_status; /* Iomapped IOA status register */
+ void __iomem *mailbox; /* Iomapped mailbox register */
+ void __iomem *ioarrin; /* IOmapped IOARR IN register */
+
+ struct pmcraid_interrupts int_regs;
+ struct pmcraid_chip_details *chip_cfg;
+
+ /* HostRCBs needed for HCAM */
+ struct pmcraid_hostrcb ldn;
+ struct pmcraid_hostrcb ccn;
+ struct pmcraid_state_msg scn; /* controller state change msg */
+
+
+ /* Bus address of start of HRRQ */
+ dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Pointer to 1st entry of HRRQ */
+ __be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Pointer to last entry of HRRQ */
+ __be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Pointer to current pointer of hrrq */
+ __be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Lock for HRRQ access */
+ spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS];
+
+ struct pmcraid_inquiry_data *inq_data;
+ dma_addr_t inq_data_baddr;
+
+ struct pmcraid_timestamp_data *timestamp_data;
+ dma_addr_t timestamp_data_baddr;
+
+ /* size of configuration table entry, varies based on the firmware */
+ u32 config_table_entry_size;
+
+ /* Expected toggle bit at host */
+ u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS];
+
+
+ /* Wait Q for threads to wait for Reset IOA completion */
+ wait_queue_head_t reset_wait_q;
+ struct pmcraid_cmd *reset_cmd;
+
+ /* structures for supporting SIGIO based AEN. */
+ struct fasync_struct *aen_queue;
+ struct mutex aen_queue_lock; /* lock for aen subscribers list */
+ struct cdev cdev;
+
+ struct Scsi_Host *host; /* mid layer interface structure handle */
+ struct pci_dev *pdev; /* PCI device structure handle */
+
+ /* No of Reset IOA retries . IOA marked dead if threshold exceeds */
+ u8 ioa_reset_attempts;
+#define PMCRAID_RESET_ATTEMPTS 3
+
+ u8 current_log_level; /* default level for logging IOASC errors */
+
+ u8 num_hrrq; /* Number of interrupt vectors allocated */
+ u8 interrupt_mode; /* current interrupt mode legacy or msix */
+ dev_t dev; /* Major-Minor numbers for Char device */
+
+ /* Used as ISR handler argument */
+ struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Message id as filled in last fired IOARCB, used to identify HRRQ */
+ atomic_t last_message_id;
+
+ /* configuration table */
+ struct pmcraid_config_table *cfg_table;
+ dma_addr_t cfg_table_bus_addr;
+
+ /* structures related to command blocks */
+ struct kmem_cache *cmd_cachep; /* cache for cmd blocks */
+ struct pci_pool *control_pool; /* pool for control blocks */
+ char cmd_pool_name[64]; /* name of cmd cache */
+ char ctl_pool_name[64]; /* name of control cache */
+
+ struct pmcraid_cmd *cmd_list[PMCRAID_MAX_CMD];
+
+ struct list_head free_cmd_pool;
+ struct list_head pending_cmd_pool;
+ spinlock_t free_pool_lock; /* free pool lock */
+ spinlock_t pending_pool_lock; /* pending pool lock */
+
+ /* Tasklet to handle deferred processing */
+ struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Work-queue (Shared) for deferred reset processing */
+ struct work_struct worker_q;
+
+ /* No of IO commands pending with FW */
+ atomic_t outstanding_cmds;
+
+ /* should add/delete resources to mid-layer now ?*/
+ atomic_t expose_resources;
+
+
+
+ u32 ioa_state:4; /* For IOA Reset sequence FSM */
+#define IOA_STATE_OPERATIONAL 0x0
+#define IOA_STATE_UNKNOWN 0x1
+#define IOA_STATE_DEAD 0x2
+#define IOA_STATE_IN_SOFT_RESET 0x3
+#define IOA_STATE_IN_HARD_RESET 0x4
+#define IOA_STATE_IN_RESET_ALERT 0x5
+#define IOA_STATE_IN_BRINGDOWN 0x6
+#define IOA_STATE_IN_BRINGUP 0x7
+
+ u32 ioa_reset_in_progress:1; /* true if IOA reset is in progress */
+ u32 ioa_hard_reset:1; /* TRUE if Hard Reset is needed */
+ u32 ioa_unit_check:1; /* Indicates Unit Check condition */
+ u32 ioa_bringdown:1; /* whether IOA needs to be brought down */
+ u32 force_ioa_reset:1; /* force adapter reset ? */
+ u32 reinit_cfg_table:1; /* reinit config table due to lost CCN */
+ u32 ioa_shutdown_type:2;/* shutdown type used during reset */
+#define SHUTDOWN_NONE 0x0
+#define SHUTDOWN_NORMAL 0x1
+#define SHUTDOWN_ABBREV 0x2
+ u32 timestamp_error:1; /* indicate set timestamp for out of sync */
+
+};
+
+/* LLD maintained resource entry structure */
+struct pmcraid_resource_entry {
+ struct list_head queue; /* link to "to be exposed" resources */
+ union {
+ struct pmcraid_config_table_entry cfg_entry;
+ struct pmcraid_config_table_entry_ext cfg_entry_ext;
+ };
+ struct scsi_device *scsi_dev; /* Link scsi_device structure */
+ atomic_t read_failures; /* count of failed READ commands */
+ atomic_t write_failures; /* count of failed WRITE commands */
+
+ /* To indicate add/delete/modify during CCN */
+ u8 change_detected;
+#define RES_CHANGE_ADD 0x1 /* add this to mid-layer */
+#define RES_CHANGE_DEL 0x2 /* remove this from mid-layer */
+
+ u8 reset_progress; /* Device is resetting */
+
+ /*
+ * When IOA asks for sync (i.e. IOASC = Not Ready, Sync Required), this
+ * flag will be set, mid layer will be asked to retry. In the next
+ * attempt, this flag will be checked in queuecommand() to set
+ * SYNC_COMPLETE flag in IOARCB (flag_0).
+ */
+ u8 sync_reqd;
+
+ /* target indicates the mapped target_id assigned to this resource if
+ * this is VSET resource. For non-VSET resources this will be un-used
+ * or zero
+ */
+ u8 target;
+};
+
+/* Data structures used in IOASC error code logging */
+struct pmcraid_ioasc_error {
+ u32 ioasc_code; /* IOASC code */
+ u8 log_level; /* default log level assignment. */
+ char *error_string;
+};
+
+/* Initial log_level assignments for various IOASCs */
+#define IOASC_LOG_LEVEL_NONE 0x0 /* no logging */
+#define IOASC_LOG_LEVEL_MUST 0x1 /* must log: all high-severity errors */
+#define IOASC_LOG_LEVEL_HARD 0x2 /* optional – low severity errors */
+
+/* Error information maintained by LLD. LLD initializes the pmcraid_error_table
+ * statically.
+ */
+static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
+ {0x01180600, IOASC_LOG_LEVEL_HARD,
+ "Recovered Error, soft media error, sector reassignment suggested"},
+ {0x015D0000, IOASC_LOG_LEVEL_HARD,
+ "Recovered Error, failure prediction threshold exceeded"},
+ {0x015D9200, IOASC_LOG_LEVEL_HARD,
+ "Recovered Error, soft Cache Card Battery error threshold"},
+ {0x015D9200, IOASC_LOG_LEVEL_HARD,
+ "Recovered Error, soft Cache Card Battery error threshold"},
+ {0x02048000, IOASC_LOG_LEVEL_HARD,
+ "Not Ready, IOA Reset Required"},
+ {0x02408500, IOASC_LOG_LEVEL_HARD,
+ "Not Ready, IOA microcode download required"},
+ {0x03110B00, IOASC_LOG_LEVEL_HARD,
+ "Medium Error, data unreadable, reassignment suggested"},
+ {0x03110C00, IOASC_LOG_LEVEL_MUST,
+ "Medium Error, data unreadable do not reassign"},
+ {0x03310000, IOASC_LOG_LEVEL_HARD,
+ "Medium Error, media corrupted"},
+ {0x04050000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, IOA can't communicate with device"},
+ {0x04080000, IOASC_LOG_LEVEL_MUST,
+ "Hardware Error, device bus error"},
+ {0x04088000, IOASC_LOG_LEVEL_MUST,
+ "Hardware Error, device bus is not functioning"},
+ {0x04118000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, IOA reserved area data check"},
+ {0x04118100, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, IOA reserved area invalid data pattern"},
+ {0x04118200, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, IOA reserved area LRC error"},
+ {0x04320000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, reassignment space exhausted"},
+ {0x04330000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, data transfer underlength error"},
+ {0x04330000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, data transfer overlength error"},
+ {0x04418000, IOASC_LOG_LEVEL_MUST,
+ "Hardware Error, PCI bus error"},
+ {0x04440000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, device error"},
+ {0x04448200, IOASC_LOG_LEVEL_MUST,
+ "Hardware Error, IOA error"},
+ {0x04448300, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, undefined device response"},
+ {0x04448400, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, IOA microcode error"},
+ {0x04448600, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, IOA reset required"},
+ {0x04449200, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, hard Cache Fearuee Card Battery error"},
+ {0x0444A000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, failed device altered"},
+ {0x0444A200, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, data check after reassignment"},
+ {0x0444A300, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, LRC error after reassignment"},
+ {0x044A0000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, device bus error (msg/cmd phase)"},
+ {0x04670400, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, new device can't be used"},
+ {0x04678000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, invalid multiadapter configuration"},
+ {0x04678100, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, incorrect connection between enclosures"},
+ {0x04678200, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, connections exceed IOA design limits"},
+ {0x04678300, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, incorrect multipath connection"},
+ {0x04679000, IOASC_LOG_LEVEL_HARD,
+ "Hardware Error, command to LUN failed"},
+ {0x064C8000, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, cache exists for missing/failed device"},
+ {0x06670100, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, incompatible exposed mode device"},
+ {0x06670600, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, attachment of logical unit failed"},
+ {0x06678000, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, cables exceed connective design limit"},
+ {0x06678300, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, incomplete multipath connection between" \
+ "IOA and enclosure"},
+ {0x06678400, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, incomplete multipath connection between" \
+ "device and enclosure"},
+ {0x06678500, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, incomplete multipath connection between" \
+ "IOA and remote IOA"},
+ {0x06678600, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, missing remote IOA"},
+ {0x06679100, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, enclosure doesn't support required multipath" \
+ "function"},
+ {0x06698200, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, corrupt array parity detected on device"},
+ {0x066B0200, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, array exposed"},
+ {0x066B8200, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, exposed array is still protected"},
+ {0x066B9200, IOASC_LOG_LEVEL_HARD,
+ "Unit Attention, Multipath redundancy level got worse"},
+ {0x07270000, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, device is read/write protected by IOA"},
+ {0x07278000, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, IOA doesn't support device attribute"},
+ {0x07278100, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, NVRAM mirroring prohibited"},
+ {0x07278400, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, array is short 2 or more devices"},
+ {0x07278600, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, exposed array is short a required device"},
+ {0x07278700, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, array members not at required addresses"},
+ {0x07278800, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, exposed mode device resource address conflict"},
+ {0x07278900, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, incorrect resource address of exposed mode device"},
+ {0x07278A00, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, Array is missing a device and parity is out of sync"},
+ {0x07278B00, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, maximum number of arrays already exist"},
+ {0x07278C00, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, cannot locate cache data for device"},
+ {0x07278D00, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, cache data exits for a changed device"},
+ {0x07279100, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, detection of a device requiring format"},
+ {0x07279200, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, IOA exceeds maximum number of devices"},
+ {0x07279600, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, missing array, volume set is not functional"},
+ {0x07279700, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, single device for a volume set"},
+ {0x07279800, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, missing multiple devices for a volume set"},
+ {0x07279900, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, maximum number of volument sets already exists"},
+ {0x07279A00, IOASC_LOG_LEVEL_HARD,
+ "Data Protect, other volume set problem"},
+};
+
+/* macros to help in debugging */
+#define pmcraid_err(...) \
+ printk(KERN_ERR "MaxRAID: "__VA_ARGS__)
+
+#define pmcraid_info(...) \
+ if (pmcraid_debug_log) \
+ printk(KERN_INFO "MaxRAID: "__VA_ARGS__)
+
+/* check if given command is a SCSI READ or SCSI WRITE command */
+#define SCSI_READ_CMD 0x1 /* any of SCSI READ commands */
+#define SCSI_WRITE_CMD 0x2 /* any of SCSI WRITE commands */
+#define SCSI_CMD_TYPE(opcode) \
+({ u8 op = opcode; u8 __type = 0;\
+ if (op == READ_6 || op == READ_10 || op == READ_12 || op == READ_16)\
+ __type = SCSI_READ_CMD;\
+ else if (op == WRITE_6 || op == WRITE_10 || op == WRITE_12 || \
+ op == WRITE_16)\
+ __type = SCSI_WRITE_CMD;\
+ __type;\
+})
+
+#define IS_SCSI_READ_WRITE(opcode) \
+({ u8 __type = SCSI_CMD_TYPE(opcode); \
+ (__type == SCSI_READ_CMD || __type == SCSI_WRITE_CMD) ? 1 : 0;\
+})
+
+
+/*
+ * pmcraid_ioctl_header - definition of header structure that precedes all the
+ * buffers given as ioctl arguments.
+ *
+ * .signature : always ASCII string, "PMCRAID"
+ * .reserved : not used
+ * .buffer_length : length of the buffer following the header
+ */
+struct pmcraid_ioctl_header {
+ u8 signature[8];
+ u32 reserved;
+ u32 buffer_length;
+};
+
+#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
+
+/*
+ * pmcraid_passthrough_ioctl_buffer - structure given as argument to
+ * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
+ * 32-byte alignment so, it is necessary to pack this structure to avoid any
+ * holes between ioctl_header and passthrough buffer
+ *
+ * .ioactl_header : ioctl header
+ * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
+ * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
+ * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
+ * the transfer directions passed in ioarcb.flags0. Contents
+ * of this buffer are valid only when ioarcb.data_transfer_len
+ * is not zero.
+ */
+struct pmcraid_passthrough_ioctl_buffer {
+ struct pmcraid_ioctl_header ioctl_header;
+ struct pmcraid_ioarcb ioarcb;
+ struct pmcraid_ioasa ioasa;
+ u8 request_buffer[1];
+} __attribute__ ((packed));
+
+/*
+ * keys to differentiate between driver handled IOCTLs and passthrough
+ * IOCTLs passed to IOA. driver determines the ioctl type using macro
+ * _IOC_TYPE
+ */
+#define PMCRAID_DRIVER_IOCTL 'D'
+#define PMCRAID_PASSTHROUGH_IOCTL 'F'
+
+#define DRV_IOCTL(n, size) \
+ _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
+
+#define FMW_IOCTL(n, size) \
+ _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
+
+/*
+ * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
+ * This is to facilitate applications avoiding un-necessary memory allocations.
+ * For example, most of driver handled ioctls do not require ioarcb, ioasa.
+ */
+#define _ARGSIZE(arg) (sizeof(struct pmcraid_ioctl_header) + sizeof(arg))
+
+/* Driver handled IOCTL command definitions */
+
+#define PMCRAID_IOCTL_RESET_ADAPTER \
+ DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
+
+/* passthrough/firmware handled commands */
+#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
+ FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
+
+#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
+ FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
+
+
+#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
new file mode 100644
index 000000000..1db8b2606
--- /dev/null
+++ b/drivers/scsi/ppa.c
@@ -0,0 +1,1132 @@
+/* ppa.c -- low level driver for the IOMEGA PPA3
+ * parallel port SCSI host adapter.
+ *
+ * (The PPA3 is the embedded controller in the ZIP drive.)
+ *
+ * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
+ * under the terms of the GNU General Public License.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/parport.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <asm/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+
+static void ppa_reset_pulse(unsigned int base);
+
+typedef struct {
+ struct pardevice *dev; /* Parport device entry */
+ int base; /* Actual port address */
+ int mode; /* Transfer mode */
+ struct scsi_cmnd *cur_cmd; /* Current queued command */
+ struct delayed_work ppa_tq; /* Polling interrupt stuff */
+ unsigned long jstart; /* Jiffies at start */
+ unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
+ unsigned int failed:1; /* Failure flag */
+ unsigned wanted:1; /* Parport sharing busy flag */
+ wait_queue_head_t *waiting;
+ struct Scsi_Host *host;
+ struct list_head list;
+} ppa_struct;
+
+#include "ppa.h"
+
+static inline ppa_struct *ppa_dev(struct Scsi_Host *host)
+{
+ return *(ppa_struct **)&host->hostdata;
+}
+
+static DEFINE_SPINLOCK(arbitration_lock);
+
+static void got_it(ppa_struct *dev)
+{
+ dev->base = dev->dev->port->base;
+ if (dev->cur_cmd)
+ dev->cur_cmd->SCp.phase = 1;
+ else
+ wake_up(dev->waiting);
+}
+
+static void ppa_wakeup(void *ref)
+{
+ ppa_struct *dev = (ppa_struct *) ref;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arbitration_lock, flags);
+ if (dev->wanted) {
+ parport_claim(dev->dev);
+ got_it(dev);
+ dev->wanted = 0;
+ }
+ spin_unlock_irqrestore(&arbitration_lock, flags);
+ return;
+}
+
+static int ppa_pb_claim(ppa_struct *dev)
+{
+ unsigned long flags;
+ int res = 1;
+ spin_lock_irqsave(&arbitration_lock, flags);
+ if (parport_claim(dev->dev) == 0) {
+ got_it(dev);
+ res = 0;
+ }
+ dev->wanted = res;
+ spin_unlock_irqrestore(&arbitration_lock, flags);
+ return res;
+}
+
+static void ppa_pb_dismiss(ppa_struct *dev)
+{
+ unsigned long flags;
+ int wanted;
+ spin_lock_irqsave(&arbitration_lock, flags);
+ wanted = dev->wanted;
+ dev->wanted = 0;
+ spin_unlock_irqrestore(&arbitration_lock, flags);
+ if (!wanted)
+ parport_release(dev->dev);
+}
+
+static inline void ppa_pb_release(ppa_struct *dev)
+{
+ parport_release(dev->dev);
+}
+
+/*
+ * Start of Chipset kludges
+ */
+
+/* This is to give the ppa driver a way to modify the timings (and other
+ * parameters) by writing to the /proc/scsi/ppa/0 file.
+ * Very simple method really... (To simple, no error checking :( )
+ * Reason: Kernel hackers HATE having to unload and reload modules for
+ * testing...
+ * Also gives a method to use a script to obtain optimum timings (TODO)
+ */
+
+static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ ppa_struct *dev = ppa_dev(host);
+ unsigned long x;
+
+ if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) {
+ x = simple_strtoul(buffer + 5, NULL, 0);
+ dev->mode = x;
+ return length;
+ }
+ if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
+ x = simple_strtoul(buffer + 10, NULL, 0);
+ dev->recon_tmo = x;
+ printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x);
+ return length;
+ }
+ printk(KERN_WARNING "ppa /proc: invalid variable\n");
+ return -EINVAL;
+}
+
+static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ ppa_struct *dev = ppa_dev(host);
+
+ seq_printf(m, "Version : %s\n", PPA_VERSION);
+ seq_printf(m, "Parport : %s\n", dev->dev->port->name);
+ seq_printf(m, "Mode : %s\n", PPA_MODE_STRING[dev->mode]);
+#if PPA_DEBUG > 0
+ seq_printf(m, "recon_tmo : %lu\n", dev->recon_tmo);
+#endif
+ return 0;
+}
+
+static int device_check(ppa_struct *dev);
+
+#if PPA_DEBUG > 0
+#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
+ y, __func__, __LINE__); ppa_fail_func(x,y);
+static inline void ppa_fail_func(ppa_struct *dev, int error_code)
+#else
+static inline void ppa_fail(ppa_struct *dev, int error_code)
+#endif
+{
+ /* If we fail a device then we trash status / message bytes */
+ if (dev->cur_cmd) {
+ dev->cur_cmd->result = error_code << 16;
+ dev->failed = 1;
+ }
+}
+
+/*
+ * Wait for the high bit to be set.
+ *
+ * In principle, this could be tied to an interrupt, but the adapter
+ * doesn't appear to be designed to support interrupts. We spin on
+ * the 0x80 ready bit.
+ */
+static unsigned char ppa_wait(ppa_struct *dev)
+{
+ int k;
+ unsigned short ppb = dev->base;
+ unsigned char r;
+
+ k = PPA_SPIN_TMO;
+ /* Wait for bit 6 and 7 - PJC */
+ for (r = r_str(ppb); ((r & 0xc0) != 0xc0) && (k); k--) {
+ udelay(1);
+ r = r_str(ppb);
+ }
+
+ /*
+ * return some status information.
+ * Semantics: 0xc0 = ZIP wants more data
+ * 0xd0 = ZIP wants to send more data
+ * 0xe0 = ZIP is expecting SCSI command data
+ * 0xf0 = end of transfer, ZIP is sending status
+ */
+ if (k)
+ return (r & 0xf0);
+
+ /* Counter expired - Time out occurred */
+ ppa_fail(dev, DID_TIME_OUT);
+ printk(KERN_WARNING "ppa timeout in ppa_wait\n");
+ return 0; /* command timed out */
+}
+
+/*
+ * Clear EPP Timeout Bit
+ */
+static inline void epp_reset(unsigned short ppb)
+{
+ int i;
+
+ i = r_str(ppb);
+ w_str(ppb, i);
+ w_str(ppb, i & 0xfe);
+}
+
+/*
+ * Wait for empty ECP fifo (if we are in ECP fifo mode only)
+ */
+static inline void ecp_sync(ppa_struct *dev)
+{
+ int i, ppb_hi = dev->dev->port->base_hi;
+
+ if (ppb_hi == 0)
+ return;
+
+ if ((r_ecr(ppb_hi) & 0xe0) == 0x60) { /* mode 011 == ECP fifo mode */
+ for (i = 0; i < 100; i++) {
+ if (r_ecr(ppb_hi) & 0x01)
+ return;
+ udelay(5);
+ }
+ printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n");
+ }
+}
+
+static int ppa_byte_out(unsigned short base, const char *buffer, int len)
+{
+ int i;
+
+ for (i = len; i; i--) {
+ w_dtr(base, *buffer++);
+ w_ctr(base, 0xe);
+ w_ctr(base, 0xc);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static int ppa_byte_in(unsigned short base, char *buffer, int len)
+{
+ int i;
+
+ for (i = len; i; i--) {
+ *buffer++ = r_dtr(base);
+ w_ctr(base, 0x27);
+ w_ctr(base, 0x25);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static int ppa_nibble_in(unsigned short base, char *buffer, int len)
+{
+ for (; len; len--) {
+ unsigned char h;
+
+ w_ctr(base, 0x4);
+ h = r_str(base) & 0xf0;
+ w_ctr(base, 0x6);
+ *buffer++ = h | ((r_str(base) & 0xf0) >> 4);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static int ppa_out(ppa_struct *dev, char *buffer, int len)
+{
+ int r;
+ unsigned short ppb = dev->base;
+
+ r = ppa_wait(dev);
+
+ if ((r & 0x50) != 0x40) {
+ ppa_fail(dev, DID_ERROR);
+ return 0;
+ }
+ switch (dev->mode) {
+ case PPA_NIBBLE:
+ case PPA_PS2:
+ /* 8 bit output, with a loop */
+ r = ppa_byte_out(ppb, buffer, len);
+ break;
+
+ case PPA_EPP_32:
+ case PPA_EPP_16:
+ case PPA_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x4);
+#ifdef CONFIG_SCSI_IZIP_EPP16
+ if (!(((long) buffer | len) & 0x01))
+ outsw(ppb + 4, buffer, len >> 1);
+#else
+ if (!(((long) buffer | len) & 0x03))
+ outsl(ppb + 4, buffer, len >> 2);
+#endif
+ else
+ outsb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0xc);
+ r = !(r_str(ppb) & 0x01);
+ w_ctr(ppb, 0xc);
+ ecp_sync(dev);
+ break;
+
+ default:
+ printk(KERN_ERR "PPA: bug in ppa_out()\n");
+ r = 0;
+ }
+ return r;
+}
+
+static int ppa_in(ppa_struct *dev, char *buffer, int len)
+{
+ int r;
+ unsigned short ppb = dev->base;
+
+ r = ppa_wait(dev);
+
+ if ((r & 0x50) != 0x50) {
+ ppa_fail(dev, DID_ERROR);
+ return 0;
+ }
+ switch (dev->mode) {
+ case PPA_NIBBLE:
+ /* 4 bit input, with a loop */
+ r = ppa_nibble_in(ppb, buffer, len);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case PPA_PS2:
+ /* 8 bit input, with a loop */
+ w_ctr(ppb, 0x25);
+ r = ppa_byte_in(ppb, buffer, len);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case PPA_EPP_32:
+ case PPA_EPP_16:
+ case PPA_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x24);
+#ifdef CONFIG_SCSI_IZIP_EPP16
+ if (!(((long) buffer | len) & 0x01))
+ insw(ppb + 4, buffer, len >> 1);
+#else
+ if (!(((long) buffer | len) & 0x03))
+ insl(ppb + 4, buffer, len >> 2);
+#endif
+ else
+ insb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0x2c);
+ r = !(r_str(ppb) & 0x01);
+ w_ctr(ppb, 0x2c);
+ ecp_sync(dev);
+ break;
+
+ default:
+ printk(KERN_ERR "PPA: bug in ppa_ins()\n");
+ r = 0;
+ break;
+ }
+ return r;
+}
+
+/* end of ppa_io.h */
+static inline void ppa_d_pulse(unsigned short ppb, unsigned char b)
+{
+ w_dtr(ppb, b);
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+}
+
+static void ppa_disconnect(ppa_struct *dev)
+{
+ unsigned short ppb = dev->base;
+
+ ppa_d_pulse(ppb, 0);
+ ppa_d_pulse(ppb, 0x3c);
+ ppa_d_pulse(ppb, 0x20);
+ ppa_d_pulse(ppb, 0xf);
+}
+
+static inline void ppa_c_pulse(unsigned short ppb, unsigned char b)
+{
+ w_dtr(ppb, b);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0x6);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+}
+
+static inline void ppa_connect(ppa_struct *dev, int flag)
+{
+ unsigned short ppb = dev->base;
+
+ ppa_c_pulse(ppb, 0);
+ ppa_c_pulse(ppb, 0x3c);
+ ppa_c_pulse(ppb, 0x20);
+ if ((flag == CONNECT_EPP_MAYBE) && IN_EPP_MODE(dev->mode))
+ ppa_c_pulse(ppb, 0xcf);
+ else
+ ppa_c_pulse(ppb, 0x8f);
+}
+
+static int ppa_select(ppa_struct *dev, int target)
+{
+ int k;
+ unsigned short ppb = dev->base;
+
+ /*
+ * Bit 6 (0x40) is the device selected bit.
+ * First we must wait till the current device goes off line...
+ */
+ k = PPA_SELECT_TMO;
+ do {
+ k--;
+ udelay(1);
+ } while ((r_str(ppb) & 0x40) && (k));
+ if (!k)
+ return 0;
+
+ w_dtr(ppb, (1 << target));
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0xc);
+ w_dtr(ppb, 0x80); /* This is NOT the initator */
+ w_ctr(ppb, 0x8);
+
+ k = PPA_SELECT_TMO;
+ do {
+ k--;
+ udelay(1);
+ }
+ while (!(r_str(ppb) & 0x40) && (k));
+ if (!k)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * This is based on a trace of what the Iomega DOS 'guest' driver does.
+ * I've tried several different kinds of parallel ports with guest and
+ * coded this to react in the same ways that it does.
+ *
+ * The return value from this function is just a hint about where the
+ * handshaking failed.
+ *
+ */
+static int ppa_init(ppa_struct *dev)
+{
+ int retv;
+ unsigned short ppb = dev->base;
+
+ ppa_disconnect(dev);
+ ppa_connect(dev, CONNECT_NORMAL);
+
+ retv = 2; /* Failed */
+
+ w_ctr(ppb, 0xe);
+ if ((r_str(ppb) & 0x08) == 0x08)
+ retv--;
+
+ w_ctr(ppb, 0xc);
+ if ((r_str(ppb) & 0x08) == 0x00)
+ retv--;
+
+ if (!retv)
+ ppa_reset_pulse(ppb);
+ udelay(1000); /* Allow devices to settle down */
+ ppa_disconnect(dev);
+ udelay(1000); /* Another delay to allow devices to settle */
+
+ if (retv)
+ return -EIO;
+
+ return device_check(dev);
+}
+
+static inline int ppa_send_command(struct scsi_cmnd *cmd)
+{
+ ppa_struct *dev = ppa_dev(cmd->device->host);
+ int k;
+
+ w_ctr(dev->base, 0x0c);
+
+ for (k = 0; k < cmd->cmd_len; k++)
+ if (!ppa_out(dev, &cmd->cmnd[k], 1))
+ return 0;
+ return 1;
+}
+
+/*
+ * The bulk flag enables some optimisations in the data transfer loops,
+ * it should be true for any command that transfers data in integral
+ * numbers of sectors.
+ *
+ * The driver appears to remain stable if we speed up the parallel port
+ * i/o in this function, but not elsewhere.
+ */
+static int ppa_completion(struct scsi_cmnd *cmd)
+{
+ /* Return codes:
+ * -1 Error
+ * 0 Told to schedule
+ * 1 Finished data transfer
+ */
+ ppa_struct *dev = ppa_dev(cmd->device->host);
+ unsigned short ppb = dev->base;
+ unsigned long start_jiffies = jiffies;
+
+ unsigned char r, v;
+ int fast, bulk, status;
+
+ v = cmd->cmnd[0];
+ bulk = ((v == READ_6) ||
+ (v == READ_10) || (v == WRITE_6) || (v == WRITE_10));
+
+ /*
+ * We only get here if the drive is ready to comunicate,
+ * hence no need for a full ppa_wait.
+ */
+ r = (r_str(ppb) & 0xf0);
+
+ while (r != (unsigned char) 0xf0) {
+ /*
+ * If we have been running for more than a full timer tick
+ * then take a rest.
+ */
+ if (time_after(jiffies, start_jiffies + 1))
+ return 0;
+
+ if ((cmd->SCp.this_residual <= 0)) {
+ ppa_fail(dev, DID_ERROR);
+ return -1; /* ERROR_RETURN */
+ }
+
+ /* On some hardware we have SCSI disconnected (6th bit low)
+ * for about 100usecs. It is too expensive to wait a
+ * tick on every loop so we busy wait for no more than
+ * 500usecs to give the drive a chance first. We do not
+ * change things for "normal" hardware since generally
+ * the 6th bit is always high.
+ * This makes the CPU load higher on some hardware
+ * but otherwise we can not get more than 50K/secs
+ * on this problem hardware.
+ */
+ if ((r & 0xc0) != 0xc0) {
+ /* Wait for reconnection should be no more than
+ * jiffy/2 = 5ms = 5000 loops
+ */
+ unsigned long k = dev->recon_tmo;
+ for (; k && ((r = (r_str(ppb) & 0xf0)) & 0xc0) != 0xc0;
+ k--)
+ udelay(1);
+
+ if (!k)
+ return 0;
+ }
+
+ /* determine if we should use burst I/O */
+ fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
+ ? PPA_BURST_SIZE : 1;
+
+ if (r == (unsigned char) 0xc0)
+ status = ppa_out(dev, cmd->SCp.ptr, fast);
+ else
+ status = ppa_in(dev, cmd->SCp.ptr, fast);
+
+ cmd->SCp.ptr += fast;
+ cmd->SCp.this_residual -= fast;
+
+ if (!status) {
+ ppa_fail(dev, DID_BUS_BUSY);
+ return -1; /* ERROR_RETURN */
+ }
+ if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ /* if scatter/gather, advance to the next segment */
+ if (cmd->SCp.buffers_residual--) {
+ cmd->SCp.buffer++;
+ cmd->SCp.this_residual =
+ cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ }
+ }
+ /* Now check to see if the drive is ready to comunicate */
+ r = (r_str(ppb) & 0xf0);
+ /* If not, drop back down to the scheduler and wait a timer tick */
+ if (!(r & 0x80))
+ return 0;
+ }
+ return 1; /* FINISH_RETURN */
+}
+
+/*
+ * Since the PPA itself doesn't generate interrupts, we use
+ * the scheduler's task queue to generate a stream of call-backs and
+ * complete the request when the drive is ready.
+ */
+static void ppa_interrupt(struct work_struct *work)
+{
+ ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
+ struct scsi_cmnd *cmd = dev->cur_cmd;
+
+ if (!cmd) {
+ printk(KERN_ERR "PPA: bug in ppa_interrupt\n");
+ return;
+ }
+ if (ppa_engine(dev, cmd)) {
+ schedule_delayed_work(&dev->ppa_tq, 1);
+ return;
+ }
+ /* Command must of completed hence it is safe to let go... */
+#if PPA_DEBUG > 0
+ switch ((cmd->result >> 16) & 0xff) {
+ case DID_OK:
+ break;
+ case DID_NO_CONNECT:
+ printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
+ break;
+ case DID_BUS_BUSY:
+ printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
+ break;
+ case DID_TIME_OUT:
+ printk(KERN_DEBUG "ppa: unknown timeout\n");
+ break;
+ case DID_ABORT:
+ printk(KERN_DEBUG "ppa: told to abort\n");
+ break;
+ case DID_PARITY:
+ printk(KERN_DEBUG "ppa: parity error (???)\n");
+ break;
+ case DID_ERROR:
+ printk(KERN_DEBUG "ppa: internal driver error\n");
+ break;
+ case DID_RESET:
+ printk(KERN_DEBUG "ppa: told to reset device\n");
+ break;
+ case DID_BAD_INTR:
+ printk(KERN_WARNING "ppa: bad interrupt (???)\n");
+ break;
+ default:
+ printk(KERN_WARNING "ppa: bad return code (%02x)\n",
+ (cmd->result >> 16) & 0xff);
+ }
+#endif
+
+ if (cmd->SCp.phase > 1)
+ ppa_disconnect(dev);
+
+ ppa_pb_dismiss(dev);
+
+ dev->cur_cmd = NULL;
+
+ cmd->scsi_done(cmd);
+}
+
+static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
+{
+ unsigned short ppb = dev->base;
+ unsigned char l = 0, h = 0;
+ int retv;
+
+ /* First check for any errors that may of occurred
+ * Here we check for internal errors
+ */
+ if (dev->failed)
+ return 0;
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Phase 0 - Waiting for parport */
+ if (time_after(jiffies, dev->jstart + HZ)) {
+ /*
+ * We waited more than a second
+ * for parport to call us
+ */
+ ppa_fail(dev, DID_BUS_BUSY);
+ return 0;
+ }
+ return 1; /* wait until ppa_wakeup claims parport */
+ case 1: /* Phase 1 - Connected */
+ { /* Perform a sanity check for cable unplugged */
+ int retv = 2; /* Failed */
+
+ ppa_connect(dev, CONNECT_EPP_MAYBE);
+
+ w_ctr(ppb, 0xe);
+ if ((r_str(ppb) & 0x08) == 0x08)
+ retv--;
+
+ w_ctr(ppb, 0xc);
+ if ((r_str(ppb) & 0x08) == 0x00)
+ retv--;
+
+ if (retv) {
+ if (time_after(jiffies, dev->jstart + (1 * HZ))) {
+ printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n");
+ ppa_fail(dev, DID_BUS_BUSY);
+ return 0;
+ } else {
+ ppa_disconnect(dev);
+ return 1; /* Try again in a jiffy */
+ }
+ }
+ cmd->SCp.phase++;
+ }
+
+ case 2: /* Phase 2 - We are now talking to the scsi bus */
+ if (!ppa_select(dev, scmd_id(cmd))) {
+ ppa_fail(dev, DID_NO_CONNECT);
+ return 0;
+ }
+ cmd->SCp.phase++;
+
+ case 3: /* Phase 3 - Ready to accept a command */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ if (!ppa_send_command(cmd))
+ return 0;
+ cmd->SCp.phase++;
+
+ case 4: /* Phase 4 - Setup scatter/gather buffers */
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.this_residual = 0;
+ cmd->SCp.ptr = NULL;
+ }
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
+ cmd->SCp.phase++;
+
+ case 5: /* Phase 5 - Data transfer stage */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ retv = ppa_completion(cmd);
+ if (retv == -1)
+ return 0;
+ if (retv == 0)
+ return 1;
+ cmd->SCp.phase++;
+
+ case 6: /* Phase 6 - Read status/message */
+ cmd->result = DID_OK << 16;
+ /* Check for data overrun */
+ if (ppa_wait(dev) != (unsigned char) 0xf0) {
+ ppa_fail(dev, DID_ERROR);
+ return 0;
+ }
+ if (ppa_in(dev, &l, 1)) { /* read status byte */
+ /* Check for optional message byte */
+ if (ppa_wait(dev) == (unsigned char) 0xf0)
+ ppa_in(dev, &h, 1);
+ cmd->result =
+ (DID_OK << 16) + (h << 8) + (l & STATUS_MASK);
+ }
+ return 0; /* Finished */
+ break;
+
+ default:
+ printk(KERN_ERR "ppa: Invalid scsi phase\n");
+ }
+ return 0;
+}
+
+static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
+ void (*done) (struct scsi_cmnd *))
+{
+ ppa_struct *dev = ppa_dev(cmd->device->host);
+
+ if (dev->cur_cmd) {
+ printk(KERN_ERR "PPA: bug in ppa_queuecommand\n");
+ return 0;
+ }
+ dev->failed = 0;
+ dev->jstart = jiffies;
+ dev->cur_cmd = cmd;
+ cmd->scsi_done = done;
+ cmd->result = DID_ERROR << 16; /* default return code */
+ cmd->SCp.phase = 0; /* bus free */
+
+ schedule_delayed_work(&dev->ppa_tq, 0);
+
+ ppa_pb_claim(dev);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(ppa_queuecommand)
+
+/*
+ * Apparently the disk->capacity attribute is off by 1 sector
+ * for all disk drives. We add the one here, but it should really
+ * be done in sd.c. Even if it gets fixed there, this will still
+ * work.
+ */
+static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev,
+ sector_t capacity, int ip[])
+{
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+ return 0;
+}
+
+static int ppa_abort(struct scsi_cmnd *cmd)
+{
+ ppa_struct *dev = ppa_dev(cmd->device->host);
+ /*
+ * There is no method for aborting commands since Iomega
+ * have tied the SCSI_MESSAGE line high in the interface
+ */
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Do not have access to parport */
+ case 1: /* Have not connected to interface */
+ dev->cur_cmd = NULL; /* Forget the problem */
+ return SUCCESS;
+ break;
+ default: /* SCSI command sent, can not abort */
+ return FAILED;
+ break;
+ }
+}
+
+static void ppa_reset_pulse(unsigned int base)
+{
+ w_dtr(base, 0x40);
+ w_ctr(base, 0x8);
+ udelay(30);
+ w_ctr(base, 0xc);
+}
+
+static int ppa_reset(struct scsi_cmnd *cmd)
+{
+ ppa_struct *dev = ppa_dev(cmd->device->host);
+
+ if (cmd->SCp.phase)
+ ppa_disconnect(dev);
+ dev->cur_cmd = NULL; /* Forget the problem */
+
+ ppa_connect(dev, CONNECT_NORMAL);
+ ppa_reset_pulse(dev->base);
+ mdelay(1); /* device settle delay */
+ ppa_disconnect(dev);
+ mdelay(1); /* device settle delay */
+ return SUCCESS;
+}
+
+static int device_check(ppa_struct *dev)
+{
+ /* This routine looks for a device and then attempts to use EPP
+ to send a command. If all goes as planned then EPP is available. */
+
+ static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ int loop, old_mode, status, k, ppb = dev->base;
+ unsigned char l;
+
+ old_mode = dev->mode;
+ for (loop = 0; loop < 8; loop++) {
+ /* Attempt to use EPP for Test Unit Ready */
+ if ((ppb & 0x0007) == 0x0000)
+ dev->mode = PPA_EPP_32;
+
+second_pass:
+ ppa_connect(dev, CONNECT_EPP_MAYBE);
+ /* Select SCSI device */
+ if (!ppa_select(dev, loop)) {
+ ppa_disconnect(dev);
+ continue;
+ }
+ printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n",
+ loop, PPA_MODE_STRING[dev->mode]);
+
+ /* Send SCSI command */
+ status = 1;
+ w_ctr(ppb, 0x0c);
+ for (l = 0; (l < 6) && (status); l++)
+ status = ppa_out(dev, cmd, 1);
+
+ if (!status) {
+ ppa_disconnect(dev);
+ ppa_connect(dev, CONNECT_EPP_MAYBE);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000);
+ ppa_disconnect(dev);
+ udelay(1000);
+ if (dev->mode == PPA_EPP_32) {
+ dev->mode = old_mode;
+ goto second_pass;
+ }
+ return -EIO;
+ }
+ w_ctr(ppb, 0x0c);
+ k = 1000000; /* 1 Second */
+ do {
+ l = r_str(ppb);
+ k--;
+ udelay(1);
+ } while (!(l & 0x80) && (k));
+
+ l &= 0xf0;
+
+ if (l != 0xf0) {
+ ppa_disconnect(dev);
+ ppa_connect(dev, CONNECT_EPP_MAYBE);
+ ppa_reset_pulse(ppb);
+ udelay(1000);
+ ppa_disconnect(dev);
+ udelay(1000);
+ if (dev->mode == PPA_EPP_32) {
+ dev->mode = old_mode;
+ goto second_pass;
+ }
+ return -EIO;
+ }
+ ppa_disconnect(dev);
+ printk(KERN_INFO "ppa: Communication established with ID %i using %s\n",
+ loop, PPA_MODE_STRING[dev->mode]);
+ ppa_connect(dev, CONNECT_EPP_MAYBE);
+ ppa_reset_pulse(ppb);
+ udelay(1000);
+ ppa_disconnect(dev);
+ udelay(1000);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int ppa_adjust_queue(struct scsi_device *device)
+{
+ blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
+ return 0;
+}
+
+static struct scsi_host_template ppa_template = {
+ .module = THIS_MODULE,
+ .proc_name = "ppa",
+ .show_info = ppa_show_info,
+ .write_info = ppa_write_info,
+ .name = "Iomega VPI0 (ppa) interface",
+ .queuecommand = ppa_queuecommand,
+ .eh_abort_handler = ppa_abort,
+ .eh_bus_reset_handler = ppa_reset,
+ .eh_host_reset_handler = ppa_reset,
+ .bios_param = ppa_biosparam,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .can_queue = 1,
+ .slave_alloc = ppa_adjust_queue,
+};
+
+/***************************************************************************
+ * Parallel port probing routines *
+ ***************************************************************************/
+
+static LIST_HEAD(ppa_hosts);
+
+static int __ppa_attach(struct parport *pb)
+{
+ struct Scsi_Host *host;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
+ DEFINE_WAIT(wait);
+ ppa_struct *dev;
+ int ports;
+ int modes, ppb, ppb_hi;
+ int err = -ENOMEM;
+
+ dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->base = -1;
+ dev->mode = PPA_AUTODETECT;
+ dev->recon_tmo = PPA_RECON_TMO;
+ init_waitqueue_head(&waiting);
+ dev->dev = parport_register_device(pb, "ppa", NULL, ppa_wakeup,
+ NULL, 0, dev);
+
+ if (!dev->dev)
+ goto out;
+
+ /* Claim the bus so it remembers what we do to the control
+ * registers. [ CTR and ECP ]
+ */
+ err = -EBUSY;
+ dev->waiting = &waiting;
+ prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE);
+ if (ppa_pb_claim(dev))
+ schedule_timeout(3 * HZ);
+ if (dev->wanted) {
+ printk(KERN_ERR "ppa%d: failed to claim parport because "
+ "a pardevice is owning the port for too long "
+ "time!\n", pb->number);
+ ppa_pb_dismiss(dev);
+ dev->waiting = NULL;
+ finish_wait(&waiting, &wait);
+ goto out1;
+ }
+ dev->waiting = NULL;
+ finish_wait(&waiting, &wait);
+ ppb = dev->base = dev->dev->port->base;
+ ppb_hi = dev->dev->port->base_hi;
+ w_ctr(ppb, 0x0c);
+ modes = dev->dev->port->modes;
+
+ /* Mode detection works up the chain of speed
+ * This avoids a nasty if-then-else-if-... tree
+ */
+ dev->mode = PPA_NIBBLE;
+
+ if (modes & PARPORT_MODE_TRISTATE)
+ dev->mode = PPA_PS2;
+
+ if (modes & PARPORT_MODE_ECP) {
+ w_ecr(ppb_hi, 0x20);
+ dev->mode = PPA_PS2;
+ }
+ if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
+ w_ecr(ppb_hi, 0x80);
+
+ /* Done configuration */
+
+ err = ppa_init(dev);
+ ppa_pb_release(dev);
+
+ if (err)
+ goto out1;
+
+ /* now the glue ... */
+ if (dev->mode == PPA_NIBBLE || dev->mode == PPA_PS2)
+ ports = 3;
+ else
+ ports = 8;
+
+ INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
+
+ err = -ENOMEM;
+ host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
+ if (!host)
+ goto out1;
+ host->io_port = pb->base;
+ host->n_io_port = ports;
+ host->dma_channel = -1;
+ host->unique_id = pb->number;
+ *(ppa_struct **)&host->hostdata = dev;
+ dev->host = host;
+ list_add_tail(&dev->list, &ppa_hosts);
+ err = scsi_add_host(host, NULL);
+ if (err)
+ goto out2;
+ scsi_scan_host(host);
+ return 0;
+out2:
+ list_del_init(&dev->list);
+ scsi_host_put(host);
+out1:
+ parport_unregister_device(dev->dev);
+out:
+ kfree(dev);
+ return err;
+}
+
+static void ppa_attach(struct parport *pb)
+{
+ __ppa_attach(pb);
+}
+
+static void ppa_detach(struct parport *pb)
+{
+ ppa_struct *dev;
+ list_for_each_entry(dev, &ppa_hosts, list) {
+ if (dev->dev->port == pb) {
+ list_del_init(&dev->list);
+ scsi_remove_host(dev->host);
+ scsi_host_put(dev->host);
+ parport_unregister_device(dev->dev);
+ kfree(dev);
+ break;
+ }
+ }
+}
+
+static struct parport_driver ppa_driver = {
+ .name = "ppa",
+ .attach = ppa_attach,
+ .detach = ppa_detach,
+};
+
+static int __init ppa_driver_init(void)
+{
+ printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
+ return parport_register_driver(&ppa_driver);
+}
+
+static void __exit ppa_driver_exit(void)
+{
+ parport_unregister_driver(&ppa_driver);
+}
+
+module_init(ppa_driver_init);
+module_exit(ppa_driver_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
new file mode 100644
index 000000000..ba8021427
--- /dev/null
+++ b/drivers/scsi/ppa.h
@@ -0,0 +1,150 @@
+/* Driver for the PPA3 parallel port SCSI HBA embedded in
+ * the Iomega ZIP drive
+ *
+ * (c) 1996 Grant R. Guenther grant@torque.net
+ * David Campbell
+ *
+ * All comments to David.
+ */
+
+#ifndef _PPA_H
+#define _PPA_H
+
+#define PPA_VERSION "2.07 (for Linux 2.4.x)"
+
+/*
+ * this driver has been hacked by Matteo Frigo (athena@theory.lcs.mit.edu)
+ * to support EPP and scatter-gather. [0.26-athena]
+ *
+ * additional hacks by David Campbell
+ * in response to this driver "mis-behaving" on his machine.
+ * Fixed EPP to handle "software" changing of EPP port data direction.
+ * Chased down EPP timeouts
+ * Made this driver "kernel version friendly" [0.28-athena]
+ *
+ * [ Stuff removed ]
+ *
+ * Corrected ppa.h for 2.1.x kernels (>=2.1.85)
+ * Modified "Nat Semi Kludge" for extended chipsets
+ * [1.41]
+ *
+ * Fixed id_probe for EPP 1.9 chipsets (misdetected as EPP 1.7)
+ * [1.42]
+ *
+ * Development solely for 2.1.x kernels from now on!
+ * [2.00]
+ *
+ * Hack and slash at the init code (EPP device check routine)
+ * Added INSANE option.
+ * [2.01]
+ *
+ * Patch applied to sync against the 2.1.x kernel code
+ * Included qboot_zip.sh
+ * [2.02]
+ *
+ * Cleaned up the mess left by someone else trying to fix the
+ * asm section to keep egcc happy. The asm section no longer
+ * exists, the nibble code is *almost* as fast as the asm code
+ * providing it is compiled with egcc.
+ *
+ * Other clean ups include the follow changes:
+ * CONFIG_SCSI_PPA_HAVE_PEDANTIC => CONFIG_SCSI_IZIP_EPP16
+ * added CONFIG_SCSI_IZIP_SLOW_CTR option
+ * [2.03]
+ *
+ * Use ppa_wait() to check for ready AND connected status bits
+ * Add ppa_wait() calls to ppa_completion()
+ * by Peter Cherriman <pjc@ecs.soton.ac.uk> and
+ * Tim Waugh <twaugh@redhat.com>
+ * [2.04]
+ *
+ * Fix kernel panic on scsi timeout, 2000-08-18 [2.05]
+ *
+ * Avoid io_request_lock problems.
+ * John Cavan <johncavan@home.com> [2.06]
+ *
+ * Busy wait for connected status bit in ppa_completion()
+ * in order to cope with some hardware that has this bit low
+ * for short periods of time.
+ * Add udelay() to ppa_select()
+ * by Peter Cherriman <pjc@ecs.soton.ac.uk> and
+ * Oleg Makarenko <omakarenko@cyberplat.ru>
+ * [2.07]
+ */
+/* ------ END OF USER CONFIGURABLE PARAMETERS ----- */
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <scsi/scsi_host.h>
+/* batteries not included :-) */
+
+/*
+ * modes in which the driver can operate
+ */
+#define PPA_AUTODETECT 0 /* Autodetect mode */
+#define PPA_NIBBLE 1 /* work in standard 4 bit mode */
+#define PPA_PS2 2 /* PS/2 byte mode */
+#define PPA_EPP_8 3 /* EPP mode, 8 bit */
+#define PPA_EPP_16 4 /* EPP mode, 16 bit */
+#define PPA_EPP_32 5 /* EPP mode, 32 bit */
+#define PPA_UNKNOWN 6 /* Just in case... */
+
+static char *PPA_MODE_STRING[] =
+{
+ "Autodetect",
+ "SPP",
+ "PS/2",
+ "EPP 8 bit",
+ "EPP 16 bit",
+#ifdef CONFIG_SCSI_IZIP_EPP16
+ "EPP 16 bit",
+#else
+ "EPP 32 bit",
+#endif
+ "Unknown"};
+
+/* other options */
+#define PPA_BURST_SIZE 512 /* data burst size */
+#define PPA_SELECT_TMO 5000 /* how long to wait for target ? */
+#define PPA_SPIN_TMO 50000 /* ppa_wait loop limiter */
+#define PPA_RECON_TMO 500 /* scsi reconnection loop limiter */
+#define PPA_DEBUG 0 /* debugging option */
+#define IN_EPP_MODE(x) (x == PPA_EPP_8 || x == PPA_EPP_16 || x == PPA_EPP_32)
+
+/* args to ppa_connect */
+#define CONNECT_EPP_MAYBE 1
+#define CONNECT_NORMAL 0
+
+#define r_dtr(x) (unsigned char)inb((x))
+#define r_str(x) (unsigned char)inb((x)+1)
+#define r_ctr(x) (unsigned char)inb((x)+2)
+#define r_epp(x) (unsigned char)inb((x)+4)
+#define r_fifo(x) (unsigned char)inb((x)) /* x must be base_hi */
+ /* On PCI is base+0x400 != base_hi */
+#define r_ecr(x) (unsigned char)inb((x)+0x2) /* x must be base_hi */
+
+#define w_dtr(x,y) outb(y, (x))
+#define w_str(x,y) outb(y, (x)+1)
+#define w_epp(x,y) outb(y, (x)+4)
+#define w_fifo(x,y) outb(y, (x)) /* x must be base_hi */
+#define w_ecr(x,y) outb(y, (x)+0x2)/* x must be base_hi */
+
+#ifdef CONFIG_SCSI_IZIP_SLOW_CTR
+#define w_ctr(x,y) outb_p(y, (x)+2)
+#else
+#define w_ctr(x,y) outb(y, (x)+2)
+#endif
+
+static int ppa_engine(ppa_struct *, struct scsi_cmnd *);
+
+#endif /* _PPA_H */
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
new file mode 100644
index 000000000..5298def33
--- /dev/null
+++ b/drivers/scsi/ps3rom.c
@@ -0,0 +1,457 @@
+/*
+ * PS3 BD/DVD/CD-ROM Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/cdrom.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+#define DEVICE_NAME "ps3rom"
+
+#define BOUNCE_SIZE (64*1024)
+
+#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
+
+
+struct ps3rom_private {
+ struct ps3_storage_device *dev;
+ struct scsi_cmnd *curr_cmd;
+};
+
+
+#define LV1_STORAGE_SEND_ATAPI_COMMAND (1)
+
+struct lv1_atapi_cmnd_block {
+ u8 pkt[32]; /* packet command block */
+ u32 pktlen; /* should be 12 for ATAPI 8020 */
+ u32 blocks;
+ u32 block_size;
+ u32 proto; /* transfer mode */
+ u32 in_out; /* transfer direction */
+ u64 buffer; /* parameter except command block */
+ u32 arglen; /* length above */
+};
+
+enum lv1_atapi_proto {
+ NON_DATA_PROTO = 0,
+ PIO_DATA_IN_PROTO = 1,
+ PIO_DATA_OUT_PROTO = 2,
+ DMA_PROTO = 3
+};
+
+enum lv1_atapi_in_out {
+ DIR_WRITE = 0, /* memory -> device */
+ DIR_READ = 1 /* device -> memory */
+};
+
+
+static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
+{
+ struct ps3rom_private *priv = shost_priv(scsi_dev->host);
+ struct ps3_storage_device *dev = priv->dev;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %llu, channel %u\n", __func__,
+ __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel);
+
+ /*
+ * ATAPI SFF8020 devices use MODE_SENSE_10,
+ * so we can prohibit MODE_SENSE_6
+ */
+ scsi_dev->use_10_for_ms = 1;
+
+ /* we don't support {READ,WRITE}_6 */
+ scsi_dev->use_10_for_rw = 1;
+
+ return 0;
+}
+
+static int ps3rom_atapi_request(struct ps3_storage_device *dev,
+ struct scsi_cmnd *cmd)
+{
+ struct lv1_atapi_cmnd_block atapi_cmnd;
+ unsigned char opcode = cmd->cmnd[0];
+ int res;
+ u64 lpar;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: send ATAPI command 0x%02x\n", __func__,
+ __LINE__, opcode);
+
+ memset(&atapi_cmnd, 0, sizeof(struct lv1_atapi_cmnd_block));
+ memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12);
+ atapi_cmnd.pktlen = 12;
+ atapi_cmnd.block_size = 1; /* transfer size is block_size * blocks */
+ atapi_cmnd.blocks = atapi_cmnd.arglen = scsi_bufflen(cmd);
+ atapi_cmnd.buffer = dev->bounce_lpar;
+
+ switch (cmd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ if (scsi_bufflen(cmd) >= CD_FRAMESIZE)
+ atapi_cmnd.proto = DMA_PROTO;
+ else
+ atapi_cmnd.proto = PIO_DATA_IN_PROTO;
+ atapi_cmnd.in_out = DIR_READ;
+ break;
+
+ case DMA_TO_DEVICE:
+ if (scsi_bufflen(cmd) >= CD_FRAMESIZE)
+ atapi_cmnd.proto = DMA_PROTO;
+ else
+ atapi_cmnd.proto = PIO_DATA_OUT_PROTO;
+ atapi_cmnd.in_out = DIR_WRITE;
+ scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size);
+ break;
+
+ default:
+ atapi_cmnd.proto = NON_DATA_PROTO;
+ break;
+ }
+
+ lpar = ps3_mm_phys_to_lpar(__pa(&atapi_cmnd));
+ res = lv1_storage_send_device_command(dev->sbd.dev_id,
+ LV1_STORAGE_SEND_ATAPI_COMMAND,
+ lpar, sizeof(atapi_cmnd),
+ atapi_cmnd.buffer,
+ atapi_cmnd.arglen, &dev->tag);
+ if (res == LV1_DENIED_BY_POLICY) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: ATAPI command 0x%02x denied by policy\n",
+ __func__, __LINE__, opcode);
+ return DID_ERROR << 16;
+ }
+
+ if (res) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: ATAPI command 0x%02x failed %d\n", __func__,
+ __LINE__, opcode, res);
+ return DID_ERROR << 16;
+ }
+
+ return 0;
+}
+
+static inline unsigned int srb10_lba(const struct scsi_cmnd *cmd)
+{
+ return cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 | cmd->cmnd[4] << 8 |
+ cmd->cmnd[5];
+}
+
+static inline unsigned int srb10_len(const struct scsi_cmnd *cmd)
+{
+ return cmd->cmnd[7] << 8 | cmd->cmnd[8];
+}
+
+static int ps3rom_read_request(struct ps3_storage_device *dev,
+ struct scsi_cmnd *cmd, u32 start_sector,
+ u32 sectors)
+{
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n",
+ __func__, __LINE__, sectors, start_sector);
+
+ res = lv1_storage_read(dev->sbd.dev_id,
+ dev->regions[dev->region_idx].id, start_sector,
+ sectors, 0, dev->bounce_lpar, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: read failed %d\n", __func__,
+ __LINE__, res);
+ return DID_ERROR << 16;
+ }
+
+ return 0;
+}
+
+static int ps3rom_write_request(struct ps3_storage_device *dev,
+ struct scsi_cmnd *cmd, u32 start_sector,
+ u32 sectors)
+{
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n",
+ __func__, __LINE__, sectors, start_sector);
+
+ scsi_sg_copy_to_buffer(cmd, dev->bounce_buf, dev->bounce_size);
+
+ res = lv1_storage_write(dev->sbd.dev_id,
+ dev->regions[dev->region_idx].id, start_sector,
+ sectors, 0, dev->bounce_lpar, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: write failed %d\n", __func__,
+ __LINE__, res);
+ return DID_ERROR << 16;
+ }
+
+ return 0;
+}
+
+static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct ps3rom_private *priv = shost_priv(cmd->device->host);
+ struct ps3_storage_device *dev = priv->dev;
+ unsigned char opcode;
+ int res;
+
+ priv->curr_cmd = cmd;
+ cmd->scsi_done = done;
+
+ opcode = cmd->cmnd[0];
+ /*
+ * While we can submit READ/WRITE SCSI commands as ATAPI commands,
+ * it's recommended for various reasons (performance, error handling,
+ * ...) to use lv1_storage_{read,write}() instead
+ */
+ switch (opcode) {
+ case READ_10:
+ res = ps3rom_read_request(dev, cmd, srb10_lba(cmd),
+ srb10_len(cmd));
+ break;
+
+ case WRITE_10:
+ res = ps3rom_write_request(dev, cmd, srb10_lba(cmd),
+ srb10_len(cmd));
+ break;
+
+ default:
+ res = ps3rom_atapi_request(dev, cmd);
+ break;
+ }
+
+ if (res) {
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ cmd->result = res;
+ cmd->sense_buffer[0] = 0x70;
+ cmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ priv->curr_cmd = NULL;
+ cmd->scsi_done(cmd);
+ }
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(ps3rom_queuecommand)
+
+static int decode_lv1_status(u64 status, unsigned char *sense_key,
+ unsigned char *asc, unsigned char *ascq)
+{
+ if (((status >> 24) & 0xff) != SAM_STAT_CHECK_CONDITION)
+ return -1;
+
+ *sense_key = (status >> 16) & 0xff;
+ *asc = (status >> 8) & 0xff;
+ *ascq = status & 0xff;
+ return 0;
+}
+
+static irqreturn_t ps3rom_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ struct Scsi_Host *host;
+ struct ps3rom_private *priv;
+ struct scsi_cmnd *cmd;
+ int res;
+ u64 tag, status;
+ unsigned char sense_key, asc, ascq;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+ /*
+ * status = -1 may mean that ATAPI transport completed OK, but
+ * ATAPI command itself resulted CHECK CONDITION
+ * so, upper layer should issue REQUEST_SENSE to check the sense data
+ */
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %llx, expected %llx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
+ __func__, __LINE__, res, status);
+ return IRQ_HANDLED;
+ }
+
+ host = ps3_system_bus_get_drvdata(&dev->sbd);
+ priv = shost_priv(host);
+ cmd = priv->curr_cmd;
+
+ if (!status) {
+ /* OK, completed */
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ int len;
+
+ len = scsi_sg_copy_from_buffer(cmd,
+ dev->bounce_buf,
+ dev->bounce_size);
+
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
+ }
+ cmd->result = DID_OK << 16;
+ goto done;
+ }
+
+ if (cmd->cmnd[0] == REQUEST_SENSE) {
+ /* SCSI spec says request sense should never get error */
+ dev_err(&dev->sbd.core, "%s:%u: end error without autosense\n",
+ __func__, __LINE__);
+ cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
+ goto done;
+ }
+
+ if (decode_lv1_status(status, &sense_key, &asc, &ascq)) {
+ cmd->result = DID_ERROR << 16;
+ goto done;
+ }
+
+ scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+
+done:
+ priv->curr_cmd = NULL;
+ cmd->scsi_done(cmd);
+ return IRQ_HANDLED;
+}
+
+static struct scsi_host_template ps3rom_host_template = {
+ .name = DEVICE_NAME,
+ .slave_configure = ps3rom_slave_configure,
+ .queuecommand = ps3rom_queuecommand,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .emulated = 1, /* only sg driver uses this */
+ .max_sectors = PS3ROM_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .module = THIS_MODULE,
+};
+
+
+static int ps3rom_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ int error;
+ struct Scsi_Host *host;
+ struct ps3rom_private *priv;
+
+ if (dev->blk_size != CD_FRAMESIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: cannot handle block size %llu\n", __func__,
+ __LINE__, dev->blk_size);
+ return -EINVAL;
+ }
+
+ dev->bounce_size = BOUNCE_SIZE;
+ dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
+ if (!dev->bounce_buf)
+ return -ENOMEM;
+
+ error = ps3stor_setup(dev, ps3rom_interrupt);
+ if (error)
+ goto fail_free_bounce;
+
+ host = scsi_host_alloc(&ps3rom_host_template,
+ sizeof(struct ps3rom_private));
+ if (!host) {
+ dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n",
+ __func__, __LINE__);
+ error = -ENOMEM;
+ goto fail_teardown;
+ }
+
+ priv = shost_priv(host);
+ ps3_system_bus_set_drvdata(&dev->sbd, host);
+ priv->dev = dev;
+
+ /* One device/LUN per SCSI bus */
+ host->max_id = 1;
+ host->max_lun = 1;
+
+ error = scsi_add_host(host, &dev->sbd.core);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed %d\n",
+ __func__, __LINE__, error);
+ error = -ENODEV;
+ goto fail_host_put;
+ }
+
+ scsi_scan_host(host);
+ return 0;
+
+fail_host_put:
+ scsi_host_put(host);
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_bounce:
+ kfree(dev->bounce_buf);
+ return error;
+}
+
+static int ps3rom_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd);
+
+ scsi_remove_host(host);
+ ps3stor_teardown(dev);
+ scsi_host_put(host);
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+ kfree(dev->bounce_buf);
+ return 0;
+}
+
+static struct ps3_system_bus_driver ps3rom = {
+ .match_id = PS3_MATCH_ID_STOR_ROM,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3rom_probe,
+ .remove = ps3rom_remove
+};
+
+
+static int __init ps3rom_init(void)
+{
+ return ps3_system_bus_driver_register(&ps3rom);
+}
+
+static void __exit ps3rom_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3rom);
+}
+
+module_init(ps3rom_init);
+module_exit(ps3rom_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 BD/DVD/CD-ROM Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_ROM);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
new file mode 100644
index 000000000..7f69d409c
--- /dev/null
+++ b/drivers/scsi/qla1280.c
@@ -0,0 +1,4492 @@
+/******************************************************************************
+* QLOGIC LINUX SOFTWARE
+*
+* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
+* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
+* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
+* Copyright (C) 2003-2004 Christoph Hellwig
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2, or (at your option) any
+* later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+******************************************************************************/
+#define QLA1280_VERSION "3.27.1"
+/*****************************************************************************
+ Revision History:
+ Rev 3.27.1, February 8, 2010, Michael Reed
+ - Retain firmware image for error recovery.
+ Rev 3.27, February 10, 2009, Michael Reed
+ - General code cleanup.
+ - Improve error recovery.
+ Rev 3.26, January 16, 2006 Jes Sorensen
+ - Ditch all < 2.6 support
+ Rev 3.25.1, February 10, 2005 Christoph Hellwig
+ - use pci_map_single to map non-S/G requests
+ - remove qla1280_proc_info
+ Rev 3.25, September 28, 2004, Christoph Hellwig
+ - add support for ISP1020/1040
+ - don't include "scsi.h" anymore for 2.6.x
+ Rev 3.24.4 June 7, 2004 Christoph Hellwig
+ - restructure firmware loading, cleanup initialization code
+ - prepare support for ISP1020/1040 chips
+ Rev 3.24.3 January 19, 2004, Jes Sorensen
+ - Handle PCI DMA mask settings correctly
+ - Correct order of error handling in probe_one, free_irq should not
+ be called if request_irq failed
+ Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
+ - Big endian fixes (James)
+ - Remove bogus IOCB content on zero data transfer commands (Andrew)
+ Rev 3.24.1 January 5, 2004, Jes Sorensen
+ - Initialize completion queue to avoid OOPS on probe
+ - Handle interrupts during mailbox testing
+ Rev 3.24 November 17, 2003, Christoph Hellwig
+ - use struct list_head for completion queue
+ - avoid old Scsi_FOO typedefs
+ - cleanup 2.4 compat glue a bit
+ - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
+ - make initialization for memory mapped vs port I/O more similar
+ - remove broken pci config space manipulation
+ - kill more cruft
+ - this is an almost perfect 2.6 scsi driver now! ;)
+ Rev 3.23.39 December 17, 2003, Jes Sorensen
+ - Delete completion queue from srb if mailbox command failed to
+ to avoid qla1280_done completeting qla1280_error_action's
+ obsolete context
+ - Reduce arguments for qla1280_done
+ Rev 3.23.38 October 18, 2003, Christoph Hellwig
+ - Convert to new-style hotplugable driver for 2.6
+ - Fix missing scsi_unregister/scsi_host_put on HBA removal
+ - Kill some more cruft
+ Rev 3.23.37 October 1, 2003, Jes Sorensen
+ - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
+ random CONFIG option
+ - Clean up locking in probe path
+ Rev 3.23.36 October 1, 2003, Christoph Hellwig
+ - queuecommand only ever receives new commands - clear flags
+ - Reintegrate lost fixes from Linux 2.5
+ Rev 3.23.35 August 14, 2003, Jes Sorensen
+ - Build against 2.6
+ Rev 3.23.34 July 23, 2003, Jes Sorensen
+ - Remove pointless TRUE/FALSE macros
+ - Clean up vchan handling
+ Rev 3.23.33 July 3, 2003, Jes Sorensen
+ - Don't define register access macros before define determining MMIO.
+ This just happened to work out on ia64 but not elsewhere.
+ - Don't try and read from the card while it is in reset as
+ it won't respond and causes an MCA
+ Rev 3.23.32 June 23, 2003, Jes Sorensen
+ - Basic support for boot time arguments
+ Rev 3.23.31 June 8, 2003, Jes Sorensen
+ - Reduce boot time messages
+ Rev 3.23.30 June 6, 2003, Jes Sorensen
+ - Do not enable sync/wide/ppr before it has been determined
+ that the target device actually supports it
+ - Enable DMA arbitration for multi channel controllers
+ Rev 3.23.29 June 3, 2003, Jes Sorensen
+ - Port to 2.5.69
+ Rev 3.23.28 June 3, 2003, Jes Sorensen
+ - Eliminate duplicate marker commands on bus resets
+ - Handle outstanding commands appropriately on bus/device resets
+ Rev 3.23.27 May 28, 2003, Jes Sorensen
+ - Remove bogus input queue code, let the Linux SCSI layer do the work
+ - Clean up NVRAM handling, only read it once from the card
+ - Add a number of missing default nvram parameters
+ Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
+ - Use completion queue for mailbox commands instead of busy wait
+ Rev 3.23.25 Beta May 27, 2003, James Bottomley
+ - Migrate to use new error handling code
+ Rev 3.23.24 Beta May 21, 2003, James Bottomley
+ - Big endian support
+ - Cleanup data direction code
+ Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
+ - Switch to using MMIO instead of PIO
+ Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
+ - Fix PCI parity problem with 12160 during reset.
+ Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
+ - Use pci_map_page()/pci_unmap_page() instead of map_single version.
+ Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
+ - Remove < 2.4.x support
+ - Introduce HOST_LOCK to make the spin lock changes portable.
+ - Remove a bunch of idiotic and unnecessary typedef's
+ - Kill all leftovers of target-mode support which never worked anyway
+ Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
+ - Do qla1280_pci_config() before calling request_irq() and
+ request_region()
+ - Use pci_dma_hi32() to handle upper word of DMA addresses instead
+ of large shifts
+ - Hand correct arguments to free_irq() in case of failure
+ Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
+ - Run source through Lindent and clean up the output
+ Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
+ - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
+ Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
+ - Rely on mailbox commands generating interrupts - do not
+ run qla1280_isr() from ql1280_mailbox_command()
+ - Remove device_reg_t
+ - Integrate ql12160_set_target_parameters() with 1280 version
+ - Make qla1280_setup() non static
+ - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
+ sent to the card - this command pauses the firmware!!!
+ Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
+ - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
+ - Remove a pile of pointless and confusing (srb_t **) and
+ (scsi_lu_t *) typecasts
+ - Explicit mark that we do not use the new error handling (for now)
+ - Remove scsi_qla_host_t and use 'struct' instead
+ - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
+ pci_64bit_slot flags which weren't used for anything anyway
+ - Grab host->host_lock while calling qla1280_isr() from abort()
+ - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
+ do not need to save/restore flags in the interrupt handler
+ - Enable interrupts early (before any mailbox access) in preparation
+ for cleaning up the mailbox handling
+ Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
+ - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
+ it with proper use of dprintk().
+ - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
+ a debug level argument to determine if data is to be printed
+ - Add KERN_* info to printk()
+ Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
+ - Significant cosmetic cleanups
+ - Change debug code to use dprintk() and remove #if mess
+ Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
+ - More cosmetic cleanups, fix places treating return as function
+ - use cpu_relax() in qla1280_debounce_register()
+ Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
+ - Make it compile under 2.5.5
+ Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
+ - Do no typecast short * to long * in QL1280BoardTbl, this
+ broke miserably on big endian boxes
+ Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
+ - Remove pre 2.2 hack for checking for reentrance in interrupt handler
+ - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
+ unsigned int to match the types from struct scsi_cmnd
+ Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
+ - Remove bogus timer_t typedef from qla1280.h
+ - Remove obsolete pre 2.2 PCI setup code, use proper #define's
+ for PCI_ values, call pci_set_master()
+ - Fix memleak of qla1280_buffer on module unload
+ - Only compile module parsing code #ifdef MODULE - should be
+ changed to use individual MODULE_PARM's later
+ - Remove dummy_buffer that was never modified nor printed
+ - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
+ #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
+ - Remove \r from print statements, this is Linux, not DOS
+ - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
+ dummy macros
+ - Remove C++ compile hack in header file as Linux driver are not
+ supposed to be compiled as C++
+ - Kill MS_64BITS macro as it makes the code more readable
+ - Remove unnecessary flags.in_interrupts bit
+ Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
+ - Dont' check for set flags on q->q_flag one by one in qla1280_next()
+ - Check whether the interrupt was generated by the QLA1280 before
+ doing any processing
+ - qla1280_status_entry(): Only zero out part of sense_buffer that
+ is not being copied into
+ - Remove more superflouous typecasts
+ - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
+ Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
+ - Don't walk the entire list in qla1280_putq_t() just to directly
+ grab the pointer to the last element afterwards
+ Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
+ - Don't use IRQF_DISABLED, it's use is deprecated for this kinda driver
+ Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
+ - Set dev->max_sectors to 1024
+ Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
+ - Provide compat macros for pci_enable_device(), pci_find_subsys()
+ and scsi_set_pci_device()
+ - Call scsi_set_pci_device() for all devices
+ - Reduce size of kernel version dependent device probe code
+ - Move duplicate probe/init code to separate function
+ - Handle error if qla1280_mem_alloc() fails
+ - Kill OFFSET() macro and use Linux's PCI definitions instead
+ - Kill private structure defining PCI config space (struct config_reg)
+ - Only allocate I/O port region if not in MMIO mode
+ - Remove duplicate (unused) sanity check of sife of srb_t
+ Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
+ - Change home-brew memset() implementations to use memset()
+ - Remove all references to COMTRACE() - accessing a PC's COM2 serial
+ port directly is not legal under Linux.
+ Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
+ - Remove pre 2.2 kernel support
+ - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
+ - Fix MMIO access to use readl/writel instead of directly
+ dereferencing pointers
+ - Nuke MSDOS debugging code
+ - Change true/false data types to int from uint8_t
+ - Use int for counters instead of uint8_t etc.
+ - Clean up size & byte order conversion macro usage
+ Rev 3.23 Beta January 11, 2001 BN Qlogic
+ - Added check of device_id when handling non
+ QLA12160s during detect().
+ Rev 3.22 Beta January 5, 2001 BN Qlogic
+ - Changed queue_task() to schedule_task()
+ for kernels 2.4.0 and higher.
+ Note: 2.4.0-testxx kernels released prior to
+ the actual 2.4.0 kernel release on January 2001
+ will get compile/link errors with schedule_task().
+ Please update your kernel to released 2.4.0 level,
+ or comment lines in this file flagged with 3.22
+ to resolve compile/link error of schedule_task().
+ - Added -DCONFIG_SMP in addition to -D__SMP__
+ in Makefile for 2.4.0 builds of driver as module.
+ Rev 3.21 Beta January 4, 2001 BN Qlogic
+ - Changed criteria of 64/32 Bit mode of HBA
+ operation according to BITS_PER_LONG rather
+ than HBA's NVRAM setting of >4Gig memory bit;
+ so that the HBA auto-configures without the need
+ to setup each system individually.
+ Rev 3.20 Beta December 5, 2000 BN Qlogic
+ - Added priority handling to IA-64 onboard SCSI
+ ISP12160 chip for kernels greater than 2.3.18.
+ - Added irqrestore for qla1280_intr_handler.
+ - Enabled /proc/scsi/qla1280 interface.
+ - Clear /proc/scsi/qla1280 counters in detect().
+ Rev 3.19 Beta October 13, 2000 BN Qlogic
+ - Declare driver_template for new kernel
+ (2.4.0 and greater) scsi initialization scheme.
+ - Update /proc/scsi entry for 2.3.18 kernels and
+ above as qla1280
+ Rev 3.18 Beta October 10, 2000 BN Qlogic
+ - Changed scan order of adapters to map
+ the QLA12160 followed by the QLA1280.
+ Rev 3.17 Beta September 18, 2000 BN Qlogic
+ - Removed warnings for 32 bit 2.4.x compiles
+ - Corrected declared size for request and response
+ DMA addresses that are kept in each ha
+ Rev. 3.16 Beta August 25, 2000 BN Qlogic
+ - Corrected 64 bit addressing issue on IA-64
+ where the upper 32 bits were not properly
+ passed to the RISC engine.
+ Rev. 3.15 Beta August 22, 2000 BN Qlogic
+ - Modified qla1280_setup_chip to properly load
+ ISP firmware for greater that 4 Gig memory on IA-64
+ Rev. 3.14 Beta August 16, 2000 BN Qlogic
+ - Added setting of dma_mask to full 64 bit
+ if flags.enable_64bit_addressing is set in NVRAM
+ Rev. 3.13 Beta August 16, 2000 BN Qlogic
+ - Use new PCI DMA mapping APIs for 2.4.x kernel
+ Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
+ - Added check of pci_enable_device to detect() for 2.3.x
+ - Use pci_resource_start() instead of
+ pdev->resource[0].start in detect() for 2.3.x
+ - Updated driver version
+ Rev. 3.11 July 14, 2000 BN Qlogic
+ - Updated SCSI Firmware to following versions:
+ qla1x80: 8.13.08
+ qla1x160: 10.04.08
+ - Updated driver version to 3.11
+ Rev. 3.10 June 23, 2000 BN Qlogic
+ - Added filtering of AMI SubSys Vendor ID devices
+ Rev. 3.9
+ - DEBUG_QLA1280 undefined and new version BN Qlogic
+ Rev. 3.08b May 9, 2000 MD Dell
+ - Added logic to check against AMI subsystem vendor ID
+ Rev. 3.08 May 4, 2000 DG Qlogic
+ - Added logic to check for PCI subsystem ID.
+ Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
+ - Updated SCSI Firmware to following versions:
+ qla12160: 10.01.19
+ qla1280: 8.09.00
+ Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
+ - Internal revision; not released
+ Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
+ - Edit correction for virt_to_bus and PROC.
+ Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
+ - Merge changes from ia64 port.
+ Rev. 3.03 Mar 28, 2000 BN Qlogic
+ - Increase version to reflect new code drop with compile fix
+ of issue with inclusion of linux/spinlock for 2.3 kernels
+ Rev. 3.02 Mar 15, 2000 BN Qlogic
+ - Merge qla1280_proc_info from 2.10 code base
+ Rev. 3.01 Feb 10, 2000 BN Qlogic
+ - Corrected code to compile on a 2.2.x kernel.
+ Rev. 3.00 Jan 17, 2000 DG Qlogic
+ - Added 64-bit support.
+ Rev. 2.07 Nov 9, 1999 DG Qlogic
+ - Added new routine to set target parameters for ISP12160.
+ Rev. 2.06 Sept 10, 1999 DG Qlogic
+ - Added support for ISP12160 Ultra 3 chip.
+ Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
+ - Modified code to remove errors generated when compiling with
+ Cygnus IA64 Compiler.
+ - Changed conversion of pointers to unsigned longs instead of integers.
+ - Changed type of I/O port variables from uint32_t to unsigned long.
+ - Modified OFFSET macro to work with 64-bit as well as 32-bit.
+ - Changed sprintf and printk format specifiers for pointers to %p.
+ - Changed some int to long type casts where needed in sprintf & printk.
+ - Added l modifiers to sprintf and printk format specifiers for longs.
+ - Removed unused local variables.
+ Rev. 1.20 June 8, 1999 DG, Qlogic
+ Changes to support RedHat release 6.0 (kernel 2.2.5).
+ - Added SCSI exclusive access lock (io_request_lock) when accessing
+ the adapter.
+ - Added changes for the new LINUX interface template. Some new error
+ handling routines have been added to the template, but for now we
+ will use the old ones.
+ - Initial Beta Release.
+*****************************************************************************/
+
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/pci_ids.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/processor.h>
+#include <asm/types.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+#include <asm/sn/io.h>
+#endif
+
+
+/*
+ * Compile time Options:
+ * 0 - Disable and 1 - Enable
+ */
+#define DEBUG_QLA1280_INTR 0
+#define DEBUG_PRINT_NVRAM 0
+#define DEBUG_QLA1280 0
+
+#define MEMORY_MAPPED_IO 1
+
+#include "qla1280.h"
+
+#ifndef BITS_PER_LONG
+#error "BITS_PER_LONG not defined!"
+#endif
+#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
+#define QLA_64BIT_PTR 1
+#endif
+
+#ifdef QLA_64BIT_PTR
+#define pci_dma_hi32(a) ((a >> 16) >> 16)
+#else
+#define pci_dma_hi32(a) 0
+#endif
+#define pci_dma_lo32(a) (a & 0xffffffff)
+
+#define NVRAM_DELAY() udelay(500) /* 2 microseconds */
+
+#if defined(__ia64__) && !defined(ia64_platform_is)
+#define ia64_platform_is(foo) (!strcmp(x, platform_name))
+#endif
+
+
+#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
+#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
+ ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
+#define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
+ ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
+
+
+static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
+static void qla1280_remove_one(struct pci_dev *);
+
+/*
+ * QLogic Driver Support Function Prototypes.
+ */
+static void qla1280_done(struct scsi_qla_host *);
+static int qla1280_get_token(char *);
+static int qla1280_setup(char *s) __init;
+
+/*
+ * QLogic ISP1280 Hardware Support Function Prototypes.
+ */
+static int qla1280_load_firmware(struct scsi_qla_host *);
+static int qla1280_init_rings(struct scsi_qla_host *);
+static int qla1280_nvram_config(struct scsi_qla_host *);
+static int qla1280_mailbox_command(struct scsi_qla_host *,
+ uint8_t, uint16_t *);
+static int qla1280_bus_reset(struct scsi_qla_host *, int);
+static int qla1280_device_reset(struct scsi_qla_host *, int, int);
+static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
+static int qla1280_abort_isp(struct scsi_qla_host *);
+#ifdef QLA_64BIT_PTR
+static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
+#else
+static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
+#endif
+static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
+static void qla1280_poll(struct scsi_qla_host *);
+static void qla1280_reset_adapter(struct scsi_qla_host *);
+static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
+static void qla1280_isp_cmd(struct scsi_qla_host *);
+static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
+static void qla1280_rst_aen(struct scsi_qla_host *);
+static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
+ struct list_head *);
+static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
+ struct list_head *);
+static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
+static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
+static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
+static request_t *qla1280_req_pkt(struct scsi_qla_host *);
+static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
+ unsigned int);
+static void qla1280_get_target_parameters(struct scsi_qla_host *,
+ struct scsi_device *);
+static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
+
+
+static struct qla_driver_setup driver_setup;
+
+/*
+ * convert scsi data direction to request_t control flags
+ */
+static inline uint16_t
+qla1280_data_direction(struct scsi_cmnd *cmnd)
+{
+ switch(cmnd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ return BIT_5;
+ case DMA_TO_DEVICE:
+ return BIT_6;
+ case DMA_BIDIRECTIONAL:
+ return BIT_5 | BIT_6;
+ /*
+ * We could BUG() on default here if one of the four cases aren't
+ * met, but then again if we receive something like that from the
+ * SCSI layer we have more serious problems. This shuts up GCC.
+ */
+ case DMA_NONE:
+ default:
+ return 0;
+ }
+}
+
+#if DEBUG_QLA1280
+static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
+static void __qla1280_dump_buffer(char *, int);
+#endif
+
+
+/*
+ * insmod needs to find the variable and make it point to something
+ */
+#ifdef MODULE
+static char *qla1280;
+
+/* insmod qla1280 options=verbose" */
+module_param(qla1280, charp, 0);
+#else
+__setup("qla1280=", qla1280_setup);
+#endif
+
+
+/*
+ * We use the scsi_pointer structure that's included with each scsi_command
+ * to overlay our struct srb over it. qla1280_init() checks that a srb is not
+ * bigger than a scsi_pointer.
+ */
+
+#define CMD_SP(Cmnd) &Cmnd->SCp
+#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
+#define CMD_CDBP(Cmnd) Cmnd->cmnd
+#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
+#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
+#define CMD_RESULT(Cmnd) Cmnd->result
+#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
+#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
+
+#define CMD_HOST(Cmnd) Cmnd->device->host
+#define SCSI_BUS_32(Cmnd) Cmnd->device->channel
+#define SCSI_TCN_32(Cmnd) Cmnd->device->id
+#define SCSI_LUN_32(Cmnd) Cmnd->device->lun
+
+
+/*****************************************/
+/* ISP Boards supported by this driver */
+/*****************************************/
+
+struct qla_boards {
+ char *name; /* Board ID String */
+ int numPorts; /* Number of SCSI ports */
+ int fw_index; /* index into qla1280_fw_tbl for firmware */
+};
+
+/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
+static struct pci_device_id qla1280_pci_tbl[] = {
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
+
+DEFINE_MUTEX(qla1280_firmware_mutex);
+
+struct qla_fw {
+ char *fwname;
+ const struct firmware *fw;
+};
+
+#define QL_NUM_FW_IMAGES 3
+
+struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
+ {"/*(DEBLOBBED)*/", NULL}, /* image 0 */
+ {"/*(DEBLOBBED)*/", NULL}, /* image 1 */
+ {"/*(DEBLOBBED)*/", NULL}, /* image 2 */
+};
+
+/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
+static struct qla_boards ql1280_board_tbl[] = {
+ {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
+ {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
+ {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
+ {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
+ {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
+ {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
+ {.name = " ", .numPorts = 0, .fw_index = -1},
+};
+
+static int qla1280_verbose = 1;
+
+#if DEBUG_QLA1280
+static int ql_debug_level = 1;
+#define dprintk(level, format, a...) \
+ do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
+#define qla1280_dump_buffer(level, buf, size) \
+ if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
+#define qla1280_print_scsi_cmd(level, cmd) \
+ if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
+#else
+#define ql_debug_level 0
+#define dprintk(level, format, a...) do{}while(0)
+#define qla1280_dump_buffer(a, b, c) do{}while(0)
+#define qla1280_print_scsi_cmd(a, b) do{}while(0)
+#endif
+
+#define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
+#define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
+#define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
+#define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
+
+
+static int qla1280_read_nvram(struct scsi_qla_host *ha)
+{
+ uint16_t *wptr;
+ uint8_t chksum;
+ int cnt, i;
+ struct nvram *nv;
+
+ ENTER("qla1280_read_nvram");
+
+ if (driver_setup.no_nvram)
+ return 1;
+
+ printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
+
+ wptr = (uint16_t *)&ha->nvram;
+ nv = &ha->nvram;
+ chksum = 0;
+ for (cnt = 0; cnt < 3; cnt++) {
+ *wptr = qla1280_get_nvram_word(ha, cnt);
+ chksum += *wptr & 0xff;
+ chksum += (*wptr >> 8) & 0xff;
+ wptr++;
+ }
+
+ if (nv->id0 != 'I' || nv->id1 != 'S' ||
+ nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
+ dprintk(2, "Invalid nvram ID or version!\n");
+ chksum = 1;
+ } else {
+ for (; cnt < sizeof(struct nvram); cnt++) {
+ *wptr = qla1280_get_nvram_word(ha, cnt);
+ chksum += *wptr & 0xff;
+ chksum += (*wptr >> 8) & 0xff;
+ wptr++;
+ }
+ }
+
+ dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
+ " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
+ nv->version);
+
+
+ if (chksum) {
+ if (!driver_setup.no_nvram)
+ printk(KERN_WARNING "scsi(%ld): Unable to identify or "
+ "validate NVRAM checksum, using default "
+ "settings\n", ha->host_no);
+ ha->nvram_valid = 0;
+ } else
+ ha->nvram_valid = 1;
+
+ /* The firmware interface is, um, interesting, in that the
+ * actual firmware image on the chip is little endian, thus,
+ * the process of taking that image to the CPU would end up
+ * little endian. However, the firmware interface requires it
+ * to be read a word (two bytes) at a time.
+ *
+ * The net result of this would be that the word (and
+ * doubleword) quantites in the firmware would be correct, but
+ * the bytes would be pairwise reversed. Since most of the
+ * firmware quantites are, in fact, bytes, we do an extra
+ * le16_to_cpu() in the firmware read routine.
+ *
+ * The upshot of all this is that the bytes in the firmware
+ * are in the correct places, but the 16 and 32 bit quantites
+ * are still in little endian format. We fix that up below by
+ * doing extra reverses on them */
+ nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
+ nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
+ for(i = 0; i < MAX_BUSES; i++) {
+ nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
+ nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
+ }
+ dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
+ LEAVE("qla1280_read_nvram");
+
+ return chksum;
+}
+
+/**************************************************************************
+ * qla1280_info
+ * Return a string describing the driver.
+ **************************************************************************/
+static const char *
+qla1280_info(struct Scsi_Host *host)
+{
+ static char qla1280_scsi_name_buffer[125];
+ char *bp;
+ struct scsi_qla_host *ha;
+ struct qla_boards *bdp;
+
+ bp = &qla1280_scsi_name_buffer[0];
+ ha = (struct scsi_qla_host *)host->hostdata;
+ bdp = &ql1280_board_tbl[ha->devnum];
+ memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
+
+ sprintf (bp,
+ "QLogic %s PCI to SCSI Host Adapter\n"
+ " Firmware version: %2d.%02d.%02d, Driver version %s",
+ &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
+ QLA1280_VERSION);
+ return bp;
+}
+
+/**************************************************************************
+ * qla1280_queuecommand
+ * Queue a command to the controller.
+ *
+ * Note:
+ * The mid-level driver tries to ensures that queuecommand never gets invoked
+ * concurrently with itself or the interrupt handler (although the
+ * interrupt handler may call this routine as part of request-completion
+ * handling). Unfortunely, it sometimes calls the scheduler in interrupt
+ * context which is a big NO! NO!.
+ **************************************************************************/
+static int
+qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
+ struct srb *sp = (struct srb *)CMD_SP(cmd);
+ int status;
+
+ cmd->scsi_done = fn;
+ sp->cmd = cmd;
+ sp->flags = 0;
+ sp->wait = NULL;
+ CMD_HANDLE(cmd) = (unsigned char *)NULL;
+
+ qla1280_print_scsi_cmd(5, cmd);
+
+#ifdef QLA_64BIT_PTR
+ /*
+ * Using 64 bit commands if the PCI bridge doesn't support it is a
+ * bit wasteful, however this should really only happen if one's
+ * PCI controller is completely broken, like the BCM1250. For
+ * sane hardware this is not an issue.
+ */
+ status = qla1280_64bit_start_scsi(ha, sp);
+#else
+ status = qla1280_32bit_start_scsi(ha, sp);
+#endif
+ return status;
+}
+
+static DEF_SCSI_QCMD(qla1280_queuecommand)
+
+enum action {
+ ABORT_COMMAND,
+ DEVICE_RESET,
+ BUS_RESET,
+ ADAPTER_RESET,
+};
+
+
+static void qla1280_mailbox_timeout(unsigned long __data)
+{
+ struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
+ struct device_reg __iomem *reg;
+ reg = ha->iobase;
+
+ ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
+ printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
+ "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
+ RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
+ complete(ha->mailbox_wait);
+}
+
+static int
+_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
+ struct completion *wait)
+{
+ int status = FAILED;
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ spin_unlock_irq(ha->host->host_lock);
+ wait_for_completion_timeout(wait, 4*HZ);
+ spin_lock_irq(ha->host->host_lock);
+ sp->wait = NULL;
+ if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
+ status = SUCCESS;
+ (*cmd->scsi_done)(cmd);
+ }
+ return status;
+}
+
+static int
+qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ sp->wait = &wait;
+ return _qla1280_wait_for_single_command(ha, sp, &wait);
+}
+
+static int
+qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
+{
+ int cnt;
+ int status;
+ struct srb *sp;
+ struct scsi_cmnd *cmd;
+
+ status = SUCCESS;
+
+ /*
+ * Wait for all commands with the designated bus/target
+ * to be completed by the firmware
+ */
+ for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = ha->outstanding_cmds[cnt];
+ if (sp) {
+ cmd = sp->cmd;
+
+ if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
+ continue;
+ if (target >= 0 && SCSI_TCN_32(cmd) != target)
+ continue;
+
+ status = qla1280_wait_for_single_command(ha, sp);
+ if (status == FAILED)
+ break;
+ }
+ }
+ return status;
+}
+
+/**************************************************************************
+ * qla1280_error_action
+ * The function will attempt to perform a specified error action and
+ * wait for the results (or time out).
+ *
+ * Input:
+ * cmd = Linux SCSI command packet of the command that cause the
+ * bus reset.
+ * action = error action to take (see action_t)
+ *
+ * Returns:
+ * SUCCESS or FAILED
+ *
+ **************************************************************************/
+static int
+qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
+{
+ struct scsi_qla_host *ha;
+ int bus, target, lun;
+ struct srb *sp;
+ int i, found;
+ int result=FAILED;
+ int wait_for_bus=-1;
+ int wait_for_target = -1;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ ENTER("qla1280_error_action");
+
+ ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
+ sp = (struct srb *)CMD_SP(cmd);
+ bus = SCSI_BUS_32(cmd);
+ target = SCSI_TCN_32(cmd);
+ lun = SCSI_LUN_32(cmd);
+
+ dprintk(4, "error_action %i, istatus 0x%04x\n", action,
+ RD_REG_WORD(&ha->iobase->istatus));
+
+ dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
+ RD_REG_WORD(&ha->iobase->host_cmd),
+ RD_REG_WORD(&ha->iobase->ictrl), jiffies);
+
+ if (qla1280_verbose)
+ printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
+ "Handle=0x%p, action=0x%x\n",
+ ha->host_no, cmd, CMD_HANDLE(cmd), action);
+
+ /*
+ * Check to see if we have the command in the outstanding_cmds[]
+ * array. If not then it must have completed before this error
+ * action was initiated. If the error_action isn't ABORT_COMMAND
+ * then the driver must proceed with the requested action.
+ */
+ found = -1;
+ for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
+ if (sp == ha->outstanding_cmds[i]) {
+ found = i;
+ sp->wait = &wait; /* we'll wait for it to complete */
+ break;
+ }
+ }
+
+ if (found < 0) { /* driver doesn't have command */
+ result = SUCCESS;
+ if (qla1280_verbose) {
+ printk(KERN_INFO
+ "scsi(%ld:%d:%d:%d): specified command has "
+ "already completed.\n", ha->host_no, bus,
+ target, lun);
+ }
+ }
+
+ switch (action) {
+
+ case ABORT_COMMAND:
+ dprintk(1, "qla1280: RISC aborting command\n");
+ /*
+ * The abort might fail due to race when the host_lock
+ * is released to issue the abort. As such, we
+ * don't bother to check the return status.
+ */
+ if (found >= 0)
+ qla1280_abort_command(ha, sp, found);
+ break;
+
+ case DEVICE_RESET:
+ if (qla1280_verbose)
+ printk(KERN_INFO
+ "scsi(%ld:%d:%d:%d): Queueing device reset "
+ "command.\n", ha->host_no, bus, target, lun);
+ if (qla1280_device_reset(ha, bus, target) == 0) {
+ /* issued device reset, set wait conditions */
+ wait_for_bus = bus;
+ wait_for_target = target;
+ }
+ break;
+
+ case BUS_RESET:
+ if (qla1280_verbose)
+ printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
+ "reset.\n", ha->host_no, bus);
+ if (qla1280_bus_reset(ha, bus) == 0) {
+ /* issued bus reset, set wait conditions */
+ wait_for_bus = bus;
+ }
+ break;
+
+ case ADAPTER_RESET:
+ default:
+ if (qla1280_verbose) {
+ printk(KERN_INFO
+ "scsi(%ld): Issued ADAPTER RESET\n",
+ ha->host_no);
+ printk(KERN_INFO "scsi(%ld): I/O processing will "
+ "continue automatically\n", ha->host_no);
+ }
+ ha->flags.reset_active = 1;
+
+ if (qla1280_abort_isp(ha) != 0) { /* it's dead */
+ result = FAILED;
+ }
+
+ ha->flags.reset_active = 0;
+ }
+
+ /*
+ * At this point, the host_lock has been released and retaken
+ * by the issuance of the mailbox command.
+ * Wait for the command passed in by the mid-layer if it
+ * was found by the driver. It might have been returned
+ * between eh recovery steps, hence the check of the "found"
+ * variable.
+ */
+
+ if (found >= 0)
+ result = _qla1280_wait_for_single_command(ha, sp, &wait);
+
+ if (action == ABORT_COMMAND && result != SUCCESS) {
+ printk(KERN_WARNING
+ "scsi(%li:%i:%i:%i): "
+ "Unable to abort command!\n",
+ ha->host_no, bus, target, lun);
+ }
+
+ /*
+ * If the command passed in by the mid-layer has been
+ * returned by the board, then wait for any additional
+ * commands which are supposed to complete based upon
+ * the error action.
+ *
+ * All commands are unconditionally returned during a
+ * call to qla1280_abort_isp(), ADAPTER_RESET. No need
+ * to wait for them.
+ */
+ if (result == SUCCESS && wait_for_bus >= 0) {
+ result = qla1280_wait_for_pending_commands(ha,
+ wait_for_bus, wait_for_target);
+ }
+
+ dprintk(1, "RESET returning %d\n", result);
+
+ LEAVE("qla1280_error_action");
+ return result;
+}
+
+/**************************************************************************
+ * qla1280_abort
+ * Abort the specified SCSI command(s).
+ **************************************************************************/
+static int
+qla1280_eh_abort(struct scsi_cmnd * cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, ABORT_COMMAND);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ * qla1280_device_reset
+ * Reset the specified SCSI device
+ **************************************************************************/
+static int
+qla1280_eh_device_reset(struct scsi_cmnd *cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, DEVICE_RESET);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ * qla1280_bus_reset
+ * Reset the specified bus.
+ **************************************************************************/
+static int
+qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, BUS_RESET);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ * qla1280_adapter_reset
+ * Reset the specified adapter (both channels)
+ **************************************************************************/
+static int
+qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
+{
+ int rc;
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ rc = qla1280_error_action(cmd, ADAPTER_RESET);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return rc;
+}
+
+static int
+qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads, sectors, cylinders;
+
+ heads = 64;
+ sectors = 32;
+ cylinders = (unsigned long)capacity / (heads * sectors);
+ if (cylinders > 1024) {
+ heads = 255;
+ sectors = 63;
+ cylinders = (unsigned long)capacity / (heads * sectors);
+ /* if (cylinders > 1023)
+ cylinders = 1023; */
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+
+/* disable risc and host interrupts */
+static inline void
+qla1280_disable_intrs(struct scsi_qla_host *ha)
+{
+ WRT_REG_WORD(&ha->iobase->ictrl, 0);
+ RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
+}
+
+/* enable risc and host interrupts */
+static inline void
+qla1280_enable_intrs(struct scsi_qla_host *ha)
+{
+ WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
+ RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
+}
+
+/**************************************************************************
+ * qla1280_intr_handler
+ * Handles the H/W interrupt
+ **************************************************************************/
+static irqreturn_t
+qla1280_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha;
+ struct device_reg __iomem *reg;
+ u16 data;
+ int handled = 0;
+
+ ENTER_INTR ("qla1280_intr_handler");
+ ha = (struct scsi_qla_host *)dev_id;
+
+ spin_lock(ha->host->host_lock);
+
+ ha->isr_count++;
+ reg = ha->iobase;
+
+ qla1280_disable_intrs(ha);
+
+ data = qla1280_debounce_register(&reg->istatus);
+ /* Check for pending interrupts. */
+ if (data & RISC_INT) {
+ qla1280_isr(ha, &ha->done_q);
+ handled = 1;
+ }
+ if (!list_empty(&ha->done_q))
+ qla1280_done(ha);
+
+ spin_unlock(ha->host->host_lock);
+
+ qla1280_enable_intrs(ha);
+
+ LEAVE_INTR("qla1280_intr_handler");
+ return IRQ_RETVAL(handled);
+}
+
+
+static int
+qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
+{
+ uint8_t mr;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct nvram *nv;
+ int status, lun;
+
+ nv = &ha->nvram;
+
+ mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
+
+ /* Set Target Parameters. */
+ mb[0] = MBC_SET_TARGET_PARAMETERS;
+ mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+ mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
+ mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
+ mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
+ mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
+ mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
+ mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
+ mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
+ mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
+
+ if (IS_ISP1x160(ha)) {
+ mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
+ mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
+ mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
+ nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
+ mr |= BIT_6;
+ } else {
+ mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
+ }
+ mb[3] |= nv->bus[bus].target[target].sync_period;
+
+ status = qla1280_mailbox_command(ha, mr, mb);
+
+ /* Set Device Queue Parameters. */
+ for (lun = 0; lun < MAX_LUNS; lun++) {
+ mb[0] = MBC_SET_DEVICE_QUEUE;
+ mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+ mb[1] |= lun;
+ mb[2] = nv->bus[bus].max_queue_depth;
+ mb[3] = nv->bus[bus].target[target].execution_throttle;
+ status |= qla1280_mailbox_command(ha, 0x0f, mb);
+ }
+
+ if (status)
+ printk(KERN_WARNING "scsi(%ld:%i:%i): "
+ "qla1280_set_target_parameters() failed\n",
+ ha->host_no, bus, target);
+ return status;
+}
+
+
+/**************************************************************************
+ * qla1280_slave_configure
+ *
+ * Description:
+ * Determines the queue depth for a given device. There are two ways
+ * a queue depth can be obtained for a tagged queueing device. One
+ * way is the default queue depth which is determined by whether
+ * If it is defined, then it is used
+ * as the default queue depth. Otherwise, we use either 4 or 8 as the
+ * default queue depth (dependent on the number of hardware SCBs).
+ **************************************************************************/
+static int
+qla1280_slave_configure(struct scsi_device *device)
+{
+ struct scsi_qla_host *ha;
+ int default_depth = 3;
+ int bus = device->channel;
+ int target = device->id;
+ int status = 0;
+ struct nvram *nv;
+ unsigned long flags;
+
+ ha = (struct scsi_qla_host *)device->host->hostdata;
+ nv = &ha->nvram;
+
+ if (qla1280_check_for_dead_scsi_bus(ha, bus))
+ return 1;
+
+ if (device->tagged_supported &&
+ (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
+ scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
+ } else {
+ scsi_change_queue_depth(device, default_depth);
+ }
+
+ nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
+ nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
+ nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
+
+ if (driver_setup.no_sync ||
+ (driver_setup.sync_mask &&
+ (~driver_setup.sync_mask & (1 << target))))
+ nv->bus[bus].target[target].parameter.enable_sync = 0;
+ if (driver_setup.no_wide ||
+ (driver_setup.wide_mask &&
+ (~driver_setup.wide_mask & (1 << target))))
+ nv->bus[bus].target[target].parameter.enable_wide = 0;
+ if (IS_ISP1x160(ha)) {
+ if (driver_setup.no_ppr ||
+ (driver_setup.ppr_mask &&
+ (~driver_setup.ppr_mask & (1 << target))))
+ nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
+ }
+
+ spin_lock_irqsave(ha->host->host_lock, flags);
+ if (nv->bus[bus].target[target].parameter.enable_sync)
+ status = qla1280_set_target_parameters(ha, bus, target);
+ qla1280_get_target_parameters(ha, device);
+ spin_unlock_irqrestore(ha->host->host_lock, flags);
+ return status;
+}
+
+
+/*
+ * qla1280_done
+ * Process completed commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ */
+static void
+qla1280_done(struct scsi_qla_host *ha)
+{
+ struct srb *sp;
+ struct list_head *done_q;
+ int bus, target, lun;
+ struct scsi_cmnd *cmd;
+
+ ENTER("qla1280_done");
+
+ done_q = &ha->done_q;
+
+ while (!list_empty(done_q)) {
+ sp = list_entry(done_q->next, struct srb, list);
+
+ list_del(&sp->list);
+
+ cmd = sp->cmd;
+ bus = SCSI_BUS_32(cmd);
+ target = SCSI_TCN_32(cmd);
+ lun = SCSI_LUN_32(cmd);
+
+ switch ((CMD_RESULT(cmd) >> 16)) {
+ case DID_RESET:
+ /* Issue marker command. */
+ if (!ha->flags.abort_isp_active)
+ qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
+ break;
+ case DID_ABORT:
+ sp->flags &= ~SRB_ABORT_PENDING;
+ sp->flags |= SRB_ABORTED;
+ break;
+ default:
+ break;
+ }
+
+ /* Release memory used for this I/O */
+ scsi_dma_unmap(cmd);
+
+ /* Call the mid-level driver interrupt handler */
+ ha->actthreads--;
+
+ if (sp->wait == NULL)
+ (*(cmd)->scsi_done)(cmd);
+ else
+ complete(sp->wait);
+ }
+ LEAVE("qla1280_done");
+}
+
+/*
+ * Translates a ISP error to a Linux SCSI error
+ */
+static int
+qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
+{
+ int host_status = DID_ERROR;
+ uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ uint16_t state_flags = le16_to_cpu(sts->state_flags);
+ uint32_t residual_length = le32_to_cpu(sts->residual_length);
+ uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
+#if DEBUG_QLA1280_INTR
+ static char *reason[] = {
+ "DID_OK",
+ "DID_NO_CONNECT",
+ "DID_BUS_BUSY",
+ "DID_TIME_OUT",
+ "DID_BAD_TARGET",
+ "DID_ABORT",
+ "DID_PARITY",
+ "DID_ERROR",
+ "DID_RESET",
+ "DID_BAD_INTR"
+ };
+#endif /* DEBUG_QLA1280_INTR */
+
+ ENTER("qla1280_return_status");
+
+#if DEBUG_QLA1280_INTR
+ /*
+ dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
+ comp_status);
+ */
+#endif
+
+ switch (comp_status) {
+ case CS_COMPLETE:
+ host_status = DID_OK;
+ break;
+
+ case CS_INCOMPLETE:
+ if (!(state_flags & SF_GOT_BUS))
+ host_status = DID_NO_CONNECT;
+ else if (!(state_flags & SF_GOT_TARGET))
+ host_status = DID_BAD_TARGET;
+ else if (!(state_flags & SF_SENT_CDB))
+ host_status = DID_ERROR;
+ else if (!(state_flags & SF_TRANSFERRED_DATA))
+ host_status = DID_ERROR;
+ else if (!(state_flags & SF_GOT_STATUS))
+ host_status = DID_ERROR;
+ else if (!(state_flags & SF_GOT_SENSE))
+ host_status = DID_ERROR;
+ break;
+
+ case CS_RESET:
+ host_status = DID_RESET;
+ break;
+
+ case CS_ABORTED:
+ host_status = DID_ABORT;
+ break;
+
+ case CS_TIMEOUT:
+ host_status = DID_TIME_OUT;
+ break;
+
+ case CS_DATA_OVERRUN:
+ dprintk(2, "Data overrun 0x%x\n", residual_length);
+ dprintk(2, "qla1280_return_status: response packet data\n");
+ qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
+ host_status = DID_ERROR;
+ break;
+
+ case CS_DATA_UNDERRUN:
+ if ((scsi_bufflen(cp) - residual_length) <
+ cp->underflow) {
+ printk(KERN_WARNING
+ "scsi: Underflow detected - retrying "
+ "command.\n");
+ host_status = DID_ERROR;
+ } else {
+ scsi_set_resid(cp, residual_length);
+ host_status = DID_OK;
+ }
+ break;
+
+ default:
+ host_status = DID_ERROR;
+ break;
+ }
+
+#if DEBUG_QLA1280_INTR
+ dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
+ reason[host_status], scsi_status);
+#endif
+
+ LEAVE("qla1280_return_status");
+
+ return (scsi_status & 0xff) | (host_status << 16);
+}
+
+/****************************************************************************/
+/* QLogic ISP1280 Hardware Support Functions. */
+/****************************************************************************/
+
+/*
+ * qla1280_initialize_adapter
+ * Initialize board.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+static int
+qla1280_initialize_adapter(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg;
+ int status;
+ int bus;
+ unsigned long flags;
+
+ ENTER("qla1280_initialize_adapter");
+
+ /* Clear adapter flags. */
+ ha->flags.online = 0;
+ ha->flags.disable_host_adapter = 0;
+ ha->flags.reset_active = 0;
+ ha->flags.abort_isp_active = 0;
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+ if (ia64_platform_is("sn2")) {
+ printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
+ "dual channel lockup workaround\n", ha->host_no);
+ ha->flags.use_pci_vchannel = 1;
+ driver_setup.no_nvram = 1;
+ }
+#endif
+
+ /* TODO: implement support for the 1040 nvram format */
+ if (IS_ISP1040(ha))
+ driver_setup.no_nvram = 1;
+
+ dprintk(1, "Configure PCI space for adapter...\n");
+
+ reg = ha->iobase;
+
+ /* Insure mailbox registers are free. */
+ WRT_REG_WORD(&reg->semaphore, 0);
+ WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
+ WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
+ RD_REG_WORD(&reg->host_cmd);
+
+ if (qla1280_read_nvram(ha)) {
+ dprintk(2, "qla1280_initialize_adapter: failed to read "
+ "NVRAM\n");
+ }
+
+ /*
+ * It's necessary to grab the spin here as qla1280_mailbox_command
+ * needs to be able to drop the lock unconditionally to wait
+ * for completion.
+ */
+ spin_lock_irqsave(ha->host->host_lock, flags);
+
+ status = qla1280_load_firmware(ha);
+ if (status) {
+ printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
+ ha->host_no);
+ goto out;
+ }
+
+ /* Setup adapter based on NVRAM parameters. */
+ dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
+ qla1280_nvram_config(ha);
+
+ if (ha->flags.disable_host_adapter) {
+ status = 1;
+ goto out;
+ }
+
+ status = qla1280_init_rings(ha);
+ if (status)
+ goto out;
+
+ /* Issue SCSI reset, if we can't reset twice then bus is dead */
+ for (bus = 0; bus < ha->ports; bus++) {
+ if (!ha->bus_settings[bus].disable_scsi_reset &&
+ qla1280_bus_reset(ha, bus) &&
+ qla1280_bus_reset(ha, bus))
+ ha->bus_settings[bus].scsi_bus_dead = 1;
+ }
+
+ ha->flags.online = 1;
+ out:
+ spin_unlock_irqrestore(ha->host->host_lock, flags);
+
+ if (status)
+ dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
+
+ LEAVE("qla1280_initialize_adapter");
+ return status;
+}
+
+/*
+ * qla1280_request_firmware
+ * Acquire firmware for chip. Retain in memory
+ * for error recovery.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * Pointer to firmware image or an error code
+ * cast to pointer via ERR_PTR().
+ */
+static const struct firmware *
+qla1280_request_firmware(struct scsi_qla_host *ha)
+{
+ const struct firmware *fw;
+ int err;
+ int index;
+ char *fwname;
+
+ spin_unlock_irq(ha->host->host_lock);
+ mutex_lock(&qla1280_firmware_mutex);
+
+ index = ql1280_board_tbl[ha->devnum].fw_index;
+ fw = qla1280_fw_tbl[index].fw;
+ if (fw)
+ goto out;
+
+ fwname = qla1280_fw_tbl[index].fwname;
+ err = reject_firmware(&fw, fwname, &ha->pdev->dev);
+
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fwname, err);
+ fw = ERR_PTR(err);
+ goto unlock;
+ }
+ if ((fw->size % 2) || (fw->size < 6)) {
+ printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
+ fw->size, fwname);
+ release_firmware(fw);
+ fw = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ qla1280_fw_tbl[index].fw = fw;
+
+ out:
+ ha->fwver1 = fw->data[0];
+ ha->fwver2 = fw->data[1];
+ ha->fwver3 = fw->data[2];
+ unlock:
+ mutex_unlock(&qla1280_firmware_mutex);
+ spin_lock_irq(ha->host->host_lock);
+ return fw;
+}
+
+/*
+ * Chip diagnostics
+ * Test chip for proper operation.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ */
+static int
+qla1280_chip_diag(struct scsi_qla_host *ha)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct device_reg __iomem *reg = ha->iobase;
+ int status = 0;
+ int cnt;
+ uint16_t data;
+ dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
+
+ dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
+
+ /* Soft reset chip and wait for it to finish. */
+ WRT_REG_WORD(&reg->ictrl, ISP_RESET);
+
+ /*
+ * We can't do a traditional PCI write flush here by reading
+ * back the register. The card will not respond once the reset
+ * is in action and we end up with a machine check exception
+ * instead. Nothing to do but wait and hope for the best.
+ * A portable pci_write_flush(pdev) call would be very useful here.
+ */
+ udelay(20);
+ data = qla1280_debounce_register(&reg->ictrl);
+ /*
+ * Yet another QLogic gem ;-(
+ */
+ for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
+ udelay(5);
+ data = RD_REG_WORD(&reg->ictrl);
+ }
+
+ if (!cnt)
+ goto fail;
+
+ /* Reset register cleared by chip reset. */
+ dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
+
+ WRT_REG_WORD(&reg->cfg_1, 0);
+
+ /* Reset RISC and disable BIOS which
+ allows RISC to execute out of RAM. */
+ WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
+ HC_RELEASE_RISC | HC_DISABLE_BIOS);
+
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ data = qla1280_debounce_register(&reg->mailbox0);
+
+ /*
+ * I *LOVE* this code!
+ */
+ for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
+ udelay(5);
+ data = RD_REG_WORD(&reg->mailbox0);
+ }
+
+ if (!cnt)
+ goto fail;
+
+ /* Check product ID of chip */
+ dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
+
+ if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
+ (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
+ RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
+ RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
+ RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
+ printk(KERN_INFO "qla1280: Wrong product ID = "
+ "0x%x,0x%x,0x%x,0x%x\n",
+ RD_REG_WORD(&reg->mailbox1),
+ RD_REG_WORD(&reg->mailbox2),
+ RD_REG_WORD(&reg->mailbox3),
+ RD_REG_WORD(&reg->mailbox4));
+ goto fail;
+ }
+
+ /*
+ * Enable ints early!!!
+ */
+ qla1280_enable_intrs(ha);
+
+ dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
+ /* Wrap Incoming Mailboxes Test. */
+ mb[0] = MBC_MAILBOX_REGISTER_TEST;
+ mb[1] = 0xAAAA;
+ mb[2] = 0x5555;
+ mb[3] = 0xAA55;
+ mb[4] = 0x55AA;
+ mb[5] = 0xA5A5;
+ mb[6] = 0x5A5A;
+ mb[7] = 0x2525;
+
+ status = qla1280_mailbox_command(ha, 0xff, mb);
+ if (status)
+ goto fail;
+
+ if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
+ mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
+ mb[7] != 0x2525) {
+ printk(KERN_INFO "qla1280: Failed mbox check\n");
+ goto fail;
+ }
+
+ dprintk(3, "qla1280_chip_diag: exiting normally\n");
+ return 0;
+ fail:
+ dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
+ return status;
+}
+
+static int
+qla1280_load_firmware_pio(struct scsi_qla_host *ha)
+{
+ /* enter with host_lock acquired */
+
+ const struct firmware *fw;
+ const __le16 *fw_data;
+ uint16_t risc_address, risc_code_size;
+ uint16_t mb[MAILBOX_REGISTER_COUNT], i;
+ int err = 0;
+
+ fw = qla1280_request_firmware(ha);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+
+ fw_data = (const __le16 *)&fw->data[0];
+ ha->fwstart = __le16_to_cpu(fw_data[2]);
+
+ /* Load RISC code. */
+ risc_address = ha->fwstart;
+ fw_data = (const __le16 *)&fw->data[6];
+ risc_code_size = (fw->size - 6) / 2;
+
+ for (i = 0; i < risc_code_size; i++) {
+ mb[0] = MBC_WRITE_RAM_WORD;
+ mb[1] = risc_address + i;
+ mb[2] = __le16_to_cpu(fw_data[i]);
+
+ err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
+ if (err) {
+ printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
+ ha->host_no);
+ break;
+ }
+ }
+
+ return err;
+}
+
+#define DUMP_IT_BACK 0 /* for debug of RISC loading */
+static int
+qla1280_load_firmware_dma(struct scsi_qla_host *ha)
+{
+ /* enter with host_lock acquired */
+ const struct firmware *fw;
+ const __le16 *fw_data;
+ uint16_t risc_address, risc_code_size;
+ uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
+ int err = 0, num, i;
+#if DUMP_IT_BACK
+ uint8_t *sp, *tbuf;
+ dma_addr_t p_tbuf;
+
+ tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+ if (!tbuf)
+ return -ENOMEM;
+#endif
+
+ fw = qla1280_request_firmware(ha);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+
+ fw_data = (const __le16 *)&fw->data[0];
+ ha->fwstart = __le16_to_cpu(fw_data[2]);
+
+ /* Load RISC code. */
+ risc_address = ha->fwstart;
+ fw_data = (const __le16 *)&fw->data[6];
+ risc_code_size = (fw->size - 6) / 2;
+
+ dprintk(1, "%s: DMA RISC code (%i) words\n",
+ __func__, risc_code_size);
+
+ num = 0;
+ while (risc_code_size > 0) {
+ int warn __attribute__((unused)) = 0;
+
+ cnt = 2000 >> 1;
+
+ if (cnt > risc_code_size)
+ cnt = risc_code_size;
+
+ dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
+ "%d,%d(0x%x)\n",
+ fw_data, cnt, num, risc_address);
+ for(i = 0; i < cnt; i++)
+ ((__le16 *)ha->request_ring)[i] = fw_data[i];
+
+ mb[0] = MBC_LOAD_RAM;
+ mb[1] = risc_address;
+ mb[4] = cnt;
+ mb[3] = ha->request_dma & 0xffff;
+ mb[2] = (ha->request_dma >> 16) & 0xffff;
+ mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
+ mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+ dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
+ __func__, mb[0],
+ (void *)(long)ha->request_dma,
+ mb[6], mb[7], mb[2], mb[3]);
+ err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
+ BIT_1 | BIT_0, mb);
+ if (err) {
+ printk(KERN_ERR "scsi(%li): Failed to load partial "
+ "segment of f\n", ha->host_no);
+ goto out;
+ }
+
+#if DUMP_IT_BACK
+ mb[0] = MBC_DUMP_RAM;
+ mb[1] = risc_address;
+ mb[4] = cnt;
+ mb[3] = p_tbuf & 0xffff;
+ mb[2] = (p_tbuf >> 16) & 0xffff;
+ mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
+ mb[6] = pci_dma_hi32(p_tbuf) >> 16;
+
+ err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
+ BIT_1 | BIT_0, mb);
+ if (err) {
+ printk(KERN_ERR
+ "Failed to dump partial segment of f/w\n");
+ goto out;
+ }
+ sp = (uint8_t *)ha->request_ring;
+ for (i = 0; i < (cnt << 1); i++) {
+ if (tbuf[i] != sp[i] && warn++ < 10) {
+ printk(KERN_ERR "%s: FW compare error @ "
+ "byte(0x%x) loop#=%x\n",
+ __func__, i, num);
+ printk(KERN_ERR "%s: FWbyte=%x "
+ "FWfromChip=%x\n",
+ __func__, sp[i], tbuf[i]);
+ /*break; */
+ }
+ }
+#endif
+ risc_address += cnt;
+ risc_code_size = risc_code_size - cnt;
+ fw_data = fw_data + cnt;
+ num++;
+ }
+
+ out:
+#if DUMP_IT_BACK
+ pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+#endif
+ return err;
+}
+
+static int
+qla1280_start_firmware(struct scsi_qla_host *ha)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ int err;
+
+ dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
+ __func__);
+
+ /* Verify checksum of loaded RISC code. */
+ mb[0] = MBC_VERIFY_CHECKSUM;
+ /* mb[1] = ql12_risc_code_addr01; */
+ mb[1] = ha->fwstart;
+ err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+ if (err) {
+ printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
+ return err;
+ }
+
+ /* Start firmware execution. */
+ dprintk(1, "%s: start firmware running.\n", __func__);
+ mb[0] = MBC_EXECUTE_FIRMWARE;
+ mb[1] = ha->fwstart;
+ err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+ if (err) {
+ printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
+ ha->host_no);
+ }
+
+ return err;
+}
+
+static int
+qla1280_load_firmware(struct scsi_qla_host *ha)
+{
+ /* enter with host_lock taken */
+ int err;
+
+ err = qla1280_chip_diag(ha);
+ if (err)
+ goto out;
+ if (IS_ISP1040(ha))
+ err = qla1280_load_firmware_pio(ha);
+ else
+ err = qla1280_load_firmware_dma(ha);
+ if (err)
+ goto out;
+ err = qla1280_start_firmware(ha);
+ out:
+ return err;
+}
+
+/*
+ * Initialize rings
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * ha->request_ring = request ring virtual address
+ * ha->response_ring = response ring virtual address
+ * ha->request_dma = request ring physical address
+ * ha->response_dma = response ring physical address
+ *
+ * Returns:
+ * 0 = success.
+ */
+static int
+qla1280_init_rings(struct scsi_qla_host *ha)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ int status = 0;
+
+ ENTER("qla1280_init_rings");
+
+ /* Clear outstanding commands array. */
+ memset(ha->outstanding_cmds, 0,
+ sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
+
+ /* Initialize request queue. */
+ ha->request_ring_ptr = ha->request_ring;
+ ha->req_ring_index = 0;
+ ha->req_q_cnt = REQUEST_ENTRY_CNT;
+ /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
+ mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
+ mb[1] = REQUEST_ENTRY_CNT;
+ mb[3] = ha->request_dma & 0xffff;
+ mb[2] = (ha->request_dma >> 16) & 0xffff;
+ mb[4] = 0;
+ mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
+ mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+ if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
+ BIT_3 | BIT_2 | BIT_1 | BIT_0,
+ &mb[0]))) {
+ /* Initialize response queue. */
+ ha->response_ring_ptr = ha->response_ring;
+ ha->rsp_ring_index = 0;
+ /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
+ mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
+ mb[1] = RESPONSE_ENTRY_CNT;
+ mb[3] = ha->response_dma & 0xffff;
+ mb[2] = (ha->response_dma >> 16) & 0xffff;
+ mb[5] = 0;
+ mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
+ mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
+ status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
+ BIT_3 | BIT_2 | BIT_1 | BIT_0,
+ &mb[0]);
+ }
+
+ if (status)
+ dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
+
+ LEAVE("qla1280_init_rings");
+ return status;
+}
+
+static void
+qla1280_print_settings(struct nvram *nv)
+{
+ dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
+ nv->bus[0].config_1.initiator_id);
+ dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
+ nv->bus[1].config_1.initiator_id);
+
+ dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
+ nv->bus[0].bus_reset_delay);
+ dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
+ nv->bus[1].bus_reset_delay);
+
+ dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
+ dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
+ dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
+ dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
+
+ dprintk(1, "qla1280 : async data setup time[0]=%d\n",
+ nv->bus[0].config_2.async_data_setup_time);
+ dprintk(1, "qla1280 : async data setup time[1]=%d\n",
+ nv->bus[1].config_2.async_data_setup_time);
+
+ dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
+ nv->bus[0].config_2.req_ack_active_negation);
+ dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
+ nv->bus[1].config_2.req_ack_active_negation);
+
+ dprintk(1, "qla1280 : data line active negation[0]=%d\n",
+ nv->bus[0].config_2.data_line_active_negation);
+ dprintk(1, "qla1280 : data line active negation[1]=%d\n",
+ nv->bus[1].config_2.data_line_active_negation);
+
+ dprintk(1, "qla1280 : disable loading risc code=%d\n",
+ nv->cntr_flags_1.disable_loading_risc_code);
+
+ dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
+ nv->cntr_flags_1.enable_64bit_addressing);
+
+ dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
+ nv->bus[0].selection_timeout);
+ dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
+ nv->bus[1].selection_timeout);
+
+ dprintk(1, "qla1280 : max queue depth[0]=%d\n",
+ nv->bus[0].max_queue_depth);
+ dprintk(1, "qla1280 : max queue depth[1]=%d\n",
+ nv->bus[1].max_queue_depth);
+}
+
+static void
+qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
+{
+ struct nvram *nv = &ha->nvram;
+
+ nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
+ nv->bus[bus].target[target].parameter.auto_request_sense = 1;
+ nv->bus[bus].target[target].parameter.tag_queuing = 1;
+ nv->bus[bus].target[target].parameter.enable_sync = 1;
+#if 1 /* Some SCSI Processors do not seem to like this */
+ nv->bus[bus].target[target].parameter.enable_wide = 1;
+#endif
+ nv->bus[bus].target[target].execution_throttle =
+ nv->bus[bus].max_queue_depth - 1;
+ nv->bus[bus].target[target].parameter.parity_checking = 1;
+ nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
+
+ if (IS_ISP1x160(ha)) {
+ nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
+ nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
+ nv->bus[bus].target[target].sync_period = 9;
+ nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
+ nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
+ nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
+ } else {
+ nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
+ nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
+ nv->bus[bus].target[target].sync_period = 10;
+ }
+}
+
+static void
+qla1280_set_defaults(struct scsi_qla_host *ha)
+{
+ struct nvram *nv = &ha->nvram;
+ int bus, target;
+
+ dprintk(1, "Using defaults for NVRAM: \n");
+ memset(nv, 0, sizeof(struct nvram));
+
+ /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
+ nv->firmware_feature.f.enable_fast_posting = 1;
+ nv->firmware_feature.f.disable_synchronous_backoff = 1;
+ nv->termination.scsi_bus_0_control = 3;
+ nv->termination.scsi_bus_1_control = 3;
+ nv->termination.auto_term_support = 1;
+
+ /*
+ * Set default FIFO magic - What appropriate values would be here
+ * is unknown. This is what I have found testing with 12160s.
+ *
+ * Now, I would love the magic decoder ring for this one, the
+ * header file provided by QLogic seems to be bogus or incomplete
+ * at best.
+ */
+ nv->isp_config.burst_enable = 1;
+ if (IS_ISP1040(ha))
+ nv->isp_config.fifo_threshold |= 3;
+ else
+ nv->isp_config.fifo_threshold |= 4;
+
+ if (IS_ISP1x160(ha))
+ nv->isp_parameter = 0x01; /* fast memory enable */
+
+ for (bus = 0; bus < MAX_BUSES; bus++) {
+ nv->bus[bus].config_1.initiator_id = 7;
+ nv->bus[bus].config_2.req_ack_active_negation = 1;
+ nv->bus[bus].config_2.data_line_active_negation = 1;
+ nv->bus[bus].selection_timeout = 250;
+ nv->bus[bus].max_queue_depth = 32;
+
+ if (IS_ISP1040(ha)) {
+ nv->bus[bus].bus_reset_delay = 3;
+ nv->bus[bus].config_2.async_data_setup_time = 6;
+ nv->bus[bus].retry_delay = 1;
+ } else {
+ nv->bus[bus].bus_reset_delay = 5;
+ nv->bus[bus].config_2.async_data_setup_time = 8;
+ }
+
+ for (target = 0; target < MAX_TARGETS; target++)
+ qla1280_set_target_defaults(ha, bus, target);
+ }
+}
+
+static int
+qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
+{
+ struct nvram *nv = &ha->nvram;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ int status, lun;
+ uint16_t flag;
+
+ /* Set Target Parameters. */
+ mb[0] = MBC_SET_TARGET_PARAMETERS;
+ mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+
+ /*
+ * Do not enable sync and ppr for the initial INQUIRY run. We
+ * enable this later if we determine the target actually
+ * supports it.
+ */
+ mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
+ | TP_WIDE | TP_PARITY | TP_DISCONNECT);
+
+ if (IS_ISP1x160(ha))
+ mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
+ else
+ mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
+ mb[3] |= nv->bus[bus].target[target].sync_period;
+ status = qla1280_mailbox_command(ha, 0x0f, mb);
+
+ /* Save Tag queuing enable flag. */
+ flag = (BIT_0 << target);
+ if (nv->bus[bus].target[target].parameter.tag_queuing)
+ ha->bus_settings[bus].qtag_enables |= flag;
+
+ /* Save Device enable flag. */
+ if (IS_ISP1x160(ha)) {
+ if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
+ ha->bus_settings[bus].device_enables |= flag;
+ ha->bus_settings[bus].lun_disables |= 0;
+ } else {
+ if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
+ ha->bus_settings[bus].device_enables |= flag;
+ /* Save LUN disable flag. */
+ if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
+ ha->bus_settings[bus].lun_disables |= flag;
+ }
+
+ /* Set Device Queue Parameters. */
+ for (lun = 0; lun < MAX_LUNS; lun++) {
+ mb[0] = MBC_SET_DEVICE_QUEUE;
+ mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
+ mb[1] |= lun;
+ mb[2] = nv->bus[bus].max_queue_depth;
+ mb[3] = nv->bus[bus].target[target].execution_throttle;
+ status |= qla1280_mailbox_command(ha, 0x0f, mb);
+ }
+
+ return status;
+}
+
+static int
+qla1280_config_bus(struct scsi_qla_host *ha, int bus)
+{
+ struct nvram *nv = &ha->nvram;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ int target, status;
+
+ /* SCSI Reset Disable. */
+ ha->bus_settings[bus].disable_scsi_reset =
+ nv->bus[bus].config_1.scsi_reset_disable;
+
+ /* Initiator ID. */
+ ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
+ mb[0] = MBC_SET_INITIATOR_ID;
+ mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
+ ha->bus_settings[bus].id;
+ status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+
+ /* Reset Delay. */
+ ha->bus_settings[bus].bus_reset_delay =
+ nv->bus[bus].bus_reset_delay;
+
+ /* Command queue depth per device. */
+ ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
+
+ /* Set target parameters. */
+ for (target = 0; target < MAX_TARGETS; target++)
+ status |= qla1280_config_target(ha, bus, target);
+
+ return status;
+}
+
+static int
+qla1280_nvram_config(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ struct nvram *nv = &ha->nvram;
+ int bus, target, status = 0;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+
+ ENTER("qla1280_nvram_config");
+
+ if (ha->nvram_valid) {
+ /* Always force AUTO sense for LINUX SCSI */
+ for (bus = 0; bus < MAX_BUSES; bus++)
+ for (target = 0; target < MAX_TARGETS; target++) {
+ nv->bus[bus].target[target].parameter.
+ auto_request_sense = 1;
+ }
+ } else {
+ qla1280_set_defaults(ha);
+ }
+
+ qla1280_print_settings(nv);
+
+ /* Disable RISC load of firmware. */
+ ha->flags.disable_risc_code_load =
+ nv->cntr_flags_1.disable_loading_risc_code;
+
+ if (IS_ISP1040(ha)) {
+ uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
+
+ hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
+
+ cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
+ cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
+ ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
+
+ /* Busted fifo, says mjacob. */
+ if (hwrev != ISP_CFG0_1040A)
+ cfg1 |= nv->isp_config.fifo_threshold << 4;
+
+ cfg1 |= nv->isp_config.burst_enable << 2;
+ WRT_REG_WORD(&reg->cfg_1, cfg1);
+
+ WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
+ WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
+ } else {
+ uint16_t cfg1, term;
+
+ /* Set ISP hardware DMA burst */
+ cfg1 = nv->isp_config.fifo_threshold << 4;
+ cfg1 |= nv->isp_config.burst_enable << 2;
+ /* Enable DMA arbitration on dual channel controllers */
+ if (ha->ports > 1)
+ cfg1 |= BIT_13;
+ WRT_REG_WORD(&reg->cfg_1, cfg1);
+
+ /* Set SCSI termination. */
+ WRT_REG_WORD(&reg->gpio_enable,
+ BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ term = nv->termination.scsi_bus_1_control;
+ term |= nv->termination.scsi_bus_0_control << 2;
+ term |= nv->termination.auto_term_support << 7;
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ WRT_REG_WORD(&reg->gpio_data, term);
+ }
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+
+ /* ISP parameter word. */
+ mb[0] = MBC_SET_SYSTEM_PARAMETER;
+ mb[1] = nv->isp_parameter;
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+
+ if (IS_ISP1x40(ha)) {
+ /* clock rate - for qla1240 and older, only */
+ mb[0] = MBC_SET_CLOCK_RATE;
+ mb[1] = 40;
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+ }
+
+ /* Firmware feature word. */
+ mb[0] = MBC_SET_FIRMWARE_FEATURES;
+ mb[1] = nv->firmware_feature.f.enable_fast_posting;
+ mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
+ mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
+#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
+ if (ia64_platform_is("sn2")) {
+ printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
+ "workaround\n", ha->host_no);
+ mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
+ }
+#endif
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+
+ /* Retry count and delay. */
+ mb[0] = MBC_SET_RETRY_COUNT;
+ mb[1] = nv->bus[0].retry_count;
+ mb[2] = nv->bus[0].retry_delay;
+ mb[6] = nv->bus[1].retry_count;
+ mb[7] = nv->bus[1].retry_delay;
+ status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
+ BIT_1 | BIT_0, &mb[0]);
+
+ /* ASYNC data setup time. */
+ mb[0] = MBC_SET_ASYNC_DATA_SETUP;
+ mb[1] = nv->bus[0].config_2.async_data_setup_time;
+ mb[2] = nv->bus[1].config_2.async_data_setup_time;
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+
+ /* Active negation states. */
+ mb[0] = MBC_SET_ACTIVE_NEGATION;
+ mb[1] = 0;
+ if (nv->bus[0].config_2.req_ack_active_negation)
+ mb[1] |= BIT_5;
+ if (nv->bus[0].config_2.data_line_active_negation)
+ mb[1] |= BIT_4;
+ mb[2] = 0;
+ if (nv->bus[1].config_2.req_ack_active_negation)
+ mb[2] |= BIT_5;
+ if (nv->bus[1].config_2.data_line_active_negation)
+ mb[2] |= BIT_4;
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
+
+ mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
+ mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+
+ /* thingy */
+ mb[0] = MBC_SET_PCI_CONTROL;
+ mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
+ mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
+
+ mb[0] = MBC_SET_TAG_AGE_LIMIT;
+ mb[1] = 8;
+ status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
+
+ /* Selection timeout. */
+ mb[0] = MBC_SET_SELECTION_TIMEOUT;
+ mb[1] = nv->bus[0].selection_timeout;
+ mb[2] = nv->bus[1].selection_timeout;
+ status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
+
+ for (bus = 0; bus < ha->ports; bus++)
+ status |= qla1280_config_bus(ha, bus);
+
+ if (status)
+ dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
+
+ LEAVE("qla1280_nvram_config");
+ return status;
+}
+
+/*
+ * Get NVRAM data word
+ * Calculates word position in NVRAM and calls request routine to
+ * get the word from NVRAM.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * address = NVRAM word address.
+ *
+ * Returns:
+ * data word.
+ */
+static uint16_t
+qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
+{
+ uint32_t nv_cmd;
+ uint16_t data;
+
+ nv_cmd = address << 16;
+ nv_cmd |= NV_READ_OP;
+
+ data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
+
+ dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
+ "0x%x", data);
+
+ return data;
+}
+
+/*
+ * NVRAM request
+ * Sends read command to NVRAM and gets data from NVRAM.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * nv_cmd = Bit 26 = start bit
+ * Bit 25, 24 = opcode
+ * Bit 23-16 = address
+ * Bit 15-0 = write data
+ *
+ * Returns:
+ * data word.
+ */
+static uint16_t
+qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ int cnt;
+ uint16_t data = 0;
+ uint16_t reg_data;
+
+ /* Send command to NVRAM. */
+
+ nv_cmd <<= 5;
+ for (cnt = 0; cnt < 11; cnt++) {
+ if (nv_cmd & BIT_31)
+ qla1280_nv_write(ha, NV_DATA_OUT);
+ else
+ qla1280_nv_write(ha, 0);
+ nv_cmd <<= 1;
+ }
+
+ /* Read data from NVRAM. */
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ NVRAM_DELAY();
+ data <<= 1;
+ reg_data = RD_REG_WORD(&reg->nvram);
+ if (reg_data & NV_DATA_IN)
+ data |= BIT_0;
+ WRT_REG_WORD(&reg->nvram, NV_SELECT);
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ NVRAM_DELAY();
+ }
+
+ /* Deselect chip. */
+
+ WRT_REG_WORD(&reg->nvram, NV_DESELECT);
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ NVRAM_DELAY();
+
+ return data;
+}
+
+static void
+qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+
+ WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ NVRAM_DELAY();
+ WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ NVRAM_DELAY();
+ WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+ NVRAM_DELAY();
+}
+
+/*
+ * Mailbox Command
+ * Issue mailbox command and waits for completion.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * mr = mailbox registers to load.
+ * mb = data pointer for mailbox registers.
+ *
+ * Output:
+ * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ * 0 = success
+ */
+static int
+qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ int status = 0;
+ int cnt;
+ uint16_t *optr, *iptr;
+ uint16_t __iomem *mptr;
+ uint16_t data;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct timer_list timer;
+
+ ENTER("qla1280_mailbox_command");
+
+ if (ha->mailbox_wait) {
+ printk(KERN_ERR "Warning mailbox wait already in use!\n");
+ }
+ ha->mailbox_wait = &wait;
+
+ /*
+ * We really should start out by verifying that the mailbox is
+ * available before starting sending the command data
+ */
+ /* Load mailbox registers. */
+ mptr = (uint16_t __iomem *) &reg->mailbox0;
+ iptr = mb;
+ for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
+ if (mr & BIT_0) {
+ WRT_REG_WORD(mptr, (*iptr));
+ }
+
+ mr >>= 1;
+ mptr++;
+ iptr++;
+ }
+
+ /* Issue set host interrupt command. */
+
+ /* set up a timer just in case we're really jammed */
+ init_timer_on_stack(&timer);
+ timer.expires = jiffies + 20*HZ;
+ timer.data = (unsigned long)ha;
+ timer.function = qla1280_mailbox_timeout;
+ add_timer(&timer);
+
+ spin_unlock_irq(ha->host->host_lock);
+ WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
+ data = qla1280_debounce_register(&reg->istatus);
+
+ wait_for_completion(&wait);
+ del_timer_sync(&timer);
+
+ spin_lock_irq(ha->host->host_lock);
+
+ ha->mailbox_wait = NULL;
+
+ /* Check for mailbox command timeout. */
+ if (ha->mailbox_out[0] != MBS_CMD_CMP) {
+ printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
+ "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
+ "0x%04x\n",
+ mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
+ printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
+ RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
+ RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
+ printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
+ RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
+ RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
+ status = 1;
+ }
+
+ /* Load return mailbox registers. */
+ optr = mb;
+ iptr = (uint16_t *) &ha->mailbox_out[0];
+ mr = MAILBOX_REGISTER_COUNT;
+ memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
+
+ if (ha->flags.reset_marker)
+ qla1280_rst_aen(ha);
+
+ if (status)
+ dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
+ "0x%x ****\n", mb[0]);
+
+ LEAVE("qla1280_mailbox_command");
+ return status;
+}
+
+/*
+ * qla1280_poll
+ * Polls ISP for interrupts.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ */
+static void
+qla1280_poll(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ uint16_t data;
+ LIST_HEAD(done_q);
+
+ /* ENTER("qla1280_poll"); */
+
+ /* Check for pending interrupts. */
+ data = RD_REG_WORD(&reg->istatus);
+ if (data & RISC_INT)
+ qla1280_isr(ha, &done_q);
+
+ if (!ha->mailbox_wait) {
+ if (ha->flags.reset_marker)
+ qla1280_rst_aen(ha);
+ }
+
+ if (!list_empty(&done_q))
+ qla1280_done(ha);
+
+ /* LEAVE("qla1280_poll"); */
+}
+
+/*
+ * qla1280_bus_reset
+ * Issue SCSI bus reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * bus = SCSI bus number.
+ *
+ * Returns:
+ * 0 = success
+ */
+static int
+qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ uint16_t reset_delay;
+ int status;
+
+ dprintk(3, "qla1280_bus_reset: entered\n");
+
+ if (qla1280_verbose)
+ printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
+ ha->host_no, bus);
+
+ reset_delay = ha->bus_settings[bus].bus_reset_delay;
+ mb[0] = MBC_BUS_RESET;
+ mb[1] = reset_delay;
+ mb[2] = (uint16_t) bus;
+ status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+
+ if (status) {
+ if (ha->bus_settings[bus].failed_reset_count > 2)
+ ha->bus_settings[bus].scsi_bus_dead = 1;
+ ha->bus_settings[bus].failed_reset_count++;
+ } else {
+ spin_unlock_irq(ha->host->host_lock);
+ ssleep(reset_delay);
+ spin_lock_irq(ha->host->host_lock);
+
+ ha->bus_settings[bus].scsi_bus_dead = 0;
+ ha->bus_settings[bus].failed_reset_count = 0;
+ ha->bus_settings[bus].reset_marker = 0;
+ /* Issue marker command. */
+ qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
+ }
+
+ /*
+ * We should probably call qla1280_set_target_parameters()
+ * here as well for all devices on the bus.
+ */
+
+ if (status)
+ dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
+ else
+ dprintk(3, "qla1280_bus_reset: exiting normally\n");
+
+ return status;
+}
+
+/*
+ * qla1280_device_reset
+ * Issue bus device reset message to the target.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * bus = SCSI BUS number.
+ * target = SCSI ID.
+ *
+ * Returns:
+ * 0 = success
+ */
+static int
+qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ int status;
+
+ ENTER("qla1280_device_reset");
+
+ mb[0] = MBC_ABORT_TARGET;
+ mb[1] = (bus ? (target | BIT_7) : target) << 8;
+ mb[2] = 1;
+ status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
+
+ /* Issue marker command. */
+ qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
+
+ if (status)
+ dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
+
+ LEAVE("qla1280_device_reset");
+ return status;
+}
+
+/*
+ * qla1280_abort_command
+ * Abort command aborts a specified IOCB.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * sp = SB structure pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+static int
+qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ unsigned int bus, target, lun;
+ int status;
+
+ ENTER("qla1280_abort_command");
+
+ bus = SCSI_BUS_32(sp->cmd);
+ target = SCSI_TCN_32(sp->cmd);
+ lun = SCSI_LUN_32(sp->cmd);
+
+ sp->flags |= SRB_ABORT_PENDING;
+
+ mb[0] = MBC_ABORT_COMMAND;
+ mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
+ mb[2] = handle >> 16;
+ mb[3] = handle & 0xffff;
+ status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
+
+ if (status) {
+ dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
+ sp->flags &= ~SRB_ABORT_PENDING;
+ }
+
+
+ LEAVE("qla1280_abort_command");
+ return status;
+}
+
+/*
+ * qla1280_reset_adapter
+ * Reset adapter.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ */
+static void
+qla1280_reset_adapter(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+
+ ENTER("qla1280_reset_adapter");
+
+ /* Disable ISP chip */
+ ha->flags.online = 0;
+ WRT_REG_WORD(&reg->ictrl, ISP_RESET);
+ WRT_REG_WORD(&reg->host_cmd,
+ HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
+ RD_REG_WORD(&reg->id_l); /* Flush PCI write */
+
+ LEAVE("qla1280_reset_adapter");
+}
+
+/*
+ * Issue marker command.
+ * Function issues marker IOCB.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * bus = SCSI BUS number
+ * id = SCSI ID
+ * lun = SCSI LUN
+ * type = marker modifier
+ */
+static void
+qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
+{
+ struct mrk_entry *pkt;
+
+ ENTER("qla1280_marker");
+
+ /* Get request packet. */
+ if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
+ pkt->entry_type = MARKER_TYPE;
+ pkt->lun = (uint8_t) lun;
+ pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
+ pkt->modifier = type;
+ pkt->entry_status = 0;
+
+ /* Issue command to ISP */
+ qla1280_isp_cmd(ha);
+ }
+
+ LEAVE("qla1280_marker");
+}
+
+
+/*
+ * qla1280_64bit_start_scsi
+ * The start SCSI is responsible for building request packets on
+ * request ring and modifying ISP input pointer.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * sp = SB structure pointer.
+ *
+ * Returns:
+ * 0 = success, was able to issue command.
+ */
+#ifdef QLA_64BIT_PTR
+static int
+qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ struct scsi_cmnd *cmd = sp->cmd;
+ cmd_a64_entry_t *pkt;
+ __le32 *dword_ptr;
+ dma_addr_t dma_handle;
+ int status = 0;
+ int cnt;
+ int req_cnt;
+ int seg_cnt;
+ u8 dir;
+
+ ENTER("qla1280_64bit_start_scsi:");
+
+ /* Calculate number of entries and segments required. */
+ req_cnt = 1;
+ seg_cnt = scsi_dma_map(cmd);
+ if (seg_cnt > 0) {
+ if (seg_cnt > 2) {
+ req_cnt += (seg_cnt - 2) / 5;
+ if ((seg_cnt - 2) % 5)
+ req_cnt++;
+ }
+ } else if (seg_cnt < 0) {
+ status = 1;
+ goto out;
+ }
+
+ if ((req_cnt + 2) >= ha->req_q_cnt) {
+ /* Calculate number of free request entries. */
+ cnt = RD_REG_WORD(&reg->mailbox4);
+ if (ha->req_ring_index < cnt)
+ ha->req_q_cnt = cnt - ha->req_ring_index;
+ else
+ ha->req_q_cnt =
+ REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
+ }
+
+ dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
+ ha->req_q_cnt, seg_cnt);
+
+ /* If room for request in request ring. */
+ if ((req_cnt + 2) >= ha->req_q_cnt) {
+ status = SCSI_MLQUEUE_HOST_BUSY;
+ dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
+ "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
+ req_cnt);
+ goto out;
+ }
+
+ /* Check for room in outstanding command list. */
+ for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
+ ha->outstanding_cmds[cnt] != NULL; cnt++);
+
+ if (cnt >= MAX_OUTSTANDING_COMMANDS) {
+ status = SCSI_MLQUEUE_HOST_BUSY;
+ dprintk(2, "qla1280_start_scsi: NO ROOM IN "
+ "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
+ goto out;
+ }
+
+ ha->outstanding_cmds[cnt] = sp;
+ ha->req_q_cnt -= req_cnt;
+ CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
+
+ dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
+ cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
+ dprintk(2, " bus %i, target %i, lun %i\n",
+ SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
+ qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
+
+ /*
+ * Build command packet.
+ */
+ pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
+
+ pkt->entry_type = COMMAND_A64_TYPE;
+ pkt->entry_count = (uint8_t) req_cnt;
+ pkt->sys_define = (uint8_t) ha->req_ring_index;
+ pkt->entry_status = 0;
+ pkt->handle = cpu_to_le32(cnt);
+
+ /* Zero out remaining portion of packet. */
+ memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
+
+ /* Set ISP command timeout. */
+ pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+
+ /* Set device target ID and LUN */
+ pkt->lun = SCSI_LUN_32(cmd);
+ pkt->target = SCSI_BUS_32(cmd) ?
+ (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
+
+ /* Enable simple tag queuing if device supports it. */
+ if (cmd->device->simple_tags)
+ pkt->control_flags |= cpu_to_le16(BIT_3);
+
+ /* Load SCSI command packet. */
+ pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
+ memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
+ /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
+
+ /* Set transfer direction. */
+ dir = qla1280_data_direction(cmd);
+ pkt->control_flags |= cpu_to_le16(dir);
+
+ /* Set total data segment count. */
+ pkt->dseg_count = cpu_to_le16(seg_cnt);
+
+ /*
+ * Load data segments.
+ */
+ if (seg_cnt) { /* If data transfer. */
+ struct scatterlist *sg, *s;
+ int remseg = seg_cnt;
+
+ sg = scsi_sglist(cmd);
+
+ /* Setup packet address segment pointer. */
+ dword_ptr = (u32 *)&pkt->dseg_0_address;
+
+ /* Load command entry data segments. */
+ for_each_sg(sg, s, seg_cnt, cnt) {
+ if (cnt == 2)
+ break;
+
+ dma_handle = sg_dma_address(s);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+ if (ha->flags.use_pci_vchannel)
+ sn_pci_set_vchan(ha->pdev,
+ (unsigned long *)&dma_handle,
+ SCSI_BUS_32(cmd));
+#endif
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(dma_handle));
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32(dma_handle));
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
+ dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
+ cpu_to_le32(pci_dma_hi32(dma_handle)),
+ cpu_to_le32(pci_dma_lo32(dma_handle)),
+ cpu_to_le32(sg_dma_len(sg_next(s))));
+ remseg--;
+ }
+ dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
+ "command packet data - b %i, t %i, l %i \n",
+ SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ SCSI_LUN_32(cmd));
+ qla1280_dump_buffer(5, (char *)pkt,
+ REQUEST_ENTRY_SIZE);
+
+ /*
+ * Build continuation packets.
+ */
+ dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
+ "remains\n", seg_cnt);
+
+ while (remseg > 0) {
+ /* Update sg start */
+ sg = s;
+ /* Adjust ring index. */
+ ha->req_ring_index++;
+ if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+ ha->req_ring_index = 0;
+ ha->request_ring_ptr =
+ ha->request_ring;
+ } else
+ ha->request_ring_ptr++;
+
+ pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
+
+ /* Zero out packet. */
+ memset(pkt, 0, REQUEST_ENTRY_SIZE);
+
+ /* Load packet defaults. */
+ ((struct cont_a64_entry *) pkt)->entry_type =
+ CONTINUE_A64_TYPE;
+ ((struct cont_a64_entry *) pkt)->entry_count = 1;
+ ((struct cont_a64_entry *) pkt)->sys_define =
+ (uint8_t)ha->req_ring_index;
+ /* Setup packet address segment pointer. */
+ dword_ptr =
+ (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
+
+ /* Load continuation entry data segments. */
+ for_each_sg(sg, s, remseg, cnt) {
+ if (cnt == 5)
+ break;
+ dma_handle = sg_dma_address(s);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+ if (ha->flags.use_pci_vchannel)
+ sn_pci_set_vchan(ha->pdev,
+ (unsigned long *)&dma_handle,
+ SCSI_BUS_32(cmd));
+#endif
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(dma_handle));
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32(dma_handle));
+ *dword_ptr++ =
+ cpu_to_le32(sg_dma_len(s));
+ dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
+ cpu_to_le32(pci_dma_hi32(dma_handle)),
+ cpu_to_le32(pci_dma_lo32(dma_handle)),
+ cpu_to_le32(sg_dma_len(s)));
+ }
+ remseg -= cnt;
+ dprintk(5, "qla1280_64bit_start_scsi: "
+ "continuation packet data - b %i, t "
+ "%i, l %i \n", SCSI_BUS_32(cmd),
+ SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
+ qla1280_dump_buffer(5, (char *)pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+ } else { /* No data transfer */
+ dprintk(5, "qla1280_64bit_start_scsi: No data, command "
+ "packet data - b %i, t %i, l %i \n",
+ SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
+ qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
+ }
+ /* Adjust ring index. */
+ ha->req_ring_index++;
+ if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+ ha->req_ring_index = 0;
+ ha->request_ring_ptr = ha->request_ring;
+ } else
+ ha->request_ring_ptr++;
+
+ /* Set chip new ring index. */
+ dprintk(2,
+ "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
+ sp->flags |= SRB_SENT;
+ ha->actthreads++;
+ WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
+ /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
+ mmiowb();
+
+ out:
+ if (status)
+ dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
+ else
+ dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
+
+ return status;
+}
+#else /* !QLA_64BIT_PTR */
+
+/*
+ * qla1280_32bit_start_scsi
+ * The start SCSI is responsible for building request packets on
+ * request ring and modifying ISP input pointer.
+ *
+ * The Qlogic firmware interface allows every queue slot to have a SCSI
+ * command and up to 4 scatter/gather (SG) entries. If we need more
+ * than 4 SG entries, then continuation entries are used that can
+ * hold another 7 entries each. The start routine determines if there
+ * is eought empty slots then build the combination of requests to
+ * fulfill the OS request.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * sp = SCSI Request Block structure pointer.
+ *
+ * Returns:
+ * 0 = success, was able to issue command.
+ */
+static int
+qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct cmd_entry *pkt;
+ __le32 *dword_ptr;
+ int status = 0;
+ int cnt;
+ int req_cnt;
+ int seg_cnt;
+ u8 dir;
+
+ ENTER("qla1280_32bit_start_scsi");
+
+ dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
+ cmd->cmnd[0]);
+
+ /* Calculate number of entries and segments required. */
+ req_cnt = 1;
+ seg_cnt = scsi_dma_map(cmd);
+ if (seg_cnt) {
+ /*
+ * if greater than four sg entries then we need to allocate
+ * continuation entries
+ */
+ if (seg_cnt > 4) {
+ req_cnt += (seg_cnt - 4) / 7;
+ if ((seg_cnt - 4) % 7)
+ req_cnt++;
+ }
+ dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
+ cmd, seg_cnt, req_cnt);
+ } else if (seg_cnt < 0) {
+ status = 1;
+ goto out;
+ }
+
+ if ((req_cnt + 2) >= ha->req_q_cnt) {
+ /* Calculate number of free request entries. */
+ cnt = RD_REG_WORD(&reg->mailbox4);
+ if (ha->req_ring_index < cnt)
+ ha->req_q_cnt = cnt - ha->req_ring_index;
+ else
+ ha->req_q_cnt =
+ REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
+ }
+
+ dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
+ ha->req_q_cnt, seg_cnt);
+ /* If room for request in request ring. */
+ if ((req_cnt + 2) >= ha->req_q_cnt) {
+ status = SCSI_MLQUEUE_HOST_BUSY;
+ dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
+ "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
+ ha->req_q_cnt, req_cnt);
+ goto out;
+ }
+
+ /* Check for empty slot in outstanding command list. */
+ for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
+ (ha->outstanding_cmds[cnt] != 0); cnt++) ;
+
+ if (cnt >= MAX_OUTSTANDING_COMMANDS) {
+ status = SCSI_MLQUEUE_HOST_BUSY;
+ dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
+ "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
+ goto out;
+ }
+
+ CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
+ ha->outstanding_cmds[cnt] = sp;
+ ha->req_q_cnt -= req_cnt;
+
+ /*
+ * Build command packet.
+ */
+ pkt = (struct cmd_entry *) ha->request_ring_ptr;
+
+ pkt->entry_type = COMMAND_TYPE;
+ pkt->entry_count = (uint8_t) req_cnt;
+ pkt->sys_define = (uint8_t) ha->req_ring_index;
+ pkt->entry_status = 0;
+ pkt->handle = cpu_to_le32(cnt);
+
+ /* Zero out remaining portion of packet. */
+ memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
+
+ /* Set ISP command timeout. */
+ pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+
+ /* Set device target ID and LUN */
+ pkt->lun = SCSI_LUN_32(cmd);
+ pkt->target = SCSI_BUS_32(cmd) ?
+ (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
+
+ /* Enable simple tag queuing if device supports it. */
+ if (cmd->device->simple_tags)
+ pkt->control_flags |= cpu_to_le16(BIT_3);
+
+ /* Load SCSI command packet. */
+ pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
+ memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
+
+ /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
+ /* Set transfer direction. */
+ dir = qla1280_data_direction(cmd);
+ pkt->control_flags |= cpu_to_le16(dir);
+
+ /* Set total data segment count. */
+ pkt->dseg_count = cpu_to_le16(seg_cnt);
+
+ /*
+ * Load data segments.
+ */
+ if (seg_cnt) {
+ struct scatterlist *sg, *s;
+ int remseg = seg_cnt;
+
+ sg = scsi_sglist(cmd);
+
+ /* Setup packet address segment pointer. */
+ dword_ptr = &pkt->dseg_0_address;
+
+ dprintk(3, "Building S/G data segments..\n");
+ qla1280_dump_buffer(1, (char *)sg, 4 * 16);
+
+ /* Load command entry data segments. */
+ for_each_sg(sg, s, seg_cnt, cnt) {
+ if (cnt == 4)
+ break;
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
+ dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
+ (pci_dma_lo32(sg_dma_address(s))),
+ (sg_dma_len(s)));
+ remseg--;
+ }
+ /*
+ * Build continuation packets.
+ */
+ dprintk(3, "S/G Building Continuation"
+ "...seg_cnt=0x%x remains\n", seg_cnt);
+ while (remseg > 0) {
+ /* Continue from end point */
+ sg = s;
+ /* Adjust ring index. */
+ ha->req_ring_index++;
+ if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+ ha->req_ring_index = 0;
+ ha->request_ring_ptr =
+ ha->request_ring;
+ } else
+ ha->request_ring_ptr++;
+
+ pkt = (struct cmd_entry *)ha->request_ring_ptr;
+
+ /* Zero out packet. */
+ memset(pkt, 0, REQUEST_ENTRY_SIZE);
+
+ /* Load packet defaults. */
+ ((struct cont_entry *) pkt)->
+ entry_type = CONTINUE_TYPE;
+ ((struct cont_entry *) pkt)->entry_count = 1;
+
+ ((struct cont_entry *) pkt)->sys_define =
+ (uint8_t) ha->req_ring_index;
+
+ /* Setup packet address segment pointer. */
+ dword_ptr =
+ &((struct cont_entry *) pkt)->dseg_0_address;
+
+ /* Load continuation entry data segments. */
+ for_each_sg(sg, s, remseg, cnt) {
+ if (cnt == 7)
+ break;
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+ *dword_ptr++ =
+ cpu_to_le32(sg_dma_len(s));
+ dprintk(1,
+ "S/G Segment Cont. phys_addr=0x%x, "
+ "len=0x%x\n",
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+ cpu_to_le32(sg_dma_len(s)));
+ }
+ remseg -= cnt;
+ dprintk(5, "qla1280_32bit_start_scsi: "
+ "continuation packet data - "
+ "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
+ SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
+ qla1280_dump_buffer(5, (char *)pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+ } else { /* No data transfer at all */
+ dprintk(5, "qla1280_32bit_start_scsi: No data, command "
+ "packet data - \n");
+ qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
+ }
+ dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
+ qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
+ REQUEST_ENTRY_SIZE);
+
+ /* Adjust ring index. */
+ ha->req_ring_index++;
+ if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+ ha->req_ring_index = 0;
+ ha->request_ring_ptr = ha->request_ring;
+ } else
+ ha->request_ring_ptr++;
+
+ /* Set chip new ring index. */
+ dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
+ "for pending command\n");
+ sp->flags |= SRB_SENT;
+ ha->actthreads++;
+ WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
+ /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
+ mmiowb();
+
+out:
+ if (status)
+ dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
+
+ LEAVE("qla1280_32bit_start_scsi");
+
+ return status;
+}
+#endif
+
+/*
+ * qla1280_req_pkt
+ * Function is responsible for locking ring and
+ * getting a zeroed out request packet.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = failed to get slot.
+ */
+static request_t *
+qla1280_req_pkt(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ request_t *pkt = NULL;
+ int cnt;
+ uint32_t timer;
+
+ ENTER("qla1280_req_pkt");
+
+ /*
+ * This can be called from interrupt context, damn it!!!
+ */
+ /* Wait for 30 seconds for slot. */
+ for (timer = 15000000; timer; timer--) {
+ if (ha->req_q_cnt > 0) {
+ /* Calculate number of free request entries. */
+ cnt = RD_REG_WORD(&reg->mailbox4);
+ if (ha->req_ring_index < cnt)
+ ha->req_q_cnt = cnt - ha->req_ring_index;
+ else
+ ha->req_q_cnt =
+ REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
+ }
+
+ /* Found empty request ring slot? */
+ if (ha->req_q_cnt > 0) {
+ ha->req_q_cnt--;
+ pkt = ha->request_ring_ptr;
+
+ /* Zero out packet. */
+ memset(pkt, 0, REQUEST_ENTRY_SIZE);
+
+ /*
+ * How can this be right when we have a ring
+ * size of 512???
+ */
+ /* Set system defined field. */
+ pkt->sys_define = (uint8_t) ha->req_ring_index;
+
+ /* Set entry count. */
+ pkt->entry_count = 1;
+
+ break;
+ }
+
+ udelay(2); /* 10 */
+
+ /* Check for pending interrupts. */
+ qla1280_poll(ha);
+ }
+
+ if (!pkt)
+ dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
+ else
+ dprintk(3, "qla1280_req_pkt: exiting normally\n");
+
+ return pkt;
+}
+
+/*
+ * qla1280_isp_cmd
+ * Function is responsible for modifying ISP input pointer.
+ * Releases ring lock.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ */
+static void
+qla1280_isp_cmd(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+
+ ENTER("qla1280_isp_cmd");
+
+ dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
+ qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
+ REQUEST_ENTRY_SIZE);
+
+ /* Adjust ring index. */
+ ha->req_ring_index++;
+ if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+ ha->req_ring_index = 0;
+ ha->request_ring_ptr = ha->request_ring;
+ } else
+ ha->request_ring_ptr++;
+
+ /*
+ * Update request index to mailbox4 (Request Queue In).
+ * The mmiowb() ensures that this write is ordered with writes by other
+ * CPUs. Without the mmiowb(), it is possible for the following:
+ * CPUA posts write of index 5 to mailbox4
+ * CPUA releases host lock
+ * CPUB acquires host lock
+ * CPUB posts write of index 6 to mailbox4
+ * On PCI bus, order reverses and write of 6 posts, then index 5,
+ * causing chip to issue full queue of stale commands
+ * The mmiowb() prevents future writes from crossing the barrier.
+ * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ */
+ WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
+ mmiowb();
+
+ LEAVE("qla1280_isp_cmd");
+}
+
+/****************************************************************************/
+/* Interrupt Service Routine. */
+/****************************************************************************/
+
+/****************************************************************************
+ * qla1280_isr
+ * Calls I/O done on command completion.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * done_q = done queue.
+ ****************************************************************************/
+static void
+qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ struct response *pkt;
+ struct srb *sp = NULL;
+ uint16_t mailbox[MAILBOX_REGISTER_COUNT];
+ uint16_t *wptr;
+ uint32_t index;
+ u16 istatus;
+
+ ENTER("qla1280_isr");
+
+ istatus = RD_REG_WORD(&reg->istatus);
+ if (!(istatus & (RISC_INT | PCI_INT)))
+ return;
+
+ /* Save mailbox register 5 */
+ mailbox[5] = RD_REG_WORD(&reg->mailbox5);
+
+ /* Check for mailbox interrupt. */
+
+ mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
+
+ if (mailbox[0] & BIT_0) {
+ /* Get mailbox data. */
+ /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
+
+ wptr = &mailbox[0];
+ *wptr++ = RD_REG_WORD(&reg->mailbox0);
+ *wptr++ = RD_REG_WORD(&reg->mailbox1);
+ *wptr = RD_REG_WORD(&reg->mailbox2);
+ if (mailbox[0] != MBA_SCSI_COMPLETION) {
+ wptr++;
+ *wptr++ = RD_REG_WORD(&reg->mailbox3);
+ *wptr++ = RD_REG_WORD(&reg->mailbox4);
+ wptr++;
+ *wptr++ = RD_REG_WORD(&reg->mailbox6);
+ *wptr = RD_REG_WORD(&reg->mailbox7);
+ }
+
+ /* Release mailbox registers. */
+
+ WRT_REG_WORD(&reg->semaphore, 0);
+ WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
+
+ dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
+ mailbox[0]);
+
+ /* Handle asynchronous event */
+ switch (mailbox[0]) {
+ case MBA_SCSI_COMPLETION: /* Response completion */
+ dprintk(5, "qla1280_isr: mailbox SCSI response "
+ "completion\n");
+
+ if (ha->flags.online) {
+ /* Get outstanding command index. */
+ index = mailbox[2] << 16 | mailbox[1];
+
+ /* Validate handle. */
+ if (index < MAX_OUTSTANDING_COMMANDS)
+ sp = ha->outstanding_cmds[index];
+ else
+ sp = NULL;
+
+ if (sp) {
+ /* Free outstanding command slot. */
+ ha->outstanding_cmds[index] = NULL;
+
+ /* Save ISP completion status */
+ CMD_RESULT(sp->cmd) = 0;
+ CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
+ /* Place block on done queue */
+ list_add_tail(&sp->list, done_q);
+ } else {
+ /*
+ * If we get here we have a real problem!
+ */
+ printk(KERN_WARNING
+ "qla1280: ISP invalid handle\n");
+ }
+ }
+ break;
+
+ case MBA_BUS_RESET: /* SCSI Bus Reset */
+ ha->flags.reset_marker = 1;
+ index = mailbox[6] & BIT_0;
+ ha->bus_settings[index].reset_marker = 1;
+
+ printk(KERN_DEBUG "qla1280_isr(): index %i "
+ "asynchronous BUS_RESET\n", index);
+ break;
+
+ case MBA_SYSTEM_ERR: /* System Error */
+ printk(KERN_WARNING
+ "qla1280: ISP System Error - mbx1=%xh, mbx2="
+ "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
+ mailbox[3]);
+ break;
+
+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
+ printk(KERN_WARNING
+ "qla1280: ISP Request Transfer Error\n");
+ break;
+
+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
+ printk(KERN_WARNING
+ "qla1280: ISP Response Transfer Error\n");
+ break;
+
+ case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
+ dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
+ break;
+
+ case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
+ dprintk(2,
+ "qla1280_isr: asynchronous TIMEOUT_RESET\n");
+ break;
+
+ case MBA_DEVICE_RESET: /* Bus Device Reset */
+ printk(KERN_INFO "qla1280_isr(): asynchronous "
+ "BUS_DEVICE_RESET\n");
+
+ ha->flags.reset_marker = 1;
+ index = mailbox[6] & BIT_0;
+ ha->bus_settings[index].reset_marker = 1;
+ break;
+
+ case MBA_BUS_MODE_CHANGE:
+ dprintk(2,
+ "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
+ break;
+
+ default:
+ /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
+ if (mailbox[0] < MBA_ASYNC_EVENT) {
+ wptr = &mailbox[0];
+ memcpy((uint16_t *) ha->mailbox_out, wptr,
+ MAILBOX_REGISTER_COUNT *
+ sizeof(uint16_t));
+
+ if(ha->mailbox_wait != NULL)
+ complete(ha->mailbox_wait);
+ }
+ break;
+ }
+ } else {
+ WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
+ }
+
+ /*
+ * We will receive interrupts during mailbox testing prior to
+ * the card being marked online, hence the double check.
+ */
+ if (!(ha->flags.online && !ha->mailbox_wait)) {
+ dprintk(2, "qla1280_isr: Response pointer Error\n");
+ goto out;
+ }
+
+ if (mailbox[5] >= RESPONSE_ENTRY_CNT)
+ goto out;
+
+ while (ha->rsp_ring_index != mailbox[5]) {
+ pkt = ha->response_ring_ptr;
+
+ dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
+ " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
+ dprintk(5,"qla1280_isr: response packet data\n");
+ qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
+
+ if (pkt->entry_type == STATUS_TYPE) {
+ if ((le16_to_cpu(pkt->scsi_status) & 0xff)
+ || pkt->comp_status || pkt->entry_status) {
+ dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
+ "0x%x mailbox[5] = 0x%x, comp_status "
+ "= 0x%x, scsi_status = 0x%x\n",
+ ha->rsp_ring_index, mailbox[5],
+ le16_to_cpu(pkt->comp_status),
+ le16_to_cpu(pkt->scsi_status));
+ }
+ } else {
+ dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
+ "0x%x, mailbox[5] = 0x%x\n",
+ ha->rsp_ring_index, mailbox[5]);
+ dprintk(2, "qla1280_isr: response packet data\n");
+ qla1280_dump_buffer(2, (char *)pkt,
+ RESPONSE_ENTRY_SIZE);
+ }
+
+ if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
+ dprintk(2, "status: Cmd %p, handle %i\n",
+ ha->outstanding_cmds[pkt->handle]->cmd,
+ pkt->handle);
+ if (pkt->entry_type == STATUS_TYPE)
+ qla1280_status_entry(ha, pkt, done_q);
+ else
+ qla1280_error_entry(ha, pkt, done_q);
+ /* Adjust ring index. */
+ ha->rsp_ring_index++;
+ if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
+ ha->rsp_ring_index = 0;
+ ha->response_ring_ptr = ha->response_ring;
+ } else
+ ha->response_ring_ptr++;
+ WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
+ }
+ }
+
+ out:
+ LEAVE("qla1280_isr");
+}
+
+/*
+ * qla1280_rst_aen
+ * Processes asynchronous reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ */
+static void
+qla1280_rst_aen(struct scsi_qla_host *ha)
+{
+ uint8_t bus;
+
+ ENTER("qla1280_rst_aen");
+
+ if (ha->flags.online && !ha->flags.reset_active &&
+ !ha->flags.abort_isp_active) {
+ ha->flags.reset_active = 1;
+ while (ha->flags.reset_marker) {
+ /* Issue marker command. */
+ ha->flags.reset_marker = 0;
+ for (bus = 0; bus < ha->ports &&
+ !ha->flags.reset_marker; bus++) {
+ if (ha->bus_settings[bus].reset_marker) {
+ ha->bus_settings[bus].reset_marker = 0;
+ qla1280_marker(ha, bus, 0, 0,
+ MK_SYNC_ALL);
+ }
+ }
+ }
+ }
+
+ LEAVE("qla1280_rst_aen");
+}
+
+
+/*
+ * qla1280_status_entry
+ * Processes received ISP status entry.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * pkt = entry pointer.
+ * done_q = done queue.
+ */
+static void
+qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
+ struct list_head *done_q)
+{
+ unsigned int bus, target, lun;
+ int sense_sz;
+ struct srb *sp;
+ struct scsi_cmnd *cmd;
+ uint32_t handle = le32_to_cpu(pkt->handle);
+ uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
+ uint16_t comp_status = le16_to_cpu(pkt->comp_status);
+
+ ENTER("qla1280_status_entry");
+
+ /* Validate handle. */
+ if (handle < MAX_OUTSTANDING_COMMANDS)
+ sp = ha->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (!sp) {
+ printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
+ goto out;
+ }
+
+ /* Free outstanding command slot. */
+ ha->outstanding_cmds[handle] = NULL;
+
+ cmd = sp->cmd;
+
+ /* Generate LU queue on cntrl, target, LUN */
+ bus = SCSI_BUS_32(cmd);
+ target = SCSI_TCN_32(cmd);
+ lun = SCSI_LUN_32(cmd);
+
+ if (comp_status || scsi_status) {
+ dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
+ "0x%x, handle = 0x%x\n", comp_status,
+ scsi_status, handle);
+ }
+
+ /* Target busy or queue full */
+ if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
+ (scsi_status & 0xFF) == SAM_STAT_BUSY) {
+ CMD_RESULT(cmd) = scsi_status & 0xff;
+ } else {
+
+ /* Save ISP completion status */
+ CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
+
+ if (scsi_status & SAM_STAT_CHECK_CONDITION) {
+ if (comp_status != CS_ARS_FAILED) {
+ uint16_t req_sense_length =
+ le16_to_cpu(pkt->req_sense_length);
+ if (req_sense_length < CMD_SNSLEN(cmd))
+ sense_sz = req_sense_length;
+ else
+ /*
+ * scsi_cmnd->sense_buffer is
+ * 64 bytes, why only copy 63?
+ * This looks wrong! /Jes
+ */
+ sense_sz = CMD_SNSLEN(cmd) - 1;
+
+ memcpy(cmd->sense_buffer,
+ &pkt->req_sense_data, sense_sz);
+ } else
+ sense_sz = 0;
+ memset(cmd->sense_buffer + sense_sz, 0,
+ SCSI_SENSE_BUFFERSIZE - sense_sz);
+
+ dprintk(2, "qla1280_status_entry: Check "
+ "condition Sense data, b %i, t %i, "
+ "l %i\n", bus, target, lun);
+ if (sense_sz)
+ qla1280_dump_buffer(2,
+ (char *)cmd->sense_buffer,
+ sense_sz);
+ }
+ }
+
+ CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
+ /* Place command on done queue. */
+ list_add_tail(&sp->list, done_q);
+ out:
+ LEAVE("qla1280_status_entry");
+}
+
+/*
+ * qla1280_error_entry
+ * Processes error entry.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * pkt = entry pointer.
+ * done_q = done queue.
+ */
+static void
+qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
+ struct list_head *done_q)
+{
+ struct srb *sp;
+ uint32_t handle = le32_to_cpu(pkt->handle);
+
+ ENTER("qla1280_error_entry");
+
+ if (pkt->entry_status & BIT_3)
+ dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
+ else if (pkt->entry_status & BIT_2)
+ dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
+ else if (pkt->entry_status & BIT_1)
+ dprintk(2, "qla1280_error_entry: FULL flag error\n");
+ else
+ dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
+
+ /* Validate handle. */
+ if (handle < MAX_OUTSTANDING_COMMANDS)
+ sp = ha->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (sp) {
+ /* Free outstanding command slot. */
+ ha->outstanding_cmds[handle] = NULL;
+
+ /* Bad payload or header */
+ if (pkt->entry_status & (BIT_3 + BIT_2)) {
+ /* Bad payload or header, set error status. */
+ /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
+ CMD_RESULT(sp->cmd) = DID_ERROR << 16;
+ } else if (pkt->entry_status & BIT_1) { /* FULL flag */
+ CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
+ } else {
+ /* Set error status. */
+ CMD_RESULT(sp->cmd) = DID_ERROR << 16;
+ }
+
+ CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
+ /* Place command on done queue. */
+ list_add_tail(&sp->list, done_q);
+ }
+#ifdef QLA_64BIT_PTR
+ else if (pkt->entry_type == COMMAND_A64_TYPE) {
+ printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
+ }
+#endif
+
+ LEAVE("qla1280_error_entry");
+}
+
+/*
+ * qla1280_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+static int
+qla1280_abort_isp(struct scsi_qla_host *ha)
+{
+ struct device_reg __iomem *reg = ha->iobase;
+ struct srb *sp;
+ int status = 0;
+ int cnt;
+ int bus;
+
+ ENTER("qla1280_abort_isp");
+
+ if (ha->flags.abort_isp_active || !ha->flags.online)
+ goto out;
+
+ ha->flags.abort_isp_active = 1;
+
+ /* Disable ISP interrupts. */
+ qla1280_disable_intrs(ha);
+ WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
+ RD_REG_WORD(&reg->id_l);
+
+ printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
+ ha->host_no);
+ /* Dequeue all commands in outstanding command list. */
+ for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ struct scsi_cmnd *cmd;
+ sp = ha->outstanding_cmds[cnt];
+ if (sp) {
+ cmd = sp->cmd;
+ CMD_RESULT(cmd) = DID_RESET << 16;
+ CMD_HANDLE(cmd) = COMPLETED_HANDLE;
+ ha->outstanding_cmds[cnt] = NULL;
+ list_add_tail(&sp->list, &ha->done_q);
+ }
+ }
+
+ qla1280_done(ha);
+
+ status = qla1280_load_firmware(ha);
+ if (status)
+ goto out;
+
+ /* Setup adapter based on NVRAM parameters. */
+ qla1280_nvram_config (ha);
+
+ status = qla1280_init_rings(ha);
+ if (status)
+ goto out;
+
+ /* Issue SCSI reset. */
+ for (bus = 0; bus < ha->ports; bus++)
+ qla1280_bus_reset(ha, bus);
+
+ ha->flags.abort_isp_active = 0;
+ out:
+ if (status) {
+ printk(KERN_WARNING
+ "qla1280: ISP error recovery failed, board disabled");
+ qla1280_reset_adapter(ha);
+ dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
+ }
+
+ LEAVE("qla1280_abort_isp");
+ return status;
+}
+
+
+/*
+ * qla1280_debounce_register
+ * Debounce register.
+ *
+ * Input:
+ * port = register address.
+ *
+ * Returns:
+ * register value.
+ */
+static u16
+qla1280_debounce_register(volatile u16 __iomem * addr)
+{
+ volatile u16 ret;
+ volatile u16 ret2;
+
+ ret = RD_REG_WORD(addr);
+ ret2 = RD_REG_WORD(addr);
+
+ if (ret == ret2)
+ return ret;
+
+ do {
+ cpu_relax();
+ ret = RD_REG_WORD(addr);
+ ret2 = RD_REG_WORD(addr);
+ } while (ret != ret2);
+
+ return ret;
+}
+
+
+/************************************************************************
+ * qla1280_check_for_dead_scsi_bus *
+ * *
+ * This routine checks for a dead SCSI bus *
+ ************************************************************************/
+#define SET_SXP_BANK 0x0100
+#define SCSI_PHASE_INVALID 0x87FF
+static int
+qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
+{
+ uint16_t config_reg, scsi_control;
+ struct device_reg __iomem *reg = ha->iobase;
+
+ if (ha->bus_settings[bus].scsi_bus_dead) {
+ WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
+ config_reg = RD_REG_WORD(&reg->cfg_1);
+ WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
+ scsi_control = RD_REG_WORD(&reg->scsiControlPins);
+ WRT_REG_WORD(&reg->cfg_1, config_reg);
+ WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
+
+ if (scsi_control == SCSI_PHASE_INVALID) {
+ ha->bus_settings[bus].scsi_bus_dead = 1;
+ return 1; /* bus is dead */
+ } else {
+ ha->bus_settings[bus].scsi_bus_dead = 0;
+ ha->bus_settings[bus].failed_reset_count = 0;
+ }
+ }
+ return 0; /* bus is not dead */
+}
+
+static void
+qla1280_get_target_parameters(struct scsi_qla_host *ha,
+ struct scsi_device *device)
+{
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ int bus, target, lun;
+
+ bus = device->channel;
+ target = device->id;
+ lun = device->lun;
+
+
+ mb[0] = MBC_GET_TARGET_PARAMETERS;
+ mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
+ mb[1] <<= 8;
+ qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
+ &mb[0]);
+
+ printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
+
+ if (mb[3] != 0) {
+ printk(" Sync: period %d, offset %d",
+ (mb[3] & 0xff), (mb[3] >> 8));
+ if (mb[2] & BIT_13)
+ printk(", Wide");
+ if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
+ printk(", DT");
+ } else
+ printk(" Async");
+
+ if (device->simple_tags)
+ printk(", Tagged queuing: depth %d", device->queue_depth);
+ printk("\n");
+}
+
+
+#if DEBUG_QLA1280
+static void
+__qla1280_dump_buffer(char *b, int size)
+{
+ int cnt;
+ u8 c;
+
+ printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
+ "Bh Ch Dh Eh Fh\n");
+ printk(KERN_DEBUG "---------------------------------------------"
+ "------------------\n");
+
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+
+ printk("0x%02x", c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
+ }
+ if (cnt % 16)
+ printk("\n");
+}
+
+/**************************************************************************
+ * ql1280_print_scsi_cmd
+ *
+ **************************************************************************/
+static void
+__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha;
+ struct Scsi_Host *host = CMD_HOST(cmd);
+ struct srb *sp;
+ /* struct scatterlist *sg; */
+
+ int i;
+ ha = (struct scsi_qla_host *)host->hostdata;
+
+ sp = (struct srb *)CMD_SP(cmd);
+ printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
+ printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+ SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
+ CMD_CDBLEN(cmd));
+ printk(" CDB = ");
+ for (i = 0; i < cmd->cmd_len; i++) {
+ printk("0x%02x ", cmd->cmnd[i]);
+ }
+ printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
+ printk(" request buffer=0x%p, request buffer len=0x%x\n",
+ scsi_sglist(cmd), scsi_bufflen(cmd));
+ /* if (cmd->use_sg)
+ {
+ sg = (struct scatterlist *) cmd->request_buffer;
+ printk(" SG buffer: \n");
+ qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
+ } */
+ printk(" tag=%d, transfersize=0x%x \n",
+ cmd->tag, cmd->transfersize);
+ printk(" SP=0x%p\n", CMD_SP(cmd));
+ printk(" underflow size = 0x%x, direction=0x%x\n",
+ cmd->underflow, cmd->sc_data_direction);
+}
+
+/**************************************************************************
+ * ql1280_dump_device
+ *
+ **************************************************************************/
+static void
+ql1280_dump_device(struct scsi_qla_host *ha)
+{
+
+ struct scsi_cmnd *cp;
+ struct srb *sp;
+ int i;
+
+ printk(KERN_DEBUG "Outstanding Commands on controller:\n");
+
+ for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
+ if ((sp = ha->outstanding_cmds[i]) == NULL)
+ continue;
+ if ((cp = sp->cmd) == NULL)
+ continue;
+ qla1280_print_scsi_cmd(1, cp);
+ }
+}
+#endif
+
+
+enum tokens {
+ TOKEN_NVRAM,
+ TOKEN_SYNC,
+ TOKEN_WIDE,
+ TOKEN_PPR,
+ TOKEN_VERBOSE,
+ TOKEN_DEBUG,
+};
+
+struct setup_tokens {
+ char *token;
+ int val;
+};
+
+static struct setup_tokens setup_token[] __initdata =
+{
+ { "nvram", TOKEN_NVRAM },
+ { "sync", TOKEN_SYNC },
+ { "wide", TOKEN_WIDE },
+ { "ppr", TOKEN_PPR },
+ { "verbose", TOKEN_VERBOSE },
+ { "debug", TOKEN_DEBUG },
+};
+
+
+/**************************************************************************
+ * qla1280_setup
+ *
+ * Handle boot parameters. This really needs to be changed so one
+ * can specify per adapter parameters.
+ **************************************************************************/
+static int __init
+qla1280_setup(char *s)
+{
+ char *cp, *ptr;
+ unsigned long val;
+ int toke;
+
+ cp = s;
+
+ while (cp && (ptr = strchr(cp, ':'))) {
+ ptr++;
+ if (!strcmp(ptr, "yes")) {
+ val = 0x10000;
+ ptr += 3;
+ } else if (!strcmp(ptr, "no")) {
+ val = 0;
+ ptr += 2;
+ } else
+ val = simple_strtoul(ptr, &ptr, 0);
+
+ switch ((toke = qla1280_get_token(cp))) {
+ case TOKEN_NVRAM:
+ if (!val)
+ driver_setup.no_nvram = 1;
+ break;
+ case TOKEN_SYNC:
+ if (!val)
+ driver_setup.no_sync = 1;
+ else if (val != 0x10000)
+ driver_setup.sync_mask = val;
+ break;
+ case TOKEN_WIDE:
+ if (!val)
+ driver_setup.no_wide = 1;
+ else if (val != 0x10000)
+ driver_setup.wide_mask = val;
+ break;
+ case TOKEN_PPR:
+ if (!val)
+ driver_setup.no_ppr = 1;
+ else if (val != 0x10000)
+ driver_setup.ppr_mask = val;
+ break;
+ case TOKEN_VERBOSE:
+ qla1280_verbose = val;
+ break;
+ default:
+ printk(KERN_INFO "qla1280: unknown boot option %s\n",
+ cp);
+ }
+
+ cp = strchr(ptr, ';');
+ if (cp)
+ cp++;
+ else {
+ break;
+ }
+ }
+ return 1;
+}
+
+
+static int __init
+qla1280_get_token(char *str)
+{
+ char *sep;
+ long ret = -1;
+ int i;
+
+ sep = strchr(str, ':');
+
+ if (sep) {
+ for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
+ if (!strncmp(setup_token[i].token, str, (sep - str))) {
+ ret = setup_token[i].val;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+
+static struct scsi_host_template qla1280_driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = "qla1280",
+ .name = "Qlogic ISP 1280/12160",
+ .info = qla1280_info,
+ .slave_configure = qla1280_slave_configure,
+ .queuecommand = qla1280_queuecommand,
+ .eh_abort_handler = qla1280_eh_abort,
+ .eh_device_reset_handler= qla1280_eh_device_reset,
+ .eh_bus_reset_handler = qla1280_eh_bus_reset,
+ .eh_host_reset_handler = qla1280_eh_adapter_reset,
+ .bios_param = qla1280_biosparam,
+ .can_queue = 0xfffff,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+
+static int
+qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int devnum = id->driver_data;
+ struct qla_boards *bdp = &ql1280_board_tbl[devnum];
+ struct Scsi_Host *host;
+ struct scsi_qla_host *ha;
+ int error = -ENODEV;
+
+ /* Bypass all AMI SUBSYS VENDOR IDs */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
+ printk(KERN_INFO
+ "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
+ goto error;
+ }
+
+ printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
+ bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
+
+ if (pci_enable_device(pdev)) {
+ printk(KERN_WARNING
+ "qla1280: Failed to enabled pci device, aborting.\n");
+ goto error;
+ }
+
+ pci_set_master(pdev);
+
+ error = -ENOMEM;
+ host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
+ if (!host) {
+ printk(KERN_WARNING
+ "qla1280: Failed to register host, aborting.\n");
+ goto error_disable_device;
+ }
+
+ ha = (struct scsi_qla_host *)host->hostdata;
+ memset(ha, 0, sizeof(struct scsi_qla_host));
+
+ ha->pdev = pdev;
+ ha->devnum = devnum; /* specifies microcode load address */
+
+#ifdef QLA_64BIT_PTR
+ if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "scsi(%li): Unable to set a "
+ "suitable DMA mask - aborting\n", ha->host_no);
+ error = -ENODEV;
+ goto error_put_host;
+ }
+ } else
+ dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
+ ha->host_no);
+#else
+ if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "scsi(%li): Unable to set a "
+ "suitable DMA mask - aborting\n", ha->host_no);
+ error = -ENODEV;
+ goto error_put_host;
+ }
+#endif
+
+ ha->request_ring = pci_alloc_consistent(ha->pdev,
+ ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
+ &ha->request_dma);
+ if (!ha->request_ring) {
+ printk(KERN_INFO "qla1280: Failed to get request memory\n");
+ goto error_put_host;
+ }
+
+ ha->response_ring = pci_alloc_consistent(ha->pdev,
+ ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
+ &ha->response_dma);
+ if (!ha->response_ring) {
+ printk(KERN_INFO "qla1280: Failed to get response memory\n");
+ goto error_free_request_ring;
+ }
+
+ ha->ports = bdp->numPorts;
+
+ ha->host = host;
+ ha->host_no = host->host_no;
+
+ host->irq = pdev->irq;
+ host->max_channel = bdp->numPorts - 1;
+ host->max_lun = MAX_LUNS - 1;
+ host->max_id = MAX_TARGETS;
+ host->max_sectors = 1024;
+ host->unique_id = host->host_no;
+
+ error = -ENODEV;
+
+#if MEMORY_MAPPED_IO
+ ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
+ if (!ha->mmpbase) {
+ printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
+ goto error_free_response_ring;
+ }
+
+ host->base = (unsigned long)ha->mmpbase;
+ ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
+#else
+ host->io_port = pci_resource_start(ha->pdev, 0);
+ if (!request_region(host->io_port, 0xff, "qla1280")) {
+ printk(KERN_INFO "qla1280: Failed to reserve i/o region "
+ "0x%04lx-0x%04lx - already in use\n",
+ host->io_port, host->io_port + 0xff);
+ goto error_free_response_ring;
+ }
+
+ ha->iobase = (struct device_reg *)host->io_port;
+#endif
+
+ INIT_LIST_HEAD(&ha->done_q);
+
+ /* Disable ISP interrupts. */
+ qla1280_disable_intrs(ha);
+
+ if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
+ "qla1280", ha)) {
+ printk("qla1280 : Failed to reserve interrupt %d already "
+ "in use\n", pdev->irq);
+ goto error_release_region;
+ }
+
+ /* load the F/W, read paramaters, and init the H/W */
+ if (qla1280_initialize_adapter(ha)) {
+ printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
+ goto error_free_irq;
+ }
+
+ /* set our host ID (need to do something about our two IDs) */
+ host->this_id = ha->bus_settings[0].id;
+
+ pci_set_drvdata(pdev, host);
+
+ error = scsi_add_host(host, &pdev->dev);
+ if (error)
+ goto error_disable_adapter;
+ scsi_scan_host(host);
+
+ return 0;
+
+ error_disable_adapter:
+ qla1280_disable_intrs(ha);
+ error_free_irq:
+ free_irq(pdev->irq, ha);
+ error_release_region:
+#if MEMORY_MAPPED_IO
+ iounmap(ha->mmpbase);
+#else
+ release_region(host->io_port, 0xff);
+#endif
+ error_free_response_ring:
+ pci_free_consistent(ha->pdev,
+ ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
+ ha->response_ring, ha->response_dma);
+ error_free_request_ring:
+ pci_free_consistent(ha->pdev,
+ ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
+ ha->request_ring, ha->request_dma);
+ error_put_host:
+ scsi_host_put(host);
+ error_disable_device:
+ pci_disable_device(pdev);
+ error:
+ return error;
+}
+
+
+static void
+qla1280_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
+
+ scsi_remove_host(host);
+
+ qla1280_disable_intrs(ha);
+
+ free_irq(pdev->irq, ha);
+
+#if MEMORY_MAPPED_IO
+ iounmap(ha->mmpbase);
+#else
+ release_region(host->io_port, 0xff);
+#endif
+
+ pci_free_consistent(ha->pdev,
+ ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
+ ha->request_ring, ha->request_dma);
+ pci_free_consistent(ha->pdev,
+ ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
+ ha->response_ring, ha->response_dma);
+
+ pci_disable_device(pdev);
+
+ scsi_host_put(host);
+}
+
+static struct pci_driver qla1280_pci_driver = {
+ .name = "qla1280",
+ .id_table = qla1280_pci_tbl,
+ .probe = qla1280_probe_one,
+ .remove = qla1280_remove_one,
+};
+
+static int __init
+qla1280_init(void)
+{
+ if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
+ printk(KERN_WARNING
+ "qla1280: struct srb too big, aborting\n");
+ return -EINVAL;
+ }
+
+#ifdef MODULE
+ /*
+ * If we are called as a module, the qla1280 pointer may not be null
+ * and it would point to our bootup string, just like on the lilo
+ * command line. IF not NULL, then process this config string with
+ * qla1280_setup
+ *
+ * Boot time Options
+ * To add options at boot time add a line to your lilo.conf file like:
+ * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ */
+ if (qla1280)
+ qla1280_setup(qla1280);
+#endif
+
+ return pci_register_driver(&qla1280_pci_driver);
+}
+
+static void __exit
+qla1280_exit(void)
+{
+ int i;
+
+ pci_unregister_driver(&qla1280_pci_driver);
+ /* release any allocated firmware images */
+ for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
+ release_firmware(qla1280_fw_tbl[i].fw);
+ qla1280_fw_tbl[i].fw = NULL;
+ }
+}
+
+module_init(qla1280_init);
+module_exit(qla1280_exit);
+
+MODULE_AUTHOR("Qlogic & Jes Sorensen");
+MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
+MODULE_LICENSE("GPL");
+/*(DEBLOBBED)*/
+MODULE_VERSION(QLA1280_VERSION);
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
new file mode 100644
index 000000000..834884b9e
--- /dev/null
+++ b/drivers/scsi/qla1280.h
@@ -0,0 +1,1081 @@
+/******************************************************************************
+* QLOGIC LINUX SOFTWARE
+*
+* QLogic ISP1280 (Ultra2) /12160 (Ultra3) SCSI driver
+* Copyright (C) 2000 Qlogic Corporation
+* (www.qlogic.com)
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2, or (at your option) any
+* later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+******************************************************************************/
+
+#ifndef _QLA1280_H
+#define _QLA1280_H
+
+/*
+ * Data bit definitions.
+ */
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
+#if MEMORY_MAPPED_IO
+#define RD_REG_WORD(addr) readw_relaxed(addr)
+#define RD_REG_WORD_dmasync(addr) readw(addr)
+#define WRT_REG_WORD(addr, data) writew(data, addr)
+#else /* MEMORY_MAPPED_IO */
+#define RD_REG_WORD(addr) inw((unsigned long)addr)
+#define RD_REG_WORD_dmasync(addr) RD_REG_WORD(addr)
+#define WRT_REG_WORD(addr, data) outw(data, (unsigned long)addr)
+#endif /* MEMORY_MAPPED_IO */
+
+/*
+ * Host adapter default definitions.
+ */
+#define MAX_BUSES 2 /* 2 */
+#define MAX_B_BITS 1
+
+#define MAX_TARGETS 16 /* 16 */
+#define MAX_T_BITS 4 /* 4 */
+
+#define MAX_LUNS 8 /* 32 */
+#define MAX_L_BITS 3 /* 5 */
+
+/*
+ * Watchdog time quantum
+ */
+#define QLA1280_WDG_TIME_QUANTUM 5 /* In seconds */
+
+/* Command retry count (0-65535) */
+#define COMMAND_RETRY_COUNT 255
+
+/* Maximum outstanding commands in ISP queues */
+#define MAX_OUTSTANDING_COMMANDS 512
+#define COMPLETED_HANDLE ((unsigned char *) \
+ (MAX_OUTSTANDING_COMMANDS + 2))
+
+/* ISP request and response entry counts (37-65535) */
+#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */
+
+/*
+ * SCSI Request Block structure (sp) that is placed
+ * on cmd->SCp location of every I/O
+ */
+struct srb {
+ struct list_head list; /* (8/16) LU queue */
+ struct scsi_cmnd *cmd; /* (4/8) SCSI command block */
+ /* NOTE: the sp->cmd will be NULL when this completion is
+ * called, so you should know the scsi_cmnd when using this */
+ struct completion *wait;
+ dma_addr_t saved_dma_handle; /* for unmap of single transfers */
+ uint8_t flags; /* (1) Status flags. */
+ uint8_t dir; /* direction of transfer */
+};
+
+/*
+ * SRB flag definitions
+ */
+#define SRB_TIMEOUT (1 << 0) /* Command timed out */
+#define SRB_SENT (1 << 1) /* Command sent to ISP */
+#define SRB_ABORT_PENDING (1 << 2) /* Command abort sent to device */
+#define SRB_ABORTED (1 << 3) /* Command aborted command already */
+
+/*
+ * ISP I/O Register Set structure definitions.
+ */
+struct device_reg {
+ uint16_t id_l; /* ID low */
+ uint16_t id_h; /* ID high */
+ uint16_t cfg_0; /* Configuration 0 */
+#define ISP_CFG0_HWMSK 0x000f /* Hardware revision mask */
+#define ISP_CFG0_1020 BIT_0 /* ISP1020 */
+#define ISP_CFG0_1020A BIT_1 /* ISP1020A */
+#define ISP_CFG0_1040 BIT_2 /* ISP1040 */
+#define ISP_CFG0_1040A BIT_3 /* ISP1040A */
+#define ISP_CFG0_1040B BIT_4 /* ISP1040B */
+#define ISP_CFG0_1040C BIT_5 /* ISP1040C */
+ uint16_t cfg_1; /* Configuration 1 */
+#define ISP_CFG1_F128 BIT_6 /* 128-byte FIFO threshold */
+#define ISP_CFG1_F64 BIT_4|BIT_5 /* 128-byte FIFO threshold */
+#define ISP_CFG1_F32 BIT_5 /* 128-byte FIFO threshold */
+#define ISP_CFG1_F16 BIT_4 /* 128-byte FIFO threshold */
+#define ISP_CFG1_BENAB BIT_2 /* Global Bus burst enable */
+#define ISP_CFG1_SXP BIT_0 /* SXP register select */
+ uint16_t ictrl; /* Interface control */
+#define ISP_RESET BIT_0 /* ISP soft reset */
+#define ISP_EN_INT BIT_1 /* ISP enable interrupts. */
+#define ISP_EN_RISC BIT_2 /* ISP enable RISC interrupts. */
+#define ISP_FLASH_ENABLE BIT_8 /* Flash BIOS Read/Write enable */
+#define ISP_FLASH_UPPER BIT_9 /* Flash upper bank select */
+ uint16_t istatus; /* Interface status */
+#define PCI_64BIT_SLOT BIT_14 /* PCI 64-bit slot indicator. */
+#define RISC_INT BIT_2 /* RISC interrupt */
+#define PCI_INT BIT_1 /* PCI interrupt */
+ uint16_t semaphore; /* Semaphore */
+ uint16_t nvram; /* NVRAM register. */
+#define NV_DESELECT 0
+#define NV_CLOCK BIT_0
+#define NV_SELECT BIT_1
+#define NV_DATA_OUT BIT_2
+#define NV_DATA_IN BIT_3
+ uint16_t flash_data; /* Flash BIOS data */
+ uint16_t flash_address; /* Flash BIOS address */
+
+ uint16_t unused_1[0x06];
+
+ /* cdma_* and ddma_* are 1040 only */
+ uint16_t cdma_cfg;
+#define CDMA_CONF_SENAB BIT_3 /* SXP to DMA Data enable */
+#define CDMA_CONF_RIRQ BIT_2 /* RISC interrupt enable */
+#define CDMA_CONF_BENAB BIT_1 /* Bus burst enable */
+#define CDMA_CONF_DIR BIT_0 /* DMA direction (0=fifo->host 1=host->fifo) */
+ uint16_t cdma_ctrl;
+ uint16_t cdma_status;
+ uint16_t cdma_fifo_status;
+ uint16_t cdma_count;
+ uint16_t cdma_reserved;
+ uint16_t cdma_address_count_0;
+ uint16_t cdma_address_count_1;
+ uint16_t cdma_address_count_2;
+ uint16_t cdma_address_count_3;
+
+ uint16_t unused_2[0x06];
+
+ uint16_t ddma_cfg;
+#define DDMA_CONF_SENAB BIT_3 /* SXP to DMA Data enable */
+#define DDMA_CONF_RIRQ BIT_2 /* RISC interrupt enable */
+#define DDMA_CONF_BENAB BIT_1 /* Bus burst enable */
+#define DDMA_CONF_DIR BIT_0 /* DMA direction (0=fifo->host 1=host->fifo) */
+ uint16_t ddma_ctrl;
+ uint16_t ddma_status;
+ uint16_t ddma_fifo_status;
+ uint16_t ddma_xfer_count_low;
+ uint16_t ddma_xfer_count_high;
+ uint16_t ddma_addr_count_0;
+ uint16_t ddma_addr_count_1;
+ uint16_t ddma_addr_count_2;
+ uint16_t ddma_addr_count_3;
+
+ uint16_t unused_3[0x0e];
+
+ uint16_t mailbox0; /* Mailbox 0 */
+ uint16_t mailbox1; /* Mailbox 1 */
+ uint16_t mailbox2; /* Mailbox 2 */
+ uint16_t mailbox3; /* Mailbox 3 */
+ uint16_t mailbox4; /* Mailbox 4 */
+ uint16_t mailbox5; /* Mailbox 5 */
+ uint16_t mailbox6; /* Mailbox 6 */
+ uint16_t mailbox7; /* Mailbox 7 */
+
+ uint16_t unused_4[0x20];/* 0x80-0xbf Gap */
+
+ uint16_t host_cmd; /* Host command and control */
+#define HOST_INT BIT_7 /* host interrupt bit */
+#define BIOS_ENABLE BIT_0
+
+ uint16_t unused_5[0x5]; /* 0xc2-0xcb Gap */
+
+ uint16_t gpio_data;
+ uint16_t gpio_enable;
+
+ uint16_t unused_6[0x11]; /* d0-f0 */
+ uint16_t scsiControlPins; /* f2 */
+};
+
+#define MAILBOX_REGISTER_COUNT 8
+
+/*
+ * ISP product identification definitions in mailboxes after reset.
+ */
+#define PROD_ID_1 0x4953
+#define PROD_ID_2 0x0000
+#define PROD_ID_2a 0x5020
+#define PROD_ID_3 0x2020
+#define PROD_ID_4 0x1
+
+/*
+ * ISP host command and control register command definitions
+ */
+#define HC_RESET_RISC 0x1000 /* Reset RISC */
+#define HC_PAUSE_RISC 0x2000 /* Pause RISC */
+#define HC_RELEASE_RISC 0x3000 /* Release RISC from reset. */
+#define HC_SET_HOST_INT 0x5000 /* Set host interrupt */
+#define HC_CLR_HOST_INT 0x6000 /* Clear HOST interrupt */
+#define HC_CLR_RISC_INT 0x7000 /* Clear RISC interrupt */
+#define HC_DISABLE_BIOS 0x9000 /* Disable BIOS. */
+
+/*
+ * ISP mailbox Self-Test status codes
+ */
+#define MBS_FRM_ALIVE 0 /* Firmware Alive. */
+#define MBS_CHKSUM_ERR 1 /* Checksum Error. */
+#define MBS_SHADOW_LD_ERR 2 /* Shadow Load Error. */
+#define MBS_BUSY 4 /* Busy. */
+
+/*
+ * ISP mailbox command complete status codes
+ */
+#define MBS_CMD_CMP 0x4000 /* Command Complete. */
+#define MBS_INV_CMD 0x4001 /* Invalid Command. */
+#define MBS_HOST_INF_ERR 0x4002 /* Host Interface Error. */
+#define MBS_TEST_FAILED 0x4003 /* Test Failed. */
+#define MBS_CMD_ERR 0x4005 /* Command Error. */
+#define MBS_CMD_PARAM_ERR 0x4006 /* Command Parameter Error. */
+
+/*
+ * ISP mailbox asynchronous event status codes
+ */
+#define MBA_ASYNC_EVENT 0x8000 /* Asynchronous event. */
+#define MBA_BUS_RESET 0x8001 /* SCSI Bus Reset. */
+#define MBA_SYSTEM_ERR 0x8002 /* System Error. */
+#define MBA_REQ_TRANSFER_ERR 0x8003 /* Request Transfer Error. */
+#define MBA_RSP_TRANSFER_ERR 0x8004 /* Response Transfer Error. */
+#define MBA_WAKEUP_THRES 0x8005 /* Request Queue Wake-up. */
+#define MBA_TIMEOUT_RESET 0x8006 /* Execution Timeout Reset. */
+#define MBA_DEVICE_RESET 0x8007 /* Bus Device Reset. */
+#define MBA_BUS_MODE_CHANGE 0x800E /* SCSI bus mode transition. */
+#define MBA_SCSI_COMPLETION 0x8020 /* Completion response. */
+
+/*
+ * ISP mailbox commands
+ */
+#define MBC_NOP 0 /* No Operation */
+#define MBC_LOAD_RAM 1 /* Load RAM */
+#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware */
+#define MBC_DUMP_RAM 3 /* Dump RAM contents */
+#define MBC_WRITE_RAM_WORD 4 /* Write ram word */
+#define MBC_READ_RAM_WORD 5 /* Read ram word */
+#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */
+#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */
+#define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */
+#define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */
+#define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */
+#define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */
+#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command */
+#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN) */
+#define MBC_ABORT_TARGET 0x17 /* Abort target (ID) */
+#define MBC_BUS_RESET 0x18 /* SCSI bus reset */
+#define MBC_GET_RETRY_COUNT 0x22 /* Get retry count and delay */
+#define MBC_GET_TARGET_PARAMETERS 0x28 /* Get target parameters */
+#define MBC_SET_INITIATOR_ID 0x30 /* Set initiator SCSI ID */
+#define MBC_SET_SELECTION_TIMEOUT 0x31 /* Set selection timeout */
+#define MBC_SET_RETRY_COUNT 0x32 /* Set retry count and delay */
+#define MBC_SET_TAG_AGE_LIMIT 0x33 /* Set tag age limit */
+#define MBC_SET_CLOCK_RATE 0x34 /* Set clock rate */
+#define MBC_SET_ACTIVE_NEGATION 0x35 /* Set active negation state */
+#define MBC_SET_ASYNC_DATA_SETUP 0x36 /* Set async data setup time */
+#define MBC_SET_PCI_CONTROL 0x37 /* Set BUS control parameters */
+#define MBC_SET_TARGET_PARAMETERS 0x38 /* Set target parameters */
+#define MBC_SET_DEVICE_QUEUE 0x39 /* Set device queue parameters */
+#define MBC_SET_RESET_DELAY_PARAMETERS 0x3A /* Set reset delay parameters */
+#define MBC_SET_SYSTEM_PARAMETER 0x45 /* Set system parameter word */
+#define MBC_SET_FIRMWARE_FEATURES 0x4A /* Set firmware feature word */
+#define MBC_INIT_REQUEST_QUEUE_A64 0x52 /* Initialize request queue A64 */
+#define MBC_INIT_RESPONSE_QUEUE_A64 0x53 /* Initialize response q A64 */
+#define MBC_ENABLE_TARGET_MODE 0x55 /* Enable target mode */
+#define MBC_SET_DATA_OVERRUN_RECOVERY 0x5A /* Set data overrun recovery mode */
+
+/*
+ * ISP Get/Set Target Parameters mailbox command control flags.
+ */
+#define TP_PPR BIT_5 /* PPR */
+#define TP_RENEGOTIATE BIT_8 /* Renegotiate on error. */
+#define TP_STOP_QUEUE BIT_9 /* Stop que on check condition */
+#define TP_AUTO_REQUEST_SENSE BIT_10 /* Automatic request sense. */
+#define TP_TAGGED_QUEUE BIT_11 /* Tagged queuing. */
+#define TP_SYNC BIT_12 /* Synchronous data transfers. */
+#define TP_WIDE BIT_13 /* Wide data transfers. */
+#define TP_PARITY BIT_14 /* Parity checking. */
+#define TP_DISCONNECT BIT_15 /* Disconnect privilege. */
+
+/*
+ * NVRAM Command values.
+ */
+#define NV_START_BIT BIT_2
+#define NV_WRITE_OP (BIT_26 | BIT_24)
+#define NV_READ_OP (BIT_26 | BIT_25)
+#define NV_ERASE_OP (BIT_26 | BIT_25 | BIT_24)
+#define NV_MASK_OP (BIT_26 | BIT_25 | BIT_24)
+#define NV_DELAY_COUNT 10
+
+/*
+ * QLogic ISP1280/ISP12160 NVRAM structure definition.
+ */
+struct nvram {
+ uint8_t id0; /* 0 */
+ uint8_t id1; /* 1 */
+ uint8_t id2; /* 2 */
+ uint8_t id3; /* 3 */
+ uint8_t version; /* 4 */
+
+ struct {
+ uint8_t bios_configuration_mode:2;
+ uint8_t bios_disable:1;
+ uint8_t selectable_scsi_boot_enable:1;
+ uint8_t cd_rom_boot_enable:1;
+ uint8_t disable_loading_risc_code:1;
+ uint8_t enable_64bit_addressing:1;
+ uint8_t unused_7:1;
+ } cntr_flags_1; /* 5 */
+
+ struct {
+ uint8_t boot_lun_number:5;
+ uint8_t scsi_bus_number:1;
+ uint8_t unused_6:1;
+ uint8_t unused_7:1;
+ } cntr_flags_2l; /* 7 */
+
+ struct {
+ uint8_t boot_target_number:4;
+ uint8_t unused_12:1;
+ uint8_t unused_13:1;
+ uint8_t unused_14:1;
+ uint8_t unused_15:1;
+ } cntr_flags_2h; /* 8 */
+
+ uint16_t unused_8; /* 8, 9 */
+ uint16_t unused_10; /* 10, 11 */
+ uint16_t unused_12; /* 12, 13 */
+ uint16_t unused_14; /* 14, 15 */
+
+ struct {
+ uint8_t reserved:2;
+ uint8_t burst_enable:1;
+ uint8_t reserved_1:1;
+ uint8_t fifo_threshold:4;
+ } isp_config; /* 16 */
+
+ /* Termination
+ * 0 = Disable, 1 = high only, 3 = Auto term
+ */
+ struct {
+ uint8_t scsi_bus_1_control:2;
+ uint8_t scsi_bus_0_control:2;
+ uint8_t unused_0:1;
+ uint8_t unused_1:1;
+ uint8_t unused_2:1;
+ uint8_t auto_term_support:1;
+ } termination; /* 17 */
+
+ uint16_t isp_parameter; /* 18, 19 */
+
+ union {
+ uint16_t w;
+ struct {
+ uint16_t enable_fast_posting:1;
+ uint16_t report_lvd_bus_transition:1;
+ uint16_t unused_2:1;
+ uint16_t unused_3:1;
+ uint16_t disable_iosbs_with_bus_reset_status:1;
+ uint16_t disable_synchronous_backoff:1;
+ uint16_t unused_6:1;
+ uint16_t synchronous_backoff_reporting:1;
+ uint16_t disable_reselection_fairness:1;
+ uint16_t unused_9:1;
+ uint16_t unused_10:1;
+ uint16_t unused_11:1;
+ uint16_t unused_12:1;
+ uint16_t unused_13:1;
+ uint16_t unused_14:1;
+ uint16_t unused_15:1;
+ } f;
+ } firmware_feature; /* 20, 21 */
+
+ uint16_t unused_22; /* 22, 23 */
+
+ struct {
+ struct {
+ uint8_t initiator_id:4;
+ uint8_t scsi_reset_disable:1;
+ uint8_t scsi_bus_size:1;
+ uint8_t scsi_bus_type:1;
+ uint8_t unused_7:1;
+ } config_1; /* 24 */
+
+ uint8_t bus_reset_delay; /* 25 */
+ uint8_t retry_count; /* 26 */
+ uint8_t retry_delay; /* 27 */
+
+ struct {
+ uint8_t async_data_setup_time:4;
+ uint8_t req_ack_active_negation:1;
+ uint8_t data_line_active_negation:1;
+ uint8_t unused_6:1;
+ uint8_t unused_7:1;
+ } config_2; /* 28 */
+
+ uint8_t unused_29; /* 29 */
+
+ uint16_t selection_timeout; /* 30, 31 */
+ uint16_t max_queue_depth; /* 32, 33 */
+
+ uint16_t unused_34; /* 34, 35 */
+ uint16_t unused_36; /* 36, 37 */
+ uint16_t unused_38; /* 38, 39 */
+
+ struct {
+ struct {
+ uint8_t renegotiate_on_error:1;
+ uint8_t stop_queue_on_check:1;
+ uint8_t auto_request_sense:1;
+ uint8_t tag_queuing:1;
+ uint8_t enable_sync:1;
+ uint8_t enable_wide:1;
+ uint8_t parity_checking:1;
+ uint8_t disconnect_allowed:1;
+ } parameter; /* 40 */
+
+ uint8_t execution_throttle; /* 41 */
+ uint8_t sync_period; /* 42 */
+
+ union { /* 43 */
+ uint8_t flags_43;
+ struct {
+ uint8_t sync_offset:4;
+ uint8_t device_enable:1;
+ uint8_t lun_disable:1;
+ uint8_t unused_6:1;
+ uint8_t unused_7:1;
+ } flags1x80;
+ struct {
+ uint8_t sync_offset:5;
+ uint8_t device_enable:1;
+ uint8_t unused_6:1;
+ uint8_t unused_7:1;
+ } flags1x160;
+ } flags;
+ union { /* PPR flags for the 1x160 controllers */
+ uint8_t unused_44;
+ struct {
+ uint8_t ppr_options:4;
+ uint8_t ppr_bus_width:2;
+ uint8_t unused_8:1;
+ uint8_t enable_ppr:1;
+ } flags; /* 44 */
+ } ppr_1x160;
+ uint8_t unused_45; /* 45 */
+ } target[MAX_TARGETS];
+ } bus[MAX_BUSES];
+
+ uint16_t unused_248; /* 248, 249 */
+
+ uint16_t subsystem_id[2]; /* 250, 251, 252, 253 */
+
+ union { /* 254 */
+ uint8_t unused_254;
+ uint8_t system_id_pointer;
+ } sysid_1x160;
+
+ uint8_t chksum; /* 255 */
+};
+
+/*
+ * ISP queue - command entry structure definition.
+ */
+#define MAX_CMDSZ 12 /* SCSI maximum CDB size. */
+struct cmd_entry {
+ uint8_t entry_type; /* Entry type. */
+#define COMMAND_TYPE 1 /* Command entry */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ __le32 handle; /* System handle. */
+ uint8_t lun; /* SCSI LUN */
+ uint8_t target; /* SCSI ID */
+ __le16 cdb_len; /* SCSI command length. */
+ __le16 control_flags; /* Control flags. */
+ __le16 reserved;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ __le32 dseg_0_address; /* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address; /* Data segment 1 address. */
+ __le32 dseg_1_length; /* Data segment 1 length. */
+ __le32 dseg_2_address; /* Data segment 2 address. */
+ __le32 dseg_2_length; /* Data segment 2 length. */
+ __le32 dseg_3_address; /* Data segment 3 address. */
+ __le32 dseg_3_length; /* Data segment 3 length. */
+};
+
+/*
+ * ISP queue - continuation entry structure definition.
+ */
+struct cont_entry {
+ uint8_t entry_type; /* Entry type. */
+#define CONTINUE_TYPE 2 /* Continuation entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved; /* Reserved */
+ __le32 dseg_0_address; /* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address; /* Data segment 1 address. */
+ __le32 dseg_1_length; /* Data segment 1 length. */
+ __le32 dseg_2_address; /* Data segment 2 address. */
+ __le32 dseg_2_length; /* Data segment 2 length. */
+ __le32 dseg_3_address; /* Data segment 3 address. */
+ __le32 dseg_3_length; /* Data segment 3 length. */
+ __le32 dseg_4_address; /* Data segment 4 address. */
+ __le32 dseg_4_length; /* Data segment 4 length. */
+ __le32 dseg_5_address; /* Data segment 5 address. */
+ __le32 dseg_5_length; /* Data segment 5 length. */
+ __le32 dseg_6_address; /* Data segment 6 address. */
+ __le32 dseg_6_length; /* Data segment 6 length. */
+};
+
+/*
+ * ISP queue - status entry structure definition.
+ */
+struct response {
+ uint8_t entry_type; /* Entry type. */
+#define STATUS_TYPE 3 /* Status entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+#define RF_CONT BIT_0 /* Continuation. */
+#define RF_FULL BIT_1 /* Full */
+#define RF_BAD_HEADER BIT_2 /* Bad header. */
+#define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */
+ __le32 handle; /* System handle. */
+ __le16 scsi_status; /* SCSI status. */
+ __le16 comp_status; /* Completion status. */
+ __le16 state_flags; /* State flags. */
+#define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. */
+#define SF_GOT_SENSE BIT_13 /* Got Sense */
+#define SF_GOT_STATUS BIT_12 /* Got Status */
+#define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */
+#define SF_SENT_CDB BIT_10 /* Send CDB */
+#define SF_GOT_TARGET BIT_9 /* */
+#define SF_GOT_BUS BIT_8 /* */
+ __le16 status_flags; /* Status flags. */
+ __le16 time; /* Time. */
+ __le16 req_sense_length;/* Request sense data length. */
+ __le32 residual_length; /* Residual transfer length. */
+ __le16 reserved[4];
+ uint8_t req_sense_data[32]; /* Request sense data. */
+};
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+struct mrk_entry {
+ uint8_t entry_type; /* Entry type. */
+#define MARKER_TYPE 4 /* Marker entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved;
+ uint8_t lun; /* SCSI LUN */
+ uint8_t target; /* SCSI ID */
+ uint8_t modifier; /* Modifier (7-0). */
+#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
+#define MK_SYNC_ID 1 /* Synchronize ID */
+#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */
+ uint8_t reserved_1[53];
+};
+
+/*
+ * ISP queue - extended command entry structure definition.
+ *
+ * Unused by the driver!
+ */
+struct ecmd_entry {
+ uint8_t entry_type; /* Entry type. */
+#define EXTENDED_CMD_TYPE 5 /* Extended command entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System handle. */
+ uint8_t lun; /* SCSI LUN */
+ uint8_t target; /* SCSI ID */
+ __le16 cdb_len; /* SCSI command length. */
+ __le16 control_flags; /* Control flags. */
+ __le16 reserved;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ uint8_t scsi_cdb[88]; /* SCSI command words. */
+};
+
+/*
+ * ISP queue - 64-Bit addressing, command entry structure definition.
+ */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+#define COMMAND_A64_TYPE 9 /* Command A64 entry */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ __le32 handle; /* System handle. */
+ uint8_t lun; /* SCSI LUN */
+ uint8_t target; /* SCSI ID */
+ __le16 cdb_len; /* SCSI command length. */
+ __le16 control_flags; /* Control flags. */
+ __le16 reserved;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ __le32 reserved_1[2]; /* unused */
+ __le32 dseg_0_address[2]; /* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address[2]; /* Data segment 1 address. */
+ __le32 dseg_1_length; /* Data segment 1 length. */
+} cmd_a64_entry_t, request_t;
+
+/*
+ * ISP queue - 64-Bit addressing, continuation entry structure definition.
+ */
+struct cont_a64_entry {
+ uint8_t entry_type; /* Entry type. */
+#define CONTINUE_A64_TYPE 0xA /* Continuation A64 entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ __le32 dseg_0_address[2]; /* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address[2]; /* Data segment 1 address. */
+ __le32 dseg_1_length; /* Data segment 1 length. */
+ __le32 dseg_2_address[2]; /* Data segment 2 address. */
+ __le32 dseg_2_length; /* Data segment 2 length. */
+ __le32 dseg_3_address[2]; /* Data segment 3 address. */
+ __le32 dseg_3_length; /* Data segment 3 length. */
+ __le32 dseg_4_address[2]; /* Data segment 4 address. */
+ __le32 dseg_4_length; /* Data segment 4 length. */
+};
+
+/*
+ * ISP queue - enable LUN entry structure definition.
+ */
+struct elun_entry {
+ uint8_t entry_type; /* Entry type. */
+#define ENABLE_LUN_TYPE 0xB /* Enable LUN entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status not used. */
+ __le32 reserved_2;
+ __le16 lun; /* Bit 15 is bus number. */
+ __le16 reserved_4;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t reserved_5;
+ uint8_t command_count; /* Number of ATIOs allocated. */
+ uint8_t immed_notify_count; /* Number of Immediate Notify */
+ /* entries allocated. */
+ uint8_t group_6_length; /* SCSI CDB length for group 6 */
+ /* commands (2-26). */
+ uint8_t group_7_length; /* SCSI CDB length for group 7 */
+ /* commands (2-26). */
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 reserved_6[20];
+};
+
+/*
+ * ISP queue - modify LUN entry structure definition.
+ *
+ * Unused by the driver!
+ */
+struct modify_lun_entry {
+ uint8_t entry_type; /* Entry type. */
+#define MODIFY_LUN_TYPE 0xC /* Modify LUN entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun; /* SCSI LUN */
+ uint8_t reserved_3;
+ uint8_t operators;
+ uint8_t reserved_4;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t reserved_5;
+ uint8_t command_count; /* Number of ATIOs allocated. */
+ uint8_t immed_notify_count; /* Number of Immediate Notify */
+ /* entries allocated. */
+ __le16 reserved_6;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 reserved_7[20];
+};
+
+/*
+ * ISP queue - immediate notify entry structure definition.
+ */
+struct notify_entry {
+ uint8_t entry_type; /* Entry type. */
+#define IMMED_NOTIFY_TYPE 0xD /* Immediate notify entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun;
+ uint8_t initiator_id;
+ uint8_t reserved_3;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t reserved_4;
+ uint8_t tag_value; /* Received queue tag message value */
+ uint8_t tag_type; /* Received queue tag message type */
+ /* entries allocated. */
+ __le16 seq_id;
+ uint8_t scsi_msg[8]; /* SCSI message not handled by ISP */
+ __le16 reserved_5[8];
+ uint8_t sense_data[18];
+};
+
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ */
+struct nack_entry {
+ uint8_t entry_type; /* Entry type. */
+#define NOTIFY_ACK_TYPE 0xE /* Notify acknowledge entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun;
+ uint8_t initiator_id;
+ uint8_t reserved_3;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t event;
+ __le16 seq_id;
+ __le16 reserved_4[22];
+};
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) entry structure definition.
+ */
+struct atio_entry {
+ uint8_t entry_type; /* Entry type. */
+#define ACCEPT_TGT_IO_TYPE 6 /* Accept target I/O entry. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun;
+ uint8_t initiator_id;
+ uint8_t cdb_len;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t scsi_status;
+ uint8_t tag_value; /* Received queue tag message value */
+ uint8_t tag_type; /* Received queue tag message type */
+ uint8_t cdb[26];
+ uint8_t sense_data[18];
+};
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry structure definition.
+ */
+struct ctio_entry {
+ uint8_t entry_type; /* Entry type. */
+#define CONTINUE_TGT_IO_TYPE 7 /* CTIO entry */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun; /* SCSI LUN */
+ uint8_t initiator_id;
+ uint8_t reserved_3;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t scsi_status;
+ uint8_t tag_value; /* Received queue tag message value */
+ uint8_t tag_type; /* Received queue tag message type */
+ __le32 transfer_length;
+ __le32 residual;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 dseg_0_address; /* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address; /* Data segment 1 address. */
+ __le32 dseg_1_length; /* Data segment 1 length. */
+ __le32 dseg_2_address; /* Data segment 2 address. */
+ __le32 dseg_2_length; /* Data segment 2 length. */
+ __le32 dseg_3_address; /* Data segment 3 address. */
+ __le32 dseg_3_length; /* Data segment 3 length. */
+};
+
+/*
+ * ISP queue - CTIO returned entry structure definition.
+ */
+struct ctio_ret_entry {
+ uint8_t entry_type; /* Entry type. */
+#define CTIO_RET_TYPE 7 /* CTIO return entry */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun; /* SCSI LUN */
+ uint8_t initiator_id;
+ uint8_t reserved_3;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t scsi_status;
+ uint8_t tag_value; /* Received queue tag message value */
+ uint8_t tag_type; /* Received queue tag message type */
+ __le32 transfer_length;
+ __le32 residual;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 dseg_0_address; /* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address; /* Data segment 1 address. */
+ __le16 dseg_1_length; /* Data segment 1 length. */
+ uint8_t sense_data[18];
+};
+
+/*
+ * ISP queue - CTIO A64 entry structure definition.
+ */
+struct ctio_a64_entry {
+ uint8_t entry_type; /* Entry type. */
+#define CTIO_A64_TYPE 0xF /* CTIO A64 entry */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun; /* SCSI LUN */
+ uint8_t initiator_id;
+ uint8_t reserved_3;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t scsi_status;
+ uint8_t tag_value; /* Received queue tag message value */
+ uint8_t tag_type; /* Received queue tag message type */
+ __le32 transfer_length;
+ __le32 residual;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 reserved_4[2];
+ __le32 dseg_0_address[2];/* Data segment 0 address. */
+ __le32 dseg_0_length; /* Data segment 0 length. */
+ __le32 dseg_1_address[2];/* Data segment 1 address. */
+ __le32 dseg_1_length; /* Data segment 1 length. */
+};
+
+/*
+ * ISP queue - CTIO returned entry structure definition.
+ */
+struct ctio_a64_ret_entry {
+ uint8_t entry_type; /* Entry type. */
+#define CTIO_A64_RET_TYPE 0xF /* CTIO A64 returned entry */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t reserved_1;
+ uint8_t entry_status; /* Entry Status. */
+ __le32 reserved_2;
+ uint8_t lun; /* SCSI LUN */
+ uint8_t initiator_id;
+ uint8_t reserved_3;
+ uint8_t target_id;
+ __le32 option_flags;
+ uint8_t status;
+ uint8_t scsi_status;
+ uint8_t tag_value; /* Received queue tag message value */
+ uint8_t tag_type; /* Received queue tag message type */
+ __le32 transfer_length;
+ __le32 residual;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le16 reserved_4[7];
+ uint8_t sense_data[18];
+};
+
+/*
+ * ISP request and response queue entry sizes
+ */
+#define RESPONSE_ENTRY_SIZE (sizeof(struct response))
+#define REQUEST_ENTRY_SIZE (sizeof(request_t))
+
+/*
+ * ISP status entry - completion status definitions.
+ */
+#define CS_COMPLETE 0x0 /* No errors */
+#define CS_INCOMPLETE 0x1 /* Incomplete transfer of cmd. */
+#define CS_DMA 0x2 /* A DMA direction error. */
+#define CS_TRANSPORT 0x3 /* Transport error. */
+#define CS_RESET 0x4 /* SCSI bus reset occurred */
+#define CS_ABORTED 0x5 /* System aborted command. */
+#define CS_TIMEOUT 0x6 /* Timeout error. */
+#define CS_DATA_OVERRUN 0x7 /* Data overrun. */
+#define CS_COMMAND_OVERRUN 0x8 /* Command Overrun. */
+#define CS_STATUS_OVERRUN 0x9 /* Status Overrun. */
+#define CS_BAD_MSG 0xA /* Bad msg after status phase. */
+#define CS_NO_MSG_OUT 0xB /* No msg out after selection. */
+#define CS_EXTENDED_ID 0xC /* Extended ID failed. */
+#define CS_IDE_MSG 0xD /* Target rejected IDE msg. */
+#define CS_ABORT_MSG 0xE /* Target rejected abort msg. */
+#define CS_REJECT_MSG 0xF /* Target rejected reject msg. */
+#define CS_NOP_MSG 0x10 /* Target rejected NOP msg. */
+#define CS_PARITY_MSG 0x11 /* Target rejected parity msg. */
+#define CS_DEV_RESET_MSG 0x12 /* Target rejected dev rst msg. */
+#define CS_ID_MSG 0x13 /* Target rejected ID msg. */
+#define CS_FREE 0x14 /* Unexpected bus free. */
+#define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */
+#define CS_TRANACTION_1 0x18 /* Transaction error 1 */
+#define CS_TRANACTION_2 0x19 /* Transaction error 2 */
+#define CS_TRANACTION_3 0x1a /* Transaction error 3 */
+#define CS_INV_ENTRY_TYPE 0x1b /* Invalid entry type */
+#define CS_DEV_QUEUE_FULL 0x1c /* Device queue full */
+#define CS_PHASED_SKIPPED 0x1d /* SCSI phase skipped */
+#define CS_ARS_FAILED 0x1e /* ARS failed */
+#define CS_LVD_BUS_ERROR 0x21 /* LVD bus error */
+#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
+#define CS_UNKNOWN 0x81 /* Driver defined */
+#define CS_RETRY 0x82 /* Driver defined */
+
+/*
+ * ISP target entries - Option flags bit definitions.
+ */
+#define OF_ENABLE_TAG BIT_1 /* Tagged queue action enable */
+#define OF_DATA_IN BIT_6 /* Data in to initiator */
+ /* (data from target to initiator) */
+#define OF_DATA_OUT BIT_7 /* Data out from initiator */
+ /* (data from initiator to target) */
+#define OF_NO_DATA (BIT_7 | BIT_6)
+#define OF_DISC_DISABLED BIT_15 /* Disconnects disabled */
+#define OF_DISABLE_SDP BIT_24 /* Disable sending save data ptr */
+#define OF_SEND_RDP BIT_26 /* Send restore data pointers msg */
+#define OF_FORCE_DISC BIT_30 /* Disconnects mandatory */
+#define OF_SSTS BIT_31 /* Send SCSI status */
+
+
+/*
+ * BUS parameters/settings structure - UNUSED
+ */
+struct bus_param {
+ uint8_t id; /* Host adapter SCSI id */
+ uint8_t bus_reset_delay; /* SCSI bus reset delay. */
+ uint8_t failed_reset_count; /* number of time reset failed */
+ uint8_t unused;
+ uint16_t device_enables; /* Device enable bits. */
+ uint16_t lun_disables; /* LUN disable bits. */
+ uint16_t qtag_enables; /* Tag queue enables. */
+ uint16_t hiwat; /* High water mark per device. */
+ uint8_t reset_marker:1;
+ uint8_t disable_scsi_reset:1;
+ uint8_t scsi_bus_dead:1; /* SCSI Bus is Dead, when 5 back to back resets failed */
+};
+
+
+struct qla_driver_setup {
+ uint32_t no_sync:1;
+ uint32_t no_wide:1;
+ uint32_t no_ppr:1;
+ uint32_t no_nvram:1;
+ uint16_t sync_mask;
+ uint16_t wide_mask;
+ uint16_t ppr_mask;
+};
+
+
+/*
+ * Linux Host Adapter structure
+ */
+struct scsi_qla_host {
+ /* Linux adapter configuration data */
+ struct Scsi_Host *host; /* pointer to host data */
+ struct scsi_qla_host *next;
+ struct device_reg __iomem *iobase; /* Base Memory-mapped I/O address */
+
+ unsigned char __iomem *mmpbase; /* memory mapped address */
+ unsigned long host_no;
+ struct pci_dev *pdev;
+ uint8_t devnum;
+ uint8_t revision;
+ uint8_t ports;
+
+ unsigned long actthreads;
+ unsigned long isr_count; /* Interrupt count */
+ unsigned long spurious_int;
+
+ /* Outstandings ISP commands. */
+ struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+
+ /* BUS configuration data */
+ struct bus_param bus_settings[MAX_BUSES];
+
+ /* Received ISP mailbox data. */
+ volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+
+ dma_addr_t request_dma; /* Physical Address */
+ request_t *request_ring; /* Base virtual address */
+ request_t *request_ring_ptr; /* Current address. */
+ uint16_t req_ring_index; /* Current index. */
+ uint16_t req_q_cnt; /* Number of available entries. */
+
+ dma_addr_t response_dma; /* Physical address. */
+ struct response *response_ring; /* Base virtual address */
+ struct response *response_ring_ptr; /* Current address. */
+ uint16_t rsp_ring_index; /* Current index. */
+
+ struct list_head done_q; /* Done queue */
+
+ struct completion *mailbox_wait;
+
+ volatile struct {
+ uint32_t online:1; /* 0 */
+ uint32_t reset_marker:1; /* 1 */
+ uint32_t disable_host_adapter:1; /* 2 */
+ uint32_t reset_active:1; /* 3 */
+ uint32_t abort_isp_active:1; /* 4 */
+ uint32_t disable_risc_code_load:1; /* 5 */
+#ifdef __ia64__
+ uint32_t use_pci_vchannel:1;
+#endif
+ } flags;
+
+ struct nvram nvram;
+ int nvram_valid;
+
+ /* Firmware Info */
+ unsigned short fwstart; /* start address for F/W */
+ unsigned char fwver1; /* F/W version first char */
+ unsigned char fwver2; /* F/W version second char */
+ unsigned char fwver3; /* F/W version third char */
+};
+
+#endif /* _QLA1280_H */
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
new file mode 100644
index 000000000..1bfd82367
--- /dev/null
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -0,0 +1,17 @@
+config SCSI_QLA_FC
+ tristate "QLogic QLA2XXX Fibre Channel Support"
+ depends on PCI && SCSI
+ depends on SCSI_FC_ATTRS
+ select FW_LOADER
+ ---help---
+ This qla2xxx driver supports all QLogic Fibre Channel
+ PCI and PCIe host adapters.
+
+ /*(DEBLOBBED)*/
+ tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+ depends on SCSI_QLA_FC && TARGET_CORE
+ depends on LIBFC
+ select BTREE
+ default n
+ ---help---
+ Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
new file mode 100644
index 000000000..44def6bb4
--- /dev/null
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -0,0 +1,6 @@
+qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
+
+obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
new file mode 100644
index 000000000..82b92c414
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -0,0 +1,2323 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+static int qla24xx_vport_disable(struct fc_vport *, bool);
+
+/* SYSFS attributes --------------------------------------------------------- */
+
+static ssize_t
+qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+
+ if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+ return 0;
+
+ if (IS_P3P_TYPE(ha)) {
+ if (off < ha->md_template_size) {
+ rval = memory_read_from_buffer(buf, count,
+ &off, ha->md_tmplt_hdr, ha->md_template_size);
+ return rval;
+ }
+ off -= ha->md_template_size;
+ rval = memory_read_from_buffer(buf, count,
+ &off, ha->md_dump, ha->md_dump_size);
+ return rval;
+ } else if (ha->mctp_dumped && ha->mctp_dump_reading)
+ return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+ MCTP_DUMP_SIZE);
+ else if (ha->fw_dump_reading)
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_len);
+ else
+ return 0;
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ int reading;
+
+ if (off != 0)
+ return (0);
+
+ reading = simple_strtol(buf, NULL, 10);
+ switch (reading) {
+ case 0:
+ if (!ha->fw_dump_reading)
+ break;
+
+ ql_log(ql_log_info, vha, 0x705d,
+ "Firmware dump cleared on (%ld).\n", vha->host_no);
+
+ if (IS_P3P_TYPE(ha)) {
+ qla82xx_md_free(vha);
+ qla82xx_md_prep(vha);
+ }
+ ha->fw_dump_reading = 0;
+ ha->fw_dumped = 0;
+ break;
+ case 1:
+ if (ha->fw_dumped && !ha->fw_dump_reading) {
+ ha->fw_dump_reading = 1;
+
+ ql_log(ql_log_info, vha, 0x705e,
+ "Raw firmware dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
+ case 2:
+ qla2x00_alloc_fw_dump(vha);
+ break;
+ case 3:
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ } else
+ qla2x00_system_error(vha);
+ break;
+ case 4:
+ if (IS_P3P_TYPE(ha)) {
+ if (ha->md_tmplt_hdr)
+ ql_dbg(ql_dbg_user, vha, 0x705b,
+ "MiniDump supported with this firmware.\n");
+ else
+ ql_dbg(ql_dbg_user, vha, 0x709d,
+ "MiniDump not supported with this firmware.\n");
+ }
+ break;
+ case 5:
+ if (IS_P3P_TYPE(ha))
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ case 6:
+ if (!ha->mctp_dump_reading)
+ break;
+ ql_log(ql_log_info, vha, 0x70c1,
+ "MCTP dump cleared on (%ld).\n", vha->host_no);
+ ha->mctp_dump_reading = 0;
+ ha->mctp_dumped = 0;
+ break;
+ case 7:
+ if (ha->mctp_dumped && !ha->mctp_dump_reading) {
+ ha->mctp_dump_reading = 1;
+ ql_log(ql_log_info, vha, 0x70c2,
+ "Raw mctp dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
+ }
+ return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+ .attr = {
+ .name = "fw_dump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_fw_dump,
+ .write = qla2x00_sysfs_write_fw_dump,
+};
+
+static ssize_t
+qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fw_dump_template || !ha->fw_dump_template_len)
+ return 0;
+
+ ql_dbg(ql_dbg_user, vha, 0x70e2,
+ "chunk <- off=%llx count=%zx\n", off, count);
+ return memory_read_from_buffer(buf, count, &off,
+ ha->fw_dump_template, ha->fw_dump_template_len);
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t size;
+
+ if (off == 0) {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+
+ ha->fw_dump = NULL;
+ ha->fw_dump_len = 0;
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ size = qla27xx_fwdt_template_size(buf);
+ ql_dbg(ql_dbg_user, vha, 0x70d1,
+ "-> allocating fwdt (%x bytes)...\n", size);
+ ha->fw_dump_template = vmalloc(size);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x70d2,
+ "Failed allocate fwdt (%x bytes).\n", size);
+ return -ENOMEM;
+ }
+ ha->fw_dump_template_len = size;
+ }
+
+ if (off + count > ha->fw_dump_template_len) {
+ count = ha->fw_dump_template_len - off;
+ ql_dbg(ql_dbg_user, vha, 0x70d3,
+ "chunk -> truncating to %zx bytes.\n", count);
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70d4,
+ "chunk -> off=%llx count=%zx\n", off, count);
+ memcpy(ha->fw_dump_template + off, buf, count);
+
+ if (off + count == ha->fw_dump_template_len) {
+ size = qla27xx_fwdt_calculate_dump_size(vha);
+ ql_dbg(ql_dbg_user, vha, 0x70d5,
+ "-> allocating fwdump (%x bytes)...\n", size);
+ ha->fw_dump = vmalloc(size);
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0x70d6,
+ "Failed allocate fwdump (%x bytes).\n", size);
+ return -ENOMEM;
+ }
+ ha->fw_dump_len = size;
+ }
+
+ return count;
+}
+static struct bin_attribute sysfs_fw_dump_template_attr = {
+ .attr = {
+ .name = "fw_dump_template",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_fw_dump_template,
+ .write = qla2x00_sysfs_write_fw_dump_template,
+};
+
+static ssize_t
+qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (IS_NOCACHE_VPD_TYPE(ha))
+ ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+ ha->nvram_size);
+ return memory_read_from_buffer(buf, count, &off, ha->nvram,
+ ha->nvram_size);
+}
+
+static ssize_t
+qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t cnt;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
+ !ha->isp_ops->write_nvram)
+ return -EINVAL;
+
+ /* Checksum NVRAM. */
+ if (IS_FWI2_CAPABLE(ha)) {
+ uint32_t *iter;
+ uint32_t chksum;
+
+ iter = (uint32_t *)buf;
+ chksum = 0;
+ for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
+ chksum += le32_to_cpu(*iter++);
+ chksum = ~chksum + 1;
+ *iter = cpu_to_le32(chksum);
+ } else {
+ uint8_t *iter;
+ uint8_t chksum;
+
+ iter = (uint8_t *)buf;
+ chksum = 0;
+ for (cnt = 0; cnt < count - 1; cnt++)
+ chksum += *iter++;
+ chksum = ~chksum + 1;
+ *iter = chksum;
+ }
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x705f,
+ "HBA not online, failing NVRAM update.\n");
+ return -EAGAIN;
+ }
+
+ /* Write NVRAM. */
+ ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
+ ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
+ count);
+
+ ql_dbg(ql_dbg_user, vha, 0x7060,
+ "Setting ISP_ABORT_NEEDED\n");
+ /* NVRAM settings take effect immediately. */
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 512,
+ .read = qla2x00_sysfs_read_nvram,
+ .write = qla2x00_sysfs_write_nvram,
+};
+
+static ssize_t
+qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ ssize_t rval = 0;
+
+ if (ha->optrom_state != QLA_SREADING)
+ return 0;
+
+ mutex_lock(&ha->optrom_mutex);
+ rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ ha->optrom_region_size);
+ mutex_unlock(&ha->optrom_mutex);
+
+ return rval;
+}
+
+static ssize_t
+qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->optrom_state != QLA_SWRITING)
+ return -EINVAL;
+ if (off > ha->optrom_region_size)
+ return -ERANGE;
+ if (off + count > ha->optrom_region_size)
+ count = ha->optrom_region_size - off;
+
+ mutex_lock(&ha->optrom_mutex);
+ memcpy(&ha->optrom_buffer[off], buf, count);
+ mutex_unlock(&ha->optrom_mutex);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_optrom_attr = {
+ .attr = {
+ .name = "optrom",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_optrom,
+ .write = qla2x00_sysfs_write_optrom,
+};
+
+static ssize_t
+qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t start = 0;
+ uint32_t size = ha->optrom_size;
+ int val, valid;
+ ssize_t rval = count;
+
+ if (off)
+ return -EINVAL;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return -EAGAIN;
+
+ if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
+ return -EINVAL;
+ if (start > ha->optrom_size)
+ return -EINVAL;
+
+ mutex_lock(&ha->optrom_mutex);
+ switch (val) {
+ case 0:
+ if (ha->optrom_state != QLA_SREADING &&
+ ha->optrom_state != QLA_SWRITING) {
+ rval = -EINVAL;
+ goto out;
+ }
+ ha->optrom_state = QLA_SWAITING;
+
+ ql_dbg(ql_dbg_user, vha, 0x7061,
+ "Freeing flash region allocation -- 0x%x bytes.\n",
+ ha->optrom_region_size);
+
+ vfree(ha->optrom_buffer);
+ ha->optrom_buffer = NULL;
+ break;
+ case 1:
+ if (ha->optrom_state != QLA_SWAITING) {
+ rval = -EINVAL;
+ goto out;
+ }
+
+ ha->optrom_region_start = start;
+ ha->optrom_region_size = start + size > ha->optrom_size ?
+ ha->optrom_size - start : size;
+
+ ha->optrom_state = QLA_SREADING;
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ if (ha->optrom_buffer == NULL) {
+ ql_log(ql_log_warn, vha, 0x7062,
+ "Unable to allocate memory for optrom retrieval "
+ "(%x).\n", ha->optrom_region_size);
+
+ ha->optrom_state = QLA_SWAITING;
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7063,
+ "HBA not online, failing NVRAM update.\n");
+ rval = -EAGAIN;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x7064,
+ "Reading flash region -- 0x%x/0x%x.\n",
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+ ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+ ha->optrom_region_start, ha->optrom_region_size);
+ break;
+ case 2:
+ if (ha->optrom_state != QLA_SWAITING) {
+ rval = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * We need to be more restrictive on which FLASH regions are
+ * allowed to be updated via user-space. Regions accessible
+ * via this method include:
+ *
+ * ISP21xx/ISP22xx/ISP23xx type boards:
+ *
+ * 0x000000 -> 0x020000 -- Boot code.
+ *
+ * ISP2322/ISP24xx type boards:
+ *
+ * 0x000000 -> 0x07ffff -- Boot code.
+ * 0x080000 -> 0x0fffff -- Firmware.
+ *
+ * ISP25xx type boards:
+ *
+ * 0x000000 -> 0x07ffff -- Boot code.
+ * 0x080000 -> 0x0fffff -- Firmware.
+ * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
+ */
+ valid = 0;
+ if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+ valid = 1;
+ else if (start == (ha->flt_region_boot * 4) ||
+ start == (ha->flt_region_fw * 4))
+ valid = 1;
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
+ || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
+ || IS_QLA27XX(ha))
+ valid = 1;
+ if (!valid) {
+ ql_log(ql_log_warn, vha, 0x7065,
+ "Invalid start region 0x%x/0x%x.\n", start, size);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ ha->optrom_region_start = start;
+ ha->optrom_region_size = start + size > ha->optrom_size ?
+ ha->optrom_size - start : size;
+
+ ha->optrom_state = QLA_SWRITING;
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ if (ha->optrom_buffer == NULL) {
+ ql_log(ql_log_warn, vha, 0x7066,
+ "Unable to allocate memory for optrom update "
+ "(%x)\n", ha->optrom_region_size);
+
+ ha->optrom_state = QLA_SWAITING;
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x7067,
+ "Staging flash region write -- 0x%x/0x%x.\n",
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+ break;
+ case 3:
+ if (ha->optrom_state != QLA_SWRITING) {
+ rval = -EINVAL;
+ goto out;
+ }
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7068,
+ "HBA not online, failing flash update.\n");
+ rval = -EAGAIN;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x7069,
+ "Writing flash region -- 0x%x/0x%x.\n",
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ ha->optrom_region_start, ha->optrom_region_size);
+ break;
+ default:
+ rval = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&ha->optrom_mutex);
+ return rval;
+}
+
+static struct bin_attribute sysfs_optrom_ctl_attr = {
+ .attr = {
+ .name = "optrom_ctl",
+ .mode = S_IWUSR,
+ },
+ .size = 0,
+ .write = qla2x00_sysfs_write_optrom_ctl,
+};
+
+static ssize_t
+qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return -EAGAIN;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ if (IS_NOCACHE_VPD_TYPE(ha))
+ ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+ ha->vpd_size);
+ return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
+}
+
+static ssize_t
+qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t *tmp_data;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
+ !ha->isp_ops->write_nvram)
+ return 0;
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x706a,
+ "HBA not online, failing VPD update.\n");
+ return -EAGAIN;
+ }
+
+ /* Write NVRAM. */
+ ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
+ ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+
+ /* Update flash version information for 4Gb & above. */
+ if (!IS_FWI2_CAPABLE(ha))
+ return -EINVAL;
+
+ tmp_data = vmalloc(256);
+ if (!tmp_data) {
+ ql_log(ql_log_warn, vha, 0x706b,
+ "Unable to allocate memory for VPD information update.\n");
+ return -ENOMEM;
+ }
+ ha->isp_ops->get_flash_version(vha, tmp_data);
+ vfree(tmp_data);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_vpd_attr = {
+ .attr = {
+ .name = "vpd",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_vpd,
+ .write = qla2x00_sysfs_write_vpd,
+};
+
+static ssize_t
+qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t iter, addr, offset;
+ int rval;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
+ return 0;
+
+ if (ha->sfp_data)
+ goto do_read;
+
+ ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->sfp_data_dma);
+ if (!ha->sfp_data) {
+ ql_log(ql_log_warn, vha, 0x706c,
+ "Unable to allocate memory for SFP read-data.\n");
+ return 0;
+ }
+
+do_read:
+ memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
+ addr = 0xa0;
+ for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
+ iter++, offset += SFP_BLOCK_SIZE) {
+ if (iter == 4) {
+ /* Skip to next device address. */
+ addr = 0xa2;
+ offset = 0;
+ }
+
+ rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
+ addr, offset, SFP_BLOCK_SIZE, BIT_1);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x706d,
+ "Unable to read SFP data (%x/%x/%x).\n", rval,
+ addr, offset);
+
+ return -EIO;
+ }
+ memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
+ buf += SFP_BLOCK_SIZE;
+ }
+
+ return count;
+}
+
+static struct bin_attribute sysfs_sfp_attr = {
+ .attr = {
+ .name = "sfp",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = SFP_DEV_SIZE * 2,
+ .read = qla2x00_sysfs_read_sfp,
+};
+
+static ssize_t
+qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int type;
+ uint32_t idc_control;
+ uint8_t *tmp_data = NULL;
+ if (off != 0)
+ return -EINVAL;
+
+ type = simple_strtol(buf, NULL, 10);
+ switch (type) {
+ case 0x2025c:
+ ql_log(ql_log_info, vha, 0x706e,
+ "Issuing ISP reset.\n");
+
+ scsi_block_requests(vha->host);
+ if (IS_QLA82XX(ha)) {
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ idc_control = qla8044_rd_reg(ha,
+ QLA8044_IDC_DRV_CTRL);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_control | GRACEFUL_RESET_BIT1));
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ } else {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+ break;
+ case 0x2025d:
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ return -EPERM;
+
+ ql_log(ql_log_info, vha, 0x706f,
+ "Issuing MPI reset.\n");
+
+ if (IS_QLA83XX(ha)) {
+ uint32_t idc_control;
+
+ qla83xx_idc_lock(vha, 0);
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
+ __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+ qla83xx_idc_unlock(vha, 0);
+ break;
+ } else {
+ /* Make sure FC side is not in reset */
+ qla2x00_wait_for_hba_online(vha);
+
+ /* Issue MPI reset */
+ scsi_block_requests(vha->host);
+ if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x7070,
+ "MPI reset failed.\n");
+ scsi_unblock_requests(vha->host);
+ break;
+ }
+ case 0x2025e:
+ if (!IS_P3P_TYPE(ha) || vha != base_vha) {
+ ql_log(ql_log_info, vha, 0x7071,
+ "FCoE ctx reset no supported.\n");
+ return -EPERM;
+ }
+
+ ql_log(ql_log_info, vha, 0x7072,
+ "Issuing FCoE ctx reset.\n");
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_fcoe_ctx_reset(vha);
+ break;
+ case 0x2025f:
+ if (!IS_QLA8031(ha))
+ return -EPERM;
+ ql_log(ql_log_info, vha, 0x70bc,
+ "Disabling Reset by IDC control\n");
+ qla83xx_idc_lock(vha, 0);
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control |= QLA83XX_IDC_RESET_DISABLED;
+ __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_idc_unlock(vha, 0);
+ break;
+ case 0x20260:
+ if (!IS_QLA8031(ha))
+ return -EPERM;
+ ql_log(ql_log_info, vha, 0x70bd,
+ "Enabling Reset by IDC control\n");
+ qla83xx_idc_lock(vha, 0);
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
+ __qla83xx_set_idc_control(vha, idc_control);
+ qla83xx_idc_unlock(vha, 0);
+ break;
+ case 0x20261:
+ ql_dbg(ql_dbg_user, vha, 0x70e0,
+ "Updating cache versions without reset ");
+
+ tmp_data = vmalloc(256);
+ if (!tmp_data) {
+ ql_log(ql_log_warn, vha, 0x70e1,
+ "Unable to allocate memory for VPD information update.\n");
+ return -ENOMEM;
+ }
+ ha->isp_ops->get_flash_version(vha, tmp_data);
+ vfree(tmp_data);
+ break;
+ }
+ return count;
+}
+
+static struct bin_attribute sysfs_reset_attr = {
+ .attr = {
+ .name = "reset",
+ .mode = S_IWUSR,
+ },
+ .size = 0,
+ .write = qla2x00_sysfs_write_reset,
+};
+
+static ssize_t
+qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint16_t actual_size;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
+ return 0;
+
+ if (ha->xgmac_data)
+ goto do_read;
+
+ ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+ &ha->xgmac_data_dma, GFP_KERNEL);
+ if (!ha->xgmac_data) {
+ ql_log(ql_log_warn, vha, 0x7076,
+ "Unable to allocate memory for XGMAC read-data.\n");
+ return 0;
+ }
+
+do_read:
+ actual_size = 0;
+ memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
+
+ rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
+ XGMAC_DATA_SIZE, &actual_size);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7077,
+ "Unable to read XGMAC data (%x).\n", rval);
+ count = 0;
+ }
+
+ count = actual_size > count ? count: actual_size;
+ memcpy(buf, ha->xgmac_data, count);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_xgmac_stats_attr = {
+ .attr = {
+ .name = "xgmac_stats",
+ .mode = S_IRUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_xgmac_stats,
+};
+
+static ssize_t
+qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint16_t actual_size;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
+ return 0;
+
+ if (ha->dcbx_tlv)
+ goto do_read;
+
+ ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+ &ha->dcbx_tlv_dma, GFP_KERNEL);
+ if (!ha->dcbx_tlv) {
+ ql_log(ql_log_warn, vha, 0x7078,
+ "Unable to allocate memory for DCBX TLV read-data.\n");
+ return -ENOMEM;
+ }
+
+do_read:
+ actual_size = 0;
+ memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
+
+ rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
+ DCBX_TLV_DATA_SIZE);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7079,
+ "Unable to read DCBX TLV (%x).\n", rval);
+ return -EIO;
+ }
+
+ memcpy(buf, ha->dcbx_tlv, count);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_dcbx_tlv_attr = {
+ .attr = {
+ .name = "dcbx_tlv",
+ .mode = S_IRUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_dcbx_tlv,
+};
+
+static struct sysfs_entry {
+ char *name;
+ struct bin_attribute *attr;
+ int is4GBp_only;
+} bin_file_entries[] = {
+ { "fw_dump", &sysfs_fw_dump_attr, },
+ { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
+ { "nvram", &sysfs_nvram_attr, },
+ { "optrom", &sysfs_optrom_attr, },
+ { "optrom_ctl", &sysfs_optrom_ctl_attr, },
+ { "vpd", &sysfs_vpd_attr, 1 },
+ { "sfp", &sysfs_sfp_attr, 1 },
+ { "reset", &sysfs_reset_attr, },
+ { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
+ { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
+ { NULL },
+};
+
+void
+qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
+{
+ struct Scsi_Host *host = vha->host;
+ struct sysfs_entry *iter;
+ int ret;
+
+ for (iter = bin_file_entries; iter->name; iter++) {
+ if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
+ continue;
+ if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
+ continue;
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+ continue;
+ if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ continue;
+
+ ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ ql_log(ql_log_warn, vha, 0x00f3,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x00f4,
+ "Successfully created sysfs %s binary attribure.\n",
+ iter->name);
+ }
+}
+
+void
+qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
+{
+ struct Scsi_Host *host = vha->host;
+ struct sysfs_entry *iter;
+ struct qla_hw_data *ha = vha->hw;
+
+ for (iter = bin_file_entries; iter->name; iter++) {
+ if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+ continue;
+ if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
+ continue;
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+ continue;
+ if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ continue;
+
+ sysfs_remove_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+ }
+
+ if (stop_beacon && ha->beacon_blink_led == 1)
+ ha->isp_ops->beacon_off(vha);
+}
+
+/* Scsi_Host attributes. */
+
+static ssize_t
+qla2x00_drvr_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
+}
+
+static ssize_t
+qla2x00_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ char fw_str[128];
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
+}
+
+static ssize_t
+qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t sn;
+
+ if (IS_QLAFX00(vha->hw)) {
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->mr.serial_num);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
+ return strlen(strcat(buf, "\n"));
+ }
+
+ sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
+ return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
+ sn % 100000);
+}
+
+static ssize_t
+qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
+}
+
+static ssize_t
+qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLAFX00(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->mr.hw_version);
+
+ return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
+ ha->product_id[0], ha->product_id[1], ha->product_id[2],
+ ha->product_id[3]);
+}
+
+static ssize_t
+qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
+}
+
+static ssize_t
+qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->model_desc ? vha->hw->model_desc : "");
+}
+
+static ssize_t
+qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ char pci_info[30];
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->isp_ops->pci_info_str(vha, pci_info));
+}
+
+static ssize_t
+qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ int len = 0;
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
+ atomic_read(&vha->loop_state) == LOOP_DEAD ||
+ vha->device_flags & DFLG_NO_CABLE)
+ len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
+ else if (atomic_read(&vha->loop_state) != LOOP_READY ||
+ qla2x00_reset_active(vha))
+ len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
+ else {
+ len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
+
+ switch (ha->current_topology) {
+ case ISP_CFG_NL:
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
+ break;
+ case ISP_CFG_FL:
+ len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
+ break;
+ case ISP_CFG_N:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "N_Port to N_Port\n");
+ break;
+ case ISP_CFG_F:
+ len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
+ break;
+ default:
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
+ break;
+ }
+ }
+ return len;
+}
+
+static ssize_t
+qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ switch (vha->hw->zio_mode) {
+ case QLA_ZIO_MODE_6:
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
+ break;
+ case QLA_ZIO_DISABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
+ break;
+ }
+ return len;
+}
+
+static ssize_t
+qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ int val = 0;
+ uint16_t zio_mode;
+
+ if (!IS_ZIO_SUPPORTED(ha))
+ return -ENOTSUPP;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val)
+ zio_mode = QLA_ZIO_MODE_6;
+ else
+ zio_mode = QLA_ZIO_DISABLED;
+
+ /* Update per-hba values and queue a reset. */
+ if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
+ ha->zio_mode = zio_mode;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ return strlen(buf);
+}
+
+static ssize_t
+qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
+}
+
+static ssize_t
+qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+ uint16_t zio_timer;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+ if (val > 25500 || val < 100)
+ return -ERANGE;
+
+ zio_timer = (uint16_t)(val / 100);
+ vha->hw->zio_timer = zio_timer;
+
+ return strlen(buf);
+}
+
+static ssize_t
+qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ if (vha->hw->beacon_blink_led)
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
+ else
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
+ return len;
+}
+
+static ssize_t
+qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ int val = 0;
+ int rval;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return -EPERM;
+
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x707a,
+ "Abort ISP active -- ignoring beacon request.\n");
+ return -EBUSY;
+ }
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val)
+ rval = ha->isp_ops->beacon_on(vha);
+ else
+ rval = ha->isp_ops->beacon_off(vha);
+
+ if (rval != QLA_SUCCESS)
+ count = 0;
+
+ return count;
+}
+
+static ssize_t
+qla2x00_optrom_bios_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
+ ha->bios_revision[0]);
+}
+
+static ssize_t
+qla2x00_optrom_efi_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
+ ha->efi_revision[0]);
+}
+
+static ssize_t
+qla2x00_optrom_fcode_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
+ ha->fcode_revision[0]);
+}
+
+static ssize_t
+qla2x00_optrom_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
+ ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
+ ha->fw_revision[3]);
+}
+
+static ssize_t
+qla2x00_optrom_gold_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
+ ha->gold_fw_version[0], ha->gold_fw_version[1],
+ ha->gold_fw_version[2], ha->gold_fw_version[3]);
+}
+
+static ssize_t
+qla2x00_total_isp_aborts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ vha->qla_stats.total_isp_aborts);
+}
+
+static ssize_t
+qla24xx_84xx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t status[2] = {0, 0};
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA84XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ if (ha->cs84xx->op_fw_version == 0)
+ rval = qla84xx_verify_chip(vha, status);
+
+ if ((rval == QLA_SUCCESS) && (status[0] == 0))
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (uint32_t)ha->cs84xx->op_fw_version);
+
+ return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
+qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
+ ha->mpi_capabilities);
+}
+
+static ssize_t
+qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+ ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
+}
+
+static ssize_t
+qla2x00_flash_block_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
+}
+
+static ssize_t
+qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_CNA_CAPABLE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
+}
+
+static ssize_t
+qla2x00_vn_port_mac_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_CNA_CAPABLE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
+}
+
+static ssize_t
+qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
+}
+
+static ssize_t
+qla2x00_thermal_temp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ uint16_t temp = 0;
+
+ if (qla2x00_reset_active(vha)) {
+ ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+ goto done;
+ }
+
+ if (vha->hw->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+ goto done;
+ }
+
+ if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
+
+done:
+ return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
+qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int rval = QLA_FUNCTION_FAILED;
+ uint16_t state[6];
+ uint32_t pstate;
+
+ if (IS_QLAFX00(vha->hw)) {
+ pstate = qlafx00_fw_state_show(dev, attr, buf);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
+ }
+
+ if (qla2x00_reset_active(vha))
+ ql_log(ql_log_warn, vha, 0x707c,
+ "ISP reset active.\n");
+ else if (!vha->hw->flags.eeh_busy)
+ rval = qla2x00_get_firmware_state(vha, state);
+ if (rval != QLA_SUCCESS)
+ memset(state, -1, sizeof(state));
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ state[0], state[1], state[2], state[3], state[4], state[5]);
+}
+
+static ssize_t
+qla2x00_diag_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_BIDI_CAPABLE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
+}
+
+static ssize_t
+qla2x00_diag_megabytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_BIDI_CAPABLE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
+ vha->bidi_stats.transfer_bytes >> 20);
+}
+
+static ssize_t
+qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t size;
+
+ if (!ha->fw_dumped)
+ size = 0;
+ else if (IS_P3P_TYPE(ha))
+ size = ha->md_template_size + ha->md_dump_size;
+ else
+ size = ha->fw_dump_len;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_P3P_TYPE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+ else
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->allow_cna_fw_dump ? "true" : "false");
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (!IS_P3P_TYPE(vha->hw))
+ return -EINVAL;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ vha->hw->allow_cna_fw_dump = val != 0;
+
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
+static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
+static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
+static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
+static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
+static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
+static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
+static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
+static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
+ qla2x00_zio_timer_store);
+static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
+ qla2x00_beacon_store);
+static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
+ qla2x00_optrom_bios_version_show, NULL);
+static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
+ qla2x00_optrom_efi_version_show, NULL);
+static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
+ qla2x00_optrom_fcode_version_show, NULL);
+static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
+ NULL);
+static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
+ qla2x00_optrom_gold_fw_version_show, NULL);
+static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+ NULL);
+static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
+ NULL);
+static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
+static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
+static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
+ NULL);
+static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
+static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
+ qla2x00_vn_port_mac_address_show, NULL);
+static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
+static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
+static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
+static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
+static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
+static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
+ qla2x00_allow_cna_fw_dump_show,
+ qla2x00_allow_cna_fw_dump_store);
+
+struct device_attribute *qla2x00_host_attrs[] = {
+ &dev_attr_driver_version,
+ &dev_attr_fw_version,
+ &dev_attr_serial_num,
+ &dev_attr_isp_name,
+ &dev_attr_isp_id,
+ &dev_attr_model_name,
+ &dev_attr_model_desc,
+ &dev_attr_pci_info,
+ &dev_attr_link_state,
+ &dev_attr_zio,
+ &dev_attr_zio_timer,
+ &dev_attr_beacon,
+ &dev_attr_optrom_bios_version,
+ &dev_attr_optrom_efi_version,
+ &dev_attr_optrom_fcode_version,
+ &dev_attr_optrom_fw_version,
+ &dev_attr_84xx_fw_version,
+ &dev_attr_total_isp_aborts,
+ &dev_attr_mpi_version,
+ &dev_attr_phy_version,
+ &dev_attr_flash_block_size,
+ &dev_attr_vlan_id,
+ &dev_attr_vn_port_mac_address,
+ &dev_attr_fabric_param,
+ &dev_attr_fw_state,
+ &dev_attr_optrom_gold_fw_version,
+ &dev_attr_thermal_temp,
+ &dev_attr_diag_requests,
+ &dev_attr_diag_megabytes,
+ &dev_attr_fw_dump_size,
+ &dev_attr_allow_cna_fw_dump,
+ NULL,
+};
+
+/* Host attributes. */
+
+static void
+qla2x00_get_host_port_id(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
+ vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
+}
+
+static void
+qla2x00_get_host_speed(struct Scsi_Host *shost)
+{
+ struct qla_hw_data *ha = ((struct scsi_qla_host *)
+ (shost_priv(shost)))->hw;
+ u32 speed = FC_PORTSPEED_UNKNOWN;
+
+ if (IS_QLAFX00(ha)) {
+ qlafx00_get_host_speed(shost);
+ return;
+ }
+
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ speed = FC_PORTSPEED_1GBIT;
+ break;
+ case PORT_SPEED_2GB:
+ speed = FC_PORTSPEED_2GBIT;
+ break;
+ case PORT_SPEED_4GB:
+ speed = FC_PORTSPEED_4GBIT;
+ break;
+ case PORT_SPEED_8GB:
+ speed = FC_PORTSPEED_8GBIT;
+ break;
+ case PORT_SPEED_10GB:
+ speed = FC_PORTSPEED_10GBIT;
+ break;
+ case PORT_SPEED_16GB:
+ speed = FC_PORTSPEED_16GBIT;
+ break;
+ case PORT_SPEED_32GB:
+ speed = FC_PORTSPEED_32GBIT;
+ break;
+ }
+ fc_host_speed(shost) = speed;
+}
+
+static void
+qla2x00_get_host_port_type(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+ uint32_t port_type = FC_PORTTYPE_UNKNOWN;
+
+ if (vha->vp_idx) {
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ return;
+ }
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_NL:
+ port_type = FC_PORTTYPE_LPORT;
+ break;
+ case ISP_CFG_FL:
+ port_type = FC_PORTTYPE_NLPORT;
+ break;
+ case ISP_CFG_N:
+ port_type = FC_PORTTYPE_PTP;
+ break;
+ case ISP_CFG_F:
+ port_type = FC_PORTTYPE_NPORT;
+ break;
+ }
+ fc_host_port_type(shost) = port_type;
+}
+
+static void
+qla2x00_get_starget_node_name(struct scsi_target *starget)
+{
+ struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport;
+ u64 node_name = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->rport &&
+ starget->id == fcport->rport->scsi_target_id) {
+ node_name = wwn_to_u64(fcport->node_name);
+ break;
+ }
+ }
+
+ fc_starget_node_name(starget) = node_name;
+}
+
+static void
+qla2x00_get_starget_port_name(struct scsi_target *starget)
+{
+ struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport;
+ u64 port_name = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->rport &&
+ starget->id == fcport->rport->scsi_target_id) {
+ port_name = wwn_to_u64(fcport->port_name);
+ break;
+ }
+ }
+
+ fc_starget_port_name(starget) = port_name;
+}
+
+static void
+qla2x00_get_starget_port_id(struct scsi_target *starget)
+{
+ struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport;
+ uint32_t port_id = ~0U;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->rport &&
+ starget->id == fcport->rport->scsi_target_id) {
+ port_id = fcport->d_id.b.domain << 16 |
+ fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+ break;
+ }
+ }
+
+ fc_starget_port_id(starget) = port_id;
+}
+
+static void
+qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+static void
+qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct Scsi_Host *host = rport_to_shost(rport);
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ unsigned long flags;
+
+ if (!fcport)
+ return;
+
+ /* Now that the rport has been deleted, set the fcport state to
+ FCS_DEVICE_DEAD */
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+
+ /*
+ * Transport has effectively 'deleted' the rport, clear
+ * all local references.
+ */
+ spin_lock_irqsave(host->host_lock, flags);
+ fcport->rport = fcport->drport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+ qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ return;
+ }
+}
+
+static void
+qla2x00_terminate_rport_io(struct fc_rport *rport)
+{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+ if (!fcport)
+ return;
+
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+ qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ return;
+ }
+ /*
+ * At this point all fcport's software-states are cleared. Perform any
+ * final cleanup of firmware resources (PCBs and XCBs).
+ */
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw))
+ fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ else
+ qla2x00_port_logout(fcport->vha, fcport);
+ }
+}
+
+static int
+qla2x00_issue_lip(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ if (IS_QLAFX00(vha->hw))
+ return 0;
+
+ qla2x00_loop_reset(vha);
+ return 0;
+}
+
+static struct fc_host_statistics *
+qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int rval;
+ struct link_statistics *stats;
+ dma_addr_t stats_dma;
+ struct fc_host_statistics *pfc_host_stat;
+
+ pfc_host_stat = &vha->fc_host_stat;
+ memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+
+ if (IS_QLAFX00(vha->hw))
+ goto done;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ goto done;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto done;
+
+ if (qla2x00_reset_active(vha))
+ goto done;
+
+ stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
+ if (stats == NULL) {
+ ql_log(ql_log_warn, vha, 0x707d,
+ "Failed to allocate memory for stats.\n");
+ goto done;
+ }
+ memset(stats, 0, DMA_POOL_SIZE);
+
+ rval = QLA_FUNCTION_FAILED;
+ if (IS_FWI2_CAPABLE(ha)) {
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+ } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
+ !ha->dpc_active) {
+ /* Must be in a 'READY' state for statistics retrieval. */
+ rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
+ stats, stats_dma);
+ }
+
+ if (rval != QLA_SUCCESS)
+ goto done_free;
+
+ pfc_host_stat->link_failure_count = stats->link_fail_cnt;
+ pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
+ pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
+ pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
+ pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
+ pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
+ if (IS_FWI2_CAPABLE(ha)) {
+ pfc_host_stat->lip_count = stats->lip_cnt;
+ pfc_host_stat->tx_frames = stats->tx_frames;
+ pfc_host_stat->rx_frames = stats->rx_frames;
+ pfc_host_stat->dumped_frames = stats->discarded_frames;
+ pfc_host_stat->nos_count = stats->nos_rcvd;
+ pfc_host_stat->error_frames =
+ stats->dropped_frames + stats->discarded_frames;
+ pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
+ pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
+ }
+ pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
+ pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
+ pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
+ pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+ pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+ pfc_host_stat->seconds_since_last_reset =
+ get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+ do_div(pfc_host_stat->seconds_since_last_reset, HZ);
+
+done_free:
+ dma_pool_free(ha->s_dma_pool, stats, stats_dma);
+done:
+ return pfc_host_stat;
+}
+
+static void
+qla2x00_reset_host_stats(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
+ vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+}
+
+static void
+qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)));
+}
+
+static void
+qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+}
+
+static void
+qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+ uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
+ 0xFF, 0xFF, 0xFF, 0xFF};
+ u64 fabric_name = wwn_to_u64(node_name);
+
+ if (vha->device_flags & SWITCH_FOUND)
+ fabric_name = wwn_to_u64(vha->fabric_node_name);
+
+ fc_host_fabric_name(shost) = fabric_name;
+}
+
+static void
+qla2x00_get_host_port_state(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+ struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
+
+ if (!base_vha->flags.online) {
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
+}
+
+static int
+qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ int ret = 0;
+ uint8_t qos = 0;
+ scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+ scsi_qla_host_t *vha = NULL;
+ struct qla_hw_data *ha = base_vha->hw;
+ uint16_t options = 0;
+ int cnt;
+ struct req_que *req = ha->req_q_map[0];
+
+ ret = qla24xx_vport_create_req_sanity_check(fc_vport);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0x707e,
+ "Vport sanity check failed, status %x\n", ret);
+ return (ret);
+ }
+
+ vha = qla24xx_create_vhost(fc_vport);
+ if (vha == NULL) {
+ ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
+ return FC_VPORT_FAILED;
+ }
+ if (disable) {
+ atomic_set(&vha->vp_state, VP_OFFLINE);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else
+ atomic_set(&vha->vp_state, VP_FAILED);
+
+ /* ready to create vport */
+ ql_log(ql_log_info, vha, 0x7080,
+ "VP entry id %d assigned.\n", vha->vp_idx);
+
+ /* initialized vport states */
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->vp_err_state= VP_ERR_PORTDWN;
+ vha->vp_prev_err_state= VP_ERR_UNKWN;
+ /* Check if physical ha port is Up */
+ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ /* Don't retry or attempt login of this virtual port */
+ ql_dbg(ql_dbg_user, vha, 0x7081,
+ "Vport loop state is not UP.\n");
+ atomic_set(&vha->loop_state, LOOP_DEAD);
+ if (!disable)
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ }
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ int prot = 0, guard;
+ vha->flags.difdix_supported = 1;
+ ql_dbg(ql_dbg_user, vha, 0x7082,
+ "Registered for DIF/DIX type 1 and 3 protection.\n");
+ if (ql2xenabledif == 1)
+ prot = SHOST_DIX_TYPE0_PROTECTION;
+ scsi_host_set_prot(vha->host,
+ prot | SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+
+ guard = SHOST_DIX_GUARD_CRC;
+
+ if (IS_PI_IPGUARD_CAPABLE(ha) &&
+ (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+ guard |= SHOST_DIX_GUARD_IP;
+
+ scsi_host_set_guard(vha->host, guard);
+ } else
+ vha->flags.difdix_supported = 0;
+ }
+
+ if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
+ &ha->pdev->dev)) {
+ ql_dbg(ql_dbg_user, vha, 0x7083,
+ "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
+ goto vport_create_failed_2;
+ }
+
+ /* initialize attributes */
+ fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
+ fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+ fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+ fc_host_supported_classes(vha->host) =
+ fc_host_supported_classes(base_vha->host);
+ fc_host_supported_speeds(vha->host) =
+ fc_host_supported_speeds(base_vha->host);
+
+ qlt_vport_create(vha, ha);
+ qla24xx_vport_disable(fc_vport, disable);
+
+ if (ha->flags.cpu_affinity_enabled) {
+ req = ha->req_q_map[1];
+ ql_dbg(ql_dbg_multiq, vha, 0xc000,
+ "Request queue %p attached with "
+ "VP[%d], cpu affinity =%d\n",
+ req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
+ goto vport_queue;
+ } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+ goto vport_queue;
+ /* Create a request queue in QoS mode for the vport */
+ for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
+ if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
+ && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
+ 8) == 0) {
+ qos = ha->npiv_info[cnt].q_qos;
+ break;
+ }
+ }
+
+ if (qos) {
+ ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
+ qos);
+ if (!ret)
+ ql_log(ql_log_warn, vha, 0x7084,
+ "Can't create request queue for VP[%d]\n",
+ vha->vp_idx);
+ else {
+ ql_dbg(ql_dbg_multiq, vha, 0xc001,
+ "Request Que:%d Q0s: %d) created for VP[%d]\n",
+ ret, qos, vha->vp_idx);
+ ql_dbg(ql_dbg_user, vha, 0x7085,
+ "Request Que:%d Q0s: %d) created for VP[%d]\n",
+ ret, qos, vha->vp_idx);
+ req = ha->req_q_map[ret];
+ }
+ }
+
+vport_queue:
+ vha->req = req;
+ return 0;
+
+vport_create_failed_2:
+ qla24xx_disable_vp(vha);
+ qla24xx_deallocate_vp_id(vha);
+ scsi_host_put(vha->host);
+ return FC_VPORT_FAILED;
+}
+
+static int
+qla24xx_vport_delete(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *vha = fc_vport->dd_data;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t id = vha->vp_idx;
+
+ while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
+ msleep(1000);
+
+ qla24xx_disable_vp(vha);
+
+ vha->flags.delete_progress = 1;
+
+ qlt_remove_target(ha, vha);
+
+ fc_remove_host(vha->host);
+
+ scsi_remove_host(vha->host);
+
+ /* Allow timer to run to drain queued items, when removing vp */
+ qla24xx_deallocate_vp_id(vha);
+
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ ql_dbg(ql_dbg_user, vha, 0x7086,
+ "Timer for the VP[%d] has stopped\n", vha->vp_idx);
+ }
+
+ BUG_ON(atomic_read(&vha->vref_count));
+
+ qla2x00_free_fcports(vha);
+
+ mutex_lock(&ha->vport_lock);
+ ha->cur_vport_count--;
+ clear_bit(vha->vp_idx, ha->vp_idx_map);
+ mutex_unlock(&ha->vport_lock);
+
+ if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
+ if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x7087,
+ "Queue delete failed.\n");
+ }
+
+ ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+ scsi_host_put(vha->host);
+ return 0;
+}
+
+static int
+qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ scsi_qla_host_t *vha = fc_vport->dd_data;
+
+ if (disable)
+ qla24xx_disable_vp(vha);
+ else
+ qla24xx_enable_vp(vha);
+
+ return 0;
+}
+
+struct fc_function_template qla2xxx_transport_functions = {
+
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_speeds = 1,
+
+ .get_host_port_id = qla2x00_get_host_port_id,
+ .show_host_port_id = 1,
+ .get_host_speed = qla2x00_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_port_type = qla2x00_get_host_port_type,
+ .show_host_port_type = 1,
+ .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
+ .show_host_symbolic_name = 1,
+ .set_host_system_hostname = qla2x00_set_host_system_hostname,
+ .show_host_system_hostname = 1,
+ .get_host_fabric_name = qla2x00_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+ .get_host_port_state = qla2x00_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_port *),
+ .show_rport_supported_classes = 1,
+
+ .get_starget_node_name = qla2x00_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = qla2x00_get_starget_port_name,
+ .show_starget_port_name = 1,
+ .get_starget_port_id = qla2x00_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .issue_fc_host_lip = qla2x00_issue_lip,
+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+ .terminate_rport_io = qla2x00_terminate_rport_io,
+ .get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .reset_fc_host_stats = qla2x00_reset_host_stats,
+
+ .vport_create = qla24xx_vport_create,
+ .vport_disable = qla24xx_vport_disable,
+ .vport_delete = qla24xx_vport_delete,
+ .bsg_request = qla24xx_bsg_request,
+ .bsg_timeout = qla24xx_bsg_timeout,
+};
+
+struct fc_function_template qla2xxx_transport_vport_functions = {
+
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+
+ .get_host_port_id = qla2x00_get_host_port_id,
+ .show_host_port_id = 1,
+ .get_host_speed = qla2x00_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_port_type = qla2x00_get_host_port_type,
+ .show_host_port_type = 1,
+ .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
+ .show_host_symbolic_name = 1,
+ .set_host_system_hostname = qla2x00_set_host_system_hostname,
+ .show_host_system_hostname = 1,
+ .get_host_fabric_name = qla2x00_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+ .get_host_port_state = qla2x00_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_port *),
+ .show_rport_supported_classes = 1,
+
+ .get_starget_node_name = qla2x00_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = qla2x00_get_starget_port_name,
+ .show_starget_port_name = 1,
+ .get_starget_port_id = qla2x00_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .issue_fc_host_lip = qla2x00_issue_lip,
+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+ .terminate_rport_io = qla2x00_terminate_rport_io,
+ .get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .reset_fc_host_stats = qla2x00_reset_host_stats,
+
+ .bsg_request = qla24xx_bsg_request,
+ .bsg_timeout = qla24xx_bsg_timeout,
+};
+
+void
+qla2x00_init_host_attr(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ u32 speed = FC_PORTSPEED_UNKNOWN;
+
+ fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
+ fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+ fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+ fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+ (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
+ fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
+ fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
+
+ if (IS_CNA_CAPABLE(ha))
+ speed = FC_PORTSPEED_10GBIT;
+ else if (IS_QLA2031(ha))
+ speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT;
+ else if (IS_QLA25XX(ha))
+ speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+ FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ else if (IS_QLA24XX_TYPE(ha))
+ speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
+ FC_PORTSPEED_1GBIT;
+ else if (IS_QLA23XX(ha))
+ speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ else if (IS_QLAFX00(ha))
+ speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+ FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ else if (IS_QLA27XX(ha))
+ speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+ FC_PORTSPEED_8GBIT;
+ else
+ speed = FC_PORTSPEED_1GBIT;
+ fc_host_supported_speeds(vha->host) = speed;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 000000000..2e2bb6f45
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,2279 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+/* BSG support for ELS/CT pass through */
+void
+qla2x00_bsg_job_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+ bsg_job->reply->result = res;
+ bsg_job->job_done(bsg_job);
+ sp->free(vha, sp);
+}
+
+void
+qla2x00_bsg_sp_free(void *data, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+
+ if (sp->type == SRB_FXIOCB_BCMD) {
+ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+ &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
+
+ if (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_FXIOCB_BCMD ||
+ sp->type == SRB_ELS_CMD_HST)
+ kfree(sp->fcport);
+ qla2x00_rel_sp(vha, sp);
+}
+
+int
+qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
+ struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+{
+ int i, ret, num_valid;
+ uint8_t *bcode;
+ struct qla_fcp_prio_entry *pri_entry;
+ uint32_t *bcode_val_ptr, bcode_val;
+
+ ret = 1;
+ num_valid = 0;
+ bcode = (uint8_t *)pri_cfg;
+ bcode_val_ptr = (uint32_t *)pri_cfg;
+ bcode_val = (uint32_t)(*bcode_val_ptr);
+
+ if (bcode_val == 0xFFFFFFFF) {
+ /* No FCP Priority config data in flash */
+ ql_dbg(ql_dbg_user, vha, 0x7051,
+ "No FCP Priority config data.\n");
+ return 0;
+ }
+
+ if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
+ bcode[3] != 'S') {
+ /* Invalid FCP priority data header*/
+ ql_dbg(ql_dbg_user, vha, 0x7052,
+ "Invalid FCP Priority data header. bcode=0x%x.\n",
+ bcode_val);
+ return 0;
+ }
+ if (flag != 1)
+ return ret;
+
+ pri_entry = &pri_cfg->entry[0];
+ for (i = 0; i < pri_cfg->num_entries; i++) {
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ num_valid++;
+ pri_entry++;
+ }
+
+ if (num_valid == 0) {
+ /* No valid FCP priority data entries */
+ ql_dbg(ql_dbg_user, vha, 0x7053,
+ "No valid FCP Priority data entries.\n");
+ ret = 0;
+ } else {
+ /* FCP priority data is valid */
+ ql_dbg(ql_dbg_user, vha, 0x7054,
+ "Valid FCP priority data. num entries = %d.\n",
+ num_valid);
+ }
+
+ return ret;
+}
+
+static int
+qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int ret = 0;
+ uint32_t len;
+ uint32_t oper;
+
+ if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ /* Get the sub command */
+ oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ /* Only set config is allowed if config memory is not allocated */
+ if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+ switch (oper) {
+ case QLFC_FCP_PRIO_DISABLE:
+ if (ha->flags.fcp_prio_enabled) {
+ ha->flags.fcp_prio_enabled = 0;
+ ha->fcp_prio_cfg->attributes &=
+ ~FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ break;
+
+ case QLFC_FCP_PRIO_ENABLE:
+ if (!ha->flags.fcp_prio_enabled) {
+ if (ha->fcp_prio_cfg) {
+ ha->flags.fcp_prio_enabled = 1;
+ ha->fcp_prio_cfg->attributes |=
+ FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ }
+ break;
+
+ case QLFC_FCP_PRIO_GET_CONFIG:
+ len = bsg_job->reply_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+
+ bsg_job->reply->result = DID_OK;
+ bsg_job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
+ len);
+
+ break;
+
+ case QLFC_FCP_PRIO_SET_CONFIG:
+ len = bsg_job->request_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ ql_log(ql_log_warn, vha, 0x7050,
+ "Unable to allocate memory for fcp prio "
+ "config data (%x).\n", FCP_PRIO_CFG_SIZE);
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -ENOMEM;
+ goto exit_fcp_prio_cfg;
+ }
+ }
+
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
+ FCP_PRIO_CFG_SIZE);
+
+ /* validate fcp priority data */
+
+ if (!qla24xx_fcp_prio_cfg_valid(vha,
+ (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ /* If buffer was invalidatic int
+ * fcp_prio_cfg is of no use
+ */
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ ha->flags.fcp_prio_enabled = 0;
+ if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
+ ha->flags.fcp_prio_enabled = 1;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+exit_fcp_prio_cfg:
+ if (!ret)
+ bsg_job->job_done(bsg_job);
+ return ret;
+}
+
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+ struct fc_rport *rport;
+ fc_port_t *fcport = NULL;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+ const char *type;
+ int req_sg_cnt, rsp_sg_cnt;
+ int rval = (DRIVER_ERROR << 16);
+ uint16_t nextlid = 0;
+
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_RPT_ELS";
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_HST_ELS_NOLOGIN";
+ }
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
+ rval = -EIO;
+ goto done;
+ }
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0x7001,
+ "ELS passthru not supported for ISP23xx based adapters.\n");
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* Multiple SG's are not supported for ELS requests */
+ if (bsg_job->request_payload.sg_cnt > 1 ||
+ bsg_job->reply_payload.sg_cnt > 1) {
+ ql_dbg(ql_dbg_user, vha, 0x7002,
+ "Multiple SG's are not suppored for ELS requests, "
+ "request_sg_cnt=%x reply_sg_cnt=%x.\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt);
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* ELS request for rport */
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ /* make sure the rport is logged in,
+ * if not perform fabric login
+ */
+ if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ ql_dbg(ql_dbg_user, vha, 0x7003,
+ "Failed to login port %06X for ELS passthru.\n",
+ fcport->d_id.b24);
+ rval = -EIO;
+ goto done;
+ }
+ } else {
+ /* Allocate a dummy fcport structure, since functions
+ * preparing the IOCB and mailbox command retrieves port
+ * specific information from fcport structure. For Host based
+ * ELS commands there will be no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->d_id.b.al_pa =
+ bsg_job->request->rqst_data.h_els.port_id[0];
+ fcport->d_id.b.area =
+ bsg_job->request->rqst_data.h_els.port_id[1];
+ fcport->d_id.b.domain =
+ bsg_job->request->rqst_data.h_els.port_id[2];
+ fcport->loop_id =
+ (fcport->d_id.b.al_pa == 0xFD) ?
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ ql_log(ql_log_warn, vha, 0x7008,
+ "dma mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sp->type =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ sp->name =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ ql_dbg(ql_dbg_user, vha, 0x700a,
+ "bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%-2x%02x%02x.\n", type,
+ bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
+ qla2x00_rel_sp(vha, sp);
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+ return rval;
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ goto done_free_fcport;
+
+done_free_fcport:
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS)
+ kfree(fcport);
+done:
+ return rval;
+}
+
+inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 2) {
+ iocbs += (dsds - 2) / 5;
+ if ((dsds - 2) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
+
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+ srb_t *sp;
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ int req_sg_cnt, rsp_sg_cnt;
+ uint16_t loop_id;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_CT";
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x700f,
+ "dma_map_sg return %d for request\n", req_sg_cnt);
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7010,
+ "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ ql_log(ql_log_warn, vha, 0x7011,
+ "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x7012,
+ "Host is not online.\n");
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+
+ loop_id =
+ (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ >> 24;
+ switch (loop_id) {
+ case 0xFC:
+ loop_id = cpu_to_le16(NPH_SNS);
+ break;
+ case 0xFA:
+ loop_id = vha->mgmt_svr_loop_id;
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x7013,
+ "Unknown loop id: %x.\n", loop_id);
+ rval = -EINVAL;
+ goto done_unmap_sg;
+ }
+
+ /* Allocate a dummy fcport structure, since functions preparing the
+ * IOCB and mailbox command retrieves port specific information
+ * from fcport structure. For Host based ELS commands there will be
+ * no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x7014,
+ "Failed to allocate fcport.\n");
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->loop_id = loop_id;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_log(ql_log_warn, vha, 0x7015,
+ "qla2x00_get_sp failed.\n");
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ sp->type = SRB_CT_CMD;
+ sp->name = "bsg_ct";
+ sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ ql_dbg(ql_dbg_user, vha, 0x7016,
+ "bsg rqst type: %s else type: %x - "
+ "loop-id=%x portid=%02x%02x%02x.\n", type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7017,
+ "qla2x00_start_sp failed=%d.\n", rval);
+ qla2x00_rel_sp(vha, sp);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+ return rval;
+}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ int wait, int wait2)
+{
+ int ret = 0;
+ int rval = 0;
+ uint16_t new_config[4];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+ goto done_reset_internal;
+
+ memset(new_config, 0 , sizeof(new_config));
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
+ new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+
+ ha->notify_dcbx_comp = wait;
+ ha->notify_lb_portup_comp = wait2;
+
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "DCBX completion not received.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "DCBX completion received.\n");
+
+ if (wait2 &&
+ !wait_for_completion_timeout(&ha->lb_portup_comp,
+ (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x70c5,
+ "Port up completion not received.\n");
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x70c6,
+ "Port up completion received.\n");
+
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ }
+done_reset_internal:
+ return rval;
+}
+
+/*
+ * Set the port configuration to enable the internal or external loopback
+ * depending on the loopback mode.
+ */
+static inline int
+qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ uint16_t *new_config, uint16_t mode)
+{
+ int ret = 0;
+ int rval = 0;
+ unsigned long rem_tmo = 0, current_tmo = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+ goto done_set_internal;
+
+ if (mode == INTERNAL_LOOPBACK)
+ new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
+ else if (mode == EXTERNAL_LOOPBACK)
+ new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
+ ql_dbg(ql_dbg_user, vha, 0x70be,
+ "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
+
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
+
+ ha->notify_dcbx_comp = 1;
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7021,
+ "set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ rval = -EINVAL;
+ goto done_set_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ current_tmo = DCBX_COMP_TIMEOUT * HZ;
+ while (1) {
+ rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
+ current_tmo);
+ if (!ha->idc_extend_tmo || rem_tmo) {
+ ha->idc_extend_tmo = 0;
+ break;
+ }
+ current_tmo = ha->idc_extend_tmo * HZ;
+ ha->idc_extend_tmo = 0;
+ }
+
+ if (!rem_tmo) {
+ ql_dbg(ql_dbg_user, vha, 0x7022,
+ "DCBX completion not received.\n");
+ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+ /*
+ * If the reset of the loopback mode doesn't work take a FCoE
+ * dump and reset the chip.
+ */
+ if (ret) {
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ rval = -EINVAL;
+ } else {
+ if (ha->flags.idc_compl_status) {
+ ql_dbg(ql_dbg_user, vha, 0x70c3,
+ "Bad status in IDC Completion AEN\n");
+ rval = -EINVAL;
+ ha->flags.idc_compl_status = 0;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7023,
+ "DCBX completion received.\n");
+ }
+
+ ha->notify_dcbx_comp = 0;
+ ha->idc_extend_tmo = 0;
+
+done_set_internal:
+ return rval;
+}
+
+static int
+qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint8_t command_sent;
+ char *type;
+ struct msg_echo_lb elreq;
+ uint16_t response[MAILBOX_REGISTER_COUNT];
+ uint16_t config[4], new_config[4];
+ uint8_t *fw_sts_ptr;
+ uint8_t *req_data = NULL;
+ dma_addr_t req_data_dma;
+ uint32_t req_data_len;
+ uint8_t *rsp_data = NULL;
+ dma_addr_t rsp_data_dma;
+ uint32_t rsp_data_len;
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
+ return -EIO;
+ }
+
+ elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+
+ if (!elreq.req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x701a,
+ "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
+ return -ENOMEM;
+ }
+
+ elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+ DMA_FROM_DEVICE);
+
+ if (!elreq.rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x701b,
+ "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
+ rval = -ENOMEM;
+ goto done_unmap_req_sg;
+ }
+
+ if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ ql_log(ql_log_warn, vha, 0x701c,
+ "dma mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+ &req_data_dma, GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x701d,
+ "dma alloc failed for req_data.\n");
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+ &rsp_data_dma, GFP_KERNEL);
+ if (!rsp_data) {
+ ql_log(ql_log_warn, vha, 0x7004,
+ "dma alloc failed for rsp_data.\n");
+ rval = -ENOMEM;
+ goto done_free_dma_req;
+ }
+
+ /* Copy the request buffer in req_data now */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ elreq.send_dma = req_data_dma;
+ elreq.rcv_dma = rsp_data_dma;
+ elreq.transfer_size = req_data_len;
+
+ elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ elreq.iteration_count =
+ bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
+
+ if (atomic_read(&vha->loop_state) == LOOP_READY &&
+ (ha->current_topology == ISP_CFG_F ||
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
+ le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
+ && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+ elreq.options == EXTERNAL_LOOPBACK) {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ ql_dbg(ql_dbg_user, vha, 0x701e,
+ "BSG request type: %s.\n", type);
+ command_sent = INT_DEF_LB_ECHO_CMD;
+ rval = qla2x00_echo_test(vha, &elreq, response);
+ } else {
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
+ memset(config, 0, sizeof(config));
+ memset(new_config, 0, sizeof(new_config));
+
+ if (qla81xx_get_port_config(vha, config)) {
+ ql_log(ql_log_warn, vha, 0x701f,
+ "Get port config failed.\n");
+ rval = -EPERM;
+ goto done_free_dma_rsp;
+ }
+
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+ ql_dbg(ql_dbg_user, vha, 0x70c4,
+ "Loopback operation already in "
+ "progress.\n");
+ rval = -EAGAIN;
+ goto done_free_dma_rsp;
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70c0,
+ "elreq.options=%04x\n", elreq.options);
+
+ if (elreq.options == EXTERNAL_LOOPBACK)
+ if (IS_QLA8031(ha) || IS_QLA8044(ha))
+ rval = qla81xx_set_loopback_mode(vha,
+ config, new_config, elreq.options);
+ else
+ rval = qla81xx_reset_loopback_mode(vha,
+ config, 1, 0);
+ else
+ rval = qla81xx_set_loopback_mode(vha, config,
+ new_config, elreq.options);
+
+ if (rval) {
+ rval = -EPERM;
+ goto done_free_dma_rsp;
+ }
+
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+ ql_dbg(ql_dbg_user, vha, 0x7028,
+ "BSG request type: %s.\n", type);
+
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+
+ if (response[0] == MBS_COMMAND_ERROR &&
+ response[1] == MBS_LB_RESET) {
+ ql_log(ql_log_warn, vha, 0x7029,
+ "MBX command error, Aborting ISP.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ /* Also reset the MPI */
+ if (IS_QLA81XX(ha)) {
+ if (qla81xx_restart_mpi_firmware(vha) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
+ }
+ }
+
+ rval = -EIO;
+ goto done_free_dma_rsp;
+ }
+
+ if (new_config[0]) {
+ int ret;
+
+ /* Revert back to original port config
+ * Also clear internal loopback
+ */
+ ret = qla81xx_reset_loopback_mode(vha,
+ new_config, 0, 1);
+ if (ret) {
+ /*
+ * If the reset of the loopback mode
+ * doesn't work take FCoE dump and then
+ * reset the chip.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+
+ }
+
+ } else {
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+ ql_dbg(ql_dbg_user, vha, 0x702b,
+ "BSG request type: %s.\n", type);
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+ }
+ }
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x702c,
+ "Vendor request %s failed.\n", type);
+
+ rval = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x702d,
+ "Vendor request %s completed.\n", type);
+ bsg_job->reply->result = (DID_OK << 16);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, rsp_data,
+ rsp_data_len);
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+
+done_free_dma_rsp:
+ dma_free_coherent(&ha->pdev->dev, rsp_data_len,
+ rsp_data, rsp_data_dma);
+done_free_dma_req:
+ dma_free_coherent(&ha->pdev->dev, req_data_len,
+ req_data, req_data_dma);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla84xx_reset(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint32_t flag;
+
+ if (!IS_QLA84XX(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
+ return -EINVAL;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x7030,
+ "Vendor request 84xx reset failed.\n");
+ rval = (DID_ERROR << 16);
+
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x7031,
+ "Vendor request 84xx reset completed.\n");
+ bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
+ }
+
+ return rval;
+}
+
+static int
+qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct verify_chip_entry_84xx *mn = NULL;
+ dma_addr_t mn_dma, fw_dma;
+ void *fw_buf = NULL;
+ int rval = 0;
+ uint32_t sg_cnt;
+ uint32_t data_len;
+ uint16_t options;
+ uint32_t flag;
+ uint32_t fw_ver;
+
+ if (!IS_QLA84XX(ha)) {
+ ql_dbg(ql_dbg_user, vha, 0x7032,
+ "Not 84xx, exiting.\n");
+ return -EINVAL;
+ }
+
+ sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7033,
+ "dma_map_sg returned %d for request.\n", sg_cnt);
+ return -ENOMEM;
+ }
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7034,
+ "DMA mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, sg_cnt);
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &fw_dma, GFP_KERNEL);
+ if (!fw_buf) {
+ ql_log(ql_log_warn, vha, 0x7035,
+ "DMA alloc failed for fw_buf.\n");
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, fw_buf, data_len);
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ ql_log(ql_log_warn, vha, 0x7036,
+ "DMA alloc failed for fw buffer.\n");
+ rval = -ENOMEM;
+ goto done_free_fw_buf;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
+ if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
+ options |= VCO_DIAG_FW;
+
+ mn->options = cpu_to_le16(options);
+ mn->fw_ver = cpu_to_le32(fw_ver);
+ mn->fw_size = cpu_to_le32(data_len);
+ mn->fw_seq_size = cpu_to_le32(data_len);
+ mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
+ mn->dseg_length = cpu_to_le32(data_len);
+ mn->data_seg_cnt = cpu_to_le16(1);
+
+ rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x7037,
+ "Vendor request 84xx updatefw failed.\n");
+
+ rval = (DID_ERROR << 16);
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x7038,
+ "Vendor request 84xx updatefw completed.\n");
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+ }
+
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+done_free_fw_buf:
+ dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (!rval)
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct access_chip_84xx *mn = NULL;
+ dma_addr_t mn_dma, mgmt_dma;
+ void *mgmt_b = NULL;
+ int rval = 0;
+ struct qla_bsg_a84_mgmt *ql84_mgmt;
+ uint32_t sg_cnt;
+ uint32_t data_len = 0;
+ uint32_t dma_direction = DMA_NONE;
+
+ if (!IS_QLA84XX(ha)) {
+ ql_log(ql_log_warn, vha, 0x703a,
+ "Not 84xx, exiting.\n");
+ return -EINVAL;
+ }
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ ql_log(ql_log_warn, vha, 0x703c,
+ "DMA alloc failed for fw buffer.\n");
+ return -ENOMEM;
+ }
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+ ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ switch (ql84_mgmt->mgmt.cmd) {
+ case QLA84_MGMT_READ_MEM:
+ case QLA84_MGMT_GET_INFO:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x703d,
+ "dma_map_sg returned %d for reply.\n", sg_cnt);
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_FROM_DEVICE;
+
+ if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x703e,
+ "DMA mapping resulted in different sg counts, "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+ bsg_job->reply_payload.sg_cnt, sg_cnt);
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->reply_payload.payload_len;
+
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ ql_log(ql_log_warn, vha, 0x703f,
+ "DMA alloc failed for mgmt_b.\n");
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
+ mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+
+ } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
+ mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
+
+ mn->parameter2 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.info.context);
+ }
+ break;
+
+ case QLA84_MGMT_WRITE_MEM:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7040,
+ "dma_map_sg returned %d.\n", sg_cnt);
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_TO_DEVICE;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7041,
+ "DMA mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, sg_cnt);
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ ql_log(ql_log_warn, vha, 0x7042,
+ "DMA alloc failed for mgmt_b.\n");
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
+
+ mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+ break;
+
+ case QLA84_MGMT_CHNG_CONFIG:
+ mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
+
+ mn->parameter2 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
+
+ mn->parameter3 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
+ break;
+
+ default:
+ rval = -EIO;
+ goto exit_mgmt;
+ }
+
+ if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
+ mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
+ mn->dseg_count = cpu_to_le16(1);
+ mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+ mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+ }
+
+ rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x7043,
+ "Vendor request 84xx mgmt failed.\n");
+
+ rval = (DID_ERROR << 16);
+
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x7044,
+ "Vendor request 84xx mgmt completed.\n");
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+
+ if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
+ (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, mgmt_b,
+ data_len);
+ }
+ }
+
+done_unmap_sg:
+ if (mgmt_b)
+ dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
+
+ if (dma_direction == DMA_TO_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ else if (dma_direction == DMA_FROM_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+exit_mgmt:
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+ if (!rval)
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla24xx_iidma(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval = 0;
+ struct qla_port_param *port_param = NULL;
+ fc_port_t *fcport = NULL;
+ int found = 0;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ uint8_t *rsp_ptr = NULL;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
+ return -EINVAL;
+ }
+
+ port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+ if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
+ ql_log(ql_log_warn, vha, 0x7048,
+ "Invalid destination type.\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
+ fcport->port_name, sizeof(fcport->port_name)))
+ continue;
+
+ found = 1;
+ break;
+ }
+
+ if (!found) {
+ ql_log(ql_log_warn, vha, 0x7049,
+ "Failed to find port.\n");
+ return -EINVAL;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ ql_log(ql_log_warn, vha, 0x704a,
+ "Port is not online.\n");
+ return -EINVAL;
+ }
+
+ if (fcport->flags & FCF_LOGIN_NEEDED) {
+ ql_log(ql_log_warn, vha, 0x704b,
+ "Remote port not logged in flags = 0x%x.\n", fcport->flags);
+ return -EINVAL;
+ }
+
+ if (port_param->mode)
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
+ port_param->speed, mb);
+ else
+ rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
+ &port_param->speed, mb);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x704c,
+ "iIDMA cmd failed for %8phN -- "
+ "%04x %x %04x %04x.\n", fcport->port_name,
+ rval, fcport->fp_speed, mb[0], mb[1]);
+ rval = (DID_ERROR << 16);
+ } else {
+ if (!port_param->mode) {
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct qla_port_param);
+
+ rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(rsp_ptr, port_param,
+ sizeof(struct qla_port_param));
+ }
+
+ bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
+ }
+
+ return rval;
+}
+
+static int
+qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
+ uint8_t is_update)
+{
+ uint32_t start = 0;
+ int valid = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return -EINVAL;
+
+ start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ if (start > ha->optrom_size) {
+ ql_log(ql_log_warn, vha, 0x7055,
+ "start %d > optrom_size %d.\n", start, ha->optrom_size);
+ return -EINVAL;
+ }
+
+ if (ha->optrom_state != QLA_SWAITING) {
+ ql_log(ql_log_info, vha, 0x7056,
+ "optrom_state %d.\n", ha->optrom_state);
+ return -EBUSY;
+ }
+
+ ha->optrom_region_start = start;
+ ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
+ if (is_update) {
+ if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+ valid = 1;
+ else if (start == (ha->flt_region_boot * 4) ||
+ start == (ha->flt_region_fw * 4))
+ valid = 1;
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
+ valid = 1;
+ if (!valid) {
+ ql_log(ql_log_warn, vha, 0x7058,
+ "Invalid start region 0x%x/0x%x.\n", start,
+ bsg_job->request_payload.payload_len);
+ return -EINVAL;
+ }
+
+ ha->optrom_region_size = start +
+ bsg_job->request_payload.payload_len > ha->optrom_size ?
+ ha->optrom_size - start :
+ bsg_job->request_payload.payload_len;
+ ha->optrom_state = QLA_SWRITING;
+ } else {
+ ha->optrom_region_size = start +
+ bsg_job->reply_payload.payload_len > ha->optrom_size ?
+ ha->optrom_size - start :
+ bsg_job->reply_payload.payload_len;
+ ha->optrom_state = QLA_SREADING;
+ }
+
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ if (!ha->optrom_buffer) {
+ ql_log(ql_log_warn, vha, 0x7059,
+ "Read: Unable to allocate memory for optrom retrieval "
+ "(%x)\n", ha->optrom_region_size);
+
+ ha->optrom_state = QLA_SWAITING;
+ return -ENOMEM;
+ }
+
+ memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+ return 0;
+}
+
+static int
+qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+
+ if (ha->flags.nic_core_reset_hdlr_active)
+ return -EBUSY;
+
+ mutex_lock(&ha->optrom_mutex);
+ rval = qla2x00_optrom_setup(bsg_job, vha, 0);
+ if (rval) {
+ mutex_unlock(&ha->optrom_mutex);
+ return rval;
+ }
+
+ ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
+ ha->optrom_region_size);
+
+ bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
+ bsg_job->reply->result = DID_OK;
+ vfree(ha->optrom_buffer);
+ ha->optrom_buffer = NULL;
+ ha->optrom_state = QLA_SWAITING;
+ mutex_unlock(&ha->optrom_mutex);
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+
+ mutex_lock(&ha->optrom_mutex);
+ rval = qla2x00_optrom_setup(bsg_job, vha, 1);
+ if (rval) {
+ mutex_unlock(&ha->optrom_mutex);
+ return rval;
+ }
+
+ /* Set the isp82xx_no_md_cap not to capture minidump */
+ ha->flags.isp82xx_no_md_cap = 1;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
+ ha->optrom_region_size);
+
+ ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ ha->optrom_region_start, ha->optrom_region_size);
+
+ bsg_job->reply->result = DID_OK;
+ vfree(ha->optrom_buffer);
+ ha->optrom_buffer = NULL;
+ ha->optrom_state = QLA_SWAITING;
+ mutex_unlock(&ha->optrom_mutex);
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_image_version_list *list = (void *)bsg;
+ struct qla_image_version *image;
+ uint32_t count;
+ dma_addr_t sfp_dma;
+ void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
+
+ image = list->version;
+ count = list->count;
+ while (count--) {
+ memcpy(sfp, &image->field_info, sizeof(image->field_info));
+ rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+ image->field_address.device, image->field_address.offset,
+ sizeof(image->field_info), image->field_address.option);
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+ image++;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_status_reg *sr = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
+
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+ sr->field_address.device, sr->field_address.offset,
+ sizeof(sr->status_reg), sr->field_address.option);
+ sr->status_reg = *sfp;
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_status_reg *sr = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
+
+ *sfp = sr->status_reg;
+ rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+ sr->field_address.device, sr->field_address.offset,
+ sizeof(sr->status_reg), sr->field_address.option);
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_i2c_access *i2c = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+ memcpy(sfp, i2c->buffer, i2c->length);
+ rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+ i2c->device, i2c->offset, i2c->length, i2c->option);
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint8_t bsg[DMA_POOL_SIZE];
+ struct qla_i2c_access *i2c = (void *)bsg;
+ dma_addr_t sfp_dma;
+ uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+ if (!sfp) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+ i2c->device, i2c->offset, i2c->length, i2c->option);
+
+ if (rval) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_MAILBOX;
+ goto dealloc;
+ }
+
+ memcpy(i2c->buffer, sfp, i2c->length);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+ dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+ return 0;
+}
+
+static int
+qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t thread_id;
+ uint32_t rval = EXT_STATUS_OK;
+ uint16_t req_sg_cnt = 0;
+ uint16_t rsp_sg_cnt = 0;
+ uint16_t nextlid = 0;
+ uint32_t tot_dsds;
+ srb_t *sp = NULL;
+ uint32_t req_data_len = 0;
+ uint32_t rsp_data_len = 0;
+
+ /* Check the type of the adapter */
+ if (!IS_BIDI_CAPABLE(ha)) {
+ ql_log(ql_log_warn, vha, 0x70a0,
+ "This adapter is not supported\n");
+ rval = EXT_STATUS_NOT_SUPPORTED;
+ goto done;
+ }
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ rval = EXT_STATUS_BUSY;
+ goto done;
+ }
+
+ /* Check if host is online */
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70a1,
+ "Host is not online\n");
+ rval = EXT_STATUS_DEVICE_OFFLINE;
+ goto done;
+ }
+
+ /* Check if cable is plugged in or not */
+ if (vha->device_flags & DFLG_NO_CABLE) {
+ ql_log(ql_log_warn, vha, 0x70a2,
+ "Cable is unplugged...\n");
+ rval = EXT_STATUS_INVALID_CFG;
+ goto done;
+ }
+
+ /* Check if the switch is connected or not */
+ if (ha->current_topology != ISP_CFG_F) {
+ ql_log(ql_log_warn, vha, 0x70a3,
+ "Host is not connected to the switch\n");
+ rval = EXT_STATUS_INVALID_CFG;
+ goto done;
+ }
+
+ /* Check if operating mode is P2P */
+ if (ha->operating_mode != P2P) {
+ ql_log(ql_log_warn, vha, 0x70a4,
+ "Host is operating mode is not P2p\n");
+ rval = EXT_STATUS_INVALID_CFG;
+ goto done;
+ }
+
+ thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ mutex_lock(&ha->selflogin_lock);
+ if (vha->self_login_loop_id == 0) {
+ /* Initialize all required fields of fcport */
+ vha->bidir_fcport.vha = vha;
+ vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
+ vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
+ vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
+ vha->bidir_fcport.loop_id = vha->loop_id;
+
+ if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
+ ql_log(ql_log_warn, vha, 0x70a7,
+ "Failed to login port %06X for bidirectional IOCB\n",
+ vha->bidir_fcport.d_id.b24);
+ mutex_unlock(&ha->selflogin_lock);
+ rval = EXT_STATUS_MAILBOX;
+ goto done;
+ }
+ vha->self_login_loop_id = nextlid - 1;
+
+ }
+ /* Assign the self login loop id to fcport */
+ mutex_unlock(&ha->selflogin_lock);
+
+ vha->bidir_fcport.loop_id = vha->self_login_loop_id;
+
+ req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+
+ if (!req_sg_cnt) {
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+ DMA_FROM_DEVICE);
+
+ if (!rsp_sg_cnt) {
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done_unmap_req_sg;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ ql_dbg(ql_dbg_user, vha, 0x70a9,
+ "Dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
+ "%x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done_unmap_sg;
+ }
+
+ if (req_data_len != rsp_data_len) {
+ rval = EXT_STATUS_BUSY;
+ ql_log(ql_log_warn, vha, 0x70aa,
+ "req_data_len != rsp_data_len\n");
+ goto done_unmap_sg;
+ }
+
+ req_data_len = bsg_job->request_payload.payload_len;
+ rsp_data_len = bsg_job->reply_payload.payload_len;
+
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
+ if (!sp) {
+ ql_dbg(ql_dbg_user, vha, 0x70ac,
+ "Alloc SRB structure failed\n");
+ rval = EXT_STATUS_NO_MEMORY;
+ goto done_unmap_sg;
+ }
+
+ /*Populate srb->ctx with bidir ctx*/
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->type = SRB_BIDI_CMD;
+ sp->done = qla2x00_bsg_job_done;
+
+ /* Add the read and write sg count */
+ tot_dsds = rsp_sg_cnt + req_sg_cnt;
+
+ rval = qla2x00_start_bidir(sp, vha, tot_dsds);
+ if (rval != EXT_STATUS_OK)
+ goto done_free_srb;
+ /* the bsg request will be completed in the interrupt handler */
+ return rval;
+
+done_free_srb:
+ mempool_free(sp, ha->srb_mempool);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+done:
+
+ /* Return an error vendor specific response
+ * and complete the bsg request
+ */
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_OK) << 16;
+ bsg_job->job_done(bsg_job);
+ /* Always return success, vendor rsp carries correct status */
+ return 0;
+}
+
+static int
+qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+ srb_t *sp;
+ int req_sg_cnt = 0, rsp_sg_cnt = 0;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_FX_MGMT";
+
+ /* Copy the IOCB specific information */
+ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+ &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ /* Dump the vendor information */
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
+ (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70d0,
+ "Host is not online.\n");
+ rval = -EIO;
+ goto done;
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+ req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x70c7,
+ "dma_map_sg return %d for request\n", req_sg_cnt);
+ rval = -ENOMEM;
+ goto done;
+ }
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x70c8,
+ "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+ rval = -ENOMEM;
+ goto done_unmap_req_sg;
+ }
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70c9,
+ "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+
+ /* Allocate a dummy fcport structure, since functions preparing the
+ * IOCB and mailbox command retrieves port specific information
+ * from fcport structure. For Host based ELS commands there will be
+ * no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x70ca,
+ "Failed to allocate fcport.\n");
+ rval = -ENOMEM;
+ goto done_unmap_rsp_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_log(ql_log_warn, vha, 0x70cb,
+ "qla2x00_get_sp failed.\n");
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->loop_id = piocb_rqst->dataword;
+
+ sp->type = SRB_FXIOCB_BCMD;
+ sp->name = "bsg_fx_mgmt";
+ sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ ql_dbg(ql_dbg_user, vha, 0x70cc,
+ "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
+ type, piocb_rqst->func_type, fcport->loop_id);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x70cd,
+ "qla2x00_start_sp failed=%d.\n", rval);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+
+done_unmap_rsp_sg:
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+done:
+ return rval;
+}
+
+static int
+qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval = 0;
+ struct qla_serdes_reg sr;
+
+ memset(&sr, 0, sizeof(sr));
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+ switch (sr.cmd) {
+ case INT_SC_SERDES_WRITE_REG:
+ rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ break;
+ case INT_SC_SERDES_READ_REG:
+ rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x708c,
+ "Unknown serdes cmd %x.\n", sr.cmd);
+ rval = -EINVAL;
+ break;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ rval ? EXT_STATUS_MAILBOX : 0;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval = 0;
+ struct qla_serdes_reg_ex sr;
+
+ memset(&sr, 0, sizeof(sr));
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+ switch (sr.cmd) {
+ case INT_SC_SERDES_WRITE_REG:
+ rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ break;
+ case INT_SC_SERDES_READ_REG:
+ rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x70cf,
+ "Unknown serdes cmd %x.\n", sr.cmd);
+ rval = -EINVAL;
+ break;
+ }
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ rval ? EXT_STATUS_MAILBOX : 0;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+ switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QL_VND_LOOPBACK:
+ return qla2x00_process_loopback(bsg_job);
+
+ case QL_VND_A84_RESET:
+ return qla84xx_reset(bsg_job);
+
+ case QL_VND_A84_UPDATE_FW:
+ return qla84xx_updatefw(bsg_job);
+
+ case QL_VND_A84_MGMT_CMD:
+ return qla84xx_mgmt_cmd(bsg_job);
+
+ case QL_VND_IIDMA:
+ return qla24xx_iidma(bsg_job);
+
+ case QL_VND_FCP_PRIO_CFG_CMD:
+ return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
+
+ case QL_VND_READ_FLASH:
+ return qla2x00_read_optrom(bsg_job);
+
+ case QL_VND_UPDATE_FLASH:
+ return qla2x00_update_optrom(bsg_job);
+
+ case QL_VND_SET_FRU_VERSION:
+ return qla2x00_update_fru_versions(bsg_job);
+
+ case QL_VND_READ_FRU_STATUS:
+ return qla2x00_read_fru_status(bsg_job);
+
+ case QL_VND_WRITE_FRU_STATUS:
+ return qla2x00_write_fru_status(bsg_job);
+
+ case QL_VND_WRITE_I2C:
+ return qla2x00_write_i2c(bsg_job);
+
+ case QL_VND_READ_I2C:
+ return qla2x00_read_i2c(bsg_job);
+
+ case QL_VND_DIAG_IO_CMD:
+ return qla24xx_process_bidir_cmd(bsg_job);
+
+ case QL_VND_FX00_MGMT_CMD:
+ return qlafx00_mgmt_cmd(bsg_job);
+
+ case QL_VND_SERDES_OP:
+ return qla26xx_serdes_op(bsg_job);
+
+ case QL_VND_SERDES_OP_EX:
+ return qla8044_serdes_op(bsg_job);
+
+ default:
+ return -ENOSYS;
+ }
+}
+
+int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+ int ret = -EINVAL;
+ struct fc_rport *rport;
+ fc_port_t *fcport = NULL;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+
+ /* In case no data transferred. */
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ }
+
+ if (qla2x00_reset_active(vha)) {
+ ql_dbg(ql_dbg_user, vha, 0x709f,
+ "BSG: ISP abort active/needed -- cmd=%d.\n",
+ bsg_job->request->msgcode);
+ return -EBUSY;
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x7000,
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
+
+ switch (bsg_job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ ret = qla2x00_process_els(bsg_job);
+ break;
+ case FC_BSG_HST_CT:
+ ret = qla2x00_process_ct(bsg_job);
+ break;
+ case FC_BSG_HST_VENDOR:
+ ret = qla2x00_process_vendor_specific(bsg_job);
+ break;
+ case FC_BSG_HST_ADD_RPORT:
+ case FC_BSG_HST_DEL_RPORT:
+ case FC_BSG_RPT_CT:
+ default:
+ ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
+ break;
+ }
+ return ret;
+}
+
+int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int cnt, que;
+ unsigned long flags;
+ struct req_que *req;
+
+ /* find the bsg job from the active list of commands */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ if (((sp->type == SRB_CT_CMD) ||
+ (sp->type == SRB_ELS_CMD_HST) ||
+ (sp->type == SRB_FXIOCB_BCMD))
+ && (sp->u.bsg_job == bsg_job)) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ ql_log(ql_log_warn, vha, 0x7089,
+ "mbx abort_command "
+ "failed.\n");
+ bsg_job->req->errors =
+ bsg_job->reply->result = -EIO;
+ } else {
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "mbx abort_command "
+ "success.\n");
+ bsg_job->req->errors =
+ bsg_job->reply->result = 0;
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ goto done;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
+ bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ return 0;
+
+done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ sp->free(vha, sp);
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 000000000..d38f9efa5
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,235 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_BSG_H
+#define __QLA_BSG_H
+
+/* BSG Vendor specific commands */
+#define QL_VND_LOOPBACK 0x01
+#define QL_VND_A84_RESET 0x02
+#define QL_VND_A84_UPDATE_FW 0x03
+#define QL_VND_A84_MGMT_CMD 0x04
+#define QL_VND_IIDMA 0x05
+#define QL_VND_FCP_PRIO_CFG_CMD 0x06
+#define QL_VND_READ_FLASH 0x07
+#define QL_VND_UPDATE_FLASH 0x08
+#define QL_VND_SET_FRU_VERSION 0x0B
+#define QL_VND_READ_FRU_STATUS 0x0C
+#define QL_VND_WRITE_FRU_STATUS 0x0D
+#define QL_VND_DIAG_IO_CMD 0x0A
+#define QL_VND_WRITE_I2C 0x10
+#define QL_VND_READ_I2C 0x11
+#define QL_VND_FX00_MGMT_CMD 0x12
+#define QL_VND_SERDES_OP 0x13
+#define QL_VND_SERDES_OP_EX 0x14
+
+/* BSG Vendor specific subcode returns */
+#define EXT_STATUS_OK 0
+#define EXT_STATUS_ERR 1
+#define EXT_STATUS_BUSY 2
+#define EXT_STATUS_INVALID_PARAM 6
+#define EXT_STATUS_DATA_OVERRUN 7
+#define EXT_STATUS_DATA_UNDERRUN 8
+#define EXT_STATUS_MAILBOX 11
+#define EXT_STATUS_NO_MEMORY 17
+#define EXT_STATUS_DEVICE_OFFLINE 22
+
+/*
+ * To support bidirectional iocb
+ * BSG Vendor specific returns
+ */
+#define EXT_STATUS_NOT_SUPPORTED 27
+#define EXT_STATUS_INVALID_CFG 28
+#define EXT_STATUS_DMA_ERR 29
+#define EXT_STATUS_TIMEOUT 30
+#define EXT_STATUS_THREAD_FAILED 31
+#define EXT_STATUS_DATA_CMP_FAILED 32
+
+/* BSG definations for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD 0
+#define INT_DEF_LB_ECHO_CMD 1
+
+/* Loopback related definations */
+#define INTERNAL_LOOPBACK 0xF1
+#define EXTERNAL_LOOPBACK 0xF2
+#define ENABLE_INTERNAL_LOOPBACK 0x02
+#define ENABLE_EXTERNAL_LOOPBACK 0x04
+#define INTERNAL_LOOPBACK_MASK 0x000E
+#define MAX_ELS_FRAME_PAYLOAD 252
+#define ELS_OPCODE_BYTE 0x10
+
+/* BSG Vendor specific definations */
+#define A84_ISSUE_WRITE_TYPE_CMD 0
+#define A84_ISSUE_READ_TYPE_CMD 1
+#define A84_CLEANUP_CMD 2
+#define A84_ISSUE_RESET_OP_FW 3
+#define A84_ISSUE_RESET_DIAG_FW 4
+#define A84_ISSUE_UPDATE_OPFW_CMD 5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
+
+struct qla84_mgmt_param {
+ union {
+ struct {
+ uint32_t start_addr;
+ } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+ struct {
+ uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF 1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
+#define QLA84_MGMT_CONFIG_ID_PAUSE 3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
+
+ uint32_t param0;
+ uint32_t param1;
+ } config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+ struct {
+ uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
+#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
+
+ uint32_t context;
+/*
+* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+*/
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
+
+/*
+* context definitions for QLA84_MGMT_INFO_PORT_STAT
+*/
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
+
+
+/*
+* context definitions for QLA84_MGMT_INFO_LIF_STAT
+*/
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
+
+ } info; /* for QLA84_MGMT_GET_INFO */
+ } u;
+};
+
+struct qla84_msg_mgmt {
+ uint16_t cmd;
+#define QLA84_MGMT_READ_MEM 0x00
+#define QLA84_MGMT_WRITE_MEM 0x01
+#define QLA84_MGMT_CHNG_CONFIG 0x02
+#define QLA84_MGMT_GET_INFO 0x03
+ uint16_t rsrvd;
+ struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+ uint32_t len; /* bytes in payload following this struct */
+ uint8_t payload[0]; /* payload for cmd */
+};
+
+struct qla_bsg_a84_mgmt {
+ struct qla84_msg_mgmt mgmt;
+} __attribute__ ((packed));
+
+struct qla_scsi_addr {
+ uint16_t bus;
+ uint16_t target;
+} __attribute__ ((packed));
+
+struct qla_ext_dest_addr {
+ union {
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
+ uint8_t id[4];
+ struct qla_scsi_addr scsi_addr;
+ } dest_addr;
+ uint16_t dest_type;
+#define EXT_DEF_TYPE_WWPN 2
+ uint16_t lun;
+ uint16_t padding[2];
+} __attribute__ ((packed));
+
+struct qla_port_param {
+ struct qla_ext_dest_addr fc_scsi_addr;
+ uint16_t mode;
+ uint16_t speed;
+} __attribute__ ((packed));
+
+
+/* FRU VPD */
+
+#define MAX_FRU_SIZE 36
+
+struct qla_field_address {
+ uint16_t offset;
+ uint16_t device;
+ uint16_t option;
+} __packed;
+
+struct qla_field_info {
+ uint8_t version[MAX_FRU_SIZE];
+} __packed;
+
+struct qla_image_version {
+ struct qla_field_address field_address;
+ struct qla_field_info field_info;
+} __packed;
+
+struct qla_image_version_list {
+ uint32_t count;
+ struct qla_image_version version[0];
+} __packed;
+
+struct qla_status_reg {
+ struct qla_field_address field_address;
+ uint8_t status_reg;
+ uint8_t reserved[7];
+} __packed;
+
+struct qla_i2c_access {
+ uint16_t device;
+ uint16_t offset;
+ uint16_t option;
+ uint16_t length;
+ uint8_t buffer[0x40];
+} __packed;
+
+/* 26xx serdes register interface */
+
+/* serdes reg commands */
+#define INT_SC_SERDES_READ_REG 1
+#define INT_SC_SERDES_WRITE_REG 2
+
+struct qla_serdes_reg {
+ uint16_t cmd;
+ uint16_t addr;
+ uint16_t val;
+} __packed;
+
+struct qla_serdes_reg_ex {
+ uint16_t cmd;
+ uint32_t addr;
+ uint32_t val;
+} __packed;
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
new file mode 100644
index 000000000..0e6ee3ca3
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -0,0 +1,2696 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+/*
+ * Table for showing the current message id in use for particular level
+ * Change this table for addition of log/debug messages.
+ * ----------------------------------------------------------------------
+ * | Level | Last Value Used | Holes |
+ * ----------------------------------------------------------------------
+ * | Module Init and Probe | 0x017f | 0x0146 |
+ * | | | 0x015b-0x0160 |
+ * | | | 0x016e-0x0170 |
+ * | Mailbox commands | 0x118d | 0x1115-0x1116 |
+ * | | | 0x111a-0x111b |
+ * | Device Discovery | 0x2016 | 0x2020-0x2022, |
+ * | | | 0x2011-0x2012, |
+ * | | | 0x2099-0x20a4 |
+ * | Queue Command and IO tracing | 0x3059 | 0x300b |
+ * | | | 0x3027-0x3028 |
+ * | | | 0x303d-0x3041 |
+ * | | | 0x302d,0x3033 |
+ * | | | 0x3036,0x3038 |
+ * | | | 0x303a |
+ * | DPC Thread | 0x4023 | 0x4002,0x4013 |
+ * | Async Events | 0x5087 | 0x502b-0x502f |
+ * | | | 0x5047 |
+ * | | | 0x5084,0x5075 |
+ * | | | 0x503d,0x5044 |
+ * | | | 0x507b,0x505f |
+ * | Timer Routines | 0x6012 | |
+ * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
+ * | | | 0x7020,0x7024 |
+ * | | | 0x7039,0x7045 |
+ * | | | 0x7073-0x7075 |
+ * | | | 0x70a5-0x70a6 |
+ * | | | 0x70a8,0x70ab |
+ * | | | 0x70ad-0x70ae |
+ * | | | 0x70d7-0x70db |
+ * | | | 0x70de-0x70df |
+ * | Task Management | 0x803d | 0x8000,0x800b |
+ * | | | 0x8019 |
+ * | | | 0x8025,0x8026 |
+ * | | | 0x8031,0x8032 |
+ * | | | 0x8039,0x803c |
+ * | AER/EEH | 0x9011 | |
+ * | Virtual Port | 0xa007 | |
+ * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
+ * | | | 0xb09e,0xb0ae |
+ * | | | 0xb0c3,0xb0c6 |
+ * | | | 0xb0e0-0xb0ef |
+ * | | | 0xb085,0xb0dc |
+ * | | | 0xb107,0xb108 |
+ * | | | 0xb111,0xb11e |
+ * | | | 0xb12c,0xb12d |
+ * | | | 0xb13a,0xb142 |
+ * | | | 0xb13c-0xb140 |
+ * | | | 0xb149 |
+ * | MultiQ | 0xc00c | |
+ * | Misc | 0xd300 | 0xd016-0xd017 |
+ * | | | 0xd021,0xd024 |
+ * | | | 0xd025,0xd029 |
+ * | | | 0xd02a,0xd02e |
+ * | | | 0xd031-0xd0ff |
+ * | | | 0xd101-0xd1fe |
+ * | | | 0xd214-0xd2fe |
+ * | Target Mode | 0xe079 | |
+ * | Target Mode Management | 0xf072 | 0xf002 |
+ * | | | 0xf046-0xf049 |
+ * | Target Mode Task Management | 0x1000b | |
+ * ----------------------------------------------------------------------
+ */
+
+#include "qla_def.h"
+
+#include <linux/delay.h>
+
+static uint32_t ql_dbg_offset = 0x800;
+
+static inline void
+qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
+{
+ fw_dump->fw_major_version = htonl(ha->fw_major_version);
+ fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
+ fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
+ fw_dump->fw_attributes = htonl(ha->fw_attributes);
+
+ fw_dump->vendor = htonl(ha->pdev->vendor);
+ fw_dump->device = htonl(ha->pdev->device);
+ fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
+ fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
+}
+
+static inline void *
+qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
+{
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ /* Request queue. */
+ memcpy(ptr, req->ring, req->length *
+ sizeof(request_t));
+
+ /* Response queue. */
+ ptr += req->length * sizeof(request_t);
+ memcpy(ptr, rsp->ring, rsp->length *
+ sizeof(response_t));
+
+ return ptr + (rsp->length * sizeof(response_t));
+}
+
+int
+qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+ uint32_t ram_dwords, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, dwords, idx;
+ uint16_t mb0, mb1;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint32_t *dump = (uint32_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ dwords = qla2x00_gid_list_size(ha) / 4;
+ for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+ cnt += dwords, addr += dwords) {
+ if (cnt + dwords > ram_dwords)
+ dwords = ram_dwords - cnt;
+
+ WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+ WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+ WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+ WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+ WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+
+ WRT_REG_WORD(&reg->mailbox9, 0);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ ha->flags.mbox_int = 0;
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_REG_WORD(&reg->mailbox0);
+ mb1 = RD_REG_WORD(&reg->mailbox1);
+
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+
+ /* Clear this intr; it wasn't a mailbox intr */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+ ha->flags.mbox_int = 1;
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < dwords; idx++)
+ ram[cnt + idx] = IS_QLA27XX(ha) ?
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+ return rval;
+}
+
+int
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+ uint32_t ram_dwords, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, dwords, idx;
+ uint16_t mb0;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint32_t *dump = (uint32_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ dwords = qla2x00_gid_list_size(ha) / 4;
+ for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+ cnt += dwords, addr += dwords) {
+ if (cnt + dwords > ram_dwords)
+ dwords = ram_dwords - cnt;
+
+ WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+ WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+ WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+ WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+ WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ ha->flags.mbox_int = 0;
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_REG_WORD(&reg->mailbox0);
+
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+
+ /* Clear this intr; it wasn't a mailbox intr */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+ ha->flags.mbox_int = 1;
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < dwords; idx++)
+ ram[cnt + idx] = IS_QLA27XX(ha) ?
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
+ return rval;
+}
+
+static int
+qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
+ uint32_t cram_size, void **nxt)
+{
+ int rval;
+
+ /* Code RAM. */
+ rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+ /* External Memory. */
+ rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
+ ha->fw_memory_size - 0x100000 + 1, nxt);
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+ return rval;
+}
+
+static uint32_t *
+qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
+ uint32_t count, uint32_t *buf)
+{
+ uint32_t __iomem *dmp_reg;
+
+ WRT_REG_DWORD(&reg->iobase_addr, iobase);
+ dmp_reg = &reg->iobase_window;
+ while (count--)
+ *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ return buf;
+}
+
+void
+qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
+{
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+
+ /* 100 usec delay is sufficient enough for hardware to pause RISC */
+ udelay(100);
+ if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+ set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
+}
+
+int
+qla24xx_soft_reset(struct qla_hw_data *ha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t cnt;
+ uint16_t wd;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ /*
+ * Reset RISC. The delay is dependent on system architecture.
+ * Driver can proceed with the reset sequence after waiting
+ * for a timeout period.
+ */
+ WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ break;
+
+ udelay(10);
+ }
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
+
+ WRT_REG_DWORD(&reg->ctrl_status,
+ CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+ udelay(100);
+
+ /* Wait for soft-reset to complete. */
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_DWORD(&reg->ctrl_status) &
+ CSRX_ISP_SOFT_RESET) == 0)
+ break;
+
+ udelay(10);
+ }
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
+
+ for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(10);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+
+ return rval;
+}
+
+static int
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+ uint32_t ram_words, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, words, idx;
+ uint16_t mb0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint16_t *dump = (uint16_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ words = qla2x00_gid_list_size(ha) / 2;
+ for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
+ cnt += words, addr += words) {
+ if (cnt + words > ram_words)
+ words = ram_words - cnt;
+
+ WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
+ WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
+
+ WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
+ WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
+ WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
+ WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
+
+ WRT_MAILBOX_REG(ha, reg, 4, words);
+ WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ if (stat & HSR_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+ /* Release mailbox registers. */
+ WRT_REG_WORD(&reg->semaphore, 0);
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ } else if (stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ }
+
+ /* clear this intr; it wasn't a mailbox intr */
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < words; idx++)
+ ram[cnt + idx] = swab16(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
+ return rval;
+}
+
+static inline void
+qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
+ uint16_t *buf)
+{
+ uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
+
+ while (count--)
+ *buf++ = htons(RD_REG_WORD(dmp_reg++));
+}
+
+static inline void *
+qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
+{
+ if (!ha->eft)
+ return ptr;
+
+ memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
+ return ptr + ntohl(ha->fw_dump->eft_size);
+}
+
+static inline void *
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ uint32_t cnt;
+ uint32_t *iter_reg;
+ struct qla2xxx_fce_chain *fcec = ptr;
+
+ if (!ha->fce)
+ return ptr;
+
+ *last_chain = &fcec->type;
+ fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
+ fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
+ fce_calc_size(ha->fce_bufs));
+ fcec->size = htonl(fce_calc_size(ha->fce_bufs));
+ fcec->addr_l = htonl(LSD(ha->fce_dma));
+ fcec->addr_h = htonl(MSD(ha->fce_dma));
+
+ iter_reg = fcec->eregs;
+ for (cnt = 0; cnt < 8; cnt++)
+ *iter_reg++ = htonl(ha->fce_mb[cnt]);
+
+ memcpy(iter_reg, ha->fce, ntohl(fcec->size));
+
+ return (char *)iter_reg + ntohl(fcec->size);
+}
+
+static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+ uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ uint32_t num_queues;
+ int que;
+ struct {
+ int length;
+ void *ring;
+ } aq, *aqp;
+
+ if (!ha->tgt.atio_ring)
+ return ptr;
+
+ num_queues = 1;
+ aqp = &aq;
+ aqp->length = ha->tgt.atio_q_length;
+ aqp->ring = ha->tgt.atio_ring;
+
+ for (que = 0; que < num_queues; que++) {
+ /* aqp = ha->atio_q_map[que]; */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (aqp->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(aqp->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+ ptr += aqp->length * sizeof(request_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ struct req_que *req;
+ struct rsp_que *rsp;
+ int que;
+
+ if (!ha->mqenable)
+ return ptr;
+
+ /* Request queues */
+ for (que = 1; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ break;
+
+ /* Add chain. */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (req->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(req->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, req->ring, req->length * sizeof(request_t));
+ ptr += req->length * sizeof(request_t);
+ }
+
+ /* Response queues */
+ for (que = 1; que < ha->max_rsp_queues; que++) {
+ rsp = ha->rsp_q_map[que];
+ if (!rsp)
+ break;
+
+ /* Add chain. */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (rsp->length * sizeof(response_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(rsp->length * sizeof(response_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
+ ptr += rsp->length * sizeof(response_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ uint32_t cnt, que_idx;
+ uint8_t que_cnt;
+ struct qla2xxx_mq_chain *mq = ptr;
+ device_reg_t __iomem *reg;
+
+ if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ return ptr;
+
+ mq = ptr;
+ *last_chain = &mq->type;
+ mq->type = __constant_htonl(DUMP_CHAIN_MQ);
+ mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
+
+ que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
+ ha->max_req_queues : ha->max_rsp_queues;
+ mq->count = htonl(que_cnt);
+ for (cnt = 0; cnt < que_cnt; cnt++) {
+ reg = ISP_QUE_REG(ha, cnt);
+ que_idx = cnt * 4;
+ mq->qregs[que_idx] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ mq->qregs[que_idx+1] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ mq->qregs[que_idx+2] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ mq->qregs[que_idx+3] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
+ }
+
+ return ptr + sizeof(struct qla2xxx_mq_chain);
+}
+
+void
+qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xd000,
+ "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
+ rval, ha->fw_dump_cap_flags);
+ ha->fw_dumped = 0;
+ } else {
+ ql_log(ql_log_info, vha, 0xd001,
+ "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
+ vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
+ ha->fw_dumped = 1;
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
+}
+
+/**
+ * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
+ * @ha: HA context
+ * @hardware_locked: Called with the hardware_lock
+ */
+void
+qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint16_t __iomem *dmp_reg;
+ unsigned long flags;
+ struct qla2300_fw_dump *fw;
+ void *nxt;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd002,
+ "No buffer available for dump.\n");
+ goto qla2300_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd003,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
+ goto qla2300_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp23;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ rval = QLA_SUCCESS;
+ fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+
+ /* Pause RISC. */
+ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ if (IS_QLA2300(ha)) {
+ for (cnt = 30000;
+ (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ } else {
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ udelay(10);
+ }
+
+ if (rval == QLA_SUCCESS) {
+ dmp_reg = &reg->flash_address;
+ for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
+ fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ dmp_reg = &reg->u.isp2300.req_q_in;
+ for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
+ fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ dmp_reg = &reg->u.isp2300.mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x40);
+ qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x50);
+ qla2xxx_read_window(reg, 48, fw->dma_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ dmp_reg = &reg->risc_hw;
+ for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
+ fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ WRT_REG_WORD(&reg->pcr, 0x2000);
+ qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2200);
+ qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2400);
+ qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2600);
+ qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2800);
+ qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2A00);
+ qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2C00);
+ qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2E00);
+ qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
+
+ /* Reset RISC. */
+ WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_WORD(&reg->ctrl_status) &
+ CSR_ISP_SOFT_RESET) == 0)
+ break;
+
+ udelay(10);
+ }
+ }
+
+ if (!IS_QLA2300(ha)) {
+ for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ }
+
+ /* Get RISC SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
+ sizeof(fw->risc_ram) / 2, &nxt);
+
+ /* Get stack SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
+ sizeof(fw->stack_ram) / 2, &nxt);
+
+ /* Get data SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
+ ha->fw_memory_size - 0x11000 + 1, &nxt);
+
+ if (rval == QLA_SUCCESS)
+ qla2xxx_copy_queues(ha, nxt);
+
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla2300_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/**
+ * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
+ * @ha: HA context
+ * @hardware_locked: Called with the hardware_lock
+ */
+void
+qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt, timer;
+ uint16_t risc_address;
+ uint16_t mb0, mb2;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint16_t __iomem *dmp_reg;
+ unsigned long flags;
+ struct qla2100_fw_dump *fw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ risc_address = 0;
+ mb0 = mb2 = 0;
+ flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd004,
+ "No buffer available for dump.\n");
+ goto qla2100_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd005,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
+ goto qla2100_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp21;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ rval = QLA_SUCCESS;
+ fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+
+ /* Pause RISC. */
+ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ if (rval == QLA_SUCCESS) {
+ dmp_reg = &reg->flash_address;
+ for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
+ fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ dmp_reg = &reg->u.isp2100.mailbox0;
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (cnt == 8)
+ dmp_reg = &reg->u_end.isp2200.mailbox8;
+
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ }
+
+ dmp_reg = &reg->u.isp2100.unused_2[0];
+ for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
+ fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ dmp_reg = &reg->risc_hw;
+ for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
+ fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+
+ WRT_REG_WORD(&reg->pcr, 0x2000);
+ qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2100);
+ qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2200);
+ qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2300);
+ qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2400);
+ qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2500);
+ qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2600);
+ qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
+
+ WRT_REG_WORD(&reg->pcr, 0x2700);
+ qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
+
+ WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
+
+ /* Reset the ISP. */
+ WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ }
+
+ for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+
+ /* Pause RISC. */
+ if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
+ (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
+
+ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 30000;
+ (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ if (rval == QLA_SUCCESS) {
+ /* Set memory configuration and timing. */
+ if (IS_QLA2100(ha))
+ WRT_REG_WORD(&reg->mctr, 0xf1);
+ else
+ WRT_REG_WORD(&reg->mctr, 0xf2);
+ RD_REG_WORD(&reg->mctr); /* PCI Posting. */
+
+ /* Release RISC. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ }
+ }
+
+ if (rval == QLA_SUCCESS) {
+ /* Get RISC SRAM. */
+ risc_address = 0x1000;
+ WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ }
+ for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
+ cnt++, risc_address++) {
+ WRT_MAILBOX_REG(ha, reg, 1, risc_address);
+ WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+ for (timer = 6000000; timer != 0; timer--) {
+ /* Check for pending interrupts. */
+ if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
+ if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+ mb2 = RD_MAILBOX_REG(ha, reg, 2);
+
+ WRT_REG_WORD(&reg->semaphore, 0);
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ }
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ fw->risc_ram[cnt] = htons(mb2);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ if (rval == QLA_SUCCESS)
+ qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
+
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla2100_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt;
+ uint32_t risc_address;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla24xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt;
+ void *nxt_chain;
+ uint32_t *last_chain = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (IS_P3P_TYPE(ha))
+ return;
+
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+ ha->fw_dump_cap_flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd006,
+ "No buffer available for dump.\n");
+ goto qla24xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd007,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
+ goto qla24xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp24;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /*
+ * Pause RISC. No need to track timeout, as resetting the chip
+ * is the right approach incase of pause timeout
+ */
+ qla24xx_pause_risc(reg, ha);
+
+ /* Host interface registers. */
+ dmp_reg = &reg->flash_addr;
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
+ qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
+ qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+ qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+ /* Command DMA registers. */
+ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+ iter_reg = fw->xmt1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+ iter_reg = fw->xmt2_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+ iter_reg = fw->xmt3_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+ iter_reg = fw->xmt4_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+ qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+ qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+ qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+
+ rval = qla24xx_soft_reset(ha);
+ if (rval != QLA_SUCCESS)
+ goto qla24xx_fw_dump_failed_0;
+
+ rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+ &nxt);
+ if (rval != QLA_SUCCESS)
+ goto qla24xx_fw_dump_failed_0;
+
+ nxt = qla2xxx_copy_queues(ha, nxt);
+
+ qla24xx_copy_eft(ha, nxt);
+
+ nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla24xx_fw_dump_failed_0:
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla24xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt;
+ uint32_t risc_address;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla25xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt, *nxt_chain;
+ uint32_t *last_chain = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+ ha->fw_dump_cap_flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd008,
+ "No buffer available for dump.\n");
+ goto qla25xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd009,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
+ goto qla25xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp25;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+ ha->fw_dump->version = __constant_htonl(2);
+
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /*
+ * Pause RISC. No need to track timeout, as resetting the chip
+ * is the right approach incase of pause timeout
+ */
+ qla24xx_pause_risc(reg, ha);
+
+ /* Host/Risc registers. */
+ iter_reg = fw->host_risc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+
+ /* PCIe registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ dmp_reg = &reg->iobase_c4;
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ WRT_REG_DWORD(&reg->iobase_window, 0x00);
+ RD_REG_DWORD(&reg->iobase_window);
+
+ /* Host interface registers. */
+ dmp_reg = &reg->flash_addr;
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* RISC I/O register. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+ iter_reg = fw->xseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+ iter_reg = fw->rseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+ qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+ /* Auxiliary sequence registers. */
+ iter_reg = fw->aseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+
+ iter_reg = fw->aseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+ qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+
+ /* Command DMA registers. */
+ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+ iter_reg = fw->xmt1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+ iter_reg = fw->xmt2_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+ iter_reg = fw->xmt3_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+ iter_reg = fw->xmt4_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+ qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+ qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+ /* Multi queue registers */
+ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+ &last_chain);
+
+ rval = qla24xx_soft_reset(ha);
+ if (rval != QLA_SUCCESS)
+ goto qla25xx_fw_dump_failed_0;
+
+ rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+ &nxt);
+ if (rval != QLA_SUCCESS)
+ goto qla25xx_fw_dump_failed_0;
+
+ nxt = qla2xxx_copy_queues(ha, nxt);
+
+ qla24xx_copy_eft(ha, nxt);
+
+ /* Chain entries -- started with MQ. */
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla25xx_fw_dump_failed_0:
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla25xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt;
+ uint32_t risc_address;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla81xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt, *nxt_chain;
+ uint32_t *last_chain = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+ ha->fw_dump_cap_flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd00a,
+ "No buffer available for dump.\n");
+ goto qla81xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd00b,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
+ goto qla81xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp81;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /*
+ * Pause RISC. No need to track timeout, as resetting the chip
+ * is the right approach incase of pause timeout
+ */
+ qla24xx_pause_risc(reg, ha);
+
+ /* Host/Risc registers. */
+ iter_reg = fw->host_risc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+
+ /* PCIe registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ dmp_reg = &reg->iobase_c4;
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ WRT_REG_DWORD(&reg->iobase_window, 0x00);
+ RD_REG_DWORD(&reg->iobase_window);
+
+ /* Host interface registers. */
+ dmp_reg = &reg->flash_addr;
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* RISC I/O register. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+ iter_reg = fw->xseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+ iter_reg = fw->rseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+ qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+ /* Auxiliary sequence registers. */
+ iter_reg = fw->aseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+
+ iter_reg = fw->aseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+ qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+
+ /* Command DMA registers. */
+ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+ iter_reg = fw->xmt1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+ iter_reg = fw->xmt2_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+ iter_reg = fw->xmt3_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+ iter_reg = fw->xmt4_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+ qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+ qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+ /* Multi queue registers */
+ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+ &last_chain);
+
+ rval = qla24xx_soft_reset(ha);
+ if (rval != QLA_SUCCESS)
+ goto qla81xx_fw_dump_failed_0;
+
+ rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+ &nxt);
+ if (rval != QLA_SUCCESS)
+ goto qla81xx_fw_dump_failed_0;
+
+ nxt = qla2xxx_copy_queues(ha, nxt);
+
+ qla24xx_copy_eft(ha, nxt);
+
+ /* Chain entries -- started with MQ. */
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla81xx_fw_dump_failed_0:
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla81xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt, reg_data;
+ uint32_t risc_address;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla83xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt, *nxt_chain;
+ uint32_t *last_chain = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+ ha->fw_dump_cap_flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd00c,
+ "No buffer available for dump!!!\n");
+ goto qla83xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd00d,
+ "Firmware has been previously dumped (%p) -- ignoring "
+ "request...\n", ha->fw_dump);
+ goto qla83xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp83;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /*
+ * Pause RISC. No need to track timeout, as resetting the chip
+ * is the right approach incase of pause timeout
+ */
+ qla24xx_pause_risc(reg, ha);
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ dmp_reg = &reg->iobase_window;
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ dmp_reg = &reg->unused_4_1[0];
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ dmp_reg = &reg->unused_4_1[2];
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ /* select PCR and disable ecc checking and correction */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+
+ /* Host/Risc registers. */
+ iter_reg = fw->host_risc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7040, 16, iter_reg);
+
+ /* PCIe registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ dmp_reg = &reg->iobase_c4;
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ WRT_REG_DWORD(&reg->iobase_window, 0x00);
+ RD_REG_DWORD(&reg->iobase_window);
+
+ /* Host interface registers. */
+ dmp_reg = &reg->flash_addr;
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* RISC I/O register. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+ iter_reg = fw->xseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+ qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+ iter_reg = fw->rseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+ qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+ qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
+
+ /* Auxiliary sequence registers. */
+ iter_reg = fw->aseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB170, 16, iter_reg);
+
+ iter_reg = fw->aseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+ qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+ qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
+
+ /* Command DMA registers. */
+ iter_reg = fw->cmd_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
+ qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+ iter_reg = fw->xmt1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+ iter_reg = fw->xmt2_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+ iter_reg = fw->xmt3_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+ iter_reg = fw->xmt4_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+ qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+ qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
+
+ /* RQ0 Array registers. */
+ iter_reg = fw->rq0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
+
+ /* RQ1 Array registers. */
+ iter_reg = fw->rq1_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
+
+ /* RP0 Array registers. */
+ iter_reg = fw->rp0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
+
+ /* RP1 Array registers. */
+ iter_reg = fw->rp1_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
+
+ iter_reg = fw->at0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
+
+ /* I/O Queue Control registers. */
+ qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+ /* Multi queue registers */
+ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+ &last_chain);
+
+ rval = qla24xx_soft_reset(ha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xd00e,
+ "SOFT RESET FAILED, forcing continuation of dump!!!\n");
+ rval = QLA_SUCCESS;
+
+ ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ udelay(5);
+
+ if (!cnt) {
+ nxt = fw->code_ram;
+ nxt += sizeof(fw->code_ram);
+ nxt += (ha->fw_memory_size - 0x100000 + 1);
+ goto copy_queue;
+ } else {
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+ ql_log(ql_log_warn, vha, 0xd010,
+ "bigger hammer success?\n");
+ }
+ }
+
+ rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+ &nxt);
+ if (rval != QLA_SUCCESS)
+ goto qla83xx_fw_dump_failed_0;
+
+copy_queue:
+ nxt = qla2xxx_copy_queues(ha, nxt);
+
+ qla24xx_copy_eft(ha, nxt);
+
+ /* Chain entries -- started with MQ. */
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla83xx_fw_dump_failed_0:
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla83xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/****************************************************************************/
+/* Driver Debug Functions. */
+/****************************************************************************/
+
+static inline int
+ql_mask_match(uint32_t level)
+{
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ return (level & ql2xextended_error_logging) == level;
+}
+
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ * If ql2xextended_error_logging value is correctly set,
+ * this message will appear in the messages file.
+ * vha: Pointer to the scsi_qla_host_t.
+ * id: This is a unique identifier for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
+void
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ pr_warn("%s [%s]-%04x:%ld: %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ vha->host_no, &vaf);
+ } else {
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
+ }
+
+ va_end(va);
+
+}
+
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is not available and pci is available,
+ * i.e., before host allocation. It formats the message and logs it
+ * to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ * If ql2xextended_error_logging value is correctly set,
+ * this message will appear in the messages file.
+ * pdev: Pointer to the struct pci_dev.
+ * id: This is a unique id for the level. It identifies the part
+ * of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
+void
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (pdev == NULL)
+ return;
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ /* <module-name> <dev-name>:<msg-id> Message */
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
+
+ va_end(va);
+}
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file. All the messages will be logged
+ * irrespective of value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ * messages file.
+ * vha: Pointer to the scsi_qla_host_t
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
+void
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
+
+ if (level > ql_errlev)
+ return;
+
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case ql_log_fatal: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_warn:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_info:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
+ }
+
+ va_end(va);
+}
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is not available and pci is available,
+ * i.e., before host allocation. It formats the message and logs
+ * it to the messages file. All the messages are logged irrespective
+ * of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ * messages file.
+ * pdev: Pointer to the struct pci_dev.
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
+void
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
+
+ if (pdev == NULL)
+ return;
+ if (level > ql_errlev)
+ return;
+
+ /* <module-name> <dev-name>:<msg-id> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id);
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case ql_log_fatal: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_warn:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_info:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
+ }
+
+ va_end(va);
+}
+
+void
+ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+{
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+ uint16_t __iomem *mbx_reg;
+
+ if (!ql_mask_match(level))
+ return;
+
+ if (IS_P3P_TYPE(ha))
+ mbx_reg = &reg82->mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha))
+ mbx_reg = &reg24->mailbox0;
+ else
+ mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+ ql_dbg(level, vha, id, "Mailbox registers:\n");
+ for (i = 0; i < 6; i++)
+ ql_dbg(level, vha, id,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
+}
+
+
+void
+ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
+ uint8_t *b, uint32_t size)
+{
+ uint32_t cnt;
+ uint8_t c;
+
+ if (!ql_mask_match(level))
+ return;
+
+ ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
+ "9 Ah Bh Ch Dh Eh Fh\n");
+ ql_dbg(level, vha, id, "----------------------------------"
+ "----------------------------\n");
+
+ ql_dbg(level, vha, id, " ");
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ printk("%02x", (uint32_t) c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
+ }
+ if (cnt % 16)
+ ql_dbg(level, vha, id, "\n");
+}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
new file mode 100644
index 000000000..e1fc4e669
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -0,0 +1,358 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include "qla_def.h"
+
+/*
+ * Firmware Dump structure definition
+ */
+
+struct qla2300_fw_dump {
+ uint16_t hccr;
+ uint16_t pbiu_reg[8];
+ uint16_t risc_host_reg[8];
+ uint16_t mailbox_reg[32];
+ uint16_t resp_dma_reg[32];
+ uint16_t dma_reg[48];
+ uint16_t risc_hdw_reg[16];
+ uint16_t risc_gp0_reg[16];
+ uint16_t risc_gp1_reg[16];
+ uint16_t risc_gp2_reg[16];
+ uint16_t risc_gp3_reg[16];
+ uint16_t risc_gp4_reg[16];
+ uint16_t risc_gp5_reg[16];
+ uint16_t risc_gp6_reg[16];
+ uint16_t risc_gp7_reg[16];
+ uint16_t frame_buf_hdw_reg[64];
+ uint16_t fpm_b0_reg[64];
+ uint16_t fpm_b1_reg[64];
+ uint16_t risc_ram[0xf800];
+ uint16_t stack_ram[0x1000];
+ uint16_t data_ram[1];
+};
+
+struct qla2100_fw_dump {
+ uint16_t hccr;
+ uint16_t pbiu_reg[8];
+ uint16_t mailbox_reg[32];
+ uint16_t dma_reg[48];
+ uint16_t risc_hdw_reg[16];
+ uint16_t risc_gp0_reg[16];
+ uint16_t risc_gp1_reg[16];
+ uint16_t risc_gp2_reg[16];
+ uint16_t risc_gp3_reg[16];
+ uint16_t risc_gp4_reg[16];
+ uint16_t risc_gp5_reg[16];
+ uint16_t risc_gp6_reg[16];
+ uint16_t risc_gp7_reg[16];
+ uint16_t frame_buf_hdw_reg[16];
+ uint16_t fpm_b0_reg[64];
+ uint16_t fpm_b1_reg[64];
+ uint16_t risc_ram[0xf000];
+};
+
+struct qla24xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[7];
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[128];
+ uint32_t xseq_0_reg[16];
+ uint32_t xseq_1_reg[16];
+ uint32_t rseq_gp_reg[128];
+ uint32_t rseq_0_reg[16];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t cmd_dma_reg[16];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[112];
+ uint32_t fpm_hdw_reg[192];
+ uint32_t fb_hdw_reg[176];
+ uint32_t code_ram[0x2000];
+ uint32_t ext_mem[1];
+};
+
+struct qla25xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_risc_reg[32];
+ uint32_t pcie_regs[4];
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[11];
+ uint32_t risc_io_reg;
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[128];
+ uint32_t xseq_0_reg[48];
+ uint32_t xseq_1_reg[16];
+ uint32_t rseq_gp_reg[128];
+ uint32_t rseq_0_reg[32];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t aseq_gp_reg[128];
+ uint32_t aseq_0_reg[32];
+ uint32_t aseq_1_reg[16];
+ uint32_t aseq_2_reg[16];
+ uint32_t cmd_dma_reg[16];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[128];
+ uint32_t fpm_hdw_reg[192];
+ uint32_t fb_hdw_reg[192];
+ uint32_t code_ram[0x2000];
+ uint32_t ext_mem[1];
+};
+
+struct qla81xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_risc_reg[32];
+ uint32_t pcie_regs[4];
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[11];
+ uint32_t risc_io_reg;
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[128];
+ uint32_t xseq_0_reg[48];
+ uint32_t xseq_1_reg[16];
+ uint32_t rseq_gp_reg[128];
+ uint32_t rseq_0_reg[32];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t aseq_gp_reg[128];
+ uint32_t aseq_0_reg[32];
+ uint32_t aseq_1_reg[16];
+ uint32_t aseq_2_reg[16];
+ uint32_t cmd_dma_reg[16];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[128];
+ uint32_t fpm_hdw_reg[224];
+ uint32_t fb_hdw_reg[208];
+ uint32_t code_ram[0x2000];
+ uint32_t ext_mem[1];
+};
+
+struct qla83xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_risc_reg[48];
+ uint32_t pcie_regs[4];
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[11];
+ uint32_t risc_io_reg;
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[256];
+ uint32_t xseq_0_reg[48];
+ uint32_t xseq_1_reg[16];
+ uint32_t xseq_2_reg[16];
+ uint32_t rseq_gp_reg[256];
+ uint32_t rseq_0_reg[32];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t rseq_3_reg[16];
+ uint32_t aseq_gp_reg[256];
+ uint32_t aseq_0_reg[32];
+ uint32_t aseq_1_reg[16];
+ uint32_t aseq_2_reg[16];
+ uint32_t aseq_3_reg[16];
+ uint32_t cmd_dma_reg[64];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[128];
+ uint32_t fpm_hdw_reg[256];
+ uint32_t rq0_array_reg[256];
+ uint32_t rq1_array_reg[256];
+ uint32_t rp0_array_reg[256];
+ uint32_t rp1_array_reg[256];
+ uint32_t queue_control_reg[16];
+ uint32_t fb_hdw_reg[432];
+ uint32_t at0_array_reg[128];
+ uint32_t code_ram[0x2400];
+ uint32_t ext_mem[1];
+};
+
+#define EFT_NUM_BUFFERS 4
+#define EFT_BYTES_PER_BUFFER 0x4000
+#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
+
+#define FCE_NUM_BUFFERS 64
+#define FCE_BYTES_PER_BUFFER 0x400
+#define FCE_SIZE ((FCE_BYTES_PER_BUFFER) * (FCE_NUM_BUFFERS))
+#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
+
+struct qla2xxx_fce_chain {
+ uint32_t type;
+ uint32_t chain_size;
+
+ uint32_t size;
+ uint32_t addr_l;
+ uint32_t addr_h;
+ uint32_t eregs[8];
+};
+
+struct qla2xxx_mq_chain {
+ uint32_t type;
+ uint32_t chain_size;
+
+ uint32_t count;
+ uint32_t qregs[4 * QLA_MQ_SIZE];
+};
+
+struct qla2xxx_mqueue_header {
+ uint32_t queue;
+#define TYPE_REQUEST_QUEUE 0x1
+#define TYPE_RESPONSE_QUEUE 0x2
+#define TYPE_ATIO_QUEUE 0x3
+ uint32_t number;
+ uint32_t size;
+};
+
+struct qla2xxx_mqueue_chain {
+ uint32_t type;
+ uint32_t chain_size;
+};
+
+#define DUMP_CHAIN_VARIANT 0x80000000
+#define DUMP_CHAIN_FCE 0x7FFFFAF0
+#define DUMP_CHAIN_MQ 0x7FFFFAF1
+#define DUMP_CHAIN_QUEUE 0x7FFFFAF2
+#define DUMP_CHAIN_LAST 0x80000000
+
+struct qla2xxx_fw_dump {
+ uint8_t signature[4];
+ uint32_t version;
+
+ uint32_t fw_major_version;
+ uint32_t fw_minor_version;
+ uint32_t fw_subminor_version;
+ uint32_t fw_attributes;
+
+ uint32_t vendor;
+ uint32_t device;
+ uint32_t subsystem_vendor;
+ uint32_t subsystem_device;
+
+ uint32_t fixed_size;
+ uint32_t mem_size;
+ uint32_t req_q_size;
+ uint32_t rsp_q_size;
+
+ uint32_t eft_size;
+ uint32_t eft_addr_l;
+ uint32_t eft_addr_h;
+
+ uint32_t header_size;
+
+ union {
+ struct qla2100_fw_dump isp21;
+ struct qla2300_fw_dump isp23;
+ struct qla24xx_fw_dump isp24;
+ struct qla25xx_fw_dump isp25;
+ struct qla81xx_fw_dump isp81;
+ struct qla83xx_fw_dump isp83;
+ } isp;
+};
+
+#define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK 0x1e400000
+
+#define ql_log_fatal 0 /* display fatal errors */
+#define ql_log_warn 1 /* display critical errors */
+#define ql_log_info 2 /* display all recovered errors */
+#define ql_log_all 3 /* This value is only used by ql_errlev.
+ * No messages will use this value.
+ * This should be always highest value
+ * as compared to other log levels.
+ */
+
+extern int ql_errlev;
+
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+
+/* Debug Levels */
+/* The 0x40000000 is the max value any debug level can have
+ * as ql2xextended_error_logging is of type signed int
+ */
+#define ql_dbg_init 0x40000000 /* Init Debug */
+#define ql_dbg_mbx 0x20000000 /* MBX Debug */
+#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
+#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
+#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */
+#define ql_dbg_async 0x02000000 /* Async events Debug */
+#define ql_dbg_timer 0x01000000 /* Timer Debug */
+#define ql_dbg_user 0x00800000 /* User Space Interations Debug */
+#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
+#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
+#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
+#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
+#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
+#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
+#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
+ * not covered by upper categories
+ */
+#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
+ * This is to be used with other levels where
+ * more verbosity is required. It might not
+ * be applicable to all the levels.
+ */
+#define ql_dbg_tgt 0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+
+extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+ uint32_t, void **);
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+ uint32_t, void **);
+extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
+ struct qla_hw_data *);
+extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
new file mode 100644
index 000000000..e86201d3b
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -0,0 +1,3713 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_DEF_H
+#define __QLA_DEF_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/aer.h>
+#include <linux/mutex.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+
+#include "qla_bsg.h"
+#include "qla_nx.h"
+#include "qla_nx2.h"
+#define QLA2XXX_DRIVER_NAME "qla2xxx"
+#define QLA2XXX_APIDEV "ql2xapidev"
+#define QLA2XXX_MANUFACTURER "QLogic Corporation"
+
+/*
+ * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
+ * but that's fine as we don't look at the last 24 ones for
+ * ISP2100 HBAs.
+ */
+#define MAILBOX_REGISTER_COUNT_2100 8
+#define MAILBOX_REGISTER_COUNT_2200 24
+#define MAILBOX_REGISTER_COUNT 32
+
+#define QLA2200A_RISC_ROM_VER 4
+#define FPM_2300 6
+#define FPM_2310 7
+
+#include "qla_settings.h"
+
+/*
+ * Data bit definitions
+ */
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
+#define LSB(x) ((uint8_t)(x))
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x) ((uint16_t)(x))
+#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
+#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
+
+/*
+ * I/O register
+*/
+
+#define RD_REG_BYTE(addr) readb(addr)
+#define RD_REG_WORD(addr) readw(addr)
+#define RD_REG_DWORD(addr) readl(addr)
+#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr)
+#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr)
+#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr)
+#define WRT_REG_BYTE(addr, data) writeb(data,addr)
+#define WRT_REG_WORD(addr, data) writew(data,addr)
+#define WRT_REG_DWORD(addr, data) writel(data,addr)
+
+/*
+ * ISP83XX specific remote register addresses
+ */
+#define QLA83XX_LED_PORT0 0x00201320
+#define QLA83XX_LED_PORT1 0x00201328
+#define QLA83XX_IDC_DEV_STATE 0x22102384
+#define QLA83XX_IDC_MAJOR_VERSION 0x22102380
+#define QLA83XX_IDC_MINOR_VERSION 0x22102398
+#define QLA83XX_IDC_DRV_PRESENCE 0x22102388
+#define QLA83XX_IDC_DRIVER_ACK 0x2210238c
+#define QLA83XX_IDC_CONTROL 0x22102390
+#define QLA83XX_IDC_AUDIT 0x22102394
+#define QLA83XX_IDC_LOCK_RECOVERY 0x2210239c
+#define QLA83XX_DRIVER_LOCKID 0x22102104
+#define QLA83XX_DRIVER_LOCK 0x8111c028
+#define QLA83XX_DRIVER_UNLOCK 0x8111c02c
+#define QLA83XX_FLASH_LOCKID 0x22102100
+#define QLA83XX_FLASH_LOCK 0x8111c010
+#define QLA83XX_FLASH_UNLOCK 0x8111c014
+#define QLA83XX_DEV_PARTINFO1 0x221023e0
+#define QLA83XX_DEV_PARTINFO2 0x221023e4
+#define QLA83XX_FW_HEARTBEAT 0x221020b0
+#define QLA83XX_PEG_HALT_STATUS1 0x221020a8
+#define QLA83XX_PEG_HALT_STATUS2 0x221020ac
+
+/* 83XX: Macros defining 8200 AEN Reason codes */
+#define IDC_DEVICE_STATE_CHANGE BIT_0
+#define IDC_PEG_HALT_STATUS_CHANGE BIT_1
+#define IDC_NIC_FW_REPORTED_FAILURE BIT_2
+#define IDC_HEARTBEAT_FAILURE BIT_3
+
+/* 83XX: Macros defining 8200 AEN Error-levels */
+#define ERR_LEVEL_NON_FATAL 0x1
+#define ERR_LEVEL_RECOVERABLE_FATAL 0x2
+#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4
+
+/* 83XX: Macros for IDC Version */
+#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01
+#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0
+
+/* 83XX: Macros for scheduling dpc tasks */
+#define QLA83XX_NIC_CORE_RESET 0x1
+#define QLA83XX_IDC_STATE_HANDLER 0x2
+#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3
+
+/* 83XX: Macros for defining IDC-Control bits */
+#define QLA83XX_IDC_RESET_DISABLED BIT_0
+#define QLA83XX_IDC_GRACEFUL_RESET BIT_1
+
+/* 83XX: Macros for different timeouts */
+#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30
+#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10
+#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ)
+
+/* 83XX: Macros for defining class in DEV-Partition Info register */
+#define QLA83XX_CLASS_TYPE_NONE 0x0
+#define QLA83XX_CLASS_TYPE_NIC 0x1
+#define QLA83XX_CLASS_TYPE_FCOE 0x2
+#define QLA83XX_CLASS_TYPE_ISCSI 0x3
+
+/* 83XX: Macros for IDC Lock-Recovery stages */
+#define IDC_LOCK_RECOVERY_STAGE1 0x1 /* Stage1: Intent for
+ * lock-recovery
+ */
+#define IDC_LOCK_RECOVERY_STAGE2 0x2 /* Stage2: Perform lock-recovery */
+
+/* 83XX: Macros for IDC Audit type */
+#define IDC_AUDIT_TIMESTAMP 0x0 /* IDC-AUDIT: Record timestamp of
+ * dev-state change to NEED-RESET
+ * or NEED-QUIESCENT
+ */
+#define IDC_AUDIT_COMPLETION 0x1 /* IDC-AUDIT: Record duration of
+ * reset-recovery completion is
+ * second
+ */
+/* ISP2031: Values for laser on/off */
+#define PORT_0_2031 0x00201340
+#define PORT_1_2031 0x00201350
+#define LASER_ON_2031 0x01800100
+#define LASER_OFF_2031 0x01800180
+
+/*
+ * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
+ * 133Mhz slot.
+ */
+#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr))
+#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr))
+
+/*
+ * Fibre Channel device definitions.
+ */
+#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
+#define MAX_FIBRE_DEVICES_2100 512
+#define MAX_FIBRE_DEVICES_2400 2048
+#define MAX_FIBRE_DEVICES_LOOP 128
+#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
+#define LOOPID_MAP_SIZE (ha->max_fibre_devices)
+#define MAX_FIBRE_LUNS 0xFFFF
+#define MAX_HOST_COUNT 16
+
+/*
+ * Host adapter default definitions.
+ */
+#define MAX_BUSES 1 /* We only have one bus today */
+#define MIN_LUNS 8
+#define MAX_LUNS MAX_FIBRE_LUNS
+#define MAX_CMDS_PER_LUN 255
+
+/*
+ * Fibre Channel device definitions.
+ */
+#define SNS_LAST_LOOP_ID_2100 0xfe
+#define SNS_LAST_LOOP_ID_2300 0x7ff
+
+#define LAST_LOCAL_LOOP_ID 0x7d
+#define SNS_FL_PORT 0x7e
+#define FABRIC_CONTROLLER 0x7f
+#define SIMPLE_NAME_SERVER 0x80
+#define SNS_FIRST_LOOP_ID 0x81
+#define MANAGEMENT_SERVER 0xfe
+#define BROADCAST 0xff
+
+/*
+ * There is no correspondence between an N-PORT id and an AL_PA. Therefore the
+ * valid range of an N-PORT id is 0 through 0x7ef.
+ */
+#define NPH_LAST_HANDLE 0x7ef
+#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
+#define NPH_SNS 0x7fc /* FFFFFC */
+#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
+#define NPH_F_PORT 0x7fe /* FFFFFE */
+#define NPH_IP_BROADCAST 0x7ff /* FFFFFF */
+
+#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
+#include "qla_fw.h"
+/*
+ * Timeout timer counts in seconds
+ */
+#define PORT_RETRY_TIME 1
+#define LOOP_DOWN_TIMEOUT 60
+#define LOOP_DOWN_TIME 255 /* 240 */
+#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
+
+#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define MIN_OUTSTANDING_COMMANDS 128
+
+/* ISP request and response entry counts (37-65535) */
+#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
+#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
+#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
+#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
+#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
+
+struct req_que;
+
+/*
+ * (sd.h is not exported, hence local inclusion)
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+/*
+ * SCSI Request Block
+ */
+struct srb_cmd {
+ struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
+ uint32_t request_sense_length;
+ uint32_t fw_sense_length;
+ uint8_t *request_sense_ptr;
+ void *ctx;
+};
+
+/*
+ * SRB flag definitions
+ */
+#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */
+#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
+#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
+#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
+
+/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
+#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+
+/*
+ * SRB extensions.
+ */
+struct srb_iocb {
+ union {
+ struct {
+ uint16_t flags;
+#define SRB_LOGIN_RETRIED BIT_0
+#define SRB_LOGIN_COND_PLOGI BIT_1
+#define SRB_LOGIN_SKIP_PRLI BIT_2
+ uint16_t data[2];
+ } logio;
+ struct {
+ /*
+ * Values for flags field below are as
+ * defined in tsk_mgmt_entry struct
+ * for control_flags field in qla_fw.h.
+ */
+ uint64_t lun;
+ uint32_t flags;
+ uint32_t data;
+ struct completion comp;
+ __le16 comp_status;
+ } tmf;
+ struct {
+#define SRB_FXDISC_REQ_DMA_VALID BIT_0
+#define SRB_FXDISC_RESP_DMA_VALID BIT_1
+#define SRB_FXDISC_REQ_DWRD_VALID BIT_2
+#define SRB_FXDISC_RSP_DWRD_VALID BIT_3
+#define FXDISC_TIMEOUT 20
+ uint8_t flags;
+ uint32_t req_len;
+ uint32_t rsp_len;
+ void *req_addr;
+ void *rsp_addr;
+ dma_addr_t req_dma_handle;
+ dma_addr_t rsp_dma_handle;
+ __le32 adapter_id;
+ __le32 adapter_id_hi;
+ __le16 req_func_type;
+ __le32 req_data;
+ __le32 req_data_extra;
+ __le32 result;
+ __le32 seq_number;
+ __le16 fw_flags;
+ struct completion fxiocb_comp;
+ __le32 reserved_0;
+ uint8_t reserved_1;
+ } fxiocb;
+ struct {
+ uint32_t cmd_hndl;
+ __le16 comp_status;
+ struct completion comp;
+ } abt;
+ } u;
+
+ struct timer_list timer;
+ void (*timeout)(void *);
+};
+
+/* Values for srb_ctx type */
+#define SRB_LOGIN_CMD 1
+#define SRB_LOGOUT_CMD 2
+#define SRB_ELS_CMD_RPT 3
+#define SRB_ELS_CMD_HST 4
+#define SRB_CT_CMD 5
+#define SRB_ADISC_CMD 6
+#define SRB_TM_CMD 7
+#define SRB_SCSI_CMD 8
+#define SRB_BIDI_CMD 9
+#define SRB_FXIOCB_DCMD 10
+#define SRB_FXIOCB_BCMD 11
+#define SRB_ABT_CMD 12
+
+
+typedef struct srb {
+ atomic_t ref_count;
+ struct fc_port *fcport;
+ uint32_t handle;
+ uint16_t flags;
+ uint16_t type;
+ char *name;
+ int iocbs;
+ union {
+ struct srb_iocb iocb_cmd;
+ struct fc_bsg_job *bsg_job;
+ struct srb_cmd scmd;
+ } u;
+ void (*done)(void *, void *, int);
+ void (*free)(void *, void *);
+} srb_t;
+
+#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
+#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
+#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
+
+#define GET_CMD_SENSE_LEN(sp) \
+ (sp->u.scmd.request_sense_length)
+#define SET_CMD_SENSE_LEN(sp, len) \
+ (sp->u.scmd.request_sense_length = len)
+#define GET_CMD_SENSE_PTR(sp) \
+ (sp->u.scmd.request_sense_ptr)
+#define SET_CMD_SENSE_PTR(sp, ptr) \
+ (sp->u.scmd.request_sense_ptr = ptr)
+#define GET_FW_SENSE_LEN(sp) \
+ (sp->u.scmd.fw_sense_length)
+#define SET_FW_SENSE_LEN(sp, len) \
+ (sp->u.scmd.fw_sense_length = len)
+
+struct msg_echo_lb {
+ dma_addr_t send_dma;
+ dma_addr_t rcv_dma;
+ uint16_t req_sg_cnt;
+ uint16_t rsp_sg_cnt;
+ uint16_t options;
+ uint32_t transfer_size;
+ uint32_t iteration_count;
+};
+
+/*
+ * ISP I/O Register Set structure definitions.
+ */
+struct device_reg_2xxx {
+ uint16_t flash_address; /* Flash BIOS address */
+ uint16_t flash_data; /* Flash BIOS data */
+ uint16_t unused_1[1]; /* Gap */
+ uint16_t ctrl_status; /* Control/Status */
+#define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */
+#define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */
+#define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */
+
+ uint16_t ictrl; /* Interrupt control */
+#define ICR_EN_INT BIT_15 /* ISP enable interrupts. */
+#define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */
+
+ uint16_t istatus; /* Interrupt status */
+#define ISR_RISC_INT BIT_3 /* RISC interrupt */
+
+ uint16_t semaphore; /* Semaphore */
+ uint16_t nvram; /* NVRAM register. */
+#define NVR_DESELECT 0
+#define NVR_BUSY BIT_15
+#define NVR_WRT_ENABLE BIT_14 /* Write enable */
+#define NVR_PR_ENABLE BIT_13 /* Protection register enable */
+#define NVR_DATA_IN BIT_3
+#define NVR_DATA_OUT BIT_2
+#define NVR_SELECT BIT_1
+#define NVR_CLOCK BIT_0
+
+#define NVR_WAIT_CNT 20000
+
+ union {
+ struct {
+ uint16_t mailbox0;
+ uint16_t mailbox1;
+ uint16_t mailbox2;
+ uint16_t mailbox3;
+ uint16_t mailbox4;
+ uint16_t mailbox5;
+ uint16_t mailbox6;
+ uint16_t mailbox7;
+ uint16_t unused_2[59]; /* Gap */
+ } __attribute__((packed)) isp2100;
+ struct {
+ /* Request Queue */
+ uint16_t req_q_in; /* In-Pointer */
+ uint16_t req_q_out; /* Out-Pointer */
+ /* Response Queue */
+ uint16_t rsp_q_in; /* In-Pointer */
+ uint16_t rsp_q_out; /* Out-Pointer */
+
+ /* RISC to Host Status */
+ uint32_t host_status;
+#define HSR_RISC_INT BIT_15 /* RISC interrupt */
+#define HSR_RISC_PAUSED BIT_8 /* RISC Paused */
+
+ /* Host to Host Semaphore */
+ uint16_t host_semaphore;
+ uint16_t unused_3[17]; /* Gap */
+ uint16_t mailbox0;
+ uint16_t mailbox1;
+ uint16_t mailbox2;
+ uint16_t mailbox3;
+ uint16_t mailbox4;
+ uint16_t mailbox5;
+ uint16_t mailbox6;
+ uint16_t mailbox7;
+ uint16_t mailbox8;
+ uint16_t mailbox9;
+ uint16_t mailbox10;
+ uint16_t mailbox11;
+ uint16_t mailbox12;
+ uint16_t mailbox13;
+ uint16_t mailbox14;
+ uint16_t mailbox15;
+ uint16_t mailbox16;
+ uint16_t mailbox17;
+ uint16_t mailbox18;
+ uint16_t mailbox19;
+ uint16_t mailbox20;
+ uint16_t mailbox21;
+ uint16_t mailbox22;
+ uint16_t mailbox23;
+ uint16_t mailbox24;
+ uint16_t mailbox25;
+ uint16_t mailbox26;
+ uint16_t mailbox27;
+ uint16_t mailbox28;
+ uint16_t mailbox29;
+ uint16_t mailbox30;
+ uint16_t mailbox31;
+ uint16_t fb_cmd;
+ uint16_t unused_4[10]; /* Gap */
+ } __attribute__((packed)) isp2300;
+ } u;
+
+ uint16_t fpm_diag_config;
+ uint16_t unused_5[0x4]; /* Gap */
+ uint16_t risc_hw;
+ uint16_t unused_5_1; /* Gap */
+ uint16_t pcr; /* Processor Control Register. */
+ uint16_t unused_6[0x5]; /* Gap */
+ uint16_t mctr; /* Memory Configuration and Timing. */
+ uint16_t unused_7[0x3]; /* Gap */
+ uint16_t fb_cmd_2100; /* Unused on 23XX */
+ uint16_t unused_8[0x3]; /* Gap */
+ uint16_t hccr; /* Host command & control register. */
+#define HCCR_HOST_INT BIT_7 /* Host interrupt bit */
+#define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */
+ /* HCCR commands */
+#define HCCR_RESET_RISC 0x1000 /* Reset RISC */
+#define HCCR_PAUSE_RISC 0x2000 /* Pause RISC */
+#define HCCR_RELEASE_RISC 0x3000 /* Release RISC from reset. */
+#define HCCR_SET_HOST_INT 0x5000 /* Set host interrupt */
+#define HCCR_CLR_HOST_INT 0x6000 /* Clear HOST interrupt */
+#define HCCR_CLR_RISC_INT 0x7000 /* Clear RISC interrupt */
+#define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */
+#define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */
+
+ uint16_t unused_9[5]; /* Gap */
+ uint16_t gpiod; /* GPIO Data register. */
+ uint16_t gpioe; /* GPIO Enable register. */
+#define GPIO_LED_MASK 0x00C0
+#define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000
+#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
+#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080
+#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0
+#define GPIO_LED_ALL_OFF 0x0000
+#define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */
+#define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */
+
+ union {
+ struct {
+ uint16_t unused_10[8]; /* Gap */
+ uint16_t mailbox8;
+ uint16_t mailbox9;
+ uint16_t mailbox10;
+ uint16_t mailbox11;
+ uint16_t mailbox12;
+ uint16_t mailbox13;
+ uint16_t mailbox14;
+ uint16_t mailbox15;
+ uint16_t mailbox16;
+ uint16_t mailbox17;
+ uint16_t mailbox18;
+ uint16_t mailbox19;
+ uint16_t mailbox20;
+ uint16_t mailbox21;
+ uint16_t mailbox22;
+ uint16_t mailbox23; /* Also probe reg. */
+ } __attribute__((packed)) isp2200;
+ } u_end;
+};
+
+struct device_reg_25xxmq {
+ uint32_t req_q_in;
+ uint32_t req_q_out;
+ uint32_t rsp_q_in;
+ uint32_t rsp_q_out;
+ uint32_t atio_q_in;
+ uint32_t atio_q_out;
+};
+
+
+struct device_reg_fx00 {
+ uint32_t mailbox0; /* 00 */
+ uint32_t mailbox1; /* 04 */
+ uint32_t mailbox2; /* 08 */
+ uint32_t mailbox3; /* 0C */
+ uint32_t mailbox4; /* 10 */
+ uint32_t mailbox5; /* 14 */
+ uint32_t mailbox6; /* 18 */
+ uint32_t mailbox7; /* 1C */
+ uint32_t mailbox8; /* 20 */
+ uint32_t mailbox9; /* 24 */
+ uint32_t mailbox10; /* 28 */
+ uint32_t mailbox11;
+ uint32_t mailbox12;
+ uint32_t mailbox13;
+ uint32_t mailbox14;
+ uint32_t mailbox15;
+ uint32_t mailbox16;
+ uint32_t mailbox17;
+ uint32_t mailbox18;
+ uint32_t mailbox19;
+ uint32_t mailbox20;
+ uint32_t mailbox21;
+ uint32_t mailbox22;
+ uint32_t mailbox23;
+ uint32_t mailbox24;
+ uint32_t mailbox25;
+ uint32_t mailbox26;
+ uint32_t mailbox27;
+ uint32_t mailbox28;
+ uint32_t mailbox29;
+ uint32_t mailbox30;
+ uint32_t mailbox31;
+ uint32_t aenmailbox0;
+ uint32_t aenmailbox1;
+ uint32_t aenmailbox2;
+ uint32_t aenmailbox3;
+ uint32_t aenmailbox4;
+ uint32_t aenmailbox5;
+ uint32_t aenmailbox6;
+ uint32_t aenmailbox7;
+ /* Request Queue. */
+ uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
+ uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
+ /* Response Queue. */
+ uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
+ uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
+ /* Init values shadowed on FW Up Event */
+ uint32_t initval0; /* B0 */
+ uint32_t initval1; /* B4 */
+ uint32_t initval2; /* B8 */
+ uint32_t initval3; /* BC */
+ uint32_t initval4; /* C0 */
+ uint32_t initval5; /* C4 */
+ uint32_t initval6; /* C8 */
+ uint32_t initval7; /* CC */
+ uint32_t fwheartbeat; /* D0 */
+ uint32_t pseudoaen; /* D4 */
+};
+
+
+
+typedef union {
+ struct device_reg_2xxx isp;
+ struct device_reg_24xx isp24;
+ struct device_reg_25xxmq isp25mq;
+ struct device_reg_82xx isp82;
+ struct device_reg_fx00 ispfx00;
+} __iomem device_reg_t;
+
+#define ISP_REQ_Q_IN(ha, reg) \
+ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+ &(reg)->u.isp2100.mailbox4 : \
+ &(reg)->u.isp2300.req_q_in)
+#define ISP_REQ_Q_OUT(ha, reg) \
+ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+ &(reg)->u.isp2100.mailbox4 : \
+ &(reg)->u.isp2300.req_q_out)
+#define ISP_RSP_Q_IN(ha, reg) \
+ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+ &(reg)->u.isp2100.mailbox5 : \
+ &(reg)->u.isp2300.rsp_q_in)
+#define ISP_RSP_Q_OUT(ha, reg) \
+ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+ &(reg)->u.isp2100.mailbox5 : \
+ &(reg)->u.isp2300.rsp_q_out)
+
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
+#define MAILBOX_REG(ha, reg, num) \
+ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+ (num < 8 ? \
+ &(reg)->u.isp2100.mailbox0 + (num) : \
+ &(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \
+ &(reg)->u.isp2300.mailbox0 + (num))
+#define RD_MAILBOX_REG(ha, reg, num) \
+ RD_REG_WORD(MAILBOX_REG(ha, reg, num))
+#define WRT_MAILBOX_REG(ha, reg, num, data) \
+ WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data)
+
+#define FB_CMD_REG(ha, reg) \
+ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+ &(reg)->fb_cmd_2100 : \
+ &(reg)->u.isp2300.fb_cmd)
+#define RD_FB_CMD_REG(ha, reg) \
+ RD_REG_WORD(FB_CMD_REG(ha, reg))
+#define WRT_FB_CMD_REG(ha, reg, data) \
+ WRT_REG_WORD(FB_CMD_REG(ha, reg), data)
+
+typedef struct {
+ uint32_t out_mb; /* outbound from driver */
+ uint32_t in_mb; /* Incoming from RISC */
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ long buf_size;
+ void *bufp;
+ uint32_t tov;
+ uint8_t flags;
+#define MBX_DMA_IN BIT_0
+#define MBX_DMA_OUT BIT_1
+#define IOCTL_CMD BIT_2
+} mbx_cmd_t;
+
+struct mbx_cmd_32 {
+ uint32_t out_mb; /* outbound from driver */
+ uint32_t in_mb; /* Incoming from RISC */
+ uint32_t mb[MAILBOX_REGISTER_COUNT];
+ long buf_size;
+ void *bufp;
+ uint32_t tov;
+ uint8_t flags;
+#define MBX_DMA_IN BIT_0
+#define MBX_DMA_OUT BIT_1
+#define IOCTL_CMD BIT_2
+};
+
+
+#define MBX_TOV_SECONDS 30
+
+/*
+ * ISP product identification definitions in mailboxes after reset.
+ */
+#define PROD_ID_1 0x4953
+#define PROD_ID_2 0x0000
+#define PROD_ID_2a 0x5020
+#define PROD_ID_3 0x2020
+
+/*
+ * ISP mailbox Self-Test status codes
+ */
+#define MBS_FRM_ALIVE 0 /* Firmware Alive. */
+#define MBS_CHKSUM_ERR 1 /* Checksum Error. */
+#define MBS_BUSY 4 /* Busy. */
+
+/*
+ * ISP mailbox command complete status codes
+ */
+#define MBS_COMMAND_COMPLETE 0x4000
+#define MBS_INVALID_COMMAND 0x4001
+#define MBS_HOST_INTERFACE_ERROR 0x4002
+#define MBS_TEST_FAILED 0x4003
+#define MBS_COMMAND_ERROR 0x4005
+#define MBS_COMMAND_PARAMETER_ERROR 0x4006
+#define MBS_PORT_ID_USED 0x4007
+#define MBS_LOOP_ID_USED 0x4008
+#define MBS_ALL_IDS_IN_USE 0x4009
+#define MBS_NOT_LOGGED_IN 0x400A
+#define MBS_LINK_DOWN_ERROR 0x400B
+#define MBS_DIAG_ECHO_TEST_ERROR 0x400C
+
+/*
+ * ISP mailbox asynchronous event status codes
+ */
+#define MBA_ASYNC_EVENT 0x8000 /* Asynchronous event. */
+#define MBA_RESET 0x8001 /* Reset Detected. */
+#define MBA_SYSTEM_ERR 0x8002 /* System Error. */
+#define MBA_REQ_TRANSFER_ERR 0x8003 /* Request Transfer Error. */
+#define MBA_RSP_TRANSFER_ERR 0x8004 /* Response Transfer Error. */
+#define MBA_WAKEUP_THRES 0x8005 /* Request Queue Wake-up. */
+#define MBA_LIP_OCCURRED 0x8010 /* Loop Initialization Procedure */
+ /* occurred. */
+#define MBA_LOOP_UP 0x8011 /* FC Loop UP. */
+#define MBA_LOOP_DOWN 0x8012 /* FC Loop Down. */
+#define MBA_LIP_RESET 0x8013 /* LIP reset occurred. */
+#define MBA_PORT_UPDATE 0x8014 /* Port Database update. */
+#define MBA_RSCN_UPDATE 0x8015 /* Register State Chg Notification. */
+#define MBA_LIP_F8 0x8016 /* Received a LIP F8. */
+#define MBA_LOOP_INIT_ERR 0x8017 /* Loop Initialization Error. */
+#define MBA_FABRIC_AUTH_REQ 0x801b /* Fabric Authentication Required. */
+#define MBA_SCSI_COMPLETION 0x8020 /* SCSI Command Complete. */
+#define MBA_CTIO_COMPLETION 0x8021 /* CTIO Complete. */
+#define MBA_IP_COMPLETION 0x8022 /* IP Transmit Command Complete. */
+#define MBA_IP_RECEIVE 0x8023 /* IP Received. */
+#define MBA_IP_BROADCAST 0x8024 /* IP Broadcast Received. */
+#define MBA_IP_LOW_WATER_MARK 0x8025 /* IP Low Water Mark reached. */
+#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */
+#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */
+ /* used. */
+#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */
+#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */
+#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */
+#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */
+#define MBA_CMPLT_3_16BIT 0x8033 /* Completion 3 16bit IOSB. */
+#define MBA_CMPLT_4_16BIT 0x8034 /* Completion 4 16bit IOSB. */
+#define MBA_CMPLT_5_16BIT 0x8035 /* Completion 5 16bit IOSB. */
+#define MBA_CHG_IN_CONNECTION 0x8036 /* Change in connection mode. */
+#define MBA_RIO_RESPONSE 0x8040 /* RIO response queue update. */
+#define MBA_ZIO_RESPONSE 0x8040 /* ZIO response queue update. */
+#define MBA_CMPLT_2_32BIT 0x8042 /* Completion 2 32bit IOSB. */
+#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */
+#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
+#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
+#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */
+#define MBA_FW_STARTING 0x8051 /* Firmware starting */
+#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
+#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
+#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
+#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
+#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
+#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
+ Notification */
+#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
+#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
+#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */
+/* 83XX FCoE specific */
+#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
+
+/* Interrupt type codes */
+#define INTR_ROM_MB_SUCCESS 0x1
+#define INTR_ROM_MB_FAILED 0x2
+#define INTR_MB_SUCCESS 0x10
+#define INTR_MB_FAILED 0x11
+#define INTR_ASYNC_EVENT 0x12
+#define INTR_RSP_QUE_UPDATE 0x13
+#define INTR_RSP_QUE_UPDATE_83XX 0x14
+#define INTR_ATIO_QUE_UPDATE 0x1C
+#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
+
+/* ISP mailbox loopback echo diagnostic error code */
+#define MBS_LB_RESET 0x17
+/*
+ * Firmware options 1, 2, 3.
+ */
+#define FO1_AE_ON_LIPF8 BIT_0
+#define FO1_AE_ALL_LIP_RESET BIT_1
+#define FO1_CTIO_RETRY BIT_3
+#define FO1_DISABLE_LIP_F7_SW BIT_4
+#define FO1_DISABLE_100MS_LOS_WAIT BIT_5
+#define FO1_DISABLE_GPIO6_7 BIT_6 /* LED bits */
+#define FO1_AE_ON_LOOP_INIT_ERR BIT_7
+#define FO1_SET_EMPHASIS_SWING BIT_8
+#define FO1_AE_AUTO_BYPASS BIT_9
+#define FO1_ENABLE_PURE_IOCB BIT_10
+#define FO1_AE_PLOGI_RJT BIT_11
+#define FO1_ENABLE_ABORT_SEQUENCE BIT_12
+#define FO1_AE_QUEUE_FULL BIT_13
+
+#define FO2_ENABLE_ATIO_TYPE_3 BIT_0
+#define FO2_REV_LOOPBACK BIT_1
+
+#define FO3_ENABLE_EMERG_IOCB BIT_0
+#define FO3_AE_RND_ERROR BIT_1
+
+/* 24XX additional firmware options */
+#define ADD_FO_COUNT 3
+#define ADD_FO1_DISABLE_GPIO_LED_CTRL BIT_6 /* LED bits */
+#define ADD_FO1_ENABLE_PUREX_IOCB BIT_10
+
+#define ADD_FO2_ENABLE_SEL_CLS2 BIT_5
+
+#define ADD_FO3_NO_ABT_ON_LINK_DOWN BIT_14
+
+/*
+ * ISP mailbox commands
+ */
+#define MBC_LOAD_RAM 1 /* Load RAM. */
+#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware. */
+#define MBC_READ_RAM_WORD 5 /* Read RAM word. */
+#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */
+#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum. */
+#define MBC_GET_FIRMWARE_VERSION 8 /* Get firmware revision. */
+#define MBC_LOAD_RISC_RAM 9 /* Load RAM command. */
+#define MBC_DUMP_RISC_RAM 0xa /* Dump RAM command. */
+#define MBC_LOAD_RISC_RAM_EXTENDED 0xb /* Load RAM extended. */
+#define MBC_DUMP_RISC_RAM_EXTENDED 0xc /* Dump RAM extended. */
+#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */
+#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */
+#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */
+#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware. */
+#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */
+#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */
+#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */
+#define MBC_RESET 0x18 /* Reset. */
+#define MBC_GET_ADAPTER_LOOP_ID 0x20 /* Get loop id of ISP2200. */
+#define MBC_GET_RETRY_COUNT 0x22 /* Get f/w retry cnt/delay. */
+#define MBC_DISABLE_VI 0x24 /* Disable VI operation. */
+#define MBC_ENABLE_VI 0x25 /* Enable VI operation. */
+#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */
+#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */
+#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */
+#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */
+#define MBC_GET_RESOURCE_COUNTS 0x42 /* Get Resource Counts. */
+#define MBC_NON_PARTICIPATE 0x43 /* Non-Participating Mode. */
+#define MBC_DIAGNOSTIC_ECHO 0x44 /* Diagnostic echo. */
+#define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */
+#define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. */
+#define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */
+#define MBC_CONFIGURE_VF 0x4b /* Configure VFs */
+#define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */
+#define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */
+#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
+#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
+#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
+#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */
+#define MBC_DATA_RATE 0x5d /* Data Rate */
+#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
+#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */
+ /* Initialization Procedure */
+#define MBC_GET_FC_AL_POSITION_MAP 0x63 /* Get FC_AL Position Map. */
+#define MBC_GET_PORT_DATABASE 0x64 /* Get Port Database. */
+#define MBC_CLEAR_ACA 0x65 /* Clear ACA. */
+#define MBC_TARGET_RESET 0x66 /* Target Reset. */
+#define MBC_CLEAR_TASK_SET 0x67 /* Clear Task Set. */
+#define MBC_ABORT_TASK_SET 0x68 /* Abort Task Set. */
+#define MBC_GET_FIRMWARE_STATE 0x69 /* Get firmware state. */
+#define MBC_GET_PORT_NAME 0x6a /* Get port name. */
+#define MBC_GET_LINK_STATUS 0x6b /* Get port link status. */
+#define MBC_LIP_RESET 0x6c /* LIP reset. */
+#define MBC_SEND_SNS_COMMAND 0x6e /* Send Simple Name Server */
+ /* commandd. */
+#define MBC_LOGIN_FABRIC_PORT 0x6f /* Login fabric port. */
+#define MBC_SEND_CHANGE_REQUEST 0x70 /* Send Change Request. */
+#define MBC_LOGOUT_FABRIC_PORT 0x71 /* Logout fabric port. */
+#define MBC_LIP_FULL_LOGIN 0x72 /* Full login LIP. */
+#define MBC_LOGIN_LOOP_PORT 0x74 /* Login Loop Port. */
+#define MBC_PORT_NODE_NAME_LIST 0x75 /* Get port/node name list. */
+#define MBC_INITIALIZE_RECEIVE_QUEUE 0x77 /* Initialize receive queue */
+#define MBC_UNLOAD_IP 0x79 /* Shutdown IP */
+#define MBC_GET_ID_LIST 0x7C /* Get Port ID list. */
+#define MBC_SEND_LFA_COMMAND 0x7D /* Send Loop Fabric Address */
+#define MBC_LUN_RESET 0x7E /* Send LUN reset */
+
+/*
+ * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
+ * should be defined with MBC_MR_*
+ */
+#define MBC_MR_DRV_SHUTDOWN 0x6A
+
+/*
+ * ISP24xx mailbox commands
+ */
+#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */
+#define MBC_READ_SERDES 0x4 /* Read serdes word. */
+#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */
+#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
+#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
+#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
+#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */
+#define MBC_TRACE_CONTROL 0x27 /* Trace control command. */
+#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */
+#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */
+#define MBC_READ_SFP 0x31 /* Read SFP Data. */
+#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */
+#define MBC_DPORT_DIAGNOSTICS 0x47 /* D-Port Diagnostics */
+#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */
+#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */
+#define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */
+#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */
+#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
+#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */
+#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
+#define MBC_PORT_RESET 0x120 /* Port Reset */
+#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
+#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
+
+/*
+ * ISP81xx mailbox commands
+ */
+#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */
+
+/*
+ * ISP8044 mailbox commands
+ */
+#define MBC_SET_GET_ETH_SERDES_REG 0x150
+#define HCS_WRITE_SERDES 0x3
+#define HCS_READ_SERDES 0x4
+
+/* Firmware return data sizes */
+#define FCAL_MAP_SIZE 128
+
+/* Mailbox bit definitions for out_mb and in_mb */
+#define MBX_31 BIT_31
+#define MBX_30 BIT_30
+#define MBX_29 BIT_29
+#define MBX_28 BIT_28
+#define MBX_27 BIT_27
+#define MBX_26 BIT_26
+#define MBX_25 BIT_25
+#define MBX_24 BIT_24
+#define MBX_23 BIT_23
+#define MBX_22 BIT_22
+#define MBX_21 BIT_21
+#define MBX_20 BIT_20
+#define MBX_19 BIT_19
+#define MBX_18 BIT_18
+#define MBX_17 BIT_17
+#define MBX_16 BIT_16
+#define MBX_15 BIT_15
+#define MBX_14 BIT_14
+#define MBX_13 BIT_13
+#define MBX_12 BIT_12
+#define MBX_11 BIT_11
+#define MBX_10 BIT_10
+#define MBX_9 BIT_9
+#define MBX_8 BIT_8
+#define MBX_7 BIT_7
+#define MBX_6 BIT_6
+#define MBX_5 BIT_5
+#define MBX_4 BIT_4
+#define MBX_3 BIT_3
+#define MBX_2 BIT_2
+#define MBX_1 BIT_1
+#define MBX_0 BIT_0
+
+#define RNID_TYPE_SET_VERSION 0x9
+#define RNID_TYPE_ASIC_TEMP 0xC
+
+/*
+ * Firmware state codes from get firmware state mailbox command
+ */
+#define FSTATE_CONFIG_WAIT 0
+#define FSTATE_WAIT_AL_PA 1
+#define FSTATE_WAIT_LOGIN 2
+#define FSTATE_READY 3
+#define FSTATE_LOSS_OF_SYNC 4
+#define FSTATE_ERROR 5
+#define FSTATE_REINIT 6
+#define FSTATE_NON_PART 7
+
+#define FSTATE_CONFIG_CORRECT 0
+#define FSTATE_P2P_RCV_LIP 1
+#define FSTATE_P2P_CHOOSE_LOOP 2
+#define FSTATE_P2P_RCV_UNIDEN_LIP 3
+#define FSTATE_FATAL_ERROR 4
+#define FSTATE_LOOP_BACK_CONN 5
+
+/*
+ * Port Database structure definition
+ * Little endian except where noted.
+ */
+#define PORT_DATABASE_SIZE 128 /* bytes */
+typedef struct {
+ uint8_t options;
+ uint8_t control;
+ uint8_t master_state;
+ uint8_t slave_state;
+ uint8_t reserved[2];
+ uint8_t hard_address;
+ uint8_t reserved_1;
+ uint8_t port_id[4];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint16_t execution_throttle;
+ uint16_t execution_count;
+ uint8_t reset_count;
+ uint8_t reserved_2;
+ uint16_t resource_allocation;
+ uint16_t current_allocation;
+ uint16_t queue_head;
+ uint16_t queue_tail;
+ uint16_t transmit_execution_list_next;
+ uint16_t transmit_execution_list_previous;
+ uint16_t common_features;
+ uint16_t total_concurrent_sequences;
+ uint16_t RO_by_information_category;
+ uint8_t recipient;
+ uint8_t initiator;
+ uint16_t receive_data_size;
+ uint16_t concurrent_sequences;
+ uint16_t open_sequences_per_exchange;
+ uint16_t lun_abort_flags;
+ uint16_t lun_stop_flags;
+ uint16_t stop_queue_head;
+ uint16_t stop_queue_tail;
+ uint16_t port_retry_timer;
+ uint16_t next_sequence_id;
+ uint16_t frame_count;
+ uint16_t PRLI_payload_length;
+ uint8_t prli_svc_param_word_0[2]; /* Big endian */
+ /* Bits 15-0 of word 0 */
+ uint8_t prli_svc_param_word_3[2]; /* Big endian */
+ /* Bits 15-0 of word 3 */
+ uint16_t loop_id;
+ uint16_t extended_lun_info_list_pointer;
+ uint16_t extended_lun_stop_list_pointer;
+} port_database_t;
+
+/*
+ * Port database slave/master states
+ */
+#define PD_STATE_DISCOVERY 0
+#define PD_STATE_WAIT_DISCOVERY_ACK 1
+#define PD_STATE_PORT_LOGIN 2
+#define PD_STATE_WAIT_PORT_LOGIN_ACK 3
+#define PD_STATE_PROCESS_LOGIN 4
+#define PD_STATE_WAIT_PROCESS_LOGIN_ACK 5
+#define PD_STATE_PORT_LOGGED_IN 6
+#define PD_STATE_PORT_UNAVAILABLE 7
+#define PD_STATE_PROCESS_LOGOUT 8
+#define PD_STATE_WAIT_PROCESS_LOGOUT_ACK 9
+#define PD_STATE_PORT_LOGOUT 10
+#define PD_STATE_WAIT_PORT_LOGOUT_ACK 11
+
+
+#define QLA_ZIO_MODE_6 (BIT_2 | BIT_1)
+#define QLA_ZIO_DISABLED 0
+#define QLA_ZIO_DEFAULT_TIMER 2
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define ICB_VERSION 1
+typedef struct {
+ uint8_t version;
+ uint8_t reserved_1;
+
+ /*
+ * LSB BIT 0 = Enable Hard Loop Id
+ * LSB BIT 1 = Enable Fairness
+ * LSB BIT 2 = Enable Full-Duplex
+ * LSB BIT 3 = Enable Fast Posting
+ * LSB BIT 4 = Enable Target Mode
+ * LSB BIT 5 = Disable Initiator Mode
+ * LSB BIT 6 = Enable ADISC
+ * LSB BIT 7 = Enable Target Inquiry Data
+ *
+ * MSB BIT 0 = Enable PDBC Notify
+ * MSB BIT 1 = Non Participating LIP
+ * MSB BIT 2 = Descending Loop ID Search
+ * MSB BIT 3 = Acquire Loop ID in LIPA
+ * MSB BIT 4 = Stop PortQ on Full Status
+ * MSB BIT 5 = Full Login after LIP
+ * MSB BIT 6 = Node Name Option
+ * MSB BIT 7 = Ext IFWCB enable bit
+ */
+ uint8_t firmware_options[2];
+
+ uint16_t frame_payload_size;
+ uint16_t max_iocb_allocation;
+ uint16_t execution_throttle;
+ uint8_t retry_count;
+ uint8_t retry_delay; /* unused */
+ uint8_t port_name[WWN_SIZE]; /* Big endian. */
+ uint16_t hard_address;
+ uint8_t inquiry_data;
+ uint8_t login_timeout;
+ uint8_t node_name[WWN_SIZE]; /* Big endian. */
+
+ uint16_t request_q_outpointer;
+ uint16_t response_q_inpointer;
+ uint16_t request_q_length;
+ uint16_t response_q_length;
+ uint32_t request_q_address[2];
+ uint32_t response_q_address[2];
+
+ uint16_t lun_enables;
+ uint8_t command_resource_count;
+ uint8_t immediate_notify_resource_count;
+ uint16_t timeout;
+ uint8_t reserved_2[2];
+
+ /*
+ * LSB BIT 0 = Timer Operation mode bit 0
+ * LSB BIT 1 = Timer Operation mode bit 1
+ * LSB BIT 2 = Timer Operation mode bit 2
+ * LSB BIT 3 = Timer Operation mode bit 3
+ * LSB BIT 4 = Init Config Mode bit 0
+ * LSB BIT 5 = Init Config Mode bit 1
+ * LSB BIT 6 = Init Config Mode bit 2
+ * LSB BIT 7 = Enable Non part on LIHA failure
+ *
+ * MSB BIT 0 = Enable class 2
+ * MSB BIT 1 = Enable ACK0
+ * MSB BIT 2 =
+ * MSB BIT 3 =
+ * MSB BIT 4 = FC Tape Enable
+ * MSB BIT 5 = Enable FC Confirm
+ * MSB BIT 6 = Enable command queuing in target mode
+ * MSB BIT 7 = No Logo On Link Down
+ */
+ uint8_t add_firmware_options[2];
+
+ uint8_t response_accumulation_timer;
+ uint8_t interrupt_delay_timer;
+
+ /*
+ * LSB BIT 0 = Enable Read xfr_rdy
+ * LSB BIT 1 = Soft ID only
+ * LSB BIT 2 =
+ * LSB BIT 3 =
+ * LSB BIT 4 = FCP RSP Payload [0]
+ * LSB BIT 5 = FCP RSP Payload [1] / Sbus enable - 2200
+ * LSB BIT 6 = Enable Out-of-Order frame handling
+ * LSB BIT 7 = Disable Automatic PLOGI on Local Loop
+ *
+ * MSB BIT 0 = Sbus enable - 2300
+ * MSB BIT 1 =
+ * MSB BIT 2 =
+ * MSB BIT 3 =
+ * MSB BIT 4 = LED mode
+ * MSB BIT 5 = enable 50 ohm termination
+ * MSB BIT 6 = Data Rate (2300 only)
+ * MSB BIT 7 = Data Rate (2300 only)
+ */
+ uint8_t special_options[2];
+
+ uint8_t reserved_3[26];
+} init_cb_t;
+
+/*
+ * Get Link Status mailbox command return buffer.
+ */
+#define GLSO_SEND_RPS BIT_0
+#define GLSO_USE_DID BIT_3
+
+struct link_statistics {
+ uint32_t link_fail_cnt;
+ uint32_t loss_sync_cnt;
+ uint32_t loss_sig_cnt;
+ uint32_t prim_seq_err_cnt;
+ uint32_t inval_xmit_word_cnt;
+ uint32_t inval_crc_cnt;
+ uint32_t lip_cnt;
+ uint32_t unused1[0x1a];
+ uint32_t tx_frames;
+ uint32_t rx_frames;
+ uint32_t discarded_frames;
+ uint32_t dropped_frames;
+ uint32_t unused2[1];
+ uint32_t nos_rcvd;
+};
+
+/*
+ * NVRAM Command values.
+ */
+#define NV_START_BIT BIT_2
+#define NV_WRITE_OP (BIT_26+BIT_24)
+#define NV_READ_OP (BIT_26+BIT_25)
+#define NV_ERASE_OP (BIT_26+BIT_25+BIT_24)
+#define NV_MASK_OP (BIT_26+BIT_25+BIT_24)
+#define NV_DELAY_COUNT 10
+
+/*
+ * QLogic ISP2100, ISP2200 and ISP2300 NVRAM structure definition.
+ */
+typedef struct {
+ /*
+ * NVRAM header
+ */
+ uint8_t id[4];
+ uint8_t nvram_version;
+ uint8_t reserved_0;
+
+ /*
+ * NVRAM RISC parameter block
+ */
+ uint8_t parameter_block_version;
+ uint8_t reserved_1;
+
+ /*
+ * LSB BIT 0 = Enable Hard Loop Id
+ * LSB BIT 1 = Enable Fairness
+ * LSB BIT 2 = Enable Full-Duplex
+ * LSB BIT 3 = Enable Fast Posting
+ * LSB BIT 4 = Enable Target Mode
+ * LSB BIT 5 = Disable Initiator Mode
+ * LSB BIT 6 = Enable ADISC
+ * LSB BIT 7 = Enable Target Inquiry Data
+ *
+ * MSB BIT 0 = Enable PDBC Notify
+ * MSB BIT 1 = Non Participating LIP
+ * MSB BIT 2 = Descending Loop ID Search
+ * MSB BIT 3 = Acquire Loop ID in LIPA
+ * MSB BIT 4 = Stop PortQ on Full Status
+ * MSB BIT 5 = Full Login after LIP
+ * MSB BIT 6 = Node Name Option
+ * MSB BIT 7 = Ext IFWCB enable bit
+ */
+ uint8_t firmware_options[2];
+
+ uint16_t frame_payload_size;
+ uint16_t max_iocb_allocation;
+ uint16_t execution_throttle;
+ uint8_t retry_count;
+ uint8_t retry_delay; /* unused */
+ uint8_t port_name[WWN_SIZE]; /* Big endian. */
+ uint16_t hard_address;
+ uint8_t inquiry_data;
+ uint8_t login_timeout;
+ uint8_t node_name[WWN_SIZE]; /* Big endian. */
+
+ /*
+ * LSB BIT 0 = Timer Operation mode bit 0
+ * LSB BIT 1 = Timer Operation mode bit 1
+ * LSB BIT 2 = Timer Operation mode bit 2
+ * LSB BIT 3 = Timer Operation mode bit 3
+ * LSB BIT 4 = Init Config Mode bit 0
+ * LSB BIT 5 = Init Config Mode bit 1
+ * LSB BIT 6 = Init Config Mode bit 2
+ * LSB BIT 7 = Enable Non part on LIHA failure
+ *
+ * MSB BIT 0 = Enable class 2
+ * MSB BIT 1 = Enable ACK0
+ * MSB BIT 2 =
+ * MSB BIT 3 =
+ * MSB BIT 4 = FC Tape Enable
+ * MSB BIT 5 = Enable FC Confirm
+ * MSB BIT 6 = Enable command queuing in target mode
+ * MSB BIT 7 = No Logo On Link Down
+ */
+ uint8_t add_firmware_options[2];
+
+ uint8_t response_accumulation_timer;
+ uint8_t interrupt_delay_timer;
+
+ /*
+ * LSB BIT 0 = Enable Read xfr_rdy
+ * LSB BIT 1 = Soft ID only
+ * LSB BIT 2 =
+ * LSB BIT 3 =
+ * LSB BIT 4 = FCP RSP Payload [0]
+ * LSB BIT 5 = FCP RSP Payload [1] / Sbus enable - 2200
+ * LSB BIT 6 = Enable Out-of-Order frame handling
+ * LSB BIT 7 = Disable Automatic PLOGI on Local Loop
+ *
+ * MSB BIT 0 = Sbus enable - 2300
+ * MSB BIT 1 =
+ * MSB BIT 2 =
+ * MSB BIT 3 =
+ * MSB BIT 4 = LED mode
+ * MSB BIT 5 = enable 50 ohm termination
+ * MSB BIT 6 = Data Rate (2300 only)
+ * MSB BIT 7 = Data Rate (2300 only)
+ */
+ uint8_t special_options[2];
+
+ /* Reserved for expanded RISC parameter block */
+ uint8_t reserved_2[22];
+
+ /*
+ * LSB BIT 0 = Tx Sensitivity 1G bit 0
+ * LSB BIT 1 = Tx Sensitivity 1G bit 1
+ * LSB BIT 2 = Tx Sensitivity 1G bit 2
+ * LSB BIT 3 = Tx Sensitivity 1G bit 3
+ * LSB BIT 4 = Rx Sensitivity 1G bit 0
+ * LSB BIT 5 = Rx Sensitivity 1G bit 1
+ * LSB BIT 6 = Rx Sensitivity 1G bit 2
+ * LSB BIT 7 = Rx Sensitivity 1G bit 3
+ *
+ * MSB BIT 0 = Tx Sensitivity 2G bit 0
+ * MSB BIT 1 = Tx Sensitivity 2G bit 1
+ * MSB BIT 2 = Tx Sensitivity 2G bit 2
+ * MSB BIT 3 = Tx Sensitivity 2G bit 3
+ * MSB BIT 4 = Rx Sensitivity 2G bit 0
+ * MSB BIT 5 = Rx Sensitivity 2G bit 1
+ * MSB BIT 6 = Rx Sensitivity 2G bit 2
+ * MSB BIT 7 = Rx Sensitivity 2G bit 3
+ *
+ * LSB BIT 0 = Output Swing 1G bit 0
+ * LSB BIT 1 = Output Swing 1G bit 1
+ * LSB BIT 2 = Output Swing 1G bit 2
+ * LSB BIT 3 = Output Emphasis 1G bit 0
+ * LSB BIT 4 = Output Emphasis 1G bit 1
+ * LSB BIT 5 = Output Swing 2G bit 0
+ * LSB BIT 6 = Output Swing 2G bit 1
+ * LSB BIT 7 = Output Swing 2G bit 2
+ *
+ * MSB BIT 0 = Output Emphasis 2G bit 0
+ * MSB BIT 1 = Output Emphasis 2G bit 1
+ * MSB BIT 2 = Output Enable
+ * MSB BIT 3 =
+ * MSB BIT 4 =
+ * MSB BIT 5 =
+ * MSB BIT 6 =
+ * MSB BIT 7 =
+ */
+ uint8_t seriallink_options[4];
+
+ /*
+ * NVRAM host parameter block
+ *
+ * LSB BIT 0 = Enable spinup delay
+ * LSB BIT 1 = Disable BIOS
+ * LSB BIT 2 = Enable Memory Map BIOS
+ * LSB BIT 3 = Enable Selectable Boot
+ * LSB BIT 4 = Disable RISC code load
+ * LSB BIT 5 = Set cache line size 1
+ * LSB BIT 6 = PCI Parity Disable
+ * LSB BIT 7 = Enable extended logging
+ *
+ * MSB BIT 0 = Enable 64bit addressing
+ * MSB BIT 1 = Enable lip reset
+ * MSB BIT 2 = Enable lip full login
+ * MSB BIT 3 = Enable target reset
+ * MSB BIT 4 = Enable database storage
+ * MSB BIT 5 = Enable cache flush read
+ * MSB BIT 6 = Enable database load
+ * MSB BIT 7 = Enable alternate WWN
+ */
+ uint8_t host_p[2];
+
+ uint8_t boot_node_name[WWN_SIZE];
+ uint8_t boot_lun_number;
+ uint8_t reset_delay;
+ uint8_t port_down_retry_count;
+ uint8_t boot_id_number;
+ uint16_t max_luns_per_target;
+ uint8_t fcode_boot_port_name[WWN_SIZE];
+ uint8_t alternate_port_name[WWN_SIZE];
+ uint8_t alternate_node_name[WWN_SIZE];
+
+ /*
+ * BIT 0 = Selective Login
+ * BIT 1 = Alt-Boot Enable
+ * BIT 2 =
+ * BIT 3 = Boot Order List
+ * BIT 4 =
+ * BIT 5 = Selective LUN
+ * BIT 6 =
+ * BIT 7 = unused
+ */
+ uint8_t efi_parameters;
+
+ uint8_t link_down_timeout;
+
+ uint8_t adapter_id[16];
+
+ uint8_t alt1_boot_node_name[WWN_SIZE];
+ uint16_t alt1_boot_lun_number;
+ uint8_t alt2_boot_node_name[WWN_SIZE];
+ uint16_t alt2_boot_lun_number;
+ uint8_t alt3_boot_node_name[WWN_SIZE];
+ uint16_t alt3_boot_lun_number;
+ uint8_t alt4_boot_node_name[WWN_SIZE];
+ uint16_t alt4_boot_lun_number;
+ uint8_t alt5_boot_node_name[WWN_SIZE];
+ uint16_t alt5_boot_lun_number;
+ uint8_t alt6_boot_node_name[WWN_SIZE];
+ uint16_t alt6_boot_lun_number;
+ uint8_t alt7_boot_node_name[WWN_SIZE];
+ uint16_t alt7_boot_lun_number;
+
+ uint8_t reserved_3[2];
+
+ /* Offset 200-215 : Model Number */
+ uint8_t model_number[16];
+
+ /* OEM related items */
+ uint8_t oem_specific[16];
+
+ /*
+ * NVRAM Adapter Features offset 232-239
+ *
+ * LSB BIT 0 = External GBIC
+ * LSB BIT 1 = Risc RAM parity
+ * LSB BIT 2 = Buffer Plus Module
+ * LSB BIT 3 = Multi Chip Adapter
+ * LSB BIT 4 = Internal connector
+ * LSB BIT 5 =
+ * LSB BIT 6 =
+ * LSB BIT 7 =
+ *
+ * MSB BIT 0 =
+ * MSB BIT 1 =
+ * MSB BIT 2 =
+ * MSB BIT 3 =
+ * MSB BIT 4 =
+ * MSB BIT 5 =
+ * MSB BIT 6 =
+ * MSB BIT 7 =
+ */
+ uint8_t adapter_features[2];
+
+ uint8_t reserved_4[16];
+
+ /* Subsystem vendor ID for ISP2200 */
+ uint16_t subsystem_vendor_id_2200;
+
+ /* Subsystem device ID for ISP2200 */
+ uint16_t subsystem_device_id_2200;
+
+ uint8_t reserved_5;
+ uint8_t checksum;
+} nvram_t;
+
+/*
+ * ISP queue - response queue entry definition.
+ */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint8_t data[52];
+ uint32_t signature;
+#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
+} response_t;
+
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+};
+
+typedef union {
+ uint16_t extended;
+ struct {
+ uint8_t reserved;
+ uint8_t standard;
+ } id;
+} target_id_t;
+
+#define SET_TARGET_ID(ha, to, from) \
+do { \
+ if (HAS_EXTENDED_IDS(ha)) \
+ to.extended = cpu_to_le16(from); \
+ else \
+ to.id.standard = (uint8_t)from; \
+} while (0)
+
+/*
+ * ISP queue - command entry structure definition.
+ */
+#define COMMAND_TYPE 0x11 /* Command entry */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System handle. */
+ target_id_t target; /* SCSI ID */
+ uint16_t lun; /* SCSI LUN */
+ uint16_t control_flags; /* Control flags. */
+#define CF_WRITE BIT_6
+#define CF_READ BIT_5
+#define CF_SIMPLE_TAG BIT_3
+#define CF_ORDERED_TAG BIT_2
+#define CF_HEAD_TAG BIT_1
+ uint16_t reserved_1;
+ uint16_t timeout; /* Command timeout. */
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ uint32_t byte_count; /* Total byte count. */
+ uint32_t dseg_0_address; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+} cmd_entry_t;
+
+/*
+ * ISP queue - 64-Bit addressing, command entry structure definition.
+ */
+#define COMMAND_A64_TYPE 0x19 /* Command A64 entry */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System handle. */
+ target_id_t target; /* SCSI ID */
+ uint16_t lun; /* SCSI LUN */
+ uint16_t control_flags; /* Control flags. */
+ uint16_t reserved_1;
+ uint16_t timeout; /* Command timeout. */
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ uint32_t byte_count; /* Total byte count. */
+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address[2]; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+} cmd_a64_entry_t, request_t;
+
+/*
+ * ISP queue - continuation entry structure definition.
+ */
+#define CONTINUE_TYPE 0x02 /* Continuation entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t reserved;
+ uint32_t dseg_0_address; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+ uint32_t dseg_3_address; /* Data segment 3 address. */
+ uint32_t dseg_3_length; /* Data segment 3 length. */
+ uint32_t dseg_4_address; /* Data segment 4 address. */
+ uint32_t dseg_4_length; /* Data segment 4 length. */
+ uint32_t dseg_5_address; /* Data segment 5 address. */
+ uint32_t dseg_5_length; /* Data segment 5 length. */
+ uint32_t dseg_6_address; /* Data segment 6 address. */
+ uint32_t dseg_6_length; /* Data segment 6 length. */
+} cont_entry_t;
+
+/*
+ * ISP queue - 64-Bit addressing, continuation entry structure definition.
+ */
+#define CONTINUE_A64_TYPE 0x0A /* Continuation A64 entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address[2]; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address [2]; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+ uint32_t dseg_3_address[2]; /* Data segment 3 address. */
+ uint32_t dseg_3_length; /* Data segment 3 length. */
+ uint32_t dseg_4_address[2]; /* Data segment 4 address. */
+ uint32_t dseg_4_length; /* Data segment 4 length. */
+} cont_a64_entry_t;
+
+#define PO_MODE_DIF_INSERT 0
+#define PO_MODE_DIF_REMOVE 1
+#define PO_MODE_DIF_PASS 2
+#define PO_MODE_DIF_REPLACE 3
+#define PO_MODE_DIF_TCP_CKSUM 6
+#define PO_ENABLE_INCR_GUARD_SEED BIT_3
+#define PO_DISABLE_GUARD_CHECK BIT_4
+#define PO_DISABLE_INCR_REF_TAG BIT_5
+#define PO_DIS_HEADER_MODE BIT_7
+#define PO_ENABLE_DIF_BUNDLING BIT_8
+#define PO_DIS_FRAME_MODE BIT_9
+#define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */
+#define PO_DIS_VALD_APP_REF_ESC BIT_11
+
+#define PO_DIS_APP_TAG_REPL BIT_12 /* disable REG Tag replacement */
+#define PO_DIS_REF_TAG_REPL BIT_13
+#define PO_DIS_APP_TAG_VALD BIT_14 /* disable REF Tag validation */
+#define PO_DIS_REF_TAG_VALD BIT_15
+
+/*
+ * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
+ */
+struct crc_context {
+ uint32_t handle; /* System handle. */
+ __le32 ref_tag;
+ __le16 app_tag;
+ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
+ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
+ __le16 guard_seed; /* Initial Guard Seed */
+ __le16 prot_opts; /* Requested Data Protection Mode */
+ __le16 blk_size; /* Data size in bytes */
+ uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ * only) */
+ __le32 byte_count; /* Total byte count/ total data
+ * transfer count */
+ union {
+ struct {
+ uint32_t reserved_1;
+ uint16_t reserved_2;
+ uint16_t reserved_3;
+ uint32_t reserved_4;
+ uint32_t data_address[2];
+ uint32_t data_length;
+ uint32_t reserved_5[2];
+ uint32_t reserved_6;
+ } nobundling;
+ struct {
+ __le32 dif_byte_count; /* Total DIF byte
+ * count */
+ uint16_t reserved_1;
+ __le16 dseg_count; /* Data segment count */
+ uint32_t reserved_2;
+ uint32_t data_address[2];
+ uint32_t data_length;
+ uint32_t dif_address[2];
+ uint32_t dif_length; /* Data segment 0
+ * length */
+ } bundling;
+ } u;
+
+ struct fcp_cmnd fcp_cmnd;
+ dma_addr_t crc_ctx_dma;
+ /* List of DMA context transfers */
+ struct list_head dsd_list;
+
+ /* This structure should not exceed 512 bytes */
+};
+
+#define CRC_CONTEXT_LEN_FW (offsetof(struct crc_context, fcp_cmnd.lun))
+#define CRC_CONTEXT_FCPCMND_OFF (offsetof(struct crc_context, fcp_cmnd.lun))
+
+/*
+ * ISP queue - status entry structure definition.
+ */
+#define STATUS_TYPE 0x03 /* Status entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System handle. */
+ uint16_t scsi_status; /* SCSI status. */
+ uint16_t comp_status; /* Completion status. */
+ uint16_t state_flags; /* State flags. */
+ uint16_t status_flags; /* Status flags. */
+ uint16_t rsp_info_len; /* Response Info Length. */
+ uint16_t req_sense_length; /* Request sense data length. */
+ uint32_t residual_length; /* Residual transfer length. */
+ uint8_t rsp_info[8]; /* FCP response information. */
+ uint8_t req_sense_data[32]; /* Request sense data. */
+} sts_entry_t;
+
+/*
+ * Status entry entry status
+ */
+#define RF_RQ_DMA_ERROR BIT_6 /* Request Queue DMA error. */
+#define RF_INV_E_ORDER BIT_5 /* Invalid entry order. */
+#define RF_INV_E_COUNT BIT_4 /* Invalid entry count. */
+#define RF_INV_E_PARAM BIT_3 /* Invalid entry parameter. */
+#define RF_INV_E_TYPE BIT_2 /* Invalid entry type. */
+#define RF_BUSY BIT_1 /* Busy */
+#define RF_MASK (RF_RQ_DMA_ERROR | RF_INV_E_ORDER | RF_INV_E_COUNT | \
+ RF_INV_E_PARAM | RF_INV_E_TYPE | RF_BUSY)
+#define RF_MASK_24XX (RF_INV_E_ORDER | RF_INV_E_COUNT | RF_INV_E_PARAM | \
+ RF_INV_E_TYPE)
+
+/*
+ * Status entry SCSI status bit definitions.
+ */
+#define SS_MASK 0xfff /* Reserved bits BIT_12-BIT_15*/
+#define SS_RESIDUAL_UNDER BIT_11
+#define SS_RESIDUAL_OVER BIT_10
+#define SS_SENSE_LEN_VALID BIT_9
+#define SS_RESPONSE_INFO_LEN_VALID BIT_8
+
+#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3)
+#define SS_BUSY_CONDITION BIT_3
+#define SS_CONDITION_MET BIT_2
+#define SS_CHECK_CONDITION BIT_1
+
+/*
+ * Status entry completion status
+ */
+#define CS_COMPLETE 0x0 /* No errors */
+#define CS_INCOMPLETE 0x1 /* Incomplete transfer of cmd. */
+#define CS_DMA 0x2 /* A DMA direction error. */
+#define CS_TRANSPORT 0x3 /* Transport error. */
+#define CS_RESET 0x4 /* SCSI bus reset occurred */
+#define CS_ABORTED 0x5 /* System aborted command. */
+#define CS_TIMEOUT 0x6 /* Timeout error. */
+#define CS_DATA_OVERRUN 0x7 /* Data overrun. */
+#define CS_DIF_ERROR 0xC /* DIF error detected */
+
+#define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */
+#define CS_QUEUE_FULL 0x1C /* Queue Full. */
+#define CS_PORT_UNAVAILABLE 0x28 /* Port unavailable */
+ /* (selection timeout) */
+#define CS_PORT_LOGGED_OUT 0x29 /* Port Logged Out */
+#define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */
+#define CS_PORT_BUSY 0x2B /* Port Busy */
+#define CS_COMPLETE_CHKCOND 0x30 /* Error? */
+#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
+ failure */
+#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
+#define CS_UNKNOWN 0x81 /* Driver defined */
+#define CS_RETRY 0x82 /* Driver defined */
+#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */
+
+#define CS_BIDIR_RD_OVERRUN 0x700
+#define CS_BIDIR_RD_WR_OVERRUN 0x707
+#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN 0x715
+#define CS_BIDIR_RD_UNDERRUN 0x1500
+#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN 0x1507
+#define CS_BIDIR_RD_WR_UNDERRUN 0x1515
+#define CS_BIDIR_DMA 0x200
+/*
+ * Status entry status flags
+ */
+#define SF_ABTS_TERMINATED BIT_10
+#define SF_LOGOUT_SENT BIT_13
+
+/*
+ * ISP queue - status continuation entry structure definition.
+ */
+#define STATUS_CONT_TYPE 0x10 /* Status continuation entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint8_t data[60]; /* data */
+} sts_cont_entry_t;
+
+/*
+ * ISP queue - RIO Type 1 status entry (32 bit I/O entry handles)
+ * structure definition.
+ */
+#define STATUS_TYPE_21 0x21 /* Status entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle[15]; /* System handles. */
+} sts21_entry_t;
+
+/*
+ * ISP queue - RIO Type 2 status entry (16 bit I/O entry handles)
+ * structure definition.
+ */
+#define STATUS_TYPE_22 0x22 /* Status entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+ uint16_t handle[30]; /* System handles. */
+} sts22_entry_t;
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+#define MARKER_TYPE 0x04 /* Marker entry. */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target; /* SCSI ID */
+ uint8_t modifier; /* Modifier (7-0). */
+#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
+#define MK_SYNC_ID 1 /* Synchronize ID */
+#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */
+#define MK_SYNC_LIP 3 /* Synchronize all ID/LUN, */
+ /* clear port changed, */
+ /* use sequence number. */
+ uint8_t reserved_1;
+ uint16_t sequence_number; /* Sequence number of event */
+ uint16_t lun; /* SCSI LUN */
+ uint8_t reserved_2[48];
+} mrk_entry_t;
+
+/*
+ * ISP queue - Management Server entry structure definition.
+ */
+#define MS_IOCB_TYPE 0x29 /* Management Server IOCB entry */
+typedef struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle1; /* System handle. */
+ target_id_t loop_id;
+ uint16_t status;
+ uint16_t control_flags; /* Control flags. */
+ uint16_t reserved2;
+ uint16_t timeout;
+ uint16_t cmd_dsd_count;
+ uint16_t total_dsd_count;
+ uint8_t type;
+ uint8_t r_ctl;
+ uint16_t rx_id;
+ uint16_t reserved3;
+ uint32_t handle2;
+ uint32_t rsp_bytecount;
+ uint32_t req_bytecount;
+ uint32_t dseg_req_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_req_length; /* Data segment 0 length. */
+ uint32_t dseg_rsp_address[2]; /* Data segment 1 address. */
+ uint32_t dseg_rsp_length; /* Data segment 1 length. */
+} ms_iocb_entry_t;
+
+
+/*
+ * ISP queue - Mailbox Command entry structure definition.
+ */
+#define MBX_IOCB_TYPE 0x39
+struct mbx_entry {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_define1;
+ /* Use sys_define1 for source type */
+#define SOURCE_SCSI 0x00
+#define SOURCE_IP 0x01
+#define SOURCE_VI 0x02
+#define SOURCE_SCTP 0x03
+#define SOURCE_MP 0x04
+#define SOURCE_MPIOCTL 0x05
+#define SOURCE_ASYNC_IOCB 0x07
+
+ uint8_t entry_status;
+
+ uint32_t handle;
+ target_id_t loop_id;
+
+ uint16_t status;
+ uint16_t state_flags;
+ uint16_t status_flags;
+
+ uint32_t sys_define2[2];
+
+ uint16_t mb0;
+ uint16_t mb1;
+ uint16_t mb2;
+ uint16_t mb3;
+ uint16_t mb6;
+ uint16_t mb7;
+ uint16_t mb9;
+ uint16_t mb10;
+ uint32_t reserved_2[2];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+};
+
+/*
+ * ISP request and response queue entry sizes
+ */
+#define RESPONSE_ENTRY_SIZE (sizeof(response_t))
+#define REQUEST_ENTRY_SIZE (sizeof(request_t))
+
+
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
+/*
+ * Switch info gathering structure.
+ */
+typedef struct {
+ port_id_t d_id;
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint8_t fabric_port_name[WWN_SIZE];
+ uint16_t fp_speed;
+ uint8_t fc4_type;
+} sw_info_t;
+
+/* FCP-4 types */
+#define FC4_TYPE_FCP_SCSI 0x08
+#define FC4_TYPE_OTHER 0x0
+#define FC4_TYPE_UNKNOWN 0xff
+
+/*
+ * Fibre channel port type.
+ */
+ typedef enum {
+ FCT_UNKNOWN,
+ FCT_RSCN,
+ FCT_SWITCH,
+ FCT_BROADCAST,
+ FCT_INITIATOR,
+ FCT_TARGET
+} fc_port_type_t;
+
+/*
+ * Fibre channel port structure.
+ */
+typedef struct fc_port {
+ struct list_head list;
+ struct scsi_qla_host *vha;
+
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t d_id;
+ uint16_t loop_id;
+ uint16_t old_loop_id;
+
+ uint16_t tgt_id;
+ uint16_t old_tgt_id;
+
+ uint8_t fcp_prio;
+
+ uint8_t fabric_port_name[WWN_SIZE];
+ uint16_t fp_speed;
+
+ fc_port_type_t port_type;
+
+ atomic_t state;
+ uint32_t flags;
+
+ int login_retry;
+
+ struct fc_rport *rport, *drport;
+ u32 supported_classes;
+
+ uint8_t fc4_type;
+ uint8_t scan_state;
+
+ unsigned long last_queue_full;
+ unsigned long last_ramp_up;
+
+ uint16_t port_id;
+
+ unsigned long retry_delay_timestamp;
+} fc_port_t;
+
+#include "qla_mr.h"
+
+/*
+ * Fibre channel port/lun states.
+ */
+#define FCS_UNCONFIGURED 1
+#define FCS_DEVICE_DEAD 2
+#define FCS_DEVICE_LOST 3
+#define FCS_ONLINE 4
+
+static const char * const port_state_str[] = {
+ "Unknown",
+ "UNCONFIGURED",
+ "DEAD",
+ "LOST",
+ "ONLINE"
+};
+
+/*
+ * FC port flags.
+ */
+#define FCF_FABRIC_DEVICE BIT_0
+#define FCF_LOGIN_NEEDED BIT_1
+#define FCF_FCP2_DEVICE BIT_2
+#define FCF_ASYNC_SENT BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
+
+/* No loop ID flag. */
+#define FC_NO_LOOP_ID 0x1000
+
+/*
+ * FC-CT interface
+ *
+ * NOTE: All structures are big-endian in form.
+ */
+
+#define CT_REJECT_RESPONSE 0x8001
+#define CT_ACCEPT_RESPONSE 0x8002
+#define CT_REASON_INVALID_COMMAND_CODE 0x01
+#define CT_REASON_CANNOT_PERFORM 0x09
+#define CT_REASON_COMMAND_UNSUPPORTED 0x0b
+#define CT_EXPL_ALREADY_REGISTERED 0x10
+#define CT_EXPL_HBA_ATTR_NOT_REGISTERED 0x11
+#define CT_EXPL_MULTIPLE_HBA_ATTR 0x12
+#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH 0x13
+#define CT_EXPL_MISSING_REQ_HBA_ATTR 0x14
+#define CT_EXPL_PORT_NOT_REGISTERED_ 0x15
+#define CT_EXPL_MISSING_HBA_ID_PORT_LIST 0x16
+#define CT_EXPL_HBA_NOT_REGISTERED 0x17
+#define CT_EXPL_PORT_ATTR_NOT_REGISTERED 0x20
+#define CT_EXPL_PORT_NOT_REGISTERED 0x21
+#define CT_EXPL_MULTIPLE_PORT_ATTR 0x22
+#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH 0x23
+
+#define NS_N_PORT_TYPE 0x01
+#define NS_NL_PORT_TYPE 0x02
+#define NS_NX_PORT_TYPE 0x7F
+
+#define GA_NXT_CMD 0x100
+#define GA_NXT_REQ_SIZE (16 + 4)
+#define GA_NXT_RSP_SIZE (16 + 620)
+
+#define GID_PT_CMD 0x1A1
+#define GID_PT_REQ_SIZE (16 + 4)
+
+#define GPN_ID_CMD 0x112
+#define GPN_ID_REQ_SIZE (16 + 4)
+#define GPN_ID_RSP_SIZE (16 + 8)
+
+#define GNN_ID_CMD 0x113
+#define GNN_ID_REQ_SIZE (16 + 4)
+#define GNN_ID_RSP_SIZE (16 + 8)
+
+#define GFT_ID_CMD 0x117
+#define GFT_ID_REQ_SIZE (16 + 4)
+#define GFT_ID_RSP_SIZE (16 + 32)
+
+#define RFT_ID_CMD 0x217
+#define RFT_ID_REQ_SIZE (16 + 4 + 32)
+#define RFT_ID_RSP_SIZE 16
+
+#define RFF_ID_CMD 0x21F
+#define RFF_ID_REQ_SIZE (16 + 4 + 2 + 1 + 1)
+#define RFF_ID_RSP_SIZE 16
+
+#define RNN_ID_CMD 0x213
+#define RNN_ID_REQ_SIZE (16 + 4 + 8)
+#define RNN_ID_RSP_SIZE 16
+
+#define RSNN_NN_CMD 0x239
+#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
+#define RSNN_NN_RSP_SIZE 16
+
+#define GFPN_ID_CMD 0x11C
+#define GFPN_ID_REQ_SIZE (16 + 4)
+#define GFPN_ID_RSP_SIZE (16 + 8)
+
+#define GPSC_CMD 0x127
+#define GPSC_REQ_SIZE (16 + 8)
+#define GPSC_RSP_SIZE (16 + 2 + 2)
+
+#define GFF_ID_CMD 0x011F
+#define GFF_ID_REQ_SIZE (16 + 4)
+#define GFF_ID_RSP_SIZE (16 + 128)
+
+/*
+ * HBA attribute types.
+ */
+#define FDMI_HBA_ATTR_COUNT 9
+#define FDMIV2_HBA_ATTR_COUNT 17
+#define FDMI_HBA_NODE_NAME 0x1
+#define FDMI_HBA_MANUFACTURER 0x2
+#define FDMI_HBA_SERIAL_NUMBER 0x3
+#define FDMI_HBA_MODEL 0x4
+#define FDMI_HBA_MODEL_DESCRIPTION 0x5
+#define FDMI_HBA_HARDWARE_VERSION 0x6
+#define FDMI_HBA_DRIVER_VERSION 0x7
+#define FDMI_HBA_OPTION_ROM_VERSION 0x8
+#define FDMI_HBA_FIRMWARE_VERSION 0x9
+#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
+#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
+#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
+#define FDMI_HBA_VENDOR_ID 0xd
+#define FDMI_HBA_NUM_PORTS 0xe
+#define FDMI_HBA_FABRIC_NAME 0xf
+#define FDMI_HBA_BOOT_BIOS_NAME 0x10
+#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
+
+struct ct_fdmi_hba_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t node_name[WWN_SIZE];
+ uint8_t manufacturer[64];
+ uint8_t serial_num[32];
+ uint8_t model[16+1];
+ uint8_t model_desc[80];
+ uint8_t hw_version[32];
+ uint8_t driver_version[32];
+ uint8_t orom_version[16];
+ uint8_t fw_version[32];
+ uint8_t os_version[128];
+ uint32_t max_ct_len;
+ } a;
+};
+
+struct ct_fdmi_hba_attributes {
+ uint32_t count;
+ struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
+};
+
+struct ct_fdmiv2_hba_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t node_name[WWN_SIZE];
+ uint8_t manufacturer[64];
+ uint8_t serial_num[32];
+ uint8_t model[16+1];
+ uint8_t model_desc[80];
+ uint8_t hw_version[16];
+ uint8_t driver_version[32];
+ uint8_t orom_version[16];
+ uint8_t fw_version[32];
+ uint8_t os_version[128];
+ uint32_t max_ct_len;
+ uint8_t sym_name[256];
+ uint32_t vendor_id;
+ uint32_t num_ports;
+ uint8_t fabric_name[WWN_SIZE];
+ uint8_t bios_name[32];
+ uint8_t vendor_indentifer[8];
+ } a;
+};
+
+struct ct_fdmiv2_hba_attributes {
+ uint32_t count;
+ struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+};
+
+/*
+ * Port attribute types.
+ */
+#define FDMI_PORT_ATTR_COUNT 6
+#define FDMIV2_PORT_ATTR_COUNT 16
+#define FDMI_PORT_FC4_TYPES 0x1
+#define FDMI_PORT_SUPPORT_SPEED 0x2
+#define FDMI_PORT_CURRENT_SPEED 0x3
+#define FDMI_PORT_MAX_FRAME_SIZE 0x4
+#define FDMI_PORT_OS_DEVICE_NAME 0x5
+#define FDMI_PORT_HOST_NAME 0x6
+#define FDMI_PORT_NODE_NAME 0x7
+#define FDMI_PORT_NAME 0x8
+#define FDMI_PORT_SYM_NAME 0x9
+#define FDMI_PORT_TYPE 0xa
+#define FDMI_PORT_SUPP_COS 0xb
+#define FDMI_PORT_FABRIC_NAME 0xc
+#define FDMI_PORT_FC4_TYPE 0xd
+#define FDMI_PORT_STATE 0x101
+#define FDMI_PORT_COUNT 0x102
+#define FDMI_PORT_ID 0x103
+
+#define FDMI_PORT_SPEED_1GB 0x1
+#define FDMI_PORT_SPEED_2GB 0x2
+#define FDMI_PORT_SPEED_10GB 0x4
+#define FDMI_PORT_SPEED_4GB 0x8
+#define FDMI_PORT_SPEED_8GB 0x10
+#define FDMI_PORT_SPEED_16GB 0x20
+#define FDMI_PORT_SPEED_32GB 0x40
+#define FDMI_PORT_SPEED_UNKNOWN 0x8000
+
+#define FC_CLASS_2 0x04
+#define FC_CLASS_3 0x08
+#define FC_CLASS_2_3 0x0C
+
+struct ct_fdmiv2_port_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t fc4_types[32];
+ uint32_t sup_speed;
+ uint32_t cur_speed;
+ uint32_t max_frame_size;
+ uint8_t os_dev_name[32];
+ uint8_t host_name[256];
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint8_t port_sym_name[128];
+ uint32_t port_type;
+ uint32_t port_supported_cos;
+ uint8_t fabric_name[WWN_SIZE];
+ uint8_t port_fc4_type[32];
+ uint32_t port_state;
+ uint32_t num_ports;
+ uint32_t port_id;
+ } a;
+};
+
+/*
+ * Port Attribute Block.
+ */
+struct ct_fdmiv2_port_attributes {
+ uint32_t count;
+ struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
+};
+
+struct ct_fdmi_port_attr {
+ uint16_t type;
+ uint16_t len;
+ union {
+ uint8_t fc4_types[32];
+ uint32_t sup_speed;
+ uint32_t cur_speed;
+ uint32_t max_frame_size;
+ uint8_t os_dev_name[32];
+ uint8_t host_name[256];
+ } a;
+};
+
+struct ct_fdmi_port_attributes {
+ uint32_t count;
+ struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
+};
+
+/* FDMI definitions. */
+#define GRHL_CMD 0x100
+#define GHAT_CMD 0x101
+#define GRPL_CMD 0x102
+#define GPAT_CMD 0x110
+
+#define RHBA_CMD 0x200
+#define RHBA_RSP_SIZE 16
+
+#define RHAT_CMD 0x201
+#define RPRT_CMD 0x210
+
+#define RPA_CMD 0x211
+#define RPA_RSP_SIZE 16
+
+#define DHBA_CMD 0x300
+#define DHBA_REQ_SIZE (16 + 8)
+#define DHBA_RSP_SIZE 16
+
+#define DHAT_CMD 0x301
+#define DPRT_CMD 0x310
+#define DPA_CMD 0x311
+
+/* CT command header -- request/response common fields */
+struct ct_cmd_hdr {
+ uint8_t revision;
+ uint8_t in_id[3];
+ uint8_t gs_type;
+ uint8_t gs_subtype;
+ uint8_t options;
+ uint8_t reserved;
+};
+
+/* CT command request */
+struct ct_sns_req {
+ struct ct_cmd_hdr header;
+ uint16_t command;
+ uint16_t max_rsp_size;
+ uint8_t fragment_id;
+ uint8_t reserved[3];
+
+ union {
+ /* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */
+ struct {
+ uint8_t reserved;
+ uint8_t port_id[3];
+ } port_id;
+
+ struct {
+ uint8_t port_type;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t reserved;
+ } gid_pt;
+
+ struct {
+ uint8_t reserved;
+ uint8_t port_id[3];
+ uint8_t fc4_types[32];
+ } rft_id;
+
+ struct {
+ uint8_t reserved;
+ uint8_t port_id[3];
+ uint16_t reserved2;
+ uint8_t fc4_feature;
+ uint8_t fc4_type;
+ } rff_id;
+
+ struct {
+ uint8_t reserved;
+ uint8_t port_id[3];
+ uint8_t node_name[8];
+ } rnn_id;
+
+ struct {
+ uint8_t node_name[8];
+ uint8_t name_len;
+ uint8_t sym_node_name[255];
+ } rsnn_nn;
+
+ struct {
+ uint8_t hba_indentifier[8];
+ } ghat;
+
+ struct {
+ uint8_t hba_identifier[8];
+ uint32_t entry_count;
+ uint8_t port_name[8];
+ struct ct_fdmi_hba_attributes attrs;
+ } rhba;
+
+ struct {
+ uint8_t hba_identifier[8];
+ uint32_t entry_count;
+ uint8_t port_name[8];
+ struct ct_fdmiv2_hba_attributes attrs;
+ } rhba2;
+
+ struct {
+ uint8_t hba_identifier[8];
+ struct ct_fdmi_hba_attributes attrs;
+ } rhat;
+
+ struct {
+ uint8_t port_name[8];
+ struct ct_fdmi_port_attributes attrs;
+ } rpa;
+
+ struct {
+ uint8_t port_name[8];
+ struct ct_fdmiv2_port_attributes attrs;
+ } rpa2;
+
+ struct {
+ uint8_t port_name[8];
+ } dhba;
+
+ struct {
+ uint8_t port_name[8];
+ } dhat;
+
+ struct {
+ uint8_t port_name[8];
+ } dprt;
+
+ struct {
+ uint8_t port_name[8];
+ } dpa;
+
+ struct {
+ uint8_t port_name[8];
+ } gpsc;
+
+ struct {
+ uint8_t reserved;
+ uint8_t port_name[3];
+ } gff_id;
+ } req;
+};
+
+/* CT command response header */
+struct ct_rsp_hdr {
+ struct ct_cmd_hdr header;
+ uint16_t response;
+ uint16_t residual;
+ uint8_t fragment_id;
+ uint8_t reason_code;
+ uint8_t explanation_code;
+ uint8_t vendor_unique;
+};
+
+struct ct_sns_gid_pt_data {
+ uint8_t control_byte;
+ uint8_t port_id[3];
+};
+
+struct ct_sns_rsp {
+ struct ct_rsp_hdr header;
+
+ union {
+ struct {
+ uint8_t port_type;
+ uint8_t port_id[3];
+ uint8_t port_name[8];
+ uint8_t sym_port_name_len;
+ uint8_t sym_port_name[255];
+ uint8_t node_name[8];
+ uint8_t sym_node_name_len;
+ uint8_t sym_node_name[255];
+ uint8_t init_proc_assoc[8];
+ uint8_t node_ip_addr[16];
+ uint8_t class_of_service[4];
+ uint8_t fc4_types[32];
+ uint8_t ip_address[16];
+ uint8_t fabric_port_name[8];
+ uint8_t reserved;
+ uint8_t hard_address[3];
+ } ga_nxt;
+
+ struct {
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gid_pt_data
+ entries[MAX_FIBRE_DEVICES_MAX];
+ } gid_pt;
+
+ struct {
+ uint8_t port_name[8];
+ } gpn_id;
+
+ struct {
+ uint8_t node_name[8];
+ } gnn_id;
+
+ struct {
+ uint8_t fc4_types[32];
+ } gft_id;
+
+ struct {
+ uint32_t entry_count;
+ uint8_t port_name[8];
+ struct ct_fdmi_hba_attributes attrs;
+ } ghat;
+
+ struct {
+ uint8_t port_name[8];
+ } gfpn_id;
+
+ struct {
+ uint16_t speeds;
+ uint16_t speed;
+ } gpsc;
+
+#define GFF_FCP_SCSI_OFFSET 7
+ struct {
+ uint8_t fc4_features[128];
+ } gff_id;
+ } rsp;
+};
+
+struct ct_sns_pkt {
+ union {
+ struct ct_sns_req req;
+ struct ct_sns_rsp rsp;
+ } p;
+};
+
+/*
+ * SNS command structures -- for 2200 compatibility.
+ */
+#define RFT_ID_SNS_SCMD_LEN 22
+#define RFT_ID_SNS_CMD_SIZE 60
+#define RFT_ID_SNS_DATA_SIZE 16
+
+#define RNN_ID_SNS_SCMD_LEN 10
+#define RNN_ID_SNS_CMD_SIZE 36
+#define RNN_ID_SNS_DATA_SIZE 16
+
+#define GA_NXT_SNS_SCMD_LEN 6
+#define GA_NXT_SNS_CMD_SIZE 28
+#define GA_NXT_SNS_DATA_SIZE (620 + 16)
+
+#define GID_PT_SNS_SCMD_LEN 6
+#define GID_PT_SNS_CMD_SIZE 28
+/*
+ * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older
+ * adapters.
+ */
+#define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES_2100 * 4 + 16)
+
+#define GPN_ID_SNS_SCMD_LEN 6
+#define GPN_ID_SNS_CMD_SIZE 28
+#define GPN_ID_SNS_DATA_SIZE (8 + 16)
+
+#define GNN_ID_SNS_SCMD_LEN 6
+#define GNN_ID_SNS_CMD_SIZE 28
+#define GNN_ID_SNS_DATA_SIZE (8 + 16)
+
+struct sns_cmd_pkt {
+ union {
+ struct {
+ uint16_t buffer_length;
+ uint16_t reserved_1;
+ uint32_t buffer_address[2];
+ uint16_t subcommand_length;
+ uint16_t reserved_2;
+ uint16_t subcommand;
+ uint16_t size;
+ uint32_t reserved_3;
+ uint8_t param[36];
+ } cmd;
+
+ uint8_t rft_data[RFT_ID_SNS_DATA_SIZE];
+ uint8_t rnn_data[RNN_ID_SNS_DATA_SIZE];
+ uint8_t gan_data[GA_NXT_SNS_DATA_SIZE];
+ uint8_t gid_data[GID_PT_SNS_DATA_SIZE];
+ uint8_t gpn_data[GPN_ID_SNS_DATA_SIZE];
+ uint8_t gnn_data[GNN_ID_SNS_DATA_SIZE];
+ } p;
+};
+
+struct fw_blob {
+ char *name;
+ uint32_t segs[4];
+ const struct firmware *fw;
+};
+
+/* Return data from MBC_GET_ID_LIST call. */
+struct gid_list_info {
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+ uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */
+ uint16_t loop_id; /* ISP23XX -- 6 bytes. */
+ uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
+};
+
+/* NPIV */
+typedef struct vport_info {
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ int vp_id;
+ uint16_t loop_id;
+ unsigned long host_no;
+ uint8_t port_id[3];
+ int loop_state;
+} vport_info_t;
+
+typedef struct vport_params {
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ uint32_t options;
+#define VP_OPTS_RETRY_ENABLE BIT_0
+#define VP_OPTS_VP_DISABLE BIT_1
+} vport_params_t;
+
+/* NPIV - return codes of VP create and modify */
+#define VP_RET_CODE_OK 0
+#define VP_RET_CODE_FATAL 1
+#define VP_RET_CODE_WRONG_ID 2
+#define VP_RET_CODE_WWPN 3
+#define VP_RET_CODE_RESOURCES 4
+#define VP_RET_CODE_NO_MEM 5
+#define VP_RET_CODE_NOT_FOUND 6
+
+struct qla_hw_data;
+struct rsp_que;
+/*
+ * ISP operations
+ */
+struct isp_operations {
+
+ int (*pci_config) (struct scsi_qla_host *);
+ void (*reset_chip) (struct scsi_qla_host *);
+ int (*chip_diag) (struct scsi_qla_host *);
+ void (*config_rings) (struct scsi_qla_host *);
+ void (*reset_adapter) (struct scsi_qla_host *);
+ int (*nvram_config) (struct scsi_qla_host *);
+ void (*update_fw_options) (struct scsi_qla_host *);
+ int (*load_risc) (struct scsi_qla_host *, uint32_t *);
+
+ char * (*pci_info_str) (struct scsi_qla_host *, char *);
+ char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
+
+ irq_handler_t intr_handler;
+ void (*enable_intrs) (struct qla_hw_data *);
+ void (*disable_intrs) (struct qla_hw_data *);
+
+ int (*abort_command) (srb_t *);
+ int (*target_reset) (struct fc_port *, uint64_t, int);
+ int (*lun_reset) (struct fc_port *, uint64_t, int);
+ int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
+ uint8_t, uint8_t, uint16_t *, uint8_t);
+ int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
+ uint8_t, uint8_t);
+
+ uint16_t (*calc_req_entries) (uint16_t);
+ void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
+ void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
+ void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
+ uint32_t);
+
+ uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+ int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
+ uint32_t);
+
+ void (*fw_dump) (struct scsi_qla_host *, int);
+
+ int (*beacon_on) (struct scsi_qla_host *);
+ int (*beacon_off) (struct scsi_qla_host *);
+ void (*beacon_blink) (struct scsi_qla_host *);
+
+ uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+ int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
+ uint32_t);
+
+ int (*get_flash_version) (struct scsi_qla_host *, void *);
+ int (*start_scsi) (srb_t *);
+ int (*abort_isp) (struct scsi_qla_host *);
+ int (*iospace_config)(struct qla_hw_data*);
+ int (*initialize_adapter)(struct scsi_qla_host *);
+};
+
+/* MSI-X Support *************************************************************/
+
+#define QLA_MSIX_CHIP_REV_24XX 3
+#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
+#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
+
+#define QLA_MSIX_DEFAULT 0x00
+#define QLA_MSIX_RSP_Q 0x01
+
+#define QLA_MIDX_DEFAULT 0
+#define QLA_MIDX_RSP_Q 1
+#define QLA_PCI_MSIX_CONTROL 0xa2
+#define QLA_83XX_PCI_MSIX_CONTROL 0x92
+
+struct scsi_qla_host;
+
+struct qla_msix_entry {
+ int have_irq;
+ uint32_t vector;
+ uint16_t entry;
+ struct rsp_que *rsp;
+};
+
+#define WATCH_INTERVAL 1 /* number of seconds */
+
+/* Work events. */
+enum qla_work_type {
+ QLA_EVT_AEN,
+ QLA_EVT_IDC_ACK,
+ QLA_EVT_ASYNC_LOGIN,
+ QLA_EVT_ASYNC_LOGIN_DONE,
+ QLA_EVT_ASYNC_LOGOUT,
+ QLA_EVT_ASYNC_LOGOUT_DONE,
+ QLA_EVT_ASYNC_ADISC,
+ QLA_EVT_ASYNC_ADISC_DONE,
+ QLA_EVT_UEVENT,
+ QLA_EVT_AENFX,
+};
+
+
+struct qla_work_evt {
+ struct list_head list;
+ enum qla_work_type type;
+ u32 flags;
+#define QLA_EVT_FLAG_FREE 0x1
+
+ union {
+ struct {
+ enum fc_host_event_code code;
+ u32 data;
+ } aen;
+ struct {
+#define QLA_IDC_ACK_REGS 7
+ uint16_t mb[QLA_IDC_ACK_REGS];
+ } idc_ack;
+ struct {
+ struct fc_port *fcport;
+#define QLA_LOGIO_LOGIN_RETRIED BIT_0
+ u16 data[2];
+ } logio;
+ struct {
+ u32 code;
+#define QLA_UEVENT_CODE_FW_DUMP 0
+ } uevent;
+ struct {
+ uint32_t evtcode;
+ uint32_t mbx[8];
+ uint32_t count;
+ } aenfx;
+ struct {
+ srb_t *sp;
+ } iosb;
+ } u;
+};
+
+struct qla_chip_state_84xx {
+ struct list_head list;
+ struct kref kref;
+
+ void *bus;
+ spinlock_t access_lock;
+ struct mutex fw_update_mutex;
+ uint32_t fw_update;
+ uint32_t op_fw_version;
+ uint32_t op_fw_size;
+ uint32_t op_fw_seq_size;
+ uint32_t diag_fw_version;
+ uint32_t gold_fw_version;
+};
+
+struct qla_statistics {
+ uint32_t total_isp_aborts;
+ uint64_t input_bytes;
+ uint64_t output_bytes;
+ uint64_t input_requests;
+ uint64_t output_requests;
+ uint32_t control_requests;
+
+ uint64_t jiffies_at_last_reset;
+ uint32_t stat_max_pend_cmds;
+ uint32_t stat_max_qfull_cmds_alloc;
+ uint32_t stat_max_qfull_cmds_dropped;
+};
+
+struct bidi_statistics {
+ unsigned long long io_count;
+ unsigned long long transfer_bytes;
+};
+
+/* Multi queue support */
+#define MBC_INITIALIZE_MULTIQ 0x1f
+#define QLA_QUE_PAGE 0X1000
+#define QLA_MQ_SIZE 32
+#define QLA_MAX_QUEUES 256
+#define ISP_QUE_REG(ha, id) \
+ ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
+ ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
+ ((void __iomem *)ha->iobase))
+#define QLA_REQ_QUE_ID(tag) \
+ ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
+#define QLA_DEFAULT_QUE_QOS 5
+#define QLA_PRECONFIG_VPORTS 32
+#define QLA_MAX_VPORTS_QLA24XX 128
+#define QLA_MAX_VPORTS_QLA25XX 256
+/* Response queue data structure */
+struct rsp_que {
+ dma_addr_t dma;
+ response_t *ring;
+ response_t *ring_ptr;
+ uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */
+ uint32_t __iomem *rsp_q_out;
+ uint16_t ring_index;
+ uint16_t out_ptr;
+ uint16_t *in_ptr; /* queue shadow in index */
+ uint16_t length;
+ uint16_t options;
+ uint16_t rid;
+ uint16_t id;
+ uint16_t vp_idx;
+ struct qla_hw_data *hw;
+ struct qla_msix_entry *msix;
+ struct req_que *req;
+ srb_t *status_srb; /* status continuation entry */
+ struct work_struct q_work;
+
+ dma_addr_t dma_fx00;
+ response_t *ring_fx00;
+ uint16_t length_fx00;
+ uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
+};
+
+/* Request queue data structure */
+struct req_que {
+ dma_addr_t dma;
+ request_t *ring;
+ request_t *ring_ptr;
+ uint32_t __iomem *req_q_in; /* FWI2-capable only. */
+ uint32_t __iomem *req_q_out;
+ uint16_t ring_index;
+ uint16_t in_ptr;
+ uint16_t *out_ptr; /* queue shadow out index */
+ uint16_t cnt;
+ uint16_t length;
+ uint16_t options;
+ uint16_t rid;
+ uint16_t id;
+ uint16_t qos;
+ uint16_t vp_idx;
+ struct rsp_que *rsp;
+ srb_t **outstanding_cmds;
+ uint32_t current_outstanding_cmd;
+ uint16_t num_outstanding_cmds;
+ int max_q_depth;
+
+ dma_addr_t dma_fx00;
+ request_t *ring_fx00;
+ uint16_t length_fx00;
+ uint8_t req_pkt[REQUEST_ENTRY_SIZE];
+};
+
+/* Place holder for FW buffer parameters */
+struct qlfc_fw {
+ void *fw_buf;
+ dma_addr_t fw_dma;
+ uint32_t len;
+};
+
+struct scsi_qlt_host {
+ void *target_lport_ptr;
+ struct mutex tgt_mutex;
+ struct mutex tgt_host_action_mutex;
+ struct qla_tgt *qla_tgt;
+};
+
+struct qlt_hw_data {
+ /* Protected by hw lock */
+ uint32_t enable_class_2:1;
+ uint32_t enable_explicit_conf:1;
+ uint32_t ini_mode_force_reverse:1;
+ uint32_t node_name_set:1;
+
+ dma_addr_t atio_dma; /* Physical address. */
+ struct atio *atio_ring; /* Base virtual address */
+ struct atio *atio_ring_ptr; /* Current address. */
+ uint16_t atio_ring_index; /* Current index. */
+ uint16_t atio_q_length;
+ uint32_t __iomem *atio_q_in;
+ uint32_t __iomem *atio_q_out;
+
+ struct qla_tgt_func_tmpl *tgt_ops;
+ struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
+ uint16_t current_handle;
+
+ struct qla_tgt_vp_map *tgt_vp_map;
+
+ int saved_set;
+ uint16_t saved_exchange_count;
+ uint32_t saved_firmware_options_1;
+ uint32_t saved_firmware_options_2;
+ uint32_t saved_firmware_options_3;
+ uint8_t saved_firmware_options[2];
+ uint8_t saved_add_firmware_options[2];
+
+ uint8_t tgt_node_name[WWN_SIZE];
+
+ struct list_head q_full_list;
+ uint32_t num_pend_cmds;
+ uint32_t num_qfull_cmds_alloc;
+ uint32_t num_qfull_cmds_dropped;
+ spinlock_t q_full_lock;
+ uint32_t leak_exchg_thresh_hold;
+};
+
+#define MAX_QFULL_CMDS_ALLOC 8192
+#define Q_FULL_THRESH_HOLD_PERCENT 90
+#define Q_FULL_THRESH_HOLD(ha) \
+ ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
+
+#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+
+/*
+ * Qlogic host adapter specific data structure.
+*/
+struct qla_hw_data {
+ struct pci_dev *pdev;
+ /* SRB cache. */
+#define SRB_MIN_REQ 128
+ mempool_t *srb_mempool;
+
+ volatile struct {
+ uint32_t mbox_int :1;
+ uint32_t mbox_busy :1;
+ uint32_t disable_risc_code_load :1;
+ uint32_t enable_64bit_addressing :1;
+ uint32_t enable_lip_reset :1;
+ uint32_t enable_target_reset :1;
+ uint32_t enable_lip_full_login :1;
+ uint32_t enable_led_scheme :1;
+
+ uint32_t msi_enabled :1;
+ uint32_t msix_enabled :1;
+ uint32_t disable_serdes :1;
+ uint32_t gpsc_supported :1;
+ uint32_t npiv_supported :1;
+ uint32_t pci_channel_io_perm_failure :1;
+ uint32_t fce_enabled :1;
+ uint32_t fac_supported :1;
+
+ uint32_t chip_reset_done :1;
+ uint32_t running_gold_fw :1;
+ uint32_t eeh_busy :1;
+ uint32_t cpu_affinity_enabled :1;
+ uint32_t disable_msix_handshake :1;
+ uint32_t fcp_prio_enabled :1;
+ uint32_t isp82xx_fw_hung:1;
+ uint32_t nic_core_hung:1;
+
+ uint32_t quiesce_owner:1;
+ uint32_t nic_core_reset_hdlr_active:1;
+ uint32_t nic_core_reset_owner:1;
+ uint32_t isp82xx_no_md_cap:1;
+ uint32_t host_shutting_down:1;
+ uint32_t idc_compl_status:1;
+
+ uint32_t mr_reset_hdlr_active:1;
+ uint32_t mr_intr_valid:1;
+ uint32_t fawwpn_enabled:1;
+ /* 35 bits */
+ } flags;
+
+ /* This spinlock is used to protect "io transactions", you must
+ * acquire it before doing any IO to the card, eg with RD_REG*() and
+ * WRT_REG*() for the duration of your entire commandtransaction.
+ *
+ * This spinlock is of lower priority than the io request lock.
+ */
+
+ spinlock_t hardware_lock ____cacheline_aligned;
+ int bars;
+ int mem_only;
+ device_reg_t *iobase; /* Base I/O address */
+ resource_size_t pio_address;
+
+#define MIN_IOBASE_LEN 0x100
+ dma_addr_t bar0_hdl;
+
+ void __iomem *cregbase;
+ dma_addr_t bar2_hdl;
+#define BAR0_LEN_FX00 (1024 * 1024)
+#define BAR2_LEN_FX00 (128 * 1024)
+
+ uint32_t rqstq_intr_code;
+ uint32_t mbx_intr_code;
+ uint32_t req_que_len;
+ uint32_t rsp_que_len;
+ uint32_t req_que_off;
+ uint32_t rsp_que_off;
+
+ /* Multi queue data structs */
+ device_reg_t *mqiobase;
+ device_reg_t *msixbase;
+ uint16_t msix_count;
+ uint8_t mqenable;
+ struct req_que **req_q_map;
+ struct rsp_que **rsp_q_map;
+ unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ uint8_t max_req_queues;
+ uint8_t max_rsp_queues;
+ struct qla_npiv_entry *npiv_info;
+ uint16_t nvram_npiv_size;
+
+ uint16_t switch_cap;
+#define FLOGI_SEQ_DEL BIT_8
+#define FLOGI_MID_SUPPORT BIT_10
+#define FLOGI_VSAN_SUPPORT BIT_12
+#define FLOGI_SP_SUPPORT BIT_13
+
+ uint8_t port_no; /* Physical port of adapter */
+
+ /* Timeout timers. */
+ uint8_t loop_down_abort_time; /* port down timer */
+ atomic_t loop_down_timer; /* loop down timer */
+ uint8_t link_down_timeout; /* link down timeout */
+ uint16_t max_loop_id;
+ uint16_t max_fibre_devices; /* Maximum number of targets */
+
+ uint16_t fb_rev;
+ uint16_t min_external_loopid; /* First external loop Id */
+
+#define PORT_SPEED_UNKNOWN 0xFFFF
+#define PORT_SPEED_1GB 0x00
+#define PORT_SPEED_2GB 0x01
+#define PORT_SPEED_4GB 0x03
+#define PORT_SPEED_8GB 0x04
+#define PORT_SPEED_16GB 0x05
+#define PORT_SPEED_32GB 0x06
+#define PORT_SPEED_10GB 0x13
+ uint16_t link_data_rate; /* F/W operating speed */
+
+ uint8_t current_topology;
+ uint8_t prev_topology;
+#define ISP_CFG_NL 1
+#define ISP_CFG_N 2
+#define ISP_CFG_FL 4
+#define ISP_CFG_F 8
+
+ uint8_t operating_mode; /* F/W operating mode */
+#define LOOP 0
+#define P2P 1
+#define LOOP_P2P 2
+#define P2P_LOOP 3
+ uint8_t interrupts_on;
+ uint32_t isp_abort_cnt;
+
+#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
+#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
+#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
+#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
+#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
+#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
+#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
+
+ uint32_t device_type;
+#define DT_ISP2100 BIT_0
+#define DT_ISP2200 BIT_1
+#define DT_ISP2300 BIT_2
+#define DT_ISP2312 BIT_3
+#define DT_ISP2322 BIT_4
+#define DT_ISP6312 BIT_5
+#define DT_ISP6322 BIT_6
+#define DT_ISP2422 BIT_7
+#define DT_ISP2432 BIT_8
+#define DT_ISP5422 BIT_9
+#define DT_ISP5432 BIT_10
+#define DT_ISP2532 BIT_11
+#define DT_ISP8432 BIT_12
+#define DT_ISP8001 BIT_13
+#define DT_ISP8021 BIT_14
+#define DT_ISP2031 BIT_15
+#define DT_ISP8031 BIT_16
+#define DT_ISPFX00 BIT_17
+#define DT_ISP8044 BIT_18
+#define DT_ISP2071 BIT_19
+#define DT_ISP2271 BIT_20
+#define DT_ISP_LAST (DT_ISP2271 << 1)
+
+#define DT_T10_PI BIT_25
+#define DT_IIDMA BIT_26
+#define DT_FWI2 BIT_27
+#define DT_ZIO_SUPPORTED BIT_28
+#define DT_OEM_001 BIT_29
+#define DT_ISP2200A BIT_30
+#define DT_EXTENDED_IDS BIT_31
+#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
+#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
+#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
+#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
+#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
+#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
+#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
+#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
+#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
+#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
+#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
+#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
+#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
+#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
+#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA81XX(ha) (IS_QLA8001(ha))
+#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044)
+#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
+#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
+#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
+#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
+#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
+
+#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
+ IS_QLA6312(ha) || IS_QLA6322(ha))
+#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
+#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
+#define IS_QLA25XX(ha) (IS_QLA2532(ha))
+#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
+#define IS_QLA84XX(ha) (IS_QLA8432(ha))
+#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha))
+#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
+ IS_QLA84XX(ha))
+#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
+ IS_QLA8031(ha) || IS_QLA8044(ha))
+#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha))
+#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
+ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA8044(ha) || IS_QLA27XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
+#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
+#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+
+#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
+#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
+#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
+#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
+#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
+#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
+#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
+#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
+#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+/* Bit 21 of fw_attributes decides the MCTP capabilities */
+#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
+ ((ha)->fw_attributes_ext[0] & BIT_0))
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
+#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
+#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+
+ /* HBA serial number */
+ uint8_t serial0;
+ uint8_t serial1;
+ uint8_t serial2;
+
+ /* NVRAM configuration data */
+#define MAX_NVRAM_SIZE 4096
+#define VPD_OFFSET MAX_NVRAM_SIZE / 2
+ uint16_t nvram_size;
+ uint16_t nvram_base;
+ void *nvram;
+ uint16_t vpd_size;
+ uint16_t vpd_base;
+ void *vpd;
+
+ uint16_t loop_reset_delay;
+ uint8_t retry_count;
+ uint8_t login_timeout;
+ uint16_t r_a_tov;
+ int port_down_retry_count;
+ uint8_t mbx_count;
+ uint8_t aen_mbx_count;
+
+ uint32_t login_retry_count;
+ /* SNS command interfaces. */
+ ms_iocb_entry_t *ms_iocb;
+ dma_addr_t ms_iocb_dma;
+ struct ct_sns_pkt *ct_sns;
+ dma_addr_t ct_sns_dma;
+ /* SNS command interfaces for 2200. */
+ struct sns_cmd_pkt *sns_cmd;
+ dma_addr_t sns_cmd_dma;
+
+#define SFP_DEV_SIZE 256
+#define SFP_BLOCK_SIZE 64
+ void *sfp_data;
+ dma_addr_t sfp_data_dma;
+
+#define XGMAC_DATA_SIZE 4096
+ void *xgmac_data;
+ dma_addr_t xgmac_data_dma;
+
+#define DCBX_TLV_DATA_SIZE 4096
+ void *dcbx_tlv;
+ dma_addr_t dcbx_tlv_dma;
+
+ struct task_struct *dpc_thread;
+ uint8_t dpc_active; /* DPC routine is active */
+
+ dma_addr_t gid_list_dma;
+ struct gid_list_info *gid_list;
+ int gid_list_info_size;
+
+ /* Small DMA pool allocations -- maximum 256 bytes in length. */
+#define DMA_POOL_SIZE 256
+ struct dma_pool *s_dma_pool;
+
+ dma_addr_t init_cb_dma;
+ init_cb_t *init_cb;
+ int init_cb_size;
+ dma_addr_t ex_init_cb_dma;
+ struct ex_init_cb_81xx *ex_init_cb;
+
+ void *async_pd;
+ dma_addr_t async_pd_dma;
+
+ void *swl;
+
+ /* These are used by mailbox operations. */
+ uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+ uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
+ uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
+
+ mbx_cmd_t *mcp;
+ struct mbx_cmd_32 *mcp32;
+
+ unsigned long mbx_cmd_flags;
+#define MBX_INTERRUPT 1
+#define MBX_INTR_WAIT 2
+#define MBX_UPDATE_FLASH_ACTIVE 3
+
+ struct mutex vport_lock; /* Virtual port synchronization */
+ spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+ struct completion mbx_cmd_comp; /* Serialize mbx access */
+ struct completion mbx_intr_comp; /* Used for completion notification */
+ struct completion dcbx_comp; /* For set port config notification */
+ struct completion lb_portup_comp; /* Used to wait for link up during
+ * loopback */
+#define DCBX_COMP_TIMEOUT 20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
+ int notify_dcbx_comp;
+ int notify_lb_portup_comp;
+ struct mutex selflogin_lock;
+
+ /* Basic firmware related information. */
+ uint16_t fw_major_version;
+ uint16_t fw_minor_version;
+ uint16_t fw_subminor_version;
+ uint16_t fw_attributes;
+ uint16_t fw_attributes_h;
+ uint16_t fw_attributes_ext[2];
+ uint32_t fw_memory_size;
+ uint32_t fw_transfer_size;
+ uint32_t fw_srisc_address;
+#define RISC_START_ADDRESS_2100 0x1000
+#define RISC_START_ADDRESS_2300 0x800
+#define RISC_START_ADDRESS_2400 0x100000
+ uint16_t fw_xcb_count;
+ uint16_t fw_iocb_count;
+
+ uint32_t fw_shared_ram_start;
+ uint32_t fw_shared_ram_end;
+
+ uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
+ uint8_t fw_seriallink_options[4];
+ uint16_t fw_seriallink_options24[4];
+
+ uint8_t mpi_version[3];
+ uint32_t mpi_capabilities;
+ uint8_t phy_version[3];
+
+ /* Firmware dump template */
+ void *fw_dump_template;
+ uint32_t fw_dump_template_len;
+ /* Firmware dump information. */
+ struct qla2xxx_fw_dump *fw_dump;
+ uint32_t fw_dump_len;
+ int fw_dumped;
+ unsigned long fw_dump_cap_flags;
+#define RISC_PAUSE_CMPL 0
+#define DMA_SHUTDOWN_CMPL 1
+#define ISP_RESET_CMPL 2
+#define RISC_RDY_AFT_RESET 3
+#define RISC_SRAM_DUMP_CMPL 4
+#define RISC_EXT_MEM_DUMP_CMPL 5
+#define ISP_MBX_RDY 6
+#define ISP_SOFT_RESET_CMPL 7
+ int fw_dump_reading;
+ int prev_minidump_failed;
+ dma_addr_t eft_dma;
+ void *eft;
+/* Current size of mctp dump is 0x086064 bytes */
+#define MCTP_DUMP_SIZE 0x086064
+ dma_addr_t mctp_dump_dma;
+ void *mctp_dump;
+ int mctp_dumped;
+ int mctp_dump_reading;
+ uint32_t chain_offset;
+ struct dentry *dfs_dir;
+ struct dentry *dfs_fce;
+ dma_addr_t fce_dma;
+ void *fce;
+ uint32_t fce_bufs;
+ uint16_t fce_mb[8];
+ uint64_t fce_wr, fce_rd;
+ struct mutex fce_mutex;
+
+ uint32_t pci_attr;
+ uint16_t chip_revision;
+
+ uint16_t product_id[4];
+
+ uint8_t model_number[16+1];
+#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ char model_desc[80];
+ uint8_t adapter_id[16+1];
+
+ /* Option ROM information. */
+ char *optrom_buffer;
+ uint32_t optrom_size;
+ int optrom_state;
+#define QLA_SWAITING 0
+#define QLA_SREADING 1
+#define QLA_SWRITING 2
+ uint32_t optrom_region_start;
+ uint32_t optrom_region_size;
+ struct mutex optrom_mutex;
+
+/* PCI expansion ROM image information. */
+#define ROM_CODE_TYPE_BIOS 0
+#define ROM_CODE_TYPE_FCODE 1
+#define ROM_CODE_TYPE_EFI 3
+ uint8_t bios_revision[2];
+ uint8_t efi_revision[2];
+ uint8_t fcode_revision[16];
+ uint32_t fw_revision[4];
+
+ uint32_t gold_fw_version[4];
+
+ /* Offsets for flash/nvram access (set to ~0 if not used). */
+ uint32_t flash_conf_off;
+ uint32_t flash_data_off;
+ uint32_t nvram_conf_off;
+ uint32_t nvram_data_off;
+
+ uint32_t fdt_wrt_disable;
+ uint32_t fdt_wrt_enable;
+ uint32_t fdt_erase_cmd;
+ uint32_t fdt_block_size;
+ uint32_t fdt_unprotect_sec_cmd;
+ uint32_t fdt_protect_sec_cmd;
+ uint32_t fdt_wrt_sts_reg_cmd;
+
+ uint32_t flt_region_flt;
+ uint32_t flt_region_fdt;
+ uint32_t flt_region_boot;
+ uint32_t flt_region_fw;
+ uint32_t flt_region_vpd_nvram;
+ uint32_t flt_region_vpd;
+ uint32_t flt_region_nvram;
+ uint32_t flt_region_npiv_conf;
+ uint32_t flt_region_gold_fw;
+ uint32_t flt_region_fcp_prio;
+ uint32_t flt_region_bootload;
+
+ /* Needed for BEACON */
+ uint16_t beacon_blink_led;
+ uint8_t beacon_color_state;
+#define QLA_LED_GRN_ON 0x01
+#define QLA_LED_YLW_ON 0x02
+#define QLA_LED_ABR_ON 0x04
+#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
+ /* ISP2322: red, green, amber. */
+ uint16_t zio_mode;
+ uint16_t zio_timer;
+
+ struct qla_msix_entry *msix_entries;
+
+ struct list_head vp_list; /* list of VP */
+ unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
+ sizeof(unsigned long)];
+ uint16_t num_vhosts; /* number of vports created */
+ uint16_t num_vsans; /* number of vsan created */
+ uint16_t max_npiv_vports; /* 63 or 125 per topoloty */
+ int cur_vport_count;
+
+ struct qla_chip_state_84xx *cs84xx;
+ struct qla_statistics qla_stats;
+ struct isp_operations *isp_ops;
+ struct workqueue_struct *wq;
+ struct qlfc_fw fw_buf;
+
+ /* FCP_CMND priority support */
+ struct qla_fcp_prio_cfg *fcp_prio_cfg;
+
+ struct dma_pool *dl_dma_pool;
+#define DSD_LIST_DMA_POOL_SIZE 512
+
+ struct dma_pool *fcp_cmnd_dma_pool;
+ mempool_t *ctx_mempool;
+#define FCP_CMND_DMA_POOL_SIZE 512
+
+ unsigned long nx_pcibase; /* Base I/O address */
+ uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */
+ unsigned long nxdb_wr_ptr; /* Door bell write pointer */
+
+ uint32_t crb_win;
+ uint32_t curr_window;
+ uint32_t ddr_mn_window;
+ unsigned long mn_win_crb;
+ unsigned long ms_win_crb;
+ int qdr_sn_window;
+ uint32_t fcoe_dev_init_timeout;
+ uint32_t fcoe_reset_timeout;
+ rwlock_t hw_lock;
+ uint16_t portnum; /* port number */
+ int link_width;
+ struct fw_blob *hablob;
+ struct qla82xx_legacy_intr_set nx_legacy_intr;
+
+ uint16_t gbl_dsd_inuse;
+ uint16_t gbl_dsd_avail;
+ struct list_head gbl_dsd_list;
+#define NUM_DSD_CHAIN 4096
+
+ uint8_t fw_type;
+ __le32 file_prd_off; /* File firmware product offset */
+
+ uint32_t md_template_size;
+ void *md_tmplt_hdr;
+ dma_addr_t md_tmplt_hdr_dma;
+ void *md_dump;
+ uint32_t md_dump_size;
+
+ void *loop_id_map;
+
+ /* QLA83XX IDC specific fields */
+ uint32_t idc_audit_ts;
+ uint32_t idc_extend_tmo;
+
+ /* DPC low-priority workqueue */
+ struct workqueue_struct *dpc_lp_wq;
+ struct work_struct idc_aen;
+ /* DPC high-priority workqueue */
+ struct workqueue_struct *dpc_hp_wq;
+ struct work_struct nic_core_reset;
+ struct work_struct idc_state_handler;
+ struct work_struct nic_core_unrecoverable;
+ struct work_struct board_disable;
+
+ struct mr_data_fx00 mr;
+ uint32_t chip_reset;
+
+ struct qlt_hw_data tgt;
+ int allow_cna_fw_dump;
+};
+
+/*
+ * Qlogic scsi host structure
+ */
+typedef struct scsi_qla_host {
+ struct list_head list;
+ struct list_head vp_fcports; /* list of fcports */
+ struct list_head work_list;
+ spinlock_t work_lock;
+
+ /* Commonly used flags and state information. */
+ struct Scsi_Host *host;
+ unsigned long host_no;
+ uint8_t host_str[16];
+
+ volatile struct {
+ uint32_t init_done :1;
+ uint32_t online :1;
+ uint32_t reset_active :1;
+
+ uint32_t management_server_logged_in :1;
+ uint32_t process_response_queue :1;
+ uint32_t difdix_supported:1;
+ uint32_t delete_progress:1;
+
+ uint32_t fw_tgt_reported:1;
+ } flags;
+
+ atomic_t loop_state;
+#define LOOP_TIMEOUT 1
+#define LOOP_DOWN 2
+#define LOOP_UP 3
+#define LOOP_UPDATE 4
+#define LOOP_READY 5
+#define LOOP_DEAD 6
+
+ unsigned long dpc_flags;
+#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
+#define RESET_ACTIVE 1
+#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
+#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
+#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
+#define LOOP_RESYNC_ACTIVE 5
+#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
+#define RSCN_UPDATE 7 /* Perform an RSCN update. */
+#define RELOGIN_NEEDED 8
+#define REGISTER_FC4_NEEDED 9 /* SNS FC4 registration required. */
+#define ISP_ABORT_RETRY 10 /* ISP aborted. */
+#define BEACON_BLINK_NEEDED 11
+#define REGISTER_FDMI_NEEDED 12
+#define FCPORT_UPDATE_NEEDED 13
+#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
+#define UNLOADING 15
+#define NPIV_CONFIG_NEEDED 16
+#define ISP_UNRECOVERABLE 17
+#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
+#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
+#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
+#define SCR_PENDING 21 /* SCR in target mode */
+#define PORT_UPDATE_NEEDED 22
+#define FX00_RESET_RECOVERY 23
+#define FX00_TARGET_SCAN 24
+#define FX00_CRITEMP_RECOVERY 25
+#define FX00_HOST_INFO_RESEND 26
+
+ unsigned long pci_flags;
+#define PFLG_DISCONNECTED 0 /* PCI device removed */
+#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
+#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
+
+ uint32_t device_flags;
+#define SWITCH_FOUND BIT_0
+#define DFLG_NO_CABLE BIT_1
+#define DFLG_DEV_FAILED BIT_5
+
+ /* ISP configuration data. */
+ uint16_t loop_id; /* Host adapter loop id */
+ uint16_t self_login_loop_id; /* host adapter loop id
+ * get it on self login
+ */
+ fc_port_t bidir_fcport; /* fcport used for bidir cmnds
+ * no need of allocating it for
+ * each command
+ */
+
+ port_id_t d_id; /* Host adapter port id */
+ uint8_t marker_needed;
+ uint16_t mgmt_svr_loop_id;
+
+
+
+ /* Timeout timers. */
+ uint8_t loop_down_abort_time; /* port down timer */
+ atomic_t loop_down_timer; /* loop down timer */
+ uint8_t link_down_timeout; /* link down timeout */
+
+ uint32_t timer_active;
+ struct timer_list timer;
+
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint8_t fabric_node_name[WWN_SIZE];
+
+ uint16_t fcoe_vlan_id;
+ uint16_t fcoe_fcf_idx;
+ uint8_t fcoe_vn_port_mac[6];
+
+ uint32_t vp_abort_cnt;
+
+ struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
+ uint16_t vp_idx; /* vport ID */
+
+ unsigned long vp_flags;
+#define VP_IDX_ACQUIRED 0 /* bit no 0 */
+#define VP_CREATE_NEEDED 1
+#define VP_BIND_NEEDED 2
+#define VP_DELETE_NEEDED 3
+#define VP_SCR_NEEDED 4 /* State Change Request registration */
+#define VP_CONFIG_OK 5 /* Flag to cfg VP, if FW is ready */
+ atomic_t vp_state;
+#define VP_OFFLINE 0
+#define VP_ACTIVE 1
+#define VP_FAILED 2
+// #define VP_DISABLE 3
+ uint16_t vp_err_state;
+ uint16_t vp_prev_err_state;
+#define VP_ERR_UNKWN 0
+#define VP_ERR_PORTDWN 1
+#define VP_ERR_FAB_UNSUPPORTED 2
+#define VP_ERR_FAB_NORESOURCES 3
+#define VP_ERR_FAB_LOGOUT 4
+#define VP_ERR_ADAP_NORESOURCES 5
+ struct qla_hw_data *hw;
+ struct scsi_qlt_host vha_tgt;
+ struct req_que *req;
+ int fw_heartbeat_counter;
+ int seconds_since_last_heartbeat;
+ struct fc_host_statistics fc_host_stat;
+ struct qla_statistics qla_stats;
+ struct bidi_statistics bidi_stats;
+
+ atomic_t vref_count;
+ struct qla8044_reset_template reset_tmplt;
+} scsi_qla_host_t;
+
+#define SET_VP_IDX 1
+#define SET_AL_PA 2
+#define RESET_VP_IDX 3
+#define RESET_AL_PA 4
+struct qla_tgt_vp_map {
+ uint8_t idx;
+ scsi_qla_host_t *vha;
+};
+
+/*
+ * Macros to help code, maintain, etc.
+ */
+#define LOOP_TRANSITION(ha) \
+ (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
+ atomic_read(&ha->loop_state) == LOOP_DOWN)
+
+#define STATE_TRANSITION(ha) \
+ (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
+ atomic_dec(&__vha->vref_count); \
+} while (0)
+
+/*
+ * qla2x00 local function return status codes
+ */
+#define MBS_MASK 0x3fff
+
+#define QLA_SUCCESS (MBS_COMMAND_COMPLETE & MBS_MASK)
+#define QLA_INVALID_COMMAND (MBS_INVALID_COMMAND & MBS_MASK)
+#define QLA_INTERFACE_ERROR (MBS_HOST_INTERFACE_ERROR & MBS_MASK)
+#define QLA_TEST_FAILED (MBS_TEST_FAILED & MBS_MASK)
+#define QLA_COMMAND_ERROR (MBS_COMMAND_ERROR & MBS_MASK)
+#define QLA_PARAMETER_ERROR (MBS_COMMAND_PARAMETER_ERROR & MBS_MASK)
+#define QLA_PORT_ID_USED (MBS_PORT_ID_USED & MBS_MASK)
+#define QLA_LOOP_ID_USED (MBS_LOOP_ID_USED & MBS_MASK)
+#define QLA_ALL_IDS_IN_USE (MBS_ALL_IDS_IN_USE & MBS_MASK)
+#define QLA_NOT_LOGGED_IN (MBS_NOT_LOGGED_IN & MBS_MASK)
+
+#define QLA_FUNCTION_TIMEOUT 0x100
+#define QLA_FUNCTION_PARAMETER_ERROR 0x101
+#define QLA_FUNCTION_FAILED 0x102
+#define QLA_MEMORY_ALLOC_FAILED 0x103
+#define QLA_LOCK_TIMEOUT 0x104
+#define QLA_ABORTED 0x105
+#define QLA_SUSPENDED 0x106
+#define QLA_BUSY 0x107
+#define QLA_ALREADY_REGISTERED 0x109
+
+#define NVRAM_DELAY() udelay(10)
+
+/*
+ * Flash support definitions
+ */
+#define OPTROM_SIZE_2300 0x20000
+#define OPTROM_SIZE_2322 0x100000
+#define OPTROM_SIZE_24XX 0x100000
+#define OPTROM_SIZE_25XX 0x200000
+#define OPTROM_SIZE_81XX 0x400000
+#define OPTROM_SIZE_82XX 0x800000
+#define OPTROM_SIZE_83XX 0x1000000
+
+#define OPTROM_BURST_SIZE 0x1000
+#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
+
+#define QLA_DSDS_PER_IOCB 37
+
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+
+#define QLA_SG_ALL 1024
+
+enum nexus_wait_type {
+ WAIT_HOST = 0,
+ WAIT_TARGET,
+ WAIT_LUN,
+};
+
+#include "qla_gbl.h"
+#include "qla_dbg.h"
+#include "qla_inline.h"
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h
new file mode 100644
index 000000000..d6ea69df7
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_devtbl.h
@@ -0,0 +1,99 @@
+#define QLA_MODEL_NAMES 0x5C
+
+/*
+ * Adapter model names and descriptions.
+ */
+static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = {
+ "QLA2340", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x100 */
+ "QLA2342", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x101 */
+ "QLA2344", "133MHz PCI-X to 2Gb FC, Quad Channel", /* 0x102 */
+ "QCP2342", "cPCI to 2Gb FC, Dual Channel", /* 0x103 */
+ "QSB2340", "SBUS to 2Gb FC, Single Channel", /* 0x104 */
+ "QSB2342", "SBUS to 2Gb FC, Dual Channel", /* 0x105 */
+ "QLA2310", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x106 */
+ "QLA2332", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x107 */
+ "QCP2332", "Sun cPCI to 2Gb FC, Dual Channel", /* 0x108 */
+ "QCP2340", "cPCI to 2Gb FC, Single Channel", /* 0x109 */
+ "QLA2342", "Sun 133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x10a */
+ "QCP2342", "Sun - cPCI to 2Gb FC, Dual Channel", /* 0x10b */
+ "QLA2350", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x10c */
+ "QLA2352", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x10d */
+ "QLA2352", "Sun 133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x10e */
+ " ", " ", /* 0x10f */
+ " ", " ", /* 0x110 */
+ " ", " ", /* 0x111 */
+ " ", " ", /* 0x112 */
+ " ", " ", /* 0x113 */
+ " ", " ", /* 0x114 */
+ "QLA2360", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x115 */
+ "QLA2362", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x116 */
+ "QLE2360", "PCI-Express to 2Gb FC, Single Channel", /* 0x117 */
+ "QLE2362", "PCI-Express to 2Gb FC, Dual Channel", /* 0x118 */
+ "QLA200", "133MHz PCI-X to 2Gb FC Optical", /* 0x119 */
+ " ", " ", /* 0x11a */
+ " ", " ", /* 0x11b */
+ "QLA200P", "133MHz PCI-X to 2Gb FC SFP", /* 0x11c */
+ " ", " ", /* 0x11d */
+ " ", " ", /* 0x11e */
+ " ", " ", /* 0x11f */
+ " ", " ", /* 0x120 */
+ " ", " ", /* 0x121 */
+ " ", " ", /* 0x122 */
+ " ", " ", /* 0x123 */
+ " ", " ", /* 0x124 */
+ " ", " ", /* 0x125 */
+ " ", " ", /* 0x126 */
+ " ", " ", /* 0x127 */
+ " ", " ", /* 0x128 */
+ " ", " ", /* 0x129 */
+ " ", " ", /* 0x12a */
+ " ", " ", /* 0x12b */
+ " ", " ", /* 0x12c */
+ " ", " ", /* 0x12d */
+ " ", " ", /* 0x12e */
+ "QLA210", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x12f */
+ "EMC 250", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x130 */
+ "HP A7538A", "HP 1p2g PCI-X to 2Gb FC, Single Channel", /* 0x131 */
+ "QLA210", "Sun 133MHz PCI-X to 2Gb FC, Single Channel", /* 0x132 */
+ "QLA2460", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x133 */
+ "QLA2462", "PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x134 */
+ "QMC2462", "IBM eServer BC 4Gb FC Expansion Card", /* 0x135 */
+ "QMC2462S", "IBM eServer BC 4Gb FC Expansion Card SFF", /* 0x136 */
+ "QLE2460", "PCI-Express to 4Gb FC, Single Channel", /* 0x137 */
+ "QLE2462", "PCI-Express to 4Gb FC, Dual Channel", /* 0x138 */
+ "QME2462", "Dell BS PCI-Express to 4Gb FC, Dual Channel", /* 0x139 */
+ " ", " ", /* 0x13a */
+ " ", " ", /* 0x13b */
+ " ", " ", /* 0x13c */
+ "QEM2462", "Sun Server I/O Module 4Gb FC, Dual Channel", /* 0x13d */
+ "QLE210", "PCI-Express to 2Gb FC, Single Channel", /* 0x13e */
+ "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x13f */
+ "QLA2460", "Sun PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x140 */
+ "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */
+ "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */
+ "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */
+ "QEM2462", "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */
+ "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */
+ "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */
+ "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */
+ "HP AE369A", "PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x148 */
+ "QLA2340", "Sun 133MHz PCI-X to 2Gb FC, Single Channel", /* 0x149 */
+ " ", " ", /* 0x14a */
+ " ", " ", /* 0x14b */
+ "QMC2432M", "IBM eServer BC 4Gb FC Expansion Card CFFE", /* 0x14c */
+ "QMC2422M", "IBM eServer BC 4Gb FC Expansion Card CFFX", /* 0x14d */
+ "QLE220", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x14e */
+ " ", " ", /* 0x14f */
+ " ", " ", /* 0x150 */
+ " ", " ", /* 0x151 */
+ "QME2462", "PCI-Express to 4Gb FC, Dual Channel Mezz HBA", /* 0x152 */
+ "QMH2462", "PCI-Express to 4Gb FC, Dual Channel Mezz HBA", /* 0x153 */
+ " ", " ", /* 0x154 */
+ "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x155 */
+ "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x156 */
+ " ", " ", /* 0x157 */
+ " ", " ", /* 0x158 */
+ " ", " ", /* 0x159 */
+ " ", " ", /* 0x15a */
+ "QME2472", "Dell BS PCI-Express to 4Gb FC, Dual Channel", /* 0x15b */
+};
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
new file mode 100644
index 000000000..15cf074ff
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -0,0 +1,182 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *qla2x00_dfs_root;
+static atomic_t qla2x00_dfs_root_count;
+
+static int
+qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *vha = s->private;
+ uint32_t cnt;
+ uint32_t *fce;
+ uint64_t fce_start;
+ struct qla_hw_data *ha = vha->hw;
+
+ mutex_lock(&ha->fce_mutex);
+
+ seq_puts(s, "FCE Trace Buffer\n");
+ seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
+ seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
+ seq_puts(s, "FCE Enable Registers\n");
+ seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+ ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+ ha->fce_mb[5], ha->fce_mb[6]);
+
+ fce = (uint32_t *) ha->fce;
+ fce_start = (unsigned long long) ha->fce_dma;
+ for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+ if (cnt % 8 == 0)
+ seq_printf(s, "\n%llx: ",
+ (unsigned long long)((cnt * 4) + fce_start));
+ else
+ seq_putc(s, ' ');
+ seq_printf(s, "%08x", *fce++);
+ }
+
+ seq_puts(s, "\nEnd\n");
+
+ mutex_unlock(&ha->fce_mutex);
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+
+ if (!ha->flags.fce_enabled)
+ goto out;
+
+ mutex_lock(&ha->fce_mutex);
+
+ /* Pause tracing to flush FCE buffers. */
+ rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
+ if (rval)
+ ql_dbg(ql_dbg_user, vha, 0x705c,
+ "DebugFS: Unable to disable FCE (%d).\n", rval);
+
+ ha->flags.fce_enabled = 0;
+
+ mutex_unlock(&ha->fce_mutex);
+out:
+ return single_open(file, qla2x00_dfs_fce_show, vha);
+}
+
+static int
+qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+
+ if (ha->flags.fce_enabled)
+ goto out;
+
+ mutex_lock(&ha->fce_mutex);
+
+ /* Re-enable FCE tracing. */
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
+ ha->fce_mb, &ha->fce_bufs);
+ if (rval) {
+ ql_dbg(ql_dbg_user, vha, 0x700d,
+ "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+
+ mutex_unlock(&ha->fce_mutex);
+out:
+ return single_release(inode, file);
+}
+
+static const struct file_operations dfs_fce_ops = {
+ .open = qla2x00_dfs_fce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = qla2x00_dfs_fce_release,
+};
+
+int
+qla2x00_dfs_setup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto out;
+ if (!ha->fce)
+ goto out;
+
+ if (qla2x00_dfs_root)
+ goto create_dir;
+
+ atomic_set(&qla2x00_dfs_root_count, 0);
+ qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
+ if (!qla2x00_dfs_root) {
+ ql_log(ql_log_warn, vha, 0x00f7,
+ "Unable to create debugfs root directory.\n");
+ goto out;
+ }
+
+create_dir:
+ if (ha->dfs_dir)
+ goto create_nodes;
+
+ mutex_init(&ha->fce_mutex);
+ ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
+ if (!ha->dfs_dir) {
+ ql_log(ql_log_warn, vha, 0x00f8,
+ "Unable to create debugfs ha directory.\n");
+ goto out;
+ }
+
+ atomic_inc(&qla2x00_dfs_root_count);
+
+create_nodes:
+ ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
+ &dfs_fce_ops);
+ if (!ha->dfs_fce) {
+ ql_log(ql_log_warn, vha, 0x00f9,
+ "Unable to create debugfs fce node.\n");
+ goto out;
+ }
+out:
+ return 0;
+}
+
+int
+qla2x00_dfs_remove(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ if (ha->dfs_fce) {
+ debugfs_remove(ha->dfs_fce);
+ ha->dfs_fce = NULL;
+ }
+
+ if (ha->dfs_dir) {
+ debugfs_remove(ha->dfs_dir);
+ ha->dfs_dir = NULL;
+ atomic_dec(&qla2x00_dfs_root_count);
+ }
+
+ if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
+ qla2x00_dfs_root) {
+ debugfs_remove(qla2x00_dfs_root);
+ qla2x00_dfs_root = NULL;
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
new file mode 100644
index 000000000..42bb357bf
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -0,0 +1,1954 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_FW_H
+#define __QLA_FW_H
+
+#define MBS_CHECKSUM_ERROR 0x4010
+#define MBS_INVALID_PRODUCT_KEY 0x4020
+
+/*
+ * Firmware Options.
+ */
+#define FO1_ENABLE_PUREX BIT_10
+#define FO1_DISABLE_LED_CTRL BIT_6
+#define FO1_ENABLE_8016 BIT_0
+#define FO2_ENABLE_SEL_CLASS2 BIT_5
+#define FO3_NO_ABTS_ON_LINKDOWN BIT_14
+#define FO3_HOLD_STS_IOCB BIT_12
+
+/*
+ * Port Database structure definition for ISP 24xx.
+ */
+#define PDO_FORCE_ADISC BIT_1
+#define PDO_FORCE_PLOGI BIT_0
+
+
+#define PORT_DATABASE_24XX_SIZE 64
+struct port_database_24xx {
+ uint16_t flags;
+#define PDF_TASK_RETRY_ID BIT_14
+#define PDF_FC_TAPE BIT_7
+#define PDF_ACK0_CAPABLE BIT_6
+#define PDF_FCP2_CONF BIT_5
+#define PDF_CLASS_2 BIT_4
+#define PDF_HARD_ADDR BIT_1
+
+ uint8_t current_login_state;
+ uint8_t last_login_state;
+#define PDS_PLOGI_PENDING 0x03
+#define PDS_PLOGI_COMPLETE 0x04
+#define PDS_PRLI_PENDING 0x05
+#define PDS_PRLI_COMPLETE 0x06
+#define PDS_PORT_UNAVAILABLE 0x07
+#define PDS_PRLO_PENDING 0x09
+#define PDS_LOGO_PENDING 0x11
+#define PDS_PRLI2_PENDING 0x12
+
+ uint8_t hard_address[3];
+ uint8_t reserved_1;
+
+ uint8_t port_id[3];
+ uint8_t sequence_id;
+
+ uint16_t port_timer;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t receive_data_size;
+ uint16_t reserved_2;
+
+ uint8_t prli_svc_param_word_0[2]; /* Big endian */
+ /* Bits 15-0 of word 0 */
+ uint8_t prli_svc_param_word_3[2]; /* Big endian */
+ /* Bits 15-0 of word 3 */
+
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+
+ uint8_t reserved_3[24];
+};
+
+struct vp_database_24xx {
+ uint16_t vp_status;
+ uint8_t options;
+ uint8_t id;
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ uint16_t port_id_low;
+ uint16_t port_id_high;
+};
+
+struct nvram_24xx {
+ /* NVRAM header. */
+ uint8_t id[4];
+ uint16_t nvram_version;
+ uint16_t reserved_0;
+
+ /* Firmware Initialization Control Block. */
+ uint16_t version;
+ uint16_t reserved_1;
+ __le16 frame_payload_size;
+ uint16_t execution_throttle;
+ uint16_t exchange_count;
+ uint16_t hard_address;
+
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+
+ uint16_t login_retry_count;
+ uint16_t link_down_on_nos;
+ uint16_t interrupt_delay_timer;
+ uint16_t login_timeout;
+
+ uint32_t firmware_options_1;
+ uint32_t firmware_options_2;
+ uint32_t firmware_options_3;
+
+ /* Offset 56. */
+
+ /*
+ * BIT 0 = Control Enable
+ * BIT 1-15 =
+ *
+ * BIT 0-7 = Reserved
+ * BIT 8-10 = Output Swing 1G
+ * BIT 11-13 = Output Emphasis 1G
+ * BIT 14-15 = Reserved
+ *
+ * BIT 0-7 = Reserved
+ * BIT 8-10 = Output Swing 2G
+ * BIT 11-13 = Output Emphasis 2G
+ * BIT 14-15 = Reserved
+ *
+ * BIT 0-7 = Reserved
+ * BIT 8-10 = Output Swing 4G
+ * BIT 11-13 = Output Emphasis 4G
+ * BIT 14-15 = Reserved
+ */
+ uint16_t seriallink_options[4];
+
+ uint16_t reserved_2[16];
+
+ /* Offset 96. */
+ uint16_t reserved_3[16];
+
+ /* PCIe table entries. */
+ uint16_t reserved_4[16];
+
+ /* Offset 160. */
+ uint16_t reserved_5[16];
+
+ /* Offset 192. */
+ uint16_t reserved_6[16];
+
+ /* Offset 224. */
+ uint16_t reserved_7[16];
+
+ /*
+ * BIT 0 = Enable spinup delay
+ * BIT 1 = Disable BIOS
+ * BIT 2 = Enable Memory Map BIOS
+ * BIT 3 = Enable Selectable Boot
+ * BIT 4 = Disable RISC code load
+ * BIT 5 = Disable Serdes
+ * BIT 6 =
+ * BIT 7 =
+ *
+ * BIT 8 =
+ * BIT 9 =
+ * BIT 10 = Enable lip full login
+ * BIT 11 = Enable target reset
+ * BIT 12 =
+ * BIT 13 =
+ * BIT 14 =
+ * BIT 15 = Enable alternate WWN
+ *
+ * BIT 16-31 =
+ */
+ uint32_t host_p;
+
+ uint8_t alternate_port_name[WWN_SIZE];
+ uint8_t alternate_node_name[WWN_SIZE];
+
+ uint8_t boot_port_name[WWN_SIZE];
+ uint16_t boot_lun_number;
+ uint16_t reserved_8;
+
+ uint8_t alt1_boot_port_name[WWN_SIZE];
+ uint16_t alt1_boot_lun_number;
+ uint16_t reserved_9;
+
+ uint8_t alt2_boot_port_name[WWN_SIZE];
+ uint16_t alt2_boot_lun_number;
+ uint16_t reserved_10;
+
+ uint8_t alt3_boot_port_name[WWN_SIZE];
+ uint16_t alt3_boot_lun_number;
+ uint16_t reserved_11;
+
+ /*
+ * BIT 0 = Selective Login
+ * BIT 1 = Alt-Boot Enable
+ * BIT 2 = Reserved
+ * BIT 3 = Boot Order List
+ * BIT 4 = Reserved
+ * BIT 5 = Selective LUN
+ * BIT 6 = Reserved
+ * BIT 7-31 =
+ */
+ uint32_t efi_parameters;
+
+ uint8_t reset_delay;
+ uint8_t reserved_12;
+ uint16_t reserved_13;
+
+ uint16_t boot_id_number;
+ uint16_t reserved_14;
+
+ uint16_t max_luns_per_target;
+ uint16_t reserved_15;
+
+ uint16_t port_down_retry_count;
+ uint16_t link_down_timeout;
+
+ /* FCode parameters. */
+ uint16_t fcode_parameter;
+
+ uint16_t reserved_16[3];
+
+ /* Offset 352. */
+ uint8_t prev_drv_ver_major;
+ uint8_t prev_drv_ver_submajob;
+ uint8_t prev_drv_ver_minor;
+ uint8_t prev_drv_ver_subminor;
+
+ uint16_t prev_bios_ver_major;
+ uint16_t prev_bios_ver_minor;
+
+ uint16_t prev_efi_ver_major;
+ uint16_t prev_efi_ver_minor;
+
+ uint16_t prev_fw_ver_major;
+ uint8_t prev_fw_ver_minor;
+ uint8_t prev_fw_ver_subminor;
+
+ uint16_t reserved_17[8];
+
+ /* Offset 384. */
+ uint16_t reserved_18[16];
+
+ /* Offset 416. */
+ uint16_t reserved_19[16];
+
+ /* Offset 448. */
+ uint16_t reserved_20[16];
+
+ /* Offset 480. */
+ uint8_t model_name[16];
+
+ uint16_t reserved_21[2];
+
+ /* Offset 500. */
+ /* HW Parameter Block. */
+ uint16_t pcie_table_sig;
+ uint16_t pcie_table_offset;
+
+ uint16_t subsystem_vendor_id;
+ uint16_t subsystem_device_id;
+
+ uint32_t checksum;
+};
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define ICB_VERSION 1
+struct init_cb_24xx {
+ uint16_t version;
+ uint16_t reserved_1;
+
+ uint16_t frame_payload_size;
+ uint16_t execution_throttle;
+ uint16_t exchange_count;
+
+ uint16_t hard_address;
+
+ uint8_t port_name[WWN_SIZE]; /* Big endian. */
+ uint8_t node_name[WWN_SIZE]; /* Big endian. */
+
+ uint16_t response_q_inpointer;
+ uint16_t request_q_outpointer;
+
+ uint16_t login_retry_count;
+
+ uint16_t prio_request_q_outpointer;
+
+ uint16_t response_q_length;
+ uint16_t request_q_length;
+
+ uint16_t link_down_on_nos; /* Milliseconds. */
+
+ uint16_t prio_request_q_length;
+
+ uint32_t request_q_address[2];
+ uint32_t response_q_address[2];
+ uint32_t prio_request_q_address[2];
+
+ uint16_t msix;
+ uint16_t msix_atio;
+ uint8_t reserved_2[4];
+
+ uint16_t atio_q_inpointer;
+ uint16_t atio_q_length;
+ uint32_t atio_q_address[2];
+
+ uint16_t interrupt_delay_timer; /* 100us increments. */
+ uint16_t login_timeout;
+
+ /*
+ * BIT 0 = Enable Hard Loop Id
+ * BIT 1 = Enable Fairness
+ * BIT 2 = Enable Full-Duplex
+ * BIT 3 = Reserved
+ * BIT 4 = Enable Target Mode
+ * BIT 5 = Disable Initiator Mode
+ * BIT 6 = Acquire FA-WWN
+ * BIT 7 = Enable D-port Diagnostics
+ *
+ * BIT 8 = Reserved
+ * BIT 9 = Non Participating LIP
+ * BIT 10 = Descending Loop ID Search
+ * BIT 11 = Acquire Loop ID in LIPA
+ * BIT 12 = Reserved
+ * BIT 13 = Full Login after LIP
+ * BIT 14 = Node Name Option
+ * BIT 15-31 = Reserved
+ */
+ uint32_t firmware_options_1;
+
+ /*
+ * BIT 0 = Operation Mode bit 0
+ * BIT 1 = Operation Mode bit 1
+ * BIT 2 = Operation Mode bit 2
+ * BIT 3 = Operation Mode bit 3
+ * BIT 4 = Connection Options bit 0
+ * BIT 5 = Connection Options bit 1
+ * BIT 6 = Connection Options bit 2
+ * BIT 7 = Enable Non part on LIHA failure
+ *
+ * BIT 8 = Enable Class 2
+ * BIT 9 = Enable ACK0
+ * BIT 10 = Reserved
+ * BIT 11 = Enable FC-SP Security
+ * BIT 12 = FC Tape Enable
+ * BIT 13 = Reserved
+ * BIT 14 = Enable Target PRLI Control
+ * BIT 15-31 = Reserved
+ */
+ uint32_t firmware_options_2;
+
+ /*
+ * BIT 0 = Reserved
+ * BIT 1 = Soft ID only
+ * BIT 2 = Reserved
+ * BIT 3 = Reserved
+ * BIT 4 = FCP RSP Payload bit 0
+ * BIT 5 = FCP RSP Payload bit 1
+ * BIT 6 = Enable Receive Out-of-Order data frame handling
+ * BIT 7 = Disable Automatic PLOGI on Local Loop
+ *
+ * BIT 8 = Reserved
+ * BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling
+ * BIT 10 = Reserved
+ * BIT 11 = Reserved
+ * BIT 12 = Reserved
+ * BIT 13 = Data Rate bit 0
+ * BIT 14 = Data Rate bit 1
+ * BIT 15 = Data Rate bit 2
+ * BIT 16 = Enable 75 ohm Termination Select
+ * BIT 17-28 = Reserved
+ * BIT 29 = Enable response queue 0 in index shadowing
+ * BIT 30 = Enable request queue 0 out index shadowing
+ * BIT 31 = Reserved
+ */
+ uint32_t firmware_options_3;
+ uint16_t qos;
+ uint16_t rid;
+ uint8_t reserved_3[20];
+};
+
+/*
+ * ISP queue - command entry structure definition.
+ */
+#define COMMAND_BIDIRECTIONAL 0x75
+struct cmd_bidir {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined */
+ uint8_t entry_status; /* Entry status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT hanlde. */
+
+ uint16_t timeout; /* Commnad timeout. */
+
+ uint16_t wr_dseg_count; /* Write Data segment count. */
+ uint16_t rd_dseg_count; /* Read Data segment count. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t control_flags; /* Control flags. */
+#define BD_WRAP_BACK BIT_3
+#define BD_READ_DATA BIT_1
+#define BD_WRITE_DATA BIT_0
+
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+
+ uint16_t reserved[2]; /* Reserved */
+
+ uint32_t rd_byte_count; /* Total Byte count Read. */
+ uint32_t wr_byte_count; /* Total Byte count write. */
+
+ uint8_t port_id[3]; /* PortID of destination port.*/
+ uint8_t vp_index;
+
+ uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
+ uint16_t fcp_data_dseg_len; /* Data segment length. */
+};
+
+#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */
+struct cmd_type_6 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+
+ uint16_t fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t control_flags; /* Control flags. */
+#define CF_DIF_SEG_DESCR_ENABLE BIT_3
+#define CF_DATA_SEG_DESCR_ENABLE BIT_2
+#define CF_READ_DATA BIT_1
+#define CF_WRITE_DATA BIT_0
+
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+
+ uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
+
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
+ uint32_t fcp_data_dseg_len; /* Data segment length. */
+};
+
+#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
+struct cmd_type_7 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+#define FW_MAX_TIMEOUT 0x1999
+
+ uint16_t dseg_count; /* Data segment count. */
+ uint16_t reserved_1;
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t task_mgmt_flags; /* Task management flags. */
+#define TMF_CLEAR_ACA BIT_14
+#define TMF_TARGET_RESET BIT_13
+#define TMF_LUN_RESET BIT_12
+#define TMF_CLEAR_TASK_SET BIT_10
+#define TMF_ABORT_TASK_SET BIT_9
+#define TMF_DSD_LIST_ENABLE BIT_2
+#define TMF_READ_DATA BIT_1
+#define TMF_WRITE_DATA BIT_0
+
+ uint8_t task;
+#define TSK_SIMPLE 0
+#define TSK_HEAD_OF_QUEUE 1
+#define TSK_ORDERED 2
+#define TSK_ACA 4
+#define TSK_UNTAGGED 5
+
+ uint8_t crn;
+
+ uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_0_len; /* Data segment 0 length. */
+};
+
+#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6)
+ * (T10-DIF) */
+struct cmd_type_crc_2 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+
+ uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t control_flags; /* Control flags. */
+
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+
+ uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
+
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t crc_context_address[2]; /* Data segment address. */
+ uint16_t crc_context_len; /* Data segment length. */
+ uint16_t reserved_1; /* MUST be set to 0. */
+};
+
+
+/*
+ * ISP queue - status entry structure definition.
+ */
+#define STATUS_TYPE 0x03 /* Status entry. */
+struct sts_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t comp_status; /* Completion status. */
+ uint16_t ox_id; /* OX_ID used by the firmware. */
+
+ uint32_t residual_len; /* FW calc residual transfer length. */
+
+ uint16_t reserved_1;
+ uint16_t state_flags; /* State flags. */
+#define SF_TRANSFERRED_DATA BIT_11
+#define SF_FCP_RSP_DMA BIT_0
+
+ uint16_t retry_delay;
+ uint16_t scsi_status; /* SCSI status. */
+#define SS_CONFIRMATION_REQ BIT_12
+
+ uint32_t rsp_residual_count; /* FCP RSP residual count. */
+
+ uint32_t sense_len; /* FCP SENSE length. */
+ uint32_t rsp_data_len; /* FCP response data length. */
+ uint8_t data[28]; /* FCP response/sense information. */
+ /*
+ * If DIF Error is set in comp_status, these additional fields are
+ * defined:
+ *
+ * !!! NOTE: Firmware sends expected/actual DIF data in big endian
+ * format; but all of the "data" field gets swab32-d in the beginning
+ * of qla2x00_status_entry().
+ *
+ * &data[10] : uint8_t report_runt_bg[2]; - computed guard
+ * &data[12] : uint8_t actual_dif[8]; - DIF Data received
+ * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
+ */
+};
+
+
+/*
+ * Status entry completion status
+ */
+#define CS_DATA_REASSEMBLY_ERROR 0x11 /* Data Reassembly Error.. */
+#define CS_ABTS_BY_TARGET 0x13 /* Target send ABTS to abort IOCB. */
+#define CS_FW_RESOURCE 0x2C /* Firmware Resource Unavailable. */
+#define CS_TASK_MGMT_OVERRUN 0x30 /* Task management overrun (8+). */
+#define CS_ABORT_BY_TARGET 0x47 /* Abort By Target. */
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+#define MARKER_TYPE 0x04 /* Marker entry. */
+struct mrk_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint8_t modifier; /* Modifier (7-0). */
+#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
+#define MK_SYNC_ID 1 /* Synchronize ID */
+#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */
+ uint8_t reserved_1;
+
+ uint8_t reserved_2;
+ uint8_t vp_index;
+
+ uint16_t reserved_3;
+
+ uint8_t lun[8]; /* FCP LUN (BE). */
+ uint8_t reserved_4[40];
+};
+
+/*
+ * ISP queue - CT Pass-Through entry structure definition.
+ */
+#define CT_IOCB_TYPE 0x29 /* CT Pass-Through IOCB entry */
+struct ct_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t comp_status; /* Completion status. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t cmd_dsd_count;
+
+ uint8_t vp_index;
+ uint8_t reserved_1;
+
+ uint16_t timeout; /* Command timeout. */
+ uint16_t reserved_2;
+
+ uint16_t rsp_dsd_count;
+
+ uint8_t reserved_3[10];
+
+ uint32_t rsp_byte_count;
+ uint32_t cmd_byte_count;
+
+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_0_len; /* Data segment 0 length. */
+ uint32_t dseg_1_address[2]; /* Data segment 1 address. */
+ uint32_t dseg_1_len; /* Data segment 1 length. */
+};
+
+/*
+ * ISP queue - ELS Pass-Through entry structure definition.
+ */
+#define ELS_IOCB_TYPE 0x53 /* ELS Pass-Through IOCB entry */
+struct els_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t reserved_1;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t tx_dsd_count;
+
+ uint8_t vp_index;
+ uint8_t sof_type;
+#define EST_SOFI3 (1 << 4)
+#define EST_SOFI2 (3 << 4)
+
+ uint32_t rx_xchg_address; /* Receive exchange address. */
+ uint16_t rx_dsd_count;
+
+ uint8_t opcode;
+ uint8_t reserved_2;
+
+ uint8_t port_id[3];
+ uint8_t reserved_3;
+
+ uint16_t reserved_4;
+
+ uint16_t control_flags; /* Control flags. */
+#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
+#define EPD_ELS_COMMAND (0 << 13)
+#define EPD_ELS_ACC (1 << 13)
+#define EPD_ELS_RJT (2 << 13)
+#define EPD_RX_XCHG (3 << 13)
+#define ECF_CLR_PASSTHRU_PEND BIT_12
+#define ECF_INCL_FRAME_HDR BIT_11
+
+ uint32_t rx_byte_count;
+ uint32_t tx_byte_count;
+
+ uint32_t tx_address[2]; /* Data segment 0 address. */
+ uint32_t tx_len; /* Data segment 0 length. */
+ uint32_t rx_address[2]; /* Data segment 1 address. */
+ uint32_t rx_len; /* Data segment 1 length. */
+};
+
+struct els_sts_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t comp_status;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t reserved_1;
+
+ uint8_t vp_index;
+ uint8_t sof_type;
+
+ uint32_t rx_xchg_address; /* Receive exchange address. */
+ uint16_t reserved_2;
+
+ uint8_t opcode;
+ uint8_t reserved_3;
+
+ uint8_t port_id[3];
+ uint8_t reserved_4;
+
+ uint16_t reserved_5;
+
+ uint16_t control_flags; /* Control flags. */
+ uint32_t total_byte_count;
+ uint32_t error_subcode_1;
+ uint32_t error_subcode_2;
+};
+/*
+ * ISP queue - Mailbox Command entry structure definition.
+ */
+#define MBX_IOCB_TYPE 0x39
+struct mbx_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t mbx[28];
+};
+
+
+#define LOGINOUT_PORT_IOCB_TYPE 0x52 /* Login/Logout Port entry. */
+struct logio_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t comp_status; /* Completion status. */
+#define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t control_flags; /* Control flags. */
+ /* Modifiers. */
+#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
+#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
+#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
+#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
+#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */
+#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */
+#define LCF_IMPL_LOGO_ALL BIT_5 /* Implicit LOGO to all ports. */
+#define LCF_COND_PLOGI BIT_4 /* PLOGI only if not logged-in. */
+#define LCF_IMPL_LOGO BIT_4 /* Perform an implicit LOGO. */
+#define LCF_IMPL_PRLO BIT_4 /* Perform an implicit PRLO. */
+ /* Commands. */
+#define LCF_COMMAND_PLOGI 0x00 /* PLOGI. */
+#define LCF_COMMAND_PRLI 0x01 /* PRLI. */
+#define LCF_COMMAND_PDISC 0x02 /* PDISC. */
+#define LCF_COMMAND_ADISC 0x03 /* ADISC. */
+#define LCF_COMMAND_LOGO 0x08 /* LOGO. */
+#define LCF_COMMAND_PRLO 0x09 /* PRLO. */
+#define LCF_COMMAND_TPRLO 0x0A /* TPRLO. */
+
+ uint8_t vp_index;
+ uint8_t reserved_1;
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+
+ uint8_t rsp_size; /* Response size in 32bit words. */
+
+ uint32_t io_parameter[11]; /* General I/O parameters. */
+#define LSC_SCODE_NOLINK 0x01
+#define LSC_SCODE_NOIOCB 0x02
+#define LSC_SCODE_NOXCB 0x03
+#define LSC_SCODE_CMD_FAILED 0x04
+#define LSC_SCODE_NOFABRIC 0x05
+#define LSC_SCODE_FW_NOT_READY 0x07
+#define LSC_SCODE_NOT_LOGGED_IN 0x09
+#define LSC_SCODE_NOPCB 0x0A
+
+#define LSC_SCODE_ELS_REJECT 0x18
+#define LSC_SCODE_CMD_PARAM_ERR 0x19
+#define LSC_SCODE_PORTID_USED 0x1A
+#define LSC_SCODE_NPORT_USED 0x1B
+#define LSC_SCODE_NONPORT 0x1C
+#define LSC_SCODE_LOGGED_IN 0x1D
+#define LSC_SCODE_NOFLOGI_ACC 0x1F
+};
+
+#define TSK_MGMT_IOCB_TYPE 0x14
+struct tsk_mgmt_entry {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t reserved_1;
+
+ uint16_t delay; /* Activity delay in seconds. */
+
+ uint16_t timeout; /* Command timeout. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint32_t control_flags; /* Control Flags. */
+#define TCF_NOTMCMD_TO_TARGET BIT_31
+#define TCF_LUN_RESET BIT_4
+#define TCF_ABORT_TASK_SET BIT_3
+#define TCF_CLEAR_TASK_SET BIT_2
+#define TCF_TARGET_RESET BIT_1
+#define TCF_CLEAR_ACA BIT_0
+
+ uint8_t reserved_2[20];
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint8_t reserved_3[12];
+};
+
+#define ABORT_IOCB_TYPE 0x33
+struct abort_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ /* or Completion status. */
+
+ uint16_t options; /* Options. */
+#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */
+
+ uint32_t handle_to_abort; /* System handle to abort. */
+
+ uint16_t req_que_no;
+ uint8_t reserved_1[30];
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint8_t reserved_2[12];
+};
+
+/*
+ * ISP I/O Register Set structure definitions.
+ */
+struct device_reg_24xx {
+ uint32_t flash_addr; /* Flash/NVRAM BIOS address. */
+#define FARX_DATA_FLAG BIT_31
+#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA 0x7FF00000
+#define FARX_ACCESS_NVRAM_CONF 0x7FFF0000
+#define FARX_ACCESS_NVRAM_DATA 0x7FFE0000
+
+#define FA_NVRAM_FUNC0_ADDR 0x80
+#define FA_NVRAM_FUNC1_ADDR 0x180
+
+#define FA_NVRAM_VPD_SIZE 0x200
+#define FA_NVRAM_VPD0_ADDR 0x00
+#define FA_NVRAM_VPD1_ADDR 0x100
+
+#define FA_BOOT_CODE_ADDR 0x00000
+ /*
+ * RISC code begins at offset 512KB
+ * within flash. Consisting of two
+ * contiguous RISC code segments.
+ */
+#define FA_RISC_CODE_ADDR 0x20000
+#define FA_RISC_CODE_SEGMENTS 2
+
+#define FA_FLASH_DESCR_ADDR_24 0x11000
+#define FA_FLASH_LAYOUT_ADDR_24 0x11400
+#define FA_NPIV_CONF0_ADDR_24 0x16000
+#define FA_NPIV_CONF1_ADDR_24 0x17000
+
+#define FA_FW_AREA_ADDR 0x40000
+#define FA_VPD_NVRAM_ADDR 0x48000
+#define FA_FEATURE_ADDR 0x4C000
+#define FA_FLASH_DESCR_ADDR 0x50000
+#define FA_FLASH_LAYOUT_ADDR 0x50400
+#define FA_HW_EVENT0_ADDR 0x54000
+#define FA_HW_EVENT1_ADDR 0x54400
+#define FA_HW_EVENT_SIZE 0x200
+#define FA_HW_EVENT_ENTRY_SIZE 4
+#define FA_NPIV_CONF0_ADDR 0x5C000
+#define FA_NPIV_CONF1_ADDR 0x5D000
+#define FA_FCP_PRIO0_ADDR 0x10000
+#define FA_FCP_PRIO1_ADDR 0x12000
+
+/*
+ * Flash Error Log Event Codes.
+ */
+#define HW_EVENT_RESET_ERR 0xF00B
+#define HW_EVENT_ISP_ERR 0xF020
+#define HW_EVENT_PARITY_ERR 0xF022
+#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
+#define HW_EVENT_FLASH_FW_ERR 0xF024
+
+ uint32_t flash_data; /* Flash/NVRAM BIOS data. */
+
+ uint32_t ctrl_status; /* Control/Status. */
+#define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */
+#define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */
+#define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */
+#define CSRX_FUNCTION BIT_15 /* Function number. */
+ /* PCI-X Bus Mode. */
+#define CSRX_PCIX_BUS_MODE_MASK (BIT_11|BIT_10|BIT_9|BIT_8)
+#define PBM_PCI_33MHZ (0 << 8)
+#define PBM_PCIX_M1_66MHZ (1 << 8)
+#define PBM_PCIX_M1_100MHZ (2 << 8)
+#define PBM_PCIX_M1_133MHZ (3 << 8)
+#define PBM_PCIX_M2_66MHZ (5 << 8)
+#define PBM_PCIX_M2_100MHZ (6 << 8)
+#define PBM_PCIX_M2_133MHZ (7 << 8)
+#define PBM_PCI_66MHZ (8 << 8)
+ /* Max Write Burst byte count. */
+#define CSRX_MAX_WRT_BURST_MASK (BIT_5|BIT_4)
+#define MWB_512_BYTES (0 << 4)
+#define MWB_1024_BYTES (1 << 4)
+#define MWB_2048_BYTES (2 << 4)
+#define MWB_4096_BYTES (3 << 4)
+
+#define CSRX_64BIT_SLOT BIT_2 /* PCI 64-Bit Bus Slot. */
+#define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */
+#define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */
+
+ uint32_t ictrl; /* Interrupt control. */
+#define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */
+
+ uint32_t istatus; /* Interrupt status. */
+#define ISRX_RISC_INT BIT_3 /* RISC interrupt. */
+
+ uint32_t unused_1[2]; /* Gap. */
+
+ /* Request Queue. */
+ uint32_t req_q_in; /* In-Pointer. */
+ uint32_t req_q_out; /* Out-Pointer. */
+ /* Response Queue. */
+ uint32_t rsp_q_in; /* In-Pointer. */
+ uint32_t rsp_q_out; /* Out-Pointer. */
+ /* Priority Request Queue. */
+ uint32_t preq_q_in; /* In-Pointer. */
+ uint32_t preq_q_out; /* Out-Pointer. */
+
+ uint32_t unused_2[2]; /* Gap. */
+
+ /* ATIO Queue. */
+ uint32_t atio_q_in; /* In-Pointer. */
+ uint32_t atio_q_out; /* Out-Pointer. */
+
+ uint32_t host_status;
+#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
+
+ uint32_t hccr; /* Host command & control register. */
+ /* HCCR statuses. */
+#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
+#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
+ /* HCCR commands. */
+ /* NOOP. */
+#define HCCRX_NOOP 0x00000000
+ /* Set RISC Reset. */
+#define HCCRX_SET_RISC_RESET 0x10000000
+ /* Clear RISC Reset. */
+#define HCCRX_CLR_RISC_RESET 0x20000000
+ /* Set RISC Pause. */
+#define HCCRX_SET_RISC_PAUSE 0x30000000
+ /* Releases RISC Pause. */
+#define HCCRX_REL_RISC_PAUSE 0x40000000
+ /* Set HOST to RISC interrupt. */
+#define HCCRX_SET_HOST_INT 0x50000000
+ /* Clear HOST to RISC interrupt. */
+#define HCCRX_CLR_HOST_INT 0x60000000
+ /* Clear RISC to PCI interrupt. */
+#define HCCRX_CLR_RISC_INT 0xA0000000
+
+ uint32_t gpiod; /* GPIO Data register. */
+
+ /* LED update mask. */
+#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
+ /* Data update mask. */
+#define GPDX_DATA_UPDATE_MASK (BIT_17|BIT_16)
+ /* Data update mask. */
+#define GPDX_DATA_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
+ /* LED control mask. */
+#define GPDX_LED_COLOR_MASK (BIT_4|BIT_3|BIT_2)
+ /* LED bit values. Color names as
+ * referenced in fw spec.
+ */
+#define GPDX_LED_YELLOW_ON BIT_2
+#define GPDX_LED_GREEN_ON BIT_3
+#define GPDX_LED_AMBER_ON BIT_4
+ /* Data in/out. */
+#define GPDX_DATA_INOUT (BIT_1|BIT_0)
+
+ uint32_t gpioe; /* GPIO Enable register. */
+ /* Enable update mask. */
+#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
+ /* Enable update mask. */
+#define GPEX_ENABLE_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
+ /* Enable. */
+#define GPEX_ENABLE (BIT_1|BIT_0)
+
+ uint32_t iobase_addr; /* I/O Bus Base Address register. */
+
+ uint32_t unused_3[10]; /* Gap. */
+
+ uint16_t mailbox0;
+ uint16_t mailbox1;
+ uint16_t mailbox2;
+ uint16_t mailbox3;
+ uint16_t mailbox4;
+ uint16_t mailbox5;
+ uint16_t mailbox6;
+ uint16_t mailbox7;
+ uint16_t mailbox8;
+ uint16_t mailbox9;
+ uint16_t mailbox10;
+ uint16_t mailbox11;
+ uint16_t mailbox12;
+ uint16_t mailbox13;
+ uint16_t mailbox14;
+ uint16_t mailbox15;
+ uint16_t mailbox16;
+ uint16_t mailbox17;
+ uint16_t mailbox18;
+ uint16_t mailbox19;
+ uint16_t mailbox20;
+ uint16_t mailbox21;
+ uint16_t mailbox22;
+ uint16_t mailbox23;
+ uint16_t mailbox24;
+ uint16_t mailbox25;
+ uint16_t mailbox26;
+ uint16_t mailbox27;
+ uint16_t mailbox28;
+ uint16_t mailbox29;
+ uint16_t mailbox30;
+ uint16_t mailbox31;
+
+ uint32_t iobase_window;
+ uint32_t iobase_c4;
+ uint32_t iobase_c8;
+ uint32_t unused_4_1[6]; /* Gap. */
+ uint32_t iobase_q;
+ uint32_t unused_5[2]; /* Gap. */
+ uint32_t iobase_select;
+ uint32_t unused_6[2]; /* Gap. */
+ uint32_t iobase_sdata;
+};
+/* RISC-RISC semaphore register PCI offet */
+#define RISC_REGISTER_BASE_OFFSET 0x7010
+#define RISC_REGISTER_WINDOW_OFFET 0x6
+
+/* RISC-RISC semaphore/flag register (risc address 0x7016) */
+
+#define RISC_SEMAPHORE 0x1UL
+#define RISC_SEMAPHORE_WE (RISC_SEMAPHORE << 16)
+#define RISC_SEMAPHORE_CLR (RISC_SEMAPHORE_WE | 0x0UL)
+#define RISC_SEMAPHORE_SET (RISC_SEMAPHORE_WE | RISC_SEMAPHORE)
+
+#define RISC_SEMAPHORE_FORCE 0x8000UL
+#define RISC_SEMAPHORE_FORCE_WE (RISC_SEMAPHORE_FORCE << 16)
+#define RISC_SEMAPHORE_FORCE_CLR (RISC_SEMAPHORE_FORCE_WE | 0x0UL)
+#define RISC_SEMAPHORE_FORCE_SET \
+ (RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE)
+
+/* RISC semaphore timeouts (ms) */
+#define TIMEOUT_SEMAPHORE 2500
+#define TIMEOUT_SEMAPHORE_FORCE 2000
+#define TIMEOUT_TOTAL_ELAPSED 4500
+
+/* Trace Control *************************************************************/
+
+#define TC_AEN_DISABLE 0
+
+#define TC_EFT_ENABLE 4
+#define TC_EFT_DISABLE 5
+
+#define TC_FCE_ENABLE 8
+#define TC_FCE_OPTIONS 0
+#define TC_FCE_DEFAULT_RX_SIZE 2112
+#define TC_FCE_DEFAULT_TX_SIZE 2112
+#define TC_FCE_DISABLE 9
+#define TC_FCE_DISABLE_TRACE BIT_0
+
+/* MID Support ***************************************************************/
+
+#define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */
+#define MAX_MULTI_ID_FABRIC 256 /* ... */
+
+struct mid_conf_entry_24xx {
+ uint16_t reserved_1;
+
+ /*
+ * BIT 0 = Enable Hard Loop Id
+ * BIT 1 = Acquire Loop ID in LIPA
+ * BIT 2 = ID not Acquired
+ * BIT 3 = Enable VP
+ * BIT 4 = Enable Initiator Mode
+ * BIT 5 = Disable Target Mode
+ * BIT 6-7 = Reserved
+ */
+ uint8_t options;
+
+ uint8_t hard_address;
+
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+};
+
+struct mid_init_cb_24xx {
+ struct init_cb_24xx init_cb;
+
+ uint16_t count;
+ uint16_t options;
+
+ struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
+};
+
+
+struct mid_db_entry_24xx {
+ uint16_t status;
+#define MDBS_NON_PARTIC BIT_3
+#define MDBS_ID_ACQUIRED BIT_1
+#define MDBS_ENABLED BIT_0
+
+ uint8_t options;
+ uint8_t hard_address;
+
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+
+ uint8_t port_id[3];
+ uint8_t reserved_1;
+};
+
+/*
+ * Virtual Port Control IOCB
+ */
+#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */
+struct vp_ctrl_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t vp_idx_failed;
+
+ uint16_t comp_status; /* Completion status. */
+#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
+#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquireing ID. */
+#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
+
+ uint16_t command;
+#define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */
+#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
+#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
+#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
+#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
+
+ uint16_t vp_count;
+
+ uint8_t vp_idx_map[16];
+ uint16_t flags;
+ uint16_t id;
+ uint16_t reserved_4;
+ uint16_t hopct;
+ uint8_t reserved_5[24];
+};
+
+/*
+ * Modify Virtual Port Configuration IOCB
+ */
+#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */
+struct vp_config_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count;
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t flags;
+#define CS_VF_BIND_VPORTS_TO_VF BIT_0
+#define CS_VF_SET_QOS_OF_VPORTS BIT_1
+#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
+
+ uint16_t comp_status; /* Completion status. */
+#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
+#define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */
+#define CS_VCT_ERROR 0x03 /* Unknown error. */
+#define CS_VCT_IDX_ERROR 0x02 /* Invalid VP index. */
+#define CS_VCT_BUSY 0x05 /* Firmware not ready to accept cmd. */
+
+ uint8_t command;
+#define VCT_COMMAND_MOD_VPS 0x00 /* Modify VP configurations. */
+#define VCT_COMMAND_MOD_ENABLE_VPS 0x01 /* Modify configuration & enable VPs. */
+
+ uint8_t vp_count;
+
+ uint8_t vp_index1;
+ uint8_t vp_index2;
+
+ uint8_t options_idx1;
+ uint8_t hard_address_idx1;
+ uint16_t reserved_vp1;
+ uint8_t port_name_idx1[WWN_SIZE];
+ uint8_t node_name_idx1[WWN_SIZE];
+
+ uint8_t options_idx2;
+ uint8_t hard_address_idx2;
+ uint16_t reserved_vp2;
+ uint8_t port_name_idx2[WWN_SIZE];
+ uint8_t node_name_idx2[WWN_SIZE];
+ uint16_t id;
+ uint16_t reserved_4;
+ uint16_t hopct;
+ uint8_t reserved_5[2];
+};
+
+#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
+struct vp_rpt_id_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t vp_count; /* Format 0 -- | VP setup | VP acq |. */
+ /* Format 1 -- | VP count |. */
+ uint16_t vp_idx; /* Format 0 -- Reserved. */
+ /* Format 1 -- VP status and index. */
+
+ uint8_t port_id[3];
+ uint8_t format;
+
+ uint8_t vp_idx_map[16];
+
+ uint8_t reserved_4[32];
+};
+
+#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
+struct vf_evfp_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t comp_status; /* Completion status. */
+ uint16_t timeout; /* timeout */
+ uint16_t adim_tagging_mode;
+
+ uint16_t vfport_id;
+ uint32_t exch_addr;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t control_flags;
+ uint32_t io_parameter_0;
+ uint32_t io_parameter_1;
+ uint32_t tx_address[2]; /* Data segment 0 address. */
+ uint32_t tx_len; /* Data segment 0 length. */
+ uint32_t rx_address[2]; /* Data segment 1 address. */
+ uint32_t rx_len; /* Data segment 1 length. */
+};
+
+/* END MID Support ***********************************************************/
+
+/* Flash Description Table ***************************************************/
+
+struct qla_fdt_layout {
+ uint8_t sig[4];
+ uint16_t version;
+ uint16_t len;
+ uint16_t checksum;
+ uint8_t unused1[2];
+ uint8_t model[16];
+ uint16_t man_id;
+ uint16_t id;
+ uint8_t flags;
+ uint8_t erase_cmd;
+ uint8_t alt_erase_cmd;
+ uint8_t wrt_enable_cmd;
+ uint8_t wrt_enable_bits;
+ uint8_t wrt_sts_reg_cmd;
+ uint8_t unprotect_sec_cmd;
+ uint8_t read_man_id_cmd;
+ uint32_t block_size;
+ uint32_t alt_block_size;
+ uint32_t flash_size;
+ uint32_t wrt_enable_data;
+ uint8_t read_id_addr_len;
+ uint8_t wrt_disable_bits;
+ uint8_t read_dev_id_len;
+ uint8_t chip_erase_cmd;
+ uint16_t read_timeout;
+ uint8_t protect_sec_cmd;
+ uint8_t unused2[65];
+};
+
+/* Flash Layout Table ********************************************************/
+
+struct qla_flt_location {
+ uint8_t sig[4];
+ uint16_t start_lo;
+ uint16_t start_hi;
+ uint8_t version;
+ uint8_t unused[5];
+ uint16_t checksum;
+};
+
+struct qla_flt_header {
+ uint16_t version;
+ uint16_t length;
+ uint16_t checksum;
+ uint16_t unused;
+};
+
+#define FLT_REG_FW 0x01
+#define FLT_REG_BOOT_CODE 0x07
+#define FLT_REG_VPD_0 0x14
+#define FLT_REG_NVRAM_0 0x15
+#define FLT_REG_VPD_1 0x16
+#define FLT_REG_NVRAM_1 0x17
+#define FLT_REG_VPD_2 0xD4
+#define FLT_REG_NVRAM_2 0xD5
+#define FLT_REG_VPD_3 0xD6
+#define FLT_REG_NVRAM_3 0xD7
+#define FLT_REG_FDT 0x1a
+#define FLT_REG_FLT 0x1c
+#define FLT_REG_HW_EVENT_0 0x1d
+#define FLT_REG_HW_EVENT_1 0x1f
+#define FLT_REG_NPIV_CONF_0 0x29
+#define FLT_REG_NPIV_CONF_1 0x2a
+#define FLT_REG_GOLD_FW 0x2f
+#define FLT_REG_FCP_PRIO_0 0x87
+#define FLT_REG_FCP_PRIO_1 0x88
+#define FLT_REG_CNA_FW 0x97
+#define FLT_REG_BOOT_CODE_8044 0xA2
+#define FLT_REG_FCOE_FW 0xA4
+#define FLT_REG_FCOE_NVRAM_0 0xAA
+#define FLT_REG_FCOE_NVRAM_1 0xAC
+
+struct qla_flt_region {
+ uint32_t code;
+ uint32_t size;
+ uint32_t start;
+ uint32_t end;
+};
+
+/* Flash NPIV Configuration Table ********************************************/
+
+struct qla_npiv_header {
+ uint8_t sig[2];
+ uint16_t version;
+ uint16_t entries;
+ uint16_t unused[4];
+ uint16_t checksum;
+};
+
+struct qla_npiv_entry {
+ uint16_t flags;
+ uint16_t vf_id;
+ uint8_t q_qos;
+ uint8_t f_qos;
+ uint16_t unused1;
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+};
+
+/* 84XX Support **************************************************************/
+
+#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
+#define A84_PANIC_RECOVERY 0x1
+#define A84_OP_LOGIN_COMPLETE 0x2
+#define A84_DIAG_LOGIN_COMPLETE 0x3
+#define A84_GOLD_LOGIN_COMPLETE 0x4
+
+#define MBC_ISP84XX_RESET 0x3a /* Reset. */
+
+#define FSTATE_REMOTE_FC_DOWN BIT_0
+#define FSTATE_NSL_LINK_DOWN BIT_1
+#define FSTATE_IS_DIAG_FW BIT_2
+#define FSTATE_LOGGED_IN BIT_3
+#define FSTATE_WAITING_FOR_VERIFY BIT_4
+
+#define VERIFY_CHIP_IOCB_TYPE 0x1B
+struct verify_chip_entry_84xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_defined;
+ uint8_t entry_status;
+
+ uint32_t handle;
+
+ uint16_t options;
+#define VCO_DONT_UPDATE_FW BIT_0
+#define VCO_FORCE_UPDATE BIT_1
+#define VCO_DONT_RESET_UPDATE BIT_2
+#define VCO_DIAG_FW BIT_3
+#define VCO_END_OF_DATA BIT_14
+#define VCO_ENABLE_DSD BIT_15
+
+ uint16_t reserved_1;
+
+ uint16_t data_seg_cnt;
+ uint16_t reserved_2[3];
+
+ uint32_t fw_ver;
+ uint32_t exchange_address;
+
+ uint32_t reserved_3[3];
+ uint32_t fw_size;
+ uint32_t fw_seq_size;
+ uint32_t relative_offset;
+
+ uint32_t dseg_address[2];
+ uint32_t dseg_length;
+};
+
+struct verify_chip_rsp_84xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_defined;
+ uint8_t entry_status;
+
+ uint32_t handle;
+
+ uint16_t comp_status;
+#define CS_VCS_CHIP_FAILURE 0x3
+#define CS_VCS_BAD_EXCHANGE 0x8
+#define CS_VCS_SEQ_COMPLETEi 0x40
+
+ uint16_t failure_code;
+#define VFC_CHECKSUM_ERROR 0x1
+#define VFC_INVALID_LEN 0x2
+#define VFC_ALREADY_IN_PROGRESS 0x8
+
+ uint16_t reserved_1[4];
+
+ uint32_t fw_ver;
+ uint32_t exchange_address;
+
+ uint32_t reserved_2[6];
+};
+
+#define ACCESS_CHIP_IOCB_TYPE 0x2B
+struct access_chip_84xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_defined;
+ uint8_t entry_status;
+
+ uint32_t handle;
+
+ uint16_t options;
+#define ACO_DUMP_MEMORY 0x0
+#define ACO_LOAD_MEMORY 0x1
+#define ACO_CHANGE_CONFIG_PARAM 0x2
+#define ACO_REQUEST_INFO 0x3
+
+ uint16_t reserved1;
+
+ uint16_t dseg_count;
+ uint16_t reserved2[3];
+
+ uint32_t parameter1;
+ uint32_t parameter2;
+ uint32_t parameter3;
+
+ uint32_t reserved3[3];
+ uint32_t total_byte_cnt;
+ uint32_t reserved4;
+
+ uint32_t dseg_address[2];
+ uint32_t dseg_length;
+};
+
+struct access_chip_rsp_84xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_defined;
+ uint8_t entry_status;
+
+ uint32_t handle;
+
+ uint16_t comp_status;
+ uint16_t failure_code;
+ uint32_t residual_count;
+
+ uint32_t reserved[12];
+};
+
+/* 81XX Support **************************************************************/
+
+#define MBA_DCBX_START 0x8016
+#define MBA_DCBX_COMPLETE 0x8030
+#define MBA_FCF_CONF_ERR 0x8031
+#define MBA_DCBX_PARAM_UPDATE 0x8032
+#define MBA_IDC_COMPLETE 0x8100
+#define MBA_IDC_NOTIFY 0x8101
+#define MBA_IDC_TIME_EXT 0x8102
+
+#define MBC_IDC_ACK 0x101
+#define MBC_RESTART_MPI_FW 0x3d
+#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
+#define MBC_GET_XGMAC_STATS 0x7a
+#define MBC_GET_DCBX_PARAMS 0x51
+
+/*
+ * ISP83xx mailbox commands
+ */
+#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+#define MBC_READ_REMOTE_REG 0x0009 /* Read remote register */
+#define MBC_RESTART_NIC_FIRMWARE 0x003d /* Restart NIC firmware */
+#define MBC_SET_ACCESS_CONTROL 0x003e /* Access control command */
+
+/* Flash access control option field bit definitions */
+#define FAC_OPT_FORCE_SEMAPHORE BIT_15
+#define FAC_OPT_REQUESTOR_ID BIT_14
+#define FAC_OPT_CMD_SUBCODE 0xff
+
+/* Flash access control command subcodes */
+#define FAC_OPT_CMD_WRITE_PROTECT 0x00
+#define FAC_OPT_CMD_WRITE_ENABLE 0x01
+#define FAC_OPT_CMD_ERASE_SECTOR 0x02
+#define FAC_OPT_CMD_LOCK_SEMAPHORE 0x03
+#define FAC_OPT_CMD_UNLOCK_SEMAPHORE 0x04
+#define FAC_OPT_CMD_GET_SECTOR_SIZE 0x05
+
+struct nvram_81xx {
+ /* NVRAM header. */
+ uint8_t id[4];
+ uint16_t nvram_version;
+ uint16_t reserved_0;
+
+ /* Firmware Initialization Control Block. */
+ uint16_t version;
+ uint16_t reserved_1;
+ uint16_t frame_payload_size;
+ uint16_t execution_throttle;
+ uint16_t exchange_count;
+ uint16_t reserved_2;
+
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+
+ uint16_t login_retry_count;
+ uint16_t reserved_3;
+ uint16_t interrupt_delay_timer;
+ uint16_t login_timeout;
+
+ uint32_t firmware_options_1;
+ uint32_t firmware_options_2;
+ uint32_t firmware_options_3;
+
+ uint16_t reserved_4[4];
+
+ /* Offset 64. */
+ uint8_t enode_mac[6];
+ uint16_t reserved_5[5];
+
+ /* Offset 80. */
+ uint16_t reserved_6[24];
+
+ /* Offset 128. */
+ uint16_t ex_version;
+ uint8_t prio_fcf_matching_flags;
+ uint8_t reserved_6_1[3];
+ uint16_t pri_fcf_vlan_id;
+ uint8_t pri_fcf_fabric_name[8];
+ uint16_t reserved_6_2[7];
+ uint8_t spma_mac_addr[6];
+ uint16_t reserved_6_3[14];
+
+ /* Offset 192. */
+ uint16_t reserved_7[32];
+
+ /*
+ * BIT 0 = Enable spinup delay
+ * BIT 1 = Disable BIOS
+ * BIT 2 = Enable Memory Map BIOS
+ * BIT 3 = Enable Selectable Boot
+ * BIT 4 = Disable RISC code load
+ * BIT 5 = Disable Serdes
+ * BIT 6 = Opt boot mode
+ * BIT 7 = Interrupt enable
+ *
+ * BIT 8 = EV Control enable
+ * BIT 9 = Enable lip reset
+ * BIT 10 = Enable lip full login
+ * BIT 11 = Enable target reset
+ * BIT 12 = Stop firmware
+ * BIT 13 = Enable nodename option
+ * BIT 14 = Default WWPN valid
+ * BIT 15 = Enable alternate WWN
+ *
+ * BIT 16 = CLP LUN string
+ * BIT 17 = CLP Target string
+ * BIT 18 = CLP BIOS enable string
+ * BIT 19 = CLP Serdes string
+ * BIT 20 = CLP WWPN string
+ * BIT 21 = CLP WWNN string
+ * BIT 22 =
+ * BIT 23 =
+ * BIT 24 = Keep WWPN
+ * BIT 25 = Temp WWPN
+ * BIT 26-31 =
+ */
+ uint32_t host_p;
+
+ uint8_t alternate_port_name[WWN_SIZE];
+ uint8_t alternate_node_name[WWN_SIZE];
+
+ uint8_t boot_port_name[WWN_SIZE];
+ uint16_t boot_lun_number;
+ uint16_t reserved_8;
+
+ uint8_t alt1_boot_port_name[WWN_SIZE];
+ uint16_t alt1_boot_lun_number;
+ uint16_t reserved_9;
+
+ uint8_t alt2_boot_port_name[WWN_SIZE];
+ uint16_t alt2_boot_lun_number;
+ uint16_t reserved_10;
+
+ uint8_t alt3_boot_port_name[WWN_SIZE];
+ uint16_t alt3_boot_lun_number;
+ uint16_t reserved_11;
+
+ /*
+ * BIT 0 = Selective Login
+ * BIT 1 = Alt-Boot Enable
+ * BIT 2 = Reserved
+ * BIT 3 = Boot Order List
+ * BIT 4 = Reserved
+ * BIT 5 = Selective LUN
+ * BIT 6 = Reserved
+ * BIT 7-31 =
+ */
+ uint32_t efi_parameters;
+
+ uint8_t reset_delay;
+ uint8_t reserved_12;
+ uint16_t reserved_13;
+
+ uint16_t boot_id_number;
+ uint16_t reserved_14;
+
+ uint16_t max_luns_per_target;
+ uint16_t reserved_15;
+
+ uint16_t port_down_retry_count;
+ uint16_t link_down_timeout;
+
+ /* FCode parameters. */
+ uint16_t fcode_parameter;
+
+ uint16_t reserved_16[3];
+
+ /* Offset 352. */
+ uint8_t reserved_17[4];
+ uint16_t reserved_18[5];
+ uint8_t reserved_19[2];
+ uint16_t reserved_20[8];
+
+ /* Offset 384. */
+ uint8_t reserved_21[16];
+ uint16_t reserved_22[3];
+
+ /*
+ * BIT 0 = Extended BB credits for LR
+ * BIT 1 = Virtual Fabric Enable
+ * BIT 2 = Enhanced Features Unused
+ * BIT 3-7 = Enhanced Features Reserved
+ */
+ /* Enhanced Features */
+ uint8_t enhanced_features;
+
+ uint8_t reserved_23;
+ uint16_t reserved_24[4];
+
+ /* Offset 416. */
+ uint16_t reserved_25[32];
+
+ /* Offset 480. */
+ uint8_t model_name[16];
+
+ /* Offset 496. */
+ uint16_t feature_mask_l;
+ uint16_t feature_mask_h;
+ uint16_t reserved_26[2];
+
+ uint16_t subsystem_vendor_id;
+ uint16_t subsystem_device_id;
+
+ uint32_t checksum;
+};
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define ICB_VERSION 1
+struct init_cb_81xx {
+ uint16_t version;
+ uint16_t reserved_1;
+
+ uint16_t frame_payload_size;
+ uint16_t execution_throttle;
+ uint16_t exchange_count;
+
+ uint16_t reserved_2;
+
+ uint8_t port_name[WWN_SIZE]; /* Big endian. */
+ uint8_t node_name[WWN_SIZE]; /* Big endian. */
+
+ uint16_t response_q_inpointer;
+ uint16_t request_q_outpointer;
+
+ uint16_t login_retry_count;
+
+ uint16_t prio_request_q_outpointer;
+
+ uint16_t response_q_length;
+ uint16_t request_q_length;
+
+ uint16_t reserved_3;
+
+ uint16_t prio_request_q_length;
+
+ uint32_t request_q_address[2];
+ uint32_t response_q_address[2];
+ uint32_t prio_request_q_address[2];
+
+ uint8_t reserved_4[8];
+
+ uint16_t atio_q_inpointer;
+ uint16_t atio_q_length;
+ uint32_t atio_q_address[2];
+
+ uint16_t interrupt_delay_timer; /* 100us increments. */
+ uint16_t login_timeout;
+
+ /*
+ * BIT 0-3 = Reserved
+ * BIT 4 = Enable Target Mode
+ * BIT 5 = Disable Initiator Mode
+ * BIT 6 = Reserved
+ * BIT 7 = Reserved
+ *
+ * BIT 8-13 = Reserved
+ * BIT 14 = Node Name Option
+ * BIT 15-31 = Reserved
+ */
+ uint32_t firmware_options_1;
+
+ /*
+ * BIT 0 = Operation Mode bit 0
+ * BIT 1 = Operation Mode bit 1
+ * BIT 2 = Operation Mode bit 2
+ * BIT 3 = Operation Mode bit 3
+ * BIT 4-7 = Reserved
+ *
+ * BIT 8 = Enable Class 2
+ * BIT 9 = Enable ACK0
+ * BIT 10 = Reserved
+ * BIT 11 = Enable FC-SP Security
+ * BIT 12 = FC Tape Enable
+ * BIT 13 = Reserved
+ * BIT 14 = Enable Target PRLI Control
+ * BIT 15-31 = Reserved
+ */
+ uint32_t firmware_options_2;
+
+ /*
+ * BIT 0-3 = Reserved
+ * BIT 4 = FCP RSP Payload bit 0
+ * BIT 5 = FCP RSP Payload bit 1
+ * BIT 6 = Enable Receive Out-of-Order data frame handling
+ * BIT 7 = Reserved
+ *
+ * BIT 8 = Reserved
+ * BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling
+ * BIT 10-16 = Reserved
+ * BIT 17 = Enable multiple FCFs
+ * BIT 18-20 = MAC addressing mode
+ * BIT 21-25 = Ethernet data rate
+ * BIT 26 = Enable ethernet header rx IOCB for ATIO q
+ * BIT 27 = Enable ethernet header rx IOCB for response q
+ * BIT 28 = SPMA selection bit 0
+ * BIT 28 = SPMA selection bit 1
+ * BIT 30-31 = Reserved
+ */
+ uint32_t firmware_options_3;
+
+ uint8_t reserved_5[8];
+
+ uint8_t enode_mac[6];
+
+ uint8_t reserved_6[10];
+};
+
+struct mid_init_cb_81xx {
+ struct init_cb_81xx init_cb;
+
+ uint16_t count;
+ uint16_t options;
+
+ struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
+};
+
+struct ex_init_cb_81xx {
+ uint16_t ex_version;
+ uint8_t prio_fcf_matching_flags;
+ uint8_t reserved_1[3];
+ uint16_t pri_fcf_vlan_id;
+ uint8_t pri_fcf_fabric_name[8];
+ uint16_t reserved_2[7];
+ uint8_t spma_mac_addr[6];
+ uint16_t reserved_3[14];
+};
+
+#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
+
+/* FCP priority config defines *************************************/
+/* operations */
+#define QLFC_FCP_PRIO_DISABLE 0x0
+#define QLFC_FCP_PRIO_ENABLE 0x1
+#define QLFC_FCP_PRIO_GET_CONFIG 0x2
+#define QLFC_FCP_PRIO_SET_CONFIG 0x3
+
+struct qla_fcp_prio_entry {
+ uint16_t flags; /* Describes parameter(s) in FCP */
+ /* priority entry that are valid */
+#define FCP_PRIO_ENTRY_VALID 0x1
+#define FCP_PRIO_ENTRY_TAG_VALID 0x2
+#define FCP_PRIO_ENTRY_SPID_VALID 0x4
+#define FCP_PRIO_ENTRY_DPID_VALID 0x8
+#define FCP_PRIO_ENTRY_LUNB_VALID 0x10
+#define FCP_PRIO_ENTRY_LUNE_VALID 0x20
+#define FCP_PRIO_ENTRY_SWWN_VALID 0x40
+#define FCP_PRIO_ENTRY_DWWN_VALID 0x80
+ uint8_t tag; /* Priority value */
+ uint8_t reserved; /* Reserved for future use */
+ uint32_t src_pid; /* Src port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint32_t dst_pid; /* Src port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint16_t lun_beg; /* 1st lun num of lun range. */
+ /* -1 (wild card) */
+ uint16_t lun_end; /* 2nd lun num of lun range. */
+ /* -1 (wild card) */
+ uint8_t src_wwpn[8]; /* Source WWPN: -1 (wild card) */
+ uint8_t dst_wwpn[8]; /* Destination WWPN: -1 (wild card) */
+};
+
+struct qla_fcp_prio_cfg {
+ uint8_t signature[4]; /* "HQOS" signature of config data */
+ uint16_t version; /* 1: Initial version */
+ uint16_t length; /* config data size in num bytes */
+ uint16_t checksum; /* config data bytes checksum */
+ uint16_t num_entries; /* Number of entries */
+ uint16_t size_of_entry; /* Size of each entry in num bytes */
+ uint8_t attributes; /* enable/disable, persistence */
+#define FCP_PRIO_ATTR_DISABLE 0x0
+#define FCP_PRIO_ATTR_ENABLE 0x1
+#define FCP_PRIO_ATTR_PERSIST 0x2
+ uint8_t reserved; /* Reserved for future use */
+#define FCP_PRIO_CFG_HDR_SIZE 0x10
+ struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
+#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+};
+
+#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/
+
+/* 25XX Support ****************************************************/
+#define FA_FCP_PRIO0_ADDR_25 0x3C000
+#define FA_FCP_PRIO1_ADDR_25 0x3E000
+
+/* 81XX Flash locations -- occupies second 2MB region. */
+#define FA_BOOT_CODE_ADDR_81 0x80000
+#define FA_RISC_CODE_ADDR_81 0xA0000
+#define FA_FW_AREA_ADDR_81 0xC0000
+#define FA_VPD_NVRAM_ADDR_81 0xD0000
+#define FA_VPD0_ADDR_81 0xD0000
+#define FA_VPD1_ADDR_81 0xD0400
+#define FA_NVRAM0_ADDR_81 0xD0080
+#define FA_NVRAM1_ADDR_81 0xD0180
+#define FA_FEATURE_ADDR_81 0xD4000
+#define FA_FLASH_DESCR_ADDR_81 0xD8000
+#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
+#define FA_HW_EVENT0_ADDR_81 0xDC000
+#define FA_HW_EVENT1_ADDR_81 0xDC400
+#define FA_NPIV_CONF0_ADDR_81 0xD1000
+#define FA_NPIV_CONF1_ADDR_81 0xD2000
+
+/* 83XX Flash locations -- occupies second 8MB region. */
+#define FA_FLASH_LAYOUT_ADDR_83 0xFC400
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
new file mode 100644
index 000000000..7686bfe9a
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -0,0 +1,769 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_GBL_H
+#define __QLA_GBL_H
+
+#include <linux/interrupt.h>
+
+/*
+ * Global Function Prototypes in qla_init.c source file.
+ */
+extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+
+extern int qla2100_pci_config(struct scsi_qla_host *);
+extern int qla2300_pci_config(struct scsi_qla_host *);
+extern int qla24xx_pci_config(scsi_qla_host_t *);
+extern int qla25xx_pci_config(scsi_qla_host_t *);
+extern void qla2x00_reset_chip(struct scsi_qla_host *);
+extern void qla24xx_reset_chip(struct scsi_qla_host *);
+extern int qla2x00_chip_diag(struct scsi_qla_host *);
+extern int qla24xx_chip_diag(struct scsi_qla_host *);
+extern void qla2x00_config_rings(struct scsi_qla_host *);
+extern void qla24xx_config_rings(struct scsi_qla_host *);
+extern void qla2x00_reset_adapter(struct scsi_qla_host *);
+extern void qla24xx_reset_adapter(struct scsi_qla_host *);
+extern int qla2x00_nvram_config(struct scsi_qla_host *);
+extern int qla24xx_nvram_config(struct scsi_qla_host *);
+extern int qla81xx_nvram_config(struct scsi_qla_host *);
+extern void qla2x00_update_fw_options(struct scsi_qla_host *);
+extern void qla24xx_update_fw_options(scsi_qla_host_t *);
+extern void qla81xx_update_fw_options(scsi_qla_host_t *);
+extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
+extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
+
+extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
+extern int qla2x00_loop_resync(scsi_qla_host_t *);
+
+extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
+
+extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
+extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
+
+extern void qla2x00_update_fcports(scsi_qla_host_t *);
+
+extern int qla2x00_abort_isp(scsi_qla_host_t *);
+extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
+extern void qla2x00_quiesce_io(scsi_qla_host_t *);
+
+extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
+
+extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
+extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
+
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
+
+extern void qla84xx_put_chip(struct scsi_qla_host *);
+
+extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
+extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+
+extern fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
+
+extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t);
+extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *);
+extern void qla83xx_idc_audit(scsi_qla_host_t *, int);
+extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
+extern void qla83xx_reset_ownership(scsi_qla_host_t *);
+extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+extern int qla2x00_init_rings(scsi_qla_host_t *);
+
+/*
+ * Global Data in qla_os.c source file.
+ */
+extern char qla2x00_version_str[];
+
+extern int ql2xlogintimeout;
+extern int qlport_down_retry;
+extern int ql2xplogiabsentdevice;
+extern int ql2xloginretrycount;
+extern int ql2xfdmienable;
+extern int ql2xallocfwdump;
+extern int ql2xextended_error_logging;
+extern int ql2xiidmaenable;
+extern int ql2xmaxqueues;
+extern int ql2xmultique_tag;
+extern int ql2xfwloadbin;
+extern int ql2xetsenable;
+extern int ql2xshiftctondsd;
+extern int ql2xdbwr;
+extern int ql2xasynctmfenable;
+extern int ql2xgffidenable;
+extern int ql2xenabledif;
+extern int ql2xenablehba_err_chk;
+extern int ql2xtargetreset;
+extern int ql2xdontresethba;
+extern uint64_t ql2xmaxlun;
+extern int ql2xmdcapmask;
+extern int ql2xmdenable;
+
+extern int qla2x00_loop_reset(scsi_qla_host_t *);
+extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
+extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
+ fc_host_event_code, u32);
+extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
+extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+
+extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
+
+extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
+ struct qla_hw_data *);
+extern void qla2x00_free_host(struct scsi_qla_host *);
+extern void qla2x00_relogin(struct scsi_qla_host *);
+extern void qla2x00_do_work(struct scsi_qla_host *);
+extern void qla2x00_free_fcports(struct scsi_qla_host *);
+
+extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
+extern void qla83xx_service_idc_aen(struct work_struct *);
+extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *);
+extern void qla83xx_idc_state_handler_work(struct work_struct *);
+extern void qla83xx_nic_core_reset_work(struct work_struct *);
+
+extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t);
+extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t);
+extern int qla83xx_idc_state_handler(scsi_qla_host_t *);
+extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
+
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
+extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+
+/*
+ * Global Functions in qla_mid.c source file.
+ */
+extern struct scsi_host_template qla2xxx_driver_template;
+extern struct scsi_transport_template *qla2xxx_transport_vport_template;
+extern void qla2x00_timer(scsi_qla_host_t *);
+extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
+extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
+extern int qla24xx_disable_vp (scsi_qla_host_t *);
+extern int qla24xx_enable_vp (scsi_qla_host_t *);
+extern int qla24xx_control_vp(scsi_qla_host_t *, int );
+extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
+extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
+extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
+extern int qla24xx_configure_vhba (scsi_qla_host_t *);
+extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
+ struct vp_rpt_id_entry_24xx *);
+extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
+extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
+extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
+
+extern void qla2x00_sp_free_dma(void *, void *);
+extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
+
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
+
+extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
+
+extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
+extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
+extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *);
+
+extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
+extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
+extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
+ uint16_t *);
+extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
+
+/*
+ * Global Function Prototypes in qla_iocb.c source file.
+ */
+
+extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
+extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
+extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern int qla2x00_start_scsi(srb_t *sp);
+extern int qla24xx_start_scsi(srb_t *sp);
+int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
+ uint16_t, uint64_t, uint8_t);
+extern int qla2x00_start_sp(srb_t *);
+extern int qla24xx_dif_start_scsi(srb_t *);
+extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
+
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
+extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
+
+
+/*
+ * Global Function Prototypes in qla_mbx.c source file.
+ */
+extern int
+qla2x00_load_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
+extern int
+qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
+extern int
+qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
+
+extern int
+qla2x00_get_fw_version(scsi_qla_host_t *);
+
+extern int
+qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_set_fw_options(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_mbx_reg_test(scsi_qla_host_t *);
+
+extern int
+qla2x00_verify_checksum(scsi_qla_host_t *, uint32_t);
+
+extern int
+qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
+
+extern int
+qla2x00_abort_command(srb_t *);
+
+extern int
+qla2x00_abort_target(struct fc_port *, uint64_t, int);
+
+extern int
+qla2x00_lun_reset(struct fc_port *, uint64_t, int);
+
+extern int
+qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
+ uint8_t *, uint16_t *, uint16_t *);
+
+extern int
+qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
+
+extern int
+qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
+
+extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
+extern int
+qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
+
+extern int
+qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
+
+extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
+extern int
+qla2x00_lip_reset(scsi_qla_host_t *);
+
+extern int
+qla2x00_send_sns(scsi_qla_host_t *, dma_addr_t, uint16_t, size_t);
+
+extern int
+qla2x00_login_fabric(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t,
+ uint16_t *, uint8_t);
+extern int
+qla24xx_login_fabric(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t,
+ uint16_t *, uint8_t);
+
+extern int
+qla2x00_login_local_device(scsi_qla_host_t *, fc_port_t *, uint16_t *,
+ uint8_t);
+
+extern int
+qla2x00_fabric_logout(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t);
+
+extern int
+qla24xx_fabric_logout(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t);
+
+extern int
+qla2x00_full_login_lip(scsi_qla_host_t *ha);
+
+extern int
+qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
+
+extern int
+qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *,
+ uint16_t *, uint16_t *, uint16_t *, uint16_t *);
+
+extern int
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+
+extern int
+qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
+ dma_addr_t);
+
+extern int
+qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
+ dma_addr_t);
+
+extern int qla24xx_abort_command(srb_t *);
+extern int qla24xx_async_abort_command(srb_t *);
+extern int
+qla24xx_abort_target(struct fc_port *, uint64_t, int);
+extern int
+qla24xx_lun_reset(struct fc_port *, uint64_t, int);
+extern int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int,
+ uint64_t, enum nexus_wait_type);
+extern int
+qla2x00_system_error(scsi_qla_host_t *);
+
+extern int
+qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t);
+extern int
+qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
+
+extern int
+qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int
+qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
+qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
+
+extern int
+qla2x00_stop_firmware(scsi_qla_host_t *);
+
+extern int
+qla2x00_enable_eft_trace(scsi_qla_host_t *, dma_addr_t, uint16_t);
+extern int
+qla2x00_disable_eft_trace(scsi_qla_host_t *);
+
+extern int
+qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
+ uint32_t *);
+
+extern int
+qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+
+extern int
+qla82xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla25xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
+ uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern int
+qla2x00_write_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
+ uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern int
+qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
+
+extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
+
+extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla81xx_fac_get_sector_size(scsi_qla_host_t *, uint32_t *);
+
+extern int
+qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
+
+extern int
+qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
+
+extern int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
+
+extern int
+qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
+
+extern int
+qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
+qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+
+extern int
+qla81xx_write_mpi_register(scsi_qla_host_t *, uint16_t *);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
+ uint16_t *);
+extern int
+qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
+
+extern int
+qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
+/*
+ * Global Function Prototypes in qla_isr.c source file.
+ */
+extern irqreturn_t qla2100_intr_handler(int, void *);
+extern irqreturn_t qla2300_intr_handler(int, void *);
+extern irqreturn_t qla24xx_intr_handler(int, void *);
+extern void qla2x00_process_response_queue(struct rsp_que *);
+extern void
+qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
+extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
+extern void qla2x00_free_irqs(scsi_qla_host_t *);
+
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
+extern srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
+ void *);
+extern void
+qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
+ uint32_t);
+
+/*
+ * Global Function Prototypes in qla_sup.c source file.
+ */
+extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
+extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
+ uint32_t, uint32_t);
+extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
+
+extern int qla2x00_beacon_on(struct scsi_qla_host *);
+extern int qla2x00_beacon_off(struct scsi_qla_host *);
+extern void qla2x00_beacon_blink(struct scsi_qla_host *);
+extern int qla24xx_beacon_on(struct scsi_qla_host *);
+extern int qla24xx_beacon_off(struct scsi_qla_host *);
+extern void qla24xx_beacon_blink(struct scsi_qla_host *);
+extern void qla83xx_beacon_blink(struct scsi_qla_host *);
+extern int qla82xx_beacon_on(struct scsi_qla_host *);
+extern int qla82xx_beacon_off(struct scsi_qla_host *);
+extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
+extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
+extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
+ uint32_t, uint16_t *);
+
+extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
+ uint8_t *, uint32_t, uint32_t);
+extern void qla8044_watchdog(struct scsi_qla_host *vha);
+
+extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *);
+
+extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
+
+extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+
+/*
+ * Global Function Prototypes in qla_dbg.c source file.
+ */
+extern void qla2100_fw_dump(scsi_qla_host_t *, int);
+extern void qla2300_fw_dump(scsi_qla_host_t *, int);
+extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern int qla27xx_fwdt_template_valid(void *);
+extern ulong qla27xx_fwdt_template_size(void *);
+extern const void *qla27xx_fwdt_template_default(void);
+extern ulong qla27xx_fwdt_template_default_size(void);
+
+extern void qla2x00_dump_regs(scsi_qla_host_t *);
+extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
+extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
+extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
+extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
+ uint8_t *, uint32_t);
+extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
+
+/*
+ * Global Function Prototypes in qla_gs.c source file.
+ */
+extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *);
+extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *);
+extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_rft_id(scsi_qla_host_t *);
+extern int qla2x00_rff_id(scsi_qla_host_t *);
+extern int qla2x00_rnn_id(scsi_qla_host_t *);
+extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
+extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla2x00_fdmi_register(scsi_qla_host_t *);
+extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
+extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+
+/*
+ * Global Function Prototypes in qla_attr.c source file.
+ */
+struct device_attribute;
+extern struct device_attribute *qla2x00_host_attrs[];
+struct fc_function_template;
+extern struct fc_function_template qla2xxx_transport_functions;
+extern struct fc_function_template qla2xxx_transport_vport_functions;
+extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
+extern void qla2x00_init_host_attr(scsi_qla_host_t *);
+extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *,
+ struct msg_echo_lb *, uint16_t *);
+extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
+extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
+ struct qla_fcp_prio_cfg *, uint8_t);
+
+/*
+ * Global Function Prototypes in qla_dfs.c source file.
+ */
+extern int qla2x00_dfs_setup(scsi_qla_host_t *);
+extern int qla2x00_dfs_remove(scsi_qla_host_t *);
+
+/* Globa function prototypes for multi-q */
+extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
+extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
+ uint16_t, int, uint8_t);
+extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
+ uint16_t, int);
+extern void qla2x00_init_response_q_entries(struct rsp_que *);
+extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_queues(struct scsi_qla_host *);
+extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+
+/* qlafx00 related functions */
+extern int qlafx00_pci_config(struct scsi_qla_host *);
+extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
+extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_chip_diag(scsi_qla_host_t *);
+extern void qlafx00_config_rings(struct scsi_qla_host *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
+extern irqreturn_t qlafx00_intr_handler(int, void *);
+extern void qlafx00_enable_intrs(struct qla_hw_data *);
+extern void qlafx00_disable_intrs(struct qla_hw_data *);
+extern int qlafx00_abort_target(fc_port_t *, uint64_t, int);
+extern int qlafx00_lun_reset(fc_port_t *, uint64_t, int);
+extern int qlafx00_start_scsi(srb_t *);
+extern int qlafx00_abort_isp(scsi_qla_host_t *);
+extern int qlafx00_iospace_config(struct qla_hw_data *);
+extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int);
+extern int qlafx00_fw_ready(scsi_qla_host_t *);
+extern int qlafx00_configure_devices(scsi_qla_host_t *);
+extern int qlafx00_reset_initialize(scsi_qla_host_t *);
+extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
+extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
+ uint32_t *, int);
+extern uint32_t qlafx00_fw_state_show(struct device *,
+ struct device_attribute *, char *);
+extern void qlafx00_get_host_speed(struct Scsi_Host *);
+extern void qlafx00_init_response_q_entries(struct rsp_que *);
+
+extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
+extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
+extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
+extern void qlafx00_timer_routine(scsi_qla_host_t *);
+extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
+
+/* qla82xx related functions */
+
+/* PCI related functions */
+extern int qla82xx_pci_config(struct scsi_qla_host *);
+extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_pci_region_offset(struct pci_dev *, int);
+extern int qla82xx_iospace_config(struct qla_hw_data *);
+
+/* Initialization related functions */
+extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern void qla82xx_config_rings(struct scsi_qla_host *);
+extern void qla82xx_watchdog(scsi_qla_host_t *);
+extern int qla82xx_start_firmware(scsi_qla_host_t *);
+
+/* Firmware and flash related functions */
+extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+
+/* Mailbox related functions */
+extern int qla82xx_abort_isp(scsi_qla_host_t *);
+extern int qla82xx_restart_isp(scsi_qla_host_t *);
+
+/* IOCB related functions */
+extern int qla82xx_start_scsi(srb_t *);
+extern void qla2x00_sp_free(void *, void *);
+extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_bsg_job_done(void *, void *, int);
+extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
+
+/* Interrupt related */
+extern irqreturn_t qla82xx_intr_handler(int, void *);
+extern irqreturn_t qla82xx_msi_handler(int, void *);
+extern irqreturn_t qla82xx_msix_default(int, void *);
+extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
+extern void qla82xx_enable_intrs(struct qla_hw_data *);
+extern void qla82xx_disable_intrs(struct qla_hw_data *);
+extern void qla82xx_poll(int, void *);
+extern void qla82xx_init_flags(struct qla_hw_data *);
+
+/* ISP 8021 hardware related */
+extern void qla82xx_set_drv_active(scsi_qla_host_t *);
+extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
+extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
+extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
+
+/* ISP 8021 IDC */
+extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
+extern int qla82xx_idc_lock(struct qla_hw_data *);
+extern void qla82xx_idc_unlock(struct qla_hw_data *);
+extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
+extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
+
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
+ size_t, char *);
+extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
+extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
+extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern int qla82xx_check_md_needed(scsi_qla_host_t *);
+extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
+extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
+extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
+extern int qla82xx_read_temperature(scsi_qla_host_t *);
+extern int qla8044_read_temperature(scsi_qla_host_t *);
+
+/* BSG related functions */
+extern int qla24xx_bsg_request(struct fc_bsg_job *);
+extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
+ dma_addr_t, size_t, uint32_t);
+extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
+ uint16_t *, uint16_t *);
+
+/* 83xx related functions */
+extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+
+/* Minidump related functions */
+extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
+extern int qla82xx_md_get_template(scsi_qla_host_t *);
+extern int qla82xx_md_alloc(scsi_qla_host_t *);
+extern void qla82xx_md_free(scsi_qla_host_t *);
+extern int qla82xx_md_collect(scsi_qla_host_t *);
+extern void qla82xx_md_prep(scsi_qla_host_t *);
+extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
+extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha);
+
+/* Function declarations for ISP8044 */
+extern int qla8044_idc_lock(struct qla_hw_data *ha);
+extern void qla8044_idc_unlock(struct qla_hw_data *ha);
+extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr);
+extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val);
+extern void qla8044_read_reset_template(struct scsi_qla_host *ha);
+extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
+extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
+extern void qla8044_wr_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg, const uint32_t value);
+extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
+extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
+extern void qla8044_clear_drv_active(struct qla_hw_data *);
+void qla8044_get_minidump(struct scsi_qla_host *vha);
+int qla8044_collect_md_data(struct scsi_qla_host *vha);
+extern int qla8044_md_get_template(scsi_qla_host_t *);
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern irqreturn_t qla8044_intr_handler(int, void *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern int qla8044_abort_isp(scsi_qla_host_t *);
+extern int qla8044_check_fw_alive(struct scsi_qla_host *);
+
+extern void qlt_host_reset_handler(struct qla_hw_data *ha);
+#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
new file mode 100644
index 000000000..dccc4dcc3
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -0,0 +1,2694 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+#include <linux/utsname.h>
+
+static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
+static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
+static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
+static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
+static int qla2x00_sns_rft_id(scsi_qla_host_t *);
+static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+
+/**
+ * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
+ * @ha: HA context
+ * @req_size: request size in bytes
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's ms_iocb.
+ */
+void *
+qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+
+ ms_pkt = ha->ms_iocb;
+ memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
+
+ ms_pkt->entry_type = MS_IOCB_TYPE;
+ ms_pkt->entry_count = 1;
+ SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
+ ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
+ ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
+ ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
+ ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
+ ms_pkt->req_bytecount = cpu_to_le32(req_size);
+
+ ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+
+ ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+
+ vha->qla_stats.control_requests++;
+
+ return (ms_pkt);
+}
+
+/**
+ * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
+ * @ha: HA context
+ * @req_size: request size in bytes
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's ms_iocb.
+ */
+void *
+qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct ct_entry_24xx *ct_pkt;
+
+ ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+ memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
+
+ ct_pkt->entry_type = CT_IOCB_TYPE;
+ ct_pkt->entry_count = 1;
+ ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
+ ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
+ ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+
+ ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+
+ ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = vha->vp_idx;
+
+ vha->qla_stats.control_requests++;
+
+ return (ct_pkt);
+}
+
+/**
+ * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
+ * @ct_req: CT request buffer
+ * @cmd: GS command
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the intitialized @ct_req.
+ */
+static inline struct ct_sns_req *
+qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
+{
+ memset(p, 0, sizeof(struct ct_sns_pkt));
+
+ p->p.req.header.revision = 0x01;
+ p->p.req.header.gs_type = 0xFC;
+ p->p.req.header.gs_subtype = 0x02;
+ p->p.req.command = cpu_to_be16(cmd);
+ p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
+
+ return &p->p.req;
+}
+
+static int
+qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
+ struct ct_sns_rsp *ct_rsp, const char *routine)
+{
+ int rval;
+ uint16_t comp_status;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = QLA_FUNCTION_FAILED;
+ if (ms_pkt->entry_status != 0) {
+ ql_dbg(ql_dbg_disc, vha, 0x2031,
+ "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
+ routine, ms_pkt->entry_status, vha->d_id.b.domain,
+ vha->d_id.b.area, vha->d_id.b.al_pa);
+ } else {
+ if (IS_FWI2_CAPABLE(ha))
+ comp_status = le16_to_cpu(
+ ((struct ct_entry_24xx *)ms_pkt)->comp_status);
+ else
+ comp_status = le16_to_cpu(ms_pkt->status);
+ switch (comp_status) {
+ case CS_COMPLETE:
+ case CS_DATA_UNDERRUN:
+ case CS_DATA_OVERRUN: /* Overrun? */
+ if (ct_rsp->header.response !=
+ __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
+ "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
+ routine, vha->d_id.b.domain,
+ vha->d_id.b.area, vha->d_id.b.al_pa,
+ comp_status, ct_rsp->header.response);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
+ 0x2078, (uint8_t *)&ct_rsp->header,
+ sizeof(struct ct_rsp_hdr));
+ rval = QLA_INVALID_COMMAND;
+ } else
+ rval = QLA_SUCCESS;
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x2033,
+ "%s failed, completion status (%x) on port_id: "
+ "%02x%02x%02x.\n", routine, comp_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa);
+ break;
+ }
+ }
+ return rval;
+}
+
+/**
+ * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
+ * @ha: HA context
+ * @fcport: fcport entry to updated
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval;
+
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_ga_nxt(vha, fcport);
+
+ /* Issue GA_NXT */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
+ GA_NXT_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
+ GA_NXT_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2062,
+ "GA_NXT issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Populate fc_port_t entry. */
+ fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
+ fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
+ fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
+
+ memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
+ WWN_SIZE);
+ memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
+ WWN_SIZE);
+
+ fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
+ FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+
+ if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
+ ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
+ fcport->d_id.b.domain = 0xf0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2063,
+ "GA_NXT entry - nn %8phN pn %8phN "
+ "port_id=%02x%02x%02x.\n",
+ fcport->node_name, fcport->port_name,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ }
+
+ return (rval);
+}
+
+static inline int
+qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
+{
+ return vha->hw->max_fibre_devices * 4 + 16;
+}
+
+/**
+ * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * NOTE: Non-Nx_Ports are not requested.
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval;
+ uint16_t i;
+
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ struct ct_sns_gid_pt_data *gid_data;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t gid_pt_rsp_size;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_gid_pt(vha, list);
+
+ gid_data = NULL;
+ gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
+ /* Issue GID_PT */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
+ gid_pt_rsp_size);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_type */
+ ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2055,
+ "GID_PT issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Set port IDs in switch info list. */
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ gid_data = &ct_rsp->rsp.gid_pt.entries[i];
+ list[i].d_id.b.domain = gid_data->port_id[0];
+ list[i].d_id.b.area = gid_data->port_id[1];
+ list[i].d_id.b.al_pa = gid_data->port_id[2];
+ memset(list[i].fabric_port_name, 0, WWN_SIZE);
+ list[i].fp_speed = PORT_SPEED_UNKNOWN;
+
+ /* Last one exit. */
+ if (gid_data->control_byte & BIT_7) {
+ list[i].d_id.b.rsvd_1 = gid_data->control_byte;
+ break;
+ }
+ }
+
+ /*
+ * If we've used all available slots, then the switch is
+ * reporting back more devices than we can handle with this
+ * single call. Return a failed status, and let GA_NXT handle
+ * the overload.
+ */
+ if (i == ha->max_fibre_devices)
+ rval = QLA_FUNCTION_FAILED;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t i;
+
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_gpn_id(vha, list);
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Issue GPN_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
+ GPN_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
+ GPN_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id */
+ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+ ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2056,
+ "GPN_ID issue IOCB failed (%d).\n", rval);
+ break;
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+ "GPN_ID") != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else {
+ /* Save portname */
+ memcpy(list[i].port_name,
+ ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t i;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_gnn_id(vha, list);
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Issue GNN_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
+ GNN_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
+ GNN_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id */
+ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+ ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2057,
+ "GNN_ID issue IOCB failed (%d).\n", rval);
+ break;
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+ "GNN_ID") != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else {
+ /* Save nodename */
+ memcpy(list[i].node_name,
+ ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
+
+ ql_dbg(ql_dbg_disc, vha, 0x2058,
+ "GID_PT entry - nn %8phN pn %8phN "
+ "portid=%02x%02x%02x.\n",
+ list[i].node_name, list[i].port_name,
+ list[i].d_id.b.domain, list[i].d_id.b.area,
+ list[i].d_id.b.al_pa);
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rft_id(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_rft_id(vha);
+
+ /* Issue RFT_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
+ RFT_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
+ RFT_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id, FC-4 types */
+ ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
+ ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
+ ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
+
+ ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "RFT_ID issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2044,
+ "RFT_ID exiting normally.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rff_id(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2046,
+ "RFF_ID call not supported on ISP2100/ISP2200.\n");
+ return (QLA_SUCCESS);
+ }
+
+ /* Issue RFF_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
+ RFF_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
+ RFF_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+ ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
+ ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
+ ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+
+ qlt_rff_id(vha, ct_req);
+
+ ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2047,
+ "RFF_ID issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2048,
+ "RFF_ID exiting normally.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rnn_id(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_rnn_id(vha);
+
+ /* Issue RNN_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
+ RNN_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id, node_name */
+ ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
+ ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
+ ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
+
+ memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x204d,
+ "RNN_ID issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204e,
+ "RNN_ID exiting normally.\n");
+ }
+
+ return (rval);
+}
+
+void
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLAFX00(ha))
+ snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
+ ha->mr.fw_version, qla2x00_version_str);
+ else
+ snprintf(snn, size,
+ "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+ ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version, qla2x00_version_str);
+}
+
+/**
+ * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rsnn_nn(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2050,
+ "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
+ return (QLA_SUCCESS);
+ }
+
+ /* Issue RSNN_NN */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
+ RSNN_NN_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- node_name, symbolic node_name, size */
+ memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
+
+ /* Prepare the Symbolic Node Name */
+ qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
+ sizeof(ct_req->req.rsnn_nn.sym_node_name));
+
+ /* Calculate SNN length */
+ ct_req->req.rsnn_nn.name_len =
+ (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
+
+ /* Update MS IOCB request */
+ ms_pkt->req_bytecount =
+ cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
+ ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2051,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2052,
+ "RSNN_NN exiting normally.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
+ * @ha: HA context
+ * @cmd: GS command
+ * @scmd_len: Subcommand length
+ * @data_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's sns_cmd.
+ */
+static inline struct sns_cmd_pkt *
+qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
+ uint16_t data_size)
+{
+ uint16_t wc;
+ struct sns_cmd_pkt *sns_cmd;
+ struct qla_hw_data *ha = vha->hw;
+
+ sns_cmd = ha->sns_cmd;
+ memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
+ wc = data_size / 2; /* Size in 16bit words. */
+ sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
+ sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
+ sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
+ sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
+ sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
+ wc = (data_size - 16) / 4; /* Size in 32bit words. */
+ sns_cmd->p.cmd.size = cpu_to_le16(wc);
+
+ vha->qla_stats.control_requests++;
+
+ return (sns_cmd);
+}
+
+/**
+ * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
+ * @ha: HA context
+ * @fcport: fcport entry to updated
+ *
+ * This command uses the old Exectute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ struct sns_cmd_pkt *sns_cmd;
+
+ /* Issue GA_NXT. */
+ /* Prepare SNS command request. */
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
+ GA_NXT_SNS_DATA_SIZE);
+
+ /* Prepare SNS command arguments -- port_id. */
+ sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
+ sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
+
+ /* Execute SNS command. */
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
+ sizeof(struct sns_cmd_pkt));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x205f,
+ "GA_NXT Send SNS failed (%d).\n", rval);
+ } else if (sns_cmd->p.gan_data[8] != 0x80 ||
+ sns_cmd->p.gan_data[9] != 0x02) {
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
+ "GA_NXT failed, rejected request ga_nxt_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
+ sns_cmd->p.gan_data, 16);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Populate fc_port_t entry. */
+ fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
+ fcport->d_id.b.area = sns_cmd->p.gan_data[18];
+ fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
+
+ memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
+ memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
+
+ if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
+ sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
+ fcport->d_id.b.domain = 0xf0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2061,
+ "GA_NXT entry - nn %8phN pn %8phN "
+ "port_id=%02x%02x%02x.\n",
+ fcport->node_name, fcport->port_name,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * This command uses the old Exectute SNS Command mailbox routine.
+ *
+ * NOTE: Non-Nx_Ports are not requested.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t i;
+ uint8_t *entry;
+ struct sns_cmd_pkt *sns_cmd;
+ uint16_t gid_pt_sns_data_size;
+
+ gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
+
+ /* Issue GID_PT. */
+ /* Prepare SNS command request. */
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
+ gid_pt_sns_data_size);
+
+ /* Prepare SNS command arguments -- port_type. */
+ sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
+
+ /* Execute SNS command. */
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
+ sizeof(struct sns_cmd_pkt));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x206d,
+ "GID_PT Send SNS failed (%d).\n", rval);
+ } else if (sns_cmd->p.gid_data[8] != 0x80 ||
+ sns_cmd->p.gid_data[9] != 0x02) {
+ ql_dbg(ql_dbg_disc, vha, 0x202f,
+ "GID_PT failed, rejected request, gid_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
+ sns_cmd->p.gid_data, 16);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Set port IDs in switch info list. */
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ entry = &sns_cmd->p.gid_data[(i * 4) + 16];
+ list[i].d_id.b.domain = entry[1];
+ list[i].d_id.b.area = entry[2];
+ list[i].d_id.b.al_pa = entry[3];
+
+ /* Last one exit. */
+ if (entry[0] & BIT_7) {
+ list[i].d_id.b.rsvd_1 = entry[0];
+ break;
+ }
+ }
+
+ /*
+ * If we've used all available slots, then the switch is
+ * reporting back more devices that we can handle with this
+ * single call. Return a failed status, and let GA_NXT handle
+ * the overload.
+ */
+ if (i == ha->max_fibre_devices)
+ rval = QLA_FUNCTION_FAILED;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * This command uses the old Exectute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t i;
+ struct sns_cmd_pkt *sns_cmd;
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Issue GPN_ID */
+ /* Prepare SNS command request. */
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
+ GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
+
+ /* Prepare SNS command arguments -- port_id. */
+ sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
+ sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
+
+ /* Execute SNS command. */
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
+ GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2032,
+ "GPN_ID Send SNS failed (%d).\n", rval);
+ } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
+ sns_cmd->p.gpn_data[9] != 0x02) {
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
+ "GPN_ID failed, rejected request, gpn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
+ sns_cmd->p.gpn_data, 16);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Save portname */
+ memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
+ WWN_SIZE);
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * This command uses the old Exectute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t i;
+ struct sns_cmd_pkt *sns_cmd;
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Issue GNN_ID */
+ /* Prepare SNS command request. */
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
+ GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
+
+ /* Prepare SNS command arguments -- port_id. */
+ sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
+ sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
+
+ /* Execute SNS command. */
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
+ GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x203f,
+ "GNN_ID Send SNS failed (%d).\n", rval);
+ } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
+ sns_cmd->p.gnn_data[9] != 0x02) {
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
+ "GNN_ID failed, rejected request, gnn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
+ sns_cmd->p.gnn_data, 16);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Save nodename */
+ memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
+ WWN_SIZE);
+
+ ql_dbg(ql_dbg_disc, vha, 0x206e,
+ "GID_PT entry - nn %8phN pn %8phN "
+ "port_id=%02x%02x%02x.\n",
+ list[i].node_name, list[i].port_name,
+ list[i].d_id.b.domain, list[i].d_id.b.area,
+ list[i].d_id.b.al_pa);
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * @ha: HA context
+ *
+ * This command uses the old Exectute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_rft_id(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct sns_cmd_pkt *sns_cmd;
+
+ /* Issue RFT_ID. */
+ /* Prepare SNS command request. */
+ sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
+ RFT_ID_SNS_DATA_SIZE);
+
+ /* Prepare SNS command arguments -- port_id, FC-4 types */
+ sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+ sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
+
+ sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
+
+ /* Execute SNS command. */
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
+ sizeof(struct sns_cmd_pkt));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2060,
+ "RFT_ID Send SNS failed (%d).\n", rval);
+ } else if (sns_cmd->p.rft_data[8] != 0x80 ||
+ sns_cmd->p.rft_data[9] != 0x02) {
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
+ "RFT_ID failed, rejected request rft_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
+ sns_cmd->p.rft_data, 16);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2073,
+ "RFT_ID exiting normally.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
+ * HBA.
+ * @ha: HA context
+ *
+ * This command uses the old Exectute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct sns_cmd_pkt *sns_cmd;
+
+ /* Issue RNN_ID. */
+ /* Prepare SNS command request. */
+ sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
+ RNN_ID_SNS_DATA_SIZE);
+
+ /* Prepare SNS command arguments -- port_id, nodename. */
+ sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+ sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
+
+ sns_cmd->p.cmd.param[4] = vha->node_name[7];
+ sns_cmd->p.cmd.param[5] = vha->node_name[6];
+ sns_cmd->p.cmd.param[6] = vha->node_name[5];
+ sns_cmd->p.cmd.param[7] = vha->node_name[4];
+ sns_cmd->p.cmd.param[8] = vha->node_name[3];
+ sns_cmd->p.cmd.param[9] = vha->node_name[2];
+ sns_cmd->p.cmd.param[10] = vha->node_name[1];
+ sns_cmd->p.cmd.param[11] = vha->node_name[0];
+
+ /* Execute SNS command. */
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
+ sizeof(struct sns_cmd_pkt));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x204a,
+ "RNN_ID Send SNS failed (%d).\n", rval);
+ } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
+ sns_cmd->p.rnn_data[9] != 0x02) {
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
+ "RNN_ID failed, rejected request, rnn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
+ sns_cmd->p.rnn_data, 16);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204c,
+ "RNN_ID exiting normally.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
+{
+ int ret, rval;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct qla_hw_data *ha = vha->hw;
+ ret = QLA_SUCCESS;
+ if (vha->flags.management_server_logged_in)
+ return ret;
+
+ rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
+ 0xfa, mb, BIT_1);
+ if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+ if (rval == QLA_MEMORY_ALLOC_FAILED)
+ ql_dbg(ql_dbg_disc, vha, 0x2085,
+ "Failed management_server login: loopid=%x "
+ "rval=%d\n", vha->mgmt_svr_loop_id, rval);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2024,
+ "Failed management_server login: loopid=%x "
+ "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
+ mb[7]);
+ ret = QLA_FUNCTION_FAILED;
+ } else
+ vha->flags.management_server_logged_in = 1;
+
+ return ret;
+}
+
+/**
+ * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
+ * @ha: HA context
+ * @req_size: request size in bytes
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's ms_iocb.
+ */
+void *
+qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
+ uint32_t rsp_size)
+{
+ ms_iocb_entry_t *ms_pkt;
+ struct qla_hw_data *ha = vha->hw;
+ ms_pkt = ha->ms_iocb;
+ memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
+
+ ms_pkt->entry_type = MS_IOCB_TYPE;
+ ms_pkt->entry_count = 1;
+ SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
+ ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
+ ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
+ ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
+ ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
+ ms_pkt->req_bytecount = cpu_to_le32(req_size);
+
+ ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+
+ ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+
+ return ms_pkt;
+}
+
+/**
+ * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
+ * @ha: HA context
+ * @req_size: request size in bytes
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's ms_iocb.
+ */
+void *
+qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
+ uint32_t rsp_size)
+{
+ struct ct_entry_24xx *ct_pkt;
+ struct qla_hw_data *ha = vha->hw;
+
+ ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+ memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
+
+ ct_pkt->entry_type = CT_IOCB_TYPE;
+ ct_pkt->entry_count = 1;
+ ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
+ ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
+ ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+
+ ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+
+ ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = vha->vp_idx;
+
+ return ct_pkt;
+}
+
+static inline ms_iocb_entry_t *
+qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
+ struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+ ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+ } else {
+ ms_pkt->req_bytecount = cpu_to_le32(req_size);
+ ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+ }
+
+ return ms_pkt;
+}
+
+/**
+ * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
+ * @ct_req: CT request buffer
+ * @cmd: GS command
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the intitialized @ct_req.
+ */
+static inline struct ct_sns_req *
+qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
+ uint16_t rsp_size)
+{
+ memset(p, 0, sizeof(struct ct_sns_pkt));
+
+ p->p.req.header.revision = 0x01;
+ p->p.req.header.gs_type = 0xFA;
+ p->p.req.header.gs_subtype = 0x10;
+ p->p.req.command = cpu_to_be16(cmd);
+ p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
+
+ return &p->p.req;
+}
+
+/**
+ * qla2x00_fdmi_rhba() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size, sn;
+
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmi_hba_attr *eiter;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Issue RHBA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
+ ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
+ memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
+ size = 2 * WWN_SIZE + 4 + 4;
+
+ /* Attributes */
+ ct_req->req.rhba.attrs.count =
+ __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
+ entries = ct_req->req.rhba.hba_identifier;
+
+ /* Nodename. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
+ eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2025,
+ "NodeName = %8phN.\n", eiter->a.node_name);
+
+ /* Manufacturer. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
+ alen = strlen(QLA2XXX_MANUFACTURER);
+ snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2026,
+ "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+ /* Serial number. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+ sizeof(eiter->a.serial_num));
+ else {
+ sn = ((ha->serial0 & 0x1f) << 16) |
+ (ha->serial2 << 8) | ha->serial1;
+ snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
+ }
+ alen = strlen(eiter->a.serial_num);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2027,
+ "Serial no. = %s.\n", eiter->a.serial_num);
+
+ /* Model name. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
+ snprintf(eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen = strlen(eiter->a.model);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2028,
+ "Model Name = %s.\n", eiter->a.model);
+
+ /* Model description. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+ snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen = strlen(eiter->a.model_desc);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2029,
+ "Model Desc = %s.\n", eiter->a.model_desc);
+
+ /* Hardware version. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+ if (!IS_FWI2_CAPABLE(ha)) {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen = strlen(eiter->a.hw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x202a,
+ "Hardware ver = %s.\n", eiter->a.hw_version);
+
+ /* Driver version. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+ snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen = strlen(eiter->a.driver_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x202b,
+ "Driver ver = %s.\n", eiter->a.driver_version);
+
+ /* Option ROM version. */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.orom_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha , 0x202c,
+ "Optrom vers = %s.\n", eiter->a.orom_version);
+
+ /* Firmware version */
+ eiter = entries + size;
+ eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+ sizeof(eiter->a.fw_version));
+ alen = strlen(eiter->a.fw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x202d,
+ "Firmware vers = %s.\n", eiter->a.fw_version);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x202e,
+ "RHBA identifier = %8phN size=%d.\n",
+ ct_req->req.rhba.hba_identifier, size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2030,
+ "RHBA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x2034,
+ "HBA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ad,
+ "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2035,
+ "RHBA exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmi_rpa() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmi_port_attr *eiter;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RPA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
+ RPA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+ size = WWN_SIZE + 4;
+
+ /* Attributes */
+ ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
+ entries = ct_req->req.rpa.port_name;
+
+ /* FC4 types. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+ eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[2] = 0x01;
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2039,
+ "FC4_TYPES=%02x %02x.\n",
+ eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]);
+
+ /* Supported speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ if (IS_CNA_CAPABLE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_10GB);
+ else if (IS_QLA27XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA2031(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB);
+ else if (IS_QLA25XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA24XX_TYPE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA23XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_1GB);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203a,
+ "Supported_Speed=%x.\n", eiter->a.sup_speed);
+
+ /* Current speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ break;
+ case PORT_SPEED_2GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ break;
+ case PORT_SPEED_4GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_4GB);
+ break;
+ case PORT_SPEED_8GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_8GB);
+ break;
+ case PORT_SPEED_10GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_10GB);
+ break;
+ case PORT_SPEED_16GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ break;
+ case PORT_SPEED_32GB:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ break;
+ default:
+ eiter->a.cur_speed =
+ cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+ break;
+ }
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203b,
+ "Current_Speed=%x.\n", eiter->a.cur_speed);
+
+ /* Max frame size. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+ eiter->len = cpu_to_be16(4 + 4);
+ eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203c,
+ "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+
+ /* OS device name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+ snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen = strlen(eiter->a.os_dev_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204b,
+ "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+
+ /* Hostname. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", p_sysid->nodename);
+ } else {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.host_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x203e,
+ "RPA portname %016llx, size = %d.\n",
+ wwn_to_u64(ct_req->req.rpa.port_name), size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2040,
+ "RPA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20cd,
+ "RPA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ }
+
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2041,
+ "RPA exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmiv2_rhba() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size, sn;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmiv2_hba_attr *eiter;
+ struct qla_hw_data *ha = vha->hw;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RHBA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
+ RHBA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
+ ct_req->req.rhba2.entry_count = cpu_to_be32(1);
+ memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
+ size = 2 * WWN_SIZE + 4 + 4;
+
+ /* Attributes */
+ ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
+ entries = ct_req->req.rhba2.hba_identifier;
+
+ /* Nodename. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x207d,
+ "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+ /* Manufacturer. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
+ snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
+ alen = strlen(eiter->a.manufacturer);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a5,
+ "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+ /* Serial number. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+ sizeof(eiter->a.serial_num));
+ else {
+ sn = ((ha->serial0 & 0x1f) << 16) |
+ (ha->serial2 << 8) | ha->serial1;
+ snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
+ }
+ alen = strlen(eiter->a.serial_num);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a6,
+ "Serial no. = %s.\n", eiter->a.serial_num);
+
+ /* Model name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
+ snprintf(eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen = strlen(eiter->a.model);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a7,
+ "Model Name = %s.\n", eiter->a.model);
+
+ /* Model description. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+ snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen = strlen(eiter->a.model_desc);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a8,
+ "Model Desc = %s.\n", eiter->a.model_desc);
+
+ /* Hardware version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+ if (!IS_FWI2_CAPABLE(ha)) {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+ sizeof(eiter->a.hw_version))) {
+ ;
+ } else {
+ snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen = strlen(eiter->a.hw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20a9,
+ "Hardware ver = %s.\n", eiter->a.hw_version);
+
+ /* Driver version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+ snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen = strlen(eiter->a.driver_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20aa,
+ "Driver ver = %s.\n", eiter->a.driver_version);
+
+ /* Option ROM version. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.orom_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha , 0x20ab,
+ "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
+ eiter->a.orom_version[0]);
+
+ /* Firmware version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+ sizeof(eiter->a.fw_version));
+ alen = strlen(eiter->a.fw_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ac,
+ "Firmware vers = %s.\n", eiter->a.fw_version);
+
+ /* OS Name and Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s %s",
+ p_sysid->sysname, p_sysid->release, p_sysid->version);
+ } else {
+ snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s", "Linux", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.os_version);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ae,
+ "OS Name and Version = %s.\n", eiter->a.os_version);
+
+ /* MAX CT Payload Length */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+ eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20af,
+ "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+
+ /* Node Sybolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+ qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+ sizeof(eiter->a.sym_name));
+ alen = strlen(eiter->a.sym_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b0,
+ "Symbolic Name = %s.\n", eiter->a.sym_name);
+
+ /* Vendor Id */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
+ eiter->a.vendor_id = cpu_to_be32(0x1077);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ "Vendor Id = %x.\n", eiter->a.vendor_id);
+
+ /* Num Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+ eiter->a.num_ports = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b2,
+ "Port Num = %x.\n", eiter->a.num_ports);
+
+ /* Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b3,
+ "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+ /* BIOS Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+ snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
+ "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen = strlen(eiter->a.bios_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b4,
+ "BIOS Name = %s\n", eiter->a.bios_name);
+
+ /* Vendor Identifier */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
+ snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer),
+ "%s", "QLGC");
+ alen = strlen(eiter->a.vendor_indentifer);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x20b5,
+ "RHBA identifier = %016llx.\n",
+ wwn_to_u64(ct_req->req.rhba2.hba_identifier));
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x20b7,
+ "RHBA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20b8,
+ "HBA already registered.\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2016,
+ "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20b9,
+ "RHBA FDMI V2 exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmi_dhba() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ /* Issue RPA */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
+ DHBA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- portname. */
+ memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
+
+ ql_dbg(ql_dbg_disc, vha, 0x2036,
+ "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2037,
+ "DHBA issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2038,
+ "DHBA exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmiv2_rpa() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
+{
+ int rval, alen;
+ uint32_t size;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
+ struct ct_fdmiv2_port_attr *eiter;
+ struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+ struct new_utsname *p_sysid = NULL;
+
+ /* Issue RPA */
+ /* Prepare common MS IOCB */
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare FDMI command arguments -- attribute block, attributes. */
+ memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
+ size = WWN_SIZE + 4;
+
+ /* Attributes */
+ ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
+ entries = ct_req->req.rpa2.port_name;
+
+ /* FC4 types. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+ eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[2] = 0x01;
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ba,
+ "FC4_TYPES=%02x %02x.\n",
+ eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]);
+
+ /* Supported speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ if (IS_CNA_CAPABLE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_10GB);
+ else if (IS_QLA27XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA2031(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB);
+ else if (IS_QLA25XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA24XX_TYPE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else if (IS_QLA23XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB);
+ else
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_1GB);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20bb,
+ "Supported Port Speed = %x.\n", eiter->a.sup_speed);
+
+ /* Current speed. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+ eiter->len = cpu_to_be16(4 + 4);
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ break;
+ case PORT_SPEED_2GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ break;
+ case PORT_SPEED_4GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
+ break;
+ case PORT_SPEED_8GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
+ break;
+ case PORT_SPEED_10GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
+ break;
+ case PORT_SPEED_16GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ break;
+ case PORT_SPEED_32GB:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ break;
+ default:
+ eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+ break;
+ }
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ "Current_Speed = %x.\n", eiter->a.cur_speed);
+
+ /* Max frame size. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+ eiter->len = cpu_to_be16(4 + 4);
+ eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+ le16_to_cpu(icb24->frame_payload_size):
+ le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
+
+ /* OS device name. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+ alen = strlen(QLA2XXX_DRIVER_NAME);
+ snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20be,
+ "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
+
+ /* Hostname. */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+ p_sysid = utsname();
+ if (p_sysid) {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", p_sysid->nodename);
+ } else {
+ snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", fc_host_system_hostname(vha->host));
+ }
+ alen = strlen(eiter->a.host_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x203d,
+ "HostName=%s.\n", eiter->a.host_name);
+
+ /* Node Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c0,
+ "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+ /* Port Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+ memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c1,
+ "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
+
+ /* Port Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+ qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+ sizeof(eiter->a.port_sym_name));
+ alen = strlen(eiter->a.port_sym_name);
+ alen += 4 - (alen & 3);
+ eiter->len = cpu_to_be16(4 + alen);
+ size += 4 + alen;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c2,
+ "port symbolic name = %s\n", eiter->a.port_sym_name);
+
+ /* Port Type */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+ eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c3,
+ "Port Type = %x.\n", eiter->a.port_type);
+
+ /* Class of Service */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+ eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c4,
+ "Supported COS = %08x\n", eiter->a.port_supported_cos);
+
+ /* Port Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
+ size += 4 + WWN_SIZE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c5,
+ "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+ /* FC4_type */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+ eiter->a.port_fc4_type[0] = 0;
+ eiter->a.port_fc4_type[1] = 0;
+ eiter->a.port_fc4_type[2] = 1;
+ eiter->a.port_fc4_type[3] = 0;
+ eiter->len = cpu_to_be16(4 + 32);
+ size += 4 + 32;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c6,
+ "Port Active FC4 Type = %02x %02x.\n",
+ eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+
+ /* Port State */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+ eiter->a.port_state = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c7,
+ "Port State = %x.\n", eiter->a.port_state);
+
+ /* Number of Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+ eiter->a.num_ports = cpu_to_be32(1);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "Number of ports = %x.\n", eiter->a.num_ports);
+
+ /* Port Id */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_PORT_ID);
+ eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+ eiter->len = cpu_to_be16(4 + 4);
+ size += 4 + 4;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "Port Id = %x.\n", eiter->a.port_id);
+
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+ ql_dbg(ql_dbg_disc, vha, 0x203e,
+ "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
+ entries, size);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x20cb,
+ "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+ QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ce,
+ "RPA FDMI v2 already registered\n");
+ rval = QLA_ALREADY_REGISTERED;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2020,
+ "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20cc,
+ "RPA FDMI V2 exiting normally.\n");
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_fdmi_register() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_fdmi_register(scsi_qla_host_t *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
+ IS_QLAFX00(ha))
+ return QLA_FUNCTION_FAILED;
+
+ rval = qla2x00_mgmt_svr_login(vha);
+ if (rval)
+ return rval;
+
+ rval = qla2x00_fdmiv2_rhba(vha);
+ if (rval) {
+ if (rval != QLA_ALREADY_REGISTERED)
+ goto try_fdmi;
+
+ rval = qla2x00_fdmi_dhba(vha);
+ if (rval)
+ goto try_fdmi;
+
+ rval = qla2x00_fdmiv2_rhba(vha);
+ if (rval)
+ goto try_fdmi;
+ }
+ rval = qla2x00_fdmiv2_rpa(vha);
+ if (rval)
+ goto try_fdmi;
+
+ goto out;
+
+try_fdmi:
+ rval = qla2x00_fdmi_rhba(vha);
+ if (rval) {
+ if (rval != QLA_ALREADY_REGISTERED)
+ return rval;
+
+ rval = qla2x00_fdmi_dhba(vha);
+ if (rval)
+ return rval;
+
+ rval = qla2x00_fdmi_rhba(vha);
+ if (rval)
+ return rval;
+ }
+ rval = qla2x00_fdmi_rpa(vha);
+out:
+ return rval;
+}
+
+/**
+ * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t i;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (!IS_IIDMA_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Issue GFPN_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
+ GFPN_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
+ GFPN_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id */
+ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+ ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2023,
+ "GFPN_ID issue IOCB failed (%d).\n", rval);
+ break;
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+ "GFPN_ID") != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else {
+ /* Save fabric portname */
+ memcpy(list[i].fabric_port_name,
+ ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+
+ return (rval);
+}
+
+static inline void *
+qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
+ uint32_t rsp_size)
+{
+ struct ct_entry_24xx *ct_pkt;
+ struct qla_hw_data *ha = vha->hw;
+ ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+ memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
+
+ ct_pkt->entry_type = CT_IOCB_TYPE;
+ ct_pkt->entry_count = 1;
+ ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
+ ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
+ ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+
+ ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+
+ ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = vha->vp_idx;
+
+ return ct_pkt;
+}
+
+
+static inline struct ct_sns_req *
+qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
+ uint16_t rsp_size)
+{
+ memset(p, 0, sizeof(struct ct_sns_pkt));
+
+ p->p.req.header.revision = 0x01;
+ p->p.req.header.gs_type = 0xFA;
+ p->p.req.header.gs_subtype = 0x01;
+ p->p.req.command = cpu_to_be16(cmd);
+ p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
+
+ return &p->p.req;
+}
+
+/**
+ * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval;
+ uint16_t i;
+ struct qla_hw_data *ha = vha->hw;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+
+ if (!IS_IIDMA_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+ if (!ha->flags.gpsc_supported)
+ return QLA_FUNCTION_FAILED;
+
+ rval = qla2x00_mgmt_svr_login(vha);
+ if (rval)
+ return rval;
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Issue GFPN_ID */
+ /* Prepare common MS IOCB */
+ ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
+ GPSC_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
+ GPSC_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_name */
+ memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
+ WWN_SIZE);
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2059,
+ "GPSC issue IOCB failed (%d).\n", rval);
+ } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+ "GPSC")) != QLA_SUCCESS) {
+ /* FM command unsupported? */
+ if (rval == QLA_INVALID_COMMAND &&
+ (ct_rsp->header.reason_code ==
+ CT_REASON_INVALID_COMMAND_CODE ||
+ ct_rsp->header.reason_code ==
+ CT_REASON_COMMAND_UNSUPPORTED)) {
+ ql_dbg(ql_dbg_disc, vha, 0x205a,
+ "GPSC command unsupported, disabling "
+ "query.\n");
+ ha->flags.gpsc_supported = 0;
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Save port-speed */
+ switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+ case BIT_15:
+ list[i].fp_speed = PORT_SPEED_1GB;
+ break;
+ case BIT_14:
+ list[i].fp_speed = PORT_SPEED_2GB;
+ break;
+ case BIT_13:
+ list[i].fp_speed = PORT_SPEED_4GB;
+ break;
+ case BIT_12:
+ list[i].fp_speed = PORT_SPEED_10GB;
+ break;
+ case BIT_11:
+ list[i].fp_speed = PORT_SPEED_8GB;
+ break;
+ case BIT_10:
+ list[i].fp_speed = PORT_SPEED_16GB;
+ break;
+ case BIT_8:
+ list[i].fp_speed = PORT_SPEED_32GB;
+ break;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0x205b,
+ "GPSC ext entry - fpn "
+ "%8phN speeds=%04x speed=%04x.\n",
+ list[i].fabric_port_name,
+ be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
+ *
+ * @ha: HA context
+ * @list: switch info entries to populate
+ *
+ */
+void
+qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+ int rval;
+ uint16_t i;
+
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t fcp_scsi_features = 0;
+
+ for (i = 0; i < ha->max_fibre_devices; i++) {
+ /* Set default FC4 Type as UNKNOWN so the default is to
+ * Process this port */
+ list[i].fc4_type = FC4_TYPE_UNKNOWN;
+
+ /* Do not attempt GFF_ID if we are not FWI_2 capable */
+ if (!IS_FWI2_CAPABLE(ha))
+ continue;
+
+ /* Prepare common MS IOCB */
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE,
+ GFF_ID_RSP_SIZE);
+
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
+ GFF_ID_RSP_SIZE);
+ ct_rsp = &ha->ct_sns->p.rsp;
+
+ /* Prepare CT arguments -- port_id */
+ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+ ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(ms_iocb_entry_t));
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x205c,
+ "GFF_ID issue IOCB failed (%d).\n", rval);
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+ "GFF_ID") != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x205d,
+ "GFF_ID IOCB status had a failure status code.\n");
+ } else {
+ fcp_scsi_features =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fcp_scsi_features &= 0x0f;
+
+ if (fcp_scsi_features)
+ list[i].fc4_type = FC4_TYPE_FCP_SCSI;
+ else
+ list[i].fc4_type = FC4_TYPE_OTHER;
+ }
+
+ /* Last device exit. */
+ if (list[i].d_id.b.rsvd_1 != 0)
+ break;
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
new file mode 100644
index 000000000..285cb204f
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -0,0 +1,6473 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qla_devtbl.h"
+
+#ifdef CONFIG_SPARC
+#include <asm/prom.h>
+#endif
+
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
+/*
+* QLogic ISP2x00 Hardware Support Function Prototypes.
+*/
+static int qla2x00_isp_firmware(scsi_qla_host_t *);
+static int qla2x00_setup_chip(scsi_qla_host_t *);
+static int qla2x00_fw_ready(scsi_qla_host_t *);
+static int qla2x00_configure_hba(scsi_qla_host_t *);
+static int qla2x00_configure_loop(scsi_qla_host_t *);
+static int qla2x00_configure_local_loop(scsi_qla_host_t *);
+static int qla2x00_configure_fabric(scsi_qla_host_t *);
+static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
+static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
+ uint16_t *);
+
+static int qla2x00_restart_isp(scsi_qla_host_t *);
+
+static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
+static int qla84xx_init_chip(scsi_qla_host_t *);
+static int qla25xx_init_queues(struct qla_hw_data *);
+
+/* SRB Extensions ---------------------------------------------------------- */
+
+void
+qla2x00_sp_timeout(unsigned long __data)
+{
+ srb_t *sp = (srb_t *)__data;
+ struct srb_iocb *iocb;
+ fc_port_t *fcport = sp->fcport;
+ struct qla_hw_data *ha = fcport->vha->hw;
+ struct req_que *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ req = ha->req_q_map[0];
+ req->outstanding_cmds[sp->handle] = NULL;
+ iocb = &sp->u.iocb_cmd;
+ iocb->timeout(sp);
+ sp->free(fcport->vha, sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla2x00_sp_free(void *data, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ del_timer(&iocb->timer);
+ qla2x00_rel_sp(vha, sp);
+}
+
+/* Asynchronous Login/Logout Routines -------------------------------------- */
+
+unsigned long
+qla2x00_get_async_timeout(struct scsi_qla_host *vha)
+{
+ unsigned long tmo;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Firmware should use switch negotiated r_a_tov for timeout. */
+ tmo = ha->r_a_tov / 10 * 2;
+ if (IS_QLAFX00(ha)) {
+ tmo = FX00_DEF_RATOV * 2;
+ } else if (!IS_FWI2_CAPABLE(ha)) {
+ /*
+ * Except for earlier ISPs where the timeout is seeded from the
+ * initialization control block.
+ */
+ tmo = ha->login_timeout;
+ }
+ return tmo;
+}
+
+static void
+qla2x00_async_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ fc_port_t *fcport = sp->fcport;
+
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
+ sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (sp->type == SRB_LOGIN_CMD) {
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
+ /* Retry as needed. */
+ lio->u.logio.data[0] = MBS_COMMAND_ERROR;
+ lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ qla2x00_post_async_login_done_work(fcport->vha, fcport,
+ lio->u.logio.data);
+ }
+}
+
+static void
+qla2x00_async_login_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
+}
+
+int
+qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_LOGIN_CMD;
+ sp->name = "login";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_login_sp_done;
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
+ return rval;
+
+done_free_sp:
+ sp->free(fcport->vha, sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
+}
+
+int
+qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_LOGOUT_CMD;
+ sp->name = "logout";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_logout_sp_done;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(fcport->vha, sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
+}
+
+int
+qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_ADISC_CMD;
+ sp->name = "adisc";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_adisc_sp_done;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x206f,
+ "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(fcport->vha, sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_tmf_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+ tmf->u.tmf.comp_status = CS_TIMEOUT;
+ complete(&tmf->u.tmf.comp);
+}
+
+static void
+qla2x00_tmf_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ complete(&tmf->u.tmf.comp);
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ uint32_t tag)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct srb_iocb *tm_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ tm_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_TM_CMD;
+ sp->name = "tmf";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ tm_iocb->u.tmf.flags = flags;
+ tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.data = tag;
+ sp->done = qla2x00_tmf_sp_done;
+ tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ init_completion(&tm_iocb->u.tmf.comp);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_taskm, vha, 0x802f,
+ "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+
+ rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+ if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
+ }
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ flags = tm_iocb->u.tmf.flags;
+ lun = (uint16_t)tm_iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ qla2x00_marker(vha, vha->hw->req_q_map[0],
+ vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+ }
+
+done_free_sp:
+ sp->free(vha, sp);
+done:
+ return rval;
+}
+
+static void
+qla24xx_abort_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ abt->u.abt.comp_status = CS_TIMEOUT;
+ complete(&abt->u.abt.comp);
+}
+
+static void
+qla24xx_abort_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ complete(&abt->u.abt.comp);
+}
+
+static int
+qla24xx_async_abort_cmd(srb_t *cmd_sp)
+{
+ scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+ fc_port_t *fcport = cmd_sp->fcport;
+ struct srb_iocb *abt_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ abt_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_ABT_CMD;
+ sp->name = "abort";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+ sp->done = qla24xx_abort_sp_done;
+ abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, target_id=%x\n",
+ cmd_sp->handle, fcport->tgt_id);
+
+ wait_for_completion(&abt_iocb->u.abt.comp);
+
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+ sp->free(vha, sp);
+done:
+ return rval;
+}
+
+int
+qla24xx_async_abort_command(srb_t *sp)
+{
+ unsigned long flags = 0;
+
+ uint32_t handle;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+ if (req->outstanding_cmds[handle] == sp)
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (handle == req->num_outstanding_cmds) {
+ /* Command not found. */
+ return QLA_FUNCTION_FAILED;
+ }
+ if (sp->type == SRB_FXIOCB_DCMD)
+ return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+ FXDISC_ABORT_IOCTL);
+
+ return qla24xx_async_abort_cmd(sp);
+}
+
+void
+qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ int rval;
+
+ switch (data[0]) {
+ case MBS_COMMAND_COMPLETE:
+ /*
+ * Driver must validate login state - If PRLI not complete,
+ * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
+ * requests.
+ */
+ rval = qla2x00_get_port_database(vha, fcport, 0);
+ if (rval == QLA_NOT_LOGGED_IN) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ qla2x00_post_async_logout_work(vha, fcport, NULL);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ break;
+ }
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ qla2x00_post_async_adisc_work(vha, fcport, data);
+ break;
+ }
+ qla2x00_update_fcport(vha, fcport);
+ break;
+ case MBS_COMMAND_ERROR:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ else
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ break;
+ case MBS_PORT_ID_USED:
+ fcport->loop_id = data[1];
+ qla2x00_post_async_logout_work(vha, fcport, NULL);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ break;
+ case MBS_LOOP_ID_USED:
+ fcport->loop_id++;
+ rval = qla2x00_find_new_loop_id(vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ break;
+ }
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ break;
+ }
+ return;
+}
+
+void
+qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ return;
+}
+
+void
+qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ if (data[0] == MBS_COMMAND_COMPLETE) {
+ qla2x00_update_fcport(vha, fcport);
+
+ return;
+ }
+
+ /* Retry login. */
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ else
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+ return;
+}
+
+/****************************************************************************/
+/* QLogic ISP2x00 Hardware Support Functions. */
+/****************************************************************************/
+
+static int
+qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t idc_major_ver, idc_minor_ver;
+ uint16_t config[4];
+
+ qla83xx_idc_lock(vha, 0);
+
+ /* SV: TODO: Assign initialization timeout from
+ * flash-info / other param
+ */
+ ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
+ ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
+
+ /* Set our fcoe function presence */
+ if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb077,
+ "Error while setting DRV-Presence.\n");
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ }
+
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+
+ /*
+ * On first protocol driver load:
+ * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
+ * register.
+ * Others: Check compatibility with current IDC Major version.
+ */
+ qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
+ if (ha->flags.nic_core_reset_owner) {
+ /* Set IDC Major version */
+ idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
+ qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
+
+ /* Clearing IDC-Lock-Recovery register */
+ qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
+ } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
+ /*
+ * Clear further IDC participation if we are not compatible with
+ * the current IDC Major Version.
+ */
+ ql_log(ql_log_warn, vha, 0xb07d,
+ "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
+ idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
+ __qla83xx_clear_drv_presence(vha);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ }
+ /* Each function sets its supported Minor version. */
+ qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
+ idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
+ qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
+
+ if (ha->flags.nic_core_reset_owner) {
+ memset(config, 0, sizeof(config));
+ if (!qla81xx_get_port_config(vha, config))
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_READY);
+ }
+
+ rval = qla83xx_idc_state_handler(vha);
+
+exit:
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+/*
+* qla2x00_initialize_adapter
+* Initialize board.
+*
+* Input:
+* ha = adapter block pointer.
+*
+* Returns:
+* 0 = success
+*/
+int
+qla2x00_initialize_adapter(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ /* Clear adapter flags. */
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ vha->flags.reset_active = 0;
+ ha->flags.pci_channel_io_perm_failure = 0;
+ ha->flags.eeh_busy = 0;
+ vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->device_flags = DFLG_NO_CABLE;
+ vha->dpc_flags = 0;
+ vha->flags.management_server_logged_in = 0;
+ vha->marker_needed = 0;
+ ha->isp_abort_cnt = 0;
+ ha->beacon_blink_led = 0;
+
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
+
+ ql_dbg(ql_dbg_init, vha, 0x0040,
+ "Configuring PCI space...\n");
+ rval = ha->isp_ops->pci_config(vha);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0044,
+ "Unable to configure PCI space.\n");
+ return (rval);
+ }
+
+ ha->isp_ops->reset_chip(vha);
+
+ rval = qla2xxx_get_flash_info(vha);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x004f,
+ "Unable to validate FLASH data.\n");
+ return rval;
+ }
+
+ if (IS_QLA8044(ha)) {
+ qla8044_read_reset_template(vha);
+
+ /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+ * If DONRESET_BIT0 is set, drivers should not set dev_state
+ * to NEED_RESET. But if NEED_RESET is set, drivers should
+ * should honor the reset. */
+ if (ql2xdontresethba == 1)
+ qla8044_set_idc_dontreset(vha);
+ }
+
+ ha->isp_ops->get_flash_version(vha, req->ring);
+ ql_dbg(ql_dbg_init, vha, 0x0061,
+ "Configure NVRAM parameters...\n");
+
+ ha->isp_ops->nvram_config(vha);
+
+ if (ha->flags.disable_serdes) {
+ /* Mask HBA via NVRAM settings? */
+ ql_log(ql_log_info, vha, 0x0077,
+ "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x0078,
+ "Verifying loaded RISC code...\n");
+
+ if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
+ rval = ha->isp_ops->chip_diag(vha);
+ if (rval)
+ return (rval);
+ rval = qla2x00_setup_chip(vha);
+ if (rval)
+ return (rval);
+ }
+
+ if (IS_QLA84XX(ha)) {
+ ha->cs84xx = qla84xx_get_chip(vha);
+ if (!ha->cs84xx) {
+ ql_log(ql_log_warn, vha, 0x00d0,
+ "Unable to configure ISP84XX.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ if (qla_ini_mode_enabled(vha))
+ rval = qla2x00_init_rings(vha);
+
+ ha->flags.chip_reset_done = 1;
+
+ if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
+ /* Issue verify 84xx FW IOCB to complete 84xx initialization */
+ rval = qla84xx_init_chip(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00d4,
+ "Unable to initialize ISP84XX.\n");
+ qla84xx_put_chip(vha);
+ }
+ }
+
+ /* Load the NIC Core f/w if we are the first protocol driver. */
+ if (IS_QLA8031(ha)) {
+ rval = qla83xx_nic_core_fw_load(vha);
+ if (rval)
+ ql_log(ql_log_warn, vha, 0x0124,
+ "Error in initializing NIC Core f/w.\n");
+ }
+
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
+ qla24xx_read_fcp_prio_cfg(vha);
+
+ if (IS_P3P_TYPE(ha))
+ qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
+ else
+ qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
+
+ return (rval);
+}
+
+/**
+ * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2100_pci_config(scsi_qla_host_t *vha)
+{
+ uint16_t w;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ pci_disable_rom(ha->pdev);
+
+ /* Get PCI bus information. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2300_pci_config(scsi_qla_host_t *vha)
+{
+ uint16_t w;
+ unsigned long flags = 0;
+ uint32_t cnt;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+
+ if (IS_QLA2322(ha) || IS_QLA6322(ha))
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ /*
+ * If this is a 2300 card and not 2312, reset the
+ * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
+ * the 2310 also reports itself as a 2300 so we need to get the
+ * fb revision level -- a 6 indicates it really is a 2300 and
+ * not a 2310.
+ */
+ if (IS_QLA2300(ha)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Pause RISC. */
+ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ break;
+
+ udelay(10);
+ }
+
+ /* Select FPM registers. */
+ WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ RD_REG_WORD(&reg->ctrl_status);
+
+ /* Get the fb rev level */
+ ha->fb_rev = RD_FB_CMD_REG(ha, reg);
+
+ if (ha->fb_rev == FPM_2300)
+ pci_clear_mwi(ha->pdev);
+
+ /* Deselect FPM registers. */
+ WRT_REG_WORD(&reg->ctrl_status, 0x0);
+ RD_REG_WORD(&reg->ctrl_status);
+
+ /* Release RISC module. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+ break;
+
+ udelay(10);
+ }
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
+
+ pci_disable_rom(ha->pdev);
+
+ /* Get PCI bus information. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla24xx_pci_config(scsi_qla_host_t *vha)
+{
+ uint16_t w;
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
+
+ /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
+ if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
+ pcix_set_mmrbc(ha->pdev, 2048);
+
+ /* PCIe -- adjust Maximum Read Request Size (2048). */
+ if (pci_is_pcie(ha->pdev))
+ pcie_set_readrq(ha->pdev, 4096);
+
+ pci_disable_rom(ha->pdev);
+
+ ha->chip_revision = ha->pdev->revision;
+
+ /* Get PCI bus information. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla25xx_pci_config(scsi_qla_host_t *vha)
+{
+ uint16_t w;
+ struct qla_hw_data *ha = vha->hw;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ /* PCIe -- adjust Maximum Read Request Size (2048). */
+ if (pci_is_pcie(ha->pdev))
+ pcie_set_readrq(ha->pdev, 4096);
+
+ pci_disable_rom(ha->pdev);
+
+ ha->chip_revision = ha->pdev->revision;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla2x00_isp_firmware() - Choose firmware image.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_isp_firmware(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint16_t loop_id, topo, sw_cap;
+ uint8_t domain, area, al_pa;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Assume loading risc code */
+ rval = QLA_FUNCTION_FAILED;
+
+ if (ha->flags.disable_risc_code_load) {
+ ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
+
+ /* Verify checksum of loaded RISC code. */
+ rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
+ if (rval == QLA_SUCCESS) {
+ /* And, verify we are not in ROM code. */
+ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
+ &area, &domain, &topo, &sw_cap);
+ }
+ }
+
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x007a,
+ "**** Load RISC code ****.\n");
+
+ return (rval);
+}
+
+/**
+ * qla2x00_reset_chip() - Reset ISP chip.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qla2x00_reset_chip(scsi_qla_host_t *vha)
+{
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint32_t cnt;
+ uint16_t cmd;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return;
+
+ ha->isp_ops->disable_intrs(ha);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Turn off master enable */
+ cmd = 0;
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
+
+ if (!IS_QLA2100(ha)) {
+ /* Pause RISC. */
+ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_WORD(&reg->hccr) &
+ HCCR_RISC_PAUSE) != 0)
+ break;
+ udelay(100);
+ }
+ } else {
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ udelay(10);
+ }
+
+ /* Select FPM registers. */
+ WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+
+ /* FPM Soft Reset. */
+ WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
+ RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+
+ /* Toggle Fpm Reset. */
+ if (!IS_QLA2200(ha)) {
+ WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
+ RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ }
+
+ /* Select frame buffer registers. */
+ WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+
+ /* Reset frame buffer FIFOs. */
+ if (IS_QLA2200(ha)) {
+ WRT_FB_CMD_REG(ha, reg, 0xa000);
+ RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
+ } else {
+ WRT_FB_CMD_REG(ha, reg, 0x00fc);
+
+ /* Read back fb_cmd until zero or 3 seconds max */
+ for (cnt = 0; cnt < 3000; cnt++) {
+ if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
+ break;
+ udelay(100);
+ }
+ }
+
+ /* Select RISC module registers. */
+ WRT_REG_WORD(&reg->ctrl_status, 0);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+
+ /* Reset RISC processor. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+
+ /* Release RISC processor. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ }
+
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+
+ /* Reset ISP chip. */
+ WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+
+ /* Wait for RISC to recover from reset. */
+ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+ /*
+ * It is necessary to for a delay here since the card doesn't
+ * respond to PCI reads during a reset. On some architectures
+ * this will result in an MCA.
+ */
+ udelay(20);
+ for (cnt = 30000; cnt; cnt--) {
+ if ((RD_REG_WORD(&reg->ctrl_status) &
+ CSR_ISP_SOFT_RESET) == 0)
+ break;
+ udelay(100);
+ }
+ } else
+ udelay(10);
+
+ /* Reset RISC processor. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+
+ WRT_REG_WORD(&reg->semaphore, 0);
+
+ /* Release RISC processor. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
+ break;
+
+ udelay(100);
+ }
+ } else
+ udelay(100);
+
+ /* Turn on master enable */
+ cmd |= PCI_COMMAND_MASTER;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
+
+ /* Disable RISC pause on FPM parity error. */
+ if (!IS_QLA2100(ha)) {
+ WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ }
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/**
+ * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla81xx_reset_mpi(scsi_qla_host_t *vha)
+{
+ uint16_t mb[4] = {0x1010, 0, 1, 0};
+
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_SUCCESS;
+
+ return qla81xx_write_mpi_register(vha, mb);
+}
+
+/**
+ * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static inline int
+qla24xx_reset_risc(scsi_qla_host_t *vha)
+{
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t cnt, d2;
+ uint16_t wd;
+ static int abts_cnt; /* ISP abort retry counts */
+ int rval = QLA_SUCCESS;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Reset RISC. */
+ WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
+ "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->ctrl_status),
+ (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+
+ WRT_REG_DWORD(&reg->ctrl_status,
+ CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+ udelay(100);
+
+ /* Wait for firmware to complete NVRAM accesses. */
+ d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ barrier();
+ if (cnt)
+ udelay(5);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (rval == QLA_SUCCESS)
+ set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
+ "HCCR: 0x%x, MailBox0 Status 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->mailbox0));
+
+ /* Wait for soft-reset to complete. */
+ d2 = RD_REG_DWORD(&reg->ctrl_status);
+ for (cnt = 0; cnt < 6000000; cnt++) {
+ barrier();
+ if ((RD_REG_DWORD(&reg->ctrl_status) &
+ CSRX_ISP_SOFT_RESET) == 0)
+ break;
+
+ udelay(5);
+ }
+ if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
+ "HCCR: 0x%x, Soft Reset status: 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_DWORD(&reg->ctrl_status));
+
+ /* If required, do an MPI FW reset now */
+ if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
+ if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
+ if (++abts_cnt < 5) {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
+ } else {
+ /*
+ * We exhausted the ISP abort retries. We have to
+ * set the board offline.
+ */
+ abts_cnt = 0;
+ vha->flags.online = 0;
+ }
+ }
+ }
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ barrier();
+ if (cnt)
+ udelay(5);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ if (rval == QLA_SUCCESS)
+ set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
+ "Host Risc 0x%x, mailbox0 0x%x\n",
+ RD_REG_DWORD(&reg->hccr),
+ RD_REG_WORD(&reg->mailbox0));
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
+ "Driver in %s mode\n",
+ IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
+
+ if (IS_NOPOLLING_TYPE(ha))
+ ha->isp_ops->enable_intrs(ha);
+
+ return rval;
+}
+
+static void
+qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
+
+}
+
+static void
+qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+}
+
+static void
+qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t wd32 = 0;
+ uint delta_msec = 100;
+ uint elapsed_msec = 0;
+ uint timeout_msec;
+ ulong n;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
+ return;
+
+attempt:
+ timeout_msec = TIMEOUT_SEMAPHORE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (wd32 & RISC_SEMAPHORE)
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (!(wd32 & RISC_SEMAPHORE))
+ goto force;
+
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ goto acquired;
+
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
+ timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (wd32 & RISC_SEMAPHORE_FORCE)
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
+
+ goto attempt;
+
+force:
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
+
+acquired:
+ return;
+}
+
+/**
+ * qla24xx_reset_chip() - Reset ISP24xx chip.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qla24xx_reset_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure) {
+ return;
+ }
+
+ ha->isp_ops->disable_intrs(ha);
+
+ qla25xx_manipulate_risc_semaphore(vha);
+
+ /* Perform RISC reset. */
+ qla24xx_reset_risc(vha);
+}
+
+/**
+ * qla2x00_chip_diag() - Test chip for proper operation.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_chip_diag(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ unsigned long flags = 0;
+ uint16_t data;
+ uint32_t cnt;
+ uint16_t mb[5];
+ struct req_que *req = ha->req_q_map[0];
+
+ /* Assume a failed state */
+ rval = QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_init, vha, 0x007b,
+ "Testing device at %lx.\n", (u_long)&reg->flash_address);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Reset ISP chip. */
+ WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+
+ /*
+ * We need to have a delay here since the card will not respond while
+ * in reset causing an MCA on some architectures.
+ */
+ udelay(20);
+ data = qla2x00_debounce_register(&reg->ctrl_status);
+ for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
+ udelay(5);
+ data = RD_REG_WORD(&reg->ctrl_status);
+ barrier();
+ }
+
+ if (!cnt)
+ goto chip_diag_failed;
+
+ ql_dbg(ql_dbg_init, vha, 0x007c,
+ "Reset register cleared by chip reset.\n");
+
+ /* Reset RISC processor. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+
+ /* Workaround for QLA2312 PCI parity error */
+ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+ data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
+ for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
+ udelay(5);
+ data = RD_MAILBOX_REG(ha, reg, 0);
+ barrier();
+ }
+ } else
+ udelay(10);
+
+ if (!cnt)
+ goto chip_diag_failed;
+
+ /* Check product ID of chip */
+ ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
+
+ mb[1] = RD_MAILBOX_REG(ha, reg, 1);
+ mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+ mb[3] = RD_MAILBOX_REG(ha, reg, 3);
+ mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
+ if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
+ mb[3] != PROD_ID_3) {
+ ql_log(ql_log_warn, vha, 0x0062,
+ "Wrong product ID = 0x%x,0x%x,0x%x.\n",
+ mb[1], mb[2], mb[3]);
+
+ goto chip_diag_failed;
+ }
+ ha->product_id[0] = mb[1];
+ ha->product_id[1] = mb[2];
+ ha->product_id[2] = mb[3];
+ ha->product_id[3] = mb[4];
+
+ /* Adjust fw RISC transfer size */
+ if (req->length > 1024)
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
+ else
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
+ req->length;
+
+ if (IS_QLA2200(ha) &&
+ RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
+ /* Limit firmware transfer size with a 2200A */
+ ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
+
+ ha->device_type |= DT_ISP2200A;
+ ha->fw_transfer_size = 128;
+ }
+
+ /* Wrap Incoming Mailboxes Test. */
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
+ rval = qla2x00_mbx_reg_test(vha);
+ if (rval)
+ ql_log(ql_log_warn, vha, 0x0080,
+ "Failed mailbox send register test.\n");
+ else
+ /* Flag a successful rval */
+ rval = QLA_SUCCESS;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+chip_diag_failed:
+ if (rval)
+ ql_log(ql_log_info, vha, 0x0081,
+ "Chip diagnostics **** FAILED ****.\n");
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return (rval);
+}
+
+/**
+ * qla24xx_chip_diag() - Test ISP24xx for proper operation.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla24xx_chip_diag(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (IS_P3P_TYPE(ha))
+ return QLA_SUCCESS;
+
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+ rval = qla2x00_mbx_reg_test(vha);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0082,
+ "Failed mailbox send register test.\n");
+ } else {
+ /* Flag a successful rval */
+ rval = QLA_SUCCESS;
+ }
+
+ return rval;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ dma_addr_t tc_dma;
+ void *tc;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ if (ha->fw_dump) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
+ return;
+ }
+
+ ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA27XX(ha))
+ goto try_fce;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ fixed_size = sizeof(struct qla2100_fw_dump);
+ } else if (IS_QLA23XX(ha)) {
+ fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA83XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+ else if (IS_QLA25XX(ha))
+ fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+ else
+ fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto try_eft;
+
+try_fce:
+ if (ha->fce)
+ dma_free_coherent(&ha->pdev->dev,
+ FCE_SIZE, ha->fce, ha->fce_dma);
+
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00be,
+ "Unable to allocate (%d KB) for FCE.\n",
+ FCE_SIZE / 1024);
+ goto try_eft;
+ }
+
+ rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+ ha->fce_mb, &ha->fce_bufs);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00bf,
+ "Unable to initialize FCE (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
+ tc_dma);
+ ha->flags.fce_enabled = 0;
+ goto try_eft;
+ }
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
+ "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+ ha->flags.fce_enabled = 1;
+ ha->fce_dma = tc_dma;
+ ha->fce = tc;
+
+try_eft:
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
+ /* Allocate memory for Extended Trace Buffer. */
+ tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00c1,
+ "Unable to allocate (%d KB) for EFT.\n",
+ EFT_SIZE / 1024);
+ goto cont_alloc;
+ }
+
+ rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00c2,
+ "Unable to initialize EFT (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+ tc_dma);
+ goto cont_alloc;
+ }
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+
+ eft_size = EFT_SIZE;
+ ha->eft_dma = tc_dma;
+ ha->eft = tc;
+ }
+
+cont_alloc:
+ if (IS_QLA27XX(ha)) {
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x00ba,
+ "Failed missing fwdump template\n");
+ return;
+ }
+ dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+ ql_dbg(ql_dbg_init, vha, 0x00fa,
+ "-> allocating fwdump (%x bytes)...\n", dump_size);
+ goto allocate;
+ }
+
+ req_q_size = req->length * sizeof(request_t);
+ rsp_q_size = rsp->length * sizeof(response_t);
+ dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+ dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
+ ha->chain_offset = dump_size;
+ dump_size += mq_size + fce_size;
+
+allocate:
+ ha->fw_dump = vmalloc(dump_size);
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (ha->fce) {
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+ ha->fce_dma);
+ ha->fce = NULL;
+ ha->fce_dma = 0;
+ }
+
+ if (ha->eft) {
+ dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
+ ha->eft_dma);
+ ha->eft = NULL;
+ ha->eft_dma = 0;
+ }
+ return;
+ }
+ ha->fw_dump_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
+
+ if (IS_QLA27XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = __constant_htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof(struct qla2xxx_fw_dump, isp));
+}
+
+static int
+qla81xx_mpi_sync(scsi_qla_host_t *vha)
+{
+#define MPS_MASK 0xe0
+ int rval;
+ uint16_t dc;
+ uint32_t dw;
+
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_SUCCESS;
+
+ rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x0105,
+ "Unable to acquire semaphore.\n");
+ goto done;
+ }
+
+ pci_read_config_word(vha->hw->pdev, 0x54, &dc);
+ rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
+ goto done_release;
+ }
+
+ dc &= MPS_MASK;
+ if (dc == (dw & MPS_MASK))
+ goto done_release;
+
+ dw &= ~MPS_MASK;
+ dw |= dc;
+ rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
+ }
+
+done_release:
+ rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x006d,
+ "Unable to release semaphore.\n");
+ }
+
+done:
+ return rval;
+}
+
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+ /* Don't try to reallocate the array */
+ if (req->outstanding_cmds)
+ return QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+ (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+ else {
+ if (ha->fw_xcb_count <= ha->fw_iocb_count)
+ req->num_outstanding_cmds = ha->fw_xcb_count;
+ else
+ req->num_outstanding_cmds = ha->fw_iocb_count;
+ }
+
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ /*
+ * Try to allocate a minimal size just so we can get through
+ * initialization.
+ */
+ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ ql_log(ql_log_fatal, NULL, 0x0126,
+ "Failed to allocate memory for "
+ "outstanding_cmds for req_que %p.\n", req);
+ req->num_outstanding_cmds = 0;
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla2x00_setup_chip() - Load and start RISC firmware.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_setup_chip(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint32_t srisc_address = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ unsigned long flags;
+ uint16_t fw_major_version;
+
+ if (IS_P3P_TYPE(ha)) {
+ rval = ha->isp_ops->load_risc(vha, &srisc_address);
+ if (rval == QLA_SUCCESS) {
+ qla2x00_stop_firmware(vha);
+ goto enable_82xx_npiv;
+ } else
+ goto failed;
+ }
+
+ if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
+ /* Disable SRAM, Instruction RAM and GP RAM parity. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+ RD_REG_WORD(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ qla81xx_mpi_sync(vha);
+
+ /* Load firmware sequences */
+ rval = ha->isp_ops->load_risc(vha, &srisc_address);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_init, vha, 0x00c9,
+ "Verifying Checksum of loaded RISC code.\n");
+
+ rval = qla2x00_verify_checksum(vha, srisc_address);
+ if (rval == QLA_SUCCESS) {
+ /* Start firmware execution. */
+ ql_dbg(ql_dbg_init, vha, 0x00ca,
+ "Starting firmware.\n");
+
+ rval = qla2x00_execute_fw(vha, srisc_address);
+ /* Retrieve firmware information. */
+ if (rval == QLA_SUCCESS) {
+enable_82xx_npiv:
+ fw_major_version = ha->fw_major_version;
+ if (IS_P3P_TYPE(ha))
+ qla82xx_check_md_needed(vha);
+ else
+ rval = qla2x00_get_fw_version(vha);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+ ha->flags.npiv_supported = 0;
+ if (IS_QLA2XXX_MIDTYPE(ha) &&
+ (ha->fw_attributes & BIT_2)) {
+ ha->flags.npiv_supported = 1;
+ if ((!ha->max_npiv_vports) ||
+ ((ha->max_npiv_vports + 1) %
+ MIN_MULTI_ID_FABRIC))
+ ha->max_npiv_vports =
+ MIN_MULTI_ID_FABRIC - 1;
+ }
+ qla2x00_get_resource_cnts(vha, NULL,
+ &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
+ &ha->max_npiv_vports, NULL);
+
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha,
+ vha->req);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
+ if (!fw_major_version && ql2xallocfwdump
+ && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_fw_dump(vha);
+ } else {
+ goto failed;
+ }
+ } else {
+ ql_log(ql_log_fatal, vha, 0x00cd,
+ "ISP Firmware failed checksum.\n");
+ goto failed;
+ }
+ } else
+ goto failed;
+
+ if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
+ /* Enable proper parity. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (IS_QLA2300(ha))
+ /* SRAM parity */
+ WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+ else
+ /* SRAM, Instruction RAM and GP RAM parity */
+ WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+ RD_REG_WORD(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ if (IS_QLA27XX(ha))
+ ha->flags.fac_supported = 1;
+ else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
+ uint32_t size;
+
+ rval = qla81xx_fac_get_sector_size(vha, &size);
+ if (rval == QLA_SUCCESS) {
+ ha->flags.fac_supported = 1;
+ ha->fdt_block_size = size << 2;
+ } else {
+ ql_log(ql_log_warn, vha, 0x00ce,
+ "Unsupported FAC firmware (%d.%02d.%02d).\n",
+ ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version);
+
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ ha->flags.fac_supported = 0;
+ rval = QLA_SUCCESS;
+ }
+ }
+ }
+failed:
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x00cf,
+ "Setup chip ****FAILED****.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_init_response_q_entries() - Initializes response queue entries.
+ * @ha: HA context
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
+ */
+void
+qla2x00_init_response_q_entries(struct rsp_que *rsp)
+{
+ uint16_t cnt;
+ response_t *pkt;
+
+ rsp->ring_ptr = rsp->ring;
+ rsp->ring_index = 0;
+ rsp->status_srb = NULL;
+ pkt = rsp->ring_ptr;
+ for (cnt = 0; cnt < rsp->length; cnt++) {
+ pkt->signature = RESPONSE_PROCESSED;
+ pkt++;
+ }
+}
+
+/**
+ * qla2x00_update_fw_options() - Read and process firmware options.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qla2x00_update_fw_options(scsi_qla_host_t *vha)
+{
+ uint16_t swing, emphasis, tx_sens, rx_sens;
+ struct qla_hw_data *ha = vha->hw;
+
+ memset(ha->fw_options, 0, sizeof(ha->fw_options));
+ qla2x00_get_fw_options(vha, ha->fw_options);
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return;
+
+ /* Serial Link options. */
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
+ "Serial link options.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
+ (uint8_t *)&ha->fw_seriallink_options,
+ sizeof(ha->fw_seriallink_options));
+
+ ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
+ if (ha->fw_seriallink_options[3] & BIT_2) {
+ ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
+
+ /* 1G settings */
+ swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
+ emphasis = (ha->fw_seriallink_options[2] &
+ (BIT_4 | BIT_3)) >> 3;
+ tx_sens = ha->fw_seriallink_options[0] &
+ (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ rx_sens = (ha->fw_seriallink_options[0] &
+ (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
+ ha->fw_options[10] = (emphasis << 14) | (swing << 8);
+ if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
+ if (rx_sens == 0x0)
+ rx_sens = 0x3;
+ ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
+ } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
+ ha->fw_options[10] |= BIT_5 |
+ ((rx_sens & (BIT_1 | BIT_0)) << 2) |
+ (tx_sens & (BIT_1 | BIT_0));
+
+ /* 2G settings */
+ swing = (ha->fw_seriallink_options[2] &
+ (BIT_7 | BIT_6 | BIT_5)) >> 5;
+ emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
+ tx_sens = ha->fw_seriallink_options[1] &
+ (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ rx_sens = (ha->fw_seriallink_options[1] &
+ (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
+ ha->fw_options[11] = (emphasis << 14) | (swing << 8);
+ if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
+ if (rx_sens == 0x0)
+ rx_sens = 0x3;
+ ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
+ } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
+ ha->fw_options[11] |= BIT_5 |
+ ((rx_sens & (BIT_1 | BIT_0)) << 2) |
+ (tx_sens & (BIT_1 | BIT_0));
+ }
+
+ /* FCP2 options. */
+ /* Return command IOCBs without waiting for an ABTS to complete. */
+ ha->fw_options[3] |= BIT_13;
+
+ /* LED scheme. */
+ if (ha->flags.enable_led_scheme)
+ ha->fw_options[2] |= BIT_12;
+
+ /* Detect ISP6312. */
+ if (IS_QLA6312(ha))
+ ha->fw_options[2] |= BIT_13;
+
+ /* Update firmware options. */
+ qla2x00_set_fw_options(vha, ha->fw_options);
+}
+
+void
+qla24xx_update_fw_options(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_P3P_TYPE(ha))
+ return;
+
+ /* Update Serial Link options. */
+ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
+ return;
+
+ rval = qla2x00_set_serdes_params(vha,
+ le16_to_cpu(ha->fw_seriallink_options24[1]),
+ le16_to_cpu(ha->fw_seriallink_options24[2]),
+ le16_to_cpu(ha->fw_seriallink_options24[3]));
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x0104,
+ "Unable to update Serial Link options (%x).\n", rval);
+ }
+}
+
+void
+qla2x00_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
+ ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
+ ha->init_cb->request_q_length = cpu_to_le16(req->length);
+ ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
+ ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
+ WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
+ WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
+ WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
+ RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
+}
+
+void
+qla24xx_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ struct qla_msix_entry *msix;
+ struct init_cb_24xx *icb;
+ uint16_t rid = 0;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ icb = (struct init_cb_24xx *)ha->init_cb;
+ icb->request_q_outpointer = __constant_cpu_to_le16(0);
+ icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ /* Setup ATIO queue dma pointers for target mode */
+ icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+ icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+ icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+ icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_30|BIT_29);
+
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+ icb->rid = __constant_cpu_to_le16(rid);
+ if (ha->flags.msix_enabled) {
+ msix = &ha->msix_entries[1];
+ ql_dbg(ql_dbg_init, vha, 0x00fd,
+ "Registering vector 0x%x for base que.\n",
+ msix->entry);
+ icb->msix = cpu_to_le16(msix->entry);
+ }
+ /* Use alternate PCI bus number */
+ if (MSB(rid))
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_19);
+ /* Use alternate PCI devfn */
+ if (LSB(rid))
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_18);
+
+ /* Use Disable MSIX Handshake mode for capable adapters */
+ if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
+ (ha->flags.msix_enabled)) {
+ icb->firmware_options_2 &=
+ __constant_cpu_to_le32(~BIT_22);
+ ha->flags.disable_msix_handshake = 1;
+ ql_dbg(ql_dbg_init, vha, 0x00fe,
+ "MSIX Handshake Disable Mode turned on.\n");
+ } else {
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_22);
+ }
+ icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
+
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
+ WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+ } else {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
+ WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+ }
+ qlt_24xx_config_rings(vha);
+
+ /* PCI posting */
+ RD_REG_DWORD(&ioreg->hccr);
+}
+
+/**
+ * qla2x00_init_rings() - Initializes firmware.
+ * @ha: HA context
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_init_rings(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long flags = 0;
+ int cnt, que;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ struct rsp_que *rsp;
+ struct mid_init_cb_24xx *mid_init_cb =
+ (struct mid_init_cb_24xx *) ha->init_cb;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Clear outstanding commands array. */
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ req->out_ptr = (void *)(req->ring + req->length);
+ *req->out_ptr = 0;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
+ req->outstanding_cmds[cnt] = NULL;
+
+ req->current_outstanding_cmd = 1;
+
+ /* Initialize firmware. */
+ req->ring_ptr = req->ring;
+ req->ring_index = 0;
+ req->cnt = req->length;
+ }
+
+ for (que = 0; que < ha->max_rsp_queues; que++) {
+ rsp = ha->rsp_q_map[que];
+ if (!rsp)
+ continue;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ *rsp->in_ptr = 0;
+ /* Initialize response queue entries */
+ if (IS_QLAFX00(ha))
+ qlafx00_init_response_q_entries(rsp);
+ else
+ qla2x00_init_response_q_entries(rsp);
+ }
+
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ ha->tgt.atio_ring_index = 0;
+ /* Initialize ATIO queue entries */
+ qlt_init_atio_q_entries(vha);
+
+ ha->isp_ops->config_rings(vha);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
+
+ if (IS_QLAFX00(ha)) {
+ rval = qlafx00_init_firmware(vha, ha->init_cb_size);
+ goto next_check;
+ }
+
+ /* Update any ISP specific firmware options before initialization. */
+ ha->isp_ops->update_fw_options(vha);
+
+ if (ha->flags.npiv_supported) {
+ if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
+ ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
+ mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
+ }
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
+ mid_init_cb->init_cb.execution_throttle =
+ cpu_to_le16(ha->fw_xcb_count);
+ /* D-Port Status */
+ if (IS_DPORT_CAPABLE(ha))
+ mid_init_cb->init_cb.firmware_options_1 |=
+ cpu_to_le16(BIT_7);
+ /* Enable FA-WWPN */
+ ha->flags.fawwpn_enabled =
+ (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
+ ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
+ (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ }
+
+ rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+next_check:
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x00d2,
+ "Init Firmware **** FAILED ****.\n");
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d3,
+ "Init Firmware -- success.\n");
+ }
+
+ return (rval);
+}
+
+/**
+ * qla2x00_fw_ready() - Waits for firmware ready.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fw_ready(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long wtime, mtime, cs84xx_time;
+ uint16_t min_wait; /* Minimum wait time if loop is down */
+ uint16_t wait_time; /* Wait time if loop is coming ready */
+ uint16_t state[6];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLAFX00(vha->hw))
+ return qlafx00_fw_ready(vha);
+
+ rval = QLA_SUCCESS;
+
+ /* Time to wait for loop down */
+ if (IS_P3P_TYPE(ha))
+ min_wait = 30;
+ else
+ min_wait = 20;
+
+ /*
+ * Firmware should take at most one RATOV to login, plus 5 seconds for
+ * our own processing.
+ */
+ if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
+ wait_time = min_wait;
+ }
+
+ /* Min wait time if loop down */
+ mtime = jiffies + (min_wait * HZ);
+
+ /* wait time before firmware ready */
+ wtime = jiffies + (wait_time * HZ);
+
+ /* Wait for ISP to finish LIP */
+ if (!vha->flags.init_done)
+ ql_log(ql_log_info, vha, 0x801e,
+ "Waiting for LIP to complete.\n");
+
+ do {
+ memset(state, -1, sizeof(state));
+ rval = qla2x00_get_firmware_state(vha, state);
+ if (rval == QLA_SUCCESS) {
+ if (state[0] < FSTATE_LOSS_OF_SYNC) {
+ vha->device_flags &= ~DFLG_NO_CABLE;
+ }
+ if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
+ ql_dbg(ql_dbg_taskm, vha, 0x801f,
+ "fw_state=%x 84xx=%x.\n", state[0],
+ state[2]);
+ if ((state[2] & FSTATE_LOGGED_IN) &&
+ (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8028,
+ "Sending verify iocb.\n");
+
+ cs84xx_time = jiffies;
+ rval = qla84xx_init_chip(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn,
+ vha, 0x8007,
+ "Init chip failed.\n");
+ break;
+ }
+
+ /* Add time taken to initialize. */
+ cs84xx_time = jiffies - cs84xx_time;
+ wtime += cs84xx_time;
+ mtime += cs84xx_time;
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
+ "Increasing wait time by %ld. "
+ "New time %ld.\n", cs84xx_time,
+ wtime);
+ }
+ } else if (state[0] == FSTATE_READY) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8037,
+ "F/W Ready - OK.\n");
+
+ qla2x00_get_retry_cnt(vha, &ha->retry_count,
+ &ha->login_timeout, &ha->r_a_tov);
+
+ rval = QLA_SUCCESS;
+ break;
+ }
+
+ rval = QLA_FUNCTION_FAILED;
+
+ if (atomic_read(&vha->loop_down_timer) &&
+ state[0] != FSTATE_READY) {
+ /* Loop down. Timeout on min_wait for states
+ * other than Wait for Login.
+ */
+ if (time_after_eq(jiffies, mtime)) {
+ ql_log(ql_log_info, vha, 0x8038,
+ "Cable is unplugged...\n");
+
+ vha->device_flags |= DFLG_NO_CABLE;
+ break;
+ }
+ }
+ } else {
+ /* Mailbox cmd failed. Timeout on min_wait. */
+ if (time_after_eq(jiffies, mtime) ||
+ ha->flags.isp82xx_fw_hung)
+ break;
+ }
+
+ if (time_after_eq(jiffies, wtime))
+ break;
+
+ /* Delay for a while */
+ msleep(500);
+ } while (1);
+
+ ql_dbg(ql_dbg_taskm, vha, 0x803a,
+ "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
+ state[1], state[2], state[3], state[4], state[5], jiffies);
+
+ if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
+ ql_log(ql_log_warn, vha, 0x803b,
+ "Firmware ready **** FAILED ****.\n");
+ }
+
+ return (rval);
+}
+
+/*
+* qla2x00_configure_hba
+* Setup adapter context.
+*
+* Input:
+* ha = adapter state pointer.
+*
+* Returns:
+* 0 = success
+*
+* Context:
+* Kernel context.
+*/
+static int
+qla2x00_configure_hba(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint16_t loop_id;
+ uint16_t topo;
+ uint16_t sw_cap;
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+ char connect_type[22];
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ /* Get host addresses. */
+ rval = qla2x00_get_adapter_id(vha,
+ &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
+ if (rval != QLA_SUCCESS) {
+ if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
+ IS_CNA_CAPABLE(ha) ||
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2008,
+ "Loop is in a transition state.\n");
+ } else {
+ ql_log(ql_log_warn, vha, 0x2009,
+ "Unable to get host loop ID.\n");
+ if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+ ql_log(ql_log_warn, vha, 0x1151,
+ "Doing link init.\n");
+ if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+ return rval;
+ }
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ return (rval);
+ }
+
+ if (topo == 4) {
+ ql_log(ql_log_info, vha, 0x200a,
+ "Cannot get topology - retrying.\n");
+ return (QLA_FUNCTION_FAILED);
+ }
+
+ vha->loop_id = loop_id;
+
+ /* initialize */
+ ha->min_external_loopid = SNS_FIRST_LOOP_ID;
+ ha->operating_mode = LOOP;
+ ha->switch_cap = 0;
+
+ switch (topo) {
+ case 0:
+ ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
+ ha->current_topology = ISP_CFG_NL;
+ strcpy(connect_type, "(Loop)");
+ break;
+
+ case 1:
+ ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
+ ha->switch_cap = sw_cap;
+ ha->current_topology = ISP_CFG_FL;
+ strcpy(connect_type, "(FL_Port)");
+ break;
+
+ case 2:
+ ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
+ ha->operating_mode = P2P;
+ ha->current_topology = ISP_CFG_N;
+ strcpy(connect_type, "(N_Port-to-N_Port)");
+ break;
+
+ case 3:
+ ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
+ ha->switch_cap = sw_cap;
+ ha->operating_mode = P2P;
+ ha->current_topology = ISP_CFG_F;
+ strcpy(connect_type, "(F_Port)");
+ break;
+
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x200f,
+ "HBA in unknown topology %x, using NL.\n", topo);
+ ha->current_topology = ISP_CFG_NL;
+ strcpy(connect_type, "(Loop)");
+ break;
+ }
+
+ /* Save Host port and loop ID. */
+ /* byte order - Big Endian */
+ vha->d_id.b.domain = domain;
+ vha->d_id.b.area = area;
+ vha->d_id.b.al_pa = al_pa;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (!vha->flags.init_done)
+ ql_log(ql_log_info, vha, 0x2010,
+ "Topology - %s, Host Loop address 0x%x.\n",
+ connect_type, vha->loop_id);
+
+ return(rval);
+}
+
+inline void
+qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+ char *def)
+{
+ char *st, *en;
+ uint16_t index;
+ struct qla_hw_data *ha = vha->hw;
+ int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
+
+ if (memcmp(model, BINZERO, len) != 0) {
+ strncpy(ha->model_number, model, len);
+ st = en = ha->model_number;
+ en += len - 1;
+ while (en > st) {
+ if (*en != 0x20 && *en != 0x00)
+ break;
+ *en-- = '\0';
+ }
+
+ index = (ha->pdev->subsystem_device & 0xff);
+ if (use_tbl &&
+ ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ index < QLA_MODEL_NAMES)
+ strncpy(ha->model_desc,
+ qla2x00_model_name[index * 2 + 1],
+ sizeof(ha->model_desc) - 1);
+ } else {
+ index = (ha->pdev->subsystem_device & 0xff);
+ if (use_tbl &&
+ ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ index < QLA_MODEL_NAMES) {
+ strcpy(ha->model_number,
+ qla2x00_model_name[index * 2]);
+ strncpy(ha->model_desc,
+ qla2x00_model_name[index * 2 + 1],
+ sizeof(ha->model_desc) - 1);
+ } else {
+ strcpy(ha->model_number, def);
+ }
+ }
+ if (IS_FWI2_CAPABLE(ha))
+ qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
+ sizeof(ha->model_desc));
+}
+
+/* On sparc systems, obtain port and node WWN from firmware
+ * properties.
+ */
+static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
+{
+#ifdef CONFIG_SPARC
+ struct qla_hw_data *ha = vha->hw;
+ struct pci_dev *pdev = ha->pdev;
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const u8 *val;
+ int len;
+
+ val = of_get_property(dp, "port-wwn", &len);
+ if (val && len >= WWN_SIZE)
+ memcpy(nv->port_name, val, WWN_SIZE);
+
+ val = of_get_property(dp, "node-wwn", &len);
+ if (val && len >= WWN_SIZE)
+ memcpy(nv->node_name, val, WWN_SIZE);
+#endif
+}
+
+/*
+* NVRAM configuration for ISP 2xxx
+*
+* Input:
+* ha = adapter block pointer.
+*
+* Output:
+* initialization control block in response_ring
+* host adapters parameters in host adapter block
+*
+* Returns:
+* 0 = success.
+*/
+int
+qla2x00_nvram_config(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint8_t chksum = 0;
+ uint16_t cnt;
+ uint8_t *dptr1, *dptr2;
+ struct qla_hw_data *ha = vha->hw;
+ init_cb_t *icb = ha->init_cb;
+ nvram_t *nv = ha->nvram;
+ uint8_t *ptr = ha->nvram;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ rval = QLA_SUCCESS;
+
+ /* Determine NVRAM starting address. */
+ ha->nvram_size = sizeof(nvram_t);
+ ha->nvram_base = 0;
+ if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
+ if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+ ha->nvram_base = 0x80;
+
+ /* Get NVRAM data and calculate checksum. */
+ ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
+ chksum += *ptr++;
+
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
+ "Contents of NVRAM.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
+ (uint8_t *)nv, ha->nvram_size);
+
+ /* Bad NVRAM data, set defaults parameters. */
+ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
+ nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
+ /* Reset NVRAM data. */
+ ql_log(ql_log_warn, vha, 0x0064,
+ "Inconsistent NVRAM "
+ "detected: checksum=0x%x id=%c version=0x%x.\n",
+ chksum, nv->id[0], nv->nvram_version);
+ ql_log(ql_log_warn, vha, 0x0065,
+ "Falling back to "
+ "functioning (yet invalid -- WWPN) defaults.\n");
+
+ /*
+ * Set default initialization control block.
+ */
+ memset(nv, 0, ha->nvram_size);
+ nv->parameter_block_version = ICB_VERSION;
+
+ if (IS_QLA23XX(ha)) {
+ nv->firmware_options[0] = BIT_2 | BIT_1;
+ nv->firmware_options[1] = BIT_7 | BIT_5;
+ nv->add_firmware_options[0] = BIT_5;
+ nv->add_firmware_options[1] = BIT_5 | BIT_4;
+ nv->frame_payload_size = 2048;
+ nv->special_options[1] = BIT_7;
+ } else if (IS_QLA2200(ha)) {
+ nv->firmware_options[0] = BIT_2 | BIT_1;
+ nv->firmware_options[1] = BIT_7 | BIT_5;
+ nv->add_firmware_options[0] = BIT_5;
+ nv->add_firmware_options[1] = BIT_5 | BIT_4;
+ nv->frame_payload_size = 1024;
+ } else if (IS_QLA2100(ha)) {
+ nv->firmware_options[0] = BIT_3 | BIT_1;
+ nv->firmware_options[1] = BIT_5;
+ nv->frame_payload_size = 1024;
+ }
+
+ nv->max_iocb_allocation = __constant_cpu_to_le16(256);
+ nv->execution_throttle = __constant_cpu_to_le16(16);
+ nv->retry_count = 8;
+ nv->retry_delay = 1;
+
+ nv->port_name[0] = 33;
+ nv->port_name[3] = 224;
+ nv->port_name[4] = 139;
+
+ qla2xxx_nvram_wwn_from_ofw(vha, nv);
+
+ nv->login_timeout = 4;
+
+ /*
+ * Set default host adapter parameters
+ */
+ nv->host_p[1] = BIT_2;
+ nv->reset_delay = 5;
+ nv->port_down_retry_count = 8;
+ nv->max_luns_per_target = __constant_cpu_to_le16(8);
+ nv->link_down_timeout = 60;
+
+ rval = 1;
+ }
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+ /*
+ * The SN2 does not provide BIOS emulation which means you can't change
+ * potentially bogus BIOS settings. Force the use of default settings
+ * for link rate and frame size. Hope that the rest of the settings
+ * are valid.
+ */
+ if (ia64_platform_is("sn2")) {
+ nv->frame_payload_size = 2048;
+ if (IS_QLA23XX(ha))
+ nv->special_options[1] = BIT_7;
+ }
+#endif
+
+ /* Reset Initialization control block */
+ memset(icb, 0, ha->init_cb_size);
+
+ /*
+ * Setup driver NVRAM options.
+ */
+ nv->firmware_options[0] |= (BIT_6 | BIT_1);
+ nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
+ nv->firmware_options[1] |= (BIT_5 | BIT_0);
+ nv->firmware_options[1] &= ~BIT_4;
+
+ if (IS_QLA23XX(ha)) {
+ nv->firmware_options[0] |= BIT_2;
+ nv->firmware_options[0] &= ~BIT_3;
+ nv->special_options[0] &= ~BIT_6;
+ nv->add_firmware_options[1] |= BIT_5 | BIT_4;
+
+ if (IS_QLA2300(ha)) {
+ if (ha->fb_rev == FPM_2310) {
+ strcpy(ha->model_number, "QLA2310");
+ } else {
+ strcpy(ha->model_number, "QLA2300");
+ }
+ } else {
+ qla2x00_set_model_info(vha, nv->model_number,
+ sizeof(nv->model_number), "QLA23xx");
+ }
+ } else if (IS_QLA2200(ha)) {
+ nv->firmware_options[0] |= BIT_2;
+ /*
+ * 'Point-to-point preferred, else loop' is not a safe
+ * connection mode setting.
+ */
+ if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
+ (BIT_5 | BIT_4)) {
+ /* Force 'loop preferred, else point-to-point'. */
+ nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
+ nv->add_firmware_options[0] |= BIT_5;
+ }
+ strcpy(ha->model_number, "QLA22xx");
+ } else /*if (IS_QLA2100(ha))*/ {
+ strcpy(ha->model_number, "QLA2100");
+ }
+
+ /*
+ * Copy over NVRAM RISC parameter block to initialization control block.
+ */
+ dptr1 = (uint8_t *)icb;
+ dptr2 = (uint8_t *)&nv->parameter_block_version;
+ cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
+ while (cnt--)
+ *dptr1++ = *dptr2++;
+
+ /* Copy 2nd half. */
+ dptr1 = (uint8_t *)icb->add_firmware_options;
+ cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
+ while (cnt--)
+ *dptr1++ = *dptr2++;
+
+ /* Use alternate WWN? */
+ if (nv->host_p[1] & BIT_7) {
+ memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+ memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+ }
+
+ /* Prepare nodename */
+ if ((icb->firmware_options[1] & BIT_6) == 0) {
+ /*
+ * Firmware will apply the following mask if the nodename was
+ * not provided.
+ */
+ memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+ icb->node_name[0] &= 0xF0;
+ }
+
+ /*
+ * Set host adapter parameters.
+ */
+
+ /*
+ * BIT_7 in the host-parameters section allows for modification to
+ * internal driver logging.
+ */
+ if (nv->host_p[0] & BIT_7)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
+ /* Always load RISC code on non ISP2[12]00 chips. */
+ if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
+ ha->flags.disable_risc_code_load = 0;
+ ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
+ ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
+ ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
+ ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
+ ha->flags.disable_serdes = 0;
+
+ ha->operating_mode =
+ (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+ memcpy(ha->fw_seriallink_options, nv->seriallink_options,
+ sizeof(ha->fw_seriallink_options));
+
+ /* save HBA serial number */
+ ha->serial0 = icb->port_name[5];
+ ha->serial1 = icb->port_name[6];
+ ha->serial2 = icb->port_name[7];
+ memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+ memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+ icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+
+ ha->retry_count = nv->retry_count;
+
+ /* Set minimum login_timeout to 4 seconds. */
+ if (nv->login_timeout != ql2xlogintimeout)
+ nv->login_timeout = ql2xlogintimeout;
+ if (nv->login_timeout < 4)
+ nv->login_timeout = 4;
+ ha->login_timeout = nv->login_timeout;
+ icb->login_timeout = nv->login_timeout;
+
+ /* Set minimum RATOV to 100 tenths of a second. */
+ ha->r_a_tov = 100;
+
+ ha->loop_reset_delay = nv->reset_delay;
+
+ /* Link Down Timeout = 0:
+ *
+ * When Port Down timer expires we will start returning
+ * I/O's to OS with "DID_NO_CONNECT".
+ *
+ * Link Down Timeout != 0:
+ *
+ * The driver waits for the link to come up after link down
+ * before returning I/Os to OS with "DID_NO_CONNECT".
+ */
+ if (nv->link_down_timeout == 0) {
+ ha->loop_down_abort_time =
+ (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+ } else {
+ ha->link_down_timeout = nv->link_down_timeout;
+ ha->loop_down_abort_time =
+ (LOOP_DOWN_TIME - ha->link_down_timeout);
+ }
+
+ /*
+ * Need enough time to try and get the port back.
+ */
+ ha->port_down_retry_count = nv->port_down_retry_count;
+ if (qlport_down_retry)
+ ha->port_down_retry_count = qlport_down_retry;
+ /* Set login_retry_count */
+ ha->login_retry_count = nv->retry_count;
+ if (ha->port_down_retry_count == nv->port_down_retry_count &&
+ ha->port_down_retry_count > 3)
+ ha->login_retry_count = ha->port_down_retry_count;
+ else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+ ha->login_retry_count = ha->port_down_retry_count;
+ if (ql2xloginretrycount)
+ ha->login_retry_count = ql2xloginretrycount;
+
+ icb->lun_enables = __constant_cpu_to_le16(0);
+ icb->command_resource_count = 0;
+ icb->immediate_notify_resource_count = 0;
+ icb->timeout = __constant_cpu_to_le16(0);
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ /* Enable RIO */
+ icb->firmware_options[0] &= ~BIT_3;
+ icb->add_firmware_options[0] &=
+ ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ icb->add_firmware_options[0] |= BIT_2;
+ icb->response_accumulation_timer = 3;
+ icb->interrupt_delay_timer = 5;
+
+ vha->flags.process_response_queue = 1;
+ } else {
+ /* Enable ZIO. */
+ if (!vha->flags.init_done) {
+ ha->zio_mode = icb->add_firmware_options[0] &
+ (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ ha->zio_timer = icb->interrupt_delay_timer ?
+ icb->interrupt_delay_timer: 2;
+ }
+ icb->add_firmware_options[0] &=
+ ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ vha->flags.process_response_queue = 0;
+ if (ha->zio_mode != QLA_ZIO_DISABLED) {
+ ha->zio_mode = QLA_ZIO_MODE_6;
+
+ ql_log(ql_log_info, vha, 0x0068,
+ "ZIO mode %d enabled; timer delay (%d us).\n",
+ ha->zio_mode, ha->zio_timer * 100);
+
+ icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
+ icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
+ vha->flags.process_response_queue = 1;
+ }
+ }
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0069,
+ "NVRAM configuration failed.\n");
+ }
+ return (rval);
+}
+
+static void
+qla2x00_rport_del(void *data)
+{
+ fc_port_t *fcport = data;
+ struct fc_rport *rport;
+ scsi_qla_host_t *vha = fcport->vha;
+ unsigned long flags;
+
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+ rport = fcport->drport ? fcport->drport: fcport->rport;
+ fcport->drport = NULL;
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+ if (rport) {
+ fc_remote_port_delete(rport);
+ /*
+ * Release the target mode FC NEXUS in qla_target.c code
+ * if target mod is enabled.
+ */
+ qlt_fc_port_deleted(vha, fcport);
+ }
+}
+
+/**
+ * qla2x00_alloc_fcport() - Allocate a generic fcport.
+ * @ha: HA context
+ * @flags: allocation flags
+ *
+ * Returns a pointer to the allocated fcport, or NULL, if none available.
+ */
+fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+{
+ fc_port_t *fcport;
+
+ fcport = kzalloc(sizeof(fc_port_t), flags);
+ if (!fcport)
+ return NULL;
+
+ /* Setup fcport template structure. */
+ fcport->vha = vha;
+ fcport->port_type = FCT_UNKNOWN;
+ fcport->loop_id = FC_NO_LOOP_ID;
+ qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
+ fcport->supported_classes = FC_COS_UNSPECIFIED;
+
+ return fcport;
+}
+
+/*
+ * qla2x00_configure_loop
+ * Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ * 1 = error.
+ * 2 = database was full and device was not configured.
+ */
+static int
+qla2x00_configure_loop(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long flags, save_flags;
+ struct qla_hw_data *ha = vha->hw;
+ rval = QLA_SUCCESS;
+
+ /* Get Initiator ID */
+ if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
+ rval = qla2x00_configure_hba(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2013,
+ "Unable to configure HBA.\n");
+ return (rval);
+ }
+ }
+
+ save_flags = flags = vha->dpc_flags;
+ ql_dbg(ql_dbg_disc, vha, 0x2014,
+ "Configure loop -- dpc flags = 0x%lx.\n", flags);
+
+ /*
+ * If we have both an RSCN and PORT UPDATE pending then handle them
+ * both at the same time.
+ */
+ clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ clear_bit(RSCN_UPDATE, &vha->dpc_flags);
+
+ qla2x00_get_data_rate(vha);
+
+ /* Determine what we need to do */
+ if (ha->current_topology == ISP_CFG_FL &&
+ (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
+
+ set_bit(RSCN_UPDATE, &flags);
+
+ } else if (ha->current_topology == ISP_CFG_F &&
+ (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
+
+ set_bit(RSCN_UPDATE, &flags);
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+
+ } else if (ha->current_topology == ISP_CFG_N) {
+ clear_bit(RSCN_UPDATE, &flags);
+
+ } else if (!vha->flags.online ||
+ (test_bit(ABORT_ISP_ACTIVE, &flags))) {
+
+ set_bit(RSCN_UPDATE, &flags);
+ set_bit(LOCAL_LOOP_UPDATE, &flags);
+ }
+
+ if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2015,
+ "Loop resync needed, failing.\n");
+ rval = QLA_FUNCTION_FAILED;
+ } else
+ rval = qla2x00_configure_local_loop(vha);
+ }
+
+ if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
+ if (LOOP_TRANSITION(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x201e,
+ "Needs RSCN update and loop transition.\n");
+ rval = QLA_FUNCTION_FAILED;
+ }
+ else
+ rval = qla2x00_configure_fabric(vha);
+ }
+
+ if (rval == QLA_SUCCESS) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ atomic_set(&vha->loop_state, LOOP_READY);
+ ql_dbg(ql_dbg_disc, vha, 0x2069,
+ "LOOP READY.\n");
+ }
+ }
+
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x206a,
+ "%s *** FAILED ***.\n", __func__);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x206b,
+ "%s: exiting normally.\n", __func__);
+ }
+
+ /* Restore state if a resync event occurred during processing */
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ if (test_bit(RSCN_UPDATE, &save_flags)) {
+ set_bit(RSCN_UPDATE, &vha->dpc_flags);
+ }
+ }
+
+ return (rval);
+}
+
+
+
+/*
+ * qla2x00_configure_local_loop
+ * Updates Fibre Channel Device Database with local loop devices.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ */
+static int
+qla2x00_configure_local_loop(scsi_qla_host_t *vha)
+{
+ int rval, rval2;
+ int found_devs;
+ int found;
+ fc_port_t *fcport, *new_fcport;
+
+ uint16_t index;
+ uint16_t entries;
+ char *id_iter;
+ uint16_t loop_id;
+ uint8_t domain, area, al_pa;
+ struct qla_hw_data *ha = vha->hw;
+
+ found_devs = 0;
+ new_fcport = NULL;
+ entries = MAX_FIBRE_DEVICES_LOOP;
+
+ /* Get list of logged in devices. */
+ memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
+ rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
+ &entries);
+ if (rval != QLA_SUCCESS)
+ goto cleanup_allocation;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2017,
+ "Entries in ID list (%d).\n", entries);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+ (uint8_t *)ha->gid_list,
+ entries * sizeof(struct gid_list_info));
+
+ /* Allocate temporary fcport for any new fcports discovered. */
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x2018,
+ "Memory allocation failed for fcport.\n");
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto cleanup_allocation;
+ }
+ new_fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ /*
+ * Mark local devices that were present with FCF_DEVICE_LOST for now.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&fcport->state) == FCS_ONLINE &&
+ fcport->port_type != FCT_BROADCAST &&
+ (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+
+ ql_dbg(ql_dbg_disc, vha, 0x2019,
+ "Marking port lost loop_id=0x%04x.\n",
+ fcport->loop_id);
+
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ }
+ }
+
+ /* Add devices to port list. */
+ id_iter = (char *)ha->gid_list;
+ for (index = 0; index < entries; index++) {
+ domain = ((struct gid_list_info *)id_iter)->domain;
+ area = ((struct gid_list_info *)id_iter)->area;
+ al_pa = ((struct gid_list_info *)id_iter)->al_pa;
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ loop_id = (uint16_t)
+ ((struct gid_list_info *)id_iter)->loop_id_2100;
+ else
+ loop_id = le16_to_cpu(
+ ((struct gid_list_info *)id_iter)->loop_id);
+ id_iter += ha->gid_list_info_size;
+
+ /* Bypass reserved domain fields. */
+ if ((domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass if not same domain and area of adapter. */
+ if (area && domain &&
+ (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
+ continue;
+
+ /* Bypass invalid local loop ID. */
+ if (loop_id > LAST_LOCAL_LOOP_ID)
+ continue;
+
+ memset(new_fcport, 0, sizeof(fc_port_t));
+
+ /* Fill in member data. */
+ new_fcport->d_id.b.domain = domain;
+ new_fcport->d_id.b.area = area;
+ new_fcport->d_id.b.al_pa = al_pa;
+ new_fcport->loop_id = loop_id;
+ rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
+ if (rval2 != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x201a,
+ "Failed to retrieve fcport information "
+ "-- get_port_database=%x, loop_id=0x%04x.\n",
+ rval2, new_fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x201b,
+ "Scheduling resync.\n");
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ continue;
+ }
+
+ /* Check for matching device in port list. */
+ found = 0;
+ fcport = NULL;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(new_fcport->port_name, fcport->port_name,
+ WWN_SIZE))
+ continue;
+
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+ fcport->loop_id = new_fcport->loop_id;
+ fcport->port_type = new_fcport->port_type;
+ fcport->d_id.b24 = new_fcport->d_id.b24;
+ memcpy(fcport->node_name, new_fcport->node_name,
+ WWN_SIZE);
+
+ found++;
+ break;
+ }
+
+ if (!found) {
+ /* New device, add to fcports list. */
+ list_add_tail(&new_fcport->list, &vha->vp_fcports);
+
+ /* Allocate a new replacement fcport. */
+ fcport = new_fcport;
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x201c,
+ "Failed to allocate memory for fcport.\n");
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto cleanup_allocation;
+ }
+ new_fcport->flags &= ~FCF_FABRIC_DEVICE;
+ }
+
+ /* Base iIDMA settings on HBA port speed. */
+ fcport->fp_speed = ha->link_data_rate;
+
+ qla2x00_update_fcport(vha, fcport);
+
+ found_devs++;
+ }
+
+cleanup_allocation:
+ kfree(new_fcport);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x201d,
+ "Configure local loop error exit: rval=%x.\n", rval);
+ }
+
+ return (rval);
+}
+
+static void
+qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_IIDMA_CAPABLE(ha))
+ return;
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE)
+ return;
+
+ if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
+ fcport->fp_speed > ha->link_data_rate)
+ return;
+
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
+ mb);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2004,
+ "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
+ fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2005,
+ "iIDMA adjusted to %s GB/s on %8phN.\n",
+ qla2x00_get_link_speed_str(ha, fcport->fp_speed),
+ fcport->port_name);
+ }
+}
+
+static void
+qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ struct fc_rport_identifiers rport_ids;
+ struct fc_rport *rport;
+ unsigned long flags;
+
+ rport_ids.node_name = wwn_to_u64(fcport->node_name);
+ rport_ids.port_name = wwn_to_u64(fcport->port_name);
+ rport_ids.port_id = fcport->d_id.b.domain << 16 |
+ fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+ rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
+ if (!rport) {
+ ql_log(ql_log_warn, vha, 0x2006,
+ "Unable to allocate fc remote port.\n");
+ return;
+ }
+ /*
+ * Create target mode FC NEXUS in qla_target.c if target mode is
+ * enabled..
+ */
+ qlt_fc_port_added(vha, fcport);
+
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+ *((fc_port_t **)rport->dd_data) = fcport;
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+
+ rport->supported_classes = fcport->supported_classes;
+
+ rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcport->port_type == FCT_INITIATOR)
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcport->port_type == FCT_TARGET)
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ fc_remote_port_rolechg(rport, rport_ids.roles);
+}
+
+/*
+ * qla2x00_update_fcport
+ * Updates device on list.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * 0 - Success
+ * BIT_0 - error
+ *
+ * Context:
+ * Kernel context.
+ */
+void
+qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ fcport->vha = vha;
+
+ if (IS_QLAFX00(vha->hw)) {
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+ qla2x00_reg_remote_port(vha, fcport);
+ return;
+ }
+ fcport->login_retry = 0;
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+ qla2x00_iidma_fcport(vha, fcport);
+ qla24xx_update_fcport_fcp_prio(vha, fcport);
+ qla2x00_reg_remote_port(vha, fcport);
+}
+
+/*
+ * qla2x00_configure_fabric
+ * Setup SNS devices with loop ID's.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ * BIT_0 = error
+ */
+static int
+qla2x00_configure_fabric(scsi_qla_host_t *vha)
+{
+ int rval;
+ fc_port_t *fcport, *fcptemp;
+ uint16_t next_loopid;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ uint16_t loop_id;
+ LIST_HEAD(new_fcports);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ /* If FL port exists, then SNS is present */
+ if (IS_FWI2_CAPABLE(ha))
+ loop_id = NPH_F_PORT;
+ else
+ loop_id = SNS_FL_PORT;
+ rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "MBX_GET_PORT_NAME failed, No FL Port.\n");
+
+ vha->device_flags &= ~SWITCH_FOUND;
+ return (QLA_SUCCESS);
+ }
+ vha->device_flags |= SWITCH_FOUND;
+
+ do {
+ /* FDMI support. */
+ if (ql2xfdmienable &&
+ test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+ qla2x00_fdmi_register(vha);
+
+ /* Ensure we are logged into the SNS. */
+ if (IS_FWI2_CAPABLE(ha))
+ loop_id = NPH_SNS;
+ else
+ loop_id = SIMPLE_NAME_SERVER;
+ rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
+ 0xfc, mb, BIT_1|BIT_0);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
+ if (mb[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2042,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
+ "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
+ mb[2], mb[6], mb[7]);
+ return (QLA_SUCCESS);
+ }
+
+ if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
+ if (qla2x00_rft_id(vha)) {
+ /* EMPTY */
+ ql_dbg(ql_dbg_disc, vha, 0x2045,
+ "Register FC-4 TYPE failed.\n");
+ }
+ if (qla2x00_rff_id(vha)) {
+ /* EMPTY */
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
+ "Register FC-4 Features failed.\n");
+ }
+ if (qla2x00_rnn_id(vha)) {
+ /* EMPTY */
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Register Node Name failed.\n");
+ } else if (qla2x00_rsnn_nn(vha)) {
+ /* EMPTY */
+ ql_dbg(ql_dbg_disc, vha, 0x2053,
+ "Register Symobilic Node Name failed.\n");
+ }
+ }
+
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
+ rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
+ if (rval != QLA_SUCCESS)
+ break;
+
+ /*
+ * Logout all previous fabric devices marked lost, except
+ * FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ha->isp_ops->fabric_logout(vha,
+ fcport->loop_id,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_clear_loop_id(fcport);
+ }
+ }
+ }
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
+ /*
+ * Scan through our port list and login entries that need to be
+ * logged in.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
+ }
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ }
+
+ /* Exit if out of loop IDs. */
+ if (rval != QLA_SUCCESS) {
+ break;
+ }
+
+ /*
+ * Login and add the new devices to our port list.
+ */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
+
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+ list_move_tail(&fcport->list, &vha->vp_fcports);
+ }
+ } while (0);
+
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x2068,
+ "Configure fabric error exit rval=%d.\n", rval);
+ }
+
+ return (rval);
+}
+
+/*
+ * qla2x00_find_all_fabric_devs
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * dev = database device entry pointer.
+ *
+ * Returns:
+ * 0 = success.
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ struct list_head *new_fcports)
+{
+ int rval;
+ uint16_t loop_id;
+ fc_port_t *fcport, *new_fcport, *fcptemp;
+ int found;
+
+ sw_info_t *swl;
+ int swl_idx;
+ int first_dev, last_dev;
+ port_id_t wrap = {}, nxt_d_id;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ rval = QLA_SUCCESS;
+
+ /* Try GID_PT to get device list, else GAN. */
+ if (!ha->swl)
+ ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
+ GFP_KERNEL);
+ swl = ha->swl;
+ if (!swl) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_disc, vha, 0x2054,
+ "GID_PT allocations failed, fallback on GA_NXT.\n");
+ } else {
+ memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
+ if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
+ swl = NULL;
+ } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
+ swl = NULL;
+ } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
+ swl = NULL;
+ } else if (ql2xiidmaenable &&
+ qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
+ qla2x00_gpsc(vha, swl);
+ }
+
+ /* If other queries succeeded probe for FC-4 type */
+ if (swl)
+ qla2x00_gff_id(vha, swl);
+ }
+ swl_idx = 0;
+
+ /* Allocate temporary fcport for any new fcports discovered. */
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x205e,
+ "Failed to allocate memory for fcport.\n");
+ return (QLA_MEMORY_ALLOC_FAILED);
+ }
+ new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
+ /* Set start port ID scan at adapter ID. */
+ first_dev = 1;
+ last_dev = 0;
+
+ /* Starting free loop ID. */
+ loop_id = ha->min_external_loopid;
+ for (; loop_id <= ha->max_loop_id; loop_id++) {
+ if (qla2x00_is_reserved_id(vha, loop_id))
+ continue;
+
+ if (ha->current_topology == ISP_CFG_FL &&
+ (atomic_read(&vha->loop_down_timer) ||
+ LOOP_TRANSITION(vha))) {
+ atomic_set(&vha->loop_down_timer, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ break;
+ }
+
+ if (swl != NULL) {
+ if (last_dev) {
+ wrap.b24 = new_fcport->d_id.b24;
+ } else {
+ new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
+ memcpy(new_fcport->node_name,
+ swl[swl_idx].node_name, WWN_SIZE);
+ memcpy(new_fcport->port_name,
+ swl[swl_idx].port_name, WWN_SIZE);
+ memcpy(new_fcport->fabric_port_name,
+ swl[swl_idx].fabric_port_name, WWN_SIZE);
+ new_fcport->fp_speed = swl[swl_idx].fp_speed;
+ new_fcport->fc4_type = swl[swl_idx].fc4_type;
+
+ if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
+ last_dev = 1;
+ }
+ swl_idx++;
+ }
+ } else {
+ /* Send GA_NXT to the switch */
+ rval = qla2x00_ga_nxt(vha, new_fcport);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x2064,
+ "SNS scan failed -- assuming "
+ "zero-entry result.\n");
+ list_for_each_entry_safe(fcport, fcptemp,
+ new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+ rval = QLA_SUCCESS;
+ break;
+ }
+ }
+
+ /* If wrap on switch device list, exit. */
+ if (first_dev) {
+ wrap.b24 = new_fcport->d_id.b24;
+ first_dev = 0;
+ } else if (new_fcport->d_id.b24 == wrap.b24) {
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "Device wrap (%02x%02x%02x).\n",
+ new_fcport->d_id.b.domain,
+ new_fcport->d_id.b.area,
+ new_fcport->d_id.b.al_pa);
+ break;
+ }
+
+ /* Bypass if same physical adapter. */
+ if (new_fcport->d_id.b24 == base_vha->d_id.b24)
+ continue;
+
+ /* Bypass virtual ports of the same host. */
+ if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+ continue;
+
+ /* Bypass if same domain and area of adapter. */
+ if (((new_fcport->d_id.b24 & 0xffff00) ==
+ (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
+ ISP_CFG_FL)
+ continue;
+
+ /* Bypass reserved domain fields. */
+ if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass ports whose FCP-4 type is not FCP_SCSI */
+ if (ql2xgffidenable &&
+ (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
+ continue;
+
+ /* Locate matching device in database. */
+ found = 0;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(new_fcport->port_name, fcport->port_name,
+ WWN_SIZE))
+ continue;
+
+ fcport->scan_state = QLA_FCPORT_FOUND;
+
+ found++;
+
+ /* Update port state. */
+ memcpy(fcport->fabric_port_name,
+ new_fcport->fabric_port_name, WWN_SIZE);
+ fcport->fp_speed = new_fcport->fp_speed;
+
+ /*
+ * If address the same and state FCS_ONLINE, nothing
+ * changed.
+ */
+ if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ break;
+ }
+
+ /*
+ * If device was not a fabric device before.
+ */
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ fcport->d_id.b24 = new_fcport->d_id.b24;
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= (FCF_FABRIC_DEVICE |
+ FCF_LOGIN_NEEDED);
+ break;
+ }
+
+ /*
+ * Port ID changed or device was marked to be updated;
+ * Log it out if still logged in and mark it for
+ * relogin later.
+ */
+ fcport->d_id.b24 = new_fcport->d_id.b24;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ (fcport->flags & FCF_ASYNC_SENT) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_clear_loop_id(fcport);
+ }
+
+ break;
+ }
+
+ if (found)
+ continue;
+ /* If device was not in our fcports list, then add it. */
+ list_add_tail(&new_fcport->list, new_fcports);
+
+ /* Allocate a new replacement fcport. */
+ nxt_d_id.b24 = new_fcport->d_id.b24;
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x2066,
+ "Memory allocation failed for fcport.\n");
+ return (QLA_MEMORY_ALLOC_FAILED);
+ }
+ new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
+ new_fcport->d_id.b24 = nxt_d_id.b24;
+ }
+
+ kfree(new_fcport);
+
+ return (rval);
+}
+
+/*
+ * qla2x00_find_new_loop_id
+ * Scan through our port list and find a new usable loop ID.
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * dev: port structure pointer.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ rval = QLA_SUCCESS;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+
+ dev->loop_id = find_first_zero_bit(ha->loop_id_map,
+ LOOPID_MAP_SIZE);
+ if (dev->loop_id >= LOOPID_MAP_SIZE ||
+ qla2x00_is_reserved_id(vha, dev->loop_id)) {
+ dev->loop_id = FC_NO_LOOP_ID;
+ rval = QLA_FUNCTION_FAILED;
+ } else
+ set_bit(dev->loop_id, ha->loop_id_map);
+
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (rval == QLA_SUCCESS)
+ ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+ "Assigning new loopid=%x, portid=%x.\n",
+ dev->loop_id, dev->d_id.b24);
+ else
+ ql_log(ql_log_warn, dev->vha, 0x2087,
+ "No loop_id's available, portid=%x.\n",
+ dev->d_id.b24);
+
+ return (rval);
+}
+
+/*
+ * qla2x00_fabric_dev_login
+ * Login fabric target device and update FC port database.
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * fcport: port structure list pointer.
+ * next_loopid: contains value of a new loop ID that can be used
+ * by the next login attempt.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+ uint16_t *next_loopid)
+{
+ int rval;
+ int retry;
+ uint8_t opts;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = QLA_SUCCESS;
+ retry = 0;
+
+ if (IS_ALOGIO_CAPABLE(ha)) {
+ if (fcport->flags & FCF_ASYNC_SENT)
+ return rval;
+ fcport->flags |= FCF_ASYNC_SENT;
+ rval = qla2x00_post_async_login_work(vha, fcport, NULL);
+ if (!rval)
+ return rval;
+ }
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ rval = qla2x00_fabric_login(vha, fcport, next_loopid);
+ if (rval == QLA_SUCCESS) {
+ /* Send an ADISC to FCP2 devices.*/
+ opts = 0;
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ opts |= BIT_1;
+ rval = qla2x00_get_port_database(vha, fcport, opts);
+ if (rval != QLA_SUCCESS) {
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ } else {
+ qla2x00_update_fcport(vha, fcport);
+ }
+ } else {
+ /* Retry Login. */
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ }
+
+ return (rval);
+}
+
+/*
+ * qla2x00_fabric_login
+ * Issue fabric login command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * device = pointer to FC device type structure.
+ *
+ * Returns:
+ * 0 - Login successfully
+ * 1 - Login failed
+ * 2 - Initiator device
+ * 3 - Fatal error
+ */
+int
+qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+ uint16_t *next_loopid)
+{
+ int rval;
+ int retry;
+ uint16_t tmp_loopid;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct qla_hw_data *ha = vha->hw;
+
+ retry = 0;
+ tmp_loopid = 0;
+
+ for (;;) {
+ ql_dbg(ql_dbg_disc, vha, 0x2000,
+ "Trying Fabric Login w/loop id 0x%04x for port "
+ "%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ /* Login fcport on switch. */
+ rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, mb, BIT_0);
+ if (rval != QLA_SUCCESS) {
+ return rval;
+ }
+ if (mb[0] == MBS_PORT_ID_USED) {
+ /*
+ * Device has another loop ID. The firmware team
+ * recommends the driver perform an implicit login with
+ * the specified ID again. The ID we just used is save
+ * here so we return with an ID that can be tried by
+ * the next login.
+ */
+ retry++;
+ tmp_loopid = fcport->loop_id;
+ fcport->loop_id = mb[1];
+
+ ql_dbg(ql_dbg_disc, vha, 0x2001,
+ "Fabric Login: port in use - next loop "
+ "id=0x%04x, port id= %02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ } else if (mb[0] == MBS_COMMAND_COMPLETE) {
+ /*
+ * Login succeeded.
+ */
+ if (retry) {
+ /* A retry occurred before. */
+ *next_loopid = tmp_loopid;
+ } else {
+ /*
+ * No retry occurred before. Just increment the
+ * ID value for next login.
+ */
+ *next_loopid = (fcport->loop_id + 1);
+ }
+
+ if (mb[1] & BIT_0) {
+ fcport->port_type = FCT_INITIATOR;
+ } else {
+ fcport->port_type = FCT_TARGET;
+ if (mb[1] & BIT_1) {
+ fcport->flags |= FCF_FCP2_DEVICE;
+ }
+ }
+
+ if (mb[10] & BIT_0)
+ fcport->supported_classes |= FC_COS_CLASS2;
+ if (mb[10] & BIT_1)
+ fcport->supported_classes |= FC_COS_CLASS3;
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (mb[10] & BIT_7)
+ fcport->flags |=
+ FCF_CONF_COMP_SUPPORTED;
+ }
+
+ rval = QLA_SUCCESS;
+ break;
+ } else if (mb[0] == MBS_LOOP_ID_USED) {
+ /*
+ * Loop ID already used, try next loop ID.
+ */
+ fcport->loop_id++;
+ rval = qla2x00_find_new_loop_id(vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of loop IDs to use */
+ break;
+ }
+ } else if (mb[0] == MBS_COMMAND_ERROR) {
+ /*
+ * Firmware possibly timed out during login. If NO
+ * retries are left to do then the device is declared
+ * dead.
+ */
+ *next_loopid = fcport->loop_id;
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+ rval = 1;
+ break;
+ } else {
+ /*
+ * unrecoverable / not handled error
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x2002,
+ "Failed=%x port_id=%02x%02x%02x loop_id=%x "
+ "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->loop_id, jiffies);
+
+ *next_loopid = fcport->loop_id;
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_clear_loop_id(fcport);
+ fcport->login_retry = 0;
+
+ rval = 3;
+ break;
+ }
+ }
+
+ return (rval);
+}
+
+/*
+ * qla2x00_local_device_login
+ * Issue local device login command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * loop_id = loop id of device to login to.
+ *
+ * Returns (Where's the #define!!!!):
+ * 0 - Login successfully
+ * 1 - Login failed
+ * 3 - Fatal error
+ */
+int
+qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+
+ memset(mb, 0, sizeof(mb));
+ rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
+ if (rval == QLA_SUCCESS) {
+ /* Interrogate mailbox registers for any errors */
+ if (mb[0] == MBS_COMMAND_ERROR)
+ rval = 1;
+ else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
+ /* device not in PCB table */
+ rval = 3;
+ }
+
+ return (rval);
+}
+
+/*
+ * qla2x00_loop_resync
+ * Resync with fibre channel devices.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qla2x00_loop_resync(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t wait_time;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ if (vha->hw->flags.cpu_affinity_enabled)
+ req = vha->hw->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
+
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ if (vha->flags.online) {
+ if (!(rval = qla2x00_fw_ready(vha))) {
+ /* Wait at most MAX_TARGET RSCNs for a stable link. */
+ wait_time = 256;
+ do {
+ if (!IS_QLAFX00(vha->hw)) {
+ /*
+ * Issue a marker after FW becomes
+ * ready.
+ */
+ qla2x00_marker(vha, req, rsp, 0, 0,
+ MK_SYNC_ALL);
+ vha->marker_needed = 0;
+ }
+
+ /* Remap devices on Loop. */
+ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+
+ if (IS_QLAFX00(vha->hw))
+ qlafx00_configure_devices(vha);
+ else
+ qla2x00_configure_loop(vha);
+
+ wait_time--;
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags)));
+ }
+ }
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ return (QLA_FUNCTION_FAILED);
+
+ if (rval)
+ ql_dbg(ql_dbg_disc, vha, 0x206c,
+ "%s *** FAILED ***.\n", __func__);
+
+ return (rval);
+}
+
+/*
+* qla2x00_perform_loop_resync
+* Description: This function will set the appropriate flags and call
+* qla2x00_loop_resync. If successful loop will be resynced
+* Arguments : scsi_qla_host_t pointer
+* returm : Success or Failure
+*/
+
+int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
+{
+ int32_t rval = 0;
+
+ if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
+ /*Configure the flags so that resync happens properly*/
+ atomic_set(&ha->loop_down_timer, 0);
+ if (!(ha->device_flags & DFLG_NO_CABLE)) {
+ atomic_set(&ha->loop_state, LOOP_UP);
+ set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+
+ rval = qla2x00_loop_resync(ha);
+ } else
+ atomic_set(&ha->loop_state, LOOP_DEAD);
+
+ clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
+ }
+
+ return rval;
+}
+
+void
+qla2x00_update_fcports(scsi_qla_host_t *base_vha)
+{
+ fc_port_t *fcport;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = base_vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ /* Go with deferred removal of rport references. */
+ list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->drport &&
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qla2x00_rport_del(fcport);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ }
+ atomic_dec(&vha->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+/* Assumes idc_lock always held on entry */
+void
+qla83xx_reset_ownership(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_presence, drv_presence_mask;
+ uint32_t dev_part_info1, dev_part_info2, class_type;
+ uint32_t class_type_mask = 0x3;
+ uint16_t fcoe_other_function = 0xffff, i;
+
+ if (IS_QLA8044(ha)) {
+ drv_presence = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ dev_part_info1 = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_PART_INFO_INDEX);
+ dev_part_info2 = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_PART_INFO2);
+ } else {
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+ }
+ for (i = 0; i < 8; i++) {
+ class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
+ if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+ (i != ha->portnum)) {
+ fcoe_other_function = i;
+ break;
+ }
+ }
+ if (fcoe_other_function == 0xffff) {
+ for (i = 0; i < 8; i++) {
+ class_type = ((dev_part_info2 >> (i * 4)) &
+ class_type_mask);
+ if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+ ((i + 8) != ha->portnum)) {
+ fcoe_other_function = i + 8;
+ break;
+ }
+ }
+ }
+ /*
+ * Prepare drv-presence mask based on fcoe functions present.
+ * However consider only valid physical fcoe function numbers (0-15).
+ */
+ drv_presence_mask = ~((1 << (ha->portnum)) |
+ ((fcoe_other_function == 0xffff) ?
+ 0 : (1 << (fcoe_other_function))));
+
+ /* We are the reset owner iff:
+ * - No other protocol drivers present.
+ * - This is the lowest among fcoe functions. */
+ if (!(drv_presence & drv_presence_mask) &&
+ (ha->portnum < fcoe_other_function)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb07f,
+ "This host is Reset owner.\n");
+ ha->flags.nic_core_reset_owner = 1;
+ }
+}
+
+static int
+__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_ack;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+ if (rval == QLA_SUCCESS) {
+ drv_ack |= (1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+ }
+
+ return rval;
+}
+
+static int
+__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_ack;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+ if (rval == QLA_SUCCESS) {
+ drv_ack &= ~(1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+ }
+
+ return rval;
+}
+
+static const char *
+qla83xx_dev_state_to_string(uint32_t dev_state)
+{
+ switch (dev_state) {
+ case QLA8XXX_DEV_COLD:
+ return "COLD/RE-INIT";
+ case QLA8XXX_DEV_INITIALIZING:
+ return "INITIALIZING";
+ case QLA8XXX_DEV_READY:
+ return "READY";
+ case QLA8XXX_DEV_NEED_RESET:
+ return "NEED RESET";
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ return "NEED QUIESCENT";
+ case QLA8XXX_DEV_FAILED:
+ return "FAILED";
+ case QLA8XXX_DEV_QUIESCENT:
+ return "QUIESCENT";
+ default:
+ return "Unknown";
+ }
+}
+
+/* Assumes idc-lock always held on entry */
+void
+qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t idc_audit_reg = 0, duration_secs = 0;
+
+ switch (audit_type) {
+ case IDC_AUDIT_TIMESTAMP:
+ ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
+ idc_audit_reg = (ha->portnum) |
+ (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
+ qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+ break;
+
+ case IDC_AUDIT_COMPLETION:
+ duration_secs = ((jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
+ idc_audit_reg = (ha->portnum) |
+ (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
+ qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+ break;
+
+ default:
+ ql_log(ql_log_warn, vha, 0xb078,
+ "Invalid audit type specified.\n");
+ break;
+ }
+}
+
+/* Assumes idc_lock always held on entry */
+static int
+qla83xx_initiating_reset(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t idc_control, dev_state;
+
+ __qla83xx_get_idc_control(vha, &idc_control);
+ if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
+ ql_log(ql_log_info, vha, 0xb080,
+ "NIC Core reset has been disabled. idc-control=0x%x\n",
+ idc_control);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Set NEED-RESET iff in READY state and we are the reset-owner */
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
+ qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+ } else {
+ const char *state = qla83xx_dev_state_to_string(dev_state);
+ ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+
+ /* SV: XXX: Is timeout required here? */
+ /* Wait for IDC state change READY -> NEED_RESET */
+ while (dev_state == QLA8XXX_DEV_READY) {
+ qla83xx_idc_unlock(vha, 0);
+ msleep(200);
+ qla83xx_idc_lock(vha, 0);
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ }
+ }
+
+ /* Send IDC ack by writing to drv-ack register */
+ __qla83xx_set_drv_ack(vha);
+
+ return QLA_SUCCESS;
+}
+
+int
+__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
+{
+ return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+int
+__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
+{
+ return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+static int
+qla83xx_check_driver_presence(scsi_qla_host_t *vha)
+{
+ uint32_t drv_presence = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (drv_presence & (1 << ha->portnum))
+ return QLA_SUCCESS;
+ else
+ return QLA_TEST_FAILED;
+}
+
+int
+qla83xx_nic_core_reset(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb058,
+ "Entered %s().\n", __func__);
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_log(ql_log_warn, vha, 0xb059,
+ "Device in unrecoverable FAILED state.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla83xx_idc_lock(vha, 0);
+
+ if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb05a,
+ "Function=0x%x has been removed from IDC participation.\n",
+ ha->portnum);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ }
+
+ qla83xx_reset_ownership(vha);
+
+ rval = qla83xx_initiating_reset(vha);
+
+ /*
+ * Perform reset if we are the reset-owner,
+ * else wait till IDC state changes to READY/FAILED.
+ */
+ if (rval == QLA_SUCCESS) {
+ rval = qla83xx_idc_state_handler(vha);
+
+ if (rval == QLA_SUCCESS)
+ ha->flags.nic_core_hung = 0;
+ __qla83xx_clear_drv_ack(vha);
+ }
+
+exit:
+ qla83xx_idc_unlock(vha, 0);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
+
+ return rval;
+}
+
+int
+qla2xxx_mctp_dump(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!IS_MCTP_CAPABLE(ha)) {
+ /* This message can be removed from the final version */
+ ql_log(ql_log_info, vha, 0x506d,
+ "This board is not MCTP capable\n");
+ return rval;
+ }
+
+ if (!ha->mctp_dump) {
+ ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
+ MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
+
+ if (!ha->mctp_dump) {
+ ql_log(ql_log_warn, vha, 0x506e,
+ "Failed to allocate memory for mctp dump\n");
+ return rval;
+ }
+ }
+
+#define MCTP_DUMP_STR_ADDR 0x00000000
+ rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
+ MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x506f,
+ "Failed to capture mctp dump\n");
+ } else {
+ ql_log(ql_log_info, vha, 0x5070,
+ "Mctp dump capture for host (%ld/%p).\n",
+ vha->host_no, ha->mctp_dump);
+ ha->mctp_dumped = 1;
+ }
+
+ if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
+ ha->flags.nic_core_reset_hdlr_active = 1;
+ rval = qla83xx_restart_nic_firmware(vha);
+ if (rval)
+ /* NIC Core reset failed. */
+ ql_log(ql_log_warn, vha, 0x5071,
+ "Failed to restart nic firmware\n");
+ else
+ ql_dbg(ql_dbg_p3p, vha, 0xb084,
+ "Restarted NIC firmware successfully.\n");
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ }
+
+ return rval;
+
+}
+
+/*
+* qla2x00_quiesce_io
+* Description: This function will block the new I/Os
+* Its not aborting any I/Os as context
+* is not destroyed during quiescence
+* Arguments: scsi_qla_host_t
+* return : void
+*/
+void
+qla2x00_quiesce_io(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp;
+
+ ql_dbg(ql_dbg_dpc, vha, 0x401d,
+ "Quiescing I/O - ha=%p.\n", ha);
+
+ atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ list_for_each_entry(vp, &ha->vp_list, list)
+ qla2x00_mark_all_devices_lost(vp, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ }
+ /* Wait for pending cmds to complete */
+ qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
+}
+
+void
+qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp;
+ unsigned long flags;
+ fc_port_t *fcport;
+
+ /* For ISP82XX, driver waits for completion of the commands.
+ * online flag should be set.
+ */
+ if (!(IS_P3P_TYPE(ha)))
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+
+ ql_log(ql_log_info, vha, 0x00af,
+ "Performing ISP error recovery - ha=%p.\n", ha);
+
+ /* For ISP82XX, reset_chip is just disabling interrupts.
+ * Driver waits for the completion of the commands.
+ * the interrupts need to be enabled.
+ */
+ if (!(IS_P3P_TYPE(ha)))
+ ha->isp_ops->reset_chip(vha);
+
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ qla2x00_mark_all_devices_lost(vp, 0);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ }
+
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (!ha->flags.eeh_busy) {
+ /* Make sure for ISP 82XX IO DMA is complete */
+ if (IS_P3P_TYPE(ha)) {
+ qla82xx_chip_reset_cleanup(vha);
+ ql_log(ql_log_info, vha, 0x00b4,
+ "Done chip reset cleanup.\n");
+
+ /* Done waiting for pending commands.
+ * Reset the online flag.
+ */
+ vha->flags.online = 0;
+ }
+
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ }
+
+ ha->chip_reset++;
+ /* memory barrier */
+ wmb();
+}
+
+/*
+* qla2x00_abort_isp
+* Resets ISP and aborts all outstanding commands.
+*
+* Input:
+* ha = adapter block pointer.
+*
+* Returns:
+* 0 = success
+*/
+int
+qla2x00_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint8_t status = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp;
+ struct req_que *req = ha->req_q_map[0];
+ unsigned long flags;
+
+ if (vha->flags.online) {
+ qla2x00_abort_isp_cleanup(vha);
+
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb05c,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, vha, 0xb073,
+ "Error while clearing DRV-Presence.\n");
+ }
+
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure)) {
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ status = 0;
+ return status;
+ }
+
+ ha->isp_ops->get_flash_version(vha, req->ring);
+
+ ha->isp_ops->nvram_config(vha);
+
+ if (!qla2x00_restart_isp(vha)) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ if (!atomic_read(&vha->loop_down_timer)) {
+ /*
+ * Issue marker command only when we are going
+ * to start the I/O .
+ */
+ vha->marker_needed = 1;
+ }
+
+ vha->flags.online = 1;
+
+ ha->isp_ops->enable_intrs(ha);
+
+ ha->isp_abort_cnt = 0;
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ qla2x00_get_fw_version(vha);
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0,
+ fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+ &ha->fce_bufs);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8033,
+ "Unable to reinitialize FCE "
+ "(%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8034,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
+ } else { /* failed the ISP abort */
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (ha->isp_abort_cnt == 0) {
+ ql_log(ql_log_fatal, vha, 0x8035,
+ "ISP error recover failed - "
+ "board disabled.\n");
+ /*
+ * The next call disables the board
+ * completely.
+ */
+ ha->isp_ops->reset_adapter(vha);
+ vha->flags.online = 0;
+ clear_bit(ISP_ABORT_RETRY,
+ &vha->dpc_flags);
+ status = 0;
+ } else { /* schedule another ISP abort */
+ ha->isp_abort_cnt--;
+ ql_dbg(ql_dbg_taskm, vha, 0x8020,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
+ status = 1;
+ }
+ } else {
+ ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+ ql_dbg(ql_dbg_taskm, vha, 0x8021,
+ "ISP error recovery - retrying (%d) "
+ "more times.\n", ha->isp_abort_cnt);
+ set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ status = 1;
+ }
+ }
+
+ }
+
+ if (!status) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb05d,
+ "Setting back fcoe driver presence.\n");
+ if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, vha, 0xb074,
+ "Error while setting DRV-Presence.\n");
+ }
+ } else {
+ ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+ __func__);
+ }
+
+ return(status);
+}
+
+/*
+* qla2x00_restart_isp
+* restarts the ISP after a reset
+*
+* Input:
+* ha = adapter block pointer.
+*
+* Returns:
+* 0 = success
+*/
+static int
+qla2x00_restart_isp(scsi_qla_host_t *vha)
+{
+ int status = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ unsigned long flags;
+
+ /* If firmware needs to be loaded */
+ if (qla2x00_isp_firmware(vha)) {
+ vha->flags.online = 0;
+ status = ha->isp_ops->chip_diag(vha);
+ if (!status)
+ status = qla2x00_setup_chip(vha);
+ }
+
+ if (!status && !(status = qla2x00_init_rings(vha))) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ ha->flags.chip_reset_done = 1;
+
+ /* Initialize the queues in use */
+ qla25xx_init_queues(ha);
+
+ status = qla2x00_fw_ready(vha);
+ if (!status) {
+ /* Issue a marker after FW becomes ready. */
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+
+ vha->flags.online = 1;
+
+ /*
+ * Process any ATIO queue entries that came in
+ * while we weren't online.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla_tgt_mode_enabled(vha))
+ qlt_24xx_process_atio_queue(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+ }
+ return (status);
+}
+
+static int
+qla25xx_init_queues(struct qla_hw_data *ha)
+{
+ struct rsp_que *rsp = NULL;
+ struct req_que *req = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int ret = -1;
+ int i;
+
+ for (i = 1; i < ha->max_rsp_queues; i++) {
+ rsp = ha->rsp_q_map[i];
+ if (rsp) {
+ rsp->options &= ~BIT_0;
+ ret = qla25xx_init_rsp_que(base_vha, rsp);
+ if (ret != QLA_SUCCESS)
+ ql_dbg(ql_dbg_init, base_vha, 0x00ff,
+ "%s Rsp que: %d init failed.\n",
+ __func__, rsp->id);
+ else
+ ql_dbg(ql_dbg_init, base_vha, 0x0100,
+ "%s Rsp que: %d inited.\n",
+ __func__, rsp->id);
+ }
+ }
+ for (i = 1; i < ha->max_req_queues; i++) {
+ req = ha->req_q_map[i];
+ if (req) {
+ /* Clear outstanding commands array. */
+ req->options &= ~BIT_0;
+ ret = qla25xx_init_req_que(base_vha, req);
+ if (ret != QLA_SUCCESS)
+ ql_dbg(ql_dbg_init, base_vha, 0x0101,
+ "%s Req que: %d init failed.\n",
+ __func__, req->id);
+ else
+ ql_dbg(ql_dbg_init, base_vha, 0x0102,
+ "%s Req que: %d inited.\n",
+ __func__, req->id);
+ }
+ }
+ return ret;
+}
+
+/*
+* qla2x00_reset_adapter
+* Reset adapter.
+*
+* Input:
+* ha = adapter block pointer.
+*/
+void
+qla2x00_reset_adapter(scsi_qla_host_t *vha)
+{
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ vha->flags.online = 0;
+ ha->isp_ops->disable_intrs(ha);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla24xx_reset_adapter(scsi_qla_host_t *vha)
+{
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (IS_P3P_TYPE(ha))
+ return;
+
+ vha->flags.online = 0;
+ ha->isp_ops->disable_intrs(ha);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ RD_REG_DWORD(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (IS_NOPOLLING_TYPE(ha))
+ ha->isp_ops->enable_intrs(ha);
+}
+
+/* On sparc systems, obtain port and node WWN from firmware
+ * properties.
+ */
+static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
+ struct nvram_24xx *nv)
+{
+#ifdef CONFIG_SPARC
+ struct qla_hw_data *ha = vha->hw;
+ struct pci_dev *pdev = ha->pdev;
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const u8 *val;
+ int len;
+
+ val = of_get_property(dp, "port-wwn", &len);
+ if (val && len >= WWN_SIZE)
+ memcpy(nv->port_name, val, WWN_SIZE);
+
+ val = of_get_property(dp, "node-wwn", &len);
+ if (val && len >= WWN_SIZE)
+ memcpy(nv->node_name, val, WWN_SIZE);
+#endif
+}
+
+int
+qla24xx_nvram_config(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct init_cb_24xx *icb;
+ struct nvram_24xx *nv;
+ uint32_t *dptr;
+ uint8_t *dptr1, *dptr2;
+ uint32_t chksum;
+ uint16_t cnt;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = QLA_SUCCESS;
+ icb = (struct init_cb_24xx *)ha->init_cb;
+ nv = ha->nvram;
+
+ /* Determine NVRAM starting address. */
+ if (ha->port_no == 0) {
+ ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
+ ha->vpd_base = FA_NVRAM_VPD0_ADDR;
+ } else {
+ ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
+ ha->vpd_base = FA_NVRAM_VPD1_ADDR;
+ }
+
+ ha->nvram_size = sizeof(struct nvram_24xx);
+ ha->vpd_size = FA_NVRAM_VPD_SIZE;
+
+ /* Get VPD data into cache */
+ ha->vpd = ha->nvram + VPD_OFFSET;
+ ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
+ ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
+
+ /* Get NVRAM data into cache and calculate checksum. */
+ dptr = (uint32_t *)nv;
+ ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
+ ha->nvram_size);
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
+ chksum += le32_to_cpu(*dptr++);
+
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
+ "Contents of NVRAM\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
+ (uint8_t *)nv, ha->nvram_size);
+
+ /* Bad NVRAM data, set defaults parameters. */
+ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
+ || nv->id[3] != ' ' ||
+ nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+ /* Reset NVRAM data. */
+ ql_log(ql_log_warn, vha, 0x006b,
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
+ "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
+ ql_log(ql_log_warn, vha, 0x006c,
+ "Falling back to functioning (yet invalid -- WWPN) "
+ "defaults.\n");
+
+ /*
+ * Set default initialization control block.
+ */
+ memset(nv, 0, ha->nvram_size);
+ nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
+ nv->version = __constant_cpu_to_le16(ICB_VERSION);
+ nv->frame_payload_size = 2048;
+ nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+ nv->exchange_count = __constant_cpu_to_le16(0);
+ nv->hard_address = __constant_cpu_to_le16(124);
+ nv->port_name[0] = 0x21;
+ nv->port_name[1] = 0x00 + ha->port_no + 1;
+ nv->port_name[2] = 0x00;
+ nv->port_name[3] = 0xe0;
+ nv->port_name[4] = 0x8b;
+ nv->port_name[5] = 0x1c;
+ nv->port_name[6] = 0x55;
+ nv->port_name[7] = 0x86;
+ nv->node_name[0] = 0x20;
+ nv->node_name[1] = 0x00;
+ nv->node_name[2] = 0x00;
+ nv->node_name[3] = 0xe0;
+ nv->node_name[4] = 0x8b;
+ nv->node_name[5] = 0x1c;
+ nv->node_name[6] = 0x55;
+ nv->node_name[7] = 0x86;
+ qla24xx_nvram_wwn_from_ofw(vha, nv);
+ nv->login_retry_count = __constant_cpu_to_le16(8);
+ nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
+ nv->login_timeout = __constant_cpu_to_le16(0);
+ nv->firmware_options_1 =
+ __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+ nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
+ nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
+ nv->efi_parameters = __constant_cpu_to_le32(0);
+ nv->reset_delay = 5;
+ nv->max_luns_per_target = __constant_cpu_to_le16(128);
+ nv->port_down_retry_count = __constant_cpu_to_le16(30);
+ nv->link_down_timeout = __constant_cpu_to_le16(30);
+
+ rval = 1;
+ }
+
+ if (!qla_ini_mode_enabled(vha)) {
+ /* Don't enable full login after initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Don't enable LIP full login for initiator */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ }
+
+ qlt_24xx_config_nvram_stage1(vha, nv);
+
+ /* Reset Initialization control block */
+ memset(icb, 0, ha->init_cb_size);
+
+ /* Copy 1st segment. */
+ dptr1 = (uint8_t *)icb;
+ dptr2 = (uint8_t *)&nv->version;
+ cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
+ while (cnt--)
+ *dptr1++ = *dptr2++;
+
+ icb->login_retry_count = nv->login_retry_count;
+ icb->link_down_on_nos = nv->link_down_on_nos;
+
+ /* Copy 2nd segment. */
+ dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
+ dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
+ cnt = (uint8_t *)&icb->reserved_3 -
+ (uint8_t *)&icb->interrupt_delay_timer;
+ while (cnt--)
+ *dptr1++ = *dptr2++;
+
+ /*
+ * Setup driver NVRAM options.
+ */
+ qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
+ "QLA2462");
+
+ qlt_24xx_config_nvram_stage2(vha, icb);
+
+ if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ /* Use alternate WWN? */
+ memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+ memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+ }
+
+ /* Prepare nodename */
+ if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+ /*
+ * Firmware will apply the following mask if the nodename was
+ * not provided.
+ */
+ memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+ icb->node_name[0] &= 0xF0;
+ }
+
+ /* Set host adapter parameters. */
+ ha->flags.disable_risc_code_load = 0;
+ ha->flags.enable_lip_reset = 0;
+ ha->flags.enable_lip_full_login =
+ le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+ ha->flags.enable_target_reset =
+ le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+ ha->flags.enable_led_scheme = 0;
+ ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+
+ ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
+ (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+ memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
+ sizeof(ha->fw_seriallink_options24));
+
+ /* save HBA serial number */
+ ha->serial0 = icb->port_name[5];
+ ha->serial1 = icb->port_name[6];
+ ha->serial2 = icb->port_name[7];
+ memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+ memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+ icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+
+ ha->retry_count = le16_to_cpu(nv->login_retry_count);
+
+ /* Set minimum login_timeout to 4 seconds. */
+ if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
+ nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
+ if (le16_to_cpu(nv->login_timeout) < 4)
+ nv->login_timeout = __constant_cpu_to_le16(4);
+ ha->login_timeout = le16_to_cpu(nv->login_timeout);
+ icb->login_timeout = nv->login_timeout;
+
+ /* Set minimum RATOV to 100 tenths of a second. */
+ ha->r_a_tov = 100;
+
+ ha->loop_reset_delay = nv->reset_delay;
+
+ /* Link Down Timeout = 0:
+ *
+ * When Port Down timer expires we will start returning
+ * I/O's to OS with "DID_NO_CONNECT".
+ *
+ * Link Down Timeout != 0:
+ *
+ * The driver waits for the link to come up after link down
+ * before returning I/Os to OS with "DID_NO_CONNECT".
+ */
+ if (le16_to_cpu(nv->link_down_timeout) == 0) {
+ ha->loop_down_abort_time =
+ (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+ } else {
+ ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
+ ha->loop_down_abort_time =
+ (LOOP_DOWN_TIME - ha->link_down_timeout);
+ }
+
+ /* Need enough time to try and get the port back. */
+ ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
+ if (qlport_down_retry)
+ ha->port_down_retry_count = qlport_down_retry;
+
+ /* Set login_retry_count */
+ ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
+ if (ha->port_down_retry_count ==
+ le16_to_cpu(nv->port_down_retry_count) &&
+ ha->port_down_retry_count > 3)
+ ha->login_retry_count = ha->port_down_retry_count;
+ else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+ ha->login_retry_count = ha->port_down_retry_count;
+ if (ql2xloginretrycount)
+ ha->login_retry_count = ql2xloginretrycount;
+
+ /* Enable ZIO. */
+ if (!vha->flags.init_done) {
+ ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
+ (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
+ le16_to_cpu(icb->interrupt_delay_timer): 2;
+ }
+ icb->firmware_options_2 &= __constant_cpu_to_le32(
+ ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
+ vha->flags.process_response_queue = 0;
+ if (ha->zio_mode != QLA_ZIO_DISABLED) {
+ ha->zio_mode = QLA_ZIO_MODE_6;
+
+ ql_log(ql_log_info, vha, 0x006f,
+ "ZIO mode %d enabled; timer delay (%d us).\n",
+ ha->zio_mode, ha->zio_timer * 100);
+
+ icb->firmware_options_2 |= cpu_to_le32(
+ (uint32_t)ha->zio_mode);
+ icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
+ vha->flags.process_response_queue = 1;
+ }
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0070,
+ "NVRAM configuration failed.\n");
+ }
+ return (rval);
+}
+
+static int
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ uint32_t faddr)
+{
+ int rval = QLA_SUCCESS;
+ int segments, fragment;
+ uint32_t *dcode, dlen;
+ uint32_t risc_addr;
+ uint32_t risc_size;
+ uint32_t i;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "FW: Loading firmware from flash (%x).\n", faddr);
+
+ rval = QLA_SUCCESS;
+
+ segments = FA_RISC_CODE_SEGMENTS;
+ dcode = (uint32_t *)req->ring;
+ *srisc_addr = 0;
+
+ /* Validate firmware image by checking version. */
+ qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
+ for (i = 0; i < 4; i++)
+ dcode[i] = be32_to_cpu(dcode[i]);
+ if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
+ dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
+ (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+ dcode[3] == 0)) {
+ ql_log(ql_log_fatal, vha, 0x008c,
+ "Unable to verify the integrity of flash firmware "
+ "image.\n");
+ ql_log(ql_log_fatal, vha, 0x008d,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
+
+ return QLA_FUNCTION_FAILED;
+ }
+
+ while (segments && rval == QLA_SUCCESS) {
+ /* Read segment's load information. */
+ qla24xx_read_flash_data(vha, dcode, faddr, 4);
+
+ risc_addr = be32_to_cpu(dcode[2]);
+ *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
+ risc_size = be32_to_cpu(dcode[3]);
+
+ fragment = 0;
+ while (risc_size > 0 && rval == QLA_SUCCESS) {
+ dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+ if (dlen > risc_size)
+ dlen = risc_size;
+
+ ql_dbg(ql_dbg_init, vha, 0x008e,
+ "Loading risc segment@ risc addr %x "
+ "number of dwords 0x%x offset 0x%x.\n",
+ risc_addr, dlen, faddr);
+
+ qla24xx_read_flash_data(vha, dcode, faddr, dlen);
+ for (i = 0; i < dlen; i++)
+ dcode[i] = swab32(dcode[i]);
+
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+ dlen);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x008f,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ faddr += dlen;
+ risc_addr += dlen;
+ risc_size -= dlen;
+ fragment++;
+ }
+
+ /* Next segment. */
+ segments--;
+ }
+
+ if (!IS_QLA27XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ ql_dbg(ql_dbg_init, vha, 0x0161,
+ "Loading fwdump template from %x\n", faddr);
+ qla24xx_read_flash_data(vha, dcode, faddr, 7);
+ risc_size = be32_to_cpu(dcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0162,
+ "-> array size %x dwords\n", risc_size);
+ if (risc_size == 0 || risc_size == ~0)
+ goto default_template;
+
+ dlen = (risc_size - 8) * sizeof(*dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0163,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x0164,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto default_template;
+ }
+
+ faddr += 7;
+ risc_size -= 8;
+ dcode = ha->fw_dump_template;
+ qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = le32_to_cpu(dcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0165,
+ "Failed fwdump template validate\n");
+ goto default_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0166,
+ "-> template size %x bytes\n", dlen);
+ if (dlen > risc_size * sizeof(*dcode)) {
+ ql_log(ql_log_warn, vha, 0x0167,
+ "Failed fwdump template exceeds array by %x bytes\n",
+ (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+ goto default_template;
+ }
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+default_template:
+ ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ dlen = qla27xx_fwdt_template_default_size();
+ ql_dbg(ql_dbg_init, vha, 0x0169,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x016a,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto failed_template;
+ }
+
+ dcode = ha->fw_dump_template;
+ risc_size = dlen / sizeof(*dcode);
+ memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = be32_to_cpu(dcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+ ql_log(ql_log_warn, vha, 0x016b,
+ "Failed fwdump template validate\n");
+ goto failed_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+ ql_dbg(ql_dbg_init, vha, 0x016c,
+ "-> template size %x bytes\n", dlen);
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+failed_template:
+ ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+ return rval;
+}
+
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
+
+int
+qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+ int i, fragment;
+ uint16_t *wcode, *fwcode;
+ uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
+ struct fw_blob *blob;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ /* Load firmware blob. */
+ blob = qla2x00_request_firmware(vha);
+ if (!blob) {
+ ql_log(ql_log_info, vha, 0x0083,
+ "Firmware image unavailable.\n");
+ ql_log(ql_log_info, vha, 0x0084,
+ "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ rval = QLA_SUCCESS;
+
+ wcode = (uint16_t *)req->ring;
+ *srisc_addr = 0;
+ fwcode = (uint16_t *)blob->fw->data;
+ fwclen = 0;
+
+ /* Validate firmware image by checking version. */
+ if (blob->fw->size < 8 * sizeof(uint16_t)) {
+ ql_log(ql_log_fatal, vha, 0x0085,
+ "Unable to verify integrity of firmware image (%Zd).\n",
+ blob->fw->size);
+ goto fail_fw_integrity;
+ }
+ for (i = 0; i < 4; i++)
+ wcode[i] = be16_to_cpu(fwcode[i + 4]);
+ if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
+ wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
+ wcode[2] == 0 && wcode[3] == 0)) {
+ ql_log(ql_log_fatal, vha, 0x0086,
+ "Unable to verify integrity of firmware image.\n");
+ ql_log(ql_log_fatal, vha, 0x0087,
+ "Firmware data: %04x %04x %04x %04x.\n",
+ wcode[0], wcode[1], wcode[2], wcode[3]);
+ goto fail_fw_integrity;
+ }
+
+ seg = blob->segs;
+ while (*seg && rval == QLA_SUCCESS) {
+ risc_addr = *seg;
+ *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
+ risc_size = be16_to_cpu(fwcode[3]);
+
+ /* Validate firmware image size. */
+ fwclen += risc_size * sizeof(uint16_t);
+ if (blob->fw->size < fwclen) {
+ ql_log(ql_log_fatal, vha, 0x0088,
+ "Unable to verify integrity of firmware image "
+ "(%Zd).\n", blob->fw->size);
+ goto fail_fw_integrity;
+ }
+
+ fragment = 0;
+ while (risc_size > 0 && rval == QLA_SUCCESS) {
+ wlen = (uint16_t)(ha->fw_transfer_size >> 1);
+ if (wlen > risc_size)
+ wlen = risc_size;
+ ql_dbg(ql_dbg_init, vha, 0x0089,
+ "Loading risc segment@ risc addr %x number of "
+ "words 0x%x.\n", risc_addr, wlen);
+
+ for (i = 0; i < wlen; i++)
+ wcode[i] = swab16(fwcode[i]);
+
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+ wlen);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x008a,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
+ break;
+ }
+
+ fwcode += wlen;
+ risc_addr += wlen;
+ risc_size -= wlen;
+ fragment++;
+ }
+
+ /* Next segment. */
+ seg++;
+ }
+ return rval;
+
+fail_fw_integrity:
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+ int segments, fragment;
+ uint32_t *dcode, dlen;
+ uint32_t risc_addr;
+ uint32_t risc_size;
+ uint32_t i;
+ struct fw_blob *blob;
+ const uint32_t *fwcode;
+ uint32_t fwclen;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ /* Load firmware blob. */
+ blob = qla2x00_request_firmware(vha);
+ if (!blob) {
+ ql_log(ql_log_warn, vha, 0x0090,
+ "Firmware image unavailable.\n");
+ ql_log(ql_log_warn, vha, 0x0091,
+ "Firmware images can be retrieved from: "
+ QLA_FW_URL ".\n");
+
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x0092,
+ "FW: Loading via request-firmware.\n");
+
+ rval = QLA_SUCCESS;
+
+ segments = FA_RISC_CODE_SEGMENTS;
+ dcode = (uint32_t *)req->ring;
+ *srisc_addr = 0;
+ fwcode = (uint32_t *)blob->fw->data;
+ fwclen = 0;
+
+ /* Validate firmware image by checking version. */
+ if (blob->fw->size < 8 * sizeof(uint32_t)) {
+ ql_log(ql_log_fatal, vha, 0x0093,
+ "Unable to verify integrity of firmware image (%Zd).\n",
+ blob->fw->size);
+ return QLA_FUNCTION_FAILED;
+ }
+ for (i = 0; i < 4; i++)
+ dcode[i] = be32_to_cpu(fwcode[i + 4]);
+ if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
+ dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
+ (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+ dcode[3] == 0)) {
+ ql_log(ql_log_fatal, vha, 0x0094,
+ "Unable to verify integrity of firmware image (%Zd).\n",
+ blob->fw->size);
+ ql_log(ql_log_fatal, vha, 0x0095,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ while (segments && rval == QLA_SUCCESS) {
+ risc_addr = be32_to_cpu(fwcode[2]);
+ *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
+ risc_size = be32_to_cpu(fwcode[3]);
+
+ /* Validate firmware image size. */
+ fwclen += risc_size * sizeof(uint32_t);
+ if (blob->fw->size < fwclen) {
+ ql_log(ql_log_fatal, vha, 0x0096,
+ "Unable to verify integrity of firmware image "
+ "(%Zd).\n", blob->fw->size);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ fragment = 0;
+ while (risc_size > 0 && rval == QLA_SUCCESS) {
+ dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+ if (dlen > risc_size)
+ dlen = risc_size;
+
+ ql_dbg(ql_dbg_init, vha, 0x0097,
+ "Loading risc segment@ risc addr %x "
+ "number of dwords 0x%x.\n", risc_addr, dlen);
+
+ for (i = 0; i < dlen; i++)
+ dcode[i] = swab32(fwcode[i]);
+
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+ dlen);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x0098,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ fwcode += dlen;
+ risc_addr += dlen;
+ risc_size -= dlen;
+ fragment++;
+ }
+
+ /* Next segment. */
+ segments--;
+ }
+
+ if (!IS_QLA27XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ ql_dbg(ql_dbg_init, vha, 0x171,
+ "Loading fwdump template from %x\n",
+ (uint32_t)((void *)fwcode - (void *)blob->fw->data));
+ risc_size = be32_to_cpu(fwcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x172,
+ "-> array size %x dwords\n", risc_size);
+ if (risc_size == 0 || risc_size == ~0)
+ goto default_template;
+
+ dlen = (risc_size - 8) * sizeof(*fwcode);
+ ql_dbg(ql_dbg_init, vha, 0x0173,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x0174,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto default_template;
+ }
+
+ fwcode += 7;
+ risc_size -= 8;
+ dcode = ha->fw_dump_template;
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = le32_to_cpu(fwcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0175,
+ "Failed fwdump template validate\n");
+ goto default_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0176,
+ "-> template size %x bytes\n", dlen);
+ if (dlen > risc_size * sizeof(*fwcode)) {
+ ql_log(ql_log_warn, vha, 0x0177,
+ "Failed fwdump template exceeds array by %x bytes\n",
+ (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+ goto default_template;
+ }
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+default_template:
+ ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ dlen = qla27xx_fwdt_template_default_size();
+ ql_dbg(ql_dbg_init, vha, 0x0179,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x017a,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto failed_template;
+ }
+
+ dcode = ha->fw_dump_template;
+ risc_size = dlen / sizeof(*fwcode);
+ fwcode = qla27xx_fwdt_template_default();
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = be32_to_cpu(fwcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+ ql_log(ql_log_warn, vha, 0x017b,
+ "Failed fwdump template validate\n");
+ goto failed_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+ ql_dbg(ql_dbg_init, vha, 0x017c,
+ "-> template size %x bytes\n", dlen);
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+failed_template:
+ ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+ return rval;
+}
+
+int
+qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+
+ if (ql2xfwloadbin == 1)
+ return qla81xx_load_risc(vha, srisc_addr);
+
+ /*
+ * FW Load priority:
+ * 1) Firmware via request-firmware interface (.bin file).
+ * 2) Firmware residing in flash.
+ */
+ rval = qla24xx_load_risc_blob(vha, srisc_addr);
+ if (rval == QLA_SUCCESS)
+ return rval;
+
+ return qla24xx_load_risc_flash(vha, srisc_addr,
+ vha->hw->flt_region_fw);
+}
+
+int
+qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
+
+ /*
+ * FW Load priority:
+ * 1) Firmware residing in flash.
+ * 2) Firmware via request-firmware interface (.bin file).
+ * 3) Golden-Firmware residing in flash -- limited operation.
+ */
+ rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
+ if (rval == QLA_SUCCESS)
+ return rval;
+
+try_blob_fw:
+ rval = qla24xx_load_risc_blob(vha, srisc_addr);
+ if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+ return rval;
+
+ ql_log(ql_log_info, vha, 0x0099,
+ "Attempting to fallback to golden firmware.\n");
+ rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
+ ha->flags.running_gold_fw = 1;
+ return rval;
+}
+
+void
+qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
+{
+ int ret, retries;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.pci_channel_io_perm_failure)
+ return;
+ if (!IS_FWI2_CAPABLE(ha))
+ return;
+ if (!ha->fw_major_version)
+ return;
+
+ ret = qla2x00_stop_firmware(vha);
+ for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
+ ret != QLA_INVALID_COMMAND && retries ; retries--) {
+ ha->isp_ops->reset_chip(vha);
+ if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
+ continue;
+ if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
+ continue;
+ ql_log(ql_log_info, vha, 0x8015,
+ "Attempting retry of stop-firmware command.\n");
+ ret = qla2x00_stop_firmware(vha);
+ }
+}
+
+int
+qla24xx_configure_vhba(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ int rval2;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ if (!vha->vp_idx)
+ return -EINVAL;
+
+ rval = qla2x00_fw_ready(base_vha);
+ if (ha->flags.cpu_affinity_enabled)
+ req = ha->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
+
+ if (rval == QLA_SUCCESS) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+ }
+
+ vha->flags.management_server_logged_in = 0;
+
+ /* Login to SNS first */
+ rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
+ BIT_1);
+ if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+ if (rval2 == QLA_MEMORY_ALLOC_FAILED)
+ ql_dbg(ql_dbg_init, vha, 0x0120,
+ "Failed SNS login: loop_id=%x, rval2=%d\n",
+ NPH_SNS, rval2);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x0103,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+ "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
+ return (QLA_FUNCTION_FAILED);
+ }
+
+ atomic_set(&vha->loop_down_timer, 0);
+ atomic_set(&vha->loop_state, LOOP_UP);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ rval = qla2x00_loop_resync(base_vha);
+
+ return rval;
+}
+
+/* 84XX Support **************************************************************/
+
+static LIST_HEAD(qla_cs84xx_list);
+static DEFINE_MUTEX(qla_cs84xx_mutex);
+
+static struct qla_chip_state_84xx *
+qla84xx_get_chip(struct scsi_qla_host *vha)
+{
+ struct qla_chip_state_84xx *cs84xx;
+ struct qla_hw_data *ha = vha->hw;
+
+ mutex_lock(&qla_cs84xx_mutex);
+
+ /* Find any shared 84xx chip. */
+ list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
+ if (cs84xx->bus == ha->pdev->bus) {
+ kref_get(&cs84xx->kref);
+ goto done;
+ }
+ }
+
+ cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
+ if (!cs84xx)
+ goto done;
+
+ kref_init(&cs84xx->kref);
+ spin_lock_init(&cs84xx->access_lock);
+ mutex_init(&cs84xx->fw_update_mutex);
+ cs84xx->bus = ha->pdev->bus;
+
+ list_add_tail(&cs84xx->list, &qla_cs84xx_list);
+done:
+ mutex_unlock(&qla_cs84xx_mutex);
+ return cs84xx;
+}
+
+static void
+__qla84xx_chip_release(struct kref *kref)
+{
+ struct qla_chip_state_84xx *cs84xx =
+ container_of(kref, struct qla_chip_state_84xx, kref);
+
+ mutex_lock(&qla_cs84xx_mutex);
+ list_del(&cs84xx->list);
+ mutex_unlock(&qla_cs84xx_mutex);
+ kfree(cs84xx);
+}
+
+void
+qla84xx_put_chip(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ if (ha->cs84xx)
+ kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
+}
+
+static int
+qla84xx_init_chip(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint16_t status[2];
+ struct qla_hw_data *ha = vha->hw;
+
+ mutex_lock(&ha->cs84xx->fw_update_mutex);
+
+ rval = qla84xx_verify_chip(vha, status);
+
+ mutex_unlock(&ha->cs84xx->fw_update_mutex);
+
+ return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
+ QLA_SUCCESS;
+}
+
+/* 81XX Support **************************************************************/
+
+int
+qla81xx_nvram_config(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct init_cb_81xx *icb;
+ struct nvram_81xx *nv;
+ uint32_t *dptr;
+ uint8_t *dptr1, *dptr2;
+ uint32_t chksum;
+ uint16_t cnt;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = QLA_SUCCESS;
+ icb = (struct init_cb_81xx *)ha->init_cb;
+ nv = ha->nvram;
+
+ /* Determine NVRAM starting address. */
+ ha->nvram_size = sizeof(struct nvram_81xx);
+ ha->vpd_size = FA_NVRAM_VPD_SIZE;
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
+
+ /* Get VPD data into cache */
+ ha->vpd = ha->nvram + VPD_OFFSET;
+ ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+ ha->vpd_size);
+
+ /* Get NVRAM data into cache and calculate checksum. */
+ ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+ ha->nvram_size);
+ dptr = (uint32_t *)nv;
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
+ chksum += le32_to_cpu(*dptr++);
+
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
+ "Contents of NVRAM:\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
+ (uint8_t *)nv, ha->nvram_size);
+
+ /* Bad NVRAM data, set defaults parameters. */
+ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
+ || nv->id[3] != ' ' ||
+ nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+ /* Reset NVRAM data. */
+ ql_log(ql_log_info, vha, 0x0073,
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
+ "version=0x%x.\n", chksum, nv->id[0],
+ le16_to_cpu(nv->nvram_version));
+ ql_log(ql_log_info, vha, 0x0074,
+ "Falling back to functioning (yet invalid -- WWPN) "
+ "defaults.\n");
+
+ /*
+ * Set default initialization control block.
+ */
+ memset(nv, 0, ha->nvram_size);
+ nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
+ nv->version = __constant_cpu_to_le16(ICB_VERSION);
+ nv->frame_payload_size = 2048;
+ nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+ nv->exchange_count = __constant_cpu_to_le16(0);
+ nv->port_name[0] = 0x21;
+ nv->port_name[1] = 0x00 + ha->port_no + 1;
+ nv->port_name[2] = 0x00;
+ nv->port_name[3] = 0xe0;
+ nv->port_name[4] = 0x8b;
+ nv->port_name[5] = 0x1c;
+ nv->port_name[6] = 0x55;
+ nv->port_name[7] = 0x86;
+ nv->node_name[0] = 0x20;
+ nv->node_name[1] = 0x00;
+ nv->node_name[2] = 0x00;
+ nv->node_name[3] = 0xe0;
+ nv->node_name[4] = 0x8b;
+ nv->node_name[5] = 0x1c;
+ nv->node_name[6] = 0x55;
+ nv->node_name[7] = 0x86;
+ nv->login_retry_count = __constant_cpu_to_le16(8);
+ nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
+ nv->login_timeout = __constant_cpu_to_le16(0);
+ nv->firmware_options_1 =
+ __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+ nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
+ nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
+ nv->efi_parameters = __constant_cpu_to_le32(0);
+ nv->reset_delay = 5;
+ nv->max_luns_per_target = __constant_cpu_to_le16(128);
+ nv->port_down_retry_count = __constant_cpu_to_le16(30);
+ nv->link_down_timeout = __constant_cpu_to_le16(180);
+ nv->enode_mac[0] = 0x00;
+ nv->enode_mac[1] = 0xC0;
+ nv->enode_mac[2] = 0xDD;
+ nv->enode_mac[3] = 0x04;
+ nv->enode_mac[4] = 0x05;
+ nv->enode_mac[5] = 0x06 + ha->port_no + 1;
+
+ rval = 1;
+ }
+
+ if (IS_T10_PI_CAPABLE(ha))
+ nv->frame_payload_size &= ~7;
+
+ qlt_81xx_config_nvram_stage1(vha, nv);
+
+ /* Reset Initialization control block */
+ memset(icb, 0, ha->init_cb_size);
+
+ /* Copy 1st segment. */
+ dptr1 = (uint8_t *)icb;
+ dptr2 = (uint8_t *)&nv->version;
+ cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
+ while (cnt--)
+ *dptr1++ = *dptr2++;
+
+ icb->login_retry_count = nv->login_retry_count;
+
+ /* Copy 2nd segment. */
+ dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
+ dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
+ cnt = (uint8_t *)&icb->reserved_5 -
+ (uint8_t *)&icb->interrupt_delay_timer;
+ while (cnt--)
+ *dptr1++ = *dptr2++;
+
+ memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
+ /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
+ if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
+ icb->enode_mac[0] = 0x00;
+ icb->enode_mac[1] = 0xC0;
+ icb->enode_mac[2] = 0xDD;
+ icb->enode_mac[3] = 0x04;
+ icb->enode_mac[4] = 0x05;
+ icb->enode_mac[5] = 0x06 + ha->port_no + 1;
+ }
+
+ /* Use extended-initialization control block. */
+ memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
+
+ /*
+ * Setup driver NVRAM options.
+ */
+ qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
+ "QLE8XXX");
+
+ qlt_81xx_config_nvram_stage2(vha, icb);
+
+ /* Use alternate WWN? */
+ if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+ memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+ }
+
+ /* Prepare nodename */
+ if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+ /*
+ * Firmware will apply the following mask if the nodename was
+ * not provided.
+ */
+ memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+ icb->node_name[0] &= 0xF0;
+ }
+
+ /* Set host adapter parameters. */
+ ha->flags.disable_risc_code_load = 0;
+ ha->flags.enable_lip_reset = 0;
+ ha->flags.enable_lip_full_login =
+ le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+ ha->flags.enable_target_reset =
+ le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+ ha->flags.enable_led_scheme = 0;
+ ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+
+ ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
+ (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+ /* save HBA serial number */
+ ha->serial0 = icb->port_name[5];
+ ha->serial1 = icb->port_name[6];
+ ha->serial2 = icb->port_name[7];
+ memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+ memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+ icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+
+ ha->retry_count = le16_to_cpu(nv->login_retry_count);
+
+ /* Set minimum login_timeout to 4 seconds. */
+ if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
+ nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
+ if (le16_to_cpu(nv->login_timeout) < 4)
+ nv->login_timeout = __constant_cpu_to_le16(4);
+ ha->login_timeout = le16_to_cpu(nv->login_timeout);
+ icb->login_timeout = nv->login_timeout;
+
+ /* Set minimum RATOV to 100 tenths of a second. */
+ ha->r_a_tov = 100;
+
+ ha->loop_reset_delay = nv->reset_delay;
+
+ /* Link Down Timeout = 0:
+ *
+ * When Port Down timer expires we will start returning
+ * I/O's to OS with "DID_NO_CONNECT".
+ *
+ * Link Down Timeout != 0:
+ *
+ * The driver waits for the link to come up after link down
+ * before returning I/Os to OS with "DID_NO_CONNECT".
+ */
+ if (le16_to_cpu(nv->link_down_timeout) == 0) {
+ ha->loop_down_abort_time =
+ (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+ } else {
+ ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
+ ha->loop_down_abort_time =
+ (LOOP_DOWN_TIME - ha->link_down_timeout);
+ }
+
+ /* Need enough time to try and get the port back. */
+ ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
+ if (qlport_down_retry)
+ ha->port_down_retry_count = qlport_down_retry;
+
+ /* Set login_retry_count */
+ ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
+ if (ha->port_down_retry_count ==
+ le16_to_cpu(nv->port_down_retry_count) &&
+ ha->port_down_retry_count > 3)
+ ha->login_retry_count = ha->port_down_retry_count;
+ else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+ ha->login_retry_count = ha->port_down_retry_count;
+ if (ql2xloginretrycount)
+ ha->login_retry_count = ql2xloginretrycount;
+
+ /* if not running MSI-X we need handshaking on interrupts */
+ if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
+ icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+
+ /* Enable ZIO. */
+ if (!vha->flags.init_done) {
+ ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
+ (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
+ le16_to_cpu(icb->interrupt_delay_timer): 2;
+ }
+ icb->firmware_options_2 &= __constant_cpu_to_le32(
+ ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
+ vha->flags.process_response_queue = 0;
+ if (ha->zio_mode != QLA_ZIO_DISABLED) {
+ ha->zio_mode = QLA_ZIO_MODE_6;
+
+ ql_log(ql_log_info, vha, 0x0075,
+ "ZIO mode %d enabled; timer delay (%d us).\n",
+ ha->zio_mode,
+ ha->zio_timer * 100);
+
+ icb->firmware_options_2 |= cpu_to_le32(
+ (uint32_t)ha->zio_mode);
+ icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
+ vha->flags.process_response_queue = 1;
+ }
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0076,
+ "NVRAM configuration failed.\n");
+ }
+ return (rval);
+}
+
+int
+qla82xx_restart_isp(scsi_qla_host_t *vha)
+{
+ int status, rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct scsi_qla_host *vp;
+ unsigned long flags;
+
+ status = qla2x00_init_rings(vha);
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ ha->flags.chip_reset_done = 1;
+
+ status = qla2x00_fw_ready(vha);
+ if (!status) {
+ /* Issue a marker after FW becomes ready. */
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+ vha->flags.online = 1;
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+ }
+
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ if (!atomic_read(&vha->loop_down_timer)) {
+ /*
+ * Issue marker command only when we are going
+ * to start the I/O .
+ */
+ vha->marker_needed = 1;
+ }
+
+ ha->isp_ops->enable_intrs(ha);
+
+ ha->isp_abort_cnt = 0;
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+ /* Update the firmware version */
+ status = qla82xx_check_md_needed(vha);
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0,
+ fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+ &ha->fce_bufs);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8001,
+ "Unable to reinitialize FCE (%d).\n",
+ rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8010,
+ "Unable to reinitialize EFT (%d).\n",
+ rval);
+ }
+ }
+ }
+
+ if (!status) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
+ "qla82xx_restart_isp succeeded.\n");
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ } else {
+ ql_log(ql_log_warn, vha, 0x8016,
+ "qla82xx_restart_isp **** FAILED ****.\n");
+ }
+
+ return status;
+}
+
+void
+qla81xx_update_fw_options(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ql2xetsenable)
+ return;
+
+ /* Enable ETS Burst. */
+ memset(ha->fw_options, 0, sizeof(ha->fw_options));
+ ha->fw_options[2] |= BIT_9;
+ qla2x00_set_fw_options(vha, ha->fw_options);
+}
+
+/*
+ * qla24xx_get_fcp_prio
+ * Gets the fcp cmd priority value for the logged in port.
+ * Looks for a match of the port descriptors within
+ * each of the fcp prio config entries. If a match is found,
+ * the tag (priority) value is returned.
+ *
+ * Input:
+ * vha = scsi host structure pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * non-zero (if found)
+ * -1 (if not found)
+ *
+ * Context:
+ * Kernel context
+ */
+static int
+qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int i, entries;
+ uint8_t pid_match, wwn_match;
+ int priority;
+ uint32_t pid1, pid2;
+ uint64_t wwn1, wwn2;
+ struct qla_fcp_prio_entry *pri_entry;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
+ return -1;
+
+ priority = -1;
+ entries = ha->fcp_prio_cfg->num_entries;
+ pri_entry = &ha->fcp_prio_cfg->entry[0];
+
+ for (i = 0; i < entries; i++) {
+ pid_match = wwn_match = 0;
+
+ if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
+ pri_entry++;
+ continue;
+ }
+
+ /* check source pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
+ pid1 = pri_entry->src_pid & INVALID_PORT_ID;
+ pid2 = vha->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check destination pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
+ pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
+ pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check source WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
+ wwn1 = wwn_to_u64(vha->port_name);
+ wwn2 = wwn_to_u64(pri_entry->src_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ /* check destination WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
+ wwn1 = wwn_to_u64(fcport->port_name);
+ wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ if (pid_match == 2 || wwn_match == 2) {
+ /* Found a matching entry */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ priority = pri_entry->tag;
+ break;
+ }
+
+ pri_entry++;
+ }
+
+ return priority;
+}
+
+/*
+ * qla24xx_update_fcport_fcp_prio
+ * Activates fcp priority for the logged in fc port
+ *
+ * Input:
+ * vha = scsi host structure pointer.
+ * fcp = port structure pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int ret;
+ int priority;
+ uint16_t mb[5];
+
+ if (fcport->port_type != FCT_TARGET ||
+ fcport->loop_id == FC_NO_LOOP_ID)
+ return QLA_FUNCTION_FAILED;
+
+ priority = qla24xx_get_fcp_prio(vha, fcport);
+ if (priority < 0)
+ return QLA_FUNCTION_FAILED;
+
+ if (IS_P3P_TYPE(vha->hw)) {
+ fcport->fcp_prio = priority & 0xf;
+ return QLA_SUCCESS;
+ }
+
+ ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
+ if (ret == QLA_SUCCESS) {
+ if (fcport->fcp_prio != priority)
+ ql_dbg(ql_dbg_user, vha, 0x709e,
+ "Updated FCP_CMND priority - value=%d loop_id=%d "
+ "port_id=%02x%02x%02x.\n", priority,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ fcport->fcp_prio = priority & 0xf;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x704f,
+ "Unable to update FCP_CMND priority - ret=0x%x for "
+ "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ return ret;
+}
+
+/*
+ * qla24xx_update_all_fcp_prio
+ * Activates fcp priority for all the logged in ports
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
+{
+ int ret;
+ fc_port_t *fcport;
+
+ ret = QLA_FUNCTION_FAILED;
+ /* We need to set priority for all logged in ports */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
+
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
new file mode 100644
index 000000000..fee9eb7c8
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -0,0 +1,289 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include "qla_target.h"
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 1) {
+ iocbs += (dsds - 1) / 5;
+ if ((dsds - 1) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
+
+/*
+ * qla2x00_debounce_register
+ * Debounce register.
+ *
+ * Input:
+ * port = register address.
+ *
+ * Returns:
+ * register value.
+ */
+static __inline__ uint16_t
+qla2x00_debounce_register(volatile uint16_t __iomem *addr)
+{
+ volatile uint16_t first;
+ volatile uint16_t second;
+
+ do {
+ first = RD_REG_WORD(addr);
+ barrier();
+ cpu_relax();
+ second = RD_REG_WORD(addr);
+ } while (first != second);
+
+ return (first);
+}
+
+static inline void
+qla2x00_poll(struct rsp_que *rsp)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = rsp->hw;
+ local_irq_save(flags);
+ if (IS_P3P_TYPE(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
+ local_irq_restore(flags);
+}
+
+static inline uint8_t *
+host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
+{
+ uint32_t *ifcp = (uint32_t *) fcp;
+ uint32_t *ofcp = (uint32_t *) fcp;
+ uint32_t iter = bsize >> 2;
+
+ for (; iter ; iter--)
+ *ofcp++ = swab32(*ifcp++);
+
+ return fcp;
+}
+
+static inline void
+host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
+{
+ uint32_t *isrc = (uint32_t *) src;
+ __le32 *odest = (__le32 *) dst;
+ uint32_t iter = bsize >> 2;
+
+ for (; iter ; iter--)
+ *odest++ = cpu_to_le32(*isrc++);
+}
+
+static inline void
+qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+ int i;
+
+ if (IS_FWI2_CAPABLE(ha))
+ return;
+
+ for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+ set_bit(i, ha->loop_id_map);
+ set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+ set_bit(BROADCAST, ha->loop_id_map);
+}
+
+static inline int
+qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ if (IS_FWI2_CAPABLE(ha))
+ return (loop_id > NPH_LAST_HANDLE);
+
+ return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+ loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
+}
+
+static inline void
+qla2x00_clear_loop_id(fc_port_t *fcport) {
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+ return;
+
+ clear_bit(fcport->loop_id, ha->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+}
+
+static inline void
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
+ struct qla_tgt_cmd *tc)
+{
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+ struct crc_context *ctx;
+
+ if (sp)
+ ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
+ else if (tc)
+ ctx = (struct crc_context *)tc->ctx;
+ else {
+ BUG();
+ return;
+ }
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &ctx->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ INIT_LIST_HEAD(&ctx->dsd_list);
+}
+
+static inline void
+qla2x00_set_fcport_state(fc_port_t *fcport, int state)
+{
+ int old_state;
+
+ old_state = atomic_read(&fcport->state);
+ atomic_set(&fcport->state, state);
+
+ /* Don't print state transitions during initial allocation of fcport */
+ if (old_state && old_state != state) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+ "FCPort state transitioned from %s to %s - "
+ "portid=%02x%02x%02x.\n",
+ port_state_str[old_state], port_state_str[state],
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ }
+}
+
+static inline int
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+ /*
+ * Uncomment when corresponding SCSI changes are done.
+ *
+ if (!sp->cmd->prot_chk)
+ return 0;
+ *
+ */
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ if (ql2xenablehba_err_chk >= 1)
+ return 1;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ if (ql2xenablehba_err_chk >= 2)
+ return 1;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+qla2x00_reset_active(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+
+ /* Test appropriate base-vha and vha flags. */
+ return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+}
+
+static inline srb_t *
+qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(ha->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_VHA_MARK_NOT_BUSY(vha);
+ return sp;
+}
+
+static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+ mempool_free(sp, vha->hw->srb_mempool);
+ QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
+static inline void
+qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+ init_timer(&sp->u.iocb_cmd.timer);
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->u.iocb_cmd.timer.data = (unsigned long)sp;
+ sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
+ add_timer(&sp->u.iocb_cmd.timer);
+ sp->free = qla2x00_sp_free;
+ if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
+ (sp->type == SRB_FXIOCB_DCMD))
+ init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+}
+
+static inline int
+qla2x00_gid_list_size(struct qla_hw_data *ha)
+{
+ if (IS_QLAFX00(ha))
+ return sizeof(uint32_t) * 32;
+ else
+ return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+}
+
+static inline void
+qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
+{
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
+static inline void
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+{
+ if (retry_delay)
+ fcport->retry_delay_timestamp = jiffies +
+ (retry_delay * HZ / 10);
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
new file mode 100644
index 000000000..a1ab25fca
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -0,0 +1,2857 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+
+#include <scsi/scsi_tcq.h>
+
+static void qla25xx_set_que(srb_t *, struct rsp_que **);
+/**
+ * qla2x00_get_cmd_direction() - Determine control_flag data direction.
+ * @cmd: SCSI command
+ *
+ * Returns the proper CF_* direction based on CDB.
+ */
+static inline uint16_t
+qla2x00_get_cmd_direction(srb_t *sp)
+{
+ uint16_t cflags;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+
+ cflags = 0;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cflags = CF_WRITE;
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cflags = CF_READ;
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
+ }
+ return (cflags);
+}
+
+/**
+ * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
+ * Continuation Type 0 IOCBs to allocate.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+uint16_t
+qla2x00_calc_iocbs_32(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 3) {
+ iocbs += (dsds - 3) / 7;
+ if ((dsds - 3) % 7)
+ iocbs++;
+ }
+ return (iocbs);
+}
+
+/**
+ * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+uint16_t
+qla2x00_calc_iocbs_64(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 2) {
+ iocbs += (dsds - 2) / 5;
+ if ((dsds - 2) % 5)
+ iocbs++;
+ }
+ return (iocbs);
+}
+
+/**
+ * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
+ * @ha: HA context
+ *
+ * Returns a pointer to the Continuation Type 0 IOCB packet.
+ */
+static inline cont_entry_t *
+qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
+{
+ cont_entry_t *cont_pkt;
+ struct req_que *req = vha->req;
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ cont_pkt = (cont_entry_t *)req->ring_ptr;
+
+ /* Load packet defaults. */
+ *((uint32_t *)(&cont_pkt->entry_type)) =
+ __constant_cpu_to_le32(CONTINUE_TYPE);
+
+ return (cont_pkt);
+}
+
+/**
+ * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
+ * @ha: HA context
+ *
+ * Returns a pointer to the continuation type 1 IOCB packet.
+ */
+static inline cont_a64_entry_t *
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
+{
+ cont_a64_entry_t *cont_pkt;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+ /* Load packet defaults. */
+ *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
+ __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
+ __constant_cpu_to_le32(CONTINUE_A64_TYPE);
+
+ return (cont_pkt);
+}
+
+static inline int
+qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
+{
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint8_t guard = scsi_host_get_guard(cmd->device->host);
+
+ /* We always use DIFF Bundling for best performance */
+ *fw_prot_opts = 0;
+
+ /* Translate SCSI opcode to a protection opcode */
+ switch (scsi_get_prot_op(cmd)) {
+ case SCSI_PROT_READ_STRIP:
+ *fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ *fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ *fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ *fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ if (guard & SHOST_DIX_GUARD_IP)
+ *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+ else
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ default: /* Normal Request */
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ }
+
+ return scsi_prot_sg_count(cmd);
+}
+
+/*
+ * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
+ * capable IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 2 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
+
+ cmd = GET_CMD_SP(sp);
+
+ /* Update entry type to indicate Command Type 2 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ vha = sp->fcport->vha;
+ cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
+
+ /* Three DSDs are available in the Command Type 2 IOCB */
+ avail_dsds = 3;
+ cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ cont_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Seven DSDs are available in the Continuation
+ * Type 0 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
+ cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+ avail_dsds = 7;
+ }
+
+ *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+}
+
+/**
+ * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
+ * capable IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 3 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
+
+ cmd = GET_CMD_SP(sp);
+
+ /* Update entry type to indicate Command Type 3 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_A64_TYPE);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ vha = sp->fcport->vha;
+ cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
+
+ /* Two DSDs are available in the Command Type 3 IOCB */
+ avail_dsds = 2;
+ cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+}
+
+/**
+ * qla2x00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2x00_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ cmd_entry_t *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_2xxx __iomem *reg;
+ struct qla_hw_data *ha;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ /* Setup device pointers. */
+ ret = 0;
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+ reg = &ha->iobase->isp;
+ cmd = GET_CMD_SP(sp);
+ req = ha->req_q_map[0];
+ rsp = ha->rsp_q_map[0];
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ return (QLA_FUNCTION_FAILED);
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ /* Calculate the number of request entries needed. */
+ req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ /* If still no head room then bail out */
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (cmd_entry_t *)req->ring_ptr;
+ cmd_pkt->handle = handle;
+ /* Zero out remaining portion of packet. */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set target ID and LUN number*/
+ SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
+ cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
+ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla2x00_process_response_queue(rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return (QLA_SUCCESS);
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return (QLA_FUNCTION_FAILED);
+}
+
+/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+
+ if (IS_P3P_TYPE(ha)) {
+ qla82xx_start_iocbs(vha);
+ } else {
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+ } else if (IS_QLAFX00(ha)) {
+ WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+ QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
+ }
+}
+
+/**
+ * qla2x00_marker() - Send a marker IOCB to the firmware.
+ * @ha: HA context
+ * @loop_id: loop ID
+ * @lun: LUN
+ * @type: marker modifier
+ *
+ * Can be called from both normal and interrupt context.
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp, uint16_t loop_id,
+ uint64_t lun, uint8_t type)
+{
+ mrk_entry_t *mrk;
+ struct mrk_entry_24xx *mrk24 = NULL;
+
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ req = ha->req_q_map[0];
+ mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (mrk == NULL) {
+ ql_log(ql_log_warn, base_vha, 0x3026,
+ "Failed to allocate Marker IOCB.\n");
+
+ return (QLA_FUNCTION_FAILED);
+ }
+
+ mrk->entry_type = MARKER_TYPE;
+ mrk->modifier = type;
+ if (type != MK_SYNC_ALL) {
+ if (IS_FWI2_CAPABLE(ha)) {
+ mrk24 = (struct mrk_entry_24xx *) mrk;
+ mrk24->nport_handle = cpu_to_le16(loop_id);
+ int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
+ host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+ mrk24->vp_index = vha->vp_idx;
+ mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
+ } else {
+ SET_TARGET_ID(ha, mrk->target, loop_id);
+ mrk->lun = cpu_to_le16((uint16_t)lun);
+ }
+ }
+ wmb();
+
+ qla2x00_start_iocbs(vha, req);
+
+ return (QLA_SUCCESS);
+}
+
+int
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
+ uint8_t type)
+{
+ int ret;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ return (ret);
+}
+
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by ha_locked parameter.
+ * Might release it, then reaquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+ if (ha_locked) {
+ if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ } else {
+ if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+
+ return QLA_SUCCESS;
+}
+
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
+
+ cmd = GET_CMD_SP(sp);
+
+ /* Update entry type to indicate Command Type 3 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = GET_CMD_CTX_SP(sp);
+
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+ } else {
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(dsd_list_len);
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg = sg_next(cur_seg);
+ avail_dsds--;
+ }
+ }
+
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
+}
+
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD list required
+ * for Command Type 6.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of dsd list needed to store @dsds.
+ */
+inline uint16_t
+qla24xx_calc_dsd_lists(uint16_t dsds)
+{
+ uint16_t dsd_lists = 0;
+
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
+}
+
+
+/**
+ * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
+ * IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 3 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+inline void
+qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
+ struct req_que *req;
+
+ cmd = GET_CMD_SP(sp);
+
+ /* Update entry type to indicate Command Type 3 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_7);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ vha = sp->fcport->vha;
+ req = vha->req;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->task_mgmt_flags =
+ __constant_cpu_to_le16(TMF_WRITE_DATA);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->task_mgmt_flags =
+ __constant_cpu_to_le16(TMF_READ_DATA);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
+ }
+
+ /* One DSD is available in the Command Type 3 IOCB */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+ /* Load data segments */
+
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+}
+
+struct fw_dif_context {
+ uint32_t ref_tag;
+ uint16_t app_tag;
+ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
+ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
+};
+
+/*
+ * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
+ *
+ */
+static inline void
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
+ unsigned int protcnt)
+{
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ switch (scsi_get_prot_type(cmd)) {
+ case SCSI_PROT_DIF_TYPE0:
+ /*
+ * No check for ql2xenablehba_err_chk, as it would be an
+ * I/O error if hba tag generation is not done.
+ */
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
+ break;
+
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
+ * match LBA in CDB + N
+ */
+ case SCSI_PROT_DIF_TYPE2:
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
+ break;
+
+ /* For Type 3 protection: 16 bit GUARD only */
+ case SCSI_PROT_DIF_TYPE3:
+ pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
+ pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
+ 0x00;
+ break;
+
+ /*
+ * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+ * 16 bit app tag.
+ */
+ case SCSI_PROT_DIF_TYPE1:
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
+ break;
+ }
+}
+
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for book keeping, bzero on initial invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
+
+static int
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
+ uint32_t *partial)
+{
+ struct scatterlist *sg;
+ uint32_t cumulative_partial, sg_len;
+ dma_addr_t sg_dma_addr;
+
+ if (sgx->num_bytes == sgx->tot_bytes)
+ return 0;
+
+ sg = sgx->cur_sg;
+ cumulative_partial = sgx->tot_partial;
+
+ sg_dma_addr = sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
+
+ if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
+ sgx->dma_len = (blk_sz - cumulative_partial);
+ sgx->tot_partial = 0;
+ sgx->num_bytes += blk_sz;
+ *partial = 0;
+ } else {
+ sgx->dma_len = sg_len - sgx->bytes_consumed;
+ sgx->tot_partial += sgx->dma_len;
+ *partial = 1;
+ }
+
+ sgx->bytes_consumed += sgx->dma_len;
+
+ if (sg_len == sgx->bytes_consumed) {
+ sg = sg_next(sg);
+ sgx->num_sg++;
+ sgx->cur_sg = sg;
+ sgx->bytes_consumed = 0;
+ }
+
+ return 1;
+}
+
+int
+qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg_prot;
+ uint32_t *cur_dsd = dsd;
+ uint16_t used_dsds = tot_dsds;
+
+ uint32_t prot_int; /* protection interval */
+ uint32_t partial;
+ struct qla2_sgx sgx;
+ dma_addr_t sle_dma;
+ uint32_t sle_dma_len, tot_prot_dma_len = 0;
+ struct scsi_cmnd *cmd;
+ struct scsi_qla_host *vha;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ if (sp) {
+ vha = sp->fcport->vha;
+ cmd = GET_CMD_SP(sp);
+ prot_int = cmd->device->sector_size;
+
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ sg_prot = scsi_prot_sglist(cmd);
+ } else if (tc) {
+ vha = tc->vha;
+ prot_int = tc->blk_sz;
+ sgx.tot_bytes = tc->bufflen;
+ sgx.cur_sg = tc->sg;
+ sg_prot = tc->prot_sg;
+ } else {
+ BUG();
+ return 1;
+ }
+
+ while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
+
+ sle_dma = sgx.dma_addr;
+ sle_dma_len = sgx.dma_len;
+alloc_and_fill:
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
+
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sle_dma_len);
+ avail_dsds--;
+
+ if (partial == 0) {
+ /* Got a full protection interval */
+ sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
+ sle_dma_len = 8;
+
+ tot_prot_dma_len += sle_dma_len;
+ if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
+ tot_prot_dma_len = 0;
+ sg_prot = sg_next(sg_prot);
+ }
+
+ partial = 1; /* So as to not re-enter this block */
+ goto alloc_and_fill;
+ }
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
+int
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
+ uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg, *sgl;
+ uint32_t *cur_dsd = dsd;
+ int i;
+ uint16_t used_dsds = tot_dsds;
+ struct scsi_cmnd *cmd;
+ struct scsi_qla_host *vha;
+
+ if (sp) {
+ cmd = GET_CMD_SP(sp);
+ sgl = scsi_sglist(cmd);
+ vha = sp->fcport->vha;
+ } else if (tc) {
+ sgl = tc->sg;
+ vha = tc->vha;
+ } else {
+ BUG();
+ return 1;
+ }
+
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ sle_dma = sg_dma_address(sg);
+
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
+int
+qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg, *sgl;
+ int i;
+ struct scsi_cmnd *cmd;
+ uint32_t *cur_dsd = dsd;
+ uint16_t used_dsds = tot_dsds;
+ struct scsi_qla_host *vha;
+
+ if (sp) {
+ cmd = GET_CMD_SP(sp);
+ sgl = scsi_prot_sglist(cmd);
+ vha = sp->fcport->vha;
+ } else if (tc) {
+ vha = tc->vha;
+ sgl = tc->prot_sg;
+ } else {
+ BUG();
+ return 1;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe021,
+ "%s: enter\n", __func__);
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ sle_dma = sg_dma_address(sg);
+
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+
+ avail_dsds--;
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
+/**
+ * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
+ * Type 6 IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 3 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+static inline int
+qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+ uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
+{
+ uint32_t *cur_dsd, *fcp_dl;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ int sgc;
+ uint32_t total_bytes = 0;
+ uint32_t data_bytes;
+ uint32_t dif_bytes;
+ uint8_t bundling = 1;
+ uint16_t blk_size;
+ uint8_t *clr_ptr;
+ struct crc_context *crc_ctx_pkt = NULL;
+ struct qla_hw_data *ha;
+ uint8_t additional_fcpcdb_len;
+ uint16_t fcp_cmnd_len;
+ struct fcp_cmnd *fcp_cmnd;
+ dma_addr_t crc_ctx_dma;
+
+ cmd = GET_CMD_SP(sp);
+
+ sgc = 0;
+ /* Update entry type to indicate Command Type CRC_2 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* No data transfer */
+ data_bytes = scsi_bufflen(cmd);
+ if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return QLA_SUCCESS;
+ }
+
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ }
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
+ bundling = 0;
+
+ /* Allocate CRC context from global pool */
+ crc_ctx_pkt = sp->u.scmd.ctx =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+
+ if (!crc_ctx_pkt)
+ goto crc_queuing_error;
+
+ /* Zero out CTX area. */
+ clr_ptr = (uint8_t *)crc_ctx_pkt;
+ memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+ crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+
+ sp->flags |= SRB_CRC_CTX_DMA_VALID;
+
+ /* Set handle */
+ crc_ctx_pkt->handle = cmd_pkt->handle;
+
+ INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+ qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
+ &crc_ctx_pkt->ref_tag, tot_prot_dsds);
+
+ cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+ cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+ /* Determine SCSI command length -- align to 4 byte boundary */
+ if (cmd->cmd_len > 16) {
+ additional_fcpcdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI cmd > 16 bytes must be multiple of 4 */
+ goto crc_queuing_error;
+ }
+ fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_fcpcdb_len = 0;
+ fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
+
+ fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->additional_cdb_len |= 2;
+
+ int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
+ memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
+ LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
+ MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ fcp_cmnd->task_management = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
+
+ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+
+ /* Compute dif len and adjust data len to incude protection */
+ dif_bytes = 0;
+ blk_size = cmd->device->sector_size;
+ dif_bytes = (data_bytes / blk_size) * 8;
+
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ total_bytes = data_bytes;
+ data_bytes += dif_bytes;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ total_bytes = data_bytes + dif_bytes;
+ break;
+ default:
+ BUG();
+ }
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+ /* HBA error checking enabled */
+ else if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+ || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+ SCSI_PROT_DIF_TYPE2))
+ fw_prot_opts |= BIT_10;
+ else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+ SCSI_PROT_DIF_TYPE3)
+ fw_prot_opts |= BIT_11;
+ }
+
+ if (!bundling) {
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ } else {
+ /*
+ * Configure Bundling if we need to fetch interlaving
+ * protection PCI accesses
+ */
+ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+ crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+ crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
+ tot_prot_dsds);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ }
+
+ /* Finish the common fields of CRC pkt */
+ crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
+ crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+ crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+ crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+ /* Fibre channel byte count */
+ cmd_pkt->byte_count = cpu_to_le32(total_bytes);
+ fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ additional_fcpcdb_len);
+ *fcp_dl = htonl(total_bytes);
+
+ if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return QLA_SUCCESS;
+ }
+ /* Walks data segments */
+
+ cmd_pkt->control_flags |=
+ __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+
+ if (!bundling && tot_prot_dsds) {
+ if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
+ cur_dsd, tot_dsds, NULL))
+ goto crc_queuing_error;
+ } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+ (tot_dsds - tot_prot_dsds), NULL))
+ goto crc_queuing_error;
+
+ if (bundling && tot_prot_dsds) {
+ /* Walks dif segments */
+ cmd_pkt->control_flags |=
+ __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
+ tot_prot_dsds, NULL))
+ goto crc_queuing_error;
+ }
+ return QLA_SUCCESS;
+
+crc_queuing_error:
+ /* Cleanup will be performed by the caller */
+
+ return QLA_FUNCTION_FAILED;
+}
+
+/**
+ * qla24xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_type_7 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Setup device pointers. */
+ ret = 0;
+
+ qla25xx_set_que(sp, &rsp);
+ req = vha->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ cmd_pkt->task = TSK_SIMPLE;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where completion should happen */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+/**
+ * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_dif_start_scsi(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Only process protection or >16 cdb in this routine */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+ if (cmd->cmd_len <= 16)
+ return qla24xx_start_scsi(sp);
+ }
+
+ /* Setup device pointers. */
+
+ qla25xx_set_que(sp, &rsp);
+ req = vha->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where completion should happen */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ cmd_pkt->timeout = __constant_cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_FUNCTION_FAILED;
+}
+
+
+static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+{
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ int affinity = cmd->request->cpu;
+
+ if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
+ affinity < ha->max_rsp_queues - 1)
+ *rsp = ha->rsp_q_map[affinity + 1];
+ else
+ *rsp = ha->rsp_q_map[0];
+}
+
+/* Generic Control-SRB manipulation functions. */
+
+/* hardware_lock assumed to be held. */
+void *
+qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
+{
+ if (qla2x00_reset_active(vha))
+ return NULL;
+
+ return qla2x00_alloc_iocbs(vha, sp);
+}
+
+void *
+qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ uint32_t index, handle;
+ request_t *pkt;
+ uint16_t cnt, req_cnt;
+
+ pkt = NULL;
+ req_cnt = 1;
+ handle = 0;
+
+ if (!sp)
+ goto skip_cmd_array;
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds) {
+ ql_log(ql_log_warn, vha, 0x700b,
+ "No room on outstanding cmd array.\n");
+ goto queuing_error;
+ }
+
+ /* Prep command array. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+
+ /* Adjust entry-counts as needed. */
+ if (sp->type != SRB_SCSI_CMD)
+ req_cnt = sp->iocbs;
+
+skip_cmd_array:
+ /* Check for room on request queue. */
+ if (req->cnt < req_cnt + 2) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ else if (IS_P3P_TYPE(ha))
+ cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ else if (IS_FWI2_CAPABLE(ha))
+ cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ else if (IS_QLAFX00(ha))
+ cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+ else
+ cnt = qla2x00_debounce_register(
+ ISP_REQ_Q_OUT(ha, &reg->isp));
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < req_cnt + 2)
+ goto queuing_error;
+
+ /* Prep packet */
+ req->cnt -= req_cnt;
+ pkt = req->ring_ptr;
+ memset(pkt, 0, REQUEST_ENTRY_SIZE);
+ if (IS_QLAFX00(ha)) {
+ WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
+ WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+ } else {
+ pkt->entry_count = req_cnt;
+ pkt->handle = handle;
+ }
+
+queuing_error:
+ return pkt;
+}
+
+static void
+qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
+ logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
+ logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
+static void
+qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ uint16_t opts;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+ mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
+ opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
+ opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
+ if (HAS_EXTENDED_IDS(ha)) {
+ mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+ mbx->mb10 = cpu_to_le16(opts);
+ } else {
+ mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
+ }
+ mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
+ mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
+ sp->fcport->d_id.b.al_pa);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+}
+
+static void
+qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
+static void
+qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+ mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
+ mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
+ cpu_to_le16(sp->fcport->loop_id):
+ cpu_to_le16(sp->fcport->loop_id << 8);
+ mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
+ mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
+ sp->fcport->d_id.b.al_pa);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ /* Implicit: mbx->mbx10 = 0. */
+}
+
+static void
+qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
+static void
+qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+ mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
+ if (HAS_EXTENDED_IDS(ha)) {
+ mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+ mbx->mb10 = cpu_to_le16(BIT_0);
+ } else {
+ mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
+ }
+ mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
+ mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
+ mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
+ mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+}
+
+static void
+qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
+{
+ uint32_t flags;
+ uint64_t lun;
+ struct fc_port *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
+ struct req_que *req = vha->req;
+
+ flags = iocb->u.tmf.flags;
+ lun = iocb->u.tmf.lun;
+
+ tsk->entry_type = TSK_MGMT_IOCB_TYPE;
+ tsk->entry_count = 1;
+ tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+ tsk->nport_handle = cpu_to_le16(fcport->loop_id);
+ tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ tsk->control_flags = cpu_to_le32(flags);
+ tsk->port_id[0] = fcport->d_id.b.al_pa;
+ tsk->port_id[1] = fcport->d_id.b.area;
+ tsk->port_id[2] = fcport->d_id.b.domain;
+ tsk->vp_index = fcport->vha->vp_idx;
+
+ if (flags == TCF_LUN_RESET) {
+ int_to_scsilun(lun, &tsk->lun);
+ host_to_fcp_swap((uint8_t *)&tsk->lun,
+ sizeof(tsk->lun));
+ }
+}
+
+static void
+qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = sp->handle;
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ els_iocb->vp_index = sp->fcport->vha->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+
+ els_iocb->opcode =
+ sp->type == SRB_ELS_CMD_RPT ?
+ bsg_job->request->rqst_data.r_els.els_code :
+ bsg_job->request->rqst_data.h_els.command_code;
+ els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->control_flags = 0;
+ els_iocb->rx_byte_count =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+ els_iocb->tx_byte_count =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+
+ els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ els_iocb->tx_len = cpu_to_le32(sg_dma_len
+ (bsg_job->request_payload.sg_list));
+
+ els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ els_iocb->rx_len = cpu_to_le32(sg_dma_len
+ (bsg_job->reply_payload.sg_list));
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
+static void
+qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ struct scatterlist *sg;
+ int index;
+ uint16_t tot_dsds;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ int loop_iterartion = 0;
+ int cont_iocb_prsnt = 0;
+ int entry_count = 1;
+
+ memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
+ ct_iocb->entry_type = CT_IOCB_TYPE;
+ ct_iocb->entry_status = 0;
+ ct_iocb->handle1 = sp->handle;
+ SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
+ ct_iocb->status = __constant_cpu_to_le16(0);
+ ct_iocb->control_flags = __constant_cpu_to_le16(0);
+ ct_iocb->timeout = 0;
+ ct_iocb->cmd_dsd_count =
+ __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ ct_iocb->total_dsd_count =
+ __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
+ ct_iocb->req_bytecount =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+ ct_iocb->rsp_bytecount =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+
+ ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
+
+ ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
+
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
+ index = 0;
+ tot_dsds = bsg_job->reply_payload.sg_cnt;
+
+ for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ cont_iocb_prsnt = 1;
+ entry_count++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ loop_iterartion++;
+ avail_dsds--;
+ }
+ ct_iocb->entry_count = entry_count;
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
+static void
+qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ struct scatterlist *sg;
+ int index;
+ uint16_t tot_dsds;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+ int loop_iterartion = 0;
+ int cont_iocb_prsnt = 0;
+ int entry_count = 1;
+
+ ct_iocb->entry_type = CT_IOCB_TYPE;
+ ct_iocb->entry_status = 0;
+ ct_iocb->sys_define = 0;
+ ct_iocb->handle = sp->handle;
+
+ ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ ct_iocb->vp_index = sp->fcport->vha->vp_idx;
+ ct_iocb->comp_status = __constant_cpu_to_le16(0);
+
+ ct_iocb->cmd_dsd_count =
+ __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ ct_iocb->timeout = 0;
+ ct_iocb->rsp_dsd_count =
+ __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ ct_iocb->rsp_byte_count =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+ ct_iocb->cmd_byte_count =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+ ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
+ (bsg_job->request_payload.sg_list));
+
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
+ index = 0;
+ tot_dsds = bsg_job->reply_payload.sg_cnt;
+
+ for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ cont_iocb_prsnt = 1;
+ entry_count++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ loop_iterartion++;
+ avail_dsds--;
+ }
+ ct_iocb->entry_count = entry_count;
+}
+
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = GET_CMD_SP(sp);
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = sp->u.scmd.ctx =
+ mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI command bigger than 16 bytes must be
+ * multiple of 4
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 "
+ "for cmd=%p.\n", cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->u.scmd.ctx) {
+ mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
+ sp->u.scmd.ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+ struct srb_iocb *aio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+
+ memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+ abt_iocb->entry_type = ABORT_IOCB_TYPE;
+ abt_iocb->entry_count = 1;
+ abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ abt_iocb->handle_to_abort =
+ cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+ abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ abt_iocb->vp_index = vha->vp_idx;
+ abt_iocb->req_que_no = cpu_to_le16(req->id);
+ /* Send the command to the firmware */
+ wmb();
+}
+
+int
+qla2x00_start_sp(srb_t *sp)
+{
+ int rval;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *pkt;
+ unsigned long flags;
+
+ rval = QLA_FUNCTION_FAILED;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
+ if (!pkt) {
+ ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
+ goto done;
+ }
+
+ rval = QLA_SUCCESS;
+ switch (sp->type) {
+ case SRB_LOGIN_CMD:
+ IS_FWI2_CAPABLE(ha) ?
+ qla24xx_login_iocb(sp, pkt) :
+ qla2x00_login_iocb(sp, pkt);
+ break;
+ case SRB_LOGOUT_CMD:
+ IS_FWI2_CAPABLE(ha) ?
+ qla24xx_logout_iocb(sp, pkt) :
+ qla2x00_logout_iocb(sp, pkt);
+ break;
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ qla24xx_els_iocb(sp, pkt);
+ break;
+ case SRB_CT_CMD:
+ IS_FWI2_CAPABLE(ha) ?
+ qla24xx_ct_iocb(sp, pkt) :
+ qla2x00_ct_iocb(sp, pkt);
+ break;
+ case SRB_ADISC_CMD:
+ IS_FWI2_CAPABLE(ha) ?
+ qla24xx_adisc_iocb(sp, pkt) :
+ qla2x00_adisc_iocb(sp, pkt);
+ break;
+ case SRB_TM_CMD:
+ IS_QLAFX00(ha) ?
+ qlafx00_tm_iocb(sp, pkt) :
+ qla24xx_tm_iocb(sp, pkt);
+ break;
+ case SRB_FXIOCB_DCMD:
+ case SRB_FXIOCB_BCMD:
+ qlafx00_fxdisc_iocb(sp, pkt);
+ break;
+ case SRB_ABT_CMD:
+ IS_QLAFX00(ha) ?
+ qlafx00_abort_iocb(sp, pkt) :
+ qla24xx_abort_iocb(sp, pkt);
+ break;
+ default:
+ break;
+ }
+
+ wmb();
+ qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
+done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return rval;
+}
+
+static void
+qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
+ struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ uint32_t req_data_len = 0;
+ uint32_t rsp_data_len = 0;
+ struct scatterlist *sg;
+ int index;
+ int entry_count = 1;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+ /*Update entry type to indicate bidir command */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+
+ /* Set the transfer direction, in this set both flags
+ * Also set the BD_WRAP_BACK flag, firmware will take care
+ * assigning DID=SID for outgoing pkts.
+ */
+ cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+ BD_WRAP_BACK);
+
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
+ cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
+ cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+
+ vha->bidi_stats.transfer_bytes += req_data_len;
+ vha->bidi_stats.io_count++;
+
+ vha->qla_stats.output_bytes += req_data_len;
+ vha->qla_stats.output_requests++;
+
+ /* Only one dsd is available for bidirectional IOCB, remaining dsds
+ * are bundled in continuation iocb
+ */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+
+ index = 0;
+
+ for_each_sg(bsg_job->request_payload.sg_list, sg,
+ bsg_job->request_payload.sg_cnt, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets */
+ if (avail_dsds == 0) {
+ /* Continuation type 1 IOCB can accomodate
+ * 5 DSDS
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ entry_count++;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+ /* For read request DSD will always goes to continuation IOCB
+ * and follow the write DSD. If there is room on the current IOCB
+ * then it is added to that IOCB else new continuation IOCB is
+ * allocated.
+ */
+ for_each_sg(bsg_job->reply_payload.sg_list, sg,
+ bsg_job->reply_payload.sg_cnt, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets */
+ if (avail_dsds == 0) {
+ /* Continuation type 1 IOCB can accomodate
+ * 5 DSDS
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ entry_count++;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+ /* This value should be same as number of IOCB required for this cmd */
+ cmd_pkt->entry_count = entry_count;
+}
+
+int
+qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
+{
+
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ uint32_t handle;
+ uint32_t index;
+ uint16_t req_cnt;
+ uint16_t cnt;
+ uint32_t *clr_ptr;
+ struct cmd_bidir *cmd_pkt = NULL;
+ struct rsp_que *rsp;
+ struct req_que *req;
+ int rval = EXT_STATUS_OK;
+
+ rval = QLA_SUCCESS;
+
+ rsp = ha->rsp_q_map[0];
+ req = vha->req;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ return EXT_STATUS_MAILBOX;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds) {
+ rval = EXT_STATUS_BUSY;
+ goto queuing_error;
+ }
+
+ /* Calculate number of IOCB required */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ /* Check for room on request queue. */
+ if (req->cnt < req_cnt + 2) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < req_cnt + 2) {
+ rval = EXT_STATUS_BUSY;
+ goto queuing_error;
+ }
+
+ cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID (of vha)*/
+ cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
+ cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = vha->d_id.b.area;
+ cmd_pkt->port_id[2] = vha->d_id.b.domain;
+
+ qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ req->cnt -= req_cnt;
+
+ /* Send the command to the firmware */
+ wmb();
+ qla2x00_start_iocbs(vha, req);
+queuing_error:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
new file mode 100644
index 000000000..6dc14cd78
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -0,0 +1,3210 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_eh.h>
+
+static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
+static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
+static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
+static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+ sts_entry_t *);
+
+/**
+ * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla2100_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct device_reg_2xxx __iomem *reg;
+ int status;
+ unsigned long iter;
+ uint16_t hccr;
+ uint16_t mb[4];
+ struct rsp_que *rsp;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x505d,
+ "%s: NULL response queue pointer.\n", __func__);
+ return (IRQ_NONE);
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp;
+ status = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 50; iter--; ) {
+ hccr = RD_REG_WORD(&reg->hccr);
+ if (qla2x00_check_reg16_for_disconnect(vha, hccr))
+ break;
+ if (hccr & HCCR_RISC_PAUSE) {
+ if (pci_channel_offline(ha->pdev))
+ break;
+
+ /*
+ * Issue a "HARD" reset in order for the RISC interrupt
+ * bit to be cleared. Schedule a big hammer to get
+ * out of the RISC PAUSED state.
+ */
+ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ RD_REG_WORD(&reg->hccr);
+
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
+ break;
+
+ if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+
+ /* Get mailbox data. */
+ mb[0] = RD_MAILBOX_REG(ha, reg, 0);
+ if (mb[0] > 0x3fff && mb[0] < 0x8000) {
+ qla2x00_mbx_completion(vha, mb[0]);
+ status |= MBX_INTERRUPT;
+ } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
+ mb[1] = RD_MAILBOX_REG(ha, reg, 1);
+ mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+ mb[3] = RD_MAILBOX_REG(ha, reg, 3);
+ qla2x00_async_event(vha, rsp, mb);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_async, vha, 0x5025,
+ "Unrecognized interrupt type (%d).\n",
+ mb[0]);
+ }
+ /* Release mailbox registers. */
+ WRT_REG_WORD(&reg->semaphore, 0);
+ RD_REG_WORD(&reg->semaphore);
+ } else {
+ qla2x00_process_response_queue(rsp);
+
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ }
+ }
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return (IRQ_HANDLED);
+}
+
+bool
+qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+{
+ /* Check for PCI disconnection */
+ if (reg == 0xffffffff) {
+ if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
+ !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
+ !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
+ /*
+ * Schedule this (only once) on the default system
+ * workqueue so that all the adapter workqueues and the
+ * DPC thread can be shutdown cleanly.
+ */
+ schedule_work(&vha->hw->board_disable);
+ }
+ return true;
+ } else
+ return false;
+}
+
+bool
+qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
+{
+ return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
+}
+
+/**
+ * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla2300_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct device_reg_2xxx __iomem *reg;
+ int status;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t hccr;
+ uint16_t mb[4];
+ struct rsp_que *rsp;
+ struct qla_hw_data *ha;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x5058,
+ "%s: NULL response queue pointer.\n", __func__);
+ return (IRQ_NONE);
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp;
+ status = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 50; iter--; ) {
+ stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
+ break;
+ if (stat & HSR_RISC_PAUSED) {
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ break;
+
+ hccr = RD_REG_WORD(&reg->hccr);
+
+ if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
+ ql_log(ql_log_warn, vha, 0x5026,
+ "Parity error -- HCCR=%x, Dumping "
+ "firmware.\n", hccr);
+ else
+ ql_log(ql_log_warn, vha, 0x5027,
+ "RISC paused -- HCCR=%x, Dumping "
+ "firmware.\n", hccr);
+
+ /*
+ * Issue a "HARD" reset in order for the RISC
+ * interrupt bit to be cleared. Schedule a big
+ * hammer to get out of the RISC PAUSED state.
+ */
+ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ RD_REG_WORD(&reg->hccr);
+
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ } else if ((stat & HSR_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla2x00_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+
+ /* Release mailbox registers. */
+ WRT_REG_WORD(&reg->semaphore, 0);
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_MAILBOX_REG(ha, reg, 1);
+ mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+ mb[3] = RD_MAILBOX_REG(ha, reg, 3);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla2x00_process_response_queue(rsp);
+ break;
+ case 0x15:
+ mb[0] = MBA_CMPLT_1_16BIT;
+ mb[1] = MSW(stat);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x16:
+ mb[0] = MBA_SCSI_COMPLETION;
+ mb[1] = MSW(stat);
+ mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5028,
+ "Unrecognized interrupt type (%d).\n", stat & 0xff);
+ break;
+ }
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD_RELAXED(&reg->hccr);
+ }
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return (IRQ_HANDLED);
+}
+
+/**
+ * qla2x00_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+static void
+qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+ uint16_t cnt;
+ uint32_t mboxes;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
+ wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ if (IS_QLA2200(ha) && cnt == 8)
+ wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
+ if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
+ ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
+ else if (mboxes & BIT_0)
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+
+ wptr++;
+ mboxes >>= 1;
+ }
+}
+
+static void
+qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
+{
+ static char *event[] =
+ { "Complete", "Request Notification", "Time Extension" };
+ int rval;
+ struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
+ uint16_t __iomem *wptr;
+ uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
+
+ /* Seed data -- mailbox1 -> mailbox7. */
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+ wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ else if (IS_QLA8044(vha->hw))
+ wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+ else
+ return;
+
+ for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
+ mb[cnt] = RD_REG_WORD(wptr);
+
+ ql_dbg(ql_dbg_async, vha, 0x5021,
+ "Inter-Driver Communication %s -- "
+ "%04x %04x %04x %04x %04x %04x %04x.\n",
+ event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
+ mb[4], mb[5], mb[6]);
+ switch (aen) {
+ /* Handle IDC Error completion case. */
+ case MBA_IDC_COMPLETE:
+ if (mb[1] >> 15) {
+ vha->hw->flags.idc_compl_status = 1;
+ if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
+ complete(&vha->hw->dcbx_comp);
+ }
+ break;
+
+ case MBA_IDC_NOTIFY:
+ /* Acknowledgement needed? [Notify && non-zero timeout]. */
+ timeout = (descr >> 8) & 0xf;
+ ql_dbg(ql_dbg_async, vha, 0x5022,
+ "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
+ vha->host_no, event[aen & 0xff], timeout);
+
+ if (!timeout)
+ return;
+ rval = qla2x00_post_idc_ack_work(vha, mb);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x5023,
+ "IDC failed to post ACK.\n");
+ break;
+ case MBA_IDC_TIME_EXT:
+ vha->hw->idc_extend_tmo = descr;
+ ql_dbg(ql_dbg_async, vha, 0x5087,
+ "%lu Inter-Driver Communication %s -- "
+ "Extend timeout by=%d.\n",
+ vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
+ break;
+ }
+}
+
+#define LS_UNKNOWN 2
+const char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
+{
+ static const char *const link_speeds[] = {
+ "1", "2", "?", "4", "8", "16", "32", "10"
+ };
+#define QLA_LAST_SPEED 7
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return link_speeds[0];
+ else if (speed == 0x13)
+ return link_speeds[QLA_LAST_SPEED];
+ else if (speed < QLA_LAST_SPEED)
+ return link_speeds[speed];
+ else
+ return link_speeds[LS_UNKNOWN];
+}
+
+static void
+qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /*
+ * 8200 AEN Interpretation:
+ * mb[0] = AEN code
+ * mb[1] = AEN Reason code
+ * mb[2] = LSW of Peg-Halt Status-1 Register
+ * mb[6] = MSW of Peg-Halt Status-1 Register
+ * mb[3] = LSW of Peg-Halt Status-2 register
+ * mb[7] = MSW of Peg-Halt Status-2 register
+ * mb[4] = IDC Device-State Register value
+ * mb[5] = IDC Driver-Presence Register value
+ */
+ ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
+ "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
+ mb[0], mb[1], mb[2], mb[6]);
+ ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
+ "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
+ "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
+
+ if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
+ IDC_HEARTBEAT_FAILURE)) {
+ ha->flags.nic_core_hung = 1;
+ ql_log(ql_log_warn, vha, 0x5060,
+ "83XX: F/W Error Reported: Check if reset required.\n");
+
+ if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
+ uint32_t protocol_engine_id, fw_err_code, err_level;
+
+ /*
+ * IDC_PEG_HALT_STATUS_CHANGE interpretation:
+ * - PEG-Halt Status-1 Register:
+ * (LSW = mb[2], MSW = mb[6])
+ * Bits 0-7 = protocol-engine ID
+ * Bits 8-28 = f/w error code
+ * Bits 29-31 = Error-level
+ * Error-level 0x1 = Non-Fatal error
+ * Error-level 0x2 = Recoverable Fatal error
+ * Error-level 0x4 = UnRecoverable Fatal error
+ * - PEG-Halt Status-2 Register:
+ * (LSW = mb[3], MSW = mb[7])
+ */
+ protocol_engine_id = (mb[2] & 0xff);
+ fw_err_code = (((mb[2] & 0xff00) >> 8) |
+ ((mb[6] & 0x1fff) << 8));
+ err_level = ((mb[6] & 0xe000) >> 13);
+ ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
+ "Register: protocol_engine_id=0x%x "
+ "fw_err_code=0x%x err_level=0x%x.\n",
+ protocol_engine_id, fw_err_code, err_level);
+ ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
+ "Register: 0x%x%x.\n", mb[7], mb[3]);
+ if (err_level == ERR_LEVEL_NON_FATAL) {
+ ql_log(ql_log_warn, vha, 0x5063,
+ "Not a fatal error, f/w has recovered "
+ "iteself.\n");
+ } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
+ ql_log(ql_log_fatal, vha, 0x5064,
+ "Recoverable Fatal error: Chip reset "
+ "required.\n");
+ qla83xx_schedule_work(vha,
+ QLA83XX_NIC_CORE_RESET);
+ } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
+ ql_log(ql_log_fatal, vha, 0x5065,
+ "Unrecoverable Fatal error: Set FAILED "
+ "state, reboot required.\n");
+ qla83xx_schedule_work(vha,
+ QLA83XX_NIC_CORE_UNRECOVERABLE);
+ }
+ }
+
+ if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
+ uint16_t peg_fw_state, nw_interface_link_up;
+ uint16_t nw_interface_signal_detect, sfp_status;
+ uint16_t htbt_counter, htbt_monitor_enable;
+ uint16_t sfp_additonal_info, sfp_multirate;
+ uint16_t sfp_tx_fault, link_speed, dcbx_status;
+
+ /*
+ * IDC_NIC_FW_REPORTED_FAILURE interpretation:
+ * - PEG-to-FC Status Register:
+ * (LSW = mb[2], MSW = mb[6])
+ * Bits 0-7 = Peg-Firmware state
+ * Bit 8 = N/W Interface Link-up
+ * Bit 9 = N/W Interface signal detected
+ * Bits 10-11 = SFP Status
+ * SFP Status 0x0 = SFP+ transceiver not expected
+ * SFP Status 0x1 = SFP+ transceiver not present
+ * SFP Status 0x2 = SFP+ transceiver invalid
+ * SFP Status 0x3 = SFP+ transceiver present and
+ * valid
+ * Bits 12-14 = Heartbeat Counter
+ * Bit 15 = Heartbeat Monitor Enable
+ * Bits 16-17 = SFP Additional Info
+ * SFP info 0x0 = Unregocnized transceiver for
+ * Ethernet
+ * SFP info 0x1 = SFP+ brand validation failed
+ * SFP info 0x2 = SFP+ speed validation failed
+ * SFP info 0x3 = SFP+ access error
+ * Bit 18 = SFP Multirate
+ * Bit 19 = SFP Tx Fault
+ * Bits 20-22 = Link Speed
+ * Bits 23-27 = Reserved
+ * Bits 28-30 = DCBX Status
+ * DCBX Status 0x0 = DCBX Disabled
+ * DCBX Status 0x1 = DCBX Enabled
+ * DCBX Status 0x2 = DCBX Exchange error
+ * Bit 31 = Reserved
+ */
+ peg_fw_state = (mb[2] & 0x00ff);
+ nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
+ nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
+ sfp_status = ((mb[2] & 0x0c00) >> 10);
+ htbt_counter = ((mb[2] & 0x7000) >> 12);
+ htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
+ sfp_additonal_info = (mb[6] & 0x0003);
+ sfp_multirate = ((mb[6] & 0x0004) >> 2);
+ sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
+ link_speed = ((mb[6] & 0x0070) >> 4);
+ dcbx_status = ((mb[6] & 0x7000) >> 12);
+
+ ql_log(ql_log_warn, vha, 0x5066,
+ "Peg-to-Fc Status Register:\n"
+ "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
+ "nw_interface_signal_detect=0x%x"
+ "\nsfp_statis=0x%x.\n ", peg_fw_state,
+ nw_interface_link_up, nw_interface_signal_detect,
+ sfp_status);
+ ql_log(ql_log_warn, vha, 0x5067,
+ "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
+ "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
+ htbt_counter, htbt_monitor_enable,
+ sfp_additonal_info, sfp_multirate);
+ ql_log(ql_log_warn, vha, 0x5068,
+ "sfp_tx_fault=0x%x, link_state=0x%x, "
+ "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
+ dcbx_status);
+
+ qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+ }
+
+ if (mb[1] & IDC_HEARTBEAT_FAILURE) {
+ ql_log(ql_log_warn, vha, 0x5069,
+ "Heartbeat Failure encountered, chip reset "
+ "required.\n");
+
+ qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+ }
+ }
+
+ if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
+ ql_log(ql_log_info, vha, 0x506a,
+ "IDC Device-State changed = 0x%x.\n", mb[4]);
+ if (ha->flags.nic_core_reset_owner)
+ return;
+ qla83xx_schedule_work(vha, MBA_IDC_AEN);
+ }
+}
+
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ uint32_t vp_did;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ha->num_vhosts)
+ return ret;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ vp_did = vp->d_id.b24;
+ if (vp_did == rscn_entry) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return ret;
+}
+
+/**
+ * qla2x00_async_event() - Process aynchronous events.
+ * @ha: SCSI driver HA context
+ * @mb: Mailbox registers (0 - 3)
+ */
+void
+qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+{
+ uint16_t handle_cnt;
+ uint16_t cnt, mbx;
+ uint32_t handles[5];
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+ uint32_t rscn_entry, host_pid, tmp_pid;
+ unsigned long flags;
+ fc_port_t *fcport = NULL;
+
+ /* Setup to process RIO completion. */
+ handle_cnt = 0;
+ if (IS_CNA_CAPABLE(ha))
+ goto skip_rio;
+ switch (mb[0]) {
+ case MBA_SCSI_COMPLETION:
+ handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+ handle_cnt = 1;
+ break;
+ case MBA_CMPLT_1_16BIT:
+ handles[0] = mb[1];
+ handle_cnt = 1;
+ mb[0] = MBA_SCSI_COMPLETION;
+ break;
+ case MBA_CMPLT_2_16BIT:
+ handles[0] = mb[1];
+ handles[1] = mb[2];
+ handle_cnt = 2;
+ mb[0] = MBA_SCSI_COMPLETION;
+ break;
+ case MBA_CMPLT_3_16BIT:
+ handles[0] = mb[1];
+ handles[1] = mb[2];
+ handles[2] = mb[3];
+ handle_cnt = 3;
+ mb[0] = MBA_SCSI_COMPLETION;
+ break;
+ case MBA_CMPLT_4_16BIT:
+ handles[0] = mb[1];
+ handles[1] = mb[2];
+ handles[2] = mb[3];
+ handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
+ handle_cnt = 4;
+ mb[0] = MBA_SCSI_COMPLETION;
+ break;
+ case MBA_CMPLT_5_16BIT:
+ handles[0] = mb[1];
+ handles[1] = mb[2];
+ handles[2] = mb[3];
+ handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
+ handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
+ handle_cnt = 5;
+ mb[0] = MBA_SCSI_COMPLETION;
+ break;
+ case MBA_CMPLT_2_32BIT:
+ handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+ handles[1] = le32_to_cpu(
+ ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
+ RD_MAILBOX_REG(ha, reg, 6));
+ handle_cnt = 2;
+ mb[0] = MBA_SCSI_COMPLETION;
+ break;
+ default:
+ break;
+ }
+skip_rio:
+ switch (mb[0]) {
+ case MBA_SCSI_COMPLETION: /* Fast Post */
+ if (!vha->flags.online)
+ break;
+
+ for (cnt = 0; cnt < handle_cnt; cnt++)
+ qla2x00_process_completed_request(vha, rsp->req,
+ handles[cnt]);
+ break;
+
+ case MBA_RESET: /* Reset */
+ ql_dbg(ql_dbg_async, vha, 0x5002,
+ "Asynchronous RESET.\n");
+
+ set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_SYSTEM_ERR: /* System Error */
+ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
+ RD_REG_WORD(&reg24->mailbox7) : 0;
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
+ "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
+
+ ha->isp_ops->fw_dump(vha, 1);
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (mb[1] == 0 && mb[2] == 0) {
+ ql_log(ql_log_fatal, vha, 0x5004,
+ "Unrecoverable Hardware Error: adapter "
+ "marked OFFLINE!\n");
+ vha->flags.online = 0;
+ vha->device_flags |= DFLG_DEV_FAILED;
+ } else {
+ /* Check to see if MPI timeout occurred */
+ if ((mbx & MBX_3) && (ha->port_no == 0))
+ set_bit(MPI_RESET_NEEDED,
+ &vha->dpc_flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ } else if (mb[1] == 0) {
+ ql_log(ql_log_fatal, vha, 0x5005,
+ "Unrecoverable Hardware Error: adapter marked "
+ "OFFLINE!\n");
+ vha->flags.online = 0;
+ vha->device_flags |= DFLG_DEV_FAILED;
+ } else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
+ ql_log(ql_log_warn, vha, 0x5006,
+ "ISP Request Transfer Error (%x).\n", mb[1]);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
+ ql_log(ql_log_warn, vha, 0x5007,
+ "ISP Response Transfer Error.\n");
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
+ ql_dbg(ql_dbg_async, vha, 0x5008,
+ "Asynchronous WAKEUP_THRES.\n");
+
+ break;
+ case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
+ ql_dbg(ql_dbg_async, vha, 0x5009,
+ "LIP occurred (%x).\n", mb[1]);
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ }
+
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
+ vha->flags.management_server_logged_in = 0;
+ qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
+ break;
+
+ case MBA_LOOP_UP: /* Loop Up Event */
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ ha->link_data_rate = PORT_SPEED_1GB;
+ else
+ ha->link_data_rate = mb[1];
+
+ ql_log(ql_log_info, vha, 0x500a,
+ "LOOP UP detected (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
+
+ vha->flags.management_server_logged_in = 0;
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
+ break;
+
+ case MBA_LOOP_DOWN: /* Loop Down Event */
+ mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ ? RD_REG_WORD(&reg24->mailbox4) : 0;
+ mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ : mbx;
+ ql_log(ql_log_info, vha, 0x500b,
+ "LOOP DOWN detected (%x %x %x %x).\n",
+ mb[1], mb[2], mb[3], mbx);
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ /*
+ * In case of loop down, restore WWPN from
+ * NVRAM in case of FA-WWPN capable ISP
+ * Restore for Physical Port only
+ */
+ if (!vha->vp_idx) {
+ if (ha->flags.fawwpn_enabled) {
+ void *wwpn = ha->init_cb->port_name;
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose,
+ vha, 0x0144, "LOOP DOWN detected,"
+ "restore WWPN %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
+
+ clear_bit(VP_CONFIG_OK, &vha->vp_flags);
+ }
+
+ vha->device_flags |= DFLG_NO_CABLE;
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ }
+
+ vha->flags.management_server_logged_in = 0;
+ ha->link_data_rate = PORT_SPEED_UNKNOWN;
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
+ break;
+
+ case MBA_LIP_RESET: /* LIP reset occurred */
+ ql_dbg(ql_dbg_async, vha, 0x500c,
+ "LIP reset occurred (%x).\n", mb[1]);
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ }
+
+ set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ ha->operating_mode = LOOP;
+ vha->flags.management_server_logged_in = 0;
+ qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
+ break;
+
+ /* case MBA_DCBX_COMPLETE: */
+ case MBA_POINT_TO_POINT: /* Point-to-Point */
+ if (IS_QLA2100(ha))
+ break;
+
+ if (IS_CNA_CAPABLE(ha)) {
+ ql_dbg(ql_dbg_async, vha, 0x500d,
+ "DCBX Completed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ if (ha->notify_dcbx_comp && !vha->vp_idx)
+ complete(&ha->dcbx_comp);
+
+ } else
+ ql_dbg(ql_dbg_async, vha, 0x500e,
+ "Asynchronous P2P MODE received.\n");
+
+ /*
+ * Until there's a transition from loop down to loop up, treat
+ * this as loop down only.
+ */
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ }
+
+ if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
+ set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
+ ha->flags.gpsc_supported = 1;
+ vha->flags.management_server_logged_in = 0;
+ break;
+
+ case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
+ if (IS_QLA2100(ha))
+ break;
+
+ ql_dbg(ql_dbg_async, vha, 0x500f,
+ "Configuration change detected: value=%x.\n", mb[1]);
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ }
+
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ break;
+
+ case MBA_PORT_UPDATE: /* Port database update */
+ /*
+ * Handle only global and vn-port update events
+ *
+ * Relevant inputs:
+ * mb[1] = N_Port handle of changed port
+ * OR 0xffff for global event
+ * mb[2] = New login state
+ * 7 = Port logged out
+ * mb[3] = LSB is vp_idx, 0xff = all vps
+ *
+ * Skip processing if:
+ * Event is global, vp_idx is NOT all vps,
+ * vp_idx does not match
+ * Event is not global, vp_idx does not match
+ */
+ if (IS_QLA2XXX_MIDTYPE(ha) &&
+ ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
+ (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
+ break;
+
+ /* Global event -- port logout or port unavailable. */
+ if (mb[1] == 0xffff && mb[2] == 0x7) {
+ ql_dbg(ql_dbg_async, vha, 0x5010,
+ "Port unavailable %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505e,
+ "Link is offline.\n");
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ vha->device_flags |= DFLG_NO_CABLE;
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport,
+ FC_VPORT_FAILED);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+
+ vha->flags.management_server_logged_in = 0;
+ ha->link_data_rate = PORT_SPEED_UNKNOWN;
+ break;
+ }
+
+ /*
+ * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
+ * event etc. earlier indicating loop is down) then process
+ * it. Otherwise ignore it and Wait for RSCN to come in.
+ */
+ atomic_set(&vha->loop_down_timer, 0);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+ atomic_read(&vha->loop_state) != LOOP_DEAD) {
+ ql_dbg(ql_dbg_async, vha, 0x5011,
+ "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
+ mb[1], mb[2], mb[3]);
+
+ qlt_async_event(mb[0], vha, mb);
+ break;
+ }
+
+ ql_dbg(ql_dbg_async, vha, 0x5012,
+ "Port database changed %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+
+ /*
+ * Mark all devices as missing so we will login again.
+ */
+ atomic_set(&vha->loop_state, LOOP_UP);
+
+ qla2x00_mark_all_devices_lost(vha, 1);
+
+ if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+ set_bit(SCR_PENDING, &vha->dpc_flags);
+
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(VP_CONFIG_OK, &vha->vp_flags);
+
+ qlt_async_event(mb[0], vha, mb);
+ break;
+
+ case MBA_RSCN_UPDATE: /* State Change Registration */
+ /* Check if the Vport has issued a SCR */
+ if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
+ break;
+ /* Only handle SCNs for our Vport index. */
+ if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
+ break;
+
+ ql_dbg(ql_dbg_async, vha, 0x5013,
+ "RSCN database changed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+
+ rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
+ host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
+ | vha->d_id.b.al_pa;
+ if (rscn_entry == host_pid) {
+ ql_dbg(ql_dbg_async, vha, 0x5014,
+ "Ignoring RSCN update to local host "
+ "port ID (%06x).\n", host_pid);
+ break;
+ }
+
+ /* Ignore reserved bits from RSCN-payload. */
+ rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
+
+ /* Skip RSCNs for virtual ports on the same physical port */
+ if (qla2x00_is_a_vp_did(vha, rscn_entry))
+ break;
+
+ /*
+ * Search for the rport related to this RSCN entry and mark it
+ * as lost.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&fcport->state) != FCS_ONLINE)
+ continue;
+ tmp_pid = fcport->d_id.b24;
+ if (fcport->d_id.b24 == rscn_entry) {
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ break;
+ }
+ }
+
+ atomic_set(&vha->loop_down_timer, 0);
+ vha->flags.management_server_logged_in = 0;
+
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(RSCN_UPDATE, &vha->dpc_flags);
+ qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
+ break;
+
+ /* case MBA_RIO_RESPONSE: */
+ case MBA_ZIO_RESPONSE:
+ ql_dbg(ql_dbg_async, vha, 0x5015,
+ "[R|Z]IO update completion.\n");
+
+ if (IS_FWI2_CAPABLE(ha))
+ qla24xx_process_response_queue(vha, rsp);
+ else
+ qla2x00_process_response_queue(rsp);
+ break;
+
+ case MBA_DISCARD_RND_FRAME:
+ ql_dbg(ql_dbg_async, vha, 0x5016,
+ "Discard RND Frame -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ break;
+
+ case MBA_TRACE_NOTIFICATION:
+ ql_dbg(ql_dbg_async, vha, 0x5017,
+ "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
+ break;
+
+ case MBA_ISP84XX_ALERT:
+ ql_dbg(ql_dbg_async, vha, 0x5018,
+ "ISP84XX Alert Notification -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+
+ spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
+ switch (mb[1]) {
+ case A84_PANIC_RECOVERY:
+ ql_log(ql_log_info, vha, 0x5019,
+ "Alert 84XX: panic recovery %04x %04x.\n",
+ mb[2], mb[3]);
+ break;
+ case A84_OP_LOGIN_COMPLETE:
+ ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
+ ql_log(ql_log_info, vha, 0x501a,
+ "Alert 84XX: firmware version %x.\n",
+ ha->cs84xx->op_fw_version);
+ break;
+ case A84_DIAG_LOGIN_COMPLETE:
+ ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
+ ql_log(ql_log_info, vha, 0x501b,
+ "Alert 84XX: diagnostic firmware version %x.\n",
+ ha->cs84xx->diag_fw_version);
+ break;
+ case A84_GOLD_LOGIN_COMPLETE:
+ ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
+ ha->cs84xx->fw_update = 1;
+ ql_log(ql_log_info, vha, 0x501c,
+ "Alert 84XX: gold firmware version %x.\n",
+ ha->cs84xx->gold_fw_version);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x501d,
+ "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ }
+ spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
+ break;
+ case MBA_DCBX_START:
+ ql_dbg(ql_dbg_async, vha, 0x501e,
+ "DCBX Started -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ break;
+ case MBA_DCBX_PARAM_UPDATE:
+ ql_dbg(ql_dbg_async, vha, 0x501f,
+ "DCBX Parameters Updated -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ break;
+ case MBA_FCF_CONF_ERR:
+ ql_dbg(ql_dbg_async, vha, 0x5020,
+ "FCF Configuration Error -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
+ break;
+ case MBA_IDC_NOTIFY:
+ if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+ (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+ (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
+ set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ /*
+ * Extend loop down timer since port is active.
+ */
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ case MBA_IDC_COMPLETE:
+ if (ha->notify_lb_portup_comp && !vha->vp_idx)
+ complete(&ha->lb_portup_comp);
+ /* Fallthru */
+ case MBA_IDC_TIME_EXT:
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
+ IS_QLA8044(ha))
+ qla81xx_idc_event(vha, mb[0], mb[1]);
+ break;
+
+ case MBA_IDC_AEN:
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[5] = RD_REG_WORD(&reg24->mailbox5);
+ mb[6] = RD_REG_WORD(&reg24->mailbox6);
+ mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ qla83xx_handle_8200_aen(vha, mb);
+ break;
+
+ case MBA_DPORT_DIAGNOSTICS:
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1],
+ mb[1] == 0 ? "start" :
+ mb[1] == 1 ? "done (ok)" :
+ mb[1] == 2 ? "done (error)" : "other");
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5057,
+ "Unknown AEN:%04x %04x %04x %04x\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ }
+
+ qlt_async_event(mb[0], vha, mb);
+
+ if (!vha->vp_idx && ha->num_vhosts)
+ qla2x00_alert_all_vps(rsp, mb);
+}
+
+/**
+ * qla2x00_process_completed_request() - Process a Fast Post response.
+ * @ha: SCSI driver HA context
+ * @index: SRB index
+ */
+void
+qla2x00_process_completed_request(struct scsi_qla_host *vha,
+ struct req_que *req, uint32_t index)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Validate handle. */
+ if (index >= req->num_outstanding_cmds) {
+ ql_log(ql_log_warn, vha, 0x3014,
+ "Invalid SCSI command index (%x).\n", index);
+
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp = req->outstanding_cmds[index];
+ if (sp) {
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+
+ /* Save ISP completion status */
+ sp->done(ha, sp, DID_OK << 16);
+ } else {
+ ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
+
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+}
+
+srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ sts_entry_t *pkt = iocb;
+ srb_t *sp = NULL;
+ uint16_t index;
+
+ index = LSW(pkt->handle);
+ if (index >= req->num_outstanding_cmds) {
+ ql_log(ql_log_warn, vha, 0x5031,
+ "Invalid command index (%x).\n", index);
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ goto done;
+ }
+ sp = req->outstanding_cmds[index];
+ if (!sp) {
+ ql_log(ql_log_warn, vha, 0x5032,
+ "Invalid completion handle (%x) -- timed-out.\n", index);
+ return sp;
+ }
+ if (sp->handle != index) {
+ ql_log(ql_log_warn, vha, 0x5033,
+ "SRB handle (%x) mismatch %x.\n", sp->handle, index);
+ return NULL;
+ }
+
+ req->outstanding_cmds[index] = NULL;
+
+done:
+ return sp;
+}
+
+static void
+qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mbx_entry *mbx)
+{
+ const char func[] = "MBX-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *lio;
+ uint16_t *data;
+ uint16_t status;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
+ if (!sp)
+ return;
+
+ lio = &sp->u.iocb_cmd;
+ type = sp->name;
+ fcport = sp->fcport;
+ data = lio->u.logio.data;
+
+ data[0] = MBS_COMMAND_ERROR;
+ data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ if (mbx->entry_status) {
+ ql_dbg(ql_dbg_async, vha, 0x5043,
+ "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
+ "entry-status=%x status=%x state-flag=%x "
+ "status-flags=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, mbx->entry_status,
+ le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
+ le16_to_cpu(mbx->status_flags));
+
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
+ (uint8_t *)mbx, sizeof(*mbx));
+
+ goto logio_done;
+ }
+
+ status = le16_to_cpu(mbx->status);
+ if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
+ le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
+ status = 0;
+ if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_async, vha, 0x5045,
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ le16_to_cpu(mbx->mb1));
+
+ data[0] = MBS_COMMAND_COMPLETE;
+ if (sp->type == SRB_LOGIN_CMD) {
+ fcport->port_type = FCT_TARGET;
+ if (le16_to_cpu(mbx->mb1) & BIT_0)
+ fcport->port_type = FCT_INITIATOR;
+ else if (le16_to_cpu(mbx->mb1) & BIT_1)
+ fcport->flags |= FCF_FCP2_DEVICE;
+ }
+ goto logio_done;
+ }
+
+ data[0] = le16_to_cpu(mbx->mb0);
+ switch (data[0]) {
+ case MBS_PORT_ID_USED:
+ data[1] = le16_to_cpu(mbx->mb1);
+ break;
+ case MBS_LOOP_ID_USED:
+ break;
+ default:
+ data[0] = MBS_COMMAND_ERROR;
+ break;
+ }
+
+ ql_log(ql_log_warn, vha, 0x5046,
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+ "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+ le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
+ le16_to_cpu(mbx->mb7));
+
+logio_done:
+ sp->done(vha, sp, 0);
+}
+
+static void
+qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ sts_entry_t *pkt, int iocb_type)
+{
+ const char func[] = "CT_IOCB";
+ const char *type;
+ srb_t *sp;
+ struct fc_bsg_job *bsg_job;
+ uint16_t comp_status;
+ int res;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ bsg_job = sp->u.bsg_job;
+
+ type = "ct pass-through";
+
+ comp_status = le16_to_cpu(pkt->comp_status);
+
+ /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+ * fc payload to the caller
+ */
+ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ if (comp_status != CS_COMPLETE) {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ res = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+
+ ql_log(ql_log_warn, vha, 0x5048,
+ "CT pass-through-%s error "
+ "comp_status-status=0x%x total_byte = 0x%x.\n",
+ type, comp_status,
+ bsg_job->reply->reply_payload_rcv_len);
+ } else {
+ ql_log(ql_log_warn, vha, 0x5049,
+ "CT pass-through-%s error "
+ "comp_status-status=0x%x.\n", type, comp_status);
+ res = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ }
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
+ (uint8_t *)pkt, sizeof(*pkt));
+ } else {
+ res = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ bsg_job->reply_len = 0;
+ }
+
+ sp->done(vha, sp, res);
+}
+
+static void
+qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct sts_entry_24xx *pkt, int iocb_type)
+{
+ const char func[] = "ELS_CT_IOCB";
+ const char *type;
+ srb_t *sp;
+ struct fc_bsg_job *bsg_job;
+ uint16_t comp_status;
+ uint32_t fw_status[3];
+ uint8_t* fw_sts_ptr;
+ int res;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+ bsg_job = sp->u.bsg_job;
+
+ type = NULL;
+ switch (sp->type) {
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ type = "els";
+ break;
+ case SRB_CT_CMD:
+ type = "ct pass-through";
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x503e,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
+ return;
+ }
+
+ comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+ fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
+ fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+
+ /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+ * fc payload to the caller
+ */
+ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
+
+ if (comp_status != CS_COMPLETE) {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ res = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+
+ ql_dbg(ql_dbg_user, vha, 0x503f,
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
+ "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
+ type, sp->handle, comp_status, fw_status[1], fw_status[2],
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count));
+ fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
+ }
+ else {
+ ql_dbg(ql_dbg_user, vha, 0x5040,
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
+ "error subcode 1=0x%x error subcode 2=0x%x.\n",
+ type, sp->handle, comp_status,
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->error_subcode_1),
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->error_subcode_2));
+ res = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
+ }
+ ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
+ (uint8_t *)pkt, sizeof(*pkt));
+ }
+ else {
+ res = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_job->reply_len = 0;
+ }
+
+ sp->done(vha, sp, res);
+}
+
+static void
+qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct logio_entry_24xx *logio)
+{
+ const char func[] = "LOGIO-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *lio;
+ uint16_t *data;
+ uint32_t iop[2];
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
+ if (!sp)
+ return;
+
+ lio = &sp->u.iocb_cmd;
+ type = sp->name;
+ fcport = sp->fcport;
+ data = lio->u.logio.data;
+
+ data[0] = MBS_COMMAND_ERROR;
+ data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ if (logio->entry_status) {
+ ql_log(ql_log_warn, fcport->vha, 0x5034,
+ "Async-%s error entry - hdl=%x"
+ "portid=%02x%02x%02x entry-status=%x.\n",
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ logio->entry_status);
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
+ (uint8_t *)logio, sizeof(*logio));
+
+ goto logio_done;
+ }
+
+ if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ le32_to_cpu(logio->io_parameter[0]));
+
+ data[0] = MBS_COMMAND_COMPLETE;
+ if (sp->type != SRB_LOGIN_CMD)
+ goto logio_done;
+
+ iop[0] = le32_to_cpu(logio->io_parameter[0]);
+ if (iop[0] & BIT_4) {
+ fcport->port_type = FCT_TARGET;
+ if (iop[0] & BIT_8)
+ fcport->flags |= FCF_FCP2_DEVICE;
+ } else if (iop[0] & BIT_5)
+ fcport->port_type = FCT_INITIATOR;
+
+ if (iop[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
+ if (logio->io_parameter[7] || logio->io_parameter[8])
+ fcport->supported_classes |= FC_COS_CLASS2;
+ if (logio->io_parameter[9] || logio->io_parameter[10])
+ fcport->supported_classes |= FC_COS_CLASS3;
+
+ goto logio_done;
+ }
+
+ iop[0] = le32_to_cpu(logio->io_parameter[0]);
+ iop[1] = le32_to_cpu(logio->io_parameter[1]);
+ switch (iop[0]) {
+ case LSC_SCODE_PORTID_USED:
+ data[0] = MBS_PORT_ID_USED;
+ data[1] = LSW(iop[1]);
+ break;
+ case LSC_SCODE_NPORT_USED:
+ data[0] = MBS_LOOP_ID_USED;
+ break;
+ default:
+ data[0] = MBS_COMMAND_ERROR;
+ break;
+ }
+
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ le16_to_cpu(logio->comp_status),
+ le32_to_cpu(logio->io_parameter[0]),
+ le32_to_cpu(logio->io_parameter[1]));
+
+logio_done:
+ sp->done(vha, sp, 0);
+}
+
+static void
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+{
+ const char func[] = "TMF-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+ if (!sp)
+ return;
+
+ iocb = &sp->u.iocb_cmd;
+ type = sp->name;
+ fcport = sp->fcport;
+ iocb->u.tmf.data = QLA_SUCCESS;
+
+ if (sts->entry_status) {
+ ql_log(ql_log_warn, fcport->vha, 0x5038,
+ "Async-%s error - hdl=%x entry-status(%x).\n",
+ type, sp->handle, sts->entry_status);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ ql_log(ql_log_warn, fcport->vha, 0x5039,
+ "Async-%s error - hdl=%x completion status(%x).\n",
+ type, sp->handle, sts->comp_status);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ } else if ((le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID)) {
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ ql_log(ql_log_warn, fcport->vha, 0x503b,
+ "Async-%s error - hdl=%x not enough response(%d).\n",
+ type, sp->handle, sts->rsp_data_len);
+ } else if (sts->data[3]) {
+ ql_log(ql_log_warn, fcport->vha, 0x503c,
+ "Async-%s error - hdl=%x response(%x).\n",
+ type, sp->handle, sts->data[3]);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ if (iocb->u.tmf.data != QLA_SUCCESS)
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
+ (uint8_t *)sts, sizeof(*sts));
+
+ sp->done(vha, sp, 0);
+}
+
+/**
+ * qla2x00_process_response_queue() - Process response queue entries.
+ * @ha: SCSI driver HA context
+ */
+void
+qla2x00_process_response_queue(struct rsp_que *rsp)
+{
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = rsp->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ sts_entry_t *pkt;
+ uint16_t handle_cnt;
+ uint16_t cnt;
+
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (!vha->flags.online)
+ return;
+
+ while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+ pkt = (sts_entry_t *)rsp->ring_ptr;
+
+ rsp->ring_index++;
+ if (rsp->ring_index == rsp->length) {
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
+ } else {
+ rsp->ring_ptr++;
+ }
+
+ if (pkt->entry_status != 0) {
+ qla2x00_error_entry(vha, rsp, pkt);
+ ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ continue;
+ }
+
+ switch (pkt->entry_type) {
+ case STATUS_TYPE:
+ qla2x00_status_entry(vha, rsp, pkt);
+ break;
+ case STATUS_TYPE_21:
+ handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
+ for (cnt = 0; cnt < handle_cnt; cnt++) {
+ qla2x00_process_completed_request(vha, rsp->req,
+ ((sts21_entry_t *)pkt)->handle[cnt]);
+ }
+ break;
+ case STATUS_TYPE_22:
+ handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
+ for (cnt = 0; cnt < handle_cnt; cnt++) {
+ qla2x00_process_completed_request(vha, rsp->req,
+ ((sts22_entry_t *)pkt)->handle[cnt]);
+ }
+ break;
+ case STATUS_CONT_TYPE:
+ qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+ break;
+ case MBX_IOCB_TYPE:
+ qla2x00_mbx_iocb_entry(vha, rsp->req,
+ (struct mbx_entry *)pkt);
+ break;
+ case CT_IOCB_TYPE:
+ qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+ break;
+ default:
+ /* Type Not Supported. */
+ ql_log(ql_log_warn, vha, 0x504a,
+ "Received unknown response pkt type %x "
+ "entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
+ break;
+ }
+ ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ /* Adjust ring index */
+ WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
+}
+
+static inline void
+qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+ uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_cmnd *cp = GET_CMD_SP(sp);
+ uint32_t track_sense_len;
+
+ if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+ sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+ track_sense_len = sense_len;
+
+ if (sense_len > par_sense_len)
+ sense_len = par_sense_len;
+
+ memcpy(cp->sense_buffer, sense_data, sense_len);
+
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+ track_sense_len -= sense_len;
+ SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+ if (track_sense_len != 0) {
+ rsp->status_srb = sp;
+ cp->result = res;
+ }
+
+ if (sense_len) {
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+ "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ cp);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
+ cp->sense_buffer, sense_len);
+ }
+}
+
+struct scsi_dif_tuple {
+ __be16 guard; /* Checksum */
+ __be16 app_tag; /* APPL identifier */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA. In case of errors, we set the
+ * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
+ * to indicate to the kernel that the HBA detected error.
+ */
+static inline int
+qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
+{
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint8_t *ap = &sts24->data[12];
+ uint8_t *ep = &sts24->data[20];
+ uint32_t e_ref_tag, a_ref_tag;
+ uint16_t e_app_tag, a_app_tag;
+ uint16_t e_guard, a_guard;
+
+ /*
+ * swab32 of the "data" field in the beginning of qla2x00_status_entry()
+ * would make guard field appear at offset 2
+ */
+ a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
+ a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
+ a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
+ e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
+ e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
+ e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+
+ ql_dbg(ql_dbg_io, vha, 0x3023,
+ "iocb(s) %p Returned STATUS.\n", sts24);
+
+ ql_dbg(ql_dbg_io, vha, 0x3024,
+ "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
+ " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
+ " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
+ cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
+ a_app_tag, e_app_tag, a_guard, e_guard);
+
+ /*
+ * Ignore sector if:
+ * For type 3: ref & app tag is all 'f's
+ * For type 0,1,2: app tag is all 'f's
+ */
+ if ((a_app_tag == 0xffff) &&
+ ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
+ (a_ref_tag == 0xffffffff))) {
+ uint32_t blocks_done, resid;
+ sector_t lba_s = scsi_get_lba(cmd);
+
+ /* 2TB boundary case covered automatically with this */
+ blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
+
+ resid = scsi_bufflen(cmd) - (blocks_done *
+ cmd->device->sector_size);
+
+ scsi_set_resid(cmd, resid);
+ cmd->result = DID_OK << 16;
+
+ /* Update protection tag */
+ if (scsi_prot_sg_count(cmd)) {
+ uint32_t i, j = 0, k = 0, num_ent;
+ struct scatterlist *sg;
+ struct sd_dif_tuple *spt;
+
+ /* Patch the corresponding protection tags */
+ scsi_for_each_prot_sg(cmd, sg,
+ scsi_prot_sg_count(cmd), i) {
+ num_ent = sg_dma_len(sg) / 8;
+ if (k + num_ent < blocks_done) {
+ k += num_ent;
+ continue;
+ }
+ j = blocks_done - k - 1;
+ k = blocks_done;
+ break;
+ }
+
+ if (k != blocks_done) {
+ ql_log(ql_log_warn, vha, 0x302f,
+ "unexpected tag values tag:lba=%x:%llx)\n",
+ e_ref_tag, (unsigned long long)lba_s);
+ return 1;
+ }
+
+ spt = page_address(sg_page(sg)) + sg->offset;
+ spt += j;
+
+ spt->app_tag = 0xffff;
+ if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
+ spt->ref_tag = 0xffffffff;
+ }
+
+ return 0;
+ }
+
+ /* check guard */
+ if (e_guard != a_guard) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return 1;
+ }
+
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return 1;
+ }
+
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return 1;
+ }
+
+ return 1;
+}
+
+static void
+qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+ struct req_que *req, uint32_t index)
+{
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ uint16_t comp_status;
+ uint16_t scsi_status;
+ uint16_t thread_id;
+ uint32_t rval = EXT_STATUS_OK;
+ struct fc_bsg_job *bsg_job = NULL;
+ sts_entry_t *sts;
+ struct sts_entry_24xx *sts24;
+ sts = (sts_entry_t *) pkt;
+ sts24 = (struct sts_entry_24xx *) pkt;
+
+ /* Validate handle. */
+ if (index >= req->num_outstanding_cmds) {
+ ql_log(ql_log_warn, vha, 0x70af,
+ "Invalid SCSI completion handle 0x%x.\n", index);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp = req->outstanding_cmds[index];
+ if (sp) {
+ /* Free outstanding command slot. */
+ req->outstanding_cmds[index] = NULL;
+ bsg_job = sp->u.bsg_job;
+ } else {
+ ql_log(ql_log_warn, vha, 0x70b0,
+ "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
+ req->id, index);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ comp_status = le16_to_cpu(sts24->comp_status);
+ scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+ } else {
+ comp_status = le16_to_cpu(sts->comp_status);
+ scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+ }
+
+ thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ switch (comp_status) {
+ case CS_COMPLETE:
+ if (scsi_status == 0) {
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ vha->qla_stats.input_bytes +=
+ bsg_job->reply->reply_payload_rcv_len;
+ vha->qla_stats.input_requests++;
+ rval = EXT_STATUS_OK;
+ }
+ goto done;
+
+ case CS_DATA_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b1,
+ "Command completed with date overrun thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_DATA_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b2,
+ "Command completed with date underrun thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+ case CS_BIDIR_RD_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b3,
+ "Command completed with read data overrun thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_BIDIR_RD_WR_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b4,
+ "Command completed with read and write data overrun "
+ "thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b5,
+ "Command completed with read data over and write data "
+ "underrun thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_OVERRUN;
+ break;
+
+ case CS_BIDIR_RD_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b6,
+ "Command completed with read data data underrun "
+ "thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+
+ case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b7,
+ "Command completed with read data under and write data "
+ "overrun thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+
+ case CS_BIDIR_RD_WR_UNDERRUN:
+ ql_dbg(ql_dbg_user, vha, 0x70b8,
+ "Command completed with read and write data underrun "
+ "thread_id=%d\n", thread_id);
+ rval = EXT_STATUS_DATA_UNDERRUN;
+ break;
+
+ case CS_BIDIR_DMA:
+ ql_dbg(ql_dbg_user, vha, 0x70b9,
+ "Command completed with data DMA error thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_DMA_ERR;
+ break;
+
+ case CS_TIMEOUT:
+ ql_dbg(ql_dbg_user, vha, 0x70ba,
+ "Command completed with timeout thread_id=%d\n",
+ thread_id);
+ rval = EXT_STATUS_TIMEOUT;
+ break;
+ default:
+ ql_dbg(ql_dbg_user, vha, 0x70bb,
+ "Command completed with completion status=0x%x "
+ "thread_id=%d\n", comp_status, thread_id);
+ rval = EXT_STATUS_ERR;
+ break;
+ }
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+done:
+ /* Return the vendor specific reply to API */
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ /* Always return DID_OK, bsg will send the vendor specific response
+ * in this case only */
+ sp->done(vha, sp, (DID_OK << 6));
+
+}
+
+/**
+ * qla2x00_status_entry() - Process a Status IOCB entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+ srb_t *sp;
+ fc_port_t *fcport;
+ struct scsi_cmnd *cp;
+ sts_entry_t *sts;
+ struct sts_entry_24xx *sts24;
+ uint16_t comp_status;
+ uint16_t scsi_status;
+ uint16_t ox_id;
+ uint8_t lscsi_status;
+ int32_t resid;
+ uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
+ fw_resid_len;
+ uint8_t *rsp_info, *sense_data;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle;
+ uint16_t que;
+ struct req_que *req;
+ int logit = 1;
+ int res = 0;
+ uint16_t state_flags = 0;
+ uint16_t retry_delay = 0;
+
+ sts = (sts_entry_t *) pkt;
+ sts24 = (struct sts_entry_24xx *) pkt;
+ if (IS_FWI2_CAPABLE(ha)) {
+ comp_status = le16_to_cpu(sts24->comp_status);
+ scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+ state_flags = le16_to_cpu(sts24->state_flags);
+ } else {
+ comp_status = le16_to_cpu(sts->comp_status);
+ scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+ }
+ handle = (uint32_t) LSW(sts->handle);
+ que = MSW(sts->handle);
+ req = ha->req_q_map[que];
+
+ /* Check for invalid queue pointer */
+ if (req == NULL ||
+ que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+ ql_dbg(ql_dbg_io, vha, 0x3059,
+ "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+ "que=%u.\n", sts->handle, req, que);
+ return;
+ }
+
+ /* Validate handle. */
+ if (handle < req->num_outstanding_cmds)
+ sp = req->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (sp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3017,
+ "Invalid status handle (0x%x).\n", sts->handle);
+
+ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ return;
+ }
+
+ if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
+ qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
+ return;
+ }
+
+ /* Task Management completion. */
+ if (sp->type == SRB_TM_CMD) {
+ qla24xx_tm_iocb_entry(vha, req, pkt);
+ return;
+ }
+
+ /* Fast path completion. */
+ if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_process_completed_request(vha, req, handle);
+
+ return;
+ }
+
+ req->outstanding_cmds[handle] = NULL;
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3018,
+ "Command already returned (0x%x/%p).\n",
+ sts->handle, sp);
+
+ return;
+ }
+
+ lscsi_status = scsi_status & STATUS_MASK;
+
+ fcport = sp->fcport;
+
+ ox_id = 0;
+ sense_len = par_sense_len = rsp_info_len = resid_len =
+ fw_resid_len = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (scsi_status & SS_SENSE_LEN_VALID)
+ sense_len = le32_to_cpu(sts24->sense_len);
+ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+ rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+ if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+ resid_len = le32_to_cpu(sts24->rsp_residual_count);
+ if (comp_status == CS_DATA_UNDERRUN)
+ fw_resid_len = le32_to_cpu(sts24->residual_len);
+ rsp_info = sts24->data;
+ sense_data = sts24->data;
+ host_to_fcp_swap(sts24->data, sizeof(sts24->data));
+ ox_id = le16_to_cpu(sts24->ox_id);
+ par_sense_len = sizeof(sts24->data);
+ /* Valid values of the retry delay timer are 0x1-0xffef */
+ if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
+ retry_delay = sts24->retry_delay;
+ } else {
+ if (scsi_status & SS_SENSE_LEN_VALID)
+ sense_len = le16_to_cpu(sts->req_sense_length);
+ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+ rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+ resid_len = le32_to_cpu(sts->residual_length);
+ rsp_info = sts->rsp_info;
+ sense_data = sts->req_sense_data;
+ par_sense_len = sizeof(sts->req_sense_data);
+ }
+
+ /* Check for any FCP transport errors. */
+ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
+ /* Sense data lies beyond any FCP RESPONSE data. */
+ if (IS_FWI2_CAPABLE(ha)) {
+ sense_data += rsp_info_len;
+ par_sense_len -= rsp_info_len;
+ }
+ if (rsp_info_len > 3 && rsp_info[3]) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
+ "FCP I/O protocol failure (0x%x/0x%x).\n",
+ rsp_info_len, rsp_info[3]);
+
+ res = DID_BUS_BUSY << 16;
+ goto out;
+ }
+ }
+
+ /* Check for overrun. */
+ if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
+ scsi_status & SS_RESIDUAL_OVER)
+ comp_status = CS_DATA_OVERRUN;
+
+ /*
+ * Check retry_delay_timer value if we receive a busy or
+ * queue full.
+ */
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
+ lscsi_status == SAM_STAT_BUSY)
+ qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+
+ /*
+ * Based on Host and scsi status generate status code for Linux
+ */
+ switch (comp_status) {
+ case CS_COMPLETE:
+ case CS_QUEUE_FULL:
+ if (scsi_status == 0) {
+ res = DID_OK << 16;
+ break;
+ }
+ if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
+ resid = resid_len;
+ scsi_set_resid(cp, resid);
+
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
+ "Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16;
+ break;
+ }
+ }
+ res = DID_OK << 16 | lscsi_status;
+
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
+ "QUEUE FULL detected.\n");
+ break;
+ }
+ logit = 0;
+ if (lscsi_status != SS_CHECK_CONDITION)
+ break;
+
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(scsi_status & SS_SENSE_LEN_VALID))
+ break;
+
+ qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+ rsp, res);
+ break;
+
+ case CS_DATA_UNDERRUN:
+ /* Use F/W calculated residual length. */
+ resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
+ scsi_set_resid(cp, resid);
+ if (scsi_status & SS_RESIDUAL_UNDER) {
+ if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
+ "Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
+ }
+
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
+ "Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16;
+ break;
+ }
+ } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+ lscsi_status != SAM_STAT_BUSY) {
+ /*
+ * scsi status of task set and busy are considered to be
+ * task not completed.
+ */
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
+ "Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", resid,
+ scsi_bufflen(cp));
+
+ res = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
+ } else {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
+ "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+ scsi_status, lscsi_status);
+ }
+
+ res = DID_OK << 16 | lscsi_status;
+ logit = 0;
+
+check_scsi_status:
+ /*
+ * Check to see if SCSI Status is non zero. If so report SCSI
+ * Status.
+ */
+ if (lscsi_status != 0) {
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
+ "QUEUE FULL detected.\n");
+ logit = 1;
+ break;
+ }
+ if (lscsi_status != SS_CHECK_CONDITION)
+ break;
+
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(scsi_status & SS_SENSE_LEN_VALID))
+ break;
+
+ qla2x00_handle_sense(sp, sense_data, par_sense_len,
+ sense_len, rsp, res);
+ }
+ break;
+
+ case CS_PORT_LOGGED_OUT:
+ case CS_PORT_CONFIG_CHG:
+ case CS_PORT_BUSY:
+ case CS_INCOMPLETE:
+ case CS_PORT_UNAVAILABLE:
+ case CS_TIMEOUT:
+ case CS_RESET:
+
+ /*
+ * We are going to have the fc class block the rport
+ * while we try to recover so instruct the mid layer
+ * to requeue until the class decides how to handle this.
+ */
+ res = DID_TRANSPORT_DISRUPTED << 16;
+
+ if (comp_status == CS_TIMEOUT) {
+ if (IS_FWI2_CAPABLE(ha))
+ break;
+ else if ((le16_to_cpu(sts->status_flags) &
+ SF_LOGOUT_SENT) == 0)
+ break;
+ }
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
+ "Port to be marked lost on fcport=%02x%02x%02x, current "
+ "port state= %s.\n", fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ port_state_str[atomic_read(&fcport->state)]);
+
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ break;
+
+ case CS_ABORTED:
+ res = DID_RESET << 16;
+ break;
+
+ case CS_DIF_ERROR:
+ logit = qla2x00_handle_dif_error(sp, sts24);
+ res = cp->result;
+ break;
+
+ case CS_TRANSPORT:
+ res = DID_ERROR << 16;
+
+ if (!IS_PI_SPLIT_DET_CAPABLE(ha))
+ break;
+
+ if (state_flags & BIT_4)
+ scmd_printk(KERN_WARNING, cp,
+ "Unsupported device '%s' found.\n",
+ cp->device->vendor);
+ break;
+
+ default:
+ res = DID_ERROR << 16;
+ break;
+ }
+
+out:
+ if (logit)
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
+ "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
+ comp_status, scsi_status, res, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+ cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len);
+
+ if (rsp->status_srb == NULL)
+ sp->done(ha, sp, res);
+}
+
+/**
+ * qla2x00_status_cont_entry() - Process a Status Continuations entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+ uint8_t sense_sz = 0;
+ struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp = rsp->status_srb;
+ struct scsi_cmnd *cp;
+ uint32_t sense_len;
+ uint8_t *sense_ptr;
+
+ if (!sp || !GET_CMD_SENSE_LEN(sp))
+ return;
+
+ sense_len = GET_CMD_SENSE_LEN(sp);
+ sense_ptr = GET_CMD_SENSE_PTR(sp);
+
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_log(ql_log_warn, vha, 0x3025,
+ "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+ rsp->status_srb = NULL;
+ return;
+ }
+
+ if (sense_len > sizeof(pkt->data))
+ sense_sz = sizeof(pkt->data);
+ else
+ sense_sz = sense_len;
+
+ /* Move sense data. */
+ if (IS_FWI2_CAPABLE(ha))
+ host_to_fcp_swap(pkt->data, sizeof(pkt->data));
+ memcpy(sense_ptr, pkt->data, sense_sz);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+ sense_ptr, sense_sz);
+
+ sense_len -= sense_sz;
+ sense_ptr += sense_sz;
+
+ SET_CMD_SENSE_PTR(sp, sense_ptr);
+ SET_CMD_SENSE_LEN(sp, sense_len);
+
+ /* Place command on done queue. */
+ if (sense_len == 0) {
+ rsp->status_srb = NULL;
+ sp->done(ha, sp, cp->result);
+ }
+}
+
+/**
+ * qla2x00_error_entry() - Process an error entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ const char func[] = "ERROR-IOCB";
+ uint16_t que = MSW(pkt->handle);
+ struct req_que *req = NULL;
+ int res = DID_ERROR << 16;
+
+ ql_dbg(ql_dbg_async, vha, 0x502a,
+ "type of error status in response: 0x%x\n", pkt->entry_status);
+
+ if (que >= ha->max_req_queues || !ha->req_q_map[que])
+ goto fatal;
+
+ req = ha->req_q_map[que];
+
+ if (pkt->entry_status & RF_BUSY)
+ res = DID_BUS_BUSY << 16;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (sp) {
+ sp->done(ha, sp, res);
+ return;
+ }
+fatal:
+ ql_log(ql_log_warn, vha, 0x5030,
+ "Error entry - invalid handle/queue.\n");
+
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+}
+
+/**
+ * qla24xx_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+static void
+qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+ uint16_t cnt;
+ uint32_t mboxes;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
+ wptr = (uint16_t __iomem *)&reg->mailbox1;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ if (mboxes & BIT_0)
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+
+ mboxes >>= 1;
+ wptr++;
+ }
+}
+
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct abort_entry_24xx *pkt)
+{
+ const char func[] = "ABT_IOCB";
+ srb_t *sp;
+ struct srb_iocb *abt;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ abt = &sp->u.iocb_cmd;
+ abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ sp->done(vha, sp, 0);
+}
+
+/**
+ * qla24xx_process_response_queue() - Process response queue entries.
+ * @ha: SCSI driver HA context
+ */
+void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+ struct rsp_que *rsp)
+{
+ struct sts_entry_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->flags.online)
+ return;
+
+ while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+ pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+
+ rsp->ring_index++;
+ if (rsp->ring_index == rsp->length) {
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
+ } else {
+ rsp->ring_ptr++;
+ }
+
+ if (pkt->entry_status != 0) {
+ qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+ if (qlt_24xx_process_response_error(vha, pkt))
+ goto process_err;
+
+ ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ continue;
+ }
+process_err:
+
+ switch (pkt->entry_type) {
+ case STATUS_TYPE:
+ qla2x00_status_entry(vha, rsp, pkt);
+ break;
+ case STATUS_CONT_TYPE:
+ qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+ break;
+ case VP_RPT_ID_IOCB_TYPE:
+ qla24xx_report_id_acquisition(vha,
+ (struct vp_rpt_id_entry_24xx *)pkt);
+ break;
+ case LOGINOUT_PORT_IOCB_TYPE:
+ qla24xx_logio_entry(vha, rsp->req,
+ (struct logio_entry_24xx *)pkt);
+ break;
+ case CT_IOCB_TYPE:
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+ break;
+ case ELS_IOCB_TYPE:
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+ break;
+ case ABTS_RECV_24XX:
+ /* ensure that the ATIO queue is empty */
+ qlt_24xx_process_atio_queue(vha);
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ case CTIO_CRC2:
+ qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ break;
+ case MARKER_TYPE:
+ /* Do nothing in this case, this check is to prevent it
+ * from falling into default case
+ */
+ break;
+ case ABORT_IOCB_TYPE:
+ qla24xx_abort_iocb_entry(vha, rsp->req,
+ (struct abort_entry_24xx *)pkt);
+ break;
+ default:
+ /* Type Not Supported. */
+ ql_dbg(ql_dbg_async, vha, 0x5042,
+ "Received unknown response pkt type %x "
+ "entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
+ break;
+ }
+ ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ /* Adjust ring index */
+ if (IS_P3P_TYPE(ha)) {
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ } else
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+static void
+qla2xxx_check_risc_status(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint32_t cnt;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ return;
+
+ rval = QLA_SUCCESS;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+ for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt) {
+ WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+ udelay(10);
+ } else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ if (rval == QLA_SUCCESS)
+ goto next_test;
+
+ rval = QLA_SUCCESS;
+ WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+ for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt) {
+ WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+ udelay(10);
+ } else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+ if (rval != QLA_SUCCESS)
+ goto done;
+
+next_test:
+ if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
+ ql_log(ql_log_info, vha, 0x504c,
+ "Additional code -- 0x55AA.\n");
+
+done:
+ WRT_REG_DWORD(&reg->iobase_window, 0x0000);
+ RD_REG_DWORD(&reg->iobase_window);
+}
+
+/**
+ * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla24xx_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct device_reg_24xx __iomem *reg;
+ int status;
+ unsigned long iter;
+ uint32_t stat;
+ uint32_t hccr;
+ uint16_t mb[8];
+ struct rsp_que *rsp;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x5059,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp24;
+ status = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 50; iter--; ) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
+ break;
+ if (stat & HSRX_RISC_PAUSED) {
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ break;
+
+ hccr = RD_REG_DWORD(&reg->hccr);
+
+ ql_log(ql_log_warn, vha, 0x504b,
+ "RISC paused -- HCCR=%x, Dumping firmware.\n",
+ hccr);
+
+ qla2xxx_check_risc_status(vha);
+
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ } else if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case INTR_ROM_MB_SUCCESS:
+ case INTR_ROM_MB_FAILED:
+ case INTR_MB_SUCCESS:
+ case INTR_MB_FAILED:
+ qla24xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+
+ break;
+ case INTR_ASYNC_EVENT:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox1);
+ mb[2] = RD_REG_WORD(&reg->mailbox2);
+ mb[3] = RD_REG_WORD(&reg->mailbox3);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case INTR_RSP_QUE_UPDATE:
+ case INTR_RSP_QUE_UPDATE_83XX:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ case INTR_ATIO_QUE_UPDATE:
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x504f,
+ "Unrecognized interrupt type (%d).\n", stat * 0xff);
+ break;
+ }
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD_RELAXED(&reg->hccr);
+ if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
+ ndelay(3500);
+ }
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+qla24xx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_24xx __iomem *reg;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+ uint32_t stat = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x505a,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ reg = &ha->iobase->isp24;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ vha = pci_get_drvdata(ha->pdev);
+ /*
+ * Use host_status register to check to PCI disconnection before we
+ * we process the response queue.
+ */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
+ goto out;
+ qla24xx_process_response_queue(vha, rsp);
+ if (!ha->flags.disable_msix_handshake) {
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD_RELAXED(&reg->hccr);
+ }
+out:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+qla25xx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
+ struct rsp_que *rsp;
+ struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
+ uint32_t hccr = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (!ha->flags.disable_msix_handshake) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ if (qla2x00_check_reg32_for_disconnect(vha, hccr))
+ goto out;
+ queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+qla24xx_msix_default(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_24xx __iomem *reg;
+ int status;
+ uint32_t stat;
+ uint32_t hccr;
+ uint16_t mb[8];
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x505c,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ reg = &ha->iobase->isp24;
+ status = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ do {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
+ break;
+ if (stat & HSRX_RISC_PAUSED) {
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ break;
+
+ hccr = RD_REG_DWORD(&reg->hccr);
+
+ ql_log(ql_log_info, vha, 0x5050,
+ "RISC paused -- HCCR=%x, Dumping firmware.\n",
+ hccr);
+
+ qla2xxx_check_risc_status(vha);
+
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ } else if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case INTR_ROM_MB_SUCCESS:
+ case INTR_ROM_MB_FAILED:
+ case INTR_MB_SUCCESS:
+ case INTR_MB_FAILED:
+ qla24xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+
+ break;
+ case INTR_ASYNC_EVENT:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox1);
+ mb[2] = RD_REG_WORD(&reg->mailbox2);
+ mb[3] = RD_REG_WORD(&reg->mailbox3);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case INTR_RSP_QUE_UPDATE:
+ case INTR_RSP_QUE_UPDATE_83XX:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ case INTR_ATIO_QUE_UPDATE:
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5051,
+ "Unrecognized interrupt type (%d).\n", stat & 0xff);
+ break;
+ }
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ } while (0);
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handling helpers. */
+
+struct qla_init_msix_entry {
+ const char *name;
+ irq_handler_t handler;
+};
+
+static struct qla_init_msix_entry msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+};
+
+static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+ { "qla2xxx (default)", qla82xx_msix_default },
+ { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
+};
+
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
+static void
+qla24xx_disable_msix(struct qla_hw_data *ha)
+{
+ int i;
+ struct qla_msix_entry *qentry;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ if (qentry->have_irq)
+ free_irq(qentry->vector, qentry->rsp);
+ }
+ pci_disable_msix(ha->pdev);
+ kfree(ha->msix_entries);
+ ha->msix_entries = NULL;
+ ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled the MSI.\n");
+}
+
+static int
+qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+#define MIN_MSIX_COUNT 2
+#define ATIO_VECTOR 2
+ int i, ret;
+ struct msix_entry *entries;
+ struct qla_msix_entry *qentry;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
+ GFP_KERNEL);
+ if (!entries) {
+ ql_log(ql_log_warn, vha, 0x00bc,
+ "Failed to allocate memory for msix_entry.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ha->msix_count; i++)
+ entries[i].entry = i;
+
+ ret = pci_enable_msix_range(ha->pdev,
+ entries, MIN_MSIX_COUNT, ha->msix_count);
+ if (ret < 0) {
+ ql_log(ql_log_fatal, vha, 0x00c7,
+ "MSI-X: Failed to enable support, "
+ "giving up -- %d/%d.\n",
+ ha->msix_count, ret);
+ goto msix_out;
+ } else if (ret < ha->msix_count) {
+ ql_log(ql_log_warn, vha, 0x00c6,
+ "MSI-X: Failed to enable support "
+ "-- %d/%d\n Retry with %d vectors.\n",
+ ha->msix_count, ret, ret);
+ }
+ ha->msix_count = ret;
+ ha->max_rsp_queues = ha->msix_count - 1;
+ ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+ ha->msix_count, GFP_KERNEL);
+ if (!ha->msix_entries) {
+ ql_log(ql_log_fatal, vha, 0x00c8,
+ "Failed to allocate memory for ha->msix_entries.\n");
+ ret = -ENOMEM;
+ goto msix_out;
+ }
+ ha->flags.msix_enabled = 1;
+
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ qentry->vector = entries[i].vector;
+ qentry->entry = entries[i].entry;
+ qentry->have_irq = 0;
+ qentry->rsp = NULL;
+ }
+
+ /* Enable MSI-X vectors for the base queue */
+ for (i = 0; i < 2; i++) {
+ qentry = &ha->msix_entries[i];
+ if (IS_P3P_TYPE(ha))
+ ret = request_irq(qentry->vector,
+ qla82xx_msix_entries[i].handler,
+ 0, qla82xx_msix_entries[i].name, rsp);
+ else
+ ret = request_irq(qentry->vector,
+ msix_entries[i].handler,
+ 0, msix_entries[i].name, rsp);
+ if (ret)
+ goto msix_register_fail;
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
+ /*
+ * If target mode is enable, also request the vector for the ATIO
+ * queue.
+ */
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ qentry = &ha->msix_entries[ATIO_VECTOR];
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[ATIO_VECTOR].handler,
+ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
+msix_register_fail:
+ if (ret) {
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
+ }
+
+ /* Enable MSI-X vector for response queue update for queue 0 */
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->msixbase && ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ ha->mqenable = 1;
+ } else
+ if (ha->mqiobase
+ && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ ha->mqenable = 1;
+ ql_dbg(ql_dbg_multiq, vha, 0xc005,
+ "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+ ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
+ ql_dbg(ql_dbg_init, vha, 0x0055,
+ "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+ ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
+
+msix_out:
+ kfree(entries);
+ return ret;
+}
+
+int
+qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ device_reg_t *reg = ha->iobase;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ /* If possible, enable MSI-X. */
+ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+ !IS_QLA27XX(ha))
+ goto skip_msi;
+
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ (ha->pdev->subsystem_device == 0x7040 ||
+ ha->pdev->subsystem_device == 0x7041 ||
+ ha->pdev->subsystem_device == 0x1705)) {
+ ql_log(ql_log_warn, vha, 0x0034,
+ "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
+ ha->pdev->subsystem_vendor,
+ ha->pdev->subsystem_device);
+ goto skip_msi;
+ }
+
+ if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
+ ql_log(ql_log_warn, vha, 0x0035,
+ "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
+ ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
+ goto skip_msix;
+ }
+
+ ret = qla24xx_enable_msix(ha, rsp);
+ if (!ret) {
+ ql_dbg(ql_dbg_init, vha, 0x0036,
+ "MSI-X: Enabled (0x%X, 0x%X).\n",
+ ha->chip_revision, ha->fw_attributes);
+ goto clear_risc_ints;
+ }
+
+skip_msix:
+
+ ql_log(ql_log_info, vha, 0x0037,
+ "Falling back-to MSI mode -%d.\n", ret);
+
+ if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+ !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+ !IS_QLA27XX(ha))
+ goto skip_msi;
+
+ ret = pci_enable_msi(ha->pdev);
+ if (!ret) {
+ ql_dbg(ql_dbg_init, vha, 0x0038,
+ "MSI: Enabled.\n");
+ ha->flags.msi_enabled = 1;
+ } else
+ ql_log(ql_log_warn, vha, 0x0039,
+ "Falling back-to INTa mode -- %d.\n", ret);
+skip_msi:
+
+ /* Skip INTx on ISP82xx. */
+ if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
+ ha->flags.msi_enabled ? 0 : IRQF_SHARED,
+ QLA2XXX_DRIVER_NAME, rsp);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0x003a,
+ "Failed to reserve interrupt %d already in use.\n",
+ ha->pdev->irq);
+ goto fail;
+ } else if (!ha->flags.msi_enabled) {
+ ql_dbg(ql_dbg_init, vha, 0x0125,
+ "INTa mode: Enabled.\n");
+ ha->flags.mr_intr_valid = 1;
+ }
+
+clear_risc_ints:
+ if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ goto fail;
+
+ spin_lock_irq(&ha->hardware_lock);
+ WRT_REG_WORD(&reg->isp.semaphore, 0);
+ spin_unlock_irq(&ha->hardware_lock);
+
+fail:
+ return ret;
+}
+
+void
+qla2x00_free_irqs(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct rsp_que *rsp;
+
+ /*
+ * We need to check that ha->rsp_q_map is valid in case we are called
+ * from a probe failure context.
+ */
+ if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+ return;
+ rsp = ha->rsp_q_map[0];
+
+ if (ha->flags.msix_enabled)
+ qla24xx_disable_msix(ha);
+ else if (ha->flags.msi_enabled) {
+ free_irq(ha->pdev->irq, rsp);
+ pci_disable_msi(ha->pdev);
+ } else
+ free_irq(ha->pdev->irq, rsp);
+}
+
+
+int qla25xx_request_irq(struct rsp_que *rsp)
+{
+ struct qla_hw_data *ha = rsp->hw;
+ struct qla_init_msix_entry *intr = &msix_entries[2];
+ struct qla_msix_entry *msix = rsp->msix;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ int ret;
+
+ ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+ if (ret) {
+ ql_log(ql_log_fatal, vha, 0x00e6,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ msix->vector, ret);
+ return ret;
+ }
+ msix->have_irq = 1;
+ msix->rsp = rsp;
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
new file mode 100644
index 000000000..02b1c1c53
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -0,0 +1,5471 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
+
+
+/*
+ * qla2x00_mailbox_command
+ * Issue mailbox command and waits for completion.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ * 0 : QLA_SUCCESS = cmd performed success
+ * 1 : QLA_FUNCTION_FAILED (error encountered)
+ * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+{
+ int rval, i;
+ unsigned long flags = 0;
+ device_reg_t *reg;
+ uint8_t abort_active;
+ uint8_t io_lock_on;
+ uint16_t command = 0;
+ uint16_t *iptr;
+ uint16_t __iomem *optr;
+ uint32_t cnt;
+ uint32_t mboxes;
+ uint16_t __iomem *mbx_reg;
+ unsigned long wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
+
+ if (ha->pdev->error_state > pci_channel_io_frozen) {
+ ql_log(ql_log_warn, vha, 0x1001,
+ "error_state is greater than pci_channel_io_frozen, "
+ "exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_log(ql_log_warn, vha, 0x1002,
+ "Device in failed state, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ reg = ha->iobase;
+ io_lock_on = base_vha->flags.init_done;
+
+ rval = QLA_SUCCESS;
+ abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+
+ if (ha->flags.pci_channel_io_perm_failure) {
+ ql_log(ql_log_warn, vha, 0x1003,
+ "Perm failure on EEH timeout MBX, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ ql_log(ql_log_warn, vha, 0x1004,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ /*
+ * Wait for active mailbox commands to finish by waiting at most tov
+ * seconds. This is to serialize actual issuing of mailbox cmds during
+ * non ISP abort time.
+ */
+ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+ /* Timeout occurred. Return error. */
+ ql_log(ql_log_warn, vha, 0x1005,
+ "Cmd access timeout, cmd=0x%x, Exiting.\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ ha->flags.mbox_busy = 1;
+ /* Save mailbox command for debug */
+ ha->mcp = mcp;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1006,
+ "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Load mailbox registers. */
+ if (IS_P3P_TYPE(ha))
+ optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
+ optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
+ else
+ optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
+
+ iptr = mcp->mb;
+ command = mcp->mb[0];
+ mboxes = mcp->out_mb;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1111,
+ "Mailbox registers (OUT):\n");
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (IS_QLA2200(ha) && cnt == 8)
+ optr =
+ (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
+ if (mboxes & BIT_0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1112,
+ "mbox[%d]<-0x%04x\n", cnt, *iptr);
+ WRT_REG_WORD(optr, *iptr);
+ }
+
+ mboxes >>= 1;
+ optr++;
+ iptr++;
+ }
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
+ "I/O Address = %p.\n", optr);
+
+ /* Issue set host interrupt command to send cmd out. */
+ ha->flags.mbox_int = 0;
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ /* Unlock mbx registers and wait for interrupt */
+ ql_dbg(ql_dbg_mbx, vha, 0x100f,
+ "Going to unlock irq & waiting for interrupts. "
+ "jiffies=%lx.\n", jiffies);
+
+ /* Wait for mbx cmd completion until timeout */
+
+ if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+ set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+ if (IS_P3P_TYPE(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ ha->flags.mbox_busy = 0;
+ ql_dbg(ql_dbg_mbx, vha, 0x1010,
+ "Pending mailbox timeout, exiting.\n");
+ rval = QLA_FUNCTION_TIMEOUT;
+ goto premature_exit;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
+ WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ else
+ WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
+ mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1011,
+ "Cmd=%x Polling Mode.\n", command);
+
+ if (IS_P3P_TYPE(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ ha->flags.mbox_busy = 0;
+ ql_dbg(ql_dbg_mbx, vha, 0x1012,
+ "Pending mailbox timeout, exiting.\n");
+ rval = QLA_FUNCTION_TIMEOUT;
+ goto premature_exit;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
+ WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ else
+ WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+ while (!ha->flags.mbox_int) {
+ if (time_after(jiffies, wait_time))
+ break;
+
+ /* Check for pending interrupts. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+
+ if (!ha->flags.mbox_int &&
+ !(IS_QLA2200(ha) &&
+ command == MBC_LOAD_RISC_RAM_EXTENDED))
+ msleep(10);
+ } /* while */
+ ql_dbg(ql_dbg_mbx, vha, 0x1013,
+ "Waited %d sec.\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+ }
+
+ /* Check whether we timed out */
+ if (ha->flags.mbox_int) {
+ uint16_t *iptr2;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1014,
+ "Cmd=%x completed.\n", command);
+
+ /* Got interrupt. Clear the flag. */
+ ha->flags.mbox_int = 0;
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+ ha->flags.mbox_busy = 0;
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ ha->mcp = NULL;
+ rval = QLA_FUNCTION_FAILED;
+ ql_log(ql_log_warn, vha, 0x1015,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+ goto premature_exit;
+ }
+
+ if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+
+ /* Load return mailbox registers. */
+ iptr2 = mcp->mb;
+ iptr = (uint16_t *)&ha->mailbox_out[0];
+ mboxes = mcp->in_mb;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1113,
+ "Mailbox registers (IN):\n");
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (mboxes & BIT_0) {
+ *iptr2 = *iptr;
+ ql_dbg(ql_dbg_mbx, vha, 0x1114,
+ "mbox[%d]->0x%04x\n", cnt, *iptr2);
+ }
+
+ mboxes >>= 1;
+ iptr2++;
+ iptr++;
+ }
+ } else {
+
+ uint16_t mb0;
+ uint32_t ictrl;
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+ ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+ } else {
+ mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+ ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ }
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
+
+ /*
+ * Attempt to capture a firmware dump for further analysis
+ * of the current firmware state. We do not need to do this
+ * if we are intentionally generating a dump.
+ */
+ if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
+ ha->isp_ops->fw_dump(vha, 0);
+
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+
+ ha->flags.mbox_busy = 0;
+
+ /* Clean up */
+ ha->mcp = NULL;
+
+ if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x101a,
+ "Checking for additional resp interrupt.\n");
+
+ /* polling mode for non isp_abort commands. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+ }
+
+ if (rval == QLA_FUNCTION_TIMEOUT &&
+ mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+ if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+ ha->flags.eeh_busy) {
+ /* not in dpc. schedule it for dpc to take over. */
+ ql_dbg(ql_dbg_mbx, vha, 0x101b,
+ "Timeout, schedule isp_abort_needed.\n");
+
+ if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
+ ql_log(ql_log_info, base_vha, 0x101c,
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
+ "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+ "abort.\n", command, mcp->mb[0],
+ ha->flags.eeh_busy);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else if (!abort_active) {
+ /* call abort directly since we are in the DPC thread */
+ ql_dbg(ql_dbg_mbx, vha, 0x101d,
+ "Timeout, calling abort_isp.\n");
+
+ if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
+ ql_log(ql_log_info, base_vha, 0x101e,
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
+ "mb[0]=0x%x. Scheduling ISP abort ",
+ command, mcp->mb[0]);
+ set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ /* Allow next mbx cmd to come in. */
+ complete(&ha->mbx_cmd_comp);
+ if (ha->isp_ops->abort_isp(vha)) {
+ /* Failed. retry later. */
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+ ql_dbg(ql_dbg_mbx, vha, 0x101f,
+ "Finished abort_isp.\n");
+ goto mbx_done;
+ }
+ }
+ }
+
+premature_exit:
+ /* Allow next mbx cmd to come in. */
+ complete(&ha->mbx_cmd_comp);
+
+mbx_done:
+ if (rval) {
+ ql_dbg(ql_dbg_disc, base_vha, 0x1020,
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+
+ ql_dbg(ql_dbg_disc, vha, 0x1115,
+ "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
+ RD_REG_DWORD(&reg->isp24.host_status),
+ ha->fw_dump_cap_flags,
+ RD_REG_DWORD(&reg->isp24.ictrl),
+ RD_REG_DWORD(&reg->isp24.istatus));
+
+ mbx_reg = &reg->isp24.mailbox0;
+ for (i = 0; i < 6; i++)
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
+ } else {
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
+ uint32_t risc_code_size)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+ "Entered %s.\n", __func__);
+
+ if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
+ mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
+ mcp->mb[8] = MSW(risc_addr);
+ mcp->out_mb = MBX_8|MBX_0;
+ } else {
+ mcp->mb[0] = MBC_LOAD_RISC_RAM;
+ mcp->out_mb = MBX_0;
+ }
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->mb[2] = MSW(req_dma);
+ mcp->mb[3] = LSW(req_dma);
+ mcp->mb[6] = MSW(MSD(req_dma));
+ mcp->mb[7] = LSW(MSD(req_dma));
+ mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
+ if (IS_FWI2_CAPABLE(ha)) {
+ mcp->mb[4] = MSW(risc_code_size);
+ mcp->mb[5] = LSW(risc_code_size);
+ mcp->out_mb |= MBX_5|MBX_4;
+ } else {
+ mcp->mb[4] = LSW(risc_code_size);
+ mcp->out_mb |= MBX_4;
+ }
+
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1023,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+#define EXTENDED_BB_CREDITS BIT_0
+/*
+ * qla2x00_execute_fw
+ * Start adapter firmware.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ mcp->mb[1] = MSW(risc_addr);
+ mcp->mb[2] = LSW(risc_addr);
+ mcp->mb[3] = 0;
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha)) {
+ struct nvram_81xx *nv = ha->nvram;
+ mcp->mb[4] = (nv->enhanced_features &
+ EXTENDED_BB_CREDITS);
+ } else
+ mcp->mb[4] = 0;
+ mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
+ mcp->in_mb |= MBX_1;
+ } else {
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->out_mb |= MBX_1;
+ if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+ mcp->mb[2] = 0;
+ mcp->out_mb |= MBX_2;
+ }
+ }
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1026,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ if (IS_FWI2_CAPABLE(ha)) {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
+ "Done exchanges=%x.\n", mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+ "Done %s.\n", __func__);
+ }
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_fw_version
+ * Get firmware version.
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * major: pointer for major number.
+ * minor: pointer for minor number.
+ * subminor: pointer for subminor number.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_fw_version(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
+ mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
+ if (IS_FWI2_CAPABLE(ha))
+ mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+ if (IS_QLA27XX(ha))
+ mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
+ mcp->flags = 0;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
+ /* Return mailbox data. */
+ ha->fw_major_version = mcp->mb[1];
+ ha->fw_minor_version = mcp->mb[2];
+ ha->fw_subminor_version = mcp->mb[3];
+ ha->fw_attributes = mcp->mb[6];
+ if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
+ ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
+ else
+ ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
+ ha->mpi_version[0] = mcp->mb[10] & 0xff;
+ ha->mpi_version[1] = mcp->mb[11] >> 8;
+ ha->mpi_version[2] = mcp->mb[11] & 0xff;
+ ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
+ ha->phy_version[0] = mcp->mb[8] & 0xff;
+ ha->phy_version[1] = mcp->mb[9] >> 8;
+ ha->phy_version[2] = mcp->mb[9] & 0xff;
+ }
+ if (IS_FWI2_CAPABLE(ha)) {
+ ha->fw_attributes_h = mcp->mb[15];
+ ha->fw_attributes_ext[0] = mcp->mb[16];
+ ha->fw_attributes_ext[1] = mcp->mb[17];
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
+ "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+ __func__, mcp->mb[15], mcp->mb[6]);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
+ "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
+ __func__, mcp->mb[17], mcp->mb[16]);
+ }
+ if (IS_QLA27XX(ha)) {
+ ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
+ ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
+ }
+
+failed:
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/*
+ * qla2x00_get_fw_options
+ * Set firmware options.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fwopt = pointer for firmware options.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
+ } else {
+ fwopts[0] = mcp->mb[0];
+ fwopts[1] = mcp->mb[1];
+ fwopts[2] = mcp->mb[2];
+ fwopts[3] = mcp->mb[3];
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+
+/*
+ * qla2x00_set_fw_options
+ * Set firmware options.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fwopt = pointer for firmware options.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
+ mcp->mb[1] = fwopts[1];
+ mcp->mb[2] = fwopts[2];
+ mcp->mb[3] = fwopts[3];
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->in_mb |= MBX_1;
+ } else {
+ mcp->mb[10] = fwopts[10];
+ mcp->mb[11] = fwopts[11];
+ mcp->mb[12] = 0; /* Undocumented, but used */
+ mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+ }
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ fwopts[0] = mcp->mb[0];
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1030,
+ "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_mbx_reg_test
+ * Mailbox register wrap test.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+ mcp->mb[1] = 0xAAAA;
+ mcp->mb[2] = 0x5555;
+ mcp->mb[3] = 0xAA55;
+ mcp->mb[4] = 0x55AA;
+ mcp->mb[5] = 0xA5A5;
+ mcp->mb[6] = 0x5A5A;
+ mcp->mb[7] = 0x2525;
+ mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
+ mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
+ mcp->mb[7] != 0x2525)
+ rval = QLA_FUNCTION_FAILED;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_verify_checksum
+ * Verify firmware checksum.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_VERIFY_CHECKSUM;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->mb[1] = MSW(risc_addr);
+ mcp->mb[2] = LSW(risc_addr);
+ mcp->out_mb |= MBX_2|MBX_1;
+ mcp->in_mb |= MBX_2|MBX_1;
+ } else {
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->out_mb |= MBX_1;
+ mcp->in_mb |= MBX_1;
+ }
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1036,
+ "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
+ (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_issue_iocb
+ * Issue IOCB using mailbox command
+ *
+ * Input:
+ * ha = adapter state pointer.
+ * buffer = buffer pointer.
+ * phys_addr = physical address of buffer.
+ * size = size of buffer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
+ dma_addr_t phys_addr, size_t size, uint32_t tov)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_IOCB_COMMAND_A64;
+ mcp->mb[1] = 0;
+ mcp->mb[2] = MSW(phys_addr);
+ mcp->mb[3] = LSW(phys_addr);
+ mcp->mb[6] = MSW(MSD(phys_addr));
+ mcp->mb[7] = LSW(MSD(phys_addr));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_0;
+ mcp->tov = tov;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
+ } else {
+ sts_entry_t *sts_entry = (sts_entry_t *) buffer;
+
+ /* Mask reserved bits. */
+ sts_entry->entry_status &=
+ IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
+ size_t size)
+{
+ return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
+ MBX_TOV_SECONDS);
+}
+
+/*
+ * qla2x00_abort_command
+ * Abort command aborts a specified IOCB.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * sp = SB structure pointer.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_abort_command(srb_t *sp)
+{
+ unsigned long flags = 0;
+ int rval;
+ uint32_t handle = 0;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ fc_port_t *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+ "Entered %s.\n", __func__);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+ if (req->outstanding_cmds[handle] == sp)
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (handle == req->num_outstanding_cmds) {
+ /* command not found */
+ return QLA_FUNCTION_FAILED;
+ }
+
+ mcp->mb[0] = MBC_ABORT_COMMAND;
+ if (HAS_EXTENDED_IDS(ha))
+ mcp->mb[1] = fcport->loop_id;
+ else
+ mcp->mb[1] = fcport->loop_id << 8;
+ mcp->mb[2] = (uint16_t)handle;
+ mcp->mb[3] = (uint16_t)(handle >> 16);
+ mcp->mb[6] = (uint16_t)cmd->device->lun;
+ mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
+{
+ int rval, rval2;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ scsi_qla_host_t *vha;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ l = l;
+ vha = fcport->vha;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+ "Entered %s.\n", __func__);
+
+ req = vha->hw->req_q_map[0];
+ rsp = req->rsp;
+ mcp->mb[0] = MBC_ABORT_TARGET;
+ mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
+ if (HAS_EXTENDED_IDS(vha->hw)) {
+ mcp->mb[1] = fcport->loop_id;
+ mcp->mb[10] = 0;
+ mcp->out_mb |= MBX_10;
+ } else {
+ mcp->mb[1] = fcport->loop_id << 8;
+ }
+ mcp->mb[2] = vha->hw->loop_reset_delay;
+ mcp->mb[9] = vha->vp_idx;
+
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+ "Failed=%x.\n", rval);
+ }
+
+ /* Issue marker IOCB. */
+ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
+ MK_SYNC_ID);
+ if (rval2 != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1040,
+ "Failed to issue marker IOCB (%x).\n", rval2);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
+{
+ int rval, rval2;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ scsi_qla_host_t *vha;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ vha = fcport->vha;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+ "Entered %s.\n", __func__);
+
+ req = vha->hw->req_q_map[0];
+ rsp = req->rsp;
+ mcp->mb[0] = MBC_LUN_RESET;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (HAS_EXTENDED_IDS(vha->hw))
+ mcp->mb[1] = fcport->loop_id;
+ else
+ mcp->mb[1] = fcport->loop_id << 8;
+ mcp->mb[2] = (u32)l;
+ mcp->mb[3] = 0;
+ mcp->mb[9] = vha->vp_idx;
+
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
+ }
+
+ /* Issue marker IOCB. */
+ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+ MK_SYNC_ID_LUN);
+ if (rval2 != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1044,
+ "Failed to issue marker IOCB (%x).\n", rval2);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_adapter_id
+ * Get adapter ID and topology.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * id = pointer for loop ID.
+ * al_pa = pointer for AL_PA.
+ * area = pointer for area.
+ * domain = pointer for domain.
+ * top = pointer for topology.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
+ uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_0;
+ mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (IS_CNA_CAPABLE(vha->hw))
+ mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (mcp->mb[0] == MBS_COMMAND_ERROR)
+ rval = QLA_COMMAND_ERROR;
+ else if (mcp->mb[0] == MBS_INVALID_COMMAND)
+ rval = QLA_INVALID_COMMAND;
+
+ /* Return data. */
+ *id = mcp->mb[1];
+ *al_pa = LSB(mcp->mb[2]);
+ *area = MSB(mcp->mb[2]);
+ *domain = LSB(mcp->mb[3]);
+ *top = mcp->mb[6];
+ *sw_cap = mcp->mb[7];
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+ "Done %s.\n", __func__);
+
+ if (IS_CNA_CAPABLE(vha->hw)) {
+ vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
+ vha->fcoe_fcf_idx = mcp->mb[10];
+ vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
+ vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
+ vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
+ vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
+ vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
+ vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
+ }
+ /* If FA-WWN supported */
+ if (mcp->mb[7] & BIT_14) {
+ vha->port_name[0] = MSB(mcp->mb[16]);
+ vha->port_name[1] = LSB(mcp->mb[16]);
+ vha->port_name[2] = MSB(mcp->mb[17]);
+ vha->port_name[3] = LSB(mcp->mb[17]);
+ vha->port_name[4] = MSB(mcp->mb[18]);
+ vha->port_name[5] = LSB(mcp->mb[18]);
+ vha->port_name[6] = MSB(mcp->mb[19]);
+ vha->port_name[7] = LSB(mcp->mb[19]);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+ "FA-WWN acquired %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_retry_cnt
+ * Get current firmware login retry count and delay.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * retry_cnt = pointer to login retry count.
+ * tov = pointer to login timeout value.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
+ uint16_t *r_a_tov)
+{
+ int rval;
+ uint16_t ratov;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RETRY_COUNT;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x104a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ /* Convert returned data and check our values. */
+ *r_a_tov = mcp->mb[3] / 2;
+ ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
+ if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
+ /* Update to the larger values */
+ *retry_cnt = (uint8_t)mcp->mb[1];
+ *tov = ratov;
+ }
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
+ "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_init_firmware
+ * Initialize adapter firmware.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * dptr = Initialization control block pointer.
+ * size = size of initialization control block.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+ "Entered %s.\n", __func__);
+
+ if (IS_P3P_TYPE(ha) && ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
+ (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
+
+ if (ha->flags.npiv_supported)
+ mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
+ else
+ mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+ mcp->mb[1] = 0;
+ mcp->mb[2] = MSW(ha->init_cb_dma);
+ mcp->mb[3] = LSW(ha->init_cb_dma);
+ mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
+ mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
+ mcp->mb[1] = BIT_0;
+ mcp->mb[10] = MSW(ha->ex_init_cb_dma);
+ mcp->mb[11] = LSW(ha->ex_init_cb_dma);
+ mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
+ mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
+ mcp->mb[14] = sizeof(*ha->ex_init_cb);
+ mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
+ }
+ /* 1 and 2 should normally be captured. */
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ /* mb3 is additional info about the installed SFP. */
+ mcp->in_mb |= MBX_3;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x104d,
+ "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_node_name_list
+ * Issue get node name list mailbox command, kmalloc()
+ * and return the resulting list. Caller must kfree() it!
+ *
+ * Input:
+ * ha = adapter state pointer.
+ * out_data = resulting list
+ * out_len = length of the resulting list
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *list = NULL;
+ void *pmap;
+ mbx_cmd_t mc;
+ dma_addr_t pmap_dma;
+ ulong dma_size;
+ int rval, left;
+
+ left = 1;
+ while (left > 0) {
+ dma_size = left * sizeof(*list);
+ pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+ &pmap_dma, GFP_KERNEL);
+ if (!pmap) {
+ ql_log(ql_log_warn, vha, 0x113f,
+ "%s(%ld): DMA Alloc failed of %ld\n",
+ __func__, vha->host_no, dma_size);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out;
+ }
+
+ mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+ mc.mb[1] = BIT_1 | BIT_3;
+ mc.mb[2] = MSW(pmap_dma);
+ mc.mb[3] = LSW(pmap_dma);
+ mc.mb[6] = MSW(MSD(pmap_dma));
+ mc.mb[7] = LSW(MSD(pmap_dma));
+ mc.mb[8] = dma_size;
+ mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+ mc.in_mb = MBX_0|MBX_1;
+ mc.tov = 30;
+ mc.flags = MBX_DMA_IN;
+
+ rval = qla2x00_mailbox_command(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+ (mc.mb[1] == 0xA)) {
+ left += le16_to_cpu(mc.mb[2]) /
+ sizeof(struct qla_port_24xx_data);
+ goto restart;
+ }
+ goto out_free;
+ }
+
+ left = 0;
+
+ list = kmemdup(pmap, dma_size, GFP_KERNEL);
+ if (!list) {
+ ql_log(ql_log_warn, vha, 0x1140,
+ "%s(%ld): failed to allocate node names list "
+ "structure.\n", __func__, vha->host_no);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out_free;
+ }
+
+restart:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
+ }
+
+ *out_data = list;
+ *out_len = dma_size;
+
+out:
+ return rval;
+
+out_free:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
+ return rval;
+}
+
+/*
+ * qla2x00_get_port_database
+ * Issue normal/enhanced get port database mailbox command
+ * and copy device name as necessary.
+ *
+ * Input:
+ * ha = adapter state pointer.
+ * dev = structure pointer.
+ * opt = enhanced cmd option byte.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ port_database_t *pd;
+ struct port_database_24xx *pd24;
+ dma_addr_t pd_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+ "Entered %s.\n", __func__);
+
+ pd24 = NULL;
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0x1050,
+ "Failed to allocate port database structure.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ mcp->mb[0] = MBC_GET_PORT_DATABASE;
+ if (opt != 0 && !IS_FWI2_CAPABLE(ha))
+ mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
+ mcp->mb[2] = MSW(pd_dma);
+ mcp->mb[3] = LSW(pd_dma);
+ mcp->mb[6] = MSW(MSD(pd_dma));
+ mcp->mb[7] = LSW(MSD(pd_dma));
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ mcp->mb[1] = fcport->loop_id;
+ mcp->mb[10] = opt;
+ mcp->out_mb |= MBX_10|MBX_1;
+ mcp->in_mb |= MBX_1;
+ } else if (HAS_EXTENDED_IDS(ha)) {
+ mcp->mb[1] = fcport->loop_id;
+ mcp->mb[10] = opt;
+ mcp->out_mb |= MBX_10|MBX_1;
+ } else {
+ mcp->mb[1] = fcport->loop_id << 8 | opt;
+ mcp->out_mb |= MBX_1;
+ }
+ mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
+ PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ goto gpd_error_out;
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ uint64_t zero = 0;
+ pd24 = (struct port_database_24xx *) pd;
+
+ /* Check for logged in state. */
+ if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
+ pd24->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1051,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd24->current_login_state,
+ pd24->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd24->port_name, 8))) {
+ /* We lost the device mid way. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
+ memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd24->port_id[0];
+ fcport->d_id.b.area = pd24->port_id[1];
+ fcport->d_id.b.al_pa = pd24->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target must be initiator or unknown type. */
+ if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd24->prli_svc_param_word_3[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ } else {
+ uint64_t zero = 0;
+
+ /* Check for logged in state. */
+ if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
+ pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
+ ql_dbg(ql_dbg_mbx, vha, 0x100a,
+ "Unable to verify login-state (%x/%x) - "
+ "portid=%02x%02x%02x.\n", pd->master_state,
+ pd->slave_state, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device mid way. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+ memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[3];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->options & BIT_4) ?
+ FC_COS_CLASS2: FC_COS_CLASS3;
+ }
+
+gpd_error_out:
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1052,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
+ mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_firmware_state
+ * Get adapter firmware state.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * dptr = pointer for firmware state.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+ mcp->out_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ else
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return firmware states. */
+ states[0] = mcp->mb[1];
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ states[1] = mcp->mb[2];
+ states[2] = mcp->mb[3];
+ states[3] = mcp->mb[4];
+ states[4] = mcp->mb[5];
+ states[5] = mcp->mb[6]; /* DPORT status */
+ }
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_port_name
+ * Issue get port name mailbox command.
+ * Returned name is in big endian format.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * loop_id = loop ID of device.
+ * name = pointer for name.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
+ uint8_t opt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_PORT_NAME;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_1|MBX_0;
+ if (HAS_EXTENDED_IDS(vha->hw)) {
+ mcp->mb[1] = loop_id;
+ mcp->mb[10] = opt;
+ mcp->out_mb |= MBX_10;
+ } else {
+ mcp->mb[1] = loop_id << 8 | opt;
+ }
+
+ mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
+ } else {
+ if (name != NULL) {
+ /* This function returns name in big endian. */
+ name[0] = MSB(mcp->mb[2]);
+ name[1] = LSB(mcp->mb[2]);
+ name[2] = MSB(mcp->mb[3]);
+ name[3] = LSB(mcp->mb[3]);
+ name[4] = MSB(mcp->mb[6]);
+ name[5] = LSB(mcp->mb[6]);
+ name[6] = MSB(mcp->mb[7]);
+ name[7] = LSB(mcp->mb[7]);
+ }
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla24xx_link_initialization
+ * Issue link initialization mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_LINK_INITIALIZATION;
+ mcp->mb[1] = BIT_4;
+ if (vha->hw->operating_mode == LOOP)
+ mcp->mb[1] |= BIT_6;
+ else
+ mcp->mb[1] |= BIT_5;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_lip_reset
+ * Issue LIP reset mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_lip_reset(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+ "Entered %s.\n", __func__);
+
+ if (IS_CNA_CAPABLE(vha->hw)) {
+ /* Logout across all FCFs. */
+ mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+ mcp->mb[1] = BIT_1;
+ mcp->mb[2] = 0;
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ } else if (IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+ mcp->mb[1] = BIT_6;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = vha->hw->loop_reset_delay;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ } else {
+ mcp->mb[0] = MBC_LIP_RESET;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ if (HAS_EXTENDED_IDS(vha->hw)) {
+ mcp->mb[1] = 0x00ff;
+ mcp->mb[10] = 0;
+ mcp->out_mb |= MBX_10;
+ } else {
+ mcp->mb[1] = 0xff00;
+ }
+ mcp->mb[2] = vha->hw->loop_reset_delay;
+ mcp->mb[3] = 0;
+ }
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_send_sns
+ * Send SNS command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * sns = pointer for command.
+ * cmd_size = command size.
+ * buf_size = response/command size.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
+ uint16_t cmd_size, size_t buf_size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+ "Entered %s.\n", __func__);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
+ "Retry cnt=%d ratov=%d total tov=%d.\n",
+ vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
+
+ mcp->mb[0] = MBC_SEND_SNS_COMMAND;
+ mcp->mb[1] = cmd_size;
+ mcp->mb[2] = MSW(sns_phys_address);
+ mcp->mb[3] = LSW(sns_phys_address);
+ mcp->mb[6] = MSW(MSD(sns_phys_address));
+ mcp->mb[7] = LSW(MSD(sns_phys_address));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0|MBX_1;
+ mcp->buf_size = buf_size;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
+ mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x105f,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+ uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
+{
+ int rval;
+
+ struct logio_entry_24xx *lg;
+ dma_addr_t lg_dma;
+ uint32_t iop[2];
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+ "Entered %s.\n", __func__);
+
+ if (ha->flags.cpu_affinity_enabled)
+ req = ha->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
+
+ lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+ if (lg == NULL) {
+ ql_log(ql_log_warn, vha, 0x1062,
+ "Failed to allocate login IOCB.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(lg, 0, sizeof(struct logio_entry_24xx));
+
+ lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ lg->entry_count = 1;
+ lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->nport_handle = cpu_to_le16(loop_id);
+ lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
+ if (opt & BIT_0)
+ lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
+ if (opt & BIT_1)
+ lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
+ lg->port_id[0] = al_pa;
+ lg->port_id[1] = area;
+ lg->port_id[2] = domain;
+ lg->vp_index = vha->vp_idx;
+ rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+ (ha->r_a_tov / 10 * 2) + 2);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1063,
+ "Failed to issue login IOCB (%x).\n", rval);
+ } else if (lg->entry_status != 0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1064,
+ "Failed to complete IOCB -- error status (%x).\n",
+ lg->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ iop[0] = le32_to_cpu(lg->io_parameter[0]);
+ iop[1] = le32_to_cpu(lg->io_parameter[1]);
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1065,
+ "Failed to complete IOCB -- completion status (%x) "
+ "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
+ iop[0], iop[1]);
+
+ switch (iop[0]) {
+ case LSC_SCODE_PORTID_USED:
+ mb[0] = MBS_PORT_ID_USED;
+ mb[1] = LSW(iop[1]);
+ break;
+ case LSC_SCODE_NPORT_USED:
+ mb[0] = MBS_LOOP_ID_USED;
+ break;
+ case LSC_SCODE_NOLINK:
+ case LSC_SCODE_NOIOCB:
+ case LSC_SCODE_NOXCB:
+ case LSC_SCODE_CMD_FAILED:
+ case LSC_SCODE_NOFABRIC:
+ case LSC_SCODE_FW_NOT_READY:
+ case LSC_SCODE_NOT_LOGGED_IN:
+ case LSC_SCODE_NOPCB:
+ case LSC_SCODE_ELS_REJECT:
+ case LSC_SCODE_CMD_PARAM_ERR:
+ case LSC_SCODE_NONPORT:
+ case LSC_SCODE_LOGGED_IN:
+ case LSC_SCODE_NOFLOGI_ACC:
+ default:
+ mb[0] = MBS_COMMAND_ERROR;
+ break;
+ }
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+ "Done %s.\n", __func__);
+
+ iop[0] = le32_to_cpu(lg->io_parameter[0]);
+
+ mb[0] = MBS_COMMAND_COMPLETE;
+ mb[1] = 0;
+ if (iop[0] & BIT_4) {
+ if (iop[0] & BIT_8)
+ mb[1] |= BIT_1;
+ } else
+ mb[1] = BIT_0;
+
+ /* Passback COS information. */
+ mb[10] = 0;
+ if (lg->io_parameter[7] || lg->io_parameter[8])
+ mb[10] |= BIT_0; /* Class 2. */
+ if (lg->io_parameter[9] || lg->io_parameter[10])
+ mb[10] |= BIT_1; /* Class 3. */
+ if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+ mb[10] |= BIT_7; /* Confirmed Completion
+ * Allowed
+ */
+ }
+
+ dma_pool_free(ha->s_dma_pool, lg, lg_dma);
+
+ return rval;
+}
+
+/*
+ * qla2x00_login_fabric
+ * Issue login fabric port mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * loop_id = device loop ID.
+ * domain = device domain.
+ * area = device area.
+ * al_pa = device AL_PA.
+ * status = pointer for return status.
+ * opt = command options.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+ uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ if (HAS_EXTENDED_IDS(ha)) {
+ mcp->mb[1] = loop_id;
+ mcp->mb[10] = opt;
+ mcp->out_mb |= MBX_10;
+ } else {
+ mcp->mb[1] = (loop_id << 8) | opt;
+ }
+ mcp->mb[2] = domain;
+ mcp->mb[3] = area << 8 | al_pa;
+
+ mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
+ mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[2] = mcp->mb[2];
+ mb[6] = mcp->mb[6];
+ mb[7] = mcp->mb[7];
+ /* COS retrieved from Get-Port-Database mailbox command. */
+ mb[10] = 0;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ /* RLU tmp code: need to change main mailbox_command function to
+ * return ok even when the mailbox completion value is not
+ * SUCCESS. The caller needs to be responsible to interpret
+ * the return values of this mailbox command if we're not
+ * to change too much of the existing code.
+ */
+ if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
+ mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
+ mcp->mb[0] == 0x4006)
+ rval = QLA_SUCCESS;
+
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1068,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_login_local_device
+ * Issue login loop port mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * loop_id = device loop ID.
+ * opt = command options.
+ *
+ * Returns:
+ * Return status code.
+ *
+ * Context:
+ * Kernel context.
+ *
+ */
+int
+qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
+ uint16_t *mb_ret, uint8_t opt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+ "Entered %s.\n", __func__);
+
+ if (IS_FWI2_CAPABLE(ha))
+ return qla24xx_login_fabric(vha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, mb_ret, opt);
+
+ mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
+ if (HAS_EXTENDED_IDS(ha))
+ mcp->mb[1] = fcport->loop_id;
+ else
+ mcp->mb[1] = fcport->loop_id << 8;
+ mcp->mb[2] = opt;
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
+ mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb_ret != NULL) {
+ mb_ret[0] = mcp->mb[0];
+ mb_ret[1] = mcp->mb[1];
+ mb_ret[6] = mcp->mb[6];
+ mb_ret[7] = mcp->mb[7];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ /* AV tmp code: need to change main mailbox_command function to
+ * return ok even when the mailbox completion value is not
+ * SUCCESS. The caller needs to be responsible to interpret
+ * the return values of this mailbox command if we're not
+ * to change too much of the existing code.
+ */
+ if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
+ rval = QLA_SUCCESS;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x106b,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+ "Done %s.\n", __func__);
+ }
+
+ return (rval);
+}
+
+int
+qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+ uint8_t area, uint8_t al_pa)
+{
+ int rval;
+ struct logio_entry_24xx *lg;
+ dma_addr_t lg_dma;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+ "Entered %s.\n", __func__);
+
+ lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+ if (lg == NULL) {
+ ql_log(ql_log_warn, vha, 0x106e,
+ "Failed to allocate logout IOCB.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(lg, 0, sizeof(struct logio_entry_24xx));
+
+ if (ql2xmaxqueues > 1)
+ req = ha->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
+ lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ lg->entry_count = 1;
+ lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->nport_handle = cpu_to_le16(loop_id);
+ lg->control_flags =
+ __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
+ LCF_FREE_NPORT);
+ lg->port_id[0] = al_pa;
+ lg->port_id[1] = area;
+ lg->port_id[2] = domain;
+ lg->vp_index = vha->vp_idx;
+ rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+ (ha->r_a_tov / 10 * 2) + 2);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x106f,
+ "Failed to issue logout IOCB (%x).\n", rval);
+ } else if (lg->entry_status != 0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1070,
+ "Failed to complete IOCB -- error status (%x).\n",
+ lg->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1071,
+ "Failed to complete IOCB -- completion status (%x) "
+ "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
+ le32_to_cpu(lg->io_parameter[0]),
+ le32_to_cpu(lg->io_parameter[1]));
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, lg, lg_dma);
+
+ return rval;
+}
+
+/*
+ * qla2x00_fabric_logout
+ * Issue logout fabric port mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * loop_id = device loop ID.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+ uint8_t area, uint8_t al_pa)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
+ mcp->out_mb = MBX_1|MBX_0;
+ if (HAS_EXTENDED_IDS(vha->hw)) {
+ mcp->mb[1] = loop_id;
+ mcp->mb[10] = 0;
+ mcp->out_mb |= MBX_10;
+ } else {
+ mcp->mb[1] = loop_id << 8;
+ }
+
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1074,
+ "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_full_login_lip
+ * Issue full login LIP mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_full_login_lip(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+ mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_id_list
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
+ uint16_t *entries)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+ "Entered %s.\n", __func__);
+
+ if (id_list == NULL)
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_GET_ID_LIST;
+ mcp->out_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->mb[2] = MSW(id_list_dma);
+ mcp->mb[3] = LSW(id_list_dma);
+ mcp->mb[6] = MSW(MSD(id_list_dma));
+ mcp->mb[7] = LSW(MSD(id_list_dma));
+ mcp->mb[8] = 0;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
+ } else {
+ mcp->mb[1] = MSW(id_list_dma);
+ mcp->mb[2] = LSW(id_list_dma);
+ mcp->mb[3] = MSW(MSD(id_list_dma));
+ mcp->mb[6] = LSW(MSD(id_list_dma));
+ mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
+ }
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
+ } else {
+ *entries = mcp->mb[1];
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_resource_cnts
+ * Get current firmware resource counts.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
+ uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
+ uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
+ mcp->in_mb |= MBX_12;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x107d,
+ "Failed mb[0]=%x.\n", mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
+ "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
+ "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
+ mcp->mb[11], mcp->mb[12]);
+
+ if (cur_xchg_cnt)
+ *cur_xchg_cnt = mcp->mb[3];
+ if (orig_xchg_cnt)
+ *orig_xchg_cnt = mcp->mb[6];
+ if (cur_iocb_cnt)
+ *cur_iocb_cnt = mcp->mb[7];
+ if (orig_iocb_cnt)
+ *orig_iocb_cnt = mcp->mb[10];
+ if (vha->hw->flags.npiv_supported && max_npiv_vports)
+ *max_npiv_vports = mcp->mb[11];
+ if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
+ *max_fcfs = mcp->mb[12];
+ }
+
+ return (rval);
+}
+
+/*
+ * qla2x00_get_fcal_position_map
+ * Get FCAL (LILP) position map using mailbox command
+ *
+ * Input:
+ * ha = adapter state pointer.
+ * pos_map = buffer pointer (can be NULL).
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ char *pmap;
+ dma_addr_t pmap_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+ "Entered %s.\n", __func__);
+
+ pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
+ if (pmap == NULL) {
+ ql_log(ql_log_warn, vha, 0x1080,
+ "Memory alloc failed.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(pmap, 0, FCAL_MAP_SIZE);
+
+ mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
+ mcp->mb[2] = MSW(pmap_dma);
+ mcp->mb[3] = LSW(pmap_dma);
+ mcp->mb[6] = MSW(MSD(pmap_dma));
+ mcp->mb[7] = LSW(MSD(pmap_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = FCAL_MAP_SIZE;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
+ "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
+ mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
+ pmap, pmap[0] + 1);
+
+ if (pos_map)
+ memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ }
+ dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_link_status
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * loop_id = device loop ID.
+ * ret_buf = pointer to link status return buffer.
+ *
+ * Returns:
+ * 0 = success.
+ * BIT_0 = mem alloc error.
+ * BIT_1 = mailbox error.
+ */
+int
+qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
+ struct link_statistics *stats, dma_addr_t stats_dma)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint32_t *siter, *diter, dwords;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_LINK_STATUS;
+ mcp->mb[2] = MSW(stats_dma);
+ mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[6] = MSW(MSD(stats_dma));
+ mcp->mb[7] = LSW(MSD(stats_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ mcp->mb[1] = loop_id;
+ mcp->mb[4] = 0;
+ mcp->mb[10] = 0;
+ mcp->out_mb |= MBX_10|MBX_4|MBX_1;
+ mcp->in_mb |= MBX_1;
+ } else if (HAS_EXTENDED_IDS(ha)) {
+ mcp->mb[1] = loop_id;
+ mcp->mb[10] = 0;
+ mcp->out_mb |= MBX_10|MBX_1;
+ } else {
+ mcp->mb[1] = loop_id << 8;
+ mcp->out_mb |= MBX_1;
+ }
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = IOCTL_CMD;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1085,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* Copy over data -- firmware data is LE. */
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+ "Done %s.\n", __func__);
+ dwords = offsetof(struct link_statistics, unused1) / 4;
+ siter = diter = &stats->link_fail_cnt;
+ while (dwords--)
+ *diter++ = le32_to_cpu(*siter++);
+ }
+ } else {
+ /* Failed. */
+ ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
+ }
+
+ return rval;
+}
+
+int
+qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
+ dma_addr_t stats_dma)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint32_t *siter, *diter, dwords;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
+ mcp->mb[2] = MSW(stats_dma);
+ mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[6] = MSW(MSD(stats_dma));
+ mcp->mb[7] = LSW(MSD(stats_dma));
+ mcp->mb[8] = sizeof(struct link_statistics) / 4;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = IOCTL_CMD;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1089,
+ "Failed mb[0]=%x.\n", mcp->mb[0]);
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+ "Done %s.\n", __func__);
+ /* Copy over data -- firmware data is LE. */
+ dwords = sizeof(struct link_statistics) / 4;
+ siter = diter = &stats->link_fail_cnt;
+ while (dwords--)
+ *diter++ = le32_to_cpu(*siter++);
+ }
+ } else {
+ /* Failed. */
+ ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
+ }
+
+ return rval;
+}
+
+int
+qla24xx_abort_command(srb_t *sp)
+{
+ int rval;
+ unsigned long flags = 0;
+
+ struct abort_entry_24xx *abt;
+ dma_addr_t abt_dma;
+ uint32_t handle;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+ "Entered %s.\n", __func__);
+
+ if (ql2xasynctmfenable)
+ return qla24xx_async_abort_command(sp);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+ if (req->outstanding_cmds[handle] == sp)
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (handle == req->num_outstanding_cmds) {
+ /* Command not found. */
+ return QLA_FUNCTION_FAILED;
+ }
+
+ abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
+ if (abt == NULL) {
+ ql_log(ql_log_warn, vha, 0x108d,
+ "Failed to allocate abort IOCB.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(abt, 0, sizeof(struct abort_entry_24xx));
+
+ abt->entry_type = ABORT_IOCB_TYPE;
+ abt->entry_count = 1;
+ abt->handle = MAKE_HANDLE(req->id, abt->handle);
+ abt->nport_handle = cpu_to_le16(fcport->loop_id);
+ abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
+ abt->port_id[0] = fcport->d_id.b.al_pa;
+ abt->port_id[1] = fcport->d_id.b.area;
+ abt->port_id[2] = fcport->d_id.b.domain;
+ abt->vp_index = fcport->vha->vp_idx;
+
+ abt->req_que_no = cpu_to_le16(req->id);
+
+ rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x108e,
+ "Failed to issue IOCB (%x).\n", rval);
+ } else if (abt->entry_status != 0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x108f,
+ "Failed to complete IOCB -- error status (%x).\n",
+ abt->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1090,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(abt->nport_handle));
+ if (abt->nport_handle == CS_IOCB_ERROR)
+ rval = QLA_FUNCTION_PARAMETER_ERROR;
+ else
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, abt, abt_dma);
+
+ return rval;
+}
+
+struct tsk_mgmt_cmd {
+ union {
+ struct tsk_mgmt_entry tsk;
+ struct sts_entry_24xx sts;
+ } p;
+};
+
+static int
+__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
+ uint64_t l, int tag)
+{
+ int rval, rval2;
+ struct tsk_mgmt_cmd *tsk;
+ struct sts_entry_24xx *sts;
+ dma_addr_t tsk_dma;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ vha = fcport->vha;
+ ha = vha->hw;
+ req = vha->req;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+ "Entered %s.\n", __func__);
+
+ if (ha->flags.cpu_affinity_enabled)
+ rsp = ha->rsp_q_map[tag + 1];
+ else
+ rsp = req->rsp;
+ tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+ if (tsk == NULL) {
+ ql_log(ql_log_warn, vha, 0x1093,
+ "Failed to allocate task management IOCB.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
+
+ tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
+ tsk->p.tsk.entry_count = 1;
+ tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
+ tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
+ tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ tsk->p.tsk.control_flags = cpu_to_le32(type);
+ tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
+ tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
+ tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
+ tsk->p.tsk.vp_index = fcport->vha->vp_idx;
+ if (type == TCF_LUN_RESET) {
+ int_to_scsilun(l, &tsk->p.tsk.lun);
+ host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
+ sizeof(tsk->p.tsk.lun));
+ }
+
+ sts = &tsk->p.sts;
+ rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1094,
+ "Failed to issue %s reset IOCB (%x).\n", name, rval);
+ } else if (sts->entry_status != 0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1095,
+ "Failed to complete IOCB -- error status (%x).\n",
+ sts->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (sts->comp_status !=
+ __constant_cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1096,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(sts->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else if (le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID) {
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
+ "Ignoring inconsistent data length -- not enough "
+ "response info (%d).\n",
+ le32_to_cpu(sts->rsp_data_len));
+ } else if (sts->data[3]) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1098,
+ "Failed to complete IOCB -- response (%x).\n",
+ sts->data[3]);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ /* Issue marker IOCB. */
+ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+ type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
+ if (rval2 != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1099,
+ "Failed to issue marker IOCB (%x).\n", rval2);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
+
+ return rval;
+}
+
+int
+qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
+{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+ return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+
+ return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
+}
+
+int
+qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
+{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+ return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+
+ return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
+}
+
+int
+qla2x00_system_error(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 5;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_WRITE_SERDES;
+ mcp->mb[1] = addr;
+ if (IS_QLA2031(vha->hw))
+ mcp->mb[2] = data & 0xff;
+ else
+ mcp->mb[2] = data;
+
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1183,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_READ_SERDES;
+ mcp->mb[1] = addr;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (IS_QLA2031(vha->hw))
+ *data = mcp->mb[1] & 0xff;
+ else
+ *data = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1186,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA8044(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+ mcp->mb[1] = HCS_WRITE_SERDES;
+ mcp->mb[3] = LSW(addr);
+ mcp->mb[4] = MSW(addr);
+ mcp->mb[5] = LSW(data);
+ mcp->mb[6] = MSW(data);
+ mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1187,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA8044(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+ mcp->mb[1] = HCS_READ_SERDES;
+ mcp->mb[3] = LSW(addr);
+ mcp->mb[4] = MSW(addr);
+ mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ *data = mcp->mb[2] << 16 | mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/**
+ * qla2x00_set_serdes_params() -
+ * @ha: HA context
+ *
+ * Returns
+ */
+int
+qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
+ uint16_t sw_em_2g, uint16_t sw_em_4g)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SERDES_PARAMS;
+ mcp->mb[1] = BIT_0;
+ mcp->mb[2] = sw_em_1g | BIT_15;
+ mcp->mb[3] = sw_em_2g | BIT_15;
+ mcp->mb[4] = sw_em_4g | BIT_15;
+ mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x109f,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_stop_firmware(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_STOP_FIRMWARE;
+ mcp->mb[1] = 0;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 5;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
+ if (mcp->mb[0] == MBS_INVALID_COMMAND)
+ rval = QLA_INVALID_COMMAND;
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
+ uint16_t buffers)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_EFT_ENABLE;
+ mcp->mb[2] = LSW(eft_dma);
+ mcp->mb[3] = MSW(eft_dma);
+ mcp->mb[4] = LSW(MSD(eft_dma));
+ mcp->mb[5] = MSW(MSD(eft_dma));
+ mcp->mb[6] = buffers;
+ mcp->mb[7] = TC_AEN_DISABLE;
+ mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10a5,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_EFT_DISABLE;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10a8,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
+ uint16_t buffers, uint16_t *mb, uint32_t *dwords)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
+ !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_FCE_ENABLE;
+ mcp->mb[2] = LSW(fce_dma);
+ mcp->mb[3] = MSW(fce_dma);
+ mcp->mb[4] = LSW(MSD(fce_dma));
+ mcp->mb[5] = MSW(MSD(fce_dma));
+ mcp->mb[6] = buffers;
+ mcp->mb[7] = TC_AEN_DISABLE;
+ mcp->mb[8] = 0;
+ mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
+ mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
+ MBX_1|MBX_0;
+ mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ab,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+ "Done %s.\n", __func__);
+
+ if (mb)
+ memcpy(mb, mcp->mb, 8 * sizeof(*mb));
+ if (dwords)
+ *dwords = buffers;
+ }
+
+ return rval;
+}
+
+int
+qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_FCE_DISABLE;
+ mcp->mb[2] = TC_FCE_DISABLE_TRACE;
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
+ MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ae,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+ "Done %s.\n", __func__);
+
+ if (wr)
+ *wr = (uint64_t) mcp->mb[5] << 48 |
+ (uint64_t) mcp->mb[4] << 32 |
+ (uint64_t) mcp->mb[3] << 16 |
+ (uint64_t) mcp->mb[2];
+ if (rd)
+ *rd = (uint64_t) mcp->mb[9] << 48 |
+ (uint64_t) mcp->mb[8] << 32 |
+ (uint64_t) mcp->mb[7] << 16 |
+ (uint64_t) mcp->mb[6];
+ }
+
+ return rval;
+}
+
+int
+qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ uint16_t *port_speed, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+ "Entered %s.\n", __func__);
+
+ if (!IS_IIDMA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = mcp->mb[3] = 0;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+ "Done %s.\n", __func__);
+ if (port_speed)
+ *port_speed = mcp->mb[3];
+ }
+
+ return rval;
+}
+
+int
+qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ uint16_t port_speed, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+ "Entered %s.\n", __func__);
+
+ if (!IS_IIDMA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = BIT_0;
+ if (IS_CNA_CAPABLE(vha->hw))
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+ else
+ mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+ "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+void
+qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+ struct vp_rpt_id_entry_24xx *rptid_entry)
+{
+ uint8_t vp_idx;
+ uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ int found;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+ "Entered %s.\n", __func__);
+
+ if (rptid_entry->entry_status != 0)
+ return;
+
+ if (rptid_entry->format == 0) {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+ "Format 0 : Number of VPs setup %d, number of "
+ "VPs acquired %d.\n",
+ MSB(le16_to_cpu(rptid_entry->vp_count)),
+ LSB(le16_to_cpu(rptid_entry->vp_count)));
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+ "Primary port id %02x%02x%02x.\n",
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]);
+ } else if (rptid_entry->format == 1) {
+ vp_idx = LSB(stat);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+ "Format 1: VP[%d] enabled - status %d - with "
+ "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]);
+
+ /* FA-WWN is only for physical port */
+ if (!vp_idx) {
+ void *wwpn = ha->init_cb->port_name;
+
+ if (!MSB(stat)) {
+ if (rptid_entry->vp_idx_map[1] & BIT_6)
+ wwpn = rptid_entry->reserved_4 + 8;
+ }
+ memcpy(vha->port_name, wwpn, WWN_SIZE);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
+ "FA-WWN portname %016llx (%x)\n",
+ fc_host_port_name(vha->host), MSB(stat));
+ }
+
+ vp = vha;
+ if (vp_idx == 0)
+ goto reg_needed;
+
+ if (MSB(stat) != 0 && MSB(stat) != 2) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ba,
+ "Could not acquire ID for VP[%d].\n", vp_idx);
+ return;
+ }
+
+ found = 0;
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp_idx == vp->vp_idx) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ if (!found)
+ return;
+
+ vp->d_id.b.domain = rptid_entry->port_id[2];
+ vp->d_id.b.area = rptid_entry->port_id[1];
+ vp->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ /*
+ * Cannot configure here as we are still sitting on the
+ * response queue. Handle it in dpc context.
+ */
+ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+
+reg_needed:
+ set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+ set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+}
+
+/*
+ * qla24xx_modify_vp_config
+ * Change VP configuration for vha
+ *
+ * Input:
+ * vha = adapter block pointer.
+ *
+ * Returns:
+ * qla2xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_modify_vp_config(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct vp_config_entry_24xx *vpmod;
+ dma_addr_t vpmod_dma;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ /* This can be called by the parent */
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+ "Entered %s.\n", __func__);
+
+ vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+ if (!vpmod) {
+ ql_log(ql_log_warn, vha, 0x10bc,
+ "Failed to allocate modify VP IOCB.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
+ vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
+ vpmod->entry_count = 1;
+ vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
+ vpmod->vp_count = 1;
+ vpmod->vp_index1 = vha->vp_idx;
+ vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+ qlt_modify_vp_config(vha, vpmod);
+
+ memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
+ memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
+ vpmod->entry_count = 1;
+
+ rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10bd,
+ "Failed to issue VP config IOCB (%x).\n", rval);
+ } else if (vpmod->comp_status != 0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10be,
+ "Failed to complete IOCB -- error status (%x).\n",
+ vpmod->comp_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10bf,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(vpmod->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* EMPTY */
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+ "Done %s.\n", __func__);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
+ }
+ dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
+
+ return rval;
+}
+
+/*
+ * qla24xx_control_vp
+ * Enable a virtual port for given host
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * vhba = virtual adapter (unused)
+ * index = index number for enabled VP
+ *
+ * Returns:
+ * qla2xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval;
+ int map, pos;
+ struct vp_ctrl_entry_24xx *vce;
+ dma_addr_t vce_dma;
+ struct qla_hw_data *ha = vha->hw;
+ int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
+ "Entered %s enabling index %d.\n", __func__, vp_index);
+
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+ return QLA_PARAMETER_ERROR;
+
+ vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
+ if (!vce) {
+ ql_log(ql_log_warn, vha, 0x10c2,
+ "Failed to allocate VP control IOCB.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(cmd);
+ vce->vp_count = __constant_cpu_to_le16(1);
+
+ /* index map in firmware starts with 1; decrement index
+ * this is ok as we never use index 0
+ */
+ map = (vp_index - 1) / 8;
+ pos = (vp_index - 1) & 7;
+ mutex_lock(&ha->vport_lock);
+ vce->vp_idx_map[map] |= 1 << pos;
+ mutex_unlock(&ha->vport_lock);
+
+ rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10c3,
+ "Failed to issue VP control IOCB (%x).\n", rval);
+ } else if (vce->entry_status != 0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10c4,
+ "Failed to complete IOCB -- error status (%x).\n",
+ vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10c5,
+ "Failed to complet IOCB -- completion status (%x).\n",
+ le16_to_cpu(vce->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, vce, vce_dma);
+
+ return rval;
+}
+
+/*
+ * qla2x00_send_change_request
+ * Receive or disable RSCN request from fabric controller
+ *
+ * Input:
+ * ha = adapter block pointer
+ * format = registration format:
+ * 0 - Reserved
+ * 1 - Fabric detected registration
+ * 2 - N_port detected registration
+ * 3 - Full registration
+ * FF - clear registration
+ * vp_idx = Virtual port index
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel Context
+ */
+
+int
+qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
+ uint16_t vp_idx)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
+ mcp->mb[1] = format;
+ mcp->mb[9] = vp_idx;
+ mcp->out_mb = MBX_9|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0|MBX_1;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+ rval = BIT_1;
+ }
+ } else
+ rval = BIT_1;
+
+ return rval;
+}
+
+int
+qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+ uint32_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+ "Entered %s.\n", __func__);
+
+ if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+ mcp->mb[8] = MSW(addr);
+ mcp->out_mb = MBX_8|MBX_0;
+ } else {
+ mcp->mb[0] = MBC_DUMP_RISC_RAM;
+ mcp->out_mb = MBX_0;
+ }
+ mcp->mb[1] = LSW(addr);
+ mcp->mb[2] = MSW(req_dma);
+ mcp->mb[3] = LSW(req_dma);
+ mcp->mb[6] = MSW(MSD(req_dma));
+ mcp->mb[7] = LSW(MSD(req_dma));
+ mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->mb[4] = MSW(size);
+ mcp->mb[5] = LSW(size);
+ mcp->out_mb |= MBX_5|MBX_4;
+ } else {
+ mcp->mb[4] = LSW(size);
+ mcp->out_mb |= MBX_4;
+ }
+
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1008,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+/* 84XX Support **************************************************************/
+
+struct cs84xx_mgmt_cmd {
+ union {
+ struct verify_chip_entry_84xx req;
+ struct verify_chip_rsp_84xx rsp;
+ } p;
+};
+
+int
+qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
+{
+ int rval, retry;
+ struct cs84xx_mgmt_cmd *mn;
+ dma_addr_t mn_dma;
+ uint16_t options;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+ "Entered %s.\n", __func__);
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (mn == NULL) {
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ /* Force Update? */
+ options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
+ /* Diagnostic firmware? */
+ /* options |= MENLO_DIAG_FW; */
+ /* We update the firmware with only one data sequence. */
+ options |= VCO_END_OF_DATA;
+
+ do {
+ retry = 0;
+ memset(mn, 0, sizeof(*mn));
+ mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
+ mn->p.req.entry_count = 1;
+ mn->p.req.options = cpu_to_le16(options);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
+ "Dump of Verify Request.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
+ (uint8_t *)mn, sizeof(*mn));
+
+ rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10cb,
+ "Failed to issue verify IOCB (%x).\n", rval);
+ goto verify_done;
+ }
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
+ "Dump of Verify Response.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
+ (uint8_t *)mn, sizeof(*mn));
+
+ status[0] = le16_to_cpu(mn->p.rsp.comp_status);
+ status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
+ le16_to_cpu(mn->p.rsp.failure_code) : 0;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
+ "cs=%x fc=%x.\n", status[0], status[1]);
+
+ if (status[0] != CS_COMPLETE) {
+ rval = QLA_FUNCTION_FAILED;
+ if (!(options & VCO_DONT_UPDATE_FW)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10cf,
+ "Firmware update failed. Retrying "
+ "without update firmware.\n");
+ options |= VCO_DONT_UPDATE_FW;
+ options &= ~VCO_FORCE_UPDATE;
+ retry = 1;
+ }
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
+ "Firmware updated to %x.\n",
+ le32_to_cpu(mn->p.rsp.fw_ver));
+
+ /* NOTE: we only update OP firmware. */
+ spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
+ ha->cs84xx->op_fw_version =
+ le32_to_cpu(mn->p.rsp.fw_ver);
+ spin_unlock_irqrestore(&ha->cs84xx->access_lock,
+ flags);
+ }
+ } while (retry);
+
+verify_done:
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+ "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+ int rval;
+ unsigned long flags;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+ "Entered %s.\n", __func__);
+
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ req->options |= BIT_13;
+
+ mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+ mcp->mb[1] = req->options;
+ mcp->mb[2] = MSW(LSD(req->dma));
+ mcp->mb[3] = LSW(LSD(req->dma));
+ mcp->mb[6] = MSW(MSD(req->dma));
+ mcp->mb[7] = LSW(MSD(req->dma));
+ mcp->mb[5] = req->length;
+ if (req->rsp)
+ mcp->mb[10] = req->rsp->id;
+ mcp->mb[12] = req->qos;
+ mcp->mb[11] = req->vp_idx;
+ mcp->mb[13] = req->rid;
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ mcp->mb[15] = 0;
+
+ mcp->mb[4] = req->id;
+ /* que in ptr index */
+ mcp->mb[8] = 0;
+ /* que out ptr index */
+ mcp->mb[9] = *req->out_ptr = 0;
+ mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
+ MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ mcp->in_mb |= MBX_1;
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ mcp->out_mb |= MBX_15;
+ /* debug q create issue in SR-IOV */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!(req->options & BIT_0)) {
+ WRT_REG_DWORD(req->req_q_in, 0);
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ WRT_REG_DWORD(req->req_q_out, 0);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d4,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+ int rval;
+ unsigned long flags;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+ "Entered %s.\n", __func__);
+
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ rsp->options |= BIT_13;
+
+ mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+ mcp->mb[1] = rsp->options;
+ mcp->mb[2] = MSW(LSD(rsp->dma));
+ mcp->mb[3] = LSW(LSD(rsp->dma));
+ mcp->mb[6] = MSW(MSD(rsp->dma));
+ mcp->mb[7] = LSW(MSD(rsp->dma));
+ mcp->mb[5] = rsp->length;
+ mcp->mb[14] = rsp->msix->entry;
+ mcp->mb[13] = rsp->rid;
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ mcp->mb[15] = 0;
+
+ mcp->mb[4] = rsp->id;
+ /* que in ptr index */
+ mcp->mb[8] = *rsp->in_ptr = 0;
+ /* que out ptr index */
+ mcp->mb[9] = 0;
+ mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
+ |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+ if (IS_QLA81XX(ha)) {
+ mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ /* debug q create issue in SR-IOV */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!(rsp->options & BIT_0)) {
+ WRT_REG_DWORD(rsp->rsp_q_out, 0);
+ if (!IS_QLA83XX(ha))
+ WRT_REG_DWORD(rsp->rsp_q_in, 0);
+ }
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d7,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_IDC_ACK;
+ memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
+ mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10da,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+ mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10dd,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+ "Done %s.\n", __func__);
+ *sector_size = mcp->mb[1];
+ }
+
+ return rval;
+}
+
+int
+qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+ mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
+ FAC_OPT_CMD_WRITE_PROTECT;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e0,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+ mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
+ mcp->mb[2] = LSW(start);
+ mcp->mb[3] = MSW(start);
+ mcp->mb[4] = LSW(finish);
+ mcp->mb[5] = MSW(finish);
+ mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
+{
+ int rval = 0;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_RESTART_MPI_FW;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0|MBX_1;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e6,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int i;
+ int len;
+ uint16_t *str;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
+ "Entered %s.\n", __func__);
+
+ str = (void *)version;
+ len = strlen(version);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ for (i = 4; i < 16 && len; i++, str++, len -= 2) {
+ mcp->mb[i] = cpu_to_le16p(str);
+ mcp->out_mb |= 1<<i;
+ }
+ for (; i < 16; i++) {
+ mcp->mb[i] = 0;
+ mcp->out_mb |= 1<<i;
+ }
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117c,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
+ IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x117f,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * 4 - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1180,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ *temp = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
+ uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ if (len == 1)
+ opt |= BIT_0;
+
+ mcp->mb[0] = MBC_READ_SFP;
+ mcp->mb[1] = dev;
+ mcp->mb[2] = MSW(sfp_dma);
+ mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[6] = MSW(MSD(sfp_dma));
+ mcp->mb[7] = LSW(MSD(sfp_dma));
+ mcp->mb[8] = len;
+ mcp->mb[9] = off;
+ mcp->mb[10] = opt;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (opt & BIT_0)
+ *sfp = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
+ uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ if (len == 1)
+ opt |= BIT_0;
+
+ if (opt & BIT_0)
+ len = *sfp;
+
+ mcp->mb[0] = MBC_WRITE_SFP;
+ mcp->mb[1] = dev;
+ mcp->mb[2] = MSW(sfp_dma);
+ mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[6] = MSW(MSD(sfp_dma));
+ mcp->mb[7] = LSW(MSD(sfp_dma));
+ mcp->mb[8] = len;
+ mcp->mb[9] = off;
+ mcp->mb[10] = opt;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ec,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
+ uint16_t size_in_bytes, uint16_t *actual_size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+ "Entered %s.\n", __func__);
+
+ if (!IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_GET_XGMAC_STATS;
+ mcp->mb[2] = MSW(stats_dma);
+ mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[6] = MSW(MSD(stats_dma));
+ mcp->mb[7] = LSW(MSD(stats_dma));
+ mcp->mb[8] = size_in_bytes >> 2;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ef,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+ "Done %s.\n", __func__);
+
+
+ *actual_size = mcp->mb[2] << 2;
+ }
+
+ return rval;
+}
+
+int
+qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
+ uint16_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+ "Entered %s.\n", __func__);
+
+ if (!IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_GET_DCBX_PARAMS;
+ mcp->mb[1] = 0;
+ mcp->mb[2] = MSW(tlv_dma);
+ mcp->mb[3] = LSW(tlv_dma);
+ mcp->mb[6] = MSW(MSD(tlv_dma));
+ mcp->mb[7] = LSW(MSD(tlv_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10f2,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_READ_RAM_EXTENDED;
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->mb[8] = MSW(risc_addr);
+ mcp->out_mb = MBX_8|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10f5,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+ "Done %s.\n", __func__);
+ *data = mcp->mb[3] << 16 | mcp->mb[2];
+ }
+
+ return rval;
+}
+
+int
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+ "Entered %s.\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
+ mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
+
+ /* transfer count */
+ mcp->mb[10] = LSW(mreq->transfer_size);
+ mcp->mb[11] = MSW(mreq->transfer_size);
+
+ /* send data address */
+ mcp->mb[14] = LSW(mreq->send_dma);
+ mcp->mb[15] = MSW(mreq->send_dma);
+ mcp->mb[20] = LSW(MSD(mreq->send_dma));
+ mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+ /* receive data address */
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+ mcp->mb[17] = MSW(mreq->rcv_dma);
+ mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+ mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+ /* Iteration count */
+ mcp->mb[18] = LSW(mreq->iteration_count);
+ mcp->mb[19] = MSW(mreq->iteration_count);
+
+ mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
+ MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+ if (IS_CNA_CAPABLE(vha->hw))
+ mcp->out_mb |= MBX_2;
+ mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->buf_size = mreq->transfer_size;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10f8,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
+ "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[18], mcp->mb[19]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+ "Done %s.\n", __func__);
+ }
+
+ /* Copy mailbox information */
+ memcpy( mresp, mcp->mb, 64);
+ return rval;
+}
+
+int
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+ "Entered %s.\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
+ mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
+ if (IS_CNA_CAPABLE(ha)) {
+ mcp->mb[1] |= BIT_15;
+ mcp->mb[2] = vha->fcoe_fcf_idx;
+ }
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+ mcp->mb[17] = MSW(mreq->rcv_dma);
+ mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+ mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+ mcp->mb[10] = LSW(mreq->transfer_size);
+
+ mcp->mb[14] = LSW(mreq->send_dma);
+ mcp->mb[15] = MSW(mreq->send_dma);
+ mcp->mb[20] = LSW(MSD(mreq->send_dma));
+ mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+ mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
+ MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+ if (IS_CNA_CAPABLE(ha))
+ mcp->out_mb |= MBX_2;
+
+ mcp->in_mb = MBX_0;
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ mcp->in_mb |= MBX_1;
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ mcp->in_mb |= MBX_3;
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->buf_size = mreq->transfer_size;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10fb,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+ "Done %s.\n", __func__);
+ }
+
+ /* Copy mailbox information */
+ memcpy(mresp, mcp->mb, 64);
+ return rval;
+}
+
+int
+qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
+ "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
+
+ mcp->mb[0] = MBC_ISP84XX_RESET;
+ mcp->mb[1] = enable_diagnostic;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
+ else
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+ "Done %s.\n", __func__);
+
+ return rval;
+}
+
+int
+qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->mb[2] = LSW(data);
+ mcp->mb[3] = MSW(data);
+ mcp->mb[8] = MSW(risc_addr);
+ mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1101,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ int rval;
+ uint32_t stat, timer;
+ uint16_t mb0 = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ rval = QLA_SUCCESS;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+ "Entered %s.\n", __func__);
+
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ /* Write the MBC data to the registers */
+ WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+ WRT_REG_WORD(&reg->mailbox1, mb[0]);
+ WRT_REG_WORD(&reg->mailbox2, mb[1]);
+ WRT_REG_WORD(&reg->mailbox3, mb[2]);
+ WRT_REG_WORD(&reg->mailbox4, mb[3]);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ /* Poll for MBC interrupt */
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+ mb0 = RD_REG_WORD(&reg->mailbox0);
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
+ rval = mb0 & MBS_MASK;
+ else
+ rval = QLA_FUNCTION_FAILED;
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1104,
+ "Failed=%x mb[0]=%x.\n", rval, mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_get_data_rate(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_DATA_RATE;
+ mcp->mb[1] = 0;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ mcp->in_mb |= MBX_3;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1107,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+ "Done %s.\n", __func__);
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+ }
+
+ return rval;
+}
+
+int
+qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+ !IS_QLA27XX(ha))
+ return QLA_FUNCTION_FAILED;
+ mcp->mb[0] = MBC_GET_PORT_CONFIG;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x110a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ /* Copy all bits to preserve original value */
+ memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+int
+qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_PORT_CONFIG;
+ /* Copy all bits to preserve original setting */
+ memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
+ mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x110d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+ "Done %s.\n", __func__);
+
+ return rval;
+}
+
+
+int
+qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
+ uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ if (ha->flags.fcp_prio_enabled)
+ mcp->mb[2] = BIT_1;
+ else
+ mcp->mb[2] = BIT_2;
+ mcp->mb[4] = priority & 0xf;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ mb[4] = mcp->mb[4];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t byte;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1150,
+ "Thermal not supported by this card.\n");
+ return rval;
+ }
+
+ if (IS_QLA25XX(ha)) {
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ ha->pdev->subsystem_device == 0x0175) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_0);
+ *temp = byte;
+ return rval;
+ }
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ ha->pdev->subsystem_device == 0x338e) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
+ *temp = byte;
+ return rval;
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x10c9,
+ "Thermal not supported by this card.\n");
+ return rval;
+ }
+
+ if (IS_QLA82XX(ha)) {
+ *temp = qla82xx_read_temperature(vha);
+ rval = QLA_SUCCESS;
+ return rval;
+ } else if (IS_QLA8044(ha)) {
+ *temp = qla8044_read_temperature(vha);
+ rval = QLA_SUCCESS;
+ return rval;
+ }
+
+ rval = qla2x00_read_asic_temperature(vha, temp);
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
+ mcp->mb[1] = 1;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1016,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+ "Entered %s.\n", __func__);
+
+ if (!IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
+ mcp->mb[1] = 0;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x100c,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla82xx_md_get_template_size(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+ "Entered %s.\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
+ mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
+
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Always copy back return mailbox values. */
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1120,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ (mcp->mb[1] << 16) | mcp->mb[0],
+ (mcp->mb[3] << 16) | mcp->mb[2]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+ "Done %s.\n", __func__);
+ ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
+ if (!ha->md_template_size) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1122,
+ "Null template size obtained.\n");
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ return rval;
+}
+
+int
+qla82xx_md_get_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+ "Entered %s.\n", __func__);
+
+ ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+ ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+ if (!ha->md_tmplt_hdr) {
+ ql_log(ql_log_warn, vha, 0x1124,
+ "Unable to allocate memory for Minidump template.\n");
+ return rval;
+ }
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT);
+ mcp->mb[3] = MSW(RQST_TMPLT);
+ mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
+ mcp->mb[8] = LSW(ha->md_template_size);
+ mcp->mb[9] = MSW(ha->md_template_size);
+
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1125,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ ((mcp->mb[1] << 16) | mcp->mb[0]),
+ ((mcp->mb[3] << 16) | mcp->mb[2]));
+ } else
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+ "Done %s.\n", __func__);
+ return rval;
+}
+
+int
+qla8044_md_get_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+ int offset = 0, size = MINIDUMP_SIZE_36K;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
+ "Entered %s.\n", __func__);
+
+ ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+ ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+ if (!ha->md_tmplt_hdr) {
+ ql_log(ql_log_warn, vha, 0xb11b,
+ "Unable to allocate memory for Minidump template.\n");
+ return rval;
+ }
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ while (offset < ha->md_template_size) {
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT);
+ mcp->mb[3] = MSW(RQST_TMPLT);
+ mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[8] = LSW(size);
+ mcp->mb[9] = MSW(size);
+ mcp->mb[10] = offset & 0x0000FFFF;
+ mcp->mb[11] = offset & 0xFFFF0000;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xb11c,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ ((mcp->mb[1] << 16) | mcp->mb[0]),
+ ((mcp->mb[3] << 16) | mcp->mb[2]));
+ return rval;
+ } else
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
+ "Done %s.\n", __func__);
+ offset = offset + size;
+ }
+ return rval;
+}
+
+int
+qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+ "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_SET_LED_CONFIG;
+ mcp->mb[1] = led_cfg[0];
+ mcp->mb[2] = led_cfg[1];
+ if (IS_QLA8031(ha)) {
+ mcp->mb[3] = led_cfg[2];
+ mcp->mb[4] = led_cfg[3];
+ mcp->mb[5] = led_cfg[4];
+ mcp->mb[6] = led_cfg[5];
+ }
+
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA8031(ha))
+ mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1134,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+ "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_GET_LED_CONFIG;
+
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA8031(ha))
+ mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1137,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ led_cfg[0] = mcp->mb[1];
+ led_cfg[1] = mcp->mb[2];
+ if (IS_QLA8031(ha)) {
+ led_cfg[2] = mcp->mb[3];
+ led_cfg[3] = mcp->mb[4];
+ led_cfg[4] = mcp->mb[5];
+ led_cfg[5] = mcp->mb[6];
+ }
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
+ "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_SET_LED_CONFIG;
+ if (enable)
+ mcp->mb[7] = 0xE;
+ else
+ mcp->mb[7] = 0xD;
+
+ mcp->out_mb = MBX_7|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1128,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+ mcp->mb[1] = LSW(reg);
+ mcp->mb[2] = MSW(reg);
+ mcp->mb[3] = LSW(data);
+ mcp->mb[4] = MSW(data);
+ mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1131,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
+ "Implicit LOGO Unsupported.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+ "Entering %s.\n", __func__);
+
+ /* Perform Implicit LOGO. */
+ mcp->mb[0] = MBC_PORT_LOGOUT;
+ mcp->mb[1] = fcport->loop_id;
+ mcp->mb[10] = BIT_15;
+ mcp->out_mb = MBX_10|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_mbx, vha, 0x113d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ else
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+ "Done %s.\n", __func__);
+
+ return rval;
+}
+
+int
+qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long retry_max_time = jiffies + (2 * HZ);
+
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
+
+retry_rd_reg:
+ mcp->mb[0] = MBC_READ_REMOTE_REG;
+ mcp->mb[1] = LSW(reg);
+ mcp->mb[2] = MSW(reg);
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x114c,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ *data = (mcp->mb[3] | (mcp->mb[4] << 16));
+ if (*data == QLA8XXX_BAD_VALUE) {
+ /*
+ * During soft-reset CAMRAM register reads might
+ * return 0xbad0bad0. So retry for MAX of 2 sec
+ * while reading camram registers.
+ */
+ if (time_after(jiffies, retry_max_time)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1141,
+ "Failure to read CAMRAM register. "
+ "data=0x%x.\n", *data);
+ return QLA_FUNCTION_FAILED;
+ }
+ msleep(100);
+ goto retry_rd_reg;
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA83XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1144,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ ha->isp_ops->fw_dump(vha, 0);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
+ uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint8_t subcode = (uint8_t)options;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
+ mcp->mb[1] = options;
+ mcp->out_mb = MBX_1|MBX_0;
+ if (subcode & BIT_2) {
+ mcp->mb[2] = LSW(start_addr);
+ mcp->mb[3] = MSW(start_addr);
+ mcp->mb[4] = LSW(end_addr);
+ mcp->mb[5] = MSW(end_addr);
+ mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
+ }
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (!(subcode & (BIT_2 | BIT_5)))
+ mcp->in_mb |= MBX_4|MBX_3;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1147,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+ mcp->mb[4]);
+ ha->isp_ops->fw_dump(vha, 0);
+ } else {
+ if (subcode & BIT_5)
+ *sector_size = mcp->mb[1];
+ else if (subcode & (BIT_6 | BIT_7)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1148,
+ "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
+ } else if (subcode & (BIT_3 | BIT_4)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1149,
+ "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+ uint32_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_MCTP_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+ mcp->mb[1] = LSW(addr);
+ mcp->mb[2] = MSW(req_dma);
+ mcp->mb[3] = LSW(req_dma);
+ mcp->mb[4] = MSW(size);
+ mcp->mb[5] = LSW(size);
+ mcp->mb[6] = MSW(MSD(req_dma));
+ mcp->mb[7] = LSW(MSD(req_dma));
+ mcp->mb[8] = MSW(addr);
+ /* Setting RAM ID to valid */
+ mcp->mb[10] |= BIT_7;
+ /* For MCTP RAM ID is 0x40 */
+ mcp->mb[10] |= 0x40;
+
+ mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
+ MBX_0;
+
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x114e,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
new file mode 100644
index 000000000..cc9419251
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -0,0 +1,859 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_gbl.h"
+#include "qla_target.h"
+
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <linux/delay.h>
+
+void
+qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
+{
+ if (vha->vp_idx && vha->timer_active) {
+ del_timer_sync(&vha->timer);
+ vha->timer_active = 0;
+ }
+}
+
+static uint32_t
+qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
+{
+ uint32_t vp_id;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ /* Find an empty slot and assign an vp_id */
+ mutex_lock(&ha->vport_lock);
+ vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
+ if (vp_id > ha->max_npiv_vports) {
+ ql_dbg(ql_dbg_vport, vha, 0xa000,
+ "vp_id %d is bigger than max-supported %d.\n",
+ vp_id, ha->max_npiv_vports);
+ mutex_unlock(&ha->vport_lock);
+ return vp_id;
+ }
+
+ set_bit(vp_id, ha->vp_idx_map);
+ ha->num_vhosts++;
+ vha->vp_idx = vp_id;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_add_tail(&vha->list, &ha->vp_list);
+
+ qlt_update_vp_map(vha, SET_VP_IDX);
+
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ mutex_unlock(&ha->vport_lock);
+ return vp_id;
+}
+
+void
+qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
+{
+ uint16_t vp_id;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ mutex_lock(&ha->vport_lock);
+ /*
+ * Wait for all pending activities to finish before removing vport from
+ * the list.
+ * Lock needs to be held for safe removal from the list (it
+ * ensures no active vp_list traversal while the vport is removed
+ * from the queue)
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ while (atomic_read(&vha->vref_count)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ msleep(500);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ vp_id = vha->vp_idx;
+ ha->num_vhosts--;
+ clear_bit(vp_id, ha->vp_idx_map);
+
+ mutex_unlock(&ha->vport_lock);
+}
+
+static scsi_qla_host_t *
+qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
+{
+ scsi_qla_host_t *vha;
+ struct scsi_qla_host *tvha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ /* Locate matching device in database. */
+ list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+ if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return vha;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return NULL;
+}
+
+/*
+ * qla2x00_mark_vp_devices_dead
+ * Updates fcport state when device goes offline.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * None.
+ *
+ * Context:
+ */
+static void
+qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
+{
+ /*
+ * !!! NOTE !!!
+ * This function, if called in contexts other than vp create, disable
+ * or delete, please make sure this is synchronized with the
+ * delete thread.
+ */
+ fc_port_t *fcport;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ ql_dbg(ql_dbg_vport, vha, 0xa001,
+ "Marking port dead, loop_id=0x%04x : %x.\n",
+ fcport->loop_id, fcport->vha->vp_idx);
+
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
+ }
+}
+
+int
+qla24xx_disable_vp(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+ /* Remove port id from vp target map */
+ spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ qlt_update_vp_map(vha, RESET_AL_PA);
+ spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
+ qla2x00_mark_vp_devices_dead(vha);
+ atomic_set(&vha->vp_state, VP_FAILED);
+ vha->flags.management_server_logged_in = 0;
+ if (ret == QLA_SUCCESS) {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
+ } else {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ return -1;
+ }
+ return 0;
+}
+
+int
+qla24xx_enable_vp(scsi_qla_host_t *vha)
+{
+ int ret;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ /* Check if physical ha port is Up */
+ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
+ !(ha->current_topology & ISP_CFG_F)) {
+ vha->vp_err_state = VP_ERR_PORTDWN;
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+ goto enable_failed;
+ }
+
+ /* Initialize the new vport unless it is a persistent port */
+ mutex_lock(&ha->vport_lock);
+ ret = qla24xx_modify_vp_config(vha);
+ mutex_unlock(&ha->vport_lock);
+
+ if (ret != QLA_SUCCESS) {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ goto enable_failed;
+ }
+
+ ql_dbg(ql_dbg_taskm, vha, 0x801a,
+ "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
+ return 0;
+
+enable_failed:
+ ql_dbg(ql_dbg_taskm, vha, 0x801b,
+ "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
+ return 1;
+}
+
+static void
+qla24xx_configure_vp(scsi_qla_host_t *vha)
+{
+ struct fc_vport *fc_vport;
+ int ret;
+
+ fc_vport = vha->fc_vport;
+
+ ql_dbg(ql_dbg_vport, vha, 0xa002,
+ "%s: change request #3.\n", __func__);
+ ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
+ "receiving of RSCN requests: 0x%x.\n", ret);
+ return;
+ } else {
+ /* Corresponds to SCR enabled */
+ clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
+ }
+
+ vha->flags.online = 1;
+ if (qla24xx_configure_vhba(vha))
+ return;
+
+ atomic_set(&vha->vp_state, VP_ACTIVE);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+void
+qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha = rsp->hw;
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
+ if (vha->vp_idx) {
+ atomic_inc(&vha->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ switch (mb[0]) {
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_UP:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_POINT_TO_POINT:
+ case MBA_CHG_IN_CONNECTION:
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p.\n",
+ i, *mb, vha);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ }
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vha->vref_count);
+ }
+ i++;
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+int
+qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
+{
+ /*
+ * Physical port will do most of the abort and recovery work. We can
+ * just treat it as a loop down
+ */
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ }
+
+ /*
+ * To exclusively reset vport, we need to log it out first. Note: this
+ * control_vp can fail if ISP reset is already issued, this is
+ * expected, as the vp would be already logged out due to ISP reset.
+ */
+ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+
+ ql_dbg(ql_dbg_taskm, vha, 0x801d,
+ "Scheduling enable of Vport %d.\n", vha->vp_idx);
+ return qla24xx_enable_vp(vha);
+}
+
+static int
+qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+ "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
+
+ qla2x00_do_work(vha);
+
+ /* Check if Fw is ready to configure VP first */
+ if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
+ if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+ /* VP acquired. complete port configuration */
+ ql_dbg(ql_dbg_dpc, vha, 0x4014,
+ "Configure VP scheduled.\n");
+ qla24xx_configure_vp(vha);
+ ql_dbg(ql_dbg_dpc, vha, 0x4015,
+ "Configure VP end.\n");
+ return 0;
+ }
+ }
+
+ if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, vha, 0x4016,
+ "FCPort update scheduled.\n");
+ qla2x00_update_fcports(vha);
+ clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, vha, 0x4017,
+ "FCPort update end.\n");
+ }
+
+ if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
+
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
+ qla2x00_relogin(vha);
+ ql_dbg(ql_dbg_dpc, vha, 0x4019,
+ "Relogin needed end.\n");
+ }
+
+ if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
+ (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
+ clear_bit(RESET_ACTIVE, &vha->dpc_flags);
+ }
+
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
+ ql_dbg(ql_dbg_dpc, vha, 0x401a,
+ "Loop resync scheduled.\n");
+ qla2x00_loop_resync(vha);
+ clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, vha, 0x401b,
+ "Loop resync end.\n");
+ }
+ }
+
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
+ "Exiting %s.\n", __func__);
+ return 0;
+}
+
+void
+qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
+{
+ int ret;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags = 0;
+
+ if (vha->vp_idx)
+ return;
+ if (list_empty(&ha->vp_list))
+ return;
+
+ clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
+
+ if (!(ha->current_topology & ISP_CFG_F))
+ return;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ ret = qla2x00_do_dpc_vp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+int
+qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+ struct qla_hw_data *ha = base_vha->hw;
+ scsi_qla_host_t *vha;
+ uint8_t port_name[WWN_SIZE];
+
+ if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
+ return VPCERR_UNSUPPORTED;
+
+ /* Check up the F/W and H/W support NPIV */
+ if (!ha->flags.npiv_supported)
+ return VPCERR_UNSUPPORTED;
+
+ /* Check up whether npiv supported switch presented */
+ if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
+ return VPCERR_NO_FABRIC_SUPP;
+
+ /* Check up unique WWPN */
+ u64_to_wwn(fc_vport->port_name, port_name);
+ if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
+ return VPCERR_BAD_WWN;
+ vha = qla24xx_find_vhost_by_name(ha, port_name);
+ if (vha)
+ return VPCERR_BAD_WWN;
+
+ /* Check up max-npiv-supports */
+ if (ha->num_vhosts > ha->max_npiv_vports) {
+ ql_dbg(ql_dbg_vport, vha, 0xa004,
+ "num_vhosts %ud is bigger "
+ "than max_npiv_vports %ud.\n",
+ ha->num_vhosts, ha->max_npiv_vports);
+ return VPCERR_UNSUPPORTED;
+ }
+ return 0;
+}
+
+scsi_qla_host_t *
+qla24xx_create_vhost(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+ struct qla_hw_data *ha = base_vha->hw;
+ scsi_qla_host_t *vha;
+ struct scsi_host_template *sht = &qla2xxx_driver_template;
+ struct Scsi_Host *host;
+
+ vha = qla2x00_create_host(sht, ha);
+ if (!vha) {
+ ql_log(ql_log_warn, vha, 0xa005,
+ "scsi_host_alloc() failed for vport.\n");
+ return(NULL);
+ }
+
+ host = vha->host;
+ fc_vport->dd_data = vha;
+ /* New host info */
+ u64_to_wwn(fc_vport->node_name, vha->node_name);
+ u64_to_wwn(fc_vport->port_name, vha->port_name);
+
+ vha->fc_vport = fc_vport;
+ vha->device_flags = 0;
+ vha->vp_idx = qla24xx_allocate_vp_id(vha);
+ if (vha->vp_idx > ha->max_npiv_vports) {
+ ql_dbg(ql_dbg_vport, vha, 0xa006,
+ "Couldn't allocate vp_id.\n");
+ goto create_vhost_failed;
+ }
+ vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+
+ vha->dpc_flags = 0L;
+
+ /*
+ * To fix the issue of processing a parent's RSCN for the vport before
+ * its SCR is complete.
+ */
+ set_bit(VP_SCR_NEEDED, &vha->vp_flags);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+ qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+
+ vha->req = base_vha->req;
+ host->can_queue = base_vha->req->length + 128;
+ host->cmd_per_lun = 3;
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
+ host->max_cmd_len = 32;
+ else
+ host->max_cmd_len = MAX_CMDSZ;
+ host->max_channel = MAX_BUSES - 1;
+ host->max_lun = ql2xmaxlun;
+ host->unique_id = host->host_no;
+ host->max_id = ha->max_fibre_devices;
+ host->transportt = qla2xxx_transport_vport_template;
+
+ ql_dbg(ql_dbg_vport, vha, 0xa007,
+ "Detect vport hba %ld at address = %p.\n",
+ vha->host_no, vha);
+
+ vha->flags.init_done = 1;
+
+ mutex_lock(&ha->vport_lock);
+ set_bit(vha->vp_idx, ha->vp_idx_map);
+ ha->cur_vport_count++;
+ mutex_unlock(&ha->vport_lock);
+
+ return vha;
+
+create_vhost_failed:
+ return NULL;
+}
+
+static void
+qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t que_id = req->id;
+
+ dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
+ sizeof(request_t), req->ring, req->dma);
+ req->ring = NULL;
+ req->dma = 0;
+ if (que_id) {
+ ha->req_q_map[que_id] = NULL;
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->req_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ }
+ kfree(req->outstanding_cmds);
+ kfree(req);
+ req = NULL;
+}
+
+static void
+qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t que_id = rsp->id;
+
+ if (rsp->msix && rsp->msix->have_irq) {
+ free_irq(rsp->msix->vector, rsp);
+ rsp->msix->have_irq = 0;
+ rsp->msix->rsp = NULL;
+ }
+ dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
+ sizeof(response_t), rsp->ring, rsp->dma);
+ rsp->ring = NULL;
+ rsp->dma = 0;
+ if (que_id) {
+ ha->rsp_q_map[que_id] = NULL;
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->rsp_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ }
+ kfree(rsp);
+ rsp = NULL;
+}
+
+int
+qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+ int ret = -1;
+
+ if (req) {
+ req->options |= BIT_0;
+ ret = qla25xx_init_req_que(vha, req);
+ }
+ if (ret == QLA_SUCCESS)
+ qla25xx_free_req_que(vha, req);
+
+ return ret;
+}
+
+static int
+qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+ int ret = -1;
+
+ if (rsp) {
+ rsp->options |= BIT_0;
+ ret = qla25xx_init_rsp_que(vha, rsp);
+ }
+ if (ret == QLA_SUCCESS)
+ qla25xx_free_rsp_que(vha, rsp);
+
+ return ret;
+}
+
+/* Delete all queues for a given vhost */
+int
+qla25xx_delete_queues(struct scsi_qla_host *vha)
+{
+ int cnt, ret = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+ if (req) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
+ return ret;
+ }
+ }
+ }
+
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ if (rsp) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+int
+qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
+ uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
+{
+ int ret = 0;
+ struct req_que *req = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ uint16_t que_id = 0;
+ device_reg_t *reg;
+ uint32_t cnt;
+
+ req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+ if (req == NULL) {
+ ql_log(ql_log_fatal, base_vha, 0x00d9,
+ "Failed to allocate memory for request queue.\n");
+ goto failed;
+ }
+
+ req->length = REQUEST_ENTRY_CNT_24XX;
+ req->ring = dma_alloc_coherent(&ha->pdev->dev,
+ (req->length + 1) * sizeof(request_t),
+ &req->dma, GFP_KERNEL);
+ if (req->ring == NULL) {
+ ql_log(ql_log_fatal, base_vha, 0x00da,
+ "Failed to allocate memory for request_ring.\n");
+ goto que_failed;
+ }
+
+ ret = qla2x00_alloc_outstanding_cmds(ha, req);
+ if (ret != QLA_SUCCESS)
+ goto que_failed;
+
+ mutex_lock(&ha->vport_lock);
+ que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
+ if (que_id >= ha->max_req_queues) {
+ mutex_unlock(&ha->vport_lock);
+ ql_log(ql_log_warn, base_vha, 0x00db,
+ "No resources to create additional request queue.\n");
+ goto que_failed;
+ }
+ set_bit(que_id, ha->req_qid_map);
+ ha->req_q_map[que_id] = req;
+ req->rid = rid;
+ req->vp_idx = vp_idx;
+ req->qos = qos;
+
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
+ "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+ que_id, req->rid, req->vp_idx, req->qos);
+ ql_dbg(ql_dbg_init, base_vha, 0x00dc,
+ "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+ que_id, req->rid, req->vp_idx, req->qos);
+ if (rsp_que < 0)
+ req->rsp = NULL;
+ else
+ req->rsp = ha->rsp_q_map[rsp_que];
+ /* Use alternate PCI bus number */
+ if (MSB(req->rid))
+ options |= BIT_4;
+ /* Use alternate PCI devfn */
+ if (LSB(req->rid))
+ options |= BIT_5;
+ req->options = options;
+
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
+ "options=0x%x.\n", req->options);
+ ql_dbg(ql_dbg_init, base_vha, 0x00dd,
+ "options=0x%x.\n", req->options);
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
+ req->outstanding_cmds[cnt] = NULL;
+ req->current_outstanding_cmd = 1;
+
+ req->ring_ptr = req->ring;
+ req->ring_index = 0;
+ req->cnt = req->length;
+ req->id = que_id;
+ reg = ISP_QUE_REG(ha, que_id);
+ req->req_q_in = &reg->isp25mq.req_q_in;
+ req->req_q_out = &reg->isp25mq.req_q_out;
+ req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+ req->out_ptr = (void *)(req->ring + req->length);
+ mutex_unlock(&ha->vport_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
+ "ring_ptr=%p ring_index=%d, "
+ "cnt=%d id=%d max_q_depth=%d.\n",
+ req->ring_ptr, req->ring_index,
+ req->cnt, req->id, req->max_q_depth);
+ ql_dbg(ql_dbg_init, base_vha, 0x00de,
+ "ring_ptr=%p ring_index=%d, "
+ "cnt=%d id=%d max_q_depth=%d.\n",
+ req->ring_ptr, req->ring_index, req->cnt,
+ req->id, req->max_q_depth);
+
+ ret = qla25xx_init_req_que(base_vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x00df,
+ "%s failed.\n", __func__);
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->req_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ goto que_failed;
+ }
+
+ return req->id;
+
+que_failed:
+ qla25xx_free_req_que(base_vha, req);
+failed:
+ return 0;
+}
+
+static void qla_do_work(struct work_struct *work)
+{
+ unsigned long flags;
+ struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = rsp->hw;
+
+ spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ qla24xx_process_response_queue(vha, rsp);
+ spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+}
+
+/* create response queue */
+int
+qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
+ uint8_t vp_idx, uint16_t rid, int req)
+{
+ int ret = 0;
+ struct rsp_que *rsp = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ uint16_t que_id = 0;
+ device_reg_t *reg;
+
+ rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+ if (rsp == NULL) {
+ ql_log(ql_log_warn, base_vha, 0x0066,
+ "Failed to allocate memory for response queue.\n");
+ goto failed;
+ }
+
+ rsp->length = RESPONSE_ENTRY_CNT_MQ;
+ rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
+ (rsp->length + 1) * sizeof(response_t),
+ &rsp->dma, GFP_KERNEL);
+ if (rsp->ring == NULL) {
+ ql_log(ql_log_warn, base_vha, 0x00e1,
+ "Failed to allocate memory for response ring.\n");
+ goto que_failed;
+ }
+
+ mutex_lock(&ha->vport_lock);
+ que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
+ if (que_id >= ha->max_rsp_queues) {
+ mutex_unlock(&ha->vport_lock);
+ ql_log(ql_log_warn, base_vha, 0x00e2,
+ "No resources to create additional request queue.\n");
+ goto que_failed;
+ }
+ set_bit(que_id, ha->rsp_qid_map);
+
+ if (ha->flags.msix_enabled)
+ rsp->msix = &ha->msix_entries[que_id + 1];
+ else
+ ql_log(ql_log_warn, base_vha, 0x00e3,
+ "MSIX not enabled.\n");
+
+ ha->rsp_q_map[que_id] = rsp;
+ rsp->rid = rid;
+ rsp->vp_idx = vp_idx;
+ rsp->hw = ha;
+ ql_dbg(ql_dbg_init, base_vha, 0x00e4,
+ "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ que_id, rsp->rid, rsp->vp_idx, rsp->hw);
+ /* Use alternate PCI bus number */
+ if (MSB(rsp->rid))
+ options |= BIT_4;
+ /* Use alternate PCI devfn */
+ if (LSB(rsp->rid))
+ options |= BIT_5;
+ /* Enable MSIX handshake mode on for uncapable adapters */
+ if (!IS_MSIX_NACK_CAPABLE(ha))
+ options |= BIT_6;
+
+ rsp->options = options;
+ rsp->id = que_id;
+ reg = ISP_QUE_REG(ha, que_id);
+ rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
+ rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ mutex_unlock(&ha->vport_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ rsp->options, rsp->id, rsp->rsp_q_in,
+ rsp->rsp_q_out);
+ ql_dbg(ql_dbg_init, base_vha, 0x00e5,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ rsp->options, rsp->id, rsp->rsp_q_in,
+ rsp->rsp_q_out);
+
+ ret = qla25xx_request_irq(rsp);
+ if (ret)
+ goto que_failed;
+
+ ret = qla25xx_init_rsp_que(base_vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x00e7,
+ "%s failed.\n", __func__);
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->rsp_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ goto que_failed;
+ }
+ if (req >= 0)
+ rsp->req = ha->req_q_map[req];
+ else
+ rsp->req = NULL;
+
+ qla2x00_init_response_q_entries(rsp);
+ if (rsp->hw->wq)
+ INIT_WORK(&rsp->q_work, qla_do_work);
+ return rsp->id;
+
+que_failed:
+ qla25xx_free_rsp_que(base_vha, rsp);
+failed:
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
new file mode 100644
index 000000000..6d190b4b8
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -0,0 +1,3461 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/utsname.h>
+
+
+/* QLAFX00 specific Mailbox implementation functions */
+
+/*
+ * qlafx00_mailbox_command
+ * Issue mailbox command and waits for completion.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ * 0 : QLA_SUCCESS = cmd performed success
+ * 1 : QLA_FUNCTION_FAILED (error encountered)
+ * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
+
+{
+ int rval;
+ unsigned long flags = 0;
+ device_reg_t *reg;
+ uint8_t abort_active;
+ uint8_t io_lock_on;
+ uint16_t command = 0;
+ uint32_t *iptr;
+ uint32_t __iomem *optr;
+ uint32_t cnt;
+ uint32_t mboxes;
+ unsigned long wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (ha->pdev->error_state > pci_channel_io_frozen) {
+ ql_log(ql_log_warn, vha, 0x115c,
+ "error_state is greater than pci_channel_io_frozen, "
+ "exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_log(ql_log_warn, vha, 0x115f,
+ "Device in failed state, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ reg = ha->iobase;
+ io_lock_on = base_vha->flags.init_done;
+
+ rval = QLA_SUCCESS;
+ abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+ if (ha->flags.pci_channel_io_perm_failure) {
+ ql_log(ql_log_warn, vha, 0x1175,
+ "Perm failure on EEH timeout MBX, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ ql_log(ql_log_warn, vha, 0x1176,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+ rval = QLA_FUNCTION_FAILED;
+ goto premature_exit;
+ }
+
+ /*
+ * Wait for active mailbox commands to finish by waiting at most tov
+ * seconds. This is to serialize actual issuing of mailbox cmds during
+ * non ISP abort time.
+ */
+ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+ /* Timeout occurred. Return error. */
+ ql_log(ql_log_warn, vha, 0x1177,
+ "Cmd access timeout, cmd=0x%x, Exiting.\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ ha->flags.mbox_busy = 1;
+ /* Save mailbox command for debug */
+ ha->mcp32 = mcp;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1178,
+ "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Load mailbox registers. */
+ optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+
+ iptr = mcp->mb;
+ command = mcp->mb[0];
+ mboxes = mcp->out_mb;
+
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (mboxes & BIT_0)
+ WRT_REG_DWORD(optr, *iptr);
+
+ mboxes >>= 1;
+ optr++;
+ iptr++;
+ }
+
+ /* Issue set host interrupt command to send cmd out. */
+ ha->flags.mbox_int = 0;
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
+ (uint8_t *)mcp->mb, 16);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
+ ((uint8_t *)mcp->mb + 0x10), 16);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
+ ((uint8_t *)mcp->mb + 0x20), 8);
+
+ /* Unlock mbx registers and wait for interrupt */
+ ql_dbg(ql_dbg_mbx, vha, 0x1179,
+ "Going to unlock irq & waiting for interrupts. "
+ "jiffies=%lx.\n", jiffies);
+
+ /* Wait for mbx cmd completion until timeout */
+ if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+ set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+ QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x112c,
+ "Cmd=%x Polling Mode.\n", command);
+
+ QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+ while (!ha->flags.mbox_int) {
+ if (time_after(jiffies, wait_time))
+ break;
+
+ /* Check for pending interrupts. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+
+ if (!ha->flags.mbox_int &&
+ !(IS_QLA2200(ha) &&
+ command == MBC_LOAD_RISC_RAM_EXTENDED))
+ usleep_range(10000, 11000);
+ } /* while */
+ ql_dbg(ql_dbg_mbx, vha, 0x112d,
+ "Waited %d sec.\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+ }
+
+ /* Check whether we timed out */
+ if (ha->flags.mbox_int) {
+ uint32_t *iptr2;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x112e,
+ "Cmd=%x completed.\n", command);
+
+ /* Got interrupt. Clear the flag. */
+ ha->flags.mbox_int = 0;
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+
+ /* Load return mailbox registers. */
+ iptr2 = mcp->mb;
+ iptr = (uint32_t *)&ha->mailbox_out32[0];
+ mboxes = mcp->in_mb;
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (mboxes & BIT_0)
+ *iptr2 = *iptr;
+
+ mboxes >>= 1;
+ iptr2++;
+ iptr++;
+ }
+ } else {
+
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+
+ ha->flags.mbox_busy = 0;
+
+ /* Clean up */
+ ha->mcp32 = NULL;
+
+ if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x113a,
+ "checking for additional resp interrupt.\n");
+
+ /* polling mode for non isp_abort commands. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+ }
+
+ if (rval == QLA_FUNCTION_TIMEOUT &&
+ mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+ if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+ ha->flags.eeh_busy) {
+ /* not in dpc. schedule it for dpc to take over. */
+ ql_dbg(ql_dbg_mbx, vha, 0x115d,
+ "Timeout, schedule isp_abort_needed.\n");
+
+ if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+ ql_log(ql_log_info, base_vha, 0x115e,
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
+ "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+ "abort.\n", command, mcp->mb[0],
+ ha->flags.eeh_busy);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else if (!abort_active) {
+ /* call abort directly since we are in the DPC thread */
+ ql_dbg(ql_dbg_mbx, vha, 0x1160,
+ "Timeout, calling abort_isp.\n");
+
+ if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+ ql_log(ql_log_info, base_vha, 0x1161,
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
+ "mb[0]=0x%x. Scheduling ISP abort ",
+ command, mcp->mb[0]);
+
+ set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ if (ha->isp_ops->abort_isp(vha)) {
+ /* Failed. retry later. */
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+ ql_dbg(ql_dbg_mbx, vha, 0x1162,
+ "Finished abort_isp.\n");
+ }
+ }
+ }
+
+premature_exit:
+ /* Allow next mbx cmd to come in. */
+ complete(&ha->mbx_cmd_comp);
+
+ if (rval) {
+ ql_log(ql_log_warn, base_vha, 0x1163,
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
+ "mb[3]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+ } else {
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qlafx00_driver_shutdown
+ * Indicate a driver shutdown to firmware.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ if (tmo)
+ mcp->tov = tmo;
+ else
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qlafx00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1167,
+ "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qlafx00_get_firmware_state
+ * Get adapter firmware state.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla7xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qlafx00_mailbox_command(vha, mcp);
+
+ /* Return firmware states. */
+ states[0] = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x116a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/*
+ * qlafx00_init_firmware
+ * Initialize adapter firmware.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * dptr = Initialization control block pointer.
+ * size = size of initialization control block.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qlafx00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+ mcp->mb[1] = 0;
+ mcp->mb[2] = MSD(ha->init_cb_dma);
+ mcp->mb[3] = LSD(ha->init_cb_dma);
+
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qlafx00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x116d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/*
+ * qlafx00_mbx_reg_test
+ */
+static int
+qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
+ "Entered %s.\n", __func__);
+
+
+ mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+ mcp->mb[1] = 0xAAAA;
+ mcp->mb[2] = 0x5555;
+ mcp->mb[3] = 0xAA55;
+ mcp->mb[4] = 0x55AA;
+ mcp->mb[5] = 0xA5A5;
+ mcp->mb[6] = 0x5A5A;
+ mcp->mb[7] = 0x2525;
+ mcp->mb[8] = 0xBBBB;
+ mcp->mb[9] = 0x6666;
+ mcp->mb[10] = 0xBB66;
+ mcp->mb[11] = 0x66BB;
+ mcp->mb[12] = 0xB6B6;
+ mcp->mb[13] = 0x6B6B;
+ mcp->mb[14] = 0x3636;
+ mcp->mb[15] = 0xCCCC;
+
+
+ mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->buf_size = 0;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qlafx00_mailbox_command(vha, mcp);
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
+ mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
+ mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
+ mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
+ mcp->mb[31] != 0xCCCC)
+ rval = QLA_FUNCTION_FAILED;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1170,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/**
+ * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_pci_config(scsi_qla_host_t *vha)
+{
+ uint16_t w;
+ struct qla_hw_data *ha = vha->hw;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ /* PCIe -- adjust Maximum Read Request Size (2048). */
+ if (pci_is_pcie(ha->pdev))
+ pcie_set_readrq(ha->pdev, 2048);
+
+ ha->chip_revision = ha->pdev->revision;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qlafx00_warm_reset() - Perform warm reset of iSA(CPUs being reset on SOC).
+ * @ha: HA context
+ *
+ */
+static inline void
+qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
+{
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ int i, core;
+ uint32_t cnt;
+ uint32_t reg_val;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
+
+ /* stop the XOR DMA engines */
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
+
+ /* stop the IDMA engines */
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
+
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
+
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
+
+ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
+ reg_val &= ~(1<<12);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
+
+ for (i = 0; i < 100000; i++) {
+ if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
+ (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Set all 4 cores in reset */
+ for (i = 0; i < 4; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
+ }
+
+ /* Reset all units in Fabric */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
+
+ /* */
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
+ QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
+
+ /* Set all 4 core Memory Power Down Registers */
+ for (i = 0; i < 5; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
+ }
+
+ /* Reset all interrupt control registers */
+ for (i = 0; i < 115; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
+ }
+
+ /* Reset Timers control registers. per core */
+ for (core = 0; core < 4; core++)
+ for (i = 0; i < 8; i++)
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
+
+ /* Reset per core IRQ ack register */
+ for (core = 0; core < 4; core++)
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
+
+ /* Set Fabric control and config to defaults */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
+
+ /* Kick in Fabric units */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
+
+ /* Kick in Core0 to start boot process */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait 10secs for soft-reset to complete. */
+ for (cnt = 10; cnt; cnt--) {
+ msleep(1000);
+ barrier();
+ }
+}
+
+/**
+ * qlafx00_soft_reset() - Soft Reset ISPFx00.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qlafx00_soft_reset(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure))
+ return;
+
+ ha->isp_ops->disable_intrs(ha);
+ qlafx00_soc_cpu_reset(vha);
+}
+
+/**
+ * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_chip_diag(scsi_qla_host_t *vha)
+{
+ int rval = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+ rval = qlafx00_mbx_reg_test(vha);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x1165,
+ "Failed mailbox send register test\n");
+ } else {
+ /* Flag a successful rval */
+ rval = QLA_SUCCESS;
+ }
+ return rval;
+}
+
+void
+qlafx00_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+ WRT_REG_DWORD(&reg->req_q_in, 0);
+ WRT_REG_DWORD(&reg->req_q_out, 0);
+
+ WRT_REG_DWORD(&reg->rsp_q_in, 0);
+ WRT_REG_DWORD(&reg->rsp_q_out, 0);
+
+ /* PCI posting */
+ RD_REG_DWORD(&reg->rsp_q_out);
+}
+
+char *
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (pci_is_pcie(ha->pdev)) {
+ strcpy(str, "PCIe iSA");
+ return str;
+ }
+ return str;
+}
+
+char *
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ snprintf(str, size, "%s", ha->mr.fw_version);
+ return str;
+}
+
+void
+qlafx00_enable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 1;
+ QLAFX00_ENABLE_ICNTRL_REG(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlafx00_disable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 0;
+ QLAFX00_DISABLE_ICNTRL_REG(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int
+qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
+{
+ return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+}
+
+int
+qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
+{
+ return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+}
+
+int
+qlafx00_loop_reset(scsi_qla_host_t *vha)
+{
+ int ret;
+ struct fc_port *fcport;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ql2xtargetreset) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_taskm, vha, 0x803d,
+ "Bus Reset failed: Reset=%d "
+ "d_id=%x.\n", ret, fcport->d_id.b24);
+ }
+ }
+ }
+ return QLA_SUCCESS;
+}
+
+int
+qlafx00_iospace_config(struct qla_hw_data *ha)
+{
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
+ "Invalid PCI mem BAR0 region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->cregbase =
+ ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+ if (!ha->cregbase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
+ "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
+ "region #2 not an MMIO resource (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
+ "Invalid PCI mem BAR2 region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase =
+ ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
+ "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+
+ ql_log_pci(ql_log_info, ha->pdev, 0x012c,
+ "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
+ ha->bars, ha->cregbase, ha->iobase);
+
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+static void
+qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ req->length_fx00 = req->length;
+ req->ring_fx00 = req->ring;
+ req->dma_fx00 = req->dma;
+
+ rsp->length_fx00 = rsp->length;
+ rsp->ring_fx00 = rsp->ring;
+ rsp->dma_fx00 = rsp->dma;
+
+ ql_dbg(ql_dbg_init, vha, 0x012d,
+ "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
+ "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
+ req->length_fx00, (u64)req->dma_fx00);
+
+ ql_dbg(ql_dbg_init, vha, 0x012e,
+ "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
+ "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
+ rsp->length_fx00, (u64)rsp->dma_fx00);
+}
+
+static int
+qlafx00_config_queues(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
+
+ req->length = ha->req_que_len;
+ req->ring = (void *)ha->iobase + ha->req_que_off;
+ req->dma = bar2_hdl + ha->req_que_off;
+ if ((!req->ring) || (req->length == 0)) {
+ ql_log_pci(ql_log_info, ha->pdev, 0x012f,
+ "Unable to allocate memory for req_ring\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x0130,
+ "req: %p req_ring pointer %p req len 0x%x "
+ "req off 0x%x\n, req->dma: 0x%llx",
+ req, req->ring, req->length,
+ ha->req_que_off, (u64)req->dma);
+
+ rsp->length = ha->rsp_que_len;
+ rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+ rsp->dma = bar2_hdl + ha->rsp_que_off;
+ if ((!rsp->ring) || (rsp->length == 0)) {
+ ql_log_pci(ql_log_info, ha->pdev, 0x0131,
+ "Unable to allocate memory for rsp_ring\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x0132,
+ "rsp: %p rsp_ring pointer %p rsp len 0x%x "
+ "rsp off 0x%x, rsp->dma: 0x%llx\n",
+ rsp, rsp->ring, rsp->length,
+ ha->rsp_que_off, (u64)rsp->dma);
+
+ return QLA_SUCCESS;
+}
+
+static int
+qlafx00_init_fw_ready(scsi_qla_host_t *vha)
+{
+ int rval = 0;
+ unsigned long wtime;
+ uint16_t wait_time; /* Wait time */
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t aenmbx, aenmbx7 = 0;
+ uint32_t pseudo_aen;
+ uint32_t state[5];
+ bool done = false;
+
+ /* 30 seconds wait - Adjust if required */
+ wait_time = 30;
+
+ pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ if (pseudo_aen == 1) {
+ aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ rval = qlafx00_driver_shutdown(vha, 10);
+ if (rval != QLA_SUCCESS)
+ qlafx00_soft_reset(vha);
+ }
+
+ /* wait time before firmware ready */
+ wtime = jiffies + (wait_time * HZ);
+ do {
+ aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ barrier();
+ ql_dbg(ql_dbg_mbx, vha, 0x0133,
+ "aenmbx: 0x%x\n", aenmbx);
+
+ switch (aenmbx) {
+ case MBA_FW_NOT_STARTED:
+ case MBA_FW_STARTING:
+ break;
+
+ case MBA_SYSTEM_ERR:
+ case MBA_REQ_TRANSFER_ERR:
+ case MBA_RSP_TRANSFER_ERR:
+ case MBA_FW_INIT_FAILURE:
+ qlafx00_soft_reset(vha);
+ break;
+
+ case MBA_FW_RESTART_CMPLT:
+ /* Set the mbx and rqstq intr code */
+ aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+ ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ WRT_REG_DWORD(&reg->aenmailbox0, 0);
+ RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ql_dbg(ql_dbg_init, vha, 0x0134,
+ "f/w returned mbx_intr_code: 0x%x, "
+ "rqstq_intr_code: 0x%x\n",
+ ha->mbx_intr_code, ha->rqstq_intr_code);
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+ rval = QLA_SUCCESS;
+ done = true;
+ break;
+
+ default:
+ if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
+ break;
+
+ /* If fw is apparently not ready. In order to continue,
+ * we might need to issue Mbox cmd, but the problem is
+ * that the DoorBell vector values that come with the
+ * 8060 AEN are most likely gone by now (and thus no
+ * bell would be rung on the fw side when mbox cmd is
+ * issued). We have to therefore grab the 8060 AEN
+ * shadow regs (filled in by FW when the last 8060
+ * AEN was being posted).
+ * Do the following to determine what is needed in
+ * order to get the FW ready:
+ * 1. reload the 8060 AEN values from the shadow regs
+ * 2. clear int status to get rid of possible pending
+ * interrupts
+ * 3. issue Get FW State Mbox cmd to determine fw state
+ * Set the mbx and rqstq intr code from Shadow Regs
+ */
+ aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ ha->req_que_off = RD_REG_DWORD(&reg->initval1);
+ ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
+ ha->req_que_len = RD_REG_DWORD(&reg->initval5);
+ ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ql_dbg(ql_dbg_init, vha, 0x0135,
+ "f/w returned mbx_intr_code: 0x%x, "
+ "rqstq_intr_code: 0x%x\n",
+ ha->mbx_intr_code, ha->rqstq_intr_code);
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+ /* Get the FW state */
+ rval = qlafx00_get_firmware_state(vha, state);
+ if (rval != QLA_SUCCESS) {
+ /* Retry if timer has not expired */
+ break;
+ }
+
+ if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
+ /* Firmware is waiting to be
+ * initialized by driver
+ */
+ rval = QLA_SUCCESS;
+ done = true;
+ break;
+ }
+
+ /* Issue driver shutdown and wait until f/w recovers.
+ * Driver should continue to poll until 8060 AEN is
+ * received indicating firmware recovery.
+ */
+ ql_dbg(ql_dbg_init, vha, 0x0136,
+ "Sending Driver shutdown fw_state 0x%x\n",
+ state[0]);
+
+ rval = qlafx00_driver_shutdown(vha, 10);
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ msleep(500);
+
+ wtime = jiffies + (wait_time * HZ);
+ break;
+ }
+
+ if (!done) {
+ if (time_after_eq(jiffies, wtime)) {
+ ql_dbg(ql_dbg_init, vha, 0x0137,
+ "Init f/w failed: aen[7]: 0x%x\n",
+ RD_REG_DWORD(&reg->aenmailbox7));
+ rval = QLA_FUNCTION_FAILED;
+ done = true;
+ break;
+ }
+ /* Delay for a while */
+ msleep(500);
+ }
+ } while (!done);
+
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x0138,
+ "%s **** FAILED ****.\n", __func__);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x0139,
+ "%s **** SUCCESS ****.\n", __func__);
+
+ return rval;
+}
+
+/*
+ * qlafx00_fw_ready() - Waits for firmware ready.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_fw_ready(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long wtime;
+ uint16_t wait_time; /* Wait time if loop is coming ready */
+ uint32_t state[5];
+
+ rval = QLA_SUCCESS;
+
+ wait_time = 10;
+
+ /* wait time before firmware ready */
+ wtime = jiffies + (wait_time * HZ);
+
+ /* Wait for ISP to finish init */
+ if (!vha->flags.init_done)
+ ql_dbg(ql_dbg_init, vha, 0x013a,
+ "Waiting for init to complete...\n");
+
+ do {
+ rval = qlafx00_get_firmware_state(vha, state);
+
+ if (rval == QLA_SUCCESS) {
+ if (state[0] == FSTATE_FX00_INITIALIZED) {
+ ql_dbg(ql_dbg_init, vha, 0x013b,
+ "fw_state=%x\n", state[0]);
+ rval = QLA_SUCCESS;
+ break;
+ }
+ }
+ rval = QLA_FUNCTION_FAILED;
+
+ if (time_after_eq(jiffies, wtime))
+ break;
+
+ /* Delay for a while */
+ msleep(500);
+
+ ql_dbg(ql_dbg_init, vha, 0x013c,
+ "fw_state=%x curr time=%lx.\n", state[0], jiffies);
+ } while (1);
+
+
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x013d,
+ "Firmware ready **** FAILED ****.\n");
+ else
+ ql_dbg(ql_dbg_init, vha, 0x013e,
+ "Firmware ready **** SUCCESS ****.\n");
+
+ return rval;
+}
+
+static int
+qlafx00_find_all_targets(scsi_qla_host_t *vha,
+ struct list_head *new_fcports)
+{
+ int rval;
+ uint16_t tgt_id;
+ fc_port_t *fcport, *new_fcport;
+ int found;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = QLA_SUCCESS;
+
+ if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+ return QLA_FUNCTION_FAILED;
+
+ if ((atomic_read(&vha->loop_down_timer) ||
+ STATE_TRANSITION(vha))) {
+ atomic_set(&vha->loop_down_timer, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
+ "Listing Target bit map...\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
+ 0x2089, (uint8_t *)ha->gid_list, 32);
+
+ /* Allocate temporary rmtport for any new rmtports discovered. */
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL)
+ return QLA_MEMORY_ALLOC_FAILED;
+
+ for_each_set_bit(tgt_id, (void *)ha->gid_list,
+ QLAFX00_TGT_NODE_LIST_SIZE) {
+
+ /* Send get target node info */
+ new_fcport->tgt_id = tgt_id;
+ rval = qlafx00_fx_disc(vha, new_fcport,
+ FXDISC_GET_TGT_NODE_INFO);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x208a,
+ "Target info scan failed -- assuming zero-entry "
+ "result...\n");
+ continue;
+ }
+
+ /* Locate matching device in database. */
+ found = 0;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(new_fcport->port_name,
+ fcport->port_name, WWN_SIZE))
+ continue;
+
+ found++;
+
+ /*
+ * If tgt_id is same and state FCS_ONLINE, nothing
+ * changed.
+ */
+ if (fcport->tgt_id == new_fcport->tgt_id &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+ break;
+
+ /*
+ * Tgt ID changed or device was marked to be updated.
+ */
+ ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
+ "TGT-ID Change(%s): Present tgt id: "
+ "0x%x state: 0x%x "
+ "wwnn = %llx wwpn = %llx.\n",
+ __func__, fcport->tgt_id,
+ atomic_read(&fcport->state),
+ (unsigned long long)wwn_to_u64(fcport->node_name),
+ (unsigned long long)wwn_to_u64(fcport->port_name));
+
+ ql_log(ql_log_info, vha, 0x208c,
+ "TGT-ID Announce(%s): Discovered tgt "
+ "id 0x%x wwnn = %llx "
+ "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
+ (unsigned long long)
+ wwn_to_u64(new_fcport->node_name),
+ (unsigned long long)
+ wwn_to_u64(new_fcport->port_name));
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ fcport->old_tgt_id = fcport->tgt_id;
+ fcport->tgt_id = new_fcport->tgt_id;
+ ql_log(ql_log_info, vha, 0x208d,
+ "TGT-ID: New fcport Added: %p\n", fcport);
+ qla2x00_update_fcport(vha, fcport);
+ } else {
+ ql_log(ql_log_info, vha, 0x208e,
+ " Existing TGT-ID %x did not get "
+ " offline event from firmware.\n",
+ fcport->old_tgt_id);
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ kfree(new_fcport);
+ return rval;
+ }
+ break;
+ }
+
+ if (found)
+ continue;
+
+ /* If device was not in our fcports list, then add it. */
+ list_add_tail(&new_fcport->list, new_fcports);
+
+ /* Allocate a new replacement fcport. */
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL)
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ kfree(new_fcport);
+ return rval;
+}
+
+/*
+ * qlafx00_configure_all_targets
+ * Setup target devices with node ID's.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ * BIT_0 = error
+ */
+static int
+qlafx00_configure_all_targets(scsi_qla_host_t *vha)
+{
+ int rval;
+ fc_port_t *fcport, *rmptemp;
+ LIST_HEAD(new_fcports);
+
+ rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+ FXDISC_GET_TGT_NODE_LIST);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
+
+ rval = qlafx00_find_all_targets(vha, &new_fcports);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
+
+ /*
+ * Delete all previous devices marked lost.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+ if (fcport->port_type != FCT_INITIATOR)
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ }
+ }
+
+ /*
+ * Add the new devices to our devices list.
+ */
+ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ qla2x00_update_fcport(vha, fcport);
+ list_move_tail(&fcport->list, &vha->vp_fcports);
+ ql_log(ql_log_info, vha, 0x208f,
+ "Attach new target id 0x%x wwnn = %llx "
+ "wwpn = %llx.\n",
+ fcport->tgt_id,
+ (unsigned long long)wwn_to_u64(fcport->node_name),
+ (unsigned long long)wwn_to_u64(fcport->port_name));
+ }
+
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
+ return rval;
+}
+
+/*
+ * qlafx00_configure_devices
+ * Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ * 1 = error.
+ * 2 = database was full and device was not configured.
+ */
+int
+qlafx00_configure_devices(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long flags, save_flags;
+ rval = QLA_SUCCESS;
+
+ save_flags = flags = vha->dpc_flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2090,
+ "Configure devices -- dpc flags =0x%lx\n", flags);
+
+ rval = qlafx00_configure_all_targets(vha);
+
+ if (rval == QLA_SUCCESS) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ atomic_set(&vha->loop_state, LOOP_READY);
+ ql_log(ql_log_info, vha, 0x2091,
+ "Device Ready\n");
+ }
+ }
+
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x2092,
+ "%s *** FAILED ***.\n", __func__);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2093,
+ "%s: exiting normally.\n", __func__);
+ }
+ return rval;
+}
+
+static void
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ fc_port_t *fcport;
+
+ vha->flags.online = 0;
+ ha->mr.fw_hbt_en = 0;
+
+ if (!critemp) {
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+ ql_log(ql_log_info, vha, 0x013f,
+ "Performing ISP error recovery - ha = %p.\n", ha);
+ ha->isp_ops->reset_chip(vha);
+ }
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer,
+ QLAFX00_LOOP_DOWN_TIME);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ QLAFX00_LOOP_DOWN_TIME);
+ }
+
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->flags = 0;
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ }
+
+ if (!ha->flags.eeh_busy) {
+ if (critemp) {
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ } else {
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ }
+ }
+
+ qla2x00_free_irqs(vha);
+ if (critemp)
+ set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
+ else
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+ ql_log(ql_log_info, vha, 0x0140,
+ "%s Done done - ha=%p.\n", __func__, ha);
+}
+
+/**
+ * qlafx00_init_response_q_entries() - Initializes response queue entries.
+ * @ha: HA context
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
+ */
+void
+qlafx00_init_response_q_entries(struct rsp_que *rsp)
+{
+ uint16_t cnt;
+ response_t *pkt;
+
+ rsp->ring_ptr = rsp->ring;
+ rsp->ring_index = 0;
+ rsp->status_srb = NULL;
+ pkt = rsp->ring_ptr;
+ for (cnt = 0; cnt < rsp->length; cnt++) {
+ pkt->signature = RESPONSE_PROCESSED;
+ WRT_REG_DWORD((void __iomem *)&pkt->signature,
+ RESPONSE_PROCESSED);
+ pkt++;
+ }
+}
+
+int
+qlafx00_rescan_isp(scsi_qla_host_t *vha)
+{
+ uint32_t status = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t aenmbx7;
+
+ qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
+
+ aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+ ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+
+ ql_dbg(ql_dbg_disc, vha, 0x2094,
+ "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
+ " Req que offset 0x%x Rsp que offset 0x%x\n",
+ ha->mbx_intr_code, ha->rqstq_intr_code,
+ ha->req_que_off, ha->rsp_que_len);
+
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+ status = qla2x00_init_rings(vha);
+ if (!status) {
+ vha->flags.online = 1;
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+ /* Register system information */
+ if (qlafx00_fx_disc(vha,
+ &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
+ ql_dbg(ql_dbg_disc, vha, 0x2095,
+ "failed to register host info\n");
+ }
+ scsi_unblock_requests(vha->host);
+ return status;
+}
+
+void
+qlafx00_timer_routine(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t fw_heart_beat;
+ uint32_t aenmbx0;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t tempc;
+
+ /* Check firmware health */
+ if (ha->mr.fw_hbt_cnt)
+ ha->mr.fw_hbt_cnt--;
+ else {
+ if ((!ha->flags.mr_reset_hdlr_active) &&
+ (!test_bit(UNLOADING, &vha->dpc_flags)) &&
+ (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+ (ha->mr.fw_hbt_en)) {
+ fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
+ ha->mr.old_fw_hbt_cnt = fw_heart_beat;
+ ha->mr.fw_hbt_miss_cnt = 0;
+ } else {
+ ha->mr.fw_hbt_miss_cnt++;
+ if (ha->mr.fw_hbt_miss_cnt ==
+ QLAFX00_HEARTBEAT_MISS_CNT) {
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ ha->mr.fw_hbt_miss_cnt = 0;
+ }
+ }
+ }
+ ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+ }
+
+ if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
+ /* Reset recovery to be performed in timer routine */
+ aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ if (ha->mr.fw_reset_timer_exp) {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ ha->mr.fw_reset_timer_exp = 0;
+ } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
+ /* Wake up DPC to rescan the targets */
+ set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
+ clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ } else if ((aenmbx0 == MBA_FW_STARTING) &&
+ (!ha->mr.fw_hbt_en)) {
+ ha->mr.fw_hbt_en = 1;
+ } else if (!ha->mr.fw_reset_timer_tick) {
+ if (aenmbx0 == ha->mr.old_aenmbx0_state)
+ ha->mr.fw_reset_timer_exp = 1;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ } else if (aenmbx0 == 0xFFFFFFFF) {
+ uint32_t data0, data1;
+
+ data0 = QLAFX00_RD_REG(ha,
+ QLAFX00_BAR1_BASE_ADDR_REG);
+ data1 = QLAFX00_RD_REG(ha,
+ QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
+
+ data0 &= 0xffff0000;
+ data1 &= 0x0000ffff;
+
+ QLAFX00_WR_REG(ha,
+ QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
+ (data0 | data1));
+ } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
+ ha->mr.fw_reset_timer_tick =
+ QLAFX00_MAX_RESET_INTERVAL;
+ } else if (aenmbx0 == MBA_FW_RESET_FCT) {
+ ha->mr.fw_reset_timer_tick =
+ QLAFX00_MAX_RESET_INTERVAL;
+ }
+ if (ha->mr.old_aenmbx0_state != aenmbx0) {
+ ha->mr.old_aenmbx0_state = aenmbx0;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ }
+ ha->mr.fw_reset_timer_tick--;
+ }
+ if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
+ /*
+ * Critical temperature recovery to be
+ * performed in timer routine
+ */
+ if (ha->mr.fw_critemp_timer_tick == 0) {
+ tempc = QLAFX00_GET_TEMPERATURE(ha);
+ ql_dbg(ql_dbg_timer, vha, 0x6012,
+ "ISPFx00(%s): Critical temp timer, "
+ "current SOC temperature: %d\n",
+ __func__, tempc);
+ if (tempc < ha->mr.critical_temperature) {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ clear_bit(FX00_CRITEMP_RECOVERY,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ ha->mr.fw_critemp_timer_tick =
+ QLAFX00_CRITEMP_INTERVAL;
+ } else {
+ ha->mr.fw_critemp_timer_tick--;
+ }
+ }
+ if (ha->mr.host_info_resend) {
+ /*
+ * Incomplete host info might be sent to firmware
+ * durinng system boot - info should be resend
+ */
+ if (ha->mr.hinfo_resend_timer_tick == 0) {
+ ha->mr.host_info_resend = false;
+ set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
+ ha->mr.hinfo_resend_timer_tick =
+ QLAFX00_HINFO_RESEND_INTERVAL;
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ha->mr.hinfo_resend_timer_tick--;
+ }
+ }
+
+}
+
+/*
+ * qlfx00a_reset_initialize
+ * Re-initialize after a iSA device reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qlafx00_reset_initialize(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_dbg(ql_dbg_init, vha, 0x0142,
+ "Device in failed state\n");
+ return QLA_SUCCESS;
+ }
+
+ ha->flags.mr_reset_hdlr_active = 1;
+
+ if (vha->flags.online) {
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha, false);
+ }
+
+ ql_log(ql_log_info, vha, 0x0143,
+ "(%s): succeeded.\n", __func__);
+ ha->flags.mr_reset_hdlr_active = 0;
+ return QLA_SUCCESS;
+}
+
+/*
+ * qlafx00_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qlafx00_abort_isp(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.online) {
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure)) {
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ return QLA_SUCCESS;
+ }
+
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha, false);
+ } else {
+ scsi_block_requests(vha->host);
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+ ha->isp_ops->reset_chip(vha);
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+ }
+
+ ql_log(ql_log_info, vha, 0x0145,
+ "(%s): succeeded.\n", __func__);
+
+ return QLA_SUCCESS;
+}
+
+static inline fc_port_t*
+qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
+{
+ fc_port_t *fcport;
+
+ /* Check for matching device in remote port list. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->tgt_id == tgt_id) {
+ ql_dbg(ql_dbg_async, vha, 0x5072,
+ "Matching fcport(%p) found with TGT-ID: 0x%x "
+ "and Remote TGT_ID: 0x%x\n",
+ fcport, fcport->tgt_id, tgt_id);
+ return fcport;
+ }
+ }
+ return NULL;
+}
+
+static void
+qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
+{
+ fc_port_t *fcport;
+
+ ql_log(ql_log_info, vha, 0x5073,
+ "Detach TGT-ID: 0x%x\n", tgt_id);
+
+ fcport = qlafx00_get_fcport(vha, tgt_id);
+ if (!fcport)
+ return;
+
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+
+ return;
+}
+
+int
+qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
+{
+ int rval = 0;
+ uint32_t aen_code, aen_data;
+
+ aen_code = FCH_EVT_VENDOR_UNIQUE;
+ aen_data = evt->u.aenfx.evtcode;
+
+ switch (evt->u.aenfx.evtcode) {
+ case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
+ if (evt->u.aenfx.mbx[1] == 0) {
+ if (evt->u.aenfx.mbx[2] == 1) {
+ if (!vha->flags.fw_tgt_reported)
+ vha->flags.fw_tgt_reported = 1;
+ atomic_set(&vha->loop_down_timer, 0);
+ atomic_set(&vha->loop_state, LOOP_UP);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (evt->u.aenfx.mbx[2] == 2) {
+ qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
+ }
+ } else if (evt->u.aenfx.mbx[1] == 0xffff) {
+ if (evt->u.aenfx.mbx[2] == 1) {
+ if (!vha->flags.fw_tgt_reported)
+ vha->flags.fw_tgt_reported = 1;
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else if (evt->u.aenfx.mbx[2] == 2) {
+ vha->device_flags |= DFLG_NO_CABLE;
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+ }
+ break;
+ case QLAFX00_MBA_LINK_UP:
+ aen_code = FCH_EVT_LINKUP;
+ aen_data = 0;
+ break;
+ case QLAFX00_MBA_LINK_DOWN:
+ aen_code = FCH_EVT_LINKDOWN;
+ aen_data = 0;
+ break;
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5082,
+ "Process critical temperature event "
+ "aenmb[0]: %x\n",
+ evt->u.aenfx.evtcode);
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha, true);
+ scsi_unblock_requests(vha->host);
+ break;
+ }
+
+ fc_host_post_event(vha->host, fc_get_event_number(),
+ aen_code, aen_data);
+
+ return rval;
+}
+
+static void
+qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
+{
+ u64 port_name = 0, node_name = 0;
+
+ port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
+ node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
+
+ fc_host_node_name(vha->host) = node_name;
+ fc_host_port_name(vha->host) = port_name;
+ if (!pinfo->port_type)
+ vha->hw->current_topology = ISP_CFG_F;
+ if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
+ atomic_set(&vha->loop_state, LOOP_READY);
+ else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
+}
+
+static void
+qla2x00_fxdisc_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+static void
+qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+int
+qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
+{
+ srb_t *sp;
+ struct srb_iocb *fdisc;
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ struct host_system_info *phost_info;
+ struct register_host_info *preg_hsi;
+ struct new_utsname *p_sysid = NULL;
+ struct timeval tv;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ fdisc = &sp->u.iocb_cmd;
+ switch (fx_type) {
+ case FXDISC_GET_CONFIG_INFO:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID;
+ fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
+ break;
+ case FXDISC_GET_PORT_INFO:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+ fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
+ fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
+ break;
+ case FXDISC_GET_TGT_NODE_INFO:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+ fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
+ fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
+ break;
+ case FXDISC_GET_TGT_NODE_LIST:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+ fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
+ break;
+ case FXDISC_REG_HOST_INFO:
+ fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
+ fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
+ p_sysid = utsname();
+ if (!p_sysid) {
+ ql_log(ql_log_warn, vha, 0x303c,
+ "Not able to get the system information\n");
+ goto done_free_sp;
+ }
+ break;
+ case FXDISC_ABORT_IOCTL:
+ default:
+ break;
+ }
+
+ if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+ fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
+ fdisc->u.fxiocb.req_len,
+ &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
+ if (!fdisc->u.fxiocb.req_addr)
+ goto done_free_sp;
+
+ if (fx_type == FXDISC_REG_HOST_INFO) {
+ preg_hsi = (struct register_host_info *)
+ fdisc->u.fxiocb.req_addr;
+ phost_info = &preg_hsi->hsi;
+ memset(preg_hsi, 0, sizeof(struct register_host_info));
+ phost_info->os_type = OS_TYPE_LINUX;
+ strncpy(phost_info->sysname,
+ p_sysid->sysname, SYSNAME_LENGTH);
+ strncpy(phost_info->nodename,
+ p_sysid->nodename, NODENAME_LENGTH);
+ if (!strcmp(phost_info->nodename, "(none)"))
+ ha->mr.host_info_resend = true;
+ strncpy(phost_info->release,
+ p_sysid->release, RELEASE_LENGTH);
+ strncpy(phost_info->version,
+ p_sysid->version, VERSION_LENGTH);
+ strncpy(phost_info->machine,
+ p_sysid->machine, MACHINE_LENGTH);
+ strncpy(phost_info->domainname,
+ p_sysid->domainname, DOMNAME_LENGTH);
+ strncpy(phost_info->hostdriver,
+ QLA2XXX_VERSION, VERSION_LENGTH);
+ do_gettimeofday(&tv);
+ preg_hsi->utc = (uint64_t)tv.tv_sec;
+ ql_dbg(ql_dbg_init, vha, 0x0149,
+ "ISP%04X: Host registration with firmware\n",
+ ha->pdev->device);
+ ql_dbg(ql_dbg_init, vha, 0x014a,
+ "os_type = '%d', sysname = '%s', nodname = '%s'\n",
+ phost_info->os_type,
+ phost_info->sysname,
+ phost_info->nodename);
+ ql_dbg(ql_dbg_init, vha, 0x014b,
+ "release = '%s', version = '%s'\n",
+ phost_info->release,
+ phost_info->version);
+ ql_dbg(ql_dbg_init, vha, 0x014c,
+ "machine = '%s' "
+ "domainname = '%s', hostdriver = '%s'\n",
+ phost_info->machine,
+ phost_info->domainname,
+ phost_info->hostdriver);
+ ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
+ (uint8_t *)phost_info,
+ sizeof(struct host_system_info));
+ }
+ }
+
+ if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+ fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
+ fdisc->u.fxiocb.rsp_len,
+ &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
+ if (!fdisc->u.fxiocb.rsp_addr)
+ goto done_unmap_req;
+ }
+
+ sp->type = SRB_FXIOCB_DCMD;
+ sp->name = "fxdisc";
+ qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+ fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+ fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
+ sp->done = qla2x00_fxdisc_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_unmap_dma;
+
+ wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
+
+ if (fx_type == FXDISC_GET_CONFIG_INFO) {
+ struct config_info_data *pinfo =
+ (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
+ strcpy(vha->hw->model_number, pinfo->model_num);
+ strcpy(vha->hw->model_desc, pinfo->model_description);
+ memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
+ sizeof(vha->hw->mr.symbolic_name));
+ memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
+ sizeof(vha->hw->mr.serial_num));
+ memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
+ sizeof(vha->hw->mr.hw_version));
+ memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
+ sizeof(vha->hw->mr.fw_version));
+ strim(vha->hw->mr.fw_version);
+ memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
+ sizeof(vha->hw->mr.uboot_version));
+ memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
+ sizeof(vha->hw->mr.fru_serial_num));
+ vha->hw->mr.critical_temperature =
+ (pinfo->nominal_temp_value) ?
+ pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
+ ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
+ QLAFX00_EXTENDED_IO_EN_MASK) != 0;
+ } else if (fx_type == FXDISC_GET_PORT_INFO) {
+ struct port_info_data *pinfo =
+ (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
+ memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
+ memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
+ vha->d_id.b.domain = pinfo->port_id[0];
+ vha->d_id.b.area = pinfo->port_id[1];
+ vha->d_id.b.al_pa = pinfo->port_id[2];
+ qlafx00_update_host_attr(vha, pinfo);
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
+ (uint8_t *)pinfo, 16);
+ } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
+ struct qlafx00_tgt_node_info *pinfo =
+ (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+ memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
+ memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
+ fcport->port_type = FCT_TARGET;
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
+ (uint8_t *)pinfo, 16);
+ } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
+ struct qlafx00_tgt_node_info *pinfo =
+ (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
+ (uint8_t *)pinfo, 16);
+ memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
+ } else if (fx_type == FXDISC_ABORT_IOCTL)
+ fdisc->u.fxiocb.result =
+ (fdisc->u.fxiocb.result ==
+ cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
+ cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
+
+ rval = le32_to_cpu(fdisc->u.fxiocb.result);
+
+done_unmap_dma:
+ if (fdisc->u.fxiocb.rsp_addr)
+ dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
+ fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
+
+done_unmap_req:
+ if (fdisc->u.fxiocb.req_addr)
+ dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
+ fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
+done_free_sp:
+ sp->free(vha, sp);
+done:
+ return rval;
+}
+
+/*
+ * qlafx00_initialize_adapter
+ * Initialize board.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qlafx00_initialize_adapter(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t tempc;
+
+ /* Clear adapter flags. */
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ vha->flags.reset_active = 0;
+ ha->flags.pci_channel_io_perm_failure = 0;
+ ha->flags.eeh_busy = 0;
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->device_flags = DFLG_NO_CABLE;
+ vha->dpc_flags = 0;
+ vha->flags.management_server_logged_in = 0;
+ ha->isp_abort_cnt = 0;
+ ha->beacon_blink_led = 0;
+
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
+
+ ql_dbg(ql_dbg_init, vha, 0x0147,
+ "Configuring PCI space...\n");
+
+ rval = ha->isp_ops->pci_config(vha);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0148,
+ "Unable to configure PCI space.\n");
+ return rval;
+ }
+
+ rval = qlafx00_init_fw_ready(vha);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ qlafx00_save_queue_ptrs(vha);
+
+ rval = qlafx00_config_queues(vha);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ rval = qla2x00_init_rings(vha);
+ ha->flags.chip_reset_done = 1;
+
+ tempc = QLAFX00_GET_TEMPERATURE(ha);
+ ql_dbg(ql_dbg_init, vha, 0x0152,
+ "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
+ __func__, tempc);
+
+ return rval;
+}
+
+uint32_t
+qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t state[1];
+
+ if (qla2x00_reset_active(vha))
+ ql_log(ql_log_warn, vha, 0x70ce,
+ "ISP reset active.\n");
+ else if (!vha->hw->flags.eeh_busy) {
+ rval = qlafx00_get_firmware_state(vha, state);
+ }
+ if (rval != QLA_SUCCESS)
+ memset(state, -1, sizeof(state));
+
+ return state[0];
+}
+
+void
+qlafx00_get_host_speed(struct Scsi_Host *shost)
+{
+ struct qla_hw_data *ha = ((struct scsi_qla_host *)
+ (shost_priv(shost)))->hw;
+ u32 speed = FC_PORTSPEED_UNKNOWN;
+
+ switch (ha->link_data_rate) {
+ case QLAFX00_PORT_SPEED_2G:
+ speed = FC_PORTSPEED_2GBIT;
+ break;
+ case QLAFX00_PORT_SPEED_4G:
+ speed = FC_PORTSPEED_4GBIT;
+ break;
+ case QLAFX00_PORT_SPEED_8G:
+ speed = FC_PORTSPEED_8GBIT;
+ break;
+ case QLAFX00_PORT_SPEED_10G:
+ speed = FC_PORTSPEED_10GBIT;
+ break;
+ }
+ fc_host_speed(shost) = speed;
+}
+
+/** QLAFX00 specific ISR implementation functions */
+
+static inline void
+qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+ uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_cmnd *cp = GET_CMD_SP(sp);
+ uint32_t track_sense_len;
+
+ SET_FW_SENSE_LEN(sp, sense_len);
+
+ if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+ sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+ track_sense_len = sense_len;
+
+ if (sense_len > par_sense_len)
+ sense_len = par_sense_len;
+
+ memcpy(cp->sense_buffer, sense_data, sense_len);
+
+ SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
+
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+ track_sense_len -= sense_len;
+ SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+ ql_dbg(ql_dbg_io, vha, 0x304d,
+ "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
+ sense_len, par_sense_len, track_sense_len);
+ if (GET_FW_SENSE_LEN(sp) > 0) {
+ rsp->status_srb = sp;
+ cp->result = res;
+ }
+
+ if (sense_len) {
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
+ "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ cp);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
+ cp->sense_buffer, sense_len);
+ }
+}
+
+static void
+qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
+ __le16 sstatus, __le16 cpstatus)
+{
+ struct srb_iocb *tmf;
+
+ tmf = &sp->u.iocb_cmd;
+ if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
+ (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
+ cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
+ tmf->u.tmf.comp_status = cpstatus;
+ sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct abort_iocb_entry_fx00 *pkt)
+{
+ const char func[] = "ABT_IOCB";
+ srb_t *sp;
+ struct srb_iocb *abt;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ abt = &sp->u.iocb_cmd;
+ abt->u.abt.comp_status = pkt->tgt_id_sts;
+ sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct ioctl_iocb_entry_fx00 *pkt)
+{
+ const char func[] = "IOSB_IOCB";
+ srb_t *sp;
+ struct fc_bsg_job *bsg_job;
+ struct srb_iocb *iocb_job;
+ int res;
+ struct qla_mt_iocb_rsp_fx00 fstatus;
+ uint8_t *fw_sts_ptr;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (sp->type == SRB_FXIOCB_DCMD) {
+ iocb_job = &sp->u.iocb_cmd;
+ iocb_job->u.fxiocb.seq_number = pkt->seq_no;
+ iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
+ iocb_job->u.fxiocb.result = pkt->status;
+ if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
+ iocb_job->u.fxiocb.req_data =
+ pkt->dataword_r;
+ } else {
+ bsg_job = sp->u.bsg_job;
+
+ memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+ fstatus.reserved_1 = pkt->reserved_0;
+ fstatus.func_type = pkt->comp_func_num;
+ fstatus.ioctl_flags = pkt->fw_iotcl_flags;
+ fstatus.ioctl_data = pkt->dataword_r;
+ fstatus.adapid = pkt->adapid;
+ fstatus.reserved_2 = pkt->dataword_r_extra;
+ fstatus.res_count = pkt->residuallen;
+ fstatus.status = pkt->status;
+ fstatus.seq_number = pkt->seq_no;
+ memcpy(fstatus.reserved_3,
+ pkt->reserved_2, 20 * sizeof(uint8_t));
+
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
+ sizeof(struct qla_mt_iocb_rsp_fx00));
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x5080,
+ (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x5074,
+ (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+ res = bsg_job->reply->result = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ }
+ sp->done(vha, sp, res);
+}
+
+/**
+ * qlafx00_status_entry() - Process a Status IOCB entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+ srb_t *sp;
+ fc_port_t *fcport;
+ struct scsi_cmnd *cp;
+ struct sts_entry_fx00 *sts;
+ __le16 comp_status;
+ __le16 scsi_status;
+ uint16_t ox_id;
+ __le16 lscsi_status;
+ int32_t resid;
+ uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
+ fw_resid_len;
+ uint8_t *rsp_info = NULL, *sense_data = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t hindex, handle;
+ uint16_t que;
+ struct req_que *req;
+ int logit = 1;
+ int res = 0;
+
+ sts = (struct sts_entry_fx00 *) pkt;
+
+ comp_status = sts->comp_status;
+ scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
+ hindex = sts->handle;
+ handle = LSW(hindex);
+
+ que = MSW(hindex);
+ req = ha->req_q_map[que];
+
+ /* Validate handle. */
+ if (handle < req->num_outstanding_cmds)
+ sp = req->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (sp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3034,
+ "Invalid status handle (0x%x).\n", handle);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ return;
+ }
+
+ if (sp->type == SRB_TM_CMD) {
+ req->outstanding_cmds[handle] = NULL;
+ qlafx00_tm_iocb_entry(vha, req, pkt, sp,
+ scsi_status, comp_status);
+ return;
+ }
+
+ /* Fast path completion. */
+ if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_process_completed_request(vha, req, handle);
+ return;
+ }
+
+ req->outstanding_cmds[handle] = NULL;
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3048,
+ "Command already returned (0x%x/%p).\n",
+ handle, sp);
+
+ return;
+ }
+
+ lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
+
+ fcport = sp->fcport;
+
+ ox_id = 0;
+ sense_len = par_sense_len = rsp_info_len = resid_len =
+ fw_resid_len = 0;
+ if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
+ sense_len = sts->sense_len;
+ if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
+ | (uint16_t)SS_RESIDUAL_OVER)))
+ resid_len = le32_to_cpu(sts->residual_len);
+ if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
+ fw_resid_len = le32_to_cpu(sts->residual_len);
+ rsp_info = sense_data = sts->data;
+ par_sense_len = sizeof(sts->data);
+
+ /* Check for overrun. */
+ if (comp_status == CS_COMPLETE &&
+ scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
+ comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
+
+ /*
+ * Based on Host and scsi status generate status code for Linux
+ */
+ switch (le16_to_cpu(comp_status)) {
+ case CS_COMPLETE:
+ case CS_QUEUE_FULL:
+ if (scsi_status == 0) {
+ res = DID_OK << 16;
+ break;
+ }
+ if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
+ | (uint16_t)SS_RESIDUAL_OVER))) {
+ resid = resid_len;
+ scsi_set_resid(cp, resid);
+
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
+ "Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16;
+ break;
+ }
+ }
+ res = DID_OK << 16 | le16_to_cpu(lscsi_status);
+
+ if (lscsi_status ==
+ cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
+ "QUEUE FULL detected.\n");
+ break;
+ }
+ logit = 0;
+ if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
+ break;
+
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
+ break;
+
+ qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+ rsp, res);
+ break;
+
+ case CS_DATA_UNDERRUN:
+ /* Use F/W calculated residual length. */
+ if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ resid = fw_resid_len;
+ else
+ resid = resid_len;
+ scsi_set_resid(cp, resid);
+ if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
+ if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ && fw_resid_len != resid_len) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
+ "Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16 |
+ le16_to_cpu(lscsi_status);
+ goto check_scsi_status;
+ }
+
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
+ "Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes, "
+ "cp->underflow: 0x%x).\n",
+ resid, scsi_bufflen(cp), cp->underflow);
+
+ res = DID_ERROR << 16;
+ break;
+ }
+ } else if (lscsi_status !=
+ cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
+ lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
+ /*
+ * scsi status of task set and busy are considered
+ * to be task not completed.
+ */
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
+ "Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", resid,
+ scsi_bufflen(cp));
+
+ res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
+ goto check_scsi_status;
+ } else {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
+ "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+ scsi_status, lscsi_status);
+ }
+
+ res = DID_OK << 16 | le16_to_cpu(lscsi_status);
+ logit = 0;
+
+check_scsi_status:
+ /*
+ * Check to see if SCSI Status is non zero. If so report SCSI
+ * Status.
+ */
+ if (lscsi_status != 0) {
+ if (lscsi_status ==
+ cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
+ "QUEUE FULL detected.\n");
+ logit = 1;
+ break;
+ }
+ if (lscsi_status !=
+ cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
+ break;
+
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(scsi_status &
+ cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
+ break;
+
+ qlafx00_handle_sense(sp, sense_data, par_sense_len,
+ sense_len, rsp, res);
+ }
+ break;
+
+ case CS_PORT_LOGGED_OUT:
+ case CS_PORT_CONFIG_CHG:
+ case CS_PORT_BUSY:
+ case CS_INCOMPLETE:
+ case CS_PORT_UNAVAILABLE:
+ case CS_TIMEOUT:
+ case CS_RESET:
+
+ /*
+ * We are going to have the fc class block the rport
+ * while we try to recover so instruct the mid layer
+ * to requeue until the class decides how to handle this.
+ */
+ res = DID_TRANSPORT_DISRUPTED << 16;
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
+ "Port down status: port-state=0x%x.\n",
+ atomic_read(&fcport->state));
+
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ break;
+
+ case CS_ABORTED:
+ res = DID_RESET << 16;
+ break;
+
+ default:
+ res = DID_ERROR << 16;
+ break;
+ }
+
+ if (logit)
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
+ "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+ "par_sense_len=0x%x, rsp_info_len=0x%x\n",
+ comp_status, scsi_status, res, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->tgt_id,
+ lscsi_status, cp->cmnd, scsi_bufflen(cp),
+ rsp_info_len, resid_len, fw_resid_len, sense_len,
+ par_sense_len, rsp_info_len);
+
+ if (rsp->status_srb == NULL)
+ sp->done(ha, sp, res);
+}
+
+/**
+ * qlafx00_status_cont_entry() - Process a Status Continuations entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+ uint8_t sense_sz = 0;
+ struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp = rsp->status_srb;
+ struct scsi_cmnd *cp;
+ uint32_t sense_len;
+ uint8_t *sense_ptr;
+
+ if (!sp) {
+ ql_dbg(ql_dbg_io, vha, 0x3037,
+ "no SP, sp = %p\n", sp);
+ return;
+ }
+
+ if (!GET_FW_SENSE_LEN(sp)) {
+ ql_dbg(ql_dbg_io, vha, 0x304b,
+ "no fw sense data, sp = %p\n", sp);
+ return;
+ }
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_log(ql_log_warn, vha, 0x303b,
+ "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+ rsp->status_srb = NULL;
+ return;
+ }
+
+ if (!GET_CMD_SENSE_LEN(sp)) {
+ ql_dbg(ql_dbg_io, vha, 0x304c,
+ "no sense data, sp = %p\n", sp);
+ } else {
+ sense_len = GET_CMD_SENSE_LEN(sp);
+ sense_ptr = GET_CMD_SENSE_PTR(sp);
+ ql_dbg(ql_dbg_io, vha, 0x304f,
+ "sp=%p sense_len=0x%x sense_ptr=%p.\n",
+ sp, sense_len, sense_ptr);
+
+ if (sense_len > sizeof(pkt->data))
+ sense_sz = sizeof(pkt->data);
+ else
+ sense_sz = sense_len;
+
+ /* Move sense data. */
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
+ (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+ memcpy(sense_ptr, pkt->data, sense_sz);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
+ sense_ptr, sense_sz);
+
+ sense_len -= sense_sz;
+ sense_ptr += sense_sz;
+
+ SET_CMD_SENSE_PTR(sp, sense_ptr);
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ }
+ sense_len = GET_FW_SENSE_LEN(sp);
+ sense_len = (sense_len > sizeof(pkt->data)) ?
+ (sense_len - sizeof(pkt->data)) : 0;
+ SET_FW_SENSE_LEN(sp, sense_len);
+
+ /* Place command on done queue. */
+ if (sense_len == 0) {
+ rsp->status_srb = NULL;
+ sp->done(ha, sp, cp->result);
+ }
+}
+
+/**
+ * qlafx00_multistatus_entry() - Process Multi response queue entries.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, void *pkt)
+{
+ srb_t *sp;
+ struct multi_sts_entry_fx00 *stsmfx;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle, hindex, handle_count, i;
+ uint16_t que;
+ struct req_que *req;
+ __le32 *handle_ptr;
+
+ stsmfx = (struct multi_sts_entry_fx00 *) pkt;
+
+ handle_count = stsmfx->handle_count;
+
+ if (handle_count > MAX_HANDLE_COUNT) {
+ ql_dbg(ql_dbg_io, vha, 0x3035,
+ "Invalid handle count (0x%x).\n", handle_count);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ return;
+ }
+
+ handle_ptr = &stsmfx->handles[0];
+
+ for (i = 0; i < handle_count; i++) {
+ hindex = le32_to_cpu(*handle_ptr);
+ handle = LSW(hindex);
+ que = MSW(hindex);
+ req = ha->req_q_map[que];
+
+ /* Validate handle. */
+ if (handle < req->num_outstanding_cmds)
+ sp = req->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (sp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3044,
+ "Invalid status handle (0x%x).\n", handle);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ return;
+ }
+ qla2x00_process_completed_request(vha, req, handle);
+ handle_ptr++;
+ }
+}
+
+/**
+ * qlafx00_error_entry() - Process an error entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
+ struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ const char func[] = "ERROR-IOCB";
+ uint16_t que = 0;
+ struct req_que *req = NULL;
+ int res = DID_ERROR << 16;
+
+ ql_dbg(ql_dbg_async, vha, 0x507f,
+ "type of error status in response: 0x%x\n", estatus);
+
+ req = ha->req_q_map[que];
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (sp) {
+ sp->done(ha, sp, res);
+ return;
+ }
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+}
+
+/**
+ * qlafx00_process_response_queue() - Process response queue entries.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_process_response_queue(struct scsi_qla_host *vha,
+ struct rsp_que *rsp)
+{
+ struct sts_entry_fx00 *pkt;
+ response_t *lptr;
+ uint16_t lreq_q_in = 0;
+ uint16_t lreq_q_out = 0;
+
+ lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_out = rsp->ring_index;
+
+ while (lreq_q_in != lreq_q_out) {
+ lptr = rsp->ring_ptr;
+ memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
+ sizeof(rsp->rsp_pkt));
+ pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
+
+ rsp->ring_index++;
+ lreq_q_out++;
+ if (rsp->ring_index == rsp->length) {
+ lreq_q_out = 0;
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
+ } else {
+ rsp->ring_ptr++;
+ }
+
+ if (pkt->entry_status != 0 &&
+ pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+ qlafx00_error_entry(vha, rsp,
+ (struct sts_entry_fx00 *)pkt, pkt->entry_status,
+ pkt->entry_type);
+ continue;
+ }
+
+ switch (pkt->entry_type) {
+ case STATUS_TYPE_FX00:
+ qlafx00_status_entry(vha, rsp, pkt);
+ break;
+
+ case STATUS_CONT_TYPE_FX00:
+ qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+ break;
+
+ case MULTI_STATUS_TYPE_FX00:
+ qlafx00_multistatus_entry(vha, rsp, pkt);
+ break;
+
+ case ABORT_IOCB_TYPE_FX00:
+ qlafx00_abort_iocb_entry(vha, rsp->req,
+ (struct abort_iocb_entry_fx00 *)pkt);
+ break;
+
+ case IOCTL_IOSB_TYPE_FX00:
+ qlafx00_ioctl_iosb_entry(vha, rsp->req,
+ (struct ioctl_iocb_entry_fx00 *)pkt);
+ break;
+ default:
+ /* Type Not Supported. */
+ ql_dbg(ql_dbg_async, vha, 0x5081,
+ "Received unknown response pkt type %x "
+ "entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
+ break;
+ }
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+/**
+ * qlafx00_async_event() - Process aynchronous events.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_async_event(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg;
+ int data_size = 1;
+
+ reg = &ha->iobase->ispfx00;
+ /* Setup to process RIO completion. */
+ switch (ha->aenmb[0]) {
+ case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
+ ql_log(ql_log_warn, vha, 0x5079,
+ "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
+ ql_dbg(ql_dbg_async, vha, 0x5076,
+ "Asynchronous FW shutdown requested.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+
+ case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
+ ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ql_dbg(ql_dbg_async, vha, 0x5077,
+ "Asynchronous port Update received "
+ "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
+ ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
+ data_size = 4;
+ break;
+
+ case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
+ ql_log(ql_log_info, vha, 0x5085,
+ "Asynchronous over temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
+ ql_log(ql_log_info, vha, 0x5086,
+ "Asynchronous normal temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5083,
+ "Asynchronous critical temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ default:
+ ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+ ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
+ ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
+ ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
+ ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ql_dbg(ql_dbg_async, vha, 0x5078,
+ "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
+ ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
+ ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
+ break;
+ }
+ qlafx00_post_aenfx_work(vha, ha->aenmb[0],
+ (uint32_t *)ha->aenmb, data_size);
+}
+
+/**
+ *
+ * qlafx00x_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb16: Mailbox16 register
+ */
+static void
+qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
+{
+ uint16_t cnt;
+ uint32_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+ if (!ha->mcp32)
+ ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out32[0] = mb0;
+ wptr = (uint32_t __iomem *)&reg->mailbox17;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ wptr++;
+ }
+}
+
+/**
+ * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qlafx00_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct device_reg_fx00 __iomem *reg;
+ int status;
+ unsigned long iter;
+ uint32_t stat;
+ uint32_t mb[8];
+ struct rsp_que *rsp;
+ unsigned long flags;
+ uint32_t clr_intr = 0;
+ uint32_t intr_stat = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x507d,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->ispfx00;
+ status = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 50; iter--; clr_intr = 0) {
+ stat = QLAFX00_RD_INTR_REG(ha);
+ if (qla2x00_check_reg32_for_disconnect(vha, stat))
+ break;
+ intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
+ if (!intr_stat)
+ break;
+
+ if (stat & QLAFX00_INTR_MB_CMPLT) {
+ mb[0] = RD_REG_WORD(&reg->mailbox16);
+ qlafx00_mbx_completion(vha, mb[0]);
+ status |= MBX_INTERRUPT;
+ clr_intr |= QLAFX00_INTR_MB_CMPLT;
+ }
+ if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
+ ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ qlafx00_async_event(vha);
+ clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
+ }
+ if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
+ qlafx00_process_response_queue(vha, rsp);
+ clr_intr |= QLAFX00_INTR_RSP_CMPLT;
+ }
+
+ QLAFX00_CLR_INTR_REG(ha, clr_intr);
+ QLAFX00_RD_INTR_REG(ha);
+ }
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/** QLAFX00 specific IOCB implementation functions */
+
+static inline cont_a64_entry_t *
+qlafx00_prep_cont_type1_iocb(struct req_que *req,
+ cont_a64_entry_t *lcont_pkt)
+{
+ cont_a64_entry_t *cont_pkt;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+ /* Load packet defaults. */
+ lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
+
+ return cont_pkt;
+}
+
+static inline void
+qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
+ uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
+{
+ uint16_t avail_dsds;
+ __le32 *cur_dsd;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i, cont;
+ struct req_que *req;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt;
+
+ vha = sp->fcport->vha;
+ req = vha->req;
+
+ cmd = GET_CMD_SP(sp);
+ cont = 0;
+ cont_pkt = NULL;
+
+ /* Update entry type to indicate Command Type 3 IOCB */
+ lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ lcmd_pkt->cntrl_flags = TMF_READ_DATA;
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ /* One DSD is available in the Command Type 3 IOCB */
+ avail_dsds = 1;
+ cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
+ cont_pkt =
+ qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
+ cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
+ avail_dsds = 5;
+ cont = 1;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ if (avail_dsds == 0 && cont == 1) {
+ cont = 0;
+ memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+
+ }
+ if (avail_dsds != 0 && cont == 1) {
+ memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+}
+
+/**
+ * qlafx00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qlafx00_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_7_fx00 *cmd_pkt;
+ struct cmd_type_7_fx00 lcmd_pkt;
+ struct scsi_lun llun;
+
+ /* Setup device pointers. */
+ ret = 0;
+
+ rsp = ha->rsp_q_map[0];
+ req = vha->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
+
+ memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
+
+ lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+ lcmd_pkt.reserved_0 = 0;
+ lcmd_pkt.port_path_ctrl = 0;
+ lcmd_pkt.reserved_1 = 0;
+ lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
+ lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
+
+ int_to_scsilun(cmd->device->lun, &llun);
+ host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
+ sizeof(lcmd_pkt.lun));
+
+ /* Load SCSI command packet. */
+ host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
+ lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
+
+ /* Set total data segment count. */
+ lcmd_pkt.entry_count = (uint8_t)req_cnt;
+
+ /* Specify response queue number where completion should happen */
+ lcmd_pkt.entry_status = (uint8_t) rsp->id;
+
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+ (uint8_t *)cmd->cmnd, cmd->cmd_len);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
+ (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+
+ memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+void
+qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
+{
+ struct srb_iocb *fxio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+ struct tsk_mgmt_entry_fx00 tm_iocb;
+ struct scsi_lun llun;
+
+ memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
+ tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
+ tm_iocb.entry_count = 1;
+ tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ tm_iocb.reserved_0 = 0;
+ tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
+ tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
+ if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
+ int_to_scsilun(fxio->u.tmf.lun, &llun);
+ host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
+ sizeof(struct scsi_lun));
+ }
+
+ memcpy((void *)ptm_iocb, &tm_iocb,
+ sizeof(struct tsk_mgmt_entry_fx00));
+ wmb();
+}
+
+void
+qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
+{
+ struct srb_iocb *fxio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+ struct abort_iocb_entry_fx00 abt_iocb;
+
+ memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
+ abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
+ abt_iocb.entry_count = 1;
+ abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb.abort_handle =
+ cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
+ abt_iocb.req_que_no = cpu_to_le16(req->id);
+
+ memcpy((void *)pabt_iocb, &abt_iocb,
+ sizeof(struct abort_iocb_entry_fx00));
+ wmb();
+}
+
+void
+qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
+{
+ struct srb_iocb *fxio = &sp->u.iocb_cmd;
+ struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+ struct fc_bsg_job *bsg_job;
+ struct fxdisc_entry_fx00 fx_iocb;
+ uint8_t entry_cnt = 1;
+
+ memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
+ fx_iocb.entry_type = FX00_IOCB_TYPE;
+ fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.entry_count = entry_cnt;
+
+ if (sp->type == SRB_FXIOCB_DCMD) {
+ fx_iocb.func_num =
+ sp->u.iocb_cmd.u.fxiocb.req_func_type;
+ fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
+ fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
+ fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
+ fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
+ fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;
+
+ if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+ fx_iocb.req_dsdcnt = cpu_to_le16(1);
+ fx_iocb.req_xfrcnt =
+ cpu_to_le16(fxio->u.fxiocb.req_len);
+ fx_iocb.dseg_rq_address[0] =
+ cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
+ fx_iocb.dseg_rq_address[1] =
+ cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
+ fx_iocb.dseg_rq_len =
+ cpu_to_le32(fxio->u.fxiocb.req_len);
+ }
+
+ if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+ fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
+ fx_iocb.rsp_xfrcnt =
+ cpu_to_le16(fxio->u.fxiocb.rsp_len);
+ fx_iocb.dseg_rsp_address[0] =
+ cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
+ fx_iocb.dseg_rsp_address[1] =
+ cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
+ fx_iocb.dseg_rsp_len =
+ cpu_to_le32(fxio->u.fxiocb.rsp_len);
+ }
+
+ if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
+ fx_iocb.dataword = fxio->u.fxiocb.req_data;
+ }
+ fx_iocb.flags = fxio->u.fxiocb.flags;
+ } else {
+ struct scatterlist *sg;
+ bsg_job = sp->u.bsg_job;
+ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+ &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ fx_iocb.func_num = piocb_rqst->func_type;
+ fx_iocb.adapid = piocb_rqst->adapid;
+ fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
+ fx_iocb.reserved_0 = piocb_rqst->reserved_0;
+ fx_iocb.reserved_1 = piocb_rqst->reserved_1;
+ fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
+ fx_iocb.dataword = piocb_rqst->dataword;
+ fx_iocb.req_xfrcnt = piocb_rqst->req_len;
+ fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+ int avail_dsds, tot_dsds;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt = NULL;
+ __le32 *cur_dsd;
+ int index = 0, cont = 0;
+
+ fx_iocb.req_dsdcnt =
+ cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ tot_dsds =
+ bsg_job->request_payload.sg_cnt;
+ cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
+ avail_dsds = 1;
+ for_each_sg(bsg_job->request_payload.sg_list, sg,
+ tot_dsds, index) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ memset(&lcont_pkt, 0,
+ REQUEST_ENTRY_SIZE);
+ cont_pkt =
+ qlafx00_prep_cont_type1_iocb(
+ sp->fcport->vha->req,
+ &lcont_pkt);
+ cur_dsd = (__le32 *)
+ lcont_pkt.dseg_0_address;
+ avail_dsds = 5;
+ cont = 1;
+ entry_cnt++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ if (avail_dsds == 0 && cont == 1) {
+ cont = 0;
+ memcpy_toio(
+ (void __iomem *)cont_pkt,
+ &lcont_pkt, REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(
+ ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3042,
+ (uint8_t *)&lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+ }
+ if (avail_dsds != 0 && cont == 1) {
+ memcpy_toio((void __iomem *)cont_pkt,
+ &lcont_pkt, REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3043,
+ (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+ }
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+ int avail_dsds, tot_dsds;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt = NULL;
+ __le32 *cur_dsd;
+ int index = 0, cont = 0;
+
+ fx_iocb.rsp_dsdcnt =
+ cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ tot_dsds = bsg_job->reply_payload.sg_cnt;
+ cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
+ avail_dsds = 1;
+
+ for_each_sg(bsg_job->reply_payload.sg_list, sg,
+ tot_dsds, index) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ memset(&lcont_pkt, 0,
+ REQUEST_ENTRY_SIZE);
+ cont_pkt =
+ qlafx00_prep_cont_type1_iocb(
+ sp->fcport->vha->req,
+ &lcont_pkt);
+ cur_dsd = (__le32 *)
+ lcont_pkt.dseg_0_address;
+ avail_dsds = 5;
+ cont = 1;
+ entry_cnt++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ if (avail_dsds == 0 && cont == 1) {
+ cont = 0;
+ memcpy_toio((void __iomem *)cont_pkt,
+ &lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(
+ ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3045,
+ (uint8_t *)&lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+ }
+ if (avail_dsds != 0 && cont == 1) {
+ memcpy_toio((void __iomem *)cont_pkt,
+ &lcont_pkt, REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3046,
+ (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+ }
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
+ fx_iocb.dataword = piocb_rqst->dataword;
+ fx_iocb.flags = piocb_rqst->flags;
+ fx_iocb.entry_count = entry_cnt;
+ }
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3047,
+ (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+
+ memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
+ sizeof(struct fxdisc_entry_fx00));
+ wmb();
+}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
new file mode 100644
index 000000000..aeaa1b40b
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -0,0 +1,527 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_MR_H
+#define __QLA_MR_H
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISPF001 0xF001
+
+/* FX00 specific definitions */
+
+#define FX00_COMMAND_TYPE_7 0x07 /* Command Type 7 entry for 7XXX */
+struct cmd_type_7_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint8_t reserved_0;
+ uint8_t port_path_ctrl;
+ uint16_t reserved_1;
+
+ __le16 tgt_idx; /* Target Idx. */
+ uint16_t timeout; /* Command timeout. */
+
+ __le16 dseg_count; /* Data segment count. */
+ uint8_t scsi_rsp_dsd_len;
+ uint8_t reserved_2;
+
+ struct scsi_lun lun; /* LUN (LE). */
+
+ uint8_t cntrl_flags;
+
+ uint8_t task_mgmt_flags; /* Task management flags. */
+
+ uint8_t task;
+
+ uint8_t crn;
+
+ uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ __le32 byte_count; /* Total byte count. */
+
+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_0_len; /* Data segment 0 length. */
+};
+
+#define STATUS_TYPE_FX00 0x01 /* Status entry. */
+struct sts_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t reserved_3; /* System handle. */
+
+ __le16 comp_status; /* Completion status. */
+ uint16_t reserved_0; /* OX_ID used by the firmware. */
+
+ __le32 residual_len; /* FW calc residual transfer length. */
+
+ uint16_t reserved_1;
+ uint16_t state_flags; /* State flags. */
+
+ uint16_t reserved_2;
+ __le16 scsi_status; /* SCSI status. */
+
+ uint32_t sense_len; /* FCP SENSE length. */
+ uint8_t data[32]; /* FCP response/sense information. */
+};
+
+
+#define MAX_HANDLE_COUNT 15
+#define MULTI_STATUS_TYPE_FX00 0x0D
+
+struct multi_sts_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count;
+ uint8_t entry_status;
+
+ __le32 handles[MAX_HANDLE_COUNT];
+};
+
+#define TSK_MGMT_IOCB_TYPE_FX00 0x05
+struct tsk_mgmt_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define;
+ uint8_t entry_status; /* Entry Status. */
+
+ __le32 handle; /* System handle. */
+
+ uint32_t reserved_0;
+
+ __le16 tgt_id; /* Target Idx. */
+
+ uint16_t reserved_1;
+ uint16_t reserved_3;
+ uint16_t reserved_4;
+
+ struct scsi_lun lun; /* LUN (LE). */
+
+ __le32 control_flags; /* Control Flags. */
+
+ uint8_t reserved_2[32];
+};
+
+
+#define ABORT_IOCB_TYPE_FX00 0x08 /* Abort IOCB status. */
+struct abort_iocb_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ __le32 handle; /* System handle. */
+ __le32 reserved_0;
+
+ __le16 tgt_id_sts; /* Completion status. */
+ __le16 options;
+
+ __le32 abort_handle; /* System handle. */
+ __le32 reserved_2;
+
+ __le16 req_que_no;
+ uint8_t reserved_1[38];
+};
+
+#define IOCTL_IOSB_TYPE_FX00 0x0C
+struct ioctl_iocb_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t reserved_0; /* System handle. */
+
+ uint16_t comp_func_num;
+ __le16 fw_iotcl_flags;
+
+ __le32 dataword_r; /* Data word returned */
+ uint32_t adapid; /* Adapter ID */
+ uint32_t dataword_r_extra;
+
+ __le32 seq_no;
+ uint8_t reserved_2[20];
+ uint32_t residuallen;
+ __le32 status;
+};
+
+#define STATUS_CONT_TYPE_FX00 0x04
+
+#define FX00_IOCB_TYPE 0x0B
+struct fxdisc_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ __le32 handle; /* System handle. */
+ __le32 reserved_0; /* System handle. */
+
+ __le16 func_num;
+ __le16 req_xfrcnt;
+ __le16 req_dsdcnt;
+ __le16 rsp_xfrcnt;
+ __le16 rsp_dsdcnt;
+ uint8_t flags;
+ uint8_t reserved_1;
+
+ __le32 dseg_rq_address[2]; /* Data segment 0 address. */
+ __le32 dseg_rq_len; /* Data segment 0 length. */
+ __le32 dseg_rsp_address[2]; /* Data segment 1 address. */
+ __le32 dseg_rsp_len; /* Data segment 1 length. */
+
+ __le32 dataword;
+ __le32 adapid;
+ __le32 adapid_hi;
+ __le32 dataword_extra;
+};
+
+struct qlafx00_tgt_node_info {
+ uint8_t tgt_node_wwpn[WWN_SIZE];
+ uint8_t tgt_node_wwnn[WWN_SIZE];
+ uint32_t tgt_node_state;
+ uint8_t reserved[128];
+ uint32_t reserved_1[8];
+ uint64_t reserved_2[4];
+} __packed;
+
+#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)
+
+#define QLAFX00_LINK_STATUS_DOWN 0x10
+#define QLAFX00_LINK_STATUS_UP 0x11
+
+#define QLAFX00_PORT_SPEED_2G 0x2
+#define QLAFX00_PORT_SPEED_4G 0x4
+#define QLAFX00_PORT_SPEED_8G 0x8
+#define QLAFX00_PORT_SPEED_10G 0xa
+struct port_info_data {
+ uint8_t port_state;
+ uint8_t port_type;
+ uint16_t port_identifier;
+ uint32_t up_port_state;
+ uint8_t fw_ver_num[32];
+ uint8_t portal_attrib;
+ uint16_t host_option;
+ uint8_t reset_delay;
+ uint8_t pdwn_retry_cnt;
+ uint16_t max_luns2tgt;
+ uint8_t risc_ver;
+ uint8_t pconn_option;
+ uint16_t risc_option;
+ uint16_t max_frame_len;
+ uint16_t max_iocb_alloc;
+ uint16_t exec_throttle;
+ uint8_t retry_cnt;
+ uint8_t retry_delay;
+ uint8_t port_name[8];
+ uint8_t port_id[3];
+ uint8_t link_status;
+ uint8_t plink_rate;
+ uint32_t link_config;
+ uint16_t adap_haddr;
+ uint8_t tgt_disc;
+ uint8_t log_tout;
+ uint8_t node_name[8];
+ uint16_t erisc_opt1;
+ uint8_t resp_acc_tmr;
+ uint8_t intr_del_tmr;
+ uint8_t erisc_opt2;
+ uint8_t alt_port_name[8];
+ uint8_t alt_node_name[8];
+ uint8_t link_down_tout;
+ uint8_t conn_type;
+ uint8_t fc_fw_mode;
+ uint32_t uiReserved[48];
+} __packed;
+
+/* OS Type Designations */
+#define OS_TYPE_UNKNOWN 0
+#define OS_TYPE_LINUX 2
+
+/* Linux Info */
+#define SYSNAME_LENGTH 128
+#define NODENAME_LENGTH 64
+#define RELEASE_LENGTH 64
+#define VERSION_LENGTH 64
+#define MACHINE_LENGTH 64
+#define DOMNAME_LENGTH 64
+
+struct host_system_info {
+ uint32_t os_type;
+ char sysname[SYSNAME_LENGTH];
+ char nodename[NODENAME_LENGTH];
+ char release[RELEASE_LENGTH];
+ char version[VERSION_LENGTH];
+ char machine[MACHINE_LENGTH];
+ char domainname[DOMNAME_LENGTH];
+ char hostdriver[VERSION_LENGTH];
+ uint32_t reserved[64];
+} __packed;
+
+struct register_host_info {
+ struct host_system_info hsi; /* host system info */
+ uint64_t utc; /* UTC (system time) */
+ uint32_t reserved[64]; /* future additions */
+} __packed;
+
+
+#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
+#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
+
+struct config_info_data {
+ uint8_t model_num[16];
+ uint8_t model_description[80];
+ uint8_t reserved0[160];
+ uint8_t symbolic_name[64];
+ uint8_t serial_num[32];
+ uint8_t hw_version[16];
+ uint8_t fw_version[16];
+ uint8_t uboot_version[16];
+ uint8_t fru_serial_num[32];
+
+ uint8_t fc_port_count;
+ uint8_t iscsi_port_count;
+ uint8_t reserved1[2];
+
+ uint8_t mode;
+ uint8_t log_level;
+ uint8_t reserved2[2];
+
+ uint32_t log_size;
+
+ uint8_t tgt_pres_mode;
+ uint8_t iqn_flags;
+ uint8_t lun_mapping;
+
+ uint64_t adapter_id;
+
+ uint32_t cluster_key_len;
+ uint8_t cluster_key[16];
+
+ uint64_t cluster_master_id;
+ uint64_t cluster_slave_id;
+ uint8_t cluster_flags;
+ uint32_t enabled_capabilities;
+ uint32_t nominal_temp_value;
+} __packed;
+
+#define FXDISC_GET_CONFIG_INFO 0x01
+#define FXDISC_GET_PORT_INFO 0x02
+#define FXDISC_GET_TGT_NODE_INFO 0x80
+#define FXDISC_GET_TGT_NODE_LIST 0x81
+#define FXDISC_REG_HOST_INFO 0x99
+#define FXDISC_ABORT_IOCTL 0xff
+
+#define QLAFX00_HBA_ICNTRL_REG 0x20B08
+#define QLAFX00_ICR_ENB_MASK 0x80000000
+#define QLAFX00_ICR_DIS_MASK 0x7fffffff
+#define QLAFX00_HST_RST_REG 0x18264
+#define QLAFX00_SOC_TEMP_REG 0x184C4
+#define QLAFX00_HST_TO_HBA_REG 0x20A04
+#define QLAFX00_HBA_TO_HOST_REG 0x21B70
+#define QLAFX00_HST_INT_STS_BITS 0x7
+#define QLAFX00_BAR1_BASE_ADDR_REG 0x40018
+#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG 0x41824
+
+#define QLAFX00_INTR_MB_CMPLT 0x1
+#define QLAFX00_INTR_RSP_CMPLT 0x2
+#define QLAFX00_INTR_ASYNC_CMPLT 0x4
+
+#define QLAFX00_MBA_SYSTEM_ERR 0x8002
+#define QLAFX00_MBA_TEMP_OVER 0x8005
+#define QLAFX00_MBA_TEMP_NORM 0x8006
+#define QLAFX00_MBA_TEMP_CRIT 0x8007
+#define QLAFX00_MBA_LINK_UP 0x8011
+#define QLAFX00_MBA_LINK_DOWN 0x8012
+#define QLAFX00_MBA_PORT_UPDATE 0x8014
+#define QLAFX00_MBA_SHUTDOWN_RQSTD 0x8062
+
+#define SOC_SW_RST_CONTROL_REG_CORE0 0x0020800
+#define SOC_FABRIC_RST_CONTROL_REG 0x0020840
+#define SOC_FABRIC_CONTROL_REG 0x0020200
+#define SOC_FABRIC_CONFIG_REG 0x0020204
+#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C
+
+#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00
+#define SOC_CORE_TIMER_REG 0x0021850
+#define SOC_IRQ_ACK_REG 0x00218b4
+
+#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
+
+#define QLAFX00_SET_HST_INTR(ha, value) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+ value)
+
+#define QLAFX00_CLR_HST_INTR(ha, value) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ ~value)
+
+#define QLAFX00_RD_INTR_REG(ha) \
+ RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+
+#define QLAFX00_CLR_INTR_REG(ha, value) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ ~value)
+
+#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
+ WRT_REG_DWORD((ha)->cregbase + off, val)
+
+#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
+ RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_HBA_RST_REG(ha, val)\
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+
+#define QLAFX00_RD_ICNTRL_REG(ha) \
+ RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+
+#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
+ QLAFX00_ICR_ENB_MASK))
+
+#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
+ QLAFX00_ICR_DIS_MASK))
+
+#define QLAFX00_RD_REG(ha, off) \
+ RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_WR_REG(ha, off, val) \
+ WRT_REG_DWORD((ha)->cregbase + off, val)
+
+struct qla_mt_iocb_rqst_fx00 {
+ __le32 reserved_0;
+
+ __le16 func_type;
+ uint8_t flags;
+ uint8_t reserved_1;
+
+ __le32 dataword;
+
+ __le32 adapid;
+ __le32 adapid_hi;
+
+ __le32 dataword_extra;
+
+ __le16 req_len;
+ __le16 reserved_2;
+
+ __le16 rsp_len;
+ __le16 reserved_3;
+};
+
+struct qla_mt_iocb_rsp_fx00 {
+ uint32_t reserved_1;
+
+ uint16_t func_type;
+ __le16 ioctl_flags;
+
+ __le32 ioctl_data;
+
+ uint32_t adapid;
+ uint32_t adapid_hi;
+
+ uint32_t reserved_2;
+ __le32 seq_number;
+
+ uint8_t reserved_3[20];
+
+ int32_t res_count;
+
+ __le32 status;
+};
+
+
+#define MAILBOX_REGISTER_COUNT_FX00 16
+#define AEN_MAILBOX_REGISTER_COUNT_FX00 8
+#define MAX_FIBRE_DEVICES_FX00 512
+#define MAX_LUNS_FX00 0x1024
+#define MAX_TARGETS_FX00 MAX_ISA_DEVICES
+#define REQUEST_ENTRY_CNT_FX00 512 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
+
+/*
+ * Firmware state codes for QLAFX00 adapters
+ */
+#define FSTATE_FX00_CONFIG_WAIT 0x0000 /* Waiting for driver to issue
+ * Initialize FW Mbox cmd
+ */
+#define FSTATE_FX00_INITIALIZED 0x1000 /* FW has been initialized by
+ * the driver
+ */
+
+#define FX00_DEF_RATOV 10
+
+struct mr_data_fx00 {
+ uint8_t symbolic_name[64];
+ uint8_t serial_num[32];
+ uint8_t hw_version[16];
+ uint8_t fw_version[16];
+ uint8_t uboot_version[16];
+ uint8_t fru_serial_num[32];
+ fc_port_t fcport; /* fcport used for requests
+ * that are not linked
+ * to a particular target
+ */
+ uint8_t fw_hbt_en;
+ uint8_t fw_hbt_cnt;
+ uint8_t fw_hbt_miss_cnt;
+ uint32_t old_fw_hbt_cnt;
+ uint16_t fw_reset_timer_tick;
+ uint8_t fw_reset_timer_exp;
+ uint16_t fw_critemp_timer_tick;
+ uint32_t old_aenmbx0_state;
+ uint32_t critical_temperature;
+ bool extended_io_enabled;
+ bool host_info_resend;
+ uint8_t hinfo_resend_timer_tick;
+};
+
+#define QLAFX00_EXTENDED_IO_EN_MASK 0x20
+
+/*
+ * SoC Junction Temperature is stored in
+ * bits 9:1 of SoC Junction Temperature Register
+ * in a firmware specific format format.
+ * To get the temperature in Celsius degrees
+ * the value from this bitfiled should be converted
+ * using this formula:
+ * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825)
+ * where X is the bit field value
+ * this macro reads the register, extracts the bitfield value,
+ * performs the calcualtions and returns temperature in Celsius
+ */
+#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
+ ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
+
+
+#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
+#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
+#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
+#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
+#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
+#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */
+#define QLAFX00_HINFO_RESEND_INTERVAL 60 /* number of seconds */
+
+#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */
+
+/* Max conncurrent IOs that can be queued */
+#define QLAFX00_MAX_CANQUEUE 1024
+
+/* IOCTL IOCB abort success */
+#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
new file mode 100644
index 000000000..7d2b18f26
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -0,0 +1,4518 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_tcq.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M (0)
+#define QLA82XX_PCI_MS_2M (0x80000)
+#define QLA82XX_PCI_OCM0_2M (0xc0000)
+#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+#define BLOCK_PROTECT_BITS 0x0F
+
+/* CRB window related */
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
+#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+ ((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+static int qla82xx_crb_table_initialized;
+
+#define qla82xx_crb_addr_transform(name) \
+ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+static void qla82xx_crb_addr_transform_setup(void)
+{
+ qla82xx_crb_addr_transform(XDMA);
+ qla82xx_crb_addr_transform(TIMR);
+ qla82xx_crb_addr_transform(SRE);
+ qla82xx_crb_addr_transform(SQN3);
+ qla82xx_crb_addr_transform(SQN2);
+ qla82xx_crb_addr_transform(SQN1);
+ qla82xx_crb_addr_transform(SQN0);
+ qla82xx_crb_addr_transform(SQS3);
+ qla82xx_crb_addr_transform(SQS2);
+ qla82xx_crb_addr_transform(SQS1);
+ qla82xx_crb_addr_transform(SQS0);
+ qla82xx_crb_addr_transform(RPMX7);
+ qla82xx_crb_addr_transform(RPMX6);
+ qla82xx_crb_addr_transform(RPMX5);
+ qla82xx_crb_addr_transform(RPMX4);
+ qla82xx_crb_addr_transform(RPMX3);
+ qla82xx_crb_addr_transform(RPMX2);
+ qla82xx_crb_addr_transform(RPMX1);
+ qla82xx_crb_addr_transform(RPMX0);
+ qla82xx_crb_addr_transform(ROMUSB);
+ qla82xx_crb_addr_transform(SN);
+ qla82xx_crb_addr_transform(QMN);
+ qla82xx_crb_addr_transform(QMS);
+ qla82xx_crb_addr_transform(PGNI);
+ qla82xx_crb_addr_transform(PGND);
+ qla82xx_crb_addr_transform(PGN3);
+ qla82xx_crb_addr_transform(PGN2);
+ qla82xx_crb_addr_transform(PGN1);
+ qla82xx_crb_addr_transform(PGN0);
+ qla82xx_crb_addr_transform(PGSI);
+ qla82xx_crb_addr_transform(PGSD);
+ qla82xx_crb_addr_transform(PGS3);
+ qla82xx_crb_addr_transform(PGS2);
+ qla82xx_crb_addr_transform(PGS1);
+ qla82xx_crb_addr_transform(PGS0);
+ qla82xx_crb_addr_transform(PS);
+ qla82xx_crb_addr_transform(PH);
+ qla82xx_crb_addr_transform(NIU);
+ qla82xx_crb_addr_transform(I2Q);
+ qla82xx_crb_addr_transform(EG);
+ qla82xx_crb_addr_transform(MN);
+ qla82xx_crb_addr_transform(MS);
+ qla82xx_crb_addr_transform(CAS2);
+ qla82xx_crb_addr_transform(CAS1);
+ qla82xx_crb_addr_transform(CAS0);
+ qla82xx_crb_addr_transform(CAM);
+ qla82xx_crb_addr_transform(C2C1);
+ qla82xx_crb_addr_transform(C2C0);
+ qla82xx_crb_addr_transform(SMB);
+ qla82xx_crb_addr_transform(OCM0);
+ /*
+ * Used only in P3 just define it for P2 also.
+ */
+ qla82xx_crb_addr_transform(I2C0);
+
+ qla82xx_crb_table_initialized = 1;
+}
+
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0100000, 0x0102000, 0x120000},
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } } ,
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
+ {{{1, 0x0800000, 0x0802000, 0x170000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
+ {{{0} } },
+ {{{1, 0x2100000, 0x2102000, 0x120000},
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
+ {{{0} } },
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned qla82xx_crb_hub_agt[64] = {
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* Device states */
+static char *q_dev_state[] = {
+ "Unknown",
+ "Cold",
+ "Initializing",
+ "Ready",
+ "Need Reset",
+ "Need Quiescent",
+ "Failed",
+ "Quiescent",
+};
+
+char *qdev_state(uint32_t dev_state)
+{
+ return q_dev_state[dev_state];
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+{
+ u32 win_read;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ ha->crb_win = CRB_HI(*off);
+ writel(ha->crb_win,
+ (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = RD_REG_DWORD((void __iomem *)
+ (CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != ha->crb_win) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb000,
+ "%s: Written crbwin (0x%x) "
+ "!= Read crbwin (0x%x), off=0x%lx.\n",
+ __func__, ha->crb_win, win_read, *off);
+ }
+ *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
+
+static inline unsigned long
+qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ /* See if we are currently pointing to the region we want to use next */
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
+ /* No need to change window. PCIX and PCIEregs are in both
+ * regs are in both windows.
+ */
+ return off;
+ }
+
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
+ /* We are in first CRB window */
+ if (ha->curr_window != 0)
+ WARN_ON(1);
+ return off;
+ }
+
+ if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
+ /* We are in second CRB window */
+ off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
+
+ if (ha->curr_window != 1)
+ return off;
+
+ /* We are in the QM or direct access
+ * register region - do nothing
+ */
+ if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
+ (off < QLA82XX_PCI_CAMQM_MAX))
+ return off;
+ }
+ /* strange address given */
+ ql_dbg(ql_dbg_p3p, vha, 0xb001,
+ "%s: Warning: unm_nic_pci_set_crbwindow "
+ "called with an unknown address(%llx).\n",
+ QLA2XXX_DRIVER_NAME, off);
+ return off;
+}
+
+static int
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+{
+ struct crb_128M_2M_sub_block_map *m;
+
+ if (*off >= QLA82XX_CRB_MAX)
+ return -1;
+
+ if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
+ *off = (*off - QLA82XX_PCI_CAMQM) +
+ QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+ return 0;
+ }
+
+ if (*off < QLA82XX_PCI_CRBSPACE)
+ return -1;
+
+ *off -= QLA82XX_PCI_CRBSPACE;
+
+ /* Try direct map */
+ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
+ *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+ return 0;
+ }
+ /* Not in direct map, use crb window */
+ return 1;
+}
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore3 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
+ return 0;
+}
+
+int
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+{
+ unsigned long flags = 0;
+ int rv;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+
+ writel(data, (void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return 0;
+}
+
+int
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+{
+ unsigned long flags = 0;
+ int rv;
+ u32 data;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+ data = RD_REG_DWORD((void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return data;
+}
+
+#define IDC_LOCK_TIMEOUT 100000000
+int qla82xx_idc_lock(struct qla_hw_data *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore5 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= IDC_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+
+void qla82xx_idc_unlock(struct qla_hw_data *ha)
+{
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+/* PCI Windowing for DDR regions. */
+#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+/*
+ * check memory access boundary.
+ * used by test agent. support ddr access only for now
+ */
+static unsigned long
+qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
+ unsigned long long addr, int size)
+{
+ if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
+ return 0;
+ else
+ return 1;
+}
+
+static int qla82xx_pci_set_window_warning_count;
+
+static unsigned long
+qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
+{
+ int window;
+ u32 win_read;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ window = MN_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ if ((win_read << 17) != window) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb003,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX)) {
+ unsigned int temp1;
+ if ((addr & 0x00ff800) == 0xff800) {
+ ql_log(ql_log_warn, vha, 0xb004,
+ "%s: QM access not handled.\n", __func__);
+ addr = -1UL;
+ }
+ window = OCM_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ temp1 = ((window & 0x1FF) << 7) |
+ ((window & 0x0FFFE0000) >> 17);
+ if (win_read != temp1) {
+ ql_log(ql_log_warn, vha, 0xb005,
+ "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
+ __func__, temp1, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+ /* QDR network side */
+ window = MS_WIN(addr);
+ ha->qdr_sn_window = window;
+ qla82xx_wr_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+ if (win_read != window) {
+ ql_log(ql_log_warn, vha, 0xb006,
+ "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist,
+ * this limits the chit chat so debugging isn't slowed down.
+ */
+ if ((qla82xx_pci_set_window_warning_count++ < 8) ||
+ (qla82xx_pci_set_window_warning_count%64 == 0)) {
+ ql_log(ql_log_warn, vha, 0xb007,
+ "%s: Warning:%s Unknown address range!.\n",
+ __func__, QLA2XXX_DRIVER_NAME);
+ }
+ addr = -1UL;
+ }
+ return addr;
+}
+
+/* check if address is in the same windows as the previous access */
+static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
+ unsigned long long addr)
+{
+ int window;
+ unsigned long long qdr_max;
+
+ qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+ /* DDR network side */
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX))
+ BUG();
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+ QLA82XX_ADDR_OCM1_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+ /* QDR network side */
+ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+ if (ha->qdr_sn_window == window)
+ return 1;
+ }
+ return 0;
+}
+
+static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void __iomem *addr = NULL;
+ int ret = 0;
+ u64 start;
+ uint8_t __iomem *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ ql_log(ql_log_fatal, vha, 0xb008,
+ "%s out of bound pci memory "
+ "access, offset is 0x%llx.\n",
+ QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ *(u8 *)data = 0;
+ return -1;
+ }
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = readb(addr);
+ break;
+ case 2:
+ *(u16 *)data = readw(addr);
+ break;
+ case 4:
+ *(u32 *)data = readl(addr);
+ break;
+ case 8:
+ *(u64 *)data = readq(addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+static int
+qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void __iomem *addr = NULL;
+ int ret = 0;
+ u64 start;
+ uint8_t __iomem *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ ql_log(ql_log_fatal, vha, 0xb009,
+ "%s out of bount memory "
+ "access, offset is 0x%llx.\n",
+ QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == NULL)
+ return -1;
+
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ switch (size) {
+ case 1:
+ writeb(*(u8 *)data, addr);
+ break;
+ case 2:
+ writew(*(u16 *)data, addr);
+ break;
+ case 4:
+ writel(*(u32 *)data, addr);
+ break;
+ case 8:
+ writeq(*(u64 *)data, addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MTU_FUDGE_FACTOR 100
+static unsigned long
+qla82xx_decode_crb_addr(unsigned long addr)
+{
+ int i;
+ unsigned long base_addr, offset, pci_base;
+
+ if (!qla82xx_crb_table_initialized)
+ qla82xx_crb_addr_transform_setup();
+
+ pci_base = ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == ADDR_ERROR)
+ return pci_base;
+ return pci_base + offset;
+}
+
+static long rom_max_timeout = 100;
+static long qla82xx_rom_lock_timeout = 100;
+
+static int
+qla82xx_rom_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while (!done) {
+ /* acquire semaphore2 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= qla82xx_rom_lock_timeout) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_dbg(ql_dbg_p3p, vha, 0xb157,
+ "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+ __func__, ha->portnum, lock_owner);
+ return -1;
+ }
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
+ return 0;
+}
+
+static void
+qla82xx_rom_unlock(struct qla_hw_data *ha)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+}
+
+static int
+qla82xx_wait_rom_busy(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ long done = 0 ;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 4;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb00a,
+ "%s: Timeout reached waiting for rom busy.\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+qla82xx_wait_rom_done(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ long done = 0 ;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 2;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb00b,
+ "%s: Timeout reached waiting for rom done.\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
+{
+ uint32_t off_value, rval = 0;
+
+ WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ (off & 0xFFFF0000));
+
+ /* Read back value to make sure write has gone through */
+ RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ off_value = (off & 0x0000FFFF);
+
+ if (flag)
+ WRT_REG_DWORD((void __iomem *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
+ data);
+ else
+ rval = RD_REG_DWORD((void __iomem *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+
+ return rval;
+}
+
+static int
+qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ /* Dword reads to flash. */
+ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
+ *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
+ (addr & 0x0000FFFF), 0, 0);
+
+ return 0;
+}
+
+static int
+qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ int ret, loops = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ schedule();
+ loops++;
+ }
+ if (loops >= 50000) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_log(ql_log_fatal, vha, 0x00b9,
+ "Failed to acquire SEM2 lock, Lock Owner %u.\n",
+ lock_owner);
+ return -1;
+ }
+ ret = qla82xx_do_rom_fast_read(ha, addr, valp);
+ qla82xx_rom_unlock(ha);
+ return ret;
+}
+
+static int
+qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ ql_log(ql_log_warn, vha, 0xb00c,
+ "Error waiting for rom done.\n");
+ return -1;
+ }
+ *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int
+qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ uint32_t done = 1 ;
+ uint32_t val;
+ int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ while ((done != 0) && (ret == 0)) {
+ ret = qla82xx_read_status_reg(ha, &val);
+ done = val & 1;
+ timeout++;
+ udelay(10);
+ cond_resched();
+ if (timeout >= 50000) {
+ ql_log(ql_log_warn, vha, 0xb00d,
+ "Timeout reached waiting for write finish.\n");
+ return -1;
+ }
+ }
+ return ret;
+}
+
+static int
+qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
+{
+ uint32_t val;
+ qla82xx_wait_rom_busy(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha))
+ return -1;
+ if (qla82xx_read_status_reg(ha, &val) != 0)
+ return -1;
+ if ((val & 2) != 2)
+ return -1;
+ return 0;
+}
+
+static int
+qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ if (qla82xx_flash_set_write_enable(ha))
+ return -1;
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
+ if (qla82xx_wait_rom_done(ha)) {
+ ql_log(ql_log_warn, vha, 0xb00e,
+ "Error waiting for rom done.\n");
+ return -1;
+ }
+ return qla82xx_flash_wait_write_finish(ha);
+}
+
+static int
+qla82xx_write_disable_flash(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
+ if (qla82xx_wait_rom_done(ha)) {
+ ql_log(ql_log_warn, vha, 0xb00f,
+ "Error waiting for rom done.\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+ql82xx_rom_lock_d(struct qla_hw_data *ha)
+{
+ int loops = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ cond_resched();
+ loops++;
+ }
+ if (loops >= 50000) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_log(ql_log_warn, vha, 0xb010,
+ "ROM lock failed, Lock Owner %u.\n", lock_owner);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
+ uint32_t data)
+{
+ int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ ql_log(ql_log_warn, vha, 0xb011,
+ "ROM lock failed.\n");
+ return ret;
+ }
+
+ if (qla82xx_flash_set_write_enable(ha))
+ goto done_write;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ ql_log(ql_log_warn, vha, 0xb012,
+ "Error waiting for rom done.\n");
+ ret = -1;
+ goto done_write;
+ }
+
+ ret = qla82xx_flash_wait_write_finish(ha);
+
+done_write:
+ qla82xx_rom_unlock(ha);
+ return ret;
+}
+
+/* This routine does CRB initialize sequence
+ * to put the ISP into operational state
+ */
+static int
+qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
+{
+ int addr, val;
+ int i ;
+ struct crb_addr_pair *buf;
+ unsigned long off;
+ unsigned offset, n;
+ struct qla_hw_data *ha = vha->hw;
+
+ struct crb_addr_pair {
+ long addr;
+ long data;
+ };
+
+ /* Halt all the individual PEGs and other blocks of the ISP */
+ qla82xx_rom_lock(ha);
+
+ /* disable all I2Q */
+ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+ /* disable xge rx/tx */
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+ /* disable xg1 rx/tx */
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
+
+ /* halt sre */
+ val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+ qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+
+ /* halt epg */
+ qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+
+ /* halt timers */
+ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
+
+ /* halt pegs */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(20);
+
+ /* big hammer */
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ /* don't reset CAM block on reset */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qla82xx_rom_unlock(ha);
+
+ /* Read the signature value from the flash.
+ * Offset 0: Contain signature (0xcafecafe)
+ * Offset 4: Offset and number of addr/value pairs
+ * that present in CRB initialize sequence
+ */
+ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+ qla82xx_rom_fast_read(ha, 4, &n) != 0) {
+ ql_log(ql_log_fatal, vha, 0x006e,
+ "Error Reading crb_init area: n: %08x.\n", n);
+ return -1;
+ }
+
+ /* Offset in flash = lower 16 bits
+ * Number of entries = upper 16 bits
+ */
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ /* number of addr/value pair should not exceed 1024 entries */
+ if (n >= 1024) {
+ ql_log(ql_log_fatal, vha, 0x0071,
+ "Card flash not initialized:n=0x%x.\n", n);
+ return -1;
+ }
+
+ ql_log(ql_log_info, vha, 0x0072,
+ "%d CRB init values found in ROM.\n", n);
+
+ buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ ql_log(ql_log_fatal, vha, 0x010c,
+ "Unable to allocate memory.\n");
+ return -1;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+ qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -1;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Translate internal CRB initialization
+ * address to PCI bus address
+ */
+ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+ QLA82XX_PCI_CRBSPACE;
+ /* Not all CRB addr/value pair to be written,
+ * some of them are skipped
+ */
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLA82XX_CAM_RAM(0x1fc))
+ continue;
+
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+
+ /* skip core clock, so that firmware can increase the clock */
+ if (off == (ROMUSB_GLB + 0xc8))
+ continue;
+
+ /* skip the function enable register */
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+ continue;
+
+ if (off == ADDR_ERROR) {
+ ql_log(ql_log_fatal, vha, 0x0116,
+ "Unknown addr: 0x%08lx.\n", buf[i].addr);
+ continue;
+ }
+
+ qla82xx_wr_32(ha, off, buf[i].data);
+
+ /* ISP requires much bigger delay to settle down,
+ * else crb_window returns 0xffffffff
+ */
+ if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+ msleep(1000);
+
+ /* ISP requires millisec delay between
+ * successive CRB register updation
+ */
+ msleep(1);
+ }
+
+ kfree(buf);
+
+ /* Resetting the data and instruction cache */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+ /* Clear all protocol processing engines */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+ return 0;
+}
+
+static int
+qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ int scale, shift_amount, startword;
+ uint32_t temp;
+ uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha,
+ off, data, size);
+ }
+
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+
+ off8 = off & 0xfffffff0;
+ loop = (((off & 0xf) + size - 1) >> 4) + 1;
+ shift_amount = 4;
+ scale = 2;
+ startword = (off & 0xf)/8;
+
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_pci_mem_read_2M(ha, off8 +
+ (i << shift_amount), &word[i * scale], 8))
+ return -1;
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+
+ if (sz[0] == 8) {
+ word[startword] = tmpw;
+ } else {
+ word[startword] &=
+ ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+ }
+ if (sz[1] != 0) {
+ word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+ word[startword+1] |= tmpw >> (sz[0] * 8);
+ }
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i * scale] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i * scale] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ temp = word[i*scale + 1] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
+ temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
+
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to write through agent.\n");
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
+{
+ int i;
+ long size = 0;
+ long flashaddr = ha->flt_region_bootload << 2;
+ long memaddr = BOOTLD_START;
+ u64 data;
+ u32 high, low;
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ for (i = 0; i < size; i++) {
+ if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+ (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
+ return -1;
+ }
+ data = ((u64)high << 32) | low ;
+ qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+ flashaddr += 8;
+ memaddr += 8;
+
+ if (i % 0x1000 == 0)
+ msleep(1);
+ }
+ udelay(100);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ read_unlock(&ha->hw_lock);
+ return 0;
+}
+
+int
+qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ int shift_amount;
+ uint32_t temp;
+ uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha,
+ off, data, size);
+ }
+
+ off8 = off & 0xfffffff0;
+ off0[0] = off & 0xf;
+ sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+ shift_amount = 4;
+ loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+ off0[1] = 0;
+ sz[1] = size - sz[0];
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to read through agent.\n");
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+ }
+ }
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if ((off0[0] & 7) == 0) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ return 0;
+}
+
+
+static struct qla82xx_uri_table_desc *
+qla82xx_get_table_desc(const u8 *unirom, int section)
+{
+ uint32_t i;
+ struct qla82xx_uri_table_desc *directory =
+ (struct qla82xx_uri_table_desc *)&unirom[0];
+ __le32 offset;
+ __le32 tab_type;
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+ offset = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
+
+ if (tab_type == section)
+ return (struct qla82xx_uri_table_desc *)&unirom[offset];
+ }
+
+ return NULL;
+}
+
+static struct qla82xx_uri_data_desc *
+qla82xx_get_data_desc(struct qla_hw_data *ha,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = ha->hablob->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
+ struct qla82xx_uri_table_desc *tab_desc = NULL;
+ __le32 offset;
+
+ tab_desc = qla82xx_get_table_desc(unirom, section);
+ if (!tab_desc)
+ return NULL;
+
+ offset = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct qla82xx_uri_data_desc *)&unirom[offset];
+}
+
+static u8 *
+qla82xx_get_bootld_offset(struct qla_hw_data *ha)
+{
+ u32 offset = BOOTLD_START;
+ struct qla82xx_uri_data_desc *uri_desc = NULL;
+
+ if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+ uri_desc = qla82xx_get_data_desc(ha,
+ QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
+ if (uri_desc)
+ offset = cpu_to_le32(uri_desc->findex);
+ }
+
+ return (u8 *)&ha->hablob->fw->data[offset];
+}
+
+static __le32
+qla82xx_get_fw_size(struct qla_hw_data *ha)
+{
+ struct qla82xx_uri_data_desc *uri_desc = NULL;
+
+ if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+ uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
+ QLA82XX_URI_FIRMWARE_IDX_OFF);
+ if (uri_desc)
+ return cpu_to_le32(uri_desc->size);
+ }
+
+ return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+}
+
+static u8 *
+qla82xx_get_fw_offs(struct qla_hw_data *ha)
+{
+ u32 offset = IMAGE_START;
+ struct qla82xx_uri_data_desc *uri_desc = NULL;
+
+ if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+ uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
+ QLA82XX_URI_FIRMWARE_IDX_OFF);
+ if (uri_desc)
+ offset = cpu_to_le32(uri_desc->findex);
+ }
+
+ return (u8 *)&ha->hablob->fw->data[offset];
+}
+
+/* PCI related functions */
+int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+
+ switch (region) {
+ case 0:
+ val = 0;
+ break;
+ case 1:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control + QLA82XX_MSIX_TBL_SPACE;
+ break;
+ }
+ return val;
+}
+
+
+int
+qla82xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint32_t len = 0;
+
+ if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
+ "Failed to reserver selected regions.\n");
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
+ "Region #0 not an MMIO resource, aborting.\n");
+ goto iospace_error_exit;
+ }
+
+ len = pci_resource_len(ha->pdev, 0);
+ ha->nx_pcibase =
+ (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+ if (!ha->nx_pcibase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
+ "Cannot remap pcibase MMIO, aborting.\n");
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer */
+ if (IS_QLA8044(ha)) {
+ ha->iobase =
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase);
+ } else if (IS_QLA82XX(ha)) {
+ ha->iobase =
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+ }
+
+ if (!ql2xdbwr) {
+ ha->nxdb_wr_ptr =
+ (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+ (ha->pdev->devfn << 12)), 4);
+ if (!ha->nxdb_wr_ptr) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
+ "Cannot remap MMIO, aborting.\n");
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer,
+ * door bell read and write pointer
+ */
+ ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+ (ha->pdev->devfn * 8);
+ } else {
+ ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+ QLA82XX_CAMRAM_DB1 :
+ QLA82XX_CAMRAM_DB2);
+ }
+
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
+ "nx_pci_base=%p iobase=%p "
+ "max_req_queues=%d msix_count=%d.\n",
+ (void *)ha->nx_pcibase, ha->iobase,
+ ha->max_req_queues, ha->msix_count);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
+ "nx_pci_base=%p iobase=%p "
+ "max_req_queues=%d msix_count=%d.\n",
+ (void *)ha->nx_pcibase, ha->iobase,
+ ha->max_req_queues, ha->msix_count);
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+/* GS related functions */
+
+/* Initialization related functions */
+
+/**
+ * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+*/
+int
+qla82xx_pci_config(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int ret;
+
+ pci_set_master(ha->pdev);
+ ret = pci_set_mwi(ha->pdev);
+ ha->chip_revision = ha->pdev->revision;
+ ql_dbg(ql_dbg_init, vha, 0x0043,
+ "Chip revision:%d.\n",
+ ha->chip_revision);
+ return 0;
+}
+
+/**
+ * qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qla82xx_reset_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ha->isp_ops->disable_intrs(ha);
+}
+
+void qla82xx_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ struct init_cb_81xx *icb;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ icb = (struct init_cb_81xx *)ha->init_cb;
+ icb->request_q_outpointer = __constant_cpu_to_le16(0);
+ icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
+}
+
+static int
+qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ __le64 data;
+
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
+ flashaddr = BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+ if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+ return -EIO;
+ flashaddr += 8;
+ }
+
+ flashaddr = FLASH_ADDR_START;
+ size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+ ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+ return -EIO;
+ flashaddr += 8;
+ }
+ udelay(100);
+
+ /* Write a magic value to CAMRAM register
+ * at a specified offset to indicate
+ * that all data is written and
+ * ready for firmware to initialize.
+ */
+ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
+
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ read_unlock(&ha->hw_lock);
+ return 0;
+}
+
+static int
+qla82xx_set_product_offset(struct qla_hw_data *ha)
+{
+ struct qla82xx_uri_table_desc *ptab_desc = NULL;
+ const uint8_t *unirom = ha->hablob->fw->data;
+ uint32_t i;
+ __le32 entries;
+ __le32 flags, file_chiprev, offset;
+ uint8_t chiprev = ha->chip_revision;
+ /* Hardcoding mn_present flag for P3P */
+ int mn_present = 0;
+ uint32_t flagbit;
+
+ ptab_desc = qla82xx_get_table_desc(unirom,
+ QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
+ if (!ptab_desc)
+ return -1;
+
+ entries = cpu_to_le32(ptab_desc->num_entries);
+
+ for (i = 0; i < entries; i++) {
+ offset = cpu_to_le32(ptab_desc->findex) +
+ (i * cpu_to_le32(ptab_desc->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offset] +
+ QLA82XX_URI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
+ QLA82XX_URI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
+ ha->file_prd_off = offset;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int
+qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
+{
+ __le32 val;
+ uint32_t min_size;
+ struct qla_hw_data *ha = vha->hw;
+ const struct firmware *fw = ha->hablob->fw;
+
+ ha->fw_type = fw_type;
+
+ if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+ if (qla82xx_set_product_offset(ha))
+ return -EINVAL;
+
+ min_size = QLA82XX_URI_FW_MIN_SIZE;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLA82XX_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+ ql_log(ql_log_info, vha, 0x00a8,
+ "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ ql_log(ql_log_fatal, vha, 0x00a9,
+ "Cmd Peg initialization failed: 0x%x.\n", val);
+
+ val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+ ql_log(ql_log_info, vha, 0x00ab,
+ "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ ql_log(ql_log_fatal, vha, 0x00ac,
+ "Rcv Peg initializatin failed: 0x%x.\n", val);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+/* ISR related functions */
+static struct qla82xx_legacy_intr_set legacy_intr[] = \
+ QLA82XX_LEGACY_INTR_CONFIG;
+
+/*
+ * qla82xx_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+void
+qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+ uint16_t cnt;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out[0] = mb0;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ wptr++;
+ }
+
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x5053,
+ "MBX pointer ERROR.\n");
+}
+
+/*
+ * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ * @regs:
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla82xx_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0, status1 = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat = 0;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0xb053,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ if (!ha->flags.msi_enabled) {
+ status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ if (!(status & ha->nx_legacy_intr.int_vec_bit))
+ return IRQ_NONE;
+
+ status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
+ if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
+ return IRQ_NONE;
+ }
+
+ /* clear the interrupt */
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+ /* read twice to ensure write is flushed */
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5054,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!ha->flags.msi_enabled)
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_default(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ uint32_t stat = 0;
+ uint32_t host_int = 0;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ do {
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+ break;
+ if (host_int) {
+ stat = RD_REG_DWORD(&reg->host_status);
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5041,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ } while (0);
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_rsp_q(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ unsigned long flags;
+ uint32_t host_int = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+ goto out;
+ qla24xx_process_response_queue(vha, rsp);
+ WRT_REG_DWORD(&reg->host_int, 0);
+out:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return IRQ_HANDLED;
+}
+
+void
+qla82xx_poll(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ uint32_t stat;
+ uint32_t host_int = 0;
+ uint16_t mb[4];
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer.\n", __func__);
+ return;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+ goto out;
+ if (host_int) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_p3p, vha, 0xb013,
+ "Unrecognized interrupt type (%d).\n",
+ stat * 0xff);
+ break;
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+out:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla82xx_enable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_enable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 1;
+}
+
+void
+qla82xx_disable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_disable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 0;
+}
+
+void qla82xx_init_flags(struct qla_hw_data *ha)
+{
+ struct qla82xx_legacy_intr_set *nx_legacy_intr;
+
+ /* ISP 8021 initializations */
+ rwlock_init(&ha->hw_lock);
+ ha->qdr_sn_window = -1;
+ ha->ddr_mn_window = -1;
+ ha->curr_window = 255;
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ nx_legacy_intr = &legacy_intr[ha->portnum];
+ ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+ ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
+ ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+ ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+}
+
+inline void
+qla82xx_set_idc_version(scsi_qla_host_t *vha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_IDC_VERSION);
+ ql_log(ql_log_info, vha, 0xb082,
+ "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
+ } else {
+ idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
+ if (idc_ver != QLA82XX_IDC_VERSION)
+ ql_log(ql_log_info, vha, 0xb083,
+ "qla2xxx driver IDC version %d is not compatible "
+ "with IDC version %d of the other drivers\n",
+ QLA82XX_IDC_VERSION, idc_ver);
+ }
+}
+
+inline void
+qla82xx_set_drv_active(scsi_qla_host_t *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ /* If reset value is all FF's, initialize DRV_ACTIVE */
+ if (drv_active == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
+ QLA82XX_DRV_NOT_ACTIVE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+ drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline void
+qla82xx_clear_drv_active(struct qla_hw_data *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+static inline int
+qla82xx_need_reset(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ int rval;
+
+ if (ha->flags.nic_core_reset_owner)
+ return 1;
+ else {
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ return rval;
+ }
+}
+
+static inline void
+qla82xx_set_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+
+ /* If reset value is all FF's, initialize DRV_STATE */
+ if (drv_state == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ }
+ drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ ql_dbg(ql_dbg_init, vha, 0x00bb,
+ "drv_state = 0x%08x.\n", drv_state);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_clear_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
+{
+ uint32_t qsnt_state;
+
+ qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+void
+qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t qsnt_state;
+
+ qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+static int
+qla82xx_load_fw(scsi_qla_host_t *vha)
+{
+ int rst;
+ struct fw_blob *blob;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0x009f,
+ "Error during CRB initialization.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+ udelay(500);
+
+ /* Bring QM and CAMRAM out of reset */
+ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+ rst &= ~((1 << 28) | (1 << 24));
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+ /*
+ * FW Load priority:
+ * 1) Operational firmware residing in flash.
+ * 2) Firmware via request-firmware interface (.bin file).
+ */
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
+
+ ql_log(ql_log_info, vha, 0x00a0,
+ "Attempting to load firmware from flash.\n");
+
+ if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0x00a1,
+ "Firmware loaded successfully from flash.\n");
+ return QLA_SUCCESS;
+ } else {
+ ql_log(ql_log_warn, vha, 0x0108,
+ "Firmware load from flash failed.\n");
+ }
+
+try_blob_fw:
+ ql_log(ql_log_info, vha, 0x00a2,
+ "Attempting to load firmware from blob.\n");
+
+ /* Load firmware blob. */
+ blob = ha->hablob = qla2x00_request_firmware(vha);
+ if (!blob) {
+ ql_log(ql_log_fatal, vha, 0x00a3,
+ "Firmware image not present.\n");
+ goto fw_load_failed;
+ }
+
+ /* Validating firmware blob */
+ if (qla82xx_validate_firmware_blob(vha,
+ QLA82XX_FLASH_ROMIMAGE)) {
+ /* Fallback to URI format */
+ if (qla82xx_validate_firmware_blob(vha,
+ QLA82XX_UNIFIED_ROMIMAGE)) {
+ ql_log(ql_log_fatal, vha, 0x00a4,
+ "No valid firmware image found.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0x00a5,
+ "Firmware loaded successfully from binary blob.\n");
+ return QLA_SUCCESS;
+ } else {
+ ql_log(ql_log_fatal, vha, 0x00a6,
+ "Firmware load failed for binary blob.\n");
+ blob->fw = NULL;
+ blob = NULL;
+ goto fw_load_failed;
+ }
+ return QLA_SUCCESS;
+
+fw_load_failed:
+ return QLA_FUNCTION_FAILED;
+}
+
+int
+qla82xx_start_firmware(scsi_qla_host_t *vha)
+{
+ uint16_t lnk;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* scrub dma mask expansion register */
+ qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
+
+ /* Put both the PEG CMD and RCV PEG to default state
+ * of 0 before resetting the hardware
+ */
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+
+ /* Overwrite stale initialization register values */
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+ if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0x00a7,
+ "Error trying to start fw.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Handshake with the card before we register the devices. */
+ if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0x00aa,
+ "Error during card handshake.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Negotiated Link width */
+ pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ /* Synchronize with Receive peg */
+ return qla82xx_check_rcvpeg_state(ha);
+}
+
+static uint32_t *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t length)
+{
+ uint32_t i;
+ uint32_t val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Dword reads to flash. */
+ for (i = 0; i < length/4; i++, faddr += 4) {
+ if (qla82xx_rom_fast_read(ha, faddr, &val)) {
+ ql_log(ql_log_warn, vha, 0x0106,
+ "Do ROM fast read failed.\n");
+ goto done_read;
+ }
+ dwptr[i] = __constant_cpu_to_le32(val);
+ }
+done_read:
+ return dwptr;
+}
+
+static int
+qla82xx_unprotect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ ql_log(ql_log_warn, vha, 0xb014,
+ "ROM Lock failed.\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_unprotect;
+
+ val &= ~(BLOCK_PROTECT_BITS << 2);
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0) {
+ val |= (BLOCK_PROTECT_BITS << 2);
+ qla82xx_write_status_reg(ha, val);
+ }
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ ql_log(ql_log_warn, vha, 0xb015,
+ "Write disable failed.\n");
+
+done_unprotect:
+ qla82xx_rom_unlock(ha);
+ return ret;
+}
+
+static int
+qla82xx_protect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ ql_log(ql_log_warn, vha, 0xb016,
+ "ROM Lock failed.\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_protect;
+
+ val |= (BLOCK_PROTECT_BITS << 2);
+ /* LOCK all sectors */
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0)
+ ql_log(ql_log_warn, vha, 0xb017,
+ "Write status register failed.\n");
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ ql_log(ql_log_warn, vha, 0xb018,
+ "Write disable failed.\n");
+done_protect:
+ qla82xx_rom_unlock(ha);
+ return ret;
+}
+
+static int
+qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
+{
+ int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ ql_log(ql_log_warn, vha, 0xb019,
+ "ROM Lock failed.\n");
+ return ret;
+ }
+
+ qla82xx_flash_set_write_enable(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
+
+ if (qla82xx_wait_rom_done(ha)) {
+ ql_log(ql_log_warn, vha, 0xb01a,
+ "Error waiting for rom done.\n");
+ ret = -1;
+ goto done;
+ }
+ ret = qla82xx_flash_wait_write_finish(ha);
+done:
+ qla82xx_rom_unlock(ha);
+ return ret;
+}
+
+/*
+ * Address and length are byte address
+ */
+uint8_t *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+static int
+qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret;
+ uint32_t liter;
+ uint32_t sec_mask, rest_addr;
+ dma_addr_t optrom_dma;
+ void *optrom = NULL;
+ int page_mode = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret = -1;
+
+ /* Prepare burst-capable write on supported ISPs. */
+ if (page_mode && !(faddr & 0xfff) &&
+ dwords > OPTROM_BURST_DWORDS) {
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ ql_log(ql_log_warn, vha, 0xb01b,
+ "Unable to allocate memory "
+ "for optrom burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
+ }
+ }
+
+ rest_addr = ha->fdt_block_size - 1;
+ sec_mask = ~rest_addr;
+
+ ret = qla82xx_unprotect_flash(ha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb01c,
+ "Unable to unprotect flash for update.\n");
+ goto write_done;
+ }
+
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ /* Are we at the beginning of a sector? */
+ if ((faddr & rest_addr) == 0) {
+
+ ret = qla82xx_erase_sector(ha, faddr);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb01d,
+ "Unable to erase sector: address=%x.\n",
+ faddr);
+ break;
+ }
+ }
+
+ /* Go with burst-write. */
+ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+ /* Copy data to DMA'ble buffer. */
+ memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+ ret = qla2x00_load_ram(vha, optrom_dma,
+ (ha->flash_data_off | faddr),
+ OPTROM_BURST_DWORDS);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb01e,
+ "Unable to burst-write optrom segment "
+ "(%x/%x/%llx).\n", ret,
+ (ha->flash_data_off | faddr),
+ (unsigned long long)optrom_dma);
+ ql_log(ql_log_warn, vha, 0xb01f,
+ "Reverting to slow-write.\n");
+
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ optrom = NULL;
+ } else {
+ liter += OPTROM_BURST_DWORDS - 1;
+ faddr += OPTROM_BURST_DWORDS - 1;
+ dwptr += OPTROM_BURST_DWORDS - 1;
+ continue;
+ }
+ }
+
+ ret = qla82xx_write_flash_dword(ha, faddr,
+ cpu_to_le32(*dwptr));
+ if (ret) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb020,
+ "Unable to program flash address=%x data=%x.\n",
+ faddr, *dwptr);
+ break;
+ }
+ }
+
+ ret = qla82xx_protect_flash(ha);
+ if (ret)
+ ql_log(ql_log_warn, vha, 0xb021,
+ "Unable to protect flash after update.\n");
+write_done:
+ if (optrom)
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ return ret;
+}
+
+int
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
+ length >> 2);
+ scsi_unblock_requests(vha->host);
+
+ /* Convert return ISP82xx to generic */
+ if (rval)
+ rval = QLA_FUNCTION_FAILED;
+ else
+ rval = QLA_SUCCESS;
+ return rval;
+}
+
+void
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ reg = &ha->iobase->isp82;
+ dbval = 0x04 | (ha->portnum << 5);
+
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+}
+
+static void
+qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ uint32_t lock_owner = 0;
+
+ if (qla82xx_rom_lock(ha)) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ /* Someone else is holding the lock. */
+ ql_log(ql_log_info, vha, 0xb022,
+ "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
+ }
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla82xx_rom_unlock(ha);
+}
+
+/*
+ * qla82xx_device_bootstrap
+ * Initialize device, set DEV_READY, start fw
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+static int
+qla82xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ int i;
+ uint32_t old_count, count;
+ struct qla_hw_data *ha = vha->hw;
+ int need_reset = 0;
+
+ need_reset = qla82xx_need_reset(ha);
+
+ if (need_reset) {
+ /* We are trying to perform a recovery here. */
+ if (ha->flags.isp82xx_fw_hung)
+ qla82xx_rom_lock_recovery(ha);
+ } else {
+ old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+ count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
+ }
+ qla82xx_rom_lock_recovery(ha);
+ }
+
+ /* set to DEV_INITIALIZING */
+ ql_log(ql_log_info, vha, 0x009e,
+ "HW State: INITIALIZING.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
+
+ qla82xx_idc_unlock(ha);
+ rval = qla82xx_start_firmware(vha);
+ qla82xx_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0x00ad,
+ "HW State: FAILED.\n");
+ qla82xx_clear_drv_active(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
+ return rval;
+ }
+
+dev_ready:
+ ql_log(ql_log_info, vha, 0x00ae,
+ "HW State: READY.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
+
+ return QLA_SUCCESS;
+}
+
+/*
+* qla82xx_need_qsnt_handler
+* Code to start quiescence sequence
+*
+* Note:
+* IDC lock must be held upon entry
+*
+* Return: void
+*/
+
+static void
+qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state, drv_state, drv_active;
+ unsigned long reset_timeout;
+
+ if (vha->flags.online) {
+ /*Block any further I/O and wait for pending cmnds to complete*/
+ qla2x00_quiesce_io(vha);
+ }
+
+ /* Set the quiescence ready bit */
+ qla82xx_set_qsnt_ready(ha);
+
+ /*wait for 30 secs for other functions to ack */
+ reset_timeout = jiffies + (30 * HZ);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ /* Its 2 that is written when qsnt is acked, moving one bit */
+ drv_active = drv_active << 0x01;
+
+ while (drv_state != drv_active) {
+
+ if (time_after_eq(jiffies, reset_timeout)) {
+ /* quiescence timeout, other functions didn't ack
+ * changing the state to DEV_READY
+ */
+ ql_log(ql_log_info, vha, 0xb023,
+ "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+ "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
+ drv_active, drv_state);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_READY);
+ ql_log(ql_log_info, vha, 0xb025,
+ "HW State: DEV_READY.\n");
+ qla82xx_idc_unlock(ha);
+ qla2x00_perform_loop_resync(vha);
+ qla82xx_idc_lock(ha);
+
+ qla82xx_clear_qsnt_ready(vha);
+ return;
+ }
+
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_active = drv_active << 0x01;
+ }
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ /* everyone acked so set the state to DEV_QUIESCENCE */
+ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+ ql_log(ql_log_info, vha, 0xb026,
+ "HW State: DEV_QUIESCENT.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
+ }
+}
+
+/*
+* qla82xx_wait_for_state_change
+* Wait for device state to change from given current state
+*
+* Note:
+* IDC lock must not be held upon entry
+*
+* Return:
+* Changed device state.
+*/
+uint32_t
+qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state;
+
+ do {
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla82xx_idc_unlock(ha);
+ } while (dev_state == curr_state);
+
+ return dev_state;
+}
+
+void
+qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Disable the board */
+ ql_log(ql_log_fatal, vha, 0x00b8,
+ "Disabling the board.\n");
+
+ if (IS_QLA82XX(ha)) {
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_clear_drv_active(ha);
+ qla8044_idc_unlock(ha);
+ }
+
+ /* Set DEV_FAILED flag to disable timer */
+ vha->device_flags |= DFLG_DEV_FAILED;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ vha->flags.online = 0;
+ vha->flags.init_done = 0;
+}
+
+/*
+ * qla82xx_need_reset_handler
+ * Code to start reset sequence
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+static void
+qla82xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ uint32_t active_mask = 0;
+ unsigned long reset_timeout;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (vha->flags.online) {
+ qla82xx_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla82xx_idc_lock(ha);
+ }
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ if (!ha->flags.nic_core_reset_owner) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb028,
+ "reset_acknowledged by 0x%x\n", ha->portnum);
+ qla82xx_set_rst_ready(ha);
+ } else {
+ active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+ drv_active &= active_mask;
+ ql_dbg(ql_dbg_p3p, vha, 0xb029,
+ "active_mask: 0x%08x\n", active_mask);
+ }
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb02a,
+ "drv_state: 0x%08x, drv_active: 0x%08x, "
+ "dev_state: 0x%08x, active_mask: 0x%08x\n",
+ drv_state, drv_active, dev_state, active_mask);
+
+ while (drv_state != drv_active &&
+ dev_state != QLA8XXX_DEV_INITIALIZING) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql_log(ql_log_warn, vha, 0x00b5,
+ "Reset timeout.\n");
+ break;
+ }
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ if (ha->flags.nic_core_reset_owner)
+ drv_active &= active_mask;
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb02b,
+ "drv_state: 0x%08x, drv_active: 0x%08x, "
+ "dev_state: 0x%08x, active_mask: 0x%08x\n",
+ drv_state, drv_active, dev_state, active_mask);
+
+ ql_log(ql_log_info, vha, 0x00b6,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+
+ /* Force to DEV_COLD unless someone else is starting a reset */
+ if (dev_state != QLA8XXX_DEV_INITIALIZING &&
+ dev_state != QLA8XXX_DEV_COLD) {
+ ql_log(ql_log_info, vha, 0x00b7,
+ "HW State: COLD/RE-INIT.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
+ qla82xx_set_rst_ready(ha);
+ if (ql2xmdenable) {
+ if (qla82xx_md_collect(vha))
+ ql_log(ql_log_warn, vha, 0xb02c,
+ "Minidump not collected.\n");
+ } else
+ ql_log(ql_log_warn, vha, 0xb04f,
+ "Minidump disabled.\n");
+ }
+}
+
+int
+qla82xx_check_md_needed(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
+ int rval = QLA_SUCCESS;
+
+ fw_major_version = ha->fw_major_version;
+ fw_minor_version = ha->fw_minor_version;
+ fw_subminor_version = ha->fw_subminor_version;
+
+ rval = qla2x00_get_fw_version(vha);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ if (ql2xmdenable) {
+ if (!ha->fw_dumped) {
+ if ((fw_major_version != ha->fw_major_version ||
+ fw_minor_version != ha->fw_minor_version ||
+ fw_subminor_version != ha->fw_subminor_version) ||
+ (ha->prev_minidump_failed)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb02d,
+ "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
+ fw_major_version, fw_minor_version,
+ fw_subminor_version,
+ ha->fw_major_version,
+ ha->fw_minor_version,
+ ha->fw_subminor_version,
+ ha->prev_minidump_failed);
+ /* Release MiniDump resources */
+ qla82xx_md_free(vha);
+ /* ALlocate MiniDump resources */
+ qla82xx_md_prep(vha);
+ }
+ } else
+ ql_log(ql_log_info, vha, 0xb02e,
+ "Firmware dump available to retrieve\n");
+ }
+ return rval;
+}
+
+
+static int
+qla82xx_check_fw_alive(scsi_qla_host_t *vha)
+{
+ uint32_t fw_heartbeat_counter;
+ int status = 0;
+
+ fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
+ QLA82XX_PEG_ALIVE_COUNTER);
+ /* all 0xff, assume AER/EEH in progress, ignore */
+ if (fw_heartbeat_counter == 0xffffffff) {
+ ql_dbg(ql_dbg_timer, vha, 0x6003,
+ "FW heartbeat counter is 0xffffffff, "
+ "returning status=%d.\n", status);
+ return status;
+ }
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ status = 1;
+ }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ if (status)
+ ql_dbg(ql_dbg_timer, vha, 0x6004,
+ "Returning status=%d.\n", status);
+ return status;
+}
+
+/*
+ * qla82xx_device_state_handler
+ * Main state handler
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+int
+qla82xx_device_state_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ uint32_t old_dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+ int loopcount = 0;
+
+ qla82xx_idc_lock(ha);
+ if (!vha->flags.init_done) {
+ qla82xx_set_drv_active(vha);
+ qla82xx_set_idc_version(vha);
+ }
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ old_dev_state = dev_state;
+ ql_log(ql_log_info, vha, 0x009b,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_fatal, vha, 0x009c,
+ "Device init failed.\n");
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (old_dev_state != dev_state) {
+ loopcount = 0;
+ old_dev_state = dev_state;
+ }
+ if (loopcount < 5) {
+ ql_log(ql_log_info, vha, 0x009d,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state(dev_state) :
+ "Unknown");
+ }
+
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ ha->flags.nic_core_reset_owner = 0;
+ goto rel_lock;
+ case QLA8XXX_DEV_COLD:
+ rval = qla82xx_device_bootstrap(vha);
+ break;
+ case QLA8XXX_DEV_INITIALIZING:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ if (!ql2xdontresethba)
+ qla82xx_need_reset_handler(vha);
+ else {
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ }
+ dev_init_timeout = jiffies +
+ (ha->fcoe_dev_init_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ qla82xx_need_qsnt_handler(vha);
+ /* Reset timeout value after quiescence handler */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
+ * HZ);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ /* Owner will exit and other will wait for the state
+ * to get changed
+ */
+ if (ha->flags.quiesce_owner)
+ goto rel_lock;
+
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+
+ /* Reset timeout value after quiescence handler */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
+ * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ default:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ }
+ loopcount++;
+ }
+rel_lock:
+ qla82xx_idc_unlock(ha);
+exit:
+ return rval;
+}
+
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0x600e,
+ "Device temperature %d degrees C exceeds "
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ return 1;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0x600f,
+ "Device temperature %d degrees C exceeds "
+ "operating range. Immediate action needed.\n",
+ temp_val);
+ }
+ return 0;
+}
+
+int qla82xx_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
+ return qla82xx_get_temp_val(temp);
+}
+
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
+void qla82xx_watchdog(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state, halt_status;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* don't poll if reset is going on */
+ if (!ha->flags.nic_core_reset_hdlr_active) {
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (qla82xx_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x6001,
+ "Adapter reset needed.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+ !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x6002,
+ "Quiescent needed.\n");
+ set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ } else if (dev_state == QLA8XXX_DEV_FAILED &&
+ !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
+ vha->flags.online == 1) {
+ ql_log(ql_log_warn, vha, 0xb055,
+ "Adapter state is failed. Offlining.\n");
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
+ } else {
+ if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ ql_log(ql_log_info, vha, 0x6005,
+ "dumping hw/fw registers:.\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
+ " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
+ " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
+ " PEG_NET_4_PC: 0x%x.\n", halt_status,
+ qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
+ qla82xx_rd_32(ha,
+ QLA82XX_CRB_PEG_NET_0 + 0x3c),
+ qla82xx_rd_32(ha,
+ QLA82XX_CRB_PEG_NET_1 + 0x3c),
+ qla82xx_rd_32(ha,
+ QLA82XX_CRB_PEG_NET_2 + 0x3c),
+ qla82xx_rd_32(ha,
+ QLA82XX_CRB_PEG_NET_3 + 0x3c),
+ qla82xx_rd_32(ha,
+ QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (((halt_status & 0x1fffff00) >> 8) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ ql_log(ql_log_info, vha, 0x6006,
+ "Detect abort needed.\n");
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ ha->flags.isp82xx_fw_hung = 1;
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
+ }
+ }
+ }
+}
+
+int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval = -1;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA82XX(ha))
+ rval = qla82xx_device_state_handler(vha);
+ else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ }
+ return rval;
+}
+
+void
+qla82xx_set_reset_owner(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state = 0;
+
+ if (IS_QLA82XX(ha))
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ else if (IS_QLA8044(ha))
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (dev_state == QLA8XXX_DEV_READY) {
+ ql_log(ql_log_info, vha, 0xb02f,
+ "HW State: NEED RESET\n");
+ if (IS_QLA82XX(ha)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ ha->flags.nic_core_reset_owner = 1;
+ ql_dbg(ql_dbg_p3p, vha, 0xb030,
+ "reset_owner is 0x%x\n", ha->portnum);
+ } else if (IS_QLA8044(ha))
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_NEED_RESET);
+ } else
+ ql_log(ql_log_info, vha, 0xb031,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+}
+
+/*
+ * qla82xx_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qla82xx_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval = -1;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_log(ql_log_warn, vha, 0x8024,
+ "Device in failed state, exiting.\n");
+ return QLA_SUCCESS;
+ }
+ ha->flags.nic_core_reset_hdlr_active = 1;
+
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+
+ if (IS_QLA82XX(ha))
+ rval = qla82xx_device_state_handler(vha);
+ else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ }
+
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_rst_ready(ha);
+ qla82xx_idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS) {
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ qla82xx_restart_isp(vha);
+ }
+
+ if (rval) {
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (ha->isp_abort_cnt == 0) {
+ ql_log(ql_log_warn, vha, 0x8027,
+ "ISP error recover failed - board "
+ "disabled.\n");
+ /*
+ * The next call disables the board
+ * completely.
+ */
+ ha->isp_ops->reset_adapter(vha);
+ vha->flags.online = 0;
+ clear_bit(ISP_ABORT_RETRY,
+ &vha->dpc_flags);
+ rval = QLA_SUCCESS;
+ } else { /* schedule another ISP abort */
+ ha->isp_abort_cnt--;
+ ql_log(ql_log_warn, vha, 0x8036,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ } else {
+ ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+ ql_dbg(ql_dbg_taskm, vha, 0x8029,
+ "ISP error recovery - retrying (%d) more times.\n",
+ ha->isp_abort_cnt);
+ set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ return rval;
+}
+
+/*
+ * qla82xx_fcoe_ctx_reset
+ * Perform a quick reset and aborts all outstanding commands.
+ * This will only perform an FCoE context reset and avoids a full blown
+ * chip reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * is_reset_path = flag for identifying the reset path.
+ *
+ * Returns:
+ * 0 = success
+ */
+int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (vha->flags.online) {
+ /* Abort all outstanding commands, so as to be requeued later */
+ qla2x00_abort_isp_cleanup(vha);
+ }
+
+ /* Stop currently executing firmware.
+ * This will destroy existing FCoE context at the F/W end.
+ */
+ qla2x00_try_to_stop_firmware(vha);
+
+ /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
+ rval = qla82xx_restart_isp(vha);
+
+ return rval;
+}
+
+/*
+ * qla2x00_wait_for_fcoe_ctx_reset
+ * Wait till the FCoE context is reset.
+ *
+ * Note:
+ * Does context switching here.
+ * Release SPIN_LOCK (if any) before calling this routine.
+ *
+ * Return:
+ * Success (fcoe_ctx reset is done) : 0
+ * Failed (fcoe_ctx reset not completed within max loop timout ) : 1
+ */
+int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int status = QLA_FUNCTION_FAILED;
+ unsigned long wait_reset;
+
+ wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ && time_before(jiffies, wait_reset)) {
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ status = QLA_SUCCESS;
+ break;
+ }
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb027,
+ "%s: status=%d.\n", __func__, status);
+
+ return status;
+}
+
+void
+qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
+{
+ int i, fw_state = 0;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only
+ */
+ if (!ha->flags.isp82xx_fw_hung) {
+ for (i = 0; i < 2; i++) {
+ msleep(1000);
+ if (IS_QLA82XX(ha))
+ fw_state = qla82xx_check_fw_alive(vha);
+ else if (IS_QLA8044(ha))
+ fw_state = qla8044_check_fw_alive(vha);
+ if (fw_state) {
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
+ break;
+ }
+ }
+ }
+ ql_dbg(ql_dbg_init, vha, 0x00b0,
+ "Entered %s fw_hung=%d.\n",
+ __func__, ha->flags.isp82xx_fw_hung);
+
+ /* Abort all commands gracefully if fw NOT hung */
+ if (!ha->flags.isp82xx_fw_hung) {
+ int cnt, que;
+ srb_t *sp;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ if ((!sp->u.scmd.ctx ||
+ (sp->flags &
+ SRB_FCP_CMND_DMA_VALID)) &&
+ !ha->flags.isp82xx_fw_hung) {
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ ql_log(ql_log_info, vha,
+ 0x00b1,
+ "mbx abort failed.\n");
+ } else {
+ ql_log(ql_log_info, vha,
+ 0x00b2,
+ "mbx abort success.\n");
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for pending cmds (physical and virtual) to complete */
+ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST) == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_init, vha, 0x00b3,
+ "Done wait for "
+ "pending commands.\n");
+ }
+ }
+}
+
+/* Minidump related functions */
+static int
+qla82xx_minidump_process_control(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla82xx_md_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time;
+ uint32_t addr, index, crb_addr;
+ unsigned long wtime;
+ struct qla82xx_md_template_hdr *tmplt_hdr;
+ uint32_t rval = QLA_SUCCESS;
+ int i;
+
+ tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+ crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
+ crb_addr = crb_entry->addr;
+
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla82xx_md_rw_32(ha, crb_addr,
+ crb_entry->value_1, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+ qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value |= crb_entry->value_3;
+ qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+
+ do {
+ if ((read_value & crb_entry->value_2)
+ == crb_entry->value_1)
+ break;
+ else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else
+ read_value = qla82xx_md_rw_32(ha,
+ crb_addr, 0, 0);
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else
+ addr = crb_addr;
+
+ read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else
+ addr = crb_addr;
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else
+ read_value = crb_entry->value_1;
+
+ qla82xx_md_rw_32(ha, addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ return rval;
+}
+
+static void
+qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_md_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = RD_REG_DWORD((void __iomem *)
+ (r_addr + ha->nx_pcibase));
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla82xx_md_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, s_addr, s_value, 1);
+ r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_md_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_addr);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int
+qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla82xx_md_entry_cache *cache_hdr;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
+ if (c_value_w)
+ qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
+ if ((c_value_r & p_mask) == 0)
+ break;
+ else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ ql_dbg(ql_dbg_p3p, vha, 0xb032,
+ "c_value_r: 0x%x, poll_mask: 0x%lx, "
+ "w_time: 0x%lx\n",
+ c_value_r, p_mask, w_time);
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void
+qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla82xx_md_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
+ qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla82xx_md_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, s_addr, qid, 1);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_value;
+ uint32_t i, loop_cnt;
+ struct qla82xx_md_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
+ r_addr = rom_hdr->read_addr;
+ loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000), 1);
+ r_value = qla82xx_md_rw_32(ha,
+ MD_DIRECT_ROM_READ_BASE +
+ (r_addr & 0x0000FFFF), 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += sizeof(uint32_t);
+ }
+ *d_ptr = data_ptr;
+}
+
+static int
+qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla82xx_md_entry_rdmem *m_hdr;
+ unsigned long flags;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ if (r_addr & 0xf) {
+ ql_log(ql_log_warn, vha, 0xb033,
+ "Read addr 0x%x not 16 bytes aligned\n", r_addr);
+ return rval;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ ql_log(ql_log_warn, vha, 0xb034,
+ "Read data[0x%x] not multiple of 16 bytes\n",
+ m_hdr->read_data_size);
+ return rval;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb035,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+ r_value = 0;
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+ r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ r_value = qla82xx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_CTRL, 0, 0);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "failed to read through agent\n");
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return rval;
+ }
+
+ for (j = 0; j < 4; j++) {
+ r_data = qla82xx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
+ *data_ptr++ = cpu_to_le32(r_data);
+ }
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+int
+qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint64_t chksum = 0;
+ uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
+ int count = ha->md_template_size/sizeof(uint32_t);
+
+ while (count-- > 0)
+ chksum += *d_ptr++;
+ while (chksum >> 32)
+ chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
+ return ~chksum;
+}
+
+static void
+qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
+ qla82xx_md_entry_hdr_t *entry_hdr, int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+ ql_dbg(ql_dbg_p3p, vha, 0xb036,
+ "Skipping entry[%d]: "
+ "ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+}
+
+int
+qla82xx_md_collect(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int no_entry_hdr = 0;
+ qla82xx_md_entry_hdr_t *entry_hdr;
+ struct qla82xx_md_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
+ int i = 0, rval = QLA_FUNCTION_FAILED;
+
+ tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+ data_ptr = (uint32_t *)ha->md_dump;
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xb037,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n", ha->fw_dump);
+ goto md_failed;
+ }
+
+ ha->fw_dumped = 0;
+
+ if (!ha->md_tmplt_hdr || !ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb038,
+ "Memory not allocated for minidump capture\n");
+ goto md_failed;
+ }
+
+ if (ha->flags.isp82xx_no_md_cap) {
+ ql_log(ql_log_warn, vha, 0xb054,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ ha->flags.isp82xx_no_md_cap = 0;
+ goto md_failed;
+ }
+
+ if (qla82xx_validate_template_chksum(vha)) {
+ ql_log(ql_log_info, vha, 0xb039,
+ "Template checksum validation error\n");
+ goto md_failed;
+ }
+
+ no_entry_hdr = tmplt_hdr->num_of_entries;
+ ql_dbg(ql_dbg_p3p, vha, 0xb03a,
+ "No of entry headers in Template: 0x%x\n", no_entry_hdr);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb03b,
+ "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+ f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+ /* Validate whether required debug level is set */
+ if ((f_capture_mask & 0x3) != 0x3) {
+ ql_log(ql_log_warn, vha, 0xb03c,
+ "Minimum required capture mask[0x%x] level not set\n",
+ f_capture_mask);
+ goto md_failed;
+ }
+ tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+
+ tmplt_hdr->driver_info[0] = vha->host_no;
+ tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
+ (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
+ QLA_DRIVER_BETA_VER;
+
+ total_data_size = ha->md_dump_size;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb03d,
+ "Total minidump data_size 0x%x to be captured\n", total_data_size);
+
+ /* Check whether template obtained is valid */
+ if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
+ ql_log(ql_log_warn, vha, 0xb04e,
+ "Bad template header entry type: 0x%x obtained\n",
+ tmplt_hdr->entry_type);
+ goto md_failed;
+ }
+
+ entry_hdr = (qla82xx_md_entry_hdr_t *) \
+ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+
+ /* Walk through the entry headers */
+ for (i = 0; i < no_entry_hdr; i++) {
+
+ if (data_collected > total_data_size) {
+ ql_log(ql_log_warn, vha, 0xb03e,
+ "More MiniDump data collected: [0x%x]\n",
+ data_collected);
+ goto md_failed;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ql2xmdcapmask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ ql_dbg(ql_dbg_p3p, vha, 0xb03f,
+ "Skipping entry[%d]: "
+ "ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ i, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+ goto skip_nxt_entry;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb040,
+ "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
+ "entry_type: 0x%x, captrue_mask: 0x%x\n",
+ __func__, i, data_ptr, entry_hdr,
+ entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb041,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected, (ha->md_dump_size - data_collected));
+
+ /* Decode the entry type and take
+ * required action to capture debug data */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla82xx_minidump_process_control(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla82xx_minidump_process_rdcrb(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla82xx_minidump_process_rdmem(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ qla82xx_minidump_process_rdrom(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla82xx_minidump_process_l2tag(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla82xx_minidump_process_l1cache(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla82xx_minidump_process_rdocm(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla82xx_minidump_process_rdmux(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla82xx_minidump_process_queue(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb042,
+ "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
+
+ data_collected = (uint8_t *)data_ptr -
+ (uint8_t *)ha->md_dump;
+skip_nxt_entry:
+ entry_hdr = (qla82xx_md_entry_hdr_t *) \
+ (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+ }
+
+ if (data_collected != total_data_size) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb043,
+ "MiniDump data mismatch: Data collected: [0x%x],"
+ "total_data_size:[0x%x]\n",
+ data_collected, total_data_size);
+ goto md_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0xb044,
+ "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+ vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+ ha->fw_dumped = 1;
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+md_failed:
+ return rval;
+}
+
+int
+qla82xx_md_alloc(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int i, k;
+ struct qla82xx_md_template_hdr *tmplt_hdr;
+
+ tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+
+ if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
+ ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
+ ql_log(ql_log_info, vha, 0xb045,
+ "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
+ ql2xmdcapmask);
+ }
+
+ for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
+ if (i & ql2xmdcapmask)
+ ha->md_dump_size += tmplt_hdr->capture_size_array[k];
+ }
+
+ if (ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb046,
+ "Firmware dump previously allocated.\n");
+ return 1;
+ }
+
+ ha->md_dump = vmalloc(ha->md_dump_size);
+ if (ha->md_dump == NULL) {
+ ql_log(ql_log_warn, vha, 0xb047,
+ "Unable to allocate memory for Minidump size "
+ "(0x%x).\n", ha->md_dump_size);
+ return 1;
+ }
+ return 0;
+}
+
+void
+qla82xx_md_free(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Release the template header allocated */
+ if (ha->md_tmplt_hdr) {
+ ql_log(ql_log_info, vha, 0xb048,
+ "Free MiniDump template: %p, size (%d KB)\n",
+ ha->md_tmplt_hdr, ha->md_template_size / 1024);
+ dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
+ ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
+ ha->md_tmplt_hdr = NULL;
+ }
+
+ /* Release the template data buffer allocated */
+ if (ha->md_dump) {
+ ql_log(ql_log_info, vha, 0xb049,
+ "Free MiniDump memory: %p, size (%d KB)\n",
+ ha->md_dump, ha->md_dump_size / 1024);
+ vfree(ha->md_dump);
+ ha->md_dump_size = 0;
+ ha->md_dump = NULL;
+ }
+}
+
+void
+qla82xx_md_prep(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+
+ /* Get Minidump template size */
+ rval = qla82xx_md_get_template_size(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xb04a,
+ "MiniDump Template size obtained (%d KB)\n",
+ ha->md_template_size / 1024);
+
+ /* Get Minidump template */
+ if (IS_QLA8044(ha))
+ rval = qla8044_md_get_template(vha);
+ else
+ rval = qla82xx_md_get_template(vha);
+
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb04b,
+ "MiniDump Template obtained\n");
+
+ /* Allocate memory for minidump */
+ rval = qla82xx_md_alloc(vha);
+ if (rval == QLA_SUCCESS)
+ ql_log(ql_log_info, vha, 0xb04c,
+ "MiniDump memory allocated (%d KB)\n",
+ ha->md_dump_size / 1024);
+ else {
+ ql_log(ql_log_info, vha, 0xb04d,
+ "Free MiniDump template: %p, size: (%d KB)\n",
+ ha->md_tmplt_hdr,
+ ha->md_template_size / 1024);
+ dma_free_coherent(&ha->pdev->dev,
+ ha->md_template_size,
+ ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
+ ha->md_tmplt_hdr = NULL;
+ }
+
+ }
+ }
+}
+
+int
+qla82xx_beacon_on(struct scsi_qla_host *vha)
+{
+
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ qla82xx_idc_lock(ha);
+ rval = qla82xx_mbx_beacon_ctl(vha, 1);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb050,
+ "mbx set led config failed in %s\n", __func__);
+ goto exit;
+ }
+ ha->beacon_blink_led = 1;
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
+
+int
+qla82xx_beacon_off(struct scsi_qla_host *vha)
+{
+
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ qla82xx_idc_lock(ha);
+ rval = qla82xx_mbx_beacon_ctl(vha, 0);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb051,
+ "mbx set led config failed in %s\n", __func__);
+ goto exit;
+ }
+ ha->beacon_blink_led = 0;
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
+
+void
+qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->allow_cna_fw_dump)
+ return;
+
+ scsi_block_requests(vha->host);
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
new file mode 100644
index 000000000..59c477883
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -0,0 +1,1202 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+*/
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+/*CRB_RELATED*/
+#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
+
+#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
+#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
+#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
+
+#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
+
+/* Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
+
+/* Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
+
+/* Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
+
+/* Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
+
+/* Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
+
+/* Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
+
+/* Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+/* */
+#define QLA82XX_HW_PX_MAP_CRB_PH 0
+#define QLA82XX_HW_PX_MAP_CRB_PS 1
+#define QLA82XX_HW_PX_MAP_CRB_MN 2
+#define QLA82XX_HW_PX_MAP_CRB_MS 3
+#define QLA82XX_HW_PX_MAP_CRB_SRE 5
+#define QLA82XX_HW_PX_MAP_CRB_NIU 6
+#define QLA82XX_HW_PX_MAP_CRB_QMN 7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
+#define QLA82XX_HW_PX_MAP_CRB_QMS 12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND 21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
+#define QLA82XX_HW_PX_MAP_CRB_SN 29
+#define QLA82XX_HW_PX_MAP_CRB_EG 31
+#define QLA82XX_HW_PX_MAP_CRB_PH2 32
+#define QLA82XX_HW_PX_MAP_CRB_PS2 33
+#define QLA82XX_HW_PX_MAP_CRB_CAM 34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
+#define QLA82XX_HW_PX_MAP_CRB_SMB 58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
+#define QLA82XX_HW_PX_MAP_CRB_LPC 61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
+
+/* This field defines CRB adr [31:20] of the agents */
+/* */
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SMB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A) \
+ (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
+#define QLA82XX_CRB_C2C_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+
+#define QLA82XX_CRB_PCIX_HOST \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE \
+ QLA82XX_CRB_PCIX_MD
+
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+#define QLA82XX_CRB_PEG_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+#define QLA82XX_CRB_MAX \
+ QLA82XX_PCI_CRB_WINDOW(64)
+
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ */
+#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_CONTROL (0x000)
+#define MIU_TAG (0x004)
+#define MIU_TEST_AGT_CTRL (0x090)
+#define MIU_TEST_AGT_ADDR_LO (0x094)
+#define MIU_TEST_AGT_ADDR_HI (0x098)
+#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
+#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
+#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_START 1
+#define MIU_TA_CTL_ENABLE 2
+#define MIU_TA_CTL_WRITE 4
+#define MIU_TA_CTL_BUSY 8
+
+/*CAM RAM */
+# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
+
+#define QLA82XX_CAMRAM_DB1 (QLA82XX_CAM_RAM(0x1b8))
+#define QLA82XX_CAMRAM_DB2 (QLA82XX_CAM_RAM(0x1bc))
+
+#define HALT_STATUS_UNRECOVERABLE 0x80000000
+#define HALT_STATUS_RECOVERABLE 0x40000000
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
+#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
+
+/* Every driver should use these Device State */
+#define QLA8XXX_DEV_COLD 1
+#define QLA8XXX_DEV_INITIALIZING 2
+#define QLA8XXX_DEV_READY 3
+#define QLA8XXX_DEV_NEED_RESET 4
+#define QLA8XXX_DEV_NEED_QUIESCENT 5
+#define QLA8XXX_DEV_FAILED 6
+#define QLA8XXX_DEV_QUIESCENT 7
+#define MAX_STATES 8 /* Increment if new state added */
+#define QLA8XXX_BAD_VALUE 0xbad0bad0
+
+#define QLA82XX_IDC_VERSION 1
+#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
+#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT 10
+
+#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/
+
+/* Different drive state */
+#define QLA82XX_DRVST_NOT_RDY 0
+#define QLA82XX_DRVST_RST_RDY 1
+#define QLA82XX_DRVST_QSNT_RDY 2
+
+/* Different drive active state */
+#define QLA82XX_DRV_NOT_ACTIVE 0
+#define QLA82XX_DRV_ACTIVE 1
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
+#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044
+
+#define QLA82XX_MSIX_TBL_SPACE 8192
+#define QLA82XX_PCI_REG_MSIX_TBL 0x44
+#define QLA82XX_PCI_MSIX_CONTROL 0x40
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
+struct crb_addr_pair {
+ long addr;
+ long data;
+};
+
+#define ADDR_ERROR ((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK 1000
+
+/***************************************************************************
+ * PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC (0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG \
+ (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define ISR_MSI_INT_TRIGGER(FUNC) \
+ (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
+#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qla82xx_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define QLA82XX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
+#define BRDCFG_START 0x4000
+#define BOOTLD_START 0x10000
+#define IMAGE_START 0x100000
+#define FLASH_ADDR_START 0x43000
+
+/* Magic number to let user know flash is programmed */
+#define QLA82XX_BDINFO_MAGIC 0x12345678
+#define QLA82XX_FW_MAGIC_OFFSET (BRDCFG_START + 0x128)
+#define FW_SIZE_OFFSET (0x3e840c)
+#define QLA82XX_FW_MIN_SIZE 0x3fffff
+
+/* UNIFIED ROMIMAGE START */
+#define QLA82XX_URI_FW_MIN_SIZE 0xc8000
+#define QLA82XX_URI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLA82XX_URI_DIR_SECT_BOOTLD 0x6
+#define QLA82XX_URI_DIR_SECT_FW 0x7
+
+/* Offsets */
+#define QLA82XX_URI_CHIP_REV_OFF 10
+#define QLA82XX_URI_FLAGS_OFF 11
+#define QLA82XX_URI_BIOS_VERSION_OFF 12
+#define QLA82XX_URI_BOOTLD_IDX_OFF 27
+#define QLA82XX_URI_FIRMWARE_IDX_OFF 29
+
+struct qla82xx_uri_table_desc{
+ uint32_t findex;
+ uint32_t num_entries;
+ uint32_t entry_size;
+ uint32_t reserved[5];
+};
+
+struct qla82xx_uri_data_desc{
+ uint32_t findex;
+ uint32_t size;
+ uint32_t reserved[5];
+};
+
+/* UNIFIED ROMIMAGE END */
+
+#define QLA82XX_UNIFIED_ROMIMAGE 3
+#define QLA82XX_FLASH_ROMIMAGE 4
+#define QLA82XX_UNKNOWN_ROMIMAGE 0xff
+
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+/* Request and response queue size */
+#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
+
+/*
+ * ISP 8021 I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+ uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
+ uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+
+ uint16_t mailbox_in[32]; /* Mail box In registers */
+ uint16_t unused_1[32];
+ uint32_t hint; /* Host interrupt register */
+#define HINT_MBX_INT_PENDING BIT_0
+ uint16_t unused_2[62];
+ uint16_t mailbox_out[32]; /* Mail box Out registers */
+ uint32_t unused_3[48];
+
+ uint32_t host_status; /* host status */
+#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
+ uint32_t host_int; /* Interrupt status. */
+#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
+};
+
+struct fcp_cmnd {
+ struct scsi_lun lun;
+ uint8_t crn;
+ uint8_t task_attribute;
+ uint8_t task_management;
+ uint8_t additional_cdb_len;
+ uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+};
+
+struct dsd_dma {
+ struct list_head list;
+ dma_addr_t dsd_list_dma;
+ void *dsd_addr;
+};
+
+#define QLA_DSDS_PER_IOCB 37
+#define QLA_DSD_SIZE 12
+struct ct6_dsd {
+ uint16_t fcp_cmnd_len;
+ dma_addr_t fcp_cmnd_dma;
+ struct fcp_cmnd *fcp_cmnd;
+ int dsd_use_cnt;
+ struct list_head dsd_list;
+};
+
+#define MBC_TOGGLE_INTERRUPT 0x10
+#define MBC_SET_LED_CONFIG 0x125 /* FCoE specific LED control */
+#define MBC_GET_LED_CONFIG 0x126 /* FCoE specific LED control */
+
+/* Flash offset */
+#define FLT_REG_BOOTLOAD_82XX 0x72
+#define FLT_REG_BOOT_CODE_82XX 0x78
+#define FLT_REG_FW_82XX 0x74
+#define FLT_REG_GOLD_FW_82XX 0x75
+#define FLT_REG_VPD_8XXX 0x81
+
+#define FA_VPD_SIZE_82XX 0x400
+
+#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+/* Minidump related */
+
+/*
+ * Version of the template
+ * 4 Bytes
+ * X.Major.Minor.RELEASE
+ */
+#define QLA82XX_MINIDUMP_VERSION 0x10101
+
+/*
+ * Entry Type Defines
+ */
+#define QLA82XX_RDNOP 0
+#define QLA82XX_RDCRB 1
+#define QLA82XX_RDMUX 2
+#define QLA82XX_QUEUE 3
+#define QLA82XX_BOARD 4
+#define QLA82XX_RDSRE 5
+#define QLA82XX_RDOCM 6
+#define QLA82XX_CACHE 10
+#define QLA82XX_L1DAT 11
+#define QLA82XX_L1INS 12
+#define QLA82XX_L2DTG 21
+#define QLA82XX_L2ITG 22
+#define QLA82XX_L2DAT 23
+#define QLA82XX_L2INS 24
+#define QLA82XX_RDROM 71
+#define QLA82XX_RDMEM 72
+#define QLA82XX_CNTRL 98
+#define QLA82XX_TLHDR 99
+#define QLA82XX_RDEND 255
+#define QLA8044_POLLRD 35
+#define QLA8044_RDMUX2 36
+#define QLA8044_L1DTG 8
+#define QLA8044_L1ITG 9
+#define QLA8044_POLLRDMWR 37
+
+/*
+ * Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR 0x01
+#define QLA82XX_DBG_OPCODE_RW 0x02
+#define QLA82XX_DBG_OPCODE_AND 0x04
+#define QLA82XX_DBG_OPCODE_OR 0x08
+#define QLA82XX_DBG_OPCODE_POLL 0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
+
+/*
+ * Template Header and Entry Header definitions start here.
+ */
+
+/*
+ * Template Header
+ * Parts of the template header can be modified by the driver.
+ * These include the saved_state_array, capture_debug_level, driver_timestamp
+ */
+
+#define QLA82XX_DBG_STATE_ARRAY_LEN 16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
+
+/*
+ * Driver Flags
+ */
+#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */
+
+struct qla82xx_md_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t template_checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info[3];
+
+ uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+
+ /* markers_array used to capture some special locations on board */
+ uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN];
+ uint32_t num_of_free_entries; /* For internal use */
+ uint32_t free_entry_offset; /* For internal use */
+ uint32_t total_table_size; /* For internal use */
+ uint32_t bkup_table_offset; /* For internal use */
+} __packed;
+
+/*
+ * Entry Header: Common to All Entry Types
+ */
+
+/*
+ * Driver Code is for driver to write some info about the entry.
+ * Currently not used.
+ */
+typedef struct qla82xx_md_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+} __packed qla82xx_md_entry_hdr_t;
+
+/*
+ * Read CRB entry header
+ */
+struct qla82xx_md_entry_crb {
+ qla82xx_md_entry_hdr_t h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+} __packed;
+
+/*
+ * Cache entry header
+ */
+struct qla82xx_md_entry_cache {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+
+ uint32_t data_size;
+ uint32_t op_count;
+
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+} __packed;
+
+/*
+ * Read OCM
+ */
+struct qla82xx_md_entry_rdocm {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+ uint32_t read_addr_cntrl;
+} __packed;
+
+/*
+ * Read Memory
+ */
+struct qla82xx_md_entry_rdmem {
+ qla82xx_md_entry_hdr_t h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/*
+ * Read ROM
+ */
+struct qla82xx_md_entry_rdrom {
+ qla82xx_md_entry_hdr_t h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+struct qla82xx_md_entry_mux {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+} __packed;
+
+struct qla82xx_md_entry_queue {
+ qla82xx_md_entry_hdr_t h;
+
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+} __packed;
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
+ 0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#define LEG_INTR_PTR_OFFSET 0x38C0
+#define LEG_INTR_TRIG_OFFSET 0x38C4
+#define LEG_INTR_MASK_OFFSET 0x38C8
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
new file mode 100644
index 000000000..ed4d6b6b5
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -0,0 +1,4079 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#include <linux/delay.h>
+
+#define TIMEOUT_100_MS 100
+
+/* 8044 Flash Read/Write functions */
+uint32_t
+qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
+{
+ return readl((void __iomem *) (ha->nx_pcibase + addr));
+}
+
+void
+qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
+{
+ writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
+}
+
+int
+qla8044_rd_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (crb_reg < CRB_REG_INDEX_MAX)
+ return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
+ else
+ return QLA_FUNCTION_FAILED;
+}
+
+void
+qla8044_wr_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg,
+ const uint32_t value)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (crb_reg < CRB_REG_INDEX_MAX)
+ qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
+}
+
+static int
+qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
+{
+ uint32_t val;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
+ val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
+
+ if (val != addr) {
+ ql_log(ql_log_warn, vha, 0xb087,
+ "%s: Failed to set register window : "
+ "addr written 0x%x, read 0x%x!\n",
+ __func__, addr, val);
+ ret_val = QLA_FUNCTION_FAILED;
+ }
+ return ret_val;
+}
+
+static int
+qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_set_win_base(vha, addr);
+ if (!ret_val)
+ *data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
+ else
+ ql_log(ql_log_warn, vha, 0xb088,
+ "%s: failed read of addr 0x%x!\n", __func__, addr);
+ return ret_val;
+}
+
+static int
+qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_set_win_base(vha, addr);
+ if (!ret_val)
+ qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
+ else
+ ql_log(ql_log_warn, vha, 0xb089,
+ "%s: failed wrt to addr 0x%x, data 0x%x\n",
+ __func__, addr, data);
+ return ret_val;
+}
+
+/*
+ * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ *
+ */
+static void
+qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
+ uint32_t raddr, uint32_t waddr)
+{
+ uint32_t value;
+
+ qla8044_rd_reg_indirect(vha, raddr, &value);
+ qla8044_wr_reg_indirect(vha, waddr, value);
+}
+
+static int
+qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
+ uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t temp;
+
+ /* jiffies after 100ms */
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ if (time_after_eq(jiffies, timeout)) {
+ ql_log(ql_log_warn, vha, 0xb151,
+ "Error in processing rdmdio entry\n");
+ return -1;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static uint32_t
+qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
+ uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
+{
+ uint32_t temp;
+ int ret = 0;
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return -1;
+
+ temp = (0x40000000 | addr);
+ qla8044_wr_reg_indirect(vha, addr1, temp);
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return 0;
+
+ qla8044_rd_reg_indirect(vha, addr3, &ret);
+
+ return ret;
+}
+
+
+static int
+qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
+ uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t temp;
+
+ /* jiffies after 100 msecs */
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
+ if ((temp & 0x1) != 1)
+ break;
+ if (time_after_eq(jiffies, timeout)) {
+ ql_log(ql_log_warn, vha, 0xb152,
+ "Error in processing mdiobus idle\n");
+ return -1;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int
+qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
+ uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
+{
+ int ret = 0;
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return -1;
+
+ qla8044_wr_reg_indirect(vha, addr3, value);
+ qla8044_wr_reg_indirect(vha, addr1, addr);
+
+ ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+ if (ret == -1)
+ return -1;
+
+ return 0;
+}
+/*
+ * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
+ * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
+ uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
+{
+ uint32_t value;
+
+ if (p_rmw_hdr->index_a)
+ value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
+ else
+ qla8044_rd_reg_indirect(vha, raddr, &value);
+ value &= p_rmw_hdr->test_mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qla8044_wr_reg_indirect(vha, waddr, value);
+ return;
+}
+
+static inline void
+qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
+{
+ uint32_t qsnt_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ qsnt_state |= (1 << ha->portnum);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+ ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
+ __func__, vha->host_no, qsnt_state);
+}
+
+void
+qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
+{
+ uint32_t qsnt_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ qsnt_state &= ~(1 << ha->portnum);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+ ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
+ __func__, vha->host_no, qsnt_state);
+}
+
+/**
+ *
+ * qla8044_lock_recovery - Recovers the idc_lock.
+ * @ha : Pointer to adapter structure
+ *
+ * Lock Recovery Register
+ * 5-2 Lock recovery owner: Function ID of driver doing lock recovery,
+ * valid if bits 1..0 are set by driver doing lock recovery.
+ * 1-0 1 - Driver intends to force unlock the IDC lock.
+ * 2 - Driver is moving forward to unlock the IDC lock. Driver clears
+ * this field after force unlocking the IDC lock.
+ *
+ * Lock Recovery process
+ * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
+ * greater than 0, then wait for the other driver to unlock otherwise
+ * move to the next step.
+ * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
+ * register bits 1..0 and also set the function# in bits 5..2.
+ * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
+ * Wait for the other driver to perform lock recovery if the function
+ * number in bits 5..2 has changed, otherwise move to the next step.
+ * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
+ * leaving your function# in bits 5..2.
+ * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
+ * the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
+ **/
+static int
+qla8044_lock_recovery(struct scsi_qla_host *vha)
+{
+ uint32_t lock = 0, lockid;
+ struct qla_hw_data *ha = vha->hw;
+
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+
+ /* Check for other Recovery in progress, go wait */
+ if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
+ return QLA_FUNCTION_FAILED;
+
+ /* Intent to Recover */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+ (ha->portnum <<
+ IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
+ msleep(200);
+
+ /* Check Intent to Recover is advertised */
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+ if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
+ IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n"
+ , __func__, ha->portnum);
+
+ /* Proceed to Recover */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+ (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
+ PROCEED_TO_RECOVER);
+
+ /* Force Unlock() */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+
+ /* Clear bits 0-5 in IDC_RECOVERY register*/
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
+
+ /* Get lock() */
+ lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+ if (lock) {
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
+ return QLA_SUCCESS;
+ } else
+ return QLA_FUNCTION_FAILED;
+}
+
+int
+qla8044_idc_lock(struct qla_hw_data *ha)
+{
+ uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
+ uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while (status == 0) {
+ /* acquire semaphore5 from PCI HW block */
+ status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+
+ if (status) {
+ /* Increment Counter (8-31) and update func_num (0-7) on
+ * getting a successful lock */
+ lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
+ break;
+ }
+
+ if (timeout == 0)
+ first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if (++timeout >=
+ (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
+ tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ func_num = tmo_owner & 0xFF;
+ lock_cnt = tmo_owner >> 8;
+ ql_log(ql_log_warn, vha, 0xb114,
+ "%s: Lock by func %d failed after 2s, lock held "
+ "by func %d, lock count %d, first_owner %d\n",
+ __func__, ha->portnum, func_num, lock_cnt,
+ (first_owner & 0xFF));
+ if (first_owner != tmo_owner) {
+ /* Some other driver got lock,
+ * OR same driver got lock again (counter
+ * value changed), when we were waiting for
+ * lock. Retry for another 2 sec */
+ ql_dbg(ql_dbg_p3p, vha, 0xb115,
+ "%s: %d: IDC lock failed\n",
+ __func__, ha->portnum);
+ timeout = 0;
+ } else {
+ /* Same driver holding lock > 2sec.
+ * Force Recovery */
+ if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+ /* Recovered and got lock */
+ ret_val = QLA_SUCCESS;
+ ql_dbg(ql_dbg_p3p, vha, 0xb116,
+ "%s:IDC lock Recovery by %d"
+ "successful...\n", __func__,
+ ha->portnum);
+ }
+ /* Recovery Failed, some other function
+ * has the lock, wait for 2secs
+ * and retry
+ */
+ ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+ "%s: IDC lock Recovery by %d "
+ "failed, Retrying timeout\n", __func__,
+ ha->portnum);
+ timeout = 0;
+ }
+ }
+ msleep(QLA8044_DRV_LOCK_MSLEEP);
+ }
+ return ret_val;
+}
+
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+ int id;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if ((id & 0xFF) != ha->portnum) {
+ ql_log(ql_log_warn, vha, 0xb118,
+ "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+ __func__, ha->portnum, (id & 0xFF));
+ return;
+ }
+
+ /* Keep lock counter value, update the ha->func_num to 0xFF */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
+
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+ int lock_owner;
+ int timeout = 0;
+ uint32_t lock_status = 0;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ while (lock_status == 0) {
+ lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+ if (lock_status)
+ break;
+
+ if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+ lock_owner = qla8044_rd_reg(ha,
+ QLA8044_FLASH_LOCK_ID);
+ ql_log(ql_log_warn, vha, 0xb113,
+ "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+ __func__, ha->portnum, lock_owner);
+ ret_val = QLA_FUNCTION_FAILED;
+ break;
+ }
+ msleep(20);
+ }
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+ return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Reading FLASH_UNLOCK register unlocks the Flash */
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+ ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+
+static
+void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+
+ if (qla8044_flash_lock(vha)) {
+ /* Someone else is holding the lock. */
+ ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+ }
+
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla8044_flash_unlock(vha);
+}
+
+/*
+ * Address and length are byte address
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
+ uint32_t flash_addr, int u32_word_count)
+{
+ int i, ret_val = QLA_SUCCESS;
+ uint32_t u32_word;
+
+ if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lock_error;
+ }
+
+ if (flash_addr & 0x03) {
+ ql_log(ql_log_warn, vha, 0xb117,
+ "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ for (i = 0; i < u32_word_count; i++) {
+ if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+ (flash_addr & 0xFFFF0000))) {
+ ql_log(ql_log_warn, vha, 0xb119,
+ "%s: failed to write addr 0x%x to "
+ "FLASH_DIRECT_WINDOW\n! ",
+ __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(flash_addr),
+ &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb08c,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, flash_addr);
+ goto exit_flash_read;
+ }
+
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ flash_addr = flash_addr + 4;
+ }
+
+exit_flash_read:
+ qla8044_flash_unlock(vha);
+
+exit_lock_error:
+ return ret_val;
+}
+
+/*
+ * Address and length are byte address
+ */
+uint8_t *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb08d,
+ "%s: Failed to read from flash\n",
+ __func__);
+ }
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+inline int
+qla8044_need_reset(struct scsi_qla_host *vha)
+{
+ uint32_t drv_state, drv_active;
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ rval = drv_state & (1 << ha->portnum);
+
+ if (ha->flags.eeh_busy && drv_active)
+ rval = 1;
+ return rval;
+}
+
+/*
+ * qla8044_write_list - Write the value (p_entry->arg2) to address specified
+ * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
+ * entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla8044_entry *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_read_write_list - Read from address specified by p_entry->arg1,
+ * write value read to address specified by p_entry->arg2, for all entries in
+ * header with delay of p_hdr->delay between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_read_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla8044_entry *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_read_write_crb_reg(vha, p_entry->arg1,
+ p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
+ * value read ANDed with test_mask is equal to test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for total of "duration" msecs
+ * @test_mask : Mask value read with "test_mask"
+ * @test_result : Compare (value&test_mask) with test_result.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
+ int duration, uint32_t test_mask, uint32_t test_result)
+{
+ uint32_t value;
+ int timeout_error;
+ uint8_t retries;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ timeout_error = 1;
+ goto exit_poll_reg;
+ }
+
+ /* poll every 1/10 of the total duration */
+ retries = duration/10;
+
+ do {
+ if ((value & test_mask) != test_result) {
+ timeout_error = 1;
+ msleep(duration/10);
+ ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ timeout_error = 1;
+ goto exit_poll_reg;
+ }
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+exit_poll_reg:
+ if (timeout_error) {
+ vha->reset_tmplt.seq_error++;
+ ql_log(ql_log_fatal, vha, 0xb090,
+ "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+ __func__, value, test_mask, test_result);
+ }
+
+ return timeout_error;
+}
+
+/*
+ * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
+ * register specified by p_entry->arg1 and compare (value AND test_mask) with
+ * test_result to validate it. Wait for p_hdr->delay between processing entries.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for POLL_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla8044_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ /* Entries start after 8 byte qla8044_poll, poll header contains
+ * the test_mask, test_value.
+ */
+ p_entry = (struct qla8044_entry *)((char *)p_poll +
+ sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, p_entry++)
+ qla8044_poll_reg(vha, p_entry->arg1,
+ delay, p_poll->test_mask, p_poll->test_value);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ if (delay) {
+ if (qla8044_poll_reg(vha,
+ p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ /*If
+ * (data_read&test_mask != test_value)
+ * read TIMEOUT_ADDR (arg1) and
+ * ADDR (arg2) registers
+ */
+ qla8044_rd_reg_indirect(vha,
+ p_entry->arg1, &value);
+ qla8044_rd_reg_indirect(vha,
+ p_entry->arg2, &value);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
+ * read ar_addr, if (value& test_mask != test_mask) re-read till timeout
+ * expires.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+
+ p_poll = (struct qla8044_poll *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
+ sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha,
+ p_entry->dr_addr, p_entry->dr_value);
+ qla8044_wr_reg_indirect(vha,
+ p_entry->ar_addr, p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha,
+ p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb091,
+ "%s: Timeout Error: poll list, ",
+ __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb092,
+ "item_num %d, entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
+ * value, write value to p_entry->arg2. Process entries with p_hdr->delay
+ * between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_read_modify_write(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ struct qla8044_rmw *p_rmw_hdr;
+ uint32_t i;
+
+ p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
+ sizeof(struct qla8044_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_rmw_crb_reg(vha, p_entry->arg1,
+ p_entry->arg2, p_rmw_hdr);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
+ * two entries of a sequence.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static
+void qla8044_pause(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+/*
+ * qla8044_template_end - Indicates end of reset sequence processing.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_template_end(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ vha->reset_tmplt.template_end = 1;
+
+ if (vha->reset_tmplt.seq_error == 0) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb093,
+ "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb094,
+ "%s: Reset sequence completed with some timeout "
+ "errors.\n", __func__);
+ }
+}
+
+/*
+ * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
+ * if (value & test_mask != test_value) re-read till timeout value expires,
+ * read dr_addr register and assign to reset_tmplt.array.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ int index;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+ p_poll->test_mask, p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb095,
+ "%s: Timeout Error: poll "
+ "list, ", __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb096,
+ "Item_num %d, "
+ "entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ } else {
+ index = vha->reset_tmplt.array_index;
+ qla8044_rd_reg_indirect(vha,
+ p_entry->dr_addr, &value);
+ vha->reset_tmplt.array[index++] = value;
+ if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+ vha->reset_tmplt.array_index = 1;
+ }
+ }
+ }
+}
+
+/*
+ * qla8031_process_reset_template - Process all entries in reset template
+ * till entry with SEQ_END opcode, which indicates end of the reset template
+ * processing. Each entry has a Reset Entry header, entry opcode/command, with
+ * size of the entry, number of entries in sub-sequence and delay in microsecs
+ * or timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Common reset entry header.
+ *
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+ char *p_buff)
+{
+ int index, entries;
+ struct qla8044_reset_entry_hdr *p_hdr;
+ char *p_entry = p_buff;
+
+ vha->reset_tmplt.seq_end = 0;
+ vha->reset_tmplt.template_end = 0;
+ entries = vha->reset_tmplt.hdr->entries;
+ index = vha->reset_tmplt.seq_index;
+
+ for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+ switch (p_hdr->cmd) {
+ case OPCODE_NOP:
+ break;
+ case OPCODE_WRITE_LIST:
+ qla8044_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_WRITE_LIST:
+ qla8044_read_write_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_LIST:
+ qla8044_poll_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_WRITE_LIST:
+ qla8044_poll_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_MODIFY_WRITE:
+ qla8044_read_modify_write(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_PAUSE:
+ qla8044_pause(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_END:
+ vha->reset_tmplt.seq_end = 1;
+ break;
+ case OPCODE_TMPL_END:
+ qla8044_template_end(vha, p_hdr);
+ break;
+ case OPCODE_POLL_READ_LIST:
+ qla8044_poll_read_list(vha, p_hdr);
+ break;
+ default:
+ ql_log(ql_log_fatal, vha, 0xb097,
+ "%s: Unknown command ==> 0x%04x on "
+ "entry = %d\n", __func__, p_hdr->cmd, index);
+ break;
+ }
+ /*
+ *Set pointer to next entry in the sequence.
+ */
+ p_entry += p_hdr->size;
+ }
+ vha->reset_tmplt.seq_index = index;
+}
+
+static void
+qla8044_process_init_seq(struct scsi_qla_host *vha)
+{
+ qla8044_process_reset_template(vha,
+ vha->reset_tmplt.init_offset);
+ if (vha->reset_tmplt.seq_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb098,
+ "%s: Abrupt INIT Sub-Sequence end.\n",
+ __func__);
+}
+
+static void
+qla8044_process_stop_seq(struct scsi_qla_host *vha)
+{
+ vha->reset_tmplt.seq_index = 0;
+ qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
+ if (vha->reset_tmplt.seq_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb099,
+ "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
+}
+
+static void
+qla8044_process_start_seq(struct scsi_qla_host *vha)
+{
+ qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
+ if (vha->reset_tmplt.template_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb09a,
+ "%s: Abrupt START Sub-Sequence end.\n",
+ __func__);
+}
+
+static int
+qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
+ uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
+{
+ uint32_t i;
+ uint32_t u32_word;
+ uint32_t flash_offset;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
+
+ if (addr & 0x3) {
+ ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lockless_read;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_WINDOW, (addr));
+
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09c,
+ "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ /* Check if data is spread across multiple sectors */
+ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+ (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09d,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+ if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+ /* This write is needed once for each sector */
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_WINDOW, (addr));
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09f,
+ "%s: failed to write addr "
+ "0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb0a0,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+exit_lockless_read:
+ return ret_val;
+}
+
+/*
+ * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
+ *
+ * @vha : Pointer to adapter structure
+ * addr : Flash address to write to
+ * data : Data to be written
+ * count : word_count to be written
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
+ uint64_t addr, uint32_t *data, uint32_t count)
+{
+ int i, j, ret_val = QLA_SUCCESS;
+ uint32_t agt_ctrl;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Only 128-bit aligned access */
+ if (addr & 0xF) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write;
+ }
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /* Write address */
+ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a1,
+ "%s: write to AGT_ADDR_HI failed!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+ QLA8044_ADDR_QDR_NET_MAX)) ||
+ (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+ QLA8044_ADDR_DDR_NET_MAX)))) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write_unlock;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_ADDR_LO, addr);
+
+ /* Write data */
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_LO, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_HI, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a2,
+ "%s: write to AGT_WRDATA failed!\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ /* Check write status */
+ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_ENABLE);
+ ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_START);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a3,
+ "%s: write to AGT_CTRL failed!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a4,
+ "%s: failed to read "
+ "MD_MIU_TEST_AGT_CTRL!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+ if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failed */
+ if (j >= MAX_CTL_CHECK) {
+ ql_log(ql_log_fatal, vha, 0xb0a5,
+ "%s: MS memory write failed!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write_unlock;
+ }
+ }
+
+exit_ms_mem_write_unlock:
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+ return ret_val;
+}
+
+static int
+qla8044_copy_bootloader(struct scsi_qla_host *vha)
+{
+ uint8_t *p_cache;
+ uint32_t src, count, size;
+ uint64_t dest;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ src = QLA8044_BOOTLOADER_FLASH_ADDR;
+ dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
+ size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
+
+ /* 128 bit alignment check */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ /* 16 byte count */
+ count = size/16;
+
+ p_cache = vmalloc(size);
+ if (p_cache == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0a6,
+ "%s: Failed to allocate memory for "
+ "boot loader cache\n", __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_copy_bootloader;
+ }
+
+ ret_val = qla8044_lockless_flash_read_u32(vha, src,
+ p_cache, size/sizeof(uint32_t));
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a7,
+ "%s: Error reading F/W from flash!!!\n", __func__);
+ goto exit_copy_error;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
+ __func__);
+
+ /* 128 bit/16 byte write to MS memory */
+ ret_val = qla8044_ms_mem_write_128b(vha, dest,
+ (uint32_t *)p_cache, count);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a9,
+ "%s: Error writing F/W to MS !!!\n", __func__);
+ goto exit_copy_error;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
+ "%s: Wrote F/W (size %d) to MS !!!\n",
+ __func__, size);
+
+exit_copy_error:
+ vfree(p_cache);
+
+exit_copy_bootloader:
+ return ret_val;
+}
+
+static int
+qla8044_restart(struct scsi_qla_host *vha)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_process_stop_seq(vha);
+
+ /* Collect minidump */
+ if (ql2xmdenable)
+ qla8044_get_minidump(vha);
+ else
+ ql_log(ql_log_fatal, vha, 0xb14c,
+ "Minidump disabled.\n");
+
+ qla8044_process_init_seq(vha);
+
+ if (qla8044_copy_bootloader(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0ab,
+ "%s: Copy bootloader, firmware restart failed!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_restart;
+ }
+
+ /*
+ * Loads F/W from flash
+ */
+ qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
+
+ qla8044_process_start_seq(vha);
+
+exit_restart:
+ return ret_val;
+}
+
+/*
+ * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
+ * initialized.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
+{
+ uint32_t val, ret_val = QLA_FUNCTION_FAILED;
+ int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+ struct qla_hw_data *ha = vha->hw;
+
+ do {
+ val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
+ if (val == PHAN_INITIALIZE_COMPLETE) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
+ "%s: Command Peg initialization "
+ "complete! state=0x%x\n", __func__, val);
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ msleep(CRB_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ return ret_val;
+}
+
+static int
+qla8044_start_firmware(struct scsi_qla_host *vha)
+{
+ int ret_val = QLA_SUCCESS;
+
+ if (qla8044_restart(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0ad,
+ "%s: Restart Error!!!, Need Reset!!!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_start_fw;
+ } else
+ ql_dbg(ql_dbg_p3p, vha, 0xb0af,
+ "%s: Restart done!\n", __func__);
+
+ ret_val = qla8044_check_cmd_peg_status(vha);
+ if (ret_val) {
+ ql_log(ql_log_fatal, vha, 0xb0b0,
+ "%s: Peg not initialized!\n", __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ }
+
+exit_start_fw:
+ return ret_val;
+}
+
+void
+qla8044_clear_drv_active(struct qla_hw_data *ha)
+{
+ uint32_t drv_active;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_active &= ~(1 << (ha->portnum));
+
+ ql_log(ql_log_info, vha, 0xb0b1,
+ "%s(%ld): drv_active: 0x%08x\n",
+ __func__, vha->host_no, drv_active);
+
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+/*
+ * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static int
+qla8044_device_bootstrap(struct scsi_qla_host *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ int i;
+ uint32_t old_count = 0, count = 0;
+ int need_reset = 0;
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ need_reset = qla8044_need_reset(vha);
+
+ if (!need_reset) {
+ old_count = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+
+ count = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
+ }
+ qla8044_flash_lock_recovery(vha);
+ } else {
+ /* We are trying to perform a recovery here. */
+ if (ha->flags.isp82xx_fw_hung)
+ qla8044_flash_lock_recovery(vha);
+ }
+
+ /* set to DEV_INITIALIZING */
+ ql_log(ql_log_info, vha, 0xb0b2,
+ "%s: HW State: INITIALIZING\n", __func__);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_INITIALIZING);
+
+ qla8044_idc_unlock(ha);
+ rval = qla8044_start_firmware(vha);
+ qla8044_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xb0b3,
+ "%s: HW State: FAILED\n", __func__);
+ qla8044_clear_drv_active(ha);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ return rval;
+ }
+
+ /* For ISP8044, If IDC_CTRL GRACEFUL_RESET_BIT1 is set , reset it after
+ * device goes to INIT state. */
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ ha->fw_dumped = 0;
+ }
+
+dev_ready:
+ ql_log(ql_log_info, vha, 0xb0b4,
+ "%s: HW State: READY\n", __func__);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
+
+ return rval;
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+static void
+qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
+{
+ u8 *phdr;
+
+ if (!vha->reset_tmplt.buff) {
+ ql_log(ql_log_fatal, vha, 0xb0b5,
+ "%s: Error Invalid reset_seq_template\n", __func__);
+ return;
+ }
+
+ phdr = vha->reset_tmplt.buff;
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
+ "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X"
+ "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
+ "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
+ *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+ *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+ *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+ *(phdr+13), *(phdr+14), *(phdr+15));
+}
+
+/*
+ * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
+{
+ uint32_t sum = 0;
+ uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
+ int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);
+
+ while (u16_count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ /* checksum of 0 indicates a valid template */
+ if (~sum) {
+ return QLA_SUCCESS;
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb0b7,
+ "%s: Reset seq checksum failed\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+}
+
+/*
+ * qla8044_read_reset_template - Read Reset Template from Flash, validate
+ * the template and store offsets of stop/start/init offsets in ha->reset_tmplt.
+ *
+ * @ha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+ uint8_t *p_buff;
+ uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+ vha->reset_tmplt.seq_error = 0;
+ vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+ if (vha->reset_tmplt.buff == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0b8,
+ "%s: Failed to allocate reset template resources\n",
+ __func__);
+ goto exit_read_reset_template;
+ }
+
+ p_buff = vha->reset_tmplt.buff;
+ addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+ tmplt_hdr_def_size =
+ sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+ "%s: Read template hdr size %d from Flash\n",
+ __func__, tmplt_hdr_def_size);
+
+ /* Copy template header from flash */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0ba,
+ "%s: Failed to read reset template\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ vha->reset_tmplt.hdr =
+ (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
+
+ /* Validate the template header size and signature */
+ tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+ if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+ (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+ ql_log(ql_log_fatal, vha, 0xb0bb,
+ "%s: Template Header size invalid %d "
+ "tmplt_hdr_def_size %d!!!\n", __func__,
+ tmplt_hdr_size, tmplt_hdr_def_size);
+ goto exit_read_template_error;
+ }
+
+ addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+ p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+ tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+ vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+ "%s: Read rest of the template size %d\n",
+ __func__, vha->reset_tmplt.hdr->size);
+
+ /* Copy rest of the template */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0bd,
+ "%s: Failed to read reset tempelate\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ /* Integrity check */
+ if (qla8044_reset_seq_checksum_test(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0be,
+ "%s: Reset Seq checksum failed!\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+ "%s: Reset Seq checksum passed! Get stop, "
+ "start and init seq offsets\n", __func__);
+
+ /* Get STOP, START, INIT sequence offsets */
+ vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->init_seq_offset;
+
+ vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->start_seq_offset;
+
+ vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->hdr_size;
+
+ qla8044_dump_reset_seq_hdr(vha);
+
+ goto exit_read_reset_template;
+
+exit_read_template_error:
+ vfree(vha->reset_tmplt.buff);
+
+exit_read_reset_template:
+ return;
+}
+
+void
+qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
+{
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ idc_ctrl |= DONTRESET_BIT0;
+ ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
+ "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+inline void
+qla8044_set_rst_ready(struct scsi_qla_host *vha)
+{
+ uint32_t drv_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ /* For ISP8044, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.*/
+ drv_state |= (1 << ha->portnum);
+
+ ql_log(ql_log_info, vha, 0xb0c1,
+ "%s(%ld): drv_state: 0x%08x\n",
+ __func__, vha->host_no, drv_state);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+/**
+ * qla8044_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla8044_need_reset_handler(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state = 0, drv_state, drv_active;
+ unsigned long reset_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_log(ql_log_fatal, vha, 0xb0c2,
+ "%s: Performing ISP error recovery\n", __func__);
+
+ if (vha->flags.online) {
+ qla8044_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, vha->req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla8044_idc_lock(ha);
+ }
+
+ dev_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX);
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0c5,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
+ __func__, vha->host_no, drv_state, drv_active, dev_state);
+
+ qla8044_set_rst_ready(vha);
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+ do {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql_log(ql_log_info, vha, 0xb0c4,
+ "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
+ __func__, ha->portnum, drv_state, drv_active);
+ break;
+ }
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ dev_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX);
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ } while (((drv_state & drv_active) != drv_active) &&
+ (dev_state == QLA8XXX_DEV_NEED_RESET));
+
+ /* Remove IDC participation of functions not acknowledging */
+ if (drv_state != drv_active) {
+ ql_log(ql_log_info, vha, 0xb0c7,
+ "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
+ __func__, vha->host_no, ha->portnum,
+ (drv_active ^ drv_state));
+ drv_active = drv_active & drv_state;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+ drv_active);
+ } else {
+ /*
+ * Reset owner should execute reset recovery,
+ * if all functions acknowledged
+ */
+ if ((ha->flags.nic_core_reset_owner) &&
+ (dev_state == QLA8XXX_DEV_NEED_RESET)) {
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_device_bootstrap(vha);
+ return;
+ }
+ }
+
+ /* Exit if non active function */
+ if (!(drv_active & (1 << ha->portnum))) {
+ ha->flags.nic_core_reset_owner = 0;
+ return;
+ }
+
+ /*
+ * Execute Reset Recovery if Reset Owner or Function 7
+ * is the only active function
+ */
+ if (ha->flags.nic_core_reset_owner ||
+ ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_device_bootstrap(vha);
+ }
+}
+
+static void
+qla8044_set_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* For ISP8044, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.*/
+ drv_active |= (1 << ha->portnum);
+
+ ql_log(ql_log_info, vha, 0xb0c8,
+ "%s(%ld): drv_active: 0x%08x\n",
+ __func__, vha->host_no, drv_active);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+static int
+qla8044_check_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ if (drv_active & (1 << ha->portnum))
+ return QLA_SUCCESS;
+ else
+ return QLA_TEST_FAILED;
+}
+
+static void
+qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
+{
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ idc_ctrl &= ~DONTRESET_BIT0;
+ ql_log(ql_log_info, vha, 0xb0c9,
+ "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+static int
+qla8044_set_idc_ver(struct scsi_qla_host *vha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ if (drv_active == (1 << ha->portnum)) {
+ idc_ver = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+ idc_ver &= (~0xFF);
+ idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+ idc_ver);
+ ql_log(ql_log_info, vha, 0xb0ca,
+ "%s: IDC version updated to %d\n",
+ __func__, idc_ver);
+ } else {
+ idc_ver = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+ idc_ver &= 0xFF;
+ if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
+ ql_log(ql_log_info, vha, 0xb0cb,
+ "%s: qla4xxx driver IDC version %d "
+ "is not compatible with IDC version %d "
+ "of other drivers!\n",
+ __func__, QLA8044_IDC_VER_MAJ_VALUE,
+ idc_ver);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit_set_idc_ver;
+ }
+ }
+
+ /* Update IDC_MINOR_VERSION */
+ idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
+ idc_ver &= ~(0x03 << (ha->portnum * 2));
+ idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
+ qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+ return rval;
+}
+
+static int
+qla8044_update_idc_reg(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.init_done)
+ goto exit_update_idc_reg;
+
+ qla8044_idc_lock(ha);
+ qla8044_set_drv_active(vha);
+
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* If we are the first driver to load and
+ * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
+ if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
+ qla8044_clear_idc_dontreset(vha);
+
+ rval = qla8044_set_idc_ver(vha);
+ if (rval == QLA_FUNCTION_FAILED)
+ qla8044_clear_drv_active(ha);
+ qla8044_idc_unlock(ha);
+
+exit_update_idc_reg:
+ return rval;
+}
+
+/**
+ * qla8044_need_qsnt_handler - Code to start qsnt
+ * @ha: pointer to adapter structure
+ **/
+static void
+qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
+{
+ unsigned long qsnt_timeout;
+ uint32_t drv_state, drv_active, dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.online)
+ qla2x00_quiesce_io(vha);
+ else
+ return;
+
+ qla8044_set_qsnt_ready(vha);
+
+ /* Wait for 30 secs for all functions to ack qsnt mode */
+ qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* Shift drv_active by 1 to match drv_state. As quiescent ready bit
+ position is at bit 1 and drv active is at bit 0 */
+ drv_active = drv_active << 1;
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, qsnt_timeout)) {
+ /* Other functions did not ack, changing state to
+ * DEV_READY
+ */
+ clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_READY);
+ qla8044_clear_qsnt_ready(vha);
+ ql_log(ql_log_info, vha, 0xb0cc,
+ "Timeout waiting for quiescent ack!!!\n");
+ return;
+ }
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_active = drv_active << 1;
+ }
+
+ /* All functions have Acked. Set quiescent state */
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_QUIESCENT);
+ ql_log(ql_log_info, vha, 0xb0cd,
+ "%s: HW State: QUIESCENT\n", __func__);
+ }
+}
+
+/*
+ * qla8044_device_state_handler - Adapter state machine
+ * @ha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int
+qla8044_device_state_handler(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = qla8044_update_idc_reg(vha);
+ if (rval == QLA_FUNCTION_FAILED)
+ goto exit_error;
+
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
+ "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+ qla8044_idc_lock(ha);
+
+ while (1) {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb0cf,
+ "%s: Device Init Failed 0x%x = %s\n",
+ QLA2XXX_DRIVER_NAME, dev_state,
+ dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+ qla8044_wr_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ }
+ }
+
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ ql_log(ql_log_info, vha, 0xb0d0,
+ "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ /* NOTE: Make sure idc unlocked upon exit of switch statement */
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ ha->flags.nic_core_reset_owner = 0;
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ rval = qla8044_device_bootstrap(vha);
+ break;
+ case QLA8XXX_DEV_INITIALIZING:
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ /* For ISP8044, if NEED_RESET is set by any driver,
+ * it should be honored, irrespective of IDC_CTRL
+ * DONTRESET_BIT0 */
+ qla8044_need_reset_handler(vha);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* idc locked/unlocked in handler */
+ qla8044_need_qsnt_handler(vha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ ql_log(ql_log_info, vha, 0xb0d1,
+ "HW State: QUIESCENT\n");
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ default:
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ }
+ }
+exit:
+ qla8044_idc_unlock(ha);
+
+exit_error:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0xb0d2,
+ "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down\n", temp_val);
+ status = QLA_FUNCTION_FAILED;
+ return status;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0xb0d3,
+ "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ }
+ return 0;
+}
+
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive - Check firmware health
+ * @ha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+int
+qla8044_check_fw_alive(struct scsi_qla_host *vha)
+{
+ uint32_t fw_heartbeat_counter;
+ uint32_t halt_status1, halt_status2;
+ int status = QLA_SUCCESS;
+
+ fw_heartbeat_counter = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+ /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+ if (fw_heartbeat_counter == 0xffffffff) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
+ "scsi%ld: %s: Device in frozen "
+ "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+ vha->host_no, __func__);
+ return status;
+ }
+
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ halt_status1 = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS1_INDEX);
+ halt_status2 = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS2_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0d5,
+ "scsi(%ld): %s, ISP8044 "
+ "Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, "
+ "PEG_HALT_STATUS2: 0x%x,\n",
+ vha->host_no, __func__, halt_status1,
+ halt_status2);
+ status = QLA_FUNCTION_FAILED;
+ }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
+
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
+}
+
+void
+qla8044_watchdog(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state, halt_status;
+ int halt_status_unrecoverable = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* don't poll if reset is going on or FW hang in quiescent state */
+ if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (qla8044_check_fw_alive(vha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ ql_log(ql_log_warn, vha, 0xb10a,
+ "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
+ }
+
+ if (qla8044_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla2xxx_wake_dpc(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_info, vha, 0xb0d6,
+ "%s: HW State: NEED RESET!\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+ !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_info, vha, 0xb0d7,
+ "%s: HW State: NEED QUIES detected!\n",
+ __func__);
+ set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /* Check firmware health */
+ if (ha->flags.isp82xx_fw_hung) {
+ halt_status = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS1_INDEX);
+ if (halt_status &
+ QLA8044_HALT_STATUS_FW_RESET) {
+ ql_log(ql_log_fatal, vha,
+ 0xb0d8, "%s: Firmware "
+ "error detected device "
+ "is being reset\n",
+ __func__);
+ } else if (halt_status &
+ QLA8044_HALT_STATUS_UNRECOVERABLE) {
+ halt_status_unrecoverable = 1;
+ }
+
+ /* Since we cannot change dev_state in interrupt
+ * context, set appropriate DPC flag then wakeup
+ * DPC */
+ if (halt_status_unrecoverable) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ if (dev_state ==
+ QLA8XXX_DEV_QUIESCENT) {
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ ql_log(ql_log_info, vha, 0xb0d9,
+ "%s: FW CONTEXT Reset "
+ "needed!\n", __func__);
+ } else {
+ ql_log(ql_log_info, vha,
+ 0xb0da, "%s: "
+ "detect abort needed\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ }
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+
+ }
+}
+
+static int
+qla8044_minidump_process_control(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr)
+{
+ struct qla8044_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index;
+ uint32_t crb_addr, rval = QLA_SUCCESS;
+ unsigned long wtime;
+ struct qla8044_minidump_template_hdr *tmplt_hdr;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
+ tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+ ha->md_tmplt_hdr;
+ crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla8044_wr_reg_indirect(vha, crb_addr,
+ crb_entry->value_1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ read_value |= crb_entry->value_3;
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1) {
+ break;
+ } else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else {
+ qla8044_rd_reg_indirect(vha,
+ crb_addr, &read_value);
+ }
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ qla8044_rd_reg_indirect(vha, addr, &read_value);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ return rval;
+}
+
+static void
+qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
+ crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_addr;
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int
+qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla8044_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
+ m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size);
+
+ if (r_addr & 0xf) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
+ "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+ __func__, r_addr);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
+ r_value = 0;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+ r_value = MIU_TA_CTL_START_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ &r_value);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_SUCCESS;
+ }
+
+ for (j = 0; j < 4; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
+ &r_data);
+ *data_ptr++ = r_data;
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
+ "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+/* ISP83xx flash read for _RDROM _BOARD */
+static uint32_t
+qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t fl_addr, u32_count, rval;
+ struct qla8044_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
+ fl_addr = rom_hdr->read_addr;
+ u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+ __func__, fl_addr, u32_count);
+
+ rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
+ (u8 *)(data_ptr), u32_count);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb0f6,
+ "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
+ return QLA_FUNCTION_FAILED;
+ } else {
+ data_ptr += u32_count;
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+ }
+}
+
+static void
+qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+
+ ql_log(ql_log_info, vha, 0xb0f7,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ vha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+}
+
+static int
+qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla8044_minidump_entry_cache *cache_hdr;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
+ cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+ if (c_value_w)
+ qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ qla8044_rd_reg_indirect(vha, c_addr,
+ &c_value_r);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, addr, &r_value);
+ *data_ptr++ = r_value;
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void
+qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla8044_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+ qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, addr, &r_value);
+ *data_ptr++ = r_value;
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
+
+ ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt);
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)));
+
+ *d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
+
+ mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, s_value);
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = s_value;
+ *data_ptr++ = r_value;
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_queue(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla8044_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
+ q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, qid);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t
+qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+ uint16_t s_stride, i;
+ struct qla8044_minidump_entry_pollrd *pollrd_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
+ s_addr = pollrd_hdr->select_addr;
+ r_addr = pollrd_hdr->read_addr;
+ s_value = pollrd_hdr->select_value;
+ s_stride = pollrd_hdr->select_value_stride;
+
+ poll_wait = pollrd_hdr->poll_wait;
+ poll_mask = pollrd_hdr->poll_mask;
+
+ for (i = 0; i < pollrd_hdr->op_count; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, s_value);
+ poll_wait = pollrd_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, s_addr, &r_value);
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb0fe,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+ }
+ }
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = s_value;
+ *data_ptr++ = r_value;
+
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+
+error:
+ return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+ uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+ struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
+ sel_val1 = rdmux2_hdr->select_value_1;
+ sel_val2 = rdmux2_hdr->select_value_2;
+ sel_addr1 = rdmux2_hdr->select_addr_1;
+ sel_addr2 = rdmux2_hdr->select_addr_2;
+ sel_val_mask = rdmux2_hdr->select_value_mask;
+ read_addr = rdmux2_hdr->read_addr;
+
+ for (i = 0; i < rdmux2_hdr->op_count; i++) {
+ qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & sel_val_mask;
+ *data_ptr++ = t_sel_val;
+
+ qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+ qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+ *data_ptr++ = data;
+
+ qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & sel_val_mask;
+ *data_ptr++ = t_sel_val;
+
+ qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+ qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+ *data_ptr++ = data;
+
+ sel_val1 += rdmux2_hdr->select_value_stride;
+ sel_val2 += rdmux2_hdr->select_value_stride;
+ }
+
+ *d_ptr = data_ptr;
+}
+
+static uint32_t
+qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t poll_wait, poll_mask, r_value, data;
+ uint32_t addr_1, addr_2, value_1, value_2;
+ struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
+ addr_1 = poll_hdr->addr_1;
+ addr_2 = poll_hdr->addr_2;
+ value_1 = poll_hdr->value_1;
+ value_2 = poll_hdr->value_2;
+ poll_mask = poll_hdr->poll_mask;
+
+ qla8044_wr_reg_indirect(vha, addr_1, value_1);
+
+ poll_wait = poll_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb0ff,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+ }
+ }
+
+ qla8044_rd_reg_indirect(vha, addr_2, &data);
+ data &= poll_hdr->modify_mask;
+ qla8044_wr_reg_indirect(vha, addr_2, data);
+ qla8044_wr_reg_indirect(vha, addr_1, value_2);
+
+ poll_wait = poll_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb100,
+ "%s: TIMEOUT2\n", __func__);
+ goto error;
+ }
+ }
+ }
+
+ *data_ptr++ = addr_2;
+ *data_ptr++ = data;
+
+ *d_ptr = data_ptr;
+
+ return QLA_SUCCESS;
+
+error:
+ return QLA_FUNCTION_FAILED;
+}
+
+#define ISP8044_PEX_DMA_ENGINE_INDEX 8
+#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
+#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000
+#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
+#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
+#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
+
+#define ISP8044_PEX_DMA_READ_SIZE (16 * 1024)
+#define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
+
+static int
+qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = ha->md_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+ /* Read the pex-dma's command-status-and-control register. */
+ rval = qla8044_rd_reg_indirect(vha,
+ (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ return QLA_FUNCTION_FAILED;
+
+ /* Check if requested pex-dma engine is available. */
+ if (cmd_sts_and_cntrl & BIT_31)
+ return QLA_SUCCESS;
+
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla8044_start_pex_dma(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS, wait = 0;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = ha->md_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
+ m_hdr->desc_card_addr);
+ if (rval)
+ goto error_exit;
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
+ if (rval)
+ goto error_exit;
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
+ m_hdr->start_dma_cmd);
+ if (rval)
+ goto error_exit;
+
+ /* Wait for dma operation to complete. */
+ for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
+ rval = qla8044_rd_reg_indirect(vha,
+ (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ goto error_exit;
+
+ if ((cmd_sts_and_cntrl & BIT_1) == 0)
+ break;
+
+ udelay(10);
+ }
+
+ /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
+ if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
+ rval = QLA_FUNCTION_FAILED;
+ goto error_exit;
+ }
+
+error_exit:
+ return rval;
+}
+
+static int
+qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+ uint32_t chunk_size, read_size;
+ uint8_t *data_ptr = (uint8_t *)*d_ptr;
+ void *rdmem_buffer = NULL;
+ dma_addr_t rdmem_dma;
+ struct qla8044_pex_dma_descriptor dma_desc;
+
+ rval = qla8044_check_dma_engine_state(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb147,
+ "DMA engine not available. Fallback to rdmem-read.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ m_hdr = (void *)entry_hdr;
+
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
+ if (!rdmem_buffer) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb148,
+ "Unable to allocate rdmem dma buffer\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Prepare pex-dma descriptor to be written to MS memory. */
+ /* dma-desc-cmd layout:
+ * 0-3: dma-desc-cmd 0-3
+ * 4-7: pcid function number
+ * 8-15: dma-desc-cmd 8-15
+ * dma_bus_addr: dma buffer address
+ * cmd.read_data_size: amount of data-chunk to be read.
+ */
+ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+ dma_desc.cmd.dma_desc_cmd |=
+ ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+
+ dma_desc.dma_bus_addr = rdmem_dma;
+ dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
+ read_size = 0;
+
+ /*
+ * Perform rdmem operation using pex-dma.
+ * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
+ */
+ while (read_size < m_hdr->read_data_size) {
+ if (m_hdr->read_data_size - read_size <
+ ISP8044_PEX_DMA_READ_SIZE) {
+ chunk_size = (m_hdr->read_data_size - read_size);
+ dma_desc.cmd.read_data_size = chunk_size;
+ }
+
+ dma_desc.src_addr = m_hdr->read_addr + read_size;
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla8044_ms_mem_write_128b(vha,
+ m_hdr->desc_card_addr, (void *)&dma_desc,
+ (sizeof(struct qla8044_pex_dma_descriptor)/16));
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb14a,
+ "%s: Error writing rdmem-dma-init to MS !!!\n",
+ __func__);
+ goto error_exit;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb14b,
+ "%s: Dma-descriptor: Instruct for rdmem dma "
+ "(chunk_size 0x%x).\n", __func__, chunk_size);
+
+ /* Execute: Start pex-dma operation. */
+ rval = qla8044_start_pex_dma(vha, m_hdr);
+ if (rval)
+ goto error_exit;
+
+ memcpy(data_ptr, rdmem_buffer, chunk_size);
+ data_ptr += chunk_size;
+ read_size += chunk_size;
+ }
+
+ *d_ptr = (void *)data_ptr;
+
+error_exit:
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
+ rdmem_buffer, rdmem_dma);
+
+ return rval;
+}
+
+static uint32_t
+qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ int loop_cnt;
+ uint32_t addr1, addr2, value, data, temp, wrVal;
+ uint8_t stride, stride2;
+ uint16_t count;
+ uint32_t poll, mask, data_size, modify_mask;
+ uint32_t wait_count = 0;
+
+ uint32_t *data_ptr = *d_ptr;
+
+ struct qla8044_minidump_entry_rddfe *rddfe;
+ rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
+
+ addr1 = rddfe->addr_1;
+ value = rddfe->value;
+ stride = rddfe->stride;
+ stride2 = rddfe->stride2;
+ count = rddfe->count;
+
+ poll = rddfe->poll;
+ mask = rddfe->mask;
+ modify_mask = rddfe->modify_mask;
+ data_size = rddfe->data_size;
+
+ addr2 = addr1 + stride;
+
+ for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+ qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb153,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ } else {
+ qla8044_rd_reg_indirect(vha, addr2, &temp);
+ temp = temp & modify_mask;
+ temp = (temp | ((loop_cnt << 16) | loop_cnt));
+ wrVal = ((temp << 16) | temp);
+
+ qla8044_wr_reg_indirect(vha, addr2, wrVal);
+ qla8044_wr_reg_indirect(vha, addr1, value);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb154,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr1,
+ ((0x40000000 | value) + stride2));
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb155,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+
+ qla8044_rd_reg_indirect(vha, addr2, &data);
+
+ *data_ptr++ = wrVal;
+ *data_ptr++ = data;
+ }
+
+ }
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+
+error:
+ return -1;
+
+}
+
+static uint32_t
+qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ int ret = 0;
+ uint32_t addr1, addr2, value1, value2, data, selVal;
+ uint8_t stride1, stride2;
+ uint32_t addr3, addr4, addr5, addr6, addr7;
+ uint16_t count, loop_cnt;
+ uint32_t poll, mask;
+ uint32_t *data_ptr = *d_ptr;
+
+ struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+ rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
+
+ addr1 = rdmdio->addr_1;
+ addr2 = rdmdio->addr_2;
+ value1 = rdmdio->value_1;
+ stride1 = rdmdio->stride_1;
+ stride2 = rdmdio->stride_2;
+ count = rdmdio->count;
+
+ poll = rdmdio->poll;
+ mask = rdmdio->mask;
+ value2 = rdmdio->value_2;
+
+ addr3 = addr1 + stride1;
+
+ for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+ ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+ addr3, mask);
+ if (ret == -1)
+ goto error;
+
+ addr4 = addr2 - stride1;
+ ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
+ value2);
+ if (ret == -1)
+ goto error;
+
+ addr5 = addr2 - (2 * stride1);
+ ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
+ value1);
+ if (ret == -1)
+ goto error;
+
+ addr6 = addr2 - (3 * stride1);
+ ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
+ addr6, 0x2);
+ if (ret == -1)
+ goto error;
+
+ ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+ addr3, mask);
+ if (ret == -1)
+ goto error;
+
+ addr7 = addr2 - (4 * stride1);
+ data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
+ mask, addr7);
+ if (data == -1)
+ goto error;
+
+ selVal = (value2 << 18) | (value1 << 2) | 2;
+
+ stride2 = rdmdio->stride_2;
+ *data_ptr++ = selVal;
+ *data_ptr++ = data;
+
+ value1 = value1 + stride2;
+ *d_ptr = data_ptr;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+ uint32_t wait_count = 0;
+ struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+
+ pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+ addr1 = pollwr_hdr->addr_1;
+ addr2 = pollwr_hdr->addr_2;
+ value1 = pollwr_hdr->value_1;
+ value2 = pollwr_hdr->value_2;
+
+ poll = pollwr_hdr->poll;
+ mask = pollwr_hdr->mask;
+
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr2, value2);
+ qla8044_wr_reg_indirect(vha, addr1, value1);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+ wait_count++;
+ }
+
+ return QLA_SUCCESS;
+
+error:
+ return -1;
+}
+
+/*
+ *
+ * qla8044_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+int
+qla8044_collect_md_data(struct scsi_qla_host *vha)
+{
+ int num_entry_hdr = 0;
+ struct qla8044_minidump_entry_hdr *entry_hdr;
+ struct qla8044_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0, f_capture_mask;
+ int i, rval = QLA_FUNCTION_FAILED;
+ uint64_t now;
+ uint32_t timestamp, idc_control;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->md_dump) {
+ ql_log(ql_log_info, vha, 0xb101,
+ "%s(%ld) No buffer to dump\n",
+ __func__, vha->host_no);
+ return rval;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xb10d,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n", ha->fw_dump);
+ goto md_failed;
+ }
+
+ ha->fw_dumped = 0;
+
+ if (!ha->md_tmplt_hdr || !ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb10e,
+ "Memory not allocated for minidump capture\n");
+ goto md_failed;
+ }
+
+ qla8044_idc_lock(ha);
+ idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ if (idc_control & GRACEFUL_RESET_BIT1) {
+ ql_log(ql_log_warn, vha, 0xb112,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_control & ~GRACEFUL_RESET_BIT1));
+ qla8044_idc_unlock(ha);
+
+ goto md_failed;
+ }
+ qla8044_idc_unlock(ha);
+
+ if (qla82xx_validate_template_chksum(vha)) {
+ ql_log(ql_log_info, vha, 0xb109,
+ "Template checksum validation error\n");
+ goto md_failed;
+ }
+
+ tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+ ha->md_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb11a,
+ "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+ f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+ /* Validate whether required debug level is set */
+ if ((f_capture_mask & 0x3) != 0x3) {
+ ql_log(ql_log_warn, vha, 0xb10f,
+ "Minimum required capture mask[0x%x] level not set\n",
+ f_capture_mask);
+
+ }
+ tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+ ql_log(ql_log_info, vha, 0xb102,
+ "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql_log(ql_log_info, vha, 0xb10b,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql_log(ql_log_info, vha, 0xb10c,
+ "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->md_dump_size, ha->md_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+ tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
+ tmplt_hdr->ocm_window_reg[ha->portnum];
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected > ha->md_dump_size) {
+ ql_log(ql_log_info, vha, 0xb103,
+ "Data collected: [0x%x], "
+ "Total Dump size: [0x%x]\n",
+ data_collected, ha->md_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ql2xmdcapmask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb104,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->md_dump_size - data_collected));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla8044_minidump_process_control(vha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla8044_minidump_process_rdcrb(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla8044_minidump_pex_dma_read(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ rval = qla8044_minidump_process_rdmem(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha,
+ entry_hdr, i);
+ goto md_failed;
+ }
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ rval = qla8044_minidump_process_rdrom(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha,
+ entry_hdr, i);
+ }
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla8044_minidump_process_l2tag(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA8044_L1DTG:
+ case QLA8044_L1ITG:
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla8044_minidump_process_l1cache(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla8044_minidump_process_rdocm(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla8044_minidump_process_rdmux(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla8044_minidump_process_queue(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA8044_POLLRD:
+ rval = qla8044_minidump_process_pollrd(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_RDMUX2:
+ qla8044_minidump_process_rdmux2(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA8044_POLLRDMWR:
+ rval = qla8044_minidump_process_pollrdmwr(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_RDDFE:
+ rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_RDMDIO:
+ rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_POLLWR:
+ rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr -
+ (uint8_t *)((uint8_t *)ha->md_dump);
+skip_nxt_entry:
+ /*
+ * next entry in the template
+ */
+ entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+ }
+
+ if (data_collected != ha->md_dump_size) {
+ ql_log(ql_log_info, vha, 0xb105,
+ "Dump data mismatch: Data collected: "
+ "[0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->md_dump_size);
+ rval = QLA_FUNCTION_FAILED;
+ goto md_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0xb110,
+ "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+ vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+ ha->fw_dumped = 1;
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+
+ ql_log(ql_log_info, vha, 0xb106,
+ "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i);
+md_failed:
+ return rval;
+}
+
+void
+qla8044_get_minidump(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!qla8044_collect_md_data(vha)) {
+ ha->fw_dumped = 1;
+ ha->prev_minidump_failed = 0;
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb0db,
+ "%s: Unable to collect minidump\n",
+ __func__);
+ ha->prev_minidump_failed = 1;
+ }
+}
+
+static int
+qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
+{
+ uint32_t flash_status;
+ int retries = QLA8044_FLASH_READ_RETRY_COUNT;
+ int ret_val = QLA_SUCCESS;
+
+ while (retries--) {
+ ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
+ &flash_status);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb13c,
+ "%s: Failed to read FLASH_STATUS reg.\n",
+ __func__);
+ break;
+ }
+ if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
+ QLA8044_FLASH_STATUS_READY)
+ break;
+ msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
+ }
+
+ if (!retries)
+ ret_val = QLA_FUNCTION_FAILED;
+
+ return ret_val;
+}
+
+static int
+qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
+ uint32_t data)
+{
+ int ret_val = QLA_SUCCESS;
+ uint32_t cmd;
+
+ cmd = vha->hw->fdt_wrt_sts_reg_cmd;
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb125,
+ "%s: Failed to write to FLASH_ADDR.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb126,
+ "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_SECOND_ERASE_MS_VAL);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb127,
+ "%s: Failed to write to FLASH_CONTROL.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb128,
+ "%s: Error polling flash status reg.\n", __func__);
+
+exit_func:
+ return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_unprotect_flash(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb139,
+ "%s: Write flash status failed.\n", __func__);
+
+ return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_protect_flash(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb13b,
+ "%s: Write flash status failed.\n", __func__);
+
+ return ret_val;
+}
+
+
+static int
+qla8044_erase_flash_sector(struct scsi_qla_host *vha,
+ uint32_t sector_start_addr)
+{
+ uint32_t reversed_addr;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb12e,
+ "%s: Poll flash status after erase failed..\n", __func__);
+ }
+
+ reversed_addr = (((sector_start_addr & 0xFF) << 16) |
+ (sector_start_addr & 0xFF00) |
+ ((sector_start_addr & 0xFF0000) >> 16));
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_WRDATA, reversed_addr);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb12f,
+ "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb130,
+ "%s: Failed to write to FLASH_ADDR.\n", __func__);
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_LAST_ERASE_MS_VAL);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb131,
+ "%s: Failed write to FLASH_CONTROL.\n", __func__);
+ }
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb132,
+ "%s: Poll flash status failed.\n", __func__);
+ }
+
+
+ return ret_val;
+}
+
+/*
+ * qla8044_flash_write_u32 - Write data to flash
+ *
+ * @ha : Pointer to adapter structure
+ * addr : Flash address to write to
+ * p_data : Data to be written
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ *
+ * NOTE: Lock should be held on entry
+ */
+static int
+qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
+ uint32_t *p_data)
+{
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ 0x00800000 | (addr >> 2));
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb134,
+ "%s: Failed write to FLASH_ADDR.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb135,
+ "%s: Failed write to FLASH_WRDATA.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb136,
+ "%s: Failed write to FLASH_CONTROL.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb137,
+ "%s: Poll flash status failed.\n", __func__);
+ }
+
+exit_func:
+ return ret_val;
+}
+
+static int
+qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t spi_val;
+
+ if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
+ dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
+ ql_dbg(ql_dbg_user, vha, 0xb123,
+ "Got unsupported dwords = 0x%x.\n",
+ dwords);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ spi_val | QLA8044_FLASH_SPI_CTL);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_FIRST_TEMP_VAL);
+
+ /* First DWORD write to FLASH_WRDATA */
+ ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
+ *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_FIRST_MS_PATTERN);
+
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb124,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+
+ dwords--;
+
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_SECOND_TEMP_VAL);
+
+
+ /* Second to N-1 DWORDS writes */
+ while (dwords != 1) {
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_SECOND_MS_PATTERN);
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb129,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+ dwords--;
+ }
+
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
+
+ /* Last DWORD write */
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_LAST_MS_PATTERN);
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb12a,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
+
+ if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
+ ql_log(ql_log_warn, vha, 0xb12b,
+ "%s: Failed.\n", __func__);
+ spi_val = 0;
+ /* Operation failed, clear error bit. */
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ &spi_val);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ spi_val | QLA8044_FLASH_SPI_CTL);
+ }
+exit_func:
+ return ret;
+}
+
+static int
+qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t liter;
+
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ ret = qla8044_flash_write_u32(vha, faddr, dwptr);
+ if (ret) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb141,
+ "%s: flash address=%x data=%x.\n", __func__,
+ faddr, *dwptr);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int
+qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
+ int dword_count, erase_sec_count;
+ uint32_t erase_offset;
+ uint8_t *p_cache, *p_src;
+
+ erase_offset = offset;
+
+ p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
+ if (!p_cache)
+ return QLA_FUNCTION_FAILED;
+
+ memcpy(p_cache, buf, length);
+ p_src = p_cache;
+ dword_count = length / sizeof(uint32_t);
+ /* Since the offset and legth are sector aligned, it will be always
+ * multiple of burst_iter_count (64)
+ */
+ burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
+ erase_sec_count = length / QLA8044_SECTOR_SIZE;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ /* Lock and enable write for whole operation. */
+ qla8044_flash_lock(vha);
+ qla8044_unprotect_flash(vha);
+
+ /* Erasing the sectors */
+ for (i = 0; i < erase_sec_count; i++) {
+ rval = qla8044_erase_flash_sector(vha, erase_offset);
+ ql_dbg(ql_dbg_user, vha, 0xb138,
+ "Done erase of sector=0x%x.\n",
+ erase_offset);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb121,
+ "Failed to erase the sector having address: "
+ "0x%x.\n", erase_offset);
+ goto out;
+ }
+ erase_offset += QLA8044_SECTOR_SIZE;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb13f,
+ "Got write for addr = 0x%x length=0x%x.\n",
+ offset, length);
+
+ for (i = 0; i < burst_iter_count; i++) {
+
+ /* Go with write. */
+ rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
+ offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
+ if (rval) {
+ /* Buffer Mode failed skip to dword mode */
+ ql_log(ql_log_warn, vha, 0xb122,
+ "Failed to write flash in buffer mode, "
+ "Reverting to slow-write.\n");
+ rval = qla8044_write_flash_dword_mode(vha,
+ (uint32_t *)p_src, offset,
+ QLA8044_MAX_OPTROM_BURST_DWORDS);
+ }
+ p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+ offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb133,
+ "Done writing.\n");
+
+out:
+ qla8044_protect_flash(vha);
+ qla8044_flash_unlock(vha);
+ scsi_unblock_requests(vha->host);
+ kfree(p_cache);
+
+ return rval;
+}
+
+#define LEG_INT_PTR_B31 (1 << 31)
+#define LEG_INT_PTR_B30 (1 << 30)
+#define PF_BITS_MASK (0xF << 16)
+/**
+ * qla8044_intr_handler() - Process interrupts for the ISP8044
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla8044_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t mb[4];
+ uint32_t leg_int_ptr = 0, pf_bit;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0xb143,
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+
+ /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+ if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb144,
+ "%s: Legacy Interrupt Bit 31 not set, "
+ "spurious interrupt!\n", __func__);
+ return IRQ_NONE;
+ }
+
+ pf_bit = ha->portnum << 16;
+ /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb145,
+ "%s: Incorrect function ID 0x%x in "
+ "legacy interrupt register, "
+ "ha->pf_bit = 0x%x\n", __func__,
+ (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
+ return IRQ_NONE;
+ }
+
+ /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
+ * Control register and poll till Legacy Interrupt Pointer register
+ * bit32 is 0.
+ */
+ qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
+ do {
+ leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
+ break;
+ } while (leg_int_ptr & (LEG_INT_PTR_B30));
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_p3p, vha, 0xb146,
+ "Unrecognized interrupt type "
+ "(%d).\n", stat & 0xff);
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int
+qla8044_idc_dontreset(struct qla_hw_data *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ return idc_ctrl & DONTRESET_BIT0;
+}
+
+static void
+qla8044_clear_rst_ready(scsi_qla_host_t *vha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ /*
+ * For ISP8044, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP82xx, drv_active has 4 bits per function
+ */
+ drv_state &= ~(1 << vha->hw->portnum);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb13d,
+ "drv_state: 0x%08x\n", drv_state);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+int
+qla8044_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint32_t dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_idc_lock(ha);
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (ql2xdontresethba)
+ qla8044_set_idc_dontreset(vha);
+
+ /* If device_state is NEED_RESET, go ahead with
+ * Reset,irrespective of ql2xdontresethba. This is to allow a
+ * non-reset-owner to force a reset. Non-reset-owner sets
+ * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
+ * and then forces a Reset by setting device_state to
+ * NEED_RESET. */
+ if (dev_state == QLA8XXX_DEV_READY) {
+ /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset
+ * recovery */
+ if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb13e,
+ "Reset recovery disabled\n");
+ rval = QLA_FUNCTION_FAILED;
+ goto exit_isp_reset;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb140,
+ "HW State: NEED RESET\n");
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_NEED_RESET);
+ }
+
+ /* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority
+ * and which drivers are present. Unlike ISP82XX, the function setting
+ * NEED_RESET, may not be the Reset owner. */
+ qla83xx_reset_ownership(vha);
+
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ qla8044_idc_lock(ha);
+ qla8044_clear_rst_ready(vha);
+
+exit_isp_reset:
+ qla8044_idc_unlock(ha);
+ if (rval == QLA_SUCCESS) {
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ rval = qla82xx_restart_isp(vha);
+ }
+
+ return rval;
+}
+
+void
+qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->allow_cna_fw_dump)
+ return;
+
+ scsi_block_requests(vha->host);
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla8044_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
new file mode 100644
index 000000000..ada36057d
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -0,0 +1,599 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_NX2_H
+#define __QLA_NX2_H
+
+#define QSNT_ACK_TOV 30
+#define INTENT_TO_RECOVER 0x01
+#define PROCEED_TO_RECOVER 0x02
+#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C
+#define IDC_LOCK_RECOVERY_STATE_MASK 0x3
+#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2
+
+#define QLA8044_DRV_LOCK_MSLEEP 200
+#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
+#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \
+ MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000)
+#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000)
+#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000)
+#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff)
+#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000)
+#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000)
+#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff)
+
+/* PCI Windowing for DDR regions. */
+#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+
+/* Indirectly Mapped Registers */
+#define QLA8044_FLASH_SPI_STATUS 0x2808E010
+#define QLA8044_FLASH_SPI_CONTROL 0x2808E014
+#define QLA8044_FLASH_STATUS 0x42100004
+#define QLA8044_FLASH_CONTROL 0x42110004
+#define QLA8044_FLASH_ADDR 0x42110008
+#define QLA8044_FLASH_WRDATA 0x4211000C
+#define QLA8044_FLASH_RDDATA 0x42110018
+#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030
+#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+
+/* Flash access regs */
+#define QLA8044_FLASH_LOCK 0x3850
+#define QLA8044_FLASH_UNLOCK 0x3854
+#define QLA8044_FLASH_LOCK_ID 0x3500
+
+/* Driver Lock regs */
+#define QLA8044_DRV_LOCK 0x3868
+#define QLA8044_DRV_UNLOCK 0x386C
+#define QLA8044_DRV_LOCK_ID 0x3504
+#define QLA8044_DRV_LOCKRECOVERY 0x379C
+
+/* IDC version */
+#define QLA8044_IDC_VER_MAJ_VALUE 0x1
+#define QLA8044_IDC_VER_MIN_VALUE 0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA8044_CRB_IDC_VER_MAJOR 0x3780
+#define QLA8044_CRB_IDC_VER_MINOR 0x3798
+#define QLA8044_IDC_DRV_AUDIT 0x3794
+#define QLA8044_SRE_SHIM_CONTROL 0x0D200284
+#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4
+#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4
+#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388
+#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388
+#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C
+#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C
+#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704
+#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704
+
+/* set value to pause threshold value */
+#define QLA8044_SET_PAUSE_VAL 0x0
+#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF
+#define QLA8044_PEG_HALT_STATUS1 0x34A8
+#define QLA8044_PEG_HALT_STATUS2 0x34AC
+#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
+#define QLA8044_FW_CAPABILITIES 0x3528
+#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
+#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
+#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
+#define QLA8044_CRB_DRV_SCRATCH 0x3548
+#define QLA8044_CRB_DEV_PART_INFO1 0x37E0
+#define QLA8044_CRB_DEV_PART_INFO2 0x37E4
+#define QLA8044_FW_VER_MAJOR 0x3550
+#define QLA8044_FW_VER_MINOR 0x3554
+#define QLA8044_FW_VER_SUB 0x3558
+#define QLA8044_NPAR_STATE 0x359C
+#define QLA8044_FW_IMAGE_VALID 0x35FC
+#define QLA8044_CMDPEG_STATE 0x3650
+#define QLA8044_ASIC_TEMP 0x37B4
+#define QLA8044_FW_API 0x356C
+#define QLA8044_DRV_OP_MODE 0x3570
+#define QLA8044_CRB_WIN_BASE 0x3800
+#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4))
+#define QLA8044_SEM_LOCK_BASE 0x3840
+#define QLA8044_SEM_UNLOCK_BASE 0x3844
+#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8))
+#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8))
+#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
+#define QLA8044_LINK_SPEED_FACTOR 10
+#define QLA8044_FUN7_ACTIVE_INDEX 0x80
+
+/* FLASH API Defines */
+#define QLA8044_FLASH_MAX_WAIT_USEC 100
+#define QLA8044_FLASH_LOCK_TIMEOUT 10000
+#define QLA8044_FLASH_SECTOR_SIZE 65536
+#define QLA8044_DRV_LOCK_TIMEOUT 2000
+#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLA8044_FLASH_WRITE_CMD 0xdacdacda
+#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca
+#define QLA8044_FLASH_READ_RETRY_COUNT 2000
+#define QLA8044_FLASH_STATUS_READY 0x6
+#define QLA8044_FLASH_BUFFER_WRITE_MIN 2
+#define QLA8044_FLASH_BUFFER_WRITE_MAX 64
+#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA8044_ERASE_MODE 1
+#define QLA8044_WRITE_MODE 2
+#define QLA8044_DWORD_WRITE_MODE 3
+#define QLA8044_GLOBAL_RESET 0x38CC
+#define QLA8044_WILDCARD 0x38F0
+#define QLA8044_INFORMANT 0x38FC
+#define QLA8044_HOST_MBX_CTRL 0x3038
+#define QLA8044_FW_MBX_CTRL 0x303C
+#define QLA8044_BOOTLOADER_ADDR 0x355C
+#define QLA8044_BOOTLOADER_SIZE 0x3560
+#define QLA8044_FW_IMAGE_ADDR 0x3564
+#define QLA8044_MBX_INTR_ENABLE 0x1000
+#define QLA8044_MBX_INTR_MASK 0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0 0x1
+#define GRACEFUL_RESET_BIT1 0x2
+
+/* ISP8044 PEG_HALT_STATUS1 bits */
+#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29)
+#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLA8044_BOOT_FROM_FLASH 0
+#define QLA8044_IDC_PARAM_ADDR 0x3e8020
+
+/* FLASH related definitions */
+#define QLA8044_OPTROM_BURST_SIZE 0x100
+#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4)
+#define QLA8044_MIN_OPTROM_BURST_DWORDS 2
+#define QLA8044_SECTOR_SIZE (64 * 1024)
+
+#define QLA8044_FLASH_SPI_CTL 0x4
+#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000
+#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001
+#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43
+#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D
+#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100
+#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLA8044_FLASH_ERASE_SIG 0xFD0300
+#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D
+
+/* Reset template definitions */
+#define QLA8044_MAX_RESET_SEQ_ENTRIES 16
+#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000
+#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLA8044_RESET_SEQ_VERSION 0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP 0x0000
+#define OPCODE_WRITE_LIST 0x0001
+#define OPCODE_READ_WRITE_LIST 0x0002
+#define OPCODE_POLL_LIST 0x0004
+#define OPCODE_POLL_WRITE_LIST 0x0008
+#define OPCODE_READ_MODIFY_WRITE 0x0010
+#define OPCODE_SEQ_PAUSE 0x0020
+#define OPCODE_SEQ_END 0x0040
+#define OPCODE_TMPL_END 0x0080
+#define OPCODE_POLL_READ_LIST 0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
+#define QLA8044_IDC_DRV_CTRL 0x3790
+#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */
+
+#define MINIDUMP_SIZE_36K 36864
+
+struct qla8044_reset_template_hdr {
+ uint16_t version;
+ uint16_t signature;
+ uint16_t size;
+ uint16_t entries;
+ uint16_t hdr_size;
+ uint16_t checksum;
+ uint16_t init_seq_offset;
+ uint16_t start_seq_offset;
+} __packed;
+
+/* Common Entry Header. */
+struct qla8044_reset_entry_hdr {
+ uint16_t cmd;
+ uint16_t size;
+ uint16_t count;
+ uint16_t delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla8044_poll {
+ uint32_t test_mask;
+ uint32_t test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla8044_rmw {
+ uint32_t test_mask;
+ uint32_t xor_value;
+ uint32_t or_value;
+ uint8_t shl;
+ uint8_t shr;
+ uint8_t index_a;
+ uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla8044_entry {
+ uint32_t arg1;
+ uint32_t arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla8044_quad_entry {
+ uint32_t dr_addr;
+ uint32_t dr_value;
+ uint32_t ar_addr;
+ uint32_t ar_value;
+} __packed;
+
+struct qla8044_reset_template {
+ int seq_index;
+ int seq_error;
+ int array_index;
+ uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES];
+ uint8_t *buff;
+ uint8_t *stop_offset;
+ uint8_t *start_offset;
+ uint8_t *init_offset;
+ struct qla8044_reset_template_hdr *hdr;
+ uint8_t seq_end;
+ uint8_t template_end;
+};
+
+/* Driver_code is for driver to write some info about the entry
+ * currently not used.
+ */
+struct qla8044_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+} __packed;
+
+/* Read CRB entry header */
+struct qla8044_minidump_entry_crb {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+} __packed;
+
+struct qla8044_minidump_entry_cache {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+} __packed;
+
+/* Read OCM */
+struct qla8044_minidump_entry_rdocm {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+} __packed;
+
+/* Read Memory */
+struct qla8044_minidump_entry_rdmem {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read Memory: For Pex-DMA */
+struct qla8044_minidump_entry_rdmem_pex_dma {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t desc_card_addr;
+ uint16_t dma_desc_cmd;
+ uint8_t rsvd[2];
+ uint32_t start_dma_cmd;
+ uint8_t rsvd2[12];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/* Read ROM */
+struct qla8044_minidump_entry_rdrom {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/* Mux entry */
+struct qla8044_minidump_entry_mux {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+} __packed;
+
+/* Queue entry */
+struct qla8044_minidump_entry_queue {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+} __packed;
+
+/* POLLRD Entry */
+struct qla8044_minidump_entry_pollrd {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t read_addr;
+ uint32_t select_value;
+ uint16_t select_value_stride;
+ uint16_t op_count;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t data_size;
+ uint32_t rsvd_1;
+} __packed;
+
+struct qla8044_minidump_entry_rddfe {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t value;
+ uint8_t stride;
+ uint8_t stride2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+ struct qla8044_minidump_entry_hdr h;
+
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint8_t stride_1;
+ uint8_t stride_2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t value_2;
+ uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
+/* RDMUX2 Entry */
+struct qla8044_minidump_entry_rdmux2 {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr_1;
+ uint32_t select_addr_2;
+ uint32_t select_value_1;
+ uint32_t select_value_2;
+ uint32_t op_count;
+ uint32_t select_value_mask;
+ uint32_t read_addr;
+ uint8_t select_value_stride;
+ uint8_t data_size;
+ uint8_t rsvd[2];
+} __packed;
+
+/* POLLRDMWR Entry */
+struct qla8044_minidump_entry_pollrdmwr {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+} __packed;
+
+/* IDC additional information */
+struct qla8044_idc_information {
+ uint32_t request_desc; /* IDC request descriptor */
+ uint32_t info1; /* IDC additional info */
+ uint32_t info2; /* IDC additional info */
+ uint32_t info3; /* IDC additional info */
+} __packed;
+
+enum qla_regs {
+ QLA8044_PEG_HALT_STATUS1_INDEX = 0,
+ QLA8044_PEG_HALT_STATUS2_INDEX,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX,
+ QLA8044_CRB_DRV_ACTIVE_INDEX,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8044_CRB_DRV_STATE_INDEX,
+ QLA8044_CRB_DRV_SCRATCH_INDEX,
+ QLA8044_CRB_DEV_PART_INFO_INDEX,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+ QLA8044_FW_VERSION_MAJOR_INDEX,
+ QLA8044_FW_VERSION_MINOR_INDEX,
+ QLA8044_FW_VERSION_SUB_INDEX,
+ QLA8044_CRB_CMDPEG_STATE_INDEX,
+ QLA8044_CRB_TEMP_STATE_INDEX,
+} __packed;
+
+#define CRB_REG_INDEX_MAX 14
+#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
+#define CRB_CMDPEG_CHECK_DELAY 500
+
+static const uint32_t qla8044_reg_tbl[] = {
+ QLA8044_PEG_HALT_STATUS1,
+ QLA8044_PEG_HALT_STATUS2,
+ QLA8044_PEG_ALIVE_COUNTER,
+ QLA8044_CRB_DRV_ACTIVE,
+ QLA8044_CRB_DEV_STATE,
+ QLA8044_CRB_DRV_STATE,
+ QLA8044_CRB_DRV_SCRATCH,
+ QLA8044_CRB_DEV_PART_INFO1,
+ QLA8044_CRB_IDC_VER_MAJOR,
+ QLA8044_FW_VER_MAJOR,
+ QLA8044_FW_VER_MINOR,
+ QLA8044_FW_VER_SUB,
+ QLA8044_CMDPEG_STATE,
+ QLA8044_ASIC_TEMP,
+};
+
+/* MiniDump Structures */
+
+/* Driver_code is for driver to write some info about the entry
+ * currently not used.
+ */
+#define QLA8044_SS_OCM_WNDREG_INDEX 3
+#define QLA8044_DBG_STATE_ARRAY_LEN 16
+#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA8044_DBG_RSVD_ARRAY_LEN 8
+#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
+#define QLA8044_SS_PCI_INDEX 0
+#define QLA8044_RDDFE 38
+#define QLA8044_RDMDIO 39
+#define QLA8044_POLLWR 40
+
+struct qla8044_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN];
+ uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN];
+};
+
+struct qla8044_pex_dma_descriptor {
+ struct {
+ uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+ uint8_t rsvd[2];
+ uint16_t dma_desc_cmd;
+ } cmd;
+ uint64_t src_addr;
+ uint64_t dma_bus_addr; /*0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd*/
+ uint8_t rsvd[24];
+} __packed;
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
new file mode 100644
index 000000000..a0305d83b
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -0,0 +1,5831 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "qla_target.h"
+
+/*
+ * Driver version
+ */
+char qla2x00_version_str[40];
+
+static int apidev_major;
+
+/*
+ * SRB allocation cache
+ */
+static struct kmem_cache *srb_cachep;
+
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+/*
+ * error level for logging
+ */
+int ql_errlev = ql_log_all;
+
+static int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+ "Specify if Class 2 operations are supported from the very "
+ "beginning. Default is 0 - class 2 not supported.");
+
+
+int ql2xlogintimeout = 20;
+module_param(ql2xlogintimeout, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xlogintimeout,
+ "Login timeout value in seconds.");
+
+int qlport_down_retry;
+module_param(qlport_down_retry, int, S_IRUGO);
+MODULE_PARM_DESC(qlport_down_retry,
+ "Maximum number of command retries to a port that returns "
+ "a PORT-DOWN status.");
+
+int ql2xplogiabsentdevice;
+module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xplogiabsentdevice,
+ "Option to enable PLOGI to devices that are not present after "
+ "a Fabric scan. This is needed for several broken switches. "
+ "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+
+int ql2xloginretrycount = 0;
+module_param(ql2xloginretrycount, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xloginretrycount,
+ "Specify an alternate value for the NVRAM login retry count.");
+
+int ql2xallocfwdump = 1;
+module_param(ql2xallocfwdump, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xallocfwdump,
+ "Option to enable allocation of memory for a firmware dump "
+ "during HBA initialization. Memory allocation requirements "
+ "vary by ISP type. Default is 1 - allocate memory.");
+
+int ql2xextended_error_logging;
+module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xextended_error_logging,
+ "Option to enable extended error logging,\n"
+ "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
+ "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
+ "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
+ "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
+ "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
+ "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
+ "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
+ "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
+ "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
+ "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
+ "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+ "\t\t0x1e400000 - Preferred value for capturing essential "
+ "debug information (equivalent to old "
+ "ql2xextended_error_logging=1).\n"
+ "\t\tDo LOGICAL OR of the value to enable more than one level");
+
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+ "Set to control shifting of command type processing "
+ "based on total number of SG elements.");
+
+int ql2xfdmienable=1;
+module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xfdmienable,
+ "Enables FDMI registrations. "
+ "0 - no FDMI. Default is 1 - perform FDMI.");
+
+#define MAX_Q_DEPTH 32
+static int ql2xmaxqdepth = MAX_Q_DEPTH;
+module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xmaxqdepth,
+ "Maximum queue depth to set for each LUN. "
+ "Default is 32.");
+
+int ql2xenabledif = 2;
+module_param(ql2xenabledif, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xenabledif,
+ " Enable T10-CRC-DIF:\n"
+ " Default is 2.\n"
+ " 0 -- No DIF Support\n"
+ " 1 -- Enable DIF for all types\n"
+ " 2 -- Enable DIF for all types, except Type 0.\n");
+
+int ql2xenablehba_err_chk = 2;
+module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenablehba_err_chk,
+ " Enable T10-CRC-DIF Error isolation by HBA:\n"
+ " Default is 2.\n"
+ " 0 -- Error isolation disabled\n"
+ " 1 -- Error isolation enabled only for DIX Type 0\n"
+ " 2 -- Error isolation enabled for all Types\n");
+
+int ql2xiidmaenable=1;
+module_param(ql2xiidmaenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xiidmaenable,
+ "Enables iIDMA settings "
+ "Default is 1 - perform iIDMA. 0 - no iIDMA.");
+
+int ql2xmaxqueues = 1;
+module_param(ql2xmaxqueues, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmaxqueues,
+ "Enables MQ settings "
+ "Default is 1 for single queue. Set it to number "
+ "of queues in MQ mode.");
+
+int ql2xmultique_tag;
+module_param(ql2xmultique_tag, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmultique_tag,
+ "Enables CPU affinity settings for the driver "
+ "Default is 0 for no affinity of request and response IO. "
+ "Set it to 1 to turn on the cpu affinity.");
+
+int ql2xfwloadbin;
+module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xfwloadbin,
+ "Option to specify location from which to load ISP firmware:.\n"
+ " 2 -- load firmware via the reject_firmware() (hotplug).\n"
+ " interface.\n"
+ " 1 -- load firmware from flash.\n"
+ " 0 -- use default semantics.\n");
+
+int ql2xetsenable;
+module_param(ql2xetsenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xetsenable,
+ "Enables firmware ETS burst."
+ "Default is 0 - skip ETS enablement.");
+
+int ql2xdbwr = 1;
+module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+ "Option to specify scheme for request queue posting.\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xtargetreset,
+ "Enable target reset."
+ "Default is 1 - use hw defaults.");
+
+int ql2xgffidenable;
+module_param(ql2xgffidenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xgffidenable,
+ "Enables GFF_ID checks of port type. "
+ "Default is 0 - Do not use GFF_ID information.");
+
+int ql2xasynctmfenable;
+module_param(ql2xasynctmfenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xasynctmfenable,
+ "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
+ "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+ "Option to specify reset behaviour.\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
+
+uint64_t ql2xmaxlun = MAX_LUNS;
+module_param(ql2xmaxlun, ullong, S_IRUGO);
+MODULE_PARM_DESC(ql2xmaxlun,
+ "Defines the maximum LU number to register with the SCSI "
+ "midlayer. Default is 65535.");
+
+int ql2xmdcapmask = 0x1F;
+module_param(ql2xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdcapmask,
+ "Set the Minidump driver capture mask level. "
+ "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+
+int ql2xmdenable = 1;
+module_param(ql2xmdenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdenable,
+ "Enable/disable MiniDump. "
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
+
+/*
+ * SCSI host template entry points
+ */
+static int qla2xxx_slave_configure(struct scsi_device * device);
+static int qla2xxx_slave_alloc(struct scsi_device *);
+static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
+static void qla2xxx_scan_start(struct Scsi_Host *);
+static void qla2xxx_slave_destroy(struct scsi_device *);
+static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static int qla2xxx_eh_abort(struct scsi_cmnd *);
+static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
+
+static void qla2x00_clear_drv_active(struct qla_hw_data *);
+static void qla2x00_free_device(scsi_qla_host_t *);
+static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+
+struct scsi_host_template qla2xxx_driver_template = {
+ .module = THIS_MODULE,
+ .name = QLA2XXX_DRIVER_NAME,
+ .queuecommand = qla2xxx_queuecommand,
+
+ .eh_abort_handler = qla2xxx_eh_abort,
+ .eh_device_reset_handler = qla2xxx_eh_device_reset,
+ .eh_target_reset_handler = qla2xxx_eh_target_reset,
+ .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
+ .eh_host_reset_handler = qla2xxx_eh_host_reset,
+
+ .slave_configure = qla2xxx_slave_configure,
+
+ .slave_alloc = qla2xxx_slave_alloc,
+ .slave_destroy = qla2xxx_slave_destroy,
+ .scan_finished = qla2xxx_scan_finished,
+ .scan_start = qla2xxx_scan_start,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = SG_ALL,
+
+ .max_sectors = 0xFFFF,
+ .shost_attrs = qla2x00_host_attrs,
+
+ .supported_mode = MODE_INITIATOR,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static struct scsi_transport_template *qla2xxx_transport_template = NULL;
+struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
+
+/* TODO Convert to inlines
+ *
+ * Timer routines
+ */
+
+__inline__ void
+qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
+{
+ init_timer(&vha->timer);
+ vha->timer.expires = jiffies + interval * HZ;
+ vha->timer.data = (unsigned long)vha;
+ vha->timer.function = (void (*)(unsigned long))func;
+ add_timer(&vha->timer);
+ vha->timer_active = 1;
+}
+
+static inline void
+qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
+{
+ /* Currently used for 82XX only. */
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_dbg(ql_dbg_timer, vha, 0x600d,
+ "Device in a failed state, returning.\n");
+ return;
+ }
+
+ mod_timer(&vha->timer, jiffies + interval * HZ);
+}
+
+static __inline__ void
+qla2x00_stop_timer(scsi_qla_host_t *vha)
+{
+ del_timer_sync(&vha->timer);
+ vha->timer_active = 0;
+}
+
+static int qla2x00_do_dpc(void *data);
+
+static void qla2x00_rst_aen(scsi_qla_host_t *);
+
+static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
+ struct req_que **, struct rsp_que **);
+static void qla2x00_free_fw_dump(struct qla_hw_data *);
+static void qla2x00_mem_free(struct qla_hw_data *);
+
+/* -------------------------------------------------------------------------- */
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ struct rsp_que *rsp)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
+ GFP_KERNEL);
+ if (!ha->req_q_map) {
+ ql_log(ql_log_fatal, vha, 0x003b,
+ "Unable to allocate memory for request queue ptrs.\n");
+ goto fail_req_map;
+ }
+
+ ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
+ GFP_KERNEL);
+ if (!ha->rsp_q_map) {
+ ql_log(ql_log_fatal, vha, 0x003c,
+ "Unable to allocate memory for response queue ptrs.\n");
+ goto fail_rsp_map;
+ }
+ /*
+ * Make sure we record at least the request and response queue zero in
+ * case we need to free them if part of the probe fails.
+ */
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
+ set_bit(0, ha->rsp_qid_map);
+ set_bit(0, ha->req_qid_map);
+ return 1;
+
+fail_rsp_map:
+ kfree(ha->req_q_map);
+ ha->req_q_map = NULL;
+fail_req_map:
+ return -ENOMEM;
+}
+
+static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
+{
+ if (IS_QLAFX00(ha)) {
+ if (req && req->ring_fx00)
+ dma_free_coherent(&ha->pdev->dev,
+ (req->length_fx00 + 1) * sizeof(request_t),
+ req->ring_fx00, req->dma_fx00);
+ } else if (req && req->ring)
+ dma_free_coherent(&ha->pdev->dev,
+ (req->length + 1) * sizeof(request_t),
+ req->ring, req->dma);
+
+ if (req)
+ kfree(req->outstanding_cmds);
+
+ kfree(req);
+ req = NULL;
+}
+
+static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+ if (IS_QLAFX00(ha)) {
+ if (rsp && rsp->ring)
+ dma_free_coherent(&ha->pdev->dev,
+ (rsp->length_fx00 + 1) * sizeof(request_t),
+ rsp->ring_fx00, rsp->dma_fx00);
+ } else if (rsp && rsp->ring) {
+ dma_free_coherent(&ha->pdev->dev,
+ (rsp->length + 1) * sizeof(response_t),
+ rsp->ring, rsp->dma);
+ }
+ kfree(rsp);
+ rsp = NULL;
+}
+
+static void qla2x00_free_queues(struct qla_hw_data *ha)
+{
+ struct req_que *req;
+ struct rsp_que *rsp;
+ int cnt;
+
+ for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+ qla2x00_free_req_que(ha, req);
+ }
+ kfree(ha->req_q_map);
+ ha->req_q_map = NULL;
+
+ for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ qla2x00_free_rsp_que(ha, rsp);
+ }
+ kfree(ha->rsp_q_map);
+ ha->rsp_q_map = NULL;
+}
+
+static int qla25xx_setup_mode(struct scsi_qla_host *vha)
+{
+ uint16_t options = 0;
+ int ques, req, ret;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!(ha->fw_attributes & BIT_6)) {
+ ql_log(ql_log_warn, vha, 0x00d8,
+ "Firmware is not multi-queue capable.\n");
+ goto fail;
+ }
+ if (ql2xmultique_tag) {
+ /* create a request queue for IO */
+ options |= BIT_7;
+ req = qla25xx_create_req_que(ha, options, 0, 0, -1,
+ QLA_DEFAULT_QUE_QOS);
+ if (!req) {
+ ql_log(ql_log_warn, vha, 0x00e0,
+ "Failed to create request queue.\n");
+ goto fail;
+ }
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+ vha->req = ha->req_q_map[req];
+ options |= BIT_1;
+ for (ques = 1; ques < ha->max_rsp_queues; ques++) {
+ ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
+ if (!ret) {
+ ql_log(ql_log_warn, vha, 0x00e8,
+ "Failed to create response queue.\n");
+ goto fail2;
+ }
+ }
+ ha->flags.cpu_affinity_enabled = 1;
+ ql_dbg(ql_dbg_multiq, vha, 0xc007,
+ "CPU affinity mode enabled, "
+ "no. of response queues:%d no. of request queues:%d.\n",
+ ha->max_rsp_queues, ha->max_req_queues);
+ ql_dbg(ql_dbg_init, vha, 0x00e9,
+ "CPU affinity mode enabled, "
+ "no. of response queues:%d no. of request queues:%d.\n",
+ ha->max_rsp_queues, ha->max_req_queues);
+ }
+ return 0;
+fail2:
+ qla25xx_delete_queues(vha);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
+fail:
+ ha->mqenable = 0;
+ kfree(ha->req_q_map);
+ kfree(ha->rsp_q_map);
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ return 1;
+}
+
+static char *
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ struct qla_hw_data *ha = vha->hw;
+ static char *pci_bus_modes[] = {
+ "33", "66", "100", "133",
+ };
+ uint16_t pci_bus;
+
+ strcpy(str, "PCI");
+ pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
+ if (pci_bus) {
+ strcat(str, "-X (");
+ strcat(str, pci_bus_modes[pci_bus]);
+ } else {
+ pci_bus = (ha->pci_attr & BIT_8) >> 8;
+ strcat(str, " (");
+ strcat(str, pci_bus_modes[pci_bus]);
+ }
+ strcat(str, " MHz)");
+
+ return (str);
+}
+
+static char *
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ static char *pci_bus_modes[] = { "33", "66", "100", "133", };
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t pci_bus;
+
+ if (pci_is_pcie(ha->pdev)) {
+ char lwstr[6];
+ uint32_t lstat, lspeed, lwidth;
+
+ pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+ lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+ lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
+
+ strcpy(str, "PCIe (");
+ switch (lspeed) {
+ case 1:
+ strcat(str, "2.5GT/s ");
+ break;
+ case 2:
+ strcat(str, "5.0GT/s ");
+ break;
+ case 3:
+ strcat(str, "8.0GT/s ");
+ break;
+ default:
+ strcat(str, "<unknown> ");
+ break;
+ }
+ snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
+ strcat(str, lwstr);
+
+ return str;
+ }
+
+ strcpy(str, "PCI");
+ pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
+ if (pci_bus == 0 || pci_bus == 8) {
+ strcat(str, " (");
+ strcat(str, pci_bus_modes[pci_bus >> 3]);
+ } else {
+ strcat(str, "-X ");
+ if (pci_bus & BIT_2)
+ strcat(str, "Mode 2");
+ else
+ strcat(str, "Mode 1");
+ strcat(str, " (");
+ strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
+ }
+ strcat(str, " MHz)");
+
+ return str;
+}
+
+static char *
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
+{
+ char un_str[10];
+ struct qla_hw_data *ha = vha->hw;
+
+ snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
+ ha->fw_minor_version, ha->fw_subminor_version);
+
+ if (ha->fw_attributes & BIT_9) {
+ strcat(str, "FLX");
+ return (str);
+ }
+
+ switch (ha->fw_attributes & 0xFF) {
+ case 0x7:
+ strcat(str, "EF");
+ break;
+ case 0x17:
+ strcat(str, "TP");
+ break;
+ case 0x37:
+ strcat(str, "IP");
+ break;
+ case 0x77:
+ strcat(str, "VI");
+ break;
+ default:
+ sprintf(un_str, "(%x)", ha->fw_attributes);
+ strcat(str, un_str);
+ break;
+ }
+ if (ha->fw_attributes & 0x100)
+ strcat(str, "X");
+
+ return (str);
+}
+
+static char *
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
+ ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
+ return str;
+}
+
+void
+qla2x00_sp_free_dma(void *vha, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
+
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List assured to be having elements */
+ qla2x00_clean_dsd_pool(ha, sp, NULL);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ ctx1 = NULL;
+ }
+
+ CMD_SP(cmd) = NULL;
+ qla2x00_rel_sp(sp->fcport->vha, sp);
+}
+
+static void
+qla2x00_sp_compl(void *data, void *ptr, int res)
+{
+ struct qla_hw_data *ha = (struct qla_hw_data *)data;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ BUG();
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2x00_sp_free_dma(ha, sp);
+ cmd->scsi_done(cmd);
+}
+
+/* If we are SP1 here, we need to still take and release the host_lock as SP1
+ * does not have the changes necessary to avoid taking host->host_lock.
+ */
+static int
+qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+ int rval;
+
+ if (ha->flags.eeh_busy) {
+ if (ha->flags.pci_channel_io_perm_failure) {
+ ql_dbg(ql_dbg_aer, vha, 0x9010,
+ "PCI Channel IO permanent failure, exiting "
+ "cmd=%p.\n", cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+ } else {
+ ql_dbg(ql_dbg_aer, vha, 0x9011,
+ "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
+ cmd->result = DID_REQUEUE << 16;
+ }
+ goto qc24_fail_command;
+ }
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ cmd->result = rval;
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
+ goto qc24_fail_command;
+ }
+
+ if (!vha->flags.difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ ql_dbg(ql_dbg_io, vha, 0x3004,
+ "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
+ cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
+ if (!fcport) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3005,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+ goto qc24_target_busy;
+ }
+
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto qc24_host_busy;
+
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2x00_sp_free_dma;
+ sp->done = qla2x00_sp_compl;
+
+ rval = ha->isp_ops->start_scsi(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ goto qc24_host_busy_free_sp;
+ }
+
+ return 0;
+
+qc24_host_busy_free_sp:
+ qla2x00_sp_free_dma(ha, sp);
+
+qc24_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+ cmd->scsi_done(cmd);
+
+ return 0;
+}
+
+/*
+ * qla2x00_eh_wait_on_command
+ * Waits for the command to be returned by the Firmware for some
+ * max time.
+ *
+ * Input:
+ * cmd = Scsi Command to wait on.
+ *
+ * Return:
+ * Not Found : 0
+ * Found : 1
+ */
+static int
+qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
+{
+#define ABORT_POLLING_PERIOD 1000
+#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
+ unsigned long wait_iter = ABORT_WAIT_ITER;
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
+ int ret = QLA_SUCCESS;
+
+ if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8005,
+ "Return:eh_wait.\n");
+ return ret;
+ }
+
+ while (CMD_SP(cmd) && wait_iter--) {
+ msleep(ABORT_POLLING_PERIOD);
+ }
+ if (CMD_SP(cmd))
+ ret = QLA_FUNCTION_FAILED;
+
+ return ret;
+}
+
+/*
+ * qla2x00_wait_for_hba_online
+ * Wait till the HBA is online after going through
+ * <= MAX_RETRIES_OF_ISP_ABORT or
+ * finally HBA is disabled ie marked offline
+ *
+ * Input:
+ * ha - pointer to host adapter structure
+ *
+ * Note:
+ * Does context switching-Release SPIN_LOCK
+ * (if any) before calling this routine.
+ *
+ * Return:
+ * Success (Adapter is online) : 0
+ * Failed (Adapter is offline/disabled) : 1
+ */
+int
+qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
+{
+ int return_status;
+ unsigned long wait_online;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ ha->dpc_active) && time_before(jiffies, wait_online)) {
+
+ msleep(1000);
+ }
+ if (base_vha->flags.online)
+ return_status = QLA_SUCCESS;
+ else
+ return_status = QLA_FUNCTION_FAILED;
+
+ return (return_status);
+}
+
+/*
+ * qla2x00_wait_for_hba_ready
+ * Wait till the HBA is ready before doing driver unload
+ *
+ * Input:
+ * ha - pointer to host adapter structure
+ *
+ * Note:
+ * Does context switching-Release SPIN_LOCK
+ * (if any) before calling this routine.
+ *
+ */
+static void
+qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
+ ha->flags.mbox_busy) ||
+ test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+ test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
+ msleep(1000);
+}
+
+int
+qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
+{
+ int return_status;
+ unsigned long wait_reset;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ ha->dpc_active) && time_before(jiffies, wait_reset)) {
+
+ msleep(1000);
+
+ if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+ ha->flags.chip_reset_done)
+ break;
+ }
+ if (ha->flags.chip_reset_done)
+ return_status = QLA_SUCCESS;
+ else
+ return_status = QLA_FUNCTION_FAILED;
+
+ return return_status;
+}
+
+static void
+sp_get(struct srb *sp)
+{
+ atomic_inc(&sp->ref_count);
+}
+
+/**************************************************************************
+* qla2xxx_eh_abort
+*
+* Description:
+* The abort function will abort the specified command.
+*
+* Input:
+* cmd = Linux SCSI command packet to be aborted.
+*
+* Returns:
+* Either SUCCESS or FAILED.
+*
+* Note:
+* Only return FAILED if command not returned by firmware.
+**************************************************************************/
+static int
+qla2xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ srb_t *sp;
+ int ret;
+ unsigned int id;
+ uint64_t lun;
+ unsigned long flags;
+ int rval, wait = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!CMD_SP(cmd))
+ return SUCCESS;
+
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = SUCCESS;
+
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ sp = (srb_t *) CMD_SP(cmd);
+ if (!sp) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return SUCCESS;
+ }
+
+ ql_dbg(ql_dbg_taskm, vha, 0x8002,
+ "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n",
+ vha->host_no, id, lun, sp, cmd);
+
+ /* Get a reference to the sp and drop the lock.*/
+ sp_get(sp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ rval = ha->isp_ops->abort_command(sp);
+ if (rval) {
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
+ /*
+ * Decrement the ref_count since we can't find the
+ * command
+ */
+ atomic_dec(&sp->ref_count);
+ ret = SUCCESS;
+ } else
+ ret = FAILED;
+
+ ql_dbg(ql_dbg_taskm, vha, 0x8003,
+ "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
+ } else {
+ ql_dbg(ql_dbg_taskm, vha, 0x8004,
+ "Abort command mbx success cmd=%p.\n", cmd);
+ wait = 1;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ /*
+ * Clear the slot in the oustanding_cmds array if we can't find the
+ * command to reclaim the resources.
+ */
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+ vha->req->outstanding_cmds[sp->handle] = NULL;
+ sp->done(ha, sp, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Did the command return during mailbox execution? */
+ if (ret == FAILED && !CMD_SP(cmd))
+ ret = SUCCESS;
+
+ /* Wait for the command to be returned. */
+ if (wait) {
+ if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8006,
+ "Abort handler timed out cmd=%p.\n", cmd);
+ ret = FAILED;
+ }
+ }
+
+ ql_log(ql_log_info, vha, 0x801c,
+ "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n",
+ vha->host_no, id, lun, wait, ret);
+
+ return ret;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
+{
+ int cnt, match, status;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ srb_t *sp;
+ struct scsi_cmnd *cmd;
+
+ status = QLA_SUCCESS;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ req = vha->req;
+ for (cnt = 1; status == QLA_SUCCESS &&
+ cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->type != SRB_SCSI_CMD)
+ continue;
+ if (vha->vp_idx != sp->fcport->vha->vp_idx)
+ continue;
+ match = 0;
+ cmd = GET_CMD_SP(sp);
+ switch (type) {
+ case WAIT_HOST:
+ match = 1;
+ break;
+ case WAIT_TARGET:
+ match = cmd->device->id == t;
+ break;
+ case WAIT_LUN:
+ match = (cmd->device->id == t &&
+ cmd->device->lun == l);
+ break;
+ }
+ if (!match)
+ continue;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ status = qla2x00_eh_wait_on_command(cmd);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return status;
+}
+
+static char *reset_errors[] = {
+ "HBA not online",
+ "HBA not ready",
+ "Task management failed",
+ "Waiting for command completions",
+};
+
+static int
+__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
+ struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
+{
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ int err;
+
+ if (!fcport) {
+ return FAILED;
+ }
+
+ err = fc_block_scsi_eh(cmd);
+ if (err != 0)
+ return err;
+
+ ql_log(ql_log_info, vha, 0x8009,
+ "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
+ cmd->device->id, cmd->device->lun, cmd);
+
+ err = 0;
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800a,
+ "Wait for hba online failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+ err = 2;
+ if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800c,
+ "do_reset failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+ err = 3;
+ if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
+ cmd->device->lun, type) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800d,
+ "wait for pending cmds failed for cmd=%p.\n", cmd);
+ goto eh_reset_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0x800e,
+ "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
+ vha->host_no, cmd->device->id, cmd->device->lun, cmd);
+
+ return SUCCESS;
+
+eh_reset_failed:
+ ql_log(ql_log_info, vha, 0x800f,
+ "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
+ reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ cmd);
+ return FAILED;
+}
+
+static int
+qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+{
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
+
+ return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
+ ha->isp_ops->lun_reset);
+}
+
+static int
+qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
+{
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
+
+ return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
+ ha->isp_ops->target_reset);
+}
+
+/**************************************************************************
+* qla2xxx_eh_bus_reset
+*
+* Description:
+* The bus reset function will reset the bus and abort any executing
+* commands.
+*
+* Input:
+* cmd = Linux SCSI command packet of the command that cause the
+* bus reset.
+*
+* Returns:
+* SUCCESS/FAILURE (defined as macro in scsi.h).
+*
+**************************************************************************/
+static int
+qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
+{
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ int ret = FAILED;
+ unsigned int id;
+ uint64_t lun;
+
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+
+ if (!fcport) {
+ return ret;
+ }
+
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
+ ql_log(ql_log_info, vha, 0x8012,
+ "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0x8013,
+ "Wait for hba online failed board disabled.\n");
+ goto eh_bus_reset_done;
+ }
+
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
+ if (ret == FAILED)
+ goto eh_bus_reset_done;
+
+ /* Flush outstanding commands. */
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8014,
+ "Wait for pending commands failed.\n");
+ ret = FAILED;
+ }
+
+eh_bus_reset_done:
+ ql_log(ql_log_warn, vha, 0x802b,
+ "BUS RESET %s nexus=%ld:%d:%llu.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
+
+ return ret;
+}
+
+/**************************************************************************
+* qla2xxx_eh_host_reset
+*
+* Description:
+* The reset function will reset the Adapter.
+*
+* Input:
+* cmd = Linux SCSI command packet of the command that cause the
+* adapter reset.
+*
+* Returns:
+* Either SUCCESS or FAILED.
+*
+* Note:
+**************************************************************************/
+static int
+qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
+ int ret = FAILED;
+ unsigned int id;
+ uint64_t lun;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+
+ ql_log(ql_log_info, vha, 0x8018,
+ "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
+
+ /*
+ * No point in issuing another reset if one is active. Also do not
+ * attempt a reset if we are updating flash.
+ */
+ if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
+ goto eh_host_reset_lock;
+
+ if (vha != base_vha) {
+ if (qla2x00_vp_abort_isp(vha))
+ goto eh_host_reset_lock;
+ } else {
+ if (IS_P3P_TYPE(vha->hw)) {
+ if (!qla82xx_fcoe_ctx_reset(vha)) {
+ /* Ctx reset success */
+ ret = SUCCESS;
+ goto eh_host_reset_lock;
+ }
+ /* fall thru if ctx reset failed */
+ }
+ if (ha->wq)
+ flush_workqueue(ha->wq);
+
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ if (ha->isp_ops->abort_isp(base_vha)) {
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ /* failed. schedule dpc to try */
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x802a,
+ "wait for hba online failed.\n");
+ goto eh_host_reset_lock;
+ }
+ }
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ }
+
+ /* Waiting for command to be returned to OS.*/
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
+ QLA_SUCCESS)
+ ret = SUCCESS;
+
+eh_host_reset_lock:
+ ql_log(ql_log_info, vha, 0x8017,
+ "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
+
+ return ret;
+}
+
+/*
+* qla2x00_loop_reset
+* Issue loop reset.
+*
+* Input:
+* ha = adapter block pointer.
+*
+* Returns:
+* 0 = success
+*/
+int
+qla2x00_loop_reset(scsi_qla_host_t *vha)
+{
+ int ret;
+ struct fc_port *fcport;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLAFX00(ha)) {
+ return qlafx00_loop_reset(vha);
+ }
+
+ if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_taskm, vha, 0x802c,
+ "Bus Reset failed: Reset=%d "
+ "d_id=%x.\n", ret, fcport->d_id.b24);
+ }
+ }
+ }
+
+
+ if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ ret = qla2x00_full_login_lip(vha);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_taskm, vha, 0x802d,
+ "full_login_lip=%d.\n", ret);
+ }
+ }
+
+ if (ha->flags.enable_lip_reset) {
+ ret = qla2x00_lip_reset(vha);
+ if (ret != QLA_SUCCESS)
+ ql_dbg(ql_dbg_taskm, vha, 0x802e,
+ "lip_reset failed (%d).\n", ret);
+ }
+
+ /* Issue marker command only when we are going to start the I/O */
+ vha->marker_needed = 1;
+
+ return QLA_SUCCESS;
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+ int que, cnt;
+ unsigned long flags;
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+
+ qlt_host_reset_handler(ha);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ req->outstanding_cmds[cnt] = NULL;
+ sp->done(vha, sp, res);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static int
+qla2xxx_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ sdev->hostdata = *(fc_port_t **)rport->dd_data;
+
+ return 0;
+}
+
+static int
+qla2xxx_slave_configure(struct scsi_device *sdev)
+{
+ scsi_qla_host_t *vha = shost_priv(sdev->host);
+ struct req_que *req = vha->req;
+
+ if (IS_T10_PI_CAPABLE(vha->hw))
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
+ scsi_change_queue_depth(sdev, req->max_q_depth);
+ return 0;
+}
+
+static void
+qla2xxx_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+}
+
+/**
+ * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
+ * @ha: HA context
+ *
+ * At exit, the @ha's flags.enable_64bit_addressing set to indicated
+ * supported addressing method.
+ */
+static void
+qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+{
+ /* Assume a 32bit DMA mask. */
+ ha->flags.enable_64bit_addressing = 0;
+
+ if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ /* Any upper-dword bits set? */
+ if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
+ !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ /* Ok, a 64bit DMA mask is applicable. */
+ ha->flags.enable_64bit_addressing = 1;
+ ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+ ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
+ return;
+ }
+ }
+
+ dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
+ pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+}
+
+static void
+qla2x00_enable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 1;
+ /* enable risc and host interrupts */
+ WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+ RD_REG_WORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+}
+
+static void
+qla2x00_disable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 0;
+ /* disable risc and host interrupts */
+ WRT_REG_WORD(&reg->ictrl, 0);
+ RD_REG_WORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla24xx_enable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 1;
+ WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
+ RD_REG_DWORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla24xx_disable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (IS_NOPOLLING_TYPE(ha))
+ return;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 0;
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+ resource_size_t pio;
+ uint16_t msix;
+ int cpus;
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (!(ha->bars & 1))
+ goto skip_pio;
+
+ /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+ pio = pci_resource_start(ha->pdev, 0);
+ if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ } else {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+ "Region #0 no a PIO resource (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ ha->pio_address = pio;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+ "PIO address=%llu.\n",
+ (unsigned long long)ha->pio_address);
+
+skip_pio:
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+ "Region #1 not an MMIO resource (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+ "Invalid PCI mem region size (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+ (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ goto mqiobase_exit;
+
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+ pci_resource_len(ha->pdev, 3));
+ if (ha->mqiobase) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+ "MQIO Base=%p.\n", ha->mqiobase);
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+ "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return (0);
+
+iospace_error_exit:
+ return (-ENOMEM);
+}
+
+
+static int
+qla83xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint16_t msix;
+ int cpus;
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
+ "Invalid PCI mem region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* 64bit PCI BAR - BAR2 will correspoond to region 4 */
+ /* 83XX 26XX always use MQ type access for queues
+ * - mbar 2, a.k.a region 4 */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
+ pci_resource_len(ha->pdev, 4));
+
+ if (!ha->mqiobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
+ "BAR2/region4 not enabled\n");
+ goto mqiobase_exit;
+ }
+
+ ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
+ pci_resource_len(ha->pdev, 2));
+ if (ha->msixbase) {
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev,
+ QLA_83XX_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x011c,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x011e,
+ "BAR 1 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+
+ qlt_83xx_iospace_config(ha);
+
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+static struct isp_operations qla2100_isp_ops = {
+ .pci_config = qla2100_pci_config,
+ .reset_chip = qla2x00_reset_chip,
+ .chip_diag = qla2x00_chip_diag,
+ .config_rings = qla2x00_config_rings,
+ .reset_adapter = qla2x00_reset_adapter,
+ .nvram_config = qla2x00_nvram_config,
+ .update_fw_options = qla2x00_update_fw_options,
+ .load_risc = qla2x00_load_risc,
+ .pci_info_str = qla2x00_pci_info_str,
+ .fw_version_str = qla2x00_fw_version_str,
+ .intr_handler = qla2100_intr_handler,
+ .enable_intrs = qla2x00_enable_intrs,
+ .disable_intrs = qla2x00_disable_intrs,
+ .abort_command = qla2x00_abort_command,
+ .target_reset = qla2x00_abort_target,
+ .lun_reset = qla2x00_lun_reset,
+ .fabric_login = qla2x00_login_fabric,
+ .fabric_logout = qla2x00_fabric_logout,
+ .calc_req_entries = qla2x00_calc_iocbs_32,
+ .build_iocbs = qla2x00_build_scsi_iocbs_32,
+ .prep_ms_iocb = qla2x00_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
+ .read_nvram = qla2x00_read_nvram_data,
+ .write_nvram = qla2x00_write_nvram_data,
+ .fw_dump = qla2100_fw_dump,
+ .beacon_on = NULL,
+ .beacon_off = NULL,
+ .beacon_blink = NULL,
+ .read_optrom = qla2x00_read_optrom_data,
+ .write_optrom = qla2x00_write_optrom_data,
+ .get_flash_version = qla2x00_get_flash_version,
+ .start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla2300_isp_ops = {
+ .pci_config = qla2300_pci_config,
+ .reset_chip = qla2x00_reset_chip,
+ .chip_diag = qla2x00_chip_diag,
+ .config_rings = qla2x00_config_rings,
+ .reset_adapter = qla2x00_reset_adapter,
+ .nvram_config = qla2x00_nvram_config,
+ .update_fw_options = qla2x00_update_fw_options,
+ .load_risc = qla2x00_load_risc,
+ .pci_info_str = qla2x00_pci_info_str,
+ .fw_version_str = qla2x00_fw_version_str,
+ .intr_handler = qla2300_intr_handler,
+ .enable_intrs = qla2x00_enable_intrs,
+ .disable_intrs = qla2x00_disable_intrs,
+ .abort_command = qla2x00_abort_command,
+ .target_reset = qla2x00_abort_target,
+ .lun_reset = qla2x00_lun_reset,
+ .fabric_login = qla2x00_login_fabric,
+ .fabric_logout = qla2x00_fabric_logout,
+ .calc_req_entries = qla2x00_calc_iocbs_32,
+ .build_iocbs = qla2x00_build_scsi_iocbs_32,
+ .prep_ms_iocb = qla2x00_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
+ .read_nvram = qla2x00_read_nvram_data,
+ .write_nvram = qla2x00_write_nvram_data,
+ .fw_dump = qla2300_fw_dump,
+ .beacon_on = qla2x00_beacon_on,
+ .beacon_off = qla2x00_beacon_off,
+ .beacon_blink = qla2x00_beacon_blink,
+ .read_optrom = qla2x00_read_optrom_data,
+ .write_optrom = qla2x00_write_optrom_data,
+ .get_flash_version = qla2x00_get_flash_version,
+ .start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla24xx_isp_ops = {
+ .pci_config = qla24xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla24xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla24xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla24xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla25xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla24xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla24xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla25xx_read_nvram_data,
+ .write_nvram = qla25xx_write_nvram_data,
+ .fw_dump = qla25xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla81xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla81xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla82xx_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla82xx_fw_dump,
+ .beacon_on = qla82xx_beacon_on,
+ .beacon_off = qla82xx_beacon_off,
+ .beacon_blink = NULL,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla82xx_write_optrom_data,
+ .get_flash_version = qla82xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla82xx_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla8044_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla8044_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla8044_fw_dump,
+ .beacon_on = qla82xx_beacon_on,
+ .beacon_off = qla82xx_beacon_off,
+ .beacon_blink = NULL,
+ .read_optrom = qla8044_read_optrom_data,
+ .write_optrom = qla8044_write_optrom_data,
+ .get_flash_version = qla82xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla8044_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla83xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla83xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qlafx00_isp_ops = {
+ .pci_config = qlafx00_pci_config,
+ .reset_chip = qlafx00_soft_reset,
+ .chip_diag = qlafx00_chip_diag,
+ .config_rings = qlafx00_config_rings,
+ .reset_adapter = qlafx00_soft_reset,
+ .nvram_config = NULL,
+ .update_fw_options = NULL,
+ .load_risc = NULL,
+ .pci_info_str = qlafx00_pci_info_str,
+ .fw_version_str = qlafx00_fw_version_str,
+ .intr_handler = qlafx00_intr_handler,
+ .enable_intrs = qlafx00_enable_intrs,
+ .disable_intrs = qlafx00_disable_intrs,
+ .abort_command = qla24xx_async_abort_command,
+ .target_reset = qlafx00_abort_target,
+ .lun_reset = qlafx00_lun_reset,
+ .fabric_login = NULL,
+ .fabric_logout = NULL,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = NULL,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = NULL,
+ .read_optrom = qla24xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qlafx00_start_scsi,
+ .abort_isp = qlafx00_abort_isp,
+ .iospace_config = qlafx00_iospace_config,
+ .initialize_adapter = qlafx00_initialize_adapter,
+};
+
+static struct isp_operations qla27xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla27xx_fwdump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static inline void
+qla2x00_set_isp_flags(struct qla_hw_data *ha)
+{
+ ha->device_type = DT_EXTENDED_IDS;
+ switch (ha->pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_ISP2100:
+ ha->device_type |= DT_ISP2100;
+ ha->device_type &= ~DT_EXTENDED_IDS;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2100;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2200:
+ ha->device_type |= DT_ISP2200;
+ ha->device_type &= ~DT_EXTENDED_IDS;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2100;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2300:
+ ha->device_type |= DT_ISP2300;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2312:
+ ha->device_type |= DT_ISP2312;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2322:
+ ha->device_type |= DT_ISP2322;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ if (ha->pdev->subsystem_vendor == 0x1028 &&
+ ha->pdev->subsystem_device == 0x0170)
+ ha->device_type |= DT_OEM_001;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP6312:
+ ha->device_type |= DT_ISP6312;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP6322:
+ ha->device_type |= DT_ISP6322;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2422:
+ ha->device_type |= DT_ISP2422;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2432:
+ ha->device_type |= DT_ISP2432;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8432:
+ ha->device_type |= DT_ISP8432;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP5422:
+ ha->device_type |= DT_ISP5422;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP5432:
+ ha->device_type |= DT_ISP5432;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2532:
+ ha->device_type |= DT_ISP2532;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8001:
+ ha->device_type |= DT_ISP8001;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8021:
+ ha->device_type |= DT_ISP8021;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8044:
+ ha->device_type |= DT_ISP8044;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2031:
+ ha->device_type |= DT_ISP2031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8031:
+ ha->device_type |= DT_ISP8031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISPF001:
+ ha->device_type |= DT_ISPFX00;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2071:
+ ha->device_type |= DT_ISP2071;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2271:
+ ha->device_type |= DT_ISP2271;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ }
+
+ if (IS_QLA82XX(ha))
+ ha->port_no = ha->portnum & 1;
+ else {
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA27XX(ha))
+ ha->port_no--;
+ else
+ ha->port_no = !(ha->port_no & 1);
+ }
+
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
+ "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
+ ha->device_type, ha->port_no, ha->fw_srisc_address);
+}
+
+static void
+qla2xxx_scan_start(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ if (vha->hw->flags.running_gold_fw)
+ return;
+
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(RSCN_UPDATE, &vha->dpc_flags);
+ set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
+}
+
+static int
+qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ if (!vha->host)
+ return 1;
+ if (time > vha->hw->loop_reset_delay * HZ)
+ return 1;
+
+ return atomic_read(&vha->loop_state) == LOOP_READY;
+}
+
+/*
+ * PCI driver interface
+ */
+static int
+qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *base_vha = NULL;
+ struct qla_hw_data *ha;
+ char pci_info[30];
+ char fw_str[30], wq_name[30];
+ struct scsi_host_template *sht;
+ int bars, mem_only = 0;
+ uint16_t req_length = 0, rsp_length = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+ sht = &qla2xxx_driver_template;
+ if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ mem_only = 1;
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
+ "Mem only adapter.\n");
+ }
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
+ "Bars=%d.\n", bars);
+
+ if (mem_only) {
+ if (pci_enable_device_mem(pdev))
+ goto probe_out;
+ } else {
+ if (pci_enable_device(pdev))
+ goto probe_out;
+ }
+
+ /* This may fail but that's ok */
+ pci_enable_pcie_error_reporting(pdev);
+
+ ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
+ if (!ha) {
+ ql_log_pci(ql_log_fatal, pdev, 0x0009,
+ "Unable to allocate memory for ha.\n");
+ goto probe_out;
+ }
+ ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
+ "Memory allocated for ha=%p.\n", ha);
+ ha->pdev = pdev;
+ ha->tgt.enable_class_2 = ql2xenableclass2;
+ INIT_LIST_HEAD(&ha->tgt.q_full_list);
+ spin_lock_init(&ha->tgt.q_full_lock);
+
+ /* Clear our data area */
+ ha->bars = bars;
+ ha->mem_only = mem_only;
+ spin_lock_init(&ha->hardware_lock);
+ spin_lock_init(&ha->vport_slock);
+ mutex_init(&ha->selflogin_lock);
+ mutex_init(&ha->optrom_mutex);
+
+ /* Set ISP-type information. */
+ qla2x00_set_isp_flags(ha);
+
+ /* Set EEH reset type to fundamental if required by hba */
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ pdev->needs_freset = 1;
+
+ ha->prev_topology = 0;
+ ha->init_cb_size = sizeof(init_cb_t);
+ ha->link_data_rate = PORT_SPEED_UNKNOWN;
+ ha->optrom_size = OPTROM_SIZE_2300;
+
+ /* Assign ISP specific operations. */
+ if (IS_QLA2100(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
+ req_length = REQUEST_ENTRY_CNT_2100;
+ rsp_length = RESPONSE_ENTRY_CNT_2100;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
+ ha->gid_list_info_size = 4;
+ ha->flash_conf_off = ~0;
+ ha->flash_data_off = ~0;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
+ ha->isp_ops = &qla2100_isp_ops;
+ } else if (IS_QLA2200(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
+ req_length = REQUEST_ENTRY_CNT_2200;
+ rsp_length = RESPONSE_ENTRY_CNT_2100;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
+ ha->gid_list_info_size = 4;
+ ha->flash_conf_off = ~0;
+ ha->flash_data_off = ~0;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
+ ha->isp_ops = &qla2100_isp_ops;
+ } else if (IS_QLA23XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_2200;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->gid_list_info_size = 6;
+ if (IS_QLA2322(ha) || IS_QLA6322(ha))
+ ha->optrom_size = OPTROM_SIZE_2322;
+ ha->flash_conf_off = ~0;
+ ha->flash_data_off = ~0;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
+ ha->isp_ops = &qla2300_isp_ops;
+ } else if (IS_QLA24XX_TYPE(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_24XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
+ ha->isp_ops = &qla24xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA25XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_25XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla25xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA81XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_81XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla81xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
+ } else if (IS_QLA82XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_82XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla82xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA8044(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla8044_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA83XX(ha)) {
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_83XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla83xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
+ } else if (IS_QLAFX00(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
+ ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
+ req_length = REQUEST_ENTRY_CNT_FX00;
+ rsp_length = RESPONSE_ENTRY_CNT_FX00;
+ ha->isp_ops = &qlafx00_isp_ops;
+ ha->port_down_retry_count = 30; /* default value */
+ ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
+ ha->mr.fw_hbt_en = 1;
+ ha->mr.host_info_resend = false;
+ ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
+ } else if (IS_QLA27XX(ha)) {
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla27xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
+ }
+
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
+ "mbx_count=%d, req_length=%d, "
+ "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
+ "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
+ "max_fibre_devices=%d.\n",
+ ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
+ ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
+ ha->nvram_npiv_size, ha->max_fibre_devices);
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
+ "isp_ops=%p, flash_conf_off=%d, "
+ "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
+ ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
+ ha->nvram_conf_off, ha->nvram_data_off);
+
+ /* Configure PCI I/O space */
+ ret = ha->isp_ops->iospace_config(ha);
+ if (ret)
+ goto iospace_config_failed;
+
+ ql_log_pci(ql_log_info, pdev, 0x001d,
+ "Found an ISP%04X irq %d iobase 0x%p.\n",
+ pdev->device, pdev->irq, ha->iobase);
+ mutex_init(&ha->vport_lock);
+ init_completion(&ha->mbx_cmd_comp);
+ complete(&ha->mbx_cmd_comp);
+ init_completion(&ha->mbx_intr_comp);
+ init_completion(&ha->dcbx_comp);
+ init_completion(&ha->lb_portup_comp);
+
+ set_bit(0, (unsigned long *) ha->vp_idx_map);
+
+ qla2x00_config_dma_addressing(ha);
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
+ "64 Bit addressing is %s.\n",
+ ha->flags.enable_64bit_addressing ? "enable" :
+ "disable");
+ ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
+ if (ret) {
+ ql_log_pci(ql_log_fatal, pdev, 0x0031,
+ "Failed to allocate memory for adapter, aborting.\n");
+
+ goto probe_hw_failed;
+ }
+
+ req->max_q_depth = MAX_Q_DEPTH;
+ if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
+ req->max_q_depth = ql2xmaxqdepth;
+
+
+ base_vha = qla2x00_create_host(sht, ha);
+ if (!base_vha) {
+ ret = -ENOMEM;
+ qla2x00_mem_free(ha);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
+ goto probe_hw_failed;
+ }
+
+ pci_set_drvdata(pdev, base_vha);
+ set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
+
+ host = base_vha->host;
+ base_vha->req = req;
+ if (IS_QLA2XXX_MIDTYPE(ha))
+ base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ else
+ base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
+ base_vha->vp_idx;
+
+ /* Setup fcport template structure. */
+ ha->mr.fcport.vha = base_vha;
+ ha->mr.fcport.port_type = FCT_UNKNOWN;
+ ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
+ qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
+ ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
+ ha->mr.fcport.scan_state = 1;
+
+ /* Set the SG table size based on ISP type */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA2100(ha))
+ host->sg_tablesize = 32;
+ } else {
+ if (!IS_QLA82XX(ha))
+ host->sg_tablesize = QLA_SG_ALL;
+ }
+ host->max_id = ha->max_fibre_devices;
+ host->cmd_per_lun = 3;
+ host->unique_id = host->host_no;
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
+ host->max_cmd_len = 32;
+ else
+ host->max_cmd_len = MAX_CMDSZ;
+ host->max_channel = MAX_BUSES - 1;
+ /* Older HBAs support only 16-bit LUNs */
+ if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
+ ql2xmaxlun > 0xffff)
+ host->max_lun = 0xffff;
+ else
+ host->max_lun = ql2xmaxlun;
+ host->transportt = qla2xxx_transport_template;
+ sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
+
+ ql_dbg(ql_dbg_init, base_vha, 0x0033,
+ "max_id=%d this_id=%d "
+ "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
+ "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
+ host->this_id, host->cmd_per_lun, host->unique_id,
+ host->max_cmd_len, host->max_channel, host->max_lun,
+ host->transportt, sht->vendor_id);
+
+que_init:
+ /* Alloc arrays of request and response ring ptrs */
+ if (!qla2x00_alloc_queues(ha, req, rsp)) {
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+ "Failed to allocate memory for queue pointers..."
+ "aborting.\n");
+ goto probe_init_failed;
+ }
+
+ qlt_probe_one_stage1(base_vha, ha);
+
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+ goto probe_init_failed;
+
+ pci_save_state(pdev);
+
+ /* Assign back pointers */
+ rsp->req = req;
+ req->rsp = rsp;
+
+ if (IS_QLAFX00(ha)) {
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
+ }
+
+ /* FWI2-capable only. */
+ req->req_q_in = &ha->iobase->isp24.req_q_in;
+ req->req_q_out = &ha->iobase->isp24.req_q_out;
+ rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
+ rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
+ req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
+ rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
+ rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
+ }
+
+ if (IS_QLAFX00(ha)) {
+ req->req_q_in = &ha->iobase->ispfx00.req_q_in;
+ req->req_q_out = &ha->iobase->ispfx00.req_q_out;
+ rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
+ rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
+ }
+
+ if (IS_P3P_TYPE(ha)) {
+ req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+ rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+ rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+ }
+
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
+ "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+ ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
+ "req->req_q_in=%p req->req_q_out=%p "
+ "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+ req->req_q_in, req->req_q_out,
+ rsp->rsp_q_in, rsp->rsp_q_out);
+ ql_dbg(ql_dbg_init, base_vha, 0x003e,
+ "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+ ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+ ql_dbg(ql_dbg_init, base_vha, 0x003f,
+ "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+ req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
+
+ if (ha->isp_ops->initialize_adapter(base_vha)) {
+ ql_log(ql_log_fatal, base_vha, 0x00d6,
+ "Failed to initialize adapter - Adapter flags %x.\n",
+ base_vha->device_flags);
+
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ ql_log(ql_log_fatal, base_vha, 0x00d7,
+ "HW State: FAILED.\n");
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_wr_direct(base_vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ qla8044_idc_unlock(ha);
+ ql_log(ql_log_fatal, base_vha, 0x0150,
+ "HW State: FAILED.\n");
+ }
+
+ ret = -ENODEV;
+ goto probe_failed;
+ }
+
+ if (IS_QLAFX00(ha))
+ host->can_queue = QLAFX00_MAX_CANQUEUE;
+ else
+ host->can_queue = req->num_outstanding_cmds - 10;
+
+ ql_dbg(ql_dbg_init, base_vha, 0x0032,
+ "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+ host->can_queue, base_vha->req,
+ base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+
+ if (ha->mqenable) {
+ if (qla25xx_setup_mode(base_vha)) {
+ ql_log(ql_log_warn, base_vha, 0x00ec,
+ "Failed to create queues, falling back to single queue mode.\n");
+ goto que_init;
+ }
+ }
+
+ if (ha->flags.running_gold_fw)
+ goto skip_dpc;
+
+ /*
+ * Startup the kernel thread for this host adapter
+ */
+ ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
+ "%s_dpc", base_vha->host_str);
+ if (IS_ERR(ha->dpc_thread)) {
+ ql_log(ql_log_fatal, base_vha, 0x00ed,
+ "Failed to start DPC thread.\n");
+ ret = PTR_ERR(ha->dpc_thread);
+ goto probe_failed;
+ }
+ ql_dbg(ql_dbg_init, base_vha, 0x00ee,
+ "DPC thread started successfully.\n");
+
+ /*
+ * If we're not coming up in initiator mode, we might sit for
+ * a while without waking up the dpc thread, which leads to a
+ * stuck process warning. So just kick the dpc once here and
+ * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+ */
+ qla2xxx_wake_dpc(base_vha);
+
+ INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
+
+ if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
+ sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
+ ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
+
+ sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
+ ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
+ INIT_WORK(&ha->idc_state_handler,
+ qla83xx_idc_state_handler_work);
+ INIT_WORK(&ha->nic_core_unrecoverable,
+ qla83xx_nic_core_unrecoverable_work);
+ }
+
+skip_dpc:
+ list_add_tail(&base_vha->list, &ha->vp_list);
+ base_vha->host->irq = ha->pdev->irq;
+
+ /* Initialized the timer */
+ qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
+ ql_dbg(ql_dbg_init, base_vha, 0x00ef,
+ "Started qla2x00_timer with "
+ "interval=%d.\n", WATCH_INTERVAL);
+ ql_dbg(ql_dbg_init, base_vha, 0x00f0,
+ "Detected hba at address=%p.\n",
+ ha);
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ int prot = 0, guard;
+ base_vha->flags.difdix_supported = 1;
+ ql_dbg(ql_dbg_init, base_vha, 0x00f1,
+ "Registering for DIF/DIX type 1 and 3 protection.\n");
+ if (ql2xenabledif == 1)
+ prot = SHOST_DIX_TYPE0_PROTECTION;
+ scsi_host_set_prot(host,
+ prot | SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+
+ guard = SHOST_DIX_GUARD_CRC;
+
+ if (IS_PI_IPGUARD_CAPABLE(ha) &&
+ (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+ guard |= SHOST_DIX_GUARD_IP;
+
+ scsi_host_set_guard(host, guard);
+ } else
+ base_vha->flags.difdix_supported = 0;
+ }
+
+ ha->isp_ops->enable_intrs(ha);
+
+ if (IS_QLAFX00(ha)) {
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+ host->sg_tablesize = (ha->mr.extended_io_enabled) ?
+ QLA_SG_ALL : 128;
+ }
+
+ ret = scsi_add_host(host, &pdev->dev);
+ if (ret)
+ goto probe_failed;
+
+ base_vha->flags.init_done = 1;
+ base_vha->flags.online = 1;
+ ha->prev_minidump_failed = 0;
+
+ ql_dbg(ql_dbg_init, base_vha, 0x00f2,
+ "Init done and hba is online.\n");
+
+ if (qla_ini_mode_enabled(base_vha))
+ scsi_scan_host(host);
+ else
+ ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ "skipping scsi_scan_host() for non-initiator port\n");
+
+ qla2x00_alloc_sysfs_attr(base_vha);
+
+ if (IS_QLAFX00(ha)) {
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
+
+ /* Register system information */
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
+ }
+
+ qla2x00_init_host_attr(base_vha);
+
+ qla2x00_dfs_setup(base_vha);
+
+ ql_log(ql_log_info, base_vha, 0x00fb,
+ "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
+ ql_log(ql_log_info, base_vha, 0x00fc,
+ "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
+ pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
+ pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
+ base_vha->host_no,
+ ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
+
+ qlt_add_target(ha, base_vha);
+
+ clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
+ return 0;
+
+probe_init_failed:
+ qla2x00_free_req_que(ha, req);
+ ha->req_q_map[0] = NULL;
+ clear_bit(0, ha->req_qid_map);
+ qla2x00_free_rsp_que(ha, rsp);
+ ha->rsp_q_map[0] = NULL;
+ clear_bit(0, ha->rsp_qid_map);
+ ha->max_req_queues = ha->max_rsp_queues = 0;
+
+probe_failed:
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+ base_vha->flags.online = 0;
+ if (ha->dpc_thread) {
+ struct task_struct *t = ha->dpc_thread;
+
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+ }
+
+ qla2x00_free_device(base_vha);
+
+ scsi_host_put(base_vha->host);
+
+probe_hw_failed:
+ qla2x00_clear_drv_active(ha);
+
+iospace_config_failed:
+ if (IS_P3P_TYPE(ha)) {
+ if (!ha->nx_pcibase)
+ iounmap((device_reg_t *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
+ if (ha->cregbase)
+ iounmap(ha->cregbase);
+ }
+ pci_release_selected_regions(ha->pdev, ha->bars);
+ kfree(ha);
+ ha = NULL;
+
+probe_out:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void
+qla2x00_shutdown(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
+ vha = pci_get_drvdata(pdev);
+ ha = vha->hw;
+
+ /* Notify ISPFX00 firmware */
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(vha, 20);
+
+ /* Turn-off FCE trace */
+ if (ha->flags.fce_enabled) {
+ qla2x00_disable_fce_trace(vha, NULL, NULL);
+ ha->flags.fce_enabled = 0;
+ }
+
+ /* Turn-off EFT trace */
+ if (ha->eft)
+ qla2x00_disable_eft_trace(vha);
+
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+
+ /* Turn adapter off line */
+ vha->flags.online = 0;
+
+ /* turn-off interrupts on the card */
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
+ ha->isp_ops->disable_intrs(ha);
+ }
+
+ qla2x00_free_irqs(vha);
+
+ qla2x00_free_fw_dump(ha);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/* Deletes all the virtual ports for a given ha */
+static void
+qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
+{
+ struct Scsi_Host *scsi_host;
+ scsi_qla_host_t *vha;
+ unsigned long flags;
+
+ mutex_lock(&ha->vport_lock);
+ while (ha->cur_vport_count) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+
+ BUG_ON(base_vha->list.next == &ha->vp_list);
+ /* This assumes first entry in ha->vp_list is always base vha */
+ vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
+ scsi_host = scsi_host_get(vha->host);
+
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ mutex_unlock(&ha->vport_lock);
+
+ fc_vport_terminate(vha->fc_vport);
+ scsi_host_put(vha->host);
+
+ mutex_lock(&ha->vport_lock);
+ }
+ mutex_unlock(&ha->vport_lock);
+}
+
+/* Stops all deferred work threads */
+static void
+qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
+{
+ /* Flush the work queue and remove it */
+ if (ha->wq) {
+ flush_workqueue(ha->wq);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ }
+
+ /* Cancel all work and destroy DPC workqueues */
+ if (ha->dpc_lp_wq) {
+ cancel_work_sync(&ha->idc_aen);
+ destroy_workqueue(ha->dpc_lp_wq);
+ ha->dpc_lp_wq = NULL;
+ }
+
+ if (ha->dpc_hp_wq) {
+ cancel_work_sync(&ha->nic_core_reset);
+ cancel_work_sync(&ha->idc_state_handler);
+ cancel_work_sync(&ha->nic_core_unrecoverable);
+ destroy_workqueue(ha->dpc_hp_wq);
+ ha->dpc_hp_wq = NULL;
+ }
+
+ /* Kill the kernel thread for this host */
+ if (ha->dpc_thread) {
+ struct task_struct *t = ha->dpc_thread;
+
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+ }
+}
+
+static void
+qla2x00_unmap_iobases(struct qla_hw_data *ha)
+{
+ if (IS_QLA82XX(ha)) {
+
+ iounmap((device_reg_t *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
+
+ if (ha->cregbase)
+ iounmap(ha->cregbase);
+
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
+ iounmap(ha->msixbase);
+ }
+}
+
+static void
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
+{
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(ha);
+ qla8044_idc_unlock(ha);
+ } else if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ }
+}
+
+static void
+qla2x00_remove_one(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *base_vha;
+ struct qla_hw_data *ha;
+
+ base_vha = pci_get_drvdata(pdev);
+ ha = base_vha->hw;
+
+ /* Indicate device removal to prevent future board_disable and wait
+ * until any pending board_disable has completed. */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ cancel_work_sync(&ha->board_disable);
+
+ /*
+ * If the PCI device is disabled then there was a PCI-disconnect and
+ * qla2x00_disable_board_on_pci_error has taken care of most of the
+ * resources.
+ */
+ if (!atomic_read(&pdev->enable_cnt)) {
+ scsi_host_put(base_vha->host);
+ kfree(ha);
+ pci_set_drvdata(pdev, NULL);
+ return;
+ }
+
+ qla2x00_wait_for_hba_ready(base_vha);
+
+ set_bit(UNLOADING, &base_vha->dpc_flags);
+
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(base_vha, 20);
+
+ qla2x00_delete_all_vps(ha, base_vha);
+
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+ "Error while clearing DRV-Presence.\n");
+ }
+
+ qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+ qla2x00_dfs_remove(base_vha);
+
+ qla84xx_put_chip(base_vha);
+
+ /* Laser should be disabled only for ISP2031 */
+ if (IS_QLA2031(ha))
+ qla83xx_disable_laser(base_vha);
+
+ /* Disable timer */
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+
+ base_vha->flags.online = 0;
+
+ qla2x00_destroy_deferred_work(ha);
+
+ qlt_remove_target(ha, base_vha);
+
+ qla2x00_free_sysfs_attr(base_vha, true);
+
+ fc_remove_host(base_vha->host);
+
+ scsi_remove_host(base_vha->host);
+
+ qla2x00_free_device(base_vha);
+
+ qla2x00_clear_drv_active(ha);
+
+ scsi_host_put(base_vha->host);
+
+ qla2x00_unmap_iobases(ha);
+
+ pci_release_selected_regions(ha->pdev, ha->bars);
+ kfree(ha);
+ ha = NULL;
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static void
+qla2x00_free_device(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
+ qla25xx_delete_queues(vha);
+
+ if (ha->flags.fce_enabled)
+ qla2x00_disable_fce_trace(vha, NULL, NULL);
+
+ if (ha->eft)
+ qla2x00_disable_eft_trace(vha);
+
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+
+ vha->flags.online = 0;
+
+ /* turn-off interrupts on the card */
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
+ ha->isp_ops->disable_intrs(ha);
+ }
+
+ qla2x00_free_irqs(vha);
+
+ qla2x00_free_fcports(vha);
+
+ qla2x00_mem_free(ha);
+
+ qla82xx_md_free(vha);
+
+ qla2x00_free_queues(ha);
+}
+
+void qla2x00_free_fcports(struct scsi_qla_host *vha)
+{
+ fc_port_t *fcport, *tfcport;
+
+ list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
+ list_del(&fcport->list);
+ qla2x00_clear_loop_id(fcport);
+ kfree(fcport);
+ fcport = NULL;
+ }
+}
+
+static inline void
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
+ int defer)
+{
+ struct fc_rport *rport;
+ scsi_qla_host_t *base_vha;
+ unsigned long flags;
+
+ if (!fcport->rport)
+ return;
+
+ rport = fcport->rport;
+ if (defer) {
+ base_vha = pci_get_drvdata(vha->hw->pdev);
+ spin_lock_irqsave(vha->host->host_lock, flags);
+ fcport->drport = rport;
+ spin_unlock_irqrestore(vha->host->host_lock, flags);
+ set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
+ } else {
+ fc_remote_port_delete(rport);
+ qlt_fc_port_deleted(vha, fcport);
+ }
+}
+
+/*
+ * qla2x00_mark_device_lost Updates fcport state when device goes offline.
+ *
+ * Input: ha = adapter block pointer. fcport = port structure pointer.
+ *
+ * Return: None.
+ *
+ * Context:
+ */
+void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
+ int do_login, int defer)
+{
+ if (IS_QLAFX00(vha->hw)) {
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ return;
+ }
+
+ if (atomic_read(&fcport->state) == FCS_ONLINE &&
+ vha->vp_idx == fcport->vha->vp_idx) {
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ }
+ /*
+ * We may need to retry the login, so don't change the state of the
+ * port but do the retries.
+ */
+ if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+
+ if (!do_login)
+ return;
+
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id, fcport->login_retry);
+ }
+}
+
+/*
+ * qla2x00_mark_all_devices_lost
+ * Updates fcport state when device goes offline.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * None.
+ *
+ * Context:
+ */
+void
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
+{
+ fc_port_t *fcport;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
+ continue;
+
+ /*
+ * No point in marking the device as lost, if the device is
+ * already DEAD.
+ */
+ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
+ continue;
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ if (defer)
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ else if (vha->vp_idx == fcport->vha->vp_idx)
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ }
+ }
+}
+
+/*
+* qla2x00_mem_alloc
+* Allocates adapter memory.
+*
+* Returns:
+* 0 = success.
+* !0 = failure.
+*/
+static int
+qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ struct req_que **req, struct rsp_que **rsp)
+{
+ char name[16];
+
+ ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
+ &ha->init_cb_dma, GFP_KERNEL);
+ if (!ha->init_cb)
+ goto fail;
+
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_init_cb;
+
+ ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
+ if (!ha->gid_list)
+ goto fail_free_tgt_mem;
+
+ ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+ if (!ha->srb_mempool)
+ goto fail_free_gid_list;
+
+ if (IS_P3P_TYPE(ha)) {
+ /* Allocate cache for CT6 Ctx. */
+ if (!ctx_cachep) {
+ ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+ sizeof(struct ct6_dsd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ctx_cachep)
+ goto fail_free_gid_list;
+ }
+ ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ ctx_cachep);
+ if (!ha->ctx_mempool)
+ goto fail_free_srb_mempool;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
+ "ctx_cachep=%p ctx_mempool=%p.\n",
+ ctx_cachep, ha->ctx_mempool);
+ }
+
+ /* Get memory for cached NVRAM */
+ ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
+ if (!ha->nvram)
+ goto fail_free_ctx_mempool;
+
+ snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
+ ha->pdev->device);
+ ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DMA_POOL_SIZE, 8, 0);
+ if (!ha->s_dma_pool)
+ goto fail_free_nvram;
+
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
+ "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
+ ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
+
+ if (IS_P3P_TYPE(ha) || ql2xenabledif) {
+ ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DSD_LIST_DMA_POOL_SIZE, 8, 0);
+ if (!ha->dl_dma_pool) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
+ "Failed to allocate memory for dl_dma_pool.\n");
+ goto fail_s_dma_pool;
+ }
+
+ ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ FCP_CMND_DMA_POOL_SIZE, 8, 0);
+ if (!ha->fcp_cmnd_dma_pool) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
+ "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
+ goto fail_dl_dma_pool;
+ }
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
+ "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
+ ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
+ }
+
+ /* Allocate memory for SNS commands */
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ /* Get consistent memory allocated for SNS commands */
+ ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
+ if (!ha->sns_cmd)
+ goto fail_dma_pool;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
+ "sns_cmd: %p.\n", ha->sns_cmd);
+ } else {
+ /* Get consistent memory allocated for MS IOCB */
+ ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->ms_iocb_dma);
+ if (!ha->ms_iocb)
+ goto fail_dma_pool;
+ /* Get consistent memory allocated for CT SNS commands */
+ ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
+ if (!ha->ct_sns)
+ goto fail_free_ms_iocb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
+ "ms_iocb=%p ct_sns=%p.\n",
+ ha->ms_iocb, ha->ct_sns);
+ }
+
+ /* Allocate memory for request ring */
+ *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+ if (!*req) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
+ "Failed to allocate memory for req.\n");
+ goto fail_req;
+ }
+ (*req)->length = req_len;
+ (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
+ ((*req)->length + 1) * sizeof(request_t),
+ &(*req)->dma, GFP_KERNEL);
+ if (!(*req)->ring) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
+ "Failed to allocate memory for req_ring.\n");
+ goto fail_req_ring;
+ }
+ /* Allocate memory for response ring */
+ *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+ if (!*rsp) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
+ "Failed to allocate memory for rsp.\n");
+ goto fail_rsp;
+ }
+ (*rsp)->hw = ha;
+ (*rsp)->length = rsp_len;
+ (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
+ ((*rsp)->length + 1) * sizeof(response_t),
+ &(*rsp)->dma, GFP_KERNEL);
+ if (!(*rsp)->ring) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
+ "Failed to allocate memory for rsp_ring.\n");
+ goto fail_rsp_ring;
+ }
+ (*req)->rsp = *rsp;
+ (*rsp)->req = *req;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
+ "req=%p req->length=%d req->ring=%p rsp=%p "
+ "rsp->length=%d rsp->ring=%p.\n",
+ *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
+ (*rsp)->ring);
+ /* Allocate memory for NVRAM data for vports */
+ if (ha->nvram_npiv_size) {
+ ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
+ ha->nvram_npiv_size, GFP_KERNEL);
+ if (!ha->npiv_info) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
+ "Failed to allocate memory for npiv_info.\n");
+ goto fail_npiv_info;
+ }
+ } else
+ ha->npiv_info = NULL;
+
+ /* Get consistent memory allocated for EX-INIT-CB. */
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->ex_init_cb_dma);
+ if (!ha->ex_init_cb)
+ goto fail_ex_init_cb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
+ "ex_init_cb=%p.\n", ha->ex_init_cb);
+ }
+
+ INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
+ /* Get consistent memory allocated for Async Port-Database. */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->async_pd_dma);
+ if (!ha->async_pd)
+ goto fail_async_pd;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
+ "async_pd=%p.\n", ha->async_pd);
+ }
+
+ INIT_LIST_HEAD(&ha->vp_list);
+
+ /* Allocate memory for our loop_id bitmap */
+ ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+ GFP_KERNEL);
+ if (!ha->loop_id_map)
+ goto fail_async_pd;
+ else {
+ qla2x00_set_reserved_loop_ids(ha);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+ "loop_id_map=%p.\n", ha->loop_id_map);
+ }
+
+ return 0;
+
+fail_async_pd:
+ dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+fail_ex_init_cb:
+ kfree(ha->npiv_info);
+fail_npiv_info:
+ dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
+ sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
+ (*rsp)->ring = NULL;
+ (*rsp)->dma = 0;
+fail_rsp_ring:
+ kfree(*rsp);
+fail_rsp:
+ dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
+ sizeof(request_t), (*req)->ring, (*req)->dma);
+ (*req)->ring = NULL;
+ (*req)->dma = 0;
+fail_req_ring:
+ kfree(*req);
+fail_req:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+ ha->ct_sns, ha->ct_sns_dma);
+ ha->ct_sns = NULL;
+ ha->ct_sns_dma = 0;
+fail_free_ms_iocb:
+ dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+ ha->ms_iocb = NULL;
+ ha->ms_iocb_dma = 0;
+fail_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ ha->fcp_cmnd_dma_pool = NULL;
+ }
+fail_dl_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->dl_dma_pool);
+ ha->dl_dma_pool = NULL;
+ }
+fail_s_dma_pool:
+ dma_pool_destroy(ha->s_dma_pool);
+ ha->s_dma_pool = NULL;
+fail_free_nvram:
+ kfree(ha->nvram);
+ ha->nvram = NULL;
+fail_free_ctx_mempool:
+ mempool_destroy(ha->ctx_mempool);
+ ha->ctx_mempool = NULL;
+fail_free_srb_mempool:
+ mempool_destroy(ha->srb_mempool);
+ ha->srb_mempool = NULL;
+fail_free_gid_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list,
+ ha->gid_list_dma);
+ ha->gid_list = NULL;
+ ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+ qlt_mem_free(ha);
+fail_free_init_cb:
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
+ ha->init_cb_dma);
+ ha->init_cb = NULL;
+ ha->init_cb_dma = 0;
+fail:
+ ql_log(ql_log_fatal, NULL, 0x0030,
+ "Memory allocation failure.\n");
+ return -ENOMEM;
+}
+
+/*
+* qla2x00_free_fw_dump
+* Frees fw dump stuff.
+*
+* Input:
+* ha = adapter block pointer
+*/
+static void
+qla2x00_free_fw_dump(struct qla_hw_data *ha)
+{
+ if (ha->fce)
+ dma_free_coherent(&ha->pdev->dev,
+ FCE_SIZE, ha->fce, ha->fce_dma);
+
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+
+ ha->fce = NULL;
+ ha->fce_dma = 0;
+ ha->eft = NULL;
+ ha->eft_dma = 0;
+ ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
+ ha->fw_dump_reading = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_len = 0;
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+}
+
+/*
+* qla2x00_mem_free
+* Frees all adapter allocated memory.
+*
+* Input:
+* ha = adapter block pointer.
+*/
+static void
+qla2x00_mem_free(struct qla_hw_data *ha)
+{
+ qla2x00_free_fw_dump(ha);
+
+ if (ha->mctp_dump)
+ dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
+ ha->mctp_dump_dma);
+
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
+
+ if (ha->dcbx_tlv)
+ dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+ ha->dcbx_tlv, ha->dcbx_tlv_dma);
+
+ if (ha->xgmac_data)
+ dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+ ha->xgmac_data, ha->xgmac_data_dma);
+
+ if (ha->sns_cmd)
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+ ha->sns_cmd, ha->sns_cmd_dma);
+
+ if (ha->ct_sns)
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+ ha->ct_sns, ha->ct_sns_dma);
+
+ if (ha->sfp_data)
+ dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
+
+ if (ha->ms_iocb)
+ dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+
+ if (ha->ex_init_cb)
+ dma_pool_free(ha->s_dma_pool,
+ ha->ex_init_cb, ha->ex_init_cb_dma);
+
+ if (ha->async_pd)
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+
+ if (ha->s_dma_pool)
+ dma_pool_destroy(ha->s_dma_pool);
+
+ if (ha->gid_list)
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list, ha->gid_list_dma);
+
+ if (IS_QLA82XX(ha)) {
+ if (!list_empty(&ha->gbl_dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr,
+ tdsd_ptr, &ha->gbl_dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool,
+ dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+ }
+
+ if (ha->dl_dma_pool)
+ dma_pool_destroy(ha->dl_dma_pool);
+
+ if (ha->fcp_cmnd_dma_pool)
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
+
+ qlt_mem_free(ha);
+
+ if (ha->init_cb)
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+ ha->init_cb, ha->init_cb_dma);
+ vfree(ha->optrom_buffer);
+ kfree(ha->nvram);
+ kfree(ha->npiv_info);
+ kfree(ha->swl);
+ kfree(ha->loop_id_map);
+
+ ha->srb_mempool = NULL;
+ ha->ctx_mempool = NULL;
+ ha->sns_cmd = NULL;
+ ha->sns_cmd_dma = 0;
+ ha->ct_sns = NULL;
+ ha->ct_sns_dma = 0;
+ ha->ms_iocb = NULL;
+ ha->ms_iocb_dma = 0;
+ ha->init_cb = NULL;
+ ha->init_cb_dma = 0;
+ ha->ex_init_cb = NULL;
+ ha->ex_init_cb_dma = 0;
+ ha->async_pd = NULL;
+ ha->async_pd_dma = 0;
+
+ ha->s_dma_pool = NULL;
+ ha->dl_dma_pool = NULL;
+ ha->fcp_cmnd_dma_pool = NULL;
+
+ ha->gid_list = NULL;
+ ha->gid_list_dma = 0;
+
+ ha->tgt.atio_ring = NULL;
+ ha->tgt.atio_dma = 0;
+ ha->tgt.tgt_vp_map = NULL;
+}
+
+struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ struct qla_hw_data *ha)
+{
+ struct Scsi_Host *host;
+ struct scsi_qla_host *vha = NULL;
+
+ host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
+ if (host == NULL) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
+ "Failed to allocate host from the scsi layer, aborting.\n");
+ goto fail;
+ }
+
+ /* Clear our data area */
+ vha = shost_priv(host);
+ memset(vha, 0, sizeof(scsi_qla_host_t));
+
+ vha->host = host;
+ vha->host_no = host->host_no;
+ vha->hw = ha;
+
+ INIT_LIST_HEAD(&vha->vp_fcports);
+ INIT_LIST_HEAD(&vha->work_list);
+ INIT_LIST_HEAD(&vha->list);
+
+ spin_lock_init(&vha->work_lock);
+
+ sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ ql_dbg(ql_dbg_init, vha, 0x0041,
+ "Allocated the host=%p hw=%p vha=%p dev_name=%s",
+ vha->host, vha->hw, vha,
+ dev_name(&(ha->pdev->dev)));
+
+ return vha;
+
+fail:
+ return vha;
+}
+
+static struct qla_work_evt *
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
+{
+ struct qla_work_evt *e;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
+
+ e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
+ if (!e) {
+ QLA_VHA_MARK_NOT_BUSY(vha);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&e->list);
+ e->type = type;
+ e->flags = QLA_EVT_FLAG_FREE;
+ return e;
+}
+
+static int
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ list_add_tail(&e->list, &vha->work_list);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ qla2xxx_wake_dpc(vha);
+
+ return QLA_SUCCESS;
+}
+
+int
+qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
+ u32 data)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.aen.code = code;
+ e->u.aen.data = data;
+ return qla2x00_post_work(vha, e);
+}
+
+int
+qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
+ return qla2x00_post_work(vha, e);
+}
+
+#define qla2x00_post_async_work(name, type) \
+int qla2x00_post_async_##name##_work( \
+ struct scsi_qla_host *vha, \
+ fc_port_t *fcport, uint16_t *data) \
+{ \
+ struct qla_work_evt *e; \
+ \
+ e = qla2x00_alloc_work(vha, type); \
+ if (!e) \
+ return QLA_FUNCTION_FAILED; \
+ \
+ e->u.logio.fcport = fcport; \
+ if (data) { \
+ e->u.logio.data[0] = data[0]; \
+ e->u.logio.data[1] = data[1]; \
+ } \
+ return qla2x00_post_work(vha, e); \
+}
+
+qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
+qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
+qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
+qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
+qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
+
+int
+qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.uevent.code = code;
+ return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
+{
+ char event_string[40];
+ char *envp[] = { event_string, NULL };
+
+ switch (code) {
+ case QLA_UEVENT_CODE_FW_DUMP:
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ vha->host_no);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
+int
+qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
+ uint32_t *data, int cnt)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.aenfx.evtcode = evtcode;
+ e->u.aenfx.count = cnt;
+ memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
+ return qla2x00_post_work(vha, e);
+}
+
+void
+qla2x00_do_work(struct scsi_qla_host *vha)
+{
+ struct qla_work_evt *e, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ list_splice_init(&vha->work_list, &work);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &work, list) {
+ list_del_init(&e->list);
+
+ switch (e->type) {
+ case QLA_EVT_AEN:
+ fc_host_post_event(vha->host, fc_get_event_number(),
+ e->u.aen.code, e->u.aen.data);
+ break;
+ case QLA_EVT_IDC_ACK:
+ qla81xx_idc_ack(vha, e->u.idc_ack.mb);
+ break;
+ case QLA_EVT_ASYNC_LOGIN:
+ qla2x00_async_login(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_LOGIN_DONE:
+ qla2x00_async_login_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_LOGOUT:
+ qla2x00_async_logout(vha, e->u.logio.fcport);
+ break;
+ case QLA_EVT_ASYNC_LOGOUT_DONE:
+ qla2x00_async_logout_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_ADISC:
+ qla2x00_async_adisc(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_ADISC_DONE:
+ qla2x00_async_adisc_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_UEVENT:
+ qla2x00_uevent_emit(vha, e->u.uevent.code);
+ break;
+ case QLA_EVT_AENFX:
+ qlafx00_process_aen(vha, e);
+ break;
+ }
+ if (e->flags & QLA_EVT_FLAG_FREE)
+ kfree(e);
+
+ /* For each work completed decrement vha ref count */
+ QLA_VHA_MARK_NOT_BUSY(vha);
+ }
+}
+
+/* Relogins all the fcports of a vport
+ * Context: dpc thread
+ */
+void qla2x00_relogin(struct scsi_qla_host *vha)
+{
+ fc_port_t *fcport;
+ int status;
+ uint16_t next_loopid = 0;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t data[2];
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
+ if (atomic_read(&fcport->state) != FCS_ONLINE &&
+ fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
+ fcport->login_retry--;
+ if (fcport->flags & FCF_FABRIC_DEVICE) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ ha->isp_ops->fabric_logout(vha,
+ fcport->loop_id,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid =
+ ha->min_external_loopid;
+ status = qla2x00_find_new_loop_id(
+ vha, fcport);
+ if (status != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
+ }
+
+ if (IS_ALOGIO_CAPABLE(ha)) {
+ fcport->flags |= FCF_ASYNC_SENT;
+ data[0] = 0;
+ data[1] = QLA_LOGIO_LOGIN_RETRIED;
+ status = qla2x00_post_async_login_work(
+ vha, fcport, data);
+ if (status == QLA_SUCCESS)
+ continue;
+ /* Attempt a retry. */
+ status = 1;
+ } else {
+ status = qla2x00_fabric_login(vha,
+ fcport, &next_loopid);
+ if (status == QLA_SUCCESS) {
+ int status2;
+ uint8_t opts;
+
+ opts = 0;
+ if (fcport->flags &
+ FCF_FCP2_DEVICE)
+ opts |= BIT_1;
+ status2 =
+ qla2x00_get_port_database(
+ vha, fcport, opts);
+ if (status2 != QLA_SUCCESS)
+ status = 1;
+ }
+ }
+ } else
+ status = qla2x00_local_device_login(vha,
+ fcport);
+
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id = fcport->loop_id;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
+
+ qla2x00_update_fcport(vha, fcport);
+
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* retry the login again */
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry, fcport->loop_id);
+ } else {
+ fcport->login_retry = 0;
+ }
+
+ if (fcport->login_retry == 0 && status != QLA_SUCCESS)
+ qla2x00_clear_loop_id(fcport);
+ }
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+ }
+}
+
+/* Schedule work on any of the dpc-workqueues */
+void
+qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
+{
+ struct qla_hw_data *ha = base_vha->hw;
+
+ switch (work_code) {
+ case MBA_IDC_AEN: /* 0x8200 */
+ if (ha->dpc_lp_wq)
+ queue_work(ha->dpc_lp_wq, &ha->idc_aen);
+ break;
+
+ case QLA83XX_NIC_CORE_RESET: /* 0x1 */
+ if (!ha->flags.nic_core_reset_hdlr_active) {
+ if (ha->dpc_hp_wq)
+ queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
+ } else
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
+ "NIC Core reset is already active. Skip "
+ "scheduling it again.\n");
+ break;
+ case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
+ if (ha->dpc_hp_wq)
+ queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
+ break;
+ case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
+ if (ha->dpc_hp_wq)
+ queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
+ break;
+ default:
+ ql_log(ql_log_warn, base_vha, 0xb05f,
+ "Unknown work-code=0x%x.\n", work_code);
+ }
+
+ return;
+}
+
+/* Work: Perform NIC Core Unrecoverable state handling */
+void
+qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, nic_core_unrecoverable);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state = 0;
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ qla83xx_reset_ownership(base_vha);
+ if (ha->flags.nic_core_reset_owner) {
+ ha->flags.nic_core_reset_owner = 0;
+ qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
+ qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+ }
+ qla83xx_idc_unlock(base_vha, 0);
+}
+
+/* Work: Execute IDC state handler */
+void
+qla83xx_idc_state_handler_work(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, idc_state_handler);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state = 0;
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ if (dev_state == QLA8XXX_DEV_FAILED ||
+ dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
+ qla83xx_idc_state_handler(base_vha);
+ qla83xx_idc_unlock(base_vha, 0);
+}
+
+static int
+qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
+{
+ int rval = QLA_SUCCESS;
+ unsigned long heart_beat_wait = jiffies + (1 * HZ);
+ uint32_t heart_beat_counter1, heart_beat_counter2;
+
+ do {
+ if (time_after(jiffies, heart_beat_wait)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
+ "Nic Core f/w is not alive.\n");
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+ &heart_beat_counter1);
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(100);
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+ &heart_beat_counter2);
+ qla83xx_idc_unlock(base_vha, 0);
+ } while (heart_beat_counter1 == heart_beat_counter2);
+
+ return rval;
+}
+
+/* Work: Perform NIC Core Reset handling */
+void
+qla83xx_nic_core_reset_work(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, nic_core_reset);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state = 0;
+
+ if (IS_QLA2031(ha)) {
+ if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
+ ql_log(ql_log_warn, base_vha, 0xb081,
+ "Failed to dump mctp\n");
+ return;
+ }
+
+ if (!ha->flags.nic_core_reset_hdlr_active) {
+ if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+ &dev_state);
+ qla83xx_idc_unlock(base_vha, 0);
+ if (dev_state != QLA8XXX_DEV_NEED_RESET) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
+ "Nic Core f/w is alive.\n");
+ return;
+ }
+ }
+
+ ha->flags.nic_core_reset_hdlr_active = 1;
+ if (qla83xx_nic_core_reset(base_vha)) {
+ /* NIC Core reset failed. */
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
+ "NIC Core reset failed.\n");
+ }
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ }
+}
+
+/* Work: Handle 8200 IDC aens */
+void
+qla83xx_service_idc_aen(struct work_struct *work)
+{
+ struct qla_hw_data *ha =
+ container_of(work, struct qla_hw_data, idc_aen);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ uint32_t dev_state, idc_control;
+
+ qla83xx_idc_lock(base_vha, 0);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
+ qla83xx_idc_unlock(base_vha, 0);
+ if (dev_state == QLA8XXX_DEV_NEED_RESET) {
+ if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
+ "Application requested NIC Core Reset.\n");
+ qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+ } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
+ QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
+ "Other protocol driver requested NIC Core Reset.\n");
+ qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+ }
+ } else if (dev_state == QLA8XXX_DEV_FAILED ||
+ dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+ qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+ }
+}
+
+static void
+qla83xx_wait_logic(void)
+{
+ int i;
+
+ /* Yield CPU */
+ if (!in_interrupt()) {
+ /*
+ * Wait about 200ms before retrying again.
+ * This controls the number of retries for single
+ * lock operation.
+ */
+ msleep(100);
+ schedule();
+ } else {
+ for (i = 0; i < 20; i++)
+ cpu_relax(); /* This a nop instr on i386 */
+ }
+}
+
+static int
+qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
+{
+ int rval;
+ uint32_t data;
+ uint32_t idc_lck_rcvry_stage_mask = 0x3;
+ uint32_t idc_lck_rcvry_owner_mask = 0x3c;
+ struct qla_hw_data *ha = base_vha->hw;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+ "Trying force recovery of the IDC lock.\n");
+
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
+ if (rval)
+ return rval;
+
+ if ((data & idc_lck_rcvry_stage_mask) > 0) {
+ return QLA_SUCCESS;
+ } else {
+ data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
+ rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+ data);
+ if (rval)
+ return rval;
+
+ msleep(200);
+
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+ &data);
+ if (rval)
+ return rval;
+
+ if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
+ data &= (IDC_LOCK_RECOVERY_STAGE2 |
+ ~(idc_lck_rcvry_stage_mask));
+ rval = qla83xx_wr_reg(base_vha,
+ QLA83XX_IDC_LOCK_RECOVERY, data);
+ if (rval)
+ return rval;
+
+ /* Forcefully perform IDC UnLock */
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
+ &data);
+ if (rval)
+ return rval;
+ /* Clear lock-id by setting 0xff */
+ rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ 0xff);
+ if (rval)
+ return rval;
+ /* Clear lock-recovery by setting 0x0 */
+ rval = qla83xx_wr_reg(base_vha,
+ QLA83XX_IDC_LOCK_RECOVERY, 0x0);
+ if (rval)
+ return rval;
+ } else
+ return QLA_SUCCESS;
+ }
+
+ return rval;
+}
+
+static int
+qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t o_drv_lockid, n_drv_lockid;
+ unsigned long lock_recovery_timeout;
+
+ lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
+retry_lockid:
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
+ if (rval)
+ goto exit;
+
+ /* MAX wait time before forcing IDC Lock recovery = 2 secs */
+ if (time_after_eq(jiffies, lock_recovery_timeout)) {
+ if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
+ return QLA_SUCCESS;
+ else
+ return QLA_FUNCTION_FAILED;
+ }
+
+ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
+ if (rval)
+ goto exit;
+
+ if (o_drv_lockid == n_drv_lockid) {
+ qla83xx_wait_logic();
+ goto retry_lockid;
+ } else
+ return QLA_SUCCESS;
+
+exit:
+ return rval;
+}
+
+void
+qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+ uint16_t options = (requester_id << 15) | BIT_6;
+ uint32_t data;
+ uint32_t lock_owner;
+ struct qla_hw_data *ha = base_vha->hw;
+
+ /* IDC-lock implementation using driver-lock/lock-id remote registers */
+retry_lock:
+ if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
+ == QLA_SUCCESS) {
+ if (data) {
+ /* Setting lock-id to our function-number */
+ qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ ha->portnum);
+ } else {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ &lock_owner);
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
+ "Failed to acquire IDC lock, acquired by %d, "
+ "retrying...\n", lock_owner);
+
+ /* Retry/Perform IDC-Lock recovery */
+ if (qla83xx_idc_lock_recovery(base_vha)
+ == QLA_SUCCESS) {
+ qla83xx_wait_logic();
+ goto retry_lock;
+ } else
+ ql_log(ql_log_warn, base_vha, 0xb075,
+ "IDC Lock recovery FAILED.\n");
+ }
+
+ }
+
+ return;
+
+ /* XXX: IDC-lock implementation using access-control mbx */
+retry_lock2:
+ if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
+ "Failed to acquire IDC lock. retrying...\n");
+ /* Retry/Perform IDC-Lock recovery */
+ if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
+ qla83xx_wait_logic();
+ goto retry_lock2;
+ } else
+ ql_log(ql_log_warn, base_vha, 0xb076,
+ "IDC Lock recovery FAILED.\n");
+ }
+
+ return;
+}
+
+void
+qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+ uint16_t options = (requester_id << 15) | BIT_7, retry;
+ uint32_t data;
+ struct qla_hw_data *ha = base_vha->hw;
+
+ /* IDC-unlock implementation using driver-unlock/lock-id
+ * remote registers
+ */
+ retry = 0;
+retry_unlock:
+ if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
+ == QLA_SUCCESS) {
+ if (data == ha->portnum) {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
+ /* Clearing lock-id by setting 0xff */
+ qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
+ } else if (retry < 10) {
+ /* SV: XXX: IDC unlock retrying needed here? */
+
+ /* Retry for IDC-unlock */
+ qla83xx_wait_logic();
+ retry++;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
+ "Failed to release IDC lock, retyring=%d\n", retry);
+ goto retry_unlock;
+ }
+ } else if (retry < 10) {
+ /* Retry for IDC-unlock */
+ qla83xx_wait_logic();
+ retry++;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
+ "Failed to read drv-lockid, retyring=%d\n", retry);
+ goto retry_unlock;
+ }
+
+ return;
+
+ /* XXX: IDC-unlock implementation using access-control mbx */
+ retry = 0;
+retry_unlock2:
+ if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+ if (retry < 10) {
+ /* Retry for IDC-unlock */
+ qla83xx_wait_logic();
+ retry++;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
+ "Failed to release IDC lock, retyring=%d\n", retry);
+ goto retry_unlock2;
+ }
+ }
+
+ return;
+}
+
+int
+__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_presence;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (rval == QLA_SUCCESS) {
+ drv_presence |= (1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+ drv_presence);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_set_drv_presence(vha);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+int
+__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_presence;
+
+ rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if (rval == QLA_SUCCESS) {
+ drv_presence &= ~(1 << ha->portnum);
+ rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+ drv_presence);
+ }
+
+ return rval;
+}
+
+int
+qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+
+ qla83xx_idc_lock(vha, 0);
+ rval = __qla83xx_clear_drv_presence(vha);
+ qla83xx_idc_unlock(vha, 0);
+
+ return rval;
+}
+
+static void
+qla83xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t drv_ack, drv_presence;
+ unsigned long ack_timeout;
+
+ /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
+ ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+ while (1) {
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ if ((drv_ack & drv_presence) == drv_presence)
+ break;
+
+ if (time_after_eq(jiffies, ack_timeout)) {
+ ql_log(ql_log_warn, vha, 0xb067,
+ "RESET ACK TIMEOUT! drv_presence=0x%x "
+ "drv_ack=0x%x\n", drv_presence, drv_ack);
+ /*
+ * The function(s) which did not ack in time are forced
+ * to withdraw any further participation in the IDC
+ * reset.
+ */
+ if (drv_ack != drv_presence)
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+ drv_ack);
+ break;
+ }
+
+ qla83xx_idc_unlock(vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(vha, 0);
+ }
+
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
+ ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
+}
+
+static int
+qla83xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t idc_control;
+
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
+ ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
+
+ /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
+ __qla83xx_get_idc_control(vha, &idc_control);
+ idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
+ __qla83xx_set_idc_control(vha, 0);
+
+ qla83xx_idc_unlock(vha, 0);
+ rval = qla83xx_restart_nic_firmware(vha);
+ qla83xx_idc_lock(vha, 0);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb06a,
+ "Failed to restart NIC f/w.\n");
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
+ ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
+ } else {
+ ql_dbg(ql_dbg_p3p, vha, 0xb06c,
+ "Success in restarting nic f/w.\n");
+ qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
+ ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
+ }
+
+ return rval;
+}
+
+/* Assumes idc_lock always held on entry */
+int
+qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
+{
+ struct qla_hw_data *ha = base_vha->hw;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ uint32_t dev_state;
+
+ /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_warn, base_vha, 0xb06e,
+ "Initialization TIMEOUT!\n");
+ /* Init timeout. Disable further NIC Core
+ * communication.
+ */
+ qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ql_log(ql_log_info, base_vha, 0xb06f,
+ "HW State: FAILED.\n");
+ }
+
+ qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ if (ha->flags.nic_core_reset_owner)
+ qla83xx_idc_audit(base_vha,
+ IDC_AUDIT_COMPLETION);
+ ha->flags.nic_core_reset_owner = 0;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
+ "Reset_owner reset by 0x%x.\n",
+ ha->portnum);
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ if (ha->flags.nic_core_reset_owner)
+ rval = qla83xx_device_bootstrap(base_vha);
+ else {
+ /* Wait for AEN to change device-state */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ }
+ break;
+ case QLA8XXX_DEV_INITIALIZING:
+ /* Wait for AEN to change device-state */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
+ qla83xx_need_reset_handler(base_vha);
+ else {
+ /* Wait for AEN to change device-state */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ }
+ /* reset timeout value after need reset handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_dev_init_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* XXX: DEBUG for now */
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ /* XXX: DEBUG for now */
+ if (ha->flags.quiesce_owner)
+ goto exit;
+
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ dev_init_timeout = jiffies +
+ (ha->fcoe_dev_init_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ if (ha->flags.nic_core_reset_owner)
+ qla83xx_idc_audit(base_vha,
+ IDC_AUDIT_COMPLETION);
+ ha->flags.nic_core_reset_owner = 0;
+ __qla83xx_clear_drv_presence(base_vha);
+ qla83xx_idc_unlock(base_vha, 0);
+ qla8xxx_dev_failed_handler(base_vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla83xx_idc_lock(base_vha, 0);
+ goto exit;
+ case QLA8XXX_BAD_VALUE:
+ qla83xx_idc_unlock(base_vha, 0);
+ msleep(1000);
+ qla83xx_idc_lock(base_vha, 0);
+ break;
+ default:
+ ql_log(ql_log_warn, base_vha, 0xb071,
+ "Unknown Device State: %x.\n", dev_state);
+ qla83xx_idc_unlock(base_vha, 0);
+ qla8xxx_dev_failed_handler(base_vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla83xx_idc_lock(base_vha, 0);
+ goto exit;
+ }
+ }
+
+exit:
+ return rval;
+}
+
+void
+qla2x00_disable_board_on_pci_error(struct work_struct *work)
+{
+ struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
+ board_disable);
+ struct pci_dev *pdev = ha->pdev;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ ql_log(ql_log_warn, base_vha, 0x015b,
+ "Disabling adapter.\n");
+
+ set_bit(UNLOADING, &base_vha->dpc_flags);
+
+ qla2x00_delete_all_vps(ha, base_vha);
+
+ qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+ qla2x00_dfs_remove(base_vha);
+
+ qla84xx_put_chip(base_vha);
+
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+
+ base_vha->flags.online = 0;
+
+ qla2x00_destroy_deferred_work(ha);
+
+ /*
+ * Do not try to stop beacon blink as it will issue a mailbox
+ * command.
+ */
+ qla2x00_free_sysfs_attr(base_vha, false);
+
+ fc_remove_host(base_vha->host);
+
+ scsi_remove_host(base_vha->host);
+
+ base_vha->flags.init_done = 0;
+ qla25xx_delete_queues(base_vha);
+ qla2x00_free_irqs(base_vha);
+ qla2x00_free_fcports(base_vha);
+ qla2x00_mem_free(ha);
+ qla82xx_md_free(base_vha);
+ qla2x00_free_queues(ha);
+
+ qla2x00_unmap_iobases(ha);
+
+ pci_release_selected_regions(ha->pdev, ha->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+
+ /*
+ * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
+ */
+}
+
+/**************************************************************************
+* qla2x00_do_dpc
+* This kernel thread is a task that is schedule by the interrupt handler
+* to perform the background processing for interrupts.
+*
+* Notes:
+* This task always run in the context of a kernel thread. It
+* is kick-off by the driver's detect code and starts up
+* up one per adapter. It immediately goes to sleep and waits for
+* some fibre event. When either the interrupt handler or
+* the timer routine detects a event it will one of the task
+* bits then wake us up.
+**************************************************************************/
+static int
+qla2x00_do_dpc(void *data)
+{
+ int rval;
+ scsi_qla_host_t *base_vha;
+ struct qla_hw_data *ha;
+
+ ha = (struct qla_hw_data *)data;
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ set_user_nice(current, MIN_NICE);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
+ "DPC handler sleeping.\n");
+
+ schedule();
+
+ if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+ goto end_loop;
+
+ if (ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
+ "eeh_busy=%d.\n", ha->flags.eeh_busy);
+ goto end_loop;
+ }
+
+ ha->dpc_active = 1;
+
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+ "DPC handler waking up, dpc_flags=0x%lx.\n",
+ base_vha->dpc_flags);
+
+ qla2x00_do_work(base_vha);
+
+ if (IS_P3P_TYPE(ha)) {
+ if (IS_QLA8044(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla8044_idc_lock(ha);
+ qla8044_wr_direct(base_vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ qla8044_idc_unlock(ha);
+ ql_log(ql_log_info, base_vha, 0x4004,
+ "HW State: FAILED.\n");
+ qla8044_device_state_handler(base_vha);
+ continue;
+ }
+
+ } else {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ ql_log(ql_log_info, base_vha, 0x0151,
+ "HW State: FAILED.\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
+ }
+
+ if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
+ "FCoE context reset scheduled.\n");
+ if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags))) {
+ if (qla82xx_fcoe_ctx_reset(base_vha)) {
+ /* FCoE-ctx reset failed.
+ * Escalate to chip-reset
+ */
+ set_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
+ "FCoE context reset end.\n");
+ }
+ } else if (IS_QLAFX00(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
+ "Firmware Reset Recovery\n");
+ if (qlafx00_reset_initialize(base_vha)) {
+ /* Failed. Abort isp later. */
+ if (!test_bit(UNLOADING,
+ &base_vha->dpc_flags)) {
+ set_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha,
+ 0x4021,
+ "Reset Recovery Failed\n");
+ }
+ }
+ }
+
+ if (test_and_clear_bit(FX00_TARGET_SCAN,
+ &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
+ "ISPFx00 Target Scan scheduled\n");
+ if (qlafx00_rescan_isp(base_vha)) {
+ if (!test_bit(UNLOADING,
+ &base_vha->dpc_flags))
+ set_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
+ "ISPFx00 Target Scan Failed\n");
+ }
+ ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
+ "ISPFx00 Target Scan End\n");
+ }
+ if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
+ &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
+ "ISPFx00 Host Info resend scheduled\n");
+ qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport,
+ FXDISC_REG_HOST_INFO);
+ }
+ }
+
+ if (test_and_clear_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
+ if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags))) {
+
+ if (ha->isp_ops->abort_isp(base_vha)) {
+ /* failed. retry later */
+ set_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+ "ISP abort end.\n");
+ }
+
+ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
+ &base_vha->dpc_flags)) {
+ qla2x00_update_fcports(base_vha);
+ }
+
+ if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+ int ret;
+ ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+ if (ret != QLA_SUCCESS)
+ ql_log(ql_log_warn, base_vha, 0x121,
+ "Failed to enable receiving of RSCN "
+ "requests: 0x%x.\n", ret);
+ clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+ }
+
+ if (IS_QLAFX00(ha))
+ goto loop_resync_check;
+
+ if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
+ "Quiescence mode scheduled.\n");
+ if (IS_P3P_TYPE(ha)) {
+ if (IS_QLA82XX(ha))
+ qla82xx_device_state_handler(base_vha);
+ if (IS_QLA8044(ha))
+ qla8044_device_state_handler(base_vha);
+ clear_bit(ISP_QUIESCE_NEEDED,
+ &base_vha->dpc_flags);
+ if (!ha->flags.quiesce_owner) {
+ qla2x00_perform_loop_resync(base_vha);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_qsnt_ready(
+ base_vha);
+ qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_qsnt_ready(
+ base_vha);
+ qla8044_idc_unlock(ha);
+ }
+ }
+ } else {
+ clear_bit(ISP_QUIESCE_NEEDED,
+ &base_vha->dpc_flags);
+ qla2x00_quiesce_io(base_vha);
+ }
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
+ "Quiescence mode end.\n");
+ }
+
+ if (test_and_clear_bit(RESET_MARKER_NEEDED,
+ &base_vha->dpc_flags) &&
+ (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
+ "Reset marker scheduled.\n");
+ qla2x00_rst_aen(base_vha);
+ clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
+ "Reset marker end.\n");
+ }
+
+ /* Retry each device up to login retry count */
+ if ((test_and_clear_bit(RELOGIN_NEEDED,
+ &base_vha->dpc_flags)) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
+ atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
+ qla2x00_relogin(base_vha);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
+ "Relogin end.\n");
+ }
+loop_resync_check:
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
+ "Loop resync scheduled.\n");
+
+ if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
+ &base_vha->dpc_flags))) {
+
+ rval = qla2x00_loop_resync(base_vha);
+
+ clear_bit(LOOP_RESYNC_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
+ "Loop resync end.\n");
+ }
+
+ if (IS_QLAFX00(ha))
+ goto intr_on_check;
+
+ if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
+ atomic_read(&base_vha->loop_state) == LOOP_READY) {
+ clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_flash_npiv_conf(base_vha);
+ }
+
+intr_on_check:
+ if (!ha->interrupts_on)
+ ha->isp_ops->enable_intrs(ha);
+
+ if (test_and_clear_bit(BEACON_BLINK_NEEDED,
+ &base_vha->dpc_flags)) {
+ if (ha->beacon_blink_led == 1)
+ ha->isp_ops->beacon_blink(base_vha);
+ }
+
+ if (!IS_QLAFX00(ha))
+ qla2x00_do_dpc_all_vps(base_vha);
+
+ ha->dpc_active = 0;
+end_loop:
+ set_current_state(TASK_INTERRUPTIBLE);
+ } /* End of while(1) */
+ __set_current_state(TASK_RUNNING);
+
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
+ "DPC handler exiting.\n");
+
+ /*
+ * Make sure that nobody tries to wake us up again.
+ */
+ ha->dpc_active = 0;
+
+ /* Cleanup any residual CTX SRBs. */
+ qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+ return 0;
+}
+
+void
+qla2xxx_wake_dpc(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct task_struct *t = ha->dpc_thread;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
+ wake_up_process(t);
+}
+
+/*
+* qla2x00_rst_aen
+* Processes asynchronous reset.
+*
+* Input:
+* ha = adapter block pointer.
+*/
+static void
+qla2x00_rst_aen(scsi_qla_host_t *vha)
+{
+ if (vha->flags.online && !vha->flags.reset_active &&
+ !atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
+ do {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ /*
+ * Issue marker command only when we are going to start
+ * the I/O.
+ */
+ vha->marker_needed = 1;
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
+ }
+}
+
+/**************************************************************************
+* qla2x00_timer
+*
+* Description:
+* One second timer
+*
+* Context: Interrupt
+***************************************************************************/
+void
+qla2x00_timer(scsi_qla_host_t *vha)
+{
+ unsigned long cpu_flags = 0;
+ int start_dpc = 0;
+ int index;
+ srb_t *sp;
+ uint16_t w;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+
+ if (ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_timer, vha, 0x6000,
+ "EEH = %d, restarting timer.\n",
+ ha->flags.eeh_busy);
+ qla2x00_restart_timer(vha, WATCH_INTERVAL);
+ return;
+ }
+
+ /*
+ * Hardware read to raise pending EEH errors during mailbox waits. If
+ * the read returns -1 then disable the board.
+ */
+ if (!pci_channel_offline(ha->pdev)) {
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+ qla2x00_check_reg16_for_disconnect(vha, w);
+ }
+
+ /* Make sure qla82xx_watchdog is run only for physical port */
+ if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
+ if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
+ start_dpc++;
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+ else if (IS_QLA8044(ha))
+ qla8044_watchdog(vha);
+ }
+
+ if (!vha->vp_idx && IS_QLAFX00(ha))
+ qlafx00_timer_routine(vha);
+
+ /* Loop down handler. */
+ if (atomic_read(&vha->loop_down_timer) > 0 &&
+ !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+ !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
+ && vha->flags.online) {
+
+ if (atomic_read(&vha->loop_down_timer) ==
+ vha->loop_down_abort_time) {
+
+ ql_log(ql_log_info, vha, 0x6008,
+ "Loop down - aborting the queues before time expires.\n");
+
+ if (!IS_QLA2100(ha) && vha->link_down_timeout)
+ atomic_set(&vha->loop_state, LOOP_DEAD);
+
+ /*
+ * Schedule an ISP abort to return any FCP2-device
+ * commands.
+ */
+ /* NPIV - scan physical port only */
+ if (!vha->vp_idx) {
+ spin_lock_irqsave(&ha->hardware_lock,
+ cpu_flags);
+ req = ha->req_q_map[0];
+ for (index = 1;
+ index < req->num_outstanding_cmds;
+ index++) {
+ fc_port_t *sfcp;
+
+ sp = req->outstanding_cmds[index];
+ if (!sp)
+ continue;
+ if (sp->type != SRB_SCSI_CMD)
+ continue;
+ sfcp = sp->fcport;
+ if (!(sfcp->flags & FCF_FCP2_DEVICE))
+ continue;
+
+ if (IS_QLA82XX(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ cpu_flags);
+ }
+ start_dpc++;
+ }
+
+ /* if the loop has been down for 4 minutes, reinit adapter */
+ if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
+ if (!(vha->device_flags & DFLG_NO_CABLE)) {
+ ql_log(ql_log_warn, vha, 0x6009,
+ "Loop down - aborting ISP.\n");
+
+ if (IS_QLA82XX(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ }
+ ql_dbg(ql_dbg_timer, vha, 0x600a,
+ "Loop down - seconds remaining %d.\n",
+ atomic_read(&vha->loop_down_timer));
+ }
+ /* Check if beacon LED needs to be blinked for physical host only */
+ if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
+ /* There is no beacon_blink function for ISP82xx */
+ if (!IS_P3P_TYPE(ha)) {
+ set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
+ }
+
+ /* Process any deferred work. */
+ if (!list_empty(&vha->work_list))
+ start_dpc++;
+
+ /* Schedule the DPC routine if needed */
+ if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
+ start_dpc ||
+ test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
+ test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+ test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ ql_dbg(ql_dbg_timer, vha, 0x600b,
+ "isp_abort_needed=%d loop_resync_needed=%d "
+ "fcport_update_needed=%d start_dpc=%d "
+ "reset_marker_needed=%d",
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
+ start_dpc,
+ test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+ ql_dbg(ql_dbg_timer, vha, 0x600c,
+ "beacon_blink_needed=%d isp_unrecoverable=%d "
+ "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
+ "relogin_needed=%d.\n",
+ test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
+ test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ qla2xxx_wake_dpc(vha);
+ }
+
+ qla2x00_restart_timer(vha, WATCH_INTERVAL);
+}
+
+/* Firmware interface routines. */
+
+#define FW_BLOBS 11
+#define FW_ISP21XX 0
+#define FW_ISP22XX 1
+#define FW_ISP2300 2
+#define FW_ISP2322 3
+#define FW_ISP24XX 4
+#define FW_ISP25XX 5
+#define FW_ISP81XX 6
+#define FW_ISP82XX 7
+#define FW_ISP2031 8
+#define FW_ISP8031 9
+#define FW_ISP27XX 10
+
+#define FW_FILE_ISP21XX "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP22XX "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP2300 "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP2322 "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP24XX "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP25XX "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP81XX "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP82XX "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP2031 "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP8031 "/*(DEBLOBBED)*/"
+#define FW_FILE_ISP27XX "/*(DEBLOBBED)*/"
+
+
+static DEFINE_MUTEX(qla_fw_lock);
+
+static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
+ { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
+ { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
+ { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
+ { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
+ { .name = FW_FILE_ISP24XX, },
+ { .name = FW_FILE_ISP25XX, },
+ { .name = FW_FILE_ISP81XX, },
+ { .name = FW_FILE_ISP82XX, },
+ { .name = FW_FILE_ISP2031, },
+ { .name = FW_FILE_ISP8031, },
+ { .name = FW_FILE_ISP27XX, },
+};
+
+struct fw_blob *
+qla2x00_request_firmware(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct fw_blob *blob;
+
+ if (IS_QLA2100(ha)) {
+ blob = &qla_fw_blobs[FW_ISP21XX];
+ } else if (IS_QLA2200(ha)) {
+ blob = &qla_fw_blobs[FW_ISP22XX];
+ } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2300];
+ } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2322];
+ } else if (IS_QLA24XX_TYPE(ha)) {
+ blob = &qla_fw_blobs[FW_ISP24XX];
+ } else if (IS_QLA25XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP25XX];
+ } else if (IS_QLA81XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP81XX];
+ } else if (IS_QLA82XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP82XX];
+ } else if (IS_QLA2031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2031];
+ } else if (IS_QLA8031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP8031];
+ } else if (IS_QLA27XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP27XX];
+ } else {
+ return NULL;
+ }
+
+ mutex_lock(&qla_fw_lock);
+ if (blob->fw)
+ goto out;
+
+ if (reject_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
+ ql_log(ql_log_warn, vha, 0x0063,
+ "Failed to load firmware image (%s).\n", blob->name);
+ blob->fw = NULL;
+ blob = NULL;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&qla_fw_lock);
+ return blob;
+}
+
+static void
+qla2x00_release_firmware(void)
+{
+ int idx;
+
+ mutex_lock(&qla_fw_lock);
+ for (idx = 0; idx < FW_BLOBS; idx++)
+ release_firmware(qla_fw_blobs[idx].fw);
+ mutex_unlock(&qla_fw_lock);
+}
+
+static pci_ers_result_t
+qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_aer, vha, 0x9000,
+ "PCI error detected, state %x.\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ ha->flags.eeh_busy = 0;
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ ha->flags.eeh_busy = 1;
+ /* For ISP82XX complete any pending mailbox cmd */
+ if (IS_QLA82XX(ha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+ qla82xx_clear_pending_mbx(vha);
+ }
+ qla2x00_free_irqs(vha);
+ pci_disable_device(pdev);
+ /* Return back all IOs */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ ha->flags.pci_channel_io_perm_failure = 1;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t
+qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ int risc_paused = 0;
+ uint32_t stat;
+ unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+
+ if (IS_QLA82XX(ha))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)){
+ stat = RD_REG_DWORD(&reg->hccr);
+ if (stat & HCCR_RISC_PAUSE)
+ risc_paused = 1;
+ } else if (IS_QLA23XX(ha)) {
+ stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ if (stat & HSR_RISC_PAUSED)
+ risc_paused = 1;
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ stat = RD_REG_DWORD(&reg24->host_status);
+ if (stat & HSRX_RISC_PAUSED)
+ risc_paused = 1;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (risc_paused) {
+ ql_log(ql_log_info, base_vha, 0x9003,
+ "RISC paused -- mmio_enabled, Dumping firmware.\n");
+ ha->isp_ops->fw_dump(base_vha, 0);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ } else
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static uint32_t
+qla82xx_error_recovery(scsi_qla_host_t *base_vha)
+{
+ uint32_t rval = QLA_FUNCTION_FAILED;
+ uint32_t drv_active = 0;
+ struct qla_hw_data *ha = base_vha->hw;
+ int fn;
+ struct pci_dev *other_pdev = NULL;
+
+ ql_dbg(ql_dbg_aer, base_vha, 0x9006,
+ "Entered %s.\n", __func__);
+
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+ if (base_vha->flags.online) {
+ /* Abort all outstanding commands,
+ * so as to be requeued later */
+ qla2x00_abort_isp_cleanup(base_vha);
+ }
+
+
+ fn = PCI_FUNC(ha->pdev->devfn);
+ while (fn > 0) {
+ fn--;
+ ql_dbg(ql_dbg_aer, base_vha, 0x9007,
+ "Finding pci device at function = 0x%x.\n", fn);
+ other_pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ if (!other_pdev)
+ continue;
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ ql_dbg(ql_dbg_aer, base_vha, 0x9008,
+ "Found PCI func available and enable at 0x%x.\n",
+ fn);
+ pci_dev_put(other_pdev);
+ break;
+ }
+ pci_dev_put(other_pdev);
+ }
+
+ if (!fn) {
+ /* Reset owner */
+ ql_dbg(ql_dbg_aer, base_vha, 0x9009,
+ "This devfn is reset owner = 0x%x.\n",
+ ha->pdev->devfn);
+ qla82xx_idc_lock(ha);
+
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_INITIALIZING);
+
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_IDC_VERSION);
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ ql_dbg(ql_dbg_aer, base_vha, 0x900a,
+ "drv_active = 0x%x.\n", drv_active);
+
+ qla82xx_idc_unlock(ha);
+ /* Reset if device is not already reset
+ * drv_active would be 0 if a reset has already been done
+ */
+ if (drv_active)
+ rval = qla82xx_start_firmware(base_vha);
+ else
+ rval = QLA_SUCCESS;
+ qla82xx_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_info, base_vha, 0x900b,
+ "HW State: FAILED.\n");
+ qla82xx_clear_drv_active(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ } else {
+ ql_log(ql_log_info, base_vha, 0x900c,
+ "HW State: READY.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_READY);
+ qla82xx_idc_unlock(ha);
+ ha->flags.isp82xx_fw_hung = 0;
+ rval = qla82xx_restart_isp(base_vha);
+ qla82xx_idc_lock(ha);
+ /* Clear driver state register */
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+ qla82xx_set_drv_active(base_vha);
+ }
+ qla82xx_idc_unlock(ha);
+ } else {
+ ql_dbg(ql_dbg_aer, base_vha, 0x900d,
+ "This devfn is not reset owner = 0x%x.\n",
+ ha->pdev->devfn);
+ if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
+ QLA8XXX_DEV_READY)) {
+ ha->flags.isp82xx_fw_hung = 0;
+ rval = qla82xx_restart_isp(base_vha);
+ qla82xx_idc_lock(ha);
+ qla82xx_set_drv_active(base_vha);
+ qla82xx_idc_unlock(ha);
+ }
+ }
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+ return rval;
+}
+
+static pci_ers_result_t
+qla2xxx_pci_slot_reset(struct pci_dev *pdev)
+{
+ pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
+ struct rsp_que *rsp;
+ int rc, retries = 10;
+
+ ql_dbg(ql_dbg_aer, base_vha, 0x9004,
+ "Slot Reset.\n");
+
+ /* Workaround: qla2xxx driver which access hardware earlier
+ * needs error state to be pci_channel_io_online.
+ * Otherwise mailbox command timesout.
+ */
+ pdev->error_state = pci_channel_io_normal;
+
+ pci_restore_state(pdev);
+
+ /* pci_restore_state() clears the saved_state flag of the device
+ * save restored state which resets saved_state flag
+ */
+ pci_save_state(pdev);
+
+ if (ha->mem_only)
+ rc = pci_enable_device_mem(pdev);
+ else
+ rc = pci_enable_device(pdev);
+
+ if (rc) {
+ ql_log(ql_log_warn, base_vha, 0x9005,
+ "Can't re-enable PCI device after reset.\n");
+ goto exit_slot_reset;
+ }
+
+ rsp = ha->rsp_q_map[0];
+ if (qla2x00_request_irqs(ha, rsp))
+ goto exit_slot_reset;
+
+ if (ha->isp_ops->pci_config(base_vha))
+ goto exit_slot_reset;
+
+ if (IS_QLA82XX(ha)) {
+ if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
+ ret = PCI_ERS_RESULT_RECOVERED;
+ goto exit_slot_reset;
+ } else
+ goto exit_slot_reset;
+ }
+
+ while (ha->flags.mbox_busy && retries--)
+ msleep(1000);
+
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
+ ret = PCI_ERS_RESULT_RECOVERED;
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+
+exit_slot_reset:
+ ql_dbg(ql_dbg_aer, base_vha, 0x900e,
+ "slot_reset return %x.\n", ret);
+
+ return ret;
+}
+
+static void
+qla2xxx_pci_resume(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
+ int ret;
+
+ ql_dbg(ql_dbg_aer, base_vha, 0x900f,
+ "pci_resume.\n");
+
+ ret = qla2x00_wait_for_hba_online(base_vha);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x9002,
+ "The device failed to resume I/O from slot/link_reset.\n");
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ ha->flags.eeh_busy = 0;
+}
+
+static void
+qla83xx_disable_laser(scsi_qla_host_t *vha)
+{
+ uint32_t reg, data, fn;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
+
+ /* pci func #/port # */
+ ql_dbg(ql_dbg_init, vha, 0x004b,
+ "Disabling Laser for hba: %p\n", vha);
+
+ fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
+ (BIT_15|BIT_14|BIT_13|BIT_12));
+
+ fn = (fn >> 12);
+
+ if (fn & 1)
+ reg = PORT_1_2031;
+ else
+ reg = PORT_0_2031;
+
+ data = LASER_OFF_2031;
+
+ qla83xx_wr_reg(vha, reg, data);
+}
+
+static const struct pci_error_handlers qla2xxx_err_handler = {
+ .error_detected = qla2xxx_pci_error_detected,
+ .mmio_enabled = qla2xxx_pci_mmio_enabled,
+ .slot_reset = qla2xxx_pci_slot_reset,
+ .resume = qla2xxx_pci_resume,
+};
+
+static struct pci_device_id qla2xxx_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
+
+static struct pci_driver qla2xxx_pci_driver = {
+ .name = QLA2XXX_DRIVER_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ .id_table = qla2xxx_pci_tbl,
+ .probe = qla2x00_probe_one,
+ .remove = qla2x00_remove_one,
+ .shutdown = qla2x00_shutdown,
+ .err_handler = &qla2xxx_err_handler,
+};
+
+static const struct file_operations apidev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+/**
+ * qla2x00_module_init - Module initialization.
+ **/
+static int __init
+qla2x00_module_init(void)
+{
+ int ret = 0;
+
+ /* Allocate cache for SRBs. */
+ srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (srb_cachep == NULL) {
+ ql_log(ql_log_fatal, NULL, 0x0001,
+ "Unable to allocate SRB cache...Failing load!.\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize target kmem_cache and mem_pools */
+ ret = qlt_init();
+ if (ret < 0) {
+ kmem_cache_destroy(srb_cachep);
+ return ret;
+ } else if (ret > 0) {
+ /*
+ * If initiator mode is explictly disabled by qlt_init(),
+ * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+ * performing scsi_scan_target() during LOOP UP event.
+ */
+ qla2xxx_transport_functions.disable_target_scan = 1;
+ qla2xxx_transport_vport_functions.disable_target_scan = 1;
+ }
+
+ /* Derive version string. */
+ strcpy(qla2x00_version_str, QLA2XXX_VERSION);
+ if (ql2xextended_error_logging)
+ strcat(qla2x00_version_str, "-debug");
+
+ qla2xxx_transport_template =
+ fc_attach_transport(&qla2xxx_transport_functions);
+ if (!qla2xxx_transport_template) {
+ kmem_cache_destroy(srb_cachep);
+ ql_log(ql_log_fatal, NULL, 0x0002,
+ "fc_attach_transport failed...Failing load!.\n");
+ qlt_exit();
+ return -ENODEV;
+ }
+
+ apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
+ if (apidev_major < 0) {
+ ql_log(ql_log_fatal, NULL, 0x0003,
+ "Unable to register char device %s.\n", QLA2XXX_APIDEV);
+ }
+
+ qla2xxx_transport_vport_template =
+ fc_attach_transport(&qla2xxx_transport_vport_functions);
+ if (!qla2xxx_transport_vport_template) {
+ kmem_cache_destroy(srb_cachep);
+ qlt_exit();
+ fc_release_transport(qla2xxx_transport_template);
+ ql_log(ql_log_fatal, NULL, 0x0004,
+ "fc_attach_transport vport failed...Failing load!.\n");
+ return -ENODEV;
+ }
+ ql_log(ql_log_info, NULL, 0x0005,
+ "QLogic Fibre Channel HBA Driver: %s.\n",
+ qla2x00_version_str);
+ ret = pci_register_driver(&qla2xxx_pci_driver);
+ if (ret) {
+ kmem_cache_destroy(srb_cachep);
+ qlt_exit();
+ fc_release_transport(qla2xxx_transport_template);
+ fc_release_transport(qla2xxx_transport_vport_template);
+ ql_log(ql_log_fatal, NULL, 0x0006,
+ "pci_register_driver failed...ret=%d Failing load!.\n",
+ ret);
+ }
+ return ret;
+}
+
+/**
+ * qla2x00_module_exit - Module cleanup.
+ **/
+static void __exit
+qla2x00_module_exit(void)
+{
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+ pci_unregister_driver(&qla2xxx_pci_driver);
+ qla2x00_release_firmware();
+ kmem_cache_destroy(srb_cachep);
+ qlt_exit();
+ if (ctx_cachep)
+ kmem_cache_destroy(ctx_cachep);
+ fc_release_transport(qla2xxx_transport_template);
+ fc_release_transport(qla2xxx_transport_vport_template);
+}
+
+module_init(qla2x00_module_init);
+module_exit(qla2x00_module_exit);
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLA2XXX_VERSION);
+/*(DEBLOBBED)*/
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
new file mode 100644
index 000000000..2fb7ebfbb
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -0,0 +1,12 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#define MAX_RETRIES_OF_ISP_ABORT 5
+
+/* Max time to wait for the loop to be in LOOP_READY state */
+#define MAX_LOOP_TIMEOUT (60 * 5)
+
+#include "qla_version.h"
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
new file mode 100644
index 000000000..028e8c8a7
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -0,0 +1,3219 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+
+/*
+ * NVRAM support routines
+ */
+
+/**
+ * qla2x00_lock_nvram_access() -
+ * @ha: HA context
+ */
+static void
+qla2x00_lock_nvram_access(struct qla_hw_data *ha)
+{
+ uint16_t data;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
+ data = RD_REG_WORD(&reg->nvram);
+ while (data & NVR_BUSY) {
+ udelay(100);
+ data = RD_REG_WORD(&reg->nvram);
+ }
+
+ /* Lock resource */
+ WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
+ RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ udelay(5);
+ data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ while ((data & BIT_0) == 0) {
+ /* Lock failed */
+ udelay(100);
+ WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
+ RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ udelay(5);
+ data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ }
+ }
+}
+
+/**
+ * qla2x00_unlock_nvram_access() -
+ * @ha: HA context
+ */
+static void
+qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
+ WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
+ RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ }
+}
+
+/**
+ * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
+ * @ha: HA context
+ * @data: Serial interface selector
+ */
+static void
+qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+ NVR_WRT_ENABLE);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+}
+
+/**
+ * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
+ * NVRAM.
+ * @ha: HA context
+ * @nv_cmd: NVRAM command
+ *
+ * Bit definitions for NVRAM command:
+ *
+ * Bit 26 = start bit
+ * Bit 25, 24 = opcode
+ * Bit 23-16 = address
+ * Bit 15-0 = write data
+ *
+ * Returns the word read from nvram @addr.
+ */
+static uint16_t
+qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
+{
+ uint8_t cnt;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint16_t data = 0;
+ uint16_t reg_data;
+
+ /* Send command to NVRAM. */
+ nv_cmd <<= 5;
+ for (cnt = 0; cnt < 11; cnt++) {
+ if (nv_cmd & BIT_31)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ else
+ qla2x00_nv_write(ha, 0);
+ nv_cmd <<= 1;
+ }
+
+ /* Read data from NVRAM. */
+ for (cnt = 0; cnt < 16; cnt++) {
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ data <<= 1;
+ reg_data = RD_REG_WORD(&reg->nvram);
+ if (reg_data & NVR_DATA_IN)
+ data |= BIT_0;
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ }
+
+ /* Deselect chip. */
+ WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+
+ return data;
+}
+
+
+/**
+ * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
+ * request routine to get the word from NVRAM.
+ * @ha: HA context
+ * @addr: Address in NVRAM to read
+ *
+ * Returns the word read from nvram @addr.
+ */
+static uint16_t
+qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
+{
+ uint16_t data;
+ uint32_t nv_cmd;
+
+ nv_cmd = addr << 16;
+ nv_cmd |= NV_READ_OP;
+ data = qla2x00_nvram_request(ha, nv_cmd);
+
+ return (data);
+}
+
+/**
+ * qla2x00_nv_deselect() - Deselect NVRAM operations.
+ * @ha: HA context
+ */
+static void
+qla2x00_nv_deselect(struct qla_hw_data *ha)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+}
+
+/**
+ * qla2x00_write_nvram_word() - Write NVRAM data.
+ * @ha: HA context
+ * @addr: Address in NVRAM to write
+ * @data: word to program
+ */
+static void
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
+{
+ int count;
+ uint16_t word;
+ uint32_t nv_cmd, wait_cnt;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ qla2x00_nv_write(ha, 0);
+ qla2x00_nv_write(ha, 0);
+
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Write data */
+ nv_cmd = (addr << 16) | NV_WRITE_OP;
+ nv_cmd |= data;
+ nv_cmd <<= 5;
+ for (count = 0; count < 27; count++) {
+ if (nv_cmd & BIT_31)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ else
+ qla2x00_nv_write(ha, 0);
+
+ nv_cmd <<= 1;
+ }
+
+ qla2x00_nv_deselect(ha);
+
+ /* Wait for NVRAM to become ready */
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wait_cnt = NVR_WAIT_CNT;
+ do {
+ if (!--wait_cnt) {
+ ql_dbg(ql_dbg_user, vha, 0x708d,
+ "NVRAM didn't go ready...\n");
+ break;
+ }
+ NVRAM_DELAY();
+ word = RD_REG_WORD(&reg->nvram);
+ } while ((word & NVR_DATA_IN) == 0);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Disable writes */
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ for (count = 0; count < 10; count++)
+ qla2x00_nv_write(ha, 0);
+
+ qla2x00_nv_deselect(ha);
+}
+
+static int
+qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
+ uint16_t data, uint32_t tmo)
+{
+ int ret, count;
+ uint16_t word;
+ uint32_t nv_cmd;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ ret = QLA_SUCCESS;
+
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ qla2x00_nv_write(ha, 0);
+ qla2x00_nv_write(ha, 0);
+
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Write data */
+ nv_cmd = (addr << 16) | NV_WRITE_OP;
+ nv_cmd |= data;
+ nv_cmd <<= 5;
+ for (count = 0; count < 27; count++) {
+ if (nv_cmd & BIT_31)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ else
+ qla2x00_nv_write(ha, 0);
+
+ nv_cmd <<= 1;
+ }
+
+ qla2x00_nv_deselect(ha);
+
+ /* Wait for NVRAM to become ready */
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ do {
+ NVRAM_DELAY();
+ word = RD_REG_WORD(&reg->nvram);
+ if (!--tmo) {
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+ } while ((word & NVR_DATA_IN) == 0);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Disable writes */
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ for (count = 0; count < 10; count++)
+ qla2x00_nv_write(ha, 0);
+
+ qla2x00_nv_deselect(ha);
+
+ return ret;
+}
+
+/**
+ * qla2x00_clear_nvram_protection() -
+ * @ha: HA context
+ */
+static int
+qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
+{
+ int ret, stat;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint32_t word, wait_cnt;
+ uint16_t wprot, wprot_old;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ /* Clear NVRAM write protection. */
+ ret = QLA_FUNCTION_FAILED;
+
+ wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
+ stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
+ __constant_cpu_to_le16(0x1234), 100000);
+ wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
+ if (stat != QLA_SUCCESS || wprot != 0x1234) {
+ /* Write enable. */
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ qla2x00_nv_write(ha, 0);
+ qla2x00_nv_write(ha, 0);
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Enable protection register. */
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE);
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Clear protection register (ffff is cleared). */
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Wait for NVRAM to become ready. */
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wait_cnt = NVR_WAIT_CNT;
+ do {
+ if (!--wait_cnt) {
+ ql_dbg(ql_dbg_user, vha, 0x708e,
+ "NVRAM didn't go ready...\n");
+ break;
+ }
+ NVRAM_DELAY();
+ word = RD_REG_WORD(&reg->nvram);
+ } while ((word & NVR_DATA_IN) == 0);
+
+ if (wait_cnt)
+ ret = QLA_SUCCESS;
+ } else
+ qla2x00_write_nvram_word(ha, ha->nvram_base, wprot_old);
+
+ return ret;
+}
+
+static void
+qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint32_t word, wait_cnt;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ if (stat != QLA_SUCCESS)
+ return;
+
+ /* Set NVRAM write protection. */
+ /* Write enable. */
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ qla2x00_nv_write(ha, 0);
+ qla2x00_nv_write(ha, 0);
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Enable protection register. */
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE);
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Enable protection register. */
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE);
+ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+ for (word = 0; word < 8; word++)
+ qla2x00_nv_write(ha, NVR_PR_ENABLE);
+
+ qla2x00_nv_deselect(ha);
+
+ /* Wait for NVRAM to become ready. */
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wait_cnt = NVR_WAIT_CNT;
+ do {
+ if (!--wait_cnt) {
+ ql_dbg(ql_dbg_user, vha, 0x708f,
+ "NVRAM didn't go ready...\n");
+ break;
+ }
+ NVRAM_DELAY();
+ word = RD_REG_WORD(&reg->nvram);
+ } while ((word & NVR_DATA_IN) == 0);
+}
+
+
+/*****************************************************************************/
+/* Flash Manipulation Routines */
+/*****************************************************************************/
+
+static inline uint32_t
+flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
+{
+ return ha->flash_conf_off | faddr;
+}
+
+static inline uint32_t
+flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
+{
+ return ha->flash_data_off | faddr;
+}
+
+static inline uint32_t
+nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
+{
+ return ha->nvram_conf_off | naddr;
+}
+
+static inline uint32_t
+nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
+{
+ return ha->nvram_data_off | naddr;
+}
+
+static uint32_t
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
+{
+ int rval;
+ uint32_t cnt, data;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
+ /* Wait for READ cycle to complete. */
+ rval = QLA_SUCCESS;
+ for (cnt = 3000;
+ (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(10);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ cond_resched();
+ }
+
+ /* TODO: What happens if we time out? */
+ data = 0xDEADDEAD;
+ if (rval == QLA_SUCCESS)
+ data = RD_REG_DWORD(&reg->flash_data);
+
+ return data;
+}
+
+uint32_t *
+qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t dwords)
+{
+ uint32_t i;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Dword reads to flash. */
+ for (i = 0; i < dwords; i++, faddr++)
+ dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+ flash_data_addr(ha, faddr)));
+
+ return dwptr;
+}
+
+static int
+qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
+{
+ int rval;
+ uint32_t cnt;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->flash_data, data);
+ RD_REG_DWORD(&reg->flash_data); /* PCI Posting. */
+ WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
+ /* Wait for Write cycle to complete. */
+ rval = QLA_SUCCESS;
+ for (cnt = 500000; (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(10);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ cond_resched();
+ }
+ return rval;
+}
+
+static void
+qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
+ uint8_t *flash_id)
+{
+ uint32_t ids;
+
+ ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
+ *man_id = LSB(ids);
+ *flash_id = MSB(ids);
+
+ /* Check if man_id and flash_id are valid. */
+ if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) {
+ /* Read information using 0x9f opcode
+ * Device ID, Mfg ID would be read in the format:
+ * <Ext Dev Info><Device ID Part2><Device ID Part 1><Mfg ID>
+ * Example: ATMEL 0x00 01 45 1F
+ * Extract MFG and Dev ID from last two bytes.
+ */
+ ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
+ *man_id = LSB(ids);
+ *flash_id = MSB(ids);
+ }
+}
+
+static int
+qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+{
+ const char *loc, *locations[] = { "DEF", "PCI" };
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *buf, *bcode, last_image;
+ uint16_t cnt, chksum, *wptr;
+ struct qla_flt_location *fltl;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ /*
+ * FLT-location structure resides after the last PCI region.
+ */
+
+ /* Begin with sane defaults. */
+ loc = locations[0];
+ *start = 0;
+ if (IS_QLA24XX_TYPE(ha))
+ *start = FA_FLASH_LAYOUT_ADDR_24;
+ else if (IS_QLA25XX(ha))
+ *start = FA_FLASH_LAYOUT_ADDR;
+ else if (IS_QLA81XX(ha))
+ *start = FA_FLASH_LAYOUT_ADDR_81;
+ else if (IS_P3P_TYPE(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_82;
+ goto end;
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_83;
+ goto end;
+ }
+ /* Begin with first PCI expansion ROM header. */
+ buf = (uint8_t *)req->ring;
+ dcode = (uint32_t *)req->ring;
+ pcihdr = 0;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
+ bcode = buf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
+ goto end;
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+ qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
+ bcode = buf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R')
+ goto end;
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Now verify FLT-location structure. */
+ fltl = (struct qla_flt_location *)req->ring;
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
+ sizeof(struct qla_flt_location) >> 2);
+ if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
+ fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+ goto end;
+
+ wptr = (uint16_t *)req->ring;
+ cnt = sizeof(struct qla_flt_location) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ ql_log(ql_log_fatal, vha, 0x0045,
+ "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
+ buf, sizeof(struct qla_flt_location));
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Good data. Use specified location. */
+ loc = locations[1];
+ *start = (le16_to_cpu(fltl->start_hi) << 16 |
+ le16_to_cpu(fltl->start_lo)) >> 2;
+end:
+ ql_dbg(ql_dbg_init, vha, 0x0046,
+ "FLTL[%s] = 0x%x.\n",
+ loc, *start);
+ return QLA_SUCCESS;
+}
+
+static void
+qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
+{
+ const char *loc, *locations[] = { "DEF", "FLT" };
+ const uint32_t def_fw[] =
+ { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
+ const uint32_t def_boot[] =
+ { FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
+ const uint32_t def_vpd_nvram[] =
+ { FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
+ const uint32_t def_vpd0[] =
+ { 0, 0, FA_VPD0_ADDR_81 };
+ const uint32_t def_vpd1[] =
+ { 0, 0, FA_VPD1_ADDR_81 };
+ const uint32_t def_nvram0[] =
+ { 0, 0, FA_NVRAM0_ADDR_81 };
+ const uint32_t def_nvram1[] =
+ { 0, 0, FA_NVRAM1_ADDR_81 };
+ const uint32_t def_fdt[] =
+ { FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
+ FA_FLASH_DESCR_ADDR_81 };
+ const uint32_t def_npiv_conf0[] =
+ { FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR,
+ FA_NPIV_CONF0_ADDR_81 };
+ const uint32_t def_npiv_conf1[] =
+ { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
+ FA_NPIV_CONF1_ADDR_81 };
+ const uint32_t fcp_prio_cfg0[] =
+ { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25,
+ 0 };
+ const uint32_t fcp_prio_cfg1[] =
+ { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
+ 0 };
+ uint32_t def;
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ uint32_t start;
+ struct qla_flt_header *flt;
+ struct qla_flt_region *region;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ def = 0;
+ if (IS_QLA25XX(ha))
+ def = 1;
+ else if (IS_QLA81XX(ha))
+ def = 2;
+
+ /* Assign FCP prio region since older adapters may not have FLT, or
+ FCP prio region in it's FLT.
+ */
+ ha->flt_region_fcp_prio = (ha->port_no == 0) ?
+ fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
+
+ ha->flt_region_flt = flt_addr;
+ wptr = (uint16_t *)req->ring;
+ flt = (struct qla_flt_header *)req->ring;
+ region = (struct qla_flt_region *)&flt[1];
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+ flt_addr << 2, OPTROM_BURST_SIZE);
+ if (*wptr == __constant_cpu_to_le16(0xffff))
+ goto no_flash_data;
+ if (flt->version != __constant_cpu_to_le16(1)) {
+ ql_log(ql_log_warn, vha, 0x0047,
+ "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ le16_to_cpu(flt->checksum));
+ goto no_flash_data;
+ }
+
+ cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ ql_log(ql_log_fatal, vha, 0x0048,
+ "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ le16_to_cpu(flt->checksum));
+ goto no_flash_data;
+ }
+
+ loc = locations[1];
+ cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+ for ( ; cnt; cnt--, region++) {
+ /* Store addresses as DWORD offsets. */
+ start = le32_to_cpu(region->start) >> 2;
+ ql_dbg(ql_dbg_init, vha, 0x0049,
+ "FLT[%02x]: start=0x%x "
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
+ start, le32_to_cpu(region->end) >> 2,
+ le32_to_cpu(region->size));
+
+ switch (le32_to_cpu(region->code) & 0xff) {
+ case FLT_REG_FCOE_FW:
+ if (!IS_QLA8031(ha))
+ break;
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_FW:
+ if (IS_QLA8031(ha))
+ break;
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_BOOT_CODE:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_VPD_0:
+ if (IS_QLA8031(ha))
+ break;
+ ha->flt_region_vpd_nvram = start;
+ if (IS_P3P_TYPE(ha))
+ break;
+ if (ha->port_no == 0)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_1:
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+ break;
+ if (ha->port_no == 1)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_2:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_3:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 3)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_NVRAM_0:
+ if (IS_QLA8031(ha))
+ break;
+ if (ha->port_no == 0)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_1:
+ if (IS_QLA8031(ha))
+ break;
+ if (ha->port_no == 1)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_2:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_3:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 3)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_FDT:
+ ha->flt_region_fdt = start;
+ break;
+ case FLT_REG_NPIV_CONF_0:
+ if (ha->port_no == 0)
+ ha->flt_region_npiv_conf = start;
+ break;
+ case FLT_REG_NPIV_CONF_1:
+ if (ha->port_no == 1)
+ ha->flt_region_npiv_conf = start;
+ break;
+ case FLT_REG_GOLD_FW:
+ ha->flt_region_gold_fw = start;
+ break;
+ case FLT_REG_FCP_PRIO_0:
+ if (ha->port_no == 0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_FCP_PRIO_1:
+ if (ha->port_no == 1)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_BOOT_CODE_82XX:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_BOOT_CODE_8044:
+ if (IS_QLA8044(ha))
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_FW_82XX:
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_CNA_FW:
+ if (IS_CNA_CAPABLE(ha))
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_GOLD_FW_82XX:
+ ha->flt_region_gold_fw = start;
+ break;
+ case FLT_REG_BOOTLOAD_82XX:
+ ha->flt_region_bootload = start;
+ break;
+ case FLT_REG_VPD_8XXX:
+ if (IS_CNA_CAPABLE(ha))
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_FCOE_NVRAM_0:
+ if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
+ break;
+ if (ha->port_no == 0)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_FCOE_NVRAM_1:
+ if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
+ break;
+ if (ha->port_no == 1)
+ ha->flt_region_nvram = start;
+ break;
+ }
+ }
+ goto done;
+
+no_flash_data:
+ /* Use hardcoded defaults. */
+ loc = locations[0];
+ ha->flt_region_fw = def_fw[def];
+ ha->flt_region_boot = def_boot[def];
+ ha->flt_region_vpd_nvram = def_vpd_nvram[def];
+ ha->flt_region_vpd = (ha->port_no == 0) ?
+ def_vpd0[def] : def_vpd1[def];
+ ha->flt_region_nvram = (ha->port_no == 0) ?
+ def_nvram0[def] : def_nvram1[def];
+ ha->flt_region_fdt = def_fdt[def];
+ ha->flt_region_npiv_conf = (ha->port_no == 0) ?
+ def_npiv_conf0[def] : def_npiv_conf1[def];
+done:
+ ql_dbg(ql_dbg_init, vha, 0x004a,
+ "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x "
+ "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
+ loc, ha->flt_region_boot, ha->flt_region_fw,
+ ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf,
+ ha->flt_region_fcp_prio);
+}
+
+static void
+qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
+{
+#define FLASH_BLK_SIZE_4K 0x1000
+#define FLASH_BLK_SIZE_32K 0x8000
+#define FLASH_BLK_SIZE_64K 0x10000
+ const char *loc, *locations[] = { "MID", "FDT" };
+ uint16_t cnt, chksum;
+ uint16_t *wptr;
+ struct qla_fdt_layout *fdt;
+ uint8_t man_id, flash_id;
+ uint16_t mid = 0, fid = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ wptr = (uint16_t *)req->ring;
+ fdt = (struct qla_fdt_layout *)req->ring;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+ ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
+ if (*wptr == __constant_cpu_to_le16(0xffff))
+ goto no_flash_data;
+ if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
+ fdt->sig[3] != 'D')
+ goto no_flash_data;
+
+ for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
+ cnt++)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ ql_dbg(ql_dbg_init, vha, 0x004c,
+ "Inconsistent FDT detected:"
+ " checksum=0x%x id=%c version0x%x.\n", chksum,
+ fdt->sig[0], le16_to_cpu(fdt->version));
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
+ (uint8_t *)fdt, sizeof(*fdt));
+ goto no_flash_data;
+ }
+
+ loc = locations[1];
+ mid = le16_to_cpu(fdt->man_id);
+ fid = le16_to_cpu(fdt->id);
+ ha->fdt_wrt_disable = fdt->wrt_disable_bits;
+ ha->fdt_wrt_enable = fdt->wrt_enable_bits;
+ ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd;
+ if (IS_QLA8044(ha))
+ ha->fdt_erase_cmd = fdt->erase_cmd;
+ else
+ ha->fdt_erase_cmd =
+ flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
+ ha->fdt_block_size = le32_to_cpu(fdt->block_size);
+ if (fdt->unprotect_sec_cmd) {
+ ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
+ fdt->unprotect_sec_cmd);
+ ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
+ flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
+ flash_conf_addr(ha, 0x0336);
+ }
+ goto done;
+no_flash_data:
+ loc = locations[0];
+ if (IS_P3P_TYPE(ha)) {
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ goto done;
+ }
+ qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
+ mid = man_id;
+ fid = flash_id;
+ ha->fdt_wrt_disable = 0x9c;
+ ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8);
+ switch (man_id) {
+ case 0xbf: /* STT flash. */
+ if (flash_id == 0x8e)
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ else
+ ha->fdt_block_size = FLASH_BLK_SIZE_32K;
+
+ if (flash_id == 0x80)
+ ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352);
+ break;
+ case 0x13: /* ST M25P80. */
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ break;
+ case 0x1f: /* Atmel 26DF081A. */
+ ha->fdt_block_size = FLASH_BLK_SIZE_4K;
+ ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320);
+ ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339);
+ ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336);
+ break;
+ default:
+ /* Default to 64 kb sector size. */
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ break;
+ }
+done:
+ ql_dbg(ql_dbg_init, vha, 0x004d,
+ "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pr=%x wrtd=0x%x blk=0x%x.\n",
+ loc, mid, fid,
+ ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
+ ha->fdt_wrt_disable, ha->fdt_block_size);
+
+}
+
+static void
+qla2xxx_get_idc_param(scsi_qla_host_t *vha)
+{
+#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
+ uint32_t *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (!(IS_P3P_TYPE(ha)))
+ return;
+
+ wptr = (uint32_t *)req->ring;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+ QLA82XX_IDC_PARAM_ADDR , 8);
+
+ if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+ ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+ } else {
+ ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
+ }
+ ql_dbg(ql_dbg_init, vha, 0x004e,
+ "fcoe_dev_init_timeout=%d "
+ "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout,
+ ha->fcoe_reset_timeout);
+ return;
+}
+
+int
+qla2xxx_get_flash_info(scsi_qla_host_t *vha)
+{
+ int ret;
+ uint32_t flt_addr;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+ return QLA_SUCCESS;
+
+ ret = qla2xxx_find_flt_start(vha, &flt_addr);
+ if (ret != QLA_SUCCESS)
+ return ret;
+
+ qla2xxx_get_flt_info(vha, flt_addr);
+ qla2xxx_get_fdt_info(vha);
+ qla2xxx_get_idc_param(vha);
+
+ return QLA_SUCCESS;
+}
+
+void
+qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
+{
+#define NPIV_CONFIG_SIZE (16*1024)
+ void *data;
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ int i;
+ struct qla_npiv_header hdr;
+ struct qla_npiv_entry *entry;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+ return;
+
+ if (ha->flags.nic_core_reset_hdlr_active)
+ return;
+
+ if (IS_QLA8044(ha))
+ return;
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
+ ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+ if (hdr.version == __constant_cpu_to_le16(0xffff))
+ return;
+ if (hdr.version != __constant_cpu_to_le16(1)) {
+ ql_dbg(ql_dbg_user, vha, 0x7090,
+ "Unsupported NPIV-Config "
+ "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+ le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+ le16_to_cpu(hdr.checksum));
+ return;
+ }
+
+ data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
+ if (!data) {
+ ql_log(ql_log_warn, vha, 0x7091,
+ "Unable to allocate memory for data.\n");
+ return;
+ }
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)data,
+ ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+
+ cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
+ sizeof(struct qla_npiv_entry)) >> 1;
+ for (wptr = data, chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ ql_dbg(ql_dbg_user, vha, 0x7092,
+ "Inconsistent NPIV-Config "
+ "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+ le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+ le16_to_cpu(hdr.checksum));
+ goto done;
+ }
+
+ entry = data + sizeof(struct qla_npiv_header);
+ cnt = le16_to_cpu(hdr.entries);
+ for (i = 0; cnt; cnt--, entry++, i++) {
+ uint16_t flags;
+ struct fc_vport_identifiers vid;
+ struct fc_vport *vport;
+
+ memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
+ flags = le16_to_cpu(entry->flags);
+ if (flags == 0xffff)
+ continue;
+ if ((flags & BIT_0) == 0)
+ continue;
+
+ memset(&vid, 0, sizeof(vid));
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+ vid.disable = false;
+ vid.port_name = wwn_to_u64(entry->port_name);
+ vid.node_name = wwn_to_u64(entry->node_name);
+
+ ql_dbg(ql_dbg_user, vha, 0x7093,
+ "NPIV[%02x]: wwpn=%llx "
+ "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name,
+ le16_to_cpu(entry->vf_id),
+ entry->q_qos, entry->f_qos);
+
+ if (i < QLA_PRECONFIG_VPORTS) {
+ vport = fc_vport_create(vha->host, 0, &vid);
+ if (!vport)
+ ql_log(ql_log_warn, vha, 0x7094,
+ "NPIV-Config Failed to create vport [%02x]: "
+ "wwpn=%llx wwnn=%llx.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name);
+ }
+ }
+done:
+ kfree(data);
+}
+
+static int
+qla24xx_unprotect_flash(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (ha->flags.fac_supported)
+ return qla81xx_fac_do_write_enable(vha, 1);
+
+ /* Enable flash write. */
+ WRT_REG_DWORD(&reg->ctrl_status,
+ RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+
+ if (!ha->fdt_wrt_disable)
+ goto done;
+
+ /* Disable flash write-protection, first clear SR protection bit */
+ qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
+ /* Then write zero again to clear remaining SR bits.*/
+ qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
+done:
+ return QLA_SUCCESS;
+}
+
+static int
+qla24xx_protect_flash(scsi_qla_host_t *vha)
+{
+ uint32_t cnt;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (ha->flags.fac_supported)
+ return qla81xx_fac_do_write_enable(vha, 0);
+
+ if (!ha->fdt_wrt_disable)
+ goto skip_wrt_protect;
+
+ /* Enable flash write-protection and wait for completion. */
+ qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
+ ha->fdt_wrt_disable);
+ for (cnt = 300; cnt &&
+ qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
+ cnt--) {
+ udelay(10);
+ }
+
+skip_wrt_protect:
+ /* Disable flash write. */
+ WRT_REG_DWORD(&reg->ctrl_status,
+ RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+
+ return QLA_SUCCESS;
+}
+
+static int
+qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t start, finish;
+
+ if (ha->flags.fac_supported) {
+ start = fdata >> 2;
+ finish = start + (ha->fdt_block_size >> 2) - 1;
+ return qla81xx_fac_erase_sector(vha, flash_data_addr(ha,
+ start), flash_data_addr(ha, finish));
+ }
+
+ return qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd,
+ (fdata & 0xff00) | ((fdata << 16) & 0xff0000) |
+ ((fdata >> 16) & 0xff));
+}
+
+static int
+qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t dwords)
+{
+ int ret;
+ uint32_t liter;
+ uint32_t sec_mask, rest_addr;
+ uint32_t fdata;
+ dma_addr_t optrom_dma;
+ void *optrom = NULL;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Prepare burst-capable write on supported ISPs. */
+ if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha)) &&
+ !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ ql_log(ql_log_warn, vha, 0x7095,
+ "Unable to allocate "
+ "memory for optrom burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
+ }
+ }
+
+ rest_addr = (ha->fdt_block_size >> 2) - 1;
+ sec_mask = ~rest_addr;
+
+ ret = qla24xx_unprotect_flash(vha);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7096,
+ "Unable to unprotect flash for update.\n");
+ goto done;
+ }
+
+ for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
+ fdata = (faddr & sec_mask) << 2;
+
+ /* Are we at the beginning of a sector? */
+ if ((faddr & rest_addr) == 0) {
+ /* Do sector unprotect. */
+ if (ha->fdt_unprotect_sec_cmd)
+ qla24xx_write_flash_dword(ha,
+ ha->fdt_unprotect_sec_cmd,
+ (fdata & 0xff00) | ((fdata << 16) &
+ 0xff0000) | ((fdata >> 16) & 0xff));
+ ret = qla24xx_erase_sector(vha, fdata);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_user, vha, 0x7007,
+ "Unable to erase erase sector: address=%x.\n",
+ faddr);
+ break;
+ }
+ }
+
+ /* Go with burst-write. */
+ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+ /* Copy data to DMA'ble buffer. */
+ memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+ ret = qla2x00_load_ram(vha, optrom_dma,
+ flash_data_addr(ha, faddr),
+ OPTROM_BURST_DWORDS);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7097,
+ "Unable to burst-write optrom segment "
+ "(%x/%x/%llx).\n", ret,
+ flash_data_addr(ha, faddr),
+ (unsigned long long)optrom_dma);
+ ql_log(ql_log_warn, vha, 0x7098,
+ "Reverting to slow-write.\n");
+
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ optrom = NULL;
+ } else {
+ liter += OPTROM_BURST_DWORDS - 1;
+ faddr += OPTROM_BURST_DWORDS - 1;
+ dwptr += OPTROM_BURST_DWORDS - 1;
+ continue;
+ }
+ }
+
+ ret = qla24xx_write_flash_dword(ha,
+ flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_user, vha, 0x7006,
+ "Unable to program flash address=%x data=%x.\n",
+ faddr, *dwptr);
+ break;
+ }
+
+ /* Do sector protect. */
+ if (ha->fdt_unprotect_sec_cmd &&
+ ((faddr & rest_addr) == rest_addr))
+ qla24xx_write_flash_dword(ha,
+ ha->fdt_protect_sec_cmd,
+ (fdata & 0xff00) | ((fdata << 16) &
+ 0xff0000) | ((fdata >> 16) & 0xff));
+ }
+
+ ret = qla24xx_protect_flash(vha);
+ if (ret != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x7099,
+ "Unable to protect flash after update.\n");
+done:
+ if (optrom)
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+
+ return ret;
+}
+
+uint8_t *
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ uint32_t i;
+ uint16_t *wptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Word reads to NVRAM via registers. */
+ wptr = (uint16_t *)buf;
+ qla2x00_lock_nvram_access(ha);
+ for (i = 0; i < bytes >> 1; i++, naddr++)
+ wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha,
+ naddr));
+ qla2x00_unlock_nvram_access(ha);
+
+ return buf;
+}
+
+uint8_t *
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ uint32_t i;
+ uint32_t *dwptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_P3P_TYPE(ha))
+ return buf;
+
+ /* Dword reads to flash. */
+ dwptr = (uint32_t *)buf;
+ for (i = 0; i < bytes >> 2; i++, naddr++)
+ dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+ nvram_data_addr(ha, naddr)));
+
+ return buf;
+}
+
+int
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ int ret, stat;
+ uint32_t i;
+ uint16_t *wptr;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret = QLA_SUCCESS;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qla2x00_lock_nvram_access(ha);
+
+ /* Disable NVRAM write-protection. */
+ stat = qla2x00_clear_nvram_protection(ha);
+
+ wptr = (uint16_t *)buf;
+ for (i = 0; i < bytes >> 1; i++, naddr++) {
+ qla2x00_write_nvram_word(ha, naddr,
+ cpu_to_le16(*wptr));
+ wptr++;
+ }
+
+ /* Enable NVRAM write-protection. */
+ qla2x00_set_nvram_protection(ha, stat);
+
+ qla2x00_unlock_nvram_access(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return ret;
+}
+
+int
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ int ret;
+ uint32_t i;
+ uint32_t *dwptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ ret = QLA_SUCCESS;
+
+ if (IS_P3P_TYPE(ha))
+ return ret;
+
+ /* Enable flash write. */
+ WRT_REG_DWORD(&reg->ctrl_status,
+ RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+
+ /* Disable NVRAM write-protection. */
+ qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
+ qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
+
+ /* Dword writes to flash. */
+ dwptr = (uint32_t *)buf;
+ for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
+ ret = qla24xx_write_flash_dword(ha,
+ nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_user, vha, 0x709a,
+ "Unable to program nvram address=%x data=%x.\n",
+ naddr, *dwptr);
+ break;
+ }
+ }
+
+ /* Enable NVRAM write-protection. */
+ qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
+
+ /* Disable flash write. */
+ WRT_REG_DWORD(&reg->ctrl_status,
+ RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+
+ return ret;
+}
+
+uint8_t *
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ uint32_t i;
+ uint32_t *dwptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Dword reads to flash. */
+ dwptr = (uint32_t *)buf;
+ for (i = 0; i < bytes >> 2; i++, naddr++)
+ dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+ flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
+
+ return buf;
+}
+
+int
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ struct qla_hw_data *ha = vha->hw;
+#define RMW_BUFFER_SIZE (64 * 1024)
+ uint8_t *dbuf;
+
+ dbuf = vmalloc(RMW_BUFFER_SIZE);
+ if (!dbuf)
+ return QLA_MEMORY_ALLOC_FAILED;
+ ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
+ RMW_BUFFER_SIZE);
+ memcpy(dbuf + (naddr << 2), buf, bytes);
+ ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
+ RMW_BUFFER_SIZE);
+ vfree(dbuf);
+
+ return QLA_SUCCESS;
+}
+
+static inline void
+qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
+{
+ if (IS_QLA2322(ha)) {
+ /* Flip all colors. */
+ if (ha->beacon_color_state == QLA_LED_ALL_ON) {
+ /* Turn off. */
+ ha->beacon_color_state = 0;
+ *pflags = GPIO_LED_ALL_OFF;
+ } else {
+ /* Turn on. */
+ ha->beacon_color_state = QLA_LED_ALL_ON;
+ *pflags = GPIO_LED_RGA_ON;
+ }
+ } else {
+ /* Flip green led only. */
+ if (ha->beacon_color_state == QLA_LED_GRN_ON) {
+ /* Turn off. */
+ ha->beacon_color_state = 0;
+ *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF;
+ } else {
+ /* Turn on. */
+ ha->beacon_color_state = QLA_LED_GRN_ON;
+ *pflags = GPIO_LED_GREEN_ON_AMBER_OFF;
+ }
+ }
+}
+
+#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
+
+void
+qla2x00_beacon_blink(struct scsi_qla_host *vha)
+{
+ uint16_t gpio_enable;
+ uint16_t gpio_data;
+ uint16_t led_color = 0;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ if (IS_P3P_TYPE(ha))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Save the Original GPIOE. */
+ if (ha->pio_address) {
+ gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
+ gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
+ } else {
+ gpio_enable = RD_REG_WORD(&reg->gpioe);
+ gpio_data = RD_REG_WORD(&reg->gpiod);
+ }
+
+ /* Set the modified gpio_enable values */
+ gpio_enable |= GPIO_LED_MASK;
+
+ if (ha->pio_address) {
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
+ } else {
+ WRT_REG_WORD(&reg->gpioe, gpio_enable);
+ RD_REG_WORD(&reg->gpioe);
+ }
+
+ qla2x00_flip_colors(ha, &led_color);
+
+ /* Clear out any previously set LED color. */
+ gpio_data &= ~GPIO_LED_MASK;
+
+ /* Set the new input LED color to GPIOD. */
+ gpio_data |= led_color;
+
+ /* Set the modified gpio_data values */
+ if (ha->pio_address) {
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
+ } else {
+ WRT_REG_WORD(&reg->gpiod, gpio_data);
+ RD_REG_WORD(&reg->gpiod);
+ }
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int
+qla2x00_beacon_on(struct scsi_qla_host *vha)
+{
+ uint16_t gpio_enable;
+ uint16_t gpio_data;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
+ ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
+
+ if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x709b,
+ "Unable to update fw options (beacon on).\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Turn off LEDs. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (ha->pio_address) {
+ gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
+ gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
+ } else {
+ gpio_enable = RD_REG_WORD(&reg->gpioe);
+ gpio_data = RD_REG_WORD(&reg->gpiod);
+ }
+ gpio_enable |= GPIO_LED_MASK;
+
+ /* Set the modified gpio_enable values. */
+ if (ha->pio_address) {
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
+ } else {
+ WRT_REG_WORD(&reg->gpioe, gpio_enable);
+ RD_REG_WORD(&reg->gpioe);
+ }
+
+ /* Clear out previously set LED colour. */
+ gpio_data &= ~GPIO_LED_MASK;
+ if (ha->pio_address) {
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
+ } else {
+ WRT_REG_WORD(&reg->gpiod, gpio_data);
+ RD_REG_WORD(&reg->gpiod);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /*
+ * Let the per HBA timer kick off the blinking process based on
+ * the following flags. No need to do anything else now.
+ */
+ ha->beacon_blink_led = 1;
+ ha->beacon_color_state = 0;
+
+ return QLA_SUCCESS;
+}
+
+int
+qla2x00_beacon_off(struct scsi_qla_host *vha)
+{
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ha->beacon_blink_led = 0;
+
+ /* Set the on flag so when it gets flipped it will be off. */
+ if (IS_QLA2322(ha))
+ ha->beacon_color_state = QLA_LED_ALL_ON;
+ else
+ ha->beacon_color_state = QLA_LED_GRN_ON;
+
+ ha->isp_ops->beacon_blink(vha); /* This turns green LED off */
+
+ ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
+ ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
+
+ rval = qla2x00_set_fw_options(vha, ha->fw_options);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x709c,
+ "Unable to update fw options (beacon off).\n");
+ return rval;
+}
+
+
+static inline void
+qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
+{
+ /* Flip all colors. */
+ if (ha->beacon_color_state == QLA_LED_ALL_ON) {
+ /* Turn off. */
+ ha->beacon_color_state = 0;
+ *pflags = 0;
+ } else {
+ /* Turn on. */
+ ha->beacon_color_state = QLA_LED_ALL_ON;
+ *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON;
+ }
+}
+
+void
+qla24xx_beacon_blink(struct scsi_qla_host *vha)
+{
+ uint16_t led_color = 0;
+ uint32_t gpio_data;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ /* Save the Original GPIOD. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+ /* Enable the gpio_data reg for update. */
+ gpio_data |= GPDX_LED_UPDATE_MASK;
+
+ WRT_REG_DWORD(&reg->gpiod, gpio_data);
+ gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+ /* Set the color bits. */
+ qla24xx_flip_colors(ha, &led_color);
+
+ /* Clear out any previously set LED color. */
+ gpio_data &= ~GPDX_LED_COLOR_MASK;
+
+ /* Set the new input LED color to GPIOD. */
+ gpio_data |= led_color;
+
+ /* Set the modified gpio_data values. */
+ WRT_REG_DWORD(&reg->gpiod, gpio_data);
+ gpio_data = RD_REG_DWORD(&reg->gpiod);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static uint32_t
+qla83xx_select_led_port(struct qla_hw_data *ha)
+{
+ uint32_t led_select_value = 0;
+
+ if (!IS_QLA83XX(ha))
+ goto out;
+
+ if (ha->port_no == 0)
+ led_select_value = QLA83XX_LED_PORT0;
+ else
+ led_select_value = QLA83XX_LED_PORT1;
+
+out:
+ return led_select_value;
+}
+
+void
+qla83xx_beacon_blink(struct scsi_qla_host *vha)
+{
+ uint32_t led_select_value;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t led_cfg[6];
+ uint16_t orig_led_cfg[6];
+ uint32_t led_10_value, led_43_value;
+
+ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
+ return;
+
+ if (!ha->beacon_blink_led)
+ return;
+
+ if (IS_QLA27XX(ha)) {
+ qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
+ qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
+ } else if (IS_QLA2031(ha)) {
+ led_select_value = qla83xx_select_led_port(ha);
+
+ qla83xx_wr_reg(vha, led_select_value, 0x40000230);
+ qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
+ } else if (IS_QLA8031(ha)) {
+ led_select_value = qla83xx_select_led_port(ha);
+
+ qla83xx_rd_reg(vha, led_select_value, &led_10_value);
+ qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
+ qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
+ msleep(500);
+ qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
+ msleep(1000);
+ qla83xx_wr_reg(vha, led_select_value, led_10_value);
+ qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
+ } else if (IS_QLA81XX(ha)) {
+ int rval;
+
+ /* Save Current */
+ rval = qla81xx_get_led_config(vha, orig_led_cfg);
+ /* Do the blink */
+ if (rval == QLA_SUCCESS) {
+ if (IS_QLA81XX(ha)) {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0;
+ led_cfg[3] = 0;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0;
+ } else {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x4000;
+ led_cfg[2] = 0x4000;
+ led_cfg[3] = 0x2000;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0x2000;
+ }
+ rval = qla81xx_set_led_config(vha, led_cfg);
+ msleep(1000);
+ if (IS_QLA81XX(ha)) {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0;
+ } else {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0x4000;
+ led_cfg[3] = 0x4000;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0x2000;
+ }
+ rval = qla81xx_set_led_config(vha, led_cfg);
+ }
+ /* On exit, restore original (presumes no status change) */
+ qla81xx_set_led_config(vha, orig_led_cfg);
+ }
+}
+
+int
+qla24xx_beacon_on(struct scsi_qla_host *vha)
+{
+ uint32_t gpio_data;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (IS_P3P_TYPE(ha))
+ return QLA_SUCCESS;
+
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+ goto skip_gpio; /* let blink handle it */
+
+ if (ha->beacon_blink_led == 0) {
+ /* Enable firmware for update */
+ ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
+
+ if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
+ if (qla2x00_get_fw_options(vha, ha->fw_options) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7009,
+ "Unable to update fw options (beacon on).\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+ goto skip_gpio;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+ /* Enable the gpio_data reg for update. */
+ gpio_data |= GPDX_LED_UPDATE_MASK;
+ WRT_REG_DWORD(&reg->gpiod, gpio_data);
+ RD_REG_DWORD(&reg->gpiod);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ /* So all colors blink together. */
+ ha->beacon_color_state = 0;
+
+skip_gpio:
+ /* Let the per HBA timer kick off the blinking process. */
+ ha->beacon_blink_led = 1;
+
+ return QLA_SUCCESS;
+}
+
+int
+qla24xx_beacon_off(struct scsi_qla_host *vha)
+{
+ uint32_t gpio_data;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (IS_P3P_TYPE(ha))
+ return QLA_SUCCESS;
+
+ ha->beacon_blink_led = 0;
+
+ if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+ goto set_fw_options;
+
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+ return QLA_SUCCESS;
+
+ ha->beacon_color_state = QLA_LED_ALL_ON;
+
+ ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
+
+ /* Give control back to firmware. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+ /* Disable the gpio_data reg for update. */
+ gpio_data &= ~GPDX_LED_UPDATE_MASK;
+ WRT_REG_DWORD(&reg->gpiod, gpio_data);
+ RD_REG_DWORD(&reg->gpiod);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+set_fw_options:
+ ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
+
+ if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x704d,
+ "Unable to update fw options (beacon on).\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x704e,
+ "Unable to update fw options (beacon on).\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ return QLA_SUCCESS;
+}
+
+
+/*
+ * Flash support routines
+ */
+
+/**
+ * qla2x00_flash_enable() - Setup flash for reading and writing.
+ * @ha: HA context
+ */
+static void
+qla2x00_flash_enable(struct qla_hw_data *ha)
+{
+ uint16_t data;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ data = RD_REG_WORD(&reg->ctrl_status);
+ data |= CSR_FLASH_ENABLE;
+ WRT_REG_WORD(&reg->ctrl_status, data);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+}
+
+/**
+ * qla2x00_flash_disable() - Disable flash and allow RISC to run.
+ * @ha: HA context
+ */
+static void
+qla2x00_flash_disable(struct qla_hw_data *ha)
+{
+ uint16_t data;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ data = RD_REG_WORD(&reg->ctrl_status);
+ data &= ~(CSR_FLASH_ENABLE);
+ WRT_REG_WORD(&reg->ctrl_status, data);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+}
+
+/**
+ * qla2x00_read_flash_byte() - Reads a byte from flash
+ * @ha: HA context
+ * @addr: Address in flash to read
+ *
+ * A word is read from the chip, but, only the lower byte is valid.
+ *
+ * Returns the byte read from flash @addr.
+ */
+static uint8_t
+qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
+{
+ uint16_t data;
+ uint16_t bank_select;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ bank_select = RD_REG_WORD(&reg->ctrl_status);
+
+ if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+ /* Specify 64K address range: */
+ /* clear out Module Select and Flash Address bits [19:16]. */
+ bank_select &= ~0xf8;
+ bank_select |= addr >> 12 & 0xf0;
+ bank_select |= CSR_FLASH_64K_BANK;
+ WRT_REG_WORD(&reg->ctrl_status, bank_select);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+
+ WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ data = RD_REG_WORD(&reg->flash_data);
+
+ return (uint8_t)data;
+ }
+
+ /* Setup bit 16 of flash address. */
+ if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
+ bank_select |= CSR_FLASH_64K_BANK;
+ WRT_REG_WORD(&reg->ctrl_status, bank_select);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ } else if (((addr & BIT_16) == 0) &&
+ (bank_select & CSR_FLASH_64K_BANK)) {
+ bank_select &= ~(CSR_FLASH_64K_BANK);
+ WRT_REG_WORD(&reg->ctrl_status, bank_select);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ }
+
+ /* Always perform IO mapped accesses to the FLASH registers. */
+ if (ha->pio_address) {
+ uint16_t data2;
+
+ WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
+ do {
+ data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
+ barrier();
+ cpu_relax();
+ data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
+ } while (data != data2);
+ } else {
+ WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ data = qla2x00_debounce_register(&reg->flash_data);
+ }
+
+ return (uint8_t)data;
+}
+
+/**
+ * qla2x00_write_flash_byte() - Write a byte to flash
+ * @ha: HA context
+ * @addr: Address in flash to write
+ * @data: Data to write
+ */
+static void
+qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
+{
+ uint16_t bank_select;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ bank_select = RD_REG_WORD(&reg->ctrl_status);
+ if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+ /* Specify 64K address range: */
+ /* clear out Module Select and Flash Address bits [19:16]. */
+ bank_select &= ~0xf8;
+ bank_select |= addr >> 12 & 0xf0;
+ bank_select |= CSR_FLASH_64K_BANK;
+ WRT_REG_WORD(&reg->ctrl_status, bank_select);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+
+ WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+
+ return;
+ }
+
+ /* Setup bit 16 of flash address. */
+ if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
+ bank_select |= CSR_FLASH_64K_BANK;
+ WRT_REG_WORD(&reg->ctrl_status, bank_select);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ } else if (((addr & BIT_16) == 0) &&
+ (bank_select & CSR_FLASH_64K_BANK)) {
+ bank_select &= ~(CSR_FLASH_64K_BANK);
+ WRT_REG_WORD(&reg->ctrl_status, bank_select);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ }
+
+ /* Always perform IO mapped accesses to the FLASH registers. */
+ if (ha->pio_address) {
+ WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
+ WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
+ } else {
+ WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
+ RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ }
+}
+
+/**
+ * qla2x00_poll_flash() - Polls flash for completion.
+ * @ha: HA context
+ * @addr: Address in flash to poll
+ * @poll_data: Data to be polled
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * This function polls the device until bit 7 of what is read matches data
+ * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed
+ * out (a fatal error). The flash book recommeds reading bit 7 again after
+ * reading bit 5 as a 1.
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
+ uint8_t man_id, uint8_t flash_id)
+{
+ int status;
+ uint8_t flash_data;
+ uint32_t cnt;
+
+ status = 1;
+
+ /* Wait for 30 seconds for command to finish. */
+ poll_data &= BIT_7;
+ for (cnt = 3000000; cnt; cnt--) {
+ flash_data = qla2x00_read_flash_byte(ha, addr);
+ if ((flash_data & BIT_7) == poll_data) {
+ status = 0;
+ break;
+ }
+
+ if (man_id != 0x40 && man_id != 0xda) {
+ if ((flash_data & BIT_5) && cnt > 2)
+ cnt = 2;
+ }
+ udelay(10);
+ barrier();
+ cond_resched();
+ }
+ return status;
+}
+
+/**
+ * qla2x00_program_flash_address() - Programs a flash address
+ * @ha: HA context
+ * @addr: Address in flash to program
+ * @data: Data to be written in flash
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
+ uint8_t data, uint8_t man_id, uint8_t flash_id)
+{
+ /* Write Program Command Sequence. */
+ if (IS_OEM_001(ha)) {
+ qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x555, 0x55);
+ qla2x00_write_flash_byte(ha, 0xaaa, 0xa0);
+ qla2x00_write_flash_byte(ha, addr, data);
+ } else {
+ if (man_id == 0xda && flash_id == 0xc1) {
+ qla2x00_write_flash_byte(ha, addr, data);
+ if (addr & 0x7e)
+ return 0;
+ } else {
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555, 0xa0);
+ qla2x00_write_flash_byte(ha, addr, data);
+ }
+ }
+
+ udelay(150);
+
+ /* Wait for write to complete. */
+ return qla2x00_poll_flash(ha, addr, data, man_id, flash_id);
+}
+
+/**
+ * qla2x00_erase_flash() - Erase the flash.
+ * @ha: HA context
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
+{
+ /* Individual Sector Erase Command Sequence */
+ if (IS_OEM_001(ha)) {
+ qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x555, 0x55);
+ qla2x00_write_flash_byte(ha, 0xaaa, 0x80);
+ qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x555, 0x55);
+ qla2x00_write_flash_byte(ha, 0xaaa, 0x10);
+ } else {
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555, 0x80);
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555, 0x10);
+ }
+
+ udelay(150);
+
+ /* Wait for erase to complete. */
+ return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id);
+}
+
+/**
+ * qla2x00_erase_flash_sector() - Erase a flash sector.
+ * @ha: HA context
+ * @addr: Flash sector to erase
+ * @sec_mask: Sector address mask
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
+ uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
+{
+ /* Individual Sector Erase Command Sequence */
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555, 0x80);
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ if (man_id == 0x1f && flash_id == 0x13)
+ qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10);
+ else
+ qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30);
+
+ udelay(150);
+
+ /* Wait for erase to complete. */
+ return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id);
+}
+
+/**
+ * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ */
+static void
+qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
+ uint8_t *flash_id)
+{
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555, 0x90);
+ *man_id = qla2x00_read_flash_byte(ha, 0x0000);
+ *flash_id = qla2x00_read_flash_byte(ha, 0x0001);
+ qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
+}
+
+static void
+qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
+ uint32_t saddr, uint32_t length)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint32_t midpoint, ilength;
+ uint8_t data;
+
+ midpoint = length / 2;
+
+ WRT_REG_WORD(&reg->nvram, 0);
+ RD_REG_WORD(&reg->nvram);
+ for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
+ if (ilength == midpoint) {
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram);
+ }
+ data = qla2x00_read_flash_byte(ha, saddr);
+ if (saddr % 100)
+ udelay(10);
+ *tmp_buf = data;
+ cond_resched();
+ }
+}
+
+static inline void
+qla2x00_suspend_hba(struct scsi_qla_host *vha)
+{
+ int cnt;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ ha->isp_ops->disable_intrs(ha);
+ set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+ /* Pause RISC. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ RD_REG_WORD(&reg->hccr);
+ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ break;
+ udelay(100);
+ }
+ } else {
+ udelay(10);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline void
+qla2x00_resume_hba(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Resume HBA. */
+ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
+
+uint8_t *
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ uint32_t addr, midpoint;
+ uint8_t *data;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ /* Suspend HBA. */
+ qla2x00_suspend_hba(vha);
+
+ /* Go with read. */
+ midpoint = ha->optrom_size / 2;
+
+ qla2x00_flash_enable(ha);
+ WRT_REG_WORD(&reg->nvram, 0);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ for (addr = offset, data = buf; addr < length; addr++, data++) {
+ if (addr == midpoint) {
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ }
+
+ *data = qla2x00_read_flash_byte(ha, addr);
+ }
+ qla2x00_flash_disable(ha);
+
+ /* Resume HBA. */
+ qla2x00_resume_hba(vha);
+
+ return buf;
+}
+
+int
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+
+ int rval;
+ uint8_t man_id, flash_id, sec_number, data;
+ uint16_t wd;
+ uint32_t addr, liter, sec_mask, rest_addr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ /* Suspend HBA. */
+ qla2x00_suspend_hba(vha);
+
+ rval = QLA_SUCCESS;
+ sec_number = 0;
+
+ /* Reset ISP chip. */
+ WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+ /* Go with write. */
+ qla2x00_flash_enable(ha);
+ do { /* Loop once to provide quick error exit */
+ /* Structure of flash memory based on manufacturer */
+ if (IS_OEM_001(ha)) {
+ /* OEM variant with special flash part. */
+ man_id = flash_id = 0;
+ rest_addr = 0xffff;
+ sec_mask = 0x10000;
+ goto update_flash;
+ }
+ qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
+ switch (man_id) {
+ case 0x20: /* ST flash. */
+ if (flash_id == 0xd2 || flash_id == 0xe3) {
+ /*
+ * ST m29w008at part - 64kb sector size with
+ * 32kb,8kb,8kb,16kb sectors at memory address
+ * 0xf0000.
+ */
+ rest_addr = 0xffff;
+ sec_mask = 0x10000;
+ break;
+ }
+ /*
+ * ST m29w010b part - 16kb sector size
+ * Default to 16kb sectors
+ */
+ rest_addr = 0x3fff;
+ sec_mask = 0x1c000;
+ break;
+ case 0x40: /* Mostel flash. */
+ /* Mostel v29c51001 part - 512 byte sector size. */
+ rest_addr = 0x1ff;
+ sec_mask = 0x1fe00;
+ break;
+ case 0xbf: /* SST flash. */
+ /* SST39sf10 part - 4kb sector size. */
+ rest_addr = 0xfff;
+ sec_mask = 0x1f000;
+ break;
+ case 0xda: /* Winbond flash. */
+ /* Winbond W29EE011 part - 256 byte sector size. */
+ rest_addr = 0x7f;
+ sec_mask = 0x1ff80;
+ break;
+ case 0xc2: /* Macronix flash. */
+ /* 64k sector size. */
+ if (flash_id == 0x38 || flash_id == 0x4f) {
+ rest_addr = 0xffff;
+ sec_mask = 0x10000;
+ break;
+ }
+ /* Fall through... */
+
+ case 0x1f: /* Atmel flash. */
+ /* 512k sector size. */
+ if (flash_id == 0x13) {
+ rest_addr = 0x7fffffff;
+ sec_mask = 0x80000000;
+ break;
+ }
+ /* Fall through... */
+
+ case 0x01: /* AMD flash. */
+ if (flash_id == 0x38 || flash_id == 0x40 ||
+ flash_id == 0x4f) {
+ /* Am29LV081 part - 64kb sector size. */
+ /* Am29LV002BT part - 64kb sector size. */
+ rest_addr = 0xffff;
+ sec_mask = 0x10000;
+ break;
+ } else if (flash_id == 0x3e) {
+ /*
+ * Am29LV008b part - 64kb sector size with
+ * 32kb,8kb,8kb,16kb sector at memory address
+ * h0xf0000.
+ */
+ rest_addr = 0xffff;
+ sec_mask = 0x10000;
+ break;
+ } else if (flash_id == 0x20 || flash_id == 0x6e) {
+ /*
+ * Am29LV010 part or AM29f010 - 16kb sector
+ * size.
+ */
+ rest_addr = 0x3fff;
+ sec_mask = 0x1c000;
+ break;
+ } else if (flash_id == 0x6d) {
+ /* Am29LV001 part - 8kb sector size. */
+ rest_addr = 0x1fff;
+ sec_mask = 0x1e000;
+ break;
+ }
+ default:
+ /* Default to 16 kb sector size. */
+ rest_addr = 0x3fff;
+ sec_mask = 0x1c000;
+ break;
+ }
+
+update_flash:
+ if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+ if (qla2x00_erase_flash(ha, man_id, flash_id)) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ }
+
+ for (addr = offset, liter = 0; liter < length; liter++,
+ addr++) {
+ data = buf[liter];
+ /* Are we at the beginning of a sector? */
+ if ((addr & rest_addr) == 0) {
+ if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+ if (addr >= 0x10000UL) {
+ if (((addr >> 12) & 0xf0) &&
+ ((man_id == 0x01 &&
+ flash_id == 0x3e) ||
+ (man_id == 0x20 &&
+ flash_id == 0xd2))) {
+ sec_number++;
+ if (sec_number == 1) {
+ rest_addr =
+ 0x7fff;
+ sec_mask =
+ 0x18000;
+ } else if (
+ sec_number == 2 ||
+ sec_number == 3) {
+ rest_addr =
+ 0x1fff;
+ sec_mask =
+ 0x1e000;
+ } else if (
+ sec_number == 4) {
+ rest_addr =
+ 0x3fff;
+ sec_mask =
+ 0x1c000;
+ }
+ }
+ }
+ } else if (addr == ha->optrom_size / 2) {
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram);
+ }
+
+ if (flash_id == 0xda && man_id == 0xc1) {
+ qla2x00_write_flash_byte(ha, 0x5555,
+ 0xaa);
+ qla2x00_write_flash_byte(ha, 0x2aaa,
+ 0x55);
+ qla2x00_write_flash_byte(ha, 0x5555,
+ 0xa0);
+ } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) {
+ /* Then erase it */
+ if (qla2x00_erase_flash_sector(ha,
+ addr, sec_mask, man_id,
+ flash_id)) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ if (man_id == 0x01 && flash_id == 0x6d)
+ sec_number++;
+ }
+ }
+
+ if (man_id == 0x01 && flash_id == 0x6d) {
+ if (sec_number == 1 &&
+ addr == (rest_addr - 1)) {
+ rest_addr = 0x0fff;
+ sec_mask = 0x1f000;
+ } else if (sec_number == 3 && (addr & 0x7ffe)) {
+ rest_addr = 0x3fff;
+ sec_mask = 0x1c000;
+ }
+ }
+
+ if (qla2x00_program_flash_address(ha, addr, data,
+ man_id, flash_id)) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ cond_resched();
+ }
+ } while (0);
+ qla2x00_flash_disable(ha);
+
+ /* Resume HBA. */
+ qla2x00_resume_hba(vha);
+
+ return rval;
+}
+
+uint8_t *
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+ /* Go with read. */
+ qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
+
+ /* Resume HBA. */
+ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+ scsi_unblock_requests(vha->host);
+
+ return buf;
+}
+
+int
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+ /* Go with write. */
+ rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
+ length >> 2);
+
+ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+ scsi_unblock_requests(vha->host);
+
+ return rval;
+}
+
+uint8_t *
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval;
+ dma_addr_t optrom_dma;
+ void *optrom;
+ uint8_t *pbuf;
+ uint32_t faddr, left, burst;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha))
+ goto try_fast;
+ if (offset & 0xfff)
+ goto slow_read;
+ if (length < OPTROM_BURST_SIZE)
+ goto slow_read;
+
+try_fast:
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ ql_log(ql_log_warn, vha, 0x00cc,
+ "Unable to allocate memory for optrom burst read (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
+ goto slow_read;
+ }
+
+ pbuf = buf;
+ faddr = offset >> 2;
+ left = length >> 2;
+ burst = OPTROM_BURST_DWORDS;
+ while (left != 0) {
+ if (burst > left)
+ burst = left;
+
+ rval = qla2x00_dump_ram(vha, optrom_dma,
+ flash_data_addr(ha, faddr), burst);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00f5,
+ "Unable to burst-read optrom segment (%x/%x/%llx).\n",
+ rval, flash_data_addr(ha, faddr),
+ (unsigned long long)optrom_dma);
+ ql_log(ql_log_warn, vha, 0x00f6,
+ "Reverting to slow-read.\n");
+
+ dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ optrom, optrom_dma);
+ goto slow_read;
+ }
+
+ memcpy(pbuf, optrom, burst * 4);
+
+ left -= burst;
+ faddr += burst;
+ pbuf += burst * 4;
+ }
+
+ dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom,
+ optrom_dma);
+
+ return buf;
+
+slow_read:
+ return qla24xx_read_optrom_data(vha, buf, offset, length);
+}
+
+/**
+ * qla2x00_get_fcode_version() - Determine an FCODE image's version.
+ * @ha: HA context
+ * @pcids: Pointer to the FCODE PCI data structure
+ *
+ * The process of retrieving the FCODE version information is at best
+ * described as interesting.
+ *
+ * Within the first 100h bytes of the image an ASCII string is present
+ * which contains several pieces of information including the FCODE
+ * version. Unfortunately it seems the only reliable way to retrieve
+ * the version is by scanning for another sentinel within the string,
+ * the FCODE build date:
+ *
+ * ... 2.00.02 10/17/02 ...
+ *
+ * Returns QLA_SUCCESS on successful retrieval of version.
+ */
+static void
+qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t istart, iend, iter, vend;
+ uint8_t do_next, rbyte, *vbyte;
+
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+
+ /* Skip the PCI data structure. */
+ istart = pcids +
+ ((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) |
+ qla2x00_read_flash_byte(ha, pcids + 0x0A));
+ iend = istart + 0x100;
+ do {
+ /* Scan for the sentinel date string...eeewww. */
+ do_next = 0;
+ iter = istart;
+ while ((iter < iend) && !do_next) {
+ iter++;
+ if (qla2x00_read_flash_byte(ha, iter) == '/') {
+ if (qla2x00_read_flash_byte(ha, iter + 2) ==
+ '/')
+ do_next++;
+ else if (qla2x00_read_flash_byte(ha,
+ iter + 3) == '/')
+ do_next++;
+ }
+ }
+ if (!do_next)
+ break;
+
+ /* Backtrack to previous ' ' (space). */
+ do_next = 0;
+ while ((iter > istart) && !do_next) {
+ iter--;
+ if (qla2x00_read_flash_byte(ha, iter) == ' ')
+ do_next++;
+ }
+ if (!do_next)
+ break;
+
+ /*
+ * Mark end of version tag, and find previous ' ' (space) or
+ * string length (recent FCODE images -- major hack ahead!!!).
+ */
+ vend = iter - 1;
+ do_next = 0;
+ while ((iter > istart) && !do_next) {
+ iter--;
+ rbyte = qla2x00_read_flash_byte(ha, iter);
+ if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10)
+ do_next++;
+ }
+ if (!do_next)
+ break;
+
+ /* Mark beginning of version tag, and copy data. */
+ iter++;
+ if ((vend - iter) &&
+ ((vend - iter) < sizeof(ha->fcode_revision))) {
+ vbyte = ha->fcode_revision;
+ while (iter <= vend) {
+ *vbyte++ = qla2x00_read_flash_byte(ha, iter);
+ iter++;
+ }
+ ret = QLA_SUCCESS;
+ }
+ } while (0);
+
+ if (ret != QLA_SUCCESS)
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+}
+
+int
+qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+ int ret = QLA_SUCCESS;
+ uint8_t code_type, last_image;
+ uint32_t pcihdr, pcids;
+ uint8_t *dbyte;
+ uint16_t *dcode;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->pio_address || !mbuf)
+ return QLA_FUNCTION_FAILED;
+
+ memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+ memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+ qla2x00_flash_enable(ha);
+
+ /* Begin with first PCI expansion ROM header. */
+ pcihdr = 0;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
+ qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
+ /* No signature */
+ ql_log(ql_log_fatal, vha, 0x0050,
+ "No matching ROM signature.\n");
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr +
+ ((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) |
+ qla2x00_read_flash_byte(ha, pcihdr + 0x18));
+
+ /* Validate signature of PCI data structure. */
+ if (qla2x00_read_flash_byte(ha, pcids) != 'P' ||
+ qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' ||
+ qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
+ qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
+ /* Incorrect header. */
+ ql_log(ql_log_fatal, vha, 0x0051,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Read version */
+ code_type = qla2x00_read_flash_byte(ha, pcids + 0x14);
+ switch (code_type) {
+ case ROM_CODE_TYPE_BIOS:
+ /* Intel x86, PC-AT compatible. */
+ ha->bios_revision[0] =
+ qla2x00_read_flash_byte(ha, pcids + 0x12);
+ ha->bios_revision[1] =
+ qla2x00_read_flash_byte(ha, pcids + 0x13);
+ ql_dbg(ql_dbg_init, vha, 0x0052,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
+ break;
+ case ROM_CODE_TYPE_FCODE:
+ /* Open Firmware standard for PCI (FCode). */
+ /* Eeeewww... */
+ qla2x00_get_fcode_version(ha, pcids);
+ break;
+ case ROM_CODE_TYPE_EFI:
+ /* Extensible Firmware Interface (EFI). */
+ ha->efi_revision[0] =
+ qla2x00_read_flash_byte(ha, pcids + 0x12);
+ ha->efi_revision[1] =
+ qla2x00_read_flash_byte(ha, pcids + 0x13);
+ ql_dbg(ql_dbg_init, vha, 0x0053,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0054,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
+ break;
+ }
+
+ last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) |
+ qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512;
+ } while (!last_image);
+
+ if (IS_QLA2322(ha)) {
+ /* Read firmware image information. */
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ dbyte = mbuf;
+ memset(dbyte, 0, 8);
+ dcode = (uint16_t *)dbyte;
+
+ qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
+ 8);
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
+ "Dumping fw "
+ "ver from flash:.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
+ (uint8_t *)dbyte, 8);
+
+ if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
+ dcode[2] == 0xffff && dcode[3] == 0xffff) ||
+ (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+ dcode[3] == 0)) {
+ ql_log(ql_log_warn, vha, 0x0057,
+ "Unrecognized fw revision at %x.\n",
+ ha->flt_region_fw * 4);
+ } else {
+ /* values are in big endian */
+ ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
+ ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
+ ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
+ ql_dbg(ql_dbg_init, vha, 0x0058,
+ "FW Version: "
+ "%d.%d.%d.\n", ha->fw_revision[0],
+ ha->fw_revision[1], ha->fw_revision[2]);
+ }
+ }
+
+ qla2x00_flash_disable(ha);
+
+ return ret;
+}
+
+int
+qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+ int ret = QLA_SUCCESS;
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *bcode;
+ uint8_t code_type, last_image;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!mbuf)
+ return QLA_FUNCTION_FAILED;
+
+ memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+ memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+ dcode = mbuf;
+
+ /* Begin with first PCI expansion ROM header. */
+ pcihdr = ha->flt_region_boot << 2;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
+ 0x20 * 4);
+ bcode = mbuf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+ /* No signature */
+ ql_log(ql_log_fatal, vha, 0x0154,
+ "No matching ROM signature.\n");
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
+ 0x20 * 4);
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+ /* Incorrect header. */
+ ql_log(ql_log_fatal, vha, 0x0155,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Read version */
+ code_type = bcode[0x14];
+ switch (code_type) {
+ case ROM_CODE_TYPE_BIOS:
+ /* Intel x86, PC-AT compatible. */
+ ha->bios_revision[0] = bcode[0x12];
+ ha->bios_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0156,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
+ break;
+ case ROM_CODE_TYPE_FCODE:
+ /* Open Firmware standard for PCI (FCode). */
+ ha->fcode_revision[0] = bcode[0x12];
+ ha->fcode_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0157,
+ "Read FCODE %d.%d.\n",
+ ha->fcode_revision[1], ha->fcode_revision[0]);
+ break;
+ case ROM_CODE_TYPE_EFI:
+ /* Extensible Firmware Interface (EFI). */
+ ha->efi_revision[0] = bcode[0x12];
+ ha->efi_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0158,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0159,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
+ break;
+ }
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Read firmware image information. */
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ dcode = mbuf;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
+ 0x20);
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 &&
+ bcode[0x2] == 0x40 && bcode[0x3] == 0x40) {
+ ha->fw_revision[0] = bcode[0x4];
+ ha->fw_revision[1] = bcode[0x5];
+ ha->fw_revision[2] = bcode[0x6];
+ ql_dbg(ql_dbg_init, vha, 0x0153,
+ "Firmware revision %d.%d.%d\n",
+ ha->fw_revision[0], ha->fw_revision[1],
+ ha->fw_revision[2]);
+ }
+
+ return ret;
+}
+
+int
+qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+ int ret = QLA_SUCCESS;
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *bcode;
+ uint8_t code_type, last_image;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_P3P_TYPE(ha))
+ return ret;
+
+ if (!mbuf)
+ return QLA_FUNCTION_FAILED;
+
+ memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+ memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+ dcode = mbuf;
+
+ /* Begin with first PCI expansion ROM header. */
+ pcihdr = ha->flt_region_boot << 2;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
+ bcode = mbuf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+ /* No signature */
+ ql_log(ql_log_fatal, vha, 0x0059,
+ "No matching ROM signature.\n");
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+ qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+ /* Incorrect header. */
+ ql_log(ql_log_fatal, vha, 0x005a,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Read version */
+ code_type = bcode[0x14];
+ switch (code_type) {
+ case ROM_CODE_TYPE_BIOS:
+ /* Intel x86, PC-AT compatible. */
+ ha->bios_revision[0] = bcode[0x12];
+ ha->bios_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x005b,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
+ break;
+ case ROM_CODE_TYPE_FCODE:
+ /* Open Firmware standard for PCI (FCode). */
+ ha->fcode_revision[0] = bcode[0x12];
+ ha->fcode_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x005c,
+ "Read FCODE %d.%d.\n",
+ ha->fcode_revision[1], ha->fcode_revision[0]);
+ break;
+ case ROM_CODE_TYPE_EFI:
+ /* Extensible Firmware Interface (EFI). */
+ ha->efi_revision[0] = bcode[0x12];
+ ha->efi_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x005d,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x005e,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
+ break;
+ }
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Read firmware image information. */
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ dcode = mbuf;
+
+ qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
+ for (i = 0; i < 4; i++)
+ dcode[i] = be32_to_cpu(dcode[i]);
+
+ if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
+ dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
+ (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+ dcode[3] == 0)) {
+ ql_log(ql_log_warn, vha, 0x005f,
+ "Unrecognized fw revision at %x.\n",
+ ha->flt_region_fw * 4);
+ } else {
+ ha->fw_revision[0] = dcode[0];
+ ha->fw_revision[1] = dcode[1];
+ ha->fw_revision[2] = dcode[2];
+ ha->fw_revision[3] = dcode[3];
+ ql_dbg(ql_dbg_init, vha, 0x0060,
+ "Firmware revision %d.%d.%d (%x).\n",
+ ha->fw_revision[0], ha->fw_revision[1],
+ ha->fw_revision[2], ha->fw_revision[3]);
+ }
+
+ /* Check for golden firmware and get version if available */
+ if (!IS_QLA81XX(ha)) {
+ /* Golden firmware is not present in non 81XX adapters */
+ return ret;
+ }
+
+ memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
+ dcode = mbuf;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode,
+ ha->flt_region_gold_fw << 2, 32);
+
+ if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
+ dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
+ ql_log(ql_log_warn, vha, 0x0056,
+ "Unrecognized golden fw at 0x%x.\n",
+ ha->flt_region_gold_fw * 4);
+ return ret;
+ }
+
+ for (i = 4; i < 8; i++)
+ ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]);
+
+ return ret;
+}
+
+static int
+qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
+{
+ if (pos >= end || *pos != 0x82)
+ return 0;
+
+ pos += 3 + pos[1];
+ if (pos >= end || *pos != 0x90)
+ return 0;
+
+ pos += 3 + pos[1];
+ if (pos >= end || *pos != 0x78)
+ return 0;
+
+ return 1;
+}
+
+int
+qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t *pos = ha->vpd;
+ uint8_t *end = pos + ha->vpd_size;
+ int len = 0;
+
+ if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end))
+ return 0;
+
+ while (pos < end && *pos != 0x78) {
+ len = (*pos == 0x82) ? pos[1] : pos[2];
+
+ if (!strncmp(pos, key, strlen(key)))
+ break;
+
+ if (*pos != 0x90 && *pos != 0x91)
+ pos += len;
+
+ pos += 3;
+ }
+
+ if (pos < end - len && *pos != 0x78)
+ return scnprintf(str, size, "%.*s", len, pos + 3);
+
+ return 0;
+}
+
+int
+qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
+{
+ int len, max_len;
+ uint32_t fcp_prio_addr;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ ql_log(ql_log_warn, vha, 0x00d5,
+ "Unable to allocate memory for fcp priorty data (%x).\n",
+ FCP_PRIO_CFG_SIZE);
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+
+ fcp_prio_addr = ha->flt_region_fcp_prio;
+
+ /* first read the fcp priority data header from flash */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+ fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
+
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
+ goto fail;
+
+ /* read remaining FCP CMD config data from flash */
+ fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
+ len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+ fcp_prio_addr << 2, (len < max_len ? len : max_len));
+
+ /* revalidate the entire FCP priority config data, including entries */
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
+ goto fail;
+
+ ha->flags.fcp_prio_enabled = 1;
+ return QLA_SUCCESS;
+fail:
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ return QLA_FUNCTION_FAILED;
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 000000000..fe8a8d157
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,5965 @@
+/*
+ * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ * based on qla2x00t.c code:
+ *
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static int ql2xtgt_tape_enable;
+module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xtgt_tape_enable,
+ "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+ "Determines when initiator mode will be enabled. Possible values: "
+ "\"exclusive\" - initiator mode will be enabled on load, "
+ "disabled on enabling target mode and then on disabling target mode "
+ "enabled back; "
+ "\"disabled\" - initiator mode will never be enabled; "
+ "\"enabled\" (default) - initiator mode will always stay enabled.");
+
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+static int temp_sam_status = SAM_STAT_BUSY;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+ FCP_TMF_CMPL = 0,
+ FCP_DATA_LEN_INVALID = 1,
+ FCP_CMND_FIELDS_INVALID = 2,
+ FCP_DATA_PARAM_MISMATCH = 3,
+ FCP_TMF_REJECTED = 4,
+ FCP_TMF_FAILED = 5,
+ FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE 0 /* simple task attribute */
+#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCP_PTA_ORDERED 2 /* ordered task attribute */
+#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
+#define FCP_PTA_MASK 7 /* mask for task attribute field */
+#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under HW lock and could unlock/lock it inside.
+ * It isn't an issue, since in the current implementation on the time when
+ * those functions are called:
+ *
+ * - Either context is IRQ and only IRQ handler can modify HW data,
+ * including rings related fields,
+ *
+ * - Or access to target mode variables from struct qla_tgt doesn't
+ * cross those functions boundaries, except tgt_stop, which
+ * additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+ struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+ *cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+ struct qla_tgt_srr_imm *imm, int ha_lock);
+static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd);
+static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull);
+static void qlt_disable_vha(struct scsi_qla_host *vha);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+ struct qla_tgt *tgt,
+ const uint8_t *port_name)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+ return sess;
+ }
+
+ return NULL;
+}
+
+/* Might release hw lock, then reaquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+ /* Send marker if required */
+ if (unlikely(vha->marker_needed != 0)) {
+ int rc = qla2x00_issue_marker(vha, vha_locked);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+ "qla_target(%d): issue_marker() failed\n",
+ vha->vp_idx);
+ }
+ return rc;
+ }
+ return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+ uint8_t *d_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t vp_idx;
+
+ if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+ return NULL;
+
+ if (vha->d_id.b.al_pa == d_id[2])
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+ uint16_t vp_idx)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == vp_idx)
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+
+ vha->hw->tgt.num_pend_cmds++;
+ if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
+ vha->hw->qla_stats.stat_max_pend_cmds =
+ vha->hw->tgt.num_pend_cmds;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+ vha->hw->tgt.num_pend_cmds--;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
+static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ ql_dbg(ql_dbg_tgt, vha, 0xe072,
+ "%s: qla_target(%d): type %x ox_id %04x\n",
+ __func__, vha->vp_idx, atio->u.raw.entry_type,
+ be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ {
+ struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ atio->u.isp24.fcp_hdr.d_id);
+ if (unlikely(NULL == host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+ "qla_target(%d): Received ATIO_TYPE7 "
+ "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+ atio->u.isp24.fcp_hdr.d_id[0],
+ atio->u.isp24.fcp_hdr.d_id[1],
+ atio->u.isp24.fcp_hdr.d_id[2]);
+ break;
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)atio;
+
+ if ((entry->u.isp24.vp_index != 0xFF) &&
+ (entry->u.isp24.nport_handle != 0xFFFF)) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+ "qla_target(%d): Received "
+ "ATIO (IMMED_NOTIFY_TYPE) "
+ "with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe040,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ return;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+ switch (pkt->entry_type) {
+ case CTIO_CRC2:
+ ql_dbg(ql_dbg_tgt, vha, 0xe073,
+ "qla_target(%d):%s: CRC2 Response pkt\n",
+ vha->vp_idx, __func__);
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe041,
+ "qla_target(%d): Response pkt (CTIO_TYPE7) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)pkt;
+
+ host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe042,
+ "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case NOTIFY_ACK_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+ if (0xFF != entry->u.isp24.vp_index) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe043,
+ "qla_target(%d): Response "
+ "pkt (NOTIFY_ACK_TYPE) "
+ "received, with unknown "
+ "vp_index %d\n", vha->vp_idx,
+ entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RECV_24XX:
+ {
+ struct abts_recv_from_24xx *entry =
+ (struct abts_recv_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe044,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RESP_24XX:
+ {
+ struct abts_resp_to_24xx *entry =
+ (struct abts_resp_to_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe045,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ default:
+ qlt_response_pkt(vha, pkt);
+ break;
+ }
+
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+ struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+ free_work);
+ struct qla_tgt *tgt = sess->tgt;
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!tgt);
+ /*
+ * Release the target session for FC Nexus from fabric module code.
+ */
+ if (sess->se_sess != NULL)
+ ha->tgt.tgt_ops->free_session(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ "Unregistration of sess %p finished\n", sess);
+
+ kfree(sess);
+ /*
+ * We need to protect against race, when tgt is freed before or
+ * inside wake_up()
+ */
+ tgt->sess_count--;
+ if (tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+ struct scsi_qla_host *vha = sess->vha;
+
+ vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+ list_del(&sess->sess_list_entry);
+ if (sess->deleted)
+ list_del(&sess->del_list_entry);
+
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint32_t unpacked_lun, lun = 0;
+ uint16_t loop_id;
+ int res = 0;
+ struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+ loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+ /* Global event */
+ atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(ha->tgt.qla_tgt);
+ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+ sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+ typeof(*sess), sess_list_entry);
+ switch (mcmd) {
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ mcmd = QLA_TGT_NEXUS_LOSS;
+ break;
+ case QLA_TGT_ABORT_ALL_SESS:
+ mcmd = QLA_TGT_ABORT_ALL;
+ break;
+ case QLA_TGT_NEXUS_LOSS:
+ case QLA_TGT_ABORT_ALL:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe046,
+ "qla_target(%d): Not allowed "
+ "command %x in %s", vha->vp_idx,
+ mcmd, __func__);
+ sess = NULL;
+ break;
+ }
+ } else
+ sess = NULL;
+#endif
+ } else {
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe000,
+ "Using sess for qla_tgt_reset: %p\n", sess);
+ if (!sess) {
+ res = -ESRCH;
+ return res;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe047,
+ "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
+ "loop_id %d)\n", vha->host_no, sess, sess->port_name,
+ mcmd, loop_id);
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+ iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ bool immediate)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+ if (sess->deleted)
+ return;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ sess->deleted = 1;
+
+ if (immediate)
+ dev_loss_tmo = 0;
+
+ sess->expires = jiffies + dev_loss_tmo * HZ;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+ "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
+ "deletion in %u secs (expires: %lu) immed: %d\n",
+ sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
+ sess->expires, immediate);
+
+ if (immediate)
+ schedule_delayed_work(&tgt->sess_del_work, 0);
+ else
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - jiffies);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+ qlt_schedule_sess_for_deletion(sess, true);
+
+ /* At this point tgt could be already dead */
+}
+
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+ uint16_t *loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ dma_addr_t gid_list_dma;
+ struct gid_list_info *gid_list;
+ char *id_iter;
+ int res, rc, i;
+ uint16_t entries;
+
+ gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+ "qla_target(%d): DMA Alloc failed of %u\n",
+ vha->vp_idx, qla2x00_gid_list_size(ha));
+ return -ENOMEM;
+ }
+
+ /* Get list of logged in devices */
+ rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+ "qla_target(%d): get_id_list() failed: %x\n",
+ vha->vp_idx, rc);
+ res = -1;
+ goto out_free_id_list;
+ }
+
+ id_iter = (char *)gid_list;
+ res = -1;
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+ if ((gid->al_pa == s_id[2]) &&
+ (gid->area == s_id[1]) &&
+ (gid->domain == s_id[0])) {
+ *loop_id = le16_to_cpu(gid->loop_id);
+ res = 0;
+ break;
+ }
+ id_iter += ha->gid_list_info_size;
+ }
+
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+ BUG_ON(!sess->deleted);
+
+ list_del(&sess->del_list_entry);
+ sess->deleted = 0;
+}
+
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+ sess_del_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags, elapsed;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (!list_empty(&tgt->del_sess_list)) {
+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+ del_list_entry);
+ elapsed = jiffies;
+ if (time_after_eq(elapsed, sess->expires)) {
+ qlt_undelete_sess(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+ "Timeout: sess %p about to be deleted\n",
+ sess);
+ ha->tgt.tgt_ops->shutdown_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ } else {
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - elapsed);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref to allow to drop hw lock after adding sess to the list.
+ * Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+ struct scsi_qla_host *vha,
+ fc_port_t *fcport,
+ bool local)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+ unsigned char be_sid[3];
+
+ /* Check to avoid double sessions */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
+ sess_list_entry) {
+ if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ "Double sess %p found (s_id %x:%x:%x, "
+ "loop_id %d), updating to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain,
+ sess->s_id.b.al_pa, sess->s_id.b.area,
+ sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area,
+ fcport->loop_id);
+
+ if (sess->deleted)
+ qlt_undelete_sess(sess);
+
+ kref_get(&sess->se_sess->sess_kref);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+
+ if (sess->local && !local)
+ sess->local = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return sess;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+ "qla_target(%u): session allocation failed, all commands "
+ "from port %8phC will be refused", vha->vp_idx,
+ fcport->port_name);
+
+ return NULL;
+ }
+ sess->tgt = vha->vha_tgt.qla_tgt;
+ sess->vha = vha;
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->local = local;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+ sess, vha->vha_tgt.qla_tgt);
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+ /*
+ * Determine if this fc_port->port_name is allowed to access
+ * target mode using explict NodeACLs+MappedLUNs, or using
+ * TPG demo mode. If this is successful a target mode FC nexus
+ * is created.
+ */
+ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+ &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+ kfree(sess);
+ return NULL;
+ }
+ /*
+ * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+ * access across ->hardware_lock reaquire.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+
+ sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
+ BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+ memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+ vha->vha_tgt.qla_tgt->sess_count++;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+ "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
+ "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name,
+ fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
+ sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
+
+ return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ if (qla_ini_mode_enabled(vha))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_create_sess(vha, fcport, false);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+
+ if (sess->deleted) {
+ qlt_undelete_sess(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+ "qla_target(%u): %ssession for port %8phC "
+ "(loop ID %d) reappeared\n", vha->vp_idx,
+ sess->local ? "local " : "", sess->port_name,
+ sess->loop_id);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+ "Reappeared sess %p\n", sess);
+ }
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+ }
+
+ if (sess && sess->local) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+ "qla_target(%u): local session for "
+ "port %8phC (loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name, sess->loop_id);
+ sess->local = 0;
+ }
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+ sess->local = 1;
+ qlt_schedule_sess_for_deletion(sess, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+ int res;
+ /*
+ * We need to protect against race, when tgt is freed before or
+ * inside wake_up()
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+ "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ res = (tgt->sess_count == 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+int qlt_stop_phase1(struct qla_tgt *tgt)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ mutex_lock(&qla_tgt_mutex);
+ if (!vha->fc_vport) {
+ struct Scsi_Host *sh = vha->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+ bool npiv_vports;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ npiv_vports = (fc_host->npiv_vports_inuse);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ if (npiv_vports) {
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
+ }
+ }
+ if (tgt->tgt_stop || tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+ "Already in tgt->tgt_stop or tgt_stopped state\n");
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+ vha->host_no, vha);
+ /*
+ * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+ * Lock is needed, because we still can get an incoming packet.
+ */
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stop = 1;
+ qlt_clear_tgt_db(tgt);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&qla_tgt_mutex);
+
+ flush_delayed_work(&tgt->sess_del_work);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+ "Waiting for sess works (tgt %p)", tgt);
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+ flush_scheduled_work();
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+ "Waiting for tgt %p: list_empty(sess_list)=%d "
+ "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+ tgt->sess_count);
+
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+ /* Big hammer */
+ if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+ qlt_disable_vha(vha);
+
+ /* Wait for sessions to clear out (just in case) */
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+ return 0;
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ unsigned long flags;
+
+ if (tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
+ "Already in tgt->tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
+ "Waiting for %d IRQ commands to complete (tgt %p)",
+ tgt->irq_cmd_count, tgt);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (tgt->irq_cmd_count != 0) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ udelay(2);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ tgt->tgt_stop = 0;
+ tgt->tgt_stopped = 1;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
+ tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+static void qlt_release(struct qla_tgt *tgt)
+{
+ scsi_qla_host_t *vha = tgt->vha;
+
+ if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ qlt_stop_phase2(tgt);
+
+ vha->vha_tgt.qla_tgt = NULL;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
+ "Release of tgt %p finished\n", tgt);
+
+ kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+ const void *param, unsigned int param_size)
+{
+ struct qla_tgt_sess_work_param *prm;
+ unsigned long flags;
+
+ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+ if (!prm) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+ "qla_target(%d): Unable to create session "
+ "work, command will be refused", 0);
+ return -ENOMEM;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+ "Scheduling work (type %d, prm %p)"
+ " to find session for param %p (size %d, tgt %p)\n",
+ type, prm, param, param_size, tgt);
+
+ prm->type = type;
+ memcpy(&prm->tm_iocb, param, param_size);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ schedule_work(&tgt->sess_work);
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct nack_to_isp *nack;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe049,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ if (vha->vha_tgt.qla_tgt != NULL)
+ vha->vha_tgt.qla_tgt->notify_ack_expected++;
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+ nack->u.isp24.srr_reject_code = srr_reject_code;
+ nack->u.isp24.srr_reject_code_expl = srr_explan;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe005,
+ "qla_target(%d): Sending 24xx Notify Ack %d\n",
+ vha->vp_idx, nack->u.isp24.status);
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, uint32_t status,
+ bool ids_reversed)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl;
+ uint8_t *p;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+ "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
+ ha, abts, status);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ if (!resp) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+ if (ids_reversed) {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+ } else {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+ }
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (status == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+ struct abts_resp_from_24xx_fw *entry)
+{
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ /*
+ * We've got on entrance firmware's response on by us generated
+ * ABTS response. So, in it ID fields are reversed.
+ */
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->nport_handle = entry->nport_handle;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+ ctio->exchange_addr = entry->exchange_addr_to_abort;
+ ctio->u.status1.flags =
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+
+ qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+ FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct se_session *se_sess = sess->se_sess;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ struct se_cmd *se_cmd;
+ u32 lun = 0;
+ int rc;
+ bool found_lun = false;
+
+ spin_lock(&se_sess->sess_cmd_lock);
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+ struct qla_tgt_cmd *cmd =
+ container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ if (cmd->tag == abts->exchange_addr_to_abort) {
+ lun = cmd->unpacked_lun;
+ found_lun = true;
+ break;
+ }
+ }
+ spin_unlock(&se_sess->sess_cmd_lock);
+
+ if (!found_lun)
+ return -ENOENT;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+ "qla_target(%d): task abort (tag=%d)\n",
+ vha->vp_idx, abts->exchange_addr_to_abort);
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+ mcmd->reset_count = vha->hw->chip_reset;
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
+ abts->exchange_addr_to_abort);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+ "qla_target(%d): tgt_ops->handle_tmr()"
+ " failed: %d", vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ uint32_t tag = abts->exchange_addr_to_abort;
+ uint8_t s_id[3];
+ int rc;
+
+ if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+ "qla_target(%d): ABTS: Abort Sequence not "
+ "supported\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+ "qla_target(%d): ABTS: Unknown Exchange "
+ "Address received\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+ "qla_target(%d): task abort (s_id=%x:%x:%x, "
+ "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+ abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+ le32_to_cpu(abts->fcp_hdr_le.parameter));
+
+ s_id[0] = abts->fcp_hdr_le.s_id[2];
+ s_id[1] = abts->fcp_hdr_le.s_id[1];
+ s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+ "qla_target(%d): task abort for non-existant session\n",
+ vha->vp_idx);
+ rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+ if (rc != 0) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+ false);
+ }
+ return;
+ }
+
+ rc = __qlt_24xx_handle_abts(vha, abts, sess);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+ "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+ vha->vp_idx, rc);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+ struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+ struct ctio7_to_24xx *ctio;
+ uint16_t temp;
+
+ ql_dbg(ql_dbg_tgt, ha, 0xe008,
+ "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
+ ha, atio, resp_code);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", ha->vp_idx, __func__);
+ return;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = ha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_SEND_STATUS);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.ox_id = cpu_to_le16(temp);
+ ctio->u.status1.scsi_status =
+ __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+ ctio->u.status1.sense_data[0] = resp_code;
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(ha, ha->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+ "TM response mcmd (%p) status %#x state %#x",
+ mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ ql_dbg(ql_dbg_async, vha, 0xe100,
+ "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), mcmd->reset_count,
+ ha->chip_reset);
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ else {
+ if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ mcmd->fc_tm_rsp, false);
+ else
+ qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+ mcmd->fc_tm_rsp);
+ }
+ /*
+ * Make the callback for ->free_mcmd() to queue_work() and invoke
+ * target_put_sess_cmd() to drop cmd_kref to 1. The final
+ * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+ * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+ * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+ * qlt_xmit_tm_rsp() returns here..
+ */
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd = prm->cmd;
+
+ BUG_ON(cmd->sg_cnt == 0);
+
+ prm->sg = (struct scatterlist *)cmd->sg;
+ prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction);
+ if (unlikely(prm->seg_cnt == 0))
+ goto out_err;
+
+ prm->cmd->sg_mapped = 1;
+
+ if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
+ /*
+ * If greater than four sg entries then we need to allocate
+ * the continuation entries
+ */
+ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+ prm->tgt->datasegs_per_cmd,
+ prm->tgt->datasegs_per_cont);
+ } else {
+ /* DIF */
+ if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+ (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+ prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
+ prm->tot_dsds = prm->seg_cnt;
+ } else
+ prm->tot_dsds = prm->seg_cnt;
+
+ if (cmd->prot_sg_cnt) {
+ prm->prot_sg = cmd->prot_sg;
+ prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
+ cmd->prot_sg, cmd->prot_sg_cnt,
+ cmd->dma_data_direction);
+ if (unlikely(prm->prot_seg_cnt == 0))
+ goto out_err;
+
+ if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+ (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+ /* Dif Bundling not support here */
+ prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
+ cmd->blk_sz);
+ prm->tot_dsds += prm->prot_seg_cnt;
+ } else
+ prm->tot_dsds += prm->prot_seg_cnt;
+ }
+ }
+
+ return 0;
+
+out_err:
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+ "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+ 0, prm->cmd->sg_cnt);
+ return -1;
+}
+
+static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!cmd->sg_mapped)
+ return;
+
+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ cmd->sg_mapped = 0;
+
+ if (cmd->prot_sg_cnt)
+ pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ cmd->dma_data_direction);
+
+ if (cmd->ctx_dsd_alloced)
+ qla2x00_clean_dsd_pool(ha, NULL, cmd);
+
+ if (cmd->ctx)
+ dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
+}
+
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ uint32_t req_cnt)
+{
+ uint32_t cnt, cnt_in;
+
+ if (vha->req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+ cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
+
+ if (vha->req->ring_index < cnt)
+ vha->req->cnt = cnt - vha->req->ring_index;
+ else
+ vha->req->cnt = vha->req->length -
+ (vha->req->ring_index - cnt);
+ }
+
+ if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+ ql_dbg(ql_dbg_io, vha, 0x305a,
+ "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+ vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
+ return -EAGAIN;
+ }
+ vha->req->cnt -= req_cnt;
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+ /* Adjust ring index. */
+ vha->req->ring_index++;
+ if (vha->req->ring_index == vha->req->length) {
+ vha->req->ring_index = 0;
+ vha->req->ring_ptr = vha->req->ring;
+ } else {
+ vha->req->ring_ptr++;
+ }
+ return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t h;
+
+ h = ha->tgt.current_handle;
+ /* always increment cmd handle */
+ do {
+ ++h;
+ if (h > DEFAULT_OUTSTANDING_COMMANDS)
+ h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+ if (h == ha->tgt.current_handle) {
+ ql_dbg(ql_dbg_io, vha, 0x305b,
+ "qla_target(%d): Ran out of "
+ "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+ h = QLA_TGT_NULL_HANDLE;
+ break;
+ }
+ } while ((h == QLA_TGT_NULL_HANDLE) ||
+ (h == QLA_TGT_SKIP_HANDLE) ||
+ (ha->tgt.cmds[h-1] != NULL));
+
+ if (h != QLA_TGT_NULL_HANDLE)
+ ha->tgt.current_handle = h;
+
+ return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ uint32_t h;
+ struct ctio7_to_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
+ struct atio_from_isp *atio = &prm->cmd->atio;
+ uint16_t temp;
+
+ pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->entry_type = CTIO_TYPE7;
+ pkt->entry_count = (uint8_t)prm->req_cnt;
+ pkt->vp_index = vha->vp_idx;
+
+ h = qlt_make_handle(vha);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and, so, the command.
+ */
+ return -EAGAIN;
+ } else
+ ha->tgt.cmds[h-1] = prm->cmd;
+
+ pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = prm->cmd->loop_id;
+ pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->exchange_addr = atio->u.isp24.exchange_addr;
+ pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->u.status0.ox_id = cpu_to_le16(temp);
+ pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+ /* Build continuation packets */
+ while (prm->seg_cnt > 0) {
+ cont_a64_entry_t *cont_pkt64 =
+ (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+ /*
+ * Make sure that from cont_pkt64 none of
+ * 64-bit specific fields used for 32-bit
+ * addressing. Cast to (cont_entry_t *) for
+ * that.
+ */
+
+ memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+ cont_pkt64->entry_count = 1;
+ cont_pkt64->sys_define = 0;
+
+ if (enable_64bit_addressing) {
+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+ dword_ptr =
+ (uint32_t *)&cont_pkt64->dseg_0_address;
+ } else {
+ cont_pkt64->entry_type = CONTINUE_TYPE;
+ dword_ptr =
+ (uint32_t *)&((cont_entry_t *)
+ cont_pkt64)->dseg_0_address;
+ }
+
+ /* Load continuation entry data segments */
+ for (cnt = 0;
+ cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32
+ (sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32
+ (sg_dma_address
+ (prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+ struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+ pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+ /* Setup packet address segment pointer */
+ dword_ptr = pkt24->u.status0.dseg_0_address;
+
+ /* Set total data segment count */
+ if (prm->seg_cnt)
+ pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+ if (prm->seg_cnt == 0) {
+ /* No data transfer */
+ *dword_ptr++ = 0;
+ *dword_ptr = 0;
+ return;
+ }
+
+ /* If scatter gather */
+
+ /* Load command entry data segments */
+ for (cnt = 0;
+ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32(
+ sg_dma_address(prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+
+ qlt_load_cont_data_segments(prm, vha);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+ return cmd->bufflen > 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+ struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+ uint32_t *full_req_cnt)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (unlikely(cmd->aborted)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange "
+ "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+ se_cmd, cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->cmd_flags |= BIT_6;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+ /* !! At this point cmd could be already freed !! */
+ return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+ }
+
+ prm->cmd = cmd;
+ prm->tgt = tgt;
+ prm->rq_result = scsi_status;
+ prm->sense_buffer = &cmd->sense_buffer[0];
+ prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+ prm->sg = NULL;
+ prm->seg_cnt = -1;
+ prm->req_cnt = 1;
+ prm->add_status_pkt = 0;
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EFAULT;
+
+ if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+ if (qlt_pci_map_calc_cnt(prm) != 0)
+ return -EAGAIN;
+ }
+
+ *full_req_cnt = prm->req_cnt;
+
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
+ "Residual underflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_UNDER;
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_io, vha, 0x305d,
+ "Residual overflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_OVER;
+ }
+
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ /*
+ * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+ * ignored in *xmit_response() below
+ */
+ if (qlt_has_data(cmd)) {
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+ (IS_FWI2_CAPABLE(ha) &&
+ (prm->rq_result != 0))) {
+ prm->add_status_pkt = 1;
+ (*full_req_cnt)++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+ struct qla_tgt_cmd *cmd, int sending_sense)
+{
+ if (ha->tgt.enable_class_2)
+ return 0;
+
+ if (sending_sense)
+ return cmd->conf_compl_supported;
+ else
+ return ha->tgt.enable_explicit_conf &&
+ cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ * Original taken from the XFS code
+ */
+static unsigned long qlt_srr_random(void)
+{
+ static int Inited;
+ static unsigned long RandomValue;
+ static DEFINE_SPINLOCK(lock);
+ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+ register long rv;
+ register long lo;
+ register long hi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (!Inited) {
+ RandomValue = jiffies;
+ Inited = 1;
+ }
+ rv = RandomValue;
+ hi = rv / 127773;
+ lo = rv % 127773;
+ rv = 16807 * lo - 2836 * hi;
+ if (rv <= 0)
+ rv += 2147483647;
+ RandomValue = rv;
+ spin_unlock_irqrestore(&lock, flags);
+ return rv;
+}
+
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
+ if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+ == 50) {
+ *xmit_type &= ~QLA_TGT_XMIT_STATUS;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+ "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+ }
+#endif
+ /*
+ * It's currently not possible to simulate SRRs for FCP_WRITE without
+ * a physical link layer failure, so don't even try here..
+ */
+ if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+ return;
+
+ if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+ ((qlt_srr_random() % 100) == 20)) {
+ int i, leave = 0;
+ unsigned int tot_len = 0;
+
+ while (leave == 0)
+ leave = qlt_srr_random() % cmd->sg_cnt;
+
+ for (i = 0; i < leave; i++)
+ tot_len += cmd->sg[i].length;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+ "Cutting cmd %p (tag %d) buffer"
+ " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+ " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+ cmd->bufflen, cmd->sg_cnt);
+
+ cmd->bufflen = tot_len;
+ cmd->sg_cnt = leave;
+ }
+
+ if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+ unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+ "Cutting cmd %p (tag %d) buffer head "
+ "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+ cmd->bufflen);
+ if (offset == 0)
+ *xmit_type &= ~QLA_TGT_XMIT_DATA;
+ else if (qlt_set_data_offset(cmd, offset)) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+ "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+ }
+ }
+}
+#else
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+ struct qla_tgt_prm *prm)
+{
+ prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+ (uint32_t)sizeof(ctio->u.status1.sense_data));
+ ctio->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+ ctio->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ ctio->u.status0.residual = cpu_to_le32(prm->residual);
+ ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+ int i;
+
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+ if (prm->cmd->se_cmd.scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+ "Skipping EXPLICIT_CONFORM and "
+ "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+ "non GOOD status\n");
+ goto skip_explict_conf;
+ }
+ ctio->u.status1.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+skip_explict_conf:
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.scsi_status |=
+ __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+ ctio->u.status1.sense_length =
+ cpu_to_le16(prm->sense_buffer_len);
+ for (i = 0; i < prm->sense_buffer_len/4; i++)
+ ((uint32_t *)ctio->u.status1.sense_data)[i] =
+ cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+ if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+ static int q;
+ if (q < 10) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+ "qla_target(%d): %d bytes of sense "
+ "lost", prm->tgt->ha->vp_idx,
+ prm->sense_buffer_len % 4);
+ q++;
+ }
+ }
+#endif
+ } else {
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.sense_length = 0;
+ memset(ctio->u.status1.sense_data, 0,
+ sizeof(ctio->u.status1.sense_data));
+ }
+
+ /* Sense with len > 24, is it possible ??? */
+}
+
+
+
+/* diff */
+static inline int
+qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
+{
+ /*
+ * Uncomment when corresponding SCSI changes are done.
+ *
+ if (!sp->cmd->prot_chk)
+ return 0;
+ *
+ */
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ if (ql2xenablehba_err_chk >= 1)
+ return 1;
+ break;
+ case TARGET_PROT_DOUT_PASS:
+ case TARGET_PROT_DIN_PASS:
+ if (ql2xenablehba_err_chk >= 2)
+ return 1;
+ break;
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_STRIP:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
+ *
+ */
+static inline void
+qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+{
+ uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+
+ /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+ * have been immplemented by TCM, before AppTag is avail.
+ * Look for modesense_handlers[]
+ */
+ ctx->app_tag = 0;
+ ctx->app_tag_mask[0] = 0x0;
+ ctx->app_tag_mask[1] = 0x0;
+
+ switch (se_cmd->prot_type) {
+ case TARGET_DIF_TYPE0_PROT:
+ /*
+ * No check for ql2xenablehba_err_chk, as it would be an
+ * I/O error if hba tag generation is not done.
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
+ /*
+ * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+ * 16 bit app tag.
+ */
+ case TARGET_DIF_TYPE1_PROT:
+ ctx->ref_tag = cpu_to_le32(lba);
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
+ * match LBA in CDB + N
+ */
+ case TARGET_DIF_TYPE2_PROT:
+ ctx->ref_tag = cpu_to_le32(lba);
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ break;
+
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
+
+ /* For Type 3 protection: 16 bit GUARD only */
+ case TARGET_DIF_TYPE3_PROT:
+ ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+ ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+ break;
+ }
+}
+
+
+static inline int
+qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
+{
+ uint32_t *cur_dsd;
+ int sgc;
+ uint32_t transfer_length = 0;
+ uint32_t data_bytes;
+ uint32_t dif_bytes;
+ uint8_t bundling = 1;
+ uint8_t *clr_ptr;
+ struct crc_context *crc_ctx_pkt = NULL;
+ struct qla_hw_data *ha;
+ struct ctio_crc2_to_fw *pkt;
+ dma_addr_t crc_ctx_dma;
+ uint16_t fw_prot_opts = 0;
+ struct qla_tgt_cmd *cmd = prm->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ uint32_t h;
+ struct atio_from_isp *atio = &prm->cmd->atio;
+ uint16_t t16;
+
+ sgc = 0;
+ ha = vha->hw;
+
+ pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe071,
+ "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
+ vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+ prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
+
+ if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
+ (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
+ bundling = 0;
+
+ /* Compute dif len and adjust data len to incude protection */
+ data_bytes = cmd->bufflen;
+ dif_bytes = (data_bytes / cmd->blk_sz) * 8;
+
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_STRIP:
+ transfer_length = data_bytes;
+ data_bytes += dif_bytes;
+ break;
+
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ transfer_length = data_bytes + dif_bytes;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ if (!qlt_hba_err_chk_enabled(se_cmd))
+ fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+ /* HBA error checking enabled */
+ else if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+ (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+ fw_prot_opts |= PO_DIS_VALD_APP_ESC;
+ else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+ fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+ }
+
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_INSERT:
+ fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_STRIP:
+ fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ fw_prot_opts |= PO_MODE_DIF_PASS;
+ /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
+ break;
+ default:/* Normal Request */
+ fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ }
+
+
+ /* ---- PKT ---- */
+ /* Update entry type to indicate Command Type CRC_2 IOCB */
+ pkt->entry_type = CTIO_CRC2;
+ pkt->entry_count = 1;
+ pkt->vp_index = vha->vp_idx;
+
+ h = qlt_make_handle(vha);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and, so, the command.
+ */
+ return -EAGAIN;
+ } else
+ ha->tgt.cmds[h-1] = prm->cmd;
+
+
+ pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = prm->cmd->loop_id;
+ pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->exchange_addr = atio->u.isp24.exchange_addr;
+
+ /* silence compile warning */
+ t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->ox_id = cpu_to_le16(t16);
+
+ t16 = (atio->u.isp24.attr << 9);
+ pkt->flags |= cpu_to_le16(t16);
+ pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
+
+ /* Set transfer direction */
+ if (cmd->dma_data_direction == DMA_TO_DEVICE)
+ pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+ else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+ pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+
+
+ pkt->dseg_count = prm->tot_dsds;
+ /* Fibre channel byte count */
+ pkt->transfer_length = cpu_to_le32(transfer_length);
+
+
+ /* ----- CRC context -------- */
+
+ /* Allocate CRC context from global pool */
+ crc_ctx_pkt = cmd->ctx =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+
+ if (!crc_ctx_pkt)
+ goto crc_queuing_error;
+
+ /* Zero out CTX area. */
+ clr_ptr = (uint8_t *)crc_ctx_pkt;
+ memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+ crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+ INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+ /* Set handle */
+ crc_ctx_pkt->handle = pkt->handle;
+
+ qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+
+ pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+ pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+
+ if (!bundling) {
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ } else {
+ /*
+ * Configure Bundling if we need to fetch interlaving
+ * protection PCI accesses
+ */
+ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+ crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+ crc_ctx_pkt->u.bundling.dseg_count =
+ cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ }
+
+ /* Finish the common fields of CRC pkt */
+ crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
+ crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+ crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+ crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+
+
+ /* Walks data segments */
+ pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+
+ if (!bundling && prm->prot_seg_cnt) {
+ if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
+ prm->tot_dsds, cmd))
+ goto crc_queuing_error;
+ } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
+ (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+ goto crc_queuing_error;
+
+ if (bundling && prm->prot_seg_cnt) {
+ /* Walks dif segments */
+ pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
+
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
+ prm->prot_seg_cnt, cmd))
+ goto crc_queuing_error;
+ }
+ return QLA_SUCCESS;
+
+crc_queuing_error:
+ /* Cleanup will be performed by the caller */
+
+ return QLA_FUNCTION_FAILED;
+}
+
+
+/*
+ * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ uint8_t scsi_status)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct ctio7_to_24xx *pkt;
+ struct qla_tgt_prm prm;
+ uint32_t full_req_cnt = 0;
+ unsigned long flags = 0;
+ int res;
+
+ memset(&prm, 0, sizeof(prm));
+ qlt_check_srr_debug(cmd, &xmit_type);
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
+ (xmit_type & QLA_TGT_XMIT_STATUS) ?
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
+ &cmd->se_cmd);
+
+ res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ &full_req_cnt);
+ if (unlikely(res != 0)) {
+ if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+ return 0;
+
+ return res;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ ql_dbg(ql_dbg_async, vha, 0xe101,
+ "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), cmd->reset_count,
+ ha->chip_reset);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+
+ /* Does F/W have an IOCBs for this request */
+ res = qlt_check_reserve_free_req(vha, full_req_cnt);
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
+ if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
+ res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ else
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unmap_unlock;
+
+
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+ if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+ CTIO7_FLAGS_STATUS_MODE_0);
+
+ if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+ qlt_load_data_segments(&prm, vha);
+
+ if (prm.add_status_pkt == 0) {
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ pkt->u.status0.scsi_status =
+ cpu_to_le16(prm.rq_result);
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(ha, cmd, 0)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ }
+
+ } else {
+ /*
+ * We have already made sure that there is sufficient
+ * amount of request entries to not drop HW lock in
+ * req_pkt().
+ */
+ struct ctio7_to_24xx *ctio =
+ (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+ ql_dbg(ql_dbg_io, vha, 0x305e,
+ "Building additional status packet 0x%p.\n",
+ ctio);
+
+ /*
+ * T10Dif: ctio_crc2_to_fw overlay ontop of
+ * ctio7_to_24xx
+ */
+ memcpy(ctio, pkt, sizeof(*ctio));
+ /* reset back to CTIO7 */
+ ctio->entry_count = 1;
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->dseg_count = 0;
+ ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+ CTIO7_FLAGS_DATA_IN);
+
+ /* Real finish is ctio_m1's finish */
+ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_DONT_RET_CTIO);
+
+ /* qlt_24xx_init_ctio_to_isp will correct
+ * all neccessary fields that's part of CTIO7.
+ * There should be no residual of CTIO-CRC2 data.
+ */
+ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+ &prm);
+ pr_debug("Status CTIO7: %p\n", ctio);
+ }
+ } else
+ qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+
+ cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+ cmd->cmd_sent_to_fw = 1;
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return 0;
+
+out_unmap_unlock:
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+ struct ctio7_to_24xx *pkt;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = cmd->tgt;
+ struct qla_tgt_prm prm;
+ unsigned long flags;
+ int res = 0;
+
+ memset(&prm, 0, sizeof(prm));
+ prm.cmd = cmd;
+ prm.tgt = tgt;
+ prm.sg = NULL;
+ prm.req_cnt = 1;
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EIO;
+
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+ */
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ ql_dbg(ql_dbg_async, vha, 0xe102,
+ "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
+ qla2x00_reset_active(vha), cmd->reset_count,
+ ha->chip_reset);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+
+ /* Does F/W have an IOCBs for this request */
+ res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+ if (res != 0)
+ goto out_unlock_free_unmap;
+ if (cmd->se_cmd.prot_op)
+ res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ else
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+
+ if (unlikely(res != 0))
+ goto out_unlock_free_unmap;
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ CTIO7_FLAGS_STATUS_MODE_0);
+
+ if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+ qlt_load_data_segments(&prm, vha);
+
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+ cmd->cmd_sent_to_fw = 1;
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+
+out_unlock_free_unmap:
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA.
+ */
+static inline int
+qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
+ struct ctio_crc_from_fw *sts)
+{
+ uint8_t *ap = &sts->actual_dif[0];
+ uint8_t *ep = &sts->expected_dif[0];
+ uint32_t e_ref_tag, a_ref_tag;
+ uint16_t e_app_tag, a_app_tag;
+ uint16_t e_guard, a_guard;
+ uint64_t lba = cmd->se_cmd.t_task_lba;
+
+ a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+ a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+ a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+
+ e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+ e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+ e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe075,
+ "iocb(s) %p Returned STATUS.\n", sts);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xf075,
+ "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
+
+ /*
+ * Ignore sector if:
+ * For type 3: ref & app tag is all 'f's
+ * For type 0,1,2: app tag is all 'f's
+ */
+ if ((a_app_tag == 0xffff) &&
+ ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
+ (a_ref_tag == 0xffffffff))) {
+ uint32_t blocks_done;
+
+ /* 2TB boundary case covered automatically with this */
+ blocks_done = e_ref_tag - (uint32_t)lba + 1;
+ cmd->se_cmd.bad_sector = e_ref_tag;
+ cmd->se_cmd.pi_err = 0;
+ ql_dbg(ql_dbg_tgt, vha, 0xf074,
+ "need to return scsi good\n");
+
+ /* Update protection tag */
+ if (cmd->prot_sg_cnt) {
+ uint32_t i, j = 0, k = 0, num_ent;
+ struct scatterlist *sg, *sgl;
+
+
+ sgl = cmd->prot_sg;
+
+ /* Patch the corresponding protection tags */
+ for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
+ num_ent = sg_dma_len(sg) / 8;
+ if (k + num_ent < blocks_done) {
+ k += num_ent;
+ continue;
+ }
+ j = blocks_done - k - 1;
+ k = blocks_done;
+ break;
+ }
+
+ if (k != blocks_done) {
+ ql_log(ql_log_warn, vha, 0xf076,
+ "unexpected tag values tag:lba=%u:%llu)\n",
+ e_ref_tag, (unsigned long long)lba);
+ goto out;
+ }
+
+#if 0
+ struct sd_dif_tuple *spt;
+ /* TODO:
+ * This section came from initiator. Is it valid here?
+ * should ulp be override with actual val???
+ */
+ spt = page_address(sg_page(sg)) + sg->offset;
+ spt += j;
+
+ spt->app_tag = 0xffff;
+ if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
+ spt->ref_tag = 0xffffffff;
+#endif
+ }
+
+ return 0;
+ }
+
+ /* check guard */
+ if (e_guard != a_guard) {
+ cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+ ql_log(ql_log_warn, vha, 0xe076,
+ "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+ a_guard, e_guard, cmd);
+ goto out;
+ }
+
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
+ cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ cmd->se_cmd.bad_sector = e_ref_tag;
+
+ ql_log(ql_log_warn, vha, 0xe077,
+ "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+ a_guard, e_guard, cmd);
+ goto out;
+ }
+
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
+ cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+ ql_log(ql_log_warn, vha, 0xe078,
+ "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+ cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+ a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+ a_guard, e_guard, cmd);
+ goto out;
+ }
+out:
+ return 1;
+}
+
+
+/* If hardware_lock held on entry, might drop it, then reaquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd,
+ struct atio_from_isp *atio)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+ uint16_t temp;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe050,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ if (cmd != NULL) {
+ if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe051,
+ "qla_target(%d): Terminating cmd %p with "
+ "incorrect state %d\n", vha->vp_idx, cmd,
+ cmd->state);
+ } else
+ ret = 1;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.ox_id = cpu_to_le16(temp);
+
+ /* Most likely, it isn't needed */
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+ unsigned long flags;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, 0, 0);
+ goto done;
+ }
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, 0, 0);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+done:
+ if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
+ !cmd->cmd_sent_to_fw)) {
+ if (!ha_locked && !in_interrupt())
+ msleep(250); /* just in case */
+
+ qlt_unmap_sg(vha, cmd);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+ return;
+}
+
+static void qlt_init_term_exchange(struct scsi_qla_host *vha)
+{
+ struct list_head free_list;
+ struct qla_tgt_cmd *cmd, *tcmd;
+
+ vha->hw->tgt.leak_exchg_thresh_hold =
+ (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+
+ cmd = tcmd = NULL;
+ if (!list_empty(&vha->hw->tgt.q_full_list)) {
+ INIT_LIST_HEAD(&free_list);
+ list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
+
+ list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+ list_del(&cmd->cmd_list);
+ /* This cmd was never sent to TCM. There is no need
+ * to schedule free or call free_cmd
+ */
+ qlt_free_cmd(cmd);
+ vha->hw->tgt.num_qfull_cmds_alloc--;
+ }
+ }
+ vha->hw->tgt.num_qfull_cmds_dropped = 0;
+}
+
+static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+{
+ uint32_t total_leaked;
+
+ total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
+
+ if (vha->hw->tgt.leak_exchg_thresh_hold &&
+ (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe079,
+ "Chip reset due to exchange starvation: %d/%d.\n",
+ total_leaked, vha->hw->fw_xcb_count);
+
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+
+}
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ struct qla_tgt_sess *sess = cmd->sess;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
+ "%s: se_cmd[%p] ox_id %04x\n",
+ __func__, &cmd->se_cmd,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+
+ BUG_ON(cmd->cmd_in_wq);
+
+ if (!cmd->q_full)
+ qlt_decr_num_pend_cmds(cmd->vha);
+
+ BUG_ON(cmd->sg_mapped);
+ cmd->jiffies_at_free = get_jiffies_64();
+ if (unlikely(cmd->free_sg))
+ kfree(cmd->sg);
+
+ if (!sess || !sess->se_sess) {
+ WARN_ON(1);
+ return;
+ }
+ cmd->jiffies_at_free = get_jiffies_64();
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, void *ctio)
+{
+ struct qla_tgt_srr_ctio *sc;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr_imm *imm;
+
+ tgt->ctio_srr_id++;
+ cmd->cmd_flags |= BIT_15;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+ "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+ if (!ctio) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+ "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+ vha->vp_idx);
+ return -EINVAL;
+ }
+
+ sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+ if (sc != NULL) {
+ sc->cmd = cmd;
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ sc->srr_id = tgt->ctio_srr_id;
+ list_add_tail(&sc->srr_list_entry,
+ &tgt->srr_ctio_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+ "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(imm, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == sc->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR IMM, deleting CTIO "
+ "SRR %p\n", vha->vp_idx,
+ tgt->ctio_srr_id, sc);
+ list_del(&sc->srr_list_entry);
+ spin_unlock(&tgt->srr_lock);
+
+ kfree(sc);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_imm *ti;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+ "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+ vha->vp_idx);
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == tgt->ctio_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+ "IMM SRR %p deleted (id %d)\n",
+ imm, imm->srr_id);
+ list_del(&imm->srr_list_entry);
+ qlt_reject_free_srr_imm(vha, imm, 1);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+ struct qla_tgt_cmd *cmd, uint32_t status)
+{
+ int term = 0;
+
+ if (ctio != NULL) {
+ struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+ term = !(c->flags &
+ __constant_cpu_to_le16(OF_TERM_EXCH));
+ } else
+ term = 1;
+
+ if (term)
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+ return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+ uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ handle--;
+ if (ha->tgt.cmds[handle] != NULL) {
+ struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
+ ha->tgt.cmds[handle] = NULL;
+ return cmd;
+ } else
+ return NULL;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+ uint32_t handle, void *ctio)
+{
+ struct qla_tgt_cmd *cmd = NULL;
+
+ /* Clear out internal marks */
+ handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+ CTIO_INTERMEDIATE_HANDLE_MARK);
+
+ if (handle != QLA_TGT_NULL_HANDLE) {
+ if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
+ return NULL;
+
+ /* handle-1 is actually used */
+ if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+ cmd = qlt_get_cmd(vha, handle);
+ if (unlikely(cmd == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe053,
+ "qla_target(%d): Suspicious: unable to "
+ "find the command with handle %x\n", vha->vp_idx,
+ handle);
+ return NULL;
+ }
+ } else if (ctio != NULL) {
+ /* We can't get loop ID from CTIO7 */
+ ql_dbg(ql_dbg_tgt, vha, 0xe054,
+ "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+ "support NULL handles\n", vha->vp_idx);
+ return NULL;
+ }
+
+ return cmd;
+}
+
+/* hardware_lock should be held by caller. */
+static void
+qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ handle = qlt_make_handle(vha);
+
+ /* TODO: fix debug message type and ids. */
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_io, vha, 0xff00,
+ "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->write_data_transferred = 0;
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ ql_dbg(ql_dbg_io, vha, 0xff01,
+ "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_io, vha, 0xff02,
+ "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0xff03,
+ "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
+ cmd->state);
+ dump_stack();
+ }
+
+ cmd->cmd_flags |= BIT_12;
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+void
+qlt_host_reset_handler(struct qla_hw_data *ha)
+{
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ scsi_qla_host_t *vha = NULL;
+ struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
+ uint32_t i;
+
+ if (!base_vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || qla_ini_mode_enabled(base_vha)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "Target mode disabled\n");
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
+ "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
+ base_vha->dpc_flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
+ cmd = qlt_get_cmd(base_vha, i);
+ if (!cmd)
+ continue;
+ /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
+ vha = cmd->vha;
+ qlt_abort_cmd_on_host_reset(vha, cmd);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+ uint32_t status, void *ctio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd;
+ const struct target_core_fabric_ops *tfo;
+ struct qla_tgt_cmd *cmd;
+
+ if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+ /* That could happen only in case of an error/reset/abort */
+ if (status != CTIO_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+ "Intermediate CTIO received"
+ " (status %x)\n", status);
+ }
+ return;
+ }
+
+ cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+ if (cmd == NULL)
+ return;
+
+ se_cmd = &cmd->se_cmd;
+ tfo = se_cmd->se_tfo;
+ cmd->cmd_sent_to_fw = 0;
+
+ qlt_unmap_sg(vha, cmd);
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ case CTIO_TARGET_RESET:
+ case CTIO_ABORTED:
+ /* driver request abort via Terminate exchange */
+ case CTIO_TIMEOUT:
+ case CTIO_INVALID_RX_ID:
+ /* They are OK */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+ "qla_target(%d): CTIO with "
+ "status %#x received, state %x, se_cmd %p, "
+ "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+ "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_PORT_LOGGED_OUT:
+ case CTIO_PORT_UNAVAILABLE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+ "qla_target(%d): CTIO with PORT LOGGED "
+ "OUT (29) or PORT UNAVAILABLE (28) status %x "
+ "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+ "qla_target(%d): CTIO with SRR_RECEIVED"
+ " status %x received (state %x, se_cmd %p)\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+ break;
+ else
+ return;
+
+ case CTIO_DIF_ERROR: {
+ struct ctio_crc_from_fw *crc =
+ (struct ctio_crc_from_fw *)ctio;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
+ "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+ vha->vp_idx, status, cmd->state, se_cmd,
+ *((u64 *)&crc->actual_dif[0]),
+ *((u64 *)&crc->expected_dif[0]));
+
+ if (qlt_handle_dif_error(vha, cmd, ctio)) {
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ /* scsi Write/xfer rdy complete */
+ goto skip_term;
+ } else {
+ /* scsi read/xmit respond complete
+ * call handle dif to send scsi status
+ * rather than terminate exchange.
+ */
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ ha->tgt.tgt_ops->handle_dif_err(cmd);
+ return;
+ }
+ } else {
+ /* Need to generate a SCSI good completion.
+ * because FW did not send scsi status.
+ */
+ status = 0;
+ goto skip_term;
+ }
+ break;
+ }
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+ }
+
+
+ /* "cmd->state == QLA_TGT_STATE_ABORTED" means
+ * cmd is already aborted/terminated, we don't
+ * need to terminate again. The exchange is already
+ * cleaned up/freed at FW level. Just cleanup at driver
+ * level.
+ */
+ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+ (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ cmd->cmd_flags |= BIT_13;
+ if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+ return;
+ }
+ }
+skip_term:
+
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ;
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ int rx_status = 0;
+
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ if (unlikely(status != CTIO_SUCCESS))
+ rx_status = -EIO;
+ else
+ cmd->write_data_transferred = 1;
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+ "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+ "qla_target(%d): A command in state (%d) should "
+ "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ }
+
+ if (unlikely(status != CTIO_SUCCESS) &&
+ (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+ dump_stack();
+ }
+
+
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+ uint8_t task_codes)
+{
+ int fcp_task_attr;
+
+ switch (task_codes) {
+ case ATIO_SIMPLE_QUEUE:
+ fcp_task_attr = TCM_SIMPLE_TAG;
+ break;
+ case ATIO_HEAD_OF_QUEUE:
+ fcp_task_attr = TCM_HEAD_TAG;
+ break;
+ case ATIO_ORDERED_QUEUE:
+ fcp_task_attr = TCM_ORDERED_TAG;
+ break;
+ case ATIO_ACA_QUEUE:
+ fcp_task_attr = TCM_ACA_TAG;
+ break;
+ case ATIO_UNTAGGED:
+ fcp_task_attr = TCM_SIMPLE_TAG;
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+ "qla_target: unknown task code %x, use ORDERED instead\n",
+ task_codes);
+ fcp_task_attr = TCM_ORDERED_TAG;
+ break;
+ }
+
+ return fcp_task_attr;
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+ uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void __qlt_do_work(struct qla_tgt_cmd *cmd)
+{
+ scsi_qla_host_t *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_sess *sess = cmd->sess;
+ struct atio_from_isp *atio = &cmd->atio;
+ unsigned char *cdb;
+ unsigned long flags;
+ uint32_t data_length;
+ int ret, fcp_task_attr, data_dir, bidi = 0;
+
+ cmd->cmd_in_wq = 0;
+ cmd->cmd_flags |= BIT_1;
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+ cmd->tag = atio->u.isp24.exchange_addr;
+ cmd->unpacked_lun = scsilun_to_int(
+ (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
+ if (atio->u.isp24.fcp_cmnd.rddata &&
+ atio->u.isp24.fcp_cmnd.wrdata) {
+ bidi = 1;
+ data_dir = DMA_TO_DEVICE;
+ } else if (atio->u.isp24.fcp_cmnd.rddata)
+ data_dir = DMA_FROM_DEVICE;
+ else if (atio->u.isp24.fcp_cmnd.wrdata)
+ data_dir = DMA_TO_DEVICE;
+ else
+ data_dir = DMA_NONE;
+
+ fcp_task_attr = qlt_get_fcp_task_attr(vha,
+ atio->u.isp24.fcp_cmnd.task_attr);
+ data_length = be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+ ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ fcp_task_attr, data_dir, bidi);
+ if (ret != 0)
+ goto out_term;
+ /*
+ * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+
+out_term:
+ ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
+ /*
+ * cmd has not sent to target yet, so pass NULL as the second
+ * argument to qlt_send_term_exchange() and free the memory here.
+ */
+ cmd->cmd_flags |= BIT_2;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+
+ qlt_decr_num_pend_cmds(vha);
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_do_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ __qlt_do_work(cmd);
+}
+
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+ struct qla_tgt_sess *sess,
+ struct atio_from_isp *atio)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0)
+ return NULL;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+ cmd->state = QLA_TGT_STATE_NEW;
+ cmd->tgt = vha->vha_tgt.qla_tgt;
+ qlt_incr_num_pend_cmds(vha);
+ cmd->vha = vha;
+ cmd->se_cmd.map_tag = tag;
+ cmd->sess = sess;
+ cmd->loop_id = sess->loop_id;
+ cmd->conf_compl_supported = sess->conf_compl_supported;
+
+ return cmd;
+}
+
+static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+ uint16_t);
+
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+ struct qla_tgt_sess_op *op = container_of(work,
+ struct qla_tgt_sess_op, work);
+ scsi_qla_host_t *vha = op->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ struct qla_tgt_cmd *cmd;
+ unsigned long flags;
+ uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+ if (op->atio.u.raw.entry_count > 1) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+ "Dropping multy entry atio %p\n", &op->atio);
+ goto out_term;
+ }
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has an extra creation ref. */
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ if (!sess)
+ goto out_term;
+ /*
+ * Now obtain a pre-allocated session tag using the original op->atio
+ * packet header, and dispatch into __qlt_do_work() using the existing
+ * process context.
+ */
+ cmd = qlt_get_tag(vha, sess, &op->atio);
+ if (!cmd) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ kfree(op);
+ return;
+ }
+ /*
+ * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+ * the extra reference taken above by qlt_make_local_sess()
+ */
+ __qlt_do_work(cmd);
+ kfree(op);
+ return;
+
+out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ kfree(op);
+
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ struct qla_tgt_cmd *cmd;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_io, vha, 0x3061,
+ "New command while device %p is shutting down\n", tgt);
+ return -EFAULT;
+ }
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess)) {
+ struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+ GFP_ATOMIC);
+ if (!op)
+ return -ENOMEM;
+
+ memcpy(&op->atio, atio, sizeof(*atio));
+ op->vha = vha;
+ INIT_WORK(&op->work, qlt_create_sess_from_atio);
+ queue_work(qla_tgt_wq, &op->work);
+ return 0;
+ }
+ /*
+ * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+
+ cmd = qlt_get_tag(vha, sess, atio);
+ if (!cmd) {
+ ql_dbg(ql_dbg_io, vha, 0x3062,
+ "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+ ha->tgt.tgt_ops->put_sess(sess);
+ return -ENOMEM;
+ }
+
+ cmd->cmd_flags = 0;
+ cmd->jiffies_at_alloc = get_jiffies_64();
+
+ cmd->reset_count = vha->hw->chip_reset;
+
+ cmd->cmd_in_wq = 1;
+ cmd->cmd_flags |= BIT_0;
+ INIT_WORK(&cmd->work, qlt_do_work);
+ queue_work(qla_tgt_wq, &cmd->work);
+ return 0;
+
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags)
+{
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int res;
+ uint8_t tmr_func;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (!mcmd) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+ "qla_target(%d): Allocation of management "
+ "command failed, some commands and their data could "
+ "leak\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+ mcmd->sess = sess;
+
+ if (iocb) {
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+ }
+ mcmd->tmr_func = fn;
+ mcmd->flags = flags;
+ mcmd->reset_count = vha->hw->chip_reset;
+
+ switch (fn) {
+ case QLA_TGT_CLEAR_ACA:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+ "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+
+ case QLA_TGT_TARGET_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+ "qla_target(%d): TARGET_RESET received\n",
+ sess->vha->vp_idx);
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+
+ case QLA_TGT_LUN_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+ "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+ tmr_func = TMR_LUN_RESET;
+ break;
+
+ case QLA_TGT_CLEAR_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+ "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+
+ case QLA_TGT_ABORT_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+ "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+#if 0
+ case QLA_TGT_ABORT_ALL:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+ "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_ABORT_ALL_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+ "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+ "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+ "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+#endif
+ default:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+ "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+ sess->vha->vp_idx, fn);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -ENOSYS;
+ }
+
+ res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+ if (res != 0) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+ "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+ sess->vha->vp_idx, res);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt;
+ struct qla_tgt_sess *sess;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+
+ tgt = vha->vha_tgt.qla_tgt;
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ a->u.isp24.fcp_hdr.s_id);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+ "qla_target(%d): task mgmt fn 0x%x for "
+ "non-existant session\n", vha->vp_idx, fn);
+ return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+ sizeof(struct atio_from_isp));
+ }
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ uint32_t lun, unpacked_lun;
+ int rc;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ mcmd->reset_count = vha->hw->chip_reset;
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+ le16_to_cpu(iocb->u.isp2x.seq_id));
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+ "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+ vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ int loop_id;
+
+ loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (sess == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+ "qla_target(%d): task abort for unexisting "
+ "session\n", vha->vp_idx);
+ return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+ }
+
+ return __qlt_abort_task(vha, iocb, sess);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ int res = 0;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+ "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
+ vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+
+ switch (iocb->u.isp24.status_subcode) {
+ case ELS_PLOGI:
+ case ELS_FLOGI:
+ case ELS_PRLI:
+ case ELS_LOGO:
+ case ELS_PRLO:
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ case ELS_PDISC:
+ case ELS_ADISC:
+ {
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ res = 1; /* send notify ack */
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+ "qla_target(%d): Unsupported ELS command %x "
+ "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ }
+
+ return res;
+}
+
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+ size_t first_offset = 0, rem_offset = offset, tmp = 0;
+ int i, sg_srr_cnt, bufflen = 0;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+ "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
+ "cmd->sg_cnt: %u, direction: %d\n",
+ cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+ /*
+ * FIXME: Reject non zero SRR relative offset until we can test
+ * this code properly.
+ */
+ pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+ return -1;
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+ "Missing cmd->sg or zero cmd->sg_cnt in"
+ " qla_tgt_set_data_offset\n");
+ return -EINVAL;
+ }
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if ((sg->length + tmp) > offset) {
+ first_offset = rem_offset;
+ sg_srr_start = sg;
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+ "Found matching sg[%d], using %p as sg_srr_start, "
+ "and using first_offset: %zu\n", i, sg,
+ first_offset);
+ break;
+ }
+ tmp += sg->length;
+ rem_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+ "Unable to locate sg_srr_start for offset: %u\n", offset);
+ return -EINVAL;
+ }
+ sg_srr_cnt = (cmd->sg_cnt - i);
+
+ sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+ "Unable to allocate sgp\n");
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, sg_srr_cnt);
+ sgp = &sg_srr[0];
+ /*
+ * Walk the remaining list for sg_srr_start, mapping to the newly
+ * allocated sg_srr taking first_offset into account.
+ */
+ for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+ if (first_offset) {
+ sg_set_page(sgp, sg_page(sg),
+ (sg->length - first_offset), first_offset);
+ first_offset = 0;
+ } else {
+ sg_set_page(sgp, sg_page(sg), sg->length, 0);
+ }
+ bufflen += sgp->length;
+
+ sgp = sg_next(sgp);
+ if (!sgp)
+ break;
+ }
+
+ cmd->sg = sg_srr;
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->bufflen = bufflen;
+ cmd->offset += offset;
+ cmd->free_sg = 1;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+ cmd->sg_cnt);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+ cmd->bufflen);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+ cmd->offset);
+
+ if (cmd->sg_cnt < 0)
+ BUG();
+
+ if (cmd->bufflen < 0)
+ BUG();
+
+ return 0;
+}
+
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ int res = 0, rel_offs;
+
+ rel_offs = srr_rel_offs - cmd->offset;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+ srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+ "qla_target(%d): SRR rel_offs (%d) < 0",
+ cmd->vha->vp_idx, rel_offs);
+ res = -1;
+ } else if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
+
+/* No locks, thread context */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+ struct imm_ntfy_from_isp *ntfy =
+ (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_cmd *cmd = sctio->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
+ int xmit_type = 0, resp = 0;
+ uint32_t offset;
+ uint16_t srr_ui;
+
+ offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+ srr_ui = ntfy->u.isp24.srr_ui;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+ cmd, srr_ui);
+
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ resp = 1;
+ break;
+ case SRR_IU_DATA_IN:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+ "Unable to process SRR_IU_DATA_IN due to"
+ " missing cmd->sg, state: %d\n", cmd->state);
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+ "Rejecting SRR_IU_DATA_IN with non GOOD "
+ "scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ resp = 1;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+ "qla_target(%d): SRR for in data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ case SRR_IU_DATA_OUT:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+ "Unable to process SRR_IU_DATA_OUT due to"
+ " missing cmd->sg\n");
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+ "Rejecting SRR_IU_DATA_OUT"
+ " with non GOOD scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (xmit_type & QLA_TGT_XMIT_DATA) {
+ cmd->cmd_flags |= BIT_8;
+ qlt_rdy_to_xfer(cmd);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+ "qla_target(%d): SRR for out data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+ "qla_target(%d): Unknown srr_ui value %x",
+ vha->vp_idx, srr_ui);
+ goto out_reject;
+ }
+
+ /* Transmit response in case of status and data-in cases */
+ if (resp) {
+ cmd->cmd_flags |= BIT_7;
+ qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+ }
+
+ return;
+
+out_reject:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ dump_stack();
+ } else {
+ cmd->cmd_flags |= BIT_9;
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(imm);
+}
+
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_tgt_srr_ctio *sctio;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+ tgt);
+
+restart:
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+ struct qla_tgt_srr_imm *imm, *i, *ti;
+ struct qla_tgt_cmd *cmd;
+ struct se_cmd *se_cmd;
+
+ imm = NULL;
+ list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (i->srr_id == sctio->srr_id) {
+ list_del(&i->srr_list_entry);
+ if (imm) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+ "qla_target(%d): There must be "
+ "only one IMM SRR per CTIO SRR "
+ "(IMM SRR %p, id %d, CTIO %p\n",
+ vha->vp_idx, i, i->srr_id, sctio);
+ qlt_reject_free_srr_imm(tgt->vha, i, 0);
+ } else
+ imm = i;
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+ "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+ sctio->srr_id);
+
+ if (imm == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+ "Not found matching IMM for SRR CTIO (id %d)\n",
+ sctio->srr_id);
+ continue;
+ } else
+ list_del(&sctio->srr_list_entry);
+
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+ cmd = sctio->cmd;
+ /*
+ * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+ * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+ * logic..
+ */
+ cmd->offset = 0;
+ if (cmd->free_sg) {
+ kfree(cmd->sg);
+ cmd->sg = NULL;
+ cmd->free_sg = 0;
+ }
+ se_cmd = &cmd->se_cmd;
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+ "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+ "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->sg_cnt, cmd->offset);
+
+ qlt_handle_srr(vha, sctio, imm);
+
+ kfree(imm);
+ kfree(sctio);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt_srr_imm *imm;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_srr_ctio *sctio;
+
+ tgt->imm_srr_id++;
+
+ ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
+ vha->vp_idx);
+
+ imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+ if (imm != NULL) {
+ memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ imm->srr_id = tgt->imm_srr_id;
+ list_add_tail(&imm->srr_list_entry,
+ &tgt->srr_imm_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+ "IMM NTFY SRR %p added (id %d, ui %x)\n",
+ imm, imm->srr_id, iocb->u.isp24.srr_ui);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(sctio, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == imm->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR CTIO, deleting IMM "
+ "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+ imm);
+ list_del(&imm->srr_list_entry);
+
+ kfree(imm);
+
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_ctio *ts;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+ "qla_target(%d): Unable to allocate SRR IMM "
+ "entry, SRR request will be rejected\n", vha->vp_idx);
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == tgt->imm_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+ "CTIO SRR %p deleted (id %d)\n",
+ sctio, sctio->srr_id);
+ list_del(&sctio->srr_list_entry);
+ qlt_send_term_exchange(vha, sctio->cmd,
+ &sctio->cmd->atio, 1);
+ kfree(sctio);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t add_flags = 0;
+ int send_notify_ack = 1;
+ uint16_t status;
+
+ status = le16_to_cpu(iocb->u.isp2x.status);
+ switch (status) {
+ case IMM_NTFY_LIP_RESET:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+ "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+ vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_LIP_LINK_REINIT:
+ {
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+ "qla_target(%d): LINK REINIT (loop %#x, "
+ "subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ }
+ memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+ tgt->link_reinit_iocb_pending = 1;
+ /*
+ * QLogic requires to wait after LINK REINIT for possible
+ * PDISC or ADISC ELS commands
+ */
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_PORT_LOGOUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+ "qla_target(%d): Port logout (loop "
+ "%#x, subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_TPRLO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+ "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_PORT_CONFIG:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+ "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+ status);
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_LOGO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+ "qla_target(%d): Link failure detected\n",
+ vha->vp_idx);
+ /* I_T nexus loss */
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_IOCB_OVERFLOW:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+ "qla_target(%d): Cannot provide requested "
+ "capability (IOCB overflowed the immediate notify "
+ "resource count)\n", vha->vp_idx);
+ break;
+
+ case IMM_NTFY_ABORT_TASK:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+ "qla_target(%d): Abort Task (S %08x I %#x -> "
+ "L %#x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp2x.seq_id),
+ GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+ le16_to_cpu(iocb->u.isp2x.lun));
+ if (qlt_abort_task(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_RESOURCE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+ "qla_target(%d): Out of resources, host %ld\n",
+ vha->vp_idx, vha->host_no);
+ break;
+
+ case IMM_NTFY_MSG_RX:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+ "qla_target(%d): Immediate notify task %x\n",
+ vha->vp_idx, iocb->u.isp2x.task_flags);
+ if (qlt_handle_task_mgmt(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_ELS:
+ if (qlt_24xx_handle_els(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+ "qla_target(%d): Received unknown immediate "
+ "notify status %x\n", vha->vp_idx, status);
+ break;
+ }
+
+ if (send_notify_ack)
+ qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
+static int __qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct qla_tgt_sess *sess = NULL;
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ if (!sess) {
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+ return 0;
+ }
+ /* Sending marker isn't necessary, since we called from ISR */
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_io, vha, 0x3063,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = sess->loop_id;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ /*
+ * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
+ * if the explicit conformation is used.
+ */
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.scsi_status = cpu_to_le16(status);
+ /* Memory Barrier */
+ wmb();
+ qla2x00_start_iocbs(vha, vha->req);
+ return 0;
+}
+
+/*
+ * This routine is used to allocate a command for either a QFull condition
+ * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
+ * out previously.
+ */
+static void
+qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull)
+{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ struct se_session *se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_io, vha, 0x300a,
+ "New command while device %p is shutting down\n", tgt);
+ return;
+ }
+
+ if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
+ vha->hw->tgt.num_qfull_cmds_dropped++;
+ if (vha->hw->tgt.num_qfull_cmds_dropped >
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->hw->tgt.num_qfull_cmds_dropped;
+
+ ql_dbg(ql_dbg_io, vha, 0x3068,
+ "qla_target(%d): %s: QFull CMD dropped[%d]\n",
+ vha->vp_idx, __func__,
+ vha->hw->tgt.num_qfull_cmds_dropped);
+
+ qlt_chk_exch_leak_thresh_hold(vha);
+ return;
+ }
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id
+ (vha, atio->u.isp24.fcp_hdr.s_id);
+ if (!sess)
+ return;
+
+ se_sess = sess->se_sess;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0)
+ return;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ if (!cmd) {
+ ql_dbg(ql_dbg_io, vha, 0x3009,
+ "qla_target(%d): %s: Allocation of cmd failed\n",
+ vha->vp_idx, __func__);
+
+ vha->hw->tgt.num_qfull_cmds_dropped++;
+ if (vha->hw->tgt.num_qfull_cmds_dropped >
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->hw->tgt.num_qfull_cmds_dropped;
+
+ qlt_chk_exch_leak_thresh_hold(vha);
+ return;
+ }
+
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+ qlt_incr_num_pend_cmds(vha);
+ INIT_LIST_HEAD(&cmd->cmd_list);
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+
+ cmd->tgt = vha->vha_tgt.qla_tgt;
+ cmd->vha = vha;
+ cmd->reset_count = vha->hw->chip_reset;
+ cmd->q_full = 1;
+
+ if (qfull) {
+ cmd->q_full = 1;
+ /* NOTE: borrowing the state field to carry the status */
+ cmd->state = status;
+ } else
+ cmd->term_exchg = 1;
+
+ list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
+
+ vha->hw->tgt.num_qfull_cmds_alloc++;
+ if (vha->hw->tgt.num_qfull_cmds_alloc >
+ vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
+ vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
+ vha->hw->tgt.num_qfull_cmds_alloc;
+}
+
+int
+qlt_free_qfull_cmds(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ struct qla_tgt_cmd *cmd, *tcmd;
+ struct list_head free_list;
+ int rc = 0;
+
+ if (list_empty(&ha->tgt.q_full_list))
+ return 0;
+
+ INIT_LIST_HEAD(&free_list);
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+ if (list_empty(&ha->tgt.q_full_list)) {
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ return 0;
+ }
+
+ list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
+ if (cmd->q_full)
+ /* cmd->state is a borrowed field to hold status */
+ rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
+ else if (cmd->term_exchg)
+ rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
+
+ if (rc == -ENOMEM)
+ break;
+
+ if (cmd->q_full)
+ ql_dbg(ql_dbg_io, vha, 0x3006,
+ "%s: busy sent for ox_id[%04x]\n", __func__,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ else if (cmd->term_exchg)
+ ql_dbg(ql_dbg_io, vha, 0x3007,
+ "%s: Term exchg sent for ox_id[%04x]\n", __func__,
+ be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+ else
+ ql_dbg(ql_dbg_io, vha, 0x3008,
+ "%s: Unexpected cmd in QFull list %p\n", __func__,
+ cmd);
+
+ list_del(&cmd->cmd_list);
+ list_add_tail(&cmd->cmd_list, &free_list);
+
+ /* piggy back on hardware_lock for protection */
+ vha->hw->tgt.num_qfull_cmds_alloc--;
+ }
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ cmd = NULL;
+
+ list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+ list_del(&cmd->cmd_list);
+ /* This cmd was never sent to TCM. There is no need
+ * to schedule free or call free_cmd
+ */
+ qlt_free_cmd(cmd);
+ }
+ return rc;
+}
+
+static void
+qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ int rc = 0;
+
+ rc = __qlt_send_busy(vha, atio, status);
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, atio, status, 1);
+}
+
+static int
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t status;
+
+ if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+ return 0;
+
+ status = temp_sam_status;
+ qlt_send_busy(vha, atio, status);
+ return 1;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ int rc;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_io, vha, 0x3064,
+ "ATIO pkt, but no tgt (ha %p)", ha);
+ return;
+ }
+ /*
+ * In tgt_stop mode we also should allow all requests to pass.
+ * Otherwise, some commands can stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ if (unlikely(atio->u.isp24.exchange_addr ==
+ ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ ql_dbg(ql_dbg_io, vha, 0x3065,
+ "qla_target(%d): ATIO_TYPE7 "
+ "received with UNKNOWN exchange address, "
+ "sending QUEUE_FULL\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ break;
+ }
+
+
+
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+ rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+ }
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ } else {
+ rc = qlt_handle_task_mgmt(vha, atio);
+ }
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe059,
+ "qla_target: Unable to send "
+ "command to target for req, "
+ "ignoring.\n");
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status.\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ }
+ }
+ }
+ break;
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ if (unlikely(atio->u.isp2x.entry_status != 0)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+ "qla_target(%d): Received ATIO packet %x "
+ "with error status %x\n", vha->vp_idx,
+ atio->u.raw.entry_type,
+ atio->u.isp2x.entry_status);
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+ "qla_target(%d): Response pkt %x received, but no "
+ "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+ return;
+ }
+
+ /*
+ * In tgt_stop mode we also should allow all requests to pass.
+ * Otherwise, some commands can stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (pkt->entry_type) {
+ case CTIO_CRC2:
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case ACCEPT_TGT_IO_TYPE:
+ {
+ struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+ int rc;
+ if (atio->u.isp2x.status !=
+ __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+ "qla_target(%d): ATIO with error "
+ "status %x received\n", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status));
+ break;
+ }
+
+ rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+ }
+
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, 0);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send "
+ "command to target, sending TERM "
+ "EXCHANGE for rsp\n");
+ qlt_send_term_exchange(vha, NULL,
+ atio, 1);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, 0);
+ }
+ }
+ }
+ }
+ break;
+
+ case CONTINUE_TGT_IO_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case CTIO_A64_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+ break;
+
+ case NOTIFY_ACK_TYPE:
+ if (tgt->notify_ack_expected > 0) {
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe036,
+ "NOTIFY_ACK seq %08x status %x\n",
+ le16_to_cpu(entry->u.isp2x.seq_id),
+ le16_to_cpu(entry->u.isp2x.status));
+ tgt->notify_ack_expected--;
+ if (entry->u.isp2x.status !=
+ __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe061,
+ "qla_target(%d): NOTIFY_ACK "
+ "failed %x\n", vha->vp_idx,
+ le16_to_cpu(entry->u.isp2x.status));
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe062,
+ "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+ vha->vp_idx);
+ }
+ break;
+
+ case ABTS_RECV_24XX:
+ ql_dbg(ql_dbg_tgt, vha, 0xe037,
+ "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+ qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+ break;
+
+ case ABTS_RESP_24XX:
+ if (tgt->abts_resp_expected > 0) {
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+ tgt->abts_resp_expected--;
+ if (le16_to_cpu(entry->compl_status) !=
+ ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ /*
+ * We've got a race here: aborted
+ * exchange not terminated, i.e.
+ * response for the aborted command was
+ * sent between the abort request was
+ * received and processed.
+ * Unfortunately, the firmware has a
+ * silly requirement that all aborted
+ * exchanges must be explicitely
+ * terminated, otherwise it refuses to
+ * send responses for the abort
+ * requests. So, we have to
+ * (re)terminate the exchange and retry
+ * the abort response.
+ */
+ qlt_24xx_retry_term_exchange(vha,
+ entry);
+ } else
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX "
+ "failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe064,
+ "qla_target(%d): Unexpected ABTS_RESP_24XX "
+ "received\n", vha->vp_idx);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe065,
+ "qla_target(%d): Received unknown response pkt "
+ "type %x\n", vha->vp_idx, pkt->entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+ uint16_t *mailbox)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ int login_code;
+
+ if (!ha->tgt.tgt_ops)
+ return;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+ "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+ return;
+ }
+
+ if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+ IS_QLA2100(ha))
+ return;
+ /*
+ * In tgt_stop mode we also should allow all requests to pass.
+ * Otherwise, some commands can stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (code) {
+ case MBA_RESET: /* Reset */
+ case MBA_SYSTEM_ERR: /* System Error */
+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+ "qla_target(%d): System error async event %#x "
+ "occurred", vha->vp_idx, code);
+ break;
+ case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_LOOP_UP:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+ "qla_target(%d): Async LOOP_UP occurred "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ break;
+ }
+
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_RSCN_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+ "qla_target(%d): Async event %#x occurred "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ break;
+
+ case MBA_PORT_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+ "qla_target(%d): Port update async event %#x "
+ "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
+ "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+ login_code = le16_to_cpu(mailbox[2]);
+ if (login_code == 0x4)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+ "Async MB 2: Got PLOGI Complete\n");
+ else if (login_code == 0x7)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+ "Async MB 2: Port Logged Out\n");
+ break;
+
+ default:
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ uint16_t loop_id)
+{
+ fc_port_t *fcport;
+ int rc;
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (!fcport) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ kfree(fcport);
+ return NULL;
+ }
+
+ return fcport;
+}
+
+/* Must be called under tgt_mutex */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+ uint8_t *s_id)
+{
+ struct qla_tgt_sess *sess = NULL;
+ fc_port_t *fcport = NULL;
+ int rc, global_resets;
+ uint16_t loop_id = 0;
+
+retry:
+ global_resets =
+ atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+ if (rc != 0) {
+ if ((s_id[0] == 0xFF) &&
+ (s_id[1] == 0xFC)) {
+ /*
+ * This is Domain Controller, so it should be
+ * OK to drop SCSI commands from it.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+ "Unable to find initiator with S_ID %x:%x:%x",
+ s_id[0], s_id[1], s_id[2]);
+ } else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+ "qla_target(%d): Unable to find "
+ "initiator with S_ID %x:%x:%x",
+ vha->vp_idx, s_id[0], s_id[1],
+ s_id[2]);
+ return NULL;
+ }
+
+ fcport = qlt_get_port_database(vha, loop_id);
+ if (!fcport)
+ return NULL;
+
+ if (global_resets !=
+ atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+ "qla_target(%d): global reset during session discovery "
+ "(counter was %d, new %d), retrying", vha->vp_idx,
+ global_resets,
+ atomic_read(&vha->vha_tgt.
+ qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ sess = qlt_create_sess(vha, fcport, true);
+
+ kfree(fcport);
+ return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint32_t be_s_id;
+ uint8_t s_id[3];
+ int rc;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+ s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+ s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ (unsigned char *)&be_s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ if (rc != 0)
+ goto out_term;
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+
+out_term:
+ qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_tmr_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct atio_from_isp *a = &prm->tm_iocb2;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint8_t *s_id = NULL; /* to hide compiler warnings */
+ int rc;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+ void *iocb;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ iocb = a;
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ if (rc != 0)
+ goto out_term;
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+
+out_term:
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ struct qla_tgt_sess_work_param *prm = list_entry(
+ tgt->sess_works_list.next, typeof(*prm),
+ sess_works_list_entry);
+
+ /*
+ * This work can be scheduled on several CPUs at time, so we
+ * must delete the entry to eliminate double processing
+ */
+ list_del(&prm->sess_works_list_entry);
+
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ switch (prm->type) {
+ case QLA_TGT_SESS_WORK_ABORT:
+ qlt_abort_work(tgt, prm);
+ break;
+ case QLA_TGT_SESS_WORK_TM:
+ qlt_tmr_work(tgt, prm);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+ kfree(prm);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+ struct qla_tgt *tgt;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ if (!IS_TGT_MODE_CAPABLE(ha)) {
+ ql_log(ql_log_warn, base_vha, 0xe070,
+ "This adapter does not support target mode.\n");
+ return 0;
+ }
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+ "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
+
+ BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
+
+ tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+ "Unable to allocate struct qla_tgt\n");
+ return -ENOMEM;
+ }
+
+ if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+ base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+ tgt->ha = ha;
+ tgt->vha = base_vha;
+ init_waitqueue_head(&tgt->waitQ);
+ INIT_LIST_HEAD(&tgt->sess_list);
+ INIT_LIST_HEAD(&tgt->del_sess_list);
+ INIT_DELAYED_WORK(&tgt->sess_del_work,
+ (void (*)(struct work_struct *))qlt_del_sess_work_fn);
+ spin_lock_init(&tgt->sess_work_lock);
+ INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+ INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_ctio_list);
+ INIT_LIST_HEAD(&tgt->srr_imm_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+ atomic_set(&tgt->tgt_global_resets_count, 0);
+
+ base_vha->vha_tgt.qla_tgt = tgt;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+ "qla_target(%d): using 64 Bit PCI addressing",
+ base_vha->vp_idx);
+ tgt->tgt_enable_64bit_addr = 1;
+ /* 3 is reserved */
+ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+ tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+ tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+ if (base_vha->fc_vport)
+ return 0;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+ mutex_unlock(&qla_tgt_mutex);
+
+ return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+ if (!vha->vha_tgt.qla_tgt)
+ return 0;
+
+ if (vha->fc_vport) {
+ qlt_release(vha->vha_tgt.qla_tgt);
+ return 0;
+ }
+
+ /* free left over qfull cmds */
+ qlt_init_term_exchange(vha);
+
+ mutex_lock(&qla_tgt_mutex);
+ list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
+ mutex_unlock(&qla_tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+ vha->host_no, ha);
+ qlt_release(vha->vha_tgt.qla_tgt);
+
+ return 0;
+}
+
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+ unsigned char *b)
+{
+ int i;
+
+ pr_debug("qla2xxx HW vha->node_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->node_name[i]);
+ pr_debug("\n");
+ pr_debug("qla2xxx HW vha->port_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->port_name[i]);
+ pr_debug("\n");
+
+ pr_debug("qla2xxx passed configfs WWPN: ");
+ put_unaligned_be64(wwpn, b);
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", b[i]);
+ pr_debug("\n");
+}
+
+/**
+ * qla_tgt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passwd FC target WWPN
+ * @callback: lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ */
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+ u64 npiv_wwpn, u64 npiv_wwnn,
+ int (*callback)(struct scsi_qla_host *, void *, u64, u64))
+{
+ struct qla_tgt *tgt;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int rc;
+ u8 b[WWN_SIZE];
+
+ mutex_lock(&qla_tgt_mutex);
+ list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+ vha = tgt->vha;
+ ha = vha->hw;
+
+ host = vha->host;
+ if (!host)
+ continue;
+
+ if (!(host->hostt->supported_mode & MODE_TARGET))
+ continue;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
+ pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
+ if (tgt->tgt_stop) {
+ pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!scsi_host_get(host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe068,
+ "Unable to scsi_host_get() for"
+ " qla2xxx scsi_host\n");
+ continue;
+ }
+ qlt_lport_dump(vha, phys_wwpn, b);
+
+ if (memcmp(vha->port_name, b, WWN_SIZE)) {
+ scsi_host_put(host);
+ continue;
+ }
+ rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+ if (rc != 0)
+ scsi_host_put(host);
+
+ mutex_unlock(&qla_tgt_mutex);
+ return rc;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qla_tgt_lport_deregister - Degister lport
+ *
+ * @vha: Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
+ */
+ vha->vha_tgt.target_lport_ptr = NULL;
+ ha->tgt.tgt_ops = NULL;
+ /*
+ * Release the Scsi_Host reference for the underlying qla2xxx host
+ */
+ scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/* Must be called under HW lock */
+static void qlt_set_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_TARGET;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode |= MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/* Must be called under HW lock */
+static void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ vha->host->active_mode = MODE_UNKNOWN;
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_INITIATOR;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode &= ~MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qla_tgt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe069,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stopped = 0;
+ qlt_set_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (vha->vp_idx) {
+ qla24xx_disable_vp(vha);
+ qla24xx_enable_vp(vha);
+ } else {
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
+ qla2x00_wait_for_hba_online(base_vha);
+ }
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qla_tgt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+static void qlt_disable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_clear_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() contex to setup
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ vha->vha_tgt.qla_tgt = NULL;
+
+ mutex_init(&vha->vha_tgt.tgt_mutex);
+ mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
+
+ qlt_clear_mode(vha);
+
+ /*
+ * NOTE: Currently the value is kept the same for <24xx and
+ * >=24xx ISPs. If it is necessary to change it,
+ * the check should be added for specific ISPs,
+ * assigning the value appropriately.
+ */
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+
+ qlt_add_target(ha, vha);
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+ /*
+ * FC-4 Feature bit 0 indicates target functionality to the name server.
+ */
+ if (qla_tgt_mode_enabled(vha)) {
+ if (qla_ini_mode_enabled(vha))
+ ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ else
+ ct_req->req.rff_id.fc4_feature = BIT_0;
+ } else if (qla_ini_mode_enabled(vha)) {
+ ct_req->req.rff_id.fc4_feature = BIT_1;
+ }
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @ha: HA context
+ *
+ * Beginning of ATIO ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t cnt;
+ struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt++;
+ }
+
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @ha: SCSI driver HA context
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct atio_from_isp *pkt;
+ int cnt, i;
+
+ if (!vha->flags.online)
+ return;
+
+ while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ cnt = pkt->u.raw.entry_count;
+
+ qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
+ for (i = 0; i < cnt; i++) {
+ ha->tgt.atio_ring_index++;
+ if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+ ha->tgt.atio_ring_index = 0;
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ } else
+ ha->tgt.atio_ring_ptr++;
+
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ }
+ wmb();
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+}
+
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+ RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+ if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ if (ql2xtgt_tape_enable)
+ /* Enable FC Tape support */
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ else
+ /* Disable FC Tape support */
+ nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_24xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |=
+ __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ if (ql2xtgt_tape_enable)
+ /* Enable FC tape support */
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ else
+ /* Disable FC tape support */
+ nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_81xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ ha->msix_count += 1; /* For ATIO Q */
+}
+
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+ struct sts_entry_24xx *pkt)
+{
+ switch (pkt->entry_type) {
+ case ABTS_RECV_24XX:
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ case CTIO_CRC2:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+ struct vp_config_entry_24xx *vpmod)
+{
+ if (qla_tgt_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_5;
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ } else {
+ ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+ }
+
+ mutex_init(&base_vha->vha_tgt.tgt_mutex);
+ mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
+ qlt_clear_mode(base_vha);
+}
+
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+ struct rsp_que *rsp;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+ MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+ if (!ha->tgt.tgt_vp_map)
+ return -ENOMEM;
+
+ ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+ (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+ &ha->tgt.atio_dma, GFP_KERNEL);
+ if (!ha->tgt.atio_ring) {
+ kfree(ha->tgt.tgt_vp_map);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.atio_ring) {
+ dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+ sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+ ha->tgt.atio_dma);
+ }
+ kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ switch (cmd) {
+ case SET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+ break;
+ case SET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ break;
+ case RESET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+ break;
+ case RESET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ break;
+ }
+}
+
+static int __init qlt_parse_ini_mode(void)
+{
+ if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+ else
+ return false;
+
+ return true;
+}
+
+int __init qlt_init(void)
+{
+ int ret;
+
+ if (!qlt_parse_ini_mode()) {
+ ql_log(ql_log_fatal, NULL, 0xe06b,
+ "qlt_parse_ini_mode() failed\n");
+ return -EINVAL;
+ }
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+ sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+ qla_tgt_mgmt_cmd), 0, NULL);
+ if (!qla_tgt_mgmt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06d,
+ "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+ return -ENOMEM;
+ }
+
+ qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+ mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+ if (!qla_tgt_mgmt_cmd_mempool) {
+ ql_log(ql_log_fatal, NULL, 0xe06e,
+ "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+ ret = -ENOMEM;
+ goto out_mgmt_cmd_cachep;
+ }
+
+ qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+ if (!qla_tgt_wq) {
+ ql_log(ql_log_fatal, NULL, 0xe06f,
+ "alloc_workqueue for qla_tgt_wq failed\n");
+ ret = -ENOMEM;
+ goto out_cmd_mempool;
+ }
+ /*
+ * Return 1 to signal that initiator-mode is being disabled
+ */
+ return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+ return ret;
+}
+
+void qlt_exit(void)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ destroy_workqueue(qla_tgt_wq);
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 000000000..332086776
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1114 @@
+/*
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * Additional file for the target driver support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * This is the global def file that is useful for including from the
+ * target portion.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC 269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC 57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE 0
+#define QLA2XXX_INI_MODE_DISABLED 1
+#define QLA2XXX_INI_MODE_ENABLED 2
+
+#define QLA2XXX_COMMAND_COUNT_INIT 250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Status's) are for CTIO's
+ * vs. regular (non-target) info. This is checked for in
+ * qla2x00_process_response_queue() to see if a handle coming back in a
+ * multi-complete should come to the tgt driver or be handled there by qla2xxx
+ */
+#define CTIO_COMPLETION_HANDLE_MARK BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+ "DEFAULT_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0 0
+#define OF_SS_MODE_1 1
+#define OF_SS_MODE_2 2
+#define OF_SS_MODE_3 3
+
+#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
+#define OF_DATA_IN BIT_6 /* Data in to initiator */
+ /* (data from target to initiator) */
+#define OF_DATA_OUT BIT_7 /* Data out from initiator */
+ /* (data from initiator to target) */
+#define OF_NO_DATA (BIT_7 | BIT_6)
+#define OF_INC_RC BIT_8 /* Increment command resource count */
+#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
+#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
+#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
+#define OF_SSTS BIT_15 /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32 3
+#define QLA_TGT_DATASEGS_PER_CONT32 7
+#define QLA_TGT_MAX_SG32(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+ QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64 2
+#define QLA_TGT_DATASEGS_PER_CONT64 5
+#define QLA_TGT_MAX_SG64(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+ QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX 1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
+ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+ : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ * This is sent by the ISP to the Target driver.
+ * This IOCB would have report of events sent by the
+ * initiator, that needs to be handled by the target
+ * driver immediately.
+ */
+struct imm_ntfy_from_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t lun;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t status_modifier;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT 0x5
+#define SRR_IU_STATUS 0x7
+ uint16_t srr_ox_id;
+ uint8_t reserved_2[28];
+ } isp2x;
+ struct {
+ uint32_t reserved;
+ uint16_t nport_handle;
+ uint16_t reserved_2;
+ uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t fw_handle;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+ uint8_t reserved_6;
+ } isp24;
+ } u;
+ uint16_t reserved_7;
+ uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ * This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t flags;
+ uint16_t resp_code;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint16_t srr_reject_code;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t reserved_2[24];
+ } isp2x;
+ struct {
+ uint32_t handle;
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint16_t flags;
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t fw_handle;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t srr_reject_code;
+ uint8_t reserved_5[5];
+ } isp24;
+ } u;
+ uint8_t reserved[2];
+ uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+
+#define NOTIFY_ACK_SUCCESS 0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
+ * This structure is sent to the ISP 2xxx from target driver.
+ */
+struct ctio_to_2xxx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ uint16_t dseg_count; /* Data segment count. */
+ uint32_t relative_offset;
+ uint32_t residual;
+ uint16_t reserved_1[3];
+ uint16_t scsi_status;
+ uint32_t transfer_length;
+ uint32_t dseg_0_address; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID 0x07
+#define ATIO_CANT_PROV_CAP 0x16
+#define ATIO_CDB_VALID 0x3D
+
+#define ATIO_EXEC_READ BIT_1
+#define ATIO_EXEC_WRITE BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS 0x01
+#define CTIO_ABORTED 0x02
+#define CTIO_INVALID_RX_ID 0x08
+#define CTIO_TIMEOUT 0x0B
+#define CTIO_DIF_ERROR 0x0C /* DIF error detected */
+#define CTIO_LIP_RESET 0x0E
+#define CTIO_TARGET_RESET 0x17
+#define CTIO_PORT_UNAVAILABLE 0x28
+#define CTIO_PORT_LOGGED_OUT 0x29
+#define CTIO_PORT_CONF_CHANGED 0x2A
+#define CTIO_SRR_RECEIVED 0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+ uint8_t r_ctl;
+ uint8_t d_id[3];
+ uint8_t cs_ctl;
+ uint8_t s_id[3];
+ uint8_t type;
+ uint8_t f_ctl[3];
+ uint8_t seq_id;
+ uint8_t df_ctl;
+ uint16_t seq_cnt;
+ __be16 ox_id;
+ uint16_t rx_id;
+ uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+ uint8_t f_ctl[3];
+ uint8_t type;
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
+#define F_CTL_LAST_SEQ BIT_20
+#define F_CTL_END_SEQ BIT_19
+#define F_CTL_SEQ_INITIATIVE BIT_16
+
+#define R_CTL_BASIC_LINK_SERV 0x80
+#define R_CTL_B_ACC 0x4
+#define R_CTL_B_RJT 0x5
+
+struct atio7_fcp_cmnd {
+ uint64_t lun;
+ uint8_t cmnd_ref;
+ uint8_t task_attr:3;
+ uint8_t reserved:5;
+ uint8_t task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
+#define FCP_CMND_TASK_MGMT_LU_RESET 4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
+ uint8_t wrdata:1;
+ uint8_t rddata:1;
+ uint8_t add_cdb_len:6;
+ uint8_t cdb[16];
+ /*
+ * add_cdb is optional and can absent from struct atio7_fcp_cmnd. Size 4
+ * only to make sizeof(struct atio7_fcp_cmnd) be as expected by
+ * BUILD_BUG_ON in qlt_init().
+ */
+ uint8_t add_cdb[4];
+ /* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ * This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+ union {
+ struct {
+ uint16_t entry_hdr;
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint8_t command_ref;
+ uint8_t task_codes;
+ uint8_t task_flags;
+ uint8_t execution_codes;
+ uint8_t cdb[MAX_CMDSZ];
+ uint32_t data_length;
+ uint16_t lun;
+ uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
+ uint16_t reserved_32[6];
+ uint16_t ox_id;
+ } isp2x;
+ struct {
+ uint16_t entry_hdr;
+ uint8_t fcp_cmnd_len_low;
+ uint8_t fcp_cmnd_len_high:4;
+ uint8_t attr:4;
+ uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
+ struct fcp_hdr fcp_hdr;
+ struct atio7_fcp_cmnd fcp_cmnd;
+ } isp24;
+ struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+ } raw;
+ } u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (ATIO) type 7 entry (for 24xx) structure.
+ * This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t add_flags;
+ uint8_t initiator_id[3];
+ uint8_t reserved;
+ uint32_t exchange_addr;
+ union {
+ struct {
+ uint16_t reserved1;
+ __le16 flags;
+ uint32_t residual;
+ __le16 ox_id;
+ uint16_t scsi_status;
+ uint32_t relative_offset;
+ uint32_t reserved2;
+ uint32_t transfer_length;
+ uint32_t reserved3;
+ /* Data segment 0 address. */
+ uint32_t dseg_0_address[2];
+ /* Data segment 0 length. */
+ uint32_t dseg_0_length;
+ } status0;
+ struct {
+ uint16_t sense_length;
+ uint16_t flags;
+ uint32_t residual;
+ __le16 ox_id;
+ uint16_t scsi_status;
+ uint16_t response_len;
+ uint16_t reserved;
+ uint8_t sense_data[24];
+ } status1;
+ } u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t status;
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t reserved1[5];
+ uint32_t exchange_address;
+ uint16_t reserved2;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t reserved3;
+ uint32_t relative_offset;
+ uint8_t reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS BIT_15
+#define CTIO7_FLAGS_TERMINATE BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0 0
+#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
+#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7
+#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
+#define CTIO7_FLAGS_DSD_PTR BIT_2
+#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */
+#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */
+
+#define ELS_PLOGI 0x3
+#define ELS_FLOGI 0x4
+#define ELS_LOGO 0x5
+#define ELS_PRLI 0x20
+#define ELS_PRLO 0x21
+#define ELS_TPRLO 0x24
+#define ELS_PDISC 0x50
+#define ELS_ADISC 0x52
+
+/*
+ *CTIO Type CRC_2 IOCB
+ */
+struct ctio_crc2_to_fw {
+ uint8_t entry_type; /* Entry type. */
+#define CTIO_CRC2 0x7A
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t add_flags; /* additional flags */
+#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
+
+ uint8_t initiator_id[3]; /* initiator ID */
+ uint8_t reserved1;
+ uint32_t exchange_addr; /* rcv exchange address */
+ uint16_t reserved2;
+ __le16 flags; /* refer to CTIO7 flags values */
+ uint32_t residual;
+ __le16 ox_id;
+ uint16_t scsi_status;
+ __le32 relative_offset;
+ uint32_t reserved5;
+ __le32 transfer_length; /* total fc transfer length */
+ uint32_t reserved6;
+ __le32 crc_context_address[2];/* Data segment address. */
+ uint16_t crc_context_len; /* Data segment length. */
+ uint16_t reserved_1; /* MUST be set to 0. */
+} __packed;
+
+/* CTIO Type CRC_x Status IOCB */
+struct ctio_crc_from_fw {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t status;
+ uint16_t timeout; /* Command timeout. */
+ uint16_t dseg_count; /* Data segment count. */
+ uint32_t reserved1;
+ uint16_t state_flags;
+#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
+
+ uint32_t exchange_address; /* rcv exchange address */
+ uint16_t reserved2;
+ uint16_t flags;
+ uint32_t resid_xfer_length;
+ uint16_t ox_id;
+ uint8_t reserved3[12];
+ uint16_t runt_guard; /* reported runt blk guard */
+ uint8_t actual_dif[8];
+ uint8_t expected_dif[8];
+} __packed;
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX 0x55 /* ABTS responce (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ * The ABTS BLS received from the wire is sent to the
+ * target driver by the ISP 24xx.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint8_t reserved_1[6];
+ uint16_t nport_handle;
+ uint8_t reserved_2[2];
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[16];
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ BIT_0
+
+struct ba_acc_le {
+ uint16_t reserved;
+ uint8_t seq_id_last;
+ uint8_t seq_id_valid;
+#define SEQ_ID_VALID 0x80
+#define SEQ_ID_INVALID 0x00
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint16_t high_seq_cnt;
+ uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+ uint8_t vendor_uniq;
+ uint8_t reason_expl;
+ uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
+ uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
+ * The ABTS response to the ABTS received is sent by the
+ * target driver to the ISP 24xx.
+ * The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t reserved_1;
+ uint16_t nport_handle;
+ uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ union {
+ struct ba_acc_le ba_acct;
+ struct ba_rjt_le ba_rjt;
+ } __packed payload;
+ uint32_t reserved_4;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
+ * The ABTS response with completion status to the ABTS response
+ * (sent by the target driver to the ISP 24xx) is sent by the
+ * ISP24xx firmware to the target driver.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS 0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint8_t reserved_2;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[8];
+ uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
+ uint32_t error_subcode2;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+ int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+ unsigned char *, uint32_t, int, int, int);
+ void (*handle_data)(struct qla_tgt_cmd *);
+ void (*handle_dif_err)(struct qla_tgt_cmd *);
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+ uint32_t);
+ void (*free_cmd)(struct qla_tgt_cmd *);
+ void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+ void (*free_session)(struct qla_tgt_sess *);
+
+ int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+ void *, uint8_t *, uint16_t);
+ void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
+ struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+ const uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+ const uint8_t *);
+ void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+ void (*put_sess)(struct qla_tgt_sess *);
+ void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT 10 /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET 0x000E
+#define IMM_NTFY_LIP_LINK_REINIT 0x000F
+#define IMM_NTFY_IOCB_OVERFLOW 0x0016
+#define IMM_NTFY_ABORT_TASK 0x0020
+#define IMM_NTFY_PORT_LOGOUT 0x0029
+#define IMM_NTFY_PORT_CONFIG 0x002A
+#define IMM_NTFY_GLBL_TPRLO 0x002D
+#define IMM_NTFY_GLBL_LOGO 0x002E
+#define IMM_NTFY_RESOURCE 0x0034
+#define IMM_NTFY_MSG_RX 0x0036
+#define IMM_NTFY_SRR 0x0045
+#define IMM_NTFY_ELS 0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT 8
+
+#define QLA_TGT_CLEAR_ACA 0x40
+#define QLA_TGT_TARGET_RESET 0x20
+#define QLA_TGT_LUN_RESET 0x10
+#define QLA_TGT_CLEAR_TS 0x04
+#define QLA_TGT_ABORT_TS 0x02
+#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
+#define QLA_TGT_ABORT_ALL 0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
+#define QLA_TGT_NEXUS_LOSS 0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE 0
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE 0
+#define ATIO_HEAD_OF_QUEUE 1
+#define ATIO_ORDERED_QUEUE 2
+#define ATIO_ACA_QUEUE 4
+#define ATIO_UNTAGGED 5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define FC_TM_SUCCESS 0
+#define FC_TM_BAD_FCP_DATA 1
+#define FC_TM_BAD_CMD 2
+#define FC_TM_FCP_DATA_MISMATCH 3
+#define FC_TM_REJECT 4
+#define FC_TM_FAILED 5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+ * terminated, so no more actions is needed and success should be returned
+ * to target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
+
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
+ (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+ uint8_t port_name[WWN_SIZE];
+ uint16_t loop_id;
+ uint16_t reserved;
+};
+
+struct qla_tgt {
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+
+ /*
+ * To sync between IRQ handlers and qlt_target_release(). Needed,
+ * because req_pkt() can drop/reaquire HW lock inside. Protected by
+ * HW lock.
+ */
+ int irq_cmd_count;
+
+ int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+ /* Target's flags, serialized by pha->hardware_lock */
+ unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+ unsigned int link_reinit_iocb_pending:1;
+
+ /*
+ * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+ * OR hardware_lock for reading.
+ */
+ int tgt_stop; /* the target mode driver is being stopped */
+ int tgt_stopped; /* the target mode driver has been stopped */
+
+ /* Count of sessions refering qla_tgt. Protected by hardware_lock. */
+ int sess_count;
+
+ /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+ struct list_head sess_list;
+
+ /* Protected by hardware_lock */
+ struct list_head del_sess_list;
+ struct delayed_work sess_del_work;
+
+ spinlock_t sess_work_lock;
+ struct list_head sess_works_list;
+ struct work_struct sess_work;
+
+ struct imm_ntfy_from_isp link_reinit_iocb;
+ wait_queue_head_t waitQ;
+ int notify_ack_expected;
+ int abts_resp_expected;
+ int modify_lun_expected;
+
+ int ctio_srr_id;
+ int imm_srr_id;
+ spinlock_t srr_lock;
+ struct list_head srr_ctio_list;
+ struct list_head srr_imm_list;
+ struct work_struct srr_work;
+
+ atomic_t tgt_global_resets_count;
+
+ struct list_head tgt_list_entry;
+};
+
+struct qla_tgt_sess_op {
+ struct scsi_qla_host *vha;
+ struct atio_from_isp atio;
+ struct work_struct work;
+};
+
+/*
+ * Equivilant to IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+ uint16_t loop_id;
+ port_id_t s_id;
+
+ unsigned int conf_compl_supported:1;
+ unsigned int deleted:1;
+ unsigned int local:1;
+
+ struct se_session *se_sess;
+ struct scsi_qla_host *vha;
+ struct qla_tgt *tgt;
+
+ struct list_head sess_list_entry;
+ unsigned long expires;
+ struct list_head del_list_entry;
+
+ uint8_t port_name[WWN_SIZE];
+ struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+ struct se_cmd se_cmd;
+ struct qla_tgt_sess *sess;
+ int state;
+ struct work_struct free_work;
+ struct work_struct work;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+ /* to save extra sess dereferences */
+ unsigned int conf_compl_supported:1;
+ unsigned int sg_mapped:1;
+ unsigned int free_sg:1;
+ unsigned int aborted:1; /* Needed in case of SRR */
+ unsigned int write_data_transferred:1;
+ unsigned int ctx_dsd_alloced:1;
+ unsigned int q_full:1;
+ unsigned int term_exchg:1;
+ unsigned int cmd_sent_to_fw:1;
+ unsigned int cmd_in_wq:1;
+
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int sg_cnt; /* SG segments count */
+ int bufflen; /* cmd buffer length */
+ int offset;
+ uint32_t tag;
+ uint32_t unpacked_lun;
+ enum dma_data_direction dma_data_direction;
+ uint32_t reset_count;
+
+ uint16_t loop_id; /* to save extra sess dereferences */
+ struct qla_tgt *tgt; /* to save extra sess dereferences */
+ struct scsi_qla_host *vha;
+ struct list_head cmd_list;
+
+ struct atio_from_isp atio;
+ /* t10dif */
+ struct scatterlist *prot_sg;
+ uint32_t prot_sg_cnt;
+ uint32_t blk_sz;
+ struct crc_context *ctx;
+
+ uint64_t jiffies_at_alloc;
+ uint64_t jiffies_at_free;
+ /* BIT_0 - Atio Arrival / schedule to work
+ * BIT_1 - qlt_do_work
+ * BIT_2 - qlt_do work failed
+ * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+ * BIT_4 - read respond/tcm_qla2xx_queue_data_in
+ * BIT_5 - status respond / tcm_qla2xx_queue_status
+ * BIT_6 - tcm request to abort/Term exchange.
+ * pre_xmit_response->qlt_send_term_exchange
+ * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+ * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+ * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
+ * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
+ * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
+ * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
+ * BIT_13 - Bad completion -
+ * qlt_ctio_do_completion --> qlt_term_ctio_exchange
+ * BIT_14 - Back end data received/sent.
+ * BIT_15 - SRR prepare ctio
+ * BIT_16 - complete free
+ */
+ uint32_t cmd_flags;
+};
+
+struct qla_tgt_sess_work_param {
+ struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT 1
+#define QLA_TGT_SESS_WORK_TM 2
+ int type;
+
+ union {
+ struct abts_recv_from_24xx abts;
+ struct imm_ntfy_from_isp tm_iocb;
+ struct atio_from_isp tm_iocb2;
+ };
+};
+
+struct qla_tgt_mgmt_cmd {
+ uint8_t tmr_func;
+ uint8_t fc_tm_rsp;
+ struct qla_tgt_sess *sess;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ unsigned int flags;
+ uint32_t reset_count;
+#define QLA24XX_MGMT_SEND_NACK 1
+ union {
+ struct atio_from_isp atio;
+ struct imm_ntfy_from_isp imm_ntfy;
+ struct abts_recv_from_24xx abts;
+ } __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+ struct qla_tgt_cmd *cmd;
+ struct qla_tgt *tgt;
+ void *pkt;
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ unsigned char *sense_buffer;
+ int seg_cnt;
+ int req_cnt;
+ uint16_t rq_result;
+ uint16_t scsi_status;
+ int sense_buffer_len;
+ int residual;
+ int add_status_pkt;
+ /* dif */
+ struct scatterlist *prot_sg;
+ uint16_t prot_seg_cnt;
+ uint16_t tot_dsds;
+};
+
+struct qla_tgt_srr_imm {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA 1
+#define QLA_TGT_XMIT_STATUS 2
+#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+
+extern struct qla_tgt_data qla_target;
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+ int (*callback)(struct scsi_qla_host *, void *, u64, u64));
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+ if (ha->host->active_mode & MODE_INITIATOR)
+ ha->host->active_mode &= ~MODE_INITIATOR;
+ else
+ ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
+ */
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_81xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+ struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+ struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern int qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 000000000..962cb89fe
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,959 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_tmpl.h"
+
+/* note default template is in big endian */
+static const uint32_t ql27xx_fwdt_default_template[] = {
+ 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
+ 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x04010000, 0x14000000, 0x00000000,
+ 0x02000000, 0x44000000, 0x09010000, 0x10000000,
+ 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
+ 0x00000000, 0x02000000, 0x00600000, 0x00000000,
+ 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
+ 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
+ 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+ 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
+ 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
+ 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
+ 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+ 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
+ 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
+ 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
+ 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
+ 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
+ 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
+ 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
+ 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
+ 0x00000000, 0x02000000, 0x01000000, 0x00000200,
+ 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
+ 0x02000000, 0x02000000, 0x00001000, 0x00000000,
+ 0x07010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x00000000, 0x01000000, 0x07010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00000000, 0x02000000,
+ 0x07010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
+ 0x00000000, 0x02000000, 0x00000000, 0xff000000,
+ 0x10000000, 0x00000000, 0x00000080,
+};
+
+static inline void __iomem *
+qla27xx_isp_reg(struct scsi_qla_host *vha)
+{
+ return &vha->hw->iobase->isp24;
+}
+
+static inline void
+qla27xx_insert16(uint16_t value, void *buf, ulong *len)
+{
+ if (buf) {
+ buf += *len;
+ *(__le16 *)buf = cpu_to_le16(value);
+ }
+ *len += sizeof(value);
+}
+
+static inline void
+qla27xx_insert32(uint32_t value, void *buf, ulong *len)
+{
+ if (buf) {
+ buf += *len;
+ *(__le32 *)buf = cpu_to_le32(value);
+ }
+ *len += sizeof(value);
+}
+
+static inline void
+qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
+{
+
+ if (buf && mem && size) {
+ buf += *len;
+ memcpy(buf, mem, size);
+ }
+ *len += size;
+}
+
+static inline void
+qla27xx_read8(void *window, void *buf, ulong *len)
+{
+ uint8_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_BYTE((__iomem void *)window);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read16(void *window, void *buf, ulong *len)
+{
+ uint16_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_WORD((__iomem void *)window);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read32(void *window, void *buf, ulong *len)
+{
+ uint32_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_DWORD((__iomem void *)window);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+{
+ return
+ (width == 1) ? qla27xx_read8 :
+ (width == 2) ? qla27xx_read16 :
+ qla27xx_read32;
+}
+
+static inline void
+qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
+ uint offset, void *buf, ulong *len)
+{
+ void *window = (void *)reg + offset;
+
+ qla27xx_read32(window, buf, len);
+}
+
+static inline void
+qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
+ uint offset, uint32_t data, void *buf)
+{
+ __iomem void *window = (void __iomem *)reg + offset;
+
+ if (buf) {
+ WRT_REG_DWORD(window, data);
+ }
+}
+
+static inline void
+qla27xx_read_window(__iomem struct device_reg_24xx *reg,
+ uint32_t addr, uint offset, uint count, uint width, void *buf,
+ ulong *len)
+{
+ void *window = (void *)reg + offset;
+ void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+
+ qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
+ while (count--) {
+ qla27xx_insert32(addr, buf, len);
+ readn(window, buf, len);
+ window += width;
+ addr++;
+ }
+}
+
+static inline void
+qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
+{
+ if (buf)
+ ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
+ "Skipping entry %d\n", ent->hdr.entry_type);
+}
+
+static int
+qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd100,
+ "%s: nop [%lx]\n", __func__, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd1ff,
+ "%s: end [%lx]\n", __func__, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ /* terminate */
+ return true;
+}
+
+static int
+qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd200,
+ "%s: rdio t1 [%lx]\n", __func__, *len);
+ qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
+ ent->t256.reg_count, ent->t256.reg_width, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd201,
+ "%s: wrio t1 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
+ qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd202,
+ "%s: rdio t2 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
+ qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
+ ent->t258.reg_count, ent->t258.reg_width, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd203,
+ "%s: wrio t2 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
+ qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
+ qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd204,
+ "%s: rdpci [%lx]\n", __func__, *len);
+ qla27xx_insert32(ent->t260.pci_offset, buf, len);
+ qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd205,
+ "%s: wrpci [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords;
+ ulong start;
+ ulong end;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd206,
+ "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
+ start = ent->t262.start_addr;
+ end = ent->t262.end_addr;
+
+ if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+ ;
+ } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+ end = vha->hw->fw_memory_size;
+ if (buf)
+ ent->t262.end_addr = end;
+ } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+ start = vha->hw->fw_shared_ram_start;
+ end = vha->hw->fw_shared_ram_end;
+ if (buf) {
+ ent->t262.start_addr = start;
+ ent->t262.end_addr = end;
+ }
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd022,
+ "%s: unknown area %x\n", __func__, ent->t262.ram_area);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ if (end < start || end == 0) {
+ ql_dbg(ql_dbg_misc, vha, 0xd023,
+ "%s: unusable range (start=%x end=%x)\n", __func__,
+ ent->t262.end_addr, ent->t262.start_addr);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ dwords = end - start + 1;
+ if (buf) {
+ buf += *len;
+ qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ }
+ *len += dwords * sizeof(uint32_t);
+done:
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ uint count = 0;
+ uint i;
+ uint length;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd207,
+ "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
+ if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
+ if (req || !buf) {
+ length = req ?
+ req->length : REQUEST_ENTRY_CNT_24XX;
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(req ? req->ring : NULL,
+ length * sizeof(*req->ring), buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+ if (rsp || !buf) {
+ length = rsp ?
+ rsp->length : RESPONSE_ENTRY_CNT_MQ;
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(rsp ? rsp->ring : NULL,
+ length * sizeof(*rsp->ring), buf, len);
+ count++;
+ }
+ }
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd026,
+ "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ if (buf)
+ ent->t263.num_queues = count;
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd208,
+ "%s: getfce [%lx]\n", __func__, *len);
+ if (vha->hw->fce) {
+ if (buf) {
+ ent->t264.fce_trace_size = FCE_SIZE;
+ ent->t264.write_pointer = vha->hw->fce_wr;
+ ent->t264.base_pointer = vha->hw->fce_dma;
+ ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
+ ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
+ ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
+ ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
+ ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
+ ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
+ }
+ qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd027,
+ "%s: missing fce\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd209,
+ "%s: pause risc [%lx]\n", __func__, *len);
+ if (buf)
+ qla24xx_pause_risc(reg, vha->hw);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20a,
+ "%s: reset risc [%lx]\n", __func__, *len);
+ if (buf)
+ qla24xx_soft_reset(vha->hw);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20b,
+ "%s: dis intr [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20c,
+ "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
+ if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ if (vha->hw->eft) {
+ if (buf) {
+ ent->t268.buf_size = EFT_SIZE;
+ ent->t268.start_addr = vha->hw->eft_dma;
+ }
+ qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing eft\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20d,
+ "%s: scratch [%lx]\n", __func__, *len);
+ qla27xx_insert32(0xaaaaaaaa, buf, len);
+ qla27xx_insert32(0xbbbbbbbb, buf, len);
+ qla27xx_insert32(0xcccccccc, buf, len);
+ qla27xx_insert32(0xdddddddd, buf, len);
+ qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
+ if (buf)
+ ent->t269.scratch_size = 5 * sizeof(uint32_t);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong dwords = ent->t270.count;
+ ulong addr = ent->t270.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20e,
+ "%s: rdremreg [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ while (dwords--) {
+ qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_read_reg(reg, 0xc4, buf, len);
+ addr += sizeof(uint32_t);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong addr = ent->t271.addr;
+ ulong data = ent->t271.data;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20f,
+ "%s: wrremreg [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ qla27xx_write_reg(reg, 0xc4, data, buf);
+ qla27xx_write_reg(reg, 0xc0, addr, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords = ent->t272.count;
+ ulong start = ent->t272.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd210,
+ "%s: rdremram [%lx]\n", __func__, *len);
+ if (buf) {
+ ql_dbg(ql_dbg_misc, vha, 0xd02c,
+ "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+ buf += *len;
+ qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
+ }
+ *len += dwords * sizeof(uint32_t);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords = ent->t273.count;
+ ulong addr = ent->t273.addr;
+ uint32_t value;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd211,
+ "%s: pcicfg [%lx]\n", __func__, *len);
+ while (dwords--) {
+ value = ~0;
+ if (pci_read_config_dword(vha->hw->pdev, addr, &value))
+ ql_dbg(ql_dbg_misc, vha, 0xd02d,
+ "%s: failed pcicfg read at %lx\n", __func__, addr);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_insert32(value, buf, len);
+ addr += sizeof(uint32_t);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ uint count = 0;
+ uint i;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd212,
+ "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
+ if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
+ if (req || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(req && req->out_ptr ?
+ *req->out_ptr : 0, buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+ if (rsp || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(rsp && rsp->in_ptr ?
+ *rsp->in_ptr : 0, buf, len);
+ count++;
+ }
+ }
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd02f,
+ "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ if (buf)
+ ent->t274.num_queues = count;
+
+ if (!count)
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong offset = offsetof(typeof(*ent), t275.buffer);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd213,
+ "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
+ if (!ent->t275.length) {
+ ql_dbg(ql_dbg_misc, vha, 0xd020,
+ "%s: buffer zero length\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+ if (offset + ent->t275.length > ent->hdr.entry_size) {
+ ql_dbg(ql_dbg_misc, vha, 0xd030,
+ "%s: buffer overflow\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+done:
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd2ff,
+ "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+struct qla27xx_fwdt_entry_call {
+ uint type;
+ int (*call)(
+ struct scsi_qla_host *,
+ struct qla27xx_fwdt_entry *,
+ void *,
+ ulong *);
+};
+
+static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
+ { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
+ { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
+ { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
+ { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
+ { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
+ { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
+ { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
+ { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
+ { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
+ { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
+ { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
+ { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
+ { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
+ { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
+ { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
+ { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
+ { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
+ { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
+ { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
+ { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
+ { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
+ { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } ,
+ { -1 , qla27xx_fwdt_entry_other }
+};
+
+static inline int (*qla27xx_find_entry(uint type))
+ (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+{
+ struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+
+ while (list->type < type)
+ list++;
+
+ if (list->type == type)
+ return list->call;
+ return qla27xx_fwdt_entry_other;
+}
+
+static inline void *
+qla27xx_next_entry(void *p)
+{
+ struct qla27xx_fwdt_entry *ent = p;
+
+ return p + ent->hdr.entry_size;
+}
+
+static void
+qla27xx_walk_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
+{
+ struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
+ ulong count = tmp->entry_count;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd01a,
+ "%s: entry count %lx\n", __func__, count);
+ while (count--) {
+ if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
+ break;
+ ent = qla27xx_next_entry(ent);
+ }
+
+ if (count)
+ ql_dbg(ql_dbg_misc, vha, 0xd018,
+ "%s: residual count (%lx)\n", __func__, count);
+
+ if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+ ql_dbg(ql_dbg_misc, vha, 0xd019,
+ "%s: missing end (%lx)\n", __func__, count);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd01b,
+ "%s: len=%lx\n", __func__, *len);
+
+ if (buf) {
+ ql_log(ql_log_warn, vha, 0xd015,
+ "Firmware dump saved to temp buffer (%ld/%p)\n",
+ vha->host_no, vha->hw->fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
+}
+
+static void
+qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
+{
+ tmp->capture_timestamp = jiffies;
+}
+
+static void
+qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
+{
+ uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
+ int rval = 0;
+
+ rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+ v+0, v+1, v+2, v+3, v+4, v+5);
+
+ tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
+ tmp->driver_info[1] = v[5] << 8 | v[4];
+ tmp->driver_info[2] = 0x12345678;
+}
+
+static void
+qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
+ struct scsi_qla_host *vha)
+{
+ tmp->firmware_version[0] = vha->hw->fw_major_version;
+ tmp->firmware_version[1] = vha->hw->fw_minor_version;
+ tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+ tmp->firmware_version[3] =
+ vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
+ tmp->firmware_version[4] =
+ vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+}
+
+static void
+ql27xx_edit_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp)
+{
+ qla27xx_time_stamp(tmp);
+ qla27xx_driver_info(tmp);
+ qla27xx_firmware_info(tmp, vha);
+}
+
+static inline uint32_t
+qla27xx_template_checksum(void *p, ulong size)
+{
+ uint32_t *buf = p;
+ uint64_t sum = 0;
+
+ size /= sizeof(*buf);
+
+ while (size--)
+ sum += *buf++;
+
+ sum = (sum & 0xffffffff) + (sum >> 32);
+
+ return ~sum;
+}
+
+static inline int
+qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
+{
+ return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+}
+
+static inline int
+qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
+{
+ return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+}
+
+static void
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+{
+ struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ ulong len;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+ len = tmp->template_size;
+ tmp = memcpy(vha->hw->fw_dump, tmp, len);
+ ql27xx_edit_template(vha, tmp);
+ qla27xx_walk_template(vha, tmp, tmp, &len);
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = 1;
+ }
+}
+
+ulong
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+{
+ struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ ulong len = 0;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+ len = tmp->template_size;
+ qla27xx_walk_template(vha, tmp, NULL, &len);
+ }
+
+ return len;
+}
+
+ulong
+qla27xx_fwdt_template_size(void *p)
+{
+ struct qla27xx_fwdt_template *tmp = p;
+
+ return tmp->template_size;
+}
+
+ulong
+qla27xx_fwdt_template_default_size(void)
+{
+ return sizeof(ql27xx_fwdt_default_template);
+}
+
+const void *
+qla27xx_fwdt_template_default(void)
+{
+ return ql27xx_fwdt_default_template;
+}
+
+int
+qla27xx_fwdt_template_valid(void *p)
+{
+ struct qla27xx_fwdt_template *tmp = p;
+
+ if (!qla27xx_verify_template_header(tmp)) {
+ ql_log(ql_log_warn, NULL, 0xd01c,
+ "%s: template type %x\n", __func__, tmp->template_type);
+ return false;
+ }
+
+ if (!qla27xx_verify_template_checksum(tmp)) {
+ ql_log(ql_log_warn, NULL, 0xd01d,
+ "%s: failed template checksum\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ ulong flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+ if (!vha->hw->fw_dump)
+ ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
+ else if (!vha->hw->fw_dump_template)
+ ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+ else if (vha->hw->fw_dumped)
+ ql_log(ql_log_warn, vha, 0xd300,
+ "Firmware has been previously dumped (%p),"
+ " -- ignoring request\n", vha->hw->fw_dump);
+ else
+ qla27xx_execute_fwdt_template(vha);
+
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 000000000..141c1c5e7
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,224 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_DMP27_H__
+#define __QLA_DMP27_H__
+
+#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr)
+
+struct __packed qla27xx_fwdt_template {
+ uint32_t template_type;
+ uint32_t entry_offset;
+ uint32_t template_size;
+ uint32_t reserved_1;
+
+ uint32_t entry_count;
+ uint32_t template_version;
+ uint32_t capture_timestamp;
+ uint32_t template_checksum;
+
+ uint32_t reserved_2;
+ uint32_t driver_info[3];
+
+ uint32_t saved_state[16];
+
+ uint32_t reserved_3[8];
+ uint32_t firmware_version[5];
+};
+
+#define TEMPLATE_TYPE_FWDUMP 99
+
+#define ENTRY_TYPE_NOP 0
+#define ENTRY_TYPE_TMP_END 255
+#define ENTRY_TYPE_RD_IOB_T1 256
+#define ENTRY_TYPE_WR_IOB_T1 257
+#define ENTRY_TYPE_RD_IOB_T2 258
+#define ENTRY_TYPE_WR_IOB_T2 259
+#define ENTRY_TYPE_RD_PCI 260
+#define ENTRY_TYPE_WR_PCI 261
+#define ENTRY_TYPE_RD_RAM 262
+#define ENTRY_TYPE_GET_QUEUE 263
+#define ENTRY_TYPE_GET_FCE 264
+#define ENTRY_TYPE_PSE_RISC 265
+#define ENTRY_TYPE_RST_RISC 266
+#define ENTRY_TYPE_DIS_INTR 267
+#define ENTRY_TYPE_GET_HBUF 268
+#define ENTRY_TYPE_SCRATCH 269
+#define ENTRY_TYPE_RDREMREG 270
+#define ENTRY_TYPE_WRREMREG 271
+#define ENTRY_TYPE_RDREMRAM 272
+#define ENTRY_TYPE_PCICFG 273
+#define ENTRY_TYPE_GET_SHADOW 274
+#define ENTRY_TYPE_WRITE_BUF 275
+
+#define CAPTURE_FLAG_PHYS_ONLY BIT_0
+#define CAPTURE_FLAG_PHYS_VIRT BIT_1
+
+#define DRIVER_FLAG_SKIP_ENTRY BIT_7
+
+struct __packed qla27xx_fwdt_entry {
+ struct __packed {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t reserved_1;
+
+ uint8_t capture_flags;
+ uint8_t reserved_2[2];
+ uint8_t driver_flags;
+ } hdr;
+ union __packed {
+ struct __packed {
+ } t0;
+
+ struct __packed {
+ } t255;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint8_t reg_width;
+ uint16_t reg_count;
+ uint8_t pci_offset;
+ } t256;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint32_t write_data;
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ } t257;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint8_t reg_width;
+ uint16_t reg_count;
+ uint8_t pci_offset;
+ uint8_t banksel_offset;
+ uint8_t reserved[3];
+ uint32_t bank;
+ } t258;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint32_t write_data;
+ uint8_t reserved[2];
+ uint8_t pci_offset;
+ uint8_t banksel_offset;
+ uint32_t bank;
+ } t259;
+
+ struct __packed {
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ } t260;
+
+ struct __packed {
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ uint32_t write_data;
+ } t261;
+
+ struct __packed {
+ uint8_t ram_area;
+ uint8_t reserved[3];
+ uint32_t start_addr;
+ uint32_t end_addr;
+ } t262;
+
+ struct __packed {
+ uint32_t num_queues;
+ uint8_t queue_type;
+ uint8_t reserved[3];
+ } t263;
+
+ struct __packed {
+ uint32_t fce_trace_size;
+ uint64_t write_pointer;
+ uint64_t base_pointer;
+ uint32_t fce_enable_mb0;
+ uint32_t fce_enable_mb2;
+ uint32_t fce_enable_mb3;
+ uint32_t fce_enable_mb4;
+ uint32_t fce_enable_mb5;
+ uint32_t fce_enable_mb6;
+ } t264;
+
+ struct __packed {
+ } t265;
+
+ struct __packed {
+ } t266;
+
+ struct __packed {
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ uint32_t data;
+ } t267;
+
+ struct __packed {
+ uint8_t buf_type;
+ uint8_t reserved[3];
+ uint32_t buf_size;
+ uint64_t start_addr;
+ } t268;
+
+ struct __packed {
+ uint32_t scratch_size;
+ } t269;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t270;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t data;
+ } t271;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t272;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t273;
+
+ struct __packed {
+ uint32_t num_queues;
+ uint8_t queue_type;
+ uint8_t reserved[3];
+ } t274;
+
+ struct __packed {
+ uint32_t length;
+ uint8_t buffer[];
+ } t275;
+ };
+};
+
+#define T262_RAM_AREA_CRITICAL_RAM 1
+#define T262_RAM_AREA_EXTERNAL_RAM 2
+#define T262_RAM_AREA_SHARED_RAM 3
+#define T262_RAM_AREA_DDR_RAM 4
+
+#define T263_QUEUE_TYPE_REQ 1
+#define T263_QUEUE_TYPE_RSP 2
+#define T263_QUEUE_TYPE_ATIO 3
+
+#define T268_BUF_TYPE_EXTD_TRACE 1
+#define T268_BUF_TYPE_EXCH_BUFOFF 2
+#define T268_BUF_TYPE_EXTD_LOGIN 3
+#define T268_BUF_TYPE_REQ_MIRROR 4
+#define T268_BUF_TYPE_RSP_MIRROR 5
+
+#define T274_QUEUE_TYPE_REQ_SHAD 1
+#define T274_QUEUE_TYPE_RSP_SHAD 2
+#define T274_QUEUE_TYPE_ATIO_SHAD 3
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
new file mode 100644
index 000000000..2ed9ab90a
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -0,0 +1,15 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+/*
+ * Driver version
+ */
+#define QLA2XXX_VERSION "8.07.00.18-k"
+
+#define QLA_DRIVER_MAJOR_VER 8
+#define QLA_DRIVER_MINOR_VER 7
+#define QLA_DRIVER_PATCH_VER 0
+#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000..5c9e680aa
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,2168 @@
+/*******************************************************************************
+ * This file contains tcm implementation using v4 configfs fabric infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@daterainc.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+static struct workqueue_struct *tcm_qla2xxx_free_wq;
+static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static const struct target_core_fabric_ops tcm_qla2xxx_ops;
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops;
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+ const char *cp;
+ char c;
+ u32 nibble;
+ u32 byte = 0;
+ u32 pos = 0;
+ u32 err;
+
+ *wwn = 0;
+ for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+ c = *cp;
+ if (c == '\n' && cp[1] == '\0')
+ continue;
+ if (strict && pos++ == 2 && byte++ < 7) {
+ pos = 0;
+ if (c == ':')
+ continue;
+ err = 1;
+ goto fail;
+ }
+ if (c == '\0') {
+ err = 2;
+ if (strict && byte != 8)
+ goto fail;
+ return cp - name;
+ }
+ err = 3;
+ if (isdigit(c))
+ nibble = c - '0';
+ else if (isxdigit(c) && (islower(c) || !strict))
+ nibble = tolower(c) - 'a' + 10;
+ else
+ goto fail;
+ *wwn = (*wwn << 4) | nibble;
+ }
+ err = 4;
+fail:
+ pr_debug("err %u len %zu pos %u byte %u\n",
+ err, cp - name, pos, byte);
+ return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+ u8 b[8];
+
+ put_unaligned_be64(wwn, b);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+ return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ int value;
+
+ value = hex_to_bin(*ns++);
+ if (value >= 0)
+ j = (j << 4) | value;
+ else
+ return -EINVAL;
+
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+ return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+ const char *name,
+ size_t count,
+ u64 *wwpn,
+ u64 *wwnn)
+{
+ unsigned int cnt = count;
+ int rc;
+
+ *wwpn = 0;
+ *wwnn = 0;
+
+ /* count may include a LF at end of string */
+ if (name[cnt-1] == '\n' || name[cnt-1] == 0)
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+ if (rc != 0)
+ return rc;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+ return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ u8 proto_id;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ proto_id = fc_get_fabric_proto_ident(se_tpg);
+ break;
+ }
+
+ return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return lport->lport_naa_name;
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ break;
+ }
+
+ return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ char *tid = NULL;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ break;
+ }
+
+ return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.fabric_prot_type;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+ struct qla_tgt_mgmt_cmd, free_work);
+
+ transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+ queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ cmd->cmd_in_wq = 0;
+
+ WARN_ON(cmd->cmd_flags & BIT_16);
+
+ cmd->cmd_flags |= BIT_16;
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ cmd->cmd_in_wq = 1;
+ INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd;
+
+ if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ cmd->cmd_flags |= BIT_14;
+ }
+
+ return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
+ * fabric descriptor @se_cmd command to release
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd;
+
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+ qlt_free_mcmd(mcmd);
+ return;
+ }
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ target_sess_cmd_list_set_waiting(se_sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ qlt_unreg_sess(sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+ cmd->prot_sg = se_cmd->t_prot_sg;
+ cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
+ se_cmd->pi_err = 0;
+
+ /*
+ * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+ */
+ return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+ /*
+ * Check for WRITE_PENDING status to determine if we need to wait for
+ * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+ */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+ se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+ wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+ 3000);
+ return 0;
+ }
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd;
+
+ /* check for task mgmt cmd */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ return 0xffffffff;
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+ return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+ int data_dir, int bidi)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess;
+ int flags = TARGET_SCF_ACK_KREF;
+
+ if (bidi)
+ flags |= TARGET_SCF_BIDI_OP;
+
+ sess = cmd->sess;
+ if (!sess) {
+ pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+ return -EINVAL;
+ }
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("Unable to locate active struct se_session\n");
+ return -EINVAL;
+ }
+
+ return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, flags);
+}
+
+static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ /*
+ * Ensure that the complete FCP WRITE payload has been received.
+ * Otherwise return an exception via CHECK_CONDITION status.
+ */
+ cmd->cmd_in_wq = 0;
+ cmd->cmd_flags |= BIT_11;
+ if (!cmd->write_data_transferred) {
+ /*
+ * Check if se_cmd has already been aborted via LUN_RESET, and
+ * waiting upon completion in tcm_qla2xxx_write_pending_status()
+ */
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
+ complete(&cmd->se_cmd.t_transport_stop_comp);
+ return;
+ }
+
+ if (cmd->se_cmd.pi_err)
+ transport_generic_request_failure(&cmd->se_cmd,
+ cmd->se_cmd.pi_err);
+ else
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+
+ return;
+ }
+
+ return target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+ cmd->cmd_flags |= BIT_10;
+ cmd->cmd_in_wq = 1;
+ INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ /* take an extra kref to prevent cmd free too early.
+ * need to wait for SCSI status/check condition to
+ * finish responding generate by transport_generic_request_failure.
+ */
+ kref_get(&cmd->se_cmd.cmd_kref);
+ transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+ uint8_t tmr_func, uint32_t tag)
+{
+ struct qla_tgt_sess *sess = mcmd->sess;
+ struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+ return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+ tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->cmd_flags |= BIT_4;
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->offset = 0;
+ cmd->cmd_flags |= BIT_3;
+
+ cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+ cmd->prot_sg = se_cmd->t_prot_sg;
+ cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
+ se_cmd->pi_err = 0;
+
+ /*
+ * Now queue completed DATA_IN the qla2xxx LLD and response ring
+ */
+ return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+ se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ int xmit_type = QLA_TGT_XMIT_STATUS;
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->sg = NULL;
+ cmd->sg_cnt = 0;
+ cmd->offset = 0;
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+ if (cmd->cmd_flags & BIT_5) {
+ pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ dump_stack();
+ }
+ cmd->cmd_flags |= BIT_5;
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+ * for qla_tgt_xmit_response LLD code
+ */
+ if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
+ se_cmd->residual_count = 0;
+ }
+ se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ se_cmd->residual_count += se_cmd->data_length;
+
+ cmd->bufflen = 0;
+ }
+ /*
+ * Now queue status response to qla2xxx LLD code and response ring
+ */
+ return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+
+ pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+ mcmd, se_tmr->function, se_tmr->response);
+ /*
+ * Do translation between TCM TM response codes and
+ * QLA2xxx FC TM response codes.
+ */
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ mcmd->fc_tm_rsp = FC_TM_REJECT;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ default:
+ mcmd->fc_tm_rsp = FC_TM_FAILED;
+ break;
+ }
+ /*
+ * Queue the TM response to QLA2xxx LLD to build a
+ * CTIO response packet.
+ */
+ qlt_xmit_tm_rsp(mcmd);
+}
+
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!cmd->sg_mapped)
+ return;
+
+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ cmd->sg_mapped = 0;
+}
+
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+ struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ void *node;
+
+ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+ node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+ if (WARN_ON(node && (node != se_nacl))) {
+ /*
+ * The nacl no longer matches what we think it should be.
+ * Most likely a new dynamic acl has been added while
+ * someone dropped the hardware lock. It clearly is a
+ * bug elsewhere, but this bit can't make things worse.
+ */
+ btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
+ node, GFP_ATOMIC);
+ }
+
+ pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ se_nacl, nacl->nport_wwnn, nacl->nport_id);
+ /*
+ * Now clear the se_nacl and session pointers from our HW lport lookup
+ * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+ *
+ * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+ * target_wait_for_sess_cmds() before the session waits for outstanding
+ * I/O to complete, to avoid a race between session shutdown execution
+ * and incoming ATIOs or TMRs picking up a stale se_node_act reference.
+ */
+ tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+ struct se_session *se_sess = container_of(kref,
+ struct se_session, sess_kref);
+
+ qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct qla_hw_data *ha = sess->vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+ if (!sess)
+ return;
+
+ assert_spin_locked(&sess->vha->hw->hardware_lock);
+ kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+ assert_spin_locked(&sess->vha->hw->hardware_lock);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct tcm_qla2xxx_nacl *nacl;
+ u64 wwnn;
+ u32 qla2xxx_nexus_depth;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+ qla2xxx_nexus_depth = 1;
+
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explict
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, qla2xxx_nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+ tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+
+ return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+#define DEF_QLA_TPG_ATTRIB(name) \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with" \
+ " ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_QLA_TPG_ATTR_BOOL(_name) \
+ \
+static int tcm_qla2xxx_set_attrib_##_name( \
+ struct tcm_qla2xxx_tpg *tpg, \
+ unsigned long val) \
+{ \
+ struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
+ \
+ if ((val != 0) && (val != 1)) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->_name = val; \
+ return 0; \
+}
+
+#define QLA_TPG_ATTR(_name, _mode) \
+ TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ Define tcm_qla2xxx_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
+DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+ &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
+ NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(base_vha);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 0);
+ target_undepend_item(&se_tpg->tpg_group.cg_item);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
+ }
+ init_completion(&tpg->tpg_base_comp);
+ schedule_work(&tpg->tpg_base_work);
+ wait_for_completion(&tpg->tpg_base_comp);
+
+ if (op) {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return -ENODEV;
+ } else {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EPERM;
+ }
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return target_show_dynamic_sessions(se_tpg, page);
+}
+
+TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions);
+
+static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tpg_attrib.fabric_prot_type = val;
+
+ return count;
+}
+
+static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
+}
+TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+ &tcm_qla2xxx_tpg_enable.attr,
+ &tcm_qla2xxx_tpg_dynamic_sessions.attr,
+ &tcm_qla2xxx_tpg_fabric_prot_type.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if ((tpgt != 1)) {
+ pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+
+ ret = core_tpg_register(&tcm_qla2xxx_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+
+ lport->tpg_1 = tpg;
+
+ return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ struct scsi_qla_host *vha = lport->qla_vha;
+ /*
+ * Call into qla2x_target.c LLD logic to shutdown the active
+ * FC Nexuses and disable target mode operation for this qla_hw_data
+ */
+ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+
+ core_tpg_deregister(se_tpg);
+ /*
+ * Clear local TPG=1 pointer for non NPIV mode.
+ */
+ lport->tpg_1 = NULL;
+ kfree(tpg);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+ &tcm_qla2xxx_npiv_tpg_enable.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+
+ ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ lport->tpg_1 = tpg;
+ return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+ scsi_qla_host_t *vha,
+ const uint8_t *s_id)
+{
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ u32 key;
+
+ lport = vha->vha_tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+ se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!se_nacl) {
+ pr_debug("Unable to locate s_id: 0x%06x\n", key);
+ return NULL;
+ }
+ pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+ se_nacl, se_nacl->initiatorname);
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint8_t *s_id)
+{
+ u32 key;
+ void *slot;
+ int rc;
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("set_sess_by_s_id: %06x\n", key);
+
+ slot = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!slot) {
+ if (new_se_nacl) {
+ pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+ nacl->nport_id = key;
+ rc = btree_insert32(&lport->lport_fcport_map, key,
+ new_se_nacl, GFP_ATOMIC);
+ if (rc)
+ printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
+ (int)key);
+ } else {
+ pr_debug("Wiping nonexisting fc_port entry\n");
+ }
+
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ return;
+ }
+
+ pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+ scsi_qla_host_t *vha,
+ const uint16_t loop_id)
+{
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ lport = vha->vha_tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = lport->lport_loopid_map + loop_id;
+ se_nacl = fc_loopid->se_nacl;
+ if (!se_nacl) {
+ pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+ loop_id);
+ return NULL;
+ }
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint16_t loop_id)
+{
+ struct se_node_acl *saved_nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+ lport->lport_loopid_map)[loop_id];
+
+ saved_nacl = fc_loopid->se_nacl;
+ if (!saved_nacl) {
+ pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+ struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ unsigned char be_sid[3];
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+
+ tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+ sess, be_sid);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+ sess, sess->loop_id);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct se_session *se_sess;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_nacl *nacl;
+
+ BUG_ON(in_interrupt());
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+ dump_stack();
+ return;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ lport = vha->vha_tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return;
+ }
+ target_wait_for_sess_cmds(se_sess);
+
+ transport_deregister_session_configfs(sess->se_sess);
+ transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+ scsi_qla_host_t *vha,
+ unsigned char *fc_wwpn,
+ void *qla_tgt_sess,
+ uint8_t *s_id,
+ uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_tpg *tpg;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct se_portal_group *se_tpg;
+ struct se_node_acl *se_nacl;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess = qla_tgt_sess;
+ unsigned char port_name[36];
+ unsigned long flags;
+ int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+ TCM_QLA2XXX_DEFAULT_TAGS;
+
+ lport = vha->vha_tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return -EINVAL;
+ }
+ /*
+ * Locate the TPG=1 reference..
+ */
+ tpg = lport->tpg_1;
+ if (!tpg) {
+ pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+ return -EINVAL;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ se_sess = transport_init_session_tags(num_tags,
+ sizeof(struct qla_tgt_cmd),
+ TARGET_PROT_ALL);
+ if (IS_ERR(se_sess)) {
+ pr_err("Unable to initialize struct se_session\n");
+ return PTR_ERR(se_sess);
+ }
+ /*
+ * Format the FCP Initiator port_name into colon seperated values to
+ * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
+ */
+ memset(&port_name, 0, 36);
+ snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
+ /*
+ * Locate our struct se_node_acl either from an explict NodeACL created
+ * via ConfigFS, or via running in TPG demo mode.
+ */
+ se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+ port_name);
+ if (!se_sess->se_node_acl) {
+ transport_free_session(se_sess);
+ return -EINVAL;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ /*
+ * And now setup the new se_nacl and session pointers into our HW lport
+ * mappings for fabric S_ID and LOOP_ID.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, s_id);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, loop_id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ /*
+ * Finally register the new FC Nexus with TCM
+ */
+ transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+ return 0;
+}
+
+static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+ uint16_t loop_id, bool conf_compl_supported)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ u32 key;
+
+
+ if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
+ pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
+ sess, sess->port_name,
+ sess->loop_id, loop_id, sess->s_id.b.domain,
+ sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
+ s_id.b.area, s_id.b.al_pa);
+
+ if (sess->loop_id != loop_id) {
+ /*
+ * Because we can shuffle loop IDs around and we
+ * update different sessions non-atomically, we might
+ * have overwritten this session's old loop ID
+ * already, and we might end up overwriting some other
+ * session that will be updated later. So we have to
+ * be extra careful and we can't warn about those things...
+ */
+ if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
+ lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
+
+ lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
+
+ sess->loop_id = loop_id;
+ }
+
+ if (sess->s_id.b24 != s_id.b24) {
+ key = (((u32) sess->s_id.b.domain << 16) |
+ ((u32) sess->s_id.b.area << 8) |
+ ((u32) sess->s_id.b.al_pa));
+
+ if (btree_lookup32(&lport->lport_fcport_map, key))
+ WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
+ "Found wrong se_nacl when updating s_id %x:%x:%x\n",
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ else
+ WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+
+ key = (((u32) s_id.b.domain << 16) |
+ ((u32) s_id.b.area << 8) |
+ ((u32) s_id.b.al_pa));
+
+ if (btree_lookup32(&lport->lport_fcport_map, key)) {
+ WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
+ s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+ btree_update32(&lport->lport_fcport_map, key, se_nacl);
+ } else {
+ btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
+ }
+
+ sess->s_id = s_id;
+ nacl->nport_id = key;
+ }
+
+ sess->conf_compl_supported = conf_compl_supported;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+ .handle_cmd = tcm_qla2xxx_handle_cmd,
+ .handle_data = tcm_qla2xxx_handle_data,
+ .handle_dif_err = tcm_qla2xxx_handle_dif_err,
+ .handle_tmr = tcm_qla2xxx_handle_tmr,
+ .free_cmd = tcm_qla2xxx_free_cmd,
+ .free_mcmd = tcm_qla2xxx_free_mcmd,
+ .free_session = tcm_qla2xxx_free_session,
+ .update_sess = tcm_qla2xxx_update_sess,
+ .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+ .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
+ .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
+ .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+ .put_sess = tcm_qla2xxx_put_sess,
+ .shutdown_sess = tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+ int rc;
+
+ rc = btree_init32(&lport->lport_fcport_map);
+ if (rc) {
+ pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+ return rc;
+ }
+
+ lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+ 65536);
+ if (!lport->lport_loopid_map) {
+ pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ btree_destroy32(&lport->lport_fcport_map);
+ return -ENOMEM;
+ }
+ memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+ * 65536);
+ pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+ void *target_lport_ptr,
+ u64 npiv_wwpn, u64 npiv_wwnn)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport =
+ (struct tcm_qla2xxx_lport *)target_lport_ptr;
+ /*
+ * Setup tgt_ops, local pointer to vha and target_lport_ptr
+ */
+ ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+ vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+ lport->qla_vha = vha;
+
+ return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 wwpn;
+ int ret = -ENODEV;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_wwpn = wwpn;
+ tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+ wwpn);
+ sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
+
+ ret = tcm_qla2xxx_init_lport(lport);
+ if (ret != 0)
+ goto out;
+
+ ret = qlt_lport_register(lport, wwpn, 0, 0,
+ tcm_qla2xxx_lport_register_cb);
+ if (ret != 0)
+ goto out_lport;
+
+ return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct se_node_acl *node;
+ u32 key = 0;
+
+ /*
+ * Call into qla2x_target.c LLD logic to complete the
+ * shutdown of struct qla_tgt after the call to
+ * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
+ */
+ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
+
+ qlt_lport_deregister(vha);
+
+ vfree(lport->lport_loopid_map);
+ btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+ btree_remove32(&lport->lport_fcport_map, key);
+ btree_destroy32(&lport->lport_fcport_map);
+ kfree(lport);
+}
+
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+ void *target_lport_ptr,
+ u64 npiv_wwpn, u64 npiv_wwnn)
+{
+ struct fc_vport *vport;
+ struct Scsi_Host *sh = base_vha->host;
+ struct scsi_qla_host *npiv_vha;
+ struct tcm_qla2xxx_lport *lport =
+ (struct tcm_qla2xxx_lport *)target_lport_ptr;
+ struct tcm_qla2xxx_lport *base_lport =
+ (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+ struct tcm_qla2xxx_tpg *base_tpg;
+ struct fc_vport_identifiers vport_id;
+
+ if (!qla_tgt_mode_enabled(base_vha)) {
+ pr_err("qla2xxx base_vha not enabled for target mode\n");
+ return -EPERM;
+ }
+
+ if (!base_lport || !base_lport->tpg_1 ||
+ !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+ pr_err("qla2xxx base_lport or tpg_1 not available\n");
+ return -EPERM;
+ }
+ base_tpg = base_lport->tpg_1;
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ vport = fc_vport_create(sh, 0, &vport_id);
+ if (!vport) {
+ pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+ return -ENODEV;
+ }
+ /*
+ * Setup local pointer to NPIV vhba + target_lport_ptr
+ */
+ npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+ npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+ lport->qla_vha = npiv_vha;
+ scsi_host_get(npiv_vha->host);
+ return 0;
+}
+
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, tmp[128];
+ int ret;
+
+ snprintf(tmp, 128, "%s", name);
+
+ p = strchr(tmp, '@');
+ if (!p) {
+ pr_err("Unable to locate NPIV '@' seperator\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *p++ = '\0';
+
+ if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+ &npiv_wwpn, &npiv_wwnn) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_npiv_wwpn = npiv_wwpn;
+ lport->lport_npiv_wwnn = npiv_wwnn;
+ sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
+
+ ret = tcm_qla2xxx_init_lport(lport);
+ if (ret != 0)
+ goto out;
+
+ ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+ tcm_qla2xxx_lport_register_npiv_cb);
+ if (ret != 0)
+ goto out_lport;
+
+ return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *npiv_vha = lport->qla_vha;
+ struct qla_hw_data *ha = npiv_vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ scsi_host_put(npiv_vha->host);
+ /*
+ * Notify libfc that we want to release the vha->fc_vport
+ */
+ fc_vport_terminate(npiv_vha->fc_vport);
+ scsi_host_put(base_vha->host);
+ kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page,
+ "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+ &tcm_qla2xxx_wwn_version.attr,
+ NULL,
+};
+
+static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
+ .module = THIS_MODULE,
+ .name = "qla2xxx",
+ .get_fabric_name = tcm_qla2xxx_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ tcm_qla2xxx_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .aborted_task = tcm_qla2xxx_aborted_task,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+
+ .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
+ .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs,
+};
+
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+ .module = THIS_MODULE,
+ .name = "qla2xxx_npiv",
+ .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .aborted_task = tcm_qla2xxx_aborted_task,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+
+ .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+ int ret;
+
+ pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+
+ ret = target_register_template(&tcm_qla2xxx_ops);
+ if (ret)
+ return ret;
+
+ ret = target_register_template(&tcm_qla2xxx_npiv_ops);
+ if (ret)
+ goto out_fabric;
+
+ tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+ WQ_MEM_RECLAIM, 0);
+ if (!tcm_qla2xxx_free_wq) {
+ ret = -ENOMEM;
+ goto out_fabric_npiv;
+ }
+
+ tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+ if (!tcm_qla2xxx_cmd_wq) {
+ ret = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+ target_unregister_template(&tcm_qla2xxx_npiv_ops);
+out_fabric:
+ target_unregister_template(&tcm_qla2xxx_ops);
+ return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+ destroy_workqueue(tcm_qla2xxx_cmd_wq);
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+
+ target_unregister_template(&tcm_qla2xxx_ops);
+ target_unregister_template(&tcm_qla2xxx_npiv_ops);
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+ int ret;
+
+ ret = tcm_qla2xxx_register_configfs();
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+ tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 000000000..23295115c
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,84 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION "v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN 32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+ /* From libfc struct fc_rport->port_id */
+ u32 nport_id;
+ /* Binary World Wide unique Node Name for remote FC Initiator Nport */
+ u64 nport_wwnn;
+ /* ASCII formatted WWPN for FC Initiator Nport */
+ char nport_name[TCM_QLA2XXX_NAMELEN];
+ /* Pointer to qla_tgt_sess */
+ struct qla_tgt_sess *qla_tgt_sess;
+ /* Pointer to TCM FC nexus */
+ struct se_session *nport_nexus;
+ /* Returned by tcm_qla2xxx_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+ int generate_node_acls;
+ int cache_dynamic_acls;
+ int demo_mode_write_protect;
+ int prod_mode_write_protect;
+ int demo_mode_login_only;
+ int fabric_prot_type;
+};
+
+struct tcm_qla2xxx_tpg {
+ /* FC lport target portal group tag for TCM */
+ u16 lport_tpgt;
+ /* Atomic bit to determine TPG active status */
+ atomic_t lport_tpg_enabled;
+ /* Pointer back to tcm_qla2xxx_lport */
+ struct tcm_qla2xxx_lport *lport;
+ /* Used by tcm_qla2xxx_tpg_attrib_cit */
+ struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+ /* Returned by tcm_qla2xxx_make_tpg() */
+ struct se_portal_group se_tpg;
+ /* Items for dealing with configfs_depend_item */
+ struct completion tpg_base_comp;
+ struct work_struct tpg_base_work;
+};
+
+struct tcm_qla2xxx_fc_loopid {
+ struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+ /* SCSI protocol the lport is providing */
+ u8 lport_proto_id;
+ /* Binary World Wide unique Port Name for FC Target Lport */
+ u64 lport_wwpn;
+ /* Binary World Wide unique Port Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwpn;
+ /* Binary World Wide unique Node Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwnn;
+ /* ASCII formatted WWPN for FC Target Lport */
+ char lport_name[TCM_QLA2XXX_NAMELEN];
+ /* ASCII formatted naa WWPN for VPD page 83 etc */
+ char lport_naa_name[TCM_QLA2XXX_NAMELEN];
+ /* map for fc_port pointers in 24-bit FC Port ID space */
+ struct btree_head32 lport_fcport_map;
+ /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+ struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+ /* Pointer to struct scsi_qla_host from qla2xxx LLD */
+ struct scsi_qla_host *qla_vha;
+ /* Pointer to struct qla_tgt pointer */
+ struct qla_tgt lport_qla_tgt;
+ /* Pointer to TPG=1 for non NPIV mode */
+ struct tcm_qla2xxx_tpg *tpg_1;
+ /* Returned by tcm_qla2xxx_make_lport() */
+ struct se_wwn lport_wwn;
+};
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
new file mode 100644
index 000000000..e4dc7c733
--- /dev/null
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -0,0 +1,8 @@
+config SCSI_QLA_ISCSI
+ tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
+ depends on PCI && SCSI && NET
+ select SCSI_ISCSI_ATTRS
+ select ISCSI_BOOT_SYSFS
+ ---help---
+ This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
+ and 8032 (ISP83XX) iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
new file mode 100644
index 000000000..423097774
--- /dev/null
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -0,0 +1,5 @@
+qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o
+
+obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
+
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
new file mode 100644
index 000000000..556c1525f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -0,0 +1,1594 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <linux/ratelimit.h>
+
+#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
+{
+ return readl((void __iomem *)(ha->nx_pcibase + addr));
+}
+
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
+{
+ writel(val, (void __iomem *)(ha->nx_pcibase + addr));
+}
+
+static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
+{
+ uint32_t val;
+ int ret_val = QLA_SUCCESS;
+
+ qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
+ val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
+ if (val != addr) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
+ __func__, addr, val);
+ ret_val = QLA_ERROR;
+ }
+
+ return ret_val;
+}
+
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t *data)
+{
+ int ret_val;
+
+ ret_val = qla4_83xx_set_win_base(ha, addr);
+
+ if (ret_val == QLA_SUCCESS)
+ *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
+ else
+ ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
+ __func__, addr);
+
+ return ret_val;
+}
+
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t data)
+{
+ int ret_val;
+
+ ret_val = qla4_83xx_set_win_base(ha, addr);
+
+ if (ret_val == QLA_SUCCESS)
+ qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
+ else
+ ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
+ __func__, addr, data);
+
+ return ret_val;
+}
+
+static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
+{
+ int lock_owner;
+ int timeout = 0;
+ uint32_t lock_status = 0;
+ int ret_val = QLA_SUCCESS;
+
+ while (lock_status == 0) {
+ lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
+ if (lock_status)
+ break;
+
+ if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
+ lock_owner = qla4_83xx_rd_reg(ha,
+ QLA83XX_FLASH_LOCK_ID);
+ ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
+ __func__, ha->func_num, lock_owner);
+ ret_val = QLA_ERROR;
+ break;
+ }
+ msleep(20);
+ }
+
+ qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
+ return ret_val;
+}
+
+static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
+{
+ /* Reading FLASH_UNLOCK register unlocks the Flash */
+ qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
+ qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
+}
+
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+ uint8_t *p_data, int u32_word_count)
+{
+ int i;
+ uint32_t u32_word;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla4_83xx_flash_lock(ha);
+ if (ret_val == QLA_ERROR)
+ goto exit_lock_error;
+
+ if (addr & 0x03) {
+ ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_ERROR;
+ goto exit_flash_read;
+ }
+
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_WINDOW,
+ (addr & 0xFFFF0000));
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!",
+ __func__, addr);
+ goto exit_flash_read;
+ }
+
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_DATA(addr),
+ &u32_word);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_flash_read;
+ }
+
+ *(__le32 *)p_data = le32_to_cpu(u32_word);
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+exit_flash_read:
+ qla4_83xx_flash_unlock(ha);
+
+exit_lock_error:
+ return ret_val;
+}
+
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+ uint32_t flash_addr, uint8_t *p_data,
+ int u32_word_count)
+{
+ uint32_t i;
+ uint32_t u32_word;
+ uint32_t flash_offset;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
+
+ if (addr & 0x3) {
+ ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_ERROR;
+ goto exit_lockless_read;
+ }
+
+ ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
+ addr);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ /* Check if data is spread across multiple sectors */
+ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+ (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+
+ /* Multi sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_DATA(addr),
+ &u32_word);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ *(__le32 *)p_data = le32_to_cpu(u32_word);
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+ /* This write is needed once for each sector */
+ ret_val = qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_WINDOW,
+ addr);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_FLASH_DIRECT_DATA(addr),
+ &u32_word);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ *(__le32 *)p_data = le32_to_cpu(u32_word);
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+exit_lockless_read:
+ return ret_val;
+}
+
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+ if (qla4_83xx_flash_lock(ha))
+ ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
+
+ /*
+ * We got the lock, or someone else is holding the lock
+ * since we are restting, forcefully unlock
+ */
+ qla4_83xx_flash_unlock(ha);
+}
+
+#define INTENT_TO_RECOVER 0x01
+#define PROCEED_TO_RECOVER 0x02
+
+static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
+{
+
+ uint32_t lock = 0, lockid;
+ int ret_val = QLA_ERROR;
+
+ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+
+ /* Check for other Recovery in progress, go wait */
+ if ((lockid & 0x3) != 0)
+ goto exit_lock_recovery;
+
+ /* Intent to Recover */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+ (ha->func_num << 2) | INTENT_TO_RECOVER);
+
+ msleep(200);
+
+ /* Check Intent to Recover is advertised */
+ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+ if ((lockid & 0x3C) != (ha->func_num << 2))
+ goto exit_lock_recovery;
+
+ ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
+ __func__, ha->func_num);
+
+ /* Proceed to Recover */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+ (ha->func_num << 2) | PROCEED_TO_RECOVER);
+
+ /* Force Unlock */
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
+ ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
+
+ /* Clear bits 0-5 in IDC_RECOVERY register*/
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
+
+ /* Get lock */
+ lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
+ if (lock) {
+ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
+ lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
+ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
+ ret_val = QLA_SUCCESS;
+ }
+
+exit_lock_recovery:
+ return ret_val;
+}
+
+#define QLA83XX_DRV_LOCK_MSLEEP 200
+
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
+{
+ int timeout = 0;
+ uint32_t status = 0;
+ int ret_val = QLA_SUCCESS;
+ uint32_t first_owner = 0;
+ uint32_t tmo_owner = 0;
+ uint32_t lock_id;
+ uint32_t func_num;
+ uint32_t lock_cnt;
+
+ while (status == 0) {
+ status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
+ if (status) {
+ /* Increment Counter (8-31) and update func_num (0-7) on
+ * getting a successful lock */
+ lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+ lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
+ qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
+ break;
+ }
+
+ if (timeout == 0)
+ /* Save counter + ID of function holding the lock for
+ * first failure */
+ first_owner = ha->isp_ops->rd_reg_direct(ha,
+ QLA83XX_DRV_LOCK_ID);
+
+ if (++timeout >=
+ (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
+ tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+ func_num = tmo_owner & 0xFF;
+ lock_cnt = tmo_owner >> 8;
+ ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
+ __func__, ha->func_num, func_num, lock_cnt,
+ (first_owner & 0xFF));
+
+ if (first_owner != tmo_owner) {
+ /* Some other driver got lock, OR same driver
+ * got lock again (counter value changed), when
+ * we were waiting for lock.
+ * Retry for another 2 sec */
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
+ __func__, ha->func_num);
+ timeout = 0;
+ } else {
+ /* Same driver holding lock > 2sec.
+ * Force Recovery */
+ ret_val = qla4_83xx_lock_recovery(ha);
+ if (ret_val == QLA_SUCCESS) {
+ /* Recovered and got lock */
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
+ __func__, ha->func_num);
+ break;
+ }
+ /* Recovery Failed, some other function
+ * has the lock, wait for 2secs and retry */
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
+ __func__, ha->func_num);
+ timeout = 0;
+ }
+ }
+ msleep(QLA83XX_DRV_LOCK_MSLEEP);
+ }
+
+ return ret_val;
+}
+
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
+{
+ int id;
+
+ id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+
+ if ((id & 0xFF) != ha->func_num) {
+ ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
+ __func__, ha->func_num, (id & 0xFF));
+ return;
+ }
+
+ /* Keep lock counter value, update the ha->func_num to 0xFF */
+ qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
+ qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
+}
+
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ idc_ctrl |= DONTRESET_BIT0;
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl));
+}
+
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ idc_ctrl &= ~DONTRESET_BIT0;
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl));
+}
+
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ return idc_ctrl & DONTRESET_BIT0;
+}
+
+/*-------------------------IDC State Machine ---------------------*/
+
+enum {
+ UNKNOWN_CLASS = 0,
+ NIC_CLASS,
+ FCOE_CLASS,
+ ISCSI_CLASS
+};
+
+struct device_info {
+ int func_num;
+ int device_type;
+ int port_num;
+};
+
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+ uint32_t dev_part, dev_part1, dev_part2;
+ int i;
+ struct device_info device_map[16];
+ int func_nibble;
+ int nibble;
+ int nic_present = 0;
+ int iscsi_present = 0;
+ int iscsi_func_low = 0;
+
+ /* Use the dev_partition register to determine the PCI function number
+ * and then check drv_active register to see which driver is loaded */
+ dev_part1 = qla4_83xx_rd_reg(ha,
+ ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
+ dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
+ drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
+
+ /* Each function has 4 bits in dev_partition Info register,
+ * Lower 2 bits - device type, Upper 2 bits - physical port number */
+ dev_part = dev_part1;
+ for (i = nibble = 0; i <= 15; i++, nibble++) {
+ func_nibble = dev_part & (0xF << (nibble * 4));
+ func_nibble >>= (nibble * 4);
+ device_map[i].func_num = i;
+ device_map[i].device_type = func_nibble & 0x3;
+ device_map[i].port_num = func_nibble & 0xC;
+
+ if (device_map[i].device_type == NIC_CLASS) {
+ if (drv_active & (1 << device_map[i].func_num)) {
+ nic_present++;
+ break;
+ }
+ } else if (device_map[i].device_type == ISCSI_CLASS) {
+ if (drv_active & (1 << device_map[i].func_num)) {
+ if (!iscsi_present ||
+ (iscsi_present &&
+ (iscsi_func_low > device_map[i].func_num)))
+ iscsi_func_low = device_map[i].func_num;
+
+ iscsi_present++;
+ }
+ }
+
+ /* For function_num[8..15] get info from dev_part2 register */
+ if (nibble == 7) {
+ nibble = 0;
+ dev_part = dev_part2;
+ }
+ }
+
+ /* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
+ * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
+ * present. */
+ if (!nic_present && (ha->func_num == iscsi_func_low)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: can reset - NIC not present and lower iSCSI function is %d\n",
+ __func__, ha->func_num));
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * qla4_83xx_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ unsigned long reset_timeout, dev_init_timeout;
+
+ ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
+ __func__);
+
+ if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
+ __func__));
+ qla4_8xxx_set_rst_ready(ha);
+
+ /* Non-reset owners ACK Reset and wait for device INIT state
+ * as part of Reset Recovery by Reset Owner */
+ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+ do {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
+ __func__);
+ break;
+ }
+
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+
+ dev_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DEV_STATE);
+ } while (dev_state == QLA8XXX_DEV_NEED_RESET);
+ } else {
+ qla4_8xxx_set_rst_ready(ha);
+ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, drv_state, drv_active);
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+ __func__, DRIVER_NAME, drv_state,
+ drv_active);
+ break;
+ }
+
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+
+ drv_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DRV_STATE);
+ drv_active = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DRV_ACTIVE);
+ }
+
+ if (drv_state != drv_active) {
+ ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
+ __func__, (drv_active ^ drv_state));
+ drv_active = drv_active & drv_state;
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
+ drv_active);
+ }
+
+ clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ /* Start Reset Recovery */
+ qla4_8xxx_device_bootstrap(ha);
+ }
+}
+
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
+{
+ uint32_t idc_params, ret_val;
+
+ ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
+ (uint8_t *)&idc_params, 1);
+ if (ret_val == QLA_SUCCESS) {
+ ha->nx_dev_init_timeout = idc_params & 0xFFFF;
+ ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
+ } else {
+ ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
+ ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
+ }
+
+ DEBUG2(ql4_printk(KERN_DEBUG, ha,
+ "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
+ __func__, ha->nx_dev_init_timeout,
+ ha->nx_reset_timeout));
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+
+static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
+{
+ uint8_t *phdr;
+
+ if (!ha->reset_tmplt.buff) {
+ ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
+ __func__);
+ return;
+ }
+
+ phdr = ha->reset_tmplt.buff;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
+ *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+ *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+ *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+ *(phdr+13), *(phdr+14), *(phdr+15)));
+}
+
+static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
+{
+ uint8_t *p_cache;
+ uint32_t src, count, size;
+ uint64_t dest;
+ int ret_val = QLA_SUCCESS;
+
+ src = QLA83XX_BOOTLOADER_FLASH_ADDR;
+ dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
+ size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
+
+ /* 128 bit alignment check */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ /* 16 byte count */
+ count = size/16;
+
+ p_cache = vmalloc(size);
+ if (p_cache == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_copy_bootloader;
+ }
+
+ ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
+ size / sizeof(uint32_t));
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
+ __func__);
+ goto exit_copy_error;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
+ __func__));
+
+ /* 128 bit/16 byte write to MS memory */
+ ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+ count);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
+ __func__);
+ goto exit_copy_error;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
+ __func__, size));
+
+exit_copy_error:
+ vfree(p_cache);
+
+exit_copy_bootloader:
+ return ret_val;
+}
+
+static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
+{
+ uint32_t val, ret_val = QLA_ERROR;
+ int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
+ if (val == PHAN_INITIALIZE_COMPLETE) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Command Peg initialization complete. State=0x%x\n",
+ __func__, val));
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ msleep(CRB_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ return ret_val;
+}
+
+/**
+ * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
+ * value read ANDed with test_mask is equal to test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for total of "duration" msecs
+ * @test_mask : Mask value read with "test_mask"
+ * @test_result : Compare (value&test_mask) with test_result.
+ **/
+static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
+ int duration, uint32_t test_mask,
+ uint32_t test_result)
+{
+ uint32_t value;
+ uint8_t retries;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+ if (ret_val == QLA_ERROR)
+ goto exit_poll_reg;
+
+ retries = duration / 10;
+ do {
+ if ((value & test_mask) != test_result) {
+ msleep(duration / 10);
+ ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+ if (ret_val == QLA_ERROR)
+ goto exit_poll_reg;
+
+ ret_val = QLA_ERROR;
+ } else {
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ } while (retries--);
+
+exit_poll_reg:
+ if (ret_val == QLA_ERROR) {
+ ha->reset_tmplt.seq_error++;
+ ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+ __func__, value, test_mask, test_result);
+ }
+
+ return ret_val;
+}
+
+static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
+{
+ uint32_t sum = 0;
+ uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
+ int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
+ int ret_val;
+
+ while (u16_count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ /* checksum of 0 indicates a valid template */
+ if (~sum) {
+ ret_val = QLA_SUCCESS;
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ }
+
+ return ret_val;
+}
+
+/**
+ * qla4_83xx_read_reset_template - Read Reset Template from Flash
+ * @ha: Pointer to adapter structure
+ **/
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
+{
+ uint8_t *p_buff;
+ uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+ uint32_t ret_val;
+
+ ha->reset_tmplt.seq_error = 0;
+ ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
+ if (ha->reset_tmplt.buff == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
+ __func__);
+ goto exit_read_reset_template;
+ }
+
+ p_buff = ha->reset_tmplt.buff;
+ addr = QLA83XX_RESET_TEMPLATE_ADDR;
+
+ tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
+ sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Read template hdr size %d from Flash\n",
+ __func__, tmplt_hdr_def_size));
+
+ /* Copy template header from flash */
+ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+ tmplt_hdr_def_size);
+ if (ret_val != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
+ __func__);
+ goto exit_read_template_error;
+ }
+
+ ha->reset_tmplt.hdr =
+ (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
+
+ /* Validate the template header size and signature */
+ tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+ if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+ (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+ ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
+ __func__, tmplt_hdr_size, tmplt_hdr_def_size);
+ goto exit_read_template_error;
+ }
+
+ addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
+ p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
+ tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
+ ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Read rest of the template size %d\n",
+ __func__, ha->reset_tmplt.hdr->size));
+
+ /* Copy rest of the template */
+ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+ tmplt_hdr_def_size);
+ if (ret_val != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: Failed to read reset tempelate\n",
+ __func__);
+ goto exit_read_template_error;
+ }
+
+ /* Integrity check */
+ if (qla4_83xx_reset_seq_checksum_test(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
+ __func__);
+ goto exit_read_template_error;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
+ __func__));
+
+ /* Get STOP, START, INIT sequence offsets */
+ ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
+ ha->reset_tmplt.hdr->init_seq_offset;
+ ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
+ ha->reset_tmplt.hdr->start_seq_offset;
+ ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
+ ha->reset_tmplt.hdr->hdr_size;
+ qla4_83xx_dump_reset_seq_hdr(ha);
+
+ goto exit_read_reset_template;
+
+exit_read_template_error:
+ vfree(ha->reset_tmplt.buff);
+
+exit_read_reset_template:
+ return;
+}
+
+/**
+ * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ **/
+static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
+ uint32_t raddr, uint32_t waddr)
+{
+ uint32_t value;
+
+ qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+ qla4_83xx_wr_reg_indirect(ha, waddr, value);
+}
+
+/**
+ * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
+ *
+ * This function read value from raddr, AND with test_mask,
+ * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ **/
+static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
+ uint32_t waddr,
+ struct qla4_83xx_rmw *p_rmw_hdr)
+{
+ uint32_t value;
+
+ if (p_rmw_hdr->index_a)
+ value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
+ else
+ qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+
+ value &= p_rmw_hdr->test_mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+
+ qla4_83xx_wr_reg_indirect(ha, waddr, value);
+
+ return;
+}
+
+static void qla4_83xx_write_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ struct qla4_83xx_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla4_83xx_entry *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ struct qla4_83xx_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla4_83xx_entry *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla4_83xx_entry *p_entry;
+ struct qla4_83xx_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla4_83xx_poll *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+ /* Entries start after 8 byte qla4_83xx_poll, poll header contains
+ * the test_mask, test_value. */
+ p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
+ sizeof(struct qla4_83xx_poll));
+
+ delay = (long)p_hdr->delay;
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value);
+ }
+ } else {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
+ &value);
+ qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
+ &value);
+ }
+ }
+ }
+}
+
+static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla4_83xx_quad_entry *p_entry;
+ struct qla4_83xx_poll *p_poll;
+ uint32_t i;
+
+ p_poll = (struct qla4_83xx_poll *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+ p_entry = (struct qla4_83xx_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
+ p_entry->dr_value);
+ qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
+ __func__, i,
+ ha->reset_tmplt.seq_index));
+ }
+ }
+ }
+}
+
+static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ struct qla4_83xx_entry *p_entry;
+ struct qla4_83xx_rmw *p_rmw_hdr;
+ uint32_t i;
+
+ p_rmw_hdr = (struct qla4_83xx_rmw *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+ p_entry = (struct qla4_83xx_entry *)
+ ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
+ p_rmw_hdr);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+static void qla4_83xx_pause(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ int index;
+ struct qla4_83xx_quad_entry *p_entry;
+ struct qla4_83xx_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla4_83xx_poll *)
+ ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+ p_entry = (struct qla4_83xx_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
+ __func__, i,
+ ha->reset_tmplt.seq_index));
+ } else {
+ index = ha->reset_tmplt.array_index;
+ qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
+ &value);
+ ha->reset_tmplt.array[index++] = value;
+
+ if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
+ ha->reset_tmplt.array_index = 1;
+ }
+ }
+ }
+}
+
+static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ ha->reset_tmplt.seq_end = 1;
+}
+
+static void qla4_83xx_template_end(struct scsi_qla_host *ha,
+ struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+ ha->reset_tmplt.template_end = 1;
+
+ if (ha->reset_tmplt.seq_error == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Reset sequence completed SUCCESSFULLY.\n",
+ __func__));
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
+ __func__);
+ }
+}
+
+/**
+ * qla4_83xx_process_reset_template - Process reset template.
+ *
+ * Process all entries in reset template till entry with SEQ_END opcode,
+ * which indicates end of the reset template processing. Each entry has a
+ * Reset Entry header, entry opcode/command, with size of the entry, number
+ * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Common reset entry header.
+ **/
+static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
+ char *p_buff)
+{
+ int index, entries;
+ struct qla4_83xx_reset_entry_hdr *p_hdr;
+ char *p_entry = p_buff;
+
+ ha->reset_tmplt.seq_end = 0;
+ ha->reset_tmplt.template_end = 0;
+ entries = ha->reset_tmplt.hdr->entries;
+ index = ha->reset_tmplt.seq_index;
+
+ for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
+
+ p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
+ switch (p_hdr->cmd) {
+ case OPCODE_NOP:
+ break;
+ case OPCODE_WRITE_LIST:
+ qla4_83xx_write_list(ha, p_hdr);
+ break;
+ case OPCODE_READ_WRITE_LIST:
+ qla4_83xx_read_write_list(ha, p_hdr);
+ break;
+ case OPCODE_POLL_LIST:
+ qla4_83xx_poll_list(ha, p_hdr);
+ break;
+ case OPCODE_POLL_WRITE_LIST:
+ qla4_83xx_poll_write_list(ha, p_hdr);
+ break;
+ case OPCODE_READ_MODIFY_WRITE:
+ qla4_83xx_read_modify_write(ha, p_hdr);
+ break;
+ case OPCODE_SEQ_PAUSE:
+ qla4_83xx_pause(ha, p_hdr);
+ break;
+ case OPCODE_SEQ_END:
+ qla4_83xx_seq_end(ha, p_hdr);
+ break;
+ case OPCODE_TMPL_END:
+ qla4_83xx_template_end(ha, p_hdr);
+ break;
+ case OPCODE_POLL_READ_LIST:
+ qla4_83xx_poll_read_list(ha, p_hdr);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+
+ /* Set pointer to next entry in the sequence. */
+ p_entry += p_hdr->size;
+ }
+
+ ha->reset_tmplt.seq_index = index;
+}
+
+static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
+{
+ ha->reset_tmplt.seq_index = 0;
+ qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
+
+ if (ha->reset_tmplt.seq_end != 1)
+ ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
+ __func__);
+}
+
+static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
+{
+ qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
+
+ if (ha->reset_tmplt.template_end != 1)
+ ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
+ __func__);
+}
+
+static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
+{
+ qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
+
+ if (ha->reset_tmplt.seq_end != 1)
+ ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
+ __func__);
+}
+
+static int qla4_83xx_restart(struct scsi_qla_host *ha)
+{
+ int ret_val = QLA_SUCCESS;
+ uint32_t idc_ctrl;
+
+ qla4_83xx_process_stop_seq(ha);
+
+ /*
+ * Collect minidump.
+ * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
+ * don't collect minidump
+ */
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
+ __func__);
+ } else {
+ qla4_8xxx_get_minidump(ha);
+ }
+
+ qla4_83xx_process_init_seq(ha);
+
+ if (qla4_83xx_copy_bootloader(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_restart;
+ }
+
+ qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
+ qla4_83xx_process_start_seq(ha);
+
+exit_restart:
+ return ret_val;
+}
+
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
+{
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla4_83xx_restart(ha);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
+ goto exit_start_fw;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
+ __func__));
+ }
+
+ ret_val = qla4_83xx_check_cmd_peg_status(ha);
+ if (ret_val == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
+ __func__);
+
+exit_start_fw:
+ return ret_val;
+}
+
+/*----------------------Interrupt Related functions ---------------------*/
+
+static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
+ qla4_8xxx_intr_disable(ha);
+}
+
+static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
+{
+ uint32_t mb_int, ret;
+
+ if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ ret = readl(&ha->qla4_83xx_reg->mbox_int);
+ mb_int = ret & ~INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+ }
+}
+
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_disable_mbox_intrs(ha);
+ qla4_83xx_disable_iocb_intrs(ha);
+}
+
+static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
+ qla4_8xxx_intr_enable(ha);
+ set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
+ }
+}
+
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
+{
+ uint32_t mb_int;
+
+ if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ mb_int = INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
+ }
+}
+
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_enable_mbox_intrs(ha);
+ qla4_83xx_enable_iocb_intrs(ha);
+}
+
+
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount)
+{
+ int i;
+
+ /* Load all mailbox registers, except mailbox 0. */
+ for (i = 1; i < incount; i++)
+ writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
+
+ writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
+
+ /* Set Host Interrupt register to 1, to tell the firmware that
+ * a mailbox command is pending. Firmware after reading the
+ * mailbox command, clears the host interrupt register */
+ writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
+}
+
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
+{
+ int intr_status;
+
+ intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
+ if (intr_status) {
+ ha->mbox_status_count = outcount;
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+ }
+}
+
+/**
+ * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * @ha: pointer to host adapter structure.
+ **/
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
+{
+ int rval;
+ uint32_t dev_state;
+
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+
+ if (ql4xdontresethba)
+ qla4_83xx_set_idc_dontreset(ha);
+
+ if (dev_state == QLA8XXX_DEV_READY) {
+ /* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
+ * recovery */
+ if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
+ ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_isp_reset;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
+ __func__));
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+
+ } else {
+ /* If device_state is NEED_RESET, go ahead with
+ * Reset,irrespective of ql4xdontresethba. This is to allow a
+ * non-reset-owner to force a reset. Non-reset-owner sets
+ * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
+ * and then forces a Reset by setting device_state to
+ * NEED_RESET. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: HW state already set to NEED_RESET\n",
+ __func__));
+ }
+
+ /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
+ * priority and which drivers are present. Unlike ISP8022, the function
+ * setting NEED_RESET, may not be the Reset owner. */
+ if (qla4_83xx_can_perform_reset(ha))
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+
+ ha->isp_ops->idc_unlock(ha);
+ rval = qla4_8xxx_device_state_handler(ha);
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_clear_rst_ready(ha);
+exit_isp_reset:
+ ha->isp_ops->idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS)
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+
+ return rval;
+}
+
+static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
+{
+ u32 val = 0, val1 = 0;
+ int i, status = QLA_SUCCESS;
+
+ status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
+
+ /* Port 0 Rx Buffer Pause Threshold Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+ for (i = 0; i < 8; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 1 Rx Buffer Pause Threshold Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+ for (i = 0; i < 8; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 0 RxB Traffic Class Max Cell Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
+ for (i = 0; i < 4; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 1 RxB Traffic Class Max Cell Registers. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
+ for (i = 0; i < 4; i++) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 0 RxB Rx Traffic Class Stats. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
+ for (i = 7; i >= 0; i--) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_STATS,
+ &val);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
+ (val | (i << 29)));
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_STATS,
+ &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ /* Port 1 RxB Rx Traffic Class Stats. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
+ for (i = 7; i >= 0; i--) {
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_STATS,
+ &val);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
+ (val | (i << 29)));
+ status = qla4_83xx_rd_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_STATS,
+ &val);
+ DEBUG2(pr_info("0x%x ", val));
+ }
+
+ DEBUG2(pr_info("\n"));
+
+ status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+ &val);
+ status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+ &val1);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1));
+}
+
+static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+ int i;
+
+ /* set SRE-Shim Control Register */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
+ QLA83XX_SET_PAUSE_VAL);
+
+ for (i = 0; i < 8; i++) {
+ /* Port 0 Rx Buffer Pause Threshold Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
+ QLA83XX_SET_PAUSE_VAL);
+ /* Port 1 Rx Buffer Pause Threshold Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
+ QLA83XX_SET_PAUSE_VAL);
+ }
+
+ for (i = 0; i < 4; i++) {
+ /* Port 0 RxB Traffic Class Max Cell Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
+ QLA83XX_SET_TC_MAX_CELL_VAL);
+ /* Port 1 RxB Traffic Class Max Cell Registers. */
+ qla4_83xx_wr_reg_indirect(ha,
+ QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
+ QLA83XX_SET_TC_MAX_CELL_VAL);
+ }
+
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+ QLA83XX_SET_PAUSE_VAL);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+ QLA83XX_SET_PAUSE_VAL);
+
+ ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
+}
+
+/**
+ * qla4_83xx_eport_init - Initialize EPort.
+ * @ha: Pointer to host adapter structure.
+ *
+ * If EPort hardware is in reset state before disabling pause, there would be
+ * serious hardware wedging issues. To prevent this perform eport init everytime
+ * before disabling pause frames.
+ **/
+static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
+{
+ /* Clear the 8 registers */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
+
+ /* Write any value to Reset Control register */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
+
+ ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
+}
+
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+ ha->isp_ops->idc_lock(ha);
+ /* Before disabling pause frames, ensure that eport is not in reset */
+ qla4_83xx_eport_init(ha);
+ qla4_83xx_dump_pause_control_regs(ha);
+ __qla4_83xx_disable_pause(ha);
+ ha->isp_ops->idc_unlock(ha);
+}
+
+/**
+ * qla4_83xx_is_detached - Check if we are marked invisible.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4_83xx_is_detached(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ if (test_bit(AF_INIT_DONE, &ha->flags) &&
+ !(drv_active & (1 << ha->func_num))) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
+ __func__, drv_active));
+ return QLA_SUCCESS;
+ }
+
+ return QLA_ERROR;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
new file mode 100644
index 000000000..775fdf9fc
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -0,0 +1,371 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL483XX_H
+#define __QL483XX_H
+
+/* Indirectly Mapped Registers */
+#define QLA83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLA83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLA83XX_FLASH_STATUS 0x42100004
+#define QLA83XX_FLASH_CONTROL 0x42110004
+#define QLA83XX_FLASH_ADDR 0x42110008
+#define QLA83XX_FLASH_WRDATA 0x4211000C
+#define QLA83XX_FLASH_RDDATA 0x42110018
+#define QLA83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+
+/* Directly Mapped Registers in 83xx register table */
+
+/* Flash access regs */
+#define QLA83XX_FLASH_LOCK 0x3850
+#define QLA83XX_FLASH_UNLOCK 0x3854
+#define QLA83XX_FLASH_LOCK_ID 0x3500
+
+/* Driver Lock regs */
+#define QLA83XX_DRV_LOCK 0x3868
+#define QLA83XX_DRV_UNLOCK 0x386C
+#define QLA83XX_DRV_LOCK_ID 0x3504
+#define QLA83XX_DRV_LOCKRECOVERY 0x379C
+
+/* IDC version */
+#define QLA83XX_IDC_VER_MAJ_VALUE 0x1
+#define QLA83XX_IDC_VER_MIN_VALUE 0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA83XX_CRB_IDC_VER_MAJOR 0x3780
+#define QLA83XX_CRB_IDC_VER_MINOR 0x3798
+#define QLA83XX_IDC_DRV_CTRL 0x3790
+#define QLA83XX_IDC_DRV_AUDIT 0x3794
+#define QLA83XX_SRE_SHIM_CONTROL 0x0D200284
+#define QLA83XX_PORT0_RXB_PAUSE_THRS 0x0B2003A4
+#define QLA83XX_PORT1_RXB_PAUSE_THRS 0x0B2013A4
+#define QLA83XX_PORT0_RXB_TC_MAX_CELL 0x0B200388
+#define QLA83XX_PORT1_RXB_TC_MAX_CELL 0x0B201388
+#define QLA83XX_PORT0_RXB_TC_STATS 0x0B20039C
+#define QLA83XX_PORT1_RXB_TC_STATS 0x0B20139C
+#define QLA83XX_PORT2_IFB_PAUSE_THRS 0x0B200704
+#define QLA83XX_PORT3_IFB_PAUSE_THRS 0x0B201704
+
+/* set value to pause threshold value */
+#define QLA83XX_SET_PAUSE_VAL 0x0
+#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
+
+#define QLA83XX_RESET_CONTROL 0x28084E50
+#define QLA83XX_RESET_REG 0x28084E60
+#define QLA83XX_RESET_PORT0 0x28084E70
+#define QLA83XX_RESET_PORT1 0x28084E80
+#define QLA83XX_RESET_PORT2 0x28084E90
+#define QLA83XX_RESET_PORT3 0x28084EA0
+#define QLA83XX_RESET_SRE_SHIM 0x28084EB0
+#define QLA83XX_RESET_EPG_SHIM 0x28084EC0
+#define QLA83XX_RESET_ETHER_PCS 0x28084ED0
+
+/* qla_83xx_reg_tbl registers */
+#define QLA83XX_PEG_HALT_STATUS1 0x34A8
+#define QLA83XX_PEG_HALT_STATUS2 0x34AC
+#define QLA83XX_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
+#define QLA83XX_FW_CAPABILITIES 0x3528
+#define QLA83XX_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
+#define QLA83XX_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
+#define QLA83XX_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
+#define QLA83XX_CRB_DRV_SCRATCH 0x3548
+#define QLA83XX_CRB_DEV_PART_INFO1 0x37E0
+#define QLA83XX_CRB_DEV_PART_INFO2 0x37E4
+
+#define QLA83XX_FW_VER_MAJOR 0x3550
+#define QLA83XX_FW_VER_MINOR 0x3554
+#define QLA83XX_FW_VER_SUB 0x3558
+#define QLA83XX_NPAR_STATE 0x359C
+#define QLA83XX_FW_IMAGE_VALID 0x35FC
+#define QLA83XX_CMDPEG_STATE 0x3650
+#define QLA83XX_ASIC_TEMP 0x37B4
+#define QLA83XX_FW_API 0x356C
+#define QLA83XX_DRV_OP_MODE 0x3570
+
+static const uint32_t qla4_83xx_reg_tbl[] = {
+ QLA83XX_PEG_HALT_STATUS1,
+ QLA83XX_PEG_HALT_STATUS2,
+ QLA83XX_PEG_ALIVE_COUNTER,
+ QLA83XX_CRB_DRV_ACTIVE,
+ QLA83XX_CRB_DEV_STATE,
+ QLA83XX_CRB_DRV_STATE,
+ QLA83XX_CRB_DRV_SCRATCH,
+ QLA83XX_CRB_DEV_PART_INFO1,
+ QLA83XX_CRB_IDC_VER_MAJOR,
+ QLA83XX_FW_VER_MAJOR,
+ QLA83XX_FW_VER_MINOR,
+ QLA83XX_FW_VER_SUB,
+ QLA83XX_CMDPEG_STATE,
+ QLA83XX_ASIC_TEMP,
+};
+
+#define QLA83XX_CRB_WIN_BASE 0x3800
+#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4))
+#define QLA83XX_SEM_LOCK_BASE 0x3840
+#define QLA83XX_SEM_UNLOCK_BASE 0x3844
+#define QLA83XX_SEM_LOCK_FUNC(f) (QLA83XX_SEM_LOCK_BASE+((f)*8))
+#define QLA83XX_SEM_UNLOCK_FUNC(f) (QLA83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLA83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLA83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLA83XX_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
+#define QLA83XX_LINK_SPEED_FACTOR 10
+
+/* FLASH API Defines */
+#define QLA83xx_FLASH_MAX_WAIT_USEC 100
+#define QLA83XX_FLASH_LOCK_TIMEOUT 10000
+#define QLA83XX_FLASH_SECTOR_SIZE 65536
+#define QLA83XX_DRV_LOCK_TIMEOUT 2000
+#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLA83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca
+#define QLA83XX_FLASH_READ_RETRY_COUNT 2000
+#define QLA83XX_FLASH_STATUS_READY 0x6
+#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2
+#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64
+#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA83XX_ERASE_MODE 1
+#define QLA83XX_WRITE_MODE 2
+#define QLA83XX_DWORD_WRITE_MODE 3
+
+#define QLA83XX_GLOBAL_RESET 0x38CC
+#define QLA83XX_WILDCARD 0x38F0
+#define QLA83XX_INFORMANT 0x38FC
+#define QLA83XX_HOST_MBX_CTRL 0x3038
+#define QLA83XX_FW_MBX_CTRL 0x303C
+#define QLA83XX_BOOTLOADER_ADDR 0x355C
+#define QLA83XX_BOOTLOADER_SIZE 0x3560
+#define QLA83XX_FW_IMAGE_ADDR 0x3564
+#define QLA83XX_MBX_INTR_ENABLE 0x1000
+#define QLA83XX_MBX_INTR_MASK 0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0 0x1
+#define GRACEFUL_RESET_BIT1 0x2
+
+#define QLA83XX_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA83XX_HALT_STATUS_FW_RESET (0x2 << 29)
+#define QLA83XX_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLA83XX_BOOT_FROM_FLASH 0
+
+#define QLA83XX_IDC_PARAM_ADDR 0x3e8020
+/* Reset template definitions */
+#define QLA83XX_MAX_RESET_SEQ_ENTRIES 16
+#define QLA83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLA83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLA83XX_RESET_SEQ_VERSION 0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP 0x0000
+#define OPCODE_WRITE_LIST 0x0001
+#define OPCODE_READ_WRITE_LIST 0x0002
+#define OPCODE_POLL_LIST 0x0004
+#define OPCODE_POLL_WRITE_LIST 0x0008
+#define OPCODE_READ_MODIFY_WRITE 0x0010
+#define OPCODE_SEQ_PAUSE 0x0020
+#define OPCODE_SEQ_END 0x0040
+#define OPCODE_TMPL_END 0x0080
+#define OPCODE_POLL_READ_LIST 0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
+struct qla4_83xx_reset_template_hdr {
+ __le16 version;
+ __le16 signature;
+ __le16 size;
+ __le16 entries;
+ __le16 hdr_size;
+ __le16 checksum;
+ __le16 init_seq_offset;
+ __le16 start_seq_offset;
+} __packed;
+
+/* Common Entry Header. */
+struct qla4_83xx_reset_entry_hdr {
+ __le16 cmd;
+ __le16 size;
+ __le16 count;
+ __le16 delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla4_83xx_poll {
+ __le32 test_mask;
+ __le32 test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla4_83xx_rmw {
+ __le32 test_mask;
+ __le32 xor_value;
+ __le32 or_value;
+ uint8_t shl;
+ uint8_t shr;
+ uint8_t index_a;
+ uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla4_83xx_entry {
+ __le32 arg1;
+ __le32 arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla4_83xx_quad_entry {
+ __le32 dr_addr;
+ __le32 dr_value;
+ __le32 ar_addr;
+ __le32 ar_value;
+} __packed;
+
+struct qla4_83xx_reset_template {
+ int seq_index;
+ int seq_error;
+ int array_index;
+ uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
+ uint8_t *buff;
+ uint8_t *stop_offset;
+ uint8_t *start_offset;
+ uint8_t *init_offset;
+ struct qla4_83xx_reset_template_hdr *hdr;
+ uint8_t seq_end;
+ uint8_t template_end;
+};
+
+/* POLLRD Entry */
+struct qla83xx_minidump_entry_pollrd {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t read_addr;
+ uint32_t select_value;
+ uint16_t select_value_stride;
+ uint16_t op_count;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t data_size;
+ uint32_t rsvd_1;
+};
+
+struct qla8044_minidump_entry_rddfe {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t value;
+ uint8_t stride;
+ uint8_t stride2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+ struct qla8xxx_minidump_entry_hdr h;
+
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint8_t stride_1;
+ uint8_t stride_2;
+ uint16_t count;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t value_2;
+ uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll;
+ uint32_t mask;
+ uint32_t data_size;
+ uint32_t rsvd;
+
+} __packed;
+
+/* RDMUX2 Entry */
+struct qla83xx_minidump_entry_rdmux2 {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t select_addr_1;
+ uint32_t select_addr_2;
+ uint32_t select_value_1;
+ uint32_t select_value_2;
+ uint32_t op_count;
+ uint32_t select_value_mask;
+ uint32_t read_addr;
+ uint8_t select_value_stride;
+ uint8_t data_size;
+ uint8_t rsvd[2];
+};
+
+/* POLLRDMWR Entry */
+struct qla83xx_minidump_entry_pollrdmwr {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+};
+
+/* IDC additional information */
+struct qla4_83xx_idc_information {
+ uint32_t request_desc; /* IDC request descriptor */
+ uint32_t info1; /* IDC additional info */
+ uint32_t info2; /* IDC additional info */
+ uint32_t info3; /* IDC additional info */
+};
+
+#define QLA83XX_PEX_DMA_ENGINE_INDEX 8
+#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000
+#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000
+#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0
+#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04
+#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08
+
+#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024)
+#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
+
+/* Read Memory: For Pex-DMA */
+struct qla4_83xx_minidump_entry_rdmem_pex_dma {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t desc_card_addr;
+ uint16_t dma_desc_cmd;
+ uint8_t rsvd[2];
+ uint32_t start_dma_cmd;
+ uint8_t rsvd2[12];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+struct qla4_83xx_pex_dma_descriptor {
+ struct {
+ uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+ uint8_t rsvd[2];
+ uint16_t dma_desc_cmd;
+ } cmd;
+ uint64_t src_addr;
+ uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func,
+ * 8-15: desc-cmd */
+ uint8_t rsvd[24];
+} __packed;
+
+#endif
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000..463239c97
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,351 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+
+ if (is_qla40XX(ha))
+ return -EINVAL;
+
+ if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ uint32_t dev_state;
+ long reading;
+ int ret = 0;
+
+ if (is_qla40XX(ha))
+ return -EINVAL;
+
+ if (off != 0)
+ return ret;
+
+ buf[1] = 0;
+ ret = kstrtol(buf, 10, &reading);
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ switch (reading) {
+ case 0:
+ /* clear dump collection flags */
+ if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ /* Reload minidump template */
+ qla4xxx_alloc_fw_dump(ha);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Firmware template reloaded\n"));
+ }
+ break;
+ case 1:
+ /* Set flag to read dump */
+ if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+ !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ set_bit(AF_82XX_DUMP_READING, &ha->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Raw firmware dump ready for read on (%ld).\n",
+ ha->host_no));
+ }
+ break;
+ case 2:
+ /* Reset HBA and collect FW dump */
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+ if (dev_state == QLA8XXX_DEV_READY) {
+ ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
+ __func__);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ if (is_qla8022(ha) ||
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_can_perform_reset(ha))) {
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ set_bit(AF_FW_RECOVERY, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
+ __func__, ha->func_num);
+ }
+ } else
+ ql4_printk(KERN_INFO, ha,
+ "%s: Reset not performed as device state is 0x%x\n",
+ __func__, dev_state);
+
+ ha->isp_ops->idc_unlock(ha);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+ .attr = {
+ .name = "fw_dump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla4_8xxx_sysfs_read_fw_dump,
+ .write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+ char *name;
+ struct bin_attribute *attr;
+} bin_file_entries[] = {
+ { "fw_dump", &sysfs_fw_dump_attr },
+ { NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+ int ret;
+
+ for (iter = bin_file_entries; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ ql4_printk(KERN_ERR, ha,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ }
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+
+ for (iter = bin_file_entries; iter->name; iter++)
+ sysfs_remove_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+}
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla80XX(ha))
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
+ else
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
+ ha->fw_info.iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
+ ha->fw_info.bootload_patch, ha->fw_info.bootload_build);
+}
+
+static ssize_t
+qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
+}
+
+static ssize_t
+qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ qla4xxx_get_firmware_state(ha);
+ return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
+ ha->addl_fw_state);
+}
+
+static ssize_t
+qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla40XX(ha))
+ return -ENOSYS;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
+}
+
+static ssize_t
+qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla40XX(ha))
+ return -ENOSYS;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
+}
+
+static ssize_t
+qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla40XX(ha))
+ return -ENOSYS;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
+}
+
+static ssize_t
+qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
+}
+
+static ssize_t
+qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
+ ha->fw_info.fw_build_time);
+}
+
+static ssize_t
+qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
+}
+
+static ssize_t
+qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
+}
+
+static ssize_t
+qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ char *load_src = NULL;
+
+ switch (ha->fw_info.fw_load_source) {
+ case 1:
+ load_src = "Flash Primary";
+ break;
+ case 2:
+ load_src = "Flash Secondary";
+ break;
+ case 3:
+ load_src = "Host Download";
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
+}
+
+static ssize_t
+qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ qla4xxx_about_firmware(ha);
+ return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
+ ha->fw_uptime_msecs);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL);
+static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
+static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
+static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
+static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
+static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL);
+static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL);
+static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
+ NULL);
+static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
+static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version,
+ &dev_attr_serial_num,
+ &dev_attr_iscsi_version,
+ &dev_attr_optrom_version,
+ &dev_attr_board_id,
+ &dev_attr_fw_state,
+ &dev_attr_phy_port_cnt,
+ &dev_attr_phy_port_num,
+ &dev_attr_iscsi_func_cnt,
+ &dev_attr_hba_model,
+ &dev_attr_fw_timestamp,
+ &dev_attr_fw_build_user,
+ &dev_attr_fw_ext_timestamp,
+ &dev_attr_fw_load_src,
+ &dev_attr_fw_uptime,
+ NULL,
+};
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
new file mode 100644
index 000000000..9f92cbf96
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -0,0 +1,873 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_bsg.h"
+
+static int
+qla4xxx_read_flash(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t offset = 0;
+ uint32_t length = 0;
+ dma_addr_t flash_dma;
+ uint8_t *flash = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (ha->flash_state != QLFLASH_WAITING) {
+ ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+ "active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ ha->flash_state = QLFLASH_READING;
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ length = bsg_job->reply_payload.payload_len;
+
+ flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+ GFP_KERNEL);
+ if (!flash) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ flash, length);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+ ha->flash_state = QLFLASH_WAITING;
+ return rval;
+}
+
+static int
+qla4xxx_update_flash(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t length = 0;
+ uint32_t offset = 0;
+ uint32_t options = 0;
+ dma_addr_t flash_dma;
+ uint8_t *flash = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (ha->flash_state != QLFLASH_WAITING) {
+ ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+ "active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ ha->flash_state = QLFLASH_WRITING;
+ length = bsg_job->request_payload.payload_len;
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+ flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+ GFP_KERNEL);
+ if (!flash) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, flash, length);
+
+ rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+ ha->flash_state = QLFLASH_WAITING;
+ return rval;
+}
+
+static int
+qla4xxx_get_acb_state(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t status[MBOX_REG_COUNT];
+ uint32_t acb_idx;
+ uint32_t ip_idx;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 4022 and above adapters are supported */
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (bsg_job->reply_payload.payload_len < sizeof(status)) {
+ ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
+ __func__, bsg_job->reply_payload.payload_len);
+ rval = -EINVAL;
+ goto leave;
+ }
+
+ acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+ rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ status, sizeof(status));
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_read_nvram(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t offset = 0;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ dma_addr_t nvram_dma;
+ uint8_t *nvram = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 40xx adapters are supported */
+ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->reply_payload.payload_len;
+ total_len = offset + len;
+
+ /* total len should not be greater than max NVRAM size */
+ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+ ((is_qla4022(ha) || is_qla4032(ha)) &&
+ total_len > QL40X2_NVRAM_SIZE)) {
+ ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+ " nvram size, offset=%d len=%d\n",
+ __func__, offset, len);
+ goto leave;
+ }
+
+ nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+ GFP_KERNEL);
+ if (!nvram) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ nvram, len);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_update_nvram(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t offset = 0;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ dma_addr_t nvram_dma;
+ uint8_t *nvram = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->request_payload.payload_len;
+ total_len = offset + len;
+
+ /* total len should not be greater than max NVRAM size */
+ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+ ((is_qla4022(ha) || is_qla4032(ha)) &&
+ total_len > QL40X2_NVRAM_SIZE)) {
+ ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+ " nvram size, offset=%d len=%d\n",
+ __func__, offset, len);
+ goto leave;
+ }
+
+ nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+ GFP_KERNEL);
+ if (!nvram) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, nvram, len);
+
+ rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_restore_defaults(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t region = 0;
+ uint32_t field0 = 0;
+ uint32_t field1 = 0;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+ field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+
+ rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t acb_type = 0;
+ uint32_t len = 0;
+ dma_addr_t acb_dma;
+ uint8_t *acb = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 4022 and above adapters are supported */
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->reply_payload.payload_len;
+ if (len < sizeof(struct addr_ctrl_blk)) {
+ ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
+ __func__, len);
+ rval = -EINVAL;
+ goto leave;
+ }
+
+ acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ acb, len);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
+leave:
+ return rval;
+}
+
+static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint8_t *rsp_ptr = NULL;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_diag_mem_test;
+ }
+
+ bsg_reply->reply_payload_rcv_len = 0;
+ memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
+ sizeof(uint32_t) * MBOX_REG_COUNT);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
+ mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
+ mbox_cmd[7]));
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
+ mbox_sts[7]));
+
+ if (status == QLA_SUCCESS)
+ bsg_reply->result = DID_OK << 16;
+ else
+ bsg_reply->result = DID_ERROR << 16;
+
+ /* Send mbox_sts to application */
+ bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
+ rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
+ memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
+
+exit_diag_mem_test:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: bsg_reply->result = x%x, status = %s\n",
+ __func__, bsg_reply->result, STATUS(status)));
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+}
+
+static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
+ int wait_for_link)
+{
+ int status = QLA_SUCCESS;
+
+ if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
+ ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
+ __func__, ha->idc_extend_tmo);
+ if (ha->idc_extend_tmo) {
+ if (!wait_for_completion_timeout(&ha->idc_comp,
+ (ha->idc_extend_tmo * HZ))) {
+ ha->notify_idc_comp = 0;
+ ha->notify_link_up_comp = 0;
+ ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_wait;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: IDC Complete notification received\n",
+ __func__));
+ }
+ }
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: IDC Complete notification received\n",
+ __func__));
+ }
+ ha->notify_idc_comp = 0;
+
+ if (wait_for_link) {
+ if (!wait_for_completion_timeout(&ha->link_up_comp,
+ (IDC_COMP_TOV * HZ))) {
+ ha->notify_link_up_comp = 0;
+ ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_wait;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: LINK UP notification received\n",
+ __func__));
+ }
+ ha->notify_link_up_comp = 0;
+ }
+
+exit_wait:
+ return status;
+}
+
+static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd)
+{
+ uint32_t config = 0;
+ int status = QLA_SUCCESS;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ status = qla4_83xx_get_port_config(ha, &config);
+ if (status != QLA_SUCCESS)
+ goto exit_pre_loopback_config;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
+ __func__, config));
+
+ if ((config & ENABLE_INTERNAL_LOOPBACK) ||
+ (config & ENABLE_EXTERNAL_LOOPBACK)) {
+ ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid requiest\n",
+ __func__);
+ goto exit_pre_loopback_config;
+ }
+
+ if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
+ config |= ENABLE_INTERNAL_LOOPBACK;
+
+ if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
+ config |= ENABLE_EXTERNAL_LOOPBACK;
+
+ config &= ~ENABLE_DCBX;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
+ __func__, config));
+
+ ha->notify_idc_comp = 1;
+ ha->notify_link_up_comp = 1;
+
+ /* get the link state */
+ qla4xxx_get_firmware_state(ha);
+
+ status = qla4_83xx_set_port_config(ha, &config);
+ if (status != QLA_SUCCESS) {
+ ha->notify_idc_comp = 0;
+ ha->notify_link_up_comp = 0;
+ goto exit_pre_loopback_config;
+ }
+exit_pre_loopback_config:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
+ STATUS(status)));
+ return status;
+}
+
+static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd)
+{
+ int status = QLA_SUCCESS;
+ uint32_t config = 0;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ status = qla4_83xx_get_port_config(ha, &config);
+ if (status != QLA_SUCCESS)
+ goto exit_post_loopback_config;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
+ config));
+
+ if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
+ config &= ~ENABLE_INTERNAL_LOOPBACK;
+ else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
+ config &= ~ENABLE_EXTERNAL_LOOPBACK;
+
+ config |= ENABLE_DCBX;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Restore default port config=%08X\n", __func__,
+ config));
+
+ ha->notify_idc_comp = 1;
+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
+ ha->notify_link_up_comp = 1;
+
+ status = qla4_83xx_set_port_config(ha, &config);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
+ __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(AF_LOOPBACK, &ha->flags);
+ goto exit_post_loopback_config;
+ }
+
+exit_post_loopback_config:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
+ STATUS(status)));
+ return status;
+}
+
+static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint8_t *rsp_ptr = NULL;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int wait_for_link = 1;
+ int status = QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(AF_LOOPBACK, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
+ sizeof(uint32_t) * MBOX_REG_COUNT);
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ status = qla4_83xx_wait_for_loopback_config_comp(ha,
+ wait_for_link);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_TIME_OUT << 16;
+ goto restore;
+ }
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
+ mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
+ mbox_cmd[7]));
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+
+ if (status == QLA_SUCCESS)
+ bsg_reply->result = DID_OK << 16;
+ else
+ bsg_reply->result = DID_ERROR << 16;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
+ mbox_sts[7]));
+
+ /* Send mbox_sts to application */
+ bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
+ rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
+ memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
+restore:
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_ERROR << 16;
+ goto exit_loopback_cmd;
+ }
+
+ /* for pre_loopback_config() wait for LINK UP only
+ * if PHY LINK is UP */
+ if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
+ wait_for_link = 0;
+
+ status = qla4_83xx_wait_for_loopback_config_comp(ha,
+ wait_for_link);
+ if (status != QLA_SUCCESS) {
+ bsg_reply->result = DID_TIME_OUT << 16;
+ goto exit_loopback_cmd;
+ }
+ }
+exit_loopback_cmd:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: bsg_reply->result = x%x, status = %s\n",
+ __func__, bsg_reply->result, STATUS(status)));
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+}
+
+static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t diag_cmd;
+ int rval = -EINVAL;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+ diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ if (diag_cmd == MBOX_CMD_DIAG_TEST) {
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
+ case QL_DIAG_CMD_TEST_DDR_SIZE:
+ case QL_DIAG_CMD_TEST_DDR_RW:
+ case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
+ case QL_DIAG_CMD_TEST_NVRAM:
+ case QL_DIAG_CMD_TEST_FLASH_ROM:
+ case QL_DIAG_CMD_TEST_DMA_XFER:
+ case QL_DIAG_CMD_SELF_DDR_RW:
+ case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
+ /* Execute diag test for adapter RAM/FLASH */
+ ql4xxx_execute_diag_cmd(bsg_job);
+ /* Always return success as we want to sent bsg_reply
+ * to Application */
+ rval = QLA_SUCCESS;
+ break;
+
+ case QL_DIAG_CMD_TEST_INT_LOOPBACK:
+ case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
+ /* Execute diag test for Network */
+ qla4xxx_execute_diag_loopback_cmd(bsg_job);
+ /* Always return success as we want to sent bsg_reply
+ * to Application */
+ rval = QLA_SUCCESS;
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
+ __func__,
+ bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
+ }
+ } else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
+ (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
+ ql4xxx_execute_diag_cmd(bsg_job);
+ rval = QLA_SUCCESS;
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
+ __func__, diag_cmd);
+ }
+
+ return rval;
+}
+
+/**
+ * qla4xxx_process_vendor_specific - handle vendor specific bsg request
+ * @job: iscsi_bsg_job to handle
+ **/
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
+{
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QLISCSI_VND_READ_FLASH:
+ return qla4xxx_read_flash(bsg_job);
+
+ case QLISCSI_VND_UPDATE_FLASH:
+ return qla4xxx_update_flash(bsg_job);
+
+ case QLISCSI_VND_GET_ACB_STATE:
+ return qla4xxx_get_acb_state(bsg_job);
+
+ case QLISCSI_VND_READ_NVRAM:
+ return qla4xxx_read_nvram(bsg_job);
+
+ case QLISCSI_VND_UPDATE_NVRAM:
+ return qla4xxx_update_nvram(bsg_job);
+
+ case QLISCSI_VND_RESTORE_DEFAULTS:
+ return qla4xxx_restore_defaults(bsg_job);
+
+ case QLISCSI_VND_GET_ACB:
+ return qla4xxx_bsg_get_acb(bsg_job);
+
+ case QLISCSI_VND_DIAG_TEST:
+ return qla4xxx_execute_diag_test(bsg_job);
+
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
+ "0x%x\n", __func__, bsg_req->msgcode);
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return -ENOSYS;
+ }
+}
+
+/**
+ * qla4xxx_bsg_request - handle bsg request from ISCSI transport
+ * @job: iscsi_bsg_job to handle
+ */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job)
+{
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+
+ switch (bsg_req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
+ return qla4xxx_process_vendor_specific(bsg_job);
+
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
+ __func__, bsg_req->msgcode);
+ }
+
+ return -ENOSYS;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
new file mode 100644
index 000000000..88c240191
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -0,0 +1,32 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QL4_BSG_H
+#define __QL4_BSG_H
+
+/* BSG Vendor specific commands */
+#define QLISCSI_VND_READ_FLASH 1
+#define QLISCSI_VND_UPDATE_FLASH 2
+#define QLISCSI_VND_GET_ACB_STATE 3
+#define QLISCSI_VND_READ_NVRAM 4
+#define QLISCSI_VND_UPDATE_NVRAM 5
+#define QLISCSI_VND_RESTORE_DEFAULTS 6
+#define QLISCSI_VND_GET_ACB 7
+#define QLISCSI_VND_DIAG_TEST 8
+
+/* QLISCSI_VND_DIAG_CMD sub code */
+#define QL_DIAG_CMD_TEST_DDR_SIZE 0x2
+#define QL_DIAG_CMD_TEST_DDR_RW 0x3
+#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW 0x4
+#define QL_DIAG_CMD_TEST_NVRAM 0x5 /* Only ISP4XXX */
+#define QL_DIAG_CMD_TEST_FLASH_ROM 0x6
+#define QL_DIAG_CMD_TEST_INT_LOOPBACK 0x7
+#define QL_DIAG_CMD_TEST_EXT_LOOPBACK 0x8
+#define QL_DIAG_CMD_TEST_DMA_XFER 0x9 /* Only ISP4XXX */
+#define QL_DIAG_CMD_SELF_DDR_RW 0xC
+#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW 0xD
+
+#endif
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
new file mode 100644
index 000000000..5649e9ef5
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -0,0 +1,162 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+void qla4xxx_dump_buffer(void *b, uint32_t size)
+{
+ uint32_t cnt;
+ uint8_t *c = b;
+
+ printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
+ "Fh\n");
+ printk("------------------------------------------------------------"
+ "--\n");
+ for (cnt = 0; cnt < size; c++) {
+ printk("%02x", *c);
+ if (!(++cnt % 16))
+ printk("\n");
+
+ else
+ printk(" ");
+ }
+ printk(KERN_INFO "\n");
+}
+
+void qla4xxx_dump_registers(struct scsi_qla_host *ha)
+{
+ uint8_t i;
+
+ if (is_qla8022(ha)) {
+ for (i = 1; i < MBOX_REG_COUNT; i++)
+ printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
+ i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
+ return;
+ }
+
+ for (i = 0; i < MBOX_REG_COUNT; i++) {
+ printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
+ readw(&ha->reg->mailbox[i]));
+ }
+
+ printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, flash_address),
+ readw(&ha->reg->flash_address));
+ printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, flash_data),
+ readw(&ha->reg->flash_data));
+ printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, ctrl_status),
+ readw(&ha->reg->ctrl_status));
+
+ if (is_qla4010(ha)) {
+ printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
+ readw(&ha->reg->u1.isp4010.nvram));
+ } else if (is_qla4022(ha) | is_qla4032(ha)) {
+ printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
+ readw(&ha->reg->u1.isp4022.intr_mask));
+ printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
+ readw(&ha->reg->u1.isp4022.nvram));
+ printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
+ readw(&ha->reg->u1.isp4022.semaphore));
+ }
+ printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, req_q_in),
+ readw(&ha->reg->req_q_in));
+ printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, rsp_q_out),
+ readw(&ha->reg->rsp_q_out));
+
+ if (is_qla4010(ha)) {
+ printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
+ readw(&ha->reg->u2.isp4010.ext_hw_conf));
+ printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
+ readw(&ha->reg->u2.isp4010.port_ctrl));
+ printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
+ readw(&ha->reg->u2.isp4010.port_status));
+ printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
+ readw(&ha->reg->u2.isp4010.req_q_out));
+ printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
+ readw(&ha->reg->u2.isp4010.gp_out));
+ printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
+ readw(&ha->reg->u2.isp4010.gp_in));
+ printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4010.port_err_status),
+ readw(&ha->reg->u2.isp4010.port_err_status));
+ } else if (is_qla4022(ha) | is_qla4032(ha)) {
+ printk(KERN_INFO "Page 0 Registers:\n");
+ printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
+ readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
+ printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
+ readw(&ha->reg->u2.isp4022.p0.port_ctrl));
+ printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.port_status),
+ readw(&ha->reg->u2.isp4022.p0.port_status));
+ printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
+ readw(&ha->reg->u2.isp4022.p0.gp_out));
+ printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
+ readw(&ha->reg->u2.isp4022.p0.gp_in));
+ printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
+ offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
+ readw(&ha->reg->u2.isp4022.p0.port_err_status));
+ printk(KERN_INFO "Page 1 Registers:\n");
+ writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+ &ha->reg->ctrl_status);
+ printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
+ (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
+ readw(&ha->reg->u2.isp4022.p1.req_q_out));
+ writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+ &ha->reg->ctrl_status);
+ }
+}
+
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
+{
+ uint32_t halt_status1, halt_status2;
+
+ halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+ halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);
+
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__,
+ ha->pdev->device, halt_status1, halt_status2,
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
+ qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
+ ha->host_no, __func__, ha->pdev->device,
+ halt_status1, halt_status2);
+ }
+}
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
new file mode 100644
index 000000000..51c365bcf
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -0,0 +1,62 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ * Driver debug definitions.
+ */
+/* #define QL_DEBUG */ /* DEBUG messages */
+/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
+/* #define QL_DEBUG_LEVEL_4 */
+/* #define QL_DEBUG_LEVEL_5 */
+/* #define QL_DEBUG_LEVEL_7 */
+/* #define QL_DEBUG_LEVEL_9 */
+
+#define QL_DEBUG_LEVEL_2 /* ALways enable error messagess */
+#if defined(QL_DEBUG)
+#define DEBUG(x) do {x;} while (0);
+#else
+#define DEBUG(x) do {} while (0);
+#endif
+
+#if defined(QL_DEBUG_LEVEL_2)
+#define DEBUG2(x) do {if(ql4xextended_error_logging == 2) x;} while (0);
+#define DEBUG2_3(x) do {x;} while (0);
+#else /* */
+#define DEBUG2(x) do {} while (0);
+#endif /* */
+
+#if defined(QL_DEBUG_LEVEL_3)
+#define DEBUG3(x) do {if(ql4xextended_error_logging == 3) x;} while (0);
+#else /* */
+#define DEBUG3(x) do {} while (0);
+#if !defined(QL_DEBUG_LEVEL_2)
+#define DEBUG2_3(x) do {} while (0);
+#endif /* */
+#endif /* */
+#if defined(QL_DEBUG_LEVEL_4)
+#define DEBUG4(x) do {x;} while (0);
+#else /* */
+#define DEBUG4(x) do {} while (0);
+#endif /* */
+
+#if defined(QL_DEBUG_LEVEL_5)
+#define DEBUG5(x) do {x;} while (0);
+#else /* */
+#define DEBUG5(x) do {} while (0);
+#endif /* */
+
+#if defined(QL_DEBUG_LEVEL_7)
+#define DEBUG7(x) do {x; } while (0)
+#else /* */
+#define DEBUG7(x) do {} while (0)
+#endif /* */
+
+#if defined(QL_DEBUG_LEVEL_9)
+#define DEBUG9(x) do {x;} while (0);
+#else /* */
+#define DEBUG9(x) do {} while (0);
+#endif /* */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
new file mode 100644
index 000000000..8f6d0fb2c
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -0,0 +1,1090 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL4_DEF_H
+#define __QL4_DEF_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/aer.h>
+#include <linux/bsg-lib.h>
+
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
+#include <scsi/libiscsi.h>
+
+#include "ql4_dbg.h"
+#include "ql4_nx.h"
+#include "ql4_fw.h"
+#include "ql4_nvram.h"
+#include "ql4_83xx.h"
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
+#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
+#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
+#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8022
+#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324
+#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042
+#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042
+#endif
+
+#define ISP4XXX_PCI_FN_1 0x1
+#define ISP4XXX_PCI_FN_2 0x3
+
+#define QLA_SUCCESS 0
+#define QLA_ERROR 1
+#define STATUS(status) status == QLA_ERROR ? "FAILED" : "SUCCEEDED"
+
+/*
+ * Data bit definitions
+ */
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
+/**
+ * Macros to help code, maintain, etc.
+ **/
+#define ql4_printk(level, ha, format, arg...) \
+ dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
+
+
+/*
+ * Host adapter default definitions
+ ***********************************/
+#define MAX_HBAS 16
+#define MAX_BUSES 1
+#define MAX_TARGETS MAX_DEV_DB_ENTRIES
+#define MAX_LUNS 0xffff
+#define MAX_AEN_ENTRIES MAX_DEV_DB_ENTRIES
+#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES
+#define MAX_PDU_ENTRIES 32
+#define INVALID_ENTRY 0xFFFF
+#define MAX_CMDS_TO_RISC 1024
+#define MAX_SRBS MAX_CMDS_TO_RISC
+#define MBOX_AEN_REG_COUNT 8
+#define MAX_INIT_RETRIES 5
+
+/*
+ * Buffer sizes
+ */
+#define REQUEST_QUEUE_DEPTH MAX_CMDS_TO_RISC
+#define RESPONSE_QUEUE_DEPTH 64
+#define QUEUE_SIZE 64
+#define DMA_BUFFER_SIZE 512
+#define IOCB_HIWAT_CUSHION 4
+
+/*
+ * Misc
+ */
+#define MAC_ADDR_LEN 6 /* in bytes */
+#define IP_ADDR_LEN 4 /* in bytes */
+#define IPv6_ADDR_LEN 16 /* IPv6 address size */
+#define DRIVER_NAME "qla4xxx"
+
+#define MAX_LINKED_CMDS_PER_LUN 3
+#define MAX_REQS_SERVICED_PER_INTR 1
+
+#define ISCSI_IPADDR_SIZE 4 /* IP address size */
+#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
+#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
+
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
+ /* recovery timeout */
+
+#define LSDW(x) ((u32)((u64)(x)))
+#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
+
+#define DEV_DB_NON_PERSISTENT 0
+#define DEV_DB_PERSISTENT 1
+
+#define COPY_ISID(dst_isid, src_isid) { \
+ int i, j; \
+ for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
+ dst_isid[i++] = src_isid[j--]; \
+}
+
+#define SET_BITVAL(o, n, v) { \
+ if (o) \
+ n |= v; \
+ else \
+ n &= ~v; \
+}
+
+#define OP_STATE(o, f, p) { \
+ p = (o & f) ? "enable" : "disable"; \
+}
+
+/*
+ * Retry & Timeout Values
+ */
+#define MBOX_TOV 60
+#define SOFT_RESET_TOV 30
+#define RESET_INTR_TOV 3
+#define SEMAPHORE_TOV 10
+#define ADAPTER_INIT_TOV 30
+#define ADAPTER_RESET_TOV 180
+#define EXTEND_CMD_TOV 60
+#define WAIT_CMD_TOV 5
+#define EH_WAIT_CMD_TOV 120
+#define FIRMWARE_UP_TOV 60
+#define RESET_FIRMWARE_TOV 30
+#define LOGOUT_TOV 10
+#define IOCB_TOV_MARGIN 10
+#define RELOGIN_TOV 18
+#define ISNS_DEREG_TOV 5
+#define HBA_ONLINE_TOV 30
+#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
+#define BOOT_LOGIN_RESP_TOV 60
+
+#define MAX_RESET_HA_RETRIES 2
+#define FW_ALIVE_WAIT_TOV 3
+#define IDC_EXTEND_TOV 8
+#define IDC_COMP_TOV 5
+#define LINK_UP_COMP_TOV 30
+
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+
+/*
+ * SCSI Request Block structure (srb) that is placed
+ * on cmd->SCp location of every I/O [We have 22 bytes available]
+ */
+struct srb {
+ struct list_head list; /* (8) */
+ struct scsi_qla_host *ha; /* HA the SP is queued on */
+ struct ddb_entry *ddb;
+ uint16_t flags; /* (1) Status flags. */
+
+#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */
+#define SRB_GOT_SENSE BIT_4 /* sense data received. */
+ uint8_t state; /* (1) Status flags. */
+
+#define SRB_NO_QUEUE_STATE 0 /* Request is in between states */
+#define SRB_FREE_STATE 1
+#define SRB_ACTIVE_STATE 3
+#define SRB_ACTIVE_TIMEOUT_STATE 4
+#define SRB_SUSPENDED_STATE 7 /* Request in suspended state */
+
+ struct scsi_cmnd *cmd; /* (4) SCSI command block */
+ dma_addr_t dma_handle; /* (4) for unmap of single transfers */
+ struct kref srb_ref; /* reference count for this srb */
+ uint8_t err_id; /* error id */
+#define SRB_ERR_PORT 1 /* Request failed because "port down" */
+#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */
+#define SRB_ERR_DEVICE 3 /* Request failed because "device error" */
+#define SRB_ERR_OTHER 4
+
+ uint16_t reserved;
+ uint16_t iocb_tov;
+ uint16_t iocb_cnt; /* Number of used iocbs */
+ uint16_t cc_stat;
+
+ /* Used for extended sense / status continuation */
+ uint8_t *req_sense_ptr;
+ uint16_t req_sense_len;
+ uint16_t reserved2;
+};
+
+/* Mailbox request block structure */
+struct mrb {
+ struct scsi_qla_host *ha;
+ struct mbox_cmd_iocb *mbox;
+ uint32_t mbox_cmd;
+ uint16_t iocb_cnt; /* Number of used iocbs */
+ uint32_t pid;
+};
+
+/*
+ * Asynchronous Event Queue structure
+ */
+struct aen {
+ uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+};
+
+struct ql4_aen_log {
+ int count;
+ struct aen entry[MAX_AEN_ENTRIES];
+};
+
+/*
+ * Device Database (DDB) structure
+ */
+struct ddb_entry {
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *sess;
+ struct iscsi_cls_conn *conn;
+
+ uint16_t fw_ddb_index; /* DDB firmware index */
+ uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+#define DDB_CONN_CLOSE_FAILURE 0 /* 0x00000001 */
+
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+ uint16_t chap_tbl_idx;
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ uint16_t flash_ddb_idx;
+ struct dev_db_entry fw_ddb;
+ uint8_t flash_isid[6];
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
+ uint8_t isid[6];
+};
+
+/*
+ * DDB states.
+ */
+#define DDB_STATE_DEAD 0 /* We can no longer talk to
+ * this device */
+#define DDB_STATE_ONLINE 1 /* Device ready to accept
+ * commands */
+#define DDB_STATE_MISSING 2 /* Device logged off, trying
+ * to re-login */
+
+/*
+ * DDB flags.
+ */
+#define DF_RELOGIN 0 /* Relogin to device */
+#define DF_BOOT_TGT 1 /* Boot target entry */
+#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
+#define DF_FO_MASKED 3
+#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */
+
+enum qla4_work_type {
+ QLA4_EVENT_AEN,
+ QLA4_EVENT_PING_STATUS,
+};
+
+struct qla4_work_evt {
+ struct list_head list;
+ enum qla4_work_type type;
+ union {
+ struct {
+ enum iscsi_host_event_code code;
+ uint32_t data_size;
+ uint8_t data[0];
+ } aen;
+ struct {
+ uint32_t status;
+ uint32_t pid;
+ uint32_t data_size;
+ uint8_t data[0];
+ } ping;
+ } u;
+};
+
+struct ql82xx_hw_data {
+ /* Offsets for flash/nvram access (set to ~0 if not used). */
+ uint32_t flash_conf_off;
+ uint32_t flash_data_off;
+
+ uint32_t fdt_wrt_disable;
+ uint32_t fdt_erase_cmd;
+ uint32_t fdt_block_size;
+ uint32_t fdt_unprotect_sec_cmd;
+ uint32_t fdt_protect_sec_cmd;
+
+ uint32_t flt_region_flt;
+ uint32_t flt_region_fdt;
+ uint32_t flt_region_boot;
+ uint32_t flt_region_bootload;
+ uint32_t flt_region_fw;
+
+ uint32_t flt_iscsi_param;
+ uint32_t flt_region_chap;
+ uint32_t flt_chap_size;
+ uint32_t flt_region_ddb;
+ uint32_t flt_ddb_size;
+};
+
+struct qla4_8xxx_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+/* MSI-X Support */
+
+#define QLA_MSIX_DEFAULT 0x00
+#define QLA_MSIX_RSP_Q 0x01
+
+#define QLA_MSIX_ENTRIES 2
+#define QLA_MIDX_DEFAULT 0
+#define QLA_MIDX_RSP_Q 1
+
+struct ql4_msix_entry {
+ int have_irq;
+ uint16_t msix_vector;
+ uint16_t msix_entry;
+};
+
+/*
+ * ISP Operations
+ */
+struct isp_operations {
+ int (*iospace_config) (struct scsi_qla_host *ha);
+ void (*pci_config) (struct scsi_qla_host *);
+ void (*disable_intrs) (struct scsi_qla_host *);
+ void (*enable_intrs) (struct scsi_qla_host *);
+ int (*start_firmware) (struct scsi_qla_host *);
+ int (*restart_firmware) (struct scsi_qla_host *);
+ irqreturn_t (*intr_handler) (int , void *);
+ void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
+ int (*need_reset) (struct scsi_qla_host *);
+ int (*reset_chip) (struct scsi_qla_host *);
+ int (*reset_firmware) (struct scsi_qla_host *);
+ void (*queue_iocb) (struct scsi_qla_host *);
+ void (*complete_iocb) (struct scsi_qla_host *);
+ uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
+ uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
+ int (*get_sys_info) (struct scsi_qla_host *);
+ uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong);
+ void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
+ int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
+ int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
+ int (*idc_lock) (struct scsi_qla_host *);
+ void (*idc_unlock) (struct scsi_qla_host *);
+ void (*rom_lock_recovery) (struct scsi_qla_host *);
+ void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
+ void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
+};
+
+struct ql4_mdump_size_table {
+ uint32_t size;
+ uint32_t size_cmask_02;
+ uint32_t size_cmask_04;
+ uint32_t size_cmask_08;
+ uint32_t size_cmask_10;
+ uint32_t size_cmask_FF;
+ uint32_t version;
+};
+
+/*qla4xxx ipaddress configuration details */
+struct ipaddress_config {
+ uint16_t ipv4_options;
+ uint16_t tcp_options;
+ uint16_t ipv4_vlan_tag;
+ uint8_t ipv4_addr_state;
+ uint8_t ip_address[IP_ADDR_LEN];
+ uint8_t subnet_mask[IP_ADDR_LEN];
+ uint8_t gateway[IP_ADDR_LEN];
+ uint32_t ipv6_options;
+ uint32_t ipv6_addl_options;
+ uint8_t ipv6_link_local_state;
+ uint8_t ipv6_addr0_state;
+ uint8_t ipv6_addr1_state;
+ uint8_t ipv6_default_router_state;
+ uint16_t ipv6_vlan_tag;
+ struct in6_addr ipv6_link_local_addr;
+ struct in6_addr ipv6_addr0;
+ struct in6_addr ipv6_addr1;
+ struct in6_addr ipv6_default_router_addr;
+ uint16_t eth_mtu_size;
+ uint16_t ipv4_port;
+ uint16_t ipv6_port;
+ uint8_t control;
+ uint16_t ipv6_tcp_options;
+ uint8_t tcp_wsf;
+ uint8_t ipv6_tcp_wsf;
+ uint8_t ipv4_tos;
+ uint8_t ipv4_cache_id;
+ uint8_t ipv6_cache_id;
+ uint8_t ipv4_alt_cid_len;
+ uint8_t ipv4_alt_cid[11];
+ uint8_t ipv4_vid_len;
+ uint8_t ipv4_vid[11];
+ uint8_t ipv4_ttl;
+ uint16_t ipv6_flow_lbl;
+ uint8_t ipv6_traffic_class;
+ uint8_t ipv6_hop_limit;
+ uint32_t ipv6_nd_reach_time;
+ uint32_t ipv6_nd_rexmit_timer;
+ uint32_t ipv6_nd_stale_timeout;
+ uint8_t ipv6_dup_addr_detect_count;
+ uint32_t ipv6_gw_advrt_mtu;
+ uint16_t def_timeout;
+ uint8_t abort_timer;
+ uint16_t iscsi_options;
+ uint16_t iscsi_max_pdu_size;
+ uint16_t iscsi_first_burst_len;
+ uint16_t iscsi_max_outstnd_r2t;
+ uint16_t iscsi_max_burst_len;
+ uint8_t iscsi_name[224];
+};
+
+#define QL4_CHAP_MAX_NAME_LEN 256
+#define QL4_CHAP_MAX_SECRET_LEN 100
+#define LOCAL_CHAP 0
+#define BIDI_CHAP 1
+
+struct ql4_chap_format {
+ u8 intr_chap_name[QL4_CHAP_MAX_NAME_LEN];
+ u8 intr_secret[QL4_CHAP_MAX_SECRET_LEN];
+ u8 target_chap_name[QL4_CHAP_MAX_NAME_LEN];
+ u8 target_secret[QL4_CHAP_MAX_SECRET_LEN];
+ u16 intr_chap_name_length;
+ u16 intr_secret_length;
+ u16 target_chap_name_length;
+ u16 target_secret_length;
+};
+
+struct ip_address_format {
+ u8 ip_type;
+ u8 ip_address[16];
+};
+
+struct ql4_conn_info {
+ u16 dest_port;
+ struct ip_address_format dest_ipaddr;
+ struct ql4_chap_format chap;
+};
+
+struct ql4_boot_session_info {
+ u8 target_name[224];
+ struct ql4_conn_info conn_list[1];
+};
+
+struct ql4_boot_tgt_info {
+ struct ql4_boot_session_info boot_pri_sess;
+ struct ql4_boot_session_info boot_sec_sess;
+};
+
+/*
+ * Linux Host Adapter structure
+ */
+struct scsi_qla_host {
+ /* Linux adapter configuration data */
+ unsigned long flags;
+
+#define AF_ONLINE 0 /* 0x00000001 */
+#define AF_INIT_DONE 1 /* 0x00000002 */
+#define AF_MBOX_COMMAND 2 /* 0x00000004 */
+#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
+#define AF_ST_DISCOVERY_IN_PROGRESS 4 /* 0x00000010 */
+#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
+#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
+#define AF_LINK_UP 8 /* 0x00000100 */
+#define AF_LOOPBACK 9 /* 0x00000200 */
+#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
+#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
+#define AF_HA_REMOVAL 12 /* 0x00001000 */
+#define AF_INTx_ENABLED 15 /* 0x00008000 */
+#define AF_MSI_ENABLED 16 /* 0x00010000 */
+#define AF_MSIX_ENABLED 17 /* 0x00020000 */
+#define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */
+#define AF_FW_RECOVERY 19 /* 0x00080000 */
+#define AF_EEH_BUSY 20 /* 0x00100000 */
+#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
+#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
+#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
+#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
+
+ unsigned long dpc_flags;
+
+#define DPC_RESET_HA 1 /* 0x00000002 */
+#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
+#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
+#define DPC_RESET_HA_FW_CONTEXT 4 /* 0x00000010 */
+#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
+#define DPC_ISNS_RESTART 7 /* 0x00000080 */
+#define DPC_AEN 9 /* 0x00000200 */
+#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_LINK_CHANGED 18 /* 0x00040000 */
+#define DPC_RESET_ACTIVE 20 /* 0x00100000 */
+#define DPC_HA_UNRECOVERABLE 21 /* 0x00200000 ISP-82xx only*/
+#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/
+#define DPC_POST_IDC_ACK 23 /* 0x00800000 */
+#define DPC_RESTORE_ACB 24 /* 0x01000000 */
+#define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */
+
+ struct Scsi_Host *host; /* pointer to host data */
+ uint32_t tot_ddbs;
+
+ uint16_t iocb_cnt;
+ uint16_t iocb_hiwat;
+
+ /* SRB cache. */
+#define SRB_MIN_REQ 128
+ mempool_t *srb_mempool;
+
+ /* pci information */
+ struct pci_dev *pdev;
+
+ struct isp_reg __iomem *reg; /* Base I/O address */
+ unsigned long pio_address;
+ unsigned long pio_length;
+#define MIN_IOBASE_LEN 0x100
+
+ uint16_t req_q_count;
+
+ unsigned long host_no;
+
+ /* NVRAM registers */
+ struct eeprom_data *nvram;
+ spinlock_t hardware_lock ____cacheline_aligned;
+ uint32_t eeprom_cmd_data;
+
+ /* Counters for general statistics */
+ uint64_t isr_count;
+ uint64_t adapter_error_count;
+ uint64_t device_error_count;
+ uint64_t total_io_count;
+ uint64_t total_mbytes_xferred;
+ uint64_t link_failure_count;
+ uint64_t invalid_crc_count;
+ uint32_t bytes_xfered;
+ uint32_t spurious_int_count;
+ uint32_t aborted_io_count;
+ uint32_t io_timeout_count;
+ uint32_t mailbox_timeout_count;
+ uint32_t seconds_since_last_intr;
+ uint32_t seconds_since_last_heartbeat;
+ uint32_t mac_index;
+
+ /* Info Needed for Management App */
+ /* --- From GetFwVersion --- */
+ uint32_t firmware_version[2];
+ uint32_t patch_number;
+ uint32_t build_number;
+ uint32_t board_id;
+
+ /* --- From Init_FW --- */
+ /* init_cb_t *init_cb; */
+ uint16_t firmware_options;
+ uint8_t alias[32];
+ uint8_t name_string[256];
+ uint8_t heartbeat_interval;
+
+ /* --- From FlashSysInfo --- */
+ uint8_t my_mac[MAC_ADDR_LEN];
+ uint8_t serial_number[16];
+ uint16_t port_num;
+ /* --- From GetFwState --- */
+ uint32_t firmware_state;
+ uint32_t addl_fw_state;
+
+ /* Linux kernel thread */
+ struct workqueue_struct *dpc_thread;
+ struct work_struct dpc_work;
+
+ /* Linux timer thread */
+ struct timer_list timer;
+ uint32_t timer_active;
+
+ /* Recovery Timers */
+ atomic_t check_relogin_timeouts;
+ uint32_t retry_reset_ha_cnt;
+ uint32_t isp_reset_timer; /* reset test timer */
+ uint32_t nic_reset_timer; /* simulated nic reset test timer */
+ int eh_start;
+ struct list_head free_srb_q;
+ uint16_t free_srb_q_count;
+ uint16_t num_srbs_allocated;
+
+ /* DMA Memory Block */
+ void *queues;
+ dma_addr_t queues_dma;
+ unsigned long queues_len;
+
+#define MEM_ALIGN_VALUE \
+ ((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
+ sizeof(struct queue_entry))
+ /* request and response queue variables */
+ dma_addr_t request_dma;
+ struct queue_entry *request_ring;
+ struct queue_entry *request_ptr;
+ dma_addr_t response_dma;
+ struct queue_entry *response_ring;
+ struct queue_entry *response_ptr;
+ dma_addr_t shadow_regs_dma;
+ struct shadow_regs *shadow_regs;
+ uint16_t request_in; /* Current indexes. */
+ uint16_t request_out;
+ uint16_t response_in;
+ uint16_t response_out;
+
+ /* aen queue variables */
+ uint16_t aen_q_count; /* Number of available aen_q entries */
+ uint16_t aen_in; /* Current indexes */
+ uint16_t aen_out;
+ struct aen aen_q[MAX_AEN_ENTRIES];
+
+ struct ql4_aen_log aen_log;/* tracks all aens */
+
+ /* This mutex protects several threads to do mailbox commands
+ * concurrently.
+ */
+ struct mutex mbox_sem;
+
+ /* temporary mailbox status registers */
+ volatile uint8_t mbox_status_count;
+ volatile uint32_t mbox_status[MBOX_REG_COUNT];
+
+ /* FW ddb index map */
+ struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
+
+ /* Saved srb for status continuation entry processing */
+ struct srb *status_srb;
+
+ uint8_t acb_version;
+
+ /* qla82xx specific fields */
+ struct device_reg_82xx __iomem *qla4_82xx_reg; /* Base I/O address */
+ unsigned long nx_pcibase; /* Base I/O address */
+ uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */
+ unsigned long nx_db_wr_ptr; /* Door bell write pointer */
+ unsigned long first_page_group_start;
+ unsigned long first_page_group_end;
+
+ uint32_t crb_win;
+ uint32_t curr_window;
+ uint32_t ddr_mn_window;
+ unsigned long mn_win_crb;
+ unsigned long ms_win_crb;
+ int qdr_sn_window;
+ rwlock_t hw_lock;
+ uint16_t func_num;
+ int link_width;
+
+ struct qla4_8xxx_legacy_intr_set nx_legacy_intr;
+ u32 nx_crb_mask;
+
+ uint8_t revision_id;
+ uint32_t fw_heartbeat_counter;
+
+ struct isp_operations *isp_ops;
+ struct ql82xx_hw_data hw;
+
+ struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
+
+ uint32_t nx_dev_init_timeout;
+ uint32_t nx_reset_timeout;
+ void *fw_dump;
+ uint32_t fw_dump_size;
+ uint32_t fw_dump_capture_mask;
+ void *fw_dump_tmplt_hdr;
+ uint32_t fw_dump_tmplt_size;
+ uint32_t fw_dump_skip_size;
+
+ struct completion mbx_intr_comp;
+
+ struct ipaddress_config ip_config;
+ struct iscsi_iface *iface_ipv4;
+ struct iscsi_iface *iface_ipv6_0;
+ struct iscsi_iface *iface_ipv6_1;
+
+ /* --- From About Firmware --- */
+ struct about_fw_info fw_info;
+ uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */
+ uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */
+ uint16_t def_timeout; /* Default login timeout */
+
+ uint32_t flash_state;
+#define QLFLASH_WAITING 0
+#define QLFLASH_READING 1
+#define QLFLASH_WRITING 2
+ struct dma_pool *chap_dma_pool;
+ uint8_t *chap_list; /* CHAP table cache */
+ struct mutex chap_sem;
+
+#define CHAP_DMA_BLOCK_SIZE 512
+ struct workqueue_struct *task_wq;
+ unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
+#define SYSFS_FLAG_FW_SEL_BOOT 2
+ struct iscsi_boot_kset *boot_kset;
+ struct ql4_boot_tgt_info boot_tgt;
+ uint16_t phy_port_num;
+ uint16_t phy_port_cnt;
+ uint16_t iscsi_pci_func_cnt;
+ uint8_t model_name[16];
+ struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
+ uint16_t temperature;
+
+ /* event work list */
+ struct list_head work_list;
+ spinlock_t work_lock;
+
+ /* mbox iocb */
+#define MAX_MRB 128
+ struct mrb *active_mrb_array[MAX_MRB];
+ uint32_t mrb_index;
+
+ uint32_t *reg_tbl;
+ struct qla4_83xx_reset_template reset_tmplt;
+ struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
+ for ISP8324 and
+ and ISP8042 */
+ uint32_t pf_bit;
+ struct qla4_83xx_idc_information idc_info;
+ struct addr_ctrl_blk *saved_acb;
+ int notify_idc_comp;
+ int notify_link_up_comp;
+ int idc_extend_tmo;
+ struct completion idc_comp;
+ struct completion link_up_comp;
+};
+
+struct ql4_task_data {
+ struct scsi_qla_host *ha;
+ uint8_t iocb_req_cnt;
+ dma_addr_t data_dma;
+ void *req_buffer;
+ dma_addr_t req_dma;
+ uint32_t req_len;
+ void *resp_buffer;
+ dma_addr_t resp_dma;
+ uint32_t resp_len;
+ struct iscsi_task *task;
+ struct passthru_status sts;
+ struct work_struct task_work;
+};
+
+struct qla_endpoint {
+ struct Scsi_Host *host;
+ struct sockaddr_storage dst_addr;
+};
+
+struct qla_conn {
+ struct qla_endpoint *qla_ep;
+};
+
+static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ip_config.ipv6_options &
+ IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_qla4010(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
+}
+
+static inline int is_qla4022(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
+}
+
+static inline int is_qla4032(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
+}
+
+static inline int is_qla40XX(struct scsi_qla_host *ha)
+{
+ return is_qla4032(ha) || is_qla4022(ha) || is_qla4010(ha);
+}
+
+static inline int is_qla8022(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
+}
+
+static inline int is_qla8032(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
+}
+
+static inline int is_qla8042(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042;
+}
+
+static inline int is_qla80XX(struct scsi_qla_host *ha)
+{
+ return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha);
+}
+
+static inline int is_aer_supported(struct scsi_qla_host *ha)
+{
+ return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) ||
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
+}
+
+static inline int adapter_up(struct scsi_qla_host *ha)
+{
+ return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
+ (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
+ (!test_bit(AF_LOOPBACK, &ha->flags));
+}
+
+static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
+{
+ return (struct scsi_qla_host *)iscsi_host_priv(shost);
+}
+
+static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u1.isp4010.nvram :
+ &ha->reg->u1.isp4022.semaphore);
+}
+
+static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u1.isp4010.nvram :
+ &ha->reg->u1.isp4022.nvram);
+}
+
+static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u2.isp4010.ext_hw_conf :
+ &ha->reg->u2.isp4022.p0.ext_hw_conf);
+}
+
+static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u2.isp4010.port_status :
+ &ha->reg->u2.isp4022.p0.port_status);
+}
+
+static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u2.isp4010.port_ctrl :
+ &ha->reg->u2.isp4022.p0.port_ctrl);
+}
+
+static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u2.isp4010.port_err_status :
+ &ha->reg->u2.isp4022.p0.port_err_status);
+}
+
+static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ &ha->reg->u2.isp4010.gp_out :
+ &ha->reg->u2.isp4022.p0.gp_out);
+}
+
+static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
+{
+ return (is_qla4010(ha) ?
+ offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
+ offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
+}
+
+int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
+void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask);
+int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
+
+static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
+{
+ if (is_qla4010(a))
+ return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
+ QL4010_FLASH_SEM_BITS);
+ else
+ return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
+ (QL4022_RESOURCE_BITS_BASE_CODE |
+ (a->mac_index)) << 13);
+}
+
+static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
+{
+ if (is_qla4010(a))
+ ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
+ else
+ ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
+}
+
+static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
+{
+ if (is_qla4010(a))
+ return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
+ QL4010_NVRAM_SEM_BITS);
+ else
+ return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
+ (QL4022_RESOURCE_BITS_BASE_CODE |
+ (a->mac_index)) << 10);
+}
+
+static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
+{
+ if (is_qla4010(a))
+ ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
+ else
+ ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
+}
+
+static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
+{
+ if (is_qla4010(a))
+ return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
+ QL4010_DRVR_SEM_BITS);
+ else
+ return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
+ (QL4022_RESOURCE_BITS_BASE_CODE |
+ (a->mac_index)) << 1);
+}
+
+static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
+{
+ if (is_qla4010(a))
+ ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
+ else
+ ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
+}
+
+static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
+{
+ return test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+ test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+
+}
+
+static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha,
+ const uint32_t crb_reg)
+{
+ return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]);
+}
+
+static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha,
+ const uint32_t crb_reg,
+ const uint32_t value)
+{
+ ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
+#define PRESERVE_DDB_LIST 0
+#define REBUILD_DDB_LIST 1
+
+/* Defines for process_aen() */
+#define PROCESS_ALL_AENS 0
+#define FLUSH_DDB_CHANGED_AENS 1
+
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP 0
+
+#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
new file mode 100644
index 000000000..699575efc
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -0,0 +1,1443 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef _QLA4X_FW_H
+#define _QLA4X_FW_H
+
+
+#define MAX_PRST_DEV_DB_ENTRIES 64
+#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
+#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
+
+/*************************************************************************
+ *
+ * ISP 4010 I/O Register Set Structure and Definitions
+ *
+ *************************************************************************/
+
+struct port_ctrl_stat_regs {
+ __le32 ext_hw_conf; /* 0x50 R/W */
+ __le32 rsrvd0; /* 0x54 */
+ __le32 port_ctrl; /* 0x58 */
+ __le32 port_status; /* 0x5c */
+ __le32 rsrvd1[32]; /* 0x60-0xdf */
+ __le32 gp_out; /* 0xe0 */
+ __le32 gp_in; /* 0xe4 */
+ __le32 rsrvd2[5]; /* 0xe8-0xfb */
+ __le32 port_err_status; /* 0xfc */
+};
+
+struct host_mem_cfg_regs {
+ __le32 rsrvd0[12]; /* 0x50-0x79 */
+ __le32 req_q_out; /* 0x80 */
+ __le32 rsrvd1[31]; /* 0x84-0xFF */
+};
+
+/*
+ * ISP 82xx I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+ __le32 req_q_out; /* 0x0000 (R): Request Queue out-Pointer. */
+ __le32 reserve1[63]; /* Request Queue out-Pointer. (64 * 4) */
+ __le32 rsp_q_in; /* 0x0100 (R/W): Response Queue In-Pointer. */
+ __le32 reserve2[63]; /* Response Queue In-Pointer. */
+ __le32 rsp_q_out; /* 0x0200 (R/W): Response Queue Out-Pointer. */
+ __le32 reserve3[63]; /* Response Queue Out-Pointer. */
+
+ __le32 mailbox_in[8]; /* 0x0300 (R/W): Mail box In registers */
+ __le32 reserve4[24];
+ __le32 hint; /* 0x0380 (R/W): Host interrupt register */
+#define HINT_MBX_INT_PENDING BIT_0
+ __le32 reserve5[31];
+ __le32 mailbox_out[8]; /* 0x0400 (R): Mail box Out registers */
+ __le32 reserve6[56];
+
+ __le32 host_status; /* Offset 0x500 (R): host status */
+#define HSRX_RISC_MB_INT BIT_0 /* RISC to Host Mailbox interrupt */
+#define HSRX_RISC_IOCB_INT BIT_1 /* RISC to Host IOCB interrupt */
+
+ __le32 host_int; /* Offset 0x0504 (R/W): Interrupt status. */
+#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */
+};
+
+/* ISP 83xx I/O Register Set structure */
+struct device_reg_83xx {
+ __le32 mailbox_in[16]; /* 0x0000 */
+ __le32 reserve1[496]; /* 0x0040 */
+ __le32 mailbox_out[16]; /* 0x0800 */
+ __le32 reserve2[496];
+ __le32 mbox_int; /* 0x1000 */
+ __le32 reserve3[63];
+ __le32 req_q_out; /* 0x1100 */
+ __le32 reserve4[63];
+
+ __le32 rsp_q_in; /* 0x1200 */
+ __le32 reserve5[1919];
+
+ __le32 req_q_in; /* 0x3000 */
+ __le32 reserve6[3];
+ __le32 iocb_int_mask; /* 0x3010 */
+ __le32 reserve7[3];
+ __le32 rsp_q_out; /* 0x3020 */
+ __le32 reserve8[3];
+ __le32 anonymousbuff; /* 0x3030 */
+ __le32 mb_int_mask; /* 0x3034 */
+
+ __le32 host_intr; /* 0x3038 - Host Interrupt Register */
+ __le32 risc_intr; /* 0x303C - RISC Interrupt Register */
+ __le32 reserve9[544];
+ __le32 leg_int_ptr; /* 0x38C0 - Legacy Interrupt Pointer Register */
+ __le32 leg_int_trig; /* 0x38C4 - Legacy Interrupt Trigger Control */
+ __le32 leg_int_mask; /* 0x38C8 - Legacy Interrupt Mask Register */
+};
+
+#define INT_ENABLE_FW_MB (1 << 2)
+#define INT_MASK_FW_MB (1 << 2)
+
+/* remote register set (access via PCI memory read/write) */
+struct isp_reg {
+#define MBOX_REG_COUNT 8
+ __le32 mailbox[MBOX_REG_COUNT];
+
+ __le32 flash_address; /* 0x20 */
+ __le32 flash_data;
+ __le32 ctrl_status;
+
+ union {
+ struct {
+ __le32 nvram;
+ __le32 reserved1[2]; /* 0x30 */
+ } __attribute__ ((packed)) isp4010;
+ struct {
+ __le32 intr_mask;
+ __le32 nvram; /* 0x30 */
+ __le32 semaphore;
+ } __attribute__ ((packed)) isp4022;
+ } u1;
+
+ __le32 req_q_in; /* SCSI Request Queue Producer Index */
+ __le32 rsp_q_out; /* SCSI Completion Queue Consumer Index */
+
+ __le32 reserved2[4]; /* 0x40 */
+
+ union {
+ struct {
+ __le32 ext_hw_conf; /* 0x50 */
+ __le32 flow_ctrl;
+ __le32 port_ctrl;
+ __le32 port_status;
+
+ __le32 reserved3[8]; /* 0x60 */
+
+ __le32 req_q_out; /* 0x80 */
+
+ __le32 reserved4[23]; /* 0x84 */
+
+ __le32 gp_out; /* 0xe0 */
+ __le32 gp_in;
+
+ __le32 reserved5[5];
+
+ __le32 port_err_status; /* 0xfc */
+ } __attribute__ ((packed)) isp4010;
+ struct {
+ union {
+ struct port_ctrl_stat_regs p0;
+ struct host_mem_cfg_regs p1;
+ };
+ } __attribute__ ((packed)) isp4022;
+ } u2;
+}; /* 256 x100 */
+
+
+/* Semaphore Defines for 4010 */
+#define QL4010_DRVR_SEM_BITS 0x00000030
+#define QL4010_GPIO_SEM_BITS 0x000000c0
+#define QL4010_SDRAM_SEM_BITS 0x00000300
+#define QL4010_PHY_SEM_BITS 0x00000c00
+#define QL4010_NVRAM_SEM_BITS 0x00003000
+#define QL4010_FLASH_SEM_BITS 0x0000c000
+
+#define QL4010_DRVR_SEM_MASK 0x00300000
+#define QL4010_GPIO_SEM_MASK 0x00c00000
+#define QL4010_SDRAM_SEM_MASK 0x03000000
+#define QL4010_PHY_SEM_MASK 0x0c000000
+#define QL4010_NVRAM_SEM_MASK 0x30000000
+#define QL4010_FLASH_SEM_MASK 0xc0000000
+
+/* Semaphore Defines for 4022 */
+#define QL4022_RESOURCE_MASK_BASE_CODE 0x7
+#define QL4022_RESOURCE_BITS_BASE_CODE 0x4
+
+
+#define QL4022_DRVR_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (1+16))
+#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16))
+#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16))
+#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
+#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
+
+/* nvram address for 4032 */
+#define NVRAM_PORT0_BOOT_MODE 0x03b1
+#define NVRAM_PORT0_BOOT_PRI_TGT 0x03b2
+#define NVRAM_PORT0_BOOT_SEC_TGT 0x03bb
+#define NVRAM_PORT1_BOOT_MODE 0x07b1
+#define NVRAM_PORT1_BOOT_PRI_TGT 0x07b2
+#define NVRAM_PORT1_BOOT_SEC_TGT 0x07bb
+
+
+/* Page # defines for 4022 */
+#define PORT_CTRL_STAT_PAGE 0 /* 4022 */
+#define HOST_MEM_CFG_PAGE 1 /* 4022 */
+#define LOCAL_RAM_CFG_PAGE 2 /* 4022 */
+#define PROT_STAT_PAGE 3 /* 4022 */
+
+/* Register Mask - sets corresponding mask bits in the upper word */
+static inline uint32_t set_rmask(uint32_t val)
+{
+ return (val & 0xffff) | (val << 16);
+}
+
+
+static inline uint32_t clr_rmask(uint32_t val)
+{
+ return 0 | (val << 16);
+}
+
+/* ctrl_status definitions */
+#define CSR_SCSI_PAGE_SELECT 0x00000003
+#define CSR_SCSI_INTR_ENABLE 0x00000004 /* 4010 */
+#define CSR_SCSI_RESET_INTR 0x00000008
+#define CSR_SCSI_COMPLETION_INTR 0x00000010
+#define CSR_SCSI_PROCESSOR_INTR 0x00000020
+#define CSR_INTR_RISC 0x00000040
+#define CSR_BOOT_ENABLE 0x00000080
+#define CSR_NET_PAGE_SELECT 0x00000300 /* 4010 */
+#define CSR_FUNC_NUM 0x00000700 /* 4022 */
+#define CSR_NET_RESET_INTR 0x00000800 /* 4010 */
+#define CSR_FORCE_SOFT_RESET 0x00002000 /* 4022 */
+#define CSR_FATAL_ERROR 0x00004000
+#define CSR_SOFT_RESET 0x00008000
+#define ISP_CONTROL_FN_MASK CSR_FUNC_NUM
+#define ISP_CONTROL_FN0_SCSI 0x0500
+#define ISP_CONTROL_FN1_SCSI 0x0700
+
+#define INTR_PENDING (CSR_SCSI_COMPLETION_INTR |\
+ CSR_SCSI_PROCESSOR_INTR |\
+ CSR_SCSI_RESET_INTR)
+
+/* ISP InterruptMask definitions */
+#define IMR_SCSI_INTR_ENABLE 0x00000004 /* 4022 */
+
+/* ISP 4022 nvram definitions */
+#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */
+
+#define QL4010_NVRAM_SIZE 0x200
+#define QL40X2_NVRAM_SIZE 0x800
+
+/* ISP port_status definitions */
+
+/* ISP Semaphore definitions */
+
+/* ISP General Purpose Output definitions */
+#define GPOR_TOPCAT_RESET 0x00000004
+
+/* shadow registers (DMA'd from HA to system memory. read only) */
+struct shadow_regs {
+ /* SCSI Request Queue Consumer Index */
+ __le32 req_q_out; /* 0 x0 R */
+
+ /* SCSI Completion Queue Producer Index */
+ __le32 rsp_q_in; /* 4 x4 R */
+}; /* 8 x8 */
+
+
+/* External hardware configuration register */
+union external_hw_config_reg {
+ struct {
+ /* FIXME: Do we even need this? All values are
+ * referred to by 16 bit quantities. Platform and
+ * endianess issues. */
+ __le32 bReserved0:1;
+ __le32 bSDRAMProtectionMethod:2;
+ __le32 bSDRAMBanks:1;
+ __le32 bSDRAMChipWidth:1;
+ __le32 bSDRAMChipSize:2;
+ __le32 bParityDisable:1;
+ __le32 bExternalMemoryType:1;
+ __le32 bFlashBIOSWriteEnable:1;
+ __le32 bFlashUpperBankSelect:1;
+ __le32 bWriteBurst:2;
+ __le32 bReserved1:3;
+ __le32 bMask:16;
+ };
+ uint32_t Asuint32_t;
+};
+
+/* 82XX Support start */
+/* 82xx Default FLT Addresses */
+#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
+#define FA_FLASH_DESCR_ADDR_82 0xFC000
+#define FA_BOOT_LOAD_ADDR_82 0x04000
+#define FA_BOOT_CODE_ADDR_82 0x20000
+#define FA_RISC_CODE_ADDR_82 0x40000
+#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
+#define FA_FLASH_ISCSI_CHAP 0x540000
+#define FA_FLASH_CHAP_SIZE 0xC0000
+#define FA_FLASH_ISCSI_DDB 0x420000
+#define FA_FLASH_DDB_SIZE 0x080000
+
+/* Flash Description Table */
+struct qla_fdt_layout {
+ uint8_t sig[4];
+ uint16_t version;
+ uint16_t len;
+ uint16_t checksum;
+ uint8_t unused1[2];
+ uint8_t model[16];
+ uint16_t man_id;
+ uint16_t id;
+ uint8_t flags;
+ uint8_t erase_cmd;
+ uint8_t alt_erase_cmd;
+ uint8_t wrt_enable_cmd;
+ uint8_t wrt_enable_bits;
+ uint8_t wrt_sts_reg_cmd;
+ uint8_t unprotect_sec_cmd;
+ uint8_t read_man_id_cmd;
+ uint32_t block_size;
+ uint32_t alt_block_size;
+ uint32_t flash_size;
+ uint32_t wrt_enable_data;
+ uint8_t read_id_addr_len;
+ uint8_t wrt_disable_bits;
+ uint8_t read_dev_id_len;
+ uint8_t chip_erase_cmd;
+ uint16_t read_timeout;
+ uint8_t protect_sec_cmd;
+ uint8_t unused2[65];
+};
+
+/* Flash Layout Table */
+
+struct qla_flt_location {
+ uint8_t sig[4];
+ uint16_t start_lo;
+ uint16_t start_hi;
+ uint8_t version;
+ uint8_t unused[5];
+ uint16_t checksum;
+};
+
+struct qla_flt_header {
+ uint16_t version;
+ uint16_t length;
+ uint16_t checksum;
+ uint16_t unused;
+};
+
+/* 82xx FLT Regions */
+#define FLT_REG_FDT 0x1a
+#define FLT_REG_FLT 0x1c
+#define FLT_REG_BOOTLOAD_82 0x72
+#define FLT_REG_FW_82 0x74
+#define FLT_REG_FW_82_1 0x97
+#define FLT_REG_GOLD_FW_82 0x75
+#define FLT_REG_BOOT_CODE_82 0x78
+#define FLT_REG_ISCSI_PARAM 0x65
+#define FLT_REG_ISCSI_CHAP 0x63
+#define FLT_REG_ISCSI_DDB 0x6A
+
+struct qla_flt_region {
+ uint32_t code;
+ uint32_t size;
+ uint32_t start;
+ uint32_t end;
+};
+
+/*************************************************************************
+ *
+ * Mailbox Commands Structures and Definitions
+ *
+ *************************************************************************/
+
+/* Mailbox command definitions */
+#define MBOX_CMD_ABOUT_FW 0x0009
+#define MBOX_CMD_PING 0x000B
+#define PING_IPV6_PROTOCOL_ENABLE 0x1
+#define PING_IPV6_LINKLOCAL_ADDR 0x4
+#define PING_IPV6_ADDR0 0x8
+#define PING_IPV6_ADDR1 0xC
+#define MBOX_CMD_ENABLE_INTRS 0x0010
+#define INTR_DISABLE 0
+#define INTR_ENABLE 1
+#define MBOX_CMD_STOP_FW 0x0014
+#define MBOX_CMD_ABORT_TASK 0x0015
+#define MBOX_CMD_LUN_RESET 0x0016
+#define MBOX_CMD_TARGET_WARM_RESET 0x0017
+#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
+#define MBOX_CMD_GET_FW_STATUS 0x001F
+#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
+#define ISNS_DISABLE 0
+#define ISNS_ENABLE 1
+#define MBOX_CMD_COPY_FLASH 0x0024
+#define MBOX_CMD_WRITE_FLASH 0x0025
+#define MBOX_CMD_READ_FLASH 0x0026
+#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
+#define MBOX_CMD_CONN_OPEN 0x0074
+#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
+#define DDB_NOT_LOGGED_IN 0x09
+#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
+#define LOGOUT_OPTION_RELOGIN 0x0004
+#define LOGOUT_OPTION_FREE_DDB 0x0008
+#define MBOX_CMD_SET_PARAM 0x0059
+#define SET_DRVR_VERSION 0x200
+#define MAX_DRVR_VER_LEN 24
+#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
+#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
+#define MBOX_CMD_REQUEST_DATABASE_ENTRY 0x0062
+#define MBOX_CMD_SET_DATABASE_ENTRY 0x0063
+#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064
+#define DDB_DS_UNASSIGNED 0x00
+#define DDB_DS_NO_CONNECTION_ACTIVE 0x01
+#define DDB_DS_DISCOVERY 0x02
+#define DDB_DS_SESSION_ACTIVE 0x04
+#define DDB_DS_SESSION_FAILED 0x06
+#define DDB_DS_LOGIN_IN_PROCESS 0x07
+#define MBOX_CMD_GET_FW_STATE 0x0069
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
+#define MBOX_CMD_DIAG_TEST 0x0075
+#define MBOX_CMD_GET_SYS_INFO 0x0078
+#define MBOX_CMD_GET_NVRAM 0x0078 /* For 40xx */
+#define MBOX_CMD_SET_NVRAM 0x0079 /* For 40xx */
+#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
+#define MBOX_CMD_SET_ACB 0x0088
+#define MBOX_CMD_GET_ACB 0x0089
+#define MBOX_CMD_DISABLE_ACB 0x008A
+#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE 0x008B
+#define MBOX_CMD_GET_IPV6_DEST_CACHE 0x008C
+#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST 0x008D
+#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST 0x008E
+#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE 0x0090
+#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
+#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
+#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
+#define MBOX_CMD_SET_PORT_CONFIG 0x0122
+#define MBOX_CMD_GET_PORT_CONFIG 0x0123
+#define MBOX_CMD_SET_LED_CONFIG 0x0125
+#define MBOX_CMD_GET_LED_CONFIG 0x0126
+#define MBOX_CMD_MINIDUMP 0x0129
+
+/* Port Config */
+#define ENABLE_INTERNAL_LOOPBACK 0x04
+#define ENABLE_EXTERNAL_LOOPBACK 0x08
+#define ENABLE_DCBX 0x10
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
+
+/* Mailbox 1 */
+#define FW_STATE_READY 0x0000
+#define FW_STATE_CONFIG_WAIT 0x0001
+#define FW_STATE_WAIT_AUTOCONNECT 0x0002
+#define FW_STATE_ERROR 0x0004
+#define FW_STATE_CONFIGURING_IP 0x0008
+
+/* Mailbox 3 */
+#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
+#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004
+#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
+#define FW_ADDSTATE_LINK_UP 0x0010
+#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
+#define FW_ADDSTATE_LINK_SPEED_10MBPS 0x0100
+#define FW_ADDSTATE_LINK_SPEED_100MBPS 0x0200
+#define FW_ADDSTATE_LINK_SPEED_1GBPS 0x0400
+#define FW_ADDSTATE_LINK_SPEED_10GBPS 0x0800
+
+#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
+#define IPV6_DEFAULT_DDB_ENTRY 0x0001
+
+#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074
+#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
+#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
+
+#define MBOX_CMD_IDC_ACK 0x0101
+#define MBOX_CMD_IDC_TIME_EXTEND 0x0102
+#define MBOX_CMD_PORT_RESET 0x0120
+#define MBOX_CMD_SET_PORT_CONFIG 0x0122
+
+/* Mailbox status definitions */
+#define MBOX_COMPLETION_STATUS 4
+#define MBOX_STS_BUSY 0x0007
+#define MBOX_STS_INTERMEDIATE_COMPLETION 0x1000
+#define MBOX_STS_COMMAND_COMPLETE 0x4000
+#define MBOX_STS_COMMAND_ERROR 0x4005
+
+#define MBOX_ASYNC_EVENT_STATUS 8
+#define MBOX_ASTS_SYSTEM_ERROR 0x8002
+#define MBOX_ASTS_REQUEST_TRANSFER_ERROR 0x8003
+#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR 0x8004
+#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM 0x8005
+#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED 0x8006
+#define MBOX_ASTS_LINK_UP 0x8010
+#define MBOX_ASTS_LINK_DOWN 0x8011
+#define MBOX_ASTS_DATABASE_CHANGED 0x8014
+#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED 0x8015
+#define MBOX_ASTS_SELF_TEST_FAILED 0x8016
+#define MBOX_ASTS_LOGIN_FAILED 0x8017
+#define MBOX_ASTS_DNS 0x8018
+#define MBOX_ASTS_HEARTBEAT 0x8019
+#define MBOX_ASTS_NVRAM_INVALID 0x801A
+#define MBOX_ASTS_MAC_ADDRESS_CHANGED 0x801B
+#define MBOX_ASTS_IP_ADDRESS_CHANGED 0x801C
+#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
+#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
+#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
+#define MBOX_ASTS_DUPLICATE_IP 0x8025
+#define MBOX_ASTS_ARP_COMPLETE 0x8026
+#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
+#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
+#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
+#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
+#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE 0x802B
+#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED 0x802C
+#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED 0x802D
+#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
+#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
+#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
+#define MBOX_ASTS_IDC_COMPLETE 0x8100
+#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
+#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102
+#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
+#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
+#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
+
+#define ISNS_EVENT_DATA_RECEIVED 0x0000
+#define ISNS_EVENT_CONNECTION_OPENED 0x0001
+#define ISNS_EVENT_CONNECTION_FAILED 0x0002
+#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
+#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
+
+/* ACB Configuration Defines */
+#define ACB_CONFIG_DISABLE 0x00
+#define ACB_CONFIG_SET 0x01
+
+/* ACB/IP Address State Defines */
+#define IP_ADDRSTATE_UNCONFIGURED 0
+#define IP_ADDRSTATE_INVALID 1
+#define IP_ADDRSTATE_ACQUIRING 2
+#define IP_ADDRSTATE_TENTATIVE 3
+#define IP_ADDRSTATE_DEPRICATED 4
+#define IP_ADDRSTATE_PREFERRED 5
+#define IP_ADDRSTATE_DISABLING 6
+
+/* FLASH offsets */
+#define FLASH_SEGMENT_IFCB 0x04000000
+
+#define FLASH_OPT_RMW_HOLD 0
+#define FLASH_OPT_RMW_INIT 1
+#define FLASH_OPT_COMMIT 2
+#define FLASH_OPT_RMW_COMMIT 3
+
+/* generic defines to enable/disable params */
+#define QL4_PARAM_DISABLE 0
+#define QL4_PARAM_ENABLE 1
+
+/*************************************************************************/
+
+/* Host Adapter Initialization Control Block (from host) */
+struct addr_ctrl_blk {
+ uint8_t version; /* 00 */
+#define IFCB_VER_MIN 0x01
+#define IFCB_VER_MAX 0x02
+ uint8_t control; /* 01 */
+#define CTRLOPT_NEW_CONN_DISABLE 0x0002
+
+ uint16_t fw_options; /* 02-03 */
+#define FWOPT_HEARTBEAT_ENABLE 0x1000
+#define FWOPT_SESSION_MODE 0x0040
+#define FWOPT_INITIATOR_MODE 0x0020
+#define FWOPT_TARGET_MODE 0x0010
+#define FWOPT_ENABLE_CRBDB 0x8000
+
+ uint16_t exec_throttle; /* 04-05 */
+ uint8_t zio_count; /* 06 */
+ uint8_t res0; /* 07 */
+ uint16_t eth_mtu_size; /* 08-09 */
+ uint16_t add_fw_options; /* 0A-0B */
+#define ADFWOPT_SERIALIZE_TASK_MGMT 0x0400
+#define ADFWOPT_AUTOCONN_DISABLE 0x0002
+
+ uint8_t hb_interval; /* 0C */
+ uint8_t inst_num; /* 0D */
+ uint16_t res1; /* 0E-0F */
+ uint16_t rqq_consumer_idx; /* 10-11 */
+ uint16_t compq_producer_idx; /* 12-13 */
+ uint16_t rqq_len; /* 14-15 */
+ uint16_t compq_len; /* 16-17 */
+ uint32_t rqq_addr_lo; /* 18-1B */
+ uint32_t rqq_addr_hi; /* 1C-1F */
+ uint32_t compq_addr_lo; /* 20-23 */
+ uint32_t compq_addr_hi; /* 24-27 */
+ uint32_t shdwreg_addr_lo; /* 28-2B */
+ uint32_t shdwreg_addr_hi; /* 2C-2F */
+
+ uint16_t iscsi_opts; /* 30-31 */
+#define ISCSIOPTS_HEADER_DIGEST_EN 0x2000
+#define ISCSIOPTS_DATA_DIGEST_EN 0x1000
+#define ISCSIOPTS_IMMEDIATE_DATA_EN 0x0800
+#define ISCSIOPTS_INITIAL_R2T_EN 0x0400
+#define ISCSIOPTS_DATA_SEQ_INORDER_EN 0x0200
+#define ISCSIOPTS_DATA_PDU_INORDER_EN 0x0100
+#define ISCSIOPTS_CHAP_AUTH_EN 0x0080
+#define ISCSIOPTS_SNACK_EN 0x0040
+#define ISCSIOPTS_DISCOVERY_LOGOUT_EN 0x0020
+#define ISCSIOPTS_BIDI_CHAP_EN 0x0010
+#define ISCSIOPTS_DISCOVERY_AUTH_EN 0x0008
+#define ISCSIOPTS_STRICT_LOGIN_COMP_EN 0x0004
+#define ISCSIOPTS_ERL 0x0003
+ uint16_t ipv4_tcp_opts; /* 32-33 */
+#define TCPOPT_DELAYED_ACK_DISABLE 0x8000
+#define TCPOPT_DHCP_ENABLE 0x0200
+#define TCPOPT_DNS_SERVER_IP_EN 0x0100
+#define TCPOPT_SLP_DA_INFO_EN 0x0080
+#define TCPOPT_NAGLE_ALGO_DISABLE 0x0020
+#define TCPOPT_WINDOW_SCALE_DISABLE 0x0010
+#define TCPOPT_TIMER_SCALE 0x000E
+#define TCPOPT_TIMESTAMP_ENABLE 0x0001
+ uint16_t ipv4_ip_opts; /* 34-35 */
+#define IPOPT_IPV4_PROTOCOL_ENABLE 0x8000
+#define IPOPT_IPV4_TOS_EN 0x4000
+#define IPOPT_VLAN_TAGGING_ENABLE 0x2000
+#define IPOPT_GRAT_ARP_EN 0x1000
+#define IPOPT_ALT_CID_EN 0x0800
+#define IPOPT_REQ_VID_EN 0x0400
+#define IPOPT_USE_VID_EN 0x0200
+#define IPOPT_LEARN_IQN_EN 0x0100
+#define IPOPT_FRAGMENTATION_DISABLE 0x0010
+#define IPOPT_IN_FORWARD_EN 0x0008
+#define IPOPT_ARP_REDIRECT_EN 0x0004
+
+ uint16_t iscsi_max_pdu_size; /* 36-37 */
+ uint8_t ipv4_tos; /* 38 */
+ uint8_t ipv4_ttl; /* 39 */
+ uint8_t acb_version; /* 3A */
+#define ACB_NOT_SUPPORTED 0x00
+#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2
+ Features */
+
+ uint8_t res2; /* 3B */
+ uint16_t def_timeout; /* 3C-3D */
+ uint16_t iscsi_fburst_len; /* 3E-3F */
+ uint16_t iscsi_def_time2wait; /* 40-41 */
+ uint16_t iscsi_def_time2retain; /* 42-43 */
+ uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
+ uint16_t conn_ka_timeout; /* 46-47 */
+ uint16_t ipv4_port; /* 48-49 */
+ uint16_t iscsi_max_burst_len; /* 4A-4B */
+ uint32_t res5; /* 4C-4F */
+ uint8_t ipv4_addr[4]; /* 50-53 */
+ uint16_t ipv4_vlan_tag; /* 54-55 */
+ uint8_t ipv4_addr_state; /* 56 */
+ uint8_t ipv4_cacheid; /* 57 */
+ uint8_t res6[8]; /* 58-5F */
+ uint8_t ipv4_subnet[4]; /* 60-63 */
+ uint8_t res7[12]; /* 64-6F */
+ uint8_t ipv4_gw_addr[4]; /* 70-73 */
+ uint8_t res8[0xc]; /* 74-7F */
+ uint8_t pri_dns_srvr_ip[4];/* 80-83 */
+ uint8_t sec_dns_srvr_ip[4];/* 84-87 */
+ uint16_t min_eph_port; /* 88-89 */
+ uint16_t max_eph_port; /* 8A-8B */
+ uint8_t res9[4]; /* 8C-8F */
+ uint8_t iscsi_alias[32];/* 90-AF */
+ uint8_t res9_1[0x16]; /* B0-C5 */
+ uint16_t tgt_portal_grp;/* C6-C7 */
+ uint8_t abort_timer; /* C8 */
+ uint8_t ipv4_tcp_wsf; /* C9 */
+ uint8_t res10[6]; /* CA-CF */
+ uint8_t ipv4_sec_ip_addr[4]; /* D0-D3 */
+ uint8_t ipv4_dhcp_vid_len; /* D4 */
+ uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
+ uint8_t res11[20]; /* E0-F3 */
+ uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
+ uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
+ uint8_t iscsi_name[224]; /* 100-1DF */
+ uint8_t res12[32]; /* 1E0-1FF */
+ uint32_t cookie; /* 200-203 */
+ uint16_t ipv6_port; /* 204-205 */
+ uint16_t ipv6_opts; /* 206-207 */
+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
+#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000
+#define IPV6_OPT_GRAT_NEIGHBOR_ADV_EN 0x1000
+#define IPV6_OPT_REDIRECT_EN 0x0004
+
+ uint16_t ipv6_addtl_opts; /* 208-209 */
+#define IPV6_ADDOPT_IGNORE_ICMP_ECHO_REQ 0x0040
+#define IPV6_ADDOPT_MLD_EN 0x0004
+#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
+ Only */
+#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001
+
+ uint16_t ipv6_tcp_opts; /* 20A-20B */
+#define IPV6_TCPOPT_DELAYED_ACK_DISABLE 0x8000
+#define IPV6_TCPOPT_NAGLE_ALGO_DISABLE 0x0020
+#define IPV6_TCPOPT_WINDOW_SCALE_DISABLE 0x0010
+#define IPV6_TCPOPT_TIMER_SCALE 0x000E
+#define IPV6_TCPOPT_TIMESTAMP_EN 0x0001
+ uint8_t ipv6_tcp_wsf; /* 20C */
+ uint16_t ipv6_flow_lbl; /* 20D-20F */
+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
+ uint16_t ipv6_vlan_tag; /* 220-221 */
+ uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
+ uint8_t ipv6_addr0_state; /* 223 */
+ uint8_t ipv6_addr1_state; /* 224 */
+ uint8_t ipv6_dflt_rtr_state; /* 225 */
+#define IPV6_RTRSTATE_UNKNOWN 0
+#define IPV6_RTRSTATE_MANUAL 1
+#define IPV6_RTRSTATE_ADVERTISED 3
+#define IPV6_RTRSTATE_STALE 4
+
+ uint8_t ipv6_traffic_class; /* 226 */
+ uint8_t ipv6_hop_limit; /* 227 */
+ uint8_t ipv6_if_id[8]; /* 228-22F */
+ uint8_t ipv6_addr0[16]; /* 230-23F */
+ uint8_t ipv6_addr1[16]; /* 240-24F */
+ uint32_t ipv6_nd_reach_time; /* 250-253 */
+ uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
+ uint32_t ipv6_nd_stale_timeout; /* 258-25B */
+ uint8_t ipv6_dup_addr_detect_count; /* 25C */
+ uint8_t ipv6_cache_id; /* 25D */
+ uint8_t res13[18]; /* 25E-26F */
+ uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
+ uint8_t res14[140]; /* 274-2FF */
+};
+
+#define IP_ADDR_COUNT 4 /* Total 4 IP address supported in one interface
+ * One IPv4, one IPv6 link local and 2 IPv6
+ */
+
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
+struct init_fw_ctrl_blk {
+ struct addr_ctrl_blk pri;
+/* struct addr_ctrl_blk sec;*/
+};
+
+#define PRIMARI_ACB 0
+#define SECONDARY_ACB 1
+
+struct addr_ctrl_blk_def {
+ uint8_t reserved1[1]; /* 00 */
+ uint8_t control; /* 01 */
+ uint8_t reserved2[11]; /* 02-0C */
+ uint8_t inst_num; /* 0D */
+ uint8_t reserved3[34]; /* 0E-2F */
+ uint16_t iscsi_opts; /* 30-31 */
+ uint16_t ipv4_tcp_opts; /* 32-33 */
+ uint16_t ipv4_ip_opts; /* 34-35 */
+ uint16_t iscsi_max_pdu_size; /* 36-37 */
+ uint8_t ipv4_tos; /* 38 */
+ uint8_t ipv4_ttl; /* 39 */
+ uint8_t reserved4[2]; /* 3A-3B */
+ uint16_t def_timeout; /* 3C-3D */
+ uint16_t iscsi_fburst_len; /* 3E-3F */
+ uint8_t reserved5[4]; /* 40-43 */
+ uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
+ uint8_t reserved6[2]; /* 46-47 */
+ uint16_t ipv4_port; /* 48-49 */
+ uint16_t iscsi_max_burst_len; /* 4A-4B */
+ uint8_t reserved7[4]; /* 4C-4F */
+ uint8_t ipv4_addr[4]; /* 50-53 */
+ uint16_t ipv4_vlan_tag; /* 54-55 */
+ uint8_t ipv4_addr_state; /* 56 */
+ uint8_t ipv4_cacheid; /* 57 */
+ uint8_t reserved8[8]; /* 58-5F */
+ uint8_t ipv4_subnet[4]; /* 60-63 */
+ uint8_t reserved9[12]; /* 64-6F */
+ uint8_t ipv4_gw_addr[4]; /* 70-73 */
+ uint8_t reserved10[84]; /* 74-C7 */
+ uint8_t abort_timer; /* C8 */
+ uint8_t ipv4_tcp_wsf; /* C9 */
+ uint8_t reserved11[10]; /* CA-D3 */
+ uint8_t ipv4_dhcp_vid_len; /* D4 */
+ uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
+ uint8_t reserved12[20]; /* E0-F3 */
+ uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
+ uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
+ uint8_t iscsi_name[224]; /* 100-1DF */
+ uint8_t reserved13[32]; /* 1E0-1FF */
+ uint32_t cookie; /* 200-203 */
+ uint16_t ipv6_port; /* 204-205 */
+ uint16_t ipv6_opts; /* 206-207 */
+ uint16_t ipv6_addtl_opts; /* 208-209 */
+ uint16_t ipv6_tcp_opts; /* 20A-20B */
+ uint8_t ipv6_tcp_wsf; /* 20C */
+ uint16_t ipv6_flow_lbl; /* 20D-20F */
+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
+ uint16_t ipv6_vlan_tag; /* 220-221 */
+ uint8_t ipv6_lnk_lcl_addr_state; /* 222 */
+ uint8_t ipv6_addr0_state; /* 223 */
+ uint8_t ipv6_addr1_state; /* 224 */
+ uint8_t ipv6_dflt_rtr_state; /* 225 */
+ uint8_t ipv6_traffic_class; /* 226 */
+ uint8_t ipv6_hop_limit; /* 227 */
+ uint8_t ipv6_if_id[8]; /* 228-22F */
+ uint8_t ipv6_addr0[16]; /* 230-23F */
+ uint8_t ipv6_addr1[16]; /* 240-24F */
+ uint32_t ipv6_nd_reach_time; /* 250-253 */
+ uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
+ uint32_t ipv6_nd_stale_timeout; /* 258-25B */
+ uint8_t ipv6_dup_addr_detect_count; /* 25C */
+ uint8_t ipv6_cache_id; /* 25D */
+ uint8_t reserved14[18]; /* 25E-26F */
+ uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
+ uint8_t reserved15[140]; /* 274-2FF */
+};
+
+/*************************************************************************/
+
+#define MAX_CHAP_ENTRIES_40XX 128
+#define MAX_CHAP_ENTRIES_82XX 1024
+#define MAX_RESRV_CHAP_IDX 3
+#define FLASH_CHAP_OFFSET 0x06000000
+
+struct ql4_chap_table {
+ uint16_t link;
+ uint8_t flags;
+ uint8_t secret_len;
+#define MIN_CHAP_SECRET_LEN 12
+#define MAX_CHAP_SECRET_LEN 100
+ uint8_t secret[MAX_CHAP_SECRET_LEN];
+#define MAX_CHAP_NAME_LEN 256
+ uint8_t name[MAX_CHAP_NAME_LEN];
+ uint16_t reserved;
+#define CHAP_VALID_COOKIE 0x4092
+#define CHAP_INVALID_COOKIE 0xFFEE
+ uint16_t cookie;
+};
+
+struct dev_db_entry {
+ uint16_t options; /* 00-01 */
+#define DDB_OPT_DISC_SESSION 0x10
+#define DDB_OPT_TARGET 0x02 /* device is a target */
+#define DDB_OPT_IPV6_DEVICE 0x100
+#define DDB_OPT_AUTO_SENDTGTS_DISABLE 0x40
+#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
+#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
+
+#define OPT_IS_FW_ASSIGNED_IPV6 11
+#define OPT_IPV6_DEVICE 8
+#define OPT_AUTO_SENDTGTS_DISABLE 6
+#define OPT_DISC_SESSION 4
+#define OPT_ENTRY_STATE 3
+ uint16_t exec_throttle; /* 02-03 */
+ uint16_t exec_count; /* 04-05 */
+ uint16_t res0; /* 06-07 */
+ uint16_t iscsi_options; /* 08-09 */
+#define ISCSIOPT_HEADER_DIGEST_EN 13
+#define ISCSIOPT_DATA_DIGEST_EN 12
+#define ISCSIOPT_IMMEDIATE_DATA_EN 11
+#define ISCSIOPT_INITIAL_R2T_EN 10
+#define ISCSIOPT_DATA_SEQ_IN_ORDER 9
+#define ISCSIOPT_DATA_PDU_IN_ORDER 8
+#define ISCSIOPT_CHAP_AUTH_EN 7
+#define ISCSIOPT_SNACK_REQ_EN 6
+#define ISCSIOPT_DISCOVERY_LOGOUT_EN 5
+#define ISCSIOPT_BIDI_CHAP_EN 4
+#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL 3
+#define ISCSIOPT_ERL1 1
+#define ISCSIOPT_ERL0 0
+
+ uint16_t tcp_options; /* 0A-0B */
+#define TCPOPT_TIMESTAMP_STAT 6
+#define TCPOPT_NAGLE_DISABLE 5
+#define TCPOPT_WSF_DISABLE 4
+#define TCPOPT_TIMER_SCALE3 3
+#define TCPOPT_TIMER_SCALE2 2
+#define TCPOPT_TIMER_SCALE1 1
+#define TCPOPT_TIMESTAMP_EN 0
+
+ uint16_t ip_options; /* 0C-0D */
+#define IPOPT_FRAGMENT_DISABLE 4
+
+ uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
+#define BYTE_UNITS 512
+ uint32_t res1; /* 10-13 */
+ uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
+ uint16_t iscsi_first_burst_len; /* 16-17 */
+ uint16_t iscsi_def_time2wait; /* 18-19 */
+ uint16_t iscsi_def_time2retain; /* 1A-1B */
+ uint16_t iscsi_max_outsnd_r2t; /* 1C-1D */
+ uint16_t ka_timeout; /* 1E-1F */
+ uint8_t isid[6]; /* 20-25 big-endian, must be converted
+ * to little-endian */
+ uint16_t tsid; /* 26-27 */
+ uint16_t port; /* 28-29 */
+ uint16_t iscsi_max_burst_len; /* 2A-2B */
+ uint16_t def_timeout; /* 2C-2D */
+ uint16_t res2; /* 2E-2F */
+ uint8_t ip_addr[0x10]; /* 30-3F */
+ uint8_t iscsi_alias[0x20]; /* 40-5F */
+ uint8_t tgt_addr[0x20]; /* 60-7F */
+ uint16_t mss; /* 80-81 */
+ uint16_t res3; /* 82-83 */
+ uint16_t lcl_port; /* 84-85 */
+ uint8_t ipv4_tos; /* 86 */
+ uint16_t ipv6_flow_lbl; /* 87-89 */
+ uint8_t res4[0x36]; /* 8A-BF */
+ uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
+ * pointer to a string so we
+ * don't have to reserve so
+ * much RAM */
+ uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
+ uint8_t res5[0x10]; /* 1B0-1BF */
+#define DDB_NO_LINK 0xFFFF
+#define DDB_ISNS 0xFFFD
+ uint16_t ddb_link; /* 1C0-1C1 */
+ uint16_t chap_tbl_idx; /* 1C2-1C3 */
+ uint16_t tgt_portal_grp; /* 1C4-1C5 */
+ uint8_t tcp_xmt_wsf; /* 1C6 */
+ uint8_t tcp_rcv_wsf; /* 1C7 */
+ uint32_t stat_sn; /* 1C8-1CB */
+ uint32_t exp_stat_sn; /* 1CC-1CF */
+ uint8_t res6[0x2b]; /* 1D0-1FB */
+#define DDB_VALID_COOKIE 0x9034
+ uint16_t cookie; /* 1FC-1FD */
+ uint16_t len; /* 1FE-1FF */
+};
+
+/*************************************************************************/
+
+/* Flash definitions */
+
+#define FLASH_OFFSET_SYS_INFO 0x02000000
+#define FLASH_DEFAULTBLOCKSIZE 0x20000
+#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
+ * for EOF
+ * signature */
+#define FLASH_RAW_ACCESS_ADDR 0x8e000000
+
+#define BOOT_PARAM_OFFSET_PORT0 0x3b0
+#define BOOT_PARAM_OFFSET_PORT1 0x7b0
+
+#define FLASH_OFFSET_DB_INFO 0x05000000
+#define FLASH_OFFSET_DB_END (FLASH_OFFSET_DB_INFO + 0x7fff)
+
+
+struct sys_info_phys_addr {
+ uint8_t address[6]; /* 00-05 */
+ uint8_t filler[2]; /* 06-07 */
+};
+
+struct flash_sys_info {
+ uint32_t cookie; /* 00-03 */
+ uint32_t physAddrCount; /* 04-07 */
+ struct sys_info_phys_addr physAddr[4]; /* 08-27 */
+ uint8_t vendorId[128]; /* 28-A7 */
+ uint8_t productId[128]; /* A8-127 */
+ uint32_t serialNumber; /* 128-12B */
+
+ /* PCI Configuration values */
+ uint32_t pciDeviceVendor; /* 12C-12F */
+ uint32_t pciDeviceId; /* 130-133 */
+ uint32_t pciSubsysVendor; /* 134-137 */
+ uint32_t pciSubsysId; /* 138-13B */
+
+ /* This validates version 1. */
+ uint32_t crumbs; /* 13C-13F */
+
+ uint32_t enterpriseNumber; /* 140-143 */
+
+ uint32_t mtu; /* 144-147 */
+ uint32_t reserved0; /* 148-14b */
+ uint32_t crumbs2; /* 14c-14f */
+ uint8_t acSerialNumber[16]; /* 150-15f */
+ uint32_t crumbs3; /* 160-16f */
+
+ /* Leave this last in the struct so it is declared invalid if
+ * any new items are added.
+ */
+ uint32_t reserved1[39]; /* 170-1ff */
+}; /* 200 */
+
+struct mbx_sys_info {
+ uint8_t board_id_str[16]; /* 0-f Keep board ID string first */
+ /* in this structure for GUI. */
+ uint16_t board_id; /* 10-11 board ID code */
+ uint16_t phys_port_cnt; /* 12-13 number of physical network ports */
+ uint16_t port_num; /* 14-15 network port for this PCI function */
+ /* (port 0 is first port) */
+ uint8_t mac_addr[6]; /* 16-1b MAC address for this PCI function */
+ uint32_t iscsi_pci_func_cnt; /* 1c-1f number of iSCSI PCI functions */
+ uint32_t pci_func; /* 20-23 this PCI function */
+ unsigned char serial_number[16]; /* 24-33 serial number string */
+ uint8_t reserved[12]; /* 34-3f */
+};
+
+struct about_fw_info {
+ uint16_t fw_major; /* 00 - 01 */
+ uint16_t fw_minor; /* 02 - 03 */
+ uint16_t fw_patch; /* 04 - 05 */
+ uint16_t fw_build; /* 06 - 07 */
+ uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
+ uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
+ uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
+ uint16_t fw_load_source; /* 38 - 39 */
+ /* 1 = Flash Primary,
+ 2 = Flash Secondary,
+ 3 = Host Download
+ */
+ uint8_t reserved1[6]; /* 3A - 3F */
+ uint16_t iscsi_major; /* 40 - 41 */
+ uint16_t iscsi_minor; /* 42 - 43 */
+ uint16_t bootload_major; /* 44 - 45 */
+ uint16_t bootload_minor; /* 46 - 47 */
+ uint16_t bootload_patch; /* 48 - 49 */
+ uint16_t bootload_build; /* 4A - 4B */
+ uint8_t extended_timestamp[180];/* 4C - FF */
+};
+
+struct crash_record {
+ uint16_t fw_major_version; /* 00 - 01 */
+ uint16_t fw_minor_version; /* 02 - 03 */
+ uint16_t fw_patch_version; /* 04 - 05 */
+ uint16_t fw_build_version; /* 06 - 07 */
+
+ uint8_t build_date[16]; /* 08 - 17 */
+ uint8_t build_time[16]; /* 18 - 27 */
+ uint8_t build_user[16]; /* 28 - 37 */
+ uint8_t card_serial_num[16]; /* 38 - 47 */
+
+ uint32_t time_of_crash_in_secs; /* 48 - 4B */
+ uint32_t time_of_crash_in_ms; /* 4C - 4F */
+
+ uint16_t out_RISC_sd_num_frames; /* 50 - 51 */
+ uint16_t OAP_sd_num_words; /* 52 - 53 */
+ uint16_t IAP_sd_num_frames; /* 54 - 55 */
+ uint16_t in_RISC_sd_num_words; /* 56 - 57 */
+
+ uint8_t reserved1[28]; /* 58 - 7F */
+
+ uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
+ uint8_t in_RISC_reg_dump[256]; /*180 -27F */
+ uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */
+};
+
+struct conn_event_log_entry {
+#define MAX_CONN_EVENT_LOG_ENTRIES 100
+ uint32_t timestamp_sec; /* 00 - 03 seconds since boot */
+ uint32_t timestamp_ms; /* 04 - 07 milliseconds since boot */
+ uint16_t device_index; /* 08 - 09 */
+ uint16_t fw_conn_state; /* 0A - 0B */
+ uint8_t event_type; /* 0C - 0C */
+ uint8_t error_code; /* 0D - 0D */
+ uint16_t error_code_detail; /* 0E - 0F */
+ uint8_t num_consecutive_events; /* 10 - 10 */
+ uint8_t rsvd[3]; /* 11 - 13 */
+};
+
+/*************************************************************************
+ *
+ * IOCB Commands Structures and Definitions
+ *
+ *************************************************************************/
+#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */
+#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */
+#define IOCB_MAX_EXT_SENSEDATA_LEN 60 /* Bytes of extended sense data */
+
+/* IOCB header structure */
+struct qla4_header {
+ uint8_t entryType;
+#define ET_STATUS 0x03
+#define ET_MARKER 0x04
+#define ET_CONT_T1 0x0A
+#define ET_STATUS_CONTINUATION 0x10
+#define ET_CMND_T3 0x19
+#define ET_PASSTHRU0 0x3A
+#define ET_PASSTHRU_STATUS 0x3C
+#define ET_MBOX_CMD 0x38
+#define ET_MBOX_STATUS 0x39
+
+ uint8_t entryStatus;
+ uint8_t systemDefined;
+#define SD_ISCSI_PDU 0x01
+ uint8_t entryCount;
+
+ /* SyetemDefined definition */
+};
+
+/* Generic queue entry structure*/
+struct queue_entry {
+ uint8_t data[60];
+ uint32_t signature;
+
+};
+
+/* 64 bit addressing segment counts*/
+
+#define COMMAND_SEG_A64 1
+#define CONTINUE_SEG_A64 5
+
+/* 64 bit addressing segment definition*/
+
+struct data_seg_a64 {
+ struct {
+ uint32_t addrLow;
+ uint32_t addrHigh;
+
+ } base;
+
+ uint32_t count;
+
+};
+
+/* Command Type 3 entry structure*/
+
+struct command_t3_entry {
+ struct qla4_header hdr; /* 00-03 */
+
+ uint32_t handle; /* 04-07 */
+ uint16_t target; /* 08-09 */
+ uint16_t connection_id; /* 0A-0B */
+
+ uint8_t control_flags; /* 0C */
+
+ /* data direction (bits 5-6) */
+#define CF_WRITE 0x20
+#define CF_READ 0x40
+#define CF_NO_DATA 0x00
+
+ /* task attributes (bits 2-0) */
+#define CF_HEAD_TAG 0x03
+#define CF_ORDERED_TAG 0x02
+#define CF_SIMPLE_TAG 0x01
+
+ /* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS
+ * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS
+ * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET
+ * PROPERLY.
+ */
+ uint8_t state_flags; /* 0D */
+ uint8_t cmdRefNum; /* 0E */
+ uint8_t reserved1; /* 0F */
+ uint8_t cdb[IOCB_MAX_CDB_LEN]; /* 10-1F */
+ struct scsi_lun lun; /* FCP LUN (BE). */
+ uint32_t cmdSeqNum; /* 28-2B */
+ uint16_t timeout; /* 2C-2D */
+ uint16_t dataSegCnt; /* 2E-2F */
+ uint32_t ttlByteCnt; /* 30-33 */
+ struct data_seg_a64 dataseg[COMMAND_SEG_A64]; /* 34-3F */
+
+};
+
+
+/* Continuation Type 1 entry structure*/
+struct continuation_t1_entry {
+ struct qla4_header hdr;
+
+ struct data_seg_a64 dataseg[CONTINUE_SEG_A64];
+
+};
+
+/* Parameterize for 64 or 32 bits */
+#define COMMAND_SEG COMMAND_SEG_A64
+#define CONTINUE_SEG CONTINUE_SEG_A64
+
+#define ET_COMMAND ET_CMND_T3
+#define ET_CONTINUE ET_CONT_T1
+
+/* Marker entry structure*/
+struct qla4_marker_entry {
+ struct qla4_header hdr; /* 00-03 */
+
+ uint32_t system_defined; /* 04-07 */
+ uint16_t target; /* 08-09 */
+ uint16_t modifier; /* 0A-0B */
+#define MM_LUN_RESET 0
+#define MM_TGT_WARM_RESET 1
+
+ uint16_t flags; /* 0C-0D */
+ uint16_t reserved1; /* 0E-0F */
+ struct scsi_lun lun; /* FCP LUN (BE). */
+ uint64_t reserved2; /* 18-1F */
+ uint64_t reserved3; /* 20-27 */
+ uint64_t reserved4; /* 28-2F */
+ uint64_t reserved5; /* 30-37 */
+ uint64_t reserved6; /* 38-3F */
+};
+
+/* Status entry structure*/
+struct status_entry {
+ struct qla4_header hdr; /* 00-03 */
+
+ uint32_t handle; /* 04-07 */
+
+ uint8_t scsiStatus; /* 08 */
+#define SCSI_CHECK_CONDITION 0x02
+
+ uint8_t iscsiFlags; /* 09 */
+#define ISCSI_FLAG_RESIDUAL_UNDER 0x02
+#define ISCSI_FLAG_RESIDUAL_OVER 0x04
+
+ uint8_t iscsiResponse; /* 0A */
+
+ uint8_t completionStatus; /* 0B */
+#define SCS_COMPLETE 0x00
+#define SCS_INCOMPLETE 0x01
+#define SCS_RESET_OCCURRED 0x04
+#define SCS_ABORTED 0x05
+#define SCS_TIMEOUT 0x06
+#define SCS_DATA_OVERRUN 0x07
+#define SCS_DATA_UNDERRUN 0x15
+#define SCS_QUEUE_FULL 0x1C
+#define SCS_DEVICE_UNAVAILABLE 0x28
+#define SCS_DEVICE_LOGGED_OUT 0x29
+
+ uint8_t reserved1; /* 0C */
+
+ /* state_flags MUST be at the same location as state_flags in
+ * the Command_T3/4_Entry */
+ uint8_t state_flags; /* 0D */
+
+ uint16_t senseDataByteCnt; /* 0E-0F */
+ uint32_t residualByteCnt; /* 10-13 */
+ uint32_t bidiResidualByteCnt; /* 14-17 */
+ uint32_t expSeqNum; /* 18-1B */
+ uint32_t maxCmdSeqNum; /* 1C-1F */
+ uint8_t senseData[IOCB_MAX_SENSEDATA_LEN]; /* 20-3F */
+
+};
+
+/* Status Continuation entry */
+struct status_cont_entry {
+ struct qla4_header hdr; /* 00-03 */
+ uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */
+};
+
+struct passthru0 {
+ struct qla4_header hdr; /* 00-03 */
+ uint32_t handle; /* 04-07 */
+ uint16_t target; /* 08-09 */
+ uint16_t connection_id; /* 0A-0B */
+#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000)
+
+ uint16_t control_flags; /* 0C-0D */
+#define PT_FLAG_ETHERNET_FRAME 0x8000
+#define PT_FLAG_ISNS_PDU 0x8000
+#define PT_FLAG_SEND_BUFFER 0x0200
+#define PT_FLAG_WAIT_4_RESPONSE 0x0100
+#define PT_FLAG_ISCSI_PDU 0x1000
+
+ uint16_t timeout; /* 0E-0F */
+#define PT_DEFAULT_TIMEOUT 30 /* seconds */
+
+ struct data_seg_a64 out_dsd; /* 10-1B */
+ uint32_t res1; /* 1C-1F */
+ struct data_seg_a64 in_dsd; /* 20-2B */
+ uint8_t res2[20]; /* 2C-3F */
+};
+
+struct passthru_status {
+ struct qla4_header hdr; /* 00-03 */
+ uint32_t handle; /* 04-07 */
+ uint16_t target; /* 08-09 */
+ uint16_t connectionID; /* 0A-0B */
+
+ uint8_t completionStatus; /* 0C */
+#define PASSTHRU_STATUS_COMPLETE 0x01
+
+ uint8_t residualFlags; /* 0D */
+
+ uint16_t timeout; /* 0E-0F */
+ uint16_t portNumber; /* 10-11 */
+ uint8_t res1[10]; /* 12-1B */
+ uint32_t outResidual; /* 1C-1F */
+ uint8_t res2[12]; /* 20-2B */
+ uint32_t inResidual; /* 2C-2F */
+ uint8_t res4[16]; /* 30-3F */
+};
+
+struct mbox_cmd_iocb {
+ struct qla4_header hdr; /* 00-03 */
+ uint32_t handle; /* 04-07 */
+ uint32_t in_mbox[8]; /* 08-25 */
+ uint32_t res1[6]; /* 26-3F */
+};
+
+struct mbox_status_iocb {
+ struct qla4_header hdr; /* 00-03 */
+ uint32_t handle; /* 04-07 */
+ uint32_t out_mbox[8]; /* 08-25 */
+ uint32_t res1[6]; /* 26-3F */
+};
+
+/*
+ * ISP queue - response queue entry definition.
+ */
+struct response {
+ uint8_t data[60];
+ uint32_t signature;
+#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
+};
+
+struct ql_iscsi_stats {
+ uint64_t mac_tx_frames; /* 0000–0007 */
+ uint64_t mac_tx_bytes; /* 0008–000F */
+ uint64_t mac_tx_multicast_frames; /* 0010–0017 */
+ uint64_t mac_tx_broadcast_frames; /* 0018–001F */
+ uint64_t mac_tx_pause_frames; /* 0020–0027 */
+ uint64_t mac_tx_control_frames; /* 0028–002F */
+ uint64_t mac_tx_deferral; /* 0030–0037 */
+ uint64_t mac_tx_excess_deferral; /* 0038–003F */
+ uint64_t mac_tx_late_collision; /* 0040–0047 */
+ uint64_t mac_tx_abort; /* 0048–004F */
+ uint64_t mac_tx_single_collision; /* 0050–0057 */
+ uint64_t mac_tx_multiple_collision; /* 0058–005F */
+ uint64_t mac_tx_collision; /* 0060–0067 */
+ uint64_t mac_tx_frames_dropped; /* 0068–006F */
+ uint64_t mac_tx_jumbo_frames; /* 0070–0077 */
+ uint64_t mac_rx_frames; /* 0078–007F */
+ uint64_t mac_rx_bytes; /* 0080–0087 */
+ uint64_t mac_rx_unknown_control_frames; /* 0088–008F */
+ uint64_t mac_rx_pause_frames; /* 0090–0097 */
+ uint64_t mac_rx_control_frames; /* 0098–009F */
+ uint64_t mac_rx_dribble; /* 00A0–00A7 */
+ uint64_t mac_rx_frame_length_error; /* 00A8–00AF */
+ uint64_t mac_rx_jabber; /* 00B0–00B7 */
+ uint64_t mac_rx_carrier_sense_error; /* 00B8–00BF */
+ uint64_t mac_rx_frame_discarded; /* 00C0–00C7 */
+ uint64_t mac_rx_frames_dropped; /* 00C8–00CF */
+ uint64_t mac_crc_error; /* 00D0–00D7 */
+ uint64_t mac_encoding_error; /* 00D8–00DF */
+ uint64_t mac_rx_length_error_large; /* 00E0–00E7 */
+ uint64_t mac_rx_length_error_small; /* 00E8–00EF */
+ uint64_t mac_rx_multicast_frames; /* 00F0–00F7 */
+ uint64_t mac_rx_broadcast_frames; /* 00F8–00FF */
+ uint64_t ip_tx_packets; /* 0100–0107 */
+ uint64_t ip_tx_bytes; /* 0108–010F */
+ uint64_t ip_tx_fragments; /* 0110–0117 */
+ uint64_t ip_rx_packets; /* 0118–011F */
+ uint64_t ip_rx_bytes; /* 0120–0127 */
+ uint64_t ip_rx_fragments; /* 0128–012F */
+ uint64_t ip_datagram_reassembly; /* 0130–0137 */
+ uint64_t ip_invalid_address_error; /* 0138–013F */
+ uint64_t ip_error_packets; /* 0140–0147 */
+ uint64_t ip_fragrx_overlap; /* 0148–014F */
+ uint64_t ip_fragrx_outoforder; /* 0150–0157 */
+ uint64_t ip_datagram_reassembly_timeout; /* 0158–015F */
+ uint64_t ipv6_tx_packets; /* 0160–0167 */
+ uint64_t ipv6_tx_bytes; /* 0168–016F */
+ uint64_t ipv6_tx_fragments; /* 0170–0177 */
+ uint64_t ipv6_rx_packets; /* 0178–017F */
+ uint64_t ipv6_rx_bytes; /* 0180–0187 */
+ uint64_t ipv6_rx_fragments; /* 0188–018F */
+ uint64_t ipv6_datagram_reassembly; /* 0190–0197 */
+ uint64_t ipv6_invalid_address_error; /* 0198–019F */
+ uint64_t ipv6_error_packets; /* 01A0–01A7 */
+ uint64_t ipv6_fragrx_overlap; /* 01A8–01AF */
+ uint64_t ipv6_fragrx_outoforder; /* 01B0–01B7 */
+ uint64_t ipv6_datagram_reassembly_timeout; /* 01B8–01BF */
+ uint64_t tcp_tx_segments; /* 01C0–01C7 */
+ uint64_t tcp_tx_bytes; /* 01C8–01CF */
+ uint64_t tcp_rx_segments; /* 01D0–01D7 */
+ uint64_t tcp_rx_byte; /* 01D8–01DF */
+ uint64_t tcp_duplicate_ack_retx; /* 01E0–01E7 */
+ uint64_t tcp_retx_timer_expired; /* 01E8–01EF */
+ uint64_t tcp_rx_duplicate_ack; /* 01F0–01F7 */
+ uint64_t tcp_rx_pure_ackr; /* 01F8–01FF */
+ uint64_t tcp_tx_delayed_ack; /* 0200–0207 */
+ uint64_t tcp_tx_pure_ack; /* 0208–020F */
+ uint64_t tcp_rx_segment_error; /* 0210–0217 */
+ uint64_t tcp_rx_segment_outoforder; /* 0218–021F */
+ uint64_t tcp_rx_window_probe; /* 0220–0227 */
+ uint64_t tcp_rx_window_update; /* 0228–022F */
+ uint64_t tcp_tx_window_probe_persist; /* 0230–0237 */
+ uint64_t ecc_error_correction; /* 0238–023F */
+ uint64_t iscsi_pdu_tx; /* 0240-0247 */
+ uint64_t iscsi_data_bytes_tx; /* 0248-024F */
+ uint64_t iscsi_pdu_rx; /* 0250-0257 */
+ uint64_t iscsi_data_bytes_rx; /* 0258-025F */
+ uint64_t iscsi_io_completed; /* 0260-0267 */
+ uint64_t iscsi_unexpected_io_rx; /* 0268-026F */
+ uint64_t iscsi_format_error; /* 0270-0277 */
+ uint64_t iscsi_hdr_digest_error; /* 0278-027F */
+ uint64_t iscsi_data_digest_error; /* 0280-0287 */
+ uint64_t iscsi_sequence_error; /* 0288-028F */
+ uint32_t tx_cmd_pdu; /* 0290-0293 */
+ uint32_t tx_resp_pdu; /* 0294-0297 */
+ uint32_t rx_cmd_pdu; /* 0298-029B */
+ uint32_t rx_resp_pdu; /* 029C-029F */
+
+ uint64_t tx_data_octets; /* 02A0-02A7 */
+ uint64_t rx_data_octets; /* 02A8-02AF */
+
+ uint32_t hdr_digest_err; /* 02B0–02B3 */
+ uint32_t data_digest_err; /* 02B4–02B7 */
+ uint32_t conn_timeout_err; /* 02B8–02BB */
+ uint32_t framing_err; /* 02BC–02BF */
+
+ uint32_t tx_nopout_pdus; /* 02C0–02C3 */
+ uint32_t tx_scsi_cmd_pdus; /* 02C4–02C7 */
+ uint32_t tx_tmf_cmd_pdus; /* 02C8–02CB */
+ uint32_t tx_login_cmd_pdus; /* 02CC–02CF */
+ uint32_t tx_text_cmd_pdus; /* 02D0–02D3 */
+ uint32_t tx_scsi_write_pdus; /* 02D4–02D7 */
+ uint32_t tx_logout_cmd_pdus; /* 02D8–02DB */
+ uint32_t tx_snack_req_pdus; /* 02DC–02DF */
+
+ uint32_t rx_nopin_pdus; /* 02E0–02E3 */
+ uint32_t rx_scsi_resp_pdus; /* 02E4–02E7 */
+ uint32_t rx_tmf_resp_pdus; /* 02E8–02EB */
+ uint32_t rx_login_resp_pdus; /* 02EC–02EF */
+ uint32_t rx_text_resp_pdus; /* 02F0–02F3 */
+ uint32_t rx_scsi_read_pdus; /* 02F4–02F7 */
+ uint32_t rx_logout_resp_pdus; /* 02F8–02FB */
+
+ uint32_t rx_r2t_pdus; /* 02FC–02FF */
+ uint32_t rx_async_pdus; /* 0300–0303 */
+ uint32_t rx_reject_pdus; /* 0304–0307 */
+
+ uint8_t reserved2[264]; /* 0x0308 - 0x040F */
+};
+
+#define QLA8XXX_DBG_STATE_ARRAY_LEN 16
+#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA8XXX_DBG_RSVD_ARRAY_LEN 8
+#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16
+#define QLA83XX_SS_OCM_WNDREG_INDEX 3
+#define QLA83XX_SS_PCI_INDEX 0
+#define QLA8022_TEMPLATE_CAP_OFFSET 172
+#define QLA83XX_TEMPLATE_CAP_OFFSET 268
+#define QLA80XX_TEMPLATE_RESERVED_BITS 16
+
+struct qla4_8xxx_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
+ uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
+ uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];
+};
+
+#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
new file mode 100644
index 000000000..2559144f5
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -0,0 +1,293 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA4x_GBL_H
+#define __QLA4x_GBL_H
+
+struct iscsi_cls_conn;
+
+int qla4xxx_hw_reset(struct scsi_qla_host *ha);
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
+int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
+int qla4xxx_soft_reset(struct scsi_qla_host *ha);
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
+
+void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
+void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
+
+int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
+int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
+ uint64_t lun);
+int qla4xxx_reset_target(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
+int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+ uint32_t offset, uint32_t len);
+int qla4xxx_get_firmware_status(struct scsi_qla_host *ha);
+int qla4xxx_get_firmware_state(struct scsi_qla_host *ha);
+int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha);
+
+/* FIXME: Goodness! this really wants a small struct to hold the
+ * parameters. On x86 the args will get passed on the stack! */
+int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
+ uint16_t fw_ddb_index,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma,
+ uint32_t *num_valid_ddb_entries,
+ uint32_t *next_ddb_index,
+ uint32_t *fw_ddb_device_state,
+ uint32_t *conn_err_detail,
+ uint16_t *tcp_source_port_num,
+ uint16_t *connection_id);
+
+int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
+ dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts);
+uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma);
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+ uint16_t fw_ddb_index,
+ uint16_t connection_id,
+ uint16_t option);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len);
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+ uint32_t ip_idx, uint32_t *sts);
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session);
+u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset);
+void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
+int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
+void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status);
+int qla4xxx_init_rings(struct scsi_qla_host *ha);
+void qla4xxx_srb_compl(struct kref *ref);
+struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
+ uint32_t index);
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_error);
+void qla4xxx_dump_buffer(void *b, uint32_t size);
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod);
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+ uint32_t offset, uint32_t length, uint32_t options);
+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
+ uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+ char *password, int bidi, uint16_t *chap_index);
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx, int bidi);
+
+void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
+void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
+int qla4xxx_get_sys_info(struct scsi_qla_host *ha);
+int qla4xxx_iospace_config(struct scsi_qla_host *ha);
+void qla4xxx_pci_config(struct scsi_qla_host *ha);
+int qla4xxx_start_firmware(struct scsi_qla_host *ha);
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
+uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+int qla4xxx_request_irqs(struct scsi_qla_host *ha);
+void qla4xxx_free_irqs(struct scsi_qla_host *ha);
+void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
+void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
+void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
+void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
+void qla4xxx_dump_registers(struct scsi_qla_host *ha);
+uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd,
+ uint32_t *mbox_sts,
+ struct addr_ctrl_blk *init_fw_cb,
+ dma_addr_t init_fw_cb_dma);
+
+void qla4_8xxx_pci_config(struct scsi_qla_host *);
+int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
+int qla4_8xxx_load_risc(struct scsi_qla_host *);
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
+
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
+int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
+void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
+int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
+int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
+int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status);
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
+void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
+int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
+int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
+void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
+void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
+int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
+void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
+irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
+irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
+irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
+void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
+void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
+int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
+void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
+void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
+void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index);
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct iscsi_cls_conn *cls_conn,
+ uint32_t *mbx_sts);
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry, int options);
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t *mbx_sts);
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
+int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+ uint16_t stats_size, dma_addr_t stats_dma);
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t idx);
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size);
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size);
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+ uint32_t region, uint32_t field0,
+ uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+ enum iscsi_host_event_code aen_code,
+ uint32_t data_size, uint8_t *data);
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+ uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+ uint32_t status, uint32_t pid,
+ uint32_t data_size, uint8_t *data);
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+
+/* BSG Functions */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job);
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
+int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount);
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount);
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status);
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t *data);
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+ uint32_t data);
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int incount);
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+ uint32_t flash_addr, uint8_t *p_data,
+ int u32_word_count);
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+ uint8_t *p_data, int u32_word_count);
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+ dma_addr_t dma_addr);
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t chap_index);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len);
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
+ uint64_t addr, uint32_t *data, uint32_t count);
+uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
+int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
+int qla4_83xx_is_detached(struct scsi_qla_host *ha);
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
+
+extern int ql4xextended_error_logging;
+extern int ql4xdontresethba;
+extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
+
+extern struct device_attribute *qla4xxx_host_attrs[];
+#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
new file mode 100644
index 000000000..4180d6d9f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -0,0 +1,1267 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <scsi/iscsi_if.h>
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
+{
+ uint32_t value;
+ uint8_t func_number;
+ unsigned long flags;
+
+ /* Get the function number */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ value = readw(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ func_number = (uint8_t) ((value >> 4) & 0x30);
+ switch (value & ISP_CONTROL_FN_MASK) {
+ case ISP_CONTROL_FN0_SCSI:
+ ha->mac_index = 1;
+ break;
+ case ISP_CONTROL_FN1_SCSI:
+ ha->mac_index = 3;
+ break;
+ default:
+ DEBUG2(printk("scsi%ld: %s: Invalid function number, "
+ "ispControlStatus = 0x%x\n", ha->host_no,
+ __func__, value));
+ break;
+ }
+ DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
+ ha->mac_index));
+}
+
+/**
+ * qla4xxx_free_ddb - deallocate ddb
+ * @ha: pointer to host adapter structure.
+ * @ddb_entry: pointer to device database entry
+ *
+ * This routine marks a DDB entry INVALID
+ **/
+void qla4xxx_free_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ /* Remove device pointer from index mapping arrays */
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
+ (struct ddb_entry *) INVALID_ENTRY;
+ ha->tot_ddbs--;
+}
+
+/**
+ * qla4xxx_init_response_q_entries() - Initializes response queue entries.
+ * @ha: HA context
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ **/
+static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
+{
+ uint16_t cnt;
+ struct response *pkt;
+
+ pkt = (struct response *)ha->response_ptr;
+ for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) {
+ pkt->signature = RESPONSE_PROCESSED;
+ pkt++;
+ }
+}
+
+/**
+ * qla4xxx_init_rings - initialize hw queues
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine initializes the internal queues for the specified adapter.
+ * The QLA4010 requires us to restart the queues at index 0.
+ * The QLA4000 doesn't care, so just default to QLA4010's requirement.
+ **/
+int qla4xxx_init_rings(struct scsi_qla_host *ha)
+{
+ unsigned long flags = 0;
+ int i;
+
+ /* Initialize request queue. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->request_out = 0;
+ ha->request_in = 0;
+ ha->request_ptr = &ha->request_ring[ha->request_in];
+ ha->req_q_count = REQUEST_QUEUE_DEPTH;
+
+ /* Initialize response queue. */
+ ha->response_in = 0;
+ ha->response_out = 0;
+ ha->response_ptr = &ha->response_ring[ha->response_out];
+
+ if (is_qla8022(ha)) {
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_82xx_reg->req_q_out);
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in);
+ writel(0,
+ (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out);
+ } else {
+ /*
+ * Initialize DMA Shadow registers. The firmware is really
+ * supposed to take care of this, but on some uniprocessor
+ * systems, the shadow registers aren't cleared-- causing
+ * the interrupt_handler to think there are responses to be
+ * processed when there aren't.
+ */
+ ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
+ ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+ wmb();
+
+ writel(0, &ha->reg->req_q_in);
+ writel(0, &ha->reg->rsp_q_out);
+ readl(&ha->reg->rsp_q_out);
+ }
+
+ qla4xxx_init_response_q_entries(ha);
+
+ /* Initialize mailbox active array */
+ for (i = 0; i < MAX_MRB; i++)
+ ha->active_mrb_array[i] = NULL;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_sys_info - validate adapter MAC address(es)
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
+{
+ struct flash_sys_info *sys_info;
+ dma_addr_t sys_info_dma;
+ int status = QLA_ERROR;
+
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
+ if (sys_info == NULL) {
+ DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+ ha->host_no, __func__));
+
+ goto exit_get_sys_info_no_free;
+ }
+ memset(sys_info, 0, sizeof(*sys_info));
+
+ /* Get flash sys info */
+ if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
+ sizeof(*sys_info)) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
+ "failed\n", ha->host_no, __func__));
+
+ goto exit_get_sys_info;
+ }
+
+ /* Save M.A.C. address & serial_number */
+ memcpy(ha->my_mac, &sys_info->physAddr[0].address[0],
+ min(sizeof(ha->my_mac),
+ sizeof(sys_info->physAddr[0].address)));
+ memcpy(ha->serial_number, &sys_info->acSerialNumber,
+ min(sizeof(ha->serial_number),
+ sizeof(sys_info->acSerialNumber)));
+
+ status = QLA_SUCCESS;
+
+exit_get_sys_info:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
+ sys_info_dma);
+
+exit_get_sys_info_no_free:
+ return status;
+}
+
+/**
+ * qla4xxx_init_local_data - initialize adapter specific local data
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
+{
+ /* Initialize aen queue */
+ ha->aen_q_count = MAX_AEN_ENTRIES;
+}
+
+static uint8_t
+qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
+{
+ uint8_t ipv4_wait = 0;
+ uint8_t ipv6_wait = 0;
+ int8_t ip_address[IPv6_ADDR_LEN] = {0} ;
+
+ /* If both IPv4 & IPv6 are enabled, possibly only one
+ * IP address may be acquired, so check to see if we
+ * need to wait for another */
+ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
+ if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
+ ((ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
+ ipv4_wait = 1;
+ }
+ if (((ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+ ((ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ip_config.ipv6_addr0_state ==
+ IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ip_config.ipv6_addr1_state ==
+ IP_ADDRSTATE_ACQUIRING))) {
+
+ ipv6_wait = 1;
+
+ if ((ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ip_config.ipv6_addr0_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ip_config.ipv6_addr1_state ==
+ IP_ADDRSTATE_PREFERRED)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "Preferred IP configured."
+ " Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if (memcmp(&ha->ip_config.ipv6_default_router_addr,
+ ip_address, IPv6_ADDR_LEN) == 0) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "No Router configured. "
+ "Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if ((ha->ip_config.ipv6_default_router_state ==
+ IPV6_RTRSTATE_MANUAL) &&
+ (ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_TENTATIVE) &&
+ (memcmp(&ha->ip_config.ipv6_link_local_addr,
+ &ha->ip_config.ipv6_default_router_addr, 4) ==
+ 0)) {
+ DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
+ "IP configured. Don't wait!\n",
+ ha->host_no, __func__));
+ ipv6_wait = 0;
+ }
+ }
+ if (ipv4_wait || ipv6_wait) {
+ DEBUG2(printk("scsi%ld: %s: Wait for additional "
+ "IP(s) \"", ha->host_no, __func__));
+ if (ipv4_wait)
+ DEBUG2(printk("IPv4 "));
+ if (ha->ip_config.ipv6_link_local_state ==
+ IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6LinkLocal "));
+ if (ha->ip_config.ipv6_addr0_state ==
+ IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr0 "));
+ if (ha->ip_config.ipv6_addr1_state ==
+ IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr1 "));
+ DEBUG2(printk("\"\n"));
+ }
+ }
+
+ return ipv4_wait|ipv6_wait;
+}
+
+static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha,
+ struct qla4_8xxx_minidump_template_hdr *md_hdr)
+{
+ int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET :
+ QLA83XX_TEMPLATE_CAP_OFFSET;
+ int rval = 1;
+ uint32_t *cap_offset;
+
+ cap_offset = (uint32_t *)((char *)md_hdr + offset);
+
+ if (!(le32_to_cpu(*cap_offset) & BIT_0)) {
+ ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n",
+ *cap_offset);
+ rval = 0;
+ }
+
+ return rval;
+}
+
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+ int status;
+ uint32_t capture_debug_level;
+ int hdr_entry_bit, k;
+ void *md_tmp;
+ dma_addr_t md_tmp_dma;
+ struct qla4_8xxx_minidump_template_hdr *md_hdr;
+ int dma_capable;
+
+ if (ha->fw_dump) {
+ ql4_printk(KERN_WARNING, ha,
+ "Firmware dump previously allocated.\n");
+ return;
+ }
+
+ status = qla4xxx_req_template_size(ha);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get template size\n",
+ ha->host_no);
+ return;
+ }
+
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+ /* Allocate memory for saving the template */
+ md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ &md_tmp_dma, GFP_KERNEL);
+ if (!md_tmp) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to allocate DMA memory\n",
+ ha->host_no);
+ return;
+ }
+
+ /* Request template */
+ status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get minidump template\n",
+ ha->host_no);
+ goto alloc_cleanup;
+ }
+
+ md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+ dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr);
+
+ capture_debug_level = md_hdr->capture_debug_level;
+
+ /* Get capture mask based on module loadtime setting. */
+ if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
+ (ql4xmdcapmask == 0xFF && dma_capable)) {
+ ha->fw_dump_capture_mask = ql4xmdcapmask;
+ } else {
+ if (ql4xmdcapmask == 0xFF)
+ ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");
+ ha->fw_dump_capture_mask = capture_debug_level;
+ }
+
+ md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+ md_hdr->num_of_entries));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+ ha->fw_dump_capture_mask));
+
+ /* Calculate fw_dump_size */
+ for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+ hdr_entry_bit <<= 1, k++) {
+ if (hdr_entry_bit & ha->fw_dump_capture_mask)
+ ha->fw_dump_size += md_hdr->capture_size_array[k];
+ }
+
+ /* Total firmware dump size including command header */
+ ha->fw_dump_size += ha->fw_dump_tmplt_size;
+ ha->fw_dump = vmalloc(ha->fw_dump_size);
+ if (!ha->fw_dump)
+ goto alloc_cleanup;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Minidump Tempalate Size = 0x%x KB\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+ memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+ ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+ dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ md_tmp, md_tmp_dma);
+}
+
+static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
+{
+ uint32_t timeout_count;
+ int ready = 0;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n"));
+ for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
+ timeout_count--) {
+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
+ qla4xxx_get_dhcp_ip_address(ha);
+
+ /* Get firmware state. */
+ if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: unable to get firmware "
+ "state\n", ha->host_no, __func__));
+ break;
+ }
+
+ if (ha->firmware_state & FW_STATE_ERROR) {
+ DEBUG2(printk("scsi%ld: %s: an unrecoverable error has"
+ " occurred\n", ha->host_no, __func__));
+ break;
+
+ }
+ if (ha->firmware_state & FW_STATE_CONFIG_WAIT) {
+ /*
+ * The firmware has not yet been issued an Initialize
+ * Firmware command, so issue it now.
+ */
+ if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR)
+ break;
+
+ /* Go back and test for ready state - no wait. */
+ continue;
+ }
+
+ if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ "AUTOCONNECT in progress\n",
+ ha->host_no, __func__));
+ }
+
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ " CONFIGURING IP\n",
+ ha->host_no, __func__));
+ /*
+ * Check for link state after 15 secs and if link is
+ * still DOWN then, cable is unplugged. Ignore "DHCP
+ * in Progress/CONFIGURING IP" bit to check if firmware
+ * is in ready state or not after 15 secs.
+ * This is applicable for both 2.x & 3.x firmware
+ */
+ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
+ " LINK UP (Cable plugged)\n",
+ ha->host_no, __func__));
+ } else if (ha->firmware_state &
+ (FW_STATE_CONFIGURING_IP |
+ FW_STATE_READY)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "LINK DOWN (Cable unplugged)\n",
+ ha->host_no, __func__));
+ ha->firmware_state = FW_STATE_READY;
+ }
+ }
+ }
+
+ if (ha->firmware_state == FW_STATE_READY) {
+ /* If DHCP IP Addr is available, retrieve it now. */
+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
+ &ha->dpc_flags))
+ qla4xxx_get_dhcp_ip_address(ha);
+
+ if (!qla4xxx_wait_for_ip_config(ha) ||
+ timeout_count == 1) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Firmware Ready..\n"));
+ /* The firmware is ready to process SCSI
+ commands. */
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: MEDIA TYPE"
+ " - %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_OPTICAL_MEDIA)
+ != 0 ? "OPTICAL" : "COPPER"));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: DHCPv4 STATE"
+ " Enabled %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
+ "YES" : "NO"));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: LINK %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_LINK_UP) != 0 ?
+ "UP" : "DOWN"));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: iSNS Service "
+ "Started %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
+ "YES" : "NO"));
+
+ ready = 1;
+ break;
+ }
+ }
+ DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
+ "seconds expired= %d\n", ha->host_no, __func__,
+ ha->firmware_state, ha->addl_fw_state,
+ timeout_count));
+ if (is_qla4032(ha) &&
+ !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
+ (timeout_count < ADAPTER_INIT_TOV - 5)) {
+ break;
+ }
+
+ msleep(1000);
+ } /* end of for */
+
+ if (timeout_count <= 0)
+ DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
+ ha->host_no, __func__));
+
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
+ "it's waiting to configure an IP address\n",
+ ha->host_no, __func__));
+ ready = 1;
+ } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but "
+ "auto-discovery still in process\n",
+ ha->host_no, __func__));
+ ready = 1;
+ }
+
+ return ready;
+}
+
+/**
+ * qla4xxx_init_firmware - initializes the firmware.
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
+{
+ int status = QLA_ERROR;
+
+ if (is_aer_supported(ha) &&
+ test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
+ return status;
+
+ /* For 82xx, stop firmware before initializing because if BIOS
+ * has previously initialized firmware, then driver's initialize
+ * firmware will fail. */
+ if (is_qla80XX(ha))
+ qla4_8xxx_stop_firmware(ha);
+
+ ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
+ if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
+ DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
+ "control block\n", ha->host_no, __func__));
+ return status;
+ }
+
+ if (!qla4xxx_fw_ready(ha))
+ return status;
+
+ if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+ qla4xxx_alloc_fw_dump(ha);
+
+ return qla4xxx_get_firmware_status(ha);
+}
+
+static void qla4xxx_set_model_info(struct scsi_qla_host *ha)
+{
+ uint16_t board_id_string[8];
+ int i;
+ int size = sizeof(ha->nvram->isp4022.boardIdStr);
+ int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2;
+
+ for (i = 0; i < (size / 2) ; i++) {
+ board_id_string[i] = rd_nvram_word(ha, offset);
+ offset += 1;
+ }
+
+ memcpy(ha->model_name, board_id_string, size);
+}
+
+static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
+{
+ unsigned long flags;
+ union external_hw_config_reg extHwConfig;
+
+ DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
+ __func__));
+ if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
+ return QLA_ERROR;
+ if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
+ ql4xxx_unlock_flash(ha);
+ return QLA_ERROR;
+ }
+
+ /* Get EEPRom Parameters from NVRAM and validate */
+ ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n");
+ if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ extHwConfig.Asuint32_t =
+ rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: EEProm checksum invalid. "
+ "Please update your EEPROM\n", ha->host_no,
+ __func__);
+
+ /* Attempt to set defaults */
+ if (is_qla4010(ha))
+ extHwConfig.Asuint32_t = 0x1912;
+ else if (is_qla4022(ha) | is_qla4032(ha))
+ extHwConfig.Asuint32_t = 0x0023;
+ else
+ return QLA_ERROR;
+ }
+
+ if (is_qla4022(ha) || is_qla4032(ha))
+ qla4xxx_set_model_info(ha);
+ else
+ strcpy(ha->model_name, "QLA4010");
+
+ DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
+ ha->host_no, __func__, extHwConfig.Asuint32_t));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha));
+ readl(isp_ext_hw_conf(ha));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql4xxx_unlock_nvram(ha);
+ ql4xxx_unlock_flash(ha);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @ha: HA context
+ */
+void qla4_8xxx_pci_config(struct scsi_qla_host *ha)
+{
+ pci_set_master(ha->pdev);
+}
+
+void qla4xxx_pci_config(struct scsi_qla_host *ha)
+{
+ uint16_t w;
+ int status;
+
+ ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
+
+ pci_set_master(ha->pdev);
+ status = pci_set_mwi(ha->pdev);
+ /*
+ * We want to respect framework's setting of PCI configuration space
+ * command register and also want to make sure that all bits of
+ * interest to us are properly set in command register.
+ */
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+}
+
+static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
+{
+ int status = QLA_ERROR;
+ unsigned long max_wait_time;
+ unsigned long flags;
+ uint32_t mbox_status;
+
+ ql4_printk(KERN_INFO, ha, "Starting firmware ...\n");
+
+ /*
+ * Start firmware from flash ROM
+ *
+ * WORKAROUND: Stuff a non-constant value that the firmware can
+ * use as a seed for a random number generator in MB7 prior to
+ * setting BOOT_ENABLE. Fixes problem where the TCP
+ * connections use the same TCP ports after each reboot,
+ * causing some connections to not get re-established.
+ */
+ DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n",
+ ha->host_no, __func__));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel(jiffies, &ha->reg->mailbox[7]);
+ if (is_qla4022(ha) | is_qla4032(ha))
+ writel(set_rmask(NVR_WRITE_ENABLE),
+ &ha->reg->u1.isp4022.nvram);
+
+ writel(2, &ha->reg->mailbox[6]);
+ readl(&ha->reg->mailbox[6]);
+
+ writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for firmware to come UP. */
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
+ "boot firmware to complete...\n",
+ ha->host_no, __func__, FIRMWARE_UP_TOV));
+ max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
+ do {
+ uint32_t ctrl_status;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ctrl_status = readw(&ha->reg->ctrl_status);
+ mbox_status = readw(&ha->reg->mailbox[0]);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR))
+ break;
+ if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
+ break;
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
+ "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
+ ha->host_no, __func__, ctrl_status, max_wait_time));
+
+ msleep_interruptible(250);
+ } while (!time_after_eq(jiffies, max_wait_time));
+
+ if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
+ DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
+ ha->host_no, __func__));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ status = QLA_SUCCESS;
+ } else {
+ printk(KERN_INFO "scsi%ld: %s: Boot firmware failed "
+ "- mbox status 0x%x\n", ha->host_no, __func__,
+ mbox_status);
+ status = QLA_ERROR;
+ }
+ return status;
+}
+
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
+{
+#define QL4_LOCK_DRVR_WAIT 60
+#define QL4_LOCK_DRVR_SLEEP 1
+
+ int drvr_wait = QL4_LOCK_DRVR_WAIT;
+ while (drvr_wait) {
+ if (ql4xxx_lock_drvr(a) == 0) {
+ ssleep(QL4_LOCK_DRVR_SLEEP);
+ if (drvr_wait) {
+ DEBUG2(printk("scsi%ld: %s: Waiting for "
+ "Global Init Semaphore(%d)...\n",
+ a->host_no,
+ __func__, drvr_wait));
+ }
+ drvr_wait -= QL4_LOCK_DRVR_SLEEP;
+ } else {
+ DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
+ "acquired\n", a->host_no, __func__));
+ return QLA_SUCCESS;
+ }
+ }
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_start_firmware - starts qla4xxx firmware
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine performs the necessary steps to start the firmware for
+ * the QLA4010 adapter.
+ **/
+int qla4xxx_start_firmware(struct scsi_qla_host *ha)
+{
+ unsigned long flags = 0;
+ uint32_t mbox_status;
+ int status = QLA_ERROR;
+ int soft_reset = 1;
+ int config_chip = 0;
+
+ if (is_qla4022(ha) | is_qla4032(ha))
+ ql4xxx_set_mac_number(ha);
+
+ if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+ return QLA_ERROR;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no,
+ __func__, readw(isp_port_ctrl(ha))));
+ DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no,
+ __func__, readw(isp_port_status(ha))));
+
+ /* Is Hardware already initialized? */
+ if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) {
+ DEBUG(printk("scsi%ld: %s: Hardware has already been "
+ "initialized\n", ha->host_no, __func__));
+
+ /* Receive firmware boot acknowledgement */
+ mbox_status = readw(&ha->reg->mailbox[0]);
+
+ DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= "
+ "0x%x\n", ha->host_no, __func__, mbox_status));
+
+ /* Is firmware already booted? */
+ if (mbox_status == 0) {
+ /* F/W not running, must be config by net driver */
+ config_chip = 1;
+ soft_reset = 0;
+ } else {
+ writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: Get firmware "
+ "state -- state = 0x%x\n",
+ ha->host_no,
+ __func__, ha->firmware_state));
+ /* F/W is running */
+ if (ha->firmware_state &
+ FW_STATE_CONFIG_WAIT) {
+ DEBUG2(printk("scsi%ld: %s: Firmware "
+ "in known state -- "
+ "config and "
+ "boot, state = 0x%x\n",
+ ha->host_no, __func__,
+ ha->firmware_state));
+ config_chip = 1;
+ soft_reset = 0;
+ }
+ } else {
+ DEBUG2(printk("scsi%ld: %s: Firmware in "
+ "unknown state -- resetting,"
+ " state = "
+ "0x%x\n", ha->host_no, __func__,
+ ha->firmware_state));
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ } else {
+ DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been "
+ "started - resetting\n", ha->host_no, __func__));
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ",
+ ha->host_no, __func__, soft_reset, config_chip));
+ if (soft_reset) {
+ DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
+ __func__));
+ status = qla4xxx_soft_reset(ha); /* NOTE: acquires drvr
+ * lock again, but ok */
+ if (status == QLA_ERROR) {
+ DEBUG(printk("scsi%d: %s: Soft Reset failed!\n",
+ ha->host_no, __func__));
+ ql4xxx_unlock_drvr(ha);
+ return QLA_ERROR;
+ }
+ config_chip = 1;
+
+ /* Reset clears the semaphore, so acquire again */
+ if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+ return QLA_ERROR;
+ }
+
+ if (config_chip) {
+ if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS)
+ status = qla4xxx_start_firmware_from_flash(ha);
+ }
+
+ ql4xxx_unlock_drvr(ha);
+ if (status == QLA_SUCCESS) {
+ if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
+ qla4xxx_get_crash_record(ha);
+
+ qla4xxx_init_rings(ha);
+ } else {
+ DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
+ ha->host_no, __func__));
+ }
+ return status;
+}
+/**
+ * qla4xxx_free_ddb_index - Free DDBs reserved by firmware
+ * @ha: pointer to adapter structure
+ *
+ * Since firmware is not running in autoconnect mode the DDB indices should
+ * be freed so that when login happens from user space there are free DDB
+ * indices available.
+ **/
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, NULL);
+ if (ret == QLA_ERROR) {
+ next_idx++;
+ continue;
+ }
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Freeing DDB index = 0x%x\n", idx));
+ ret = qla4xxx_clear_ddb_entry(ha, idx);
+ if (ret == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha,
+ "Unable to clear DDB index = "
+ "0x%x\n", idx);
+ }
+ if (next_idx == 0)
+ break;
+ }
+}
+
+/**
+ * qla4xxx_initialize_adapter - initiailizes hba
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine parforms all of the steps necessary to initialize the adapter.
+ *
+ **/
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
+{
+ int status = QLA_ERROR;
+
+ ha->eeprom_cmd_data = 0;
+
+ ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
+ ha->isp_ops->pci_config(ha);
+
+ ha->isp_ops->disable_intrs(ha);
+
+ /* Initialize the Host adapter request/response queues and firmware */
+ if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
+ goto exit_init_hba;
+
+ /*
+ * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
+ * Mailbox interrupts must be enabled prior to issuing any mailbox
+ * command in order to prevent the possibility of losing interrupts
+ * while switching from polling to interrupt mode. IOCB interrupts are
+ * enabled via isp_ops->enable_intrs.
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ qla4_83xx_enable_mbox_intrs(ha);
+
+ if (qla4xxx_about_firmware(ha) == QLA_ERROR)
+ goto exit_init_hba;
+
+ if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
+ goto exit_init_hba;
+
+ qla4xxx_init_local_data(ha);
+
+ status = qla4xxx_init_firmware(ha);
+ if (status == QLA_ERROR)
+ goto exit_init_hba;
+
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
+
+ set_bit(AF_ONLINE, &ha->flags);
+
+exit_init_hba:
+ DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
+ status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
+ return status;
+}
+
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
+ qla4xxx_update_session_conn_param(ha, ddb_entry);
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_FREE);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ /*
+ * iscsi_session failure will cause userspace to
+ * stop the connection which in turn would block the
+ * iscsi_session and start relogin
+ */
+ iscsi_session_failure(ddb_entry->sess->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_session_failure(ddb_entry->sess->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+ * This triggers a relogin. After the relogin_timer
+ * expires, the relogin gets scheduled. We must wait a
+ * minimum amount of time since receiving an 0x8014 AEN
+ * with failed device_state or a logout response before
+ * we can issue another relogin.
+ *
+ * Firmware pads this timeout: (time2wait +1).
+ * Driver retry to login should be longer than F/W.
+ * Otherwise F/W will fail
+ * set_ddb() mbx cmd with 0x4005 since it still
+ * counting down its time2wait.
+ */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Decive Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+ /* Get the corresponging ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
+
+exit_ddb_event:
+ return status;
+}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logins to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
new file mode 100644
index 000000000..655b7bb64
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -0,0 +1,96 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ *
+ * qla4xxx_lookup_ddb_by_fw_index
+ * This routine locates a device handle given the firmware device
+ * database index. If device doesn't exist, returns NULL.
+ *
+ * Input:
+ * ha - Pointer to host adapter structure.
+ * fw_ddb_index - Firmware's device database index
+ *
+ * Returns:
+ * Pointer to the corresponding internal device database structure
+ */
+static inline struct ddb_entry *
+qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
+{
+ struct ddb_entry *ddb_entry = NULL;
+
+ if ((fw_ddb_index < MAX_DDB_ENTRIES) &&
+ (ha->fw_ddb_index_map[fw_ddb_index] !=
+ (struct ddb_entry *) INVALID_ENTRY)) {
+ ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
+ }
+
+ DEBUG3(printk("scsi%d: %s: ddb [%d], ddb_entry = %p\n",
+ ha->host_no, __func__, fw_ddb_index, ddb_entry));
+
+ return ddb_entry;
+}
+
+static inline void
+__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
+{
+ if (is_qla4022(ha) | is_qla4032(ha)) {
+ writel(set_rmask(IMR_SCSI_INTR_ENABLE),
+ &ha->reg->u1.isp4022.intr_mask);
+ readl(&ha->reg->u1.isp4022.intr_mask);
+ } else {
+ writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ }
+ set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+static inline void
+__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
+{
+ if (is_qla4022(ha) | is_qla4032(ha)) {
+ writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
+ &ha->reg->u1.isp4022.intr_mask);
+ readl(&ha->reg->u1.isp4022.intr_mask);
+ } else {
+ writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ }
+ clear_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+static inline void
+qla4xxx_enable_intrs(struct scsi_qla_host *ha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ __qla4xxx_enable_intrs(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline void
+qla4xxx_disable_intrs(struct scsi_qla_host *ha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ __qla4xxx_disable_intrs(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
+{
+ int type;
+
+ if (chap_entry->flags & BIT_7)
+ type = LOCAL_CHAP;
+ else
+ type = BIDI_CHAP;
+
+ return type;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
new file mode 100644
index 000000000..17222eb49
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -0,0 +1,542 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+#include <scsi/scsi_tcq.h>
+
+static int
+qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
+{
+ uint16_t cnt;
+
+ /* Calculate number of free request entries. */
+ if ((req_cnt + 2) >= ha->req_q_count) {
+ cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
+ if (ha->request_in < cnt)
+ ha->req_q_count = cnt - ha->request_in;
+ else
+ ha->req_q_count = REQUEST_QUEUE_DEPTH -
+ (ha->request_in - cnt);
+ }
+
+ /* Check if room for request in request ring. */
+ if ((req_cnt + 2) < ha->req_q_count)
+ return 1;
+ else
+ return 0;
+}
+
+static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
+{
+ /* Advance request queue pointer */
+ if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
+ ha->request_in = 0;
+ ha->request_ptr = ha->request_ring;
+ } else {
+ ha->request_in++;
+ ha->request_ptr++;
+ }
+}
+
+/**
+ * qla4xxx_get_req_pkt - returns a valid entry in request queue.
+ * @ha: Pointer to host adapter structure.
+ * @queue_entry: Pointer to pointer to queue entry structure
+ *
+ * This routine performs the following tasks:
+ * - returns the current request_in pointer (if queue not full)
+ * - advances the request_in pointer
+ * - checks for queue full
+ **/
+static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
+ struct queue_entry **queue_entry)
+{
+ uint16_t req_cnt = 1;
+
+ if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
+ *queue_entry = ha->request_ptr;
+ memset(*queue_entry, 0, sizeof(**queue_entry));
+
+ qla4xxx_advance_req_ring_ptr(ha);
+ ha->req_q_count -= req_cnt;
+ return QLA_SUCCESS;
+ }
+
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_send_marker_iocb - issues marker iocb to HBA
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: SCSI LUN
+ * @marker_type: marker identifier
+ *
+ * This routine issues a marker IOCB.
+ **/
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod)
+{
+ struct qla4_marker_entry *marker_entry;
+ unsigned long flags = 0;
+ uint8_t status = QLA_SUCCESS;
+
+ /* Acquire hardware specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Get pointer to the queue entry for the marker */
+ if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
+ QLA_SUCCESS) {
+ status = QLA_ERROR;
+ goto exit_send_marker;
+ }
+
+ /* Put the marker in the request queue */
+ marker_entry->hdr.entryType = ET_MARKER;
+ marker_entry->hdr.entryCount = 1;
+ marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+ marker_entry->modifier = cpu_to_le16(mrkr_mod);
+ int_to_scsilun(lun, &marker_entry->lun);
+ wmb();
+
+ /* Tell ISP it's got a new I/O request */
+ ha->isp_ops->queue_iocb(ha);
+
+exit_send_marker:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return status;
+}
+
+static struct continuation_t1_entry *
+qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
+{
+ struct continuation_t1_entry *cont_entry;
+
+ cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
+
+ qla4xxx_advance_req_ring_ptr(ha);
+
+ /* Load packet defaults */
+ cont_entry->hdr.entryType = ET_CONTINUE;
+ cont_entry->hdr.entryCount = 1;
+ cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
+
+ return cont_entry;
+}
+
+static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > COMMAND_SEG) {
+ iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
+ if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
+ iocbs++;
+ }
+ return iocbs;
+}
+
+static void qla4xxx_build_scsi_iocbs(struct srb *srb,
+ struct command_t3_entry *cmd_entry,
+ uint16_t tot_dsds)
+{
+ struct scsi_qla_host *ha;
+ uint16_t avail_dsds;
+ struct data_seg_a64 *cur_dsd;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
+
+ cmd = srb->cmd;
+ ha = srb->ha;
+
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ /* No data being transferred */
+ cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ avail_dsds = COMMAND_SEG;
+ cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
+
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ struct continuation_t1_entry *cont_entry;
+
+ cont_entry = qla4xxx_alloc_cont_entry(ha);
+ cur_dsd =
+ (struct data_seg_a64 *)
+ &cont_entry->dataseg[0];
+ avail_dsds = CONTINUE_SEG;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+ cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+ cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ cur_dsd++;
+ }
+}
+
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
+ readl(&ha->qla4_83xx_reg->req_q_in);
+}
+
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
+ readl(&ha->qla4_83xx_reg->rsp_q_out);
+}
+
+/**
+ * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine notifies the ISP that one or more new request
+ * queue entries have been placed on the request queue.
+ **/
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
+{
+ uint32_t dbval = 0;
+
+ dbval = 0x14 | (ha->func_num << 5);
+ dbval = dbval | (0 << 8) | (ha->request_in << 16);
+
+ qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
+}
+
+/**
+ * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine notifies the ISP that one or more response/completion
+ * queue entries have been processed by the driver.
+ * This also clears the interrupt.
+ **/
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
+ readl(&ha->qla4_82xx_reg->rsp_q_out);
+}
+
+/**
+ * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine is notifies the ISP that one or more new request
+ * queue entries have been placed on the request queue.
+ **/
+void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->request_in, &ha->reg->req_q_in);
+ readl(&ha->reg->req_q_in);
+}
+
+/**
+ * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine is notifies the ISP that one or more response/completion
+ * queue entries have been processed by the driver.
+ * This also clears the interrupt.
+ **/
+void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
+{
+ writel(ha->response_out, &ha->reg->rsp_q_out);
+ readl(&ha->reg->rsp_q_out);
+}
+
+/**
+ * qla4xxx_send_command_to_isp - issues command to HBA
+ * @ha: pointer to host adapter structure.
+ * @srb: pointer to SCSI Request Block to be sent to ISP
+ *
+ * This routine is called by qla4xxx_queuecommand to build an ISP
+ * command and pass it to the ISP for execution.
+ **/
+int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
+{
+ struct scsi_cmnd *cmd = srb->cmd;
+ struct ddb_entry *ddb_entry;
+ struct command_t3_entry *cmd_entry;
+ int nseg;
+ uint16_t tot_dsds;
+ uint16_t req_cnt;
+ unsigned long flags;
+ uint32_t index;
+
+ /* Get real lun and adapter */
+ ddb_entry = srb->ddb;
+
+ tot_dsds = 0;
+
+ /* Acquire hardware specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ index = (uint32_t)cmd->request->tag;
+
+ /*
+ * Check to see if adapter is online before placing request on
+ * request queue. If a reset occurs and a request is in the queue,
+ * the firmware will still attempt to process the request, retrieving
+ * garbage for pointers.
+ */
+ if (!test_bit(AF_ONLINE, &ha->flags)) {
+ DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
+ "Do not issue command.\n",
+ ha->host_no, __func__));
+ goto queuing_error;
+ }
+
+ /* Calculate the number of request entries needed. */
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ goto queuing_error;
+ tot_dsds = nseg;
+
+ req_cnt = qla4xxx_calc_request_entries(tot_dsds);
+ if (!qla4xxx_space_in_req_ring(ha, req_cnt))
+ goto queuing_error;
+
+ /* total iocbs active */
+ if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
+ goto queuing_error;
+
+ /* Build command packet */
+ cmd_entry = (struct command_t3_entry *) ha->request_ptr;
+ memset(cmd_entry, 0, sizeof(struct command_t3_entry));
+ cmd_entry->hdr.entryType = ET_COMMAND;
+ cmd_entry->handle = cpu_to_le32(index);
+ cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+
+ int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
+ cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
+ memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
+ cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
+ cmd_entry->hdr.entryCount = req_cnt;
+
+ /* Set data transfer direction control flags
+ * NOTE: Look at data_direction bits iff there is data to be
+ * transferred, as the data direction bit is sometimed filled
+ * in when there is no data to be transferred */
+ cmd_entry->control_flags = CF_NO_DATA;
+ if (scsi_bufflen(cmd)) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ cmd_entry->control_flags = CF_WRITE;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ cmd_entry->control_flags = CF_READ;
+
+ ha->bytes_xfered += scsi_bufflen(cmd);
+ if (ha->bytes_xfered & ~0xFFFFF){
+ ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
+ ha->bytes_xfered &= 0xFFFFF;
+ }
+ }
+
+ /* Set tagged queueing control flags */
+ cmd_entry->control_flags |= CF_SIMPLE_TAG;
+
+ qla4xxx_advance_req_ring_ptr(ha);
+ qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
+ wmb();
+
+ srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
+
+ /* update counters */
+ srb->state = SRB_ACTIVE_STATE;
+ srb->flags |= SRB_DMA_VALID;
+
+ /* Track IOCB used */
+ ha->iocb_cnt += req_cnt;
+ srb->iocb_cnt = req_cnt;
+ ha->req_q_count -= req_cnt;
+
+ ha->isp_ops->queue_iocb(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_ERROR;
+}
+
+int qla4xxx_send_passthru0(struct iscsi_task *task)
+{
+ struct passthru0 *passthru_iocb;
+ struct iscsi_session *sess = task->conn->session;
+ struct ddb_entry *ddb_entry = sess->dd_data;
+ struct scsi_qla_host *ha = ddb_entry->ha;
+ struct ql4_task_data *task_data = task->dd_data;
+ uint16_t ctrl_flags = 0;
+ unsigned long flags;
+ int ret = QLA_ERROR;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ task_data->iocb_req_cnt = 1;
+ /* Put the IOCB on the request queue */
+ if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
+ goto queuing_error;
+
+ passthru_iocb = (struct passthru0 *) ha->request_ptr;
+
+ memset(passthru_iocb, 0, sizeof(struct passthru0));
+ passthru_iocb->hdr.entryType = ET_PASSTHRU0;
+ passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
+ passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
+ passthru_iocb->handle = task->itt;
+ passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+ passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);
+
+ /* Setup the out & in DSDs */
+ if (task_data->req_len) {
+ memcpy((uint8_t *)task_data->req_buffer +
+ sizeof(struct iscsi_hdr), task->data, task->data_count);
+ ctrl_flags |= PT_FLAG_SEND_BUFFER;
+ passthru_iocb->out_dsd.base.addrLow =
+ cpu_to_le32(LSDW(task_data->req_dma));
+ passthru_iocb->out_dsd.base.addrHigh =
+ cpu_to_le32(MSDW(task_data->req_dma));
+ passthru_iocb->out_dsd.count =
+ cpu_to_le32(task->data_count +
+ sizeof(struct iscsi_hdr));
+ }
+ if (task_data->resp_len) {
+ passthru_iocb->in_dsd.base.addrLow =
+ cpu_to_le32(LSDW(task_data->resp_dma));
+ passthru_iocb->in_dsd.base.addrHigh =
+ cpu_to_le32(MSDW(task_data->resp_dma));
+ passthru_iocb->in_dsd.count =
+ cpu_to_le32(task_data->resp_len);
+ }
+
+ ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
+ passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);
+
+ /* Update the request pointer */
+ qla4xxx_advance_req_ring_ptr(ha);
+ wmb();
+
+ /* Track IOCB used */
+ ha->iocb_cnt += task_data->iocb_req_cnt;
+ ha->req_q_count -= task_data->iocb_req_cnt;
+ ha->isp_ops->queue_iocb(ha);
+ ret = QLA_SUCCESS;
+
+queuing_error:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return ret;
+}
+
+static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
+{
+ struct mrb *mrb;
+
+ mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
+ if (!mrb)
+ return mrb;
+
+ mrb->ha = ha;
+ return mrb;
+}
+
+static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
+ uint32_t *in_mbox)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t i;
+ unsigned long flags;
+ uint32_t index = 0;
+
+ /* Acquire hardware specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Get pointer to the queue entry for the marker */
+ rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
+ if (rval != QLA_SUCCESS)
+ goto exit_mbox_iocb;
+
+ index = ha->mrb_index;
+ /* get valid mrb index*/
+ for (i = 0; i < MAX_MRB; i++) {
+ index++;
+ if (index == MAX_MRB)
+ index = 1;
+ if (ha->active_mrb_array[index] == NULL) {
+ ha->mrb_index = index;
+ break;
+ }
+ }
+
+ mrb->iocb_cnt = 1;
+ ha->active_mrb_array[index] = mrb;
+ mrb->mbox->handle = index;
+ mrb->mbox->hdr.entryType = ET_MBOX_CMD;
+ mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
+ memcpy(mrb->mbox->in_mbox, in_mbox, 32);
+ mrb->mbox_cmd = in_mbox[0];
+ wmb();
+
+ ha->iocb_cnt += mrb->iocb_cnt;
+ ha->isp_ops->queue_iocb(ha);
+exit_mbox_iocb:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return rval;
+}
+
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+ uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
+{
+ uint32_t in_mbox[8];
+ struct mrb *mrb = NULL;
+ int rval = QLA_SUCCESS;
+
+ memset(in_mbox, 0, sizeof(in_mbox));
+
+ mrb = qla4xxx_get_new_mrb(ha);
+ if (!mrb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
+ __func__));
+ rval = QLA_ERROR;
+ goto exit_ping;
+ }
+
+ in_mbox[0] = MBOX_CMD_PING;
+ in_mbox[1] = options;
+ memcpy(&in_mbox[2], &ipaddr[0], 4);
+ memcpy(&in_mbox[3], &ipaddr[4], 4);
+ memcpy(&in_mbox[4], &ipaddr[8], 4);
+ memcpy(&in_mbox[5], &ipaddr[12], 4);
+ in_mbox[6] = payload_size;
+
+ mrb->pid = pid;
+ rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
+
+ if (rval != QLA_SUCCESS)
+ goto exit_ping;
+
+ return rval;
+exit_ping:
+ kfree(mrb);
+ return rval;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
new file mode 100644
index 000000000..4f9c0f2be
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -0,0 +1,1627 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+/**
+ * qla4xxx_copy_sense - copy sense data into cmd sense buffer
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ * @srb: Pointer to srb structure.
+ **/
+static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
+ struct status_entry *sts_entry,
+ struct srb *srb)
+{
+ struct scsi_cmnd *cmd = srb->cmd;
+ uint16_t sense_len;
+
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
+ if (sense_len == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:"
+ " sense len 0\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__));
+ ha->status_srb = NULL;
+ return;
+ }
+ /* Save total available sense length,
+ * not to exceed cmd's sense buffer size */
+ sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
+ srb->req_sense_ptr = cmd->sense_buffer;
+ srb->req_sense_len = sense_len;
+
+ /* Copy sense from sts_entry pkt */
+ sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
+ memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, "
+ "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__,
+ sts_entry->senseData[2] & 0x0f,
+ sts_entry->senseData[7],
+ sts_entry->senseData[12],
+ sts_entry->senseData[13]));
+
+ DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
+ srb->flags |= SRB_GOT_SENSE;
+
+ /* Update srb, in case a sts_cont pkt follows */
+ srb->req_sense_ptr += sense_len;
+ srb->req_sense_len -= sense_len;
+ if (srb->req_sense_len != 0)
+ ha->status_srb = srb;
+ else
+ ha->status_srb = NULL;
+}
+
+/**
+ * qla4xxx_status_cont_entry - Process a Status Continuations entry.
+ * @ha: SCSI driver HA context
+ * @sts_cont: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
+ struct status_cont_entry *sts_cont)
+{
+ struct srb *srb = ha->status_srb;
+ struct scsi_cmnd *cmd;
+ uint16_t sense_len;
+
+ if (srb == NULL)
+ return;
+
+ cmd = srb->cmd;
+ if (cmd == NULL) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
+ "back to OS srb=%p srb->state:%d\n", ha->host_no,
+ __func__, srb, srb->state));
+ ha->status_srb = NULL;
+ return;
+ }
+
+ /* Copy sense data. */
+ sense_len = min_t(uint16_t, srb->req_sense_len,
+ IOCB_MAX_EXT_SENSEDATA_LEN);
+ memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
+ DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));
+
+ srb->req_sense_ptr += sense_len;
+ srb->req_sense_len -= sense_len;
+
+ /* Place command on done queue. */
+ if (srb->req_sense_len == 0) {
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+ ha->status_srb = NULL;
+ }
+}
+
+/**
+ * qla4xxx_status_entry - processes status IOCBs
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_status_entry(struct scsi_qla_host *ha,
+ struct status_entry *sts_entry)
+{
+ uint8_t scsi_status;
+ struct scsi_cmnd *cmd;
+ struct srb *srb;
+ struct ddb_entry *ddb_entry;
+ uint32_t residual;
+
+ srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
+ if (!srb) {
+ ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+ "handle=0x%0x, srb=%p\n", __func__,
+ sts_entry->handle, srb);
+ if (is_qla80XX(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ return;
+ }
+
+ cmd = srb->cmd;
+ if (cmd == NULL) {
+ DEBUG2(printk("scsi%ld: %s: Command already returned back to "
+ "OS pkt->handle=%d srb=%p srb->state:%d\n",
+ ha->host_no, __func__, sts_entry->handle,
+ srb, srb->state));
+ ql4_printk(KERN_WARNING, ha, "Command is NULL:"
+ " already returned to OS (srb=%p)\n", srb);
+ return;
+ }
+
+ ddb_entry = srb->ddb;
+ if (ddb_entry == NULL) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto status_entry_exit;
+ }
+
+ residual = le32_to_cpu(sts_entry->residualByteCnt);
+
+ /* Translate ISP error to a Linux SCSI error. */
+ scsi_status = sts_entry->scsiStatus;
+ switch (sts_entry->completionStatus) {
+ case SCS_COMPLETE:
+
+ if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+ cmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
+ scsi_set_resid(cmd, residual);
+ if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
+ cmd->underflow)) {
+
+ cmd->result = DID_ERROR << 16;
+
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: "
+ "Mid-layer Data underrun0, "
+ "xferlen = 0x%x, "
+ "residual = 0x%x\n", ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd), residual));
+ break;
+ }
+ }
+
+ cmd->result = DID_OK << 16 | scsi_status;
+
+ if (scsi_status != SCSI_CHECK_CONDITION)
+ break;
+
+ /* Copy Sense Data into sense buffer. */
+ qla4xxx_copy_sense(ha, sts_entry, srb);
+ break;
+
+ case SCS_INCOMPLETE:
+ /* Always set the status to DID_ERROR, since
+ * all conditions result in that status anyway */
+ cmd->result = DID_ERROR << 16;
+ break;
+
+ case SCS_RESET_OCCURRED:
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n",
+ ha->host_no, cmd->device->channel,
+ cmd->device->id, cmd->device->lun, __func__));
+
+ cmd->result = DID_RESET << 16;
+ break;
+
+ case SCS_ABORTED:
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n",
+ ha->host_no, cmd->device->channel,
+ cmd->device->id, cmd->device->lun, __func__));
+
+ cmd->result = DID_RESET << 16;
+ break;
+
+ case SCS_TIMEOUT:
+ DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n",
+ ha->host_no, cmd->device->channel,
+ cmd->device->id, cmd->device->lun));
+
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+
+ /*
+ * Mark device missing so that we won't continue to send
+ * I/O to this device. We should get a ddb state change
+ * AEN soon.
+ */
+ if (iscsi_is_session_online(ddb_entry->sess))
+ qla4xxx_mark_device_missing(ddb_entry->sess);
+ break;
+
+ case SCS_DATA_UNDERRUN:
+ case SCS_DATA_OVERRUN:
+ if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
+ (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Data overrun\n",
+ ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__));
+
+ cmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ scsi_set_resid(cmd, residual);
+
+ if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
+
+ /* Both the firmware and target reported UNDERRUN:
+ *
+ * MID-LAYER UNDERFLOW case:
+ * Some kernels do not properly detect midlayer
+ * underflow, so we manually check it and return
+ * ERROR if the minimum required data was not
+ * received.
+ *
+ * ALL OTHER cases:
+ * Fall thru to check scsi_status
+ */
+ if (!scsi_status && (scsi_bufflen(cmd) - residual) <
+ cmd->underflow) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
+ ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd),
+ residual));
+
+ cmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
+ scsi_status != SAM_STAT_BUSY) {
+
+ /*
+ * The firmware reports UNDERRUN, but the target does
+ * not report it:
+ *
+ * scsi_status | host_byte device_byte
+ * | (19:16) (7:0)
+ * ============= | ========= ===========
+ * TASK_SET_FULL | DID_OK scsi_status
+ * BUSY | DID_OK scsi_status
+ * ALL OTHERS | DID_ERROR scsi_status
+ *
+ * Note: If scsi_status is task set full or busy,
+ * then this else if would fall thru to check the
+ * scsi_status and return DID_OK.
+ */
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ residual,
+ scsi_bufflen(cmd)));
+
+ cmd->result = DID_ERROR << 16 | scsi_status;
+ goto check_scsi_status;
+ }
+
+ cmd->result = DID_OK << 16 | scsi_status;
+
+check_scsi_status:
+ if (scsi_status == SAM_STAT_CHECK_CONDITION)
+ qla4xxx_copy_sense(ha, sts_entry, srb);
+
+ break;
+
+ case SCS_DEVICE_LOGGED_OUT:
+ case SCS_DEVICE_UNAVAILABLE:
+ DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE "
+ "state: 0x%x\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, sts_entry->completionStatus));
+ /*
+ * Mark device missing so that we won't continue to
+ * send I/O to this device. We should get a ddb
+ * state change AEN soon.
+ */
+ if (iscsi_is_session_online(ddb_entry->sess))
+ qla4xxx_mark_device_missing(ddb_entry->sess);
+
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ break;
+
+ case SCS_QUEUE_FULL:
+ /*
+ * SCSI Mid-Layer handles device queue full
+ */
+ cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
+ DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected "
+ "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
+ " iResp=%02x\n", ha->host_no, cmd->device->id,
+ cmd->device->lun, __func__,
+ sts_entry->completionStatus,
+ sts_entry->scsiStatus, sts_entry->state_flags,
+ sts_entry->iscsiFlags,
+ sts_entry->iscsiResponse));
+ break;
+
+ default:
+ cmd->result = DID_ERROR << 16;
+ break;
+ }
+
+status_entry_exit:
+
+ /* complete the request, if not waiting for status_continuation pkt */
+ srb->cc_stat = sts_entry->completionStatus;
+ if (ha->status_srb == NULL)
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+}
+
+/**
+ * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
+ struct passthru_status *sts_entry)
+{
+ struct iscsi_task *task;
+ struct ddb_entry *ddb_entry;
+ struct ql4_task_data *task_data;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+ itt_t itt;
+ uint32_t fw_ddb_index;
+
+ itt = sts_entry->handle;
+ fw_ddb_index = le32_to_cpu(sts_entry->target);
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
+ __func__, sts_entry->target);
+ return;
+ }
+
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+ spin_lock(&conn->session->back_lock);
+ task = iscsi_itt_to_task(conn, itt);
+ spin_unlock(&conn->session->back_lock);
+
+ if (task == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
+ return;
+ }
+
+ task_data = task->dd_data;
+ memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
+ ha->iocb_cnt -= task_data->iocb_req_cnt;
+ queue_work(ha->task_wq, &task_data->task_work);
+}
+
+static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
+ uint32_t index)
+{
+ struct mrb *mrb = NULL;
+
+ /* validate handle and remove from active array */
+ if (index >= MAX_MRB)
+ return mrb;
+
+ mrb = ha->active_mrb_array[index];
+ ha->active_mrb_array[index] = NULL;
+ if (!mrb)
+ return mrb;
+
+ /* update counters */
+ ha->iocb_cnt -= mrb->iocb_cnt;
+
+ return mrb;
+}
+
+static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
+ struct mbox_status_iocb *mbox_sts_entry)
+{
+ struct mrb *mrb;
+ uint32_t status;
+ uint32_t data_size;
+
+ mrb = qla4xxx_del_mrb_from_active_array(ha,
+ le32_to_cpu(mbox_sts_entry->handle));
+
+ if (mrb == NULL) {
+ ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
+ mbox_sts_entry->handle);
+ return;
+ }
+
+ switch (mrb->mbox_cmd) {
+ case MBOX_CMD_PING:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
+ "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
+ __func__, mrb->mbox_cmd,
+ mbox_sts_entry->out_mbox[0],
+ mbox_sts_entry->out_mbox[6]));
+
+ if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
+ status = ISCSI_PING_SUCCESS;
+ else
+ status = mbox_sts_entry->out_mbox[6];
+
+ data_size = sizeof(mbox_sts_entry->out_mbox);
+
+ qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
+ (uint8_t *) mbox_sts_entry->out_mbox);
+ break;
+
+ default:
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
+ "0x%x\n", __func__, mrb->mbox_cmd));
+ }
+
+ kfree(mrb);
+ return;
+}
+
+/**
+ * qla4xxx_process_response_queue - process response queue completions
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine process response queue completions in interrupt context.
+ * Hardware_lock locked upon entry
+ **/
+void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
+{
+ uint32_t count = 0;
+ struct srb *srb = NULL;
+ struct status_entry *sts_entry;
+
+ /* Process all responses from response queue */
+ while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
+ sts_entry = (struct status_entry *) ha->response_ptr;
+ count++;
+
+ /* Advance pointers for next entry */
+ if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
+ ha->response_out = 0;
+ ha->response_ptr = ha->response_ring;
+ } else {
+ ha->response_out++;
+ ha->response_ptr++;
+ }
+
+ /* process entry */
+ switch (sts_entry->hdr.entryType) {
+ case ET_STATUS:
+ /* Common status */
+ qla4xxx_status_entry(ha, sts_entry);
+ break;
+
+ case ET_PASSTHRU_STATUS:
+ if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
+ qla4xxx_passthru_status_entry(ha,
+ (struct passthru_status *)sts_entry);
+ else
+ ql4_printk(KERN_ERR, ha,
+ "%s: Invalid status received\n",
+ __func__);
+
+ break;
+
+ case ET_STATUS_CONTINUATION:
+ qla4xxx_status_cont_entry(ha,
+ (struct status_cont_entry *) sts_entry);
+ break;
+
+ case ET_COMMAND:
+ /* ISP device queue is full. Command not
+ * accepted by ISP. Queue command for
+ * later */
+
+ srb = qla4xxx_del_from_active_array(ha,
+ le32_to_cpu(sts_entry->
+ handle));
+ if (srb == NULL)
+ goto exit_prq_invalid_handle;
+
+ DEBUG2(printk("scsi%ld: %s: FW device queue full, "
+ "srb %p\n", ha->host_no, __func__, srb));
+
+ /* ETRY normally by sending it back with
+ * DID_BUS_BUSY */
+ srb->cmd->result = DID_BUS_BUSY << 16;
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+ break;
+
+ case ET_CONTINUE:
+ /* Just throw away the continuation entries */
+ DEBUG2(printk("scsi%ld: %s: Continuation entry - "
+ "ignoring\n", ha->host_no, __func__));
+ break;
+
+ case ET_MBOX_STATUS:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox status IOCB\n", __func__));
+ qla4xxx_mbox_status_entry(ha,
+ (struct mbox_status_iocb *)sts_entry);
+ break;
+
+ default:
+ /*
+ * Invalid entry in response queue, reset RISC
+ * firmware.
+ */
+ DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
+ "response queue \n", ha->host_no,
+ __func__,
+ sts_entry->hdr.entryType));
+ goto exit_prq_error;
+ }
+ ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ /*
+ * Tell ISP we're done with response(s). This also clears the interrupt.
+ */
+ ha->isp_ops->complete_iocb(ha);
+
+ return;
+
+exit_prq_invalid_handle:
+ DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
+ ha->host_no, __func__, srb, sts_entry->hdr.entryType,
+ sts_entry->completionStatus));
+
+exit_prq_error:
+ ha->isp_ops->complete_iocb(ha);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+}
+
+/**
+ * qla4_83xx_loopback_in_progress: Is loopback in progress?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1 = loopback in progress, 0 = loopback not in progress
+ **/
+static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
+ (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics in progress\n",
+ __func__));
+ rval = 1;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics not in progress\n",
+ __func__));
+ rval = 0;
+ }
+ }
+
+ return rval;
+}
+
+static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
+ uint32_t ipaddr_idx,
+ uint32_t ipaddr_fw_state)
+{
+ uint8_t ipaddr_state;
+ uint8_t ip_idx;
+
+ ip_idx = ipaddr_idx & 0xF;
+ ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state);
+
+ switch (ip_idx) {
+ case 0:
+ ha->ip_config.ipv4_addr_state = ipaddr_state;
+ break;
+ case 1:
+ ha->ip_config.ipv6_link_local_state = ipaddr_state;
+ break;
+ case 2:
+ ha->ip_config.ipv6_addr0_state = ipaddr_state;
+ break;
+ case 3:
+ ha->ip_config.ipv6_addr1_state = ipaddr_state;
+ break;
+ default:
+ ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n",
+ __func__, ip_idx);
+ }
+}
+
+static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
+ uint32_t *mbox_sts)
+{
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
+ &mbox_sts[2], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
+ &mbox_sts[3], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
+ &mbox_sts[4], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
+ &mbox_sts[5], sizeof(uint32_t));
+}
+
+/**
+ * qla4xxx_isr_decode_mailbox - decodes mailbox status
+ * @ha: Pointer to host adapter structure.
+ * @mailbox_status: Mailbox status.
+ *
+ * This routine decodes the mailbox status during the ISR.
+ * Hardware_lock locked upon entry. runs in interrupt context.
+ **/
+static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
+ uint32_t mbox_status)
+{
+ int i;
+ uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+ __le32 __iomem *mailbox_out;
+ uint32_t opcode = 0;
+
+ if (is_qla8032(ha) || is_qla8042(ha))
+ mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
+ else if (is_qla8022(ha))
+ mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
+ else
+ mailbox_out = &ha->reg->mailbox[0];
+
+ if ((mbox_status == MBOX_STS_BUSY) ||
+ (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
+ (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
+ ha->mbox_status[0] = mbox_status;
+
+ if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+ /*
+ * Copy all mailbox registers to a temporary
+ * location and set mailbox command done flag
+ */
+ for (i = 0; i < ha->mbox_status_count; i++)
+ ha->mbox_status[i] = readl(&mailbox_out[i]);
+
+ set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+
+ if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
+ complete(&ha->mbx_intr_comp);
+ }
+ } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
+ for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+ mbox_sts[i] = readl(&mailbox_out[i]);
+
+ /* Immediately process the AENs that don't require much work.
+ * Only queue the database_changed AENs */
+ if (ha->aen_log.count < MAX_AEN_ENTRIES) {
+ for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+ ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
+ mbox_sts[i];
+ ha->aen_log.count++;
+ }
+ switch (mbox_status) {
+ case MBOX_ASTS_SYSTEM_ERROR:
+ /* Log Mailbox registers */
+ ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
+ qla4xxx_dump_registers(ha);
+
+ if ((is_qla8022(ha) && ql4xdontresethba) ||
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
+ DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
+ ha->host_no, __func__));
+ } else {
+ set_bit(AF_GET_CRASH_RECORD, &ha->flags);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ break;
+
+ case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
+ case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
+ case MBOX_ASTS_NVRAM_INVALID:
+ case MBOX_ASTS_IP_ADDRESS_CHANGED:
+ case MBOX_ASTS_DHCP_LEASE_EXPIRED:
+ DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
+ "Reset HA\n", ha->host_no, mbox_status));
+ if (is_qla80XX(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ break;
+
+ case MBOX_ASTS_LINK_UP:
+ set_bit(AF_LINK_UP, &ha->flags);
+ if (test_bit(AF_INIT_DONE, &ha->flags))
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
+ qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
+ sizeof(mbox_sts),
+ (uint8_t *) mbox_sts);
+
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
+ ha->notify_link_up_comp)
+ complete(&ha->link_up_comp);
+
+ break;
+
+ case MBOX_ASTS_LINK_DOWN:
+ clear_bit(AF_LINK_UP, &ha->flags);
+ if (test_bit(AF_INIT_DONE, &ha->flags)) {
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ }
+
+ ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
+ qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
+ sizeof(mbox_sts),
+ (uint8_t *) mbox_sts);
+ break;
+
+ case MBOX_ASTS_HEARTBEAT:
+ ha->seconds_since_last_heartbeat = 0;
+ break;
+
+ case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
+ DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
+ "ACQUIRED\n", ha->host_no, mbox_status));
+ set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+ break;
+
+ case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
+ case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
+ * mode
+ * only */
+ case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
+ case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
+ case MBOX_ASTS_SUBNET_STATE_CHANGE:
+ case MBOX_ASTS_DUPLICATE_IP:
+ /* No action */
+ DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
+ mbox_status));
+ break;
+
+ case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
+ printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
+ "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
+ mbox_sts[2], mbox_sts[3]);
+
+ qla4xxx_update_ipaddr_state(ha, mbox_sts[5],
+ mbox_sts[3]);
+ /* mbox_sts[2] = Old ACB state
+ * mbox_sts[3] = new ACB state */
+ if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
+ ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
+ (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
+ set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+ } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
+ (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
+ if (is_qla80XX(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
+ ha->host_no, __func__);
+ } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
+ complete(&ha->disable_acb_comp);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
+ ha->host_no, __func__);
+ }
+ break;
+
+ case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
+ case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
+ case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
+ /* No action */
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
+ ha->host_no, mbox_status));
+ break;
+
+ case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, IPv6 ERROR, "
+ "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ break;
+
+ case MBOX_ASTS_MAC_ADDRESS_CHANGED:
+ case MBOX_ASTS_DNS:
+ /* No action */
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
+ "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2]));
+ break;
+
+ case MBOX_ASTS_SELF_TEST_FAILED:
+ case MBOX_ASTS_LOGIN_FAILED:
+ /* No action */
+ DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
+ "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3]));
+ break;
+
+ case MBOX_ASTS_DATABASE_CHANGED:
+ /* Queue AEN information and process it in the DPC
+ * routine */
+ if (ha->aen_q_count > 0) {
+
+ /* decrement available counter */
+ ha->aen_q_count--;
+
+ for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+ ha->aen_q[ha->aen_in].mbox_sts[i] =
+ mbox_sts[i];
+
+ /* print debug message */
+ DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+ "mb1:0x%x mb2:0x%x mb3:0x%x "
+ "mb4:0x%x mb5:0x%x\n",
+ ha->host_no, ha->aen_in,
+ mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5]));
+
+ /* advance pointer */
+ ha->aen_in++;
+ if (ha->aen_in == MAX_AEN_ENTRIES)
+ ha->aen_in = 0;
+
+ /* The DPC routine will process the aen */
+ set_bit(DPC_AEN, &ha->dpc_flags);
+ } else {
+ DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
+ "overflowed! AEN LOST!!\n",
+ ha->host_no, __func__,
+ mbox_sts[0]));
+
+ DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
+ ha->host_no));
+
+ for (i = 0; i < MAX_AEN_ENTRIES; i++) {
+ DEBUG2(printk("AEN[%d] %04x %04x %04x "
+ "%04x\n", i, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2],
+ mbox_sts[3]));
+ }
+ }
+ break;
+
+ case MBOX_ASTS_TXSCVR_INSERTED:
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: AEN %04x Transceiver"
+ " inserted\n", ha->host_no, mbox_sts[0]));
+ break;
+
+ case MBOX_ASTS_TXSCVR_REMOVED:
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: AEN %04x Transceiver"
+ " removed\n", ha->host_no, mbox_sts[0]));
+ break;
+
+ case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
+ opcode = mbox_sts[1] >> 16;
+ if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+ (opcode == MBOX_CMD_PORT_RESET)) {
+ set_bit(DPC_POST_IDC_ACK,
+ &ha->dpc_flags);
+ ha->idc_info.request_desc = mbox_sts[1];
+ ha->idc_info.info1 = mbox_sts[2];
+ ha->idc_info.info2 = mbox_sts[3];
+ ha->idc_info.info3 = mbox_sts[4];
+ qla4xxx_wake_dpc(ha);
+ }
+ }
+ break;
+
+ case MBOX_ASTS_IDC_COMPLETE:
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi:%ld: AEN %04x IDC Complete notification\n",
+ ha->host_no, mbox_sts[0]));
+
+ opcode = mbox_sts[1] >> 16;
+ if (ha->notify_idc_comp)
+ complete(&ha->idc_comp);
+
+ if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+ (opcode == MBOX_CMD_PORT_RESET))
+ ha->idc_info.info2 = mbox_sts[3];
+
+ if (qla4_83xx_loopback_in_progress(ha)) {
+ set_bit(AF_LOOPBACK, &ha->flags);
+ } else {
+ clear_bit(AF_LOOPBACK, &ha->flags);
+ if (ha->saved_acb)
+ set_bit(DPC_RESTORE_ACB,
+ &ha->dpc_flags);
+ }
+ qla4xxx_wake_dpc(ha);
+ }
+ break;
+
+ case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
+ ha->host_no, mbox_sts[0]));
+ qla4xxx_default_router_changed(ha, mbox_sts);
+ break;
+
+ case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
+ ha->host_no, mbox_sts[0]));
+ /* new IDC timeout */
+ ha->idc_extend_tmo = mbox_sts[1];
+ break;
+
+ case MBOX_ASTS_INITIALIZATION_FAILED:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[3]));
+ break;
+
+ case MBOX_ASTS_SYSTEM_WARNING_EVENT:
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ break;
+
+ case MBOX_ASTS_DCBX_CONF_CHANGE:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
+ ha->host_no, mbox_sts[0]));
+ break;
+
+ default:
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: AEN %04x UNKNOWN\n",
+ ha->host_no, mbox_sts[0]));
+ break;
+ }
+ } else {
+ DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
+ ha->host_no, mbox_status));
+
+ ha->mbox_status[0] = mbox_status;
+ }
+}
+
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status)
+{
+ /* Process mailbox/asynch event interrupt.*/
+ if (intr_status) {
+ qla4xxx_isr_decode_mailbox(ha,
+ readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+ /* clear the interrupt */
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ } else {
+ qla4xxx_process_response_queue(ha);
+ }
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_83xx_reg->mb_int_mask);
+}
+
+/**
+ * qla4_82xx_interrupt_service_routine - isr
+ * @ha: pointer to host adapter structure.
+ *
+ * This is the main interrupt service routine.
+ * hardware_lock locked upon entry. runs in interrupt context.
+ **/
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
+ uint32_t intr_status)
+{
+ /* Process response queue interrupt. */
+ if ((intr_status & HSRX_RISC_IOCB_INT) &&
+ test_bit(AF_INIT_DONE, &ha->flags))
+ qla4xxx_process_response_queue(ha);
+
+ /* Process mailbox/asynch event interrupt.*/
+ if (intr_status & HSRX_RISC_MB_INT)
+ qla4xxx_isr_decode_mailbox(ha,
+ readl(&ha->qla4_82xx_reg->mailbox_out[0]));
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ readl(&ha->qla4_82xx_reg->host_int);
+}
+
+/**
+ * qla4xxx_interrupt_service_routine - isr
+ * @ha: pointer to host adapter structure.
+ *
+ * This is the main interrupt service routine.
+ * hardware_lock locked upon entry. runs in interrupt context.
+ **/
+void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
+ uint32_t intr_status)
+{
+ /* Process response queue interrupt. */
+ if (intr_status & CSR_SCSI_COMPLETION_INTR)
+ qla4xxx_process_response_queue(ha);
+
+ /* Process mailbox/asynch event interrupt.*/
+ if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
+ qla4xxx_isr_decode_mailbox(ha,
+ readl(&ha->reg->mailbox[0]));
+
+ /* Clear Mailbox Interrupt */
+ writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ }
+}
+
+/**
+ * qla4_82xx_spurious_interrupt - processes spurious interrupt
+ * @ha: pointer to host adapter structure.
+ * @reqs_count: .
+ *
+ **/
+static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
+ uint8_t reqs_count)
+{
+ if (reqs_count)
+ return;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
+ if (is_qla8022(ha)) {
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ if (test_bit(AF_INTx_ENABLED, &ha->flags))
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+ 0xfbff);
+ }
+ ha->spurious_int_count++;
+}
+
+/**
+ * qla4xxx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha;
+ uint32_t intr_status;
+ unsigned long flags = 0;
+ uint8_t reqs_count = 0;
+
+ ha = (struct scsi_qla_host *) dev_id;
+ if (!ha) {
+ DEBUG2(printk(KERN_INFO
+ "qla4xxx: Interrupt with NULL host ptr\n"));
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ ha->isr_count++;
+ /*
+ * Repeatedly service interrupts up to a maximum of
+ * MAX_REQS_SERVICED_PER_INTR
+ */
+ while (1) {
+ /*
+ * Read interrupt status
+ */
+ if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
+ ha->response_out)
+ intr_status = CSR_SCSI_COMPLETION_INTR;
+ else
+ intr_status = readl(&ha->reg->ctrl_status);
+
+ if ((intr_status &
+ (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
+ if (reqs_count == 0)
+ ha->spurious_int_count++;
+ break;
+ }
+
+ if (intr_status & CSR_FATAL_ERROR) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
+ "Status 0x%04x\n", ha->host_no,
+ readl(isp_port_error_status (ha))));
+
+ /* Issue Soft Reset to clear this error condition.
+ * This will prevent the RISC from repeatedly
+ * interrupting the driver; thus, allowing the DPC to
+ * get scheduled to continue error recovery.
+ * NOTE: Disabling RISC interrupts does not work in
+ * this case, as CSR_FATAL_ERROR overrides
+ * CSR_SCSI_INTR_ENABLE */
+ if ((readl(&ha->reg->ctrl_status) &
+ CSR_SCSI_RESET_INTR) == 0) {
+ writel(set_rmask(CSR_SOFT_RESET),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ }
+
+ writel(set_rmask(CSR_FATAL_ERROR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+
+ __qla4xxx_disable_intrs(ha);
+
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+
+ break;
+ } else if (intr_status & CSR_SCSI_RESET_INTR) {
+ clear_bit(AF_ONLINE, &ha->flags);
+ __qla4xxx_disable_intrs(ha);
+
+ writel(set_rmask(CSR_SCSI_RESET_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+
+ if (!test_bit(AF_HA_REMOVAL, &ha->flags))
+ set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+
+ break;
+ } else if (intr_status & INTR_PENDING) {
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+ ha->total_io_count++;
+ if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qla4_82xx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ uint32_t intr_status;
+ uint32_t status;
+ unsigned long flags = 0;
+ uint8_t reqs_count = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ ha->isr_count++;
+ status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+ if (!(status & ha->nx_legacy_intr.int_vec_bit))
+ return IRQ_NONE;
+
+ status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
+ if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
+ DEBUG7(ql4_printk(KERN_INFO, ha,
+ "%s legacy Int not triggered\n", __func__));
+ return IRQ_NONE;
+ }
+
+ /* clear the interrupt */
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+ /* read twice to ensure write is flushed */
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (1) {
+ if (!(readl(&ha->qla4_82xx_reg->host_int) &
+ ISRX_82XX_RISC_INT)) {
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
+ break;
+ }
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ if ((intr_status &
+ (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
+ break;
+ }
+
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+ /* Enable Interrupt */
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+ if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+ break;
+ }
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return IRQ_HANDLED;
+}
+
+#define LEG_INT_PTR_B31 (1 << 31)
+#define LEG_INT_PTR_B30 (1 << 30)
+#define PF_BITS_MASK (0xF << 16)
+
+/**
+ * qla4_83xx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ uint32_t leg_int_ptr = 0;
+ unsigned long flags = 0;
+
+ ha->isr_count++;
+ leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+
+ /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+ if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
+ DEBUG7(ql4_printk(KERN_ERR, ha,
+ "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+ __func__));
+ return IRQ_NONE;
+ }
+
+ /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+ if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
+ DEBUG7(ql4_printk(KERN_ERR, ha,
+ "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+ __func__, (leg_int_ptr & PF_BITS_MASK),
+ ha->pf_bit));
+ return IRQ_NONE;
+ }
+
+ /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
+ * Control register and poll till Legacy Interrupt Pointer register
+ * bit30 is 0.
+ */
+ writel(0, &ha->qla4_83xx_reg->leg_int_trig);
+ do {
+ leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+ if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
+ break;
+ } while (leg_int_ptr & LEG_INT_PTR_B30);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
+ ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla4_8xxx_msi_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha;
+
+ ha = (struct scsi_qla_host *) dev_id;
+ if (!ha) {
+ DEBUG2(printk(KERN_INFO
+ "qla4xxx: MSIX: Interrupt with NULL host ptr\n"));
+ return IRQ_NONE;
+ }
+
+ ha->isr_count++;
+ /* clear the interrupt */
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+ /* read twice to ensure write is flushed */
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+
+ return qla4_8xxx_default_intr_handler(irq, dev_id);
+}
+
+static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ unsigned long flags;
+ uint32_t ival = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ ival = readl(&ha->qla4_83xx_reg->risc_intr);
+ if (ival == 0) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: It is a spurious mailbox interrupt!\n",
+ __func__);
+ ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+ ival &= ~INT_MASK_FW_MB;
+ writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+ goto exit;
+ }
+
+ qla4xxx_isr_decode_mailbox(ha,
+ readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+ ival &= ~INT_MASK_FW_MB;
+ writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+ ha->isr_count++;
+exit:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * qla4_8xxx_default_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ *
+ * This interrupt handler is called directly for MSI-X, and
+ * called indirectly for MSI.
+ **/
+irqreturn_t
+qla4_8xxx_default_intr_handler(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ unsigned long flags;
+ uint32_t intr_status;
+ uint8_t reqs_count = 0;
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ qla4_83xx_mailbox_intr_handler(irq, dev_id);
+ } else {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (1) {
+ if (!(readl(&ha->qla4_82xx_reg->host_int) &
+ ISRX_82XX_RISC_INT)) {
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
+ break;
+ }
+
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ if ((intr_status &
+ (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+ qla4_82xx_spurious_interrupt(ha, reqs_count);
+ break;
+ }
+
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+ if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+ break;
+ }
+ ha->isr_count++;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct scsi_qla_host *ha = dev_id;
+ unsigned long flags;
+ int intr_status;
+ uint32_t ival = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
+ if (ival == 0) {
+ ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
+ __func__);
+ goto exit_msix_rsp_q;
+ }
+ qla4xxx_process_response_queue(ha);
+ writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
+ } else {
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ if (intr_status & HSRX_RISC_IOCB_INT) {
+ qla4xxx_process_response_queue(ha);
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
+ __func__);
+ goto exit_msix_rsp_q;
+ }
+ }
+ ha->isr_count++;
+exit_msix_rsp_q:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * qla4xxx_process_aen - processes AENs generated by firmware
+ * @ha: pointer to host adapter structure.
+ * @process_aen: type of AENs to process
+ *
+ * Processes specific types of Asynchronous Events generated by firmware.
+ * The type of AENs to process is specified by process_aen and can be
+ * PROCESS_ALL_AENS 0
+ * FLUSH_DDB_CHANGED_AENS 1
+ * RELOGIN_DDB_CHANGED_AENS 2
+ **/
+void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
+{
+ uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+ struct aen *aen;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (ha->aen_out != ha->aen_in) {
+ aen = &ha->aen_q[ha->aen_out];
+ /* copy aen information to local structure */
+ for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+ mbox_sts[i] = aen->mbox_sts[i];
+
+ ha->aen_q_count++;
+ ha->aen_out++;
+
+ if (ha->aen_out == MAX_AEN_ENTRIES)
+ ha->aen_out = 0;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
+ " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
+ (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
+ mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
+
+ switch (mbox_sts[0]) {
+ case MBOX_ASTS_DATABASE_CHANGED:
+ switch (process_aen) {
+ case FLUSH_DDB_CHANGED_AENS:
+ DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
+ "[%d] state=%04x FLUSHED!\n",
+ ha->host_no, ha->aen_out,
+ mbox_sts[0], mbox_sts[2],
+ mbox_sts[3]));
+ break;
+ case PROCESS_ALL_AENS:
+ default:
+ /* Specific device. */
+ if (mbox_sts[1] == 1)
+ qla4xxx_process_ddb_changed(ha,
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4]);
+ break;
+ }
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int qla4xxx_request_irqs(struct scsi_qla_host *ha)
+{
+ int ret = 0;
+ int rval = QLA_ERROR;
+
+ if (is_qla40XX(ha))
+ goto try_intx;
+
+ if (ql4xenablemsix == 2) {
+ /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n",
+ __func__, ha->pdev->device);
+ goto try_intx;
+ }
+ goto try_msi;
+ }
+
+ if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
+ goto try_intx;
+
+ /* Trying MSI-X */
+ ret = qla4_8xxx_enable_msix(ha);
+ if (!ret) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "MSI-X: Enabled (0x%X).\n", ha->revision_id));
+ goto irq_attached;
+ } else {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n",
+ __func__, ha->pdev->device, ret);
+ goto try_intx;
+ }
+ }
+
+ ql4_printk(KERN_WARNING, ha,
+ "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
+
+try_msi:
+ /* Trying MSI */
+ ret = pci_enable_msi(ha->pdev);
+ if (!ret) {
+ ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
+ 0, DRIVER_NAME, ha);
+ if (!ret) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
+ set_bit(AF_MSI_ENABLED, &ha->flags);
+ goto irq_attached;
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "MSI: Failed to reserve interrupt %d "
+ "already in use.\n", ha->pdev->irq);
+ pci_disable_msi(ha->pdev);
+ }
+ }
+
+try_intx:
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
+ __func__);
+ goto irq_not_attached;
+ }
+
+ /* Trying INTx */
+ ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
+ IRQF_SHARED, DRIVER_NAME, ha);
+ if (!ret) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
+ set_bit(AF_INTx_ENABLED, &ha->flags);
+ goto irq_attached;
+
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "INTx: Failed to reserve interrupt %d already in"
+ " use.\n", ha->pdev->irq);
+ goto irq_not_attached;
+ }
+
+irq_attached:
+ set_bit(AF_IRQ_ATTACHED, &ha->flags);
+ ha->host->irq = ha->pdev->irq;
+ ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
+ __func__, ha->pdev->irq);
+ rval = QLA_SUCCESS;
+irq_not_attached:
+ return rval;
+}
+
+void qla4xxx_free_irqs(struct scsi_qla_host *ha)
+{
+ if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
+ qla4_8xxx_disable_msix(ha);
+ } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ pci_disable_msi(ha->pdev);
+ } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ }
+ }
+}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
new file mode 100644
index 000000000..c291fdff1
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -0,0 +1,2464 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <linux/ctype.h>
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+#include "ql4_version.h"
+
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int in_count)
+{
+ int i;
+
+ /* Load all mailbox registers, except mailbox 0. */
+ for (i = 1; i < in_count; i++)
+ writel(mbx_cmd[i], &ha->reg->mailbox[i]);
+
+ /* Wakeup firmware */
+ writel(mbx_cmd[0], &ha->reg->mailbox[0]);
+ readl(&ha->reg->mailbox[0]);
+ writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+}
+
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+ int intr_status;
+
+ intr_status = readl(&ha->reg->ctrl_status);
+ if (intr_status & INTR_PENDING) {
+ /*
+ * Service the interrupt.
+ * The ISR will save the mailbox status registers
+ * to a temporary storage location in the adapter structure.
+ */
+ ha->mbox_status_count = out_count;
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+ }
+}
+
+/**
+ * qla4xxx_is_intr_poll_mode – Are we allowed to poll for interrupts?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1=polling mode, 0=non-polling mode
+ **/
+static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
+ rval = 0;
+ } else {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+ test_bit(AF_ONLINE, &ha->flags) &&
+ !test_bit(AF_HA_REMOVAL, &ha->flags))
+ rval = 0;
+ }
+
+ return rval;
+}
+
+/**
+ * qla4xxx_mailbox_command - issues mailbox commands
+ * @ha: Pointer to host adapter structure.
+ * @inCount: number of mailbox registers to load.
+ * @outCount: number of mailbox registers to return.
+ * @mbx_cmd: data pointer for mailbox in registers.
+ * @mbx_sts: data pointer for mailbox out registers.
+ *
+ * This routine issue mailbox commands and waits for completion.
+ * If outCount is 0, this routine completes successfully WITHOUT waiting
+ * for the mailbox command to complete.
+ **/
+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
+ uint8_t outCount, uint32_t *mbx_cmd,
+ uint32_t *mbx_sts)
+{
+ int status = QLA_ERROR;
+ uint8_t i;
+ u_long wait_count;
+ unsigned long flags = 0;
+ uint32_t dev_state;
+
+ /* Make sure that pointers are valid */
+ if (!mbx_cmd || !mbx_sts) {
+ DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
+ "pointer\n", ha->host_no, __func__));
+ return status;
+ }
+
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
+ if ((is_aer_supported(ha)) &&
+ (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
+ "timeout MBX Exiting.\n", ha->host_no, __func__));
+ return status;
+ }
+
+ /* Mailbox code active */
+ wait_count = MBOX_TOV * 100;
+
+ while (wait_count--) {
+ mutex_lock(&ha->mbox_sem);
+ if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+ set_bit(AF_MBOX_COMMAND, &ha->flags);
+ mutex_unlock(&ha->mbox_sem);
+ break;
+ }
+ mutex_unlock(&ha->mbox_sem);
+ if (!wait_count) {
+ DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ msleep(10);
+ }
+
+ if (is_qla80XX(ha)) {
+ if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+ ha->host_no, __func__));
+ goto mbox_exit;
+ }
+ /* Do not send any mbx cmd if h/w is in failed state*/
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+ ha->isp_ops->idc_unlock(ha);
+ if (dev_state == QLA8XXX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+ ha->host_no, __func__);
+ goto mbox_exit;
+ }
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ ha->mbox_status_count = outCount;
+ for (i = 0; i < outCount; i++)
+ ha->mbox_status[i] = 0;
+
+ /* Queue the mailbox command to the firmware */
+ ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for completion */
+
+ /*
+ * If we don't want status, don't wait for the mailbox command to
+ * complete. For example, MBOX_CMD_RESET_FW doesn't return status,
+ * you must poll the inbound Interrupt Mask for completion.
+ */
+ if (outCount == 0) {
+ status = QLA_SUCCESS;
+ goto mbox_exit;
+ }
+
+ /*
+ * Wait for completion: Poll or completion queue
+ */
+ if (qla4xxx_is_intr_poll_mode(ha)) {
+ /* Poll for command to complete */
+ wait_count = jiffies + MBOX_TOV * HZ;
+ while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
+ if (time_after_eq(jiffies, wait_count))
+ break;
+ /*
+ * Service the interrupt.
+ * The ISR will save the mailbox status registers
+ * to a temporary storage location in the adapter
+ * structure.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->isp_ops->process_mailbox_interrupt(ha, outCount);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ msleep(10);
+ }
+ } else {
+ /* Do not poll for completion. Use completion queue */
+ set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+ wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
+ clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+ }
+
+ /* Check for mailbox timeout. */
+ if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
+ if (is_qla80XX(ha) &&
+ test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: prematurely completing mbx cmd as "
+ "firmware recovery detected\n",
+ ha->host_no, __func__));
+ goto mbox_exit;
+ }
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
+ ha->host_no, mbx_cmd[0]);
+ ha->mailbox_timeout_count++;
+ mbx_sts[0] = (-1);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ qla4_83xx_disable_pause(ha);
+ }
+ goto mbox_exit;
+ }
+
+ /*
+ * Copy the mailbox out registers to the caller's mailbox in/out
+ * structure.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (i = 0; i < outCount; i++)
+ mbx_sts[i] = ha->mbox_status[i];
+
+ /* Set return status and error flags (if applicable). */
+ switch (ha->mbox_status[0]) {
+ case MBOX_STS_COMMAND_COMPLETE:
+ status = QLA_SUCCESS;
+ break;
+
+ case MBOX_STS_INTERMEDIATE_COMPLETION:
+ status = QLA_SUCCESS;
+ break;
+
+ case MBOX_STS_BUSY:
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
+ ha->host_no, __func__, mbx_cmd[0]);
+ ha->mailbox_timeout_count++;
+ break;
+
+ default:
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
+ mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
+ mbx_sts[5], mbx_sts[6], mbx_sts[7]);
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+mbox_exit:
+ mutex_lock(&ha->mbox_sem);
+ clear_bit(AF_MBOX_COMMAND, &ha->flags);
+ mutex_unlock(&ha->mbox_sem);
+ clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+
+ return status;
+}
+
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+ mbox_cmd[2] = LSDW(phys_addr);
+ mbox_cmd[3] = MSDW(phys_addr);
+ mbox_cmd[4] = ha->fw_dump_tmplt_size;
+ mbox_cmd[5] = 0;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+ ha->host_no, __func__, mbox_cmd[0],
+ mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status == QLA_SUCCESS) {
+ ha->fw_dump_tmplt_size = mbox_sts[1];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+ if (ha->fw_dump_tmplt_size == 0)
+ status = QLA_ERROR;
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1]);
+ status = QLA_ERROR;
+ }
+
+ return status;
+}
+
+void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
+{
+ set_bit(AF_FW_RECOVERY, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
+ ha->host_no, __func__);
+
+ if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+ if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
+ complete(&ha->mbx_intr_comp);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
+ "recovery, doing premature completion of "
+ "mbx cmd\n", ha->host_no, __func__);
+
+ } else {
+ set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
+ "recovery, doing premature completion of "
+ "polling mbx cmd\n", ha->host_no, __func__);
+ }
+ }
+}
+
+static uint8_t
+qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+
+ if (is_qla8022(ha))
+ qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+
+ mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+ mbox_cmd[1] = 0;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_INITIALIZE_FIRMWARE"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+uint8_t
+qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state)
+{
+ uint8_t ipaddr_state;
+
+ switch (fw_ipaddr_state) {
+ case IP_ADDRSTATE_UNCONFIGURED:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
+ break;
+ case IP_ADDRSTATE_INVALID:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID;
+ break;
+ case IP_ADDRSTATE_ACQUIRING:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING;
+ break;
+ case IP_ADDRSTATE_TENTATIVE:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE;
+ break;
+ case IP_ADDRSTATE_DEPRICATED:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED;
+ break;
+ case IP_ADDRSTATE_PREFERRED:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_VALID;
+ break;
+ case IP_ADDRSTATE_DISABLING:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING;
+ break;
+ default:
+ ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
+ }
+ return ipaddr_state;
+}
+
+static void
+qla4xxx_update_local_ip(struct scsi_qla_host *ha,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+ ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+ ha->ip_config.ipv4_addr_state =
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);
+ ha->ip_config.eth_mtu_size =
+ le16_to_cpu(init_fw_cb->eth_mtu_size);
+ ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
+
+ if (ha->acb_version == ACB_SUPPORTED) {
+ ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
+ ha->ip_config.ipv6_addl_options =
+ le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
+ ha->ip_config.ipv6_tcp_options =
+ le16_to_cpu(init_fw_cb->ipv6_tcp_opts);
+ }
+
+ /* Save IPv4 Address Info */
+ memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
+ min(sizeof(ha->ip_config.ip_address),
+ sizeof(init_fw_cb->ipv4_addr)));
+ memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
+ min(sizeof(ha->ip_config.subnet_mask),
+ sizeof(init_fw_cb->ipv4_subnet)));
+ memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
+ min(sizeof(ha->ip_config.gateway),
+ sizeof(init_fw_cb->ipv4_gw_addr)));
+
+ ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
+ ha->ip_config.control = init_fw_cb->control;
+ ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf;
+ ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos;
+ ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid;
+ ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len;
+ memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid,
+ min(sizeof(ha->ip_config.ipv4_alt_cid),
+ sizeof(init_fw_cb->ipv4_dhcp_alt_cid)));
+ ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len;
+ memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid,
+ min(sizeof(ha->ip_config.ipv4_vid),
+ sizeof(init_fw_cb->ipv4_dhcp_vid)));
+ ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl;
+ ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
+ ha->ip_config.abort_timer = init_fw_cb->abort_timer;
+ ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts);
+ ha->ip_config.iscsi_max_pdu_size =
+ le16_to_cpu(init_fw_cb->iscsi_max_pdu_size);
+ ha->ip_config.iscsi_first_burst_len =
+ le16_to_cpu(init_fw_cb->iscsi_fburst_len);
+ ha->ip_config.iscsi_max_outstnd_r2t =
+ le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t);
+ ha->ip_config.iscsi_max_burst_len =
+ le16_to_cpu(init_fw_cb->iscsi_max_burst_len);
+ memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name,
+ min(sizeof(ha->ip_config.iscsi_name),
+ sizeof(init_fw_cb->iscsi_name)));
+
+ if (is_ipv6_enabled(ha)) {
+ /* Save IPv6 Address */
+ ha->ip_config.ipv6_link_local_state =
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);
+ ha->ip_config.ipv6_addr0_state =
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);
+ ha->ip_config.ipv6_addr1_state =
+ qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state);
+
+ switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) {
+ case IPV6_RTRSTATE_UNKNOWN:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_UNKNOWN;
+ break;
+ case IPV6_RTRSTATE_MANUAL:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_MANUAL;
+ break;
+ case IPV6_RTRSTATE_ADVERTISED:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_ADVERTISED;
+ break;
+ case IPV6_RTRSTATE_STALE:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_STALE;
+ break;
+ default:
+ ha->ip_config.ipv6_default_router_state =
+ ISCSI_ROUTER_STATE_UNKNOWN;
+ }
+
+ ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+ ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+ memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
+ init_fw_cb->ipv6_if_id,
+ min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
+ sizeof(init_fw_cb->ipv6_if_id)));
+ memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
+ min(sizeof(ha->ip_config.ipv6_addr0),
+ sizeof(init_fw_cb->ipv6_addr0)));
+ memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
+ min(sizeof(ha->ip_config.ipv6_addr1),
+ sizeof(init_fw_cb->ipv6_addr1)));
+ memcpy(&ha->ip_config.ipv6_default_router_addr,
+ init_fw_cb->ipv6_dflt_rtr_addr,
+ min(sizeof(ha->ip_config.ipv6_default_router_addr),
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+ ha->ip_config.ipv6_vlan_tag =
+ be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
+ ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
+ ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id;
+ ha->ip_config.ipv6_flow_lbl =
+ le16_to_cpu(init_fw_cb->ipv6_flow_lbl);
+ ha->ip_config.ipv6_traffic_class =
+ init_fw_cb->ipv6_traffic_class;
+ ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit;
+ ha->ip_config.ipv6_nd_reach_time =
+ le32_to_cpu(init_fw_cb->ipv6_nd_reach_time);
+ ha->ip_config.ipv6_nd_rexmit_timer =
+ le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer);
+ ha->ip_config.ipv6_nd_stale_timeout =
+ le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout);
+ ha->ip_config.ipv6_dup_addr_detect_count =
+ init_fw_cb->ipv6_dup_addr_detect_count;
+ ha->ip_config.ipv6_gw_advrt_mtu =
+ le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu);
+ ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;
+ }
+}
+
+uint8_t
+qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd,
+ uint32_t *mbox_sts,
+ struct addr_ctrl_blk *init_fw_cb,
+ dma_addr_t init_fw_cb_dma)
+{
+ if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
+
+ /* Save some info in adapter structure. */
+ ha->acb_version = init_fw_cb->acb_version;
+ ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
+ ha->heartbeat_interval = init_fw_cb->hb_interval;
+ memcpy(ha->name_string, init_fw_cb->iscsi_name,
+ min(sizeof(ha->name_string),
+ sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
+ /*memcpy(ha->alias, init_fw_cb->Alias,
+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_initialize_fw_cb - initializes firmware control block.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
+{
+ struct addr_ctrl_blk *init_fw_cb;
+ dma_addr_t init_fw_cb_dma;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
+ if (init_fw_cb == NULL) {
+ DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb_no_free;
+ }
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+
+ /* Get Initialize Firmware Control Block. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
+ QLA_SUCCESS) {
+ dma_free_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
+ goto exit_init_fw_cb;
+ }
+
+ /* Fill in the request and response queue information. */
+ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
+ init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
+ init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+ init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+ init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+ init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+ init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
+ init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
+
+ /* Set up required options. */
+ init_fw_cb->fw_options |=
+ __constant_cpu_to_le16(FWOPT_SESSION_MODE |
+ FWOPT_INITIATOR_MODE);
+
+ if (is_qla80XX(ha))
+ init_fw_cb->fw_options |=
+ __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+
+ init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+
+ init_fw_cb->add_fw_options = 0;
+ init_fw_cb->add_fw_options |=
+ __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+ init_fw_cb->add_fw_options |=
+ __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
+
+ if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
+ }
+
+ if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
+ init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
+ }
+ status = QLA_SUCCESS;
+
+exit_init_fw_cb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
+exit_init_fw_cb_no_free:
+ return status;
+}
+
+/**
+ * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
+{
+ struct addr_ctrl_blk *init_fw_cb;
+ dma_addr_t init_fw_cb_dma;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
+ if (init_fw_cb == NULL) {
+ printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
+ __func__);
+ return QLA_ERROR;
+ }
+
+ /* Get Initialize Firmware Control Block. */
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ dma_free_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
+ return QLA_ERROR;
+ }
+
+ /* Save IP Address. */
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_firmware_state - gets firmware state of HBA
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ /* Get firmware version */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ ha->firmware_state = mbox_sts[1];
+ ha->board_id = mbox_sts[2];
+ ha->addl_fw_state = mbox_sts[3];
+ DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
+ ha->host_no, __func__, ha->firmware_state);)
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_firmware_status - retrieves firmware status
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ /* Get firmware version */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ return QLA_ERROR;
+ }
+
+ /* High-water mark of IOCBs */
+ ha->iocb_hiwat = mbox_sts[2];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: firmware IOCBs available = %d\n", __func__,
+ ha->iocb_hiwat));
+
+ if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+ ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+
+ /* Ideally, we should not enter this code, as the # of firmware
+ * IOCBs is hard-coded in the firmware. We set a default
+ * iocb_hiwat here just in case */
+ if (ha->iocb_hiwat == 0) {
+ ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "%s: Setting IOCB's to = %d\n", __func__,
+ ha->iocb_hiwat));
+ }
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @fw_ddb_entry: Pointer to firmware's device database entry structure
+ * @num_valid_ddb_entries: Pointer to number of valid ddb entries
+ * @next_ddb_index: Pointer to next valid device database index
+ * @fw_ddb_device_state: Pointer to device state
+ **/
+int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
+ uint16_t fw_ddb_index,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma,
+ uint32_t *num_valid_ddb_entries,
+ uint32_t *next_ddb_index,
+ uint32_t *fw_ddb_device_state,
+ uint32_t *conn_err_detail,
+ uint16_t *tcp_source_port_num,
+ uint16_t *connection_id)
+{
+ int status = QLA_ERROR;
+ uint16_t options;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ /* Make sure the device index is valid */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
+ ha->host_no, __func__, fw_ddb_index));
+ goto exit_get_fwddb;
+ }
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ if (fw_ddb_entry)
+ memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
+
+ mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
+ mbox_cmd[1] = (uint32_t) fw_ddb_index;
+ mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
+ mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
+ mbox_cmd[4] = sizeof(struct dev_db_entry);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
+ QLA_ERROR) {
+ DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
+ " with status 0x%04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ goto exit_get_fwddb;
+ }
+ if (fw_ddb_index != mbox_sts[1]) {
+ DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
+ ha->host_no, __func__, fw_ddb_index,
+ mbox_sts[1]));
+ goto exit_get_fwddb;
+ }
+ if (fw_ddb_entry) {
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI6 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI4 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ }
+ }
+ if (num_valid_ddb_entries)
+ *num_valid_ddb_entries = mbox_sts[2];
+ if (next_ddb_index)
+ *next_ddb_index = mbox_sts[3];
+ if (fw_ddb_device_state)
+ *fw_ddb_device_state = mbox_sts[4];
+
+ /*
+ * RA: This mailbox has been changed to pass connection error and
+ * details. Its true for ISP4010 as per Version E - Not sure when it
+ * was changed. Get the time2wait from the fw_dd_entry field :
+ * default_time2wait which we call it as minTime2Wait DEV_DB_ENTRY
+ * struct.
+ */
+ if (conn_err_detail)
+ *conn_err_detail = mbox_sts[5];
+ if (tcp_source_port_num)
+ *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
+ if (connection_id)
+ *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
+ status = QLA_SUCCESS;
+
+exit_get_fwddb:
+ return status;
+}
+
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
+ mbox_cmd[1] = fw_ddb_index;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
+ __func__, status, mbox_sts[0], mbox_sts[1]));
+ return status;
+}
+
+/**
+ * qla4xxx_set_fwddb_entry - sets a ddb entry.
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @fw_ddb_entry_dma: dma address of ddb entry
+ * @mbx_sts: mailbox 0 to be returned or NULL
+ *
+ * This routine initializes or updates the adapter's device database
+ * entry for the specified device.
+ **/
+int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
+ dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ /* Do not wait for completion. The firmware will send us an
+ * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
+ mbox_cmd[1] = (uint32_t) fw_ddb_index;
+ mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
+ mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
+ mbox_cmd[4] = sizeof(struct dev_db_entry);
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (mbx_sts)
+ *mbx_sts = mbox_sts[0];
+ DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
+ ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
+
+ return status;
+}
+
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry, int options)
+{
+ int status;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+ mbox_cmd[1] = ddb_entry->fw_ddb_index;
+ mbox_cmd[3] = options;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
+ "failed sts %04X %04X", __func__,
+ mbox_sts[0], mbox_sts[1]));
+ if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
+ (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
+ set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * qla4xxx_get_crash_record - retrieves crash record.
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
+ **/
+void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct crash_record *crash_record = NULL;
+ dma_addr_t crash_record_dma = 0;
+ uint32_t crash_record_size = 0;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_cmd));
+
+ /* Get size of crash record. */
+ mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
+ ha->host_no, __func__));
+ goto exit_get_crash_record;
+ }
+ crash_record_size = mbox_sts[4];
+ if (crash_record_size == 0) {
+ DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
+ ha->host_no, __func__));
+ goto exit_get_crash_record;
+ }
+
+ /* Alloc Memory for Crash Record. */
+ crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
+ &crash_record_dma, GFP_KERNEL);
+ if (crash_record == NULL)
+ goto exit_get_crash_record;
+
+ /* Get Crash Record. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_cmd));
+
+ mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
+ mbox_cmd[2] = LSDW(crash_record_dma);
+ mbox_cmd[3] = MSDW(crash_record_dma);
+ mbox_cmd[4] = crash_record_size;
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS)
+ goto exit_get_crash_record;
+
+ /* Dump Crash Record. */
+
+exit_get_crash_record:
+ if (crash_record)
+ dma_free_coherent(&ha->pdev->dev, crash_record_size,
+ crash_record, crash_record_dma);
+}
+
+/**
+ * qla4xxx_get_conn_event_log - retrieves connection event log
+ * @ha: Pointer to host adapter structure.
+ **/
+void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct conn_event_log_entry *event_log = NULL;
+ dma_addr_t event_log_dma = 0;
+ uint32_t event_log_size = 0;
+ uint32_t num_valid_entries;
+ uint32_t oldest_entry = 0;
+ uint32_t max_event_log_entries;
+ uint8_t i;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_cmd));
+
+ /* Get size of crash record. */
+ mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS)
+ goto exit_get_event_log;
+
+ event_log_size = mbox_sts[4];
+ if (event_log_size == 0)
+ goto exit_get_event_log;
+
+ /* Alloc Memory for Crash Record. */
+ event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
+ &event_log_dma, GFP_KERNEL);
+ if (event_log == NULL)
+ goto exit_get_event_log;
+
+ /* Get Crash Record. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_cmd));
+
+ mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
+ mbox_cmd[2] = LSDW(event_log_dma);
+ mbox_cmd[3] = MSDW(event_log_dma);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
+ "log!\n", ha->host_no, __func__));
+ goto exit_get_event_log;
+ }
+
+ /* Dump Event Log. */
+ num_valid_entries = mbox_sts[1];
+
+ max_event_log_entries = event_log_size /
+ sizeof(struct conn_event_log_entry);
+
+ if (num_valid_entries > max_event_log_entries)
+ oldest_entry = num_valid_entries % max_event_log_entries;
+
+ DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
+ ha->host_no, num_valid_entries));
+
+ if (ql4xextended_error_logging == 3) {
+ if (oldest_entry == 0) {
+ /* Circular Buffer has not wrapped around */
+ for (i=0; i < num_valid_entries; i++) {
+ qla4xxx_dump_buffer((uint8_t *)event_log+
+ (i*sizeof(*event_log)),
+ sizeof(*event_log));
+ }
+ }
+ else {
+ /* Circular Buffer has wrapped around -
+ * display accordingly*/
+ for (i=oldest_entry; i < max_event_log_entries; i++) {
+ qla4xxx_dump_buffer((uint8_t *)event_log+
+ (i*sizeof(*event_log)),
+ sizeof(*event_log));
+ }
+ for (i=0; i < oldest_entry; i++) {
+ qla4xxx_dump_buffer((uint8_t *)event_log+
+ (i*sizeof(*event_log)),
+ sizeof(*event_log));
+ }
+ }
+ }
+
+exit_get_event_log:
+ if (event_log)
+ dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
+ event_log_dma);
+}
+
+/**
+ * qla4xxx_abort_task - issues Abort Task
+ * @ha: Pointer to host adapter structure.
+ * @srb: Pointer to srb entry
+ *
+ * This routine performs a LUN RESET on the specified target/lun.
+ * The caller must ensure that the ddb_entry and lun_entry pointers
+ * are valid before calling this routine.
+ **/
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct scsi_cmnd *cmd = srb->cmd;
+ int status = QLA_SUCCESS;
+ unsigned long flags = 0;
+ uint32_t index;
+
+ /*
+ * Send abort task command to ISP, so that the ISP will return
+ * request with ABORT status
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ index = (unsigned long)(unsigned char *)cmd->host_scribble;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Firmware already posted completion on response queue */
+ if (index == MAX_SRBS)
+ return status;
+
+ mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
+ mbox_cmd[1] = srb->ddb->fw_ddb_index;
+ mbox_cmd[2] = index;
+ /* Immediate Command Enable */
+ mbox_cmd[5] = 0x01;
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
+ status = QLA_ERROR;
+
+ DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: "
+ "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
+ ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
+ }
+
+ return status;
+}
+
+/**
+ * qla4xxx_reset_lun - issues LUN Reset
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: lun number
+ *
+ * This routine performs a LUN RESET on the specified target/lun.
+ * The caller must ensure that the ddb_entry and lun_entry pointers
+ * are valid before calling this routine.
+ **/
+int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
+ uint64_t lun)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ uint32_t scsi_lun[2];
+ int status = QLA_SUCCESS;
+
+ DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no,
+ ddb_entry->fw_ddb_index, lun));
+
+ /*
+ * Send lun reset command to ISP, so that the ISP will return all
+ * outstanding requests with RESET status
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
+
+ mbox_cmd[0] = MBOX_CMD_LUN_RESET;
+ mbox_cmd[1] = ddb_entry->fw_ddb_index;
+ /* FW expects LUN bytes 0-3 in Incoming Mailbox 2
+ * (LUN byte 0 is LSByte, byte 3 is MSByte) */
+ mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
+ /* FW expects LUN bytes 4-7 in Incoming Mailbox 3
+ * (LUN byte 4 is LSByte, byte 7 is MSByte) */
+ mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
+ mbox_cmd[5] = 0x01; /* Immediate Command Enable */
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
+ mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
+ status = QLA_ERROR;
+
+ return status;
+}
+
+/**
+ * qla4xxx_reset_target - issues target Reset
+ * @ha: Pointer to host adapter structure.
+ * @db_entry: Pointer to device database entry
+ * @un_entry: Pointer to lun entry structure
+ *
+ * This routine performs a TARGET RESET on the specified target.
+ * The caller must ensure that the ddb_entry pointers
+ * are valid before calling this routine.
+ **/
+int qla4xxx_reset_target(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
+ ddb_entry->fw_ddb_index));
+
+ /*
+ * Send target reset command to ISP, so that the ISP will return all
+ * outstanding requests with RESET status
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
+ mbox_cmd[1] = ddb_entry->fw_ddb_index;
+ mbox_cmd[5] = 0x01; /* Immediate Command Enable */
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
+ mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
+ status = QLA_ERROR;
+
+ return status;
+}
+
+int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
+ uint32_t offset, uint32_t len)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_READ_FLASH;
+ mbox_cmd[1] = LSDW(dma_addr);
+ mbox_cmd[2] = MSDW(dma_addr);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = len;
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
+ "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
+ __func__, mbox_sts[0], mbox_sts[1], offset, len));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
+ * @ha: Pointer to host adapter structure.
+ *
+ * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
+ **/
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
+{
+ struct about_fw_info *about_fw = NULL;
+ dma_addr_t about_fw_dma;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
+ if (!about_fw) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+ "for about_fw\n", __func__));
+ return status;
+ }
+
+ memset(about_fw, 0, sizeof(struct about_fw_info));
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
+ mbox_cmd[2] = LSDW(about_fw_dma);
+ mbox_cmd[3] = MSDW(about_fw_dma);
+ mbox_cmd[4] = sizeof(struct about_fw_info);
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ goto exit_about_fw;
+ }
+
+ /* Save version information. */
+ ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
+ ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
+ ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
+ ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
+ memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
+ sizeof(about_fw->fw_build_date));
+ memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
+ sizeof(about_fw->fw_build_time));
+ strcpy((char *)ha->fw_info.fw_build_user,
+ skip_spaces((char *)about_fw->fw_build_user));
+ ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
+ ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
+ strcpy((char *)ha->fw_info.extended_timestamp,
+ skip_spaces((char *)about_fw->extended_timestamp));
+
+ ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
+ ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
+ status = QLA_SUCCESS;
+
+exit_about_fw:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+ about_fw, about_fw_dma);
+ return status;
+}
+
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+ dma_addr_t dma_addr)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
+ mbox_cmd[1] = options;
+ mbox_cmd[2] = LSDW(dma_addr);
+ mbox_cmd[3] = MSDW(dma_addr);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
+ uint32_t *mbx_sts)
+{
+ int status;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
+ mbox_cmd[1] = ddb_index;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+ __func__, mbox_sts[0]));
+ }
+
+ *mbx_sts = mbox_sts[0];
+ return status;
+}
+
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
+{
+ int status;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
+ mbox_cmd[1] = ddb_index;
+
+ status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+ __func__, mbox_sts[0]));
+ }
+
+ return status;
+}
+
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+ uint32_t offset, uint32_t length, uint32_t options)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
+ mbox_cmd[1] = LSDW(dma_addr);
+ mbox_cmd[2] = MSDW(dma_addr);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = length;
+ mbox_cmd[5] = options;
+
+ status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
+ "failed w/ status %04X, mbx1 %04X\n",
+ __func__, mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+ uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ uint32_t dev_db_end_offset;
+ int status = QLA_ERROR;
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+ dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+ dev_db_end_offset = FLASH_OFFSET_DB_END;
+
+ if (dev_db_start_offset > dev_db_end_offset) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s:Invalid DDB index %d", __func__,
+ ddb_index));
+ goto exit_bootdb_failed;
+ }
+
+ if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
+ "failed\n", ha->host_no, __func__);
+ goto exit_bootdb_failed;
+ }
+
+ if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+ status = QLA_SUCCESS;
+
+exit_bootdb_failed:
+ return status;
+}
+
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+ uint32_t dev_db_start_offset;
+ uint32_t dev_db_end_offset;
+ int status = QLA_ERROR;
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+ if (is_qla40XX(ha)) {
+ dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ dev_db_end_offset = FLASH_OFFSET_DB_END;
+ } else {
+ dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+ (ha->hw.flt_region_ddb << 2);
+ /* flt_ddb_size is DDB table size for both ports
+ * so divide it by 2 to calculate the offset for second port
+ */
+ if (ha->port_num == 1)
+ dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+ dev_db_end_offset = dev_db_start_offset +
+ (ha->hw.flt_ddb_size / 2);
+ }
+
+ dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+
+ if (dev_db_start_offset > dev_db_end_offset) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s:Invalid DDB index %d", __func__,
+ ddb_index));
+ goto exit_fdb_failed;
+ }
+
+ if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
+ ha->host_no, __func__);
+ goto exit_fdb_failed;
+ }
+
+ if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+ status = QLA_SUCCESS;
+
+exit_fdb_failed:
+ return status;
+}
+
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx)
+{
+ int ret = 0;
+ int rval = QLA_ERROR;
+ uint32_t offset = 0, chap_size;
+ struct ql4_chap_table *chap_table;
+ dma_addr_t chap_dma;
+
+ chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ if (chap_table == NULL)
+ return -ENOMEM;
+
+ chap_size = sizeof(struct ql4_chap_table);
+ memset(chap_table, 0, chap_size);
+
+ if (is_qla40XX(ha))
+ offset = FLASH_CHAP_OFFSET | (idx * chap_size);
+ else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ /* flt_chap_size is CHAP table size for both ports
+ * so divide it by 2 to calculate the offset for second port
+ */
+ if (ha->port_num == 1)
+ offset += (ha->hw.flt_chap_size / 2);
+ offset += (idx * chap_size);
+ }
+
+ rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (rval != QLA_SUCCESS) {
+ ret = -EINVAL;
+ goto exit_get_chap;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+ __le16_to_cpu(chap_table->cookie)));
+
+ if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+ ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+ goto exit_get_chap;
+ }
+
+ strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+
+exit_get_chap:
+ dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+ return ret;
+}
+
+/**
+ * qla4xxx_set_chap - Make a chap entry at the given index
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to set
+ * @password: CHAP password to set
+ * @idx: CHAP index at which to make the entry
+ * @bidi: type of chap entry (chap_in or chap_out)
+ *
+ * Create chap entry at the given index with the information provided.
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx, int bidi)
+{
+ int ret = 0;
+ int rval = QLA_ERROR;
+ uint32_t offset = 0;
+ struct ql4_chap_table *chap_table;
+ uint32_t chap_size = 0;
+ dma_addr_t chap_dma;
+
+ chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ if (chap_table == NULL) {
+ ret = -ENOMEM;
+ goto exit_set_chap;
+ }
+
+ memset(chap_table, 0, sizeof(struct ql4_chap_table));
+ if (bidi)
+ chap_table->flags |= BIT_6; /* peer */
+ else
+ chap_table->flags |= BIT_7; /* local */
+ chap_table->secret_len = strlen(password);
+ strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
+ strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
+ chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+
+ if (is_qla40XX(ha)) {
+ chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
+ offset = FLASH_CHAP_OFFSET;
+ } else { /* Single region contains CHAP info for both ports which is
+ * divided into half for each port.
+ */
+ chap_size = ha->hw.flt_chap_size / 2;
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ if (ha->port_num == 1)
+ offset += chap_size;
+ }
+
+ offset += (idx * sizeof(struct ql4_chap_table));
+ rval = qla4xxx_set_flash(ha, chap_dma, offset,
+ sizeof(struct ql4_chap_table),
+ FLASH_OPT_RMW_COMMIT);
+
+ if (rval == QLA_SUCCESS && ha->chap_list) {
+ /* Update ha chap_list cache */
+ memcpy((struct ql4_chap_table *)ha->chap_list + idx,
+ chap_table, sizeof(struct ql4_chap_table));
+ }
+ dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+ if (rval != QLA_SUCCESS)
+ ret = -EINVAL;
+
+exit_set_chap:
+ return ret;
+}
+
+
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t chap_index)
+{
+ int rval = QLA_ERROR;
+ struct ql4_chap_table *chap_table = NULL;
+ int max_chap_entries;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+ rval = QLA_ERROR;
+ goto exit_uni_chap;
+ }
+
+ if (!username || !password) {
+ ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
+ rval = QLA_ERROR;
+ goto exit_uni_chap;
+ }
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_index > max_chap_entries) {
+ ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+ rval = QLA_ERROR;
+ goto exit_uni_chap;
+ }
+
+ mutex_lock(&ha->chap_sem);
+ chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
+ if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ rval = QLA_ERROR;
+ goto exit_unlock_uni_chap;
+ }
+
+ if (!(chap_table->flags & BIT_7)) {
+ ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
+ rval = QLA_ERROR;
+ goto exit_unlock_uni_chap;
+ }
+
+ strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+ strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+
+ rval = QLA_SUCCESS;
+
+exit_unlock_uni_chap:
+ mutex_unlock(&ha->chap_sem);
+exit_uni_chap:
+ return rval;
+}
+
+/**
+ * qla4xxx_get_chap_index - Get chap index given username and secret
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to be searched
+ * @password: CHAP password to be searched
+ * @bidi: Is this a BIDI CHAP
+ * @chap_index: CHAP index to be returned
+ *
+ * Match the username and password in the chap_list, return the index if a
+ * match is found. If a match is not found then add the entry in FLASH and
+ * return the index at which entry is written in the FLASH.
+ **/
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+ char *password, int bidi, uint16_t *chap_index)
+{
+ int i, rval;
+ int free_index = -1;
+ int found_index = 0;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+ return QLA_ERROR;
+ }
+
+ if (!username || !password) {
+ ql4_printk(KERN_ERR, ha, "Do not have username and psw\n");
+ return QLA_ERROR;
+ }
+
+ mutex_lock(&ha->chap_sem);
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+ if (chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
+ free_index = i;
+ continue;
+ }
+ if (bidi) {
+ if (chap_table->flags & BIT_7)
+ continue;
+ } else {
+ if (chap_table->flags & BIT_6)
+ continue;
+ }
+ if (!strncmp(chap_table->secret, password,
+ MAX_CHAP_SECRET_LEN) &&
+ !strncmp(chap_table->name, username,
+ MAX_CHAP_NAME_LEN)) {
+ *chap_index = i;
+ found_index = 1;
+ break;
+ }
+ }
+
+ /* If chap entry is not present and a free index is available then
+ * write the entry in flash
+ */
+ if (!found_index && free_index != -1) {
+ rval = qla4xxx_set_chap(ha, username, password,
+ free_index, bidi);
+ if (!rval) {
+ *chap_index = free_index;
+ found_index = 1;
+ }
+ }
+
+ mutex_unlock(&ha->chap_sem);
+
+ if (found_index)
+ return QLA_SUCCESS;
+ return QLA_ERROR;
+}
+
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+ uint16_t fw_ddb_index,
+ uint16_t connection_id,
+ uint16_t option)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+ mbox_cmd[1] = fw_ddb_index;
+ mbox_cmd[2] = connection_id;
+ mbox_cmd[3] = option;
+
+ status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
+ "option %04x failed w/ status %04X %04X\n",
+ __func__, option, mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+/**
+ * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
+ * @ha: Pointer to host adapter structure.
+ * @ext_tmo: idc timeout value
+ *
+ * Requests firmware to extend the idc timeout value.
+ **/
+static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ ext_tmo &= 0xf;
+
+ mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
+ mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
+ (ext_tmo << 8)); /* new timeout */
+ mbox_cmd[2] = ha->idc_info.info1;
+ mbox_cmd[3] = ha->idc_info.info2;
+ mbox_cmd[4] = ha->idc_info.info3;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: failed status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
+ __func__, ext_tmo);
+ }
+
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_disable_acb(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;
+
+ status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
+ "failed w/ status %04X %04X %04X", __func__,
+ mbox_sts[0], mbox_sts[1], mbox_sts[2]));
+ } else {
+ if (is_qla8042(ha) &&
+ test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
+ (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
+ /*
+ * Disable ACB mailbox command takes time to complete
+ * based on the total number of targets connected.
+ * For 512 targets, it took approximately 5 secs to
+ * complete. Setting the timeout value to 8, with the 3
+ * secs buffer.
+ */
+ qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
+ if (!wait_for_completion_timeout(&ha->disable_acb_comp,
+ IDC_EXTEND_TOV * HZ)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
+ __func__);
+ }
+ }
+ }
+ return status;
+}
+
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_ACB;
+ mbox_cmd[1] = acb_type;
+ mbox_cmd[2] = LSDW(acb_dma);
+ mbox_cmd[3] = MSDW(acb_dma);
+ mbox_cmd[4] = len;
+
+ status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma)
+{
+ int status = QLA_SUCCESS;
+
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_SET_ACB;
+ mbox_cmd[1] = 0; /* Primary ACB */
+ mbox_cmd[2] = LSDW(acb_dma);
+ mbox_cmd[3] = MSDW(acb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct iscsi_cls_conn *cls_conn,
+ uint32_t *mbx_sts)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ struct iscsi_conn *conn;
+ struct iscsi_session *sess;
+ struct qla_conn *qla_conn;
+ struct sockaddr *dst_addr;
+ dma_addr_t fw_ddb_entry_dma;
+ int status = QLA_SUCCESS;
+ int rval = 0;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ char *ip;
+ uint16_t iscsi_opts = 0;
+ uint32_t options = 0;
+ uint16_t idx, *ptid;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer.\n",
+ __func__));
+ rval = -ENOMEM;
+ goto exit_set_param_no_free;
+ }
+
+ conn = cls_conn->dd_data;
+ qla_conn = conn->dd_data;
+ sess = conn->session;
+ dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
+
+ if (dst_addr->sa_family == AF_INET6)
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (status == QLA_ERROR) {
+ rval = -EINVAL;
+ goto exit_set_param;
+ }
+
+ ptid = (uint16_t *)&fw_ddb_entry->isid[1];
+ *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
+ fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
+ fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
+ fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+
+ iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
+
+ memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));
+
+ if (sess->targetname != NULL) {
+ memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+ min(strlen(sess->targetname),
+ sizeof(fw_ddb_entry->iscsi_name)));
+ }
+
+ memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
+ memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));
+
+ fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;
+
+ if (dst_addr->sa_family == AF_INET) {
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
+ fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Destination Address [%pI4]: index [%d]\n",
+ __func__, fw_ddb_entry->ip_addr,
+ ddb_entry->fw_ddb_index));
+ } else if (dst_addr->sa_family == AF_INET6) {
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
+ fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
+ fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Destination Address [%pI6]: index [%d]\n",
+ __func__, fw_ddb_entry->ip_addr,
+ ddb_entry->fw_ddb_index));
+ } else {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Failed to get IP Address\n",
+ __func__);
+ rval = -EINVAL;
+ goto exit_set_param;
+ }
+
+ /* CHAP */
+ if (sess->username != NULL && sess->password != NULL) {
+ if (strlen(sess->username) && strlen(sess->password)) {
+ iscsi_opts |= BIT_7;
+
+ rval = qla4xxx_get_chap_index(ha, sess->username,
+ sess->password,
+ LOCAL_CHAP, &idx);
+ if (rval)
+ goto exit_set_param;
+
+ fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
+ }
+ }
+
+ if (sess->username_in != NULL && sess->password_in != NULL) {
+ /* Check if BIDI CHAP */
+ if (strlen(sess->username_in) && strlen(sess->password_in)) {
+ iscsi_opts |= BIT_4;
+
+ rval = qla4xxx_get_chap_index(ha, sess->username_in,
+ sess->password_in,
+ BIDI_CHAP, &idx);
+ if (rval)
+ goto exit_set_param;
+ }
+ }
+
+ if (sess->initial_r2t_en)
+ iscsi_opts |= BIT_10;
+
+ if (sess->imm_data_en)
+ iscsi_opts |= BIT_11;
+
+ fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);
+
+ if (conn->max_recv_dlength)
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+ __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
+
+ if (sess->max_r2t)
+ fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
+
+ if (sess->first_burst)
+ fw_ddb_entry->iscsi_first_burst_len =
+ __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
+
+ if (sess->max_burst)
+ fw_ddb_entry->iscsi_max_burst_len =
+ __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
+
+ if (sess->time2wait)
+ fw_ddb_entry->iscsi_def_time2wait =
+ cpu_to_le16(sess->time2wait);
+
+ if (sess->time2retain)
+ fw_ddb_entry->iscsi_def_time2retain =
+ cpu_to_le16(sess->time2retain);
+
+ status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry_dma, mbx_sts);
+
+ if (status != QLA_SUCCESS)
+ rval = -EINVAL;
+exit_set_param:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+exit_set_param_no_free:
+ return rval;
+}
+
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+ uint16_t stats_size, dma_addr_t stats_dma)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
+ mbox_cmd[1] = fw_ddb_index;
+ mbox_cmd[2] = LSDW(stats_dma);
+ mbox_cmd[3] = MSDW(stats_dma);
+ mbox_cmd[4] = stats_size;
+
+ status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+ uint32_t ip_idx, uint32_t *sts)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_SUCCESS;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
+ mbox_cmd[1] = acb_idx;
+ mbox_cmd[2] = ip_idx;
+
+ status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
+ "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
+ "status %04X\n", __func__, mbox_sts[0]));
+ }
+ memcpy(sts, mbox_sts, sizeof(mbox_sts));
+ return status;
+}
+
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
+ mbox_cmd[1] = LSDW(nvram_dma);
+ mbox_cmd[2] = MSDW(nvram_dma);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = size;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+ uint32_t offset, uint32_t size)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
+ mbox_cmd[1] = LSDW(nvram_dma);
+ mbox_cmd[2] = MSDW(nvram_dma);
+ mbox_cmd[3] = offset;
+ mbox_cmd[4] = size;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+ uint32_t region, uint32_t field0,
+ uint32_t field1)
+{
+ int status = QLA_SUCCESS;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
+ mbox_cmd[3] = region;
+ mbox_cmd[4] = field0;
+ mbox_cmd[5] = field1;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "status %04X\n", ha->host_no, __func__,
+ mbox_sts[0]));
+ }
+ return status;
+}
+
+/**
+ * qla4_8xxx_set_param - set driver version in firmware.
+ * @ha: Pointer to host adapter structure.
+ * @param: Parameter to set i.e driver version
+ **/
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ uint32_t status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_PARAM;
+ if (param == SET_DRVR_VERSION) {
+ mbox_cmd[1] = SET_DRVR_VERSION;
+ strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
+ MAX_DRVR_VER_LEN - 1);
+ } else {
+ ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
+ __func__, param);
+ status = QLA_ERROR;
+ goto exit_set_param;
+ }
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
+ mbox_sts);
+ if (status == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+ __func__, mbox_sts[0]);
+
+exit_set_param:
+ return status;
+}
+
+/**
+ * qla4_83xx_post_idc_ack - post IDC ACK
+ * @ha: Pointer to host adapter structure.
+ *
+ * Posts IDC ACK for IDC Request Notification AEN.
+ **/
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_IDC_ACK;
+ mbox_cmd[1] = ha->idc_info.request_desc;
+ mbox_cmd[2] = ha->idc_info.info1;
+ mbox_cmd[3] = ha->idc_info.info2;
+ mbox_cmd[4] = ha->idc_info.info3;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+ mbox_sts[0]);
+ else
+ ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);
+
+ return status;
+}
+
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct addr_ctrl_blk *acb = NULL;
+ uint32_t acb_len = sizeof(struct addr_ctrl_blk);
+ int rval = QLA_SUCCESS;
+ dma_addr_t acb_dma;
+
+ acb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_config_acb;
+ }
+ memset(acb, 0, acb_len);
+
+ switch (acb_config) {
+ case ACB_CONFIG_DISABLE:
+ rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ if (!ha->saved_acb)
+ ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);
+
+ if (!ha->saved_acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_free_acb;
+ }
+ memcpy(ha->saved_acb, acb, acb_len);
+ break;
+ case ACB_CONFIG_SET:
+
+ if (!ha->saved_acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_free_acb;
+ }
+
+ memcpy(acb, ha->saved_acb, acb_len);
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
+ __func__);
+ }
+
+exit_free_acb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
+ acb_dma);
+exit_config_acb:
+ if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
+ kfree(ha->saved_acb);
+ ha->saved_acb = NULL;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s %s\n", __func__,
+ rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+ return rval;
+}
+
+int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status == QLA_SUCCESS)
+ *config = mbox_sts[1];
+ else
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+ mbox_sts[0]);
+
+ return status;
+}
+
+int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG;
+ mbox_cmd[1] = *config;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status != QLA_SUCCESS)
+ ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+ mbox_sts[0]);
+
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
new file mode 100644
index 000000000..3bf418fbd
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -0,0 +1,256 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
+{
+ writel(cmd, isp_nvram(ha));
+ readl(isp_nvram(ha));
+ udelay(1);
+}
+
+static inline int eeprom_size(struct scsi_qla_host *ha)
+{
+ return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
+}
+
+static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
+{
+ return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
+ FM93C86A_NO_ADDR_BITS_16 ;
+}
+
+static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
+{
+ return FM93C56A_DATA_BITS_16;
+}
+
+static int fm93c56a_select(struct scsi_qla_host * ha)
+{
+ DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
+
+ ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
+ eeprom_cmd(ha->eeprom_cmd_data, ha);
+ return 1;
+}
+
+static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
+{
+ int i;
+ int mask;
+ int dataBit;
+ int previousBit;
+
+ /* Clock in a zero, then do the start bit. */
+ eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
+
+ eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+ AUBURN_EEPROM_CLK_RISE, ha);
+ eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+ AUBURN_EEPROM_CLK_FALL, ha);
+
+ mask = 1 << (FM93C56A_CMD_BITS - 1);
+
+ /* Force the previous data bit to be different. */
+ previousBit = 0xffff;
+ for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+ dataBit =
+ (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+
+ /*
+ * If the bit changed, then change the DO state to
+ * match.
+ */
+ eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
+ previousBit = dataBit;
+ }
+ eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_RISE, ha);
+ eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_FALL, ha);
+
+ cmd = cmd << 1;
+ }
+ mask = 1 << (eeprom_no_addr_bits(ha) - 1);
+
+ /* Force the previous data bit to be different. */
+ previousBit = 0xffff;
+ for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
+ dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
+ AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /*
+ * If the bit changed, then change the DO state to
+ * match.
+ */
+ eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
+
+ previousBit = dataBit;
+ }
+ eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_RISE, ha);
+ eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_FALL, ha);
+
+ addr = addr << 1;
+ }
+ return 1;
+}
+
+static int fm93c56a_deselect(struct scsi_qla_host * ha)
+{
+ ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
+ eeprom_cmd(ha->eeprom_cmd_data, ha);
+ return 1;
+}
+
+static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
+{
+ int i;
+ int data = 0;
+ int dataBit;
+
+ /* Read the data bits
+ * The first bit is a dummy. Clock right over it. */
+ for (i = 0; i < eeprom_no_data_bits(ha); i++) {
+ eeprom_cmd(ha->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_RISE, ha);
+ eeprom_cmd(ha->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_FALL, ha);
+
+ dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+
+ data = (data << 1) | dataBit;
+ }
+
+ *value = data;
+ return 1;
+}
+
+static int eeprom_readword(int eepromAddr, u16 * value,
+ struct scsi_qla_host * ha)
+{
+ fm93c56a_select(ha);
+ fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
+ fm93c56a_datain(ha, value);
+ fm93c56a_deselect(ha);
+ return 1;
+}
+
+/* Hardware_lock must be set before calling */
+u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
+{
+ u16 val = 0;
+
+ /* NOTE: NVRAM uses half-word addresses */
+ eeprom_readword(offset, &val, ha);
+ return val;
+}
+
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset)
+{
+ u16 val = 0;
+ u8 rval = 0;
+ int index = 0;
+
+ if (offset & 0x1)
+ index = (offset - 1) / 2;
+ else
+ index = offset / 2;
+
+ val = le16_to_cpu(rd_nvram_word(ha, index));
+
+ if (offset & 0x1)
+ rval = (u8)((val & 0xff00) >> 8);
+ else
+ rval = (u8)((val & 0x00ff));
+
+ return rval;
+}
+
+int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
+{
+ int status = QLA_ERROR;
+ uint16_t checksum = 0;
+ uint32_t index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (index = 0; index < eeprom_size(ha); index++)
+ checksum += rd_nvram_word(ha, index);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (checksum == 0)
+ status = QLA_SUCCESS;
+
+ return status;
+}
+
+/*************************************************************************
+ *
+ * Hardware Semaphore routines
+ *
+ *************************************************************************/
+int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
+{
+ uint32_t value;
+ unsigned long flags;
+ unsigned int seconds = 30;
+
+ DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
+ "0x%x\n", ha->host_no, sem_mask, sem_bits));
+ do {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel((sem_mask | sem_bits), isp_semaphore(ha));
+ value = readw(isp_semaphore(ha));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if ((value & (sem_mask >> 16)) == sem_bits) {
+ DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
+ "code = 0x%x\n", ha->host_no,
+ sem_mask, sem_bits));
+ return QLA_SUCCESS;
+ }
+ ssleep(1);
+ } while (--seconds);
+ return QLA_ERROR;
+}
+
+void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel(sem_mask, isp_semaphore(ha));
+ readl(isp_semaphore(ha));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
+ sem_mask));
+}
+
+int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
+{
+ uint32_t value;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel((sem_mask | sem_bits), isp_semaphore(ha));
+ value = readw(isp_semaphore(ha));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if ((value & (sem_mask >> 16)) == sem_bits) {
+ DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
+ "0x%x, sema code=0x%x\n", ha->host_no,
+ sem_mask, sem_bits, value));
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
new file mode 100644
index 000000000..e97d79ff1
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -0,0 +1,254 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef _QL4XNVRM_H_
+#define _QL4XNVRM_H_
+
+/**
+ * AM29LV Flash definitions
+ **/
+#define FM93C56A_SIZE_8 0x100
+#define FM93C56A_SIZE_16 0x80
+#define FM93C66A_SIZE_8 0x200
+#define FM93C66A_SIZE_16 0x100/* 4010 */
+#define FM93C86A_SIZE_16 0x400/* 4022 */
+
+#define FM93C56A_START 0x1
+
+/* Commands */
+#define FM93C56A_READ 0x2
+#define FM93C56A_WEN 0x0
+#define FM93C56A_WRITE 0x1
+#define FM93C56A_WRITE_ALL 0x0
+#define FM93C56A_WDS 0x0
+#define FM93C56A_ERASE 0x3
+#define FM93C56A_ERASE_ALL 0x0
+
+/* Command Extensions */
+#define FM93C56A_WEN_EXT 0x3
+#define FM93C56A_WRITE_ALL_EXT 0x1
+#define FM93C56A_WDS_EXT 0x0
+#define FM93C56A_ERASE_ALL_EXT 0x2
+
+/* Address Bits */
+#define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */
+#define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */
+#define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */
+
+/* Data Bits */
+#define FM93C56A_DATA_BITS_16 16
+#define FM93C56A_DATA_BITS_8 8
+
+/* Special Bits */
+#define FM93C56A_READ_DUMMY_BITS 1
+#define FM93C56A_READY 0
+#define FM93C56A_BUSY 1
+#define FM93C56A_CMD_BITS 2
+
+/* Auburn Bits */
+#define AUBURN_EEPROM_DI 0x8
+#define AUBURN_EEPROM_DI_0 0x0
+#define AUBURN_EEPROM_DI_1 0x8
+#define AUBURN_EEPROM_DO 0x4
+#define AUBURN_EEPROM_DO_0 0x0
+#define AUBURN_EEPROM_DO_1 0x4
+#define AUBURN_EEPROM_CS 0x2
+#define AUBURN_EEPROM_CS_0 0x0
+#define AUBURN_EEPROM_CS_1 0x2
+#define AUBURN_EEPROM_CLK_RISE 0x1
+#define AUBURN_EEPROM_CLK_FALL 0x0
+
+/**/
+/* EEPROM format */
+/**/
+struct bios_params {
+ uint16_t SpinUpDelay:1;
+ uint16_t BIOSDisable:1;
+ uint16_t MMAPEnable:1;
+ uint16_t BootEnable:1;
+ uint16_t Reserved0:12;
+ uint8_t bootID0:7;
+ uint8_t bootID0Valid:1;
+ uint8_t bootLUN0[8];
+ uint8_t bootID1:7;
+ uint8_t bootID1Valid:1;
+ uint8_t bootLUN1[8];
+ uint16_t MaxLunsPerTarget;
+ uint8_t Reserved1[10];
+};
+
+struct eeprom_port_cfg {
+
+ /* MTU MAC 0 */
+ u16 etherMtu_mac;
+
+ /* Flow Control MAC 0 */
+ u16 pauseThreshold_mac;
+ u16 resumeThreshold_mac;
+ u16 reserved[13];
+};
+
+struct eeprom_function_cfg {
+ u8 reserved[30];
+
+ /* MAC ADDR */
+ u8 macAddress[6];
+ u8 macAddressSecondary[6];
+ u16 subsysVendorId;
+ u16 subsysDeviceId;
+};
+
+struct eeprom_data {
+ union {
+ struct { /* isp4010 */
+ u8 asic_id[4]; /* x00 */
+ u8 version; /* x04 */
+ u8 reserved; /* x05 */
+ u16 board_id; /* x06 */
+#define EEPROM_BOARDID_ELDORADO 1
+#define EEPROM_BOARDID_PLACER 2
+
+#define EEPROM_SERIAL_NUM_SIZE 16
+ u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */
+
+ /* ExtHwConfig: */
+ /* Offset = 24bytes
+ *
+ * | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP | |
+ * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ */
+ u16 ext_hw_conf; /* x18 */
+ u8 mac0[6]; /* x1A */
+ u8 mac1[6]; /* x20 */
+ u8 mac2[6]; /* x26 */
+ u8 mac3[6]; /* x2C */
+ u16 etherMtu; /* x32 */
+ u16 macConfig; /* x34 */
+#define MAC_CONFIG_ENABLE_ANEG 0x0001
+#define MAC_CONFIG_ENABLE_PAUSE 0x0002
+ u16 phyConfig; /* x36 */
+#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
+#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
+ u16 reserved_56; /* x38 */
+
+#define EEPROM_UNUSED_1_SIZE 2
+ u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
+ u16 bufletSize; /* x3C */
+ u16 bufletCount; /* x3E */
+ u16 bufletPauseThreshold; /* x40 */
+ u16 tcpWindowThreshold50; /* x42 */
+ u16 tcpWindowThreshold25; /* x44 */
+ u16 tcpWindowThreshold0; /* x46 */
+ u16 ipHashTableBaseHi; /* x48 */
+ u16 ipHashTableBaseLo; /* x4A */
+ u16 ipHashTableSize; /* x4C */
+ u16 tcpHashTableBaseHi; /* x4E */
+ u16 tcpHashTableBaseLo; /* x50 */
+ u16 tcpHashTableSize; /* x52 */
+ u16 ncbTableBaseHi; /* x54 */
+ u16 ncbTableBaseLo; /* x56 */
+ u16 ncbTableSize; /* x58 */
+ u16 drbTableBaseHi; /* x5A */
+ u16 drbTableBaseLo; /* x5C */
+ u16 drbTableSize; /* x5E */
+
+#define EEPROM_UNUSED_2_SIZE 4
+ u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */
+ u16 ipReassemblyTimeout; /* x64 */
+ u16 tcpMaxWindowSizeHi; /* x66 */
+ u16 tcpMaxWindowSizeLo; /* x68 */
+ u32 net_ip_addr0; /* x6A Added for TOE
+ * functionality. */
+ u32 net_ip_addr1; /* x6E */
+ u32 scsi_ip_addr0; /* x72 */
+ u32 scsi_ip_addr1; /* x76 */
+#define EEPROM_UNUSED_3_SIZE 128 /* changed from 144 to account
+ * for ip addresses */
+ u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */
+ u16 subsysVendorId_f0; /* xFA */
+ u16 subsysDeviceId_f0; /* xFC */
+
+ /* Address = 0x7F */
+#define FM93C56A_SIGNATURE 0x9356
+#define FM93C66A_SIGNATURE 0x9366
+ u16 signature; /* xFE */
+
+#define EEPROM_UNUSED_4_SIZE 250
+ u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */
+ u16 subsysVendorId_f1; /* x1FA */
+ u16 subsysDeviceId_f1; /* x1FC */
+ u16 checksum; /* x1FE */
+ } __attribute__ ((packed)) isp4010;
+ struct { /* isp4022 */
+ u8 asicId[4]; /* x00 */
+ u8 version; /* x04 */
+ u8 reserved_5; /* x05 */
+ u16 boardId; /* x06 */
+ u8 boardIdStr[16]; /* x08 */
+ u8 serialNumber[16]; /* x18 */
+
+ /* External Hardware Configuration */
+ u16 ext_hw_conf; /* x28 */
+
+ /* MAC 0 CONFIGURATION */
+ struct eeprom_port_cfg macCfg_port0; /* x2A */
+
+ /* MAC 1 CONFIGURATION */
+ struct eeprom_port_cfg macCfg_port1; /* x4A */
+
+ /* DDR SDRAM Configuration */
+ u16 bufletSize; /* x6A */
+ u16 bufletCount; /* x6C */
+ u16 tcpWindowThreshold50; /* x6E */
+ u16 tcpWindowThreshold25; /* x70 */
+ u16 tcpWindowThreshold0; /* x72 */
+ u16 ipHashTableBaseHi; /* x74 */
+ u16 ipHashTableBaseLo; /* x76 */
+ u16 ipHashTableSize; /* x78 */
+ u16 tcpHashTableBaseHi; /* x7A */
+ u16 tcpHashTableBaseLo; /* x7C */
+ u16 tcpHashTableSize; /* x7E */
+ u16 ncbTableBaseHi; /* x80 */
+ u16 ncbTableBaseLo; /* x82 */
+ u16 ncbTableSize; /* x84 */
+ u16 drbTableBaseHi; /* x86 */
+ u16 drbTableBaseLo; /* x88 */
+ u16 drbTableSize; /* x8A */
+ u16 reserved_142[4]; /* x8C */
+
+ /* TCP/IP Parameters */
+ u16 ipReassemblyTimeout; /* x94 */
+ u16 tcpMaxWindowSize; /* x96 */
+ u16 ipSecurity; /* x98 */
+ u8 reserved_156[294]; /* x9A */
+ u16 qDebug[8]; /* QLOGIC USE ONLY x1C0 */
+ struct eeprom_function_cfg funcCfg_fn0; /* x1D0 */
+ u16 reserved_510; /* x1FE */
+
+ /* Address = 512 */
+ u8 oemSpace[432]; /* x200 */
+ struct bios_params sBIOSParams_fn1; /* x3B0 */
+ struct eeprom_function_cfg funcCfg_fn1; /* x3D0 */
+ u16 reserved_1022; /* x3FE */
+
+ /* Address = 1024 */
+ u8 reserved_1024[464]; /* x400 */
+ struct eeprom_function_cfg funcCfg_fn2; /* x5D0 */
+ u16 reserved_1534; /* x5FE */
+
+ /* Address = 1536 */
+ u8 reserved_1536[432]; /* x600 */
+ struct bios_params sBIOSParams_fn3; /* x7B0 */
+ struct eeprom_function_cfg funcCfg_fn3; /* x7D0 */
+ u16 checksum; /* x7FE */
+ } __attribute__ ((packed)) isp4022;
+ };
+};
+
+
+#endif /* _QL4XNVRM_H_ */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
new file mode 100644
index 000000000..7c3365864
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -0,0 +1,4275 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_inline.h"
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+#define TIMEOUT_100_MS 100
+#define MASK(n) DMA_BIT_MASK(n)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M (0)
+#define QLA82XX_PCI_MS_2M (0x80000)
+#define QLA82XX_PCI_OCM0_2M (0xc0000)
+#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+/* CRB window related */
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+ ((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
+#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+static inline void __iomem *
+qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off)
+{
+ if ((off < ha->first_page_group_end) &&
+ (off >= ha->first_page_group_start))
+ return (void __iomem *)(ha->nx_pcibase + off);
+
+ return NULL;
+}
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+static int qla4_8xxx_crb_table_initialized;
+
+#define qla4_8xxx_crb_addr_transform(name) \
+ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+static void
+qla4_82xx_crb_addr_transform_setup(void)
+{
+ qla4_8xxx_crb_addr_transform(XDMA);
+ qla4_8xxx_crb_addr_transform(TIMR);
+ qla4_8xxx_crb_addr_transform(SRE);
+ qla4_8xxx_crb_addr_transform(SQN3);
+ qla4_8xxx_crb_addr_transform(SQN2);
+ qla4_8xxx_crb_addr_transform(SQN1);
+ qla4_8xxx_crb_addr_transform(SQN0);
+ qla4_8xxx_crb_addr_transform(SQS3);
+ qla4_8xxx_crb_addr_transform(SQS2);
+ qla4_8xxx_crb_addr_transform(SQS1);
+ qla4_8xxx_crb_addr_transform(SQS0);
+ qla4_8xxx_crb_addr_transform(RPMX7);
+ qla4_8xxx_crb_addr_transform(RPMX6);
+ qla4_8xxx_crb_addr_transform(RPMX5);
+ qla4_8xxx_crb_addr_transform(RPMX4);
+ qla4_8xxx_crb_addr_transform(RPMX3);
+ qla4_8xxx_crb_addr_transform(RPMX2);
+ qla4_8xxx_crb_addr_transform(RPMX1);
+ qla4_8xxx_crb_addr_transform(RPMX0);
+ qla4_8xxx_crb_addr_transform(ROMUSB);
+ qla4_8xxx_crb_addr_transform(SN);
+ qla4_8xxx_crb_addr_transform(QMN);
+ qla4_8xxx_crb_addr_transform(QMS);
+ qla4_8xxx_crb_addr_transform(PGNI);
+ qla4_8xxx_crb_addr_transform(PGND);
+ qla4_8xxx_crb_addr_transform(PGN3);
+ qla4_8xxx_crb_addr_transform(PGN2);
+ qla4_8xxx_crb_addr_transform(PGN1);
+ qla4_8xxx_crb_addr_transform(PGN0);
+ qla4_8xxx_crb_addr_transform(PGSI);
+ qla4_8xxx_crb_addr_transform(PGSD);
+ qla4_8xxx_crb_addr_transform(PGS3);
+ qla4_8xxx_crb_addr_transform(PGS2);
+ qla4_8xxx_crb_addr_transform(PGS1);
+ qla4_8xxx_crb_addr_transform(PGS0);
+ qla4_8xxx_crb_addr_transform(PS);
+ qla4_8xxx_crb_addr_transform(PH);
+ qla4_8xxx_crb_addr_transform(NIU);
+ qla4_8xxx_crb_addr_transform(I2Q);
+ qla4_8xxx_crb_addr_transform(EG);
+ qla4_8xxx_crb_addr_transform(MN);
+ qla4_8xxx_crb_addr_transform(MS);
+ qla4_8xxx_crb_addr_transform(CAS2);
+ qla4_8xxx_crb_addr_transform(CAS1);
+ qla4_8xxx_crb_addr_transform(CAS0);
+ qla4_8xxx_crb_addr_transform(CAM);
+ qla4_8xxx_crb_addr_transform(C2C1);
+ qla4_8xxx_crb_addr_transform(C2C0);
+ qla4_8xxx_crb_addr_transform(SMB);
+ qla4_8xxx_crb_addr_transform(OCM0);
+ qla4_8xxx_crb_addr_transform(I2C0);
+
+ qla4_8xxx_crb_table_initialized = 1;
+}
+
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned qla4_82xx_crb_hub_agt[64] = {
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* Device states */
+static char *qdev_state[] = {
+ "Unknown",
+ "Cold",
+ "Initializing",
+ "Ready",
+ "Need Reset",
+ "Need Quiescent",
+ "Failed",
+ "Quiescent",
+};
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
+{
+ u32 win_read;
+
+ ha->crb_win = CRB_HI(*off);
+ writel(ha->crb_win,
+ (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it. */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != ha->crb_win) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written crbwin (0x%x) != Read crbwin (0x%x),"
+ " off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
+ }
+ *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
+
+void
+qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
+{
+ unsigned long flags = 0;
+ int rv;
+
+ rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla4_82xx_crb_win_lock(ha);
+ qla4_82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+
+ writel(data, (void __iomem *)off);
+
+ if (rv == 1) {
+ qla4_82xx_crb_win_unlock(ha);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+}
+
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
+{
+ unsigned long flags = 0;
+ int rv;
+ u32 data;
+
+ rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla4_82xx_crb_win_lock(ha);
+ qla4_82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+ data = readl((void __iomem *)off);
+
+ if (rv == 1) {
+ qla4_82xx_crb_win_unlock(ha);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return data;
+}
+
+/* Minidump related functions */
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
+{
+ uint32_t win_read, off_value;
+ int rval = QLA_SUCCESS;
+
+ off_value = off & 0xFFFF0000;
+ writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /*
+ * Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != off_value) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+ __func__, off_value, win_read, off));
+ rval = QLA_ERROR;
+ } else {
+ off_value = off & 0x0000FFFF;
+ *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+ }
+ return rval;
+}
+
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
+{
+ uint32_t win_read, off_value;
+ int rval = QLA_SUCCESS;
+
+ off_value = off & 0xFFFF0000;
+ writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != off_value) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+ __func__, off_value, win_read, off));
+ rval = QLA_ERROR;
+ } else {
+ off_value = off & 0x0000FFFF;
+ writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+ }
+ return rval;
+}
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore3 from PCI HW block */
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax(); /*This a nop instr on i386*/
+ }
+ }
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
+ return 0;
+}
+
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
+{
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+}
+
+#define IDC_LOCK_TIMEOUT 100000000
+
+/**
+ * qla4_82xx_idc_lock - hw_lock
+ * @ha: pointer to adapter structure
+ *
+ * General purpose lock used to synchronize access to
+ * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
+ **/
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore5 from PCI HW block */
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= IDC_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax(); /*This a nop instr on i386*/
+ }
+ }
+ return 0;
+}
+
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
+{
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+int
+qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
+{
+ struct crb_128M_2M_sub_block_map *m;
+
+ if (*off >= QLA82XX_CRB_MAX)
+ return -1;
+
+ if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
+ *off = (*off - QLA82XX_PCI_CAMQM) +
+ QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+ return 0;
+ }
+
+ if (*off < QLA82XX_PCI_CRBSPACE)
+ return -1;
+
+ *off -= QLA82XX_PCI_CRBSPACE;
+ /*
+ * Try direct map
+ */
+
+ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
+ *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ return 1;
+}
+
+/*
+* check memory access boundary.
+* used by test agent. support ddr access only for now
+*/
+static unsigned long
+qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
+ unsigned long long addr, int size)
+{
+ if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX) ||
+ !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
+ QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
+ ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
+ return 0;
+ }
+ return 1;
+}
+
+static int qla4_82xx_pci_set_window_warning_count;
+
+static unsigned long
+qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
+{
+ int window;
+ u32 win_read;
+
+ if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ window = MN_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla4_82xx_wr_32(ha, ha->mn_win_crb |
+ QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
+ QLA82XX_PCI_CRBSPACE);
+ if ((win_read << 17) != window) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+ QLA8XXX_ADDR_OCM0_MAX)) {
+ unsigned int temp1;
+ /* if bits 19:18&17:11 are on */
+ if ((addr & 0x00ff800) == 0xff800) {
+ printk("%s: QM access not handled.\n", __func__);
+ addr = -1UL;
+ }
+
+ window = OCM_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla4_82xx_wr_32(ha, ha->mn_win_crb |
+ QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
+ QLA82XX_PCI_CRBSPACE);
+ temp1 = ((window & 0x1FF) << 7) |
+ ((window & 0x0FFFE0000) >> 17);
+ if (win_read != temp1) {
+ printk("%s: Written OCMwin (0x%x) != Read"
+ " OCMwin (0x%x)\n", __func__, temp1, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+ QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+ /* QDR network side */
+ window = MS_WIN(addr);
+ ha->qdr_sn_window = window;
+ qla4_82xx_wr_32(ha, ha->ms_win_crb |
+ QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla4_82xx_rd_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+ if (win_read != window) {
+ printk("%s: Written MSwin (0x%x) != Read "
+ "MSwin (0x%x)\n", __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist,
+ * this limits the chit chat so debugging isn't slowed down.
+ */
+ if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
+ (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
+ printk("%s: Warning:%s Unknown address range!\n",
+ __func__, DRIVER_NAME);
+ }
+ addr = -1UL;
+ }
+ return addr;
+}
+
+/* check if address is in the same windows as the previous access */
+static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
+ unsigned long long addr)
+{
+ int window;
+ unsigned long long qdr_max;
+
+ qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+ if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ BUG(); /* MN access can not come here */
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+ QLA8XXX_ADDR_OCM0_MAX)) {
+ return 1;
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
+ QLA8XXX_ADDR_OCM1_MAX)) {
+ return 1;
+ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+ qdr_max)) {
+ /* QDR network side */
+ window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
+ if (ha->qdr_sn_window == window)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void __iomem *addr;
+ int ret = 0;
+ u64 start;
+ void __iomem *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla4_82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ printk(KERN_ERR"%s out of bound pci memory access. "
+ "offset is 0x%llx\n", DRIVER_NAME, off);
+ return -1;
+ }
+
+ addr = qla4_8xxx_pci_base_offsetfset(ha, start);
+ if (!addr) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+
+ if (mem_ptr == NULL) {
+ *(u8 *)data = 0;
+ return -1;
+ }
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+ }
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = readb(addr);
+ break;
+ case 2:
+ *(u16 *)data = readw(addr);
+ break;
+ case 4:
+ *(u32 *)data = readl(addr);
+ break;
+ case 8:
+ *(u64 *)data = readq(addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+static int
+qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
+ void *data, int size)
+{
+ unsigned long flags;
+ void __iomem *addr;
+ int ret = 0;
+ u64 start;
+ void __iomem *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla4_82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ printk(KERN_ERR"%s out of bound pci memory access. "
+ "offset is 0x%llx\n", DRIVER_NAME, off);
+ return -1;
+ }
+
+ addr = qla4_8xxx_pci_base_offsetfset(ha, start);
+ if (!addr) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == NULL)
+ return -1;
+
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+ }
+
+ switch (size) {
+ case 1:
+ writeb(*(u8 *)data, addr);
+ break;
+ case 2:
+ writew(*(u16 *)data, addr);
+ break;
+ case 4:
+ writel(*(u32 *)data, addr);
+ break;
+ case 8:
+ writeq(*(u64 *)data, addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MTU_FUDGE_FACTOR 100
+
+static unsigned long
+qla4_82xx_decode_crb_addr(unsigned long addr)
+{
+ int i;
+ unsigned long base_addr, offset, pci_base;
+
+ if (!qla4_8xxx_crb_table_initialized)
+ qla4_82xx_crb_addr_transform_setup();
+
+ pci_base = ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+static long rom_max_timeout = 100;
+static long qla4_82xx_rom_lock_timeout = 100;
+
+static int
+qla4_82xx_rom_lock(struct scsi_qla_host *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore2 from PCI HW block */
+
+ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= qla4_82xx_rom_lock_timeout)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax(); /*This a nop instr on i386*/
+ }
+ }
+ qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ return 0;
+}
+
+static void
+qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
+{
+ qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+}
+
+static int
+qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
+{
+ long timeout = 0;
+ long done = 0 ;
+
+ while (done == 0) {
+ done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 2;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ printk("%s: Timeout reached waiting for rom done",
+ DRIVER_NAME);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
+qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+{
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qla4_82xx_wait_rom_done(ha)) {
+ printk("%s: Error waiting for rom done\n", DRIVER_NAME);
+ return -1;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ udelay(10);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+
+ *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int
+qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+{
+ int ret, loops = 0;
+
+ while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ loops++;
+ }
+ if (loops >= 50000) {
+ ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
+ DRIVER_NAME);
+ return -1;
+ }
+ ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
+ qla4_82xx_rom_unlock(ha);
+ return ret;
+}
+
+/**
+ * This routine does CRB initialize sequence
+ * to put the ISP into operational state
+ **/
+static int
+qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
+{
+ int addr, val;
+ int i ;
+ struct crb_addr_pair *buf;
+ unsigned long off;
+ unsigned offset, n;
+
+ struct crb_addr_pair {
+ long addr;
+ long data;
+ };
+
+ /* Halt all the indiviual PEGs and other blocks of the ISP */
+ qla4_82xx_rom_lock(ha);
+
+ /* disable all I2Q */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+ /* disable xge rx/tx */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+ /* disable xg1 rx/tx */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
+
+ /* halt sre */
+ val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+
+ /* halt epg */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+
+ /* halt timers */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
+
+ /* halt pegs */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(5);
+
+ /* big hammer */
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+ /* don't reset CAM block on reset */
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ else
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+
+ qla4_82xx_rom_unlock(ha);
+
+ /* Read the signature value from the flash.
+ * Offset 0: Contain signature (0xcafecafe)
+ * Offset 4: Offset and number of addr/value pairs
+ * that present in CRB initialize sequence
+ */
+ if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+ qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
+ ql4_printk(KERN_WARNING, ha,
+ "[ERROR] Reading crb_init area: n: %08x\n", n);
+ return -1;
+ }
+
+ /* Offset in flash = lower 16 bits
+ * Number of enteries = upper 16 bits
+ */
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ /* number of addr/value pair should not exceed 1024 enteries */
+ if (n >= 1024) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
+ DRIVER_NAME, __func__, n);
+ return -1;
+ }
+
+ ql4_printk(KERN_INFO, ha,
+ "%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n);
+
+ buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME);
+ return -1;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+ qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
+ 0) {
+ kfree(buf);
+ return -1;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Translate internal CRB initialization
+ * address to PCI bus address
+ */
+ off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+ QLA82XX_PCI_CRBSPACE;
+ /* Not all CRB addr/value pair to be written,
+ * some of them are skipped
+ */
+
+ /* skip if LS bit is set*/
+ if (off & 0x1) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Skip CRB init replay for offset = 0x%lx\n", off));
+ continue;
+ }
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLA82XX_CAM_RAM(0x1fc))
+ continue;
+
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+
+ /* skip core clock, so that firmware can increase the clock */
+ if (off == (ROMUSB_GLB + 0xc8))
+ continue;
+
+ /* skip the function enable register */
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+ continue;
+
+ if (off == ADDR_ERROR) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unknown addr: 0x%08lx\n",
+ DRIVER_NAME, buf[i].addr);
+ continue;
+ }
+
+ qla4_82xx_wr_32(ha, off, buf[i].data);
+
+ /* ISP requires much bigger delay to settle down,
+ * else crb_window returns 0xffffffff
+ */
+ if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+ msleep(1000);
+
+ /* ISP requires millisec delay between
+ * successive CRB register updation
+ */
+ msleep(1);
+ }
+
+ kfree(buf);
+
+ /* Resetting the data and instruction cache */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+ /* Clear all protocol processing engines */
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+
+ return 0;
+}
+
+/**
+ * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @data: Data to be written
+ * @count: word_count to be written
+ *
+ * Return: On success return QLA_SUCCESS
+ * On error return QLA_ERROR
+ **/
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+ uint32_t *data, uint32_t count)
+{
+ int i, j;
+ uint32_t agt_ctrl;
+ unsigned long flags;
+ int ret_val = QLA_SUCCESS;
+
+ /* Only 128-bit aligned access */
+ if (addr & 0xF) {
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write;
+ }
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /* Write address */
+ ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+ QLA8XXX_ADDR_QDR_NET_MAX)) ||
+ (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ QLA8XXX_ADDR_DDR_NET_MAX)))) {
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write_unlock;
+ }
+
+ ret_val = ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_ADDR_LO,
+ addr);
+ /* Write data */
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_LO,
+ *data++);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_HI,
+ *data++);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_ULO,
+ *data++);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_WRDATA_UHI,
+ *data++);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ /* Check write status */
+ ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_ENABLE);
+ ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+ MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_START);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ret_val = ha->isp_ops->rd_reg_indirect(ha,
+ MD_MIU_TEST_AGT_CTRL,
+ &agt_ctrl);
+ if (ret_val == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+ if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failed */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+ __func__);
+ ret_val = QLA_ERROR;
+ goto exit_ms_mem_write_unlock;
+ }
+ }
+
+exit_ms_mem_write_unlock:
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+ return ret_val;
+}
+
+static int
+qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
+{
+ int i, rval = 0;
+ long size = 0;
+ long flashaddr, memaddr;
+ u64 data;
+ u32 high, low;
+
+ flashaddr = memaddr = ha->hw.flt_region_bootload;
+ size = (image_start - flashaddr) / 8;
+
+ DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
+ ha->host_no, __func__, flashaddr, image_start));
+
+ for (i = 0; i < size; i++) {
+ if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+ (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
+ (int *)&high))) {
+ rval = -1;
+ goto exit_load_from_flash;
+ }
+ data = ((u64)high << 32) | low ;
+ rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+ if (rval)
+ goto exit_load_from_flash;
+
+ flashaddr += 8;
+ memaddr += 8;
+
+ if (i % 0x1000 == 0)
+ msleep(1);
+
+ }
+
+ udelay(100);
+
+ read_lock(&ha->hw_lock);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ read_unlock(&ha->hw_lock);
+
+exit_load_from_flash:
+ return rval;
+}
+
+static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
+{
+ u32 rst;
+
+ qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
+ printk(KERN_WARNING "%s: Error during CRB Initialization\n",
+ __func__);
+ return QLA_ERROR;
+ }
+
+ udelay(500);
+
+ /* at this point, QM is in reset. This could be a problem if there are
+ * incoming d* transition queue messages. QM/PCIE could wedge.
+ * To get around this, QM is brought out of reset.
+ */
+
+ rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+ /* unreset qm */
+ rst &= ~(1 << 28);
+ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+ if (qla4_82xx_load_from_flash(ha, image_start)) {
+ printk("%s: Error trying to load fw from flash!\n", __func__);
+ return QLA_ERROR;
+ }
+
+ return QLA_SUCCESS;
+}
+
+int
+qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
+ u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ int shift_amount;
+ uint32_t temp;
+ uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+
+ if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla4_82xx_pci_mem_read_direct(ha,
+ off, data, size);
+ }
+
+
+ off8 = off & 0xfffffff0;
+ off0[0] = off & 0xf;
+ sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+ shift_amount = 4;
+
+ loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+ off0[1] = 0;
+ sz[1] = size - sz[0];
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START_ENABLE;
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla4_82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+ }
+ }
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if ((off0[0] & 7) == 0) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ return 0;
+}
+
+int
+qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
+ u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ int scale, shift_amount, startword;
+ uint32_t temp;
+ uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla4_82xx_pci_mem_write_direct(ha,
+ off, data, size);
+ }
+
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+
+ off8 = off & 0xfffffff0;
+ loop = (((off & 0xf) + size - 1) >> 4) + 1;
+ shift_amount = 4;
+ scale = 2;
+ startword = (off & 0xf)/8;
+
+ for (i = 0; i < loop; i++) {
+ if (qla4_82xx_pci_mem_read_2M(ha, off8 +
+ (i << shift_amount), &word[i * scale], 8))
+ return -1;
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+
+ if (sz[0] == 8)
+ word[startword] = tmpw;
+ else {
+ word[startword] &=
+ ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+ }
+
+ if (sz[1] != 0) {
+ word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+ word[startword+1] |= tmpw >> (sz[0] * 8);
+ }
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i * scale] & 0xffffffff;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i * scale] >> 32) & 0xffffffff;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ temp = word[i*scale + 1] & 0xffffffff;
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
+ temp);
+ temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+ qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
+ temp);
+
+ temp = MIU_TA_CTL_WRITE_ENABLE;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_WRITE_START;
+ qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ ql4_printk(KERN_ERR, ha,
+ "%s: failed to read through agent\n",
+ __func__);
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ if (!pegtune_val) {
+ do {
+ val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
+ if ((val == PHAN_INITIALIZE_COMPLETE) ||
+ (val == PHAN_INITIALIZE_ACK))
+ return 0;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(500);
+
+ } while (--retries);
+
+ if (!retries) {
+ pegtune_val = qla4_82xx_rd_32(ha,
+ QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+ printk(KERN_WARNING "%s: init failed, "
+ "pegtune_val = %x\n", __func__, pegtune_val);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
+{
+ uint32_t state = 0;
+ int loops = 0;
+
+ /* Window 1 call */
+ read_lock(&ha->hw_lock);
+ state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
+ udelay(100);
+ /* Window 1 call */
+ read_lock(&ha->hw_lock);
+ state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ loops++;
+ }
+
+ if (loops >= 30000) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Receive Peg initialization not complete: 0x%x.\n", state));
+ return QLA_ERROR;
+ }
+
+ return QLA_SUCCESS;
+}
+
+void
+qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ /*
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP8022, drv_active has 4 bits per function
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ drv_active |= (1 << ha->func_num);
+ else
+ drv_active |= (1 << (ha->func_num * 4));
+
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
+}
+
+void
+qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ /*
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP8022, drv_active has 4 bits per function
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ drv_active &= ~(1 << (ha->func_num));
+ else
+ drv_active &= ~(1 << (ha->func_num * 4));
+
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
+{
+ uint32_t drv_state, drv_active;
+ int rval;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+ /*
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP8022, drv_active has 4 bits per function
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ rval = drv_state & (1 << ha->func_num);
+ else
+ rval = drv_state & (1 << (ha->func_num * 4));
+
+ if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
+ rval = 1;
+
+ return rval;
+}
+
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+ /*
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP8022, drv_active has 4 bits per function
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ drv_state |= (1 << ha->func_num);
+ else
+ drv_state |= (1 << (ha->func_num * 4));
+
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
+}
+
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+ /*
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP8022, drv_active has 4 bits per function
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ drv_state &= ~(1 << ha->func_num);
+ else
+ drv_state &= ~(1 << (ha->func_num * 4));
+
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
+{
+ uint32_t qsnt_state;
+
+ qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+ /*
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.
+ * For ISP8022, drv_active has 4 bits per function.
+ */
+ if (is_qla8032(ha) || is_qla8042(ha))
+ qsnt_state |= (1 << ha->func_num);
+ else
+ qsnt_state |= (2 << (ha->func_num * 4));
+
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
+}
+
+
+static int
+qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
+{
+ uint16_t lnk;
+
+ /* scrub dma mask expansion register */
+ qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+
+ /* Overwrite stale initialization register values */
+ qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+ qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+ if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
+ printk("%s: Error trying to start fw!\n", __func__);
+ return QLA_ERROR;
+ }
+
+ /* Handshake with the card before we register the devices. */
+ if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
+ printk("%s: Error during card handshake!\n", __func__);
+ return QLA_ERROR;
+ }
+
+ /* Negotiated Link width */
+ pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ /* Synchronize with Receive peg */
+ return qla4_82xx_rcvpeg_ready(ha);
+}
+
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
+{
+ int rval = QLA_ERROR;
+
+ /*
+ * FW Load priority:
+ * 1) Operational firmware residing in flash.
+ * 2) Fail
+ */
+
+ ql4_printk(KERN_INFO, ha,
+ "FW: Retrieving flash offsets from FLT/FDT ...\n");
+ rval = qla4_8xxx_get_flash_info(ha);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ ql4_printk(KERN_INFO, ha,
+ "FW: Attempting to load firmware from flash...\n");
+ rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);
+
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
+ " FAILED...\n");
+ return rval;
+ }
+
+ return rval;
+}
+
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+ if (qla4_82xx_rom_lock(ha)) {
+ /* Someone else is holding the lock. */
+ dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
+ }
+
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla4_82xx_rom_unlock(ha);
+}
+
+static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
+ uint32_t addr1, uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t rval = QLA_SUCCESS;
+ uint32_t temp;
+
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
+ return QLA_ERROR;
+ }
+ } while (1);
+
+ return rval;
+}
+
+uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+ uint32_t addr3, uint32_t mask, uint32_t addr,
+ uint32_t *data_ptr)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t temp;
+ uint32_t data;
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_rd_reg;
+
+ temp = (0x40000000 | addr);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_rd_reg;
+
+ ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
+ *data_ptr = data;
+
+exit_ipmdio_rd_reg:
+ return rval;
+}
+
+
+static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
+ uint32_t addr1,
+ uint32_t addr2,
+ uint32_t addr3,
+ uint32_t mask)
+{
+ unsigned long timeout;
+ uint32_t temp;
+ uint32_t rval = QLA_SUCCESS;
+
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+ do {
+ ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
+ if ((temp & 0x1) != 1)
+ break;
+ if (time_after_eq(jiffies, timeout)) {
+ ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
+ return QLA_ERROR;
+ }
+ } while (1);
+
+ return rval;
+}
+
+static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
+ uint32_t addr1, uint32_t addr3,
+ uint32_t mask, uint32_t addr,
+ uint32_t value)
+{
+ int rval = QLA_SUCCESS;
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_wr_reg;
+
+ ha->isp_ops->wr_reg_indirect(ha, addr3, value);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
+
+ rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+ if (rval)
+ goto exit_ipmdio_wr_reg;
+
+exit_ipmdio_wr_reg:
+ return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8xxx_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+ *data_ptr++ = cpu_to_le32(r_addr);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+ /* Read the pex-dma's command-status-and-control register. */
+ rval = ha->isp_ops->rd_reg_indirect(ha,
+ (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+
+ if (rval)
+ return QLA_ERROR;
+
+ /* Check if requested pex-dma engine is available. */
+ if (cmd_sts_and_cntrl & BIT_31)
+ return QLA_SUCCESS;
+ else
+ return QLA_ERROR;
+}
+
+static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
+ struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+ int rval = QLA_SUCCESS, wait = 0;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
+ m_hdr->desc_card_addr);
+ if (rval)
+ goto error_exit;
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
+ if (rval)
+ goto error_exit;
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
+ m_hdr->start_dma_cmd);
+ if (rval)
+ goto error_exit;
+
+ /* Wait for dma operation to complete. */
+ for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
+ rval = ha->isp_ops->rd_reg_indirect(ha,
+ (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ goto error_exit;
+
+ if ((cmd_sts_and_cntrl & BIT_1) == 0)
+ break;
+ else
+ udelay(10);
+ }
+
+ /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
+ if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
+ rval = QLA_ERROR;
+ goto error_exit;
+ }
+
+error_exit:
+ return rval;
+}
+
+static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int rval = QLA_SUCCESS;
+ struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+ uint32_t size, read_size;
+ uint8_t *data_ptr = (uint8_t *)*d_ptr;
+ void *rdmem_buffer = NULL;
+ dma_addr_t rdmem_dma;
+ struct qla4_83xx_pex_dma_descriptor dma_desc;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+
+ rval = qla4_83xx_check_dma_engine_state(ha);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DMA engine not available. Fallback to rdmem-read.\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ QLA83XX_PEX_DMA_READ_SIZE,
+ &rdmem_dma, GFP_KERNEL);
+ if (!rdmem_buffer) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Unable to allocate rdmem dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ /* Prepare pex-dma descriptor to be written to MS memory. */
+ /* dma-desc-cmd layout:
+ * 0-3: dma-desc-cmd 0-3
+ * 4-7: pcid function number
+ * 8-15: dma-desc-cmd 8-15
+ */
+ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+ dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+ dma_desc.dma_bus_addr = rdmem_dma;
+
+ size = 0;
+ read_size = 0;
+ /*
+ * Perform rdmem operation using pex-dma.
+ * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
+ */
+ while (read_size < m_hdr->read_data_size) {
+ if (m_hdr->read_data_size - read_size >=
+ QLA83XX_PEX_DMA_READ_SIZE)
+ size = QLA83XX_PEX_DMA_READ_SIZE;
+ else {
+ size = (m_hdr->read_data_size - read_size);
+
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev,
+ QLA83XX_PEX_DMA_READ_SIZE,
+ rdmem_buffer, rdmem_dma);
+
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
+ &rdmem_dma,
+ GFP_KERNEL);
+ if (!rdmem_buffer) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Unable to allocate rdmem dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+ dma_desc.dma_bus_addr = rdmem_dma;
+ }
+
+ dma_desc.src_addr = m_hdr->read_addr + read_size;
+ dma_desc.cmd.read_data_size = size;
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla4_8xxx_ms_mem_write_128b(ha,
+ (uint64_t)m_hdr->desc_card_addr,
+ (uint32_t *)&dma_desc,
+ (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: Error writing rdmem-dma-init to MS !!!\n",
+ __func__);
+ goto error_exit;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
+ __func__, size));
+ /* Execute: Start pex-dma operation. */
+ rval = qla4_83xx_start_pex_dma(ha, m_hdr);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): start-pex-dma failed rval=0x%x\n",
+ ha->host_no, rval));
+ goto error_exit;
+ }
+
+ memcpy(data_ptr, rdmem_buffer, size);
+ data_ptr += size;
+ read_size += size;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+
+ *d_ptr = (uint32_t *)data_ptr;
+
+error_exit:
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
+ rdmem_dma);
+
+ return rval;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla8xxx_minidump_entry_cache *cache_hdr;
+ int rval = QLA_ERROR;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
+
+ if (c_value_w)
+ ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ ha->isp_ops->rd_reg_indirect(ha, c_addr,
+ &c_value_r);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr)
+{
+ struct qla8xxx_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+ uint32_t crb_addr;
+ unsigned long wtime;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ int i;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+ if (opcode & QLA8XXX_DBG_OPCODE_WR) {
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr,
+ crb_entry->value_1);
+ opcode &= ~QLA8XXX_DBG_OPCODE_WR;
+ }
+ if (opcode & QLA8XXX_DBG_OPCODE_RW) {
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+ opcode &= ~QLA8XXX_DBG_OPCODE_RW;
+ }
+ if (opcode & QLA8XXX_DBG_OPCODE_AND) {
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA8XXX_DBG_OPCODE_AND;
+ if (opcode & QLA8XXX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA8XXX_DBG_OPCODE_OR;
+ }
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+ }
+ if (opcode & QLA8XXX_DBG_OPCODE_OR) {
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+ read_value |= crb_entry->value_3;
+ ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+ opcode &= ~QLA8XXX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1) {
+ break;
+ } else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_ERROR;
+ break;
+ } else {
+ ha->isp_ops->rd_reg_indirect(ha,
+ crb_addr, &read_value);
+ }
+ } while (1);
+ opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
+ opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+ return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))));
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla8xxx_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla8xxx_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
+ ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla8xxx_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+
+static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value;
+ uint32_t i, loop_cnt;
+ struct qla8xxx_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
+ r_addr = rom_hdr->read_addr;
+ loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000));
+ ha->isp_ops->rd_reg_indirect(ha,
+ MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
+ &r_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += sizeof(uint32_t);
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla8xxx_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size));
+
+ if (r_addr & 0xf) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+ __func__, r_addr));
+ return QLA_ERROR;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
+ r_addr);
+ r_value = 0;
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
+ r_value);
+ r_value = MIU_TA_CTL_ENABLE;
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
+ r_value = MIU_TA_CTL_START_ENABLE;
+ ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+ &r_value);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_SUCCESS;
+ }
+
+ for (j = 0; j < 4; j++) {
+ ha->isp_ops->rd_reg_indirect(ha,
+ MD_MIU_TEST_AGT_RDDATA[j],
+ &r_data);
+ *data_ptr++ = cpu_to_le32(r_data);
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16)));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t *data_ptr = *d_ptr;
+ int rval = QLA_SUCCESS;
+
+ rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ *d_ptr = data_ptr;
+ return rval;
+}
+
+static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ ha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask));
+ /* If driver encounters a new entry type that it cannot process,
+ * it should just skip the entry and adjust the total buffer size by
+ * from subtracting the skipped bytes from it
+ */
+ ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
+}
+
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+ uint16_t s_stride, i;
+ uint32_t *data_ptr = *d_ptr;
+ uint32_t rval = QLA_SUCCESS;
+ struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
+
+ pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
+ s_addr = le32_to_cpu(pollrd_hdr->select_addr);
+ r_addr = le32_to_cpu(pollrd_hdr->read_addr);
+ s_value = le32_to_cpu(pollrd_hdr->select_value);
+ s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
+
+ poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+ poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
+
+ for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
+ ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+ poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+ while (1) {
+ ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ msleep(1);
+ if (--poll_wait == 0) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollrd;
+ }
+ }
+ }
+ ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+
+ *d_ptr = data_ptr;
+
+exit_process_pollrd:
+ return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int loop_cnt;
+ uint32_t addr1, addr2, value, data, temp, wrval;
+ uint8_t stride, stride2;
+ uint16_t count;
+ uint32_t poll, mask, data_size, modify_mask;
+ uint32_t wait_count = 0;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla8044_minidump_entry_rddfe *rddfe;
+ uint32_t rval = QLA_SUCCESS;
+
+ rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
+ addr1 = le32_to_cpu(rddfe->addr_1);
+ value = le32_to_cpu(rddfe->value);
+ stride = le32_to_cpu(rddfe->stride);
+ stride2 = le32_to_cpu(rddfe->stride2);
+ count = le32_to_cpu(rddfe->count);
+
+ poll = le32_to_cpu(rddfe->poll);
+ mask = le32_to_cpu(rddfe->mask);
+ modify_mask = le32_to_cpu(rddfe->modify_mask);
+ data_size = le32_to_cpu(rddfe->data_size);
+
+ addr2 = addr1 + stride;
+
+ for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+ ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_process_rddfe;
+ } else {
+ ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
+ temp = temp & modify_mask;
+ temp = (temp | ((loop_cnt << 16) | loop_cnt));
+ wrval = ((temp << 16) | temp);
+
+ ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, value);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_rddfe;
+ }
+
+ ha->isp_ops->wr_reg_indirect(ha, addr1,
+ ((0x40000000 | value) +
+ stride2));
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+ if ((temp & mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_rddfe;
+ }
+
+ ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
+
+ *data_ptr++ = cpu_to_le32(wrval);
+ *data_ptr++ = cpu_to_le32(data);
+ }
+ }
+
+ *d_ptr = data_ptr;
+exit_process_rddfe:
+ return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t addr1, addr2, value1, value2, data, selval;
+ uint8_t stride1, stride2;
+ uint32_t addr3, addr4, addr5, addr6, addr7;
+ uint16_t count, loop_cnt;
+ uint32_t poll, mask;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+ rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
+ addr1 = le32_to_cpu(rdmdio->addr_1);
+ addr2 = le32_to_cpu(rdmdio->addr_2);
+ value1 = le32_to_cpu(rdmdio->value_1);
+ stride1 = le32_to_cpu(rdmdio->stride_1);
+ stride2 = le32_to_cpu(rdmdio->stride_2);
+ count = le32_to_cpu(rdmdio->count);
+
+ poll = le32_to_cpu(rdmdio->poll);
+ mask = le32_to_cpu(rdmdio->mask);
+ value2 = le32_to_cpu(rdmdio->value_2);
+
+ addr3 = addr1 + stride1;
+
+ for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+ rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+ addr3, mask);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr4 = addr2 - stride1;
+ rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
+ value2);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr5 = addr2 - (2 * stride1);
+ rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
+ value1);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr6 = addr2 - (3 * stride1);
+ rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
+ addr6, 0x2);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+ addr3, mask);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ addr7 = addr2 - (4 * stride1);
+ rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
+ mask, addr7, &data);
+ if (rval)
+ goto exit_process_rdmdio;
+
+ selval = (value2 << 18) | (value1 << 2) | 2;
+
+ stride2 = le32_to_cpu(rdmdio->stride_2);
+ *data_ptr++ = cpu_to_le32(selval);
+ *data_ptr++ = cpu_to_le32(data);
+
+ value1 = value1 + stride2;
+ *d_ptr = data_ptr;
+ }
+
+exit_process_rdmdio:
+ return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+ struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+ uint32_t wait_count = 0;
+ uint32_t rval = QLA_SUCCESS;
+
+ pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+ addr1 = le32_to_cpu(pollwr_hdr->addr_1);
+ addr2 = le32_to_cpu(pollwr_hdr->addr_2);
+ value1 = le32_to_cpu(pollwr_hdr->value_1);
+ value2 = le32_to_cpu(pollwr_hdr->value_2);
+
+ poll = le32_to_cpu(pollwr_hdr->poll);
+ mask = le32_to_cpu(pollwr_hdr->mask);
+
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+
+ wait_count++;
+ }
+
+ if (wait_count == poll) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollwr;
+ }
+
+ ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
+ ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
+
+ wait_count = 0;
+ while (wait_count < poll) {
+ ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+ if ((r_value & poll) != 0)
+ break;
+ wait_count++;
+ }
+
+exit_process_pollwr:
+ return rval;
+}
+
+static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+ uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+ struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
+ sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
+ sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
+ sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
+ sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
+ sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
+ read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
+
+ for (i = 0; i < rdmux2_hdr->op_count; i++) {
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & sel_val_mask;
+ *data_ptr++ = cpu_to_le32(t_sel_val);
+
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+ ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+ *data_ptr++ = cpu_to_le32(data);
+
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & sel_val_mask;
+ *data_ptr++ = cpu_to_le32(t_sel_val);
+
+ ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+ ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+ *data_ptr++ = cpu_to_le32(data);
+
+ sel_val1 += rdmux2_hdr->select_value_stride;
+ sel_val2 += rdmux2_hdr->select_value_stride;
+ }
+
+ *d_ptr = data_ptr;
+}
+
+static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t poll_wait, poll_mask, r_value, data;
+ uint32_t addr_1, addr_2, value_1, value_2;
+ uint32_t *data_ptr = *d_ptr;
+ uint32_t rval = QLA_SUCCESS;
+ struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
+
+ poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
+ addr_1 = le32_to_cpu(poll_hdr->addr_1);
+ addr_2 = le32_to_cpu(poll_hdr->addr_2);
+ value_1 = le32_to_cpu(poll_hdr->value_1);
+ value_2 = le32_to_cpu(poll_hdr->value_2);
+ poll_mask = le32_to_cpu(poll_hdr->poll_mask);
+
+ ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
+
+ poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+ while (1) {
+ ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ msleep(1);
+ if (--poll_wait == 0) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollrdmwr;
+ }
+ }
+ }
+
+ ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
+ data &= le32_to_cpu(poll_hdr->modify_mask);
+ ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
+ ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
+
+ poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+ while (1) {
+ ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ msleep(1);
+ if (--poll_wait == 0) {
+ ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_process_pollrdmwr;
+ }
+ }
+ }
+
+ *data_ptr++ = cpu_to_le32(addr_2);
+ *data_ptr++ = cpu_to_le32(data);
+ *d_ptr = data_ptr;
+
+exit_process_pollrdmwr:
+ return rval;
+}
+
+static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t fl_addr, u32_count, rval;
+ struct qla8xxx_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
+ fl_addr = le32_to_cpu(rom_hdr->read_addr);
+ u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+ __func__, fl_addr, u32_count));
+
+ rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
+ (u8 *)(data_ptr), u32_count);
+
+ if (rval == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Flash Read Error,Count=%d\n",
+ __func__, u32_count);
+ goto exit_process_rdrom;
+ }
+
+ data_ptr += u32_count;
+ *d_ptr = data_ptr;
+
+exit_process_rdrom:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+ int num_entry_hdr = 0;
+ struct qla8xxx_minidump_entry_hdr *entry_hdr;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0;
+ int i, rval = QLA_ERROR;
+ uint64_t now;
+ uint32_t timestamp;
+
+ ha->fw_dump_skip_size = 0;
+ if (!ha->fw_dump) {
+ ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+ __func__, ha->host_no);
+ return rval;
+ }
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size);
+ data_collected += ha->fw_dump_tmplt_size;
+
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+ ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql4_printk(KERN_INFO, ha,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+ __func__, ha->fw_dump_capture_mask);
+ ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
+ (((uint8_t *)ha->fw_dump_tmplt_hdr) +
+ tmplt_hdr->first_entry_offset);
+
+ if (is_qla8032(ha) || is_qla8042(ha))
+ tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
+ tmplt_hdr->ocm_window_reg[ha->func_num];
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected > ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ha->fw_dump_capture_mask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA8XXX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->fw_dump_size - data_collected)));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA8XXX_RDEND:
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8XXX_CNTRL:
+ rval = qla4_8xxx_minidump_process_control(ha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA8XXX_RDCRB:
+ qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA8XXX_RDMEM:
+ rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA8XXX_BOARD:
+ case QLA8XXX_RDROM:
+ if (is_qla8022(ha)) {
+ qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
+ &data_ptr);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ rval = qla4_83xx_minidump_process_rdrom(ha,
+ entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha,
+ entry_hdr,
+ i);
+ }
+ break;
+ case QLA8XXX_L2DTG:
+ case QLA8XXX_L2ITG:
+ case QLA8XXX_L2DAT:
+ case QLA8XXX_L2INS:
+ rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA8XXX_L1DTG:
+ case QLA8XXX_L1ITG:
+ case QLA8XXX_L1DAT:
+ case QLA8XXX_L1INS:
+ qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA8XXX_RDOCM:
+ qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA8XXX_RDMUX:
+ qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA8XXX_QUEUE:
+ qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA83XX_POLLRD:
+ if (is_qla8022(ha)) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+ rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA83XX_RDMUX2:
+ if (is_qla8022(ha)) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+ qla83xx_minidump_process_rdmux2(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA83XX_POLLRDMWR:
+ if (is_qla8022(ha)) {
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+ rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8044_RDDFE:
+ rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8044_RDMDIO:
+ rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8044_POLLWR:
+ rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA8XXX_RDNOP:
+ default:
+ qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
+skip_nxt_entry:
+ /* next entry in the template */
+ entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) +
+ entry_hdr->entry_size);
+ }
+
+ if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ rval = QLA_ERROR;
+ goto md_failed;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i));
+md_failed:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+ char event_string[40];
+ char *envp[] = { event_string, NULL };
+
+ switch (code) {
+ case QL4_UEVENT_CODE_FW_DUMP:
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ ha->host_no);
+ break;
+ default:
+ /*do nothing*/
+ break;
+ }
+
+ kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
+}
+
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
+{
+ if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+ !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+ if (!qla4_8xxx_collect_md_data(ha)) {
+ qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+ set_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
+ __func__);
+ }
+ }
+}
+
+/**
+ * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
+{
+ int rval = QLA_ERROR;
+ int i;
+ uint32_t old_count, count;
+ int need_reset = 0;
+
+ need_reset = ha->isp_ops->need_reset(ha);
+
+ if (need_reset) {
+ /* We are trying to perform a recovery here. */
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
+ ha->isp_ops->rom_lock_recovery(ha);
+ } else {
+ old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+ count = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_PEG_ALIVE_COUNTER);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
+ }
+ ha->isp_ops->rom_lock_recovery(ha);
+ }
+
+ /* set to DEV_INITIALIZING */
+ ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_INITIALIZING);
+
+ ha->isp_ops->idc_unlock(ha);
+
+ if (is_qla8022(ha))
+ qla4_8xxx_get_minidump(ha);
+
+ rval = ha->isp_ops->restart_firmware(ha);
+ ha->isp_ops->idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ qla4_8xxx_clear_drv_active(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ return rval;
+ }
+
+dev_ready:
+ ql4_printk(KERN_INFO, ha, "HW State: READY\n");
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
+
+ return rval;
+}
+
+/**
+ * qla4_82xx_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ uint32_t active_mask = 0xFFFFFFFF;
+ unsigned long reset_timeout;
+
+ ql4_printk(KERN_INFO, ha,
+ "Performing ISP error recovery\n");
+
+ if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
+ qla4_82xx_idc_unlock(ha);
+ ha->isp_ops->disable_intrs(ha);
+ qla4_82xx_idc_lock(ha);
+ }
+
+ if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s(%ld): reset acknowledged\n",
+ __func__, ha->host_no));
+ qla4_8xxx_set_rst_ready(ha);
+ } else {
+ active_mask = (~(1 << (ha->func_num * 4)));
+ }
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+
+ drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ ql4_printk(KERN_INFO, ha,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, ha->host_no, drv_state, drv_active);
+
+ while (drv_state != (drv_active & active_mask)) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+ DRIVER_NAME, drv_state, drv_active);
+ break;
+ }
+
+ /*
+ * When reset_owner times out, check which functions
+ * acked/did not ack
+ */
+ if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, ha->host_no, drv_state,
+ drv_active);
+ }
+ qla4_82xx_idc_unlock(ha);
+ msleep(1000);
+ qla4_82xx_idc_lock(ha);
+
+ drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+
+ /* Clear RESET OWNER as we are not going to use it any further */
+ clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
+
+ dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+
+ /* Force to DEV_COLD unless someone else is starting a reset */
+ if (dev_state != QLA8XXX_DEV_INITIALIZING) {
+ ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
+ qla4_8xxx_set_rst_ready(ha);
+ }
+}
+
+/**
+ * qla4_8xxx_need_qsnt_handler - Code to start qsnt
+ * @ha: pointer to adapter structure
+ **/
+void
+qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
+{
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_set_qsnt_ready(ha);
+ ha->isp_ops->idc_unlock(ha);
+}
+
+static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ if (drv_active == (1 << (ha->func_num * 4))) {
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
+ QLA82XX_IDC_VERSION);
+ ql4_printk(KERN_INFO, ha,
+ "%s: IDC version updated to %d\n", __func__,
+ QLA82XX_IDC_VERSION);
+ } else {
+ idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+ if (QLA82XX_IDC_VERSION != idc_ver) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+ __func__, QLA82XX_IDC_VERSION, idc_ver);
+ }
+ }
+}
+
+static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ if (drv_active == (1 << ha->func_num)) {
+ idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+ idc_ver &= (~0xFF);
+ idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
+ ql4_printk(KERN_INFO, ha,
+ "%s: IDC version updated to %d\n", __func__,
+ idc_ver);
+ } else {
+ idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+ idc_ver &= 0xFF;
+ if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+ __func__, QLA83XX_IDC_VER_MAJ_VALUE,
+ idc_ver);
+ rval = QLA_ERROR;
+ goto exit_set_idc_ver;
+ }
+ }
+
+ /* Update IDC_MINOR_VERSION */
+ idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
+ idc_ver &= ~(0x03 << (ha->func_num * 2));
+ idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
+ qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+ return rval;
+}
+
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+
+ if (test_bit(AF_INIT_DONE, &ha->flags))
+ goto exit_update_idc_reg;
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_set_drv_active(ha);
+
+ /*
+ * If we are the first driver to load and
+ * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
+ */
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+ if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
+ qla4_83xx_clear_idc_dontreset(ha);
+ }
+
+ if (is_qla8022(ha)) {
+ qla4_82xx_set_idc_ver(ha);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ rval = qla4_83xx_set_idc_ver(ha);
+ if (rval == QLA_ERROR)
+ qla4_8xxx_clear_drv_active(ha);
+ }
+
+ ha->isp_ops->idc_unlock(ha);
+
+exit_update_idc_reg:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_device_state_handler - Adapter state machine
+ * @ha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
+{
+ uint32_t dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+
+ rval = qla4_8xxx_update_idc_reg(ha);
+ if (rval == QLA_ERROR)
+ goto exit_state_handler;
+
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown"));
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+ ha->isp_ops->idc_lock(ha);
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Device Init Failed 0x%x = %s\n",
+ DRIVER_NAME,
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ }
+
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
+
+ /* NOTE: Make sure idc unlocked upon exit of switch statement */
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ rval = qla4_8xxx_device_bootstrap(ha);
+ goto exit;
+ case QLA8XXX_DEV_INITIALIZING:
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ /*
+ * For ISP8324 and ISP8042, if NEED_RESET is set by any
+ * driver, it should be honored, irrespective of
+ * IDC_CTRL DONTRESET_BIT0
+ */
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ qla4_83xx_need_reset_handler(ha);
+ } else if (is_qla8022(ha)) {
+ if (!ql4xdontresethba) {
+ qla4_82xx_need_reset_handler(ha);
+ /* Update timeout value after need
+ * reset handler */
+ dev_init_timeout = jiffies +
+ (ha->nx_dev_init_timeout * HZ);
+ } else {
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+ }
+ }
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* idc locked/unlocked in handler */
+ qla4_8xxx_need_qsnt_handler(ha);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ ha->isp_ops->idc_unlock(ha);
+ msleep(1000);
+ ha->isp_ops->idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ ha->isp_ops->idc_unlock(ha);
+ qla4xxx_dead_adapter_cleanup(ha);
+ rval = QLA_ERROR;
+ ha->isp_ops->idc_lock(ha);
+ goto exit;
+ default:
+ ha->isp_ops->idc_unlock(ha);
+ qla4xxx_dead_adapter_cleanup(ha);
+ rval = QLA_ERROR;
+ ha->isp_ops->idc_lock(ha);
+ goto exit;
+ }
+ }
+exit:
+ ha->isp_ops->idc_unlock(ha);
+exit_state_handler:
+ return rval;
+}
+
+int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
+{
+ int retval;
+
+ /* clear the interrupt */
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ readl(&ha->qla4_83xx_reg->risc_intr);
+ } else if (is_qla8022(ha)) {
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ readl(&ha->qla4_82xx_reg->host_int);
+ }
+
+ retval = qla4_8xxx_device_state_handler(ha);
+
+ /* Initialize request and response queues. */
+ if (retval == QLA_SUCCESS)
+ qla4xxx_init_rings(ha);
+
+ if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
+ retval = qla4xxx_request_irqs(ha);
+
+ return retval;
+}
+
+/*****************************************************************************/
+/* Flash Manipulation Routines */
+/*****************************************************************************/
+
+#define OPTROM_BURST_SIZE 0x1000
+#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
+
+#define FARX_DATA_FLAG BIT_31
+#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA 0x7FF00000
+
+static inline uint32_t
+flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
+{
+ return hw->flash_conf_off | faddr;
+}
+
+static inline uint32_t
+flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
+{
+ return hw->flash_data_off | faddr;
+}
+
+static uint32_t *
+qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t length)
+{
+ uint32_t i;
+ uint32_t val;
+ int loops = 0;
+ while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ cond_resched();
+ loops++;
+ }
+ if (loops >= 50000) {
+ ql4_printk(KERN_WARNING, ha, "ROM lock failed\n");
+ return dwptr;
+ }
+
+ /* Dword reads to flash. */
+ for (i = 0; i < length/4; i++, faddr += 4) {
+ if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
+ ql4_printk(KERN_WARNING, ha,
+ "Do ROM fast read failed\n");
+ goto done_read;
+ }
+ dwptr[i] = __constant_cpu_to_le32(val);
+ }
+
+done_read:
+ qla4_82xx_rom_unlock(ha);
+ return dwptr;
+}
+
+/**
+ * Address and length are byte address
+ **/
+static uint8_t *
+qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
+ return buf;
+}
+
+static int
+qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start)
+{
+ const char *loc, *locations[] = { "DEF", "PCI" };
+
+ /*
+ * FLT-location structure resides after the last PCI region.
+ */
+
+ /* Begin with sane defaults. */
+ loc = locations[0];
+ *start = FA_FLASH_LAYOUT_ADDR_82;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
+ return QLA_SUCCESS;
+}
+
+static void
+qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
+{
+ const char *loc, *locations[] = { "DEF", "FLT" };
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ uint32_t start, status;
+ struct qla_flt_header *flt;
+ struct qla_flt_region *region;
+ struct ql82xx_hw_data *hw = &ha->hw;
+
+ hw->flt_region_flt = flt_addr;
+ wptr = (uint16_t *)ha->request_ring;
+ flt = (struct qla_flt_header *)ha->request_ring;
+ region = (struct qla_flt_region *)&flt[1];
+
+ if (is_qla8022(ha)) {
+ qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+ flt_addr << 2, OPTROM_BURST_SIZE);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
+ (uint8_t *)ha->request_ring,
+ 0x400);
+ if (status != QLA_SUCCESS)
+ goto no_flash_data;
+ }
+
+ if (*wptr == __constant_cpu_to_le16(0xffff))
+ goto no_flash_data;
+ if (flt->version != __constant_cpu_to_le16(1)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
+ "version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ le16_to_cpu(flt->checksum)));
+ goto no_flash_data;
+ }
+
+ cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
+ "version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ chksum));
+ goto no_flash_data;
+ }
+
+ loc = locations[1];
+ cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+ for ( ; cnt; cnt--, region++) {
+ /* Store addresses as DWORD offsets. */
+ start = le32_to_cpu(region->start) >> 2;
+
+ DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
+ le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
+
+ switch (le32_to_cpu(region->code) & 0xff) {
+ case FLT_REG_FDT:
+ hw->flt_region_fdt = start;
+ break;
+ case FLT_REG_BOOT_CODE_82:
+ hw->flt_region_boot = start;
+ break;
+ case FLT_REG_FW_82:
+ case FLT_REG_FW_82_1:
+ hw->flt_region_fw = start;
+ break;
+ case FLT_REG_BOOTLOAD_82:
+ hw->flt_region_bootload = start;
+ break;
+ case FLT_REG_ISCSI_PARAM:
+ hw->flt_iscsi_param = start;
+ break;
+ case FLT_REG_ISCSI_CHAP:
+ hw->flt_region_chap = start;
+ hw->flt_chap_size = le32_to_cpu(region->size);
+ break;
+ case FLT_REG_ISCSI_DDB:
+ hw->flt_region_ddb = start;
+ hw->flt_ddb_size = le32_to_cpu(region->size);
+ break;
+ }
+ }
+ goto done;
+
+no_flash_data:
+ /* Use hardcoded defaults. */
+ loc = locations[0];
+
+ hw->flt_region_fdt = FA_FLASH_DESCR_ADDR_82;
+ hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
+ hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
+ hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
+ hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2;
+ hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
+ hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2;
+ hw->flt_ddb_size = FA_FLASH_DDB_SIZE;
+
+done:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n",
+ loc, hw->flt_region_flt, hw->flt_region_fdt,
+ hw->flt_region_boot, hw->flt_region_bootload,
+ hw->flt_region_fw, hw->flt_region_chap,
+ hw->flt_chap_size, hw->flt_region_ddb,
+ hw->flt_ddb_size));
+}
+
+static void
+qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
+{
+#define FLASH_BLK_SIZE_4K 0x1000
+#define FLASH_BLK_SIZE_32K 0x8000
+#define FLASH_BLK_SIZE_64K 0x10000
+ const char *loc, *locations[] = { "MID", "FDT" };
+ uint16_t cnt, chksum;
+ uint16_t *wptr;
+ struct qla_fdt_layout *fdt;
+ uint16_t mid = 0;
+ uint16_t fid = 0;
+ struct ql82xx_hw_data *hw = &ha->hw;
+
+ hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ hw->flash_data_off = FARX_ACCESS_FLASH_DATA;
+
+ wptr = (uint16_t *)ha->request_ring;
+ fdt = (struct qla_fdt_layout *)ha->request_ring;
+ qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+ hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
+
+ if (*wptr == __constant_cpu_to_le16(0xffff))
+ goto no_flash_data;
+
+ if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
+ fdt->sig[3] != 'D')
+ goto no_flash_data;
+
+ for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
+ cnt++)
+ chksum += le16_to_cpu(*wptr++);
+
+ if (chksum) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
+ "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
+ le16_to_cpu(fdt->version)));
+ goto no_flash_data;
+ }
+
+ loc = locations[1];
+ mid = le16_to_cpu(fdt->man_id);
+ fid = le16_to_cpu(fdt->id);
+ hw->fdt_wrt_disable = fdt->wrt_disable_bits;
+ hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd);
+ hw->fdt_block_size = le32_to_cpu(fdt->block_size);
+
+ if (fdt->unprotect_sec_cmd) {
+ hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 |
+ fdt->unprotect_sec_cmd);
+ hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
+ flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) :
+ flash_conf_addr(hw, 0x0336);
+ }
+ goto done;
+
+no_flash_data:
+ loc = locations[0];
+ hw->fdt_block_size = FLASH_BLK_SIZE_64K;
+done:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd,
+ hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable,
+ hw->fdt_block_size));
+}
+
+static void
+qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
+{
+#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
+ uint32_t *wptr;
+
+ if (!is_qla8022(ha))
+ return;
+ wptr = (uint32_t *)ha->request_ring;
+ qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+ QLA82XX_IDC_PARAM_ADDR , 8);
+
+ if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
+ ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
+ } else {
+ ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->nx_reset_timeout = le32_to_cpu(*wptr);
+ }
+
+ DEBUG2(ql4_printk(KERN_DEBUG, ha,
+ "ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout));
+ DEBUG2(ql4_printk(KERN_DEBUG, ha,
+ "ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout));
+ return;
+}
+
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+ int in_count)
+{
+ int i;
+
+ /* Load all mailbox registers, except mailbox 0. */
+ for (i = 1; i < in_count; i++)
+ writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);
+
+ /* Wakeup firmware */
+ writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
+ readl(&ha->qla4_82xx_reg->mailbox_in[0]);
+ writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
+ readl(&ha->qla4_82xx_reg->hint);
+}
+
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+ int intr_status;
+
+ intr_status = readl(&ha->qla4_82xx_reg->host_int);
+ if (intr_status & ISRX_82XX_RISC_INT) {
+ ha->mbox_status_count = out_count;
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+ if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+ test_bit(AF_INTx_ENABLED, &ha->flags))
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+ 0xfbff);
+ }
+}
+
+int
+qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
+{
+ int ret;
+ uint32_t flt_addr;
+
+ ret = qla4_8xxx_find_flt_start(ha, &flt_addr);
+ if (ret != QLA_SUCCESS)
+ return ret;
+
+ qla4_8xxx_get_flt_info(ha, flt_addr);
+ if (is_qla8022(ha)) {
+ qla4_82xx_get_fdt_info(ha);
+ qla4_82xx_get_idc_param(ha);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ qla4_83xx_get_idc_param(ha);
+ }
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance
+ * @ha: pointer to host adapter structure.
+ *
+ * Remarks:
+ * For iSCSI, throws away all I/O and AENs into bit bucket, so they will
+ * not be available after successful return. Driver must cleanup potential
+ * outstanding I/O's after calling this funcion.
+ **/
+int
+qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
+{
+ int status;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_STOP_FW;
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
+ &mbox_cmd[0], &mbox_sts[0]);
+
+ DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no,
+ __func__, status));
+ return status;
+}
+
+/**
+ * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * @ha: pointer to host adapter structure.
+ **/
+int
+qla4_82xx_isp_reset(struct scsi_qla_host *ha)
+{
+ int rval;
+ uint32_t dev_state;
+
+ qla4_82xx_idc_lock(ha);
+ dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ if (dev_state == QLA8XXX_DEV_READY) {
+ ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ } else
+ ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
+
+ qla4_82xx_idc_unlock(ha);
+
+ rval = qla4_8xxx_device_state_handler(ha);
+
+ qla4_82xx_idc_lock(ha);
+ qla4_8xxx_clear_rst_ready(ha);
+ qla4_82xx_idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n");
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+ }
+
+ return rval;
+}
+
+/**
+ * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct mbx_sys_info *sys_info;
+ dma_addr_t sys_info_dma;
+ int status = QLA_ERROR;
+
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
+ if (sys_info == NULL) {
+ DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+ ha->host_no, __func__));
+ return status;
+ }
+
+ memset(sys_info, 0, sizeof(*sys_info));
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO;
+ mbox_cmd[1] = LSDW(sys_info_dma);
+ mbox_cmd[2] = MSDW(sys_info_dma);
+ mbox_cmd[4] = sizeof(*sys_info);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0],
+ &mbox_sts[0]) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n",
+ ha->host_no, __func__));
+ goto exit_validate_mac82;
+ }
+
+ /* Make sure we receive the minimum required data to cache internally */
+ if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
+ offsetof(struct mbx_sys_info, reserved)) {
+ DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
+ " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
+ goto exit_validate_mac82;
+ }
+
+ /* Save M.A.C. address & serial_number */
+ ha->port_num = sys_info->port_num;
+ memcpy(ha->my_mac, &sys_info->mac_addr[0],
+ min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
+ memcpy(ha->serial_number, &sys_info->serial_number,
+ min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
+ memcpy(ha->model_name, &sys_info->board_id_str,
+ min(sizeof(ha->model_name), sizeof(sys_info->board_id_str)));
+ ha->phy_port_cnt = sys_info->phys_port_cnt;
+ ha->phy_port_num = sys_info->port_num;
+ ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
+
+ DEBUG2(printk("scsi%ld: %s: "
+ "mac %02x:%02x:%02x:%02x:%02x:%02x "
+ "serial %s\n", ha->host_no, __func__,
+ ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
+ ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
+ ha->serial_number));
+
+ status = QLA_SUCCESS;
+
+exit_validate_mac82:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
+ sys_info_dma);
+ return status;
+}
+
+/* Interrupt handling helpers. */
+
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
+ mbox_cmd[1] = INTR_ENABLE;
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]) != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
+ __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
+ mbox_cmd[1] = INTR_DISABLE;
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+ &mbox_sts[0]) != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
+ __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+
+ return QLA_SUCCESS;
+}
+
+void
+qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_8xxx_intr_enable(ha);
+
+ spin_lock_irq(&ha->hardware_lock);
+ /* BIT 10 - reset */
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ spin_unlock_irq(&ha->hardware_lock);
+ set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+void
+qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
+{
+ if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
+ qla4_8xxx_intr_disable(ha);
+
+ spin_lock_irq(&ha->hardware_lock);
+ /* BIT 10 - set */
+ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ spin_unlock_irq(&ha->hardware_lock);
+}
+
+struct ql4_init_msix_entry {
+ uint16_t entry;
+ uint16_t index;
+ const char *name;
+ irq_handler_t handler;
+};
+
+static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
+ { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
+ "qla4xxx (default)",
+ (irq_handler_t)qla4_8xxx_default_intr_handler },
+ { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
+ "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
+};
+
+void
+qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
+{
+ int i;
+ struct ql4_msix_entry *qentry;
+
+ for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
+ qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
+ if (qentry->have_irq) {
+ free_irq(qentry->msix_vector, ha);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
+ __func__, qla4_8xxx_msix_entries[i].name));
+ }
+ }
+ pci_disable_msix(ha->pdev);
+ clear_bit(AF_MSIX_ENABLED, &ha->flags);
+}
+
+int
+qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
+{
+ int i, ret;
+ struct msix_entry entries[QLA_MSIX_ENTRIES];
+ struct ql4_msix_entry *qentry;
+
+ for (i = 0; i < QLA_MSIX_ENTRIES; i++)
+ entries[i].entry = qla4_8xxx_msix_entries[i].entry;
+
+ ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha,
+ "MSI-X: Failed to enable support -- %d/%d\n",
+ QLA_MSIX_ENTRIES, ret);
+ goto msix_out;
+ }
+ set_bit(AF_MSIX_ENABLED, &ha->flags);
+
+ for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
+ qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
+ qentry->msix_vector = entries[i].vector;
+ qentry->msix_entry = entries[i].entry;
+ qentry->have_irq = 0;
+ ret = request_irq(qentry->msix_vector,
+ qla4_8xxx_msix_entries[i].handler, 0,
+ qla4_8xxx_msix_entries[i].name, ha);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ qla4_8xxx_msix_entries[i].index, ret);
+ qla4_8xxx_disable_msix(ha);
+ goto msix_out;
+ }
+ qentry->have_irq = 1;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
+ __func__, qla4_8xxx_msix_entries[i].name));
+ }
+msix_out:
+ return ret;
+}
+
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
+{
+ int status = QLA_SUCCESS;
+
+ /* Dont retry adapter initialization if IRQ allocation failed */
+ if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_init_adapter_failure;
+ }
+
+ /* Since interrupts are registered in start_firmware for
+ * 8xxx, release them here if initialize_adapter fails
+ * and retry adapter initialization */
+ qla4xxx_free_irqs(ha);
+
+exit_init_adapter_failure:
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
new file mode 100644
index 000000000..337d9fcf6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -0,0 +1,1032 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+*/
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+/*CRB_RELATED*/
+#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
+#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
+#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
+#define CRB_CMDPEG_CHECK_DELAY 500
+
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
+#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
+
+/* Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
+
+/* Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
+
+/* Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
+
+/* Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
+
+/* Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
+
+/* Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
+
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
+
+/* Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+/* */
+#define QLA82XX_HW_PX_MAP_CRB_PH 0
+#define QLA82XX_HW_PX_MAP_CRB_PS 1
+#define QLA82XX_HW_PX_MAP_CRB_MN 2
+#define QLA82XX_HW_PX_MAP_CRB_MS 3
+#define QLA82XX_HW_PX_MAP_CRB_SRE 5
+#define QLA82XX_HW_PX_MAP_CRB_NIU 6
+#define QLA82XX_HW_PX_MAP_CRB_QMN 7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
+#define QLA82XX_HW_PX_MAP_CRB_QMS 12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND 21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
+#define QLA82XX_HW_PX_MAP_CRB_SN 29
+#define QLA82XX_HW_PX_MAP_CRB_EG 31
+#define QLA82XX_HW_PX_MAP_CRB_PH2 32
+#define QLA82XX_HW_PX_MAP_CRB_PS2 33
+#define QLA82XX_HW_PX_MAP_CRB_CAM 34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
+#define QLA82XX_HW_PX_MAP_CRB_SMB 58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
+#define QLA82XX_HW_PX_MAP_CRB_LPC 61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
+
+/* This field defines CRB adr [31:20] of the agents */
+/* */
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SMB_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C1_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS3_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A) (QLA82XX_PCI_CRBSPACE + \
+ (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
+
+#define QLA82XX_CRB_C2C_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+/* HACK upon HACK upon HACK (for PCIE builds) */
+#define QLA82XX_CRB_PCIX_HOST \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE QLA82XX_CRB_PCIX_MD
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+
+#define QLA82XX_CRB_PEG_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+
+#define QLA82XX_CRB_MAX QLA82XX_PCI_CRB_WINDOW(64)
+
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ */
+#define QLA8XXX_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA8XXX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA8XXX_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA8XXX_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA8XXX_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA8XXX_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA8XXX_ADDR_QDR_NET (0x0000000300000000ULL)
+
+#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+#define QLA8XXX_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+
+/* PCI Windowing for DDR regions. */
+#define QLA8XXX_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_CONTROL (0x000)
+#define MIU_TAG (0x004)
+#define MIU_TEST_AGT_CTRL (0x090)
+#define MIU_TEST_AGT_ADDR_LO (0x094)
+#define MIU_TEST_AGT_ADDR_HI (0x098)
+#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
+#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
+#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_START 1
+#define MIU_TA_CTL_ENABLE 2
+#define MIU_TA_CTL_WRITE 4
+#define MIU_TA_CTL_BUSY 8
+
+#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\
+ MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/*CAM RAM */
+# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
+#define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0))
+#define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4))
+
+#define HALT_STATUS_UNRECOVERABLE 0x80000000
+#define HALT_STATUS_RECOVERABLE 0x40000000
+
+
+#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
+#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
+
+enum qla_regs {
+ QLA8XXX_PEG_HALT_STATUS1 = 0,
+ QLA8XXX_PEG_HALT_STATUS2,
+ QLA8XXX_PEG_ALIVE_COUNTER,
+ QLA8XXX_CRB_DRV_ACTIVE,
+ QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_CRB_DRV_STATE,
+ QLA8XXX_CRB_DRV_SCRATCH,
+ QLA8XXX_CRB_DEV_PART_INFO,
+ QLA8XXX_CRB_DRV_IDC_VERSION,
+ QLA8XXX_FW_VERSION_MAJOR,
+ QLA8XXX_FW_VERSION_MINOR,
+ QLA8XXX_FW_VERSION_SUB,
+ QLA8XXX_CRB_CMDPEG_STATE,
+ QLA8XXX_CRB_TEMP_STATE,
+};
+
+static const uint32_t qla4_82xx_reg_tbl[] = {
+ QLA82XX_PEG_HALT_STATUS1,
+ QLA82XX_PEG_HALT_STATUS2,
+ QLA82XX_PEG_ALIVE_COUNTER,
+ QLA82XX_CRB_DRV_ACTIVE,
+ QLA82XX_CRB_DEV_STATE,
+ QLA82XX_CRB_DRV_STATE,
+ QLA82XX_CRB_DRV_SCRATCH,
+ QLA82XX_CRB_DEV_PART_INFO,
+ QLA82XX_CRB_DRV_IDC_VERSION,
+ QLA82XX_FW_VERSION_MAJOR,
+ QLA82XX_FW_VERSION_MINOR,
+ QLA82XX_FW_VERSION_SUB,
+ CRB_CMDPEG_STATE,
+ CRB_TEMP_STATE,
+};
+
+/* Every driver should use these Device State */
+#define QLA8XXX_DEV_COLD 1
+#define QLA8XXX_DEV_INITIALIZING 2
+#define QLA8XXX_DEV_READY 3
+#define QLA8XXX_DEV_NEED_RESET 4
+#define QLA8XXX_DEV_NEED_QUIESCENT 5
+#define QLA8XXX_DEV_FAILED 6
+#define QLA8XXX_DEV_QUIESCENT 7
+#define MAX_STATES 8 /* Increment if new state added */
+
+#define QLA82XX_IDC_VERSION 0x1
+#define ROM_DEV_INIT_TIMEOUT 30
+#define ROM_DRV_RESET_ACK_TIMEOUT 10
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define QLA82XX_MSIX_TBL_SPACE 8192
+#define QLA82XX_PCI_REG_MSIX_TBL 0x44
+#define QLA82XX_PCI_MSIX_CONTROL 0x40
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
+struct crb_addr_pair {
+ long addr;
+ long data;
+};
+
+#define ADDR_ERROR ((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK 1000
+#define QLA82XX_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
+
+/***************************************************************************
+ * PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
+
+/*
+ *
+ */
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC (0x1206c)
+
+
+#define ISR_INT_TARGET_STATUS \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+
+#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
+#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+/* struct qla4_8xxx_legacy_intr_set defined in ql4_def.h */
+
+#define QLA82XX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
+/* Magic number to let user know flash is programmed */
+#define QLA82XX_BDINFO_MAGIC 0x12345678
+#define FW_SIZE_OFFSET (0x3e840c)
+
+/* QLA82XX additions */
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA8XXX_RDNOP 0
+#define QLA8XXX_RDCRB 1
+#define QLA8XXX_RDMUX 2
+#define QLA8XXX_QUEUE 3
+#define QLA8XXX_BOARD 4
+#define QLA8XXX_RDOCM 6
+#define QLA8XXX_PREGS 7
+#define QLA8XXX_L1DTG 8
+#define QLA8XXX_L1ITG 9
+#define QLA8XXX_L1DAT 11
+#define QLA8XXX_L1INS 12
+#define QLA8XXX_L2DTG 21
+#define QLA8XXX_L2ITG 22
+#define QLA8XXX_L2DAT 23
+#define QLA8XXX_L2INS 24
+#define QLA83XX_POLLRD 35
+#define QLA83XX_RDMUX2 36
+#define QLA83XX_POLLRDMWR 37
+#define QLA8044_RDDFE 38
+#define QLA8044_RDMDIO 39
+#define QLA8044_POLLWR 40
+#define QLA8XXX_RDROM 71
+#define QLA8XXX_RDMEM 72
+#define QLA8XXX_CNTRL 98
+#define QLA83XX_TLHDR 99
+#define QLA8XXX_RDEND 255
+
+/* Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA8XXX_DBG_OPCODE_WR 0x01
+#define QLA8XXX_DBG_OPCODE_RW 0x02
+#define QLA8XXX_DBG_OPCODE_AND 0x04
+#define QLA8XXX_DBG_OPCODE_OR 0x08
+#define QLA8XXX_DBG_OPCODE_POLL 0x10
+#define QLA8XXX_DBG_OPCODE_RDSTATE 0x20
+#define QLA8XXX_DBG_OPCODE_WRSTATE 0x40
+#define QLA8XXX_DBG_OPCODE_MDSTATE 0x80
+
+/* Driver Flags */
+#define QLA8XXX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA8XXX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
+ * mismatch */
+
+/* Driver_code is for driver to write some info about the entry
+ * currently not used.
+ */
+struct qla8xxx_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+};
+
+/* Read CRB entry header */
+struct qla8xxx_minidump_entry_crb {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+};
+
+struct qla8xxx_minidump_entry_cache {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+};
+
+/* Read OCM */
+struct qla8xxx_minidump_entry_rdocm {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla8xxx_minidump_entry_rdmem {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla8xxx_minidump_entry_rdrom {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla8xxx_minidump_entry_mux {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla8xxx_minidump_entry_queue {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
+
+#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+ 0x410000AC, 0x410000B8, 0x410000BC };
+#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
new file mode 100644
index 000000000..6d25879d8
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -0,0 +1,9919 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+
+#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+#include "ql4_83xx.h"
+
+/*
+ * Driver version
+ */
+static char qla4xxx_version_str[40];
+
+/*
+ * SRB allocation cache
+ */
+static struct kmem_cache *srb_cachep;
+
+/*
+ * Module parameter information and variables
+ */
+static int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ " Set to disable exporting boot targets to sysfs.\n"
+ "\t\t 0 - Export boot targets\n"
+ "\t\t 1 - Do not export boot targets (Default)");
+
+int ql4xdontresethba;
+module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdontresethba,
+ " Don't reset the HBA for driver recovery.\n"
+ "\t\t 0 - It will reset HBA (Default)\n"
+ "\t\t 1 - It will NOT reset HBA");
+
+int ql4xextended_error_logging;
+module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xextended_error_logging,
+ " Option to enable extended error logging.\n"
+ "\t\t 0 - no logging (Default)\n"
+ "\t\t 2 - debug logging");
+
+int ql4xenablemsix = 1;
+module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemsix,
+ " Set to enable MSI or MSI-X interrupt mechanism.\n"
+ "\t\t 0 = enable INTx interrupt mechanism.\n"
+ "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
+ "\t\t 2 = enable MSI interrupt mechanism.");
+
+#define QL4_DEF_QDEPTH 32
+static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
+module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xmaxqdepth,
+ " Maximum queue depth to report for target devices.\n"
+ "\t\t Default: 32.");
+
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+ " Enable or disable dynamic tracking and adjustment of\n"
+ "\t\t scsi device queue depth.\n"
+ "\t\t 0 - Disable.\n"
+ "\t\t 1 - Enable. (Default)");
+
+static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
+module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xsess_recovery_tmo,
+ " Target Session Recovery Timeout.\n"
+ "\t\t Default: 120 sec.");
+
+int ql4xmdcapmask = 0;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+ " Set the Minidump driver capture mask level.\n"
+ "\t\t Default is 0 (firmware default capture mask)\n"
+ "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+ " Set to enable minidump.\n"
+ "\t\t 0 - disable minidump\n"
+ "\t\t 1 - enable minidump (Default)");
+
+static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
+/*
+ * SCSI host template entry points
+ */
+static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
+
+/*
+ * iSCSI template entry points
+ */
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+ enum iscsi_param param, char *buf);
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
+ enum iscsi_param param, char *buf);
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
+ uint32_t len);
+static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf);
+static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
+static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking);
+static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
+static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
+static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf);
+static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
+static struct iscsi_cls_conn *
+qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
+static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading);
+static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
+static struct iscsi_cls_session *
+qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+ uint16_t qdepth, uint32_t initial_cmdsn);
+static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
+static void qla4xxx_task_work(struct work_struct *wdata);
+static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
+static int qla4xxx_task_xmit(struct iscsi_task *);
+static void qla4xxx_task_cleanup(struct iscsi_task *);
+static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats);
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+ uint32_t iface_type, uint32_t payload_size,
+ uint32_t pid, struct sockaddr *dst_addr);
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+ uint32_t *num_entries, char *buf);
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
+ int len);
+static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
+
+/*
+ * SCSI host template entry points
+ */
+static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_slave_alloc(struct scsi_device *device);
+static umode_t qla4_attr_is_visible(int param_type, int param);
+static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+
+/*
+ * iSCSI Flash DDB sysfs entry points
+ */
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn,
+ void *data, int len);
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+ int param, char *buf);
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+ int len);
+static int
+qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
+
+static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
+ QLA82XX_LEGACY_INTR_CONFIG;
+
+static struct scsi_host_template qla4xxx_driver_template = {
+ .module = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .proc_name = DRIVER_NAME,
+ .queuecommand = qla4xxx_queuecommand,
+
+ .eh_abort_handler = qla4xxx_eh_abort,
+ .eh_device_reset_handler = qla4xxx_eh_device_reset,
+ .eh_target_reset_handler = qla4xxx_eh_target_reset,
+ .eh_host_reset_handler = qla4xxx_eh_host_reset,
+ .eh_timed_out = qla4xxx_eh_cmd_timed_out,
+
+ .slave_alloc = qla4xxx_slave_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = SG_ALL,
+
+ .max_sectors = 0xFFFF,
+ .shost_attrs = qla4xxx_host_attrs,
+ .host_reset = qla4xxx_host_reset,
+ .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
+ .use_blk_tags = 1,
+};
+
+static struct iscsi_transport qla4xxx_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .caps = CAP_TEXT_NEGO |
+ CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
+ CAP_DATADGST | CAP_LOGIN_OFFLOAD |
+ CAP_MULTI_R2T,
+ .attr_is_visible = qla4_attr_is_visible,
+ .create_session = qla4xxx_session_create,
+ .destroy_session = qla4xxx_session_destroy,
+ .start_conn = qla4xxx_conn_start,
+ .create_conn = qla4xxx_conn_create,
+ .bind_conn = qla4xxx_conn_bind,
+ .stop_conn = iscsi_conn_stop,
+ .destroy_conn = qla4xxx_conn_destroy,
+ .set_param = iscsi_set_param,
+ .get_conn_param = qla4xxx_conn_get_param,
+ .get_session_param = qla4xxx_session_get_param,
+ .get_ep_param = qla4xxx_get_ep_param,
+ .ep_connect = qla4xxx_ep_connect,
+ .ep_poll = qla4xxx_ep_poll,
+ .ep_disconnect = qla4xxx_ep_disconnect,
+ .get_stats = qla4xxx_conn_get_stats,
+ .send_pdu = iscsi_conn_send_pdu,
+ .xmit_task = qla4xxx_task_xmit,
+ .cleanup_task = qla4xxx_task_cleanup,
+ .alloc_pdu = qla4xxx_alloc_pdu,
+
+ .get_host_param = qla4xxx_host_get_param,
+ .set_iface_param = qla4xxx_iface_set_param,
+ .get_iface_param = qla4xxx_get_iface_param,
+ .bsg_request = qla4xxx_bsg_request,
+ .send_ping = qla4xxx_send_ping,
+ .get_chap = qla4xxx_get_chap_list,
+ .delete_chap = qla4xxx_delete_chap,
+ .set_chap = qla4xxx_set_chap_entry,
+ .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
+ .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
+ .new_flashnode = qla4xxx_sysfs_ddb_add,
+ .del_flashnode = qla4xxx_sysfs_ddb_delete,
+ .login_flashnode = qla4xxx_sysfs_ddb_login,
+ .logout_flashnode = qla4xxx_sysfs_ddb_logout,
+ .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
+ .get_host_stats = qla4xxx_get_host_stats,
+};
+
+static struct scsi_transport_template *qla4xxx_scsi_transport;
+
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+ uint32_t iface_type, uint32_t payload_size,
+ uint32_t pid, struct sockaddr *dst_addr)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ uint32_t options = 0;
+ uint8_t ipaddr[IPv6_ADDR_LEN];
+ int rval;
+
+ memset(ipaddr, 0, IPv6_ADDR_LEN);
+ /* IPv4 to IPv4 */
+ if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
+ (dst_addr->sa_family == AF_INET)) {
+ addr = (struct sockaddr_in *)dst_addr;
+ memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
+ "dest: %pI4\n", __func__,
+ &ha->ip_config.ip_address, ipaddr));
+ rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
+ ipaddr);
+ if (rval)
+ rval = -EINVAL;
+ } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
+ (dst_addr->sa_family == AF_INET6)) {
+ /* IPv6 to IPv6 */
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
+
+ options |= PING_IPV6_PROTOCOL_ENABLE;
+
+ /* Ping using LinkLocal address */
+ if ((iface_num == 0) || (iface_num == 1)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
+ "src: %pI6 dest: %pI6\n", __func__,
+ &ha->ip_config.ipv6_link_local_addr,
+ ipaddr));
+ options |= PING_IPV6_LINKLOCAL_ADDR;
+ rval = qla4xxx_ping_iocb(ha, options, payload_size,
+ pid, ipaddr);
+ } else {
+ ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
+ "not supported\n", __func__, iface_num);
+ rval = -ENOSYS;
+ goto exit_send_ping;
+ }
+
+ /*
+ * If ping using LinkLocal address fails, try ping using
+ * IPv6 address
+ */
+ if (rval != QLA_SUCCESS) {
+ options &= ~PING_IPV6_LINKLOCAL_ADDR;
+ if (iface_num == 0) {
+ options |= PING_IPV6_ADDR0;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+ "Ping src: %pI6 "
+ "dest: %pI6\n", __func__,
+ &ha->ip_config.ipv6_addr0,
+ ipaddr));
+ } else if (iface_num == 1) {
+ options |= PING_IPV6_ADDR1;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+ "Ping src: %pI6 "
+ "dest: %pI6\n", __func__,
+ &ha->ip_config.ipv6_addr1,
+ ipaddr));
+ }
+ rval = qla4xxx_ping_iocb(ha, options, payload_size,
+ pid, ipaddr);
+ if (rval)
+ rval = -EINVAL;
+ }
+ } else
+ rval = -ENOSYS;
+exit_send_ping:
+ return rval;
+}
+
+static umode_t qla4_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ case ISCSI_HOST_PARAM_PORT_SPEED:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_TARGET_ALIAS:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_CHAP_OUT_IDX:
+ case ISCSI_PARAM_CHAP_IN_IDX:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ case ISCSI_PARAM_PORTAL_TYPE:
+ case ISCSI_PARAM_CHAP_AUTH_EN:
+ case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_PARAM_BIDI_CHAP_EN:
+ case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_PARAM_DEF_TIME2WAIT:
+ case ISCSI_PARAM_DEF_TIME2RETAIN:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+ case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+ case ISCSI_PARAM_TCP_WSF_DISABLE:
+ case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+ case ISCSI_PARAM_TCP_TIMER_SCALE:
+ case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+ case ISCSI_PARAM_TCP_XMIT_WSF:
+ case ISCSI_PARAM_TCP_RECV_WSF:
+ case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+ case ISCSI_PARAM_IPV4_TOS:
+ case ISCSI_PARAM_IPV6_TC:
+ case ISCSI_PARAM_IPV6_FLOW_LABEL:
+ case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+ case ISCSI_PARAM_KEEPALIVE_TMO:
+ case ISCSI_PARAM_LOCAL_PORT:
+ case ISCSI_PARAM_ISID:
+ case ISCSI_PARAM_TSID:
+ case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_STATSN:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ case ISCSI_PARAM_LOCAL_IPADDR:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_NET_PARAM:
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ case ISCSI_NET_PARAM_IPV4_GW:
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ case ISCSI_NET_PARAM_VLAN_ID:
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_MTU:
+ case ISCSI_NET_PARAM_PORT:
+ case ISCSI_NET_PARAM_IPADDR_STATE:
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
+ case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ case ISCSI_NET_PARAM_TCP_WSF:
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ case ISCSI_NET_PARAM_CACHE_ID:
+ case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+ case ISCSI_NET_PARAM_IPV4_TOS_EN:
+ case ISCSI_NET_PARAM_IPV4_TOS:
+ case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+ case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+ case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+ case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+ case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+ case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ case ISCSI_NET_PARAM_IPV4_TTL:
+ case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+ case ISCSI_NET_PARAM_IPV6_MLD_EN:
+ case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+ case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+ case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+ case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+ case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+ case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+ case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+ case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_IFACE_PARAM:
+ switch (param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ case ISCSI_IFACE_PARAM_ERL:
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_FLASHNODE_PARAM:
+ switch (param) {
+ case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+ case ISCSI_FLASHNODE_PORTAL_TYPE:
+ case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+ case ISCSI_FLASHNODE_DISCOVERY_SESS:
+ case ISCSI_FLASHNODE_ENTRY_EN:
+ case ISCSI_FLASHNODE_HDR_DGST_EN:
+ case ISCSI_FLASHNODE_DATA_DGST_EN:
+ case ISCSI_FLASHNODE_IMM_DATA_EN:
+ case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+ case ISCSI_FLASHNODE_DATASEQ_INORDER:
+ case ISCSI_FLASHNODE_PDU_INORDER:
+ case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+ case ISCSI_FLASHNODE_SNACK_REQ_EN:
+ case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+ case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+ case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_FLASHNODE_ERL:
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+ case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+ case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+ case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+ case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+ case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+ case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+ case ISCSI_FLASHNODE_FIRST_BURST:
+ case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+ case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+ case ISCSI_FLASHNODE_MAX_R2T:
+ case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+ case ISCSI_FLASHNODE_ISID:
+ case ISCSI_FLASHNODE_TSID:
+ case ISCSI_FLASHNODE_PORT:
+ case ISCSI_FLASHNODE_MAX_BURST:
+ case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+ case ISCSI_FLASHNODE_IPADDR:
+ case ISCSI_FLASHNODE_ALIAS:
+ case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+ case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+ case ISCSI_FLASHNODE_LOCAL_PORT:
+ case ISCSI_FLASHNODE_IPV4_TOS:
+ case ISCSI_FLASHNODE_IPV6_TC:
+ case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+ case ISCSI_FLASHNODE_NAME:
+ case ISCSI_FLASHNODE_TPGT:
+ case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+ case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+ case ISCSI_FLASHNODE_TCP_RECV_WSF:
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ case ISCSI_FLASHNODE_USERNAME:
+ case ISCSI_FLASHNODE_PASSWORD:
+ case ISCSI_FLASHNODE_STATSN:
+ case ISCSI_FLASHNODE_EXP_STATSN:
+ case ISCSI_FLASHNODE_IS_BOOT_TGT:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qla4xxx_create chap_list - Create CHAP list from FLASH
+ * @ha: pointer to adapter structure
+ *
+ * Read flash and make a list of CHAP entries, during login when a CHAP entry
+ * is received, it will be checked in this list. If entry exist then the CHAP
+ * entry index is set in the DDB. If CHAP entry does not exist in this list
+ * then a new entry is added in FLASH in CHAP table and the index obtained is
+ * used in the DDB.
+ **/
+static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
+{
+ int rval = 0;
+ uint8_t *chap_flash_data = NULL;
+ uint32_t offset;
+ dma_addr_t chap_dma;
+ uint32_t chap_size = 0;
+
+ if (is_qla40XX(ha))
+ chap_size = MAX_CHAP_ENTRIES_40XX *
+ sizeof(struct ql4_chap_table);
+ else /* Single region contains CHAP info for both
+ * ports which is divided into half for each port.
+ */
+ chap_size = ha->hw.flt_chap_size / 2;
+
+ chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
+ &chap_dma, GFP_KERNEL);
+ if (!chap_flash_data) {
+ ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
+ return;
+ }
+
+ if (is_qla40XX(ha)) {
+ offset = FLASH_CHAP_OFFSET;
+ } else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ if (ha->port_num == 1)
+ offset += chap_size;
+ }
+
+ rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (rval != QLA_SUCCESS)
+ goto exit_chap_list;
+
+ if (ha->chap_list == NULL)
+ ha->chap_list = vmalloc(chap_size);
+ if (ha->chap_list == NULL) {
+ ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
+ goto exit_chap_list;
+ }
+
+ memset(ha->chap_list, 0, chap_size);
+ memcpy(ha->chap_list, chap_flash_data, chap_size);
+
+exit_chap_list:
+ dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
+}
+
+static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
+ int16_t chap_index,
+ struct ql4_chap_table **chap_entry)
+{
+ int rval = QLA_ERROR;
+ int max_chap_entries;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+ rval = QLA_ERROR;
+ goto exit_get_chap;
+ }
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_index > max_chap_entries) {
+ ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+ rval = QLA_ERROR;
+ goto exit_get_chap;
+ }
+
+ *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
+ if ((*chap_entry)->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ rval = QLA_ERROR;
+ *chap_entry = NULL;
+ } else {
+ rval = QLA_SUCCESS;
+ }
+
+exit_get_chap:
+ return rval;
+}
+
+/**
+ * qla4xxx_find_free_chap_index - Find the first free chap index
+ * @ha: pointer to adapter structure
+ * @chap_index: CHAP index to be returned
+ *
+ * Find the first free chap index available in the chap table
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
+ uint16_t *chap_index)
+{
+ int i, rval;
+ int free_index = -1;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+ rval = QLA_ERROR;
+ goto exit_find_chap;
+ }
+
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+
+ if ((chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+ (i > MAX_RESRV_CHAP_IDX)) {
+ free_index = i;
+ break;
+ }
+ }
+
+ if (free_index != -1) {
+ *chap_index = free_index;
+ rval = QLA_SUCCESS;
+ } else {
+ rval = QLA_ERROR;
+ }
+
+exit_find_chap:
+ return rval;
+}
+
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+ uint32_t *num_entries, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct ql4_chap_table *chap_table;
+ struct iscsi_chap_rec *chap_rec;
+ int max_chap_entries = 0;
+ int valid_chap_entries = 0;
+ int ret = 0, i;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
+ __func__, *num_entries, chap_tbl_idx);
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit_get_chap_list;
+ }
+
+ qla4xxx_create_chap_list(ha);
+
+ chap_rec = (struct iscsi_chap_rec *) buf;
+ mutex_lock(&ha->chap_sem);
+ for (i = chap_tbl_idx; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+ if (chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+ continue;
+
+ chap_rec->chap_tbl_idx = i;
+ strlcpy(chap_rec->username, chap_table->name,
+ ISCSI_CHAP_AUTH_NAME_MAX_LEN);
+ strlcpy(chap_rec->password, chap_table->secret,
+ QL4_CHAP_MAX_SECRET_LEN);
+ chap_rec->password_length = chap_table->secret_len;
+
+ if (chap_table->flags & BIT_7) /* local */
+ chap_rec->chap_type = CHAP_TYPE_OUT;
+
+ if (chap_table->flags & BIT_6) /* peer */
+ chap_rec->chap_type = CHAP_TYPE_IN;
+
+ chap_rec++;
+
+ valid_chap_entries++;
+ if (valid_chap_entries == *num_entries)
+ break;
+ else
+ continue;
+ }
+ mutex_unlock(&ha->chap_sem);
+
+exit_get_chap_list:
+ ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
+ __func__, valid_chap_entries);
+ *num_entries = valid_chap_entries;
+ return ret;
+}
+
+static int __qla4xxx_is_chap_active(struct device *dev, void *data)
+{
+ int ret = 0;
+ uint16_t *chap_tbl_idx = (uint16_t *) data;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+
+ if (!iscsi_is_session_dev(dev))
+ goto exit_is_chap_active;
+
+ cls_session = iscsi_dev_to_session(dev);
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+
+ if (iscsi_session_chkready(cls_session))
+ goto exit_is_chap_active;
+
+ if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
+ ret = 1;
+
+exit_is_chap_active:
+ return ret;
+}
+
+static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
+ uint16_t chap_tbl_idx)
+{
+ int ret = 0;
+
+ ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
+ __qla4xxx_is_chap_active);
+
+ return ret;
+}
+
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct ql4_chap_table *chap_table;
+ dma_addr_t chap_dma;
+ int max_chap_entries = 0;
+ uint32_t offset = 0;
+ uint32_t chap_size;
+ int ret = 0;
+
+ chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ if (chap_table == NULL)
+ return -ENOMEM;
+
+ memset(chap_table, 0, sizeof(struct ql4_chap_table));
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_tbl_idx > max_chap_entries) {
+ ret = -EINVAL;
+ goto exit_delete_chap;
+ }
+
+ /* Check if chap index is in use.
+ * If chap is in use don't delet chap entry */
+ ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
+ if (ret) {
+ ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
+ "delete from flash\n", chap_tbl_idx);
+ ret = -EBUSY;
+ goto exit_delete_chap;
+ }
+
+ chap_size = sizeof(struct ql4_chap_table);
+ if (is_qla40XX(ha))
+ offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
+ else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ /* flt_chap_size is CHAP table size for both ports
+ * so divide it by 2 to calculate the offset for second port
+ */
+ if (ha->port_num == 1)
+ offset += (ha->hw.flt_chap_size / 2);
+ offset += (chap_tbl_idx * chap_size);
+ }
+
+ ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (ret != QLA_SUCCESS) {
+ ret = -EINVAL;
+ goto exit_delete_chap;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+ __le16_to_cpu(chap_table->cookie)));
+
+ if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+ ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+ goto exit_delete_chap;
+ }
+
+ chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+
+ offset = FLASH_CHAP_OFFSET |
+ (chap_tbl_idx * sizeof(struct ql4_chap_table));
+ ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
+ FLASH_OPT_RMW_COMMIT);
+ if (ret == QLA_SUCCESS && ha->chap_list) {
+ mutex_lock(&ha->chap_sem);
+ /* Update ha chap_list cache */
+ memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
+ chap_table, sizeof(struct ql4_chap_table));
+ mutex_unlock(&ha->chap_sem);
+ }
+ if (ret != QLA_SUCCESS)
+ ret = -EINVAL;
+
+exit_delete_chap:
+ dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+ return ret;
+}
+
+/**
+ * qla4xxx_set_chap_entry - Make chap entry with given information
+ * @shost: pointer to host
+ * @data: chap info - credentials, index and type to make chap entry
+ * @len: length of data
+ *
+ * Add or update chap entry with the given information
+ **/
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_chap_rec chap_rec;
+ struct ql4_chap_table *chap_entry = NULL;
+ struct iscsi_param_info *param_info;
+ struct nlattr *attr;
+ int max_chap_entries = 0;
+ int type;
+ int rem = len;
+ int rc = 0;
+ int size;
+
+ memset(&chap_rec, 0, sizeof(chap_rec));
+
+ nla_for_each_attr(attr, data, len, rem) {
+ param_info = nla_data(attr);
+
+ switch (param_info->param) {
+ case ISCSI_CHAP_PARAM_INDEX:
+ chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
+ break;
+ case ISCSI_CHAP_PARAM_CHAP_TYPE:
+ chap_rec.chap_type = param_info->value[0];
+ break;
+ case ISCSI_CHAP_PARAM_USERNAME:
+ size = min_t(size_t, sizeof(chap_rec.username),
+ param_info->len);
+ memcpy(chap_rec.username, param_info->value, size);
+ break;
+ case ISCSI_CHAP_PARAM_PASSWORD:
+ size = min_t(size_t, sizeof(chap_rec.password),
+ param_info->len);
+ memcpy(chap_rec.password, param_info->value, size);
+ break;
+ case ISCSI_CHAP_PARAM_PASSWORD_LEN:
+ chap_rec.password_length = param_info->value[0];
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha,
+ "%s: No such sysfs attribute\n", __func__);
+ rc = -ENOSYS;
+ goto exit_set_chap;
+ };
+ }
+
+ if (chap_rec.chap_type == CHAP_TYPE_IN)
+ type = BIDI_CHAP;
+ else
+ type = LOCAL_CHAP;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ mutex_lock(&ha->chap_sem);
+ if (chap_rec.chap_tbl_idx < max_chap_entries) {
+ rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
+ &chap_entry);
+ if (!rc) {
+ if (!(type == qla4xxx_get_chap_type(chap_entry))) {
+ ql4_printk(KERN_INFO, ha,
+ "Type mismatch for CHAP entry %d\n",
+ chap_rec.chap_tbl_idx);
+ rc = -EINVAL;
+ goto exit_unlock_chap;
+ }
+
+ /* If chap index is in use then don't modify it */
+ rc = qla4xxx_is_chap_active(shost,
+ chap_rec.chap_tbl_idx);
+ if (rc) {
+ ql4_printk(KERN_INFO, ha,
+ "CHAP entry %d is in use\n",
+ chap_rec.chap_tbl_idx);
+ rc = -EBUSY;
+ goto exit_unlock_chap;
+ }
+ }
+ } else {
+ rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
+ if (rc) {
+ ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
+ rc = -EBUSY;
+ goto exit_unlock_chap;
+ }
+ }
+
+ rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
+ chap_rec.chap_tbl_idx, type);
+
+exit_unlock_chap:
+ mutex_unlock(&ha->chap_sem);
+
+exit_set_chap:
+ return rc;
+}
+
+
+static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_offload_host_stats *host_stats = NULL;
+ int host_stats_size;
+ int ret = 0;
+ int ddb_idx = 0;
+ struct ql_iscsi_stats *ql_iscsi_stats = NULL;
+ int stats_size;
+ dma_addr_t iscsi_stats_dma;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
+
+ host_stats_size = sizeof(struct iscsi_offload_host_stats);
+
+ if (host_stats_size != len) {
+ ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
+ __func__, len, host_stats_size);
+ ret = -EINVAL;
+ goto exit_host_stats;
+ }
+ host_stats = (struct iscsi_offload_host_stats *)buf;
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit_host_stats;
+ }
+
+ stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+
+ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+ &iscsi_stats_dma, GFP_KERNEL);
+ if (!ql_iscsi_stats) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to allocate memory for iscsi stats\n");
+ ret = -ENOMEM;
+ goto exit_host_stats;
+ }
+
+ ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
+ iscsi_stats_dma);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to retrieve iscsi stats\n");
+ ret = -EIO;
+ goto exit_host_stats;
+ }
+ host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
+ host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
+ host_stats->mactx_multicast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
+ host_stats->mactx_broadcast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
+ host_stats->mactx_pause_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
+ host_stats->mactx_control_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
+ host_stats->mactx_deferral =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
+ host_stats->mactx_excess_deferral =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
+ host_stats->mactx_late_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
+ host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
+ host_stats->mactx_single_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
+ host_stats->mactx_multiple_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
+ host_stats->mactx_collision =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
+ host_stats->mactx_frames_dropped =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
+ host_stats->mactx_jumbo_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
+ host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
+ host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
+ host_stats->macrx_unknown_control_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
+ host_stats->macrx_pause_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
+ host_stats->macrx_control_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
+ host_stats->macrx_dribble =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
+ host_stats->macrx_frame_length_error =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
+ host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
+ host_stats->macrx_carrier_sense_error =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
+ host_stats->macrx_frame_discarded =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
+ host_stats->macrx_frames_dropped =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
+ host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
+ host_stats->mac_encoding_error =
+ le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
+ host_stats->macrx_length_error_large =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
+ host_stats->macrx_length_error_small =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
+ host_stats->macrx_multicast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
+ host_stats->macrx_broadcast_frames =
+ le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
+ host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
+ host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
+ host_stats->iptx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
+ host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
+ host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
+ host_stats->iprx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
+ host_stats->ip_datagram_reassembly =
+ le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
+ host_stats->ip_invalid_address_error =
+ le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
+ host_stats->ip_error_packets =
+ le64_to_cpu(ql_iscsi_stats->ip_error_packets);
+ host_stats->ip_fragrx_overlap =
+ le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
+ host_stats->ip_fragrx_outoforder =
+ le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
+ host_stats->ip_datagram_reassembly_timeout =
+ le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
+ host_stats->ipv6tx_packets =
+ le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
+ host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
+ host_stats->ipv6tx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
+ host_stats->ipv6rx_packets =
+ le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
+ host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
+ host_stats->ipv6rx_fragments =
+ le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
+ host_stats->ipv6_datagram_reassembly =
+ le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
+ host_stats->ipv6_invalid_address_error =
+ le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
+ host_stats->ipv6_error_packets =
+ le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
+ host_stats->ipv6_fragrx_overlap =
+ le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
+ host_stats->ipv6_fragrx_outoforder =
+ le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
+ host_stats->ipv6_datagram_reassembly_timeout =
+ le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
+ host_stats->tcptx_segments =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
+ host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
+ host_stats->tcprx_segments =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
+ host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
+ host_stats->tcp_duplicate_ack_retx =
+ le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
+ host_stats->tcp_retx_timer_expired =
+ le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
+ host_stats->tcprx_duplicate_ack =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
+ host_stats->tcprx_pure_ackr =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
+ host_stats->tcptx_delayed_ack =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
+ host_stats->tcptx_pure_ack =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
+ host_stats->tcprx_segment_error =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
+ host_stats->tcprx_segment_outoforder =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
+ host_stats->tcprx_window_probe =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
+ host_stats->tcprx_window_update =
+ le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
+ host_stats->tcptx_window_probe_persist =
+ le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
+ host_stats->ecc_error_correction =
+ le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
+ host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
+ host_stats->iscsi_data_bytes_tx =
+ le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
+ host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
+ host_stats->iscsi_data_bytes_rx =
+ le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
+ host_stats->iscsi_io_completed =
+ le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
+ host_stats->iscsi_unexpected_io_rx =
+ le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
+ host_stats->iscsi_format_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
+ host_stats->iscsi_hdr_digest_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
+ host_stats->iscsi_data_digest_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
+ host_stats->iscsi_sequence_error =
+ le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
+exit_host_stats:
+ if (ql_iscsi_stats)
+ dma_free_coherent(&ha->pdev->dev, host_stats_size,
+ ql_iscsi_stats, iscsi_stats_dma);
+
+ ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
+ __func__);
+ return ret;
+}
+
+static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf)
+{
+ struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int ival;
+ char *pval = NULL;
+ int len = -ENOSYS;
+
+ if (param_type == ISCSI_NET_PARAM) {
+ switch (param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ len = sprintf(buf, "%pI4\n",
+ &ha->ip_config.subnet_mask);
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_IPV4_PROTOCOL_ENABLE, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
+ }
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ len = sprintf(buf, "%s\n",
+ (ha->ip_config.tcp_options &
+ TCPOPT_DHCP_ENABLE) ?
+ "dhcp" : "static");
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ if (iface->iface_num == 0)
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_addr0);
+ if (iface->iface_num == 1)
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_addr1);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_link_local_addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ len = sprintf(buf, "%pI6\n",
+ &ha->ip_config.ipv6_default_router_addr);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ pval = (ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
+ "nd" : "static";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ pval = (ha->ip_config.ipv6_addl_options &
+ IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
+ "auto" : "static";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ID:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ival = ha->ip_config.ipv4_vlan_tag &
+ ISCSI_MAX_VLAN_ID;
+ else
+ ival = ha->ip_config.ipv6_vlan_tag &
+ ISCSI_MAX_VLAN_ID;
+
+ len = sprintf(buf, "%d\n", ival);
+ break;
+ case ISCSI_NET_PARAM_VLAN_PRIORITY:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY;
+ else
+ ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY;
+
+ len = sprintf(buf, "%d\n", ival);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_VLAN_TAGGING_ENABLE, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv4_port);
+ else
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_port);
+ break;
+ case ISCSI_NET_PARAM_IPADDR_STATE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv4_addr_state);
+ } else {
+ if (iface->iface_num == 0)
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv6_addr0_state);
+ else if (iface->iface_num == 1)
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv6_addr1_state);
+ }
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
+ pval = iscsi_get_ipaddress_state_name(
+ ha->ip_config.ipv6_link_local_state);
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
+ pval = iscsi_get_router_state_name(
+ ha->ip_config.ipv6_default_router_state);
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(~ha->ip_config.tcp_options,
+ TCPOPT_DELAYED_ACK_DISABLE, pval);
+ } else {
+ OP_STATE(~ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(~ha->ip_config.tcp_options,
+ TCPOPT_NAGLE_ALGO_DISABLE, pval);
+ } else {
+ OP_STATE(~ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(~ha->ip_config.tcp_options,
+ TCPOPT_WINDOW_SCALE_DISABLE, pval);
+ } else {
+ OP_STATE(~ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
+ pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.tcp_wsf);
+ else
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_tcp_wsf);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ ival = (ha->ip_config.tcp_options &
+ TCPOPT_TIMER_SCALE) >> 1;
+ else
+ ival = (ha->ip_config.ipv6_tcp_options &
+ IPV6_TCPOPT_TIMER_SCALE) >> 1;
+
+ len = sprintf(buf, "%d\n", ival);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.tcp_options,
+ TCPOPT_TIMESTAMP_ENABLE, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_tcp_options,
+ IPV6_TCPOPT_TIMESTAMP_EN, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_CACHE_ID:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv4_cache_id);
+ else
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_cache_id);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+ OP_STATE(ha->ip_config.tcp_options,
+ TCPOPT_DNS_SERVER_IP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+ OP_STATE(ha->ip_config.tcp_options,
+ TCPOPT_SLP_DA_INFO_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_IPV4_TOS_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS:
+ len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
+ break;
+ case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_GRAT_ARP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+ OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
+ pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+ pval = (ha->ip_config.ipv4_alt_cid_len) ?
+ (char *)ha->ip_config.ipv4_alt_cid : "";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_REQ_VID_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_USE_VID_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+ pval = (ha->ip_config.ipv4_vid_len) ?
+ (char *)ha->ip_config.ipv4_vid : "";
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_LEARN_IQN_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+ OP_STATE(~ha->ip_config.ipv4_options,
+ IPOPT_FRAGMENTATION_DISABLE, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_IN_FORWARD_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ OP_STATE(ha->ip_config.ipv4_options,
+ IPOPT_ARP_REDIRECT_EN, pval);
+ } else {
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_REDIRECT_EN, pval);
+ }
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TTL:
+ len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
+ break;
+ case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+ OP_STATE(ha->ip_config.ipv6_options,
+ IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_MLD_EN:
+ OP_STATE(ha->ip_config.ipv6_addl_options,
+ IPV6_ADDOPT_MLD_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+ len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
+ break;
+ case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_traffic_class);
+ break;
+ case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_hop_limit);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_nd_reach_time);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_nd_rexmit_timer);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_nd_stale_timeout);
+ break;
+ case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_dup_addr_detect_count);
+ break;
+ case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.ipv6_gw_advrt_mtu);
+ break;
+ default:
+ len = -ENOSYS;
+ }
+ } else if (param_type == ISCSI_IFACE_PARAM) {
+ switch (param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
+ break;
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_HEADER_DIGEST_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DATA_DIGEST_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_INITIAL_R2T_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_ERL:
+ len = sprintf(buf, "%d\n",
+ (ha->ip_config.iscsi_options &
+ ISCSIOPTS_ERL));
+ break;
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n",
+ ha->ip_config.iscsi_max_pdu_size *
+ BYTE_UNITS);
+ break;
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ len = sprintf(buf, "%u\n",
+ ha->ip_config.iscsi_first_burst_len *
+ BYTE_UNITS);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ len = sprintf(buf, "%d\n",
+ ha->ip_config.iscsi_max_outstnd_r2t);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ len = sprintf(buf, "%u\n",
+ ha->ip_config.iscsi_max_burst_len *
+ BYTE_UNITS);
+ break;
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_CHAP_AUTH_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_BIDI_CHAP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ OP_STATE(ha->ip_config.iscsi_options,
+ ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
+
+ len = sprintf(buf, "%s\n", pval);
+ break;
+ case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
+ break;
+ default:
+ len = -ENOSYS;
+ }
+ }
+
+ return len;
+}
+
+static struct iscsi_endpoint *
+qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ int ret;
+ struct iscsi_endpoint *ep;
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+
+ if (!shost) {
+ ret = -ENXIO;
+ pr_err("%s: shost is NULL\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+ ha = iscsi_host_priv(shost);
+ ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
+ if (!ep) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ qla_ep = ep->dd_data;
+ memset(qla_ep, 0, sizeof(struct qla_endpoint));
+ if (dst_addr->sa_family == AF_INET) {
+ memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
+ addr = (struct sockaddr_in *)&qla_ep->dst_addr;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
+ (char *)&addr->sin_addr));
+ } else if (dst_addr->sa_family == AF_INET6) {
+ memcpy(&qla_ep->dst_addr, dst_addr,
+ sizeof(struct sockaddr_in6));
+ addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
+ (char *)&addr6->sin6_addr));
+ } else {
+ ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
+ __func__);
+ }
+
+ qla_ep->host = shost;
+
+ return ep;
+}
+
+static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+ int ret = 0;
+
+ qla_ep = ep->dd_data;
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
+
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
+ ret = 1;
+
+ return ret;
+}
+
+static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+
+ qla_ep = ep->dd_data;
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
+ iscsi_destroy_endpoint(ep);
+}
+
+static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param,
+ char *buf)
+{
+ struct qla_endpoint *qla_ep = ep->dd_data;
+ struct sockaddr *dst_addr;
+ struct scsi_qla_host *ha;
+
+ if (!qla_ep)
+ return -ENOTCONN;
+
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+ if (!dst_addr)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &qla_ep->dst_addr, param, buf);
+ default:
+ return -ENOSYS;
+ }
+}
+
+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_session *sess;
+ struct iscsi_cls_session *cls_sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct ql_iscsi_stats *ql_iscsi_stats;
+ int stats_size;
+ int ret;
+ dma_addr_t iscsi_stats_dma;
+
+ cls_sess = iscsi_conn_to_session(cls_conn);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
+ stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+ /* Allocate memory */
+ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+ &iscsi_stats_dma, GFP_KERNEL);
+ if (!ql_iscsi_stats) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to allocate memory for iscsi stats\n");
+ goto exit_get_stats;
+ }
+
+ ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
+ iscsi_stats_dma);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to retrieve iscsi stats\n");
+ goto free_stats;
+ }
+
+ /* octets */
+ stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
+ stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
+ /* xmit pdus */
+ stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
+ stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
+ stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
+ stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
+ stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
+ stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
+ stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
+ stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
+ /* recv pdus */
+ stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
+ stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
+ stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
+ stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
+ stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
+ stats->logoutrsp_pdus =
+ le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
+ stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
+ stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
+ stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
+
+free_stats:
+ dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
+ iscsi_stats_dma);
+exit_get_stats:
+ return;
+}
+
+static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *session;
+ struct iscsi_session *sess;
+ unsigned long flags;
+ enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
+
+ session = starget_to_session(scsi_target(sc->device));
+ sess = session->dd_data;
+
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state == ISCSI_SESSION_FAILED)
+ ret = BLK_EH_RESET_TIMER;
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ return ret;
+}
+
+static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
+
+ qla4xxx_get_firmware_state(ha);
+
+ switch (ha->addl_fw_state & 0x0F00) {
+ case FW_ADDSTATE_LINK_SPEED_10MBPS:
+ speed = ISCSI_PORT_SPEED_10MBPS;
+ break;
+ case FW_ADDSTATE_LINK_SPEED_100MBPS:
+ speed = ISCSI_PORT_SPEED_100MBPS;
+ break;
+ case FW_ADDSTATE_LINK_SPEED_1GBPS:
+ speed = ISCSI_PORT_SPEED_1GBPS;
+ break;
+ case FW_ADDSTATE_LINK_SPEED_10GBPS:
+ speed = ISCSI_PORT_SPEED_10GBPS;
+ break;
+ }
+ ihost->port_speed = speed;
+}
+
+static void qla4xxx_set_port_state(struct Scsi_Host *shost)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t state = ISCSI_PORT_STATE_DOWN;
+
+ if (test_bit(AF_LINK_UP, &ha->flags))
+ state = ISCSI_PORT_STATE_UP;
+
+ ihost->port_state = state;
+}
+
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", ha->name_string);
+ break;
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ qla4xxx_set_port_state(shost);
+ len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+ break;
+ case ISCSI_HOST_PARAM_PORT_SPEED:
+ qla4xxx_set_port_speed(shost);
+ len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+
+static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
+{
+ if (ha->iface_ipv4)
+ return;
+
+ /* IPv4 */
+ ha->iface_ipv4 = iscsi_create_iface(ha->host,
+ &qla4xxx_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV4, 0, 0);
+ if (!ha->iface_ipv4)
+ ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
+ "iface0.\n");
+}
+
+static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
+{
+ if (!ha->iface_ipv6_0)
+ /* IPv6 iface-0 */
+ ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
+ &qla4xxx_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV6, 0,
+ 0);
+ if (!ha->iface_ipv6_0)
+ ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
+ "iface0.\n");
+
+ if (!ha->iface_ipv6_1)
+ /* IPv6 iface-1 */
+ ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
+ &qla4xxx_iscsi_transport,
+ ISCSI_IFACE_TYPE_IPV6, 1,
+ 0);
+ if (!ha->iface_ipv6_1)
+ ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
+ "iface1.\n");
+}
+
+static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
+{
+ if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
+ qla4xxx_create_ipv4_iface(ha);
+
+ if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
+ qla4xxx_create_ipv6_iface(ha);
+}
+
+static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
+{
+ if (ha->iface_ipv4) {
+ iscsi_destroy_iface(ha->iface_ipv4);
+ ha->iface_ipv4 = NULL;
+ }
+}
+
+static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
+{
+ if (ha->iface_ipv6_0) {
+ iscsi_destroy_iface(ha->iface_ipv6_0);
+ ha->iface_ipv6_0 = NULL;
+ }
+ if (ha->iface_ipv6_1) {
+ iscsi_destroy_iface(ha->iface_ipv6_1);
+ ha->iface_ipv6_1 = NULL;
+ }
+}
+
+static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
+{
+ qla4xxx_destroy_ipv4_iface(ha);
+ qla4xxx_destroy_ipv6_iface(ha);
+}
+
+static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
+ struct iscsi_iface_param_info *iface_param,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ /*
+ * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
+ * iface_num 1 is valid only for IPv6 Addr.
+ */
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV6_ADDR:
+ if (iface_param->iface_num & 0x1)
+ /* IPv6 Addr 1 */
+ memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
+ sizeof(init_fw_cb->ipv6_addr1));
+ else
+ /* IPv6 Addr 0 */
+ memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
+ sizeof(init_fw_cb->ipv6_addr0));
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
+ sizeof(init_fw_cb->ipv6_if_id));
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
+ break;
+ case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
+ init_fw_cb->ipv6_addtl_opts &=
+ cpu_to_le16(
+ ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
+ else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
+ init_fw_cb->ipv6_addtl_opts |=
+ cpu_to_le16(
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
+ else
+ ql4_printk(KERN_ERR, ha,
+ "Invalid autocfg setting for IPv6 addr\n");
+ break;
+ case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ if (iface_param->value[0] ==
+ ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
+ init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
+ IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
+ else if (iface_param->value[0] ==
+ ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
+ init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
+ ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
+ else
+ ql4_printk(KERN_ERR, ha,
+ "Invalid autocfg setting for IPv6 linklocal addr\n");
+ break;
+ case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
+ memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
+ qla4xxx_create_ipv6_iface(ha);
+ } else {
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
+ 0xFFFF);
+ qla4xxx_destroy_ipv6_iface(ha);
+ }
+ break;
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
+ break;
+ init_fw_cb->ipv6_vlan_tag =
+ cpu_to_be16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
+ else
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ init_fw_cb->eth_mtu_size =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ /* Autocfg applies to even interface */
+ if (iface_param->iface_num & 0x1)
+ break;
+
+ init_fw_cb->ipv6_port =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
+ 0xFFFF);
+ break;
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16((iface_param->value[0] << 1) &
+ IPV6_TCPOPT_TIMER_SCALE);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_tcp_opts |=
+ cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
+ else
+ init_fw_cb->ipv6_tcp_opts &=
+ cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
+ else
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
+ break;
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_opts |=
+ cpu_to_le16(IPV6_OPT_REDIRECT_EN);
+ else
+ init_fw_cb->ipv6_opts &=
+ cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV6_MLD_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv6_addtl_opts |=
+ cpu_to_le16(IPV6_ADDOPT_MLD_EN);
+ else
+ init_fw_cb->ipv6_addtl_opts &=
+ cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_flow_lbl =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_traffic_class = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_hop_limit = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_nd_reach_time =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_nd_rexmit_timer =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_nd_stale_timeout =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv6_gw_advrt_mtu =
+ cpu_to_le32(*(uint32_t *)iface_param->value);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
+ iface_param->param);
+ break;
+ }
+}
+
+static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
+ struct iscsi_iface_param_info *iface_param,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_IPV4_ADDR:
+ memcpy(init_fw_cb->ipv4_addr, iface_param->value,
+ sizeof(init_fw_cb->ipv4_addr));
+ break;
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
+ sizeof(init_fw_cb->ipv4_subnet));
+ break;
+ case ISCSI_NET_PARAM_IPV4_GW:
+ memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
+ sizeof(init_fw_cb->ipv4_gw_addr));
+ break;
+ case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+ if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_DHCP_ENABLE);
+ else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_DHCP_ENABLE);
+ else
+ ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
+ break;
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
+ qla4xxx_create_ipv4_iface(ha);
+ } else {
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
+ 0xFFFF);
+ qla4xxx_destroy_ipv4_iface(ha);
+ }
+ break;
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
+ break;
+ init_fw_cb->ipv4_vlan_tag =
+ cpu_to_be16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
+ break;
+ case ISCSI_NET_PARAM_MTU:
+ init_fw_cb->eth_mtu_size =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_PORT:
+ init_fw_cb->ipv4_port =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
+ 0xFFFF);
+ break;
+ case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_TCP_WSF:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16((iface_param->value[0] << 1) &
+ TCPOPT_TIMER_SCALE);
+ break;
+ case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_tcp_opts |=
+ cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
+ else
+ init_fw_cb->ipv4_tcp_opts &=
+ cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_IPV4_TOS_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_IPV4_TOS_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TOS:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_tos = iface_param->value[0];
+ break;
+ case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_GRAT_ARP_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_GRAT_ARP_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_ALT_CID_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_ALT_CID_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
+ (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
+ init_fw_cb->ipv4_dhcp_alt_cid_len =
+ strlen(init_fw_cb->ipv4_dhcp_alt_cid);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_REQ_VID_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_REQ_VID_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_USE_VID_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_USE_VID_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+ if (iface_param->iface_num & 0x1)
+ break;
+ memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
+ (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
+ init_fw_cb->ipv4_dhcp_vid_len =
+ strlen(init_fw_cb->ipv4_dhcp_vid);
+ break;
+ case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_LEARN_IQN_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_LEARN_IQN_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
+ break;
+ case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_IN_FORWARD_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_IN_FORWARD_EN);
+ break;
+ case ISCSI_NET_PARAM_REDIRECT_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->ipv4_ip_opts |=
+ cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
+ else
+ init_fw_cb->ipv4_ip_opts &=
+ cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
+ break;
+ case ISCSI_NET_PARAM_IPV4_TTL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->ipv4_ttl = iface_param->value[0];
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
+ iface_param->param);
+ break;
+ }
+}
+
+static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
+ struct iscsi_iface_param_info *iface_param,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ switch (iface_param->param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->def_timeout =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
+ break;
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
+ break;
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
+ break;
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
+ break;
+ case ISCSI_IFACE_PARAM_ERL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
+ init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
+ ISCSIOPTS_ERL);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_max_pdu_size =
+ cpu_to_le32(*(uint32_t *)iface_param->value) /
+ BYTE_UNITS;
+ break;
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_fburst_len =
+ cpu_to_le32(*(uint32_t *)iface_param->value) /
+ BYTE_UNITS;
+ break;
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_max_outstnd_r2t =
+ cpu_to_le16(*(uint16_t *)iface_param->value);
+ break;
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ if (iface_param->iface_num & 0x1)
+ break;
+ init_fw_cb->iscsi_max_burst_len =
+ cpu_to_le32(*(uint32_t *)iface_param->value) /
+ BYTE_UNITS;
+ break;
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
+ break;
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
+ break;
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
+ break;
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ if (iface_param->iface_num & 0x1)
+ break;
+ if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+ init_fw_cb->iscsi_opts |=
+ cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
+ else
+ init_fw_cb->iscsi_opts &=
+ cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
+ iface_param->param);
+ break;
+ }
+}
+
+static void
+qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
+{
+ struct addr_ctrl_blk_def *acb;
+ acb = (struct addr_ctrl_blk_def *)init_fw_cb;
+ memset(acb->reserved1, 0, sizeof(acb->reserved1));
+ memset(acb->reserved2, 0, sizeof(acb->reserved2));
+ memset(acb->reserved3, 0, sizeof(acb->reserved3));
+ memset(acb->reserved4, 0, sizeof(acb->reserved4));
+ memset(acb->reserved5, 0, sizeof(acb->reserved5));
+ memset(acb->reserved6, 0, sizeof(acb->reserved6));
+ memset(acb->reserved7, 0, sizeof(acb->reserved7));
+ memset(acb->reserved8, 0, sizeof(acb->reserved8));
+ memset(acb->reserved9, 0, sizeof(acb->reserved9));
+ memset(acb->reserved10, 0, sizeof(acb->reserved10));
+ memset(acb->reserved11, 0, sizeof(acb->reserved11));
+ memset(acb->reserved12, 0, sizeof(acb->reserved12));
+ memset(acb->reserved13, 0, sizeof(acb->reserved13));
+ memset(acb->reserved14, 0, sizeof(acb->reserved14));
+ memset(acb->reserved15, 0, sizeof(acb->reserved15));
+}
+
+static int
+qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int rval = 0;
+ struct iscsi_iface_param_info *iface_param = NULL;
+ struct addr_ctrl_blk *init_fw_cb = NULL;
+ dma_addr_t init_fw_cb_dma;
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ uint32_t rem = len;
+ struct nlattr *attr;
+
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
+ if (!init_fw_cb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
+ ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ nla_for_each_attr(attr, data, len, rem) {
+ iface_param = nla_data(attr);
+
+ if (iface_param->param_type == ISCSI_NET_PARAM) {
+ switch (iface_param->iface_type) {
+ case ISCSI_IFACE_TYPE_IPV4:
+ switch (iface_param->iface_num) {
+ case 0:
+ qla4xxx_set_ipv4(ha, iface_param,
+ init_fw_cb);
+ break;
+ default:
+ /* Cannot have more than one IPv4 interface */
+ ql4_printk(KERN_ERR, ha,
+ "Invalid IPv4 iface number = %d\n",
+ iface_param->iface_num);
+ break;
+ }
+ break;
+ case ISCSI_IFACE_TYPE_IPV6:
+ switch (iface_param->iface_num) {
+ case 0:
+ case 1:
+ qla4xxx_set_ipv6(ha, iface_param,
+ init_fw_cb);
+ break;
+ default:
+ /* Cannot have more than two IPv6 interface */
+ ql4_printk(KERN_ERR, ha,
+ "Invalid IPv6 iface number = %d\n",
+ iface_param->iface_num);
+ break;
+ }
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha,
+ "Invalid iface type\n");
+ break;
+ }
+ } else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
+ qla4xxx_set_iscsi_param(ha, iface_param,
+ init_fw_cb);
+ } else {
+ continue;
+ }
+ }
+
+ init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
+
+ rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
+ sizeof(struct addr_ctrl_blk),
+ FLASH_OPT_RMW_COMMIT);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ wait_for_completion_timeout(&ha->disable_acb_comp,
+ DISABLE_ACB_TOV * HZ);
+
+ qla4xxx_initcb_to_acb(init_fw_cb);
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
+ init_fw_cb_dma);
+
+exit_init_fw_cb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
+
+ return rval;
+}
+
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_session *sess = cls_sess->dd_data;
+ struct ddb_entry *ddb_entry = sess->dd_data;
+ struct scsi_qla_host *ha = ddb_entry->ha;
+ struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
+ struct ql4_chap_table chap_tbl;
+ int rval, len;
+ uint16_t idx;
+
+ memset(&chap_tbl, 0, sizeof(chap_tbl));
+ switch (param) {
+ case ISCSI_PARAM_CHAP_IN_IDX:
+ rval = qla4xxx_get_chap_index(ha, sess->username_in,
+ sess->password_in, BIDI_CHAP,
+ &idx);
+ if (rval)
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
+ break;
+ case ISCSI_PARAM_CHAP_OUT_IDX:
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+ idx = ddb_entry->chap_tbl_idx;
+ rval = QLA_SUCCESS;
+ } else {
+ rval = QLA_ERROR;
+ }
+ } else {
+ rval = qla4xxx_get_chap_index(ha, sess->username,
+ sess->password,
+ LOCAL_CHAP, &idx);
+ }
+ if (rval)
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
+ break;
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ /* First, populate session username and password for FLASH DDB,
+ * if not already done. This happens when session login fails
+ * for a FLASH DDB.
+ */
+ if (ddb_entry->ddb_type == FLASH_DDB &&
+ ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
+ !sess->username && !sess->password) {
+ idx = ddb_entry->chap_tbl_idx;
+ rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+ chap_tbl.secret,
+ idx);
+ if (!rval) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+ (char *)chap_tbl.name,
+ strlen((char *)chap_tbl.name));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+ (char *)chap_tbl.secret,
+ chap_tbl.secret_len);
+ }
+ }
+ /* allow fall-through */
+ default:
+ return iscsi_session_get_param(cls_sess, param, buf);
+ }
+
+ return len;
+}
+
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_conn *conn;
+ struct qla_conn *qla_conn;
+ struct sockaddr *dst_addr;
+
+ conn = cls_conn->dd_data;
+ qla_conn = conn->dd_data;
+ dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ dst_addr, param, buf);
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+ }
+}
+
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
+static struct iscsi_cls_session *
+qla4xxx_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct scsi_qla_host *ha;
+ struct qla_endpoint *qla_ep;
+ struct ddb_entry *ddb_entry;
+ uint16_t ddb_index;
+ struct iscsi_session *sess;
+ struct sockaddr *dst_addr;
+ int ret;
+
+ if (!ep) {
+ printk(KERN_ERR "qla4xxx: missing ep.\n");
+ return NULL;
+ }
+
+ qla_ep = ep->dd_data;
+ dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
+
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
+ return NULL;
+
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, ddb_index);
+ if (!cls_sess)
+ return NULL;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->fw_ddb_index = ddb_index;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
+ clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+
+ return cls_sess;
+}
+
+static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ unsigned long flags, wtime;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ddb_state;
+ int ret;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto destroy_session;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto destroy_session;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto destroy_session;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+destroy_session:
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
+ clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ iscsi_session_teardown(cls_sess);
+
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+static struct iscsi_cls_conn *
+qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
+{
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
+ conn_idx);
+ if (!cls_conn) {
+ pr_info("%s: Can not create connection for conn_idx = %u\n",
+ __func__, conn_idx);
+ return NULL;
+ }
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->conn = cls_conn;
+
+ ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
+ conn_idx));
+ return cls_conn;
+}
+
+static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn;
+ struct qla_conn *qla_conn;
+ struct iscsi_endpoint *ep;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct iscsi_session *sess;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+ cls_session->sid, cls_conn->cid));
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+ ep = iscsi_lookup_endpoint(transport_fd);
+ conn = cls_conn->dd_data;
+ qla_conn = conn->dd_data;
+ qla_conn->qla_ep = ep->dd_data;
+ return 0;
+}
+
+static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t mbx_sts = 0;
+ int ret = 0;
+ int status = QLA_SUCCESS;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+ cls_sess->sid, cls_conn->cid));
+
+ /* Check if we have matching FW DDB, if yes then do not
+ * login to this target. This could cause target to logout previous
+ * connection
+ */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ ret = -ENOMEM;
+ goto exit_conn_start;
+ }
+
+ ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
+ if (ret) {
+ /* If iscsid is stopped and started then no need to do
+ * set param again since ddb state will be already
+ * active and FW does not allow set ddb to an
+ * active session.
+ */
+ if (mbx_sts)
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ goto exit_set_param;
+ }
+
+ ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
+ __func__, ddb_entry->fw_ddb_index);
+ goto exit_conn_start;
+ }
+
+ status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (status == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ ret = -EINVAL;
+ goto exit_conn_start;
+ }
+
+ if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+
+ DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state));
+
+exit_set_param:
+ ret = 0;
+
+exit_conn_start:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess;
+ struct scsi_qla_host *ha;
+ struct ddb_entry *ddb_entry;
+ int options;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
+ cls_conn->cid));
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+}
+
+static void qla4xxx_task_work(struct work_struct *wdata)
+{
+ struct ql4_task_data *task_data;
+ struct scsi_qla_host *ha;
+ struct passthru_status *sts;
+ struct iscsi_task *task;
+ struct iscsi_hdr *hdr;
+ uint8_t *data;
+ uint32_t data_len;
+ struct iscsi_conn *conn;
+ int hdr_len;
+ itt_t itt;
+
+ task_data = container_of(wdata, struct ql4_task_data, task_work);
+ ha = task_data->ha;
+ task = task_data->task;
+ sts = &task_data->sts;
+ hdr_len = sizeof(struct iscsi_hdr);
+
+ DEBUG3(printk(KERN_INFO "Status returned\n"));
+ DEBUG3(qla4xxx_dump_buffer(sts, 64));
+ DEBUG3(printk(KERN_INFO "Response buffer"));
+ DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
+
+ conn = task->conn;
+
+ switch (sts->completionStatus) {
+ case PASSTHRU_STATUS_COMPLETE:
+ hdr = (struct iscsi_hdr *)task_data->resp_buffer;
+ /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
+ itt = sts->handle;
+ hdr->itt = itt;
+ data = task_data->resp_buffer + hdr_len;
+ data_len = task_data->resp_len - hdr_len;
+ iscsi_complete_pdu(conn, hdr, data, data_len);
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
+ sts->completionStatus);
+ break;
+ }
+ return;
+}
+
+static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
+{
+ struct ql4_task_data *task_data;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ int hdr_len;
+
+ sess = task->conn->session;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ task_data = task->dd_data;
+ memset(task_data, 0, sizeof(struct ql4_task_data));
+
+ if (task->sc) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: SCSI Commands not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ hdr_len = sizeof(struct iscsi_hdr);
+ task_data->ha = ha;
+ task_data->task = task;
+
+ if (task->data_count) {
+ task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
+ task->data_count,
+ PCI_DMA_TODEVICE);
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
+ __func__, task->conn->max_recv_dlength, hdr_len));
+
+ task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
+ task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ task_data->resp_len,
+ &task_data->resp_dma,
+ GFP_ATOMIC);
+ if (!task_data->resp_buffer)
+ goto exit_alloc_pdu;
+
+ task_data->req_len = task->data_count + hdr_len;
+ task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ task_data->req_len,
+ &task_data->req_dma,
+ GFP_ATOMIC);
+ if (!task_data->req_buffer)
+ goto exit_alloc_pdu;
+
+ task->hdr = task_data->req_buffer;
+
+ INIT_WORK(&task_data->task_work, qla4xxx_task_work);
+
+ return 0;
+
+exit_alloc_pdu:
+ if (task_data->resp_buffer)
+ dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
+ task_data->resp_buffer, task_data->resp_dma);
+
+ if (task_data->req_buffer)
+ dma_free_coherent(&ha->pdev->dev, task_data->req_len,
+ task_data->req_buffer, task_data->req_dma);
+ return -ENOMEM;
+}
+
+static void qla4xxx_task_cleanup(struct iscsi_task *task)
+{
+ struct ql4_task_data *task_data;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ int hdr_len;
+
+ hdr_len = sizeof(struct iscsi_hdr);
+ sess = task->conn->session;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ task_data = task->dd_data;
+
+ if (task->data_count) {
+ dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
+ task->data_count, PCI_DMA_TODEVICE);
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
+ __func__, task->conn->max_recv_dlength, hdr_len));
+
+ dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
+ task_data->resp_buffer, task_data->resp_dma);
+ dma_free_coherent(&ha->pdev->dev, task_data->req_len,
+ task_data->req_buffer, task_data->req_dma);
+ return;
+}
+
+static int qla4xxx_task_xmit(struct iscsi_task *task)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_session *sess = task->conn->session;
+ struct ddb_entry *ddb_entry = sess->dd_data;
+ struct scsi_qla_host *ha = ddb_entry->ha;
+
+ if (!sc)
+ return qla4xxx_send_passthru0(task);
+
+ ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
+ __func__);
+ return -ENOSYS;
+}
+
+static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
+ struct iscsi_bus_flash_conn *conn,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ unsigned long options = 0;
+ int rc = 0;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+ if (test_bit(OPT_IPV6_DEVICE, &options)) {
+ rc = iscsi_switch_str_param(&sess->portal_type,
+ PORTAL_TYPE_IPV6);
+ if (rc)
+ goto exit_copy;
+ } else {
+ rc = iscsi_switch_str_param(&sess->portal_type,
+ PORTAL_TYPE_IPV4);
+ if (rc)
+ goto exit_copy;
+ }
+
+ sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+ &options);
+ sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+ sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+ conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+ sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+ sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+ sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+ &options);
+ sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+ sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+ conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
+ sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+ &options);
+ sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+ sess->discovery_auth_optional =
+ test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+ if (test_bit(ISCSIOPT_ERL1, &options))
+ sess->erl |= BIT_1;
+ if (test_bit(ISCSIOPT_ERL0, &options))
+ sess->erl |= BIT_0;
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+ conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+ conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+ if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+ conn->tcp_timer_scale |= BIT_3;
+ if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+ conn->tcp_timer_scale |= BIT_2;
+ if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+ conn->tcp_timer_scale |= BIT_1;
+
+ conn->tcp_timer_scale >>= 1;
+ conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+ conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+ conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+ conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
+ conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
+ conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+ conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+ conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+ sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
+ sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
+ sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+ sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+
+ sess->default_taskmgmt_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ conn->port = le16_to_cpu(fw_ddb_entry->port);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+ if (!conn->ipaddress) {
+ rc = -ENOMEM;
+ goto exit_copy;
+ }
+
+ conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+ if (!conn->redirect_ipaddr) {
+ rc = -ENOMEM;
+ goto exit_copy;
+ }
+
+ memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
+
+ if (test_bit(OPT_IPV6_DEVICE, &options)) {
+ conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
+
+ conn->link_local_ipv6_addr = kmemdup(
+ fw_ddb_entry->link_local_ipv6_addr,
+ IPv6_ADDR_LEN, GFP_KERNEL);
+ if (!conn->link_local_ipv6_addr) {
+ rc = -ENOMEM;
+ goto exit_copy;
+ }
+ } else {
+ conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+ }
+
+ if (fw_ddb_entry->iscsi_name[0]) {
+ rc = iscsi_switch_str_param(&sess->targetname,
+ (char *)fw_ddb_entry->iscsi_name);
+ if (rc)
+ goto exit_copy;
+ }
+
+ if (fw_ddb_entry->iscsi_alias[0]) {
+ rc = iscsi_switch_str_param(&sess->targetalias,
+ (char *)fw_ddb_entry->iscsi_alias);
+ if (rc)
+ goto exit_copy;
+ }
+
+ COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+exit_copy:
+ return rc;
+}
+
+static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
+ struct iscsi_bus_flash_conn *conn,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ uint16_t options;
+ int rc = 0;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
+ if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ options |= BIT_8;
+ else
+ options &= ~BIT_8;
+
+ SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
+ SET_BITVAL(sess->discovery_sess, options, BIT_4);
+ SET_BITVAL(sess->entry_state, options, BIT_3);
+ fw_ddb_entry->options = cpu_to_le16(options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
+ SET_BITVAL(conn->datadgst_en, options, BIT_12);
+ SET_BITVAL(sess->imm_data_en, options, BIT_11);
+ SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
+ SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
+ SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
+ SET_BITVAL(sess->chap_auth_en, options, BIT_7);
+ SET_BITVAL(conn->snack_req_en, options, BIT_6);
+ SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
+ SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
+ SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
+ SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
+ SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
+ fw_ddb_entry->iscsi_options = cpu_to_le16(options);
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
+ SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
+ SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
+ SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
+ SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
+ SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
+ SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
+ fw_ddb_entry->tcp_options = cpu_to_le16(options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ SET_BITVAL(conn->fragment_disable, options, BIT_4);
+ fw_ddb_entry->ip_options = cpu_to_le16(options);
+
+ fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+ cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
+ fw_ddb_entry->iscsi_max_snd_data_seg_len =
+ cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
+ fw_ddb_entry->iscsi_first_burst_len =
+ cpu_to_le16(sess->first_burst / BYTE_UNITS);
+ fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
+ BYTE_UNITS);
+ fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
+ fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
+ fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
+ fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
+ fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+ fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
+ fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
+ fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
+ fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
+ fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+ fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
+ fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
+ fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
+ fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
+ fw_ddb_entry->port = cpu_to_le16(conn->port);
+ fw_ddb_entry->def_timeout =
+ cpu_to_le16(sess->default_taskmgmt_timeout);
+
+ if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
+ else
+ fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
+
+ if (conn->ipaddress)
+ memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
+ sizeof(fw_ddb_entry->ip_addr));
+
+ if (conn->redirect_ipaddr)
+ memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
+ sizeof(fw_ddb_entry->tgt_addr));
+
+ if (conn->link_local_ipv6_addr)
+ memcpy(fw_ddb_entry->link_local_ipv6_addr,
+ conn->link_local_ipv6_addr,
+ sizeof(fw_ddb_entry->link_local_ipv6_addr));
+
+ if (sess->targetname)
+ memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+ sizeof(fw_ddb_entry->iscsi_name));
+
+ if (sess->targetalias)
+ memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
+ sizeof(fw_ddb_entry->iscsi_alias));
+
+ COPY_ISID(fw_ddb_entry->isid, sess->isid);
+
+ return rc;
+}
+
+static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
+ struct iscsi_session *sess,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ unsigned long options = 0;
+ uint16_t ddb_link;
+ uint16_t disc_parent;
+ char ip_addr[DDB_IPADDR_LEN];
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+ sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+ &options);
+ sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+ conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+ sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+ sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+ sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+ &options);
+ sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+ sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+ sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+ &options);
+ sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+ sess->discovery_auth_optional =
+ test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+ if (test_bit(ISCSIOPT_ERL1, &options))
+ sess->erl |= BIT_1;
+ if (test_bit(ISCSIOPT_ERL0, &options))
+ sess->erl |= BIT_0;
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+ conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+ conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+ if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+ conn->tcp_timer_scale |= BIT_3;
+ if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+ conn->tcp_timer_scale |= BIT_2;
+ if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+ conn->tcp_timer_scale |= BIT_1;
+
+ conn->tcp_timer_scale >>= 1;
+ conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+ conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+ conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+ conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+ conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
+ conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+ conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+ conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+ sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+ COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link == DDB_ISNS)
+ disc_parent = ISCSI_DISC_PARENT_ISNS;
+ else if (ddb_link == DDB_NO_LINK)
+ disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+ else if (ddb_link < MAX_DDB_ENTRIES)
+ disc_parent = ISCSI_DISC_PARENT_SENDTGT;
+ else
+ disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
+ iscsi_get_discovery_parent_name(disc_parent), 0);
+
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+ (char *)fw_ddb_entry->iscsi_alias, 0);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ memset(ip_addr, 0, sizeof(ip_addr));
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
+ (char *)ip_addr, 0);
+ }
+}
+
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct ql4_chap_table chap_tbl;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ conn = cls_conn->dd_data;
+ memset(&chap_tbl, 0, sizeof(chap_tbl));
+
+ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
+ qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
+
+ sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ memset(ip_addr, 0, sizeof(ip_addr));
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
+
+ memset(ip_addr, 0, sizeof(ip_addr));
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ } else {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+ }
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+
+ if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+ if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+ chap_tbl.secret,
+ ddb_entry->chap_tbl_idx)) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+ (char *)chap_tbl.name,
+ strlen((char *)chap_tbl.name));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+ (char *)chap_tbl.secret,
+ chap_tbl.secret_len);
+ }
+ }
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+ (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+ le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ /* Update params */
+ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+ qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
+
+ memcpy(sess->initiatorname, ha->name_string,
+ min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/*
+ * Timer routines
+ */
+
+static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
+ unsigned long interval)
+{
+ DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
+ __func__, ha->host->host_no));
+ init_timer(&ha->timer);
+ ha->timer.expires = jiffies + interval * HZ;
+ ha->timer.data = (unsigned long)ha;
+ ha->timer.function = (void (*)(unsigned long))func;
+ add_timer(&ha->timer);
+ ha->timer_active = 1;
+}
+
+static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
+{
+ del_timer_sync(&ha->timer);
+ ha->timer_active = 0;
+}
+
+/***
+ * qla4xxx_mark_device_missing - blocks the session
+ * @cls_session: Pointer to the session to be blocked
+ * @ddb_entry: Pointer to device database entry
+ *
+ * This routine marks a device missing and close connection.
+ **/
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+ iscsi_block_session(cls_session);
+}
+
+/**
+ * qla4xxx_mark_all_devices_missing - mark all devices as missing.
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine marks a device missing and resets the relogin retry count.
+ **/
+void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
+{
+ iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
+}
+
+static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct scsi_cmnd *cmd)
+{
+ struct srb *srb;
+
+ srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
+ if (!srb)
+ return srb;
+
+ kref_init(&srb->srb_ref);
+ srb->ha = ha;
+ srb->ddb = ddb_entry;
+ srb->cmd = cmd;
+ srb->flags = 0;
+ CMD_SP(cmd) = (void *)srb;
+
+ return srb;
+}
+
+static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
+{
+ struct scsi_cmnd *cmd = srb->cmd;
+
+ if (srb->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ srb->flags &= ~SRB_DMA_VALID;
+ }
+ CMD_SP(cmd) = NULL;
+}
+
+void qla4xxx_srb_compl(struct kref *ref)
+{
+ struct srb *srb = container_of(ref, struct srb, srb_ref);
+ struct scsi_cmnd *cmd = srb->cmd;
+ struct scsi_qla_host *ha = srb->ha;
+
+ qla4xxx_srb_free_dma(ha, srb);
+
+ mempool_free(srb, ha->srb_mempool);
+
+ cmd->scsi_done(cmd);
+}
+
+/**
+ * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * Remarks:
+ * This routine is invoked by Linux to send a SCSI command to the driver.
+ * The mid-level driver tries to ensure that queuecommand never gets
+ * invoked concurrently with itself or the interrupt handler (although
+ * the interrupt handler may call this routine as part of request-
+ * completion handling). Unfortunely, it sometimes calls the scheduler
+ * in interrupt context which is a big NO! NO!.
+ **/
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct ddb_entry *ddb_entry = cmd->device->hostdata;
+ struct iscsi_cls_session *sess = ddb_entry->sess;
+ struct srb *srb;
+ int rval;
+
+ if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+ if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
+ cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_REQUEUE << 16;
+ goto qc_fail_command;
+ }
+
+ if (!sess) {
+ cmd->result = DID_IMM_RETRY << 16;
+ goto qc_fail_command;
+ }
+
+ rval = iscsi_session_chkready(sess);
+ if (rval) {
+ cmd->result = rval;
+ goto qc_fail_command;
+ }
+
+ if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
+ test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+ !test_bit(AF_ONLINE, &ha->flags) ||
+ !test_bit(AF_LINK_UP, &ha->flags) ||
+ test_bit(AF_LOOPBACK, &ha->flags) ||
+ test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
+ test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
+ goto qc_host_busy;
+
+ srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
+ if (!srb)
+ goto qc_host_busy;
+
+ rval = qla4xxx_send_command_to_isp(ha, srb);
+ if (rval != QLA_SUCCESS)
+ goto qc_host_busy_free_sp;
+
+ return 0;
+
+qc_host_busy_free_sp:
+ qla4xxx_srb_free_dma(ha, srb);
+ mempool_free(srb, ha->srb_mempool);
+
+qc_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+qc_fail_command:
+ cmd->scsi_done(cmd);
+
+ return 0;
+}
+
+/**
+ * qla4xxx_mem_free - frees memory allocated to adapter
+ * @ha: Pointer to host adapter structure.
+ *
+ * Frees memory previously allocated by qla4xxx_mem_alloc
+ **/
+static void qla4xxx_mem_free(struct scsi_qla_host *ha)
+{
+ if (ha->queues)
+ dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
+ ha->queues_dma);
+
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+
+ ha->queues_len = 0;
+ ha->queues = NULL;
+ ha->queues_dma = 0;
+ ha->request_ring = NULL;
+ ha->request_dma = 0;
+ ha->response_ring = NULL;
+ ha->response_dma = 0;
+ ha->shadow_regs = NULL;
+ ha->shadow_regs_dma = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_size = 0;
+
+ /* Free srb pool. */
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
+
+ ha->srb_mempool = NULL;
+
+ if (ha->chap_dma_pool)
+ dma_pool_destroy(ha->chap_dma_pool);
+
+ if (ha->chap_list)
+ vfree(ha->chap_list);
+ ha->chap_list = NULL;
+
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
+ /* release io space registers */
+ if (is_qla8022(ha)) {
+ if (ha->nx_pcibase)
+ iounmap(
+ (struct device_reg_82xx __iomem *)ha->nx_pcibase);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ if (ha->nx_pcibase)
+ iounmap(
+ (struct device_reg_83xx __iomem *)ha->nx_pcibase);
+ } else if (ha->reg) {
+ iounmap(ha->reg);
+ }
+
+ if (ha->reset_tmplt.buff)
+ vfree(ha->reset_tmplt.buff);
+
+ pci_release_regions(ha->pdev);
+}
+
+/**
+ * qla4xxx_mem_alloc - allocates memory for use by adapter.
+ * @ha: Pointer to host adapter structure
+ *
+ * Allocates DMA memory for request and response queues. Also allocates memory
+ * for srbs.
+ **/
+static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
+{
+ unsigned long align;
+
+ /* Allocate contiguous block of DMA memory for queues. */
+ ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
+ (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
+ sizeof(struct shadow_regs) +
+ MEM_ALIGN_VALUE +
+ (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+ ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
+ if (ha->queues == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - queues.\n");
+
+ goto mem_alloc_error_exit;
+ }
+ memset(ha->queues, 0, ha->queues_len);
+
+ /*
+ * As per RISC alignment requirements -- the bus-address must be a
+ * multiple of the request-ring size (in bytes).
+ */
+ align = 0;
+ if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
+ align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
+ (MEM_ALIGN_VALUE - 1));
+
+ /* Update request and response queue pointers. */
+ ha->request_dma = ha->queues_dma + align;
+ ha->request_ring = (struct queue_entry *) (ha->queues + align);
+ ha->response_dma = ha->queues_dma + align +
+ (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
+ ha->response_ring = (struct queue_entry *) (ha->queues + align +
+ (REQUEST_QUEUE_DEPTH *
+ QUEUE_SIZE));
+ ha->shadow_regs_dma = ha->queues_dma + align +
+ (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
+ (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
+ ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
+ (REQUEST_QUEUE_DEPTH *
+ QUEUE_SIZE) +
+ (RESPONSE_QUEUE_DEPTH *
+ QUEUE_SIZE));
+
+ /* Allocate memory for srb pool. */
+ ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
+ mempool_free_slab, srb_cachep);
+ if (ha->srb_mempool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - SRB Pool.\n");
+
+ goto mem_alloc_error_exit;
+ }
+
+ ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
+ CHAP_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->chap_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: chap_dma_pool allocation failed..\n", __func__);
+ goto mem_alloc_error_exit;
+ }
+
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
+ return QLA_SUCCESS;
+
+mem_alloc_error_exit:
+ qla4xxx_mem_free(ha);
+ return QLA_ERROR;
+}
+
+/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
+
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down.\n", temp_val);
+ status = QLA_ERROR;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ if (ha->temperature == QLA82XX_TEMP_NORMAL)
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ } else {
+ if (ha->temperature == QLA82XX_TEMP_WARN)
+ ql4_printk(KERN_INFO, ha, "Device temperature is"
+ " now %d degrees C in normal range.\n",
+ temp_val);
+ }
+ ha->temperature = temp_state;
+ return status;
+}
+
+/**
+ * qla4_8xxx_check_fw_alive - Check firmware health
+ * @ha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+{
+ uint32_t fw_heartbeat_counter;
+ int status = QLA_SUCCESS;
+
+ fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_PEG_ALIVE_COUNTER);
+ /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+ if (fw_heartbeat_counter == 0xffffffff) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
+ "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+ ha->host_no, __func__));
+ return status;
+ }
+
+ if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ ha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (ha->seconds_since_last_heartbeat == 2) {
+ ha->seconds_since_last_heartbeat = 0;
+ qla4_8xxx_dump_peg_reg(ha);
+ status = QLA_ERROR;
+ }
+ } else
+ ha->seconds_since_last_heartbeat = 0;
+
+ ha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
+}
+
+static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
+{
+ uint32_t halt_status;
+ int halt_status_unrecoverable = 0;
+
+ halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+
+ if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
+ ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
+ __func__);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE)
+ halt_status_unrecoverable = 1;
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
+ ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
+ __func__);
+ else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
+ halt_status_unrecoverable = 1;
+ }
+
+ /*
+ * Since we cannot change dev_state in interrupt context,
+ * set appropriate DPC flag then wakeup DPC
+ */
+ if (halt_status_unrecoverable) {
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
+ __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_wake_dpc(ha);
+}
+
+/**
+ * qla4_8xxx_watchdog - Poll dev state
+ * @ha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
+{
+ uint32_t dev_state;
+ uint32_t idc_ctrl;
+
+ if (is_qla8032(ha) &&
+ (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
+ WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
+ __func__, ha->func_num);
+
+ /* don't poll if reset is going on */
+ if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
+ dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+
+ if (qla4_8xxx_check_temp(ha)) {
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
+ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+ !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+
+ ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
+ __func__);
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ idc_ctrl = qla4_83xx_rd_reg(ha,
+ QLA83XX_IDC_DRV_CTRL);
+ if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
+ ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
+ __func__);
+ qla4xxx_mailbox_premature_completion(
+ ha);
+ }
+ }
+
+ if ((is_qla8032(ha) || is_qla8042(ha)) ||
+ (is_qla8022(ha) && !ql4xdontresethba)) {
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ }
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+ !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
+ __func__);
+ set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ } else {
+ /* Check firmware health */
+ if (qla4_8xxx_check_fw_alive(ha))
+ qla4_8xxx_process_fw_error(ha);
+ }
+ }
+}
+
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+ /* Wait for relogin to timeout */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try and relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+ }
+ }
+}
+
+/**
+ * qla4xxx_timer - checks every second for work to do.
+ * @ha: Pointer to host adapter structure.
+ **/
+static void qla4xxx_timer(struct scsi_qla_host *ha)
+{
+ int start_dpc = 0;
+ uint16_t w;
+
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
+ /* If we are in the middle of AER/EEH processing
+ * skip any processing and reschedule the timer
+ */
+ if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+ mod_timer(&ha->timer, jiffies + HZ);
+ return;
+ }
+
+ /* Hardware read to trigger an EEH error during mailbox waits. */
+ if (!pci_channel_offline(ha->pdev))
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+
+ if (is_qla80XX(ha))
+ qla4_8xxx_watchdog(ha);
+
+ if (is_qla40XX(ha)) {
+ /* Check for heartbeat interval. */
+ if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
+ ha->heartbeat_interval != 0) {
+ ha->seconds_since_last_heartbeat++;
+ if (ha->seconds_since_last_heartbeat >
+ ha->heartbeat_interval + 2)
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ }
+
+ /* Process any deferred work. */
+ if (!list_empty(&ha->work_list))
+ start_dpc++;
+
+ /* Wakeup the dpc routine for this adapter, if needed. */
+ if (start_dpc ||
+ test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+ test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
+ test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
+ test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+ test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
+ test_bit(DPC_AEN, &ha->dpc_flags)) {
+ DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
+ " - dpc flags = 0x%lx\n",
+ ha->host_no, __func__, ha->dpc_flags));
+ qla4xxx_wake_dpc(ha);
+ }
+
+ /* Reschedule timer thread to call us back in one second */
+ mod_timer(&ha->timer, jiffies + HZ);
+
+ DEBUG2(ha->seconds_since_last_intr++);
+}
+
+/**
+ * qla4xxx_cmd_wait - waits for all outstanding commands to complete
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine stalls the driver until all outstanding commands are returned.
+ * Caller must release the Hardware Lock prior to calling this routine.
+ **/
+static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
+{
+ uint32_t index = 0;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ unsigned long wtime;
+ uint32_t wtmo;
+
+ if (is_qla40XX(ha))
+ wtmo = WAIT_CMD_TOV;
+ else
+ wtmo = ha->nx_reset_timeout / 2;
+
+ wtime = jiffies + (wtmo * HZ);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Wait up to %u seconds for cmds to complete\n",
+ wtmo));
+
+ while (!time_after_eq(jiffies, wtime)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ /* Find a command that hasn't completed. */
+ for (index = 0; index < ha->host->can_queue; index++) {
+ cmd = scsi_host_find_tag(ha->host, index);
+ /*
+ * We cannot just check if the index is valid,
+ * becase if we are run from the scsi eh, then
+ * the scsi/block layer is going to prevent
+ * the tag from being released.
+ */
+ if (cmd != NULL && CMD_SP(cmd))
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* If No Commands are pending, wait is complete */
+ if (index == ha->host->can_queue)
+ return QLA_SUCCESS;
+
+ msleep(1000);
+ }
+ /* If we timed out on waiting for commands to come back
+ * return ERROR. */
+ return QLA_ERROR;
+}
+
+int qla4xxx_hw_reset(struct scsi_qla_host *ha)
+{
+ uint32_t ctrl_status;
+ unsigned long flags = 0;
+
+ DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
+
+ if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+ return QLA_ERROR;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /*
+ * If the SCSI Reset Interrupt bit is set, clear it.
+ * Otherwise, the Soft Reset won't work.
+ */
+ ctrl_status = readw(&ha->reg->ctrl_status);
+ if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
+ writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+
+ /* Issue Soft Reset */
+ writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_soft_reset - performs soft reset.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_soft_reset(struct scsi_qla_host *ha)
+{
+ uint32_t max_wait_time;
+ unsigned long flags = 0;
+ int status;
+ uint32_t ctrl_status;
+
+ status = qla4xxx_hw_reset(ha);
+ if (status != QLA_SUCCESS)
+ return status;
+
+ status = QLA_ERROR;
+ /* Wait until the Network Reset Intr bit is cleared */
+ max_wait_time = RESET_INTR_TOV;
+ do {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ctrl_status = readw(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
+ break;
+
+ msleep(1000);
+ } while ((--max_wait_time));
+
+ if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: Network Reset Intr not cleared by "
+ "Network function, clearing it now!\n",
+ ha->host_no));
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ /* Wait until the firmware tells us the Soft Reset is done */
+ max_wait_time = SOFT_RESET_TOV;
+ do {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ctrl_status = readw(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if ((ctrl_status & CSR_SOFT_RESET) == 0) {
+ status = QLA_SUCCESS;
+ break;
+ }
+
+ msleep(1000);
+ } while ((--max_wait_time));
+
+ /*
+ * Also, make sure that the SCSI Reset Interrupt bit has been cleared
+ * after the soft reset has taken place.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ctrl_status = readw(&ha->reg->ctrl_status);
+ if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
+ writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* If soft reset fails then most probably the bios on other
+ * function is also enabled.
+ * Since the initialization is sequential the other fn
+ * wont be able to acknowledge the soft reset.
+ * Issue a force soft reset to workaround this scenario.
+ */
+ if (max_wait_time == 0) {
+ /* Issue Force Soft Reset */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ /* Wait until the firmware tells us the Soft Reset is done */
+ max_wait_time = SOFT_RESET_TOV;
+ do {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ctrl_status = readw(&ha->reg->ctrl_status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
+ status = QLA_SUCCESS;
+ break;
+ }
+
+ msleep(1000);
+ } while ((--max_wait_time));
+ }
+
+ return status;
+}
+
+/**
+ * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
+ * @ha: Pointer to host adapter structure.
+ * @res: returned scsi status
+ *
+ * This routine is called just prior to a HARD RESET to return all
+ * outstanding commands back to the Operating System.
+ * Caller should make sure that the following locks are released
+ * before this calling routine: Hardware lock, and io_request_lock.
+ **/
+static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
+{
+ struct srb *srb;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (i = 0; i < ha->host->can_queue; i++) {
+ srb = qla4xxx_del_from_active_array(ha, i);
+ if (srb != NULL) {
+ srb->cmd->result = res;
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
+{
+ clear_bit(AF_ONLINE, &ha->flags);
+
+ /* Disable the board */
+ ql4_printk(KERN_INFO, ha, "Disabling the board\n");
+
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+ qla4xxx_mark_all_devices_missing(ha);
+ clear_bit(AF_INIT_DONE, &ha->flags);
+}
+
+static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * qla4xxx_recover_adapter - recovers adapter after a fatal error
+ * @ha: Pointer to host adapter structure.
+ **/
+static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
+{
+ int status = QLA_ERROR;
+ uint8_t reset_chip = 0;
+ uint32_t dev_state;
+ unsigned long wait;
+
+ /* Stall incoming I/O until we are done */
+ scsi_block_requests(ha->host);
+ clear_bit(AF_ONLINE, &ha->flags);
+ clear_bit(AF_LINK_UP, &ha->flags);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
+
+ set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
+ !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ /* disable pause frame for ISP83xx */
+ qla4_83xx_disable_pause(ha);
+ }
+
+ iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+ reset_chip = 1;
+
+ /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
+ * do not reset adapter, jump to initialize_adapter */
+ if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+ status = QLA_SUCCESS;
+ goto recover_ha_init_adapter;
+ }
+
+ /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
+ * from eh_host_reset or ioctl module */
+ if (is_qla80XX(ha) && !reset_chip &&
+ test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s - Performing stop_firmware...\n",
+ ha->host_no, __func__));
+ status = ha->isp_ops->reset_firmware(ha);
+ if (status == QLA_SUCCESS) {
+ ha->isp_ops->disable_intrs(ha);
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+ } else {
+ /* If the stop_firmware fails then
+ * reset the entire chip */
+ reset_chip = 1;
+ clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ }
+
+ /* Issue full chip reset if recovering from a catastrophic error,
+ * or if stop_firmware fails for ISP-8xxx.
+ * This is the default case for ISP-4xxx */
+ if (is_qla40XX(ha) || reset_chip) {
+ if (is_qla40XX(ha))
+ goto chip_reset;
+
+ /* Check if 8XXX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only */
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
+ goto chip_reset;
+
+ wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+ while (time_before(jiffies, wait)) {
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ qla4xxx_mailbox_premature_completion(ha);
+ break;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+chip_reset:
+ if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+ qla4xxx_cmd_wait(ha);
+
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s - Performing chip reset..\n",
+ ha->host_no, __func__));
+ status = ha->isp_ops->reset_chip(ha);
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+ }
+
+ /* Flush any pending ddb changed AENs */
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+recover_ha_init_adapter:
+ /* Upon successful firmware/chip reset, re-initialize the adapter */
+ if (status == QLA_SUCCESS) {
+ /* For ISP-4xxx, force function 1 to always initialize
+ * before function 3 to prevent both funcions from
+ * stepping on top of the other */
+ if (is_qla40XX(ha) && (ha->mac_index == 3))
+ ssleep(6);
+
+ /* NOTE: AF_ONLINE flag set upon successful completion of
+ * qla4xxx_initialize_adapter */
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+ status = qla4_8xxx_check_init_adapter_retry(ha);
+ if (status == QLA_ERROR) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
+ ha->host_no, __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ goto exit_recover;
+ }
+ }
+ }
+
+ /* Retry failed adapter initialization, if necessary
+ * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
+ * case to prevent ping-pong resets between functions */
+ if (!test_bit(AF_ONLINE, &ha->flags) &&
+ !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+ /* Adapter initialization failed, see if we can retry
+ * resetting the ha.
+ * Since we don't want to block the DPC for too long
+ * with multiple resets in the same thread,
+ * utilize DPC to retry */
+ if (is_qla80XX(ha)) {
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DEV_STATE);
+ ha->isp_ops->idc_unlock(ha);
+ if (dev_state == QLA8XXX_DEV_FAILED) {
+ ql4_printk(KERN_INFO, ha, "%s: don't retry "
+ "recover adapter. H/W is in Failed "
+ "state\n", __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ status = QLA_ERROR;
+
+ goto exit_recover;
+ }
+ }
+
+ if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
+ ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
+ DEBUG2(printk("scsi%ld: recover adapter - retrying "
+ "(%d) more times\n", ha->host_no,
+ ha->retry_reset_ha_cnt));
+ set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ status = QLA_ERROR;
+ } else {
+ if (ha->retry_reset_ha_cnt > 0) {
+ /* Schedule another Reset HA--DPC will retry */
+ ha->retry_reset_ha_cnt--;
+ DEBUG2(printk("scsi%ld: recover adapter - "
+ "retry remaining %d\n",
+ ha->host_no,
+ ha->retry_reset_ha_cnt));
+ status = QLA_ERROR;
+ }
+
+ if (ha->retry_reset_ha_cnt == 0) {
+ /* Recover adapter retries have been exhausted.
+ * Adapter DEAD */
+ DEBUG2(printk("scsi%ld: recover adapter "
+ "failed - board disabled\n",
+ ha->host_no));
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ status = QLA_ERROR;
+ }
+ }
+ } else {
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ }
+
+exit_recover:
+ ha->adapter_error_count++;
+
+ if (test_bit(AF_ONLINE, &ha->flags))
+ ha->isp_ops->enable_intrs(ha);
+
+ scsi_unblock_requests(ha->host);
+
+ clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+ DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
+ status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
+
+ return status;
+}
+
+static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ if (!iscsi_is_session_online(cls_session)) {
+ if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_unblock_session(ddb_entry->sess);
+ } else {
+ /* Trigger relogin */
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
+ test_bit(DF_DISABLE_RELOGIN,
+ &ddb_entry->flags)))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ }
+ }
+}
+
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ int status = QLA_SUCCESS;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ if (!iscsi_is_session_online(cls_session)) {
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+ } else {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
+ ha->host_no, __func__, ddb_entry->fw_ddb_index,
+ cls_session->sid);
+ status = QLA_ERROR;
+ }
+
+ return status;
+}
+
+static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
+{
+ iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
+}
+
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
+void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
+{
+ if (ha->dpc_thread)
+ queue_work(ha->dpc_thread, &ha->dpc_work);
+}
+
+static struct qla4_work_evt *
+qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
+ enum qla4_work_type type)
+{
+ struct qla4_work_evt *e;
+ uint32_t size = sizeof(struct qla4_work_evt) + data_size;
+
+ e = kzalloc(size, GFP_ATOMIC);
+ if (!e)
+ return NULL;
+
+ INIT_LIST_HEAD(&e->list);
+ e->type = type;
+ return e;
+}
+
+static void qla4xxx_post_work(struct scsi_qla_host *ha,
+ struct qla4_work_evt *e)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->work_lock, flags);
+ list_add_tail(&e->list, &ha->work_list);
+ spin_unlock_irqrestore(&ha->work_lock, flags);
+ qla4xxx_wake_dpc(ha);
+}
+
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+ enum iscsi_host_event_code aen_code,
+ uint32_t data_size, uint8_t *data)
+{
+ struct qla4_work_evt *e;
+
+ e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
+ if (!e)
+ return QLA_ERROR;
+
+ e->u.aen.code = aen_code;
+ e->u.aen.data_size = data_size;
+ memcpy(e->u.aen.data, data, data_size);
+
+ qla4xxx_post_work(ha, e);
+
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+ uint32_t status, uint32_t pid,
+ uint32_t data_size, uint8_t *data)
+{
+ struct qla4_work_evt *e;
+
+ e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
+ if (!e)
+ return QLA_ERROR;
+
+ e->u.ping.status = status;
+ e->u.ping.pid = pid;
+ e->u.ping.data_size = data_size;
+ memcpy(e->u.ping.data, data, data_size);
+
+ qla4xxx_post_work(ha, e);
+
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_do_work(struct scsi_qla_host *ha)
+{
+ struct qla4_work_evt *e, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work);
+
+ spin_lock_irqsave(&ha->work_lock, flags);
+ list_splice_init(&ha->work_list, &work);
+ spin_unlock_irqrestore(&ha->work_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &work, list) {
+ list_del_init(&e->list);
+
+ switch (e->type) {
+ case QLA4_EVENT_AEN:
+ iscsi_post_host_event(ha->host_no,
+ &qla4xxx_iscsi_transport,
+ e->u.aen.code,
+ e->u.aen.data_size,
+ e->u.aen.data);
+ break;
+ case QLA4_EVENT_PING_STATUS:
+ iscsi_ping_comp_event(ha->host_no,
+ &qla4xxx_iscsi_transport,
+ e->u.ping.status,
+ e->u.ping.pid,
+ e->u.ping.data_size,
+ e->u.ping.data);
+ break;
+ default:
+ ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
+ "supported", e->type);
+ }
+ kfree(e);
+ }
+}
+
+/**
+ * qla4xxx_do_dpc - dpc routine
+ * @data: in our case pointer to adapter structure
+ *
+ * This routine is a task that is schedule by the interrupt handler
+ * to perform the background processing for interrupts. We put it
+ * on a task queue that is consumed whenever the scheduler runs; that's
+ * so you can do anything (i.e. put the process to sleep etc). In fact,
+ * the mid-level tries to sleep when it reaches the driver threshold
+ * "host->can_queue". This can cause a panic if we were in our interrupt code.
+ **/
+static void qla4xxx_do_dpc(struct work_struct *work)
+{
+ struct scsi_qla_host *ha =
+ container_of(work, struct scsi_qla_host, dpc_work);
+ int status = QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+ ha->host_no, __func__, ha->flags, ha->dpc_flags));
+
+ /* Initialization not yet finished. Don't do anything yet. */
+ if (!test_bit(AF_INIT_DONE, &ha->flags))
+ return;
+
+ if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
+ ha->host_no, __func__, ha->flags));
+ return;
+ }
+
+ /* post events to application */
+ qla4xxx_do_work(ha);
+
+ if (is_qla80XX(ha)) {
+ if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+ __func__);
+ /* disable pause frame for ISP83xx */
+ qla4_83xx_disable_pause(ha);
+ }
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
+ ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ qla4_8xxx_device_state_handler(ha);
+ }
+
+ if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
+ if (is_qla8042(ha)) {
+ if (ha->idc_info.info2 &
+ ENABLE_INTERNAL_LOOPBACK) {
+ ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
+ __func__);
+ status = qla4_84xx_config_acb(ha,
+ ACB_CONFIG_DISABLE);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
+ __func__);
+ }
+ }
+ }
+ qla4_83xx_post_idc_ack(ha);
+ clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
+ }
+
+ if (is_qla8042(ha) &&
+ test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
+ __func__);
+ if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
+ QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
+ __func__);
+ }
+ clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
+ }
+
+ if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
+ qla4_8xxx_need_qsnt_handler(ha);
+ }
+ }
+
+ if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
+ (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
+ if ((is_qla8022(ha) && ql4xdontresethba) ||
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
+ DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
+ ha->host_no, __func__));
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ goto dpc_post_reset_ha;
+ }
+ if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+ test_bit(DPC_RESET_HA, &ha->dpc_flags))
+ qla4xxx_recover_adapter(ha);
+
+ if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+ uint8_t wait_time = RESET_INTR_TOV;
+
+ while ((readw(&ha->reg->ctrl_status) &
+ (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
+ if (--wait_time == 0)
+ break;
+ msleep(1000);
+ }
+ if (wait_time == 0)
+ DEBUG2(printk("scsi%ld: %s: SR|FSR "
+ "bit not cleared-- resetting\n",
+ ha->host_no, __func__));
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+ if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+ status = qla4xxx_recover_adapter(ha);
+ }
+ clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+ if (status == QLA_SUCCESS)
+ ha->isp_ops->enable_intrs(ha);
+ }
+ }
+
+dpc_post_reset_ha:
+ /* ---- process AEN? --- */
+ if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+ qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
+
+ /* ---- Get DHCP IP Address? --- */
+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
+ qla4xxx_get_dhcp_ip_address(ha);
+
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
+ /* ---- link change? --- */
+ if (!test_bit(AF_LOOPBACK, &ha->flags) &&
+ test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ /* ---- link down? --- */
+ qla4xxx_mark_all_devices_missing(ha);
+ } else {
+ /* ---- link up? --- *
+ * F/W will auto login to all devices ONLY ONCE after
+ * link up during driver initialization and runtime
+ * fatal error recovery. Therefore, the driver must
+ * manually relogin to devices when recovering from
+ * connection failures, logouts, expired KATO, etc. */
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
+ }
+ }
+ if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
+ if (qla4xxx_sysfs_ddb_export(ha))
+ ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
+ __func__);
+ }
+}
+
+/**
+ * qla4xxx_free_adapter - release the adapter
+ * @ha: pointer to adapter structure
+ **/
+static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
+{
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+
+ /* Turn-off interrupts on the card. */
+ ha->isp_ops->disable_intrs(ha);
+
+ if (is_qla40XX(ha)) {
+ writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
+ } else if (is_qla8022(ha)) {
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ readl(&ha->qla4_82xx_reg->host_int);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ writel(0, &ha->qla4_83xx_reg->risc_intr);
+ readl(&ha->qla4_83xx_reg->risc_intr);
+ }
+
+ /* Remove timer thread, if present */
+ if (ha->timer_active)
+ qla4xxx_stop_timer(ha);
+
+ /* Kill the kernel thread for this host */
+ if (ha->dpc_thread)
+ destroy_workqueue(ha->dpc_thread);
+
+ /* Kill the kernel thread for this host */
+ if (ha->task_wq)
+ destroy_workqueue(ha->task_wq);
+
+ /* Put firmware in known state */
+ ha->isp_ops->reset_firmware(ha);
+
+ if (is_qla80XX(ha)) {
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_clear_drv_active(ha);
+ ha->isp_ops->idc_unlock(ha);
+ }
+
+ /* Detach interrupts */
+ qla4xxx_free_irqs(ha);
+
+ /* free extra memory */
+ qla4xxx_mem_free(ha);
+}
+
+int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
+{
+ int status = 0;
+ unsigned long mem_base, mem_len, db_base, db_len;
+ struct pci_dev *pdev = ha->pdev;
+
+ status = pci_request_regions(pdev, DRIVER_NAME);
+ if (status) {
+ printk(KERN_WARNING
+ "scsi(%ld) Failed to reserve PIO regions (%s) "
+ "status=%d\n", ha->host_no, pci_name(pdev), status);
+ goto iospace_error_exit;
+ }
+
+ DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
+ __func__, pdev->revision));
+ ha->revision_id = pdev->revision;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+ DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
+ __func__, mem_base, mem_len));
+
+ /* mapping of pcibase pointer */
+ ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
+ if (!ha->nx_pcibase) {
+ printk(KERN_ERR
+ "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer, door bell read and write pointer */
+
+ /* mapping of IO base pointer */
+ if (is_qla8022(ha)) {
+ ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
+ ((uint8_t *)ha->nx_pcibase + 0xbc000 +
+ (ha->pdev->devfn << 11));
+ ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+ QLA82XX_CAM_RAM_DB2);
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
+ ((uint8_t *)ha->nx_pcibase);
+ }
+
+ db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
+ db_len = pci_resource_len(pdev, 4);
+
+ return 0;
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+/***
+ * qla4xxx_iospace_config - maps registers
+ * @ha: pointer to adapter structure
+ *
+ * This routines maps HBA's registers from the pci address space
+ * into the kernel virtual address space for memory mapped i/o.
+ **/
+int qla4xxx_iospace_config(struct scsi_qla_host *ha)
+{
+ unsigned long pio, pio_len, pio_flags;
+ unsigned long mmio, mmio_len, mmio_flags;
+
+ pio = pci_resource_start(ha->pdev, 0);
+ pio_len = pci_resource_len(ha->pdev, 0);
+ pio_flags = pci_resource_flags(ha->pdev, 0);
+ if (pio_flags & IORESOURCE_IO) {
+ if (pio_len < MIN_IOBASE_LEN) {
+ ql4_printk(KERN_WARNING, ha,
+ "Invalid PCI I/O region size\n");
+ pio = 0;
+ }
+ } else {
+ ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
+ pio = 0;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ mmio = pci_resource_start(ha->pdev, 1);
+ mmio_len = pci_resource_len(ha->pdev, 1);
+ mmio_flags = pci_resource_flags(ha->pdev, 1);
+
+ if (!(mmio_flags & IORESOURCE_MEM)) {
+ ql4_printk(KERN_ERR, ha,
+ "region #0 not an MMIO resource, aborting\n");
+
+ goto iospace_error_exit;
+ }
+
+ if (mmio_len < MIN_IOBASE_LEN) {
+ ql4_printk(KERN_ERR, ha,
+ "Invalid PCI mem region size, aborting\n");
+ goto iospace_error_exit;
+ }
+
+ if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
+ ql4_printk(KERN_WARNING, ha,
+ "Failed to reserve PIO/MMIO regions\n");
+
+ goto iospace_error_exit;
+ }
+
+ ha->pio_address = pio;
+ ha->pio_length = pio_len;
+ ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
+ if (!ha->reg) {
+ ql4_printk(KERN_ERR, ha,
+ "cannot remap MMIO, aborting\n");
+
+ goto iospace_error_exit;
+ }
+
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+static struct isp_operations qla4xxx_isp_ops = {
+ .iospace_config = qla4xxx_iospace_config,
+ .pci_config = qla4xxx_pci_config,
+ .disable_intrs = qla4xxx_disable_intrs,
+ .enable_intrs = qla4xxx_enable_intrs,
+ .start_firmware = qla4xxx_start_firmware,
+ .intr_handler = qla4xxx_intr_handler,
+ .interrupt_service_routine = qla4xxx_interrupt_service_routine,
+ .reset_chip = qla4xxx_soft_reset,
+ .reset_firmware = qla4xxx_hw_reset,
+ .queue_iocb = qla4xxx_queue_iocb,
+ .complete_iocb = qla4xxx_complete_iocb,
+ .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
+ .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
+ .get_sys_info = qla4xxx_get_sys_info,
+ .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
+ .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
+};
+
+static struct isp_operations qla4_82xx_isp_ops = {
+ .iospace_config = qla4_8xxx_iospace_config,
+ .pci_config = qla4_8xxx_pci_config,
+ .disable_intrs = qla4_82xx_disable_intrs,
+ .enable_intrs = qla4_82xx_enable_intrs,
+ .start_firmware = qla4_8xxx_load_risc,
+ .restart_firmware = qla4_82xx_try_start_fw,
+ .intr_handler = qla4_82xx_intr_handler,
+ .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
+ .need_reset = qla4_8xxx_need_reset,
+ .reset_chip = qla4_82xx_isp_reset,
+ .reset_firmware = qla4_8xxx_stop_firmware,
+ .queue_iocb = qla4_82xx_queue_iocb,
+ .complete_iocb = qla4_82xx_complete_iocb,
+ .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
+ .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
+ .get_sys_info = qla4_8xxx_get_sys_info,
+ .rd_reg_direct = qla4_82xx_rd_32,
+ .wr_reg_direct = qla4_82xx_wr_32,
+ .rd_reg_indirect = qla4_82xx_md_rd_32,
+ .wr_reg_indirect = qla4_82xx_md_wr_32,
+ .idc_lock = qla4_82xx_idc_lock,
+ .idc_unlock = qla4_82xx_idc_unlock,
+ .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
+ .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
+ .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
+};
+
+static struct isp_operations qla4_83xx_isp_ops = {
+ .iospace_config = qla4_8xxx_iospace_config,
+ .pci_config = qla4_8xxx_pci_config,
+ .disable_intrs = qla4_83xx_disable_intrs,
+ .enable_intrs = qla4_83xx_enable_intrs,
+ .start_firmware = qla4_8xxx_load_risc,
+ .restart_firmware = qla4_83xx_start_firmware,
+ .intr_handler = qla4_83xx_intr_handler,
+ .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
+ .need_reset = qla4_8xxx_need_reset,
+ .reset_chip = qla4_83xx_isp_reset,
+ .reset_firmware = qla4_8xxx_stop_firmware,
+ .queue_iocb = qla4_83xx_queue_iocb,
+ .complete_iocb = qla4_83xx_complete_iocb,
+ .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
+ .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
+ .get_sys_info = qla4_8xxx_get_sys_info,
+ .rd_reg_direct = qla4_83xx_rd_reg,
+ .wr_reg_direct = qla4_83xx_wr_reg,
+ .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
+ .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
+ .idc_lock = qla4_83xx_drv_lock,
+ .idc_unlock = qla4_83xx_drv_unlock,
+ .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
+ .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
+ .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
+};
+
+uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+{
+ return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
+}
+
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+{
+ return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
+}
+
+uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+{
+ return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
+}
+
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+{
+ return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
+}
+
+static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = sprintf(str, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = sysfs_format_mac(str, ha->my_mac,
+ MAC_ADDR_LEN);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = sprintf(str, "%s\n", ha->name_string);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t
+qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
+ char *buf)
+{
+ struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
+ char *str = buf;
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
+ rc = sprintf(buf, "%pI4\n",
+ &boot_conn->dest_ipaddr.ip_address);
+ else
+ rc = sprintf(str, "%pI6\n",
+ &boot_conn->dest_ipaddr.ip_address);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = sprintf(str, "%d\n", boot_conn->dest_port);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.target_chap_name_length,
+ (char *)&boot_conn->chap.target_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.target_secret_length,
+ (char *)&boot_conn->chap.target_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.intr_chap_name_length,
+ (char *)&boot_conn->chap.intr_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->chap.intr_secret_length,
+ (char *)&boot_conn->chap.intr_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = sprintf(str, "0\n");
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
+
+ return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
+}
+
+static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
+{
+ struct scsi_qla_host *ha = data;
+ struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
+
+ return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
+}
+
+static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = S_IRUGO;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static void qla4xxx_boot_release(void *data)
+{
+ struct scsi_qla_host *ha = data;
+
+ scsi_host_put(ha->host);
+}
+
+static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
+{
+ dma_addr_t buf_dma;
+ uint32_t addr, pri_addr, sec_addr;
+ uint32_t offset;
+ uint16_t func_num;
+ uint8_t val;
+ uint8_t *buf = NULL;
+ size_t size = 13 * sizeof(uint8_t);
+ int ret = QLA_SUCCESS;
+
+ func_num = PCI_FUNC(ha->pdev->devfn);
+
+ ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
+ __func__, ha->pdev->device, func_num);
+
+ if (is_qla40XX(ha)) {
+ if (func_num == 1) {
+ addr = NVRAM_PORT0_BOOT_MODE;
+ pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
+ sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
+ } else if (func_num == 3) {
+ addr = NVRAM_PORT1_BOOT_MODE;
+ pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
+ sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
+ } else {
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ /* Check Boot Mode */
+ val = rd_nvram_byte(ha, addr);
+ if (!(val & 0x07)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
+ "options : 0x%x\n", __func__, val));
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ /* get primary valid target index */
+ val = rd_nvram_byte(ha, pri_addr);
+ if (val & BIT_7)
+ ddb_index[0] = (val & 0x7f);
+
+ /* get secondary valid target index */
+ val = rd_nvram_byte(ha, sec_addr);
+ if (val & BIT_7)
+ ddb_index[1] = (val & 0x7f);
+
+ } else if (is_qla80XX(ha)) {
+ buf = dma_alloc_coherent(&ha->pdev->dev, size,
+ &buf_dma, GFP_KERNEL);
+ if (!buf) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ if (ha->port_num == 0)
+ offset = BOOT_PARAM_OFFSET_PORT0;
+ else if (ha->port_num == 1)
+ offset = BOOT_PARAM_OFFSET_PORT1;
+ else {
+ ret = QLA_ERROR;
+ goto exit_boot_info_free;
+ }
+ addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
+ offset;
+ if (qla4xxx_get_flash(ha, buf_dma, addr,
+ 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
+ " failed\n", ha->host_no, __func__));
+ ret = QLA_ERROR;
+ goto exit_boot_info_free;
+ }
+ /* Check Boot Mode */
+ if (!(buf[1] & 0x07)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
+ " : 0x%x\n", buf[1]));
+ ret = QLA_ERROR;
+ goto exit_boot_info_free;
+ }
+
+ /* get primary valid target index */
+ if (buf[2] & BIT_7)
+ ddb_index[0] = buf[2] & 0x7f;
+
+ /* get secondary valid target index */
+ if (buf[11] & BIT_7)
+ ddb_index[1] = buf[11] & 0x7f;
+ } else {
+ ret = QLA_ERROR;
+ goto exit_boot_info;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
+ " target ID %d\n", __func__, ddb_index[0],
+ ddb_index[1]));
+
+exit_boot_info_free:
+ dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
+exit_boot_info:
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+ return ret;
+}
+
+/**
+ * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to be returned
+ * @password: CHAP password to be returned
+ *
+ * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
+ * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
+ * So from the CHAP cache find the first BIDI CHAP entry and set it
+ * to the boot record in sysfs.
+ **/
+static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
+ char *password)
+{
+ int i, ret = -EINVAL;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+ return ret;
+ }
+
+ mutex_lock(&ha->chap_sem);
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+ if (chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ continue;
+ }
+
+ if (chap_table->flags & BIT_7) /* local */
+ continue;
+
+ if (!(chap_table->flags & BIT_6)) /* Not BIDI */
+ continue;
+
+ strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&ha->chap_sem);
+
+ return ret;
+}
+
+
+static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
+ struct ql4_boot_session_info *boot_sess,
+ uint16_t ddb_index)
+{
+ struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_entry_dma;
+ uint16_t idx;
+ uint16_t options;
+ int ret = QLA_SUCCESS;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer.\n",
+ __func__));
+ ret = QLA_ERROR;
+ return ret;
+ }
+
+ if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
+ fw_ddb_entry_dma, ddb_index)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
+ "index [%d]\n", __func__, ddb_index));
+ ret = QLA_ERROR;
+ goto exit_boot_target;
+ }
+
+ /* Update target name and IP from DDB */
+ memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
+ min(sizeof(boot_sess->target_name),
+ sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ memcpy(&boot_conn->dest_ipaddr.ip_address,
+ &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
+ } else {
+ boot_conn->dest_ipaddr.ip_type = 0x1;
+ memcpy(&boot_conn->dest_ipaddr.ip_address,
+ &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
+ }
+
+ boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
+
+ /* update chap information */
+ idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
+ if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
+
+ ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
+ target_chap_name,
+ (char *)&boot_conn->chap.target_secret,
+ idx);
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
+ ret = QLA_ERROR;
+ goto exit_boot_target;
+ }
+
+ boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
+ boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
+ }
+
+ if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
+
+ ret = qla4xxx_get_bidi_chap(ha,
+ (char *)&boot_conn->chap.intr_chap_name,
+ (char *)&boot_conn->chap.intr_secret);
+
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
+ ret = QLA_ERROR;
+ goto exit_boot_target;
+ }
+
+ boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
+ boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
+ }
+
+exit_boot_target:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
+{
+ uint16_t ddb_index[2];
+ int ret = QLA_ERROR;
+ int rval;
+
+ memset(ddb_index, 0, sizeof(ddb_index));
+ ddb_index[0] = 0xffff;
+ ddb_index[1] = 0xffff;
+ ret = get_fw_boot_info(ha, ddb_index);
+ if (ret != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: No boot target configured.\n", __func__));
+ return ret;
+ }
+
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
+ if (ddb_index[0] == 0xffff)
+ goto sec_target;
+
+ rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
+ ddb_index[0]);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
+ "configured\n", __func__));
+ } else
+ ret = QLA_SUCCESS;
+
+sec_target:
+ if (ddb_index[1] == 0xffff)
+ goto exit_get_boot_info;
+
+ rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
+ ddb_index[1]);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
+ " configured\n", __func__));
+ } else
+ ret = QLA_SUCCESS;
+
+exit_get_boot_info:
+ return ret;
+}
+
+static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login "
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
+
+ ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
+ if (!ha->boot_kset)
+ goto kset_free;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
+ qla4xxx_show_boot_tgt_pri_info,
+ qla4xxx_tgt_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
+ qla4xxx_show_boot_tgt_sec_info,
+ qla4xxx_tgt_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
+ qla4xxx_show_boot_ini_info,
+ qla4xxx_ini_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(ha->host))
+ goto kset_free;
+ boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
+ qla4xxx_show_boot_eth_info,
+ qla4xxx_eth_get_attr_visibility,
+ qla4xxx_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ return QLA_SUCCESS;
+
+put_host:
+ scsi_host_put(ha->host);
+kset_free:
+ iscsi_boot_destroy_kset(ha->boot_kset);
+ return -ENOMEM;
+}
+
+
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb,
+ uint8_t *flash_isid)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+
+ if (flash_isid == NULL)
+ memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
+ sizeof(tddb->isid));
+ else
+ memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb,
+ uint8_t is_isid_compare)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ /* For multi sessions, driver generates the ISID, so do not compare
+ * ISID in reset path since it would be a comparison between the
+ * driver generated ISID and firmware generated ISID. This could
+ * lead to adding duplicated DDBs in the list as driver generated
+ * ISID would not match firmware generated ISID.
+ */
+ if (is_isid_compare) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
+ "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
+ __func__, old_tddb->isid[5], old_tddb->isid[4],
+ old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
+ old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
+ new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
+ new_tddb->isid[0]));
+
+ if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+ sizeof(old_tddb->isid)))
+ return QLA_ERROR;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint32_t *index)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
+ ret = QLA_SUCCESS; /* found */
+ if (index != NULL)
+ *index = idx;
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+/**
+ * qla4xxx_check_existing_isid - check if target with same isid exist
+ * in target list
+ * @list_nt: list of target
+ * @isid: isid to check
+ *
+ * This routine return QLA_SUCCESS if target with same isid exist
+ **/
+static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct dev_db_entry *fw_ddb_entry;
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ fw_ddb_entry = &nt_ddb_idx->fw_ddb;
+
+ if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
+ sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
+ return QLA_SUCCESS;
+ }
+ }
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_update_isid - compare ddbs and updated isid
+ * @ha: Pointer to host adapter structure.
+ * @list_nt: list of nt target
+ * @fw_ddb_entry: firmware ddb entry
+ *
+ * This routine update isid if ddbs have same iqn, same isid and
+ * different IP addr.
+ * Return QLA_SUCCESS if isid is updated.
+ **/
+static int qla4xxx_update_isid(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ uint8_t base_value, i;
+
+ base_value = fw_ddb_entry->isid[1] & 0x1f;
+ for (i = 0; i < 8; i++) {
+ fw_ddb_entry->isid[1] = (base_value | (i << 5));
+ if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
+ break;
+ }
+
+ if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_should_update_isid - check if isid need to update
+ * @ha: Pointer to host adapter structure.
+ * @old_tddb: ddb tuple
+ * @new_tddb: ddb tuple
+ *
+ * Return QLA_SUCCESS if different IP, different PORT, same iqn,
+ * same isid
+ **/
+static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
+ /* Same ip */
+ if (old_tddb->port == new_tddb->port)
+ return QLA_ERROR;
+ }
+
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ /* different iqn */
+ return QLA_ERROR;
+
+ if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+ sizeof(old_tddb->isid)))
+ /* different isid */
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
+ * @ha: Pointer to host adapter structure.
+ * @list_nt: list of nt target.
+ * @fw_ddb_entry: firmware ddb entry.
+ *
+ * This routine check if fw_ddb_entry already exists in list_nt to avoid
+ * duplicate ddb in list_nt.
+ * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
+ * Note: This function also update isid of DDB if required.
+ **/
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int rval, ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
+ nt_ddb_idx->flash_isid);
+ ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
+ /* found duplicate ddb */
+ if (ret == QLA_SUCCESS)
+ goto exit_check;
+ }
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
+
+ ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
+ if (ret == QLA_SUCCESS) {
+ rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
+ if (rval == QLA_SUCCESS)
+ ret = QLA_ERROR;
+ else
+ ret = QLA_SUCCESS;
+
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *t_addr;
+ struct sockaddr_storage *dst_addr;
+ char *ip;
+
+ /* TODO: need to destroy on unload iscsi_endpoint*/
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ uint16_t idx)
+{
+ uint16_t def_timeout;
+
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+ ddb_entry->chap_tbl_idx = INVALID_ENTRY;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+ def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_relogin_timeout =
+ (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+ def_timeout : LOGIN_TOV;
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+
+ if (ql4xdisablesysfsboot &&
+ (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
+ set_bit(DF_BOOT_TGT, &ddb_entry->flags);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
+static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
+ struct dev_db_entry *flash_ddb_entry)
+{
+ uint16_t options = 0;
+ size_t ip_len = IP_ADDR_LEN;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ ip_len = IPv6_ADDR_LEN;
+
+ if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
+ return QLA_ERROR;
+
+ if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
+ sizeof(fw_ddb_entry->isid)))
+ return QLA_ERROR;
+
+ if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
+ sizeof(fw_ddb_entry->port)))
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint32_t fw_idx, uint32_t *flash_index)
+{
+ struct dev_db_entry *flash_ddb_entry;
+ dma_addr_t flash_ddb_entry_dma;
+ uint32_t idx = 0;
+ int max_ddbs;
+ int ret = QLA_ERROR, status;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &flash_ddb_entry_dma);
+ if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "Out of memory\n");
+ goto exit_find_st_idx;
+ }
+
+ status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+ flash_ddb_entry_dma, fw_idx);
+ if (status == QLA_SUCCESS) {
+ status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+ if (status == QLA_SUCCESS) {
+ *flash_index = fw_idx;
+ ret = QLA_SUCCESS;
+ goto exit_find_st_idx;
+ }
+ }
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+ flash_ddb_entry_dma, idx);
+ if (status == QLA_ERROR)
+ continue;
+
+ status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+ if (status == QLA_SUCCESS) {
+ *flash_index = idx;
+ ret = QLA_SUCCESS;
+ goto exit_find_st_idx;
+ }
+ }
+
+ if (idx == max_ddbs)
+ ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
+ fw_idx);
+
+exit_find_st_idx:
+ if (flash_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
+ flash_ddb_entry_dma);
+
+ return ret;
+}
+
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+ struct list_head *list_st)
+{
+ struct qla_ddb_index *st_ddb_idx;
+ int max_ddbs;
+ int fw_idx_size;
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint32_t flash_index = -1;
+ uint16_t conn_id = 0;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_st_list;
+ }
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ /* Ignore DDB if invalid state (unassigned) */
+ if (state == DDB_DS_UNASSIGNED)
+ goto continue_next_st;
+
+ /* Check if ST, add to the list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
+ &flash_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha,
+ "No flash entry for ST at idx [%d]\n", idx);
+ st_ddb_idx->flash_ddb_idx = idx;
+ } else {
+ ql4_printk(KERN_INFO, ha,
+ "ST at idx [%d] is stored at flash [%d]\n",
+ idx, flash_index);
+ st_ddb_idx->flash_ddb_idx = flash_index;
+ }
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+exit_st_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed ddb to be removed
+ *
+ * Iterate over the list of DDBs and find and remove DDBs that are either in
+ * no connection active state or failed state
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+ struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ int ret;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx, &state,
+ &conn_err, NULL, NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+ }
+}
+
+static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ uint32_t max_ddbs = 0;
+ uint16_t ddb_link = -1;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < max_ddbs)
+ sess->discovery_parent_idx = ddb_link;
+ else
+ sess->discovery_parent_idx = DDB_NO_LINK;
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ int is_reset, uint16_t idx)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32;
+ uint16_t conn_id = 0;
+ uint32_t initial_cmdsn = 0;
+ int ret = QLA_SUCCESS;
+
+ struct ddb_entry *ddb_entry = NULL;
+
+ /* Create session object, with INVALID_ENTRY,
+ * the targer_id would get set when we issue the login
+ */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
+
+ /*
+ * so calling module_put function to decrement the
+ * reference count.
+ **/
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
+
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+ if (!cls_conn) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
+
+ ddb_entry->conn = cls_conn;
+
+ /* Setup ep, for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+ qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices
+ * by short-circuting the logic of setting
+ * timer to relogin - instead set the flags
+ * to initiate login right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+
+exit_setup:
+ return ret;
+}
+
+static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
+ struct list_head *list_ddb,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint16_t ddb_link;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ if (ddb_idx->fw_ddb_idx == ddb_link) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Updating NT parent idx from [%d] to [%d]\n",
+ ddb_link, ddb_idx->flash_ddb_idx));
+ fw_ddb_entry->ddb_link =
+ cpu_to_le16(ddb_idx->flash_ddb_idx);
+ return;
+ }
+ }
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct list_head *list_st,
+ int is_reset)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint32_t ddb_idx = -1;
+ uint16_t conn_id = 0;
+ uint16_t ddb_link = -1;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+ /* Check if NT, then add to list it */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < max_ddbs)
+ qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
+
+ if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) &&
+ (is_reset == INIT_ADAPTER))
+ goto continue_next_nt;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ /* Copy original isid as it may get updated in function
+ * qla4xxx_update_isid(). We need original isid in
+ * function qla4xxx_compare_tuple_ddb to find duplicate
+ * target */
+ memcpy(&nt_ddb_idx->flash_isid[0],
+ &fw_ddb_entry->isid[0],
+ sizeof(nt_ddb_idx->flash_isid));
+
+ ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
+ fw_ddb_entry);
+ if (ret == QLA_SUCCESS) {
+ /* free nt_ddb_idx and do not add to list_nt */
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+
+ /* Copy updated isid */
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
+ &ddb_idx);
+ if (ret == QLA_SUCCESS) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+ ddb_idx);
+ if (ddb_entry != NULL)
+ qla4xxx_update_sess_disc_idx(ha,
+ ddb_entry,
+ fw_ddb_entry);
+ goto continue_next_nt;
+ }
+ }
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
+ if (ret == QLA_ERROR)
+ goto exit_nt_list;
+
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+
+exit_nt_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ uint16_t target_id)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_new_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ /* Check if NT, then add it to list */
+ if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_new_nt;
+
+ if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
+ goto continue_next_new_nt;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
+ if (ret == QLA_SUCCESS) {
+ /* free nt_ddb_idx and do not add to list_nt */
+ vfree(nt_ddb_idx);
+ goto continue_next_new_nt;
+ }
+
+ if (target_id < max_ddbs)
+ fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
+
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+ idx);
+ if (ret == QLA_ERROR)
+ goto exit_new_nt_list;
+
+continue_next_new_nt:
+ if (next_idx == 0)
+ break;
+ }
+
+exit_new_nt_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
+ * @dev: dev associated with the sysfs entry
+ * @data: pointer to flashnode session object
+ *
+ * Returns:
+ * 1: if flashnode entry is non-persistent
+ * 0: if flashnode entry is persistent
+ **/
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+{
+ struct iscsi_bus_flash_session *fnode_sess;
+
+ if (!iscsi_flashnode_bus_match(dev, NULL))
+ return 0;
+
+ fnode_sess = iscsi_dev_to_flash_session(dev);
+
+ return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
+ * @ha: pointer to host
+ * @fw_ddb_entry: flash ddb data
+ * @idx: target index
+ * @user: if set then this call is made from userland else from kernel
+ *
+ * Returns:
+ * On sucess: QLA_SUCCESS
+ * On failure: QLA_ERROR
+ *
+ * This create separate sysfs entries for session and connection attributes of
+ * the given fw ddb entry.
+ * If this is invoked as a result of a userspace call then the entry is marked
+ * as nonpersistent using flash_state field.
+ **/
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t *idx, int user)
+{
+ struct iscsi_bus_flash_session *fnode_sess = NULL;
+ struct iscsi_bus_flash_conn *fnode_conn = NULL;
+ int rc = QLA_ERROR;
+
+ fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
+ &qla4xxx_iscsi_transport, 0);
+ if (!fnode_sess) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
+ __func__, *idx, ha->host_no);
+ goto exit_tgt_create;
+ }
+
+ fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
+ &qla4xxx_iscsi_transport, 0);
+ if (!fnode_conn) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
+ __func__, *idx, ha->host_no);
+ goto free_sess;
+ }
+
+ if (user) {
+ fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
+ } else {
+ fnode_sess->flash_state = DEV_DB_PERSISTENT;
+
+ if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
+ fnode_sess->is_boot_target = 1;
+ else
+ fnode_sess->is_boot_target = 0;
+ }
+
+ rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
+ fw_ddb_entry);
+
+ ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+ __func__, fnode_sess->dev.kobj.name);
+
+ ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+ __func__, fnode_conn->dev.kobj.name);
+
+ return QLA_SUCCESS;
+
+free_sess:
+ iscsi_destroy_flashnode_sess(fnode_sess);
+
+exit_tgt_create:
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
+ * @shost: pointer to host
+ * @buf: type of ddb entry (ipv4/ipv6)
+ * @len: length of buf
+ *
+ * This creates new ddb entry in the flash by finding first free index and
+ * storing default ddb there. And then create sysfs entry for the new ddb entry.
+ **/
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+ int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ struct device *dev;
+ uint16_t idx = 0;
+ uint16_t max_ddbs = 0;
+ uint32_t options = 0;
+ uint32_t rval = QLA_ERROR;
+
+ if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
+ strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
+ __func__));
+ goto exit_ddb_add;
+ }
+
+ max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ goto exit_ddb_add;
+ }
+
+ dev = iscsi_find_flashnode_sess(ha->host, NULL,
+ qla4xxx_sysfs_ddb_is_non_persistent);
+ if (dev) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: A non-persistent entry %s found\n",
+ __func__, dev->kobj.name);
+ put_device(dev);
+ goto exit_ddb_add;
+ }
+
+ /* Index 0 and 1 are reserved for boot target entries */
+ for (idx = 2; idx < max_ddbs; idx++) {
+ if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
+ fw_ddb_entry_dma, idx))
+ break;
+ }
+
+ if (idx == max_ddbs)
+ goto exit_ddb_add;
+
+ if (!strncasecmp("ipv6", buf, 4))
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (rval == QLA_ERROR)
+ goto exit_ddb_add;
+
+ rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
+
+exit_ddb_add:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ if (rval == QLA_SUCCESS)
+ return idx;
+ else
+ return -EIO;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This writes the contents of target ddb buffer to Flash with a valid cookie
+ * value in order to make the ddb entry persistent.
+ **/
+static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t options = 0;
+ int rval = 0;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ rval = -ENOMEM;
+ goto exit_ddb_apply;
+ }
+
+ if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (rval == QLA_ERROR)
+ goto exit_ddb_apply;
+
+ dev_db_start_offset += (fnode_sess->target_id *
+ sizeof(*fw_ddb_entry));
+
+ qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+ fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+ rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
+
+ if (rval == QLA_SUCCESS) {
+ fnode_sess->flash_state = DEV_DB_PERSISTENT;
+ ql4_printk(KERN_INFO, ha,
+ "%s: flash node %u of host %lu written to flash\n",
+ __func__, fnode_sess->target_id, ha->host_no);
+ } else {
+ rval = -EIO;
+ ql4_printk(KERN_ERR, ha,
+ "%s: Error while writing flash node %u of host %lu to flash\n",
+ __func__, fnode_sess->target_id, ha->host_no);
+ }
+
+exit_ddb_apply:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return rval;
+}
+
+static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t idx)
+{
+ struct dev_db_entry *ddb_entry = NULL;
+ dma_addr_t ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t mbx_sts = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t tmo = 0;
+ int ret = 0;
+
+ ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+ &ddb_entry_dma, GFP_KERNEL);
+ if (!ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
+
+ ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
+ if (ret != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to set ddb entry for index %d\n",
+ __func__, idx));
+ goto exit_ddb_conn_open;
+ }
+
+ qla4xxx_conn_open(ha, idx);
+
+ /* To ensure that sendtargets is done, wait for at least 12 secs */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for login to ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+ NULL, &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED)
+ break;
+
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+exit_ddb_conn_open:
+ if (ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+ ddb_entry, ddb_entry_dma);
+ return ret;
+}
+
+static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t target_id)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ struct list_head list_nt;
+ uint16_t ddb_index;
+ int ret = 0;
+
+ if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: A discovery already in progress!\n", __func__);
+ return QLA_ERROR;
+ }
+
+ INIT_LIST_HEAD(&list_nt);
+
+ set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login_st_clr_bit;
+
+ ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login_st;
+
+ qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
+ list_del_init(&ddb_idx->list);
+ qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
+ vfree(ddb_idx);
+ }
+
+exit_login_st:
+ if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to clear DDB index = 0x%x\n", ddb_index);
+ }
+
+ clear_bit(ddb_index, ha->ddb_idx_map);
+
+exit_login_st_clr_bit:
+ clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+ return ret;
+}
+
+static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t idx)
+{
+ int ret = QLA_ERROR;
+
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
+ if (ret != QLA_SUCCESS)
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+ idx);
+ else
+ ret = -EPERM;
+
+ return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_login - Login to the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This logs in to the specified target
+ **/
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t options = 0;
+ int ret = 0;
+
+ if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Target info is not persistent\n", __func__);
+ ret = -EIO;
+ goto exit_ddb_login;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ ret = -ENOMEM;
+ goto exit_ddb_login;
+ }
+
+ if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (ret == QLA_ERROR)
+ goto exit_ddb_login;
+
+ qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+ fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+ if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+ ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
+ fnode_sess->target_id);
+ else
+ ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
+ fnode_sess->target_id);
+
+ if (ret > 0)
+ ret = -EIO;
+
+exit_ddb_login:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
+ * @cls_sess: pointer to session to be logged out
+ *
+ * This performs session log out from the specified target
+ **/
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry = NULL;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long flags;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int options;
+ int ret = 0;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
+ __func__);
+ ret = -ENXIO;
+ goto exit_ddb_logout;
+ }
+
+ if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Logout from boot target entry is not permitted.\n",
+ __func__);
+ ret = -EPERM;
+ goto exit_ddb_logout;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+ goto ddb_logout_init;
+
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto ddb_logout_init;
+
+ if (ddb_state == DDB_DS_SESSION_ACTIVE)
+ goto ddb_logout_init;
+
+ /* wait until next relogin is triggered using DF_RELOGIN and
+ * clear DF_RELOGIN to avoid invocation of further relogin
+ */
+ wtime = jiffies + (HZ * RELOGIN_TOV);
+ do {
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
+ goto ddb_logout_init;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+ddb_logout_init:
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ qla4xxx_session_logout_ddb(ha, ddb_entry, options);
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto ddb_logout_clr_sess;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto ddb_logout_clr_sess;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+ddb_logout_clr_sess:
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ /*
+ * we have decremented the reference count of the driver
+ * when we setup the session to have the driver unload
+ * to be seamless without actually destroying the
+ * session
+ **/
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ iscsi_session_teardown(ddb_entry->sess);
+
+ clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
+ ret = QLA_SUCCESS;
+
+exit_ddb_logout:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout - Logout from the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This performs log out from the specified target
+ **/
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct ql4_tuple_ddb *flash_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ int idx, index;
+ int status, ret = 0;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ flash_tddb = vzalloc(sizeof(*flash_tddb));
+ if (!flash_tddb) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s:Memory Allocation failed.\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s:Memory Allocation failed.\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ if (!fnode_sess->targetname) {
+ ql4_printk(KERN_ERR, ha,
+ "%s:Cannot logout from SendTarget entry\n",
+ __func__);
+ ret = -EPERM;
+ goto exit_ddb_logout;
+ }
+
+ if (fnode_sess->is_boot_target) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Logout from boot target entry is not permitted.\n",
+ __func__);
+ ret = -EPERM;
+ goto exit_ddb_logout;
+ }
+
+ strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+ ISCSI_NAME_SIZE);
+
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
+ else
+ sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
+
+ flash_tddb->tpgt = fnode_sess->tpgt;
+ flash_tddb->port = fnode_conn->port;
+
+ COPY_ISID(flash_tddb->isid, fnode_sess->isid);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ index = ddb_entry->sess->target_id;
+ status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
+ fw_ddb_dma, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ &conn_id);
+ if (status == QLA_ERROR) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
+
+ status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
+ true);
+ if (status == QLA_SUCCESS) {
+ ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
+ break;
+ }
+ }
+
+ if (idx == MAX_DDB_ENTRIES)
+ ret = -ESRCH;
+
+exit_ddb_logout:
+ if (flash_tddb)
+ vfree(flash_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ return ret;
+}
+
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+ int param, char *buf)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct ql4_chap_table chap_tbl;
+ struct device *dev;
+ int parent_type;
+ int rc = 0;
+
+ dev = iscsi_find_flashnode_conn(fnode_sess);
+ if (!dev)
+ return -EIO;
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+ switch (param) {
+ case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+ rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
+ break;
+ case ISCSI_FLASHNODE_PORTAL_TYPE:
+ rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
+ break;
+ case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_SESS:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
+ break;
+ case ISCSI_FLASHNODE_ENTRY_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
+ break;
+ case ISCSI_FLASHNODE_HDR_DGST_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
+ break;
+ case ISCSI_FLASHNODE_DATA_DGST_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
+ break;
+ case ISCSI_FLASHNODE_IMM_DATA_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
+ break;
+ case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
+ break;
+ case ISCSI_FLASHNODE_DATASEQ_INORDER:
+ rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
+ break;
+ case ISCSI_FLASHNODE_PDU_INORDER:
+ rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
+ break;
+ case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
+ break;
+ case ISCSI_FLASHNODE_SNACK_REQ_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
+ break;
+ case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
+ break;
+ case ISCSI_FLASHNODE_ERL:
+ rc = sprintf(buf, "%u\n", fnode_sess->erl);
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
+ break;
+ case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
+ break;
+ case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
+ break;
+ case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
+ break;
+ case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+ rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
+ break;
+ case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+ rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
+ break;
+ case ISCSI_FLASHNODE_FIRST_BURST:
+ rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+ rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+ rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
+ break;
+ case ISCSI_FLASHNODE_MAX_R2T:
+ rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
+ break;
+ case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+ rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
+ break;
+ case ISCSI_FLASHNODE_ISID:
+ rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ fnode_sess->isid[0], fnode_sess->isid[1],
+ fnode_sess->isid[2], fnode_sess->isid[3],
+ fnode_sess->isid[4], fnode_sess->isid[5]);
+ break;
+ case ISCSI_FLASHNODE_TSID:
+ rc = sprintf(buf, "%u\n", fnode_sess->tsid);
+ break;
+ case ISCSI_FLASHNODE_PORT:
+ rc = sprintf(buf, "%d\n", fnode_conn->port);
+ break;
+ case ISCSI_FLASHNODE_MAX_BURST:
+ rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
+ break;
+ case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+ rc = sprintf(buf, "%u\n",
+ fnode_sess->default_taskmgmt_timeout);
+ break;
+ case ISCSI_FLASHNODE_IPADDR:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
+ else
+ rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
+ break;
+ case ISCSI_FLASHNODE_ALIAS:
+ if (fnode_sess->targetalias)
+ rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%pI6\n",
+ fnode_conn->redirect_ipaddr);
+ else
+ rc = sprintf(buf, "%pI4\n",
+ fnode_conn->redirect_ipaddr);
+ break;
+ case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+ rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
+ break;
+ case ISCSI_FLASHNODE_LOCAL_PORT:
+ rc = sprintf(buf, "%u\n", fnode_conn->local_port);
+ break;
+ case ISCSI_FLASHNODE_IPV4_TOS:
+ rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
+ break;
+ case ISCSI_FLASHNODE_IPV6_TC:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%u\n",
+ fnode_conn->ipv6_traffic_class);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+ rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
+ break;
+ case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%pI6\n",
+ fnode_conn->link_local_ipv6_addr);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+ if (fnode_sess->discovery_parent_type == DDB_ISNS)
+ parent_type = ISCSI_DISC_PARENT_ISNS;
+ else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
+ parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+ else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+ parent_type = ISCSI_DISC_PARENT_SENDTGT;
+ else
+ parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+
+ rc = sprintf(buf, "%s\n",
+ iscsi_get_discovery_parent_name(parent_type));
+ break;
+ case ISCSI_FLASHNODE_NAME:
+ if (fnode_sess->targetname)
+ rc = sprintf(buf, "%s\n", fnode_sess->targetname);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_TPGT:
+ rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
+ break;
+ case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
+ break;
+ case ISCSI_FLASHNODE_TCP_RECV_WSF:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
+ break;
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
+ break;
+ case ISCSI_FLASHNODE_USERNAME:
+ if (fnode_sess->chap_auth_en) {
+ qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ fnode_sess->chap_out_idx);
+ rc = sprintf(buf, "%s\n", chap_tbl.name);
+ } else {
+ rc = sprintf(buf, "\n");
+ }
+ break;
+ case ISCSI_FLASHNODE_PASSWORD:
+ if (fnode_sess->chap_auth_en) {
+ qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ fnode_sess->chap_out_idx);
+ rc = sprintf(buf, "%s\n", chap_tbl.secret);
+ } else {
+ rc = sprintf(buf, "\n");
+ }
+ break;
+ case ISCSI_FLASHNODE_STATSN:
+ rc = sprintf(buf, "%u\n", fnode_conn->statsn);
+ break;
+ case ISCSI_FLASHNODE_EXP_STATSN:
+ rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
+ break;
+ case ISCSI_FLASHNODE_IS_BOOT_TGT:
+ rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+
+ put_device(dev);
+ return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ * @data: Parameters and their values to update
+ * @len: len of data
+ *
+ * This sets the parameter of flash ddb entry and writes them to flash
+ **/
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn,
+ void *data, int len)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_flashnode_param_info *fnode_param;
+ struct ql4_chap_table chap_tbl;
+ struct nlattr *attr;
+ uint16_t chap_out_idx = INVALID_ENTRY;
+ int rc = QLA_ERROR;
+ uint32_t rem = len;
+
+ memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
+ nla_for_each_attr(attr, data, len, rem) {
+ fnode_param = nla_data(attr);
+
+ switch (fnode_param->param) {
+ case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+ fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_PORTAL_TYPE:
+ memcpy(fnode_sess->portal_type, fnode_param->value,
+ strlen(fnode_sess->portal_type));
+ break;
+ case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+ fnode_sess->auto_snd_tgt_disable =
+ fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_SESS:
+ fnode_sess->discovery_sess = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_ENTRY_EN:
+ fnode_sess->entry_state = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_HDR_DGST_EN:
+ fnode_conn->hdrdgst_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DATA_DGST_EN:
+ fnode_conn->datadgst_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IMM_DATA_EN:
+ fnode_sess->imm_data_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+ fnode_sess->initial_r2t_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DATASEQ_INORDER:
+ fnode_sess->dataseq_inorder_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_PDU_INORDER:
+ fnode_sess->pdu_inorder_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+ fnode_sess->chap_auth_en = fnode_param->value[0];
+ /* Invalidate chap index if chap auth is disabled */
+ if (!fnode_sess->chap_auth_en)
+ fnode_sess->chap_out_idx = INVALID_ENTRY;
+
+ break;
+ case ISCSI_FLASHNODE_SNACK_REQ_EN:
+ fnode_conn->snack_req_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+ fnode_sess->discovery_logout_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+ fnode_sess->bidi_chap_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+ fnode_sess->discovery_auth_optional =
+ fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_ERL:
+ fnode_sess->erl = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+ fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+ fnode_conn->tcp_nagle_disable = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+ fnode_conn->tcp_wsf_disable = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+ fnode_conn->tcp_timer_scale = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+ fnode_conn->tcp_timestamp_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+ fnode_conn->fragment_disable = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+ fnode_conn->max_recv_dlength =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+ fnode_conn->max_xmit_dlength =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_FIRST_BURST:
+ fnode_sess->first_burst =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+ fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+ fnode_sess->time2retain =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_MAX_R2T:
+ fnode_sess->max_r2t =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+ fnode_conn->keepalive_timeout =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_ISID:
+ memcpy(fnode_sess->isid, fnode_param->value,
+ sizeof(fnode_sess->isid));
+ break;
+ case ISCSI_FLASHNODE_TSID:
+ fnode_sess->tsid = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_PORT:
+ fnode_conn->port = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_MAX_BURST:
+ fnode_sess->max_burst = *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+ fnode_sess->default_taskmgmt_timeout =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_IPADDR:
+ memcpy(fnode_conn->ipaddress, fnode_param->value,
+ IPv6_ADDR_LEN);
+ break;
+ case ISCSI_FLASHNODE_ALIAS:
+ rc = iscsi_switch_str_param(&fnode_sess->targetalias,
+ (char *)fnode_param->value);
+ break;
+ case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+ memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
+ IPv6_ADDR_LEN);
+ break;
+ case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+ fnode_conn->max_segment_size =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_LOCAL_PORT:
+ fnode_conn->local_port =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_IPV4_TOS:
+ fnode_conn->ipv4_tos = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IPV6_TC:
+ fnode_conn->ipv6_traffic_class = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+ fnode_conn->ipv6_flow_label = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_NAME:
+ rc = iscsi_switch_str_param(&fnode_sess->targetname,
+ (char *)fnode_param->value);
+ break;
+ case ISCSI_FLASHNODE_TPGT:
+ fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+ memcpy(fnode_conn->link_local_ipv6_addr,
+ fnode_param->value, IPv6_ADDR_LEN);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ fnode_sess->discovery_parent_idx =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+ fnode_conn->tcp_xmit_wsf =
+ *(uint8_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_TCP_RECV_WSF:
+ fnode_conn->tcp_recv_wsf =
+ *(uint8_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_STATSN:
+ fnode_conn->statsn = *(uint32_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_EXP_STATSN:
+ fnode_conn->exp_statsn =
+ *(uint32_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ chap_out_idx = *(uint16_t *)fnode_param->value;
+ if (!qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ chap_out_idx)) {
+ fnode_sess->chap_out_idx = chap_out_idx;
+ /* Enable chap auth if chap index is valid */
+ fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
+ }
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha,
+ "%s: No such sysfs attribute\n", __func__);
+ rc = -ENOSYS;
+ goto exit_set_param;
+ }
+ }
+
+ rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
+
+exit_set_param:
+ return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ *
+ * This invalidates the flash ddb entry at the given index
+ **/
+static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ uint32_t dev_db_start_offset;
+ uint32_t dev_db_end_offset;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint16_t *ddb_cookie = NULL;
+ size_t ddb_size = 0;
+ void *pddb = NULL;
+ int target_id;
+ int rc = 0;
+
+ if (fnode_sess->is_boot_target) {
+ rc = -EPERM;
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Deletion of boot target entry is not permitted.\n",
+ __func__));
+ goto exit_ddb_del;
+ }
+
+ if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
+ goto sysfs_ddb_del;
+
+ if (is_qla40XX(ha)) {
+ dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ dev_db_end_offset = FLASH_OFFSET_DB_END;
+ dev_db_start_offset += (fnode_sess->target_id *
+ sizeof(*fw_ddb_entry));
+ ddb_size = sizeof(*fw_ddb_entry);
+ } else {
+ dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+ (ha->hw.flt_region_ddb << 2);
+ /* flt_ddb_size is DDB table size for both ports
+ * so divide it by 2 to calculate the offset for second port
+ */
+ if (ha->port_num == 1)
+ dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+ dev_db_end_offset = dev_db_start_offset +
+ (ha->hw.flt_ddb_size / 2);
+
+ dev_db_start_offset += (fnode_sess->target_id *
+ sizeof(*fw_ddb_entry));
+ dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
+
+ ddb_size = sizeof(*ddb_cookie);
+ }
+
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
+ __func__, dev_db_start_offset, dev_db_end_offset));
+
+ if (dev_db_start_offset > dev_db_end_offset) {
+ rc = -EIO;
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
+ __func__, fnode_sess->target_id));
+ goto exit_ddb_del;
+ }
+
+ pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!pddb) {
+ rc = -ENOMEM;
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ goto exit_ddb_del;
+ }
+
+ if (is_qla40XX(ha)) {
+ fw_ddb_entry = pddb;
+ memset(fw_ddb_entry, 0, ddb_size);
+ ddb_cookie = &fw_ddb_entry->cookie;
+ } else {
+ ddb_cookie = pddb;
+ }
+
+ /* invalidate the cookie */
+ *ddb_cookie = 0xFFEE;
+ qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ ddb_size, FLASH_OPT_RMW_COMMIT);
+
+sysfs_ddb_del:
+ target_id = fnode_sess->target_id;
+ iscsi_destroy_flashnode_sess(fnode_sess);
+ ql4_printk(KERN_INFO, ha,
+ "%s: session and conn entries for flashnode %u of host %lu deleted\n",
+ __func__, target_id, ha->host_no);
+exit_ddb_del:
+ if (pddb)
+ dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
+ fw_ddb_entry_dma);
+ return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
+ * @ha: pointer to adapter structure
+ *
+ * Export the firmware DDB for all send targets and normal targets to sysfs.
+ **/
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+{
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint16_t max_ddbs;
+ uint16_t idx = 0;
+ int ret = QLA_SUCCESS;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ return -ENOMEM;
+ }
+
+ max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
+ idx))
+ continue;
+
+ ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
+ if (ret) {
+ ret = -EIO;
+ break;
+ }
+ }
+
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
+ fw_ddb_entry_dma);
+
+ return ret;
+}
+
+static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
+{
+ iscsi_destroy_all_flashnode(ha->host);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from firmware DDBs, issue send targets
+ * using connection open, then create the list of normal targets (nt)
+ * from firmware DDBs. Based on the list of nt setup session and connection
+ * objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ uint16_t tmo = 0;
+ struct list_head list_st, list_nt;
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ unsigned long wtime;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+
+ qla4xxx_build_st_list(ha, &list_st);
+
+ /* Before issuing conn open mbox, ensure all IPs states are configured
+ * Note, conn open fails if IPs are not configured
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait to ensure all sendtargets are done for min 12 sec wait */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ if (list_empty(&list_st))
+ break;
+
+ qla4xxx_remove_failed_ddb(ha, &list_st);
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+
+ qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
+
+ qla4xxx_free_ddb_list(&list_st);
+ qla4xxx_free_ddb_list(&list_nt);
+
+ qla4xxx_free_ddb_index(ha);
+}
+
+/**
+ * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
+ * response.
+ * @ha: pointer to adapter structure
+ *
+ * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
+ * set in DDB and we will wait for login response of boot targets during
+ * probe.
+ **/
+static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int max_ddbs, idx, ret;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_login_resp;
+ }
+
+ wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB index [%d]\n", __func__,
+ ddb_entry->fw_ddb_index));
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto exit_login_resp;
+
+ if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ break;
+
+ schedule_timeout_uninterruptible(HZ);
+
+ } while ((time_after(wtime, jiffies)));
+
+ if (!time_after(wtime, jiffies)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Login response wait timer expired\n",
+ __func__));
+ goto exit_login_resp;
+ }
+ }
+ }
+
+exit_login_resp:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/**
+ * qla4xxx_probe_adapter - callback function to probe HBA
+ * @pdev: pointer to pci_dev structure
+ * @pci_device_id: pointer to pci_device entry
+ *
+ * This routine will probe for Qlogic 4xxx iSCSI host adapters.
+ * It returns zero if successful. It also initializes all data necessary for
+ * the driver.
+ **/
+static int qla4xxx_probe_adapter(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret = -ENODEV, status;
+ struct Scsi_Host *host;
+ struct scsi_qla_host *ha;
+ uint8_t init_retry_count = 0;
+ char buf[34];
+ struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
+ uint32_t dev_state;
+
+ if (pci_enable_device(pdev))
+ return -1;
+
+ host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
+ if (host == NULL) {
+ printk(KERN_WARNING
+ "qla4xxx: Couldn't allocate host from scsi layer!\n");
+ goto probe_disable_device;
+ }
+
+ /* Clear our data area */
+ ha = to_qla_host(host);
+ memset(ha, 0, sizeof(*ha));
+
+ /* Save the information from PCI BIOS. */
+ ha->pdev = pdev;
+ ha->host = host;
+ ha->host_no = host->host_no;
+ ha->func_num = PCI_FUNC(ha->pdev->devfn);
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ /* Setup Runtime configurable options */
+ if (is_qla8022(ha)) {
+ ha->isp_ops = &qla4_82xx_isp_ops;
+ ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
+ ha->qdr_sn_window = -1;
+ ha->ddr_mn_window = -1;
+ ha->curr_window = 255;
+ nx_legacy_intr = &legacy_intr[ha->func_num];
+ ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+ ha->nx_legacy_intr.tgt_status_reg =
+ nx_legacy_intr->tgt_status_reg;
+ ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+ ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
+ ha->isp_ops = &qla4_83xx_isp_ops;
+ ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
+ } else {
+ ha->isp_ops = &qla4xxx_isp_ops;
+ }
+
+ if (is_qla80XX(ha)) {
+ rwlock_init(&ha->hw_lock);
+ ha->pf_bit = ha->func_num << 16;
+ /* Set EEH reset type to fundamental if required by hba */
+ pdev->needs_freset = 1;
+ }
+
+ /* Configure PCI I/O space. */
+ ret = ha->isp_ops->iospace_config(ha);
+ if (ret)
+ goto probe_failed_ioconfig;
+
+ ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
+ pdev->device, pdev->irq, ha->reg);
+
+ qla4xxx_config_dma_addressing(ha);
+
+ /* Initialize lists and spinlocks. */
+ INIT_LIST_HEAD(&ha->free_srb_q);
+
+ mutex_init(&ha->mbox_sem);
+ mutex_init(&ha->chap_sem);
+ init_completion(&ha->mbx_intr_comp);
+ init_completion(&ha->disable_acb_comp);
+ init_completion(&ha->idc_comp);
+ init_completion(&ha->link_up_comp);
+ init_completion(&ha->disable_acb_comp);
+
+ spin_lock_init(&ha->hardware_lock);
+ spin_lock_init(&ha->work_lock);
+
+ /* Initialize work list */
+ INIT_LIST_HEAD(&ha->work_list);
+
+ /* Allocate dma buffers */
+ if (qla4xxx_mem_alloc(ha)) {
+ ql4_printk(KERN_WARNING, ha,
+ "[ERROR] Failed to allocate memory for adapter\n");
+
+ ret = -ENOMEM;
+ goto probe_failed;
+ }
+
+ host->cmd_per_lun = 3;
+ host->max_channel = 0;
+ host->max_lun = MAX_LUNS - 1;
+ host->max_id = MAX_TARGETS;
+ host->max_cmd_len = IOCB_MAX_CDB_LEN;
+ host->can_queue = MAX_SRBS ;
+ host->transportt = qla4xxx_scsi_transport;
+
+ ret = scsi_init_shared_tag_map(host, MAX_SRBS);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: scsi_init_shared_tag_map failed\n", __func__);
+ goto probe_failed;
+ }
+
+ pci_set_drvdata(pdev, ha);
+
+ ret = scsi_add_host(host, &pdev->dev);
+ if (ret)
+ goto probe_failed;
+
+ if (is_qla80XX(ha))
+ qla4_8xxx_get_flash_info(ha);
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ qla4_83xx_read_reset_template(ha);
+ /*
+ * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+ * If DONRESET_BIT0 is set, drivers should not set dev_state
+ * to NEED_RESET. But if NEED_RESET is set, drivers should
+ * should honor the reset.
+ */
+ if (ql4xdontresethba == 1)
+ qla4_83xx_set_idc_dontreset(ha);
+ }
+
+ /*
+ * Initialize the Host adapter request/response queues and
+ * firmware
+ * NOTE: interrupts enabled upon successful completion
+ */
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+
+ /* Dont retry adapter initialization if IRQ allocation failed */
+ if (is_qla80XX(ha) && (status == QLA_ERROR))
+ goto skip_retry_init;
+
+ while ((!test_bit(AF_ONLINE, &ha->flags)) &&
+ init_retry_count++ < MAX_INIT_RETRIES) {
+
+ if (is_qla80XX(ha)) {
+ ha->isp_ops->idc_lock(ha);
+ dev_state = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_CRB_DEV_STATE);
+ ha->isp_ops->idc_unlock(ha);
+ if (dev_state == QLA8XXX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha, "%s: don't retry "
+ "initialize adapter. H/W is in failed state\n",
+ __func__);
+ break;
+ }
+ }
+ DEBUG2(printk("scsi: %s: retrying adapter initialization "
+ "(%d)\n", __func__, init_retry_count));
+
+ if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
+ continue;
+
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+ if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
+ goto skip_retry_init;
+ }
+ }
+
+skip_retry_init:
+ if (!test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
+
+ if ((is_qla8022(ha) && ql4xdontresethba) ||
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
+ /* Put the device in failed state. */
+ DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
+ }
+ ret = -ENODEV;
+ goto remove_host;
+ }
+
+ /* Startup the kernel thread for this host adapter. */
+ DEBUG2(printk("scsi: %s: Starting kernel thread for "
+ "qla4xxx_dpc\n", __func__));
+ sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
+ ha->dpc_thread = create_singlethread_workqueue(buf);
+ if (!ha->dpc_thread) {
+ ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
+ ret = -ENODEV;
+ goto remove_host;
+ }
+ INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
+
+ ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
+ ha->host_no);
+ if (!ha->task_wq) {
+ ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
+ ret = -ENODEV;
+ goto remove_host;
+ }
+
+ /*
+ * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
+ * (which is called indirectly by qla4xxx_initialize_adapter),
+ * so that irqs will be registered after crbinit but before
+ * mbx_intr_enable.
+ */
+ if (is_qla40XX(ha)) {
+ ret = qla4xxx_request_irqs(ha);
+ if (ret) {
+ ql4_printk(KERN_WARNING, ha, "Failed to reserve "
+ "interrupt %d already in use.\n", pdev->irq);
+ goto remove_host;
+ }
+ }
+
+ pci_save_state(ha->pdev);
+ ha->isp_ops->enable_intrs(ha);
+
+ /* Start timer thread. */
+ qla4xxx_start_timer(ha, qla4xxx_timer, 1);
+
+ set_bit(AF_INIT_DONE, &ha->flags);
+
+ qla4_8xxx_alloc_sysfs_attr(ha);
+
+ printk(KERN_INFO
+ " QLogic iSCSI HBA Driver version: %s\n"
+ " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
+ qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
+ ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
+
+ /* Set the driver version */
+ if (is_qla80XX(ha))
+ qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
+
+ if (qla4xxx_setup_boot_info(ha))
+ ql4_printk(KERN_ERR, ha,
+ "%s: No iSCSI boot target configured\n", __func__);
+
+ set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
+ /* Perform the build ddb list and login to each */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+ qla4xxx_wait_login_resp_boot_tgt(ha);
+
+ qla4xxx_create_chap_list(ha);
+
+ qla4xxx_create_ifaces(ha);
+ return 0;
+
+remove_host:
+ scsi_remove_host(ha->host);
+
+probe_failed:
+ qla4xxx_free_adapter(ha);
+
+probe_failed_ioconfig:
+ pci_disable_pcie_error_reporting(pdev);
+ scsi_host_put(ha->host);
+
+probe_disable_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+/**
+ * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
+ * @ha: pointer to adapter structure
+ *
+ * Mark the other ISP-4xxx port to indicate that the driver is being removed,
+ * so that the other port will not re-initialize while in the process of
+ * removing the ha due to driver unload or hba hotplug.
+ **/
+static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
+{
+ struct scsi_qla_host *other_ha = NULL;
+ struct pci_dev *other_pdev = NULL;
+ int fn = ISP4XXX_PCI_FN_2;
+
+ /*iscsi function numbers for ISP4xxx is 1 and 3*/
+ if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
+ fn = ISP4XXX_PCI_FN_1;
+
+ other_pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ /* Get other_ha if other_pdev is valid and state is enable*/
+ if (other_pdev) {
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ other_ha = pci_get_drvdata(other_pdev);
+ if (other_ha) {
+ set_bit(AF_HA_REMOVAL, &other_ha->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
+ "Prevent %s reinit\n", __func__,
+ dev_name(&other_ha->pdev->dev)));
+ }
+ }
+ pci_dev_put(other_pdev);
+ }
+}
+
+static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int options;
+ int status;
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+ goto clear_ddb;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto clear_ddb;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (status == QLA_ERROR)
+ goto free_ddb;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto free_ddb;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+free_ddb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+clear_ddb:
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+}
+
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ qla4xxx_destroy_ddb(ha, ddb_entry);
+ /*
+ * we have decremented the reference count of the driver
+ * when we setup the session to have the driver unload
+ * to be seamless without actually destroying the
+ * session
+ **/
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
+/**
+ * qla4xxx_remove_adapter - callback function to remove adapter.
+ * @pci_dev: PCI device pointer
+ **/
+static void qla4xxx_remove_adapter(struct pci_dev *pdev)
+{
+ struct scsi_qla_host *ha;
+
+ /*
+ * If the PCI device is disabled then it means probe_adapter had
+ * failed and resources already cleaned up on probe_adapter exit.
+ */
+ if (!pci_is_enabled(pdev))
+ return;
+
+ ha = pci_get_drvdata(pdev);
+
+ if (is_qla40XX(ha))
+ qla4xxx_prevent_other_port_reinit(ha);
+
+ /* destroy iface from sysfs */
+ qla4xxx_destroy_ifaces(ha);
+
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
+ iscsi_boot_destroy_kset(ha->boot_kset);
+
+ qla4xxx_destroy_fw_ddb_session(ha);
+ qla4_8xxx_free_sysfs_attr(ha);
+
+ qla4xxx_sysfs_ddb_remove(ha);
+ scsi_remove_host(ha->host);
+
+ qla4xxx_free_adapter(ha);
+
+ scsi_host_put(ha->host);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
+ * @ha: HA context
+ *
+ * At exit, the @ha's flags.enable_64bit_addressing set to indicated
+ * supported addressing method.
+ */
+static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
+{
+ int retval;
+
+ /* Update our PCI device dma_mask for full 64 bit mask */
+ if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
+ if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ dev_dbg(&ha->pdev->dev,
+ "Failed to set 64 bit PCI consistent mask; "
+ "using 32 bit.\n");
+ retval = pci_set_consistent_dma_mask(ha->pdev,
+ DMA_BIT_MASK(32));
+ }
+ } else
+ retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+}
+
+static int qla4xxx_slave_alloc(struct scsi_device *sdev)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb;
+ int queue_depth = QL4_DEF_QDEPTH;
+
+ cls_sess = starget_to_session(sdev->sdev_target);
+ sess = cls_sess->dd_data;
+ ddb = sess->dd_data;
+
+ sdev->hostdata = ddb;
+
+ if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
+ queue_depth = ql4xmaxqdepth;
+
+ scsi_change_queue_depth(sdev, queue_depth);
+ return 0;
+}
+
+/**
+ * qla4xxx_del_from_active_array - returns an active srb
+ * @ha: Pointer to host adapter structure.
+ * @index: index into the active_array
+ *
+ * This routine removes and returns the srb at the specified index
+ **/
+struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
+ uint32_t index)
+{
+ struct srb *srb = NULL;
+ struct scsi_cmnd *cmd = NULL;
+
+ cmd = scsi_host_find_tag(ha->host, index);
+ if (!cmd)
+ return srb;
+
+ srb = (struct srb *)CMD_SP(cmd);
+ if (!srb)
+ return srb;
+
+ /* update counters */
+ if (srb->flags & SRB_DMA_VALID) {
+ ha->iocb_cnt -= srb->iocb_cnt;
+ if (srb->cmd)
+ srb->cmd->host_scribble =
+ (unsigned char *)(unsigned long) MAX_SRBS;
+ }
+ return srb;
+}
+
+/**
+ * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
+ * @ha: Pointer to host adapter structure.
+ * @cmd: Scsi Command to wait on.
+ *
+ * This routine waits for the command to be returned by the Firmware
+ * for some max time.
+ **/
+static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
+ struct scsi_cmnd *cmd)
+{
+ int done = 0;
+ struct srb *rp;
+ uint32_t max_wait_time = EH_WAIT_CMD_TOV;
+ int ret = SUCCESS;
+
+ /* Dont wait on command if PCI error is being handled
+ * by PCI AER driver
+ */
+ if (unlikely(pci_channel_offline(ha->pdev)) ||
+ (test_bit(AF_EEH_BUSY, &ha->flags))) {
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
+ ha->host_no, __func__);
+ return ret;
+ }
+
+ do {
+ /* Checking to see if its returned to OS */
+ rp = (struct srb *) CMD_SP(cmd);
+ if (rp == NULL) {
+ done++;
+ break;
+ }
+
+ msleep(2000);
+ } while (max_wait_time--);
+
+ return done;
+}
+
+/**
+ * qla4xxx_wait_for_hba_online - waits for HBA to come online
+ * @ha: Pointer to host adapter structure
+ **/
+static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
+{
+ unsigned long wait_online;
+
+ wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
+ while (time_before(jiffies, wait_online)) {
+
+ if (adapter_up(ha))
+ return QLA_SUCCESS;
+
+ msleep(2000);
+ }
+
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
+ * @ha: pointer to HBA
+ * @t: target id
+ * @l: lun id
+ *
+ * This function waits for all outstanding commands to a lun to complete. It
+ * returns 0 if all pending commands are returned and 1 otherwise.
+ **/
+static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
+ struct scsi_target *stgt,
+ struct scsi_device *sdev)
+{
+ int cnt;
+ int status = 0;
+ struct scsi_cmnd *cmd;
+
+ /*
+ * Waiting for all commands for the designated target or dev
+ * in the active array
+ */
+ for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
+ cmd = scsi_host_find_tag(ha->host, cnt);
+ if (cmd && stgt == scsi_target(cmd->device) &&
+ (!sdev || sdev == cmd->device)) {
+ if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+ status++;
+ break;
+ }
+ }
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_eh_abort - callback for abort task.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to abort the specified
+ * command.
+ **/
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ unsigned int id = cmd->device->id;
+ uint64_t lun = cmd->device->lun;
+ unsigned long flags;
+ struct srb *srb = NULL;
+ int ret = SUCCESS;
+ int wait = 0;
+
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
+ ha->host_no, id, lun, cmd, cmd->cmnd[0]);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ srb = (struct srb *) CMD_SP(cmd);
+ if (!srb) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
+ ha->host_no, id, lun);
+ return SUCCESS;
+ }
+ kref_get(&srb->srb_ref);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
+ DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ } else {
+ DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
+ ha->host_no, id, lun));
+ wait = 1;
+ }
+
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+
+ /* Wait for command to complete */
+ if (wait) {
+ if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+ DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ }
+ }
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld:%d:%llu: Abort command - %s\n",
+ ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
+
+ return ret;
+}
+
+/**
+ * qla4xxx_eh_device_reset - callback for target reset.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to reset all luns on the
+ * specified target.
+ **/
+static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ struct ddb_entry *ddb_entry = cmd->device->hostdata;
+ int ret = FAILED, stat;
+
+ if (!ddb_entry)
+ return ret;
+
+ ret = iscsi_block_scsi_eh(cmd);
+ if (ret)
+ return ret;
+ ret = FAILED;
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
+ cmd->device->channel, cmd->device->id, cmd->device->lun);
+
+ DEBUG2(printk(KERN_INFO
+ "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
+ "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
+ cmd, jiffies, cmd->request->timeout / HZ,
+ ha->dpc_flags, cmd->result, cmd->allowed));
+
+ /* FIXME: wait for hba to go online */
+ stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
+ if (stat != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
+ goto eh_dev_reset_done;
+ }
+
+ if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
+ cmd->device)) {
+ ql4_printk(KERN_INFO, ha,
+ "DEVICE RESET FAILED - waiting for "
+ "commands.\n");
+ goto eh_dev_reset_done;
+ }
+
+ /* Send marker. */
+ if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+ MM_LUN_RESET) != QLA_SUCCESS)
+ goto eh_dev_reset_done;
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
+ ha->host_no, cmd->device->channel, cmd->device->id,
+ cmd->device->lun);
+
+ ret = SUCCESS;
+
+eh_dev_reset_done:
+
+ return ret;
+}
+
+/**
+ * qla4xxx_eh_target_reset - callback for target reset.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to reset the target.
+ **/
+static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ struct ddb_entry *ddb_entry = cmd->device->hostdata;
+ int stat, ret;
+
+ if (!ddb_entry)
+ return FAILED;
+
+ ret = iscsi_block_scsi_eh(cmd);
+ if (ret)
+ return ret;
+
+ starget_printk(KERN_INFO, scsi_target(cmd->device),
+ "WARM TARGET RESET ISSUED.\n");
+
+ DEBUG2(printk(KERN_INFO
+ "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
+ "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
+ ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+ ha->dpc_flags, cmd->result, cmd->allowed));
+
+ stat = qla4xxx_reset_target(ha, ddb_entry);
+ if (stat != QLA_SUCCESS) {
+ starget_printk(KERN_INFO, scsi_target(cmd->device),
+ "WARM TARGET RESET FAILED.\n");
+ return FAILED;
+ }
+
+ if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
+ NULL)) {
+ starget_printk(KERN_INFO, scsi_target(cmd->device),
+ "WARM TARGET DEVICE RESET FAILED - "
+ "waiting for commands.\n");
+ return FAILED;
+ }
+
+ /* Send marker. */
+ if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+ MM_TGT_WARM_RESET) != QLA_SUCCESS) {
+ starget_printk(KERN_INFO, scsi_target(cmd->device),
+ "WARM TARGET DEVICE RESET FAILED - "
+ "marker iocb failed.\n");
+ return FAILED;
+ }
+
+ starget_printk(KERN_INFO, scsi_target(cmd->device),
+ "WARM TARGET RESET SUCCEEDED.\n");
+ return SUCCESS;
+}
+
+/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine finds that if reset host is called in EH
+ * scenario or from some application like sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+ if (shost->shost_state == SHOST_RECOVERY)
+ return 1;
+ return 0;
+}
+
+/**
+ * qla4xxx_eh_host_reset - kernel callback
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is invoked by the Linux kernel to perform fatal error
+ * recovery on the specified adapter.
+ **/
+static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ int return_status = FAILED;
+ struct scsi_qla_host *ha;
+
+ ha = to_qla_host(cmd->device->host);
+
+ if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
+ qla4_83xx_set_idc_dontreset(ha);
+
+ /*
+ * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
+ * protocol drivers, we should not set device_state to NEED_RESET
+ */
+ if (ql4xdontresethba ||
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
+ DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
+ ha->host_no, __func__));
+
+ /* Clear outstanding srb in queues */
+ if (qla4xxx_is_eh_active(cmd->device->host))
+ qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
+ return FAILED;
+ }
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
+ cmd->device->channel, cmd->device->id, cmd->device->lun);
+
+ if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
+ "DEAD.\n", ha->host_no, cmd->device->channel,
+ __func__));
+
+ return FAILED;
+ }
+
+ if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ if (is_qla80XX(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+
+ if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
+ return_status = SUCCESS;
+
+ ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
+ return_status == FAILED ? "FAILED" : "SUCCEEDED");
+
+ return return_status;
+}
+
+static int qla4xxx_context_reset(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct addr_ctrl_blk_def *acb = NULL;
+ uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
+ int rval = QLA_SUCCESS;
+ dma_addr_t acb_dma;
+
+ acb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk_def),
+ &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+ __func__);
+ rval = -ENOMEM;
+ goto exit_port_reset;
+ }
+
+ memset(acb, 0, acb_len);
+
+ rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
+ if (rval != QLA_SUCCESS) {
+ rval = -EIO;
+ goto exit_free_acb;
+ }
+
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS) {
+ rval = -EIO;
+ goto exit_free_acb;
+ }
+
+ wait_for_completion_timeout(&ha->disable_acb_comp,
+ DISABLE_ACB_TOV * HZ);
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+ if (rval != QLA_SUCCESS) {
+ rval = -EIO;
+ goto exit_free_acb;
+ }
+
+exit_free_acb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
+ acb, acb_dma);
+exit_port_reset:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
+ rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+ return rval;
+}
+
+static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int rval = QLA_SUCCESS;
+ uint32_t idc_ctrl;
+
+ if (ql4xdontresethba) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
+ __func__));
+ rval = -EPERM;
+ goto exit_host_reset;
+ }
+
+ if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+ goto recover_adapter;
+
+ switch (reset_type) {
+ case SCSI_ADAPTER_RESET:
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ break;
+ case SCSI_FIRMWARE_RESET:
+ if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ if (is_qla80XX(ha))
+ /* set firmware context reset */
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else {
+ rval = qla4xxx_context_reset(ha);
+ goto exit_host_reset;
+ }
+ }
+ break;
+ }
+
+recover_adapter:
+ /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
+ * reset is issued by application */
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
+ test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+ (idc_ctrl | GRACEFUL_RESET_BIT1));
+ }
+
+ rval = qla4xxx_recover_adapter(ha);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
+ __func__));
+ rval = -EIO;
+ }
+
+exit_host_reset:
+ return rval;
+}
+
+/* PCI AER driver recovers from all correctable errors w/o
+ * driver intervention. For uncorrectable errors PCI AER
+ * driver calls the following device driver's callbacks
+ *
+ * - Fatal Errors - link_reset
+ * - Non-Fatal Errors - driver's pci_error_detected() which
+ * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
+ *
+ * PCI AER driver calls
+ * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
+ * returns RECOVERED or NEED_RESET if fw_hung
+ * NEED_RESET - driver's slot_reset()
+ * DISCONNECT - device is dead & cannot recover
+ * RECOVERED - driver's pci_resume()
+ */
+static pci_ers_result_t
+qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
+ ha->host_no, __func__, state);
+
+ if (!is_aer_supported(ha))
+ return PCI_ERS_RESULT_NONE;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ clear_bit(AF_EEH_BUSY, &ha->flags);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ set_bit(AF_EEH_BUSY, &ha->flags);
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_free_irqs(ha);
+ pci_disable_device(pdev);
+ /* Return back all IOs */
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ set_bit(AF_EEH_BUSY, &ha->flags);
+ set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * qla4xxx_pci_mmio_enabled() gets called if
+ * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
+ * and read/write to the device still works.
+ **/
+static pci_ers_result_t
+qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+
+ if (!is_aer_supported(ha))
+ return PCI_ERS_RESULT_NONE;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
+{
+ uint32_t rval = QLA_ERROR;
+ int fn;
+ struct pci_dev *other_pdev = NULL;
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
+
+ set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ clear_bit(AF_ONLINE, &ha->flags);
+ clear_bit(AF_LINK_UP, &ha->flags);
+ iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
+ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+ }
+
+ fn = PCI_FUNC(ha->pdev->devfn);
+ if (is_qla8022(ha)) {
+ while (fn > 0) {
+ fn--;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
+ ha->host_no, __func__, fn);
+ /* Get the pci device given the domain, bus,
+ * slot/function number */
+ other_pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ if (!other_pdev)
+ continue;
+
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
+ ha->host_no, __func__, fn);
+ pci_dev_put(other_pdev);
+ break;
+ }
+ pci_dev_put(other_pdev);
+ }
+ } else {
+ /* this case is meant for ISP83xx/ISP84xx only */
+ if (qla4_83xx_can_perform_reset(ha)) {
+ /* reset fn as iSCSI is going to perform the reset */
+ fn = 0;
+ }
+ }
+
+ /* The first function on the card, the reset owner will
+ * start & initialize the firmware. The other functions
+ * on the card will reset the firmware context
+ */
+ if (!fn) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
+ "0x%x is the owner\n", ha->host_no, __func__,
+ ha->pdev->devfn);
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_COLD);
+ ha->isp_ops->idc_unlock(ha);
+
+ rval = qla4_8xxx_update_idc_reg(ha);
+ if (rval == QLA_ERROR) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
+ ha->host_no, __func__);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
+ goto exit_error_recovery;
+ }
+
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
+ "FAILED\n", ha->host_no, __func__);
+ qla4xxx_free_irqs(ha);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_clear_drv_active(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ ha->isp_ops->idc_unlock(ha);
+ } else {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
+ "READY\n", ha->host_no, __func__);
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+ QLA8XXX_DEV_READY);
+ /* Clear driver state register */
+ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
+ qla4_8xxx_set_drv_active(ha);
+ ha->isp_ops->idc_unlock(ha);
+ ha->isp_ops->enable_intrs(ha);
+ }
+ } else {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
+ "the reset owner\n", ha->host_no, __func__,
+ ha->pdev->devfn);
+ if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
+ QLA8XXX_DEV_READY)) {
+ clear_bit(AF_FW_RECOVERY, &ha->flags);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+ if (rval == QLA_SUCCESS)
+ ha->isp_ops->enable_intrs(ha);
+ else
+ qla4xxx_free_irqs(ha);
+
+ ha->isp_ops->idc_lock(ha);
+ qla4_8xxx_set_drv_active(ha);
+ ha->isp_ops->idc_unlock(ha);
+ }
+ }
+exit_error_recovery:
+ clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+ return rval;
+}
+
+static pci_ers_result_t
+qla4xxx_pci_slot_reset(struct pci_dev *pdev)
+{
+ pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+ int rc;
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
+ ha->host_no, __func__);
+
+ if (!is_aer_supported(ha))
+ return PCI_ERS_RESULT_NONE;
+
+ /* Restore the saved state of PCIe device -
+ * BAR registers, PCI Config space, PCIX, MSI,
+ * IOV states
+ */
+ pci_restore_state(pdev);
+
+ /* pci_restore_state() clears the saved_state flag of the device
+ * save restored state which resets saved_state flag
+ */
+ pci_save_state(pdev);
+
+ /* Initialize device or resume if in suspended state */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
+ "device after reset\n", ha->host_no, __func__);
+ goto exit_slot_reset;
+ }
+
+ ha->isp_ops->disable_intrs(ha);
+
+ if (is_qla80XX(ha)) {
+ if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
+ ret = PCI_ERS_RESULT_RECOVERED;
+ goto exit_slot_reset;
+ } else
+ goto exit_slot_reset;
+ }
+
+exit_slot_reset:
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
+ "device after reset\n", ha->host_no, __func__, ret);
+ return ret;
+}
+
+static void
+qla4xxx_pci_resume(struct pci_dev *pdev)
+{
+ struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+ int ret;
+
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
+ ha->host_no, __func__);
+
+ ret = qla4xxx_wait_for_hba_online(ha);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
+ "resume I/O from slot/link_reset\n", ha->host_no,
+ __func__);
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ clear_bit(AF_EEH_BUSY, &ha->flags);
+}
+
+static const struct pci_error_handlers qla4xxx_err_handler = {
+ .error_detected = qla4xxx_pci_error_detected,
+ .mmio_enabled = qla4xxx_pci_mmio_enabled,
+ .slot_reset = qla4xxx_pci_slot_reset,
+ .resume = qla4xxx_pci_resume,
+};
+
+static struct pci_device_id qla4xxx_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {0, 0},
+};
+MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
+
+static struct pci_driver qla4xxx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = qla4xxx_pci_tbl,
+ .probe = qla4xxx_probe_adapter,
+ .remove = qla4xxx_remove_adapter,
+ .err_handler = &qla4xxx_err_handler,
+};
+
+static int __init qla4xxx_module_init(void)
+{
+ int ret;
+
+ if (ql4xqfulltracking)
+ qla4xxx_driver_template.track_queue_depth = 1;
+
+ /* Allocate cache for SRBs. */
+ srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (srb_cachep == NULL) {
+ printk(KERN_ERR
+ "%s: Unable to allocate SRB cache..."
+ "Failing load!\n", DRIVER_NAME);
+ ret = -ENOMEM;
+ goto no_srp_cache;
+ }
+
+ /* Derive version string. */
+ strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
+ if (ql4xextended_error_logging)
+ strcat(qla4xxx_version_str, "-debug");
+
+ qla4xxx_scsi_transport =
+ iscsi_register_transport(&qla4xxx_iscsi_transport);
+ if (!qla4xxx_scsi_transport){
+ ret = -ENODEV;
+ goto release_srb_cache;
+ }
+
+ ret = pci_register_driver(&qla4xxx_pci_driver);
+ if (ret)
+ goto unregister_transport;
+
+ printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
+ return 0;
+
+unregister_transport:
+ iscsi_unregister_transport(&qla4xxx_iscsi_transport);
+release_srb_cache:
+ kmem_cache_destroy(srb_cachep);
+no_srp_cache:
+ return ret;
+}
+
+static void __exit qla4xxx_module_exit(void)
+{
+ pci_unregister_driver(&qla4xxx_pci_driver);
+ iscsi_unregister_transport(&qla4xxx_iscsi_transport);
+ kmem_cache_destroy(srb_cachep);
+}
+
+module_init(qla4xxx_module_init);
+module_exit(qla4xxx_module_exit);
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
new file mode 100644
index 000000000..f11eaa773
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -0,0 +1,8 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
new file mode 100644
index 000000000..a22bb1b40
--- /dev/null
+++ b/drivers/scsi/qlogicfas.c
@@ -0,0 +1,226 @@
+/*
+ * Qlogic FAS408 ISA card driver
+ *
+ * Copyright 1994, Tom Zerucha.
+ * tz@execpc.com
+ *
+ * Redistributable under terms of the GNU General Public License
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ *
+ * Check qlogicfas408.c for more credits and info.
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h> /* to get disk capacity */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/unistd.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "qlogicfas408.h"
+
+/* Set the following to 2 to use normal interrupt (active high/totempole-
+ * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open
+ * drain
+ */
+#define INT_TYPE 2
+
+static char qlogicfas_name[] = "qlogicfas";
+
+/*
+ * Look for qlogic card and init if found
+ */
+
+static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host,
+ int qbase,
+ int qlirq)
+{
+ int qltyp; /* type of chip */
+ int qinitid;
+ struct Scsi_Host *hreg; /* registered host structure */
+ struct qlogicfas408_priv *priv;
+
+ /* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself
+ * decodes the address - I check 230 first since MIDI cards are
+ * typically at 0x330
+ *
+ * Theoretically, two Qlogic cards can coexist in the same system.
+ * This should work by simply using this as a loadable module for
+ * the second card, but I haven't tested this.
+ */
+
+ if (!qbase || qlirq == -1)
+ goto err;
+
+ if (!request_region(qbase, 0x10, qlogicfas_name)) {
+ printk(KERN_INFO "%s: address %#x is busy\n", qlogicfas_name,
+ qbase);
+ goto err;
+ }
+
+ if (!qlogicfas408_detect(qbase, INT_TYPE)) {
+ printk(KERN_WARNING "%s: probe failed for %#x\n",
+ qlogicfas_name,
+ qbase);
+ goto err_release_mem;
+ }
+
+ printk(KERN_INFO "%s: Using preset base address of %03x,"
+ " IRQ %d\n", qlogicfas_name, qbase, qlirq);
+
+ qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
+ qinitid = host->this_id;
+ if (qinitid < 0)
+ qinitid = 7; /* if no ID, use 7 */
+
+ qlogicfas408_setup(qbase, qinitid, INT_TYPE);
+
+ hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
+ if (!hreg)
+ goto err_release_mem;
+ priv = get_priv_by_host(hreg);
+ hreg->io_port = qbase;
+ hreg->n_io_port = 16;
+ hreg->dma_channel = -1;
+ if (qlirq != -1)
+ hreg->irq = qlirq;
+ priv->qbase = qbase;
+ priv->qlirq = qlirq;
+ priv->qinitid = qinitid;
+ priv->shost = hreg;
+ priv->int_type = INT_TYPE;
+
+ sprintf(priv->qinfo,
+ "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
+ qltyp, qbase, qlirq, QL_TURBO_PDMA);
+ host->name = qlogicfas_name;
+
+ if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg))
+ goto free_scsi_host;
+
+ if (scsi_add_host(hreg, NULL))
+ goto free_interrupt;
+
+ scsi_scan_host(hreg);
+
+ return hreg;
+
+free_interrupt:
+ free_irq(qlirq, hreg);
+
+free_scsi_host:
+ scsi_host_put(hreg);
+
+err_release_mem:
+ release_region(qbase, 0x10);
+err:
+ return NULL;
+}
+
+#define MAX_QLOGICFAS 8
+static struct qlogicfas408_priv *cards;
+static int iobase[MAX_QLOGICFAS];
+static int irq[MAX_QLOGICFAS] = { [0 ... MAX_QLOGICFAS-1] = -1 };
+module_param_array(iobase, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(iobase, "I/O address");
+MODULE_PARM_DESC(irq, "IRQ");
+
+static int qlogicfas_detect(struct scsi_host_template *sht)
+{
+ struct Scsi_Host *shost;
+ struct qlogicfas408_priv *priv;
+ int num;
+
+ for (num = 0; num < MAX_QLOGICFAS; num++) {
+ shost = __qlogicfas_detect(sht, iobase[num], irq[num]);
+ if (shost == NULL) {
+ /* no more devices */
+ break;
+ }
+ priv = get_priv_by_host(shost);
+ priv->next = cards;
+ cards = priv;
+ }
+
+ return num;
+}
+
+static int qlogicfas_release(struct Scsi_Host *shost)
+{
+ struct qlogicfas408_priv *priv = get_priv_by_host(shost);
+
+ scsi_remove_host(shost);
+ if (shost->irq) {
+ qlogicfas408_disable_ints(priv);
+ free_irq(shost->irq, shost);
+ }
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_host_put(shost);
+
+ return 0;
+}
+
+/*
+ * The driver template is also needed for PCMCIA
+ */
+static struct scsi_host_template qlogicfas_driver_template = {
+ .module = THIS_MODULE,
+ .name = qlogicfas_name,
+ .proc_name = qlogicfas_name,
+ .info = qlogicfas408_info,
+ .queuecommand = qlogicfas408_queuecommand,
+ .eh_abort_handler = qlogicfas408_abort,
+ .eh_bus_reset_handler = qlogicfas408_bus_reset,
+ .bios_param = qlogicfas408_biosparam,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+static __init int qlogicfas_init(void)
+{
+ if (!qlogicfas_detect(&qlogicfas_driver_template)) {
+ /* no cards found */
+ printk(KERN_INFO "%s: no cards were found, please specify "
+ "I/O address and IRQ using iobase= and irq= "
+ "options", qlogicfas_name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static __exit void qlogicfas_exit(void)
+{
+ struct qlogicfas408_priv *priv;
+
+ for (priv = cards; priv != NULL; priv = priv->next)
+ qlogicfas_release(priv->shost);
+}
+
+MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
+MODULE_DESCRIPTION("Driver for the Qlogic FAS408 based ISA card");
+MODULE_LICENSE("GPL");
+module_init(qlogicfas_init);
+module_exit(qlogicfas_exit);
+
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
new file mode 100644
index 000000000..c3a9151ca
--- /dev/null
+++ b/drivers/scsi/qlogicfas408.c
@@ -0,0 +1,617 @@
+/*----------------------------------------------------------------*/
+/*
+ Qlogic linux driver - work in progress. No Warranty express or implied.
+ Use at your own risk. Support Tort Reform so you won't have to read all
+ these silly disclaimers.
+
+ Copyright 1994, Tom Zerucha.
+ tz@execpc.com
+
+ Additional Code, and much appreciated help by
+ Michael A. Griffith
+ grif@cs.ucr.edu
+
+ Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
+ help respectively, and for suffering through my foolishness during the
+ debugging process.
+
+ Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
+ (you can reference it, but it is incomplete and inaccurate in places)
+
+ Version 0.46 1/30/97 - kernel 1.2.0+
+
+ Functions as standalone, loadable, and PCMCIA driver, the latter from
+ Dave Hinds' PCMCIA package.
+
+ Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
+ SCSI driver cleanup and audit. This driver still needs work on the
+ following
+ - Non terminating hardware waits
+ - Some layering violations with its pcmcia stub
+
+ Redistributable under terms of the GNU General Public License
+
+ For the avoidance of doubt the "preferred form" of this code is one which
+ is in an open non patent encumbered format. Where cryptographic key signing
+ forms part of the process of creating an executable the information
+ including keys needed to generate an equivalently functional executable
+ are deemed to be part of the source code.
+
+*/
+
+#include <linux/module.h>
+#include <linux/blkdev.h> /* to get disk capacity */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/unistd.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "qlogicfas408.h"
+
+/*----------------------------------------------------------------*/
+static int qlcfg5 = (XTALFREQ << 5); /* 15625/512 */
+static int qlcfg6 = SYNCXFRPD;
+static int qlcfg7 = SYNCOFFST;
+static int qlcfg8 = (SLOWCABLE << 7) | (QL_ENABLE_PARITY << 4);
+static int qlcfg9 = ((XTALFREQ + 4) / 5);
+static int qlcfgc = (FASTCLK << 3) | (FASTSCSI << 4);
+
+/*----------------------------------------------------------------*/
+
+/*----------------------------------------------------------------*/
+/* local functions */
+/*----------------------------------------------------------------*/
+
+/* error recovery - reset everything */
+
+static void ql_zap(struct qlogicfas408_priv *priv)
+{
+ int x;
+ int qbase = priv->qbase;
+ int int_type = priv->int_type;
+
+ x = inb(qbase + 0xd);
+ REG0;
+ outb(3, qbase + 3); /* reset SCSI */
+ outb(2, qbase + 3); /* reset chip */
+ if (x & 0x80)
+ REG1;
+}
+
+/*
+ * Do a pseudo-dma tranfer
+ */
+
+static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
+{
+ int j;
+ int qbase = priv->qbase;
+ j = 0;
+ if (phase & 1) { /* in */
+#if QL_TURBO_PDMA
+ rtrc(4)
+ /* empty fifo in large chunks */
+ if (reqlen >= 128 && (inb(qbase + 8) & 2)) { /* full */
+ insl(qbase + 4, request, 32);
+ reqlen -= 128;
+ request += 128;
+ }
+ while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */
+ if ((j = inb(qbase + 8)) & 4)
+ {
+ insl(qbase + 4, request, 21);
+ reqlen -= 84;
+ request += 84;
+ }
+ if (reqlen >= 44 && (inb(qbase + 8) & 8)) { /* 1/3 */
+ insl(qbase + 4, request, 11);
+ reqlen -= 44;
+ request += 44;
+ }
+#endif
+ /* until both empty and int (or until reclen is 0) */
+ rtrc(7)
+ j = 0;
+ while (reqlen && !((j & 0x10) && (j & 0xc0)))
+ {
+ /* while bytes to receive and not empty */
+ j &= 0xc0;
+ while (reqlen && !((j = inb(qbase + 8)) & 0x10))
+ {
+ *request++ = inb(qbase + 4);
+ reqlen--;
+ }
+ if (j & 0x10)
+ j = inb(qbase + 8);
+
+ }
+ } else { /* out */
+#if QL_TURBO_PDMA
+ rtrc(4)
+ if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */
+ outsl(qbase + 4, request, 32);
+ reqlen -= 128;
+ request += 128;
+ }
+ while (reqlen >= 84 && !(j & 0xc0)) /* 1/3 */
+ if (!((j = inb(qbase + 8)) & 8)) {
+ outsl(qbase + 4, request, 21);
+ reqlen -= 84;
+ request += 84;
+ }
+ if (reqlen >= 40 && !(inb(qbase + 8) & 4)) { /* 2/3 */
+ outsl(qbase + 4, request, 10);
+ reqlen -= 40;
+ request += 40;
+ }
+#endif
+ /* until full and int (or until reclen is 0) */
+ rtrc(7)
+ j = 0;
+ while (reqlen && !((j & 2) && (j & 0xc0))) {
+ /* while bytes to send and not full */
+ while (reqlen && !((j = inb(qbase + 8)) & 2))
+ {
+ outb(*request++, qbase + 4);
+ reqlen--;
+ }
+ if (j & 2)
+ j = inb(qbase + 8);
+ }
+ }
+ /* maybe return reqlen */
+ return inb(qbase + 8) & 0xc0;
+}
+
+/*
+ * Wait for interrupt flag (polled - not real hardware interrupt)
+ */
+
+static int ql_wai(struct qlogicfas408_priv *priv)
+{
+ int k;
+ int qbase = priv->qbase;
+ unsigned long i;
+
+ k = 0;
+ i = jiffies + WATCHDOG;
+ while (time_before(jiffies, i) && !priv->qabort &&
+ !((k = inb(qbase + 4)) & 0xe0)) {
+ barrier();
+ cpu_relax();
+ }
+ if (time_after_eq(jiffies, i))
+ return (DID_TIME_OUT);
+ if (priv->qabort)
+ return (priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ if (k & 0x60)
+ ql_zap(priv);
+ if (k & 0x20)
+ return (DID_PARITY);
+ if (k & 0x40)
+ return (DID_ERROR);
+ return 0;
+}
+
+/*
+ * Initiate scsi command - queueing handler
+ * caller must hold host lock
+ */
+
+static void ql_icmd(struct scsi_cmnd *cmd)
+{
+ struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+ int qbase = priv->qbase;
+ int int_type = priv->int_type;
+ unsigned int i;
+
+ priv->qabort = 0;
+
+ REG0;
+ /* clearing of interrupts and the fifo is needed */
+
+ inb(qbase + 5); /* clear interrupts */
+ if (inb(qbase + 5)) /* if still interrupting */
+ outb(2, qbase + 3); /* reset chip */
+ else if (inb(qbase + 7) & 0x1f)
+ outb(1, qbase + 3); /* clear fifo */
+ while (inb(qbase + 5)); /* clear ints */
+ REG1;
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ outb(0, qbase + 0xb); /* disable ints */
+ inb(qbase + 8); /* clear int bits */
+ REG0;
+ outb(0x40, qbase + 0xb); /* enable features */
+
+ /* configurables */
+ outb(qlcfgc, qbase + 0xc);
+ /* config: no reset interrupt, (initiator) bus id */
+ outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
+ outb(qlcfg7, qbase + 7);
+ outb(qlcfg6, qbase + 6);
+ /**/ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9 & 7, qbase + 9); /* prescaler */
+/* outb(0x99, qbase + 5); */
+ outb(scmd_id(cmd), qbase + 4);
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ outb(cmd->cmnd[i], qbase + 2);
+
+ priv->qlcmd = cmd;
+ outb(0x41, qbase + 3); /* select and send command */
+}
+
+/*
+ * Process scsi command - usually after interrupt
+ */
+
+static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
+{
+ unsigned int i, j;
+ unsigned long k;
+ unsigned int result; /* ultimate return result */
+ unsigned int status; /* scsi returned status */
+ unsigned int message; /* scsi returned message */
+ unsigned int phase; /* recorded scsi phase */
+ unsigned int reqlen; /* total length of transfer */
+ char *buf;
+ struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+ int qbase = priv->qbase;
+ int int_type = priv->int_type;
+
+ rtrc(1)
+ j = inb(qbase + 6);
+ i = inb(qbase + 5);
+ if (i == 0x20) {
+ return (DID_NO_CONNECT << 16);
+ }
+ i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
+ if (i != 0x18) {
+ printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
+ ql_zap(priv);
+ return (DID_BAD_INTR << 16);
+ }
+ j &= 7; /* j = inb( qbase + 7 ) >> 5; */
+
+ /* correct status is supposed to be step 4 */
+ /* it sometimes returns step 3 but with 0 bytes left to send */
+ /* We can try stuffing the FIFO with the max each time, but we will get a
+ sequence of 3 if any bytes are left (but we do flush the FIFO anyway */
+
+ if (j != 3 && j != 4) {
+ printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
+ j, i, inb(qbase + 7) & 0x1f);
+ ql_zap(priv);
+ return (DID_ERROR << 16);
+ }
+ result = DID_OK;
+ if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
+ outb(1, qbase + 3); /* clear fifo */
+ /* note that request_bufflen is the total xfer size when sg is used */
+ reqlen = scsi_bufflen(cmd);
+ /* note that it won't work if transfers > 16M are requested */
+ if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
+ struct scatterlist *sg;
+ rtrc(2)
+ outb(reqlen, qbase); /* low-mid xfer cnt */
+ outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
+ outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */
+ outb(0x90, qbase + 3); /* command do xfer */
+ /* PIO pseudo DMA to buffer or sglist */
+ REG1;
+
+ scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
+ if (priv->qabort) {
+ REG0;
+ return ((priv->qabort == 1 ?
+ DID_ABORT : DID_RESET) << 16);
+ }
+ buf = sg_virt(sg);
+ if (ql_pdma(priv, phase, buf, sg->length))
+ break;
+ }
+ REG0;
+ rtrc(2)
+ /*
+ * Wait for irq (split into second state of irq handler
+ * if this can take time)
+ */
+ if ((k = ql_wai(priv)))
+ return (k << 16);
+ k = inb(qbase + 5); /* should be 0x10, bus service */
+ }
+
+ /*
+ * Enter Status (and Message In) Phase
+ */
+
+ k = jiffies + WATCHDOG;
+
+ while (time_before(jiffies, k) && !priv->qabort &&
+ !(inb(qbase + 4) & 6))
+ cpu_relax(); /* wait for status phase */
+
+ if (time_after_eq(jiffies, k)) {
+ ql_zap(priv);
+ return (DID_TIME_OUT << 16);
+ }
+
+ /* FIXME: timeout ?? */
+ while (inb(qbase + 5))
+ cpu_relax(); /* clear pending ints */
+
+ if (priv->qabort)
+ return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+
+ outb(0x11, qbase + 3); /* get status and message */
+ if ((k = ql_wai(priv)))
+ return (k << 16);
+ i = inb(qbase + 5); /* get chip irq stat */
+ j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
+ status = inb(qbase + 2);
+ message = inb(qbase + 2);
+
+ /*
+ * Should get function complete int if Status and message, else
+ * bus serv if only status
+ */
+ if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
+ printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
+ result = DID_ERROR;
+ }
+ outb(0x12, qbase + 3); /* done, disconnect */
+ rtrc(1)
+ if ((k = ql_wai(priv)))
+ return (k << 16);
+
+ /*
+ * Should get bus service interrupt and disconnect interrupt
+ */
+
+ i = inb(qbase + 5); /* should be bus service */
+ while (!priv->qabort && ((i & 0x20) != 0x20)) {
+ barrier();
+ cpu_relax();
+ i |= inb(qbase + 5);
+ }
+ rtrc(0)
+
+ if (priv->qabort)
+ return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+
+ return (result << 16) | (message << 8) | (status & STATUS_MASK);
+}
+
+/*
+ * Interrupt handler
+ */
+
+static void ql_ihandl(void *dev_id)
+{
+ struct scsi_cmnd *icmd;
+ struct Scsi_Host *host = dev_id;
+ struct qlogicfas408_priv *priv = get_priv_by_host(host);
+ int qbase = priv->qbase;
+ REG0;
+
+ if (!(inb(qbase + 4) & 0x80)) /* false alarm? */
+ return;
+
+ if (priv->qlcmd == NULL) { /* no command to process? */
+ int i;
+ i = 16;
+ while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */
+ return;
+ }
+ icmd = priv->qlcmd;
+ icmd->result = ql_pcmd(icmd);
+ priv->qlcmd = NULL;
+ /*
+ * If result is CHECK CONDITION done calls qcommand to request
+ * sense
+ */
+ (icmd->scsi_done) (icmd);
+}
+
+irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *host = dev_id;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ ql_ihandl(dev_id);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Queued command
+ */
+
+static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+ if (scmd_id(cmd) == priv->qinitid) {
+ cmd->result = DID_BAD_TARGET << 16;
+ done(cmd);
+ return 0;
+ }
+
+ cmd->scsi_done = done;
+ /* wait for the last command's interrupt to finish */
+ while (priv->qlcmd != NULL) {
+ barrier();
+ cpu_relax();
+ }
+ ql_icmd(cmd);
+ return 0;
+}
+
+DEF_SCSI_QCMD(qlogicfas408_queuecommand)
+
+/*
+ * Return bios parameters
+ */
+
+int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
+ sector_t capacity, int ip[])
+{
+/* This should mimic the DOS Qlogic driver's behavior exactly */
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
+#if 0
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+#endif
+ }
+ return 0;
+}
+
+/*
+ * Abort a command in progress
+ */
+
+int qlogicfas408_abort(struct scsi_cmnd *cmd)
+{
+ struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+ priv->qabort = 1;
+ ql_zap(priv);
+ return SUCCESS;
+}
+
+/*
+ * Reset SCSI bus
+ * FIXME: This function is invoked with cmd = NULL directly by
+ * the PCMCIA qlogic_stub code. This wants fixing
+ */
+
+int qlogicfas408_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+ unsigned long flags;
+
+ priv->qabort = 2;
+
+ spin_lock_irqsave(cmd->device->host->host_lock, flags);
+ ql_zap(priv);
+ spin_unlock_irqrestore(cmd->device->host->host_lock, flags);
+
+ return SUCCESS;
+}
+
+/*
+ * Return info string
+ */
+
+const char *qlogicfas408_info(struct Scsi_Host *host)
+{
+ struct qlogicfas408_priv *priv = get_priv_by_host(host);
+ return priv->qinfo;
+}
+
+/*
+ * Get type of chip
+ */
+
+int qlogicfas408_get_chip_type(int qbase, int int_type)
+{
+ REG1;
+ return inb(qbase + 0xe) & 0xf8;
+}
+
+/*
+ * Perform initialization tasks
+ */
+
+void qlogicfas408_setup(int qbase, int id, int int_type)
+{
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ REG0;
+ outb(0x40 | qlcfg8 | id, qbase + 8); /* (ini) bus id, disable scsi rst */
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9, qbase + 9); /* prescaler */
+
+#if QL_RESET_AT_START
+ outb(3, qbase + 3);
+
+ REG1;
+ /* FIXME: timeout */
+ while (inb(qbase + 0xf) & 4)
+ cpu_relax();
+
+ REG0;
+#endif
+}
+
+/*
+ * Checks if this is a QLogic FAS 408
+ */
+
+int qlogicfas408_detect(int qbase, int int_type)
+{
+ REG1;
+ return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
+ ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
+}
+
+/*
+ * Disable interrupts
+ */
+
+void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv)
+{
+ int qbase = priv->qbase;
+ int int_type = priv->int_type;
+
+ REG1;
+ outb(0, qbase + 0xb); /* disable ints */
+}
+
+/*
+ * Init and exit functions
+ */
+
+static int __init qlogicfas408_init(void)
+{
+ return 0;
+}
+
+static void __exit qlogicfas408_exit(void)
+{
+
+}
+
+MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
+MODULE_DESCRIPTION("Driver for the Qlogic FAS SCSI controllers");
+MODULE_LICENSE("GPL");
+module_init(qlogicfas408_init);
+module_exit(qlogicfas408_exit);
+
+EXPORT_SYMBOL(qlogicfas408_info);
+EXPORT_SYMBOL(qlogicfas408_queuecommand);
+EXPORT_SYMBOL(qlogicfas408_abort);
+EXPORT_SYMBOL(qlogicfas408_bus_reset);
+EXPORT_SYMBOL(qlogicfas408_biosparam);
+EXPORT_SYMBOL(qlogicfas408_ihandl);
+EXPORT_SYMBOL(qlogicfas408_get_chip_type);
+EXPORT_SYMBOL(qlogicfas408_setup);
+EXPORT_SYMBOL(qlogicfas408_detect);
+EXPORT_SYMBOL(qlogicfas408_disable_ints);
+
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
new file mode 100644
index 000000000..2f6c0a166
--- /dev/null
+++ b/drivers/scsi/qlogicfas408.h
@@ -0,0 +1,118 @@
+/* to be used by qlogicfas and qlogic_cs */
+#ifndef __QLOGICFAS408_H
+#define __QLOGICFAS408_H
+
+/*----------------------------------------------------------------*/
+/* Configuration */
+
+/* Set the following to max out the speed of the PIO PseudoDMA transfers,
+ again, 0 tends to be slower, but more stable. */
+
+#define QL_TURBO_PDMA 1
+
+/* This should be 1 to enable parity detection */
+
+#define QL_ENABLE_PARITY 1
+
+/* This will reset all devices when the driver is initialized (during bootup).
+ The other linux drivers don't do this, but the DOS drivers do, and after
+ using DOS or some kind of crash or lockup this will bring things back
+ without requiring a cold boot. It does take some time to recover from a
+ reset, so it is slower, and I have seen timeouts so that devices weren't
+ recognized when this was set. */
+
+#define QL_RESET_AT_START 0
+
+/* crystal frequency in megahertz (for offset 5 and 9)
+ Please set this for your card. Most Qlogic cards are 40 Mhz. The
+ Control Concepts ISA (not VLB) is 24 Mhz */
+
+#define XTALFREQ 40
+
+/**********/
+/* DANGER! modify these at your own risk */
+/* SLOWCABLE can usually be reset to zero if you have a clean setup and
+ proper termination. The rest are for synchronous transfers and other
+ advanced features if your device can transfer faster than 5Mb/sec.
+ If you are really curious, email me for a quick howto until I have
+ something official */
+/**********/
+
+/*****/
+/* config register 1 (offset 8) options */
+/* This needs to be set to 1 if your cabling is long or noisy */
+#define SLOWCABLE 1
+
+/*****/
+/* offset 0xc */
+/* This will set fast (10Mhz) synchronous timing when set to 1
+ For this to have an effect, FASTCLK must also be 1 */
+#define FASTSCSI 0
+
+/* This when set to 1 will set a faster sync transfer rate */
+#define FASTCLK 0 /*(XTALFREQ>25?1:0)*/
+
+/*****/
+/* offset 6 */
+/* This is the sync transfer divisor, XTALFREQ/X will be the maximum
+ achievable data rate (assuming the rest of the system is capable
+ and set properly) */
+#define SYNCXFRPD 5 /*(XTALFREQ/5)*/
+
+/*****/
+/* offset 7 */
+/* This is the count of how many synchronous transfers can take place
+ i.e. how many reqs can occur before an ack is given.
+ The maximum value for this is 15, the upper bits can modify
+ REQ/ACK assertion and deassertion during synchronous transfers
+ If this is 0, the bus will only transfer asynchronously */
+#define SYNCOFFST 0
+/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles
+ of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will
+ cause the deassertion to be early by 1/2 clock. Bits 5&4 control
+ the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). */
+
+/*----------------------------------------------------------------*/
+
+struct qlogicfas408_priv {
+ int qbase; /* Port */
+ int qinitid; /* initiator ID */
+ int qabort; /* Flag to cause an abort */
+ int qlirq; /* IRQ being used */
+ int int_type; /* type of irq, 2 for ISA board, 0 for PCMCIA */
+ char qinfo[80]; /* description */
+ struct scsi_cmnd *qlcmd; /* current command being processed */
+ struct Scsi_Host *shost; /* pointer back to host */
+ struct qlogicfas408_priv *next; /* next private struct */
+};
+
+/* The qlogic card uses two register maps - These macros select which one */
+#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd ))
+#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | int_type, qbase + 0xd ))
+
+/* following is watchdog timeout in microseconds */
+#define WATCHDOG 5000000
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at and as a simple profiler) */
+
+#define rtrc(i) {}
+
+#define get_priv_by_cmd(x) (struct qlogicfas408_priv *)&((x)->device->host->hostdata[0])
+#define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0])
+
+irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id);
+int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd);
+int qlogicfas408_biosparam(struct scsi_device * disk,
+ struct block_device *dev,
+ sector_t capacity, int ip[]);
+int qlogicfas408_abort(struct scsi_cmnd * cmd);
+int qlogicfas408_bus_reset(struct scsi_cmnd * cmd);
+const char *qlogicfas408_info(struct Scsi_Host *host);
+int qlogicfas408_get_chip_type(int qbase, int int_type);
+void qlogicfas408_setup(int qbase, int id, int int_type);
+int qlogicfas408_detect(int qbase, int int_type);
+void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv);
+#endif /* __QLOGICFAS408_H */
+
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
new file mode 100644
index 000000000..d8726245e
--- /dev/null
+++ b/drivers/scsi/qlogicpti.c
@@ -0,0 +1,1481 @@
+/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
+ *
+ * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
+ *
+ * A lot of this driver was directly stolen from Erik H. Moe's PCI
+ * Qlogic ISP driver. Mucho kudos to him for this code.
+ *
+ * An even bigger kudos to John Grana at Performance Technologies
+ * for providing me with the hardware to write this driver, you rule
+ * John you really do.
+ *
+ * May, 2, 1997: Added support for QLGC,isp --jj
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/blkdev.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/firmware.h>
+
+#include <asm/byteorder.h>
+
+#include "qlogicpti.h"
+
+#include <asm/dma.h>
+#include <asm/ptrace.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+
+#define MAX_TARGETS 16
+#define MAX_LUNS 8 /* 32 for 1.31 F/W */
+
+#define DEFAULT_LOOP_COUNT 10000
+
+static struct qlogicpti *qptichain = NULL;
+static DEFINE_SPINLOCK(qptichain_lock);
+
+#define PACKB(a, b) (((a)<<4)|(b))
+
+static const u_char mbox_param[] = {
+ PACKB(1, 1), /* MBOX_NO_OP */
+ PACKB(5, 5), /* MBOX_LOAD_RAM */
+ PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */
+ PACKB(5, 5), /* MBOX_DUMP_RAM */
+ PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */
+ PACKB(2, 3), /* MBOX_READ_RAM_WORD */
+ PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */
+ PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */
+ PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */
+ PACKB(0, 0), /* 0x0009 */
+ PACKB(0, 0), /* 0x000a */
+ PACKB(0, 0), /* 0x000b */
+ PACKB(0, 0), /* 0x000c */
+ PACKB(0, 0), /* 0x000d */
+ PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */
+ PACKB(0, 0), /* 0x000f */
+ PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */
+ PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */
+ PACKB(4, 4), /* MBOX_EXECUTE_IOCB */
+ PACKB(2, 2), /* MBOX_WAKE_UP */
+ PACKB(1, 6), /* MBOX_STOP_FIRMWARE */
+ PACKB(4, 4), /* MBOX_ABORT */
+ PACKB(2, 2), /* MBOX_ABORT_DEVICE */
+ PACKB(3, 3), /* MBOX_ABORT_TARGET */
+ PACKB(2, 2), /* MBOX_BUS_RESET */
+ PACKB(2, 3), /* MBOX_STOP_QUEUE */
+ PACKB(2, 3), /* MBOX_START_QUEUE */
+ PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */
+ PACKB(2, 3), /* MBOX_ABORT_QUEUE */
+ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */
+ PACKB(0, 0), /* 0x001e */
+ PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */
+ PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */
+ PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */
+ PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */
+ PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */
+ PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */
+ PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */
+ PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */
+ PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */
+ PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */
+ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */
+ PACKB(0, 0), /* 0x002a */
+ PACKB(0, 0), /* 0x002b */
+ PACKB(0, 0), /* 0x002c */
+ PACKB(0, 0), /* 0x002d */
+ PACKB(0, 0), /* 0x002e */
+ PACKB(0, 0), /* 0x002f */
+ PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */
+ PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */
+ PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */
+ PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */
+ PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */
+ PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */
+ PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */
+ PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */
+ PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */
+ PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */
+ PACKB(0, 0), /* 0x003a */
+ PACKB(0, 0), /* 0x003b */
+ PACKB(0, 0), /* 0x003c */
+ PACKB(0, 0), /* 0x003d */
+ PACKB(0, 0), /* 0x003e */
+ PACKB(0, 0), /* 0x003f */
+ PACKB(0, 0), /* 0x0040 */
+ PACKB(0, 0), /* 0x0041 */
+ PACKB(0, 0) /* 0x0042 */
+};
+
+#define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param)
+
+/* queue length's _must_ be power of two: */
+#define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql))
+#define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \
+ QLOGICPTI_REQ_QUEUE_LEN)
+#define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
+
+static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
+{
+ sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
+ qpti->qregs + SBUS_CTRL);
+}
+
+static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
+{
+ sbus_writew(0, qpti->qregs + SBUS_CTRL);
+}
+
+static inline void set_sbus_cfg1(struct qlogicpti *qpti)
+{
+ u16 val;
+ u8 bursts = qpti->bursts;
+
+#if 0 /* It appears that at least PTI cards do not support
+ * 64-byte bursts and that setting the B64 bit actually
+ * is a nop and the chip ends up using the smallest burst
+ * size. -DaveM
+ */
+ if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
+ val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
+ } else
+#endif
+ if (bursts & DMA_BURST32) {
+ val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
+ } else if (bursts & DMA_BURST16) {
+ val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
+ } else if (bursts & DMA_BURST8) {
+ val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
+ } else {
+ val = 0; /* No sbus bursts for you... */
+ }
+ sbus_writew(val, qpti->qregs + SBUS_CFG1);
+}
+
+static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
+{
+ int loop_count;
+ u16 tmp;
+
+ if (mbox_param[param[0]] == 0)
+ return 1;
+
+ /* Set SBUS semaphore. */
+ tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
+ tmp |= SBUS_SEMAPHORE_LCK;
+ sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
+
+ /* Wait for host IRQ bit to clear. */
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
+ barrier();
+ cpu_relax();
+ }
+ if (!loop_count)
+ printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
+ qpti->qpti_id);
+
+ /* Write mailbox command registers. */
+ switch (mbox_param[param[0]] >> 4) {
+ case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
+ case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
+ case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
+ case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
+ case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
+ case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
+ }
+
+ /* Clear RISC interrupt. */
+ tmp = sbus_readw(qpti->qregs + HCCTRL);
+ tmp |= HCCTRL_CRIRQ;
+ sbus_writew(tmp, qpti->qregs + HCCTRL);
+
+ /* Clear SBUS semaphore. */
+ sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
+
+ /* Set HOST interrupt. */
+ tmp = sbus_readw(qpti->qregs + HCCTRL);
+ tmp |= HCCTRL_SHIRQ;
+ sbus_writew(tmp, qpti->qregs + HCCTRL);
+
+ /* Wait for HOST interrupt clears. */
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count &&
+ (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
+ udelay(20);
+ if (!loop_count)
+ printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
+ qpti->qpti_id, param[0]);
+
+ /* Wait for SBUS semaphore to get set. */
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count &&
+ !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
+ udelay(20);
+
+ /* Workaround for some buggy chips. */
+ if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
+ break;
+ }
+ if (!loop_count)
+ printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
+ qpti->qpti_id, param[0]);
+
+ /* Wait for MBOX busy condition to go away. */
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
+ udelay(20);
+ if (!loop_count)
+ printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
+ qpti->qpti_id, param[0]);
+
+ /* Read back output parameters. */
+ switch (mbox_param[param[0]] & 0xf) {
+ case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
+ case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
+ case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
+ case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
+ case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
+ case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
+ }
+
+ /* Clear RISC interrupt. */
+ tmp = sbus_readw(qpti->qregs + HCCTRL);
+ tmp |= HCCTRL_CRIRQ;
+ sbus_writew(tmp, qpti->qregs + HCCTRL);
+
+ /* Release SBUS semaphore. */
+ tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
+ tmp &= ~(SBUS_SEMAPHORE_LCK);
+ sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);
+
+ /* We're done. */
+ return 0;
+}
+
+static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
+{
+ int i;
+
+ qpti->host_param.initiator_scsi_id = qpti->scsi_id;
+ qpti->host_param.bus_reset_delay = 3;
+ qpti->host_param.retry_count = 0;
+ qpti->host_param.retry_delay = 5;
+ qpti->host_param.async_data_setup_time = 3;
+ qpti->host_param.req_ack_active_negation = 1;
+ qpti->host_param.data_line_active_negation = 1;
+ qpti->host_param.data_dma_burst_enable = 1;
+ qpti->host_param.command_dma_burst_enable = 1;
+ qpti->host_param.tag_aging = 8;
+ qpti->host_param.selection_timeout = 250;
+ qpti->host_param.max_queue_depth = 256;
+
+ for(i = 0; i < MAX_TARGETS; i++) {
+ /*
+ * disconnect, parity, arq, reneg on reset, and, oddly enough
+ * tags...the midlayer's notion of tagged support has to match
+ * our device settings, and since we base whether we enable a
+ * tag on a per-cmnd basis upon what the midlayer sez, we
+ * actually enable the capability here.
+ */
+ qpti->dev_param[i].device_flags = 0xcd;
+ qpti->dev_param[i].execution_throttle = 16;
+ if (qpti->ultra) {
+ qpti->dev_param[i].synchronous_period = 12;
+ qpti->dev_param[i].synchronous_offset = 8;
+ } else {
+ qpti->dev_param[i].synchronous_period = 25;
+ qpti->dev_param[i].synchronous_offset = 12;
+ }
+ qpti->dev_param[i].device_enable = 1;
+ }
+}
+
+static int qlogicpti_reset_hardware(struct Scsi_Host *host)
+{
+ struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
+ u_short param[6];
+ unsigned short risc_code_addr;
+ int loop_count, i;
+ unsigned long flags;
+
+ risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
+
+ /* Only reset the scsi bus if it is not free. */
+ if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
+ sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
+ sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
+ udelay(400);
+ }
+
+ sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
+ sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
+ sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
+ udelay(20);
+ if (!loop_count)
+ printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
+ qpti->qpti_id);
+
+ sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
+ set_sbus_cfg1(qpti);
+ qlogicpti_enable_irqs(qpti);
+
+ if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
+ qpti->ultra = 1;
+ sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
+ qpti->qregs + RISC_MTREG);
+ } else {
+ qpti->ultra = 0;
+ sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
+ qpti->qregs + RISC_MTREG);
+ }
+
+ /* reset adapter and per-device default values. */
+ /* do it after finding out whether we're ultra mode capable */
+ qlogicpti_set_hostdev_defaults(qpti);
+
+ /* Release the RISC processor. */
+ sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
+
+ /* Get RISC to start executing the firmware code. */
+ param[0] = MBOX_EXEC_FIRMWARE;
+ param[1] = risc_code_addr;
+ if (qlogicpti_mbox_command(qpti, param, 1)) {
+ printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
+ qpti->qpti_id);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ }
+
+ /* Set initiator scsi ID. */
+ param[0] = MBOX_SET_INIT_SCSI_ID;
+ param[1] = qpti->host_param.initiator_scsi_id;
+ if (qlogicpti_mbox_command(qpti, param, 1) ||
+ (param[0] != MBOX_COMMAND_COMPLETE)) {
+ printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
+ qpti->qpti_id);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ }
+
+ /* Initialize state of the queues, both hw and sw. */
+ qpti->req_in_ptr = qpti->res_out_ptr = 0;
+
+ param[0] = MBOX_INIT_RES_QUEUE;
+ param[1] = RES_QUEUE_LEN + 1;
+ param[2] = (u_short) (qpti->res_dvma >> 16);
+ param[3] = (u_short) (qpti->res_dvma & 0xffff);
+ param[4] = param[5] = 0;
+ if (qlogicpti_mbox_command(qpti, param, 1)) {
+ printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
+ qpti->qpti_id);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ }
+
+ param[0] = MBOX_INIT_REQ_QUEUE;
+ param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
+ param[2] = (u_short) (qpti->req_dvma >> 16);
+ param[3] = (u_short) (qpti->req_dvma & 0xffff);
+ param[4] = param[5] = 0;
+ if (qlogicpti_mbox_command(qpti, param, 1)) {
+ printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
+ qpti->qpti_id);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ }
+
+ param[0] = MBOX_SET_RETRY_COUNT;
+ param[1] = qpti->host_param.retry_count;
+ param[2] = qpti->host_param.retry_delay;
+ qlogicpti_mbox_command(qpti, param, 0);
+
+ param[0] = MBOX_SET_TAG_AGE_LIMIT;
+ param[1] = qpti->host_param.tag_aging;
+ qlogicpti_mbox_command(qpti, param, 0);
+
+ for (i = 0; i < MAX_TARGETS; i++) {
+ param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
+ param[1] = (i << 8);
+ qlogicpti_mbox_command(qpti, param, 0);
+ }
+
+ param[0] = MBOX_GET_FIRMWARE_STATUS;
+ qlogicpti_mbox_command(qpti, param, 0);
+
+ param[0] = MBOX_SET_SELECT_TIMEOUT;
+ param[1] = qpti->host_param.selection_timeout;
+ qlogicpti_mbox_command(qpti, param, 0);
+
+ for (i = 0; i < MAX_TARGETS; i++) {
+ param[0] = MBOX_SET_TARGET_PARAMS;
+ param[1] = (i << 8);
+ param[2] = (qpti->dev_param[i].device_flags << 8);
+ /*
+ * Since we're now loading 1.31 f/w, force narrow/async.
+ */
+ param[2] |= 0xc0;
+ param[3] = 0; /* no offset, we do not have sync mode yet */
+ qlogicpti_mbox_command(qpti, param, 0);
+ }
+
+ /*
+ * Always (sigh) do an initial bus reset (kicks f/w).
+ */
+ param[0] = MBOX_BUS_RESET;
+ param[1] = qpti->host_param.bus_reset_delay;
+ qlogicpti_mbox_command(qpti, param, 0);
+ qpti->send_marker = 1;
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 0;
+}
+
+#define PTI_RESET_LIMIT 400
+
+static int qlogicpti_load_firmware(struct qlogicpti *qpti)
+{
+ const struct firmware *fw;
+ const char fwname[] = "/*(DEBLOBBED)*/";
+ const __le16 *fw_data;
+ struct Scsi_Host *host = qpti->qhost;
+ unsigned short csum = 0;
+ unsigned short param[6];
+ unsigned short risc_code_addr, risc_code_length;
+ int err;
+ unsigned long flags;
+ int i, timeout;
+
+ err = reject_firmware(&fw, fwname, &qpti->op->dev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fwname, err);
+ return err;
+ }
+ if (fw->size % 2) {
+ printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
+ fw->size, fwname);
+ err = -EINVAL;
+ goto outfirm;
+ }
+ fw_data = (const __le16 *)&fw->data[0];
+ risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */
+ risc_code_length = fw->size / 2;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* Verify the checksum twice, one before loading it, and once
+ * afterwards via the mailbox commands.
+ */
+ for (i = 0; i < risc_code_length; i++)
+ csum += __le16_to_cpu(fw_data[i]);
+ if (csum) {
+ printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
+ qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+ sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
+ sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
+ sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
+ timeout = PTI_RESET_LIMIT;
+ while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
+ udelay(20);
+ if (!timeout) {
+ printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+
+ sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
+ mdelay(1);
+
+ sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
+ set_sbus_cfg1(qpti);
+ sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
+
+ if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
+ qpti->ultra = 1;
+ sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
+ qpti->qregs + RISC_MTREG);
+ } else {
+ qpti->ultra = 0;
+ sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
+ qpti->qregs + RISC_MTREG);
+ }
+
+ sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
+
+ /* Pin lines are only stable while RISC is paused. */
+ sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
+ if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
+ qpti->differential = 1;
+ else
+ qpti->differential = 0;
+ sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
+
+ /* This shouldn't be necessary- we've reset things so we should be
+ running from the ROM now.. */
+
+ param[0] = MBOX_STOP_FIRMWARE;
+ param[1] = param[2] = param[3] = param[4] = param[5] = 0;
+ if (qlogicpti_mbox_command(qpti, param, 1)) {
+ printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
+ qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+
+ /* Load it up.. */
+ for (i = 0; i < risc_code_length; i++) {
+ param[0] = MBOX_WRITE_RAM_WORD;
+ param[1] = risc_code_addr + i;
+ param[2] = __le16_to_cpu(fw_data[i]);
+ if (qlogicpti_mbox_command(qpti, param, 1) ||
+ param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
+ qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+ }
+
+ /* Reset the ISP again. */
+ sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
+ mdelay(1);
+
+ qlogicpti_enable_irqs(qpti);
+ sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
+ sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);
+
+ /* Ask ISP to verify the checksum of the new code. */
+ param[0] = MBOX_VERIFY_CHECKSUM;
+ param[1] = risc_code_addr;
+ if (qlogicpti_mbox_command(qpti, param, 1) ||
+ (param[0] != MBOX_COMMAND_COMPLETE)) {
+ printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
+ qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+
+ /* Start using newly downloaded firmware. */
+ param[0] = MBOX_EXEC_FIRMWARE;
+ param[1] = risc_code_addr;
+ qlogicpti_mbox_command(qpti, param, 1);
+
+ param[0] = MBOX_ABOUT_FIRMWARE;
+ if (qlogicpti_mbox_command(qpti, param, 1) ||
+ (param[0] != MBOX_COMMAND_COMPLETE)) {
+ printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
+ qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+
+ /* Snag the major and minor revisions from the result. */
+ qpti->fware_majrev = param[1];
+ qpti->fware_minrev = param[2];
+ qpti->fware_micrev = param[3];
+
+ /* Set the clock rate */
+ param[0] = MBOX_SET_CLOCK_RATE;
+ param[1] = qpti->clock;
+ if (qlogicpti_mbox_command(qpti, param, 1) ||
+ (param[0] != MBOX_COMMAND_COMPLETE)) {
+ printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
+ qpti->qpti_id);
+ err = 1;
+ goto out;
+ }
+
+ if (qpti->is_pti != 0) {
+ /* Load scsi initiator ID and interrupt level into sbus static ram. */
+ param[0] = MBOX_WRITE_RAM_WORD;
+ param[1] = 0xff80;
+ param[2] = (unsigned short) qpti->scsi_id;
+ qlogicpti_mbox_command(qpti, param, 1);
+
+ param[0] = MBOX_WRITE_RAM_WORD;
+ param[1] = 0xff00;
+ param[2] = (unsigned short) 3;
+ qlogicpti_mbox_command(qpti, param, 1);
+ }
+
+out:
+ spin_unlock_irqrestore(host->host_lock, flags);
+outfirm:
+ release_firmware(fw);
+ return err;
+}
+
+static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
+{
+ int curstat = sbus_readb(qpti->sreg);
+
+ curstat &= 0xf0;
+ if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
+ printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
+ if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
+ printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
+ if (curstat != qpti->swsreg) {
+ int error = 0;
+ if (curstat & SREG_FUSE) {
+ error++;
+ printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
+ }
+ if (curstat & SREG_TPOWER) {
+ error++;
+ printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
+ }
+ if (qpti->differential &&
+ (curstat & SREG_DSENSE) != SREG_DSENSE) {
+ error++;
+ printk("qlogicpti%d: You have a single ended device on a "
+ "differential bus! Please fix!\n", qpti->qpti_id);
+ }
+ qpti->swsreg = curstat;
+ return error;
+ }
+ return 0;
+}
+
+static irqreturn_t qpti_intr(int irq, void *dev_id);
+
+static void qpti_chain_add(struct qlogicpti *qpti)
+{
+ spin_lock_irq(&qptichain_lock);
+ if (qptichain != NULL) {
+ struct qlogicpti *qlink = qptichain;
+
+ while(qlink->next)
+ qlink = qlink->next;
+ qlink->next = qpti;
+ } else {
+ qptichain = qpti;
+ }
+ qpti->next = NULL;
+ spin_unlock_irq(&qptichain_lock);
+}
+
+static void qpti_chain_del(struct qlogicpti *qpti)
+{
+ spin_lock_irq(&qptichain_lock);
+ if (qptichain == qpti) {
+ qptichain = qpti->next;
+ } else {
+ struct qlogicpti *qlink = qptichain;
+ while(qlink->next != qpti)
+ qlink = qlink->next;
+ qlink->next = qpti->next;
+ }
+ qpti->next = NULL;
+ spin_unlock_irq(&qptichain_lock);
+}
+
+static int qpti_map_regs(struct qlogicpti *qpti)
+{
+ struct platform_device *op = qpti->op;
+
+ qpti->qregs = of_ioremap(&op->resource[0], 0,
+ resource_size(&op->resource[0]),
+ "PTI Qlogic/ISP");
+ if (!qpti->qregs) {
+ printk("PTI: Qlogic/ISP registers are unmappable\n");
+ return -1;
+ }
+ if (qpti->is_pti) {
+ qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
+ sizeof(unsigned char),
+ "PTI Qlogic/ISP statreg");
+ if (!qpti->sreg) {
+ printk("PTI: Qlogic/ISP status register is unmappable\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int qpti_register_irq(struct qlogicpti *qpti)
+{
+ struct platform_device *op = qpti->op;
+
+ qpti->qhost->irq = qpti->irq = op->archdata.irqs[0];
+
+ /* We used to try various overly-clever things to
+ * reduce the interrupt processing overhead on
+ * sun4c/sun4m when multiple PTI's shared the
+ * same IRQ. It was too complex and messy to
+ * sanely maintain.
+ */
+ if (request_irq(qpti->irq, qpti_intr,
+ IRQF_SHARED, "QlogicPTI", qpti))
+ goto fail;
+
+ printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
+
+ return 0;
+
+fail:
+ printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
+ return -1;
+}
+
+static void qpti_get_scsi_id(struct qlogicpti *qpti)
+{
+ struct platform_device *op = qpti->op;
+ struct device_node *dp;
+
+ dp = op->dev.of_node;
+
+ qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
+ if (qpti->scsi_id == -1)
+ qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
+ -1);
+ if (qpti->scsi_id == -1)
+ qpti->scsi_id =
+ of_getintprop_default(dp->parent,
+ "scsi-initiator-id", 7);
+ qpti->qhost->this_id = qpti->scsi_id;
+ qpti->qhost->max_sectors = 64;
+
+ printk("SCSI ID %d ", qpti->scsi_id);
+}
+
+static void qpti_get_bursts(struct qlogicpti *qpti)
+{
+ struct platform_device *op = qpti->op;
+ u8 bursts, bmask;
+
+ bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff);
+ bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff);
+ if (bmask != 0xff)
+ bursts &= bmask;
+ if (bursts == 0xff ||
+ (bursts & DMA_BURST16) == 0 ||
+ (bursts & DMA_BURST32) == 0)
+ bursts = (DMA_BURST32 - 1);
+
+ qpti->bursts = bursts;
+}
+
+static void qpti_get_clock(struct qlogicpti *qpti)
+{
+ unsigned int cfreq;
+
+ /* Check for what the clock input to this card is.
+ * Default to 40Mhz.
+ */
+ cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000);
+ qpti->clock = (cfreq + 500000)/1000000;
+ if (qpti->clock == 0) /* bullshit */
+ qpti->clock = 40;
+}
+
+/* The request and response queues must each be aligned
+ * on a page boundary.
+ */
+static int qpti_map_queues(struct qlogicpti *qpti)
+{
+ struct platform_device *op = qpti->op;
+
+#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
+ qpti->res_cpu = dma_alloc_coherent(&op->dev,
+ QSIZE(RES_QUEUE_LEN),
+ &qpti->res_dvma, GFP_ATOMIC);
+ if (qpti->res_cpu == NULL ||
+ qpti->res_dvma == 0) {
+ printk("QPTI: Cannot map response queue.\n");
+ return -1;
+ }
+
+ qpti->req_cpu = dma_alloc_coherent(&op->dev,
+ QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
+ &qpti->req_dvma, GFP_ATOMIC);
+ if (qpti->req_cpu == NULL ||
+ qpti->req_dvma == 0) {
+ dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
+ qpti->res_cpu, qpti->res_dvma);
+ printk("QPTI: Cannot map request queue.\n");
+ return -1;
+ }
+ memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
+ memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
+ return 0;
+}
+
+const char *qlogicpti_info(struct Scsi_Host *host)
+{
+ static char buf[80];
+ struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
+
+ sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
+ qpti->qhost->irq, qpti->qregs);
+ return buf;
+}
+
+/* I am a certified frobtronicist. */
+static inline void marker_frob(struct Command_Entry *cmd)
+{
+ struct Marker_Entry *marker = (struct Marker_Entry *) cmd;
+
+ memset(marker, 0, sizeof(struct Marker_Entry));
+ marker->hdr.entry_cnt = 1;
+ marker->hdr.entry_type = ENTRY_MARKER;
+ marker->modifier = SYNC_ALL;
+ marker->rsvd = 0;
+}
+
+static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
+ struct qlogicpti *qpti)
+{
+ memset(cmd, 0, sizeof(struct Command_Entry));
+ cmd->hdr.entry_cnt = 1;
+ cmd->hdr.entry_type = ENTRY_COMMAND;
+ cmd->target_id = Cmnd->device->id;
+ cmd->target_lun = Cmnd->device->lun;
+ cmd->cdb_length = Cmnd->cmd_len;
+ cmd->control_flags = 0;
+ if (Cmnd->device->tagged_supported) {
+ if (qpti->cmd_count[Cmnd->device->id] == 0)
+ qpti->tag_ages[Cmnd->device->id] = jiffies;
+ if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
+ cmd->control_flags = CFLAG_ORDERED_TAG;
+ qpti->tag_ages[Cmnd->device->id] = jiffies;
+ } else
+ cmd->control_flags = CFLAG_SIMPLE_TAG;
+ }
+ if ((Cmnd->cmnd[0] == WRITE_6) ||
+ (Cmnd->cmnd[0] == WRITE_10) ||
+ (Cmnd->cmnd[0] == WRITE_12))
+ cmd->control_flags |= CFLAG_WRITE;
+ else
+ cmd->control_flags |= CFLAG_READ;
+ cmd->time_out = Cmnd->request->timeout/HZ;
+ memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
+}
+
+/* Do it to it baby. */
+static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
+ struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
+{
+ struct dataseg *ds;
+ struct scatterlist *sg, *s;
+ int i, n;
+
+ if (scsi_bufflen(Cmnd)) {
+ int sg_count;
+
+ sg = scsi_sglist(Cmnd);
+ sg_count = dma_map_sg(&qpti->op->dev, sg,
+ scsi_sg_count(Cmnd),
+ Cmnd->sc_data_direction);
+
+ ds = cmd->dataseg;
+ cmd->segment_cnt = sg_count;
+
+ /* Fill in first four sg entries: */
+ n = sg_count;
+ if (n > 4)
+ n = 4;
+ for_each_sg(sg, s, n, i) {
+ ds[i].d_base = sg_dma_address(s);
+ ds[i].d_count = sg_dma_len(s);
+ }
+ sg_count -= 4;
+ sg = s;
+ while (sg_count > 0) {
+ struct Continuation_Entry *cont;
+
+ ++cmd->hdr.entry_cnt;
+ cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
+ in_ptr = NEXT_REQ_PTR(in_ptr);
+ if (in_ptr == out_ptr)
+ return -1;
+
+ cont->hdr.entry_type = ENTRY_CONTINUATION;
+ cont->hdr.entry_cnt = 0;
+ cont->hdr.sys_def_1 = 0;
+ cont->hdr.flags = 0;
+ cont->reserved = 0;
+ ds = cont->dataseg;
+ n = sg_count;
+ if (n > 7)
+ n = 7;
+ for_each_sg(sg, s, n, i) {
+ ds[i].d_base = sg_dma_address(s);
+ ds[i].d_count = sg_dma_len(s);
+ }
+ sg_count -= n;
+ sg = s;
+ }
+ } else {
+ cmd->dataseg[0].d_base = 0;
+ cmd->dataseg[0].d_count = 0;
+ cmd->segment_cnt = 1; /* Shouldn't this be 0? */
+ }
+
+ /* Committed, record Scsi_Cmd so we can find it later. */
+ cmd->handle = in_ptr;
+ qpti->cmd_slots[in_ptr] = Cmnd;
+
+ qpti->cmd_count[Cmnd->device->id]++;
+ sbus_writew(in_ptr, qpti->qregs + MBOX4);
+ qpti->req_in_ptr = in_ptr;
+
+ return in_ptr;
+}
+
+static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
+{
+ /* Temporary workaround until bug is found and fixed (one bug has been found
+ already, but fixing it makes things even worse) -jj */
+ int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
+ host->can_queue = atomic_read(&host->host_busy) + num_free;
+ host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
+}
+
+static int qlogicpti_slave_configure(struct scsi_device *sdev)
+{
+ struct qlogicpti *qpti = shost_priv(sdev->host);
+ int tgt = sdev->id;
+ u_short param[6];
+
+ /* tags handled in midlayer */
+ /* enable sync mode? */
+ if (sdev->sdtr) {
+ qpti->dev_param[tgt].device_flags |= 0x10;
+ } else {
+ qpti->dev_param[tgt].synchronous_offset = 0;
+ qpti->dev_param[tgt].synchronous_period = 0;
+ }
+ /* are we wide capable? */
+ if (sdev->wdtr)
+ qpti->dev_param[tgt].device_flags |= 0x20;
+
+ param[0] = MBOX_SET_TARGET_PARAMS;
+ param[1] = (tgt << 8);
+ param[2] = (qpti->dev_param[tgt].device_flags << 8);
+ if (qpti->dev_param[tgt].device_flags & 0x10) {
+ param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) |
+ qpti->dev_param[tgt].synchronous_period;
+ } else {
+ param[3] = 0;
+ }
+ qlogicpti_mbox_command(qpti, param, 0);
+ return 0;
+}
+
+/*
+ * The middle SCSI layer ensures that queuecommand never gets invoked
+ * concurrently with itself or the interrupt handler (though the
+ * interrupt handler may call this routine as part of
+ * request-completion handling).
+ *
+ * "This code must fly." -davem
+ */
+static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *host = Cmnd->device->host;
+ struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
+ struct Command_Entry *cmd;
+ u_int out_ptr;
+ int in_ptr;
+
+ Cmnd->scsi_done = done;
+
+ in_ptr = qpti->req_in_ptr;
+ cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
+ out_ptr = sbus_readw(qpti->qregs + MBOX4);
+ in_ptr = NEXT_REQ_PTR(in_ptr);
+ if (in_ptr == out_ptr)
+ goto toss_command;
+
+ if (qpti->send_marker) {
+ marker_frob(cmd);
+ qpti->send_marker = 0;
+ if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
+ sbus_writew(in_ptr, qpti->qregs + MBOX4);
+ qpti->req_in_ptr = in_ptr;
+ goto toss_command;
+ }
+ cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
+ in_ptr = NEXT_REQ_PTR(in_ptr);
+ }
+ cmd_frob(cmd, Cmnd, qpti);
+ if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
+ goto toss_command;
+
+ update_can_queue(host, in_ptr, out_ptr);
+
+ return 0;
+
+toss_command:
+ printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
+ qpti->qpti_id);
+
+ /* Unfortunately, unless you use the new EH code, which
+ * we don't, the midlayer will ignore the return value,
+ * which is insane. We pick up the pieces like this.
+ */
+ Cmnd->result = DID_BUS_BUSY;
+ done(Cmnd);
+ return 1;
+}
+
+static DEF_SCSI_QCMD(qlogicpti_queuecommand)
+
+static int qlogicpti_return_status(struct Status_Entry *sts, int id)
+{
+ int host_status = DID_ERROR;
+
+ switch (sts->completion_status) {
+ case CS_COMPLETE:
+ host_status = DID_OK;
+ break;
+ case CS_INCOMPLETE:
+ if (!(sts->state_flags & SF_GOT_BUS))
+ host_status = DID_NO_CONNECT;
+ else if (!(sts->state_flags & SF_GOT_TARGET))
+ host_status = DID_BAD_TARGET;
+ else if (!(sts->state_flags & SF_SENT_CDB))
+ host_status = DID_ERROR;
+ else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
+ host_status = DID_ERROR;
+ else if (!(sts->state_flags & SF_GOT_STATUS))
+ host_status = DID_ERROR;
+ else if (!(sts->state_flags & SF_GOT_SENSE))
+ host_status = DID_ERROR;
+ break;
+ case CS_DMA_ERROR:
+ case CS_TRANSPORT_ERROR:
+ host_status = DID_ERROR;
+ break;
+ case CS_RESET_OCCURRED:
+ case CS_BUS_RESET:
+ host_status = DID_RESET;
+ break;
+ case CS_ABORTED:
+ host_status = DID_ABORT;
+ break;
+ case CS_TIMEOUT:
+ host_status = DID_TIME_OUT;
+ break;
+ case CS_DATA_OVERRUN:
+ case CS_COMMAND_OVERRUN:
+ case CS_STATUS_OVERRUN:
+ case CS_BAD_MESSAGE:
+ case CS_NO_MESSAGE_OUT:
+ case CS_EXT_ID_FAILED:
+ case CS_IDE_MSG_FAILED:
+ case CS_ABORT_MSG_FAILED:
+ case CS_NOP_MSG_FAILED:
+ case CS_PARITY_ERROR_MSG_FAILED:
+ case CS_DEVICE_RESET_MSG_FAILED:
+ case CS_ID_MSG_FAILED:
+ case CS_UNEXP_BUS_FREE:
+ host_status = DID_ERROR;
+ break;
+ case CS_DATA_UNDERRUN:
+ host_status = DID_OK;
+ break;
+ default:
+ printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
+ id, sts->completion_status);
+ host_status = DID_ERROR;
+ break;
+ }
+
+ return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
+}
+
+static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
+{
+ struct scsi_cmnd *Cmnd, *done_queue = NULL;
+ struct Status_Entry *sts;
+ u_int in_ptr, out_ptr;
+
+ if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
+ return NULL;
+
+ in_ptr = sbus_readw(qpti->qregs + MBOX5);
+ sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
+ if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
+ switch (sbus_readw(qpti->qregs + MBOX0)) {
+ case ASYNC_SCSI_BUS_RESET:
+ case EXECUTION_TIMEOUT_RESET:
+ qpti->send_marker = 1;
+ break;
+ case INVALID_COMMAND:
+ case HOST_INTERFACE_ERROR:
+ case COMMAND_ERROR:
+ case COMMAND_PARAM_ERROR:
+ break;
+ };
+ sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
+ }
+
+ /* This looks like a network driver! */
+ out_ptr = qpti->res_out_ptr;
+ while (out_ptr != in_ptr) {
+ u_int cmd_slot;
+
+ sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
+ out_ptr = NEXT_RES_PTR(out_ptr);
+
+ /* We store an index in the handle, not the pointer in
+ * some form. This avoids problems due to the fact
+ * that the handle provided is only 32-bits. -DaveM
+ */
+ cmd_slot = sts->handle;
+ Cmnd = qpti->cmd_slots[cmd_slot];
+ qpti->cmd_slots[cmd_slot] = NULL;
+
+ if (sts->completion_status == CS_RESET_OCCURRED ||
+ sts->completion_status == CS_ABORTED ||
+ (sts->status_flags & STF_BUS_RESET))
+ qpti->send_marker = 1;
+
+ if (sts->state_flags & SF_GOT_SENSE)
+ memcpy(Cmnd->sense_buffer, sts->req_sense_data,
+ SCSI_SENSE_BUFFERSIZE);
+
+ if (sts->hdr.entry_type == ENTRY_STATUS)
+ Cmnd->result =
+ qlogicpti_return_status(sts, qpti->qpti_id);
+ else
+ Cmnd->result = DID_ERROR << 16;
+
+ if (scsi_bufflen(Cmnd))
+ dma_unmap_sg(&qpti->op->dev,
+ scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
+ Cmnd->sc_data_direction);
+
+ qpti->cmd_count[Cmnd->device->id]--;
+ sbus_writew(out_ptr, qpti->qregs + MBOX5);
+ Cmnd->host_scribble = (unsigned char *) done_queue;
+ done_queue = Cmnd;
+ }
+ qpti->res_out_ptr = out_ptr;
+
+ return done_queue;
+}
+
+static irqreturn_t qpti_intr(int irq, void *dev_id)
+{
+ struct qlogicpti *qpti = dev_id;
+ unsigned long flags;
+ struct scsi_cmnd *dq;
+
+ spin_lock_irqsave(qpti->qhost->host_lock, flags);
+ dq = qlogicpti_intr_handler(qpti);
+
+ if (dq != NULL) {
+ do {
+ struct scsi_cmnd *next;
+
+ next = (struct scsi_cmnd *) dq->host_scribble;
+ dq->scsi_done(dq);
+ dq = next;
+ } while (dq != NULL);
+ }
+ spin_unlock_irqrestore(qpti->qhost->host_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
+{
+ u_short param[6];
+ struct Scsi_Host *host = Cmnd->device->host;
+ struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
+ int return_status = SUCCESS;
+ u32 cmd_cookie;
+ int i;
+
+ printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
+ qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);
+
+ qlogicpti_disable_irqs(qpti);
+
+ /* Find the 32-bit cookie we gave to the firmware for
+ * this command.
+ */
+ for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
+ if (qpti->cmd_slots[i] == Cmnd)
+ break;
+ cmd_cookie = i;
+
+ param[0] = MBOX_ABORT;
+ param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
+ param[2] = cmd_cookie >> 16;
+ param[3] = cmd_cookie & 0xffff;
+ if (qlogicpti_mbox_command(qpti, param, 0) ||
+ (param[0] != MBOX_COMMAND_COMPLETE)) {
+ printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
+ qpti->qpti_id, param[0]);
+ return_status = FAILED;
+ }
+
+ qlogicpti_enable_irqs(qpti);
+
+ return return_status;
+}
+
+static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
+{
+ u_short param[6];
+ struct Scsi_Host *host = Cmnd->device->host;
+ struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
+ int return_status = SUCCESS;
+
+ printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
+ qpti->qpti_id);
+
+ qlogicpti_disable_irqs(qpti);
+
+ param[0] = MBOX_BUS_RESET;
+ param[1] = qpti->host_param.bus_reset_delay;
+ if (qlogicpti_mbox_command(qpti, param, 0) ||
+ (param[0] != MBOX_COMMAND_COMPLETE)) {
+ printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n",
+ qpti->qpti_id, param[0]);
+ return_status = FAILED;
+ }
+
+ qlogicpti_enable_irqs(qpti);
+
+ return return_status;
+}
+
+static struct scsi_host_template qpti_template = {
+ .module = THIS_MODULE,
+ .name = "qlogicpti",
+ .info = qlogicpti_info,
+ .queuecommand = qlogicpti_queuecommand,
+ .slave_configure = qlogicpti_slave_configure,
+ .eh_abort_handler = qlogicpti_abort,
+ .eh_bus_reset_handler = qlogicpti_reset,
+ .can_queue = QLOGICPTI_REQ_QUEUE_LEN,
+ .this_id = 7,
+ .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static const struct of_device_id qpti_match[];
+static int qpti_sbus_probe(struct platform_device *op)
+{
+ struct device_node *dp = op->dev.of_node;
+ struct Scsi_Host *host;
+ struct qlogicpti *qpti;
+ static int nqptis;
+ const char *fcode;
+
+ /* Sometimes Antares cards come up not completely
+ * setup, and we get a report of a zero IRQ.
+ */
+ if (op->archdata.irqs[0] == 0)
+ return -ENODEV;
+
+ host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti));
+ if (!host)
+ return -ENOMEM;
+
+ qpti = shost_priv(host);
+
+ host->max_id = MAX_TARGETS;
+ qpti->qhost = host;
+ qpti->op = op;
+ qpti->qpti_id = nqptis;
+ strcpy(qpti->prom_name, op->dev.of_node->name);
+ qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
+
+ if (qpti_map_regs(qpti) < 0)
+ goto fail_unlink;
+
+ if (qpti_register_irq(qpti) < 0)
+ goto fail_unmap_regs;
+
+ qpti_get_scsi_id(qpti);
+ qpti_get_bursts(qpti);
+ qpti_get_clock(qpti);
+
+ /* Clear out scsi_cmnd array. */
+ memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));
+
+ if (qpti_map_queues(qpti) < 0)
+ goto fail_free_irq;
+
+ /* Load the firmware. */
+ if (qlogicpti_load_firmware(qpti))
+ goto fail_unmap_queues;
+ if (qpti->is_pti) {
+ /* Check the PTI status reg. */
+ if (qlogicpti_verify_tmon(qpti))
+ goto fail_unmap_queues;
+ }
+
+ /* Reset the ISP and init res/req queues. */
+ if (qlogicpti_reset_hardware(host))
+ goto fail_unmap_queues;
+
+ printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
+ qpti->fware_minrev, qpti->fware_micrev);
+
+ fcode = of_get_property(dp, "isp-fcode", NULL);
+ if (fcode && fcode[0])
+ printk("(FCode %s)", fcode);
+ if (of_find_property(dp, "differential", NULL) != NULL)
+ qpti->differential = 1;
+
+ printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
+ qpti->qpti_id,
+ (qpti->ultra ? "Ultra" : "Fast"),
+ (qpti->differential ? "differential" : "single ended"));
+
+ if (scsi_add_host(host, &op->dev)) {
+ printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
+ goto fail_unmap_queues;
+ }
+
+ dev_set_drvdata(&op->dev, qpti);
+
+ qpti_chain_add(qpti);
+
+ scsi_scan_host(host);
+ nqptis++;
+
+ return 0;
+
+fail_unmap_queues:
+#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
+ dma_free_coherent(&op->dev,
+ QSIZE(RES_QUEUE_LEN),
+ qpti->res_cpu, qpti->res_dvma);
+ dma_free_coherent(&op->dev,
+ QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
+ qpti->req_cpu, qpti->req_dvma);
+#undef QSIZE
+
+fail_unmap_regs:
+ of_iounmap(&op->resource[0], qpti->qregs,
+ resource_size(&op->resource[0]));
+ if (qpti->is_pti)
+ of_iounmap(&op->resource[0], qpti->sreg,
+ sizeof(unsigned char));
+
+fail_free_irq:
+ free_irq(qpti->irq, qpti);
+
+fail_unlink:
+ scsi_host_put(host);
+
+ return -ENODEV;
+}
+
+static int qpti_sbus_remove(struct platform_device *op)
+{
+ struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
+
+ qpti_chain_del(qpti);
+
+ scsi_remove_host(qpti->qhost);
+
+ /* Shut up the card. */
+ sbus_writew(0, qpti->qregs + SBUS_CTRL);
+
+ /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
+ free_irq(qpti->irq, qpti);
+
+#define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN)
+ dma_free_coherent(&op->dev,
+ QSIZE(RES_QUEUE_LEN),
+ qpti->res_cpu, qpti->res_dvma);
+ dma_free_coherent(&op->dev,
+ QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
+ qpti->req_cpu, qpti->req_dvma);
+#undef QSIZE
+
+ of_iounmap(&op->resource[0], qpti->qregs,
+ resource_size(&op->resource[0]));
+ if (qpti->is_pti)
+ of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));
+
+ scsi_host_put(qpti->qhost);
+
+ return 0;
+}
+
+static const struct of_device_id qpti_match[] = {
+ {
+ .name = "ptisp",
+ },
+ {
+ .name = "PTI,ptisp",
+ },
+ {
+ .name = "QLGC,isp",
+ },
+ {
+ .name = "SUNW,isp",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qpti_match);
+
+static struct platform_driver qpti_sbus_driver = {
+ .driver = {
+ .name = "qpti",
+ .of_match_table = qpti_match,
+ },
+ .probe = qpti_sbus_probe,
+ .remove = qpti_sbus_remove,
+};
+
+static int __init qpti_init(void)
+{
+ return platform_driver_register(&qpti_sbus_driver);
+}
+
+static void __exit qpti_exit(void)
+{
+ platform_driver_unregister(&qpti_sbus_driver);
+}
+
+MODULE_DESCRIPTION("QlogicISP SBUS driver");
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.1");
+/*(DEBLOBBED)*/
+
+module_init(qpti_init);
+module_exit(qpti_exit);
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
new file mode 100644
index 000000000..4377e87ee
--- /dev/null
+++ b/drivers/scsi/qlogicpti.h
@@ -0,0 +1,507 @@
+/* qlogicpti.h: Performance Technologies QlogicISP sbus card defines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ */
+
+#ifndef _QLOGICPTI_H
+#define _QLOGICPTI_H
+
+/* Qlogic/SBUS controller registers. */
+#define SBUS_CFG1 0x006UL
+#define SBUS_CTRL 0x008UL
+#define SBUS_STAT 0x00aUL
+#define SBUS_SEMAPHORE 0x00cUL
+#define CMD_DMA_CTRL 0x022UL
+#define DATA_DMA_CTRL 0x042UL
+#define MBOX0 0x080UL
+#define MBOX1 0x082UL
+#define MBOX2 0x084UL
+#define MBOX3 0x086UL
+#define MBOX4 0x088UL
+#define MBOX5 0x08aUL
+#define CPU_CMD 0x214UL
+#define CPU_ORIDE 0x224UL
+#define CPU_PCTRL 0x272UL
+#define CPU_PDIFF 0x276UL
+#define RISC_PSR 0x420UL
+#define RISC_MTREG 0x42EUL
+#define HCCTRL 0x440UL
+
+/* SCSI parameters for this driver. */
+#define MAX_TARGETS 16
+#define MAX_LUNS 8
+
+/* With the qlogic interface, every queue slot can hold a SCSI
+ * command with up to 4 scatter/gather entries. If we need more
+ * than 4 entries, continuation entries can be used that hold
+ * another 7 entries each. Unlike for other drivers, this means
+ * that the maximum number of scatter/gather entries we can
+ * support at any given time is a function of the number of queue
+ * slots available. That is, host->can_queue and host->sg_tablesize
+ * are dynamic and _not_ independent. This all works fine because
+ * requests are queued serially and the scatter/gather limit is
+ * determined for each queue request anew.
+ */
+#define QLOGICPTI_REQ_QUEUE_LEN 255 /* must be power of two - 1 */
+#define QLOGICPTI_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0))
+
+/* mailbox command complete status codes */
+#define MBOX_COMMAND_COMPLETE 0x4000
+#define INVALID_COMMAND 0x4001
+#define HOST_INTERFACE_ERROR 0x4002
+#define TEST_FAILED 0x4003
+#define COMMAND_ERROR 0x4005
+#define COMMAND_PARAM_ERROR 0x4006
+
+/* async event status codes */
+#define ASYNC_SCSI_BUS_RESET 0x8001
+#define SYSTEM_ERROR 0x8002
+#define REQUEST_TRANSFER_ERROR 0x8003
+#define RESPONSE_TRANSFER_ERROR 0x8004
+#define REQUEST_QUEUE_WAKEUP 0x8005
+#define EXECUTION_TIMEOUT_RESET 0x8006
+
+/* Am I fucking pedantic or what? */
+struct Entry_header {
+#ifdef __BIG_ENDIAN
+ u8 entry_cnt;
+ u8 entry_type;
+ u8 flags;
+ u8 sys_def_1;
+#else /* __LITTLE_ENDIAN */
+ u8 entry_type;
+ u8 entry_cnt;
+ u8 sys_def_1;
+ u8 flags;
+#endif
+};
+
+/* entry header type commands */
+#define ENTRY_COMMAND 1
+#define ENTRY_CONTINUATION 2
+#define ENTRY_STATUS 3
+#define ENTRY_MARKER 4
+#define ENTRY_EXTENDED_COMMAND 5
+
+/* entry header flag definitions */
+#define EFLAG_CONTINUATION 1
+#define EFLAG_BUSY 2
+#define EFLAG_BAD_HEADER 4
+#define EFLAG_BAD_PAYLOAD 8
+
+struct dataseg {
+ u32 d_base;
+ u32 d_count;
+};
+
+struct Command_Entry {
+ struct Entry_header hdr;
+ u32 handle;
+#ifdef __BIG_ENDIAN
+ u8 target_id;
+ u8 target_lun;
+#else /* __LITTLE_ENDIAN */
+ u8 target_lun;
+ u8 target_id;
+#endif
+ u16 cdb_length;
+ u16 control_flags;
+ u16 rsvd;
+ u16 time_out;
+ u16 segment_cnt;
+ u8 cdb[12];
+ struct dataseg dataseg[4];
+};
+
+/* command entry control flag definitions */
+#define CFLAG_NODISC 0x01
+#define CFLAG_HEAD_TAG 0x02
+#define CFLAG_ORDERED_TAG 0x04
+#define CFLAG_SIMPLE_TAG 0x08
+#define CFLAG_TAR_RTN 0x10
+#define CFLAG_READ 0x20
+#define CFLAG_WRITE 0x40
+
+struct Ext_Command_Entry {
+ struct Entry_header hdr;
+ u32 handle;
+#ifdef __BIG_ENDIAN
+ u8 target_id;
+ u8 target_lun;
+#else /* __LITTLE_ENDIAN */
+ u8 target_lun;
+ u8 target_id;
+#endif
+ u16 cdb_length;
+ u16 control_flags;
+ u16 rsvd;
+ u16 time_out;
+ u16 segment_cnt;
+ u8 cdb[44];
+};
+
+struct Continuation_Entry {
+ struct Entry_header hdr;
+ u32 reserved;
+ struct dataseg dataseg[7];
+};
+
+struct Marker_Entry {
+ struct Entry_header hdr;
+ u32 reserved;
+#ifdef __BIG_ENDIAN
+ u8 target_id;
+ u8 target_lun;
+#else /* __LITTLE_ENDIAN */
+ u8 target_lun;
+ u8 target_id;
+#endif
+#ifdef __BIG_ENDIAN
+ u8 rsvd;
+ u8 modifier;
+#else /* __LITTLE_ENDIAN */
+ u8 modifier;
+ u8 rsvd;
+#endif
+ u8 rsvds[52];
+};
+
+/* marker entry modifier definitions */
+#define SYNC_DEVICE 0
+#define SYNC_TARGET 1
+#define SYNC_ALL 2
+
+struct Status_Entry {
+ struct Entry_header hdr;
+ u32 handle;
+ u16 scsi_status;
+ u16 completion_status;
+ u16 state_flags;
+ u16 status_flags;
+ u16 time;
+ u16 req_sense_len;
+ u32 residual;
+ u8 rsvd[8];
+ u8 req_sense_data[32];
+};
+
+/* status entry completion status definitions */
+#define CS_COMPLETE 0x0000
+#define CS_INCOMPLETE 0x0001
+#define CS_DMA_ERROR 0x0002
+#define CS_TRANSPORT_ERROR 0x0003
+#define CS_RESET_OCCURRED 0x0004
+#define CS_ABORTED 0x0005
+#define CS_TIMEOUT 0x0006
+#define CS_DATA_OVERRUN 0x0007
+#define CS_COMMAND_OVERRUN 0x0008
+#define CS_STATUS_OVERRUN 0x0009
+#define CS_BAD_MESSAGE 0x000a
+#define CS_NO_MESSAGE_OUT 0x000b
+#define CS_EXT_ID_FAILED 0x000c
+#define CS_IDE_MSG_FAILED 0x000d
+#define CS_ABORT_MSG_FAILED 0x000e
+#define CS_REJECT_MSG_FAILED 0x000f
+#define CS_NOP_MSG_FAILED 0x0010
+#define CS_PARITY_ERROR_MSG_FAILED 0x0011
+#define CS_DEVICE_RESET_MSG_FAILED 0x0012
+#define CS_ID_MSG_FAILED 0x0013
+#define CS_UNEXP_BUS_FREE 0x0014
+#define CS_DATA_UNDERRUN 0x0015
+#define CS_BUS_RESET 0x001c
+
+/* status entry state flag definitions */
+#define SF_GOT_BUS 0x0100
+#define SF_GOT_TARGET 0x0200
+#define SF_SENT_CDB 0x0400
+#define SF_TRANSFERRED_DATA 0x0800
+#define SF_GOT_STATUS 0x1000
+#define SF_GOT_SENSE 0x2000
+
+/* status entry status flag definitions */
+#define STF_DISCONNECT 0x0001
+#define STF_SYNCHRONOUS 0x0002
+#define STF_PARITY_ERROR 0x0004
+#define STF_BUS_RESET 0x0008
+#define STF_DEVICE_RESET 0x0010
+#define STF_ABORTED 0x0020
+#define STF_TIMEOUT 0x0040
+#define STF_NEGOTIATION 0x0080
+
+/* mailbox commands */
+#define MBOX_NO_OP 0x0000
+#define MBOX_LOAD_RAM 0x0001
+#define MBOX_EXEC_FIRMWARE 0x0002
+#define MBOX_DUMP_RAM 0x0003
+#define MBOX_WRITE_RAM_WORD 0x0004
+#define MBOX_READ_RAM_WORD 0x0005
+#define MBOX_MAILBOX_REG_TEST 0x0006
+#define MBOX_VERIFY_CHECKSUM 0x0007
+#define MBOX_ABOUT_FIRMWARE 0x0008
+#define MBOX_CHECK_FIRMWARE 0x000e
+#define MBOX_INIT_REQ_QUEUE 0x0010
+#define MBOX_INIT_RES_QUEUE 0x0011
+#define MBOX_EXECUTE_IOCB 0x0012
+#define MBOX_WAKE_UP 0x0013
+#define MBOX_STOP_FIRMWARE 0x0014
+#define MBOX_ABORT 0x0015
+#define MBOX_ABORT_DEVICE 0x0016
+#define MBOX_ABORT_TARGET 0x0017
+#define MBOX_BUS_RESET 0x0018
+#define MBOX_STOP_QUEUE 0x0019
+#define MBOX_START_QUEUE 0x001a
+#define MBOX_SINGLE_STEP_QUEUE 0x001b
+#define MBOX_ABORT_QUEUE 0x001c
+#define MBOX_GET_DEV_QUEUE_STATUS 0x001d
+#define MBOX_GET_FIRMWARE_STATUS 0x001f
+#define MBOX_GET_INIT_SCSI_ID 0x0020
+#define MBOX_GET_SELECT_TIMEOUT 0x0021
+#define MBOX_GET_RETRY_COUNT 0x0022
+#define MBOX_GET_TAG_AGE_LIMIT 0x0023
+#define MBOX_GET_CLOCK_RATE 0x0024
+#define MBOX_GET_ACT_NEG_STATE 0x0025
+#define MBOX_GET_ASYNC_DATA_SETUP_TIME 0x0026
+#define MBOX_GET_SBUS_PARAMS 0x0027
+#define MBOX_GET_TARGET_PARAMS 0x0028
+#define MBOX_GET_DEV_QUEUE_PARAMS 0x0029
+#define MBOX_SET_INIT_SCSI_ID 0x0030
+#define MBOX_SET_SELECT_TIMEOUT 0x0031
+#define MBOX_SET_RETRY_COUNT 0x0032
+#define MBOX_SET_TAG_AGE_LIMIT 0x0033
+#define MBOX_SET_CLOCK_RATE 0x0034
+#define MBOX_SET_ACTIVE_NEG_STATE 0x0035
+#define MBOX_SET_ASYNC_DATA_SETUP_TIME 0x0036
+#define MBOX_SET_SBUS_CONTROL_PARAMS 0x0037
+#define MBOX_SET_TARGET_PARAMS 0x0038
+#define MBOX_SET_DEV_QUEUE_PARAMS 0x0039
+
+struct host_param {
+ u_short initiator_scsi_id;
+ u_short bus_reset_delay;
+ u_short retry_count;
+ u_short retry_delay;
+ u_short async_data_setup_time;
+ u_short req_ack_active_negation;
+ u_short data_line_active_negation;
+ u_short data_dma_burst_enable;
+ u_short command_dma_burst_enable;
+ u_short tag_aging;
+ u_short selection_timeout;
+ u_short max_queue_depth;
+};
+
+/*
+ * Device Flags:
+ *
+ * Bit Name
+ * ---------
+ * 7 Disconnect Privilege
+ * 6 Parity Checking
+ * 5 Wide Data Transfers
+ * 4 Synchronous Data Transfers
+ * 3 Tagged Queuing
+ * 2 Automatic Request Sense
+ * 1 Stop Queue on Check Condition
+ * 0 Renegotiate on Error
+ */
+
+struct dev_param {
+ u_short device_flags;
+ u_short execution_throttle;
+ u_short synchronous_period;
+ u_short synchronous_offset;
+ u_short device_enable;
+ u_short reserved; /* pad */
+};
+
+/*
+ * The result queue can be quite a bit smaller since continuation entries
+ * do not show up there:
+ */
+#define RES_QUEUE_LEN 255 /* Must be power of two - 1 */
+#define QUEUE_ENTRY_LEN 64
+
+#define NEXT_REQ_PTR(wheee) (((wheee) + 1) & QLOGICPTI_REQ_QUEUE_LEN)
+#define NEXT_RES_PTR(wheee) (((wheee) + 1) & RES_QUEUE_LEN)
+#define PREV_REQ_PTR(wheee) (((wheee) - 1) & QLOGICPTI_REQ_QUEUE_LEN)
+#define PREV_RES_PTR(wheee) (((wheee) - 1) & RES_QUEUE_LEN)
+
+struct pti_queue_entry {
+ char __opaque[QUEUE_ENTRY_LEN];
+};
+
+struct scsi_cmnd;
+
+/* Software state for the driver. */
+struct qlogicpti {
+ /* These are the hot elements in the cache, so they come first. */
+ void __iomem *qregs; /* Adapter registers */
+ struct pti_queue_entry *res_cpu; /* Ptr to RESPONSE bufs (CPU) */
+ struct pti_queue_entry *req_cpu; /* Ptr to REQUEST bufs (CPU) */
+
+ u_int req_in_ptr; /* index of next request slot */
+ u_int res_out_ptr; /* index of next result slot */
+ long send_marker; /* must we send a marker? */
+ struct platform_device *op;
+ unsigned long __pad;
+
+ int cmd_count[MAX_TARGETS];
+ unsigned long tag_ages[MAX_TARGETS];
+
+ /* The cmd->handler is only 32-bits, so that things work even on monster
+ * Ex000 sparc64 machines with >4GB of ram we just keep track of the
+ * scsi command pointers here. This is essentially what Matt Jacob does. -DaveM
+ */
+ struct scsi_cmnd *cmd_slots[QLOGICPTI_REQ_QUEUE_LEN + 1];
+
+ /* The rest of the elements are unimportant for performance. */
+ struct qlogicpti *next;
+ __u32 res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/
+ __u32 req_dvma; /* Ptr to REQUEST bufs (DVMA) */
+ u_char fware_majrev, fware_minrev, fware_micrev;
+ struct Scsi_Host *qhost;
+ int qpti_id;
+ int scsi_id;
+ int prom_node;
+ char prom_name[64];
+ int irq;
+ char differential, ultra, clock;
+ unsigned char bursts;
+ struct host_param host_param;
+ struct dev_param dev_param[MAX_TARGETS];
+
+ void __iomem *sreg;
+#define SREG_TPOWER 0x80 /* State of termpwr */
+#define SREG_FUSE 0x40 /* State of on board fuse */
+#define SREG_PDISAB 0x20 /* Disable state for power on */
+#define SREG_DSENSE 0x10 /* Sense for differential */
+#define SREG_IMASK 0x0c /* Interrupt level */
+#define SREG_SPMASK 0x03 /* Mask for switch pack */
+ unsigned char swsreg;
+ unsigned int
+ gotirq : 1, /* this instance got an irq */
+ is_pti : 1; /* Non-zero if this is a PTI board. */
+};
+
+/* How to twiddle them bits... */
+
+/* SBUS config register one. */
+#define SBUS_CFG1_EPAR 0x0100 /* Enable parity checking */
+#define SBUS_CFG1_FMASK 0x00f0 /* Forth code cycle mask */
+#define SBUS_CFG1_BENAB 0x0004 /* Burst dvma enable */
+#define SBUS_CFG1_B64 0x0003 /* Enable 64byte bursts */
+#define SBUS_CFG1_B32 0x0002 /* Enable 32byte bursts */
+#define SBUS_CFG1_B16 0x0001 /* Enable 16byte bursts */
+#define SBUS_CFG1_B8 0x0008 /* Enable 8byte bursts */
+
+/* SBUS control register */
+#define SBUS_CTRL_EDIRQ 0x0020 /* Enable Data DVMA Interrupts */
+#define SBUS_CTRL_ECIRQ 0x0010 /* Enable Command DVMA Interrupts */
+#define SBUS_CTRL_ESIRQ 0x0008 /* Enable SCSI Processor Interrupts */
+#define SBUS_CTRL_ERIRQ 0x0004 /* Enable RISC Processor Interrupts */
+#define SBUS_CTRL_GENAB 0x0002 /* Global Interrupt Enable */
+#define SBUS_CTRL_RESET 0x0001 /* Soft Reset */
+
+/* SBUS status register */
+#define SBUS_STAT_DINT 0x0020 /* Data DVMA IRQ pending */
+#define SBUS_STAT_CINT 0x0010 /* Command DVMA IRQ pending */
+#define SBUS_STAT_SINT 0x0008 /* SCSI Processor IRQ pending */
+#define SBUS_STAT_RINT 0x0004 /* RISC Processor IRQ pending */
+#define SBUS_STAT_GINT 0x0002 /* Global IRQ pending */
+
+/* SBUS semaphore register */
+#define SBUS_SEMAPHORE_STAT 0x0002 /* Semaphore status bit */
+#define SBUS_SEMAPHORE_LCK 0x0001 /* Semaphore lock bit */
+
+/* DVMA control register */
+#define DMA_CTRL_CSUSPEND 0x0010 /* DMA channel suspend */
+#define DMA_CTRL_CCLEAR 0x0008 /* DMA channel clear and reset */
+#define DMA_CTRL_FCLEAR 0x0004 /* DMA fifo clear */
+#define DMA_CTRL_CIRQ 0x0002 /* DMA irq clear */
+#define DMA_CTRL_DMASTART 0x0001 /* DMA transfer start */
+
+/* SCSI processor override register */
+#define CPU_ORIDE_ETRIG 0x8000 /* External trigger enable */
+#define CPU_ORIDE_STEP 0x4000 /* Single step mode enable */
+#define CPU_ORIDE_BKPT 0x2000 /* Breakpoint reg enable */
+#define CPU_ORIDE_PWRITE 0x1000 /* SCSI pin write enable */
+#define CPU_ORIDE_OFORCE 0x0800 /* Force outputs on */
+#define CPU_ORIDE_LBACK 0x0400 /* SCSI loopback enable */
+#define CPU_ORIDE_PTEST 0x0200 /* Parity test enable */
+#define CPU_ORIDE_TENAB 0x0100 /* SCSI pins tristate enable */
+#define CPU_ORIDE_TPINS 0x0080 /* SCSI pins enable */
+#define CPU_ORIDE_FRESET 0x0008 /* FIFO reset */
+#define CPU_ORIDE_CTERM 0x0004 /* Command terminate */
+#define CPU_ORIDE_RREG 0x0002 /* Reset SCSI processor regs */
+#define CPU_ORIDE_RMOD 0x0001 /* Reset SCSI processor module */
+
+/* SCSI processor commands */
+#define CPU_CMD_BRESET 0x300b /* Reset SCSI bus */
+
+/* SCSI processor pin control register */
+#define CPU_PCTRL_PVALID 0x8000 /* Phase bits are valid */
+#define CPU_PCTRL_PHI 0x0400 /* Parity bit high */
+#define CPU_PCTRL_PLO 0x0200 /* Parity bit low */
+#define CPU_PCTRL_REQ 0x0100 /* REQ bus signal */
+#define CPU_PCTRL_ACK 0x0080 /* ACK bus signal */
+#define CPU_PCTRL_RST 0x0040 /* RST bus signal */
+#define CPU_PCTRL_BSY 0x0020 /* BSY bus signal */
+#define CPU_PCTRL_SEL 0x0010 /* SEL bus signal */
+#define CPU_PCTRL_ATN 0x0008 /* ATN bus signal */
+#define CPU_PCTRL_MSG 0x0004 /* MSG bus signal */
+#define CPU_PCTRL_CD 0x0002 /* CD bus signal */
+#define CPU_PCTRL_IO 0x0001 /* IO bus signal */
+
+/* SCSI processor differential pins register */
+#define CPU_PDIFF_SENSE 0x0200 /* Differential sense */
+#define CPU_PDIFF_MODE 0x0100 /* Differential mode */
+#define CPU_PDIFF_OENAB 0x0080 /* Outputs enable */
+#define CPU_PDIFF_PMASK 0x007c /* Differential control pins */
+#define CPU_PDIFF_TGT 0x0002 /* Target mode enable */
+#define CPU_PDIFF_INIT 0x0001 /* Initiator mode enable */
+
+/* RISC processor status register */
+#define RISC_PSR_FTRUE 0x8000 /* Force true */
+#define RISC_PSR_LCD 0x4000 /* Loop counter shows done status */
+#define RISC_PSR_RIRQ 0x2000 /* RISC irq status */
+#define RISC_PSR_TOFLOW 0x1000 /* Timer overflow (rollover) */
+#define RISC_PSR_AOFLOW 0x0800 /* Arithmetic overflow */
+#define RISC_PSR_AMSB 0x0400 /* Arithmetic big endian */
+#define RISC_PSR_ACARRY 0x0200 /* Arithmetic carry */
+#define RISC_PSR_AZERO 0x0100 /* Arithmetic zero */
+#define RISC_PSR_ULTRA 0x0020 /* Ultra mode */
+#define RISC_PSR_DIRQ 0x0010 /* DVMA interrupt */
+#define RISC_PSR_SIRQ 0x0008 /* SCSI processor interrupt */
+#define RISC_PSR_HIRQ 0x0004 /* Host interrupt */
+#define RISC_PSR_IPEND 0x0002 /* Interrupt pending */
+#define RISC_PSR_FFALSE 0x0001 /* Force false */
+
+/* RISC processor memory timing register */
+#define RISC_MTREG_P1DFLT 0x1200 /* Default read/write timing, pg1 */
+#define RISC_MTREG_P0DFLT 0x0012 /* Default read/write timing, pg0 */
+#define RISC_MTREG_P1ULTRA 0x2300 /* Ultra-mode rw timing, pg1 */
+#define RISC_MTREG_P0ULTRA 0x0023 /* Ultra-mode rw timing, pg0 */
+
+/* Host command/ctrl register */
+#define HCCTRL_NOP 0x0000 /* CMD: No operation */
+#define HCCTRL_RESET 0x1000 /* CMD: Reset RISC cpu */
+#define HCCTRL_PAUSE 0x2000 /* CMD: Pause RISC cpu */
+#define HCCTRL_REL 0x3000 /* CMD: Release paused RISC cpu */
+#define HCCTRL_STEP 0x4000 /* CMD: Single step RISC cpu */
+#define HCCTRL_SHIRQ 0x5000 /* CMD: Set host irq */
+#define HCCTRL_CHIRQ 0x6000 /* CMD: Clear host irq */
+#define HCCTRL_CRIRQ 0x7000 /* CMD: Clear RISC cpu irq */
+#define HCCTRL_BKPT 0x8000 /* CMD: Breakpoint enables change */
+#define HCCTRL_TMODE 0xf000 /* CMD: Enable test mode */
+#define HCCTRL_HIRQ 0x0080 /* Host IRQ pending */
+#define HCCTRL_RRIP 0x0040 /* RISC cpu reset in happening now */
+#define HCCTRL_RPAUSED 0x0020 /* RISC cpu is paused now */
+#define HCCTRL_EBENAB 0x0010 /* External breakpoint enable */
+#define HCCTRL_B1ENAB 0x0008 /* Breakpoint 1 enable */
+#define HCCTRL_B0ENAB 0x0004 /* Breakpoint 0 enable */
+
+/* For our interrupt engine. */
+#define for_each_qlogicpti(qp) \
+ for((qp) = qptichain; (qp); (qp) = (qp)->next)
+
+#endif /* !(_QLOGICPTI_H) */
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
new file mode 100644
index 000000000..2c146b44d
--- /dev/null
+++ b/drivers/scsi/raid_class.c
@@ -0,0 +1,317 @@
+/*
+ * raid_class.c - implementation of a simple raid visualisation class
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * This file is licensed under GPLv2
+ *
+ * This class is designed to allow raid attributes to be visualised and
+ * manipulated in a form independent of the underlying raid. Ultimately this
+ * should work for both hardware and software raids.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/raid_class.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#define RAID_NUM_ATTRS 3
+
+struct raid_internal {
+ struct raid_template r;
+ struct raid_function_template *f;
+ /* The actual attributes */
+ struct device_attribute private_attrs[RAID_NUM_ATTRS];
+ /* The array of null terminated pointers to attributes
+ * needed by scsi_sysfs.c */
+ struct device_attribute *attrs[RAID_NUM_ATTRS + 1];
+};
+
+struct raid_component {
+ struct list_head node;
+ struct device dev;
+ int num;
+};
+
+#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r)
+
+#define tc_to_raid_internal(tcont) ({ \
+ struct raid_template *r = \
+ container_of(tcont, struct raid_template, raid_attrs); \
+ to_raid_internal(r); \
+})
+
+#define ac_to_raid_internal(acont) ({ \
+ struct transport_container *tc = \
+ container_of(acont, struct transport_container, ac); \
+ tc_to_raid_internal(tc); \
+})
+
+#define device_to_raid_internal(dev) ({ \
+ struct attribute_container *ac = \
+ attribute_container_classdev_to_container(dev); \
+ ac_to_raid_internal(ac); \
+})
+
+
+static int raid_match(struct attribute_container *cont, struct device *dev)
+{
+ /* We have to look for every subsystem that could house
+ * emulated RAID devices, so start with SCSI */
+ struct raid_internal *i = ac_to_raid_internal(cont);
+
+#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
+ if (scsi_is_sdev_device(dev)) {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (i->f->cookie != sdev->host->hostt)
+ return 0;
+
+ return i->f->is_raid(dev);
+ }
+#endif
+ /* FIXME: look at other subsystems too */
+ return 0;
+}
+
+static int raid_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct raid_data *rd;
+
+ BUG_ON(dev_get_drvdata(cdev));
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&rd->component_list);
+ dev_set_drvdata(cdev, rd);
+
+ return 0;
+}
+
+static int raid_remove(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct raid_data *rd = dev_get_drvdata(cdev);
+ struct raid_component *rc, *next;
+ dev_printk(KERN_ERR, dev, "RAID REMOVE\n");
+ dev_set_drvdata(cdev, NULL);
+ list_for_each_entry_safe(rc, next, &rd->component_list, node) {
+ list_del(&rc->node);
+ dev_printk(KERN_ERR, rc->dev.parent, "RAID COMPONENT REMOVE\n");
+ device_unregister(&rc->dev);
+ }
+ dev_printk(KERN_ERR, dev, "RAID REMOVE DONE\n");
+ kfree(rd);
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(raid_class,
+ "raid_devices",
+ raid_setup,
+ raid_remove,
+ NULL);
+
+static const struct {
+ enum raid_state value;
+ char *name;
+} raid_states[] = {
+ { RAID_STATE_UNKNOWN, "unknown" },
+ { RAID_STATE_ACTIVE, "active" },
+ { RAID_STATE_DEGRADED, "degraded" },
+ { RAID_STATE_RESYNCING, "resyncing" },
+ { RAID_STATE_OFFLINE, "offline" },
+};
+
+static const char *raid_state_name(enum raid_state state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(raid_states); i++) {
+ if (raid_states[i].value == state) {
+ name = raid_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+static struct {
+ enum raid_level value;
+ char *name;
+} raid_levels[] = {
+ { RAID_LEVEL_UNKNOWN, "unknown" },
+ { RAID_LEVEL_LINEAR, "linear" },
+ { RAID_LEVEL_0, "raid0" },
+ { RAID_LEVEL_1, "raid1" },
+ { RAID_LEVEL_10, "raid10" },
+ { RAID_LEVEL_1E, "raid1e" },
+ { RAID_LEVEL_3, "raid3" },
+ { RAID_LEVEL_4, "raid4" },
+ { RAID_LEVEL_5, "raid5" },
+ { RAID_LEVEL_50, "raid50" },
+ { RAID_LEVEL_6, "raid6" },
+};
+
+static const char *raid_level_name(enum raid_level level)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(raid_levels); i++) {
+ if (raid_levels[i].value == level) {
+ name = raid_levels[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+#define raid_attr_show_internal(attr, fmt, var, code) \
+static ssize_t raid_show_##attr(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct raid_data *rd = dev_get_drvdata(dev); \
+ code \
+ return snprintf(buf, 20, #fmt "\n", var); \
+}
+
+#define raid_attr_ro_states(attr, states, code) \
+raid_attr_show_internal(attr, %s, name, \
+ const char *name; \
+ code \
+ name = raid_##states##_name(rd->attr); \
+) \
+static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
+
+
+#define raid_attr_ro_internal(attr, code) \
+raid_attr_show_internal(attr, %d, rd->attr, code) \
+static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
+
+#define ATTR_CODE(attr) \
+ struct raid_internal *i = device_to_raid_internal(dev); \
+ if (i->f->get_##attr) \
+ i->f->get_##attr(dev->parent);
+
+#define raid_attr_ro(attr) raid_attr_ro_internal(attr, )
+#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr))
+#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, )
+#define raid_attr_ro_state_fn(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr))
+
+
+raid_attr_ro_state(level);
+raid_attr_ro_fn(resync);
+raid_attr_ro_state_fn(state);
+
+static void raid_component_release(struct device *dev)
+{
+ struct raid_component *rc =
+ container_of(dev, struct raid_component, dev);
+ dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
+ put_device(rc->dev.parent);
+ kfree(rc);
+}
+
+int raid_component_add(struct raid_template *r,struct device *raid_dev,
+ struct device *component_dev)
+{
+ struct device *cdev =
+ attribute_container_find_class_device(&r->raid_attrs.ac,
+ raid_dev);
+ struct raid_component *rc;
+ struct raid_data *rd = dev_get_drvdata(cdev);
+ int err;
+
+ rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&rc->node);
+ device_initialize(&rc->dev);
+ rc->dev.release = raid_component_release;
+ rc->dev.parent = get_device(component_dev);
+ rc->num = rd->component_count++;
+
+ dev_set_name(&rc->dev, "component-%d", rc->num);
+ list_add_tail(&rc->node, &rd->component_list);
+ rc->dev.class = &raid_class.class;
+ err = device_add(&rc->dev);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ list_del(&rc->node);
+ rd->component_count--;
+ put_device(component_dev);
+ kfree(rc);
+ return err;
+}
+EXPORT_SYMBOL(raid_component_add);
+
+struct raid_template *
+raid_class_attach(struct raid_function_template *ft)
+{
+ struct raid_internal *i = kzalloc(sizeof(struct raid_internal),
+ GFP_KERNEL);
+ int count = 0;
+
+ if (unlikely(!i))
+ return NULL;
+
+ i->f = ft;
+
+ i->r.raid_attrs.ac.class = &raid_class.class;
+ i->r.raid_attrs.ac.match = raid_match;
+ i->r.raid_attrs.ac.attrs = &i->attrs[0];
+
+ attribute_container_register(&i->r.raid_attrs.ac);
+
+ i->attrs[count++] = &dev_attr_level;
+ i->attrs[count++] = &dev_attr_resync;
+ i->attrs[count++] = &dev_attr_state;
+
+ i->attrs[count] = NULL;
+ BUG_ON(count > RAID_NUM_ATTRS);
+
+ return &i->r;
+}
+EXPORT_SYMBOL(raid_class_attach);
+
+void
+raid_class_release(struct raid_template *r)
+{
+ struct raid_internal *i = to_raid_internal(r);
+
+ BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac));
+
+ kfree(i);
+}
+EXPORT_SYMBOL(raid_class_release);
+
+static __init int raid_init(void)
+{
+ return transport_class_register(&raid_class);
+}
+
+static __exit void raid_exit(void)
+{
+ transport_class_unregister(&raid_class);
+}
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("RAID device class");
+MODULE_LICENSE("GPL");
+
+module_init(raid_init);
+module_exit(raid_exit);
+
diff --git a/drivers/scsi/script_asm.pl b/drivers/scsi/script_asm.pl
new file mode 100644
index 000000000..7d651d99a
--- /dev/null
+++ b/drivers/scsi/script_asm.pl
@@ -0,0 +1,984 @@
+#!/usr/bin/perl -s
+
+# NCR 53c810 script assembler
+# Sponsored by
+# iX Multiuser Multitasking Magazine
+#
+# Copyright 1993, Drew Eckhardt
+# Visionary Computing
+# (Unix and Linux consulting and custom programming)
+# drew@Colorado.EDU
+# +1 (303) 786-7975
+#
+# Support for 53c710 (via -ncr7x0_family switch) added by Richard
+# Hirst <richard@sleepie.demon.co.uk> - 15th March 1997
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+# TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+#
+
+#
+# Basically, I follow the NCR syntax documented in the NCR53c710
+# Programmer's guide, with the new instructions, registers, etc.
+# from the NCR53c810.
+#
+# Differences between this assembler and NCR's are that
+# 1. PASS, REL (data, JUMPs work fine), and the option to start a new
+# script, are unimplemented, since I didn't use them in my scripts.
+#
+# 2. I also emit a script_u.h file, which will undefine all of
+# the A_*, E_*, etc. symbols defined in the script. This
+# makes including multiple scripts in one program easier
+#
+# 3. This is a single pass assembler, which only emits
+# .h files.
+#
+
+
+# XXX - set these with command line options
+$debug = 0; # Print general debugging messages
+$debug_external = 0; # Print external/forward reference messages
+$list_in_array = 1; # Emit original SCRIPTS assembler in comments in
+ # script.h
+#$prefix; # (set by perl -s)
+ # define all arrays having this prefix so we
+ # don't have name space collisions after
+ # assembling this file in different ways for
+ # different host adapters
+
+# Constants
+
+
+# Table of the SCSI phase encodings
+%scsi_phases = (
+ 'DATA_OUT', 0x00_00_00_00, 'DATA_IN', 0x01_00_00_00, 'CMD', 0x02_00_00_00,
+ 'STATUS', 0x03_00_00_00, 'MSG_OUT', 0x06_00_00_00, 'MSG_IN', 0x07_00_00_00
+);
+
+# XXX - replace references to the *_810 constants with general constants
+# assigned at compile time based on chip type.
+
+# Table of operator encodings
+# XXX - NCR53c710 only implements
+# move (nop) = 0x00_00_00_00
+# or = 0x02_00_00_00
+# and = 0x04_00_00_00
+# add = 0x06_00_00_00
+
+if ($ncr7x0_family) {
+ %operators = (
+ '|', 0x02_00_00_00, 'OR', 0x02_00_00_00,
+ '&', 0x04_00_00_00, 'AND', 0x04_00_00_00,
+ '+', 0x06_00_00_00
+ );
+}
+else {
+ %operators = (
+ 'SHL', 0x01_00_00_00,
+ '|', 0x02_00_00_00, 'OR', 0x02_00_00_00,
+ 'XOR', 0x03_00_00_00,
+ '&', 0x04_00_00_00, 'AND', 0x04_00_00_00,
+ 'SHR', 0x05_00_00_00,
+ # Note : low bit of the operator bit should be set for add with
+ # carry.
+ '+', 0x06_00_00_00
+ );
+}
+
+# Table of register addresses
+
+if ($ncr7x0_family) {
+ %registers = (
+ 'SCNTL0', 0, 'SCNTL1', 1, 'SDID', 2, 'SIEN', 3,
+ 'SCID', 4, 'SXFER', 5, 'SODL', 6, 'SOCL', 7,
+ 'SFBR', 8, 'SIDL', 9, 'SBDL', 10, 'SBCL', 11,
+ 'DSTAT', 12, 'SSTAT0', 13, 'SSTAT1', 14, 'SSTAT2', 15,
+ 'DSA0', 16, 'DSA1', 17, 'DSA2', 18, 'DSA3', 19,
+ 'CTEST0', 20, 'CTEST1', 21, 'CTEST2', 22, 'CTEST3', 23,
+ 'CTEST4', 24, 'CTEST5', 25, 'CTEST6', 26, 'CTEST7', 27,
+ 'TEMP0', 28, 'TEMP1', 29, 'TEMP2', 30, 'TEMP3', 31,
+ 'DFIFO', 32, 'ISTAT', 33, 'CTEST8', 34, 'LCRC', 35,
+ 'DBC0', 36, 'DBC1', 37, 'DBC2', 38, 'DCMD', 39,
+ 'DNAD0', 40, 'DNAD1', 41, 'DNAD2', 42, 'DNAD3', 43,
+ 'DSP0', 44, 'DSP1', 45, 'DSP2', 46, 'DSP3', 47,
+ 'DSPS0', 48, 'DSPS1', 49, 'DSPS2', 50, 'DSPS3', 51,
+ 'SCRATCH0', 52, 'SCRATCH1', 53, 'SCRATCH2', 54, 'SCRATCH3', 55,
+ 'DMODE', 56, 'DIEN', 57, 'DWT', 58, 'DCNTL', 59,
+ 'ADDER0', 60, 'ADDER1', 61, 'ADDER2', 62, 'ADDER3', 63,
+ );
+}
+else {
+ %registers = (
+ 'SCNTL0', 0, 'SCNTL1', 1, 'SCNTL2', 2, 'SCNTL3', 3,
+ 'SCID', 4, 'SXFER', 5, 'SDID', 6, 'GPREG', 7,
+ 'SFBR', 8, 'SOCL', 9, 'SSID', 10, 'SBCL', 11,
+ 'DSTAT', 12, 'SSTAT0', 13, 'SSTAT1', 14, 'SSTAT2', 15,
+ 'DSA0', 16, 'DSA1', 17, 'DSA2', 18, 'DSA3', 19,
+ 'ISTAT', 20,
+ 'CTEST0', 24, 'CTEST1', 25, 'CTEST2', 26, 'CTEST3', 27,
+ 'TEMP0', 28, 'TEMP1', 29, 'TEMP2', 30, 'TEMP3', 31,
+ 'DFIFO', 32, 'CTEST4', 33, 'CTEST5', 34, 'CTEST6', 35,
+ 'DBC0', 36, 'DBC1', 37, 'DBC2', 38, 'DCMD', 39,
+ 'DNAD0', 40, 'DNAD1', 41, 'DNAD2', 42, 'DNAD3', 43,
+ 'DSP0', 44, 'DSP1', 45, 'DSP2', 46, 'DSP3', 47,
+ 'DSPS0', 48, 'DSPS1', 49, 'DSPS2', 50, 'DSPS3', 51,
+ 'SCRATCH0', 52, 'SCRATCH1', 53, 'SCRATCH2', 54, 'SCRATCH3', 55,
+ 'SCRATCHA0', 52, 'SCRATCHA1', 53, 'SCRATCHA2', 54, 'SCRATCHA3', 55,
+ 'DMODE', 56, 'DIEN', 57, 'DWT', 58, 'DCNTL', 59,
+ 'ADDER0', 60, 'ADDER1', 61, 'ADDER2', 62, 'ADDER3', 63,
+ 'SIEN0', 64, 'SIEN1', 65, 'SIST0', 66, 'SIST1', 67,
+ 'SLPAR', 68, 'MACNTL', 70, 'GPCNTL', 71,
+ 'STIME0', 72, 'STIME1', 73, 'RESPID', 74,
+ 'STEST0', 76, 'STEST1', 77, 'STEST2', 78, 'STEST3', 79,
+ 'SIDL', 80,
+ 'SODL', 84,
+ 'SBDL', 88,
+ 'SCRATCHB0', 92, 'SCRATCHB1', 93, 'SCRATCHB2', 94, 'SCRATCHB3', 95
+ );
+}
+
+# Parsing regular expressions
+$identifier = '[A-Za-z_][A-Za-z_0-9]*';
+$decnum = '-?\\d+';
+$hexnum = '0[xX][0-9A-Fa-f]+';
+$constant = "$hexnum|$decnum";
+
+# yucky - since we can't control grouping of # $constant, we need to
+# expand out each alternative for $value.
+
+$value = "$identifier|$identifier\\s*[+\-]\\s*$decnum|".
+ "$identifier\\s*[+-]\s*$hexnum|$constant";
+
+print STDERR "value regex = $value\n" if ($debug);
+
+$phase = join ('|', keys %scsi_phases);
+print STDERR "phase regex = $phase\n" if ($debug);
+$register = join ('|', keys %registers);
+
+# yucky - since %operators includes meta-characters which must
+# be escaped, I can't use the join() trick I used for the register
+# regex
+
+if ($ncr7x0_family) {
+ $operator = '\||OR|AND|\&|\+';
+}
+else {
+ $operator = '\||OR|AND|XOR|\&|\+';
+}
+
+# Global variables
+
+%symbol_values = (%registers) ; # Traditional symbol table
+
+%symbol_references = () ; # Table of symbol references, where
+ # the index is the symbol name,
+ # and the contents a white space
+ # delimited list of address,size
+ # tuples where size is in bytes.
+
+@code = (); # Array of 32 bit words for SIOP
+
+@entry = (); # Array of entry point names
+
+@label = (); # Array of label names
+
+@absolute = (); # Array of absolute names
+
+@relative = (); # Array of relative names
+
+@external = (); # Array of external names
+
+$address = 0; # Address of current instruction
+
+$lineno = 0; # Line number we are parsing
+
+$output = 'script.h'; # Output file
+$outputu = 'scriptu.h';
+
+# &patch ($address, $offset, $length, $value) patches $code[$address]
+# so that the $length bytes at $offset have $value added to
+# them.
+
+@inverted_masks = (0x00_00_00_00, 0x00_00_00_ff, 0x00_00_ff_ff, 0x00_ff_ff_ff,
+ 0xff_ff_ff_ff);
+
+sub patch {
+ local ($address, $offset, $length, $value) = @_;
+ if ($debug) {
+ print STDERR "Patching $address at offset $offset, length $length to $value\n";
+ printf STDERR "Old code : %08x\n", $code[$address];
+ }
+
+ $mask = ($inverted_masks[$length] << ($offset * 8));
+
+ $code[$address] = ($code[$address] & ~$mask) |
+ (($code[$address] & $mask) + ($value << ($offset * 8)) &
+ $mask);
+
+ printf STDERR "New code : %08x\n", $code[$address] if ($debug);
+}
+
+# &parse_value($value, $word, $offset, $length) where $value is
+# an identifier or constant, $word is the word offset relative to
+# $address, $offset is the starting byte within that word, and
+# $length is the length of the field in bytes.
+#
+# Side effects are that the bytes are combined into the @code array
+# relative to $address, and that the %symbol_references table is
+# updated as appropriate.
+
+sub parse_value {
+ local ($value, $word, $offset, $length) = @_;
+ local ($tmp);
+
+ $symbol = '';
+
+ if ($value =~ /^REL\s*\(\s*($identifier)\s*\)\s*(.*)/i) {
+ $relative = 'REL';
+ $symbol = $1;
+ $value = $2;
+print STDERR "Relative reference $symbol\n" if ($debug);
+ } elsif ($value =~ /^($identifier)\s*(.*)/) {
+ $relative = 'ABS';
+ $symbol = $1;
+ $value = $2;
+print STDERR "Absolute reference $symbol\n" if ($debug);
+ }
+
+ if ($symbol ne '') {
+print STDERR "Referencing symbol $1, length = $length in $_\n" if ($debug);
+ $tmp = ($address + $word) * 4 + $offset;
+ if ($symbol_references{$symbol} ne undef) {
+ $symbol_references{$symbol} =
+ "$symbol_references{$symbol} $relative,$tmp,$length";
+ } else {
+ if (!defined($symbol_values{$symbol})) {
+print STDERR "forward $1\n" if ($debug_external);
+ $forward{$symbol} = "line $lineno : $_";
+ }
+ $symbol_references{$symbol} = "$relative,$tmp,$length";
+ }
+ }
+
+ $value = eval $value;
+ &patch ($address + $word, $offset, $length, $value);
+}
+
+# &parse_conditional ($conditional) where $conditional is the conditional
+# clause from a transfer control instruction (RETURN, CALL, JUMP, INT).
+
+sub parse_conditional {
+ local ($conditional) = @_;
+ if ($conditional =~ /^\s*(IF|WHEN)\s*(.*)/i) {
+ $if = $1;
+ $conditional = $2;
+ if ($if =~ /WHEN/i) {
+ $allow_atn = 0;
+ $code[$address] |= 0x00_01_00_00;
+ $allow_atn = 0;
+ print STDERR "$0 : parsed WHEN\n" if ($debug);
+ } else {
+ $allow_atn = 1;
+ print STDERR "$0 : parsed IF\n" if ($debug);
+ }
+ } else {
+ die "$0 : syntax error in line $lineno : $_
+ expected IF or WHEN
+";
+ }
+
+ if ($conditional =~ /^NOT\s+(.*)$/i) {
+ $not = 'NOT ';
+ $other = 'OR';
+ $conditional = $1;
+ print STDERR "$0 : parsed NOT\n" if ($debug);
+ } else {
+ $code[$address] |= 0x00_08_00_00;
+ $not = '';
+ $other = 'AND'
+ }
+
+ $need_data = 0;
+ if ($conditional =~ /^ATN\s*(.*)/i) {#
+ die "$0 : syntax error in line $lineno : $_
+ WHEN conditional is incompatible with ATN
+" if (!$allow_atn);
+ $code[$address] |= 0x00_02_00_00;
+ $conditional = $1;
+ print STDERR "$0 : parsed ATN\n" if ($debug);
+ } elsif ($conditional =~ /^($phase)\s*(.*)/i) {
+ $phase_index = "\U$1\E";
+ $p = $scsi_phases{$phase_index};
+ $code[$address] |= $p | 0x00_02_00_00;
+ $conditional = $2;
+ print STDERR "$0 : parsed phase $phase_index\n" if ($debug);
+ } else {
+ $other = '';
+ $need_data = 1;
+ }
+
+print STDERR "Parsing conjunction, expecting $other\n" if ($debug);
+ if ($conditional =~ /^(AND|OR)\s*(.*)/i) {
+ $conjunction = $1;
+ $conditional = $2;
+ $need_data = 1;
+ die "$0 : syntax error in line $lineno : $_
+ Illegal use of $1. Valid uses are
+ ".$not."<phase> $1 data
+ ".$not."ATN $1 data
+" if ($other eq '');
+ die "$0 : syntax error in line $lineno : $_
+ Illegal use of $conjunction. Valid syntaxes are
+ NOT <phase>|ATN OR data
+ <phase>|ATN AND data
+" if ($conjunction !~ /\s*$other\s*/i);
+ print STDERR "$0 : parsed $1\n" if ($debug);
+ }
+
+ if ($need_data) {
+print STDERR "looking for data in $conditional\n" if ($debug);
+ if ($conditional=~ /^($value)\s*(.*)/i) {
+ $code[$address] |= 0x00_04_00_00;
+ $conditional = $2;
+ &parse_value($1, 0, 0, 1);
+ print STDERR "$0 : parsed data\n" if ($debug);
+ } else {
+ die "$0 : syntax error in line $lineno : $_
+ expected <data>.
+";
+ }
+ }
+
+ if ($conditional =~ /^\s*,\s*(.*)/) {
+ $conditional = $1;
+ if ($conditional =~ /^AND\s\s*MASK\s\s*($value)\s*(.*)/i) {
+ &parse_value ($1, 0, 1, 1);
+ print STDERR "$0 parsed AND MASK $1\n" if ($debug);
+ die "$0 : syntax error in line $lineno : $_
+ expected end of line, not \"$2\"
+" if ($2 ne '');
+ } else {
+ die "$0 : syntax error in line $lineno : $_
+ expected \",AND MASK <data>\", not \"$2\"
+";
+ }
+ } elsif ($conditional !~ /^\s*$/) {
+ die "$0 : syntax error in line $lineno : $_
+ expected end of line" . (($need_data) ? " or \"AND MASK <data>\"" : "") . "
+ not \"$conditional\"
+";
+ }
+}
+
+# Parse command line
+$output = shift;
+$outputu = shift;
+
+
+# Main loop
+while (<STDIN>) {
+ $lineno = $lineno + 1;
+ $list[$address] = $list[$address].$_;
+ s/;.*$//; # Strip comments
+
+
+ chop; # Leave new line out of error messages
+
+# Handle symbol definitions of the form label:
+ if (/^\s*($identifier)\s*:(.*)/) {
+ if (!defined($symbol_values{$1})) {
+ $symbol_values{$1} = $address * 4; # Address is an index into
+ delete $forward{$1}; # an array of longs
+ push (@label, $1);
+ $_ = $2;
+ } else {
+ die "$0 : redefinition of symbol $1 in line $lineno : $_\n";
+ }
+ }
+
+# Handle symbol definitions of the form ABSOLUTE or RELATIVE identifier =
+# value
+ if (/^\s*(ABSOLUTE|RELATIVE)\s+(.*)/i) {
+ $is_absolute = $1;
+ $rest = $2;
+ foreach $rest (split (/\s*,\s*/, $rest)) {
+ if ($rest =~ /^($identifier)\s*=\s*($constant)\s*$/) {
+ local ($id, $cnst) = ($1, $2);
+ if ($symbol_values{$id} eq undef) {
+ $symbol_values{$id} = eval $cnst;
+ delete $forward{$id};
+ if ($is_absolute =~ /ABSOLUTE/i) {
+ push (@absolute , $id);
+ } else {
+ push (@relative, $id);
+ }
+ } else {
+ die "$0 : redefinition of symbol $id in line $lineno : $_\n";
+ }
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected <identifier> = <value>
+";
+ }
+ }
+ } elsif (/^\s*EXTERNAL\s+(.*)/i) {
+ $externals = $1;
+ foreach $external (split (/,/,$externals)) {
+ if ($external =~ /\s*($identifier)\s*$/) {
+ $external = $1;
+ push (@external, $external);
+ delete $forward{$external};
+ if (defined($symbol_values{$external})) {
+ die "$0 : redefinition of symbol $1 in line $lineno : $_\n";
+ }
+ $symbol_values{$external} = $external;
+print STDERR "defined external $1 to $external\n" if ($debug_external);
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected <identifier>, got $external
+";
+ }
+ }
+# Process ENTRY identifier declarations
+ } elsif (/^\s*ENTRY\s+(.*)/i) {
+ if ($1 =~ /^($identifier)\s*$/) {
+ push (@entry, $1);
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected ENTRY <identifier>
+";
+ }
+# Process MOVE length, address, WITH|WHEN phase instruction
+ } elsif (/^\s*MOVE\s+(.*)/i) {
+ $rest = $1;
+ if ($rest =~ /^FROM\s+($value)\s*,\s*(WITH|WHEN)\s+($phase)\s*$/i) {
+ $transfer_addr = $1;
+ $with_when = $2;
+ $scsi_phase = $3;
+print STDERR "Parsing MOVE FROM $transfer_addr, $with_when $3\n" if ($debug);
+ $code[$address] = 0x18_00_00_00 | (($with_when =~ /WITH/i) ?
+ 0x00_00_00_00 : 0x08_00_00_00) | $scsi_phases{$scsi_phase};
+ &parse_value ($transfer_addr, 1, 0, 4);
+ $address += 2;
+ } elsif ($rest =~ /^($value)\s*,\s*(PTR\s+|)($value)\s*,\s*(WITH|WHEN)\s+($phase)\s*$/i) {
+ $transfer_len = $1;
+ $ptr = $2;
+ $transfer_addr = $3;
+ $with_when = $4;
+ $scsi_phase = $5;
+ $code[$address] = (($with_when =~ /WITH/i) ? 0x00_00_00_00 :
+ 0x08_00_00_00) | (($ptr =~ /PTR/i) ? (1 << 29) : 0) |
+ $scsi_phases{$scsi_phase};
+ &parse_value ($transfer_len, 0, 0, 3);
+ &parse_value ($transfer_addr, 1, 0, 4);
+ $address += 2;
+ } elsif ($rest =~ /^MEMORY\s+(.*)/i) {
+ $rest = $1;
+ $code[$address] = 0xc0_00_00_00;
+ if ($rest =~ /^($value)\s*,\s*($value)\s*,\s*($value)\s*$/) {
+ $count = $1;
+ $source = $2;
+ $dest = $3;
+print STDERR "Parsing MOVE MEMORY $count, $source, $dest\n" if ($debug);
+ &parse_value ($count, 0, 0, 3);
+ &parse_value ($source, 1, 0, 4);
+ &parse_value ($dest, 2, 0, 4);
+printf STDERR "Move memory instruction = %08x,%08x,%08x\n",
+ $code[$address], $code[$address+1], $code[$address +2] if
+ ($debug);
+ $address += 3;
+
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected <count>, <source>, <destination>
+"
+ }
+ } elsif ($1 =~ /^(.*)\s+(TO|SHL|SHR)\s+(.*)/i) {
+print STDERR "Parsing register to register move\n" if ($debug);
+ $src = $1;
+ $op = "\U$2\E";
+ $rest = $3;
+
+ $code[$address] = 0x40_00_00_00;
+
+ $force = ($op !~ /TO/i);
+
+
+print STDERR "Forcing register source \n" if ($force && $debug);
+
+ if (!$force && $src =~
+ /^($register)\s+(-|$operator)\s+($value)\s*$/i) {
+print STDERR "register operand data8 source\n" if ($debug);
+ $src_reg = "\U$1\E";
+ $op = "\U$2\E";
+ if ($op ne '-') {
+ $data8 = $3;
+ } else {
+ die "- is not implemented yet.\n"
+ }
+ } elsif ($src =~ /^($register)\s*$/i) {
+print STDERR "register source\n" if ($debug);
+ $src_reg = "\U$1\E";
+ # Encode register to register move as a register | 0
+ # move to register.
+ if (!$force) {
+ $op = '|';
+ }
+ $data8 = 0;
+ } elsif (!$force && $src =~ /^($value)\s*$/i) {
+print STDERR "data8 source\n" if ($debug);
+ $src_reg = undef;
+ $op = 'NONE';
+ $data8 = $1;
+ } else {
+ if (!$force) {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected <register>
+ <data8>
+ <register> <operand> <data8>
+";
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected <register>
+";
+ }
+ }
+ if ($rest =~ /^($register)\s*(.*)$/i) {
+ $dst_reg = "\U$1\E";
+ $rest = $2;
+ } else {
+ die
+"$0 : syntax error in $lineno : $_
+ expected <register>, got $rest
+";
+ }
+
+ if ($rest =~ /^WITH\s+CARRY\s*(.*)/i) {
+ $rest = $1;
+ if ($op eq '+') {
+ $code[$address] |= 0x01_00_00_00;
+ } else {
+ die
+"$0 : syntax error in $lineno : $_
+ WITH CARRY option is incompatible with the $op operator.
+";
+ }
+ }
+
+ if ($rest !~ /^\s*$/) {
+ die
+"$0 : syntax error in $lineno : $_
+ Expected end of line, got $rest
+";
+ }
+
+ print STDERR "source = $src_reg, data = $data8 , destination = $dst_reg\n"
+ if ($debug);
+ # Note that Move data8 to reg is encoded as a read-modify-write
+ # instruction.
+ if (($src_reg eq undef) || ($src_reg eq $dst_reg)) {
+ $code[$address] |= 0x38_00_00_00 |
+ ($registers{$dst_reg} << 16);
+ } elsif ($dst_reg =~ /SFBR/i) {
+ $code[$address] |= 0x30_00_00_00 |
+ ($registers{$src_reg} << 16);
+ } elsif ($src_reg =~ /SFBR/i) {
+ $code[$address] |= 0x28_00_00_00 |
+ ($registers{$dst_reg} << 16);
+ } else {
+ die
+"$0 : Illegal combination of registers in line $lineno : $_
+ Either source and destination registers must be the same,
+ or either source or destination register must be SFBR.
+";
+ }
+
+ $code[$address] |= $operators{$op};
+
+ &parse_value ($data8, 0, 1, 1);
+ $code[$address] |= $operators{$op};
+ $code[$address + 1] = 0x00_00_00_00;# Reserved
+ $address += 2;
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected (initiator) <length>, <address>, WHEN <phase>
+ (target) <length>, <address>, WITH <phase>
+ MEMORY <length>, <source>, <destination>
+ <expression> TO <register>
+";
+ }
+# Process SELECT {ATN|} id, fail_address
+ } elsif (/^\s*(SELECT|RESELECT)\s+(.*)/i) {
+ $rest = $2;
+ if ($rest =~ /^(ATN|)\s*($value)\s*,\s*($identifier)\s*$/i) {
+ $atn = $1;
+ $id = $2;
+ $alt_addr = $3;
+ $code[$address] = 0x40_00_00_00 |
+ (($atn =~ /ATN/i) ? 0x01_00_00_00 : 0);
+ $code[$address + 1] = 0x00_00_00_00;
+ &parse_value($id, 0, 2, 1);
+ &parse_value($alt_addr, 1, 0, 4);
+ $address += 2;
+ } elsif ($rest =~ /^(ATN|)\s*FROM\s+($value)\s*,\s*($identifier)\s*$/i) {
+ $atn = $1;
+ $addr = $2;
+ $alt_addr = $3;
+ $code[$address] = 0x42_00_00_00 |
+ (($atn =~ /ATN/i) ? 0x01_00_00_00 : 0);
+ $code[$address + 1] = 0x00_00_00_00;
+ &parse_value($addr, 0, 0, 3);
+ &parse_value($alt_addr, 1, 0, 4);
+ $address += 2;
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected SELECT id, alternate_address or
+ SELECT FROM address, alternate_address or
+ RESELECT id, alternate_address or
+ RESELECT FROM address, alternate_address
+";
+ }
+ } elsif (/^\s*WAIT\s+(.*)/i) {
+ $rest = $1;
+print STDERR "Parsing WAIT $rest\n" if ($debug);
+ if ($rest =~ /^DISCONNECT\s*$/i) {
+ $code[$address] = 0x48_00_00_00;
+ $code[$address + 1] = 0x00_00_00_00;
+ $address += 2;
+ } elsif ($rest =~ /^(RESELECT|SELECT)\s+($identifier)\s*$/i) {
+ $alt_addr = $2;
+ $code[$address] = 0x50_00_00_00;
+ &parse_value ($alt_addr, 1, 0, 4);
+ $address += 2;
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected (initiator) WAIT DISCONNECT or
+ (initiator) WAIT RESELECT alternate_address or
+ (target) WAIT SELECT alternate_address
+";
+ }
+# Handle SET and CLEAR instructions. Note that we should also do something
+# with this syntax to set target mode.
+ } elsif (/^\s*(SET|CLEAR)\s+(.*)/i) {
+ $set = $1;
+ $list = $2;
+ $code[$address] = ($set =~ /SET/i) ? 0x58_00_00_00 :
+ 0x60_00_00_00;
+ foreach $arg (split (/\s+AND\s+/i,$list)) {
+ if ($arg =~ /ATN/i) {
+ $code[$address] |= 0x00_00_00_08;
+ } elsif ($arg =~ /ACK/i) {
+ $code[$address] |= 0x00_00_00_40;
+ } elsif ($arg =~ /TARGET/i) {
+ $code[$address] |= 0x00_00_02_00;
+ } elsif ($arg =~ /CARRY/i) {
+ $code[$address] |= 0x00_00_04_00;
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected $set followed by a AND delimited list of one or
+ more strings from the list ACK, ATN, CARRY, TARGET.
+";
+ }
+ }
+ $code[$address + 1] = 0x00_00_00_00;
+ $address += 2;
+ } elsif (/^\s*(JUMP|CALL|INT)\s+(.*)/i) {
+ $instruction = $1;
+ $rest = $2;
+ if ($instruction =~ /JUMP/i) {
+ $code[$address] = 0x80_00_00_00;
+ } elsif ($instruction =~ /CALL/i) {
+ $code[$address] = 0x88_00_00_00;
+ } else {
+ $code[$address] = 0x98_00_00_00;
+ }
+print STDERR "parsing JUMP, rest = $rest\n" if ($debug);
+
+# Relative jump.
+ if ($rest =~ /^(REL\s*\(\s*$identifier\s*\))\s*(.*)/i) {
+ $addr = $1;
+ $rest = $2;
+print STDERR "parsing JUMP REL, addr = $addr, rest = $rest\n" if ($debug);
+ $code[$address] |= 0x00_80_00_00;
+ &parse_value($addr, 1, 0, 4);
+# Absolute jump, requires no more gunk
+ } elsif ($rest =~ /^($value)\s*(.*)/) {
+ $addr = $1;
+ $rest = $2;
+ &parse_value($addr, 1, 0, 4);
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected <address> or REL (address)
+";
+ }
+
+ if ($rest =~ /^,\s*(.*)/) {
+ &parse_conditional($1);
+ } elsif ($rest =~ /^\s*$/) {
+ $code[$address] |= (1 << 19);
+ } else {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected , <conditional> or end of line, got $1
+";
+ }
+
+ $address += 2;
+ } elsif (/^\s*(RETURN|INTFLY)\s*(.*)/i) {
+ $instruction = $1;
+ $conditional = $2;
+print STDERR "Parsing $instruction\n" if ($debug);
+ $code[$address] = ($instruction =~ /RETURN/i) ? 0x90_00_00_00 :
+ 0x98_10_00_00;
+ if ($conditional =~ /^,\s*(.*)/) {
+ $conditional = $1;
+ &parse_conditional ($conditional);
+ } elsif ($conditional !~ /^\s*$/) {
+ die
+"$0 : syntax error in line $lineno : $_
+ expected , <conditional>
+";
+ } else {
+ $code[$address] |= 0x00_08_00_00;
+ }
+
+ $code[$address + 1] = 0x00_00_00_00;
+ $address += 2;
+ } elsif (/^\s*DISCONNECT\s*$/) {
+ $code[$address] = 0x48_00_00_00;
+ $code[$address + 1] = 0x00_00_00_00;
+ $address += 2;
+# I'm not sure that I should be including this extension, but
+# what the hell?
+ } elsif (/^\s*NOP\s*$/i) {
+ $code[$address] = 0x80_88_00_00;
+ $code[$address + 1] = 0x00_00_00_00;
+ $address += 2;
+# Ignore lines consisting entirely of white space
+ } elsif (/^\s*$/) {
+ } else {
+ die
+"$0 : syntax error in line $lineno: $_
+ expected label:, ABSOLUTE, CLEAR, DISCONNECT, EXTERNAL, MOVE, RESELECT,
+ SELECT SET, or WAIT
+";
+ }
+}
+
+# Fill in label references
+
+@undefined = keys %forward;
+if ($#undefined >= 0) {
+ print STDERR "Undefined symbols : \n";
+ foreach $undef (@undefined) {
+ print STDERR "$undef in $forward{$undef}\n";
+ }
+ exit 1;
+}
+
+@label_patches = ();
+
+@external_patches = ();
+
+@absolute = sort @absolute;
+
+foreach $i (@absolute) {
+ foreach $j (split (/\s+/,$symbol_references{$i})) {
+ $j =~ /(REL|ABS),(.*),(.*)/;
+ $type = $1;
+ $address = $2;
+ $length = $3;
+ die
+"$0 : $symbol $i has invalid relative reference at address $address,
+ size $length\n"
+ if ($type eq 'REL');
+
+ &patch ($address / 4, $address % 4, $length, $symbol_values{$i});
+ }
+}
+
+foreach $external (@external) {
+print STDERR "checking external $external \n" if ($debug_external);
+ if ($symbol_references{$external} ne undef) {
+ for $reference (split(/\s+/,$symbol_references{$external})) {
+ $reference =~ /(REL|ABS),(.*),(.*)/;
+ $type = $1;
+ $address = $2;
+ $length = $3;
+
+ die
+"$0 : symbol $label is external, has invalid relative reference at $address,
+ size $length\n"
+ if ($type eq 'REL');
+
+ die
+"$0 : symbol $label has invalid reference at $address, size $length\n"
+ if ((($address % 4) !=0) || ($length != 4));
+
+ $symbol = $symbol_values{$external};
+ $add = $code[$address / 4];
+ if ($add eq 0) {
+ $code[$address / 4] = $symbol;
+ } else {
+ $add = sprintf ("0x%08x", $add);
+ $code[$address / 4] = "$symbol + $add";
+ }
+
+print STDERR "referenced external $external at $1\n" if ($debug_external);
+ }
+ }
+}
+
+foreach $label (@label) {
+ if ($symbol_references{$label} ne undef) {
+ for $reference (split(/\s+/,$symbol_references{$label})) {
+ $reference =~ /(REL|ABS),(.*),(.*)/;
+ $type = $1;
+ $address = $2;
+ $length = $3;
+
+ if ((($address % 4) !=0) || ($length != 4)) {
+ die "$0 : symbol $label has invalid reference at $1, size $2\n";
+ }
+
+ if ($type eq 'ABS') {
+ $code[$address / 4] += $symbol_values{$label};
+ push (@label_patches, $address / 4);
+ } else {
+#
+# - The address of the reference should be in the second and last word
+# of an instruction
+# - Relative jumps, etc. are relative to the DSP of the _next_ instruction
+#
+# So, we need to add four to the address of the reference, to get
+# the address of the next instruction, when computing the reference.
+
+ $tmp = $symbol_values{$label} -
+ ($address + 4);
+ die
+# Relative addressing is limited to 24 bits.
+"$0 : symbol $label is too far ($tmp) from $address to reference as
+ relative/\n" if (($tmp >= 0x80_00_00) || ($tmp < -0x80_00_00));
+ $code[$address / 4] = $tmp & 0x00_ff_ff_ff;
+ }
+ }
+ }
+}
+
+# Output SCRIPT[] array, one instruction per line. Optionally
+# print the original code too.
+
+open (OUTPUT, ">$output") || die "$0 : can't open $output for writing\n";
+open (OUTPUTU, ">$outputu") || die "$0 : can't open $outputu for writing\n";
+
+($_ = $0) =~ s:.*/::;
+print OUTPUT "/* DO NOT EDIT - Generated automatically by ".$_." */\n";
+print OUTPUT "static u32 ".$prefix."SCRIPT[] = {\n";
+$instructions = 0;
+for ($i = 0; $i < $#code; ) {
+ if ($list_in_array) {
+ printf OUTPUT "/*\n$list[$i]\nat 0x%08x : */", $i;
+ }
+ printf OUTPUT "\t0x%08x,", $code[$i];
+ printf STDERR "Address $i = %x\n", $code[$i] if ($debug);
+ if ($code[$i + 1] =~ /\s*($identifier)(.*)$/) {
+ push (@external_patches, $i+1, $1);
+ printf OUTPUT "0%s,", $2
+ } else {
+ printf OUTPUT "0x%08x,",$code[$i+1];
+ }
+
+ if (($code[$i] & 0xff_00_00_00) == 0xc0_00_00_00) {
+ if ($code[$i + 2] =~ /$identifier/) {
+ push (@external_patches, $i+2, $code[$i+2]);
+ printf OUTPUT "0,\n";
+ } else {
+ printf OUTPUT "0x%08x,\n",$code[$i+2];
+ }
+ $i += 3;
+ } else {
+ printf OUTPUT "\n";
+ $i += 2;
+ }
+ $instructions += 1;
+}
+print OUTPUT "};\n\n";
+
+foreach $i (@absolute) {
+ printf OUTPUT "#define A_$i\t0x%08x\n", $symbol_values{$i};
+ if (defined($prefix) && $prefix ne '') {
+ printf OUTPUT "#define A_".$i."_used ".$prefix."A_".$i."_used\n";
+ printf OUTPUTU "#undef A_".$i."_used\n";
+ }
+ printf OUTPUTU "#undef A_$i\n";
+
+ printf OUTPUT "static u32 A_".$i."_used\[\] __attribute((unused)) = {\n";
+printf STDERR "$i is used $symbol_references{$i}\n" if ($debug);
+ foreach $j (split (/\s+/,$symbol_references{$i})) {
+ $j =~ /(ABS|REL),(.*),(.*)/;
+ if ($1 eq 'ABS') {
+ $address = $2;
+ $length = $3;
+ printf OUTPUT "\t0x%08x,\n", $address / 4;
+ }
+ }
+ printf OUTPUT "};\n\n";
+}
+
+foreach $i (sort @entry) {
+ printf OUTPUT "#define Ent_$i\t0x%08x\n", $symbol_values{$i};
+ printf OUTPUTU "#undef Ent_$i\n", $symbol_values{$i};
+}
+
+#
+# NCR assembler outputs label patches in the form of indices into
+# the code.
+#
+printf OUTPUT "static u32 ".$prefix."LABELPATCHES[] __attribute((unused)) = {\n";
+for $patch (sort {$a <=> $b} @label_patches) {
+ printf OUTPUT "\t0x%08x,\n", $patch;
+}
+printf OUTPUT "};\n\n";
+
+$num_external_patches = 0;
+printf OUTPUT "static struct {\n\tu32\toffset;\n\tvoid\t\t*address;\n".
+ "} ".$prefix."EXTERNAL_PATCHES[] __attribute((unused)) = {\n";
+while ($ident = pop(@external_patches)) {
+ $off = pop(@external_patches);
+ printf OUTPUT "\t{0x%08x, &%s},\n", $off, $ident;
+ ++$num_external_patches;
+}
+printf OUTPUT "};\n\n";
+
+printf OUTPUT "static u32 ".$prefix."INSTRUCTIONS __attribute((unused))\t= %d;\n",
+ $instructions;
+printf OUTPUT "static u32 ".$prefix."PATCHES __attribute((unused))\t= %d;\n",
+ $#label_patches+1;
+printf OUTPUT "static u32 ".$prefix."EXTERNAL_PATCHES_LEN __attribute((unused))\t= %d;\n",
+ $num_external_patches;
+close OUTPUT;
+close OUTPUTU;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
new file mode 100644
index 000000000..3833bf59f
--- /dev/null
+++ b/drivers/scsi/scsi.c
@@ -0,0 +1,1272 @@
+/*
+ * scsi.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
+ * Copyright (C) 2002, 2003 Christoph Hellwig
+ *
+ * generic mid-level SCSI driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Bug correction thanks go to :
+ * Rik Faith <faith@cs.unc.edu>
+ * Tommy Thorn <tthorn>
+ * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
+ *
+ * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Native multichannel, wide scsi, /proc/scsi and hot plugging
+ * support added by Michael Neuffer <mike@i-connect.net>
+ *
+ * Added request_module("scsi_hostadapter") for kerneld:
+ * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
+ * Bjorn Ekwall <bj0rn@blox.se>
+ * (changed to kmod)
+ *
+ * Major improvements to the timeout, abort, and reset processing,
+ * as well as performance modifications for large queue depths by
+ * Leonard N. Zubkoff <lnz@dandelion.com>
+ *
+ * Converted cli() code to spinlocks, Ingo Molnar
+ *
+ * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
+ *
+ * out_of_space hacks, D. Gilbert (dpg) 990608
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/unistd.h>
+#include <linux/spinlock.h>
+#include <linux/kmod.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/async.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scsi.h>
+
+/*
+ * Definitions and constants.
+ */
+
+/*
+ * Note - the initial logging level can be set here to log events at boot time.
+ * After the system is up, you may enable logging via the /proc interface.
+ */
+unsigned int scsi_logging_level;
+#if defined(CONFIG_SCSI_LOGGING)
+EXPORT_SYMBOL(scsi_logging_level);
+#endif
+
+/* sd, scsi core and power management need to coordinate flushing async actions */
+ASYNC_DOMAIN(scsi_sd_probe_domain);
+EXPORT_SYMBOL(scsi_sd_probe_domain);
+
+/*
+ * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
+ * asynchronous system resume operations. It is marked 'exclusive' to avoid
+ * being included in the async_synchronize_full() that is invoked by
+ * dpm_resume()
+ */
+ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
+EXPORT_SYMBOL(scsi_sd_pm_domain);
+
+/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
+ * You may not alter any existing entry (although adding new ones is
+ * encouraged once assigned by ANSI/INCITS T10
+ */
+static const char *const scsi_device_types[] = {
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications ",
+ "ASC IT8 ",
+ "ASC IT8 ",
+ "RAID ",
+ "Enclosure ",
+ "Direct-Access-RBC",
+ "Optical card ",
+ "Bridge controller",
+ "Object storage ",
+ "Automation/Drive ",
+ "Security Manager ",
+ "Direct-Access-ZBC",
+};
+
+/**
+ * scsi_device_type - Return 17 char string indicating device type.
+ * @type: type number to look up
+ */
+
+const char * scsi_device_type(unsigned type)
+{
+ if (type == 0x1e)
+ return "Well-known LUN ";
+ if (type == 0x1f)
+ return "No Device ";
+ if (type >= ARRAY_SIZE(scsi_device_types))
+ return "Unknown ";
+ return scsi_device_types[type];
+}
+
+EXPORT_SYMBOL(scsi_device_type);
+
+struct scsi_host_cmd_pool {
+ struct kmem_cache *cmd_slab;
+ struct kmem_cache *sense_slab;
+ unsigned int users;
+ char *cmd_name;
+ char *sense_name;
+ unsigned int slab_flags;
+ gfp_t gfp_mask;
+};
+
+static struct scsi_host_cmd_pool scsi_cmd_pool = {
+ .cmd_name = "scsi_cmd_cache",
+ .sense_name = "scsi_sense_cache",
+ .slab_flags = SLAB_HWCACHE_ALIGN,
+};
+
+static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
+ .cmd_name = "scsi_cmd_cache(DMA)",
+ .sense_name = "scsi_sense_cache(DMA)",
+ .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
+ .gfp_mask = __GFP_DMA,
+};
+
+static DEFINE_MUTEX(host_cmd_pool_mutex);
+
+/**
+ * scsi_host_free_command - internal function to release a command
+ * @shost: host to free the command for
+ * @cmd: command to release
+ *
+ * the command must previously have been allocated by
+ * scsi_host_alloc_command.
+ */
+static void
+scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct scsi_host_cmd_pool *pool = shost->cmd_pool;
+
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+ kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+ kmem_cache_free(pool->cmd_slab, cmd);
+}
+
+/**
+ * scsi_host_alloc_command - internal function to allocate command
+ * @shost: SCSI host whose pool to allocate from
+ * @gfp_mask: mask for the allocation
+ *
+ * Returns a fully allocated command with sense buffer and protection
+ * data buffer (where applicable) or NULL on failure
+ */
+static struct scsi_cmnd *
+scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+{
+ struct scsi_host_cmd_pool *pool = shost->cmd_pool;
+ struct scsi_cmnd *cmd;
+
+ cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
+ if (!cmd)
+ goto fail;
+
+ cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+ gfp_mask | pool->gfp_mask);
+ if (!cmd->sense_buffer)
+ goto fail_free_cmd;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
+ if (!cmd->prot_sdb)
+ goto fail_free_sense;
+ }
+
+ return cmd;
+
+fail_free_sense:
+ kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+fail_free_cmd:
+ kmem_cache_free(pool->cmd_slab, cmd);
+fail:
+ return NULL;
+}
+
+/**
+ * __scsi_get_command - Allocate a struct scsi_cmnd
+ * @shost: host to transmit command
+ * @gfp_mask: allocation mask
+ *
+ * Description: allocate a struct scsi_cmd from host's slab, recycling from the
+ * host's free_list if necessary.
+ */
+static struct scsi_cmnd *
+__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+{
+ struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
+
+ if (unlikely(!cmd)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&shost->free_list_lock, flags);
+ if (likely(!list_empty(&shost->free_list))) {
+ cmd = list_entry(shost->free_list.next,
+ struct scsi_cmnd, list);
+ list_del_init(&cmd->list);
+ }
+ spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+ if (cmd) {
+ void *buf, *prot;
+
+ buf = cmd->sense_buffer;
+ prot = cmd->prot_sdb;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->sense_buffer = buf;
+ cmd->prot_sdb = prot;
+ }
+ }
+
+ return cmd;
+}
+
+/**
+ * scsi_get_command - Allocate and setup a scsi command block
+ * @dev: parent scsi device
+ * @gfp_mask: allocator flags
+ *
+ * Returns: The allocated scsi command structure.
+ */
+struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
+{
+ struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
+ unsigned long flags;
+
+ if (unlikely(cmd == NULL))
+ return NULL;
+
+ cmd->device = dev;
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_add_tail(&cmd->list, &dev->cmd_list);
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ cmd->jiffies_at_alloc = jiffies;
+ return cmd;
+}
+
+/**
+ * __scsi_put_command - Free a struct scsi_cmnd
+ * @shost: dev->host
+ * @cmd: Command to free
+ */
+static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ unsigned long flags;
+
+ if (unlikely(list_empty(&shost->free_list))) {
+ spin_lock_irqsave(&shost->free_list_lock, flags);
+ if (list_empty(&shost->free_list)) {
+ list_add(&cmd->list, &shost->free_list);
+ cmd = NULL;
+ }
+ spin_unlock_irqrestore(&shost->free_list_lock, flags);
+ }
+
+ if (likely(cmd != NULL))
+ scsi_host_free_command(shost, cmd);
+}
+
+/**
+ * scsi_put_command - Free a scsi command block
+ * @cmd: command block to free
+ *
+ * Returns: Nothing.
+ *
+ * Notes: The command must not belong to any lists.
+ */
+void scsi_put_command(struct scsi_cmnd *cmd)
+{
+ unsigned long flags;
+
+ /* serious error if the command hasn't come from a device list */
+ spin_lock_irqsave(&cmd->device->list_lock, flags);
+ BUG_ON(list_empty(&cmd->list));
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(&cmd->device->list_lock, flags);
+
+ BUG_ON(delayed_work_pending(&cmd->abort_work));
+
+ __scsi_put_command(cmd->device->host, cmd);
+}
+
+static struct scsi_host_cmd_pool *
+scsi_find_host_cmd_pool(struct Scsi_Host *shost)
+{
+ if (shost->hostt->cmd_size)
+ return shost->hostt->cmd_pool;
+ if (shost->unchecked_isa_dma)
+ return &scsi_cmd_dma_pool;
+ return &scsi_cmd_pool;
+}
+
+static void
+scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
+{
+ kfree(pool->sense_name);
+ kfree(pool->cmd_name);
+ kfree(pool);
+}
+
+static struct scsi_host_cmd_pool *
+scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *hostt = shost->hostt;
+ struct scsi_host_cmd_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
+ pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
+ if (!pool->cmd_name || !pool->sense_name) {
+ scsi_free_host_cmd_pool(pool);
+ return NULL;
+ }
+
+ pool->slab_flags = SLAB_HWCACHE_ALIGN;
+ if (shost->unchecked_isa_dma) {
+ pool->slab_flags |= SLAB_CACHE_DMA;
+ pool->gfp_mask = __GFP_DMA;
+ }
+
+ if (hostt->cmd_size)
+ hostt->cmd_pool = pool;
+
+ return pool;
+}
+
+static struct scsi_host_cmd_pool *
+scsi_get_host_cmd_pool(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *hostt = shost->hostt;
+ struct scsi_host_cmd_pool *retval = NULL, *pool;
+ size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
+
+ /*
+ * Select a command slab for this host and create it if not
+ * yet existent.
+ */
+ mutex_lock(&host_cmd_pool_mutex);
+ pool = scsi_find_host_cmd_pool(shost);
+ if (!pool) {
+ pool = scsi_alloc_host_cmd_pool(shost);
+ if (!pool)
+ goto out;
+ }
+
+ if (!pool->users) {
+ pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
+ pool->slab_flags, NULL);
+ if (!pool->cmd_slab)
+ goto out_free_pool;
+
+ pool->sense_slab = kmem_cache_create(pool->sense_name,
+ SCSI_SENSE_BUFFERSIZE, 0,
+ pool->slab_flags, NULL);
+ if (!pool->sense_slab)
+ goto out_free_slab;
+ }
+
+ pool->users++;
+ retval = pool;
+out:
+ mutex_unlock(&host_cmd_pool_mutex);
+ return retval;
+
+out_free_slab:
+ kmem_cache_destroy(pool->cmd_slab);
+out_free_pool:
+ if (hostt->cmd_size) {
+ scsi_free_host_cmd_pool(pool);
+ hostt->cmd_pool = NULL;
+ }
+ goto out;
+}
+
+static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *hostt = shost->hostt;
+ struct scsi_host_cmd_pool *pool;
+
+ mutex_lock(&host_cmd_pool_mutex);
+ pool = scsi_find_host_cmd_pool(shost);
+
+ /*
+ * This may happen if a driver has a mismatched get and put
+ * of the command pool; the driver should be implicated in
+ * the stack trace
+ */
+ BUG_ON(pool->users == 0);
+
+ if (!--pool->users) {
+ kmem_cache_destroy(pool->cmd_slab);
+ kmem_cache_destroy(pool->sense_slab);
+ if (hostt->cmd_size) {
+ scsi_free_host_cmd_pool(pool);
+ hostt->cmd_pool = NULL;
+ }
+ }
+ mutex_unlock(&host_cmd_pool_mutex);
+}
+
+/**
+ * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
+ * @shost: host to allocate the freelist for.
+ *
+ * Description: The command freelist protects against system-wide out of memory
+ * deadlock by preallocating one SCSI command structure for each host, so the
+ * system can always write to a swap file on a device associated with that host.
+ *
+ * Returns: Nothing.
+ */
+int scsi_setup_command_freelist(struct Scsi_Host *shost)
+{
+ const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
+ struct scsi_cmnd *cmd;
+
+ spin_lock_init(&shost->free_list_lock);
+ INIT_LIST_HEAD(&shost->free_list);
+
+ shost->cmd_pool = scsi_get_host_cmd_pool(shost);
+ if (!shost->cmd_pool)
+ return -ENOMEM;
+
+ /*
+ * Get one backup command for this host.
+ */
+ cmd = scsi_host_alloc_command(shost, gfp_mask);
+ if (!cmd) {
+ scsi_put_host_cmd_pool(shost);
+ shost->cmd_pool = NULL;
+ return -ENOMEM;
+ }
+ list_add(&cmd->list, &shost->free_list);
+ return 0;
+}
+
+/**
+ * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
+ * @shost: host whose freelist is going to be destroyed
+ */
+void scsi_destroy_command_freelist(struct Scsi_Host *shost)
+{
+ /*
+ * If cmd_pool is NULL the free list was not initialized, so
+ * do not attempt to release resources.
+ */
+ if (!shost->cmd_pool)
+ return;
+
+ while (!list_empty(&shost->free_list)) {
+ struct scsi_cmnd *cmd;
+
+ cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
+ list_del_init(&cmd->list);
+ scsi_host_free_command(shost, cmd);
+ }
+ shost->cmd_pool = NULL;
+ scsi_put_host_cmd_pool(shost);
+}
+
+#ifdef CONFIG_SCSI_LOGGING
+void scsi_log_send(struct scsi_cmnd *cmd)
+{
+ unsigned int level;
+
+ /*
+ * If ML QUEUE log level is greater than or equal to:
+ *
+ * 1: nothing (match completion)
+ *
+ * 2: log opcode + command of all commands + cmd address
+ *
+ * 3: same as 2
+ *
+ * 4: same as 3
+ */
+ if (unlikely(scsi_logging_level)) {
+ level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
+ SCSI_LOG_MLQUEUE_BITS);
+ if (level > 1) {
+ scmd_printk(KERN_INFO, cmd,
+ "Send: scmd 0x%p\n", cmd);
+ scsi_print_command(cmd);
+ }
+ }
+}
+
+void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
+{
+ unsigned int level;
+
+ /*
+ * If ML COMPLETE log level is greater than or equal to:
+ *
+ * 1: log disposition, result, opcode + command, and conditionally
+ * sense data for failures or non SUCCESS dispositions.
+ *
+ * 2: same as 1 but for all command completions.
+ *
+ * 3: same as 2
+ *
+ * 4: same as 3 plus dump extra junk
+ */
+ if (unlikely(scsi_logging_level)) {
+ level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
+ SCSI_LOG_MLCOMPLETE_BITS);
+ if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
+ (level > 1)) {
+ scsi_print_result(cmd, "Done", disposition);
+ scsi_print_command(cmd);
+ if (status_byte(cmd->result) & CHECK_CONDITION)
+ scsi_print_sense(cmd);
+ if (level > 3)
+ scmd_printk(KERN_INFO, cmd,
+ "scsi host busy %d failed %d\n",
+ atomic_read(&cmd->device->host->host_busy),
+ cmd->device->host->host_failed);
+ }
+ }
+}
+#endif
+
+/**
+ * scsi_cmd_get_serial - Assign a serial number to a command
+ * @host: the scsi host
+ * @cmd: command to assign serial number to
+ *
+ * Description: a serial number identifies a request for error recovery
+ * and debugging purposes. Protected by the Host_Lock of host.
+ */
+void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ cmd->serial_number = host->cmd_serial_number++;
+ if (cmd->serial_number == 0)
+ cmd->serial_number = host->cmd_serial_number++;
+}
+EXPORT_SYMBOL(scsi_cmd_get_serial);
+
+/**
+ * scsi_finish_command - cleanup and pass command back to upper layer
+ * @cmd: the command
+ *
+ * Description: Pass command off to upper layer for finishing of I/O
+ * request, waking processes that are waiting on results,
+ * etc.
+ */
+void scsi_finish_command(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct scsi_target *starget = scsi_target(sdev);
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_driver *drv;
+ unsigned int good_bytes;
+
+ scsi_device_unbusy(sdev);
+
+ /*
+ * Clear the flags that say that the device/target/host is no longer
+ * capable of accepting new commands.
+ */
+ if (atomic_read(&shost->host_blocked))
+ atomic_set(&shost->host_blocked, 0);
+ if (atomic_read(&starget->target_blocked))
+ atomic_set(&starget->target_blocked, 0);
+ if (atomic_read(&sdev->device_blocked))
+ atomic_set(&sdev->device_blocked, 0);
+
+ /*
+ * If we have valid sense information, then some kind of recovery
+ * must have taken place. Make a note of this.
+ */
+ if (SCSI_SENSE_VALID(cmd))
+ cmd->result |= (DRIVER_SENSE << 24);
+
+ SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
+ "Notifying upper driver of completion "
+ "(result %x)\n", cmd->result));
+
+ good_bytes = scsi_bufflen(cmd);
+ if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ int old_good_bytes = good_bytes;
+ drv = scsi_cmd_to_driver(cmd);
+ if (drv->done)
+ good_bytes = drv->done(cmd);
+ /*
+ * USB may not give sense identifying bad sector and
+ * simply return a residue instead, so subtract off the
+ * residue if drv->done() error processing indicates no
+ * change to the completion length.
+ */
+ if (good_bytes == old_good_bytes)
+ good_bytes -= scsi_get_resid(cmd);
+ }
+ scsi_io_completion(cmd, good_bytes);
+}
+
+/**
+ * scsi_change_queue_depth - change a device's queue depth
+ * @sdev: SCSI Device in question
+ * @depth: number of commands allowed to be queued to the driver
+ *
+ * Sets the device queue depth and returns the new value.
+ */
+int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ unsigned long flags;
+
+ if (depth <= 0)
+ goto out;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+
+ /*
+ * Check to see if the queue is managed by the block layer.
+ * If it is, and we fail to adjust the depth, exit.
+ *
+ * Do not resize the tag map if it is a host wide share bqt,
+ * because the size should be the hosts's can_queue. If there
+ * is more IO than the LLD's can_queue (so there are not enuogh
+ * tags) request_fn's host queue ready check will handle it.
+ */
+ if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
+ if (blk_queue_tagged(sdev->request_queue) &&
+ blk_queue_resize_tags(sdev->request_queue, depth) != 0)
+ goto out_unlock;
+ }
+
+ sdev->queue_depth = depth;
+out_unlock:
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+out:
+ return sdev->queue_depth;
+}
+EXPORT_SYMBOL(scsi_change_queue_depth);
+
+/**
+ * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
+ * @sdev: SCSI Device in question
+ * @depth: Current number of outstanding SCSI commands on this device,
+ * not counting the one returned as QUEUE_FULL.
+ *
+ * Description: This function will track successive QUEUE_FULL events on a
+ * specific SCSI device to determine if and when there is a
+ * need to adjust the queue depth on the device.
+ *
+ * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
+ * -1 - Drop back to untagged operation using host->cmd_per_lun
+ * as the untagged command depth
+ *
+ * Lock Status: None held on entry
+ *
+ * Notes: Low level drivers may call this at any time and we will do
+ * "The Right Thing." We are interrupt context safe.
+ */
+int scsi_track_queue_full(struct scsi_device *sdev, int depth)
+{
+
+ /*
+ * Don't let QUEUE_FULLs on the same
+ * jiffies count, they could all be from
+ * same event.
+ */
+ if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
+ return 0;
+
+ sdev->last_queue_full_time = jiffies;
+ if (sdev->last_queue_full_depth != depth) {
+ sdev->last_queue_full_count = 1;
+ sdev->last_queue_full_depth = depth;
+ } else {
+ sdev->last_queue_full_count++;
+ }
+
+ if (sdev->last_queue_full_count <= 10)
+ return 0;
+
+ return scsi_change_queue_depth(sdev, depth);
+}
+EXPORT_SYMBOL(scsi_track_queue_full);
+
+/**
+ * scsi_vpd_inquiry - Request a device provide us with a VPD page
+ * @sdev: The device to ask
+ * @buffer: Where to put the result
+ * @page: Which Vital Product Data to return
+ * @len: The length of the buffer
+ *
+ * This is an internal helper function. You probably want to use
+ * scsi_get_vpd_page instead.
+ *
+ * Returns size of the vpd page on success or a negative error number.
+ */
+static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ u8 page, unsigned len)
+{
+ int result;
+ unsigned char cmd[16];
+
+ if (len < 4)
+ return -EINVAL;
+
+ cmd[0] = INQUIRY;
+ cmd[1] = 1; /* EVPD */
+ cmd[2] = page;
+ cmd[3] = len >> 8;
+ cmd[4] = len & 0xff;
+ cmd[5] = 0; /* Control byte */
+
+ /*
+ * I'm not convinced we need to try quite this hard to get VPD, but
+ * all the existing users tried this hard.
+ */
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
+ len, NULL, 30 * HZ, 3, NULL);
+ if (result)
+ return -EIO;
+
+ /* Sanity check that we got the page back that we asked for */
+ if (buffer[1] != page)
+ return -EIO;
+
+ return get_unaligned_be16(&buffer[2]) + 4;
+}
+
+/**
+ * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
+ * @sdev: The device to ask
+ * @page: Which Vital Product Data to return
+ * @buf: where to store the VPD
+ * @buf_len: number of bytes in the VPD buffer area
+ *
+ * SCSI devices may optionally supply Vital Product Data. Each 'page'
+ * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
+ * If the device supports this VPD page, this routine returns a pointer
+ * to a buffer containing the data from that page. The caller is
+ * responsible for calling kfree() on this pointer when it is no longer
+ * needed. If we cannot retrieve the VPD page this routine returns %NULL.
+ */
+int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
+ int buf_len)
+{
+ int i, result;
+
+ if (sdev->skip_vpd_pages)
+ goto fail;
+
+ /* Ask for all the pages supported by this device */
+ result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
+ if (result < 4)
+ goto fail;
+
+ /* If the user actually wanted this page, we can skip the rest */
+ if (page == 0)
+ return 0;
+
+ for (i = 4; i < min(result, buf_len); i++)
+ if (buf[i] == page)
+ goto found;
+
+ if (i < result && i >= buf_len)
+ /* ran off the end of the buffer, give us benefit of doubt */
+ goto found;
+ /* The device claims it doesn't support the requested page */
+ goto fail;
+
+ found:
+ result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
+ if (result < 0)
+ goto fail;
+
+ return 0;
+
+ fail:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
+
+/**
+ * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
+ * @sdev: The device to ask
+ *
+ * Attach the 'Device Identification' VPD page (0x83) and the
+ * 'Unit Serial Number' VPD page (0x80) to a SCSI device
+ * structure. This information can be used to identify the device
+ * uniquely.
+ */
+void scsi_attach_vpd(struct scsi_device *sdev)
+{
+ int result, i;
+ int vpd_len = SCSI_VPD_PG_LEN;
+ int pg80_supported = 0;
+ int pg83_supported = 0;
+ unsigned char *vpd_buf;
+
+ if (sdev->skip_vpd_pages)
+ return;
+retry_pg0:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ /* Ask for all the pages supported by this device */
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg0;
+ }
+
+ for (i = 4; i < result; i++) {
+ if (vpd_buf[i] == 0x80)
+ pg80_supported = 1;
+ if (vpd_buf[i] == 0x83)
+ pg83_supported = 1;
+ }
+ kfree(vpd_buf);
+ vpd_len = SCSI_VPD_PG_LEN;
+
+ if (pg80_supported) {
+retry_pg80:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg80;
+ }
+ sdev->vpd_pg80_len = result;
+ sdev->vpd_pg80 = vpd_buf;
+ vpd_len = SCSI_VPD_PG_LEN;
+ }
+
+ if (pg83_supported) {
+retry_pg83:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg83;
+ }
+ sdev->vpd_pg83_len = result;
+ sdev->vpd_pg83 = vpd_buf;
+ }
+}
+
+/**
+ * scsi_report_opcode - Find out if a given command opcode is supported
+ * @sdev: scsi device to query
+ * @buffer: scratch buffer (must be at least 20 bytes long)
+ * @len: length of buffer
+ * @opcode: opcode for command to look up
+ *
+ * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
+ * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
+ * unsupported and 1 if the device claims to support the command.
+ */
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+ unsigned int len, unsigned char opcode)
+{
+ unsigned char cmd[16];
+ struct scsi_sense_hdr sshdr;
+ int result;
+
+ if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
+ return -EINVAL;
+
+ memset(cmd, 0, 16);
+ cmd[0] = MAINTENANCE_IN;
+ cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
+ cmd[2] = 1; /* One command format */
+ cmd[3] = opcode;
+ put_unaligned_be32(len, &cmd[6]);
+ memset(buffer, 0, len);
+
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+ &sshdr, 30 * HZ, 3, NULL);
+
+ if (result && scsi_sense_valid(&sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
+ return -EINVAL;
+
+ if ((buffer[1] & 3) == 3) /* Command supported */
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_report_opcode);
+
+/**
+ * scsi_device_get - get an additional reference to a scsi_device
+ * @sdev: device to get a reference to
+ *
+ * Description: Gets a reference to the scsi_device and increments the use count
+ * of the underlying LLDD module. You must hold host_lock of the
+ * parent Scsi_Host or already have a reference when calling this.
+ *
+ * This will fail if a device is deleted or cancelled, or when the LLD module
+ * is in the process of being unloaded.
+ */
+int scsi_device_get(struct scsi_device *sdev)
+{
+ if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
+ goto fail;
+ if (!get_device(&sdev->sdev_gendev))
+ goto fail;
+ if (!try_module_get(sdev->host->hostt->module))
+ goto fail_put_device;
+ return 0;
+
+fail_put_device:
+ put_device(&sdev->sdev_gendev);
+fail:
+ return -ENXIO;
+}
+EXPORT_SYMBOL(scsi_device_get);
+
+/**
+ * scsi_device_put - release a reference to a scsi_device
+ * @sdev: device to release a reference on.
+ *
+ * Description: Release a reference to the scsi_device and decrements the use
+ * count of the underlying LLDD module. The device is freed once the last
+ * user vanishes.
+ */
+void scsi_device_put(struct scsi_device *sdev)
+{
+ module_put(sdev->host->hostt->module);
+ put_device(&sdev->sdev_gendev);
+}
+EXPORT_SYMBOL(scsi_device_put);
+
+/* helper for shost_for_each_device, see that for documentation */
+struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
+ struct scsi_device *prev)
+{
+ struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
+ struct scsi_device *next = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ while (list->next != &shost->__devices) {
+ next = list_entry(list->next, struct scsi_device, siblings);
+ /* skip devices that we can't get a reference to */
+ if (!scsi_device_get(next))
+ break;
+ next = NULL;
+ list = list->next;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (prev)
+ scsi_device_put(prev);
+ return next;
+}
+EXPORT_SYMBOL(__scsi_iterate_devices);
+
+/**
+ * starget_for_each_device - helper to walk all devices of a target
+ * @starget: target whose devices we want to iterate over.
+ * @data: Opaque passed to each function call.
+ * @fn: Function to call on each device
+ *
+ * This traverses over each device of @starget. The devices have
+ * a reference that must be released by scsi_host_put when breaking
+ * out of the loop.
+ */
+void starget_for_each_device(struct scsi_target *starget, void *data,
+ void (*fn)(struct scsi_device *, void *))
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ if ((sdev->channel == starget->channel) &&
+ (sdev->id == starget->id))
+ fn(sdev, data);
+ }
+}
+EXPORT_SYMBOL(starget_for_each_device);
+
+/**
+ * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
+ * @starget: target whose devices we want to iterate over.
+ * @data: parameter for callback @fn()
+ * @fn: callback function that is invoked for each device
+ *
+ * This traverses over each device of @starget. It does _not_
+ * take a reference on the scsi_device, so the whole loop must be
+ * protected by shost->host_lock.
+ *
+ * Note: The only reason why drivers would want to use this is because
+ * they need to access the device list in irq context. Otherwise you
+ * really want to use starget_for_each_device instead.
+ **/
+void __starget_for_each_device(struct scsi_target *starget, void *data,
+ void (*fn)(struct scsi_device *, void *))
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct scsi_device *sdev;
+
+ __shost_for_each_device(sdev, shost) {
+ if ((sdev->channel == starget->channel) &&
+ (sdev->id == starget->id))
+ fn(sdev, data);
+ }
+}
+EXPORT_SYMBOL(__starget_for_each_device);
+
+/**
+ * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
+ * @starget: SCSI target pointer
+ * @lun: SCSI Logical Unit Number
+ *
+ * Description: Looks up the scsi_device with the specified @lun for a given
+ * @starget. The returned scsi_device does not have an additional
+ * reference. You must hold the host's host_lock over this call and
+ * any access to the returned scsi_device. A scsi_device in state
+ * SDEV_DEL is skipped.
+ *
+ * Note: The only reason why drivers should use this is because
+ * they need to access the device list in irq context. Otherwise you
+ * really want to use scsi_device_lookup_by_target instead.
+ **/
+struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
+ u64 lun)
+{
+ struct scsi_device *sdev;
+
+ list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
+ if (sdev->lun ==lun)
+ return sdev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(__scsi_device_lookup_by_target);
+
+/**
+ * scsi_device_lookup_by_target - find a device given the target
+ * @starget: SCSI target pointer
+ * @lun: SCSI Logical Unit Number
+ *
+ * Description: Looks up the scsi_device with the specified @lun for a given
+ * @starget. The returned scsi_device has an additional reference that
+ * needs to be released with scsi_device_put once you're done with it.
+ **/
+struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
+ u64 lun)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ sdev = __scsi_device_lookup_by_target(starget, lun);
+ if (sdev && scsi_device_get(sdev))
+ sdev = NULL;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return sdev;
+}
+EXPORT_SYMBOL(scsi_device_lookup_by_target);
+
+/**
+ * __scsi_device_lookup - find a device given the host (UNLOCKED)
+ * @shost: SCSI host pointer
+ * @channel: SCSI channel (zero if only one channel)
+ * @id: SCSI target number (physical unit number)
+ * @lun: SCSI Logical Unit Number
+ *
+ * Description: Looks up the scsi_device with the specified @channel, @id, @lun
+ * for a given host. The returned scsi_device does not have an additional
+ * reference. You must hold the host's host_lock over this call and any access
+ * to the returned scsi_device.
+ *
+ * Note: The only reason why drivers would want to use this is because
+ * they need to access the device list in irq context. Otherwise you
+ * really want to use scsi_device_lookup instead.
+ **/
+struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
+ uint channel, uint id, u64 lun)
+{
+ struct scsi_device *sdev;
+
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->channel == channel && sdev->id == id &&
+ sdev->lun ==lun)
+ return sdev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(__scsi_device_lookup);
+
+/**
+ * scsi_device_lookup - find a device given the host
+ * @shost: SCSI host pointer
+ * @channel: SCSI channel (zero if only one channel)
+ * @id: SCSI target number (physical unit number)
+ * @lun: SCSI Logical Unit Number
+ *
+ * Description: Looks up the scsi_device with the specified @channel, @id, @lun
+ * for a given host. The returned scsi_device has an additional reference that
+ * needs to be released with scsi_device_put once you're done with it.
+ **/
+struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
+ uint channel, uint id, u64 lun)
+{
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ sdev = __scsi_device_lookup(shost, channel, id, lun);
+ if (sdev && scsi_device_get(sdev))
+ sdev = NULL;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return sdev;
+}
+EXPORT_SYMBOL(scsi_device_lookup);
+
+MODULE_DESCRIPTION("SCSI core");
+MODULE_LICENSE("GPL");
+
+module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
+
+#ifdef CONFIG_SCSI_MQ_DEFAULT
+bool scsi_use_blk_mq = true;
+#else
+bool scsi_use_blk_mq = false;
+#endif
+module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
+
+static int __init init_scsi(void)
+{
+ int error;
+
+ error = scsi_init_queue();
+ if (error)
+ return error;
+ error = scsi_init_procfs();
+ if (error)
+ goto cleanup_queue;
+ error = scsi_init_devinfo();
+ if (error)
+ goto cleanup_procfs;
+ error = scsi_init_hosts();
+ if (error)
+ goto cleanup_devlist;
+ error = scsi_init_sysctl();
+ if (error)
+ goto cleanup_hosts;
+ error = scsi_sysfs_register();
+ if (error)
+ goto cleanup_sysctl;
+
+ scsi_netlink_init();
+
+ printk(KERN_NOTICE "SCSI subsystem initialized\n");
+ return 0;
+
+cleanup_sysctl:
+ scsi_exit_sysctl();
+cleanup_hosts:
+ scsi_exit_hosts();
+cleanup_devlist:
+ scsi_exit_devinfo();
+cleanup_procfs:
+ scsi_exit_procfs();
+cleanup_queue:
+ scsi_exit_queue();
+ printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
+ -error);
+ return error;
+}
+
+static void __exit exit_scsi(void)
+{
+ scsi_netlink_exit();
+ scsi_sysfs_unregister();
+ scsi_exit_sysctl();
+ scsi_exit_hosts();
+ scsi_exit_devinfo();
+ scsi_exit_procfs();
+ scsi_exit_queue();
+ async_unregister_domain(&scsi_sd_probe_domain);
+}
+
+subsys_initcall(init_scsi);
+module_exit(exit_scsi);
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
new file mode 100644
index 000000000..d5a55fae6
--- /dev/null
+++ b/drivers/scsi/scsi.h
@@ -0,0 +1,48 @@
+/*
+ * scsi.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
+ * generic SCSI package header file by
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@andante.org to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+/*
+ * NOTE: this file only contains compatibility glue for old drivers. All
+ * these wrappers will be removed sooner or later. For new code please use
+ * the interfaces declared in the headers in include/scsi/
+ */
+
+#ifndef _SCSI_H
+#define _SCSI_H
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+
+/*
+ * Some defs, in case these are not defined elsewhere.
+ */
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+struct Scsi_Host;
+struct scsi_cmnd;
+struct scsi_device;
+struct scsi_target;
+struct scatterlist;
+
+/* obsolete typedef junk. */
+#include "scsi_typedefs.h"
+
+#endif /* _SCSI_H */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
new file mode 100644
index 000000000..1f8e2dc9c
--- /dev/null
+++ b/drivers/scsi/scsi_debug.c
@@ -0,0 +1,5462 @@
+/*
+ * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+ * Copyright (C) 1992 Eric Youngdale
+ * Simulate a host adapter with 2 disks attached. Do a lot of checking
+ * to make sure that we are not getting blocks mixed up, and PANIC if
+ * anything out of the ordinary is seen.
+ * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ *
+ * This version is more generic, simulating a variable number of disk
+ * (or disk like devices) sharing a common amount of RAM. To be more
+ * realistic, the simulated devices have the transport attributes of
+ * SAS disks.
+ *
+ *
+ * For documentation see http://sg.danny.cz/sg/sdebug26.html
+ *
+ * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
+ * dpg: work for devfs large number of disks [20010809]
+ * forked for lk 2.5 series [20011216, 20020101]
+ * use vmalloc() more inquiry+mode_sense [20020302]
+ * add timers for delayed responses [20020721]
+ * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
+ * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
+ * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
+ * module options to "modprobe scsi_debug num_tgts=2" [20021221]
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
+#include <linux/blkdev.h>
+#include <linux/crc-t10dif.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/hrtimer.h>
+
+#include <net/checksum.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+
+#include "sd.h"
+#include "scsi_logging.h"
+
+#define SCSI_DEBUG_VERSION "1.85"
+static const char *scsi_debug_version_date = "20141022";
+
+#define MY_NAME "scsi_debug"
+
+/* Additional Sense Code (ASC) */
+#define NO_ADDITIONAL_SENSE 0x0
+#define LOGICAL_UNIT_NOT_READY 0x4
+#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
+#define UNRECOVERED_READ_ERR 0x11
+#define PARAMETER_LIST_LENGTH_ERR 0x1a
+#define INVALID_OPCODE 0x20
+#define LBA_OUT_OF_RANGE 0x21
+#define INVALID_FIELD_IN_CDB 0x24
+#define INVALID_FIELD_IN_PARAM_LIST 0x26
+#define UA_RESET_ASC 0x29
+#define UA_CHANGED_ASC 0x2a
+#define TARGET_CHANGED_ASC 0x3f
+#define LUNS_CHANGED_ASCQ 0x0e
+#define INSUFF_RES_ASC 0x55
+#define INSUFF_RES_ASCQ 0x3
+#define POWER_ON_RESET_ASCQ 0x0
+#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
+#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
+#define CAPACITY_CHANGED_ASCQ 0x9
+#define SAVING_PARAMS_UNSUP 0x39
+#define TRANSPORT_PROBLEM 0x4b
+#define THRESHOLD_EXCEEDED 0x5d
+#define LOW_POWER_COND_ON 0x5e
+#define MISCOMPARE_VERIFY_ASC 0x1d
+#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
+#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
+
+/* Additional Sense Code Qualifier (ASCQ) */
+#define ACK_NAK_TO 0x3
+
+
+/* Default values for driver parameters */
+#define DEF_NUM_HOST 1
+#define DEF_NUM_TGTS 1
+#define DEF_MAX_LUNS 1
+/* With these defaults, this driver will make 1 host with 1 target
+ * (id 0) containing 1 logical unit (lun 0). That is 1 device.
+ */
+#define DEF_ATO 1
+#define DEF_DELAY 1 /* if > 0 unit is a jiffy */
+#define DEF_DEV_SIZE_MB 8
+#define DEF_DIF 0
+#define DEF_DIX 0
+#define DEF_D_SENSE 0
+#define DEF_EVERY_NTH 0
+#define DEF_FAKE_RW 0
+#define DEF_GUARD 0
+#define DEF_HOST_LOCK 0
+#define DEF_LBPU 0
+#define DEF_LBPWS 0
+#define DEF_LBPWS10 0
+#define DEF_LBPRZ 1
+#define DEF_LOWEST_ALIGNED 0
+#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
+#define DEF_NO_LUN_0 0
+#define DEF_NUM_PARTS 0
+#define DEF_OPTS 0
+#define DEF_OPT_BLKS 64
+#define DEF_PHYSBLK_EXP 0
+#define DEF_PTYPE 0
+#define DEF_REMOVABLE false
+#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
+#define DEF_SECTOR_SIZE 512
+#define DEF_UNMAP_ALIGNMENT 0
+#define DEF_UNMAP_GRANULARITY 1
+#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
+#define DEF_UNMAP_MAX_DESC 256
+#define DEF_VIRTUAL_GB 0
+#define DEF_VPD_USE_HOSTNO 1
+#define DEF_WRITESAME_LENGTH 0xFFFF
+#define DEF_STRICT 0
+#define DELAY_OVERRIDDEN -9999
+
+/* bit mask values for scsi_debug_opts */
+#define SCSI_DEBUG_OPT_NOISE 1
+#define SCSI_DEBUG_OPT_MEDIUM_ERR 2
+#define SCSI_DEBUG_OPT_TIMEOUT 4
+#define SCSI_DEBUG_OPT_RECOVERED_ERR 8
+#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
+#define SCSI_DEBUG_OPT_DIF_ERR 32
+#define SCSI_DEBUG_OPT_DIX_ERR 64
+#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
+#define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
+#define SCSI_DEBUG_OPT_Q_NOISE 0x200
+#define SCSI_DEBUG_OPT_ALL_TSF 0x400
+#define SCSI_DEBUG_OPT_RARE_TSF 0x800
+#define SCSI_DEBUG_OPT_N_WCE 0x1000
+#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
+#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
+/* When "every_nth" > 0 then modulo "every_nth" commands:
+ * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
+ * - a RECOVERED_ERROR is simulated on successful read and write
+ * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ * - a TRANSPORT_ERROR is simulated on successful read and write
+ * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
+ *
+ * When "every_nth" < 0 then after "- every_nth" commands:
+ * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
+ * - a RECOVERED_ERROR is simulated on successful read and write
+ * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ * - a TRANSPORT_ERROR is simulated on successful read and write
+ * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
+ * This will continue until some other action occurs (e.g. the user
+ * writing a new value (other than -1 or 1) to every_nth via sysfs).
+ */
+
+/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
+ * priority order. In the subset implemented here lower numbers have higher
+ * priority. The UA numbers should be a sequence starting from 0 with
+ * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
+#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
+#define SDEBUG_UA_BUS_RESET 1
+#define SDEBUG_UA_MODE_CHANGED 2
+#define SDEBUG_UA_CAPACITY_CHANGED 3
+#define SDEBUG_UA_LUNS_CHANGED 4
+#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
+#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
+#define SDEBUG_NUM_UAS 7
+
+/* for check_readiness() */
+#define UAS_ONLY 1 /* check for UAs only */
+#define UAS_TUR 0 /* if no UAs then check if media access possible */
+
+/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
+ * sector on read commands: */
+#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
+#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
+
+/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
+ * or "peripheral device" addressing (value 0) */
+#define SAM2_LUN_ADDRESS_METHOD 0
+#define SAM2_WLUN_REPORT_LUNS 0xc101
+
+/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
+ * (for response) at one time. Can be reduced by max_queue option. Command
+ * responses are not queued when delay=0 and ndelay=0. The per-device
+ * DEF_CMD_PER_LUN can be changed via sysfs:
+ * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
+ * SCSI_DEBUG_CANQUEUE. */
+#define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
+#define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
+#define DEF_CMD_PER_LUN 255
+
+#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
+#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
+#endif
+
+/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
+enum sdeb_opcode_index {
+ SDEB_I_INVALID_OPCODE = 0,
+ SDEB_I_INQUIRY = 1,
+ SDEB_I_REPORT_LUNS = 2,
+ SDEB_I_REQUEST_SENSE = 3,
+ SDEB_I_TEST_UNIT_READY = 4,
+ SDEB_I_MODE_SENSE = 5, /* 6, 10 */
+ SDEB_I_MODE_SELECT = 6, /* 6, 10 */
+ SDEB_I_LOG_SENSE = 7,
+ SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
+ SDEB_I_READ = 9, /* 6, 10, 12, 16 */
+ SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
+ SDEB_I_START_STOP = 11,
+ SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
+ SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
+ SDEB_I_MAINT_IN = 14,
+ SDEB_I_MAINT_OUT = 15,
+ SDEB_I_VERIFY = 16, /* 10 only */
+ SDEB_I_VARIABLE_LEN = 17,
+ SDEB_I_RESERVE = 18, /* 6, 10 */
+ SDEB_I_RELEASE = 19, /* 6, 10 */
+ SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
+ SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
+ SDEB_I_ATA_PT = 22, /* 12, 16 */
+ SDEB_I_SEND_DIAG = 23,
+ SDEB_I_UNMAP = 24,
+ SDEB_I_XDWRITEREAD = 25, /* 10 only */
+ SDEB_I_WRITE_BUFFER = 26,
+ SDEB_I_WRITE_SAME = 27, /* 10, 16 */
+ SDEB_I_SYNC_CACHE = 28, /* 10 only */
+ SDEB_I_COMP_WRITE = 29,
+ SDEB_I_LAST_ELEMENT = 30, /* keep this last */
+};
+
+static const unsigned char opcode_ind_arr[256] = {
+/* 0x0; 0x0->0x1f: 6 byte cdbs */
+ SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
+ 0, 0, 0, 0,
+ SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
+ 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
+ SDEB_I_RELEASE,
+ 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
+ SDEB_I_ALLOW_REMOVAL, 0,
+/* 0x20; 0x20->0x3f: 10 byte cdbs */
+ 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
+ SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
+ 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
+ 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
+/* 0x40; 0x40->0x5f: 10 byte cdbs */
+ 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
+ 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
+ SDEB_I_RELEASE,
+ 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
+/* 0x60; 0x60->0x7d are reserved */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, SDEB_I_VARIABLE_LEN,
+/* 0x80; 0x80->0x9f: 16 byte cdbs */
+ 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
+ SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
+ 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
+/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
+ SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
+ SDEB_I_MAINT_OUT, 0, 0, 0,
+ SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+/* 0xc0; 0xc0->0xff: vendor specific */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+#define F_D_IN 1
+#define F_D_OUT 2
+#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
+#define F_D_UNKN 8
+#define F_RL_WLUN_OK 0x10
+#define F_SKIP_UA 0x20
+#define F_DELAY_OVERR 0x40
+#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
+#define F_INV_OP 0x200
+#define F_FAKE_RW 0x400
+#define F_M_ACCESS 0x800 /* media access */
+
+#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
+#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_SA (F_SA_HIGH | F_SA_LOW)
+
+struct sdebug_dev_info;
+static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
+
+struct opcode_info_t {
+ u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
+ * for terminating element */
+ u8 opcode; /* if num_attached > 0, preferred */
+ u16 sa; /* service action */
+ u32 flags; /* OR-ed set of SDEB_F_* */
+ int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
+ const struct opcode_info_t *arrp; /* num_attached elements or NULL */
+ u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
+ /* ignore cdb bytes after position 15 */
+};
+
+static const struct opcode_info_t msense_iarr[1] = {
+ {0, 0x1a, 0, F_D_IN, NULL, NULL,
+ {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+};
+
+static const struct opcode_info_t mselect_iarr[1] = {
+ {0, 0x15, 0, F_D_OUT, NULL, NULL,
+ {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+};
+
+static const struct opcode_info_t read_iarr[3] = {
+ {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
+ {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
+ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ 0xc7, 0, 0, 0, 0} },
+};
+
+static const struct opcode_info_t write_iarr[3] = {
+ {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
+ {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
+ {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
+ {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ 0xc7, 0, 0, 0, 0} },
+};
+
+static const struct opcode_info_t sa_in_iarr[1] = {
+ {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
+ {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0, 0xc7} },
+};
+
+static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
+ {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
+ 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
+};
+
+static const struct opcode_info_t maint_in_iarr[2] = {
+ {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
+ {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ 0xc7, 0, 0, 0, 0} },
+ {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
+ {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
+ 0, 0} },
+};
+
+static const struct opcode_info_t write_same_iarr[1] = {
+ {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
+ {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x1f, 0xc7} },
+};
+
+static const struct opcode_info_t reserve_iarr[1] = {
+ {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+ {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+};
+
+static const struct opcode_info_t release_iarr[1] = {
+ {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+ {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+};
+
+
+/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
+ * plus the terminating elements for logic that scans this table such as
+ * REPORT SUPPORTED OPERATION CODES. */
+static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
+/* 0 */
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
+ {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
+ {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
+ 0, 0} },
+ {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
+ {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
+ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
+ {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
+ 0} },
+ {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
+ {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
+ {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
+ {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
+ 0, 0} },
+ {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
+ {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
+/* 10 */
+ {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
+ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
+ {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
+ {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
+ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
+ {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
+ 0} },
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
+ vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
+ 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
+ {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
+ {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
+ 0} },
+ {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
+ {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
+ 0} },
+/* 20 */
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
+ {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
+ {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
+ {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
+ NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+ {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* WRITE_BUFFER */
+ {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
+ write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
+ 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
+ {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
+ {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
+ 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
+
+/* 30 */
+ {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+};
+
+struct sdebug_scmd_extra_t {
+ bool inj_recovered;
+ bool inj_transport;
+ bool inj_dif;
+ bool inj_dix;
+ bool inj_short;
+};
+
+static int scsi_debug_add_host = DEF_NUM_HOST;
+static int scsi_debug_ato = DEF_ATO;
+static int scsi_debug_delay = DEF_DELAY;
+static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int scsi_debug_dif = DEF_DIF;
+static int scsi_debug_dix = DEF_DIX;
+static int scsi_debug_dsense = DEF_D_SENSE;
+static int scsi_debug_every_nth = DEF_EVERY_NTH;
+static int scsi_debug_fake_rw = DEF_FAKE_RW;
+static unsigned int scsi_debug_guard = DEF_GUARD;
+static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int scsi_debug_max_luns = DEF_MAX_LUNS;
+static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
+static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
+static int scsi_debug_ndelay = DEF_NDELAY;
+static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
+static int scsi_debug_no_uld = 0;
+static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
+static int scsi_debug_opts = DEF_OPTS;
+static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
+static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
+static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
+static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
+static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static unsigned int scsi_debug_lbpu = DEF_LBPU;
+static unsigned int scsi_debug_lbpws = DEF_LBPWS;
+static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
+static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
+static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
+static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
+static bool scsi_debug_removable = DEF_REMOVABLE;
+static bool scsi_debug_clustering;
+static bool scsi_debug_host_lock = DEF_HOST_LOCK;
+static bool scsi_debug_strict = DEF_STRICT;
+static bool sdebug_any_injecting_opt;
+
+static atomic_t sdebug_cmnd_count;
+static atomic_t sdebug_completions;
+static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
+
+#define DEV_READONLY(TGT) (0)
+
+static unsigned int sdebug_store_sectors;
+static sector_t sdebug_capacity; /* in sectors */
+
+/* old BIOS stuff, kernel may get rid of them but some mode sense pages
+ may still need them */
+static int sdebug_heads; /* heads per disk */
+static int sdebug_cylinders_per; /* cylinders per surface */
+static int sdebug_sectors_per; /* sectors per cylinder */
+
+#define SDEBUG_MAX_PARTS 4
+
+#define SCSI_DEBUG_MAX_CMD_LEN 32
+
+static unsigned int scsi_debug_lbp(void)
+{
+ return ((0 == scsi_debug_fake_rw) &&
+ (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
+}
+
+struct sdebug_dev_info {
+ struct list_head dev_list;
+ unsigned int channel;
+ unsigned int target;
+ u64 lun;
+ struct sdebug_host_info *sdbg_host;
+ unsigned long uas_bm[1];
+ atomic_t num_in_q;
+ char stopped; /* TODO: should be atomic */
+ bool used;
+};
+
+struct sdebug_host_info {
+ struct list_head host_list;
+ struct Scsi_Host *shost;
+ struct device dev;
+ struct list_head dev_info_list;
+};
+
+#define to_sdebug_host(d) \
+ container_of(d, struct sdebug_host_info, dev)
+
+static LIST_HEAD(sdebug_host_list);
+static DEFINE_SPINLOCK(sdebug_host_list_lock);
+
+
+struct sdebug_hrtimer { /* ... is derived from hrtimer */
+ struct hrtimer hrt; /* must be first element */
+ int qa_indx;
+};
+
+struct sdebug_queued_cmd {
+ /* in_use flagged by a bit in queued_in_use_bm[] */
+ struct timer_list *cmnd_timerp;
+ struct tasklet_struct *tletp;
+ struct sdebug_hrtimer *sd_hrtp;
+ struct scsi_cmnd * a_cmnd;
+};
+static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
+static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
+
+
+static unsigned char * fake_storep; /* ramdisk storage */
+static struct sd_dif_tuple *dif_storep; /* protection info */
+static void *map_storep; /* provisioning map */
+
+static unsigned long map_size;
+static int num_aborts;
+static int num_dev_resets;
+static int num_target_resets;
+static int num_bus_resets;
+static int num_host_resets;
+static int dix_writes;
+static int dix_reads;
+static int dif_errors;
+
+static DEFINE_SPINLOCK(queued_arr_lock);
+static DEFINE_RWLOCK(atomic_rw);
+
+static char sdebug_proc_name[] = MY_NAME;
+static const char *my_name = MY_NAME;
+
+static struct bus_type pseudo_lld_bus;
+
+static struct device_driver sdebug_driverfs_driver = {
+ .name = sdebug_proc_name,
+ .bus = &pseudo_lld_bus,
+};
+
+static const int check_condition_result =
+ (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+static const int illegal_condition_result =
+ (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+
+static const int device_qfull_result =
+ (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+
+static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
+ 0, 0, 0, 0};
+static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0x2, 0x4b};
+static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0};
+
+static void *fake_store(unsigned long long lba)
+{
+ lba = do_div(lba, sdebug_store_sectors);
+
+ return fake_storep + lba * scsi_debug_sector_size;
+}
+
+static struct sd_dif_tuple *dif_store(sector_t sector)
+{
+ sector = do_div(sector, sdebug_store_sectors);
+
+ return dif_storep + sector;
+}
+
+static int sdebug_add_adapter(void);
+static void sdebug_remove_adapter(void);
+
+static void sdebug_max_tgts_luns(void)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *hpnt;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ hpnt = sdbg_host->shost;
+ if ((hpnt->this_id >= 0) &&
+ (scsi_debug_num_tgts > hpnt->this_id))
+ hpnt->max_id = scsi_debug_num_tgts + 1;
+ else
+ hpnt->max_id = scsi_debug_num_tgts;
+ /* scsi_debug_max_luns; */
+ hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
+enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
+
+/* Set in_bit to -1 to indicate no bit position of invalid field */
+static void
+mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
+ int in_byte, int in_bit)
+{
+ unsigned char *sbuff;
+ u8 sks[4];
+ int sl, asc;
+
+ sbuff = scp->sense_buffer;
+ if (!sbuff) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
+ memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
+ asc, 0);
+ memset(sks, 0, sizeof(sks));
+ sks[0] = 0x80;
+ if (c_d)
+ sks[0] |= 0x40;
+ if (in_bit >= 0) {
+ sks[0] |= 0x8;
+ sks[0] |= 0x7 & in_bit;
+ }
+ put_unaligned_be16(in_byte, sks + 1);
+ if (scsi_debug_dsense) {
+ sl = sbuff[7] + 8;
+ sbuff[7] = sl;
+ sbuff[sl] = 0x2;
+ sbuff[sl + 1] = 0x6;
+ memcpy(sbuff + sl + 4, sks, 3);
+ } else
+ memcpy(sbuff + 15, sks, 3);
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
+ "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
+ my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
+}
+
+static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
+{
+ unsigned char *sbuff;
+
+ sbuff = scp->sense_buffer;
+ if (!sbuff) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
+
+ scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
+
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
+ my_name, key, asc, asq);
+}
+
+static void
+mk_sense_invalid_opcode(struct scsi_cmnd *scp)
+{
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
+}
+
+static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+{
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
+ if (0x1261 == cmd)
+ sdev_printk(KERN_INFO, dev,
+ "%s: BLKFLSBUF [0x1261]\n", __func__);
+ else if (0x5331 == cmd)
+ sdev_printk(KERN_INFO, dev,
+ "%s: CDROM_GET_CAPABILITY [0x5331]\n",
+ __func__);
+ else
+ sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
+ __func__, cmd);
+ }
+ return -EINVAL;
+ /* return -ENOTTY; // correct return but upsets fdisk */
+}
+
+static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
+{
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ if ((devip->sdbg_host == dp->sdbg_host) &&
+ (devip->target == dp->target))
+ clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
+static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
+ struct sdebug_dev_info * devip)
+{
+ int k;
+ bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
+
+ k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
+ if (k != SDEBUG_NUM_UAS) {
+ const char *cp = NULL;
+
+ switch (k) {
+ case SDEBUG_UA_POR:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_RESET_ASC, POWER_ON_RESET_ASCQ);
+ if (debug)
+ cp = "power on reset";
+ break;
+ case SDEBUG_UA_BUS_RESET:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_RESET_ASC, BUS_RESET_ASCQ);
+ if (debug)
+ cp = "bus reset";
+ break;
+ case SDEBUG_UA_MODE_CHANGED:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
+ if (debug)
+ cp = "mode parameters changed";
+ break;
+ case SDEBUG_UA_CAPACITY_CHANGED:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
+ if (debug)
+ cp = "capacity data changed";
+ break;
+ case SDEBUG_UA_MICROCODE_CHANGED:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
+ if (debug)
+ cp = "microcode has been changed";
+ break;
+ case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC,
+ MICROCODE_CHANGED_WO_RESET_ASCQ);
+ if (debug)
+ cp = "microcode has been changed without reset";
+ break;
+ case SDEBUG_UA_LUNS_CHANGED:
+ /*
+ * SPC-3 behavior is to report a UNIT ATTENTION with
+ * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
+ * on the target, until a REPORT LUNS command is
+ * received. SPC-4 behavior is to report it only once.
+ * NOTE: scsi_debug_scsi_level does not use the same
+ * values as struct scsi_device->scsi_level.
+ */
+ if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */
+ clear_luns_changed_on_target(devip);
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC,
+ LUNS_CHANGED_ASCQ);
+ if (debug)
+ cp = "reported luns data has changed";
+ break;
+ default:
+ pr_warn("%s: unexpected unit attention code=%d\n",
+ __func__, k);
+ if (debug)
+ cp = "unknown";
+ break;
+ }
+ clear_bit(k, devip->uas_bm);
+ if (debug)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s reports: Unit attention: %s\n",
+ my_name, cp);
+ return check_condition_result;
+ }
+ if ((UAS_TUR == uas_only) && devip->stopped) {
+ mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
+ 0x2);
+ if (debug)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s reports: Not ready: %s\n", my_name,
+ "initializing command required");
+ return check_condition_result;
+ }
+ return 0;
+}
+
+/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
+static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
+ int arr_len)
+{
+ int act_len;
+ struct scsi_data_buffer *sdb = scsi_in(scp);
+
+ if (!sdb->length)
+ return 0;
+ if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
+ return (DID_ERROR << 16);
+
+ act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
+ arr, arr_len);
+ sdb->resid = scsi_bufflen(scp) - act_len;
+
+ return 0;
+}
+
+/* Returns number of bytes fetched into 'arr' or -1 if error. */
+static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
+ int arr_len)
+{
+ if (!scsi_bufflen(scp))
+ return 0;
+ if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
+ return -1;
+
+ return scsi_sg_copy_to_buffer(scp, arr, arr_len);
+}
+
+
+static const char * inq_vendor_id = "Linux ";
+static const char * inq_product_id = "scsi_debug ";
+static const char *inq_product_rev = "0184"; /* version less '.' */
+
+/* Device identification VPD page. Returns number of bytes placed in arr */
+static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
+ int target_dev_id, int dev_id_num,
+ const char * dev_id_str,
+ int dev_id_str_len)
+{
+ int num, port_a;
+ char b[32];
+
+ port_a = target_dev_id + 1;
+ /* T10 vendor identifier field format (faked) */
+ arr[0] = 0x2; /* ASCII */
+ arr[1] = 0x1;
+ arr[2] = 0x0;
+ memcpy(&arr[4], inq_vendor_id, 8);
+ memcpy(&arr[12], inq_product_id, 16);
+ memcpy(&arr[28], dev_id_str, dev_id_str_len);
+ num = 8 + 16 + dev_id_str_len;
+ arr[3] = num;
+ num += 4;
+ if (dev_id_num >= 0) {
+ /* NAA-5, Logical unit identifier (binary) */
+ arr[num++] = 0x1; /* binary (not necessarily sas) */
+ arr[num++] = 0x3; /* PIV=0, lu, naa */
+ arr[num++] = 0x0;
+ arr[num++] = 0x8;
+ arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
+ arr[num++] = 0x33;
+ arr[num++] = 0x33;
+ arr[num++] = 0x30;
+ arr[num++] = (dev_id_num >> 24);
+ arr[num++] = (dev_id_num >> 16) & 0xff;
+ arr[num++] = (dev_id_num >> 8) & 0xff;
+ arr[num++] = dev_id_num & 0xff;
+ /* Target relative port number */
+ arr[num++] = 0x61; /* proto=sas, binary */
+ arr[num++] = 0x94; /* PIV=1, target port, rel port */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x4; /* length */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0;
+ arr[num++] = 0x1; /* relative port A */
+ }
+ /* NAA-5, Target port identifier */
+ arr[num++] = 0x61; /* proto=sas, binary */
+ arr[num++] = 0x93; /* piv=1, target port, naa */
+ arr[num++] = 0x0;
+ arr[num++] = 0x8;
+ arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
+ arr[num++] = 0x22;
+ arr[num++] = 0x22;
+ arr[num++] = 0x20;
+ arr[num++] = (port_a >> 24);
+ arr[num++] = (port_a >> 16) & 0xff;
+ arr[num++] = (port_a >> 8) & 0xff;
+ arr[num++] = port_a & 0xff;
+ /* NAA-5, Target port group identifier */
+ arr[num++] = 0x61; /* proto=sas, binary */
+ arr[num++] = 0x95; /* piv=1, target port group id */
+ arr[num++] = 0x0;
+ arr[num++] = 0x4;
+ arr[num++] = 0;
+ arr[num++] = 0;
+ arr[num++] = (port_group_id >> 8) & 0xff;
+ arr[num++] = port_group_id & 0xff;
+ /* NAA-5, Target device identifier */
+ arr[num++] = 0x61; /* proto=sas, binary */
+ arr[num++] = 0xa3; /* piv=1, target device, naa */
+ arr[num++] = 0x0;
+ arr[num++] = 0x8;
+ arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
+ arr[num++] = 0x22;
+ arr[num++] = 0x22;
+ arr[num++] = 0x20;
+ arr[num++] = (target_dev_id >> 24);
+ arr[num++] = (target_dev_id >> 16) & 0xff;
+ arr[num++] = (target_dev_id >> 8) & 0xff;
+ arr[num++] = target_dev_id & 0xff;
+ /* SCSI name string: Target device identifier */
+ arr[num++] = 0x63; /* proto=sas, UTF-8 */
+ arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
+ arr[num++] = 0x0;
+ arr[num++] = 24;
+ memcpy(arr + num, "naa.52222220", 12);
+ num += 12;
+ snprintf(b, sizeof(b), "%08X", target_dev_id);
+ memcpy(arr + num, b, 8);
+ num += 8;
+ memset(arr + num, 0, 4);
+ num += 4;
+ return num;
+}
+
+
+static unsigned char vpd84_data[] = {
+/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
+ 0x22,0x22,0x22,0x0,0xbb,0x1,
+ 0x22,0x22,0x22,0x0,0xbb,0x2,
+};
+
+/* Software interface identification VPD page */
+static int inquiry_evpd_84(unsigned char * arr)
+{
+ memcpy(arr, vpd84_data, sizeof(vpd84_data));
+ return sizeof(vpd84_data);
+}
+
+/* Management network addresses VPD page */
+static int inquiry_evpd_85(unsigned char * arr)
+{
+ int num = 0;
+ const char * na1 = "https://www.kernel.org/config";
+ const char * na2 = "http://www.kernel.org/log";
+ int plen, olen;
+
+ arr[num++] = 0x1; /* lu, storage config */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0;
+ olen = strlen(na1);
+ plen = olen + 1;
+ if (plen % 4)
+ plen = ((plen / 4) + 1) * 4;
+ arr[num++] = plen; /* length, null termianted, padded */
+ memcpy(arr + num, na1, olen);
+ memset(arr + num + olen, 0, plen - olen);
+ num += plen;
+
+ arr[num++] = 0x4; /* lu, logging */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0;
+ olen = strlen(na2);
+ plen = olen + 1;
+ if (plen % 4)
+ plen = ((plen / 4) + 1) * 4;
+ arr[num++] = plen; /* length, null terminated, padded */
+ memcpy(arr + num, na2, olen);
+ memset(arr + num + olen, 0, plen - olen);
+ num += plen;
+
+ return num;
+}
+
+/* SCSI ports VPD page */
+static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
+{
+ int num = 0;
+ int port_a, port_b;
+
+ port_a = target_dev_id + 1;
+ port_b = port_a + 1;
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0;
+ arr[num++] = 0x1; /* relative port 1 (primary) */
+ memset(arr + num, 0, 6);
+ num += 6;
+ arr[num++] = 0x0;
+ arr[num++] = 12; /* length tp descriptor */
+ /* naa-5 target port identifier (A) */
+ arr[num++] = 0x61; /* proto=sas, binary */
+ arr[num++] = 0x93; /* PIV=1, target port, NAA */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x8; /* length */
+ arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
+ arr[num++] = 0x22;
+ arr[num++] = 0x22;
+ arr[num++] = 0x20;
+ arr[num++] = (port_a >> 24);
+ arr[num++] = (port_a >> 16) & 0xff;
+ arr[num++] = (port_a >> 8) & 0xff;
+ arr[num++] = port_a & 0xff;
+
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x0;
+ arr[num++] = 0x2; /* relative port 2 (secondary) */
+ memset(arr + num, 0, 6);
+ num += 6;
+ arr[num++] = 0x0;
+ arr[num++] = 12; /* length tp descriptor */
+ /* naa-5 target port identifier (B) */
+ arr[num++] = 0x61; /* proto=sas, binary */
+ arr[num++] = 0x93; /* PIV=1, target port, NAA */
+ arr[num++] = 0x0; /* reserved */
+ arr[num++] = 0x8; /* length */
+ arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
+ arr[num++] = 0x22;
+ arr[num++] = 0x22;
+ arr[num++] = 0x20;
+ arr[num++] = (port_b >> 24);
+ arr[num++] = (port_b >> 16) & 0xff;
+ arr[num++] = (port_b >> 8) & 0xff;
+ arr[num++] = port_b & 0xff;
+
+ return num;
+}
+
+
+static unsigned char vpd89_data[] = {
+/* from 4th byte */ 0,0,0,0,
+'l','i','n','u','x',' ',' ',' ',
+'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
+'1','2','3','4',
+0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+0xec,0,0,0,
+0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
+0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
+0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
+0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
+0x53,0x41,
+0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
+0x20,0x20,
+0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
+0x10,0x80,
+0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
+0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
+0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
+0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
+0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
+0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
+0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
+0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
+0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
+};
+
+/* ATA Information VPD page */
+static int inquiry_evpd_89(unsigned char * arr)
+{
+ memcpy(arr, vpd89_data, sizeof(vpd89_data));
+ return sizeof(vpd89_data);
+}
+
+
+static unsigned char vpdb0_data[] = {
+ /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+};
+
+/* Block limits VPD page (SBC-3) */
+static int inquiry_evpd_b0(unsigned char * arr)
+{
+ unsigned int gran;
+
+ memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+
+ /* Optimal transfer length granularity */
+ gran = 1 << scsi_debug_physblk_exp;
+ arr[2] = (gran >> 8) & 0xff;
+ arr[3] = gran & 0xff;
+
+ /* Maximum Transfer Length */
+ if (sdebug_store_sectors > 0x400) {
+ arr[4] = (sdebug_store_sectors >> 24) & 0xff;
+ arr[5] = (sdebug_store_sectors >> 16) & 0xff;
+ arr[6] = (sdebug_store_sectors >> 8) & 0xff;
+ arr[7] = sdebug_store_sectors & 0xff;
+ }
+
+ /* Optimal Transfer Length */
+ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
+
+ if (scsi_debug_lbpu) {
+ /* Maximum Unmap LBA Count */
+ put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
+
+ /* Maximum Unmap Block Descriptor Count */
+ put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
+ }
+
+ /* Unmap Granularity Alignment */
+ if (scsi_debug_unmap_alignment) {
+ put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
+ arr[28] |= 0x80; /* UGAVALID */
+ }
+
+ /* Optimal Unmap Granularity */
+ put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
+
+ /* Maximum WRITE SAME Length */
+ put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
+
+ return 0x3c; /* Mandatory page length for Logical Block Provisioning */
+
+ return sizeof(vpdb0_data);
+}
+
+/* Block device characteristics VPD page (SBC-3) */
+static int inquiry_evpd_b1(unsigned char *arr)
+{
+ memset(arr, 0, 0x3c);
+ arr[0] = 0;
+ arr[1] = 1; /* non rotating medium (e.g. solid state) */
+ arr[2] = 0;
+ arr[3] = 5; /* less than 1.8" */
+
+ return 0x3c;
+}
+
+/* Logical block provisioning VPD page (SBC-3) */
+static int inquiry_evpd_b2(unsigned char *arr)
+{
+ memset(arr, 0, 0x4);
+ arr[0] = 0; /* threshold exponent */
+
+ if (scsi_debug_lbpu)
+ arr[1] = 1 << 7;
+
+ if (scsi_debug_lbpws)
+ arr[1] |= 1 << 6;
+
+ if (scsi_debug_lbpws10)
+ arr[1] |= 1 << 5;
+
+ if (scsi_debug_lbprz)
+ arr[1] |= 1 << 2;
+
+ return 0x4;
+}
+
+#define SDEBUG_LONG_INQ_SZ 96
+#define SDEBUG_MAX_INQ_ARR_SZ 584
+
+static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ unsigned char pq_pdt;
+ unsigned char * arr;
+ unsigned char *cmd = scp->cmnd;
+ int alloc_len, n, ret;
+ bool have_wlun;
+
+ alloc_len = (cmd[3] << 8) + cmd[4];
+ arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
+ if (! arr)
+ return DID_REQUEUE << 16;
+ have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
+ if (have_wlun)
+ pq_pdt = 0x1e; /* present, wlun */
+ else if (scsi_debug_no_lun_0 && (0 == devip->lun))
+ pq_pdt = 0x7f; /* not present, no device type */
+ else
+ pq_pdt = (scsi_debug_ptype & 0x1f);
+ arr[0] = pq_pdt;
+ if (0x2 & cmd[1]) { /* CMDDT bit set */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ kfree(arr);
+ return check_condition_result;
+ } else if (0x1 & cmd[1]) { /* EVPD bit set */
+ int lu_id_num, port_group_id, target_dev_id, len;
+ char lu_id_str[6];
+ int host_no = devip->sdbg_host->shost->host_no;
+
+ port_group_id = (((host_no + 1) & 0x7f) << 8) +
+ (devip->channel & 0x7f);
+ if (0 == scsi_debug_vpd_use_hostno)
+ host_no = 0;
+ lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
+ (devip->target * 1000) + devip->lun);
+ target_dev_id = ((host_no + 1) * 2000) +
+ (devip->target * 1000) - 3;
+ len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
+ if (0 == cmd[2]) { /* supported vital product data pages */
+ arr[1] = cmd[2]; /*sanity */
+ n = 4;
+ arr[n++] = 0x0; /* this page */
+ arr[n++] = 0x80; /* unit serial number */
+ arr[n++] = 0x83; /* device identification */
+ arr[n++] = 0x84; /* software interface ident. */
+ arr[n++] = 0x85; /* management network addresses */
+ arr[n++] = 0x86; /* extended inquiry */
+ arr[n++] = 0x87; /* mode page policy */
+ arr[n++] = 0x88; /* SCSI ports */
+ arr[n++] = 0x89; /* ATA information */
+ arr[n++] = 0xb0; /* Block limits (SBC) */
+ arr[n++] = 0xb1; /* Block characteristics (SBC) */
+ if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
+ arr[n++] = 0xb2;
+ arr[3] = n - 4; /* number of supported VPD pages */
+ } else if (0x80 == cmd[2]) { /* unit serial number */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = len;
+ memcpy(&arr[4], lu_id_str, len);
+ } else if (0x83 == cmd[2]) { /* device identification */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
+ target_dev_id, lu_id_num,
+ lu_id_str, len);
+ } else if (0x84 == cmd[2]) { /* Software interface ident. */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_84(&arr[4]);
+ } else if (0x85 == cmd[2]) { /* Management network addresses */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_85(&arr[4]);
+ } else if (0x86 == cmd[2]) { /* extended inquiry */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = 0x3c; /* number of following entries */
+ if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
+ arr[4] = 0x4; /* SPT: GRD_CHK:1 */
+ else if (scsi_debug_dif)
+ arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
+ else
+ arr[4] = 0x0; /* no protection stuff */
+ arr[5] = 0x7; /* head of q, ordered + simple q's */
+ } else if (0x87 == cmd[2]) { /* mode page policy */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = 0x8; /* number of following entries */
+ arr[4] = 0x2; /* disconnect-reconnect mp */
+ arr[6] = 0x80; /* mlus, shared */
+ arr[8] = 0x18; /* protocol specific lu */
+ arr[10] = 0x82; /* mlus, per initiator port */
+ } else if (0x88 == cmd[2]) { /* SCSI Ports */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
+ } else if (0x89 == cmd[2]) { /* ATA information */
+ arr[1] = cmd[2]; /*sanity */
+ n = inquiry_evpd_89(&arr[4]);
+ arr[2] = (n >> 8);
+ arr[3] = (n & 0xff);
+ } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_b0(&arr[4]);
+ } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_b1(&arr[4]);
+ } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_b2(&arr[4]);
+ } else {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ kfree(arr);
+ return check_condition_result;
+ }
+ len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
+ ret = fill_from_dev_buffer(scp, arr,
+ min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ kfree(arr);
+ return ret;
+ }
+ /* drops through here for a standard inquiry */
+ arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
+ arr[2] = scsi_debug_scsi_level;
+ arr[3] = 2; /* response_data_format==2 */
+ arr[4] = SDEBUG_LONG_INQ_SZ - 5;
+ arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
+ if (0 == scsi_debug_vpd_use_hostno)
+ arr[5] = 0x10; /* claim: implicit TGPS */
+ arr[6] = 0x10; /* claim: MultiP */
+ /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
+ arr[7] = 0xa; /* claim: LINKED + CMDQUE */
+ memcpy(&arr[8], inq_vendor_id, 8);
+ memcpy(&arr[16], inq_product_id, 16);
+ memcpy(&arr[32], inq_product_rev, 4);
+ /* version descriptors (2 bytes each) follow */
+ arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
+ arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
+ n = 62;
+ if (scsi_debug_ptype == 0) {
+ arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
+ } else if (scsi_debug_ptype == 1) {
+ arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
+ }
+ arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
+ ret = fill_from_dev_buffer(scp, arr,
+ min(alloc_len, SDEBUG_LONG_INQ_SZ));
+ kfree(arr);
+ return ret;
+}
+
+static int resp_requests(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ unsigned char * sbuff;
+ unsigned char *cmd = scp->cmnd;
+ unsigned char arr[SCSI_SENSE_BUFFERSIZE];
+ bool dsense, want_dsense;
+ int len = 18;
+
+ memset(arr, 0, sizeof(arr));
+ dsense = !!(cmd[1] & 1);
+ want_dsense = dsense || scsi_debug_dsense;
+ sbuff = scp->sense_buffer;
+ if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
+ if (dsense) {
+ arr[0] = 0x72;
+ arr[1] = 0x0; /* NO_SENSE in sense_key */
+ arr[2] = THRESHOLD_EXCEEDED;
+ arr[3] = 0xff; /* TEST set and MRIE==6 */
+ len = 8;
+ } else {
+ arr[0] = 0x70;
+ arr[2] = 0x0; /* NO_SENSE in sense_key */
+ arr[7] = 0xa; /* 18 byte sense buffer */
+ arr[12] = THRESHOLD_EXCEEDED;
+ arr[13] = 0xff; /* TEST set and MRIE==6 */
+ }
+ } else {
+ memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
+ if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
+ ; /* have sense and formats match */
+ else if (arr[0] <= 0x70) {
+ if (dsense) {
+ memset(arr, 0, 8);
+ arr[0] = 0x72;
+ len = 8;
+ } else {
+ memset(arr, 0, 18);
+ arr[0] = 0x70;
+ arr[7] = 0xa;
+ }
+ } else if (dsense) {
+ memset(arr, 0, 8);
+ arr[0] = 0x72;
+ arr[1] = sbuff[2]; /* sense key */
+ arr[2] = sbuff[12]; /* asc */
+ arr[3] = sbuff[13]; /* ascq */
+ len = 8;
+ } else {
+ memset(arr, 0, 18);
+ arr[0] = 0x70;
+ arr[2] = sbuff[1];
+ arr[7] = 0xa;
+ arr[12] = sbuff[1];
+ arr[13] = sbuff[3];
+ }
+
+ }
+ mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
+ return fill_from_dev_buffer(scp, arr, len);
+}
+
+static int resp_start_stop(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ int power_cond, start;
+
+ power_cond = (cmd[4] & 0xf0) >> 4;
+ if (power_cond) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
+ return check_condition_result;
+ }
+ start = cmd[4] & 1;
+ if (start == devip->stopped)
+ devip->stopped = !start;
+ return 0;
+}
+
+static sector_t get_sdebug_capacity(void)
+{
+ if (scsi_debug_virtual_gb > 0)
+ return (sector_t)scsi_debug_virtual_gb *
+ (1073741824 / scsi_debug_sector_size);
+ else
+ return sdebug_store_sectors;
+}
+
+#define SDEBUG_READCAP_ARR_SZ 8
+static int resp_readcap(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ unsigned char arr[SDEBUG_READCAP_ARR_SZ];
+ unsigned int capac;
+
+ /* following just in case virtual_gb changed */
+ sdebug_capacity = get_sdebug_capacity();
+ memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
+ if (sdebug_capacity < 0xffffffff) {
+ capac = (unsigned int)sdebug_capacity - 1;
+ arr[0] = (capac >> 24);
+ arr[1] = (capac >> 16) & 0xff;
+ arr[2] = (capac >> 8) & 0xff;
+ arr[3] = capac & 0xff;
+ } else {
+ arr[0] = 0xff;
+ arr[1] = 0xff;
+ arr[2] = 0xff;
+ arr[3] = 0xff;
+ }
+ arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
+ arr[7] = scsi_debug_sector_size & 0xff;
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
+}
+
+#define SDEBUG_READCAP16_ARR_SZ 32
+static int resp_readcap16(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
+ unsigned long long capac;
+ int k, alloc_len;
+
+ alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
+ + cmd[13]);
+ /* following just in case virtual_gb changed */
+ sdebug_capacity = get_sdebug_capacity();
+ memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
+ capac = sdebug_capacity - 1;
+ for (k = 0; k < 8; ++k, capac >>= 8)
+ arr[7 - k] = capac & 0xff;
+ arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
+ arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
+ arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
+ arr[11] = scsi_debug_sector_size & 0xff;
+ arr[13] = scsi_debug_physblk_exp & 0xf;
+ arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+
+ if (scsi_debug_lbp()) {
+ arr[14] |= 0x80; /* LBPME */
+ if (scsi_debug_lbprz)
+ arr[14] |= 0x40; /* LBPRZ */
+ }
+
+ arr[15] = scsi_debug_lowest_aligned & 0xff;
+
+ if (scsi_debug_dif) {
+ arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
+ arr[12] |= 1; /* PROT_EN */
+ }
+
+ return fill_from_dev_buffer(scp, arr,
+ min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
+}
+
+#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
+
+static int resp_report_tgtpgs(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned char * arr;
+ int host_no = devip->sdbg_host->shost->host_no;
+ int n, ret, alen, rlen;
+ int port_group_a, port_group_b, port_a, port_b;
+
+ alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
+ + cmd[9]);
+
+ arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
+ if (! arr)
+ return DID_REQUEUE << 16;
+ /*
+ * EVPD page 0x88 states we have two ports, one
+ * real and a fake port with no device connected.
+ * So we create two port groups with one port each
+ * and set the group with port B to unavailable.
+ */
+ port_a = 0x1; /* relative port A */
+ port_b = 0x2; /* relative port B */
+ port_group_a = (((host_no + 1) & 0x7f) << 8) +
+ (devip->channel & 0x7f);
+ port_group_b = (((host_no + 1) & 0x7f) << 8) +
+ (devip->channel & 0x7f) + 0x80;
+
+ /*
+ * The asymmetric access state is cycled according to the host_id.
+ */
+ n = 4;
+ if (0 == scsi_debug_vpd_use_hostno) {
+ arr[n++] = host_no % 3; /* Asymm access state */
+ arr[n++] = 0x0F; /* claim: all states are supported */
+ } else {
+ arr[n++] = 0x0; /* Active/Optimized path */
+ arr[n++] = 0x01; /* claim: only support active/optimized paths */
+ }
+ arr[n++] = (port_group_a >> 8) & 0xff;
+ arr[n++] = port_group_a & 0xff;
+ arr[n++] = 0; /* Reserved */
+ arr[n++] = 0; /* Status code */
+ arr[n++] = 0; /* Vendor unique */
+ arr[n++] = 0x1; /* One port per group */
+ arr[n++] = 0; /* Reserved */
+ arr[n++] = 0; /* Reserved */
+ arr[n++] = (port_a >> 8) & 0xff;
+ arr[n++] = port_a & 0xff;
+ arr[n++] = 3; /* Port unavailable */
+ arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
+ arr[n++] = (port_group_b >> 8) & 0xff;
+ arr[n++] = port_group_b & 0xff;
+ arr[n++] = 0; /* Reserved */
+ arr[n++] = 0; /* Status code */
+ arr[n++] = 0; /* Vendor unique */
+ arr[n++] = 0x1; /* One port per group */
+ arr[n++] = 0; /* Reserved */
+ arr[n++] = 0; /* Reserved */
+ arr[n++] = (port_b >> 8) & 0xff;
+ arr[n++] = port_b & 0xff;
+
+ rlen = n - 4;
+ arr[0] = (rlen >> 24) & 0xff;
+ arr[1] = (rlen >> 16) & 0xff;
+ arr[2] = (rlen >> 8) & 0xff;
+ arr[3] = rlen & 0xff;
+
+ /*
+ * Return the smallest value of either
+ * - The allocated length
+ * - The constructed command length
+ * - The maximum array size
+ */
+ rlen = min(alen,n);
+ ret = fill_from_dev_buffer(scp, arr,
+ min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
+ kfree(arr);
+ return ret;
+}
+
+static int
+resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ bool rctd;
+ u8 reporting_opts, req_opcode, sdeb_i, supp;
+ u16 req_sa, u;
+ u32 alloc_len, a_len;
+ int k, offset, len, errsts, count, bump, na;
+ const struct opcode_info_t *oip;
+ const struct opcode_info_t *r_oip;
+ u8 *arr;
+ u8 *cmd = scp->cmnd;
+
+ rctd = !!(cmd[2] & 0x80);
+ reporting_opts = cmd[2] & 0x7;
+ req_opcode = cmd[3];
+ req_sa = get_unaligned_be16(cmd + 4);
+ alloc_len = get_unaligned_be32(cmd + 6);
+ if (alloc_len < 4 || alloc_len > 0xffff) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
+ return check_condition_result;
+ }
+ if (alloc_len > 8192)
+ a_len = 8192;
+ else
+ a_len = alloc_len;
+ arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
+ if (NULL == arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ switch (reporting_opts) {
+ case 0: /* all commands */
+ /* count number of commands */
+ for (count = 0, oip = opcode_info_arr;
+ oip->num_attached != 0xff; ++oip) {
+ if (F_INV_OP & oip->flags)
+ continue;
+ count += (oip->num_attached + 1);
+ }
+ bump = rctd ? 20 : 8;
+ put_unaligned_be32(count * bump, arr);
+ for (offset = 4, oip = opcode_info_arr;
+ oip->num_attached != 0xff && offset < a_len; ++oip) {
+ if (F_INV_OP & oip->flags)
+ continue;
+ na = oip->num_attached;
+ arr[offset] = oip->opcode;
+ put_unaligned_be16(oip->sa, arr + offset + 2);
+ if (rctd)
+ arr[offset + 5] |= 0x2;
+ if (FF_SA & oip->flags)
+ arr[offset + 5] |= 0x1;
+ put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
+ if (rctd)
+ put_unaligned_be16(0xa, arr + offset + 8);
+ r_oip = oip;
+ for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
+ if (F_INV_OP & oip->flags)
+ continue;
+ offset += bump;
+ arr[offset] = oip->opcode;
+ put_unaligned_be16(oip->sa, arr + offset + 2);
+ if (rctd)
+ arr[offset + 5] |= 0x2;
+ if (FF_SA & oip->flags)
+ arr[offset + 5] |= 0x1;
+ put_unaligned_be16(oip->len_mask[0],
+ arr + offset + 6);
+ if (rctd)
+ put_unaligned_be16(0xa,
+ arr + offset + 8);
+ }
+ oip = r_oip;
+ offset += bump;
+ }
+ break;
+ case 1: /* one command: opcode only */
+ case 2: /* one command: opcode plus service action */
+ case 3: /* one command: if sa==0 then opcode only else opcode+sa */
+ sdeb_i = opcode_ind_arr[req_opcode];
+ oip = &opcode_info_arr[sdeb_i];
+ if (F_INV_OP & oip->flags) {
+ supp = 1;
+ offset = 4;
+ } else {
+ if (1 == reporting_opts) {
+ if (FF_SA & oip->flags) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ 2, 2);
+ kfree(arr);
+ return check_condition_result;
+ }
+ req_sa = 0;
+ } else if (2 == reporting_opts &&
+ 0 == (FF_SA & oip->flags)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
+ kfree(arr); /* point at requested sa */
+ return check_condition_result;
+ }
+ if (0 == (FF_SA & oip->flags) &&
+ req_opcode == oip->opcode)
+ supp = 3;
+ else if (0 == (FF_SA & oip->flags)) {
+ na = oip->num_attached;
+ for (k = 0, oip = oip->arrp; k < na;
+ ++k, ++oip) {
+ if (req_opcode == oip->opcode)
+ break;
+ }
+ supp = (k >= na) ? 1 : 3;
+ } else if (req_sa != oip->sa) {
+ na = oip->num_attached;
+ for (k = 0, oip = oip->arrp; k < na;
+ ++k, ++oip) {
+ if (req_sa == oip->sa)
+ break;
+ }
+ supp = (k >= na) ? 1 : 3;
+ } else
+ supp = 3;
+ if (3 == supp) {
+ u = oip->len_mask[0];
+ put_unaligned_be16(u, arr + 2);
+ arr[4] = oip->opcode;
+ for (k = 1; k < u; ++k)
+ arr[4 + k] = (k < 16) ?
+ oip->len_mask[k] : 0xff;
+ offset = 4 + u;
+ } else
+ offset = 4;
+ }
+ arr[1] = (rctd ? 0x80 : 0) | supp;
+ if (rctd) {
+ put_unaligned_be16(0xa, arr + offset);
+ offset += 12;
+ }
+ break;
+ default:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
+ kfree(arr);
+ return check_condition_result;
+ }
+ offset = (offset < a_len) ? offset : a_len;
+ len = (offset < alloc_len) ? offset : alloc_len;
+ errsts = fill_from_dev_buffer(scp, arr, len);
+ kfree(arr);
+ return errsts;
+}
+
+static int
+resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ bool repd;
+ u32 alloc_len, len;
+ u8 arr[16];
+ u8 *cmd = scp->cmnd;
+
+ memset(arr, 0, sizeof(arr));
+ repd = !!(cmd[2] & 0x80);
+ alloc_len = get_unaligned_be32(cmd + 6);
+ if (alloc_len < 4) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
+ return check_condition_result;
+ }
+ arr[0] = 0xc8; /* ATS | ATSS | LURS */
+ arr[1] = 0x1; /* ITNRS */
+ if (repd) {
+ arr[3] = 0xc;
+ len = 16;
+ } else
+ len = 4;
+
+ len = (len < alloc_len) ? len : alloc_len;
+ return fill_from_dev_buffer(scp, arr, len);
+}
+
+/* <<Following mode page info copied from ST318451LW>> */
+
+static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
+{ /* Read-Write Error Recovery page for mode_sense */
+ unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
+ 5, 0, 0xff, 0xff};
+
+ memcpy(p, err_recov_pg, sizeof(err_recov_pg));
+ if (1 == pcontrol)
+ memset(p + 2, 0, sizeof(err_recov_pg) - 2);
+ return sizeof(err_recov_pg);
+}
+
+static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
+{ /* Disconnect-Reconnect page for mode_sense */
+ unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(p, disconnect_pg, sizeof(disconnect_pg));
+ if (1 == pcontrol)
+ memset(p + 2, 0, sizeof(disconnect_pg) - 2);
+ return sizeof(disconnect_pg);
+}
+
+static int resp_format_pg(unsigned char * p, int pcontrol, int target)
+{ /* Format device page for mode_sense */
+ unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0x40, 0, 0, 0};
+
+ memcpy(p, format_pg, sizeof(format_pg));
+ p[10] = (sdebug_sectors_per >> 8) & 0xff;
+ p[11] = sdebug_sectors_per & 0xff;
+ p[12] = (scsi_debug_sector_size >> 8) & 0xff;
+ p[13] = scsi_debug_sector_size & 0xff;
+ if (scsi_debug_removable)
+ p[20] |= 0x20; /* should agree with INQUIRY */
+ if (1 == pcontrol)
+ memset(p + 2, 0, sizeof(format_pg) - 2);
+ return sizeof(format_pg);
+}
+
+static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
+{ /* Caching page for mode_sense */
+ unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
+
+ if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
+ caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
+ memcpy(p, caching_pg, sizeof(caching_pg));
+ if (1 == pcontrol)
+ memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
+ else if (2 == pcontrol)
+ memcpy(p, d_caching_pg, sizeof(d_caching_pg));
+ return sizeof(caching_pg);
+}
+
+static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
+{ /* Control mode page for mode_sense */
+ unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+ unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0x2, 0x4b};
+
+ if (scsi_debug_dsense)
+ ctrl_m_pg[2] |= 0x4;
+ else
+ ctrl_m_pg[2] &= ~0x4;
+
+ if (scsi_debug_ato)
+ ctrl_m_pg[5] |= 0x80; /* ATO=1 */
+
+ memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
+ if (1 == pcontrol)
+ memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
+ else if (2 == pcontrol)
+ memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
+ return sizeof(ctrl_m_pg);
+}
+
+
+static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
+{ /* Informational Exceptions control mode page for mode_sense */
+ unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0};
+ unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0};
+
+ memcpy(p, iec_m_pg, sizeof(iec_m_pg));
+ if (1 == pcontrol)
+ memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
+ else if (2 == pcontrol)
+ memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
+ return sizeof(iec_m_pg);
+}
+
+static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
+{ /* SAS SSP mode page - short format for mode_sense */
+ unsigned char sas_sf_m_pg[] = {0x19, 0x6,
+ 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
+
+ memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
+ if (1 == pcontrol)
+ memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
+ return sizeof(sas_sf_m_pg);
+}
+
+
+static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
+ int target_dev_id)
+{ /* SAS phy control and discover mode page for mode_sense */
+ unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
+ 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
+ 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
+ 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
+ 0x2, 0, 0, 0, 0, 0, 0, 0,
+ 0x88, 0x99, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
+ 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
+ 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
+ 0x3, 0, 0, 0, 0, 0, 0, 0,
+ 0x88, 0x99, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ int port_a, port_b;
+
+ port_a = target_dev_id + 1;
+ port_b = port_a + 1;
+ memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
+ p[20] = (port_a >> 24);
+ p[21] = (port_a >> 16) & 0xff;
+ p[22] = (port_a >> 8) & 0xff;
+ p[23] = port_a & 0xff;
+ p[48 + 20] = (port_b >> 24);
+ p[48 + 21] = (port_b >> 16) & 0xff;
+ p[48 + 22] = (port_b >> 8) & 0xff;
+ p[48 + 23] = port_b & 0xff;
+ if (1 == pcontrol)
+ memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
+ return sizeof(sas_pcd_m_pg);
+}
+
+static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
+{ /* SAS SSP shared protocol specific port mode subpage */
+ unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
+ if (1 == pcontrol)
+ memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
+ return sizeof(sas_sha_m_pg);
+}
+
+#define SDEBUG_MAX_MSENSE_SZ 256
+
+static int
+resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ unsigned char dbd, llbaa;
+ int pcontrol, pcode, subpcode, bd_len;
+ unsigned char dev_spec;
+ int k, alloc_len, msense_6, offset, len, target_dev_id;
+ int target = scp->device->id;
+ unsigned char * ap;
+ unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
+ unsigned char *cmd = scp->cmnd;
+
+ dbd = !!(cmd[1] & 0x8);
+ pcontrol = (cmd[2] & 0xc0) >> 6;
+ pcode = cmd[2] & 0x3f;
+ subpcode = cmd[3];
+ msense_6 = (MODE_SENSE == cmd[0]);
+ llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
+ if ((0 == scsi_debug_ptype) && (0 == dbd))
+ bd_len = llbaa ? 16 : 8;
+ else
+ bd_len = 0;
+ alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
+ memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
+ if (0x3 == pcontrol) { /* Saving values not supported */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
+ return check_condition_result;
+ }
+ target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
+ (devip->target * 1000) - 3;
+ /* set DPOFUA bit for disks */
+ if (0 == scsi_debug_ptype)
+ dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
+ else
+ dev_spec = 0x0;
+ if (msense_6) {
+ arr[2] = dev_spec;
+ arr[3] = bd_len;
+ offset = 4;
+ } else {
+ arr[3] = dev_spec;
+ if (16 == bd_len)
+ arr[4] = 0x1; /* set LONGLBA bit */
+ arr[7] = bd_len; /* assume 255 or less */
+ offset = 8;
+ }
+ ap = arr + offset;
+ if ((bd_len > 0) && (!sdebug_capacity))
+ sdebug_capacity = get_sdebug_capacity();
+
+ if (8 == bd_len) {
+ if (sdebug_capacity > 0xfffffffe) {
+ ap[0] = 0xff;
+ ap[1] = 0xff;
+ ap[2] = 0xff;
+ ap[3] = 0xff;
+ } else {
+ ap[0] = (sdebug_capacity >> 24) & 0xff;
+ ap[1] = (sdebug_capacity >> 16) & 0xff;
+ ap[2] = (sdebug_capacity >> 8) & 0xff;
+ ap[3] = sdebug_capacity & 0xff;
+ }
+ ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
+ ap[7] = scsi_debug_sector_size & 0xff;
+ offset += bd_len;
+ ap = arr + offset;
+ } else if (16 == bd_len) {
+ unsigned long long capac = sdebug_capacity;
+
+ for (k = 0; k < 8; ++k, capac >>= 8)
+ ap[7 - k] = capac & 0xff;
+ ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
+ ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
+ ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
+ ap[15] = scsi_debug_sector_size & 0xff;
+ offset += bd_len;
+ ap = arr + offset;
+ }
+
+ if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
+ /* TODO: Control Extension page */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
+ }
+ switch (pcode) {
+ case 0x1: /* Read-Write error recovery page, direct access */
+ len = resp_err_recov_pg(ap, pcontrol, target);
+ offset += len;
+ break;
+ case 0x2: /* Disconnect-Reconnect page, all devices */
+ len = resp_disconnect_pg(ap, pcontrol, target);
+ offset += len;
+ break;
+ case 0x3: /* Format device page, direct access */
+ len = resp_format_pg(ap, pcontrol, target);
+ offset += len;
+ break;
+ case 0x8: /* Caching page, direct access */
+ len = resp_caching_pg(ap, pcontrol, target);
+ offset += len;
+ break;
+ case 0xa: /* Control Mode page, all devices */
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ offset += len;
+ break;
+ case 0x19: /* if spc==1 then sas phy, control+discover */
+ if ((subpcode > 0x2) && (subpcode < 0xff)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
+ }
+ len = 0;
+ if ((0x0 == subpcode) || (0xff == subpcode))
+ len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
+ if ((0x1 == subpcode) || (0xff == subpcode))
+ len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
+ target_dev_id);
+ if ((0x2 == subpcode) || (0xff == subpcode))
+ len += resp_sas_sha_m_spg(ap + len, pcontrol);
+ offset += len;
+ break;
+ case 0x1c: /* Informational Exceptions Mode page, all devices */
+ len = resp_iec_m_pg(ap, pcontrol, target);
+ offset += len;
+ break;
+ case 0x3f: /* Read all Mode pages */
+ if ((0 == subpcode) || (0xff == subpcode)) {
+ len = resp_err_recov_pg(ap, pcontrol, target);
+ len += resp_disconnect_pg(ap + len, pcontrol, target);
+ len += resp_format_pg(ap + len, pcontrol, target);
+ len += resp_caching_pg(ap + len, pcontrol, target);
+ len += resp_ctrl_m_pg(ap + len, pcontrol, target);
+ len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode) {
+ len += resp_sas_pcd_m_spg(ap + len, pcontrol,
+ target, target_dev_id);
+ len += resp_sas_sha_m_spg(ap + len, pcontrol);
+ }
+ len += resp_iec_m_pg(ap + len, pcontrol, target);
+ } else {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
+ }
+ offset += len;
+ break;
+ default:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+ }
+ if (msense_6)
+ arr[0] = offset - 1;
+ else {
+ arr[0] = ((offset - 2) >> 8) & 0xff;
+ arr[1] = (offset - 2) & 0xff;
+ }
+ return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
+}
+
+#define SDEBUG_MAX_MSELECT_SZ 512
+
+static int
+resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
+ int param_len, res, mpage;
+ unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
+ unsigned char *cmd = scp->cmnd;
+ int mselect6 = (MODE_SELECT == cmd[0]);
+
+ memset(arr, 0, sizeof(arr));
+ pf = cmd[1] & 0x10;
+ sp = cmd[1] & 0x1;
+ param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
+ if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
+ return check_condition_result;
+ }
+ res = fetch_to_dev_buffer(scp, arr, param_len);
+ if (-1 == res)
+ return (DID_ERROR << 16);
+ else if ((res < param_len) &&
+ (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: cdb indicated=%d, IO sent=%d bytes\n",
+ __func__, param_len, res);
+ md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
+ bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
+ if (md_len > 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
+ return check_condition_result;
+ }
+ off = bd_len + (mselect6 ? 4 : 8);
+ mpage = arr[off] & 0x3f;
+ ps = !!(arr[off] & 0x80);
+ if (ps) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
+ return check_condition_result;
+ }
+ spf = !!(arr[off] & 0x40);
+ pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
+ (arr[off + 1] + 2);
+ if ((pg_len + off) > param_len) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ PARAMETER_LIST_LENGTH_ERR, 0);
+ return check_condition_result;
+ }
+ switch (mpage) {
+ case 0x8: /* Caching Mode page */
+ if (caching_pg[1] == arr[off + 1]) {
+ memcpy(caching_pg + 2, arr + off + 2,
+ sizeof(caching_pg) - 2);
+ goto set_mode_changed_ua;
+ }
+ break;
+ case 0xa: /* Control Mode page */
+ if (ctrl_m_pg[1] == arr[off + 1]) {
+ memcpy(ctrl_m_pg + 2, arr + off + 2,
+ sizeof(ctrl_m_pg) - 2);
+ scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
+ goto set_mode_changed_ua;
+ }
+ break;
+ case 0x1c: /* Informational Exceptions Mode page */
+ if (iec_m_pg[1] == arr[off + 1]) {
+ memcpy(iec_m_pg + 2, arr + off + 2,
+ sizeof(iec_m_pg) - 2);
+ goto set_mode_changed_ua;
+ }
+ break;
+ default:
+ break;
+ }
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
+ return check_condition_result;
+set_mode_changed_ua:
+ set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
+ return 0;
+}
+
+static int resp_temp_l_pg(unsigned char * arr)
+{
+ unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
+ 0x0, 0x1, 0x3, 0x2, 0x0, 65,
+ };
+
+ memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
+ return sizeof(temp_l_pg);
+}
+
+static int resp_ie_l_pg(unsigned char * arr)
+{
+ unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
+ };
+
+ memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
+ if (iec_m_pg[2] & 0x4) { /* TEST bit set */
+ arr[4] = THRESHOLD_EXCEEDED;
+ arr[5] = 0xff;
+ }
+ return sizeof(ie_l_pg);
+}
+
+#define SDEBUG_MAX_LSENSE_SZ 512
+
+static int resp_log_sense(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
+ unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
+ unsigned char *cmd = scp->cmnd;
+
+ memset(arr, 0, sizeof(arr));
+ ppc = cmd[1] & 0x2;
+ sp = cmd[1] & 0x1;
+ if (ppc || sp) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
+ return check_condition_result;
+ }
+ pcontrol = (cmd[2] & 0xc0) >> 6;
+ pcode = cmd[2] & 0x3f;
+ subpcode = cmd[3] & 0xff;
+ alloc_len = (cmd[7] << 8) + cmd[8];
+ arr[0] = pcode;
+ if (0 == subpcode) {
+ switch (pcode) {
+ case 0x0: /* Supported log pages log page */
+ n = 4;
+ arr[n++] = 0x0; /* this page */
+ arr[n++] = 0xd; /* Temperature */
+ arr[n++] = 0x2f; /* Informational exceptions */
+ arr[3] = n - 4;
+ break;
+ case 0xd: /* Temperature log page */
+ arr[3] = resp_temp_l_pg(arr + 4);
+ break;
+ case 0x2f: /* Informational exceptions log page */
+ arr[3] = resp_ie_l_pg(arr + 4);
+ break;
+ default:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+ }
+ } else if (0xff == subpcode) {
+ arr[0] |= 0x40;
+ arr[1] = subpcode;
+ switch (pcode) {
+ case 0x0: /* Supported log pages and subpages log page */
+ n = 4;
+ arr[n++] = 0x0;
+ arr[n++] = 0x0; /* 0,0 page */
+ arr[n++] = 0x0;
+ arr[n++] = 0xff; /* this page */
+ arr[n++] = 0xd;
+ arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0x2f;
+ arr[n++] = 0x0; /* Informational exceptions */
+ arr[3] = n - 4;
+ break;
+ case 0xd: /* Temperature subpages */
+ n = 4;
+ arr[n++] = 0xd;
+ arr[n++] = 0x0; /* Temperature */
+ arr[3] = n - 4;
+ break;
+ case 0x2f: /* Informational exceptions subpages */
+ n = 4;
+ arr[n++] = 0x2f;
+ arr[n++] = 0x0; /* Informational exceptions */
+ arr[3] = n - 4;
+ break;
+ default:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+ }
+ } else {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
+ }
+ len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
+ return fill_from_dev_buffer(scp, arr,
+ min(len, SDEBUG_MAX_INQ_ARR_SZ));
+}
+
+static int check_device_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num)
+{
+ if (lba + num > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ /* transfer length excessive (tie in to block limits VPD page) */
+ if (num > sdebug_store_sectors) {
+ /* needs work to find which cdb byte 'num' comes from */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+ return 0;
+}
+
+/* Returns number of bytes copied or -1 if error. */
+static int
+do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
+{
+ int ret;
+ u64 block, rest = 0;
+ struct scsi_data_buffer *sdb;
+ enum dma_data_direction dir;
+ size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
+ off_t);
+
+ if (do_write) {
+ sdb = scsi_out(scmd);
+ dir = DMA_TO_DEVICE;
+ func = sg_pcopy_to_buffer;
+ } else {
+ sdb = scsi_in(scmd);
+ dir = DMA_FROM_DEVICE;
+ func = sg_pcopy_from_buffer;
+ }
+
+ if (!sdb->length)
+ return 0;
+ if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
+ return -1;
+
+ block = do_div(lba, sdebug_store_sectors);
+ if (block + num > sdebug_store_sectors)
+ rest = block + num - sdebug_store_sectors;
+
+ ret = func(sdb->table.sgl, sdb->table.nents,
+ fake_storep + (block * scsi_debug_sector_size),
+ (num - rest) * scsi_debug_sector_size, 0);
+ if (ret != (num - rest) * scsi_debug_sector_size)
+ return ret;
+
+ if (rest) {
+ ret += func(sdb->table.sgl, sdb->table.nents,
+ fake_storep, rest * scsi_debug_sector_size,
+ (num - rest) * scsi_debug_sector_size);
+ }
+
+ return ret;
+}
+
+/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into fake_store(lba,num) and return true. If comparison fails then
+ * return false. */
+static bool
+comp_write_worker(u64 lba, u32 num, const u8 *arr)
+{
+ bool res;
+ u64 block, rest = 0;
+ u32 store_blks = sdebug_store_sectors;
+ u32 lb_size = scsi_debug_sector_size;
+
+ block = do_div(lba, store_blks);
+ if (block + num > store_blks)
+ rest = block + num - store_blks;
+
+ res = !memcmp(fake_storep + (block * lb_size), arr,
+ (num - rest) * lb_size);
+ if (!res)
+ return res;
+ if (rest)
+ res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
+ rest * lb_size);
+ if (!res)
+ return res;
+ arr += num * lb_size;
+ memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
+ if (rest)
+ memcpy(fake_storep, arr + ((num - rest) * lb_size),
+ rest * lb_size);
+ return res;
+}
+
+static __be16 dif_compute_csum(const void *buf, int len)
+{
+ __be16 csum;
+
+ if (scsi_debug_guard)
+ csum = (__force __be16)ip_compute_csum(buf, len);
+ else
+ csum = cpu_to_be16(crc_t10dif(buf, len));
+
+ return csum;
+}
+
+static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
+ sector_t sector, u32 ei_lba)
+{
+ __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+
+ if (sdt->guard_tag != csum) {
+ pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
+ __func__,
+ (unsigned long)sector,
+ be16_to_cpu(sdt->guard_tag),
+ be16_to_cpu(csum));
+ return 0x01;
+ }
+ if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
+ be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ pr_err("%s: REF check failed on sector %lu\n",
+ __func__, (unsigned long)sector);
+ return 0x03;
+ }
+ if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ be32_to_cpu(sdt->ref_tag) != ei_lba) {
+ pr_err("%s: REF check failed on sector %lu\n",
+ __func__, (unsigned long)sector);
+ return 0x03;
+ }
+ return 0;
+}
+
+static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+ unsigned int sectors, bool read)
+{
+ size_t resid;
+ void *paddr;
+ const void *dif_store_end = dif_storep + sdebug_store_sectors;
+ struct sg_mapping_iter miter;
+
+ /* Bytes of protection data to copy into sgl */
+ resid = sectors * sizeof(*dif_storep);
+
+ sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
+ scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+
+ while (sg_miter_next(&miter) && resid > 0) {
+ size_t len = min(miter.length, resid);
+ void *start = dif_store(sector);
+ size_t rest = 0;
+
+ if (dif_store_end < start + len)
+ rest = start + len - dif_store_end;
+
+ paddr = miter.addr;
+
+ if (read)
+ memcpy(paddr, start, len - rest);
+ else
+ memcpy(start, paddr, len - rest);
+
+ if (rest) {
+ if (read)
+ memcpy(paddr + len - rest, dif_storep, rest);
+ else
+ memcpy(dif_storep, paddr + len - rest, rest);
+ }
+
+ sector += len / sizeof(*dif_storep);
+ resid -= len;
+ }
+ sg_miter_stop(&miter);
+}
+
+static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+ unsigned int sectors, u32 ei_lba)
+{
+ unsigned int i;
+ struct sd_dif_tuple *sdt;
+ sector_t sector;
+
+ for (i = 0; i < sectors; i++, ei_lba++) {
+ int ret;
+
+ sector = start_sec + i;
+ sdt = dif_store(sector);
+
+ if (sdt->app_tag == cpu_to_be16(0xffff))
+ continue;
+
+ ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ return ret;
+ }
+ }
+
+ dif_copy_prot(SCpnt, start_sec, sectors, true);
+ dix_reads++;
+
+ return 0;
+}
+
+static int
+resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u64 lba;
+ u32 num;
+ u32 ei_lba;
+ unsigned long iflags;
+ int ret;
+ bool check_prot;
+
+ switch (cmd[0]) {
+ case READ_16:
+ ei_lba = 0;
+ lba = get_unaligned_be64(cmd + 2);
+ num = get_unaligned_be32(cmd + 10);
+ check_prot = true;
+ break;
+ case READ_10:
+ ei_lba = 0;
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be16(cmd + 7);
+ check_prot = true;
+ break;
+ case READ_6:
+ ei_lba = 0;
+ lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
+ (u32)(cmd[1] & 0x1f) << 16;
+ num = (0 == cmd[4]) ? 256 : cmd[4];
+ check_prot = true;
+ break;
+ case READ_12:
+ ei_lba = 0;
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be32(cmd + 6);
+ check_prot = true;
+ break;
+ case XDWRITEREAD_10:
+ ei_lba = 0;
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be16(cmd + 7);
+ check_prot = false;
+ break;
+ default: /* assume READ(32) */
+ lba = get_unaligned_be64(cmd + 12);
+ ei_lba = get_unaligned_be32(cmd + 20);
+ num = get_unaligned_be32(cmd + 28);
+ check_prot = false;
+ break;
+ }
+ if (check_prot) {
+ if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ (cmd[1] & 0xe0)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
+ scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ (cmd[1] & 0xe0) == 0)
+ sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
+ "to DIF device\n");
+ }
+ if (sdebug_any_injecting_opt) {
+ struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+
+ if (ep->inj_short)
+ num /= 2;
+ }
+
+ /* inline check_device_access_params() */
+ if (lba + num > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ /* transfer length excessive (tie in to block limits VPD page) */
+ if (num > sdebug_store_sectors) {
+ /* needs work to find which cdb byte 'num' comes from */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+
+ if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
+ (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
+ ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
+ /* claim unrecoverable read error */
+ mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
+ /* set info field and valid bit for fixed descriptor */
+ if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
+ scp->sense_buffer[0] |= 0x80; /* Valid bit */
+ ret = (lba < OPT_MEDIUM_ERR_ADDR)
+ ? OPT_MEDIUM_ERR_ADDR : (int)lba;
+ put_unaligned_be32(ret, scp->sense_buffer + 3);
+ }
+ scsi_set_resid(scp, scsi_bufflen(scp));
+ return check_condition_result;
+ }
+
+ read_lock_irqsave(&atomic_rw, iflags);
+
+ /* DIX + T10 DIF */
+ if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
+ int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
+
+ if (prot_ret) {
+ read_unlock_irqrestore(&atomic_rw, iflags);
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
+ return illegal_condition_result;
+ }
+ }
+
+ ret = do_device_access(scp, lba, num, false);
+ read_unlock_irqrestore(&atomic_rw, iflags);
+ if (ret == -1)
+ return DID_ERROR << 16;
+
+ scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
+
+ if (sdebug_any_injecting_opt) {
+ struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+
+ if (ep->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ return check_condition_result;
+ } else if (ep->inj_transport) {
+ mk_sense_buffer(scp, ABORTED_COMMAND,
+ TRANSPORT_PROBLEM, ACK_NAK_TO);
+ return check_condition_result;
+ } else if (ep->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return illegal_condition_result;
+ } else if (ep->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ }
+ }
+ return 0;
+}
+
+void dump_sector(unsigned char *buf, int len)
+{
+ int i, j, n;
+
+ pr_err(">>> Sector Dump <<<\n");
+ for (i = 0 ; i < len ; i += 16) {
+ char b[128];
+
+ for (j = 0, n = 0; j < 16; j++) {
+ unsigned char c = buf[i+j];
+
+ if (c >= 0x20 && c < 0x7e)
+ n += scnprintf(b + n, sizeof(b) - n,
+ " %c ", buf[i+j]);
+ else
+ n += scnprintf(b + n, sizeof(b) - n,
+ "%02x ", buf[i+j]);
+ }
+ pr_err("%04d: %s\n", i, b);
+ }
+}
+
+static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
+ unsigned int sectors, u32 ei_lba)
+{
+ int ret;
+ struct sd_dif_tuple *sdt;
+ void *daddr;
+ sector_t sector = start_sec;
+ int ppage_offset;
+ int dpage_offset;
+ struct sg_mapping_iter diter;
+ struct sg_mapping_iter piter;
+
+ BUG_ON(scsi_sg_count(SCpnt) == 0);
+ BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
+
+ sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
+ scsi_prot_sg_count(SCpnt),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+ sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ /* For each protection page */
+ while (sg_miter_next(&piter)) {
+ dpage_offset = 0;
+ if (WARN_ON(!sg_miter_next(&diter))) {
+ ret = 0x01;
+ goto out;
+ }
+
+ for (ppage_offset = 0; ppage_offset < piter.length;
+ ppage_offset += sizeof(struct sd_dif_tuple)) {
+ /* If we're at the end of the current
+ * data page advance to the next one
+ */
+ if (dpage_offset >= diter.length) {
+ if (WARN_ON(!sg_miter_next(&diter))) {
+ ret = 0x01;
+ goto out;
+ }
+ dpage_offset = 0;
+ }
+
+ sdt = piter.addr + ppage_offset;
+ daddr = diter.addr + dpage_offset;
+
+ ret = dif_verify(sdt, daddr, sector, ei_lba);
+ if (ret) {
+ dump_sector(daddr, scsi_debug_sector_size);
+ goto out;
+ }
+
+ sector++;
+ ei_lba++;
+ dpage_offset += scsi_debug_sector_size;
+ }
+ diter.consumed = dpage_offset;
+ sg_miter_stop(&diter);
+ }
+ sg_miter_stop(&piter);
+
+ dif_copy_prot(SCpnt, start_sec, sectors, false);
+ dix_writes++;
+
+ return 0;
+
+out:
+ dif_errors++;
+ sg_miter_stop(&diter);
+ sg_miter_stop(&piter);
+ return ret;
+}
+
+static unsigned long lba_to_map_index(sector_t lba)
+{
+ if (scsi_debug_unmap_alignment) {
+ lba += scsi_debug_unmap_granularity -
+ scsi_debug_unmap_alignment;
+ }
+ do_div(lba, scsi_debug_unmap_granularity);
+
+ return lba;
+}
+
+static sector_t map_index_to_lba(unsigned long index)
+{
+ sector_t lba = index * scsi_debug_unmap_granularity;
+
+ if (scsi_debug_unmap_alignment) {
+ lba -= scsi_debug_unmap_granularity -
+ scsi_debug_unmap_alignment;
+ }
+
+ return lba;
+}
+
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+ sector_t end;
+ unsigned int mapped;
+ unsigned long index;
+ unsigned long next;
+
+ index = lba_to_map_index(lba);
+ mapped = test_bit(index, map_storep);
+
+ if (mapped)
+ next = find_next_zero_bit(map_storep, map_size, index);
+ else
+ next = find_next_bit(map_storep, map_size, index);
+
+ end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
+ *num = end - lba;
+
+ return mapped;
+}
+
+static void map_region(sector_t lba, unsigned int len)
+{
+ sector_t end = lba + len;
+
+ while (lba < end) {
+ unsigned long index = lba_to_map_index(lba);
+
+ if (index < map_size)
+ set_bit(index, map_storep);
+
+ lba = map_index_to_lba(index + 1);
+ }
+}
+
+static void unmap_region(sector_t lba, unsigned int len)
+{
+ sector_t end = lba + len;
+
+ while (lba < end) {
+ unsigned long index = lba_to_map_index(lba);
+
+ if (lba == map_index_to_lba(index) &&
+ lba + scsi_debug_unmap_granularity <= end &&
+ index < map_size) {
+ clear_bit(index, map_storep);
+ if (scsi_debug_lbprz) {
+ memset(fake_storep +
+ lba * scsi_debug_sector_size, 0,
+ scsi_debug_sector_size *
+ scsi_debug_unmap_granularity);
+ }
+ if (dif_storep) {
+ memset(dif_storep + lba, 0xff,
+ sizeof(*dif_storep) *
+ scsi_debug_unmap_granularity);
+ }
+ }
+ lba = map_index_to_lba(index + 1);
+ }
+}
+
+static int
+resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u64 lba;
+ u32 num;
+ u32 ei_lba;
+ unsigned long iflags;
+ int ret;
+ bool check_prot;
+
+ switch (cmd[0]) {
+ case WRITE_16:
+ ei_lba = 0;
+ lba = get_unaligned_be64(cmd + 2);
+ num = get_unaligned_be32(cmd + 10);
+ check_prot = true;
+ break;
+ case WRITE_10:
+ ei_lba = 0;
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be16(cmd + 7);
+ check_prot = true;
+ break;
+ case WRITE_6:
+ ei_lba = 0;
+ lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
+ (u32)(cmd[1] & 0x1f) << 16;
+ num = (0 == cmd[4]) ? 256 : cmd[4];
+ check_prot = true;
+ break;
+ case WRITE_12:
+ ei_lba = 0;
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be32(cmd + 6);
+ check_prot = true;
+ break;
+ case 0x53: /* XDWRITEREAD(10) */
+ ei_lba = 0;
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be16(cmd + 7);
+ check_prot = false;
+ break;
+ default: /* assume WRITE(32) */
+ lba = get_unaligned_be64(cmd + 12);
+ ei_lba = get_unaligned_be32(cmd + 20);
+ num = get_unaligned_be32(cmd + 28);
+ check_prot = false;
+ break;
+ }
+ if (check_prot) {
+ if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ (cmd[1] & 0xe0)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
+ scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ (cmd[1] & 0xe0) == 0)
+ sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
+ "to DIF device\n");
+ }
+
+ /* inline check_device_access_params() */
+ if (lba + num > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ /* transfer length excessive (tie in to block limits VPD page) */
+ if (num > sdebug_store_sectors) {
+ /* needs work to find which cdb byte 'num' comes from */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+
+ /* DIX + T10 DIF */
+ if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
+ int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
+
+ if (prot_ret) {
+ write_unlock_irqrestore(&atomic_rw, iflags);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
+ return illegal_condition_result;
+ }
+ }
+
+ ret = do_device_access(scp, lba, num, true);
+ if (scsi_debug_lbp())
+ map_region(lba, num);
+ write_unlock_irqrestore(&atomic_rw, iflags);
+ if (-1 == ret)
+ return (DID_ERROR << 16);
+ else if ((ret < (num * scsi_debug_sector_size)) &&
+ (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num * scsi_debug_sector_size, ret);
+
+ if (sdebug_any_injecting_opt) {
+ struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+
+ if (ep->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ return check_condition_result;
+ } else if (ep->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return illegal_condition_result;
+ } else if (ep->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ }
+ }
+ return 0;
+}
+
+static int
+resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
+ bool unmap, bool ndob)
+{
+ unsigned long iflags;
+ unsigned long long i;
+ int ret;
+
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ return ret;
+
+ write_lock_irqsave(&atomic_rw, iflags);
+
+ if (unmap && scsi_debug_lbp()) {
+ unmap_region(lba, num);
+ goto out;
+ }
+
+ /* if ndob then zero 1 logical block, else fetch 1 logical block */
+ if (ndob) {
+ memset(fake_storep + (lba * scsi_debug_sector_size), 0,
+ scsi_debug_sector_size);
+ ret = 0;
+ } else
+ ret = fetch_to_dev_buffer(scp, fake_storep +
+ (lba * scsi_debug_sector_size),
+ scsi_debug_sector_size);
+
+ if (-1 == ret) {
+ write_unlock_irqrestore(&atomic_rw, iflags);
+ return (DID_ERROR << 16);
+ } else if ((ret < (num * scsi_debug_sector_size)) &&
+ (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, "write same",
+ num * scsi_debug_sector_size, ret);
+
+ /* Copy first sector to remaining blocks */
+ for (i = 1 ; i < num ; i++)
+ memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
+ fake_storep + (lba * scsi_debug_sector_size),
+ scsi_debug_sector_size);
+
+ if (scsi_debug_lbp())
+ map_region(lba, num);
+out:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+
+ return 0;
+}
+
+static int
+resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u32 lba;
+ u16 num;
+ u32 ei_lba = 0;
+ bool unmap = false;
+
+ if (cmd[1] & 0x8) {
+ if (scsi_debug_lbpws10 == 0) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
+ return check_condition_result;
+ } else
+ unmap = true;
+ }
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be16(cmd + 7);
+ if (num > scsi_debug_write_same_length) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
+ return check_condition_result;
+ }
+ return resp_write_same(scp, lba, num, ei_lba, unmap, false);
+}
+
+static int
+resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u64 lba;
+ u32 num;
+ u32 ei_lba = 0;
+ bool unmap = false;
+ bool ndob = false;
+
+ if (cmd[1] & 0x8) { /* UNMAP */
+ if (scsi_debug_lbpws == 0) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
+ return check_condition_result;
+ } else
+ unmap = true;
+ }
+ if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
+ ndob = true;
+ lba = get_unaligned_be64(cmd + 2);
+ num = get_unaligned_be32(cmd + 10);
+ if (num > scsi_debug_write_same_length) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
+ return check_condition_result;
+ }
+ return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
+}
+
+/* Note the mode field is in the same position as the (lower) service action
+ * field. For the Report supported operation codes command, SPC-4 suggests
+ * each mode of this command should be reported separately; for future. */
+static int
+resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *dp;
+ u8 mode;
+
+ mode = cmd[1] & 0x1f;
+ switch (mode) {
+ case 0x4: /* download microcode (MC) and activate (ACT) */
+ /* set UAs on this device only */
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
+ break;
+ case 0x5: /* download MC, save and ACT */
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
+ break;
+ case 0x6: /* download MC with offsets and ACT */
+ /* set UAs on most devices (LUs) in this target */
+ list_for_each_entry(dp,
+ &devip->sdbg_host->dev_info_list,
+ dev_list)
+ if (dp->target == sdp->id) {
+ set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
+ if (devip != dp)
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED,
+ dp->uas_bm);
+ }
+ break;
+ case 0x7: /* download MC with offsets, save, and ACT */
+ /* set UA on all devices (LUs) in this target */
+ list_for_each_entry(dp,
+ &devip->sdbg_host->dev_info_list,
+ dev_list)
+ if (dp->target == sdp->id)
+ set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
+ dp->uas_bm);
+ break;
+ default:
+ /* do nothing for this command for other mode values */
+ break;
+ }
+ return 0;
+}
+
+static int
+resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u8 *arr;
+ u8 *fake_storep_hold;
+ u64 lba;
+ u32 dnum;
+ u32 lb_size = scsi_debug_sector_size;
+ u8 num;
+ unsigned long iflags;
+ int ret;
+ int retval = 0;
+
+ lba = get_unaligned_be64(cmd + 2);
+ num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
+ if (0 == num)
+ return 0; /* degenerate case, not an error */
+ if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ (cmd[1] & 0xe0)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
+ scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ (cmd[1] & 0xe0) == 0)
+ sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
+ "to DIF device\n");
+
+ /* inline check_device_access_params() */
+ if (lba + num > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ /* transfer length excessive (tie in to block limits VPD page) */
+ if (num > sdebug_store_sectors) {
+ /* needs work to find which cdb byte 'num' comes from */
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+ dnum = 2 * num;
+ arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
+ if (NULL == arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+
+ /* trick do_device_access() to fetch both compare and write buffers
+ * from data-in into arr. Safe (atomic) since write_lock held. */
+ fake_storep_hold = fake_storep;
+ fake_storep = arr;
+ ret = do_device_access(scp, 0, dnum, true);
+ fake_storep = fake_storep_hold;
+ if (ret == -1) {
+ retval = DID_ERROR << 16;
+ goto cleanup;
+ } else if ((ret < (dnum * lb_size)) &&
+ (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
+ "indicated=%u, IO sent=%d bytes\n", my_name,
+ dnum * lb_size, ret);
+ if (!comp_write_worker(lba, num, arr)) {
+ mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
+ retval = check_condition_result;
+ goto cleanup;
+ }
+ if (scsi_debug_lbp())
+ map_region(lba, num);
+cleanup:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+ kfree(arr);
+ return retval;
+}
+
+struct unmap_block_desc {
+ __be64 lba;
+ __be32 blocks;
+ __be32 __reserved;
+};
+
+static int
+resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ unsigned char *buf;
+ struct unmap_block_desc *desc;
+ unsigned int i, payload_len, descriptors;
+ int ret;
+ unsigned long iflags;
+
+
+ if (!scsi_debug_lbp())
+ return 0; /* fib and say its done */
+ payload_len = get_unaligned_be16(scp->cmnd + 7);
+ BUG_ON(scsi_bufflen(scp) != payload_len);
+
+ descriptors = (payload_len - 8) / 16;
+ if (descriptors > scsi_debug_unmap_max_desc) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
+ return check_condition_result;
+ }
+
+ buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+ if (!buf) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
+
+ BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
+ BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
+
+ desc = (void *)&buf[8];
+
+ write_lock_irqsave(&atomic_rw, iflags);
+
+ for (i = 0 ; i < descriptors ; i++) {
+ unsigned long long lba = get_unaligned_be64(&desc[i].lba);
+ unsigned int num = get_unaligned_be32(&desc[i].blocks);
+
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ goto out;
+
+ unmap_region(lba, num);
+ }
+
+ ret = 0;
+
+out:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+ kfree(buf);
+
+ return ret;
+}
+
+#define SDEBUG_GET_LBA_STATUS_LEN 32
+
+static int
+resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u64 lba;
+ u32 alloc_len, mapped, num;
+ u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
+ int ret;
+
+ lba = get_unaligned_be64(cmd + 2);
+ alloc_len = get_unaligned_be32(cmd + 10);
+
+ if (alloc_len < 24)
+ return 0;
+
+ ret = check_device_access_params(scp, lba, 1);
+ if (ret)
+ return ret;
+
+ if (scsi_debug_lbp())
+ mapped = map_state(lba, &num);
+ else {
+ mapped = 1;
+ /* following just in case virtual_gb changed */
+ sdebug_capacity = get_sdebug_capacity();
+ if (sdebug_capacity - lba <= 0xffffffff)
+ num = sdebug_capacity - lba;
+ else
+ num = 0xffffffff;
+ }
+
+ memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
+ put_unaligned_be32(20, arr); /* Parameter Data Length */
+ put_unaligned_be64(lba, arr + 8); /* LBA */
+ put_unaligned_be32(num, arr + 16); /* Number of blocks */
+ arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
+
+ return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
+}
+
+#define SDEBUG_RLUN_ARR_SZ 256
+
+static int resp_report_luns(struct scsi_cmnd * scp,
+ struct sdebug_dev_info * devip)
+{
+ unsigned int alloc_len;
+ int lun_cnt, i, upper, num, n, want_wlun, shortish;
+ u64 lun;
+ unsigned char *cmd = scp->cmnd;
+ int select_report = (int)cmd[2];
+ struct scsi_lun *one_lun;
+ unsigned char arr[SDEBUG_RLUN_ARR_SZ];
+ unsigned char * max_addr;
+
+ clear_luns_changed_on_target(devip);
+ alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
+ shortish = (alloc_len < 4);
+ if (shortish || (select_report > 2)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
+ return check_condition_result;
+ }
+ /* can produce response with up to 16k luns (lun 0 to lun 16383) */
+ memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
+ lun_cnt = scsi_debug_max_luns;
+ if (1 == select_report)
+ lun_cnt = 0;
+ else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
+ --lun_cnt;
+ want_wlun = (select_report > 0) ? 1 : 0;
+ num = lun_cnt + want_wlun;
+ arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
+ arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
+ n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
+ sizeof(struct scsi_lun)), num);
+ if (n < num) {
+ want_wlun = 0;
+ lun_cnt = n;
+ }
+ one_lun = (struct scsi_lun *) &arr[8];
+ max_addr = arr + SDEBUG_RLUN_ARR_SZ;
+ for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
+ ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
+ i++, lun++) {
+ upper = (lun >> 8) & 0x3f;
+ if (upper)
+ one_lun[i].scsi_lun[0] =
+ (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
+ one_lun[i].scsi_lun[1] = lun & 0xff;
+ }
+ if (want_wlun) {
+ one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
+ one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
+ i++;
+ }
+ alloc_len = (unsigned char *)(one_lun + i) - arr;
+ return fill_from_dev_buffer(scp, arr,
+ min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
+}
+
+static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
+ unsigned int num, struct sdebug_dev_info *devip)
+{
+ int j;
+ unsigned char *kaddr, *buf;
+ unsigned int offset;
+ struct scsi_data_buffer *sdb = scsi_in(scp);
+ struct sg_mapping_iter miter;
+
+ /* better not to use temporary buffer. */
+ buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+ if (!buf) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
+
+ offset = 0;
+ sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+ while (sg_miter_next(&miter)) {
+ kaddr = miter.addr;
+ for (j = 0; j < miter.length; j++)
+ *(kaddr + j) ^= *(buf + offset + j);
+
+ offset += miter.length;
+ }
+ sg_miter_stop(&miter);
+ kfree(buf);
+
+ return 0;
+}
+
+static int
+resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u64 lba;
+ u32 num;
+ int errsts;
+
+ if (!scsi_bidi_cmnd(scp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ errsts = resp_read_dt0(scp, devip);
+ if (errsts)
+ return errsts;
+ if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
+ errsts = resp_write_dt0(scp, devip);
+ if (errsts)
+ return errsts;
+ }
+ lba = get_unaligned_be32(cmd + 2);
+ num = get_unaligned_be16(cmd + 7);
+ return resp_xdwriteread(scp, lba, num, devip);
+}
+
+/* When timer or tasklet goes off this function is called. */
+static void sdebug_q_cmd_complete(unsigned long indx)
+{
+ int qa_indx;
+ int retiring = 0;
+ unsigned long iflags;
+ struct sdebug_queued_cmd *sqcp;
+ struct scsi_cmnd *scp;
+ struct sdebug_dev_info *devip;
+
+ atomic_inc(&sdebug_completions);
+ qa_indx = indx;
+ if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
+ pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+ return;
+ }
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ sqcp = &queued_arr[qa_indx];
+ scp = sqcp->a_cmnd;
+ if (NULL == scp) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: scp is NULL\n", __func__);
+ return;
+ }
+ devip = (struct sdebug_dev_info *)scp->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ else
+ pr_err("%s: devip=NULL\n", __func__);
+ if (atomic_read(&retired_max_queue) > 0)
+ retiring = 1;
+
+ sqcp->a_cmnd = NULL;
+ if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: Unexpected completion\n", __func__);
+ return;
+ }
+
+ if (unlikely(retiring)) { /* user has reduced max_queue */
+ int k, retval;
+
+ retval = atomic_read(&retired_max_queue);
+ if (qa_indx >= retval) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: index %d too large\n", __func__, retval);
+ return;
+ }
+ k = find_last_bit(queued_in_use_bm, retval);
+ if ((k < scsi_debug_max_queue) || (k == retval))
+ atomic_set(&retired_max_queue, 0);
+ else
+ atomic_set(&retired_max_queue, k + 1);
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ scp->scsi_done(scp); /* callback to mid level */
+}
+
+/* When high resolution timer goes off this function is called. */
+static enum hrtimer_restart
+sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
+{
+ int qa_indx;
+ int retiring = 0;
+ unsigned long iflags;
+ struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
+ struct sdebug_queued_cmd *sqcp;
+ struct scsi_cmnd *scp;
+ struct sdebug_dev_info *devip;
+
+ atomic_inc(&sdebug_completions);
+ qa_indx = sd_hrtp->qa_indx;
+ if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
+ pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+ goto the_end;
+ }
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ sqcp = &queued_arr[qa_indx];
+ scp = sqcp->a_cmnd;
+ if (NULL == scp) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: scp is NULL\n", __func__);
+ goto the_end;
+ }
+ devip = (struct sdebug_dev_info *)scp->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ else
+ pr_err("%s: devip=NULL\n", __func__);
+ if (atomic_read(&retired_max_queue) > 0)
+ retiring = 1;
+
+ sqcp->a_cmnd = NULL;
+ if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: Unexpected completion\n", __func__);
+ goto the_end;
+ }
+
+ if (unlikely(retiring)) { /* user has reduced max_queue */
+ int k, retval;
+
+ retval = atomic_read(&retired_max_queue);
+ if (qa_indx >= retval) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: index %d too large\n", __func__, retval);
+ goto the_end;
+ }
+ k = find_last_bit(queued_in_use_bm, retval);
+ if ((k < scsi_debug_max_queue) || (k == retval))
+ atomic_set(&retired_max_queue, 0);
+ else
+ atomic_set(&retired_max_queue, k + 1);
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ scp->scsi_done(scp); /* callback to mid level */
+the_end:
+ return HRTIMER_NORESTART;
+}
+
+static struct sdebug_dev_info *
+sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
+{
+ struct sdebug_dev_info *devip;
+
+ devip = kzalloc(sizeof(*devip), flags);
+ if (devip) {
+ devip->sdbg_host = sdbg_host;
+ list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
+ }
+ return devip;
+}
+
+static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
+{
+ struct sdebug_host_info * sdbg_host;
+ struct sdebug_dev_info * open_devip = NULL;
+ struct sdebug_dev_info * devip =
+ (struct sdebug_dev_info *)sdev->hostdata;
+
+ if (devip)
+ return devip;
+ sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
+ if (!sdbg_host) {
+ pr_err("%s: Host info NULL\n", __func__);
+ return NULL;
+ }
+ list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
+ if ((devip->used) && (devip->channel == sdev->channel) &&
+ (devip->target == sdev->id) &&
+ (devip->lun == sdev->lun))
+ return devip;
+ else {
+ if ((!devip->used) && (!open_devip))
+ open_devip = devip;
+ }
+ }
+ if (!open_devip) { /* try and make a new one */
+ open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
+ if (!open_devip) {
+ printk(KERN_ERR "%s: out of memory at line %d\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+ }
+
+ open_devip->channel = sdev->channel;
+ open_devip->target = sdev->id;
+ open_devip->lun = sdev->lun;
+ open_devip->sdbg_host = sdbg_host;
+ atomic_set(&open_devip->num_in_q, 0);
+ set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
+ open_devip->used = true;
+ return open_devip;
+}
+
+static int scsi_debug_slave_alloc(struct scsi_device *sdp)
+{
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
+ sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
+ return 0;
+}
+
+static int scsi_debug_slave_configure(struct scsi_device *sdp)
+{
+ struct sdebug_dev_info *devip;
+
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
+ sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
+ if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
+ sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
+ devip = devInfoReg(sdp);
+ if (NULL == devip)
+ return 1; /* no resources, will be marked offline */
+ sdp->hostdata = devip;
+ blk_queue_max_segment_size(sdp->request_queue, -1U);
+ if (scsi_debug_no_uld)
+ sdp->no_uld_attach = 1;
+ return 0;
+}
+
+static void scsi_debug_slave_destroy(struct scsi_device *sdp)
+{
+ struct sdebug_dev_info *devip =
+ (struct sdebug_dev_info *)sdp->hostdata;
+
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
+ sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
+ if (devip) {
+ /* make this slot available for re-use */
+ devip->used = false;
+ sdp->hostdata = NULL;
+ }
+}
+
+/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
+static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
+{
+ unsigned long iflags;
+ int k, qmax, r_qmax;
+ struct sdebug_queued_cmd *sqcp;
+ struct sdebug_dev_info *devip;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ qmax = scsi_debug_max_queue;
+ r_qmax = atomic_read(&retired_max_queue);
+ if (r_qmax > qmax)
+ qmax = r_qmax;
+ for (k = 0; k < qmax; ++k) {
+ if (test_bit(k, queued_in_use_bm)) {
+ sqcp = &queued_arr[k];
+ if (cmnd == sqcp->a_cmnd) {
+ devip = (struct sdebug_dev_info *)
+ cmnd->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ sqcp->a_cmnd = NULL;
+ spin_unlock_irqrestore(&queued_arr_lock,
+ iflags);
+ if (scsi_debug_ndelay > 0) {
+ if (sqcp->sd_hrtp)
+ hrtimer_cancel(
+ &sqcp->sd_hrtp->hrt);
+ } else if (scsi_debug_delay > 0) {
+ if (sqcp->cmnd_timerp)
+ del_timer_sync(
+ sqcp->cmnd_timerp);
+ } else if (scsi_debug_delay < 0) {
+ if (sqcp->tletp)
+ tasklet_kill(sqcp->tletp);
+ }
+ clear_bit(k, queued_in_use_bm);
+ return 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ return 0;
+}
+
+/* Deletes (stops) timers or tasklets of all queued commands */
+static void stop_all_queued(void)
+{
+ unsigned long iflags;
+ int k;
+ struct sdebug_queued_cmd *sqcp;
+ struct sdebug_dev_info *devip;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ if (test_bit(k, queued_in_use_bm)) {
+ sqcp = &queued_arr[k];
+ if (sqcp->a_cmnd) {
+ devip = (struct sdebug_dev_info *)
+ sqcp->a_cmnd->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ sqcp->a_cmnd = NULL;
+ spin_unlock_irqrestore(&queued_arr_lock,
+ iflags);
+ if (scsi_debug_ndelay > 0) {
+ if (sqcp->sd_hrtp)
+ hrtimer_cancel(
+ &sqcp->sd_hrtp->hrt);
+ } else if (scsi_debug_delay > 0) {
+ if (sqcp->cmnd_timerp)
+ del_timer_sync(
+ sqcp->cmnd_timerp);
+ } else if (scsi_debug_delay < 0) {
+ if (sqcp->tletp)
+ tasklet_kill(sqcp->tletp);
+ }
+ clear_bit(k, queued_in_use_bm);
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+}
+
+/* Free queued command memory on heap */
+static void free_all_queued(void)
+{
+ unsigned long iflags;
+ int k;
+ struct sdebug_queued_cmd *sqcp;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ sqcp = &queued_arr[k];
+ kfree(sqcp->cmnd_timerp);
+ sqcp->cmnd_timerp = NULL;
+ kfree(sqcp->tletp);
+ sqcp->tletp = NULL;
+ kfree(sqcp->sd_hrtp);
+ sqcp->sd_hrtp = NULL;
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+}
+
+static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
+{
+ ++num_aborts;
+ if (SCpnt) {
+ if (SCpnt->device &&
+ (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
+ __func__);
+ stop_queued_cmnd(SCpnt);
+ }
+ return SUCCESS;
+}
+
+static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
+{
+ struct sdebug_dev_info * devip;
+
+ ++num_dev_resets;
+ if (SCpnt && SCpnt->device) {
+ struct scsi_device *sdp = SCpnt->device;
+
+ if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ devip = devInfoReg(sdp);
+ if (devip)
+ set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ }
+ return SUCCESS;
+}
+
+static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct sdebug_dev_info *devip;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
+ int k = 0;
+
+ ++num_target_resets;
+ if (!SCpnt)
+ goto lie;
+ sdp = SCpnt->device;
+ if (!sdp)
+ goto lie;
+ if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ hp = sdp->host;
+ if (!hp)
+ goto lie;
+ sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
+ if (sdbg_host) {
+ list_for_each_entry(devip,
+ &sdbg_host->dev_info_list,
+ dev_list)
+ if (devip->target == sdp->id) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
+ }
+ if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: %d device(s) found in target\n", __func__, k);
+lie:
+ return SUCCESS;
+}
+
+static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct sdebug_dev_info *devip;
+ struct scsi_device * sdp;
+ struct Scsi_Host * hp;
+ int k = 0;
+
+ ++num_bus_resets;
+ if (!(SCpnt && SCpnt->device))
+ goto lie;
+ sdp = SCpnt->device;
+ if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ hp = sdp->host;
+ if (hp) {
+ sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
+ if (sdbg_host) {
+ list_for_each_entry(devip,
+ &sdbg_host->dev_info_list,
+ dev_list) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
+ }
+ }
+ if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: %d device(s) found in host\n", __func__, k);
+lie:
+ return SUCCESS;
+}
+
+static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
+{
+ struct sdebug_host_info * sdbg_host;
+ struct sdebug_dev_info *devip;
+ int k = 0;
+
+ ++num_host_resets;
+ if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ list_for_each_entry(devip, &sdbg_host->dev_info_list,
+ dev_list) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ stop_all_queued();
+ if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: %d device(s) found\n", __func__, k);
+ return SUCCESS;
+}
+
+static void __init sdebug_build_parts(unsigned char *ramp,
+ unsigned long store_size)
+{
+ struct partition * pp;
+ int starts[SDEBUG_MAX_PARTS + 2];
+ int sectors_per_part, num_sectors, k;
+ int heads_by_sects, start_sec, end_sec;
+
+ /* assume partition table already zeroed */
+ if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
+ return;
+ if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
+ scsi_debug_num_parts = SDEBUG_MAX_PARTS;
+ pr_warn("%s: reducing partitions to %d\n", __func__,
+ SDEBUG_MAX_PARTS);
+ }
+ num_sectors = (int)sdebug_store_sectors;
+ sectors_per_part = (num_sectors - sdebug_sectors_per)
+ / scsi_debug_num_parts;
+ heads_by_sects = sdebug_heads * sdebug_sectors_per;
+ starts[0] = sdebug_sectors_per;
+ for (k = 1; k < scsi_debug_num_parts; ++k)
+ starts[k] = ((k * sectors_per_part) / heads_by_sects)
+ * heads_by_sects;
+ starts[scsi_debug_num_parts] = num_sectors;
+ starts[scsi_debug_num_parts + 1] = 0;
+
+ ramp[510] = 0x55; /* magic partition markings */
+ ramp[511] = 0xAA;
+ pp = (struct partition *)(ramp + 0x1be);
+ for (k = 0; starts[k + 1]; ++k, ++pp) {
+ start_sec = starts[k];
+ end_sec = starts[k + 1] - 1;
+ pp->boot_ind = 0;
+
+ pp->cyl = start_sec / heads_by_sects;
+ pp->head = (start_sec - (pp->cyl * heads_by_sects))
+ / sdebug_sectors_per;
+ pp->sector = (start_sec % sdebug_sectors_per) + 1;
+
+ pp->end_cyl = end_sec / heads_by_sects;
+ pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
+ / sdebug_sectors_per;
+ pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
+
+ pp->start_sect = cpu_to_le32(start_sec);
+ pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
+ pp->sys_ind = 0x83; /* plain Linux partition */
+ }
+}
+
+static int
+schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+ int scsi_result, int delta_jiff)
+{
+ unsigned long iflags;
+ int k, num_in_q, qdepth, inject;
+ struct sdebug_queued_cmd *sqcp = NULL;
+ struct scsi_device *sdp = cmnd->device;
+
+ if (NULL == cmnd || NULL == devip) {
+ pr_warn("%s: called with NULL cmnd or devip pointer\n",
+ __func__);
+ /* no particularly good error to report back */
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
+ __func__, scsi_result);
+ if (delta_jiff == 0)
+ goto respond_in_thread;
+
+ /* schedule the response at a later time if resources permit */
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ num_in_q = atomic_read(&devip->num_in_q);
+ qdepth = cmnd->device->queue_depth;
+ inject = 0;
+ if ((qdepth > 0) && (num_in_q >= qdepth)) {
+ if (scsi_result) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ goto respond_in_thread;
+ } else
+ scsi_result = device_qfull_result;
+ } else if ((scsi_debug_every_nth != 0) &&
+ (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
+ (scsi_result == 0)) {
+ if ((num_in_q == (qdepth - 1)) &&
+ (atomic_inc_return(&sdebug_a_tsf) >=
+ abs(scsi_debug_every_nth))) {
+ atomic_set(&sdebug_a_tsf, 0);
+ inject = 1;
+ scsi_result = device_qfull_result;
+ }
+ }
+
+ k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
+ if (k >= scsi_debug_max_queue) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (scsi_result)
+ goto respond_in_thread;
+ else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
+ scsi_result = device_qfull_result;
+ if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: max_queue=%d exceeded, %s\n",
+ __func__, scsi_debug_max_queue,
+ (scsi_result ? "status: TASK SET FULL" :
+ "report: host busy"));
+ if (scsi_result)
+ goto respond_in_thread;
+ else
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ __set_bit(k, queued_in_use_bm);
+ atomic_inc(&devip->num_in_q);
+ sqcp = &queued_arr[k];
+ sqcp->a_cmnd = cmnd;
+ cmnd->result = scsi_result;
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (delta_jiff > 0) {
+ if (NULL == sqcp->cmnd_timerp) {
+ sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
+ GFP_ATOMIC);
+ if (NULL == sqcp->cmnd_timerp)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ init_timer(sqcp->cmnd_timerp);
+ }
+ sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
+ sqcp->cmnd_timerp->data = k;
+ sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
+ add_timer(sqcp->cmnd_timerp);
+ } else if (scsi_debug_ndelay > 0) {
+ ktime_t kt = ktime_set(0, scsi_debug_ndelay);
+ struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
+
+ if (NULL == sd_hp) {
+ sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
+ if (NULL == sd_hp)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ sqcp->sd_hrtp = sd_hp;
+ hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
+ sd_hp->qa_indx = k;
+ }
+ hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
+ } else { /* delay < 0 */
+ if (NULL == sqcp->tletp) {
+ sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
+ GFP_ATOMIC);
+ if (NULL == sqcp->tletp)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ tasklet_init(sqcp->tletp,
+ sdebug_q_cmd_complete, k);
+ }
+ if (-1 == delta_jiff)
+ tasklet_hi_schedule(sqcp->tletp);
+ else
+ tasklet_schedule(sqcp->tletp);
+ }
+ if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
+ (scsi_result == device_qfull_result))
+ sdev_printk(KERN_INFO, sdp,
+ "%s: num_in_q=%d +1, %s%s\n", __func__,
+ num_in_q, (inject ? "<inject> " : ""),
+ "status: TASK SET FULL");
+ return 0;
+
+respond_in_thread: /* call back to mid-layer using invocation thread */
+ cmnd->result = scsi_result;
+ cmnd->scsi_done(cmnd);
+ return 0;
+}
+
+/* Note: The following macros create attribute files in the
+ /sys/module/scsi_debug/parameters directory. Unfortunately this
+ driver is unaware of a change and cannot trigger auxiliary actions
+ as it can when the corresponding attribute in the
+ /sys/bus/pseudo/drivers/scsi_debug directory is changed.
+ */
+module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
+module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
+module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
+module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
+module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
+module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
+module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
+module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
+module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
+module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
+module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
+module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
+module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
+module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
+module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
+module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
+module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
+module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
+module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
+module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
+module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
+module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
+module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
+module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
+module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
+module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
+module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
+module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
+module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
+module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
+module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(write_same_length, scsi_debug_write_same_length, int,
+ S_IRUGO | S_IWUSR);
+
+MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
+MODULE_DESCRIPTION("SCSI debug adapter driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SCSI_DEBUG_VERSION);
+
+MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
+MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
+MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
+MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
+MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
+MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
+MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
+MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
+MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
+MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
+MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
+MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
+MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
+MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
+MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
+MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
+MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
+MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
+MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
+MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
+MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
+MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
+MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
+MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
+MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
+MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
+MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
+MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
+MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
+
+static char sdebug_info[256];
+
+static const char * scsi_debug_info(struct Scsi_Host * shp)
+{
+ sprintf(sdebug_info, "scsi_debug, version %s [%s], "
+ "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
+ scsi_debug_version_date, scsi_debug_dev_size_mb,
+ scsi_debug_opts);
+ return sdebug_info;
+}
+
+/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
+static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ char arr[16];
+ int opts;
+ int minLen = length > 15 ? 15 : length;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ memcpy(arr, buffer, minLen);
+ arr[minLen] = '\0';
+ if (1 != sscanf(arr, "%d", &opts))
+ return -EINVAL;
+ scsi_debug_opts = opts;
+ if (scsi_debug_every_nth != 0)
+ atomic_set(&sdebug_cmnd_count, 0);
+ return length;
+}
+
+/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
+ * same for each scsi_debug host (if more than one). Some of the counters
+ * output are not atomics so might be inaccurate in a busy system. */
+static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ int f, l;
+ char b[32];
+
+ if (scsi_debug_every_nth > 0)
+ snprintf(b, sizeof(b), " (curr:%d)",
+ ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
+ atomic_read(&sdebug_a_tsf) :
+ atomic_read(&sdebug_cmnd_count)));
+ else
+ b[0] = '\0';
+
+ seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
+ "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
+ "every_nth=%d%s\n"
+ "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
+ "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
+ "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
+ "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
+ "usec_in_jiffy=%lu\n",
+ SCSI_DEBUG_VERSION, scsi_debug_version_date,
+ scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
+ scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
+ scsi_debug_max_luns, atomic_read(&sdebug_completions),
+ scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+ sdebug_sectors_per, num_aborts, num_dev_resets,
+ num_target_resets, num_bus_resets, num_host_resets,
+ dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
+
+ f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
+ if (f != scsi_debug_max_queue) {
+ l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
+ seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
+ "queued_in_use_bm", f, l);
+ }
+ return 0;
+}
+
+static ssize_t delay_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
+}
+/* Returns -EBUSY if delay is being changed and commands are queued */
+static ssize_t delay_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int delay, res;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
+ res = count;
+ if (scsi_debug_delay != delay) {
+ unsigned long iflags;
+ int k;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_first_bit(queued_in_use_bm,
+ scsi_debug_max_queue);
+ if (k != scsi_debug_max_queue)
+ res = -EBUSY; /* have queued commands */
+ else {
+ scsi_debug_delay = delay;
+ scsi_debug_ndelay = 0;
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ }
+ return res;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(delay);
+
+static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
+}
+/* Returns -EBUSY if ndelay is being changed and commands are queued */
+/* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
+static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ unsigned long iflags;
+ int ndelay, res, k;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
+ (ndelay >= 0) && (ndelay < 1000000000)) {
+ res = count;
+ if (scsi_debug_ndelay != ndelay) {
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_first_bit(queued_in_use_bm,
+ scsi_debug_max_queue);
+ if (k != scsi_debug_max_queue)
+ res = -EBUSY; /* have queued commands */
+ else {
+ scsi_debug_ndelay = ndelay;
+ scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
+ : DEF_DELAY;
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ }
+ return res;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(ndelay);
+
+static ssize_t opts_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
+}
+
+static ssize_t opts_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int opts;
+ char work[20];
+
+ if (1 == sscanf(buf, "%10s", work)) {
+ if (0 == strncasecmp(work,"0x", 2)) {
+ if (1 == sscanf(&work[2], "%x", &opts))
+ goto opts_done;
+ } else {
+ if (1 == sscanf(work, "%d", &opts))
+ goto opts_done;
+ }
+ }
+ return -EINVAL;
+opts_done:
+ scsi_debug_opts = opts;
+ if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
+ sdebug_any_injecting_opt = true;
+ atomic_set(&sdebug_cmnd_count, 0);
+ atomic_set(&sdebug_a_tsf, 0);
+ return count;
+}
+static DRIVER_ATTR_RW(opts);
+
+static ssize_t ptype_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
+}
+static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_ptype = n;
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(ptype);
+
+static ssize_t dsense_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
+}
+static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_dsense = n;
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(dsense);
+
+static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
+}
+static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ n = (n > 0);
+ scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
+ if (scsi_debug_fake_rw != n) {
+ if ((0 == n) && (NULL == fake_storep)) {
+ unsigned long sz =
+ (unsigned long)scsi_debug_dev_size_mb *
+ 1048576;
+
+ fake_storep = vmalloc(sz);
+ if (NULL == fake_storep) {
+ pr_err("%s: out of memory, 9\n",
+ __func__);
+ return -ENOMEM;
+ }
+ memset(fake_storep, 0, sz);
+ }
+ scsi_debug_fake_rw = n;
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(fake_rw);
+
+static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
+}
+static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_no_lun_0 = n;
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(no_lun_0);
+
+static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
+}
+static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_num_tgts = n;
+ sdebug_max_tgts_luns();
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(num_tgts);
+
+static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
+}
+static DRIVER_ATTR_RO(dev_size_mb);
+
+static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
+}
+static DRIVER_ATTR_RO(num_parts);
+
+static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
+}
+static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int nth;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
+ scsi_debug_every_nth = nth;
+ atomic_set(&sdebug_cmnd_count, 0);
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(every_nth);
+
+static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
+}
+static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+ bool changed;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ changed = (scsi_debug_max_luns != n);
+ scsi_debug_max_luns = n;
+ sdebug_max_tgts_luns();
+ if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list,
+ dev_list) {
+ set_bit(SDEBUG_UA_LUNS_CHANGED,
+ dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(max_luns);
+
+static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+}
+/* N.B. max_queue can be changed while there are queued commands. In flight
+ * commands beyond the new max_queue will be completed. */
+static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ unsigned long iflags;
+ int n, k;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
+ (n <= SCSI_DEBUG_CANQUEUE)) {
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
+ scsi_debug_max_queue = n;
+ if (SCSI_DEBUG_CANQUEUE == k)
+ atomic_set(&retired_max_queue, 0);
+ else if (k >= n)
+ atomic_set(&retired_max_queue, k + 1);
+ else
+ atomic_set(&retired_max_queue, 0);
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(max_queue);
+
+static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+}
+static DRIVER_ATTR_RO(no_uld);
+
+static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
+}
+static DRIVER_ATTR_RO(scsi_level);
+
+static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
+}
+static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+ bool changed;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ changed = (scsi_debug_virtual_gb != n);
+ scsi_debug_virtual_gb = n;
+ sdebug_capacity = get_sdebug_capacity();
+ if (changed) {
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list,
+ dev_list) {
+ set_bit(SDEBUG_UA_CAPACITY_CHANGED,
+ dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(virtual_gb);
+
+static ssize_t add_host_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
+}
+
+static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int delta_hosts;
+
+ if (sscanf(buf, "%d", &delta_hosts) != 1)
+ return -EINVAL;
+ if (delta_hosts > 0) {
+ do {
+ sdebug_add_adapter();
+ } while (--delta_hosts);
+ } else if (delta_hosts < 0) {
+ do {
+ sdebug_remove_adapter();
+ } while (++delta_hosts);
+ }
+ return count;
+}
+static DRIVER_ATTR_RW(add_host);
+
+static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
+}
+static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_vpd_use_hostno = n;
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(vpd_use_hostno);
+
+static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+}
+static DRIVER_ATTR_RO(sector_size);
+
+static ssize_t dix_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
+}
+static DRIVER_ATTR_RO(dix);
+
+static ssize_t dif_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
+}
+static DRIVER_ATTR_RO(dif);
+
+static ssize_t guard_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
+}
+static DRIVER_ATTR_RO(guard);
+
+static ssize_t ato_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
+}
+static DRIVER_ATTR_RO(ato);
+
+static ssize_t map_show(struct device_driver *ddp, char *buf)
+{
+ ssize_t count;
+
+ if (!scsi_debug_lbp())
+ return scnprintf(buf, PAGE_SIZE, "0-%u\n",
+ sdebug_store_sectors);
+
+ count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ (int)map_size, map_storep);
+ buf[count++] = '\n';
+ buf[count] = '\0';
+
+ return count;
+}
+static DRIVER_ATTR_RO(map);
+
+static ssize_t removable_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
+}
+static ssize_t removable_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_removable = (n > 0);
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(removable);
+
+static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
+}
+/* Returns -EBUSY if host_lock is being changed and commands are queued */
+static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n, res;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ bool new_host_lock = (n > 0);
+
+ res = count;
+ if (new_host_lock != scsi_debug_host_lock) {
+ unsigned long iflags;
+ int k;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_first_bit(queued_in_use_bm,
+ scsi_debug_max_queue);
+ if (k != scsi_debug_max_queue)
+ res = -EBUSY; /* have queued commands */
+ else
+ scsi_debug_host_lock = new_host_lock;
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ }
+ return res;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(host_lock);
+
+static ssize_t strict_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
+}
+static ssize_t strict_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ scsi_debug_strict = (n > 0);
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(strict);
+
+
+/* Note: The following array creates attribute files in the
+ /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
+ files (over those found in the /sys/module/scsi_debug/parameters
+ directory) is that auxiliary actions can be triggered when an attribute
+ is changed. For example see: sdebug_add_host_store() above.
+ */
+
+static struct attribute *sdebug_drv_attrs[] = {
+ &driver_attr_delay.attr,
+ &driver_attr_opts.attr,
+ &driver_attr_ptype.attr,
+ &driver_attr_dsense.attr,
+ &driver_attr_fake_rw.attr,
+ &driver_attr_no_lun_0.attr,
+ &driver_attr_num_tgts.attr,
+ &driver_attr_dev_size_mb.attr,
+ &driver_attr_num_parts.attr,
+ &driver_attr_every_nth.attr,
+ &driver_attr_max_luns.attr,
+ &driver_attr_max_queue.attr,
+ &driver_attr_no_uld.attr,
+ &driver_attr_scsi_level.attr,
+ &driver_attr_virtual_gb.attr,
+ &driver_attr_add_host.attr,
+ &driver_attr_vpd_use_hostno.attr,
+ &driver_attr_sector_size.attr,
+ &driver_attr_dix.attr,
+ &driver_attr_dif.attr,
+ &driver_attr_guard.attr,
+ &driver_attr_ato.attr,
+ &driver_attr_map.attr,
+ &driver_attr_removable.attr,
+ &driver_attr_host_lock.attr,
+ &driver_attr_ndelay.attr,
+ &driver_attr_strict.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sdebug_drv);
+
+static struct device *pseudo_primary;
+
+static int __init scsi_debug_init(void)
+{
+ unsigned long sz;
+ int host_to_add;
+ int k;
+ int ret;
+
+ atomic_set(&sdebug_cmnd_count, 0);
+ atomic_set(&sdebug_completions, 0);
+ atomic_set(&retired_max_queue, 0);
+
+ if (scsi_debug_ndelay >= 1000000000) {
+ pr_warn("%s: ndelay must be less than 1 second, ignored\n",
+ __func__);
+ scsi_debug_ndelay = 0;
+ } else if (scsi_debug_ndelay > 0)
+ scsi_debug_delay = DELAY_OVERRIDDEN;
+
+ switch (scsi_debug_sector_size) {
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ pr_err("%s: invalid sector_size %d\n", __func__,
+ scsi_debug_sector_size);
+ return -EINVAL;
+ }
+
+ switch (scsi_debug_dif) {
+
+ case SD_DIF_TYPE0_PROTECTION:
+ case SD_DIF_TYPE1_PROTECTION:
+ case SD_DIF_TYPE2_PROTECTION:
+ case SD_DIF_TYPE3_PROTECTION:
+ break;
+
+ default:
+ pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_guard > 1) {
+ pr_err("%s: guard must be 0 or 1\n", __func__);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_ato > 1) {
+ pr_err("%s: ato must be 0 or 1\n", __func__);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_physblk_exp > 15) {
+ pr_err("%s: invalid physblk_exp %u\n", __func__,
+ scsi_debug_physblk_exp);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_lowest_aligned > 0x3fff) {
+ pr_err("%s: lowest_aligned too big: %u\n", __func__,
+ scsi_debug_lowest_aligned);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_dev_size_mb < 1)
+ scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
+ sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
+ sdebug_store_sectors = sz / scsi_debug_sector_size;
+ sdebug_capacity = get_sdebug_capacity();
+
+ /* play around with geometry, don't waste too much on track 0 */
+ sdebug_heads = 8;
+ sdebug_sectors_per = 32;
+ if (scsi_debug_dev_size_mb >= 16)
+ sdebug_heads = 32;
+ else if (scsi_debug_dev_size_mb >= 256)
+ sdebug_heads = 64;
+ sdebug_cylinders_per = (unsigned long)sdebug_capacity /
+ (sdebug_sectors_per * sdebug_heads);
+ if (sdebug_cylinders_per >= 1024) {
+ /* other LLDs do this; implies >= 1GB ram disk ... */
+ sdebug_heads = 255;
+ sdebug_sectors_per = 63;
+ sdebug_cylinders_per = (unsigned long)sdebug_capacity /
+ (sdebug_sectors_per * sdebug_heads);
+ }
+
+ if (0 == scsi_debug_fake_rw) {
+ fake_storep = vmalloc(sz);
+ if (NULL == fake_storep) {
+ pr_err("%s: out of memory, 1\n", __func__);
+ return -ENOMEM;
+ }
+ memset(fake_storep, 0, sz);
+ if (scsi_debug_num_parts > 0)
+ sdebug_build_parts(fake_storep, sz);
+ }
+
+ if (scsi_debug_dix) {
+ int dif_size;
+
+ dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
+ dif_storep = vmalloc(dif_size);
+
+ pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
+ dif_storep);
+
+ if (dif_storep == NULL) {
+ pr_err("%s: out of mem. (DIX)\n", __func__);
+ ret = -ENOMEM;
+ goto free_vm;
+ }
+
+ memset(dif_storep, 0xff, dif_size);
+ }
+
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
+ scsi_debug_unmap_max_blocks =
+ clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
+
+ scsi_debug_unmap_max_desc =
+ clamp(scsi_debug_unmap_max_desc, 0U, 256U);
+
+ scsi_debug_unmap_granularity =
+ clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
+
+ if (scsi_debug_unmap_alignment &&
+ scsi_debug_unmap_granularity <=
+ scsi_debug_unmap_alignment) {
+ pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+ map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
+
+ pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
+
+ if (map_storep == NULL) {
+ pr_err("%s: out of mem. (MAP)\n", __func__);
+ ret = -ENOMEM;
+ goto free_vm;
+ }
+
+ bitmap_zero(map_storep, map_size);
+
+ /* Map first 1KB for partition table */
+ if (scsi_debug_num_parts)
+ map_region(0, 2);
+ }
+
+ pseudo_primary = root_device_register("pseudo_0");
+ if (IS_ERR(pseudo_primary)) {
+ pr_warn("%s: root_device_register() error\n", __func__);
+ ret = PTR_ERR(pseudo_primary);
+ goto free_vm;
+ }
+ ret = bus_register(&pseudo_lld_bus);
+ if (ret < 0) {
+ pr_warn("%s: bus_register error: %d\n", __func__, ret);
+ goto dev_unreg;
+ }
+ ret = driver_register(&sdebug_driverfs_driver);
+ if (ret < 0) {
+ pr_warn("%s: driver_register error: %d\n", __func__, ret);
+ goto bus_unreg;
+ }
+
+ host_to_add = scsi_debug_add_host;
+ scsi_debug_add_host = 0;
+
+ for (k = 0; k < host_to_add; k++) {
+ if (sdebug_add_adapter()) {
+ pr_err("%s: sdebug_add_adapter failed k=%d\n",
+ __func__, k);
+ break;
+ }
+ }
+
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
+ pr_info("%s: built %d host(s)\n", __func__,
+ scsi_debug_add_host);
+ }
+ return 0;
+
+bus_unreg:
+ bus_unregister(&pseudo_lld_bus);
+dev_unreg:
+ root_device_unregister(pseudo_primary);
+free_vm:
+ if (map_storep)
+ vfree(map_storep);
+ if (dif_storep)
+ vfree(dif_storep);
+ vfree(fake_storep);
+
+ return ret;
+}
+
+static void __exit scsi_debug_exit(void)
+{
+ int k = scsi_debug_add_host;
+
+ stop_all_queued();
+ free_all_queued();
+ for (; k; k--)
+ sdebug_remove_adapter();
+ driver_unregister(&sdebug_driverfs_driver);
+ bus_unregister(&pseudo_lld_bus);
+ root_device_unregister(pseudo_primary);
+
+ if (dif_storep)
+ vfree(dif_storep);
+
+ vfree(fake_storep);
+}
+
+device_initcall(scsi_debug_init);
+module_exit(scsi_debug_exit);
+
+static void sdebug_release_adapter(struct device * dev)
+{
+ struct sdebug_host_info *sdbg_host;
+
+ sdbg_host = to_sdebug_host(dev);
+ kfree(sdbg_host);
+}
+
+static int sdebug_add_adapter(void)
+{
+ int k, devs_per_host;
+ int error = 0;
+ struct sdebug_host_info *sdbg_host;
+ struct sdebug_dev_info *sdbg_devinfo, *tmp;
+
+ sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
+ if (NULL == sdbg_host) {
+ printk(KERN_ERR "%s: out of memory at line %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&sdbg_host->dev_info_list);
+
+ devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
+ for (k = 0; k < devs_per_host; k++) {
+ sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
+ if (!sdbg_devinfo) {
+ printk(KERN_ERR "%s: out of memory at line %d\n",
+ __func__, __LINE__);
+ error = -ENOMEM;
+ goto clean;
+ }
+ }
+
+ spin_lock(&sdebug_host_list_lock);
+ list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
+ spin_unlock(&sdebug_host_list_lock);
+
+ sdbg_host->dev.bus = &pseudo_lld_bus;
+ sdbg_host->dev.parent = pseudo_primary;
+ sdbg_host->dev.release = &sdebug_release_adapter;
+ dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
+
+ error = device_register(&sdbg_host->dev);
+
+ if (error)
+ goto clean;
+
+ ++scsi_debug_add_host;
+ return error;
+
+clean:
+ list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
+ dev_list) {
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
+
+ kfree(sdbg_host);
+ return error;
+}
+
+static void sdebug_remove_adapter(void)
+{
+ struct sdebug_host_info * sdbg_host = NULL;
+
+ spin_lock(&sdebug_host_list_lock);
+ if (!list_empty(&sdebug_host_list)) {
+ sdbg_host = list_entry(sdebug_host_list.prev,
+ struct sdebug_host_info, host_list);
+ list_del(&sdbg_host->host_list);
+ }
+ spin_unlock(&sdebug_host_list_lock);
+
+ if (!sdbg_host)
+ return;
+
+ device_unregister(&sdbg_host->dev);
+ --scsi_debug_add_host;
+}
+
+static int
+sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
+{
+ int num_in_q = 0;
+ unsigned long iflags;
+ struct sdebug_dev_info *devip;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ devip = (struct sdebug_dev_info *)sdev->hostdata;
+ if (NULL == devip) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ return -ENODEV;
+ }
+ num_in_q = atomic_read(&devip->num_in_q);
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+
+ if (qdepth < 1)
+ qdepth = 1;
+ /* allow to exceed max host queued_arr elements for testing */
+ if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
+ qdepth = SCSI_DEBUG_CANQUEUE + 10;
+ scsi_change_queue_depth(sdev, qdepth);
+
+ if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: qdepth=%d, num_in_q=%d\n",
+ __func__, qdepth, num_in_q);
+ }
+ return sdev->queue_depth;
+}
+
+static int
+check_inject(struct scsi_cmnd *scp)
+{
+ struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+
+ memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
+
+ if (atomic_inc_return(&sdebug_cmnd_count) >=
+ abs(scsi_debug_every_nth)) {
+ atomic_set(&sdebug_cmnd_count, 0);
+ if (scsi_debug_every_nth < -1)
+ scsi_debug_every_nth = -1;
+ if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
+ return 1; /* ignore command causing timeout */
+ else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
+ scsi_medium_access_command(scp))
+ return 1; /* time out reads and writes */
+ if (sdebug_any_injecting_opt) {
+ int opts = scsi_debug_opts;
+
+ if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
+ ep->inj_recovered = true;
+ else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
+ ep->inj_transport = true;
+ else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
+ ep->inj_dif = true;
+ else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
+ ep->inj_dix = true;
+ else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
+ ep->inj_short = true;
+ }
+ }
+ return 0;
+}
+
+static int
+scsi_debug_queuecommand(struct scsi_cmnd *scp)
+{
+ u8 sdeb_i;
+ struct scsi_device *sdp = scp->device;
+ const struct opcode_info_t *oip;
+ const struct opcode_info_t *r_oip;
+ struct sdebug_dev_info *devip;
+ u8 *cmd = scp->cmnd;
+ int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
+ int k, na;
+ int errsts = 0;
+ int errsts_no_connect = DID_NO_CONNECT << 16;
+ u32 flags;
+ u16 sa;
+ u8 opcode = cmd[0];
+ bool has_wlun_rl;
+ bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
+
+ scsi_set_resid(scp, 0);
+ if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
+ char b[120];
+ int n, len, sb;
+
+ len = scp->cmd_len;
+ sb = (int)sizeof(b);
+ if (len > 32)
+ strcpy(b, "too long, over 32 bytes");
+ else {
+ for (k = 0, n = 0; k < len && n < sb; ++k)
+ n += scnprintf(b + n, sb - n, "%02x ",
+ (u32)cmd[k]);
+ }
+ sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
+ }
+ has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
+ if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
+ return schedule_resp(scp, NULL, errsts_no_connect, 0);
+
+ sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
+ oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
+ devip = (struct sdebug_dev_info *)sdp->hostdata;
+ if (!devip) {
+ devip = devInfoReg(sdp);
+ if (NULL == devip)
+ return schedule_resp(scp, NULL, errsts_no_connect, 0);
+ }
+ na = oip->num_attached;
+ r_pfp = oip->pfp;
+ if (na) { /* multiple commands with this opcode */
+ r_oip = oip;
+ if (FF_SA & r_oip->flags) {
+ if (F_SA_LOW & oip->flags)
+ sa = 0x1f & cmd[1];
+ else
+ sa = get_unaligned_be16(cmd + 8);
+ for (k = 0; k <= na; oip = r_oip->arrp + k++) {
+ if (opcode == oip->opcode && sa == oip->sa)
+ break;
+ }
+ } else { /* since no service action only check opcode */
+ for (k = 0; k <= na; oip = r_oip->arrp + k++) {
+ if (opcode == oip->opcode)
+ break;
+ }
+ }
+ if (k > na) {
+ if (F_SA_LOW & r_oip->flags)
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
+ else if (F_SA_HIGH & r_oip->flags)
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
+ else
+ mk_sense_invalid_opcode(scp);
+ goto check_cond;
+ }
+ } /* else (when na==0) we assume the oip is a match */
+ flags = oip->flags;
+ if (F_INV_OP & flags) {
+ mk_sense_invalid_opcode(scp);
+ goto check_cond;
+ }
+ if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
+ if (debug)
+ sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
+ "0x%x not supported for wlun\n", opcode);
+ mk_sense_invalid_opcode(scp);
+ goto check_cond;
+ }
+ if (scsi_debug_strict) { /* check cdb against mask */
+ u8 rem;
+ int j;
+
+ for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
+ rem = ~oip->len_mask[k] & cmd[k];
+ if (rem) {
+ for (j = 7; j >= 0; --j, rem <<= 1) {
+ if (0x80 & rem)
+ break;
+ }
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
+ goto check_cond;
+ }
+ }
+ }
+ if (!(F_SKIP_UA & flags) &&
+ SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
+ goto check_cond;
+ }
+ if ((F_M_ACCESS & flags) && devip->stopped) {
+ mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
+ if (debug)
+ sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
+ "%s\n", my_name, "initializing command "
+ "required");
+ errsts = check_condition_result;
+ goto fini;
+ }
+ if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
+ goto fini;
+ if (scsi_debug_every_nth) {
+ if (check_inject(scp))
+ return 0; /* ignore command: make trouble */
+ }
+ if (oip->pfp) /* if this command has a resp_* function, call it */
+ errsts = oip->pfp(scp, devip);
+ else if (r_pfp) /* if leaf function ptr NULL, try the root's */
+ errsts = r_pfp(scp, devip);
+
+fini:
+ return schedule_resp(scp, devip, errsts,
+ ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
+check_cond:
+ return schedule_resp(scp, devip, check_condition_result, 0);
+}
+
+static int
+sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ if (scsi_debug_host_lock) {
+ unsigned long iflags;
+ int rc;
+
+ spin_lock_irqsave(shost->host_lock, iflags);
+ rc = scsi_debug_queuecommand(cmd);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ return rc;
+ } else
+ return scsi_debug_queuecommand(cmd);
+}
+
+static struct scsi_host_template sdebug_driver_template = {
+ .show_info = scsi_debug_show_info,
+ .write_info = scsi_debug_write_info,
+ .proc_name = sdebug_proc_name,
+ .name = "SCSI DEBUG",
+ .info = scsi_debug_info,
+ .slave_alloc = scsi_debug_slave_alloc,
+ .slave_configure = scsi_debug_slave_configure,
+ .slave_destroy = scsi_debug_slave_destroy,
+ .ioctl = scsi_debug_ioctl,
+ .queuecommand = sdebug_queuecommand_lock_or_not,
+ .change_queue_depth = sdebug_change_qdepth,
+ .eh_abort_handler = scsi_debug_abort,
+ .eh_device_reset_handler = scsi_debug_device_reset,
+ .eh_target_reset_handler = scsi_debug_target_reset,
+ .eh_bus_reset_handler = scsi_debug_bus_reset,
+ .eh_host_reset_handler = scsi_debug_host_reset,
+ .can_queue = SCSI_DEBUG_CANQUEUE,
+ .this_id = 7,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .cmd_per_lun = DEF_CMD_PER_LUN,
+ .max_sectors = -1U,
+ .use_clustering = DISABLE_CLUSTERING,
+ .module = THIS_MODULE,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct sdebug_scmd_extra_t),
+};
+
+static int sdebug_driver_probe(struct device * dev)
+{
+ int error = 0;
+ int opts;
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *hpnt;
+ int host_prot;
+
+ sdbg_host = to_sdebug_host(dev);
+
+ sdebug_driver_template.can_queue = scsi_debug_max_queue;
+ if (scsi_debug_clustering)
+ sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
+ hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+ if (NULL == hpnt) {
+ pr_err("%s: scsi_host_alloc failed\n", __func__);
+ error = -ENODEV;
+ return error;
+ }
+
+ sdbg_host->shost = hpnt;
+ *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
+ if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
+ hpnt->max_id = scsi_debug_num_tgts + 1;
+ else
+ hpnt->max_id = scsi_debug_num_tgts;
+ hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
+
+ host_prot = 0;
+
+ switch (scsi_debug_dif) {
+
+ case SD_DIF_TYPE1_PROTECTION:
+ host_prot = SHOST_DIF_TYPE1_PROTECTION;
+ if (scsi_debug_dix)
+ host_prot |= SHOST_DIX_TYPE1_PROTECTION;
+ break;
+
+ case SD_DIF_TYPE2_PROTECTION:
+ host_prot = SHOST_DIF_TYPE2_PROTECTION;
+ if (scsi_debug_dix)
+ host_prot |= SHOST_DIX_TYPE2_PROTECTION;
+ break;
+
+ case SD_DIF_TYPE3_PROTECTION:
+ host_prot = SHOST_DIF_TYPE3_PROTECTION;
+ if (scsi_debug_dix)
+ host_prot |= SHOST_DIX_TYPE3_PROTECTION;
+ break;
+
+ default:
+ if (scsi_debug_dix)
+ host_prot |= SHOST_DIX_TYPE0_PROTECTION;
+ break;
+ }
+
+ scsi_host_set_prot(hpnt, host_prot);
+
+ printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
+ (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+ (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+ (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+ (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+ (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+ (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+ (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+
+ if (scsi_debug_guard == 1)
+ scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
+ else
+ scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
+
+ opts = scsi_debug_opts;
+ if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
+ sdebug_any_injecting_opt = true;
+ else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
+ sdebug_any_injecting_opt = true;
+
+ error = scsi_add_host(hpnt, &sdbg_host->dev);
+ if (error) {
+ printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+ error = -ENODEV;
+ scsi_host_put(hpnt);
+ } else
+ scsi_scan_host(hpnt);
+
+ return error;
+}
+
+static int sdebug_driver_remove(struct device * dev)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct sdebug_dev_info *sdbg_devinfo, *tmp;
+
+ sdbg_host = to_sdebug_host(dev);
+
+ if (!sdbg_host) {
+ printk(KERN_ERR "%s: Unable to locate host info\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ scsi_remove_host(sdbg_host->shost);
+
+ list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
+ dev_list) {
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
+
+ scsi_host_put(sdbg_host->shost);
+ return 0;
+}
+
+static int pseudo_lld_bus_match(struct device *dev,
+ struct device_driver *dev_driver)
+{
+ return 1;
+}
+
+static struct bus_type pseudo_lld_bus = {
+ .name = "pseudo",
+ .match = pseudo_lld_bus_match,
+ .probe = sdebug_driver_probe,
+ .remove = sdebug_driver_remove,
+ .drv_groups = sdebug_drv_groups,
+};
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
new file mode 100644
index 000000000..9f77d2323
--- /dev/null
+++ b/drivers/scsi/scsi_devinfo.c
@@ -0,0 +1,902 @@
+
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
+
+#include "scsi_priv.h"
+
+
+/*
+ * scsi_dev_info_list: structure to hold black/white listed devices.
+ */
+struct scsi_dev_info_list {
+ struct list_head dev_info_list;
+ char vendor[8];
+ char model[16];
+ unsigned flags;
+ unsigned compatible; /* for use with scsi_static_device_list entries */
+};
+
+struct scsi_dev_info_list_table {
+ struct list_head node; /* our node for being on the master list */
+ struct list_head scsi_dev_info_list; /* head of dev info list */
+ const char *name; /* name of list for /proc (NULL for global) */
+ int key; /* unique numeric identifier */
+};
+
+
+static const char spaces[] = " "; /* 16 of them */
+static unsigned scsi_default_dev_flags;
+static LIST_HEAD(scsi_dev_info_list);
+static char scsi_dev_flags[256];
+
+/*
+ * scsi_static_device_list: deprecated list of devices that require
+ * settings that differ from the default, includes black-listed (broken)
+ * devices. The entries here are added to the tail of scsi_dev_info_list
+ * via scsi_dev_info_list_init.
+ *
+ * Do not add to this list, use the command line or proc interface to add
+ * to the scsi_dev_info_list. This table will eventually go away.
+ */
+static struct {
+ char *vendor;
+ char *model;
+ char *revision; /* revision known to be bad, unused */
+ unsigned flags;
+} scsi_static_device_list[] __initdata = {
+ /*
+ * The following devices are known not to tolerate a lun != 0 scan
+ * for one reason or another. Some will respond to all luns,
+ * others will lock up.
+ */
+ {"Aashima", "IMAGERY 2400SP", "1.03", BLIST_NOLUN}, /* locks up */
+ {"CHINON", "CD-ROM CDS-431", "H42", BLIST_NOLUN}, /* locks up */
+ {"CHINON", "CD-ROM CDS-535", "Q14", BLIST_NOLUN}, /* locks up */
+ {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */
+ {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */
+ {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */
+ {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */
+ {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */
+ {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */
+ {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */
+ {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */
+ {"MAXTOR", "MXT-1240S", "I1.2", BLIST_NOLUN}, /* locks up */
+ {"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN}, /* locks up */
+ {"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN}, /* locks up */
+ {"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN}, /* responds to all lun */
+ {"MICROTEK", "ScanMakerIII", "2.30", BLIST_NOLUN}, /* responds to all lun */
+ {"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},/* locks up */
+ {"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN}, /* responds to all lun */
+ {"RODIME", "RO3000S", "2.33", BLIST_NOLUN}, /* locks up */
+ {"SUN", "SENA", NULL, BLIST_NOLUN}, /* responds to all luns */
+ /*
+ * The following causes a failed REQUEST SENSE on lun 1 for
+ * aha152x controller, which causes SCSI code to reset bus.
+ */
+ {"SANYO", "CRD-250S", "1.20", BLIST_NOLUN},
+ /*
+ * The following causes a failed REQUEST SENSE on lun 1 for
+ * aha152x controller, which causes SCSI code to reset bus.
+ */
+ {"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN},
+ {"SEAGATE", "ST296", "921", BLIST_NOLUN}, /* responds to all lun */
+ {"SEAGATE", "ST1581", "6538", BLIST_NOLUN}, /* responds to all lun */
+ {"SONY", "CD-ROM CDU-541", "4.3d", BLIST_NOLUN},
+ {"SONY", "CD-ROM CDU-55S", "1.0i", BLIST_NOLUN},
+ {"SONY", "CD-ROM CDU-561", "1.7x", BLIST_NOLUN},
+ {"SONY", "CD-ROM CDU-8012", NULL, BLIST_NOLUN},
+ {"SONY", "SDT-5000", "3.17", BLIST_SELECT_NO_ATN},
+ {"TANDBERG", "TDC 3600", "U07", BLIST_NOLUN}, /* locks up */
+ {"TEAC", "CD-R55S", "1.0H", BLIST_NOLUN}, /* locks up */
+ /*
+ * The following causes a failed REQUEST SENSE on lun 1 for
+ * seagate controller, which causes SCSI code to reset bus.
+ */
+ {"TEAC", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"TEAC", "MT-2ST/45S2-27", "RV M", BLIST_NOLUN}, /* responds to all lun */
+ /*
+ * The following causes a failed REQUEST SENSE on lun 1 for
+ * seagate controller, which causes SCSI code to reset bus.
+ */
+ {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
+ {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
+ {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+ {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
+ {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
+ {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
+ {"NEC", "D3856", "0009", BLIST_NOLUN},
+ {"QUANTUM", "LPS525S", "3110", BLIST_NOLUN}, /* locks up */
+ {"QUANTUM", "PD1225S", "3110", BLIST_NOLUN}, /* locks up */
+ {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
+ {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
+ {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
+ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
+ {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
+ {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
+ {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
+ {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */
+ {"", "Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */
+
+ /*
+ * Other types of devices that have special flags.
+ * Note that all USB devices should have the BLIST_INQUIRY_36 flag.
+ */
+ {"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
+ {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
+ {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+ {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
+ {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
+ {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
+ {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
+ {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */
+ {"CNSI", "G7324", NULL, BLIST_SPARSELUN}, /* Chaparral G7324 RAID */
+ {"CNSi", "G8324", NULL, BLIST_SPARSELUN}, /* Chaparral G8324 RAID */
+ {"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
+ BLIST_MAX_512 | BLIST_REPORTLUN2}, /* Compaq RA4x00 */
+ {"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */
+ {"COMPAQ", "CR3500", NULL, BLIST_FORCELUN},
+ {"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
+ {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
+ {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
+ {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN},
+ {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
+ {"DELL", "PV660F", NULL, BLIST_SPARSELUN},
+ {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN},
+ {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
+ {"DELL", "PV530F", NULL, BLIST_SPARSELUN},
+ {"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
+ {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
+ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
+ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+ {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
+ {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
+ {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
+ {"easyRAID", "F8", NULL, BLIST_NOREPORTLUN},
+ {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
+ {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
+ {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
+ {"HP", "C1557A", NULL, BLIST_FORCELUN},
+ {"HP", "C3323-300", "4269", BLIST_NOTQ},
+ {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
+ {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
+ {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+ {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
+ {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
+ {"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
+ {"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
+ {"INSITE", "I325VM", NULL, BLIST_KEY},
+ {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
+ {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
+ {"MegaRAID", "LD", NULL, BLIST_FORCELUN},
+ {"MICROP", "4110", NULL, BLIST_NOTQ},
+ {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
+ {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
+ {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
+ {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
+ {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ {"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
+ {"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */
+ {"SEAGATE", "ST3390N", "9546", BLIST_NOTQ},
+ {"SEAGATE", "ST900MM0006", NULL, BLIST_SKIP_VPD_PAGES},
+ {"SGI", "RAID3", "*", BLIST_SPARSELUN},
+ {"SGI", "RAID5", "*", BLIST_SPARSELUN},
+ {"SGI", "TP9100", "*", BLIST_REPORTLUN2},
+ {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
+ {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
+ {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
+ {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
+ {"SUN", "T300", "*", BLIST_SPARSELUN},
+ {"SUN", "T4", "*", BLIST_SPARSELUN},
+ {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
+ {"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
+ {"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
+ {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
+ {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */
+ {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN},
+ {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN},
+ {"WangDAT", "Model 1300", "02.4", BLIST_SELECT_NO_ATN},
+ {"WDC WD25", "00JB-00FUA0", NULL, BLIST_NOREPORTLUN},
+ {"XYRATEX", "RS", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"Zzyzx", "RocketStor 500S", NULL, BLIST_SPARSELUN},
+ {"Zzyzx", "RocketStor 2000", NULL, BLIST_SPARSELUN},
+ { NULL, NULL, NULL, 0 },
+};
+
+static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key)
+{
+ struct scsi_dev_info_list_table *devinfo_table;
+ int found = 0;
+
+ list_for_each_entry(devinfo_table, &scsi_dev_info_list, node)
+ if (devinfo_table->key == key) {
+ found = 1;
+ break;
+ }
+ if (!found)
+ return ERR_PTR(-EINVAL);
+
+ return devinfo_table;
+}
+
+/*
+ * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into
+ * devinfo vendor and model strings.
+ */
+static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
+ char *from, int compatible)
+{
+ size_t from_length;
+
+ from_length = strlen(from);
+ strncpy(to, from, min(to_length, from_length));
+ if (from_length < to_length) {
+ if (compatible) {
+ /*
+ * NUL terminate the string if it is short.
+ */
+ to[from_length] = '\0';
+ } else {
+ /*
+ * space pad the string if it is short.
+ */
+ strncpy(&to[from_length], spaces,
+ to_length - from_length);
+ }
+ }
+ if (from_length > to_length)
+ printk(KERN_WARNING "%s: %s string '%s' is too long\n",
+ __func__, name, from);
+}
+
+/**
+ * scsi_dev_info_list_add - add one dev_info list entry.
+ * @compatible: if true, null terminate short strings. Otherwise space pad.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @strflags: integer string
+ * @flags: if strflags NULL, use this flag value
+ *
+ * Description:
+ * Create and add one dev_info entry for @vendor, @model, @strflags or
+ * @flag. If @compatible, add to the tail of the list, do not space
+ * pad, and set devinfo->compatible. The scsi_static_device_list entries
+ * are added with @compatible 1 and @clfags NULL.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
+ char *strflags, int flags)
+{
+ return scsi_dev_info_list_add_keyed(compatible, vendor, model,
+ strflags, flags,
+ SCSI_DEVINFO_GLOBAL);
+}
+
+/**
+ * scsi_dev_info_list_add_keyed - add one dev_info list entry.
+ * @compatible: if true, null terminate short strings. Otherwise space pad.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @strflags: integer string
+ * @flags: if strflags NULL, use this flag value
+ * @key: specify list to use
+ *
+ * Description:
+ * Create and add one dev_info entry for @vendor, @model,
+ * @strflags or @flag in list specified by @key. If @compatible,
+ * add to the tail of the list, do not space pad, and set
+ * devinfo->compatible. The scsi_static_device_list entries are
+ * added with @compatible 1 and @clfags NULL.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
+ char *strflags, int flags, int key)
+{
+ struct scsi_dev_info_list *devinfo;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
+ if (!devinfo) {
+ printk(KERN_ERR "%s: no memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ scsi_strcpy_devinfo("vendor", devinfo->vendor, sizeof(devinfo->vendor),
+ vendor, compatible);
+ scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model),
+ model, compatible);
+
+ if (strflags)
+ devinfo->flags = simple_strtoul(strflags, NULL, 0);
+ else
+ devinfo->flags = flags;
+
+ devinfo->compatible = compatible;
+
+ if (compatible)
+ list_add_tail(&devinfo->dev_info_list,
+ &devinfo_table->scsi_dev_info_list);
+ else
+ list_add(&devinfo->dev_info_list,
+ &devinfo_table->scsi_dev_info_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
+
+/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @key: specify list to use
+ *
+ * Description:
+ * Remove and destroy one dev_info entry for @vendor, @model
+ * in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+ struct scsi_dev_info_list *devinfo, *found = NULL;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
+ if (devinfo->compatible) {
+ /*
+ * Behave like the older version of get_device_flags.
+ */
+ size_t max;
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ max = 8; /* max length of vendor */
+ while ((max > 0) && *vendor == ' ') {
+ max--;
+ vendor++;
+ }
+ /*
+ * XXX removing the following strlen() would be
+ * good, using it means that for a an entry not in
+ * the list, we scan every byte of every vendor
+ * listed in scsi_static_device_list[], and never match
+ * a single one (and still have to compare at
+ * least the first byte of each vendor).
+ */
+ if (memcmp(devinfo->vendor, vendor,
+ min(max, strlen(devinfo->vendor))))
+ continue;
+ /*
+ * Skip spaces again.
+ */
+ max = 16; /* max length of model */
+ while ((max > 0) && *model == ' ') {
+ max--;
+ model++;
+ }
+ if (memcmp(devinfo->model, model,
+ min(max, strlen(devinfo->model))))
+ continue;
+ found = devinfo;
+ } else {
+ if (!memcmp(devinfo->vendor, vendor,
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
+ found = devinfo;
+ }
+ if (found)
+ break;
+ }
+
+ if (found) {
+ list_del(&found->dev_info_list);
+ kfree(found);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
+
+/**
+ * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
+ * @dev_list: string of device flags to add
+ *
+ * Description:
+ * Parse dev_list, and add entries to the scsi_dev_info_list.
+ * dev_list is of the form "vendor:product:flag,vendor:product:flag".
+ * dev_list is modified via strsep. Can be called for command line
+ * addition, for proc or mabye a sysfs interface.
+ *
+ * Returns: 0 if OK, -error on failure.
+ **/
+static int scsi_dev_info_list_add_str(char *dev_list)
+{
+ char *vendor, *model, *strflags, *next;
+ char *next_check;
+ int res = 0;
+
+ next = dev_list;
+ if (next && next[0] == '"') {
+ /*
+ * Ignore both the leading and trailing quote.
+ */
+ next++;
+ next_check = ",\"";
+ } else {
+ next_check = ",";
+ }
+
+ /*
+ * For the leading and trailing '"' case, the for loop comes
+ * through the last time with vendor[0] == '\0'.
+ */
+ for (vendor = strsep(&next, ":"); vendor && (vendor[0] != '\0')
+ && (res == 0); vendor = strsep(&next, ":")) {
+ strflags = NULL;
+ model = strsep(&next, ":");
+ if (model)
+ strflags = strsep(&next, next_check);
+ if (!model || !strflags) {
+ printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
+ " '%s'\n", __func__, vendor, model,
+ strflags);
+ res = -EINVAL;
+ } else
+ res = scsi_dev_info_list_add(0 /* compatible */, vendor,
+ model, strflags, 0);
+ }
+ return res;
+}
+
+/**
+ * get_device_flags - get device specific flags from the dynamic device list.
+ * @sdev: &scsi_device to get flags for
+ * @vendor: vendor name
+ * @model: model name
+ *
+ * Description:
+ * Search the global scsi_dev_info_list (specified by list zero)
+ * for an entry matching @vendor and @model, if found, return the
+ * matching flags value, else return the host or global default
+ * settings. Called during scan time.
+ **/
+int scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model)
+{
+ return scsi_get_device_flags_keyed(sdev, vendor, model,
+ SCSI_DEVINFO_GLOBAL);
+}
+
+
+/**
+ * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list
+ * @sdev: &scsi_device to get flags for
+ * @vendor: vendor name
+ * @model: model name
+ * @key: list to look up
+ *
+ * Description:
+ * Search the scsi_dev_info_list specified by @key for an entry
+ * matching @vendor and @model, if found, return the matching
+ * flags value, else return the host or global default settings.
+ * Called during scan time.
+ **/
+int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key)
+{
+ struct scsi_dev_info_list *devinfo;
+ struct scsi_dev_info_list_table *devinfo_table;
+
+ devinfo_table = scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
+ if (devinfo->compatible) {
+ /*
+ * Behave like the older version of get_device_flags.
+ */
+ size_t max;
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ max = 8; /* max length of vendor */
+ while ((max > 0) && *vendor == ' ') {
+ max--;
+ vendor++;
+ }
+ /*
+ * XXX removing the following strlen() would be
+ * good, using it means that for a an entry not in
+ * the list, we scan every byte of every vendor
+ * listed in scsi_static_device_list[], and never match
+ * a single one (and still have to compare at
+ * least the first byte of each vendor).
+ */
+ if (memcmp(devinfo->vendor, vendor,
+ min(max, strlen(devinfo->vendor))))
+ continue;
+ /*
+ * Skip spaces again.
+ */
+ max = 16; /* max length of model */
+ while ((max > 0) && *model == ' ') {
+ max--;
+ model++;
+ }
+ if (memcmp(devinfo->model, model,
+ min(max, strlen(devinfo->model))))
+ continue;
+ return devinfo->flags;
+ } else {
+ if (!memcmp(devinfo->vendor, vendor,
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
+ return devinfo->flags;
+ }
+ }
+ /* nothing found, return nothing */
+ if (key != SCSI_DEVINFO_GLOBAL)
+ return 0;
+
+ /* except for the global list, where we have an exception */
+ if (sdev->sdev_bflags)
+ return sdev->sdev_bflags;
+
+ return scsi_default_dev_flags;
+}
+EXPORT_SYMBOL(scsi_get_device_flags_keyed);
+
+#ifdef CONFIG_SCSI_PROC_FS
+struct double_list {
+ struct list_head *top;
+ struct list_head *bottom;
+};
+
+static int devinfo_seq_show(struct seq_file *m, void *v)
+{
+ struct double_list *dl = v;
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table, node);
+ struct scsi_dev_info_list *devinfo =
+ list_entry(dl->bottom, struct scsi_dev_info_list,
+ dev_info_list);
+
+ if (devinfo_table->scsi_dev_info_list.next == dl->bottom &&
+ devinfo_table->name)
+ seq_printf(m, "[%s]:\n", devinfo_table->name);
+
+ seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
+ devinfo->vendor, devinfo->model, devinfo->flags);
+ return 0;
+}
+
+static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos)
+{
+ struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL);
+ loff_t pos = *ppos;
+
+ if (!dl)
+ return NULL;
+
+ list_for_each(dl->top, &scsi_dev_info_list) {
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table,
+ node);
+ list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list)
+ if (pos-- == 0)
+ return dl;
+ }
+
+ kfree(dl);
+ return NULL;
+}
+
+static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos)
+{
+ struct double_list *dl = v;
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table, node);
+
+ ++*ppos;
+ dl->bottom = dl->bottom->next;
+ while (&devinfo_table->scsi_dev_info_list == dl->bottom) {
+ dl->top = dl->top->next;
+ if (dl->top == &scsi_dev_info_list) {
+ kfree(dl);
+ return NULL;
+ }
+ devinfo_table = list_entry(dl->top,
+ struct scsi_dev_info_list_table,
+ node);
+ dl->bottom = devinfo_table->scsi_dev_info_list.next;
+ }
+
+ return dl;
+}
+
+static void devinfo_seq_stop(struct seq_file *m, void *v)
+{
+ kfree(v);
+}
+
+static const struct seq_operations scsi_devinfo_seq_ops = {
+ .start = devinfo_seq_start,
+ .next = devinfo_seq_next,
+ .stop = devinfo_seq_stop,
+ .show = devinfo_seq_show,
+};
+
+static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &scsi_devinfo_seq_ops);
+}
+
+/*
+ * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
+ *
+ * Description: Adds a black/white list entry for vendor and model with an
+ * integer value of flag to the scsi device info list.
+ * To use, echo "vendor:model:flag" > /proc/scsi/device_info
+ */
+static ssize_t proc_scsi_devinfo_write(struct file *file,
+ const char __user *buf,
+ size_t length, loff_t *ppos)
+{
+ char *buffer;
+ ssize_t err = length;
+
+ if (!buf || length>PAGE_SIZE)
+ return -EINVAL;
+ if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ if (copy_from_user(buffer, buf, length)) {
+ err =-EFAULT;
+ goto out;
+ }
+
+ if (length < PAGE_SIZE)
+ buffer[length] = '\0';
+ else if (buffer[PAGE_SIZE-1]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ scsi_dev_info_list_add_str(buffer);
+
+out:
+ free_page((unsigned long)buffer);
+ return err;
+}
+
+static const struct file_operations scsi_devinfo_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_scsi_devinfo_open,
+ .read = seq_read,
+ .write = proc_scsi_devinfo_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif /* CONFIG_SCSI_PROC_FS */
+
+module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0);
+MODULE_PARM_DESC(dev_flags,
+ "Given scsi_dev_flags=vendor:model:flags[,v:m:f] add black/white"
+ " list entries for vendor and model with an integer value of flags"
+ " to the scsi device info list");
+
+module_param_named(default_dev_flags, scsi_default_dev_flags, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(default_dev_flags,
+ "scsi default device flag integer value");
+
+/**
+ * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
+ **/
+void scsi_exit_devinfo(void)
+{
+#ifdef CONFIG_SCSI_PROC_FS
+ remove_proc_entry("scsi/device_info", NULL);
+#endif
+
+ scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL);
+}
+
+/**
+ * scsi_dev_info_add_list - add a new devinfo list
+ * @key: key of the list to add
+ * @name: Name of the list to add (for /proc/scsi/device_info)
+ *
+ * Adds the requested list, returns zero on success, -EEXIST if the
+ * key is already registered to a list, or other error on failure.
+ */
+int scsi_dev_info_add_list(int key, const char *name)
+{
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (!IS_ERR(devinfo_table))
+ /* list already exists */
+ return -EEXIST;
+
+ devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL);
+
+ if (!devinfo_table)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&devinfo_table->node);
+ INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list);
+ devinfo_table->name = name;
+ devinfo_table->key = key;
+ list_add_tail(&devinfo_table->node, &scsi_dev_info_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_dev_info_add_list);
+
+/**
+ * scsi_dev_info_remove_list - destroy an added devinfo list
+ * @key: key of the list to destroy
+ *
+ * Iterates over the entire list first, freeing all the values, then
+ * frees the list itself. Returns 0 on success or -EINVAL if the key
+ * can't be found.
+ */
+int scsi_dev_info_remove_list(int key)
+{
+ struct list_head *lh, *lh_next;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ /* no such list */
+ return -EINVAL;
+
+ /* remove from the master list */
+ list_del(&devinfo_table->node);
+
+ list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) {
+ struct scsi_dev_info_list *devinfo;
+
+ devinfo = list_entry(lh, struct scsi_dev_info_list,
+ dev_info_list);
+ kfree(devinfo);
+ }
+ kfree(devinfo_table);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_dev_info_remove_list);
+
+/**
+ * scsi_init_devinfo - set up the dynamic device list.
+ *
+ * Description:
+ * Add command line entries from scsi_dev_flags, then add
+ * scsi_static_device_list entries to the scsi device info list.
+ */
+int __init scsi_init_devinfo(void)
+{
+#ifdef CONFIG_SCSI_PROC_FS
+ struct proc_dir_entry *p;
+#endif
+ int error, i;
+
+ error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL);
+ if (error)
+ return error;
+
+ error = scsi_dev_info_list_add_str(scsi_dev_flags);
+ if (error)
+ goto out;
+
+ for (i = 0; scsi_static_device_list[i].vendor; i++) {
+ error = scsi_dev_info_list_add(1 /* compatibile */,
+ scsi_static_device_list[i].vendor,
+ scsi_static_device_list[i].model,
+ NULL,
+ scsi_static_device_list[i].flags);
+ if (error)
+ goto out;
+ }
+
+#ifdef CONFIG_SCSI_PROC_FS
+ p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
+ if (!p) {
+ error = -ENOMEM;
+ goto out;
+ }
+#endif /* CONFIG_SCSI_PROC_FS */
+
+ out:
+ if (error)
+ scsi_exit_devinfo();
+ return error;
+}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
new file mode 100644
index 000000000..c95a4e943
--- /dev/null
+++ b/drivers/scsi/scsi_error.c
@@ -0,0 +1,2619 @@
+/*
+ * scsi_error.c Copyright (C) 1997 Eric Youngdale
+ *
+ * SCSI error/timeout handling
+ * Initial versions: Eric Youngdale. Based upon conversations with
+ * Leonard Zubkoff and David Miller at Linux Expo,
+ * ideas originating from all over the place.
+ *
+ * Restructured scsi_unjam_host and associated functions.
+ * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
+ *
+ * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
+ * minor cleanups.
+ * September 30, 2002 Mike Anderson (andmike@us.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/sg.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+#include "scsi_transport_api.h"
+
+#include <trace/events/scsi.h>
+
+static void scsi_eh_done(struct scsi_cmnd *scmd);
+
+/*
+ * These should *probably* be handled by the host itself.
+ * Since it is allowed to sleep, it probably should.
+ */
+#define BUS_RESET_SETTLE_TIME (10)
+#define HOST_RESET_SETTLE_TIME (10)
+
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+static int scsi_try_to_abort_cmd(struct scsi_host_template *,
+ struct scsi_cmnd *);
+
+/* called with shost->host_lock held */
+void scsi_eh_wakeup(struct Scsi_Host *shost)
+{
+ if (atomic_read(&shost->host_busy) == shost->host_failed) {
+ trace_scsi_eh_wakeup(shost);
+ wake_up_process(shost->ehandler);
+ SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
+ "Waking error handler thread\n"));
+ }
+}
+
+/**
+ * scsi_schedule_eh - schedule EH for SCSI host
+ * @shost: SCSI host to invoke error handling on.
+ *
+ * Schedule SCSI EH without scmd.
+ */
+void scsi_schedule_eh(struct Scsi_Host *shost)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
+ scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
+ shost->host_eh_scheduled++;
+ scsi_eh_wakeup(shost);
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(scsi_schedule_eh);
+
+static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
+{
+ if (!shost->last_reset || shost->eh_deadline == -1)
+ return 0;
+
+ /*
+ * 32bit accesses are guaranteed to be atomic
+ * (on all supported architectures), so instead
+ * of using a spinlock we can as well double check
+ * if eh_deadline has been set to 'off' during the
+ * time_before call.
+ */
+ if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
+ shost->eh_deadline > -1)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * scmd_eh_abort_handler - Handle command aborts
+ * @work: command to be aborted.
+ */
+void
+scmd_eh_abort_handler(struct work_struct *work)
+{
+ struct scsi_cmnd *scmd =
+ container_of(work, struct scsi_cmnd, abort_work.work);
+ struct scsi_device *sdev = scmd->device;
+ int rtn;
+
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "eh timeout, not aborting\n"));
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "aborting command\n"));
+ rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
+ if (rtn == SUCCESS) {
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "eh timeout, not retrying "
+ "aborted command\n"));
+ } else if (!scsi_noretry_cmd(scmd) &&
+ (++scmd->retries <= scmd->allowed)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "retry aborted command\n"));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ return;
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "finish aborted command\n"));
+ scsi_finish_command(scmd);
+ return;
+ }
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "cmd abort %s\n",
+ (rtn == FAST_IO_FAIL) ?
+ "not send" : "failed"));
+ }
+ }
+
+ if (!scsi_eh_scmd_add(scmd, 0)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "terminate aborted command\n"));
+ set_host_byte(scmd, DID_TIME_OUT);
+ scsi_finish_command(scmd);
+ }
+}
+
+/**
+ * scsi_abort_command - schedule a command abort
+ * @scmd: scmd to abort.
+ *
+ * We only need to abort commands after a command timeout
+ */
+static int
+scsi_abort_command(struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
+ /*
+ * Retry after abort failed, escalate to next level.
+ */
+ scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "previous abort failed\n"));
+ BUG_ON(delayed_work_pending(&scmd->abort_work));
+ return FAILED;
+ }
+
+ /*
+ * Do not try a command abort if
+ * SCSI EH has already started.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_in_recovery(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "not aborting, host in recovery\n"));
+ return FAILED;
+ }
+
+ if (shost->eh_deadline != -1 && !shost->last_reset)
+ shost->last_reset = jiffies;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
+ queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
+ return SUCCESS;
+}
+
+/**
+ * scsi_eh_scmd_add - add scsi cmd to error handling.
+ * @scmd: scmd to run eh on.
+ * @eh_flag: optional SCSI_EH flag.
+ *
+ * Return value:
+ * 0 on failure.
+ */
+int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!shost->ehandler)
+ return 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_RECOVERY))
+ if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
+ goto out_unlock;
+
+ if (shost->eh_deadline != -1 && !shost->last_reset)
+ shost->last_reset = jiffies;
+
+ ret = 1;
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
+ eh_flag &= ~SCSI_EH_CANCEL_CMD;
+ scmd->eh_eflags |= eh_flag;
+ list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ out_unlock:
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return ret;
+}
+
+/**
+ * scsi_times_out - Timeout function for normal scsi commands.
+ * @req: request that is timing out.
+ *
+ * Notes:
+ * We do not need to lock this. There is the potential for a race
+ * only in that the normal completion handling might run, but if the
+ * normal completion function determines that the timer has already
+ * fired, then it mustn't do anything.
+ */
+enum blk_eh_timer_return scsi_times_out(struct request *req)
+{
+ struct scsi_cmnd *scmd = req->special;
+ enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ struct Scsi_Host *host = scmd->device->host;
+
+ trace_scsi_dispatch_cmd_timeout(scmd);
+ scsi_log_completion(scmd, TIMEOUT_ERROR);
+
+ if (host->eh_deadline != -1 && !host->last_reset)
+ host->last_reset = jiffies;
+
+ if (host->transportt->eh_timed_out)
+ rtn = host->transportt->eh_timed_out(scmd);
+ else if (host->hostt->eh_timed_out)
+ rtn = host->hostt->eh_timed_out(scmd);
+
+ if (rtn == BLK_EH_NOT_HANDLED) {
+ if (!host->hostt->no_async_abort &&
+ scsi_abort_command(scmd) == SUCCESS)
+ return BLK_EH_NOT_HANDLED;
+
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
+ rtn = BLK_EH_HANDLED;
+ }
+
+ return rtn;
+}
+
+/**
+ * scsi_block_when_processing_errors - Prevent cmds from being queued.
+ * @sdev: Device on which we are performing recovery.
+ *
+ * Description:
+ * We block until the host is out of error recovery, and then check to
+ * see whether the host or the device is offline.
+ *
+ * Return value:
+ * 0 when dev was taken offline by error recovery. 1 OK to proceed.
+ */
+int scsi_block_when_processing_errors(struct scsi_device *sdev)
+{
+ int online;
+
+ wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
+
+ online = scsi_device_online(sdev);
+
+ SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
+ "%s: rtn: %d\n", __func__, online));
+
+ return online;
+}
+EXPORT_SYMBOL(scsi_block_when_processing_errors);
+
+#ifdef CONFIG_SCSI_LOGGING
+/**
+ * scsi_eh_prt_fail_stats - Log info on failures.
+ * @shost: scsi host being recovered.
+ * @work_q: Queue of scsi cmds to process.
+ */
+static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
+ struct list_head *work_q)
+{
+ struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
+ int total_failures = 0;
+ int cmd_failed = 0;
+ int cmd_cancel = 0;
+ int devices_failed = 0;
+
+ shost_for_each_device(sdev, shost) {
+ list_for_each_entry(scmd, work_q, eh_entry) {
+ if (scmd->device == sdev) {
+ ++total_failures;
+ if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
+ ++cmd_cancel;
+ else
+ ++cmd_failed;
+ }
+ }
+
+ if (cmd_cancel || cmd_failed) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: cmds failed: %d, cancel: %d\n",
+ __func__, cmd_failed,
+ cmd_cancel));
+ cmd_cancel = 0;
+ cmd_failed = 0;
+ ++devices_failed;
+ }
+ }
+
+ SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
+ "Total of %d commands on %d"
+ " devices require eh work\n",
+ total_failures, devices_failed));
+}
+#endif
+
+ /**
+ * scsi_report_lun_change - Set flag on all *other* devices on the same target
+ * to indicate that a UNIT ATTENTION is expected.
+ * @sdev: Device reporting the UNIT ATTENTION
+ */
+static void scsi_report_lun_change(struct scsi_device *sdev)
+{
+ sdev->sdev_target->expecting_lun_change = 1;
+}
+
+/**
+ * scsi_report_sense - Examine scsi sense information and log messages for
+ * certain conditions, also issue uevents for some of them.
+ * @sdev: Device reporting the sense code
+ * @sshdr: sshdr to be examined
+ */
+static void scsi_report_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr)
+{
+ enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */
+
+ if (sshdr->sense_key == UNIT_ATTENTION) {
+ if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
+ evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Inquiry data has changed");
+ } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
+ evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
+ scsi_report_lun_change(sdev);
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "LUN assignments on this target have "
+ "changed. The Linux SCSI layer does not "
+ "automatically remap LUN assignments.\n");
+ } else if (sshdr->asc == 0x3f)
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "operating parameters on this target have "
+ "changed. The Linux SCSI layer does not "
+ "automatically adjust these parameters.\n");
+
+ if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
+ evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "LUN reached a thin provisioning soft "
+ "threshold.\n");
+ }
+
+ if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
+ evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Mode parameters changed");
+ } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
+ evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Capacity data has changed");
+ } else if (sshdr->asc == 0x2a)
+ sdev_printk(KERN_WARNING, sdev,
+ "Parameters changed");
+ }
+
+ if (evt_type != SDEV_EVT_MAXBITS) {
+ set_bit(evt_type, sdev->pending_events);
+ schedule_work(&sdev->event_work);
+ }
+}
+
+/**
+ * scsi_check_sense - Examine scsi cmd sense
+ * @scmd: Cmd to have sense checked.
+ *
+ * Return value:
+ * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
+ *
+ * Notes:
+ * When a deferred error is detected the current command has
+ * not been executed and needs retrying.
+ */
+static int scsi_check_sense(struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct scsi_sense_hdr sshdr;
+
+ if (! scsi_command_normalize_sense(scmd, &sshdr))
+ return FAILED; /* no valid sense data */
+
+ scsi_report_sense(sdev, &sshdr);
+
+ if (scsi_sense_is_deferred(&sshdr))
+ return NEEDS_RETRY;
+
+ if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
+ sdev->scsi_dh_data->scsi_dh->check_sense) {
+ int rc;
+
+ rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+ if (rc != SCSI_RETURN_NOT_HANDLED)
+ return rc;
+ /* handler does not care. Drop down to default handling */
+ }
+
+ if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+ /*
+ * nasty: for mid-layer issued TURs, we need to return the
+ * actual sense data without any recovery attempt. For eh
+ * issued ones, we need to try to recover and interpret
+ */
+ return SUCCESS;
+
+ /*
+ * Previous logic looked for FILEMARK, EOM or ILI which are
+ * mainly associated with tapes and returned SUCCESS.
+ */
+ if (sshdr.response_code == 0x70) {
+ /* fixed format */
+ if (scmd->sense_buffer[2] & 0xe0)
+ return SUCCESS;
+ } else {
+ /*
+ * descriptor format: look for "stream commands sense data
+ * descriptor" (see SSC-3). Assume single sense data
+ * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
+ */
+ if ((sshdr.additional_length > 3) &&
+ (scmd->sense_buffer[8] == 0x4) &&
+ (scmd->sense_buffer[11] & 0xe0))
+ return SUCCESS;
+ }
+
+ switch (sshdr.sense_key) {
+ case NO_SENSE:
+ return SUCCESS;
+ case RECOVERED_ERROR:
+ return /* soft_error */ SUCCESS;
+
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF */
+ return SUCCESS;
+
+ return NEEDS_RETRY;
+ case NOT_READY:
+ case UNIT_ATTENTION:
+ /*
+ * if we are expecting a cc/ua because of a bus reset that we
+ * performed, treat this just as a retry. otherwise this is
+ * information that we should pass up to the upper-level driver
+ * so that we can deal with it there.
+ */
+ if (scmd->device->expecting_cc_ua) {
+ /*
+ * Because some device does not queue unit
+ * attentions correctly, we carefully check
+ * additional sense code and qualifier so as
+ * not to squash media change unit attention.
+ */
+ if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
+ scmd->device->expecting_cc_ua = 0;
+ return NEEDS_RETRY;
+ }
+ }
+ /*
+ * we might also expect a cc/ua if another LUN on the target
+ * reported a UA with an ASC/ASCQ of 3F 0E -
+ * REPORTED LUNS DATA HAS CHANGED.
+ */
+ if (scmd->device->sdev_target->expecting_lun_change &&
+ sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
+ return NEEDS_RETRY;
+ /*
+ * if the device is in the process of becoming ready, we
+ * should retry.
+ */
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
+ return NEEDS_RETRY;
+ /*
+ * if the device is not started, we need to wake
+ * the error handler to start the motor
+ */
+ if (scmd->device->allow_restart &&
+ (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
+ return FAILED;
+ /*
+ * Pass the UA upwards for a determination in the completion
+ * functions.
+ */
+ return SUCCESS;
+
+ /* these are not supported */
+ case DATA_PROTECT:
+ if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
+ /* Thin provisioning hard threshold reached */
+ set_host_byte(scmd, DID_ALLOC_FAILURE);
+ return SUCCESS;
+ }
+ case COPY_ABORTED:
+ case VOLUME_OVERFLOW:
+ case MISCOMPARE:
+ case BLANK_CHECK:
+ set_host_byte(scmd, DID_TARGET_FAILURE);
+ return SUCCESS;
+
+ case MEDIUM_ERROR:
+ if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
+ sshdr.asc == 0x13 || /* AMNF DATA FIELD */
+ sshdr.asc == 0x14) { /* RECORD NOT FOUND */
+ set_host_byte(scmd, DID_MEDIUM_ERROR);
+ return SUCCESS;
+ }
+ return NEEDS_RETRY;
+
+ case HARDWARE_ERROR:
+ if (scmd->device->retry_hwerror)
+ return ADD_TO_MLQUEUE;
+ else
+ set_host_byte(scmd, DID_TARGET_FAILURE);
+
+ case ILLEGAL_REQUEST:
+ if (sshdr.asc == 0x20 || /* Invalid command operation code */
+ sshdr.asc == 0x21 || /* Logical block address out of range */
+ sshdr.asc == 0x24 || /* Invalid field in cdb */
+ sshdr.asc == 0x26) { /* Parameter value invalid */
+ set_host_byte(scmd, DID_TARGET_FAILURE);
+ }
+ return SUCCESS;
+
+ default:
+ return SUCCESS;
+ }
+}
+
+static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
+{
+ struct scsi_host_template *sht = sdev->host->hostt;
+ struct scsi_device *tmp_sdev;
+
+ if (!sht->track_queue_depth ||
+ sdev->queue_depth >= sdev->max_queue_depth)
+ return;
+
+ if (time_before(jiffies,
+ sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
+ return;
+
+ if (time_before(jiffies,
+ sdev->last_queue_full_time + sdev->queue_ramp_up_period))
+ return;
+
+ /*
+ * Walk all devices of a target and do
+ * ramp up on them.
+ */
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (tmp_sdev->channel != sdev->channel ||
+ tmp_sdev->id != sdev->id ||
+ tmp_sdev->queue_depth == sdev->max_queue_depth)
+ continue;
+
+ scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
+ sdev->last_queue_ramp_up = jiffies;
+ }
+}
+
+static void scsi_handle_queue_full(struct scsi_device *sdev)
+{
+ struct scsi_host_template *sht = sdev->host->hostt;
+ struct scsi_device *tmp_sdev;
+
+ if (!sht->track_queue_depth)
+ return;
+
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (tmp_sdev->channel != sdev->channel ||
+ tmp_sdev->id != sdev->id)
+ continue;
+ /*
+ * We do not know the number of commands that were at
+ * the device when we got the queue full so we start
+ * from the highest possible value and work our way down.
+ */
+ scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
+ }
+}
+
+/**
+ * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
+ * @scmd: SCSI cmd to examine.
+ *
+ * Notes:
+ * This is *only* called when we are examining the status of commands
+ * queued during error recovery. the main difference here is that we
+ * don't allow for the possibility of retries here, and we are a lot
+ * more restrictive about what we consider acceptable.
+ */
+static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
+{
+ /*
+ * first check the host byte, to see if there is anything in there
+ * that would indicate what we need to do.
+ */
+ if (host_byte(scmd->result) == DID_RESET) {
+ /*
+ * rats. we are already in the error handler, so we now
+ * get to try and figure out what to do next. if the sense
+ * is valid, we have a pretty good idea of what to do.
+ * if not, we mark it as FAILED.
+ */
+ return scsi_check_sense(scmd);
+ }
+ if (host_byte(scmd->result) != DID_OK)
+ return FAILED;
+
+ /*
+ * next, check the message byte.
+ */
+ if (msg_byte(scmd->result) != COMMAND_COMPLETE)
+ return FAILED;
+
+ /*
+ * now, check the status byte to see if this indicates
+ * anything special.
+ */
+ switch (status_byte(scmd->result)) {
+ case GOOD:
+ scsi_handle_queue_ramp_up(scmd->device);
+ case COMMAND_TERMINATED:
+ return SUCCESS;
+ case CHECK_CONDITION:
+ return scsi_check_sense(scmd);
+ case CONDITION_GOOD:
+ case INTERMEDIATE_GOOD:
+ case INTERMEDIATE_C_GOOD:
+ /*
+ * who knows? FIXME(eric)
+ */
+ return SUCCESS;
+ case RESERVATION_CONFLICT:
+ if (scmd->cmnd[0] == TEST_UNIT_READY)
+ /* it is a success, we probed the device and
+ * found it */
+ return SUCCESS;
+ /* otherwise, we failed to send the command */
+ return FAILED;
+ case QUEUE_FULL:
+ scsi_handle_queue_full(scmd->device);
+ /* fall through */
+ case BUSY:
+ return NEEDS_RETRY;
+ default:
+ return FAILED;
+ }
+ return FAILED;
+}
+
+/**
+ * scsi_eh_done - Completion function for error handling.
+ * @scmd: Cmd that is done.
+ */
+static void scsi_eh_done(struct scsi_cmnd *scmd)
+{
+ struct completion *eh_action;
+
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s result: %x\n", __func__, scmd->result));
+
+ eh_action = scmd->device->host->eh_action;
+ if (eh_action)
+ complete(eh_action);
+}
+
+/**
+ * scsi_try_host_reset - ask host adapter to reset itself
+ * @scmd: SCSI cmd to send host reset.
+ */
+static int scsi_try_host_reset(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ int rtn;
+ struct Scsi_Host *host = scmd->device->host;
+ struct scsi_host_template *hostt = host->hostt;
+
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, host, "Snd Host RST\n"));
+
+ if (!hostt->eh_host_reset_handler)
+ return FAILED;
+
+ rtn = hostt->eh_host_reset_handler(scmd);
+
+ if (rtn == SUCCESS) {
+ if (!hostt->skip_settle_delay)
+ ssleep(HOST_RESET_SETTLE_TIME);
+ spin_lock_irqsave(host->host_lock, flags);
+ scsi_report_bus_reset(host, scmd_channel(scmd));
+ spin_unlock_irqrestore(host->host_lock, flags);
+ }
+
+ return rtn;
+}
+
+/**
+ * scsi_try_bus_reset - ask host to perform a bus reset
+ * @scmd: SCSI cmd to send bus reset.
+ */
+static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ int rtn;
+ struct Scsi_Host *host = scmd->device->host;
+ struct scsi_host_template *hostt = host->hostt;
+
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s: Snd Bus RST\n", __func__));
+
+ if (!hostt->eh_bus_reset_handler)
+ return FAILED;
+
+ rtn = hostt->eh_bus_reset_handler(scmd);
+
+ if (rtn == SUCCESS) {
+ if (!hostt->skip_settle_delay)
+ ssleep(BUS_RESET_SETTLE_TIME);
+ spin_lock_irqsave(host->host_lock, flags);
+ scsi_report_bus_reset(host, scmd_channel(scmd));
+ spin_unlock_irqrestore(host->host_lock, flags);
+ }
+
+ return rtn;
+}
+
+static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
+{
+ sdev->was_reset = 1;
+ sdev->expecting_cc_ua = 1;
+}
+
+/**
+ * scsi_try_target_reset - Ask host to perform a target reset
+ * @scmd: SCSI cmd used to send a target reset
+ *
+ * Notes:
+ * There is no timeout for this operation. if this operation is
+ * unreliable for a given host, then the host itself needs to put a
+ * timer on it, and set the host back to a consistent state prior to
+ * returning.
+ */
+static int scsi_try_target_reset(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ int rtn;
+ struct Scsi_Host *host = scmd->device->host;
+ struct scsi_host_template *hostt = host->hostt;
+
+ if (!hostt->eh_target_reset_handler)
+ return FAILED;
+
+ rtn = hostt->eh_target_reset_handler(scmd);
+ if (rtn == SUCCESS) {
+ spin_lock_irqsave(host->host_lock, flags);
+ __starget_for_each_device(scsi_target(scmd->device), NULL,
+ __scsi_report_device_reset);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ }
+
+ return rtn;
+}
+
+/**
+ * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
+ * @scmd: SCSI cmd used to send BDR
+ *
+ * Notes:
+ * There is no timeout for this operation. if this operation is
+ * unreliable for a given host, then the host itself needs to put a
+ * timer on it, and set the host back to a consistent state prior to
+ * returning.
+ */
+static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
+{
+ int rtn;
+ struct scsi_host_template *hostt = scmd->device->host->hostt;
+
+ if (!hostt->eh_device_reset_handler)
+ return FAILED;
+
+ rtn = hostt->eh_device_reset_handler(scmd);
+ if (rtn == SUCCESS)
+ __scsi_report_device_reset(scmd->device, NULL);
+ return rtn;
+}
+
+/**
+ * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
+ * @hostt: SCSI driver host template
+ * @scmd: SCSI cmd used to send a target reset
+ *
+ * Return value:
+ * SUCCESS, FAILED, or FAST_IO_FAIL
+ *
+ * Notes:
+ * SUCCESS does not necessarily indicate that the command
+ * has been aborted; it only indicates that the LLDDs
+ * has cleared all references to that command.
+ * LLDDs should return FAILED only if an abort was required
+ * but could not be executed. LLDDs should return FAST_IO_FAIL
+ * if the device is temporarily unavailable (eg due to a
+ * link down on FibreChannel)
+ */
+static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
+ struct scsi_cmnd *scmd)
+{
+ if (!hostt->eh_abort_handler)
+ return FAILED;
+
+ return hostt->eh_abort_handler(scmd);
+}
+
+static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
+{
+ if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
+ if (scsi_try_bus_device_reset(scmd) != SUCCESS)
+ if (scsi_try_target_reset(scmd) != SUCCESS)
+ if (scsi_try_bus_reset(scmd) != SUCCESS)
+ scsi_try_host_reset(scmd);
+}
+
+/**
+ * scsi_eh_prep_cmnd - Save a scsi command info as part of error recovery
+ * @scmd: SCSI command structure to hijack
+ * @ses: structure to save restore information
+ * @cmnd: CDB to send. Can be NULL if no new cmnd is needed
+ * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB)
+ * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
+ *
+ * This function is used to save a scsi command information before re-execution
+ * as part of the error recovery process. If @sense_bytes is 0 the command
+ * sent must be one that does not transfer any data. If @sense_bytes != 0
+ * @cmnd is ignored and this functions sets up a REQUEST_SENSE command
+ * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
+ */
+void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+ unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
+{
+ struct scsi_device *sdev = scmd->device;
+
+ /*
+ * We need saved copies of a number of fields - this is because
+ * error handling may need to overwrite these with different values
+ * to run different commands, and once error handling is complete,
+ * we will need to restore these values prior to running the actual
+ * command.
+ */
+ ses->cmd_len = scmd->cmd_len;
+ ses->cmnd = scmd->cmnd;
+ ses->data_direction = scmd->sc_data_direction;
+ ses->sdb = scmd->sdb;
+ ses->next_rq = scmd->request->next_rq;
+ ses->result = scmd->result;
+ ses->underflow = scmd->underflow;
+ ses->prot_op = scmd->prot_op;
+
+ scmd->prot_op = SCSI_PROT_NORMAL;
+ scmd->eh_eflags = 0;
+ scmd->cmnd = ses->eh_cmnd;
+ memset(scmd->cmnd, 0, BLK_MAX_CDB);
+ memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+ scmd->request->next_rq = NULL;
+ scmd->result = 0;
+
+ if (sense_bytes) {
+ scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
+ sense_bytes);
+ sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
+ scmd->sdb.length);
+ scmd->sdb.table.sgl = &ses->sense_sgl;
+ scmd->sc_data_direction = DMA_FROM_DEVICE;
+ scmd->sdb.table.nents = 1;
+ scmd->cmnd[0] = REQUEST_SENSE;
+ scmd->cmnd[4] = scmd->sdb.length;
+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ } else {
+ scmd->sc_data_direction = DMA_NONE;
+ if (cmnd) {
+ BUG_ON(cmnd_size > BLK_MAX_CDB);
+ memcpy(scmd->cmnd, cmnd, cmnd_size);
+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ }
+ }
+
+ scmd->underflow = 0;
+
+ if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
+ scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
+ (sdev->lun << 5 & 0xe0);
+
+ /*
+ * Zero the sense buffer. The scsi spec mandates that any
+ * untransferred sense data should be interpreted as being zero.
+ */
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+}
+EXPORT_SYMBOL(scsi_eh_prep_cmnd);
+
+/**
+ * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recovery
+ * @scmd: SCSI command structure to restore
+ * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd
+ *
+ * Undo any damage done by above scsi_eh_prep_cmnd().
+ */
+void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
+{
+ /*
+ * Restore original data
+ */
+ scmd->cmd_len = ses->cmd_len;
+ scmd->cmnd = ses->cmnd;
+ scmd->sc_data_direction = ses->data_direction;
+ scmd->sdb = ses->sdb;
+ scmd->request->next_rq = ses->next_rq;
+ scmd->result = ses->result;
+ scmd->underflow = ses->underflow;
+ scmd->prot_op = ses->prot_op;
+}
+EXPORT_SYMBOL(scsi_eh_restore_cmnd);
+
+/**
+ * scsi_send_eh_cmnd - submit a scsi command as part of error recovery
+ * @scmd: SCSI command structure to hijack
+ * @cmnd: CDB to send
+ * @cmnd_size: size in bytes of @cmnd
+ * @timeout: timeout for this request
+ * @sense_bytes: size of sense data to copy or 0
+ *
+ * This function is used to send a scsi command down to a target device
+ * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
+ *
+ * Return value:
+ * SUCCESS or FAILED or NEEDS_RETRY
+ */
+static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
+ int cmnd_size, int timeout, unsigned sense_bytes)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ DECLARE_COMPLETION_ONSTACK(done);
+ unsigned long timeleft = timeout;
+ struct scsi_eh_save ses;
+ const unsigned long stall_for = msecs_to_jiffies(100);
+ int rtn;
+
+retry:
+ scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
+ shost->eh_action = &done;
+
+ scsi_log_send(scmd);
+ scmd->scsi_done = scsi_eh_done;
+ rtn = shost->hostt->queuecommand(shost, scmd);
+ if (rtn) {
+ if (timeleft > stall_for) {
+ scsi_eh_restore_cmnd(scmd, &ses);
+ timeleft -= stall_for;
+ msleep(jiffies_to_msecs(stall_for));
+ goto retry;
+ }
+ /* signal not to enter either branch of the if () below */
+ timeleft = 0;
+ rtn = FAILED;
+ } else {
+ timeleft = wait_for_completion_timeout(&done, timeout);
+ rtn = SUCCESS;
+ }
+
+ shost->eh_action = NULL;
+
+ scsi_log_completion(scmd, rtn);
+
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s timeleft: %ld\n",
+ __func__, timeleft));
+
+ /*
+ * If there is time left scsi_eh_done got called, and we will examine
+ * the actual status codes to see whether the command actually did
+ * complete normally, else if we have a zero return and no time left,
+ * the command must still be pending, so abort it and return FAILED.
+ * If we never actually managed to issue the command, because
+ * ->queuecommand() kept returning non zero, use the rtn = FAILED
+ * value above (so don't execute either branch of the if)
+ */
+ if (timeleft) {
+ rtn = scsi_eh_completed_normally(scmd);
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s: scsi_eh_completed_normally %x\n", __func__, rtn));
+
+ switch (rtn) {
+ case SUCCESS:
+ case NEEDS_RETRY:
+ case FAILED:
+ break;
+ case ADD_TO_MLQUEUE:
+ rtn = NEEDS_RETRY;
+ break;
+ default:
+ rtn = FAILED;
+ break;
+ }
+ } else if (rtn != FAILED) {
+ scsi_abort_eh_cmnd(scmd);
+ rtn = FAILED;
+ }
+
+ scsi_eh_restore_cmnd(scmd, &ses);
+
+ return rtn;
+}
+
+/**
+ * scsi_request_sense - Request sense data from a particular target.
+ * @scmd: SCSI cmd for request sense.
+ *
+ * Notes:
+ * Some hosts automatically obtain this information, others require
+ * that we obtain it on our own. This function will *not* return until
+ * the command either times out, or it completes.
+ */
+static int scsi_request_sense(struct scsi_cmnd *scmd)
+{
+ return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
+}
+
+static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
+{
+ if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
+ if (sdrv->eh_action)
+ rtn = sdrv->eh_action(scmd, rtn);
+ }
+ return rtn;
+}
+
+/**
+ * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
+ * @scmd: Original SCSI cmd that eh has finished.
+ * @done_q: Queue for processed commands.
+ *
+ * Notes:
+ * We don't want to use the normal command completion while we are are
+ * still handling errors - it may cause other commands to be queued,
+ * and that would disturb what we are doing. Thus we really want to
+ * keep a list of pending commands for final completion, and once we
+ * are ready to leave error handling we handle completion for real.
+ */
+void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
+{
+ scmd->device->host->host_failed--;
+ scmd->eh_eflags = 0;
+ list_move_tail(&scmd->eh_entry, done_q);
+}
+EXPORT_SYMBOL(scsi_eh_finish_cmd);
+
+/**
+ * scsi_eh_get_sense - Get device sense data.
+ * @work_q: Queue of commands to process.
+ * @done_q: Queue of processed commands.
+ *
+ * Description:
+ * See if we need to request sense information. if so, then get it
+ * now, so we have a better idea of what to do.
+ *
+ * Notes:
+ * This has the unfortunate side effect that if a shost adapter does
+ * not automatically request sense information, we end up shutting
+ * it down before we request it.
+ *
+ * All drivers should request sense information internally these days,
+ * so for now all I have to say is tough noogies if you end up in here.
+ *
+ * XXX: Long term this code should go away, but that needs an audit of
+ * all LLDDs first.
+ */
+int scsi_eh_get_sense(struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *next;
+ struct Scsi_Host *shost;
+ int rtn;
+
+ list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+ if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
+ SCSI_SENSE_VALID(scmd))
+ continue;
+
+ shost = scmd->device->host;
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: skip request sense, past eh deadline\n",
+ current->comm));
+ break;
+ }
+ if (status_byte(scmd->result) != CHECK_CONDITION)
+ /*
+ * don't request sense if there's no check condition
+ * status because the error we're processing isn't one
+ * that has a sense code (and some devices get
+ * confused by sense requests out of the blue)
+ */
+ continue;
+
+ SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
+ "%s: requesting sense\n",
+ current->comm));
+ rtn = scsi_request_sense(scmd);
+ if (rtn != SUCCESS)
+ continue;
+
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "sense requested, result %x\n", scmd->result));
+ SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
+
+ rtn = scsi_decide_disposition(scmd);
+
+ /*
+ * if the result was normal, then just pass it along to the
+ * upper level.
+ */
+ if (rtn == SUCCESS)
+ /* we don't want this command reissued, just
+ * finished with the sense data, so set
+ * retries to the max allowed to ensure it
+ * won't get reissued */
+ scmd->retries = scmd->allowed;
+ else if (rtn != NEEDS_RETRY)
+ continue;
+
+ scsi_eh_finish_cmd(scmd, done_q);
+ }
+
+ return list_empty(work_q);
+}
+EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
+
+/**
+ * scsi_eh_tur - Send TUR to device.
+ * @scmd: &scsi_cmnd to send TUR
+ *
+ * Return value:
+ * 0 - Device is ready. 1 - Device NOT ready.
+ */
+static int scsi_eh_tur(struct scsi_cmnd *scmd)
+{
+ static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
+ int retry_cnt = 1, rtn;
+
+retry_tur:
+ rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
+ scmd->device->eh_timeout, 0);
+
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s return: %x\n", __func__, rtn));
+
+ switch (rtn) {
+ case NEEDS_RETRY:
+ if (retry_cnt--)
+ goto retry_tur;
+ /*FALLTHRU*/
+ case SUCCESS:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+/**
+ * scsi_eh_test_devices - check if devices are responding from error recovery.
+ * @cmd_list: scsi commands in error recovery.
+ * @work_q: queue for commands which still need more error recovery
+ * @done_q: queue for commands which are finished
+ * @try_stu: boolean on if a STU command should be tried in addition to TUR.
+ *
+ * Decription:
+ * Tests if devices are in a working state. Commands to devices now in
+ * a working state are sent to the done_q while commands to devices which
+ * are still failing to respond are returned to the work_q for more
+ * processing.
+ **/
+static int scsi_eh_test_devices(struct list_head *cmd_list,
+ struct list_head *work_q,
+ struct list_head *done_q, int try_stu)
+{
+ struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
+ int finish_cmds;
+
+ while (!list_empty(cmd_list)) {
+ scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
+ sdev = scmd->device;
+
+ if (!try_stu) {
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ /* Push items back onto work_q */
+ list_splice_init(cmd_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: skip test device, past eh deadline",
+ current->comm));
+ break;
+ }
+ }
+
+ finish_cmds = !scsi_device_online(scmd->device) ||
+ (try_stu && !scsi_eh_try_stu(scmd) &&
+ !scsi_eh_tur(scmd)) ||
+ !scsi_eh_tur(scmd);
+
+ list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
+ if (scmd->device == sdev) {
+ if (finish_cmds &&
+ (try_stu ||
+ scsi_eh_action(scmd, SUCCESS) == SUCCESS))
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, work_q);
+ }
+ }
+ return list_empty(work_q);
+}
+
+
+/**
+ * scsi_eh_abort_cmds - abort pending commands.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ *
+ * Decription:
+ * Try and see whether or not it makes sense to try and abort the
+ * running command. This only works out to be the case if we have one
+ * command that has timed out. If the command simply failed, it makes
+ * no sense to try and abort the command, since as far as the shost
+ * adapter is concerned, it isn't running.
+ */
+static int scsi_eh_abort_cmds(struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
+ int rtn;
+ struct Scsi_Host *shost;
+
+ list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+ if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
+ continue;
+ shost = scmd->device->host;
+ if (scsi_host_eh_past_deadline(shost)) {
+ list_splice_init(&check_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: skip aborting cmd, past eh deadline\n",
+ current->comm));
+ return list_empty(work_q);
+ }
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: aborting cmd\n", current->comm));
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn == FAILED) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: aborting cmd failed\n",
+ current->comm));
+ list_splice_init(&check_list, work_q);
+ return list_empty(work_q);
+ }
+ scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
+ if (rtn == FAST_IO_FAIL)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
+ }
+
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
+}
+
+/**
+ * scsi_eh_try_stu - Send START_UNIT to device.
+ * @scmd: &scsi_cmnd to send START_UNIT
+ *
+ * Return value:
+ * 0 - Device is ready. 1 - Device NOT ready.
+ */
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
+{
+ static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
+
+ if (scmd->device->allow_restart) {
+ int i, rtn = NEEDS_RETRY;
+
+ for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
+
+ if (rtn == SUCCESS)
+ return 0;
+ }
+
+ return 1;
+}
+
+ /**
+ * scsi_eh_stu - send START_UNIT if needed
+ * @shost: &scsi host being recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ *
+ * Notes:
+ * If commands are failing due to not ready, initializing command required,
+ * try revalidating the device, which will end up sending a start unit.
+ */
+static int scsi_eh_stu(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *stu_scmd, *next;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: skip START_UNIT, past eh deadline\n",
+ current->comm));
+ break;
+ }
+ stu_scmd = NULL;
+ list_for_each_entry(scmd, work_q, eh_entry)
+ if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
+ scsi_check_sense(scmd) == FAILED ) {
+ stu_scmd = scmd;
+ break;
+ }
+
+ if (!stu_scmd)
+ continue;
+
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: Sending START_UNIT\n",
+ current->comm));
+
+ if (!scsi_eh_try_stu(stu_scmd)) {
+ if (!scsi_device_online(sdev) ||
+ !scsi_eh_tur(stu_scmd)) {
+ list_for_each_entry_safe(scmd, next,
+ work_q, eh_entry) {
+ if (scmd->device == sdev &&
+ scsi_eh_action(scmd, SUCCESS) == SUCCESS)
+ scsi_eh_finish_cmd(scmd, done_q);
+ }
+ }
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: START_UNIT failed\n",
+ current->comm));
+ }
+ }
+
+ return list_empty(work_q);
+}
+
+
+/**
+ * scsi_eh_bus_device_reset - send bdr if needed
+ * @shost: scsi host being recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ *
+ * Notes:
+ * Try a bus device reset. Still, look to see whether we have multiple
+ * devices that are jammed or not - if we have multiple devices, it
+ * makes no sense to try bus_device_reset - we really would need to try
+ * a bus_reset instead.
+ */
+static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *bdr_scmd, *next;
+ struct scsi_device *sdev;
+ int rtn;
+
+ shost_for_each_device(sdev, shost) {
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: skip BDR, past eh deadline\n",
+ current->comm));
+ break;
+ }
+ bdr_scmd = NULL;
+ list_for_each_entry(scmd, work_q, eh_entry)
+ if (scmd->device == sdev) {
+ bdr_scmd = scmd;
+ break;
+ }
+
+ if (!bdr_scmd)
+ continue;
+
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: Sending BDR\n", current->comm));
+ rtn = scsi_try_bus_device_reset(bdr_scmd);
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
+ if (!scsi_device_online(sdev) ||
+ rtn == FAST_IO_FAIL ||
+ !scsi_eh_tur(bdr_scmd)) {
+ list_for_each_entry_safe(scmd, next,
+ work_q, eh_entry) {
+ if (scmd->device == sdev &&
+ scsi_eh_action(scmd, rtn) != FAILED)
+ scsi_eh_finish_cmd(scmd,
+ done_q);
+ }
+ }
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ sdev_printk(KERN_INFO, sdev,
+ "%s: BDR failed\n", current->comm));
+ }
+ }
+
+ return list_empty(work_q);
+}
+
+/**
+ * scsi_eh_target_reset - send target reset if needed
+ * @shost: scsi host being recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ *
+ * Notes:
+ * Try a target reset.
+ */
+static int scsi_eh_target_reset(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q)
+{
+ LIST_HEAD(tmp_list);
+ LIST_HEAD(check_list);
+
+ list_splice_init(work_q, &tmp_list);
+
+ while (!list_empty(&tmp_list)) {
+ struct scsi_cmnd *next, *scmd;
+ int rtn;
+ unsigned int id;
+
+ if (scsi_host_eh_past_deadline(shost)) {
+ /* push back on work queue for further processing */
+ list_splice_init(&check_list, work_q);
+ list_splice_init(&tmp_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Skip target reset, past eh deadline\n",
+ current->comm));
+ return list_empty(work_q);
+ }
+
+ scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
+ id = scmd_id(scmd);
+
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending target reset to target %d\n",
+ current->comm, id));
+ rtn = scsi_try_target_reset(scmd);
+ if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Target reset failed"
+ " target: %d\n",
+ current->comm, id));
+ list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
+ if (scmd_id(scmd) != id)
+ continue;
+
+ if (rtn == SUCCESS)
+ list_move_tail(&scmd->eh_entry, &check_list);
+ else if (rtn == FAST_IO_FAIL)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ /* push back on work queue for further processing */
+ list_move(&scmd->eh_entry, work_q);
+ }
+ }
+
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
+}
+
+/**
+ * scsi_eh_bus_reset - send a bus reset
+ * @shost: &scsi host being recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ */
+static int scsi_eh_bus_reset(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *chan_scmd, *next;
+ LIST_HEAD(check_list);
+ unsigned int channel;
+ int rtn;
+
+ /*
+ * we really want to loop over the various channels, and do this on
+ * a channel by channel basis. we should also check to see if any
+ * of the failed commands are on soft_reset devices, and if so, skip
+ * the reset.
+ */
+
+ for (channel = 0; channel <= shost->max_channel; channel++) {
+ if (scsi_host_eh_past_deadline(shost)) {
+ list_splice_init(&check_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: skip BRST, past eh deadline\n",
+ current->comm));
+ return list_empty(work_q);
+ }
+
+ chan_scmd = NULL;
+ list_for_each_entry(scmd, work_q, eh_entry) {
+ if (channel == scmd_channel(scmd)) {
+ chan_scmd = scmd;
+ break;
+ /*
+ * FIXME add back in some support for
+ * soft_reset devices.
+ */
+ }
+ }
+
+ if (!chan_scmd)
+ continue;
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending BRST chan: %d\n",
+ current->comm, channel));
+ rtn = scsi_try_bus_reset(chan_scmd);
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
+ list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+ if (channel == scmd_channel(scmd)) {
+ if (rtn == FAST_IO_FAIL)
+ scsi_eh_finish_cmd(scmd,
+ done_q);
+ else
+ list_move_tail(&scmd->eh_entry,
+ &check_list);
+ }
+ }
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: BRST failed chan: %d\n",
+ current->comm, channel));
+ }
+ }
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
+}
+
+/**
+ * scsi_eh_host_reset - send a host reset
+ * @shost: host to be reset.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ */
+static int scsi_eh_host_reset(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
+ int rtn;
+
+ if (!list_empty(work_q)) {
+ scmd = list_entry(work_q->next,
+ struct scsi_cmnd, eh_entry);
+
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending HRST\n",
+ current->comm));
+
+ rtn = scsi_try_host_reset(scmd);
+ if (rtn == SUCCESS) {
+ list_splice_init(work_q, &check_list);
+ } else if (rtn == FAST_IO_FAIL) {
+ list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+ scsi_eh_finish_cmd(scmd, done_q);
+ }
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: HRST failed\n",
+ current->comm));
+ }
+ }
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
+}
+
+/**
+ * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ */
+static void scsi_eh_offline_sdevs(struct list_head *work_q,
+ struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *next;
+
+ list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+ sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
+ "not ready after error recovery\n");
+ scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+ if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
+ /*
+ * FIXME: Handle lost cmds.
+ */
+ }
+ scsi_eh_finish_cmd(scmd, done_q);
+ }
+ return;
+}
+
+/**
+ * scsi_noretry_cmd - determine if command should be failed fast
+ * @scmd: SCSI cmd to examine.
+ */
+int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+{
+ switch (host_byte(scmd->result)) {
+ case DID_OK:
+ break;
+ case DID_TIME_OUT:
+ goto check_type;
+ case DID_BUS_BUSY:
+ return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
+ case DID_PARITY:
+ return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
+ case DID_ERROR:
+ if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
+ status_byte(scmd->result) == RESERVATION_CONFLICT)
+ return 0;
+ /* fall through */
+ case DID_SOFT_ERROR:
+ return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
+ }
+
+ if (status_byte(scmd->result) != CHECK_CONDITION)
+ return 0;
+
+check_type:
+ /*
+ * assume caller has checked sense and determined
+ * the check condition was retryable.
+ */
+ if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
+ scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * scsi_decide_disposition - Disposition a cmd on return from LLD.
+ * @scmd: SCSI cmd to examine.
+ *
+ * Notes:
+ * This is *only* called when we are examining the status after sending
+ * out the actual data command. any commands that are queued for error
+ * recovery (e.g. test_unit_ready) do *not* come through here.
+ *
+ * When this routine returns failed, it means the error handler thread
+ * is woken. In cases where the error code indicates an error that
+ * doesn't require the error handler read (i.e. we don't need to
+ * abort/reset), this function should return SUCCESS.
+ */
+int scsi_decide_disposition(struct scsi_cmnd *scmd)
+{
+ int rtn;
+
+ /*
+ * if the device is offline, then we clearly just pass the result back
+ * up to the top level.
+ */
+ if (!scsi_device_online(scmd->device)) {
+ SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
+ "%s: device offline - report as SUCCESS\n", __func__));
+ return SUCCESS;
+ }
+
+ /*
+ * first check the host byte, to see if there is anything in there
+ * that would indicate what we need to do.
+ */
+ switch (host_byte(scmd->result)) {
+ case DID_PASSTHROUGH:
+ /*
+ * no matter what, pass this through to the upper layer.
+ * nuke this special code so that it looks like we are saying
+ * did_ok.
+ */
+ scmd->result &= 0xff00ffff;
+ return SUCCESS;
+ case DID_OK:
+ /*
+ * looks good. drop through, and check the next byte.
+ */
+ break;
+ case DID_ABORT:
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
+ set_host_byte(scmd, DID_TIME_OUT);
+ return SUCCESS;
+ }
+ case DID_NO_CONNECT:
+ case DID_BAD_TARGET:
+ /*
+ * note - this means that we just report the status back
+ * to the top level driver, not that we actually think
+ * that it indicates SUCCESS.
+ */
+ return SUCCESS;
+ /*
+ * when the low level driver returns did_soft_error,
+ * it is responsible for keeping an internal retry counter
+ * in order to avoid endless loops (db)
+ *
+ * actually this is a bug in this function here. we should
+ * be mindful of the maximum number of retries specified
+ * and not get stuck in a loop.
+ */
+ case DID_SOFT_ERROR:
+ goto maybe_retry;
+ case DID_IMM_RETRY:
+ return NEEDS_RETRY;
+
+ case DID_REQUEUE:
+ return ADD_TO_MLQUEUE;
+ case DID_TRANSPORT_DISRUPTED:
+ /*
+ * LLD/transport was disrupted during processing of the IO.
+ * The transport class is now blocked/blocking,
+ * and the transport will decide what to do with the IO
+ * based on its timers and recovery capablilities if
+ * there are enough retries.
+ */
+ goto maybe_retry;
+ case DID_TRANSPORT_FAILFAST:
+ /*
+ * The transport decided to failfast the IO (most likely
+ * the fast io fail tmo fired), so send IO directly upwards.
+ */
+ return SUCCESS;
+ case DID_ERROR:
+ if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
+ status_byte(scmd->result) == RESERVATION_CONFLICT)
+ /*
+ * execute reservation conflict processing code
+ * lower down
+ */
+ break;
+ /* fallthrough */
+ case DID_BUS_BUSY:
+ case DID_PARITY:
+ goto maybe_retry;
+ case DID_TIME_OUT:
+ /*
+ * when we scan the bus, we get timeout messages for
+ * these commands if there is no device available.
+ * other hosts report did_no_connect for the same thing.
+ */
+ if ((scmd->cmnd[0] == TEST_UNIT_READY ||
+ scmd->cmnd[0] == INQUIRY)) {
+ return SUCCESS;
+ } else {
+ return FAILED;
+ }
+ case DID_RESET:
+ return SUCCESS;
+ default:
+ return FAILED;
+ }
+
+ /*
+ * next, check the message byte.
+ */
+ if (msg_byte(scmd->result) != COMMAND_COMPLETE)
+ return FAILED;
+
+ /*
+ * check the status byte to see if this indicates anything special.
+ */
+ switch (status_byte(scmd->result)) {
+ case QUEUE_FULL:
+ scsi_handle_queue_full(scmd->device);
+ /*
+ * the case of trying to send too many commands to a
+ * tagged queueing device.
+ */
+ case BUSY:
+ /*
+ * device can't talk to us at the moment. Should only
+ * occur (SAM-3) when the task queue is empty, so will cause
+ * the empty queue handling to trigger a stall in the
+ * device.
+ */
+ return ADD_TO_MLQUEUE;
+ case GOOD:
+ if (scmd->cmnd[0] == REPORT_LUNS)
+ scmd->device->sdev_target->expecting_lun_change = 0;
+ scsi_handle_queue_ramp_up(scmd->device);
+ case COMMAND_TERMINATED:
+ return SUCCESS;
+ case TASK_ABORTED:
+ goto maybe_retry;
+ case CHECK_CONDITION:
+ rtn = scsi_check_sense(scmd);
+ if (rtn == NEEDS_RETRY)
+ goto maybe_retry;
+ /* if rtn == FAILED, we have no sense information;
+ * returning FAILED will wake the error handler thread
+ * to collect the sense and redo the decide
+ * disposition */
+ return rtn;
+ case CONDITION_GOOD:
+ case INTERMEDIATE_GOOD:
+ case INTERMEDIATE_C_GOOD:
+ case ACA_ACTIVE:
+ /*
+ * who knows? FIXME(eric)
+ */
+ return SUCCESS;
+
+ case RESERVATION_CONFLICT:
+ sdev_printk(KERN_INFO, scmd->device,
+ "reservation conflict\n");
+ set_host_byte(scmd, DID_NEXUS_FAILURE);
+ return SUCCESS; /* causes immediate i/o error */
+ default:
+ return FAILED;
+ }
+ return FAILED;
+
+ maybe_retry:
+
+ /* we requeue for retry because the error was retryable, and
+ * the request was not marked fast fail. Note that above,
+ * even if the request is marked fast fail, we still requeue
+ * for queue congestion conditions (QUEUE_FULL or BUSY) */
+ if ((++scmd->retries) <= scmd->allowed
+ && !scsi_noretry_cmd(scmd)) {
+ return NEEDS_RETRY;
+ } else {
+ /*
+ * no more retries - report this one back to upper level.
+ */
+ return SUCCESS;
+ }
+}
+
+static void eh_lock_door_done(struct request *req, int uptodate)
+{
+ __blk_put_request(req->q, req);
+}
+
+/**
+ * scsi_eh_lock_door - Prevent medium removal for the specified device
+ * @sdev: SCSI device to prevent medium removal
+ *
+ * Locking:
+ * We must be called from process context.
+ *
+ * Notes:
+ * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
+ * head of the devices request queue, and continue.
+ */
+static void scsi_eh_lock_door(struct scsi_device *sdev)
+{
+ struct request *req;
+
+ /*
+ * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+ * request becomes available
+ */
+ req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ if (IS_ERR(req))
+ return;
+
+ blk_rq_set_block_pc(req);
+
+ req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ req->cmd[1] = 0;
+ req->cmd[2] = 0;
+ req->cmd[3] = 0;
+ req->cmd[4] = SCSI_REMOVAL_PREVENT;
+ req->cmd[5] = 0;
+
+ req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+
+ req->cmd_flags |= REQ_QUIET;
+ req->timeout = 10 * HZ;
+ req->retries = 5;
+
+ blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
+}
+
+/**
+ * scsi_restart_operations - restart io operations to the specified host.
+ * @shost: Host we are restarting.
+ *
+ * Notes:
+ * When we entered the error handler, we blocked all further i/o to
+ * this device. we need to 'reverse' this process.
+ */
+static void scsi_restart_operations(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ /*
+ * If the door was locked, we need to insert a door lock request
+ * onto the head of the SCSI request queue for the device. There
+ * is no point trying to lock the door of an off-line device.
+ */
+ shost_for_each_device(sdev, shost) {
+ if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
+ scsi_eh_lock_door(sdev);
+ sdev->was_reset = 0;
+ }
+ }
+
+ /*
+ * next free up anything directly waiting upon the host. this
+ * will be requests for character device operations, and also for
+ * ioctls to queued block devices.
+ */
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost, "waking up host to restart\n"));
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_RUNNING))
+ if (scsi_host_set_state(shost, SHOST_CANCEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ wake_up(&shost->host_wait);
+
+ /*
+ * finally we need to re-initiate requests that may be pending. we will
+ * have had everything blocked while error handling is taking place, and
+ * now that error recovery is done, we will need to ensure that these
+ * requests are started.
+ */
+ scsi_run_host_queues(shost);
+
+ /*
+ * if eh is active and host_eh_scheduled is pending we need to re-run
+ * recovery. we do this check after scsi_run_host_queues() to allow
+ * everything pent up since the last eh run a chance to make forward
+ * progress before we sync again. Either we'll immediately re-run
+ * recovery or scsi_device_unbusy() will wake us again when these
+ * pending commands complete.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->host_eh_scheduled)
+ if (scsi_host_set_state(shost, SHOST_RECOVERY))
+ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * scsi_eh_ready_devs - check device ready state and recover if not.
+ * @shost: host to be recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ */
+void scsi_eh_ready_devs(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q)
+{
+ if (!scsi_eh_stu(shost, work_q, done_q))
+ if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
+ if (!scsi_eh_target_reset(shost, work_q, done_q))
+ if (!scsi_eh_bus_reset(shost, work_q, done_q))
+ if (!scsi_eh_host_reset(shost, work_q, done_q))
+ scsi_eh_offline_sdevs(work_q,
+ done_q);
+}
+EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
+
+/**
+ * scsi_eh_flush_done_q - finish processed commands or retry them.
+ * @done_q: list_head of processed commands.
+ */
+void scsi_eh_flush_done_q(struct list_head *done_q)
+{
+ struct scsi_cmnd *scmd, *next;
+
+ list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
+ list_del_init(&scmd->eh_entry);
+ if (scsi_device_online(scmd->device) &&
+ !scsi_noretry_cmd(scmd) &&
+ (++scmd->retries <= scmd->allowed)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: flush retry cmd\n",
+ current->comm));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ } else {
+ /*
+ * If just we got sense for the device (called
+ * scsi_eh_get_sense), scmd->result is already
+ * set, do not set DRIVER_TIMEOUT.
+ */
+ if (!scmd->result)
+ scmd->result |= (DRIVER_TIMEOUT << 24);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: flush finish cmd\n",
+ current->comm));
+ scsi_finish_command(scmd);
+ }
+ }
+}
+EXPORT_SYMBOL(scsi_eh_flush_done_q);
+
+/**
+ * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
+ * @shost: Host to unjam.
+ *
+ * Notes:
+ * When we come in here, we *know* that all commands on the bus have
+ * either completed, failed or timed out. we also know that no further
+ * commands are being sent to the host, so things are relatively quiet
+ * and we have freedom to fiddle with things as we wish.
+ *
+ * This is only the *default* implementation. it is possible for
+ * individual drivers to supply their own version of this function, and
+ * if the maintainer wishes to do this, it is strongly suggested that
+ * this function be taken as a template and modified. this function
+ * was designed to correctly handle problems for about 95% of the
+ * different cases out there, and it should always provide at least a
+ * reasonable amount of error recovery.
+ *
+ * Any command marked 'failed' or 'timeout' must eventually have
+ * scsi_finish_cmd() called for it. we do all of the retry stuff
+ * here, so when we restart the host after we return it should have an
+ * empty queue.
+ */
+static void scsi_unjam_host(struct Scsi_Host *shost)
+{
+ unsigned long flags;
+ LIST_HEAD(eh_work_q);
+ LIST_HEAD(eh_done_q);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
+
+ if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
+ if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
+ scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->eh_deadline != -1)
+ shost->last_reset = 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_eh_flush_done_q(&eh_done_q);
+}
+
+/**
+ * scsi_error_handler - SCSI error handler thread
+ * @data: Host for which we are running.
+ *
+ * Notes:
+ * This is the main error handling loop. This is run as a kernel thread
+ * for every SCSI host and handles all error handling activity.
+ */
+int scsi_error_handler(void *data)
+{
+ struct Scsi_Host *shost = data;
+
+ /*
+ * We use TASK_INTERRUPTIBLE so that the thread is not
+ * counted against the load average as a running process.
+ * We never actually get interrupted because kthread_run
+ * disables signal delivery for the created thread.
+ */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
+ shost->host_failed != atomic_read(&shost->host_busy)) {
+ SCSI_LOG_ERROR_RECOVERY(1,
+ shost_printk(KERN_INFO, shost,
+ "scsi_eh_%d: sleeping\n",
+ shost->host_no));
+ schedule();
+ continue;
+ }
+
+ __set_current_state(TASK_RUNNING);
+ SCSI_LOG_ERROR_RECOVERY(1,
+ shost_printk(KERN_INFO, shost,
+ "scsi_eh_%d: waking up %d/%d/%d\n",
+ shost->host_no, shost->host_eh_scheduled,
+ shost->host_failed,
+ atomic_read(&shost->host_busy)));
+
+ /*
+ * We have a host that is failing for some reason. Figure out
+ * what we need to do to get it up and online again (if we can).
+ * If we fail, we end up taking the thing offline.
+ */
+ if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
+ SCSI_LOG_ERROR_RECOVERY(1,
+ shost_printk(KERN_ERR, shost,
+ "scsi_eh_%d: unable to autoresume\n",
+ shost->host_no));
+ continue;
+ }
+
+ if (shost->transportt->eh_strategy_handler)
+ shost->transportt->eh_strategy_handler(shost);
+ else
+ scsi_unjam_host(shost);
+
+ /*
+ * Note - if the above fails completely, the action is to take
+ * individual devices offline and flush the queue of any
+ * outstanding requests that may have been pending. When we
+ * restart, we restart any I/O to any other devices on the bus
+ * which are still online.
+ */
+ scsi_restart_operations(shost);
+ if (!shost->eh_noresume)
+ scsi_autopm_put_host(shost);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ SCSI_LOG_ERROR_RECOVERY(1,
+ shost_printk(KERN_INFO, shost,
+ "Error handler scsi_eh_%d exiting\n",
+ shost->host_no));
+ shost->ehandler = NULL;
+ return 0;
+}
+
+/*
+ * Function: scsi_report_bus_reset()
+ *
+ * Purpose: Utility function used by low-level drivers to report that
+ * they have observed a bus reset on the bus being handled.
+ *
+ * Arguments: shost - Host in question
+ * channel - channel on which reset was observed.
+ *
+ * Returns: Nothing
+ *
+ * Lock status: Host lock must be held.
+ *
+ * Notes: This only needs to be called if the reset is one which
+ * originates from an unknown location. Resets originated
+ * by the mid-level itself don't need to call this, but there
+ * should be no harm.
+ *
+ * The main purpose of this is to make sure that a CHECK_CONDITION
+ * is properly treated.
+ */
+void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
+{
+ struct scsi_device *sdev;
+
+ __shost_for_each_device(sdev, shost) {
+ if (channel == sdev_channel(sdev))
+ __scsi_report_device_reset(sdev, NULL);
+ }
+}
+EXPORT_SYMBOL(scsi_report_bus_reset);
+
+/*
+ * Function: scsi_report_device_reset()
+ *
+ * Purpose: Utility function used by low-level drivers to report that
+ * they have observed a device reset on the device being handled.
+ *
+ * Arguments: shost - Host in question
+ * channel - channel on which reset was observed
+ * target - target on which reset was observed
+ *
+ * Returns: Nothing
+ *
+ * Lock status: Host lock must be held
+ *
+ * Notes: This only needs to be called if the reset is one which
+ * originates from an unknown location. Resets originated
+ * by the mid-level itself don't need to call this, but there
+ * should be no harm.
+ *
+ * The main purpose of this is to make sure that a CHECK_CONDITION
+ * is properly treated.
+ */
+void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
+{
+ struct scsi_device *sdev;
+
+ __shost_for_each_device(sdev, shost) {
+ if (channel == sdev_channel(sdev) &&
+ target == sdev_id(sdev))
+ __scsi_report_device_reset(sdev, NULL);
+ }
+}
+EXPORT_SYMBOL(scsi_report_device_reset);
+
+static void
+scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
+{
+}
+
+/**
+ * scsi_ioctl_reset: explicitly reset a host/bus/target/device
+ * @dev: scsi_device to operate on
+ * @arg: reset type (see sg.h)
+ */
+int
+scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
+{
+ struct scsi_cmnd *scmd;
+ struct Scsi_Host *shost = dev->host;
+ struct request req;
+ unsigned long flags;
+ int error = 0, rtn, val;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ error = get_user(val, arg);
+ if (error)
+ return error;
+
+ if (scsi_autopm_get_host(shost) < 0)
+ return -EIO;
+
+ error = -EIO;
+ scmd = scsi_get_command(dev, GFP_KERNEL);
+ if (!scmd)
+ goto out_put_autopm_host;
+
+ blk_rq_init(NULL, &req);
+ scmd->request = &req;
+
+ scmd->cmnd = req.cmd;
+
+ scmd->scsi_done = scsi_reset_provider_done_command;
+ memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+
+ scmd->cmd_len = 0;
+
+ scmd->sc_data_direction = DMA_BIDIRECTIONAL;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->tmf_in_progress = 1;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
+ case SG_SCSI_RESET_NOTHING:
+ rtn = SUCCESS;
+ break;
+ case SG_SCSI_RESET_DEVICE:
+ rtn = scsi_try_bus_device_reset(scmd);
+ if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
+ break;
+ /* FALLTHROUGH */
+ case SG_SCSI_RESET_TARGET:
+ rtn = scsi_try_target_reset(scmd);
+ if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
+ break;
+ /* FALLTHROUGH */
+ case SG_SCSI_RESET_BUS:
+ rtn = scsi_try_bus_reset(scmd);
+ if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
+ break;
+ /* FALLTHROUGH */
+ case SG_SCSI_RESET_HOST:
+ rtn = scsi_try_host_reset(scmd);
+ if (rtn == SUCCESS)
+ break;
+ default:
+ /* FALLTHROUGH */
+ rtn = FAILED;
+ break;
+ }
+
+ error = (rtn == SUCCESS) ? 0 : -EIO;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->tmf_in_progress = 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * be sure to wake up anyone who was sleeping or had their queue
+ * suspended while we performed the TMF.
+ */
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "waking up host to restart after TMF\n"));
+
+ wake_up(&shost->host_wait);
+ scsi_run_host_queues(shost);
+
+ scsi_put_command(scmd);
+
+out_put_autopm_host:
+ scsi_autopm_put_host(shost);
+ return error;
+}
+EXPORT_SYMBOL(scsi_ioctl_reset);
+
+/**
+ * scsi_normalize_sense - normalize main elements from either fixed or
+ * descriptor sense data format into a common format.
+ *
+ * @sense_buffer: byte array containing sense data returned by device
+ * @sb_len: number of valid bytes in sense_buffer
+ * @sshdr: pointer to instance of structure that common
+ * elements are written to.
+ *
+ * Notes:
+ * The "main elements" from sense data are: response_code, sense_key,
+ * asc, ascq and additional_length (only for descriptor format).
+ *
+ * Typically this function can be called after a device has
+ * responded to a SCSI command with the CHECK_CONDITION status.
+ *
+ * Return value:
+ * true if valid sense data information found, else false;
+ */
+bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
+ struct scsi_sense_hdr *sshdr)
+{
+ if (!sense_buffer || !sb_len)
+ return false;
+
+ memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
+
+ sshdr->response_code = (sense_buffer[0] & 0x7f);
+
+ if (!scsi_sense_valid(sshdr))
+ return false;
+
+ if (sshdr->response_code >= 0x72) {
+ /*
+ * descriptor format
+ */
+ if (sb_len > 1)
+ sshdr->sense_key = (sense_buffer[1] & 0xf);
+ if (sb_len > 2)
+ sshdr->asc = sense_buffer[2];
+ if (sb_len > 3)
+ sshdr->ascq = sense_buffer[3];
+ if (sb_len > 7)
+ sshdr->additional_length = sense_buffer[7];
+ } else {
+ /*
+ * fixed format
+ */
+ if (sb_len > 2)
+ sshdr->sense_key = (sense_buffer[2] & 0xf);
+ if (sb_len > 7) {
+ sb_len = (sb_len < (sense_buffer[7] + 8)) ?
+ sb_len : (sense_buffer[7] + 8);
+ if (sb_len > 12)
+ sshdr->asc = sense_buffer[12];
+ if (sb_len > 13)
+ sshdr->ascq = sense_buffer[13];
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(scsi_normalize_sense);
+
+bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
+ struct scsi_sense_hdr *sshdr)
+{
+ return scsi_normalize_sense(cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, sshdr);
+}
+EXPORT_SYMBOL(scsi_command_normalize_sense);
+
+/**
+ * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
+ * @sense_buffer: byte array of descriptor format sense data
+ * @sb_len: number of valid bytes in sense_buffer
+ * @desc_type: value of descriptor type to find
+ * (e.g. 0 -> information)
+ *
+ * Notes:
+ * only valid when sense data is in descriptor format
+ *
+ * Return value:
+ * pointer to start of (first) descriptor if found else NULL
+ */
+const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+ int desc_type)
+{
+ int add_sen_len, add_len, desc_len, k;
+ const u8 * descp;
+
+ if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
+ return NULL;
+ if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
+ return NULL;
+ add_sen_len = (add_sen_len < (sb_len - 8)) ?
+ add_sen_len : (sb_len - 8);
+ descp = &sense_buffer[8];
+ for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
+ descp += desc_len;
+ add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
+ desc_len = add_len + 2;
+ if (descp[0] == desc_type)
+ return descp;
+ if (add_len < 0) // short descriptor ??
+ break;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(scsi_sense_desc_find);
+
+/**
+ * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
+ * @sense_buffer: byte array of sense data
+ * @sb_len: number of valid bytes in sense_buffer
+ * @info_out: pointer to 64 integer where 8 or 4 byte information
+ * field will be placed if found.
+ *
+ * Return value:
+ * 1 if information field found, 0 if not found.
+ */
+int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
+ u64 * info_out)
+{
+ int j;
+ const u8 * ucp;
+ u64 ull;
+
+ if (sb_len < 7)
+ return 0;
+ switch (sense_buffer[0] & 0x7f) {
+ case 0x70:
+ case 0x71:
+ if (sense_buffer[0] & 0x80) {
+ *info_out = (sense_buffer[3] << 24) +
+ (sense_buffer[4] << 16) +
+ (sense_buffer[5] << 8) + sense_buffer[6];
+ return 1;
+ } else
+ return 0;
+ case 0x72:
+ case 0x73:
+ ucp = scsi_sense_desc_find(sense_buffer, sb_len,
+ 0 /* info desc */);
+ if (ucp && (0xa == ucp[1])) {
+ ull = 0;
+ for (j = 0; j < 8; ++j) {
+ if (j > 0)
+ ull <<= 8;
+ ull |= ucp[4 + j];
+ }
+ *info_out = ull;
+ return 1;
+ } else
+ return 0;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(scsi_get_sense_info_fld);
+
+/**
+ * scsi_build_sense_buffer - build sense data in a buffer
+ * @desc: Sense format (non zero == descriptor format,
+ * 0 == fixed format)
+ * @buf: Where to build sense data
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
+{
+ if (desc) {
+ buf[0] = 0x72; /* descriptor, current */
+ buf[1] = key;
+ buf[2] = asc;
+ buf[3] = ascq;
+ buf[7] = 0;
+ } else {
+ buf[0] = 0x70; /* fixed, current */
+ buf[2] = key;
+ buf[7] = 0xa;
+ buf[12] = asc;
+ buf[13] = ascq;
+ }
+}
+EXPORT_SYMBOL(scsi_build_sense_buffer);
+
+/**
+ * scsi_set_sense_information - set the information field in a
+ * formatted sense data buffer
+ * @buf: Where to build sense data
+ * @info: 64-bit information value to be set
+ *
+ **/
+void scsi_set_sense_information(u8 *buf, u64 info)
+{
+ if ((buf[0] & 0x7f) == 0x72) {
+ u8 *ucp, len;
+
+ len = buf[7];
+ ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
+ if (!ucp) {
+ buf[7] = len + 0xa;
+ ucp = buf + 8 + len;
+ }
+ ucp[0] = 0;
+ ucp[1] = 0xa;
+ ucp[2] = 0x80; /* Valid bit */
+ ucp[3] = 0;
+ put_unaligned_be64(info, &ucp[4]);
+ } else if ((buf[0] & 0x7f) == 0x70) {
+ buf[0] |= 0x80;
+ put_unaligned_be64(info, &buf[3]);
+ }
+}
+EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
new file mode 100644
index 000000000..c4f7b56fa
--- /dev/null
+++ b/drivers/scsi/scsi_ioctl.c
@@ -0,0 +1,289 @@
+/*
+ * Changes:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 08/23/2000
+ * - get rid of some verify_areas and use __copy*user and __get/put_user
+ * for the ones that remain
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/sg.h>
+#include <scsi/scsi_dbg.h>
+
+#include "scsi_logging.h"
+
+#define NORMAL_RETRIES 5
+#define IOCTL_NORMAL_TIMEOUT (10 * HZ)
+
+#define MAX_BUF PAGE_SIZE
+
+/**
+ * ioctl_probe -- return host identification
+ * @host: host to identify
+ * @buffer: userspace buffer for identification
+ *
+ * Return an identifying string at @buffer, if @buffer is non-NULL, filling
+ * to the length stored at * (int *) @buffer.
+ */
+static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
+{
+ unsigned int len, slen;
+ const char *string;
+
+ if (buffer) {
+ if (get_user(len, (unsigned int __user *) buffer))
+ return -EFAULT;
+
+ if (host->hostt->info)
+ string = host->hostt->info(host);
+ else
+ string = host->hostt->name;
+ if (string) {
+ slen = strlen(string);
+ if (len > slen)
+ len = slen + 1;
+ if (copy_to_user(buffer, string, len))
+ return -EFAULT;
+ }
+ }
+ return 1;
+}
+
+/*
+
+ * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
+ * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
+ *
+ * dev is the SCSI device struct ptr, *(int *) arg is the length of the
+ * input data, if any, not including the command string & counts,
+ * *((int *)arg + 1) is the output buffer size in bytes.
+ *
+ * *(char *) ((int *) arg)[2] the actual command byte.
+ *
+ * Note that if more than MAX_BUF bytes are requested to be transferred,
+ * the ioctl will fail with error EINVAL.
+ *
+ * This size *does not* include the initial lengths that were passed.
+ *
+ * The SCSI command is read from the memory location immediately after the
+ * length words, and the input data is right after the command. The SCSI
+ * routines know the command size based on the opcode decode.
+ *
+ * The output area is then filled in starting from the command byte.
+ */
+
+static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
+ int timeout, int retries)
+{
+ int result;
+ struct scsi_sense_hdr sshdr;
+
+ SCSI_LOG_IOCTL(1, sdev_printk(KERN_INFO, sdev,
+ "Trying ioctl with scsi command %d\n", *cmd));
+
+ result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
+ &sshdr, timeout, retries, NULL);
+
+ SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
+ "Ioctl returned 0x%x\n", result));
+
+ if ((driver_byte(result) & DRIVER_SENSE) &&
+ (scsi_sense_valid(&sshdr))) {
+ switch (sshdr.sense_key) {
+ case ILLEGAL_REQUEST:
+ if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
+ sdev->lockable = 0;
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "ioctl_internal_command: "
+ "ILLEGAL REQUEST "
+ "asc=0x%x ascq=0x%x\n",
+ sshdr.asc, sshdr.ascq);
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ if (sdev->removable)
+ break;
+ case UNIT_ATTENTION:
+ if (sdev->removable) {
+ sdev->changed = 1;
+ result = 0; /* This is no longer considered an error */
+ break;
+ }
+ default: /* Fall through for non-removable media */
+ sdev_printk(KERN_INFO, sdev,
+ "ioctl_internal_command return code = %x\n",
+ result);
+ scsi_print_sense_hdr(sdev, NULL, &sshdr);
+ break;
+ }
+ }
+
+ SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
+ "IOCTL Releasing command\n"));
+ return result;
+}
+
+int scsi_set_medium_removal(struct scsi_device *sdev, char state)
+{
+ char scsi_cmd[MAX_COMMAND_SIZE];
+ int ret;
+
+ if (!sdev->removable || !sdev->lockable)
+ return 0;
+
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = 0;
+ scsi_cmd[2] = 0;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = state;
+ scsi_cmd[5] = 0;
+
+ ret = ioctl_internal_command(sdev, scsi_cmd,
+ IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES);
+ if (ret == 0)
+ sdev->locked = (state == SCSI_REMOVAL_PREVENT);
+ return ret;
+}
+EXPORT_SYMBOL(scsi_set_medium_removal);
+
+/*
+ * The scsi_ioctl_get_pci() function places into arg the value
+ * pci_dev::slot_name (8 characters) for the PCI device (if any).
+ * Returns: 0 on success
+ * -ENXIO if there isn't a PCI device pointer
+ * (could be because the SCSI driver hasn't been
+ * updated yet, or because it isn't a SCSI
+ * device)
+ * any copy_to_user() error on failure there
+ */
+static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
+{
+ struct device *dev = scsi_get_device(sdev->host);
+ const char *name;
+
+ if (!dev)
+ return -ENXIO;
+
+ name = dev_name(dev);
+
+ /* compatibility with old ioctl which only returned
+ * 20 characters */
+ return copy_to_user(arg, name, min(strlen(name), (size_t)20))
+ ? -EFAULT: 0;
+}
+
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ char scsi_cmd[MAX_COMMAND_SIZE];
+
+ /* Check for deprecated ioctls ... all the ioctls which don't
+ * follow the new unique numbering scheme are deprecated */
+ switch (cmd) {
+ case SCSI_IOCTL_SEND_COMMAND:
+ case SCSI_IOCTL_TEST_UNIT_READY:
+ case SCSI_IOCTL_BENCHMARK_COMMAND:
+ case SCSI_IOCTL_SYNC:
+ case SCSI_IOCTL_START_UNIT:
+ case SCSI_IOCTL_STOP_UNIT:
+ printk(KERN_WARNING "program %s is using a deprecated SCSI "
+ "ioctl, please convert it to SG_IO\n", current->comm);
+ break;
+ default:
+ break;
+ }
+
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ if (!access_ok(VERIFY_WRITE, arg, sizeof(struct scsi_idlun)))
+ return -EFAULT;
+
+ __put_user((sdev->id & 0xff)
+ + ((sdev->lun & 0xff) << 8)
+ + ((sdev->channel & 0xff) << 16)
+ + ((sdev->host->host_no & 0xff) << 24),
+ &((struct scsi_idlun __user *)arg)->dev_id);
+ __put_user(sdev->host->unique_id,
+ &((struct scsi_idlun __user *)arg)->host_unique_id);
+ return 0;
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ return put_user(sdev->host->host_no, (int __user *)arg);
+ case SCSI_IOCTL_PROBE_HOST:
+ return ioctl_probe(sdev->host, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
+ case SCSI_IOCTL_DOORLOCK:
+ return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
+ case SCSI_IOCTL_DOORUNLOCK:
+ return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
+ case SCSI_IOCTL_TEST_UNIT_READY:
+ return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
+ NORMAL_RETRIES, NULL);
+ case SCSI_IOCTL_START_UNIT:
+ scsi_cmd[0] = START_STOP;
+ scsi_cmd[1] = 0;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 1;
+ return ioctl_internal_command(sdev, scsi_cmd,
+ START_STOP_TIMEOUT, NORMAL_RETRIES);
+ case SCSI_IOCTL_STOP_UNIT:
+ scsi_cmd[0] = START_STOP;
+ scsi_cmd[1] = 0;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 0;
+ return ioctl_internal_command(sdev, scsi_cmd,
+ START_STOP_TIMEOUT, NORMAL_RETRIES);
+ case SCSI_IOCTL_GET_PCI:
+ return scsi_ioctl_get_pci(sdev, arg);
+ case SG_SCSI_RESET:
+ return scsi_ioctl_reset(sdev, arg);
+ default:
+ if (sdev->host->hostt->ioctl)
+ return sdev->host->hostt->ioctl(sdev, cmd, arg);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scsi_ioctl);
+
+/*
+ * We can process a reset even when a device isn't fully operable.
+ */
+int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd,
+ bool ndelay)
+{
+ if (cmd == SG_SCSI_RESET && ndelay) {
+ if (scsi_host_in_recovery(sdev->host))
+ return -EAGAIN;
+ } else {
+ if (!scsi_block_when_processing_errors(sdev))
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_ioctl_block_when_processing_errors);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
new file mode 100644
index 000000000..b1a263137
--- /dev/null
+++ b/drivers/scsi/scsi_lib.c
@@ -0,0 +1,3147 @@
+/*
+ * Copyright (C) 1999 Eric Youngdale
+ * Copyright (C) 2014 Christoph Hellwig
+ *
+ * SCSI queueing library.
+ * Initial versions: Eric Youngdale (eric@andante.org).
+ * Based upon conversations with large numbers
+ * of people at Linux Expo.
+ */
+
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
+#include <linux/ratelimit.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+
+#include <trace/events/scsi.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+
+#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
+#define SG_MEMPOOL_SIZE 2
+
+struct scsi_host_sg_pool {
+ size_t size;
+ char *name;
+ struct kmem_cache *slab;
+ mempool_t *pool;
+};
+
+#define SP(x) { .size = x, "sgpool-" __stringify(x) }
+#if (SCSI_MAX_SG_SEGMENTS < 32)
+#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
+#endif
+static struct scsi_host_sg_pool scsi_sg_pools[] = {
+ SP(8),
+ SP(16),
+#if (SCSI_MAX_SG_SEGMENTS > 32)
+ SP(32),
+#if (SCSI_MAX_SG_SEGMENTS > 64)
+ SP(64),
+#if (SCSI_MAX_SG_SEGMENTS > 128)
+ SP(128),
+#if (SCSI_MAX_SG_SEGMENTS > 256)
+#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
+#endif
+#endif
+#endif
+#endif
+ SP(SCSI_MAX_SG_SEGMENTS)
+};
+#undef SP
+
+struct kmem_cache *scsi_sdb_cache;
+
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
+static void
+scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct scsi_device *device = cmd->device;
+ struct scsi_target *starget = scsi_target(device);
+
+ /*
+ * Set the appropriate busy bit for the device/host.
+ *
+ * If the host/device isn't busy, assume that something actually
+ * completed, and that we should be able to queue a command now.
+ *
+ * Note that the prior mid-layer assumption that any host could
+ * always queue at least one command is now broken. The mid-layer
+ * will implement a user specifiable stall (see
+ * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
+ * if a command is requeued with no other commands outstanding
+ * either for the device or for the host.
+ */
+ switch (reason) {
+ case SCSI_MLQUEUE_HOST_BUSY:
+ atomic_set(&host->host_blocked, host->max_host_blocked);
+ break;
+ case SCSI_MLQUEUE_DEVICE_BUSY:
+ case SCSI_MLQUEUE_EH_RETRY:
+ atomic_set(&device->device_blocked,
+ device->max_device_blocked);
+ break;
+ case SCSI_MLQUEUE_TARGET_BUSY:
+ atomic_set(&starget->target_blocked,
+ starget->max_target_blocked);
+ break;
+ }
+}
+
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = cmd->request->q;
+
+ blk_mq_requeue_request(cmd->request);
+ blk_mq_kick_requeue_list(q);
+ put_device(&sdev->sdev_gendev);
+}
+
+/**
+ * __scsi_queue_insert - private queue insertion
+ * @cmd: The SCSI command being requeued
+ * @reason: The reason for the requeue
+ * @unbusy: Whether the queue should be unbusied
+ *
+ * This is a private queue insertion. The public interface
+ * scsi_queue_insert() always assumes the queue should be unbusied
+ * because it's always called before the completion. This function is
+ * for a requeue after completion, which should only occur in this
+ * file.
+ */
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+{
+ struct scsi_device *device = cmd->device;
+ struct request_queue *q = device->request_queue;
+ unsigned long flags;
+
+ SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
+ "Inserting command %p into mlqueue\n", cmd));
+
+ scsi_set_blocked(cmd, reason);
+
+ /*
+ * Decrement the counters, since these commands are no longer
+ * active on the host/device.
+ */
+ if (unbusy)
+ scsi_device_unbusy(device);
+
+ /*
+ * Requeue this command. It will go before all other commands
+ * that are already in the queue. Schedule requeue work under
+ * lock such that the kblockd_schedule_work() call happens
+ * before blk_cleanup_queue() finishes.
+ */
+ cmd->result = 0;
+ if (q->mq_ops) {
+ scsi_mq_requeue_cmd(cmd);
+ return;
+ }
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, cmd->request);
+ kblockd_schedule_work(&device->requeue_work);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Function: scsi_queue_insert()
+ *
+ * Purpose: Insert a command in the midlevel queue.
+ *
+ * Arguments: cmd - command that we are adding to queue.
+ * reason - why we are inserting command to queue.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns: Nothing.
+ *
+ * Notes: We do this for one of two cases. Either the host is busy
+ * and it cannot accept any more commands for the time being,
+ * or the device returned QUEUE_FULL and can accept no more
+ * commands.
+ * Notes: This could be called either from an interrupt context or a
+ * normal process context.
+ */
+void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+{
+ __scsi_queue_insert(cmd, reason, 1);
+}
+/**
+ * scsi_execute - insert request and wait for the result
+ * @sdev: scsi device
+ * @cmd: scsi command
+ * @data_direction: data direction
+ * @buffer: data buffer
+ * @bufflen: len of buffer
+ * @sense: optional sense buffer
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @flags: or into request flags;
+ * @resid: optional residual length
+ *
+ * returns the req->errors value which is the scsi_cmnd result
+ * field.
+ */
+int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ unsigned char *sense, int timeout, int retries, u64 flags,
+ int *resid)
+{
+ struct request *req;
+ int write = (data_direction == DMA_TO_DEVICE);
+ int ret = DRIVER_ERROR << 24;
+
+ req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+ if (IS_ERR(req))
+ return ret;
+ blk_rq_set_block_pc(req);
+
+ if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
+ buffer, bufflen, __GFP_WAIT))
+ goto out;
+
+ req->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(req->cmd, cmd, req->cmd_len);
+ req->sense = sense;
+ req->sense_len = 0;
+ req->retries = retries;
+ req->timeout = timeout;
+ req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
+
+ /*
+ * head injection *required* here otherwise quiesce won't work
+ */
+ blk_execute_rq(req->q, NULL, req, 1);
+
+ /*
+ * Some devices (USB mass-storage in particular) may transfer
+ * garbage data together with a residue indicating that the data
+ * is invalid. Prevent the garbage from being misinterpreted
+ * and prevent security leaks by zeroing out the excess data.
+ */
+ if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+ memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
+
+ if (resid)
+ *resid = req->resid_len;
+ ret = req->errors;
+ out:
+ blk_put_request(req);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_execute);
+
+int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ struct scsi_sense_hdr *sshdr, int timeout, int retries,
+ int *resid, u64 flags)
+{
+ char *sense = NULL;
+ int result;
+
+ if (sshdr) {
+ sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+ if (!sense)
+ return DRIVER_ERROR << 24;
+ }
+ result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
+ sense, timeout, retries, flags, resid);
+ if (sshdr)
+ scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
+
+ kfree(sense);
+ return result;
+}
+EXPORT_SYMBOL(scsi_execute_req_flags);
+
+/*
+ * Function: scsi_init_cmd_errh()
+ *
+ * Purpose: Initialize cmd fields related to error handling.
+ *
+ * Arguments: cmd - command that is ready to be queued.
+ *
+ * Notes: This function has the job of initializing a number of
+ * fields related to error handling. Typically this will
+ * be called once for each command, as required.
+ */
+static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
+{
+ cmd->serial_number = 0;
+ scsi_set_resid(cmd, 0);
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (cmd->cmd_len == 0)
+ cmd->cmd_len = scsi_command_size(cmd->cmnd);
+}
+
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+ unsigned long flags;
+
+ atomic_dec(&shost->host_busy);
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
+
+ if (unlikely(scsi_host_in_recovery(shost) &&
+ (shost->host_failed || shost->host_eh_scheduled))) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+ atomic_dec(&sdev->device_busy);
+}
+
+static void scsi_kick_queue(struct request_queue *q)
+{
+ if (q->mq_ops)
+ blk_mq_start_hw_queues(q);
+ else
+ blk_run_queue(q);
+}
+
+/*
+ * Called for single_lun devices on IO completion. Clear starget_sdev_user,
+ * and call blk_run_queue for all the scsi_devices on the target -
+ * including current_sdev first.
+ *
+ * Called with *no* scsi locks held.
+ */
+static void scsi_single_lun_run(struct scsi_device *current_sdev)
+{
+ struct Scsi_Host *shost = current_sdev->host;
+ struct scsi_device *sdev, *tmp;
+ struct scsi_target *starget = scsi_target(current_sdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ starget->starget_sdev_user = NULL;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Call blk_run_queue for all LUNs on the target, starting with
+ * current_sdev. We race with others (to set starget_sdev_user),
+ * but in most cases, we will be first. Ideally, each LU on the
+ * target would get some limited time or requests on the target.
+ */
+ scsi_kick_queue(current_sdev->request_queue);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (starget->starget_sdev_user)
+ goto out;
+ list_for_each_entry_safe(sdev, tmp, &starget->devices,
+ same_target_siblings) {
+ if (sdev == current_sdev)
+ continue;
+ if (scsi_device_get(sdev))
+ continue;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_kick_queue(sdev->request_queue);
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ scsi_device_put(sdev);
+ }
+ out:
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+static inline bool scsi_device_is_busy(struct scsi_device *sdev)
+{
+ if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+ return true;
+ if (atomic_read(&sdev->device_blocked) > 0)
+ return true;
+ return false;
+}
+
+static inline bool scsi_target_is_busy(struct scsi_target *starget)
+{
+ if (starget->can_queue > 0) {
+ if (atomic_read(&starget->target_busy) >= starget->can_queue)
+ return true;
+ if (atomic_read(&starget->target_blocked) > 0)
+ return true;
+ }
+ return false;
+}
+
+static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
+{
+ if (shost->can_queue > 0 &&
+ atomic_read(&shost->host_busy) >= shost->can_queue)
+ return true;
+ if (atomic_read(&shost->host_blocked) > 0)
+ return true;
+ if (shost->host_self_blocked)
+ return true;
+ return false;
+}
+
+static void scsi_starved_list_run(struct Scsi_Host *shost)
+{
+ LIST_HEAD(starved_list);
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_splice_init(&shost->starved_list, &starved_list);
+
+ while (!list_empty(&starved_list)) {
+ struct request_queue *slq;
+
+ /*
+ * As long as shost is accepting commands and we have
+ * starved queues, call blk_run_queue. scsi_request_fn
+ * drops the queue_lock and can add us back to the
+ * starved_list.
+ *
+ * host_lock protects the starved_list and starved_entry.
+ * scsi_request_fn must get the host_lock before checking
+ * or modifying starved_list or starved_entry.
+ */
+ if (scsi_host_is_busy(shost))
+ break;
+
+ sdev = list_entry(starved_list.next,
+ struct scsi_device, starved_entry);
+ list_del_init(&sdev->starved_entry);
+ if (scsi_target_is_busy(scsi_target(sdev))) {
+ list_move_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ continue;
+ }
+
+ /*
+ * Once we drop the host lock, a racing scsi_remove_device()
+ * call may remove the sdev from the starved list and destroy
+ * it and the queue. Mitigate by taking a reference to the
+ * queue and never touching the sdev again after we drop the
+ * host lock. Note: if __scsi_remove_device() invokes
+ * blk_cleanup_queue() before the queue is run from this
+ * function then blk_run_queue() will return immediately since
+ * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+ */
+ slq = sdev->request_queue;
+ if (!blk_get_queue(slq))
+ continue;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_kick_queue(slq);
+ blk_put_queue(slq);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ /* put any unprocessed entries back */
+ list_splice(&starved_list, &shost->starved_list);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Function: scsi_run_queue()
+ *
+ * Purpose: Select a proper request queue to serve next
+ *
+ * Arguments: q - last request's queue
+ *
+ * Returns: Nothing
+ *
+ * Notes: The previous command was completely finished, start
+ * a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
+{
+ struct scsi_device *sdev = q->queuedata;
+
+ if (scsi_target(sdev)->single_lun)
+ scsi_single_lun_run(sdev);
+ if (!list_empty(&sdev->host->starved_list))
+ scsi_starved_list_run(sdev->host);
+
+ if (q->mq_ops)
+ blk_mq_start_stopped_hw_queues(q, false);
+ else
+ blk_run_queue(q);
+}
+
+void scsi_requeue_run_queue(struct work_struct *work)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+
+ sdev = container_of(work, struct scsi_device, requeue_work);
+ q = sdev->request_queue;
+ scsi_run_queue(q);
+}
+
+/*
+ * Function: scsi_requeue_command()
+ *
+ * Purpose: Handle post-processing of completed commands.
+ *
+ * Arguments: q - queue to operate on
+ * cmd - command that may need to be requeued.
+ *
+ * Returns: Nothing
+ *
+ * Notes: After command completion, there may be blocks left
+ * over which weren't finished by the previous command
+ * this can be for a number of reasons - the main one is
+ * I/O errors in the middle of the request, in which case
+ * we need to request the blocks that come after the bad
+ * sector.
+ * Notes: Upon return, cmd is a stale pointer.
+ */
+static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request *req = cmd->request;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_unprep_request(req);
+ req->special = NULL;
+ scsi_put_command(cmd);
+ blk_requeue_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_run_queue(q);
+
+ put_device(&sdev->sdev_gendev);
+}
+
+void scsi_run_host_queues(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost)
+ scsi_run_queue(sdev->request_queue);
+}
+
+static inline unsigned int scsi_sgtable_index(unsigned short nents)
+{
+ unsigned int index;
+
+ BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
+
+ if (nents <= 8)
+ index = 0;
+ else
+ index = get_count_order(nents) - 3;
+
+ return index;
+}
+
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
+{
+ struct scsi_host_sg_pool *sgp;
+
+ sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+ mempool_free(sgl, sgp->pool);
+}
+
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+ struct scsi_host_sg_pool *sgp;
+
+ sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+ return mempool_alloc(sgp->pool, gfp_mask);
+}
+
+static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
+{
+ if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+ return;
+ __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
+}
+
+static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
+{
+ struct scatterlist *first_chunk = NULL;
+ int ret;
+
+ BUG_ON(!nents);
+
+ if (mq) {
+ if (nents <= SCSI_MAX_SG_SEGMENTS) {
+ sdb->table.nents = nents;
+ sg_init_table(sdb->table.sgl, sdb->table.nents);
+ return 0;
+ }
+ first_chunk = sdb->table.sgl;
+ }
+
+ ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
+ first_chunk, GFP_ATOMIC, scsi_sg_alloc);
+ if (unlikely(ret))
+ scsi_free_sgtable(sdb, mq);
+ return ret;
+}
+
+static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
+{
+ if (cmd->request->cmd_type == REQ_TYPE_FS) {
+ struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+ if (drv->uninit_command)
+ drv->uninit_command(cmd);
+ }
+}
+
+static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+{
+ if (cmd->sdb.table.nents)
+ scsi_free_sgtable(&cmd->sdb, true);
+ if (cmd->request->next_rq && cmd->request->next_rq->special)
+ scsi_free_sgtable(cmd->request->next_rq->special, true);
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb, true);
+}
+
+static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ scsi_mq_free_sgtables(cmd);
+ scsi_uninit_cmd(cmd);
+
+ if (shost->use_cmd_list) {
+ BUG_ON(list_empty(&cmd->list));
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+}
+
+/*
+ * Function: scsi_release_buffers()
+ *
+ * Purpose: Free resources allocate for a scsi_command.
+ *
+ * Arguments: cmd - command that we are bailing.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: In the event that an upper level driver rejects a
+ * command, we must release resources allocated during
+ * the __init_io() function. Primarily this would involve
+ * the scatter-gather table.
+ */
+static void scsi_release_buffers(struct scsi_cmnd *cmd)
+{
+ if (cmd->sdb.table.nents)
+ scsi_free_sgtable(&cmd->sdb, false);
+
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb, false);
+}
+
+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
+{
+ struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
+
+ scsi_free_sgtable(bidi_sdb, false);
+ kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+ cmd->request->next_rq->special = NULL;
+}
+
+static bool scsi_end_request(struct request *req, int error,
+ unsigned int bytes, unsigned int bidi_bytes)
+{
+ struct scsi_cmnd *cmd = req->special;
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = sdev->request_queue;
+
+ if (blk_update_request(req, error, bytes))
+ return true;
+
+ /* Bidi request must be completed as a whole */
+ if (unlikely(bidi_bytes) &&
+ blk_update_request(req->next_rq, error, bidi_bytes))
+ return true;
+
+ if (blk_queue_add_random(q))
+ add_disk_randomness(req->rq_disk);
+
+ if (req->mq_ctx) {
+ /*
+ * In the MQ case the command gets freed by __blk_mq_end_request,
+ * so we have to do all cleanup that depends on it earlier.
+ *
+ * We also can't kick the queues from irq context, so we
+ * will have to defer it to a workqueue.
+ */
+ scsi_mq_uninit_cmd(cmd);
+
+ __blk_mq_end_request(req, error);
+
+ if (scsi_target(sdev)->single_lun ||
+ !list_empty(&sdev->host->starved_list))
+ kblockd_schedule_work(&sdev->requeue_work);
+ else
+ blk_mq_start_stopped_hw_queues(q, true);
+ } else {
+ unsigned long flags;
+
+ if (bidi_bytes)
+ scsi_release_bidi_buffers(cmd);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_finish_request(req, error);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_release_buffers(cmd);
+
+ scsi_put_command(cmd);
+ scsi_run_queue(q);
+ }
+
+ put_device(&sdev->sdev_gendev);
+ return false;
+}
+
+/**
+ * __scsi_error_from_host_byte - translate SCSI error code into errno
+ * @cmd: SCSI command (unused)
+ * @result: scsi error code
+ *
+ * Translate SCSI error code into standard UNIX errno.
+ * Return values:
+ * -ENOLINK temporary transport failure
+ * -EREMOTEIO permanent target failure, do not retry
+ * -EBADE permanent nexus failure, retry on other path
+ * -ENOSPC No write space available
+ * -ENODATA Medium error
+ * -EIO unspecified I/O error
+ */
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+ int error = 0;
+
+ switch(host_byte(result)) {
+ case DID_TRANSPORT_FAILFAST:
+ error = -ENOLINK;
+ break;
+ case DID_TARGET_FAILURE:
+ set_host_byte(cmd, DID_OK);
+ error = -EREMOTEIO;
+ break;
+ case DID_NEXUS_FAILURE:
+ set_host_byte(cmd, DID_OK);
+ error = -EBADE;
+ break;
+ case DID_ALLOC_FAILURE:
+ set_host_byte(cmd, DID_OK);
+ error = -ENOSPC;
+ break;
+ case DID_MEDIUM_ERROR:
+ set_host_byte(cmd, DID_OK);
+ error = -ENODATA;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+
+ return error;
+}
+
+/*
+ * Function: scsi_io_completion()
+ *
+ * Purpose: Completion processing for block device I/O requests.
+ *
+ * Arguments: cmd - command that is finished.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
+ * figure out what to do next:
+ *
+ * a) We can call scsi_requeue_command(). The request
+ * will be unprepared and put back on the queue. Then
+ * a new command will be created for it. This should
+ * be used if we made forward progress, or if we want
+ * to switch from READ(10) to READ(6) for example.
+ *
+ * b) We can call __scsi_queue_insert(). The request will
+ * be put back on the queue and retried using the same
+ * command as before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with -EIO to fail
+ * the remainder of the request.
+ */
+void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+{
+ int result = cmd->result;
+ struct request_queue *q = cmd->device->request_queue;
+ struct request *req = cmd->request;
+ int error = 0;
+ struct scsi_sense_hdr sshdr;
+ bool sense_valid = false;
+ int sense_deferred = 0, level = 0;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
+ ACTION_DELAYED_RETRY} action;
+ unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
+
+ if (result) {
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_deferred = scsi_sense_is_deferred(&sshdr);
+ }
+
+ if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
+ if (result) {
+ if (sense_valid && req->sense) {
+ /*
+ * SG_IO wants current and deferred errors
+ */
+ int len = 8 + cmd->sense_buffer[7];
+
+ if (len > SCSI_SENSE_BUFFERSIZE)
+ len = SCSI_SENSE_BUFFERSIZE;
+ memcpy(req->sense, cmd->sense_buffer, len);
+ req->sense_len = len;
+ }
+ if (!sense_deferred)
+ error = __scsi_error_from_host_byte(cmd, result);
+ }
+ /*
+ * __scsi_error_from_host_byte may have reset the host_byte
+ */
+ req->errors = cmd->result;
+
+ req->resid_len = scsi_get_resid(cmd);
+
+ if (scsi_bidi_cmnd(cmd)) {
+ /*
+ * Bidi commands Must be complete as a whole,
+ * both sides at once.
+ */
+ req->next_rq->resid_len = scsi_in(cmd)->resid;
+ if (scsi_end_request(req, 0, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ BUG();
+ return;
+ }
+ } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+ /*
+ * Certain non BLOCK_PC requests are commands that don't
+ * actually transfer anything (FLUSH), so cannot use
+ * good_bytes != blk_rq_bytes(req) as the signal for an error.
+ * This sets the error explicitly for the problem case.
+ */
+ error = __scsi_error_from_host_byte(cmd, result);
+ }
+
+ /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+ BUG_ON(blk_bidi_rq(req));
+
+ /*
+ * Next deal with any sectors which we were able to correctly
+ * handle.
+ */
+ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
+ "%u sectors total, %d bytes done.\n",
+ blk_rq_sectors(req), good_bytes));
+
+ /*
+ * Recovered errors need reporting, but they're always treated
+ * as success, so fiddle the result code here. For BLOCK_PC
+ * we already took a copy of the original into rq->errors which
+ * is what gets returned to the user
+ */
+ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+ /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
+ * print since caller wants ATA registers. Only occurs on
+ * SCSI ATA PASS_THROUGH commands when CK_COND=1
+ */
+ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+ ;
+ else if (!(req->cmd_flags & REQ_QUIET))
+ scsi_print_sense(cmd);
+ result = 0;
+ /* BLOCK_PC may have set error */
+ error = 0;
+ }
+
+ /*
+ * If we finished all bytes in the request we are done now.
+ */
+ if (!scsi_end_request(req, error, good_bytes, 0))
+ return;
+
+ /*
+ * Kill remainder if no retrys.
+ */
+ if (error && scsi_noretry_cmd(cmd)) {
+ if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
+ BUG();
+ return;
+ }
+
+ /*
+ * If there had been no error, but we have leftover bytes in the
+ * requeues just queue the command up again.
+ */
+ if (result == 0)
+ goto requeue;
+
+ error = __scsi_error_from_host_byte(cmd, result);
+
+ if (host_byte(result) == DID_RESET) {
+ /* Third party bus reset or reset for error recovery
+ * reasons. Just retry the command and see what
+ * happens.
+ */
+ action = ACTION_RETRY;
+ } else if (sense_valid && !sense_deferred) {
+ switch (sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ if (cmd->device->removable) {
+ /* Detected disc change. Set a bit
+ * and quietly refuse further access.
+ */
+ cmd->device->changed = 1;
+ action = ACTION_FAIL;
+ } else {
+ /* Must have been a power glitch, or a
+ * bus reset. Could not have been a
+ * media change, so we just retry the
+ * command and see what happens.
+ */
+ action = ACTION_RETRY;
+ }
+ break;
+ case ILLEGAL_REQUEST:
+ /* If we had an ILLEGAL REQUEST returned, then
+ * we may have performed an unsupported
+ * command. The only thing this should be
+ * would be a ten byte read where only a six
+ * byte read was supported. Also, on a system
+ * where READ CAPACITY failed, we may have
+ * read past the end of the disk.
+ */
+ if ((cmd->device->use_10_for_rw &&
+ sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
+ (cmd->cmnd[0] == READ_10 ||
+ cmd->cmnd[0] == WRITE_10)) {
+ /* This will issue a new 6-byte command. */
+ cmd->device->use_10_for_rw = 0;
+ action = ACTION_REPREP;
+ } else if (sshdr.asc == 0x10) /* DIX */ {
+ action = ACTION_FAIL;
+ error = -EILSEQ;
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+ action = ACTION_FAIL;
+ error = -EREMOTEIO;
+ } else
+ action = ACTION_FAIL;
+ break;
+ case ABORTED_COMMAND:
+ action = ACTION_FAIL;
+ if (sshdr.asc == 0x10) /* DIF */
+ error = -EILSEQ;
+ break;
+ case NOT_READY:
+ /* If the device is in the process of becoming
+ * ready, or has a temporary blockage, retry.
+ */
+ if (sshdr.asc == 0x04) {
+ switch (sshdr.ascq) {
+ case 0x01: /* becoming ready */
+ case 0x04: /* format in progress */
+ case 0x05: /* rebuild in progress */
+ case 0x06: /* recalculation in progress */
+ case 0x07: /* operation in progress */
+ case 0x08: /* Long write in progress */
+ case 0x09: /* self test in progress */
+ case 0x14: /* space allocation in progress */
+ action = ACTION_DELAYED_RETRY;
+ break;
+ default:
+ action = ACTION_FAIL;
+ break;
+ }
+ } else
+ action = ACTION_FAIL;
+ break;
+ case VOLUME_OVERFLOW:
+ /* See SSC3rXX or current. */
+ action = ACTION_FAIL;
+ break;
+ default:
+ action = ACTION_FAIL;
+ break;
+ }
+ } else
+ action = ACTION_FAIL;
+
+ if (action != ACTION_FAIL &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
+ action = ACTION_FAIL;
+
+ switch (action) {
+ case ACTION_FAIL:
+ /* Give up and fail the remainder of the request */
+ if (!(req->cmd_flags & REQ_QUIET)) {
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (unlikely(scsi_logging_level))
+ level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
+ SCSI_LOG_MLCOMPLETE_BITS);
+
+ /*
+ * if logging is enabled the failure will be printed
+ * in scsi_log_completion(), so avoid duplicate messages
+ */
+ if (!level && __ratelimit(&_rs)) {
+ scsi_print_result(cmd, NULL, FAILED);
+ if (driver_byte(result) & DRIVER_SENSE)
+ scsi_print_sense(cmd);
+ scsi_print_command(cmd);
+ }
+ }
+ if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
+ return;
+ /*FALLTHRU*/
+ case ACTION_REPREP:
+ requeue:
+ /* Unprep the request and put it back at the head of the queue.
+ * A new command will be prepared and issued.
+ */
+ if (q->mq_ops) {
+ cmd->request->cmd_flags &= ~REQ_DONTPREP;
+ scsi_mq_uninit_cmd(cmd);
+ scsi_mq_requeue_cmd(cmd);
+ } else {
+ scsi_release_buffers(cmd);
+ scsi_requeue_command(q, cmd);
+ }
+ break;
+ case ACTION_RETRY:
+ /* Retry the same command immediately */
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+ break;
+ case ACTION_DELAYED_RETRY:
+ /* Retry the same command after a delay */
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ break;
+ }
+}
+
+static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
+{
+ int count;
+
+ /*
+ * If sg table allocation fails, requeue request later.
+ */
+ if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
+ req->mq_ctx != NULL)))
+ return BLKPREP_DEFER;
+
+ /*
+ * Next, walk the list, and fill in the addresses and sizes of
+ * each segment.
+ */
+ count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
+ BUG_ON(count > sdb->table.nents);
+ sdb->table.nents = count;
+ sdb->length = blk_rq_bytes(req);
+ return BLKPREP_OK;
+}
+
+/*
+ * Function: scsi_init_io()
+ *
+ * Purpose: SCSI I/O initialize function.
+ *
+ * Arguments: cmd - Command descriptor we wish to initialize
+ *
+ * Returns: 0 on success
+ * BLKPREP_DEFER if the failure is retryable
+ * BLKPREP_KILL if the failure is fatal
+ */
+int scsi_init_io(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request *rq = cmd->request;
+ bool is_mq = (rq->mq_ctx != NULL);
+ int error;
+
+ BUG_ON(!rq->nr_phys_segments);
+
+ error = scsi_init_sgtable(rq, &cmd->sdb);
+ if (error)
+ goto err_exit;
+
+ if (blk_bidi_rq(rq)) {
+ if (!rq->q->mq_ops) {
+ struct scsi_data_buffer *bidi_sdb =
+ kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
+ if (!bidi_sdb) {
+ error = BLKPREP_DEFER;
+ goto err_exit;
+ }
+
+ rq->next_rq->special = bidi_sdb;
+ }
+
+ error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
+ if (error)
+ goto err_exit;
+ }
+
+ if (blk_integrity_rq(rq)) {
+ struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
+ int ivecs, count;
+
+ if (prot_sdb == NULL) {
+ /*
+ * This can happen if someone (e.g. multipath)
+ * queues a command to a device on an adapter
+ * that does not support DIX.
+ */
+ WARN_ON_ONCE(1);
+ error = BLKPREP_KILL;
+ goto err_exit;
+ }
+
+ ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
+
+ if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
+ error = BLKPREP_DEFER;
+ goto err_exit;
+ }
+
+ count = blk_rq_map_integrity_sg(rq->q, rq->bio,
+ prot_sdb->table.sgl);
+ BUG_ON(unlikely(count > ivecs));
+ BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+
+ cmd->prot_sdb = prot_sdb;
+ cmd->prot_sdb->table.nents = count;
+ }
+
+ return BLKPREP_OK;
+err_exit:
+ if (is_mq) {
+ scsi_mq_free_sgtables(cmd);
+ } else {
+ scsi_release_buffers(cmd);
+ cmd->request->special = NULL;
+ scsi_put_command(cmd);
+ put_device(&sdev->sdev_gendev);
+ }
+ return error;
+}
+EXPORT_SYMBOL(scsi_init_io);
+
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+ struct request *req)
+{
+ struct scsi_cmnd *cmd;
+
+ if (!req->special) {
+ /* Bail if we can't get a reference to the device */
+ if (!get_device(&sdev->sdev_gendev))
+ return NULL;
+
+ cmd = scsi_get_command(sdev, GFP_ATOMIC);
+ if (unlikely(!cmd)) {
+ put_device(&sdev->sdev_gendev);
+ return NULL;
+ }
+ req->special = cmd;
+ } else {
+ cmd = req->special;
+ }
+
+ /* pull a tag out of the request if we have one */
+ cmd->tag = req->tag;
+ cmd->request = req;
+
+ cmd->cmnd = req->cmd;
+ cmd->prot_op = SCSI_PROT_NORMAL;
+
+ return cmd;
+}
+
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+{
+ struct scsi_cmnd *cmd = req->special;
+
+ /*
+ * BLOCK_PC requests may transfer data, in which case they must
+ * a bio attached to them. Or they might contain a SCSI command
+ * that does not transfer data, in which case they may optionally
+ * submit a request without an attached bio.
+ */
+ if (req->bio) {
+ int ret = scsi_init_io(cmd);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ BUG_ON(blk_rq_bytes(req));
+
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+ }
+
+ cmd->cmd_len = req->cmd_len;
+ cmd->transfersize = blk_rq_bytes(req);
+ cmd->allowed = req->retries;
+ return BLKPREP_OK;
+}
+
+/*
+ * Setup a REQ_TYPE_FS command. These are simple request from filesystems
+ * that still need to be translated to SCSI CDBs from the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+{
+ struct scsi_cmnd *cmd = req->special;
+
+ if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+ && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+ int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+ if (ret != BLKPREP_OK)
+ return ret;
+ }
+
+ memset(cmd->cmnd, 0, BLK_MAX_CDB);
+ return scsi_cmd_to_driver(cmd)->init_command(cmd);
+}
+
+static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
+{
+ struct scsi_cmnd *cmd = req->special;
+
+ if (!blk_rq_bytes(req))
+ cmd->sc_data_direction = DMA_NONE;
+ else if (rq_data_dir(req) == WRITE)
+ cmd->sc_data_direction = DMA_TO_DEVICE;
+ else
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+ switch (req->cmd_type) {
+ case REQ_TYPE_FS:
+ return scsi_setup_fs_cmnd(sdev, req);
+ case REQ_TYPE_BLOCK_PC:
+ return scsi_setup_blk_pc_cmnd(sdev, req);
+ default:
+ return BLKPREP_KILL;
+ }
+}
+
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+{
+ int ret = BLKPREP_OK;
+
+ /*
+ * If the device is not in running state we will reject some
+ * or all commands.
+ */
+ if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+ switch (sdev->sdev_state) {
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
+ /*
+ * If the device is offline we refuse to process any
+ * commands. The device must be brought online
+ * before trying any recovery commands.
+ */
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ ret = BLKPREP_KILL;
+ break;
+ case SDEV_DEL:
+ /*
+ * If the device is fully deleted, we refuse to
+ * process any commands as well.
+ */
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to dead device\n");
+ ret = BLKPREP_KILL;
+ break;
+ case SDEV_BLOCK:
+ case SDEV_CREATED_BLOCK:
+ ret = BLKPREP_DEFER;
+ break;
+ case SDEV_QUIESCE:
+ /*
+ * If the devices is blocked we defer normal commands.
+ */
+ if (!(req->cmd_flags & REQ_PREEMPT))
+ ret = BLKPREP_DEFER;
+ break;
+ default:
+ /*
+ * For any other not fully online state we only allow
+ * special commands. In particular any user initiated
+ * command is not allowed.
+ */
+ if (!(req->cmd_flags & REQ_PREEMPT))
+ ret = BLKPREP_KILL;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+{
+ struct scsi_device *sdev = q->queuedata;
+
+ switch (ret) {
+ case BLKPREP_KILL:
+ req->errors = DID_NO_CONNECT << 16;
+ /* release the command and kill it */
+ if (req->special) {
+ struct scsi_cmnd *cmd = req->special;
+ scsi_release_buffers(cmd);
+ scsi_put_command(cmd);
+ put_device(&sdev->sdev_gendev);
+ req->special = NULL;
+ }
+ break;
+ case BLKPREP_DEFER:
+ /*
+ * If we defer, the blk_peek_request() returns NULL, but the
+ * queue must be restarted, so we schedule a callback to happen
+ * shortly.
+ */
+ if (atomic_read(&sdev->device_busy) == 0)
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
+ break;
+ default:
+ req->cmd_flags |= REQ_DONTPREP;
+ }
+
+ return ret;
+}
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+ struct scsi_device *sdev = q->queuedata;
+ struct scsi_cmnd *cmd;
+ int ret;
+
+ ret = scsi_prep_state_check(sdev, req);
+ if (ret != BLKPREP_OK)
+ goto out;
+
+ cmd = scsi_get_cmd_from_req(sdev, req);
+ if (unlikely(!cmd)) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ ret = scsi_setup_cmnd(sdev, req);
+out:
+ return scsi_prep_return(q, req, ret);
+}
+
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+ scsi_uninit_cmd(req->special);
+}
+
+/*
+ * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
+ * return 0.
+ *
+ * Called with the queue_lock held.
+ */
+static inline int scsi_dev_queue_ready(struct request_queue *q,
+ struct scsi_device *sdev)
+{
+ unsigned int busy;
+
+ busy = atomic_inc_return(&sdev->device_busy) - 1;
+ if (atomic_read(&sdev->device_blocked)) {
+ if (busy)
+ goto out_dec;
+
+ /*
+ * unblock after device_blocked iterates to zero
+ */
+ if (atomic_dec_return(&sdev->device_blocked) > 0) {
+ /*
+ * For the MQ case we take care of this in the caller.
+ */
+ if (!q->mq_ops)
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
+ goto out_dec;
+ }
+ SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
+ "unblocking device at zero depth\n"));
+ }
+
+ if (busy >= sdev->queue_depth)
+ goto out_dec;
+
+ return 1;
+out_dec:
+ atomic_dec(&sdev->device_busy);
+ return 0;
+}
+
+/*
+ * scsi_target_queue_ready: checks if there we can send commands to target
+ * @sdev: scsi device on starget to check.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+ struct scsi_device *sdev)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+ unsigned int busy;
+
+ if (starget->single_lun) {
+ spin_lock_irq(shost->host_lock);
+ if (starget->starget_sdev_user &&
+ starget->starget_sdev_user != sdev) {
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+ starget->starget_sdev_user = sdev;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ if (starget->can_queue <= 0)
+ return 1;
+
+ busy = atomic_inc_return(&starget->target_busy) - 1;
+ if (atomic_read(&starget->target_blocked) > 0) {
+ if (busy)
+ goto starved;
+
+ /*
+ * unblock after target_blocked iterates to zero
+ */
+ if (atomic_dec_return(&starget->target_blocked) > 0)
+ goto out_dec;
+
+ SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+ "unblocking target at zero depth\n"));
+ }
+
+ if (busy >= starget->can_queue)
+ goto starved;
+
+ return 1;
+
+starved:
+ spin_lock_irq(shost->host_lock);
+ list_move_tail(&sdev->starved_entry, &shost->starved_list);
+ spin_unlock_irq(shost->host_lock);
+out_dec:
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
+ return 0;
+}
+
+/*
+ * scsi_host_queue_ready: if we can send requests to shost, return 1 else
+ * return 0. We must end up running the queue again whenever 0 is
+ * returned, else IO can hang.
+ */
+static inline int scsi_host_queue_ready(struct request_queue *q,
+ struct Scsi_Host *shost,
+ struct scsi_device *sdev)
+{
+ unsigned int busy;
+
+ if (scsi_host_in_recovery(shost))
+ return 0;
+
+ busy = atomic_inc_return(&shost->host_busy) - 1;
+ if (atomic_read(&shost->host_blocked) > 0) {
+ if (busy)
+ goto starved;
+
+ /*
+ * unblock after host_blocked iterates to zero
+ */
+ if (atomic_dec_return(&shost->host_blocked) > 0)
+ goto out_dec;
+
+ SCSI_LOG_MLQUEUE(3,
+ shost_printk(KERN_INFO, shost,
+ "unblocking host at zero depth\n"));
+ }
+
+ if (shost->can_queue > 0 && busy >= shost->can_queue)
+ goto starved;
+ if (shost->host_self_blocked)
+ goto starved;
+
+ /* We're OK to process the command, so we can't be starved */
+ if (!list_empty(&sdev->starved_entry)) {
+ spin_lock_irq(shost->host_lock);
+ if (!list_empty(&sdev->starved_entry))
+ list_del_init(&sdev->starved_entry);
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return 1;
+
+starved:
+ spin_lock_irq(shost->host_lock);
+ if (list_empty(&sdev->starved_entry))
+ list_add_tail(&sdev->starved_entry, &shost->starved_list);
+ spin_unlock_irq(shost->host_lock);
+out_dec:
+ atomic_dec(&shost->host_busy);
+ return 0;
+}
+
+/*
+ * Busy state exporting function for request stacking drivers.
+ *
+ * For efficiency, no lock is taken to check the busy state of
+ * shost/starget/sdev, since the returned value is not guaranteed and
+ * may be changed after request stacking drivers call the function,
+ * regardless of taking lock or not.
+ *
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
+ * needs to return 'not busy'. Otherwise, request stacking drivers
+ * may hold requests forever.
+ */
+static int scsi_lld_busy(struct request_queue *q)
+{
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+
+ if (blk_queue_dying(q))
+ return 0;
+
+ shost = sdev->host;
+
+ /*
+ * Ignore host/starget busy state.
+ * Since block layer does not have a concept of fairness across
+ * multiple queues, congestion of host/starget needs to be handled
+ * in SCSI layer.
+ */
+ if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Kill a request for a dead device
+ */
+static void scsi_kill_request(struct request *req, struct request_queue *q)
+{
+ struct scsi_cmnd *cmd = req->special;
+ struct scsi_device *sdev;
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+
+ blk_start_request(req);
+
+ scmd_printk(KERN_INFO, cmd, "killing request\n");
+
+ sdev = cmd->device;
+ starget = scsi_target(sdev);
+ shost = sdev->host;
+ scsi_init_cmd_errh(cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+ atomic_inc(&cmd->device->iorequest_cnt);
+
+ /*
+ * SCSI request completion path will do scsi_device_unbusy(),
+ * bump busy counts. To bump the counters, we need to dance
+ * with the locks as normal issue path does.
+ */
+ atomic_inc(&sdev->device_busy);
+ atomic_inc(&shost->host_busy);
+ if (starget->can_queue > 0)
+ atomic_inc(&starget->target_busy);
+
+ blk_complete_request(req);
+}
+
+static void scsi_softirq_done(struct request *rq)
+{
+ struct scsi_cmnd *cmd = rq->special;
+ unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
+ int disposition;
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+ atomic_inc(&cmd->device->iodone_cnt);
+ if (cmd->result)
+ atomic_inc(&cmd->device->ioerr_cnt);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ sdev_printk(KERN_ERR, cmd->device,
+ "timing out command, waited %lus\n",
+ wait_for/HZ);
+ disposition = SUCCESS;
+ }
+
+ scsi_log_completion(cmd, disposition);
+
+ switch (disposition) {
+ case SUCCESS:
+ scsi_finish_command(cmd);
+ break;
+ case NEEDS_RETRY:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+ break;
+ case ADD_TO_MLQUEUE:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
+ default:
+ if (!scsi_eh_scmd_add(cmd, 0))
+ scsi_finish_command(cmd);
+ }
+}
+
+/**
+ * scsi_dispatch_command - Dispatch a command to the low-level driver.
+ * @cmd: command block we are dispatching.
+ *
+ * Return: nonzero return request was rejected and device's queue needs to be
+ * plugged.
+ */
+static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ int rtn = 0;
+
+ atomic_inc(&cmd->device->iorequest_cnt);
+
+ /* check if the device is still usable */
+ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
+ /* in SDEV_DEL we error all commands. DID_NO_CONNECT
+ * returns an immediate error upwards, and signals
+ * that the device is no longer present */
+ cmd->result = DID_NO_CONNECT << 16;
+ goto done;
+ }
+
+ /* Check to see if the scsi lld made this device blocked. */
+ if (unlikely(scsi_device_blocked(cmd->device))) {
+ /*
+ * in blocked state, the command is just put back on
+ * the device queue. The suspend state has already
+ * blocked the queue so future requests should not
+ * occur until the device transitions out of the
+ * suspend state.
+ */
+ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ "queuecommand : device blocked\n"));
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ /* Store the LUN value in cmnd, if needed. */
+ if (cmd->device->lun_in_cdb)
+ cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
+ (cmd->device->lun << 5 & 0xe0);
+
+ scsi_log_send(cmd);
+
+ /*
+ * Before we queue this command, check if the command
+ * length exceeds what the host adapter can handle.
+ */
+ if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
+ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ "queuecommand : command too long. "
+ "cdb_size=%d host->max_cmd_len=%d\n",
+ cmd->cmd_len, cmd->device->host->max_cmd_len));
+ cmd->result = (DID_ABORT << 16);
+ goto done;
+ }
+
+ if (unlikely(host->shost_state == SHOST_DEL)) {
+ cmd->result = (DID_NO_CONNECT << 16);
+ goto done;
+
+ }
+
+ trace_scsi_dispatch_cmd_start(cmd);
+ rtn = host->hostt->queuecommand(host, cmd);
+ if (rtn) {
+ trace_scsi_dispatch_cmd_error(cmd, rtn);
+ if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
+ rtn != SCSI_MLQUEUE_TARGET_BUSY)
+ rtn = SCSI_MLQUEUE_HOST_BUSY;
+
+ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ "queuecommand : request rejected\n"));
+ }
+
+ return rtn;
+ done:
+ cmd->scsi_done(cmd);
+ return 0;
+}
+
+/**
+ * scsi_done - Invoke completion on finished SCSI command.
+ * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
+ * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
+ *
+ * Description: This function is the mid-level's (SCSI Core) interrupt routine,
+ * which regains ownership of the SCSI command (de facto) from a LLDD, and
+ * calls blk_complete_request() for further processing.
+ *
+ * This function is interrupt context safe.
+ */
+static void scsi_done(struct scsi_cmnd *cmd)
+{
+ trace_scsi_dispatch_cmd_done(cmd);
+ blk_complete_request(cmd->request);
+}
+
+/*
+ * Function: scsi_request_fn()
+ *
+ * Purpose: Main strategy routine for SCSI.
+ *
+ * Arguments: q - Pointer to actual queue.
+ *
+ * Returns: Nothing
+ *
+ * Lock status: IO request lock assumed to be held when called.
+ */
+static void scsi_request_fn(struct request_queue *q)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
+{
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+ struct scsi_cmnd *cmd;
+ struct request *req;
+
+ /*
+ * To start with, we keep looping until the queue is empty, or until
+ * the host is no longer able to accept any more requests.
+ */
+ shost = sdev->host;
+ for (;;) {
+ int rtn;
+ /*
+ * get next queueable request. We do this early to make sure
+ * that the request is fully prepared even if we cannot
+ * accept it.
+ */
+ req = blk_peek_request(q);
+ if (!req)
+ break;
+
+ if (unlikely(!scsi_device_online(sdev))) {
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ scsi_kill_request(req, q);
+ continue;
+ }
+
+ if (!scsi_dev_queue_ready(q, sdev))
+ break;
+
+ /*
+ * Remove the request from the request list.
+ */
+ if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
+ blk_start_request(req);
+
+ spin_unlock_irq(q->queue_lock);
+ cmd = req->special;
+ if (unlikely(cmd == NULL)) {
+ printk(KERN_CRIT "impossible request in %s.\n"
+ "please mail a stack trace to "
+ "linux-scsi@vger.kernel.org\n",
+ __func__);
+ blk_dump_rq_flags(req, "foo");
+ BUG();
+ }
+
+ /*
+ * We hit this when the driver is using a host wide
+ * tag map. For device level tag maps the queue_depth check
+ * in the device ready fn would prevent us from trying
+ * to allocate a tag. Since the map is a shared host resource
+ * we add the dev to the starved list so it eventually gets
+ * a run when a tag is freed.
+ */
+ if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
+ spin_lock_irq(shost->host_lock);
+ if (list_empty(&sdev->starved_entry))
+ list_add_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ spin_unlock_irq(shost->host_lock);
+ goto not_ready;
+ }
+
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto not_ready;
+
+ if (!scsi_host_queue_ready(q, shost, sdev))
+ goto host_not_ready;
+
+ if (sdev->simple_tags)
+ cmd->flags |= SCMD_TAGGED;
+ else
+ cmd->flags &= ~SCMD_TAGGED;
+
+ /*
+ * Finally, initialize any error handling parameters, and set up
+ * the timers for timeouts.
+ */
+ scsi_init_cmd_errh(cmd);
+
+ /*
+ * Dispatch the command to the low-level driver.
+ */
+ cmd->scsi_done = scsi_done;
+ rtn = scsi_dispatch_cmd(cmd);
+ if (rtn) {
+ scsi_queue_insert(cmd, rtn);
+ spin_lock_irq(q->queue_lock);
+ goto out_delay;
+ }
+ spin_lock_irq(q->queue_lock);
+ }
+
+ return;
+
+ host_not_ready:
+ if (scsi_target(sdev)->can_queue > 0)
+ atomic_dec(&scsi_target(sdev)->target_busy);
+ not_ready:
+ /*
+ * lock q, handle tag, requeue req, and decrement device_busy. We
+ * must return with queue_lock held.
+ *
+ * Decrementing device_busy without checking it is OK, as all such
+ * cases (host limits or settings) should run the queue at some
+ * later time.
+ */
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, req);
+ atomic_dec(&sdev->device_busy);
+out_delay:
+ if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
+}
+
+static inline int prep_to_mq(int ret)
+{
+ switch (ret) {
+ case BLKPREP_OK:
+ return 0;
+ case BLKPREP_DEFER:
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ default:
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+}
+
+static int scsi_mq_prep_fn(struct request *req)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_device *sdev = req->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned char *sense_buf = cmd->sense_buffer;
+ struct scatterlist *sg;
+
+ memset(cmd, 0, sizeof(struct scsi_cmnd));
+
+ req->special = cmd;
+
+ cmd->request = req;
+ cmd->device = sdev;
+ cmd->sense_buffer = sense_buf;
+
+ cmd->tag = req->tag;
+
+ cmd->cmnd = req->cmd;
+ cmd->prot_op = SCSI_PROT_NORMAL;
+
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ cmd->jiffies_at_alloc = jiffies;
+
+ if (shost->use_cmd_list) {
+ spin_lock_irq(&sdev->list_lock);
+ list_add_tail(&cmd->list, &sdev->cmd_list);
+ spin_unlock_irq(&sdev->list_lock);
+ }
+
+ sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
+ cmd->sdb.table.sgl = sg;
+
+ if (scsi_host_get_prot(shost)) {
+ cmd->prot_sdb = (void *)sg +
+ min_t(unsigned int,
+ shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
+ sizeof(struct scatterlist);
+ memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
+
+ cmd->prot_sdb->table.sgl =
+ (struct scatterlist *)(cmd->prot_sdb + 1);
+ }
+
+ if (blk_bidi_rq(req)) {
+ struct request *next_rq = req->next_rq;
+ struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
+
+ memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
+ bidi_sdb->table.sgl =
+ (struct scatterlist *)(bidi_sdb + 1);
+
+ next_rq->special = bidi_sdb;
+ }
+
+ blk_mq_start_request(req);
+
+ return scsi_setup_cmnd(sdev, req);
+}
+
+static void scsi_mq_done(struct scsi_cmnd *cmd)
+{
+ trace_scsi_dispatch_cmd_done(cmd);
+ blk_mq_complete_request(cmd->request);
+}
+
+static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ int ret;
+ int reason;
+
+ ret = prep_to_mq(scsi_prep_state_check(sdev, req));
+ if (ret)
+ goto out;
+
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ if (!get_device(&sdev->sdev_gendev))
+ goto out;
+
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto out_put_device;
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto out_dec_device_busy;
+ if (!scsi_host_queue_ready(q, shost, sdev))
+ goto out_dec_target_busy;
+
+
+ if (!(req->cmd_flags & REQ_DONTPREP)) {
+ ret = prep_to_mq(scsi_mq_prep_fn(req));
+ if (ret)
+ goto out_dec_host_busy;
+ req->cmd_flags |= REQ_DONTPREP;
+ } else {
+ blk_mq_start_request(req);
+ }
+
+ if (sdev->simple_tags)
+ cmd->flags |= SCMD_TAGGED;
+ else
+ cmd->flags &= ~SCMD_TAGGED;
+
+ scsi_init_cmd_errh(cmd);
+ cmd->scsi_done = scsi_mq_done;
+
+ reason = scsi_dispatch_cmd(cmd);
+ if (reason) {
+ scsi_set_blocked(cmd, reason);
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ goto out_dec_host_busy;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+
+out_dec_host_busy:
+ atomic_dec(&shost->host_busy);
+out_dec_target_busy:
+ if (scsi_target(sdev)->can_queue > 0)
+ atomic_dec(&scsi_target(sdev)->target_busy);
+out_dec_device_busy:
+ atomic_dec(&sdev->device_busy);
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+out:
+ switch (ret) {
+ case BLK_MQ_RQ_QUEUE_BUSY:
+ blk_mq_stop_hw_queue(hctx);
+ if (atomic_read(&sdev->device_busy) == 0 &&
+ !scsi_device_blocked(sdev))
+ blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+ break;
+ case BLK_MQ_RQ_QUEUE_ERROR:
+ /*
+ * Make sure to release all allocated ressources when
+ * we hit an error, as we will never see this command
+ * again.
+ */
+ if (req->cmd_flags & REQ_DONTPREP)
+ scsi_mq_uninit_cmd(cmd);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static enum blk_eh_timer_return scsi_timeout(struct request *req,
+ bool reserved)
+{
+ if (reserved)
+ return BLK_EH_RESET_TIMER;
+ return scsi_times_out(req);
+}
+
+static int scsi_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
+ numa_node);
+ if (!cmd->sense_buffer)
+ return -ENOMEM;
+ return 0;
+}
+
+static void scsi_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ kfree(cmd->sense_buffer);
+}
+
+static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
+{
+ struct device *host_dev;
+ u64 bounce_limit = 0xffffffff;
+
+ if (shost->unchecked_isa_dma)
+ return BLK_BOUNCE_ISA;
+ /*
+ * Platforms with virtual-DMA translation
+ * hardware have no practical limit.
+ */
+ if (!PCI_DMA_BUS_IS_PHYS)
+ return BLK_BOUNCE_ANY;
+
+ host_dev = scsi_get_device(shost);
+ if (host_dev && host_dev->dma_mask)
+ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
+
+ return bounce_limit;
+}
+
+static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+{
+ struct device *dev = shost->dma_dev;
+
+ /*
+ * this limit is imposed by hardware restrictions
+ */
+ blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+ SCSI_MAX_SG_CHAIN_SEGMENTS));
+
+ if (scsi_host_prot_dma(shost)) {
+ shost->sg_prot_tablesize =
+ min_not_zero(shost->sg_prot_tablesize,
+ (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
+ BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
+ blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+ }
+
+ blk_queue_max_hw_sectors(q, shost->max_sectors);
+ blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+ blk_queue_segment_boundary(q, shost->dma_boundary);
+ dma_set_seg_boundary(dev, shost->dma_boundary);
+
+ blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
+
+ if (!shost->use_clustering)
+ q->limits.cluster = 0;
+
+ /*
+ * set a reasonable default alignment on word boundaries: the
+ * host and device may alter it using
+ * blk_queue_update_dma_alignment() later.
+ */
+ blk_queue_dma_alignment(q, 0x03);
+}
+
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+ request_fn_proc *request_fn)
+{
+ struct request_queue *q;
+
+ q = blk_init_queue(request_fn, NULL);
+ if (!q)
+ return NULL;
+ __scsi_init_queue(shost, q);
+ return q;
+}
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+ struct request_queue *q;
+
+ q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+ if (!q)
+ return NULL;
+
+ blk_queue_prep_rq(q, scsi_prep_fn);
+ blk_queue_unprep_rq(q, scsi_unprep_fn);
+ blk_queue_softirq_done(q, scsi_softirq_done);
+ blk_queue_rq_timed_out(q, scsi_times_out);
+ blk_queue_lld_busy(q, scsi_lld_busy);
+ return q;
+}
+
+static struct blk_mq_ops scsi_mq_ops = {
+ .map_queue = blk_mq_map_queue,
+ .queue_rq = scsi_queue_rq,
+ .complete = scsi_softirq_done,
+ .timeout = scsi_timeout,
+ .init_request = scsi_init_request,
+ .exit_request = scsi_exit_request,
+};
+
+struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
+{
+ sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
+ if (IS_ERR(sdev->request_queue))
+ return NULL;
+
+ sdev->request_queue->queuedata = sdev;
+ __scsi_init_queue(sdev->host, sdev->request_queue);
+ return sdev->request_queue;
+}
+
+int scsi_mq_setup_tags(struct Scsi_Host *shost)
+{
+ unsigned int cmd_size, sgl_size, tbl_size;
+
+ tbl_size = shost->sg_tablesize;
+ if (tbl_size > SCSI_MAX_SG_SEGMENTS)
+ tbl_size = SCSI_MAX_SG_SEGMENTS;
+ sgl_size = tbl_size * sizeof(struct scatterlist);
+ cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
+ if (scsi_host_get_prot(shost))
+ cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+
+ memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+ shost->tag_set.ops = &scsi_mq_ops;
+ shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
+ shost->tag_set.queue_depth = shost->can_queue;
+ shost->tag_set.cmd_size = cmd_size;
+ shost->tag_set.numa_node = NUMA_NO_NODE;
+ shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ shost->tag_set.flags |=
+ BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+ shost->tag_set.driver_data = shost;
+
+ return blk_mq_alloc_tag_set(&shost->tag_set);
+}
+
+void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+{
+ blk_mq_free_tag_set(&shost->tag_set);
+}
+
+/*
+ * Function: scsi_block_requests()
+ *
+ * Purpose: Utility function used by low-level drivers to prevent further
+ * commands from being queued to the device.
+ *
+ * Arguments: shost - Host in question
+ *
+ * Returns: Nothing
+ *
+ * Lock status: No locks are assumed held.
+ *
+ * Notes: There is no timer nor any other means by which the requests
+ * get unblocked other than the low-level driver calling
+ * scsi_unblock_requests().
+ */
+void scsi_block_requests(struct Scsi_Host *shost)
+{
+ shost->host_self_blocked = 1;
+}
+EXPORT_SYMBOL(scsi_block_requests);
+
+/*
+ * Function: scsi_unblock_requests()
+ *
+ * Purpose: Utility function used by low-level drivers to allow further
+ * commands from being queued to the device.
+ *
+ * Arguments: shost - Host in question
+ *
+ * Returns: Nothing
+ *
+ * Lock status: No locks are assumed held.
+ *
+ * Notes: There is no timer nor any other means by which the requests
+ * get unblocked other than the low-level driver calling
+ * scsi_unblock_requests().
+ *
+ * This is done as an API function so that changes to the
+ * internals of the scsi mid-layer won't require wholesale
+ * changes to drivers that use this feature.
+ */
+void scsi_unblock_requests(struct Scsi_Host *shost)
+{
+ shost->host_self_blocked = 0;
+ scsi_run_host_queues(shost);
+}
+EXPORT_SYMBOL(scsi_unblock_requests);
+
+int __init scsi_init_queue(void)
+{
+ int i;
+
+ scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+ sizeof(struct scsi_data_buffer),
+ 0, 0, NULL);
+ if (!scsi_sdb_cache) {
+ printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+ int size = sgp->size * sizeof(struct scatterlist);
+
+ sgp->slab = kmem_cache_create(sgp->name, size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sgp->slab) {
+ printk(KERN_ERR "SCSI: can't init sg slab %s\n",
+ sgp->name);
+ goto cleanup_sdb;
+ }
+
+ sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+ sgp->slab);
+ if (!sgp->pool) {
+ printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
+ sgp->name);
+ goto cleanup_sdb;
+ }
+ }
+
+ return 0;
+
+cleanup_sdb:
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+ if (sgp->pool)
+ mempool_destroy(sgp->pool);
+ if (sgp->slab)
+ kmem_cache_destroy(sgp->slab);
+ }
+ kmem_cache_destroy(scsi_sdb_cache);
+
+ return -ENOMEM;
+}
+
+void scsi_exit_queue(void)
+{
+ int i;
+
+ kmem_cache_destroy(scsi_sdb_cache);
+
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
+ }
+}
+
+/**
+ * scsi_mode_select - issue a mode select
+ * @sdev: SCSI device to be queried
+ * @pf: Page format bit (1 == standard, 0 == vendor specific)
+ * @sp: Save page bit (0 == don't save, 1 == save)
+ * @modepage: mode page being requested
+ * @buffer: request buffer (may not be smaller than eight bytes)
+ * @len: length of request buffer.
+ * @timeout: command timeout
+ * @retries: number of retries before failing
+ * @data: returns a structure abstracting the mode header data
+ * @sshdr: place to put sense data (or NULL if no sense to be collected).
+ * must be SCSI_SENSE_BUFFERSIZE big.
+ *
+ * Returns zero if successful; negative error number or scsi
+ * status on error
+ *
+ */
+int
+scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
+ unsigned char *buffer, int len, int timeout, int retries,
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
+{
+ unsigned char cmd[10];
+ unsigned char *real_buffer;
+ int ret;
+
+ memset(cmd, 0, sizeof(cmd));
+ cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
+
+ if (sdev->use_10_for_ms) {
+ if (len > 65535)
+ return -EINVAL;
+ real_buffer = kmalloc(8 + len, GFP_KERNEL);
+ if (!real_buffer)
+ return -ENOMEM;
+ memcpy(real_buffer + 8, buffer, len);
+ len += 8;
+ real_buffer[0] = 0;
+ real_buffer[1] = 0;
+ real_buffer[2] = data->medium_type;
+ real_buffer[3] = data->device_specific;
+ real_buffer[4] = data->longlba ? 0x01 : 0;
+ real_buffer[5] = 0;
+ real_buffer[6] = data->block_descriptor_length >> 8;
+ real_buffer[7] = data->block_descriptor_length;
+
+ cmd[0] = MODE_SELECT_10;
+ cmd[7] = len >> 8;
+ cmd[8] = len;
+ } else {
+ if (len > 255 || data->block_descriptor_length > 255 ||
+ data->longlba)
+ return -EINVAL;
+
+ real_buffer = kmalloc(4 + len, GFP_KERNEL);
+ if (!real_buffer)
+ return -ENOMEM;
+ memcpy(real_buffer + 4, buffer, len);
+ len += 4;
+ real_buffer[0] = 0;
+ real_buffer[1] = data->medium_type;
+ real_buffer[2] = data->device_specific;
+ real_buffer[3] = data->block_descriptor_length;
+
+
+ cmd[0] = MODE_SELECT;
+ cmd[4] = len;
+ }
+
+ ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
+ sshdr, timeout, retries, NULL);
+ kfree(real_buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_mode_select);
+
+/**
+ * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
+ * @sdev: SCSI device to be queried
+ * @dbd: set if mode sense will allow block descriptors to be returned
+ * @modepage: mode page being requested
+ * @buffer: request buffer (may not be smaller than eight bytes)
+ * @len: length of request buffer.
+ * @timeout: command timeout
+ * @retries: number of retries before failing
+ * @data: returns a structure abstracting the mode header data
+ * @sshdr: place to put sense data (or NULL if no sense to be collected).
+ * must be SCSI_SENSE_BUFFERSIZE big.
+ *
+ * Returns zero if unsuccessful, or the header offset (either 4
+ * or 8 depending on whether a six or ten byte command was
+ * issued) if successful.
+ */
+int
+scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+ unsigned char *buffer, int len, int timeout, int retries,
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
+{
+ unsigned char cmd[12];
+ int use_10_for_ms;
+ int header_length;
+ int result;
+ struct scsi_sense_hdr my_sshdr;
+
+ memset(data, 0, sizeof(*data));
+ memset(&cmd[0], 0, 12);
+ cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
+ cmd[2] = modepage;
+
+ /* caller might not be interested in sense, but we need it */
+ if (!sshdr)
+ sshdr = &my_sshdr;
+
+ retry:
+ use_10_for_ms = sdev->use_10_for_ms;
+
+ if (use_10_for_ms) {
+ if (len < 8)
+ len = 8;
+
+ cmd[0] = MODE_SENSE_10;
+ cmd[8] = len;
+ header_length = 8;
+ } else {
+ if (len < 4)
+ len = 4;
+
+ cmd[0] = MODE_SENSE;
+ cmd[4] = len;
+ header_length = 4;
+ }
+
+ memset(buffer, 0, len);
+
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+ sshdr, timeout, retries, NULL);
+
+ /* This code looks awful: what it's doing is making sure an
+ * ILLEGAL REQUEST sense return identifies the actual command
+ * byte as the problem. MODE_SENSE commands can return
+ * ILLEGAL REQUEST if the code page isn't supported */
+
+ if (use_10_for_ms && !scsi_status_is_good(result) &&
+ (driver_byte(result) & DRIVER_SENSE)) {
+ if (scsi_sense_valid(sshdr)) {
+ if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
+ (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
+ /*
+ * Invalid command operation code
+ */
+ sdev->use_10_for_ms = 0;
+ goto retry;
+ }
+ }
+ }
+
+ if(scsi_status_is_good(result)) {
+ if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
+ (modepage == 6 || modepage == 8))) {
+ /* Initio breakage? */
+ header_length = 0;
+ data->length = 13;
+ data->medium_type = 0;
+ data->device_specific = 0;
+ data->longlba = 0;
+ data->block_descriptor_length = 0;
+ } else if(use_10_for_ms) {
+ data->length = buffer[0]*256 + buffer[1] + 2;
+ data->medium_type = buffer[2];
+ data->device_specific = buffer[3];
+ data->longlba = buffer[4] & 0x01;
+ data->block_descriptor_length = buffer[6]*256
+ + buffer[7];
+ } else {
+ data->length = buffer[0] + 1;
+ data->medium_type = buffer[1];
+ data->device_specific = buffer[2];
+ data->block_descriptor_length = buffer[3];
+ }
+ data->header_length = header_length;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(scsi_mode_sense);
+
+/**
+ * scsi_test_unit_ready - test if unit is ready
+ * @sdev: scsi device to change the state of.
+ * @timeout: command timeout
+ * @retries: number of retries before failing
+ * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
+ * returning sense. Make sure that this is cleared before passing
+ * in.
+ *
+ * Returns zero if unsuccessful or an error if TUR failed. For
+ * removable media, UNIT_ATTENTION sets ->changed flag.
+ **/
+int
+scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
+ struct scsi_sense_hdr *sshdr_external)
+{
+ char cmd[] = {
+ TEST_UNIT_READY, 0, 0, 0, 0, 0,
+ };
+ struct scsi_sense_hdr *sshdr;
+ int result;
+
+ if (!sshdr_external)
+ sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ else
+ sshdr = sshdr_external;
+
+ /* try to eat the UNIT_ATTENTION if there are enough retries */
+ do {
+ result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
+ timeout, retries, NULL);
+ if (sdev->removable && scsi_sense_valid(sshdr) &&
+ sshdr->sense_key == UNIT_ATTENTION)
+ sdev->changed = 1;
+ } while (scsi_sense_valid(sshdr) &&
+ sshdr->sense_key == UNIT_ATTENTION && --retries);
+
+ if (!sshdr_external)
+ kfree(sshdr);
+ return result;
+}
+EXPORT_SYMBOL(scsi_test_unit_ready);
+
+/**
+ * scsi_device_set_state - Take the given device through the device state model.
+ * @sdev: scsi device to change the state of.
+ * @state: state to change to.
+ *
+ * Returns zero if unsuccessful or an error if the requested
+ * transition is illegal.
+ */
+int
+scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
+{
+ enum scsi_device_state oldstate = sdev->sdev_state;
+
+ if (state == oldstate)
+ return 0;
+
+ switch (state) {
+ case SDEV_CREATED:
+ switch (oldstate) {
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_RUNNING:
+ switch (oldstate) {
+ case SDEV_CREATED:
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
+ case SDEV_QUIESCE:
+ case SDEV_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_QUIESCE:
+ switch (oldstate) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
+ switch (oldstate) {
+ case SDEV_CREATED:
+ case SDEV_RUNNING:
+ case SDEV_QUIESCE:
+ case SDEV_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_BLOCK:
+ switch (oldstate) {
+ case SDEV_RUNNING:
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_CREATED_BLOCK:
+ switch (oldstate) {
+ case SDEV_CREATED:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_CANCEL:
+ switch (oldstate) {
+ case SDEV_CREATED:
+ case SDEV_RUNNING:
+ case SDEV_QUIESCE:
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
+ case SDEV_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_DEL:
+ switch (oldstate) {
+ case SDEV_CREATED:
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
+ case SDEV_CANCEL:
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ }
+ sdev->sdev_state = state;
+ return 0;
+
+ illegal:
+ SCSI_LOG_ERROR_RECOVERY(1,
+ sdev_printk(KERN_ERR, sdev,
+ "Illegal state transition %s->%s",
+ scsi_device_state_name(oldstate),
+ scsi_device_state_name(state))
+ );
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scsi_device_set_state);
+
+/**
+ * sdev_evt_emit - emit a single SCSI device uevent
+ * @sdev: associated SCSI device
+ * @evt: event to emit
+ *
+ * Send a single uevent (scsi_event) to the associated scsi_device.
+ */
+static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
+{
+ int idx = 0;
+ char *envp[3];
+
+ switch (evt->evt_type) {
+ case SDEV_EVT_MEDIA_CHANGE:
+ envp[idx++] = "SDEV_MEDIA_CHANGE=1";
+ break;
+ case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
+ break;
+ case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
+ break;
+ case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+ envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
+ break;
+ case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
+ break;
+ case SDEV_EVT_LUN_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ envp[idx++] = NULL;
+
+ kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
+}
+
+/**
+ * sdev_evt_thread - send a uevent for each scsi event
+ * @work: work struct for scsi_device
+ *
+ * Dispatch queued events to their associated scsi_device kobjects
+ * as uevents.
+ */
+void scsi_evt_thread(struct work_struct *work)
+{
+ struct scsi_device *sdev;
+ enum scsi_device_event evt_type;
+ LIST_HEAD(event_list);
+
+ sdev = container_of(work, struct scsi_device, event_work);
+
+ for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
+ if (test_and_clear_bit(evt_type, sdev->pending_events))
+ sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
+
+ while (1) {
+ struct scsi_event *evt;
+ struct list_head *this, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_splice_init(&sdev->event_list, &event_list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+
+ if (list_empty(&event_list))
+ break;
+
+ list_for_each_safe(this, tmp, &event_list) {
+ evt = list_entry(this, struct scsi_event, node);
+ list_del(&evt->node);
+ scsi_evt_emit(sdev, evt);
+ kfree(evt);
+ }
+ }
+}
+
+/**
+ * sdev_evt_send - send asserted event to uevent thread
+ * @sdev: scsi_device event occurred on
+ * @evt: event to send
+ *
+ * Assert scsi device event asynchronously.
+ */
+void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
+{
+ unsigned long flags;
+
+#if 0
+ /* FIXME: currently this check eliminates all media change events
+ * for polled devices. Need to update to discriminate between AN
+ * and polled events */
+ if (!test_bit(evt->evt_type, sdev->supported_events)) {
+ kfree(evt);
+ return;
+ }
+#endif
+
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_add_tail(&evt->node, &sdev->event_list);
+ schedule_work(&sdev->event_work);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdev_evt_send);
+
+/**
+ * sdev_evt_alloc - allocate a new scsi event
+ * @evt_type: type of event to allocate
+ * @gfpflags: GFP flags for allocation
+ *
+ * Allocates and returns a new scsi_event.
+ */
+struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
+ gfp_t gfpflags)
+{
+ struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
+ if (!evt)
+ return NULL;
+
+ evt->evt_type = evt_type;
+ INIT_LIST_HEAD(&evt->node);
+
+ /* evt_type-specific initialization, if any */
+ switch (evt_type) {
+ case SDEV_EVT_MEDIA_CHANGE:
+ case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+ case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+ case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+ case SDEV_EVT_LUN_CHANGE_REPORTED:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return evt;
+}
+EXPORT_SYMBOL_GPL(sdev_evt_alloc);
+
+/**
+ * sdev_evt_send_simple - send asserted event to uevent thread
+ * @sdev: scsi_device event occurred on
+ * @evt_type: type of event to send
+ * @gfpflags: GFP flags for allocation
+ *
+ * Assert scsi device event asynchronously, given an event type.
+ */
+void sdev_evt_send_simple(struct scsi_device *sdev,
+ enum scsi_device_event evt_type, gfp_t gfpflags)
+{
+ struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
+ if (!evt) {
+ sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
+ evt_type);
+ return;
+ }
+
+ sdev_evt_send(sdev, evt);
+}
+EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
+
+/**
+ * scsi_device_quiesce - Block user issued commands.
+ * @sdev: scsi device to quiesce.
+ *
+ * This works by trying to transition to the SDEV_QUIESCE state
+ * (which must be a legal transition). When the device is in this
+ * state, only special requests will be accepted, all others will
+ * be deferred. Since special requests may also be requeued requests,
+ * a successful return doesn't guarantee the device will be
+ * totally quiescent.
+ *
+ * Must be called with user context, may sleep.
+ *
+ * Returns zero if unsuccessful or an error if not.
+ */
+int
+scsi_device_quiesce(struct scsi_device *sdev)
+{
+ int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ if (err)
+ return err;
+
+ scsi_run_queue(sdev->request_queue);
+ while (atomic_read(&sdev->device_busy)) {
+ msleep_interruptible(200);
+ scsi_run_queue(sdev->request_queue);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(scsi_device_quiesce);
+
+/**
+ * scsi_device_resume - Restart user issued commands to a quiesced device.
+ * @sdev: scsi device to resume.
+ *
+ * Moves the device from quiesced back to running and restarts the
+ * queues.
+ *
+ * Must be called with user context, may sleep.
+ */
+void scsi_device_resume(struct scsi_device *sdev)
+{
+ /* check if the device state was mutated prior to resume, and if
+ * so assume the state is being managed elsewhere (for example
+ * device deleted during suspend)
+ */
+ if (sdev->sdev_state != SDEV_QUIESCE ||
+ scsi_device_set_state(sdev, SDEV_RUNNING))
+ return;
+ scsi_run_queue(sdev->request_queue);
+}
+EXPORT_SYMBOL(scsi_device_resume);
+
+static void
+device_quiesce_fn(struct scsi_device *sdev, void *data)
+{
+ scsi_device_quiesce(sdev);
+}
+
+void
+scsi_target_quiesce(struct scsi_target *starget)
+{
+ starget_for_each_device(starget, NULL, device_quiesce_fn);
+}
+EXPORT_SYMBOL(scsi_target_quiesce);
+
+static void
+device_resume_fn(struct scsi_device *sdev, void *data)
+{
+ scsi_device_resume(sdev);
+}
+
+void
+scsi_target_resume(struct scsi_target *starget)
+{
+ starget_for_each_device(starget, NULL, device_resume_fn);
+}
+EXPORT_SYMBOL(scsi_target_resume);
+
+/**
+ * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
+ * @sdev: device to block
+ *
+ * Block request made by scsi lld's to temporarily stop all
+ * scsi commands on the specified device. Called from interrupt
+ * or normal process context.
+ *
+ * Returns zero if successful or error if not
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_BLOCK state
+ * (which must be a legal transition). When the device is in this
+ * state, all commands are deferred until the scsi lld reenables
+ * the device with scsi_device_unblock or device_block_tmo fires.
+ */
+int
+scsi_internal_device_block(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ unsigned long flags;
+ int err = 0;
+
+ err = scsi_device_set_state(sdev, SDEV_BLOCK);
+ if (err) {
+ err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+ if (err)
+ return err;
+ }
+
+ /*
+ * The device has transitioned to SDEV_BLOCK. Stop the
+ * block layer from calling the midlayer with this device's
+ * request queue.
+ */
+ if (q->mq_ops) {
+ blk_mq_stop_hw_queues(q);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_internal_device_block);
+
+/**
+ * scsi_internal_device_unblock - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set devices to after unblocking
+ *
+ * Called by scsi lld's or the midlayer to restart the device queue
+ * for the previously suspended scsi device. Called from interrupt or
+ * normal process context.
+ *
+ * Returns zero if successful or error if not.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state
+ * or to one of the offline states (which must be a legal transition)
+ * allowing the midlayer to goose the queue for this device.
+ */
+int
+scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
+{
+ struct request_queue *q = sdev->request_queue;
+ unsigned long flags;
+
+ /*
+ * Try to transition the scsi device to SDEV_RUNNING or one of the
+ * offlined states and goose the device queue if successful.
+ */
+ if ((sdev->sdev_state == SDEV_BLOCK) ||
+ (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
+ sdev->sdev_state = new_state;
+ else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+ if (new_state == SDEV_TRANSPORT_OFFLINE ||
+ new_state == SDEV_OFFLINE)
+ sdev->sdev_state = new_state;
+ else
+ sdev->sdev_state = SDEV_CREATED;
+ } else if (sdev->sdev_state != SDEV_CANCEL &&
+ sdev->sdev_state != SDEV_OFFLINE)
+ return -EINVAL;
+
+ if (q->mq_ops) {
+ blk_mq_start_stopped_hw_queues(q, false);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
+
+static void
+device_block(struct scsi_device *sdev, void *data)
+{
+ scsi_internal_device_block(sdev);
+}
+
+static int
+target_block(struct device *dev, void *data)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), NULL,
+ device_block);
+ return 0;
+}
+
+void
+scsi_target_block(struct device *dev)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), NULL,
+ device_block);
+ else
+ device_for_each_child(dev, NULL, target_block);
+}
+EXPORT_SYMBOL_GPL(scsi_target_block);
+
+static void
+device_unblock(struct scsi_device *sdev, void *data)
+{
+ scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
+}
+
+static int
+target_unblock(struct device *dev, void *data)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), data,
+ device_unblock);
+ return 0;
+}
+
+void
+scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), &new_state,
+ device_unblock);
+ else
+ device_for_each_child(dev, &new_state, target_unblock);
+}
+EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+/**
+ * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
+ * @sgl: scatter-gather list
+ * @sg_count: number of segments in sg
+ * @offset: offset in bytes into sg, on return offset into the mapped area
+ * @len: bytes to map, on return number of bytes mapped
+ *
+ * Returns virtual address of the start of the mapped page
+ */
+void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
+ size_t *offset, size_t *len)
+{
+ int i;
+ size_t sg_len = 0, len_complete = 0;
+ struct scatterlist *sg;
+ struct page *page;
+
+ WARN_ON(!irqs_disabled());
+
+ for_each_sg(sgl, sg, sg_count, i) {
+ len_complete = sg_len; /* Complete sg-entries */
+ sg_len += sg->length;
+ if (sg_len > *offset)
+ break;
+ }
+
+ if (unlikely(i == sg_count)) {
+ printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
+ "elements %d\n",
+ __func__, sg_len, *offset, sg_count);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ /* Offset starting from the beginning of first page in this sg-entry */
+ *offset = *offset - len_complete + sg->offset;
+
+ /* Assumption: contiguous pages can be accessed as "page + i" */
+ page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
+ *offset &= ~PAGE_MASK;
+
+ /* Bytes in this sg-entry from *offset to the end of the page */
+ sg_len = PAGE_SIZE - *offset;
+ if (*len > sg_len)
+ *len = sg_len;
+
+ return kmap_atomic(page);
+}
+EXPORT_SYMBOL(scsi_kmap_atomic_sg);
+
+/**
+ * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
+ * @virt: virtual address to be unmapped
+ */
+void scsi_kunmap_atomic_sg(void *virt)
+{
+ kunmap_atomic(virt);
+}
+EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
+
+void sdev_disable_disk_events(struct scsi_device *sdev)
+{
+ atomic_inc(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_disable_disk_events);
+
+void sdev_enable_disk_events(struct scsi_device *sdev)
+{
+ if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
+ return;
+ atomic_dec(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_enable_disk_events);
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
new file mode 100644
index 000000000..2ac3f3975
--- /dev/null
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -0,0 +1,51 @@
+/*
+ * SCSI library functions depending on DMA
+ */
+
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+/**
+ * scsi_dma_map - perform DMA mapping against command's sg lists
+ * @cmd: scsi command
+ *
+ * Returns the number of sg lists actually used, zero if the sg lists
+ * is NULL, or -ENOMEM if the mapping failed.
+ */
+int scsi_dma_map(struct scsi_cmnd *cmd)
+{
+ int nseg = 0;
+
+ if (scsi_sg_count(cmd)) {
+ struct device *dev = cmd->device->host->dma_dev;
+
+ nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
+ cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ return -ENOMEM;
+ }
+ return nseg;
+}
+EXPORT_SYMBOL(scsi_dma_map);
+
+/**
+ * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
+ * @cmd: scsi command
+ */
+void scsi_dma_unmap(struct scsi_cmnd *cmd)
+{
+ if (scsi_sg_count(cmd)) {
+ struct device *dev = cmd->device->host->dma_dev;
+
+ dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
+ cmd->sc_data_direction);
+ }
+}
+EXPORT_SYMBOL(scsi_dma_unmap);
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
new file mode 100644
index 000000000..bd70339c1
--- /dev/null
+++ b/drivers/scsi/scsi_logging.c
@@ -0,0 +1,485 @@
+/*
+ * scsi_logging.c
+ *
+ * Copyright (C) 2014 SUSE Linux Products GmbH
+ * Copyright (C) 2014 Hannes Reinecke <hare@suse.de>
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
+
+#define SCSI_LOG_SPOOLSIZE 4096
+
+#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
+#warning SCSI logging bitmask too large
+#endif
+
+struct scsi_log_buf {
+ char buffer[SCSI_LOG_SPOOLSIZE];
+ unsigned long map;
+};
+
+static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
+
+static char *scsi_log_reserve_buffer(size_t *len)
+{
+ struct scsi_log_buf *buf;
+ unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
+ unsigned long idx = 0;
+
+ preempt_disable();
+ buf = this_cpu_ptr(&scsi_format_log);
+ idx = find_first_zero_bit(&buf->map, map_bits);
+ if (likely(idx < map_bits)) {
+ while (test_and_set_bit(idx, &buf->map)) {
+ idx = find_next_zero_bit(&buf->map, map_bits, idx);
+ if (idx >= map_bits)
+ break;
+ }
+ }
+ if (WARN_ON(idx >= map_bits)) {
+ preempt_enable();
+ return NULL;
+ }
+ *len = SCSI_LOG_BUFSIZE;
+ return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+}
+
+static void scsi_log_release_buffer(char *bufptr)
+{
+ struct scsi_log_buf *buf;
+ unsigned long idx;
+ int ret;
+
+ buf = this_cpu_ptr(&scsi_format_log);
+ if (bufptr >= buf->buffer &&
+ bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
+ idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
+ ret = test_and_clear_bit(idx, &buf->map);
+ WARN_ON(!ret);
+ }
+ preempt_enable();
+}
+
+static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+{
+ return scmd->request->rq_disk ?
+ scmd->request->rq_disk->disk_name : NULL;
+}
+
+static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
+ const char *name, int tag)
+{
+ size_t off = 0;
+
+ if (name)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "[%s] ", name);
+
+ if (WARN_ON(off >= logbuf_len))
+ return off;
+
+ if (tag >= 0)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "tag#%d ", tag);
+ return off;
+}
+
+void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
+ const char *name, const char *fmt, ...)
+{
+ va_list args;
+ char *logbuf;
+ size_t off = 0, logbuf_len;
+
+ if (!sdev)
+ return;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ if (name)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "[%s] ", name);
+ if (!WARN_ON(off >= logbuf_len)) {
+ va_start(args, fmt);
+ off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
+ va_end(args);
+ }
+ dev_printk(level, &sdev->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(sdev_prefix_printk);
+
+void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
+ const char *fmt, ...)
+{
+ va_list args;
+ char *logbuf;
+ size_t off = 0, logbuf_len;
+
+ if (!scmd || !scmd->cmnd)
+ return;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+ off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
+ scmd->request->tag);
+ if (off < logbuf_len) {
+ va_start(args, fmt);
+ off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
+ va_end(args);
+ }
+ dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scmd_printk);
+
+static size_t scsi_format_opcode_name(char *buffer, size_t buf_len,
+ const unsigned char *cdbp)
+{
+ int sa, cdb0;
+ const char *cdb_name = NULL, *sa_name = NULL;
+ size_t off;
+
+ cdb0 = cdbp[0];
+ if (cdb0 == VARIABLE_LENGTH_CMD) {
+ int len = scsi_varlen_cdb_length(cdbp);
+
+ if (len < 10) {
+ off = scnprintf(buffer, buf_len,
+ "short variable length command, len=%d",
+ len);
+ return off;
+ }
+ sa = (cdbp[8] << 8) + cdbp[9];
+ } else
+ sa = cdbp[1] & 0x1f;
+
+ if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
+ if (cdb_name)
+ off = scnprintf(buffer, buf_len, "%s", cdb_name);
+ else {
+ off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0);
+ if (WARN_ON(off >= buf_len))
+ return off;
+ if (cdb0 >= VENDOR_SPECIFIC_CDB)
+ off += scnprintf(buffer + off, buf_len - off,
+ " (vendor)");
+ else if (cdb0 >= 0x60 && cdb0 < 0x7e)
+ off += scnprintf(buffer + off, buf_len - off,
+ " (reserved)");
+ }
+ } else {
+ if (sa_name)
+ off = scnprintf(buffer, buf_len, "%s", sa_name);
+ else if (cdb_name)
+ off = scnprintf(buffer, buf_len, "%s, sa=0x%x",
+ cdb_name, sa);
+ else
+ off = scnprintf(buffer, buf_len,
+ "opcode=0x%x, sa=0x%x", cdb0, sa);
+ }
+ WARN_ON(off >= buf_len);
+ return off;
+}
+
+size_t __scsi_format_command(char *logbuf, size_t logbuf_len,
+ const unsigned char *cdb, size_t cdb_len)
+{
+ int len, k;
+ size_t off;
+
+ off = scsi_format_opcode_name(logbuf, logbuf_len, cdb);
+ if (off >= logbuf_len)
+ return off;
+ len = scsi_command_size(cdb);
+ if (cdb_len < len)
+ len = cdb_len;
+ /* print out all bytes in cdb */
+ for (k = 0; k < len; ++k) {
+ if (off > logbuf_len - 3)
+ break;
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ " %02x", cdb[k]);
+ }
+ return off;
+}
+EXPORT_SYMBOL(__scsi_format_command);
+
+void scsi_print_command(struct scsi_cmnd *cmd)
+{
+ int k;
+ char *logbuf;
+ size_t off, logbuf_len;
+
+ if (!cmd->cmnd)
+ return;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ off = sdev_format_header(logbuf, logbuf_len,
+ scmd_name(cmd), cmd->request->tag);
+ if (off >= logbuf_len)
+ goto out_printk;
+ off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ off += scsi_format_opcode_name(logbuf + off, logbuf_len - off,
+ cmd->cmnd);
+ if (off >= logbuf_len)
+ goto out_printk;
+
+ /* print out all bytes in cdb */
+ if (cmd->cmd_len > 16) {
+ /* Print opcode in one line and use separate lines for CDB */
+ off += scnprintf(logbuf + off, logbuf_len - off, "\n");
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+ for (k = 0; k < cmd->cmd_len; k += 16) {
+ size_t linelen = min(cmd->cmd_len - k, 16);
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ break;
+ off = sdev_format_header(logbuf, logbuf_len,
+ scmd_name(cmd),
+ cmd->request->tag);
+ if (!WARN_ON(off > logbuf_len - 58)) {
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "CDB[%02x]: ", k);
+ hex_dump_to_buffer(&cmd->cmnd[k], linelen,
+ 16, 1, logbuf + off,
+ logbuf_len - off, false);
+ }
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s",
+ logbuf);
+ scsi_log_release_buffer(logbuf);
+ }
+ return;
+ }
+ if (!WARN_ON(off > logbuf_len - 49)) {
+ off += scnprintf(logbuf + off, logbuf_len - off, " ");
+ hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1,
+ logbuf + off, logbuf_len - off,
+ false);
+ }
+out_printk:
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scsi_print_command);
+
+static size_t
+scsi_format_extd_sense(char *buffer, size_t buf_len,
+ unsigned char asc, unsigned char ascq)
+{
+ size_t off = 0;
+ const char *extd_sense_fmt = NULL;
+ const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
+ &extd_sense_fmt);
+
+ if (extd_sense_str) {
+ off = scnprintf(buffer, buf_len, "Add. Sense: %s",
+ extd_sense_str);
+ if (extd_sense_fmt)
+ off += scnprintf(buffer + off, buf_len - off,
+ "(%s%x)", extd_sense_fmt, ascq);
+ } else {
+ if (asc >= 0x80)
+ off = scnprintf(buffer, buf_len, "<<vendor>>");
+ off += scnprintf(buffer + off, buf_len - off,
+ "ASC=0x%x ", asc);
+ if (ascq >= 0x80)
+ off += scnprintf(buffer + off, buf_len - off,
+ "<<vendor>>");
+ off += scnprintf(buffer + off, buf_len - off,
+ "ASCQ=0x%x ", ascq);
+ }
+ return off;
+}
+
+static size_t
+scsi_format_sense_hdr(char *buffer, size_t buf_len,
+ const struct scsi_sense_hdr *sshdr)
+{
+ const char *sense_txt;
+ size_t off;
+
+ off = scnprintf(buffer, buf_len, "Sense Key : ");
+ sense_txt = scsi_sense_key_string(sshdr->sense_key);
+ if (sense_txt)
+ off += scnprintf(buffer + off, buf_len - off,
+ "%s ", sense_txt);
+ else
+ off += scnprintf(buffer + off, buf_len - off,
+ "0x%x ", sshdr->sense_key);
+ off += scnprintf(buffer + off, buf_len - off,
+ scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] ");
+
+ if (sshdr->response_code >= 0x72)
+ off += scnprintf(buffer + off, buf_len - off, "[descriptor] ");
+ return off;
+}
+
+static void
+scsi_log_dump_sense(const struct scsi_device *sdev, const char *name, int tag,
+ const unsigned char *sense_buffer, int sense_len)
+{
+ char *logbuf;
+ size_t logbuf_len;
+ int i;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ for (i = 0; i < sense_len; i += 16) {
+ int len = min(sense_len - i, 16);
+ size_t off;
+
+ off = sdev_format_header(logbuf, logbuf_len,
+ name, tag);
+ hex_dump_to_buffer(&sense_buffer[i], len, 16, 1,
+ logbuf + off, logbuf_len - off,
+ false);
+ dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+ }
+ scsi_log_release_buffer(logbuf);
+}
+
+static void
+scsi_log_print_sense_hdr(const struct scsi_device *sdev, const char *name,
+ int tag, const struct scsi_sense_hdr *sshdr)
+{
+ char *logbuf;
+ size_t off, logbuf_len;
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+ off = sdev_format_header(logbuf, logbuf_len, name, tag);
+ off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr);
+ dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+ off = sdev_format_header(logbuf, logbuf_len, name, tag);
+ off += scsi_format_extd_sense(logbuf + off, logbuf_len - off,
+ sshdr->asc, sshdr->ascq);
+ dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+
+static void
+scsi_log_print_sense(const struct scsi_device *sdev, const char *name, int tag,
+ const unsigned char *sense_buffer, int sense_len)
+{
+ struct scsi_sense_hdr sshdr;
+
+ if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr))
+ scsi_log_print_sense_hdr(sdev, name, tag, &sshdr);
+ else
+ scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len);
+}
+
+/*
+ * Print normalized SCSI sense header with a prefix.
+ */
+void
+scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
+ const struct scsi_sense_hdr *sshdr)
+{
+ scsi_log_print_sense_hdr(sdev, name, -1, sshdr);
+}
+EXPORT_SYMBOL(scsi_print_sense_hdr);
+
+/* Normalize and print sense buffer with name prefix */
+void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
+ const unsigned char *sense_buffer, int sense_len)
+{
+ scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len);
+}
+EXPORT_SYMBOL(__scsi_print_sense);
+
+/* Normalize and print sense buffer in SCSI command */
+void scsi_print_sense(const struct scsi_cmnd *cmd)
+{
+ scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+}
+EXPORT_SYMBOL(scsi_print_sense);
+
+void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
+ int disposition)
+{
+ char *logbuf;
+ size_t off, logbuf_len;
+ const char *mlret_string = scsi_mlreturn_string(disposition);
+ const char *hb_string = scsi_hostbyte_string(cmd->result);
+ const char *db_string = scsi_driverbyte_string(cmd->result);
+
+ logbuf = scsi_log_reserve_buffer(&logbuf_len);
+ if (!logbuf)
+ return;
+
+ off = sdev_format_header(logbuf, logbuf_len,
+ scmd_name(cmd), cmd->request->tag);
+
+ if (off >= logbuf_len)
+ goto out_printk;
+
+ if (msg) {
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "%s: ", msg);
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+ }
+ if (mlret_string)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "%s ", mlret_string);
+ else
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "UNKNOWN(0x%02x) ", disposition);
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ off += scnprintf(logbuf + off, logbuf_len - off, "Result: ");
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ if (hb_string)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "hostbyte=%s ", hb_string);
+ else
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "hostbyte=0x%02x ", host_byte(cmd->result));
+ if (WARN_ON(off >= logbuf_len))
+ goto out_printk;
+
+ if (db_string)
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=%s", db_string);
+ else
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=0x%02x", driver_byte(cmd->result));
+out_printk:
+ dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
+ scsi_log_release_buffer(logbuf);
+}
+EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
new file mode 100644
index 000000000..7fe64a847
--- /dev/null
+++ b/drivers/scsi/scsi_logging.h
@@ -0,0 +1,84 @@
+#ifndef _SCSI_LOGGING_H
+#define _SCSI_LOGGING_H
+
+
+/*
+ * This defines the scsi logging feature. It is a means by which the user
+ * can select how much information they get about various goings on, and it
+ * can be really useful for fault tracing. The logging word is divided into
+ * 8 nibbles, each of which describes a loglevel. The division of things is
+ * somewhat arbitrary, and the division of the word could be changed if it
+ * were really needed for any reason. The numbers below are the only place
+ * where these are specified. For a first go-around, 3 bits is more than
+ * enough, since this gives 8 levels of logging (really 7, since 0 is always
+ * off). Cutting to 2 bits might be wise at some point.
+ */
+
+#define SCSI_LOG_ERROR_SHIFT 0
+#define SCSI_LOG_TIMEOUT_SHIFT 3
+#define SCSI_LOG_SCAN_SHIFT 6
+#define SCSI_LOG_MLQUEUE_SHIFT 9
+#define SCSI_LOG_MLCOMPLETE_SHIFT 12
+#define SCSI_LOG_LLQUEUE_SHIFT 15
+#define SCSI_LOG_LLCOMPLETE_SHIFT 18
+#define SCSI_LOG_HLQUEUE_SHIFT 21
+#define SCSI_LOG_HLCOMPLETE_SHIFT 24
+#define SCSI_LOG_IOCTL_SHIFT 27
+
+#define SCSI_LOG_ERROR_BITS 3
+#define SCSI_LOG_TIMEOUT_BITS 3
+#define SCSI_LOG_SCAN_BITS 3
+#define SCSI_LOG_MLQUEUE_BITS 3
+#define SCSI_LOG_MLCOMPLETE_BITS 3
+#define SCSI_LOG_LLQUEUE_BITS 3
+#define SCSI_LOG_LLCOMPLETE_BITS 3
+#define SCSI_LOG_HLQUEUE_BITS 3
+#define SCSI_LOG_HLCOMPLETE_BITS 3
+#define SCSI_LOG_IOCTL_BITS 3
+
+extern unsigned int scsi_logging_level;
+
+#ifdef CONFIG_SCSI_LOGGING
+
+#define SCSI_LOG_LEVEL(SHIFT, BITS) \
+ ((scsi_logging_level >> (SHIFT)) & ((1 << (BITS)) - 1))
+
+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) \
+do { \
+ if (unlikely((SCSI_LOG_LEVEL(SHIFT, BITS)) > (LEVEL))) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+#else
+#define SCSI_LOG_LEVEL(SHIFT, BITS) 0
+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
+#endif /* CONFIG_SCSI_LOGGING */
+
+/*
+ * These are the macros that are actually used throughout the code to
+ * log events. If logging isn't enabled, they are no-ops and will be
+ * completely absent from the user's code.
+ */
+#define SCSI_LOG_ERROR_RECOVERY(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_ERROR_SHIFT, SCSI_LOG_ERROR_BITS, LEVEL,CMD);
+#define SCSI_LOG_TIMEOUT(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_TIMEOUT_SHIFT, SCSI_LOG_TIMEOUT_BITS, LEVEL,CMD);
+#define SCSI_LOG_SCAN_BUS(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_SCAN_SHIFT, SCSI_LOG_SCAN_BITS, LEVEL,CMD);
+#define SCSI_LOG_MLQUEUE(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS, LEVEL,CMD);
+#define SCSI_LOG_MLCOMPLETE(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_MLCOMPLETE_SHIFT, SCSI_LOG_MLCOMPLETE_BITS, LEVEL,CMD);
+#define SCSI_LOG_LLQUEUE(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_LLQUEUE_SHIFT, SCSI_LOG_LLQUEUE_BITS, LEVEL,CMD);
+#define SCSI_LOG_LLCOMPLETE(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_LLCOMPLETE_SHIFT, SCSI_LOG_LLCOMPLETE_BITS, LEVEL,CMD);
+#define SCSI_LOG_HLQUEUE(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_HLQUEUE_SHIFT, SCSI_LOG_HLQUEUE_BITS, LEVEL,CMD);
+#define SCSI_LOG_HLCOMPLETE(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_HLCOMPLETE_SHIFT, SCSI_LOG_HLCOMPLETE_BITS, LEVEL,CMD);
+#define SCSI_LOG_IOCTL(LEVEL,CMD) \
+ SCSI_CHECK_LOGGING(SCSI_LOG_IOCTL_SHIFT, SCSI_LOG_IOCTL_BITS, LEVEL,CMD);
+
+#endif /* _SCSI_LOGGING_H */
diff --git a/drivers/scsi/scsi_module.c b/drivers/scsi/scsi_module.c
new file mode 100644
index 000000000..489175833
--- /dev/null
+++ b/drivers/scsi/scsi_module.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2003 Christoph Hellwig.
+ * Released under GPL v2.
+ *
+ * Support for old-style host templates.
+ *
+ * NOTE: Do not use this for new drivers ever.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <scsi/scsi_host.h>
+
+
+static int __init init_this_scsi_driver(void)
+{
+ struct scsi_host_template *sht = &driver_template;
+ struct Scsi_Host *shost;
+ struct list_head *l;
+ int error;
+
+ if (!sht->release) {
+ printk(KERN_ERR
+ "scsi HBA driver %s didn't set a release method.\n",
+ sht->name);
+ return -EINVAL;
+ }
+
+ sht->module = THIS_MODULE;
+ INIT_LIST_HEAD(&sht->legacy_hosts);
+
+ sht->detect(sht);
+ if (list_empty(&sht->legacy_hosts))
+ return -ENODEV;
+
+ list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list) {
+ error = scsi_add_host(shost, NULL);
+ if (error)
+ goto fail;
+ scsi_scan_host(shost);
+ }
+ return 0;
+ fail:
+ l = &shost->sht_legacy_list;
+ while ((l = l->prev) != &sht->legacy_hosts)
+ scsi_remove_host(list_entry(l, struct Scsi_Host, sht_legacy_list));
+ return error;
+}
+
+static void __exit exit_this_scsi_driver(void)
+{
+ struct scsi_host_template *sht = &driver_template;
+ struct Scsi_Host *shost, *s;
+
+ list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list)
+ scsi_remove_host(shost);
+ list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
+ sht->release(shost);
+
+ if (list_empty(&sht->legacy_hosts))
+ return;
+
+ printk(KERN_WARNING "%s did not call scsi_unregister\n", sht->name);
+ dump_stack();
+
+ list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
+ scsi_unregister(shost);
+}
+
+module_init(init_this_scsi_driver);
+module_exit(exit_this_scsi_driver);
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
new file mode 100644
index 000000000..109802f77
--- /dev/null
+++ b/drivers/scsi/scsi_netlink.c
@@ -0,0 +1,158 @@
+/*
+ * scsi_netlink.c - SCSI Transport Netlink Interface
+ *
+ * Copyright (C) 2006 James Smart, Emulex Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <linux/security.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <net/sock.h>
+#include <net/netlink.h>
+
+#include <scsi/scsi_netlink.h>
+#include "scsi_priv.h"
+
+struct sock *scsi_nl_sock = NULL;
+EXPORT_SYMBOL_GPL(scsi_nl_sock);
+
+/**
+ * scsi_nl_rcv_msg - Receive message handler.
+ * @skb: socket receive buffer
+ *
+ * Description: Extracts message from a receive buffer.
+ * Validates message header and calls appropriate transport message handler
+ *
+ *
+ **/
+static void
+scsi_nl_rcv_msg(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ struct scsi_nl_hdr *hdr;
+ u32 rlen;
+ int err, tport;
+
+ while (skb->len >= NLMSG_HDRLEN) {
+ err = 0;
+
+ nlh = nlmsg_hdr(skb);
+ if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
+ (skb->len < nlh->nlmsg_len)) {
+ printk(KERN_WARNING "%s: discarding partial skb\n",
+ __func__);
+ return;
+ }
+
+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (rlen > skb->len)
+ rlen = skb->len;
+
+ if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
+ err = -EBADMSG;
+ goto next_msg;
+ }
+
+ hdr = nlmsg_data(nlh);
+ if ((hdr->version != SCSI_NL_VERSION) ||
+ (hdr->magic != SCSI_NL_MAGIC)) {
+ err = -EPROTOTYPE;
+ goto next_msg;
+ }
+
+ if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
+ err = -EPERM;
+ goto next_msg;
+ }
+
+ if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
+ printk(KERN_WARNING "%s: discarding partial message\n",
+ __func__);
+ goto next_msg;
+ }
+
+ /*
+ * Deliver message to the appropriate transport
+ */
+ tport = hdr->transport;
+ if (tport == SCSI_NL_TRANSPORT) {
+ switch (hdr->msgtype) {
+ case SCSI_NL_SHOST_VENDOR:
+ /* Locate the driver that corresponds to the message */
+ err = -ESRCH;
+ break;
+ default:
+ err = -EBADR;
+ break;
+ }
+ if (err)
+ printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
+ __func__, hdr->msgtype, err);
+ }
+ else
+ err = -ENOENT;
+
+next_msg:
+ if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
+ netlink_ack(skb, nlh, err);
+
+ skb_pull(skb, rlen);
+ }
+}
+
+/**
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
+ * the SCSI transport netlink interface
+ *
+ **/
+void
+scsi_netlink_init(void)
+{
+ struct netlink_kernel_cfg cfg = {
+ .input = scsi_nl_rcv_msg,
+ .groups = SCSI_NL_GRP_CNT,
+ };
+
+ scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
+ &cfg);
+ if (!scsi_nl_sock) {
+ printk(KERN_ERR "%s: register of receive handler failed\n",
+ __func__);
+ return;
+ }
+
+ return;
+}
+
+
+/**
+ * scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface
+ *
+ **/
+void
+scsi_netlink_exit(void)
+{
+ if (scsi_nl_sock) {
+ netlink_kernel_release(scsi_nl_sock);
+ }
+
+ return;
+}
+
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
new file mode 100644
index 000000000..9e43ae1d2
--- /dev/null
+++ b/drivers/scsi/scsi_pm.c
@@ -0,0 +1,344 @@
+/*
+ * scsi_pm.c Copyright (C) 2010 Alan Stern
+ *
+ * SCSI dynamic Power Management
+ * Initial version: Alan Stern <stern@rowland.harvard.edu>
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/export.h>
+#include <linux/async.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_host.h>
+
+#include "scsi_priv.h"
+
+#ifdef CONFIG_PM_SLEEP
+
+static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+
+static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+
+static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+
+static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->resume ? pm->resume(dev) : 0;
+}
+
+static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+
+static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
+{
+ return pm && pm->restore ? pm->restore(dev) : 0;
+}
+
+static int scsi_dev_type_suspend(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int err;
+
+ /* flush pending in-flight resume operations, suspend is synchronous */
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+
+ err = scsi_device_quiesce(to_scsi_device(dev));
+ if (err == 0) {
+ err = cb(dev, pm);
+ if (err)
+ scsi_device_resume(to_scsi_device(dev));
+ }
+ dev_dbg(dev, "scsi suspend: %d\n", err);
+ return err;
+}
+
+static int scsi_dev_type_resume(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int err = 0;
+
+ err = cb(dev, pm);
+ scsi_device_resume(to_scsi_device(dev));
+ dev_dbg(dev, "scsi resume: %d\n", err);
+
+ if (err == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return err;
+}
+
+static int
+scsi_bus_suspend_common(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ int err = 0;
+
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * All the high-level SCSI drivers that implement runtime
+ * PM treat runtime suspend, system suspend, and system
+ * hibernate nearly identically. In all cases the requirements
+ * for runtime suspension are stricter.
+ */
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ err = scsi_dev_type_suspend(dev, cb);
+ }
+
+ return err;
+}
+
+static void async_sdev_resume(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_resume);
+}
+
+static void async_sdev_thaw(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_thaw);
+}
+
+static void async_sdev_restore(void *dev, async_cookie_t cookie)
+{
+ scsi_dev_type_resume(dev, do_scsi_restore);
+}
+
+static int scsi_bus_resume_common(struct device *dev,
+ int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+ async_func_t fn;
+
+ if (!scsi_is_sdev_device(dev))
+ fn = NULL;
+ else if (cb == do_scsi_resume)
+ fn = async_sdev_resume;
+ else if (cb == do_scsi_thaw)
+ fn = async_sdev_thaw;
+ else if (cb == do_scsi_restore)
+ fn = async_sdev_restore;
+ else
+ fn = NULL;
+
+ if (fn) {
+ async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+ /*
+ * If a user has disabled async probing a likely reason
+ * is due to a storage enclosure that does not inject
+ * staggered spin-ups. For safety, make resume
+ * synchronous as well in that case.
+ */
+ if (strncmp(scsi_scan_type, "async", 5) != 0)
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+ } else {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+ return 0;
+}
+
+static int scsi_bus_prepare(struct device *dev)
+{
+ if (scsi_is_sdev_device(dev)) {
+ /* sd probing uses async_schedule. Wait until it finishes. */
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
+
+ } else if (scsi_is_host_device(dev)) {
+ /* Wait until async scanning is finished */
+ scsi_complete_async_scans();
+ }
+ return 0;
+}
+
+static int scsi_bus_suspend(struct device *dev)
+{
+ return scsi_bus_suspend_common(dev, do_scsi_suspend);
+}
+
+static int scsi_bus_resume(struct device *dev)
+{
+ return scsi_bus_resume_common(dev, do_scsi_resume);
+}
+
+static int scsi_bus_freeze(struct device *dev)
+{
+ return scsi_bus_suspend_common(dev, do_scsi_freeze);
+}
+
+static int scsi_bus_thaw(struct device *dev)
+{
+ return scsi_bus_resume_common(dev, do_scsi_thaw);
+}
+
+static int scsi_bus_poweroff(struct device *dev)
+{
+ return scsi_bus_suspend_common(dev, do_scsi_poweroff);
+}
+
+static int scsi_bus_restore(struct device *dev)
+{
+ return scsi_bus_resume_common(dev, do_scsi_restore);
+}
+
+#else /* CONFIG_PM_SLEEP */
+
+#define scsi_bus_prepare NULL
+#define scsi_bus_suspend NULL
+#define scsi_bus_resume NULL
+#define scsi_bus_freeze NULL
+#define scsi_bus_thaw NULL
+#define scsi_bus_poweroff NULL
+#define scsi_bus_restore NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static int sdev_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int err;
+
+ err = blk_pre_runtime_suspend(sdev->request_queue);
+ if (err)
+ return err;
+ if (pm && pm->runtime_suspend)
+ err = pm->runtime_suspend(dev);
+ blk_post_runtime_suspend(sdev->request_queue, err);
+
+ return err;
+}
+
+static int scsi_runtime_suspend(struct device *dev)
+{
+ int err = 0;
+
+ dev_dbg(dev, "scsi_runtime_suspend\n");
+ if (scsi_is_sdev_device(dev))
+ err = sdev_runtime_suspend(dev);
+
+ /* Insert hooks here for targets, hosts, and transport classes */
+
+ return err;
+}
+
+static int sdev_runtime_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int err = 0;
+
+ blk_pre_runtime_resume(sdev->request_queue);
+ if (pm && pm->runtime_resume)
+ err = pm->runtime_resume(dev);
+ blk_post_runtime_resume(sdev->request_queue, err);
+
+ return err;
+}
+
+static int scsi_runtime_resume(struct device *dev)
+{
+ int err = 0;
+
+ dev_dbg(dev, "scsi_runtime_resume\n");
+ if (scsi_is_sdev_device(dev))
+ err = sdev_runtime_resume(dev);
+
+ /* Insert hooks here for targets, hosts, and transport classes */
+
+ return err;
+}
+
+static int scsi_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "scsi_runtime_idle\n");
+
+ /* Insert hooks here for targets, hosts, and transport classes */
+
+ if (scsi_is_sdev_device(dev)) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int scsi_autopm_get_device(struct scsi_device *sdev)
+{
+ int err;
+
+ err = pm_runtime_get_sync(&sdev->sdev_gendev);
+ if (err < 0 && err !=-EACCES)
+ pm_runtime_put_sync(&sdev->sdev_gendev);
+ else
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_autopm_get_device);
+
+void scsi_autopm_put_device(struct scsi_device *sdev)
+{
+ pm_runtime_put_sync(&sdev->sdev_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_autopm_put_device);
+
+void scsi_autopm_get_target(struct scsi_target *starget)
+{
+ pm_runtime_get_sync(&starget->dev);
+}
+
+void scsi_autopm_put_target(struct scsi_target *starget)
+{
+ pm_runtime_put_sync(&starget->dev);
+}
+
+int scsi_autopm_get_host(struct Scsi_Host *shost)
+{
+ int err;
+
+ err = pm_runtime_get_sync(&shost->shost_gendev);
+ if (err < 0 && err !=-EACCES)
+ pm_runtime_put_sync(&shost->shost_gendev);
+ else
+ err = 0;
+ return err;
+}
+
+void scsi_autopm_put_host(struct Scsi_Host *shost)
+{
+ pm_runtime_put_sync(&shost->shost_gendev);
+}
+
+const struct dev_pm_ops scsi_bus_pm_ops = {
+ .prepare = scsi_bus_prepare,
+ .suspend = scsi_bus_suspend,
+ .resume = scsi_bus_resume,
+ .freeze = scsi_bus_freeze,
+ .thaw = scsi_bus_thaw,
+ .poweroff = scsi_bus_poweroff,
+ .restore = scsi_bus_restore,
+ .runtime_suspend = scsi_runtime_suspend,
+ .runtime_resume = scsi_runtime_resume,
+ .runtime_idle = scsi_runtime_idle,
+};
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
new file mode 100644
index 000000000..e3902fc66
--- /dev/null
+++ b/drivers/scsi/scsi_priv.h
@@ -0,0 +1,183 @@
+#ifndef _SCSI_PRIV_H
+#define _SCSI_PRIV_H
+
+#include <linux/device.h>
+#include <linux/async.h>
+#include <scsi/scsi_device.h>
+
+struct request_queue;
+struct request;
+struct scsi_cmnd;
+struct scsi_device;
+struct scsi_target;
+struct scsi_host_template;
+struct Scsi_Host;
+struct scsi_nl_hdr;
+
+
+/*
+ * Scsi Error Handler Flags
+ */
+#define SCSI_EH_CANCEL_CMD 0x0001 /* Cancel this cmd */
+#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */
+
+#define SCSI_SENSE_VALID(scmd) \
+ (((scmd)->sense_buffer[0] & 0x70) == 0x70)
+
+/* hosts.c */
+extern int scsi_init_hosts(void);
+extern void scsi_exit_hosts(void);
+
+/* scsi.c */
+extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
+extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
+#ifdef CONFIG_SCSI_LOGGING
+void scsi_log_send(struct scsi_cmnd *cmd);
+void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
+#else
+static inline void scsi_log_send(struct scsi_cmnd *cmd)
+ { };
+static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
+ { };
+#endif
+
+/* scsi_devinfo.c */
+
+/* list of keys for the lists */
+enum {
+ SCSI_DEVINFO_GLOBAL = 0,
+ SCSI_DEVINFO_SPI,
+};
+
+extern int scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model);
+extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model, int key);
+extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
+ char *model, char *strflags,
+ int flags, int key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
+extern int scsi_dev_info_add_list(int key, const char *name);
+extern int scsi_dev_info_remove_list(int key);
+
+extern int __init scsi_init_devinfo(void);
+extern void scsi_exit_devinfo(void);
+
+/* scsi_error.c */
+extern void scmd_eh_abort_handler(struct work_struct *work);
+extern enum blk_eh_timer_return scsi_times_out(struct request *req);
+extern int scsi_error_handler(void *host);
+extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
+void scsi_eh_ready_devs(struct Scsi_Host *shost,
+ struct list_head *work_q,
+ struct list_head *done_q);
+int scsi_eh_get_sense(struct list_head *work_q,
+ struct list_head *done_q);
+int scsi_noretry_cmd(struct scsi_cmnd *scmd);
+
+/* scsi_lib.c */
+extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
+extern void scsi_device_unbusy(struct scsi_device *sdev);
+extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
+extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
+extern void scsi_run_host_queues(struct Scsi_Host *shost);
+extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
+extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern int scsi_init_queue(void);
+extern void scsi_exit_queue(void);
+struct request_queue;
+struct request;
+extern struct kmem_cache *scsi_sdb_cache;
+
+/* scsi_proc.c */
+#ifdef CONFIG_SCSI_PROC_FS
+extern void scsi_proc_hostdir_add(struct scsi_host_template *);
+extern void scsi_proc_hostdir_rm(struct scsi_host_template *);
+extern void scsi_proc_host_add(struct Scsi_Host *);
+extern void scsi_proc_host_rm(struct Scsi_Host *);
+extern int scsi_init_procfs(void);
+extern void scsi_exit_procfs(void);
+#else
+# define scsi_proc_hostdir_add(sht) do { } while (0)
+# define scsi_proc_hostdir_rm(sht) do { } while (0)
+# define scsi_proc_host_add(shost) do { } while (0)
+# define scsi_proc_host_rm(shost) do { } while (0)
+# define scsi_init_procfs() (0)
+# define scsi_exit_procfs() do { } while (0)
+#endif /* CONFIG_PROC_FS */
+
+/* scsi_scan.c */
+extern char scsi_scan_type[];
+extern int scsi_complete_async_scans(void);
+extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
+ unsigned int, u64, int);
+extern void scsi_forget_host(struct Scsi_Host *);
+extern void scsi_rescan_device(struct device *);
+
+/* scsi_sysctl.c */
+#ifdef CONFIG_SYSCTL
+extern int scsi_init_sysctl(void);
+extern void scsi_exit_sysctl(void);
+#else
+# define scsi_init_sysctl() (0)
+# define scsi_exit_sysctl() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
+/* scsi_sysfs.c */
+extern int scsi_sysfs_add_sdev(struct scsi_device *);
+extern int scsi_sysfs_add_host(struct Scsi_Host *);
+extern int scsi_sysfs_register(void);
+extern void scsi_sysfs_unregister(void);
+extern void scsi_sysfs_device_initialize(struct scsi_device *);
+extern int scsi_sysfs_target_initialize(struct scsi_device *);
+extern struct scsi_transport_template blank_transport_template;
+extern void __scsi_remove_device(struct scsi_device *);
+
+extern struct bus_type scsi_bus_type;
+extern const struct attribute_group *scsi_sysfs_shost_attr_groups[];
+
+/* scsi_netlink.c */
+#ifdef CONFIG_SCSI_NETLINK
+extern void scsi_netlink_init(void);
+extern void scsi_netlink_exit(void);
+extern struct sock *scsi_nl_sock;
+#else
+static inline void scsi_netlink_init(void) {}
+static inline void scsi_netlink_exit(void) {}
+#endif
+
+/* scsi_pm.c */
+#ifdef CONFIG_PM
+extern const struct dev_pm_ops scsi_bus_pm_ops;
+
+extern void scsi_autopm_get_target(struct scsi_target *);
+extern void scsi_autopm_put_target(struct scsi_target *);
+extern int scsi_autopm_get_host(struct Scsi_Host *);
+extern void scsi_autopm_put_host(struct Scsi_Host *);
+#else
+static inline void scsi_autopm_get_target(struct scsi_target *t) {}
+static inline void scsi_autopm_put_target(struct scsi_target *t) {}
+static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
+static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
+#endif /* CONFIG_PM */
+
+extern struct async_domain scsi_sd_pm_domain;
+extern struct async_domain scsi_sd_probe_domain;
+
+/*
+ * internal scsi timeout functions: for use by mid-layer and transport
+ * classes.
+ */
+
+#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+
+#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
new file mode 100644
index 000000000..251598eb3
--- /dev/null
+++ b/drivers/scsi/scsi_proc.c
@@ -0,0 +1,481 @@
+/*
+ * linux/drivers/scsi/scsi_proc.c
+ *
+ * The functions in this file provide an interface between
+ * the PROC file system and the SCSI device drivers
+ * It is mainly used for debugging, statistics and to pass
+ * information directly to the lowlevel driver.
+ *
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ * Version: 0.99.8 last change: 95/09/13
+ *
+ * generic command parser provided by:
+ * Andreas Heilwagen <crashcar@informatik.uni-koblenz.de>
+ *
+ * generic_proc_info() support of xxxx_info() by:
+ * Michael A. Griffith <grif@acm.org>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+
+/* 4K page size, but our output routines, use some slack for overruns */
+#define PROC_BLOCK_SIZE (3*1024)
+
+static struct proc_dir_entry *proc_scsi;
+
+/* Protect sht->present and sht->proc_dir */
+static DEFINE_MUTEX(global_host_template_mutex);
+
+static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct Scsi_Host *shost = PDE_DATA(file_inode(file));
+ ssize_t ret = -ENOMEM;
+ char *page;
+
+ if (count > PROC_BLOCK_SIZE)
+ return -EOVERFLOW;
+
+ if (!shost->hostt->write_info)
+ return -EINVAL;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (page) {
+ ret = -EFAULT;
+ if (copy_from_user(page, buf, count))
+ goto out;
+ ret = shost->hostt->write_info(shost, page, count);
+ }
+out:
+ free_page((unsigned long)page);
+ return ret;
+}
+
+static int proc_scsi_show(struct seq_file *m, void *v)
+{
+ struct Scsi_Host *shost = m->private;
+ return shost->hostt->show_info(m, shost);
+}
+
+static int proc_scsi_host_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+ 4 * PAGE_SIZE);
+}
+
+static const struct file_operations proc_scsi_fops = {
+ .open = proc_scsi_host_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = proc_scsi_host_write
+};
+
+/**
+ * scsi_proc_hostdir_add - Create directory in /proc for a scsi host
+ * @sht: owner of this directory
+ *
+ * Sets sht->proc_dir to the new directory.
+ */
+
+void scsi_proc_hostdir_add(struct scsi_host_template *sht)
+{
+ if (!sht->show_info)
+ return;
+
+ mutex_lock(&global_host_template_mutex);
+ if (!sht->present++) {
+ sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi);
+ if (!sht->proc_dir)
+ printk(KERN_ERR "%s: proc_mkdir failed for %s\n",
+ __func__, sht->proc_name);
+ }
+ mutex_unlock(&global_host_template_mutex);
+}
+
+/**
+ * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host
+ * @sht: owner of directory
+ */
+void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
+{
+ if (!sht->show_info)
+ return;
+
+ mutex_lock(&global_host_template_mutex);
+ if (!--sht->present && sht->proc_dir) {
+ remove_proc_entry(sht->proc_name, proc_scsi);
+ sht->proc_dir = NULL;
+ }
+ mutex_unlock(&global_host_template_mutex);
+}
+
+
+/**
+ * scsi_proc_host_add - Add entry for this host to appropriate /proc dir
+ * @shost: host to add
+ */
+void scsi_proc_host_add(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *sht = shost->hostt;
+ struct proc_dir_entry *p;
+ char name[10];
+
+ if (!sht->proc_dir)
+ return;
+
+ sprintf(name,"%d", shost->host_no);
+ p = proc_create_data(name, S_IRUGO | S_IWUSR,
+ sht->proc_dir, &proc_scsi_fops, shost);
+ if (!p)
+ printk(KERN_ERR "%s: Failed to register host %d in"
+ "%s\n", __func__, shost->host_no,
+ sht->proc_name);
+}
+
+/**
+ * scsi_proc_host_rm - remove this host's entry from /proc
+ * @shost: which host
+ */
+void scsi_proc_host_rm(struct Scsi_Host *shost)
+{
+ char name[10];
+
+ if (!shost->hostt->proc_dir)
+ return;
+
+ sprintf(name,"%d", shost->host_no);
+ remove_proc_entry(name, shost->hostt->proc_dir);
+}
+/**
+ * proc_print_scsidevice - return data about this host
+ * @dev: A scsi device
+ * @data: &struct seq_file to output to.
+ *
+ * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type,
+ * and revision.
+ */
+static int proc_print_scsidevice(struct device *dev, void *data)
+{
+ struct scsi_device *sdev;
+ struct seq_file *s = data;
+ int i;
+
+ if (!scsi_is_sdev_device(dev))
+ goto out;
+
+ sdev = to_scsi_device(dev);
+ seq_printf(s,
+ "Host: scsi%d Channel: %02d Id: %02d Lun: %02llu\n Vendor: ",
+ sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+ for (i = 0; i < 8; i++) {
+ if (sdev->vendor[i] >= 0x20)
+ seq_putc(s, sdev->vendor[i]);
+ else
+ seq_putc(s, ' ');
+ }
+
+ seq_puts(s, " Model: ");
+ for (i = 0; i < 16; i++) {
+ if (sdev->model[i] >= 0x20)
+ seq_putc(s, sdev->model[i]);
+ else
+ seq_putc(s, ' ');
+ }
+
+ seq_puts(s, " Rev: ");
+ for (i = 0; i < 4; i++) {
+ if (sdev->rev[i] >= 0x20)
+ seq_putc(s, sdev->rev[i]);
+ else
+ seq_putc(s, ' ');
+ }
+
+ seq_putc(s, '\n');
+
+ seq_printf(s, " Type: %s ", scsi_device_type(sdev->type));
+ seq_printf(s, " ANSI SCSI revision: %02x",
+ sdev->scsi_level - (sdev->scsi_level > 1));
+ if (sdev->scsi_level == 2)
+ seq_puts(s, " CCS\n");
+ else
+ seq_putc(s, '\n');
+
+out:
+ return 0;
+}
+
+/**
+ * scsi_add_single_device - Respond to user request to probe for/add device
+ * @host: user-supplied decimal integer
+ * @channel: user-supplied decimal integer
+ * @id: user-supplied decimal integer
+ * @lun: user-supplied decimal integer
+ *
+ * Description: called by writing "scsi add-single-device" to /proc/scsi/scsi.
+ *
+ * does scsi_host_lookup() and either user_scan() if that transport
+ * type supports it, or else scsi_scan_host_selected()
+ *
+ * Note: this seems to be aimed exclusively at SCSI parallel busses.
+ */
+
+static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
+{
+ struct Scsi_Host *shost;
+ int error = -ENXIO;
+
+ shost = scsi_host_lookup(host);
+ if (!shost)
+ return error;
+
+ if (shost->transportt->user_scan)
+ error = shost->transportt->user_scan(shost, channel, id, lun);
+ else
+ error = scsi_scan_host_selected(shost, channel, id, lun, 1);
+ scsi_host_put(shost);
+ return error;
+}
+
+/**
+ * scsi_remove_single_device - Respond to user request to remove a device
+ * @host: user-supplied decimal integer
+ * @channel: user-supplied decimal integer
+ * @id: user-supplied decimal integer
+ * @lun: user-supplied decimal integer
+ *
+ * Description: called by writing "scsi remove-single-device" to
+ * /proc/scsi/scsi. Does a scsi_device_lookup() and scsi_remove_device()
+ */
+static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost;
+ int error = -ENXIO;
+
+ shost = scsi_host_lookup(host);
+ if (!shost)
+ return error;
+ sdev = scsi_device_lookup(shost, channel, id, lun);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ error = 0;
+ }
+
+ scsi_host_put(shost);
+ return error;
+}
+
+/**
+ * proc_scsi_write - handle writes to /proc/scsi/scsi
+ * @file: not used
+ * @buf: buffer to write
+ * @length: length of buf, at most PAGE_SIZE
+ * @ppos: not used
+ *
+ * Description: this provides a legacy mechanism to add or remove devices by
+ * Host, Channel, ID, and Lun. To use,
+ * "echo 'scsi add-single-device 0 1 2 3' > /proc/scsi/scsi" or
+ * "echo 'scsi remove-single-device 0 1 2 3' > /proc/scsi/scsi" with
+ * "0 1 2 3" replaced by the Host, Channel, Id, and Lun.
+ *
+ * Note: this seems to be aimed at parallel SCSI. Most modern busses (USB,
+ * SATA, Firewire, Fibre Channel, etc) dynamically assign these values to
+ * provide a unique identifier and nothing more.
+ */
+
+
+static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ size_t length, loff_t *ppos)
+{
+ int host, channel, id, lun;
+ char *buffer, *p;
+ int err;
+
+ if (!buf || length > PAGE_SIZE)
+ return -EINVAL;
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ err = -EFAULT;
+ if (copy_from_user(buffer, buf, length))
+ goto out;
+
+ err = -EINVAL;
+ if (length < PAGE_SIZE)
+ buffer[length] = '\0';
+ else if (buffer[PAGE_SIZE-1])
+ goto out;
+
+ /*
+ * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
+ * with "0 1 2 3" replaced by your "Host Channel Id Lun".
+ */
+ if (!strncmp("scsi add-single-device", buffer, 22)) {
+ p = buffer + 23;
+
+ host = simple_strtoul(p, &p, 0);
+ channel = simple_strtoul(p + 1, &p, 0);
+ id = simple_strtoul(p + 1, &p, 0);
+ lun = simple_strtoul(p + 1, &p, 0);
+
+ err = scsi_add_single_device(host, channel, id, lun);
+
+ /*
+ * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
+ * with "0 1 2 3" replaced by your "Host Channel Id Lun".
+ */
+ } else if (!strncmp("scsi remove-single-device", buffer, 25)) {
+ p = buffer + 26;
+
+ host = simple_strtoul(p, &p, 0);
+ channel = simple_strtoul(p + 1, &p, 0);
+ id = simple_strtoul(p + 1, &p, 0);
+ lun = simple_strtoul(p + 1, &p, 0);
+
+ err = scsi_remove_single_device(host, channel, id, lun);
+ }
+
+ /*
+ * convert success returns so that we return the
+ * number of bytes consumed.
+ */
+ if (!err)
+ err = length;
+
+ out:
+ free_page((unsigned long)buffer);
+ return err;
+}
+
+static int always_match(struct device *dev, void *data)
+{
+ return 1;
+}
+
+static inline struct device *next_scsi_device(struct device *start)
+{
+ struct device *next = bus_find_device(&scsi_bus_type, start, NULL,
+ always_match);
+ put_device(start);
+ return next;
+}
+
+static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct device *dev = NULL;
+ loff_t n = *pos;
+
+ while ((dev = next_scsi_device(dev))) {
+ if (!n--)
+ break;
+ sfile->private++;
+ }
+ return dev;
+}
+
+static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ (*pos)++;
+ sfile->private++;
+ return next_scsi_device(v);
+}
+
+static void scsi_seq_stop(struct seq_file *sfile, void *v)
+{
+ put_device(v);
+}
+
+static int scsi_seq_show(struct seq_file *sfile, void *dev)
+{
+ if (!sfile->private)
+ seq_puts(sfile, "Attached devices:\n");
+
+ return proc_print_scsidevice(dev, sfile);
+}
+
+static const struct seq_operations scsi_seq_ops = {
+ .start = scsi_seq_start,
+ .next = scsi_seq_next,
+ .stop = scsi_seq_stop,
+ .show = scsi_seq_show
+};
+
+/**
+ * proc_scsi_open - glue function
+ * @inode: not used
+ * @file: passed to single_open()
+ *
+ * Associates proc_scsi_show with this file
+ */
+static int proc_scsi_open(struct inode *inode, struct file *file)
+{
+ /*
+ * We don't really need this for the write case but it doesn't
+ * harm either.
+ */
+ return seq_open(file, &scsi_seq_ops);
+}
+
+static const struct file_operations proc_scsi_operations = {
+ .owner = THIS_MODULE,
+ .open = proc_scsi_open,
+ .read = seq_read,
+ .write = proc_scsi_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/**
+ * scsi_init_procfs - create scsi and scsi/scsi in procfs
+ */
+int __init scsi_init_procfs(void)
+{
+ struct proc_dir_entry *pde;
+
+ proc_scsi = proc_mkdir("scsi", NULL);
+ if (!proc_scsi)
+ goto err1;
+
+ pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
+ if (!pde)
+ goto err2;
+
+ return 0;
+
+err2:
+ remove_proc_entry("scsi", NULL);
+err1:
+ return -ENOMEM;
+}
+
+/**
+ * scsi_exit_procfs - Remove scsi/scsi and scsi from procfs
+ */
+void scsi_exit_procfs(void)
+{
+ remove_proc_entry("scsi/scsi", NULL);
+ remove_proc_entry("scsi", NULL);
+}
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
new file mode 100644
index 000000000..6266a5d73
--- /dev/null
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -0,0 +1,42 @@
+#ifndef _SCSI_SAS_INTERNAL_H
+#define _SCSI_SAS_INTERNAL_H
+
+#define SAS_HOST_ATTRS 0
+#define SAS_PHY_ATTRS 17
+#define SAS_PORT_ATTRS 1
+#define SAS_RPORT_ATTRS 7
+#define SAS_END_DEV_ATTRS 5
+#define SAS_EXPANDER_ATTRS 7
+
+struct sas_internal {
+ struct scsi_transport_template t;
+ struct sas_function_template *f;
+ struct sas_domain_function_template *dft;
+
+ struct device_attribute private_host_attrs[SAS_HOST_ATTRS];
+ struct device_attribute private_phy_attrs[SAS_PHY_ATTRS];
+ struct device_attribute private_port_attrs[SAS_PORT_ATTRS];
+ struct device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
+ struct device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
+ struct device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
+
+ struct transport_container phy_attr_cont;
+ struct transport_container port_attr_cont;
+ struct transport_container rphy_attr_cont;
+ struct transport_container end_dev_attr_cont;
+ struct transport_container expander_attr_cont;
+
+ /*
+ * The array of null terminated pointers to attributes
+ * needed by scsi_sysfs.c
+ */
+ struct device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
+ struct device_attribute *phy_attrs[SAS_PHY_ATTRS + 1];
+ struct device_attribute *port_attrs[SAS_PORT_ATTRS + 1];
+ struct device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
+ struct device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
+ struct device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
+};
+#define to_sas_internal(tmpl) container_of(tmpl, struct sas_internal, t)
+
+#endif
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
new file mode 100644
index 000000000..6efab1c45
--- /dev/null
+++ b/drivers/scsi/scsi_scan.c
@@ -0,0 +1,1985 @@
+/*
+ * scsi_scan.c
+ *
+ * Copyright (C) 2000 Eric Youngdale,
+ * Copyright (C) 2002 Patrick Mansfield
+ *
+ * The general scanning/probing algorithm is as follows, exceptions are
+ * made to it depending on device specific flags, compilation options, and
+ * global variable (boot or module load time) settings.
+ *
+ * A specific LUN is scanned via an INQUIRY command; if the LUN has a
+ * device attached, a scsi_device is allocated and setup for it.
+ *
+ * For every id of every channel on the given host:
+ *
+ * Scan LUN 0; if the target responds to LUN 0 (even if there is no
+ * device or storage attached to LUN 0):
+ *
+ * If LUN 0 has a device attached, allocate and setup a
+ * scsi_device for it.
+ *
+ * If target is SCSI-3 or up, issue a REPORT LUN, and scan
+ * all of the LUNs returned by the REPORT LUN; else,
+ * sequentially scan LUNs up until some maximum is reached,
+ * or a LUN is seen that cannot have a device attached to it.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
+#include <linux/async.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_eh.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
+ " SCSI scanning, some SCSI devices might not be configured\n"
+
+/*
+ * Default timeout
+ */
+#define SCSI_TIMEOUT (2*HZ)
+
+/*
+ * Prefix values for the SCSI id's (stored in sysfs name field)
+ */
+#define SCSI_UID_SER_NUM 'S'
+#define SCSI_UID_UNKNOWN 'Z'
+
+/*
+ * Return values of some of the scanning functions.
+ *
+ * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
+ * includes allocation or general failures preventing IO from being sent.
+ *
+ * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
+ * on the given LUN.
+ *
+ * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
+ * given LUN.
+ */
+#define SCSI_SCAN_NO_RESPONSE 0
+#define SCSI_SCAN_TARGET_PRESENT 1
+#define SCSI_SCAN_LUN_PRESENT 2
+
+static const char *scsi_null_device_strs = "nullnullnullnull";
+
+#define MAX_SCSI_LUNS 512
+
+static u64 max_scsi_luns = MAX_SCSI_LUNS;
+
+module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(max_luns,
+ "last scsi LUN (should be between 1 and 2^64-1)");
+
+#ifdef CONFIG_SCSI_SCAN_ASYNC
+#define SCSI_SCAN_TYPE_DEFAULT "async"
+#else
+#define SCSI_SCAN_TYPE_DEFAULT "sync"
+#endif
+
+char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+
+module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
+MODULE_PARM_DESC(scan, "sync, async or none");
+
+static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
+
+module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(inq_timeout,
+ "Timeout (in seconds) waiting for devices to answer INQUIRY."
+ " Default is 20. Some devices may need more; most need less.");
+
+/* This lock protects only this list */
+static DEFINE_SPINLOCK(async_scan_lock);
+static LIST_HEAD(scanning_hosts);
+
+struct async_scan_data {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ struct completion prev_finished;
+};
+
+/**
+ * scsi_complete_async_scans - Wait for asynchronous scans to complete
+ *
+ * When this function returns, any host which started scanning before
+ * this function was called will have finished its scan. Hosts which
+ * started scanning after this function was called may or may not have
+ * finished.
+ */
+int scsi_complete_async_scans(void)
+{
+ struct async_scan_data *data;
+
+ do {
+ if (list_empty(&scanning_hosts))
+ return 0;
+ /* If we can't get memory immediately, that's OK. Just
+ * sleep a little. Even if we never get memory, the async
+ * scans will finish eventually.
+ */
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ msleep(1);
+ } while (!data);
+
+ data->shost = NULL;
+ init_completion(&data->prev_finished);
+
+ spin_lock(&async_scan_lock);
+ /* Check that there's still somebody else on the list */
+ if (list_empty(&scanning_hosts))
+ goto done;
+ list_add_tail(&data->list, &scanning_hosts);
+ spin_unlock(&async_scan_lock);
+
+ printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
+ wait_for_completion(&data->prev_finished);
+
+ spin_lock(&async_scan_lock);
+ list_del(&data->list);
+ if (!list_empty(&scanning_hosts)) {
+ struct async_scan_data *next = list_entry(scanning_hosts.next,
+ struct async_scan_data, list);
+ complete(&next->prev_finished);
+ }
+ done:
+ spin_unlock(&async_scan_lock);
+
+ kfree(data);
+ return 0;
+}
+
+/**
+ * scsi_unlock_floptical - unlock device via a special MODE SENSE command
+ * @sdev: scsi device to send command to
+ * @result: area to store the result of the MODE SENSE
+ *
+ * Description:
+ * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
+ * Called for BLIST_KEY devices.
+ **/
+static void scsi_unlock_floptical(struct scsi_device *sdev,
+ unsigned char *result)
+{
+ unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+
+ sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
+ scsi_cmd[0] = MODE_SENSE;
+ scsi_cmd[1] = 0;
+ scsi_cmd[2] = 0x2e;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 0x2a; /* size */
+ scsi_cmd[5] = 0;
+ scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
+ SCSI_TIMEOUT, 3, NULL);
+}
+
+/**
+ * scsi_alloc_sdev - allocate and setup a scsi_Device
+ * @starget: which target to allocate a &scsi_device for
+ * @lun: which lun
+ * @hostdata: usually NULL and set by ->slave_alloc instead
+ *
+ * Description:
+ * Allocate, initialize for io, and return a pointer to a scsi_Device.
+ * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
+ * adds scsi_Device to the appropriate list.
+ *
+ * Return value:
+ * scsi_Device pointer, or NULL on failure.
+ **/
+static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
+ u64 lun, void *hostdata)
+{
+ struct scsi_device *sdev;
+ int display_failure_msg = 1, ret;
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ extern void scsi_evt_thread(struct work_struct *work);
+ extern void scsi_requeue_run_queue(struct work_struct *work);
+
+ sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
+ GFP_ATOMIC);
+ if (!sdev)
+ goto out;
+
+ sdev->vendor = scsi_null_device_strs;
+ sdev->model = scsi_null_device_strs;
+ sdev->rev = scsi_null_device_strs;
+ sdev->host = shost;
+ sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
+ sdev->id = starget->id;
+ sdev->lun = lun;
+ sdev->channel = starget->channel;
+ sdev->sdev_state = SDEV_CREATED;
+ INIT_LIST_HEAD(&sdev->siblings);
+ INIT_LIST_HEAD(&sdev->same_target_siblings);
+ INIT_LIST_HEAD(&sdev->cmd_list);
+ INIT_LIST_HEAD(&sdev->starved_entry);
+ INIT_LIST_HEAD(&sdev->event_list);
+ spin_lock_init(&sdev->list_lock);
+ INIT_WORK(&sdev->event_work, scsi_evt_thread);
+ INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
+
+ sdev->sdev_gendev.parent = get_device(&starget->dev);
+ sdev->sdev_target = starget;
+
+ /* usually NULL and set by ->slave_alloc instead */
+ sdev->hostdata = hostdata;
+
+ /* if the device needs this changing, it may do so in the
+ * slave_configure function */
+ sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
+
+ /*
+ * Some low level driver could use device->type
+ */
+ sdev->type = -1;
+
+ /*
+ * Assume that the device will have handshaking problems,
+ * and then fix this field later if it turns out it
+ * doesn't
+ */
+ sdev->borken = 1;
+
+ if (shost_use_blk_mq(shost))
+ sdev->request_queue = scsi_mq_alloc_queue(sdev);
+ else
+ sdev->request_queue = scsi_alloc_queue(sdev);
+ if (!sdev->request_queue) {
+ /* release fn is set up in scsi_sysfs_device_initialise, so
+ * have to free and put manually here */
+ put_device(&starget->dev);
+ kfree(sdev);
+ goto out;
+ }
+ WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
+ sdev->request_queue->queuedata = sdev;
+
+ if (!shost_use_blk_mq(sdev->host) &&
+ (shost->bqt || shost->hostt->use_blk_tags)) {
+ blk_queue_init_tags(sdev->request_queue,
+ sdev->host->cmd_per_lun, shost->bqt,
+ shost->hostt->tag_alloc_policy);
+ }
+ scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
+
+ scsi_sysfs_device_initialize(sdev);
+
+ if (shost->hostt->slave_alloc) {
+ ret = shost->hostt->slave_alloc(sdev);
+ if (ret) {
+ /*
+ * if LLDD reports slave not present, don't clutter
+ * console with alloc failure messages
+ */
+ if (ret == -ENXIO)
+ display_failure_msg = 0;
+ goto out_device_destroy;
+ }
+ }
+
+ return sdev;
+
+out_device_destroy:
+ __scsi_remove_device(sdev);
+out:
+ if (display_failure_msg)
+ printk(ALLOC_FAILURE_MSG, __func__);
+ return NULL;
+}
+
+static void scsi_target_destroy(struct scsi_target *starget)
+{
+ struct device *dev = &starget->dev;
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ unsigned long flags;
+
+ starget->state = STARGET_DEL;
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->hostt->target_destroy)
+ shost->hostt->target_destroy(starget);
+ list_del_init(&starget->siblings);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev);
+}
+
+static void scsi_target_dev_release(struct device *dev)
+{
+ struct device *parent = dev->parent;
+ struct scsi_target *starget = to_scsi_target(dev);
+
+ kfree(starget);
+ put_device(parent);
+}
+
+static struct device_type scsi_target_type = {
+ .name = "scsi_target",
+ .release = scsi_target_dev_release,
+};
+
+int scsi_is_target_device(const struct device *dev)
+{
+ return dev->type == &scsi_target_type;
+}
+EXPORT_SYMBOL(scsi_is_target_device);
+
+static struct scsi_target *__scsi_find_target(struct device *parent,
+ int channel, uint id)
+{
+ struct scsi_target *starget, *found_starget = NULL;
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ /*
+ * Search for an existing target for this sdev.
+ */
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->id == id &&
+ starget->channel == channel) {
+ found_starget = starget;
+ break;
+ }
+ }
+ if (found_starget)
+ get_device(&found_starget->dev);
+
+ return found_starget;
+}
+
+/**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs. Note: we have to be in user context here because the target reaps
+ * should be done in places where the scsi device visibility is being removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+ struct scsi_target *starget
+ = container_of(kref, struct scsi_target, reap_ref);
+
+ /*
+ * if we get here and the target is still in the CREATED state that
+ * means it was allocated but never made visible (because a scan
+ * turned up no LUNs), so don't call device_del() on it.
+ */
+ if (starget->state != STARGET_CREATED) {
+ transport_remove_device(&starget->dev);
+ device_del(&starget->dev);
+ }
+ scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+ kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
+/**
+ * scsi_alloc_target - allocate a new or find an existing target
+ * @parent: parent of the target (need not be a scsi host)
+ * @channel: target channel number (zero if no channels)
+ * @id: target id number
+ *
+ * Return an existing target if one exists, provided it hasn't already
+ * gone into STARGET_DEL state, otherwise allocate a new target.
+ *
+ * The target is returned with an incremented reference, so the caller
+ * is responsible for both reaping and doing a last put
+ */
+static struct scsi_target *scsi_alloc_target(struct device *parent,
+ int channel, uint id)
+{
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ struct device *dev = NULL;
+ unsigned long flags;
+ const int size = sizeof(struct scsi_target)
+ + shost->transportt->target_size;
+ struct scsi_target *starget;
+ struct scsi_target *found_target;
+ int error, ref_got;
+
+ starget = kzalloc(size, GFP_KERNEL);
+ if (!starget) {
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
+ return NULL;
+ }
+ dev = &starget->dev;
+ device_initialize(dev);
+ kref_init(&starget->reap_ref);
+ dev->parent = get_device(parent);
+ dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
+ dev->bus = &scsi_bus_type;
+ dev->type = &scsi_target_type;
+ starget->id = id;
+ starget->channel = channel;
+ starget->can_queue = 0;
+ INIT_LIST_HEAD(&starget->siblings);
+ INIT_LIST_HEAD(&starget->devices);
+ starget->state = STARGET_CREATED;
+ starget->scsi_level = SCSI_2;
+ starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
+ retry:
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ found_target = __scsi_find_target(parent, channel, id);
+ if (found_target)
+ goto found;
+
+ list_add_tail(&starget->siblings, &shost->__targets);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ /* allocate and add */
+ transport_setup_device(dev);
+ if (shost->hostt->target_alloc) {
+ error = shost->hostt->target_alloc(starget);
+
+ if(error) {
+ dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+ /* don't want scsi_target_reap to do the final
+ * put because it will be under the host lock */
+ scsi_target_destroy(starget);
+ return NULL;
+ }
+ }
+ get_device(dev);
+
+ return starget;
+
+ found:
+ /*
+ * release routine already fired if kref is zero, so if we can still
+ * take the reference, the target must be alive. If we can't, it must
+ * be dying and we need to wait for a new target
+ */
+ ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (ref_got) {
+ put_device(dev);
+ return found_target;
+ }
+ /*
+ * Unfortunately, we found a dying target; need to wait until it's
+ * dead before we can get a new one. There is an anomaly here. We
+ * *should* call scsi_target_reap() to balance the kref_get() of the
+ * reap_ref above. However, since the target being released, it's
+ * already invisible and the reap_ref is irrelevant. If we call
+ * scsi_target_reap() we might spuriously do another device_del() on
+ * an already invisible target.
+ */
+ put_device(&found_target->dev);
+ /*
+ * length of time is irrelevant here, we just want to yield the CPU
+ * for a tick to avoid busy waiting for the target to die.
+ */
+ msleep(1);
+ goto retry;
+}
+
+/**
+ * scsi_target_reap - check to see if target is in use and destroy if not
+ * @starget: target to be checked
+ *
+ * This is used after removing a LUN or doing a last put of the target
+ * it checks atomically that nothing is using the target and removes
+ * it if so.
+ */
+void scsi_target_reap(struct scsi_target *starget)
+{
+ /*
+ * serious problem if this triggers: STARGET_DEL is only set in the if
+ * the reap_ref drops to zero, so we're trying to do another final put
+ * on an already released kref
+ */
+ BUG_ON(starget->state == STARGET_DEL);
+ scsi_target_reap_ref_put(starget);
+}
+
+/**
+ * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
+ * @s: INQUIRY result string to sanitize
+ * @len: length of the string
+ *
+ * Description:
+ * The SCSI spec says that INQUIRY vendor, product, and revision
+ * strings must consist entirely of graphic ASCII characters,
+ * padded on the right with spaces. Since not all devices obey
+ * this rule, we will replace non-graphic or non-ASCII characters
+ * with spaces. Exception: a NUL character is interpreted as a
+ * string terminator, so all the following characters are set to
+ * spaces.
+ **/
+static void sanitize_inquiry_string(unsigned char *s, int len)
+{
+ int terminated = 0;
+
+ for (; len > 0; (--len, ++s)) {
+ if (*s == 0)
+ terminated = 1;
+ if (terminated || *s < 0x20 || *s > 0x7e)
+ *s = ' ';
+ }
+}
+
+/**
+ * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
+ * @sdev: scsi_device to probe
+ * @inq_result: area to store the INQUIRY result
+ * @result_len: len of inq_result
+ * @bflags: store any bflags found here
+ *
+ * Description:
+ * Probe the lun associated with @req using a standard SCSI INQUIRY;
+ *
+ * If the INQUIRY is successful, zero is returned and the
+ * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
+ * are copied to the scsi_device any flags value is stored in *@bflags.
+ **/
+static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ int result_len, int *bflags)
+{
+ unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+ int first_inquiry_len, try_inquiry_len, next_inquiry_len;
+ int response_len = 0;
+ int pass, count, result;
+ struct scsi_sense_hdr sshdr;
+
+ *bflags = 0;
+
+ /* Perform up to 3 passes. The first pass uses a conservative
+ * transfer length of 36 unless sdev->inquiry_len specifies a
+ * different value. */
+ first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+ try_inquiry_len = first_inquiry_len;
+ pass = 1;
+
+ next_pass:
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
+ "scsi scan: INQUIRY pass %d length %d\n",
+ pass, try_inquiry_len));
+
+ /* Each pass gets up to three chances to ignore Unit Attention */
+ for (count = 0; count < 3; ++count) {
+ int resid;
+
+ memset(scsi_cmd, 0, 6);
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[4] = (unsigned char) try_inquiry_len;
+
+ memset(inq_result, 0, try_inquiry_len);
+
+ result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+ inq_result, try_inquiry_len, &sshdr,
+ HZ / 2 + HZ * scsi_inq_timeout, 3,
+ &resid);
+
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
+ "scsi scan: INQUIRY %s with code 0x%x\n",
+ result ? "failed" : "successful", result));
+
+ if (result) {
+ /*
+ * not-ready to ready transition [asc/ascq=0x28/0x0]
+ * or power-on, reset [asc/ascq=0x29/0x0], continue.
+ * INQUIRY should not yield UNIT_ATTENTION
+ * but many buggy devices do so anyway.
+ */
+ if ((driver_byte(result) & DRIVER_SENSE) &&
+ scsi_sense_valid(&sshdr)) {
+ if ((sshdr.sense_key == UNIT_ATTENTION) &&
+ ((sshdr.asc == 0x28) ||
+ (sshdr.asc == 0x29)) &&
+ (sshdr.ascq == 0))
+ continue;
+ }
+ } else {
+ /*
+ * if nothing was transferred, we try
+ * again. It's a workaround for some USB
+ * devices.
+ */
+ if (resid == try_inquiry_len)
+ continue;
+ }
+ break;
+ }
+
+ if (result == 0) {
+ sanitize_inquiry_string(&inq_result[8], 8);
+ sanitize_inquiry_string(&inq_result[16], 16);
+ sanitize_inquiry_string(&inq_result[32], 4);
+
+ response_len = inq_result[4] + 5;
+ if (response_len > 255)
+ response_len = first_inquiry_len; /* sanity */
+
+ /*
+ * Get any flags for this device.
+ *
+ * XXX add a bflags to scsi_device, and replace the
+ * corresponding bit fields in scsi_device, so bflags
+ * need not be passed as an argument.
+ */
+ *bflags = scsi_get_device_flags(sdev, &inq_result[8],
+ &inq_result[16]);
+
+ /* When the first pass succeeds we gain information about
+ * what larger transfer lengths might work. */
+ if (pass == 1) {
+ if (BLIST_INQUIRY_36 & *bflags)
+ next_inquiry_len = 36;
+ else if (BLIST_INQUIRY_58 & *bflags)
+ next_inquiry_len = 58;
+ else if (sdev->inquiry_len)
+ next_inquiry_len = sdev->inquiry_len;
+ else
+ next_inquiry_len = response_len;
+
+ /* If more data is available perform the second pass */
+ if (next_inquiry_len > try_inquiry_len) {
+ try_inquiry_len = next_inquiry_len;
+ pass = 2;
+ goto next_pass;
+ }
+ }
+
+ } else if (pass == 2) {
+ sdev_printk(KERN_INFO, sdev,
+ "scsi scan: %d byte inquiry failed. "
+ "Consider BLIST_INQUIRY_36 for this device\n",
+ try_inquiry_len);
+
+ /* If this pass failed, the third pass goes back and transfers
+ * the same amount as we successfully got in the first pass. */
+ try_inquiry_len = first_inquiry_len;
+ pass = 3;
+ goto next_pass;
+ }
+
+ /* If the last transfer attempt got an error, assume the
+ * peripheral doesn't exist or is dead. */
+ if (result)
+ return -EIO;
+
+ /* Don't report any more data than the device says is valid */
+ sdev->inquiry_len = min(try_inquiry_len, response_len);
+
+ /*
+ * XXX Abort if the response length is less than 36? If less than
+ * 32, the lookup of the device flags (above) could be invalid,
+ * and it would be possible to take an incorrect action - we do
+ * not want to hang because of a short INQUIRY. On the flip side,
+ * if the device is spun down or becoming ready (and so it gives a
+ * short INQUIRY), an abort here prevents any further use of the
+ * device, including spin up.
+ *
+ * On the whole, the best approach seems to be to assume the first
+ * 36 bytes are valid no matter what the device says. That's
+ * better than copying < 36 bytes to the inquiry-result buffer
+ * and displaying garbage for the Vendor, Product, or Revision
+ * strings.
+ */
+ if (sdev->inquiry_len < 36) {
+ sdev_printk(KERN_INFO, sdev,
+ "scsi scan: INQUIRY result too short (%d),"
+ " using 36\n", sdev->inquiry_len);
+ sdev->inquiry_len = 36;
+ }
+
+ /*
+ * Related to the above issue:
+ *
+ * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
+ * and if not ready, sent a START_STOP to start (maybe spin up) and
+ * then send the INQUIRY again, since the INQUIRY can change after
+ * a device is initialized.
+ *
+ * Ideally, start a device if explicitly asked to do so. This
+ * assumes that a device is spun up on power on, spun down on
+ * request, and then spun up on request.
+ */
+
+ /*
+ * The scanning code needs to know the scsi_level, even if no
+ * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
+ * non-zero LUNs can be scanned.
+ */
+ sdev->scsi_level = inq_result[2] & 0x07;
+ if (sdev->scsi_level >= 2 ||
+ (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
+ sdev->scsi_level++;
+ sdev->sdev_target->scsi_level = sdev->scsi_level;
+
+ /*
+ * If SCSI-2 or lower, and if the transport requires it,
+ * store the LUN value in CDB[1].
+ */
+ sdev->lun_in_cdb = 0;
+ if (sdev->scsi_level <= SCSI_2 &&
+ sdev->scsi_level != SCSI_UNKNOWN &&
+ !sdev->host->no_scsi2_lun_in_cdb)
+ sdev->lun_in_cdb = 1;
+
+ return 0;
+}
+
+/**
+ * scsi_add_lun - allocate and fully initialze a scsi_device
+ * @sdev: holds information to be stored in the new scsi_device
+ * @inq_result: holds the result of a previous INQUIRY to the LUN
+ * @bflags: black/white list flag
+ * @async: 1 if this device is being scanned asynchronously
+ *
+ * Description:
+ * Initialize the scsi_device @sdev. Optionally set fields based
+ * on values in *@bflags.
+ *
+ * Return:
+ * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
+ * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
+ **/
+static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ int *bflags, int async)
+{
+ int ret;
+
+ /*
+ * XXX do not save the inquiry, since it can change underneath us,
+ * save just vendor/model/rev.
+ *
+ * Rather than save it and have an ioctl that retrieves the saved
+ * value, have an ioctl that executes the same INQUIRY code used
+ * in scsi_probe_lun, let user level programs doing INQUIRY
+ * scanning run at their own risk, or supply a user level program
+ * that can correctly scan.
+ */
+
+ /*
+ * Copy at least 36 bytes of INQUIRY data, so that we don't
+ * dereference unallocated memory when accessing the Vendor,
+ * Product, and Revision strings. Badly behaved devices may set
+ * the INQUIRY Additional Length byte to a small value, indicating
+ * these strings are invalid, but often they contain plausible data
+ * nonetheless. It doesn't matter if the device sent < 36 bytes
+ * total, since scsi_probe_lun() initializes inq_result with 0s.
+ */
+ sdev->inquiry = kmemdup(inq_result,
+ max_t(size_t, sdev->inquiry_len, 36),
+ GFP_ATOMIC);
+ if (sdev->inquiry == NULL)
+ return SCSI_SCAN_NO_RESPONSE;
+
+ sdev->vendor = (char *) (sdev->inquiry + 8);
+ sdev->model = (char *) (sdev->inquiry + 16);
+ sdev->rev = (char *) (sdev->inquiry + 32);
+
+ if (strncmp(sdev->vendor, "ATA ", 8) == 0) {
+ /*
+ * sata emulation layer device. This is a hack to work around
+ * the SATL power management specifications which state that
+ * when the SATL detects the device has gone into standby
+ * mode, it shall respond with NOT READY.
+ */
+ sdev->allow_restart = 1;
+ }
+
+ if (*bflags & BLIST_ISROM) {
+ sdev->type = TYPE_ROM;
+ sdev->removable = 1;
+ } else {
+ sdev->type = (inq_result[0] & 0x1f);
+ sdev->removable = (inq_result[1] & 0x80) >> 7;
+
+ /*
+ * some devices may respond with wrong type for
+ * well-known logical units. Force well-known type
+ * to enumerate them correctly.
+ */
+ if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
+ __func__, sdev->type, (unsigned int)sdev->lun);
+ sdev->type = TYPE_WLUN;
+ }
+
+ }
+
+ if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
+ /* RBC and MMC devices can return SCSI-3 compliance and yet
+ * still not support REPORT LUNS, so make them act as
+ * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
+ * specifically set */
+ if ((*bflags & BLIST_REPORTLUN2) == 0)
+ *bflags |= BLIST_NOREPORTLUN;
+ }
+
+ /*
+ * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
+ * spec says: The device server is capable of supporting the
+ * specified peripheral device type on this logical unit. However,
+ * the physical device is not currently connected to this logical
+ * unit.
+ *
+ * The above is vague, as it implies that we could treat 001 and
+ * 011 the same. Stay compatible with previous code, and create a
+ * scsi_device for a PQ of 1
+ *
+ * Don't set the device offline here; rather let the upper
+ * level drivers eval the PQ to decide whether they should
+ * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
+ */
+
+ sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
+ sdev->lockable = sdev->removable;
+ sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
+
+ if (sdev->scsi_level >= SCSI_3 ||
+ (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
+ sdev->ppr = 1;
+ if (inq_result[7] & 0x60)
+ sdev->wdtr = 1;
+ if (inq_result[7] & 0x10)
+ sdev->sdtr = 1;
+
+ sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
+ "ANSI: %d%s\n", scsi_device_type(sdev->type),
+ sdev->vendor, sdev->model, sdev->rev,
+ sdev->inq_periph_qual, inq_result[2] & 0x07,
+ (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
+
+ if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
+ !(*bflags & BLIST_NOTQ)) {
+ sdev->tagged_supported = 1;
+ sdev->simple_tags = 1;
+ }
+
+ /*
+ * Some devices (Texel CD ROM drives) have handshaking problems
+ * when used with the Seagate controllers. borken is initialized
+ * to 1, and then set it to 0 here.
+ */
+ if ((*bflags & BLIST_BORKEN) == 0)
+ sdev->borken = 0;
+
+ if (*bflags & BLIST_NO_ULD_ATTACH)
+ sdev->no_uld_attach = 1;
+
+ /*
+ * Apparently some really broken devices (contrary to the SCSI
+ * standards) need to be selected without asserting ATN
+ */
+ if (*bflags & BLIST_SELECT_NO_ATN)
+ sdev->select_no_atn = 1;
+
+ /*
+ * Maximum 512 sector transfer length
+ * broken RA4x00 Compaq Disk Array
+ */
+ if (*bflags & BLIST_MAX_512)
+ blk_queue_max_hw_sectors(sdev->request_queue, 512);
+ /*
+ * Max 1024 sector transfer length for targets that report incorrect
+ * max/optimal lengths and relied on the old block layer safe default
+ */
+ else if (*bflags & BLIST_MAX_1024)
+ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
+
+ /*
+ * Some devices may not want to have a start command automatically
+ * issued when a device is added.
+ */
+ if (*bflags & BLIST_NOSTARTONADD)
+ sdev->no_start_on_add = 1;
+
+ if (*bflags & BLIST_SINGLELUN)
+ scsi_target(sdev)->single_lun = 1;
+
+ sdev->use_10_for_rw = 1;
+
+ if (*bflags & BLIST_MS_SKIP_PAGE_08)
+ sdev->skip_ms_page_8 = 1;
+
+ if (*bflags & BLIST_MS_SKIP_PAGE_3F)
+ sdev->skip_ms_page_3f = 1;
+
+ if (*bflags & BLIST_USE_10_BYTE_MS)
+ sdev->use_10_for_ms = 1;
+
+ /* some devices don't like REPORT SUPPORTED OPERATION CODES
+ * and will simply timeout causing sd_mod init to take a very
+ * very long time */
+ if (*bflags & BLIST_NO_RSOC)
+ sdev->no_report_opcodes = 1;
+
+ /* set the device running here so that slave configure
+ * may do I/O */
+ ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+ if (ret) {
+ ret = scsi_device_set_state(sdev, SDEV_BLOCK);
+
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev,
+ "in wrong state %s to complete scan\n",
+ scsi_device_state_name(sdev->sdev_state));
+ return SCSI_SCAN_NO_RESPONSE;
+ }
+ }
+
+ if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
+ sdev->use_192_bytes_for_3f = 1;
+
+ if (*bflags & BLIST_NOT_LOCKABLE)
+ sdev->lockable = 0;
+
+ if (*bflags & BLIST_RETRY_HWERROR)
+ sdev->retry_hwerror = 1;
+
+ if (*bflags & BLIST_NO_DIF)
+ sdev->no_dif = 1;
+
+ sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
+
+ if (*bflags & BLIST_TRY_VPD_PAGES)
+ sdev->try_vpd_pages = 1;
+ else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ sdev->skip_vpd_pages = 1;
+
+ transport_configure_device(&sdev->sdev_gendev);
+
+ if (sdev->host->hostt->slave_configure) {
+ ret = sdev->host->hostt->slave_configure(sdev);
+ if (ret) {
+ /*
+ * if LLDD reports slave not present, don't clutter
+ * console with alloc failure messages
+ */
+ if (ret != -ENXIO) {
+ sdev_printk(KERN_ERR, sdev,
+ "failed to configure device\n");
+ }
+ return SCSI_SCAN_NO_RESPONSE;
+ }
+ }
+
+ if (sdev->scsi_level >= SCSI_3)
+ scsi_attach_vpd(sdev);
+
+ sdev->max_queue_depth = sdev->queue_depth;
+
+ /*
+ * Ok, the device is now all set up, we can
+ * register it and tell the rest of the kernel
+ * about it.
+ */
+ if (!async && scsi_sysfs_add_sdev(sdev) != 0)
+ return SCSI_SCAN_NO_RESPONSE;
+
+ return SCSI_SCAN_LUN_PRESENT;
+}
+
+#ifdef CONFIG_SCSI_LOGGING
+/**
+ * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
+ * @buf: Output buffer with at least end-first+1 bytes of space
+ * @inq: Inquiry buffer (input)
+ * @first: Offset of string into inq
+ * @end: Index after last character in inq
+ */
+static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
+ unsigned first, unsigned end)
+{
+ unsigned term = 0, idx;
+
+ for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
+ if (inq[idx+first] > ' ') {
+ buf[idx] = inq[idx+first];
+ term = idx+1;
+ } else {
+ buf[idx] = ' ';
+ }
+ }
+ buf[term] = 0;
+ return buf;
+}
+#endif
+
+/**
+ * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
+ * @starget: pointer to target device structure
+ * @lun: LUN of target device
+ * @bflagsp: store bflags here if not NULL
+ * @sdevp: probe the LUN corresponding to this scsi_device
+ * @rescan: if nonzero skip some code only needed on first scan
+ * @hostdata: passed to scsi_alloc_sdev()
+ *
+ * Description:
+ * Call scsi_probe_lun, if a LUN with an attached device is found,
+ * allocate and set it up by calling scsi_add_lun.
+ *
+ * Return:
+ * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
+ * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
+ * attached at the LUN
+ * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
+ **/
+static int scsi_probe_and_add_lun(struct scsi_target *starget,
+ u64 lun, int *bflagsp,
+ struct scsi_device **sdevp, int rescan,
+ void *hostdata)
+{
+ struct scsi_device *sdev;
+ unsigned char *result;
+ int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+
+ /*
+ * The rescan flag is used as an optimization, the first scan of a
+ * host adapter calls into here with rescan == 0.
+ */
+ sdev = scsi_device_lookup_by_target(starget, lun);
+ if (sdev) {
+ if (rescan || !scsi_device_created(sdev)) {
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
+ "scsi scan: device exists on %s\n",
+ dev_name(&sdev->sdev_gendev)));
+ if (sdevp)
+ *sdevp = sdev;
+ else
+ scsi_device_put(sdev);
+
+ if (bflagsp)
+ *bflagsp = scsi_get_device_flags(sdev,
+ sdev->vendor,
+ sdev->model);
+ return SCSI_SCAN_LUN_PRESENT;
+ }
+ scsi_device_put(sdev);
+ } else
+ sdev = scsi_alloc_sdev(starget, lun, hostdata);
+ if (!sdev)
+ goto out;
+
+ result = kmalloc(result_len, GFP_ATOMIC |
+ ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
+ if (!result)
+ goto out_free_sdev;
+
+ if (scsi_probe_lun(sdev, result, result_len, &bflags))
+ goto out_free_result;
+
+ if (bflagsp)
+ *bflagsp = bflags;
+ /*
+ * result contains valid SCSI INQUIRY data.
+ */
+ if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
+ /*
+ * For a Peripheral qualifier 3 (011b), the SCSI
+ * spec says: The device server is not capable of
+ * supporting a physical device on this logical
+ * unit.
+ *
+ * For disks, this implies that there is no
+ * logical disk configured at sdev->lun, but there
+ * is a target id responding.
+ */
+ SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
+ " peripheral qualifier of 3, device not"
+ " added\n"))
+ if (lun == 0) {
+ SCSI_LOG_SCAN_BUS(1, {
+ unsigned char vend[9];
+ unsigned char mod[17];
+
+ sdev_printk(KERN_INFO, sdev,
+ "scsi scan: consider passing scsi_mod."
+ "dev_flags=%s:%s:0x240 or 0x1000240\n",
+ scsi_inq_str(vend, result, 8, 16),
+ scsi_inq_str(mod, result, 16, 32));
+ });
+
+ }
+
+ res = SCSI_SCAN_TARGET_PRESENT;
+ goto out_free_result;
+ }
+
+ /*
+ * Some targets may set slight variations of PQ and PDT to signal
+ * that no LUN is present, so don't add sdev in these cases.
+ * Two specific examples are:
+ * 1) NetApp targets: return PQ=1, PDT=0x1f
+ * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ * in the UFI 1.0 spec (we cannot rely on reserved bits).
+ *
+ * References:
+ * 1) SCSI SPC-3, pp. 145-146
+ * PQ=1: "A peripheral device having the specified peripheral
+ * device type is not connected to this logical unit. However, the
+ * device server is capable of supporting the specified peripheral
+ * device type on this logical unit."
+ * PDT=0x1f: "Unknown or no device type"
+ * 2) USB UFI 1.0, p. 20
+ * PDT=00h Direct-access device (floppy)
+ * PDT=1Fh none (no FDD connected to the requested logical unit)
+ */
+ if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
+ (result[0] & 0x1f) == 0x1f &&
+ !scsi_is_wlun(lun)) {
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
+ "scsi scan: peripheral device type"
+ " of 31, no device added\n"));
+ res = SCSI_SCAN_TARGET_PRESENT;
+ goto out_free_result;
+ }
+
+ res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
+ if (res == SCSI_SCAN_LUN_PRESENT) {
+ if (bflags & BLIST_KEY) {
+ sdev->lockable = 0;
+ scsi_unlock_floptical(sdev, result);
+ }
+ }
+
+ out_free_result:
+ kfree(result);
+ out_free_sdev:
+ if (res == SCSI_SCAN_LUN_PRESENT) {
+ if (sdevp) {
+ if (scsi_device_get(sdev) == 0) {
+ *sdevp = sdev;
+ } else {
+ __scsi_remove_device(sdev);
+ res = SCSI_SCAN_NO_RESPONSE;
+ }
+ }
+ } else
+ __scsi_remove_device(sdev);
+ out:
+ return res;
+}
+
+/**
+ * scsi_sequential_lun_scan - sequentially scan a SCSI target
+ * @starget: pointer to target structure to scan
+ * @bflags: black/white list flag for LUN 0
+ * @scsi_level: Which version of the standard does this device adhere to
+ * @rescan: passed to scsi_probe_add_lun()
+ *
+ * Description:
+ * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
+ * scanned) to some maximum lun until a LUN is found with no device
+ * attached. Use the bflags to figure out any oddities.
+ *
+ * Modifies sdevscan->lun.
+ **/
+static void scsi_sequential_lun_scan(struct scsi_target *starget,
+ int bflags, int scsi_level, int rescan)
+{
+ uint max_dev_lun;
+ u64 sparse_lun, lun;
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+
+ SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
+ "scsi scan: Sequential scan\n"));
+
+ max_dev_lun = min(max_scsi_luns, shost->max_lun);
+ /*
+ * If this device is known to support sparse multiple units,
+ * override the other settings, and scan all of them. Normally,
+ * SCSI-3 devices should be scanned via the REPORT LUNS.
+ */
+ if (bflags & BLIST_SPARSELUN) {
+ max_dev_lun = shost->max_lun;
+ sparse_lun = 1;
+ } else
+ sparse_lun = 0;
+
+ /*
+ * If less than SCSI_1_CCS, and no special lun scanning, stop
+ * scanning; this matches 2.4 behaviour, but could just be a bug
+ * (to continue scanning a SCSI_1_CCS device).
+ *
+ * This test is broken. We might not have any device on lun0 for
+ * a sparselun device, and if that's the case then how would we
+ * know the real scsi_level, eh? It might make sense to just not
+ * scan any SCSI_1 device for non-0 luns, but that check would best
+ * go into scsi_alloc_sdev() and just have it return null when asked
+ * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
+ *
+ if ((sdevscan->scsi_level < SCSI_1_CCS) &&
+ ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
+ == 0))
+ return;
+ */
+ /*
+ * If this device is known to support multiple units, override
+ * the other settings, and scan all of them.
+ */
+ if (bflags & BLIST_FORCELUN)
+ max_dev_lun = shost->max_lun;
+ /*
+ * REGAL CDC-4X: avoid hang after LUN 4
+ */
+ if (bflags & BLIST_MAX5LUN)
+ max_dev_lun = min(5U, max_dev_lun);
+ /*
+ * Do not scan SCSI-2 or lower device past LUN 7, unless
+ * BLIST_LARGELUN.
+ */
+ if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
+ max_dev_lun = min(8U, max_dev_lun);
+
+ /*
+ * Stop scanning at 255 unless BLIST_SCSI3LUN
+ */
+ if (!(bflags & BLIST_SCSI3LUN))
+ max_dev_lun = min(256U, max_dev_lun);
+
+ /*
+ * We have already scanned LUN 0, so start at LUN 1. Keep scanning
+ * until we reach the max, or no LUN is found and we are not
+ * sparse_lun.
+ */
+ for (lun = 1; lun < max_dev_lun; ++lun)
+ if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
+ NULL) != SCSI_SCAN_LUN_PRESENT) &&
+ !sparse_lun)
+ return;
+}
+
+/**
+ * scsilun_to_int - convert a scsi_lun to an int
+ * @scsilun: struct scsi_lun to be converted.
+ *
+ * Description:
+ * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
+ * integer, and return the result. The caller must check for
+ * truncation before using this function.
+ *
+ * Notes:
+ * For a description of the LUN format, post SCSI-3 see the SCSI
+ * Architecture Model, for SCSI-3 see the SCSI Controller Commands.
+ *
+ * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function
+ * returns the integer: 0x0b03d204
+ *
+ * This encoding will return a standard integer LUN for LUNs smaller
+ * than 256, which typically use a single level LUN structure with
+ * addressing method 0.
+ **/
+u64 scsilun_to_int(struct scsi_lun *scsilun)
+{
+ int i;
+ u64 lun;
+
+ lun = 0;
+ for (i = 0; i < sizeof(lun); i += 2)
+ lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
+ ((u64)scsilun->scsi_lun[i + 1] << (i * 8)));
+ return lun;
+}
+EXPORT_SYMBOL(scsilun_to_int);
+
+/**
+ * int_to_scsilun - reverts an int into a scsi_lun
+ * @lun: integer to be reverted
+ * @scsilun: struct scsi_lun to be set.
+ *
+ * Description:
+ * Reverts the functionality of the scsilun_to_int, which packed
+ * an 8-byte lun value into an int. This routine unpacks the int
+ * back into the lun value.
+ *
+ * Notes:
+ * Given an integer : 0x0b03d204, this function returns a
+ * struct scsi_lun of: d2 04 0b 03 00 00 00 00
+ *
+ **/
+void int_to_scsilun(u64 lun, struct scsi_lun *scsilun)
+{
+ int i;
+
+ memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
+
+ for (i = 0; i < sizeof(lun); i += 2) {
+ scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
+ scsilun->scsi_lun[i+1] = lun & 0xFF;
+ lun = lun >> 16;
+ }
+}
+EXPORT_SYMBOL(int_to_scsilun);
+
+/**
+ * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
+ * @starget: which target
+ * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
+ * @rescan: nonzero if we can skip code only needed on first scan
+ *
+ * Description:
+ * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
+ * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
+ *
+ * If BLINK_REPORTLUN2 is set, scan a target that supports more than 8
+ * LUNs even if it's older than SCSI-3.
+ * If BLIST_NOREPORTLUN is set, return 1 always.
+ * If BLIST_NOLUN is set, return 0 always.
+ * If starget->no_report_luns is set, return 1 always.
+ *
+ * Return:
+ * 0: scan completed (or no memory, so further scanning is futile)
+ * 1: could not scan with REPORT LUN
+ **/
+static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+ int rescan)
+{
+ char devname[64];
+ unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+ unsigned int length;
+ u64 lun;
+ unsigned int num_luns;
+ unsigned int retries;
+ int result;
+ struct scsi_lun *lunp, *lun_data;
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ int ret = 0;
+
+ /*
+ * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
+ * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
+ * support more than 8 LUNs.
+ * Don't attempt if the target doesn't support REPORT LUNS.
+ */
+ if (bflags & BLIST_NOREPORTLUN)
+ return 1;
+ if (starget->scsi_level < SCSI_2 &&
+ starget->scsi_level != SCSI_UNKNOWN)
+ return 1;
+ if (starget->scsi_level < SCSI_3 &&
+ (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
+ return 1;
+ if (bflags & BLIST_NOLUN)
+ return 0;
+ if (starget->no_report_luns)
+ return 1;
+
+ if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
+ sdev = scsi_alloc_sdev(starget, 0, NULL);
+ if (!sdev)
+ return 0;
+ if (scsi_device_get(sdev)) {
+ __scsi_remove_device(sdev);
+ return 0;
+ }
+ }
+
+ sprintf(devname, "host %d channel %d id %d",
+ shost->host_no, sdev->channel, sdev->id);
+
+ /*
+ * Allocate enough to hold the header (the same size as one scsi_lun)
+ * plus the number of luns we are requesting. 511 was the default
+ * value of the now removed max_report_luns parameter.
+ */
+ length = (511 + 1) * sizeof(struct scsi_lun);
+retry:
+ lun_data = kmalloc(length, GFP_KERNEL |
+ (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
+ if (!lun_data) {
+ printk(ALLOC_FAILURE_MSG, __func__);
+ goto out;
+ }
+
+ scsi_cmd[0] = REPORT_LUNS;
+
+ /*
+ * bytes 1 - 5: reserved, set to zero.
+ */
+ memset(&scsi_cmd[1], 0, 5);
+
+ /*
+ * bytes 6 - 9: length of the command.
+ */
+ put_unaligned_be32(length, &scsi_cmd[6]);
+
+ scsi_cmd[10] = 0; /* reserved */
+ scsi_cmd[11] = 0; /* control */
+
+ /*
+ * We can get a UNIT ATTENTION, for example a power on/reset, so
+ * retry a few times (like sd.c does for TEST UNIT READY).
+ * Experience shows some combinations of adapter/devices get at
+ * least two power on/resets.
+ *
+ * Illegal requests (for devices that do not support REPORT LUNS)
+ * should come through as a check condition, and will not generate
+ * a retry.
+ */
+ for (retries = 0; retries < 3; retries++) {
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: Sending REPORT LUNS to (try %d)\n",
+ retries));
+
+ result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+ lun_data, length, &sshdr,
+ SCSI_TIMEOUT + 4 * HZ, 3, NULL);
+
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: REPORT LUNS"
+ " %s (try %d) result 0x%x\n",
+ result ? "failed" : "successful",
+ retries, result));
+ if (result == 0)
+ break;
+ else if (scsi_sense_valid(&sshdr)) {
+ if (sshdr.sense_key != UNIT_ATTENTION)
+ break;
+ }
+ }
+
+ if (result) {
+ /*
+ * The device probably does not support a REPORT LUN command
+ */
+ ret = 1;
+ goto out_err;
+ }
+
+ /*
+ * Get the length from the first four bytes of lun_data.
+ */
+ if (get_unaligned_be32(lun_data->scsi_lun) +
+ sizeof(struct scsi_lun) > length) {
+ length = get_unaligned_be32(lun_data->scsi_lun) +
+ sizeof(struct scsi_lun);
+ kfree(lun_data);
+ goto retry;
+ }
+ length = get_unaligned_be32(lun_data->scsi_lun);
+
+ num_luns = (length / sizeof(struct scsi_lun));
+
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: REPORT LUN scan\n"));
+
+ /*
+ * Scan the luns in lun_data. The entry at offset 0 is really
+ * the header, so start at 1 and go up to and including num_luns.
+ */
+ for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
+ lun = scsilun_to_int(lunp);
+
+ if (lun > sdev->host->max_lun) {
+ sdev_printk(KERN_WARNING, sdev,
+ "lun%llu has a LUN larger than"
+ " allowed by the host adapter\n", lun);
+ } else {
+ int res;
+
+ res = scsi_probe_and_add_lun(starget,
+ lun, NULL, NULL, rescan, NULL);
+ if (res == SCSI_SCAN_NO_RESPONSE) {
+ /*
+ * Got some results, but now none, abort.
+ */
+ sdev_printk(KERN_ERR, sdev,
+ "Unexpected response"
+ " from lun %llu while scanning, scan"
+ " aborted\n", (unsigned long long)lun);
+ break;
+ }
+ }
+ }
+
+ out_err:
+ kfree(lun_data);
+ out:
+ scsi_device_put(sdev);
+ if (scsi_device_created(sdev))
+ /*
+ * the sdev we used didn't appear in the report luns scan
+ */
+ __scsi_remove_device(sdev);
+ return ret;
+}
+
+struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
+ uint id, u64 lun, void *hostdata)
+{
+ struct scsi_device *sdev = ERR_PTR(-ENODEV);
+ struct device *parent = &shost->shost_gendev;
+ struct scsi_target *starget;
+
+ if (strncmp(scsi_scan_type, "none", 4) == 0)
+ return ERR_PTR(-ENODEV);
+
+ starget = scsi_alloc_target(parent, channel, id);
+ if (!starget)
+ return ERR_PTR(-ENOMEM);
+ scsi_autopm_get_target(starget);
+
+ mutex_lock(&shost->scan_mutex);
+ if (!shost->async_scan)
+ scsi_complete_async_scans();
+
+ if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
+ scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
+ scsi_autopm_put_host(shost);
+ }
+ mutex_unlock(&shost->scan_mutex);
+ scsi_autopm_put_target(starget);
+ /*
+ * paired with scsi_alloc_target(). Target will be destroyed unless
+ * scsi_probe_and_add_lun made an underlying device visible
+ */
+ scsi_target_reap(starget);
+ put_device(&starget->dev);
+
+ return sdev;
+}
+EXPORT_SYMBOL(__scsi_add_device);
+
+int scsi_add_device(struct Scsi_Host *host, uint channel,
+ uint target, u64 lun)
+{
+ struct scsi_device *sdev =
+ __scsi_add_device(host, channel, target, lun, NULL);
+ if (IS_ERR(sdev))
+ return PTR_ERR(sdev);
+
+ scsi_device_put(sdev);
+ return 0;
+}
+EXPORT_SYMBOL(scsi_add_device);
+
+void scsi_rescan_device(struct device *dev)
+{
+ device_lock(dev);
+ if (dev->driver && try_module_get(dev->driver->owner)) {
+ struct scsi_driver *drv = to_scsi_driver(dev->driver);
+
+ if (drv->rescan)
+ drv->rescan(dev);
+ module_put(dev->driver->owner);
+ }
+ device_unlock(dev);
+}
+EXPORT_SYMBOL(scsi_rescan_device);
+
+static void __scsi_scan_target(struct device *parent, unsigned int channel,
+ unsigned int id, u64 lun, int rescan)
+{
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ int bflags = 0;
+ int res;
+ struct scsi_target *starget;
+
+ if (shost->this_id == id)
+ /*
+ * Don't scan the host adapter
+ */
+ return;
+
+ starget = scsi_alloc_target(parent, channel, id);
+ if (!starget)
+ return;
+ scsi_autopm_get_target(starget);
+
+ if (lun != SCAN_WILD_CARD) {
+ /*
+ * Scan for a specific host/chan/id/lun.
+ */
+ scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
+ goto out_reap;
+ }
+
+ /*
+ * Scan LUN 0, if there is some response, scan further. Ideally, we
+ * would not configure LUN 0 until all LUNs are scanned.
+ */
+ res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
+ if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
+ if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
+ /*
+ * The REPORT LUN did not scan the target,
+ * do a sequential scan.
+ */
+ scsi_sequential_lun_scan(starget, bflags,
+ starget->scsi_level, rescan);
+ }
+
+ out_reap:
+ scsi_autopm_put_target(starget);
+ /*
+ * paired with scsi_alloc_target(): determine if the target has
+ * any children at all and if not, nuke it
+ */
+ scsi_target_reap(starget);
+
+ put_device(&starget->dev);
+}
+
+/**
+ * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
+ * @parent: host to scan
+ * @channel: channel to scan
+ * @id: target id to scan
+ * @lun: Specific LUN to scan or SCAN_WILD_CARD
+ * @rescan: passed to LUN scanning routines
+ *
+ * Description:
+ * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
+ * and possibly all LUNs on the target id.
+ *
+ * First try a REPORT LUN scan, if that does not scan the target, do a
+ * sequential scan of LUNs on the target id.
+ **/
+void scsi_scan_target(struct device *parent, unsigned int channel,
+ unsigned int id, u64 lun, int rescan)
+{
+ struct Scsi_Host *shost = dev_to_shost(parent);
+
+ if (strncmp(scsi_scan_type, "none", 4) == 0)
+ return;
+
+ mutex_lock(&shost->scan_mutex);
+ if (!shost->async_scan)
+ scsi_complete_async_scans();
+
+ if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
+ __scsi_scan_target(parent, channel, id, lun, rescan);
+ scsi_autopm_put_host(shost);
+ }
+ mutex_unlock(&shost->scan_mutex);
+}
+EXPORT_SYMBOL(scsi_scan_target);
+
+static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
+ unsigned int id, u64 lun, int rescan)
+{
+ uint order_id;
+
+ if (id == SCAN_WILD_CARD)
+ for (id = 0; id < shost->max_id; ++id) {
+ /*
+ * XXX adapter drivers when possible (FCP, iSCSI)
+ * could modify max_id to match the current max,
+ * not the absolute max.
+ *
+ * XXX add a shost id iterator, so for example,
+ * the FC ID can be the same as a target id
+ * without a huge overhead of sparse id's.
+ */
+ if (shost->reverse_ordering)
+ /*
+ * Scan from high to low id.
+ */
+ order_id = shost->max_id - id - 1;
+ else
+ order_id = id;
+ __scsi_scan_target(&shost->shost_gendev, channel,
+ order_id, lun, rescan);
+ }
+ else
+ __scsi_scan_target(&shost->shost_gendev, channel,
+ id, lun, rescan);
+}
+
+int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
+ unsigned int id, u64 lun, int rescan)
+{
+ SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
+ "%s: <%u:%u:%llu>\n",
+ __func__, channel, id, lun));
+
+ if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
+ ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
+ ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
+ return -EINVAL;
+
+ mutex_lock(&shost->scan_mutex);
+ if (!shost->async_scan)
+ scsi_complete_async_scans();
+
+ if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
+ if (channel == SCAN_WILD_CARD)
+ for (channel = 0; channel <= shost->max_channel;
+ channel++)
+ scsi_scan_channel(shost, channel, id, lun,
+ rescan);
+ else
+ scsi_scan_channel(shost, channel, id, lun, rescan);
+ scsi_autopm_put_host(shost);
+ }
+ mutex_unlock(&shost->scan_mutex);
+
+ return 0;
+}
+
+static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ shost_for_each_device(sdev, shost) {
+ /* target removed before the device could be added */
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
+ /* If device is already visible, skip adding it to sysfs */
+ if (sdev->is_visible)
+ continue;
+ if (!scsi_host_scan_allowed(shost) ||
+ scsi_sysfs_add_sdev(sdev) != 0)
+ __scsi_remove_device(sdev);
+ }
+}
+
+/**
+ * scsi_prep_async_scan - prepare for an async scan
+ * @shost: the host which will be scanned
+ * Returns: a cookie to be passed to scsi_finish_async_scan()
+ *
+ * Tells the midlayer this host is going to do an asynchronous scan.
+ * It reserves the host's position in the scanning list and ensures
+ * that other asynchronous scans started after this one won't affect the
+ * ordering of the discovered devices.
+ */
+static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+{
+ struct async_scan_data *data;
+ unsigned long flags;
+
+ if (strncmp(scsi_scan_type, "sync", 4) == 0)
+ return NULL;
+
+ if (shost->async_scan) {
+ shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto err;
+ data->shost = scsi_host_get(shost);
+ if (!data->shost)
+ goto err;
+ init_completion(&data->prev_finished);
+
+ mutex_lock(&shost->scan_mutex);
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->async_scan = 1;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ mutex_unlock(&shost->scan_mutex);
+
+ spin_lock(&async_scan_lock);
+ if (list_empty(&scanning_hosts))
+ complete(&data->prev_finished);
+ list_add_tail(&data->list, &scanning_hosts);
+ spin_unlock(&async_scan_lock);
+
+ return data;
+
+ err:
+ kfree(data);
+ return NULL;
+}
+
+/**
+ * scsi_finish_async_scan - asynchronous scan has finished
+ * @data: cookie returned from earlier call to scsi_prep_async_scan()
+ *
+ * All the devices currently attached to this host have been found.
+ * This function announces all the devices it has found to the rest
+ * of the system.
+ */
+static void scsi_finish_async_scan(struct async_scan_data *data)
+{
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ if (!data)
+ return;
+
+ shost = data->shost;
+
+ mutex_lock(&shost->scan_mutex);
+
+ if (!shost->async_scan) {
+ shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
+ dump_stack();
+ mutex_unlock(&shost->scan_mutex);
+ return;
+ }
+
+ wait_for_completion(&data->prev_finished);
+
+ scsi_sysfs_add_devices(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->async_scan = 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ mutex_unlock(&shost->scan_mutex);
+
+ spin_lock(&async_scan_lock);
+ list_del(&data->list);
+ if (!list_empty(&scanning_hosts)) {
+ struct async_scan_data *next = list_entry(scanning_hosts.next,
+ struct async_scan_data, list);
+ complete(&next->prev_finished);
+ }
+ spin_unlock(&async_scan_lock);
+
+ scsi_autopm_put_host(shost);
+ scsi_host_put(shost);
+ kfree(data);
+}
+
+static void do_scsi_scan_host(struct Scsi_Host *shost)
+{
+ if (shost->hostt->scan_finished) {
+ unsigned long start = jiffies;
+ if (shost->hostt->scan_start)
+ shost->hostt->scan_start(shost);
+
+ while (!shost->hostt->scan_finished(shost, jiffies - start))
+ msleep(10);
+ } else {
+ scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
+ SCAN_WILD_CARD, 0);
+ }
+}
+
+static void do_scan_async(void *_data, async_cookie_t c)
+{
+ struct async_scan_data *data = _data;
+ struct Scsi_Host *shost = data->shost;
+
+ do_scsi_scan_host(shost);
+ scsi_finish_async_scan(data);
+}
+
+/**
+ * scsi_scan_host - scan the given adapter
+ * @shost: adapter to scan
+ **/
+void scsi_scan_host(struct Scsi_Host *shost)
+{
+ struct async_scan_data *data;
+
+ if (strncmp(scsi_scan_type, "none", 4) == 0)
+ return;
+ if (scsi_autopm_get_host(shost) < 0)
+ return;
+
+ data = scsi_prep_async_scan(shost);
+ if (!data) {
+ do_scsi_scan_host(shost);
+ scsi_autopm_put_host(shost);
+ return;
+ }
+
+ /* register with the async subsystem so wait_for_device_probe()
+ * will flush this work
+ */
+ async_schedule(do_scan_async, data);
+
+ /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
+}
+EXPORT_SYMBOL(scsi_scan_host);
+
+void scsi_forget_host(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ restart:
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ __scsi_remove_device(sdev);
+ goto restart;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
+ * @shost: Host that needs a scsi_device
+ *
+ * Lock status: None assumed.
+ *
+ * Returns: The scsi_device or NULL
+ *
+ * Notes:
+ * Attach a single scsi_device to the Scsi_Host - this should
+ * be made to look like a "pseudo-device" that points to the
+ * HA itself.
+ *
+ * Note - this device is not accessible from any high-level
+ * drivers (including generics), which is probably not
+ * optimal. We can add hooks later to attach.
+ */
+struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev = NULL;
+ struct scsi_target *starget;
+
+ mutex_lock(&shost->scan_mutex);
+ if (!scsi_host_scan_allowed(shost))
+ goto out;
+ starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
+ if (!starget)
+ goto out;
+
+ sdev = scsi_alloc_sdev(starget, 0, NULL);
+ if (sdev)
+ sdev->borken = 0;
+ else
+ scsi_target_reap(starget);
+ put_device(&starget->dev);
+ out:
+ mutex_unlock(&shost->scan_mutex);
+ return sdev;
+}
+EXPORT_SYMBOL(scsi_get_host_dev);
+
+/**
+ * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
+ * @sdev: Host device to be freed
+ *
+ * Lock status: None assumed.
+ *
+ * Returns: Nothing
+ */
+void scsi_free_host_dev(struct scsi_device *sdev)
+{
+ BUG_ON(sdev->id != sdev->host->this_id);
+
+ __scsi_remove_device(sdev);
+}
+EXPORT_SYMBOL(scsi_free_host_dev);
+
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
new file mode 100644
index 000000000..546f16299
--- /dev/null
+++ b/drivers/scsi/scsi_sysctl.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2003 Christoph Hellwig.
+ * Released under GPL v2.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sysctl.h>
+
+#include "scsi_logging.h"
+#include "scsi_priv.h"
+
+
+static struct ctl_table scsi_table[] = {
+ { .procname = "logging_level",
+ .data = &scsi_logging_level,
+ .maxlen = sizeof(scsi_logging_level),
+ .mode = 0644,
+ .proc_handler = proc_dointvec },
+ { }
+};
+
+static struct ctl_table scsi_dir_table[] = {
+ { .procname = "scsi",
+ .mode = 0555,
+ .child = scsi_table },
+ { }
+};
+
+static struct ctl_table scsi_root_table[] = {
+ { .procname = "dev",
+ .mode = 0555,
+ .child = scsi_dir_table },
+ { }
+};
+
+static struct ctl_table_header *scsi_table_header;
+
+int __init scsi_init_sysctl(void)
+{
+ scsi_table_header = register_sysctl_table(scsi_root_table);
+ if (!scsi_table_header)
+ return -ENOMEM;
+ return 0;
+}
+
+void scsi_exit_sysctl(void)
+{
+ unregister_sysctl_table(scsi_table_header);
+}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
new file mode 100644
index 000000000..1ac38e73d
--- /dev/null
+++ b/drivers/scsi/scsi_sysfs.c
@@ -0,0 +1,1275 @@
+/*
+ * scsi_sysfs.c
+ *
+ * SCSI sysfs interface routines.
+ *
+ * Created to pull SCSI mid layer sysfs routines into one file.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_driver.h>
+
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+static struct device_type scsi_dev_type;
+
+static const struct {
+ enum scsi_device_state value;
+ char *name;
+} sdev_states[] = {
+ { SDEV_CREATED, "created" },
+ { SDEV_RUNNING, "running" },
+ { SDEV_CANCEL, "cancel" },
+ { SDEV_DEL, "deleted" },
+ { SDEV_QUIESCE, "quiesce" },
+ { SDEV_OFFLINE, "offline" },
+ { SDEV_TRANSPORT_OFFLINE, "transport-offline" },
+ { SDEV_BLOCK, "blocked" },
+ { SDEV_CREATED_BLOCK, "created-blocked" },
+};
+
+const char *scsi_device_state_name(enum scsi_device_state state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
+ if (sdev_states[i].value == state) {
+ name = sdev_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+static const struct {
+ enum scsi_host_state value;
+ char *name;
+} shost_states[] = {
+ { SHOST_CREATED, "created" },
+ { SHOST_RUNNING, "running" },
+ { SHOST_CANCEL, "cancel" },
+ { SHOST_DEL, "deleted" },
+ { SHOST_RECOVERY, "recovery" },
+ { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
+ { SHOST_DEL_RECOVERY, "deleted/recovery", },
+};
+const char *scsi_host_state_name(enum scsi_host_state state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
+ if (shost_states[i].value == state) {
+ name = shost_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+static int check_set(unsigned long long *val, char *src)
+{
+ char *last;
+
+ if (strncmp(src, "-", 20) == 0) {
+ *val = SCAN_WILD_CARD;
+ } else {
+ /*
+ * Doesn't check for int overflow
+ */
+ *val = simple_strtoull(src, &last, 0);
+ if (*last != '\0')
+ return 1;
+ }
+ return 0;
+}
+
+static int scsi_scan(struct Scsi_Host *shost, const char *str)
+{
+ char s1[15], s2[15], s3[17], junk;
+ unsigned long long channel, id, lun;
+ int res;
+
+ res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk);
+ if (res != 3)
+ return -EINVAL;
+ if (check_set(&channel, s1))
+ return -EINVAL;
+ if (check_set(&id, s2))
+ return -EINVAL;
+ if (check_set(&lun, s3))
+ return -EINVAL;
+ if (shost->transportt->user_scan)
+ res = shost->transportt->user_scan(shost, channel, id, lun);
+ else
+ res = scsi_scan_host_selected(shost, channel, id, lun, 1);
+ return res;
+}
+
+/*
+ * shost_show_function: macro to create an attr function that can be used to
+ * show a non-bit field.
+ */
+#define shost_show_function(name, field, format_string) \
+static ssize_t \
+show_##name (struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct Scsi_Host *shost = class_to_shost(dev); \
+ return snprintf (buf, 20, format_string, shost->field); \
+}
+
+/*
+ * shost_rd_attr: macro to create a function and attribute variable for a
+ * read only field.
+ */
+#define shost_rd_attr2(name, field, format_string) \
+ shost_show_function(name, field, format_string) \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+#define shost_rd_attr(field, format_string) \
+shost_rd_attr2(field, field, format_string)
+
+/*
+ * Create the actual show/store functions and data structures.
+ */
+
+static ssize_t
+store_scan(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ int res;
+
+ res = scsi_scan(shost, buf);
+ if (res == 0)
+ res = count;
+ return res;
+};
+static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
+
+static ssize_t
+store_shost_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ enum scsi_host_state state = 0;
+
+ for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
+ const int len = strlen(shost_states[i].name);
+ if (strncmp(shost_states[i].name, buf, len) == 0 &&
+ buf[len] == '\n') {
+ state = shost_states[i].value;
+ break;
+ }
+ }
+ if (!state)
+ return -EINVAL;
+
+ if (scsi_host_set_state(shost, state))
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t
+show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ const char *name = scsi_host_state_name(shost->shost_state);
+
+ if (!name)
+ return -EINVAL;
+
+ return snprintf(buf, 20, "%s\n", name);
+}
+
+/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
+struct device_attribute dev_attr_hstate =
+ __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
+
+static ssize_t
+show_shost_mode(unsigned int mode, char *buf)
+{
+ ssize_t len = 0;
+
+ if (mode & MODE_INITIATOR)
+ len = sprintf(buf, "%s", "Initiator");
+
+ if (mode & MODE_TARGET)
+ len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static ssize_t
+show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ unsigned int supported_mode = shost->hostt->supported_mode;
+
+ if (supported_mode == MODE_UNKNOWN)
+ /* by default this should be initiator */
+ supported_mode = MODE_INITIATOR;
+
+ return show_shost_mode(supported_mode, buf);
+}
+
+static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
+
+static ssize_t
+show_shost_active_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ if (shost->active_mode == MODE_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+ else
+ return show_shost_mode(shost->active_mode, buf);
+}
+
+static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+
+static int check_reset_type(const char *str)
+{
+ if (sysfs_streq(str, "adapter"))
+ return SCSI_ADAPTER_RESET;
+ else if (sysfs_streq(str, "firmware"))
+ return SCSI_FIRMWARE_RESET;
+ else
+ return 0;
+}
+
+static ssize_t
+store_host_reset(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct scsi_host_template *sht = shost->hostt;
+ int ret = -EINVAL;
+ int type;
+
+ type = check_reset_type(buf);
+ if (!type)
+ goto exit_store_host_reset;
+
+ if (sht->host_reset)
+ ret = sht->host_reset(shost, type);
+
+exit_store_host_reset:
+ if (ret == 0)
+ ret = count;
+ return ret;
+}
+
+static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
+
+static ssize_t
+show_shost_eh_deadline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ if (shost->eh_deadline == -1)
+ return snprintf(buf, strlen("off") + 2, "off\n");
+ return sprintf(buf, "%u\n", shost->eh_deadline / HZ);
+}
+
+static ssize_t
+store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ int ret = -EINVAL;
+ unsigned long deadline, flags;
+
+ if (shost->transportt &&
+ (shost->transportt->eh_strategy_handler ||
+ !shost->hostt->eh_host_reset_handler))
+ return ret;
+
+ if (!strncmp(buf, "off", strlen("off")))
+ deadline = -1;
+ else {
+ ret = kstrtoul(buf, 10, &deadline);
+ if (ret)
+ return ret;
+ if (deadline * HZ > UINT_MAX)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_in_recovery(shost))
+ ret = -EBUSY;
+ else {
+ if (deadline == -1)
+ shost->eh_deadline = -1;
+ else
+ shost->eh_deadline = deadline * HZ;
+
+ ret = count;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return ret;
+}
+
+static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
+
+shost_rd_attr(use_blk_mq, "%d\n");
+shost_rd_attr(unique_id, "%u\n");
+shost_rd_attr(cmd_per_lun, "%hd\n");
+shost_rd_attr(can_queue, "%hd\n");
+shost_rd_attr(sg_tablesize, "%hu\n");
+shost_rd_attr(sg_prot_tablesize, "%hu\n");
+shost_rd_attr(unchecked_isa_dma, "%d\n");
+shost_rd_attr(prot_capabilities, "%u\n");
+shost_rd_attr(prot_guard_type, "%hd\n");
+shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
+
+static ssize_t
+show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
+}
+static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
+
+static struct attribute *scsi_sysfs_shost_attrs[] = {
+ &dev_attr_use_blk_mq.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_host_busy.attr,
+ &dev_attr_cmd_per_lun.attr,
+ &dev_attr_can_queue.attr,
+ &dev_attr_sg_tablesize.attr,
+ &dev_attr_sg_prot_tablesize.attr,
+ &dev_attr_unchecked_isa_dma.attr,
+ &dev_attr_proc_name.attr,
+ &dev_attr_scan.attr,
+ &dev_attr_hstate.attr,
+ &dev_attr_supported_mode.attr,
+ &dev_attr_active_mode.attr,
+ &dev_attr_prot_capabilities.attr,
+ &dev_attr_prot_guard_type.attr,
+ &dev_attr_host_reset.attr,
+ &dev_attr_eh_deadline.attr,
+ NULL
+};
+
+struct attribute_group scsi_shost_attr_group = {
+ .attrs = scsi_sysfs_shost_attrs,
+};
+
+const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
+ &scsi_shost_attr_group,
+ NULL
+};
+
+static void scsi_device_cls_release(struct device *class_dev)
+{
+ struct scsi_device *sdev;
+
+ sdev = class_to_sdev(class_dev);
+ put_device(&sdev->sdev_gendev);
+}
+
+static void scsi_device_dev_release_usercontext(struct work_struct *work)
+{
+ struct scsi_device *sdev;
+ struct device *parent;
+ struct list_head *this, *tmp;
+ unsigned long flags;
+
+ sdev = container_of(work, struct scsi_device, ew.work);
+
+ parent = sdev->sdev_gendev.parent;
+
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ list_del(&sdev->siblings);
+ list_del(&sdev->same_target_siblings);
+ list_del(&sdev->starved_entry);
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+
+ cancel_work_sync(&sdev->event_work);
+
+ list_for_each_safe(this, tmp, &sdev->event_list) {
+ struct scsi_event *evt;
+
+ evt = list_entry(this, struct scsi_event, node);
+ list_del(&evt->node);
+ kfree(evt);
+ }
+
+ blk_put_queue(sdev->request_queue);
+ /* NULL queue means the device can't be used */
+ sdev->request_queue = NULL;
+
+ kfree(sdev->vpd_pg83);
+ kfree(sdev->vpd_pg80);
+ kfree(sdev->inquiry);
+ kfree(sdev);
+
+ if (parent)
+ put_device(parent);
+}
+
+static void scsi_device_dev_release(struct device *dev)
+{
+ struct scsi_device *sdp = to_scsi_device(dev);
+ execute_in_process_context(scsi_device_dev_release_usercontext,
+ &sdp->ew);
+}
+
+static struct class sdev_class = {
+ .name = "scsi_device",
+ .dev_release = scsi_device_cls_release,
+};
+
+/* all probing is done in the individual ->probe routines */
+static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
+{
+ struct scsi_device *sdp;
+
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ sdp = to_scsi_device(dev);
+ if (sdp->no_uld_attach)
+ return 0;
+ return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
+}
+
+static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct scsi_device *sdev;
+
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
+ return 0;
+}
+
+struct bus_type scsi_bus_type = {
+ .name = "scsi",
+ .match = scsi_bus_match,
+ .uevent = scsi_bus_uevent,
+#ifdef CONFIG_PM
+ .pm = &scsi_bus_pm_ops,
+#endif
+};
+EXPORT_SYMBOL_GPL(scsi_bus_type);
+
+int scsi_sysfs_register(void)
+{
+ int error;
+
+ error = bus_register(&scsi_bus_type);
+ if (!error) {
+ error = class_register(&sdev_class);
+ if (error)
+ bus_unregister(&scsi_bus_type);
+ }
+
+ return error;
+}
+
+void scsi_sysfs_unregister(void)
+{
+ class_unregister(&sdev_class);
+ bus_unregister(&scsi_bus_type);
+}
+
+/*
+ * sdev_show_function: macro to create an attr function that can be used to
+ * show a non-bit field.
+ */
+#define sdev_show_function(field, format_string) \
+static ssize_t \
+sdev_show_##field (struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct scsi_device *sdev; \
+ sdev = to_scsi_device(dev); \
+ return snprintf (buf, 20, format_string, sdev->field); \
+} \
+
+/*
+ * sdev_rd_attr: macro to create a function and attribute variable for a
+ * read only field.
+ */
+#define sdev_rd_attr(field, format_string) \
+ sdev_show_function(field, format_string) \
+static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
+
+
+/*
+ * sdev_rw_attr: create a function and attribute variable for a
+ * read/write field.
+ */
+#define sdev_rw_attr(field, format_string) \
+ sdev_show_function(field, format_string) \
+ \
+static ssize_t \
+sdev_store_##field (struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct scsi_device *sdev; \
+ sdev = to_scsi_device(dev); \
+ sscanf (buf, format_string, &sdev->field); \
+ return count; \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
+
+/* Currently we don't export bit fields, but we might in future,
+ * so leave this code in */
+#if 0
+/*
+ * sdev_rd_attr: create a function and attribute variable for a
+ * read/write bit field.
+ */
+#define sdev_rw_attr_bit(field) \
+ sdev_show_function(field, "%d\n") \
+ \
+static ssize_t \
+sdev_store_##field (struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret; \
+ struct scsi_device *sdev; \
+ ret = scsi_sdev_check_buf_bit(buf); \
+ if (ret >= 0) { \
+ sdev = to_scsi_device(dev); \
+ sdev->field = ret; \
+ ret = count; \
+ } \
+ return ret; \
+} \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
+
+/*
+ * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
+ * else return -EINVAL.
+ */
+static int scsi_sdev_check_buf_bit(const char *buf)
+{
+ if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
+ if (buf[0] == '1')
+ return 1;
+ else if (buf[0] == '0')
+ return 0;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+}
+#endif
+/*
+ * Create the actual show/store functions and data structures.
+ */
+sdev_rd_attr (type, "%d\n");
+sdev_rd_attr (scsi_level, "%d\n");
+sdev_rd_attr (vendor, "%.8s\n");
+sdev_rd_attr (model, "%.16s\n");
+sdev_rd_attr (rev, "%.4s\n");
+
+static ssize_t
+sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+}
+static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
+
+static ssize_t
+sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
+}
+static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
+
+/*
+ * TODO: can we make these symlinks to the block layer ones?
+ */
+static ssize_t
+sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
+}
+
+static ssize_t
+sdev_store_timeout (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev;
+ int timeout;
+ sdev = to_scsi_device(dev);
+ sscanf (buf, "%d\n", &timeout);
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+ return count;
+}
+static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
+
+static ssize_t
+sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ);
+}
+
+static ssize_t
+sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev;
+ unsigned int eh_timeout;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ sdev = to_scsi_device(dev);
+ err = kstrtouint(buf, 10, &eh_timeout);
+ if (err)
+ return err;
+ sdev->eh_timeout = eh_timeout * HZ;
+
+ return count;
+}
+static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout);
+
+static ssize_t
+store_rescan_field (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_rescan_device(dev);
+ return count;
+}
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
+
+static ssize_t
+sdev_store_delete(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (device_remove_file_self(dev, attr))
+ scsi_remove_device(to_scsi_device(dev));
+ return count;
+};
+static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
+
+static ssize_t
+store_state_field(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i;
+ struct scsi_device *sdev = to_scsi_device(dev);
+ enum scsi_device_state state = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
+ const int len = strlen(sdev_states[i].name);
+ if (strncmp(sdev_states[i].name, buf, len) == 0 &&
+ buf[len] == '\n') {
+ state = sdev_states[i].value;
+ break;
+ }
+ }
+ if (!state)
+ return -EINVAL;
+
+ if (scsi_device_set_state(sdev, state))
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t
+show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ const char *name = scsi_device_state_name(sdev->sdev_state);
+
+ if (!name)
+ return -EINVAL;
+
+ return snprintf(buf, 20, "%s\n", name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
+
+static ssize_t
+show_queue_type_field(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ const char *name = "none";
+
+ if (sdev->simple_tags)
+ name = "simple";
+
+ return snprintf(buf, 20, "%s\n", name);
+}
+
+static ssize_t
+store_queue_type_field(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->tagged_supported)
+ return -EINVAL;
+
+ sdev_printk(KERN_INFO, sdev,
+ "ignoring write to deprecated queue_type attribute");
+ return count;
+}
+
+static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
+ store_queue_type_field);
+
+#define sdev_vpd_pg_attr(_page) \
+static ssize_t \
+show_vpd_##_page(struct file *filp, struct kobject *kobj, \
+ struct bin_attribute *bin_attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ struct device *dev = container_of(kobj, struct device, kobj); \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ if (!sdev->vpd_##_page) \
+ return -EINVAL; \
+ return memory_read_from_buffer(buf, count, &off, \
+ sdev->vpd_##_page, \
+ sdev->vpd_##_page##_len); \
+} \
+static struct bin_attribute dev_attr_vpd_##_page = { \
+ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
+ .size = 0, \
+ .read = show_vpd_##_page, \
+};
+
+sdev_vpd_pg_attr(pg83);
+sdev_vpd_pg_attr(pg80);
+
+static ssize_t
+show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
+}
+
+static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
+
+#define show_sdev_iostat(field) \
+static ssize_t \
+show_iostat_##field(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ unsigned long long count = atomic_read(&sdev->field); \
+ return snprintf(buf, 20, "0x%llx\n", count); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
+
+show_sdev_iostat(iorequest_cnt);
+show_sdev_iostat(iodone_cnt);
+show_sdev_iostat(ioerr_cnt);
+
+static ssize_t
+sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ sdev = to_scsi_device(dev);
+ return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
+}
+static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
+
+#define DECLARE_EVT_SHOW(name, Cap_name) \
+static ssize_t \
+sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
+ return snprintf(buf, 20, "%d\n", val); \
+}
+
+#define DECLARE_EVT_STORE(name, Cap_name) \
+static ssize_t \
+sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
+ const char *buf, size_t count) \
+{ \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ int val = simple_strtoul(buf, NULL, 0); \
+ if (val == 0) \
+ clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
+ else if (val == 1) \
+ set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
+ else \
+ return -EINVAL; \
+ return count; \
+}
+
+#define DECLARE_EVT(name, Cap_name) \
+ DECLARE_EVT_SHOW(name, Cap_name) \
+ DECLARE_EVT_STORE(name, Cap_name) \
+ static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \
+ sdev_store_evt_##name);
+#define REF_EVT(name) &dev_attr_evt_##name.attr
+
+DECLARE_EVT(media_change, MEDIA_CHANGE)
+DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
+DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
+DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
+DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
+DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
+
+static ssize_t
+sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int depth, retval;
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_host_template *sht = sdev->host->hostt;
+
+ if (!sht->change_queue_depth)
+ return -EINVAL;
+
+ depth = simple_strtoul(buf, NULL, 0);
+
+ if (depth < 1 || depth > sht->can_queue)
+ return -EINVAL;
+
+ retval = sht->change_queue_depth(sdev, depth);
+ if (retval < 0)
+ return retval;
+
+ sdev->max_queue_depth = sdev->queue_depth;
+
+ return count;
+}
+sdev_show_function(queue_depth, "%d\n");
+
+static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
+ sdev_store_queue_depth);
+
+static ssize_t
+sdev_show_queue_ramp_up_period(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev;
+ sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%u\n",
+ jiffies_to_msecs(sdev->queue_ramp_up_period));
+}
+
+static ssize_t
+sdev_store_queue_ramp_up_period(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ unsigned int period;
+
+ if (kstrtouint(buf, 10, &period))
+ return -EINVAL;
+
+ sdev->queue_ramp_up_period = msecs_to_jiffies(period);
+ return period;
+}
+
+static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
+ sdev_show_queue_ramp_up_period,
+ sdev_store_queue_ramp_up_period);
+
+static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+
+ if (attr == &dev_attr_queue_depth.attr &&
+ !sdev->host->hostt->change_queue_depth)
+ return S_IRUGO;
+
+ if (attr == &dev_attr_queue_ramp_up_period.attr &&
+ !sdev->host->hostt->change_queue_depth)
+ return 0;
+
+ return attr->mode;
+}
+
+/* Default template for device attributes. May NOT be modified */
+static struct attribute *scsi_sdev_attrs[] = {
+ &dev_attr_device_blocked.attr,
+ &dev_attr_type.attr,
+ &dev_attr_scsi_level.attr,
+ &dev_attr_device_busy.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_model.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_rescan.attr,
+ &dev_attr_delete.attr,
+ &dev_attr_state.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_eh_timeout.attr,
+ &dev_attr_iocounterbits.attr,
+ &dev_attr_iorequest_cnt.attr,
+ &dev_attr_iodone_cnt.attr,
+ &dev_attr_ioerr_cnt.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_queue_depth.attr,
+ &dev_attr_queue_type.attr,
+ &dev_attr_queue_ramp_up_period.attr,
+ REF_EVT(media_change),
+ REF_EVT(inquiry_change_reported),
+ REF_EVT(capacity_change_reported),
+ REF_EVT(soft_threshold_reached),
+ REF_EVT(mode_parameter_change_reported),
+ REF_EVT(lun_change_reported),
+ NULL
+};
+
+static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg83,
+ &dev_attr_vpd_pg80,
+ NULL
+};
+static struct attribute_group scsi_sdev_attr_group = {
+ .attrs = scsi_sdev_attrs,
+ .bin_attrs = scsi_sdev_bin_attrs,
+ .is_visible = scsi_sdev_attr_is_visible,
+};
+
+static const struct attribute_group *scsi_sdev_attr_groups[] = {
+ &scsi_sdev_attr_group,
+ NULL
+};
+
+static int scsi_target_add(struct scsi_target *starget)
+{
+ int error;
+
+ if (starget->state != STARGET_CREATED)
+ return 0;
+
+ error = device_add(&starget->dev);
+ if (error) {
+ dev_err(&starget->dev, "target device_add failed, error %d\n", error);
+ return error;
+ }
+ transport_add_device(&starget->dev);
+ starget->state = STARGET_RUNNING;
+
+ pm_runtime_set_active(&starget->dev);
+ pm_runtime_enable(&starget->dev);
+ device_enable_async_suspend(&starget->dev);
+
+ return 0;
+}
+
+/**
+ * scsi_sysfs_add_sdev - add scsi device to sysfs
+ * @sdev: scsi_device to add
+ *
+ * Return value:
+ * 0 on Success / non-zero on Failure
+ **/
+int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+{
+ int error, i;
+ struct request_queue *rq = sdev->request_queue;
+ struct scsi_target *starget = sdev->sdev_target;
+
+ error = scsi_device_set_state(sdev, SDEV_RUNNING);
+ if (error)
+ return error;
+
+ error = scsi_target_add(starget);
+ if (error)
+ return error;
+
+ transport_configure_device(&starget->dev);
+
+ device_enable_async_suspend(&sdev->sdev_gendev);
+ scsi_autopm_get_target(starget);
+ pm_runtime_set_active(&sdev->sdev_gendev);
+ pm_runtime_forbid(&sdev->sdev_gendev);
+ pm_runtime_enable(&sdev->sdev_gendev);
+ scsi_autopm_put_target(starget);
+
+ scsi_autopm_get_device(sdev);
+
+ error = device_add(&sdev->sdev_gendev);
+ if (error) {
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add device: %d\n", error);
+ return error;
+ }
+ device_enable_async_suspend(&sdev->sdev_dev);
+ error = device_add(&sdev->sdev_dev);
+ if (error) {
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add class device: %d\n", error);
+ device_del(&sdev->sdev_gendev);
+ return error;
+ }
+ transport_add_device(&sdev->sdev_gendev);
+ sdev->is_visible = 1;
+
+ error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
+
+ if (error)
+ /* we're treating error on bsg register as non-fatal,
+ * so pretend nothing went wrong */
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to register bsg queue, errno=%d\n", error);
+
+ /* add additional host specific attributes */
+ if (sdev->host->hostt->sdev_attrs) {
+ for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
+ error = device_create_file(&sdev->sdev_gendev,
+ sdev->host->hostt->sdev_attrs[i]);
+ if (error)
+ return error;
+ }
+ }
+
+ scsi_autopm_put_device(sdev);
+ return error;
+}
+
+void __scsi_remove_device(struct scsi_device *sdev)
+{
+ struct device *dev = &sdev->sdev_gendev;
+
+ if (sdev->is_visible) {
+ if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
+ return;
+
+ bsg_unregister_queue(sdev->request_queue);
+ device_unregister(&sdev->sdev_dev);
+ transport_remove_device(dev);
+ device_del(dev);
+ } else
+ put_device(&sdev->sdev_dev);
+
+ /*
+ * Stop accepting new requests and wait until all queuecommand() and
+ * scsi_run_queue() invocations have finished before tearing down the
+ * device.
+ */
+ scsi_device_set_state(sdev, SDEV_DEL);
+ blk_cleanup_queue(sdev->request_queue);
+ cancel_work_sync(&sdev->requeue_work);
+
+ if (sdev->host->hostt->slave_destroy)
+ sdev->host->hostt->slave_destroy(sdev);
+ transport_destroy_device(dev);
+
+ /*
+ * Paired with the kref_get() in scsi_sysfs_initialize(). We have
+ * remoed sysfs visibility from the device, so make the target
+ * invisible if this was the last device underneath it.
+ */
+ scsi_target_reap(scsi_target(sdev));
+
+ put_device(dev);
+}
+
+/**
+ * scsi_remove_device - unregister a device from the scsi bus
+ * @sdev: scsi_device to unregister
+ **/
+void scsi_remove_device(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+
+ mutex_lock(&shost->scan_mutex);
+ __scsi_remove_device(sdev);
+ mutex_unlock(&shost->scan_mutex);
+}
+EXPORT_SYMBOL(scsi_remove_device);
+
+static void __scsi_remove_target(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ unsigned long flags;
+ struct scsi_device *sdev;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ restart:
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->channel != starget->channel ||
+ sdev->id != starget->id ||
+ scsi_device_get(sdev))
+ continue;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ goto restart;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * scsi_remove_target - try to remove a target and all its devices
+ * @dev: generic starget or parent of generic stargets to be removed
+ *
+ * Note: This is slightly racy. It is possible that if the user
+ * requests the addition of another device then the target won't be
+ * removed.
+ */
+void scsi_remove_target(struct device *dev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ struct scsi_target *starget, *last = NULL;
+ unsigned long flags;
+
+ /* remove targets being careful to lookup next entry before
+ * deleting the last
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->state == STARGET_DEL)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+ /* assuming new targets arrive at the end */
+ kref_get(&starget->reap_ref);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (last)
+ scsi_target_reap(last);
+ last = starget;
+ __scsi_remove_target(starget);
+ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (last)
+ scsi_target_reap(last);
+}
+EXPORT_SYMBOL(scsi_remove_target);
+
+int scsi_register_driver(struct device_driver *drv)
+{
+ drv->bus = &scsi_bus_type;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL(scsi_register_driver);
+
+int scsi_register_interface(struct class_interface *intf)
+{
+ intf->class = &sdev_class;
+
+ return class_interface_register(intf);
+}
+EXPORT_SYMBOL(scsi_register_interface);
+
+/**
+ * scsi_sysfs_add_host - add scsi host to subsystem
+ * @shost: scsi host struct to add to subsystem
+ **/
+int scsi_sysfs_add_host(struct Scsi_Host *shost)
+{
+ int error, i;
+
+ /* add host specific attributes */
+ if (shost->hostt->shost_attrs) {
+ for (i = 0; shost->hostt->shost_attrs[i]; i++) {
+ error = device_create_file(&shost->shost_dev,
+ shost->hostt->shost_attrs[i]);
+ if (error)
+ return error;
+ }
+ }
+
+ transport_register_device(&shost->shost_gendev);
+ transport_configure_device(&shost->shost_gendev);
+ return 0;
+}
+
+static struct device_type scsi_dev_type = {
+ .name = "scsi_device",
+ .release = scsi_device_dev_release,
+ .groups = scsi_sdev_attr_groups,
+};
+
+void scsi_sysfs_device_initialize(struct scsi_device *sdev)
+{
+ unsigned long flags;
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = sdev->sdev_target;
+
+ device_initialize(&sdev->sdev_gendev);
+ sdev->sdev_gendev.bus = &scsi_bus_type;
+ sdev->sdev_gendev.type = &scsi_dev_type;
+ dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
+ sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+
+ device_initialize(&sdev->sdev_dev);
+ sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
+ sdev->sdev_dev.class = &sdev_class;
+ dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
+ sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+ /*
+ * Get a default scsi_level from the target (derived from sibling
+ * devices). This is the best we can do for guessing how to set
+ * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the
+ * setting doesn't matter, because all the bits are zero anyway.
+ * But it does matter for higher LUNs.
+ */
+ sdev->scsi_level = starget->scsi_level;
+ if (sdev->scsi_level <= SCSI_2 &&
+ sdev->scsi_level != SCSI_UNKNOWN &&
+ !shost->no_scsi2_lun_in_cdb)
+ sdev->lun_in_cdb = 1;
+
+ transport_setup_device(&sdev->sdev_gendev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_add_tail(&sdev->same_target_siblings, &starget->devices);
+ list_add_tail(&sdev->siblings, &shost->__devices);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * device can now only be removed via __scsi_remove_device() so hold
+ * the target. Target will be held in CREATED state until something
+ * beneath it becomes visible (in which case it moves to RUNNING)
+ */
+ kref_get(&starget->reap_ref);
+}
+
+int scsi_is_sdev_device(const struct device *dev)
+{
+ return dev->type == &scsi_dev_type;
+}
+EXPORT_SYMBOL(scsi_is_sdev_device);
+
+/* A blank transport template that is used in drivers that don't
+ * yet implement Transport Attributes */
+struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
new file mode 100644
index 000000000..08bb47b53
--- /dev/null
+++ b/drivers/scsi/scsi_trace.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2010 FUJITSU LIMITED
+ * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/trace_seq.h>
+#include <trace/events/scsi.h>
+
+#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
+#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+
+static const char *
+scsi_trace_misc(struct trace_seq *, unsigned char *, int);
+
+static const char *
+scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((cdb[1] & 0x1F) << 16);
+ lba |= (cdb[2] << 8);
+ lba |= cdb[3];
+ txlen = cdb[4];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu",
+ (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[7] << 8);
+ txlen |= cdb[8];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[6] << 24);
+ txlen |= (cdb[7] << 16);
+ txlen |= (cdb[8] << 8);
+ txlen |= cdb[9];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ txlen |= (cdb[10] << 24);
+ txlen |= (cdb[11] << 16);
+ txlen |= (cdb[12] << 8);
+ txlen |= cdb[13];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME_16)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p), *cmd;
+ sector_t lba = 0, txlen = 0;
+ u32 ei_lbrt = 0;
+
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ cmd = "READ";
+ break;
+ case VERIFY_32:
+ cmd = "VERIFY";
+ break;
+ case WRITE_32:
+ cmd = "WRITE";
+ break;
+ case WRITE_SAME_32:
+ cmd = "WRITE_SAME";
+ break;
+ default:
+ trace_seq_puts(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[12] << 56);
+ lba |= ((u64)cdb[13] << 48);
+ lba |= ((u64)cdb[14] << 40);
+ lba |= ((u64)cdb[15] << 32);
+ lba |= (cdb[16] << 24);
+ lba |= (cdb[17] << 16);
+ lba |= (cdb[18] << 8);
+ lba |= cdb[19];
+ ei_lbrt |= (cdb[20] << 24);
+ ei_lbrt |= (cdb[21] << 16);
+ ei_lbrt |= (cdb[22] << 8);
+ ei_lbrt |= cdb[23];
+ txlen |= (cdb[28] << 24);
+ txlen |= (cdb[29] << 16);
+ txlen |= (cdb[30] << 8);
+ txlen |= cdb[31];
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
+ cmd, (unsigned long long)lba,
+ (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+
+ if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
+ trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned int regions = cdb[7] << 8 | cdb[8];
+
+ trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p), *cmd;
+ sector_t lba = 0;
+ u32 alloc_len = 0;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case SAI_READ_CAPACITY_16:
+ cmd = "READ_CAPACITY_16";
+ break;
+ case SAI_GET_LBA_STATUS:
+ cmd = "GET_LBA_STATUS";
+ break;
+ default:
+ trace_seq_puts(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ alloc_len |= (cdb[10] << 24);
+ alloc_len |= (cdb[11] << 16);
+ alloc_len |= (cdb[12] << 8);
+ alloc_len |= cdb[13];
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
+ (unsigned long long)lba, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return scsi_trace_rw32(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
+
+static const char *
+scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_putc(p, '-');
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
+scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (cdb[0]) {
+ case READ_6:
+ case WRITE_6:
+ return scsi_trace_rw6(p, cdb, len);
+ case READ_10:
+ case VERIFY:
+ case WRITE_10:
+ case WRITE_SAME:
+ return scsi_trace_rw10(p, cdb, len);
+ case READ_12:
+ case VERIFY_12:
+ case WRITE_12:
+ return scsi_trace_rw12(p, cdb, len);
+ case READ_16:
+ case VERIFY_16:
+ case WRITE_16:
+ case WRITE_SAME_16:
+ return scsi_trace_rw16(p, cdb, len);
+ case UNMAP:
+ return scsi_trace_unmap(p, cdb, len);
+ case SERVICE_ACTION_IN_16:
+ return scsi_trace_service_action_in(p, cdb, len);
+ case VARIABLE_LENGTH_CMD:
+ return scsi_trace_varlen(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
diff --git a/drivers/scsi/scsi_transport_api.h b/drivers/scsi/scsi_transport_api.h
new file mode 100644
index 000000000..934f0e62b
--- /dev/null
+++ b/drivers/scsi/scsi_transport_api.h
@@ -0,0 +1,6 @@
+#ifndef _SCSI_TRANSPORT_API_H
+#define _SCSI_TRANSPORT_API_H
+
+void scsi_schedule_eh(struct Scsi_Host *shost);
+
+#endif /* _SCSI_TRANSPORT_API_H */
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
new file mode 100644
index 000000000..24eaaf66a
--- /dev/null
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -0,0 +1,4152 @@
+/*
+ * FiberChannel transport specific attributes exported to sysfs.
+ *
+ * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ========
+ *
+ * Copyright (C) 2004-2007 James Smart, Emulex Corporation
+ * Rewrite for host, target, device, and remote port attributes,
+ * statistics, and service functions...
+ * Add vports, etc
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_cmnd.h>
+#include <net/netlink.h>
+#include <scsi/scsi_netlink_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include "scsi_priv.h"
+
+static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
+static void fc_vport_sched_delete(struct work_struct *work);
+static int fc_vport_setup(struct Scsi_Host *shost, int channel,
+ struct device *pdev, struct fc_vport_identifiers *ids,
+ struct fc_vport **vport);
+static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
+static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
+static void fc_bsg_remove(struct request_queue *);
+static void fc_bsg_goose_queue(struct fc_rport *);
+
+/*
+ * Module Parameters
+ */
+
+/*
+ * dev_loss_tmo: the default number of seconds that the FC transport
+ * should insulate the loss of a remote port.
+ * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
+ */
+static unsigned int fc_dev_loss_tmo = 60; /* seconds */
+
+module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo,
+ "Maximum number of seconds that the FC transport should"
+ " insulate the loss of a remote port. Once this value is"
+ " exceeded, the scsi target is removed. Value should be"
+ " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
+ " fast_io_fail_tmo is not set.");
+
+/*
+ * Redefine so that we can have same named attributes in the
+ * sdev/starget/host objects.
+ */
+#define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
+struct device_attribute device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+#define fc_enum_name_search(title, table_type, table) \
+static const char *get_fc_##title##_name(enum table_type table_key) \
+{ \
+ int i; \
+ char *name = NULL; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value == table_key) { \
+ name = table[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
+
+#define fc_enum_name_match(title, table_type, table) \
+static int get_fc_##title##_match(const char *table_key, \
+ enum table_type *value) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (strncmp(table_key, table[i].name, \
+ table[i].matchlen) == 0) { \
+ *value = table[i].value; \
+ return 0; /* success */ \
+ } \
+ } \
+ return 1; /* failure */ \
+}
+
+
+/* Convert fc_port_type values to ascii string name */
+static struct {
+ enum fc_port_type value;
+ char *name;
+} fc_port_type_names[] = {
+ { FC_PORTTYPE_UNKNOWN, "Unknown" },
+ { FC_PORTTYPE_OTHER, "Other" },
+ { FC_PORTTYPE_NOTPRESENT, "Not Present" },
+ { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" },
+ { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
+ { FC_PORTTYPE_LPORT, "LPort (private loop)" },
+ { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" },
+ { FC_PORTTYPE_NPIV, "NPIV VPORT" },
+};
+fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
+#define FC_PORTTYPE_MAX_NAMELEN 50
+
+/* Reuse fc_port_type enum function for vport_type */
+#define get_fc_vport_type_name get_fc_port_type_name
+
+
+/* Convert fc_host_event_code values to ascii string name */
+static const struct {
+ enum fc_host_event_code value;
+ char *name;
+} fc_host_event_code_names[] = {
+ { FCH_EVT_LIP, "lip" },
+ { FCH_EVT_LINKUP, "link_up" },
+ { FCH_EVT_LINKDOWN, "link_down" },
+ { FCH_EVT_LIPRESET, "lip_reset" },
+ { FCH_EVT_RSCN, "rscn" },
+ { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
+ { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
+ { FCH_EVT_PORT_ONLINE, "port_online" },
+ { FCH_EVT_PORT_OFFLINE, "port_offline" },
+ { FCH_EVT_PORT_FABRIC, "port_fabric" },
+ { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
+ { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
+};
+fc_enum_name_search(host_event_code, fc_host_event_code,
+ fc_host_event_code_names)
+#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
+
+
+/* Convert fc_port_state values to ascii string name */
+static struct {
+ enum fc_port_state value;
+ char *name;
+} fc_port_state_names[] = {
+ { FC_PORTSTATE_UNKNOWN, "Unknown" },
+ { FC_PORTSTATE_NOTPRESENT, "Not Present" },
+ { FC_PORTSTATE_ONLINE, "Online" },
+ { FC_PORTSTATE_OFFLINE, "Offline" },
+ { FC_PORTSTATE_BLOCKED, "Blocked" },
+ { FC_PORTSTATE_BYPASSED, "Bypassed" },
+ { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" },
+ { FC_PORTSTATE_LINKDOWN, "Linkdown" },
+ { FC_PORTSTATE_ERROR, "Error" },
+ { FC_PORTSTATE_LOOPBACK, "Loopback" },
+ { FC_PORTSTATE_DELETED, "Deleted" },
+};
+fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
+#define FC_PORTSTATE_MAX_NAMELEN 20
+
+
+/* Convert fc_vport_state values to ascii string name */
+static struct {
+ enum fc_vport_state value;
+ char *name;
+} fc_vport_state_names[] = {
+ { FC_VPORT_UNKNOWN, "Unknown" },
+ { FC_VPORT_ACTIVE, "Active" },
+ { FC_VPORT_DISABLED, "Disabled" },
+ { FC_VPORT_LINKDOWN, "Linkdown" },
+ { FC_VPORT_INITIALIZING, "Initializing" },
+ { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
+ { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
+ { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
+ { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
+ { FC_VPORT_FAILED, "VPort Failed" },
+};
+fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
+#define FC_VPORTSTATE_MAX_NAMELEN 24
+
+/* Reuse fc_vport_state enum function for vport_last_state */
+#define get_fc_vport_last_state_name get_fc_vport_state_name
+
+
+/* Convert fc_tgtid_binding_type values to ascii string name */
+static const struct {
+ enum fc_tgtid_binding_type value;
+ char *name;
+ int matchlen;
+} fc_tgtid_binding_type_names[] = {
+ { FC_TGTID_BIND_NONE, "none", 4 },
+ { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
+ { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
+ { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
+};
+fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
+ fc_tgtid_binding_type_names)
+fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
+ fc_tgtid_binding_type_names)
+#define FC_BINDTYPE_MAX_NAMELEN 30
+
+
+#define fc_bitfield_name_search(title, table) \
+static ssize_t \
+get_fc_##title##_names(u32 table_key, char *buf) \
+{ \
+ char *prefix = ""; \
+ ssize_t len = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value & table_key) { \
+ len += sprintf(buf + len, "%s%s", \
+ prefix, table[i].name); \
+ prefix = ", "; \
+ } \
+ } \
+ len += sprintf(buf + len, "\n"); \
+ return len; \
+}
+
+
+/* Convert FC_COS bit values to ascii string name */
+static const struct {
+ u32 value;
+ char *name;
+} fc_cos_names[] = {
+ { FC_COS_CLASS1, "Class 1" },
+ { FC_COS_CLASS2, "Class 2" },
+ { FC_COS_CLASS3, "Class 3" },
+ { FC_COS_CLASS4, "Class 4" },
+ { FC_COS_CLASS6, "Class 6" },
+};
+fc_bitfield_name_search(cos, fc_cos_names)
+
+
+/* Convert FC_PORTSPEED bit values to ascii string name */
+static const struct {
+ u32 value;
+ char *name;
+} fc_port_speed_names[] = {
+ { FC_PORTSPEED_1GBIT, "1 Gbit" },
+ { FC_PORTSPEED_2GBIT, "2 Gbit" },
+ { FC_PORTSPEED_4GBIT, "4 Gbit" },
+ { FC_PORTSPEED_10GBIT, "10 Gbit" },
+ { FC_PORTSPEED_8GBIT, "8 Gbit" },
+ { FC_PORTSPEED_16GBIT, "16 Gbit" },
+ { FC_PORTSPEED_32GBIT, "32 Gbit" },
+ { FC_PORTSPEED_20GBIT, "20 Gbit" },
+ { FC_PORTSPEED_40GBIT, "40 Gbit" },
+ { FC_PORTSPEED_50GBIT, "50 Gbit" },
+ { FC_PORTSPEED_100GBIT, "100 Gbit" },
+ { FC_PORTSPEED_25GBIT, "25 Gbit" },
+ { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
+};
+fc_bitfield_name_search(port_speed, fc_port_speed_names)
+
+
+static int
+show_fc_fc4s (char *buf, u8 *fc4_list)
+{
+ int i, len=0;
+
+ for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
+ len += sprintf(buf + len , "0x%02x ", *fc4_list);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+
+/* Convert FC_PORT_ROLE bit values to ascii string name */
+static const struct {
+ u32 value;
+ char *name;
+} fc_port_role_names[] = {
+ { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
+ { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
+ { FC_PORT_ROLE_IP_PORT, "IP Port" },
+};
+fc_bitfield_name_search(port_roles, fc_port_role_names)
+
+/*
+ * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
+ */
+#define FC_WELLKNOWN_PORTID_MASK 0xfffff0
+#define FC_WELLKNOWN_ROLE_MASK 0x00000f
+#define FC_FPORT_PORTID 0x00000e
+#define FC_FABCTLR_PORTID 0x00000d
+#define FC_DIRSRVR_PORTID 0x00000c
+#define FC_TIMESRVR_PORTID 0x00000b
+#define FC_MGMTSRVR_PORTID 0x00000a
+
+
+static void fc_timeout_deleted_rport(struct work_struct *work);
+static void fc_timeout_fail_rport_io(struct work_struct *work);
+static void fc_scsi_scan_rport(struct work_struct *work);
+
+/*
+ * Attribute counts pre object type...
+ * Increase these values if you add attributes
+ */
+#define FC_STARGET_NUM_ATTRS 3
+#define FC_RPORT_NUM_ATTRS 10
+#define FC_VPORT_NUM_ATTRS 9
+#define FC_HOST_NUM_ATTRS 29
+
+struct fc_internal {
+ struct scsi_transport_template t;
+ struct fc_function_template *f;
+
+ /*
+ * For attributes : each object has :
+ * An array of the actual attributes structures
+ * An array of null-terminated pointers to the attribute
+ * structures - used for mid-layer interaction.
+ *
+ * The attribute containers for the starget and host are are
+ * part of the midlayer. As the remote port is specific to the
+ * fc transport, we must provide the attribute container.
+ */
+ struct device_attribute private_starget_attrs[
+ FC_STARGET_NUM_ATTRS];
+ struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
+
+ struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
+ struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
+
+ struct transport_container rport_attr_cont;
+ struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
+ struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
+
+ struct transport_container vport_attr_cont;
+ struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
+ struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
+};
+
+#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
+
+static int fc_target_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct scsi_target *starget = to_scsi_target(dev);
+ struct fc_rport *rport = starget_to_rport(starget);
+
+ /*
+ * if parent is remote port, use values from remote port.
+ * Otherwise, this host uses the fc_transport, but not the
+ * remote port interface. As such, initialize to known non-values.
+ */
+ if (rport) {
+ fc_starget_node_name(starget) = rport->node_name;
+ fc_starget_port_name(starget) = rport->port_name;
+ fc_starget_port_id(starget) = rport->port_id;
+ } else {
+ fc_starget_node_name(starget) = -1;
+ fc_starget_port_name(starget) = -1;
+ fc_starget_port_id(starget) = -1;
+ }
+
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(fc_transport_class,
+ "fc_transport",
+ fc_target_setup,
+ NULL,
+ NULL);
+
+static int fc_host_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+
+ /*
+ * Set default values easily detected by the midlayer as
+ * failure cases. The scsi lldd is responsible for initializing
+ * all transport attributes to valid values per host.
+ */
+ fc_host->node_name = -1;
+ fc_host->port_name = -1;
+ fc_host->permanent_port_name = -1;
+ fc_host->supported_classes = FC_COS_UNSPECIFIED;
+ memset(fc_host->supported_fc4s, 0,
+ sizeof(fc_host->supported_fc4s));
+ fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
+ fc_host->maxframe_size = -1;
+ fc_host->max_npiv_vports = 0;
+ memset(fc_host->serial_number, 0,
+ sizeof(fc_host->serial_number));
+ memset(fc_host->manufacturer, 0,
+ sizeof(fc_host->manufacturer));
+ memset(fc_host->model, 0,
+ sizeof(fc_host->model));
+ memset(fc_host->model_description, 0,
+ sizeof(fc_host->model_description));
+ memset(fc_host->hardware_version, 0,
+ sizeof(fc_host->hardware_version));
+ memset(fc_host->driver_version, 0,
+ sizeof(fc_host->driver_version));
+ memset(fc_host->firmware_version, 0,
+ sizeof(fc_host->firmware_version));
+ memset(fc_host->optionrom_version, 0,
+ sizeof(fc_host->optionrom_version));
+
+ fc_host->port_id = -1;
+ fc_host->port_type = FC_PORTTYPE_UNKNOWN;
+ fc_host->port_state = FC_PORTSTATE_UNKNOWN;
+ memset(fc_host->active_fc4s, 0,
+ sizeof(fc_host->active_fc4s));
+ fc_host->speed = FC_PORTSPEED_UNKNOWN;
+ fc_host->fabric_name = -1;
+ memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
+ memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
+
+ fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
+
+ INIT_LIST_HEAD(&fc_host->rports);
+ INIT_LIST_HEAD(&fc_host->rport_bindings);
+ INIT_LIST_HEAD(&fc_host->vports);
+ fc_host->next_rport_number = 0;
+ fc_host->next_target_id = 0;
+ fc_host->next_vport_number = 0;
+ fc_host->npiv_vports_inuse = 0;
+
+ snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
+ "fc_wq_%d", shost->host_no);
+ fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
+ if (!fc_host->work_q)
+ return -ENOMEM;
+
+ fc_host->dev_loss_tmo = fc_dev_loss_tmo;
+ snprintf(fc_host->devloss_work_q_name,
+ sizeof(fc_host->devloss_work_q_name),
+ "fc_dl_%d", shost->host_no);
+ fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
+ fc_host->devloss_work_q_name);
+ if (!fc_host->devloss_work_q) {
+ destroy_workqueue(fc_host->work_q);
+ fc_host->work_q = NULL;
+ return -ENOMEM;
+ }
+
+ fc_bsg_hostadd(shost, fc_host);
+ /* ignore any bsg add error - we just can't do sgio */
+
+ return 0;
+}
+
+static int fc_host_remove(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+
+ fc_bsg_remove(fc_host->rqst_q);
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(fc_host_class,
+ "fc_host",
+ fc_host_setup,
+ fc_host_remove,
+ NULL);
+
+/*
+ * Setup and Remove actions for remote ports are handled
+ * in the service functions below.
+ */
+static DECLARE_TRANSPORT_CLASS(fc_rport_class,
+ "fc_remote_ports",
+ NULL,
+ NULL,
+ NULL);
+
+/*
+ * Setup and Remove actions for virtual ports are handled
+ * in the service functions below.
+ */
+static DECLARE_TRANSPORT_CLASS(fc_vport_class,
+ "fc_vports",
+ NULL,
+ NULL,
+ NULL);
+
+/*
+ * Netlink Infrastructure
+ */
+
+static atomic_t fc_event_seq;
+
+/**
+ * fc_get_event_number - Obtain the next sequential FC event number
+ *
+ * Notes:
+ * We could have inlined this, but it would have required fc_event_seq to
+ * be exposed. For now, live with the subroutine call.
+ * Atomic used to avoid lock/unlock...
+ */
+u32
+fc_get_event_number(void)
+{
+ return atomic_add_return(1, &fc_event_seq);
+}
+EXPORT_SYMBOL(fc_get_event_number);
+
+
+/**
+ * fc_host_post_event - called to post an even on an fc_host.
+ * @shost: host the event occurred on
+ * @event_number: fc event number obtained from get_fc_event_number()
+ * @event_code: fc_host event being posted
+ * @event_data: 32bits of data for the event being posted
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
+ enum fc_host_event_code event_code, u32 event_data)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct fc_nl_event *event;
+ const char *name;
+ u32 len;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ goto send_fail;
+ }
+
+ len = FC_NL_MSGALIGN(sizeof(*event));
+
+ skb = nlmsg_new(len, GFP_KERNEL);
+ if (!skb) {
+ err = -ENOBUFS;
+ goto send_fail;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ goto send_fail_skb;
+ }
+ event = nlmsg_data(nlh);
+
+ INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
+ FC_NL_ASYNC_EVENT, len);
+ event->seconds = get_seconds();
+ event->vendor_id = 0;
+ event->host_no = shost->host_no;
+ event->event_datalen = sizeof(u32); /* bytes */
+ event->event_num = event_number;
+ event->event_code = event_code;
+ event->event_data = event_data;
+
+ nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
+ GFP_KERNEL);
+ return;
+
+send_fail_skb:
+ kfree_skb(skb);
+send_fail:
+ name = get_fc_host_event_code_name(event_code);
+ printk(KERN_WARNING
+ "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
+ __func__, shost->host_no,
+ (name) ? name : "<unknown>", event_data, err);
+ return;
+}
+EXPORT_SYMBOL(fc_host_post_event);
+
+
+/**
+ * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
+ * @shost: host the event occurred on
+ * @event_number: fc event number obtained from get_fc_event_number()
+ * @data_len: amount, in bytes, of vendor unique data
+ * @data_buf: pointer to vendor unique data
+ * @vendor_id: Vendor id
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
+ u32 data_len, char * data_buf, u64 vendor_id)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct fc_nl_event *event;
+ u32 len;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ goto send_vendor_fail;
+ }
+
+ len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
+
+ skb = nlmsg_new(len, GFP_KERNEL);
+ if (!skb) {
+ err = -ENOBUFS;
+ goto send_vendor_fail;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ goto send_vendor_fail_skb;
+ }
+ event = nlmsg_data(nlh);
+
+ INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
+ FC_NL_ASYNC_EVENT, len);
+ event->seconds = get_seconds();
+ event->vendor_id = vendor_id;
+ event->host_no = shost->host_no;
+ event->event_datalen = data_len; /* bytes */
+ event->event_num = event_number;
+ event->event_code = FCH_EVT_VENDOR_UNIQUE;
+ memcpy(&event->event_data, data_buf, data_len);
+
+ nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
+ GFP_KERNEL);
+ return;
+
+send_vendor_fail_skb:
+ kfree_skb(skb);
+send_vendor_fail:
+ printk(KERN_WARNING
+ "%s: Dropped Event : host %d vendor_unique - err %d\n",
+ __func__, shost->host_no, err);
+ return;
+}
+EXPORT_SYMBOL(fc_host_post_vendor_event);
+
+
+
+static __init int fc_transport_init(void)
+{
+ int error;
+
+ atomic_set(&fc_event_seq, 0);
+
+ error = transport_class_register(&fc_host_class);
+ if (error)
+ return error;
+ error = transport_class_register(&fc_vport_class);
+ if (error)
+ goto unreg_host_class;
+ error = transport_class_register(&fc_rport_class);
+ if (error)
+ goto unreg_vport_class;
+ error = transport_class_register(&fc_transport_class);
+ if (error)
+ goto unreg_rport_class;
+ return 0;
+
+unreg_rport_class:
+ transport_class_unregister(&fc_rport_class);
+unreg_vport_class:
+ transport_class_unregister(&fc_vport_class);
+unreg_host_class:
+ transport_class_unregister(&fc_host_class);
+ return error;
+}
+
+static void __exit fc_transport_exit(void)
+{
+ transport_class_unregister(&fc_transport_class);
+ transport_class_unregister(&fc_rport_class);
+ transport_class_unregister(&fc_host_class);
+ transport_class_unregister(&fc_vport_class);
+}
+
+/*
+ * FC Remote Port Attribute Management
+ */
+
+#define fc_rport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_rport_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(dev); \
+ struct Scsi_Host *shost = rport_to_shost(rport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ if ((i->f->get_rport_##field) && \
+ !((rport->port_state == FC_PORTSTATE_BLOCKED) || \
+ (rport->port_state == FC_PORTSTATE_DELETED) || \
+ (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \
+ i->f->get_rport_##field(rport); \
+ return snprintf(buf, sz, format_string, cast rport->field); \
+}
+
+#define fc_rport_store_function(field) \
+static ssize_t \
+store_fc_rport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ struct fc_rport *rport = transport_class_to_rport(dev); \
+ struct Scsi_Host *shost = rport_to_shost(rport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ char *cp; \
+ if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
+ (rport->port_state == FC_PORTSTATE_DELETED) || \
+ (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ i->f->set_rport_##field(rport, val); \
+ return count; \
+}
+
+#define fc_rport_rd_attr(field, format_string, sz) \
+ fc_rport_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
+ show_fc_rport_##field, NULL)
+
+#define fc_rport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_rport_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
+ show_fc_rport_##field, NULL)
+
+#define fc_rport_rw_attr(field, format_string, sz) \
+ fc_rport_show_function(field, format_string, sz, ) \
+ fc_rport_store_function(field) \
+static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \
+ show_fc_rport_##field, \
+ store_fc_rport_##field)
+
+
+#define fc_private_rport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_rport_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(dev); \
+ return snprintf(buf, sz, format_string, cast rport->field); \
+}
+
+#define fc_private_rport_rd_attr(field, format_string, sz) \
+ fc_private_rport_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
+ show_fc_rport_##field, NULL)
+
+#define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_private_rport_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
+ show_fc_rport_##field, NULL)
+
+
+#define fc_private_rport_rd_enum_attr(title, maxlen) \
+static ssize_t \
+show_fc_rport_##title (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(dev); \
+ const char *name; \
+ name = get_fc_##title##_name(rport->title); \
+ if (!name) \
+ return -EINVAL; \
+ return snprintf(buf, maxlen, "%s\n", name); \
+} \
+static FC_DEVICE_ATTR(rport, title, S_IRUGO, \
+ show_fc_rport_##title, NULL)
+
+
+#define SETUP_RPORT_ATTRIBUTE_RD(field) \
+ i->private_rport_attrs[count] = device_attr_rport_##field; \
+ i->private_rport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_rport_attrs[count].store = NULL; \
+ i->rport_attrs[count] = &i->private_rport_attrs[count]; \
+ if (i->f->show_rport_##field) \
+ count++
+
+#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \
+ i->private_rport_attrs[count] = device_attr_rport_##field; \
+ i->private_rport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_rport_attrs[count].store = NULL; \
+ i->rport_attrs[count] = &i->private_rport_attrs[count]; \
+ count++
+
+#define SETUP_RPORT_ATTRIBUTE_RW(field) \
+ i->private_rport_attrs[count] = device_attr_rport_##field; \
+ if (!i->f->set_rport_##field) { \
+ i->private_rport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_rport_attrs[count].store = NULL; \
+ } \
+ i->rport_attrs[count] = &i->private_rport_attrs[count]; \
+ if (i->f->show_rport_##field) \
+ count++
+
+#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
+{ \
+ i->private_rport_attrs[count] = device_attr_rport_##field; \
+ i->rport_attrs[count] = &i->private_rport_attrs[count]; \
+ count++; \
+}
+
+
+/* The FC Transport Remote Port Attributes: */
+
+/* Fixed Remote Port Attributes */
+
+fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
+
+static ssize_t
+show_fc_rport_supported_classes (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_rport *rport = transport_class_to_rport(dev);
+ if (rport->supported_classes == FC_COS_UNSPECIFIED)
+ return snprintf(buf, 20, "unspecified\n");
+ return get_fc_cos_names(rport->supported_classes, buf);
+}
+static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
+ show_fc_rport_supported_classes, NULL);
+
+/* Dynamic Remote Port Attributes */
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+ char *cp;
+
+ *val = simple_strtoul(buf, &cp, 0);
+ if ((*cp && (*cp != '\n')) || (*val < 0))
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (*val > UINT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
+ unsigned long val)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
+ (rport->port_state == FC_PORTSTATE_DELETED) ||
+ (rport->port_state == FC_PORTSTATE_NOTPRESENT))
+ return -EBUSY;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ /*
+ * If fast_io_fail is off we have to cap
+ * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
+ */
+ if (rport->fast_io_fail_tmo == -1 &&
+ val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
+
+ i->f->set_rport_dev_loss_tmo(rport, val);
+ return 0;
+}
+
+fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fc_rport *rport = transport_class_to_rport(dev);
+ unsigned long val;
+ int rc;
+
+ rc = fc_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ rc = fc_rport_set_dev_loss_tmo(rport, val);
+ if (rc)
+ return rc;
+ return count;
+}
+static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
+
+
+/* Private Remote Port Attributes */
+
+fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
+fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
+fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
+
+static ssize_t
+show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fc_rport *rport = transport_class_to_rport(dev);
+
+ /* identify any roles that are port_id specific */
+ if ((rport->port_id != -1) &&
+ (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
+ FC_WELLKNOWN_PORTID_MASK) {
+ switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
+ case FC_FPORT_PORTID:
+ return snprintf(buf, 30, "Fabric Port\n");
+ case FC_FABCTLR_PORTID:
+ return snprintf(buf, 30, "Fabric Controller\n");
+ case FC_DIRSRVR_PORTID:
+ return snprintf(buf, 30, "Directory Server\n");
+ case FC_TIMESRVR_PORTID:
+ return snprintf(buf, 30, "Time Server\n");
+ case FC_MGMTSRVR_PORTID:
+ return snprintf(buf, 30, "Management Server\n");
+ default:
+ return snprintf(buf, 30, "Unknown Fabric Entity\n");
+ }
+ } else {
+ if (rport->roles == FC_PORT_ROLE_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+ return get_fc_port_roles_names(rport->roles, buf);
+ }
+}
+static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
+ show_fc_rport_roles, NULL);
+
+fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
+fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
+
+/*
+ * fast_io_fail_tmo attribute
+ */
+static ssize_t
+show_fc_rport_fast_io_fail_tmo (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_rport *rport = transport_class_to_rport(dev);
+
+ if (rport->fast_io_fail_tmo == -1)
+ return snprintf(buf, 5, "off\n");
+ return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
+}
+
+static ssize_t
+store_fc_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ int val;
+ char *cp;
+ struct fc_rport *rport = transport_class_to_rport(dev);
+
+ if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
+ (rport->port_state == FC_PORTSTATE_DELETED) ||
+ (rport->port_state == FC_PORTSTATE_NOTPRESENT))
+ return -EBUSY;
+ if (strncmp(buf, "off", 3) == 0)
+ rport->fast_io_fail_tmo = -1;
+ else {
+ val = simple_strtoul(buf, &cp, 0);
+ if ((*cp && (*cp != '\n')) || (val < 0))
+ return -EINVAL;
+ /*
+ * Cap fast_io_fail by dev_loss_tmo or
+ * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
+ */
+ if ((val >= rport->dev_loss_tmo) ||
+ (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
+ return -EINVAL;
+
+ rport->fast_io_fail_tmo = val;
+ }
+ return count;
+}
+static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
+
+
+/*
+ * FC SCSI Target Attribute Management
+ */
+
+/*
+ * Note: in the target show function we recognize when the remote
+ * port is in the hierarchy and do not allow the driver to get
+ * involved in sysfs functions. The driver only gets involved if
+ * it's the "old" style that doesn't use rports.
+ */
+#define fc_starget_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_starget_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_target *starget = transport_class_to_starget(dev); \
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ struct fc_rport *rport = starget_to_rport(starget); \
+ if (rport) \
+ fc_starget_##field(starget) = rport->field; \
+ else if (i->f->get_starget_##field) \
+ i->f->get_starget_##field(starget); \
+ return snprintf(buf, sz, format_string, \
+ cast fc_starget_##field(starget)); \
+}
+
+#define fc_starget_rd_attr(field, format_string, sz) \
+ fc_starget_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
+ show_fc_starget_##field, NULL)
+
+#define fc_starget_rd_attr_cast(field, format_string, sz, cast) \
+ fc_starget_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
+ show_fc_starget_##field, NULL)
+
+#define SETUP_STARGET_ATTRIBUTE_RD(field) \
+ i->private_starget_attrs[count] = device_attr_starget_##field; \
+ i->private_starget_attrs[count].attr.mode = S_IRUGO; \
+ i->private_starget_attrs[count].store = NULL; \
+ i->starget_attrs[count] = &i->private_starget_attrs[count]; \
+ if (i->f->show_starget_##field) \
+ count++
+
+#define SETUP_STARGET_ATTRIBUTE_RW(field) \
+ i->private_starget_attrs[count] = device_attr_starget_##field; \
+ if (!i->f->set_starget_##field) { \
+ i->private_starget_attrs[count].attr.mode = S_IRUGO; \
+ i->private_starget_attrs[count].store = NULL; \
+ } \
+ i->starget_attrs[count] = &i->private_starget_attrs[count]; \
+ if (i->f->show_starget_##field) \
+ count++
+
+/* The FC Transport SCSI Target Attributes: */
+fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
+fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
+fc_starget_rd_attr(port_id, "0x%06x\n", 20);
+
+
+/*
+ * FC Virtual Port Attribute Management
+ */
+
+#define fc_vport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_vport_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(dev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ if ((i->f->get_vport_##field) && \
+ !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
+ i->f->get_vport_##field(vport); \
+ return snprintf(buf, sz, format_string, cast vport->field); \
+}
+
+#define fc_vport_store_function(field) \
+static ssize_t \
+store_fc_vport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ struct fc_vport *vport = transport_class_to_vport(dev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ char *cp; \
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ i->f->set_vport_##field(vport, val); \
+ return count; \
+}
+
+#define fc_vport_store_str_function(field, slen) \
+static ssize_t \
+store_fc_vport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(dev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ unsigned int cnt=count; \
+ \
+ /* count may include a LF at end of string */ \
+ if (buf[cnt-1] == '\n') \
+ cnt--; \
+ if (cnt > ((slen) - 1)) \
+ return -EINVAL; \
+ memcpy(vport->field, buf, cnt); \
+ i->f->set_vport_##field(vport); \
+ return count; \
+}
+
+#define fc_vport_rd_attr(field, format_string, sz) \
+ fc_vport_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_vport_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_vport_rw_attr(field, format_string, sz) \
+ fc_vport_show_function(field, format_string, sz, ) \
+ fc_vport_store_function(field) \
+static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
+ show_fc_vport_##field, \
+ store_fc_vport_##field)
+
+#define fc_private_vport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_vport_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(dev); \
+ return snprintf(buf, sz, format_string, cast vport->field); \
+}
+
+#define fc_private_vport_store_u32_function(field) \
+static ssize_t \
+store_fc_vport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ u32 val; \
+ struct fc_vport *vport = transport_class_to_vport(dev); \
+ char *cp; \
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ vport->field = val; \
+ return count; \
+}
+
+
+#define fc_private_vport_rd_attr(field, format_string, sz) \
+ fc_private_vport_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_private_vport_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_private_vport_rw_u32_attr(field, format_string, sz) \
+ fc_private_vport_show_function(field, format_string, sz, ) \
+ fc_private_vport_store_u32_function(field) \
+static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
+ show_fc_vport_##field, \
+ store_fc_vport_##field)
+
+
+#define fc_private_vport_rd_enum_attr(title, maxlen) \
+static ssize_t \
+show_fc_vport_##title (struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(dev); \
+ const char *name; \
+ name = get_fc_##title##_name(vport->title); \
+ if (!name) \
+ return -EINVAL; \
+ return snprintf(buf, maxlen, "%s\n", name); \
+} \
+static FC_DEVICE_ATTR(vport, title, S_IRUGO, \
+ show_fc_vport_##title, NULL)
+
+
+#define SETUP_VPORT_ATTRIBUTE_RD(field) \
+ i->private_vport_attrs[count] = device_attr_vport_##field; \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ if (i->f->get_##field) \
+ count++
+ /* NOTE: Above MACRO differs: checks function not show bit */
+
+#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
+ i->private_vport_attrs[count] = device_attr_vport_##field; \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++
+
+#define SETUP_VPORT_ATTRIBUTE_WR(field) \
+ i->private_vport_attrs[count] = device_attr_vport_##field; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ if (i->f->field) \
+ count++
+ /* NOTE: Above MACRO differs: checks function */
+
+#define SETUP_VPORT_ATTRIBUTE_RW(field) \
+ i->private_vport_attrs[count] = device_attr_vport_##field; \
+ if (!i->f->set_vport_##field) { \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ } \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++
+ /* NOTE: Above MACRO differs: does not check show bit */
+
+#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
+{ \
+ i->private_vport_attrs[count] = device_attr_vport_##field; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++; \
+}
+
+
+/* The FC Transport Virtual Port Attributes: */
+
+/* Fixed Virtual Port Attributes */
+
+/* Dynamic Virtual Port Attributes */
+
+/* Private Virtual Port Attributes */
+
+fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
+fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
+fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
+fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
+
+static ssize_t
+show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fc_vport *vport = transport_class_to_vport(dev);
+
+ if (vport->roles == FC_PORT_ROLE_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+ return get_fc_port_roles_names(vport->roles, buf);
+}
+static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
+
+fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
+
+fc_private_vport_show_function(symbolic_name, "%s\n",
+ FC_VPORT_SYMBOLIC_NAMELEN + 1, )
+fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
+static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
+ show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
+
+static ssize_t
+store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fc_vport *vport = transport_class_to_vport(dev);
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EBUSY;
+ }
+ vport->flags |= FC_VPORT_DELETING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ fc_queue_work(shost, &vport->vport_delete_work);
+ return count;
+}
+static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
+ NULL, store_fc_vport_delete);
+
+
+/*
+ * Enable/Disable vport
+ * Write "1" to disable, write "0" to enable
+ */
+static ssize_t
+store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct fc_vport *vport = transport_class_to_vport(dev);
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int stat;
+
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+ return -EBUSY;
+
+ if (*buf == '0') {
+ if (vport->vport_state != FC_VPORT_DISABLED)
+ return -EALREADY;
+ } else if (*buf == '1') {
+ if (vport->vport_state == FC_VPORT_DISABLED)
+ return -EALREADY;
+ } else
+ return -EINVAL;
+
+ stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
+ return stat ? stat : count;
+}
+static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
+ NULL, store_fc_vport_disable);
+
+
+/*
+ * Host Attribute Management
+ */
+
+#define fc_host_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_host_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(dev); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ if (i->f->get_host_##field) \
+ i->f->get_host_##field(shost); \
+ return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
+}
+
+#define fc_host_store_function(field) \
+static ssize_t \
+store_fc_host_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ struct Scsi_Host *shost = transport_class_to_shost(dev); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ char *cp; \
+ \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ i->f->set_host_##field(shost, val); \
+ return count; \
+}
+
+#define fc_host_store_str_function(field, slen) \
+static ssize_t \
+store_fc_host_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(dev); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ unsigned int cnt=count; \
+ \
+ /* count may include a LF at end of string */ \
+ if (buf[cnt-1] == '\n') \
+ cnt--; \
+ if (cnt > ((slen) - 1)) \
+ return -EINVAL; \
+ memcpy(fc_host_##field(shost), buf, cnt); \
+ i->f->set_host_##field(shost); \
+ return count; \
+}
+
+#define fc_host_rd_attr(field, format_string, sz) \
+ fc_host_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(host, field, S_IRUGO, \
+ show_fc_host_##field, NULL)
+
+#define fc_host_rd_attr_cast(field, format_string, sz, cast) \
+ fc_host_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(host, field, S_IRUGO, \
+ show_fc_host_##field, NULL)
+
+#define fc_host_rw_attr(field, format_string, sz) \
+ fc_host_show_function(field, format_string, sz, ) \
+ fc_host_store_function(field) \
+static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \
+ show_fc_host_##field, \
+ store_fc_host_##field)
+
+#define fc_host_rd_enum_attr(title, maxlen) \
+static ssize_t \
+show_fc_host_##title (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(dev); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ const char *name; \
+ if (i->f->get_host_##title) \
+ i->f->get_host_##title(shost); \
+ name = get_fc_##title##_name(fc_host_##title(shost)); \
+ if (!name) \
+ return -EINVAL; \
+ return snprintf(buf, maxlen, "%s\n", name); \
+} \
+static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
+
+#define SETUP_HOST_ATTRIBUTE_RD(field) \
+ i->private_host_attrs[count] = device_attr_host_##field; \
+ i->private_host_attrs[count].attr.mode = S_IRUGO; \
+ i->private_host_attrs[count].store = NULL; \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ if (i->f->show_host_##field) \
+ count++
+
+#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
+ i->private_host_attrs[count] = device_attr_host_##field; \
+ i->private_host_attrs[count].attr.mode = S_IRUGO; \
+ i->private_host_attrs[count].store = NULL; \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ count++
+
+#define SETUP_HOST_ATTRIBUTE_RW(field) \
+ i->private_host_attrs[count] = device_attr_host_##field; \
+ if (!i->f->set_host_##field) { \
+ i->private_host_attrs[count].attr.mode = S_IRUGO; \
+ i->private_host_attrs[count].store = NULL; \
+ } \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ if (i->f->show_host_##field) \
+ count++
+
+
+#define fc_private_host_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_host_##field (struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(dev); \
+ return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
+}
+
+#define fc_private_host_rd_attr(field, format_string, sz) \
+ fc_private_host_show_function(field, format_string, sz, ) \
+static FC_DEVICE_ATTR(host, field, S_IRUGO, \
+ show_fc_host_##field, NULL)
+
+#define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \
+ fc_private_host_show_function(field, format_string, sz, (cast)) \
+static FC_DEVICE_ATTR(host, field, S_IRUGO, \
+ show_fc_host_##field, NULL)
+
+#define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \
+ i->private_host_attrs[count] = device_attr_host_##field; \
+ i->private_host_attrs[count].attr.mode = S_IRUGO; \
+ i->private_host_attrs[count].store = NULL; \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ count++
+
+#define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \
+{ \
+ i->private_host_attrs[count] = device_attr_host_##field; \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ count++; \
+}
+
+
+/* Fixed Host Attributes */
+
+static ssize_t
+show_fc_host_supported_classes (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+
+ if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
+ return snprintf(buf, 20, "unspecified\n");
+
+ return get_fc_cos_names(fc_host_supported_classes(shost), buf);
+}
+static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
+ show_fc_host_supported_classes, NULL);
+
+static ssize_t
+show_fc_host_supported_fc4s (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
+}
+static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
+ show_fc_host_supported_fc4s, NULL);
+
+static ssize_t
+show_fc_host_supported_speeds (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+
+ if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+
+ return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
+}
+static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
+ show_fc_host_supported_speeds, NULL);
+
+
+fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
+fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
+fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
+ unsigned long long);
+fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
+fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
+fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
+fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
+fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+
+
+/* Dynamic Host Attributes */
+
+static ssize_t
+show_fc_host_active_fc4s (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ if (i->f->get_host_active_fc4s)
+ i->f->get_host_active_fc4s(shost);
+
+ return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
+}
+static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
+ show_fc_host_active_fc4s, NULL);
+
+static ssize_t
+show_fc_host_speed (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ if (i->f->get_host_speed)
+ i->f->get_host_speed(shost);
+
+ if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+
+ return get_fc_port_speed_names(fc_host_speed(shost), buf);
+}
+static FC_DEVICE_ATTR(host, speed, S_IRUGO,
+ show_fc_host_speed, NULL);
+
+
+fc_host_rd_attr(port_id, "0x%06x\n", 20);
+fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
+fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
+fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+
+fc_private_host_show_function(system_hostname, "%s\n",
+ FC_SYMBOLIC_NAME_SIZE + 1, )
+fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
+static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
+ show_fc_host_system_hostname, store_fc_host_system_hostname);
+
+
+/* Private Host Attributes */
+
+static ssize_t
+show_fc_private_host_tgtid_bind_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ const char *name;
+
+ name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
+}
+
+#define get_list_head_entry(pos, head, member) \
+ pos = list_entry((head)->next, typeof(*pos), member)
+
+static ssize_t
+store_fc_private_host_tgtid_bind_type(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_rport *rport;
+ enum fc_tgtid_binding_type val;
+ unsigned long flags;
+
+ if (get_fc_tgtid_bind_type_match(buf, &val))
+ return -EINVAL;
+
+ /* if changing bind type, purge all unused consistent bindings */
+ if (val != fc_host_tgtid_bind_type(shost)) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ while (!list_empty(&fc_host_rport_bindings(shost))) {
+ get_list_head_entry(rport,
+ &fc_host_rport_bindings(shost), peers);
+ list_del(&rport->peers);
+ rport->port_state = FC_PORTSTATE_DELETED;
+ fc_queue_work(shost, &rport->rport_delete_work);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+ fc_host_tgtid_bind_type(shost) = val;
+ return count;
+}
+
+static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
+ show_fc_private_host_tgtid_bind_type,
+ store_fc_private_host_tgtid_bind_type);
+
+static ssize_t
+store_fc_private_host_issue_lip(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int ret;
+
+ /* ignore any data value written to the attribute */
+ if (i->f->issue_fc_host_lip) {
+ ret = i->f->issue_fc_host_lip(shost);
+ return ret ? ret: count;
+ }
+
+ return -ENOENT;
+}
+
+static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
+ store_fc_private_host_issue_lip);
+
+static ssize_t
+store_fc_private_host_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_rport *rport;
+ unsigned long val, flags;
+ int rc;
+
+ rc = fc_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ fc_host_dev_loss_tmo(shost) = val;
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(rport, &fc_host->rports, peers)
+ fc_rport_set_dev_loss_tmo(rport, val);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return count;
+}
+
+fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
+static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fc_host_dev_loss_tmo,
+ store_fc_private_host_dev_loss_tmo);
+
+fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
+
+/*
+ * Host Statistics Management
+ */
+
+/* Show a given an attribute in the statistics group */
+static ssize_t
+fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_host_statistics *stats;
+ ssize_t ret = -ENOENT;
+
+ if (offset > sizeof(struct fc_host_statistics) ||
+ offset % sizeof(u64) != 0)
+ WARN_ON(1);
+
+ if (i->f->get_fc_host_stats) {
+ stats = (i->f->get_fc_host_stats)(shost);
+ if (stats)
+ ret = snprintf(buf, 20, "0x%llx\n",
+ (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
+ }
+ return ret;
+}
+
+
+/* generate a read-only statistics attribute */
+#define fc_host_statistic(name) \
+static ssize_t show_fcstat_##name(struct device *cd, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return fc_stat_show(cd, buf, \
+ offsetof(struct fc_host_statistics, name)); \
+} \
+static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
+
+fc_host_statistic(seconds_since_last_reset);
+fc_host_statistic(tx_frames);
+fc_host_statistic(tx_words);
+fc_host_statistic(rx_frames);
+fc_host_statistic(rx_words);
+fc_host_statistic(lip_count);
+fc_host_statistic(nos_count);
+fc_host_statistic(error_frames);
+fc_host_statistic(dumped_frames);
+fc_host_statistic(link_failure_count);
+fc_host_statistic(loss_of_sync_count);
+fc_host_statistic(loss_of_signal_count);
+fc_host_statistic(prim_seq_protocol_err_count);
+fc_host_statistic(invalid_tx_word_count);
+fc_host_statistic(invalid_crc_count);
+fc_host_statistic(fcp_input_requests);
+fc_host_statistic(fcp_output_requests);
+fc_host_statistic(fcp_control_requests);
+fc_host_statistic(fcp_input_megabytes);
+fc_host_statistic(fcp_output_megabytes);
+fc_host_statistic(fcp_packet_alloc_failures);
+fc_host_statistic(fcp_packet_aborts);
+fc_host_statistic(fcp_frame_alloc_failures);
+fc_host_statistic(fc_no_free_exch);
+fc_host_statistic(fc_no_free_exch_xid);
+fc_host_statistic(fc_xid_not_found);
+fc_host_statistic(fc_xid_busy);
+fc_host_statistic(fc_seq_not_found);
+fc_host_statistic(fc_non_bls_resp);
+
+static ssize_t
+fc_reset_statistics(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ /* ignore any data value written to the attribute */
+ if (i->f->reset_fc_host_stats) {
+ i->f->reset_fc_host_stats(shost);
+ return count;
+ }
+
+ return -ENOENT;
+}
+static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
+ fc_reset_statistics);
+
+static struct attribute *fc_statistics_attrs[] = {
+ &device_attr_host_seconds_since_last_reset.attr,
+ &device_attr_host_tx_frames.attr,
+ &device_attr_host_tx_words.attr,
+ &device_attr_host_rx_frames.attr,
+ &device_attr_host_rx_words.attr,
+ &device_attr_host_lip_count.attr,
+ &device_attr_host_nos_count.attr,
+ &device_attr_host_error_frames.attr,
+ &device_attr_host_dumped_frames.attr,
+ &device_attr_host_link_failure_count.attr,
+ &device_attr_host_loss_of_sync_count.attr,
+ &device_attr_host_loss_of_signal_count.attr,
+ &device_attr_host_prim_seq_protocol_err_count.attr,
+ &device_attr_host_invalid_tx_word_count.attr,
+ &device_attr_host_invalid_crc_count.attr,
+ &device_attr_host_fcp_input_requests.attr,
+ &device_attr_host_fcp_output_requests.attr,
+ &device_attr_host_fcp_control_requests.attr,
+ &device_attr_host_fcp_input_megabytes.attr,
+ &device_attr_host_fcp_output_megabytes.attr,
+ &device_attr_host_fcp_packet_alloc_failures.attr,
+ &device_attr_host_fcp_packet_aborts.attr,
+ &device_attr_host_fcp_frame_alloc_failures.attr,
+ &device_attr_host_fc_no_free_exch.attr,
+ &device_attr_host_fc_no_free_exch_xid.attr,
+ &device_attr_host_fc_xid_not_found.attr,
+ &device_attr_host_fc_xid_busy.attr,
+ &device_attr_host_fc_seq_not_found.attr,
+ &device_attr_host_fc_non_bls_resp.attr,
+ &device_attr_host_reset_statistics.attr,
+ NULL
+};
+
+static struct attribute_group fc_statistics_group = {
+ .name = "statistics",
+ .attrs = fc_statistics_attrs,
+};
+
+
+/* Host Vport Attributes */
+
+static int
+fc_parse_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i=0, j=0; i < 16; i++) {
+ int value;
+
+ value = hex_to_bin(*ns++);
+ if (value >= 0)
+ j = (j << 4) | value;
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+
+ return 0;
+}
+
+
+/*
+ * "Short-cut" sysfs variable to create a new vport on a FC Host.
+ * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
+ * will default to a NPIV-based FCP_Initiator; The WWNs are specified
+ * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
+ */
+static ssize_t
+store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_vport_identifiers vid;
+ struct fc_vport *vport;
+ unsigned int cnt=count;
+ int stat;
+
+ memset(&vid, 0, sizeof(vid));
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (buf[16] != ':'))
+ return -EINVAL;
+
+ stat = fc_parse_wwn(&buf[0], &vid.port_name);
+ if (stat)
+ return stat;
+
+ stat = fc_parse_wwn(&buf[17], &vid.node_name);
+ if (stat)
+ return stat;
+
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+ /* vid.symbolic_name is already zero/NULL's */
+ vid.disable = false; /* always enabled */
+
+ /* we only allow support on Channel 0 !!! */
+ stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
+ return stat ? stat : count;
+}
+static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
+ store_fc_host_vport_create);
+
+
+/*
+ * "Short-cut" sysfs variable to delete a vport on a FC Host.
+ * Vport is identified by a string containing "<WWPN>:<WWNN>".
+ * The WWNs are specified as hex characters, and may *not* contain
+ * any prefixes (e.g. 0x, x, etc)
+ */
+static ssize_t
+store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_vport *vport;
+ u64 wwpn, wwnn;
+ unsigned long flags;
+ unsigned int cnt=count;
+ int stat, match;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (buf[16] != ':'))
+ return -EINVAL;
+
+ stat = fc_parse_wwn(&buf[0], &wwpn);
+ if (stat)
+ return stat;
+
+ stat = fc_parse_wwn(&buf[17], &wwnn);
+ if (stat)
+ return stat;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ match = 0;
+ /* we only allow support on Channel 0 !!! */
+ list_for_each_entry(vport, &fc_host->vports, peers) {
+ if ((vport->channel == 0) &&
+ (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+ break;
+ vport->flags |= FC_VPORT_DELETING;
+ match = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!match)
+ return -ENODEV;
+
+ stat = fc_vport_terminate(vport);
+ return stat ? stat : count;
+}
+static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
+ store_fc_host_vport_delete);
+
+
+static int fc_host_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct fc_internal *i;
+
+ if (!scsi_is_host_device(dev))
+ return 0;
+
+ shost = dev_to_shost(dev);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &fc_host_class.class)
+ return 0;
+
+ i = to_fc_internal(shost->transportt);
+
+ return &i->t.host_attrs.ac == cont;
+}
+
+static int fc_target_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct fc_internal *i;
+
+ if (!scsi_is_target_device(dev))
+ return 0;
+
+ shost = dev_to_shost(dev->parent);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &fc_host_class.class)
+ return 0;
+
+ i = to_fc_internal(shost->transportt);
+
+ return &i->t.target_attrs.ac == cont;
+}
+
+static void fc_rport_dev_release(struct device *dev)
+{
+ struct fc_rport *rport = dev_to_rport(dev);
+ put_device(dev->parent);
+ kfree(rport);
+}
+
+int scsi_is_fc_rport(const struct device *dev)
+{
+ return dev->release == fc_rport_dev_release;
+}
+EXPORT_SYMBOL(scsi_is_fc_rport);
+
+static int fc_rport_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct fc_internal *i;
+
+ if (!scsi_is_fc_rport(dev))
+ return 0;
+
+ shost = dev_to_shost(dev->parent);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &fc_host_class.class)
+ return 0;
+
+ i = to_fc_internal(shost->transportt);
+
+ return &i->rport_attr_cont.ac == cont;
+}
+
+
+static void fc_vport_dev_release(struct device *dev)
+{
+ struct fc_vport *vport = dev_to_vport(dev);
+ put_device(dev->parent); /* release kobj parent */
+ kfree(vport);
+}
+
+int scsi_is_fc_vport(const struct device *dev)
+{
+ return dev->release == fc_vport_dev_release;
+}
+EXPORT_SYMBOL(scsi_is_fc_vport);
+
+static int fc_vport_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct fc_vport *vport;
+ struct Scsi_Host *shost;
+ struct fc_internal *i;
+
+ if (!scsi_is_fc_vport(dev))
+ return 0;
+ vport = dev_to_vport(dev);
+
+ shost = vport_to_shost(vport);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &fc_host_class.class)
+ return 0;
+
+ i = to_fc_internal(shost->transportt);
+ return &i->vport_attr_cont.ac == cont;
+}
+
+
+/**
+ * fc_timed_out - FC Transport I/O timeout intercept handler
+ * @scmd: The SCSI command which timed out
+ *
+ * This routine protects against error handlers getting invoked while a
+ * rport is in a blocked state, typically due to a temporarily loss of
+ * connectivity. If the error handlers are allowed to proceed, requests
+ * to abort i/o, reset the target, etc will likely fail as there is no way
+ * to communicate with the device to perform the requested function. These
+ * failures may result in the midlayer taking the device offline, requiring
+ * manual intervention to restore operation.
+ *
+ * This routine, called whenever an i/o times out, validates the state of
+ * the underlying rport. If the rport is blocked, it returns
+ * EH_RESET_TIMER, which will continue to reschedule the timeout.
+ * Eventually, either the device will return, or devloss_tmo will fire,
+ * and when the timeout then fires, it will be handled normally.
+ * If the rport is not blocked, normal error handling continues.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+static enum blk_eh_timer_return
+fc_timed_out(struct scsi_cmnd *scmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
+
+ if (rport->port_state == FC_PORTSTATE_BLOCKED)
+ return BLK_EH_RESET_TIMER;
+
+ return BLK_EH_NOT_HANDLED;
+}
+
+/*
+ * Called by fc_user_scan to locate an rport on the shost that
+ * matches the channel and target id, and invoke scsi_scan_target()
+ * on the rport.
+ */
+static void
+fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
+{
+ struct fc_rport *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ list_for_each_entry(rport, &fc_host_rports(shost), peers) {
+ if (rport->scsi_target_id == -1)
+ continue;
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ continue;
+
+ if ((channel == rport->channel) &&
+ (id == rport->scsi_target_id)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_scan_target(&rport->dev, channel, id, lun, 1);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Called via sysfs scan routines. Necessary, as the FC transport
+ * wants to place all target objects below the rport object. So this
+ * routine must invoke the scsi_scan_target() routine with the rport
+ * object as the parent.
+ */
+static int
+fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
+{
+ uint chlo, chhi;
+ uint tgtlo, tgthi;
+
+ if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
+ ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
+ ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
+ return -EINVAL;
+
+ if (channel == SCAN_WILD_CARD) {
+ chlo = 0;
+ chhi = shost->max_channel + 1;
+ } else {
+ chlo = channel;
+ chhi = channel + 1;
+ }
+
+ if (id == SCAN_WILD_CARD) {
+ tgtlo = 0;
+ tgthi = shost->max_id;
+ } else {
+ tgtlo = id;
+ tgthi = id + 1;
+ }
+
+ for ( ; chlo < chhi; chlo++)
+ for ( ; tgtlo < tgthi; tgtlo++)
+ fc_user_scan_tgt(shost, chlo, tgtlo, lun);
+
+ return 0;
+}
+
+static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
+ int result)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
+}
+
+static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ return i->f->it_nexus_response(shost, nexus, result);
+}
+
+struct scsi_transport_template *
+fc_attach_transport(struct fc_function_template *ft)
+{
+ int count;
+ struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
+ GFP_KERNEL);
+
+ if (unlikely(!i))
+ return NULL;
+
+ i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
+ i->t.target_attrs.ac.class = &fc_transport_class.class;
+ i->t.target_attrs.ac.match = fc_target_match;
+ i->t.target_size = sizeof(struct fc_starget_attrs);
+ transport_container_register(&i->t.target_attrs);
+
+ i->t.host_attrs.ac.attrs = &i->host_attrs[0];
+ i->t.host_attrs.ac.class = &fc_host_class.class;
+ i->t.host_attrs.ac.match = fc_host_match;
+ i->t.host_size = sizeof(struct fc_host_attrs);
+ if (ft->get_fc_host_stats)
+ i->t.host_attrs.statistics = &fc_statistics_group;
+ transport_container_register(&i->t.host_attrs);
+
+ i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
+ i->rport_attr_cont.ac.class = &fc_rport_class.class;
+ i->rport_attr_cont.ac.match = fc_rport_match;
+ transport_container_register(&i->rport_attr_cont);
+
+ i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
+ i->vport_attr_cont.ac.class = &fc_vport_class.class;
+ i->vport_attr_cont.ac.match = fc_vport_match;
+ transport_container_register(&i->vport_attr_cont);
+
+ i->f = ft;
+
+ /* Transport uses the shost workq for scsi scanning */
+ i->t.create_work_queue = 1;
+
+ i->t.eh_timed_out = fc_timed_out;
+
+ i->t.user_scan = fc_user_scan;
+
+ /* target-mode drivers' functions */
+ i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
+ i->t.it_nexus_response = fc_it_nexus_response;
+
+ /*
+ * Setup SCSI Target Attributes.
+ */
+ count = 0;
+ SETUP_STARGET_ATTRIBUTE_RD(node_name);
+ SETUP_STARGET_ATTRIBUTE_RD(port_name);
+ SETUP_STARGET_ATTRIBUTE_RD(port_id);
+
+ BUG_ON(count > FC_STARGET_NUM_ATTRS);
+
+ i->starget_attrs[count] = NULL;
+
+
+ /*
+ * Setup SCSI Host Attributes.
+ */
+ count=0;
+ SETUP_HOST_ATTRIBUTE_RD(node_name);
+ SETUP_HOST_ATTRIBUTE_RD(port_name);
+ SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
+ SETUP_HOST_ATTRIBUTE_RD(supported_classes);
+ SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
+ SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
+ SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
+ if (ft->vport_create) {
+ SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
+ SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
+ }
+ SETUP_HOST_ATTRIBUTE_RD(serial_number);
+ SETUP_HOST_ATTRIBUTE_RD(manufacturer);
+ SETUP_HOST_ATTRIBUTE_RD(model);
+ SETUP_HOST_ATTRIBUTE_RD(model_description);
+ SETUP_HOST_ATTRIBUTE_RD(hardware_version);
+ SETUP_HOST_ATTRIBUTE_RD(driver_version);
+ SETUP_HOST_ATTRIBUTE_RD(firmware_version);
+ SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
+
+ SETUP_HOST_ATTRIBUTE_RD(port_id);
+ SETUP_HOST_ATTRIBUTE_RD(port_type);
+ SETUP_HOST_ATTRIBUTE_RD(port_state);
+ SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
+ SETUP_HOST_ATTRIBUTE_RD(speed);
+ SETUP_HOST_ATTRIBUTE_RD(fabric_name);
+ SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
+ SETUP_HOST_ATTRIBUTE_RW(system_hostname);
+
+ /* Transport-managed attributes */
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
+ if (ft->issue_fc_host_lip)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
+ if (ft->vport_create)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
+ if (ft->vport_delete)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
+
+ BUG_ON(count > FC_HOST_NUM_ATTRS);
+
+ i->host_attrs[count] = NULL;
+
+ /*
+ * Setup Remote Port Attributes.
+ */
+ count=0;
+ SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
+ SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
+ SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
+
+ BUG_ON(count > FC_RPORT_NUM_ATTRS);
+
+ i->rport_attrs[count] = NULL;
+
+ /*
+ * Setup Virtual Port Attributes.
+ */
+ count=0;
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
+ SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
+ SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
+ SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
+
+ BUG_ON(count > FC_VPORT_NUM_ATTRS);
+
+ i->vport_attrs[count] = NULL;
+
+ return &i->t;
+}
+EXPORT_SYMBOL(fc_attach_transport);
+
+void fc_release_transport(struct scsi_transport_template *t)
+{
+ struct fc_internal *i = to_fc_internal(t);
+
+ transport_container_unregister(&i->t.target_attrs);
+ transport_container_unregister(&i->t.host_attrs);
+ transport_container_unregister(&i->rport_attr_cont);
+ transport_container_unregister(&i->vport_attr_cont);
+
+ kfree(i);
+}
+EXPORT_SYMBOL(fc_release_transport);
+
+/**
+ * fc_queue_work - Queue work to the fc_host workqueue.
+ * @shost: Pointer to Scsi_Host bound to fc_host.
+ * @work: Work to queue for execution.
+ *
+ * Return value:
+ * 1 - work queued for execution
+ * 0 - work is already queued
+ * -EINVAL - work queue doesn't exist
+ */
+static int
+fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
+{
+ if (unlikely(!fc_host_work_q(shost))) {
+ printk(KERN_ERR
+ "ERROR: FC host '%s' attempted to queue work, "
+ "when no workqueue created.\n", shost->hostt->name);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_work(fc_host_work_q(shost), work);
+}
+
+/**
+ * fc_flush_work - Flush a fc_host's workqueue.
+ * @shost: Pointer to Scsi_Host bound to fc_host.
+ */
+static void
+fc_flush_work(struct Scsi_Host *shost)
+{
+ if (!fc_host_work_q(shost)) {
+ printk(KERN_ERR
+ "ERROR: FC host '%s' attempted to flush work, "
+ "when no workqueue created.\n", shost->hostt->name);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fc_host_work_q(shost));
+}
+
+/**
+ * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
+ * @shost: Pointer to Scsi_Host bound to fc_host.
+ * @work: Work to queue for execution.
+ * @delay: jiffies to delay the work queuing
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+static int
+fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
+ unsigned long delay)
+{
+ if (unlikely(!fc_host_devloss_work_q(shost))) {
+ printk(KERN_ERR
+ "ERROR: FC host '%s' attempted to queue work, "
+ "when no workqueue created.\n", shost->hostt->name);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
+}
+
+/**
+ * fc_flush_devloss - Flush a fc_host's devloss workqueue.
+ * @shost: Pointer to Scsi_Host bound to fc_host.
+ */
+static void
+fc_flush_devloss(struct Scsi_Host *shost)
+{
+ if (!fc_host_devloss_work_q(shost)) {
+ printk(KERN_ERR
+ "ERROR: FC host '%s' attempted to flush work, "
+ "when no workqueue created.\n", shost->hostt->name);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fc_host_devloss_work_q(shost));
+}
+
+
+/**
+ * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
+ * @shost: Which &Scsi_Host
+ *
+ * This routine is expected to be called immediately preceding the
+ * a driver's call to scsi_remove_host().
+ *
+ * WARNING: A driver utilizing the fc_transport, which fails to call
+ * this routine prior to scsi_remove_host(), will leave dangling
+ * objects in /sys/class/fc_remote_ports. Access to any of these
+ * objects can result in a system crash !!!
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_remove_host(struct Scsi_Host *shost)
+{
+ struct fc_vport *vport = NULL, *next_vport = NULL;
+ struct fc_rport *rport = NULL, *next_rport = NULL;
+ struct workqueue_struct *work_q;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ /* Remove any vports */
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
+ fc_queue_work(shost, &vport->vport_delete_work);
+
+ /* Remove any remote ports */
+ list_for_each_entry_safe(rport, next_rport,
+ &fc_host->rports, peers) {
+ list_del(&rport->peers);
+ rport->port_state = FC_PORTSTATE_DELETED;
+ fc_queue_work(shost, &rport->rport_delete_work);
+ }
+
+ list_for_each_entry_safe(rport, next_rport,
+ &fc_host->rport_bindings, peers) {
+ list_del(&rport->peers);
+ rport->port_state = FC_PORTSTATE_DELETED;
+ fc_queue_work(shost, &rport->rport_delete_work);
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* flush all scan work items */
+ scsi_flush_work(shost);
+
+ /* flush all stgt delete, and rport delete work items, then kill it */
+ if (fc_host->work_q) {
+ work_q = fc_host->work_q;
+ fc_host->work_q = NULL;
+ destroy_workqueue(work_q);
+ }
+
+ /* flush all devloss work items, then kill it */
+ if (fc_host->devloss_work_q) {
+ work_q = fc_host->devloss_work_q;
+ fc_host->devloss_work_q = NULL;
+ destroy_workqueue(work_q);
+ }
+}
+EXPORT_SYMBOL(fc_remove_host);
+
+static void fc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ /* Involve the LLDD if possible to terminate all io on the rport. */
+ if (i->f->terminate_rport_io)
+ i->f->terminate_rport_io(rport);
+
+ /*
+ * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
+ */
+ scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
+}
+
+/**
+ * fc_starget_delete - called to delete the scsi descendants of an rport
+ * @work: remote port to be operated on.
+ *
+ * Deletes target and all sdevs.
+ */
+static void
+fc_starget_delete(struct work_struct *work)
+{
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, stgt_delete_work);
+
+ fc_terminate_rport_io(rport);
+ scsi_remove_target(&rport->dev);
+}
+
+
+/**
+ * fc_rport_final_delete - finish rport termination and delete it.
+ * @work: remote port to be deleted.
+ */
+static void
+fc_rport_final_delete(struct work_struct *work)
+{
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, rport_delete_work);
+ struct device *dev = &rport->dev;
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ unsigned long flags;
+ int do_callback = 0;
+
+ fc_terminate_rport_io(rport);
+
+ /*
+ * if a scan is pending, flush the SCSI Host work_q so that
+ * that we can reclaim the rport scan work element.
+ */
+ if (rport->flags & FC_RPORT_SCAN_PENDING)
+ scsi_flush_work(shost);
+
+ /*
+ * Cancel any outstanding timers. These should really exist
+ * only when rmmod'ing the LLDD and we're asking for
+ * immediate termination of the rports
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (!cancel_delayed_work(&rport->fail_io_work))
+ fc_flush_devloss(shost);
+ if (!cancel_delayed_work(&rport->dev_loss_work))
+ fc_flush_devloss(shost);
+ cancel_work_sync(&rport->scan_work);
+ spin_lock_irqsave(shost->host_lock, flags);
+ rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* Delete SCSI target and sdevs */
+ if (rport->scsi_target_id != -1)
+ fc_starget_delete(&rport->stgt_delete_work);
+
+ /*
+ * Notify the driver that the rport is now dead. The LLDD will
+ * also guarantee that any communication to the rport is terminated
+ *
+ * Avoid this call if we already called it when we preserved the
+ * rport for the binding.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
+ (i->f->dev_loss_tmo_callbk)) {
+ rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
+ do_callback = 1;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (do_callback)
+ i->f->dev_loss_tmo_callbk(rport);
+
+ fc_bsg_remove(rport->rqst_q);
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+ put_device(&shost->shost_gendev); /* for fc_host->rport list */
+ put_device(dev); /* for self-reference */
+}
+
+
+/**
+ * fc_rport_create - allocates and creates a remote FC port.
+ * @shost: scsi host the remote port is connected to.
+ * @channel: Channel on shost port connected to.
+ * @ids: The world wide names, fc address, and FC4 port
+ * roles for the remote port.
+ *
+ * Allocates and creates the remoter port structure, including the
+ * class and sysfs creation.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+static struct fc_rport *
+fc_rport_create(struct Scsi_Host *shost, int channel,
+ struct fc_rport_identifiers *ids)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *fci = to_fc_internal(shost->transportt);
+ struct fc_rport *rport;
+ struct device *dev;
+ unsigned long flags;
+ int error;
+ size_t size;
+
+ size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
+ rport = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!rport)) {
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
+ return NULL;
+ }
+
+ rport->maxframe_size = -1;
+ rport->supported_classes = FC_COS_UNSPECIFIED;
+ rport->dev_loss_tmo = fc_host->dev_loss_tmo;
+ memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
+ memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
+ rport->port_id = ids->port_id;
+ rport->roles = ids->roles;
+ rport->port_state = FC_PORTSTATE_ONLINE;
+ if (fci->f->dd_fcrport_size)
+ rport->dd_data = &rport[1];
+ rport->channel = channel;
+ rport->fast_io_fail_tmo = -1;
+
+ INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
+ INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
+ INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
+ INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
+ INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ rport->number = fc_host->next_rport_number++;
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
+ rport->scsi_target_id = fc_host->next_target_id++;
+ else
+ rport->scsi_target_id = -1;
+ list_add_tail(&rport->peers, &fc_host->rports);
+ get_device(&shost->shost_gendev); /* for fc_host->rport list */
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev = &rport->dev;
+ device_initialize(dev); /* takes self reference */
+ dev->parent = get_device(&shost->shost_gendev); /* parent reference */
+ dev->release = fc_rport_dev_release;
+ dev_set_name(dev, "rport-%d:%d-%d",
+ shost->host_no, channel, rport->number);
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error) {
+ printk(KERN_ERR "FC Remote Port device_add failed\n");
+ goto delete_rport;
+ }
+ transport_add_device(dev);
+ transport_configure_device(dev);
+
+ fc_bsg_rportadd(shost, rport);
+ /* ignore any bsg add error - we just can't do sgio */
+
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
+ /* initiate a scan of the target */
+ rport->flags |= FC_RPORT_SCAN_PENDING;
+ scsi_queue_work(shost, &rport->scan_work);
+ }
+
+ return rport;
+
+delete_rport:
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del(&rport->peers);
+ put_device(&shost->shost_gendev); /* for fc_host->rport list */
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev->parent);
+ kfree(rport);
+ return NULL;
+}
+
+/**
+ * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
+ * @shost: scsi host the remote port is connected to.
+ * @channel: Channel on shost port connected to.
+ * @ids: The world wide names, fc address, and FC4 port
+ * roles for the remote port.
+ *
+ * The LLDD calls this routine to notify the transport of the existence
+ * of a remote port. The LLDD provides the unique identifiers (wwpn,wwn)
+ * of the port, it's FC address (port_id), and the FC4 roles that are
+ * active for the port.
+ *
+ * For ports that are FCP targets (aka scsi targets), the FC transport
+ * maintains consistent target id bindings on behalf of the LLDD.
+ * A consistent target id binding is an assignment of a target id to
+ * a remote port identifier, which persists while the scsi host is
+ * attached. The remote port can disappear, then later reappear, and
+ * it's target id assignment remains the same. This allows for shifts
+ * in FC addressing (if binding by wwpn or wwnn) with no apparent
+ * changes to the scsi subsystem which is based on scsi host number and
+ * target id values. Bindings are only valid during the attachment of
+ * the scsi host. If the host detaches, then later re-attaches, target
+ * id bindings may change.
+ *
+ * This routine is responsible for returning a remote port structure.
+ * The routine will search the list of remote ports it maintains
+ * internally on behalf of consistent target id mappings. If found, the
+ * remote port structure will be reused. Otherwise, a new remote port
+ * structure will be allocated.
+ *
+ * Whenever a remote port is allocated, a new fc_remote_port class
+ * device is created.
+ *
+ * Should not be called from interrupt context.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_rport *
+fc_remote_port_add(struct Scsi_Host *shost, int channel,
+ struct fc_rport_identifiers *ids)
+{
+ struct fc_internal *fci = to_fc_internal(shost->transportt);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_rport *rport;
+ unsigned long flags;
+ int match = 0;
+
+ /* ensure any stgt delete functions are done */
+ fc_flush_work(shost);
+
+ /*
+ * Search the list of "active" rports, for an rport that has been
+ * deleted, but we've held off the real delete while the target
+ * is in a "blocked" state.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ list_for_each_entry(rport, &fc_host->rports, peers) {
+
+ if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
+ (rport->channel == channel)) {
+
+ switch (fc_host->tgtid_bind_type) {
+ case FC_TGTID_BIND_BY_WWPN:
+ case FC_TGTID_BIND_NONE:
+ if (rport->port_name == ids->port_name)
+ match = 1;
+ break;
+ case FC_TGTID_BIND_BY_WWNN:
+ if (rport->node_name == ids->node_name)
+ match = 1;
+ break;
+ case FC_TGTID_BIND_BY_ID:
+ if (rport->port_id == ids->port_id)
+ match = 1;
+ break;
+ }
+
+ if (match) {
+
+ memcpy(&rport->node_name, &ids->node_name,
+ sizeof(rport->node_name));
+ memcpy(&rport->port_name, &ids->port_name,
+ sizeof(rport->port_name));
+ rport->port_id = ids->port_id;
+
+ rport->port_state = FC_PORTSTATE_ONLINE;
+ rport->roles = ids->roles;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (fci->f->dd_fcrport_size)
+ memset(rport->dd_data, 0,
+ fci->f->dd_fcrport_size);
+
+ /*
+ * If we were not a target, cancel the
+ * io terminate and rport timers, and
+ * we're done.
+ *
+ * If we were a target, but our new role
+ * doesn't indicate a target, leave the
+ * timers running expecting the role to
+ * change as the target fully logs in. If
+ * it doesn't, the target will be torn down.
+ *
+ * If we were a target, and our role shows
+ * we're still a target, cancel the timers
+ * and kick off a scan.
+ */
+
+ /* was a target, not in roles */
+ if ((rport->scsi_target_id != -1) &&
+ (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
+ return rport;
+
+ /*
+ * Stop the fail io and dev_loss timers.
+ * If they flush, the port_state will
+ * be checked and will NOOP the function.
+ */
+ if (!cancel_delayed_work(&rport->fail_io_work))
+ fc_flush_devloss(shost);
+ if (!cancel_delayed_work(&rport->dev_loss_work))
+ fc_flush_devloss(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+ FC_RPORT_DEVLOSS_PENDING |
+ FC_RPORT_DEVLOSS_CALLBK_DONE);
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* if target, initiate a scan */
+ if (rport->scsi_target_id != -1) {
+ scsi_target_unblock(&rport->dev,
+ SDEV_RUNNING);
+ spin_lock_irqsave(shost->host_lock,
+ flags);
+ rport->flags |= FC_RPORT_SCAN_PENDING;
+ scsi_queue_work(shost,
+ &rport->scan_work);
+ spin_unlock_irqrestore(shost->host_lock,
+ flags);
+ }
+
+ fc_bsg_goose_queue(rport);
+
+ return rport;
+ }
+ }
+ }
+
+ /*
+ * Search the bindings array
+ * Note: if never a FCP target, you won't be on this list
+ */
+ if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
+
+ /* search for a matching consistent binding */
+
+ list_for_each_entry(rport, &fc_host->rport_bindings,
+ peers) {
+ if (rport->channel != channel)
+ continue;
+
+ switch (fc_host->tgtid_bind_type) {
+ case FC_TGTID_BIND_BY_WWPN:
+ if (rport->port_name == ids->port_name)
+ match = 1;
+ break;
+ case FC_TGTID_BIND_BY_WWNN:
+ if (rport->node_name == ids->node_name)
+ match = 1;
+ break;
+ case FC_TGTID_BIND_BY_ID:
+ if (rport->port_id == ids->port_id)
+ match = 1;
+ break;
+ case FC_TGTID_BIND_NONE: /* to keep compiler happy */
+ break;
+ }
+
+ if (match) {
+ list_move_tail(&rport->peers, &fc_host->rports);
+ break;
+ }
+ }
+
+ if (match) {
+ memcpy(&rport->node_name, &ids->node_name,
+ sizeof(rport->node_name));
+ memcpy(&rport->port_name, &ids->port_name,
+ sizeof(rport->port_name));
+ rport->port_id = ids->port_id;
+ rport->roles = ids->roles;
+ rport->port_state = FC_PORTSTATE_ONLINE;
+ rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
+
+ if (fci->f->dd_fcrport_size)
+ memset(rport->dd_data, 0,
+ fci->f->dd_fcrport_size);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
+ scsi_target_unblock(&rport->dev, SDEV_RUNNING);
+
+ /* initiate a scan of the target */
+ spin_lock_irqsave(shost->host_lock, flags);
+ rport->flags |= FC_RPORT_SCAN_PENDING;
+ scsi_queue_work(shost, &rport->scan_work);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ return rport;
+ }
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* No consistent binding found - create new remote port entry */
+ rport = fc_rport_create(shost, channel, ids);
+
+ return rport;
+}
+EXPORT_SYMBOL(fc_remote_port_add);
+
+
+/**
+ * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
+ * @rport: The remote port that no longer exists
+ *
+ * The LLDD calls this routine to notify the transport that a remote
+ * port is no longer part of the topology. Note: Although a port
+ * may no longer be part of the topology, it may persist in the remote
+ * ports displayed by the fc_host. We do this under 2 conditions:
+ * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
+ * This allows the port to temporarily disappear, then reappear without
+ * disrupting the SCSI device tree attached to it. During the "blocked"
+ * period the port will still exist.
+ * 2) If the port was a scsi target and disappears for longer than we
+ * expect, we'll delete the port and the tear down the SCSI device tree
+ * attached to it. However, we want to semi-persist the target id assigned
+ * to that port if it eventually does exist. The port structure will
+ * remain (although with minimal information) so that the target id
+ * bindings remails.
+ *
+ * If the remote port is not an FCP Target, it will be fully torn down
+ * and deallocated, including the fc_remote_port class device.
+ *
+ * If the remote port is an FCP Target, the port will be placed in a
+ * temporary blocked state. From the LLDD's perspective, the rport no
+ * longer exists. From the SCSI midlayer's perspective, the SCSI target
+ * exists, but all sdevs on it are blocked from further I/O. The following
+ * is then expected.
+ *
+ * If the remote port does not return (signaled by a LLDD call to
+ * fc_remote_port_add()) within the dev_loss_tmo timeout, then the
+ * scsi target is removed - killing all outstanding i/o and removing the
+ * scsi devices attached ot it. The port structure will be marked Not
+ * Present and be partially cleared, leaving only enough information to
+ * recognize the remote port relative to the scsi target id binding if
+ * it later appears. The port will remain as long as there is a valid
+ * binding (e.g. until the user changes the binding type or unloads the
+ * scsi host with the binding).
+ *
+ * If the remote port returns within the dev_loss_tmo value (and matches
+ * according to the target id binding type), the port structure will be
+ * reused. If it is no longer a SCSI target, the target will be torn
+ * down. If it continues to be a SCSI target, then the target will be
+ * unblocked (allowing i/o to be resumed), and a scan will be activated
+ * to ensure that all luns are detected.
+ *
+ * Called from normal process context only - cannot be called from interrupt.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_remote_port_delete(struct fc_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ unsigned long timeout = rport->dev_loss_tmo;
+ unsigned long flags;
+
+ /*
+ * No need to flush the fc_host work_q's, as all adds are synchronous.
+ *
+ * We do need to reclaim the rport scan work element, so eventually
+ * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
+ * there's still a scan pending.
+ */
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return;
+ }
+
+ /*
+ * In the past, we if this was not an FCP-Target, we would
+ * unconditionally just jump to deleting the rport.
+ * However, rports can be used as node containers by the LLDD,
+ * and its not appropriate to just terminate the rport at the
+ * first sign of a loss in connectivity. The LLDD may want to
+ * send ELS traffic to re-validate the login. If the rport is
+ * immediately deleted, it makes it inappropriate for a node
+ * container.
+ * So... we now unconditionally wait dev_loss_tmo before
+ * destroying an rport.
+ */
+
+ rport->port_state = FC_PORTSTATE_BLOCKED;
+
+ rport->flags |= FC_RPORT_DEVLOSS_PENDING;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_target_block(&rport->dev);
+
+ /* see if we need to kill io faster than waiting for device loss */
+ if ((rport->fast_io_fail_tmo != -1) &&
+ (rport->fast_io_fail_tmo < timeout))
+ fc_queue_devloss_work(shost, &rport->fail_io_work,
+ rport->fast_io_fail_tmo * HZ);
+
+ /* cap the length the devices can be blocked until they are deleted */
+ fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
+}
+EXPORT_SYMBOL(fc_remote_port_delete);
+
+/**
+ * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
+ * @rport: The remote port that changed.
+ * @roles: New roles for this port.
+ *
+ * Description: The LLDD calls this routine to notify the transport that the
+ * roles on a remote port may have changed. The largest effect of this is
+ * if a port now becomes a FCP Target, it must be allocated a
+ * scsi target id. If the port is no longer a FCP target, any
+ * scsi target id value assigned to it will persist in case the
+ * role changes back to include FCP Target. No changes in the scsi
+ * midlayer will be invoked if the role changes (in the expectation
+ * that the role will be resumed. If it doesn't normal error processing
+ * will take place).
+ *
+ * Should not be called from interrupt context.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+void
+fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ unsigned long flags;
+ int create = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (roles & FC_PORT_ROLE_FCP_TARGET) {
+ if (rport->scsi_target_id == -1) {
+ rport->scsi_target_id = fc_host->next_target_id++;
+ create = 1;
+ } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
+ create = 1;
+ }
+
+ rport->roles = roles;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (create) {
+ /*
+ * There may have been a delete timer running on the
+ * port. Ensure that it is cancelled as we now know
+ * the port is an FCP Target.
+ * Note: we know the rport is exists and in an online
+ * state as the LLDD would not have had an rport
+ * reference to pass us.
+ *
+ * Take no action on the del_timer failure as the state
+ * machine state change will validate the
+ * transaction.
+ */
+ if (!cancel_delayed_work(&rport->fail_io_work))
+ fc_flush_devloss(shost);
+ if (!cancel_delayed_work(&rport->dev_loss_work))
+ fc_flush_devloss(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+ FC_RPORT_DEVLOSS_PENDING |
+ FC_RPORT_DEVLOSS_CALLBK_DONE);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* ensure any stgt delete functions are done */
+ fc_flush_work(shost);
+
+ scsi_target_unblock(&rport->dev, SDEV_RUNNING);
+ /* initiate a scan of the target */
+ spin_lock_irqsave(shost->host_lock, flags);
+ rport->flags |= FC_RPORT_SCAN_PENDING;
+ scsi_queue_work(shost, &rport->scan_work);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+}
+EXPORT_SYMBOL(fc_remote_port_rolechg);
+
+/**
+ * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
+ * @work: rport target that failed to reappear in the allotted time.
+ *
+ * Description: An attempt to delete a remote port blocks, and if it fails
+ * to return in the allotted time this gets called.
+ */
+static void
+fc_timeout_deleted_rport(struct work_struct *work)
+{
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, dev_loss_work.work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ unsigned long flags;
+ int do_callback = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+
+ /*
+ * If the port is ONLINE, then it came back. If it was a SCSI
+ * target, validate it still is. If not, tear down the
+ * scsi_target on it.
+ */
+ if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
+ (rport->scsi_target_id != -1) &&
+ !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
+ dev_printk(KERN_ERR, &rport->dev,
+ "blocked FC remote port time out: no longer"
+ " a FCP target, removing starget\n");
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
+ fc_queue_work(shost, &rport->stgt_delete_work);
+ return;
+ }
+
+ /* NOOP state - we're flushing workq's */
+ if (rport->port_state != FC_PORTSTATE_BLOCKED) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ dev_printk(KERN_ERR, &rport->dev,
+ "blocked FC remote port time out: leaving"
+ " rport%s alone\n",
+ (rport->scsi_target_id != -1) ? " and starget" : "");
+ return;
+ }
+
+ if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
+ (rport->scsi_target_id == -1)) {
+ list_del(&rport->peers);
+ rport->port_state = FC_PORTSTATE_DELETED;
+ dev_printk(KERN_ERR, &rport->dev,
+ "blocked FC remote port time out: removing"
+ " rport%s\n",
+ (rport->scsi_target_id != -1) ? " and starget" : "");
+ fc_queue_work(shost, &rport->rport_delete_work);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return;
+ }
+
+ dev_printk(KERN_ERR, &rport->dev,
+ "blocked FC remote port time out: removing target and "
+ "saving binding\n");
+
+ list_move_tail(&rport->peers, &fc_host->rport_bindings);
+
+ /*
+ * Note: We do not remove or clear the hostdata area. This allows
+ * host-specific target data to persist along with the
+ * scsi_target_id. It's up to the host to manage it's hostdata area.
+ */
+
+ /*
+ * Reinitialize port attributes that may change if the port comes back.
+ */
+ rport->maxframe_size = -1;
+ rport->supported_classes = FC_COS_UNSPECIFIED;
+ rport->roles = FC_PORT_ROLE_UNKNOWN;
+ rport->port_state = FC_PORTSTATE_NOTPRESENT;
+ rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
+
+ /*
+ * Pre-emptively kill I/O rather than waiting for the work queue
+ * item to teardown the starget. (FCOE libFC folks prefer this
+ * and to have the rport_port_id still set when it's done).
+ */
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ fc_terminate_rport_io(rport);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
+
+ /* remove the identifiers that aren't used in the consisting binding */
+ switch (fc_host->tgtid_bind_type) {
+ case FC_TGTID_BIND_BY_WWPN:
+ rport->node_name = -1;
+ rport->port_id = -1;
+ break;
+ case FC_TGTID_BIND_BY_WWNN:
+ rport->port_name = -1;
+ rport->port_id = -1;
+ break;
+ case FC_TGTID_BIND_BY_ID:
+ rport->node_name = -1;
+ rport->port_name = -1;
+ break;
+ case FC_TGTID_BIND_NONE: /* to keep compiler happy */
+ break;
+ }
+
+ /*
+ * As this only occurs if the remote port (scsi target)
+ * went away and didn't come back - we'll remove
+ * all attached scsi devices.
+ */
+ rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
+ fc_queue_work(shost, &rport->stgt_delete_work);
+
+ do_callback = 1;
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Notify the driver that the rport is now dead. The LLDD will
+ * also guarantee that any communication to the rport is terminated
+ *
+ * Note: we set the CALLBK_DONE flag above to correspond
+ */
+ if (do_callback && i->f->dev_loss_tmo_callbk)
+ i->f->dev_loss_tmo_callbk(rport);
+}
+
+
+/**
+ * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
+ * @work: rport to terminate io on.
+ *
+ * Notes: Only requests the failure of the io, not that all are flushed
+ * prior to returning.
+ */
+static void
+fc_timeout_fail_rport_io(struct work_struct *work)
+{
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, fail_io_work.work);
+
+ if (rport->port_state != FC_PORTSTATE_BLOCKED)
+ return;
+
+ rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
+ fc_terminate_rport_io(rport);
+}
+
+/**
+ * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
+ * @work: remote port to be scanned.
+ */
+static void
+fc_scsi_scan_rport(struct work_struct *work)
+{
+ struct fc_rport *rport =
+ container_of(work, struct fc_rport, scan_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ unsigned long flags;
+
+ if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
+ !(i->f->disable_target_scan)) {
+ scsi_scan_target(&rport->dev, rport->channel,
+ rport->scsi_target_id, SCAN_WILD_CARD, 1);
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ rport->flags &= ~FC_RPORT_SCAN_PENDING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
+ * @cmnd: SCSI command that scsi_eh is trying to recover
+ *
+ * This routine can be called from a FC LLD scsi_eh callback. It
+ * blocks the scsi_eh thread until the fc_rport leaves the
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
+ * passed back to scsi_eh.
+ */
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ while (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ msleep(1000);
+ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
+ return FAST_IO_FAIL;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_block_scsi_eh);
+
+/**
+ * fc_vport_setup - allocates and creates a FC virtual port.
+ * @shost: scsi host the virtual port is connected to.
+ * @channel: Channel on shost port connected to.
+ * @pdev: parent device for vport
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ * @ret_vport: The pointer to the created vport.
+ *
+ * Allocates and creates the vport structure, calls the parent host
+ * to instantiate the vport, the completes w/ class and sysfs creation.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+static int
+fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
+ struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *fci = to_fc_internal(shost->transportt);
+ struct fc_vport *vport;
+ struct device *dev;
+ unsigned long flags;
+ size_t size;
+ int error;
+
+ *ret_vport = NULL;
+
+ if ( ! fci->f->vport_create)
+ return -ENOENT;
+
+ size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
+ vport = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!vport)) {
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ vport->vport_state = FC_VPORT_UNKNOWN;
+ vport->vport_last_state = FC_VPORT_UNKNOWN;
+ vport->node_name = ids->node_name;
+ vport->port_name = ids->port_name;
+ vport->roles = ids->roles;
+ vport->vport_type = ids->vport_type;
+ if (fci->f->dd_fcvport_size)
+ vport->dd_data = &vport[1];
+ vport->shost = shost;
+ vport->channel = channel;
+ vport->flags = FC_VPORT_CREATING;
+ INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ kfree(vport);
+ return -ENOSPC;
+ }
+ fc_host->npiv_vports_inuse++;
+ vport->number = fc_host->next_vport_number++;
+ list_add_tail(&vport->peers, &fc_host->vports);
+ get_device(&shost->shost_gendev); /* for fc_host->vport list */
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev = &vport->dev;
+ device_initialize(dev); /* takes self reference */
+ dev->parent = get_device(pdev); /* takes parent reference */
+ dev->release = fc_vport_dev_release;
+ dev_set_name(dev, "vport-%d:%d-%d",
+ shost->host_no, channel, vport->number);
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error) {
+ printk(KERN_ERR "FC Virtual Port device_add failed\n");
+ goto delete_vport;
+ }
+ transport_add_device(dev);
+ transport_configure_device(dev);
+
+ error = fci->f->vport_create(vport, ids->disable);
+ if (error) {
+ printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
+ goto delete_vport_all;
+ }
+
+ /*
+ * if the parent isn't the physical adapter's Scsi_Host, ensure
+ * the Scsi_Host at least contains ia symlink to the vport.
+ */
+ if (pdev != &shost->shost_gendev) {
+ error = sysfs_create_link(&shost->shost_gendev.kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ printk(KERN_ERR
+ "%s: Cannot create vport symlinks for "
+ "%s, err=%d\n",
+ __func__, dev_name(dev), error);
+ }
+ spin_lock_irqsave(shost->host_lock, flags);
+ vport->flags &= ~FC_VPORT_CREATING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev_printk(KERN_NOTICE, pdev,
+ "%s created via shost%d channel %d\n", dev_name(dev),
+ shost->host_no, channel);
+
+ *ret_vport = vport;
+
+ return 0;
+
+delete_vport_all:
+ transport_remove_device(dev);
+ device_del(dev);
+delete_vport:
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del(&vport->peers);
+ put_device(&shost->shost_gendev); /* for fc_host->vport list */
+ fc_host->npiv_vports_inuse--;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev->parent);
+ kfree(vport);
+
+ return error;
+}
+
+/**
+ * fc_vport_create - Admin App or LLDD requests creation of a vport
+ * @shost: scsi host the virtual port is connected to.
+ * @channel: channel on shost port connected to.
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_vport *
+fc_vport_create(struct Scsi_Host *shost, int channel,
+ struct fc_vport_identifiers *ids)
+{
+ int stat;
+ struct fc_vport *vport;
+
+ stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
+ ids, &vport);
+ return stat ? NULL : vport;
+}
+EXPORT_SYMBOL(fc_vport_create);
+
+/**
+ * fc_vport_terminate - Admin App or LLDD requests termination of a vport
+ * @vport: fc_vport to be terminated
+ *
+ * Calls the LLDD vport_delete() function, then deallocates and removes
+ * the vport from the shost and object tree.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+int
+fc_vport_terminate(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct device *dev = &vport->dev;
+ unsigned long flags;
+ int stat;
+
+ if (i->f->vport_delete)
+ stat = i->f->vport_delete(vport);
+ else
+ stat = -ENOENT;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ vport->flags &= ~FC_VPORT_DELETING;
+ if (!stat) {
+ vport->flags |= FC_VPORT_DELETED;
+ list_del(&vport->peers);
+ fc_host->npiv_vports_inuse--;
+ put_device(&shost->shost_gendev); /* for fc_host->vport list */
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (stat)
+ return stat;
+
+ if (dev->parent != &shost->shost_gendev)
+ sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+
+ /*
+ * Removing our self-reference should mean our
+ * release function gets called, which will drop the remaining
+ * parent reference and free the data structure.
+ */
+ put_device(dev); /* for self-reference */
+
+ return 0; /* SUCCESS */
+}
+EXPORT_SYMBOL(fc_vport_terminate);
+
+/**
+ * fc_vport_sched_delete - workq-based delete request for a vport
+ * @work: vport to be deleted.
+ */
+static void
+fc_vport_sched_delete(struct work_struct *work)
+{
+ struct fc_vport *vport =
+ container_of(work, struct fc_vport, vport_delete_work);
+ int stat;
+
+ stat = fc_vport_terminate(vport);
+ if (stat)
+ dev_printk(KERN_ERR, vport->dev.parent,
+ "%s: %s could not be deleted created via "
+ "shost%d channel %d - error %d\n", __func__,
+ dev_name(&vport->dev), vport->shost->host_no,
+ vport->channel, stat);
+}
+
+
+/*
+ * BSG support
+ */
+
+
+/**
+ * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
+ * @job: fc_bsg_job that is to be torn down
+ */
+static void
+fc_destroy_bsgjob(struct fc_bsg_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ if (job->ref_cnt) {
+ spin_unlock_irqrestore(&job->job_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ put_device(job->dev); /* release reference for the request */
+
+ kfree(job->request_payload.sg_list);
+ kfree(job->reply_payload.sg_list);
+ kfree(job);
+}
+
+/**
+ * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
+ * completed
+ * @job: fc_bsg_job that is complete
+ */
+static void
+fc_bsg_jobdone(struct fc_bsg_job *job)
+{
+ struct request *req = job->req;
+ struct request *rsp = req->next_rq;
+ int err;
+
+ err = job->req->errors = job->reply->result;
+
+ if (err < 0)
+ /* we're only returning the result field in the reply */
+ job->req->sense_len = sizeof(uint32_t);
+ else
+ job->req->sense_len = job->reply_len;
+
+ /* we assume all request payload was transferred, residual == 0 */
+ req->resid_len = 0;
+
+ if (rsp) {
+ WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
+
+ /* set reply (bidi) residual */
+ rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
+ rsp->resid_len);
+ }
+ blk_complete_request(req);
+}
+
+/**
+ * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * @rq: BSG request that holds the job to be destroyed
+ */
+static void fc_bsg_softirq_done(struct request *rq)
+{
+ struct fc_bsg_job *job = rq->special;
+ unsigned long flags;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ job->state_flags |= FC_RQST_STATE_DONE;
+ job->ref_cnt--;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ blk_end_request_all(rq, rq->errors);
+ fc_destroy_bsgjob(job);
+}
+
+/**
+ * fc_bsg_job_timeout - handler for when a bsg request timesout
+ * @req: request that timed out
+ */
+static enum blk_eh_timer_return
+fc_bsg_job_timeout(struct request *req)
+{
+ struct fc_bsg_job *job = (void *) req->special;
+ struct Scsi_Host *shost = job->shost;
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ unsigned long flags;
+ int err = 0, done = 0;
+
+ if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+ return BLK_EH_RESET_TIMER;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ if (job->state_flags & FC_RQST_STATE_DONE)
+ done = 1;
+ else
+ job->ref_cnt++;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ if (!done && i->f->bsg_timeout) {
+ /* call LLDD to abort the i/o as it has timed out */
+ err = i->f->bsg_timeout(job);
+ if (err == -EAGAIN) {
+ job->ref_cnt--;
+ return BLK_EH_RESET_TIMER;
+ } else if (err)
+ printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
+ "abort failed with status %d\n", err);
+ }
+
+ /* the blk_end_sync_io() doesn't check the error */
+ if (done)
+ return BLK_EH_NOT_HANDLED;
+ else
+ return BLK_EH_HANDLED;
+}
+
+static int
+fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
+{
+ size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
+
+ BUG_ON(!req->nr_phys_segments);
+
+ buf->sg_list = kzalloc(sz, GFP_KERNEL);
+ if (!buf->sg_list)
+ return -ENOMEM;
+ sg_init_table(buf->sg_list, req->nr_phys_segments);
+ buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+ buf->payload_len = blk_rq_bytes(req);
+ return 0;
+}
+
+
+/**
+ * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
+ * bsg request
+ * @shost: SCSI Host corresponding to the bsg object
+ * @rport: (optional) FC Remote Port corresponding to the bsg object
+ * @req: BSG request that needs a job structure
+ */
+static int
+fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
+ struct request *req)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct request *rsp = req->next_rq;
+ struct fc_bsg_job *job;
+ int ret;
+
+ BUG_ON(req->special);
+
+ job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
+ GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ /*
+ * Note: this is a bit silly.
+ * The request gets formatted as a SGIO v4 ioctl request, which
+ * then gets reformatted as a blk request, which then gets
+ * reformatted as a fc bsg request. And on completion, we have
+ * to wrap return results such that SGIO v4 thinks it was a scsi
+ * status. I hope this was all worth it.
+ */
+
+ req->special = job;
+ job->shost = shost;
+ job->rport = rport;
+ job->req = req;
+ if (i->f->dd_bsg_size)
+ job->dd_data = (void *)&job[1];
+ spin_lock_init(&job->job_lock);
+ job->request = (struct fc_bsg_request *)req->cmd;
+ job->request_len = req->cmd_len;
+ job->reply = req->sense;
+ job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
+ * allocated */
+ if (req->bio) {
+ ret = fc_bsg_map_buffer(&job->request_payload, req);
+ if (ret)
+ goto failjob_rls_job;
+ }
+ if (rsp && rsp->bio) {
+ ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
+ if (ret)
+ goto failjob_rls_rqst_payload;
+ }
+ job->job_done = fc_bsg_jobdone;
+ if (rport)
+ job->dev = &rport->dev;
+ else
+ job->dev = &shost->shost_gendev;
+ get_device(job->dev); /* take a reference for the request */
+
+ job->ref_cnt = 1;
+
+ return 0;
+
+
+failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+failjob_rls_job:
+ kfree(job);
+ return -ENOMEM;
+}
+
+
+enum fc_dispatch_result {
+ FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
+ FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
+ FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
+};
+
+
+/**
+ * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
+ * @q: fc host request queue
+ * @shost: scsi host rport attached to
+ * @job: bsg job to be processed
+ */
+static enum fc_dispatch_result
+fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
+ struct fc_bsg_job *job)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
+ int ret;
+
+ /* Validate the host command */
+ switch (job->request->msgcode) {
+ case FC_BSG_HST_ADD_RPORT:
+ cmdlen += sizeof(struct fc_bsg_host_add_rport);
+ break;
+
+ case FC_BSG_HST_DEL_RPORT:
+ cmdlen += sizeof(struct fc_bsg_host_del_rport);
+ break;
+
+ case FC_BSG_HST_ELS_NOLOGIN:
+ cmdlen += sizeof(struct fc_bsg_host_els);
+ /* there better be a xmt and rcv payloads */
+ if ((!job->request_payload.payload_len) ||
+ (!job->reply_payload.payload_len)) {
+ ret = -EINVAL;
+ goto fail_host_msg;
+ }
+ break;
+
+ case FC_BSG_HST_CT:
+ cmdlen += sizeof(struct fc_bsg_host_ct);
+ /* there better be xmt and rcv payloads */
+ if ((!job->request_payload.payload_len) ||
+ (!job->reply_payload.payload_len)) {
+ ret = -EINVAL;
+ goto fail_host_msg;
+ }
+ break;
+
+ case FC_BSG_HST_VENDOR:
+ cmdlen += sizeof(struct fc_bsg_host_vendor);
+ if ((shost->hostt->vendor_id == 0L) ||
+ (job->request->rqst_data.h_vendor.vendor_id !=
+ shost->hostt->vendor_id)) {
+ ret = -ESRCH;
+ goto fail_host_msg;
+ }
+ break;
+
+ default:
+ ret = -EBADR;
+ goto fail_host_msg;
+ }
+
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
+ ret = i->f->bsg_request(job);
+ if (!ret)
+ return FC_DISPATCH_UNLOCKED;
+
+fail_host_msg:
+ /* return the errno failure code as the only status */
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->reply_payload_rcv_len = 0;
+ job->reply->result = ret;
+ job->reply_len = sizeof(uint32_t);
+ fc_bsg_jobdone(job);
+ return FC_DISPATCH_UNLOCKED;
+}
+
+
+/*
+ * fc_bsg_goose_queue - restart rport queue in case it was stopped
+ * @rport: rport to be restarted
+ */
+static void
+fc_bsg_goose_queue(struct fc_rport *rport)
+{
+ if (!rport->rqst_q)
+ return;
+
+ /*
+ * This get/put dance makes no sense
+ */
+ get_device(&rport->dev);
+ blk_run_queue_async(rport->rqst_q);
+ put_device(&rport->dev);
+}
+
+/**
+ * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
+ * @q: rport request queue
+ * @shost: scsi host rport attached to
+ * @rport: rport request destined to
+ * @job: bsg job to be processed
+ */
+static enum fc_dispatch_result
+fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
+ struct fc_rport *rport, struct fc_bsg_job *job)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
+ int ret;
+
+ /* Validate the rport command */
+ switch (job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ cmdlen += sizeof(struct fc_bsg_rport_els);
+ goto check_bidi;
+
+ case FC_BSG_RPT_CT:
+ cmdlen += sizeof(struct fc_bsg_rport_ct);
+check_bidi:
+ /* there better be xmt and rcv payloads */
+ if ((!job->request_payload.payload_len) ||
+ (!job->reply_payload.payload_len)) {
+ ret = -EINVAL;
+ goto fail_rport_msg;
+ }
+ break;
+ default:
+ ret = -EBADR;
+ goto fail_rport_msg;
+ }
+
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_rport_msg;
+ }
+
+ ret = i->f->bsg_request(job);
+ if (!ret)
+ return FC_DISPATCH_UNLOCKED;
+
+fail_rport_msg:
+ /* return the errno failure code as the only status */
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->reply_payload_rcv_len = 0;
+ job->reply->result = ret;
+ job->reply_len = sizeof(uint32_t);
+ fc_bsg_jobdone(job);
+ return FC_DISPATCH_UNLOCKED;
+}
+
+
+/**
+ * fc_bsg_request_handler - generic handler for bsg requests
+ * @q: request queue to manage
+ * @shost: Scsi_Host related to the bsg object
+ * @rport: FC remote port related to the bsg object (optional)
+ * @dev: device structure for bsg object
+ */
+static void
+fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
+ struct fc_rport *rport, struct device *dev)
+{
+ struct request *req;
+ struct fc_bsg_job *job;
+ enum fc_dispatch_result ret;
+
+ if (!get_device(dev))
+ return;
+
+ while (1) {
+ if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ break;
+
+ req = blk_fetch_request(q);
+ if (!req)
+ break;
+
+ if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
+ req->errors = -ENXIO;
+ spin_unlock_irq(q->queue_lock);
+ blk_end_request_all(req, -ENXIO);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ spin_unlock_irq(q->queue_lock);
+
+ ret = fc_req_to_bsgjob(shost, rport, req);
+ if (ret) {
+ req->errors = ret;
+ blk_end_request_all(req, ret);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ job = req->special;
+
+ /* check if we have the msgcode value at least */
+ if (job->request_len < sizeof(uint32_t)) {
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->reply_payload_rcv_len = 0;
+ job->reply->result = -ENOMSG;
+ job->reply_len = sizeof(uint32_t);
+ fc_bsg_jobdone(job);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ /* the dispatch routines will unlock the queue_lock */
+ if (rport)
+ ret = fc_bsg_rport_dispatch(q, shost, rport, job);
+ else
+ ret = fc_bsg_host_dispatch(q, shost, job);
+
+ /* did dispatcher hit state that can't process any more */
+ if (ret == FC_DISPATCH_BREAK)
+ break;
+
+ /* did dispatcher had released the lock */
+ if (ret == FC_DISPATCH_UNLOCKED)
+ spin_lock_irq(q->queue_lock);
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ put_device(dev);
+ spin_lock_irq(q->queue_lock);
+}
+
+
+/**
+ * fc_bsg_host_handler - handler for bsg requests for a fc host
+ * @q: fc host request queue
+ */
+static void
+fc_bsg_host_handler(struct request_queue *q)
+{
+ struct Scsi_Host *shost = q->queuedata;
+
+ fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+}
+
+
+/**
+ * fc_bsg_rport_handler - handler for bsg requests for a fc rport
+ * @q: rport request queue
+ */
+static void
+fc_bsg_rport_handler(struct request_queue *q)
+{
+ struct fc_rport *rport = q->queuedata;
+ struct Scsi_Host *shost = rport_to_shost(rport);
+
+ fc_bsg_request_handler(q, shost, rport, &rport->dev);
+}
+
+
+/**
+ * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
+ * @shost: shost for fc_host
+ * @fc_host: fc_host adding the structures to
+ */
+static int
+fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
+{
+ struct device *dev = &shost->shost_gendev;
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct request_queue *q;
+ int err;
+ char bsg_name[20];
+
+ fc_host->rqst_q = NULL;
+
+ if (!i->f->bsg_request)
+ return -ENOTSUPP;
+
+ snprintf(bsg_name, sizeof(bsg_name),
+ "fc_host%d", shost->host_no);
+
+ q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+ if (!q) {
+ printk(KERN_ERR "fc_host%d: bsg interface failed to "
+ "initialize - no request queue\n",
+ shost->host_no);
+ return -ENOMEM;
+ }
+
+ q->queuedata = shost;
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, fc_bsg_softirq_done);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
+
+ err = bsg_register_queue(q, dev, bsg_name, NULL);
+ if (err) {
+ printk(KERN_ERR "fc_host%d: bsg interface failed to "
+ "initialize - register queue\n",
+ shost->host_no);
+ blk_cleanup_queue(q);
+ return err;
+ }
+
+ fc_host->rqst_q = q;
+ return 0;
+}
+
+
+/**
+ * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
+ * @shost: shost that rport is attached to
+ * @rport: rport that the bsg hooks are being attached to
+ */
+static int
+fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
+{
+ struct device *dev = &rport->dev;
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct request_queue *q;
+ int err;
+
+ rport->rqst_q = NULL;
+
+ if (!i->f->bsg_request)
+ return -ENOTSUPP;
+
+ q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+ if (!q) {
+ printk(KERN_ERR "%s: bsg interface failed to "
+ "initialize - no request queue\n",
+ dev->kobj.name);
+ return -ENOMEM;
+ }
+
+ q->queuedata = rport;
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, fc_bsg_softirq_done);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
+
+ err = bsg_register_queue(q, dev, NULL, NULL);
+ if (err) {
+ printk(KERN_ERR "%s: bsg interface failed to "
+ "initialize - register queue\n",
+ dev->kobj.name);
+ blk_cleanup_queue(q);
+ return err;
+ }
+
+ rport->rqst_q = q;
+ return 0;
+}
+
+
+/**
+ * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
+ * @q: the request_queue that is to be torn down.
+ *
+ * Notes:
+ * Before unregistering the queue empty any requests that are blocked
+ *
+ *
+ */
+static void
+fc_bsg_remove(struct request_queue *q)
+{
+ if (q) {
+ bsg_unregister_queue(q);
+ blk_cleanup_queue(q);
+ }
+}
+
+
+/* Original Author: Martin Hicks */
+MODULE_AUTHOR("James Smart");
+MODULE_DESCRIPTION("FC Transport Attributes");
+MODULE_LICENSE("GPL");
+
+module_init(fc_transport_init);
+module_exit(fc_transport_exit);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
new file mode 100644
index 000000000..67d43e356
--- /dev/null
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -0,0 +1,4603 @@
+/*
+ * iSCSI transport class definitions
+ *
+ * Copyright (C) IBM Corporation, 2004
+ * Copyright (C) Mike Christie, 2004 - 2005
+ * Copyright (C) Dmitry Yusupov, 2004 - 2005
+ * Copyright (C) Alex Aizman, 2004 - 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/bsg-lib.h>
+#include <linux/idr.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/iscsi_if.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_bsg_iscsi.h>
+
+#define ISCSI_TRANSPORT_VERSION "2.0-870"
+
+static int dbg_session;
+module_param_named(debug_session, dbg_session, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_session,
+ "Turn on debugging for sessions in scsi_transport_iscsi "
+ "module. Set to 1 to turn on, and zero to turn off. Default "
+ "is off.");
+
+static int dbg_conn;
+module_param_named(debug_conn, dbg_conn, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_conn,
+ "Turn on debugging for connections in scsi_transport_iscsi "
+ "module. Set to 1 to turn on, and zero to turn off. Default "
+ "is off.");
+
+#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...) \
+ do { \
+ if (dbg_session) \
+ iscsi_cls_session_printk(KERN_INFO, _session, \
+ "%s: " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \
+ do { \
+ if (dbg_conn) \
+ iscsi_cls_conn_printk(KERN_INFO, _conn, \
+ "%s: " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+struct iscsi_internal {
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+ struct device dev;
+
+ struct transport_container conn_cont;
+ struct transport_container session_cont;
+};
+
+static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+static struct workqueue_struct *iscsi_eh_timer_workq;
+
+static DEFINE_IDA(iscsi_sess_ida);
+/*
+ * list of registered transports and lock that must
+ * be held while accessing list. The iscsi_transport_lock must
+ * be acquired after the rx_queue_mutex.
+ */
+static LIST_HEAD(iscsi_transports);
+static DEFINE_SPINLOCK(iscsi_transport_lock);
+
+#define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+#define dev_to_iscsi_internal(_dev) \
+ container_of(_dev, struct iscsi_internal, dev)
+
+static void iscsi_transport_release(struct device *dev)
+{
+ struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
+ kfree(priv);
+}
+
+/*
+ * iscsi_transport_class represents the iscsi_transports that are
+ * registered.
+ */
+static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+ .dev_release = iscsi_transport_release,
+};
+
+static ssize_t
+show_transport_handle(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+}
+static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+#define show_transport_attr(name, format) \
+static ssize_t \
+show_transport_##name(struct device *dev, \
+ struct device_attribute *attr,char *buf) \
+{ \
+ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+show_transport_attr(caps, "0x%x");
+
+static struct attribute *iscsi_transport_attrs[] = {
+ &dev_attr_handle.attr,
+ &dev_attr_caps.attr,
+ NULL,
+};
+
+static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+};
+
+/*
+ * iSCSI endpoint attrs
+ */
+#define iscsi_dev_to_endpoint(_dev) \
+ container_of(_dev, struct iscsi_endpoint, dev)
+
+#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+static void iscsi_endpoint_release(struct device *dev)
+{
+ struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+ kfree(ep);
+}
+
+static struct class iscsi_endpoint_class = {
+ .name = "iscsi_endpoint",
+ .dev_release = iscsi_endpoint_release,
+};
+
+static ssize_t
+show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+ return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
+}
+static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+
+static struct attribute *iscsi_endpoint_attrs[] = {
+ &dev_attr_ep_handle.attr,
+ NULL,
+};
+
+static struct attribute_group iscsi_endpoint_group = {
+ .attrs = iscsi_endpoint_attrs,
+};
+
+#define ISCSI_MAX_EPID -1
+
+static int iscsi_match_epid(struct device *dev, const void *data)
+{
+ struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+ const uint64_t *epid = data;
+
+ return *epid == ep->id;
+}
+
+struct iscsi_endpoint *
+iscsi_create_endpoint(int dd_size)
+{
+ struct device *dev;
+ struct iscsi_endpoint *ep;
+ uint64_t id;
+ int err;
+
+ for (id = 1; id < ISCSI_MAX_EPID; id++) {
+ dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+ iscsi_match_epid);
+ if (!dev)
+ break;
+ }
+ if (id == ISCSI_MAX_EPID) {
+ printk(KERN_ERR "Too many connections. Max supported %u\n",
+ ISCSI_MAX_EPID - 1);
+ return NULL;
+ }
+
+ ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+ if (!ep)
+ return NULL;
+
+ ep->id = id;
+ ep->dev.class = &iscsi_endpoint_class;
+ dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ err = device_register(&ep->dev);
+ if (err)
+ goto free_ep;
+
+ err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+ if (err)
+ goto unregister_dev;
+
+ if (dd_size)
+ ep->dd_data = &ep[1];
+ return ep;
+
+unregister_dev:
+ device_unregister(&ep->dev);
+ return NULL;
+
+free_ep:
+ kfree(ep);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+
+void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+{
+ sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+ device_unregister(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+
+struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+{
+ struct iscsi_endpoint *ep;
+ struct device *dev;
+
+ dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+ iscsi_match_epid);
+ if (!dev)
+ return NULL;
+
+ ep = iscsi_dev_to_endpoint(dev);
+ /*
+ * we can drop this now because the interface will prevent
+ * removals and lookups from racing.
+ */
+ put_device(dev);
+ return ep;
+}
+EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+/*
+ * Interface to display network param to sysfs
+ */
+
+static void iscsi_iface_release(struct device *dev)
+{
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
+ struct device *parent = iface->dev.parent;
+
+ kfree(iface);
+ put_device(parent);
+}
+
+
+static struct class iscsi_iface_class = {
+ .name = "iscsi_iface",
+ .dev_release = iscsi_iface_release,
+};
+
+#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+/* iface attrs show */
+#define iscsi_iface_attr_show(type, name, param_type, param) \
+static ssize_t \
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \
+ struct iscsi_transport *t = iface->transport; \
+ return t->get_iface_param(iface, param_type, param, buf); \
+} \
+
+#define iscsi_iface_net_attr(type, name, param) \
+ iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \
+static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
+
+#define iscsi_iface_attr(type, name, param) \
+ iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param) \
+static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
+
+/* generic read only ipv4 attribute */
+iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
+iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
+iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
+iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
+iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN);
+iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN);
+iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS);
+iscsi_iface_net_attr(ipv4_iface, grat_arp_en,
+ ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id,
+ ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID);
+iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN);
+iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id,
+ ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID);
+iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en,
+ ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN);
+iscsi_iface_net_attr(ipv4_iface, fragment_disable,
+ ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE);
+iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en,
+ ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN);
+iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL);
+
+/* generic read only ipv6 attribute */
+iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR);
+iscsi_iface_net_attr(ipv6_iface, link_local_addr,
+ ISCSI_NET_PARAM_IPV6_LINKLOCAL);
+iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);
+iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,
+ ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);
+iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,
+ ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG);
+iscsi_iface_net_attr(ipv6_iface, link_local_state,
+ ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE);
+iscsi_iface_net_attr(ipv6_iface, router_state,
+ ISCSI_NET_PARAM_IPV6_ROUTER_STATE);
+iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en,
+ ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN);
+iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN);
+iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL);
+iscsi_iface_net_attr(ipv6_iface, traffic_class,
+ ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS);
+iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT);
+iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo,
+ ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO);
+iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time,
+ ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME);
+iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo,
+ ISCSI_NET_PARAM_IPV6_ND_STALE_TMO);
+iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt,
+ ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT);
+iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu,
+ ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU);
+
+/* common read only iface attribute */
+iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE);
+iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID);
+iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY);
+iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
+iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
+iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
+iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE);
+iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN);
+iscsi_iface_net_attr(iface, tcp_nagle_disable,
+ ISCSI_NET_PARAM_TCP_NAGLE_DISABLE);
+iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE);
+iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF);
+iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE);
+iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN);
+iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID);
+iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN);
+
+/* common iscsi specific settings attributes */
+iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO);
+iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN);
+iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN);
+iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN);
+iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN);
+iscsi_iface_attr(iface, data_seq_in_order,
+ ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN);
+iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN);
+iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL);
+iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH);
+iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST);
+iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T);
+iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST);
+iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN);
+iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN);
+iscsi_iface_attr(iface, discovery_auth_optional,
+ ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL);
+iscsi_iface_attr(iface, discovery_logout,
+ ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN);
+iscsi_iface_attr(iface, strict_login_comp_en,
+ ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN);
+iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME);
+
+static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
+ struct iscsi_transport *t = iface->transport;
+ int param;
+ int param_type;
+
+ if (attr == &dev_attr_iface_enabled.attr)
+ param = ISCSI_NET_PARAM_IFACE_ENABLE;
+ else if (attr == &dev_attr_iface_vlan_id.attr)
+ param = ISCSI_NET_PARAM_VLAN_ID;
+ else if (attr == &dev_attr_iface_vlan_priority.attr)
+ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+ else if (attr == &dev_attr_iface_vlan_enabled.attr)
+ param = ISCSI_NET_PARAM_VLAN_ENABLED;
+ else if (attr == &dev_attr_iface_mtu.attr)
+ param = ISCSI_NET_PARAM_MTU;
+ else if (attr == &dev_attr_iface_port.attr)
+ param = ISCSI_NET_PARAM_PORT;
+ else if (attr == &dev_attr_iface_ipaddress_state.attr)
+ param = ISCSI_NET_PARAM_IPADDR_STATE;
+ else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+ param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF;
+ else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_iface_cache_id.attr)
+ param = ISCSI_NET_PARAM_CACHE_ID;
+ else if (attr == &dev_attr_iface_redirect_en.attr)
+ param = ISCSI_NET_PARAM_REDIRECT_EN;
+ else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_iface_header_digest.attr)
+ param = ISCSI_IFACE_PARAM_HDRDGST_EN;
+ else if (attr == &dev_attr_iface_data_digest.attr)
+ param = ISCSI_IFACE_PARAM_DATADGST_EN;
+ else if (attr == &dev_attr_iface_immediate_data.attr)
+ param = ISCSI_IFACE_PARAM_IMM_DATA_EN;
+ else if (attr == &dev_attr_iface_initial_r2t.attr)
+ param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN;
+ else if (attr == &dev_attr_iface_data_seq_in_order.attr)
+ param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN;
+ else if (attr == &dev_attr_iface_data_pdu_in_order.attr)
+ param = ISCSI_IFACE_PARAM_PDU_INORDER_EN;
+ else if (attr == &dev_attr_iface_erl.attr)
+ param = ISCSI_IFACE_PARAM_ERL;
+ else if (attr == &dev_attr_iface_max_recv_dlength.attr)
+ param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH;
+ else if (attr == &dev_attr_iface_first_burst_len.attr)
+ param = ISCSI_IFACE_PARAM_FIRST_BURST;
+ else if (attr == &dev_attr_iface_max_outstanding_r2t.attr)
+ param = ISCSI_IFACE_PARAM_MAX_R2T;
+ else if (attr == &dev_attr_iface_max_burst_len.attr)
+ param = ISCSI_IFACE_PARAM_MAX_BURST;
+ else if (attr == &dev_attr_iface_chap_auth.attr)
+ param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN;
+ else if (attr == &dev_attr_iface_bidi_chap.attr)
+ param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN;
+ else if (attr == &dev_attr_iface_discovery_auth_optional.attr)
+ param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL;
+ else if (attr == &dev_attr_iface_discovery_logout.attr)
+ param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN;
+ else if (attr == &dev_attr_iface_strict_login_comp_en.attr)
+ param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
+ else if (attr == &dev_attr_iface_initiator_name.attr)
+ param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+ if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
+ param = ISCSI_NET_PARAM_IPV4_ADDR;
+ else if (attr == &dev_attr_ipv4_iface_gateway.attr)
+ param = ISCSI_NET_PARAM_IPV4_GW;
+ else if (attr == &dev_attr_ipv4_iface_subnet.attr)
+ param = ISCSI_NET_PARAM_IPV4_SUBNET;
+ else if (attr == &dev_attr_ipv4_iface_bootproto.attr)
+ param = ISCSI_NET_PARAM_IPV4_BOOTPROTO;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_dns_address_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN;
+ else if (attr == &dev_attr_ipv4_iface_tos_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_TOS_EN;
+ else if (attr == &dev_attr_ipv4_iface_tos.attr)
+ param = ISCSI_NET_PARAM_IPV4_TOS;
+ else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN;
+ else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN;
+ else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID;
+ else if (attr ==
+ &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN;
+ else if (attr ==
+ &dev_attr_ipv4_iface_fragment_disable.attr)
+ param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE;
+ else if (attr ==
+ &dev_attr_ipv4_iface_incoming_forwarding_en.attr)
+ param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN;
+ else if (attr == &dev_attr_ipv4_iface_ttl.attr)
+ param = ISCSI_NET_PARAM_IPV4_TTL;
+ else
+ return 0;
+ } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) {
+ if (attr == &dev_attr_ipv6_iface_ipaddress.attr)
+ param = ISCSI_NET_PARAM_IPV6_ADDR;
+ else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr)
+ param = ISCSI_NET_PARAM_IPV6_LINKLOCAL;
+ else if (attr == &dev_attr_ipv6_iface_router_addr.attr)
+ param = ISCSI_NET_PARAM_IPV6_ROUTER;
+ else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr)
+ param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;
+ else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)
+ param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG;
+ else if (attr == &dev_attr_ipv6_iface_link_local_state.attr)
+ param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE;
+ else if (attr == &dev_attr_ipv6_iface_router_state.attr)
+ param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE;
+ else if (attr ==
+ &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr)
+ param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN;
+ else if (attr == &dev_attr_ipv6_iface_mld_en.attr)
+ param = ISCSI_NET_PARAM_IPV6_MLD_EN;
+ else if (attr == &dev_attr_ipv6_iface_flow_label.attr)
+ param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL;
+ else if (attr == &dev_attr_ipv6_iface_traffic_class.attr)
+ param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS;
+ else if (attr == &dev_attr_ipv6_iface_hop_limit.attr)
+ param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT;
+ else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr)
+ param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO;
+ else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr)
+ param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME;
+ else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr)
+ param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO;
+ else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr)
+ param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT;
+ else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr)
+ param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU;
+ else
+ return 0;
+ } else {
+ WARN_ONCE(1, "Invalid iface attr");
+ return 0;
+ }
+
+ switch (param) {
+ case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_IFACE_PARAM_HDRDGST_EN:
+ case ISCSI_IFACE_PARAM_DATADGST_EN:
+ case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+ case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+ case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+ case ISCSI_IFACE_PARAM_ERL:
+ case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_IFACE_PARAM_FIRST_BURST:
+ case ISCSI_IFACE_PARAM_MAX_R2T:
+ case ISCSI_IFACE_PARAM_MAX_BURST:
+ case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+ case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+ case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+ case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+ param_type = ISCSI_IFACE_PARAM;
+ break;
+ default:
+ param_type = ISCSI_NET_PARAM;
+ }
+
+ return t->attr_is_visible(param_type, param);
+}
+
+static struct attribute *iscsi_iface_attrs[] = {
+ &dev_attr_iface_enabled.attr,
+ &dev_attr_iface_vlan_id.attr,
+ &dev_attr_iface_vlan_priority.attr,
+ &dev_attr_iface_vlan_enabled.attr,
+ &dev_attr_ipv4_iface_ipaddress.attr,
+ &dev_attr_ipv4_iface_gateway.attr,
+ &dev_attr_ipv4_iface_subnet.attr,
+ &dev_attr_ipv4_iface_bootproto.attr,
+ &dev_attr_ipv6_iface_ipaddress.attr,
+ &dev_attr_ipv6_iface_link_local_addr.attr,
+ &dev_attr_ipv6_iface_router_addr.attr,
+ &dev_attr_ipv6_iface_ipaddr_autocfg.attr,
+ &dev_attr_ipv6_iface_link_local_autocfg.attr,
+ &dev_attr_iface_mtu.attr,
+ &dev_attr_iface_port.attr,
+ &dev_attr_iface_ipaddress_state.attr,
+ &dev_attr_iface_delayed_ack_en.attr,
+ &dev_attr_iface_tcp_nagle_disable.attr,
+ &dev_attr_iface_tcp_wsf_disable.attr,
+ &dev_attr_iface_tcp_wsf.attr,
+ &dev_attr_iface_tcp_timer_scale.attr,
+ &dev_attr_iface_tcp_timestamp_en.attr,
+ &dev_attr_iface_cache_id.attr,
+ &dev_attr_iface_redirect_en.attr,
+ &dev_attr_iface_def_taskmgmt_tmo.attr,
+ &dev_attr_iface_header_digest.attr,
+ &dev_attr_iface_data_digest.attr,
+ &dev_attr_iface_immediate_data.attr,
+ &dev_attr_iface_initial_r2t.attr,
+ &dev_attr_iface_data_seq_in_order.attr,
+ &dev_attr_iface_data_pdu_in_order.attr,
+ &dev_attr_iface_erl.attr,
+ &dev_attr_iface_max_recv_dlength.attr,
+ &dev_attr_iface_first_burst_len.attr,
+ &dev_attr_iface_max_outstanding_r2t.attr,
+ &dev_attr_iface_max_burst_len.attr,
+ &dev_attr_iface_chap_auth.attr,
+ &dev_attr_iface_bidi_chap.attr,
+ &dev_attr_iface_discovery_auth_optional.attr,
+ &dev_attr_iface_discovery_logout.attr,
+ &dev_attr_iface_strict_login_comp_en.attr,
+ &dev_attr_iface_initiator_name.attr,
+ &dev_attr_ipv4_iface_dhcp_dns_address_en.attr,
+ &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr,
+ &dev_attr_ipv4_iface_tos_en.attr,
+ &dev_attr_ipv4_iface_tos.attr,
+ &dev_attr_ipv4_iface_grat_arp_en.attr,
+ &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr,
+ &dev_attr_ipv4_iface_dhcp_alt_client_id.attr,
+ &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr,
+ &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr,
+ &dev_attr_ipv4_iface_dhcp_vendor_id.attr,
+ &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr,
+ &dev_attr_ipv4_iface_fragment_disable.attr,
+ &dev_attr_ipv4_iface_incoming_forwarding_en.attr,
+ &dev_attr_ipv4_iface_ttl.attr,
+ &dev_attr_ipv6_iface_link_local_state.attr,
+ &dev_attr_ipv6_iface_router_state.attr,
+ &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr,
+ &dev_attr_ipv6_iface_mld_en.attr,
+ &dev_attr_ipv6_iface_flow_label.attr,
+ &dev_attr_ipv6_iface_traffic_class.attr,
+ &dev_attr_ipv6_iface_hop_limit.attr,
+ &dev_attr_ipv6_iface_nd_reachable_tmo.attr,
+ &dev_attr_ipv6_iface_nd_rexmit_time.attr,
+ &dev_attr_ipv6_iface_nd_stale_tmo.attr,
+ &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr,
+ &dev_attr_ipv6_iface_router_adv_link_mtu.attr,
+ NULL,
+};
+
+static struct attribute_group iscsi_iface_group = {
+ .attrs = iscsi_iface_attrs,
+ .is_visible = iscsi_iface_attr_is_visible,
+};
+
+/* convert iscsi_ipaddress_state values to ascii string name */
+static const struct {
+ enum iscsi_ipaddress_state value;
+ char *name;
+} iscsi_ipaddress_state_names[] = {
+ {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" },
+ {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" },
+ {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" },
+ {ISCSI_IPDDRESS_STATE_VALID, "Valid" },
+ {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" },
+ {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" },
+ {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" },
+};
+
+char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state)
+{
+ int i;
+ char *state = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) {
+ if (iscsi_ipaddress_state_names[i].value == port_state) {
+ state = iscsi_ipaddress_state_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name);
+
+/* convert iscsi_router_state values to ascii string name */
+static const struct {
+ enum iscsi_router_state value;
+ char *name;
+} iscsi_router_state_names[] = {
+ {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" },
+ {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" },
+ {ISCSI_ROUTER_STATE_MANUAL, "Manual" },
+ {ISCSI_ROUTER_STATE_STALE, "Stale" },
+};
+
+char *iscsi_get_router_state_name(enum iscsi_router_state router_state)
+{
+ int i;
+ char *state = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) {
+ if (iscsi_router_state_names[i].value == router_state) {
+ state = iscsi_router_state_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_router_state_name);
+
+struct iscsi_iface *
+iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ uint32_t iface_type, uint32_t iface_num, int dd_size)
+{
+ struct iscsi_iface *iface;
+ int err;
+
+ iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL);
+ if (!iface)
+ return NULL;
+
+ iface->transport = transport;
+ iface->iface_type = iface_type;
+ iface->iface_num = iface_num;
+ iface->dev.release = iscsi_iface_release;
+ iface->dev.class = &iscsi_iface_class;
+ /* parent reference released in iscsi_iface_release */
+ iface->dev.parent = get_device(&shost->shost_gendev);
+ if (iface_type == ISCSI_IFACE_TYPE_IPV4)
+ dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no,
+ iface_num);
+ else
+ dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no,
+ iface_num);
+
+ err = device_register(&iface->dev);
+ if (err)
+ goto free_iface;
+
+ err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group);
+ if (err)
+ goto unreg_iface;
+
+ if (dd_size)
+ iface->dd_data = &iface[1];
+ return iface;
+
+unreg_iface:
+ device_unregister(&iface->dev);
+ return NULL;
+
+free_iface:
+ put_device(iface->dev.parent);
+ kfree(iface);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_iface);
+
+void iscsi_destroy_iface(struct iscsi_iface *iface)
+{
+ sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group);
+ device_unregister(&iface->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
+
+/*
+ * Interface to display flash node params to sysfs
+ */
+
+#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+/* flash node session attrs show */
+#define iscsi_flashnode_sess_attr_show(type, name, param) \
+static ssize_t \
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct iscsi_bus_flash_session *fnode_sess = \
+ iscsi_dev_to_flash_session(dev);\
+ struct iscsi_transport *t = fnode_sess->transport; \
+ return t->get_flashnode_param(fnode_sess, param, buf); \
+} \
+
+
+#define iscsi_flashnode_sess_attr(type, name, param) \
+ iscsi_flashnode_sess_attr_show(type, name, param) \
+static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
+ show_##type##_##name, NULL);
+
+/* Flash node session attributes */
+
+iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable,
+ ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE);
+iscsi_flashnode_sess_attr(fnode, discovery_session,
+ ISCSI_FLASHNODE_DISCOVERY_SESS);
+iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE);
+iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN);
+iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN);
+iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN);
+iscsi_flashnode_sess_attr(fnode, data_seq_in_order,
+ ISCSI_FLASHNODE_DATASEQ_INORDER);
+iscsi_flashnode_sess_attr(fnode, data_pdu_in_order,
+ ISCSI_FLASHNODE_PDU_INORDER);
+iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN);
+iscsi_flashnode_sess_attr(fnode, discovery_logout,
+ ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN);
+iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN);
+iscsi_flashnode_sess_attr(fnode, discovery_auth_optional,
+ ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL);
+iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL);
+iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST);
+iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT);
+iscsi_flashnode_sess_attr(fnode, def_time2retain,
+ ISCSI_FLASHNODE_DEF_TIME2RETAIN);
+iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T);
+iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID);
+iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID);
+iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST);
+iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo,
+ ISCSI_FLASHNODE_DEF_TASKMGMT_TMO);
+iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS);
+iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME);
+iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT);
+iscsi_flashnode_sess_attr(fnode, discovery_parent_idx,
+ ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX);
+iscsi_flashnode_sess_attr(fnode, discovery_parent_type,
+ ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE);
+iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX);
+iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX);
+iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME);
+iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN);
+iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD);
+iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN);
+iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT);
+
+static struct attribute *iscsi_flashnode_sess_attrs[] = {
+ &dev_attr_fnode_auto_snd_tgt_disable.attr,
+ &dev_attr_fnode_discovery_session.attr,
+ &dev_attr_fnode_portal_type.attr,
+ &dev_attr_fnode_entry_enable.attr,
+ &dev_attr_fnode_immediate_data.attr,
+ &dev_attr_fnode_initial_r2t.attr,
+ &dev_attr_fnode_data_seq_in_order.attr,
+ &dev_attr_fnode_data_pdu_in_order.attr,
+ &dev_attr_fnode_chap_auth.attr,
+ &dev_attr_fnode_discovery_logout.attr,
+ &dev_attr_fnode_bidi_chap.attr,
+ &dev_attr_fnode_discovery_auth_optional.attr,
+ &dev_attr_fnode_erl.attr,
+ &dev_attr_fnode_first_burst_len.attr,
+ &dev_attr_fnode_def_time2wait.attr,
+ &dev_attr_fnode_def_time2retain.attr,
+ &dev_attr_fnode_max_outstanding_r2t.attr,
+ &dev_attr_fnode_isid.attr,
+ &dev_attr_fnode_tsid.attr,
+ &dev_attr_fnode_max_burst_len.attr,
+ &dev_attr_fnode_def_taskmgmt_tmo.attr,
+ &dev_attr_fnode_targetalias.attr,
+ &dev_attr_fnode_targetname.attr,
+ &dev_attr_fnode_tpgt.attr,
+ &dev_attr_fnode_discovery_parent_idx.attr,
+ &dev_attr_fnode_discovery_parent_type.attr,
+ &dev_attr_fnode_chap_in_idx.attr,
+ &dev_attr_fnode_chap_out_idx.attr,
+ &dev_attr_fnode_username.attr,
+ &dev_attr_fnode_username_in.attr,
+ &dev_attr_fnode_password.attr,
+ &dev_attr_fnode_password_in.attr,
+ &dev_attr_fnode_is_boot_target.attr,
+ NULL,
+};
+
+static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_bus_flash_session *fnode_sess =
+ iscsi_dev_to_flash_session(dev);
+ struct iscsi_transport *t = fnode_sess->transport;
+ int param;
+
+ if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) {
+ param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE;
+ } else if (attr == &dev_attr_fnode_discovery_session.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_SESS;
+ } else if (attr == &dev_attr_fnode_portal_type.attr) {
+ param = ISCSI_FLASHNODE_PORTAL_TYPE;
+ } else if (attr == &dev_attr_fnode_entry_enable.attr) {
+ param = ISCSI_FLASHNODE_ENTRY_EN;
+ } else if (attr == &dev_attr_fnode_immediate_data.attr) {
+ param = ISCSI_FLASHNODE_IMM_DATA_EN;
+ } else if (attr == &dev_attr_fnode_initial_r2t.attr) {
+ param = ISCSI_FLASHNODE_INITIAL_R2T_EN;
+ } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) {
+ param = ISCSI_FLASHNODE_DATASEQ_INORDER;
+ } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) {
+ param = ISCSI_FLASHNODE_PDU_INORDER;
+ } else if (attr == &dev_attr_fnode_chap_auth.attr) {
+ param = ISCSI_FLASHNODE_CHAP_AUTH_EN;
+ } else if (attr == &dev_attr_fnode_discovery_logout.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN;
+ } else if (attr == &dev_attr_fnode_bidi_chap.attr) {
+ param = ISCSI_FLASHNODE_BIDI_CHAP_EN;
+ } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL;
+ } else if (attr == &dev_attr_fnode_erl.attr) {
+ param = ISCSI_FLASHNODE_ERL;
+ } else if (attr == &dev_attr_fnode_first_burst_len.attr) {
+ param = ISCSI_FLASHNODE_FIRST_BURST;
+ } else if (attr == &dev_attr_fnode_def_time2wait.attr) {
+ param = ISCSI_FLASHNODE_DEF_TIME2WAIT;
+ } else if (attr == &dev_attr_fnode_def_time2retain.attr) {
+ param = ISCSI_FLASHNODE_DEF_TIME2RETAIN;
+ } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) {
+ param = ISCSI_FLASHNODE_MAX_R2T;
+ } else if (attr == &dev_attr_fnode_isid.attr) {
+ param = ISCSI_FLASHNODE_ISID;
+ } else if (attr == &dev_attr_fnode_tsid.attr) {
+ param = ISCSI_FLASHNODE_TSID;
+ } else if (attr == &dev_attr_fnode_max_burst_len.attr) {
+ param = ISCSI_FLASHNODE_MAX_BURST;
+ } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) {
+ param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO;
+ } else if (attr == &dev_attr_fnode_targetalias.attr) {
+ param = ISCSI_FLASHNODE_ALIAS;
+ } else if (attr == &dev_attr_fnode_targetname.attr) {
+ param = ISCSI_FLASHNODE_NAME;
+ } else if (attr == &dev_attr_fnode_tpgt.attr) {
+ param = ISCSI_FLASHNODE_TPGT;
+ } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX;
+ } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE;
+ } else if (attr == &dev_attr_fnode_chap_in_idx.attr) {
+ param = ISCSI_FLASHNODE_CHAP_IN_IDX;
+ } else if (attr == &dev_attr_fnode_chap_out_idx.attr) {
+ param = ISCSI_FLASHNODE_CHAP_OUT_IDX;
+ } else if (attr == &dev_attr_fnode_username.attr) {
+ param = ISCSI_FLASHNODE_USERNAME;
+ } else if (attr == &dev_attr_fnode_username_in.attr) {
+ param = ISCSI_FLASHNODE_USERNAME_IN;
+ } else if (attr == &dev_attr_fnode_password.attr) {
+ param = ISCSI_FLASHNODE_PASSWORD;
+ } else if (attr == &dev_attr_fnode_password_in.attr) {
+ param = ISCSI_FLASHNODE_PASSWORD_IN;
+ } else if (attr == &dev_attr_fnode_is_boot_target.attr) {
+ param = ISCSI_FLASHNODE_IS_BOOT_TGT;
+ } else {
+ WARN_ONCE(1, "Invalid flashnode session attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
+}
+
+static struct attribute_group iscsi_flashnode_sess_attr_group = {
+ .attrs = iscsi_flashnode_sess_attrs,
+ .is_visible = iscsi_flashnode_sess_attr_is_visible,
+};
+
+static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = {
+ &iscsi_flashnode_sess_attr_group,
+ NULL,
+};
+
+static void iscsi_flashnode_sess_release(struct device *dev)
+{
+ struct iscsi_bus_flash_session *fnode_sess =
+ iscsi_dev_to_flash_session(dev);
+
+ kfree(fnode_sess->targetname);
+ kfree(fnode_sess->targetalias);
+ kfree(fnode_sess->portal_type);
+ kfree(fnode_sess);
+}
+
+struct device_type iscsi_flashnode_sess_dev_type = {
+ .name = "iscsi_flashnode_sess_dev_type",
+ .groups = iscsi_flashnode_sess_attr_groups,
+ .release = iscsi_flashnode_sess_release,
+};
+
+/* flash node connection attrs show */
+#define iscsi_flashnode_conn_attr_show(type, name, param) \
+static ssize_t \
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\
+ struct iscsi_bus_flash_session *fnode_sess = \
+ iscsi_flash_conn_to_flash_session(fnode_conn);\
+ struct iscsi_transport *t = fnode_conn->transport; \
+ return t->get_flashnode_param(fnode_sess, param, buf); \
+} \
+
+
+#define iscsi_flashnode_conn_attr(type, name, param) \
+ iscsi_flashnode_conn_attr_show(type, name, param) \
+static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
+ show_##type##_##name, NULL);
+
+/* Flash node connection attributes */
+
+iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6,
+ ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6);
+iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN);
+iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN);
+iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN);
+iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat,
+ ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT);
+iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable,
+ ISCSI_FLASHNODE_TCP_NAGLE_DISABLE);
+iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable,
+ ISCSI_FLASHNODE_TCP_WSF_DISABLE);
+iscsi_flashnode_conn_attr(fnode, tcp_timer_scale,
+ ISCSI_FLASHNODE_TCP_TIMER_SCALE);
+iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable,
+ ISCSI_FLASHNODE_TCP_TIMESTAMP_EN);
+iscsi_flashnode_conn_attr(fnode, fragment_disable,
+ ISCSI_FLASHNODE_IP_FRAG_DISABLE);
+iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO);
+iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT);
+iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR);
+iscsi_flashnode_conn_attr(fnode, max_recv_dlength,
+ ISCSI_FLASHNODE_MAX_RECV_DLENGTH);
+iscsi_flashnode_conn_attr(fnode, max_xmit_dlength,
+ ISCSI_FLASHNODE_MAX_XMIT_DLENGTH);
+iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT);
+iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS);
+iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC);
+iscsi_flashnode_conn_attr(fnode, ipv6_flow_label,
+ ISCSI_FLASHNODE_IPV6_FLOW_LABEL);
+iscsi_flashnode_conn_attr(fnode, redirect_ipaddr,
+ ISCSI_FLASHNODE_REDIRECT_IPADDR);
+iscsi_flashnode_conn_attr(fnode, max_segment_size,
+ ISCSI_FLASHNODE_MAX_SEGMENT_SIZE);
+iscsi_flashnode_conn_attr(fnode, link_local_ipv6,
+ ISCSI_FLASHNODE_LINK_LOCAL_IPV6);
+iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF);
+iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF);
+iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN);
+iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN);
+
+static struct attribute *iscsi_flashnode_conn_attrs[] = {
+ &dev_attr_fnode_is_fw_assigned_ipv6.attr,
+ &dev_attr_fnode_header_digest.attr,
+ &dev_attr_fnode_data_digest.attr,
+ &dev_attr_fnode_snack_req.attr,
+ &dev_attr_fnode_tcp_timestamp_stat.attr,
+ &dev_attr_fnode_tcp_nagle_disable.attr,
+ &dev_attr_fnode_tcp_wsf_disable.attr,
+ &dev_attr_fnode_tcp_timer_scale.attr,
+ &dev_attr_fnode_tcp_timestamp_enable.attr,
+ &dev_attr_fnode_fragment_disable.attr,
+ &dev_attr_fnode_max_recv_dlength.attr,
+ &dev_attr_fnode_max_xmit_dlength.attr,
+ &dev_attr_fnode_keepalive_tmo.attr,
+ &dev_attr_fnode_port.attr,
+ &dev_attr_fnode_ipaddress.attr,
+ &dev_attr_fnode_redirect_ipaddr.attr,
+ &dev_attr_fnode_max_segment_size.attr,
+ &dev_attr_fnode_local_port.attr,
+ &dev_attr_fnode_ipv4_tos.attr,
+ &dev_attr_fnode_ipv6_traffic_class.attr,
+ &dev_attr_fnode_ipv6_flow_label.attr,
+ &dev_attr_fnode_link_local_ipv6.attr,
+ &dev_attr_fnode_tcp_xmit_wsf.attr,
+ &dev_attr_fnode_tcp_recv_wsf.attr,
+ &dev_attr_fnode_statsn.attr,
+ &dev_attr_fnode_exp_statsn.attr,
+ NULL,
+};
+
+static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
+ struct iscsi_transport *t = fnode_conn->transport;
+ int param;
+
+ if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) {
+ param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6;
+ } else if (attr == &dev_attr_fnode_header_digest.attr) {
+ param = ISCSI_FLASHNODE_HDR_DGST_EN;
+ } else if (attr == &dev_attr_fnode_data_digest.attr) {
+ param = ISCSI_FLASHNODE_DATA_DGST_EN;
+ } else if (attr == &dev_attr_fnode_snack_req.attr) {
+ param = ISCSI_FLASHNODE_SNACK_REQ_EN;
+ } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) {
+ param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT;
+ } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) {
+ param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE;
+ } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) {
+ param = ISCSI_FLASHNODE_TCP_WSF_DISABLE;
+ } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) {
+ param = ISCSI_FLASHNODE_TCP_TIMER_SCALE;
+ } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) {
+ param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN;
+ } else if (attr == &dev_attr_fnode_fragment_disable.attr) {
+ param = ISCSI_FLASHNODE_IP_FRAG_DISABLE;
+ } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) {
+ param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH;
+ } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) {
+ param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH;
+ } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) {
+ param = ISCSI_FLASHNODE_KEEPALIVE_TMO;
+ } else if (attr == &dev_attr_fnode_port.attr) {
+ param = ISCSI_FLASHNODE_PORT;
+ } else if (attr == &dev_attr_fnode_ipaddress.attr) {
+ param = ISCSI_FLASHNODE_IPADDR;
+ } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) {
+ param = ISCSI_FLASHNODE_REDIRECT_IPADDR;
+ } else if (attr == &dev_attr_fnode_max_segment_size.attr) {
+ param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE;
+ } else if (attr == &dev_attr_fnode_local_port.attr) {
+ param = ISCSI_FLASHNODE_LOCAL_PORT;
+ } else if (attr == &dev_attr_fnode_ipv4_tos.attr) {
+ param = ISCSI_FLASHNODE_IPV4_TOS;
+ } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) {
+ param = ISCSI_FLASHNODE_IPV6_TC;
+ } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) {
+ param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL;
+ } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) {
+ param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6;
+ } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) {
+ param = ISCSI_FLASHNODE_TCP_XMIT_WSF;
+ } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) {
+ param = ISCSI_FLASHNODE_TCP_RECV_WSF;
+ } else if (attr == &dev_attr_fnode_statsn.attr) {
+ param = ISCSI_FLASHNODE_STATSN;
+ } else if (attr == &dev_attr_fnode_exp_statsn.attr) {
+ param = ISCSI_FLASHNODE_EXP_STATSN;
+ } else {
+ WARN_ONCE(1, "Invalid flashnode connection attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
+}
+
+static struct attribute_group iscsi_flashnode_conn_attr_group = {
+ .attrs = iscsi_flashnode_conn_attrs,
+ .is_visible = iscsi_flashnode_conn_attr_is_visible,
+};
+
+static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = {
+ &iscsi_flashnode_conn_attr_group,
+ NULL,
+};
+
+static void iscsi_flashnode_conn_release(struct device *dev)
+{
+ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+ kfree(fnode_conn->ipaddress);
+ kfree(fnode_conn->redirect_ipaddr);
+ kfree(fnode_conn->link_local_ipv6_addr);
+ kfree(fnode_conn);
+}
+
+struct device_type iscsi_flashnode_conn_dev_type = {
+ .name = "iscsi_flashnode_conn_dev_type",
+ .groups = iscsi_flashnode_conn_attr_groups,
+ .release = iscsi_flashnode_conn_release,
+};
+
+struct bus_type iscsi_flashnode_bus;
+
+int iscsi_flashnode_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ if (dev->bus == &iscsi_flashnode_bus)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
+
+struct bus_type iscsi_flashnode_bus = {
+ .name = "iscsi_flashnode",
+ .match = &iscsi_flashnode_bus_match,
+};
+
+/**
+ * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs
+ * @shost: pointer to host data
+ * @index: index of flashnode to add in sysfs
+ * @transport: pointer to transport data
+ * @dd_size: total size to allocate
+ *
+ * Adds a sysfs entry for the flashnode session attributes
+ *
+ * Returns:
+ * pointer to allocated flashnode sess on success
+ * %NULL on failure
+ */
+struct iscsi_bus_flash_session *
+iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
+ struct iscsi_transport *transport,
+ int dd_size)
+{
+ struct iscsi_bus_flash_session *fnode_sess;
+ int err;
+
+ fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL);
+ if (!fnode_sess)
+ return NULL;
+
+ fnode_sess->transport = transport;
+ fnode_sess->target_id = index;
+ fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type;
+ fnode_sess->dev.bus = &iscsi_flashnode_bus;
+ fnode_sess->dev.parent = &shost->shost_gendev;
+ dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u",
+ shost->host_no, index);
+
+ err = device_register(&fnode_sess->dev);
+ if (err)
+ goto free_fnode_sess;
+
+ if (dd_size)
+ fnode_sess->dd_data = &fnode_sess[1];
+
+ return fnode_sess;
+
+free_fnode_sess:
+ kfree(fnode_sess);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
+
+/**
+ * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs
+ * @shost: pointer to host data
+ * @fnode_sess: pointer to the parent flashnode session entry
+ * @transport: pointer to transport data
+ * @dd_size: total size to allocate
+ *
+ * Adds a sysfs entry for the flashnode connection attributes
+ *
+ * Returns:
+ * pointer to allocated flashnode conn on success
+ * %NULL on failure
+ */
+struct iscsi_bus_flash_conn *
+iscsi_create_flashnode_conn(struct Scsi_Host *shost,
+ struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_transport *transport,
+ int dd_size)
+{
+ struct iscsi_bus_flash_conn *fnode_conn;
+ int err;
+
+ fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
+ if (!fnode_conn)
+ return NULL;
+
+ fnode_conn->transport = transport;
+ fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
+ fnode_conn->dev.bus = &iscsi_flashnode_bus;
+ fnode_conn->dev.parent = &fnode_sess->dev;
+ dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
+ shost->host_no, fnode_sess->target_id);
+
+ err = device_register(&fnode_conn->dev);
+ if (err)
+ goto free_fnode_conn;
+
+ if (dd_size)
+ fnode_conn->dd_data = &fnode_conn[1];
+
+ return fnode_conn;
+
+free_fnode_conn:
+ kfree(fnode_conn);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
+
+/**
+ * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn
+ * @dev: device to verify
+ * @data: pointer to data containing value to use for verification
+ *
+ * Verifies if the passed device is flashnode conn device
+ *
+ * Returns:
+ * 1 on success
+ * 0 on failure
+ */
+int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+{
+ return dev->bus == &iscsi_flashnode_bus;
+}
+EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
+
+static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
+{
+ device_unregister(&fnode_conn->dev);
+ return 0;
+}
+
+static int flashnode_match_index(struct device *dev, void *data)
+{
+ struct iscsi_bus_flash_session *fnode_sess = NULL;
+ int ret = 0;
+
+ if (!iscsi_flashnode_bus_match(dev, NULL))
+ goto exit_match_index;
+
+ fnode_sess = iscsi_dev_to_flash_session(dev);
+ ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
+
+exit_match_index:
+ return ret;
+}
+
+/**
+ * iscsi_get_flashnode_by_index -finds flashnode session entry by index
+ * @shost: pointer to host data
+ * @idx: index to match
+ *
+ * Finds the flashnode session object for the passed index
+ *
+ * Returns:
+ * pointer to found flashnode session object on success
+ * %NULL on failure
+ */
+static struct iscsi_bus_flash_session *
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
+{
+ struct iscsi_bus_flash_session *fnode_sess = NULL;
+ struct device *dev;
+
+ dev = device_find_child(&shost->shost_gendev, &idx,
+ flashnode_match_index);
+ if (dev)
+ fnode_sess = iscsi_dev_to_flash_session(dev);
+
+ return fnode_sess;
+}
+
+/**
+ * iscsi_find_flashnode_sess - finds flashnode session entry
+ * @shost: pointer to host data
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode session object comparing the data passed using logic
+ * defined in passed function pointer
+ *
+ * Returns:
+ * pointer to found flashnode session device object on success
+ * %NULL on failure
+ */
+struct device *
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ return device_find_child(&shost->shost_gendev, data, fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
+
+/**
+ * iscsi_find_flashnode_conn - finds flashnode connection entry
+ * @fnode_sess: pointer to parent flashnode session entry
+ *
+ * Finds the flashnode connection object comparing the data passed using logic
+ * defined in passed function pointer
+ *
+ * Returns:
+ * pointer to found flashnode connection device object on success
+ * %NULL on failure
+ */
+struct device *
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
+{
+ return device_find_child(&fnode_sess->dev, NULL,
+ iscsi_is_flashnode_conn_dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
+
+static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
+{
+ if (!iscsi_is_flashnode_conn_dev(dev, NULL))
+ return 0;
+
+ return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
+}
+
+/**
+ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
+ * @fnode_sess: pointer to flashnode session entry to be destroyed
+ *
+ * Deletes the flashnode session entry and all children flashnode connection
+ * entries from sysfs
+ */
+void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
+{
+ int err;
+
+ err = device_for_each_child(&fnode_sess->dev, NULL,
+ iscsi_iter_destroy_flashnode_conn_fn);
+ if (err)
+ pr_err("Could not delete all connections for %s. Error %d.\n",
+ fnode_sess->dev.kobj.name, err);
+
+ device_unregister(&fnode_sess->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);
+
+static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
+{
+ if (!iscsi_flashnode_bus_match(dev, NULL))
+ return 0;
+
+ iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
+ return 0;
+}
+
+/**
+ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
+ * @shost: pointer to host data
+ *
+ * Destroys all the flashnode session entries and all corresponding children
+ * flashnode connection entries from sysfs
+ */
+void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
+{
+ device_for_each_child(&shost->shost_gendev, NULL,
+ iscsi_iter_destroy_flashnode_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);
+
+/*
+ * BSG support
+ */
+/**
+ * iscsi_bsg_host_dispatch - Dispatch command to LLD.
+ * @job: bsg job to be processed
+ */
+static int iscsi_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = iscsi_job_to_shost(job);
+ struct iscsi_bsg_request *req = job->request;
+ struct iscsi_bsg_reply *reply = job->reply;
+ struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
+ int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
+ int ret;
+
+ /* check if we have the msgcode value at least */
+ if (job->request_len < sizeof(uint32_t)) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
+ /* Validate the host command */
+ switch (req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
+ cmdlen += sizeof(struct iscsi_bsg_host_vendor);
+ if ((shost->hostt->vendor_id == 0L) ||
+ (req->rqst_data.h_vendor.vendor_id !=
+ shost->hostt->vendor_id)) {
+ ret = -ESRCH;
+ goto fail_host_msg;
+ }
+ break;
+ default:
+ ret = -EBADR;
+ goto fail_host_msg;
+ }
+
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
+ ret = i->iscsi_transport->bsg_request(job);
+ if (!ret)
+ return 0;
+
+fail_host_msg:
+ /* return the errno failure code as the only status */
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ reply->reply_payload_rcv_len = 0;
+ reply->result = ret;
+ job->reply_len = sizeof(uint32_t);
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+/**
+ * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests
+ * @shost: shost for iscsi_host
+ * @ihost: iscsi_cls_host adding the structures to
+ */
+static int
+iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
+{
+ struct device *dev = &shost->shost_gendev;
+ struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
+ struct request_queue *q;
+ char bsg_name[20];
+ int ret;
+
+ if (!i->iscsi_transport->bsg_request)
+ return -ENOTSUPP;
+
+ snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
+
+ q = __scsi_alloc_queue(shost, bsg_request_fn);
+ if (!q)
+ return -ENOMEM;
+
+ ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
+ if (ret) {
+ shost_printk(KERN_ERR, shost, "bsg interface failed to "
+ "initialize - no request queue\n");
+ blk_cleanup_queue(q);
+ return ret;
+ }
+
+ ihost->bsg_q = q;
+ return 0;
+}
+
+static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+ atomic_set(&ihost->nr_scans, 0);
+ mutex_init(&ihost->mutex);
+
+ iscsi_bsg_host_add(shost, ihost);
+ /* ignore any bsg add error - we just can't do sgio */
+
+ return 0;
+}
+
+static int iscsi_remove_host(struct transport_container *tc,
+ struct device *dev, struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+
+ if (ihost->bsg_q) {
+ bsg_unregister_queue(ihost->bsg_q);
+ blk_cleanup_queue(ihost->bsg_q);
+ }
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+ "iscsi_host",
+ iscsi_setup_host,
+ iscsi_remove_host,
+ NULL);
+
+static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+ "iscsi_session",
+ NULL,
+ NULL,
+ NULL);
+
+static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
+ "iscsi_connection",
+ NULL,
+ NULL,
+ NULL);
+
+static struct sock *nls;
+static DEFINE_MUTEX(rx_queue_mutex);
+
+static LIST_HEAD(sesslist);
+static DEFINE_SPINLOCK(sesslock);
+static LIST_HEAD(connlist);
+static DEFINE_SPINLOCK(connlock);
+
+static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
+{
+ struct iscsi_cls_session *sess = iscsi_dev_to_session(conn->dev.parent);
+ return sess->sid;
+}
+
+/*
+ * Returns the matching session to a given sid
+ */
+static struct iscsi_cls_session *iscsi_session_lookup(uint32_t sid)
+{
+ unsigned long flags;
+ struct iscsi_cls_session *sess;
+
+ spin_lock_irqsave(&sesslock, flags);
+ list_for_each_entry(sess, &sesslist, sess_list) {
+ if (sess->sid == sid) {
+ spin_unlock_irqrestore(&sesslock, flags);
+ return sess;
+ }
+ }
+ spin_unlock_irqrestore(&sesslock, flags);
+ return NULL;
+}
+
+/*
+ * Returns the matching connection to a given sid / cid tuple
+ */
+static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
+{
+ unsigned long flags;
+ struct iscsi_cls_conn *conn;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_for_each_entry(conn, &connlist, conn_list) {
+ if ((conn->cid == cid) && (iscsi_conn_get_sid(conn) == sid)) {
+ spin_unlock_irqrestore(&connlock, flags);
+ return conn;
+ }
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+ return NULL;
+}
+
+/*
+ * The following functions can be used by LLDs that allocate
+ * their own scsi_hosts or by software iscsi LLDs
+ */
+static struct {
+ int value;
+ char *name;
+} iscsi_session_state_names[] = {
+ { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
+ { ISCSI_SESSION_FAILED, "FAILED" },
+ { ISCSI_SESSION_FREE, "FREE" },
+};
+
+static const char *iscsi_session_state_name(int state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
+ if (iscsi_session_state_names[i].value == state) {
+ name = iscsi_session_state_names[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+int iscsi_session_chkready(struct iscsi_cls_session *session)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&session->lock, flags);
+ switch (session->state) {
+ case ISCSI_SESSION_LOGGED_IN:
+ err = 0;
+ break;
+ case ISCSI_SESSION_FAILED:
+ err = DID_IMM_RETRY << 16;
+ break;
+ case ISCSI_SESSION_FREE:
+ err = DID_TRANSPORT_FAILFAST << 16;
+ break;
+ default:
+ err = DID_NO_CONNECT << 16;
+ break;
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+
+int iscsi_is_session_online(struct iscsi_cls_session *session)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state == ISCSI_SESSION_LOGGED_IN)
+ ret = 1;
+ spin_unlock_irqrestore(&session->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iscsi_is_session_online);
+
+static void iscsi_session_release(struct device *dev)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
+ struct Scsi_Host *shost;
+
+ shost = iscsi_session_to_shost(session);
+ scsi_host_put(shost);
+ ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n");
+ kfree(session);
+}
+
+int iscsi_is_session_dev(const struct device *dev)
+{
+ return dev->release == iscsi_session_release;
+}
+EXPORT_SYMBOL_GPL(iscsi_is_session_dev);
+
+static int iscsi_iter_session_fn(struct device *dev, void *data)
+{
+ void (* fn) (struct iscsi_cls_session *) = data;
+
+ if (!iscsi_is_session_dev(dev))
+ return 0;
+ fn(iscsi_dev_to_session(dev));
+ return 0;
+}
+
+void iscsi_host_for_each_session(struct Scsi_Host *shost,
+ void (*fn)(struct iscsi_cls_session *))
+{
+ device_for_each_child(&shost->shost_gendev, fn,
+ iscsi_iter_session_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+
+/**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+ * @time: scan run time
+ *
+ * This function can be used by drives like qla4xxx to report to the scsi
+ * layer when the scans it kicked off at module load time are done.
+ */
+int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+ */
+ return !atomic_read(&ihost->nr_scans);
+}
+EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+struct iscsi_scan_data {
+ unsigned int channel;
+ unsigned int id;
+ u64 lun;
+};
+
+static int iscsi_user_scan_session(struct device *dev, void *data)
+{
+ struct iscsi_scan_data *scan_data = data;
+ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_cls_host *ihost;
+ unsigned long flags;
+ unsigned int id;
+
+ if (!iscsi_is_session_dev(dev))
+ return 0;
+
+ session = iscsi_dev_to_session(dev);
+
+ ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n");
+
+ shost = iscsi_session_to_shost(session);
+ ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state != ISCSI_SESSION_LOGGED_IN) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ goto user_scan_exit;
+ }
+ id = session->target_id;
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ if (id != ISCSI_MAX_TARGET) {
+ if ((scan_data->channel == SCAN_WILD_CARD ||
+ scan_data->channel == 0) &&
+ (scan_data->id == SCAN_WILD_CARD ||
+ scan_data->id == id))
+ scsi_scan_target(&session->dev, 0, id,
+ scan_data->lun, 1);
+ }
+
+user_scan_exit:
+ mutex_unlock(&ihost->mutex);
+ ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
+ return 0;
+}
+
+static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+ uint id, u64 lun)
+{
+ struct iscsi_scan_data scan_data;
+
+ scan_data.channel = channel;
+ scan_data.id = id;
+ scan_data.lun = lun;
+
+ return device_for_each_child(&shost->shost_gendev, &scan_data,
+ iscsi_user_scan_session);
+}
+
+static void iscsi_scan_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ struct iscsi_scan_data scan_data;
+
+ scan_data.channel = 0;
+ scan_data.id = SCAN_WILD_CARD;
+ scan_data.lun = SCAN_WILD_CARD;
+
+ iscsi_user_scan_session(&session->dev, &scan_data);
+ atomic_dec(&ihost->nr_scans);
+}
+
+/**
+ * iscsi_block_scsi_eh - block scsi eh until session state has transistioned
+ * @cmd: scsi cmd passed to scsi eh handler
+ *
+ * If the session is down this function will wait for the recovery
+ * timer to fire or for the session to be logged back in. If the
+ * recovery timer fires then FAST_IO_FAIL is returned. The caller
+ * should pass this error value to the scsi eh.
+ */
+int iscsi_block_scsi_eh(struct scsi_cmnd *cmd)
+{
+ struct iscsi_cls_session *session =
+ starget_to_session(scsi_target(cmd->device));
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&session->lock, flags);
+ while (session->state != ISCSI_SESSION_LOGGED_IN) {
+ if (session->state == ISCSI_SESSION_FREE) {
+ ret = FAST_IO_FAIL;
+ break;
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
+ msleep(1000);
+ spin_lock_irqsave(&session->lock, flags);
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh);
+
+static void session_recovery_timedout(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ recovery_work.work);
+ unsigned long flags;
+
+ iscsi_cls_session_printk(KERN_INFO, session,
+ "session recovery timed out after %d secs\n",
+ session->recovery_tmo);
+
+ spin_lock_irqsave(&session->lock, flags);
+ switch (session->state) {
+ case ISCSI_SESSION_FAILED:
+ session->state = ISCSI_SESSION_FREE;
+ break;
+ case ISCSI_SESSION_LOGGED_IN:
+ case ISCSI_SESSION_FREE:
+ /* we raced with the unblock's flush */
+ spin_unlock_irqrestore(&session->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
+
+ ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
+ scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
+ ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+}
+
+static void __iscsi_unblock_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
+ /*
+ * The recovery and unblock work get run from the same workqueue,
+ * so try to cancel it if it was going to run after this unblock.
+ */
+ cancel_delayed_work(&session->recovery_work);
+ spin_lock_irqsave(&session->lock, flags);
+ session->state = ISCSI_SESSION_LOGGED_IN;
+ spin_unlock_irqrestore(&session->lock, flags);
+ /* start IO */
+ scsi_target_unblock(&session->dev, SDEV_RUNNING);
+ /*
+ * Only do kernel scanning if the driver is properly hooked into
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+ if (shost->hostt->scan_finished) {
+ if (scsi_queue_work(shost, &session->scan_work))
+ atomic_inc(&ihost->nr_scans);
+ }
+ ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
+}
+
+/**
+ * iscsi_unblock_session - set a session as logged in and start IO.
+ * @session: iscsi session
+ *
+ * Mark a session as ready to accept IO.
+ */
+void iscsi_unblock_session(struct iscsi_cls_session *session)
+{
+ queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+ /*
+ * make sure all the events have completed before tell the driver
+ * it is safe
+ */
+ flush_workqueue(iscsi_eh_timer_workq);
+}
+EXPORT_SYMBOL_GPL(iscsi_unblock_session);
+
+static void __iscsi_block_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ block_work);
+ unsigned long flags;
+
+ ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n");
+ spin_lock_irqsave(&session->lock, flags);
+ session->state = ISCSI_SESSION_FAILED;
+ spin_unlock_irqrestore(&session->lock, flags);
+ scsi_target_block(&session->dev);
+ ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
+ if (session->recovery_tmo >= 0)
+ queue_delayed_work(iscsi_eh_timer_workq,
+ &session->recovery_work,
+ session->recovery_tmo * HZ);
+}
+
+void iscsi_block_session(struct iscsi_cls_session *session)
+{
+ queue_work(iscsi_eh_timer_workq, &session->block_work);
+}
+EXPORT_SYMBOL_GPL(iscsi_block_session);
+
+static void __iscsi_unbind_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ unsigned long flags;
+ unsigned int target_id;
+
+ ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->target_id == ISCSI_MAX_TARGET) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+
+ target_id = session->target_id;
+ session->target_id = ISCSI_MAX_TARGET;
+ spin_unlock_irqrestore(&session->lock, flags);
+ mutex_unlock(&ihost->mutex);
+
+ if (session->ida_used)
+ ida_simple_remove(&iscsi_sess_ida, target_id);
+
+ scsi_remove_target(&session->dev);
+ iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+ ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
+}
+
+struct iscsi_cls_session *
+iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ int dd_size)
+{
+ struct iscsi_cls_session *session;
+
+ session = kzalloc(sizeof(*session) + dd_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ session->transport = transport;
+ session->creator = -1;
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+ INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
+ INIT_WORK(&session->scan_work, iscsi_scan_session);
+ spin_lock_init(&session->lock);
+
+ /* this is released in the dev's release function */
+ scsi_host_get(shost);
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+ if (dd_size)
+ session->dd_data = &session[1];
+
+ ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n");
+ return session;
+}
+EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_cls_host *ihost;
+ unsigned long flags;
+ int id = 0;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+
+ if (target_id == ISCSI_MAX_TARGET) {
+ id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+
+ if (id < 0) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Failure in Target ID Allocation\n");
+ return id;
+ }
+ session->target_id = (unsigned int)id;
+ session->ida_used = true;
+ } else
+ session->target_id = target_id;
+
+ dev_set_name(&session->dev, "session%u", session->sid);
+ err = device_add(&session->dev);
+ if (err) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register session's dev\n");
+ goto release_ida;
+ }
+ transport_register_device(&session->dev);
+
+ spin_lock_irqsave(&sesslock, flags);
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
+ return 0;
+
+release_ida:
+ if (session->ida_used)
+ ida_simple_remove(&iscsi_sess_ida, session->target_id);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(iscsi_add_session);
+
+/**
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+ * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+struct iscsi_cls_session *
+iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ int dd_size, unsigned int target_id)
+{
+ struct iscsi_cls_session *session;
+
+ session = iscsi_alloc_session(shost, transport, dd_size);
+ if (!session)
+ return NULL;
+
+ if (iscsi_add_session(session, target_id)) {
+ iscsi_free_session(session);
+ return NULL;
+ }
+ return session;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_session);
+
+static void iscsi_conn_release(struct device *dev)
+{
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+ struct device *parent = conn->dev.parent;
+
+ ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n");
+ kfree(conn);
+ put_device(parent);
+}
+
+static int iscsi_is_conn_dev(const struct device *dev)
+{
+ return dev->release == iscsi_conn_release;
+}
+
+static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+{
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+ return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+}
+
+void iscsi_remove_session(struct iscsi_cls_session *session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ unsigned long flags;
+ int err;
+
+ ISCSI_DBG_TRANS_SESSION(session, "Removing session\n");
+
+ spin_lock_irqsave(&sesslock, flags);
+ list_del(&session->sess_list);
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ /* make sure there are no blocks/unblocks queued */
+ flush_workqueue(iscsi_eh_timer_workq);
+ /* make sure the timedout callout is not running */
+ if (!cancel_delayed_work(&session->recovery_work))
+ flush_workqueue(iscsi_eh_timer_workq);
+ /*
+ * If we are blocked let commands flow again. The lld or iscsi
+ * layer should set up the queuecommand to fail commands.
+ * We assume that LLD will not be calling block/unblock while
+ * removing the session.
+ */
+ spin_lock_irqsave(&session->lock, flags);
+ session->state = ISCSI_SESSION_FREE;
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
+ /* flush running scans then delete devices */
+ scsi_flush_work(shost);
+ __iscsi_unbind_session(&session->unbind_work);
+
+ /* hw iscsi may not have removed all connections from session */
+ err = device_for_each_child(&session->dev, NULL,
+ iscsi_iter_destroy_conn_fn);
+ if (err)
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Could not delete all connections "
+ "for session. Error %d.\n", err);
+
+ transport_unregister_device(&session->dev);
+
+ ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
+ device_del(&session->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_remove_session);
+
+void iscsi_free_session(struct iscsi_cls_session *session)
+{
+ ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
+ iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
+ put_device(&session->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_free_session);
+
+/**
+ * iscsi_destroy_session - destroy iscsi session
+ * @session: iscsi_session
+ *
+ * Can be called by a LLD or iscsi_transport. There must not be
+ * any running connections.
+ */
+int iscsi_destroy_session(struct iscsi_cls_session *session)
+{
+ iscsi_remove_session(session);
+ ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
+ iscsi_free_session(session);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+
+/**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+ * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+ * is child of the session so cid must be unique for all connections
+ * on the session.
+ *
+ * Since we do not support MCS, cid will normally be zero. In some cases
+ * for software iscsi we could be trying to preallocate a connection struct
+ * in which case there could be two connection structs and cid would be
+ * non-zero.
+ */
+struct iscsi_cls_conn *
+iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+{
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+ conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+ if (dd_size)
+ conn->dd_data = &conn[1];
+
+ mutex_init(&conn->ep_mutex);
+ INIT_LIST_HEAD(&conn->conn_list);
+ conn->transport = transport;
+ conn->cid = cid;
+
+ /* this is released in the dev's release function */
+ if (!get_device(&session->dev))
+ goto free_conn;
+
+ dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
+ conn->dev.parent = &session->dev;
+ conn->dev.release = iscsi_conn_release;
+ err = device_register(&conn->dev);
+ if (err) {
+ iscsi_cls_session_printk(KERN_ERR, session, "could not "
+ "register connection's dev\n");
+ goto release_parent_ref;
+ }
+ transport_register_device(&conn->dev);
+
+ spin_lock_irqsave(&connlock, flags);
+ list_add(&conn->conn_list, &connlist);
+ spin_unlock_irqrestore(&connlock, flags);
+
+ ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
+ return conn;
+
+release_parent_ref:
+ put_device(&session->dev);
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+
+EXPORT_SYMBOL_GPL(iscsi_create_conn);
+
+/**
+ * iscsi_destroy_conn - destroy iscsi class connection
+ * @conn: iscsi cls session
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_del(&conn->conn_list);
+ spin_unlock_irqrestore(&connlock, flags);
+
+ transport_unregister_device(&conn->dev);
+ ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
+ device_unregister(&conn->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+
+/*
+ * iscsi interface functions
+ */
+static struct iscsi_internal *
+iscsi_if_transport_lookup(struct iscsi_transport *tt)
+{
+ struct iscsi_internal *priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iscsi_transport_lock, flags);
+ list_for_each_entry(priv, &iscsi_transports, list) {
+ if (tt == priv->iscsi_transport) {
+ spin_unlock_irqrestore(&iscsi_transport_lock, flags);
+ return priv;
+ }
+ }
+ spin_unlock_irqrestore(&iscsi_transport_lock, flags);
+ return NULL;
+}
+
+static int
+iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
+{
+ return nlmsg_multicast(nls, skb, 0, group, gfp);
+}
+
+int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ char *pdu;
+ struct iscsi_internal *priv;
+ int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) +
+ data_size);
+
+ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return -EINVAL;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
+ "control PDU: OOM\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ memset(ev, 0, sizeof(*ev));
+ ev->transport_handle = iscsi_handle(conn->transport);
+ ev->type = ISCSI_KEVENT_RECV_PDU;
+ ev->r.recv_req.cid = conn->cid;
+ ev->r.recv_req.sid = iscsi_conn_get_sid(conn);
+ pdu = (char*)ev + sizeof(*ev);
+ memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
+ memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
+
+ return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
+
+int iscsi_offload_mesg(struct Scsi_Host *shost,
+ struct iscsi_transport *transport, uint32_t type,
+ char *data, uint16_t data_size)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ int len = nlmsg_total_size(sizeof(*ev) + data_size);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ memset(ev, 0, sizeof(*ev));
+ ev->type = type;
+ ev->transport_handle = iscsi_handle(transport);
+ switch (type) {
+ case ISCSI_KEVENT_PATH_REQ:
+ ev->r.req_path.host_no = shost->host_no;
+ break;
+ case ISCSI_KEVENT_IF_DOWN:
+ ev->r.notify_if_down.host_no = shost->host_no;
+ break;
+ }
+
+ memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+ return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+
+void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ struct iscsi_internal *priv;
+ int len = nlmsg_total_size(sizeof(*ev));
+
+ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+ "conn error (%d)\n", error);
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ ev->transport_handle = iscsi_handle(conn->transport);
+ ev->type = ISCSI_KEVENT_CONN_ERROR;
+ ev->r.connerror.error = error;
+ ev->r.connerror.cid = conn->cid;
+ ev->r.connerror.sid = iscsi_conn_get_sid(conn);
+
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
+
+ iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
+ error);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
+
+void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
+ enum iscsi_conn_state state)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ struct iscsi_internal *priv;
+ int len = nlmsg_total_size(sizeof(*ev));
+
+ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+ "conn login (%d)\n", state);
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ ev->transport_handle = iscsi_handle(conn->transport);
+ ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
+ ev->r.conn_login.state = state;
+ ev->r.conn_login.cid = conn->cid;
+ ev->r.conn_login.sid = iscsi_conn_get_sid(conn);
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
+
+ iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n",
+ state);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
+
+void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
+ enum iscsi_host_event_code code, uint32_t data_size,
+ uint8_t *data)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ int len = nlmsg_total_size(sizeof(*ev) + data_size);
+
+ skb = alloc_skb(len, GFP_NOIO);
+ if (!skb) {
+ printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
+ host_no, code);
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ ev->transport_handle = iscsi_handle(transport);
+ ev->type = ISCSI_KEVENT_HOST_EVENT;
+ ev->r.host_event.host_no = host_no;
+ ev->r.host_event.code = code;
+ ev->r.host_event.data_size = data_size;
+
+ if (data_size)
+ memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_post_host_event);
+
+void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
+ uint32_t status, uint32_t pid, uint32_t data_size,
+ uint8_t *data)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ int len = nlmsg_total_size(sizeof(*ev) + data_size);
+
+ skb = alloc_skb(len, GFP_NOIO);
+ if (!skb) {
+ printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ ev->transport_handle = iscsi_handle(transport);
+ ev->type = ISCSI_KEVENT_PING_COMP;
+ ev->r.ping_comp.host_no = host_no;
+ ev->r.ping_comp.status = status;
+ ev->r.ping_comp.pid = pid;
+ ev->r.ping_comp.data_size = data_size;
+ memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
+
+static int
+iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
+ void *payload, int size)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ int len = nlmsg_total_size(size);
+ int flags = multi ? NLM_F_MULTI : 0;
+ int t = done ? NLMSG_DONE : type;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "Could not allocate skb to send reply.\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
+ nlh->nlmsg_flags = flags;
+ memcpy(nlmsg_data(nlh), payload, size);
+ return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
+}
+
+static int
+iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct iscsi_stats *stats;
+ struct sk_buff *skbstat;
+ struct iscsi_cls_conn *conn;
+ struct nlmsghdr *nlhstat;
+ struct iscsi_uevent *evstat;
+ struct iscsi_internal *priv;
+ int len = nlmsg_total_size(sizeof(*ev) +
+ sizeof(struct iscsi_stats) +
+ sizeof(struct iscsi_stats_custom) *
+ ISCSI_STATS_CUSTOM_MAX);
+ int err = 0;
+
+ priv = iscsi_if_transport_lookup(transport);
+ if (!priv)
+ return -EINVAL;
+
+ conn = iscsi_conn_lookup(ev->u.get_stats.sid, ev->u.get_stats.cid);
+ if (!conn)
+ return -EEXIST;
+
+ do {
+ int actual_size;
+
+ skbstat = alloc_skb(len, GFP_ATOMIC);
+ if (!skbstat) {
+ iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
+ "deliver stats: OOM\n");
+ return -ENOMEM;
+ }
+
+ nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
+ (len - sizeof(*nlhstat)), 0);
+ evstat = nlmsg_data(nlhstat);
+ memset(evstat, 0, sizeof(*evstat));
+ evstat->transport_handle = iscsi_handle(conn->transport);
+ evstat->type = nlh->nlmsg_type;
+ evstat->u.get_stats.cid =
+ ev->u.get_stats.cid;
+ evstat->u.get_stats.sid =
+ ev->u.get_stats.sid;
+ stats = (struct iscsi_stats *)
+ ((char*)evstat + sizeof(*evstat));
+ memset(stats, 0, sizeof(*stats));
+
+ transport->get_stats(conn, stats);
+ actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) +
+ sizeof(struct iscsi_stats) +
+ sizeof(struct iscsi_stats_custom) *
+ stats->custom_length);
+ actual_size -= sizeof(*nlhstat);
+ actual_size = nlmsg_msg_size(actual_size);
+ skb_trim(skbstat, NLMSG_ALIGN(actual_size));
+ nlhstat->nlmsg_len = actual_size;
+
+ err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
+ GFP_ATOMIC);
+ } while (err < 0 && err != -ECONNREFUSED);
+
+ return err;
+}
+
+/**
+ * iscsi_session_event - send session destr. completion event
+ * @session: iscsi class session
+ * @event: type of event
+ */
+int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event)
+{
+ struct iscsi_internal *priv;
+ struct Scsi_Host *shost;
+ struct iscsi_uevent *ev;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ int rc, len = nlmsg_total_size(sizeof(*ev));
+
+ priv = iscsi_if_transport_lookup(session->transport);
+ if (!priv)
+ return -EINVAL;
+ shost = iscsi_session_to_shost(session);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Cannot notify userspace of session "
+ "event %u\n", event);
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = nlmsg_data(nlh);
+ ev->transport_handle = iscsi_handle(session->transport);
+
+ ev->type = event;
+ switch (event) {
+ case ISCSI_KEVENT_DESTROY_SESSION:
+ ev->r.d_session.host_no = shost->host_no;
+ ev->r.d_session.sid = session->sid;
+ break;
+ case ISCSI_KEVENT_CREATE_SESSION:
+ ev->r.c_session_ret.host_no = shost->host_no;
+ ev->r.c_session_ret.sid = session->sid;
+ break;
+ case ISCSI_KEVENT_UNBIND_SESSION:
+ ev->r.unbind_session.host_no = shost->host_no;
+ ev->r.unbind_session.sid = session->sid;
+ break;
+ default:
+ iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
+ "%u.\n", event);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /*
+ * this will occur if the daemon is not up, so we just warn
+ * the user and when the daemon is restarted it will handle it
+ */
+ rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+ if (rc == -ESRCH)
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Cannot notify userspace of session "
+ "event %u. Check iscsi daemon\n",
+ event);
+
+ ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n",
+ event, rc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+static int
+iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+ struct iscsi_uevent *ev, pid_t pid,
+ uint32_t initial_cmdsn, uint16_t cmds_max,
+ uint16_t queue_depth)
+{
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+
+ session = transport->create_session(ep, cmds_max, queue_depth,
+ initial_cmdsn);
+ if (!session)
+ return -ENOMEM;
+
+ session->creator = pid;
+ shost = iscsi_session_to_shost(session);
+ ev->r.c_session_ret.host_no = shost->host_no;
+ ev->r.c_session_ret.sid = session->sid;
+ ISCSI_DBG_TRANS_SESSION(session,
+ "Completed creating transport session\n");
+ return 0;
+}
+
+static int
+iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session;
+
+ session = iscsi_session_lookup(ev->u.c_conn.sid);
+ if (!session) {
+ printk(KERN_ERR "iscsi: invalid session %d.\n",
+ ev->u.c_conn.sid);
+ return -EINVAL;
+ }
+
+ conn = transport->create_conn(session, ev->u.c_conn.cid);
+ if (!conn) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "couldn't create a new connection.");
+ return -ENOMEM;
+ }
+
+ ev->r.c_conn_ret.sid = session->sid;
+ ev->r.c_conn_ret.cid = conn->cid;
+
+ ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n");
+ return 0;
+}
+
+static int
+iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+ if (transport->destroy_conn)
+ transport->destroy_conn(conn);
+
+ return 0;
+}
+
+static int
+iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ char *data = (char*)ev + sizeof(*ev);
+ struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session;
+ int err = 0, value = 0;
+
+ session = iscsi_session_lookup(ev->u.set_param.sid);
+ conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
+ if (!conn || !session)
+ return -EINVAL;
+
+ switch (ev->u.set_param.param) {
+ case ISCSI_PARAM_SESS_RECOVERY_TMO:
+ sscanf(data, "%d", &value);
+ session->recovery_tmo = value;
+ break;
+ default:
+ err = transport->set_param(conn, ev->u.set_param.param,
+ data, ev->u.set_param.len);
+ }
+
+ return err;
+}
+
+static int iscsi_if_ep_connect(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ struct Scsi_Host *shost = NULL;
+ int non_blocking, err = 0;
+
+ if (!transport->ep_connect)
+ return -EINVAL;
+
+ if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
+ shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
+ if (!shost) {
+ printk(KERN_ERR "ep connect failed. Could not find "
+ "host no %u\n",
+ ev->u.ep_connect_through_host.host_no);
+ return -ENODEV;
+ }
+ non_blocking = ev->u.ep_connect_through_host.non_blocking;
+ } else
+ non_blocking = ev->u.ep_connect.non_blocking;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+ ep = transport->ep_connect(shost, dst_addr, non_blocking);
+ if (IS_ERR(ep)) {
+ err = PTR_ERR(ep);
+ goto release_host;
+ }
+
+ ev->r.ep_connect_ret.handle = ep->id;
+release_host:
+ if (shost)
+ scsi_host_put(shost);
+ return err;
+}
+
+static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ u64 ep_handle)
+{
+ struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep;
+
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+ ep = iscsi_lookup_endpoint(ep_handle);
+ if (!ep)
+ return -EINVAL;
+ conn = ep->conn;
+ if (conn) {
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = NULL;
+ mutex_unlock(&conn->ep_mutex);
+ }
+
+ transport->ep_disconnect(ep);
+ return 0;
+}
+
+static int
+iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+{
+ struct iscsi_endpoint *ep;
+ int rc = 0;
+
+ switch (msg_type) {
+ case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
+ case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
+ rc = iscsi_if_ep_connect(transport, ev, msg_type);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+ ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+ if (!ep)
+ return -EINVAL;
+
+ ev->r.retcode = transport->ep_poll(ep,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ rc = iscsi_if_ep_disconnect(transport,
+ ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+}
+
+static int
+iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+ int err;
+
+ if (!transport->tgt_dscvr)
+ return -EINVAL;
+
+ shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+ if (!shost) {
+ printk(KERN_ERR "target discovery could not find host no %u\n",
+ ev->u.tgt_dscvr.host_no);
+ return -ENODEV;
+ }
+
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+ err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+ ev->u.tgt_dscvr.enable, dst_addr);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_set_host_param(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ char *data = (char*)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err;
+
+ if (!transport->set_host_param)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+ if (!shost) {
+ printk(KERN_ERR "set_host_param could not find host no %u\n",
+ ev->u.set_host_param.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_host_param(shost, ev->u.set_host_param.param,
+ data, ev->u.set_host_param.len);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_path *params;
+ int err;
+
+ if (!transport->set_path)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_path.host_no);
+ if (!shost) {
+ printk(KERN_ERR "set path could not find host no %u\n",
+ ev->u.set_path.host_no);
+ return -ENODEV;
+ }
+
+ params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
+ err = transport->set_path(shost, params);
+
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_set_iface_params(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err;
+
+ if (!transport->set_iface_param)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_iface_params.host_no);
+ if (!shost) {
+ printk(KERN_ERR "set_iface_params could not find host no %u\n",
+ ev->u.set_iface_params.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_iface_param(shost, data, len);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+ int err;
+
+ if (!transport->send_ping)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.iscsi_ping.host_no);
+ if (!shost) {
+ printk(KERN_ERR "iscsi_ping could not find host no %u\n",
+ ev->u.iscsi_ping.host_no);
+ return -ENODEV;
+ }
+
+ dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
+ err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num,
+ ev->u.iscsi_ping.iface_type,
+ ev->u.iscsi_ping.payload_size,
+ ev->u.iscsi_ping.pid,
+ dst_addr);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct Scsi_Host *shost = NULL;
+ struct iscsi_chap_rec *chap_rec;
+ struct iscsi_internal *priv;
+ struct sk_buff *skbchap;
+ struct nlmsghdr *nlhchap;
+ struct iscsi_uevent *evchap;
+ uint32_t chap_buf_size;
+ int len, err = 0;
+ char *buf;
+
+ if (!transport->get_chap)
+ return -EINVAL;
+
+ priv = iscsi_if_transport_lookup(transport);
+ if (!priv)
+ return -EINVAL;
+
+ chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
+ len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
+
+ shost = scsi_host_lookup(ev->u.get_chap.host_no);
+ if (!shost) {
+ printk(KERN_ERR "%s: failed. Cound not find host no %u\n",
+ __func__, ev->u.get_chap.host_no);
+ return -ENODEV;
+ }
+
+ do {
+ int actual_size;
+
+ skbchap = alloc_skb(len, GFP_KERNEL);
+ if (!skbchap) {
+ printk(KERN_ERR "can not deliver chap: OOM\n");
+ err = -ENOMEM;
+ goto exit_get_chap;
+ }
+
+ nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
+ (len - sizeof(*nlhchap)), 0);
+ evchap = nlmsg_data(nlhchap);
+ memset(evchap, 0, sizeof(*evchap));
+ evchap->transport_handle = iscsi_handle(transport);
+ evchap->type = nlh->nlmsg_type;
+ evchap->u.get_chap.host_no = ev->u.get_chap.host_no;
+ evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx;
+ evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries;
+ buf = (char *)evchap + sizeof(*evchap);
+ memset(buf, 0, chap_buf_size);
+
+ err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
+ &evchap->u.get_chap.num_entries, buf);
+
+ actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
+ skb_trim(skbchap, NLMSG_ALIGN(actual_size));
+ nlhchap->nlmsg_len = actual_size;
+
+ err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID,
+ GFP_KERNEL);
+ } while (err < 0 && err != -ECONNREFUSED);
+
+exit_get_chap:
+ scsi_host_put(shost);
+ return err;
+}
+
+static int iscsi_set_chap(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err = 0;
+
+ if (!transport->set_chap)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_path.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.set_path.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_chap(shost, data, len);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int iscsi_delete_chap(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ int err = 0;
+
+ if (!transport->delete_chap)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.delete_chap.host_no);
+ if (!shost) {
+ printk(KERN_ERR "%s could not find host no %u\n",
+ __func__, ev->u.delete_chap.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx);
+ scsi_host_put(shost);
+ return err;
+}
+
+static const struct {
+ enum iscsi_discovery_parent_type value;
+ char *name;
+} iscsi_discovery_parent_names[] = {
+ {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" },
+ {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" },
+ {ISCSI_DISC_PARENT_ISNS, "isns" },
+};
+
+char *iscsi_get_discovery_parent_name(int parent_type)
+{
+ int i;
+ char *state = "Unknown!";
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) {
+ if (iscsi_discovery_parent_names[i].value & parent_type) {
+ state = iscsi_discovery_parent_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name);
+
+static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct device *dev;
+ uint32_t idx;
+ int err = 0;
+
+ if (!transport->set_flashnode_param) {
+ err = -ENOSYS;
+ goto exit_set_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.set_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.set_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = ev->u.set_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, idx, ev->u.set_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ dev = iscsi_find_flashnode_conn(fnode_sess);
+ if (!dev) {
+ err = -ENODEV;
+ goto put_sess;
+ }
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+ err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+ put_device(dev);
+
+put_sess:
+ put_device(&fnode_sess->dev);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_set_fnode:
+ return err;
+}
+
+static int iscsi_new_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int index;
+ int err = 0;
+
+ if (!transport->new_flashnode) {
+ err = -ENOSYS;
+ goto exit_new_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.new_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.new_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ index = transport->new_flashnode(shost, data, len);
+
+ if (index >= 0)
+ ev->r.new_flashnode_ret.flashnode_idx = index;
+ else
+ err = -EIO;
+
+put_host:
+ scsi_host_put(shost);
+
+exit_new_fnode:
+ return err;
+}
+
+static int iscsi_del_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ uint32_t idx;
+ int err = 0;
+
+ if (!transport->del_flashnode) {
+ err = -ENOSYS;
+ goto exit_del_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.del_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.del_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = ev->u.del_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, idx, ev->u.del_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ err = transport->del_flashnode(fnode_sess);
+ put_device(&fnode_sess->dev);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_del_fnode:
+ return err;
+}
+
+static int iscsi_login_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct device *dev;
+ uint32_t idx;
+ int err = 0;
+
+ if (!transport->login_flashnode) {
+ err = -ENOSYS;
+ goto exit_login_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.login_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.login_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = ev->u.login_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, idx, ev->u.login_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ dev = iscsi_find_flashnode_conn(fnode_sess);
+ if (!dev) {
+ err = -ENODEV;
+ goto put_sess;
+ }
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+ err = transport->login_flashnode(fnode_sess, fnode_conn);
+ put_device(dev);
+
+put_sess:
+ put_device(&fnode_sess->dev);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_login_fnode:
+ return err;
+}
+
+static int iscsi_logout_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct device *dev;
+ uint32_t idx;
+ int err = 0;
+
+ if (!transport->logout_flashnode) {
+ err = -ENOSYS;
+ goto exit_logout_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.logout_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = ev->u.logout_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, idx, ev->u.logout_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ dev = iscsi_find_flashnode_conn(fnode_sess);
+ if (!dev) {
+ err = -ENODEV;
+ goto put_sess;
+ }
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+ err = transport->logout_flashnode(fnode_sess, fnode_conn);
+ put_device(dev);
+
+put_sess:
+ put_device(&fnode_sess->dev);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_logout_fnode:
+ return err;
+}
+
+static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *session;
+ int err = 0;
+
+ if (!transport->logout_flashnode_sid) {
+ err = -ENOSYS;
+ goto exit_logout_sid;
+ }
+
+ shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.logout_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
+ if (!session) {
+ pr_err("%s could not find session id %u\n",
+ __func__, ev->u.logout_flashnode_sid.sid);
+ err = -EINVAL;
+ goto put_host;
+ }
+
+ err = transport->logout_flashnode_sid(session);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_logout_sid:
+ return err;
+}
+
+static int
+iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct Scsi_Host *shost = NULL;
+ struct iscsi_internal *priv;
+ struct sk_buff *skbhost_stats;
+ struct nlmsghdr *nlhhost_stats;
+ struct iscsi_uevent *evhost_stats;
+ int host_stats_size = 0;
+ int len, err = 0;
+ char *buf;
+
+ if (!transport->get_host_stats)
+ return -ENOSYS;
+
+ priv = iscsi_if_transport_lookup(transport);
+ if (!priv)
+ return -EINVAL;
+
+ host_stats_size = sizeof(struct iscsi_offload_host_stats);
+ len = nlmsg_total_size(sizeof(*ev) + host_stats_size);
+
+ shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
+ if (!shost) {
+ pr_err("%s: failed. Cound not find host no %u\n",
+ __func__, ev->u.get_host_stats.host_no);
+ return -ENODEV;
+ }
+
+ do {
+ int actual_size;
+
+ skbhost_stats = alloc_skb(len, GFP_KERNEL);
+ if (!skbhost_stats) {
+ pr_err("cannot deliver host stats: OOM\n");
+ err = -ENOMEM;
+ goto exit_host_stats;
+ }
+
+ nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0,
+ (len - sizeof(*nlhhost_stats)), 0);
+ evhost_stats = nlmsg_data(nlhhost_stats);
+ memset(evhost_stats, 0, sizeof(*evhost_stats));
+ evhost_stats->transport_handle = iscsi_handle(transport);
+ evhost_stats->type = nlh->nlmsg_type;
+ evhost_stats->u.get_host_stats.host_no =
+ ev->u.get_host_stats.host_no;
+ buf = (char *)evhost_stats + sizeof(*evhost_stats);
+ memset(buf, 0, host_stats_size);
+
+ err = transport->get_host_stats(shost, buf, host_stats_size);
+ if (err) {
+ kfree_skb(skbhost_stats);
+ goto exit_host_stats;
+ }
+
+ actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size);
+ skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size));
+ nlhhost_stats->nlmsg_len = actual_size;
+
+ err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID,
+ GFP_KERNEL);
+ } while (err < 0 && err != -ECONNREFUSED);
+
+exit_host_stats:
+ scsi_host_put(shost);
+ return err;
+}
+
+
+static int
+iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+{
+ int err = 0;
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct iscsi_transport *transport = NULL;
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep = NULL;
+
+ if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
+ *group = ISCSI_NL_GRP_UIP;
+ else
+ *group = ISCSI_NL_GRP_ISCSID;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+ return -EINVAL;
+ transport = priv->iscsi_transport;
+
+ if (!try_module_get(transport->owner))
+ return -EINVAL;
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+ err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CB(skb).portid,
+ ev->u.c_session.initial_cmdsn,
+ ev->u.c_session.cmds_max,
+ ev->u.c_session.queue_depth);
+ break;
+ case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+ ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+ if (!ep) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CB(skb).portid,
+ ev->u.c_bound_session.initial_cmdsn,
+ ev->u.c_bound_session.cmds_max,
+ ev->u.c_bound_session.queue_depth);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+ if (session)
+ transport->destroy_session(session);
+ else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_UNBIND_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+ if (session)
+ scsi_queue_work(iscsi_session_to_shost(session),
+ &session->unbind_work);
+ else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_CREATE_CONN:
+ err = iscsi_if_create_conn(transport, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_CONN:
+ err = iscsi_if_destroy_conn(transport, ev);
+ break;
+ case ISCSI_UEVENT_BIND_CONN:
+ session = iscsi_session_lookup(ev->u.b_conn.sid);
+ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+
+ if (conn && conn->ep)
+ iscsi_if_ep_disconnect(transport, conn->ep->id);
+
+ if (!session || !conn) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = ep;
+ mutex_unlock(&conn->ep_mutex);
+ } else
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn "
+ "binding\n");
+ break;
+ case ISCSI_UEVENT_SET_PARAM:
+ err = iscsi_set_param(transport, ev);
+ break;
+ case ISCSI_UEVENT_START_CONN:
+ conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
+ if (conn)
+ ev->r.retcode = transport->start_conn(conn);
+ else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_STOP_CONN:
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
+ if (conn)
+ transport->stop_conn(conn, ev->u.stop_conn.flag);
+ else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+ if (conn)
+ ev->r.retcode = transport->send_pdu(conn,
+ (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
+ (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+ ev->u.send_pdu.data_size);
+ else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_GET_STATS:
+ err = iscsi_if_get_stats(transport, nlh);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
+ err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
+ break;
+ case ISCSI_UEVENT_TGT_DSCVR:
+ err = iscsi_tgt_dscvr(transport, ev);
+ break;
+ case ISCSI_UEVENT_SET_HOST_PARAM:
+ err = iscsi_set_host_param(transport, ev);
+ break;
+ case ISCSI_UEVENT_PATH_UPDATE:
+ err = iscsi_set_path(transport, ev);
+ break;
+ case ISCSI_UEVENT_SET_IFACE_PARAMS:
+ err = iscsi_set_iface_params(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
+ case ISCSI_UEVENT_PING:
+ err = iscsi_send_ping(transport, ev);
+ break;
+ case ISCSI_UEVENT_GET_CHAP:
+ err = iscsi_get_chap(transport, nlh);
+ break;
+ case ISCSI_UEVENT_DELETE_CHAP:
+ err = iscsi_delete_chap(transport, ev);
+ break;
+ case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
+ err = iscsi_set_flashnode_param(transport, ev,
+ nlmsg_attrlen(nlh,
+ sizeof(*ev)));
+ break;
+ case ISCSI_UEVENT_NEW_FLASHNODE:
+ err = iscsi_new_flashnode(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
+ case ISCSI_UEVENT_DEL_FLASHNODE:
+ err = iscsi_del_flashnode(transport, ev);
+ break;
+ case ISCSI_UEVENT_LOGIN_FLASHNODE:
+ err = iscsi_login_flashnode(transport, ev);
+ break;
+ case ISCSI_UEVENT_LOGOUT_FLASHNODE:
+ err = iscsi_logout_flashnode(transport, ev);
+ break;
+ case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
+ err = iscsi_logout_flashnode_sid(transport, ev);
+ break;
+ case ISCSI_UEVENT_SET_CHAP:
+ err = iscsi_set_chap(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
+ case ISCSI_UEVENT_GET_HOST_STATS:
+ err = iscsi_get_host_stats(transport, nlh);
+ break;
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ module_put(transport->owner);
+ return err;
+}
+
+/*
+ * Get message from skb. Each message is processed by iscsi_if_recv_msg.
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+static void
+iscsi_if_rx(struct sk_buff *skb)
+{
+ mutex_lock(&rx_queue_mutex);
+ while (skb->len >= NLMSG_HDRLEN) {
+ int err;
+ uint32_t rlen;
+ struct nlmsghdr *nlh;
+ struct iscsi_uevent *ev;
+ uint32_t group;
+
+ nlh = nlmsg_hdr(skb);
+ if (nlh->nlmsg_len < sizeof(*nlh) ||
+ skb->len < nlh->nlmsg_len) {
+ break;
+ }
+
+ ev = nlmsg_data(nlh);
+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (rlen > skb->len)
+ rlen = skb->len;
+
+ err = iscsi_if_recv_msg(skb, nlh, &group);
+ if (err) {
+ ev->type = ISCSI_KEVENT_IF_ERROR;
+ ev->iferror = err;
+ }
+ do {
+ /*
+ * special case for GET_STATS:
+ * on success - sending reply and stats from
+ * inside of if_recv_msg(),
+ * on error - fall through.
+ */
+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
+ break;
+ if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
+ break;
+ err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+ } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
+ skb_pull(skb, rlen);
+ }
+ mutex_unlock(&rx_queue_mutex);
+}
+
+#define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+/*
+ * iSCSI connection attrs
+ */
+#define iscsi_conn_attr_show(param) \
+static ssize_t \
+show_conn_param_##param(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+}
+
+#define iscsi_conn_attr(field, param) \
+ iscsi_conn_attr_show(param) \
+static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \
+ NULL);
+
+iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH);
+iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH);
+iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN);
+iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
+iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
+iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
+iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
+iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
+iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
+iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT);
+iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN);
+iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO);
+iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE);
+iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT);
+iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE);
+iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE);
+iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE);
+iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN);
+iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE);
+iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS);
+iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC);
+iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);
+iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
+iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
+iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
+iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
+
+
+#define iscsi_conn_ep_attr_show(param) \
+static ssize_t show_conn_ep_param_##param(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
+ struct iscsi_transport *t = conn->transport; \
+ struct iscsi_endpoint *ep; \
+ ssize_t rc; \
+ \
+ /* \
+ * Need to make sure ep_disconnect does not free the LLD's \
+ * interconnect resources while we are trying to read them. \
+ */ \
+ mutex_lock(&conn->ep_mutex); \
+ ep = conn->ep; \
+ if (!ep && t->ep_connect) { \
+ mutex_unlock(&conn->ep_mutex); \
+ return -ENOTCONN; \
+ } \
+ \
+ if (ep) \
+ rc = t->get_ep_param(ep, param, buf); \
+ else \
+ rc = t->get_conn_param(conn, param, buf); \
+ mutex_unlock(&conn->ep_mutex); \
+ return rc; \
+}
+
+#define iscsi_conn_ep_attr(field, param) \
+ iscsi_conn_ep_attr_show(param) \
+static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
+ show_conn_ep_param_##param, NULL);
+
+iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
+
+static struct attribute *iscsi_conn_attrs[] = {
+ &dev_attr_conn_max_recv_dlength.attr,
+ &dev_attr_conn_max_xmit_dlength.attr,
+ &dev_attr_conn_header_digest.attr,
+ &dev_attr_conn_data_digest.attr,
+ &dev_attr_conn_ifmarker.attr,
+ &dev_attr_conn_ofmarker.attr,
+ &dev_attr_conn_address.attr,
+ &dev_attr_conn_port.attr,
+ &dev_attr_conn_exp_statsn.attr,
+ &dev_attr_conn_persistent_address.attr,
+ &dev_attr_conn_persistent_port.attr,
+ &dev_attr_conn_ping_tmo.attr,
+ &dev_attr_conn_recv_tmo.attr,
+ &dev_attr_conn_local_port.attr,
+ &dev_attr_conn_statsn.attr,
+ &dev_attr_conn_keepalive_tmo.attr,
+ &dev_attr_conn_max_segment_size.attr,
+ &dev_attr_conn_tcp_timestamp_stat.attr,
+ &dev_attr_conn_tcp_wsf_disable.attr,
+ &dev_attr_conn_tcp_nagle_disable.attr,
+ &dev_attr_conn_tcp_timer_scale.attr,
+ &dev_attr_conn_tcp_timestamp_enable.attr,
+ &dev_attr_conn_fragment_disable.attr,
+ &dev_attr_conn_ipv4_tos.attr,
+ &dev_attr_conn_ipv6_traffic_class.attr,
+ &dev_attr_conn_ipv6_flow_label.attr,
+ &dev_attr_conn_is_fw_assigned_ipv6.attr,
+ &dev_attr_conn_tcp_xmit_wsf.attr,
+ &dev_attr_conn_tcp_recv_wsf.attr,
+ &dev_attr_conn_local_ipaddr.attr,
+ NULL,
+};
+
+static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct iscsi_cls_conn *conn = transport_class_to_conn(cdev);
+ struct iscsi_transport *t = conn->transport;
+ int param;
+
+ if (attr == &dev_attr_conn_max_recv_dlength.attr)
+ param = ISCSI_PARAM_MAX_RECV_DLENGTH;
+ else if (attr == &dev_attr_conn_max_xmit_dlength.attr)
+ param = ISCSI_PARAM_MAX_XMIT_DLENGTH;
+ else if (attr == &dev_attr_conn_header_digest.attr)
+ param = ISCSI_PARAM_HDRDGST_EN;
+ else if (attr == &dev_attr_conn_data_digest.attr)
+ param = ISCSI_PARAM_DATADGST_EN;
+ else if (attr == &dev_attr_conn_ifmarker.attr)
+ param = ISCSI_PARAM_IFMARKER_EN;
+ else if (attr == &dev_attr_conn_ofmarker.attr)
+ param = ISCSI_PARAM_OFMARKER_EN;
+ else if (attr == &dev_attr_conn_address.attr)
+ param = ISCSI_PARAM_CONN_ADDRESS;
+ else if (attr == &dev_attr_conn_port.attr)
+ param = ISCSI_PARAM_CONN_PORT;
+ else if (attr == &dev_attr_conn_exp_statsn.attr)
+ param = ISCSI_PARAM_EXP_STATSN;
+ else if (attr == &dev_attr_conn_persistent_address.attr)
+ param = ISCSI_PARAM_PERSISTENT_ADDRESS;
+ else if (attr == &dev_attr_conn_persistent_port.attr)
+ param = ISCSI_PARAM_PERSISTENT_PORT;
+ else if (attr == &dev_attr_conn_ping_tmo.attr)
+ param = ISCSI_PARAM_PING_TMO;
+ else if (attr == &dev_attr_conn_recv_tmo.attr)
+ param = ISCSI_PARAM_RECV_TMO;
+ else if (attr == &dev_attr_conn_local_port.attr)
+ param = ISCSI_PARAM_LOCAL_PORT;
+ else if (attr == &dev_attr_conn_statsn.attr)
+ param = ISCSI_PARAM_STATSN;
+ else if (attr == &dev_attr_conn_keepalive_tmo.attr)
+ param = ISCSI_PARAM_KEEPALIVE_TMO;
+ else if (attr == &dev_attr_conn_max_segment_size.attr)
+ param = ISCSI_PARAM_MAX_SEGMENT_SIZE;
+ else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr)
+ param = ISCSI_PARAM_TCP_TIMESTAMP_STAT;
+ else if (attr == &dev_attr_conn_tcp_wsf_disable.attr)
+ param = ISCSI_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_conn_tcp_nagle_disable.attr)
+ param = ISCSI_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_conn_tcp_timer_scale.attr)
+ param = ISCSI_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr)
+ param = ISCSI_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_conn_fragment_disable.attr)
+ param = ISCSI_PARAM_IP_FRAGMENT_DISABLE;
+ else if (attr == &dev_attr_conn_ipv4_tos.attr)
+ param = ISCSI_PARAM_IPV4_TOS;
+ else if (attr == &dev_attr_conn_ipv6_traffic_class.attr)
+ param = ISCSI_PARAM_IPV6_TC;
+ else if (attr == &dev_attr_conn_ipv6_flow_label.attr)
+ param = ISCSI_PARAM_IPV6_FLOW_LABEL;
+ else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr)
+ param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6;
+ else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr)
+ param = ISCSI_PARAM_TCP_XMIT_WSF;
+ else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
+ param = ISCSI_PARAM_TCP_RECV_WSF;
+ else if (attr == &dev_attr_conn_local_ipaddr.attr)
+ param = ISCSI_PARAM_LOCAL_IPADDR;
+ else {
+ WARN_ONCE(1, "Invalid conn attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_PARAM, param);
+}
+
+static struct attribute_group iscsi_conn_group = {
+ .attrs = iscsi_conn_attrs,
+ .is_visible = iscsi_conn_attr_is_visible,
+};
+
+/*
+ * iSCSI session attrs
+ */
+#define iscsi_session_attr_show(param, perm) \
+static ssize_t \
+show_session_param_##param(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct iscsi_cls_session *session = \
+ iscsi_dev_to_session(dev->parent); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+ return -EACCES; \
+ return t->get_session_param(session, param, buf); \
+}
+
+#define iscsi_session_attr(field, param, perm) \
+ iscsi_session_attr_show(param, perm) \
+static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
+ NULL);
+iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1);
+iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1);
+iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0);
+iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0);
+iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
+iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0);
+iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0);
+iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0);
+iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0);
+iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0);
+iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0);
+iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0);
+iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0);
+iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0);
+iscsi_session_attr(discovery_auth_optional,
+ ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0);
+iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0);
+iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0);
+iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0);
+iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0);
+iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
+iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
+iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
+
+static ssize_t
+show_priv_session_state(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+}
+static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+ NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+ NULL);
+static ssize_t
+show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->target_id);
+}
+static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
+ show_priv_session_target_id, NULL);
+
+#define iscsi_priv_session_attr_show(field, format) \
+static ssize_t \
+show_priv_session_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct iscsi_cls_session *session = \
+ iscsi_dev_to_session(dev->parent); \
+ if (session->field == -1) \
+ return sprintf(buf, "off\n"); \
+ return sprintf(buf, format"\n", session->field); \
+}
+
+#define iscsi_priv_session_attr_store(field) \
+static ssize_t \
+store_priv_session_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ char *cp; \
+ struct iscsi_cls_session *session = \
+ iscsi_dev_to_session(dev->parent); \
+ if ((session->state == ISCSI_SESSION_FREE) || \
+ (session->state == ISCSI_SESSION_FAILED)) \
+ return -EBUSY; \
+ if (strncmp(buf, "off", 3) == 0) \
+ session->field = -1; \
+ else { \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp != '\0' && *cp != '\n') \
+ return -EINVAL; \
+ session->field = val; \
+ } \
+ return count; \
+}
+
+#define iscsi_priv_session_rw_attr(field, format) \
+ iscsi_priv_session_attr_show(field, format) \
+ iscsi_priv_session_attr_store(field) \
+static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \
+ show_priv_session_##field, \
+ store_priv_session_##field)
+iscsi_priv_session_rw_attr(recovery_tmo, "%d");
+
+static struct attribute *iscsi_session_attrs[] = {
+ &dev_attr_sess_initial_r2t.attr,
+ &dev_attr_sess_max_outstanding_r2t.attr,
+ &dev_attr_sess_immediate_data.attr,
+ &dev_attr_sess_first_burst_len.attr,
+ &dev_attr_sess_max_burst_len.attr,
+ &dev_attr_sess_data_pdu_in_order.attr,
+ &dev_attr_sess_data_seq_in_order.attr,
+ &dev_attr_sess_erl.attr,
+ &dev_attr_sess_targetname.attr,
+ &dev_attr_sess_tpgt.attr,
+ &dev_attr_sess_password.attr,
+ &dev_attr_sess_password_in.attr,
+ &dev_attr_sess_username.attr,
+ &dev_attr_sess_username_in.attr,
+ &dev_attr_sess_fast_abort.attr,
+ &dev_attr_sess_abort_tmo.attr,
+ &dev_attr_sess_lu_reset_tmo.attr,
+ &dev_attr_sess_tgt_reset_tmo.attr,
+ &dev_attr_sess_ifacename.attr,
+ &dev_attr_sess_initiatorname.attr,
+ &dev_attr_sess_targetalias.attr,
+ &dev_attr_sess_boot_root.attr,
+ &dev_attr_sess_boot_nic.attr,
+ &dev_attr_sess_boot_target.attr,
+ &dev_attr_priv_sess_recovery_tmo.attr,
+ &dev_attr_priv_sess_state.attr,
+ &dev_attr_priv_sess_creator.attr,
+ &dev_attr_sess_chap_out_idx.attr,
+ &dev_attr_sess_chap_in_idx.attr,
+ &dev_attr_priv_sess_target_id.attr,
+ &dev_attr_sess_auto_snd_tgt_disable.attr,
+ &dev_attr_sess_discovery_session.attr,
+ &dev_attr_sess_portal_type.attr,
+ &dev_attr_sess_chap_auth.attr,
+ &dev_attr_sess_discovery_logout.attr,
+ &dev_attr_sess_bidi_chap.attr,
+ &dev_attr_sess_discovery_auth_optional.attr,
+ &dev_attr_sess_def_time2wait.attr,
+ &dev_attr_sess_def_time2retain.attr,
+ &dev_attr_sess_isid.attr,
+ &dev_attr_sess_tsid.attr,
+ &dev_attr_sess_def_taskmgmt_tmo.attr,
+ &dev_attr_sess_discovery_parent_idx.attr,
+ &dev_attr_sess_discovery_parent_type.attr,
+ NULL,
+};
+
+static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct iscsi_cls_session *session = transport_class_to_session(cdev);
+ struct iscsi_transport *t = session->transport;
+ int param;
+
+ if (attr == &dev_attr_sess_initial_r2t.attr)
+ param = ISCSI_PARAM_INITIAL_R2T_EN;
+ else if (attr == &dev_attr_sess_max_outstanding_r2t.attr)
+ param = ISCSI_PARAM_MAX_R2T;
+ else if (attr == &dev_attr_sess_immediate_data.attr)
+ param = ISCSI_PARAM_IMM_DATA_EN;
+ else if (attr == &dev_attr_sess_first_burst_len.attr)
+ param = ISCSI_PARAM_FIRST_BURST;
+ else if (attr == &dev_attr_sess_max_burst_len.attr)
+ param = ISCSI_PARAM_MAX_BURST;
+ else if (attr == &dev_attr_sess_data_pdu_in_order.attr)
+ param = ISCSI_PARAM_PDU_INORDER_EN;
+ else if (attr == &dev_attr_sess_data_seq_in_order.attr)
+ param = ISCSI_PARAM_DATASEQ_INORDER_EN;
+ else if (attr == &dev_attr_sess_erl.attr)
+ param = ISCSI_PARAM_ERL;
+ else if (attr == &dev_attr_sess_targetname.attr)
+ param = ISCSI_PARAM_TARGET_NAME;
+ else if (attr == &dev_attr_sess_tpgt.attr)
+ param = ISCSI_PARAM_TPGT;
+ else if (attr == &dev_attr_sess_chap_in_idx.attr)
+ param = ISCSI_PARAM_CHAP_IN_IDX;
+ else if (attr == &dev_attr_sess_chap_out_idx.attr)
+ param = ISCSI_PARAM_CHAP_OUT_IDX;
+ else if (attr == &dev_attr_sess_password.attr)
+ param = ISCSI_PARAM_USERNAME;
+ else if (attr == &dev_attr_sess_password_in.attr)
+ param = ISCSI_PARAM_USERNAME_IN;
+ else if (attr == &dev_attr_sess_username.attr)
+ param = ISCSI_PARAM_PASSWORD;
+ else if (attr == &dev_attr_sess_username_in.attr)
+ param = ISCSI_PARAM_PASSWORD_IN;
+ else if (attr == &dev_attr_sess_fast_abort.attr)
+ param = ISCSI_PARAM_FAST_ABORT;
+ else if (attr == &dev_attr_sess_abort_tmo.attr)
+ param = ISCSI_PARAM_ABORT_TMO;
+ else if (attr == &dev_attr_sess_lu_reset_tmo.attr)
+ param = ISCSI_PARAM_LU_RESET_TMO;
+ else if (attr == &dev_attr_sess_tgt_reset_tmo.attr)
+ param = ISCSI_PARAM_TGT_RESET_TMO;
+ else if (attr == &dev_attr_sess_ifacename.attr)
+ param = ISCSI_PARAM_IFACE_NAME;
+ else if (attr == &dev_attr_sess_initiatorname.attr)
+ param = ISCSI_PARAM_INITIATOR_NAME;
+ else if (attr == &dev_attr_sess_targetalias.attr)
+ param = ISCSI_PARAM_TARGET_ALIAS;
+ else if (attr == &dev_attr_sess_boot_root.attr)
+ param = ISCSI_PARAM_BOOT_ROOT;
+ else if (attr == &dev_attr_sess_boot_nic.attr)
+ param = ISCSI_PARAM_BOOT_NIC;
+ else if (attr == &dev_attr_sess_boot_target.attr)
+ param = ISCSI_PARAM_BOOT_TARGET;
+ else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr)
+ param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE;
+ else if (attr == &dev_attr_sess_discovery_session.attr)
+ param = ISCSI_PARAM_DISCOVERY_SESS;
+ else if (attr == &dev_attr_sess_portal_type.attr)
+ param = ISCSI_PARAM_PORTAL_TYPE;
+ else if (attr == &dev_attr_sess_chap_auth.attr)
+ param = ISCSI_PARAM_CHAP_AUTH_EN;
+ else if (attr == &dev_attr_sess_discovery_logout.attr)
+ param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN;
+ else if (attr == &dev_attr_sess_bidi_chap.attr)
+ param = ISCSI_PARAM_BIDI_CHAP_EN;
+ else if (attr == &dev_attr_sess_discovery_auth_optional.attr)
+ param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL;
+ else if (attr == &dev_attr_sess_def_time2wait.attr)
+ param = ISCSI_PARAM_DEF_TIME2WAIT;
+ else if (attr == &dev_attr_sess_def_time2retain.attr)
+ param = ISCSI_PARAM_DEF_TIME2RETAIN;
+ else if (attr == &dev_attr_sess_isid.attr)
+ param = ISCSI_PARAM_ISID;
+ else if (attr == &dev_attr_sess_tsid.attr)
+ param = ISCSI_PARAM_TSID;
+ else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr)
+ param = ISCSI_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_sess_discovery_parent_idx.attr)
+ param = ISCSI_PARAM_DISCOVERY_PARENT_IDX;
+ else if (attr == &dev_attr_sess_discovery_parent_type.attr)
+ param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE;
+ else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
+ return S_IRUGO | S_IWUSR;
+ else if (attr == &dev_attr_priv_sess_state.attr)
+ return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_creator.attr)
+ return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_target_id.attr)
+ return S_IRUGO;
+ else {
+ WARN_ONCE(1, "Invalid session attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_PARAM, param);
+}
+
+static struct attribute_group iscsi_session_group = {
+ .attrs = iscsi_session_attrs,
+ .is_visible = iscsi_session_attr_is_visible,
+};
+
+/*
+ * iSCSI host attrs
+ */
+#define iscsi_host_attr_show(param) \
+static ssize_t \
+show_host_param_##param(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(dev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+}
+
+#define iscsi_host_attr(field, param) \
+ iscsi_host_attr_show(param) \
+static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+ NULL);
+
+iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE);
+iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED);
+
+static struct attribute *iscsi_host_attrs[] = {
+ &dev_attr_host_netdev.attr,
+ &dev_attr_host_hwaddress.attr,
+ &dev_attr_host_ipaddress.attr,
+ &dev_attr_host_initiatorname.attr,
+ &dev_attr_host_port_state.attr,
+ &dev_attr_host_port_speed.attr,
+ NULL,
+};
+
+static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt);
+ int param;
+
+ if (attr == &dev_attr_host_netdev.attr)
+ param = ISCSI_HOST_PARAM_NETDEV_NAME;
+ else if (attr == &dev_attr_host_hwaddress.attr)
+ param = ISCSI_HOST_PARAM_HWADDRESS;
+ else if (attr == &dev_attr_host_ipaddress.attr)
+ param = ISCSI_HOST_PARAM_IPADDRESS;
+ else if (attr == &dev_attr_host_initiatorname.attr)
+ param = ISCSI_HOST_PARAM_INITIATOR_NAME;
+ else if (attr == &dev_attr_host_port_state.attr)
+ param = ISCSI_HOST_PARAM_PORT_STATE;
+ else if (attr == &dev_attr_host_port_speed.attr)
+ param = ISCSI_HOST_PARAM_PORT_SPEED;
+ else {
+ WARN_ONCE(1, "Invalid host attr");
+ return 0;
+ }
+
+ return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param);
+}
+
+static struct attribute_group iscsi_host_group = {
+ .attrs = iscsi_host_attrs,
+ .is_visible = iscsi_host_attr_is_visible,
+};
+
+/* convert iscsi_port_speed values to ascii string name */
+static const struct {
+ enum iscsi_port_speed value;
+ char *name;
+} iscsi_port_speed_names[] = {
+ {ISCSI_PORT_SPEED_UNKNOWN, "Unknown" },
+ {ISCSI_PORT_SPEED_10MBPS, "10 Mbps" },
+ {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" },
+ {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" },
+ {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" },
+};
+
+char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
+{
+ int i;
+ char *speed = "Unknown!";
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t port_speed = ihost->port_speed;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) {
+ if (iscsi_port_speed_names[i].value & port_speed) {
+ speed = iscsi_port_speed_names[i].name;
+ break;
+ }
+ }
+ return speed;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name);
+
+/* convert iscsi_port_state values to ascii string name */
+static const struct {
+ enum iscsi_port_state value;
+ char *name;
+} iscsi_port_state_names[] = {
+ {ISCSI_PORT_STATE_DOWN, "LINK DOWN" },
+ {ISCSI_PORT_STATE_UP, "LINK UP" },
+};
+
+char *iscsi_get_port_state_name(struct Scsi_Host *shost)
+{
+ int i;
+ char *state = "Unknown!";
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t port_state = ihost->port_state;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) {
+ if (iscsi_port_state_names[i].value & port_state) {
+ state = iscsi_port_state_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_state_name);
+
+static int iscsi_session_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_internal *priv;
+
+ if (!iscsi_is_session_dev(dev))
+ return 0;
+
+ session = iscsi_dev_to_session(dev);
+ shost = iscsi_session_to_shost(session);
+ if (!shost->transportt)
+ return 0;
+
+ priv = to_iscsi_internal(shost->transportt);
+ if (priv->session_cont.ac.class != &iscsi_session_class.class)
+ return 0;
+
+ return &priv->session_cont.ac == cont;
+}
+
+static int iscsi_conn_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+ struct Scsi_Host *shost;
+ struct iscsi_internal *priv;
+
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+
+ conn = iscsi_dev_to_conn(dev);
+ session = iscsi_dev_to_session(conn->dev.parent);
+ shost = iscsi_session_to_shost(session);
+
+ if (!shost->transportt)
+ return 0;
+
+ priv = to_iscsi_internal(shost->transportt);
+ if (priv->conn_cont.ac.class != &iscsi_connection_class.class)
+ return 0;
+
+ return &priv->conn_cont.ac == cont;
+}
+
+static int iscsi_host_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_internal *priv;
+
+ if (!scsi_is_host_device(dev))
+ return 0;
+
+ shost = dev_to_shost(dev);
+ if (!shost->transportt ||
+ shost->transportt->host_attrs.ac.class != &iscsi_host_class.class)
+ return 0;
+
+ priv = to_iscsi_internal(shost->transportt);
+ return &priv->t.host_attrs.ac == cont;
+}
+
+struct scsi_transport_template *
+iscsi_register_transport(struct iscsi_transport *tt)
+{
+ struct iscsi_internal *priv;
+ unsigned long flags;
+ int err;
+
+ BUG_ON(!tt);
+
+ priv = iscsi_if_transport_lookup(tt);
+ if (priv)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ INIT_LIST_HEAD(&priv->list);
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+ priv->t.create_work_queue = 1;
+
+ priv->dev.class = &iscsi_transport_class;
+ dev_set_name(&priv->dev, "%s", tt->name);
+ err = device_register(&priv->dev);
+ if (err)
+ goto free_priv;
+
+ err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
+ if (err)
+ goto unregister_dev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+ priv->t.host_attrs.ac.grp = &iscsi_host_group;
+ priv->t.host_size = sizeof(struct iscsi_cls_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ /* connection parameters */
+ priv->conn_cont.ac.class = &iscsi_connection_class.class;
+ priv->conn_cont.ac.match = iscsi_conn_match;
+ priv->conn_cont.ac.grp = &iscsi_conn_group;
+ transport_container_register(&priv->conn_cont);
+
+ /* session parameters */
+ priv->session_cont.ac.class = &iscsi_session_class.class;
+ priv->session_cont.ac.match = iscsi_session_match;
+ priv->session_cont.ac.grp = &iscsi_session_group;
+ transport_container_register(&priv->session_cont);
+
+ spin_lock_irqsave(&iscsi_transport_lock, flags);
+ list_add(&priv->list, &iscsi_transports);
+ spin_unlock_irqrestore(&iscsi_transport_lock, flags);
+
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+unregister_dev:
+ device_unregister(&priv->dev);
+ return NULL;
+free_priv:
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_register_transport);
+
+int iscsi_unregister_transport(struct iscsi_transport *tt)
+{
+ struct iscsi_internal *priv;
+ unsigned long flags;
+
+ BUG_ON(!tt);
+
+ mutex_lock(&rx_queue_mutex);
+
+ priv = iscsi_if_transport_lookup(tt);
+ BUG_ON (!priv);
+
+ spin_lock_irqsave(&iscsi_transport_lock, flags);
+ list_del(&priv->list);
+ spin_unlock_irqrestore(&iscsi_transport_lock, flags);
+
+ transport_container_unregister(&priv->conn_cont);
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+ sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+ device_unregister(&priv->dev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
+
+static __init int iscsi_transport_init(void)
+{
+ int err;
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ .input = iscsi_if_rx,
+ };
+ printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
+ ISCSI_TRANSPORT_VERSION);
+
+ atomic_set(&iscsi_session_nr, 0);
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+ return err;
+
+ err = class_register(&iscsi_endpoint_class);
+ if (err)
+ goto unregister_transport_class;
+
+ err = class_register(&iscsi_iface_class);
+ if (err)
+ goto unregister_endpoint_class;
+
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+ goto unregister_iface_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+ goto unregister_host_class;
+
+ err = transport_class_register(&iscsi_session_class);
+ if (err)
+ goto unregister_conn_class;
+
+ err = bus_register(&iscsi_flashnode_bus);
+ if (err)
+ goto unregister_session_class;
+
+ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_flashnode_bus;
+ }
+
+ iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+ if (!iscsi_eh_timer_workq) {
+ err = -ENOMEM;
+ goto release_nls;
+ }
+
+ return 0;
+
+release_nls:
+ netlink_kernel_release(nls);
+unregister_flashnode_bus:
+ bus_unregister(&iscsi_flashnode_bus);
+unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+unregister_iface_class:
+ class_unregister(&iscsi_iface_class);
+unregister_endpoint_class:
+ class_unregister(&iscsi_endpoint_class);
+unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+}
+
+static void __exit iscsi_transport_exit(void)
+{
+ destroy_workqueue(iscsi_eh_timer_workq);
+ netlink_kernel_release(nls);
+ bus_unregister(&iscsi_flashnode_bus);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+ class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_iface_class);
+ class_unregister(&iscsi_transport_class);
+}
+
+module_init(iscsi_transport_init);
+module_exit(iscsi_transport_exit);
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+ "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+ "Alex Aizman <itn780@yahoo.com>");
+MODULE_DESCRIPTION("iSCSI Transport Interface");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
new file mode 100644
index 000000000..9a058194b
--- /dev/null
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -0,0 +1,1971 @@
+/*
+ * Copyright (C) 2005-2006 Dell Inc.
+ * Released under GPL v2.
+ *
+ * Serial Attached SCSI (SAS) transport class.
+ *
+ * The SAS transport class contains common code to deal with SAS HBAs,
+ * an aproximated representation of SAS topologies in the driver model,
+ * and various sysfs attributes to expose these topologies and management
+ * interfaces to userspace.
+ *
+ * In addition to the basic SCSI core objects this transport class
+ * introduces two additional intermediate objects: The SAS PHY
+ * as represented by struct sas_phy defines an "outgoing" PHY on
+ * a SAS HBA or Expander, and the SAS remote PHY represented by
+ * struct sas_rphy defines an "incoming" PHY on a SAS Expander or
+ * end device. Note that this is purely a software concept, the
+ * underlying hardware for a PHY and a remote PHY is the exactly
+ * the same.
+ *
+ * There is no concept of a SAS port in this code, users can see
+ * what PHYs form a wide port based on the port_identifier attribute,
+ * which is the same for all PHYs in a port.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/bsg.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "scsi_sas_internal.h"
+struct sas_host_attrs {
+ struct list_head rphy_list;
+ struct mutex lock;
+ struct request_queue *q;
+ u32 next_target_id;
+ u32 next_expander_id;
+ int next_port_id;
+};
+#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
+
+
+/*
+ * Hack to allow attributes of the same name in different objects.
+ */
+#define SAS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
+ struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+
+/*
+ * Pretty printing helpers
+ */
+
+#define sas_bitfield_name_match(title, table) \
+static ssize_t \
+get_sas_##title##_names(u32 table_key, char *buf) \
+{ \
+ char *prefix = ""; \
+ ssize_t len = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value & table_key) { \
+ len += sprintf(buf + len, "%s%s", \
+ prefix, table[i].name); \
+ prefix = ", "; \
+ } \
+ } \
+ len += sprintf(buf + len, "\n"); \
+ return len; \
+}
+
+#define sas_bitfield_name_set(title, table) \
+static ssize_t \
+set_sas_##title##_names(u32 *table_key, const char *buf) \
+{ \
+ ssize_t len = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ len = strlen(table[i].name); \
+ if (strncmp(buf, table[i].name, len) == 0 && \
+ (buf[len] == '\n' || buf[len] == '\0')) { \
+ *table_key = table[i].value; \
+ return 0; \
+ } \
+ } \
+ return -EINVAL; \
+}
+
+#define sas_bitfield_name_search(title, table) \
+static ssize_t \
+get_sas_##title##_names(u32 table_key, char *buf) \
+{ \
+ ssize_t len = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value == table_key) { \
+ len += sprintf(buf + len, "%s", \
+ table[i].name); \
+ break; \
+ } \
+ } \
+ len += sprintf(buf + len, "\n"); \
+ return len; \
+}
+
+static struct {
+ u32 value;
+ char *name;
+} sas_device_type_names[] = {
+ { SAS_PHY_UNUSED, "unused" },
+ { SAS_END_DEVICE, "end device" },
+ { SAS_EDGE_EXPANDER_DEVICE, "edge expander" },
+ { SAS_FANOUT_EXPANDER_DEVICE, "fanout expander" },
+};
+sas_bitfield_name_search(device_type, sas_device_type_names)
+
+
+static struct {
+ u32 value;
+ char *name;
+} sas_protocol_names[] = {
+ { SAS_PROTOCOL_SATA, "sata" },
+ { SAS_PROTOCOL_SMP, "smp" },
+ { SAS_PROTOCOL_STP, "stp" },
+ { SAS_PROTOCOL_SSP, "ssp" },
+};
+sas_bitfield_name_match(protocol, sas_protocol_names)
+
+static struct {
+ u32 value;
+ char *name;
+} sas_linkspeed_names[] = {
+ { SAS_LINK_RATE_UNKNOWN, "Unknown" },
+ { SAS_PHY_DISABLED, "Phy disabled" },
+ { SAS_LINK_RATE_FAILED, "Link Rate failed" },
+ { SAS_SATA_SPINUP_HOLD, "Spin-up hold" },
+ { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
+ { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
+ { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+};
+sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
+sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
+
+static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
+{
+ struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
+ struct sas_end_device *rdev;
+
+ BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
+
+ rdev = rphy_to_end_device(rphy);
+ return rdev;
+}
+
+static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct request *req;
+ int ret;
+ int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
+
+ while ((req = blk_fetch_request(q)) != NULL) {
+ spin_unlock_irq(q->queue_lock);
+
+ handler = to_sas_internal(shost->transportt)->f->smp_handler;
+ ret = handler(shost, rphy, req);
+ req->errors = ret;
+
+ blk_end_request_all(req, ret);
+
+ spin_lock_irq(q->queue_lock);
+ }
+}
+
+static void sas_host_smp_request(struct request_queue *q)
+{
+ sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL);
+}
+
+static void sas_non_host_smp_request(struct request_queue *q)
+{
+ struct sas_rphy *rphy = q->queuedata;
+ sas_smp_request(q, rphy_to_shost(rphy), rphy);
+}
+
+static void sas_host_release(struct device *dev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct request_queue *q = sas_host->q;
+
+ if (q)
+ blk_cleanup_queue(q);
+}
+
+static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
+{
+ struct request_queue *q;
+ int error;
+ struct device *dev;
+ char namebuf[20];
+ const char *name;
+ void (*release)(struct device *);
+
+ if (!to_sas_internal(shost->transportt)->f->smp_handler) {
+ printk("%s can't handle SMP requests\n", shost->hostt->name);
+ return 0;
+ }
+
+ if (rphy) {
+ q = blk_init_queue(sas_non_host_smp_request, NULL);
+ dev = &rphy->dev;
+ name = dev_name(dev);
+ release = NULL;
+ } else {
+ q = blk_init_queue(sas_host_smp_request, NULL);
+ dev = &shost->shost_gendev;
+ snprintf(namebuf, sizeof(namebuf),
+ "sas_host%d", shost->host_no);
+ name = namebuf;
+ release = sas_host_release;
+ }
+ if (!q)
+ return -ENOMEM;
+
+ error = bsg_register_queue(q, dev, name, release);
+ if (error) {
+ blk_cleanup_queue(q);
+ return -ENOMEM;
+ }
+
+ if (rphy)
+ rphy->q = q;
+ else
+ to_sas_host_attrs(shost)->q = q;
+
+ if (rphy)
+ q->queuedata = rphy;
+ else
+ q->queuedata = shost;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ return 0;
+}
+
+static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
+{
+ struct request_queue *q;
+
+ if (rphy)
+ q = rphy->q;
+ else
+ q = to_sas_host_attrs(shost)->q;
+
+ if (!q)
+ return;
+
+ bsg_unregister_queue(q);
+}
+
+/*
+ * SAS host attributes
+ */
+
+static int sas_host_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+
+ INIT_LIST_HEAD(&sas_host->rphy_list);
+ mutex_init(&sas_host->lock);
+ sas_host->next_target_id = 0;
+ sas_host->next_expander_id = 0;
+ sas_host->next_port_id = 0;
+
+ if (sas_bsg_initialize(shost, NULL))
+ dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
+ shost->host_no);
+
+ return 0;
+}
+
+static int sas_host_remove(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+
+ sas_bsg_remove(shost, NULL);
+
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(sas_host_class,
+ "sas_host", sas_host_setup, sas_host_remove, NULL);
+
+static int sas_host_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct sas_internal *i;
+
+ if (!scsi_is_host_device(dev))
+ return 0;
+ shost = dev_to_shost(dev);
+
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class !=
+ &sas_host_class.class)
+ return 0;
+
+ i = to_sas_internal(shost->transportt);
+ return &i->t.host_attrs.ac == cont;
+}
+
+static int do_sas_phy_delete(struct device *dev, void *data)
+{
+ int pass = (int)(unsigned long)data;
+
+ if (pass == 0 && scsi_is_sas_port(dev))
+ sas_port_delete(dev_to_sas_port(dev));
+ else if (pass == 1 && scsi_is_sas_phy(dev))
+ sas_phy_delete(dev_to_phy(dev));
+ return 0;
+}
+
+/**
+ * sas_remove_children - tear down a devices SAS data structures
+ * @dev: device belonging to the sas object
+ *
+ * Removes all SAS PHYs and remote PHYs for a given object
+ */
+void sas_remove_children(struct device *dev)
+{
+ device_for_each_child(dev, (void *)0, do_sas_phy_delete);
+ device_for_each_child(dev, (void *)1, do_sas_phy_delete);
+}
+EXPORT_SYMBOL(sas_remove_children);
+
+/**
+ * sas_remove_host - tear down a Scsi_Host's SAS data structures
+ * @shost: Scsi Host that is torn down
+ *
+ * Removes all SAS PHYs and remote PHYs for a given Scsi_Host.
+ * Must be called just before scsi_remove_host for SAS HBAs.
+ */
+void sas_remove_host(struct Scsi_Host *shost)
+{
+ sas_remove_children(&shost->shost_gendev);
+}
+EXPORT_SYMBOL(sas_remove_host);
+
+/**
+ * sas_tlr_supported - checking TLR bit in vpd 0x90
+ * @sdev: scsi device struct
+ *
+ * Check Transport Layer Retries are supported or not.
+ * If vpd page 0x90 is present, TRL is supported.
+ *
+ */
+unsigned int
+sas_tlr_supported(struct scsi_device *sdev)
+{
+ const int vpd_len = 32;
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+ char *buffer = kzalloc(vpd_len, GFP_KERNEL);
+ int ret = 0;
+
+ if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
+ goto out;
+
+ /*
+ * Magic numbers: the VPD Protocol page (0x90)
+ * has a 4 byte header and then one entry per device port
+ * the TLR bit is at offset 8 on each port entry
+ * if we take the first port, that's at total offset 12
+ */
+ ret = buffer[12] & 0x01;
+
+ out:
+ kfree(buffer);
+ rdev->tlr_supported = ret;
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(sas_tlr_supported);
+
+/**
+ * sas_disable_tlr - setting TLR flags
+ * @sdev: scsi device struct
+ *
+ * Seting tlr_enabled flag to 0.
+ *
+ */
+void
+sas_disable_tlr(struct scsi_device *sdev)
+{
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+ rdev->tlr_enabled = 0;
+}
+EXPORT_SYMBOL_GPL(sas_disable_tlr);
+
+/**
+ * sas_enable_tlr - setting TLR flags
+ * @sdev: scsi device struct
+ *
+ * Seting tlr_enabled flag 1.
+ *
+ */
+void sas_enable_tlr(struct scsi_device *sdev)
+{
+ unsigned int tlr_supported = 0;
+ tlr_supported = sas_tlr_supported(sdev);
+
+ if (tlr_supported) {
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+ rdev->tlr_enabled = 1;
+ }
+
+ return;
+}
+EXPORT_SYMBOL_GPL(sas_enable_tlr);
+
+unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
+{
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+ return rdev->tlr_enabled;
+}
+EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+
+/*
+ * SAS Phy attributes
+ */
+
+#define sas_phy_show_simple(field, name, format_string, cast) \
+static ssize_t \
+show_sas_phy_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_phy *phy = transport_class_to_phy(dev); \
+ \
+ return snprintf(buf, 20, format_string, cast phy->field); \
+}
+
+#define sas_phy_simple_attr(field, name, format_string, type) \
+ sas_phy_show_simple(field, name, format_string, (type)) \
+static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL)
+
+#define sas_phy_show_protocol(field, name) \
+static ssize_t \
+show_sas_phy_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_phy *phy = transport_class_to_phy(dev); \
+ \
+ if (!phy->field) \
+ return snprintf(buf, 20, "none\n"); \
+ return get_sas_protocol_names(phy->field, buf); \
+}
+
+#define sas_phy_protocol_attr(field, name) \
+ sas_phy_show_protocol(field, name) \
+static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL)
+
+#define sas_phy_show_linkspeed(field) \
+static ssize_t \
+show_sas_phy_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_phy *phy = transport_class_to_phy(dev); \
+ \
+ return get_sas_linkspeed_names(phy->field, buf); \
+}
+
+/* Fudge to tell if we're minimum or maximum */
+#define sas_phy_store_linkspeed(field) \
+static ssize_t \
+store_sas_phy_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct sas_phy *phy = transport_class_to_phy(dev); \
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \
+ struct sas_internal *i = to_sas_internal(shost->transportt); \
+ u32 value; \
+ struct sas_phy_linkrates rates = {0}; \
+ int error; \
+ \
+ error = set_sas_linkspeed_names(&value, buf); \
+ if (error) \
+ return error; \
+ rates.field = value; \
+ error = i->f->set_phy_speed(phy, &rates); \
+ \
+ return error ? error : count; \
+}
+
+#define sas_phy_linkspeed_rw_attr(field) \
+ sas_phy_show_linkspeed(field) \
+ sas_phy_store_linkspeed(field) \
+static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \
+ store_sas_phy_##field)
+
+#define sas_phy_linkspeed_attr(field) \
+ sas_phy_show_linkspeed(field) \
+static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)
+
+
+#define sas_phy_show_linkerror(field) \
+static ssize_t \
+show_sas_phy_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_phy *phy = transport_class_to_phy(dev); \
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \
+ struct sas_internal *i = to_sas_internal(shost->transportt); \
+ int error; \
+ \
+ error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \
+ if (error) \
+ return error; \
+ return snprintf(buf, 20, "%u\n", phy->field); \
+}
+
+#define sas_phy_linkerror_attr(field) \
+ sas_phy_show_linkerror(field) \
+static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)
+
+
+static ssize_t
+show_sas_device_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sas_phy *phy = transport_class_to_phy(dev);
+
+ if (!phy->identify.device_type)
+ return snprintf(buf, 20, "none\n");
+ return get_sas_device_type_names(phy->identify.device_type, buf);
+}
+static DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL);
+
+static ssize_t do_sas_phy_enable(struct device *dev,
+ size_t count, int enable)
+{
+ struct sas_phy *phy = transport_class_to_phy(dev);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+ int error;
+
+ error = i->f->phy_enable(phy, enable);
+ if (error)
+ return error;
+ phy->enabled = enable;
+ return count;
+};
+
+static ssize_t
+store_sas_phy_enable(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (count < 1)
+ return -EINVAL;
+
+ switch (buf[0]) {
+ case '0':
+ do_sas_phy_enable(dev, count, 0);
+ break;
+ case '1':
+ do_sas_phy_enable(dev, count, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+show_sas_phy_enable(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sas_phy *phy = transport_class_to_phy(dev);
+
+ return snprintf(buf, 20, "%d", phy->enabled);
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable,
+ store_sas_phy_enable);
+
+static ssize_t
+do_sas_phy_reset(struct device *dev, size_t count, int hard_reset)
+{
+ struct sas_phy *phy = transport_class_to_phy(dev);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+ int error;
+
+ error = i->f->phy_reset(phy, hard_reset);
+ if (error)
+ return error;
+ phy->enabled = 1;
+ return count;
+};
+
+static ssize_t
+store_sas_link_reset(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return do_sas_phy_reset(dev, count, 0);
+}
+static DEVICE_ATTR(link_reset, S_IWUSR, NULL, store_sas_link_reset);
+
+static ssize_t
+store_sas_hard_reset(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return do_sas_phy_reset(dev, count, 1);
+}
+static DEVICE_ATTR(hard_reset, S_IWUSR, NULL, store_sas_hard_reset);
+
+sas_phy_protocol_attr(identify.initiator_port_protocols,
+ initiator_port_protocols);
+sas_phy_protocol_attr(identify.target_port_protocols,
+ target_port_protocols);
+sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
+ unsigned long long);
+sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
+sas_phy_linkspeed_attr(negotiated_linkrate);
+sas_phy_linkspeed_attr(minimum_linkrate_hw);
+sas_phy_linkspeed_rw_attr(minimum_linkrate);
+sas_phy_linkspeed_attr(maximum_linkrate_hw);
+sas_phy_linkspeed_rw_attr(maximum_linkrate);
+sas_phy_linkerror_attr(invalid_dword_count);
+sas_phy_linkerror_attr(running_disparity_error_count);
+sas_phy_linkerror_attr(loss_of_dword_sync_count);
+sas_phy_linkerror_attr(phy_reset_problem_count);
+
+static int sas_phy_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct sas_phy *phy = dev_to_phy(dev);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ if (i->f->phy_setup)
+ i->f->phy_setup(phy);
+
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(sas_phy_class,
+ "sas_phy", sas_phy_setup, NULL, NULL);
+
+static int sas_phy_match(struct attribute_container *cont, struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct sas_internal *i;
+
+ if (!scsi_is_sas_phy(dev))
+ return 0;
+ shost = dev_to_shost(dev->parent);
+
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class !=
+ &sas_host_class.class)
+ return 0;
+
+ i = to_sas_internal(shost->transportt);
+ return &i->phy_attr_cont.ac == cont;
+}
+
+static void sas_phy_release(struct device *dev)
+{
+ struct sas_phy *phy = dev_to_phy(dev);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ if (i->f->phy_release)
+ i->f->phy_release(phy);
+ put_device(dev->parent);
+ kfree(phy);
+}
+
+/**
+ * sas_phy_alloc - allocates and initialize a SAS PHY structure
+ * @parent: Parent device
+ * @number: Phy index
+ *
+ * Allocates an SAS PHY structure. It will be added in the device tree
+ * below the device specified by @parent, which has to be either a Scsi_Host
+ * or sas_rphy.
+ *
+ * Returns:
+ * SAS PHY allocated or %NULL if the allocation failed.
+ */
+struct sas_phy *sas_phy_alloc(struct device *parent, int number)
+{
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ struct sas_phy *phy;
+
+ phy = kzalloc(sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->number = number;
+ phy->enabled = 1;
+
+ device_initialize(&phy->dev);
+ phy->dev.parent = get_device(parent);
+ phy->dev.release = sas_phy_release;
+ INIT_LIST_HEAD(&phy->port_siblings);
+ if (scsi_is_sas_expander_device(parent)) {
+ struct sas_rphy *rphy = dev_to_rphy(parent);
+ dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no,
+ rphy->scsi_target_id, number);
+ } else
+ dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number);
+
+ transport_setup_device(&phy->dev);
+
+ return phy;
+}
+EXPORT_SYMBOL(sas_phy_alloc);
+
+/**
+ * sas_phy_add - add a SAS PHY to the device hierarchy
+ * @phy: The PHY to be added
+ *
+ * Publishes a SAS PHY to the rest of the system.
+ */
+int sas_phy_add(struct sas_phy *phy)
+{
+ int error;
+
+ error = device_add(&phy->dev);
+ if (!error) {
+ transport_add_device(&phy->dev);
+ transport_configure_device(&phy->dev);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(sas_phy_add);
+
+/**
+ * sas_phy_free - free a SAS PHY
+ * @phy: SAS PHY to free
+ *
+ * Frees the specified SAS PHY.
+ *
+ * Note:
+ * This function must only be called on a PHY that has not
+ * successfully been added using sas_phy_add().
+ */
+void sas_phy_free(struct sas_phy *phy)
+{
+ transport_destroy_device(&phy->dev);
+ put_device(&phy->dev);
+}
+EXPORT_SYMBOL(sas_phy_free);
+
+/**
+ * sas_phy_delete - remove SAS PHY
+ * @phy: SAS PHY to remove
+ *
+ * Removes the specified SAS PHY. If the SAS PHY has an
+ * associated remote PHY it is removed before.
+ */
+void
+sas_phy_delete(struct sas_phy *phy)
+{
+ struct device *dev = &phy->dev;
+
+ /* this happens if the phy is still part of a port when deleted */
+ BUG_ON(!list_empty(&phy->port_siblings));
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+ put_device(dev);
+}
+EXPORT_SYMBOL(sas_phy_delete);
+
+/**
+ * scsi_is_sas_phy - check if a struct device represents a SAS PHY
+ * @dev: device to check
+ *
+ * Returns:
+ * %1 if the device represents a SAS PHY, %0 else
+ */
+int scsi_is_sas_phy(const struct device *dev)
+{
+ return dev->release == sas_phy_release;
+}
+EXPORT_SYMBOL(scsi_is_sas_phy);
+
+/*
+ * SAS Port attributes
+ */
+#define sas_port_show_simple(field, name, format_string, cast) \
+static ssize_t \
+show_sas_port_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_port *port = transport_class_to_sas_port(dev); \
+ \
+ return snprintf(buf, 20, format_string, cast port->field); \
+}
+
+#define sas_port_simple_attr(field, name, format_string, type) \
+ sas_port_show_simple(field, name, format_string, (type)) \
+static DEVICE_ATTR(name, S_IRUGO, show_sas_port_##name, NULL)
+
+sas_port_simple_attr(num_phys, num_phys, "%d\n", int);
+
+static DECLARE_TRANSPORT_CLASS(sas_port_class,
+ "sas_port", NULL, NULL, NULL);
+
+static int sas_port_match(struct attribute_container *cont, struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct sas_internal *i;
+
+ if (!scsi_is_sas_port(dev))
+ return 0;
+ shost = dev_to_shost(dev->parent);
+
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class !=
+ &sas_host_class.class)
+ return 0;
+
+ i = to_sas_internal(shost->transportt);
+ return &i->port_attr_cont.ac == cont;
+}
+
+
+static void sas_port_release(struct device *dev)
+{
+ struct sas_port *port = dev_to_sas_port(dev);
+
+ BUG_ON(!list_empty(&port->phy_list));
+
+ put_device(dev->parent);
+ kfree(port);
+}
+
+static void sas_port_create_link(struct sas_port *port,
+ struct sas_phy *phy)
+{
+ int res;
+
+ res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
+ dev_name(&phy->dev));
+ if (res)
+ goto err;
+ res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
+ if (res)
+ goto err;
+ return;
+err:
+ printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
+ __func__, res);
+}
+
+static void sas_port_delete_link(struct sas_port *port,
+ struct sas_phy *phy)
+{
+ sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev));
+ sysfs_remove_link(&phy->dev.kobj, "port");
+}
+
+/** sas_port_alloc - allocate and initialize a SAS port structure
+ *
+ * @parent: parent device
+ * @port_id: port number
+ *
+ * Allocates a SAS port structure. It will be added to the device tree
+ * below the device specified by @parent which must be either a Scsi_Host
+ * or a sas_expander_device.
+ *
+ * Returns %NULL on error
+ */
+struct sas_port *sas_port_alloc(struct device *parent, int port_id)
+{
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ struct sas_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return NULL;
+
+ port->port_identifier = port_id;
+
+ device_initialize(&port->dev);
+
+ port->dev.parent = get_device(parent);
+ port->dev.release = sas_port_release;
+
+ mutex_init(&port->phy_list_mutex);
+ INIT_LIST_HEAD(&port->phy_list);
+
+ if (scsi_is_sas_expander_device(parent)) {
+ struct sas_rphy *rphy = dev_to_rphy(parent);
+ dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no,
+ rphy->scsi_target_id, port->port_identifier);
+ } else
+ dev_set_name(&port->dev, "port-%d:%d", shost->host_no,
+ port->port_identifier);
+
+ transport_setup_device(&port->dev);
+
+ return port;
+}
+EXPORT_SYMBOL(sas_port_alloc);
+
+/** sas_port_alloc_num - allocate and initialize a SAS port structure
+ *
+ * @parent: parent device
+ *
+ * Allocates a SAS port structure and a number to go with it. This
+ * interface is really for adapters where the port number has no
+ * meansing, so the sas class should manage them. It will be added to
+ * the device tree below the device specified by @parent which must be
+ * either a Scsi_Host or a sas_expander_device.
+ *
+ * Returns %NULL on error
+ */
+struct sas_port *sas_port_alloc_num(struct device *parent)
+{
+ int index;
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+
+ /* FIXME: use idr for this eventually */
+ mutex_lock(&sas_host->lock);
+ if (scsi_is_sas_expander_device(parent)) {
+ struct sas_rphy *rphy = dev_to_rphy(parent);
+ struct sas_expander_device *exp = rphy_to_expander_device(rphy);
+
+ index = exp->next_port_id++;
+ } else
+ index = sas_host->next_port_id++;
+ mutex_unlock(&sas_host->lock);
+ return sas_port_alloc(parent, index);
+}
+EXPORT_SYMBOL(sas_port_alloc_num);
+
+/**
+ * sas_port_add - add a SAS port to the device hierarchy
+ * @port: port to be added
+ *
+ * publishes a port to the rest of the system
+ */
+int sas_port_add(struct sas_port *port)
+{
+ int error;
+
+ /* No phys should be added until this is made visible */
+ BUG_ON(!list_empty(&port->phy_list));
+
+ error = device_add(&port->dev);
+
+ if (error)
+ return error;
+
+ transport_add_device(&port->dev);
+ transport_configure_device(&port->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sas_port_add);
+
+/**
+ * sas_port_free - free a SAS PORT
+ * @port: SAS PORT to free
+ *
+ * Frees the specified SAS PORT.
+ *
+ * Note:
+ * This function must only be called on a PORT that has not
+ * successfully been added using sas_port_add().
+ */
+void sas_port_free(struct sas_port *port)
+{
+ transport_destroy_device(&port->dev);
+ put_device(&port->dev);
+}
+EXPORT_SYMBOL(sas_port_free);
+
+/**
+ * sas_port_delete - remove SAS PORT
+ * @port: SAS PORT to remove
+ *
+ * Removes the specified SAS PORT. If the SAS PORT has an
+ * associated phys, unlink them from the port as well.
+ */
+void sas_port_delete(struct sas_port *port)
+{
+ struct device *dev = &port->dev;
+ struct sas_phy *phy, *tmp_phy;
+
+ if (port->rphy) {
+ sas_rphy_delete(port->rphy);
+ port->rphy = NULL;
+ }
+
+ mutex_lock(&port->phy_list_mutex);
+ list_for_each_entry_safe(phy, tmp_phy, &port->phy_list,
+ port_siblings) {
+ sas_port_delete_link(port, phy);
+ list_del_init(&phy->port_siblings);
+ }
+ mutex_unlock(&port->phy_list_mutex);
+
+ if (port->is_backlink) {
+ struct device *parent = port->dev.parent;
+
+ sysfs_remove_link(&port->dev.kobj, dev_name(parent));
+ port->is_backlink = 0;
+ }
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+ put_device(dev);
+}
+EXPORT_SYMBOL(sas_port_delete);
+
+/**
+ * scsi_is_sas_port - check if a struct device represents a SAS port
+ * @dev: device to check
+ *
+ * Returns:
+ * %1 if the device represents a SAS Port, %0 else
+ */
+int scsi_is_sas_port(const struct device *dev)
+{
+ return dev->release == sas_port_release;
+}
+EXPORT_SYMBOL(scsi_is_sas_port);
+
+/**
+ * sas_port_get_phy - try to take a reference on a port member
+ * @port: port to check
+ */
+struct sas_phy *sas_port_get_phy(struct sas_port *port)
+{
+ struct sas_phy *phy;
+
+ mutex_lock(&port->phy_list_mutex);
+ if (list_empty(&port->phy_list))
+ phy = NULL;
+ else {
+ struct list_head *ent = port->phy_list.next;
+
+ phy = list_entry(ent, typeof(*phy), port_siblings);
+ get_device(&phy->dev);
+ }
+ mutex_unlock(&port->phy_list_mutex);
+
+ return phy;
+}
+EXPORT_SYMBOL(sas_port_get_phy);
+
+/**
+ * sas_port_add_phy - add another phy to a port to form a wide port
+ * @port: port to add the phy to
+ * @phy: phy to add
+ *
+ * When a port is initially created, it is empty (has no phys). All
+ * ports must have at least one phy to operated, and all wide ports
+ * must have at least two. The current code makes no difference
+ * between ports and wide ports, but the only object that can be
+ * connected to a remote device is a port, so ports must be formed on
+ * all devices with phys if they're connected to anything.
+ */
+void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
+{
+ mutex_lock(&port->phy_list_mutex);
+ if (unlikely(!list_empty(&phy->port_siblings))) {
+ /* make sure we're already on this port */
+ struct sas_phy *tmp;
+
+ list_for_each_entry(tmp, &port->phy_list, port_siblings)
+ if (tmp == phy)
+ break;
+ /* If this trips, you added a phy that was already
+ * part of a different port */
+ if (unlikely(tmp != phy)) {
+ dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n",
+ dev_name(&phy->dev));
+ BUG();
+ }
+ } else {
+ sas_port_create_link(port, phy);
+ list_add_tail(&phy->port_siblings, &port->phy_list);
+ port->num_phys++;
+ }
+ mutex_unlock(&port->phy_list_mutex);
+}
+EXPORT_SYMBOL(sas_port_add_phy);
+
+/**
+ * sas_port_delete_phy - remove a phy from a port or wide port
+ * @port: port to remove the phy from
+ * @phy: phy to remove
+ *
+ * This operation is used for tearing down ports again. It must be
+ * done to every port or wide port before calling sas_port_delete.
+ */
+void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
+{
+ mutex_lock(&port->phy_list_mutex);
+ sas_port_delete_link(port, phy);
+ list_del_init(&phy->port_siblings);
+ port->num_phys--;
+ mutex_unlock(&port->phy_list_mutex);
+}
+EXPORT_SYMBOL(sas_port_delete_phy);
+
+void sas_port_mark_backlink(struct sas_port *port)
+{
+ int res;
+ struct device *parent = port->dev.parent->parent->parent;
+
+ if (port->is_backlink)
+ return;
+ port->is_backlink = 1;
+ res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
+ dev_name(parent));
+ if (res)
+ goto err;
+ return;
+err:
+ printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
+ __func__, res);
+
+}
+EXPORT_SYMBOL(sas_port_mark_backlink);
+
+/*
+ * SAS remote PHY attributes.
+ */
+
+#define sas_rphy_show_simple(field, name, format_string, cast) \
+static ssize_t \
+show_sas_rphy_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_rphy *rphy = transport_class_to_rphy(dev); \
+ \
+ return snprintf(buf, 20, format_string, cast rphy->field); \
+}
+
+#define sas_rphy_simple_attr(field, name, format_string, type) \
+ sas_rphy_show_simple(field, name, format_string, (type)) \
+static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \
+ show_sas_rphy_##name, NULL)
+
+#define sas_rphy_show_protocol(field, name) \
+static ssize_t \
+show_sas_rphy_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_rphy *rphy = transport_class_to_rphy(dev); \
+ \
+ if (!rphy->field) \
+ return snprintf(buf, 20, "none\n"); \
+ return get_sas_protocol_names(rphy->field, buf); \
+}
+
+#define sas_rphy_protocol_attr(field, name) \
+ sas_rphy_show_protocol(field, name) \
+static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \
+ show_sas_rphy_##name, NULL)
+
+static ssize_t
+show_sas_rphy_device_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sas_rphy *rphy = transport_class_to_rphy(dev);
+
+ if (!rphy->identify.device_type)
+ return snprintf(buf, 20, "none\n");
+ return get_sas_device_type_names(
+ rphy->identify.device_type, buf);
+}
+
+static SAS_DEVICE_ATTR(rphy, device_type, S_IRUGO,
+ show_sas_rphy_device_type, NULL);
+
+static ssize_t
+show_sas_rphy_enclosure_identifier(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sas_rphy *rphy = transport_class_to_rphy(dev);
+ struct sas_phy *phy = dev_to_phy(rphy->dev.parent);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+ u64 identifier;
+ int error;
+
+ /*
+ * Only devices behind an expander are supported, because the
+ * enclosure identifier is a SMP feature.
+ */
+ if (scsi_is_sas_phy_local(phy))
+ return -EINVAL;
+
+ error = i->f->get_enclosure_identifier(rphy, &identifier);
+ if (error)
+ return error;
+ return sprintf(buf, "0x%llx\n", (unsigned long long)identifier);
+}
+
+static SAS_DEVICE_ATTR(rphy, enclosure_identifier, S_IRUGO,
+ show_sas_rphy_enclosure_identifier, NULL);
+
+static ssize_t
+show_sas_rphy_bay_identifier(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sas_rphy *rphy = transport_class_to_rphy(dev);
+ struct sas_phy *phy = dev_to_phy(rphy->dev.parent);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+ int val;
+
+ if (scsi_is_sas_phy_local(phy))
+ return -EINVAL;
+
+ val = i->f->get_bay_identifier(rphy);
+ if (val < 0)
+ return val;
+ return sprintf(buf, "%d\n", val);
+}
+
+static SAS_DEVICE_ATTR(rphy, bay_identifier, S_IRUGO,
+ show_sas_rphy_bay_identifier, NULL);
+
+sas_rphy_protocol_attr(identify.initiator_port_protocols,
+ initiator_port_protocols);
+sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
+sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
+ unsigned long long);
+sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+
+/* only need 8 bytes of data plus header (4 or 8) */
+#define BUF_SIZE 64
+
+int sas_read_port_mode_page(struct scsi_device *sdev)
+{
+ char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+ struct scsi_mode_data mode_data;
+ int res, error;
+
+ if (!buffer)
+ return -ENOMEM;
+
+ res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
+ &mode_data, NULL);
+
+ error = -EINVAL;
+ if (!scsi_status_is_good(res))
+ goto out;
+
+ msdata = buffer + mode_data.header_length +
+ mode_data.block_descriptor_length;
+
+ if (msdata - buffer > BUF_SIZE - 8)
+ goto out;
+
+ error = 0;
+
+ rdev->ready_led_meaning = msdata[2] & 0x10 ? 1 : 0;
+ rdev->I_T_nexus_loss_timeout = (msdata[4] << 8) + msdata[5];
+ rdev->initiator_response_timeout = (msdata[6] << 8) + msdata[7];
+
+ out:
+ kfree(buffer);
+ return error;
+}
+EXPORT_SYMBOL(sas_read_port_mode_page);
+
+static DECLARE_TRANSPORT_CLASS(sas_end_dev_class,
+ "sas_end_device", NULL, NULL, NULL);
+
+#define sas_end_dev_show_simple(field, name, format_string, cast) \
+static ssize_t \
+show_sas_end_dev_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_rphy *rphy = transport_class_to_rphy(dev); \
+ struct sas_end_device *rdev = rphy_to_end_device(rphy); \
+ \
+ return snprintf(buf, 20, format_string, cast rdev->field); \
+}
+
+#define sas_end_dev_simple_attr(field, name, format_string, type) \
+ sas_end_dev_show_simple(field, name, format_string, (type)) \
+static SAS_DEVICE_ATTR(end_dev, name, S_IRUGO, \
+ show_sas_end_dev_##name, NULL)
+
+sas_end_dev_simple_attr(ready_led_meaning, ready_led_meaning, "%d\n", int);
+sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout,
+ "%d\n", int);
+sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout,
+ "%d\n", int);
+sas_end_dev_simple_attr(tlr_supported, tlr_supported,
+ "%d\n", int);
+sas_end_dev_simple_attr(tlr_enabled, tlr_enabled,
+ "%d\n", int);
+
+static DECLARE_TRANSPORT_CLASS(sas_expander_class,
+ "sas_expander", NULL, NULL, NULL);
+
+#define sas_expander_show_simple(field, name, format_string, cast) \
+static ssize_t \
+show_sas_expander_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct sas_rphy *rphy = transport_class_to_rphy(dev); \
+ struct sas_expander_device *edev = rphy_to_expander_device(rphy); \
+ \
+ return snprintf(buf, 20, format_string, cast edev->field); \
+}
+
+#define sas_expander_simple_attr(field, name, format_string, type) \
+ sas_expander_show_simple(field, name, format_string, (type)) \
+static SAS_DEVICE_ATTR(expander, name, S_IRUGO, \
+ show_sas_expander_##name, NULL)
+
+sas_expander_simple_attr(vendor_id, vendor_id, "%s\n", char *);
+sas_expander_simple_attr(product_id, product_id, "%s\n", char *);
+sas_expander_simple_attr(product_rev, product_rev, "%s\n", char *);
+sas_expander_simple_attr(component_vendor_id, component_vendor_id,
+ "%s\n", char *);
+sas_expander_simple_attr(component_id, component_id, "%u\n", unsigned int);
+sas_expander_simple_attr(component_revision_id, component_revision_id, "%u\n",
+ unsigned int);
+sas_expander_simple_attr(level, level, "%d\n", int);
+
+static DECLARE_TRANSPORT_CLASS(sas_rphy_class,
+ "sas_device", NULL, NULL, NULL);
+
+static int sas_rphy_match(struct attribute_container *cont, struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct sas_internal *i;
+
+ if (!scsi_is_sas_rphy(dev))
+ return 0;
+ shost = dev_to_shost(dev->parent->parent);
+
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class !=
+ &sas_host_class.class)
+ return 0;
+
+ i = to_sas_internal(shost->transportt);
+ return &i->rphy_attr_cont.ac == cont;
+}
+
+static int sas_end_dev_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct sas_internal *i;
+ struct sas_rphy *rphy;
+
+ if (!scsi_is_sas_rphy(dev))
+ return 0;
+ shost = dev_to_shost(dev->parent->parent);
+ rphy = dev_to_rphy(dev);
+
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class !=
+ &sas_host_class.class)
+ return 0;
+
+ i = to_sas_internal(shost->transportt);
+ return &i->end_dev_attr_cont.ac == cont &&
+ rphy->identify.device_type == SAS_END_DEVICE;
+}
+
+static int sas_expander_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct sas_internal *i;
+ struct sas_rphy *rphy;
+
+ if (!scsi_is_sas_rphy(dev))
+ return 0;
+ shost = dev_to_shost(dev->parent->parent);
+ rphy = dev_to_rphy(dev);
+
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class !=
+ &sas_host_class.class)
+ return 0;
+
+ i = to_sas_internal(shost->transportt);
+ return &i->expander_attr_cont.ac == cont &&
+ (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE);
+}
+
+static void sas_expander_release(struct device *dev)
+{
+ struct sas_rphy *rphy = dev_to_rphy(dev);
+ struct sas_expander_device *edev = rphy_to_expander_device(rphy);
+
+ if (rphy->q)
+ blk_cleanup_queue(rphy->q);
+
+ put_device(dev->parent);
+ kfree(edev);
+}
+
+static void sas_end_device_release(struct device *dev)
+{
+ struct sas_rphy *rphy = dev_to_rphy(dev);
+ struct sas_end_device *edev = rphy_to_end_device(rphy);
+
+ if (rphy->q)
+ blk_cleanup_queue(rphy->q);
+
+ put_device(dev->parent);
+ kfree(edev);
+}
+
+/**
+ * sas_rphy_initialize - common rphy intialization
+ * @rphy: rphy to initialise
+ *
+ * Used by both sas_end_device_alloc() and sas_expander_alloc() to
+ * initialise the common rphy component of each.
+ */
+static void sas_rphy_initialize(struct sas_rphy *rphy)
+{
+ INIT_LIST_HEAD(&rphy->list);
+}
+
+/**
+ * sas_end_device_alloc - allocate an rphy for an end device
+ * @parent: which port
+ *
+ * Allocates an SAS remote PHY structure, connected to @parent.
+ *
+ * Returns:
+ * SAS PHY allocated or %NULL if the allocation failed.
+ */
+struct sas_rphy *sas_end_device_alloc(struct sas_port *parent)
+{
+ struct Scsi_Host *shost = dev_to_shost(&parent->dev);
+ struct sas_end_device *rdev;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ return NULL;
+ }
+
+ device_initialize(&rdev->rphy.dev);
+ rdev->rphy.dev.parent = get_device(&parent->dev);
+ rdev->rphy.dev.release = sas_end_device_release;
+ if (scsi_is_sas_expander_device(parent->dev.parent)) {
+ struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
+ dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d",
+ shost->host_no, rphy->scsi_target_id,
+ parent->port_identifier);
+ } else
+ dev_set_name(&rdev->rphy.dev, "end_device-%d:%d",
+ shost->host_no, parent->port_identifier);
+ rdev->rphy.identify.device_type = SAS_END_DEVICE;
+ sas_rphy_initialize(&rdev->rphy);
+ transport_setup_device(&rdev->rphy.dev);
+
+ return &rdev->rphy;
+}
+EXPORT_SYMBOL(sas_end_device_alloc);
+
+/**
+ * sas_expander_alloc - allocate an rphy for an end device
+ * @parent: which port
+ * @type: SAS_EDGE_EXPANDER_DEVICE or SAS_FANOUT_EXPANDER_DEVICE
+ *
+ * Allocates an SAS remote PHY structure, connected to @parent.
+ *
+ * Returns:
+ * SAS PHY allocated or %NULL if the allocation failed.
+ */
+struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
+ enum sas_device_type type)
+{
+ struct Scsi_Host *shost = dev_to_shost(&parent->dev);
+ struct sas_expander_device *rdev;
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+
+ BUG_ON(type != SAS_EDGE_EXPANDER_DEVICE &&
+ type != SAS_FANOUT_EXPANDER_DEVICE);
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ return NULL;
+ }
+
+ device_initialize(&rdev->rphy.dev);
+ rdev->rphy.dev.parent = get_device(&parent->dev);
+ rdev->rphy.dev.release = sas_expander_release;
+ mutex_lock(&sas_host->lock);
+ rdev->rphy.scsi_target_id = sas_host->next_expander_id++;
+ mutex_unlock(&sas_host->lock);
+ dev_set_name(&rdev->rphy.dev, "expander-%d:%d",
+ shost->host_no, rdev->rphy.scsi_target_id);
+ rdev->rphy.identify.device_type = type;
+ sas_rphy_initialize(&rdev->rphy);
+ transport_setup_device(&rdev->rphy.dev);
+
+ return &rdev->rphy;
+}
+EXPORT_SYMBOL(sas_expander_alloc);
+
+/**
+ * sas_rphy_add - add a SAS remote PHY to the device hierarchy
+ * @rphy: The remote PHY to be added
+ *
+ * Publishes a SAS remote PHY to the rest of the system.
+ */
+int sas_rphy_add(struct sas_rphy *rphy)
+{
+ struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
+ struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct sas_identify *identify = &rphy->identify;
+ int error;
+
+ if (parent->rphy)
+ return -ENXIO;
+ parent->rphy = rphy;
+
+ error = device_add(&rphy->dev);
+ if (error)
+ return error;
+ transport_add_device(&rphy->dev);
+ transport_configure_device(&rphy->dev);
+ if (sas_bsg_initialize(shost, rphy))
+ printk("fail to a bsg device %s\n", dev_name(&rphy->dev));
+
+
+ mutex_lock(&sas_host->lock);
+ list_add_tail(&rphy->list, &sas_host->rphy_list);
+ if (identify->device_type == SAS_END_DEVICE &&
+ (identify->target_port_protocols &
+ (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
+ rphy->scsi_target_id = sas_host->next_target_id++;
+ else if (identify->device_type == SAS_END_DEVICE)
+ rphy->scsi_target_id = -1;
+ mutex_unlock(&sas_host->lock);
+
+ if (identify->device_type == SAS_END_DEVICE &&
+ rphy->scsi_target_id != -1) {
+ int lun;
+
+ if (identify->target_port_protocols & SAS_PROTOCOL_SSP)
+ lun = SCAN_WILD_CARD;
+ else
+ lun = 0;
+
+ scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sas_rphy_add);
+
+/**
+ * sas_rphy_free - free a SAS remote PHY
+ * @rphy: SAS remote PHY to free
+ *
+ * Frees the specified SAS remote PHY.
+ *
+ * Note:
+ * This function must only be called on a remote
+ * PHY that has not successfully been added using
+ * sas_rphy_add() (or has been sas_rphy_remove()'d)
+ */
+void sas_rphy_free(struct sas_rphy *rphy)
+{
+ struct device *dev = &rphy->dev;
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+
+ mutex_lock(&sas_host->lock);
+ list_del(&rphy->list);
+ mutex_unlock(&sas_host->lock);
+
+ transport_destroy_device(dev);
+
+ put_device(dev);
+}
+EXPORT_SYMBOL(sas_rphy_free);
+
+/**
+ * sas_rphy_delete - remove and free SAS remote PHY
+ * @rphy: SAS remote PHY to remove and free
+ *
+ * Removes the specified SAS remote PHY and frees it.
+ */
+void
+sas_rphy_delete(struct sas_rphy *rphy)
+{
+ sas_rphy_remove(rphy);
+ sas_rphy_free(rphy);
+}
+EXPORT_SYMBOL(sas_rphy_delete);
+
+/**
+ * sas_rphy_unlink - unlink SAS remote PHY
+ * @rphy: SAS remote phy to unlink from its parent port
+ *
+ * Removes port reference to an rphy
+ */
+void sas_rphy_unlink(struct sas_rphy *rphy)
+{
+ struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
+
+ parent->rphy = NULL;
+}
+EXPORT_SYMBOL(sas_rphy_unlink);
+
+/**
+ * sas_rphy_remove - remove SAS remote PHY
+ * @rphy: SAS remote phy to remove
+ *
+ * Removes the specified SAS remote PHY.
+ */
+void
+sas_rphy_remove(struct sas_rphy *rphy)
+{
+ struct device *dev = &rphy->dev;
+
+ switch (rphy->identify.device_type) {
+ case SAS_END_DEVICE:
+ scsi_remove_target(dev);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ sas_remove_children(dev);
+ break;
+ default:
+ break;
+ }
+
+ sas_rphy_unlink(rphy);
+ sas_bsg_remove(NULL, rphy);
+ transport_remove_device(dev);
+ device_del(dev);
+}
+EXPORT_SYMBOL(sas_rphy_remove);
+
+/**
+ * scsi_is_sas_rphy - check if a struct device represents a SAS remote PHY
+ * @dev: device to check
+ *
+ * Returns:
+ * %1 if the device represents a SAS remote PHY, %0 else
+ */
+int scsi_is_sas_rphy(const struct device *dev)
+{
+ return dev->release == sas_end_device_release ||
+ dev->release == sas_expander_release;
+}
+EXPORT_SYMBOL(scsi_is_sas_rphy);
+
+
+/*
+ * SCSI scan helper
+ */
+
+static int sas_user_scan(struct Scsi_Host *shost, uint channel,
+ uint id, u64 lun)
+{
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct sas_rphy *rphy;
+
+ mutex_lock(&sas_host->lock);
+ list_for_each_entry(rphy, &sas_host->rphy_list, list) {
+ if (rphy->identify.device_type != SAS_END_DEVICE ||
+ rphy->scsi_target_id == -1)
+ continue;
+
+ if ((channel == SCAN_WILD_CARD || channel == 0) &&
+ (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
+ scsi_scan_target(&rphy->dev, 0,
+ rphy->scsi_target_id, lun, 1);
+ }
+ }
+ mutex_unlock(&sas_host->lock);
+
+ return 0;
+}
+
+
+/*
+ * Setup / Teardown code
+ */
+
+#define SETUP_TEMPLATE(attrb, field, perm, test) \
+ i->private_##attrb[count] = dev_attr_##field; \
+ i->private_##attrb[count].attr.mode = perm; \
+ i->attrb[count] = &i->private_##attrb[count]; \
+ if (test) \
+ count++
+
+#define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \
+ i->private_##attrb[count] = dev_attr_##field; \
+ i->private_##attrb[count].attr.mode = perm; \
+ if (ro_test) { \
+ i->private_##attrb[count].attr.mode = ro_perm; \
+ i->private_##attrb[count].store = NULL; \
+ } \
+ i->attrb[count] = &i->private_##attrb[count]; \
+ if (test) \
+ count++
+
+#define SETUP_RPORT_ATTRIBUTE(field) \
+ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1)
+
+#define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \
+ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func)
+
+#define SETUP_PHY_ATTRIBUTE(field) \
+ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1)
+
+#define SETUP_PHY_ATTRIBUTE_RW(field) \
+ SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
+ !i->f->set_phy_speed, S_IRUGO)
+
+#define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func) \
+ SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
+ !i->f->func, S_IRUGO)
+
+#define SETUP_PORT_ATTRIBUTE(field) \
+ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
+
+#define SETUP_OPTIONAL_PHY_ATTRIBUTE(field, func) \
+ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func)
+
+#define SETUP_PHY_ATTRIBUTE_WRONLY(field) \
+ SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1)
+
+#define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \
+ SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func)
+
+#define SETUP_END_DEV_ATTRIBUTE(field) \
+ SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1)
+
+#define SETUP_EXPANDER_ATTRIBUTE(field) \
+ SETUP_TEMPLATE(expander_attrs, expander_##field, S_IRUGO, 1)
+
+/**
+ * sas_attach_transport - instantiate SAS transport template
+ * @ft: SAS transport class function template
+ */
+struct scsi_transport_template *
+sas_attach_transport(struct sas_function_template *ft)
+{
+ struct sas_internal *i;
+ int count;
+
+ i = kzalloc(sizeof(struct sas_internal), GFP_KERNEL);
+ if (!i)
+ return NULL;
+
+ i->t.user_scan = sas_user_scan;
+
+ i->t.host_attrs.ac.attrs = &i->host_attrs[0];
+ i->t.host_attrs.ac.class = &sas_host_class.class;
+ i->t.host_attrs.ac.match = sas_host_match;
+ transport_container_register(&i->t.host_attrs);
+ i->t.host_size = sizeof(struct sas_host_attrs);
+
+ i->phy_attr_cont.ac.class = &sas_phy_class.class;
+ i->phy_attr_cont.ac.attrs = &i->phy_attrs[0];
+ i->phy_attr_cont.ac.match = sas_phy_match;
+ transport_container_register(&i->phy_attr_cont);
+
+ i->port_attr_cont.ac.class = &sas_port_class.class;
+ i->port_attr_cont.ac.attrs = &i->port_attrs[0];
+ i->port_attr_cont.ac.match = sas_port_match;
+ transport_container_register(&i->port_attr_cont);
+
+ i->rphy_attr_cont.ac.class = &sas_rphy_class.class;
+ i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0];
+ i->rphy_attr_cont.ac.match = sas_rphy_match;
+ transport_container_register(&i->rphy_attr_cont);
+
+ i->end_dev_attr_cont.ac.class = &sas_end_dev_class.class;
+ i->end_dev_attr_cont.ac.attrs = &i->end_dev_attrs[0];
+ i->end_dev_attr_cont.ac.match = sas_end_dev_match;
+ transport_container_register(&i->end_dev_attr_cont);
+
+ i->expander_attr_cont.ac.class = &sas_expander_class.class;
+ i->expander_attr_cont.ac.attrs = &i->expander_attrs[0];
+ i->expander_attr_cont.ac.match = sas_expander_match;
+ transport_container_register(&i->expander_attr_cont);
+
+ i->f = ft;
+
+ count = 0;
+ SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
+ SETUP_PHY_ATTRIBUTE(target_port_protocols);
+ SETUP_PHY_ATTRIBUTE(device_type);
+ SETUP_PHY_ATTRIBUTE(sas_address);
+ SETUP_PHY_ATTRIBUTE(phy_identifier);
+ //SETUP_PHY_ATTRIBUTE(port_identifier);
+ SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
+ SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
+ SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
+ SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw);
+ SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate);
+
+ SETUP_PHY_ATTRIBUTE(invalid_dword_count);
+ SETUP_PHY_ATTRIBUTE(running_disparity_error_count);
+ SETUP_PHY_ATTRIBUTE(loss_of_dword_sync_count);
+ SETUP_PHY_ATTRIBUTE(phy_reset_problem_count);
+ SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset);
+ SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset);
+ SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable);
+ i->phy_attrs[count] = NULL;
+
+ count = 0;
+ SETUP_PORT_ATTRIBUTE(num_phys);
+ i->port_attrs[count] = NULL;
+
+ count = 0;
+ SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols);
+ SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols);
+ SETUP_RPORT_ATTRIBUTE(rphy_device_type);
+ SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
+ SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
+ SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
+ get_enclosure_identifier);
+ SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,
+ get_bay_identifier);
+ i->rphy_attrs[count] = NULL;
+
+ count = 0;
+ SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning);
+ SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout);
+ SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout);
+ SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported);
+ SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled);
+ i->end_dev_attrs[count] = NULL;
+
+ count = 0;
+ SETUP_EXPANDER_ATTRIBUTE(vendor_id);
+ SETUP_EXPANDER_ATTRIBUTE(product_id);
+ SETUP_EXPANDER_ATTRIBUTE(product_rev);
+ SETUP_EXPANDER_ATTRIBUTE(component_vendor_id);
+ SETUP_EXPANDER_ATTRIBUTE(component_id);
+ SETUP_EXPANDER_ATTRIBUTE(component_revision_id);
+ SETUP_EXPANDER_ATTRIBUTE(level);
+ i->expander_attrs[count] = NULL;
+
+ return &i->t;
+}
+EXPORT_SYMBOL(sas_attach_transport);
+
+/**
+ * sas_release_transport - release SAS transport template instance
+ * @t: transport template instance
+ */
+void sas_release_transport(struct scsi_transport_template *t)
+{
+ struct sas_internal *i = to_sas_internal(t);
+
+ transport_container_unregister(&i->t.host_attrs);
+ transport_container_unregister(&i->phy_attr_cont);
+ transport_container_unregister(&i->port_attr_cont);
+ transport_container_unregister(&i->rphy_attr_cont);
+ transport_container_unregister(&i->end_dev_attr_cont);
+ transport_container_unregister(&i->expander_attr_cont);
+
+ kfree(i);
+}
+EXPORT_SYMBOL(sas_release_transport);
+
+static __init int sas_transport_init(void)
+{
+ int error;
+
+ error = transport_class_register(&sas_host_class);
+ if (error)
+ goto out;
+ error = transport_class_register(&sas_phy_class);
+ if (error)
+ goto out_unregister_transport;
+ error = transport_class_register(&sas_port_class);
+ if (error)
+ goto out_unregister_phy;
+ error = transport_class_register(&sas_rphy_class);
+ if (error)
+ goto out_unregister_port;
+ error = transport_class_register(&sas_end_dev_class);
+ if (error)
+ goto out_unregister_rphy;
+ error = transport_class_register(&sas_expander_class);
+ if (error)
+ goto out_unregister_end_dev;
+
+ return 0;
+
+ out_unregister_end_dev:
+ transport_class_unregister(&sas_end_dev_class);
+ out_unregister_rphy:
+ transport_class_unregister(&sas_rphy_class);
+ out_unregister_port:
+ transport_class_unregister(&sas_port_class);
+ out_unregister_phy:
+ transport_class_unregister(&sas_phy_class);
+ out_unregister_transport:
+ transport_class_unregister(&sas_host_class);
+ out:
+ return error;
+
+}
+
+static void __exit sas_transport_exit(void)
+{
+ transport_class_unregister(&sas_host_class);
+ transport_class_unregister(&sas_phy_class);
+ transport_class_unregister(&sas_port_class);
+ transport_class_unregister(&sas_rphy_class);
+ transport_class_unregister(&sas_end_dev_class);
+ transport_class_unregister(&sas_expander_class);
+}
+
+MODULE_AUTHOR("Christoph Hellwig");
+MODULE_DESCRIPTION("SAS Transport Attributes");
+MODULE_LICENSE("GPL");
+
+module_init(sas_transport_init);
+module_exit(sas_transport_exit);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
new file mode 100644
index 000000000..31bbb0da3
--- /dev/null
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -0,0 +1,1632 @@
+/*
+ * Parallel SCSI (SPI) transport specific attributes exported to sysfs.
+ *
+ * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <scsi/scsi.h>
+#include "scsi_priv.h"
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#define SPI_NUM_ATTRS 14 /* increase this if you add attributes */
+#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always
+ * on" attributes */
+#define SPI_HOST_ATTRS 1
+
+#define SPI_MAX_ECHO_BUFFER_SIZE 4096
+
+#define DV_LOOPS 3
+#define DV_TIMEOUT (10*HZ)
+#define DV_RETRIES 3 /* should only need at most
+ * two cc/ua clears */
+
+/* Our blacklist flags */
+enum {
+ SPI_BLIST_NOIUS = 0x1,
+};
+
+/* blacklist table, modelled on scsi_devinfo.c */
+static struct {
+ char *vendor;
+ char *model;
+ unsigned flags;
+} spi_static_device_list[] __initdata = {
+ {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
+ {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
+ {NULL, NULL, 0}
+};
+
+/* Private data accessors (keep these out of the header file) */
+#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
+#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
+
+struct spi_internal {
+ struct scsi_transport_template t;
+ struct spi_function_template *f;
+};
+
+#define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t)
+
+static const int ppr_to_ps[] = {
+ /* The PPR values 0-6 are reserved, fill them in when
+ * the committee defines them */
+ -1, /* 0x00 */
+ -1, /* 0x01 */
+ -1, /* 0x02 */
+ -1, /* 0x03 */
+ -1, /* 0x04 */
+ -1, /* 0x05 */
+ -1, /* 0x06 */
+ 3125, /* 0x07 */
+ 6250, /* 0x08 */
+ 12500, /* 0x09 */
+ 25000, /* 0x0a */
+ 30300, /* 0x0b */
+ 50000, /* 0x0c */
+};
+/* The PPR values at which you calculate the period in ns by multiplying
+ * by 4 */
+#define SPI_STATIC_PPR 0x0c
+
+static int sprint_frac(char *dest, int value, int denom)
+{
+ int frac = value % denom;
+ int result = sprintf(dest, "%d", value / denom);
+
+ if (frac == 0)
+ return result;
+ dest[result++] = '.';
+
+ do {
+ denom /= 10;
+ sprintf(dest + result, "%d", frac / denom);
+ result++;
+ frac %= denom;
+ } while (frac);
+
+ dest[result++] = '\0';
+ return result;
+}
+
+static int spi_execute(struct scsi_device *sdev, const void *cmd,
+ enum dma_data_direction dir,
+ void *buffer, unsigned bufflen,
+ struct scsi_sense_hdr *sshdr)
+{
+ int i, result;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+
+ for(i = 0; i < DV_RETRIES; i++) {
+ result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
+ sense, DV_TIMEOUT, /* retries */ 1,
+ REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER,
+ NULL);
+ if (driver_byte(result) & DRIVER_SENSE) {
+ struct scsi_sense_hdr sshdr_tmp;
+ if (!sshdr)
+ sshdr = &sshdr_tmp;
+
+ if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE,
+ sshdr)
+ && sshdr->sense_key == UNIT_ATTENTION)
+ continue;
+ }
+ break;
+ }
+ return result;
+}
+
+static struct {
+ enum spi_signal_type value;
+ char *name;
+} signal_types[] = {
+ { SPI_SIGNAL_UNKNOWN, "unknown" },
+ { SPI_SIGNAL_SE, "SE" },
+ { SPI_SIGNAL_LVD, "LVD" },
+ { SPI_SIGNAL_HVD, "HVD" },
+};
+
+static inline const char *spi_signal_to_string(enum spi_signal_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(signal_types); i++) {
+ if (type == signal_types[i].value)
+ return signal_types[i].name;
+ }
+ return NULL;
+}
+static inline enum spi_signal_type spi_signal_to_value(const char *name)
+{
+ int i, len;
+
+ for (i = 0; i < ARRAY_SIZE(signal_types); i++) {
+ len = strlen(signal_types[i].name);
+ if (strncmp(name, signal_types[i].name, len) == 0 &&
+ (name[len] == '\n' || name[len] == '\0'))
+ return signal_types[i].value;
+ }
+ return SPI_SIGNAL_UNKNOWN;
+}
+
+static int spi_host_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+
+ spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
+
+ return 0;
+}
+
+static int spi_host_configure(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev);
+
+static DECLARE_TRANSPORT_CLASS(spi_host_class,
+ "spi_host",
+ spi_host_setup,
+ NULL,
+ spi_host_configure);
+
+static int spi_host_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+
+ if (!scsi_is_host_device(dev))
+ return 0;
+
+ shost = dev_to_shost(dev);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &spi_host_class.class)
+ return 0;
+
+ return &shost->transportt->host_attrs.ac == cont;
+}
+
+static int spi_target_configure(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev);
+
+static int spi_device_configure(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_target *starget = sdev->sdev_target;
+ unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+ &sdev->inquiry[16],
+ SCSI_DEVINFO_SPI);
+
+ /* Populate the target capability fields with the values
+ * gleaned from the device inquiry */
+
+ spi_support_sync(starget) = scsi_device_sync(sdev);
+ spi_support_wide(starget) = scsi_device_wide(sdev);
+ spi_support_dt(starget) = scsi_device_dt(sdev);
+ spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
+ spi_support_ius(starget) = scsi_device_ius(sdev);
+ if (bflags & SPI_BLIST_NOIUS) {
+ dev_info(dev, "Information Units disabled by blacklist\n");
+ spi_support_ius(starget) = 0;
+ }
+ spi_support_qas(starget) = scsi_device_qas(sdev);
+
+ return 0;
+}
+
+static int spi_setup_transport_attrs(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev)
+{
+ struct scsi_target *starget = to_scsi_target(dev);
+
+ spi_period(starget) = -1; /* illegal value */
+ spi_min_period(starget) = 0;
+ spi_offset(starget) = 0; /* async */
+ spi_max_offset(starget) = 255;
+ spi_width(starget) = 0; /* narrow */
+ spi_max_width(starget) = 1;
+ spi_iu(starget) = 0; /* no IU */
+ spi_max_iu(starget) = 1;
+ spi_dt(starget) = 0; /* ST */
+ spi_qas(starget) = 0;
+ spi_max_qas(starget) = 1;
+ spi_wr_flow(starget) = 0;
+ spi_rd_strm(starget) = 0;
+ spi_rti(starget) = 0;
+ spi_pcomp_en(starget) = 0;
+ spi_hold_mcs(starget) = 0;
+ spi_dv_pending(starget) = 0;
+ spi_dv_in_progress(starget) = 0;
+ spi_initial_dv(starget) = 0;
+ mutex_init(&spi_dv_mutex(starget));
+
+ return 0;
+}
+
+#define spi_transport_show_simple(field, format_string) \
+ \
+static ssize_t \
+show_spi_transport_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_target *starget = transport_class_to_starget(dev); \
+ struct spi_transport_attrs *tp; \
+ \
+ tp = (struct spi_transport_attrs *)&starget->starget_data; \
+ return snprintf(buf, 20, format_string, tp->field); \
+}
+
+#define spi_transport_store_simple(field, format_string) \
+ \
+static ssize_t \
+store_spi_transport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ struct scsi_target *starget = transport_class_to_starget(dev); \
+ struct spi_transport_attrs *tp; \
+ \
+ tp = (struct spi_transport_attrs *)&starget->starget_data; \
+ val = simple_strtoul(buf, NULL, 0); \
+ tp->field = val; \
+ return count; \
+}
+
+#define spi_transport_show_function(field, format_string) \
+ \
+static ssize_t \
+show_spi_transport_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scsi_target *starget = transport_class_to_starget(dev); \
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
+ struct spi_transport_attrs *tp; \
+ struct spi_internal *i = to_spi_internal(shost->transportt); \
+ tp = (struct spi_transport_attrs *)&starget->starget_data; \
+ if (i->f->get_##field) \
+ i->f->get_##field(starget); \
+ return snprintf(buf, 20, format_string, tp->field); \
+}
+
+#define spi_transport_store_function(field, format_string) \
+static ssize_t \
+store_spi_transport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ struct scsi_target *starget = transport_class_to_starget(dev); \
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
+ struct spi_internal *i = to_spi_internal(shost->transportt); \
+ \
+ if (!i->f->set_##field) \
+ return -EINVAL; \
+ val = simple_strtoul(buf, NULL, 0); \
+ i->f->set_##field(starget, val); \
+ return count; \
+}
+
+#define spi_transport_store_max(field, format_string) \
+static ssize_t \
+store_spi_transport_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int val; \
+ struct scsi_target *starget = transport_class_to_starget(dev); \
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
+ struct spi_internal *i = to_spi_internal(shost->transportt); \
+ struct spi_transport_attrs *tp \
+ = (struct spi_transport_attrs *)&starget->starget_data; \
+ \
+ if (i->f->set_##field) \
+ return -EINVAL; \
+ val = simple_strtoul(buf, NULL, 0); \
+ if (val > tp->max_##field) \
+ val = tp->max_##field; \
+ i->f->set_##field(starget, val); \
+ return count; \
+}
+
+#define spi_transport_rd_attr(field, format_string) \
+ spi_transport_show_function(field, format_string) \
+ spi_transport_store_function(field, format_string) \
+static DEVICE_ATTR(field, S_IRUGO, \
+ show_spi_transport_##field, \
+ store_spi_transport_##field);
+
+#define spi_transport_simple_attr(field, format_string) \
+ spi_transport_show_simple(field, format_string) \
+ spi_transport_store_simple(field, format_string) \
+static DEVICE_ATTR(field, S_IRUGO, \
+ show_spi_transport_##field, \
+ store_spi_transport_##field);
+
+#define spi_transport_max_attr(field, format_string) \
+ spi_transport_show_function(field, format_string) \
+ spi_transport_store_max(field, format_string) \
+ spi_transport_simple_attr(max_##field, format_string) \
+static DEVICE_ATTR(field, S_IRUGO, \
+ show_spi_transport_##field, \
+ store_spi_transport_##field);
+
+/* The Parallel SCSI Tranport Attributes: */
+spi_transport_max_attr(offset, "%d\n");
+spi_transport_max_attr(width, "%d\n");
+spi_transport_max_attr(iu, "%d\n");
+spi_transport_rd_attr(dt, "%d\n");
+spi_transport_max_attr(qas, "%d\n");
+spi_transport_rd_attr(wr_flow, "%d\n");
+spi_transport_rd_attr(rd_strm, "%d\n");
+spi_transport_rd_attr(rti, "%d\n");
+spi_transport_rd_attr(pcomp_en, "%d\n");
+spi_transport_rd_attr(hold_mcs, "%d\n");
+
+/* we only care about the first child device that's a real SCSI device
+ * so we return 1 to terminate the iteration when we find it */
+static int child_iter(struct device *dev, void *data)
+{
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ spi_dv_device(to_scsi_device(dev));
+ return 1;
+}
+
+static ssize_t
+store_spi_revalidate(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_target *starget = transport_class_to_starget(dev);
+
+ device_for_each_child(&starget->dev, NULL, child_iter);
+ return count;
+}
+static DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
+
+/* Translate the period into ns according to the current spec
+ * for SDTR/PPR messages */
+static int period_to_str(char *buf, int period)
+{
+ int len, picosec;
+
+ if (period < 0 || period > 0xff) {
+ picosec = -1;
+ } else if (period <= SPI_STATIC_PPR) {
+ picosec = ppr_to_ps[period];
+ } else {
+ picosec = period * 4000;
+ }
+
+ if (picosec == -1) {
+ len = sprintf(buf, "reserved");
+ } else {
+ len = sprint_frac(buf, picosec, 1000);
+ }
+
+ return len;
+}
+
+static ssize_t
+show_spi_transport_period_helper(char *buf, int period)
+{
+ int len = period_to_str(buf, period);
+ buf[len++] = '\n';
+ buf[len] = '\0';
+ return len;
+}
+
+static ssize_t
+store_spi_transport_period_helper(struct device *dev, const char *buf,
+ size_t count, int *periodp)
+{
+ int j, picosec, period = -1;
+ char *endp;
+
+ picosec = simple_strtoul(buf, &endp, 10) * 1000;
+ if (*endp == '.') {
+ int mult = 100;
+ do {
+ endp++;
+ if (!isdigit(*endp))
+ break;
+ picosec += (*endp - '0') * mult;
+ mult /= 10;
+ } while (mult > 0);
+ }
+
+ for (j = 0; j <= SPI_STATIC_PPR; j++) {
+ if (ppr_to_ps[j] < picosec)
+ continue;
+ period = j;
+ break;
+ }
+
+ if (period == -1)
+ period = picosec / 4000;
+
+ if (period > 0xff)
+ period = 0xff;
+
+ *periodp = period;
+
+ return count;
+}
+
+static ssize_t
+show_spi_transport_period(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_target *starget = transport_class_to_starget(dev);
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct spi_internal *i = to_spi_internal(shost->transportt);
+ struct spi_transport_attrs *tp =
+ (struct spi_transport_attrs *)&starget->starget_data;
+
+ if (i->f->get_period)
+ i->f->get_period(starget);
+
+ return show_spi_transport_period_helper(buf, tp->period);
+}
+
+static ssize_t
+store_spi_transport_period(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_target *starget = transport_class_to_starget(cdev);
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct spi_internal *i = to_spi_internal(shost->transportt);
+ struct spi_transport_attrs *tp =
+ (struct spi_transport_attrs *)&starget->starget_data;
+ int period, retval;
+
+ if (!i->f->set_period)
+ return -EINVAL;
+
+ retval = store_spi_transport_period_helper(cdev, buf, count, &period);
+
+ if (period < tp->min_period)
+ period = tp->min_period;
+
+ i->f->set_period(starget, period);
+
+ return retval;
+}
+
+static DEVICE_ATTR(period, S_IRUGO,
+ show_spi_transport_period,
+ store_spi_transport_period);
+
+static ssize_t
+show_spi_transport_min_period(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_target *starget = transport_class_to_starget(cdev);
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct spi_internal *i = to_spi_internal(shost->transportt);
+ struct spi_transport_attrs *tp =
+ (struct spi_transport_attrs *)&starget->starget_data;
+
+ if (!i->f->set_period)
+ return -EINVAL;
+
+ return show_spi_transport_period_helper(buf, tp->min_period);
+}
+
+static ssize_t
+store_spi_transport_min_period(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_target *starget = transport_class_to_starget(cdev);
+ struct spi_transport_attrs *tp =
+ (struct spi_transport_attrs *)&starget->starget_data;
+
+ return store_spi_transport_period_helper(cdev, buf, count,
+ &tp->min_period);
+}
+
+
+static DEVICE_ATTR(min_period, S_IRUGO,
+ show_spi_transport_min_period,
+ store_spi_transport_min_period);
+
+
+static ssize_t show_spi_host_signalling(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct spi_internal *i = to_spi_internal(shost->transportt);
+
+ if (i->f->get_signalling)
+ i->f->get_signalling(shost);
+
+ return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost)));
+}
+static ssize_t store_spi_host_signalling(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(dev);
+ struct spi_internal *i = to_spi_internal(shost->transportt);
+ enum spi_signal_type type = spi_signal_to_value(buf);
+
+ if (!i->f->set_signalling)
+ return -EINVAL;
+
+ if (type != SPI_SIGNAL_UNKNOWN)
+ i->f->set_signalling(shost, type);
+
+ return count;
+}
+static DEVICE_ATTR(signalling, S_IRUGO,
+ show_spi_host_signalling,
+ store_spi_host_signalling);
+
+static ssize_t show_spi_host_width(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+
+ return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
+}
+static DEVICE_ATTR(host_width, S_IRUGO,
+ show_spi_host_width, NULL);
+
+static ssize_t show_spi_host_hba_id(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+
+ return sprintf(buf, "%d\n", shost->this_id);
+}
+static DEVICE_ATTR(hba_id, S_IRUGO,
+ show_spi_host_hba_id, NULL);
+
+#define DV_SET(x, y) \
+ if(i->f->set_##x) \
+ i->f->set_##x(sdev->sdev_target, y)
+
+enum spi_compare_returns {
+ SPI_COMPARE_SUCCESS,
+ SPI_COMPARE_FAILURE,
+ SPI_COMPARE_SKIP_TEST,
+};
+
+
+/* This is for read/write Domain Validation: If the device supports
+ * an echo buffer, we do read/write tests to it */
+static enum spi_compare_returns
+spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
+ u8 *ptr, const int retries)
+{
+ int len = ptr - buffer;
+ int j, k, r, result;
+ unsigned int pattern = 0x0000ffff;
+ struct scsi_sense_hdr sshdr;
+
+ const char spi_write_buffer[] = {
+ WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
+ };
+ const char spi_read_buffer[] = {
+ READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
+ };
+
+ /* set up the pattern buffer. Doesn't matter if we spill
+ * slightly beyond since that's where the read buffer is */
+ for (j = 0; j < len; ) {
+
+ /* fill the buffer with counting (test a) */
+ for ( ; j < min(len, 32); j++)
+ buffer[j] = j;
+ k = j;
+ /* fill the buffer with alternating words of 0x0 and
+ * 0xffff (test b) */
+ for ( ; j < min(len, k + 32); j += 2) {
+ u16 *word = (u16 *)&buffer[j];
+
+ *word = (j & 0x02) ? 0x0000 : 0xffff;
+ }
+ k = j;
+ /* fill with crosstalk (alternating 0x5555 0xaaa)
+ * (test c) */
+ for ( ; j < min(len, k + 32); j += 2) {
+ u16 *word = (u16 *)&buffer[j];
+
+ *word = (j & 0x02) ? 0x5555 : 0xaaaa;
+ }
+ k = j;
+ /* fill with shifting bits (test d) */
+ for ( ; j < min(len, k + 32); j += 4) {
+ u32 *word = (unsigned int *)&buffer[j];
+ u32 roll = (pattern & 0x80000000) ? 1 : 0;
+
+ *word = pattern;
+ pattern = (pattern << 1) | roll;
+ }
+ /* don't bother with random data (test e) */
+ }
+
+ for (r = 0; r < retries; r++) {
+ result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
+ buffer, len, &sshdr);
+ if(result || !scsi_device_online(sdev)) {
+
+ scsi_device_set_state(sdev, SDEV_QUIESCE);
+ if (scsi_sense_valid(&sshdr)
+ && sshdr.sense_key == ILLEGAL_REQUEST
+ /* INVALID FIELD IN CDB */
+ && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
+ /* This would mean that the drive lied
+ * to us about supporting an echo
+ * buffer (unfortunately some Western
+ * Digital drives do precisely this)
+ */
+ return SPI_COMPARE_SKIP_TEST;
+
+
+ sdev_printk(KERN_ERR, sdev, "Write Buffer failure %x\n", result);
+ return SPI_COMPARE_FAILURE;
+ }
+
+ memset(ptr, 0, len);
+ spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
+ ptr, len, NULL);
+ scsi_device_set_state(sdev, SDEV_QUIESCE);
+
+ if (memcmp(buffer, ptr, len) != 0)
+ return SPI_COMPARE_FAILURE;
+ }
+ return SPI_COMPARE_SUCCESS;
+}
+
+/* This is for the simplest form of Domain Validation: a read test
+ * on the inquiry data from the device */
+static enum spi_compare_returns
+spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
+ u8 *ptr, const int retries)
+{
+ int r, result;
+ const int len = sdev->inquiry_len;
+ const char spi_inquiry[] = {
+ INQUIRY, 0, 0, 0, len, 0
+ };
+
+ for (r = 0; r < retries; r++) {
+ memset(ptr, 0, len);
+
+ result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
+ ptr, len, NULL);
+
+ if(result || !scsi_device_online(sdev)) {
+ scsi_device_set_state(sdev, SDEV_QUIESCE);
+ return SPI_COMPARE_FAILURE;
+ }
+
+ /* If we don't have the inquiry data already, the
+ * first read gets it */
+ if (ptr == buffer) {
+ ptr += len;
+ --r;
+ continue;
+ }
+
+ if (memcmp(buffer, ptr, len) != 0)
+ /* failure */
+ return SPI_COMPARE_FAILURE;
+ }
+ return SPI_COMPARE_SUCCESS;
+}
+
+static enum spi_compare_returns
+spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
+ enum spi_compare_returns
+ (*compare_fn)(struct scsi_device *, u8 *, u8 *, int))
+{
+ struct spi_internal *i = to_spi_internal(sdev->host->transportt);
+ struct scsi_target *starget = sdev->sdev_target;
+ int period = 0, prevperiod = 0;
+ enum spi_compare_returns retval;
+
+
+ for (;;) {
+ int newperiod;
+ retval = compare_fn(sdev, buffer, ptr, DV_LOOPS);
+
+ if (retval == SPI_COMPARE_SUCCESS
+ || retval == SPI_COMPARE_SKIP_TEST)
+ break;
+
+ /* OK, retrain, fallback */
+ if (i->f->get_iu)
+ i->f->get_iu(starget);
+ if (i->f->get_qas)
+ i->f->get_qas(starget);
+ if (i->f->get_period)
+ i->f->get_period(sdev->sdev_target);
+
+ /* Here's the fallback sequence; first try turning off
+ * IU, then QAS (if we can control them), then finally
+ * fall down the periods */
+ if (i->f->set_iu && spi_iu(starget)) {
+ starget_printk(KERN_ERR, starget, "Domain Validation Disabing Information Units\n");
+ DV_SET(iu, 0);
+ } else if (i->f->set_qas && spi_qas(starget)) {
+ starget_printk(KERN_ERR, starget, "Domain Validation Disabing Quick Arbitration and Selection\n");
+ DV_SET(qas, 0);
+ } else {
+ newperiod = spi_period(starget);
+ period = newperiod > period ? newperiod : period;
+ if (period < 0x0d)
+ period++;
+ else
+ period += period >> 1;
+
+ if (unlikely(period > 0xff || period == prevperiod)) {
+ /* Total failure; set to async and return */
+ starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n");
+ DV_SET(offset, 0);
+ return SPI_COMPARE_FAILURE;
+ }
+ starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n");
+ DV_SET(period, period);
+ prevperiod = period;
+ }
+ }
+ return retval;
+}
+
+static int
+spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
+{
+ int l, result;
+
+ /* first off do a test unit ready. This can error out
+ * because of reservations or some other reason. If it
+ * fails, the device won't let us write to the echo buffer
+ * so just return failure */
+
+ const char spi_test_unit_ready[] = {
+ TEST_UNIT_READY, 0, 0, 0, 0, 0
+ };
+
+ const char spi_read_buffer_descriptor[] = {
+ READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0
+ };
+
+
+ /* We send a set of three TURs to clear any outstanding
+ * unit attention conditions if they exist (Otherwise the
+ * buffer tests won't be happy). If the TUR still fails
+ * (reservation conflict, device not ready, etc) just
+ * skip the write tests */
+ for (l = 0; ; l++) {
+ result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
+ NULL, 0, NULL);
+
+ if(result) {
+ if(l >= 3)
+ return 0;
+ } else {
+ /* TUR succeeded */
+ break;
+ }
+ }
+
+ result = spi_execute(sdev, spi_read_buffer_descriptor,
+ DMA_FROM_DEVICE, buffer, 4, NULL);
+
+ if (result)
+ /* Device has no echo buffer */
+ return 0;
+
+ return buffer[3] + ((buffer[2] & 0x1f) << 8);
+}
+
+static void
+spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
+{
+ struct spi_internal *i = to_spi_internal(sdev->host->transportt);
+ struct scsi_target *starget = sdev->sdev_target;
+ struct Scsi_Host *shost = sdev->host;
+ int len = sdev->inquiry_len;
+ int min_period = spi_min_period(starget);
+ int max_width = spi_max_width(starget);
+ /* first set us up for narrow async */
+ DV_SET(offset, 0);
+ DV_SET(width, 0);
+
+ if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
+ != SPI_COMPARE_SUCCESS) {
+ starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
+ /* FIXME: should probably offline the device here? */
+ return;
+ }
+
+ if (!spi_support_wide(starget)) {
+ spi_max_width(starget) = 0;
+ max_width = 0;
+ }
+
+ /* test width */
+ if (i->f->set_width && max_width) {
+ i->f->set_width(starget, 1);
+
+ if (spi_dv_device_compare_inquiry(sdev, buffer,
+ buffer + len,
+ DV_LOOPS)
+ != SPI_COMPARE_SUCCESS) {
+ starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
+ i->f->set_width(starget, 0);
+ /* Make sure we don't force wide back on by asking
+ * for a transfer period that requires it */
+ max_width = 0;
+ if (min_period < 10)
+ min_period = 10;
+ }
+ }
+
+ if (!i->f->set_period)
+ return;
+
+ /* device can't handle synchronous */
+ if (!spi_support_sync(starget) && !spi_support_dt(starget))
+ return;
+
+ /* len == -1 is the signal that we need to ascertain the
+ * presence of an echo buffer before trying to use it. len ==
+ * 0 means we don't have an echo buffer */
+ len = -1;
+
+ retry:
+
+ /* now set up to the maximum */
+ DV_SET(offset, spi_max_offset(starget));
+ DV_SET(period, min_period);
+
+ /* try QAS requests; this should be harmless to set if the
+ * target supports it */
+ if (spi_support_qas(starget) && spi_max_qas(starget)) {
+ DV_SET(qas, 1);
+ } else {
+ DV_SET(qas, 0);
+ }
+
+ if (spi_support_ius(starget) && spi_max_iu(starget) &&
+ min_period < 9) {
+ /* This u320 (or u640). Set IU transfers */
+ DV_SET(iu, 1);
+ /* Then set the optional parameters */
+ DV_SET(rd_strm, 1);
+ DV_SET(wr_flow, 1);
+ DV_SET(rti, 1);
+ if (min_period == 8)
+ DV_SET(pcomp_en, 1);
+ } else {
+ DV_SET(iu, 0);
+ }
+
+ /* now that we've done all this, actually check the bus
+ * signal type (if known). Some devices are stupid on
+ * a SE bus and still claim they can try LVD only settings */
+ if (i->f->get_signalling)
+ i->f->get_signalling(shost);
+ if (spi_signalling(shost) == SPI_SIGNAL_SE ||
+ spi_signalling(shost) == SPI_SIGNAL_HVD ||
+ !spi_support_dt(starget)) {
+ DV_SET(dt, 0);
+ } else {
+ DV_SET(dt, 1);
+ }
+ /* set width last because it will pull all the other
+ * parameters down to required values */
+ DV_SET(width, max_width);
+
+ /* Do the read only INQUIRY tests */
+ spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
+ spi_dv_device_compare_inquiry);
+ /* See if we actually managed to negotiate and sustain DT */
+ if (i->f->get_dt)
+ i->f->get_dt(starget);
+
+ /* see if the device has an echo buffer. If it does we can do
+ * the SPI pattern write tests. Because of some broken
+ * devices, we *only* try this on a device that has actually
+ * negotiated DT */
+
+ if (len == -1 && spi_dt(starget))
+ len = spi_dv_device_get_echo_buffer(sdev, buffer);
+
+ if (len <= 0) {
+ starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n");
+ return;
+ }
+
+ if (len > SPI_MAX_ECHO_BUFFER_SIZE) {
+ starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
+ len = SPI_MAX_ECHO_BUFFER_SIZE;
+ }
+
+ if (spi_dv_retrain(sdev, buffer, buffer + len,
+ spi_dv_device_echo_buffer)
+ == SPI_COMPARE_SKIP_TEST) {
+ /* OK, the stupid drive can't do a write echo buffer
+ * test after all, fall back to the read tests */
+ len = 0;
+ goto retry;
+ }
+}
+
+
+/** spi_dv_device - Do Domain Validation on the device
+ * @sdev: scsi device to validate
+ *
+ * Performs the domain validation on the given device in the
+ * current execution thread. Since DV operations may sleep,
+ * the current thread must have user context. Also no SCSI
+ * related locks that would deadlock I/O issued by the DV may
+ * be held.
+ */
+void
+spi_dv_device(struct scsi_device *sdev)
+{
+ struct scsi_target *starget = sdev->sdev_target;
+ u8 *buffer;
+ const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+
+ if (unlikely(spi_dv_in_progress(starget)))
+ return;
+
+ if (unlikely(scsi_device_get(sdev)))
+ return;
+ spi_dv_in_progress(starget) = 1;
+
+ buffer = kzalloc(len, GFP_KERNEL);
+
+ if (unlikely(!buffer))
+ goto out_put;
+
+ /* We need to verify that the actual device will quiesce; the
+ * later target quiesce is just a nice to have */
+ if (unlikely(scsi_device_quiesce(sdev)))
+ goto out_free;
+
+ scsi_target_quiesce(starget);
+
+ spi_dv_pending(starget) = 1;
+ mutex_lock(&spi_dv_mutex(starget));
+
+ starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n");
+
+ spi_dv_device_internal(sdev, buffer);
+
+ starget_printk(KERN_INFO, starget, "Ending Domain Validation\n");
+
+ mutex_unlock(&spi_dv_mutex(starget));
+ spi_dv_pending(starget) = 0;
+
+ scsi_target_resume(starget);
+
+ spi_initial_dv(starget) = 1;
+
+ out_free:
+ kfree(buffer);
+ out_put:
+ spi_dv_in_progress(starget) = 0;
+ scsi_device_put(sdev);
+}
+EXPORT_SYMBOL(spi_dv_device);
+
+struct work_queue_wrapper {
+ struct work_struct work;
+ struct scsi_device *sdev;
+};
+
+static void
+spi_dv_device_work_wrapper(struct work_struct *work)
+{
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
+ struct scsi_device *sdev = wqw->sdev;
+
+ kfree(wqw);
+ spi_dv_device(sdev);
+ spi_dv_pending(sdev->sdev_target) = 0;
+ scsi_device_put(sdev);
+}
+
+
+/**
+ * spi_schedule_dv_device - schedule domain validation to occur on the device
+ * @sdev: The device to validate
+ *
+ * Identical to spi_dv_device() above, except that the DV will be
+ * scheduled to occur in a workqueue later. All memory allocations
+ * are atomic, so may be called from any context including those holding
+ * SCSI locks.
+ */
+void
+spi_schedule_dv_device(struct scsi_device *sdev)
+{
+ struct work_queue_wrapper *wqw =
+ kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);
+
+ if (unlikely(!wqw))
+ return;
+
+ if (unlikely(spi_dv_pending(sdev->sdev_target))) {
+ kfree(wqw);
+ return;
+ }
+ /* Set pending early (dv_device doesn't check it, only sets it) */
+ spi_dv_pending(sdev->sdev_target) = 1;
+ if (unlikely(scsi_device_get(sdev))) {
+ kfree(wqw);
+ spi_dv_pending(sdev->sdev_target) = 0;
+ return;
+ }
+
+ INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
+ wqw->sdev = sdev;
+
+ schedule_work(&wqw->work);
+}
+EXPORT_SYMBOL(spi_schedule_dv_device);
+
+/**
+ * spi_display_xfer_agreement - Print the current target transfer agreement
+ * @starget: The target for which to display the agreement
+ *
+ * Each SPI port is required to maintain a transfer agreement for each
+ * other port on the bus. This function prints a one-line summary of
+ * the current agreement; more detailed information is available in sysfs.
+ */
+void spi_display_xfer_agreement(struct scsi_target *starget)
+{
+ struct spi_transport_attrs *tp;
+ tp = (struct spi_transport_attrs *)&starget->starget_data;
+
+ if (tp->offset > 0 && tp->period > 0) {
+ unsigned int picosec, kb100;
+ char *scsi = "FAST-?";
+ char tmp[8];
+
+ if (tp->period <= SPI_STATIC_PPR) {
+ picosec = ppr_to_ps[tp->period];
+ switch (tp->period) {
+ case 7: scsi = "FAST-320"; break;
+ case 8: scsi = "FAST-160"; break;
+ case 9: scsi = "FAST-80"; break;
+ case 10:
+ case 11: scsi = "FAST-40"; break;
+ case 12: scsi = "FAST-20"; break;
+ }
+ } else {
+ picosec = tp->period * 4000;
+ if (tp->period < 25)
+ scsi = "FAST-20";
+ else if (tp->period < 50)
+ scsi = "FAST-10";
+ else
+ scsi = "FAST-5";
+ }
+
+ kb100 = (10000000 + picosec / 2) / picosec;
+ if (tp->width)
+ kb100 *= 2;
+ sprint_frac(tmp, picosec, 1000);
+
+ dev_info(&starget->dev,
+ "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n",
+ scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10,
+ tp->dt ? "DT" : "ST",
+ tp->iu ? " IU" : "",
+ tp->qas ? " QAS" : "",
+ tp->rd_strm ? " RDSTRM" : "",
+ tp->rti ? " RTI" : "",
+ tp->wr_flow ? " WRFLOW" : "",
+ tp->pcomp_en ? " PCOMP" : "",
+ tp->hold_mcs ? " HMCS" : "",
+ tmp, tp->offset);
+ } else {
+ dev_info(&starget->dev, "%sasynchronous\n",
+ tp->width ? "wide " : "");
+ }
+}
+EXPORT_SYMBOL(spi_display_xfer_agreement);
+
+int spi_populate_width_msg(unsigned char *msg, int width)
+{
+ msg[0] = EXTENDED_MESSAGE;
+ msg[1] = 2;
+ msg[2] = EXTENDED_WDTR;
+ msg[3] = width;
+ return 4;
+}
+EXPORT_SYMBOL_GPL(spi_populate_width_msg);
+
+int spi_populate_sync_msg(unsigned char *msg, int period, int offset)
+{
+ msg[0] = EXTENDED_MESSAGE;
+ msg[1] = 3;
+ msg[2] = EXTENDED_SDTR;
+ msg[3] = period;
+ msg[4] = offset;
+ return 5;
+}
+EXPORT_SYMBOL_GPL(spi_populate_sync_msg);
+
+int spi_populate_ppr_msg(unsigned char *msg, int period, int offset,
+ int width, int options)
+{
+ msg[0] = EXTENDED_MESSAGE;
+ msg[1] = 6;
+ msg[2] = EXTENDED_PPR;
+ msg[3] = period;
+ msg[4] = 0;
+ msg[5] = offset;
+ msg[6] = width;
+ msg[7] = options;
+ return 8;
+}
+EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
+
+/**
+ * spi_populate_tag_msg - place a tag message in a buffer
+ * @msg: pointer to the area to place the tag
+ * @cmd: pointer to the scsi command for the tag
+ *
+ * Notes:
+ * designed to create the correct type of tag message for the
+ * particular request. Returns the size of the tag message.
+ * May return 0 if TCQ is disabled for this device.
+ **/
+int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
+{
+ if (cmd->flags & SCMD_TAGGED) {
+ *msg++ = SIMPLE_QUEUE_TAG;
+ *msg++ = cmd->request->tag;
+ return 2;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_populate_tag_msg);
+
+#ifdef CONFIG_SCSI_CONSTANTS
+static const char * const one_byte_msgs[] = {
+/* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers",
+/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error",
+/* 0x06 */ "Abort Task Set", "Message Reject", "Nop", "Message Parity Error",
+/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
+/* 0x0c */ "Target Reset", "Abort Task", "Clear Task Set",
+/* 0x0f */ "Initiate Recovery", "Release Recovery",
+/* 0x11 */ "Terminate Process", "Continue Task", "Target Transfer Disable",
+/* 0x14 */ NULL, NULL, "Clear ACA", "LUN Reset"
+};
+
+static const char * const two_byte_msgs[] = {
+/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag",
+/* 0x23 */ "Ignore Wide Residue", "ACA"
+};
+
+static const char * const extended_msgs[] = {
+/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
+/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request",
+/* 0x04 */ "Parallel Protocol Request", "Modify Bidirectional Data Pointer"
+};
+
+static void print_nego(const unsigned char *msg, int per, int off, int width)
+{
+ if (per) {
+ char buf[20];
+ period_to_str(buf, msg[per]);
+ printk("period = %s ns ", buf);
+ }
+
+ if (off)
+ printk("offset = %d ", msg[off]);
+ if (width)
+ printk("width = %d ", 8 << msg[width]);
+}
+
+static void print_ptr(const unsigned char *msg, int msb, const char *desc)
+{
+ int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) |
+ msg[msb+3];
+ printk("%s = %d ", desc, ptr);
+}
+
+int spi_print_msg(const unsigned char *msg)
+{
+ int len = 1, i;
+ if (msg[0] == EXTENDED_MESSAGE) {
+ len = 2 + msg[1];
+ if (len == 2)
+ len += 256;
+ if (msg[2] < ARRAY_SIZE(extended_msgs))
+ printk ("%s ", extended_msgs[msg[2]]);
+ else
+ printk ("Extended Message, reserved code (0x%02x) ",
+ (int) msg[2]);
+ switch (msg[2]) {
+ case EXTENDED_MODIFY_DATA_POINTER:
+ print_ptr(msg, 3, "pointer");
+ break;
+ case EXTENDED_SDTR:
+ print_nego(msg, 3, 4, 0);
+ break;
+ case EXTENDED_WDTR:
+ print_nego(msg, 0, 0, 3);
+ break;
+ case EXTENDED_PPR:
+ print_nego(msg, 3, 5, 6);
+ break;
+ case EXTENDED_MODIFY_BIDI_DATA_PTR:
+ print_ptr(msg, 3, "out");
+ print_ptr(msg, 7, "in");
+ break;
+ default:
+ for (i = 2; i < len; ++i)
+ printk("%02x ", msg[i]);
+ }
+ /* Identify */
+ } else if (msg[0] & 0x80) {
+ printk("Identify disconnect %sallowed %s %d ",
+ (msg[0] & 0x40) ? "" : "not ",
+ (msg[0] & 0x20) ? "target routine" : "lun",
+ msg[0] & 0x7);
+ /* Normal One byte */
+ } else if (msg[0] < 0x1f) {
+ if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]])
+ printk("%s ", one_byte_msgs[msg[0]]);
+ else
+ printk("reserved (%02x) ", msg[0]);
+ } else if (msg[0] == 0x55) {
+ printk("QAS Request ");
+ /* Two byte */
+ } else if (msg[0] <= 0x2f) {
+ if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs))
+ printk("%s %02x ", two_byte_msgs[msg[0] - 0x20],
+ msg[1]);
+ else
+ printk("reserved two byte (%02x %02x) ",
+ msg[0], msg[1]);
+ len = 2;
+ } else
+ printk("reserved ");
+ return len;
+}
+EXPORT_SYMBOL(spi_print_msg);
+
+#else /* ifndef CONFIG_SCSI_CONSTANTS */
+
+int spi_print_msg(const unsigned char *msg)
+{
+ int len = 1, i;
+
+ if (msg[0] == EXTENDED_MESSAGE) {
+ len = 2 + msg[1];
+ if (len == 2)
+ len += 256;
+ for (i = 0; i < len; ++i)
+ printk("%02x ", msg[i]);
+ /* Identify */
+ } else if (msg[0] & 0x80) {
+ printk("%02x ", msg[0]);
+ /* Normal One byte */
+ } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) {
+ printk("%02x ", msg[0]);
+ /* Two byte */
+ } else if (msg[0] <= 0x2f) {
+ printk("%02x %02x", msg[0], msg[1]);
+ len = 2;
+ } else
+ printk("%02x ", msg[0]);
+ return len;
+}
+EXPORT_SYMBOL(spi_print_msg);
+#endif /* ! CONFIG_SCSI_CONSTANTS */
+
+static int spi_device_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost;
+ struct spi_internal *i;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+ shost = sdev->host;
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &spi_host_class.class)
+ return 0;
+ /* Note: this class has no device attributes, so it has
+ * no per-HBA allocation and thus we don't need to distinguish
+ * the attribute containers for the device */
+ i = to_spi_internal(shost->transportt);
+ if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target))
+ return 0;
+ return 1;
+}
+
+static int spi_target_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct scsi_target *starget;
+ struct spi_internal *i;
+
+ if (!scsi_is_target_device(dev))
+ return 0;
+
+ shost = dev_to_shost(dev->parent);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &spi_host_class.class)
+ return 0;
+
+ i = to_spi_internal(shost->transportt);
+ starget = to_scsi_target(dev);
+
+ if (i->f->deny_binding && i->f->deny_binding(starget))
+ return 0;
+
+ return &i->t.target_attrs.ac == cont;
+}
+
+static DECLARE_TRANSPORT_CLASS(spi_transport_class,
+ "spi_transport",
+ spi_setup_transport_attrs,
+ NULL,
+ spi_target_configure);
+
+static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
+ spi_device_match,
+ spi_device_configure);
+
+static struct attribute *host_attributes[] = {
+ &dev_attr_signalling.attr,
+ &dev_attr_host_width.attr,
+ &dev_attr_hba_id.attr,
+ NULL
+};
+
+static struct attribute_group host_attribute_group = {
+ .attrs = host_attributes,
+};
+
+static int spi_host_configure(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev)
+{
+ struct kobject *kobj = &cdev->kobj;
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct spi_internal *si = to_spi_internal(shost->transportt);
+ struct attribute *attr = &dev_attr_signalling.attr;
+ int rc = 0;
+
+ if (si->f->set_signalling)
+ rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
+
+ return rc;
+}
+
+/* returns true if we should be showing the variable. Also
+ * overloads the return by setting 1<<1 if the attribute should
+ * be writeable */
+#define TARGET_ATTRIBUTE_HELPER(name) \
+ (si->f->show_##name ? S_IRUGO : 0) | \
+ (si->f->set_##name ? S_IWUSR : 0)
+
+static umode_t target_attribute_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct scsi_target *starget = transport_class_to_starget(cdev);
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct spi_internal *si = to_spi_internal(shost->transportt);
+
+ if (attr == &dev_attr_period.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(period);
+ else if (attr == &dev_attr_min_period.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(period);
+ else if (attr == &dev_attr_offset.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(offset);
+ else if (attr == &dev_attr_max_offset.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(offset);
+ else if (attr == &dev_attr_width.attr &&
+ spi_support_wide(starget))
+ return TARGET_ATTRIBUTE_HELPER(width);
+ else if (attr == &dev_attr_max_width.attr &&
+ spi_support_wide(starget))
+ return TARGET_ATTRIBUTE_HELPER(width);
+ else if (attr == &dev_attr_iu.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(iu);
+ else if (attr == &dev_attr_max_iu.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(iu);
+ else if (attr == &dev_attr_dt.attr &&
+ spi_support_dt(starget))
+ return TARGET_ATTRIBUTE_HELPER(dt);
+ else if (attr == &dev_attr_qas.attr &&
+ spi_support_qas(starget))
+ return TARGET_ATTRIBUTE_HELPER(qas);
+ else if (attr == &dev_attr_max_qas.attr &&
+ spi_support_qas(starget))
+ return TARGET_ATTRIBUTE_HELPER(qas);
+ else if (attr == &dev_attr_wr_flow.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(wr_flow);
+ else if (attr == &dev_attr_rd_strm.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(rd_strm);
+ else if (attr == &dev_attr_rti.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(rti);
+ else if (attr == &dev_attr_pcomp_en.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(pcomp_en);
+ else if (attr == &dev_attr_hold_mcs.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(hold_mcs);
+ else if (attr == &dev_attr_revalidate.attr)
+ return S_IWUSR;
+
+ return 0;
+}
+
+static struct attribute *target_attributes[] = {
+ &dev_attr_period.attr,
+ &dev_attr_min_period.attr,
+ &dev_attr_offset.attr,
+ &dev_attr_max_offset.attr,
+ &dev_attr_width.attr,
+ &dev_attr_max_width.attr,
+ &dev_attr_iu.attr,
+ &dev_attr_max_iu.attr,
+ &dev_attr_dt.attr,
+ &dev_attr_qas.attr,
+ &dev_attr_max_qas.attr,
+ &dev_attr_wr_flow.attr,
+ &dev_attr_rd_strm.attr,
+ &dev_attr_rti.attr,
+ &dev_attr_pcomp_en.attr,
+ &dev_attr_hold_mcs.attr,
+ &dev_attr_revalidate.attr,
+ NULL
+};
+
+static struct attribute_group target_attribute_group = {
+ .attrs = target_attributes,
+ .is_visible = target_attribute_is_visible,
+};
+
+static int spi_target_configure(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev)
+{
+ struct kobject *kobj = &cdev->kobj;
+
+ /* force an update based on parameters read from the device */
+ sysfs_update_group(kobj, &target_attribute_group);
+
+ return 0;
+}
+
+struct scsi_transport_template *
+spi_attach_transport(struct spi_function_template *ft)
+{
+ struct spi_internal *i = kzalloc(sizeof(struct spi_internal),
+ GFP_KERNEL);
+
+ if (unlikely(!i))
+ return NULL;
+
+ i->t.target_attrs.ac.class = &spi_transport_class.class;
+ i->t.target_attrs.ac.grp = &target_attribute_group;
+ i->t.target_attrs.ac.match = spi_target_match;
+ transport_container_register(&i->t.target_attrs);
+ i->t.target_size = sizeof(struct spi_transport_attrs);
+ i->t.host_attrs.ac.class = &spi_host_class.class;
+ i->t.host_attrs.ac.grp = &host_attribute_group;
+ i->t.host_attrs.ac.match = spi_host_match;
+ transport_container_register(&i->t.host_attrs);
+ i->t.host_size = sizeof(struct spi_host_attrs);
+ i->f = ft;
+
+ return &i->t;
+}
+EXPORT_SYMBOL(spi_attach_transport);
+
+void spi_release_transport(struct scsi_transport_template *t)
+{
+ struct spi_internal *i = to_spi_internal(t);
+
+ transport_container_unregister(&i->t.target_attrs);
+ transport_container_unregister(&i->t.host_attrs);
+
+ kfree(i);
+}
+EXPORT_SYMBOL(spi_release_transport);
+
+static __init int spi_transport_init(void)
+{
+ int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI,
+ "SCSI Parallel Transport Class");
+ if (!error) {
+ int i;
+
+ for (i = 0; spi_static_device_list[i].vendor; i++)
+ scsi_dev_info_list_add_keyed(1, /* compatible */
+ spi_static_device_list[i].vendor,
+ spi_static_device_list[i].model,
+ NULL,
+ spi_static_device_list[i].flags,
+ SCSI_DEVINFO_SPI);
+ }
+
+ error = transport_class_register(&spi_transport_class);
+ if (error)
+ return error;
+ error = anon_transport_class_register(&spi_device_class);
+ return transport_class_register(&spi_host_class);
+}
+
+static void __exit spi_transport_exit(void)
+{
+ transport_class_unregister(&spi_transport_class);
+ anon_transport_class_unregister(&spi_device_class);
+ transport_class_unregister(&spi_host_class);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
+}
+
+MODULE_AUTHOR("Martin Hicks");
+MODULE_DESCRIPTION("SPI Transport Attributes");
+MODULE_LICENSE("GPL");
+
+module_init(spi_transport_init);
+module_exit(spi_transport_exit);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
new file mode 100644
index 000000000..f115f67a6
--- /dev/null
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -0,0 +1,937 @@
+/*
+ * SCSI RDMA (SRP) transport class
+ *
+ * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_srp.h>
+#include "scsi_priv.h"
+
+struct srp_host_attrs {
+ atomic_t next_port_id;
+};
+#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
+
+#define SRP_HOST_ATTRS 0
+#define SRP_RPORT_ATTRS 8
+
+struct srp_internal {
+ struct scsi_transport_template t;
+ struct srp_function_template *f;
+
+ struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];
+
+ struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
+ struct transport_container rport_attr_cont;
+};
+
+#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
+
+#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
+#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
+static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
+{
+ return dev_to_shost(r->dev.parent);
+}
+
+/**
+ * srp_tmo_valid() - check timeout combination validity
+ * @reconnect_delay: Reconnect delay in seconds.
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
+ * @dev_loss_tmo: Device loss timeout in seconds.
+ *
+ * The combination of the timeout parameters must be such that SCSI commands
+ * are finished in a reasonable time. Hence do not allow the fast I/O fail
+ * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
+ * exceed that limit if failing I/O fast has been disabled. Furthermore, these
+ * parameters must be such that multipath can detect failed paths timely.
+ * Hence do not allow all three parameters to be disabled simultaneously.
+ */
+int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
+{
+ if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
+ return -EINVAL;
+ if (reconnect_delay == 0)
+ return -EINVAL;
+ if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
+ if (fast_io_fail_tmo < 0 &&
+ dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
+ if (dev_loss_tmo >= LONG_MAX / HZ)
+ return -EINVAL;
+ if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
+ fast_io_fail_tmo >= dev_loss_tmo)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(srp_tmo_valid);
+
+static int srp_host_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
+
+ atomic_set(&srp_host->next_port_id, 0);
+ return 0;
+}
+
+static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
+ NULL, NULL);
+
+static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
+ NULL, NULL, NULL);
+
+#define SRP_PID(p) \
+ (p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
+ (p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
+ (p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
+ (p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
+
+#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
+
+static ssize_t
+show_srp_rport_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
+}
+
+static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
+
+static const struct {
+ u32 value;
+ char *name;
+} srp_rport_role_names[] = {
+ {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
+ {SRP_RPORT_ROLE_TARGET, "SRP Target"},
+};
+
+static ssize_t
+show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
+ if (srp_rport_role_names[i].value == rport->roles) {
+ name = srp_rport_role_names[i].name;
+ break;
+ }
+ return sprintf(buf, "%s\n", name ? : "unknown");
+}
+
+static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);
+
+static ssize_t store_srp_rport_delete(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ if (i->f->rport_delete) {
+ i->f->rport_delete(rport);
+ return count;
+ } else {
+ return -ENOSYS;
+ }
+}
+
+static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
+
+static ssize_t show_srp_rport_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ static const char *const state_name[] = {
+ [SRP_RPORT_RUNNING] = "running",
+ [SRP_RPORT_BLOCKED] = "blocked",
+ [SRP_RPORT_FAIL_FAST] = "fail-fast",
+ [SRP_RPORT_LOST] = "lost",
+ };
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ enum srp_rport_state state = rport->state;
+
+ return sprintf(buf, "%s\n",
+ (unsigned)state < ARRAY_SIZE(state_name) ?
+ state_name[state] : "???");
+}
+
+static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);
+
+static ssize_t srp_show_tmo(char *buf, int tmo)
+{
+ return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
+}
+
+static int srp_parse_tmo(int *tmo, const char *buf)
+{
+ int res = 0;
+
+ if (strncmp(buf, "off", 3) != 0)
+ res = kstrtoint(buf, 0, tmo);
+ else
+ *tmo = -1;
+
+ return res;
+}
+
+static ssize_t show_reconnect_delay(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->reconnect_delay);
+}
+
+static ssize_t store_reconnect_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, const size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res, delay;
+
+ res = srp_parse_tmo(&delay, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
+ rport->dev_loss_tmo);
+ if (res)
+ goto out;
+
+ if (rport->reconnect_delay <= 0 && delay > 0 &&
+ rport->state != SRP_RPORT_RUNNING) {
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ delay * HZ);
+ } else if (delay <= 0) {
+ cancel_delayed_work(&rport->reconnect_work);
+ }
+ rport->reconnect_delay = delay;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
+ store_reconnect_delay);
+
+static ssize_t show_failed_reconnects(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return sprintf(buf, "%d\n", rport->failed_reconnects);
+}
+
+static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);
+
+static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->fast_io_fail_tmo);
+}
+
+static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res;
+ int fast_io_fail_tmo;
+
+ res = srp_parse_tmo(&fast_io_fail_tmo, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
+ rport->dev_loss_tmo);
+ if (res)
+ goto out;
+ rport->fast_io_fail_tmo = fast_io_fail_tmo;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ show_srp_rport_fast_io_fail_tmo,
+ store_srp_rport_fast_io_fail_tmo);
+
+static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+
+ return srp_show_tmo(buf, rport->dev_loss_tmo);
+}
+
+static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ int res;
+ int dev_loss_tmo;
+
+ res = srp_parse_tmo(&dev_loss_tmo, buf);
+ if (res)
+ goto out;
+ res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
+ dev_loss_tmo);
+ if (res)
+ goto out;
+ rport->dev_loss_tmo = dev_loss_tmo;
+ res = count;
+
+out:
+ return res;
+}
+
+static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_srp_rport_dev_loss_tmo,
+ store_srp_rport_dev_loss_tmo);
+
+static int srp_rport_set_state(struct srp_rport *rport,
+ enum srp_rport_state new_state)
+{
+ enum srp_rport_state old_state = rport->state;
+
+ lockdep_assert_held(&rport->mutex);
+
+ switch (new_state) {
+ case SRP_RPORT_RUNNING:
+ switch (old_state) {
+ case SRP_RPORT_LOST:
+ goto invalid;
+ default:
+ break;
+ }
+ break;
+ case SRP_RPORT_BLOCKED:
+ switch (old_state) {
+ case SRP_RPORT_RUNNING:
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case SRP_RPORT_FAIL_FAST:
+ switch (old_state) {
+ case SRP_RPORT_LOST:
+ goto invalid;
+ default:
+ break;
+ }
+ break;
+ case SRP_RPORT_LOST:
+ break;
+ }
+ rport->state = new_state;
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+/**
+ * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
+ * @work: Work structure used for scheduling this operation.
+ */
+static void srp_reconnect_work(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, reconnect_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ int delay, res;
+
+ res = srp_reconnect_rport(rport);
+ if (res != 0) {
+ shost_printk(KERN_ERR, shost,
+ "reconnect attempt %d failed (%d)\n",
+ ++rport->failed_reconnects, res);
+ delay = rport->reconnect_delay *
+ min(100, max(1, rport->failed_reconnects - 10));
+ if (delay > 0)
+ queue_delayed_work(system_long_wq,
+ &rport->reconnect_work, delay * HZ);
+ }
+}
+
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
+ *
+ * To do: add support for scsi-mq in this function.
+ */
+static int scsi_request_fn_active(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ int request_fn_active = 0;
+
+ shost_for_each_device(sdev, shost) {
+ q = sdev->request_queue;
+
+ spin_lock_irq(q->queue_lock);
+ request_fn_active += q->request_fn_active;
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return request_fn_active;
+}
+
+/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
+static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
+{
+ while (scsi_request_fn_active(shost))
+ msleep(20);
+}
+
+static void __rport_fail_io_fast(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i;
+
+ lockdep_assert_held(&rport->mutex);
+
+ if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
+ return;
+ scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
+
+ /* Involve the LLD if possible to terminate all I/O on the rport. */
+ i = to_srp_internal(shost->transportt);
+ if (i->f->terminate_rport_io) {
+ srp_wait_for_queuecommand(shost);
+ i->f->terminate_rport_io(rport);
+ }
+}
+
+/**
+ * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
+ * @work: Work structure used for scheduling this operation.
+ */
+static void rport_fast_io_fail_timedout(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, fast_io_fail_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+
+ pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
+ dev_name(&rport->dev), dev_name(&shost->shost_gendev));
+
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ mutex_unlock(&rport->mutex);
+}
+
+/**
+ * rport_dev_loss_timedout() - device loss timeout handler
+ * @work: Work structure used for scheduling this operation.
+ */
+static void rport_dev_loss_timedout(struct work_struct *work)
+{
+ struct srp_rport *rport = container_of(to_delayed_work(work),
+ struct srp_rport, dev_loss_work);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
+ dev_name(&rport->dev), dev_name(&shost->shost_gendev));
+
+ mutex_lock(&rport->mutex);
+ WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
+ scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
+ mutex_unlock(&rport->mutex);
+
+ i->f->rport_delete(rport);
+}
+
+static void __srp_start_tl_fail_timers(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ int delay, fast_io_fail_tmo, dev_loss_tmo;
+
+ lockdep_assert_held(&rport->mutex);
+
+ delay = rport->reconnect_delay;
+ fast_io_fail_tmo = rport->fast_io_fail_tmo;
+ dev_loss_tmo = rport->dev_loss_tmo;
+ pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
+
+ if (rport->state == SRP_RPORT_LOST)
+ return;
+ if (delay > 0)
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ 1UL * delay * HZ);
+ if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
+ srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
+ pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
+ scsi_target_block(&shost->shost_gendev);
+ if (fast_io_fail_tmo >= 0)
+ queue_delayed_work(system_long_wq,
+ &rport->fast_io_fail_work,
+ 1UL * fast_io_fail_tmo * HZ);
+ if (dev_loss_tmo >= 0)
+ queue_delayed_work(system_long_wq,
+ &rport->dev_loss_work,
+ 1UL * dev_loss_tmo * HZ);
+ }
+}
+
+/**
+ * srp_start_tl_fail_timers() - start the transport layer failure timers
+ * @rport: SRP target port.
+ *
+ * Start the transport layer fast I/O failure and device loss timers. Do not
+ * modify a timer that was already started.
+ */
+void srp_start_tl_fail_timers(struct srp_rport *rport)
+{
+ mutex_lock(&rport->mutex);
+ __srp_start_tl_fail_timers(rport);
+ mutex_unlock(&rport->mutex);
+}
+EXPORT_SYMBOL(srp_start_tl_fail_timers);
+
+/**
+ * srp_reconnect_rport() - reconnect to an SRP target port
+ * @rport: SRP target port.
+ *
+ * Blocks SCSI command queueing before invoking reconnect() such that
+ * queuecommand() won't be invoked concurrently with reconnect() from outside
+ * the SCSI EH. This is important since a reconnect() implementation may
+ * reallocate resources needed by queuecommand().
+ *
+ * Notes:
+ * - This function neither waits until outstanding requests have finished nor
+ * tries to abort these. It is the responsibility of the reconnect()
+ * function to finish outstanding commands before reconnecting to the target
+ * port.
+ * - It is the responsibility of the caller to ensure that the resources
+ * reallocated by the reconnect() function won't be used while this function
+ * is in progress. One possible strategy is to invoke this function from
+ * the context of the SCSI EH thread only. Another possible strategy is to
+ * lock the rport mutex inside each SCSI LLD callback that can be invoked by
+ * the SCSI EH (the scsi_host_template.eh_*() functions and also the
+ * scsi_host_template.queuecommand() function).
+ */
+int srp_reconnect_rport(struct srp_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+ struct scsi_device *sdev;
+ int res;
+
+ pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));
+
+ res = mutex_lock_interruptible(&rport->mutex);
+ if (res)
+ goto out;
+ scsi_target_block(&shost->shost_gendev);
+ srp_wait_for_queuecommand(shost);
+ res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
+ pr_debug("%s (state %d): transport.reconnect() returned %d\n",
+ dev_name(&shost->shost_gendev), rport->state, res);
+ if (res == 0) {
+ cancel_delayed_work(&rport->fast_io_fail_work);
+ cancel_delayed_work(&rport->dev_loss_work);
+
+ rport->failed_reconnects = 0;
+ srp_rport_set_state(rport, SRP_RPORT_RUNNING);
+ scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
+ /*
+ * If the SCSI error handler has offlined one or more devices,
+ * invoking scsi_target_unblock() won't change the state of
+ * these devices into running so do that explicitly.
+ */
+ spin_lock_irq(shost->host_lock);
+ __shost_for_each_device(sdev, shost)
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ sdev->sdev_state = SDEV_RUNNING;
+ spin_unlock_irq(shost->host_lock);
+ } else if (rport->state == SRP_RPORT_RUNNING) {
+ /*
+ * srp_reconnect_rport() has been invoked with fast_io_fail
+ * and dev_loss off. Mark the port as failed and start the TL
+ * failure timers if these had not yet been started.
+ */
+ __rport_fail_io_fast(rport);
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ __srp_start_tl_fail_timers(rport);
+ } else if (rport->state != SRP_RPORT_BLOCKED) {
+ scsi_target_unblock(&shost->shost_gendev,
+ SDEV_TRANSPORT_OFFLINE);
+ }
+ mutex_unlock(&rport->mutex);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL(srp_reconnect_rport);
+
+/**
+ * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
+ * @scmd: SCSI command.
+ *
+ * If a timeout occurs while an rport is in the blocked state, ask the SCSI
+ * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
+ * handle the timeout (BLK_EH_NOT_HANDLED).
+ *
+ * Note: This function is called from soft-IRQ context and with the request
+ * queue lock held.
+ */
+static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
+ return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
+ BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
+static void srp_rport_release(struct device *dev)
+{
+ struct srp_rport *rport = dev_to_rport(dev);
+
+ put_device(dev->parent);
+ kfree(rport);
+}
+
+static int scsi_is_srp_rport(const struct device *dev)
+{
+ return dev->release == srp_rport_release;
+}
+
+static int srp_rport_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct srp_internal *i;
+
+ if (!scsi_is_srp_rport(dev))
+ return 0;
+
+ shost = dev_to_shost(dev->parent);
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
+ return 0;
+
+ i = to_srp_internal(shost->transportt);
+ return &i->rport_attr_cont.ac == cont;
+}
+
+static int srp_host_match(struct attribute_container *cont, struct device *dev)
+{
+ struct Scsi_Host *shost;
+ struct srp_internal *i;
+
+ if (!scsi_is_host_device(dev))
+ return 0;
+
+ shost = dev_to_shost(dev);
+ if (!shost->transportt)
+ return 0;
+ if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
+ return 0;
+
+ i = to_srp_internal(shost->transportt);
+ return &i->t.host_attrs.ac == cont;
+}
+
+/**
+ * srp_rport_get() - increment rport reference count
+ * @rport: SRP target port.
+ */
+void srp_rport_get(struct srp_rport *rport)
+{
+ get_device(&rport->dev);
+}
+EXPORT_SYMBOL(srp_rport_get);
+
+/**
+ * srp_rport_put() - decrement rport reference count
+ * @rport: SRP target port.
+ */
+void srp_rport_put(struct srp_rport *rport)
+{
+ put_device(&rport->dev);
+}
+EXPORT_SYMBOL(srp_rport_put);
+
+/**
+ * srp_rport_add - add a SRP remote port to the device hierarchy
+ * @shost: scsi host the remote port is connected to.
+ * @ids: The port id for the remote port.
+ *
+ * Publishes a port to the rest of the system.
+ */
+struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
+ struct srp_rport_identifiers *ids)
+{
+ struct srp_rport *rport;
+ struct device *parent = &shost->shost_gendev;
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+ int id, ret;
+
+ rport = kzalloc(sizeof(*rport), GFP_KERNEL);
+ if (!rport)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&rport->mutex);
+
+ device_initialize(&rport->dev);
+
+ rport->dev.parent = get_device(parent);
+ rport->dev.release = srp_rport_release;
+
+ memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
+ rport->roles = ids->roles;
+
+ if (i->f->reconnect)
+ rport->reconnect_delay = i->f->reconnect_delay ?
+ *i->f->reconnect_delay : 10;
+ INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
+ rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
+ *i->f->fast_io_fail_tmo : 15;
+ rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
+ INIT_DELAYED_WORK(&rport->fast_io_fail_work,
+ rport_fast_io_fail_timedout);
+ INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
+
+ id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
+ dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
+
+ transport_setup_device(&rport->dev);
+
+ ret = device_add(&rport->dev);
+ if (ret) {
+ transport_destroy_device(&rport->dev);
+ put_device(&rport->dev);
+ return ERR_PTR(ret);
+ }
+
+ transport_add_device(&rport->dev);
+ transport_configure_device(&rport->dev);
+
+ return rport;
+}
+EXPORT_SYMBOL_GPL(srp_rport_add);
+
+/**
+ * srp_rport_del - remove a SRP remote port
+ * @rport: SRP remote port to remove
+ *
+ * Removes the specified SRP remote port.
+ */
+void srp_rport_del(struct srp_rport *rport)
+{
+ struct device *dev = &rport->dev;
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(srp_rport_del);
+
+static int do_srp_rport_del(struct device *dev, void *data)
+{
+ if (scsi_is_srp_rport(dev))
+ srp_rport_del(dev_to_rport(dev));
+ return 0;
+}
+
+/**
+ * srp_remove_host - tear down a Scsi_Host's SRP data structures
+ * @shost: Scsi Host that is torn down
+ *
+ * Removes all SRP remote ports for a given Scsi_Host.
+ * Must be called just before scsi_remove_host for SRP HBAs.
+ */
+void srp_remove_host(struct Scsi_Host *shost)
+{
+ device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
+}
+EXPORT_SYMBOL_GPL(srp_remove_host);
+
+/**
+ * srp_stop_rport_timers - stop the transport layer recovery timers
+ * @rport: SRP remote port for which to stop the timers.
+ *
+ * Must be called after srp_remove_host() and scsi_remove_host(). The caller
+ * must hold a reference on the rport (rport->dev) and on the SCSI host
+ * (rport->dev.parent).
+ */
+void srp_stop_rport_timers(struct srp_rport *rport)
+{
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ srp_rport_set_state(rport, SRP_RPORT_LOST);
+ mutex_unlock(&rport->mutex);
+
+ cancel_delayed_work_sync(&rport->reconnect_work);
+ cancel_delayed_work_sync(&rport->fast_io_fail_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+}
+EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
+
+static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
+ int result)
+{
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+ return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
+}
+
+static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
+{
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+ return i->f->it_nexus_response(shost, nexus, result);
+}
+
+/**
+ * srp_attach_transport - instantiate SRP transport template
+ * @ft: SRP transport class function template
+ */
+struct scsi_transport_template *
+srp_attach_transport(struct srp_function_template *ft)
+{
+ int count;
+ struct srp_internal *i;
+
+ i = kzalloc(sizeof(*i), GFP_KERNEL);
+ if (!i)
+ return NULL;
+
+ i->t.eh_timed_out = srp_timed_out;
+
+ i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
+ i->t.it_nexus_response = srp_it_nexus_response;
+
+ i->t.host_size = sizeof(struct srp_host_attrs);
+ i->t.host_attrs.ac.attrs = &i->host_attrs[0];
+ i->t.host_attrs.ac.class = &srp_host_class.class;
+ i->t.host_attrs.ac.match = srp_host_match;
+ i->host_attrs[0] = NULL;
+ transport_container_register(&i->t.host_attrs);
+
+ i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
+ i->rport_attr_cont.ac.class = &srp_rport_class.class;
+ i->rport_attr_cont.ac.match = srp_rport_match;
+
+ count = 0;
+ i->rport_attrs[count++] = &dev_attr_port_id;
+ i->rport_attrs[count++] = &dev_attr_roles;
+ if (ft->has_rport_state) {
+ i->rport_attrs[count++] = &dev_attr_state;
+ i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
+ i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
+ }
+ if (ft->reconnect) {
+ i->rport_attrs[count++] = &dev_attr_reconnect_delay;
+ i->rport_attrs[count++] = &dev_attr_failed_reconnects;
+ }
+ if (ft->rport_delete)
+ i->rport_attrs[count++] = &dev_attr_delete;
+ i->rport_attrs[count++] = NULL;
+ BUG_ON(count > ARRAY_SIZE(i->rport_attrs));
+
+ transport_container_register(&i->rport_attr_cont);
+
+ i->f = ft;
+
+ return &i->t;
+}
+EXPORT_SYMBOL_GPL(srp_attach_transport);
+
+/**
+ * srp_release_transport - release SRP transport template instance
+ * @t: transport template instance
+ */
+void srp_release_transport(struct scsi_transport_template *t)
+{
+ struct srp_internal *i = to_srp_internal(t);
+
+ transport_container_unregister(&i->t.host_attrs);
+ transport_container_unregister(&i->rport_attr_cont);
+
+ kfree(i);
+}
+EXPORT_SYMBOL_GPL(srp_release_transport);
+
+static __init int srp_transport_init(void)
+{
+ int ret;
+
+ ret = transport_class_register(&srp_host_class);
+ if (ret)
+ return ret;
+ ret = transport_class_register(&srp_rport_class);
+ if (ret)
+ goto unregister_host_class;
+
+ return 0;
+unregister_host_class:
+ transport_class_unregister(&srp_host_class);
+ return ret;
+}
+
+static void __exit srp_transport_exit(void)
+{
+ transport_class_unregister(&srp_host_class);
+ transport_class_unregister(&srp_rport_class);
+}
+
+MODULE_AUTHOR("FUJITA Tomonori");
+MODULE_DESCRIPTION("SRP Transport Attributes");
+MODULE_LICENSE("GPL");
+
+module_init(srp_transport_init);
+module_exit(srp_transport_exit);
diff --git a/drivers/scsi/scsi_typedefs.h b/drivers/scsi/scsi_typedefs.h
new file mode 100644
index 000000000..2ed4c5cb7
--- /dev/null
+++ b/drivers/scsi/scsi_typedefs.h
@@ -0,0 +1,2 @@
+
+typedef struct scsi_cmnd Scsi_Cmnd;
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
new file mode 100644
index 000000000..910f4a7a3
--- /dev/null
+++ b/drivers/scsi/scsicam.c
@@ -0,0 +1,259 @@
+/*
+ * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsicam.h>
+
+
+static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds,
+ unsigned int *secs);
+
+/**
+ * scsi_bios_ptable - Read PC partition table out of first sector of device.
+ * @dev: from this device
+ *
+ * Description: Reads the first sector from the device and returns %0x42 bytes
+ * starting at offset %0x1be.
+ * Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error.
+ */
+unsigned char *scsi_bios_ptable(struct block_device *dev)
+{
+ unsigned char *res = kmalloc(66, GFP_KERNEL);
+ if (res) {
+ struct block_device *bdev = dev->bd_contains;
+ Sector sect;
+ void *data = read_dev_sector(bdev, 0, &sect);
+ if (data) {
+ memcpy(res, data + 0x1be, 66);
+ put_dev_sector(sect);
+ } else {
+ kfree(res);
+ res = NULL;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(scsi_bios_ptable);
+
+/**
+ * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
+ * @bdev: which device
+ * @capacity: size of the disk in sectors
+ * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
+ *
+ * Description : determine the BIOS mapping/geometry used for a drive in a
+ * SCSI-CAM system, storing the results in ip as required
+ * by the HDIO_GETGEO ioctl().
+ *
+ * Returns : -1 on failure, 0 on success.
+ */
+
+int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
+{
+ unsigned char *p;
+ u64 capacity64 = capacity; /* Suppress gcc warning */
+ int ret;
+
+ p = scsi_bios_ptable(bdev);
+ if (!p)
+ return -1;
+
+ /* try to infer mapping from partition table */
+ ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2,
+ (unsigned int *)ip + 0, (unsigned int *)ip + 1);
+ kfree(p);
+
+ if (ret == -1 && capacity64 < (1ULL << 32)) {
+ /* pick some standard mapping with at most 1024 cylinders,
+ and at most 62 sectors per track - this works up to
+ 7905 MB */
+ ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2,
+ (unsigned int *)ip + 0, (unsigned int *)ip + 1);
+ }
+
+ /* if something went wrong, then apparently we have to return
+ a geometry with more than 1024 cylinders */
+ if (ret || ip[0] > 255 || ip[1] > 63) {
+ if ((capacity >> 11) > 65534) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+
+ if (capacity > 65535*63*255)
+ ip[2] = 65535;
+ else
+ ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(scsicam_bios_param);
+
+/**
+ * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
+ * @buf: partition table, see scsi_bios_ptable()
+ * @capacity: size of the disk in sectors
+ * @cyls: put cylinders here
+ * @hds: put heads here
+ * @secs: put sectors here
+ *
+ * Description: determine the BIOS mapping/geometry used to create the partition
+ * table, storing the results in *cyls, *hds, and *secs
+ *
+ * Returns: -1 on failure, 0 on success.
+ */
+
+int scsi_partsize(unsigned char *buf, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds, unsigned int *secs)
+{
+ struct partition *p = (struct partition *)buf, *largest = NULL;
+ int i, largest_cyl;
+ int cyl, ext_cyl, end_head, end_cyl, end_sector;
+ unsigned int logical_end, physical_end, ext_physical_end;
+
+
+ if (*(unsigned short *) (buf + 64) == 0xAA55) {
+ for (largest_cyl = -1, i = 0; i < 4; ++i, ++p) {
+ if (!p->sys_ind)
+ continue;
+#ifdef DEBUG
+ printk("scsicam_bios_param : partition %d has system \n",
+ i);
+#endif
+ cyl = p->cyl + ((p->sector & 0xc0) << 2);
+ if (cyl > largest_cyl) {
+ largest_cyl = cyl;
+ largest = p;
+ }
+ }
+ }
+ if (largest) {
+ end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2);
+ end_head = largest->end_head;
+ end_sector = largest->end_sector & 0x3f;
+
+ if (end_head + 1 == 0 || end_sector == 0)
+ return -1;
+
+#ifdef DEBUG
+ printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n",
+ end_head, end_cyl, end_sector);
+#endif
+
+ physical_end = end_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+ /* This is the actual _sector_ number at the end */
+ logical_end = get_unaligned_le32(&largest->start_sect)
+ + get_unaligned_le32(&largest->nr_sects);
+
+ /* This is for >1023 cylinders */
+ ext_cyl = (logical_end - (end_head * end_sector + end_sector))
+ / (end_head + 1) / end_sector;
+ ext_physical_end = ext_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+#ifdef DEBUG
+ printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n"
+ ,logical_end, physical_end, ext_physical_end, ext_cyl);
+#endif
+
+ if ((logical_end == physical_end) ||
+ (end_cyl == 1023 && ext_physical_end == logical_end)) {
+ *secs = end_sector;
+ *hds = end_head + 1;
+ *cyls = capacity / ((end_head + 1) * end_sector);
+ return 0;
+ }
+#ifdef DEBUG
+ printk("scsicam_bios_param : logical (%u) != physical (%u)\n",
+ logical_end, physical_end);
+#endif
+ }
+ return -1;
+}
+EXPORT_SYMBOL(scsi_partsize);
+
+/*
+ * Function : static int setsize(unsigned long capacity,unsigned int *cyls,
+ * unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine a near-optimal int 0x13 mapping for a
+ * SCSI disk in terms of lost space of size capacity, storing
+ * the results in *cyls, *hds, and *secs.
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ * Extracted from
+ *
+ * WORKING X3T9.2
+ * DRAFT 792D
+ * see http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf
+ *
+ * Revision 6
+ * 10-MAR-94
+ * Information technology -
+ * SCSI-2 Common access method
+ * transport and SCSI interface module
+ *
+ * ANNEX A :
+ *
+ * setsize() converts a read capacity value to int 13h
+ * head-cylinder-sector requirements. It minimizes the value for
+ * number of heads and maximizes the number of cylinders. This
+ * will support rather large disks before the number of heads
+ * will not fit in 4 bits (or 6 bits). This algorithm also
+ * minimizes the number of sectors that will be unused at the end
+ * of the disk while allowing for very large disks to be
+ * accommodated. This algorithm does not use physical geometry.
+ */
+
+static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds,
+ unsigned int *secs)
+{
+ unsigned int rv = 0;
+ unsigned long heads, sectors, cylinders, temp;
+
+ cylinders = 1024L; /* Set number of cylinders to max */
+ sectors = 62L; /* Maximize sectors per track */
+
+ temp = cylinders * sectors; /* Compute divisor for heads */
+ heads = capacity / temp; /* Compute value for number of heads */
+ if (capacity % temp) { /* If no remainder, done! */
+ heads++; /* Else, increment number of heads */
+ temp = cylinders * heads; /* Compute divisor for sectors */
+ sectors = capacity / temp; /* Compute value for sectors per
+ track */
+ if (capacity % temp) { /* If no remainder, done! */
+ sectors++; /* Else, increment number of sectors */
+ temp = heads * sectors; /* Compute divisor for cylinders */
+ cylinders = capacity / temp; /* Compute number of cylinders */
+ }
+ }
+ if (cylinders == 0)
+ rv = (unsigned) -1; /* Give error if 0 cylinders */
+
+ *cyls = (unsigned int) cylinders; /* Stuff return values */
+ *secs = (unsigned int) sectors;
+ *hds = (unsigned int) heads;
+ return (rv);
+}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
new file mode 100644
index 000000000..7f9d65fe4
--- /dev/null
+++ b/drivers/scsi/sd.c
@@ -0,0 +1,3302 @@
+/*
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
+ *
+ * Linux scsi disk driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ * Modification history:
+ * - Drew Eckhardt <drew@colorado.edu> original
+ * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
+ * outstanding request, and other enhancements.
+ * Support loadable low-level scsi drivers.
+ * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
+ * eight major numbers.
+ * - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
+ * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
+ * sd_init and cleanups.
+ * - Alex Davis <letmein@erols.com> Fix problem where partition info
+ * not being read in sd_open. Fix problem where removable media
+ * could be ejected after sd_open.
+ * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
+ * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
+ * <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
+ * Support 32k/1M disks.
+ *
+ * Logging policy (needs CONFIG_SCSI_LOGGING defined):
+ * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
+ * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
+ * - entering sd_ioctl: SCSI_LOG_IOCTL level 1
+ * - entering other commands: SCSI_LOG_HLQUEUE level 3
+ * Note: when the logging level is set by the user, it must be greater
+ * than the level indicated above to trigger output.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/errno.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/string_helpers.h>
+#include <linux/async.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsicam.h>
+
+#include "sd.h"
+#include "scsi_priv.h"
+#include "scsi_logging.h"
+
+MODULE_AUTHOR("Eric Youngdale");
+MODULE_DESCRIPTION("SCSI disk (sd) driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
+
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define SD_MINORS 16
+#else
+#define SD_MINORS 0
+#endif
+
+static void sd_config_discard(struct scsi_disk *, unsigned int);
+static void sd_config_write_same(struct scsi_disk *);
+static int sd_revalidate_disk(struct gendisk *);
+static void sd_unlock_native_capacity(struct gendisk *disk);
+static int sd_probe(struct device *);
+static int sd_remove(struct device *);
+static void sd_shutdown(struct device *);
+static int sd_suspend_system(struct device *);
+static int sd_suspend_runtime(struct device *);
+static int sd_resume(struct device *);
+static void sd_rescan(struct device *);
+static int sd_init_command(struct scsi_cmnd *SCpnt);
+static void sd_uninit_command(struct scsi_cmnd *SCpnt);
+static int sd_done(struct scsi_cmnd *);
+static int sd_eh_action(struct scsi_cmnd *, int);
+static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
+static void scsi_disk_release(struct device *cdev);
+static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
+static void sd_print_result(const struct scsi_disk *, const char *, int);
+
+static DEFINE_SPINLOCK(sd_index_lock);
+static DEFINE_IDA(sd_index_ida);
+
+/* This semaphore is used to mediate the 0->1 reference get in the
+ * face of object destruction (i.e. we can't allow a get on an
+ * object after last put) */
+static DEFINE_MUTEX(sd_ref_mutex);
+
+static struct kmem_cache *sd_cdb_cache;
+static mempool_t *sd_cdb_pool;
+
+static const char *sd_cache_types[] = {
+ "write through", "none", "write back",
+ "write back, no read (daft)"
+};
+
+static void sd_set_flush_flag(struct scsi_disk *sdkp)
+{
+ unsigned flush = 0;
+
+ if (sdkp->WCE) {
+ flush |= REQ_FLUSH;
+ if (sdkp->DPOFUA)
+ flush |= REQ_FUA;
+ }
+
+ blk_queue_flush(sdkp->disk->queue, flush);
+}
+
+static ssize_t
+cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i, ct = -1, rcd, wce, sp;
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ char buffer[64];
+ char *buffer_data;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
+ static const char temp[] = "temporary ";
+ int len;
+
+ if (sdp->type != TYPE_DISK)
+ /* no cache control on RBC devices; theoretically they
+ * can do it, but there's probably so many exceptions
+ * it's not worth the risk */
+ return -EINVAL;
+
+ if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+ buf += sizeof(temp) - 1;
+ sdkp->cache_override = 1;
+ } else {
+ sdkp->cache_override = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
+ len = strlen(sd_cache_types[i]);
+ if (strncmp(sd_cache_types[i], buf, len) == 0 &&
+ buf[len] == '\n') {
+ ct = i;
+ break;
+ }
+ }
+ if (ct < 0)
+ return -EINVAL;
+ rcd = ct & 0x01 ? 1 : 0;
+ wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
+
+ if (sdkp->cache_override) {
+ sdkp->WCE = wce;
+ sdkp->RCD = rcd;
+ sd_set_flush_flag(sdkp);
+ return count;
+ }
+
+ if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, NULL))
+ return -EINVAL;
+ len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
+ data.block_descriptor_length);
+ buffer_data = buffer + data.header_length +
+ data.block_descriptor_length;
+ buffer_data[2] &= ~0x05;
+ buffer_data[2] |= wce << 2 | rcd;
+ sp = buffer_data[0] & 0x80 ? 1 : 0;
+
+ if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, &sshdr)) {
+ if (scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+ return -EINVAL;
+ }
+ revalidate_disk(sdkp->disk);
+ return count;
+}
+
+static ssize_t
+manage_start_stop_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
+}
+
+static ssize_t
+manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_start_stop);
+
+static ssize_t
+allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
+}
+
+static ssize_t
+allow_restart_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+
+ return count;
+}
+static DEVICE_ATTR_RW(allow_restart);
+
+static ssize_t
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ int ct = sdkp->RCD + 2*sdkp->WCE;
+
+ return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
+}
+static DEVICE_ATTR_RW(cache_type);
+
+static ssize_t
+FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
+}
+static DEVICE_ATTR_RO(FUA);
+
+static ssize_t
+protection_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->protection_type);
+}
+
+static ssize_t
+protection_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ unsigned int val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ err = kstrtouint(buf, 10, &val);
+
+ if (err)
+ return err;
+
+ if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
+ sdkp->protection_type = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(protection_type);
+
+static ssize_t
+protection_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ unsigned int dif, dix;
+
+ dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
+ dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
+
+ if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
+ dif = 0;
+ dix = 1;
+ }
+
+ if (!dif && !dix)
+ return snprintf(buf, 20, "none\n");
+
+ return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
+}
+static DEVICE_ATTR_RO(protection_mode);
+
+static ssize_t
+app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->ATO);
+}
+static DEVICE_ATTR_RO(app_tag_own);
+
+static ssize_t
+thin_provisioning_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->lbpme);
+}
+static DEVICE_ATTR_RO(thin_provisioning);
+
+static const char *lbp_mode[] = {
+ [SD_LBP_FULL] = "full",
+ [SD_LBP_UNMAP] = "unmap",
+ [SD_LBP_WS16] = "writesame_16",
+ [SD_LBP_WS10] = "writesame_10",
+ [SD_LBP_ZERO] = "writesame_zero",
+ [SD_LBP_DISABLE] = "disabled",
+};
+
+static ssize_t
+provisioning_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
+}
+
+static ssize_t
+provisioning_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
+ sd_config_discard(sdkp, SD_LBP_ZERO);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(provisioning_mode);
+
+static ssize_t
+max_medium_access_timeouts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
+}
+
+static ssize_t
+max_medium_access_timeouts_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
+
+ return err ? err : count;
+}
+static DEVICE_ATTR_RW(max_medium_access_timeouts);
+
+static ssize_t
+max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+}
+
+static ssize_t
+max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ unsigned long max;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ err = kstrtoul(buf, 10, &max);
+
+ if (err)
+ return err;
+
+ if (max == 0)
+ sdp->no_write_same = 1;
+ else if (max <= SD_MAX_WS16_BLOCKS) {
+ sdp->no_write_same = 0;
+ sdkp->max_ws_blocks = max;
+ }
+
+ sd_config_write_same(sdkp);
+
+ return count;
+}
+static DEVICE_ATTR_RW(max_write_same_blocks);
+
+static struct attribute *sd_disk_attrs[] = {
+ &dev_attr_cache_type.attr,
+ &dev_attr_FUA.attr,
+ &dev_attr_allow_restart.attr,
+ &dev_attr_manage_start_stop.attr,
+ &dev_attr_protection_type.attr,
+ &dev_attr_protection_mode.attr,
+ &dev_attr_app_tag_own.attr,
+ &dev_attr_thin_provisioning.attr,
+ &dev_attr_provisioning_mode.attr,
+ &dev_attr_max_write_same_blocks.attr,
+ &dev_attr_max_medium_access_timeouts.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sd_disk);
+
+static struct class sd_disk_class = {
+ .name = "scsi_disk",
+ .owner = THIS_MODULE,
+ .dev_release = scsi_disk_release,
+ .dev_groups = sd_disk_groups,
+};
+
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend_system,
+ .resume = sd_resume,
+ .poweroff = sd_suspend_system,
+ .restore = sd_resume,
+ .runtime_suspend = sd_suspend_runtime,
+ .runtime_resume = sd_resume,
+};
+
+static struct scsi_driver sd_template = {
+ .gendrv = {
+ .name = "sd",
+ .owner = THIS_MODULE,
+ .probe = sd_probe,
+ .remove = sd_remove,
+ .shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
+ },
+ .rescan = sd_rescan,
+ .init_command = sd_init_command,
+ .uninit_command = sd_uninit_command,
+ .done = sd_done,
+ .eh_action = sd_eh_action,
+};
+
+/*
+ * Dummy kobj_map->probe function.
+ * The default ->probe function will call modprobe, which is
+ * pointless as this module is already loaded.
+ */
+static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
+{
+ return NULL;
+}
+
+/*
+ * Device no to disk mapping:
+ *
+ * major disc2 disc p1
+ * |............|.............|....|....| <- dev_t
+ * 31 20 19 8 7 4 3 0
+ *
+ * Inside a major, we have 16k disks, however mapped non-
+ * contiguously. The first 16 disks are for major0, the next
+ * ones with major1, ... Disk 256 is for major0 again, disk 272
+ * for major1, ...
+ * As we stay compatible with our numbering scheme, we can reuse
+ * the well-know SCSI majors 8, 65--71, 136--143.
+ */
+static int sd_major(int major_idx)
+{
+ switch (major_idx) {
+ case 0:
+ return SCSI_DISK0_MAJOR;
+ case 1 ... 7:
+ return SCSI_DISK1_MAJOR + major_idx - 1;
+ case 8 ... 15:
+ return SCSI_DISK8_MAJOR + major_idx - 8;
+ default:
+ BUG();
+ return 0; /* shut up gcc */
+ }
+}
+
+static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = NULL;
+
+ mutex_lock(&sd_ref_mutex);
+
+ if (disk->private_data) {
+ sdkp = scsi_disk(disk);
+ if (scsi_device_get(sdkp->device) == 0)
+ get_device(&sdkp->dev);
+ else
+ sdkp = NULL;
+ }
+ mutex_unlock(&sd_ref_mutex);
+ return sdkp;
+}
+
+static void scsi_disk_put(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdev = sdkp->device;
+
+ mutex_lock(&sd_ref_mutex);
+ put_device(&sdkp->dev);
+ scsi_device_put(sdev);
+ mutex_unlock(&sd_ref_mutex);
+}
+
+static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
+ unsigned int dix, unsigned int dif)
+{
+ struct bio *bio = scmd->request->bio;
+ unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
+ unsigned int protect = 0;
+
+ if (dix) { /* DIX Type 0, 1, 2, 3 */
+ if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
+ scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
+
+ if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
+ }
+
+ if (dif != SD_DIF_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
+ scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
+
+ if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ scmd->prot_flags |= SCSI_PROT_REF_CHECK;
+ }
+
+ if (dif) { /* DIX/DIF Type 1, 2, 3 */
+ scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
+
+ if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
+ protect = 3 << 5; /* Disable target PI checking */
+ else
+ protect = 1 << 5; /* Enable target PI checking */
+ }
+
+ scsi_set_prot_op(scmd, prot_op);
+ scsi_set_prot_type(scmd, dif);
+ scmd->prot_flags &= sd_prot_flag_mask(prot_op);
+
+ return protect;
+}
+
+static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+ unsigned int max_blocks = 0;
+
+ q->limits.discard_zeroes_data = 0;
+ q->limits.discard_alignment = sdkp->unmap_alignment *
+ logical_block_size;
+ q->limits.discard_granularity =
+ max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
+
+ sdkp->provisioning_mode = mode;
+
+ switch (mode) {
+
+ case SD_LBP_DISABLE:
+ q->limits.max_discard_sectors = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ return;
+
+ case SD_LBP_UNMAP:
+ max_blocks = min_not_zero(sdkp->max_unmap_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
+ break;
+
+ case SD_LBP_WS16:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
+ q->limits.discard_zeroes_data = sdkp->lbprz;
+ break;
+
+ case SD_LBP_WS10:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
+ q->limits.discard_zeroes_data = sdkp->lbprz;
+ break;
+
+ case SD_LBP_ZERO:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
+ q->limits.discard_zeroes_data = 1;
+ break;
+ }
+
+ q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+}
+
+/**
+ * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * @sdp: scsi device to operate one
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on preference
+ * indicated by target device.
+ **/
+static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_device *sdp = cmd->device;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
+ unsigned int len;
+ int ret;
+ char *buf;
+ struct page *page;
+
+ sector >>= ilog2(sdp->sector_size) - 9;
+ nr_sectors >>= ilog2(sdp->sector_size) - 9;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!page)
+ return BLKPREP_DEFER;
+
+ switch (sdkp->provisioning_mode) {
+ case SD_LBP_UNMAP:
+ buf = page_address(page);
+
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = UNMAP;
+ cmd->cmnd[8] = 24;
+
+ put_unaligned_be16(6 + 16, &buf[0]);
+ put_unaligned_be16(16, &buf[2]);
+ put_unaligned_be64(sector, &buf[8]);
+ put_unaligned_be32(nr_sectors, &buf[16]);
+
+ len = 24;
+ break;
+
+ case SD_LBP_WS16:
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = WRITE_SAME_16;
+ cmd->cmnd[1] = 0x8; /* UNMAP */
+ put_unaligned_be64(sector, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+
+ len = sdkp->device->sector_size;
+ break;
+
+ case SD_LBP_WS10:
+ case SD_LBP_ZERO:
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = WRITE_SAME;
+ if (sdkp->provisioning_mode == SD_LBP_WS10)
+ cmd->cmnd[1] = 0x8; /* UNMAP */
+ put_unaligned_be32(sector, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+
+ len = sdkp->device->sector_size;
+ break;
+
+ default:
+ ret = BLKPREP_KILL;
+ goto out;
+ }
+
+ rq->completion_data = page;
+ rq->timeout = SD_TIMEOUT;
+
+ cmd->transfersize = len;
+ cmd->allowed = SD_MAX_RETRIES;
+
+ /*
+ * Initially __data_len is set to the amount of data that needs to be
+ * transferred to the target. This amount depends on whether WRITE SAME
+ * or UNMAP is being used. After the scatterlist has been mapped by
+ * scsi_init_io() we set __data_len to the size of the area to be
+ * discarded on disk. This allows us to report completion on the full
+ * amount of blocks described by the request.
+ */
+ blk_add_request_payload(rq, page, len);
+ ret = scsi_init_io(cmd);
+ rq->__data_len = nr_bytes;
+
+out:
+ if (ret != BLKPREP_OK)
+ __free_page(page);
+ return ret;
+}
+
+static void sd_config_write_same(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+
+ if (sdkp->device->no_write_same) {
+ sdkp->max_ws_blocks = 0;
+ goto out;
+ }
+
+ /* Some devices can not handle block counts above 0xffff despite
+ * supporting WRITE SAME(16). Consequently we default to 64k
+ * blocks per I/O unless the device explicitly advertises a
+ * bigger limit.
+ */
+ if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+ sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
+ else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
+ sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
+ else {
+ sdkp->device->no_write_same = 1;
+ sdkp->max_ws_blocks = 0;
+ }
+
+out:
+ blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
+ (logical_block_size >> 9));
+}
+
+/**
+ * sd_setup_write_same_cmnd - write the same data to multiple blocks
+ * @cmd: command to prepare
+ *
+ * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
+ * preference indicated by target device.
+ **/
+static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_device *sdp = cmd->device;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct bio *bio = rq->bio;
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
+ int ret;
+
+ if (sdkp->device->no_write_same)
+ return BLKPREP_KILL;
+
+ BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
+
+ sector >>= ilog2(sdp->sector_size) - 9;
+ nr_sectors >>= ilog2(sdp->sector_size) - 9;
+
+ rq->timeout = SD_WRITE_SAME_TIMEOUT;
+
+ if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = WRITE_SAME_16;
+ put_unaligned_be64(sector, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+ } else {
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = WRITE_SAME;
+ put_unaligned_be32(sector, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+ }
+
+ cmd->transfersize = sdp->sector_size;
+ cmd->allowed = SD_MAX_RETRIES;
+
+ /*
+ * For WRITE_SAME the data transferred in the DATA IN buffer is
+ * different from the amount of data actually written to the target.
+ *
+ * We set up __data_len to the amount of data transferred from the
+ * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
+ * to transfer a single sector of data first, but then reset it to
+ * the amount of data to be written right after so that the I/O path
+ * knows how much to actually write.
+ */
+ rq->__data_len = sdp->sector_size;
+ ret = scsi_init_io(cmd);
+ rq->__data_len = nr_bytes;
+ return ret;
+}
+
+static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+
+ /* flush requests don't perform I/O, zero the S/G table */
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+ cmd->cmnd[0] = SYNCHRONIZE_CACHE;
+ cmd->cmd_len = 10;
+ cmd->transfersize = 0;
+ cmd->allowed = SD_MAX_RETRIES;
+
+ rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
+ return BLKPREP_OK;
+}
+
+static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
+{
+ struct request *rq = SCpnt->request;
+ struct scsi_device *sdp = SCpnt->device;
+ struct gendisk *disk = rq->rq_disk;
+ struct scsi_disk *sdkp;
+ sector_t block = blk_rq_pos(rq);
+ sector_t threshold;
+ unsigned int this_count = blk_rq_sectors(rq);
+ unsigned int dif, dix;
+ int ret;
+ unsigned char protect;
+
+ ret = scsi_init_io(SCpnt);
+ if (ret != BLKPREP_OK)
+ goto out;
+ SCpnt = rq->special;
+ sdkp = scsi_disk(disk);
+
+ /* from here on until we're complete, any goto out
+ * is used for a killable error condition */
+ ret = BLKPREP_KILL;
+
+ SCSI_LOG_HLQUEUE(1,
+ scmd_printk(KERN_INFO, SCpnt,
+ "%s: block=%llu, count=%d\n",
+ __func__, (unsigned long long)block, this_count));
+
+ if (!sdp || !scsi_device_online(sdp) ||
+ block + blk_rq_sectors(rq) > get_capacity(disk)) {
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "Retry with 0x%p\n", SCpnt));
+ goto out;
+ }
+
+ if (sdp->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc until
+ * the changed bit has been reset
+ */
+ /* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
+ goto out;
+ }
+
+ /*
+ * Some SD card readers can't handle multi-sector accesses which touch
+ * the last one or two hardware sectors. Split accesses as needed.
+ */
+ threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
+ (sdp->sector_size / 512);
+
+ if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
+ if (block < threshold) {
+ /* Access up to the threshold but not beyond */
+ this_count = threshold - block;
+ } else {
+ /* Access only a single hardware sector */
+ this_count = sdp->sector_size / 512;
+ }
+ }
+
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
+ (unsigned long long)block));
+
+ /*
+ * If we have a 1K hardware sectorsize, prevent access to single
+ * 512 byte sectors. In theory we could handle this - in fact
+ * the scsi cdrom driver must be able to handle this because
+ * we typically use 1K blocksizes, and cdroms typically have
+ * 2K hardware sectorsizes. Of course, things are simpler
+ * with the cdrom, since it is read-only. For performance
+ * reasons, the filesystems should be able to handle this
+ * and not force the scsi disk driver to use bounce buffers
+ * for this.
+ */
+ if (sdp->sector_size == 1024) {
+ if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
+ scmd_printk(KERN_ERR, SCpnt,
+ "Bad block number requested\n");
+ goto out;
+ } else {
+ block = block >> 1;
+ this_count = this_count >> 1;
+ }
+ }
+ if (sdp->sector_size == 2048) {
+ if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
+ scmd_printk(KERN_ERR, SCpnt,
+ "Bad block number requested\n");
+ goto out;
+ } else {
+ block = block >> 2;
+ this_count = this_count >> 2;
+ }
+ }
+ if (sdp->sector_size == 4096) {
+ if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
+ scmd_printk(KERN_ERR, SCpnt,
+ "Bad block number requested\n");
+ goto out;
+ } else {
+ block = block >> 3;
+ this_count = this_count >> 3;
+ }
+ }
+ if (rq_data_dir(rq) == WRITE) {
+ SCpnt->cmnd[0] = WRITE_6;
+
+ if (blk_integrity_rq(rq))
+ sd_dif_prepare(SCpnt);
+
+ } else if (rq_data_dir(rq) == READ) {
+ SCpnt->cmnd[0] = READ_6;
+ } else {
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
+ goto out;
+ }
+
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "%s %d/%u 512 byte blocks.\n",
+ (rq_data_dir(rq) == WRITE) ?
+ "writing" : "reading", this_count,
+ blk_rq_sectors(rq)));
+
+ dix = scsi_prot_sg_count(SCpnt);
+ dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type);
+
+ if (dif || dix)
+ protect = sd_setup_protect_cmnd(SCpnt, dix, dif);
+ else
+ protect = 0;
+
+ if (protect && sdkp->protection_type == SD_DIF_TYPE2_PROTECTION) {
+ SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
+
+ if (unlikely(SCpnt->cmnd == NULL)) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ SCpnt->cmd_len = SD_EXT_CDB_SIZE;
+ memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
+ SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
+ SCpnt->cmnd[7] = 0x18;
+ SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
+ SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
+
+ /* LBA */
+ SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
+ SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
+ SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
+ SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
+ SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[19] = (unsigned char) block & 0xff;
+
+ /* Expected Indirect LBA */
+ SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[23] = (unsigned char) block & 0xff;
+
+ /* Transfer length */
+ SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
+ SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
+ SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
+ SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
+ } else if (sdp->use_16_for_rw || (this_count > 0xffff)) {
+ SCpnt->cmnd[0] += READ_16 - READ_6;
+ SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
+ SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
+ SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
+ SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
+ SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
+ SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[9] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
+ SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
+ SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
+ SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
+ SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
+ } else if ((this_count > 0xff) || (block > 0x1fffff) ||
+ scsi_device_protection(SCpnt->device) ||
+ SCpnt->device->use_10_for_rw) {
+ SCpnt->cmnd[0] += READ_10 - READ_6;
+ SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
+ SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+ SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ } else {
+ if (unlikely(rq->cmd_flags & REQ_FUA)) {
+ /*
+ * This happens only if this drive failed
+ * 10byte rw command with ILLEGAL_REQUEST
+ * during operation and thus turned off
+ * use_10_for_rw.
+ */
+ scmd_printk(KERN_ERR, SCpnt,
+ "FUA write on READ/WRITE(6) drive\n");
+ goto out;
+ }
+
+ SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
+ SCpnt->cmnd[3] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) this_count;
+ SCpnt->cmnd[5] = 0;
+ }
+ SCpnt->sdb.length = this_count * sdp->sector_size;
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+ SCpnt->transfersize = sdp->sector_size;
+ SCpnt->underflow = this_count << 9;
+ SCpnt->allowed = SD_MAX_RETRIES;
+
+ /*
+ * This indicates that the command is ready from our end to be
+ * queued.
+ */
+ ret = BLKPREP_OK;
+ out:
+ return ret;
+}
+
+static int sd_init_command(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+
+ if (rq->cmd_flags & REQ_DISCARD)
+ return sd_setup_discard_cmnd(cmd);
+ else if (rq->cmd_flags & REQ_WRITE_SAME)
+ return sd_setup_write_same_cmnd(cmd);
+ else if (rq->cmd_flags & REQ_FLUSH)
+ return sd_setup_flush_cmnd(cmd);
+ else
+ return sd_setup_read_write_cmnd(cmd);
+}
+
+static void sd_uninit_command(struct scsi_cmnd *SCpnt)
+{
+ struct request *rq = SCpnt->request;
+
+ if (rq->cmd_flags & REQ_DISCARD)
+ __free_page(rq->completion_data);
+
+ if (SCpnt->cmnd != rq->cmd) {
+ mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ SCpnt->cmnd = NULL;
+ SCpnt->cmd_len = 0;
+ }
+}
+
+/**
+ * sd_open - open a scsi disk device
+ * @inode: only i_rdev member may be used
+ * @filp: only f_mode and f_flags may be used
+ *
+ * Returns 0 if successful. Returns a negated errno value in case
+ * of error.
+ *
+ * Note: This can be called from a user context (e.g. fsck(1) )
+ * or from within the kernel (e.g. as a result of a mount(1) ).
+ * In the latter case @inode and @filp carry an abridged amount
+ * of information as noted above.
+ *
+ * Locking: called with bdev->bd_mutex held.
+ **/
+static int sd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
+ struct scsi_device *sdev;
+ int retval;
+
+ if (!sdkp)
+ return -ENXIO;
+
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
+
+ sdev = sdkp->device;
+
+ /*
+ * If the device is in error recovery, wait until it is done.
+ * If the device is offline, then disallow any access to it.
+ */
+ retval = -ENXIO;
+ if (!scsi_block_when_processing_errors(sdev))
+ goto error_out;
+
+ if (sdev->removable || sdkp->write_prot)
+ check_disk_change(bdev);
+
+ /*
+ * If the drive is empty, just let the open fail.
+ */
+ retval = -ENOMEDIUM;
+ if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
+ goto error_out;
+
+ /*
+ * If the device has the write protect tab set, have the open fail
+ * if the user expects to be able to write to the thing.
+ */
+ retval = -EROFS;
+ if (sdkp->write_prot && (mode & FMODE_WRITE))
+ goto error_out;
+
+ /*
+ * It is possible that the disk changing stuff resulted in
+ * the device being taken offline. If this is the case,
+ * report this to the user, and don't pretend that the
+ * open actually succeeded.
+ */
+ retval = -ENXIO;
+ if (!scsi_device_online(sdev))
+ goto error_out;
+
+ if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
+ if (scsi_block_when_processing_errors(sdev))
+ scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
+ }
+
+ return 0;
+
+error_out:
+ scsi_disk_put(sdkp);
+ return retval;
+}
+
+/**
+ * sd_release - invoked when the (last) close(2) is called on this
+ * scsi disk.
+ * @inode: only i_rdev member may be used
+ * @filp: only f_mode and f_flags may be used
+ *
+ * Returns 0.
+ *
+ * Note: may block (uninterruptible) if error recovery is underway
+ * on this disk.
+ *
+ * Locking: called with bdev->bd_mutex held.
+ **/
+static void sd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdev = sdkp->device;
+
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
+
+ if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
+ if (scsi_block_when_processing_errors(sdev))
+ scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
+ }
+
+ /*
+ * XXX and what if there are packets in flight and this close()
+ * XXX is followed by a "rmmod sd_mod"?
+ */
+
+ scsi_disk_put(sdkp);
+}
+
+static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdp = sdkp->device;
+ struct Scsi_Host *host = sdp->host;
+ int diskinfo[4];
+
+ /* default to most commonly used values */
+ diskinfo[0] = 0x40; /* 1 << 6 */
+ diskinfo[1] = 0x20; /* 1 << 5 */
+ diskinfo[2] = sdkp->capacity >> 11;
+
+ /* override with calculated, extended default, or driver values */
+ if (host->hostt->bios_param)
+ host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+ else
+ scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+
+ geo->heads = diskinfo[0];
+ geo->sectors = diskinfo[1];
+ geo->cylinders = diskinfo[2];
+ return 0;
+}
+
+/**
+ * sd_ioctl - process an ioctl
+ * @inode: only i_rdev/i_bdev members may be used
+ * @filp: only f_mode and f_flags may be used
+ * @cmd: ioctl command number
+ * @arg: this is third argument given to ioctl(2) system call.
+ * Often contains a pointer.
+ *
+ * Returns 0 if successful (some ioctls return positive numbers on
+ * success as well). Returns a negated errno value in case of error.
+ *
+ * Note: most ioctls are forward onto the block subsystem or further
+ * down in the scsi subsystem.
+ **/
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdp = sdkp->device;
+ void __user *p = (void __user *)arg;
+ int error;
+
+ SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
+ "cmd=0x%x\n", disk->disk_name, cmd));
+
+ error = scsi_verify_blk_ioctl(bdev, cmd);
+ if (error < 0)
+ return error;
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited.
+ */
+ error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
+ (mode & FMODE_NDELAY) != 0);
+ if (error)
+ goto out;
+
+ /*
+ * Send SCSI addressing ioctls directly to mid level, send other
+ * ioctls to block level and then onto mid level if they can't be
+ * resolved.
+ */
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ error = scsi_ioctl(sdp, cmd, p);
+ break;
+ default:
+ error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
+ if (error != -ENOTTY)
+ break;
+ error = scsi_ioctl(sdp, cmd, p);
+ break;
+ }
+out:
+ return error;
+}
+
+static void set_media_not_present(struct scsi_disk *sdkp)
+{
+ if (sdkp->media_present)
+ sdkp->device->changed = 1;
+
+ if (sdkp->device->removable) {
+ sdkp->media_present = 0;
+ sdkp->capacity = 0;
+ }
+}
+
+static int media_not_present(struct scsi_disk *sdkp,
+ struct scsi_sense_hdr *sshdr)
+{
+ if (!scsi_sense_valid(sshdr))
+ return 0;
+
+ /* not invoked for commands that could return deferred errors */
+ switch (sshdr->sense_key) {
+ case UNIT_ATTENTION:
+ case NOT_READY:
+ /* medium not present */
+ if (sshdr->asc == 0x3A) {
+ set_media_not_present(sdkp);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * sd_check_events - check media events
+ * @disk: kernel device descriptor
+ * @clearing: disk events currently being cleared
+ *
+ * Returns mask of DISK_EVENT_*.
+ *
+ * Note: this function is invoked from the block subsystem.
+ **/
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_sense_hdr *sshdr = NULL;
+ int retval;
+
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
+
+ /*
+ * If the device is offline, don't send any commands - just pretend as
+ * if the command failed. If the device ever comes back online, we
+ * can deal with it then. It is only because of unrecoverable errors
+ * that we would ever take a device offline in the first place.
+ */
+ if (!scsi_device_online(sdp)) {
+ set_media_not_present(sdkp);
+ goto out;
+ }
+
+ /*
+ * Using TEST_UNIT_READY enables differentiation between drive with
+ * no cartridge loaded - NOT READY, drive with changed cartridge -
+ * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
+ *
+ * Drives that auto spin down. eg iomega jaz 1G, will be started
+ * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
+ * sd_revalidate() is called.
+ */
+ retval = -ENODEV;
+
+ if (scsi_block_when_processing_errors(sdp)) {
+ sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
+ sshdr);
+ }
+
+ /* failed to execute TUR, assume media not present */
+ if (host_byte(retval)) {
+ set_media_not_present(sdkp);
+ goto out;
+ }
+
+ if (media_not_present(sdkp, sshdr))
+ goto out;
+
+ /*
+ * For removable scsi disk we have to recognise the presence
+ * of a disk in the drive.
+ */
+ if (!sdkp->media_present)
+ sdp->changed = 1;
+ sdkp->media_present = 1;
+out:
+ /*
+ * sdp->changed is set under the following conditions:
+ *
+ * Medium present state has changed in either direction.
+ * Device has indicated UNIT_ATTENTION.
+ */
+ kfree(sshdr);
+ retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ sdp->changed = 0;
+ return retval;
+}
+
+static int sd_sync_cache(struct scsi_disk *sdkp)
+{
+ int retries, res;
+ struct scsi_device *sdp = sdkp->device;
+ const int timeout = sdp->request_queue->rq_timeout
+ * SD_FLUSH_TIMEOUT_MULTIPLIER;
+ struct scsi_sense_hdr sshdr;
+
+ if (!scsi_device_online(sdp))
+ return -ENODEV;
+
+ for (retries = 3; retries > 0; --retries) {
+ unsigned char cmd[10] = { 0 };
+
+ cmd[0] = SYNCHRONIZE_CACHE;
+ /*
+ * Leave the rest of the command zero to indicate
+ * flush everything.
+ */
+ res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
+ &sshdr, timeout, SD_MAX_RETRIES,
+ NULL, REQ_PM);
+ if (res == 0)
+ break;
+ }
+
+ if (res) {
+ sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
+
+ if (driver_byte(res) & DRIVER_SENSE)
+ sd_print_sense_hdr(sdkp, &sshdr);
+ /* we need to evaluate the error return */
+ if (scsi_sense_valid(&sshdr) &&
+ (sshdr.asc == 0x3a || /* medium not present */
+ sshdr.asc == 0x20)) /* invalid command */
+ /* this is no error here */
+ return 0;
+
+ switch (host_byte(res)) {
+ /* ignore errors due to racing a disconnection */
+ case DID_BAD_TARGET:
+ case DID_NO_CONNECT:
+ return 0;
+ /* signal the upper layer it might try again */
+ case DID_BUS_BUSY:
+ case DID_IMM_RETRY:
+ case DID_REQUEUE:
+ case DID_SOFT_ERROR:
+ return -EBUSY;
+ default:
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static void sd_rescan(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
+ revalidate_disk(sdkp->disk);
+}
+
+
+#ifdef CONFIG_COMPAT
+/*
+ * This gets directly called from VFS. When the ioctl
+ * is not recognized we go back to the other translation paths.
+ */
+static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ int error;
+
+ error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+ if (error)
+ return error;
+
+ /*
+ * Let the static ioctl translation table take care of it.
+ */
+ if (!sdev->host->hostt->compat_ioctl)
+ return -ENOIOCTLCMD;
+ return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+}
+#endif
+
+static const struct block_device_operations sd_fops = {
+ .owner = THIS_MODULE,
+ .open = sd_open,
+ .release = sd_release,
+ .ioctl = sd_ioctl,
+ .getgeo = sd_getgeo,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sd_compat_ioctl,
+#endif
+ .check_events = sd_check_events,
+ .revalidate_disk = sd_revalidate_disk,
+ .unlock_native_capacity = sd_unlock_native_capacity,
+};
+
+/**
+ * sd_eh_action - error handling callback
+ * @scmd: sd-issued command that has failed
+ * @eh_disp: The recovery disposition suggested by the midlayer
+ *
+ * This function is called by the SCSI midlayer upon completion of an
+ * error test command (currently TEST UNIT READY). The result of sending
+ * the eh command is passed in eh_disp. We're looking for devices that
+ * fail medium access commands but are OK with non access commands like
+ * test unit ready (so wrongly see the device as having a successful
+ * recovery)
+ **/
+static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
+{
+ struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+
+ if (!scsi_device_online(scmd->device) ||
+ !scsi_medium_access_command(scmd) ||
+ host_byte(scmd->result) != DID_TIME_OUT ||
+ eh_disp != SUCCESS)
+ return eh_disp;
+
+ /*
+ * The device has timed out executing a medium access command.
+ * However, the TEST UNIT READY command sent during error
+ * handling completed successfully. Either the device is in the
+ * process of recovering or has it suffered an internal failure
+ * that prevents access to the storage medium.
+ */
+ sdkp->medium_access_timed_out++;
+
+ /*
+ * If the device keeps failing read/write commands but TEST UNIT
+ * READY always completes successfully we assume that medium
+ * access is no longer possible and take the device offline.
+ */
+ if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
+ scmd_printk(KERN_ERR, scmd,
+ "Medium access timeout failure. Offlining disk!\n");
+ scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+
+ return FAILED;
+ }
+
+ return eh_disp;
+}
+
+static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+{
+ u64 start_lba = blk_rq_pos(scmd->request);
+ u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
+ u64 factor = scmd->device->sector_size / 512;
+ u64 bad_lba;
+ int info_valid;
+ /*
+ * resid is optional but mostly filled in. When it's unused,
+ * its value is zero, so we assume the whole buffer transferred
+ */
+ unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
+ unsigned int good_bytes;
+
+ if (scmd->request->cmd_type != REQ_TYPE_FS)
+ return 0;
+
+ info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ &bad_lba);
+ if (!info_valid)
+ return 0;
+
+ if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+ return 0;
+
+ /* be careful ... don't want any overflows */
+ do_div(start_lba, factor);
+ do_div(end_lba, factor);
+
+ /* The bad lba was reported incorrectly, we have no idea where
+ * the error is.
+ */
+ if (bad_lba < start_lba || bad_lba >= end_lba)
+ return 0;
+
+ /* This computation should always be done in terms of
+ * the resolution of the device's medium.
+ */
+ good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
+ return min(good_bytes, transferred);
+}
+
+/**
+ * sd_done - bottom half handler: called when the lower level
+ * driver has completed (successfully or otherwise) a scsi command.
+ * @SCpnt: mid-level's per command structure.
+ *
+ * Note: potentially run from within an ISR. Must not block.
+ **/
+static int sd_done(struct scsi_cmnd *SCpnt)
+{
+ int result = SCpnt->result;
+ unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+ struct scsi_sense_hdr sshdr;
+ struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
+ struct request *req = SCpnt->request;
+ int sense_valid = 0;
+ int sense_deferred = 0;
+ unsigned char op = SCpnt->cmnd[0];
+ unsigned char unmap = SCpnt->cmnd[1] & 8;
+
+ if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+ if (!result) {
+ good_bytes = blk_rq_bytes(req);
+ scsi_set_resid(SCpnt, 0);
+ } else {
+ good_bytes = 0;
+ scsi_set_resid(SCpnt, blk_rq_bytes(req));
+ }
+ }
+
+ if (result) {
+ sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
+ if (sense_valid)
+ sense_deferred = scsi_sense_is_deferred(&sshdr);
+ }
+ sdkp->medium_access_timed_out = 0;
+
+ if (driver_byte(result) != DRIVER_SENSE &&
+ (!sense_valid || sense_deferred))
+ goto out;
+
+ switch (sshdr.sense_key) {
+ case HARDWARE_ERROR:
+ case MEDIUM_ERROR:
+ good_bytes = sd_completed_bytes(SCpnt);
+ break;
+ case RECOVERED_ERROR:
+ good_bytes = scsi_bufflen(SCpnt);
+ break;
+ case NO_SENSE:
+ /* This indicates a false check condition, so ignore it. An
+ * unknown amount of data was transferred so treat it as an
+ * error.
+ */
+ SCpnt->result = 0;
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ break;
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
+ good_bytes = sd_completed_bytes(SCpnt);
+ break;
+ case ILLEGAL_REQUEST:
+ if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
+ good_bytes = sd_completed_bytes(SCpnt);
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+ switch (op) {
+ case UNMAP:
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ break;
+ case WRITE_SAME_16:
+ case WRITE_SAME:
+ if (unmap)
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else {
+ sdkp->device->no_write_same = 1;
+ sd_config_write_same(sdkp);
+
+ good_bytes = 0;
+ req->__data_len = blk_rq_bytes(req);
+ req->cmd_flags |= REQ_QUIET;
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ out:
+ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
+ "sd_done: completed %d of %d bytes\n",
+ good_bytes, scsi_bufflen(SCpnt)));
+
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+ sd_dif_complete(SCpnt, good_bytes);
+
+ return good_bytes;
+}
+
+/*
+ * spinup disk - called only in sd_revalidate_disk()
+ */
+static void
+sd_spinup_disk(struct scsi_disk *sdkp)
+{
+ unsigned char cmd[10];
+ unsigned long spintime_expire = 0;
+ int retries, spintime;
+ unsigned int the_result;
+ struct scsi_sense_hdr sshdr;
+ int sense_valid = 0;
+
+ spintime = 0;
+
+ /* Spin up drives, as required. Only do this at boot time */
+ /* Spinup needs to be done for module loads too. */
+ do {
+ retries = 0;
+
+ do {
+ cmd[0] = TEST_UNIT_READY;
+ memset((void *) &cmd[1], 0, 9);
+
+ the_result = scsi_execute_req(sdkp->device, cmd,
+ DMA_NONE, NULL, 0,
+ &sshdr, SD_TIMEOUT,
+ SD_MAX_RETRIES, NULL);
+
+ /*
+ * If the drive has indicated to us that it
+ * doesn't have any media in it, don't bother
+ * with any more polling.
+ */
+ if (media_not_present(sdkp, &sshdr))
+ return;
+
+ if (the_result)
+ sense_valid = scsi_sense_valid(&sshdr);
+ retries++;
+ } while (retries < 3 &&
+ (!scsi_status_is_good(the_result) ||
+ ((driver_byte(the_result) & DRIVER_SENSE) &&
+ sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
+
+ if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
+ /* no sense, TUR either succeeded or failed
+ * with a status error */
+ if(!spintime && !scsi_status_is_good(the_result)) {
+ sd_print_result(sdkp, "Test Unit Ready failed",
+ the_result);
+ }
+ break;
+ }
+
+ /*
+ * The device does not want the automatic start to be issued.
+ */
+ if (sdkp->device->no_start_on_add)
+ break;
+
+ if (sense_valid && sshdr.sense_key == NOT_READY) {
+ if (sshdr.asc == 4 && sshdr.ascq == 3)
+ break; /* manual intervention required */
+ if (sshdr.asc == 4 && sshdr.ascq == 0xb)
+ break; /* standby */
+ if (sshdr.asc == 4 && sshdr.ascq == 0xc)
+ break; /* unavailable */
+ /*
+ * Issue command to spin up drive when not ready
+ */
+ if (!spintime) {
+ sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
+ cmd[0] = START_STOP;
+ cmd[1] = 1; /* Return immediately */
+ memset((void *) &cmd[2], 0, 8);
+ cmd[4] = 1; /* Start spin cycle */
+ if (sdkp->device->start_stop_pwr_cond)
+ cmd[4] |= 1 << 4;
+ scsi_execute_req(sdkp->device, cmd, DMA_NONE,
+ NULL, 0, &sshdr,
+ SD_TIMEOUT, SD_MAX_RETRIES,
+ NULL);
+ spintime_expire = jiffies + 100 * HZ;
+ spintime = 1;
+ }
+ /* Wait 1 second for next try */
+ msleep(1000);
+ printk(".");
+
+ /*
+ * Wait for USB flash devices with slow firmware.
+ * Yes, this sense key/ASC combination shouldn't
+ * occur here. It's characteristic of these devices.
+ */
+ } else if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x28) {
+ if (!spintime) {
+ spintime_expire = jiffies + 5 * HZ;
+ spintime = 1;
+ }
+ /* Wait 1 second for next try */
+ msleep(1000);
+ } else {
+ /* we don't understand the sense code, so it's
+ * probably pointless to loop */
+ if(!spintime) {
+ sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
+ sd_print_sense_hdr(sdkp, &sshdr);
+ }
+ break;
+ }
+
+ } while (spintime && time_before_eq(jiffies, spintime_expire));
+
+ if (spintime) {
+ if (scsi_status_is_good(the_result))
+ printk("ready\n");
+ else
+ printk("not responding...\n");
+ }
+}
+
+
+/*
+ * Determine whether disk supports Data Integrity Field.
+ */
+static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ u8 type;
+ int ret = 0;
+
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ return ret;
+
+ type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
+
+ if (type > SD_DIF_TYPE3_PROTECTION)
+ ret = -ENODEV;
+ else if (scsi_host_dif_capable(sdp->host, type))
+ ret = 1;
+
+ if (sdkp->first_scan || type != sdkp->protection_type)
+ switch (ret) {
+ case -ENODEV:
+ sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
+ " protection type %u. Disabling disk!\n",
+ type);
+ break;
+ case 1:
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIF Type %u protection\n", type);
+ break;
+ case 0:
+ sd_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %u protection\n", type);
+ break;
+ }
+
+ sdkp->protection_type = type;
+
+ return ret;
+}
+
+static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ struct scsi_sense_hdr *sshdr, int sense_valid,
+ int the_result)
+{
+ if (driver_byte(the_result) & DRIVER_SENSE)
+ sd_print_sense_hdr(sdkp, sshdr);
+ else
+ sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
+
+ /*
+ * Set dirty bit for removable devices if not ready -
+ * sometimes drives will not report this properly.
+ */
+ if (sdp->removable &&
+ sense_valid && sshdr->sense_key == NOT_READY)
+ set_media_not_present(sdkp);
+
+ /*
+ * We used to set media_present to 0 here to indicate no media
+ * in the drive, but some drives fail read capacity even with
+ * media present, so we can't do that.
+ */
+ sdkp->capacity = 0; /* unknown mapped to zero - as usual */
+}
+
+#define RC16_LEN 32
+#if RC16_LEN > SD_BUF_SIZE
+#error RC16_LEN must not be more than SD_BUF_SIZE
+#endif
+
+#define READ_CAPACITY_RETRIES_ON_RESET 10
+
+static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ unsigned char *buffer)
+{
+ unsigned char cmd[16];
+ struct scsi_sense_hdr sshdr;
+ int sense_valid = 0;
+ int the_result;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
+ unsigned int alignment;
+ unsigned long long lba;
+ unsigned sector_size;
+
+ if (sdp->no_read_capacity_16)
+ return -EINVAL;
+
+ do {
+ memset(cmd, 0, 16);
+ cmd[0] = SERVICE_ACTION_IN_16;
+ cmd[1] = SAI_READ_CAPACITY_16;
+ cmd[13] = RC16_LEN;
+ memset(buffer, 0, RC16_LEN);
+
+ the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+ buffer, RC16_LEN, &sshdr,
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+ if (media_not_present(sdkp, &sshdr))
+ return -ENODEV;
+
+ if (the_result) {
+ sense_valid = scsi_sense_valid(&sshdr);
+ if (sense_valid &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ sshdr.ascq == 0x00)
+ /* Invalid Command Operation Code or
+ * Invalid Field in CDB, just retry
+ * silently with RC10 */
+ return -EINVAL;
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
+ }
+ retries--;
+
+ } while (the_result && retries);
+
+ if (the_result) {
+ sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
+ read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
+ return -EINVAL;
+ }
+
+ sector_size = get_unaligned_be32(&buffer[8]);
+ lba = get_unaligned_be64(&buffer[0]);
+
+ if (sd_read_protection_type(sdkp, buffer) < 0) {
+ sdkp->capacity = 0;
+ return -ENODEV;
+ }
+
+ if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
+ "kernel compiled with support for large block "
+ "devices.\n");
+ sdkp->capacity = 0;
+ return -EOVERFLOW;
+ }
+
+ /* Logical blocks per physical block exponent */
+ sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
+
+ /* Lowest aligned logical block */
+ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
+ blk_queue_alignment_offset(sdp->request_queue, alignment);
+ if (alignment && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "physical block alignment offset: %u\n", alignment);
+
+ if (buffer[14] & 0x80) { /* LBPME */
+ sdkp->lbpme = 1;
+
+ if (buffer[14] & 0x40) /* LBPRZ */
+ sdkp->lbprz = 1;
+
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ }
+
+ sdkp->capacity = lba + 1;
+ return sector_size;
+}
+
+static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ unsigned char *buffer)
+{
+ unsigned char cmd[16];
+ struct scsi_sense_hdr sshdr;
+ int sense_valid = 0;
+ int the_result;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
+ sector_t lba;
+ unsigned sector_size;
+
+ do {
+ cmd[0] = READ_CAPACITY;
+ memset(&cmd[1], 0, 9);
+ memset(buffer, 0, 8);
+
+ the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+ buffer, 8, &sshdr,
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+ if (media_not_present(sdkp, &sshdr))
+ return -ENODEV;
+
+ if (the_result) {
+ sense_valid = scsi_sense_valid(&sshdr);
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
+ }
+ retries--;
+
+ } while (the_result && retries);
+
+ if (the_result) {
+ sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
+ read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
+ return -EINVAL;
+ }
+
+ sector_size = get_unaligned_be32(&buffer[4]);
+ lba = get_unaligned_be32(&buffer[0]);
+
+ if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
+ /* Some buggy (usb cardreader) devices return an lba of
+ 0xffffffff when the want to report a size of 0 (with
+ which they really mean no media is present) */
+ sdkp->capacity = 0;
+ sdkp->physical_block_size = sector_size;
+ return sector_size;
+ }
+
+ if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
+ "kernel compiled with support for large block "
+ "devices.\n");
+ sdkp->capacity = 0;
+ return -EOVERFLOW;
+ }
+
+ sdkp->capacity = lba + 1;
+ sdkp->physical_block_size = sector_size;
+ return sector_size;
+}
+
+static int sd_try_rc16_first(struct scsi_device *sdp)
+{
+ if (sdp->host->max_cmd_len < 16)
+ return 0;
+ if (sdp->try_rc_10_first)
+ return 0;
+ if (sdp->scsi_level > SCSI_SPC_2)
+ return 1;
+ if (scsi_device_protection(sdp))
+ return 1;
+ return 0;
+}
+
+/*
+ * read disk capacity
+ */
+static void
+sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ int sector_size;
+ struct scsi_device *sdp = sdkp->device;
+ sector_t old_capacity = sdkp->capacity;
+
+ if (sd_try_rc16_first(sdp)) {
+ sector_size = read_capacity_16(sdkp, sdp, buffer);
+ if (sector_size == -EOVERFLOW)
+ goto got_data;
+ if (sector_size == -ENODEV)
+ return;
+ if (sector_size < 0)
+ sector_size = read_capacity_10(sdkp, sdp, buffer);
+ if (sector_size < 0)
+ return;
+ } else {
+ sector_size = read_capacity_10(sdkp, sdp, buffer);
+ if (sector_size == -EOVERFLOW)
+ goto got_data;
+ if (sector_size < 0)
+ return;
+ if ((sizeof(sdkp->capacity) > 4) &&
+ (sdkp->capacity > 0xffffffffULL)) {
+ int old_sector_size = sector_size;
+ sd_printk(KERN_NOTICE, sdkp, "Very big device. "
+ "Trying to use READ CAPACITY(16).\n");
+ sector_size = read_capacity_16(sdkp, sdp, buffer);
+ if (sector_size < 0) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "Using 0xffffffff as device size\n");
+ sdkp->capacity = 1 + (sector_t) 0xffffffff;
+ sector_size = old_sector_size;
+ goto got_data;
+ }
+ }
+ }
+
+ /* Some devices are known to return the total number of blocks,
+ * not the highest block number. Some devices have versions
+ * which do this and others which do not. Some devices we might
+ * suspect of doing this but we don't know for certain.
+ *
+ * If we know the reported capacity is wrong, decrement it. If
+ * we can only guess, then assume the number of blocks is even
+ * (usually true but not always) and err on the side of lowering
+ * the capacity.
+ */
+ if (sdp->fix_capacity ||
+ (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
+ sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
+ "from its reported value: %llu\n",
+ (unsigned long long) sdkp->capacity);
+ --sdkp->capacity;
+ }
+
+got_data:
+ if (sector_size == 0) {
+ sector_size = 512;
+ sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
+ "assuming 512.\n");
+ }
+
+ if (sector_size != 512 &&
+ sector_size != 1024 &&
+ sector_size != 2048 &&
+ sector_size != 4096) {
+ sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
+ sector_size);
+ /*
+ * The user might want to re-format the drive with
+ * a supported sectorsize. Once this happens, it
+ * would be relatively trivial to set the thing up.
+ * For this reason, we leave the thing in the table.
+ */
+ sdkp->capacity = 0;
+ /*
+ * set a bogus sector size so the normal read/write
+ * logic in the block layer will eventually refuse any
+ * request on this device without tripping over power
+ * of two sector size assumptions
+ */
+ sector_size = 512;
+ }
+ blk_queue_logical_block_size(sdp->request_queue, sector_size);
+
+ {
+ char cap_str_2[10], cap_str_10[10];
+
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(sdkp->capacity, sector_size,
+ STRING_UNITS_10, cap_str_10,
+ sizeof(cap_str_10));
+
+ if (sdkp->first_scan || old_capacity != sdkp->capacity) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "%llu %d-byte logical blocks: (%s/%s)\n",
+ (unsigned long long)sdkp->capacity,
+ sector_size, cap_str_10, cap_str_2);
+
+ if (sdkp->physical_block_size != sector_size)
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u-byte physical blocks\n",
+ sdkp->physical_block_size);
+ }
+ }
+
+ if (sdkp->capacity > 0xffffffff) {
+ sdp->use_16_for_rw = 1;
+ sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
+ } else
+ sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
+
+ /* Rescale capacity to 512-byte units */
+ if (sector_size == 4096)
+ sdkp->capacity <<= 3;
+ else if (sector_size == 2048)
+ sdkp->capacity <<= 2;
+ else if (sector_size == 1024)
+ sdkp->capacity <<= 1;
+
+ blk_queue_physical_block_size(sdp->request_queue,
+ sdkp->physical_block_size);
+ sdkp->device->sector_size = sector_size;
+}
+
+/* called with buffer of length 512 */
+static inline int
+sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
+ unsigned char *buffer, int len, struct scsi_mode_data *data,
+ struct scsi_sense_hdr *sshdr)
+{
+ return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
+ SD_TIMEOUT, SD_MAX_RETRIES, data,
+ sshdr);
+}
+
+/*
+ * read write protect setting, if possible - called only in sd_revalidate_disk()
+ * called with buffer of length SD_BUF_SIZE
+ */
+static void
+sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ int res;
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_mode_data data;
+ int old_wp = sdkp->write_prot;
+
+ set_disk_ro(sdkp->disk, 0);
+ if (sdp->skip_ms_page_3f) {
+ sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
+ return;
+ }
+
+ if (sdp->use_192_bytes_for_3f) {
+ res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
+ } else {
+ /*
+ * First attempt: ask for all pages (0x3F), but only 4 bytes.
+ * We have to start carefully: some devices hang if we ask
+ * for more than is available.
+ */
+ res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
+
+ /*
+ * Second attempt: ask for page 0 When only page 0 is
+ * implemented, a request for page 3F may return Sense Key
+ * 5: Illegal Request, Sense Code 24: Invalid field in
+ * CDB.
+ */
+ if (!scsi_status_is_good(res))
+ res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
+
+ /*
+ * Third attempt: ask 255 bytes, as we did earlier.
+ */
+ if (!scsi_status_is_good(res))
+ res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
+ &data, NULL);
+ }
+
+ if (!scsi_status_is_good(res)) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Test WP failed, assume Write Enabled\n");
+ } else {
+ sdkp->write_prot = ((data.device_specific & 0x80) != 0);
+ set_disk_ro(sdkp->disk, sdkp->write_prot);
+ if (sdkp->first_scan || old_wp != sdkp->write_prot) {
+ sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
+ sdkp->write_prot ? "on" : "off");
+ sd_printk(KERN_DEBUG, sdkp,
+ "Mode Sense: %02x %02x %02x %02x\n",
+ buffer[0], buffer[1], buffer[2], buffer[3]);
+ }
+ }
+}
+
+/*
+ * sd_read_cache_type - called only from sd_revalidate_disk()
+ * called with buffer of length SD_BUF_SIZE
+ */
+static void
+sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ int len = 0, res;
+ struct scsi_device *sdp = sdkp->device;
+
+ int dbd;
+ int modepage;
+ int first_len;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
+ int old_wce = sdkp->WCE;
+ int old_rcd = sdkp->RCD;
+ int old_dpofua = sdkp->DPOFUA;
+
+
+ if (sdkp->cache_override)
+ return;
+
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+ goto defaults;
+ else {
+ if (sdp->skip_ms_page_3f)
+ goto defaults;
+ modepage = 0x3F;
+ if (sdp->use_192_bytes_for_3f)
+ first_len = 192;
+ dbd = 0;
+ }
+ } else if (sdp->type == TYPE_RBC) {
+ modepage = 6;
+ dbd = 8;
+ } else {
+ modepage = 8;
+ dbd = 0;
+ }
+
+ /* cautiously ask */
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ &data, &sshdr);
+
+ if (!scsi_status_is_good(res))
+ goto bad_sense;
+
+ if (!data.header_length) {
+ modepage = 6;
+ first_len = 0;
+ sd_first_printk(KERN_ERR, sdkp,
+ "Missing header in MODE_SENSE response\n");
+ }
+
+ /* that went OK, now ask for the proper length */
+ len = data.length;
+
+ /*
+ * We're only interested in the first three bytes, actually.
+ * But the data cache page is defined for the first 20.
+ */
+ if (len < 3)
+ goto bad_sense;
+ else if (len > SD_BUF_SIZE) {
+ sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+ len = SD_BUF_SIZE;
+ }
+ if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
+ len = 192;
+
+ /* Get the data */
+ if (len > first_len)
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ &data, &sshdr);
+
+ if (scsi_status_is_good(res)) {
+ int offset = data.header_length + data.block_descriptor_length;
+
+ while (offset < len) {
+ u8 page_code = buffer[offset] & 0x3F;
+ u8 spf = buffer[offset] & 0x40;
+
+ if (page_code == 8 || page_code == 6) {
+ /* We're interested only in the first 3 bytes.
+ */
+ if (len - offset <= 2) {
+ sd_first_printk(KERN_ERR, sdkp,
+ "Incomplete mode parameter "
+ "data\n");
+ goto defaults;
+ } else {
+ modepage = page_code;
+ goto Page_found;
+ }
+ } else {
+ /* Go to the next page */
+ if (spf && len - offset > 3)
+ offset += 4 + (buffer[offset+2] << 8) +
+ buffer[offset+3];
+ else if (!spf && len - offset > 1)
+ offset += 2 + buffer[offset+1];
+ else {
+ sd_first_printk(KERN_ERR, sdkp,
+ "Incomplete mode "
+ "parameter data\n");
+ goto defaults;
+ }
+ }
+ }
+
+ sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ goto defaults;
+
+ Page_found:
+ if (modepage == 8) {
+ sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
+ sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
+ } else {
+ sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
+ sdkp->RCD = 0;
+ }
+
+ sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
+ if (sdp->broken_fua) {
+ sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
+ sdkp->DPOFUA = 0;
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ sd_first_printk(KERN_NOTICE, sdkp,
+ "Uses READ/WRITE(6), disabling FUA\n");
+ sdkp->DPOFUA = 0;
+ }
+
+ /* No cache flush allowed for write protected devices */
+ if (sdkp->WCE && sdkp->write_prot)
+ sdkp->WCE = 0;
+
+ if (sdkp->first_scan || old_wce != sdkp->WCE ||
+ old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Write cache: %s, read cache: %s, %s\n",
+ sdkp->WCE ? "enabled" : "disabled",
+ sdkp->RCD ? "disabled" : "enabled",
+ sdkp->DPOFUA ? "supports DPO and FUA"
+ : "doesn't support DPO or FUA");
+
+ return;
+ }
+
+bad_sense:
+ if (scsi_sense_valid(&sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ sshdr.asc == 0x24 && sshdr.ascq == 0x0)
+ /* Invalid field in CDB */
+ sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
+ else
+ sd_first_printk(KERN_ERR, sdkp,
+ "Asking for cache data failed\n");
+
+defaults:
+ if (sdp->wce_default_on) {
+ sd_first_printk(KERN_NOTICE, sdkp,
+ "Assuming drive cache: write back\n");
+ sdkp->WCE = 1;
+ } else {
+ sd_first_printk(KERN_ERR, sdkp,
+ "Assuming drive cache: write through\n");
+ sdkp->WCE = 0;
+ }
+ sdkp->RCD = 0;
+ sdkp->DPOFUA = 0;
+}
+
+/*
+ * The ATO bit indicates whether the DIF application tag is available
+ * for use by the operating system.
+ */
+static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ int res, offset;
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
+
+ if (sdp->type != TYPE_DISK)
+ return;
+
+ if (sdkp->protection_type == 0)
+ return;
+
+ res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, &sshdr);
+
+ if (!scsi_status_is_good(res) || !data.header_length ||
+ data.length < 6) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "getting Control mode page failed, assume no ATO\n");
+
+ if (scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+
+ return;
+ }
+
+ offset = data.header_length + data.block_descriptor_length;
+
+ if ((buffer[offset] & 0x3f) != 0x0a) {
+ sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+ return;
+ }
+
+ if ((buffer[offset + 5] & 0x80) == 0)
+ return;
+
+ sdkp->ATO = 1;
+
+ return;
+}
+
+/**
+ * sd_read_block_limits - Query disk device for preferred I/O sizes.
+ * @disk: disk to query
+ */
+static void sd_read_block_limits(struct scsi_disk *sdkp)
+{
+ unsigned int sector_sz = sdkp->device->sector_size;
+ const int vpd_len = 64;
+ u32 max_xfer_length;
+ unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+
+ if (!buffer ||
+ /* Block Limits VPD */
+ scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
+ goto out;
+
+ max_xfer_length = get_unaligned_be32(&buffer[8]);
+ if (max_xfer_length)
+ sdkp->max_xfer_blocks = max_xfer_length;
+
+ blk_queue_io_min(sdkp->disk->queue,
+ get_unaligned_be16(&buffer[6]) * sector_sz);
+ blk_queue_io_opt(sdkp->disk->queue,
+ get_unaligned_be32(&buffer[12]) * sector_sz);
+
+ if (buffer[3] == 0x3c) {
+ unsigned int lba_count, desc_count;
+
+ sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+
+ if (!sdkp->lbpme)
+ goto out;
+
+ lba_count = get_unaligned_be32(&buffer[20]);
+ desc_count = get_unaligned_be32(&buffer[24]);
+
+ if (lba_count && desc_count)
+ sdkp->max_unmap_blocks = lba_count;
+
+ sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+
+ if (buffer[32] & 0x80)
+ sdkp->unmap_alignment =
+ get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+
+ if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
+
+ if (sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else
+ sd_config_discard(sdkp, SD_LBP_WS16);
+
+ } else { /* LBP VPD page tells us what to use */
+ if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (sdkp->lbpws10)
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else if (sdkp->lbpu && sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ }
+ }
+
+ out:
+ kfree(buffer);
+}
+
+/**
+ * sd_read_block_characteristics - Query block dev. characteristics
+ * @disk: disk to query
+ */
+static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+{
+ unsigned char *buffer;
+ u16 rot;
+ const int vpd_len = 64;
+
+ buffer = kmalloc(vpd_len, GFP_KERNEL);
+
+ if (!buffer ||
+ /* Block Device Characteristics VPD */
+ scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
+ goto out;
+
+ rot = get_unaligned_be16(&buffer[4]);
+
+ if (rot == 1) {
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+ }
+
+ out:
+ kfree(buffer);
+}
+
+/**
+ * sd_read_block_provisioning - Query provisioning VPD page
+ * @disk: disk to query
+ */
+static void sd_read_block_provisioning(struct scsi_disk *sdkp)
+{
+ unsigned char *buffer;
+ const int vpd_len = 8;
+
+ if (sdkp->lbpme == 0)
+ return;
+
+ buffer = kmalloc(vpd_len, GFP_KERNEL);
+
+ if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
+ goto out;
+
+ sdkp->lbpvpd = 1;
+ sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
+
+ out:
+ kfree(buffer);
+}
+
+static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdev = sdkp->device;
+
+ if (sdev->host->no_write_same) {
+ sdev->no_write_same = 1;
+
+ return;
+ }
+
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+ /* too large values might cause issues with arcmsr */
+ int vpd_buf_len = 64;
+
+ sdev->no_report_opcodes = 1;
+
+ /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
+ * CODES is unsupported and the device has an ATA
+ * Information VPD page (SAT).
+ */
+ if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
+ sdev->no_write_same = 1;
+ }
+
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
+ sdkp->ws16 = 1;
+
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
+ sdkp->ws10 = 1;
+}
+
+static int sd_try_extended_inquiry(struct scsi_device *sdp)
+{
+ /* Attempt VPD inquiry if the device blacklist explicitly calls
+ * for it.
+ */
+ if (sdp->try_vpd_pages)
+ return 1;
+ /*
+ * Although VPD inquiries can go to SCSI-2 type devices,
+ * some USB ones crash on receiving them, and the pages
+ * we currently ask for are for SPC-3 and beyond
+ */
+ if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
+ return 1;
+ return 0;
+}
+
+/**
+ * sd_revalidate_disk - called the first time a new disk is seen,
+ * performs disk spin up, read_capacity, etc.
+ * @disk: struct gendisk we care about
+ **/
+static int sd_revalidate_disk(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdp = sdkp->device;
+ unsigned char *buffer;
+ unsigned int max_xfer;
+
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
+ "sd_revalidate_disk\n"));
+
+ /*
+ * If the device is offline, don't try and read capacity or any
+ * of the other niceties.
+ */
+ if (!scsi_device_online(sdp))
+ goto out;
+
+ buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
+ "allocation failure.\n");
+ goto out;
+ }
+
+ sd_spinup_disk(sdkp);
+
+ /*
+ * Without media there is no reason to ask; moreover, some devices
+ * react badly if we do.
+ */
+ if (sdkp->media_present) {
+ sd_read_capacity(sdkp, buffer);
+
+ if (sd_try_extended_inquiry(sdp)) {
+ sd_read_block_provisioning(sdkp);
+ sd_read_block_limits(sdkp);
+ sd_read_block_characteristics(sdkp);
+ }
+
+ sd_read_write_protect_flag(sdkp, buffer);
+ sd_read_cache_type(sdkp, buffer);
+ sd_read_app_tag_own(sdkp, buffer);
+ sd_read_write_same(sdkp, buffer);
+ }
+
+ sdkp->first_scan = 0;
+
+ /*
+ * We now have all cache related info, determine how we deal
+ * with flush requests.
+ */
+ sd_set_flush_flag(sdkp);
+
+ max_xfer = sdkp->max_xfer_blocks;
+ max_xfer <<= ilog2(sdp->sector_size) - 9;
+
+ max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+ max_xfer);
+ blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+ set_capacity(disk, sdkp->capacity);
+ sd_config_write_same(sdkp);
+ kfree(buffer);
+
+ out:
+ return 0;
+}
+
+/**
+ * sd_unlock_native_capacity - unlock native capacity
+ * @disk: struct gendisk to set capacity for
+ *
+ * Block layer calls this function if it detects that partitions
+ * on @disk reach beyond the end of the device. If the SCSI host
+ * implements ->unlock_native_capacity() method, it's invoked to
+ * give it a chance to adjust the device capacity.
+ *
+ * CONTEXT:
+ * Defined by block layer. Might sleep.
+ */
+static void sd_unlock_native_capacity(struct gendisk *disk)
+{
+ struct scsi_device *sdev = scsi_disk(disk)->device;
+
+ if (sdev->host->hostt->unlock_native_capacity)
+ sdev->host->hostt->unlock_native_capacity(sdev);
+}
+
+/**
+ * sd_format_disk_name - format disk name
+ * @prefix: name prefix - ie. "sd" for SCSI disks
+ * @index: index of the disk to format name for
+ * @buf: output buffer
+ * @buflen: length of the output buffer
+ *
+ * SCSI disk names starts at sda. The 26th device is sdz and the
+ * 27th is sdaa. The last one for two lettered suffix is sdzz
+ * which is followed by sdaaa.
+ *
+ * This is basically 26 base counting with one extra 'nil' entry
+ * at the beginning from the second digit on and can be
+ * determined using similar method as 26 base conversion with the
+ * index shifted -1 after each digit is computed.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
+
+/*
+ * The asynchronous part of sd_probe
+ */
+static void sd_probe_async(void *data, async_cookie_t cookie)
+{
+ struct scsi_disk *sdkp = data;
+ struct scsi_device *sdp;
+ struct gendisk *gd;
+ u32 index;
+ struct device *dev;
+
+ sdp = sdkp->device;
+ gd = sdkp->disk;
+ index = sdkp->index;
+ dev = &sdp->sdev_gendev;
+
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = SD_MINORS;
+
+ gd->fops = &sd_fops;
+ gd->private_data = &sdkp->driver;
+ gd->queue = sdkp->device->request_queue;
+
+ /* defaults, until the device tells us otherwise */
+ sdp->sector_size = 512;
+ sdkp->capacity = 0;
+ sdkp->media_present = 1;
+ sdkp->write_prot = 0;
+ sdkp->cache_override = 0;
+ sdkp->WCE = 0;
+ sdkp->RCD = 0;
+ sdkp->ATO = 0;
+ sdkp->first_scan = 1;
+ sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
+
+ sd_revalidate_disk(gd);
+
+ gd->driverfs_dev = &sdp->sdev_gendev;
+ gd->flags = GENHD_FL_EXT_DEVT;
+ if (sdp->removable) {
+ gd->flags |= GENHD_FL_REMOVABLE;
+ gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ }
+
+ blk_pm_runtime_init(sdp->request_queue, dev);
+ add_disk(gd);
+ if (sdkp->capacity)
+ sd_dif_config_host(sdkp);
+
+ sd_revalidate_disk(gd);
+
+ sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
+ sdp->removable ? "removable " : "");
+ scsi_autopm_put_device(sdp);
+ put_device(&sdkp->dev);
+}
+
+/**
+ * sd_probe - called during driver initialization and whenever a
+ * new scsi device is attached to the system. It is called once
+ * for each scsi device (not just disks) present.
+ * @dev: pointer to device object
+ *
+ * Returns 0 if successful (or not interested in this scsi device
+ * (e.g. scanner)); 1 when there is an error.
+ *
+ * Note: this function is invoked from the scsi mid-level.
+ * This function sets up the mapping between a given
+ * <host,channel,id,lun> (found in sdp) and new device name
+ * (e.g. /dev/sda). More precisely it is the block device major
+ * and minor number that is chosen here.
+ *
+ * Assume sd_probe is not re-entrant (for time being)
+ * Also think about sd_probe() and sd_remove() running coincidentally.
+ **/
+static int sd_probe(struct device *dev)
+{
+ struct scsi_device *sdp = to_scsi_device(dev);
+ struct scsi_disk *sdkp;
+ struct gendisk *gd;
+ int index;
+ int error;
+
+ scsi_autopm_get_device(sdp);
+ error = -ENODEV;
+ if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
+ goto out;
+
+ SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
+ "sd_probe\n"));
+
+ error = -ENOMEM;
+ sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
+ if (!sdkp)
+ goto out;
+
+ gd = alloc_disk(SD_MINORS);
+ if (!gd)
+ goto out_free;
+
+ do {
+ if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
+ goto out_put;
+
+ spin_lock(&sd_index_lock);
+ error = ida_get_new(&sd_index_ida, &index);
+ spin_unlock(&sd_index_lock);
+ } while (error == -EAGAIN);
+
+ if (error) {
+ sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
+ goto out_put;
+ }
+
+ error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
+ if (error) {
+ sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
+ goto out_free_index;
+ }
+
+ sdkp->device = sdp;
+ sdkp->driver = &sd_template;
+ sdkp->disk = gd;
+ sdkp->index = index;
+ atomic_set(&sdkp->openers, 0);
+ atomic_set(&sdkp->device->ioerr_cnt, 0);
+
+ if (!sdp->request_queue->rq_timeout) {
+ if (sdp->type != TYPE_MOD)
+ blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
+ else
+ blk_queue_rq_timeout(sdp->request_queue,
+ SD_MOD_TIMEOUT);
+ }
+
+ device_initialize(&sdkp->dev);
+ sdkp->dev.parent = dev;
+ sdkp->dev.class = &sd_disk_class;
+ dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+
+ if (device_add(&sdkp->dev))
+ goto out_free_index;
+
+ get_device(dev);
+ dev_set_drvdata(dev, sdkp);
+
+ get_device(&sdkp->dev); /* prevent release before async_schedule */
+ async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
+
+ return 0;
+
+ out_free_index:
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, index);
+ spin_unlock(&sd_index_lock);
+ out_put:
+ put_disk(gd);
+ out_free:
+ kfree(sdkp);
+ out:
+ scsi_autopm_put_device(sdp);
+ return error;
+}
+
+/**
+ * sd_remove - called whenever a scsi disk (previously recognized by
+ * sd_probe) is detached from the system. It is called (potentially
+ * multiple times) during sd module unload.
+ * @sdp: pointer to mid level scsi device object
+ *
+ * Note: this function is invoked from the scsi mid-level.
+ * This function potentially frees up a device name (e.g. /dev/sdc)
+ * that could be re-used by a subsequent sd_probe().
+ * This function is not called when the built-in sd driver is "exit-ed".
+ **/
+static int sd_remove(struct device *dev)
+{
+ struct scsi_disk *sdkp;
+ dev_t devt;
+
+ sdkp = dev_get_drvdata(dev);
+ devt = disk_devt(sdkp->disk);
+ scsi_autopm_get_device(sdkp->device);
+
+ async_synchronize_full_domain(&scsi_sd_pm_domain);
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
+ device_del(&sdkp->dev);
+ del_gendisk(sdkp->disk);
+ sd_shutdown(dev);
+
+ blk_register_region(devt, SD_MINORS, NULL,
+ sd_default_probe, NULL, NULL);
+
+ mutex_lock(&sd_ref_mutex);
+ dev_set_drvdata(dev, NULL);
+ put_device(&sdkp->dev);
+ mutex_unlock(&sd_ref_mutex);
+
+ return 0;
+}
+
+/**
+ * scsi_disk_release - Called to free the scsi_disk structure
+ * @dev: pointer to embedded class device
+ *
+ * sd_ref_mutex must be held entering this routine. Because it is
+ * called on last put, you should always use the scsi_disk_get()
+ * scsi_disk_put() helpers which manipulate the semaphore directly
+ * and never do a direct put_device.
+ **/
+static void scsi_disk_release(struct device *dev)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct gendisk *disk = sdkp->disk;
+
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sdkp->index);
+ spin_unlock(&sd_index_lock);
+
+ blk_integrity_unregister(disk);
+ disk->private_data = NULL;
+ put_disk(disk);
+ put_device(&sdkp->device->sdev_gendev);
+
+ kfree(sdkp);
+}
+
+static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
+{
+ unsigned char cmd[6] = { START_STOP }; /* START_VALID */
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdp = sdkp->device;
+ int res;
+
+ if (start)
+ cmd[4] |= 1; /* START */
+
+ if (sdp->start_stop_pwr_cond)
+ cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
+
+ if (!scsi_device_online(sdp))
+ return -ENODEV;
+
+ res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
+ if (res) {
+ sd_print_result(sdkp, "Start/Stop Unit failed", res);
+ if (driver_byte(res) & DRIVER_SENSE)
+ sd_print_sense_hdr(sdkp, &sshdr);
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ res = 0;
+ }
+
+ /* SCSI error codes must not go to the generic layer */
+ if (res)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Send a SYNCHRONIZE CACHE instruction down to the device through
+ * the normal SCSI command structure. Wait for the command to
+ * complete.
+ */
+static void sd_shutdown(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
+ if (!sdkp)
+ return; /* this can happen */
+
+ if (pm_runtime_suspended(dev))
+ return;
+
+ if (sdkp->WCE && sdkp->media_present) {
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+ sd_sync_cache(sdkp);
+ }
+
+ if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
+ sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ sd_start_stop_device(sdkp, 0);
+ }
+}
+
+static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!sdkp)
+ return 0; /* this can happen */
+
+ if (sdkp->WCE && sdkp->media_present) {
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+ ret = sd_sync_cache(sdkp);
+ if (ret) {
+ /* ignore OFFLINE device */
+ if (ret == -ENODEV)
+ ret = 0;
+ goto done;
+ }
+ }
+
+ if (sdkp->device->manage_start_stop) {
+ sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ /* an error is not worth aborting a system sleep */
+ ret = sd_start_stop_device(sdkp, 0);
+ if (ignore_stop_errors)
+ ret = 0;
+ }
+
+done:
+ return ret;
+}
+
+static int sd_suspend_system(struct device *dev)
+{
+ return sd_suspend_common(dev, true);
+}
+
+static int sd_suspend_runtime(struct device *dev)
+{
+ return sd_suspend_common(dev, false);
+}
+
+static int sd_resume(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
+ if (!sdkp->device->manage_start_stop)
+ return 0;
+
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+ return sd_start_stop_device(sdkp, 1);
+}
+
+/**
+ * init_sd - entry point for this driver (both when built in or when
+ * a module).
+ *
+ * Note: this function registers this driver with the scsi mid-level.
+ **/
+static int __init init_sd(void)
+{
+ int majors = 0, i, err;
+
+ SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
+
+ for (i = 0; i < SD_MAJORS; i++) {
+ if (register_blkdev(sd_major(i), "sd") != 0)
+ continue;
+ majors++;
+ blk_register_region(sd_major(i), SD_MINORS, NULL,
+ sd_default_probe, NULL, NULL);
+ }
+
+ if (!majors)
+ return -ENODEV;
+
+ err = class_register(&sd_disk_class);
+ if (err)
+ goto err_out;
+
+ sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
+ 0, 0, NULL);
+ if (!sd_cdb_cache) {
+ printk(KERN_ERR "sd: can't init extended cdb cache\n");
+ err = -ENOMEM;
+ goto err_out_class;
+ }
+
+ sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
+ if (!sd_cdb_pool) {
+ printk(KERN_ERR "sd: can't init extended cdb pool\n");
+ err = -ENOMEM;
+ goto err_out_cache;
+ }
+
+ err = scsi_register_driver(&sd_template.gendrv);
+ if (err)
+ goto err_out_driver;
+
+ return 0;
+
+err_out_driver:
+ mempool_destroy(sd_cdb_pool);
+
+err_out_cache:
+ kmem_cache_destroy(sd_cdb_cache);
+
+err_out_class:
+ class_unregister(&sd_disk_class);
+err_out:
+ for (i = 0; i < SD_MAJORS; i++)
+ unregister_blkdev(sd_major(i), "sd");
+ return err;
+}
+
+/**
+ * exit_sd - exit point for this driver (when it is a module).
+ *
+ * Note: this function unregisters this driver from the scsi mid-level.
+ **/
+static void __exit exit_sd(void)
+{
+ int i;
+
+ SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+
+ scsi_unregister_driver(&sd_template.gendrv);
+ mempool_destroy(sd_cdb_pool);
+ kmem_cache_destroy(sd_cdb_cache);
+
+ class_unregister(&sd_disk_class);
+
+ for (i = 0; i < SD_MAJORS; i++) {
+ blk_unregister_region(sd_major(i), SD_MINORS);
+ unregister_blkdev(sd_major(i), "sd");
+ }
+}
+
+module_init(init_sd);
+module_exit(exit_sd);
+
+static void sd_print_sense_hdr(struct scsi_disk *sdkp,
+ struct scsi_sense_hdr *sshdr)
+{
+ scsi_print_sense_hdr(sdkp->device,
+ sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
+}
+
+static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
+ int result)
+{
+ const char *hb_string = scsi_hostbyte_string(result);
+ const char *db_string = scsi_driverbyte_string(result);
+
+ if (hb_string || db_string)
+ sd_printk(KERN_INFO, sdkp,
+ "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
+ hb_string ? hb_string : "invalid",
+ db_string ? db_string : "invalid");
+ else
+ sd_printk(KERN_INFO, sdkp,
+ "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
+ msg, host_byte(result), driver_byte(result));
+}
+
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
new file mode 100644
index 000000000..63ba5ca7f
--- /dev/null
+++ b/drivers/scsi/sd.h
@@ -0,0 +1,261 @@
+#ifndef _SCSI_DISK_H
+#define _SCSI_DISK_H
+
+/*
+ * More than enough for everybody ;) The huge number of majors
+ * is a leftover from 16bit dev_t days, we don't really need that
+ * much numberspace.
+ */
+#define SD_MAJORS 16
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+#define SD_TIMEOUT (30 * HZ)
+#define SD_MOD_TIMEOUT (75 * HZ)
+/*
+ * Flush timeout is a multiplier over the standard device timeout which is
+ * user modifiable via sysfs but initially set to SD_TIMEOUT
+ */
+#define SD_FLUSH_TIMEOUT_MULTIPLIER 2
+#define SD_WRITE_SAME_TIMEOUT (120 * HZ)
+
+/*
+ * Number of allowed retries
+ */
+#define SD_MAX_RETRIES 5
+#define SD_PASSTHROUGH_RETRIES 1
+#define SD_MAX_MEDIUM_TIMEOUTS 2
+
+/*
+ * Size of the initial data buffer for mode and read capacity data
+ */
+#define SD_BUF_SIZE 512
+
+/*
+ * Number of sectors at the end of the device to avoid multi-sector
+ * accesses to in the case of last_sector_bug
+ */
+#define SD_LAST_BUGGY_SECTORS 8
+
+enum {
+ SD_EXT_CDB_SIZE = 32, /* Extended CDB size */
+ SD_MEMPOOL_SIZE = 2, /* CDB pool size */
+};
+
+enum {
+ SD_DEF_XFER_BLOCKS = 0xffff,
+ SD_MAX_XFER_BLOCKS = 0xffffffff,
+ SD_MAX_WS10_BLOCKS = 0xffff,
+ SD_MAX_WS16_BLOCKS = 0x7fffff,
+};
+
+enum {
+ SD_LBP_FULL = 0, /* Full logical block provisioning */
+ SD_LBP_UNMAP, /* Use UNMAP command */
+ SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */
+ SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */
+ SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */
+ SD_LBP_DISABLE, /* Discard disabled due to failed cmd */
+};
+
+struct scsi_disk {
+ struct scsi_driver *driver; /* always &sd_template */
+ struct scsi_device *device;
+ struct device dev;
+ struct gendisk *disk;
+ atomic_t openers;
+ sector_t capacity; /* size in 512-byte sectors */
+ u32 max_xfer_blocks;
+ u32 max_ws_blocks;
+ u32 max_unmap_blocks;
+ u32 unmap_granularity;
+ u32 unmap_alignment;
+ u32 index;
+ unsigned int physical_block_size;
+ unsigned int max_medium_access_timeouts;
+ unsigned int medium_access_timed_out;
+ u8 media_present;
+ u8 write_prot;
+ u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
+ unsigned ATO : 1; /* state of disk ATO bit */
+ unsigned cache_override : 1; /* temp override of WCE,RCD */
+ unsigned WCE : 1; /* state of disk WCE bit */
+ unsigned RCD : 1; /* state of disk RCD bit, unused */
+ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
+ unsigned first_scan : 1;
+ unsigned lbpme : 1;
+ unsigned lbprz : 1;
+ unsigned lbpu : 1;
+ unsigned lbpws : 1;
+ unsigned lbpws10 : 1;
+ unsigned lbpvpd : 1;
+ unsigned ws10 : 1;
+ unsigned ws16 : 1;
+};
+#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+
+static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
+{
+ return container_of(disk->private_data, struct scsi_disk, driver);
+}
+
+#define sd_printk(prefix, sdsk, fmt, a...) \
+ (sdsk)->disk ? \
+ sdev_prefix_printk(prefix, (sdsk)->device, \
+ (sdsk)->disk->disk_name, fmt, ##a) : \
+ sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+
+#define sd_first_printk(prefix, sdsk, fmt, a...) \
+ do { \
+ if ((sdkp)->first_scan) \
+ sd_printk(prefix, sdsk, fmt, ##a); \
+ } while (0)
+
+static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case SYNCHRONIZE_CACHE:
+ case VERIFY:
+ case VERIFY_12:
+ case VERIFY_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ case UNMAP:
+ return 1;
+ case VARIABLE_LENGTH_CMD:
+ switch (scmd->cmnd[9]) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * A DIF-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
+ *
+ * Type 0 is regular (unprotected) I/O
+ *
+ * Type 1 defines the contents of the guard and reference tags
+ *
+ * Type 2 defines the contents of the guard and reference tags and
+ * uses 32-byte commands to seed the latter
+ *
+ * Type 3 defines the contents of the guard tag only
+ */
+
+enum sd_dif_target_protection_types {
+ SD_DIF_TYPE0_PROTECTION = 0x0,
+ SD_DIF_TYPE1_PROTECTION = 0x1,
+ SD_DIF_TYPE2_PROTECTION = 0x2,
+ SD_DIF_TYPE3_PROTECTION = 0x3,
+};
+
+/*
+ * Look up the DIX operation based on whether the command is read or
+ * write and whether dix and dif are enabled.
+ */
+static inline unsigned int sd_prot_op(bool write, bool dix, bool dif)
+{
+ /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
+ const unsigned int ops[] = { /* wrt dix dif */
+ SCSI_PROT_NORMAL, /* 0 0 0 */
+ SCSI_PROT_READ_STRIP, /* 0 0 1 */
+ SCSI_PROT_READ_INSERT, /* 0 1 0 */
+ SCSI_PROT_READ_PASS, /* 0 1 1 */
+ SCSI_PROT_NORMAL, /* 1 0 0 */
+ SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
+ SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
+ SCSI_PROT_WRITE_PASS, /* 1 1 1 */
+ };
+
+ return ops[write << 2 | dix << 1 | dif];
+}
+
+/*
+ * Returns a mask of the protection flags that are valid for a given DIX
+ * operation.
+ */
+static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
+{
+ const unsigned int flag_mask[] = {
+ [SCSI_PROT_NORMAL] = 0,
+
+ [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT,
+
+ [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_REF_INCREMENT,
+
+ [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+ };
+
+ return flag_mask[prot_op];
+}
+
+/*
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+
+extern void sd_dif_config_host(struct scsi_disk *);
+extern void sd_dif_prepare(struct scsi_cmnd *scmd);
+extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline void sd_dif_config_host(struct scsi_disk *disk)
+{
+}
+static inline int sd_dif_prepare(struct scsi_cmnd *scmd)
+{
+ return 0;
+}
+static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
+{
+}
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
new file mode 100644
index 000000000..5c06d292b
--- /dev/null
+++ b/drivers/scsi/sd_dif.c
@@ -0,0 +1,205 @@
+/*
+ * sd_dif.c - SCSI Data Integrity Field
+ *
+ * Copyright (C) 2007, 2008 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/t10-pi.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsicam.h>
+
+#include "sd.h"
+
+/*
+ * Configure exchange of protection information between OS and HBA.
+ */
+void sd_dif_config_host(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+ struct gendisk *disk = sdkp->disk;
+ u8 type = sdkp->protection_type;
+ int dif, dix;
+
+ dif = scsi_host_dif_capable(sdp->host, type);
+ dix = scsi_host_dix_capable(sdp->host, type);
+
+ if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
+ dif = 0; dix = 1;
+ }
+
+ if (!dix)
+ return;
+
+ /* Enable DMA of protection information */
+ if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ blk_integrity_register(disk, &t10_pi_type3_ip);
+ else
+ blk_integrity_register(disk, &t10_pi_type1_ip);
+
+ disk->integrity->flags |= BLK_INTEGRITY_IP_CHECKSUM;
+ } else
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ blk_integrity_register(disk, &t10_pi_type3_crc);
+ else
+ blk_integrity_register(disk, &t10_pi_type1_crc);
+
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIX %s protection\n", disk->integrity->name);
+
+ /* Signal to block layer that we support sector tagging */
+ if (dif && type) {
+
+ disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+
+ if (!sdkp->ATO)
+ return;
+
+ if (type == SD_DIF_TYPE3_PROTECTION)
+ disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+ else
+ disk->integrity->tag_size = sizeof(u16);
+
+ sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
+ disk->integrity->tag_size);
+ }
+}
+
+/*
+ * The virtual start sector is the one that was originally submitted
+ * by the block layer. Due to partitioning, MD/DM cloning, etc. the
+ * actual physical start sector is likely to be different. Remap
+ * protection information to match the physical LBA.
+ *
+ * From a protocol perspective there's a slight difference between
+ * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
+ * reference tag is seeded in the CDB. This gives us the potential to
+ * avoid virt->phys remapping during write. However, at read time we
+ * don't know whether the virt sector is the same as when we wrote it
+ * (we could be reading from real disk as opposed to MD/DM device. So
+ * we always remap Type 2 making it identical to Type 1.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void sd_dif_prepare(struct scsi_cmnd *scmd)
+{
+ const int tuple_sz = sizeof(struct t10_pi_tuple);
+ struct bio *bio;
+ struct scsi_disk *sdkp;
+ struct t10_pi_tuple *pi;
+ u32 phys, virt;
+
+ sdkp = scsi_disk(scmd->request->rq_disk);
+
+ if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
+ return;
+
+ phys = scsi_prot_ref_tag(scmd);
+
+ __rq_for_each_bio(bio, scmd->request) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_vec iv;
+ struct bvec_iter iter;
+ unsigned int j;
+
+ /* Already remapped? */
+ if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
+ break;
+
+ virt = bip_get_seed(bip) & 0xffffffff;
+
+ bip_for_each_vec(iv, bip, iter) {
+ pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
+
+ for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
+
+ if (be32_to_cpu(pi->ref_tag) == virt)
+ pi->ref_tag = cpu_to_be32(phys);
+
+ virt++;
+ phys++;
+ }
+
+ kunmap_atomic(pi);
+ }
+
+ bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+ }
+}
+
+/*
+ * Remap physical sector values in the reference tag to the virtual
+ * values expected by the block layer.
+ */
+void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
+{
+ const int tuple_sz = sizeof(struct t10_pi_tuple);
+ struct scsi_disk *sdkp;
+ struct bio *bio;
+ struct t10_pi_tuple *pi;
+ unsigned int j, intervals;
+ u32 phys, virt;
+
+ sdkp = scsi_disk(scmd->request->rq_disk);
+
+ if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
+ return;
+
+ intervals = good_bytes / scsi_prot_interval(scmd);
+ phys = scsi_prot_ref_tag(scmd);
+
+ __rq_for_each_bio(bio, scmd->request) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_vec iv;
+ struct bvec_iter iter;
+
+ virt = bip_get_seed(bip) & 0xffffffff;
+
+ bip_for_each_vec(iv, bip, iter) {
+ pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
+
+ for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
+
+ if (intervals == 0) {
+ kunmap_atomic(pi);
+ return;
+ }
+
+ if (be32_to_cpu(pi->ref_tag) == phys)
+ pi->ref_tag = cpu_to_be32(virt);
+
+ virt++;
+ phys++;
+ intervals--;
+ }
+
+ kunmap_atomic(pi);
+ }
+ }
+}
+
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
new file mode 100644
index 000000000..dcb0d76d7
--- /dev/null
+++ b/drivers/scsi/ses.c
@@ -0,0 +1,840 @@
+/*
+ * SCSI Enclosure Services
+ *
+ * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ *
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU General Public License
+** version 2 as published by the Free Software Foundation.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+*/
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/enclosure.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_host.h>
+
+struct ses_device {
+ unsigned char *page1;
+ unsigned char *page1_types;
+ unsigned char *page2;
+ unsigned char *page10;
+ short page1_len;
+ short page1_num_types;
+ short page2_len;
+ short page10_len;
+};
+
+struct ses_component {
+ u64 addr;
+};
+
+static int ses_probe(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int err = -ENODEV;
+
+ if (sdev->type != TYPE_ENCLOSURE)
+ goto out;
+
+ err = 0;
+ sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n");
+
+ out:
+ return err;
+}
+
+#define SES_TIMEOUT (30 * HZ)
+#define SES_RETRIES 3
+
+static void init_device_slot_control(unsigned char *dest_desc,
+ struct enclosure_component *ecomp,
+ unsigned char *status)
+{
+ memcpy(dest_desc, status, 4);
+ dest_desc[0] = 0;
+ /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */
+ if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE)
+ dest_desc[1] = 0;
+ dest_desc[2] &= 0xde;
+ dest_desc[3] &= 0x3c;
+}
+
+
+static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+{
+ unsigned char cmd[] = {
+ RECEIVE_DIAGNOSTIC,
+ 1, /* Set PCV bit */
+ page_code,
+ bufflen >> 8,
+ bufflen & 0xff,
+ 0
+ };
+
+ return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+}
+
+static int ses_send_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+{
+ u32 result;
+
+ unsigned char cmd[] = {
+ SEND_DIAGNOSTIC,
+ 0x10, /* Set PF bit */
+ 0,
+ bufflen >> 8,
+ bufflen & 0xff,
+ 0
+ };
+
+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+ NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+ if (result)
+ sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
+ result);
+ return result;
+}
+
+static int ses_set_page2_descriptor(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ unsigned char *desc)
+{
+ int i, j, count = 0, descriptor = ecomp->number;
+ struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
+ struct ses_device *ses_dev = edev->scratch;
+ unsigned char *type_ptr = ses_dev->page1_types;
+ unsigned char *desc_ptr = ses_dev->page2 + 8;
+
+ /* Clear everything */
+ memset(desc_ptr, 0, ses_dev->page2_len - 8);
+ for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
+ for (j = 0; j < type_ptr[1]; j++) {
+ desc_ptr += 4;
+ if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+ type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ continue;
+ if (count++ == descriptor) {
+ memcpy(desc_ptr, desc, 4);
+ /* set select */
+ desc_ptr[0] |= 0x80;
+ /* clear reserved, just in case */
+ desc_ptr[0] &= 0xf0;
+ }
+ }
+ }
+
+ return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
+}
+
+static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ int i, j, count = 0, descriptor = ecomp->number;
+ struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
+ struct ses_device *ses_dev = edev->scratch;
+ unsigned char *type_ptr = ses_dev->page1_types;
+ unsigned char *desc_ptr = ses_dev->page2 + 8;
+
+ ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
+
+ for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
+ for (j = 0; j < type_ptr[1]; j++) {
+ desc_ptr += 4;
+ if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+ type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ continue;
+ if (count++ == descriptor)
+ return desc_ptr;
+ }
+ }
+ return NULL;
+}
+
+/* For device slot and array device slot elements, byte 3 bit 6
+ * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this
+ * code stands these bits are shifted 4 positions right so in
+ * sysfs they will appear as bits 2 and 1 respectively. Strange. */
+static void ses_get_fault(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ unsigned char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ if (desc)
+ ecomp->fault = (desc[3] & 0x60) >> 4;
+}
+
+static int ses_set_fault(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ enum enclosure_component_setting val)
+{
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
+
+ switch (val) {
+ case ENCLOSURE_SETTING_DISABLED:
+ desc[3] &= 0xdf;
+ break;
+ case ENCLOSURE_SETTING_ENABLED:
+ desc[3] |= 0x20;
+ break;
+ default:
+ /* SES doesn't do the SGPIO blink settings */
+ return -EINVAL;
+ }
+
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static void ses_get_status(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ unsigned char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ if (desc)
+ ecomp->status = (desc[0] & 0x0f);
+}
+
+static void ses_get_locate(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ unsigned char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ if (desc)
+ ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
+}
+
+static int ses_set_locate(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ enum enclosure_component_setting val)
+{
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
+
+ switch (val) {
+ case ENCLOSURE_SETTING_DISABLED:
+ desc[2] &= 0xfd;
+ break;
+ case ENCLOSURE_SETTING_ENABLED:
+ desc[2] |= 0x02;
+ break;
+ default:
+ /* SES doesn't do the SGPIO blink settings */
+ return -EINVAL;
+ }
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static int ses_set_active(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ enum enclosure_component_setting val)
+{
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
+
+ switch (val) {
+ case ENCLOSURE_SETTING_DISABLED:
+ desc[2] &= 0x7f;
+ ecomp->active = 0;
+ break;
+ case ENCLOSURE_SETTING_ENABLED:
+ desc[2] |= 0x80;
+ ecomp->active = 1;
+ break;
+ default:
+ /* SES doesn't do the SGPIO blink settings */
+ return -EINVAL;
+ }
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static int ses_show_id(struct enclosure_device *edev, char *buf)
+{
+ struct ses_device *ses_dev = edev->scratch;
+ unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4);
+
+ return sprintf(buf, "%#llx\n", id);
+}
+
+static void ses_get_power_status(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ unsigned char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ if (desc)
+ ecomp->power_status = (desc[3] & 0x10) ? 0 : 1;
+}
+
+static int ses_set_power_status(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ int val)
+{
+ unsigned char desc[4];
+ unsigned char *desc_ptr;
+
+ desc_ptr = ses_get_page2_descriptor(edev, ecomp);
+
+ if (!desc_ptr)
+ return -EIO;
+
+ init_device_slot_control(desc, ecomp, desc_ptr);
+
+ switch (val) {
+ /* power = 1 is device_off = 0 and vice versa */
+ case 0:
+ desc[3] |= 0x10;
+ break;
+ case 1:
+ desc[3] &= 0xef;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ecomp->power_status = val;
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static struct enclosure_component_callbacks ses_enclosure_callbacks = {
+ .get_fault = ses_get_fault,
+ .set_fault = ses_set_fault,
+ .get_status = ses_get_status,
+ .get_locate = ses_get_locate,
+ .set_locate = ses_set_locate,
+ .get_power_status = ses_get_power_status,
+ .set_power_status = ses_set_power_status,
+ .set_active = ses_set_active,
+ .show_id = ses_show_id,
+};
+
+struct ses_host_edev {
+ struct Scsi_Host *shost;
+ struct enclosure_device *edev;
+};
+
+#if 0
+int ses_match_host(struct enclosure_device *edev, void *data)
+{
+ struct ses_host_edev *sed = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(edev->edev.parent))
+ return 0;
+
+ sdev = to_scsi_device(edev->edev.parent);
+
+ if (sdev->host != sed->shost)
+ return 0;
+
+ sed->edev = edev;
+ return 1;
+}
+#endif /* 0 */
+
+static void ses_process_descriptor(struct enclosure_component *ecomp,
+ unsigned char *desc)
+{
+ int eip = desc[0] & 0x10;
+ int invalid = desc[0] & 0x80;
+ enum scsi_protocol proto = desc[0] & 0x0f;
+ u64 addr = 0;
+ int slot = -1;
+ struct ses_component *scomp = ecomp->scratch;
+ unsigned char *d;
+
+ if (invalid)
+ return;
+
+ switch (proto) {
+ case SCSI_PROTOCOL_FCP:
+ if (eip) {
+ d = desc + 4;
+ slot = d[3];
+ }
+ break;
+ case SCSI_PROTOCOL_SAS:
+ if (eip) {
+ d = desc + 4;
+ slot = d[3];
+ d = desc + 8;
+ } else
+ d = desc + 4;
+ /* only take the phy0 addr */
+ addr = (u64)d[12] << 56 |
+ (u64)d[13] << 48 |
+ (u64)d[14] << 40 |
+ (u64)d[15] << 32 |
+ (u64)d[16] << 24 |
+ (u64)d[17] << 16 |
+ (u64)d[18] << 8 |
+ (u64)d[19];
+ break;
+ default:
+ /* FIXME: Need to add more protocols than just SAS */
+ break;
+ }
+ ecomp->slot = slot;
+ scomp->addr = addr;
+}
+
+struct efd {
+ u64 addr;
+ struct device *dev;
+};
+
+static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
+ void *data)
+{
+ struct efd *efd = data;
+ int i;
+ struct ses_component *scomp;
+
+ if (!edev->component[0].scratch)
+ return 0;
+
+ for (i = 0; i < edev->components; i++) {
+ scomp = edev->component[i].scratch;
+ if (scomp->addr != efd->addr)
+ continue;
+
+ if (enclosure_add_device(edev, i, efd->dev) == 0)
+ kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE);
+ return 1;
+ }
+ return 0;
+}
+
+#define INIT_ALLOC_SIZE 32
+
+static void ses_enclosure_data_process(struct enclosure_device *edev,
+ struct scsi_device *sdev,
+ int create)
+{
+ u32 result;
+ unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL;
+ int i, j, page7_len, len, components;
+ struct ses_device *ses_dev = edev->scratch;
+ int types = ses_dev->page1_num_types;
+ unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+
+ if (!hdr_buf)
+ goto simple_populate;
+
+ /* re-read page 10 */
+ if (ses_dev->page10)
+ ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len);
+ /* Page 7 for the descriptors is optional */
+ result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto simple_populate;
+
+ page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ /* add 1 for trailing '\0' we'll use */
+ buf = kzalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ goto simple_populate;
+ result = ses_recv_diag(sdev, 7, buf, len);
+ if (result) {
+ simple_populate:
+ kfree(buf);
+ buf = NULL;
+ desc_ptr = NULL;
+ len = 0;
+ page7_len = 0;
+ } else {
+ desc_ptr = buf + 8;
+ len = (desc_ptr[2] << 8) + desc_ptr[3];
+ /* skip past overall descriptor */
+ desc_ptr += len + 4;
+ }
+ if (ses_dev->page10)
+ addl_desc_ptr = ses_dev->page10 + 8;
+ type_ptr = ses_dev->page1_types;
+ components = 0;
+ for (i = 0; i < types; i++, type_ptr += 4) {
+ for (j = 0; j < type_ptr[1]; j++) {
+ char *name = NULL;
+ struct enclosure_component *ecomp;
+
+ if (desc_ptr) {
+ if (desc_ptr >= buf + page7_len) {
+ desc_ptr = NULL;
+ } else {
+ len = (desc_ptr[2] << 8) + desc_ptr[3];
+ desc_ptr += 4;
+ /* Add trailing zero - pushes into
+ * reserved space */
+ desc_ptr[len] = '\0';
+ name = desc_ptr;
+ }
+ }
+ if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
+
+ if (create)
+ ecomp = enclosure_component_alloc(
+ edev,
+ components++,
+ type_ptr[0],
+ name);
+ else
+ ecomp = &edev->component[components++];
+
+ if (!IS_ERR(ecomp)) {
+ ses_get_power_status(edev, ecomp);
+ if (addl_desc_ptr)
+ ses_process_descriptor(
+ ecomp,
+ addl_desc_ptr);
+ if (create)
+ enclosure_component_register(
+ ecomp);
+ }
+ }
+ if (desc_ptr)
+ desc_ptr += len;
+
+ if (addl_desc_ptr)
+ addl_desc_ptr += addl_desc_ptr[1] + 2;
+
+ }
+ }
+ kfree(buf);
+ kfree(hdr_buf);
+}
+
+static void ses_match_to_enclosure(struct enclosure_device *edev,
+ struct scsi_device *sdev)
+{
+ unsigned char *desc;
+ struct efd efd = {
+ .addr = 0,
+ };
+
+ ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+
+ if (!sdev->vpd_pg83_len)
+ return;
+
+ desc = sdev->vpd_pg83 + 4;
+ while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
+ enum scsi_protocol proto = desc[0] >> 4;
+ u8 code_set = desc[0] & 0x0f;
+ u8 piv = desc[1] & 0x80;
+ u8 assoc = (desc[1] & 0x30) >> 4;
+ u8 type = desc[1] & 0x0f;
+ u8 len = desc[3];
+
+ if (piv && code_set == 1 && assoc == 1
+ && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
+ efd.addr = get_unaligned_be64(&desc[4]);
+
+ desc += len + 4;
+ }
+ if (efd.addr) {
+ efd.dev = &sdev->sdev_gendev;
+
+ enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+ }
+}
+
+static int ses_intf_add(struct device *cdev,
+ struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(cdev->parent);
+ struct scsi_device *tmp_sdev;
+ unsigned char *buf = NULL, *hdr_buf, *type_ptr;
+ struct ses_device *ses_dev;
+ u32 result;
+ int i, types, len, components = 0;
+ int err = -ENOMEM;
+ int num_enclosures;
+ struct enclosure_device *edev;
+ struct ses_component *scomp = NULL;
+
+ if (!scsi_device_enclosure(sdev)) {
+ /* not an enclosure, but might be in one */
+ struct enclosure_device *prev = NULL;
+
+ while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
+ ses_match_to_enclosure(edev, sdev);
+ prev = edev;
+ }
+ return -ENODEV;
+ }
+
+ /* TYPE_ENCLOSURE prints a message in probe */
+ if (sdev->type != TYPE_ENCLOSURE)
+ sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n");
+
+ ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL);
+ hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+ if (!hdr_buf || !ses_dev)
+ goto err_init_free;
+
+ result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto recv_failed;
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto err_free;
+
+ result = ses_recv_diag(sdev, 1, buf, len);
+ if (result)
+ goto recv_failed;
+
+ types = 0;
+
+ /* we always have one main enclosure and the rest are referred
+ * to as secondary subenclosures */
+ num_enclosures = buf[1] + 1;
+
+ /* begin at the enclosure descriptor */
+ type_ptr = buf + 8;
+ /* skip all the enclosure descriptors */
+ for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) {
+ types += type_ptr[2];
+ type_ptr += type_ptr[3] + 4;
+ }
+
+ ses_dev->page1_types = type_ptr;
+ ses_dev->page1_num_types = types;
+
+ for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) {
+ if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ components += type_ptr[1];
+ }
+ ses_dev->page1 = buf;
+ ses_dev->page1_len = len;
+ buf = NULL;
+
+ result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto recv_failed;
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto err_free;
+
+ /* make sure getting page 2 actually works */
+ result = ses_recv_diag(sdev, 2, buf, len);
+ if (result)
+ goto recv_failed;
+ ses_dev->page2 = buf;
+ ses_dev->page2_len = len;
+ buf = NULL;
+
+ /* The additional information page --- allows us
+ * to match up the devices */
+ result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
+ if (!result) {
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto err_free;
+
+ result = ses_recv_diag(sdev, 10, buf, len);
+ if (result)
+ goto recv_failed;
+ ses_dev->page10 = buf;
+ ses_dev->page10_len = len;
+ buf = NULL;
+ }
+ scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
+ if (!scomp)
+ goto err_free;
+
+ edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
+ components, &ses_enclosure_callbacks);
+ if (IS_ERR(edev)) {
+ err = PTR_ERR(edev);
+ goto err_free;
+ }
+
+ kfree(hdr_buf);
+
+ edev->scratch = ses_dev;
+ for (i = 0; i < components; i++)
+ edev->component[i].scratch = scomp + i;
+
+ ses_enclosure_data_process(edev, sdev, 1);
+
+ /* see if there are any devices matching before
+ * we found the enclosure */
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
+ continue;
+ ses_match_to_enclosure(edev, tmp_sdev);
+ }
+
+ return 0;
+
+ recv_failed:
+ sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
+ result);
+ err = -ENODEV;
+ err_free:
+ kfree(buf);
+ kfree(scomp);
+ kfree(ses_dev->page10);
+ kfree(ses_dev->page2);
+ kfree(ses_dev->page1);
+ err_init_free:
+ kfree(ses_dev);
+ kfree(hdr_buf);
+ sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err);
+ return err;
+}
+
+static int ses_remove(struct device *dev)
+{
+ return 0;
+}
+
+static void ses_intf_remove_component(struct scsi_device *sdev)
+{
+ struct enclosure_device *edev, *prev = NULL;
+
+ while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
+ prev = edev;
+ if (!enclosure_remove_device(edev, &sdev->sdev_gendev))
+ break;
+ }
+ if (edev)
+ put_device(&edev->edev);
+}
+
+static void ses_intf_remove_enclosure(struct scsi_device *sdev)
+{
+ struct enclosure_device *edev;
+ struct ses_device *ses_dev;
+
+ /* exact match to this enclosure */
+ edev = enclosure_find(&sdev->sdev_gendev, NULL);
+ if (!edev)
+ return;
+
+ ses_dev = edev->scratch;
+ edev->scratch = NULL;
+
+ kfree(ses_dev->page10);
+ kfree(ses_dev->page1);
+ kfree(ses_dev->page2);
+ kfree(ses_dev);
+
+ kfree(edev->component[0].scratch);
+
+ put_device(&edev->edev);
+ enclosure_unregister(edev);
+}
+
+static void ses_intf_remove(struct device *cdev,
+ struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(cdev->parent);
+
+ if (!scsi_device_enclosure(sdev))
+ ses_intf_remove_component(sdev);
+ else
+ ses_intf_remove_enclosure(sdev);
+}
+
+static struct class_interface ses_interface = {
+ .add_dev = ses_intf_add,
+ .remove_dev = ses_intf_remove,
+};
+
+static struct scsi_driver ses_template = {
+ .gendrv = {
+ .name = "ses",
+ .owner = THIS_MODULE,
+ .probe = ses_probe,
+ .remove = ses_remove,
+ },
+};
+
+static int __init ses_init(void)
+{
+ int err;
+
+ err = scsi_register_interface(&ses_interface);
+ if (err)
+ return err;
+
+ err = scsi_register_driver(&ses_template.gendrv);
+ if (err)
+ goto out_unreg;
+
+ return 0;
+
+ out_unreg:
+ scsi_unregister_interface(&ses_interface);
+ return err;
+}
+
+static void __exit ses_exit(void)
+{
+ scsi_unregister_driver(&ses_template.gendrv);
+ scsi_unregister_interface(&ses_interface);
+}
+
+module_init(ses_init);
+module_exit(ses_exit);
+
+MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE);
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
new file mode 100644
index 000000000..9d7b7db75
--- /dev/null
+++ b/drivers/scsi/sg.c
@@ -0,0 +1,2699 @@
+/*
+ * History:
+ * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
+ * to allow user process control of SCSI devices.
+ * Development Sponsored by Killy Corp. NY NY
+ *
+ * Original driver (sg.c):
+ * Copyright (C) 1992 Lawrence Foard
+ * Version 2 and 3 extensions to driver:
+ * Copyright (C) 1998 - 2014 Douglas Gilbert
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ */
+
+static int sg_version_num = 30536; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.36"
+
+/*
+ * D. P. Gilbert (dgilbert@interlog.com), notes:
+ * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
+ * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
+ * (otherwise the macros compile to empty statements).
+ *
+ */
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/mtio.h>
+#include <linux/ioctl.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/blktrace_api.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/ratelimit.h>
+#include <linux/uio.h>
+
+#include "scsi.h"
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/sg.h>
+
+#include "scsi_logging.h"
+
+#ifdef CONFIG_SCSI_PROC_FS
+#include <linux/proc_fs.h>
+static char *sg_version_date = "20140603";
+
+static int sg_proc_init(void);
+static void sg_proc_cleanup(void);
+#endif
+
+#define SG_ALLOW_DIO_DEF 0
+
+#define SG_MAX_DEVS 32768
+
+/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
+ * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
+ * than 16 bytes are "variable length" whose length is a multiple of 4
+ */
+#define SG_MAX_CDB_SIZE 252
+
+/*
+ * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
+ * Then when using 32 bit integers x * m may overflow during the calculation.
+ * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
+ * calculates the same, but prevents the overflow when both m and d
+ * are "small" numbers (like HZ and USER_HZ).
+ * Of course an overflow is inavoidable if the result of muldiv doesn't fit
+ * in 32 bits.
+ */
+#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
+
+#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
+
+int sg_big_buff = SG_DEF_RESERVED_SIZE;
+/* N.B. This variable is readable and writeable via
+ /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
+ of this size (or less if there is not enough memory) will be reserved
+ for use by this file descriptor. [Deprecated usage: this variable is also
+ readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
+ the kernel (i.e. it is not a module).] */
+static int def_reserved_size = -1; /* picks up init parameter */
+static int sg_allow_dio = SG_ALLOW_DIO_DEF;
+
+static int scatter_elem_sz = SG_SCATTER_SZ;
+static int scatter_elem_sz_prev = SG_SCATTER_SZ;
+
+#define SG_SECTOR_SZ 512
+
+static int sg_add_device(struct device *, struct class_interface *);
+static void sg_remove_device(struct device *, struct class_interface *);
+
+static DEFINE_IDR(sg_index_idr);
+static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
+ file descriptor list for device */
+
+static struct class_interface sg_interface = {
+ .add_dev = sg_add_device,
+ .remove_dev = sg_remove_device,
+};
+
+typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
+ unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
+ unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
+ unsigned bufflen; /* Size of (aggregate) data buffer */
+ struct page **pages;
+ int page_order;
+ char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
+ unsigned char cmd_opcode; /* first byte of command */
+} Sg_scatter_hold;
+
+struct sg_device; /* forward declarations */
+struct sg_fd;
+
+typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
+ struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct sg_fd *parentfp; /* NULL -> not in use */
+ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
+ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
+ unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
+ char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
+ char orphan; /* 1 -> drop on sight, 0 -> normal */
+ char sg_io_owned; /* 1 -> packet belongs to SG_IO */
+ /* done protected by rq_list_lock */
+ char done; /* 0->before bh, 1->before read, 2->read */
+ struct request *rq;
+ struct bio *bio;
+ struct execute_work ew;
+} Sg_request;
+
+typedef struct sg_fd { /* holds the state of a file descriptor */
+ struct list_head sfd_siblings; /* protected by device's sfd_lock */
+ struct sg_device *parentdp; /* owning device */
+ wait_queue_head_t read_wait; /* queue read until command done */
+ rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
+ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
+ Sg_scatter_hold reserve; /* buffer held for this file descriptor */
+ unsigned save_scat_len; /* original length of trunc. scat. element */
+ Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct fasync_struct *async_qp; /* used by asynchronous notification */
+ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
+ char low_dma; /* as in parent but possibly overridden to 1 */
+ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
+ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
+ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
+ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
+ char mmap_called; /* 0 -> mmap() never called on this fd */
+ struct kref f_ref;
+ struct execute_work ew;
+} Sg_fd;
+
+typedef struct sg_device { /* holds the state of each scsi generic device */
+ struct scsi_device *device;
+ wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
+ struct mutex open_rel_lock; /* held when in open() or release() */
+ int sg_tablesize; /* adapter's max scatter-gather table size */
+ u32 index; /* device index number */
+ struct list_head sfds;
+ rwlock_t sfd_lock; /* protect access to sfd list */
+ atomic_t detaching; /* 0->device usable, 1->device detaching */
+ bool exclude; /* 1->open(O_EXCL) succeeded and is active */
+ int open_cnt; /* count of opens (perhaps < num(sfds) ) */
+ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
+ struct gendisk *disk;
+ struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
+ struct kref d_ref;
+} Sg_device;
+
+/* tasklet or soft irq callback */
+static void sg_rq_end_io(struct request *rq, int uptodate);
+static int sg_start_req(Sg_request *srp, unsigned char *cmd);
+static int sg_finish_rem_req(Sg_request * srp);
+static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
+static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
+ Sg_request * srp);
+static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
+ const char __user *buf, size_t count, int blocking,
+ int read_only, int sg_io_owned, Sg_request **o_srp);
+static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ unsigned char *cmnd, int timeout, int blocking);
+static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
+static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
+static void sg_build_reserve(Sg_fd * sfp, int req_size);
+static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
+static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+static Sg_fd *sg_add_sfp(Sg_device * sdp);
+static void sg_remove_sfp(struct kref *);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_add_request(Sg_fd * sfp);
+static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+static int sg_res_in_use(Sg_fd * sfp);
+static Sg_device *sg_get_dev(int dev);
+static void sg_device_destroy(struct kref *kref);
+
+#define SZ_SG_HEADER sizeof(struct sg_header)
+#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
+#define SZ_SG_IOVEC sizeof(sg_iovec_t)
+#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
+
+#define sg_printk(prefix, sdp, fmt, a...) \
+ sdev_prefix_printk(prefix, (sdp)->device, \
+ (sdp)->disk->disk_name, fmt, ##a)
+
+static int sg_allow_access(struct file *filp, unsigned char *cmd)
+{
+ struct sg_fd *sfp = filp->private_data;
+
+ if (sfp->parentdp->device->type == TYPE_SCANNER)
+ return 0;
+
+ return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
+}
+
+static int
+open_wait(Sg_device *sdp, int flags)
+{
+ int retval = 0;
+
+ if (flags & O_EXCL) {
+ while (sdp->open_cnt > 0) {
+ mutex_unlock(&sdp->open_rel_lock);
+ retval = wait_event_interruptible(sdp->open_wait,
+ (atomic_read(&sdp->detaching) ||
+ !sdp->open_cnt));
+ mutex_lock(&sdp->open_rel_lock);
+
+ if (retval) /* -ERESTARTSYS */
+ return retval;
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ }
+ } else {
+ while (sdp->exclude) {
+ mutex_unlock(&sdp->open_rel_lock);
+ retval = wait_event_interruptible(sdp->open_wait,
+ (atomic_read(&sdp->detaching) ||
+ !sdp->exclude));
+ mutex_lock(&sdp->open_rel_lock);
+
+ if (retval) /* -ERESTARTSYS */
+ return retval;
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ }
+ }
+
+ return retval;
+}
+
+/* Returns 0 on success, else a negated errno value */
+static int
+sg_open(struct inode *inode, struct file *filp)
+{
+ int dev = iminor(inode);
+ int flags = filp->f_flags;
+ struct request_queue *q;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ int retval;
+
+ nonseekable_open(inode, filp);
+ if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
+ return -EPERM; /* Can't lock it with read only access */
+ sdp = sg_get_dev(dev);
+ if (IS_ERR(sdp))
+ return PTR_ERR(sdp);
+
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_open: flags=0x%x\n", flags));
+
+ /* This driver's module count bumped by fops_get in <linux/fs.h> */
+ /* Prevent the device driver from vanishing while we sleep */
+ retval = scsi_device_get(sdp->device);
+ if (retval)
+ goto sg_put;
+
+ retval = scsi_autopm_get_device(sdp->device);
+ if (retval)
+ goto sdp_put;
+
+ /* scsi_block_when_processing_errors() may block so bypass
+ * check if O_NONBLOCK. Permits SCSI commands to be issued
+ * during error recovery. Tread carefully. */
+ if (!((flags & O_NONBLOCK) ||
+ scsi_block_when_processing_errors(sdp->device))) {
+ retval = -ENXIO;
+ /* we are in error recovery for this device */
+ goto error_out;
+ }
+
+ mutex_lock(&sdp->open_rel_lock);
+ if (flags & O_NONBLOCK) {
+ if (flags & O_EXCL) {
+ if (sdp->open_cnt > 0) {
+ retval = -EBUSY;
+ goto error_mutex_locked;
+ }
+ } else {
+ if (sdp->exclude) {
+ retval = -EBUSY;
+ goto error_mutex_locked;
+ }
+ }
+ } else {
+ retval = open_wait(sdp, flags);
+ if (retval) /* -ERESTARTSYS or -ENODEV */
+ goto error_mutex_locked;
+ }
+
+ /* N.B. at this point we are holding the open_rel_lock */
+ if (flags & O_EXCL)
+ sdp->exclude = true;
+
+ if (sdp->open_cnt < 1) { /* no existing opens */
+ sdp->sgdebug = 0;
+ q = sdp->device->request_queue;
+ sdp->sg_tablesize = queue_max_segments(q);
+ }
+ sfp = sg_add_sfp(sdp);
+ if (IS_ERR(sfp)) {
+ retval = PTR_ERR(sfp);
+ goto out_undo;
+ }
+
+ filp->private_data = sfp;
+ sdp->open_cnt++;
+ mutex_unlock(&sdp->open_rel_lock);
+
+ retval = 0;
+sg_put:
+ kref_put(&sdp->d_ref, sg_device_destroy);
+ return retval;
+
+out_undo:
+ if (flags & O_EXCL) {
+ sdp->exclude = false; /* undo if error */
+ wake_up_interruptible(&sdp->open_wait);
+ }
+error_mutex_locked:
+ mutex_unlock(&sdp->open_rel_lock);
+error_out:
+ scsi_autopm_put_device(sdp->device);
+sdp_put:
+ scsi_device_put(sdp->device);
+ goto sg_put;
+}
+
+/* Release resources associated with a successful sg_open()
+ * Returns 0 on success, else a negated errno value */
+static int
+sg_release(struct inode *inode, struct file *filp)
+{
+ Sg_device *sdp;
+ Sg_fd *sfp;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
+
+ mutex_lock(&sdp->open_rel_lock);
+ scsi_autopm_put_device(sdp->device);
+ kref_put(&sfp->f_ref, sg_remove_sfp);
+ sdp->open_cnt--;
+
+ /* possibly many open()s waiting on exlude clearing, start many;
+ * only open(O_EXCL)s wait on 0==open_cnt so only start one */
+ if (sdp->exclude) {
+ sdp->exclude = false;
+ wake_up_interruptible_all(&sdp->open_wait);
+ } else if (0 == sdp->open_cnt) {
+ wake_up_interruptible(&sdp->open_wait);
+ }
+ mutex_unlock(&sdp->open_rel_lock);
+ return 0;
+}
+
+static ssize_t
+sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
+{
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ int req_pack_id = -1;
+ sg_io_hdr_t *hp;
+ struct sg_header *old_hdr = NULL;
+ int retval = 0;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_read: count=%d\n", (int) count));
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+ if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
+ old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
+ if (!old_hdr)
+ return -ENOMEM;
+ if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
+ retval = -EFAULT;
+ goto free_old_hdr;
+ }
+ if (old_hdr->reply_len < 0) {
+ if (count >= SZ_SG_IO_HDR) {
+ sg_io_hdr_t *new_hdr;
+ new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
+ if (!new_hdr) {
+ retval = -ENOMEM;
+ goto free_old_hdr;
+ }
+ retval =__copy_from_user
+ (new_hdr, buf, SZ_SG_IO_HDR);
+ req_pack_id = new_hdr->pack_id;
+ kfree(new_hdr);
+ if (retval) {
+ retval = -EFAULT;
+ goto free_old_hdr;
+ }
+ }
+ } else
+ req_pack_id = old_hdr->pack_id;
+ }
+ srp = sg_get_rq_mark(sfp, req_pack_id);
+ if (!srp) { /* now wait on packet to arrive */
+ if (atomic_read(&sdp->detaching)) {
+ retval = -ENODEV;
+ goto free_old_hdr;
+ }
+ if (filp->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ goto free_old_hdr;
+ }
+ retval = wait_event_interruptible(sfp->read_wait,
+ (atomic_read(&sdp->detaching) ||
+ (srp = sg_get_rq_mark(sfp, req_pack_id))));
+ if (atomic_read(&sdp->detaching)) {
+ retval = -ENODEV;
+ goto free_old_hdr;
+ }
+ if (retval) {
+ /* -ERESTARTSYS as signal hit process */
+ goto free_old_hdr;
+ }
+ }
+ if (srp->header.interface_id != '\0') {
+ retval = sg_new_read(sfp, buf, count, srp);
+ goto free_old_hdr;
+ }
+
+ hp = &srp->header;
+ if (old_hdr == NULL) {
+ old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
+ if (! old_hdr) {
+ retval = -ENOMEM;
+ goto free_old_hdr;
+ }
+ }
+ memset(old_hdr, 0, SZ_SG_HEADER);
+ old_hdr->reply_len = (int) hp->timeout;
+ old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
+ old_hdr->pack_id = hp->pack_id;
+ old_hdr->twelve_byte =
+ ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
+ old_hdr->target_status = hp->masked_status;
+ old_hdr->host_status = hp->host_status;
+ old_hdr->driver_status = hp->driver_status;
+ if ((CHECK_CONDITION & hp->masked_status) ||
+ (DRIVER_SENSE & hp->driver_status))
+ memcpy(old_hdr->sense_buffer, srp->sense_b,
+ sizeof (old_hdr->sense_buffer));
+ switch (hp->host_status) {
+ /* This setup of 'result' is for backward compatibility and is best
+ ignored by the user who should use target, host + driver status */
+ case DID_OK:
+ case DID_PASSTHROUGH:
+ case DID_SOFT_ERROR:
+ old_hdr->result = 0;
+ break;
+ case DID_NO_CONNECT:
+ case DID_BUS_BUSY:
+ case DID_TIME_OUT:
+ old_hdr->result = EBUSY;
+ break;
+ case DID_BAD_TARGET:
+ case DID_ABORT:
+ case DID_PARITY:
+ case DID_RESET:
+ case DID_BAD_INTR:
+ old_hdr->result = EIO;
+ break;
+ case DID_ERROR:
+ old_hdr->result = (srp->sense_b[0] == 0 &&
+ hp->masked_status == GOOD) ? 0 : EIO;
+ break;
+ default:
+ old_hdr->result = EIO;
+ break;
+ }
+
+ /* Now copy the result back to the user buffer. */
+ if (count >= SZ_SG_HEADER) {
+ if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
+ retval = -EFAULT;
+ goto free_old_hdr;
+ }
+ buf += SZ_SG_HEADER;
+ if (count > old_hdr->reply_len)
+ count = old_hdr->reply_len;
+ if (count > SZ_SG_HEADER) {
+ if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
+ retval = -EFAULT;
+ goto free_old_hdr;
+ }
+ }
+ } else
+ count = (old_hdr->result == 0) ? 0 : -EIO;
+ sg_finish_rem_req(srp);
+ retval = count;
+free_old_hdr:
+ kfree(old_hdr);
+ return retval;
+}
+
+static ssize_t
+sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+{
+ sg_io_hdr_t *hp = &srp->header;
+ int err = 0, err2;
+ int len;
+
+ if (count < SZ_SG_IO_HDR) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ hp->sb_len_wr = 0;
+ if ((hp->mx_sb_len > 0) && hp->sbp) {
+ if ((CHECK_CONDITION & hp->masked_status) ||
+ (DRIVER_SENSE & hp->driver_status)) {
+ int sb_len = SCSI_SENSE_BUFFERSIZE;
+ sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
+ len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
+ len = (len > sb_len) ? sb_len : len;
+ if (copy_to_user(hp->sbp, srp->sense_b, len)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+ hp->sb_len_wr = len;
+ }
+ }
+ if (hp->masked_status || hp->host_status || hp->driver_status)
+ hp->info |= SG_INFO_CHECK;
+ if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+err_out:
+ err2 = sg_finish_rem_req(srp);
+ return err ? : err2 ? : count;
+}
+
+static ssize_t
+sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+{
+ int mxsize, cmd_size, k;
+ int input_size, blocking;
+ unsigned char opcode;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ struct sg_header old_hdr;
+ sg_io_hdr_t *hp;
+ unsigned char cmnd[SG_MAX_CDB_SIZE];
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_write: count=%d\n", (int) count));
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (!((filp->f_flags & O_NONBLOCK) ||
+ scsi_block_when_processing_errors(sdp->device)))
+ return -ENXIO;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT; /* protects following copy_from_user()s + get_user()s */
+ if (count < SZ_SG_HEADER)
+ return -EIO;
+ if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ return -EFAULT;
+ blocking = !(filp->f_flags & O_NONBLOCK);
+ if (old_hdr.reply_len < 0)
+ return sg_new_write(sfp, filp, buf, count,
+ blocking, 0, 0, NULL);
+ if (count < (SZ_SG_HEADER + 6))
+ return -EIO; /* The minimum scsi command length is 6 bytes. */
+
+ if (!(srp = sg_add_request(sfp))) {
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
+ "sg_write: queue full\n"));
+ return -EDOM;
+ }
+ buf += SZ_SG_HEADER;
+ __get_user(opcode, buf);
+ if (sfp->next_cmd_len > 0) {
+ cmd_size = sfp->next_cmd_len;
+ sfp->next_cmd_len = 0; /* reset so only this write() effected */
+ } else {
+ cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
+ if ((opcode >= 0xc0) && old_hdr.twelve_byte)
+ cmd_size = 12;
+ }
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+ "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
+/* Determine buffer size. */
+ input_size = count - cmd_size;
+ mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
+ mxsize -= SZ_SG_HEADER;
+ input_size -= SZ_SG_HEADER;
+ if (input_size < 0) {
+ sg_remove_request(sfp, srp);
+ return -EIO; /* User did not pass enough bytes for this command. */
+ }
+ hp = &srp->header;
+ hp->interface_id = '\0'; /* indicator of old interface tunnelled */
+ hp->cmd_len = (unsigned char) cmd_size;
+ hp->iovec_count = 0;
+ hp->mx_sb_len = 0;
+ if (input_size > 0)
+ hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
+ SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
+ else
+ hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
+ hp->dxfer_len = mxsize;
+ if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ hp->dxferp = (char __user *)buf + cmd_size;
+ else
+ hp->dxferp = NULL;
+ hp->sbp = NULL;
+ hp->timeout = old_hdr.reply_len; /* structure abuse ... */
+ hp->flags = input_size; /* structure abuse ... */
+ hp->pack_id = old_hdr.pack_id;
+ hp->usr_ptr = NULL;
+ if (__copy_from_user(cmnd, buf, cmd_size))
+ return -EFAULT;
+ /*
+ * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
+ * but is is possible that the app intended SG_DXFER_TO_DEV, because there
+ * is a non-zero input_size, so emit a warning.
+ */
+ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
+ static char cmd[TASK_COMM_LEN];
+ if (strcmp(current->comm, cmd)) {
+ printk_ratelimited(KERN_WARNING
+ "sg_write: data in/out %d/%d bytes "
+ "for SCSI command 0x%x-- guessing "
+ "data in;\n program %s not setting "
+ "count and/or reply_len properly\n",
+ old_hdr.reply_len - (int)SZ_SG_HEADER,
+ input_size, (unsigned int) cmnd[0],
+ current->comm);
+ strcpy(cmd, current->comm);
+ }
+ }
+ k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
+ return (k < 0) ? k : count;
+}
+
+static ssize_t
+sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ size_t count, int blocking, int read_only, int sg_io_owned,
+ Sg_request **o_srp)
+{
+ int k;
+ Sg_request *srp;
+ sg_io_hdr_t *hp;
+ unsigned char cmnd[SG_MAX_CDB_SIZE];
+ int timeout;
+ unsigned long ul_timeout;
+
+ if (count < SZ_SG_IO_HDR)
+ return -EINVAL;
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT; /* protects following copy_from_user()s + get_user()s */
+
+ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
+ if (!(srp = sg_add_request(sfp))) {
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_new_write: queue full\n"));
+ return -EDOM;
+ }
+ srp->sg_io_owned = sg_io_owned;
+ hp = &srp->header;
+ if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+ sg_remove_request(sfp, srp);
+ return -EFAULT;
+ }
+ if (hp->interface_id != 'S') {
+ sg_remove_request(sfp, srp);
+ return -ENOSYS;
+ }
+ if (hp->flags & SG_FLAG_MMAP_IO) {
+ if (hp->dxfer_len > sfp->reserve.bufflen) {
+ sg_remove_request(sfp, srp);
+ return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
+ }
+ if (hp->flags & SG_FLAG_DIRECT_IO) {
+ sg_remove_request(sfp, srp);
+ return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
+ }
+ if (sg_res_in_use(sfp)) {
+ sg_remove_request(sfp, srp);
+ return -EBUSY; /* reserve buffer already being used */
+ }
+ }
+ ul_timeout = msecs_to_jiffies(srp->header.timeout);
+ timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
+ if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
+ sg_remove_request(sfp, srp);
+ return -EMSGSIZE;
+ }
+ if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
+ sg_remove_request(sfp, srp);
+ return -EFAULT; /* protects following copy_from_user()s + get_user()s */
+ }
+ if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
+ sg_remove_request(sfp, srp);
+ return -EFAULT;
+ }
+ if (read_only && sg_allow_access(file, cmnd)) {
+ sg_remove_request(sfp, srp);
+ return -EPERM;
+ }
+ k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
+ if (k < 0)
+ return k;
+ if (o_srp)
+ *o_srp = srp;
+ return count;
+}
+
+static int
+sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ unsigned char *cmnd, int timeout, int blocking)
+{
+ int k, at_head;
+ Sg_device *sdp = sfp->parentdp;
+ sg_io_hdr_t *hp = &srp->header;
+
+ srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
+ hp->status = 0;
+ hp->masked_status = 0;
+ hp->msg_status = 0;
+ hp->info = 0;
+ hp->host_status = 0;
+ hp->driver_status = 0;
+ hp->resid = 0;
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
+ (int) cmnd[0], (int) hp->cmd_len));
+
+ k = sg_start_req(srp, cmnd);
+ if (k) {
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_common_write: start_req err=%d\n", k));
+ sg_finish_rem_req(srp);
+ return k; /* probably out of space --> ENOMEM */
+ }
+ if (atomic_read(&sdp->detaching)) {
+ if (srp->bio)
+ blk_end_request_all(srp->rq, -EIO);
+ sg_finish_rem_req(srp);
+ return -ENODEV;
+ }
+
+ hp->duration = jiffies_to_msecs(jiffies);
+ if (hp->interface_id != '\0' && /* v3 (or later) interface */
+ (SG_FLAG_Q_AT_TAIL & hp->flags))
+ at_head = 0;
+ else
+ at_head = 1;
+
+ srp->rq->timeout = timeout;
+ kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
+ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+ srp->rq, at_head, sg_rq_end_io);
+ return 0;
+}
+
+static int srp_done(Sg_fd *sfp, Sg_request *srp)
+{
+ unsigned long flags;
+ int ret;
+
+ read_lock_irqsave(&sfp->rq_list_lock, flags);
+ ret = srp->done;
+ read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+ return ret;
+}
+
+static int max_sectors_bytes(struct request_queue *q)
+{
+ unsigned int max_sectors = queue_max_sectors(q);
+
+ max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
+
+ return max_sectors << 9;
+}
+
+static long
+sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ int __user *ip = p;
+ int result, val, read_only;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ unsigned long iflags;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
+ read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
+
+ switch (cmd_in) {
+ case SG_IO:
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (!scsi_block_when_processing_errors(sdp->device))
+ return -ENXIO;
+ if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
+ return -EFAULT;
+ result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
+ 1, read_only, 1, &srp);
+ if (result < 0)
+ return result;
+ result = wait_event_interruptible(sfp->read_wait,
+ (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ write_lock_irq(&sfp->rq_list_lock);
+ if (srp->done) {
+ srp->done = 2;
+ write_unlock_irq(&sfp->rq_list_lock);
+ result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
+ return (result < 0) ? result : 0;
+ }
+ srp->orphan = 1;
+ write_unlock_irq(&sfp->rq_list_lock);
+ return result; /* -ERESTARTSYS because signal hit process */
+ case SG_SET_TIMEOUT:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (val < 0)
+ return -EIO;
+ if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
+ val = MULDIV (INT_MAX, USER_HZ, HZ);
+ sfp->timeout_user = val;
+ sfp->timeout = MULDIV (val, HZ, USER_HZ);
+
+ return 0;
+ case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
+ /* strange ..., for backward compatibility */
+ return sfp->timeout_user;
+ case SG_SET_FORCE_LOW_DMA:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (val) {
+ sfp->low_dma = 1;
+ if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ val = (int) sfp->reserve.bufflen;
+ sg_remove_scat(sfp, &sfp->reserve);
+ sg_build_reserve(sfp, val);
+ }
+ } else {
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ sfp->low_dma = sdp->device->host->unchecked_isa_dma;
+ }
+ return 0;
+ case SG_GET_LOW_DMA:
+ return put_user((int) sfp->low_dma, ip);
+ case SG_GET_SCSI_ID:
+ if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
+ return -EFAULT;
+ else {
+ sg_scsi_id_t __user *sg_idp = p;
+
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ __put_user((int) sdp->device->host->host_no,
+ &sg_idp->host_no);
+ __put_user((int) sdp->device->channel,
+ &sg_idp->channel);
+ __put_user((int) sdp->device->id, &sg_idp->scsi_id);
+ __put_user((int) sdp->device->lun, &sg_idp->lun);
+ __put_user((int) sdp->device->type, &sg_idp->scsi_type);
+ __put_user((short) sdp->device->host->cmd_per_lun,
+ &sg_idp->h_cmd_per_lun);
+ __put_user((short) sdp->device->queue_depth,
+ &sg_idp->d_queue_depth);
+ __put_user(0, &sg_idp->unused[0]);
+ __put_user(0, &sg_idp->unused[1]);
+ return 0;
+ }
+ case SG_SET_FORCE_PACK_ID:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->force_packid = val ? 1 : 0;
+ return 0;
+ case SG_GET_PACK_ID:
+ if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
+ return -EFAULT;
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ if ((1 == srp->done) && (!srp->sg_io_owned)) {
+ read_unlock_irqrestore(&sfp->rq_list_lock,
+ iflags);
+ __put_user(srp->header.pack_id, ip);
+ return 0;
+ }
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ __put_user(-1, ip);
+ return 0;
+ case SG_GET_NUM_WAITING:
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ if ((1 == srp->done) && (!srp->sg_io_owned))
+ ++val;
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return put_user(val, ip);
+ case SG_GET_SG_TABLESIZE:
+ return put_user(sdp->sg_tablesize, ip);
+ case SG_SET_RESERVED_SIZE:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (val < 0)
+ return -EINVAL;
+ val = min_t(int, val,
+ max_sectors_bytes(sdp->device->request_queue));
+ if (val != sfp->reserve.bufflen) {
+ if (sg_res_in_use(sfp) || sfp->mmap_called)
+ return -EBUSY;
+ sg_remove_scat(sfp, &sfp->reserve);
+ sg_build_reserve(sfp, val);
+ }
+ return 0;
+ case SG_GET_RESERVED_SIZE:
+ val = min_t(int, sfp->reserve.bufflen,
+ max_sectors_bytes(sdp->device->request_queue));
+ return put_user(val, ip);
+ case SG_SET_COMMAND_Q:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->cmd_q = val ? 1 : 0;
+ return 0;
+ case SG_GET_COMMAND_Q:
+ return put_user((int) sfp->cmd_q, ip);
+ case SG_SET_KEEP_ORPHAN:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->keep_orphan = val;
+ return 0;
+ case SG_GET_KEEP_ORPHAN:
+ return put_user((int) sfp->keep_orphan, ip);
+ case SG_NEXT_CMD_LEN:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->next_cmd_len = (val > 0) ? val : 0;
+ return 0;
+ case SG_GET_VERSION_NUM:
+ return put_user(sg_version_num, ip);
+ case SG_GET_ACCESS_COUNT:
+ /* faked - we don't have a real access count anymore */
+ val = (sdp->device ? 1 : 0);
+ return put_user(val, ip);
+ case SG_GET_REQUEST_TABLE:
+ if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
+ return -EFAULT;
+ else {
+ sg_req_info_t *rinfo;
+ unsigned int ms;
+
+ rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
+ if (!rinfo)
+ return -ENOMEM;
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
+ ++val, srp = srp ? srp->nextrp : srp) {
+ memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
+ if (srp) {
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned =
+ srp->sg_io_owned;
+ rinfo[val].pack_id =
+ srp->header.pack_id;
+ rinfo[val].usr_ptr =
+ srp->header.usr_ptr;
+ }
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ result = __copy_to_user(p, rinfo,
+ SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+ result = result ? -EFAULT : 0;
+ kfree(rinfo);
+ return result;
+ }
+ case SG_EMULATED_HOST:
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ return put_user(sdp->device->host->hostt->emulated, ip);
+ case SCSI_IOCTL_SEND_COMMAND:
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (read_only) {
+ unsigned char opcode = WRITE_6;
+ Scsi_Ioctl_Command __user *siocp = p;
+
+ if (copy_from_user(&opcode, siocp->data, 1))
+ return -EFAULT;
+ if (sg_allow_access(filp, &opcode))
+ return -EPERM;
+ }
+ return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
+ case SG_SET_DEBUG:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sdp->sgdebug = (char) val;
+ return 0;
+ case BLKSECTGET:
+ return put_user(max_sectors_bytes(sdp->device->request_queue),
+ ip);
+ case BLKTRACESETUP:
+ return blk_trace_setup(sdp->device->request_queue,
+ sdp->disk->disk_name,
+ MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
+ NULL,
+ (char *)arg);
+ case BLKTRACESTART:
+ return blk_trace_startstop(sdp->device->request_queue, 1);
+ case BLKTRACESTOP:
+ return blk_trace_startstop(sdp->device->request_queue, 0);
+ case BLKTRACETEARDOWN:
+ return blk_trace_remove(sdp->device->request_queue);
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SCSI_IOCTL_PROBE_HOST:
+ case SG_GET_TRANSFORM:
+ case SG_SCSI_RESET:
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ break;
+ default:
+ if (read_only)
+ return -EPERM; /* don't know so take safe approach */
+ break;
+ }
+
+ result = scsi_ioctl_block_when_processing_errors(sdp->device,
+ cmd_in, filp->f_flags & O_NDELAY);
+ if (result)
+ return result;
+ return scsi_ioctl(sdp->device, cmd_in, p);
+}
+
+#ifdef CONFIG_COMPAT
+static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ struct scsi_device *sdev;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+
+ sdev = sdp->device;
+ if (sdev->host->hostt->compat_ioctl) {
+ int ret;
+
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
+
+ return ret;
+ }
+
+ return -ENOIOCTLCMD;
+}
+#endif
+
+static unsigned int
+sg_poll(struct file *filp, poll_table * wait)
+{
+ unsigned int res = 0;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ int count = 0;
+ unsigned long iflags;
+
+ sfp = filp->private_data;
+ if (!sfp)
+ return POLLERR;
+ sdp = sfp->parentdp;
+ if (!sdp)
+ return POLLERR;
+ poll_wait(filp, &sfp->read_wait, wait);
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ /* if any read waiting, flag it */
+ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
+ res = POLLIN | POLLRDNORM;
+ ++count;
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ if (atomic_read(&sdp->detaching))
+ res |= POLLHUP;
+ else if (!sfp->cmd_q) {
+ if (0 == count)
+ res |= POLLOUT | POLLWRNORM;
+ } else if (count < SG_MAX_QUEUE)
+ res |= POLLOUT | POLLWRNORM;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_poll: res=0x%x\n", (int) res));
+ return res;
+}
+
+static int
+sg_fasync(int fd, struct file *filp, int mode)
+{
+ Sg_device *sdp;
+ Sg_fd *sfp;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_fasync: mode=%d\n", mode));
+
+ return fasync_helper(fd, filp, mode, &sfp->async_qp);
+}
+
+static int
+sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ Sg_fd *sfp;
+ unsigned long offset, len, sa;
+ Sg_scatter_hold *rsv_schp;
+ int k, length;
+
+ if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
+ return VM_FAULT_SIGBUS;
+ rsv_schp = &sfp->reserve;
+ offset = vmf->pgoff << PAGE_SHIFT;
+ if (offset >= rsv_schp->bufflen)
+ return VM_FAULT_SIGBUS;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_vma_fault: offset=%lu, scatg=%d\n",
+ offset, rsv_schp->k_use_sg));
+ sa = vma->vm_start;
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
+ len = vma->vm_end - sa;
+ len = (len < length) ? len : length;
+ if (offset < len) {
+ struct page *page = nth_page(rsv_schp->pages[k],
+ offset >> PAGE_SHIFT);
+ get_page(page); /* increment page count */
+ vmf->page = page;
+ return 0; /* success */
+ }
+ sa += len;
+ offset -= len;
+ }
+
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct sg_mmap_vm_ops = {
+ .fault = sg_vma_fault,
+};
+
+static int
+sg_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ Sg_fd *sfp;
+ unsigned long req_sz, len, sa;
+ Sg_scatter_hold *rsv_schp;
+ int k, length;
+
+ if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
+ return -ENXIO;
+ req_sz = vma->vm_end - vma->vm_start;
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_mmap starting, vm_start=%p, len=%d\n",
+ (void *) vma->vm_start, (int) req_sz));
+ if (vma->vm_pgoff)
+ return -EINVAL; /* want no offset */
+ rsv_schp = &sfp->reserve;
+ if (req_sz > rsv_schp->bufflen)
+ return -ENOMEM; /* cannot map more than reserved buffer */
+
+ sa = vma->vm_start;
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
+ len = vma->vm_end - sa;
+ len = (len < length) ? len : length;
+ sa += len;
+ }
+
+ sfp->mmap_called = 1;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = sfp;
+ vma->vm_ops = &sg_mmap_vm_ops;
+ return 0;
+}
+
+static void
+sg_rq_end_io_usercontext(struct work_struct *work)
+{
+ struct sg_request *srp = container_of(work, struct sg_request, ew.work);
+ struct sg_fd *sfp = srp->parentfp;
+
+ sg_finish_rem_req(srp);
+ kref_put(&sfp->f_ref, sg_remove_sfp);
+}
+
+/*
+ * This function is a "bottom half" handler that is called by the mid
+ * level when a command is completed (or has failed).
+ */
+static void
+sg_rq_end_io(struct request *rq, int uptodate)
+{
+ struct sg_request *srp = rq->end_io_data;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ unsigned long iflags;
+ unsigned int ms;
+ char *sense;
+ int result, resid, done = 1;
+
+ if (WARN_ON(srp->done != 0))
+ return;
+
+ sfp = srp->parentfp;
+ if (WARN_ON(sfp == NULL))
+ return;
+
+ sdp = sfp->parentdp;
+ if (unlikely(atomic_read(&sdp->detaching)))
+ pr_info("%s: device detaching\n", __func__);
+
+ sense = rq->sense;
+ result = rq->errors;
+ resid = rq->resid_len;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+ "sg_cmd_done: pack_id=%d, res=0x%x\n",
+ srp->header.pack_id, result));
+ srp->header.resid = resid;
+ ms = jiffies_to_msecs(jiffies);
+ srp->header.duration = (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ if (0 != result) {
+ struct scsi_sense_hdr sshdr;
+
+ srp->header.status = 0xff & result;
+ srp->header.masked_status = status_byte(result);
+ srp->header.msg_status = msg_byte(result);
+ srp->header.host_status = host_byte(result);
+ srp->header.driver_status = driver_byte(result);
+ if ((sdp->sgdebug > 0) &&
+ ((CHECK_CONDITION == srp->header.masked_status) ||
+ (COMMAND_TERMINATED == srp->header.masked_status)))
+ __scsi_print_sense(sdp->device, __func__, sense,
+ SCSI_SENSE_BUFFERSIZE);
+
+ /* Following if statement is a patch supplied by Eric Youngdale */
+ if (driver_byte(result) != 0
+ && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
+ && !scsi_sense_is_deferred(&sshdr)
+ && sshdr.sense_key == UNIT_ATTENTION
+ && sdp->device->removable) {
+ /* Detected possible disc change. Set the bit - this */
+ /* may be used if there are filesystems using this device */
+ sdp->device->changed = 1;
+ }
+ }
+ /* Rely on write phase to clean out srp status values, so no "else" */
+
+ /*
+ * Free the request as soon as it is complete so that its resources
+ * can be reused without waiting for userspace to read() the
+ * result. But keep the associated bio (if any) around until
+ * blk_rq_unmap_user() can be called from user context.
+ */
+ srp->rq = NULL;
+ if (rq->cmd != rq->__cmd)
+ kfree(rq->cmd);
+ __blk_put_request(rq->q, rq);
+
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ if (unlikely(srp->orphan)) {
+ if (sfp->keep_orphan)
+ srp->sg_io_owned = 0;
+ else
+ done = 0;
+ }
+ srp->done = done;
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ if (likely(done)) {
+ /* Now wake up any sg_read() that is waiting for this
+ * packet.
+ */
+ wake_up_interruptible(&sfp->read_wait);
+ kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+ kref_put(&sfp->f_ref, sg_remove_sfp);
+ } else {
+ INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
+ schedule_work(&srp->ew.work);
+ }
+}
+
+static const struct file_operations sg_fops = {
+ .owner = THIS_MODULE,
+ .read = sg_read,
+ .write = sg_write,
+ .poll = sg_poll,
+ .unlocked_ioctl = sg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sg_compat_ioctl,
+#endif
+ .open = sg_open,
+ .mmap = sg_mmap,
+ .release = sg_release,
+ .fasync = sg_fasync,
+ .llseek = no_llseek,
+};
+
+static struct class *sg_sysfs_class;
+
+static int sg_sysfs_valid = 0;
+
+static Sg_device *
+sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+{
+ struct request_queue *q = scsidp->request_queue;
+ Sg_device *sdp;
+ unsigned long iflags;
+ int error;
+ u32 k;
+
+ sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
+ if (!sdp) {
+ sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
+ "failure\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&sg_index_lock, iflags);
+
+ error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
+ if (error < 0) {
+ if (error == -ENOSPC) {
+ sdev_printk(KERN_WARNING, scsidp,
+ "Unable to attach sg device type=%d, minor number exceeds %d\n",
+ scsidp->type, SG_MAX_DEVS - 1);
+ error = -ENODEV;
+ } else {
+ sdev_printk(KERN_WARNING, scsidp, "%s: idr "
+ "allocation Sg_device failure: %d\n",
+ __func__, error);
+ }
+ goto out_unlock;
+ }
+ k = error;
+
+ SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
+ "sg_alloc: dev=%d \n", k));
+ sprintf(disk->disk_name, "sg%d", k);
+ disk->first_minor = k;
+ sdp->disk = disk;
+ sdp->device = scsidp;
+ mutex_init(&sdp->open_rel_lock);
+ INIT_LIST_HEAD(&sdp->sfds);
+ init_waitqueue_head(&sdp->open_wait);
+ atomic_set(&sdp->detaching, 0);
+ rwlock_init(&sdp->sfd_lock);
+ sdp->sg_tablesize = queue_max_segments(q);
+ sdp->index = k;
+ kref_init(&sdp->d_ref);
+ error = 0;
+
+out_unlock:
+ write_unlock_irqrestore(&sg_index_lock, iflags);
+ idr_preload_end();
+
+ if (error) {
+ kfree(sdp);
+ return ERR_PTR(error);
+ }
+ return sdp;
+}
+
+static int
+sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
+{
+ struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
+ struct gendisk *disk;
+ Sg_device *sdp = NULL;
+ struct cdev * cdev = NULL;
+ int error;
+ unsigned long iflags;
+
+ disk = alloc_disk(1);
+ if (!disk) {
+ pr_warn("%s: alloc_disk failed\n", __func__);
+ return -ENOMEM;
+ }
+ disk->major = SCSI_GENERIC_MAJOR;
+
+ error = -ENOMEM;
+ cdev = cdev_alloc();
+ if (!cdev) {
+ pr_warn("%s: cdev_alloc failed\n", __func__);
+ goto out;
+ }
+ cdev->owner = THIS_MODULE;
+ cdev->ops = &sg_fops;
+
+ sdp = sg_alloc(disk, scsidp);
+ if (IS_ERR(sdp)) {
+ pr_warn("%s: sg_alloc failed\n", __func__);
+ error = PTR_ERR(sdp);
+ goto out;
+ }
+
+ error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
+ if (error)
+ goto cdev_add_err;
+
+ sdp->cdev = cdev;
+ if (sg_sysfs_valid) {
+ struct device *sg_class_member;
+
+ sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
+ MKDEV(SCSI_GENERIC_MAJOR,
+ sdp->index),
+ sdp, "%s", disk->disk_name);
+ if (IS_ERR(sg_class_member)) {
+ pr_err("%s: device_create failed\n", __func__);
+ error = PTR_ERR(sg_class_member);
+ goto cdev_add_err;
+ }
+ error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
+ &sg_class_member->kobj, "generic");
+ if (error)
+ pr_err("%s: unable to make symlink 'generic' back "
+ "to sg%d\n", __func__, sdp->index);
+ } else
+ pr_warn("%s: sg_sys Invalid\n", __func__);
+
+ sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
+ "type %d\n", sdp->index, scsidp->type);
+
+ dev_set_drvdata(cl_dev, sdp);
+
+ return 0;
+
+cdev_add_err:
+ write_lock_irqsave(&sg_index_lock, iflags);
+ idr_remove(&sg_index_idr, sdp->index);
+ write_unlock_irqrestore(&sg_index_lock, iflags);
+ kfree(sdp);
+
+out:
+ put_disk(disk);
+ if (cdev)
+ cdev_del(cdev);
+ return error;
+}
+
+static void
+sg_device_destroy(struct kref *kref)
+{
+ struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
+ unsigned long flags;
+
+ /* CAUTION! Note that the device can still be found via idr_find()
+ * even though the refcount is 0. Therefore, do idr_remove() BEFORE
+ * any other cleanup.
+ */
+
+ write_lock_irqsave(&sg_index_lock, flags);
+ idr_remove(&sg_index_idr, sdp->index);
+ write_unlock_irqrestore(&sg_index_lock, flags);
+
+ SCSI_LOG_TIMEOUT(3,
+ sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
+
+ put_disk(sdp->disk);
+ kfree(sdp);
+}
+
+static void
+sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
+{
+ struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
+ Sg_device *sdp = dev_get_drvdata(cl_dev);
+ unsigned long iflags;
+ Sg_fd *sfp;
+ int val;
+
+ if (!sdp)
+ return;
+ /* want sdp->detaching non-zero as soon as possible */
+ val = atomic_inc_return(&sdp->detaching);
+ if (val > 1)
+ return; /* only want to do following once per device */
+
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "%s\n", __func__));
+
+ read_lock_irqsave(&sdp->sfd_lock, iflags);
+ list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
+ wake_up_interruptible_all(&sfp->read_wait);
+ kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
+ }
+ wake_up_interruptible_all(&sdp->open_wait);
+ read_unlock_irqrestore(&sdp->sfd_lock, iflags);
+
+ sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
+ device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+ cdev_del(sdp->cdev);
+ sdp->cdev = NULL;
+
+ kref_put(&sdp->d_ref, sg_device_destroy);
+}
+
+module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
+module_param_named(def_reserved_size, def_reserved_size, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
+
+MODULE_AUTHOR("Douglas Gilbert");
+MODULE_DESCRIPTION("SCSI generic (sg) driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SG_VERSION_STR);
+MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
+
+MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
+ "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
+MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
+MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
+
+static int __init
+init_sg(void)
+{
+ int rc;
+
+ if (scatter_elem_sz < PAGE_SIZE) {
+ scatter_elem_sz = PAGE_SIZE;
+ scatter_elem_sz_prev = scatter_elem_sz;
+ }
+ if (def_reserved_size >= 0)
+ sg_big_buff = def_reserved_size;
+ else
+ def_reserved_size = sg_big_buff;
+
+ rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
+ SG_MAX_DEVS, "sg");
+ if (rc)
+ return rc;
+ sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
+ if ( IS_ERR(sg_sysfs_class) ) {
+ rc = PTR_ERR(sg_sysfs_class);
+ goto err_out;
+ }
+ sg_sysfs_valid = 1;
+ rc = scsi_register_interface(&sg_interface);
+ if (0 == rc) {
+#ifdef CONFIG_SCSI_PROC_FS
+ sg_proc_init();
+#endif /* CONFIG_SCSI_PROC_FS */
+ return 0;
+ }
+ class_destroy(sg_sysfs_class);
+err_out:
+ unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
+ return rc;
+}
+
+static void __exit
+exit_sg(void)
+{
+#ifdef CONFIG_SCSI_PROC_FS
+ sg_proc_cleanup();
+#endif /* CONFIG_SCSI_PROC_FS */
+ scsi_unregister_interface(&sg_interface);
+ class_destroy(sg_sysfs_class);
+ sg_sysfs_valid = 0;
+ unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
+ SG_MAX_DEVS);
+ idr_destroy(&sg_index_idr);
+}
+
+static int
+sg_start_req(Sg_request *srp, unsigned char *cmd)
+{
+ int res;
+ struct request *rq;
+ Sg_fd *sfp = srp->parentfp;
+ sg_io_hdr_t *hp = &srp->header;
+ int dxfer_len = (int) hp->dxfer_len;
+ int dxfer_dir = hp->dxfer_direction;
+ unsigned int iov_count = hp->iovec_count;
+ Sg_scatter_hold *req_schp = &srp->data;
+ Sg_scatter_hold *rsv_schp = &sfp->reserve;
+ struct request_queue *q = sfp->parentdp->device->request_queue;
+ struct rq_map_data *md, map_data;
+ int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
+ unsigned char *long_cmdp = NULL;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_start_req: dxfer_len=%d\n",
+ dxfer_len));
+
+ if (hp->cmd_len > BLK_MAX_CDB) {
+ long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
+ if (!long_cmdp)
+ return -ENOMEM;
+ }
+
+ /*
+ * NOTE
+ *
+ * With scsi-mq enabled, there are a fixed number of preallocated
+ * requests equal in number to shost->can_queue. If all of the
+ * preallocated requests are already in use, then using GFP_ATOMIC with
+ * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+ * will cause blk_get_request() to sleep until an active command
+ * completes, freeing up a request. Neither option is ideal, but
+ * GFP_KERNEL is the better choice to prevent userspace from getting an
+ * unexpected EWOULDBLOCK.
+ *
+ * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+ * does not sleep except under memory pressure.
+ */
+ rq = blk_get_request(q, rw, GFP_KERNEL);
+ if (IS_ERR(rq)) {
+ kfree(long_cmdp);
+ return PTR_ERR(rq);
+ }
+
+ blk_rq_set_block_pc(rq);
+
+ if (hp->cmd_len > BLK_MAX_CDB)
+ rq->cmd = long_cmdp;
+ memcpy(rq->cmd, cmd, hp->cmd_len);
+ rq->cmd_len = hp->cmd_len;
+
+ srp->rq = rq;
+ rq->end_io_data = srp;
+ rq->sense = srp->sense_b;
+ rq->retries = SG_DEFAULT_RETRIES;
+
+ if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
+ return 0;
+
+ if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
+ dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
+ !sfp->parentdp->device->host->unchecked_isa_dma &&
+ blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
+ md = NULL;
+ else
+ md = &map_data;
+
+ if (md) {
+ if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ sg_link_reserve(sfp, srp, dxfer_len);
+ else {
+ res = sg_build_indirect(req_schp, sfp, dxfer_len);
+ if (res)
+ return res;
+ }
+
+ md->pages = req_schp->pages;
+ md->page_order = req_schp->page_order;
+ md->nr_entries = req_schp->k_use_sg;
+ md->offset = 0;
+ md->null_mapped = hp->dxferp ? 0 : 1;
+ if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
+ md->from_user = 1;
+ else
+ md->from_user = 0;
+ }
+
+ if (iov_count) {
+ struct iovec *iov = NULL;
+ struct iov_iter i;
+
+ res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
+ if (res < 0)
+ return res;
+
+ iov_iter_truncate(&i, hp->dxfer_len);
+
+ res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
+ kfree(iov);
+ } else
+ res = blk_rq_map_user(q, rq, md, hp->dxferp,
+ hp->dxfer_len, GFP_ATOMIC);
+
+ if (!res) {
+ srp->bio = rq->bio;
+
+ if (!md) {
+ req_schp->dio_in_use = 1;
+ hp->info |= SG_INFO_DIRECT_IO;
+ }
+ }
+ return res;
+}
+
+static int
+sg_finish_rem_req(Sg_request *srp)
+{
+ int ret = 0;
+
+ Sg_fd *sfp = srp->parentfp;
+ Sg_scatter_hold *req_schp = &srp->data;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_finish_rem_req: res_used=%d\n",
+ (int) srp->res_used));
+ if (srp->bio)
+ ret = blk_rq_unmap_user(srp->bio);
+
+ if (srp->rq) {
+ if (srp->rq->cmd != srp->rq->__cmd)
+ kfree(srp->rq->cmd);
+ blk_put_request(srp->rq);
+ }
+
+ if (srp->res_used)
+ sg_unlink_reserve(sfp, srp);
+ else
+ sg_remove_scat(sfp, req_schp);
+
+ sg_remove_request(sfp, srp);
+
+ return ret;
+}
+
+static int
+sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
+{
+ int sg_bufflen = tablesize * sizeof(struct page *);
+ gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
+
+ schp->pages = kzalloc(sg_bufflen, gfp_flags);
+ if (!schp->pages)
+ return -ENOMEM;
+ schp->sglist_len = sg_bufflen;
+ return tablesize; /* number of scat_gath elements allocated */
+}
+
+static int
+sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
+{
+ int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
+ int sg_tablesize = sfp->parentdp->sg_tablesize;
+ int blk_size = buff_size, order;
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+
+ if (blk_size < 0)
+ return -EFAULT;
+ if (0 == blk_size)
+ ++blk_size; /* don't know why */
+ /* round request up to next highest SG_SECTOR_SZ byte boundary */
+ blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: buff_size=%d, blk_size=%d\n",
+ buff_size, blk_size));
+
+ /* N.B. ret_sz carried into this block ... */
+ mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
+ if (mx_sc_elems < 0)
+ return mx_sc_elems; /* most likely -ENOMEM */
+
+ num = scatter_elem_sz;
+ if (unlikely(num != scatter_elem_sz_prev)) {
+ if (num < PAGE_SIZE) {
+ scatter_elem_sz = PAGE_SIZE;
+ scatter_elem_sz_prev = PAGE_SIZE;
+ } else
+ scatter_elem_sz_prev = num;
+ }
+
+ if (sfp->low_dma)
+ gfp_mask |= GFP_DMA;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ gfp_mask |= __GFP_ZERO;
+
+ order = get_order(num);
+retry:
+ ret_sz = 1 << (PAGE_SHIFT + order);
+
+ for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
+ k++, rem_sz -= ret_sz) {
+
+ num = (rem_sz > scatter_elem_sz_prev) ?
+ scatter_elem_sz_prev : rem_sz;
+
+ schp->pages[k] = alloc_pages(gfp_mask, order);
+ if (!schp->pages[k])
+ goto out;
+
+ if (num == scatter_elem_sz_prev) {
+ if (unlikely(ret_sz > scatter_elem_sz_prev)) {
+ scatter_elem_sz = ret_sz;
+ scatter_elem_sz_prev = ret_sz;
+ }
+ }
+
+ SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
+ k, num, ret_sz));
+ } /* end of for loop */
+
+ schp->page_order = order;
+ schp->k_use_sg = k;
+ SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
+ k, rem_sz));
+
+ schp->bufflen = blk_size;
+ if (rem_sz > 0) /* must have failed */
+ return -ENOMEM;
+ return 0;
+out:
+ for (i = 0; i < k; i++)
+ __free_pages(schp->pages[i], order);
+
+ if (--order >= 0)
+ goto retry;
+
+ return -ENOMEM;
+}
+
+static void
+sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
+{
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
+ if (schp->pages && schp->sglist_len > 0) {
+ if (!schp->dio_in_use) {
+ int k;
+
+ for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
+ SCSI_LOG_TIMEOUT(5,
+ sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_remove_scat: k=%d, pg=0x%p\n",
+ k, schp->pages[k]));
+ __free_pages(schp->pages[k], schp->page_order);
+ }
+
+ kfree(schp->pages);
+ }
+ }
+ memset(schp, 0, sizeof (*schp));
+}
+
+static int
+sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
+{
+ Sg_scatter_hold *schp = &srp->data;
+ int k, num;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
+ "sg_read_oxfer: num_read_xfer=%d\n",
+ num_read_xfer));
+ if ((!outp) || (num_read_xfer <= 0))
+ return 0;
+
+ num = 1 << (PAGE_SHIFT + schp->page_order);
+ for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
+ if (num > num_read_xfer) {
+ if (__copy_to_user(outp, page_address(schp->pages[k]),
+ num_read_xfer))
+ return -EFAULT;
+ break;
+ } else {
+ if (__copy_to_user(outp, page_address(schp->pages[k]),
+ num))
+ return -EFAULT;
+ num_read_xfer -= num;
+ if (num_read_xfer <= 0)
+ break;
+ outp += num;
+ }
+ }
+
+ return 0;
+}
+
+static void
+sg_build_reserve(Sg_fd * sfp, int req_size)
+{
+ Sg_scatter_hold *schp = &sfp->reserve;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_reserve: req_size=%d\n", req_size));
+ do {
+ if (req_size < PAGE_SIZE)
+ req_size = PAGE_SIZE;
+ if (0 == sg_build_indirect(schp, sfp, req_size))
+ return;
+ else
+ sg_remove_scat(sfp, schp);
+ req_size >>= 1; /* divide by 2 */
+ } while (req_size > (PAGE_SIZE / 2));
+}
+
+static void
+sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
+{
+ Sg_scatter_hold *req_schp = &srp->data;
+ Sg_scatter_hold *rsv_schp = &sfp->reserve;
+ int k, num, rem;
+
+ srp->res_used = 1;
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_link_reserve: size=%d\n", size));
+ rem = size;
+
+ num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg; k++) {
+ if (rem <= num) {
+ req_schp->k_use_sg = k + 1;
+ req_schp->sglist_len = rsv_schp->sglist_len;
+ req_schp->pages = rsv_schp->pages;
+
+ req_schp->bufflen = size;
+ req_schp->page_order = rsv_schp->page_order;
+ break;
+ } else
+ rem -= num;
+ }
+
+ if (k >= rsv_schp->k_use_sg)
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_link_reserve: BAD size\n"));
+}
+
+static void
+sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
+{
+ Sg_scatter_hold *req_schp = &srp->data;
+
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
+ "sg_unlink_reserve: req->k_use_sg=%d\n",
+ (int) req_schp->k_use_sg));
+ req_schp->k_use_sg = 0;
+ req_schp->bufflen = 0;
+ req_schp->pages = NULL;
+ req_schp->page_order = 0;
+ req_schp->sglist_len = 0;
+ sfp->save_scat_len = 0;
+ srp->res_used = 0;
+}
+
+static Sg_request *
+sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+{
+ Sg_request *resp;
+ unsigned long iflags;
+
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ /* look for requests that are ready + not SG_IO owned */
+ if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
+ resp->done = 2; /* guard against other readers */
+ break;
+ }
+ }
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+}
+
+/* always adds to end of list */
+static Sg_request *
+sg_add_request(Sg_fd * sfp)
+{
+ int k;
+ unsigned long iflags;
+ Sg_request *resp;
+ Sg_request *rp = sfp->req_arr;
+
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ resp = sfp->headrp;
+ if (!resp) {
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ resp = rp;
+ sfp->headrp = resp;
+ } else {
+ if (0 == sfp->cmd_q)
+ resp = NULL; /* command queuing disallowed */
+ else {
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
+ }
+ if (k < SG_MAX_QUEUE) {
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ while (resp->nextrp)
+ resp = resp->nextrp;
+ resp->nextrp = rp;
+ resp = rp;
+ } else
+ resp = NULL;
+ }
+ }
+ if (resp) {
+ resp->nextrp = NULL;
+ resp->header.duration = jiffies_to_msecs(jiffies);
+ }
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+}
+
+/* Return of 1 for found; 0 for not found */
+static int
+sg_remove_request(Sg_fd * sfp, Sg_request * srp)
+{
+ Sg_request *prev_rp;
+ Sg_request *rp;
+ unsigned long iflags;
+ int res = 0;
+
+ if ((!sfp) || (!srp) || (!sfp->headrp))
+ return res;
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ prev_rp = sfp->headrp;
+ if (srp == prev_rp) {
+ sfp->headrp = prev_rp->nextrp;
+ prev_rp->parentfp = NULL;
+ res = 1;
+ } else {
+ while ((rp = prev_rp->nextrp)) {
+ if (srp == rp) {
+ prev_rp->nextrp = rp->nextrp;
+ rp->parentfp = NULL;
+ res = 1;
+ break;
+ }
+ prev_rp = rp;
+ }
+ }
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return res;
+}
+
+static Sg_fd *
+sg_add_sfp(Sg_device * sdp)
+{
+ Sg_fd *sfp;
+ unsigned long iflags;
+ int bufflen;
+
+ sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
+ if (!sfp)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&sfp->read_wait);
+ rwlock_init(&sfp->rq_list_lock);
+
+ kref_init(&sfp->f_ref);
+ sfp->timeout = SG_DEFAULT_TIMEOUT;
+ sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+ sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+ sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
+ sdp->device->host->unchecked_isa_dma : 1;
+ sfp->cmd_q = SG_DEF_COMMAND_Q;
+ sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
+ sfp->parentdp = sdp;
+ write_lock_irqsave(&sdp->sfd_lock, iflags);
+ if (atomic_read(&sdp->detaching)) {
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ return ERR_PTR(-ENODEV);
+ }
+ list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_add_sfp: sfp=0x%p\n", sfp));
+ if (unlikely(sg_big_buff != def_reserved_size))
+ sg_big_buff = def_reserved_size;
+
+ bufflen = min_t(int, sg_big_buff,
+ max_sectors_bytes(sdp->device->request_queue));
+ sg_build_reserve(sfp, bufflen);
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
+ sfp->reserve.bufflen,
+ sfp->reserve.k_use_sg));
+
+ kref_get(&sdp->d_ref);
+ __module_get(THIS_MODULE);
+ return sfp;
+}
+
+static void
+sg_remove_sfp_usercontext(struct work_struct *work)
+{
+ struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
+ struct sg_device *sdp = sfp->parentdp;
+
+ /* Cleanup any responses which were never read(). */
+ while (sfp->headrp)
+ sg_finish_rem_req(sfp->headrp);
+
+ if (sfp->reserve.bufflen > 0) {
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+ "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
+ (int) sfp->reserve.bufflen,
+ (int) sfp->reserve.k_use_sg));
+ sg_remove_scat(sfp, &sfp->reserve);
+ }
+
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+ "sg_remove_sfp: sfp=0x%p\n", sfp));
+ kfree(sfp);
+
+ scsi_device_put(sdp->device);
+ kref_put(&sdp->d_ref, sg_device_destroy);
+ module_put(THIS_MODULE);
+}
+
+static void
+sg_remove_sfp(struct kref *kref)
+{
+ struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
+ struct sg_device *sdp = sfp->parentdp;
+ unsigned long iflags;
+
+ write_lock_irqsave(&sdp->sfd_lock, iflags);
+ list_del(&sfp->sfd_siblings);
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+
+ INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
+ schedule_work(&sfp->ew.work);
+}
+
+static int
+sg_res_in_use(Sg_fd * sfp)
+{
+ const Sg_request *srp;
+ unsigned long iflags;
+
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (srp = sfp->headrp; srp; srp = srp->nextrp)
+ if (srp->res_used)
+ break;
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return srp ? 1 : 0;
+}
+
+#ifdef CONFIG_SCSI_PROC_FS
+static int
+sg_idr_max_id(int id, void *p, void *data)
+{
+ int *k = data;
+
+ if (*k < id)
+ *k = id;
+
+ return 0;
+}
+
+static int
+sg_last_dev(void)
+{
+ int k = -1;
+ unsigned long iflags;
+
+ read_lock_irqsave(&sg_index_lock, iflags);
+ idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
+ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return k + 1; /* origin 1 */
+}
+#endif
+
+/* must be called with sg_index_lock held */
+static Sg_device *sg_lookup_dev(int dev)
+{
+ return idr_find(&sg_index_idr, dev);
+}
+
+static Sg_device *
+sg_get_dev(int dev)
+{
+ struct sg_device *sdp;
+ unsigned long flags;
+
+ read_lock_irqsave(&sg_index_lock, flags);
+ sdp = sg_lookup_dev(dev);
+ if (!sdp)
+ sdp = ERR_PTR(-ENXIO);
+ else if (atomic_read(&sdp->detaching)) {
+ /* If sdp->detaching, then the refcount may already be 0, in
+ * which case it would be a bug to do kref_get().
+ */
+ sdp = ERR_PTR(-ENODEV);
+ } else
+ kref_get(&sdp->d_ref);
+ read_unlock_irqrestore(&sg_index_lock, flags);
+
+ return sdp;
+}
+
+#ifdef CONFIG_SCSI_PROC_FS
+
+static struct proc_dir_entry *sg_proc_sgp = NULL;
+
+static char sg_proc_sg_dirname[] = "scsi/sg";
+
+static int sg_proc_seq_show_int(struct seq_file *s, void *v);
+
+static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
+static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *off);
+static const struct file_operations adio_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_single_open_adio,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = sg_proc_write_adio,
+ .release = single_release,
+};
+
+static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
+static ssize_t sg_proc_write_dressz(struct file *filp,
+ const char __user *buffer, size_t count, loff_t *off);
+static const struct file_operations dressz_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_single_open_dressz,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = sg_proc_write_dressz,
+ .release = single_release,
+};
+
+static int sg_proc_seq_show_version(struct seq_file *s, void *v);
+static int sg_proc_single_open_version(struct inode *inode, struct file *file);
+static const struct file_operations version_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_single_open_version,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
+static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
+static const struct file_operations devhdr_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_single_open_devhdr,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
+static int sg_proc_open_dev(struct inode *inode, struct file *file);
+static void * dev_seq_start(struct seq_file *s, loff_t *pos);
+static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
+static void dev_seq_stop(struct seq_file *s, void *v);
+static const struct file_operations dev_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_open_dev,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+static const struct seq_operations dev_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = sg_proc_seq_show_dev,
+};
+
+static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
+static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
+static const struct file_operations devstrs_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_open_devstrs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+static const struct seq_operations devstrs_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = sg_proc_seq_show_devstrs,
+};
+
+static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
+static int sg_proc_open_debug(struct inode *inode, struct file *file);
+static const struct file_operations debug_fops = {
+ .owner = THIS_MODULE,
+ .open = sg_proc_open_debug,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+static const struct seq_operations debug_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = sg_proc_seq_show_debug,
+};
+
+
+struct sg_proc_leaf {
+ const char * name;
+ const struct file_operations * fops;
+};
+
+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
+ {"allow_dio", &adio_fops},
+ {"debug", &debug_fops},
+ {"def_reserved_size", &dressz_fops},
+ {"device_hdr", &devhdr_fops},
+ {"devices", &dev_fops},
+ {"device_strs", &devstrs_fops},
+ {"version", &version_fops}
+};
+
+static int
+sg_proc_init(void)
+{
+ int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
+ int k;
+
+ sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
+ if (!sg_proc_sgp)
+ return 1;
+ for (k = 0; k < num_leaves; ++k) {
+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+ umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+ proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
+ }
+ return 0;
+}
+
+static void
+sg_proc_cleanup(void)
+{
+ int k;
+ int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
+
+ if (!sg_proc_sgp)
+ return;
+ for (k = 0; k < num_leaves; ++k)
+ remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
+ remove_proc_entry(sg_proc_sg_dirname, NULL);
+}
+
+
+static int sg_proc_seq_show_int(struct seq_file *s, void *v)
+{
+ seq_printf(s, "%d\n", *((int *)s->private));
+ return 0;
+}
+
+static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
+{
+ return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
+}
+
+static ssize_t
+sg_proc_write_adio(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *off)
+{
+ int err;
+ unsigned long num;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ err = kstrtoul_from_user(buffer, count, 0, &num);
+ if (err)
+ return err;
+ sg_allow_dio = num ? 1 : 0;
+ return count;
+}
+
+static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
+{
+ return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
+}
+
+static ssize_t
+sg_proc_write_dressz(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *off)
+{
+ int err;
+ unsigned long k = ULONG_MAX;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ err = kstrtoul_from_user(buffer, count, 0, &k);
+ if (err)
+ return err;
+ if (k <= 1048576) { /* limit "big buff" to 1 MB */
+ sg_big_buff = k;
+ return count;
+ }
+ return -ERANGE;
+}
+
+static int sg_proc_seq_show_version(struct seq_file *s, void *v)
+{
+ seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
+ sg_version_date);
+ return 0;
+}
+
+static int sg_proc_single_open_version(struct inode *inode, struct file *file)
+{
+ return single_open(file, sg_proc_seq_show_version, NULL);
+}
+
+static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
+{
+ seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
+ return 0;
+}
+
+static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
+{
+ return single_open(file, sg_proc_seq_show_devhdr, NULL);
+}
+
+struct sg_proc_deviter {
+ loff_t index;
+ size_t max;
+};
+
+static void * dev_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
+
+ s->private = it;
+ if (! it)
+ return NULL;
+
+ it->index = *pos;
+ it->max = sg_last_dev();
+ if (it->index >= it->max)
+ return NULL;
+ return it;
+}
+
+static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct sg_proc_deviter * it = s->private;
+
+ *pos = ++it->index;
+ return (it->index < it->max) ? it : NULL;
+}
+
+static void dev_seq_stop(struct seq_file *s, void *v)
+{
+ kfree(s->private);
+}
+
+static int sg_proc_open_dev(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &dev_seq_ops);
+}
+
+static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
+{
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ Sg_device *sdp;
+ struct scsi_device *scsidp;
+ unsigned long iflags;
+
+ read_lock_irqsave(&sg_index_lock, iflags);
+ sdp = it ? sg_lookup_dev(it->index) : NULL;
+ if ((NULL == sdp) || (NULL == sdp->device) ||
+ (atomic_read(&sdp->detaching)))
+ seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
+ else {
+ scsidp = sdp->device;
+ seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
+ scsidp->host->host_no, scsidp->channel,
+ scsidp->id, scsidp->lun, (int) scsidp->type,
+ 1,
+ (int) scsidp->queue_depth,
+ (int) atomic_read(&scsidp->device_busy),
+ (int) scsi_device_online(scsidp));
+ }
+ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return 0;
+}
+
+static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &devstrs_seq_ops);
+}
+
+static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
+{
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ Sg_device *sdp;
+ struct scsi_device *scsidp;
+ unsigned long iflags;
+
+ read_lock_irqsave(&sg_index_lock, iflags);
+ sdp = it ? sg_lookup_dev(it->index) : NULL;
+ scsidp = sdp ? sdp->device : NULL;
+ if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
+ seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
+ scsidp->vendor, scsidp->model, scsidp->rev);
+ else
+ seq_puts(s, "<no active device>\n");
+ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return 0;
+}
+
+/* must be called while holding sg_index_lock */
+static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+{
+ int k, m, new_interface, blen, usg;
+ Sg_request *srp;
+ Sg_fd *fp;
+ const sg_io_hdr_t *hp;
+ const char * cp;
+ unsigned int ms;
+
+ k = 0;
+ list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
+ k++;
+ read_lock(&fp->rq_list_lock); /* irqs already disabled */
+ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
+ "(res)sgat=%d low_dma=%d\n", k,
+ jiffies_to_msecs(fp->timeout),
+ fp->reserve.bufflen,
+ (int) fp->reserve.k_use_sg,
+ (int) fp->low_dma);
+ seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
+ (int) fp->cmd_q, (int) fp->force_packid,
+ (int) fp->keep_orphan);
+ for (m = 0, srp = fp->headrp;
+ srp != NULL;
+ ++m, srp = srp->nextrp) {
+ hp = &srp->header;
+ new_interface = (hp->interface_id == '\0') ? 0 : 1;
+ if (srp->res_used) {
+ if (new_interface &&
+ (SG_FLAG_MMAP_IO & hp->flags))
+ cp = " mmap>> ";
+ else
+ cp = " rb>> ";
+ } else {
+ if (SG_INFO_DIRECT_IO_MASK & hp->info)
+ cp = " dio>> ";
+ else
+ cp = " ";
+ }
+ seq_puts(s, cp);
+ blen = srp->data.bufflen;
+ usg = srp->data.k_use_sg;
+ seq_puts(s, srp->done ?
+ ((1 == srp->done) ? "rcv:" : "fin:")
+ : "act:");
+ seq_printf(s, " id=%d blen=%d",
+ srp->header.pack_id, blen);
+ if (srp->done)
+ seq_printf(s, " dur=%d", hp->duration);
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ seq_printf(s, " t_o/elap=%d/%d",
+ (new_interface ? hp->timeout :
+ jiffies_to_msecs(fp->timeout)),
+ (ms > hp->duration ? ms - hp->duration : 0));
+ }
+ seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
+ (int) srp->data.cmd_opcode);
+ }
+ if (0 == m)
+ seq_puts(s, " No requests active\n");
+ read_unlock(&fp->rq_list_lock);
+ }
+}
+
+static int sg_proc_open_debug(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &debug_seq_ops);
+}
+
+static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
+{
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+ Sg_device *sdp;
+ unsigned long iflags;
+
+ if (it && (0 == it->index))
+ seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
+ (int)it->max, sg_big_buff);
+
+ read_lock_irqsave(&sg_index_lock, iflags);
+ sdp = it ? sg_lookup_dev(it->index) : NULL;
+ if (NULL == sdp)
+ goto skip;
+ read_lock(&sdp->sfd_lock);
+ if (!list_empty(&sdp->sfds)) {
+ seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+ if (atomic_read(&sdp->detaching))
+ seq_puts(s, "detaching pending close ");
+ else if (sdp->device) {
+ struct scsi_device *scsidp = sdp->device;
+
+ seq_printf(s, "%d:%d:%d:%llu em=%d",
+ scsidp->host->host_no,
+ scsidp->channel, scsidp->id,
+ scsidp->lun,
+ scsidp->host->hostt->emulated);
+ }
+ seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
+ sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
+ sg_proc_debug_helper(s, sdp);
+ }
+ read_unlock(&sdp->sfd_lock);
+skip:
+ read_unlock_irqrestore(&sg_index_lock, iflags);
+ return 0;
+}
+
+#endif /* CONFIG_SCSI_PROC_FS */
+
+module_init(init_sg);
+module_exit(exit_sg);
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
new file mode 100644
index 000000000..6d215e2fb
--- /dev/null
+++ b/drivers/scsi/sgiwd93.c
@@ -0,0 +1,337 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
+ * Copyright (C) 2001 Florian Lohoff (flo@rfc822.org)
+ * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * (In all truth, Jed Schimmel wrote all this code.)
+ */
+
+#undef DEBUG
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgi/wd.h>
+
+#include "scsi.h"
+#include "wd33c93.h"
+
+struct ip22_hostdata {
+ struct WD33C93_hostdata wh;
+ dma_addr_t dma;
+ void *cpu;
+ struct device *dev;
+};
+
+#define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata))
+
+struct hpc_chunk {
+ struct hpc_dma_desc desc;
+ u32 _padding; /* align to quadword boundary */
+};
+
+/* space for hpc dma descriptors */
+#define HPC_DMA_SIZE PAGE_SIZE
+
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
+{
+ struct Scsi_Host * host = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ wd33c93_intr(host);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static inline
+void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
+{
+ unsigned long len = cmd->SCp.this_residual;
+ void *addr = cmd->SCp.ptr;
+ dma_addr_t physaddr;
+ unsigned long count;
+ struct hpc_chunk *hcp;
+
+ physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
+ cmd->SCp.dma_handle = physaddr;
+ hcp = hd->cpu;
+
+ while (len) {
+ /*
+ * even cntinfo could be up to 16383, without
+ * magic only 8192 works correctly
+ */
+ count = len > 8192 ? 8192 : len;
+ hcp->desc.pbuf = physaddr;
+ hcp->desc.cntinfo = count;
+ hcp++;
+ len -= count;
+ physaddr += count;
+ }
+
+ /*
+ * To make sure, if we trip an HPC bug, that we transfer every single
+ * byte, we tag on an extra zero length dma descriptor at the end of
+ * the chain.
+ */
+ hcp->desc.pbuf = 0;
+ hcp->desc.cntinfo = HPCDMA_EOX;
+ dma_cache_sync(hd->dev, hd->cpu,
+ (unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
+ DMA_TO_DEVICE);
+}
+
+static int dma_setup(struct scsi_cmnd *cmd, int datainp)
+{
+ struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
+ struct hpc3_scsiregs *hregs =
+ (struct hpc3_scsiregs *) cmd->device->host->base;
+
+ pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
+
+ hdata->wh.dma_dir = datainp;
+
+ /*
+ * wd33c93 shouldn't pass us bogus dma_setups, but it does:-( The
+ * other wd33c93 drivers deal with it the same way (which isn't that
+ * obvious). IMHO a better fix would be, not to do these dma setups
+ * in the first place.
+ */
+ if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
+ return 1;
+
+ fill_hpc_entries(hdata, cmd, datainp);
+
+ pr_debug(" HPCGO\n");
+
+ /* Start up the HPC. */
+ hregs->ndptr = hdata->dma;
+ if (datainp)
+ hregs->ctrl = HPC3_SCTRL_ACTIVE;
+ else
+ hregs->ctrl = HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR;
+
+ return 0;
+}
+
+static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
+ int status)
+{
+ struct ip22_hostdata *hdata = host_to_hostdata(instance);
+ struct hpc3_scsiregs *hregs;
+
+ if (!SCpnt)
+ return;
+
+ if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
+ return;
+
+ hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
+
+ pr_debug("dma_stop: status<%d> ", status);
+
+ /* First stop the HPC and flush it's FIFO. */
+ if (hdata->wh.dma_dir) {
+ hregs->ctrl |= HPC3_SCTRL_FLUSH;
+ while (hregs->ctrl & HPC3_SCTRL_ACTIVE)
+ barrier();
+ }
+ hregs->ctrl = 0;
+ dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
+ SCpnt->SCp.this_residual,
+ DMA_DIR(hdata->wh.dma_dir));
+
+ pr_debug("\n");
+}
+
+void sgiwd93_reset(unsigned long base)
+{
+ struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) base;
+
+ hregs->ctrl = HPC3_SCTRL_CRESET;
+ udelay(50);
+ hregs->ctrl = 0;
+}
+EXPORT_SYMBOL_GPL(sgiwd93_reset);
+
+static inline void init_hpc_chain(struct ip22_hostdata *hdata)
+{
+ struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
+ dma_addr_t dma = hdata->dma;
+ unsigned long start, end;
+
+ start = (unsigned long) hcp;
+ end = start + HPC_DMA_SIZE;
+ while (start < end) {
+ hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk));
+ hcp->desc.cntinfo = HPCDMA_EOX;
+ hcp++;
+ dma += sizeof(struct hpc_chunk);
+ start += sizeof(struct hpc_chunk);
+ };
+ hcp--;
+ hcp->desc.pnext = hdata->dma;
+}
+
+static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
+{
+ /* FIXME perform bus-specific reset */
+
+ /* FIXME 2: kill this function, and let midlayer fallback
+ to the same result, calling wd33c93_host_reset() */
+
+ spin_lock_irq(cmd->device->host->host_lock);
+ wd33c93_host_reset(cmd);
+ spin_unlock_irq(cmd->device->host->host_lock);
+
+ return SUCCESS;
+}
+
+/*
+ * Kludge alert - the SCSI code calls the abort and reset method with int
+ * arguments not with pointers. So this is going to blow up beautyfully
+ * on 64-bit systems with memory outside the compat address spaces.
+ */
+static struct scsi_host_template sgiwd93_template = {
+ .module = THIS_MODULE,
+ .proc_name = "SGIWD93",
+ .name = "SGI WD93",
+ .queuecommand = wd33c93_queuecommand,
+ .eh_abort_handler = wd33c93_abort,
+ .eh_bus_reset_handler = sgiwd93_bus_reset,
+ .eh_host_reset_handler = wd33c93_host_reset,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 8,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+
+static int sgiwd93_probe(struct platform_device *pdev)
+{
+ struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
+ unsigned char *wdregs = pd->wdregs;
+ struct hpc3_scsiregs *hregs = pd->hregs;
+ struct ip22_hostdata *hdata;
+ struct Scsi_Host *host;
+ wd33c93_regs regs;
+ unsigned int unit = pd->unit;
+ unsigned int irq = pd->irq;
+ int err;
+
+ host = scsi_host_alloc(&sgiwd93_template, sizeof(struct ip22_hostdata));
+ if (!host) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ host->base = (unsigned long) hregs;
+ host->irq = irq;
+
+ hdata = host_to_hostdata(host);
+ hdata->dev = &pdev->dev;
+ hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
+ &hdata->dma, GFP_KERNEL);
+ if (!hdata->cpu) {
+ printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
+ "host %d buffer.\n", unit);
+ err = -ENOMEM;
+ goto out_put;
+ }
+
+ init_hpc_chain(hdata);
+
+ regs.SASR = wdregs + 3;
+ regs.SCMD = wdregs + 7;
+
+ hdata->wh.no_sync = 0;
+ hdata->wh.fast = 1;
+ hdata->wh.dma_mode = CTRL_BURST;
+
+ wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20));
+
+ err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host);
+ if (err) {
+ printk(KERN_WARNING "sgiwd93: Could not register irq %d "
+ "for host %d.\n", irq, unit);
+ goto out_free;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ err = scsi_add_host(host, NULL);
+ if (err)
+ goto out_irq;
+
+ scsi_scan_host(host);
+
+ return 0;
+
+out_irq:
+ free_irq(irq, host);
+out_free:
+ dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+out_put:
+ scsi_host_put(host);
+out:
+
+ return err;
+}
+
+static int __exit sgiwd93_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *host = platform_get_drvdata(pdev);
+ struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
+ struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
+
+ scsi_remove_host(host);
+ free_irq(pd->irq, host);
+ dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ scsi_host_put(host);
+ return 0;
+}
+
+static struct platform_driver sgiwd93_driver = {
+ .probe = sgiwd93_probe,
+ .remove = sgiwd93_remove,
+ .driver = {
+ .name = "sgiwd93",
+ }
+};
+
+static int __init sgiwd93_module_init(void)
+{
+ return platform_driver_register(&sgiwd93_driver);
+}
+
+static void __exit sgiwd93_module_exit(void)
+{
+ return platform_driver_unregister(&sgiwd93_driver);
+}
+
+module_init(sgiwd93_module_init);
+module_exit(sgiwd93_module_exit);
+
+MODULE_DESCRIPTION("SGI WD33C93 driver");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiwd93");
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
new file mode 100644
index 000000000..3b3b56f4a
--- /dev/null
+++ b/drivers/scsi/sim710.c
@@ -0,0 +1,256 @@
+/*
+ * sim710.c - Copyright (C) 1999 Richard Hirst <richard@sleepie.demon.co.uk>
+ *
+ *----------------------------------------------------------------------------
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *----------------------------------------------------------------------------
+ *
+ * MCA card detection code by Trent McNair. (now deleted)
+ * Fixes to not explicitly nul bss data from Xavier Bestel.
+ * Some multiboard fixes from Rolf Eike Beer.
+ * Auto probing of EISA config space from Trevor Hemsley.
+ *
+ * Rewritten to use 53c700.c by James.Bottomley@SteelEye.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/eisa.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+
+/* Must be enough for EISA */
+#define MAX_SLOTS 8
+static __u8 __initdata id_array[MAX_SLOTS] = { [0 ... MAX_SLOTS-1] = 7 };
+
+static char *sim710; /* command line passed by insmod */
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("Simple NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+module_param(sim710, charp, 0);
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+static __init int
+param_setup(char *str)
+{
+ char *pos = str, *next;
+ int slot = -1;
+
+ while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
+ int val = (int)simple_strtoul(++next, NULL, 0);
+
+ if(!strncmp(pos, "slot:", 5))
+ slot = val;
+ else if(!strncmp(pos, "id:", 3)) {
+ if(slot == -1) {
+ printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
+ } else if(slot >= MAX_SLOTS) {
+ printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
+ } else {
+ id_array[slot] = val;
+ }
+ }
+ if((pos = strchr(pos, ARG_SEP)) != NULL)
+ pos++;
+ }
+ return 1;
+}
+__setup("sim710=", param_setup);
+
+static struct scsi_host_template sim710_driver_template = {
+ .name = "LSI (Symbios) 710 EISA",
+ .proc_name = "sim710",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static int sim710_probe_common(struct device *dev, unsigned long base_addr,
+ int irq, int clock, int differential,
+ int scsi_id)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata =
+ kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+
+ printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));
+ printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
+ irq, clock, base_addr, scsi_id);
+
+ if(hostdata == NULL) {
+ printk(KERN_ERR "sim710: Failed to allocate host data\n");
+ goto out;
+ }
+
+ if(request_region(base_addr, 64, "sim710") == NULL) {
+ printk(KERN_ERR "sim710: Failed to reserve IO region 0x%lx\n",
+ base_addr);
+ goto out_free;
+ }
+
+ /* Fill in the three required pieces of hostdata */
+ hostdata->base = ioport_map(base_addr, 64);
+ hostdata->differential = differential;
+ hostdata->clock = clock;
+ hostdata->chip710 = 1;
+ hostdata->burst_length = 8;
+
+ /* and register the chip */
+ if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev))
+ == NULL) {
+ printk(KERN_ERR "sim710: No host detected; card configuration problem?\n");
+ goto out_release;
+ }
+ host->this_id = scsi_id;
+ host->base = base_addr;
+ host->irq = irq;
+ if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "sim710", host)) {
+ printk(KERN_ERR "sim710: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ dev_set_drvdata(dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_release:
+ release_region(base_addr, 64);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static int sim710_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(dev);
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ release_region(host->base, 64);
+ return 0;
+}
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id sim710_eisa_ids[] = {
+ { "CPQ4410" },
+ { "CPQ4411" },
+ { "HWP0C80" },
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
+
+static __init int
+sim710_eisa_probe(struct device *dev)
+{
+ struct eisa_device *edev = to_eisa_device(dev);
+ unsigned long io_addr = edev->base_addr;
+ char eisa_cpq_irqs[] = { 11, 14, 15, 10, 9, 0 };
+ char eisa_hwp_irqs[] = { 3, 4, 5, 7, 12, 10, 11, 0};
+ char *eisa_irqs;
+ unsigned char irq_index;
+ unsigned char irq, differential = 0, scsi_id = 7;
+
+ if(strcmp(edev->id.sig, "HWP0C80") == 0) {
+ __u8 val;
+ eisa_irqs = eisa_hwp_irqs;
+ irq_index = (inb(io_addr + 0xc85) & 0x7) - 1;
+
+ val = inb(io_addr + 0x4);
+ scsi_id = ffs(val) - 1;
+
+ if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) {
+ printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev));
+ scsi_id = 7;
+ }
+ } else {
+ eisa_irqs = eisa_cpq_irqs;
+ irq_index = inb(io_addr + 0xc88) & 0x07;
+ }
+
+ if(irq_index >= strlen(eisa_irqs)) {
+ printk("sim710.c: irq nasty\n");
+ return -ENODEV;
+ }
+
+ irq = eisa_irqs[irq_index];
+
+ return sim710_probe_common(dev, io_addr, irq, 50,
+ differential, scsi_id);
+}
+
+static struct eisa_driver sim710_eisa_driver = {
+ .id_table = sim710_eisa_ids,
+ .driver = {
+ .name = "sim710",
+ .probe = sim710_eisa_probe,
+ .remove = sim710_device_remove,
+ },
+};
+#endif /* CONFIG_EISA */
+
+static int __init sim710_init(void)
+{
+ int err = -ENODEV;
+
+#ifdef MODULE
+ if (sim710)
+ param_setup(sim710);
+#endif
+
+#ifdef CONFIG_EISA
+ err = eisa_driver_register(&sim710_eisa_driver);
+#endif
+ /* FIXME: what we'd really like to return here is -ENODEV if
+ * no devices have actually been found. Instead, the err
+ * above actually only reports problems with kobject_register,
+ * so for the moment return success */
+
+ return 0;
+}
+
+static void __exit sim710_exit(void)
+{
+#ifdef CONFIG_EISA
+ eisa_driver_unregister(&sim710_eisa_driver);
+#endif
+}
+
+module_init(sim710_init);
+module_exit(sim710_exit);
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
new file mode 100644
index 000000000..762780721
--- /dev/null
+++ b/drivers/scsi/sni_53c710.c
@@ -0,0 +1,154 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* SNI RM driver
+ *
+ * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+ */
+
+/*
+ * Based on lasi700.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Thomas Bogendörfer");
+MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_53c710");
+
+#define SNIRM710_CLOCK 32
+
+static struct scsi_host_template snirm710_template = {
+ .name = "SNI RM SCSI 53c710",
+ .proc_name = "snirm_53c710",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static int snirm710_probe(struct platform_device *dev)
+{
+ unsigned long base;
+ struct NCR_700_Host_Parameters *hostdata;
+ struct Scsi_Host *host;
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = res->start;
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+ return -ENOMEM;
+ }
+
+ hostdata->dev = &dev->dev;
+ dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->differential = 0;
+
+ hostdata->clock = SNIRM710_CLOCK;
+ hostdata->force_le_on_be = 1;
+ hostdata->chip710 = 1;
+ hostdata->burst_length = 4;
+
+ host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev);
+ if (!host)
+ goto out_kfree;
+ host->this_id = 7;
+ host->base = base;
+ host->irq = platform_get_irq(dev, 0);
+ if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
+ printk(KERN_ERR "snirm710: request_irq failed!\n");
+ goto out_put_host;
+ }
+
+ dev_set_drvdata(&dev->dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_kfree:
+ iounmap(hostdata->base);
+ kfree(hostdata);
+ return -ENODEV;
+}
+
+static int __exit snirm710_driver_remove(struct platform_device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
+ struct NCR_700_Host_Parameters *hostdata =
+ (struct NCR_700_Host_Parameters *)host->hostdata[0];
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ free_irq(host->irq, host);
+ iounmap(hostdata->base);
+ kfree(hostdata);
+
+ return 0;
+}
+
+static struct platform_driver snirm710_driver = {
+ .probe = snirm710_probe,
+ .remove = snirm710_driver_remove,
+ .driver = {
+ .name = "snirm_53c710",
+ },
+};
+
+static int __init snirm710_init(void)
+{
+ return platform_driver_register(&snirm710_driver);
+}
+
+static void __exit snirm710_exit(void)
+{
+ platform_driver_unregister(&snirm710_driver);
+}
+
+module_init(snirm710_init);
+module_exit(snirm710_exit);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
new file mode 100644
index 000000000..8bd54a64e
--- /dev/null
+++ b/drivers/scsi/sr.c
@@ -0,0 +1,1018 @@
+/*
+ * sr.c Copyright (C) 1992 David Giller
+ * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
+ *
+ * adapted from:
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Linux scsi disk driver by
+ * Drew Eckhardt <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@andante.org to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@andante.org to support loadable
+ * low-level scsi drivers.
+ *
+ * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to
+ * provide auto-eject.
+ *
+ * Modified by Gerd Knorr <kraxel@cs.tu-berlin.de> to support the
+ * generic cdrom interface
+ *
+ * Modified by Jens Axboe <axboe@suse.de> - Uniform sr_packet()
+ * interface, capabilities probe additions, ioctl cleanups, etc.
+ *
+ * Modified by Richard Gooch <rgooch@atnf.csiro.au> to support devfs
+ *
+ * Modified by Jens Axboe <axboe@suse.de> - support DVD-RAM
+ * transparently and lose the GHOST hack
+ *
+ * Modified by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * check resource allocation in sr_init and some cleanups
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdrom.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */
+
+#include "scsi_logging.h"
+#include "sr.h"
+
+
+MODULE_DESCRIPTION("SCSI cdrom (sr) driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
+
+#define SR_DISKS 256
+
+#define SR_CAPABILITIES \
+ (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \
+ CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \
+ CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \
+ CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
+ CDC_MRW|CDC_MRW_W|CDC_RAM)
+
+static DEFINE_MUTEX(sr_mutex);
+static int sr_probe(struct device *);
+static int sr_remove(struct device *);
+static int sr_init_command(struct scsi_cmnd *SCpnt);
+static int sr_done(struct scsi_cmnd *);
+static int sr_runtime_suspend(struct device *dev);
+
+static struct dev_pm_ops sr_pm_ops = {
+ .runtime_suspend = sr_runtime_suspend,
+};
+
+static struct scsi_driver sr_template = {
+ .gendrv = {
+ .name = "sr",
+ .owner = THIS_MODULE,
+ .probe = sr_probe,
+ .remove = sr_remove,
+ .pm = &sr_pm_ops,
+ },
+ .init_command = sr_init_command,
+ .done = sr_done,
+};
+
+static unsigned long sr_index_bits[SR_DISKS / BITS_PER_LONG];
+static DEFINE_SPINLOCK(sr_index_lock);
+
+/* This semaphore is used to mediate the 0->1 reference get in the
+ * face of object destruction (i.e. we can't allow a get on an
+ * object after last put) */
+static DEFINE_MUTEX(sr_ref_mutex);
+
+static int sr_open(struct cdrom_device_info *, int);
+static void sr_release(struct cdrom_device_info *);
+
+static void get_sectorsize(struct scsi_cd *);
+static void get_capabilities(struct scsi_cd *);
+
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+ unsigned int clearing, int slot);
+static int sr_packet(struct cdrom_device_info *, struct packet_command *);
+
+static struct cdrom_device_ops sr_dops = {
+ .open = sr_open,
+ .release = sr_release,
+ .drive_status = sr_drive_status,
+ .check_events = sr_check_events,
+ .tray_move = sr_tray_move,
+ .lock_door = sr_lock_door,
+ .select_speed = sr_select_speed,
+ .get_last_session = sr_get_last_session,
+ .get_mcn = sr_get_mcn,
+ .reset = sr_reset,
+ .audio_ioctl = sr_audio_ioctl,
+ .capability = SR_CAPABILITIES,
+ .generic_packet = sr_packet,
+};
+
+static void sr_kref_release(struct kref *kref);
+
+static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
+{
+ return container_of(disk->private_data, struct scsi_cd, driver);
+}
+
+static int sr_runtime_suspend(struct device *dev)
+{
+ struct scsi_cd *cd = dev_get_drvdata(dev);
+
+ if (cd->media_present)
+ return -EBUSY;
+ else
+ return 0;
+}
+
+/*
+ * The get and put routines for the struct scsi_cd. Note this entity
+ * has a scsi_device pointer and owns a reference to this.
+ */
+static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
+{
+ struct scsi_cd *cd = NULL;
+
+ mutex_lock(&sr_ref_mutex);
+ if (disk->private_data == NULL)
+ goto out;
+ cd = scsi_cd(disk);
+ kref_get(&cd->kref);
+ if (scsi_device_get(cd->device)) {
+ kref_put(&cd->kref, sr_kref_release);
+ cd = NULL;
+ }
+ out:
+ mutex_unlock(&sr_ref_mutex);
+ return cd;
+}
+
+static void scsi_cd_put(struct scsi_cd *cd)
+{
+ struct scsi_device *sdev = cd->device;
+
+ mutex_lock(&sr_ref_mutex);
+ kref_put(&cd->kref, sr_kref_release);
+ scsi_device_put(sdev);
+ mutex_unlock(&sr_ref_mutex);
+}
+
+static unsigned int sr_get_events(struct scsi_device *sdev)
+{
+ u8 buf[8];
+ u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION,
+ 1, /* polled */
+ 0, 0, /* reserved */
+ 1 << 4, /* notification class: media */
+ 0, 0, /* reserved */
+ 0, sizeof(buf), /* allocation length */
+ 0, /* control */
+ };
+ struct event_header *eh = (void *)buf;
+ struct media_event_desc *med = (void *)(buf + 4);
+ struct scsi_sense_hdr sshdr;
+ int result;
+
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf),
+ &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL);
+ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
+ return DISK_EVENT_MEDIA_CHANGE;
+
+ if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
+ return 0;
+
+ if (eh->nea || eh->notification_class != 0x4)
+ return 0;
+
+ if (med->media_event_code == 1)
+ return DISK_EVENT_EJECT_REQUEST;
+ else if (med->media_event_code == 2)
+ return DISK_EVENT_MEDIA_CHANGE;
+ return 0;
+}
+
+/*
+ * This function checks to see if the media has been changed or eject
+ * button has been pressed. It is possible that we have already
+ * sensed a change, or the drive may have sensed one and not yet
+ * reported it. The past events are accumulated in sdev->changed and
+ * returned together with the current state.
+ */
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+ unsigned int clearing, int slot)
+{
+ struct scsi_cd *cd = cdi->handle;
+ bool last_present;
+ struct scsi_sense_hdr sshdr;
+ unsigned int events;
+ int ret;
+
+ /* no changer support */
+ if (CDSL_CURRENT != slot)
+ return 0;
+
+ events = sr_get_events(cd->device);
+ cd->get_event_changed |= events & DISK_EVENT_MEDIA_CHANGE;
+
+ /*
+ * If earlier GET_EVENT_STATUS_NOTIFICATION and TUR did not agree
+ * for several times in a row. We rely on TUR only for this likely
+ * broken device, to prevent generating incorrect media changed
+ * events for every open().
+ */
+ if (cd->ignore_get_event) {
+ events &= ~DISK_EVENT_MEDIA_CHANGE;
+ goto do_tur;
+ }
+
+ /*
+ * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE
+ * is being cleared. Note that there are devices which hang
+ * if asked to execute TUR repeatedly.
+ */
+ if (cd->device->changed) {
+ events |= DISK_EVENT_MEDIA_CHANGE;
+ cd->device->changed = 0;
+ cd->tur_changed = true;
+ }
+
+ if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
+ return events;
+do_tur:
+ /* let's see whether the media is there with TUR */
+ last_present = cd->media_present;
+ ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+
+ /*
+ * Media is considered to be present if TUR succeeds or fails with
+ * sense data indicating something other than media-not-present
+ * (ASC 0x3a).
+ */
+ cd->media_present = scsi_status_is_good(ret) ||
+ (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a);
+
+ if (last_present != cd->media_present)
+ cd->device->changed = 1;
+
+ if (cd->device->changed) {
+ events |= DISK_EVENT_MEDIA_CHANGE;
+ cd->device->changed = 0;
+ cd->tur_changed = true;
+ }
+
+ if (cd->ignore_get_event)
+ return events;
+
+ /* check whether GET_EVENT is reporting spurious MEDIA_CHANGE */
+ if (!cd->tur_changed) {
+ if (cd->get_event_changed) {
+ if (cd->tur_mismatch++ > 8) {
+ sr_printk(KERN_WARNING, cd,
+ "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n");
+ cd->ignore_get_event = true;
+ }
+ } else {
+ cd->tur_mismatch = 0;
+ }
+ }
+ cd->tur_changed = false;
+ cd->get_event_changed = false;
+
+ return events;
+}
+
+/*
+ * sr_done is the interrupt routine for the device driver.
+ *
+ * It will be notified on the end of a SCSI read / write, and will take one
+ * of several actions based on success or failure.
+ */
+static int sr_done(struct scsi_cmnd *SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = scsi_bufflen(SCpnt);
+ int good_bytes = (result == 0 ? this_count : 0);
+ int block_sectors = 0;
+ long error_sector;
+ struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+
+#ifdef DEBUG
+ scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
+#endif
+
+ /*
+ * Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial
+ * success. Since this is a relatively rare error condition, no
+ * care is taken to avoid unnecessary additional work such as
+ * memcpy's that could be avoided.
+ */
+ if (driver_byte(result) != 0 && /* An error occurred */
+ (SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */
+ switch (SCpnt->sense_buffer[2]) {
+ case MEDIUM_ERROR:
+ case VOLUME_OVERFLOW:
+ case ILLEGAL_REQUEST:
+ if (!(SCpnt->sense_buffer[0] & 0x90))
+ break;
+ error_sector = (SCpnt->sense_buffer[3] << 24) |
+ (SCpnt->sense_buffer[4] << 16) |
+ (SCpnt->sense_buffer[5] << 8) |
+ SCpnt->sense_buffer[6];
+ if (SCpnt->request->bio != NULL)
+ block_sectors =
+ bio_sectors(SCpnt->request->bio);
+ if (block_sectors < 4)
+ block_sectors = 4;
+ if (cd->device->sector_size == 2048)
+ error_sector <<= 2;
+ error_sector &= ~(block_sectors - 1);
+ good_bytes = (error_sector -
+ blk_rq_pos(SCpnt->request)) << 9;
+ if (good_bytes < 0 || good_bytes >= this_count)
+ good_bytes = 0;
+ /*
+ * The SCSI specification allows for the value
+ * returned by READ CAPACITY to be up to 75 2K
+ * sectors past the last readable block.
+ * Therefore, if we hit a medium error within the
+ * last 75 2K sectors, we decrease the saved size
+ * value.
+ */
+ if (error_sector < get_capacity(cd->disk) &&
+ cd->capacity - error_sector < 4 * 75)
+ set_capacity(cd->disk, error_sector);
+ break;
+
+ case RECOVERED_ERROR:
+ good_bytes = this_count;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return good_bytes;
+}
+
+static int sr_init_command(struct scsi_cmnd *SCpnt)
+{
+ int block = 0, this_count, s_size;
+ struct scsi_cd *cd;
+ struct request *rq = SCpnt->request;
+ int ret;
+
+ ret = scsi_init_io(SCpnt);
+ if (ret != BLKPREP_OK)
+ goto out;
+ SCpnt = rq->special;
+ cd = scsi_cd(rq->rq_disk);
+
+ /* from here on until we're complete, any goto out
+ * is used for a killable error condition */
+ ret = BLKPREP_KILL;
+
+ SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
+ "Doing sr request, block = %d\n", block));
+
+ if (!cd->device || !scsi_device_online(cd->device)) {
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "Finishing %u sectors\n", blk_rq_sectors(rq)));
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "Retry with 0x%p\n", SCpnt));
+ goto out;
+ }
+
+ if (cd->device->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc until the
+ * changed bit has been reset
+ */
+ goto out;
+ }
+
+ /*
+ * we do lazy blocksize switching (when reading XA sectors,
+ * see CDROMREADMODE2 ioctl)
+ */
+ s_size = cd->device->sector_size;
+ if (s_size > 2048) {
+ if (!in_interrupt())
+ sr_set_blocklength(cd, 2048);
+ else
+ scmd_printk(KERN_INFO, SCpnt,
+ "can't switch blocksize: in interrupt\n");
+ }
+
+ if (s_size != 512 && s_size != 1024 && s_size != 2048) {
+ scmd_printk(KERN_ERR, SCpnt, "bad sector size %d\n", s_size);
+ goto out;
+ }
+
+ if (rq_data_dir(rq) == WRITE) {
+ if (!cd->writeable)
+ goto out;
+ SCpnt->cmnd[0] = WRITE_10;
+ cd->cdi.media_written = 1;
+ } else if (rq_data_dir(rq) == READ) {
+ SCpnt->cmnd[0] = READ_10;
+ } else {
+ blk_dump_rq_flags(rq, "Unknown sr command");
+ goto out;
+ }
+
+ {
+ struct scatterlist *sg;
+ int i, size = 0, sg_count = scsi_sg_count(SCpnt);
+
+ scsi_for_each_sg(SCpnt, sg, sg_count, i)
+ size += sg->length;
+
+ if (size != scsi_bufflen(SCpnt)) {
+ scmd_printk(KERN_ERR, SCpnt,
+ "mismatch count %d, bytes %d\n",
+ size, scsi_bufflen(SCpnt));
+ if (scsi_bufflen(SCpnt) > size)
+ SCpnt->sdb.length = size;
+ }
+ }
+
+ /*
+ * request doesn't start on hw block boundary, add scatter pads
+ */
+ if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
+ (scsi_bufflen(SCpnt) % s_size)) {
+ scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
+ goto out;
+ }
+
+ this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
+
+
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "%s %d/%u 512 byte blocks.\n",
+ (rq_data_dir(rq) == WRITE) ?
+ "writing" : "reading",
+ this_count, blk_rq_sectors(rq)));
+
+ SCpnt->cmnd[1] = 0;
+ block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
+
+ if (this_count > 0xffff) {
+ this_count = 0xffff;
+ SCpnt->sdb.length = this_count * s_size;
+ }
+
+ SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+ SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
+ SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
+ SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
+ SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+ SCpnt->transfersize = cd->device->sector_size;
+ SCpnt->underflow = this_count << 9;
+ SCpnt->allowed = MAX_RETRIES;
+
+ /*
+ * This indicates that the command is ready from our end to be
+ * queued.
+ */
+ ret = BLKPREP_OK;
+ out:
+ return ret;
+}
+
+static int sr_block_open(struct block_device *bdev, fmode_t mode)
+{
+ struct scsi_cd *cd;
+ int ret = -ENXIO;
+
+ mutex_lock(&sr_mutex);
+ cd = scsi_cd_get(bdev->bd_disk);
+ if (cd) {
+ ret = cdrom_open(&cd->cdi, bdev, mode);
+ if (ret)
+ scsi_cd_put(cd);
+ }
+ mutex_unlock(&sr_mutex);
+ return ret;
+}
+
+static void sr_block_release(struct gendisk *disk, fmode_t mode)
+{
+ struct scsi_cd *cd = scsi_cd(disk);
+ mutex_lock(&sr_mutex);
+ cdrom_release(&cd->cdi, mode);
+ scsi_cd_put(cd);
+ mutex_unlock(&sr_mutex);
+}
+
+static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ unsigned long arg)
+{
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ mutex_lock(&sr_mutex);
+
+ ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+ if (ret)
+ goto out;
+
+ /*
+ * Send SCSI addressing ioctls directly to mid level, send other
+ * ioctls to cdrom/block level.
+ */
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ ret = scsi_ioctl(sdev, cmd, argp);
+ goto out;
+ }
+
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ if (ret != -ENOSYS)
+ goto out;
+
+ ret = scsi_ioctl(sdev, cmd, argp);
+
+out:
+ mutex_unlock(&sr_mutex);
+ return ret;
+}
+
+static unsigned int sr_block_check_events(struct gendisk *disk,
+ unsigned int clearing)
+{
+ struct scsi_cd *cd = scsi_cd(disk);
+
+ if (atomic_read(&cd->device->disk_events_disable_depth))
+ return 0;
+
+ return cdrom_check_events(&cd->cdi, clearing);
+}
+
+static int sr_block_revalidate_disk(struct gendisk *disk)
+{
+ struct scsi_cd *cd = scsi_cd(disk);
+ struct scsi_sense_hdr sshdr;
+
+ /* if the unit is not ready, nothing more to do */
+ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+ goto out;
+
+ sr_cd_check(&cd->cdi);
+ get_sectorsize(cd);
+out:
+ return 0;
+}
+
+static const struct block_device_operations sr_bdops =
+{
+ .owner = THIS_MODULE,
+ .open = sr_block_open,
+ .release = sr_block_release,
+ .ioctl = sr_block_ioctl,
+ .check_events = sr_block_check_events,
+ .revalidate_disk = sr_block_revalidate_disk,
+ /*
+ * No compat_ioctl for now because sr_block_ioctl never
+ * seems to pass arbitrary ioctls down to host drivers.
+ */
+};
+
+static int sr_open(struct cdrom_device_info *cdi, int purpose)
+{
+ struct scsi_cd *cd = cdi->handle;
+ struct scsi_device *sdev = cd->device;
+ int retval;
+
+ /*
+ * If the device is in error recovery, wait until it is done.
+ * If the device is offline, then disallow any access to it.
+ */
+ retval = -ENXIO;
+ if (!scsi_block_when_processing_errors(sdev))
+ goto error_out;
+
+ return 0;
+
+error_out:
+ return retval;
+}
+
+static void sr_release(struct cdrom_device_info *cdi)
+{
+ struct scsi_cd *cd = cdi->handle;
+
+ if (cd->device->sector_size > 2048)
+ sr_set_blocklength(cd, 2048);
+
+}
+
+static int sr_probe(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct gendisk *disk;
+ struct scsi_cd *cd;
+ int minor, error;
+
+ scsi_autopm_get_device(sdev);
+ error = -ENODEV;
+ if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM)
+ goto fail;
+
+ error = -ENOMEM;
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ goto fail;
+
+ kref_init(&cd->kref);
+
+ disk = alloc_disk(1);
+ if (!disk)
+ goto fail_free;
+
+ spin_lock(&sr_index_lock);
+ minor = find_first_zero_bit(sr_index_bits, SR_DISKS);
+ if (minor == SR_DISKS) {
+ spin_unlock(&sr_index_lock);
+ error = -EBUSY;
+ goto fail_put;
+ }
+ __set_bit(minor, sr_index_bits);
+ spin_unlock(&sr_index_lock);
+
+ disk->major = SCSI_CDROM_MAJOR;
+ disk->first_minor = minor;
+ sprintf(disk->disk_name, "sr%d", minor);
+ disk->fops = &sr_bdops;
+ disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+
+ blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
+
+ cd->device = sdev;
+ cd->disk = disk;
+ cd->driver = &sr_template;
+ cd->disk = disk;
+ cd->capacity = 0x1fffff;
+ cd->device->changed = 1; /* force recheck CD type */
+ cd->media_present = 1;
+ cd->use = 1;
+ cd->readcd_known = 0;
+ cd->readcd_cdda = 0;
+
+ cd->cdi.ops = &sr_dops;
+ cd->cdi.handle = cd;
+ cd->cdi.mask = 0;
+ cd->cdi.capacity = 1;
+ sprintf(cd->cdi.name, "sr%d", minor);
+
+ sdev->sector_size = 2048; /* A guess, just in case */
+
+ /* FIXME: need to handle a get_capabilities failure properly ?? */
+ get_capabilities(cd);
+ sr_vendor_init(cd);
+
+ disk->driverfs_dev = &sdev->sdev_gendev;
+ set_capacity(disk, cd->capacity);
+ disk->private_data = &cd->driver;
+ disk->queue = sdev->request_queue;
+ cd->cdi.disk = disk;
+
+ if (register_cdrom(&cd->cdi))
+ goto fail_put;
+
+ /*
+ * Initialize block layer runtime PM stuffs before the
+ * periodic event checking request gets started in add_disk.
+ */
+ blk_pm_runtime_init(sdev->request_queue, dev);
+
+ dev_set_drvdata(dev, cd);
+ disk->flags |= GENHD_FL_REMOVABLE;
+ add_disk(disk);
+
+ sdev_printk(KERN_DEBUG, sdev,
+ "Attached scsi CD-ROM %s\n", cd->cdi.name);
+ scsi_autopm_put_device(cd->device);
+
+ return 0;
+
+fail_put:
+ put_disk(disk);
+fail_free:
+ kfree(cd);
+fail:
+ scsi_autopm_put_device(sdev);
+ return error;
+}
+
+
+static void get_sectorsize(struct scsi_cd *cd)
+{
+ unsigned char cmd[10];
+ unsigned char buffer[8];
+ int the_result, retries = 3;
+ int sector_size;
+ struct request_queue *queue;
+
+ do {
+ cmd[0] = READ_CAPACITY;
+ memset((void *) &cmd[1], 0, 9);
+ memset(buffer, 0, sizeof(buffer));
+
+ /* Do the command and wait.. */
+ the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
+ buffer, sizeof(buffer), NULL,
+ SR_TIMEOUT, MAX_RETRIES, NULL);
+
+ retries--;
+
+ } while (the_result && retries);
+
+
+ if (the_result) {
+ cd->capacity = 0x1fffff;
+ sector_size = 2048; /* A guess, just in case */
+ } else {
+ long last_written;
+
+ cd->capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) |
+ (buffer[2] << 8) | buffer[3]);
+ /*
+ * READ_CAPACITY doesn't return the correct size on
+ * certain UDF media. If last_written is larger, use
+ * it instead.
+ *
+ * http://bugzilla.kernel.org/show_bug.cgi?id=9668
+ */
+ if (!cdrom_get_last_written(&cd->cdi, &last_written))
+ cd->capacity = max_t(long, cd->capacity, last_written);
+
+ sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ switch (sector_size) {
+ /*
+ * HP 4020i CD-Recorder reports 2340 byte sectors
+ * Philips CD-Writers report 2352 byte sectors
+ *
+ * Use 2k sectors for them..
+ */
+ case 0:
+ case 2340:
+ case 2352:
+ sector_size = 2048;
+ /* fall through */
+ case 2048:
+ cd->capacity *= 4;
+ /* fall through */
+ case 512:
+ break;
+ default:
+ sr_printk(KERN_INFO, cd,
+ "unsupported sector size %d.", sector_size);
+ cd->capacity = 0;
+ }
+
+ cd->device->sector_size = sector_size;
+
+ /*
+ * Add this so that we have the ability to correctly gauge
+ * what the device is capable of.
+ */
+ set_capacity(cd->disk, cd->capacity);
+ }
+
+ queue = cd->device->request_queue;
+ blk_queue_logical_block_size(queue, sector_size);
+
+ return;
+}
+
+static void get_capabilities(struct scsi_cd *cd)
+{
+ unsigned char *buffer;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
+ int rc, n;
+
+ static const char *loadmech[] =
+ {
+ "caddy",
+ "tray",
+ "pop-up",
+ "",
+ "changer",
+ "cartridge changer",
+ "",
+ ""
+ };
+
+
+ /* allocate transfer buffer */
+ buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ if (!buffer) {
+ sr_printk(KERN_ERR, cd, "out of memory.\n");
+ return;
+ }
+
+ /* eat unit attentions */
+ scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+
+ /* ask for mode page 0x2a */
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ SR_TIMEOUT, 3, &data, NULL);
+
+ if (!scsi_status_is_good(rc)) {
+ /* failed, drive doesn't have capabilities mode page */
+ cd->cdi.speed = 1;
+ cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+ CDC_DVD | CDC_DVD_RAM |
+ CDC_SELECT_DISC | CDC_SELECT_SPEED |
+ CDC_MRW | CDC_MRW_W | CDC_RAM);
+ kfree(buffer);
+ sr_printk(KERN_INFO, cd, "scsi-1 drive");
+ return;
+ }
+
+ n = data.header_length + data.block_descriptor_length;
+ cd->cdi.speed = ((buffer[n + 8] << 8) + buffer[n + 9]) / 176;
+ cd->readcd_known = 1;
+ cd->readcd_cdda = buffer[n + 5] & 0x01;
+ /* print some capability bits */
+ sr_printk(KERN_INFO, cd,
+ "scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n",
+ ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
+ cd->cdi.speed,
+ buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
+ buffer[n + 3] & 0x20 ? "dvd-ram " : "",
+ buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */
+ buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */
+ buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */
+ loadmech[buffer[n + 6] >> 5]);
+ if ((buffer[n + 6] >> 5) == 0)
+ /* caddy drives can't close tray... */
+ cd->cdi.mask |= CDC_CLOSE_TRAY;
+ if ((buffer[n + 2] & 0x8) == 0)
+ /* not a DVD drive */
+ cd->cdi.mask |= CDC_DVD;
+ if ((buffer[n + 3] & 0x20) == 0)
+ /* can't write DVD-RAM media */
+ cd->cdi.mask |= CDC_DVD_RAM;
+ if ((buffer[n + 3] & 0x10) == 0)
+ /* can't write DVD-R media */
+ cd->cdi.mask |= CDC_DVD_R;
+ if ((buffer[n + 3] & 0x2) == 0)
+ /* can't write CD-RW media */
+ cd->cdi.mask |= CDC_CD_RW;
+ if ((buffer[n + 3] & 0x1) == 0)
+ /* can't write CD-R media */
+ cd->cdi.mask |= CDC_CD_R;
+ if ((buffer[n + 6] & 0x8) == 0)
+ /* can't eject */
+ cd->cdi.mask |= CDC_OPEN_TRAY;
+
+ if ((buffer[n + 6] >> 5) == mechtype_individual_changer ||
+ (buffer[n + 6] >> 5) == mechtype_cartridge_changer)
+ cd->cdi.capacity =
+ cdrom_number_of_slots(&cd->cdi);
+ if (cd->cdi.capacity <= 1)
+ /* not a changer */
+ cd->cdi.mask |= CDC_SELECT_DISC;
+ /*else I don't think it can close its tray
+ cd->cdi.mask |= CDC_CLOSE_TRAY; */
+
+ /*
+ * if DVD-RAM, MRW-W or CD-RW, we are randomly writable
+ */
+ if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) !=
+ (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) {
+ cd->writeable = 1;
+ }
+
+ kfree(buffer);
+}
+
+/*
+ * sr_packet() is the entry point for the generic commands generated
+ * by the Uniform CD-ROM layer.
+ */
+static int sr_packet(struct cdrom_device_info *cdi,
+ struct packet_command *cgc)
+{
+ struct scsi_cd *cd = cdi->handle;
+ struct scsi_device *sdev = cd->device;
+
+ if (cgc->cmd[0] == GPCMD_READ_DISC_INFO && sdev->no_read_disc_info)
+ return -EDRIVE_CANT_DO_THIS;
+
+ if (cgc->timeout <= 0)
+ cgc->timeout = IOCTL_TIMEOUT;
+
+ sr_do_ioctl(cd, cgc);
+
+ return cgc->stat;
+}
+
+/**
+ * sr_kref_release - Called to free the scsi_cd structure
+ * @kref: pointer to embedded kref
+ *
+ * sr_ref_mutex must be held entering this routine. Because it is
+ * called on last put, you should always use the scsi_cd_get()
+ * scsi_cd_put() helpers which manipulate the semaphore directly
+ * and never do a direct kref_put().
+ **/
+static void sr_kref_release(struct kref *kref)
+{
+ struct scsi_cd *cd = container_of(kref, struct scsi_cd, kref);
+ struct gendisk *disk = cd->disk;
+
+ spin_lock(&sr_index_lock);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
+ spin_unlock(&sr_index_lock);
+
+ unregister_cdrom(&cd->cdi);
+
+ disk->private_data = NULL;
+
+ put_disk(disk);
+
+ kfree(cd);
+}
+
+static int sr_remove(struct device *dev)
+{
+ struct scsi_cd *cd = dev_get_drvdata(dev);
+
+ scsi_autopm_get_device(cd->device);
+
+ del_gendisk(cd->disk);
+
+ mutex_lock(&sr_ref_mutex);
+ kref_put(&cd->kref, sr_kref_release);
+ mutex_unlock(&sr_ref_mutex);
+
+ return 0;
+}
+
+static int __init init_sr(void)
+{
+ int rc;
+
+ rc = register_blkdev(SCSI_CDROM_MAJOR, "sr");
+ if (rc)
+ return rc;
+ rc = scsi_register_driver(&sr_template.gendrv);
+ if (rc)
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+
+ return rc;
+}
+
+static void __exit exit_sr(void)
+{
+ scsi_unregister_driver(&sr_template.gendrv);
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+}
+
+module_init(init_sr);
+module_exit(exit_sr);
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
new file mode 100644
index 000000000..1de33719a
--- /dev/null
+++ b/drivers/scsi/sr.h
@@ -0,0 +1,81 @@
+/*
+ * sr.h by David Giller
+ * CD-ROM disk driver header file
+ *
+ * adapted from:
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@andante.org to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+
+#ifndef _SR_H
+#define _SR_H
+
+#include <linux/genhd.h>
+#include <linux/kref.h>
+
+#define MAX_RETRIES 3
+#define SR_TIMEOUT (30 * HZ)
+
+struct scsi_device;
+
+/* The CDROM is fairly slow, so we need a little extra time */
+/* In fact, it is very slow if it has to spin up first */
+#define IOCTL_TIMEOUT 30*HZ
+
+
+typedef struct scsi_cd {
+ struct scsi_driver *driver;
+ unsigned capacity; /* size in blocks */
+ struct scsi_device *device;
+ unsigned int vendor; /* vendor code, see sr_vendor.c */
+ unsigned long ms_offset; /* for reading multisession-CD's */
+ unsigned writeable : 1;
+ unsigned use:1; /* is this device still supportable */
+ unsigned xa_flag:1; /* CD has XA sectors ? */
+ unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
+ unsigned readcd_cdda:1; /* reading audio data using READ_CD */
+ unsigned media_present:1; /* media is present */
+
+ /* GET_EVENT spurious event handling, blk layer guarantees exclusion */
+ int tur_mismatch; /* nr of get_event TUR mismatches */
+ bool tur_changed:1; /* changed according to TUR */
+ bool get_event_changed:1; /* changed according to GET_EVENT */
+ bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
+
+ struct cdrom_device_info cdi;
+ /* We hold gendisk and scsi_device references on probe and use
+ * the refs on this kref to decide when to release them */
+ struct kref kref;
+ struct gendisk *disk;
+} Scsi_CD;
+
+#define sr_printk(prefix, cd, fmt, a...) \
+ sdev_prefix_printk(prefix, (cd)->device, (cd)->cdi.name, fmt, ##a)
+
+int sr_do_ioctl(Scsi_CD *, struct packet_command *);
+
+int sr_lock_door(struct cdrom_device_info *, int);
+int sr_tray_move(struct cdrom_device_info *, int);
+int sr_drive_status(struct cdrom_device_info *, int);
+int sr_disk_status(struct cdrom_device_info *);
+int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
+int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
+int sr_reset(struct cdrom_device_info *);
+int sr_select_speed(struct cdrom_device_info *cdi, int speed);
+int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
+
+int sr_is_xa(Scsi_CD *);
+
+/* sr_vendor.c */
+void sr_vendor_init(Scsi_CD *);
+int sr_cd_check(struct cdrom_device_info *);
+int sr_set_blocklength(Scsi_CD *, int blocklength);
+
+#endif
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
new file mode 100644
index 000000000..03054c0e7
--- /dev/null
+++ b/drivers/scsi/sr_ioctl.c
@@ -0,0 +1,593 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/blkpg.h>
+#include <linux/cdrom.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+
+#include "sr.h"
+
+#if 0
+#define DEBUG
+#endif
+
+/* The sr_is_xa() seems to trigger firmware bugs with some drives :-(
+ * It is off by default and can be turned on with this module parameter */
+static int xa_test = 0;
+
+module_param(xa_test, int, S_IRUGO | S_IWUSR);
+
+/* primitive to determine whether we need to have GFP_DMA set based on
+ * the status of the unchecked_isa_dma flag in the host structure */
+#define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0)
+
+static int sr_read_tochdr(struct cdrom_device_info *cdi,
+ struct cdrom_tochdr *tochdr)
+{
+ struct scsi_cd *cd = cdi->handle;
+ struct packet_command cgc;
+ int result;
+ unsigned char *buffer;
+
+ buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.timeout = IOCTL_TIMEOUT;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[8] = 12; /* LSB of length */
+ cgc.buffer = buffer;
+ cgc.buflen = 12;
+ cgc.quiet = 1;
+ cgc.data_direction = DMA_FROM_DEVICE;
+
+ result = sr_do_ioctl(cd, &cgc);
+
+ tochdr->cdth_trk0 = buffer[2];
+ tochdr->cdth_trk1 = buffer[3];
+
+ kfree(buffer);
+ return result;
+}
+
+static int sr_read_tocentry(struct cdrom_device_info *cdi,
+ struct cdrom_tocentry *tocentry)
+{
+ struct scsi_cd *cd = cdi->handle;
+ struct packet_command cgc;
+ int result;
+ unsigned char *buffer;
+
+ buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.timeout = IOCTL_TIMEOUT;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] |= (tocentry->cdte_format == CDROM_MSF) ? 0x02 : 0;
+ cgc.cmd[6] = tocentry->cdte_track;
+ cgc.cmd[8] = 12; /* LSB of length */
+ cgc.buffer = buffer;
+ cgc.buflen = 12;
+ cgc.data_direction = DMA_FROM_DEVICE;
+
+ result = sr_do_ioctl(cd, &cgc);
+
+ tocentry->cdte_ctrl = buffer[5] & 0xf;
+ tocentry->cdte_adr = buffer[5] >> 4;
+ tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
+ if (tocentry->cdte_format == CDROM_MSF) {
+ tocentry->cdte_addr.msf.minute = buffer[9];
+ tocentry->cdte_addr.msf.second = buffer[10];
+ tocentry->cdte_addr.msf.frame = buffer[11];
+ } else
+ tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ + buffer[10]) << 8) + buffer[11];
+
+ kfree(buffer);
+ return result;
+}
+
+#define IOCTL_RETRIES 3
+
+/* ATAPI drives don't have a SCMD_PLAYAUDIO_TI command. When these drives
+ are emulating a SCSI device via the idescsi module, they need to have
+ CDROMPLAYTRKIND commands translated into CDROMPLAYMSF commands for them */
+
+static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti)
+{
+ struct cdrom_tocentry trk0_te, trk1_te;
+ struct cdrom_tochdr tochdr;
+ struct packet_command cgc;
+ int ntracks, ret;
+
+ ret = sr_read_tochdr(cdi, &tochdr);
+ if (ret)
+ return ret;
+
+ ntracks = tochdr.cdth_trk1 - tochdr.cdth_trk0 + 1;
+
+ if (ti->cdti_trk1 == ntracks)
+ ti->cdti_trk1 = CDROM_LEADOUT;
+ else if (ti->cdti_trk1 != CDROM_LEADOUT)
+ ti->cdti_trk1 ++;
+
+ trk0_te.cdte_track = ti->cdti_trk0;
+ trk0_te.cdte_format = CDROM_MSF;
+ trk1_te.cdte_track = ti->cdti_trk1;
+ trk1_te.cdte_format = CDROM_MSF;
+
+ ret = sr_read_tocentry(cdi, &trk0_te);
+ if (ret)
+ return ret;
+ ret = sr_read_tocentry(cdi, &trk1_te);
+ if (ret)
+ return ret;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF;
+ cgc.cmd[3] = trk0_te.cdte_addr.msf.minute;
+ cgc.cmd[4] = trk0_te.cdte_addr.msf.second;
+ cgc.cmd[5] = trk0_te.cdte_addr.msf.frame;
+ cgc.cmd[6] = trk1_te.cdte_addr.msf.minute;
+ cgc.cmd[7] = trk1_te.cdte_addr.msf.second;
+ cgc.cmd[8] = trk1_te.cdte_addr.msf.frame;
+ cgc.data_direction = DMA_NONE;
+ cgc.timeout = IOCTL_TIMEOUT;
+ return sr_do_ioctl(cdi->handle, &cgc);
+}
+
+static int sr_play_trkind(struct cdrom_device_info *cdi,
+ struct cdrom_ti *ti)
+
+{
+ struct scsi_cd *cd = cdi->handle;
+ struct packet_command cgc;
+ int result;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.timeout = IOCTL_TIMEOUT;
+ cgc.cmd[0] = GPCMD_PLAYAUDIO_TI;
+ cgc.cmd[4] = ti->cdti_trk0;
+ cgc.cmd[5] = ti->cdti_ind0;
+ cgc.cmd[7] = ti->cdti_trk1;
+ cgc.cmd[8] = ti->cdti_ind1;
+ cgc.data_direction = DMA_NONE;
+
+ result = sr_do_ioctl(cd, &cgc);
+ if (result == -EDRIVE_CANT_DO_THIS)
+ result = sr_fake_playtrkind(cdi, ti);
+
+ return result;
+}
+
+/* We do our own retries because we want to know what the specific
+ error code is. Normally the UNIT_ATTENTION code will automatically
+ clear after one error */
+
+int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
+{
+ struct scsi_device *SDev;
+ struct scsi_sense_hdr sshdr;
+ int result, err = 0, retries = 0;
+ struct request_sense *sense = cgc->sense;
+
+ SDev = cd->device;
+
+ if (!sense) {
+ sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ if (!sense) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
+ retry:
+ if (!scsi_block_when_processing_errors(SDev)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ memset(sense, 0, sizeof(*sense));
+ result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
+ cgc->buffer, cgc->buflen, (char *)sense,
+ cgc->timeout, IOCTL_RETRIES, 0, NULL);
+
+ scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
+
+ /* Minimal error checking. Ignore cases we know about, and report the rest. */
+ if (driver_byte(result) != 0) {
+ switch (sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ SDev->changed = 1;
+ if (!cgc->quiet)
+ sr_printk(KERN_INFO, cd,
+ "disc change detected.\n");
+ if (retries++ < 10)
+ goto retry;
+ err = -ENOMEDIUM;
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ if (sshdr.asc == 0x04 &&
+ sshdr.ascq == 0x01) {
+ /* sense: Logical unit is in process of becoming ready */
+ if (!cgc->quiet)
+ sr_printk(KERN_INFO, cd,
+ "CDROM not ready yet.\n");
+ if (retries++ < 10) {
+ /* sleep 2 sec and try again */
+ ssleep(2);
+ goto retry;
+ } else {
+ /* 20 secs are enough? */
+ err = -ENOMEDIUM;
+ break;
+ }
+ }
+ if (!cgc->quiet)
+ sr_printk(KERN_INFO, cd,
+ "CDROM not ready. Make sure there "
+ "is a disc in the drive.\n");
+ err = -ENOMEDIUM;
+ break;
+ case ILLEGAL_REQUEST:
+ err = -EIO;
+ if (sshdr.asc == 0x20 &&
+ sshdr.ascq == 0x00)
+ /* sense: Invalid command operation code */
+ err = -EDRIVE_CANT_DO_THIS;
+ break;
+ default:
+ err = -EIO;
+ }
+ }
+
+ /* Wake up a process waiting for device */
+ out:
+ if (!cgc->sense)
+ kfree(sense);
+ cgc->stat = err;
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/* interface to cdrom.c */
+
+int sr_tray_move(struct cdrom_device_info *cdi, int pos)
+{
+ Scsi_CD *cd = cdi->handle;
+ struct packet_command cgc;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = GPCMD_START_STOP_UNIT;
+ cgc.cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
+ cgc.data_direction = DMA_NONE;
+ cgc.timeout = IOCTL_TIMEOUT;
+ return sr_do_ioctl(cd, &cgc);
+}
+
+int sr_lock_door(struct cdrom_device_info *cdi, int lock)
+{
+ Scsi_CD *cd = cdi->handle;
+
+ return scsi_set_medium_removal(cd->device, lock ?
+ SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
+}
+
+int sr_drive_status(struct cdrom_device_info *cdi, int slot)
+{
+ struct scsi_cd *cd = cdi->handle;
+ struct scsi_sense_hdr sshdr;
+ struct media_event_desc med;
+
+ if (CDSL_CURRENT != slot) {
+ /* we have no changer support */
+ return -EINVAL;
+ }
+ if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+ return CDS_DISC_OK;
+
+ /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
+ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
+ && sshdr.asc == 0x04 && sshdr.ascq == 0x01)
+ return CDS_DRIVE_NOT_READY;
+
+ if (!cdrom_get_media_event(cdi, &med)) {
+ if (med.media_present)
+ return CDS_DISC_OK;
+ else if (med.door_open)
+ return CDS_TRAY_OPEN;
+ else
+ return CDS_NO_DISC;
+ }
+
+ /*
+ * SK/ASC/ASCQ of 2/4/2 means "initialization required"
+ * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close
+ * the tray, which resolves the initialization requirement.
+ */
+ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
+ && sshdr.asc == 0x04 && sshdr.ascq == 0x02)
+ return CDS_TRAY_OPEN;
+
+ /*
+ * 0x04 is format in progress .. but there must be a disc present!
+ */
+ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
+ return CDS_DISC_OK;
+
+ /*
+ * If not using Mt Fuji extended media tray reports,
+ * just return TRAY_OPEN since ATAPI doesn't provide
+ * any other way to detect this...
+ */
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ return CDS_NO_DISC;
+ else
+ return CDS_TRAY_OPEN;
+
+ return CDS_DRIVE_NOT_READY;
+}
+
+int sr_disk_status(struct cdrom_device_info *cdi)
+{
+ Scsi_CD *cd = cdi->handle;
+ struct cdrom_tochdr toc_h;
+ struct cdrom_tocentry toc_e;
+ int i, rc, have_datatracks = 0;
+
+ /* look for data tracks */
+ rc = sr_read_tochdr(cdi, &toc_h);
+ if (rc)
+ return (rc == -ENOMEDIUM) ? CDS_NO_DISC : CDS_NO_INFO;
+
+ for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) {
+ toc_e.cdte_track = i;
+ toc_e.cdte_format = CDROM_LBA;
+ if (sr_read_tocentry(cdi, &toc_e))
+ return CDS_NO_INFO;
+ if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) {
+ have_datatracks = 1;
+ break;
+ }
+ }
+ if (!have_datatracks)
+ return CDS_AUDIO;
+
+ if (cd->xa_flag)
+ return CDS_XA_2_1;
+ else
+ return CDS_DATA_1;
+}
+
+int sr_get_last_session(struct cdrom_device_info *cdi,
+ struct cdrom_multisession *ms_info)
+{
+ Scsi_CD *cd = cdi->handle;
+
+ ms_info->addr.lba = cd->ms_offset;
+ ms_info->xa_flag = cd->xa_flag || cd->ms_offset > 0;
+
+ return 0;
+}
+
+int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
+{
+ Scsi_CD *cd = cdi->handle;
+ struct packet_command cgc;
+ char *buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
+ int result;
+
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
+ cgc.cmd[2] = 0x40; /* I do want the subchannel info */
+ cgc.cmd[3] = 0x02; /* Give me medium catalog number info */
+ cgc.cmd[8] = 24;
+ cgc.buffer = buffer;
+ cgc.buflen = 24;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = IOCTL_TIMEOUT;
+ result = sr_do_ioctl(cd, &cgc);
+
+ memcpy(mcn->medium_catalog_number, buffer + 9, 13);
+ mcn->medium_catalog_number[13] = 0;
+
+ kfree(buffer);
+ return result;
+}
+
+int sr_reset(struct cdrom_device_info *cdi)
+{
+ return 0;
+}
+
+int sr_select_speed(struct cdrom_device_info *cdi, int speed)
+{
+ Scsi_CD *cd = cdi->handle;
+ struct packet_command cgc;
+
+ if (speed == 0)
+ speed = 0xffff; /* set to max */
+ else
+ speed *= 177; /* Nx to kbyte/s */
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */
+ cgc.cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
+ cgc.cmd[3] = speed & 0xff; /* LSB */
+ cgc.data_direction = DMA_NONE;
+ cgc.timeout = IOCTL_TIMEOUT;
+
+ if (sr_do_ioctl(cd, &cgc))
+ return -EIO;
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/* this is called by the generic cdrom driver. arg is a _kernel_ pointer, */
+/* because the generic cdrom driver does the user access stuff for us. */
+/* only cdromreadtochdr and cdromreadtocentry are left - for use with the */
+/* sr_disk_status interface for the generic cdrom driver. */
+
+int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case CDROMREADTOCHDR:
+ return sr_read_tochdr(cdi, arg);
+ case CDROMREADTOCENTRY:
+ return sr_read_tocentry(cdi, arg);
+ case CDROMPLAYTRKIND:
+ return sr_play_trkind(cdi, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* -----------------------------------------------------------------------
+ * a function to read all sorts of funny cdrom sectors using the READ_CD
+ * scsi-3 mmc command
+ *
+ * lba: linear block address
+ * format: 0 = data (anything)
+ * 1 = audio
+ * 2 = data (mode 1)
+ * 3 = data (mode 2)
+ * 4 = data (mode 2 form1)
+ * 5 = data (mode 2 form2)
+ * blksize: 2048 | 2336 | 2340 | 2352
+ */
+
+static int sr_read_cd(Scsi_CD *cd, unsigned char *dest, int lba, int format, int blksize)
+{
+ struct packet_command cgc;
+
+#ifdef DEBUG
+ sr_printk(KERN_INFO, cd, "sr_read_cd lba=%d format=%d blksize=%d\n",
+ lba, format, blksize);
+#endif
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = GPCMD_READ_CD; /* READ_CD */
+ cgc.cmd[1] = ((format & 7) << 2);
+ cgc.cmd[2] = (unsigned char) (lba >> 24) & 0xff;
+ cgc.cmd[3] = (unsigned char) (lba >> 16) & 0xff;
+ cgc.cmd[4] = (unsigned char) (lba >> 8) & 0xff;
+ cgc.cmd[5] = (unsigned char) lba & 0xff;
+ cgc.cmd[8] = 1;
+ switch (blksize) {
+ case 2336:
+ cgc.cmd[9] = 0x58;
+ break;
+ case 2340:
+ cgc.cmd[9] = 0x78;
+ break;
+ case 2352:
+ cgc.cmd[9] = 0xf8;
+ break;
+ default:
+ cgc.cmd[9] = 0x10;
+ break;
+ }
+ cgc.buffer = dest;
+ cgc.buflen = blksize;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = IOCTL_TIMEOUT;
+ return sr_do_ioctl(cd, &cgc);
+}
+
+/*
+ * read sectors with blocksizes other than 2048
+ */
+
+static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest)
+{
+ struct packet_command cgc;
+ int rc;
+
+ /* we try the READ CD command first... */
+ if (cd->readcd_known) {
+ rc = sr_read_cd(cd, dest, lba, 0, blksize);
+ if (-EDRIVE_CANT_DO_THIS != rc)
+ return rc;
+ cd->readcd_known = 0;
+ sr_printk(KERN_INFO, cd,
+ "CDROM does'nt support READ CD (0xbe) command\n");
+ /* fall & retry the other way */
+ }
+ /* ... if this fails, we switch the blocksize using MODE SELECT */
+ if (blksize != cd->device->sector_size) {
+ if (0 != (rc = sr_set_blocklength(cd, blksize)))
+ return rc;
+ }
+#ifdef DEBUG
+ sr_printk(KERN_INFO, cd, "sr_read_sector lba=%d blksize=%d\n",
+ lba, blksize);
+#endif
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = GPCMD_READ_10;
+ cgc.cmd[2] = (unsigned char) (lba >> 24) & 0xff;
+ cgc.cmd[3] = (unsigned char) (lba >> 16) & 0xff;
+ cgc.cmd[4] = (unsigned char) (lba >> 8) & 0xff;
+ cgc.cmd[5] = (unsigned char) lba & 0xff;
+ cgc.cmd[8] = 1;
+ cgc.buffer = dest;
+ cgc.buflen = blksize;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = IOCTL_TIMEOUT;
+ rc = sr_do_ioctl(cd, &cgc);
+
+ return rc;
+}
+
+/*
+ * read a sector in raw mode to check the sector format
+ * ret: 1 == mode2 (XA), 0 == mode1, <0 == error
+ */
+
+int sr_is_xa(Scsi_CD *cd)
+{
+ unsigned char *raw_sector;
+ int is_xa;
+
+ if (!xa_test)
+ return 0;
+
+ raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
+ if (!raw_sector)
+ return -ENOMEM;
+ if (0 == sr_read_sector(cd, cd->ms_offset + 16,
+ CD_FRAMESIZE_RAW1, raw_sector)) {
+ is_xa = (raw_sector[3] == 0x02) ? 1 : 0;
+ } else {
+ /* read a raw sector failed for some reason. */
+ is_xa = -1;
+ }
+ kfree(raw_sector);
+#ifdef DEBUG
+ sr_printk(KERN_INFO, cd, "sr_is_xa: %d\n", is_xa);
+#endif
+ return is_xa;
+}
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
new file mode 100644
index 000000000..11a238cb2
--- /dev/null
+++ b/drivers/scsi/sr_vendor.c
@@ -0,0 +1,329 @@
+/* -*-linux-c-*-
+
+ * vendor-specific code for SCSI CD-ROM's goes here.
+ *
+ * This is needed becauce most of the new features (multisession and
+ * the like) are too new to be included into the SCSI-II standard (to
+ * be exact: there is'nt anything in my draft copy).
+ *
+ * Aug 1997: Ha! Got a SCSI-3 cdrom spec across my fingers. SCSI-3 does
+ * multisession using the READ TOC command (like SONY).
+ *
+ * Rearranged stuff here: SCSI-3 is included allways, support
+ * for NEC/TOSHIBA/HP commands is optional.
+ *
+ * Gerd Knorr <kraxel@cs.tu-berlin.de>
+ *
+ * --------------------------------------------------------------------------
+ *
+ * support for XA/multisession-CD's
+ *
+ * - NEC: Detection and support of multisession CD's.
+ *
+ * - TOSHIBA: Detection and support of multisession CD's.
+ * Some XA-Sector tweaking, required for older drives.
+ *
+ * - SONY: Detection and support of multisession CD's.
+ * added by Thomas Quinot <thomas@cuivre.freenix.fr>
+ *
+ * - PIONEER, HITACHI, PLEXTOR, MATSHITA, TEAC, PHILIPS: known to
+ * work with SONY (SCSI3 now) code.
+ *
+ * - HP: Much like SONY, but a little different... (Thomas)
+ * HP-Writers only ??? Maybe other CD-Writers work with this too ?
+ * HP 6020 writers now supported.
+ */
+
+#include <linux/cdrom.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/bcd.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+
+#include "sr.h"
+
+#if 0
+#define DEBUG
+#endif
+
+/* here are some constants to sort the vendors into groups */
+
+#define VENDOR_SCSI3 1 /* default: scsi-3 mmc */
+
+#define VENDOR_NEC 2
+#define VENDOR_TOSHIBA 3
+#define VENDOR_WRITER 4 /* pre-scsi3 writers */
+
+#define VENDOR_TIMEOUT 30*HZ
+
+void sr_vendor_init(Scsi_CD *cd)
+{
+#ifndef CONFIG_BLK_DEV_SR_VENDOR
+ cd->vendor = VENDOR_SCSI3;
+#else
+ const char *vendor = cd->device->vendor;
+ const char *model = cd->device->model;
+
+ /* default */
+ cd->vendor = VENDOR_SCSI3;
+ if (cd->readcd_known)
+ /* this is true for scsi3/mmc drives - no more checks */
+ return;
+
+ if (cd->device->type == TYPE_WORM) {
+ cd->vendor = VENDOR_WRITER;
+
+ } else if (!strncmp(vendor, "NEC", 3)) {
+ cd->vendor = VENDOR_NEC;
+ if (!strncmp(model, "CD-ROM DRIVE:25", 15) ||
+ !strncmp(model, "CD-ROM DRIVE:36", 15) ||
+ !strncmp(model, "CD-ROM DRIVE:83", 15) ||
+ !strncmp(model, "CD-ROM DRIVE:84 ", 16)
+#if 0
+ /* my NEC 3x returns the read-raw data if a read-raw
+ is followed by a read for the same sector - aeb */
+ || !strncmp(model, "CD-ROM DRIVE:500", 16)
+#endif
+ )
+ /* these can't handle multisession, may hang */
+ cd->cdi.mask |= CDC_MULTI_SESSION;
+
+ } else if (!strncmp(vendor, "TOSHIBA", 7)) {
+ cd->vendor = VENDOR_TOSHIBA;
+
+ }
+#endif
+}
+
+
+/* small handy function for switching block length using MODE SELECT,
+ * used by sr_read_sector() */
+
+int sr_set_blocklength(Scsi_CD *cd, int blocklength)
+{
+ unsigned char *buffer; /* the buffer for the ioctl */
+ struct packet_command cgc;
+ struct ccs_modesel_head *modesel;
+ int rc, density = 0;
+
+#ifdef CONFIG_BLK_DEV_SR_VENDOR
+ if (cd->vendor == VENDOR_TOSHIBA)
+ density = (blocklength > 2048) ? 0x81 : 0x83;
+#endif
+
+ buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+
+#ifdef DEBUG
+ sr_printk(KERN_INFO, cd, "MODE SELECT 0x%x/%d\n", density, blocklength);
+#endif
+ memset(&cgc, 0, sizeof(struct packet_command));
+ cgc.cmd[0] = MODE_SELECT;
+ cgc.cmd[1] = (1 << 4);
+ cgc.cmd[4] = 12;
+ modesel = (struct ccs_modesel_head *) buffer;
+ memset(modesel, 0, sizeof(*modesel));
+ modesel->block_desc_length = 0x08;
+ modesel->density = density;
+ modesel->block_length_med = (blocklength >> 8) & 0xff;
+ modesel->block_length_lo = blocklength & 0xff;
+ cgc.buffer = buffer;
+ cgc.buflen = sizeof(*modesel);
+ cgc.data_direction = DMA_TO_DEVICE;
+ cgc.timeout = VENDOR_TIMEOUT;
+ if (0 == (rc = sr_do_ioctl(cd, &cgc))) {
+ cd->device->sector_size = blocklength;
+ }
+#ifdef DEBUG
+ else
+ sr_printk(KERN_INFO, cd,
+ "switching blocklength to %d bytes failed\n",
+ blocklength);
+#endif
+ kfree(buffer);
+ return rc;
+}
+
+/* This function gets called after a media change. Checks if the CD is
+ multisession, asks for offset etc. */
+
+int sr_cd_check(struct cdrom_device_info *cdi)
+{
+ Scsi_CD *cd = cdi->handle;
+ unsigned long sector;
+ unsigned char *buffer; /* the buffer for the ioctl */
+ struct packet_command cgc;
+ int rc, no_multi;
+
+ if (cd->cdi.mask & CDC_MULTI_SESSION)
+ return 0;
+
+ buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+
+ sector = 0; /* the multisession sector offset goes here */
+ no_multi = 0; /* flag: the drive can't handle multisession */
+ rc = 0;
+
+ memset(&cgc, 0, sizeof(struct packet_command));
+
+ switch (cd->vendor) {
+
+ case VENDOR_SCSI3:
+ cgc.cmd[0] = READ_TOC;
+ cgc.cmd[8] = 12;
+ cgc.cmd[9] = 0x40;
+ cgc.buffer = buffer;
+ cgc.buflen = 12;
+ cgc.quiet = 1;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = VENDOR_TIMEOUT;
+ rc = sr_do_ioctl(cd, &cgc);
+ if (rc != 0)
+ break;
+ if ((buffer[0] << 8) + buffer[1] < 0x0a) {
+ sr_printk(KERN_INFO, cd, "Hmm, seems the drive "
+ "doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ sector = buffer[11] + (buffer[10] << 8) +
+ (buffer[9] << 16) + (buffer[8] << 24);
+ if (buffer[6] <= 1) {
+ /* ignore sector offsets from first track */
+ sector = 0;
+ }
+ break;
+
+#ifdef CONFIG_BLK_DEV_SR_VENDOR
+ case VENDOR_NEC:{
+ unsigned long min, sec, frame;
+ cgc.cmd[0] = 0xde;
+ cgc.cmd[1] = 0x03;
+ cgc.cmd[2] = 0xb0;
+ cgc.buffer = buffer;
+ cgc.buflen = 0x16;
+ cgc.quiet = 1;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = VENDOR_TIMEOUT;
+ rc = sr_do_ioctl(cd, &cgc);
+ if (rc != 0)
+ break;
+ if (buffer[14] != 0 && buffer[14] != 0xb0) {
+ sr_printk(KERN_INFO, cd, "Hmm, seems the cdrom "
+ "doesn't support multisession CD's\n");
+
+ no_multi = 1;
+ break;
+ }
+ min = bcd2bin(buffer[15]);
+ sec = bcd2bin(buffer[16]);
+ frame = bcd2bin(buffer[17]);
+ sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
+ break;
+ }
+
+ case VENDOR_TOSHIBA:{
+ unsigned long min, sec, frame;
+
+ /* we request some disc information (is it a XA-CD ?,
+ * where starts the last session ?) */
+ cgc.cmd[0] = 0xc7;
+ cgc.cmd[1] = 0x03;
+ cgc.buffer = buffer;
+ cgc.buflen = 4;
+ cgc.quiet = 1;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = VENDOR_TIMEOUT;
+ rc = sr_do_ioctl(cd, &cgc);
+ if (rc == -EINVAL) {
+ sr_printk(KERN_INFO, cd, "Hmm, seems the drive "
+ "doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ if (rc != 0)
+ break;
+ min = bcd2bin(buffer[1]);
+ sec = bcd2bin(buffer[2]);
+ frame = bcd2bin(buffer[3]);
+ sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
+ if (sector)
+ sector -= CD_MSF_OFFSET;
+ sr_set_blocklength(cd, 2048);
+ break;
+ }
+
+ case VENDOR_WRITER:
+ cgc.cmd[0] = READ_TOC;
+ cgc.cmd[8] = 0x04;
+ cgc.cmd[9] = 0x40;
+ cgc.buffer = buffer;
+ cgc.buflen = 0x04;
+ cgc.quiet = 1;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = VENDOR_TIMEOUT;
+ rc = sr_do_ioctl(cd, &cgc);
+ if (rc != 0) {
+ break;
+ }
+ if ((rc = buffer[2]) == 0) {
+ sr_printk(KERN_WARNING, cd,
+ "No finished session\n");
+ break;
+ }
+ cgc.cmd[0] = READ_TOC; /* Read TOC */
+ cgc.cmd[6] = rc & 0x7f; /* number of last session */
+ cgc.cmd[8] = 0x0c;
+ cgc.cmd[9] = 0x40;
+ cgc.buffer = buffer;
+ cgc.buflen = 12;
+ cgc.quiet = 1;
+ cgc.data_direction = DMA_FROM_DEVICE;
+ cgc.timeout = VENDOR_TIMEOUT;
+ rc = sr_do_ioctl(cd, &cgc);
+ if (rc != 0) {
+ break;
+ }
+ sector = buffer[11] + (buffer[10] << 8) +
+ (buffer[9] << 16) + (buffer[8] << 24);
+ break;
+#endif /* CONFIG_BLK_DEV_SR_VENDOR */
+
+ default:
+ /* should not happen */
+ sr_printk(KERN_WARNING, cd,
+ "unknown vendor code (%i), not initialized ?\n",
+ cd->vendor);
+ sector = 0;
+ no_multi = 1;
+ break;
+ }
+ cd->ms_offset = sector;
+ cd->xa_flag = 0;
+ if (CDS_AUDIO != sr_disk_status(cdi) && 1 == sr_is_xa(cd))
+ cd->xa_flag = 1;
+
+ if (2048 != cd->device->sector_size) {
+ sr_set_blocklength(cd, 2048);
+ }
+ if (no_multi)
+ cdi->mask |= CDC_MULTI_SESSION;
+
+#ifdef DEBUG
+ if (sector)
+ sr_printk(KERN_DEBUG, cd, "multisession offset=%lu\n",
+ sector);
+#endif
+ kfree(buffer);
+ return rc;
+}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
new file mode 100644
index 000000000..9a1c34205
--- /dev/null
+++ b/drivers/scsi/st.c
@@ -0,0 +1,4610 @@
+/*
+ SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
+ file Documentation/scsi/st.txt for more information.
+
+ History:
+ Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
+ Contribution and ideas from several people including (in alphabetical
+ order) Klaus Ehrenfried, Eugene Exarevsky, Eric Lee Green, Wolfgang Denk,
+ Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
+ Michael Schaefer, J"org Weule, and Eric Youngdale.
+
+ Copyright 1992 - 2010 Kai Makisara
+ email Kai.Makisara@kolumbus.fi
+
+ Some small formal changes - aeb, 950809
+
+ Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
+ */
+
+static const char *verstr = "20101219";
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/mtio.h>
+#include <linux/cdrom.h>
+#include <linux/ioctl.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#include <asm/dma.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/sg.h>
+
+
+/* The driver prints some debugging information on the console if DEBUG
+ is defined and non-zero. */
+#define DEBUG 1
+#define NO_DEBUG 0
+
+#define ST_DEB_MSG KERN_NOTICE
+#if DEBUG
+/* The message level for the debug messages is currently set to KERN_NOTICE
+ so that people can easily see the messages. Later when the debugging messages
+ in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
+#define DEB(a) a
+#define DEBC(a) if (debugging) { a ; }
+#else
+#define DEB(a)
+#define DEBC(a)
+#endif
+
+#define ST_KILOBYTE 1024
+
+#include "st_options.h"
+#include "st.h"
+
+static int buffer_kbs;
+static int max_sg_segs;
+static int try_direct_io = TRY_DIRECT_IO;
+static int try_rdio = 1;
+static int try_wdio = 1;
+static int debug_flag;
+
+static struct class st_sysfs_class;
+static const struct attribute_group *st_dev_groups[];
+
+MODULE_AUTHOR("Kai Makisara");
+MODULE_DESCRIPTION("SCSI tape (st) driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
+
+/* Set 'perm' (4th argument) to 0 to disable module_param's definition
+ * of sysfs parameters (which module_param doesn't yet support).
+ * Sysfs parameters defined explicitly later.
+ */
+module_param_named(buffer_kbs, buffer_kbs, int, 0);
+MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size for fixed block mode (KB; 32)");
+module_param_named(max_sg_segs, max_sg_segs, int, 0);
+MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)");
+module_param_named(try_direct_io, try_direct_io, int, 0);
+MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)");
+module_param_named(debug_flag, debug_flag, int, 0);
+MODULE_PARM_DESC(debug_flag, "Enable DEBUG, same as setting debugging=1");
+
+
+/* Extra parameters for testing */
+module_param_named(try_rdio, try_rdio, int, 0);
+MODULE_PARM_DESC(try_rdio, "Try direct read i/o when possible");
+module_param_named(try_wdio, try_wdio, int, 0);
+MODULE_PARM_DESC(try_wdio, "Try direct write i/o when possible");
+
+#ifndef MODULE
+static int write_threshold_kbs; /* retained for compatibility */
+static struct st_dev_parm {
+ char *name;
+ int *val;
+} parms[] __initdata = {
+ {
+ "buffer_kbs", &buffer_kbs
+ },
+ { /* Retained for compatibility with 2.4 */
+ "write_threshold_kbs", &write_threshold_kbs
+ },
+ {
+ "max_sg_segs", NULL
+ },
+ {
+ "try_direct_io", &try_direct_io
+ },
+ {
+ "debug_flag", &debug_flag
+ }
+};
+#endif
+
+/* Restrict the number of modes so that names for all are assigned */
+#if ST_NBR_MODES > 16
+#error "Maximum number of modes is 16"
+#endif
+/* Bit reversed order to get same names for same minors with all
+ mode counts */
+static const char *st_formats[] = {
+ "", "r", "k", "s", "l", "t", "o", "u",
+ "m", "v", "p", "x", "a", "y", "q", "z"};
+
+/* The default definitions have been moved to st_options.h */
+
+#define ST_FIXED_BUFFER_SIZE (ST_FIXED_BUFFER_BLOCKS * ST_KILOBYTE)
+
+/* The buffer size should fit into the 24 bits for length in the
+ 6-byte SCSI read and write commands. */
+#if ST_FIXED_BUFFER_SIZE >= (2 << 24 - 1)
+#error "Buffer size should not exceed (2 << 24 - 1) bytes!"
+#endif
+
+static int debugging = DEBUG;
+
+#define MAX_RETRIES 0
+#define MAX_WRITE_RETRIES 0
+#define MAX_READY_RETRIES 0
+#define NO_TAPE NOT_READY
+
+#define ST_TIMEOUT (900 * HZ)
+#define ST_LONG_TIMEOUT (14000 * HZ)
+
+/* Remove mode bits and auto-rewind bit (7) */
+#define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \
+ (iminor(x) & ~(-1 << ST_MODE_SHIFT)) )
+#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
+
+/* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */
+#define TAPE_MINOR(d, m, n) (((d & ~(255 >> (ST_NBR_MODE_BITS + 1))) << (ST_NBR_MODE_BITS + 1)) | \
+ (d & (255 >> (ST_NBR_MODE_BITS + 1))) | (m << ST_MODE_SHIFT) | ((n != 0) << 7) )
+
+/* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower
+ 24 bits) */
+#define SET_DENS_AND_BLK 0x10001
+
+static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE;
+static int st_max_sg_segs = ST_MAX_SG;
+
+static int modes_defined;
+
+static int enlarge_buffer(struct st_buffer *, int, int);
+static void clear_buffer(struct st_buffer *);
+static void normalize_buffer(struct st_buffer *);
+static int append_to_buffer(const char __user *, struct st_buffer *, int);
+static int from_buffer(struct st_buffer *, char __user *, int);
+static void move_buffer_data(struct st_buffer *, int);
+
+static int sgl_map_user_pages(struct st_buffer *, const unsigned int,
+ unsigned long, size_t, int);
+static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
+
+static int st_probe(struct device *);
+static int st_remove(struct device *);
+
+static int do_create_sysfs_files(void);
+static void do_remove_sysfs_files(void);
+
+static struct scsi_driver st_template = {
+ .gendrv = {
+ .name = "st",
+ .owner = THIS_MODULE,
+ .probe = st_probe,
+ .remove = st_remove,
+ },
+};
+
+static int st_compression(struct scsi_tape *, int);
+
+static int find_partition(struct scsi_tape *);
+static int switch_partition(struct scsi_tape *);
+
+static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long);
+
+static void scsi_tape_release(struct kref *);
+
+#define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
+
+static DEFINE_MUTEX(st_ref_mutex);
+static DEFINE_SPINLOCK(st_index_lock);
+static DEFINE_SPINLOCK(st_use_lock);
+static DEFINE_IDR(st_index_idr);
+
+
+
+#include "osst_detect.h"
+#ifndef SIGS_FROM_OSST
+#define SIGS_FROM_OSST \
+ {"OnStream", "SC-", "", "osst"}, \
+ {"OnStream", "DI-", "", "osst"}, \
+ {"OnStream", "DP-", "", "osst"}, \
+ {"OnStream", "USB", "", "osst"}, \
+ {"OnStream", "FW-", "", "osst"}
+#endif
+
+static struct scsi_tape *scsi_tape_get(int dev)
+{
+ struct scsi_tape *STp = NULL;
+
+ mutex_lock(&st_ref_mutex);
+ spin_lock(&st_index_lock);
+
+ STp = idr_find(&st_index_idr, dev);
+ if (!STp) goto out;
+
+ kref_get(&STp->kref);
+
+ if (!STp->device)
+ goto out_put;
+
+ if (scsi_device_get(STp->device))
+ goto out_put;
+
+ goto out;
+
+out_put:
+ kref_put(&STp->kref, scsi_tape_release);
+ STp = NULL;
+out:
+ spin_unlock(&st_index_lock);
+ mutex_unlock(&st_ref_mutex);
+ return STp;
+}
+
+static void scsi_tape_put(struct scsi_tape *STp)
+{
+ struct scsi_device *sdev = STp->device;
+
+ mutex_lock(&st_ref_mutex);
+ kref_put(&STp->kref, scsi_tape_release);
+ scsi_device_put(sdev);
+ mutex_unlock(&st_ref_mutex);
+}
+
+struct st_reject_data {
+ char *vendor;
+ char *model;
+ char *rev;
+ char *driver_hint; /* Name of the correct driver, NULL if unknown */
+};
+
+static struct st_reject_data reject_list[] = {
+ /* {"XXX", "Yy-", "", NULL}, example */
+ SIGS_FROM_OSST,
+ {NULL, }};
+
+/* If the device signature is on the list of incompatible drives, the
+ function returns a pointer to the name of the correct driver (if known) */
+static char * st_incompatible(struct scsi_device* SDp)
+{
+ struct st_reject_data *rp;
+
+ for (rp=&(reject_list[0]); rp->vendor != NULL; rp++)
+ if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) &&
+ !strncmp(rp->model, SDp->model, strlen(rp->model)) &&
+ !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) {
+ if (rp->driver_hint)
+ return rp->driver_hint;
+ else
+ return "unknown";
+ }
+ return NULL;
+}
+
+
+static inline char *tape_name(struct scsi_tape *tape)
+{
+ return tape->disk->disk_name;
+}
+
+#define st_printk(prefix, t, fmt, a...) \
+ sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
+#ifdef DEBUG
+#define DEBC_printk(t, fmt, a...) \
+ if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
+#else
+#define DEBC_printk(t, fmt, a...)
+#endif
+
+static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
+{
+ const u8 *ucp;
+ const u8 *sense = SRpnt->sense;
+
+ s->have_sense = scsi_normalize_sense(SRpnt->sense,
+ SCSI_SENSE_BUFFERSIZE, &s->sense_hdr);
+ s->flags = 0;
+
+ if (s->have_sense) {
+ s->deferred = 0;
+ s->remainder_valid =
+ scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64);
+ switch (sense[0] & 0x7f) {
+ case 0x71:
+ s->deferred = 1;
+ case 0x70:
+ s->fixed_format = 1;
+ s->flags = sense[2] & 0xe0;
+ break;
+ case 0x73:
+ s->deferred = 1;
+ case 0x72:
+ s->fixed_format = 0;
+ ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
+ s->flags = ucp ? (ucp[3] & 0xe0) : 0;
+ break;
+ }
+ }
+}
+
+
+/* Convert the result to success code */
+static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
+{
+ int result = SRpnt->result;
+ u8 scode;
+ DEB(const char *stp;)
+ char *name = tape_name(STp);
+ struct st_cmdstatus *cmdstatp;
+
+ if (!result)
+ return 0;
+
+ cmdstatp = &STp->buffer->cmdstat;
+ st_analyze_sense(SRpnt, cmdstatp);
+
+ if (cmdstatp->have_sense)
+ scode = STp->buffer->cmdstat.sense_hdr.sense_key;
+ else
+ scode = 0;
+
+ DEB(
+ if (debugging) {
+ st_printk(ST_DEB_MSG, STp,
+ "Error: %x, cmd: %x %x %x %x %x %x\n", result,
+ SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
+ SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
+ if (cmdstatp->have_sense)
+ __scsi_print_sense(STp->device, name,
+ SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
+ } ) /* end DEB */
+ if (!debugging) { /* Abnormal conditions for tape */
+ if (!cmdstatp->have_sense)
+ st_printk(KERN_WARNING, STp,
+ "Error %x (driver bt 0x%x, host bt 0x%x).\n",
+ result, driver_byte(result), host_byte(result));
+ else if (cmdstatp->have_sense &&
+ scode != NO_SENSE &&
+ scode != RECOVERED_ERROR &&
+ /* scode != UNIT_ATTENTION && */
+ scode != BLANK_CHECK &&
+ scode != VOLUME_OVERFLOW &&
+ SRpnt->cmd[0] != MODE_SENSE &&
+ SRpnt->cmd[0] != TEST_UNIT_READY) {
+
+ __scsi_print_sense(STp->device, name,
+ SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
+ }
+ }
+
+ if (cmdstatp->fixed_format &&
+ STp->cln_mode >= EXTENDED_SENSE_START) { /* Only fixed format sense */
+ if (STp->cln_sense_value)
+ STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] &
+ STp->cln_sense_mask) == STp->cln_sense_value);
+ else
+ STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] &
+ STp->cln_sense_mask) != 0);
+ }
+ if (cmdstatp->have_sense &&
+ cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
+ STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
+
+ STp->pos_unknown |= STp->device->was_reset;
+
+ if (cmdstatp->have_sense &&
+ scode == RECOVERED_ERROR
+#if ST_RECOVERED_WRITE_FATAL
+ && SRpnt->cmd[0] != WRITE_6
+ && SRpnt->cmd[0] != WRITE_FILEMARKS
+#endif
+ ) {
+ STp->recover_count++;
+ STp->recover_reg++;
+
+ DEB(
+ if (debugging) {
+ if (SRpnt->cmd[0] == READ_6)
+ stp = "read";
+ else if (SRpnt->cmd[0] == WRITE_6)
+ stp = "write";
+ else
+ stp = "ioctl";
+ st_printk(ST_DEB_MSG, STp,
+ "Recovered %s error (%d).\n",
+ stp, STp->recover_count);
+ } ) /* end DEB */
+
+ if (cmdstatp->flags == 0)
+ return 0;
+ }
+ return (-EIO);
+}
+
+static struct st_request *st_allocate_request(struct scsi_tape *stp)
+{
+ struct st_request *streq;
+
+ streq = kzalloc(sizeof(*streq), GFP_KERNEL);
+ if (streq)
+ streq->stp = stp;
+ else {
+ st_printk(KERN_ERR, stp,
+ "Can't get SCSI request.\n");
+ if (signal_pending(current))
+ stp->buffer->syscall_result = -EINTR;
+ else
+ stp->buffer->syscall_result = -EBUSY;
+ }
+
+ return streq;
+}
+
+static void st_release_request(struct st_request *streq)
+{
+ kfree(streq);
+}
+
+static void st_scsi_execute_end(struct request *req, int uptodate)
+{
+ struct st_request *SRpnt = req->end_io_data;
+ struct scsi_tape *STp = SRpnt->stp;
+ struct bio *tmp;
+
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
+ STp->buffer->cmdstat.residual = req->resid_len;
+
+ tmp = SRpnt->bio;
+ if (SRpnt->waiting)
+ complete(SRpnt->waiting);
+
+ blk_rq_unmap_user(tmp);
+ __blk_put_request(req->q, req);
+}
+
+static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ int timeout, int retries)
+{
+ struct request *req;
+ struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
+ int err = 0;
+ int write = (data_direction == DMA_TO_DEVICE);
+
+ req = blk_get_request(SRpnt->stp->device->request_queue, write,
+ GFP_KERNEL);
+ if (IS_ERR(req))
+ return DRIVER_ERROR << 24;
+
+ blk_rq_set_block_pc(req);
+ req->cmd_flags |= REQ_QUIET;
+
+ mdata->null_mapped = 1;
+
+ if (bufflen) {
+ err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
+ GFP_KERNEL);
+ if (err) {
+ blk_put_request(req);
+ return DRIVER_ERROR << 24;
+ }
+ }
+
+ SRpnt->bio = req->bio;
+ req->cmd_len = COMMAND_SIZE(cmd[0]);
+ memset(req->cmd, 0, BLK_MAX_CDB);
+ memcpy(req->cmd, cmd, req->cmd_len);
+ req->sense = SRpnt->sense;
+ req->sense_len = 0;
+ req->timeout = timeout;
+ req->retries = retries;
+ req->end_io_data = SRpnt;
+
+ blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
+ return 0;
+}
+
+/* Do the scsi command. Waits until command performed if do_wait is true.
+ Otherwise write_behind_check() is used to check that the command
+ has finished. */
+static struct st_request *
+st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd,
+ int bytes, int direction, int timeout, int retries, int do_wait)
+{
+ struct completion *waiting;
+ struct rq_map_data *mdata = &STp->buffer->map_data;
+ int ret;
+
+ /* if async, make sure there's no command outstanding */
+ if (!do_wait && ((STp->buffer)->last_SRpnt)) {
+ st_printk(KERN_ERR, STp,
+ "Async command already active.\n");
+ if (signal_pending(current))
+ (STp->buffer)->syscall_result = (-EINTR);
+ else
+ (STp->buffer)->syscall_result = (-EBUSY);
+ return NULL;
+ }
+
+ if (!SRpnt) {
+ SRpnt = st_allocate_request(STp);
+ if (!SRpnt)
+ return NULL;
+ }
+
+ /* If async IO, set last_SRpnt. This ptr tells write_behind_check
+ which IO is outstanding. It's nulled out when the IO completes. */
+ if (!do_wait)
+ (STp->buffer)->last_SRpnt = SRpnt;
+
+ waiting = &STp->wait;
+ init_completion(waiting);
+ SRpnt->waiting = waiting;
+
+ if (STp->buffer->do_dio) {
+ mdata->page_order = 0;
+ mdata->nr_entries = STp->buffer->sg_segs;
+ mdata->pages = STp->buffer->mapped_pages;
+ } else {
+ mdata->page_order = STp->buffer->reserved_page_order;
+ mdata->nr_entries =
+ DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
+ mdata->pages = STp->buffer->reserved_pages;
+ mdata->offset = 0;
+ }
+
+ memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
+ STp->buffer->cmdstat.have_sense = 0;
+ STp->buffer->syscall_result = 0;
+
+ ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout,
+ retries);
+ if (ret) {
+ /* could not allocate the buffer or request was too large */
+ (STp->buffer)->syscall_result = (-EBUSY);
+ (STp->buffer)->last_SRpnt = NULL;
+ } else if (do_wait) {
+ wait_for_completion(waiting);
+ SRpnt->waiting = NULL;
+ (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
+ }
+
+ return SRpnt;
+}
+
+
+/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
+ write has been correct but EOM early warning reached, -EIO if write ended in
+ error or zero if write successful. Asynchronous writes are used only in
+ variable block mode. */
+static int write_behind_check(struct scsi_tape * STp)
+{
+ int retval = 0;
+ struct st_buffer *STbuffer;
+ struct st_partstat *STps;
+ struct st_cmdstatus *cmdstatp;
+ struct st_request *SRpnt;
+
+ STbuffer = STp->buffer;
+ if (!STbuffer->writing)
+ return 0;
+
+ DEB(
+ if (STp->write_pending)
+ STp->nbr_waits++;
+ else
+ STp->nbr_finished++;
+ ) /* end DEB */
+
+ wait_for_completion(&(STp->wait));
+ SRpnt = STbuffer->last_SRpnt;
+ STbuffer->last_SRpnt = NULL;
+ SRpnt->waiting = NULL;
+
+ (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
+ st_release_request(SRpnt);
+
+ STbuffer->buffer_bytes -= STbuffer->writing;
+ STps = &(STp->ps[STp->partition]);
+ if (STps->drv_block >= 0) {
+ if (STp->block_size == 0)
+ STps->drv_block++;
+ else
+ STps->drv_block += STbuffer->writing / STp->block_size;
+ }
+
+ cmdstatp = &STbuffer->cmdstat;
+ if (STbuffer->syscall_result) {
+ retval = -EIO;
+ if (cmdstatp->have_sense && !cmdstatp->deferred &&
+ (cmdstatp->flags & SENSE_EOM) &&
+ (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
+ cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR)) {
+ /* EOM at write-behind, has all data been written? */
+ if (!cmdstatp->remainder_valid ||
+ cmdstatp->uremainder64 == 0)
+ retval = -ENOSPC;
+ }
+ if (retval == -EIO)
+ STps->drv_block = -1;
+ }
+ STbuffer->writing = 0;
+
+ DEB(if (debugging && retval)
+ st_printk(ST_DEB_MSG, STp,
+ "Async write error %x, return value %d.\n",
+ STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */
+
+ return retval;
+}
+
+
+/* Step over EOF if it has been inadvertently crossed (ioctl not used because
+ it messes up the block number). */
+static int cross_eof(struct scsi_tape * STp, int forward)
+{
+ struct st_request *SRpnt;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+
+ cmd[0] = SPACE;
+ cmd[1] = 0x01; /* Space FileMarks */
+ if (forward) {
+ cmd[2] = cmd[3] = 0;
+ cmd[4] = 1;
+ } else
+ cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */
+ cmd[5] = 0;
+
+ DEBC_printk(STp, "Stepping over filemark %s.\n",
+ forward ? "forward" : "backward");
+
+ SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
+ STp->device->request_queue->rq_timeout,
+ MAX_RETRIES, 1);
+ if (!SRpnt)
+ return (STp->buffer)->syscall_result;
+
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+
+ if ((STp->buffer)->cmdstat.midlevel_result != 0)
+ st_printk(KERN_ERR, STp,
+ "Stepping over filemark %s failed.\n",
+ forward ? "forward" : "backward");
+
+ return (STp->buffer)->syscall_result;
+}
+
+
+/* Flush the write buffer (never need to write if variable blocksize). */
+static int st_flush_write_buffer(struct scsi_tape * STp)
+{
+ int transfer, blks;
+ int result;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+ struct st_partstat *STps;
+
+ result = write_behind_check(STp);
+ if (result)
+ return result;
+
+ result = 0;
+ if (STp->dirty == 1) {
+
+ transfer = STp->buffer->buffer_bytes;
+ DEBC_printk(STp, "Flushing %d bytes.\n", transfer);
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_6;
+ cmd[1] = 1;
+ blks = transfer / STp->block_size;
+ cmd[2] = blks >> 16;
+ cmd[3] = blks >> 8;
+ cmd[4] = blks;
+
+ SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE,
+ STp->device->request_queue->rq_timeout,
+ MAX_WRITE_RETRIES, 1);
+ if (!SRpnt)
+ return (STp->buffer)->syscall_result;
+
+ STps = &(STp->ps[STp->partition]);
+ if ((STp->buffer)->syscall_result != 0) {
+ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+
+ if (cmdstatp->have_sense && !cmdstatp->deferred &&
+ (cmdstatp->flags & SENSE_EOM) &&
+ (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
+ cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
+ (!cmdstatp->remainder_valid ||
+ cmdstatp->uremainder64 == 0)) { /* All written at EOM early warning */
+ STp->dirty = 0;
+ (STp->buffer)->buffer_bytes = 0;
+ if (STps->drv_block >= 0)
+ STps->drv_block += blks;
+ result = (-ENOSPC);
+ } else {
+ st_printk(KERN_ERR, STp, "Error on flush.\n");
+ STps->drv_block = (-1);
+ result = (-EIO);
+ }
+ } else {
+ if (STps->drv_block >= 0)
+ STps->drv_block += blks;
+ STp->dirty = 0;
+ (STp->buffer)->buffer_bytes = 0;
+ }
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ }
+ return result;
+}
+
+
+/* Flush the tape buffer. The tape will be positioned correctly unless
+ seek_next is true. */
+static int flush_buffer(struct scsi_tape *STp, int seek_next)
+{
+ int backspace, result;
+ struct st_buffer *STbuffer;
+ struct st_partstat *STps;
+
+ STbuffer = STp->buffer;
+
+ /*
+ * If there was a bus reset, block further access
+ * to this device.
+ */
+ if (STp->pos_unknown)
+ return (-EIO);
+
+ if (STp->ready != ST_READY)
+ return 0;
+ STps = &(STp->ps[STp->partition]);
+ if (STps->rw == ST_WRITING) /* Writing */
+ return st_flush_write_buffer(STp);
+
+ if (STp->block_size == 0)
+ return 0;
+
+ backspace = ((STp->buffer)->buffer_bytes +
+ (STp->buffer)->read_pointer) / STp->block_size -
+ ((STp->buffer)->read_pointer + STp->block_size - 1) /
+ STp->block_size;
+ (STp->buffer)->buffer_bytes = 0;
+ (STp->buffer)->read_pointer = 0;
+ result = 0;
+ if (!seek_next) {
+ if (STps->eof == ST_FM_HIT) {
+ result = cross_eof(STp, 0); /* Back over the EOF hit */
+ if (!result)
+ STps->eof = ST_NOEOF;
+ else {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ }
+ }
+ if (!result && backspace > 0)
+ result = st_int_ioctl(STp, MTBSR, backspace);
+ } else if (STps->eof == ST_FM_HIT) {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_NOEOF;
+ }
+ return result;
+
+}
+
+/* Set the mode parameters */
+static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
+{
+ int set_it = 0;
+ unsigned long arg;
+
+ if (!STp->density_changed &&
+ STm->default_density >= 0 &&
+ STm->default_density != STp->density) {
+ arg = STm->default_density;
+ set_it = 1;
+ } else
+ arg = STp->density;
+ arg <<= MT_ST_DENSITY_SHIFT;
+ if (!STp->blksize_changed &&
+ STm->default_blksize >= 0 &&
+ STm->default_blksize != STp->block_size) {
+ arg |= STm->default_blksize;
+ set_it = 1;
+ } else
+ arg |= STp->block_size;
+ if (set_it &&
+ st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) {
+ st_printk(KERN_WARNING, STp,
+ "Can't set default block size to %d bytes "
+ "and density %x.\n",
+ STm->default_blksize, STm->default_density);
+ if (modes_defined)
+ return (-EINVAL);
+ }
+ return 0;
+}
+
+
+/* Lock or unlock the drive door. Don't use when st_request allocated. */
+static int do_door_lock(struct scsi_tape * STp, int do_lock)
+{
+ int retval;
+
+ DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl");
+
+ retval = scsi_set_medium_removal(STp->device,
+ do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
+ if (!retval)
+ STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
+ else
+ STp->door_locked = ST_LOCK_FAILS;
+ return retval;
+}
+
+
+/* Set the internal state after reset */
+static void reset_state(struct scsi_tape *STp)
+{
+ int i;
+ struct st_partstat *STps;
+
+ STp->pos_unknown = 0;
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = -1;
+ STps->drv_file = -1;
+ }
+ if (STp->can_partitions) {
+ STp->partition = find_partition(STp);
+ if (STp->partition < 0)
+ STp->partition = 0;
+ STp->new_partition = STp->partition;
+ }
+}
+
+/* Test if the drive is ready. Returns either one of the codes below or a negative system
+ error code. */
+#define CHKRES_READY 0
+#define CHKRES_NEW_SESSION 1
+#define CHKRES_NOT_READY 2
+#define CHKRES_NO_TAPE 3
+
+#define MAX_ATTENTIONS 10
+
+static int test_ready(struct scsi_tape *STp, int do_wait)
+{
+ int attentions, waits, max_wait, scode;
+ int retval = CHKRES_READY, new_session = 0;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt = NULL;
+ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+
+ max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
+
+ for (attentions=waits=0; ; ) {
+ memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
+ cmd[0] = TEST_UNIT_READY;
+ SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
+ STp->long_timeout, MAX_READY_RETRIES, 1);
+
+ if (!SRpnt) {
+ retval = (STp->buffer)->syscall_result;
+ break;
+ }
+
+ if (cmdstatp->have_sense) {
+
+ scode = cmdstatp->sense_hdr.sense_key;
+
+ if (scode == UNIT_ATTENTION) { /* New media? */
+ new_session = 1;
+ if (attentions < MAX_ATTENTIONS) {
+ attentions++;
+ continue;
+ }
+ else {
+ retval = (-EIO);
+ break;
+ }
+ }
+
+ if (scode == NOT_READY) {
+ if (waits < max_wait) {
+ if (msleep_interruptible(1000)) {
+ retval = (-EINTR);
+ break;
+ }
+ waits++;
+ continue;
+ }
+ else {
+ if ((STp->device)->scsi_level >= SCSI_2 &&
+ cmdstatp->sense_hdr.asc == 0x3a) /* Check ASC */
+ retval = CHKRES_NO_TAPE;
+ else
+ retval = CHKRES_NOT_READY;
+ break;
+ }
+ }
+ }
+
+ retval = (STp->buffer)->syscall_result;
+ if (!retval)
+ retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
+ break;
+ }
+
+ if (SRpnt != NULL)
+ st_release_request(SRpnt);
+ return retval;
+}
+
+
+/* See if the drive is ready and gather information about the tape. Return values:
+ < 0 negative error code from errno.h
+ 0 drive ready
+ 1 drive not ready (possibly no tape)
+*/
+static int check_tape(struct scsi_tape *STp, struct file *filp)
+{
+ int i, retval, new_session = 0, do_wait;
+ unsigned char cmd[MAX_COMMAND_SIZE], saved_cleaning;
+ unsigned short st_flags = filp->f_flags;
+ struct st_request *SRpnt = NULL;
+ struct st_modedef *STm;
+ struct st_partstat *STps;
+ struct inode *inode = file_inode(filp);
+ int mode = TAPE_MODE(inode);
+
+ STp->ready = ST_READY;
+
+ if (mode != STp->current_mode) {
+ DEBC_printk(STp, "Mode change from %d to %d.\n",
+ STp->current_mode, mode);
+ new_session = 1;
+ STp->current_mode = mode;
+ }
+ STm = &(STp->modes[STp->current_mode]);
+
+ saved_cleaning = STp->cleaning_req;
+ STp->cleaning_req = 0;
+
+ do_wait = ((filp->f_flags & O_NONBLOCK) == 0);
+ retval = test_ready(STp, do_wait);
+
+ if (retval < 0)
+ goto err_out;
+
+ if (retval == CHKRES_NEW_SESSION) {
+ STp->pos_unknown = 0;
+ STp->partition = STp->new_partition = 0;
+ if (STp->can_partitions)
+ STp->nbr_partitions = 1; /* This guess will be updated later
+ if necessary */
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = 0;
+ STps->drv_file = 0;
+ }
+ new_session = 1;
+ }
+ else {
+ STp->cleaning_req |= saved_cleaning;
+
+ if (retval == CHKRES_NOT_READY || retval == CHKRES_NO_TAPE) {
+ if (retval == CHKRES_NO_TAPE)
+ STp->ready = ST_NO_TAPE;
+ else
+ STp->ready = ST_NOT_READY;
+
+ STp->density = 0; /* Clear the erroneous "residue" */
+ STp->write_prot = 0;
+ STp->block_size = 0;
+ STp->ps[0].drv_file = STp->ps[0].drv_block = (-1);
+ STp->partition = STp->new_partition = 0;
+ STp->door_locked = ST_UNLOCKED;
+ return CHKRES_NOT_READY;
+ }
+ }
+
+ if (STp->omit_blklims)
+ STp->min_block = STp->max_block = (-1);
+ else {
+ memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
+ cmd[0] = READ_BLOCK_LIMITS;
+
+ SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
+ STp->device->request_queue->rq_timeout,
+ MAX_READY_RETRIES, 1);
+ if (!SRpnt) {
+ retval = (STp->buffer)->syscall_result;
+ goto err_out;
+ }
+
+ if (!SRpnt->result && !STp->buffer->cmdstat.have_sense) {
+ STp->max_block = ((STp->buffer)->b_data[1] << 16) |
+ ((STp->buffer)->b_data[2] << 8) | (STp->buffer)->b_data[3];
+ STp->min_block = ((STp->buffer)->b_data[4] << 8) |
+ (STp->buffer)->b_data[5];
+ if ( DEB( debugging || ) !STp->inited)
+ st_printk(KERN_INFO, STp,
+ "Block limits %d - %d bytes.\n",
+ STp->min_block, STp->max_block);
+ } else {
+ STp->min_block = STp->max_block = (-1);
+ DEBC_printk(STp, "Can't read block limits.\n");
+ }
+ }
+
+ memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SENSE;
+ cmd[4] = 12;
+
+ SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
+ STp->device->request_queue->rq_timeout,
+ MAX_READY_RETRIES, 1);
+ if (!SRpnt) {
+ retval = (STp->buffer)->syscall_result;
+ goto err_out;
+ }
+
+ if ((STp->buffer)->syscall_result != 0) {
+ DEBC_printk(STp, "No Mode Sense.\n");
+ STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */
+ (STp->buffer)->syscall_result = 0; /* Prevent error propagation */
+ STp->drv_write_prot = 0;
+ } else {
+ DEBC_printk(STp,"Mode sense. Length %d, "
+ "medium %x, WBS %x, BLL %d\n",
+ (STp->buffer)->b_data[0],
+ (STp->buffer)->b_data[1],
+ (STp->buffer)->b_data[2],
+ (STp->buffer)->b_data[3]);
+
+ if ((STp->buffer)->b_data[3] >= 8) {
+ STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7;
+ STp->density = (STp->buffer)->b_data[4];
+ STp->block_size = (STp->buffer)->b_data[9] * 65536 +
+ (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11];
+ DEBC_printk(STp, "Density %x, tape length: %x, "
+ "drv buffer: %d\n",
+ STp->density,
+ (STp->buffer)->b_data[5] * 65536 +
+ (STp->buffer)->b_data[6] * 256 +
+ (STp->buffer)->b_data[7],
+ STp->drv_buffer);
+ }
+ STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
+ if (!STp->drv_buffer && STp->immediate_filemark) {
+ st_printk(KERN_WARNING, STp,
+ "non-buffered tape: disabling "
+ "writing immediate filemarks\n");
+ STp->immediate_filemark = 0;
+ }
+ }
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ STp->inited = 1;
+
+ if (STp->block_size > 0)
+ (STp->buffer)->buffer_blocks =
+ (STp->buffer)->buffer_size / STp->block_size;
+ else
+ (STp->buffer)->buffer_blocks = 1;
+ (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
+
+ DEBC_printk(STp, "Block size: %d, buffer size: %d (%d blocks).\n",
+ STp->block_size, (STp->buffer)->buffer_size,
+ (STp->buffer)->buffer_blocks);
+
+ if (STp->drv_write_prot) {
+ STp->write_prot = 1;
+
+ DEBC_printk(STp, "Write protected\n");
+
+ if (do_wait &&
+ ((st_flags & O_ACCMODE) == O_WRONLY ||
+ (st_flags & O_ACCMODE) == O_RDWR)) {
+ retval = (-EROFS);
+ goto err_out;
+ }
+ }
+
+ if (STp->can_partitions && STp->nbr_partitions < 1) {
+ /* This code is reached when the device is opened for the first time
+ after the driver has been initialized with tape in the drive and the
+ partition support has been enabled. */
+ DEBC_printk(STp, "Updating partition number in status.\n");
+ if ((STp->partition = find_partition(STp)) < 0) {
+ retval = STp->partition;
+ goto err_out;
+ }
+ STp->new_partition = STp->partition;
+ STp->nbr_partitions = 1; /* This guess will be updated when necessary */
+ }
+
+ if (new_session) { /* Change the drive parameters for the new mode */
+ STp->density_changed = STp->blksize_changed = 0;
+ STp->compression_changed = 0;
+ if (!(STm->defaults_for_writes) &&
+ (retval = set_mode_densblk(STp, STm)) < 0)
+ goto err_out;
+
+ if (STp->default_drvbuffer != 0xff) {
+ if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer))
+ st_printk(KERN_WARNING, STp,
+ "Can't set default drive "
+ "buffering to %d.\n",
+ STp->default_drvbuffer);
+ }
+ }
+
+ return CHKRES_READY;
+
+ err_out:
+ return retval;
+}
+
+
+ /* Open the device. Needs to take the BKL only because of incrementing the SCSI host
+ module count. */
+static int st_open(struct inode *inode, struct file *filp)
+{
+ int i, retval = (-EIO);
+ int resumed = 0;
+ struct scsi_tape *STp;
+ struct st_partstat *STps;
+ int dev = TAPE_NR(inode);
+
+ /*
+ * We really want to do nonseekable_open(inode, filp); here, but some
+ * versions of tar incorrectly call lseek on tapes and bail out if that
+ * fails. So we disallow pread() and pwrite(), but permit lseeks.
+ */
+ filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
+
+ if (!(STp = scsi_tape_get(dev))) {
+ return -ENXIO;
+ }
+
+ filp->private_data = STp;
+
+ spin_lock(&st_use_lock);
+ if (STp->in_use) {
+ spin_unlock(&st_use_lock);
+ scsi_tape_put(STp);
+ DEBC_printk(STp, "Device already in use.\n");
+ return (-EBUSY);
+ }
+
+ STp->in_use = 1;
+ spin_unlock(&st_use_lock);
+ STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
+
+ if (scsi_autopm_get_device(STp->device) < 0) {
+ retval = -EIO;
+ goto err_out;
+ }
+ resumed = 1;
+ if (!scsi_block_when_processing_errors(STp->device)) {
+ retval = (-ENXIO);
+ goto err_out;
+ }
+
+ /* See that we have at least a one page buffer available */
+ if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) {
+ st_printk(KERN_WARNING, STp,
+ "Can't allocate one page tape buffer.\n");
+ retval = (-EOVERFLOW);
+ goto err_out;
+ }
+
+ (STp->buffer)->cleared = 0;
+ (STp->buffer)->writing = 0;
+ (STp->buffer)->syscall_result = 0;
+
+ STp->write_prot = ((filp->f_flags & O_ACCMODE) == O_RDONLY);
+
+ STp->dirty = 0;
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(STp->ps[i]);
+ STps->rw = ST_IDLE;
+ }
+ STp->try_dio_now = STp->try_dio;
+ STp->recover_count = 0;
+ DEB( STp->nbr_waits = STp->nbr_finished = 0;
+ STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; )
+
+ retval = check_tape(STp, filp);
+ if (retval < 0)
+ goto err_out;
+ if ((filp->f_flags & O_NONBLOCK) == 0 &&
+ retval != CHKRES_READY) {
+ if (STp->ready == NO_TAPE)
+ retval = (-ENOMEDIUM);
+ else
+ retval = (-EIO);
+ goto err_out;
+ }
+ return 0;
+
+ err_out:
+ normalize_buffer(STp->buffer);
+ spin_lock(&st_use_lock);
+ STp->in_use = 0;
+ spin_unlock(&st_use_lock);
+ scsi_tape_put(STp);
+ if (resumed)
+ scsi_autopm_put_device(STp->device);
+ return retval;
+
+}
+
+
+/* Flush the tape buffer before close */
+static int st_flush(struct file *filp, fl_owner_t id)
+{
+ int result = 0, result2;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+ struct scsi_tape *STp = filp->private_data;
+ struct st_modedef *STm = &(STp->modes[STp->current_mode]);
+ struct st_partstat *STps = &(STp->ps[STp->partition]);
+
+ if (file_count(filp) > 1)
+ return 0;
+
+ if (STps->rw == ST_WRITING && !STp->pos_unknown) {
+ result = st_flush_write_buffer(STp);
+ if (result != 0 && result != (-ENOSPC))
+ goto out;
+ }
+
+ if (STp->can_partitions &&
+ (result2 = switch_partition(STp)) < 0) {
+ DEBC_printk(STp, "switch_partition at close failed.\n");
+ if (result == 0)
+ result = result2;
+ goto out;
+ }
+
+ DEBC( if (STp->nbr_requests)
+ st_printk(KERN_DEBUG, STp,
+ "Number of r/w requests %d, dio used in %d, "
+ "pages %d.\n", STp->nbr_requests, STp->nbr_dio,
+ STp->nbr_pages));
+
+ if (STps->rw == ST_WRITING && !STp->pos_unknown) {
+ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+
+#if DEBUG
+ DEBC_printk(STp, "Async write waits %d, finished %d.\n",
+ STp->nbr_waits, STp->nbr_finished);
+#endif
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_FILEMARKS;
+ if (STp->immediate_filemark)
+ cmd[1] = 1;
+ cmd[4] = 1 + STp->two_fm;
+
+ SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
+ STp->device->request_queue->rq_timeout,
+ MAX_WRITE_RETRIES, 1);
+ if (!SRpnt) {
+ result = (STp->buffer)->syscall_result;
+ goto out;
+ }
+
+ if (STp->buffer->syscall_result == 0 ||
+ (cmdstatp->have_sense && !cmdstatp->deferred &&
+ (cmdstatp->flags & SENSE_EOM) &&
+ (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
+ cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
+ (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0))) {
+ /* Write successful at EOM */
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ if (STp->two_fm)
+ cross_eof(STp, 0);
+ STps->eof = ST_FM;
+ }
+ else { /* Write error */
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ st_printk(KERN_ERR, STp,
+ "Error on write filemark.\n");
+ if (result == 0)
+ result = (-EIO);
+ }
+
+ DEBC_printk(STp, "Buffer flushed, %d EOF(s) written\n", cmd[4]);
+ } else if (!STp->rew_at_close) {
+ STps = &(STp->ps[STp->partition]);
+ if (!STm->sysv || STps->rw != ST_READING) {
+ if (STp->can_bsr)
+ result = flush_buffer(STp, 0);
+ else if (STps->eof == ST_FM_HIT) {
+ result = cross_eof(STp, 0);
+ if (result) {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_FM;
+ } else
+ STps->eof = ST_NOEOF;
+ }
+ } else if ((STps->eof == ST_NOEOF &&
+ !(result = cross_eof(STp, 1))) ||
+ STps->eof == ST_FM_HIT) {
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_FM;
+ }
+ }
+
+ out:
+ if (STp->rew_at_close) {
+ result2 = st_int_ioctl(STp, MTREW, 1);
+ if (result == 0)
+ result = result2;
+ }
+ return result;
+}
+
+
+/* Close the device and release it. BKL is not needed: this is the only thread
+ accessing this tape. */
+static int st_release(struct inode *inode, struct file *filp)
+{
+ int result = 0;
+ struct scsi_tape *STp = filp->private_data;
+
+ if (STp->door_locked == ST_LOCKED_AUTO)
+ do_door_lock(STp, 0);
+
+ normalize_buffer(STp->buffer);
+ spin_lock(&st_use_lock);
+ STp->in_use = 0;
+ spin_unlock(&st_use_lock);
+ scsi_autopm_put_device(STp->device);
+ scsi_tape_put(STp);
+
+ return result;
+}
+
+/* The checks common to both reading and writing */
+static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
+{
+ ssize_t retval = 0;
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited.
+ */
+ if (!scsi_block_when_processing_errors(STp->device)) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if (STp->ready != ST_READY) {
+ if (STp->ready == ST_NO_TAPE)
+ retval = (-ENOMEDIUM);
+ else
+ retval = (-EIO);
+ goto out;
+ }
+
+ if (! STp->modes[STp->current_mode].defined) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+
+ /*
+ * If there was a bus reset, block further access
+ * to this device.
+ */
+ if (STp->pos_unknown) {
+ retval = (-EIO);
+ goto out;
+ }
+
+ if (count == 0)
+ goto out;
+
+ DEB(
+ if (!STp->in_use) {
+ st_printk(ST_DEB_MSG, STp,
+ "Incorrect device.\n");
+ retval = (-EIO);
+ goto out;
+ } ) /* end DEB */
+
+ if (STp->can_partitions &&
+ (retval = switch_partition(STp)) < 0)
+ goto out;
+
+ if (STp->block_size == 0 && STp->max_block > 0 &&
+ (count < STp->min_block || count > STp->max_block)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED &&
+ !do_door_lock(STp, 1))
+ STp->door_locked = ST_LOCKED_AUTO;
+
+ out:
+ return retval;
+}
+
+
+static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
+ size_t count, int is_read)
+{
+ int i, bufsize, retval = 0;
+ struct st_buffer *STbp = STp->buffer;
+
+ if (is_read)
+ i = STp->try_dio_now && try_rdio;
+ else
+ i = STp->try_dio_now && try_wdio;
+
+ if (i && ((unsigned long)buf & queue_dma_alignment(
+ STp->device->request_queue)) == 0) {
+ i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf,
+ count, (is_read ? READ : WRITE));
+ if (i > 0) {
+ STbp->do_dio = i;
+ STbp->buffer_bytes = 0; /* can be used as transfer counter */
+ }
+ else
+ STbp->do_dio = 0; /* fall back to buffering with any error */
+ STbp->sg_segs = STbp->do_dio;
+ DEB(
+ if (STbp->do_dio) {
+ STp->nbr_dio++;
+ STp->nbr_pages += STbp->do_dio;
+ }
+ )
+ } else
+ STbp->do_dio = 0;
+ DEB( STp->nbr_requests++; )
+
+ if (!STbp->do_dio) {
+ if (STp->block_size)
+ bufsize = STp->block_size > st_fixed_buffer_size ?
+ STp->block_size : st_fixed_buffer_size;
+ else {
+ bufsize = count;
+ /* Make sure that data from previous user is not leaked even if
+ HBA does not return correct residual */
+ if (is_read && STp->sili && !STbp->cleared)
+ clear_buffer(STbp);
+ }
+
+ if (bufsize > STbp->buffer_size &&
+ !enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
+ st_printk(KERN_WARNING, STp,
+ "Can't allocate %d byte tape buffer.\n",
+ bufsize);
+ retval = (-EOVERFLOW);
+ goto out;
+ }
+ if (STp->block_size)
+ STbp->buffer_blocks = bufsize / STp->block_size;
+ }
+
+ out:
+ return retval;
+}
+
+
+/* Can be called more than once after each setup_buffer() */
+static void release_buffering(struct scsi_tape *STp, int is_read)
+{
+ struct st_buffer *STbp;
+
+ STbp = STp->buffer;
+ if (STbp->do_dio) {
+ sgl_unmap_user_pages(STbp, STbp->do_dio, is_read);
+ STbp->do_dio = 0;
+ STbp->sg_segs = 0;
+ }
+}
+
+
+/* Write command */
+static ssize_t
+st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+{
+ ssize_t total;
+ ssize_t i, do_count, blks, transfer;
+ ssize_t retval;
+ int undone, retry_eot = 0, scode;
+ int async_write;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ const char __user *b_point;
+ struct st_request *SRpnt = NULL;
+ struct scsi_tape *STp = filp->private_data;
+ struct st_modedef *STm;
+ struct st_partstat *STps;
+ struct st_buffer *STbp;
+
+ if (mutex_lock_interruptible(&STp->lock))
+ return -ERESTARTSYS;
+
+ retval = rw_checks(STp, filp, count);
+ if (retval || count == 0)
+ goto out;
+
+ /* Write must be integral number of blocks */
+ if (STp->block_size != 0 && (count % STp->block_size) != 0) {
+ st_printk(KERN_WARNING, STp,
+ "Write not multiple of tape block size.\n");
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ STm = &(STp->modes[STp->current_mode]);
+ STps = &(STp->ps[STp->partition]);
+
+ if (STp->write_prot) {
+ retval = (-EACCES);
+ goto out;
+ }
+
+
+ if (STps->rw == ST_READING) {
+ retval = flush_buffer(STp, 0);
+ if (retval)
+ goto out;
+ STps->rw = ST_WRITING;
+ } else if (STps->rw != ST_WRITING &&
+ STps->drv_file == 0 && STps->drv_block == 0) {
+ if ((retval = set_mode_densblk(STp, STm)) < 0)
+ goto out;
+ if (STm->default_compression != ST_DONT_TOUCH &&
+ !(STp->compression_changed)) {
+ if (st_compression(STp, (STm->default_compression == ST_YES))) {
+ st_printk(KERN_WARNING, STp,
+ "Can't set default compression.\n");
+ if (modes_defined) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ }
+ }
+ }
+
+ STbp = STp->buffer;
+ i = write_behind_check(STp);
+ if (i) {
+ if (i == -ENOSPC)
+ STps->eof = ST_EOM_OK;
+ else
+ STps->eof = ST_EOM_ERROR;
+ }
+
+ if (STps->eof == ST_EOM_OK) {
+ STps->eof = ST_EOD_1; /* allow next write */
+ retval = (-ENOSPC);
+ goto out;
+ }
+ else if (STps->eof == ST_EOM_ERROR) {
+ retval = (-EIO);
+ goto out;
+ }
+
+ /* Check the buffer readability in cases where copy_user might catch
+ the problems after some tape movement. */
+ if (STp->block_size != 0 &&
+ !STbp->do_dio &&
+ (copy_from_user(&i, buf, 1) != 0 ||
+ copy_from_user(&i, buf + count - 1, 1) != 0)) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ retval = setup_buffering(STp, buf, count, 0);
+ if (retval)
+ goto out;
+
+ total = count;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = WRITE_6;
+ cmd[1] = (STp->block_size != 0);
+
+ STps->rw = ST_WRITING;
+
+ b_point = buf;
+ while (count > 0 && !retry_eot) {
+
+ if (STbp->do_dio) {
+ do_count = count;
+ }
+ else {
+ if (STp->block_size == 0)
+ do_count = count;
+ else {
+ do_count = STbp->buffer_blocks * STp->block_size -
+ STbp->buffer_bytes;
+ if (do_count > count)
+ do_count = count;
+ }
+
+ i = append_to_buffer(b_point, STbp, do_count);
+ if (i) {
+ retval = i;
+ goto out;
+ }
+ }
+ count -= do_count;
+ b_point += do_count;
+
+ async_write = STp->block_size == 0 && !STbp->do_dio &&
+ STm->do_async_writes && STps->eof < ST_EOM_OK;
+
+ if (STp->block_size != 0 && STm->do_buffer_writes &&
+ !(STp->try_dio_now && try_wdio) && STps->eof < ST_EOM_OK &&
+ STbp->buffer_bytes < STbp->buffer_size) {
+ STp->dirty = 1;
+ /* Don't write a buffer that is not full enough. */
+ if (!async_write && count == 0)
+ break;
+ }
+
+ retry_write:
+ if (STp->block_size == 0)
+ blks = transfer = do_count;
+ else {
+ if (!STbp->do_dio)
+ blks = STbp->buffer_bytes;
+ else
+ blks = do_count;
+ blks /= STp->block_size;
+ transfer = blks * STp->block_size;
+ }
+ cmd[2] = blks >> 16;
+ cmd[3] = blks >> 8;
+ cmd[4] = blks;
+
+ SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
+ STp->device->request_queue->rq_timeout,
+ MAX_WRITE_RETRIES, !async_write);
+ if (!SRpnt) {
+ retval = STbp->syscall_result;
+ goto out;
+ }
+ if (async_write && !STbp->syscall_result) {
+ STbp->writing = transfer;
+ STp->dirty = !(STbp->writing ==
+ STbp->buffer_bytes);
+ SRpnt = NULL; /* Prevent releasing this request! */
+ DEB( STp->write_pending = 1; )
+ break;
+ }
+
+ if (STbp->syscall_result != 0) {
+ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+
+ DEBC_printk(STp, "Error on write:\n");
+ if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) {
+ scode = cmdstatp->sense_hdr.sense_key;
+ if (cmdstatp->remainder_valid)
+ undone = (int)cmdstatp->uremainder64;
+ else if (STp->block_size == 0 &&
+ scode == VOLUME_OVERFLOW)
+ undone = transfer;
+ else
+ undone = 0;
+ if (STp->block_size != 0)
+ undone *= STp->block_size;
+ if (undone <= do_count) {
+ /* Only data from this write is not written */
+ count += undone;
+ b_point -= undone;
+ do_count -= undone;
+ if (STp->block_size)
+ blks = (transfer - undone) / STp->block_size;
+ STps->eof = ST_EOM_OK;
+ /* Continue in fixed block mode if all written
+ in this request but still something left to write
+ (retval left to zero)
+ */
+ if (STp->block_size == 0 ||
+ undone > 0 || count == 0)
+ retval = (-ENOSPC); /* EOM within current request */
+ DEBC_printk(STp, "EOM with %d "
+ "bytes unwritten.\n",
+ (int)count);
+ } else {
+ /* EOT within data buffered earlier (possible only
+ in fixed block mode without direct i/o) */
+ if (!retry_eot && !cmdstatp->deferred &&
+ (scode == NO_SENSE || scode == RECOVERED_ERROR)) {
+ move_buffer_data(STp->buffer, transfer - undone);
+ retry_eot = 1;
+ if (STps->drv_block >= 0) {
+ STps->drv_block += (transfer - undone) /
+ STp->block_size;
+ }
+ STps->eof = ST_EOM_OK;
+ DEBC_printk(STp, "Retry "
+ "write of %d "
+ "bytes at EOM.\n",
+ STp->buffer->buffer_bytes);
+ goto retry_write;
+ }
+ else {
+ /* Either error within data buffered by driver or
+ failed retry */
+ count -= do_count;
+ blks = do_count = 0;
+ STps->eof = ST_EOM_ERROR;
+ STps->drv_block = (-1); /* Too cautious? */
+ retval = (-EIO); /* EOM for old data */
+ DEBC_printk(STp, "EOM with "
+ "lost data.\n");
+ }
+ }
+ } else {
+ count += do_count;
+ STps->drv_block = (-1); /* Too cautious? */
+ retval = STbp->syscall_result;
+ }
+
+ }
+
+ if (STps->drv_block >= 0) {
+ if (STp->block_size == 0)
+ STps->drv_block += (do_count > 0);
+ else
+ STps->drv_block += blks;
+ }
+
+ STbp->buffer_bytes = 0;
+ STp->dirty = 0;
+
+ if (retval || retry_eot) {
+ if (count < total)
+ retval = total - count;
+ goto out;
+ }
+ }
+
+ if (STps->eof == ST_EOD_1)
+ STps->eof = ST_EOM_OK;
+ else if (STps->eof != ST_EOM_OK)
+ STps->eof = ST_NOEOF;
+ retval = total - count;
+
+ out:
+ if (SRpnt != NULL)
+ st_release_request(SRpnt);
+ release_buffering(STp, 0);
+ mutex_unlock(&STp->lock);
+
+ return retval;
+}
+
+/* Read data from the tape. Returns zero in the normal case, one if the
+ eof status has changed, and the negative error code in case of a
+ fatal error. Otherwise updates the buffer and the eof state.
+
+ Does release user buffer mapping if it is set.
+*/
+static long read_tape(struct scsi_tape *STp, long count,
+ struct st_request ** aSRpnt)
+{
+ int transfer, blks, bytes;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+ struct st_modedef *STm;
+ struct st_partstat *STps;
+ struct st_buffer *STbp;
+ int retval = 0;
+
+ if (count == 0)
+ return 0;
+
+ STm = &(STp->modes[STp->current_mode]);
+ STps = &(STp->ps[STp->partition]);
+ if (STps->eof == ST_FM_HIT)
+ return 1;
+ STbp = STp->buffer;
+
+ if (STp->block_size == 0)
+ blks = bytes = count;
+ else {
+ if (!(STp->try_dio_now && try_rdio) && STm->do_read_ahead) {
+ blks = (STp->buffer)->buffer_blocks;
+ bytes = blks * STp->block_size;
+ } else {
+ bytes = count;
+ if (!STbp->do_dio && bytes > (STp->buffer)->buffer_size)
+ bytes = (STp->buffer)->buffer_size;
+ blks = bytes / STp->block_size;
+ bytes = blks * STp->block_size;
+ }
+ }
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = READ_6;
+ cmd[1] = (STp->block_size != 0);
+ if (!cmd[1] && STp->sili)
+ cmd[1] |= 2;
+ cmd[2] = blks >> 16;
+ cmd[3] = blks >> 8;
+ cmd[4] = blks;
+
+ SRpnt = *aSRpnt;
+ SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
+ STp->device->request_queue->rq_timeout,
+ MAX_RETRIES, 1);
+ release_buffering(STp, 1);
+ *aSRpnt = SRpnt;
+ if (!SRpnt)
+ return STbp->syscall_result;
+
+ STbp->read_pointer = 0;
+ STps->at_sm = 0;
+
+ /* Something to check */
+ if (STbp->syscall_result) {
+ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+
+ retval = 1;
+ DEBC_printk(STp,
+ "Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
+ SRpnt->sense[0], SRpnt->sense[1],
+ SRpnt->sense[2], SRpnt->sense[3],
+ SRpnt->sense[4], SRpnt->sense[5],
+ SRpnt->sense[6], SRpnt->sense[7]);
+ if (cmdstatp->have_sense) {
+
+ if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
+ cmdstatp->flags &= 0xcf; /* No need for EOM in this case */
+
+ if (cmdstatp->flags != 0) { /* EOF, EOM, or ILI */
+ /* Compute the residual count */
+ if (cmdstatp->remainder_valid)
+ transfer = (int)cmdstatp->uremainder64;
+ else
+ transfer = 0;
+ if (STp->block_size == 0 &&
+ cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR)
+ transfer = bytes;
+
+ if (cmdstatp->flags & SENSE_ILI) { /* ILI */
+ if (STp->block_size == 0 &&
+ transfer < 0) {
+ st_printk(KERN_NOTICE, STp,
+ "Failed to read %d "
+ "byte block with %d "
+ "byte transfer.\n",
+ bytes - transfer,
+ bytes);
+ if (STps->drv_block >= 0)
+ STps->drv_block += 1;
+ STbp->buffer_bytes = 0;
+ return (-ENOMEM);
+ } else if (STp->block_size == 0) {
+ STbp->buffer_bytes = bytes - transfer;
+ } else {
+ st_release_request(SRpnt);
+ SRpnt = *aSRpnt = NULL;
+ if (transfer == blks) { /* We did not get anything, error */
+ st_printk(KERN_NOTICE, STp,
+ "Incorrect "
+ "block size.\n");
+ if (STps->drv_block >= 0)
+ STps->drv_block += blks - transfer + 1;
+ st_int_ioctl(STp, MTBSR, 1);
+ return (-EIO);
+ }
+ /* We have some data, deliver it */
+ STbp->buffer_bytes = (blks - transfer) *
+ STp->block_size;
+ DEBC_printk(STp, "ILI but "
+ "enough data "
+ "received %ld "
+ "%d.\n", count,
+ STbp->buffer_bytes);
+ if (STps->drv_block >= 0)
+ STps->drv_block += 1;
+ if (st_int_ioctl(STp, MTBSR, 1))
+ return (-EIO);
+ }
+ } else if (cmdstatp->flags & SENSE_FMK) { /* FM overrides EOM */
+ if (STps->eof != ST_FM_HIT)
+ STps->eof = ST_FM_HIT;
+ else
+ STps->eof = ST_EOD_2;
+ if (STp->block_size == 0)
+ STbp->buffer_bytes = 0;
+ else
+ STbp->buffer_bytes =
+ bytes - transfer * STp->block_size;
+ DEBC_printk(STp, "EOF detected (%d "
+ "bytes read).\n",
+ STbp->buffer_bytes);
+ } else if (cmdstatp->flags & SENSE_EOM) {
+ if (STps->eof == ST_FM)
+ STps->eof = ST_EOD_1;
+ else
+ STps->eof = ST_EOM_OK;
+ if (STp->block_size == 0)
+ STbp->buffer_bytes = bytes - transfer;
+ else
+ STbp->buffer_bytes =
+ bytes - transfer * STp->block_size;
+
+ DEBC_printk(STp, "EOM detected (%d "
+ "bytes read).\n",
+ STbp->buffer_bytes);
+ }
+ }
+ /* end of EOF, EOM, ILI test */
+ else { /* nonzero sense key */
+ DEBC_printk(STp, "Tape error while reading.\n");
+ STps->drv_block = (-1);
+ if (STps->eof == ST_FM &&
+ cmdstatp->sense_hdr.sense_key == BLANK_CHECK) {
+ DEBC_printk(STp, "Zero returned for "
+ "first BLANK CHECK "
+ "after EOF.\n");
+ STps->eof = ST_EOD_2; /* First BLANK_CHECK after FM */
+ } else /* Some other extended sense code */
+ retval = (-EIO);
+ }
+
+ if (STbp->buffer_bytes < 0) /* Caused by bogus sense data */
+ STbp->buffer_bytes = 0;
+ }
+ /* End of extended sense test */
+ else { /* Non-extended sense */
+ retval = STbp->syscall_result;
+ }
+
+ }
+ /* End of error handling */
+ else { /* Read successful */
+ STbp->buffer_bytes = bytes;
+ if (STp->sili) /* In fixed block mode residual is always zero here */
+ STbp->buffer_bytes -= STp->buffer->cmdstat.residual;
+ }
+
+ if (STps->drv_block >= 0) {
+ if (STp->block_size == 0)
+ STps->drv_block++;
+ else
+ STps->drv_block += STbp->buffer_bytes / STp->block_size;
+ }
+ return retval;
+}
+
+
+/* Read command */
+static ssize_t
+st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
+{
+ ssize_t total;
+ ssize_t retval = 0;
+ ssize_t i, transfer;
+ int special, do_dio = 0;
+ struct st_request *SRpnt = NULL;
+ struct scsi_tape *STp = filp->private_data;
+ struct st_modedef *STm;
+ struct st_partstat *STps;
+ struct st_buffer *STbp = STp->buffer;
+
+ if (mutex_lock_interruptible(&STp->lock))
+ return -ERESTARTSYS;
+
+ retval = rw_checks(STp, filp, count);
+ if (retval || count == 0)
+ goto out;
+
+ STm = &(STp->modes[STp->current_mode]);
+ if (STp->block_size != 0 && (count % STp->block_size) != 0) {
+ if (!STm->do_read_ahead) {
+ retval = (-EINVAL); /* Read must be integral number of blocks */
+ goto out;
+ }
+ STp->try_dio_now = 0; /* Direct i/o can't handle split blocks */
+ }
+
+ STps = &(STp->ps[STp->partition]);
+ if (STps->rw == ST_WRITING) {
+ retval = flush_buffer(STp, 0);
+ if (retval)
+ goto out;
+ STps->rw = ST_READING;
+ }
+ DEB(
+ if (debugging && STps->eof != ST_NOEOF)
+ st_printk(ST_DEB_MSG, STp,
+ "EOF/EOM flag up (%d). Bytes %d\n",
+ STps->eof, STbp->buffer_bytes);
+ ) /* end DEB */
+
+ retval = setup_buffering(STp, buf, count, 1);
+ if (retval)
+ goto out;
+ do_dio = STbp->do_dio;
+
+ if (STbp->buffer_bytes == 0 &&
+ STps->eof >= ST_EOD_1) {
+ if (STps->eof < ST_EOD) {
+ STps->eof += 1;
+ retval = 0;
+ goto out;
+ }
+ retval = (-EIO); /* EOM or Blank Check */
+ goto out;
+ }
+
+ if (do_dio) {
+ /* Check the buffer writability before any tape movement. Don't alter
+ buffer data. */
+ if (copy_from_user(&i, buf, 1) != 0 ||
+ copy_to_user(buf, &i, 1) != 0 ||
+ copy_from_user(&i, buf + count - 1, 1) != 0 ||
+ copy_to_user(buf + count - 1, &i, 1) != 0) {
+ retval = (-EFAULT);
+ goto out;
+ }
+ }
+
+ STps->rw = ST_READING;
+
+
+ /* Loop until enough data in buffer or a special condition found */
+ for (total = 0, special = 0; total < count && !special;) {
+
+ /* Get new data if the buffer is empty */
+ if (STbp->buffer_bytes == 0) {
+ special = read_tape(STp, count - total, &SRpnt);
+ if (special < 0) { /* No need to continue read */
+ retval = special;
+ goto out;
+ }
+ }
+
+ /* Move the data from driver buffer to user buffer */
+ if (STbp->buffer_bytes > 0) {
+ DEB(
+ if (debugging && STps->eof != ST_NOEOF)
+ st_printk(ST_DEB_MSG, STp,
+ "EOF up (%d). Left %d, needed %d.\n",
+ STps->eof, STbp->buffer_bytes,
+ (int)(count - total));
+ ) /* end DEB */
+ transfer = STbp->buffer_bytes < count - total ?
+ STbp->buffer_bytes : count - total;
+ if (!do_dio) {
+ i = from_buffer(STbp, buf, transfer);
+ if (i) {
+ retval = i;
+ goto out;
+ }
+ }
+ buf += transfer;
+ total += transfer;
+ }
+
+ if (STp->block_size == 0)
+ break; /* Read only one variable length block */
+
+ } /* for (total = 0, special = 0;
+ total < count && !special; ) */
+
+ /* Change the eof state if no data from tape or buffer */
+ if (total == 0) {
+ if (STps->eof == ST_FM_HIT) {
+ STps->eof = ST_FM;
+ STps->drv_block = 0;
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ } else if (STps->eof == ST_EOD_1) {
+ STps->eof = ST_EOD_2;
+ STps->drv_block = 0;
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ } else if (STps->eof == ST_EOD_2)
+ STps->eof = ST_EOD;
+ } else if (STps->eof == ST_FM)
+ STps->eof = ST_NOEOF;
+ retval = total;
+
+ out:
+ if (SRpnt != NULL) {
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ }
+ if (do_dio) {
+ release_buffering(STp, 1);
+ STbp->buffer_bytes = 0;
+ }
+ mutex_unlock(&STp->lock);
+
+ return retval;
+}
+
+
+
+DEB(
+/* Set the driver options */
+static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm)
+{
+ if (debugging) {
+ st_printk(KERN_INFO, STp,
+ "Mode %d options: buffer writes: %d, "
+ "async writes: %d, read ahead: %d\n",
+ STp->current_mode, STm->do_buffer_writes,
+ STm->do_async_writes, STm->do_read_ahead);
+ st_printk(KERN_INFO, STp,
+ " can bsr: %d, two FMs: %d, "
+ "fast mteom: %d, auto lock: %d,\n",
+ STp->can_bsr, STp->two_fm, STp->fast_mteom,
+ STp->do_auto_lock);
+ st_printk(KERN_INFO, STp,
+ " defs for wr: %d, no block limits: %d, "
+ "partitions: %d, s2 log: %d\n",
+ STm->defaults_for_writes, STp->omit_blklims,
+ STp->can_partitions, STp->scsi2_logical);
+ st_printk(KERN_INFO, STp,
+ " sysv: %d nowait: %d sili: %d "
+ "nowait_filemark: %d\n",
+ STm->sysv, STp->immediate, STp->sili,
+ STp->immediate_filemark);
+ st_printk(KERN_INFO, STp, " debugging: %d\n", debugging);
+ }
+}
+ )
+
+
+static int st_set_options(struct scsi_tape *STp, long options)
+{
+ int value;
+ long code;
+ struct st_modedef *STm;
+ struct cdev *cd0, *cd1;
+ struct device *d0, *d1;
+
+ STm = &(STp->modes[STp->current_mode]);
+ if (!STm->defined) {
+ cd0 = STm->cdevs[0];
+ cd1 = STm->cdevs[1];
+ d0 = STm->devs[0];
+ d1 = STm->devs[1];
+ memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef));
+ STm->cdevs[0] = cd0;
+ STm->cdevs[1] = cd1;
+ STm->devs[0] = d0;
+ STm->devs[1] = d1;
+ modes_defined = 1;
+ DEBC_printk(STp, "Initialized mode %d definition from mode 0\n",
+ STp->current_mode);
+ }
+
+ code = options & MT_ST_OPTIONS;
+ if (code == MT_ST_BOOLEANS) {
+ STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0;
+ STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0;
+ STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0;
+ STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0;
+ STp->two_fm = (options & MT_ST_TWO_FM) != 0;
+ STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0;
+ STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0;
+ STp->can_bsr = (options & MT_ST_CAN_BSR) != 0;
+ STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0;
+ if ((STp->device)->scsi_level >= SCSI_2)
+ STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
+ STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
+ STp->immediate = (options & MT_ST_NOWAIT) != 0;
+ STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0;
+ STm->sysv = (options & MT_ST_SYSV) != 0;
+ STp->sili = (options & MT_ST_SILI) != 0;
+ DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
+ st_log_options(STp, STm); )
+ } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
+ value = (code == MT_ST_SETBOOLEANS);
+ if ((options & MT_ST_BUFFER_WRITES) != 0)
+ STm->do_buffer_writes = value;
+ if ((options & MT_ST_ASYNC_WRITES) != 0)
+ STm->do_async_writes = value;
+ if ((options & MT_ST_DEF_WRITES) != 0)
+ STm->defaults_for_writes = value;
+ if ((options & MT_ST_READ_AHEAD) != 0)
+ STm->do_read_ahead = value;
+ if ((options & MT_ST_TWO_FM) != 0)
+ STp->two_fm = value;
+ if ((options & MT_ST_FAST_MTEOM) != 0)
+ STp->fast_mteom = value;
+ if ((options & MT_ST_AUTO_LOCK) != 0)
+ STp->do_auto_lock = value;
+ if ((options & MT_ST_CAN_BSR) != 0)
+ STp->can_bsr = value;
+ if ((options & MT_ST_NO_BLKLIMS) != 0)
+ STp->omit_blklims = value;
+ if ((STp->device)->scsi_level >= SCSI_2 &&
+ (options & MT_ST_CAN_PARTITIONS) != 0)
+ STp->can_partitions = value;
+ if ((options & MT_ST_SCSI2LOGICAL) != 0)
+ STp->scsi2_logical = value;
+ if ((options & MT_ST_NOWAIT) != 0)
+ STp->immediate = value;
+ if ((options & MT_ST_NOWAIT_EOF) != 0)
+ STp->immediate_filemark = value;
+ if ((options & MT_ST_SYSV) != 0)
+ STm->sysv = value;
+ if ((options & MT_ST_SILI) != 0)
+ STp->sili = value;
+ DEB(
+ if ((options & MT_ST_DEBUGGING) != 0)
+ debugging = value;
+ st_log_options(STp, STm); )
+ } else if (code == MT_ST_WRITE_THRESHOLD) {
+ /* Retained for compatibility */
+ } else if (code == MT_ST_DEF_BLKSIZE) {
+ value = (options & ~MT_ST_OPTIONS);
+ if (value == ~MT_ST_OPTIONS) {
+ STm->default_blksize = (-1);
+ DEBC_printk(STp, "Default block size disabled.\n");
+ } else {
+ STm->default_blksize = value;
+ DEBC_printk(STp,"Default block size set to "
+ "%d bytes.\n", STm->default_blksize);
+ if (STp->ready == ST_READY) {
+ STp->blksize_changed = 0;
+ set_mode_densblk(STp, STm);
+ }
+ }
+ } else if (code == MT_ST_TIMEOUTS) {
+ value = (options & ~MT_ST_OPTIONS);
+ if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) {
+ STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ;
+ DEBC_printk(STp, "Long timeout set to %d seconds.\n",
+ (value & ~MT_ST_SET_LONG_TIMEOUT));
+ } else {
+ blk_queue_rq_timeout(STp->device->request_queue,
+ value * HZ);
+ DEBC_printk(STp, "Normal timeout set to %d seconds.\n",
+ value);
+ }
+ } else if (code == MT_ST_SET_CLN) {
+ value = (options & ~MT_ST_OPTIONS) & 0xff;
+ if (value != 0 &&
+ (value < EXTENDED_SENSE_START ||
+ value >= SCSI_SENSE_BUFFERSIZE))
+ return (-EINVAL);
+ STp->cln_mode = value;
+ STp->cln_sense_mask = (options >> 8) & 0xff;
+ STp->cln_sense_value = (options >> 16) & 0xff;
+ st_printk(KERN_INFO, STp,
+ "Cleaning request mode %d, mask %02x, value %02x\n",
+ value, STp->cln_sense_mask, STp->cln_sense_value);
+ } else if (code == MT_ST_DEF_OPTIONS) {
+ code = (options & ~MT_ST_CLEAR_DEFAULT);
+ value = (options & MT_ST_CLEAR_DEFAULT);
+ if (code == MT_ST_DEF_DENSITY) {
+ if (value == MT_ST_CLEAR_DEFAULT) {
+ STm->default_density = (-1);
+ DEBC_printk(STp,
+ "Density default disabled.\n");
+ } else {
+ STm->default_density = value & 0xff;
+ DEBC_printk(STp, "Density default set to %x\n",
+ STm->default_density);
+ if (STp->ready == ST_READY) {
+ STp->density_changed = 0;
+ set_mode_densblk(STp, STm);
+ }
+ }
+ } else if (code == MT_ST_DEF_DRVBUFFER) {
+ if (value == MT_ST_CLEAR_DEFAULT) {
+ STp->default_drvbuffer = 0xff;
+ DEBC_printk(STp,
+ "Drive buffer default disabled.\n");
+ } else {
+ STp->default_drvbuffer = value & 7;
+ DEBC_printk(STp,
+ "Drive buffer default set to %x\n",
+ STp->default_drvbuffer);
+ if (STp->ready == ST_READY)
+ st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer);
+ }
+ } else if (code == MT_ST_DEF_COMPRESSION) {
+ if (value == MT_ST_CLEAR_DEFAULT) {
+ STm->default_compression = ST_DONT_TOUCH;
+ DEBC_printk(STp,
+ "Compression default disabled.\n");
+ } else {
+ if ((value & 0xff00) != 0) {
+ STp->c_algo = (value & 0xff00) >> 8;
+ DEBC_printk(STp, "Compression "
+ "algorithm set to 0x%x.\n",
+ STp->c_algo);
+ }
+ if ((value & 0xff) != 0xff) {
+ STm->default_compression = (value & 1 ? ST_YES : ST_NO);
+ DEBC_printk(STp, "Compression default "
+ "set to %x\n",
+ (value & 1));
+ if (STp->ready == ST_READY) {
+ STp->compression_changed = 0;
+ st_compression(STp, (STm->default_compression == ST_YES));
+ }
+ }
+ }
+ }
+ } else
+ return (-EIO);
+
+ return 0;
+}
+
+#define MODE_HEADER_LENGTH 4
+
+/* Mode header and page byte offsets */
+#define MH_OFF_DATA_LENGTH 0
+#define MH_OFF_MEDIUM_TYPE 1
+#define MH_OFF_DEV_SPECIFIC 2
+#define MH_OFF_BDESCS_LENGTH 3
+#define MP_OFF_PAGE_NBR 0
+#define MP_OFF_PAGE_LENGTH 1
+
+/* Mode header and page bit masks */
+#define MH_BIT_WP 0x80
+#define MP_MSK_PAGE_NBR 0x3f
+
+/* Don't return block descriptors */
+#define MODE_SENSE_OMIT_BDESCS 0x08
+
+#define MODE_SELECT_PAGE_FORMAT 0x10
+
+/* Read a mode page into the tape buffer. The block descriptors are included
+ if incl_block_descs is true. The page control is ored to the page number
+ parameter, if necessary. */
+static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
+{
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SENSE;
+ if (omit_block_descs)
+ cmd[1] = MODE_SENSE_OMIT_BDESCS;
+ cmd[2] = page;
+ cmd[4] = 255;
+
+ SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE,
+ STp->device->request_queue->rq_timeout, 0, 1);
+ if (SRpnt == NULL)
+ return (STp->buffer)->syscall_result;
+
+ st_release_request(SRpnt);
+
+ return STp->buffer->syscall_result;
+}
+
+
+/* Send the mode page in the tape buffer to the drive. Assumes that the mode data
+ in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
+static int write_mode_page(struct scsi_tape *STp, int page, int slow)
+{
+ int pgo;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+ int timeout;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = MODE_SELECT;
+ cmd[1] = MODE_SELECT_PAGE_FORMAT;
+ pgo = MODE_HEADER_LENGTH + (STp->buffer)->b_data[MH_OFF_BDESCS_LENGTH];
+ cmd[4] = pgo + (STp->buffer)->b_data[pgo + MP_OFF_PAGE_LENGTH] + 2;
+
+ /* Clear reserved fields */
+ (STp->buffer)->b_data[MH_OFF_DATA_LENGTH] = 0;
+ (STp->buffer)->b_data[MH_OFF_MEDIUM_TYPE] = 0;
+ (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
+ (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
+
+ timeout = slow ?
+ STp->long_timeout : STp->device->request_queue->rq_timeout;
+ SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE,
+ timeout, 0, 1);
+ if (SRpnt == NULL)
+ return (STp->buffer)->syscall_result;
+
+ st_release_request(SRpnt);
+
+ return STp->buffer->syscall_result;
+}
+
+
+#define COMPRESSION_PAGE 0x0f
+#define COMPRESSION_PAGE_LENGTH 16
+
+#define CP_OFF_DCE_DCC 2
+#define CP_OFF_C_ALGO 7
+
+#define DCE_MASK 0x80
+#define DCC_MASK 0x40
+#define RED_MASK 0x60
+
+
+/* Control the compression with mode page 15. Algorithm not changed if zero.
+
+ The block descriptors are read and written because Sony SDT-7000 does not
+ work without this (suggestion from Michael Schaefer <Michael.Schaefer@dlr.de>).
+ Including block descriptors should not cause any harm to other drives. */
+
+static int st_compression(struct scsi_tape * STp, int state)
+{
+ int retval;
+ int mpoffs; /* Offset to mode page start */
+ unsigned char *b_data = (STp->buffer)->b_data;
+
+ if (STp->ready != ST_READY)
+ return (-EIO);
+
+ /* Read the current page contents */
+ retval = read_mode_page(STp, COMPRESSION_PAGE, 0);
+ if (retval) {
+ DEBC_printk(STp, "Compression mode page not supported.\n");
+ return (-EIO);
+ }
+
+ mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH];
+ DEBC_printk(STp, "Compression state is %d.\n",
+ (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0));
+
+ /* Check if compression can be changed */
+ if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) {
+ DEBC_printk(STp, "Compression not supported.\n");
+ return (-EIO);
+ }
+
+ /* Do the change */
+ if (state) {
+ b_data[mpoffs + CP_OFF_DCE_DCC] |= DCE_MASK;
+ if (STp->c_algo != 0)
+ b_data[mpoffs + CP_OFF_C_ALGO] = STp->c_algo;
+ }
+ else {
+ b_data[mpoffs + CP_OFF_DCE_DCC] &= ~DCE_MASK;
+ if (STp->c_algo != 0)
+ b_data[mpoffs + CP_OFF_C_ALGO] = 0; /* no compression */
+ }
+
+ retval = write_mode_page(STp, COMPRESSION_PAGE, 0);
+ if (retval) {
+ DEBC_printk(STp, "Compression change failed.\n");
+ return (-EIO);
+ }
+ DEBC_printk(STp, "Compression state changed to %d.\n", state);
+
+ STp->compression_changed = 1;
+ return 0;
+}
+
+
+/* Process the load and unload commands (does unload if the load code is zero) */
+static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code)
+{
+ int retval = (-EIO), timeout;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_partstat *STps;
+ struct st_request *SRpnt;
+
+ if (STp->ready != ST_READY && !load_code) {
+ if (STp->ready == ST_NO_TAPE)
+ return (-ENOMEDIUM);
+ else
+ return (-EIO);
+ }
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ cmd[0] = START_STOP;
+ if (load_code)
+ cmd[4] |= 1;
+ /*
+ * If arg >= 1 && arg <= 6 Enhanced load/unload in HP C1553A
+ */
+ if (load_code >= 1 + MT_ST_HPLOADER_OFFSET
+ && load_code <= 6 + MT_ST_HPLOADER_OFFSET) {
+ DEBC_printk(STp, " Enhanced %sload slot %2d.\n",
+ (cmd[4]) ? "" : "un",
+ load_code - MT_ST_HPLOADER_OFFSET);
+ cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */
+ }
+ if (STp->immediate) {
+ cmd[1] = 1; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+ else
+ timeout = STp->long_timeout;
+
+ DEBC(
+ if (!load_code)
+ st_printk(ST_DEB_MSG, STp, "Unloading tape.\n");
+ else
+ st_printk(ST_DEB_MSG, STp, "Loading tape.\n");
+ );
+
+ SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
+ timeout, MAX_RETRIES, 1);
+ if (!SRpnt)
+ return (STp->buffer)->syscall_result;
+
+ retval = (STp->buffer)->syscall_result;
+ st_release_request(SRpnt);
+
+ if (!retval) { /* SCSI command successful */
+
+ if (!load_code) {
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+ else {
+ STp->rew_at_close = STp->autorew_dev;
+ retval = check_tape(STp, filp);
+ if (retval > 0)
+ retval = 0;
+ }
+ }
+ else {
+ STps = &(STp->ps[STp->partition]);
+ STps->drv_file = STps->drv_block = (-1);
+ }
+
+ return retval;
+}
+
+#if DEBUG
+#define ST_DEB_FORWARD 0
+#define ST_DEB_BACKWARD 1
+static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd)
+{
+ s32 sc;
+
+ if (!debugging)
+ return;
+
+ sc = cmd[2] & 0x80 ? 0xff000000 : 0;
+ sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
+ if (direction)
+ sc = -sc;
+ st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n",
+ direction ? "backward" : "forward", sc, units);
+}
+#else
+#define ST_DEB_FORWARD 0
+#define ST_DEB_BACKWARD 1
+static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) {}
+#endif
+
+
+/* Internal ioctl function */
+static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long arg)
+{
+ int timeout;
+ long ltmp;
+ int ioctl_result;
+ int chg_eof = 1;
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+ struct st_partstat *STps;
+ int fileno, blkno, at_sm, undone;
+ int datalen = 0, direction = DMA_NONE;
+
+ WARN_ON(STp->buffer->do_dio != 0);
+ if (STp->ready != ST_READY) {
+ if (STp->ready == ST_NO_TAPE)
+ return (-ENOMEDIUM);
+ else
+ return (-EIO);
+ }
+ timeout = STp->long_timeout;
+ STps = &(STp->ps[STp->partition]);
+ fileno = STps->drv_file;
+ blkno = STps->drv_block;
+ at_sm = STps->at_sm;
+
+ memset(cmd, 0, MAX_COMMAND_SIZE);
+ switch (cmd_in) {
+ case MTFSFM:
+ chg_eof = 0; /* Changed from the FSF after this */
+ case MTFSF:
+ cmd[0] = SPACE;
+ cmd[1] = 0x01; /* Space FileMarks */
+ cmd[2] = (arg >> 16);
+ cmd[3] = (arg >> 8);
+ cmd[4] = arg;
+ deb_space_print(STp, ST_DEB_FORWARD, "filemarks", cmd);
+ if (fileno >= 0)
+ fileno += arg;
+ blkno = 0;
+ at_sm &= (arg == 0);
+ break;
+ case MTBSFM:
+ chg_eof = 0; /* Changed from the FSF after this */
+ case MTBSF:
+ cmd[0] = SPACE;
+ cmd[1] = 0x01; /* Space FileMarks */
+ ltmp = (-arg);
+ cmd[2] = (ltmp >> 16);
+ cmd[3] = (ltmp >> 8);
+ cmd[4] = ltmp;
+ deb_space_print(STp, ST_DEB_BACKWARD, "filemarks", cmd);
+ if (fileno >= 0)
+ fileno -= arg;
+ blkno = (-1); /* We can't know the block number */
+ at_sm &= (arg == 0);
+ break;
+ case MTFSR:
+ cmd[0] = SPACE;
+ cmd[1] = 0x00; /* Space Blocks */
+ cmd[2] = (arg >> 16);
+ cmd[3] = (arg >> 8);
+ cmd[4] = arg;
+ deb_space_print(STp, ST_DEB_FORWARD, "blocks", cmd);
+ if (blkno >= 0)
+ blkno += arg;
+ at_sm &= (arg == 0);
+ break;
+ case MTBSR:
+ cmd[0] = SPACE;
+ cmd[1] = 0x00; /* Space Blocks */
+ ltmp = (-arg);
+ cmd[2] = (ltmp >> 16);
+ cmd[3] = (ltmp >> 8);
+ cmd[4] = ltmp;
+ deb_space_print(STp, ST_DEB_BACKWARD, "blocks", cmd);
+ if (blkno >= 0)
+ blkno -= arg;
+ at_sm &= (arg == 0);
+ break;
+ case MTFSS:
+ cmd[0] = SPACE;
+ cmd[1] = 0x04; /* Space Setmarks */
+ cmd[2] = (arg >> 16);
+ cmd[3] = (arg >> 8);
+ cmd[4] = arg;
+ deb_space_print(STp, ST_DEB_FORWARD, "setmarks", cmd);
+ if (arg != 0) {
+ blkno = fileno = (-1);
+ at_sm = 1;
+ }
+ break;
+ case MTBSS:
+ cmd[0] = SPACE;
+ cmd[1] = 0x04; /* Space Setmarks */
+ ltmp = (-arg);
+ cmd[2] = (ltmp >> 16);
+ cmd[3] = (ltmp >> 8);
+ cmd[4] = ltmp;
+ deb_space_print(STp, ST_DEB_BACKWARD, "setmarks", cmd);
+ if (arg != 0) {
+ blkno = fileno = (-1);
+ at_sm = 1;
+ }
+ break;
+ case MTWEOF:
+ case MTWEOFI:
+ case MTWSM:
+ if (STp->write_prot)
+ return (-EACCES);
+ cmd[0] = WRITE_FILEMARKS;
+ if (cmd_in == MTWSM)
+ cmd[1] = 2;
+ if (cmd_in == MTWEOFI ||
+ (cmd_in == MTWEOF && STp->immediate_filemark))
+ cmd[1] |= 1;
+ cmd[2] = (arg >> 16);
+ cmd[3] = (arg >> 8);
+ cmd[4] = arg;
+ timeout = STp->device->request_queue->rq_timeout;
+ DEBC(
+ if (cmd_in != MTWSM)
+ st_printk(ST_DEB_MSG, STp,
+ "Writing %d filemarks.\n",
+ cmd[2] * 65536 +
+ cmd[3] * 256 +
+ cmd[4]);
+ else
+ st_printk(ST_DEB_MSG, STp,
+ "Writing %d setmarks.\n",
+ cmd[2] * 65536 +
+ cmd[3] * 256 +
+ cmd[4]);
+ )
+ if (fileno >= 0)
+ fileno += arg;
+ blkno = 0;
+ at_sm = (cmd_in == MTWSM);
+ break;
+ case MTREW:
+ cmd[0] = REZERO_UNIT;
+ if (STp->immediate) {
+ cmd[1] = 1; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+ DEBC_printk(STp, "Rewinding tape.\n");
+ fileno = blkno = at_sm = 0;
+ break;
+ case MTNOP:
+ DEBC_printk(STp, "No op on tape.\n");
+ return 0; /* Should do something ? */
+ break;
+ case MTRETEN:
+ cmd[0] = START_STOP;
+ if (STp->immediate) {
+ cmd[1] = 1; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+ cmd[4] = 3;
+ DEBC_printk(STp, "Retensioning tape.\n");
+ fileno = blkno = at_sm = 0;
+ break;
+ case MTEOM:
+ if (!STp->fast_mteom) {
+ /* space to the end of tape */
+ ioctl_result = st_int_ioctl(STp, MTFSF, 0x7fffff);
+ fileno = STps->drv_file;
+ if (STps->eof >= ST_EOD_1)
+ return 0;
+ /* The next lines would hide the number of spaced FileMarks
+ That's why I inserted the previous lines. I had no luck
+ with detecting EOM with FSF, so we go now to EOM.
+ Joerg Weule */
+ } else
+ fileno = (-1);
+ cmd[0] = SPACE;
+ cmd[1] = 3;
+ DEBC_printk(STp, "Spacing to end of recorded medium.\n");
+ blkno = -1;
+ at_sm = 0;
+ break;
+ case MTERASE:
+ if (STp->write_prot)
+ return (-EACCES);
+ cmd[0] = ERASE;
+ cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */
+ if (STp->immediate) {
+ cmd[1] |= 2; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+ else
+ timeout = STp->long_timeout * 8;
+
+ DEBC_printk(STp, "Erasing tape.\n");
+ fileno = blkno = at_sm = 0;
+ break;
+ case MTSETBLK: /* Set block length */
+ case MTSETDENSITY: /* Set tape density */
+ case MTSETDRVBUFFER: /* Set drive buffering */
+ case SET_DENS_AND_BLK: /* Set density and block size */
+ chg_eof = 0;
+ if (STp->dirty || (STp->buffer)->buffer_bytes != 0)
+ return (-EIO); /* Not allowed if data in buffer */
+ if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) &&
+ (arg & MT_ST_BLKSIZE_MASK) != 0 &&
+ STp->max_block > 0 &&
+ ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block ||
+ (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) {
+ st_printk(KERN_WARNING, STp, "Illegal block size.\n");
+ return (-EINVAL);
+ }
+ cmd[0] = MODE_SELECT;
+ if ((STp->use_pf & USE_PF))
+ cmd[1] = MODE_SELECT_PAGE_FORMAT;
+ cmd[4] = datalen = 12;
+ direction = DMA_TO_DEVICE;
+
+ memset((STp->buffer)->b_data, 0, 12);
+ if (cmd_in == MTSETDRVBUFFER)
+ (STp->buffer)->b_data[2] = (arg & 7) << 4;
+ else
+ (STp->buffer)->b_data[2] =
+ STp->drv_buffer << 4;
+ (STp->buffer)->b_data[3] = 8; /* block descriptor length */
+ if (cmd_in == MTSETDENSITY) {
+ (STp->buffer)->b_data[4] = arg;
+ STp->density_changed = 1; /* At least we tried ;-) */
+ } else if (cmd_in == SET_DENS_AND_BLK)
+ (STp->buffer)->b_data[4] = arg >> 24;
+ else
+ (STp->buffer)->b_data[4] = STp->density;
+ if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
+ ltmp = arg & MT_ST_BLKSIZE_MASK;
+ if (cmd_in == MTSETBLK)
+ STp->blksize_changed = 1; /* At least we tried ;-) */
+ } else
+ ltmp = STp->block_size;
+ (STp->buffer)->b_data[9] = (ltmp >> 16);
+ (STp->buffer)->b_data[10] = (ltmp >> 8);
+ (STp->buffer)->b_data[11] = ltmp;
+ timeout = STp->device->request_queue->rq_timeout;
+ DEBC(
+ if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
+ st_printk(ST_DEB_MSG, STp,
+ "Setting block size to %d bytes.\n",
+ (STp->buffer)->b_data[9] * 65536 +
+ (STp->buffer)->b_data[10] * 256 +
+ (STp->buffer)->b_data[11]);
+ if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK)
+ st_printk(ST_DEB_MSG, STp,
+ "Setting density code to %x.\n",
+ (STp->buffer)->b_data[4]);
+ if (cmd_in == MTSETDRVBUFFER)
+ st_printk(ST_DEB_MSG, STp,
+ "Setting drive buffer code to %d.\n",
+ ((STp->buffer)->b_data[2] >> 4) & 7);
+ )
+ break;
+ default:
+ return (-ENOSYS);
+ }
+
+ SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
+ timeout, MAX_RETRIES, 1);
+ if (!SRpnt)
+ return (STp->buffer)->syscall_result;
+
+ ioctl_result = (STp->buffer)->syscall_result;
+
+ if (!ioctl_result) { /* SCSI command successful */
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ STps->drv_block = blkno;
+ STps->drv_file = fileno;
+ STps->at_sm = at_sm;
+
+ if (cmd_in == MTBSFM)
+ ioctl_result = st_int_ioctl(STp, MTFSF, 1);
+ else if (cmd_in == MTFSFM)
+ ioctl_result = st_int_ioctl(STp, MTBSF, 1);
+
+ if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
+ STp->block_size = arg & MT_ST_BLKSIZE_MASK;
+ if (STp->block_size != 0) {
+ (STp->buffer)->buffer_blocks =
+ (STp->buffer)->buffer_size / STp->block_size;
+ }
+ (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
+ if (cmd_in == SET_DENS_AND_BLK)
+ STp->density = arg >> MT_ST_DENSITY_SHIFT;
+ } else if (cmd_in == MTSETDRVBUFFER)
+ STp->drv_buffer = (arg & 7);
+ else if (cmd_in == MTSETDENSITY)
+ STp->density = arg;
+
+ if (cmd_in == MTEOM)
+ STps->eof = ST_EOD;
+ else if (cmd_in == MTFSF)
+ STps->eof = ST_FM;
+ else if (chg_eof)
+ STps->eof = ST_NOEOF;
+
+ if (cmd_in == MTWEOF || cmd_in == MTWEOFI)
+ STps->rw = ST_IDLE; /* prevent automatic WEOF at close */
+ } else { /* SCSI command was not completely successful. Don't return
+ from this block without releasing the SCSI command block! */
+ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+
+ if (cmdstatp->flags & SENSE_EOM) {
+ if (cmd_in != MTBSF && cmd_in != MTBSFM &&
+ cmd_in != MTBSR && cmd_in != MTBSS)
+ STps->eof = ST_EOM_OK;
+ STps->drv_block = 0;
+ }
+
+ if (cmdstatp->remainder_valid)
+ undone = (int)cmdstatp->uremainder64;
+ else
+ undone = 0;
+
+ if ((cmd_in == MTWEOF || cmd_in == MTWEOFI) &&
+ cmdstatp->have_sense &&
+ (cmdstatp->flags & SENSE_EOM)) {
+ if (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
+ cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) {
+ ioctl_result = 0; /* EOF(s) written successfully at EOM */
+ STps->eof = ST_NOEOF;
+ } else { /* Writing EOF(s) failed */
+ if (fileno >= 0)
+ fileno -= undone;
+ if (undone < arg)
+ STps->eof = ST_NOEOF;
+ }
+ STps->drv_file = fileno;
+ } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) {
+ if (fileno >= 0)
+ STps->drv_file = fileno - undone;
+ else
+ STps->drv_file = fileno;
+ STps->drv_block = -1;
+ STps->eof = ST_NOEOF;
+ } else if ((cmd_in == MTBSF) || (cmd_in == MTBSFM)) {
+ if (arg > 0 && undone < 0) /* Some drives get this wrong */
+ undone = (-undone);
+ if (STps->drv_file >= 0)
+ STps->drv_file = fileno + undone;
+ STps->drv_block = 0;
+ STps->eof = ST_NOEOF;
+ } else if (cmd_in == MTFSR) {
+ if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */
+ if (STps->drv_file >= 0)
+ STps->drv_file++;
+ STps->drv_block = 0;
+ STps->eof = ST_FM;
+ } else {
+ if (blkno >= undone)
+ STps->drv_block = blkno - undone;
+ else
+ STps->drv_block = (-1);
+ STps->eof = ST_NOEOF;
+ }
+ } else if (cmd_in == MTBSR) {
+ if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */
+ STps->drv_file--;
+ STps->drv_block = (-1);
+ } else {
+ if (arg > 0 && undone < 0) /* Some drives get this wrong */
+ undone = (-undone);
+ if (STps->drv_block >= 0)
+ STps->drv_block = blkno + undone;
+ }
+ STps->eof = ST_NOEOF;
+ } else if (cmd_in == MTEOM) {
+ STps->drv_file = (-1);
+ STps->drv_block = (-1);
+ STps->eof = ST_EOD;
+ } else if (cmd_in == MTSETBLK ||
+ cmd_in == MTSETDENSITY ||
+ cmd_in == MTSETDRVBUFFER ||
+ cmd_in == SET_DENS_AND_BLK) {
+ if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
+ !(STp->use_pf & PF_TESTED)) {
+ /* Try the other possible state of Page Format if not
+ already tried */
+ STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ return st_int_ioctl(STp, cmd_in, arg);
+ }
+ } else if (chg_eof)
+ STps->eof = ST_NOEOF;
+
+ if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
+ STps->eof = ST_EOD;
+
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+ }
+
+ return ioctl_result;
+}
+
+
+/* Get the tape position. If bt == 2, arg points into a kernel space mt_loc
+ structure. */
+
+static int get_location(struct scsi_tape *STp, unsigned int *block, int *partition,
+ int logical)
+{
+ int result;
+ unsigned char scmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+
+ if (STp->ready != ST_READY)
+ return (-EIO);
+
+ memset(scmd, 0, MAX_COMMAND_SIZE);
+ if ((STp->device)->scsi_level < SCSI_2) {
+ scmd[0] = QFA_REQUEST_BLOCK;
+ scmd[4] = 3;
+ } else {
+ scmd[0] = READ_POSITION;
+ if (!logical && !STp->scsi2_logical)
+ scmd[1] = 1;
+ }
+ SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
+ STp->device->request_queue->rq_timeout,
+ MAX_READY_RETRIES, 1);
+ if (!SRpnt)
+ return (STp->buffer)->syscall_result;
+
+ if ((STp->buffer)->syscall_result != 0 ||
+ (STp->device->scsi_level >= SCSI_2 &&
+ ((STp->buffer)->b_data[0] & 4) != 0)) {
+ *block = *partition = 0;
+ DEBC_printk(STp, " Can't read tape position.\n");
+ result = (-EIO);
+ } else {
+ result = 0;
+ if ((STp->device)->scsi_level < SCSI_2) {
+ *block = ((STp->buffer)->b_data[0] << 16)
+ + ((STp->buffer)->b_data[1] << 8)
+ + (STp->buffer)->b_data[2];
+ *partition = 0;
+ } else {
+ *block = ((STp->buffer)->b_data[4] << 24)
+ + ((STp->buffer)->b_data[5] << 16)
+ + ((STp->buffer)->b_data[6] << 8)
+ + (STp->buffer)->b_data[7];
+ *partition = (STp->buffer)->b_data[1];
+ if (((STp->buffer)->b_data[0] & 0x80) &&
+ (STp->buffer)->b_data[1] == 0) /* BOP of partition 0 */
+ STp->ps[0].drv_block = STp->ps[0].drv_file = 0;
+ }
+ DEBC_printk(STp, "Got tape pos. blk %d part %d.\n",
+ *block, *partition);
+ }
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+
+ return result;
+}
+
+
+/* Set the tape block and partition. Negative partition means that only the
+ block should be set in vendor specific way. */
+static int set_location(struct scsi_tape *STp, unsigned int block, int partition,
+ int logical)
+{
+ struct st_partstat *STps;
+ int result, p;
+ unsigned int blk;
+ int timeout;
+ unsigned char scmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+
+ if (STp->ready != ST_READY)
+ return (-EIO);
+ timeout = STp->long_timeout;
+ STps = &(STp->ps[STp->partition]);
+
+ DEBC_printk(STp, "Setting block to %d and partition to %d.\n",
+ block, partition);
+ DEB(if (partition < 0)
+ return (-EIO); )
+
+ /* Update the location at the partition we are leaving */
+ if ((!STp->can_partitions && partition != 0) ||
+ partition >= ST_NBR_PARTITIONS)
+ return (-EINVAL);
+ if (partition != STp->partition) {
+ if (get_location(STp, &blk, &p, 1))
+ STps->last_block_valid = 0;
+ else {
+ STps->last_block_valid = 1;
+ STps->last_block_visited = blk;
+ DEBC_printk(STp, "Visited block %d for "
+ "partition %d saved.\n",
+ blk, STp->partition);
+ }
+ }
+
+ memset(scmd, 0, MAX_COMMAND_SIZE);
+ if ((STp->device)->scsi_level < SCSI_2) {
+ scmd[0] = QFA_SEEK_BLOCK;
+ scmd[2] = (block >> 16);
+ scmd[3] = (block >> 8);
+ scmd[4] = block;
+ scmd[5] = 0;
+ } else {
+ scmd[0] = SEEK_10;
+ scmd[3] = (block >> 24);
+ scmd[4] = (block >> 16);
+ scmd[5] = (block >> 8);
+ scmd[6] = block;
+ if (!logical && !STp->scsi2_logical)
+ scmd[1] = 4;
+ if (STp->partition != partition) {
+ scmd[1] |= 2;
+ scmd[8] = partition;
+ DEBC_printk(STp, "Trying to change partition "
+ "from %d to %d\n", STp->partition,
+ partition);
+ }
+ }
+ if (STp->immediate) {
+ scmd[1] |= 1; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+
+ SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
+ timeout, MAX_READY_RETRIES, 1);
+ if (!SRpnt)
+ return (STp->buffer)->syscall_result;
+
+ STps->drv_block = STps->drv_file = (-1);
+ STps->eof = ST_NOEOF;
+ if ((STp->buffer)->syscall_result != 0) {
+ result = (-EIO);
+ if (STp->can_partitions &&
+ (STp->device)->scsi_level >= SCSI_2 &&
+ (p = find_partition(STp)) >= 0)
+ STp->partition = p;
+ } else {
+ if (STp->can_partitions) {
+ STp->partition = partition;
+ STps = &(STp->ps[partition]);
+ if (!STps->last_block_valid ||
+ STps->last_block_visited != block) {
+ STps->at_sm = 0;
+ STps->rw = ST_IDLE;
+ }
+ } else
+ STps->at_sm = 0;
+ if (block == 0)
+ STps->drv_block = STps->drv_file = 0;
+ result = 0;
+ }
+
+ st_release_request(SRpnt);
+ SRpnt = NULL;
+
+ return result;
+}
+
+
+/* Find the current partition number for the drive status. Called from open and
+ returns either partition number of negative error code. */
+static int find_partition(struct scsi_tape *STp)
+{
+ int i, partition;
+ unsigned int block;
+
+ if ((i = get_location(STp, &block, &partition, 1)) < 0)
+ return i;
+ if (partition >= ST_NBR_PARTITIONS)
+ return (-EIO);
+ return partition;
+}
+
+
+/* Change the partition if necessary */
+static int switch_partition(struct scsi_tape *STp)
+{
+ struct st_partstat *STps;
+
+ if (STp->partition == STp->new_partition)
+ return 0;
+ STps = &(STp->ps[STp->new_partition]);
+ if (!STps->last_block_valid)
+ STps->last_block_visited = 0;
+ return set_location(STp, STps->last_block_visited, STp->new_partition, 1);
+}
+
+/* Functions for reading and writing the medium partition mode page. */
+
+#define PART_PAGE 0x11
+#define PART_PAGE_FIXED_LENGTH 8
+
+#define PP_OFF_MAX_ADD_PARTS 2
+#define PP_OFF_NBR_ADD_PARTS 3
+#define PP_OFF_FLAGS 4
+#define PP_OFF_PART_UNITS 6
+#define PP_OFF_RESERVED 7
+
+#define PP_BIT_IDP 0x20
+#define PP_MSK_PSUM_MB 0x10
+
+/* Get the number of partitions on the tape. As a side effect reads the
+ mode page into the tape buffer. */
+static int nbr_partitions(struct scsi_tape *STp)
+{
+ int result;
+
+ if (STp->ready != ST_READY)
+ return (-EIO);
+
+ result = read_mode_page(STp, PART_PAGE, 1);
+
+ if (result) {
+ DEBC_printk(STp, "Can't read medium partition page.\n");
+ result = (-EIO);
+ } else {
+ result = (STp->buffer)->b_data[MODE_HEADER_LENGTH +
+ PP_OFF_NBR_ADD_PARTS] + 1;
+ DEBC_printk(STp, "Number of partitions %d.\n", result);
+ }
+
+ return result;
+}
+
+
+/* Partition the tape into two partitions if size > 0 or one partition if
+ size == 0.
+
+ The block descriptors are read and written because Sony SDT-7000 does not
+ work without this (suggestion from Michael Schaefer <Michael.Schaefer@dlr.de>).
+
+ My HP C1533A drive returns only one partition size field. This is used to
+ set the size of partition 1. There is no size field for the default partition.
+ Michael Schaefer's Sony SDT-7000 returns two descriptors and the second is
+ used to set the size of partition 1 (this is what the SCSI-3 standard specifies).
+ The following algorithm is used to accommodate both drives: if the number of
+ partition size fields is greater than the maximum number of additional partitions
+ in the mode page, the second field is used. Otherwise the first field is used.
+
+ For Seagate DDS drives the page length must be 8 when no partitions is defined
+ and 10 when 1 partition is defined (information from Eric Lee Green). This is
+ is acceptable also to some other old drives and enforced if the first partition
+ size field is used for the first additional partition size.
+ */
+static int partition_tape(struct scsi_tape *STp, int size)
+{
+ int result;
+ int pgo, psd_cnt, psdo;
+ unsigned char *bp;
+
+ result = read_mode_page(STp, PART_PAGE, 0);
+ if (result) {
+ DEBC_printk(STp, "Can't read partition mode page.\n");
+ return result;
+ }
+ /* The mode page is in the buffer. Let's modify it and write it. */
+ bp = (STp->buffer)->b_data;
+ pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH];
+ DEBC_printk(STp, "Partition page length is %d bytes.\n",
+ bp[pgo + MP_OFF_PAGE_LENGTH] + 2);
+
+ psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2;
+ psdo = pgo + PART_PAGE_FIXED_LENGTH;
+ if (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS]) {
+ bp[psdo] = bp[psdo + 1] = 0xff; /* Rest of the tape */
+ psdo += 2;
+ }
+ memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2);
+
+ DEBC_printk(STp, "psd_cnt %d, max.parts %d, nbr_parts %d\n",
+ psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS],
+ bp[pgo + PP_OFF_NBR_ADD_PARTS]);
+
+ if (size <= 0) {
+ bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0;
+ if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS])
+ bp[pgo + MP_OFF_PAGE_LENGTH] = 6;
+ DEBC_printk(STp, "Formatting tape with one partition.\n");
+ } else {
+ bp[psdo] = (size >> 8) & 0xff;
+ bp[psdo + 1] = size & 0xff;
+ bp[pgo + 3] = 1;
+ if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8)
+ bp[pgo + MP_OFF_PAGE_LENGTH] = 8;
+ DEBC_printk(STp, "Formatting tape with two partitions "
+ "(1 = %d MB).\n", size);
+ }
+ bp[pgo + PP_OFF_PART_UNITS] = 0;
+ bp[pgo + PP_OFF_RESERVED] = 0;
+ bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | PP_MSK_PSUM_MB;
+
+ result = write_mode_page(STp, PART_PAGE, 1);
+ if (result) {
+ st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n");
+ result = (-EIO);
+ }
+
+ return result;
+}
+
+
+
+/* The ioctl command */
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+{
+ int i, cmd_nr, cmd_type, bt;
+ int retval = 0;
+ unsigned int blk;
+ struct scsi_tape *STp = file->private_data;
+ struct st_modedef *STm;
+ struct st_partstat *STps;
+ void __user *p = (void __user *)arg;
+
+ if (mutex_lock_interruptible(&STp->lock))
+ return -ERESTARTSYS;
+
+ DEB(
+ if (debugging && !STp->in_use) {
+ st_printk(ST_DEB_MSG, STp, "Incorrect device.\n");
+ retval = (-EIO);
+ goto out;
+ } ) /* end DEB */
+
+ STm = &(STp->modes[STp->current_mode]);
+ STps = &(STp->ps[STp->partition]);
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited.
+ */
+ retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in,
+ file->f_flags & O_NDELAY);
+ if (retval)
+ goto out;
+
+ cmd_type = _IOC_TYPE(cmd_in);
+ cmd_nr = _IOC_NR(cmd_in);
+
+ if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) {
+ struct mtop mtc;
+
+ if (_IOC_SIZE(cmd_in) != sizeof(mtc)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ i = copy_from_user(&mtc, p, sizeof(struct mtop));
+ if (i) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
+ st_printk(KERN_WARNING, STp,
+ "MTSETDRVBUFFER only allowed for root.\n");
+ retval = (-EPERM);
+ goto out;
+ }
+ if (!STm->defined &&
+ (mtc.mt_op != MTSETDRVBUFFER &&
+ (mtc.mt_count & MT_ST_OPTIONS) == 0)) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if (!STp->pos_unknown) {
+
+ if (STps->eof == ST_FM_HIT) {
+ if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
+ mtc.mt_op == MTEOM) {
+ mtc.mt_count -= 1;
+ if (STps->drv_file >= 0)
+ STps->drv_file += 1;
+ } else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) {
+ mtc.mt_count += 1;
+ if (STps->drv_file >= 0)
+ STps->drv_file += 1;
+ }
+ }
+
+ if (mtc.mt_op == MTSEEK) {
+ /* Old position must be restored if partition will be
+ changed */
+ i = !STp->can_partitions ||
+ (STp->new_partition != STp->partition);
+ } else {
+ i = mtc.mt_op == MTREW || mtc.mt_op == MTOFFL ||
+ mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM ||
+ mtc.mt_op == MTLOCK || mtc.mt_op == MTLOAD ||
+ mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
+ mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM ||
+ mtc.mt_op == MTCOMPRESSION;
+ }
+ i = flush_buffer(STp, i);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ if (STps->rw == ST_WRITING &&
+ (mtc.mt_op == MTREW || mtc.mt_op == MTOFFL ||
+ mtc.mt_op == MTSEEK ||
+ mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM)) {
+ i = st_int_ioctl(STp, MTWEOF, 1);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM)
+ mtc.mt_count++;
+ STps->rw = ST_IDLE;
+ }
+
+ } else {
+ /*
+ * If there was a bus reset, block further access
+ * to this device. If the user wants to rewind the tape,
+ * then reset the flag and allow access again.
+ */
+ if (mtc.mt_op != MTREW &&
+ mtc.mt_op != MTOFFL &&
+ mtc.mt_op != MTRETEN &&
+ mtc.mt_op != MTERASE &&
+ mtc.mt_op != MTSEEK &&
+ mtc.mt_op != MTEOM) {
+ retval = (-EIO);
+ goto out;
+ }
+ reset_state(STp);
+ /* remove this when the midlevel properly clears was_reset */
+ STp->device->was_reset = 0;
+ }
+
+ if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
+ mtc.mt_op != MTSETDENSITY && mtc.mt_op != MTWSM &&
+ mtc.mt_op != MTSETDRVBUFFER && mtc.mt_op != MTSETPART)
+ STps->rw = ST_IDLE; /* Prevent automatic WEOF and fsf */
+
+ if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED)
+ do_door_lock(STp, 0); /* Ignore result! */
+
+ if (mtc.mt_op == MTSETDRVBUFFER &&
+ (mtc.mt_count & MT_ST_OPTIONS) != 0) {
+ retval = st_set_options(STp, mtc.mt_count);
+ goto out;
+ }
+
+ if (mtc.mt_op == MTSETPART) {
+ if (!STp->can_partitions ||
+ mtc.mt_count < 0 || mtc.mt_count >= ST_NBR_PARTITIONS) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ if (mtc.mt_count >= STp->nbr_partitions &&
+ (STp->nbr_partitions = nbr_partitions(STp)) < 0) {
+ retval = (-EIO);
+ goto out;
+ }
+ if (mtc.mt_count >= STp->nbr_partitions) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ STp->new_partition = mtc.mt_count;
+ retval = 0;
+ goto out;
+ }
+
+ if (mtc.mt_op == MTMKPART) {
+ if (!STp->can_partitions) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ if ((i = st_int_ioctl(STp, MTREW, 0)) < 0 ||
+ (i = partition_tape(STp, mtc.mt_count)) < 0) {
+ retval = i;
+ goto out;
+ }
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STp->ps[i].rw = ST_IDLE;
+ STp->ps[i].at_sm = 0;
+ STp->ps[i].last_block_valid = 0;
+ }
+ STp->partition = STp->new_partition = 0;
+ STp->nbr_partitions = 1; /* Bad guess ?-) */
+ STps->drv_block = STps->drv_file = 0;
+ retval = 0;
+ goto out;
+ }
+
+ if (mtc.mt_op == MTSEEK) {
+ i = set_location(STp, mtc.mt_count, STp->new_partition, 0);
+ if (!STp->can_partitions)
+ STp->ps[0].rw = ST_IDLE;
+ retval = i;
+ goto out;
+ }
+
+ if (mtc.mt_op == MTUNLOAD || mtc.mt_op == MTOFFL) {
+ retval = do_load_unload(STp, file, 0);
+ goto out;
+ }
+
+ if (mtc.mt_op == MTLOAD) {
+ retval = do_load_unload(STp, file, max(1, mtc.mt_count));
+ goto out;
+ }
+
+ if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) {
+ retval = do_door_lock(STp, (mtc.mt_op == MTLOCK));
+ goto out;
+ }
+
+ if (STp->can_partitions && STp->ready == ST_READY &&
+ (i = switch_partition(STp)) < 0) {
+ retval = i;
+ goto out;
+ }
+
+ if (mtc.mt_op == MTCOMPRESSION)
+ retval = st_compression(STp, (mtc.mt_count & 1));
+ else
+ retval = st_int_ioctl(STp, mtc.mt_op, mtc.mt_count);
+ goto out;
+ }
+ if (!STm->defined) {
+ retval = (-ENXIO);
+ goto out;
+ }
+
+ if ((i = flush_buffer(STp, 0)) < 0) {
+ retval = i;
+ goto out;
+ }
+ if (STp->can_partitions &&
+ (i = switch_partition(STp)) < 0) {
+ retval = i;
+ goto out;
+ }
+
+ if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
+ struct mtget mt_status;
+
+ if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+
+ mt_status.mt_type = STp->tape_type;
+ mt_status.mt_dsreg =
+ ((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) |
+ ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
+ mt_status.mt_blkno = STps->drv_block;
+ mt_status.mt_fileno = STps->drv_file;
+ if (STp->block_size != 0) {
+ if (STps->rw == ST_WRITING)
+ mt_status.mt_blkno +=
+ (STp->buffer)->buffer_bytes / STp->block_size;
+ else if (STps->rw == ST_READING)
+ mt_status.mt_blkno -=
+ ((STp->buffer)->buffer_bytes +
+ STp->block_size - 1) / STp->block_size;
+ }
+
+ mt_status.mt_gstat = 0;
+ if (STp->drv_write_prot)
+ mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff);
+ if (mt_status.mt_blkno == 0) {
+ if (mt_status.mt_fileno == 0)
+ mt_status.mt_gstat |= GMT_BOT(0xffffffff);
+ else
+ mt_status.mt_gstat |= GMT_EOF(0xffffffff);
+ }
+ mt_status.mt_erreg = (STp->recover_reg << MT_ST_SOFTERR_SHIFT);
+ mt_status.mt_resid = STp->partition;
+ if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR)
+ mt_status.mt_gstat |= GMT_EOT(0xffffffff);
+ else if (STps->eof >= ST_EOM_OK)
+ mt_status.mt_gstat |= GMT_EOD(0xffffffff);
+ if (STp->density == 1)
+ mt_status.mt_gstat |= GMT_D_800(0xffffffff);
+ else if (STp->density == 2)
+ mt_status.mt_gstat |= GMT_D_1600(0xffffffff);
+ else if (STp->density == 3)
+ mt_status.mt_gstat |= GMT_D_6250(0xffffffff);
+ if (STp->ready == ST_READY)
+ mt_status.mt_gstat |= GMT_ONLINE(0xffffffff);
+ if (STp->ready == ST_NO_TAPE)
+ mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff);
+ if (STps->at_sm)
+ mt_status.mt_gstat |= GMT_SM(0xffffffff);
+ if (STm->do_async_writes ||
+ (STm->do_buffer_writes && STp->block_size != 0) ||
+ STp->drv_buffer != 0)
+ mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff);
+ if (STp->cleaning_req)
+ mt_status.mt_gstat |= GMT_CLN(0xffffffff);
+
+ i = copy_to_user(p, &mt_status, sizeof(struct mtget));
+ if (i) {
+ retval = (-EFAULT);
+ goto out;
+ }
+
+ STp->recover_reg = 0; /* Clear after read */
+ retval = 0;
+ goto out;
+ } /* End of MTIOCGET */
+ if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
+ struct mtpos mt_pos;
+ if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) {
+ retval = (-EINVAL);
+ goto out;
+ }
+ if ((i = get_location(STp, &blk, &bt, 0)) < 0) {
+ retval = i;
+ goto out;
+ }
+ mt_pos.mt_blkno = blk;
+ i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
+ if (i)
+ retval = (-EFAULT);
+ goto out;
+ }
+ mutex_unlock(&STp->lock);
+ switch (cmd_in) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ break;
+ default:
+ if ((cmd_in == SG_IO ||
+ cmd_in == SCSI_IOCTL_SEND_COMMAND ||
+ cmd_in == CDROM_SEND_PACKET) &&
+ !capable(CAP_SYS_RAWIO))
+ i = -EPERM;
+ else
+ i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
+ file->f_mode, cmd_in, p);
+ if (i != -ENOTTY)
+ return i;
+ break;
+ }
+ retval = scsi_ioctl(STp->device, cmd_in, p);
+ if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+ return retval;
+
+ out:
+ mutex_unlock(&STp->lock);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct scsi_tape *STp = file->private_data;
+ struct scsi_device *sdev = STp->device;
+ int ret = -ENOIOCTLCMD;
+ if (sdev->host->hostt->compat_ioctl) {
+
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+
+ }
+ return ret;
+}
+#endif
+
+
+
+/* Try to allocate a new tape buffer. Calling function must not hold
+ dev_arr_lock. */
+static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
+{
+ struct st_buffer *tb;
+
+ tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
+ if (!tb) {
+ printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
+ return NULL;
+ }
+ tb->frp_segs = 0;
+ tb->use_sg = max_sg;
+ tb->dma = need_dma;
+ tb->buffer_size = 0;
+
+ tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
+ GFP_ATOMIC);
+ if (!tb->reserved_pages) {
+ kfree(tb);
+ return NULL;
+ }
+
+ return tb;
+}
+
+
+/* Try to allocate enough space in the tape buffer */
+#define ST_MAX_ORDER 6
+
+static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
+{
+ int segs, max_segs, b_size, order, got;
+ gfp_t priority;
+
+ if (new_size <= STbuffer->buffer_size)
+ return 1;
+
+ if (STbuffer->buffer_size <= PAGE_SIZE)
+ normalize_buffer(STbuffer); /* Avoid extra segment */
+
+ max_segs = STbuffer->use_sg;
+
+ priority = GFP_KERNEL | __GFP_NOWARN;
+ if (need_dma)
+ priority |= GFP_DMA;
+
+ if (STbuffer->cleared)
+ priority |= __GFP_ZERO;
+
+ if (STbuffer->frp_segs) {
+ order = STbuffer->reserved_page_order;
+ b_size = PAGE_SIZE << order;
+ } else {
+ for (b_size = PAGE_SIZE, order = 0;
+ order < ST_MAX_ORDER &&
+ max_segs * (PAGE_SIZE << order) < new_size;
+ order++, b_size *= 2)
+ ; /* empty */
+ STbuffer->reserved_page_order = order;
+ }
+ if (max_segs * (PAGE_SIZE << order) < new_size) {
+ if (order == ST_MAX_ORDER)
+ return 0;
+ normalize_buffer(STbuffer);
+ return enlarge_buffer(STbuffer, new_size, need_dma);
+ }
+
+ for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
+ segs < max_segs && got < new_size;) {
+ struct page *page;
+
+ page = alloc_pages(priority, order);
+ if (!page) {
+ DEB(STbuffer->buffer_size = got);
+ normalize_buffer(STbuffer);
+ return 0;
+ }
+
+ STbuffer->frp_segs += 1;
+ got += b_size;
+ STbuffer->buffer_size = got;
+ STbuffer->reserved_pages[segs] = page;
+ segs++;
+ }
+ STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
+
+ return 1;
+}
+
+
+/* Make sure that no data from previous user is in the internal buffer */
+static void clear_buffer(struct st_buffer * st_bp)
+{
+ int i;
+
+ for (i=0; i < st_bp->frp_segs; i++)
+ memset(page_address(st_bp->reserved_pages[i]), 0,
+ PAGE_SIZE << st_bp->reserved_page_order);
+ st_bp->cleared = 1;
+}
+
+
+/* Release the extra buffer */
+static void normalize_buffer(struct st_buffer * STbuffer)
+{
+ int i, order = STbuffer->reserved_page_order;
+
+ for (i = 0; i < STbuffer->frp_segs; i++) {
+ __free_pages(STbuffer->reserved_pages[i], order);
+ STbuffer->buffer_size -= (PAGE_SIZE << order);
+ }
+ STbuffer->frp_segs = 0;
+ STbuffer->sg_segs = 0;
+ STbuffer->reserved_page_order = 0;
+ STbuffer->map_data.offset = 0;
+}
+
+
+/* Move data from the user buffer to the tape buffer. Returns zero (success) or
+ negative error code. */
+static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
+{
+ int i, cnt, res, offset;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
+
+ for (i = 0, offset = st_bp->buffer_bytes;
+ i < st_bp->frp_segs && offset >= length; i++)
+ offset -= length;
+ if (i == st_bp->frp_segs) { /* Should never happen */
+ printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
+ return (-EIO);
+ }
+ for (; i < st_bp->frp_segs && do_count > 0; i++) {
+ struct page *page = st_bp->reserved_pages[i];
+ cnt = length - offset < do_count ? length - offset : do_count;
+ res = copy_from_user(page_address(page) + offset, ubp, cnt);
+ if (res)
+ return (-EFAULT);
+ do_count -= cnt;
+ st_bp->buffer_bytes += cnt;
+ ubp += cnt;
+ offset = 0;
+ }
+ if (do_count) /* Should never happen */
+ return (-EIO);
+
+ return 0;
+}
+
+
+/* Move data from the tape buffer to the user buffer. Returns zero (success) or
+ negative error code. */
+static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
+{
+ int i, cnt, res, offset;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
+
+ for (i = 0, offset = st_bp->read_pointer;
+ i < st_bp->frp_segs && offset >= length; i++)
+ offset -= length;
+ if (i == st_bp->frp_segs) { /* Should never happen */
+ printk(KERN_WARNING "st: from_buffer offset overflow.\n");
+ return (-EIO);
+ }
+ for (; i < st_bp->frp_segs && do_count > 0; i++) {
+ struct page *page = st_bp->reserved_pages[i];
+ cnt = length - offset < do_count ? length - offset : do_count;
+ res = copy_to_user(ubp, page_address(page) + offset, cnt);
+ if (res)
+ return (-EFAULT);
+ do_count -= cnt;
+ st_bp->buffer_bytes -= cnt;
+ st_bp->read_pointer += cnt;
+ ubp += cnt;
+ offset = 0;
+ }
+ if (do_count) /* Should never happen */
+ return (-EIO);
+
+ return 0;
+}
+
+
+/* Move data towards start of buffer */
+static void move_buffer_data(struct st_buffer * st_bp, int offset)
+{
+ int src_seg, dst_seg, src_offset = 0, dst_offset;
+ int count, total;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
+
+ if (offset == 0)
+ return;
+
+ total=st_bp->buffer_bytes - offset;
+ for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
+ src_offset = offset;
+ if (src_offset < length)
+ break;
+ offset -= length;
+ }
+
+ st_bp->buffer_bytes = st_bp->read_pointer = total;
+ for (dst_seg=dst_offset=0; total > 0; ) {
+ struct page *dpage = st_bp->reserved_pages[dst_seg];
+ struct page *spage = st_bp->reserved_pages[src_seg];
+
+ count = min(length - dst_offset, length - src_offset);
+ memmove(page_address(dpage) + dst_offset,
+ page_address(spage) + src_offset, count);
+ src_offset += count;
+ if (src_offset >= length) {
+ src_seg++;
+ src_offset = 0;
+ }
+ dst_offset += count;
+ if (dst_offset >= length) {
+ dst_seg++;
+ dst_offset = 0;
+ }
+ total -= count;
+ }
+}
+
+/* Validate the options from command line or module parameters */
+static void validate_options(void)
+{
+ if (buffer_kbs > 0)
+ st_fixed_buffer_size = buffer_kbs * ST_KILOBYTE;
+ if (max_sg_segs >= ST_FIRST_SG)
+ st_max_sg_segs = max_sg_segs;
+}
+
+#ifndef MODULE
+/* Set the boot options. Syntax is defined in Documenation/scsi/st.txt.
+ */
+static int __init st_setup(char *str)
+{
+ int i, len, ints[5];
+ char *stp;
+
+ stp = get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] > 0) {
+ for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++)
+ if (parms[i].val)
+ *parms[i].val = ints[i + 1];
+ } else {
+ while (stp != NULL) {
+ for (i = 0; i < ARRAY_SIZE(parms); i++) {
+ len = strlen(parms[i].name);
+ if (!strncmp(stp, parms[i].name, len) &&
+ (*(stp + len) == ':' || *(stp + len) == '=')) {
+ if (parms[i].val)
+ *parms[i].val =
+ simple_strtoul(stp + len + 1, NULL, 0);
+ else
+ printk(KERN_WARNING "st: Obsolete parameter %s\n",
+ parms[i].name);
+ break;
+ }
+ }
+ if (i >= ARRAY_SIZE(parms))
+ printk(KERN_WARNING "st: invalid parameter in '%s'\n",
+ stp);
+ stp = strchr(stp, ',');
+ if (stp)
+ stp++;
+ }
+ }
+
+ validate_options();
+
+ return 1;
+}
+
+__setup("st=", st_setup);
+
+#endif
+
+static const struct file_operations st_fops =
+{
+ .owner = THIS_MODULE,
+ .read = st_read,
+ .write = st_write,
+ .unlocked_ioctl = st_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = st_compat_ioctl,
+#endif
+ .open = st_open,
+ .flush = st_flush,
+ .release = st_release,
+ .llseek = noop_llseek,
+};
+
+static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
+{
+ int i, error;
+ dev_t cdev_devno;
+ struct cdev *cdev;
+ struct device *dev;
+ struct st_modedef *STm = &(tape->modes[mode]);
+ char name[10];
+ int dev_num = tape->index;
+
+ cdev_devno = MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, rew));
+
+ cdev = cdev_alloc();
+ if (!cdev) {
+ pr_err("st%d: out of memory. Device not attached.\n", dev_num);
+ error = -ENOMEM;
+ goto out;
+ }
+ cdev->owner = THIS_MODULE;
+ cdev->ops = &st_fops;
+
+ error = cdev_add(cdev, cdev_devno, 1);
+ if (error) {
+ pr_err("st%d: Can't add %s-rewind mode %d\n", dev_num,
+ rew ? "non" : "auto", mode);
+ pr_err("st%d: Device not attached.\n", dev_num);
+ goto out_free;
+ }
+ STm->cdevs[rew] = cdev;
+
+ i = mode << (4 - ST_NBR_MODE_BITS);
+ snprintf(name, 10, "%s%s%s", rew ? "n" : "",
+ tape->disk->disk_name, st_formats[i]);
+
+ dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
+ cdev_devno, &tape->modes[mode], "%s", name);
+ if (IS_ERR(dev)) {
+ pr_err("st%d: device_create failed\n", dev_num);
+ error = PTR_ERR(dev);
+ goto out_free;
+ }
+
+ STm->devs[rew] = dev;
+
+ return 0;
+out_free:
+ cdev_del(STm->cdevs[rew]);
+ STm->cdevs[rew] = NULL;
+out:
+ return error;
+}
+
+static int create_cdevs(struct scsi_tape *tape)
+{
+ int mode, error;
+ for (mode = 0; mode < ST_NBR_MODES; ++mode) {
+ error = create_one_cdev(tape, mode, 0);
+ if (error)
+ return error;
+ error = create_one_cdev(tape, mode, 1);
+ if (error)
+ return error;
+ }
+
+ return sysfs_create_link(&tape->device->sdev_gendev.kobj,
+ &tape->modes[0].devs[0]->kobj, "tape");
+}
+
+static void remove_cdevs(struct scsi_tape *tape)
+{
+ int mode, rew;
+ sysfs_remove_link(&tape->device->sdev_gendev.kobj, "tape");
+ for (mode = 0; mode < ST_NBR_MODES; mode++) {
+ struct st_modedef *STm = &(tape->modes[mode]);
+ for (rew = 0; rew < 2; rew++) {
+ if (STm->cdevs[rew])
+ cdev_del(STm->cdevs[rew]);
+ if (STm->devs[rew])
+ device_unregister(STm->devs[rew]);
+ }
+ }
+}
+
+static int st_probe(struct device *dev)
+{
+ struct scsi_device *SDp = to_scsi_device(dev);
+ struct gendisk *disk = NULL;
+ struct scsi_tape *tpnt = NULL;
+ struct st_modedef *STm;
+ struct st_partstat *STps;
+ struct st_buffer *buffer;
+ int i, error;
+ char *stp;
+
+ if (SDp->type != TYPE_TAPE)
+ return -ENODEV;
+ if ((stp = st_incompatible(SDp))) {
+ sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n");
+ sdev_printk(KERN_INFO, SDp,
+ "st: The suggested driver is %s.\n", stp);
+ return -ENODEV;
+ }
+
+ scsi_autopm_get_device(SDp);
+ i = queue_max_segments(SDp->request_queue);
+ if (st_max_sg_segs < i)
+ i = st_max_sg_segs;
+ buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
+ if (buffer == NULL) {
+ sdev_printk(KERN_ERR, SDp,
+ "st: Can't allocate new tape buffer. "
+ "Device not attached.\n");
+ goto out;
+ }
+
+ disk = alloc_disk(1);
+ if (!disk) {
+ sdev_printk(KERN_ERR, SDp,
+ "st: out of memory. Device not attached.\n");
+ goto out_buffer_free;
+ }
+
+ tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
+ if (tpnt == NULL) {
+ sdev_printk(KERN_ERR, SDp,
+ "st: Can't allocate device descriptor.\n");
+ goto out_put_disk;
+ }
+ kref_init(&tpnt->kref);
+ tpnt->disk = disk;
+ disk->private_data = &tpnt->driver;
+ disk->queue = SDp->request_queue;
+ /* SCSI tape doesn't register this gendisk via add_disk(). Manually
+ * take queue reference that release_disk() expects. */
+ if (!blk_get_queue(disk->queue))
+ goto out_put_disk;
+ tpnt->driver = &st_template;
+
+ tpnt->device = SDp;
+ if (SDp->scsi_level <= 2)
+ tpnt->tape_type = MT_ISSCSI1;
+ else
+ tpnt->tape_type = MT_ISSCSI2;
+
+ tpnt->buffer = buffer;
+ tpnt->buffer->last_SRpnt = NULL;
+
+ tpnt->inited = 0;
+ tpnt->dirty = 0;
+ tpnt->in_use = 0;
+ tpnt->drv_buffer = 1; /* Try buffering if no mode sense */
+ tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
+ tpnt->use_pf = (SDp->scsi_level >= SCSI_2);
+ tpnt->density = 0;
+ tpnt->do_auto_lock = ST_AUTO_LOCK;
+ tpnt->can_bsr = (SDp->scsi_level > 2 ? 1 : ST_IN_FILE_POS); /* BSR mandatory in SCSI3 */
+ tpnt->can_partitions = 0;
+ tpnt->two_fm = ST_TWO_FM;
+ tpnt->fast_mteom = ST_FAST_MTEOM;
+ tpnt->scsi2_logical = ST_SCSI2LOGICAL;
+ tpnt->sili = ST_SILI;
+ tpnt->immediate = ST_NOWAIT;
+ tpnt->immediate_filemark = 0;
+ tpnt->default_drvbuffer = 0xff; /* No forced buffering */
+ tpnt->partition = 0;
+ tpnt->new_partition = 0;
+ tpnt->nbr_partitions = 0;
+ blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
+ tpnt->long_timeout = ST_LONG_TIMEOUT;
+ tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
+
+ for (i = 0; i < ST_NBR_MODES; i++) {
+ STm = &(tpnt->modes[i]);
+ STm->defined = 0;
+ STm->sysv = ST_SYSV;
+ STm->defaults_for_writes = 0;
+ STm->do_async_writes = ST_ASYNC_WRITES;
+ STm->do_buffer_writes = ST_BUFFER_WRITES;
+ STm->do_read_ahead = ST_READ_AHEAD;
+ STm->default_compression = ST_DONT_TOUCH;
+ STm->default_blksize = (-1); /* No forced size */
+ STm->default_density = (-1); /* No forced density */
+ STm->tape = tpnt;
+ }
+
+ for (i = 0; i < ST_NBR_PARTITIONS; i++) {
+ STps = &(tpnt->ps[i]);
+ STps->rw = ST_IDLE;
+ STps->eof = ST_NOEOF;
+ STps->at_sm = 0;
+ STps->last_block_valid = 0;
+ STps->drv_block = (-1);
+ STps->drv_file = (-1);
+ }
+
+ tpnt->current_mode = 0;
+ tpnt->modes[0].defined = 1;
+
+ tpnt->density_changed = tpnt->compression_changed =
+ tpnt->blksize_changed = 0;
+ mutex_init(&tpnt->lock);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&st_index_lock);
+ error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT);
+ spin_unlock(&st_index_lock);
+ idr_preload_end();
+ if (error < 0) {
+ pr_warn("st: idr allocation failed: %d\n", error);
+ goto out_put_queue;
+ }
+ tpnt->index = error;
+ sprintf(disk->disk_name, "st%d", tpnt->index);
+
+ dev_set_drvdata(dev, tpnt);
+
+
+ error = create_cdevs(tpnt);
+ if (error)
+ goto out_remove_devs;
+ scsi_autopm_put_device(SDp);
+
+ sdev_printk(KERN_NOTICE, SDp,
+ "Attached scsi tape %s\n", tape_name(tpnt));
+ sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
+ tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
+ queue_dma_alignment(SDp->request_queue) + 1);
+
+ return 0;
+
+out_remove_devs:
+ remove_cdevs(tpnt);
+ spin_lock(&st_index_lock);
+ idr_remove(&st_index_idr, tpnt->index);
+ spin_unlock(&st_index_lock);
+out_put_queue:
+ blk_put_queue(disk->queue);
+out_put_disk:
+ put_disk(disk);
+ kfree(tpnt);
+out_buffer_free:
+ kfree(buffer);
+out:
+ scsi_autopm_put_device(SDp);
+ return -ENODEV;
+};
+
+
+static int st_remove(struct device *dev)
+{
+ struct scsi_tape *tpnt = dev_get_drvdata(dev);
+ int index = tpnt->index;
+
+ scsi_autopm_get_device(to_scsi_device(dev));
+ remove_cdevs(tpnt);
+
+ mutex_lock(&st_ref_mutex);
+ kref_put(&tpnt->kref, scsi_tape_release);
+ mutex_unlock(&st_ref_mutex);
+ spin_lock(&st_index_lock);
+ idr_remove(&st_index_idr, index);
+ spin_unlock(&st_index_lock);
+ return 0;
+}
+
+/**
+ * scsi_tape_release - Called to free the Scsi_Tape structure
+ * @kref: pointer to embedded kref
+ *
+ * st_ref_mutex must be held entering this routine. Because it is
+ * called on last put, you should always use the scsi_tape_get()
+ * scsi_tape_put() helpers which manipulate the semaphore directly
+ * and never do a direct kref_put().
+ **/
+static void scsi_tape_release(struct kref *kref)
+{
+ struct scsi_tape *tpnt = to_scsi_tape(kref);
+ struct gendisk *disk = tpnt->disk;
+
+ tpnt->device = NULL;
+
+ if (tpnt->buffer) {
+ normalize_buffer(tpnt->buffer);
+ kfree(tpnt->buffer->reserved_pages);
+ kfree(tpnt->buffer);
+ }
+
+ disk->private_data = NULL;
+ put_disk(disk);
+ kfree(tpnt);
+ return;
+}
+
+static struct class st_sysfs_class = {
+ .name = "scsi_tape",
+ .dev_groups = st_dev_groups,
+};
+
+static int __init init_st(void)
+{
+ int err;
+
+ validate_options();
+
+ printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n",
+ verstr, st_fixed_buffer_size, st_max_sg_segs);
+
+ debugging = (debug_flag > 0) ? debug_flag : NO_DEBUG;
+ if (debugging) {
+ printk(KERN_INFO "st: Debugging enabled debug_flag = %d\n",
+ debugging);
+ }
+
+ err = class_register(&st_sysfs_class);
+ if (err) {
+ pr_err("Unable register sysfs class for SCSI tapes\n");
+ return err;
+ }
+
+ err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
+ ST_MAX_TAPE_ENTRIES, "st");
+ if (err) {
+ printk(KERN_ERR "Unable to get major %d for SCSI tapes\n",
+ SCSI_TAPE_MAJOR);
+ goto err_class;
+ }
+
+ err = scsi_register_driver(&st_template.gendrv);
+ if (err)
+ goto err_chrdev;
+
+ err = do_create_sysfs_files();
+ if (err)
+ goto err_scsidrv;
+
+ return 0;
+
+err_scsidrv:
+ scsi_unregister_driver(&st_template.gendrv);
+err_chrdev:
+ unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
+ ST_MAX_TAPE_ENTRIES);
+err_class:
+ class_unregister(&st_sysfs_class);
+ return err;
+}
+
+static void __exit exit_st(void)
+{
+ do_remove_sysfs_files();
+ scsi_unregister_driver(&st_template.gendrv);
+ unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
+ ST_MAX_TAPE_ENTRIES);
+ class_unregister(&st_sysfs_class);
+ printk(KERN_INFO "st: Unloaded.\n");
+}
+
+module_init(init_st);
+module_exit(exit_st);
+
+
+/* The sysfs driver interface. Read-only at the moment */
+static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
+}
+static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL);
+
+static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
+}
+static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL);
+
+static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
+}
+static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL);
+
+static ssize_t st_version_show(struct device_driver *ddd, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
+}
+static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
+
+static int do_create_sysfs_files(void)
+{
+ struct device_driver *sysfs = &st_template.gendrv;
+ int err;
+
+ err = driver_create_file(sysfs, &driver_attr_try_direct_io);
+ if (err)
+ return err;
+ err = driver_create_file(sysfs, &driver_attr_fixed_buffer_size);
+ if (err)
+ goto err_try_direct_io;
+ err = driver_create_file(sysfs, &driver_attr_max_sg_segs);
+ if (err)
+ goto err_attr_fixed_buf;
+ err = driver_create_file(sysfs, &driver_attr_version);
+ if (err)
+ goto err_attr_max_sg;
+
+ return 0;
+
+err_attr_max_sg:
+ driver_remove_file(sysfs, &driver_attr_max_sg_segs);
+err_attr_fixed_buf:
+ driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
+err_try_direct_io:
+ driver_remove_file(sysfs, &driver_attr_try_direct_io);
+ return err;
+}
+
+static void do_remove_sysfs_files(void)
+{
+ struct device_driver *sysfs = &st_template.gendrv;
+
+ driver_remove_file(sysfs, &driver_attr_version);
+ driver_remove_file(sysfs, &driver_attr_max_sg_segs);
+ driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
+ driver_remove_file(sysfs, &driver_attr_try_direct_io);
+}
+
+/* The sysfs simple class interface */
+static ssize_t
+defined_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ ssize_t l = 0;
+
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
+ return l;
+}
+static DEVICE_ATTR_RO(defined);
+
+static ssize_t
+default_blksize_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ ssize_t l = 0;
+
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
+ return l;
+}
+static DEVICE_ATTR_RO(default_blksize);
+
+static ssize_t
+default_density_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ ssize_t l = 0;
+ char *fmt;
+
+ fmt = STm->default_density >= 0 ? "0x%02x\n" : "%d\n";
+ l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density);
+ return l;
+}
+static DEVICE_ATTR_RO(default_density);
+
+static ssize_t
+default_compression_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ ssize_t l = 0;
+
+ l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
+ return l;
+}
+static DEVICE_ATTR_RO(default_compression);
+
+static ssize_t
+options_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ struct scsi_tape *STp = STm->tape;
+ int options;
+ ssize_t l = 0;
+
+ options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0;
+ options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0;
+ options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0;
+ DEB( options |= debugging ? MT_ST_DEBUGGING : 0 );
+ options |= STp->two_fm ? MT_ST_TWO_FM : 0;
+ options |= STp->fast_mteom ? MT_ST_FAST_MTEOM : 0;
+ options |= STm->defaults_for_writes ? MT_ST_DEF_WRITES : 0;
+ options |= STp->can_bsr ? MT_ST_CAN_BSR : 0;
+ options |= STp->omit_blklims ? MT_ST_NO_BLKLIMS : 0;
+ options |= STp->can_partitions ? MT_ST_CAN_PARTITIONS : 0;
+ options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
+ options |= STm->sysv ? MT_ST_SYSV : 0;
+ options |= STp->immediate ? MT_ST_NOWAIT : 0;
+ options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0;
+ options |= STp->sili ? MT_ST_SILI : 0;
+
+ l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
+ return l;
+}
+static DEVICE_ATTR_RO(options);
+
+static struct attribute *st_dev_attrs[] = {
+ &dev_attr_defined.attr,
+ &dev_attr_default_blksize.attr,
+ &dev_attr_default_density.attr,
+ &dev_attr_default_compression.attr,
+ &dev_attr_options.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(st_dev);
+
+/* The following functions may be useful for a larger audience. */
+static int sgl_map_user_pages(struct st_buffer *STbp,
+ const unsigned int max_pages, unsigned long uaddr,
+ size_t count, int rw)
+{
+ unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int res, i, j;
+ struct page **pages;
+ struct rq_map_data *mdata = &STbp->map_data;
+
+ /* User attempted Overflow! */
+ if ((uaddr + count) < uaddr)
+ return -EINVAL;
+
+ /* Too big */
+ if (nr_pages > max_pages)
+ return -ENOMEM;
+
+ /* Hmm? */
+ if (count == 0)
+ return 0;
+
+ if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ /* Try to fault in all of the necessary pages */
+ /* rw==READ means read from drive, write into memory area */
+ res = get_user_pages_unlocked(
+ current,
+ current->mm,
+ uaddr,
+ nr_pages,
+ rw == READ,
+ 0, /* don't force */
+ pages);
+
+ /* Errors and no page mapped should return here */
+ if (res < nr_pages)
+ goto out_unmap;
+
+ for (i=0; i < nr_pages; i++) {
+ /* FIXME: flush superflous for rw==READ,
+ * probably wrong function for rw==WRITE
+ */
+ flush_dcache_page(pages[i]);
+ }
+
+ mdata->offset = uaddr & ~PAGE_MASK;
+ STbp->mapped_pages = pages;
+
+ return nr_pages;
+ out_unmap:
+ if (res > 0) {
+ for (j=0; j < res; j++)
+ page_cache_release(pages[j]);
+ res = 0;
+ }
+ kfree(pages);
+ return res;
+}
+
+
+/* And unmap them... */
+static int sgl_unmap_user_pages(struct st_buffer *STbp,
+ const unsigned int nr_pages, int dirtied)
+{
+ int i;
+
+ for (i=0; i < nr_pages; i++) {
+ struct page *page = STbp->mapped_pages[i];
+
+ if (dirtied)
+ SetPageDirty(page);
+ /* FIXME: cache flush missing for rw==READ
+ * FIXME: call the correct reference counting function
+ */
+ page_cache_release(page);
+ }
+ kfree(STbp->mapped_pages);
+ STbp->mapped_pages = NULL;
+
+ return 0;
+}
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
new file mode 100644
index 000000000..f3eee0f9f
--- /dev/null
+++ b/drivers/scsi/st.h
@@ -0,0 +1,227 @@
+
+#ifndef _ST_H
+#define _ST_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <scsi/scsi_cmnd.h>
+
+/* Descriptor for analyzed sense data */
+struct st_cmdstatus {
+ int midlevel_result;
+ struct scsi_sense_hdr sense_hdr;
+ int have_sense;
+ int residual;
+ u64 uremainder64;
+ u8 flags;
+ u8 remainder_valid;
+ u8 fixed_format;
+ u8 deferred;
+};
+
+struct scsi_tape;
+
+/* scsi tape command */
+struct st_request {
+ unsigned char cmd[MAX_COMMAND_SIZE];
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int result;
+ struct scsi_tape *stp;
+ struct completion *waiting;
+ struct bio *bio;
+};
+
+/* The tape buffer descriptor. */
+struct st_buffer {
+ unsigned char dma; /* DMA-able buffer */
+ unsigned char cleared; /* internal buffer cleared after open? */
+ unsigned short do_dio; /* direct i/o set up? */
+ int buffer_size;
+ int buffer_blocks;
+ int buffer_bytes;
+ int read_pointer;
+ int writing;
+ int syscall_result;
+ struct st_request *last_SRpnt;
+ struct st_cmdstatus cmdstat;
+ struct page **reserved_pages;
+ int reserved_page_order;
+ struct page **mapped_pages;
+ struct rq_map_data map_data;
+ unsigned char *b_data;
+ unsigned short use_sg; /* zero or max number of s/g segments for this adapter */
+ unsigned short sg_segs; /* number of segments in s/g list */
+ unsigned short frp_segs; /* number of buffer segments */
+};
+
+/* The tape mode definition */
+struct st_modedef {
+ unsigned char defined;
+ unsigned char sysv; /* SYS V semantics? */
+ unsigned char do_async_writes;
+ unsigned char do_buffer_writes;
+ unsigned char do_read_ahead;
+ unsigned char defaults_for_writes;
+ unsigned char default_compression; /* 0 = don't touch, etc */
+ short default_density; /* Forced density, -1 = no value */
+ int default_blksize; /* Forced blocksize, -1 = no value */
+ struct scsi_tape *tape;
+ struct device *devs[2]; /* Auto-rewind and non-rewind devices */
+ struct cdev *cdevs[2]; /* Auto-rewind and non-rewind devices */
+};
+
+/* Number of modes can be changed by changing ST_NBR_MODE_BITS. The maximum
+ number of modes is 16 (ST_NBR_MODE_BITS 4) */
+#define ST_NBR_MODE_BITS 2
+#define ST_NBR_MODES (1 << ST_NBR_MODE_BITS)
+#define ST_MODE_SHIFT (7 - ST_NBR_MODE_BITS)
+#define ST_MODE_MASK ((ST_NBR_MODES - 1) << ST_MODE_SHIFT)
+
+#define ST_MAX_TAPES (1 << (20 - (ST_NBR_MODE_BITS + 1)))
+#define ST_MAX_TAPE_ENTRIES (ST_MAX_TAPES << (ST_NBR_MODE_BITS + 1))
+
+/* The status related to each partition */
+struct st_partstat {
+ unsigned char rw;
+ unsigned char eof;
+ unsigned char at_sm;
+ unsigned char last_block_valid;
+ u32 last_block_visited;
+ int drv_block; /* The block where the drive head is */
+ int drv_file;
+};
+
+#define ST_NBR_PARTITIONS 4
+
+/* The tape drive descriptor */
+struct scsi_tape {
+ struct scsi_driver *driver;
+ struct scsi_device *device;
+ struct mutex lock; /* For serialization */
+ struct completion wait; /* For SCSI commands */
+ struct st_buffer *buffer;
+ int index;
+
+ /* Drive characteristics */
+ unsigned char omit_blklims;
+ unsigned char do_auto_lock;
+ unsigned char can_bsr;
+ unsigned char can_partitions;
+ unsigned char two_fm;
+ unsigned char fast_mteom;
+ unsigned char immediate;
+ unsigned char restr_dma;
+ unsigned char scsi2_logical;
+ unsigned char default_drvbuffer; /* 0xff = don't touch, value 3 bits */
+ unsigned char cln_mode; /* 0 = none, otherwise sense byte nbr */
+ unsigned char cln_sense_value;
+ unsigned char cln_sense_mask;
+ unsigned char use_pf; /* Set Page Format bit in all mode selects? */
+ unsigned char try_dio; /* try direct i/o in general? */
+ unsigned char try_dio_now; /* try direct i/o before next close? */
+ unsigned char c_algo; /* compression algorithm */
+ unsigned char pos_unknown; /* after reset position unknown */
+ unsigned char sili; /* use SILI when reading in variable b mode */
+ unsigned char immediate_filemark; /* write filemark immediately */
+ int tape_type;
+ int long_timeout; /* timeout for commands known to take long time */
+
+ unsigned long max_pfn; /* the maximum page number reachable by the HBA */
+
+ /* Mode characteristics */
+ struct st_modedef modes[ST_NBR_MODES];
+ int current_mode;
+
+ /* Status variables */
+ int partition;
+ int new_partition;
+ int nbr_partitions; /* zero until partition support enabled */
+ struct st_partstat ps[ST_NBR_PARTITIONS];
+ unsigned char dirty;
+ unsigned char ready;
+ unsigned char write_prot;
+ unsigned char drv_write_prot;
+ unsigned char in_use;
+ unsigned char blksize_changed;
+ unsigned char density_changed;
+ unsigned char compression_changed;
+ unsigned char drv_buffer;
+ unsigned char density;
+ unsigned char door_locked;
+ unsigned char autorew_dev; /* auto-rewind device */
+ unsigned char rew_at_close; /* rewind necessary at close */
+ unsigned char inited;
+ unsigned char cleaning_req; /* cleaning requested? */
+ int block_size;
+ int min_block;
+ int max_block;
+ int recover_count; /* From tape opening */
+ int recover_reg; /* From last status call */
+
+#if DEBUG
+ unsigned char write_pending;
+ int nbr_finished;
+ int nbr_waits;
+ int nbr_requests;
+ int nbr_dio;
+ int nbr_pages;
+ unsigned char last_cmnd[6];
+ unsigned char last_sense[16];
+#endif
+ struct gendisk *disk;
+ struct kref kref;
+};
+
+/* Bit masks for use_pf */
+#define USE_PF 1
+#define PF_TESTED 2
+
+/* Values of eof */
+#define ST_NOEOF 0
+#define ST_FM_HIT 1
+#define ST_FM 2
+#define ST_EOM_OK 3
+#define ST_EOM_ERROR 4
+#define ST_EOD_1 5
+#define ST_EOD_2 6
+#define ST_EOD 7
+/* EOD hit while reading => ST_EOD_1 => return zero => ST_EOD_2 =>
+ return zero => ST_EOD, return ENOSPC */
+/* When writing: ST_EOM_OK == early warning found, write OK
+ ST_EOD_1 == allow trying new write after early warning
+ ST_EOM_ERROR == early warning found, not able to write all */
+
+/* Values of rw */
+#define ST_IDLE 0
+#define ST_READING 1
+#define ST_WRITING 2
+
+/* Values of ready state */
+#define ST_READY 0
+#define ST_NOT_READY 1
+#define ST_NO_TAPE 2
+
+/* Values for door lock state */
+#define ST_UNLOCKED 0
+#define ST_LOCKED_EXPLICIT 1
+#define ST_LOCKED_AUTO 2
+#define ST_LOCK_FAILS 3
+
+/* Positioning SCSI-commands for Tandberg, etc. drives */
+#define QFA_REQUEST_BLOCK 0x02
+#define QFA_SEEK_BLOCK 0x0c
+
+/* Setting the binary options */
+#define ST_DONT_TOUCH 0
+#define ST_NO 1
+#define ST_YES 2
+
+#define EXTENDED_SENSE_START 18
+
+/* Masks for some conditions in the sense data */
+#define SENSE_FMK 0x80
+#define SENSE_EOM 0x40
+#define SENSE_ILI 0x20
+
+#endif
diff --git a/drivers/scsi/st_options.h b/drivers/scsi/st_options.h
new file mode 100644
index 000000000..d2f947935
--- /dev/null
+++ b/drivers/scsi/st_options.h
@@ -0,0 +1,104 @@
+/*
+ The compile-time configurable defaults for the Linux SCSI tape driver.
+
+ Copyright 1995-2003 Kai Makisara.
+
+ Last modified: Thu Feb 21 21:47:07 2008 by kai.makisara
+*/
+
+#ifndef _ST_OPTIONS_H
+#define _ST_OPTIONS_H
+
+/* If TRY_DIRECT_IO is non-zero, the driver tries to transfer data directly
+ between the user buffer and tape drive. If this is not possible, driver
+ buffer is used. If TRY_DIRECT_IO is zero, driver buffer is always used. */
+#define TRY_DIRECT_IO 1
+
+/* The driver does not wait for some operations to finish before returning
+ to the user program if ST_NOWAIT is non-zero. This helps if the SCSI
+ adapter does not support multiple outstanding commands. However, the user
+ should not give a new tape command before the previous one has finished. */
+#define ST_NOWAIT 0
+
+/* If ST_IN_FILE_POS is nonzero, the driver positions the tape after the
+ record been read by the user program even if the tape has moved further
+ because of buffered reads. Should be set to zero to support also drives
+ that can't space backwards over records. NOTE: The tape will be
+ spaced backwards over an "accidentally" crossed filemark in any case. */
+#define ST_IN_FILE_POS 0
+
+/* If ST_RECOVERED_WRITE_FATAL is non-zero, recovered errors while writing
+ are considered "hard errors". */
+#define ST_RECOVERED_WRITE_FATAL 0
+
+/* The "guess" for the block size for devices that don't support MODE
+ SENSE. */
+#define ST_DEFAULT_BLOCK 0
+
+/* The minimum tape driver buffer size in kilobytes in fixed block mode.
+ Must be non-zero. */
+#define ST_FIXED_BUFFER_BLOCKS 32
+
+/* Maximum number of scatter/gather segments */
+#define ST_MAX_SG 256
+
+/* The number of scatter/gather segments to allocate at first try (must be
+ smaller or equal to the maximum). */
+#define ST_FIRST_SG 8
+
+/* The size of the first scatter/gather segments (determines the maximum block
+ size for SCSI adapters not supporting scatter/gather). The default is set
+ to try to allocate the buffer as one chunk. */
+#define ST_FIRST_ORDER 5
+
+
+/* The following lines define defaults for properties that can be set
+ separately for each drive using the MTSTOPTIONS ioctl. */
+
+/* If ST_TWO_FM is non-zero, the driver writes two filemarks after a
+ file being written. Some drives can't handle two filemarks at the
+ end of data. */
+#define ST_TWO_FM 0
+
+/* If ST_BUFFER_WRITES is non-zero, writes in fixed block mode are
+ buffered until the driver buffer is full or asynchronous write is
+ triggered. May make detection of End-Of-Medium early enough fail. */
+#define ST_BUFFER_WRITES 1
+
+/* If ST_ASYNC_WRITES is non-zero, the SCSI write command may be started
+ without waiting for it to finish. May cause problems in multiple
+ tape backups. */
+#define ST_ASYNC_WRITES 1
+
+/* If ST_READ_AHEAD is non-zero, blocks are read ahead in fixed block
+ mode. */
+#define ST_READ_AHEAD 1
+
+/* If ST_AUTO_LOCK is non-zero, the drive door is locked at the first
+ read or write command after the device is opened. The door is opened
+ when the device is closed. */
+#define ST_AUTO_LOCK 0
+
+/* If ST_FAST_MTEOM is non-zero, the MTEOM ioctl is done using the
+ direct SCSI command. The file number status is lost but this method
+ is fast with some drives. Otherwise MTEOM is done by spacing over
+ files and the file number status is retained. */
+#define ST_FAST_MTEOM 0
+
+/* If ST_SCSI2LOGICAL is nonzero, the logical block addresses are used for
+ MTIOCPOS and MTSEEK by default. Vendor addresses are used if ST_SCSI2LOGICAL
+ is zero. */
+#define ST_SCSI2LOGICAL 0
+
+/* If ST_SYSV is non-zero, the tape behaves according to the SYS V semantics.
+ The default is BSD semantics. */
+#define ST_SYSV 0
+
+/* If ST_SILI is non-zero, the SILI bit is set when reading in variable block
+ mode and the block size is determined using the residual returned by the HBA. */
+#define ST_SILI 0
+
+/* Time to wait for the drive to become ready if blocking open */
+#define ST_BLOCK_SECONDS 120
+
+#endif
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
new file mode 100644
index 000000000..98a62bc15
--- /dev/null
+++ b/drivers/scsi/stex.c
@@ -0,0 +1,1812 @@
+/*
+ * SuperTrak EX Series Storage Controller driver for Linux
+ *
+ * Copyright (C) 2005-2009 Promise Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Written By:
+ * Ed Lin <promise_linux@promise.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#define DRV_NAME "stex"
+#define ST_DRIVER_VERSION "4.6.0000.4"
+#define ST_VER_MAJOR 4
+#define ST_VER_MINOR 6
+#define ST_OEM 0
+#define ST_BUILD_VER 4
+
+enum {
+ /* MU register offset */
+ IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */
+ IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */
+ OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */
+ OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */
+ IDBL = 0x20, /* MU_INBOUND_DOORBELL */
+ IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */
+ IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */
+ ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */
+ OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */
+ OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */
+
+ YIOA_STATUS = 0x00,
+ YH2I_INT = 0x20,
+ YINT_EN = 0x34,
+ YI2H_INT = 0x9c,
+ YI2H_INT_C = 0xa0,
+ YH2I_REQ = 0xc0,
+ YH2I_REQ_HI = 0xc4,
+
+ /* MU register value */
+ MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0),
+ MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1),
+ MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2),
+ MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3),
+ MU_INBOUND_DOORBELL_RESET = (1 << 4),
+
+ MU_OUTBOUND_DOORBELL_HANDSHAKE = (1 << 0),
+ MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1),
+ MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2),
+ MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3),
+ MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4),
+ MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27),
+
+ /* MU status code */
+ MU_STATE_STARTING = 1,
+ MU_STATE_STARTED = 2,
+ MU_STATE_RESETTING = 3,
+ MU_STATE_FAILED = 4,
+
+ MU_MAX_DELAY = 120,
+ MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
+ MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
+ MU_HARD_RESET_WAIT = 30000,
+ HMU_PARTNER_TYPE = 2,
+
+ /* firmware returned values */
+ SRB_STATUS_SUCCESS = 0x01,
+ SRB_STATUS_ERROR = 0x04,
+ SRB_STATUS_BUSY = 0x05,
+ SRB_STATUS_INVALID_REQUEST = 0x06,
+ SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
+ SRB_SEE_SENSE = 0x80,
+
+ /* task attribute */
+ TASK_ATTRIBUTE_SIMPLE = 0x0,
+ TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
+ TASK_ATTRIBUTE_ORDERED = 0x2,
+ TASK_ATTRIBUTE_ACA = 0x4,
+
+ SS_STS_NORMAL = 0x80000000,
+ SS_STS_DONE = 0x40000000,
+ SS_STS_HANDSHAKE = 0x20000000,
+
+ SS_HEAD_HANDSHAKE = 0x80,
+
+ SS_H2I_INT_RESET = 0x100,
+
+ SS_I2H_REQUEST_RESET = 0x2000,
+
+ SS_MU_OPERATIONAL = 0x80000000,
+
+ STEX_CDB_LENGTH = 16,
+ STATUS_VAR_LEN = 128,
+
+ /* sg flags */
+ SG_CF_EOT = 0x80, /* end of table */
+ SG_CF_64B = 0x40, /* 64 bit item */
+ SG_CF_HOST = 0x20, /* sg in host memory */
+ MSG_DATA_DIR_ND = 0,
+ MSG_DATA_DIR_IN = 1,
+ MSG_DATA_DIR_OUT = 2,
+
+ st_shasta = 0,
+ st_vsc = 1,
+ st_yosemite = 2,
+ st_seq = 3,
+ st_yel = 4,
+
+ PASSTHRU_REQ_TYPE = 0x00000001,
+ PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
+ ST_INTERNAL_TIMEOUT = 180,
+
+ ST_TO_CMD = 0,
+ ST_FROM_CMD = 1,
+
+ /* vendor specific commands of Promise */
+ MGT_CMD = 0xd8,
+ SINBAND_MGT_CMD = 0xd9,
+ ARRAY_CMD = 0xe0,
+ CONTROLLER_CMD = 0xe1,
+ DEBUGGING_CMD = 0xe2,
+ PASSTHRU_CMD = 0xe3,
+
+ PASSTHRU_GET_ADAPTER = 0x05,
+ PASSTHRU_GET_DRVVER = 0x10,
+
+ CTLR_CONFIG_CMD = 0x03,
+ CTLR_SHUTDOWN = 0x0d,
+
+ CTLR_POWER_STATE_CHANGE = 0x0e,
+ CTLR_POWER_SAVING = 0x01,
+
+ PASSTHRU_SIGNATURE = 0x4e415041,
+ MGT_CMD_SIGNATURE = 0xba,
+
+ INQUIRY_EVPD = 0x01,
+
+ ST_ADDITIONAL_MEM = 0x200000,
+ ST_ADDITIONAL_MEM_MIN = 0x80000,
+};
+
+struct st_sgitem {
+ u8 ctrl; /* SG_CF_xxx */
+ u8 reserved[3];
+ __le32 count;
+ __le64 addr;
+};
+
+struct st_ss_sgitem {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 count;
+};
+
+struct st_sgtable {
+ __le16 sg_count;
+ __le16 max_sg_count;
+ __le32 sz_in_byte;
+};
+
+struct st_msg_header {
+ __le64 handle;
+ u8 flag;
+ u8 channel;
+ __le16 timeout;
+ u32 reserved;
+};
+
+struct handshake_frame {
+ __le64 rb_phy; /* request payload queue physical address */
+ __le16 req_sz; /* size of each request payload */
+ __le16 req_cnt; /* count of reqs the buffer can hold */
+ __le16 status_sz; /* size of each status payload */
+ __le16 status_cnt; /* count of status the buffer can hold */
+ __le64 hosttime; /* seconds from Jan 1, 1970 (GMT) */
+ u8 partner_type; /* who sends this frame */
+ u8 reserved0[7];
+ __le32 partner_ver_major;
+ __le32 partner_ver_minor;
+ __le32 partner_ver_oem;
+ __le32 partner_ver_build;
+ __le32 extra_offset; /* NEW */
+ __le32 extra_size; /* NEW */
+ __le32 scratch_size;
+ u32 reserved1;
+};
+
+struct req_msg {
+ __le16 tag;
+ u8 lun;
+ u8 target;
+ u8 task_attr;
+ u8 task_manage;
+ u8 data_dir;
+ u8 payload_sz; /* payload size in 4-byte, not used */
+ u8 cdb[STEX_CDB_LENGTH];
+ u32 variable[0];
+};
+
+struct status_msg {
+ __le16 tag;
+ u8 lun;
+ u8 target;
+ u8 srb_status;
+ u8 scsi_status;
+ u8 reserved;
+ u8 payload_sz; /* payload size in 4-byte */
+ u8 variable[STATUS_VAR_LEN];
+};
+
+struct ver_info {
+ u32 major;
+ u32 minor;
+ u32 oem;
+ u32 build;
+ u32 reserved[2];
+};
+
+struct st_frame {
+ u32 base[6];
+ u32 rom_addr;
+
+ struct ver_info drv_ver;
+ struct ver_info bios_ver;
+
+ u32 bus;
+ u32 slot;
+ u32 irq_level;
+ u32 irq_vec;
+ u32 id;
+ u32 subid;
+
+ u32 dimm_size;
+ u8 dimm_type;
+ u8 reserved[3];
+
+ u32 channel;
+ u32 reserved1;
+};
+
+struct st_drvver {
+ u32 major;
+ u32 minor;
+ u32 oem;
+ u32 build;
+ u32 signature[2];
+ u8 console_id;
+ u8 host_no;
+ u8 reserved0[2];
+ u32 reserved[3];
+};
+
+struct st_ccb {
+ struct req_msg *req;
+ struct scsi_cmnd *cmd;
+
+ void *sense_buffer;
+ unsigned int sense_bufflen;
+ int sg_count;
+
+ u32 req_type;
+ u8 srb_status;
+ u8 scsi_status;
+ u8 reserved[2];
+};
+
+struct st_hba {
+ void __iomem *mmio_base; /* iomapped PCI memory space */
+ void *dma_mem;
+ dma_addr_t dma_handle;
+ size_t dma_size;
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+
+ struct req_msg * (*alloc_rq) (struct st_hba *);
+ int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
+ void (*send) (struct st_hba *, struct req_msg *, u16);
+
+ u32 req_head;
+ u32 req_tail;
+ u32 status_head;
+ u32 status_tail;
+
+ struct status_msg *status_buffer;
+ void *copy_buffer; /* temp buffer for driver-handled commands */
+ struct st_ccb *ccb;
+ struct st_ccb *wait_ccb;
+ __le32 *scratch;
+
+ char work_q_name[20];
+ struct workqueue_struct *work_q;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_waitq;
+ unsigned int mu_status;
+ unsigned int cardtype;
+ int msi_enabled;
+ int out_req_cnt;
+ u32 extra_offset;
+ u16 rq_count;
+ u16 rq_size;
+ u16 sts_count;
+};
+
+struct st_card_info {
+ struct req_msg * (*alloc_rq) (struct st_hba *);
+ int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
+ void (*send) (struct st_hba *, struct req_msg *, u16);
+ unsigned int max_id;
+ unsigned int max_lun;
+ unsigned int max_channel;
+ u16 rq_count;
+ u16 rq_size;
+ u16 sts_count;
+};
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");
+
+static const char console_inq_page[] =
+{
+ 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
+ 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */
+ 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */
+ 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */
+ 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */
+ 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */
+ 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */
+ 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
+};
+
+MODULE_AUTHOR("Ed Lin");
+MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ST_DRIVER_VERSION);
+
+static void stex_gettime(__le64 *time)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ *time = cpu_to_le64(tv.tv_sec);
+}
+
+static struct status_msg *stex_get_status(struct st_hba *hba)
+{
+ struct status_msg *status = hba->status_buffer + hba->status_tail;
+
+ ++hba->status_tail;
+ hba->status_tail %= hba->sts_count+1;
+
+ return status;
+}
+
+static void stex_invalid_field(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+ /* "Invalid field in cdb" */
+ scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
+ 0x0);
+ done(cmd);
+}
+
+static struct req_msg *stex_alloc_req(struct st_hba *hba)
+{
+ struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
+
+ ++hba->req_head;
+ hba->req_head %= hba->rq_count+1;
+
+ return req;
+}
+
+static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
+{
+ return (struct req_msg *)(hba->dma_mem +
+ hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
+}
+
+static int stex_map_sg(struct st_hba *hba,
+ struct req_msg *req, struct st_ccb *ccb)
+{
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ struct st_sgtable *dst;
+ struct st_sgitem *table;
+ int i, nseg;
+
+ cmd = ccb->cmd;
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ dst = (struct st_sgtable *)req->variable;
+
+ ccb->sg_count = nseg;
+ dst->sg_count = cpu_to_le16((u16)nseg);
+ dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
+ dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
+
+ table = (struct st_sgitem *)(dst + 1);
+ scsi_for_each_sg(cmd, sg, nseg, i) {
+ table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
+ table[i].addr = cpu_to_le64(sg_dma_address(sg));
+ table[i].ctrl = SG_CF_64B | SG_CF_HOST;
+ }
+ table[--i].ctrl |= SG_CF_EOT;
+ }
+
+ return nseg;
+}
+
+static int stex_ss_map_sg(struct st_hba *hba,
+ struct req_msg *req, struct st_ccb *ccb)
+{
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ struct st_sgtable *dst;
+ struct st_ss_sgitem *table;
+ int i, nseg;
+
+ cmd = ccb->cmd;
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ dst = (struct st_sgtable *)req->variable;
+
+ ccb->sg_count = nseg;
+ dst->sg_count = cpu_to_le16((u16)nseg);
+ dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
+ dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
+
+ table = (struct st_ss_sgitem *)(dst + 1);
+ scsi_for_each_sg(cmd, sg, nseg, i) {
+ table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
+ table[i].addr =
+ cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
+ table[i].addr_hi =
+ cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
+ }
+ }
+
+ return nseg;
+}
+
+static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
+{
+ struct st_frame *p;
+ size_t count = sizeof(struct st_frame);
+
+ p = hba->copy_buffer;
+ scsi_sg_copy_to_buffer(ccb->cmd, p, count);
+ memset(p->base, 0, sizeof(u32)*6);
+ *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
+ p->rom_addr = 0;
+
+ p->drv_ver.major = ST_VER_MAJOR;
+ p->drv_ver.minor = ST_VER_MINOR;
+ p->drv_ver.oem = ST_OEM;
+ p->drv_ver.build = ST_BUILD_VER;
+
+ p->bus = hba->pdev->bus->number;
+ p->slot = hba->pdev->devfn;
+ p->irq_level = 0;
+ p->irq_vec = hba->pdev->irq;
+ p->id = hba->pdev->vendor << 16 | hba->pdev->device;
+ p->subid =
+ hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
+
+ scsi_sg_copy_from_buffer(ccb->cmd, p, count);
+}
+
+static void
+stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
+{
+ req->tag = cpu_to_le16(tag);
+
+ hba->ccb[tag].req = req;
+ hba->out_req_cnt++;
+
+ writel(hba->req_head, hba->mmio_base + IMR0);
+ writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
+ readl(hba->mmio_base + IDBL); /* flush */
+}
+
+static void
+stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
+{
+ struct scsi_cmnd *cmd;
+ struct st_msg_header *msg_h;
+ dma_addr_t addr;
+
+ req->tag = cpu_to_le16(tag);
+
+ hba->ccb[tag].req = req;
+ hba->out_req_cnt++;
+
+ cmd = hba->ccb[tag].cmd;
+ msg_h = (struct st_msg_header *)req - 1;
+ if (likely(cmd)) {
+ msg_h->channel = (u8)cmd->device->channel;
+ msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+ }
+ addr = hba->dma_handle + hba->req_head * hba->rq_size;
+ addr += (hba->ccb[tag].sg_count+4)/11;
+ msg_h->handle = cpu_to_le64(addr);
+
+ ++hba->req_head;
+ hba->req_head %= hba->rq_count+1;
+
+ writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
+ readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
+ writel(addr, hba->mmio_base + YH2I_REQ);
+ readl(hba->mmio_base + YH2I_REQ); /* flush */
+}
+
+static int
+stex_slave_config(struct scsi_device *sdev)
+{
+ sdev->use_10_for_rw = 1;
+ sdev->use_10_for_ms = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+}
+
+static int
+stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct st_hba *hba;
+ struct Scsi_Host *host;
+ unsigned int id, lun;
+ struct req_msg *req;
+ u16 tag;
+
+ host = cmd->device->host;
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+ hba = (struct st_hba *) &host->hostdata[0];
+
+ if (unlikely(hba->mu_status == MU_STATE_RESETTING))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ switch (cmd->cmnd[0]) {
+ case MODE_SENSE_10:
+ {
+ static char ms10_caching_page[12] =
+ { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
+ unsigned char page;
+
+ page = cmd->cmnd[2] & 0x3f;
+ if (page == 0x8 || page == 0x3f) {
+ scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
+ sizeof(ms10_caching_page));
+ cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ done(cmd);
+ } else
+ stex_invalid_field(cmd, done);
+ return 0;
+ }
+ case REPORT_LUNS:
+ /*
+ * The shasta firmware does not report actual luns in the
+ * target, so fail the command to force sequential lun scan.
+ * Also, the console device does not support this command.
+ */
+ if (hba->cardtype == st_shasta || id == host->max_id - 1) {
+ stex_invalid_field(cmd, done);
+ return 0;
+ }
+ break;
+ case TEST_UNIT_READY:
+ if (id == host->max_id - 1) {
+ cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ done(cmd);
+ return 0;
+ }
+ break;
+ case INQUIRY:
+ if (lun >= host->max_lun) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
+ if (id != host->max_id - 1)
+ break;
+ if (!lun && !cmd->device->channel &&
+ (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
+ scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
+ sizeof(console_inq_page));
+ cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ done(cmd);
+ } else
+ stex_invalid_field(cmd, done);
+ return 0;
+ case PASSTHRU_CMD:
+ if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
+ struct st_drvver ver;
+ size_t cp_len = sizeof(ver);
+
+ ver.major = ST_VER_MAJOR;
+ ver.minor = ST_VER_MINOR;
+ ver.oem = ST_OEM;
+ ver.build = ST_BUILD_VER;
+ ver.signature[0] = PASSTHRU_SIGNATURE;
+ ver.console_id = host->max_id - 1;
+ ver.host_no = hba->host->host_no;
+ cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
+ cmd->result = sizeof(ver) == cp_len ?
+ DID_OK << 16 | COMMAND_COMPLETE << 8 :
+ DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ done(cmd);
+ return 0;
+ }
+ default:
+ break;
+ }
+
+ cmd->scsi_done = done;
+
+ tag = cmd->request->tag;
+
+ if (unlikely(tag >= host->can_queue))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ req = hba->alloc_rq(hba);
+
+ req->lun = lun;
+ req->target = id;
+
+ /* cdb */
+ memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
+
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ req->data_dir = MSG_DATA_DIR_IN;
+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ req->data_dir = MSG_DATA_DIR_OUT;
+ else
+ req->data_dir = MSG_DATA_DIR_ND;
+
+ hba->ccb[tag].cmd = cmd;
+ hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ hba->ccb[tag].sense_buffer = cmd->sense_buffer;
+
+ if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
+ hba->ccb[tag].sg_count = 0;
+ memset(&req->variable[0], 0, 8);
+ }
+
+ hba->send(hba, req, tag);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(stex_queuecommand)
+
+static void stex_scsi_done(struct st_ccb *ccb)
+{
+ struct scsi_cmnd *cmd = ccb->cmd;
+ int result;
+
+ if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
+ result = ccb->scsi_status;
+ switch (ccb->scsi_status) {
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ result |= DRIVER_SENSE << 24;
+ break;
+ case SAM_STAT_BUSY:
+ result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ break;
+ default:
+ result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+ }
+ else if (ccb->srb_status & SRB_SEE_SENSE)
+ result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
+ else switch (ccb->srb_status) {
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_BUSY:
+ result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_INVALID_REQUEST:
+ case SRB_STATUS_ERROR:
+ default:
+ result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+
+ cmd->result = result;
+ cmd->scsi_done(cmd);
+}
+
+static void stex_copy_data(struct st_ccb *ccb,
+ struct status_msg *resp, unsigned int variable)
+{
+ if (resp->scsi_status != SAM_STAT_GOOD) {
+ if (ccb->sense_buffer != NULL)
+ memcpy(ccb->sense_buffer, resp->variable,
+ min(variable, ccb->sense_bufflen));
+ return;
+ }
+
+ if (ccb->cmd == NULL)
+ return;
+ scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
+}
+
+static void stex_check_cmd(struct st_hba *hba,
+ struct st_ccb *ccb, struct status_msg *resp)
+{
+ if (ccb->cmd->cmnd[0] == MGT_CMD &&
+ resp->scsi_status != SAM_STAT_CHECK_CONDITION)
+ scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
+ le32_to_cpu(*(__le32 *)&resp->variable[0]));
+}
+
+static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
+{
+ void __iomem *base = hba->mmio_base;
+ struct status_msg *resp;
+ struct st_ccb *ccb;
+ unsigned int size;
+ u16 tag;
+
+ if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
+ return;
+
+ /* status payloads */
+ hba->status_head = readl(base + OMR1);
+ if (unlikely(hba->status_head > hba->sts_count)) {
+ printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
+ pci_name(hba->pdev));
+ return;
+ }
+
+ /*
+ * it's not a valid status payload if:
+ * 1. there are no pending requests(e.g. during init stage)
+ * 2. there are some pending requests, but the controller is in
+ * reset status, and its type is not st_yosemite
+ * firmware of st_yosemite in reset status will return pending requests
+ * to driver, so we allow it to pass
+ */
+ if (unlikely(hba->out_req_cnt <= 0 ||
+ (hba->mu_status == MU_STATE_RESETTING &&
+ hba->cardtype != st_yosemite))) {
+ hba->status_tail = hba->status_head;
+ goto update_status;
+ }
+
+ while (hba->status_tail != hba->status_head) {
+ resp = stex_get_status(hba);
+ tag = le16_to_cpu(resp->tag);
+ if (unlikely(tag >= hba->host->can_queue)) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): invalid tag\n", pci_name(hba->pdev));
+ continue;
+ }
+
+ hba->out_req_cnt--;
+ ccb = &hba->ccb[tag];
+ if (unlikely(hba->wait_ccb == ccb))
+ hba->wait_ccb = NULL;
+ if (unlikely(ccb->req == NULL)) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): lagging req\n", pci_name(hba->pdev));
+ continue;
+ }
+
+ size = resp->payload_sz * sizeof(u32); /* payload size */
+ if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
+ size > sizeof(*resp))) {
+ printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
+ pci_name(hba->pdev));
+ } else {
+ size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
+ if (size)
+ stex_copy_data(ccb, resp, size);
+ }
+
+ ccb->req = NULL;
+ ccb->srb_status = resp->srb_status;
+ ccb->scsi_status = resp->scsi_status;
+
+ if (likely(ccb->cmd != NULL)) {
+ if (hba->cardtype == st_yosemite)
+ stex_check_cmd(hba, ccb, resp);
+
+ if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
+ ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
+ stex_controller_info(hba, ccb);
+
+ scsi_dma_unmap(ccb->cmd);
+ stex_scsi_done(ccb);
+ } else
+ ccb->req_type = 0;
+ }
+
+update_status:
+ writel(hba->status_head, base + IMR1);
+ readl(base + IMR1); /* flush */
+}
+
+static irqreturn_t stex_intr(int irq, void *__hba)
+{
+ struct st_hba *hba = __hba;
+ void __iomem *base = hba->mmio_base;
+ u32 data;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ data = readl(base + ODBL);
+
+ if (data && data != 0xffffffff) {
+ /* clear the interrupt */
+ writel(data, base + ODBL);
+ readl(base + ODBL); /* flush */
+ stex_mu_intr(hba, data);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
+ hba->cardtype == st_shasta))
+ queue_work(hba->work_q, &hba->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return IRQ_NONE;
+}
+
+static void stex_ss_mu_intr(struct st_hba *hba)
+{
+ struct status_msg *resp;
+ struct st_ccb *ccb;
+ __le32 *scratch;
+ unsigned int size;
+ int count = 0;
+ u32 value;
+ u16 tag;
+
+ if (unlikely(hba->out_req_cnt <= 0 ||
+ hba->mu_status == MU_STATE_RESETTING))
+ return;
+
+ while (count < hba->sts_count) {
+ scratch = hba->scratch + hba->status_tail;
+ value = le32_to_cpu(*scratch);
+ if (unlikely(!(value & SS_STS_NORMAL)))
+ return;
+
+ resp = hba->status_buffer + hba->status_tail;
+ *scratch = 0;
+ ++count;
+ ++hba->status_tail;
+ hba->status_tail %= hba->sts_count+1;
+
+ tag = (u16)value;
+ if (unlikely(tag >= hba->host->can_queue)) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): invalid tag\n", pci_name(hba->pdev));
+ continue;
+ }
+
+ hba->out_req_cnt--;
+ ccb = &hba->ccb[tag];
+ if (unlikely(hba->wait_ccb == ccb))
+ hba->wait_ccb = NULL;
+ if (unlikely(ccb->req == NULL)) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): lagging req\n", pci_name(hba->pdev));
+ continue;
+ }
+
+ ccb->req = NULL;
+ if (likely(value & SS_STS_DONE)) { /* normal case */
+ ccb->srb_status = SRB_STATUS_SUCCESS;
+ ccb->scsi_status = SAM_STAT_GOOD;
+ } else {
+ ccb->srb_status = resp->srb_status;
+ ccb->scsi_status = resp->scsi_status;
+ size = resp->payload_sz * sizeof(u32);
+ if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
+ size > sizeof(*resp))) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): bad status size\n",
+ pci_name(hba->pdev));
+ } else {
+ size -= sizeof(*resp) - STATUS_VAR_LEN;
+ if (size)
+ stex_copy_data(ccb, resp, size);
+ }
+ if (likely(ccb->cmd != NULL))
+ stex_check_cmd(hba, ccb, resp);
+ }
+
+ if (likely(ccb->cmd != NULL)) {
+ scsi_dma_unmap(ccb->cmd);
+ stex_scsi_done(ccb);
+ } else
+ ccb->req_type = 0;
+ }
+}
+
+static irqreturn_t stex_ss_intr(int irq, void *__hba)
+{
+ struct st_hba *hba = __hba;
+ void __iomem *base = hba->mmio_base;
+ u32 data;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ data = readl(base + YI2H_INT);
+ if (data && data != 0xffffffff) {
+ /* clear the interrupt */
+ writel(data, base + YI2H_INT_C);
+ stex_ss_mu_intr(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (unlikely(data & SS_I2H_REQUEST_RESET))
+ queue_work(hba->work_q, &hba->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return IRQ_NONE;
+}
+
+static int stex_common_handshake(struct st_hba *hba)
+{
+ void __iomem *base = hba->mmio_base;
+ struct handshake_frame *h;
+ dma_addr_t status_phys;
+ u32 data;
+ unsigned long before;
+
+ if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+ writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
+ readl(base + IDBL);
+ before = jiffies;
+ while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): no handshake signature\n",
+ pci_name(hba->pdev));
+ return -1;
+ }
+ rmb();
+ msleep(1);
+ }
+ }
+
+ udelay(10);
+
+ data = readl(base + OMR1);
+ if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
+ data &= 0x0000ffff;
+ if (hba->host->can_queue > data) {
+ hba->host->can_queue = data;
+ hba->host->cmd_per_lun = data;
+ }
+ }
+
+ h = (struct handshake_frame *)hba->status_buffer;
+ h->rb_phy = cpu_to_le64(hba->dma_handle);
+ h->req_sz = cpu_to_le16(hba->rq_size);
+ h->req_cnt = cpu_to_le16(hba->rq_count+1);
+ h->status_sz = cpu_to_le16(sizeof(struct status_msg));
+ h->status_cnt = cpu_to_le16(hba->sts_count+1);
+ stex_gettime(&h->hosttime);
+ h->partner_type = HMU_PARTNER_TYPE;
+ if (hba->extra_offset) {
+ h->extra_offset = cpu_to_le32(hba->extra_offset);
+ h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
+ } else
+ h->extra_offset = h->extra_size = 0;
+
+ status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
+ writel(status_phys, base + IMR0);
+ readl(base + IMR0);
+ writel((status_phys >> 16) >> 16, base + IMR1);
+ readl(base + IMR1);
+
+ writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
+ readl(base + OMR0);
+ writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
+ readl(base + IDBL); /* flush */
+
+ udelay(10);
+ before = jiffies;
+ while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): no signature after handshake frame\n",
+ pci_name(hba->pdev));
+ return -1;
+ }
+ rmb();
+ msleep(1);
+ }
+
+ writel(0, base + IMR0);
+ readl(base + IMR0);
+ writel(0, base + OMR0);
+ readl(base + OMR0);
+ writel(0, base + IMR1);
+ readl(base + IMR1);
+ writel(0, base + OMR1);
+ readl(base + OMR1); /* flush */
+ return 0;
+}
+
+static int stex_ss_handshake(struct st_hba *hba)
+{
+ void __iomem *base = hba->mmio_base;
+ struct st_msg_header *msg_h;
+ struct handshake_frame *h;
+ __le32 *scratch;
+ u32 data, scratch_size;
+ unsigned long before;
+ int ret = 0;
+
+ before = jiffies;
+ while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): firmware not operational\n",
+ pci_name(hba->pdev));
+ return -1;
+ }
+ msleep(1);
+ }
+
+ msg_h = (struct st_msg_header *)hba->dma_mem;
+ msg_h->handle = cpu_to_le64(hba->dma_handle);
+ msg_h->flag = SS_HEAD_HANDSHAKE;
+
+ h = (struct handshake_frame *)(msg_h + 1);
+ h->rb_phy = cpu_to_le64(hba->dma_handle);
+ h->req_sz = cpu_to_le16(hba->rq_size);
+ h->req_cnt = cpu_to_le16(hba->rq_count+1);
+ h->status_sz = cpu_to_le16(sizeof(struct status_msg));
+ h->status_cnt = cpu_to_le16(hba->sts_count+1);
+ stex_gettime(&h->hosttime);
+ h->partner_type = HMU_PARTNER_TYPE;
+ h->extra_offset = h->extra_size = 0;
+ scratch_size = (hba->sts_count+1)*sizeof(u32);
+ h->scratch_size = cpu_to_le32(scratch_size);
+
+ data = readl(base + YINT_EN);
+ data &= ~4;
+ writel(data, base + YINT_EN);
+ writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
+ readl(base + YH2I_REQ_HI);
+ writel(hba->dma_handle, base + YH2I_REQ);
+ readl(base + YH2I_REQ); /* flush */
+
+ scratch = hba->scratch;
+ before = jiffies;
+ while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): no signature after handshake frame\n",
+ pci_name(hba->pdev));
+ ret = -1;
+ break;
+ }
+ rmb();
+ msleep(1);
+ }
+
+ memset(scratch, 0, scratch_size);
+ msg_h->flag = 0;
+ return ret;
+}
+
+static int stex_handshake(struct st_hba *hba)
+{
+ int err;
+ unsigned long flags;
+ unsigned int mu_status;
+
+ err = (hba->cardtype == st_yel) ?
+ stex_ss_handshake(hba) : stex_common_handshake(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ mu_status = hba->mu_status;
+ if (err == 0) {
+ hba->req_head = 0;
+ hba->req_tail = 0;
+ hba->status_head = 0;
+ hba->status_tail = 0;
+ hba->out_req_cnt = 0;
+ hba->mu_status = MU_STATE_STARTED;
+ } else
+ hba->mu_status = MU_STATE_FAILED;
+ if (mu_status == MU_STATE_RESETTING)
+ wake_up_all(&hba->reset_waitq);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return err;
+}
+
+static int stex_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct st_hba *hba = (struct st_hba *)host->hostdata;
+ u16 tag = cmd->request->tag;
+ void __iomem *base;
+ u32 data;
+ int result = SUCCESS;
+ unsigned long flags;
+
+ scmd_printk(KERN_INFO, cmd, "aborting command\n");
+
+ base = hba->mmio_base;
+ spin_lock_irqsave(host->host_lock, flags);
+ if (tag < host->can_queue &&
+ hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
+ hba->wait_ccb = &hba->ccb[tag];
+ else
+ goto out;
+
+ if (hba->cardtype == st_yel) {
+ data = readl(base + YI2H_INT);
+ if (data == 0 || data == 0xffffffff)
+ goto fail_out;
+
+ writel(data, base + YI2H_INT_C);
+ stex_ss_mu_intr(hba);
+ } else {
+ data = readl(base + ODBL);
+ if (data == 0 || data == 0xffffffff)
+ goto fail_out;
+
+ writel(data, base + ODBL);
+ readl(base + ODBL); /* flush */
+
+ stex_mu_intr(hba, data);
+ }
+ if (hba->wait_ccb == NULL) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): lost interrupt\n", pci_name(hba->pdev));
+ goto out;
+ }
+
+fail_out:
+ scsi_dma_unmap(cmd);
+ hba->wait_ccb->req = NULL; /* nullify the req's future return */
+ hba->wait_ccb = NULL;
+ result = FAILED;
+out:
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return result;
+}
+
+static void stex_hard_reset(struct st_hba *hba)
+{
+ struct pci_bus *bus;
+ int i;
+ u16 pci_cmd;
+ u8 pci_bctl;
+
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(hba->pdev, i * 4,
+ &hba->pdev->saved_config_space[i]);
+
+ /* Reset secondary bus. Our controller(MU/ATU) is the only device on
+ secondary bus. Consult Intel 80331/3 developer's manual for detail */
+ bus = hba->pdev->bus;
+ pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
+ pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
+
+ /*
+ * 1 ms may be enough for 8-port controllers. But 16-port controllers
+ * require more time to finish bus reset. Use 100 ms here for safety
+ */
+ msleep(100);
+ pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
+
+ for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
+ pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
+ break;
+ msleep(1);
+ }
+
+ ssleep(5);
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(hba->pdev, i * 4,
+ hba->pdev->saved_config_space[i]);
+}
+
+static int stex_yos_reset(struct st_hba *hba)
+{
+ void __iomem *base;
+ unsigned long flags, before;
+ int ret = 0;
+
+ base = hba->mmio_base;
+ writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
+ readl(base + IDBL); /* flush */
+ before = jiffies;
+ while (hba->out_req_cnt > 0) {
+ if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): reset timeout\n", pci_name(hba->pdev));
+ ret = -1;
+ break;
+ }
+ msleep(1);
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret == -1)
+ hba->mu_status = MU_STATE_FAILED;
+ else
+ hba->mu_status = MU_STATE_STARTED;
+ wake_up_all(&hba->reset_waitq);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+static void stex_ss_reset(struct st_hba *hba)
+{
+ writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
+ readl(hba->mmio_base + YH2I_INT);
+ ssleep(5);
+}
+
+static int stex_do_reset(struct st_hba *hba)
+{
+ struct st_ccb *ccb;
+ unsigned long flags;
+ unsigned int mu_status = MU_STATE_RESETTING;
+ u16 tag;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->mu_status == MU_STATE_STARTING) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
+ pci_name(hba->pdev));
+ return 0;
+ }
+ while (hba->mu_status == MU_STATE_RESETTING) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ wait_event_timeout(hba->reset_waitq,
+ hba->mu_status != MU_STATE_RESETTING,
+ MU_MAX_DELAY * HZ);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ mu_status = hba->mu_status;
+ }
+
+ if (mu_status != MU_STATE_RESETTING) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return (mu_status == MU_STATE_STARTED) ? 0 : -1;
+ }
+
+ hba->mu_status = MU_STATE_RESETTING;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (hba->cardtype == st_yosemite)
+ return stex_yos_reset(hba);
+
+ if (hba->cardtype == st_shasta)
+ stex_hard_reset(hba);
+ else if (hba->cardtype == st_yel)
+ stex_ss_reset(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ for (tag = 0; tag < hba->host->can_queue; tag++) {
+ ccb = &hba->ccb[tag];
+ if (ccb->req == NULL)
+ continue;
+ ccb->req = NULL;
+ if (ccb->cmd) {
+ scsi_dma_unmap(ccb->cmd);
+ ccb->cmd->result = DID_RESET << 16;
+ ccb->cmd->scsi_done(ccb->cmd);
+ ccb->cmd = NULL;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (stex_handshake(hba) == 0)
+ return 0;
+
+ printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
+ pci_name(hba->pdev));
+ return -1;
+}
+
+static int stex_reset(struct scsi_cmnd *cmd)
+{
+ struct st_hba *hba;
+
+ hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+
+ shost_printk(KERN_INFO, cmd->device->host,
+ "resetting host\n");
+
+ return stex_do_reset(hba) ? FAILED : SUCCESS;
+}
+
+static void stex_reset_work(struct work_struct *work)
+{
+ struct st_hba *hba = container_of(work, struct st_hba, reset_work);
+
+ stex_do_reset(hba);
+}
+
+static int stex_biosparam(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int geom[])
+{
+ int heads = 255, sectors = 63;
+
+ if (capacity < 0x200000) {
+ heads = 64;
+ sectors = 32;
+ }
+
+ sector_div(capacity, heads * sectors);
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = capacity;
+
+ return 0;
+}
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .proc_name = DRV_NAME,
+ .bios_param = stex_biosparam,
+ .queuecommand = stex_queuecommand,
+ .slave_configure = stex_slave_config,
+ .eh_abort_handler = stex_abort,
+ .eh_host_reset_handler = stex_reset,
+ .this_id = -1,
+ .use_blk_tags = 1,
+};
+
+static struct pci_device_id stex_pci_tbl[] = {
+ /* st_shasta */
+ { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
+ { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ st_shasta }, /* SuperTrak EX12350 */
+ { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ st_shasta }, /* SuperTrak EX4350 */
+ { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ st_shasta }, /* SuperTrak EX24350 */
+
+ /* st_vsc */
+ { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
+
+ /* st_yosemite */
+ { 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
+
+ /* st_seq */
+ { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
+
+ /* st_yel */
+ { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
+ { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
+ { } /* terminate list */
+};
+
+static struct st_card_info stex_card_info[] = {
+ /* st_shasta */
+ {
+ .max_id = 17,
+ .max_lun = 8,
+ .max_channel = 0,
+ .rq_count = 32,
+ .rq_size = 1048,
+ .sts_count = 32,
+ .alloc_rq = stex_alloc_req,
+ .map_sg = stex_map_sg,
+ .send = stex_send_cmd,
+ },
+
+ /* st_vsc */
+ {
+ .max_id = 129,
+ .max_lun = 1,
+ .max_channel = 0,
+ .rq_count = 32,
+ .rq_size = 1048,
+ .sts_count = 32,
+ .alloc_rq = stex_alloc_req,
+ .map_sg = stex_map_sg,
+ .send = stex_send_cmd,
+ },
+
+ /* st_yosemite */
+ {
+ .max_id = 2,
+ .max_lun = 256,
+ .max_channel = 0,
+ .rq_count = 256,
+ .rq_size = 1048,
+ .sts_count = 256,
+ .alloc_rq = stex_alloc_req,
+ .map_sg = stex_map_sg,
+ .send = stex_send_cmd,
+ },
+
+ /* st_seq */
+ {
+ .max_id = 129,
+ .max_lun = 1,
+ .max_channel = 0,
+ .rq_count = 32,
+ .rq_size = 1048,
+ .sts_count = 32,
+ .alloc_rq = stex_alloc_req,
+ .map_sg = stex_map_sg,
+ .send = stex_send_cmd,
+ },
+
+ /* st_yel */
+ {
+ .max_id = 129,
+ .max_lun = 256,
+ .max_channel = 3,
+ .rq_count = 801,
+ .rq_size = 512,
+ .sts_count = 801,
+ .alloc_rq = stex_ss_alloc_req,
+ .map_sg = stex_ss_map_sg,
+ .send = stex_ss_send_cmd,
+ },
+};
+
+static int stex_set_dma_mask(struct pci_dev * pdev)
+{
+ int ret;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ return 0;
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ return ret;
+}
+
+static int stex_request_irq(struct st_hba *hba)
+{
+ struct pci_dev *pdev = hba->pdev;
+ int status;
+
+ if (msi) {
+ status = pci_enable_msi(pdev);
+ if (status != 0)
+ printk(KERN_ERR DRV_NAME
+ "(%s): error %d setting up MSI\n",
+ pci_name(pdev), status);
+ else
+ hba->msi_enabled = 1;
+ } else
+ hba->msi_enabled = 0;
+
+ status = request_irq(pdev->irq, hba->cardtype == st_yel ?
+ stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
+
+ if (status != 0) {
+ if (hba->msi_enabled)
+ pci_disable_msi(pdev);
+ }
+ return status;
+}
+
+static void stex_free_irq(struct st_hba *hba)
+{
+ struct pci_dev *pdev = hba->pdev;
+
+ free_irq(pdev->irq, hba);
+ if (hba->msi_enabled)
+ pci_disable_msi(pdev);
+}
+
+static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct st_hba *hba;
+ struct Scsi_Host *host;
+ const struct st_card_info *ci = NULL;
+ u32 sts_offset, cp_offset, scratch_offset;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
+
+ if (!host) {
+ printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto out_disable;
+ }
+
+ hba = (struct st_hba *)host->hostdata;
+ memset(hba, 0, sizeof(struct st_hba));
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err < 0) {
+ printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
+ pci_name(pdev));
+ goto out_scsi_host_put;
+ }
+
+ hba->mmio_base = pci_ioremap_bar(pdev, 0);
+ if ( !hba->mmio_base) {
+ printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ err = stex_set_dma_mask(pdev);
+ if (err) {
+ printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
+ pci_name(pdev));
+ goto out_iounmap;
+ }
+
+ hba->cardtype = (unsigned int) id->driver_data;
+ ci = &stex_card_info[hba->cardtype];
+ sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
+ if (hba->cardtype == st_yel)
+ sts_offset += (ci->sts_count+1) * sizeof(u32);
+ cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
+ hba->dma_size = cp_offset + sizeof(struct st_frame);
+ if (hba->cardtype == st_seq ||
+ (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
+ hba->extra_offset = hba->dma_size;
+ hba->dma_size += ST_ADDITIONAL_MEM;
+ }
+ hba->dma_mem = dma_alloc_coherent(&pdev->dev,
+ hba->dma_size, &hba->dma_handle, GFP_KERNEL);
+ if (!hba->dma_mem) {
+ /* Retry minimum coherent mapping for st_seq and st_vsc */
+ if (hba->cardtype == st_seq ||
+ (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
+ printk(KERN_WARNING DRV_NAME
+ "(%s): allocating min buffer for controller\n",
+ pci_name(pdev));
+ hba->dma_size = hba->extra_offset
+ + ST_ADDITIONAL_MEM_MIN;
+ hba->dma_mem = dma_alloc_coherent(&pdev->dev,
+ hba->dma_size, &hba->dma_handle, GFP_KERNEL);
+ }
+
+ if (!hba->dma_mem) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
+ pci_name(pdev));
+ goto out_iounmap;
+ }
+ }
+
+ hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
+ if (!hba->ccb) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
+ pci_name(pdev));
+ goto out_pci_free;
+ }
+
+ if (hba->cardtype == st_yel)
+ hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
+ hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
+ hba->copy_buffer = hba->dma_mem + cp_offset;
+ hba->rq_count = ci->rq_count;
+ hba->rq_size = ci->rq_size;
+ hba->sts_count = ci->sts_count;
+ hba->alloc_rq = ci->alloc_rq;
+ hba->map_sg = ci->map_sg;
+ hba->send = ci->send;
+ hba->mu_status = MU_STATE_STARTING;
+
+ if (hba->cardtype == st_yel)
+ host->sg_tablesize = 38;
+ else
+ host->sg_tablesize = 32;
+ host->can_queue = ci->rq_count;
+ host->cmd_per_lun = ci->rq_count;
+ host->max_id = ci->max_id;
+ host->max_lun = ci->max_lun;
+ host->max_channel = ci->max_channel;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = STEX_CDB_LENGTH;
+
+ hba->host = host;
+ hba->pdev = pdev;
+ init_waitqueue_head(&hba->reset_waitq);
+
+ snprintf(hba->work_q_name, sizeof(hba->work_q_name),
+ "stex_wq_%d", host->host_no);
+ hba->work_q = create_singlethread_workqueue(hba->work_q_name);
+ if (!hba->work_q) {
+ printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto out_ccb_free;
+ }
+ INIT_WORK(&hba->reset_work, stex_reset_work);
+
+ err = stex_request_irq(hba);
+ if (err) {
+ printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
+ pci_name(pdev));
+ goto out_free_wq;
+ }
+
+ err = stex_handshake(hba);
+ if (err)
+ goto out_free_irq;
+
+ err = scsi_init_shared_tag_map(host, host->can_queue);
+ if (err) {
+ printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
+ pci_name(pdev));
+ goto out_free_irq;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ err = scsi_add_host(host, &pdev->dev);
+ if (err) {
+ printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
+ pci_name(pdev));
+ goto out_free_irq;
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+
+out_free_irq:
+ stex_free_irq(hba);
+out_free_wq:
+ destroy_workqueue(hba->work_q);
+out_ccb_free:
+ kfree(hba->ccb);
+out_pci_free:
+ dma_free_coherent(&pdev->dev, hba->dma_size,
+ hba->dma_mem, hba->dma_handle);
+out_iounmap:
+ iounmap(hba->mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_scsi_host_put:
+ scsi_host_put(host);
+out_disable:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void stex_hba_stop(struct st_hba *hba)
+{
+ struct req_msg *req;
+ struct st_msg_header *msg_h;
+ unsigned long flags;
+ unsigned long before;
+ u16 tag = 0;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ req = hba->alloc_rq(hba);
+ if (hba->cardtype == st_yel) {
+ msg_h = (struct st_msg_header *)req - 1;
+ memset(msg_h, 0, hba->rq_size);
+ } else
+ memset(req, 0, hba->rq_size);
+
+ if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
+ req->cdb[0] = MGT_CMD;
+ req->cdb[1] = MGT_CMD_SIGNATURE;
+ req->cdb[2] = CTLR_CONFIG_CMD;
+ req->cdb[3] = CTLR_SHUTDOWN;
+ } else {
+ req->cdb[0] = CONTROLLER_CMD;
+ req->cdb[1] = CTLR_POWER_STATE_CHANGE;
+ req->cdb[2] = CTLR_POWER_SAVING;
+ }
+
+ hba->ccb[tag].cmd = NULL;
+ hba->ccb[tag].sg_count = 0;
+ hba->ccb[tag].sense_bufflen = 0;
+ hba->ccb[tag].sense_buffer = NULL;
+ hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
+
+ hba->send(hba, req, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ before = jiffies;
+ while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
+ if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
+ hba->ccb[tag].req_type = 0;
+ return;
+ }
+ msleep(1);
+ }
+}
+
+static void stex_hba_free(struct st_hba *hba)
+{
+ stex_free_irq(hba);
+
+ destroy_workqueue(hba->work_q);
+
+ iounmap(hba->mmio_base);
+
+ pci_release_regions(hba->pdev);
+
+ kfree(hba->ccb);
+
+ dma_free_coherent(&hba->pdev->dev, hba->dma_size,
+ hba->dma_mem, hba->dma_handle);
+}
+
+static void stex_remove(struct pci_dev *pdev)
+{
+ struct st_hba *hba = pci_get_drvdata(pdev);
+
+ scsi_remove_host(hba->host);
+
+ stex_hba_stop(hba);
+
+ stex_hba_free(hba);
+
+ scsi_host_put(hba->host);
+
+ pci_disable_device(pdev);
+}
+
+static void stex_shutdown(struct pci_dev *pdev)
+{
+ struct st_hba *hba = pci_get_drvdata(pdev);
+
+ stex_hba_stop(hba);
+}
+
+MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
+
+static struct pci_driver stex_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = stex_pci_tbl,
+ .probe = stex_probe,
+ .remove = stex_remove,
+ .shutdown = stex_shutdown,
+};
+
+static int __init stex_init(void)
+{
+ printk(KERN_INFO DRV_NAME
+ ": Promise SuperTrak EX Driver version: %s\n",
+ ST_DRIVER_VERSION);
+
+ return pci_register_driver(&stex_pci_driver);
+}
+
+static void __exit stex_exit(void)
+{
+ pci_unregister_driver(&stex_pci_driver);
+}
+
+module_init(stex_init);
+module_exit(stex_exit);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
new file mode 100644
index 000000000..3c6584ff6
--- /dev/null
+++ b/drivers/scsi/storvsc_drv.c
@@ -0,0 +1,1935 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/hyperv.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_dbg.h>
+
+/*
+ * All wire protocol details (storage protocol between the guest and the host)
+ * are consolidated here.
+ *
+ * Begin protocol definitions.
+ */
+
+/*
+ * Version history:
+ * V1 Beta: 0.1
+ * V1 RC < 2008/1/31: 1.0
+ * V1 RC > 2008/1/31: 2.0
+ * Win7: 4.2
+ * Win8: 5.1
+ */
+
+
+#define VMSTOR_WIN7_MAJOR 4
+#define VMSTOR_WIN7_MINOR 2
+
+#define VMSTOR_WIN8_MAJOR 5
+#define VMSTOR_WIN8_MINOR 1
+
+
+/* Packet structure describing virtual storage requests. */
+enum vstor_packet_operation {
+ VSTOR_OPERATION_COMPLETE_IO = 1,
+ VSTOR_OPERATION_REMOVE_DEVICE = 2,
+ VSTOR_OPERATION_EXECUTE_SRB = 3,
+ VSTOR_OPERATION_RESET_LUN = 4,
+ VSTOR_OPERATION_RESET_ADAPTER = 5,
+ VSTOR_OPERATION_RESET_BUS = 6,
+ VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
+ VSTOR_OPERATION_END_INITIALIZATION = 8,
+ VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
+ VSTOR_OPERATION_QUERY_PROPERTIES = 10,
+ VSTOR_OPERATION_ENUMERATE_BUS = 11,
+ VSTOR_OPERATION_FCHBA_DATA = 12,
+ VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13,
+ VSTOR_OPERATION_MAXIMUM = 13
+};
+
+/*
+ * WWN packet for Fibre Channel HBA
+ */
+
+struct hv_fc_wwn_packet {
+ bool primary_active;
+ u8 reserved1;
+ u8 reserved2;
+ u8 primary_port_wwn[8];
+ u8 primary_node_wwn[8];
+ u8 secondary_port_wwn[8];
+ u8 secondary_node_wwn[8];
+};
+
+
+
+/*
+ * SRB Flag Bits
+ */
+
+#define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002
+#define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004
+#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008
+#define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010
+#define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020
+#define SRB_FLAGS_DATA_IN 0x00000040
+#define SRB_FLAGS_DATA_OUT 0x00000080
+#define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000
+#define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
+#define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100
+#define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200
+#define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400
+
+/*
+ * This flag indicates the request is part of the workflow for processing a D3.
+ */
+#define SRB_FLAGS_D3_PROCESSING 0x00000800
+#define SRB_FLAGS_IS_ACTIVE 0x00010000
+#define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000
+#define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000
+#define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000
+#define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000
+#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000
+#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000
+#define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000
+#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
+#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
+
+
+/*
+ * Platform neutral description of a scsi request -
+ * this remains the same across the write regardless of 32/64 bit
+ * note: it's patterned off the SCSI_PASS_THROUGH structure
+ */
+#define STORVSC_MAX_CMD_LEN 0x10
+
+#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
+#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
+
+#define STORVSC_SENSE_BUFFER_SIZE 0x14
+#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
+
+/*
+ * Sense buffer size changed in win8; have a run-time
+ * variable to track the size we should use.
+ */
+static int sense_buffer_size;
+
+/*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply.
+ */
+
+static int vmscsi_size_delta;
+static int vmstor_current_major;
+static int vmstor_current_minor;
+
+struct vmscsi_win8_extension {
+ /*
+ * The following were added in Windows 8
+ */
+ u16 reserve;
+ u8 queue_tag;
+ u8 queue_action;
+ u32 srb_flags;
+ u32 time_out_value;
+ u32 queue_sort_ey;
+} __packed;
+
+struct vmscsi_request {
+ u16 length;
+ u8 srb_status;
+ u8 scsi_status;
+
+ u8 port_number;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+
+ u8 cdb_length;
+ u8 sense_info_length;
+ u8 data_in;
+ u8 reserved;
+
+ u32 data_transfer_length;
+
+ union {
+ u8 cdb[STORVSC_MAX_CMD_LEN];
+ u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
+ u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
+ };
+ /*
+ * The following was added in win8.
+ */
+ struct vmscsi_win8_extension win8_extension;
+
+} __attribute((packed));
+
+
+/*
+ * This structure is sent during the intialization phase to get the different
+ * properties of the channel.
+ */
+
+#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1
+
+struct vmstorage_channel_properties {
+ u32 reserved;
+ u16 max_channel_cnt;
+ u16 reserved1;
+
+ u32 flags;
+ u32 max_transfer_bytes;
+
+ u64 reserved2;
+} __packed;
+
+/* This structure is sent during the storage protocol negotiations. */
+struct vmstorage_protocol_version {
+ /* Major (MSW) and minor (LSW) version numbers. */
+ u16 major_minor;
+
+ /*
+ * Revision number is auto-incremented whenever this file is changed
+ * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
+ * definitely indicate incompatibility--but it does indicate mismatched
+ * builds.
+ * This is only used on the windows side. Just set it to 0.
+ */
+ u16 revision;
+} __packed;
+
+/* Channel Property Flags */
+#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
+#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
+
+struct vstor_packet {
+ /* Requested operation type */
+ enum vstor_packet_operation operation;
+
+ /* Flags - see below for values */
+ u32 flags;
+
+ /* Status of the request returned from the server side. */
+ u32 status;
+
+ /* Data payload area */
+ union {
+ /*
+ * Structure used to forward SCSI commands from the
+ * client to the server.
+ */
+ struct vmscsi_request vm_srb;
+
+ /* Structure used to query channel properties. */
+ struct vmstorage_channel_properties storage_channel_properties;
+
+ /* Used during version negotiations. */
+ struct vmstorage_protocol_version version;
+
+ /* Fibre channel address packet */
+ struct hv_fc_wwn_packet wwn_packet;
+
+ /* Number of sub-channels to create */
+ u16 sub_channel_count;
+
+ /* This will be the maximum of the union members */
+ u8 buffer[0x34];
+ };
+} __packed;
+
+/*
+ * Packet Flags:
+ *
+ * This flag indicates that the server should send back a completion for this
+ * packet.
+ */
+
+#define REQUEST_COMPLETION_FLAG 0x1
+
+/* Matches Windows-end */
+enum storvsc_request_type {
+ WRITE_TYPE = 0,
+ READ_TYPE,
+ UNKNOWN_TYPE,
+};
+
+/*
+ * SRB status codes and masks; a subset of the codes used here.
+ */
+
+#define SRB_STATUS_AUTOSENSE_VALID 0x80
+#define SRB_STATUS_INVALID_LUN 0x20
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
+#define SRB_STATUS_ERROR 0x04
+
+/*
+ * This is the end of Protocol specific defines.
+ */
+
+static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static u32 max_outstanding_req_per_channel;
+
+static int storvsc_vcpus_per_sub_channel = 4;
+
+module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
+
+module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
+MODULE_PARM_DESC(vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
+/*
+ * Timeout in seconds for all devices managed by this driver.
+ */
+static int storvsc_timeout = 180;
+
+static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
+
+
+static void storvsc_on_channel_callback(void *context);
+
+#define STORVSC_MAX_LUNS_PER_TARGET 255
+#define STORVSC_MAX_TARGETS 2
+#define STORVSC_MAX_CHANNELS 8
+
+#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
+#define STORVSC_FC_MAX_TARGETS 128
+#define STORVSC_FC_MAX_CHANNELS 8
+
+#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
+#define STORVSC_IDE_MAX_TARGETS 1
+#define STORVSC_IDE_MAX_CHANNELS 1
+
+struct storvsc_cmd_request {
+ struct scsi_cmnd *cmd;
+
+ unsigned int bounce_sgl_count;
+ struct scatterlist *bounce_sgl;
+
+ struct hv_device *device;
+
+ /* Synchronize the request/response if needed */
+ struct completion wait_event;
+
+ struct vmbus_channel_packet_multipage_buffer mpb;
+ struct vmbus_packet_mpb_array *payload;
+ u32 payload_sz;
+
+ struct vstor_packet vstor_packet;
+};
+
+
+/* A storvsc device is a device object that contains a vmbus channel */
+struct storvsc_device {
+ struct hv_device *device;
+
+ bool destroy;
+ bool drain_notify;
+ bool open_sub_channel;
+ atomic_t num_outstanding_req;
+ struct Scsi_Host *host;
+
+ wait_queue_head_t waiting_to_drain;
+
+ /*
+ * Each unique Port/Path/Target represents 1 channel ie scsi
+ * controller. In reality, the pathid, targetid is always 0
+ * and the port is set by us
+ */
+ unsigned int port_number;
+ unsigned char path_id;
+ unsigned char target_id;
+
+ /*
+ * Max I/O, the device can support.
+ */
+ u32 max_transfer_bytes;
+ /* Used for vsc/vsp channel reset process */
+ struct storvsc_cmd_request init_request;
+ struct storvsc_cmd_request reset_request;
+};
+
+struct hv_host_device {
+ struct hv_device *dev;
+ unsigned int port;
+ unsigned char path;
+ unsigned char target;
+};
+
+struct storvsc_scan_work {
+ struct work_struct work;
+ struct Scsi_Host *host;
+ uint lun;
+};
+
+static void storvsc_device_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ uint lun;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ lun = wrk->lun;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+ if (!sdev)
+ goto done;
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+
+done:
+ kfree(wrk);
+}
+
+static void storvsc_host_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ host = wrk->host;
+
+ /*
+ * Before scanning the host, first check to see if any of the
+ * currrently known devices have been hot removed. We issue a
+ * "unit ready" command against all currently known devices.
+ * This I/O will result in an error for devices that have been
+ * removed. As part of handling the I/O error, we remove the device.
+ *
+ * When a LUN is added or removed, the host sends us a signal to
+ * scan the host. Thus we are forced to discover the LUNs that
+ * may have been removed this way.
+ */
+ mutex_lock(&host->scan_mutex);
+ spin_lock_irqsave(host->host_lock, flags);
+ list_for_each_entry(sdev, &host->__devices, siblings) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ scsi_test_unit_ready(sdev, 1, 1, NULL);
+ spin_lock_irqsave(host->host_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+ mutex_unlock(&host->scan_mutex);
+ /*
+ * Now scan the host to discover LUNs that may have been added.
+ */
+ scsi_scan_host(host);
+
+ kfree(wrk);
+}
+
+static void storvsc_remove_lun(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ if (!scsi_host_get(wrk->host))
+ goto done;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ scsi_host_put(wrk->host);
+
+done:
+ kfree(wrk);
+}
+
+/*
+ * Major/minor macros. Minor version is in LSB, meaning that earlier flat
+ * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
+ */
+
+static inline u16 storvsc_get_version(u8 major, u8 minor)
+{
+ u16 version;
+
+ version = ((major << 8) | minor);
+ return version;
+}
+
+/*
+ * We can get incoming messages from the host that are not in response to
+ * messages that we have sent out. An example of this would be messages
+ * received by the guest to notify dynamic addition/removal of LUNs. To
+ * deal with potential race conditions where the driver may be in the
+ * midst of being unloaded when we might receive an unsolicited message
+ * from the host, we have implemented a mechanism to gurantee sequential
+ * consistency:
+ *
+ * 1) Once the device is marked as being destroyed, we will fail all
+ * outgoing messages.
+ * 2) We permit incoming messages when the device is being destroyed,
+ * only to properly account for messages already sent out.
+ */
+
+static inline struct storvsc_device *get_out_stor_device(
+ struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+
+ stor_device = hv_get_drvdata(device);
+
+ if (stor_device && stor_device->destroy)
+ stor_device = NULL;
+
+ return stor_device;
+}
+
+
+static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
+{
+ dev->drain_notify = true;
+ wait_event(dev->waiting_to_drain,
+ atomic_read(&dev->num_outstanding_req) == 0);
+ dev->drain_notify = false;
+}
+
+static inline struct storvsc_device *get_in_stor_device(
+ struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+
+ stor_device = hv_get_drvdata(device);
+
+ if (!stor_device)
+ goto get_in_err;
+
+ /*
+ * If the device is being destroyed; allow incoming
+ * traffic only to cleanup outstanding requests.
+ */
+
+ if (stor_device->destroy &&
+ (atomic_read(&stor_device->num_outstanding_req) == 0))
+ stor_device = NULL;
+
+get_in_err:
+ return stor_device;
+
+}
+
+static void destroy_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count)
+{
+ int i;
+ struct page *page_buf;
+
+ for (i = 0; i < sg_count; i++) {
+ page_buf = sg_page((&sgl[i]));
+ if (page_buf != NULL)
+ __free_page(page_buf);
+ }
+
+ kfree(sgl);
+}
+
+static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+{
+ int i;
+
+ /* No need to check */
+ if (sg_count < 2)
+ return -1;
+
+ /* We have at least 2 sg entries */
+ for (i = 0; i < sg_count; i++) {
+ if (i == 0) {
+ /* make sure 1st one does not have hole */
+ if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
+ return i;
+ } else if (i == sg_count - 1) {
+ /* make sure last one does not have hole */
+ if (sgl[i].offset != 0)
+ return i;
+ } else {
+ /* make sure no hole in the middle */
+ if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
+ return i;
+ }
+ }
+ return -1;
+}
+
+static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count,
+ unsigned int len,
+ int write)
+{
+ int i;
+ int num_pages;
+ struct scatterlist *bounce_sgl;
+ struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
+
+ num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
+
+ bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!bounce_sgl)
+ return NULL;
+
+ sg_init_table(bounce_sgl, num_pages);
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
+ goto cleanup;
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
+ }
+
+ return bounce_sgl;
+
+cleanup:
+ destroy_bounce_buffer(bounce_sgl, num_pages);
+ return NULL;
+}
+
+/* Assume the original sgl has enough room */
+static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long dest_addr = 0;
+ unsigned long flags;
+ struct scatterlist *cur_dest_sgl;
+ struct scatterlist *cur_src_sgl;
+
+ local_irq_save(flags);
+ cur_dest_sgl = orig_sgl;
+ cur_src_sgl = bounce_sgl;
+ for (i = 0; i < orig_sgl_count; i++) {
+ dest_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl)) +
+ cur_dest_sgl->offset;
+ dest = dest_addr;
+ destlen = cur_dest_sgl->length;
+
+ if (bounce_addr == 0)
+ bounce_addr = (unsigned long)kmap_atomic(
+ sg_page(cur_src_sgl));
+
+ while (destlen) {
+ src = bounce_addr + cur_src_sgl->offset;
+ srclen = cur_src_sgl->length - cur_src_sgl->offset;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ cur_src_sgl->offset += copylen;
+ destlen -= copylen;
+ dest += copylen;
+
+ if (cur_src_sgl->offset == cur_src_sgl->length) {
+ /* full */
+ kunmap_atomic((void *)bounce_addr);
+ j++;
+
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
+ cur_dest_sgl->offset));
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
+ /* if we need to use another bounce buffer */
+ if (destlen || i != orig_sgl_count - 1) {
+ cur_src_sgl = sg_next(cur_src_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_src_sgl));
+ }
+ } else if (destlen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ kunmap_atomic((void *)bounce_addr);
+ }
+ }
+
+ kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
+ cur_dest_sgl = sg_next(cur_dest_sgl);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
+static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long src_addr = 0;
+ unsigned long flags;
+ struct scatterlist *cur_src_sgl;
+ struct scatterlist *cur_dest_sgl;
+
+ local_irq_save(flags);
+
+ cur_src_sgl = orig_sgl;
+ cur_dest_sgl = bounce_sgl;
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ src_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_src_sgl)) +
+ cur_src_sgl->offset;
+ src = src_addr;
+ srclen = cur_src_sgl->length;
+
+ if (bounce_addr == 0)
+ bounce_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl));
+
+ while (srclen) {
+ /* assume bounce offset always == 0 */
+ dest = bounce_addr + cur_dest_sgl->length;
+ destlen = PAGE_SIZE - cur_dest_sgl->length;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ cur_dest_sgl->length += copylen;
+ srclen -= copylen;
+ src += copylen;
+
+ if (cur_dest_sgl->length == PAGE_SIZE) {
+ /* full..move to next entry */
+ kunmap_atomic((void *)bounce_addr);
+ bounce_addr = 0;
+ j++;
+ }
+
+ /* if we need to use another bounce buffer */
+ if (srclen && bounce_addr == 0) {
+ cur_dest_sgl = sg_next(cur_dest_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_dest_sgl));
+ }
+
+ }
+
+ kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
+ cur_src_sgl = sg_next(cur_src_sgl);
+ }
+
+ if (bounce_addr)
+ kunmap_atomic((void *)bounce_addr);
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+static void handle_sc_creation(struct vmbus_channel *new_sc)
+{
+ struct hv_device *device = new_sc->primary_channel->device_obj;
+ struct storvsc_device *stor_device;
+ struct vmstorage_channel_properties props;
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return;
+
+ if (stor_device->open_sub_channel == false)
+ return;
+
+ memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+
+ vmbus_open(new_sc,
+ storvsc_ringbuffer_size,
+ storvsc_ringbuffer_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, new_sc);
+}
+
+static void handle_multichannel_storage(struct hv_device *device, int max_chns)
+{
+ struct storvsc_device *stor_device;
+ int num_cpus = num_online_cpus();
+ int num_sc;
+ struct storvsc_cmd_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+
+ num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return;
+
+ request = &stor_device->init_request;
+ vstor_packet = &request->vstor_packet;
+
+ stor_device->open_sub_channel = true;
+ /*
+ * Establish a handler for dealing with subchannels.
+ */
+ vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
+
+ /*
+ * Check to see if sub-channels have already been created. This
+ * can happen when this driver is re-loaded after unloading.
+ */
+
+ if (vmbus_are_subchannels_present(device->channel))
+ return;
+
+ stor_device->open_sub_channel = false;
+ /*
+ * Request the host to create sub-channels.
+ */
+ memset(request, 0, sizeof(struct storvsc_cmd_request));
+ init_completion(&request->wait_event);
+ vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->sub_channel_count = num_sc;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ return;
+
+ t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
+ if (t == 0)
+ return;
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ return;
+
+ /*
+ * Now that we created the sub-channels, invoke the check; this
+ * may trigger the callback.
+ */
+ stor_device->open_sub_channel = true;
+ vmbus_are_subchannels_present(device->channel);
+}
+
+static int storvsc_channel_init(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ struct storvsc_cmd_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+ int max_chns;
+ bool process_sub_channels = false;
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return -ENODEV;
+
+ request = &stor_device->init_request;
+ vstor_packet = &request->vstor_packet;
+
+ /*
+ * Now, initiate the vsc/vsp initialization protocol on the open
+ * channel
+ */
+ memset(request, 0, sizeof(struct storvsc_cmd_request));
+ init_completion(&request->wait_event);
+ vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ /* reuse the packet for version range supported */
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->version.major_minor =
+ storvsc_get_version(vmstor_current_major, vmstor_current_minor);
+
+ /*
+ * The revision number is only used in Windows; set it to 0.
+ */
+ vstor_packet->version.revision = 0;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+ /*
+ * Check to see if multi-channel support is there.
+ * Hosts that implement protocol version of 5.1 and above
+ * support multi-channel.
+ */
+ max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
+ if ((vmbus_proto_version != VERSION_WIN7) &&
+ (vmbus_proto_version != VERSION_WS2008)) {
+ if (vstor_packet->storage_channel_properties.flags &
+ STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
+ process_sub_channels = true;
+ }
+ stor_device->max_transfer_bytes =
+ vstor_packet->storage_channel_properties.max_transfer_bytes;
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+ if (process_sub_channels)
+ handle_multichannel_storage(device, max_chns);
+
+
+cleanup:
+ return ret;
+}
+
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ struct scsi_cmnd *scmnd,
+ struct Scsi_Host *host,
+ u8 asc, u8 ascq)
+{
+ struct storvsc_scan_work *wrk;
+ void (*process_err_fn)(struct work_struct *work);
+ bool do_work = false;
+
+ switch (vm_srb->srb_status) {
+ case SRB_STATUS_ERROR:
+ /*
+ * If there is an error; offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side. However, if the command
+ * were a pass-through command deal with it appropriately.
+ */
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
+ /*
+ * On Some Windows hosts TEST_UNIT_READY command can return
+ * SRB_STATUS_ERROR, let the upper level code deal with it
+ * based on the sense information.
+ */
+ case TEST_UNIT_READY:
+ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+ break;
+ case SRB_STATUS_INVALID_LUN:
+ do_work = true;
+ process_err_fn = storvsc_remove_lun;
+ break;
+ case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ do_work = true;
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that trigerred this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ }
+ break;
+ }
+
+ if (!do_work)
+ return;
+
+ /*
+ * We need to schedule work to process this error; schedule it.
+ */
+ wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!wrk) {
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ return;
+ }
+
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, process_err_fn);
+ schedule_work(&wrk->work);
+}
+
+
+static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+{
+ struct scsi_cmnd *scmnd = cmd_request->cmd;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
+ struct Scsi_Host *host;
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ u32 payload_sz = cmd_request->payload_sz;
+ void *payload = cmd_request->payload;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+ if (cmd_request->bounce_sgl_count) {
+ if (vm_srb->data_in == READ_TYPE)
+ copy_from_bounce_buffer(scsi_sglist(scmnd),
+ cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+ }
+
+ scmnd->result = vm_srb->scsi_status;
+
+ if (scmnd->result) {
+ if (scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr))
+ scsi_print_sense_hdr(scmnd->device, "storvsc",
+ &sense_hdr);
+ }
+
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+ sense_hdr.ascq);
+
+ scsi_set_resid(scmnd,
+ cmd_request->payload->range.len -
+ vm_srb->data_transfer_length);
+
+ scmnd->scsi_done(scmnd);
+
+ if (payload_sz >
+ sizeof(struct vmbus_channel_packet_multipage_buffer))
+ kfree(payload);
+}
+
+static void storvsc_on_io_completion(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct storvsc_cmd_request *request)
+{
+ struct storvsc_device *stor_device;
+ struct vstor_packet *stor_pkt;
+
+ stor_device = hv_get_drvdata(device);
+ stor_pkt = &request->vstor_packet;
+
+ /*
+ * The current SCSI handling on the host side does
+ * not correctly handle:
+ * INQUIRY command with page code parameter set to 0x80
+ * MODE_SENSE command with cmd[2] == 0x1c
+ *
+ * Setup srb and scsi status so this won't be fatal.
+ * We do this so we can distinguish truly fatal failues
+ * (srb status == 0x4) and off-line the device in that case.
+ */
+
+ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
+ (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
+ vstor_packet->vm_srb.scsi_status = 0;
+ vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
+ }
+
+
+ /* Copy over the status...etc */
+ stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
+ stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
+ stor_pkt->vm_srb.sense_info_length =
+ vstor_packet->vm_srb.sense_info_length;
+
+
+ if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
+ /* CHECK_CONDITION */
+ if (vstor_packet->vm_srb.srb_status &
+ SRB_STATUS_AUTOSENSE_VALID) {
+ /* autosense data available */
+
+ memcpy(request->cmd->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ vstor_packet->vm_srb.sense_info_length);
+
+ }
+ }
+
+ stor_pkt->vm_srb.data_transfer_length =
+ vstor_packet->vm_srb.data_transfer_length;
+
+ storvsc_command_completion(request);
+
+ if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
+ stor_device->drain_notify)
+ wake_up(&stor_device->waiting_to_drain);
+
+
+}
+
+static void storvsc_on_receive(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct storvsc_cmd_request *request)
+{
+ struct storvsc_scan_work *work;
+ struct storvsc_device *stor_device;
+
+ switch (vstor_packet->operation) {
+ case VSTOR_OPERATION_COMPLETE_IO:
+ storvsc_on_io_completion(device, vstor_packet, request);
+ break;
+
+ case VSTOR_OPERATION_REMOVE_DEVICE:
+ case VSTOR_OPERATION_ENUMERATE_BUS:
+ stor_device = get_in_stor_device(device);
+ work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, storvsc_host_scan);
+ work->host = stor_device->host;
+ schedule_work(&work->work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void storvsc_on_channel_callback(void *context)
+{
+ struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ struct hv_device *device;
+ struct storvsc_device *stor_device;
+ u32 bytes_recvd;
+ u64 request_id;
+ unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
+ struct storvsc_cmd_request *request;
+ int ret;
+
+ if (channel->primary_channel != NULL)
+ device = channel->primary_channel->device_obj;
+ else
+ device = channel->device_obj;
+
+ stor_device = get_in_stor_device(device);
+ if (!stor_device)
+ return;
+
+ do {
+ ret = vmbus_recvpacket(channel, packet,
+ ALIGN((sizeof(struct vstor_packet) -
+ vmscsi_size_delta), 8),
+ &bytes_recvd, &request_id);
+ if (ret == 0 && bytes_recvd > 0) {
+
+ request = (struct storvsc_cmd_request *)
+ (unsigned long)request_id;
+
+ if ((request == &stor_device->init_request) ||
+ (request == &stor_device->reset_request)) {
+
+ memcpy(&request->vstor_packet, packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta));
+ complete(&request->wait_event);
+ } else {
+ storvsc_on_receive(device,
+ (struct vstor_packet *)packet,
+ request);
+ }
+ } else {
+ break;
+ }
+ } while (1);
+
+ return;
+}
+
+static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
+{
+ struct vmstorage_channel_properties props;
+ int ret;
+
+ memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+
+ ret = vmbus_open(device->channel,
+ ring_size,
+ ring_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, device->channel);
+
+ if (ret != 0)
+ return ret;
+
+ ret = storvsc_channel_init(device);
+
+ return ret;
+}
+
+static int storvsc_dev_remove(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ unsigned long flags;
+
+ stor_device = hv_get_drvdata(device);
+
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ stor_device->destroy = true;
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /*
+ * At this point, all outbound traffic should be disable. We
+ * only allow inbound traffic (responses) to proceed so that
+ * outstanding requests can be completed.
+ */
+
+ storvsc_wait_to_drain(stor_device);
+
+ /*
+ * Since we have already drained, we don't need to busy wait
+ * as was done in final_release_stor_device()
+ * Note that we cannot set the ext pointer to NULL until
+ * we have drained - to drain the outgoing packets, we need to
+ * allow incoming packets.
+ */
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ hv_set_drvdata(device, NULL);
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /* Close the channel */
+ vmbus_close(device->channel);
+
+ kfree(stor_device);
+ return 0;
+}
+
+static int storvsc_do_io(struct hv_device *device,
+ struct storvsc_cmd_request *request)
+{
+ struct storvsc_device *stor_device;
+ struct vstor_packet *vstor_packet;
+ struct vmbus_channel *outgoing_channel;
+ int ret = 0;
+
+ vstor_packet = &request->vstor_packet;
+ stor_device = get_out_stor_device(device);
+
+ if (!stor_device)
+ return -ENODEV;
+
+
+ request->device = device;
+ /*
+ * Select an an appropriate channel to send the request out.
+ */
+
+ outgoing_channel = vmbus_get_outgoing_channel(device->channel);
+
+
+ vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
+ vmscsi_size_delta);
+
+
+ vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
+
+
+ vstor_packet->vm_srb.data_transfer_length =
+ request->payload->range.len;
+
+ vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
+
+ if (request->payload->range.len) {
+
+ ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
+ request->payload, request->payload_sz,
+ vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request);
+ } else {
+ ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ }
+
+ if (ret != 0)
+ return ret;
+
+ atomic_inc(&stor_device->num_outstanding_req);
+
+ return ret;
+}
+
+static int storvsc_device_configure(struct scsi_device *sdevice)
+{
+
+ blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
+
+ blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
+
+ blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
+
+ sdevice->no_write_same = 1;
+
+ /*
+ * Add blist flags to permit the reading of the VPD pages even when
+ * the target may claim SPC-2 compliance. MSFT targets currently
+ * claim SPC-2 compliance while they implement post SPC-2 features.
+ * With this patch we can correctly handle WRITE_SAME_16 issues.
+ */
+ sdevice->sdev_bflags |= msft_blist_flags;
+
+ /*
+ * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
+ * if the device is a MSFT virtual device.
+ */
+ if (!strncmp(sdevice->vendor, "Msft", 4)) {
+ switch (vmbus_proto_version) {
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
+ sdevice->scsi_level = SCSI_SPC_3;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
+ sector_t capacity, int *info)
+{
+ sector_t nsect = capacity;
+ sector_t cylinders = nsect;
+ int heads, sectors_pt;
+
+ /*
+ * We are making up these values; let us keep it simple.
+ */
+ heads = 0xff;
+ sectors_pt = 0x3f; /* Sectors per track */
+ sector_div(cylinders, heads * sectors_pt);
+ if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
+ cylinders = 0xffff;
+
+ info[0] = heads;
+ info[1] = sectors_pt;
+ info[2] = (int)cylinders;
+
+ return 0;
+}
+
+static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+{
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ struct hv_device *device = host_dev->dev;
+
+ struct storvsc_device *stor_device;
+ struct storvsc_cmd_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return FAILED;
+
+ request = &stor_device->reset_request;
+ vstor_packet = &request->vstor_packet;
+
+ init_completion(&request->wait_event);
+
+ vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->vm_srb.path_id = stor_device->path_id;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ (sizeof(struct vstor_packet) -
+ vmscsi_size_delta),
+ (unsigned long)&stor_device->reset_request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ return FAILED;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0)
+ return TIMEOUT_ERROR;
+
+
+ /*
+ * At this point, all outstanding requests in the adapter
+ * should have been flushed out and return to us
+ * There is a potential race here where the host may be in
+ * the process of responding when we return from here.
+ * Just wait for all in-transit packets to be accounted for
+ * before we return from here.
+ */
+ storvsc_wait_to_drain(stor_device);
+
+ return SUCCESS;
+}
+
+/*
+ * The host guarantees to respond to each command, although I/O latencies might
+ * be unbounded on Azure. Reset the timer unconditionally to give the host a
+ * chance to perform EH.
+ */
+static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+{
+ return BLK_EH_RESET_TIMER;
+}
+
+static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
+{
+ bool allowed = true;
+ u8 scsi_op = scmnd->cmnd[0];
+
+ switch (scsi_op) {
+ /* the host does not handle WRITE_SAME, log accident usage */
+ case WRITE_SAME:
+ /*
+ * smartd sends this command and the host does not handle
+ * this. So, don't send it.
+ */
+ case SET_WINDOW:
+ scmnd->result = ILLEGAL_REQUEST << 16;
+ allowed = false;
+ break;
+ default:
+ break;
+ }
+ return allowed;
+}
+
+static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+{
+ int ret;
+ struct hv_host_device *host_dev = shost_priv(host);
+ struct hv_device *dev = host_dev->dev;
+ struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
+ int i;
+ struct scatterlist *sgl;
+ unsigned int sg_count = 0;
+ struct vmscsi_request *vm_srb;
+ struct scatterlist *cur_sgl;
+ struct vmbus_packet_mpb_array *payload;
+ u32 payload_sz;
+ u32 length;
+
+ if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
+ /*
+ * On legacy hosts filter unimplemented commands.
+ * Future hosts are expected to correctly handle
+ * unsupported commands. Furthermore, it is
+ * possible that some of the currently
+ * unsupported commands maybe supported in
+ * future versions of the host.
+ */
+ if (!storvsc_scsi_cmd_ok(scmnd)) {
+ scmnd->scsi_done(scmnd);
+ return 0;
+ }
+ }
+
+ /* Setup the cmd request */
+ cmd_request->cmd = scmnd;
+
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+ vm_srb->win8_extension.time_out_value = 60;
+
+ vm_srb->win8_extension.srb_flags |=
+ (SRB_FLAGS_QUEUE_ACTION_ENABLE |
+ SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+
+ /* Build the SRB */
+ switch (scmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ vm_srb->data_in = WRITE_TYPE;
+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ vm_srb->data_in = READ_TYPE;
+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+ break;
+ default:
+ vm_srb->data_in = UNKNOWN_TYPE;
+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+ break;
+ }
+
+
+ vm_srb->port_number = host_dev->port;
+ vm_srb->path_id = scmnd->device->channel;
+ vm_srb->target_id = scmnd->device->id;
+ vm_srb->lun = scmnd->device->lun;
+
+ vm_srb->cdb_length = scmnd->cmd_len;
+
+ memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
+
+ sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
+
+ length = scsi_bufflen(scmnd);
+ payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload_sz = sizeof(cmd_request->mpb);
+
+ if (sg_count) {
+ /* check if we need to bounce the sgl */
+ if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
+ cmd_request->bounce_sgl =
+ create_bounce_buffer(sgl, sg_count,
+ length,
+ vm_srb->data_in);
+ if (!cmd_request->bounce_sgl)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ cmd_request->bounce_sgl_count =
+ ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (vm_srb->data_in == WRITE_TYPE)
+ copy_to_bounce_buffer(sgl,
+ cmd_request->bounce_sgl, sg_count);
+
+ sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
+ }
+
+
+ if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+
+ payload_sz = (sg_count * sizeof(void *) +
+ sizeof(struct vmbus_packet_mpb_array));
+ payload = kmalloc(payload_sz, GFP_ATOMIC);
+ if (!payload) {
+ if (cmd_request->bounce_sgl_count)
+ destroy_bounce_buffer(
+ cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ }
+
+ payload->range.len = length;
+ payload->range.offset = sgl[0].offset;
+
+ cur_sgl = sgl;
+ for (i = 0; i < sg_count; i++) {
+ payload->range.pfn_array[i] =
+ page_to_pfn(sg_page((cur_sgl)));
+ cur_sgl = sg_next(cur_sgl);
+ }
+
+ } else if (scsi_sglist(scmnd)) {
+ payload->range.len = length;
+ payload->range.offset =
+ virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
+ payload->range.pfn_array[0] =
+ virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
+ }
+
+ cmd_request->payload = payload;
+ cmd_request->payload_sz = payload_sz;
+
+ /* Invokes the vsc to start an IO */
+ ret = storvsc_do_io(dev, cmd_request);
+
+ if (ret == -EAGAIN) {
+ /* no more space */
+
+ if (cmd_request->bounce_sgl_count)
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ return 0;
+}
+
+static struct scsi_host_template scsi_driver = {
+ .module = THIS_MODULE,
+ .name = "storvsc_host_t",
+ .cmd_size = sizeof(struct storvsc_cmd_request),
+ .bios_param = storvsc_get_chs,
+ .queuecommand = storvsc_queuecommand,
+ .eh_host_reset_handler = storvsc_host_reset_handler,
+ .proc_name = "storvsc_host",
+ .eh_timed_out = storvsc_eh_timed_out,
+ .slave_configure = storvsc_device_configure,
+ .cmd_per_lun = 255,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ /* Make sure we dont get a sg segment crosses a page boundary */
+ .dma_boundary = PAGE_SIZE-1,
+ .no_write_same = 1,
+};
+
+enum {
+ SCSI_GUID,
+ IDE_GUID,
+ SFC_GUID,
+};
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* SCSI guid */
+ { HV_SCSI_GUID,
+ .driver_data = SCSI_GUID
+ },
+ /* IDE guid */
+ { HV_IDE_GUID,
+ .driver_data = IDE_GUID
+ },
+ /* Fibre Channel GUID */
+ {
+ HV_SYNTHFC_GUID,
+ .driver_data = SFC_GUID
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static int storvsc_probe(struct hv_device *device,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ int ret;
+ int num_cpus = num_online_cpus();
+ struct Scsi_Host *host;
+ struct hv_host_device *host_dev;
+ bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
+ int target = 0;
+ struct storvsc_device *stor_device;
+ int max_luns_per_target;
+ int max_targets;
+ int max_channels;
+ int max_sub_channels = 0;
+
+ /*
+ * Based on the windows host we are running on,
+ * set state to properly communicate with the host.
+ */
+
+ switch (vmbus_proto_version) {
+ case VERSION_WS2008:
+ case VERSION_WIN7:
+ sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
+ vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+ vmstor_current_major = VMSTOR_WIN7_MAJOR;
+ vmstor_current_minor = VMSTOR_WIN7_MINOR;
+ max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
+ max_targets = STORVSC_IDE_MAX_TARGETS;
+ max_channels = STORVSC_IDE_MAX_CHANNELS;
+ break;
+ default:
+ sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
+ vmscsi_size_delta = 0;
+ vmstor_current_major = VMSTOR_WIN8_MAJOR;
+ vmstor_current_minor = VMSTOR_WIN8_MINOR;
+ max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
+ max_targets = STORVSC_MAX_TARGETS;
+ max_channels = STORVSC_MAX_CHANNELS;
+ /*
+ * On Windows8 and above, we support sub-channels for storage.
+ * The number of sub-channels offerred is based on the number of
+ * VCPUs in the guest.
+ */
+ max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
+ break;
+ }
+
+ scsi_driver.can_queue = (max_outstanding_req_per_channel *
+ (max_sub_channels + 1));
+
+ host = scsi_host_alloc(&scsi_driver,
+ sizeof(struct hv_host_device));
+ if (!host)
+ return -ENOMEM;
+
+ host_dev = shost_priv(host);
+ memset(host_dev, 0, sizeof(struct hv_host_device));
+
+ host_dev->port = host->host_no;
+ host_dev->dev = device;
+
+
+ stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
+ if (!stor_device) {
+ ret = -ENOMEM;
+ goto err_out0;
+ }
+
+ stor_device->destroy = false;
+ stor_device->open_sub_channel = false;
+ init_waitqueue_head(&stor_device->waiting_to_drain);
+ stor_device->device = device;
+ stor_device->host = host;
+ hv_set_drvdata(device, stor_device);
+
+ stor_device->port_number = host->host_no;
+ ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
+ if (ret)
+ goto err_out1;
+
+ host_dev->path = stor_device->path_id;
+ host_dev->target = stor_device->target_id;
+
+ switch (dev_id->driver_data) {
+ case SFC_GUID:
+ host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_FC_MAX_TARGETS;
+ host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
+ break;
+
+ case SCSI_GUID:
+ host->max_lun = max_luns_per_target;
+ host->max_id = max_targets;
+ host->max_channel = max_channels - 1;
+ break;
+
+ default:
+ host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_IDE_MAX_TARGETS;
+ host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
+ break;
+ }
+ /* max cmd length */
+ host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+
+ /*
+ * set the table size based on the info we got
+ * from the host.
+ */
+ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+
+ /* Register the HBA and start the scsi bus scan */
+ ret = scsi_add_host(host, &device->device);
+ if (ret != 0)
+ goto err_out2;
+
+ if (!dev_is_ide) {
+ scsi_scan_host(host);
+ } else {
+ target = (device->dev_instance.b[5] << 8 |
+ device->dev_instance.b[4]);
+ ret = scsi_add_device(host, 0, target, 0);
+ if (ret) {
+ scsi_remove_host(host);
+ goto err_out2;
+ }
+ }
+ return 0;
+
+err_out2:
+ /*
+ * Once we have connected with the host, we would need to
+ * to invoke storvsc_dev_remove() to rollback this state and
+ * this call also frees up the stor_device; hence the jump around
+ * err_out1 label.
+ */
+ storvsc_dev_remove(device);
+ goto err_out0;
+
+err_out1:
+ kfree(stor_device);
+
+err_out0:
+ scsi_host_put(host);
+ return ret;
+}
+
+static int storvsc_remove(struct hv_device *dev)
+{
+ struct storvsc_device *stor_device = hv_get_drvdata(dev);
+ struct Scsi_Host *host = stor_device->host;
+
+ scsi_remove_host(host);
+ storvsc_dev_remove(dev);
+ scsi_host_put(host);
+
+ return 0;
+}
+
+static struct hv_driver storvsc_drv = {
+ .name = KBUILD_MODNAME,
+ .id_table = id_table,
+ .probe = storvsc_probe,
+ .remove = storvsc_remove,
+};
+
+static int __init storvsc_drv_init(void)
+{
+
+ /*
+ * Divide the ring buffer data size (which is 1 page less
+ * than the ring buffer size since that page is reserved for
+ * the ring buffer indices) by the max request size (which is
+ * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
+ */
+ max_outstanding_req_per_channel =
+ ((storvsc_ringbuffer_size - PAGE_SIZE) /
+ ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
+ sizeof(struct vstor_packet) + sizeof(u64) -
+ vmscsi_size_delta,
+ sizeof(u64)));
+
+ return vmbus_driver_register(&storvsc_drv);
+}
+
+static void __exit storvsc_drv_exit(void)
+{
+ vmbus_driver_unregister(&storvsc_drv);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
+module_init(storvsc_drv_init);
+module_exit(storvsc_drv_exit);
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
new file mode 100644
index 000000000..22a42836d
--- /dev/null
+++ b/drivers/scsi/sun3_scsi.c
@@ -0,0 +1,685 @@
+/*
+ * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
+ *
+ * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
+ *
+ * VME support added by Sam Creasey
+ *
+ * TODO: modify this driver to support multiple Sun3 SCSI VME boards
+ *
+ * Adapted from mac_scsinew.c:
+ */
+/*
+ * Generic Macintosh NCR5380 driver
+ *
+ * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
+ *
+ * derived in part from:
+ */
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1995, Russell King
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/dvma.h>
+
+#include <scsi/scsi_host.h>
+#include "sun3_scsi.h"
+
+/* Definitions for the core NCR5380 driver. */
+
+#define REAL_DMA
+/* #define SUPPORT_TAGS */
+/* minimum number of bytes to do dma on */
+#define DMA_MIN_SIZE 129
+
+/* #define MAX_TAGS 32 */
+
+#define NCR5380_implementation_fields /* none */
+
+#define NCR5380_read(reg) sun3scsi_read(reg)
+#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
+
+#define NCR5380_queue_command sun3scsi_queue_command
+#define NCR5380_bus_reset sun3scsi_bus_reset
+#define NCR5380_abort sun3scsi_abort
+#define NCR5380_show_info sun3scsi_show_info
+#define NCR5380_info sun3scsi_info
+
+#define NCR5380_dma_read_setup(instance, data, count) \
+ sun3scsi_dma_setup(data, count, 0)
+#define NCR5380_dma_write_setup(instance, data, count) \
+ sun3scsi_dma_setup(data, count, 1)
+#define NCR5380_dma_residual(instance) \
+ sun3scsi_dma_residual(instance)
+#define NCR5380_dma_xfer_len(instance, cmd, phase) \
+ sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+
+#define NCR5380_acquire_dma_irq(instance) (1)
+#define NCR5380_release_dma_irq(instance)
+
+#include "NCR5380.h"
+
+
+extern int sun3_map_test(unsigned long, char *);
+
+static int setup_can_queue = -1;
+module_param(setup_can_queue, int, 0);
+static int setup_cmd_per_lun = -1;
+module_param(setup_cmd_per_lun, int, 0);
+static int setup_sg_tablesize = -1;
+module_param(setup_sg_tablesize, int, 0);
+#ifdef SUPPORT_TAGS
+static int setup_use_tagged_queuing = -1;
+module_param(setup_use_tagged_queuing, int, 0);
+#endif
+static int setup_hostid = -1;
+module_param(setup_hostid, int, 0);
+
+/* #define RESET_BOOT */
+
+#define AFTER_RESET_DELAY (HZ/2)
+
+/* ms to wait after hitting dma regs */
+#define SUN3_DMA_DELAY 10
+
+/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */
+#define SUN3_DVMA_BUFSIZE 0xe000
+
+static struct scsi_cmnd *sun3_dma_setup_done;
+static unsigned char *sun3_scsi_regp;
+static volatile struct sun3_dma_regs *dregs;
+static struct sun3_udc_regs *udc_regs;
+static unsigned char *sun3_dma_orig_addr = NULL;
+static unsigned long sun3_dma_orig_count = 0;
+static int sun3_dma_active = 0;
+static unsigned long last_residual = 0;
+static struct Scsi_Host *default_instance;
+
+/*
+ * NCR 5380 register access functions
+ */
+
+static inline unsigned char sun3scsi_read(int reg)
+{
+ return in_8(sun3_scsi_regp + reg);
+}
+
+static inline void sun3scsi_write(int reg, int value)
+{
+ out_8(sun3_scsi_regp + reg, value);
+}
+
+#ifndef SUN3_SCSI_VME
+/* dma controller register access functions */
+
+static inline unsigned short sun3_udc_read(unsigned char reg)
+{
+ unsigned short ret;
+
+ dregs->udc_addr = UDC_CSR;
+ udelay(SUN3_DMA_DELAY);
+ ret = dregs->udc_data;
+ udelay(SUN3_DMA_DELAY);
+
+ return ret;
+}
+
+static inline void sun3_udc_write(unsigned short val, unsigned char reg)
+{
+ dregs->udc_addr = reg;
+ udelay(SUN3_DMA_DELAY);
+ dregs->udc_data = val;
+ udelay(SUN3_DMA_DELAY);
+}
+#endif
+
+#ifdef RESET_BOOT
+static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
+{
+ unsigned long end;
+
+ /*
+ * Do a SCSI reset to clean up the bus during initialization. No
+ * messing with the queues, interrupts, or locks necessary here.
+ */
+
+ printk( "Sun3 SCSI: resetting the SCSI bus..." );
+
+ /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
+// sun3_disable_irq( IRQ_SUN3_SCSI );
+
+ /* get in phase */
+ NCR5380_write( TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
+
+ /* assert RST */
+ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
+
+ /* The min. reset hold time is 25us, so 40us should be enough */
+ udelay( 50 );
+
+ /* reset RST and interrupt */
+ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
+ NCR5380_read( RESET_PARITY_INTERRUPT_REG );
+
+ for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
+ barrier();
+
+ /* switch on SCSI IRQ again */
+// sun3_enable_irq( IRQ_SUN3_SCSI );
+
+ printk( " done\n" );
+}
+#endif
+
+// safe bits for the CSR
+#define CSR_GOOD 0x060f
+
+static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
+{
+ unsigned short csr = dregs->csr;
+ int handled = 0;
+
+#ifdef SUN3_SCSI_VME
+ dregs->csr &= ~CSR_DMA_ENABLE;
+#endif
+
+ if(csr & ~CSR_GOOD) {
+ if(csr & CSR_DMA_BUSERR) {
+ printk("scsi%d: bus error in dma\n", default_instance->host_no);
+ }
+
+ if(csr & CSR_DMA_CONFLICT) {
+ printk("scsi%d: dma conflict\n", default_instance->host_no);
+ }
+ handled = 1;
+ }
+
+ if(csr & (CSR_SDB_INT | CSR_DMA_INT)) {
+ NCR5380_intr(irq, dummy);
+ handled = 1;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk;
+ * reentering NCR5380_print_status seems to have ugly side effects
+ */
+
+/* this doesn't seem to get used at all -- sam */
+#if 0
+void sun3_sun3_debug (void)
+{
+ unsigned long flags;
+
+ if (default_instance) {
+ local_irq_save(flags);
+ NCR5380_print_status(default_instance);
+ local_irq_restore(flags);
+ }
+}
+#endif
+
+
+/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
+static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
+{
+ void *addr;
+
+ if(sun3_dma_orig_addr != NULL)
+ dvma_unmap(sun3_dma_orig_addr);
+
+#ifdef SUN3_SCSI_VME
+ addr = (void *)dvma_map_vme((unsigned long) data, count);
+#else
+ addr = (void *)dvma_map((unsigned long) data, count);
+#endif
+
+ sun3_dma_orig_addr = addr;
+ sun3_dma_orig_count = count;
+
+#ifndef SUN3_SCSI_VME
+ dregs->fifo_count = 0;
+ sun3_udc_write(UDC_RESET, UDC_CSR);
+
+ /* reset fifo */
+ dregs->csr &= ~CSR_FIFO;
+ dregs->csr |= CSR_FIFO;
+#endif
+
+ /* set direction */
+ if(write_flag)
+ dregs->csr |= CSR_SEND;
+ else
+ dregs->csr &= ~CSR_SEND;
+
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_PACK_ENABLE;
+
+ dregs->dma_addr_hi = ((unsigned long)addr >> 16);
+ dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
+
+ dregs->dma_count_hi = 0;
+ dregs->dma_count_lo = 0;
+ dregs->fifo_count_hi = 0;
+ dregs->fifo_count = 0;
+#else
+ /* byte count for fifo */
+ dregs->fifo_count = count;
+
+ sun3_udc_write(UDC_RESET, UDC_CSR);
+
+ /* reset fifo */
+ dregs->csr &= ~CSR_FIFO;
+ dregs->csr |= CSR_FIFO;
+
+ if(dregs->fifo_count != count) {
+ printk("scsi%d: fifo_mismatch %04x not %04x\n",
+ default_instance->host_no, dregs->fifo_count,
+ (unsigned int) count);
+ NCR5380_dprint(NDEBUG_DMA, default_instance);
+ }
+
+ /* setup udc */
+ udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8);
+ udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff);
+ udc_regs->count = count/2; /* count in words */
+ udc_regs->mode_hi = UDC_MODE_HIWORD;
+ if(write_flag) {
+ if(count & 1)
+ udc_regs->count++;
+ udc_regs->mode_lo = UDC_MODE_LSEND;
+ udc_regs->rsel = UDC_RSEL_SEND;
+ } else {
+ udc_regs->mode_lo = UDC_MODE_LRECV;
+ udc_regs->rsel = UDC_RSEL_RECV;
+ }
+
+ /* announce location of regs block */
+ sun3_udc_write(((dvma_vtob(udc_regs) & 0xff0000) >> 8),
+ UDC_CHN_HI);
+
+ sun3_udc_write((dvma_vtob(udc_regs) & 0xffff), UDC_CHN_LO);
+
+ /* set dma master on */
+ sun3_udc_write(0xd, UDC_MODE);
+
+ /* interrupt enable */
+ sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
+#endif
+
+ return count;
+
+}
+
+#ifndef SUN3_SCSI_VME
+static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
+{
+ unsigned short resid;
+
+ dregs->udc_addr = 0x32;
+ udelay(SUN3_DMA_DELAY);
+ resid = dregs->udc_data;
+ udelay(SUN3_DMA_DELAY);
+ resid *= 2;
+
+ return (unsigned long) resid;
+}
+#endif
+
+static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
+{
+ return last_residual;
+}
+
+static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
+ struct scsi_cmnd *cmd,
+ int write_flag)
+{
+ if (cmd->request->cmd_type == REQ_TYPE_FS)
+ return wanted;
+ else
+ return 0;
+}
+
+static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
+{
+#ifdef SUN3_SCSI_VME
+ unsigned short csr;
+
+ csr = dregs->csr;
+
+ dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
+ dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
+
+ dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
+ dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
+
+/* if(!(csr & CSR_DMA_ENABLE))
+ * dregs->csr |= CSR_DMA_ENABLE;
+ */
+#else
+ sun3_udc_write(UDC_CHN_START, UDC_CSR);
+#endif
+
+ return 0;
+}
+
+/* clean up after our dma is done */
+static int sun3scsi_dma_finish(int write_flag)
+{
+ unsigned short __maybe_unused count;
+ unsigned short fifo;
+ int ret = 0;
+
+ sun3_dma_active = 0;
+
+#ifdef SUN3_SCSI_VME
+ dregs->csr &= ~CSR_DMA_ENABLE;
+
+ fifo = dregs->fifo_count;
+ if (write_flag) {
+ if ((fifo > 0) && (fifo < sun3_dma_orig_count))
+ fifo++;
+ }
+
+ last_residual = fifo;
+ /* empty bytes from the fifo which didn't make it */
+ if ((!write_flag) && (dregs->csr & CSR_LEFT)) {
+ unsigned char *vaddr;
+
+ vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
+
+ vaddr += (sun3_dma_orig_count - fifo);
+ vaddr--;
+
+ switch (dregs->csr & CSR_LEFT) {
+ case CSR_LEFT_3:
+ *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
+ vaddr--;
+
+ case CSR_LEFT_2:
+ *vaddr = (dregs->bpack_hi & 0x00ff);
+ vaddr--;
+
+ case CSR_LEFT_1:
+ *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
+ break;
+ }
+ }
+#else
+ // check to empty the fifo on a read
+ if(!write_flag) {
+ int tmo = 20000; /* .2 sec */
+
+ while(1) {
+ if(dregs->csr & CSR_FIFO_EMPTY)
+ break;
+
+ if(--tmo <= 0) {
+ printk("sun3scsi: fifo failed to empty!\n");
+ return 1;
+ }
+ udelay(10);
+ }
+ }
+
+ count = sun3scsi_dma_count(default_instance);
+
+ fifo = dregs->fifo_count;
+ last_residual = fifo;
+
+ /* empty bytes from the fifo which didn't make it */
+ if((!write_flag) && (count - fifo) == 2) {
+ unsigned short data;
+ unsigned char *vaddr;
+
+ data = dregs->fifo_data;
+ vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr);
+
+ vaddr += (sun3_dma_orig_count - fifo);
+
+ vaddr[-2] = (data & 0xff00) >> 8;
+ vaddr[-1] = (data & 0xff);
+ }
+#endif
+
+ dvma_unmap(sun3_dma_orig_addr);
+ sun3_dma_orig_addr = NULL;
+
+#ifdef SUN3_SCSI_VME
+ dregs->dma_addr_hi = 0;
+ dregs->dma_addr_lo = 0;
+ dregs->dma_count_hi = 0;
+ dregs->dma_count_lo = 0;
+
+ dregs->fifo_count = 0;
+ dregs->fifo_count_hi = 0;
+
+ dregs->csr &= ~CSR_SEND;
+/* dregs->csr |= CSR_DMA_ENABLE; */
+#else
+ sun3_udc_write(UDC_RESET, UDC_CSR);
+ dregs->fifo_count = 0;
+ dregs->csr &= ~CSR_SEND;
+
+ /* reset fifo */
+ dregs->csr &= ~CSR_FIFO;
+ dregs->csr |= CSR_FIFO;
+#endif
+
+ sun3_dma_setup_done = NULL;
+
+ return ret;
+
+}
+
+#include "atari_NCR5380.c"
+
+#ifdef SUN3_SCSI_VME
+#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI"
+#define DRV_MODULE_NAME "sun3_scsi_vme"
+#else
+#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
+#define DRV_MODULE_NAME "sun3_scsi"
+#endif
+
+#define PFX DRV_MODULE_NAME ": "
+
+static struct scsi_host_template sun3_scsi_template = {
+ .module = THIS_MODULE,
+ .proc_name = DRV_MODULE_NAME,
+ .show_info = sun3scsi_show_info,
+ .name = SUN3_SCSI_NAME,
+ .info = sun3scsi_info,
+ .queuecommand = sun3scsi_queue_command,
+ .eh_abort_handler = sun3scsi_abort,
+ .eh_bus_reset_handler = sun3scsi_bus_reset,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = SG_NONE,
+ .cmd_per_lun = 2,
+ .use_clustering = DISABLE_CLUSTERING
+};
+
+static int __init sun3_scsi_probe(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance;
+ int error;
+ struct resource *irq, *mem;
+ unsigned char *ioaddr;
+ int host_flags = 0;
+#ifdef SUN3_SCSI_VME
+ int i;
+#endif
+
+ if (setup_can_queue > 0)
+ sun3_scsi_template.can_queue = setup_can_queue;
+ if (setup_cmd_per_lun > 0)
+ sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
+ if (setup_sg_tablesize >= 0)
+ sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
+ if (setup_hostid >= 0)
+ sun3_scsi_template.this_id = setup_hostid & 7;
+
+#ifdef SUN3_SCSI_VME
+ ioaddr = NULL;
+ for (i = 0; i < 2; i++) {
+ unsigned char x;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!irq || !mem)
+ break;
+
+ ioaddr = sun3_ioremap(mem->start, resource_size(mem),
+ SUN3_PAGE_TYPE_VME16);
+ dregs = (struct sun3_dma_regs *)(ioaddr + 8);
+
+ if (sun3_map_test((unsigned long)dregs, &x)) {
+ unsigned short oldcsr;
+
+ oldcsr = dregs->csr;
+ dregs->csr = 0;
+ udelay(SUN3_DMA_DELAY);
+ if (dregs->csr == 0x1400)
+ break;
+
+ dregs->csr = oldcsr;
+ }
+
+ iounmap(ioaddr);
+ ioaddr = NULL;
+ }
+ if (!ioaddr)
+ return -ENODEV;
+#else
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!irq || !mem)
+ return -ENODEV;
+
+ ioaddr = ioremap(mem->start, resource_size(mem));
+ dregs = (struct sun3_dma_regs *)(ioaddr + 8);
+
+ udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs));
+ if (!udc_regs) {
+ pr_err(PFX "couldn't allocate DVMA memory!\n");
+ iounmap(ioaddr);
+ return -ENOMEM;
+ }
+#endif
+
+ sun3_scsi_regp = ioaddr;
+
+ instance = scsi_host_alloc(&sun3_scsi_template,
+ sizeof(struct NCR5380_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_alloc;
+ }
+ default_instance = instance;
+
+ instance->io_port = (unsigned long)ioaddr;
+ instance->irq = irq->start;
+
+#ifdef SUPPORT_TAGS
+ host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
+#endif
+
+ NCR5380_init(instance, host_flags);
+
+ error = request_irq(instance->irq, scsi_sun3_intr, 0,
+ "NCR5380", instance);
+ if (error) {
+#ifdef REAL_DMA
+ pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n",
+ instance->host_no, instance->irq);
+ goto fail_irq;
+#else
+ pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
+#endif
+ }
+
+ dregs->csr = 0;
+ udelay(SUN3_DMA_DELAY);
+ dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
+ udelay(SUN3_DMA_DELAY);
+ dregs->fifo_count = 0;
+#ifdef SUN3_SCSI_VME
+ dregs->fifo_count_hi = 0;
+ dregs->dma_addr_hi = 0;
+ dregs->dma_addr_lo = 0;
+ dregs->dma_count_hi = 0;
+ dregs->dma_count_lo = 0;
+
+ dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
+#endif
+
+#ifdef RESET_BOOT
+ sun3_scsi_reset_boot(instance);
+#endif
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ platform_set_drvdata(pdev, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+fail_irq:
+ NCR5380_exit(instance);
+ scsi_host_put(instance);
+fail_alloc:
+ if (udc_regs)
+ dvma_free(udc_regs);
+ iounmap(sun3_scsi_regp);
+ return error;
+}
+
+static int __exit sun3_scsi_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance = platform_get_drvdata(pdev);
+
+ scsi_remove_host(instance);
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
+ scsi_host_put(instance);
+ if (udc_regs)
+ dvma_free(udc_regs);
+ iounmap(sun3_scsi_regp);
+ return 0;
+}
+
+static struct platform_driver sun3_scsi_driver = {
+ .remove = __exit_p(sun3_scsi_remove),
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+
+module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe);
+
+MODULE_ALIAS("platform:" DRV_MODULE_NAME);
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
new file mode 100644
index 000000000..d22745fae
--- /dev/null
+++ b/drivers/scsi/sun3_scsi.h
@@ -0,0 +1,102 @@
+/*
+ * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
+ *
+ * Sun3 DMA additions by Sam Creasey (sammy@sammy.net)
+ *
+ * Adapted from mac_scsinew.h:
+ */
+/*
+ * Cumana Generic NCR5380 driver defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ */
+
+#ifndef SUN3_SCSI_H
+#define SUN3_SCSI_H
+
+/* additional registers - mainly DMA control regs */
+/* these start at regbase + 8 -- directly after the NCR regs */
+struct sun3_dma_regs {
+ unsigned short dma_addr_hi; /* vme only */
+ unsigned short dma_addr_lo; /* vme only */
+ unsigned short dma_count_hi; /* vme only */
+ unsigned short dma_count_lo; /* vme only */
+ unsigned short udc_data; /* udc dma data reg (obio only) */
+ unsigned short udc_addr; /* uda dma addr reg (obio only) */
+ unsigned short fifo_data; /* fifo data reg, holds extra byte on
+ odd dma reads */
+ unsigned short fifo_count;
+ unsigned short csr; /* control/status reg */
+ unsigned short bpack_hi; /* vme only */
+ unsigned short bpack_lo; /* vme only */
+ unsigned short ivect; /* vme only */
+ unsigned short fifo_count_hi; /* vme only */
+};
+
+/* ucd chip specific regs - live in dvma space */
+struct sun3_udc_regs {
+ unsigned short rsel; /* select regs to load */
+ unsigned short addr_hi; /* high word of addr */
+ unsigned short addr_lo; /* low word */
+ unsigned short count; /* words to be xfer'd */
+ unsigned short mode_hi; /* high word of channel mode */
+ unsigned short mode_lo; /* low word of channel mode */
+};
+
+/* addresses of the udc registers */
+#define UDC_MODE 0x38
+#define UDC_CSR 0x2e /* command/status */
+#define UDC_CHN_HI 0x26 /* chain high word */
+#define UDC_CHN_LO 0x22 /* chain lo word */
+#define UDC_CURA_HI 0x1a /* cur reg A high */
+#define UDC_CURA_LO 0x0a /* cur reg A low */
+#define UDC_CURB_HI 0x12 /* cur reg B high */
+#define UDC_CURB_LO 0x02 /* cur reg B low */
+#define UDC_MODE_HI 0x56 /* mode reg high */
+#define UDC_MODE_LO 0x52 /* mode reg low */
+#define UDC_COUNT 0x32 /* words to xfer */
+
+/* some udc commands */
+#define UDC_RESET 0
+#define UDC_CHN_START 0xa0 /* start chain */
+#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
+
+/* udc mode words */
+#define UDC_MODE_HIWORD 0x40
+#define UDC_MODE_LSEND 0xc2
+#define UDC_MODE_LRECV 0xd2
+
+/* udc reg selections */
+#define UDC_RSEL_SEND 0x282
+#define UDC_RSEL_RECV 0x182
+
+/* bits in csr reg */
+#define CSR_DMA_ACTIVE 0x8000
+#define CSR_DMA_CONFLICT 0x4000
+#define CSR_DMA_BUSERR 0x2000
+
+#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
+#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
+#define CSR_DMA_INT 0x100 /* dma interrupt pending */
+
+#define CSR_LEFT 0xc0
+#define CSR_LEFT_3 0xc0
+#define CSR_LEFT_2 0x80
+#define CSR_LEFT_1 0x40
+#define CSR_PACK_ENABLE 0x20
+
+#define CSR_DMA_ENABLE 0x10
+
+#define CSR_SEND 0x8 /* 1 = send 0 = recv */
+#define CSR_FIFO 0x2 /* reset fifo */
+#define CSR_INTR 0x4 /* interrupt enable */
+#define CSR_SCSI 0x1
+
+#define VME_DATA24 0x3d00
+
+#endif /* SUN3_SCSI_H */
+
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
new file mode 100644
index 000000000..1eeece6e2
--- /dev/null
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -0,0 +1,3 @@
+#define SUN3_SCSI_VME
+
+#include "sun3_scsi.c"
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
new file mode 100644
index 000000000..e26e81de7
--- /dev/null
+++ b/drivers/scsi/sun3x_esp.c
@@ -0,0 +1,318 @@
+/* sun3x_esp.c: ESP front-end for Sun3x systems.
+ *
+ * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+
+#include <asm/sun3x.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/dvma.h>
+
+/* DMA controller reg offsets */
+#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
+#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
+#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
+#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "sun3x_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "Nov 1, 2007"
+
+/*
+ * m68k always assumes readl/writel operate on little endian
+ * mmio space; this is wrong at least for Sun3x, so we
+ * need to workaround this until a proper way is found
+ */
+#if 0
+#define dma_read32(REG) \
+ readl(esp->dma_regs + (REG))
+#define dma_write32(VAL, REG) \
+ writel((VAL), esp->dma_regs + (REG))
+#else
+#define dma_read32(REG) \
+ *(volatile u32 *)(esp->dma_regs + (REG))
+#define dma_write32(VAL, REG) \
+ do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
+#endif
+
+static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ writeb(val, esp->regs + (reg * 4UL));
+}
+
+static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return readb(esp->regs + (reg * 4UL));
+}
+
+static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return dma_map_single(esp->dev, buf, sz, dir);
+}
+
+static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ return dma_map_sg(esp->dev, sg, num_sg, dir);
+}
+
+static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ dma_unmap_single(esp->dev, addr, sz, dir);
+}
+
+static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ dma_unmap_sg(esp->dev, sg, num_sg, dir);
+}
+
+static int sun3x_esp_irq_pending(struct esp *esp)
+{
+ if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
+ return 1;
+ return 0;
+}
+
+static void sun3x_esp_reset_dma(struct esp *esp)
+{
+ u32 val;
+
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_RST_SCSI, DMA_CSR);
+ dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
+
+ /* Enable interrupts. */
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_INT_ENAB, DMA_CSR);
+}
+
+static void sun3x_esp_dma_drain(struct esp *esp)
+{
+ u32 csr;
+ int lim;
+
+ csr = dma_read32(DMA_CSR);
+ if (!(csr & DMA_FIFO_ISDRAIN))
+ return;
+
+ dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
+
+ lim = 1000;
+ while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
+ esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
+}
+
+static void sun3x_esp_dma_invalidate(struct esp *esp)
+{
+ u32 val;
+ int lim;
+
+ lim = 1000;
+ while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA will not "
+ "invalidate!\n", esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
+
+ val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
+ val |= DMA_FIFO_INV;
+ dma_write32(val, DMA_CSR);
+ val &= ~DMA_FIFO_INV;
+ dma_write32(val, DMA_CSR);
+}
+
+static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ u32 csr;
+
+ BUG_ON(!(cmd & ESP_CMD_DMA));
+
+ sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ csr = dma_read32(DMA_CSR);
+ csr |= DMA_ENABLE;
+ if (write)
+ csr |= DMA_ST_WRITE;
+ else
+ csr &= ~DMA_ST_WRITE;
+ dma_write32(csr, DMA_CSR);
+ dma_write32(addr, DMA_ADDR);
+
+ scsi_esp_cmd(esp, cmd);
+}
+
+static int sun3x_esp_dma_error(struct esp *esp)
+{
+ u32 csr = dma_read32(DMA_CSR);
+
+ if (csr & DMA_HNDL_ERROR)
+ return 1;
+
+ return 0;
+}
+
+static const struct esp_driver_ops sun3x_esp_ops = {
+ .esp_write8 = sun3x_esp_write8,
+ .esp_read8 = sun3x_esp_read8,
+ .map_single = sun3x_esp_map_single,
+ .map_sg = sun3x_esp_map_sg,
+ .unmap_single = sun3x_esp_unmap_single,
+ .unmap_sg = sun3x_esp_unmap_sg,
+ .irq_pending = sun3x_esp_irq_pending,
+ .reset_dma = sun3x_esp_reset_dma,
+ .dma_drain = sun3x_esp_dma_drain,
+ .dma_invalidate = sun3x_esp_dma_invalidate,
+ .send_dma_cmd = sun3x_esp_send_dma_cmd,
+ .dma_error = sun3x_esp_dma_error,
+};
+
+static int esp_sun3x_probe(struct platform_device *dev)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ struct resource *res;
+ int err = -ENOMEM;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+ if (!host)
+ goto fail;
+
+ host->max_id = 8;
+ esp = shost_priv(host);
+
+ esp->host = host;
+ esp->dev = dev;
+ esp->ops = &sun3x_esp_ops;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res || !res->start)
+ goto fail_unlink;
+
+ esp->regs = ioremap_nocache(res->start, 0x20);
+ if (!esp->regs)
+ goto fail_unmap_regs;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ if (!res || !res->start)
+ goto fail_unmap_regs;
+
+ esp->dma_regs = ioremap_nocache(res->start, 0x10);
+
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
+ &esp->command_block_dma,
+ GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unmap_regs_dma;
+
+ host->irq = platform_get_irq(dev, 0);
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
+ "SUN3X ESP", esp);
+ if (err < 0)
+ goto fail_unmap_command_block;
+
+ esp->scsi_id = 7;
+ esp->host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+ esp->cfreq = 20000000;
+
+ dev_set_drvdata(&dev->dev, esp);
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_unmap_command_block:
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+fail_unmap_regs_dma:
+ iounmap(esp->dma_regs);
+fail_unmap_regs:
+ iounmap(esp->regs);
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
+}
+
+static int esp_sun3x_remove(struct platform_device *dev)
+{
+ struct esp *esp = dev_get_drvdata(&dev->dev);
+ unsigned int irq = esp->host->irq;
+ u32 val;
+
+ scsi_esp_unregister(esp);
+
+ /* Disable interrupts. */
+ val = dma_read32(DMA_CSR);
+ dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
+
+ free_irq(irq, esp);
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+
+ scsi_host_put(esp->host);
+
+ return 0;
+}
+
+static struct platform_driver esp_sun3x_driver = {
+ .probe = esp_sun3x_probe,
+ .remove = esp_sun3x_remove,
+ .driver = {
+ .name = "sun3x_esp",
+ },
+};
+
+static int __init sun3x_esp_init(void)
+{
+ return platform_driver_register(&esp_sun3x_driver);
+}
+
+static void __exit sun3x_esp_exit(void)
+{
+ platform_driver_unregister(&esp_sun3x_driver);
+}
+
+MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
+MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sun3x_esp_init);
+module_exit(sun3x_esp_exit);
+MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
new file mode 100644
index 000000000..7b6d4c208
--- /dev/null
+++ b/drivers/scsi/sun_esp.c
@@ -0,0 +1,658 @@
+/* sun_esp.c: ESP front-end for Sparc SBUS systems.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "sun_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.100"
+#define DRV_MODULE_RELDATE "August 27, 2008"
+
+#define dma_read32(REG) \
+ sbus_readl(esp->dma_regs + (REG))
+#define dma_write32(VAL, REG) \
+ sbus_writel((VAL), esp->dma_regs + (REG))
+
+/* DVMA chip revisions */
+enum dvma_rev {
+ dvmarev0,
+ dvmaesc1,
+ dvmarev1,
+ dvmarev2,
+ dvmarev3,
+ dvmarevplus,
+ dvmahme
+};
+
+static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
+{
+ esp->dma = dma_of;
+
+ esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
+ resource_size(&dma_of->resource[0]),
+ "espdma");
+ if (!esp->dma_regs)
+ return -ENOMEM;
+
+ switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
+ case DMA_VERS0:
+ esp->dmarev = dvmarev0;
+ break;
+ case DMA_ESCV1:
+ esp->dmarev = dvmaesc1;
+ break;
+ case DMA_VERS1:
+ esp->dmarev = dvmarev1;
+ break;
+ case DMA_VERS2:
+ esp->dmarev = dvmarev2;
+ break;
+ case DMA_VERHME:
+ esp->dmarev = dvmahme;
+ break;
+ case DMA_VERSPLUS:
+ esp->dmarev = dvmarevplus;
+ break;
+ }
+
+ return 0;
+
+}
+
+static int esp_sbus_map_regs(struct esp *esp, int hme)
+{
+ struct platform_device *op = esp->dev;
+ struct resource *res;
+
+ /* On HME, two reg sets exist, first is DVMA,
+ * second is ESP registers.
+ */
+ if (hme)
+ res = &op->resource[1];
+ else
+ res = &op->resource[0];
+
+ esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
+ if (!esp->regs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int esp_sbus_map_command_block(struct esp *esp)
+{
+ struct platform_device *op = esp->dev;
+
+ esp->command_block = dma_alloc_coherent(&op->dev, 16,
+ &esp->command_block_dma,
+ GFP_ATOMIC);
+ if (!esp->command_block)
+ return -ENOMEM;
+ return 0;
+}
+
+static int esp_sbus_register_irq(struct esp *esp)
+{
+ struct Scsi_Host *host = esp->host;
+ struct platform_device *op = esp->dev;
+
+ host->irq = op->archdata.irqs[0];
+ return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+}
+
+static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
+{
+ struct platform_device *op = esp->dev;
+ struct device_node *dp;
+
+ dp = op->dev.of_node;
+ esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
+ if (esp->scsi_id != 0xff)
+ goto done;
+
+ esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
+ if (esp->scsi_id != 0xff)
+ goto done;
+
+ esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
+ "scsi-initiator-id", 7);
+
+done:
+ esp->host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+}
+
+static void esp_get_differential(struct esp *esp)
+{
+ struct platform_device *op = esp->dev;
+ struct device_node *dp;
+
+ dp = op->dev.of_node;
+ if (of_find_property(dp, "differential", NULL))
+ esp->flags |= ESP_FLAG_DIFFERENTIAL;
+ else
+ esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
+}
+
+static void esp_get_clock_params(struct esp *esp)
+{
+ struct platform_device *op = esp->dev;
+ struct device_node *bus_dp, *dp;
+ int fmhz;
+
+ dp = op->dev.of_node;
+ bus_dp = dp->parent;
+
+ fmhz = of_getintprop_default(dp, "clock-frequency", 0);
+ if (fmhz == 0)
+ fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
+
+ esp->cfreq = fmhz;
+}
+
+static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
+{
+ struct device_node *dma_dp = dma_of->dev.of_node;
+ struct platform_device *op = esp->dev;
+ struct device_node *dp;
+ u8 bursts, val;
+
+ dp = op->dev.of_node;
+ bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
+ val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
+ if (val != 0xff)
+ bursts &= val;
+
+ val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
+ if (val != 0xff)
+ bursts &= val;
+
+ if (bursts == 0xff ||
+ (bursts & DMA_BURST16) == 0 ||
+ (bursts & DMA_BURST32) == 0)
+ bursts = (DMA_BURST32 - 1);
+
+ esp->bursts = bursts;
+}
+
+static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
+{
+ esp_get_scsi_id(esp, espdma);
+ esp_get_differential(esp);
+ esp_get_clock_params(esp);
+ esp_get_bursts(esp, espdma);
+}
+
+static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ sbus_writeb(val, esp->regs + (reg * 4UL));
+}
+
+static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return sbus_readb(esp->regs + (reg * 4UL));
+}
+
+static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ struct platform_device *op = esp->dev;
+
+ return dma_map_single(&op->dev, buf, sz, dir);
+}
+
+static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ struct platform_device *op = esp->dev;
+
+ return dma_map_sg(&op->dev, sg, num_sg, dir);
+}
+
+static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ struct platform_device *op = esp->dev;
+
+ dma_unmap_single(&op->dev, addr, sz, dir);
+}
+
+static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ struct platform_device *op = esp->dev;
+
+ dma_unmap_sg(&op->dev, sg, num_sg, dir);
+}
+
+static int sbus_esp_irq_pending(struct esp *esp)
+{
+ if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
+ return 1;
+ return 0;
+}
+
+static void sbus_esp_reset_dma(struct esp *esp)
+{
+ int can_do_burst16, can_do_burst32, can_do_burst64;
+ int can_do_sbus64, lim;
+ struct platform_device *op;
+ u32 val;
+
+ can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
+ can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
+ can_do_burst64 = 0;
+ can_do_sbus64 = 0;
+ op = esp->dev;
+ if (sbus_can_dma_64bit())
+ can_do_sbus64 = 1;
+ if (sbus_can_burst64())
+ can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
+
+ /* Put the DVMA into a known state. */
+ if (esp->dmarev != dvmahme) {
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_RST_SCSI, DMA_CSR);
+ dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
+ }
+ switch (esp->dmarev) {
+ case dvmahme:
+ dma_write32(DMA_RESET_FAS366, DMA_CSR);
+ dma_write32(DMA_RST_SCSI, DMA_CSR);
+
+ esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
+ DMA_SCSI_DISAB | DMA_INT_ENAB);
+
+ esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
+ DMA_BRST_SZ);
+
+ if (can_do_burst64)
+ esp->prev_hme_dmacsr |= DMA_BRST64;
+ else if (can_do_burst32)
+ esp->prev_hme_dmacsr |= DMA_BRST32;
+
+ if (can_do_sbus64) {
+ esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
+ sbus_set_sbus64(&op->dev, esp->bursts);
+ }
+
+ lim = 1000;
+ while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
+ "will not clear!\n",
+ esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
+
+ dma_write32(0, DMA_CSR);
+ dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
+
+ dma_write32(0, DMA_ADDR);
+ break;
+
+ case dvmarev2:
+ if (esp->rev != ESP100) {
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_3CLKS, DMA_CSR);
+ }
+ break;
+
+ case dvmarev3:
+ val = dma_read32(DMA_CSR);
+ val &= ~DMA_3CLKS;
+ val |= DMA_2CLKS;
+ if (can_do_burst32) {
+ val &= ~DMA_BRST_SZ;
+ val |= DMA_BRST32;
+ }
+ dma_write32(val, DMA_CSR);
+ break;
+
+ case dvmaesc1:
+ val = dma_read32(DMA_CSR);
+ val |= DMA_ADD_ENABLE;
+ val &= ~DMA_BCNT_ENAB;
+ if (!can_do_burst32 && can_do_burst16) {
+ val |= DMA_ESC_BURST;
+ } else {
+ val &= ~(DMA_ESC_BURST);
+ }
+ dma_write32(val, DMA_CSR);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable interrupts. */
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_INT_ENAB, DMA_CSR);
+}
+
+static void sbus_esp_dma_drain(struct esp *esp)
+{
+ u32 csr;
+ int lim;
+
+ if (esp->dmarev == dvmahme)
+ return;
+
+ csr = dma_read32(DMA_CSR);
+ if (!(csr & DMA_FIFO_ISDRAIN))
+ return;
+
+ if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
+ dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
+
+ lim = 1000;
+ while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
+ esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
+}
+
+static void sbus_esp_dma_invalidate(struct esp *esp)
+{
+ if (esp->dmarev == dvmahme) {
+ dma_write32(DMA_RST_SCSI, DMA_CSR);
+
+ esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
+ (DMA_PARITY_OFF | DMA_2CLKS |
+ DMA_SCSI_DISAB | DMA_INT_ENAB)) &
+ ~(DMA_ST_WRITE | DMA_ENABLE));
+
+ dma_write32(0, DMA_CSR);
+ dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
+
+ /* This is necessary to avoid having the SCSI channel
+ * engine lock up on us.
+ */
+ dma_write32(0, DMA_ADDR);
+ } else {
+ u32 val;
+ int lim;
+
+ lim = 1000;
+ while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA will not "
+ "invalidate!\n", esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
+
+ val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
+ val |= DMA_FIFO_INV;
+ dma_write32(val, DMA_CSR);
+ val &= ~DMA_FIFO_INV;
+ dma_write32(val, DMA_CSR);
+ }
+}
+
+static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ u32 csr;
+
+ BUG_ON(!(cmd & ESP_CMD_DMA));
+
+ sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ if (esp->rev == FASHME) {
+ sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
+ sbus_esp_write8(esp, 0, FAS_RHI);
+
+ scsi_esp_cmd(esp, cmd);
+
+ csr = esp->prev_hme_dmacsr;
+ csr |= DMA_SCSI_DISAB | DMA_ENABLE;
+ if (write)
+ csr |= DMA_ST_WRITE;
+ else
+ csr &= ~DMA_ST_WRITE;
+ esp->prev_hme_dmacsr = csr;
+
+ dma_write32(dma_count, DMA_COUNT);
+ dma_write32(addr, DMA_ADDR);
+ dma_write32(csr, DMA_CSR);
+ } else {
+ csr = dma_read32(DMA_CSR);
+ csr |= DMA_ENABLE;
+ if (write)
+ csr |= DMA_ST_WRITE;
+ else
+ csr &= ~DMA_ST_WRITE;
+ dma_write32(csr, DMA_CSR);
+ if (esp->dmarev == dvmaesc1) {
+ u32 end = PAGE_ALIGN(addr + dma_count + 16U);
+ dma_write32(end - addr, DMA_COUNT);
+ }
+ dma_write32(addr, DMA_ADDR);
+
+ scsi_esp_cmd(esp, cmd);
+ }
+
+}
+
+static int sbus_esp_dma_error(struct esp *esp)
+{
+ u32 csr = dma_read32(DMA_CSR);
+
+ if (csr & DMA_HNDL_ERROR)
+ return 1;
+
+ return 0;
+}
+
+static const struct esp_driver_ops sbus_esp_ops = {
+ .esp_write8 = sbus_esp_write8,
+ .esp_read8 = sbus_esp_read8,
+ .map_single = sbus_esp_map_single,
+ .map_sg = sbus_esp_map_sg,
+ .unmap_single = sbus_esp_unmap_single,
+ .unmap_sg = sbus_esp_unmap_sg,
+ .irq_pending = sbus_esp_irq_pending,
+ .reset_dma = sbus_esp_reset_dma,
+ .dma_drain = sbus_esp_dma_drain,
+ .dma_invalidate = sbus_esp_dma_invalidate,
+ .send_dma_cmd = sbus_esp_send_dma_cmd,
+ .dma_error = sbus_esp_dma_error,
+};
+
+static int esp_sbus_probe_one(struct platform_device *op,
+ struct platform_device *espdma, int hme)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ int err;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ err = -ENOMEM;
+ if (!host)
+ goto fail;
+
+ host->max_id = (hme ? 16 : 8);
+ esp = shost_priv(host);
+
+ esp->host = host;
+ esp->dev = op;
+ esp->ops = &sbus_esp_ops;
+
+ if (hme)
+ esp->flags |= ESP_FLAG_WIDE_CAPABLE;
+
+ err = esp_sbus_setup_dma(esp, espdma);
+ if (err < 0)
+ goto fail_unlink;
+
+ err = esp_sbus_map_regs(esp, hme);
+ if (err < 0)
+ goto fail_unlink;
+
+ err = esp_sbus_map_command_block(esp);
+ if (err < 0)
+ goto fail_unmap_regs;
+
+ err = esp_sbus_register_irq(esp);
+ if (err < 0)
+ goto fail_unmap_command_block;
+
+ esp_sbus_get_props(esp, espdma);
+
+ /* Before we try to touch the ESP chip, ESC1 dma can
+ * come up with the reset bit set, so make sure that
+ * is clear first.
+ */
+ if (esp->dmarev == dvmaesc1) {
+ u32 val = dma_read32(DMA_CSR);
+
+ dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
+ }
+
+ dev_set_drvdata(&op->dev, esp);
+
+ err = scsi_esp_register(esp, &op->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_unmap_command_block:
+ dma_free_coherent(&op->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+fail_unmap_regs:
+ of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
+}
+
+static int esp_sbus_probe(struct platform_device *op)
+{
+ struct device_node *dma_node = NULL;
+ struct device_node *dp = op->dev.of_node;
+ struct platform_device *dma_of = NULL;
+ int hme = 0;
+
+ if (dp->parent &&
+ (!strcmp(dp->parent->name, "espdma") ||
+ !strcmp(dp->parent->name, "dma")))
+ dma_node = dp->parent;
+ else if (!strcmp(dp->name, "SUNW,fas")) {
+ dma_node = op->dev.of_node;
+ hme = 1;
+ }
+ if (dma_node)
+ dma_of = of_find_device_by_node(dma_node);
+ if (!dma_of)
+ return -ENODEV;
+
+ return esp_sbus_probe_one(op, dma_of, hme);
+}
+
+static int esp_sbus_remove(struct platform_device *op)
+{
+ struct esp *esp = dev_get_drvdata(&op->dev);
+ struct platform_device *dma_of = esp->dma;
+ unsigned int irq = esp->host->irq;
+ bool is_hme;
+ u32 val;
+
+ scsi_esp_unregister(esp);
+
+ /* Disable interrupts. */
+ val = dma_read32(DMA_CSR);
+ dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
+
+ free_irq(irq, esp);
+
+ is_hme = (esp->dmarev == dvmahme);
+
+ dma_free_coherent(&op->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+ of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
+ SBUS_ESP_REG_SIZE);
+ of_iounmap(&dma_of->resource[0], esp->dma_regs,
+ resource_size(&dma_of->resource[0]));
+
+ scsi_host_put(esp->host);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id esp_match[] = {
+ {
+ .name = "SUNW,esp",
+ },
+ {
+ .name = "SUNW,fas",
+ },
+ {
+ .name = "esp",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, esp_match);
+
+static struct platform_driver esp_sbus_driver = {
+ .driver = {
+ .name = "esp",
+ .of_match_table = esp_match,
+ },
+ .probe = esp_sbus_probe,
+ .remove = esp_sbus_remove,
+};
+
+static int __init sunesp_init(void)
+{
+ return platform_driver_register(&esp_sbus_driver);
+}
+
+static void __exit sunesp_exit(void)
+{
+ platform_driver_unregister(&esp_sbus_driver);
+}
+
+MODULE_DESCRIPTION("Sun ESP SCSI driver");
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sunesp_init);
+module_exit(sunesp_exit);
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
new file mode 100644
index 000000000..0b7819f3e
--- /dev/null
+++ b/drivers/scsi/sym53c416.c
@@ -0,0 +1,845 @@
+/*
+ * sym53c416.c
+ * Low-level SCSI driver for sym53c416 chip.
+ * Copyright (C) 1998 Lieven Willems (lw_linux@hotmail.com)
+ *
+ * Changes :
+ *
+ * Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> : Cleaned up code formatting
+ * Fixed an irq locking bug
+ * Added ISAPnP support
+ * Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates
+ * Added another card with ISAPnP support
+ *
+ * LILO command line usage: sym53c416=<PORTBASE>[,<IRQ>]
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <linux/blkdev.h>
+#include <linux/isapnp.h>
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "sym53c416.h"
+
+#define VERSION_STRING "Version 1.0.0-ac"
+
+#define TC_LOW 0x00 /* Transfer counter low */
+#define TC_MID 0x01 /* Transfer counter mid */
+#define SCSI_FIFO 0x02 /* SCSI FIFO register */
+#define COMMAND_REG 0x03 /* Command Register */
+#define STATUS_REG 0x04 /* Status Register (READ) */
+#define DEST_BUS_ID 0x04 /* Destination Bus ID (WRITE) */
+#define INT_REG 0x05 /* Interrupt Register (READ) */
+#define TOM 0x05 /* Time out multiplier (WRITE) */
+#define STP 0x06 /* Synchronous Transfer period */
+#define SYNC_OFFSET 0x07 /* Synchronous Offset */
+#define CONF_REG_1 0x08 /* Configuration register 1 */
+#define CONF_REG_2 0x0B /* Configuration register 2 */
+#define CONF_REG_3 0x0C /* Configuration register 3 */
+#define CONF_REG_4 0x0D /* Configuration register 4 */
+#define TC_HIGH 0x0E /* Transfer counter high */
+#define PIO_FIFO_1 0x10 /* PIO FIFO register 1 */
+#define PIO_FIFO_2 0x11 /* PIO FIFO register 2 */
+#define PIO_FIFO_3 0x12 /* PIO FIFO register 3 */
+#define PIO_FIFO_4 0x13 /* PIO FIFO register 4 */
+#define PIO_FIFO_CNT 0x14 /* PIO FIFO count */
+#define PIO_INT_REG 0x15 /* PIO interrupt register */
+#define CONF_REG_5 0x16 /* Configuration register 5 */
+#define FEATURE_EN 0x1D /* Feature Enable register */
+
+/* Configuration register 1 entries: */
+/* Bits 2-0: SCSI ID of host adapter */
+#define SCM 0x80 /* Slow Cable Mode */
+#define SRID 0x40 /* SCSI Reset Interrupt Disable */
+#define PTM 0x20 /* Parity Test Mode */
+#define EPC 0x10 /* Enable Parity Checking */
+#define CTME 0x08 /* Special Test Mode */
+
+/* Configuration register 2 entries: */
+#define FE 0x40 /* Features Enable */
+#define SCSI2 0x08 /* SCSI 2 Enable */
+#define TBPA 0x04 /* Target Bad Parity Abort */
+
+/* Configuration register 3 entries: */
+#define IDMRC 0x80 /* ID Message Reserved Check */
+#define QTE 0x40 /* Queue Tag Enable */
+#define CDB10 0x20 /* Command Descriptor Block 10 */
+#define FSCSI 0x10 /* FastSCSI */
+#define FCLK 0x08 /* FastClock */
+
+/* Configuration register 4 entries: */
+#define RBS 0x08 /* Register bank select */
+#define EAN 0x04 /* Enable Active Negotiation */
+
+/* Configuration register 5 entries: */
+#define LPSR 0x80 /* Lower Power SCSI Reset */
+#define IE 0x20 /* Interrupt Enable */
+#define LPM 0x02 /* Low Power Mode */
+#define WSE0 0x01 /* 0WS Enable */
+
+/* Interrupt register entries: */
+#define SRST 0x80 /* SCSI Reset */
+#define ILCMD 0x40 /* Illegal Command */
+#define DIS 0x20 /* Disconnect */
+#define BS 0x10 /* Bus Service */
+#define FC 0x08 /* Function Complete */
+#define RESEL 0x04 /* Reselected */
+#define SI 0x03 /* Selection Interrupt */
+
+/* Status Register Entries: */
+#define SCI 0x80 /* SCSI Core Int */
+#define GE 0x40 /* Gross Error */
+#define PE 0x20 /* Parity Error */
+#define TC 0x10 /* Terminal Count */
+#define VGC 0x08 /* Valid Group Code */
+#define PHBITS 0x07 /* Phase bits */
+
+/* PIO Interrupt Register Entries: */
+#define SCI 0x80 /* SCSI Core Int */
+#define PFI 0x40 /* PIO FIFO Interrupt */
+#define FULL 0x20 /* PIO FIFO Full */
+#define EMPTY 0x10 /* PIO FIFO Empty */
+#define CE 0x08 /* Collision Error */
+#define OUE 0x04 /* Overflow / Underflow error */
+#define FIE 0x02 /* Full Interrupt Enable */
+#define EIE 0x01 /* Empty Interrupt Enable */
+
+/* SYM53C416 SCSI phases (lower 3 bits of SYM53C416_STATUS_REG) */
+#define PHASE_DATA_OUT 0x00
+#define PHASE_DATA_IN 0x01
+#define PHASE_COMMAND 0x02
+#define PHASE_STATUS 0x03
+#define PHASE_RESERVED_1 0x04
+#define PHASE_RESERVED_2 0x05
+#define PHASE_MESSAGE_OUT 0x06
+#define PHASE_MESSAGE_IN 0x07
+
+/* SYM53C416 core commands */
+#define NOOP 0x00
+#define FLUSH_FIFO 0x01
+#define RESET_CHIP 0x02
+#define RESET_SCSI_BUS 0x03
+#define DISABLE_SEL_RESEL 0x45
+#define RESEL_SEQ 0x40
+#define SEL_WITHOUT_ATN_SEQ 0x41
+#define SEL_WITH_ATN_SEQ 0x42
+#define SEL_WITH_ATN_AND_STOP_SEQ 0x43
+#define ENABLE_SEL_RESEL 0x44
+#define SEL_WITH_ATN3_SEQ 0x46
+#define RESEL3_SEQ 0x47
+#define SND_MSG 0x20
+#define SND_STAT 0x21
+#define SND_DATA 0x22
+#define DISCONNECT_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARGET_COMM_COMPLETE_SEQ 0x25
+#define DISCONN 0x27
+#define RECV_MSG_SEQ 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2A
+#define RECV_CMD_SEQ 0x2B
+#define TARGET_ABORT_PIO 0x04
+#define TRANSFER_INFORMATION 0x10
+#define INIT_COMM_COMPLETE_SEQ 0x11
+#define MSG_ACCEPTED 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1A
+#define RESET_ATN 0x1B
+#define ILLEGAL 0xFF
+
+#define PIO_MODE 0x80
+
+#define IO_RANGE 0x20 /* 0x00 - 0x1F */
+#define ID "sym53c416" /* Attention: copied to the sym53c416.h */
+#define PIO_SIZE 128 /* Size of PIO fifo is 128 bytes */
+
+#define READ_TIMEOUT 150
+#define WRITE_TIMEOUT 150
+
+#ifdef MODULE
+
+#define sym53c416_base sym53c416
+#define sym53c416_base_1 sym53c416_1
+#define sym53c416_base_2 sym53c416_2
+#define sym53c416_base_3 sym53c416_3
+
+static unsigned int sym53c416_base[2];
+static unsigned int sym53c416_base_1[2];
+static unsigned int sym53c416_base_2[2];
+static unsigned int sym53c416_base_3[2];
+
+#endif
+
+#define MAXHOSTS 4
+
+#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
+
+enum phases
+{
+ idle,
+ data_out,
+ data_in,
+ command_ph,
+ status_ph,
+ message_out,
+ message_in
+};
+
+typedef struct
+{
+ int base;
+ int irq;
+ int scsi_id;
+} host;
+
+static host hosts[MAXHOSTS] = {
+ {0, 0, SYM53C416_SCSI_ID},
+ {0, 0, SYM53C416_SCSI_ID},
+ {0, 0, SYM53C416_SCSI_ID},
+ {0, 0, SYM53C416_SCSI_ID}
+ };
+
+static int host_index = 0;
+static char info[120];
+static Scsi_Cmnd *current_command = NULL;
+static int fastpio = 1;
+
+static int probeaddrs[] = {0x200, 0x220, 0x240, 0};
+
+static void sym53c416_set_transfer_counter(int base, unsigned int len)
+{
+ /* Program Transfer Counter */
+ outb(len & 0x0000FF, base + TC_LOW);
+ outb((len & 0x00FF00) >> 8, base + TC_MID);
+ outb((len & 0xFF0000) >> 16, base + TC_HIGH);
+}
+
+static DEFINE_SPINLOCK(sym53c416_lock);
+
+/* Returns the number of bytes read */
+static __inline__ unsigned int sym53c416_read(int base, unsigned char *buffer, unsigned int len)
+{
+ unsigned int orig_len = len;
+ unsigned long flags = 0;
+ unsigned int bytes_left;
+ unsigned long i;
+ int timeout = READ_TIMEOUT;
+
+ /* Do transfer */
+ spin_lock_irqsave(&sym53c416_lock, flags);
+ while(len && timeout)
+ {
+ bytes_left = inb(base + PIO_FIFO_CNT); /* Number of bytes in the PIO FIFO */
+ if(fastpio && bytes_left > 3)
+ {
+ insl(base + PIO_FIFO_1, buffer, bytes_left >> 2);
+ buffer += bytes_left & 0xFC;
+ len -= bytes_left & 0xFC;
+ }
+ else if(bytes_left > 0)
+ {
+ len -= bytes_left;
+ for(; bytes_left > 0; bytes_left--)
+ *(buffer++) = inb(base + PIO_FIFO_1);
+ }
+ else
+ {
+ i = jiffies + timeout;
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ while(time_before(jiffies, i) && (inb(base + PIO_INT_REG) & EMPTY) && timeout)
+ if(inb(base + PIO_INT_REG) & SCI)
+ timeout = 0;
+ spin_lock_irqsave(&sym53c416_lock, flags);
+ if(inb(base + PIO_INT_REG) & EMPTY)
+ timeout = 0;
+ }
+ }
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ return orig_len - len;
+}
+
+/* Returns the number of bytes written */
+static __inline__ unsigned int sym53c416_write(int base, unsigned char *buffer, unsigned int len)
+{
+ unsigned int orig_len = len;
+ unsigned long flags = 0;
+ unsigned int bufferfree;
+ unsigned long i;
+ unsigned int timeout = WRITE_TIMEOUT;
+
+ /* Do transfer */
+ spin_lock_irqsave(&sym53c416_lock, flags);
+ while(len && timeout)
+ {
+ bufferfree = PIO_SIZE - inb(base + PIO_FIFO_CNT);
+ if(bufferfree > len)
+ bufferfree = len;
+ if(fastpio && bufferfree > 3)
+ {
+ outsl(base + PIO_FIFO_1, buffer, bufferfree >> 2);
+ buffer += bufferfree & 0xFC;
+ len -= bufferfree & 0xFC;
+ }
+ else if(bufferfree > 0)
+ {
+ len -= bufferfree;
+ for(; bufferfree > 0; bufferfree--)
+ outb(*(buffer++), base + PIO_FIFO_1);
+ }
+ else
+ {
+ i = jiffies + timeout;
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ while(time_before(jiffies, i) && (inb(base + PIO_INT_REG) & FULL) && timeout)
+ ;
+ spin_lock_irqsave(&sym53c416_lock, flags);
+ if(inb(base + PIO_INT_REG) & FULL)
+ timeout = 0;
+ }
+ }
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ return orig_len - len;
+}
+
+static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
+{
+ struct Scsi_Host *dev = dev_id;
+ int base = dev->io_port;
+ int i;
+ unsigned long flags = 0;
+ unsigned char status_reg, pio_int_reg, int_reg;
+ struct scatterlist *sg;
+ unsigned int tot_trans = 0;
+
+ spin_lock_irqsave(dev->host_lock,flags);
+ status_reg = inb(base + STATUS_REG);
+ pio_int_reg = inb(base + PIO_INT_REG);
+ int_reg = inb(base + INT_REG);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+
+ /* First, we handle error conditions */
+ if(int_reg & SCI) /* SCSI Reset */
+ {
+ printk(KERN_DEBUG "sym53c416: Reset received\n");
+ current_command->SCp.phase = idle;
+ current_command->result = DID_RESET << 16;
+ spin_lock_irqsave(dev->host_lock, flags);
+ current_command->scsi_done(current_command);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ goto out;
+ }
+ if(int_reg & ILCMD) /* Illegal Command */
+ {
+ printk(KERN_WARNING "sym53c416: Illegal Command: 0x%02x.\n", inb(base + COMMAND_REG));
+ current_command->SCp.phase = idle;
+ current_command->result = DID_ERROR << 16;
+ spin_lock_irqsave(dev->host_lock, flags);
+ current_command->scsi_done(current_command);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ goto out;
+ }
+ if(status_reg & GE) /* Gross Error */
+ {
+ printk(KERN_WARNING "sym53c416: Controller reports gross error.\n");
+ current_command->SCp.phase = idle;
+ current_command->result = DID_ERROR << 16;
+ spin_lock_irqsave(dev->host_lock, flags);
+ current_command->scsi_done(current_command);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ goto out;
+ }
+ if(status_reg & PE) /* Parity Error */
+ {
+ printk(KERN_WARNING "sym53c416:SCSI parity error.\n");
+ current_command->SCp.phase = idle;
+ current_command->result = DID_PARITY << 16;
+ spin_lock_irqsave(dev->host_lock, flags);
+ current_command->scsi_done(current_command);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ goto out;
+ }
+ if(pio_int_reg & (CE | OUE))
+ {
+ printk(KERN_WARNING "sym53c416: PIO interrupt error.\n");
+ current_command->SCp.phase = idle;
+ current_command->result = DID_ERROR << 16;
+ spin_lock_irqsave(dev->host_lock, flags);
+ current_command->scsi_done(current_command);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ goto out;
+ }
+ if(int_reg & DIS) /* Disconnect */
+ {
+ if(current_command->SCp.phase != message_in)
+ current_command->result = DID_NO_CONNECT << 16;
+ else
+ current_command->result = (current_command->SCp.Status & 0xFF) | ((current_command->SCp.Message & 0xFF) << 8) | (DID_OK << 16);
+ current_command->SCp.phase = idle;
+ spin_lock_irqsave(dev->host_lock, flags);
+ current_command->scsi_done(current_command);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ goto out;
+ }
+ /* Now we handle SCSI phases */
+
+ switch(status_reg & PHBITS) /* Filter SCSI phase out of status reg */
+ {
+ case PHASE_DATA_OUT:
+ {
+ if(int_reg & BS)
+ {
+ current_command->SCp.phase = data_out;
+ outb(FLUSH_FIFO, base + COMMAND_REG);
+ sym53c416_set_transfer_counter(base,
+ scsi_bufflen(current_command));
+ outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
+
+ scsi_for_each_sg(current_command,
+ sg, scsi_sg_count(current_command), i) {
+ tot_trans += sym53c416_write(base,
+ SG_ADDRESS(sg),
+ sg->length);
+ }
+ if(tot_trans < current_command->underflow)
+ printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
+ }
+ break;
+ }
+
+ case PHASE_DATA_IN:
+ {
+ if(int_reg & BS)
+ {
+ current_command->SCp.phase = data_in;
+ outb(FLUSH_FIFO, base + COMMAND_REG);
+ sym53c416_set_transfer_counter(base,
+ scsi_bufflen(current_command));
+
+ outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
+
+ scsi_for_each_sg(current_command,
+ sg, scsi_sg_count(current_command), i) {
+ tot_trans += sym53c416_read(base,
+ SG_ADDRESS(sg),
+ sg->length);
+ }
+ if(tot_trans < current_command->underflow)
+ printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
+ }
+ break;
+ }
+
+ case PHASE_COMMAND:
+ {
+ current_command->SCp.phase = command_ph;
+ printk(KERN_ERR "sym53c416: Unknown interrupt in command phase.\n");
+ break;
+ }
+
+ case PHASE_STATUS:
+ {
+ current_command->SCp.phase = status_ph;
+ outb(FLUSH_FIFO, base + COMMAND_REG);
+ outb(INIT_COMM_COMPLETE_SEQ, base + COMMAND_REG);
+ break;
+ }
+
+ case PHASE_RESERVED_1:
+ case PHASE_RESERVED_2:
+ {
+ printk(KERN_ERR "sym53c416: Reserved phase occurred.\n");
+ break;
+ }
+
+ case PHASE_MESSAGE_OUT:
+ {
+ current_command->SCp.phase = message_out;
+ outb(SET_ATN, base + COMMAND_REG);
+ outb(MSG_ACCEPTED, base + COMMAND_REG);
+ break;
+ }
+
+ case PHASE_MESSAGE_IN:
+ {
+ current_command->SCp.phase = message_in;
+ current_command->SCp.Status = inb(base + SCSI_FIFO);
+ current_command->SCp.Message = inb(base + SCSI_FIFO);
+ if(current_command->SCp.Message == SAVE_POINTERS || current_command->SCp.Message == DISCONNECT)
+ outb(SET_ATN, base + COMMAND_REG);
+ outb(MSG_ACCEPTED, base + COMMAND_REG);
+ break;
+ }
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static void sym53c416_init(int base, int scsi_id)
+{
+ outb(RESET_CHIP, base + COMMAND_REG);
+ outb(NOOP, base + COMMAND_REG);
+ outb(0x99, base + TOM); /* Time out of 250 ms */
+ outb(0x05, base + STP);
+ outb(0x00, base + SYNC_OFFSET);
+ outb(EPC | scsi_id, base + CONF_REG_1);
+ outb(FE | SCSI2 | TBPA, base + CONF_REG_2);
+ outb(IDMRC | QTE | CDB10 | FSCSI | FCLK, base + CONF_REG_3);
+ outb(0x83 | EAN, base + CONF_REG_4);
+ outb(IE | WSE0, base + CONF_REG_5);
+ outb(0, base + FEATURE_EN);
+}
+
+static int sym53c416_probeirq(int base, int scsi_id)
+{
+ int irq, irqs;
+ unsigned long i;
+
+ /* Clear interrupt register */
+ inb(base + INT_REG);
+ /* Start probing for irq's */
+ irqs = probe_irq_on();
+ /* Reinit chip */
+ sym53c416_init(base, scsi_id);
+ /* Cause interrupt */
+ outb(NOOP, base + COMMAND_REG);
+ outb(ILLEGAL, base + COMMAND_REG);
+ outb(0x07, base + DEST_BUS_ID);
+ outb(0x00, base + DEST_BUS_ID);
+ /* Wait for interrupt to occur */
+ i = jiffies + 20;
+ while(time_before(jiffies, i) && !(inb(base + STATUS_REG) & SCI))
+ barrier();
+ if(time_before_eq(i, jiffies)) /* timed out */
+ return 0;
+ /* Get occurred irq */
+ irq = probe_irq_off(irqs);
+ sym53c416_init(base, scsi_id);
+ return irq;
+}
+
+/* Setup: sym53c416=base,irq */
+void sym53c416_setup(char *str, int *ints)
+{
+ int i;
+
+ if(host_index >= MAXHOSTS)
+ {
+ printk(KERN_WARNING "sym53c416: Too many hosts defined\n");
+ return;
+ }
+ if(ints[0] < 1 || ints[0] > 2)
+ {
+ printk(KERN_ERR "sym53c416: Wrong number of parameters:\n");
+ printk(KERN_ERR "sym53c416: usage: sym53c416=<base>[,<irq>]\n");
+ return;
+ }
+ for(i = 0; i < host_index && i >= 0; i++)
+ if(hosts[i].base == ints[1])
+ i = -2;
+ if(i >= 0)
+ {
+ hosts[host_index].base = ints[1];
+ hosts[host_index].irq = (ints[0] == 2)? ints[2] : 0;
+ host_index++;
+ }
+}
+
+static int sym53c416_test(int base)
+{
+ outb(RESET_CHIP, base + COMMAND_REG);
+ outb(NOOP, base + COMMAND_REG);
+ if(inb(base + COMMAND_REG) != NOOP)
+ return 0;
+ if(!inb(base + TC_HIGH) || inb(base + TC_HIGH) == 0xFF)
+ return 0;
+ if((inb(base + PIO_INT_REG) & (FULL | EMPTY | CE | OUE | FIE | EIE)) != EMPTY)
+ return 0;
+ return 1;
+}
+
+
+static struct isapnp_device_id id_table[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('S','L','I'), ISAPNP_FUNCTION(0x4161), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('S','L','I'), ISAPNP_FUNCTION(0x4163), 0 },
+ { ISAPNP_DEVICE_SINGLE_END }
+};
+
+MODULE_DEVICE_TABLE(isapnp, id_table);
+
+static void sym53c416_probe(void)
+{
+ int *base = probeaddrs;
+ int ints[2];
+
+ ints[0] = 1;
+ for(; *base; base++) {
+ if (request_region(*base, IO_RANGE, ID)) {
+ if (sym53c416_test(*base)) {
+ ints[1] = *base;
+ sym53c416_setup(NULL, ints);
+ }
+ release_region(*base, IO_RANGE);
+ }
+ }
+}
+
+int __init sym53c416_detect(struct scsi_host_template *tpnt)
+{
+ unsigned long flags;
+ struct Scsi_Host * shpnt = NULL;
+ int i;
+ int count;
+ struct pnp_dev *idev = NULL;
+
+#ifdef MODULE
+ int ints[3];
+
+ ints[0] = 2;
+ if(sym53c416_base[0])
+ {
+ ints[1] = sym53c416_base[0];
+ ints[2] = sym53c416_base[1];
+ sym53c416_setup(NULL, ints);
+ }
+ if(sym53c416_base_1[0])
+ {
+ ints[1] = sym53c416_base_1[0];
+ ints[2] = sym53c416_base_1[1];
+ sym53c416_setup(NULL, ints);
+ }
+ if(sym53c416_base_2[0])
+ {
+ ints[1] = sym53c416_base_2[0];
+ ints[2] = sym53c416_base_2[1];
+ sym53c416_setup(NULL, ints);
+ }
+ if(sym53c416_base_3[0])
+ {
+ ints[1] = sym53c416_base_3[0];
+ ints[2] = sym53c416_base_3[1];
+ sym53c416_setup(NULL, ints);
+ }
+#endif
+ printk(KERN_INFO "sym53c416.c: %s\n", VERSION_STRING);
+
+ for (i=0; id_table[i].vendor != 0; i++) {
+ while((idev=pnp_find_dev(NULL, id_table[i].vendor,
+ id_table[i].function, idev))!=NULL)
+ {
+ int i[3];
+
+ if(pnp_device_attach(idev)<0)
+ {
+ printk(KERN_WARNING "sym53c416: unable to attach PnP device.\n");
+ continue;
+ }
+ if(pnp_activate_dev(idev) < 0)
+ {
+ printk(KERN_WARNING "sym53c416: unable to activate PnP device.\n");
+ pnp_device_detach(idev);
+ continue;
+
+ }
+
+ i[0] = 2;
+ i[1] = pnp_port_start(idev, 0);
+ i[2] = pnp_irq(idev, 0);
+
+ printk(KERN_INFO "sym53c416: ISAPnP card found and configured at 0x%X, IRQ %d.\n",
+ i[1], i[2]);
+ sym53c416_setup(NULL, i);
+ }
+ }
+ sym53c416_probe();
+
+ /* Now we register and set up each host adapter found... */
+ for(count = 0, i = 0; i < host_index; i++) {
+ if (!request_region(hosts[i].base, IO_RANGE, ID))
+ continue;
+ if (!sym53c416_test(hosts[i].base)) {
+ printk(KERN_WARNING "No sym53c416 found at address 0x%03x\n", hosts[i].base);
+ goto fail_release_region;
+ }
+
+ /* We don't have an irq yet, so we should probe for one */
+ if (!hosts[i].irq)
+ hosts[i].irq = sym53c416_probeirq(hosts[i].base, hosts[i].scsi_id);
+ if (!hosts[i].irq)
+ goto fail_release_region;
+
+ shpnt = scsi_register(tpnt, 0);
+ if (!shpnt)
+ goto fail_release_region;
+ /* Request for specified IRQ */
+ if (request_irq(hosts[i].irq, sym53c416_intr_handle, 0, ID, shpnt))
+ goto fail_free_host;
+
+ spin_lock_irqsave(&sym53c416_lock, flags);
+ shpnt->unique_id = hosts[i].base;
+ shpnt->io_port = hosts[i].base;
+ shpnt->n_io_port = IO_RANGE;
+ shpnt->irq = hosts[i].irq;
+ shpnt->this_id = hosts[i].scsi_id;
+ sym53c416_init(hosts[i].base, hosts[i].scsi_id);
+ count++;
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ continue;
+
+ fail_free_host:
+ scsi_unregister(shpnt);
+ fail_release_region:
+ release_region(hosts[i].base, IO_RANGE);
+ }
+ return count;
+}
+
+const char *sym53c416_info(struct Scsi_Host *SChost)
+{
+ int i;
+ int base = SChost->io_port;
+ int irq = SChost->irq;
+ int scsi_id = 0;
+ int rev = inb(base + TC_HIGH);
+
+ for(i = 0; i < host_index; i++)
+ if(hosts[i].base == base)
+ scsi_id = hosts[i].scsi_id;
+ sprintf(info, "Symbios Logic 53c416 (rev. %d) at 0x%03x, irq %d, SCSI-ID %d, %s pio", rev, base, irq, scsi_id, (fastpio)? "fast" : "slow");
+ return info;
+}
+
+static int sym53c416_queuecommand_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ int base;
+ unsigned long flags = 0;
+ int i;
+
+ /* Store base register as we can have more than one controller in the system */
+ base = SCpnt->device->host->io_port;
+ current_command = SCpnt; /* set current command */
+ current_command->scsi_done = done; /* set ptr to done function */
+ current_command->SCp.phase = command_ph; /* currect phase is the command phase */
+ current_command->SCp.Status = 0;
+ current_command->SCp.Message = 0;
+
+ spin_lock_irqsave(&sym53c416_lock, flags);
+ outb(scmd_id(SCpnt), base + DEST_BUS_ID); /* Set scsi id target */
+ outb(FLUSH_FIFO, base + COMMAND_REG); /* Flush SCSI and PIO FIFO's */
+ /* Write SCSI command into the SCSI fifo */
+ for(i = 0; i < SCpnt->cmd_len; i++)
+ outb(SCpnt->cmnd[i], base + SCSI_FIFO);
+ /* Start selection sequence */
+ outb(SEL_WITHOUT_ATN_SEQ, base + COMMAND_REG);
+ /* Now an interrupt will be generated which we will catch in out interrupt routine */
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ return 0;
+}
+
+DEF_SCSI_QCMD(sym53c416_queuecommand)
+
+static int sym53c416_host_reset(Scsi_Cmnd *SCpnt)
+{
+ int base;
+ int scsi_id = -1;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sym53c416_lock, flags);
+
+ /* printk("sym53c416_reset\n"); */
+ base = SCpnt->device->host->io_port;
+ /* search scsi_id - fixme, we shouldn't need to iterate for this! */
+ for(i = 0; i < host_index && scsi_id == -1; i++)
+ if(hosts[i].base == base)
+ scsi_id = hosts[i].scsi_id;
+ outb(RESET_CHIP, base + COMMAND_REG);
+ outb(NOOP | PIO_MODE, base + COMMAND_REG);
+ outb(RESET_SCSI_BUS, base + COMMAND_REG);
+ sym53c416_init(base, scsi_id);
+
+ spin_unlock_irqrestore(&sym53c416_lock, flags);
+ return SUCCESS;
+}
+
+static int sym53c416_release(struct Scsi_Host *shost)
+{
+ if (shost->irq)
+ free_irq(shost->irq, shost);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ return 0;
+}
+
+static int sym53c416_bios_param(struct scsi_device *sdev,
+ struct block_device *dev,
+ sector_t capacity, int *ip)
+{
+ int size;
+
+ size = capacity;
+ ip[0] = 64; /* heads */
+ ip[1] = 32; /* sectors */
+ if((ip[2] = size >> 11) > 1024) /* cylinders, test for big disk */
+ {
+ ip[0] = 255; /* heads */
+ ip[1] = 63; /* sectors */
+ ip[2] = size / (255 * 63); /* cylinders */
+ }
+ return 0;
+}
+
+/* Loadable module support */
+#ifdef MODULE
+
+MODULE_AUTHOR("Lieven Willems");
+MODULE_LICENSE("GPL");
+
+module_param_array(sym53c416, uint, NULL, 0);
+module_param_array(sym53c416_1, uint, NULL, 0);
+module_param_array(sym53c416_2, uint, NULL, 0);
+module_param_array(sym53c416_3, uint, NULL, 0);
+
+#endif
+
+static struct scsi_host_template driver_template = {
+ .proc_name = "sym53c416",
+ .name = "Symbios Logic 53c416",
+ .detect = sym53c416_detect,
+ .info = sym53c416_info,
+ .queuecommand = sym53c416_queuecommand,
+ .eh_host_reset_handler =sym53c416_host_reset,
+ .release = sym53c416_release,
+ .bios_param = sym53c416_bios_param,
+ .can_queue = 1,
+ .this_id = SYM53C416_SCSI_ID,
+ .sg_tablesize = 32,
+ .cmd_per_lun = 1,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+#include "scsi_module.c"
diff --git a/drivers/scsi/sym53c416.h b/drivers/scsi/sym53c416.h
new file mode 100644
index 000000000..387de5d80
--- /dev/null
+++ b/drivers/scsi/sym53c416.h
@@ -0,0 +1,33 @@
+/*
+ * sym53c416.h
+ *
+ * Copyright (C) 1998 Lieven Willems (lw_linux@hotmail.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef _SYM53C416_H
+#define _SYM53C416_H
+
+#include <linux/types.h>
+
+#define SYM53C416_SCSI_ID 7
+
+static int sym53c416_detect(struct scsi_host_template *);
+static const char *sym53c416_info(struct Scsi_Host *);
+static int sym53c416_release(struct Scsi_Host *);
+static int sym53c416_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static int sym53c416_host_reset(Scsi_Cmnd *);
+static int sym53c416_bios_param(struct scsi_device *, struct block_device *,
+ sector_t, int *);
+static void sym53c416_setup(char *str, int *ints);
+#endif
diff --git a/drivers/scsi/sym53c8xx_2/Makefile b/drivers/scsi/sym53c8xx_2/Makefile
new file mode 100644
index 000000000..873e8ced8
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/Makefile
@@ -0,0 +1,4 @@
+# Makefile for the NCR/SYMBIOS/LSI 53C8XX PCI SCSI controllers driver.
+
+sym53c8xx-objs := sym_fw.o sym_glue.o sym_hipd.o sym_malloc.o sym_nvram.o
+obj-$(CONFIG_SCSI_SYM53C8XX_2) := sym53c8xx.o
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
new file mode 100644
index 000000000..62d29cfac
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -0,0 +1,215 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SYM53C8XX_H
+#define SYM53C8XX_H
+
+
+/*
+ * DMA addressing mode.
+ *
+ * 0 : 32 bit addressing for all chips.
+ * 1 : 40 bit addressing when supported by chip.
+ * 2 : 64 bit addressing when supported by chip,
+ * limited to 16 segments of 4 GB -> 64 GB max.
+ */
+#define SYM_CONF_DMA_ADDRESSING_MODE CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
+
+/*
+ * NVRAM support.
+ */
+#if 1
+#define SYM_CONF_NVRAM_SUPPORT (1)
+#endif
+
+/*
+ * These options are not tunable from 'make config'
+ */
+#if 1
+#define SYM_LINUX_PROC_INFO_SUPPORT
+#define SYM_LINUX_USER_COMMAND_SUPPORT
+#define SYM_LINUX_USER_INFO_SUPPORT
+#define SYM_LINUX_DEBUG_CONTROL_SUPPORT
+#endif
+
+/*
+ * Also handle old NCR chips if not (0).
+ */
+#define SYM_CONF_GENERIC_SUPPORT (1)
+
+/*
+ * Allow tags from 2 to 256, default 8
+ */
+#ifndef CONFIG_SCSI_SYM53C8XX_MAX_TAGS
+#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS (8)
+#endif
+
+#if CONFIG_SCSI_SYM53C8XX_MAX_TAGS < 2
+#define SYM_CONF_MAX_TAG (2)
+#elif CONFIG_SCSI_SYM53C8XX_MAX_TAGS > 256
+#define SYM_CONF_MAX_TAG (256)
+#else
+#define SYM_CONF_MAX_TAG CONFIG_SCSI_SYM53C8XX_MAX_TAGS
+#endif
+
+#ifndef CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS
+#define CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS SYM_CONF_MAX_TAG
+#endif
+
+/*
+ * Anyway, we configure the driver for at least 64 tags per LUN. :)
+ */
+#if SYM_CONF_MAX_TAG <= 64
+#define SYM_CONF_MAX_TAG_ORDER (6)
+#elif SYM_CONF_MAX_TAG <= 128
+#define SYM_CONF_MAX_TAG_ORDER (7)
+#else
+#define SYM_CONF_MAX_TAG_ORDER (8)
+#endif
+
+/*
+ * Max number of SG entries.
+ */
+#define SYM_CONF_MAX_SG (96)
+
+/*
+ * Driver setup structure.
+ *
+ * This structure is initialized from linux config options.
+ * It can be overridden at boot-up by the boot command line.
+ */
+struct sym_driver_setup {
+ u_short max_tag;
+ u_char burst_order;
+ u_char scsi_led;
+ u_char scsi_diff;
+ u_char irq_mode;
+ u_char scsi_bus_check;
+ u_char host_id;
+
+ u_char verbose;
+ u_char settle_delay;
+ u_char use_nvram;
+ u_long excludes[8];
+};
+
+#define SYM_SETUP_MAX_TAG sym_driver_setup.max_tag
+#define SYM_SETUP_BURST_ORDER sym_driver_setup.burst_order
+#define SYM_SETUP_SCSI_LED sym_driver_setup.scsi_led
+#define SYM_SETUP_SCSI_DIFF sym_driver_setup.scsi_diff
+#define SYM_SETUP_IRQ_MODE sym_driver_setup.irq_mode
+#define SYM_SETUP_SCSI_BUS_CHECK sym_driver_setup.scsi_bus_check
+#define SYM_SETUP_HOST_ID sym_driver_setup.host_id
+#define boot_verbose sym_driver_setup.verbose
+
+/*
+ * Initial setup.
+ *
+ * Can be overriden at startup by a command line.
+ */
+#define SYM_LINUX_DRIVER_SETUP { \
+ .max_tag = CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS, \
+ .burst_order = 7, \
+ .scsi_led = 1, \
+ .scsi_diff = 1, \
+ .irq_mode = 0, \
+ .scsi_bus_check = 1, \
+ .host_id = 7, \
+ .verbose = 0, \
+ .settle_delay = 3, \
+ .use_nvram = 1, \
+}
+
+extern struct sym_driver_setup sym_driver_setup;
+extern unsigned int sym_debug_flags;
+#define DEBUG_FLAGS sym_debug_flags
+
+/*
+ * Max number of targets.
+ * Maximum is 16 and you are advised not to change this value.
+ */
+#ifndef SYM_CONF_MAX_TARGET
+#define SYM_CONF_MAX_TARGET (16)
+#endif
+
+/*
+ * Max number of logical units.
+ * SPI-2 allows up to 64 logical units, but in real life, target
+ * that implements more that 7 logical units are pretty rare.
+ * Anyway, the cost of accepting up to 64 logical unit is low in
+ * this driver, thus going with the maximum is acceptable.
+ */
+#ifndef SYM_CONF_MAX_LUN
+#define SYM_CONF_MAX_LUN (64)
+#endif
+
+/*
+ * Max number of IO control blocks queued to the controller.
+ * Each entry needs 8 bytes and the queues are allocated contiguously.
+ * Since we donnot want to allocate more than a page, the theorical
+ * maximum is PAGE_SIZE/8. For safety, we announce a bit less to the
+ * access method. :)
+ * When not supplied, as it is suggested, the driver compute some
+ * good value for this parameter.
+ */
+/* #define SYM_CONF_MAX_START (PAGE_SIZE/8 - 16) */
+
+/*
+ * Support for Immediate Arbitration.
+ * Not advised.
+ */
+/* #define SYM_CONF_IARB_SUPPORT */
+
+/*
+ * Only relevant if IARB support configured.
+ * - Max number of successive settings of IARB hints.
+ * - Set IARB on arbitration lost.
+ */
+#define SYM_CONF_IARB_MAX 3
+#define SYM_CONF_SET_IARB_ON_ARB_LOST 1
+
+/*
+ * Returning wrong residuals may make problems.
+ * When zero, this define tells the driver to
+ * always return 0 as transfer residual.
+ * Btw, all my testings of residuals have succeeded.
+ */
+#define SYM_SETUP_RESIDUAL_SUPPORT 1
+
+#endif /* SYM53C8XX_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_defs.h b/drivers/scsi/sym53c8xx_2/sym_defs.h
new file mode 100644
index 000000000..defccc477
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_defs.h
@@ -0,0 +1,792 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SYM_DEFS_H
+#define SYM_DEFS_H
+
+#define SYM_VERSION "2.2.3"
+#define SYM_DRIVER_NAME "sym-" SYM_VERSION
+
+/*
+ * SYM53C8XX device features descriptor.
+ */
+struct sym_chip {
+ u_short device_id;
+ u_short revision_id;
+ char *name;
+ u_char burst_max; /* log-base-2 of max burst */
+ u_char offset_max;
+ u_char nr_divisor;
+ u_char lp_probe_bit;
+ u_int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1) /* Wide data transfers */
+#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */
+#define FE_ULTRA2 (1<<3) /* Ultra 2 - 40 Mtrans/sec */
+#define FE_DBLR (1<<4) /* Clock doubler present */
+#define FE_QUAD (1<<5) /* Clock quadrupler present */
+#define FE_ERL (1<<6) /* Enable read line */
+#define FE_CLSE (1<<7) /* Cache line size enable */
+#define FE_WRIE (1<<8) /* Write & Invalidate enable */
+#define FE_ERMP (1<<9) /* Enable read multiple */
+#define FE_BOF (1<<10) /* Burst opcode fetch */
+#define FE_DFS (1<<11) /* DMA fifo size */
+#define FE_PFEN (1<<12) /* Prefetch enable */
+#define FE_LDSTR (1<<13) /* Load/Store supported */
+#define FE_RAM (1<<14) /* On chip RAM present */
+#define FE_VARCLK (1<<15) /* Clock frequency may vary */
+#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
+#define FE_64BIT (1<<17) /* 64-bit PCI BUS interface */
+#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
+#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
+#define FE_LEDC (1<<20) /* Hardware control of LED */
+#define FE_ULTRA3 (1<<21) /* Ultra 3 - 80 Mtrans/sec DT */
+#define FE_66MHZ (1<<22) /* 66MHz PCI support */
+#define FE_CRC (1<<23) /* CRC support */
+#define FE_DIFF (1<<24) /* SCSI HVD support */
+#define FE_DFBC (1<<25) /* Have DFBC register */
+#define FE_LCKFRQ (1<<26) /* Have LCKFRQ */
+#define FE_C10 (1<<27) /* Various C10 core (mis)features */
+#define FE_U3EN (1<<28) /* U3EN bit usable */
+#define FE_DAC (1<<29) /* Support PCI DAC (64 bit addressing) */
+#define FE_ISTAT1 (1<<30) /* Have ISTAT1, MBOX0, MBOX1 registers */
+
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
+};
+
+/*
+ * SYM53C8XX IO register data structure.
+ */
+struct sym_reg {
+/*00*/ u8 nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u8 nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+ #define IARB 0x02 /* immediate arbitration */
+
+/*02*/ u8 nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u8 nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+ /* bits 0-2, 7 rsvd for C1010 */
+
+/*04*/ u8 nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u8 nc_sxfer; /* ### Sync speed and count */
+ /* bits 6-7 rsvd for C1010 */
+
+/*06*/ u8 nc_sdid; /* ### Destination-ID */
+
+/*07*/ u8 nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u8 nc_sfbr; /* ### First byte received */
+
+/*09*/ u8 nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u8 nc_ssid;
+
+/*0b*/ u8 nc_sbcl;
+
+/*0c*/ u8 nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u8 nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u8 nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u8 nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u8 nc_dsa; /* --> Base page */
+/*11*/ u8 nc_dsa1;
+/*12*/ u8 nc_dsa2;
+/*13*/ u8 nc_dsa3;
+
+/*14*/ u8 nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to script */
+ #define SEM 0x10 /* r/w: message between host + script */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u8 nc_istat1; /* 896 only */
+ #define FLSH 0x04 /* sta: chip is flushing */
+ #define SCRUN 0x02 /* sta: scripts are running */
+ #define SIRQD 0x01 /* r/w: disable INT pin */
+
+/*16*/ u8 nc_mbox0; /* 896 only */
+/*17*/ u8 nc_mbox1; /* 896 only */
+
+/*18*/ u8 nc_ctest0;
+/*19*/ u8 nc_ctest1;
+
+/*1a*/ u8 nc_ctest2;
+ #define CSIGP 0x40
+ /* bits 0-2,7 rsvd for C1010 */
+
+/*1b*/ u8 nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+ /* bits 4-7 rsvd for C1010 */
+
+/*1c*/ u32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u8 nc_dfifo;
+/*21*/ u8 nc_ctest4;
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u8 nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+ /* bits 0-1, 3-7 rsvd for C1010 */
+
+/*23*/ u8 nc_ctest6;
+
+/*24*/ u32 nc_dbc; /* ### Byte count and command */
+/*28*/ u32 nc_dnad; /* ### Next command register */
+/*2c*/ u32 nc_dsp; /* --> Script Pointer */
+/*30*/ u32 nc_dsps; /* --> Script pointer save/opcode#2 */
+
+/*34*/ u8 nc_scratcha; /* Temporary register a */
+/*35*/ u8 nc_scratcha1;
+/*36*/ u8 nc_scratcha2;
+/*37*/ u8 nc_scratcha3;
+
+/*38*/ u8 nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u8 nc_dien;
+/*3a*/ u8 nc_sbr;
+
+/*3b*/ u8 nc_dcntl; /* --> Script execution control */
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+ /* bits 0-1 rsvd for C1010 */
+
+/*3c*/ u32 nc_adder;
+
+/*40*/ u16 nc_sien; /* -->: interrupt enable */
+/*42*/ u16 nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u8 nc_slpar;
+/*45*/ u8 nc_swide;
+/*46*/ u8 nc_macntl;
+/*47*/ u8 nc_gpcntl;
+/*48*/ u8 nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u8 nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u16 nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u8 nc_stest0;
+
+/*4d*/ u8 nc_stest1;
+ #define SCLK 0x80 /* Use the PCI clock as SCSI clock */
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u8 nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u8 nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u16 nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u8 nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+ /* bits 0-5 rsvd for C1010 */
+
+/*53*/ u8 nc_53_;
+/*54*/ u16 nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u8 nc_ccntl0; /* Chip Control 0 (896) */
+ #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */
+ #define PMJCTL 0x40 /* Phase Mismatch Jump Control */
+ #define ENNDJ 0x20 /* Enable Non Data PM Jump */
+ #define DISFC 0x10 /* Disable Auto FIFO Clear */
+ #define DILS 0x02 /* Disable Internal Load/Store */
+ #define DPR 0x01 /* Disable Pipe Req */
+
+/*57*/ u8 nc_ccntl1; /* Chip Control 1 (896) */
+ #define ZMOD 0x80 /* High Impedance Mode */
+ #define DDAC 0x08 /* Disable Dual Address Cycle */
+ #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */
+ #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. BMOV */
+ #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */
+
+/*58*/ u16 nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u16 nc_5a_;
+
+/*5c*/ u8 nc_scr0; /* Working register B */
+/*5d*/ u8 nc_scr1;
+/*5e*/ u8 nc_scr2;
+/*5f*/ u8 nc_scr3;
+
+/*60*/ u8 nc_scrx[64]; /* Working register C-R */
+/*a0*/ u32 nc_mmrs; /* Memory Move Read Selector */
+/*a4*/ u32 nc_mmws; /* Memory Move Write Selector */
+/*a8*/ u32 nc_sfs; /* Script Fetch Selector */
+/*ac*/ u32 nc_drs; /* DSA Relative Selector */
+/*b0*/ u32 nc_sbms; /* Static Block Move Selector */
+/*b4*/ u32 nc_dbms; /* Dynamic Block Move Selector */
+/*b8*/ u32 nc_dnad64; /* DMA Next Address 64 */
+/*bc*/ u16 nc_scntl4; /* C1010 only */
+ #define U3EN 0x80 /* Enable Ultra 3 */
+ #define AIPCKEN 0x40 /* AIP checking enable */
+ /* Also enable AIP generation on C10-33*/
+ #define XCLKH_DT 0x08 /* Extra clock of data hold on DT edge */
+ #define XCLKH_ST 0x04 /* Extra clock of data hold on ST edge */
+ #define XCLKS_DT 0x02 /* Extra clock of data set on DT edge */
+ #define XCLKS_ST 0x01 /* Extra clock of data set on ST edge */
+/*be*/ u8 nc_aipcntl0; /* AIP Control 0 C1010 only */
+/*bf*/ u8 nc_aipcntl1; /* AIP Control 1 C1010 only */
+ #define DISAIP 0x08 /* Disable AIP generation C10-66 only */
+/*c0*/ u32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */
+/*c4*/ u32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */
+/*c8*/ u8 nc_rbc; /* Remaining Byte Count */
+/*c9*/ u8 nc_rbc1;
+/*ca*/ u8 nc_rbc2;
+/*cb*/ u8 nc_rbc3;
+
+/*cc*/ u8 nc_ua; /* Updated Address */
+/*cd*/ u8 nc_ua1;
+/*ce*/ u8 nc_ua2;
+/*cf*/ u8 nc_ua3;
+/*d0*/ u32 nc_esa; /* Entry Storage Address */
+/*d4*/ u8 nc_ia; /* Instruction Address */
+/*d5*/ u8 nc_ia1;
+/*d6*/ u8 nc_ia2;
+/*d7*/ u8 nc_ia3;
+/*d8*/ u32 nc_sbc; /* SCSI Byte Count (3 bytes only) */
+/*dc*/ u32 nc_csbc; /* Cumulative SCSI Byte Count */
+ /* Following for C1010 only */
+/*e0*/ u16 nc_crcpad; /* CRC Value */
+/*e2*/ u8 nc_crccntl0; /* CRC control register */
+ #define SNDCRC 0x10 /* Send CRC Request */
+/*e3*/ u8 nc_crccntl1; /* CRC control register */
+/*e4*/ u32 nc_crcdata; /* CRC data register */
+/*e8*/ u32 nc_e8_;
+/*ec*/ u32 nc_ec_;
+/*f0*/ u16 nc_dfbc; /* DMA FIFO byte count */
+};
+
+/*-----------------------------------------------------------
+ *
+ * Utility macros for the script.
+ *
+ *-----------------------------------------------------------
+ */
+
+#define REGJ(p,r) (offsetof(struct sym_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+/*-----------------------------------------------------------
+ *
+ * SCSI phases
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_DT_DATA_OUT 0x04000000
+#define SCR_DT_DATA_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+/* DT phases are illegal for non Ultra3 mode */
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+
+/*-----------------------------------------------------------
+ *
+ * Data transfer via SCSI.
+ *
+ *-----------------------------------------------------------
+ *
+ * MOVE_ABS (LEN)
+ * <<start address>>
+ *
+ * MOVE_IND (LEN)
+ * <<dnad_offset>>
+ *
+ * MOVE_TBL
+ * <<dnad_offset>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define OPC_MOVE 0x08000000
+
+#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
+/* #define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l)) */
+#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE)
+
+#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
+/* #define SCR_CHMOV_IND(l) ((0x20000000) | (l)) */
+#define SCR_CHMOV_TBL (0x10000000)
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+/* We steal the `indirect addressing' flag for target mode MOVE in scripts */
+
+#define OPC_TCHMOVE 0x08000000
+
+#define SCR_TCHMOVE_ABS(l) ((0x20000000 | OPC_TCHMOVE) | (l))
+#define SCR_TCHMOVE_TBL (0x30000000 | OPC_TCHMOVE)
+
+#define SCR_TMOV_ABS(l) ((0x20000000) | (l))
+#define SCR_TMOV_TBL (0x30000000)
+#endif
+
+struct sym_tblmove {
+ u32 size;
+ u32 addr;
+};
+
+/*-----------------------------------------------------------
+ *
+ * Selection
+ *
+ *-----------------------------------------------------------
+ *
+ * SEL_ABS | SCR_ID (0..15) [ | REL_JMP]
+ * <<alternate_address>>
+ *
+ * SEL_TBL | << dnad_offset>> [ | REL_JMP]
+ * <<alternate_address>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+#define SCR_RESEL_ABS 0x40000000
+#define SCR_RESEL_ABS_ATN 0x41000000
+#define SCR_RESEL_TBL 0x42000000
+#define SCR_RESEL_TBL_ATN 0x43000000
+#endif
+
+struct sym_tblsel {
+ u_char sel_scntl4; /* C1010 only */
+ u_char sel_sxfer;
+ u_char sel_id;
+ u_char sel_scntl3;
+};
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u32)(id)) << 16)
+
+/*-----------------------------------------------------------
+ *
+ * Waiting for Disconnect or Reselect
+ *
+ *-----------------------------------------------------------
+ *
+ * WAIT_DISC
+ * dummy: <<alternate_address>>
+ *
+ * WAIT_RESEL
+ * <<alternate_address>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+#define SCR_DISCONNECT 0x48000000
+#endif
+
+/*-----------------------------------------------------------
+ *
+ * Bit Set / Reset
+ *
+ *-----------------------------------------------------------
+ *
+ * SET (flags {|.. })
+ *
+ * CLR (flags {|.. })
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+/*-----------------------------------------------------------
+ *
+ * Memory to memory move
+ *
+ *-----------------------------------------------------------
+ *
+ * COPY (bytecount)
+ * << source_address >>
+ * << destination_address >>
+ *
+ * SCR_COPY sets the NO FLUSH option by default.
+ * SCR_COPY_F does not set this option.
+ *
+ * For chips which do not support this option,
+ * sym_fw_bind_script() will remove this bit.
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+ *
+ * Register move and binary operations
+ *
+ *-----------------------------------------------------------
+ *
+ * SFBR_REG (reg, op, data) reg = SFBR op data
+ * << 0 >>
+ *
+ * REG_SFBR (reg, op, data) SFBR = reg op data
+ * << 0 >>
+ *
+ * REG_REG (reg, op, data) reg = reg op data
+ * << 0 >>
+ *
+ *-----------------------------------------------------------
+ *
+ * On 825A, 875, 895 and 896 chips the content
+ * of SFBR register can be used as data (SCR_SFBR_DATA).
+ * The 896 has additionnal IO registers starting at
+ * offset 0x80. Bit 7 of register offset is stored in
+ * bit 7 of the SCRIPTS instruction first DWORD.
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */
+
+/*-----------------------------------------------------------
+ *
+ * FROM_REG (reg) SFBR = reg
+ * << 0 >>
+ *
+ * TO_REG (reg) reg = SFBR
+ * << 0 >>
+ *
+ * LOAD_REG (reg, data) reg = <data>
+ * << 0 >>
+ *
+ * LOAD_SFBR(data) SFBR = <data>
+ * << 0 >>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+ *
+ * LOAD from memory to register.
+ * STORE from register to memory.
+ *
+ * Only supported by 810A, 860, 825A, 875, 895 and 896.
+ *
+ *-----------------------------------------------------------
+ *
+ * LOAD_ABS (LEN)
+ * <<start address>>
+ *
+ * LOAD_REL (LEN) (DSA relative)
+ * <<dsa_offset>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
+#define SCR_NO_FLUSH2 0x02000000
+#define SCR_DSA_REL2 0x10000000
+
+#define SCR_LOAD_R(reg, how, n) \
+ (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_STORE_R(reg, how, n) \
+ (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
+#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n)
+#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n)
+
+#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
+#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n)
+#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n)
+
+
+/*-----------------------------------------------------------
+ *
+ * Waiting for Disconnect or Reselect
+ *
+ *-----------------------------------------------------------
+ *
+ * JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<address>>
+ *
+ * JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<distance>>
+ *
+ * CALL [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<address>>
+ *
+ * CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<distance>>
+ *
+ * RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<dummy>>
+ *
+ * INT [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<ident>>
+ *
+ * INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+ * <<ident>>
+ *
+ * Conditions:
+ * WHEN (phase)
+ * IF (phase)
+ * CARRYSET
+ * DATA (data, mask)
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMP64 0x80480000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
+
+/*-----------------------------------------------------------
+ *
+ * SCSI constants.
+ *
+ *-----------------------------------------------------------
+ */
+
+/*
+ * Messages
+ */
+
+#define M_COMPLETE COMMAND_COMPLETE
+#define M_EXTENDED EXTENDED_MESSAGE
+#define M_SAVE_DP SAVE_POINTERS
+#define M_RESTORE_DP RESTORE_POINTERS
+#define M_DISCONNECT DISCONNECT
+#define M_ID_ERROR INITIATOR_ERROR
+#define M_ABORT ABORT_TASK_SET
+#define M_REJECT MESSAGE_REJECT
+#define M_NOOP NOP
+#define M_PARITY MSG_PARITY_ERROR
+#define M_LCOMPLETE LINKED_CMD_COMPLETE
+#define M_FCOMPLETE LINKED_FLG_CMD_COMPLETE
+#define M_RESET TARGET_RESET
+#define M_ABORT_TAG ABORT_TASK
+#define M_CLEAR_QUEUE CLEAR_TASK_SET
+#define M_INIT_REC INITIATE_RECOVERY
+#define M_REL_REC RELEASE_RECOVERY
+#define M_TERMINATE (0x11)
+#define M_SIMPLE_TAG SIMPLE_QUEUE_TAG
+#define M_HEAD_TAG HEAD_OF_QUEUE_TAG
+#define M_ORDERED_TAG ORDERED_QUEUE_TAG
+#define M_IGN_RESIDUE IGNORE_WIDE_RESIDUE
+
+#define M_X_MODIFY_DP EXTENDED_MODIFY_DATA_POINTER
+#define M_X_SYNC_REQ EXTENDED_SDTR
+#define M_X_WIDE_REQ EXTENDED_WDTR
+#define M_X_PPR_REQ EXTENDED_PPR
+
+/*
+ * PPR protocol options
+ */
+#define PPR_OPT_IU (0x01)
+#define PPR_OPT_DT (0x02)
+#define PPR_OPT_QAS (0x04)
+#define PPR_OPT_MASK (0x07)
+
+/*
+ * Status
+ */
+
+#define S_GOOD SAM_STAT_GOOD
+#define S_CHECK_COND SAM_STAT_CHECK_CONDITION
+#define S_COND_MET SAM_STAT_CONDITION_MET
+#define S_BUSY SAM_STAT_BUSY
+#define S_INT SAM_STAT_INTERMEDIATE
+#define S_INT_COND_MET SAM_STAT_INTERMEDIATE_CONDITION_MET
+#define S_CONFLICT SAM_STAT_RESERVATION_CONFLICT
+#define S_TERMINATED SAM_STAT_COMMAND_TERMINATED
+#define S_QUEUE_FULL SAM_STAT_TASK_SET_FULL
+#define S_ILLEGAL (0xff)
+
+#endif /* defined SYM_DEFS_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
new file mode 100644
index 000000000..190770bdc
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -0,0 +1,554 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "sym_glue.h"
+
+/*
+ * Macros used for all firmwares.
+ */
+#define SYM_GEN_A(s, label) ((short) offsetof(s, label)),
+#define SYM_GEN_B(s, label) ((short) offsetof(s, label)),
+#define SYM_GEN_Z(s, label) ((short) offsetof(s, label)),
+#define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
+#define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)
+
+
+#if SYM_CONF_GENERIC_SUPPORT
+/*
+ * Allocate firmware #1 script area.
+ */
+#define SYM_FWA_SCR sym_fw1a_scr
+#define SYM_FWB_SCR sym_fw1b_scr
+#define SYM_FWZ_SCR sym_fw1z_scr
+#include "sym_fw1.h"
+static struct sym_fwa_ofs sym_fw1a_ofs = {
+ SYM_GEN_FW_A(struct SYM_FWA_SCR)
+};
+static struct sym_fwb_ofs sym_fw1b_ofs = {
+ SYM_GEN_FW_B(struct SYM_FWB_SCR)
+};
+static struct sym_fwz_ofs sym_fw1z_ofs = {
+ SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
+};
+#undef SYM_FWA_SCR
+#undef SYM_FWB_SCR
+#undef SYM_FWZ_SCR
+#endif /* SYM_CONF_GENERIC_SUPPORT */
+
+/*
+ * Allocate firmware #2 script area.
+ */
+#define SYM_FWA_SCR sym_fw2a_scr
+#define SYM_FWB_SCR sym_fw2b_scr
+#define SYM_FWZ_SCR sym_fw2z_scr
+#include "sym_fw2.h"
+static struct sym_fwa_ofs sym_fw2a_ofs = {
+ SYM_GEN_FW_A(struct SYM_FWA_SCR)
+};
+static struct sym_fwb_ofs sym_fw2b_ofs = {
+ SYM_GEN_FW_B(struct SYM_FWB_SCR)
+ SYM_GEN_B(struct SYM_FWB_SCR, start64)
+ SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
+};
+static struct sym_fwz_ofs sym_fw2z_ofs = {
+ SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
+};
+#undef SYM_FWA_SCR
+#undef SYM_FWB_SCR
+#undef SYM_FWZ_SCR
+
+#undef SYM_GEN_A
+#undef SYM_GEN_B
+#undef SYM_GEN_Z
+#undef PADDR_A
+#undef PADDR_B
+
+#if SYM_CONF_GENERIC_SUPPORT
+/*
+ * Patch routine for firmware #1.
+ */
+static void
+sym_fw1_patch(struct Scsi_Host *shost)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_fw1a_scr *scripta0;
+ struct sym_fw1b_scr *scriptb0;
+
+ scripta0 = (struct sym_fw1a_scr *) np->scripta0;
+ scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
+
+ /*
+ * Remove LED support if not needed.
+ */
+ if (!(np->features & FE_LED0)) {
+ scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
+ }
+
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If user does not want to use IMMEDIATE ARBITRATION
+ * when we are reselected while attempting to arbitrate,
+ * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
+ */
+ if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
+ scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
+#endif
+ /*
+ * Patch some data in SCRIPTS.
+ * - start and done queue initial bus address.
+ * - target bus address table bus address.
+ */
+ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
+ scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
+ scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
+}
+#endif /* SYM_CONF_GENERIC_SUPPORT */
+
+/*
+ * Patch routine for firmware #2.
+ */
+static void
+sym_fw2_patch(struct Scsi_Host *shost)
+{
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+ struct sym_fw2a_scr *scripta0;
+ struct sym_fw2b_scr *scriptb0;
+
+ scripta0 = (struct sym_fw2a_scr *) np->scripta0;
+ scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
+
+ /*
+ * Remove LED support if not needed.
+ */
+ if (!(np->features & FE_LED0)) {
+ scripta0->idle[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->start[0] = cpu_to_scr(SCR_NO_OP);
+ }
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+ /*
+ * Remove useless 64 bit DMA specific SCRIPTS,
+ * when this feature is not available.
+ */
+ if (!use_dac(np)) {
+ scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->is_dmap_dirty[1] = 0;
+ scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP);
+ scripta0->is_dmap_dirty[3] = 0;
+ }
+#endif
+
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If user does not want to use IMMEDIATE ARBITRATION
+ * when we are reselected while attempting to arbitrate,
+ * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
+ */
+ if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
+ scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
+#endif
+ /*
+ * Patch some variable in SCRIPTS.
+ * - start and done queue initial bus address.
+ * - target bus address table bus address.
+ */
+ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba);
+ scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba);
+ scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba);
+
+ /*
+ * Remove the load of SCNTL4 on reselection if not a C10.
+ */
+ if (!(np->features & FE_C10)) {
+ scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->resel_scntl4[1] = cpu_to_scr(0);
+ }
+
+ /*
+ * Remove a couple of work-arounds specific to C1010 if
+ * they are not desirable. See `sym_fw2.h' for more details.
+ */
+ if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_66 &&
+ pdev->revision < 0x1 &&
+ np->pciclk_khz < 60000)) {
+ scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->datao_phase[1] = cpu_to_scr(0);
+ }
+ if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 /* &&
+ pdev->revision < 0xff */)) {
+ scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
+ scripta0->sel_done[1] = cpu_to_scr(0);
+ }
+
+ /*
+ * Patch some other variables in SCRIPTS.
+ * These ones are loaded by the SCRIPTS processor.
+ */
+ scriptb0->pm0_data_addr[0] =
+ cpu_to_scr(np->scripta_ba +
+ offsetof(struct sym_fw2a_scr, pm0_data));
+ scriptb0->pm1_data_addr[0] =
+ cpu_to_scr(np->scripta_ba +
+ offsetof(struct sym_fw2a_scr, pm1_data));
+}
+
+/*
+ * Fill the data area in scripts.
+ * To be done for all firmwares.
+ */
+static void
+sym_fw_fill_data (u32 *in, u32 *out)
+{
+ int i;
+
+ for (i = 0; i < SYM_CONF_MAX_SG; i++) {
+ *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN;
+ *in++ = offsetof (struct sym_dsb, data[i]);
+ *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
+ *out++ = offsetof (struct sym_dsb, data[i]);
+ }
+}
+
+/*
+ * Setup useful script bus addresses.
+ * To be done for all firmwares.
+ */
+static void
+sym_fw_setup_bus_addresses(struct sym_hcb *np, struct sym_fw *fw)
+{
+ u32 *pa;
+ u_short *po;
+ int i;
+
+ /*
+ * Build the bus address table for script A
+ * from the script A offset table.
+ */
+ po = (u_short *) fw->a_ofs;
+ pa = (u32 *) &np->fwa_bas;
+ for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
+ pa[i] = np->scripta_ba + po[i];
+
+ /*
+ * Same for script B.
+ */
+ po = (u_short *) fw->b_ofs;
+ pa = (u32 *) &np->fwb_bas;
+ for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
+ pa[i] = np->scriptb_ba + po[i];
+
+ /*
+ * Same for script Z.
+ */
+ po = (u_short *) fw->z_ofs;
+ pa = (u32 *) &np->fwz_bas;
+ for (i = 0 ; i < sizeof(np->fwz_bas)/sizeof(u32) ; i++)
+ pa[i] = np->scriptz_ba + po[i];
+}
+
+#if SYM_CONF_GENERIC_SUPPORT
+/*
+ * Setup routine for firmware #1.
+ */
+static void
+sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw)
+{
+ struct sym_fw1a_scr *scripta0;
+ struct sym_fw1b_scr *scriptb0;
+
+ scripta0 = (struct sym_fw1a_scr *) np->scripta0;
+ scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
+
+ /*
+ * Fill variable parts in scripts.
+ */
+ sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
+
+ /*
+ * Setup bus addresses used from the C code..
+ */
+ sym_fw_setup_bus_addresses(np, fw);
+}
+#endif /* SYM_CONF_GENERIC_SUPPORT */
+
+/*
+ * Setup routine for firmware #2.
+ */
+static void
+sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw)
+{
+ struct sym_fw2a_scr *scripta0;
+ struct sym_fw2b_scr *scriptb0;
+
+ scripta0 = (struct sym_fw2a_scr *) np->scripta0;
+ scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
+
+ /*
+ * Fill variable parts in scripts.
+ */
+ sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
+
+ /*
+ * Setup bus addresses used from the C code..
+ */
+ sym_fw_setup_bus_addresses(np, fw);
+}
+
+/*
+ * Allocate firmware descriptors.
+ */
+#if SYM_CONF_GENERIC_SUPPORT
+static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
+#endif /* SYM_CONF_GENERIC_SUPPORT */
+static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");
+
+/*
+ * Find the most appropriate firmware for a chip.
+ */
+struct sym_fw *
+sym_find_firmware(struct sym_chip *chip)
+{
+ if (chip->features & FE_LDSTR)
+ return &sym_fw2;
+#if SYM_CONF_GENERIC_SUPPORT
+ else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
+ return &sym_fw1;
+#endif
+ else
+ return NULL;
+}
+
+/*
+ * Bind a script to physical addresses.
+ */
+void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
+{
+ u32 opcode, new, old, tmp1, tmp2;
+ u32 *end, *cur;
+ int relocs;
+
+ cur = start;
+ end = start + len/4;
+
+ while (cur < end) {
+
+ opcode = *cur;
+
+ /*
+ * If we forget to change the length
+ * in scripts, a field will be
+ * padded with 0. This is an illegal
+ * command.
+ */
+ if (opcode == 0) {
+ printf ("%s: ERROR0 IN SCRIPT at %d.\n",
+ sym_name(np), (int) (cur-start));
+ ++cur;
+ continue;
+ };
+
+ /*
+ * We use the bogus value 0xf00ff00f ;-)
+ * to reserve data area in SCRIPTS.
+ */
+ if (opcode == SCR_DATA_ZERO) {
+ *cur++ = 0;
+ continue;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_SCRIPT)
+ printf ("%d: <%x>\n", (int) (cur-start),
+ (unsigned)opcode);
+
+ /*
+ * We don't have to decode ALL commands
+ */
+ switch (opcode >> 28) {
+ case 0xf:
+ /*
+ * LOAD / STORE DSA relative, don't relocate.
+ */
+ relocs = 0;
+ break;
+ case 0xe:
+ /*
+ * LOAD / STORE absolute.
+ */
+ relocs = 1;
+ break;
+ case 0xc:
+ /*
+ * COPY has TWO arguments.
+ */
+ relocs = 2;
+ tmp1 = cur[1];
+ tmp2 = cur[2];
+ if ((tmp1 ^ tmp2) & 3) {
+ printf ("%s: ERROR1 IN SCRIPT at %d.\n",
+ sym_name(np), (int) (cur-start));
+ }
+ /*
+ * If PREFETCH feature not enabled, remove
+ * the NO FLUSH bit if present.
+ */
+ if ((opcode & SCR_NO_FLUSH) &&
+ !(np->features & FE_PFEN)) {
+ opcode = (opcode & ~SCR_NO_FLUSH);
+ }
+ break;
+ case 0x0:
+ /*
+ * MOVE/CHMOV (absolute address)
+ */
+ if (!(np->features & FE_WIDE))
+ opcode = (opcode | OPC_MOVE);
+ relocs = 1;
+ break;
+ case 0x1:
+ /*
+ * MOVE/CHMOV (table indirect)
+ */
+ if (!(np->features & FE_WIDE))
+ opcode = (opcode | OPC_MOVE);
+ relocs = 0;
+ break;
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ case 0x2:
+ /*
+ * MOVE/CHMOV in target role (absolute address)
+ */
+ opcode &= ~0x20000000;
+ if (!(np->features & FE_WIDE))
+ opcode = (opcode & ~OPC_TCHMOVE);
+ relocs = 1;
+ break;
+ case 0x3:
+ /*
+ * MOVE/CHMOV in target role (table indirect)
+ */
+ opcode &= ~0x20000000;
+ if (!(np->features & FE_WIDE))
+ opcode = (opcode & ~OPC_TCHMOVE);
+ relocs = 0;
+ break;
+#endif
+ case 0x8:
+ /*
+ * JUMP / CALL
+ * don't relocate if relative :-)
+ */
+ if (opcode & 0x00800000)
+ relocs = 0;
+ else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
+ relocs = 2;
+ else
+ relocs = 1;
+ break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ relocs = 1;
+ break;
+ default:
+ relocs = 0;
+ break;
+ };
+
+ /*
+ * Scriptify:) the opcode.
+ */
+ *cur++ = cpu_to_scr(opcode);
+
+ /*
+ * If no relocation, assume 1 argument
+ * and just scriptize:) it.
+ */
+ if (!relocs) {
+ *cur = cpu_to_scr(*cur);
+ ++cur;
+ continue;
+ }
+
+ /*
+ * Otherwise performs all needed relocations.
+ */
+ while (relocs--) {
+ old = *cur;
+
+ switch (old & RELOC_MASK) {
+ case RELOC_REGISTER:
+ new = (old & ~RELOC_MASK) + np->mmio_ba;
+ break;
+ case RELOC_LABEL_A:
+ new = (old & ~RELOC_MASK) + np->scripta_ba;
+ break;
+ case RELOC_LABEL_B:
+ new = (old & ~RELOC_MASK) + np->scriptb_ba;
+ break;
+ case RELOC_SOFTC:
+ new = (old & ~RELOC_MASK) + np->hcb_ba;
+ break;
+ case 0:
+ /*
+ * Don't relocate a 0 address.
+ * They are mostly used for patched or
+ * script self-modified areas.
+ */
+ if (old == 0) {
+ new = old;
+ break;
+ }
+ /* fall through */
+ default:
+ new = 0;
+ panic("sym_fw_bind_script: "
+ "weird relocation %x\n", old);
+ break;
+ }
+
+ *cur++ = cpu_to_scr(new);
+ }
+ };
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.h b/drivers/scsi/sym53c8xx_2/sym_fw.h
new file mode 100644
index 000000000..ae7e0f9e9
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.h
@@ -0,0 +1,205 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SYM_FW_H
+#define SYM_FW_H
+/*
+ * Macro used to generate interfaces for script A.
+ */
+#define SYM_GEN_FW_A(s) \
+ SYM_GEN_A(s, start) SYM_GEN_A(s, getjob_begin) \
+ SYM_GEN_A(s, getjob_end) \
+ SYM_GEN_A(s, select) SYM_GEN_A(s, wf_sel_done) \
+ SYM_GEN_A(s, send_ident) \
+ SYM_GEN_A(s, dispatch) SYM_GEN_A(s, init) \
+ SYM_GEN_A(s, clrack) SYM_GEN_A(s, complete_error) \
+ SYM_GEN_A(s, done) SYM_GEN_A(s, done_end) \
+ SYM_GEN_A(s, idle) SYM_GEN_A(s, ungetjob) \
+ SYM_GEN_A(s, reselect) \
+ SYM_GEN_A(s, resel_tag) SYM_GEN_A(s, resel_dsa) \
+ SYM_GEN_A(s, resel_no_tag) \
+ SYM_GEN_A(s, data_in) SYM_GEN_A(s, data_in2) \
+ SYM_GEN_A(s, data_out) SYM_GEN_A(s, data_out2) \
+ SYM_GEN_A(s, pm0_data) SYM_GEN_A(s, pm1_data)
+
+/*
+ * Macro used to generate interfaces for script B.
+ */
+#define SYM_GEN_FW_B(s) \
+ SYM_GEN_B(s, no_data) \
+ SYM_GEN_B(s, sel_for_abort) SYM_GEN_B(s, sel_for_abort_1) \
+ SYM_GEN_B(s, msg_bad) SYM_GEN_B(s, msg_weird) \
+ SYM_GEN_B(s, wdtr_resp) SYM_GEN_B(s, send_wdtr) \
+ SYM_GEN_B(s, sdtr_resp) SYM_GEN_B(s, send_sdtr) \
+ SYM_GEN_B(s, ppr_resp) SYM_GEN_B(s, send_ppr) \
+ SYM_GEN_B(s, nego_bad_phase) \
+ SYM_GEN_B(s, ident_break) SYM_GEN_B(s, ident_break_atn) \
+ SYM_GEN_B(s, sdata_in) SYM_GEN_B(s, resel_bad_lun) \
+ SYM_GEN_B(s, bad_i_t_l) SYM_GEN_B(s, bad_i_t_l_q) \
+ SYM_GEN_B(s, wsr_ma_helper)
+
+/*
+ * Macro used to generate interfaces for script Z.
+ */
+#define SYM_GEN_FW_Z(s) \
+ SYM_GEN_Z(s, snooptest) SYM_GEN_Z(s, snoopend)
+
+/*
+ * Generates structure interface that contains
+ * offsets within script A, B and Z.
+ */
+#define SYM_GEN_A(s, label) s label;
+#define SYM_GEN_B(s, label) s label;
+#define SYM_GEN_Z(s, label) s label;
+struct sym_fwa_ofs {
+ SYM_GEN_FW_A(u_short)
+};
+struct sym_fwb_ofs {
+ SYM_GEN_FW_B(u_short)
+ SYM_GEN_B(u_short, start64)
+ SYM_GEN_B(u_short, pm_handle)
+};
+struct sym_fwz_ofs {
+ SYM_GEN_FW_Z(u_short)
+};
+
+/*
+ * Generates structure interface that contains
+ * bus addresses within script A, B and Z.
+ */
+struct sym_fwa_ba {
+ SYM_GEN_FW_A(u32)
+};
+struct sym_fwb_ba {
+ SYM_GEN_FW_B(u32)
+ SYM_GEN_B(u32, start64);
+ SYM_GEN_B(u32, pm_handle);
+};
+struct sym_fwz_ba {
+ SYM_GEN_FW_Z(u32)
+};
+#undef SYM_GEN_A
+#undef SYM_GEN_B
+#undef SYM_GEN_Z
+
+/*
+ * Let cc know about the name of the controller data structure.
+ * We need this for function prototype declarations just below.
+ */
+struct sym_hcb;
+
+/*
+ * Generic structure that defines a firmware.
+ */
+struct sym_fw {
+ char *name; /* Name we want to print out */
+ u32 *a_base; /* Pointer to script A template */
+ int a_size; /* Size of script A */
+ struct sym_fwa_ofs
+ *a_ofs; /* Useful offsets in script A */
+ u32 *b_base; /* Pointer to script B template */
+ int b_size; /* Size of script B */
+ struct sym_fwb_ofs
+ *b_ofs; /* Useful offsets in script B */
+ u32 *z_base; /* Pointer to script Z template */
+ int z_size; /* Size of script Z */
+ struct sym_fwz_ofs
+ *z_ofs; /* Useful offsets in script Z */
+ /* Setup and patch methods for this firmware */
+ void (*setup)(struct sym_hcb *, struct sym_fw *);
+ void (*patch)(struct Scsi_Host *);
+};
+
+/*
+ * Macro used to declare a firmware.
+ */
+#define SYM_FW_ENTRY(fw, name) \
+{ \
+ name, \
+ (u32 *) &fw##a_scr, sizeof(fw##a_scr), &fw##a_ofs, \
+ (u32 *) &fw##b_scr, sizeof(fw##b_scr), &fw##b_ofs, \
+ (u32 *) &fw##z_scr, sizeof(fw##z_scr), &fw##z_ofs, \
+ fw##_setup, fw##_patch \
+}
+
+/*
+ * Macros used from the C code to get useful
+ * SCRIPTS bus addresses.
+ */
+#define SCRIPTA_BA(np, label) (np->fwa_bas.label)
+#define SCRIPTB_BA(np, label) (np->fwb_bas.label)
+#define SCRIPTZ_BA(np, label) (np->fwz_bas.label)
+
+/*
+ * Macros used by scripts definitions.
+ *
+ * HADDR_1 generates a reference to a field of the controller data.
+ * HADDR_2 generates a reference to a field of the controller data
+ * with offset.
+ * RADDR_1 generates a reference to a script processor register.
+ * RADDR_2 generates a reference to a script processor register
+ * with offset.
+ * PADDR_A generates a reference to another part of script A.
+ * PADDR_B generates a reference to another part of script B.
+ *
+ * SYM_GEN_PADDR_A and SYM_GEN_PADDR_B are used to define respectively
+ * the PADDR_A and PADDR_B macros for each firmware by setting argument
+ * `s' to the name of the corresponding structure.
+ *
+ * SCR_DATA_ZERO is used to allocate a DWORD of data in scripts areas.
+ */
+
+#define RELOC_SOFTC 0x40000000
+#define RELOC_LABEL_A 0x50000000
+#define RELOC_REGISTER 0x60000000
+#define RELOC_LABEL_B 0x80000000
+#define RELOC_MASK 0xf0000000
+
+#define HADDR_1(label) (RELOC_SOFTC | offsetof(struct sym_hcb, label))
+#define HADDR_2(label,ofs) (RELOC_SOFTC | \
+ (offsetof(struct sym_hcb, label)+(ofs)))
+#define RADDR_1(label) (RELOC_REGISTER | REG(label))
+#define RADDR_2(label,ofs) (RELOC_REGISTER | ((REG(label))+(ofs)))
+
+#define SYM_GEN_PADDR_A(s, label) (RELOC_LABEL_A | offsetof(s, label))
+#define SYM_GEN_PADDR_B(s, label) (RELOC_LABEL_B | offsetof(s, label))
+
+#define SCR_DATA_ZERO 0xf00ff00f
+
+#endif /* SYM_FW_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw1.h b/drivers/scsi/sym53c8xx_2/sym_fw1.h
new file mode 100644
index 000000000..63952ee30
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw1.h
@@ -0,0 +1,1790 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Scripts for SYMBIOS-Processor
+ *
+ * We have to know the offsets of all labels before we reach
+ * them (for forward jumps). Therefore we declare a struct
+ * here. If you make changes inside the script,
+ *
+ * DONT FORGET TO CHANGE THE LENGTHS HERE!
+ */
+
+/*
+ * Script fragments which are loaded into the on-chip RAM
+ * of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
+ * Must not exceed 4K bytes.
+ */
+struct SYM_FWA_SCR {
+ u32 start [ 11];
+ u32 getjob_begin [ 4];
+ u32 _sms_a10 [ 5];
+ u32 getjob_end [ 4];
+ u32 _sms_a20 [ 4];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ u32 select [ 8];
+#else
+ u32 select [ 6];
+#endif
+ u32 _sms_a30 [ 5];
+ u32 wf_sel_done [ 2];
+ u32 send_ident [ 2];
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 select2 [ 8];
+#else
+ u32 select2 [ 2];
+#endif
+ u32 command [ 2];
+ u32 dispatch [ 28];
+ u32 sel_no_cmd [ 10];
+ u32 init [ 6];
+ u32 clrack [ 4];
+ u32 datai_done [ 11];
+ u32 datai_done_wsr [ 20];
+ u32 datao_done [ 11];
+ u32 datao_done_wss [ 6];
+ u32 datai_phase [ 5];
+ u32 datao_phase [ 5];
+ u32 msg_in [ 2];
+ u32 msg_in2 [ 10];
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 status [ 14];
+#else
+ u32 status [ 10];
+#endif
+ u32 complete [ 6];
+ u32 complete2 [ 8];
+ u32 _sms_a40 [ 12];
+ u32 done [ 5];
+ u32 _sms_a50 [ 5];
+ u32 _sms_a60 [ 2];
+ u32 done_end [ 4];
+ u32 complete_error [ 5];
+ u32 save_dp [ 11];
+ u32 restore_dp [ 7];
+ u32 disconnect [ 11];
+ u32 disconnect2 [ 5];
+ u32 _sms_a65 [ 3];
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 idle [ 4];
+#else
+ u32 idle [ 2];
+#endif
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 ungetjob [ 7];
+#else
+ u32 ungetjob [ 5];
+#endif
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ u32 reselect [ 4];
+#else
+ u32 reselect [ 2];
+#endif
+ u32 reselected [ 19];
+ u32 _sms_a70 [ 6];
+ u32 _sms_a80 [ 4];
+ u32 reselected1 [ 25];
+ u32 _sms_a90 [ 4];
+ u32 resel_lun0 [ 7];
+ u32 _sms_a100 [ 4];
+ u32 resel_tag [ 8];
+#if SYM_CONF_MAX_TASK*4 > 512
+ u32 _sms_a110 [ 23];
+#elif SYM_CONF_MAX_TASK*4 > 256
+ u32 _sms_a110 [ 17];
+#else
+ u32 _sms_a110 [ 13];
+#endif
+ u32 _sms_a120 [ 2];
+ u32 resel_go [ 4];
+ u32 _sms_a130 [ 7];
+ u32 resel_dsa [ 2];
+ u32 resel_dsa1 [ 4];
+ u32 _sms_a140 [ 7];
+ u32 resel_no_tag [ 4];
+ u32 _sms_a145 [ 7];
+ u32 data_in [SYM_CONF_MAX_SG * 2];
+ u32 data_in2 [ 4];
+ u32 data_out [SYM_CONF_MAX_SG * 2];
+ u32 data_out2 [ 4];
+ u32 pm0_data [ 12];
+ u32 pm0_data_out [ 6];
+ u32 pm0_data_end [ 7];
+ u32 pm_data_end [ 4];
+ u32 _sms_a150 [ 4];
+ u32 pm1_data [ 12];
+ u32 pm1_data_out [ 6];
+ u32 pm1_data_end [ 9];
+};
+
+/*
+ * Script fragments which stay in main memory for all chips
+ * except for chips that support 8K on-chip RAM.
+ */
+struct SYM_FWB_SCR {
+ u32 no_data [ 2];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ u32 sel_for_abort [ 18];
+#else
+ u32 sel_for_abort [ 16];
+#endif
+ u32 sel_for_abort_1 [ 2];
+ u32 msg_in_etc [ 12];
+ u32 msg_received [ 5];
+ u32 msg_weird_seen [ 5];
+ u32 msg_extended [ 17];
+ u32 _sms_b10 [ 4];
+ u32 msg_bad [ 6];
+ u32 msg_weird [ 4];
+ u32 msg_weird1 [ 8];
+ u32 wdtr_resp [ 6];
+ u32 send_wdtr [ 4];
+ u32 sdtr_resp [ 6];
+ u32 send_sdtr [ 4];
+ u32 ppr_resp [ 6];
+ u32 send_ppr [ 4];
+ u32 nego_bad_phase [ 4];
+ u32 msg_out [ 4];
+ u32 msg_out_done [ 4];
+ u32 data_ovrun [ 3];
+ u32 data_ovrun1 [ 22];
+ u32 data_ovrun2 [ 8];
+ u32 abort_resel [ 16];
+ u32 resend_ident [ 4];
+ u32 ident_break [ 4];
+ u32 ident_break_atn [ 4];
+ u32 sdata_in [ 6];
+ u32 resel_bad_lun [ 4];
+ u32 bad_i_t_l [ 4];
+ u32 bad_i_t_l_q [ 4];
+ u32 bad_status [ 7];
+ u32 wsr_ma_helper [ 4];
+
+ /* Data area */
+ u32 zero [ 1];
+ u32 scratch [ 1];
+ u32 scratch1 [ 1];
+ u32 prev_done [ 1];
+ u32 done_pos [ 1];
+ u32 nextjob [ 1];
+ u32 startpos [ 1];
+ u32 targtbl [ 1];
+};
+
+/*
+ * Script fragments used at initialisations.
+ * Only runs out of main memory.
+ */
+struct SYM_FWZ_SCR {
+ u32 snooptest [ 9];
+ u32 snoopend [ 2];
+};
+
+static struct SYM_FWA_SCR SYM_FWA_SCR = {
+/*--------------------------< START >----------------------------*/ {
+ /*
+ * Switch the LED on.
+ * Will be patched with a NO_OP if LED
+ * not needed or not desired.
+ */
+ SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+ 0,
+ /*
+ * Clear SIGP.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+ /*
+ * Stop here if the C code wants to perform
+ * some error recovery procedure manually.
+ * (Indicate this by setting SEM in ISTAT)
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ /*
+ * Report to the C code the next position in
+ * the start queue the SCRIPTS will schedule.
+ * The C code must not change SCRATCHA.
+ */
+ SCR_COPY (4),
+ PADDR_B (startpos),
+ RADDR_1 (scratcha),
+ SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+ SIR_SCRIPT_STOPPED,
+ /*
+ * Start the next job.
+ *
+ * @DSA = start point for this job.
+ * SCRATCHA = address of this job in the start queue.
+ *
+ * We will restore startpos with SCRATCHA if we fails the
+ * arbitration or if it is the idle job.
+ *
+ * The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
+ * is a critical path. If it is partially executed, it then
+ * may happen that the job address is not yet in the DSA
+ * and the next queue position points to the next JOB.
+ */
+}/*-------------------------< GETJOB_BEGIN >---------------------*/,{
+ /*
+ * Copy to a fixed location both the next STARTPOS
+ * and the current JOB address, using self modifying
+ * SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (scratcha),
+ PADDR_A (_sms_a10),
+ SCR_COPY (8),
+}/*-------------------------< _SMS_A10 >-------------------------*/,{
+ 0,
+ PADDR_B (nextjob),
+ /*
+ * Move the start address to TEMP using self-
+ * modifying SCRIPTS and jump indirectly to
+ * that address.
+ */
+ SCR_COPY (4),
+ PADDR_B (nextjob),
+ RADDR_1 (dsa),
+}/*-------------------------< GETJOB_END >-----------------------*/,{
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a20),
+ SCR_COPY (4),
+}/*-------------------------< _SMS_A20 >-------------------------*/,{
+ 0,
+ RADDR_1 (temp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< SELECT >---------------------------*/,{
+ /*
+ * DSA contains the address of a scheduled
+ * data structure.
+ *
+ * SCRATCHA contains the address of the start queue
+ * entry which points to the next job.
+ *
+ * Set Initiator mode.
+ *
+ * (Target mode is left as an exercise for the reader)
+ */
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ SCR_CLR (SCR_TRG),
+ 0,
+#endif
+ /*
+ * And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select),
+ PADDR_A (ungetjob),
+ /*
+ * Now there are 4 possibilities:
+ *
+ * (1) The chip loses arbitration.
+ * This is ok, because it will try again,
+ * when the bus becomes idle.
+ * (But beware of the timeout function!)
+ *
+ * (2) The chip is reselected.
+ * Then the script processor takes the jump
+ * to the RESELECT label.
+ *
+ * (3) The chip wins arbitration.
+ * Then it will execute SCRIPTS instruction until
+ * the next instruction that checks SCSI phase.
+ * Then will stop and wait for selection to be
+ * complete or selection time-out to occur.
+ *
+ * After having won arbitration, the SCRIPTS
+ * processor is able to execute instructions while
+ * the SCSI core is performing SCSI selection.
+ */
+
+ /*
+ * Copy the CCB header to a fixed location
+ * in the HCB using self-modifying SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a30),
+ SCR_COPY (sizeof(struct sym_ccbh)),
+}/*-------------------------< _SMS_A30 >-------------------------*/,{
+ 0,
+ HADDR_1 (ccb_head),
+ /*
+ * Initialize the status register
+ */
+ SCR_COPY (4),
+ HADDR_1 (ccb_head.status),
+ RADDR_1 (scr0),
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEND_IDENT >-----------------------*/,{
+ /*
+ * Selection complete.
+ * Send the IDENTIFY and possibly the TAG message
+ * and negotiation message if present.
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct sym_dsb, smsg),
+}/*-------------------------< SELECT2 >--------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * Set IMMEDIATE ARBITRATION if we have been given
+ * a hint to do so. (Some job to do after this one).
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ * Anticipate the COMMAND phase.
+ * This is the PHASE we expect at this point.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+ PADDR_A (sel_no_cmd),
+}/*-------------------------< COMMAND >--------------------------*/,{
+ /*
+ * ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct sym_dsb, cmd),
+}/*-------------------------< DISPATCH >-------------------------*/,{
+ /*
+ * MSG_IN is the only phase that shall be
+ * entered at least once for each (re)selection.
+ * So we test it first.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (msg_in),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+ PADDR_A (datao_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+ PADDR_A (datai_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR_A (status),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR_A (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDR_B (msg_out),
+ /*
+ * Discard as many illegal phases as
+ * required and tell the C code about.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ HADDR_1 (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+ -16,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ HADDR_1 (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+ -16,
+ SCR_INT,
+ SIR_BAD_PHASE,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< SEL_NO_CMD >-----------------------*/,{
+ /*
+ * The target does not switch to command
+ * phase after IDENTIFY has been sent.
+ *
+ * If it stays in MSG OUT phase send it
+ * the IDENTIFY again.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (resend_ident),
+ /*
+ * If target does not switch to MSG IN phase
+ * and we sent a negotiation, assert the
+ * failure immediately.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (dispatch),
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ * Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< INIT >-----------------------------*/,{
+ /*
+ * Wait for the SCSI RESET signal to be
+ * inactive before restarting operations,
+ * since the chip may hang on SEL_ATN
+ * if SCSI RESET is active.
+ */
+ SCR_FROM_REG (sstat0),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+ -16,
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< CLRACK >---------------------------*/,{
+ /*
+ * Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE >-----------------------*/,{
+ /*
+ * Save current pointer to LASTP.
+ */
+ SCR_COPY (4),
+ RADDR_1 (temp),
+ HADDR_1 (ccb_head.lastp),
+ /*
+ * If the SWIDE is not full, jump to dispatcher.
+ * We anticipate a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)),
+ PADDR_A (datai_done_wsr),
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR_A (status),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{
+ /*
+ * The SWIDE is full.
+ * Clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ /*
+ * We are expecting an IGNORE RESIDUE message
+ * from the device, otherwise we are in data
+ * overrun condition. Check against MSG_IN phase.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR_A (dispatch),
+ /*
+ * We are in MSG_IN phase,
+ * Read the first byte of the message.
+ * If it is not an IGNORE RESIDUE message,
+ * signal overrun and jump to message
+ * processing.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[0]),
+ SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ PADDR_A (msg_in2),
+ /*
+ * We got the message we expected.
+ * Read the 2nd byte, and jump to dispatcher.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[1]),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE >-----------------------*/,{
+ /*
+ * Save current pointer to LASTP.
+ */
+ SCR_COPY (4),
+ RADDR_1 (temp),
+ HADDR_1 (ccb_head.lastp),
+ /*
+ * If the SODL is not full jump to dispatcher.
+ * We anticipate a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)),
+ PADDR_A (datao_done_wss),
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR_A (status),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{
+ /*
+ * The SODL is full, clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSS),
+ 0,
+ /*
+ * And signal a DATA UNDERRUN condition
+ * to the C code.
+ */
+ SCR_INT,
+ SIR_SODL_UNDERRUN,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAI_PHASE >----------------------*/,{
+ /*
+ * Jump to current pointer.
+ */
+ SCR_COPY (4),
+ HADDR_1 (ccb_head.lastp),
+ RADDR_1 (temp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATAO_PHASE >----------------------*/,{
+ /*
+ * Jump to current pointer.
+ */
+ SCR_COPY (4),
+ HADDR_1 (ccb_head.lastp),
+ RADDR_1 (temp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< MSG_IN >---------------------------*/,{
+ /*
+ * Get the first byte of the message.
+ *
+ * The script processor doesn't negate the
+ * ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------------*/,{
+ /*
+ * Check first against 1 byte messages
+ * that we handle from SCRIPTS.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+ PADDR_A (complete),
+ SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+ PADDR_A (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+ PADDR_A (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+ PADDR_A (restore_dp),
+ /*
+ * We handle all other messages from the
+ * C code, so no need to waste on-chip RAM
+ * for those ones.
+ */
+ SCR_JUMP,
+ PADDR_B (msg_in_etc),
+}/*-------------------------< STATUS >---------------------------*/,{
+ /*
+ * get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ HADDR_1 (scratch),
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
+ * since we may have to tamper the start queue from
+ * the C code.
+ */
+ SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+ 0,
+#endif
+ /*
+ * save status to scsi_status.
+ * mark as complete.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ /*
+ * Anticipate the MESSAGE PHASE for
+ * the TASK COMPLETE message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (msg_in),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< COMPLETE >-------------------------*/,{
+ /*
+ * Complete message.
+ *
+ * When we terminate the cycle by clearing ACK,
+ * the target may disconnect immediately.
+ *
+ * We don't want to be told of an "unexpected disconnect",
+ * so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ * Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ * ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< COMPLETE2 >------------------------*/,{
+ /*
+ * Save host status.
+ */
+ SCR_COPY (4),
+ RADDR_1 (scr0),
+ HADDR_1 (ccb_head.status),
+ /*
+ * Move back the CCB header using self-modifying
+ * SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a40),
+ SCR_COPY (sizeof(struct sym_ccbh)),
+ HADDR_1 (ccb_head),
+}/*-------------------------< _SMS_A40 >-------------------------*/,{
+ 0,
+ /*
+ * Some bridges may reorder DMA writes to memory.
+ * We donnot want the CPU to deal with completions
+ * without all the posted write having been flushed
+ * to memory. This DUMMY READ should flush posted
+ * buffers prior to the CPU having to deal with
+ * completions.
+ */
+ SCR_COPY (4), /* DUMMY READ */
+ HADDR_1 (ccb_head.status),
+ RADDR_1 (scr0),
+ /*
+ * If command resulted in not GOOD status,
+ * call the C code if needed.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ PADDR_B (bad_status),
+ /*
+ * If we performed an auto-sense, call
+ * the C code to synchronyze task aborts
+ * with UNIT ATTENTION conditions.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
+ PADDR_A (complete_error),
+}/*-------------------------< DONE >-----------------------------*/,{
+ /*
+ * Copy the DSA to the DONE QUEUE and
+ * signal completion to the host.
+ * If we are interrupted between DONE
+ * and DONE_END, we must reset, otherwise
+ * the completed CCB may be lost.
+ */
+ SCR_COPY (4),
+ PADDR_B (done_pos),
+ PADDR_A (_sms_a50),
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+}/*-------------------------< _SMS_A50 >-------------------------*/,{
+ 0,
+ SCR_COPY (4),
+ PADDR_B (done_pos),
+ PADDR_A (_sms_a60),
+ /*
+ * The instruction below reads the DONE QUEUE next
+ * free position from memory.
+ * In addition it ensures that all PCI posted writes
+ * are flushed and so the DSA value of the done
+ * CCB is visible by the CPU before INTFLY is raised.
+ */
+ SCR_COPY (8),
+}/*-------------------------< _SMS_A60 >-------------------------*/,{
+ 0,
+ PADDR_B (prev_done),
+}/*-------------------------< DONE_END >-------------------------*/,{
+ SCR_INT_FLY,
+ 0,
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< COMPLETE_ERROR >-------------------*/,{
+ SCR_COPY (4),
+ PADDR_B (startpos),
+ RADDR_1 (scratcha),
+ SCR_INT,
+ SIR_COMPLETE_ERROR,
+}/*-------------------------< SAVE_DP >--------------------------*/,{
+ /*
+ * Clear ACK immediately.
+ * No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * Keep track we received a SAVE DP, so
+ * we will switch to the other PM context
+ * on the next PM since the DP may point
+ * to the current PM context.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ /*
+ * SAVE_DP message:
+ * Copy LASTP to SAVEP.
+ */
+ SCR_COPY (4),
+ HADDR_1 (ccb_head.lastp),
+ HADDR_1 (ccb_head.savep),
+ /*
+ * Anticipate the MESSAGE PHASE for
+ * the DISCONNECT message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (msg_in),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< RESTORE_DP >-----------------------*/,{
+ /*
+ * Clear ACK immediately.
+ * No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * Copy SAVEP to LASTP.
+ */
+ SCR_COPY (4),
+ HADDR_1 (ccb_head.savep),
+ HADDR_1 (ccb_head.lastp),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DISCONNECT >-----------------------*/,{
+ /*
+ * DISCONNECTing ...
+ *
+ * disable the "unexpected disconnect" feature,
+ * and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ * Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ * Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ /*
+ * Save host status.
+ */
+ SCR_COPY (4),
+ RADDR_1 (scr0),
+ HADDR_1 (ccb_head.status),
+}/*-------------------------< DISCONNECT2 >----------------------*/,{
+ /*
+ * Move back the CCB header using self-modifying
+ * SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a65),
+ SCR_COPY (sizeof(struct sym_ccbh)),
+ HADDR_1 (ccb_head),
+}/*-------------------------< _SMS_A65 >-------------------------*/,{
+ 0,
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< IDLE >-----------------------------*/,{
+ /*
+ * Nothing to do?
+ * Switch the LED off and wait for reselect.
+ * Will be patched with a NO_OP if LED
+ * not needed or not desired.
+ */
+ SCR_REG_REG (gpreg, SCR_OR, 0x01),
+ 0,
+#ifdef SYM_CONF_IARB_SUPPORT
+ SCR_JUMPR,
+ 8,
+#endif
+}/*-------------------------< UNGETJOB >-------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * Set IMMEDIATE ARBITRATION, for the next time.
+ * This will give us better chance to win arbitration
+ * for the job we just wanted to do.
+ */
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ * We are not able to restart the SCRIPTS if we are
+ * interrupted and these instruction haven't been
+ * all executed. BTW, this is very unlikely to
+ * happen, but we check that from the C code.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_COPY (4),
+ RADDR_1 (scratcha),
+ PADDR_B (startpos),
+}/*-------------------------< RESELECT >-------------------------*/,{
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ /*
+ * Make sure we are in initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+#endif
+ /*
+ * Sleep waiting for a reselection.
+ */
+ SCR_WAIT_RESEL,
+ PADDR_A(start),
+}/*-------------------------< RESELECTED >-----------------------*/,{
+ /*
+ * Switch the LED on.
+ * Will be patched with a NO_OP if LED
+ * not needed or not desired.
+ */
+ SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+ 0,
+ /*
+ * load the target id into the sdid
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (sdid),
+ 0,
+ /*
+ * Load the target control block address
+ */
+ SCR_COPY (4),
+ PADDR_B (targtbl),
+ RADDR_1 (dsa),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0x3c),
+ 0,
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a70),
+ SCR_COPY (4),
+}/*-------------------------< _SMS_A70 >-------------------------*/,{
+ 0,
+ RADDR_1 (dsa),
+ /*
+ * Copy the TCB header to a fixed place in
+ * the HCB.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a80),
+ SCR_COPY (sizeof(struct sym_tcbh)),
+}/*-------------------------< _SMS_A80 >-------------------------*/,{
+ 0,
+ HADDR_1 (tcb_head),
+ /*
+ * We expect MESSAGE IN phase.
+ * If not, get help from the C code.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_RESEL_NO_MSG_IN,
+}/*-------------------------< RESELECTED1 >----------------------*/,{
+ /*
+ * Load the synchronous transfer registers.
+ */
+ SCR_COPY (1),
+ HADDR_1 (tcb_head.wval),
+ RADDR_1 (scntl3),
+ SCR_COPY (1),
+ HADDR_1 (tcb_head.sval),
+ RADDR_1 (sxfer),
+ /*
+ * Get the IDENTIFY message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin),
+ /*
+ * If IDENTIFY LUN #0, use a faster path
+ * to find the LCB structure.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)),
+ PADDR_A (resel_lun0),
+ /*
+ * If message isn't an IDENTIFY,
+ * tell the C code about.
+ */
+ SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+ SIR_RESEL_NO_IDENTIFY,
+ /*
+ * It is an IDENTIFY message,
+ * Load the LUN control block address.
+ */
+ SCR_COPY (4),
+ HADDR_1 (tcb_head.luntbl_sa),
+ RADDR_1 (dsa),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a90),
+ SCR_COPY (4),
+}/*-------------------------< _SMS_A90 >-------------------------*/,{
+ 0,
+ RADDR_1 (dsa),
+ SCR_JUMPR,
+ 12,
+}/*-------------------------< RESEL_LUN0 >-----------------------*/,{
+ /*
+ * LUN 0 special case (but usual one :))
+ */
+ SCR_COPY (4),
+ HADDR_1 (tcb_head.lun0_sa),
+ RADDR_1 (dsa),
+ /*
+ * Jump indirectly to the reselect action for this LUN.
+ * (lcb.head.resel_sa assumed at offset zero of lcb).
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a100),
+ SCR_COPY (4),
+}/*-------------------------< _SMS_A100 >------------------------*/,{
+ 0,
+ RADDR_1 (temp),
+ SCR_RETURN,
+ 0,
+ /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
+}/*-------------------------< RESEL_TAG >------------------------*/,{
+ /*
+ * ACK the IDENTIFY previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * It shall be a tagged command.
+ * Read SIMPLE+TAG.
+ * The C code will deal with errors.
+ * Aggressive optimization, isn't it? :)
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ HADDR_1 (msgin),
+ /*
+ * Copy the LCB header to a fixed place in
+ * the HCB using self-modifying SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a110),
+ SCR_COPY (sizeof(struct sym_lcbh)),
+}/*-------------------------< _SMS_A110 >------------------------*/,{
+ 0,
+ HADDR_1 (lcb_head),
+ /*
+ * Load the pointer to the tagged task
+ * table for this LUN.
+ */
+ SCR_COPY (4),
+ HADDR_1 (lcb_head.itlq_tbl_sa),
+ RADDR_1 (dsa),
+ /*
+ * The SIDL still contains the TAG value.
+ * Aggressive optimization, isn't it? :):)
+ */
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
+ 0,
+#if SYM_CONF_MAX_TASK*4 > 512
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 2),
+ 0,
+ SCR_REG_REG (sfbr, SCR_SHL, 0),
+ 0,
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#elif SYM_CONF_MAX_TASK*4 > 256
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#endif
+ /*
+ * Retrieve the DSA of this task.
+ * JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a120),
+ SCR_COPY (4),
+}/*-------------------------< _SMS_A120 >------------------------*/,{
+ 0,
+ RADDR_1 (dsa),
+}/*-------------------------< RESEL_GO >-------------------------*/,{
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a130),
+ /*
+ * Move 'ccb.phys.head.go' action to
+ * scratch/scratch1. So scratch1 will
+ * contain the 'restart' field of the
+ * 'go' structure.
+ */
+ SCR_COPY (8),
+}/*-------------------------< _SMS_A130 >------------------------*/,{
+ 0,
+ PADDR_B (scratch),
+ SCR_COPY (4),
+ PADDR_B (scratch1), /* phys.head.go.restart */
+ RADDR_1 (temp),
+ SCR_RETURN,
+ 0,
+ /* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_DSA >------------------------*/,{
+ /*
+ * ACK the IDENTIFY or TAG previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+}/*-------------------------< RESEL_DSA1 >-----------------------*/,{
+ /*
+ * Copy the CCB header to a fixed location
+ * in the HCB using self-modifying SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a140),
+ SCR_COPY (sizeof(struct sym_ccbh)),
+}/*-------------------------< _SMS_A140 >------------------------*/,{
+ 0,
+ HADDR_1 (ccb_head),
+ /*
+ * Initialize the status register
+ */
+ SCR_COPY (4),
+ HADDR_1 (ccb_head.status),
+ RADDR_1 (scr0),
+ /*
+ * Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< RESEL_NO_TAG >---------------------*/,{
+ /*
+ * Copy the LCB header to a fixed place in
+ * the HCB using self-modifying SCRIPTS.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ PADDR_A (_sms_a145),
+ SCR_COPY (sizeof(struct sym_lcbh)),
+}/*-------------------------< _SMS_A145 >------------------------*/,{
+ 0,
+ HADDR_1 (lcb_head),
+ /*
+ * Load the DSA with the unique ITL task.
+ */
+ SCR_COPY (4),
+ HADDR_1 (lcb_head.itl_task_sa),
+ RADDR_1 (dsa),
+ SCR_JUMP,
+ PADDR_A (resel_go),
+}/*-------------------------< DATA_IN >--------------------------*/,{
+/*
+ * Because the size depends on the
+ * #define SYM_CONF_MAX_SG parameter,
+ * it is filled in at runtime.
+ *
+ * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ * || SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ * || offsetof (struct sym_dsb, data[ i]),
+ * ##==========================================
+ */
+0
+}/*-------------------------< DATA_IN2 >-------------------------*/,{
+ SCR_CALL,
+ PADDR_A (datai_done),
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< DATA_OUT >-------------------------*/,{
+/*
+ * Because the size depends on the
+ * #define SYM_CONF_MAX_SG parameter,
+ * it is filled in at runtime.
+ *
+ * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ * || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ * || offsetof (struct sym_dsb, data[ i]),
+ * ##==========================================
+ */
+0
+}/*-------------------------< DATA_OUT2 >------------------------*/,{
+ SCR_CALL,
+ PADDR_A (datao_done),
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< PM0_DATA >-------------------------*/,{
+ /*
+ * Read our host flags to SFBR, so we will be able
+ * to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ * Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR_A (pm0_data_out),
+ /*
+ * Actual phase is DATA IN.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ * Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.pm0.sg),
+ SCR_JUMP,
+ PADDR_A (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >---------------------*/,{
+ /*
+ * Actual phase is DATA OUT.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ * Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct sym_ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >---------------------*/,{
+ /*
+ * Clear the flag that told we were moving
+ * data from the PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+ 0,
+ /*
+ * Return to the previous DATA script which
+ * is guaranteed by design (if no bug) to be
+ * the main DATA script for this transfer.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ RADDR_1 (scratcha),
+ SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm0.ret)),
+ 0,
+}/*-------------------------< PM_DATA_END >----------------------*/,{
+ SCR_COPY (4),
+ RADDR_1 (scratcha),
+ PADDR_A (_sms_a150),
+ SCR_COPY (4),
+}/*-------------------------< _SMS_A150 >------------------------*/,{
+ 0,
+ RADDR_1 (temp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< PM1_DATA >-------------------------*/,{
+ /*
+ * Read our host flags to SFBR, so we will be able
+ * to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ * Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR_A (pm1_data_out),
+ /*
+ * Actual phase is DATA IN.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ * Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.pm1.sg),
+ SCR_JUMP,
+ PADDR_A (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >---------------------*/,{
+ /*
+ * Actual phase is DATA OUT.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ * Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct sym_ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >---------------------*/,{
+ /*
+ * Clear the flag that told we were moving
+ * data from the PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+ 0,
+ /*
+ * Return to the previous DATA script which
+ * is guaranteed by design (if no bug) to be
+ * the main DATA script for this transfer.
+ */
+ SCR_COPY (4),
+ RADDR_1 (dsa),
+ RADDR_1 (scratcha),
+ SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm1.ret)),
+ 0,
+ SCR_JUMP,
+ PADDR_A (pm_data_end),
+}/*--------------------------<>----------------------------------*/
+};
+
+static struct SYM_FWB_SCR SYM_FWB_SCR = {
+/*-------------------------< NO_DATA >--------------------------*/ {
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{
+ /*
+ * We are jumped here by the C code, if we have
+ * some target to reset or some disconnected
+ * job to abort. Since error recovery is a serious
+ * busyness, we will really reset the SCSI BUS, if
+ * case of a SCSI interrupt occurring in this path.
+ */
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ /*
+ * Set initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+#endif
+ /*
+ * And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
+ PADDR_A (reselect),
+ /*
+ * Wait for the selection to complete or
+ * the selection to time out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ -8,
+ /*
+ * Call the C code.
+ */
+ SCR_INT,
+ SIR_TARGET_SELECTED,
+ /*
+ * The C code should let us continue here.
+ * Send the 'kiss of death' message.
+ * We expect an immediate disconnect once
+ * the target has eaten the message.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct sym_hcb, abrt_tbl),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ * Tell the C code that we are done.
+ */
+ SCR_INT,
+ SIR_ABORT_SENT,
+}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{
+ /*
+ * Jump at scheduler.
+ */
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< MSG_IN_ETC >-----------------------*/,{
+ /*
+ * If it is an EXTENDED (variable size message)
+ * Handle it.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDR_B (msg_extended),
+ /*
+ * Let the C code handle any other
+ * 1 byte message.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+ PADDR_B (msg_received),
+ SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+ PADDR_B (msg_received),
+ /*
+ * We donnot handle 2 bytes messages from SCRIPTS.
+ * So, let the C code deal with these ones too.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+ PADDR_B (msg_weird_seen),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[1]),
+}/*-------------------------< MSG_RECEIVED >---------------------*/,{
+ SCR_COPY (4), /* DUMMY READ */
+ HADDR_1 (scratch),
+ RADDR_1 (scratcha),
+ SCR_INT,
+ SIR_MSG_RECEIVED,
+}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{
+ SCR_COPY (4), /* DUMMY READ */
+ HADDR_1 (scratch),
+ RADDR_1 (scratcha),
+ SCR_INT,
+ SIR_MSG_WEIRD,
+}/*-------------------------< MSG_EXTENDED >---------------------*/,{
+ /*
+ * Clear ACK and get the next byte
+ * assumed to be the message length.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[1]),
+ /*
+ * Try to catch some unlikely situations as 0 length
+ * or too large the length.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDR_B (msg_weird_seen),
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+ 0,
+ SCR_JUMP ^ IFTRUE (CARRYSET),
+ PADDR_B (msg_weird_seen),
+ /*
+ * We donnot handle extended messages from SCRIPTS.
+ * Read the amount of data corresponding to the
+ * message length and call the C code.
+ */
+ SCR_COPY (1),
+ RADDR_1 (scratcha),
+ PADDR_B (_sms_b10),
+ SCR_CLR (SCR_ACK),
+ 0,
+}/*-------------------------< _SMS_B10 >-------------------------*/,{
+ SCR_MOVE_ABS (0) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[2]),
+ SCR_JUMP,
+ PADDR_B (msg_received),
+}/*-------------------------< MSG_BAD >--------------------------*/,{
+ /*
+ * unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR_A (clrack),
+}/*-------------------------< MSG_WEIRD >------------------------*/,{
+ /*
+ * weird message received
+ * ignore all MSG IN phases and reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR_A (dispatch),
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (scratch),
+ SCR_JUMP,
+ PADDR_B (msg_weird1),
+}/*-------------------------< WDTR_RESP >------------------------*/,{
+ /*
+ * let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_WDTR >------------------------*/,{
+ /*
+ * Send the M_X_WIDE_REQ
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_JUMP,
+ PADDR_B (msg_out_done),
+}/*-------------------------< SDTR_RESP >------------------------*/,{
+ /*
+ * let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_SDTR >------------------------*/,{
+ /*
+ * Send the M_X_SYNC_REQ
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_JUMP,
+ PADDR_B (msg_out_done),
+}/*-------------------------< PPR_RESP >-------------------------*/,{
+ /*
+ * let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_PPR >-------------------------*/,{
+ /*
+ * Send the M_X_PPR_REQ
+ */
+ SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_JUMP,
+ PADDR_B (msg_out_done),
+}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{
+ SCR_INT,
+ SIR_NEGO_PROTO,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< MSG_OUT >--------------------------*/,{
+ /*
+ * The target requests a message.
+ * We donnot send messages that may
+ * require the device to go to bus free.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ /*
+ * ... wait for the next phase
+ * if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (msg_out),
+}/*-------------------------< MSG_OUT_DONE >---------------------*/,{
+ /*
+ * Let the C code be aware of the
+ * sent message and clear the message.
+ */
+ SCR_INT,
+ SIR_MSG_OUT_DONE,
+ /*
+ * ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+ /*
+ * Zero scratcha that will count the
+ * extras bytes.
+ */
+ SCR_COPY (4),
+ PADDR_B (zero),
+ RADDR_1 (scratcha),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+ /*
+ * The target may want to transfer too much data.
+ *
+ * If phase is DATA OUT write 1 byte and count it.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 16,
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+ HADDR_1 (scratch),
+ SCR_JUMP,
+ PADDR_B (data_ovrun2),
+ /*
+ * If WSR is set, clear this condition, and
+ * count this byte.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDR_B (data_ovrun2),
+ /*
+ * Finally check against DATA IN phase.
+ * Signal data overrun to the C code
+ * and jump to dispatcher if not so.
+ * Read 1 byte otherwise and count it.
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ 16,
+ SCR_INT,
+ SIR_DATA_OVERRUN,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+ HADDR_1 (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+ /*
+ * Count this byte.
+ * This will allow to return a negative
+ * residual to user.
+ */
+ SCR_REG_REG (scratcha, SCR_ADD, 0x01),
+ 0,
+ SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+ 0,
+ /*
+ * .. and repeat as required.
+ */
+ SCR_JUMP,
+ PADDR_B (data_ovrun1),
+}/*-------------------------< ABORT_RESEL >----------------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * send the abort/abortag/reset message
+ * we expect an immediate disconnect
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_INT,
+ SIR_RESEL_ABORTED,
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< RESEND_IDENT >---------------------*/,{
+ /*
+ * The target stays in MSG OUT phase after having acked
+ * Identify [+ Tag [+ Extended message ]]. Targets shall
+ * behave this way on parity error.
+ * We must send it again all the messages.
+ */
+ SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
+ 0, /* 1rst ACK = 90 ns. Hope the chip isn't too fast */
+ SCR_JUMP,
+ PADDR_A (send_ident),
+}/*-------------------------< IDENT_BREAK >----------------------*/,{
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR_A (select2),
+}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR_A (select2),
+}/*-------------------------< SDATA_IN >-------------------------*/,{
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_dsb, sense),
+ SCR_CALL,
+ PADDR_A (datai_done),
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{
+ /*
+ * Message is an IDENTIFY, but lun is unknown.
+ * Signal problem to C code for logging the event.
+ * Send a M_ABORT to clear all pending tasks.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_LUN,
+ SCR_JUMP,
+ PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------------*/,{
+ /*
+ * We donnot have a task for that I_T_L.
+ * Signal problem to C code for logging the event.
+ * Send a M_ABORT message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L,
+ SCR_JUMP,
+ PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{
+ /*
+ * We donnot have a task that matches the tag.
+ * Signal problem to C code for logging the event.
+ * Send a M_ABORTTAG message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L_Q,
+ SCR_JUMP,
+ PADDR_B (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------------*/,{
+ /*
+ * Anything different from INTERMEDIATE
+ * CONDITION MET should be a bad SCSI status,
+ * given that GOOD status has already been tested.
+ * Call the C code.
+ */
+ SCR_COPY (4),
+ PADDR_B (startpos),
+ RADDR_1 (scratcha),
+ SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+ SIR_BAD_SCSI_STATUS,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< WSR_MA_HELPER >--------------------*/,{
+ /*
+ * Helper for the C code when WSR bit is set.
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.wresid),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+
+}/*-------------------------< ZERO >-----------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >--------------------------*/,{
+ SCR_DATA_ZERO, /* MUST BE BEFORE SCRATCH1 */
+}/*-------------------------< SCRATCH1 >-------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PREV_DONE >------------------------*/,{
+ SCR_DATA_ZERO, /* MUST BE BEFORE DONE_POS ! */
+}/*-------------------------< DONE_POS >-------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< NEXTJOB >--------------------------*/,{
+ SCR_DATA_ZERO, /* MUST BE BEFORE STARTPOS ! */
+}/*-------------------------< STARTPOS >-------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >--------------------------*/,{
+ SCR_DATA_ZERO,
+}/*--------------------------<>----------------------------------*/
+};
+
+static struct SYM_FWZ_SCR SYM_FWZ_SCR = {
+ /*-------------------------< SNOOPTEST >------------------------*/{
+ /*
+ * Read the variable.
+ */
+ SCR_COPY (4),
+ HADDR_1 (scratch),
+ RADDR_1 (scratcha),
+ /*
+ * Write the variable.
+ */
+ SCR_COPY (4),
+ RADDR_1 (temp),
+ HADDR_1 (scratch),
+ /*
+ * Read back the variable.
+ */
+ SCR_COPY (4),
+ HADDR_1 (scratch),
+ RADDR_1 (temp),
+}/*-------------------------< SNOOPEND >-------------------------*/,{
+ /*
+ * And stop.
+ */
+ SCR_INT,
+ 99,
+}/*--------------------------<>----------------------------------*/
+};
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw2.h b/drivers/scsi/sym53c8xx_2/sym_fw2.h
new file mode 100644
index 000000000..c87d72443
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw2.h
@@ -0,0 +1,1875 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Scripts for SYMBIOS-Processor
+ *
+ * We have to know the offsets of all labels before we reach
+ * them (for forward jumps). Therefore we declare a struct
+ * here. If you make changes inside the script,
+ *
+ * DONT FORGET TO CHANGE THE LENGTHS HERE!
+ */
+
+/*
+ * Script fragments which are loaded into the on-chip RAM
+ * of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
+ * Must not exceed 4K bytes.
+ */
+struct SYM_FWA_SCR {
+ u32 start [ 14];
+ u32 getjob_begin [ 4];
+ u32 getjob_end [ 4];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ u32 select [ 6];
+#else
+ u32 select [ 4];
+#endif
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+ u32 is_dmap_dirty [ 4];
+#endif
+ u32 wf_sel_done [ 2];
+ u32 sel_done [ 2];
+ u32 send_ident [ 2];
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 select2 [ 8];
+#else
+ u32 select2 [ 2];
+#endif
+ u32 command [ 2];
+ u32 dispatch [ 28];
+ u32 sel_no_cmd [ 10];
+ u32 init [ 6];
+ u32 clrack [ 4];
+ u32 datai_done [ 10];
+ u32 datai_done_wsr [ 20];
+ u32 datao_done [ 10];
+ u32 datao_done_wss [ 6];
+ u32 datai_phase [ 4];
+ u32 datao_phase [ 6];
+ u32 msg_in [ 2];
+ u32 msg_in2 [ 10];
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 status [ 14];
+#else
+ u32 status [ 10];
+#endif
+ u32 complete [ 6];
+ u32 complete2 [ 12];
+ u32 done [ 14];
+ u32 done_end [ 2];
+ u32 complete_error [ 4];
+ u32 save_dp [ 12];
+ u32 restore_dp [ 8];
+ u32 disconnect [ 12];
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 idle [ 4];
+#else
+ u32 idle [ 2];
+#endif
+#ifdef SYM_CONF_IARB_SUPPORT
+ u32 ungetjob [ 6];
+#else
+ u32 ungetjob [ 4];
+#endif
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ u32 reselect [ 4];
+#else
+ u32 reselect [ 2];
+#endif
+ u32 reselected [ 22];
+ u32 resel_scntl4 [ 20];
+ u32 resel_lun0 [ 6];
+#if SYM_CONF_MAX_TASK*4 > 512
+ u32 resel_tag [ 26];
+#elif SYM_CONF_MAX_TASK*4 > 256
+ u32 resel_tag [ 20];
+#else
+ u32 resel_tag [ 16];
+#endif
+ u32 resel_dsa [ 2];
+ u32 resel_dsa1 [ 4];
+ u32 resel_no_tag [ 6];
+ u32 data_in [SYM_CONF_MAX_SG * 2];
+ u32 data_in2 [ 4];
+ u32 data_out [SYM_CONF_MAX_SG * 2];
+ u32 data_out2 [ 4];
+ u32 pm0_data [ 12];
+ u32 pm0_data_out [ 6];
+ u32 pm0_data_end [ 6];
+ u32 pm1_data [ 12];
+ u32 pm1_data_out [ 6];
+ u32 pm1_data_end [ 6];
+};
+
+/*
+ * Script fragments which stay in main memory for all chips
+ * except for chips that support 8K on-chip RAM.
+ */
+struct SYM_FWB_SCR {
+ u32 start64 [ 2];
+ u32 no_data [ 2];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ u32 sel_for_abort [ 18];
+#else
+ u32 sel_for_abort [ 16];
+#endif
+ u32 sel_for_abort_1 [ 2];
+ u32 msg_in_etc [ 12];
+ u32 msg_received [ 4];
+ u32 msg_weird_seen [ 4];
+ u32 msg_extended [ 20];
+ u32 msg_bad [ 6];
+ u32 msg_weird [ 4];
+ u32 msg_weird1 [ 8];
+
+ u32 wdtr_resp [ 6];
+ u32 send_wdtr [ 4];
+ u32 sdtr_resp [ 6];
+ u32 send_sdtr [ 4];
+ u32 ppr_resp [ 6];
+ u32 send_ppr [ 4];
+ u32 nego_bad_phase [ 4];
+ u32 msg_out [ 4];
+ u32 msg_out_done [ 4];
+ u32 data_ovrun [ 2];
+ u32 data_ovrun1 [ 22];
+ u32 data_ovrun2 [ 8];
+ u32 abort_resel [ 16];
+ u32 resend_ident [ 4];
+ u32 ident_break [ 4];
+ u32 ident_break_atn [ 4];
+ u32 sdata_in [ 6];
+ u32 resel_bad_lun [ 4];
+ u32 bad_i_t_l [ 4];
+ u32 bad_i_t_l_q [ 4];
+ u32 bad_status [ 6];
+ u32 pm_handle [ 20];
+ u32 pm_handle1 [ 4];
+ u32 pm_save [ 4];
+ u32 pm0_save [ 12];
+ u32 pm_save_end [ 4];
+ u32 pm1_save [ 14];
+
+ /* WSR handling */
+ u32 pm_wsr_handle [ 38];
+ u32 wsr_ma_helper [ 4];
+
+ /* Data area */
+ u32 zero [ 1];
+ u32 scratch [ 1];
+ u32 pm0_data_addr [ 1];
+ u32 pm1_data_addr [ 1];
+ u32 done_pos [ 1];
+ u32 startpos [ 1];
+ u32 targtbl [ 1];
+};
+
+/*
+ * Script fragments used at initialisations.
+ * Only runs out of main memory.
+ */
+struct SYM_FWZ_SCR {
+ u32 snooptest [ 6];
+ u32 snoopend [ 2];
+};
+
+static struct SYM_FWA_SCR SYM_FWA_SCR = {
+/*--------------------------< START >----------------------------*/ {
+ /*
+ * Switch the LED on.
+ * Will be patched with a NO_OP if LED
+ * not needed or not desired.
+ */
+ SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+ 0,
+ /*
+ * Clear SIGP.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+ /*
+ * Stop here if the C code wants to perform
+ * some error recovery procedure manually.
+ * (Indicate this by setting SEM in ISTAT)
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ /*
+ * Report to the C code the next position in
+ * the start queue the SCRIPTS will schedule.
+ * The C code must not change SCRATCHA.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDR_B (startpos),
+ SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+ SIR_SCRIPT_STOPPED,
+ /*
+ * Start the next job.
+ *
+ * @DSA = start point for this job.
+ * SCRATCHA = address of this job in the start queue.
+ *
+ * We will restore startpos with SCRATCHA if we fails the
+ * arbitration or if it is the idle job.
+ *
+ * The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
+ * is a critical path. If it is partially executed, it then
+ * may happen that the job address is not yet in the DSA
+ * and the next queue position points to the next JOB.
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDR_B (startpos),
+ SCR_LOAD_REL (temp, 4),
+ 4,
+}/*-------------------------< GETJOB_BEGIN >---------------------*/,{
+ SCR_STORE_ABS (temp, 4),
+ PADDR_B (startpos),
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+}/*-------------------------< GETJOB_END >-----------------------*/,{
+ SCR_LOAD_REL (temp, 4),
+ 0,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< SELECT >---------------------------*/,{
+ /*
+ * DSA contains the address of a scheduled
+ * data structure.
+ *
+ * SCRATCHA contains the address of the start queue
+ * entry which points to the next job.
+ *
+ * Set Initiator mode.
+ *
+ * (Target mode is left as an exercise for the reader)
+ */
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ SCR_CLR (SCR_TRG),
+ 0,
+#endif
+ /*
+ * And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select),
+ PADDR_A (ungetjob),
+ /*
+ * Now there are 4 possibilities:
+ *
+ * (1) The chip loses arbitration.
+ * This is ok, because it will try again,
+ * when the bus becomes idle.
+ * (But beware of the timeout function!)
+ *
+ * (2) The chip is reselected.
+ * Then the script processor takes the jump
+ * to the RESELECT label.
+ *
+ * (3) The chip wins arbitration.
+ * Then it will execute SCRIPTS instruction until
+ * the next instruction that checks SCSI phase.
+ * Then will stop and wait for selection to be
+ * complete or selection time-out to occur.
+ *
+ * After having won arbitration, the SCRIPTS
+ * processor is able to execute instructions while
+ * the SCSI core is performing SCSI selection.
+ */
+ /*
+ * Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct sym_ccb, phys.head.status),
+ /*
+ * We may need help from CPU if the DMA segment
+ * registers aren't up-to-date for this IO.
+ * Patched with NOOP for chips that donnot
+ * support DAC addressing.
+ */
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+}/*-------------------------< IS_DMAP_DIRTY >--------------------*/,{
+ SCR_FROM_REG (HX_REG),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (HX_DMAP_DIRTY, HX_DMAP_DIRTY)),
+ SIR_DMAP_DIRTY,
+#endif
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEL_DONE >-------------------------*/,{
+ /*
+ * C1010-33 errata work-around.
+ * Due to a race, the SCSI core may not have
+ * loaded SCNTL3 on SEL_TBL instruction.
+ * We reload it once phase is stable.
+ * Patched with a NOOP for other chips.
+ */
+ SCR_LOAD_REL (scntl3, 1),
+ offsetof(struct sym_dsb, select.sel_scntl3),
+}/*-------------------------< SEND_IDENT >-----------------------*/,{
+ /*
+ * Selection complete.
+ * Send the IDENTIFY and possibly the TAG message
+ * and negotiation message if present.
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct sym_dsb, smsg),
+}/*-------------------------< SELECT2 >--------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * Set IMMEDIATE ARBITRATION if we have been given
+ * a hint to do so. (Some job to do after this one).
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ * Anticipate the COMMAND phase.
+ * This is the PHASE we expect at this point.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+ PADDR_A (sel_no_cmd),
+}/*-------------------------< COMMAND >--------------------------*/,{
+ /*
+ * ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct sym_dsb, cmd),
+}/*-------------------------< DISPATCH >-------------------------*/,{
+ /*
+ * MSG_IN is the only phase that shall be
+ * entered at least once for each (re)selection.
+ * So we test it first.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (msg_in),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+ PADDR_A (datao_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+ PADDR_A (datai_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR_A (status),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR_A (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDR_B (msg_out),
+ /*
+ * Discard as many illegal phases as
+ * required and tell the C code about.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ HADDR_1 (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+ -16,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ HADDR_1 (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+ -16,
+ SCR_INT,
+ SIR_BAD_PHASE,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< SEL_NO_CMD >-----------------------*/,{
+ /*
+ * The target does not switch to command
+ * phase after IDENTIFY has been sent.
+ *
+ * If it stays in MSG OUT phase send it
+ * the IDENTIFY again.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (resend_ident),
+ /*
+ * If target does not switch to MSG IN phase
+ * and we sent a negotiation, assert the
+ * failure immediately.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (dispatch),
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ * Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< INIT >-----------------------------*/,{
+ /*
+ * Wait for the SCSI RESET signal to be
+ * inactive before restarting operations,
+ * since the chip may hang on SEL_ATN
+ * if SCSI RESET is active.
+ */
+ SCR_FROM_REG (sstat0),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+ -16,
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< CLRACK >---------------------------*/,{
+ /*
+ * Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE >-----------------------*/,{
+ /*
+ * Save current pointer to LASTP.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct sym_ccb, phys.head.lastp),
+ /*
+ * If the SWIDE is not full, jump to dispatcher.
+ * We anticipate a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)),
+ PADDR_A (datai_done_wsr),
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR_A (status),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{
+ /*
+ * The SWIDE is full.
+ * Clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ /*
+ * We are expecting an IGNORE RESIDUE message
+ * from the device, otherwise we are in data
+ * overrun condition. Check against MSG_IN phase.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR_A (dispatch),
+ /*
+ * We are in MSG_IN phase,
+ * Read the first byte of the message.
+ * If it is not an IGNORE RESIDUE message,
+ * signal overrun and jump to message
+ * processing.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[0]),
+ SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ PADDR_A (msg_in2),
+ /*
+ * We got the message we expected.
+ * Read the 2nd byte, and jump to dispatcher.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[1]),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE >-----------------------*/,{
+ /*
+ * Save current pointer to LASTP.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct sym_ccb, phys.head.lastp),
+ /*
+ * If the SODL is not full jump to dispatcher.
+ * We anticipate a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)),
+ PADDR_A (datao_done_wss),
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR_A (status),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{
+ /*
+ * The SODL is full, clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSS),
+ 0,
+ /*
+ * And signal a DATA UNDERRUN condition
+ * to the C code.
+ */
+ SCR_INT,
+ SIR_SODL_UNDERRUN,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATAI_PHASE >----------------------*/,{
+ /*
+ * Jump to current pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct sym_ccb, phys.head.lastp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATAO_PHASE >----------------------*/,{
+ /*
+ * C1010-66 errata work-around.
+ * Extra clocks of data hold must be inserted
+ * in DATA OUT phase on 33 MHz PCI BUS.
+ * Patched with a NOOP for other chips.
+ */
+ SCR_REG_REG (scntl4, SCR_OR, (XCLKH_DT|XCLKH_ST)),
+ 0,
+ /*
+ * Jump to current pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct sym_ccb, phys.head.lastp),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< MSG_IN >---------------------------*/,{
+ /*
+ * Get the first byte of the message.
+ *
+ * The script processor doesn't negate the
+ * ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------------*/,{
+ /*
+ * Check first against 1 byte messages
+ * that we handle from SCRIPTS.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+ PADDR_A (complete),
+ SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+ PADDR_A (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+ PADDR_A (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+ PADDR_A (restore_dp),
+ /*
+ * We handle all other messages from the
+ * C code, so no need to waste on-chip RAM
+ * for those ones.
+ */
+ SCR_JUMP,
+ PADDR_B (msg_in_etc),
+}/*-------------------------< STATUS >---------------------------*/,{
+ /*
+ * get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ HADDR_1 (scratch),
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
+ * since we may have to tamper the start queue from
+ * the C code.
+ */
+ SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+ 0,
+#endif
+ /*
+ * save status to scsi_status.
+ * mark as complete.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ /*
+ * Anticipate the MESSAGE PHASE for
+ * the TASK COMPLETE message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (msg_in),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< COMPLETE >-------------------------*/,{
+ /*
+ * Complete message.
+ *
+ * When we terminate the cycle by clearing ACK,
+ * the target may disconnect immediately.
+ *
+ * We don't want to be told of an "unexpected disconnect",
+ * so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ * Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ * ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< COMPLETE2 >------------------------*/,{
+ /*
+ * Save host status.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct sym_ccb, phys.head.status),
+ /*
+ * Some bridges may reorder DMA writes to memory.
+ * We donnot want the CPU to deal with completions
+ * without all the posted write having been flushed
+ * to memory. This DUMMY READ should flush posted
+ * buffers prior to the CPU having to deal with
+ * completions.
+ */
+ SCR_LOAD_REL (scr0, 4), /* DUMMY READ */
+ offsetof (struct sym_ccb, phys.head.status),
+
+ /*
+ * If command resulted in not GOOD status,
+ * call the C code if needed.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ PADDR_B (bad_status),
+ /*
+ * If we performed an auto-sense, call
+ * the C code to synchronyze task aborts
+ * with UNIT ATTENTION conditions.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
+ PADDR_A (complete_error),
+}/*-------------------------< DONE >-----------------------------*/,{
+ /*
+ * Copy the DSA to the DONE QUEUE and
+ * signal completion to the host.
+ * If we are interrupted between DONE
+ * and DONE_END, we must reset, otherwise
+ * the completed CCB may be lost.
+ */
+ SCR_STORE_ABS (dsa, 4),
+ PADDR_B (scratch),
+ SCR_LOAD_ABS (dsa, 4),
+ PADDR_B (done_pos),
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDR_B (scratch),
+ SCR_STORE_REL (scratcha, 4),
+ 0,
+ /*
+ * The instruction below reads the DONE QUEUE next
+ * free position from memory.
+ * In addition it ensures that all PCI posted writes
+ * are flushed and so the DSA value of the done
+ * CCB is visible by the CPU before INTFLY is raised.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ 4,
+ SCR_INT_FLY,
+ 0,
+ SCR_STORE_ABS (scratcha, 4),
+ PADDR_B (done_pos),
+}/*-------------------------< DONE_END >-------------------------*/,{
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< COMPLETE_ERROR >-------------------*/,{
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDR_B (startpos),
+ SCR_INT,
+ SIR_COMPLETE_ERROR,
+}/*-------------------------< SAVE_DP >--------------------------*/,{
+ /*
+ * Clear ACK immediately.
+ * No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * Keep track we received a SAVE DP, so
+ * we will switch to the other PM context
+ * on the next PM since the DP may point
+ * to the current PM context.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ /*
+ * SAVE_DP message:
+ * Copy LASTP to SAVEP.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct sym_ccb, phys.head.lastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct sym_ccb, phys.head.savep),
+ /*
+ * Anticipate the MESSAGE PHASE for
+ * the DISCONNECT message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR_A (msg_in),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< RESTORE_DP >-----------------------*/,{
+ /*
+ * Clear ACK immediately.
+ * No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * Copy SAVEP to LASTP.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct sym_ccb, phys.head.savep),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct sym_ccb, phys.head.lastp),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DISCONNECT >-----------------------*/,{
+ /*
+ * DISCONNECTing ...
+ *
+ * disable the "unexpected disconnect" feature,
+ * and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ * Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ * Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ /*
+ * Save host status.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct sym_ccb, phys.head.status),
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< IDLE >-----------------------------*/,{
+ /*
+ * Nothing to do?
+ * Switch the LED off and wait for reselect.
+ * Will be patched with a NO_OP if LED
+ * not needed or not desired.
+ */
+ SCR_REG_REG (gpreg, SCR_OR, 0x01),
+ 0,
+#ifdef SYM_CONF_IARB_SUPPORT
+ SCR_JUMPR,
+ 8,
+#endif
+}/*-------------------------< UNGETJOB >-------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * Set IMMEDIATE ARBITRATION, for the next time.
+ * This will give us better chance to win arbitration
+ * for the job we just wanted to do.
+ */
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ * We are not able to restart the SCRIPTS if we are
+ * interrupted and these instruction haven't been
+ * all executed. BTW, this is very unlikely to
+ * happen, but we check that from the C code.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_STORE_ABS (scratcha, 4),
+ PADDR_B (startpos),
+}/*-------------------------< RESELECT >-------------------------*/,{
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ /*
+ * Make sure we are in initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+#endif
+ /*
+ * Sleep waiting for a reselection.
+ */
+ SCR_WAIT_RESEL,
+ PADDR_A(start),
+}/*-------------------------< RESELECTED >-----------------------*/,{
+ /*
+ * Switch the LED on.
+ * Will be patched with a NO_OP if LED
+ * not needed or not desired.
+ */
+ SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+ 0,
+ /*
+ * load the target id into the sdid
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (sdid),
+ 0,
+ /*
+ * Load the target control block address
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDR_B (targtbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0x3c),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ /*
+ * We expect MESSAGE IN phase.
+ * If not, get help from the C code.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_RESEL_NO_MSG_IN,
+ /*
+ * Load the legacy synchronous transfer registers.
+ */
+ SCR_LOAD_REL (scntl3, 1),
+ offsetof(struct sym_tcb, head.wval),
+ SCR_LOAD_REL (sxfer, 1),
+ offsetof(struct sym_tcb, head.sval),
+}/*-------------------------< RESEL_SCNTL4 >---------------------*/,{
+ /*
+ * The C1010 uses a new synchronous timing scheme.
+ * Will be patched with a NO_OP if not a C1010.
+ */
+ SCR_LOAD_REL (scntl4, 1),
+ offsetof(struct sym_tcb, head.uval),
+ /*
+ * Get the IDENTIFY message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin),
+ /*
+ * If IDENTIFY LUN #0, use a faster path
+ * to find the LCB structure.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)),
+ PADDR_A (resel_lun0),
+ /*
+ * If message isn't an IDENTIFY,
+ * tell the C code about.
+ */
+ SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+ SIR_RESEL_NO_IDENTIFY,
+ /*
+ * It is an IDENTIFY message,
+ * Load the LUN control block address.
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct sym_tcb, head.luntbl_sa),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_JUMPR,
+ 8,
+}/*-------------------------< RESEL_LUN0 >-----------------------*/,{
+ /*
+ * LUN 0 special case (but usual one :))
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct sym_tcb, head.lun0_sa),
+ /*
+ * Jump indirectly to the reselect action for this LUN.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct sym_lcb, head.resel_sa),
+ SCR_RETURN,
+ 0,
+ /* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
+}/*-------------------------< RESEL_TAG >------------------------*/,{
+ /*
+ * ACK the IDENTIFY previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * It shall be a tagged command.
+ * Read SIMPLE+TAG.
+ * The C code will deal with errors.
+ * Aggressive optimization, isn't it? :)
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ HADDR_1 (msgin),
+ /*
+ * Load the pointer to the tagged task
+ * table for this LUN.
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct sym_lcb, head.itlq_tbl_sa),
+ /*
+ * The SIDL still contains the TAG value.
+ * Aggressive optimization, isn't it? :):)
+ */
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
+ 0,
+#if SYM_CONF_MAX_TASK*4 > 512
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 2),
+ 0,
+ SCR_REG_REG (sfbr, SCR_SHL, 0),
+ 0,
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#elif SYM_CONF_MAX_TASK*4 > 256
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#endif
+ /*
+ * Retrieve the DSA of this task.
+ * JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct sym_ccb, phys.head.go.restart),
+ SCR_RETURN,
+ 0,
+ /* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_DSA >------------------------*/,{
+ /*
+ * ACK the IDENTIFY or TAG previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+}/*-------------------------< RESEL_DSA1 >-----------------------*/,{
+ /*
+ * Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct sym_ccb, phys.head.status),
+ /*
+ * Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< RESEL_NO_TAG >---------------------*/,{
+ /*
+ * Load the DSA with the unique ITL task.
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct sym_lcb, head.itl_task_sa),
+ /*
+ * JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct sym_ccb, phys.head.go.restart),
+ SCR_RETURN,
+ 0,
+ /* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< DATA_IN >--------------------------*/,{
+/*
+ * Because the size depends on the
+ * #define SYM_CONF_MAX_SG parameter,
+ * it is filled in at runtime.
+ *
+ * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ * || SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ * || offsetof (struct sym_dsb, data[ i]),
+ * ##==========================================
+ */
+0
+}/*-------------------------< DATA_IN2 >-------------------------*/,{
+ SCR_CALL,
+ PADDR_A (datai_done),
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< DATA_OUT >-------------------------*/,{
+/*
+ * Because the size depends on the
+ * #define SYM_CONF_MAX_SG parameter,
+ * it is filled in at runtime.
+ *
+ * ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ * || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ * || offsetof (struct sym_dsb, data[ i]),
+ * ##==========================================
+ */
+0
+}/*-------------------------< DATA_OUT2 >------------------------*/,{
+ SCR_CALL,
+ PADDR_A (datao_done),
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< PM0_DATA >-------------------------*/,{
+ /*
+ * Read our host flags to SFBR, so we will be able
+ * to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ * Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR_A (pm0_data_out),
+ /*
+ * Actual phase is DATA IN.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ * Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.pm0.sg),
+ SCR_JUMP,
+ PADDR_A (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >---------------------*/,{
+ /*
+ * Actual phase is DATA OUT.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ * Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct sym_ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >---------------------*/,{
+ /*
+ * Clear the flag that told we were moving
+ * data from the PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+ 0,
+ /*
+ * Return to the previous DATA script which
+ * is guaranteed by design (if no bug) to be
+ * the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct sym_ccb, phys.pm0.ret),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< PM1_DATA >-------------------------*/,{
+ /*
+ * Read our host flags to SFBR, so we will be able
+ * to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ * Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR_A (pm1_data_out),
+ /*
+ * Actual phase is DATA IN.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ * Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.pm1.sg),
+ SCR_JUMP,
+ PADDR_A (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >---------------------*/,{
+ /*
+ * Actual phase is DATA OUT.
+ * Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDR_B (data_ovrun),
+ /*
+ * Keep track we are moving data from the
+ * PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ * Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct sym_ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >---------------------*/,{
+ /*
+ * Clear the flag that told we were moving
+ * data from the PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+ 0,
+ /*
+ * Return to the previous DATA script which
+ * is guaranteed by design (if no bug) to be
+ * the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct sym_ccb, phys.pm1.ret),
+ SCR_RETURN,
+ 0,
+}/*-------------------------<>-----------------------------------*/
+};
+
+static struct SYM_FWB_SCR SYM_FWB_SCR = {
+/*--------------------------< START64 >--------------------------*/ {
+ /*
+ * SCRIPT entry point for the 895A, 896 and 1010.
+ * For now, there is no specific stuff for those
+ * chips at this point, but this may come.
+ */
+ SCR_JUMP,
+ PADDR_A (init),
+}/*-------------------------< NO_DATA >--------------------------*/,{
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{
+ /*
+ * We are jumped here by the C code, if we have
+ * some target to reset or some disconnected
+ * job to abort. Since error recovery is a serious
+ * busyness, we will really reset the SCSI BUS, if
+ * case of a SCSI interrupt occurring in this path.
+ */
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+ /*
+ * Set initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+#endif
+ /*
+ * And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
+ PADDR_A (reselect),
+ /*
+ * Wait for the selection to complete or
+ * the selection to time out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ -8,
+ /*
+ * Call the C code.
+ */
+ SCR_INT,
+ SIR_TARGET_SELECTED,
+ /*
+ * The C code should let us continue here.
+ * Send the 'kiss of death' message.
+ * We expect an immediate disconnect once
+ * the target has eaten the message.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct sym_hcb, abrt_tbl),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ * Tell the C code that we are done.
+ */
+ SCR_INT,
+ SIR_ABORT_SENT,
+}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{
+ /*
+ * Jump at scheduler.
+ */
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< MSG_IN_ETC >-----------------------*/,{
+ /*
+ * If it is an EXTENDED (variable size message)
+ * Handle it.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDR_B (msg_extended),
+ /*
+ * Let the C code handle any other
+ * 1 byte message.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+ PADDR_B (msg_received),
+ SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+ PADDR_B (msg_received),
+ /*
+ * We donnot handle 2 bytes messages from SCRIPTS.
+ * So, let the C code deal with these ones too.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+ PADDR_B (msg_weird_seen),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[1]),
+}/*-------------------------< MSG_RECEIVED >---------------------*/,{
+ SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_RECEIVED,
+}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{
+ SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_WEIRD,
+}/*-------------------------< MSG_EXTENDED >---------------------*/,{
+ /*
+ * Clear ACK and get the next byte
+ * assumed to be the message length.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (msgin[1]),
+ /*
+ * Try to catch some unlikely situations as 0 length
+ * or too large the length.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDR_B (msg_weird_seen),
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+ 0,
+ SCR_JUMP ^ IFTRUE (CARRYSET),
+ PADDR_B (msg_weird_seen),
+ /*
+ * We donnot handle extended messages from SCRIPTS.
+ * Read the amount of data corresponding to the
+ * message length and call the C code.
+ */
+ SCR_STORE_REL (scratcha, 1),
+ offsetof (struct sym_dsb, smsg_ext.size),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_IN,
+ offsetof (struct sym_dsb, smsg_ext),
+ SCR_JUMP,
+ PADDR_B (msg_received),
+}/*-------------------------< MSG_BAD >--------------------------*/,{
+ /*
+ * unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR_A (clrack),
+}/*-------------------------< MSG_WEIRD >------------------------*/,{
+ /*
+ * weird message received
+ * ignore all MSG IN phases and reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR_A (dispatch),
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ HADDR_1 (scratch),
+ SCR_JUMP,
+ PADDR_B (msg_weird1),
+}/*-------------------------< WDTR_RESP >------------------------*/,{
+ /*
+ * let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_WDTR >------------------------*/,{
+ /*
+ * Send the M_X_WIDE_REQ
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_JUMP,
+ PADDR_B (msg_out_done),
+}/*-------------------------< SDTR_RESP >------------------------*/,{
+ /*
+ * let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_SDTR >------------------------*/,{
+ /*
+ * Send the M_X_SYNC_REQ
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_JUMP,
+ PADDR_B (msg_out_done),
+}/*-------------------------< PPR_RESP >-------------------------*/,{
+ /*
+ * let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_PPR >-------------------------*/,{
+ /*
+ * Send the M_X_PPR_REQ
+ */
+ SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_JUMP,
+ PADDR_B (msg_out_done),
+}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{
+ SCR_INT,
+ SIR_NEGO_PROTO,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< MSG_OUT >--------------------------*/,{
+ /*
+ * The target requests a message.
+ * We donnot send messages that may
+ * require the device to go to bus free.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ /*
+ * ... wait for the next phase
+ * if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDR_B (msg_out),
+}/*-------------------------< MSG_OUT_DONE >---------------------*/,{
+ /*
+ * Let the C code be aware of the
+ * sent message and clear the message.
+ */
+ SCR_INT,
+ SIR_MSG_OUT_DONE,
+ /*
+ * ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+ /*
+ * Use scratcha to count the extra bytes.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDR_B (zero),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+ /*
+ * The target may want to transfer too much data.
+ *
+ * If phase is DATA OUT write 1 byte and count it.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 16,
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+ HADDR_1 (scratch),
+ SCR_JUMP,
+ PADDR_B (data_ovrun2),
+ /*
+ * If WSR is set, clear this condition, and
+ * count this byte.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDR_B (data_ovrun2),
+ /*
+ * Finally check against DATA IN phase.
+ * Signal data overrun to the C code
+ * and jump to dispatcher if not so.
+ * Read 1 byte otherwise and count it.
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ 16,
+ SCR_INT,
+ SIR_DATA_OVERRUN,
+ SCR_JUMP,
+ PADDR_A (dispatch),
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+ HADDR_1 (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+ /*
+ * Count this byte.
+ * This will allow to return a negative
+ * residual to user.
+ */
+ SCR_REG_REG (scratcha, SCR_ADD, 0x01),
+ 0,
+ SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+ 0,
+ /*
+ * .. and repeat as required.
+ */
+ SCR_JUMP,
+ PADDR_B (data_ovrun1),
+}/*-------------------------< ABORT_RESEL >----------------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ * send the abort/abortag/reset message
+ * we expect an immediate disconnect
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ HADDR_1 (msgout),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_INT,
+ SIR_RESEL_ABORTED,
+ SCR_JUMP,
+ PADDR_A (start),
+}/*-------------------------< RESEND_IDENT >---------------------*/,{
+ /*
+ * The target stays in MSG OUT phase after having acked
+ * Identify [+ Tag [+ Extended message ]]. Targets shall
+ * behave this way on parity error.
+ * We must send it again all the messages.
+ */
+ SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the */
+ 0, /* 1rst ACK = 90 ns. Hope the chip isn't too fast */
+ SCR_JUMP,
+ PADDR_A (send_ident),
+}/*-------------------------< IDENT_BREAK >----------------------*/,{
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR_A (select2),
+}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR_A (select2),
+}/*-------------------------< SDATA_IN >-------------------------*/,{
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_dsb, sense),
+ SCR_CALL,
+ PADDR_A (datai_done),
+ SCR_JUMP,
+ PADDR_B (data_ovrun),
+}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{
+ /*
+ * Message is an IDENTIFY, but lun is unknown.
+ * Signal problem to C code for logging the event.
+ * Send a M_ABORT to clear all pending tasks.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_LUN,
+ SCR_JUMP,
+ PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------------*/,{
+ /*
+ * We donnot have a task for that I_T_L.
+ * Signal problem to C code for logging the event.
+ * Send a M_ABORT message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L,
+ SCR_JUMP,
+ PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{
+ /*
+ * We donnot have a task that matches the tag.
+ * Signal problem to C code for logging the event.
+ * Send a M_ABORTTAG message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L_Q,
+ SCR_JUMP,
+ PADDR_B (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------------*/,{
+ /*
+ * Anything different from INTERMEDIATE
+ * CONDITION MET should be a bad SCSI status,
+ * given that GOOD status has already been tested.
+ * Call the C code.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDR_B (startpos),
+ SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+ SIR_BAD_SCSI_STATUS,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< PM_HANDLE >------------------------*/,{
+ /*
+ * Phase mismatch handling.
+ *
+ * Since we have to deal with 2 SCSI data pointers
+ * (current and saved), we need at least 2 contexts.
+ * Each context (pm0 and pm1) has a saved area, a
+ * SAVE mini-script and a DATA phase mini-script.
+ */
+ /*
+ * Get the PM handling flags.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ * If no flags (1rst PM for example), avoid
+ * all the below heavy flags testing.
+ * This makes the normal case a bit faster.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
+ PADDR_B (pm_handle1),
+ /*
+ * If we received a SAVE DP, switch to the
+ * other PM context since the savep may point
+ * to the current PM context.
+ */
+ SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
+ 8,
+ SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
+ 0,
+ /*
+ * If we have been interrupt in a PM DATA mini-script,
+ * we take the return address from the corresponding
+ * saved area.
+ * This ensure the return address always points to the
+ * main DATA script for this transfer.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
+ PADDR_B (pm_handle1),
+ SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
+ 16,
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct sym_ccb, phys.pm0.ret),
+ SCR_JUMP,
+ PADDR_B (pm_save),
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct sym_ccb, phys.pm1.ret),
+ SCR_JUMP,
+ PADDR_B (pm_save),
+}/*-------------------------< PM_HANDLE1 >-----------------------*/,{
+ /*
+ * Normal case.
+ * Update the return address so that it
+ * will point after the interrupted MOVE.
+ */
+ SCR_REG_REG (ia, SCR_ADD, 8),
+ 0,
+ SCR_REG_REG (ia1, SCR_ADDC, 0),
+ 0,
+}/*-------------------------< PM_SAVE >--------------------------*/,{
+ /*
+ * Clear all the flags that told us if we were
+ * interrupted in a PM DATA mini-script and/or
+ * we received a SAVE DP.
+ */
+ SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
+ 0,
+ /*
+ * Choose the current PM context.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
+ PADDR_B (pm1_save),
+}/*-------------------------< PM0_SAVE >-------------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct sym_ccb, phys.pm0.ret),
+ /*
+ * If WSR bit is set, either UA and RBC may
+ * have to be changed whether the device wants
+ * to ignore this residue or not.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDR_B (pm_wsr_handle),
+ /*
+ * Save the remaining byte count, the updated
+ * address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct sym_ccb, phys.pm0.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct sym_ccb, phys.pm0.sg.addr),
+ /*
+ * Set the current pointer at the PM0 DATA mini-script.
+ */
+ SCR_LOAD_ABS (ia, 4),
+ PADDR_B (pm0_data_addr),
+}/*-------------------------< PM_SAVE_END >----------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct sym_ccb, phys.head.lastp),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+}/*-------------------------< PM1_SAVE >-------------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct sym_ccb, phys.pm1.ret),
+ /*
+ * If WSR bit is set, either UA and RBC may
+ * have to be changed whether the device wants
+ * to ignore this residue or not.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDR_B (pm_wsr_handle),
+ /*
+ * Save the remaining byte count, the updated
+ * address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct sym_ccb, phys.pm1.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct sym_ccb, phys.pm1.sg.addr),
+ /*
+ * Set the current pointer at the PM1 DATA mini-script.
+ */
+ SCR_LOAD_ABS (ia, 4),
+ PADDR_B (pm1_data_addr),
+ SCR_JUMP,
+ PADDR_B (pm_save_end),
+}/*-------------------------< PM_WSR_HANDLE >--------------------*/,{
+ /*
+ * Phase mismatch handling from SCRIPT with WSR set.
+ * Such a condition can occur if the chip wants to
+ * execute a CHMOV(size > 1) when the WSR bit is
+ * set and the target changes PHASE.
+ *
+ * We must move the residual byte to memory.
+ *
+ * UA contains bit 0..31 of the address to
+ * move the residual byte.
+ * Move it to the table indirect.
+ */
+ SCR_STORE_REL (ua, 4),
+ offsetof (struct sym_ccb, phys.wresid.addr),
+ /*
+ * Increment UA (move address to next position).
+ */
+ SCR_REG_REG (ua, SCR_ADD, 1),
+ 0,
+ SCR_REG_REG (ua1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua2, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua3, SCR_ADDC, 0),
+ 0,
+ /*
+ * Compute SCRATCHA as:
+ * - size to transfer = 1 byte.
+ * - bit 24..31 = high address bit [32...39].
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDR_B (zero),
+ SCR_REG_REG (scratcha, SCR_OR, 1),
+ 0,
+ SCR_FROM_REG (rbc3),
+ 0,
+ SCR_TO_REG (scratcha3),
+ 0,
+ /*
+ * Move this value to the table indirect.
+ */
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct sym_ccb, phys.wresid.size),
+ /*
+ * Wait for a valid phase.
+ * While testing with bogus QUANTUM drives, the C1010
+ * sometimes raised a spurious phase mismatch with
+ * WSR and the CHMOV(1) triggered another PM.
+ * Waiting explicitly for the PHASE seemed to avoid
+ * the nested phase mismatch. Btw, this didn't happen
+ * using my IBM drives.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ 0,
+ /*
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.wresid),
+ /*
+ * We can now handle the phase mismatch with UA fixed.
+ * RBC[0..23]=0 is a special case that does not require
+ * a PM context. The C code also checks against this.
+ */
+ SCR_FROM_REG (rbc),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc1),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc2),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ /*
+ * RBC[0..23]=0.
+ * Not only we donnot need a PM context, but this would
+ * lead to a bogus CHMOV(0). This condition means that
+ * the residual was the last byte to move from this CHMOV.
+ * So, we just have to move the current data script pointer
+ * (i.e. TEMP) to the SCRIPTS address following the
+ * interrupted CHMOV and jump to dispatcher.
+ * IA contains the data pointer to save.
+ */
+ SCR_JUMP,
+ PADDR_B (pm_save_end),
+}/*-------------------------< WSR_MA_HELPER >--------------------*/,{
+ /*
+ * Helper for the C code when WSR bit is set.
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct sym_ccb, phys.wresid),
+ SCR_JUMP,
+ PADDR_A (dispatch),
+
+}/*-------------------------< ZERO >-----------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >--------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM0_DATA_ADDR >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM1_DATA_ADDR >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< DONE_POS >-------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< STARTPOS >-------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >--------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------<>-----------------------------------*/
+};
+
+static struct SYM_FWZ_SCR SYM_FWZ_SCR = {
+ /*-------------------------< SNOOPTEST >------------------------*/{
+ /*
+ * Read the variable from memory.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof(struct sym_hcb, scratch),
+ /*
+ * Write the variable to memory.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof(struct sym_hcb, scratch),
+ /*
+ * Read back the variable from memory.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct sym_hcb, scratch),
+}/*-------------------------< SNOOPEND >-------------------------*/,{
+ /*
+ * And stop.
+ */
+ SCR_INT,
+ 99,
+}/*-------------------------<>-----------------------------------*/
+};
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
new file mode 100644
index 000000000..5d00e514f
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -0,0 +1,2080 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+
+#include "sym_glue.h"
+#include "sym_nvram.h"
+
+#define NAME53C "sym53c"
+#define NAME53C8XX "sym53c8xx"
+
+struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
+unsigned int sym_debug_flags = 0;
+
+static char *excl_string;
+static char *safe_string;
+module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
+module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
+module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
+module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
+module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
+module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
+module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
+module_param_named(verb, sym_driver_setup.verbose, byte, 0);
+module_param_named(debug, sym_debug_flags, uint, 0);
+module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
+module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
+module_param_named(excl, excl_string, charp, 0);
+module_param_named(safe, safe_string, charp, 0);
+
+MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
+MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers");
+MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
+MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
+MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
+MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
+MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
+MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
+MODULE_PARM_DESC(debug, "Set bits to enable debugging");
+MODULE_PARM_DESC(settle, "Settle delay in seconds. Default 3");
+MODULE_PARM_DESC(nvram, "Option currently not used");
+MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
+MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SYM_VERSION);
+MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
+MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");
+
+static void sym2_setup_params(void)
+{
+ char *p = excl_string;
+ int xi = 0;
+
+ while (p && (xi < 8)) {
+ char *next_p;
+ int val = (int) simple_strtoul(p, &next_p, 0);
+ sym_driver_setup.excludes[xi++] = val;
+ p = next_p;
+ }
+
+ if (safe_string) {
+ if (*safe_string == 'y') {
+ sym_driver_setup.max_tag = 0;
+ sym_driver_setup.burst_order = 0;
+ sym_driver_setup.scsi_led = 0;
+ sym_driver_setup.scsi_diff = 1;
+ sym_driver_setup.irq_mode = 0;
+ sym_driver_setup.scsi_bus_check = 2;
+ sym_driver_setup.host_id = 7;
+ sym_driver_setup.verbose = 2;
+ sym_driver_setup.settle_delay = 10;
+ sym_driver_setup.use_nvram = 1;
+ } else if (*safe_string != 'n') {
+ printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s"
+ " passed to safe option", safe_string);
+ }
+ }
+}
+
+static struct scsi_transport_template *sym2_transport_template = NULL;
+
+/*
+ * Driver private area in the SCSI command structure.
+ */
+struct sym_ucmd { /* Override the SCSI pointer structure */
+ struct completion *eh_done; /* SCSI error handling */
+};
+
+#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
+#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
+
+/*
+ * Complete a pending CAM CCB.
+ */
+void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
+{
+ struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
+ BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
+
+ if (ucmd->eh_done)
+ complete(ucmd->eh_done);
+
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+}
+
+/*
+ * Tell the SCSI layer about a BUS RESET.
+ */
+void sym_xpt_async_bus_reset(struct sym_hcb *np)
+{
+ printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
+ np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
+ np->s.settle_time_valid = 1;
+ if (sym_verbose >= 2)
+ printf_info("%s: command processing suspended for %d seconds\n",
+ sym_name(np), sym_driver_setup.settle_delay);
+}
+
+/*
+ * Choose the more appropriate CAM status if
+ * the IO encountered an extended error.
+ */
+static int sym_xerr_cam_status(int cam_status, int x_status)
+{
+ if (x_status) {
+ if (x_status & XE_PARITY_ERR)
+ cam_status = DID_PARITY;
+ else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
+ cam_status = DID_ERROR;
+ else if (x_status & XE_BAD_PHASE)
+ cam_status = DID_ERROR;
+ else
+ cam_status = DID_ERROR;
+ }
+ return cam_status;
+}
+
+/*
+ * Build CAM result for a failed or auto-sensed IO.
+ */
+void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
+{
+ struct scsi_cmnd *cmd = cp->cmd;
+ u_int cam_status, scsi_status, drv_status;
+
+ drv_status = 0;
+ cam_status = DID_OK;
+ scsi_status = cp->ssss_status;
+
+ if (cp->host_flags & HF_SENSE) {
+ scsi_status = cp->sv_scsi_status;
+ resid = cp->sv_resid;
+ if (sym_verbose && cp->sv_xerr_status)
+ sym_print_xerr(cmd, cp->sv_xerr_status);
+ if (cp->host_status == HS_COMPLETE &&
+ cp->ssss_status == S_GOOD &&
+ cp->xerr_status == 0) {
+ cam_status = sym_xerr_cam_status(DID_OK,
+ cp->sv_xerr_status);
+ drv_status = DRIVER_SENSE;
+ /*
+ * Bounce back the sense data to user.
+ */
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->sense_buffer, cp->sns_bbuf,
+ min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
+#if 0
+ /*
+ * If the device reports a UNIT ATTENTION condition
+ * due to a RESET condition, we should consider all
+ * disconnect CCBs for this unit as aborted.
+ */
+ if (1) {
+ u_char *p;
+ p = (u_char *) cmd->sense_data;
+ if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
+ sym_clear_tasks(np, DID_ABORT,
+ cp->target,cp->lun, -1);
+ }
+#endif
+ } else {
+ /*
+ * Error return from our internal request sense. This
+ * is bad: we must clear the contingent allegiance
+ * condition otherwise the device will always return
+ * BUSY. Use a big stick.
+ */
+ sym_reset_scsi_target(np, cmd->device->id);
+ cam_status = DID_ERROR;
+ }
+ } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */
+ cam_status = DID_OK;
+ else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */
+ cam_status = DID_NO_CONNECT;
+ else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/
+ cam_status = DID_ERROR;
+ else { /* Extended error */
+ if (sym_verbose) {
+ sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
+ cp->host_status, cp->ssss_status,
+ cp->xerr_status);
+ }
+ /*
+ * Set the most appropriate value for CAM status.
+ */
+ cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
+ }
+ scsi_set_resid(cmd, resid);
+ cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
+}
+
+static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
+{
+ int segment;
+ int use_sg;
+
+ cp->data_len = 0;
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg > 0) {
+ struct scatterlist *sg;
+ struct sym_tcb *tp = &np->target[cp->target];
+ struct sym_tblmove *data;
+
+ if (use_sg > SYM_CONF_MAX_SG) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
+
+ data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
+
+ scsi_for_each_sg(cmd, sg, use_sg, segment) {
+ dma_addr_t baddr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ if ((len & 1) && (tp->head.wval & EWS)) {
+ len++;
+ cp->odd_byte_adjustment++;
+ }
+
+ sym_build_sge(np, &data[segment], baddr, len);
+ cp->data_len += len;
+ }
+ } else {
+ segment = -2;
+ }
+
+ return segment;
+}
+
+/*
+ * Queue a SCSI command.
+ */
+static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct sym_tcb *tp;
+ struct sym_lcb *lp;
+ struct sym_ccb *cp;
+ int order;
+
+ /*
+ * Retrieve the target descriptor.
+ */
+ tp = &np->target[sdev->id];
+
+ /*
+ * Select tagged/untagged.
+ */
+ lp = sym_lp(tp, sdev->lun);
+ order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
+
+ /*
+ * Queue the SCSI IO.
+ */
+ cp = sym_get_ccb(np, cmd, order);
+ if (!cp)
+ return 1; /* Means resource shortage */
+ sym_queue_scsiio(np, cmd, cp);
+ return 0;
+}
+
+/*
+ * Setup buffers and pointers that address the CDB.
+ */
+static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
+{
+ memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);
+
+ cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]);
+ cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
+
+ return 0;
+}
+
+/*
+ * Setup pointers that address the data and start the I/O.
+ */
+int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
+{
+ u32 lastp, goalp;
+ int dir;
+
+ /*
+ * Build the CDB.
+ */
+ if (sym_setup_cdb(np, cmd, cp))
+ goto out_abort;
+
+ /*
+ * No direction means no data.
+ */
+ dir = cmd->sc_data_direction;
+ if (dir != DMA_NONE) {
+ cp->segments = sym_scatter(np, cp, cmd);
+ if (cp->segments < 0) {
+ sym_set_cam_status(cmd, DID_ERROR);
+ goto out_abort;
+ }
+
+ /*
+ * No segments means no data.
+ */
+ if (!cp->segments)
+ dir = DMA_NONE;
+ } else {
+ cp->data_len = 0;
+ cp->segments = 0;
+ }
+
+ /*
+ * Set the data pointer.
+ */
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command");
+ sym_set_cam_status(cmd, DID_ERROR);
+ goto out_abort;
+ case DMA_TO_DEVICE:
+ goalp = SCRIPTA_BA(np, data_out2) + 8;
+ lastp = goalp - 8 - (cp->segments * (2*4));
+ break;
+ case DMA_FROM_DEVICE:
+ cp->host_flags |= HF_DATA_IN;
+ goalp = SCRIPTA_BA(np, data_in2) + 8;
+ lastp = goalp - 8 - (cp->segments * (2*4));
+ break;
+ case DMA_NONE:
+ default:
+ lastp = goalp = SCRIPTB_BA(np, no_data);
+ break;
+ }
+
+ /*
+ * Set all pointers values needed by SCRIPTS.
+ */
+ cp->phys.head.lastp = cpu_to_scr(lastp);
+ cp->phys.head.savep = cpu_to_scr(lastp);
+ cp->startp = cp->phys.head.savep;
+ cp->goalp = cpu_to_scr(goalp);
+
+ /*
+ * When `#ifed 1', the code below makes the driver
+ * panic on the first attempt to write to a SCSI device.
+ * It is the first test we want to do after a driver
+ * change that does not seem obviously safe. :)
+ */
+#if 0
+ switch (cp->cdb_buf[0]) {
+ case 0x0A: case 0x2A: case 0xAA:
+ panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
+ break;
+ default:
+ break;
+ }
+#endif
+
+ /*
+ * activate this job.
+ */
+ sym_put_start_queue(np, cp);
+ return 0;
+
+out_abort:
+ sym_free_ccb(np, cp);
+ sym_xpt_done(np, cmd);
+ return 0;
+}
+
+
+/*
+ * timer daemon.
+ *
+ * Misused to keep the driver running when
+ * interrupts are not configured correctly.
+ */
+static void sym_timer(struct sym_hcb *np)
+{
+ unsigned long thistime = jiffies;
+
+ /*
+ * Restart the timer.
+ */
+ np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
+ add_timer(&np->s.timer);
+
+ /*
+ * If we are resetting the ncr, wait for settle_time before
+ * clearing it. Then command processing will be resumed.
+ */
+ if (np->s.settle_time_valid) {
+ if (time_before_eq(np->s.settle_time, thistime)) {
+ if (sym_verbose >= 2 )
+ printk("%s: command processing resumed\n",
+ sym_name(np));
+ np->s.settle_time_valid = 0;
+ }
+ return;
+ }
+
+ /*
+ * Nothing to do for now, but that may come.
+ */
+ if (np->s.lasttime + 4*HZ < thistime) {
+ np->s.lasttime = thistime;
+ }
+
+#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
+ /*
+ * Some way-broken PCI bridges may lead to
+ * completions being lost when the clearing
+ * of the INTFLY flag by the CPU occurs
+ * concurrently with the chip raising this flag.
+ * If this ever happen, lost completions will
+ * be reaped here.
+ */
+ sym_wakeup_done(np);
+#endif
+}
+
+
+/*
+ * PCI BUS error handler.
+ */
+void sym_log_bus_error(struct Scsi_Host *shost)
+{
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ unsigned short pci_sts;
+ pci_read_config_word(pdev, PCI_STATUS, &pci_sts);
+ if (pci_sts & 0xf900) {
+ pci_write_config_word(pdev, PCI_STATUS, pci_sts);
+ shost_printk(KERN_WARNING, shost,
+ "PCI bus error: status = 0x%04x\n", pci_sts & 0xf900);
+ }
+}
+
+/*
+ * queuecommand method. Entered with the host adapter lock held and
+ * interrupts disabled.
+ */
+static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
+ struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
+ int sts = 0;
+
+ cmd->scsi_done = done;
+ memset(ucp, 0, sizeof(*ucp));
+
+ /*
+ * Shorten our settle_time if needed for
+ * this command not to time out.
+ */
+ if (np->s.settle_time_valid && cmd->request->timeout) {
+ unsigned long tlimit = jiffies + cmd->request->timeout;
+ tlimit -= SYM_CONF_TIMER_INTERVAL*2;
+ if (time_after(np->s.settle_time, tlimit)) {
+ np->s.settle_time = tlimit;
+ }
+ }
+
+ if (np->s.settle_time_valid)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ sts = sym_queue_command(np, cmd);
+ if (sts)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ return 0;
+}
+
+static DEF_SCSI_QCMD(sym53c8xx_queue_command)
+
+/*
+ * Linux entry point of the interrupt handler.
+ */
+static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
+{
+ struct Scsi_Host *shost = dev_id;
+ struct sym_data *sym_data = shost_priv(shost);
+ irqreturn_t result;
+
+ /* Avoid spinloop trying to handle interrupts on frozen device */
+ if (pci_channel_offline(sym_data->pdev))
+ return IRQ_NONE;
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");
+
+ spin_lock(shost->host_lock);
+ result = sym_interrupt(shost);
+ spin_unlock(shost->host_lock);
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");
+
+ return result;
+}
+
+/*
+ * Linux entry point of the timer handler
+ */
+static void sym53c8xx_timer(unsigned long npref)
+{
+ struct sym_hcb *np = (struct sym_hcb *)npref;
+ unsigned long flags;
+
+ spin_lock_irqsave(np->s.host->host_lock, flags);
+ sym_timer(np);
+ spin_unlock_irqrestore(np->s.host->host_lock, flags);
+}
+
+
+/*
+ * What the eh thread wants us to perform.
+ */
+#define SYM_EH_ABORT 0
+#define SYM_EH_DEVICE_RESET 1
+#define SYM_EH_BUS_RESET 2
+#define SYM_EH_HOST_RESET 3
+
+/*
+ * Generic method for our eh processing.
+ * The 'op' argument tells what we have to do.
+ */
+static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
+{
+ struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
+ struct Scsi_Host *shost = cmd->device->host;
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+ SYM_QUEHEAD *qp;
+ int cmd_queued = 0;
+ int sts = -1;
+ struct completion eh_done;
+
+ scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);
+
+ /* We may be in an error condition because the PCI bus
+ * went down. In this case, we need to wait until the
+ * PCI bus is reset, the card is reset, and only then
+ * proceed with the scsi error recovery. There's no
+ * point in hurrying; take a leisurely wait.
+ */
+#define WAIT_FOR_PCI_RECOVERY 35
+ if (pci_channel_offline(pdev)) {
+ int finished_reset = 0;
+ init_completion(&eh_done);
+ spin_lock_irq(shost->host_lock);
+ /* Make sure we didn't race */
+ if (pci_channel_offline(pdev)) {
+ BUG_ON(sym_data->io_reset);
+ sym_data->io_reset = &eh_done;
+ } else {
+ finished_reset = 1;
+ }
+ spin_unlock_irq(shost->host_lock);
+ if (!finished_reset)
+ finished_reset = wait_for_completion_timeout
+ (sym_data->io_reset,
+ WAIT_FOR_PCI_RECOVERY*HZ);
+ spin_lock_irq(shost->host_lock);
+ sym_data->io_reset = NULL;
+ spin_unlock_irq(shost->host_lock);
+ if (!finished_reset)
+ return SCSI_FAILED;
+ }
+
+ spin_lock_irq(shost->host_lock);
+ /* This one is queued in some place -> to wait for completion */
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ if (cp->cmd == cmd) {
+ cmd_queued = 1;
+ break;
+ }
+ }
+
+ /* Try to proceed the operation we have been asked for */
+ sts = -1;
+ switch(op) {
+ case SYM_EH_ABORT:
+ sts = sym_abort_scsiio(np, cmd, 1);
+ break;
+ case SYM_EH_DEVICE_RESET:
+ sts = sym_reset_scsi_target(np, cmd->device->id);
+ break;
+ case SYM_EH_BUS_RESET:
+ sym_reset_scsi_bus(np, 1);
+ sts = 0;
+ break;
+ case SYM_EH_HOST_RESET:
+ sym_reset_scsi_bus(np, 0);
+ sym_start_up(shost, 1);
+ sts = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* On error, restore everything and cross fingers :) */
+ if (sts)
+ cmd_queued = 0;
+
+ if (cmd_queued) {
+ init_completion(&eh_done);
+ ucmd->eh_done = &eh_done;
+ spin_unlock_irq(shost->host_lock);
+ if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
+ ucmd->eh_done = NULL;
+ sts = -2;
+ }
+ } else {
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
+ sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
+ return sts ? SCSI_FAILED : SCSI_SUCCESS;
+}
+
+
+/*
+ * Error handlers called from the eh thread (one thread per HBA).
+ */
+static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
+}
+
+static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
+}
+
+static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+ return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
+}
+
+static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
+}
+
+/*
+ * Tune device queuing depth, according to various limits.
+ */
+static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
+{
+ struct sym_lcb *lp = sym_lp(tp, lun);
+ u_short oldtags;
+
+ if (!lp)
+ return;
+
+ oldtags = lp->s.reqtags;
+
+ if (reqtags > lp->s.scdev_depth)
+ reqtags = lp->s.scdev_depth;
+
+ lp->s.reqtags = reqtags;
+
+ if (reqtags != oldtags) {
+ dev_info(&tp->starget->dev,
+ "tagged command queuing %s, command queue depth %d.\n",
+ lp->s.reqtags ? "enabled" : "disabled", reqtags);
+ }
+}
+
+static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
+{
+ struct sym_hcb *np = sym_get_hcb(sdev->host);
+ struct sym_tcb *tp = &np->target[sdev->id];
+ struct sym_lcb *lp;
+ unsigned long flags;
+ int error;
+
+ if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
+ return -ENXIO;
+
+ spin_lock_irqsave(np->s.host->host_lock, flags);
+
+ /*
+ * Fail the device init if the device is flagged NOSCAN at BOOT in
+ * the NVRAM. This may speed up boot and maintain coherency with
+ * BIOS device numbering. Clearing the flag allows the user to
+ * rescan skipped devices later. We also return an error for
+ * devices not flagged for SCAN LUNS in the NVRAM since some single
+ * lun devices behave badly when asked for a non zero LUN.
+ */
+
+ if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
+ tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
+ starget_printk(KERN_INFO, sdev->sdev_target,
+ "Scan at boot disabled in NVRAM\n");
+ error = -ENXIO;
+ goto out;
+ }
+
+ if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
+ if (sdev->lun != 0) {
+ error = -ENXIO;
+ goto out;
+ }
+ starget_printk(KERN_INFO, sdev->sdev_target,
+ "Multiple LUNs disabled in NVRAM\n");
+ }
+
+ lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
+ if (!lp) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (tp->nlcb == 1)
+ tp->starget = sdev->sdev_target;
+
+ spi_min_period(tp->starget) = tp->usr_period;
+ spi_max_width(tp->starget) = tp->usr_width;
+
+ error = 0;
+out:
+ spin_unlock_irqrestore(np->s.host->host_lock, flags);
+
+ return error;
+}
+
+/*
+ * Linux entry point for device queue sizing.
+ */
+static int sym53c8xx_slave_configure(struct scsi_device *sdev)
+{
+ struct sym_hcb *np = sym_get_hcb(sdev->host);
+ struct sym_tcb *tp = &np->target[sdev->id];
+ struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+ int reqtags, depth_to_use;
+
+ /*
+ * Get user flags.
+ */
+ lp->curr_flags = lp->user_flags;
+
+ /*
+ * Select queue depth from driver setup.
+ * Do not use more than configured by user.
+ * Use at least 1.
+ * Do not use more than our maximum.
+ */
+ reqtags = sym_driver_setup.max_tag;
+ if (reqtags > tp->usrtags)
+ reqtags = tp->usrtags;
+ if (!sdev->tagged_supported)
+ reqtags = 0;
+ if (reqtags > SYM_CONF_MAX_TAG)
+ reqtags = SYM_CONF_MAX_TAG;
+ depth_to_use = reqtags ? reqtags : 1;
+ scsi_change_queue_depth(sdev, depth_to_use);
+ lp->s.scdev_depth = depth_to_use;
+ sym_tune_dev_queuing(tp, sdev->lun, reqtags);
+
+ if (!spi_initial_dv(sdev->sdev_target))
+ spi_dv_device(sdev);
+
+ return 0;
+}
+
+static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+{
+ struct sym_hcb *np = sym_get_hcb(sdev->host);
+ struct sym_tcb *tp = &np->target[sdev->id];
+ struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+ unsigned long flags;
+
+ /* if slave_alloc returned before allocating a sym_lcb, return */
+ if (!lp)
+ return;
+
+ spin_lock_irqsave(np->s.host->host_lock, flags);
+
+ if (lp->busy_itlq || lp->busy_itl) {
+ /*
+ * This really shouldn't happen, but we can't return an error
+ * so let's try to stop all on-going I/O.
+ */
+ starget_printk(KERN_WARNING, tp->starget,
+ "Removing busy LCB (%d)\n", (u8)sdev->lun);
+ sym_reset_scsi_bus(np, 1);
+ }
+
+ if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
+ /*
+ * It was the last unit for this target.
+ */
+ tp->head.sval = 0;
+ tp->head.wval = np->rv_scntl3;
+ tp->head.uval = 0;
+ tp->tgoal.check_nego = 1;
+ tp->starget = NULL;
+ }
+
+ spin_unlock_irqrestore(np->s.host->host_lock, flags);
+}
+
+/*
+ * Linux entry point for info() function
+ */
+static const char *sym53c8xx_info (struct Scsi_Host *host)
+{
+ return SYM_DRIVER_NAME;
+}
+
+
+#ifdef SYM_LINUX_PROC_INFO_SUPPORT
+/*
+ * Proc file system stuff
+ *
+ * A read operation returns adapter information.
+ * A write operation is a control command.
+ * The string is parsed in the driver code and the command is passed
+ * to the sym_usercmd() function.
+ */
+
+#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
+
+struct sym_usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_SETVERBOSE 17
+#define UC_RESETDEV 18
+#define UC_CLEARDEV 19
+
+static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
+{
+ struct sym_tcb *tp;
+ int t, l;
+
+ switch (uc->cmd) {
+ case 0: return;
+
+#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
+ case UC_SETDEBUG:
+ sym_debug_flags = uc->data;
+ break;
+#endif
+ case UC_SETVERBOSE:
+ np->verbose = uc->data;
+ break;
+ default:
+ /*
+ * We assume that other commands apply to targets.
+ * This should always be the case and avoid the below
+ * 4 lines to be repeated 6 times.
+ */
+ for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
+ if (!((uc->target >> t) & 1))
+ continue;
+ tp = &np->target[t];
+ if (!tp->nlcb)
+ continue;
+
+ switch (uc->cmd) {
+
+ case UC_SETSYNC:
+ if (!uc->data || uc->data >= 255) {
+ tp->tgoal.iu = tp->tgoal.dt =
+ tp->tgoal.qas = 0;
+ tp->tgoal.offset = 0;
+ } else if (uc->data <= 9 && np->minsync_dt) {
+ if (uc->data < np->minsync_dt)
+ uc->data = np->minsync_dt;
+ tp->tgoal.iu = tp->tgoal.dt =
+ tp->tgoal.qas = 1;
+ tp->tgoal.width = 1;
+ tp->tgoal.period = uc->data;
+ tp->tgoal.offset = np->maxoffs_dt;
+ } else {
+ if (uc->data < np->minsync)
+ uc->data = np->minsync;
+ tp->tgoal.iu = tp->tgoal.dt =
+ tp->tgoal.qas = 0;
+ tp->tgoal.period = uc->data;
+ tp->tgoal.offset = np->maxoffs;
+ }
+ tp->tgoal.check_nego = 1;
+ break;
+ case UC_SETWIDE:
+ tp->tgoal.width = uc->data ? 1 : 0;
+ tp->tgoal.check_nego = 1;
+ break;
+ case UC_SETTAGS:
+ for (l = 0; l < SYM_CONF_MAX_LUN; l++)
+ sym_tune_dev_queuing(tp, l, uc->data);
+ break;
+ case UC_RESETDEV:
+ tp->to_reset = 1;
+ np->istat_sem = SEM;
+ OUTB(np, nc_istat, SIGP|SEM);
+ break;
+ case UC_CLEARDEV:
+ for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
+ struct sym_lcb *lp = sym_lp(tp, l);
+ if (lp) lp->to_clear = 1;
+ }
+ np->istat_sem = SEM;
+ OUTB(np, nc_istat, SIGP|SEM);
+ break;
+ case UC_SETFLAG:
+ tp->usrflags = uc->data;
+ break;
+ }
+ }
+ break;
+ }
+}
+
+static int sym_skip_spaces(char *ptr, int len)
+{
+ int cnt, c;
+
+ for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);
+
+ return (len - cnt);
+}
+
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+ char *end;
+
+ *pv = simple_strtoul(ptr, &end, 10);
+ return (end - ptr);
+}
+
+static int is_keyword(char *ptr, int len, char *verb)
+{
+ int verb_len = strlen(verb);
+
+ if (len >= verb_len && !memcmp(verb, ptr, verb_len))
+ return verb_len;
+ else
+ return 0;
+}
+
+#define SKIP_SPACES(ptr, len) \
+ if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+#define GET_INT_ARG(ptr, len, v) \
+ if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+
+/*
+ * Parse a control command
+ */
+
+static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ char *ptr = buffer;
+ int len = length;
+ struct sym_usrcmd cmd, *uc = &cmd;
+ int arg_len;
+ u_long target;
+
+ memset(uc, 0, sizeof(*uc));
+
+ if (len > 0 && ptr[len-1] == '\n')
+ --len;
+
+ if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+ uc->cmd = UC_SETSYNC;
+ else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
+ uc->cmd = UC_SETTAGS;
+ else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
+ uc->cmd = UC_SETVERBOSE;
+ else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+ uc->cmd = UC_SETWIDE;
+#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
+ else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+ uc->cmd = UC_SETDEBUG;
+#endif
+ else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+ uc->cmd = UC_SETFLAG;
+ else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
+ uc->cmd = UC_RESETDEV;
+ else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
+ uc->cmd = UC_CLEARDEV;
+ else
+ arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+ if (!arg_len)
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ case UC_SETFLAG:
+ case UC_RESETDEV:
+ case UC_CLEARDEV:
+ SKIP_SPACES(ptr, len);
+ if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+ ptr += arg_len; len -= arg_len;
+ uc->target = ~0;
+ } else {
+ GET_INT_ARG(ptr, len, target);
+ uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: target=%ld\n", target);
+#endif
+ }
+ break;
+ }
+
+ switch(uc->cmd) {
+ case UC_SETVERBOSE:
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ SKIP_SPACES(ptr, len);
+ GET_INT_ARG(ptr, len, uc->data);
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
+ case UC_SETDEBUG:
+ while (len > 0) {
+ SKIP_SPACES(ptr, len);
+ if ((arg_len = is_keyword(ptr, len, "alloc")))
+ uc->data |= DEBUG_ALLOC;
+ else if ((arg_len = is_keyword(ptr, len, "phase")))
+ uc->data |= DEBUG_PHASE;
+ else if ((arg_len = is_keyword(ptr, len, "queue")))
+ uc->data |= DEBUG_QUEUE;
+ else if ((arg_len = is_keyword(ptr, len, "result")))
+ uc->data |= DEBUG_RESULT;
+ else if ((arg_len = is_keyword(ptr, len, "scatter")))
+ uc->data |= DEBUG_SCATTER;
+ else if ((arg_len = is_keyword(ptr, len, "script")))
+ uc->data |= DEBUG_SCRIPT;
+ else if ((arg_len = is_keyword(ptr, len, "tiny")))
+ uc->data |= DEBUG_TINY;
+ else if ((arg_len = is_keyword(ptr, len, "timing")))
+ uc->data |= DEBUG_TIMING;
+ else if ((arg_len = is_keyword(ptr, len, "nego")))
+ uc->data |= DEBUG_NEGO;
+ else if ((arg_len = is_keyword(ptr, len, "tags")))
+ uc->data |= DEBUG_TAGS;
+ else if ((arg_len = is_keyword(ptr, len, "pointer")))
+ uc->data |= DEBUG_POINTER;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
+ case UC_SETFLAG:
+ while (len > 0) {
+ SKIP_SPACES(ptr, len);
+ if ((arg_len = is_keyword(ptr, len, "no_disc")))
+ uc->data &= ~SYM_DISC_ENABLED;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (len)
+ return -EINVAL;
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ sym_exec_user_command(np, uc);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ return length;
+}
+
+#endif /* SYM_LINUX_USER_COMMAND_SUPPORT */
+
+
+/*
+ * Copy formatted information into the input buffer.
+ */
+static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+#ifdef SYM_LINUX_USER_INFO_SUPPORT
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+
+ seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, "
+ "revision id 0x%x\n", np->s.chip_name,
+ pdev->device, pdev->revision);
+ seq_printf(m, "At PCI address %s, IRQ %u\n",
+ pci_name(pdev), pdev->irq);
+ seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n",
+ (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
+ np->maxwide ? "Wide" : "Narrow",
+ np->minsync_dt ? ", DT capable" : "");
+
+ seq_printf(m, "Max. started commands %d, "
+ "max. commands per LUN %d\n",
+ SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
+
+ return 0;
+#else
+ return -EINVAL;
+#endif /* SYM_LINUX_USER_INFO_SUPPORT */
+}
+
+#endif /* SYM_LINUX_PROC_INFO_SUPPORT */
+
+/*
+ * Free resources claimed by sym_iomap_device(). Note that
+ * sym_free_resources() should be used instead of this function after calling
+ * sym_attach().
+ */
+static void sym_iounmap_device(struct sym_device *device)
+{
+ if (device->s.ioaddr)
+ pci_iounmap(device->pdev, device->s.ioaddr);
+ if (device->s.ramaddr)
+ pci_iounmap(device->pdev, device->s.ramaddr);
+}
+
+/*
+ * Free controller resources.
+ */
+static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
+ int do_free_irq)
+{
+ /*
+ * Free O/S specific resources.
+ */
+ if (do_free_irq)
+ free_irq(pdev->irq, np->s.host);
+ if (np->s.ioaddr)
+ pci_iounmap(pdev, np->s.ioaddr);
+ if (np->s.ramaddr)
+ pci_iounmap(pdev, np->s.ramaddr);
+ /*
+ * Free O/S independent resources.
+ */
+ sym_hcb_free(np);
+
+ sym_mfree_dma(np, sizeof(*np), "HCB");
+}
+
+/*
+ * Host attach and initialisations.
+ *
+ * Allocate host data and ncb structure.
+ * Remap MMIO region.
+ * Do chip initialization.
+ * If all is OK, install interrupt handling and
+ * start the timer daemon.
+ */
+static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
+ struct sym_device *dev)
+{
+ struct sym_data *sym_data;
+ struct sym_hcb *np = NULL;
+ struct Scsi_Host *shost = NULL;
+ struct pci_dev *pdev = dev->pdev;
+ unsigned long flags;
+ struct sym_fw *fw;
+ int do_free_irq = 0;
+
+ printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n",
+ unit, dev->chip.name, pdev->revision, pci_name(pdev),
+ pdev->irq);
+
+ /*
+ * Get the firmware for this chip.
+ */
+ fw = sym_find_firmware(&dev->chip);
+ if (!fw)
+ goto attach_failed;
+
+ shost = scsi_host_alloc(tpnt, sizeof(*sym_data));
+ if (!shost)
+ goto attach_failed;
+ sym_data = shost_priv(shost);
+
+ /*
+ * Allocate immediately the host control block,
+ * since we are only expecting to succeed. :)
+ * We keep track in the HCB of all the resources that
+ * are to be released on error.
+ */
+ np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
+ if (!np)
+ goto attach_failed;
+ np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */
+ sym_data->ncb = np;
+ sym_data->pdev = pdev;
+ np->s.host = shost;
+
+ pci_set_drvdata(pdev, shost);
+
+ /*
+ * Copy some useful infos to the HCB.
+ */
+ np->hcb_ba = vtobus(np);
+ np->verbose = sym_driver_setup.verbose;
+ np->s.unit = unit;
+ np->features = dev->chip.features;
+ np->clock_divn = dev->chip.nr_divisor;
+ np->maxoffs = dev->chip.offset_max;
+ np->maxburst = dev->chip.burst_max;
+ np->myaddr = dev->host_id;
+ np->mmio_ba = (u32)dev->mmio_base;
+ np->ram_ba = (u32)dev->ram_base;
+ np->s.ioaddr = dev->s.ioaddr;
+ np->s.ramaddr = dev->s.ramaddr;
+
+ /*
+ * Edit its name.
+ */
+ strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
+ sprintf(np->s.inst_name, "sym%d", np->s.unit);
+
+ if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
+ !pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
+ set_dac(np);
+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printf_warning("%s: No suitable DMA available\n", sym_name(np));
+ goto attach_failed;
+ }
+
+ if (sym_hcb_attach(shost, fw, dev->nvram))
+ goto attach_failed;
+
+ /*
+ * Install the interrupt handler.
+ * If we synchonize the C code with SCRIPTS on interrupt,
+ * we do not want to share the INTR line at all.
+ */
+ if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX,
+ shost)) {
+ printf_err("%s: request irq %u failure\n",
+ sym_name(np), pdev->irq);
+ goto attach_failed;
+ }
+ do_free_irq = 1;
+
+ /*
+ * After SCSI devices have been opened, we cannot
+ * reset the bus safely, so we do it here.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (sym_reset_scsi_bus(np, 0))
+ goto reset_failed;
+
+ /*
+ * Start the SCRIPTS.
+ */
+ sym_start_up(shost, 1);
+
+ /*
+ * Start the timer daemon
+ */
+ init_timer(&np->s.timer);
+ np->s.timer.data = (unsigned long) np;
+ np->s.timer.function = sym53c8xx_timer;
+ np->s.lasttime=0;
+ sym_timer (np);
+
+ /*
+ * Fill Linux host instance structure
+ * and return success.
+ */
+ shost->max_channel = 0;
+ shost->this_id = np->myaddr;
+ shost->max_id = np->maxwide ? 16 : 8;
+ shost->max_lun = SYM_CONF_MAX_LUN;
+ shost->unique_id = pci_resource_start(pdev, 0);
+ shost->cmd_per_lun = SYM_CONF_MAX_TAG;
+ shost->can_queue = (SYM_CONF_MAX_START-2);
+ shost->sg_tablesize = SYM_CONF_MAX_SG;
+ shost->max_cmd_len = 16;
+ BUG_ON(sym2_transport_template == NULL);
+ shost->transportt = sym2_transport_template;
+
+ /* 53c896 rev 1 errata: DMA may not cross 16MB boundary */
+ if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2)
+ shost->dma_boundary = 0xFFFFFF;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return shost;
+
+ reset_failed:
+ printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
+ "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ attach_failed:
+ printf_info("sym%d: giving up ...\n", unit);
+ if (np)
+ sym_free_resources(np, pdev, do_free_irq);
+ else
+ sym_iounmap_device(dev);
+ if (shost)
+ scsi_host_put(shost);
+
+ return NULL;
+ }
+
+
+/*
+ * Detect and try to read SYMBIOS and TEKRAM NVRAM.
+ */
+#if SYM_CONF_NVRAM_SUPPORT
+static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
+{
+ devp->nvram = nvp;
+ nvp->type = 0;
+
+ sym_read_nvram(devp, nvp);
+}
+#else
+static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
+{
+}
+#endif /* SYM_CONF_NVRAM_SUPPORT */
+
+static int sym_check_supported(struct sym_device *device)
+{
+ struct sym_chip *chip;
+ struct pci_dev *pdev = device->pdev;
+ unsigned long io_port = pci_resource_start(pdev, 0);
+ int i;
+
+ /*
+ * If user excluded this chip, do not initialize it.
+ * I hate this code so much. Must kill it.
+ */
+ if (io_port) {
+ for (i = 0 ; i < 8 ; i++) {
+ if (sym_driver_setup.excludes[i] == io_port)
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Check if the chip is supported. Then copy the chip description
+ * to our device structure so we can make it match the actual device
+ * and options.
+ */
+ chip = sym_lookup_chip_table(pdev->device, pdev->revision);
+ if (!chip) {
+ dev_info(&pdev->dev, "device not supported\n");
+ return -ENODEV;
+ }
+ memcpy(&device->chip, chip, sizeof(device->chip));
+
+ return 0;
+}
+
+/*
+ * Ignore Symbios chips controlled by various RAID controllers.
+ * These controllers set value 0x52414944 at RAM end - 16.
+ */
+static int sym_check_raid(struct sym_device *device)
+{
+ unsigned int ram_size, ram_val;
+
+ if (!device->s.ramaddr)
+ return 0;
+
+ if (device->chip.features & FE_RAM8K)
+ ram_size = 8192;
+ else
+ ram_size = 4096;
+
+ ram_val = readl(device->s.ramaddr + ram_size - 16);
+ if (ram_val != 0x52414944)
+ return 0;
+
+ dev_info(&device->pdev->dev,
+ "not initializing, driven by RAID controller.\n");
+ return -ENODEV;
+}
+
+static int sym_set_workarounds(struct sym_device *device)
+{
+ struct sym_chip *chip = &device->chip;
+ struct pci_dev *pdev = device->pdev;
+ u_short status_reg;
+
+ /*
+ * (ITEM 12 of a DEL about the 896 I haven't yet).
+ * We must ensure the chip will use WRITE AND INVALIDATE.
+ * The revision number limit is for now arbitrary.
+ */
+ if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) {
+ chip->features |= (FE_WRIE | FE_CLSE);
+ }
+
+ /* If the chip can do Memory Write Invalidate, enable it */
+ if (chip->features & FE_WRIE) {
+ if (pci_set_mwi(pdev))
+ return -ENODEV;
+ }
+
+ /*
+ * Work around for errant bit in 895A. The 66Mhz
+ * capable bit is set erroneously. Clear this bit.
+ * (Item 1 DEL 533)
+ *
+ * Make sure Config space and Features agree.
+ *
+ * Recall: writes are not normal to status register -
+ * write a 1 to clear and a 0 to leave unchanged.
+ * Can only reset bits.
+ */
+ pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+ if (chip->features & FE_66MHZ) {
+ if (!(status_reg & PCI_STATUS_66MHZ))
+ chip->features &= ~FE_66MHZ;
+ } else {
+ if (status_reg & PCI_STATUS_66MHZ) {
+ status_reg = PCI_STATUS_66MHZ;
+ pci_write_config_word(pdev, PCI_STATUS, status_reg);
+ pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Map HBA registers and on-chip SRAM (if present).
+ */
+static int sym_iomap_device(struct sym_device *device)
+{
+ struct pci_dev *pdev = device->pdev;
+ struct pci_bus_region bus_addr;
+ int i = 2;
+
+ pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]);
+ device->mmio_base = bus_addr.start;
+
+ if (device->chip.features & FE_RAM) {
+ /*
+ * If the BAR is 64-bit, resource 2 will be occupied by the
+ * upper 32 bits
+ */
+ if (!pdev->resource[i].flags)
+ i++;
+ pcibios_resource_to_bus(pdev->bus, &bus_addr,
+ &pdev->resource[i]);
+ device->ram_base = bus_addr.start;
+ }
+
+#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
+ if (device->mmio_base)
+ device->s.ioaddr = pci_iomap(pdev, 1,
+ pci_resource_len(pdev, 1));
+#endif
+ if (!device->s.ioaddr)
+ device->s.ioaddr = pci_iomap(pdev, 0,
+ pci_resource_len(pdev, 0));
+ if (!device->s.ioaddr) {
+ dev_err(&pdev->dev, "could not map registers; giving up.\n");
+ return -EIO;
+ }
+ if (device->ram_base) {
+ device->s.ramaddr = pci_iomap(pdev, i,
+ pci_resource_len(pdev, i));
+ if (!device->s.ramaddr) {
+ dev_warn(&pdev->dev,
+ "could not map SRAM; continuing anyway.\n");
+ device->ram_base = 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The NCR PQS and PDS cards are constructed as a DEC bridge
+ * behind which sits a proprietary NCR memory controller and
+ * either four or two 53c875s as separate devices. We can tell
+ * if an 875 is part of a PQS/PDS or not since if it is, it will
+ * be on the same bus as the memory controller. In its usual
+ * mode of operation, the 875s are slaved to the memory
+ * controller for all transfers. To operate with the Linux
+ * driver, the memory controller is disabled and the 875s
+ * freed to function independently. The only wrinkle is that
+ * the preset SCSI ID (which may be zero) must be read in from
+ * a special configuration space register of the 875.
+ */
+static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
+{
+ int slot;
+ u8 tmp;
+
+ for (slot = 0; slot < 256; slot++) {
+ struct pci_dev *memc = pci_get_slot(pdev->bus, slot);
+
+ if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
+ pci_dev_put(memc);
+ continue;
+ }
+
+ /* bit 1: allow individual 875 configuration */
+ pci_read_config_byte(memc, 0x44, &tmp);
+ if ((tmp & 0x2) == 0) {
+ tmp |= 0x2;
+ pci_write_config_byte(memc, 0x44, tmp);
+ }
+
+ /* bit 2: drive individual 875 interrupts to the bus */
+ pci_read_config_byte(memc, 0x45, &tmp);
+ if ((tmp & 0x4) == 0) {
+ tmp |= 0x4;
+ pci_write_config_byte(memc, 0x45, tmp);
+ }
+
+ pci_dev_put(memc);
+ break;
+ }
+
+ pci_read_config_byte(pdev, 0x84, &tmp);
+ sym_dev->host_id = tmp;
+}
+
+/*
+ * Called before unloading the module.
+ * Detach the host.
+ * We have to free resources and halt the NCR chip.
+ */
+static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ printk("%s: detaching ...\n", sym_name(np));
+
+ del_timer_sync(&np->s.timer);
+
+ /*
+ * Reset NCR chip.
+ * We should use sym_soft_reset(), but we don't want to do
+ * so, since we may not be safe if interrupts occur.
+ */
+ printk("%s: resetting chip\n", sym_name(np));
+ OUTB(np, nc_istat, SRST);
+ INB(np, nc_mbox1);
+ udelay(10);
+ OUTB(np, nc_istat, 0);
+
+ sym_free_resources(np, pdev, 1);
+ scsi_host_put(shost);
+
+ return 1;
+}
+
+/*
+ * Driver host template.
+ */
+static struct scsi_host_template sym2_template = {
+ .module = THIS_MODULE,
+ .name = "sym53c8xx",
+ .info = sym53c8xx_info,
+ .queuecommand = sym53c8xx_queue_command,
+ .slave_alloc = sym53c8xx_slave_alloc,
+ .slave_configure = sym53c8xx_slave_configure,
+ .slave_destroy = sym53c8xx_slave_destroy,
+ .eh_abort_handler = sym53c8xx_eh_abort_handler,
+ .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
+ .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
+ .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
+ .this_id = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .max_sectors = 0xFFFF,
+#ifdef SYM_LINUX_PROC_INFO_SUPPORT
+ .show_info = sym_show_info,
+#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
+ .write_info = sym_user_command,
+#endif
+ .proc_name = NAME53C8XX,
+#endif
+};
+
+static int attach_count;
+
+static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct sym_device sym_dev;
+ struct sym_nvram nvram;
+ struct Scsi_Host *shost;
+ int do_iounmap = 0;
+ int do_disable_device = 1;
+
+ memset(&sym_dev, 0, sizeof(sym_dev));
+ memset(&nvram, 0, sizeof(nvram));
+ sym_dev.pdev = pdev;
+ sym_dev.host_id = SYM_SETUP_HOST_ID;
+
+ if (pci_enable_device(pdev))
+ goto leave;
+
+ pci_set_master(pdev);
+
+ if (pci_request_regions(pdev, NAME53C8XX))
+ goto disable;
+
+ if (sym_check_supported(&sym_dev))
+ goto free;
+
+ if (sym_iomap_device(&sym_dev))
+ goto free;
+ do_iounmap = 1;
+
+ if (sym_check_raid(&sym_dev)) {
+ do_disable_device = 0; /* Don't disable the device */
+ goto free;
+ }
+
+ if (sym_set_workarounds(&sym_dev))
+ goto free;
+
+ sym_config_pqs(pdev, &sym_dev);
+
+ sym_get_nvram(&sym_dev, &nvram);
+
+ do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */
+ shost = sym_attach(&sym2_template, attach_count, &sym_dev);
+ if (!shost)
+ goto free;
+
+ if (scsi_add_host(shost, &pdev->dev))
+ goto detach;
+ scsi_scan_host(shost);
+
+ attach_count++;
+
+ return 0;
+
+ detach:
+ sym_detach(pci_get_drvdata(pdev), pdev);
+ free:
+ if (do_iounmap)
+ sym_iounmap_device(&sym_dev);
+ pci_release_regions(pdev);
+ disable:
+ if (do_disable_device)
+ pci_disable_device(pdev);
+ leave:
+ return -ENODEV;
+}
+
+static void sym2_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+
+ scsi_remove_host(shost);
+ sym_detach(shost, pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ attach_count--;
+}
+
+/**
+ * sym2_io_error_detected() - called when PCI error is detected
+ * @pdev: pointer to PCI device
+ * @state: current state of the PCI slot
+ */
+static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ /* If slot is permanently frozen, turn everything off */
+ if (state == pci_channel_io_perm_failure) {
+ sym2_remove(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ disable_irq(pdev->irq);
+ pci_disable_device(pdev);
+
+ /* Request that MMIO be enabled, so register dump can be taken. */
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+/**
+ * sym2_io_slot_dump - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+
+ sym_dump_registers(shost);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * sym2_reset_workarounds - hardware-specific work-arounds
+ *
+ * This routine is similar to sym_set_workarounds(), except
+ * that, at this point, we already know that the device was
+ * successfully initialized at least once before, and so most
+ * of the steps taken there are un-needed here.
+ */
+static void sym2_reset_workarounds(struct pci_dev *pdev)
+{
+ u_short status_reg;
+ struct sym_chip *chip;
+
+ chip = sym_lookup_chip_table(pdev->device, pdev->revision);
+
+ /* Work around for errant bit in 895A, in a fashion
+ * similar to what is done in sym_set_workarounds().
+ */
+ pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+ if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) {
+ status_reg = PCI_STATUS_66MHZ;
+ pci_write_config_word(pdev, PCI_STATUS, status_reg);
+ pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+ }
+}
+
+/**
+ * sym2_io_slot_reset() - called when the pci bus has been reset.
+ * @pdev: pointer to PCI device
+ *
+ * Restart the card from scratch.
+ */
+static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sym_hcb *np = sym_get_hcb(shost);
+
+ printk(KERN_INFO "%s: recovering from a PCI slot reset\n",
+ sym_name(np));
+
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR "%s: Unable to enable after PCI reset\n",
+ sym_name(np));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ enable_irq(pdev->irq);
+
+ /* If the chip can do Memory Write Invalidate, enable it */
+ if (np->features & FE_WRIE) {
+ if (pci_set_mwi(pdev))
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Perform work-arounds, analogous to sym_set_workarounds() */
+ sym2_reset_workarounds(pdev);
+
+ /* Perform host reset only on one instance of the card */
+ if (PCI_FUNC(pdev->devfn) == 0) {
+ if (sym_reset_scsi_bus(np, 0)) {
+ printk(KERN_ERR "%s: Unable to reset scsi host\n",
+ sym_name(np));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ sym_start_up(shost, 1);
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * sym2_io_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that its
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void sym2_io_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sym_data *sym_data = shost_priv(shost);
+
+ spin_lock_irq(shost->host_lock);
+ if (sym_data->io_reset)
+ complete_all(sym_data->io_reset);
+ spin_unlock_irq(shost->host_lock);
+}
+
+static void sym2_get_signalling(struct Scsi_Host *shost)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ enum spi_signal_type type;
+
+ switch (np->scsi_mode) {
+ case SMODE_SE:
+ type = SPI_SIGNAL_SE;
+ break;
+ case SMODE_LVD:
+ type = SPI_SIGNAL_LVD;
+ break;
+ case SMODE_HVD:
+ type = SPI_SIGNAL_HVD;
+ break;
+ default:
+ type = SPI_SIGNAL_UNKNOWN;
+ break;
+ }
+ spi_signalling(shost) = type;
+}
+
+static void sym2_set_offset(struct scsi_target *starget, int offset)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_tcb *tp = &np->target[starget->id];
+
+ tp->tgoal.offset = offset;
+ tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_period(struct scsi_target *starget, int period)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_tcb *tp = &np->target[starget->id];
+
+ /* have to have DT for these transfers, but DT will also
+ * set width, so check that this is allowed */
+ if (period <= np->minsync && spi_width(starget))
+ tp->tgoal.dt = 1;
+
+ tp->tgoal.period = period;
+ tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_width(struct scsi_target *starget, int width)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_tcb *tp = &np->target[starget->id];
+
+ /* It is illegal to have DT set on narrow transfers. If DT is
+ * clear, we must also clear IU and QAS. */
+ if (width == 0)
+ tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+
+ tp->tgoal.width = width;
+ tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_dt(struct scsi_target *starget, int dt)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_tcb *tp = &np->target[starget->id];
+
+ /* We must clear QAS and IU if DT is clear */
+ if (dt)
+ tp->tgoal.dt = 1;
+ else
+ tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+ tp->tgoal.check_nego = 1;
+}
+
+#if 0
+static void sym2_set_iu(struct scsi_target *starget, int iu)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_tcb *tp = &np->target[starget->id];
+
+ if (iu)
+ tp->tgoal.iu = tp->tgoal.dt = 1;
+ else
+ tp->tgoal.iu = 0;
+ tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_qas(struct scsi_target *starget, int qas)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_hcb *np = sym_get_hcb(shost);
+ struct sym_tcb *tp = &np->target[starget->id];
+
+ if (qas)
+ tp->tgoal.dt = tp->tgoal.qas = 1;
+ else
+ tp->tgoal.qas = 0;
+ tp->tgoal.check_nego = 1;
+}
+#endif
+
+static struct spi_function_template sym2_transport_functions = {
+ .set_offset = sym2_set_offset,
+ .show_offset = 1,
+ .set_period = sym2_set_period,
+ .show_period = 1,
+ .set_width = sym2_set_width,
+ .show_width = 1,
+ .set_dt = sym2_set_dt,
+ .show_dt = 1,
+#if 0
+ .set_iu = sym2_set_iu,
+ .show_iu = 1,
+ .set_qas = sym2_set_qas,
+ .show_qas = 1,
+#endif
+ .get_signalling = sym2_get_signalling,
+};
+
+static struct pci_device_id sym2_id_table[] = {
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, /* new */
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, sym2_id_table);
+
+static const struct pci_error_handlers sym2_err_handler = {
+ .error_detected = sym2_io_error_detected,
+ .mmio_enabled = sym2_io_slot_dump,
+ .slot_reset = sym2_io_slot_reset,
+ .resume = sym2_io_resume,
+};
+
+static struct pci_driver sym2_driver = {
+ .name = NAME53C8XX,
+ .id_table = sym2_id_table,
+ .probe = sym2_probe,
+ .remove = sym2_remove,
+ .err_handler = &sym2_err_handler,
+};
+
+static int __init sym2_init(void)
+{
+ int error;
+
+ sym2_setup_params();
+ sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
+ if (!sym2_transport_template)
+ return -ENODEV;
+
+ error = pci_register_driver(&sym2_driver);
+ if (error)
+ spi_release_transport(sym2_transport_template);
+ return error;
+}
+
+static void __exit sym2_exit(void)
+{
+ pci_unregister_driver(&sym2_driver);
+ spi_release_transport(sym2_transport_template);
+}
+
+module_init(sym2_init);
+module_exit(sym2_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
new file mode 100644
index 000000000..805369521
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -0,0 +1,270 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SYM_GLUE_H
+#define SYM_GLUE_H
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#ifdef __sparc__
+# include <asm/irq.h>
+#endif
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_host.h>
+
+#include "sym53c8xx.h"
+#include "sym_defs.h"
+#include "sym_misc.h"
+
+/*
+ * Configuration addendum for Linux.
+ */
+#define SYM_CONF_TIMER_INTERVAL ((HZ+1)/2)
+
+#undef SYM_OPT_HANDLE_DEVICE_QUEUEING
+#define SYM_OPT_LIMIT_COMMAND_REORDERING
+
+/*
+ * Print a message with severity.
+ */
+#define printf_emerg(args...) printk(KERN_EMERG args)
+#define printf_alert(args...) printk(KERN_ALERT args)
+#define printf_crit(args...) printk(KERN_CRIT args)
+#define printf_err(args...) printk(KERN_ERR args)
+#define printf_warning(args...) printk(KERN_WARNING args)
+#define printf_notice(args...) printk(KERN_NOTICE args)
+#define printf_info(args...) printk(KERN_INFO args)
+#define printf_debug(args...) printk(KERN_DEBUG args)
+#define printf(args...) printk(args)
+
+/*
+ * A 'read barrier' flushes any data that have been prefetched
+ * by the processor due to out of order execution. Such a barrier
+ * must notably be inserted prior to looking at data that have
+ * been DMAed, assuming that program does memory READs in proper
+ * order and that the device ensured proper ordering of WRITEs.
+ *
+ * A 'write barrier' prevents any previous WRITEs to pass further
+ * WRITEs. Such barriers must be inserted each time another agent
+ * relies on ordering of WRITEs.
+ *
+ * Note that, due to posting of PCI memory writes, we also must
+ * insert dummy PCI read transactions when some ordering involving
+ * both directions over the PCI does matter. PCI transactions are
+ * fully ordered in each direction.
+ */
+
+#define MEMORY_READ_BARRIER() rmb()
+#define MEMORY_WRITE_BARRIER() wmb()
+
+/*
+ * IO functions definition for big/little endian CPU support.
+ * For now, PCI chips are only supported in little endian addressing mode,
+ */
+
+#ifdef __BIG_ENDIAN
+
+#define readw_l2b readw
+#define readl_l2b readl
+#define writew_b2l writew
+#define writel_b2l writel
+
+#else /* little endian */
+
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+
+#endif /* endian */
+
+#ifdef SYM_CONF_CHIP_BIG_ENDIAN
+#error "Chips in BIG ENDIAN addressing mode are not (yet) supported"
+#endif
+
+/*
+ * If the CPU and the chip use same endian-ness addressing,
+ * no byte reordering is needed for script patching.
+ * Macro cpu_to_scr() is to be used for script patching.
+ * Macro scr_to_cpu() is to be used for getting a DWORD
+ * from the script.
+ */
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+/*
+ * These ones are used as return code from
+ * error recovery handlers under Linux.
+ */
+#define SCSI_SUCCESS SUCCESS
+#define SCSI_FAILED FAILED
+
+/*
+ * System specific target data structure.
+ * None for now, under Linux.
+ */
+/* #define SYM_HAVE_STCB */
+
+/*
+ * System specific lun data structure.
+ */
+#define SYM_HAVE_SLCB
+struct sym_slcb {
+ u_short reqtags; /* Number of tags requested by user */
+ u_short scdev_depth; /* Queue depth set in select_queue_depth() */
+};
+
+/*
+ * System specific command data structure.
+ * Not needed under Linux.
+ */
+/* struct sym_sccb */
+
+/*
+ * System specific host data structure.
+ */
+struct sym_shcb {
+ /*
+ * Chip and controller identification.
+ */
+ int unit;
+ char inst_name[16];
+ char chip_name[8];
+
+ struct Scsi_Host *host;
+
+ void __iomem * ioaddr; /* MMIO kernel io address */
+ void __iomem * ramaddr; /* RAM kernel io address */
+
+ struct timer_list timer; /* Timer handler link header */
+ u_long lasttime;
+ u_long settle_time; /* Resetting the SCSI BUS */
+ u_char settle_time_valid;
+};
+
+/*
+ * Return the name of the controller.
+ */
+#define sym_name(np) (np)->s.inst_name
+
+struct sym_nvram;
+
+/*
+ * The IO macros require a struct called 's' and are abused in sym_nvram.c
+ */
+struct sym_device {
+ struct pci_dev *pdev;
+ unsigned long mmio_base;
+ unsigned long ram_base;
+ struct {
+ void __iomem *ioaddr;
+ void __iomem *ramaddr;
+ } s;
+ struct sym_chip chip;
+ struct sym_nvram *nvram;
+ u_char host_id;
+};
+
+/*
+ * Driver host data structure.
+ */
+struct sym_data {
+ struct sym_hcb *ncb;
+ struct completion *io_reset; /* PCI error handling */
+ struct pci_dev *pdev;
+};
+
+static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host)
+{
+ return ((struct sym_data *)host->hostdata)->ncb;
+}
+
+#include "sym_fw.h"
+#include "sym_hipd.h"
+
+/*
+ * Set the status field of a CAM CCB.
+ */
+static inline void
+sym_set_cam_status(struct scsi_cmnd *cmd, int status)
+{
+ cmd->result &= ~(0xff << 16);
+ cmd->result |= (status << 16);
+}
+
+/*
+ * Get the status field of a CAM CCB.
+ */
+static inline int
+sym_get_cam_status(struct scsi_cmnd *cmd)
+{
+ return host_byte(cmd->result);
+}
+
+/*
+ * Build CAM result for a successful IO and for a failed IO.
+ */
+static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
+{
+ scsi_set_resid(cmd, resid);
+ cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
+}
+void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
+
+void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb);
+#define sym_print_addr(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg)
+void sym_xpt_async_bus_reset(struct sym_hcb *np);
+int sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
+void sym_log_bus_error(struct Scsi_Host *);
+void sym_dump_registers(struct Scsi_Host *);
+
+#endif /* SYM_GLUE_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
new file mode 100644
index 000000000..6b349e301
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -0,0 +1,5842 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/slab.h>
+#include <asm/param.h> /* for timeouts in units of HZ */
+
+#include "sym_glue.h"
+#include "sym_nvram.h"
+
+#if 0
+#define SYM_DEBUG_GENERIC_SUPPORT
+#endif
+
+/*
+ * Needed function prototypes.
+ */
+static void sym_int_ma (struct sym_hcb *np);
+static void sym_int_sir(struct sym_hcb *);
+static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np);
+static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa);
+static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln);
+static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
+static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
+static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);
+
+/*
+ * Print a buffer in hexadecimal format with a ".\n" at end.
+ */
+static void sym_printl_hex(u_char *p, int n)
+{
+ while (n-- > 0)
+ printf (" %x", *p++);
+ printf (".\n");
+}
+
+static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
+{
+ sym_print_addr(cp->cmd, "%s: ", label);
+
+ spi_print_msg(msg);
+ printf("\n");
+}
+
+static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg)
+{
+ struct sym_tcb *tp = &np->target[target];
+ dev_info(&tp->starget->dev, "%s: ", label);
+
+ spi_print_msg(msg);
+ printf("\n");
+}
+
+/*
+ * Print something that tells about extended errors.
+ */
+void sym_print_xerr(struct scsi_cmnd *cmd, int x_status)
+{
+ if (x_status & XE_PARITY_ERR) {
+ sym_print_addr(cmd, "unrecovered SCSI parity error.\n");
+ }
+ if (x_status & XE_EXTRA_DATA) {
+ sym_print_addr(cmd, "extraneous data discarded.\n");
+ }
+ if (x_status & XE_BAD_PHASE) {
+ sym_print_addr(cmd, "illegal scsi phase (4/5).\n");
+ }
+ if (x_status & XE_SODL_UNRUN) {
+ sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n");
+ }
+ if (x_status & XE_SWIDE_OVRUN) {
+ sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n");
+ }
+}
+
+/*
+ * Return a string for SCSI BUS mode.
+ */
+static char *sym_scsi_bus_mode(int mode)
+{
+ switch(mode) {
+ case SMODE_HVD: return "HVD";
+ case SMODE_SE: return "SE";
+ case SMODE_LVD: return "LVD";
+ }
+ return "??";
+}
+
+/*
+ * Soft reset the chip.
+ *
+ * Raising SRST when the chip is running may cause
+ * problems on dual function chips (see below).
+ * On the other hand, LVD devices need some delay
+ * to settle and report actual BUS mode in STEST4.
+ */
+static void sym_chip_reset (struct sym_hcb *np)
+{
+ OUTB(np, nc_istat, SRST);
+ INB(np, nc_mbox1);
+ udelay(10);
+ OUTB(np, nc_istat, 0);
+ INB(np, nc_mbox1);
+ udelay(2000); /* For BUS MODE to settle */
+}
+
+/*
+ * Really soft reset the chip.:)
+ *
+ * Some 896 and 876 chip revisions may hang-up if we set
+ * the SRST (soft reset) bit at the wrong time when SCRIPTS
+ * are running.
+ * So, we need to abort the current operation prior to
+ * soft resetting the chip.
+ */
+static void sym_soft_reset (struct sym_hcb *np)
+{
+ u_char istat = 0;
+ int i;
+
+ if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN))
+ goto do_chip_reset;
+
+ OUTB(np, nc_istat, CABRT);
+ for (i = 100000 ; i ; --i) {
+ istat = INB(np, nc_istat);
+ if (istat & SIP) {
+ INW(np, nc_sist);
+ }
+ else if (istat & DIP) {
+ if (INB(np, nc_dstat) & ABRT)
+ break;
+ }
+ udelay(5);
+ }
+ OUTB(np, nc_istat, 0);
+ if (!i)
+ printf("%s: unable to abort current chip operation, "
+ "ISTAT=0x%02x.\n", sym_name(np), istat);
+do_chip_reset:
+ sym_chip_reset(np);
+}
+
+/*
+ * Start reset process.
+ *
+ * The interrupt handler will reinitialize the chip.
+ */
+static void sym_start_reset(struct sym_hcb *np)
+{
+ sym_reset_scsi_bus(np, 1);
+}
+
+int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int)
+{
+ u32 term;
+ int retv = 0;
+
+ sym_soft_reset(np); /* Soft reset the chip */
+ if (enab_int)
+ OUTW(np, nc_sien, RST);
+ /*
+ * Enable Tolerant, reset IRQD if present and
+ * properly set IRQ mode, prior to resetting the bus.
+ */
+ OUTB(np, nc_stest3, TE);
+ OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM));
+ OUTB(np, nc_scntl1, CRST);
+ INB(np, nc_mbox1);
+ udelay(200);
+
+ if (!SYM_SETUP_SCSI_BUS_CHECK)
+ goto out;
+ /*
+ * Check for no terminators or SCSI bus shorts to ground.
+ * Read SCSI data bus, data parity bits and control signals.
+ * We are expecting RESET to be TRUE and other signals to be
+ * FALSE.
+ */
+ term = INB(np, nc_sstat0);
+ term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
+ term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */
+ ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */
+ ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */
+ INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */
+
+ if (!np->maxwide)
+ term &= 0x3ffff;
+
+ if (term != (2<<7)) {
+ printf("%s: suspicious SCSI data while resetting the BUS.\n",
+ sym_name(np));
+ printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+ "0x%lx, expecting 0x%lx\n",
+ sym_name(np),
+ (np->features & FE_WIDE) ? "dp1,d15-8," : "",
+ (u_long)term, (u_long)(2<<7));
+ if (SYM_SETUP_SCSI_BUS_CHECK == 1)
+ retv = 1;
+ }
+out:
+ OUTB(np, nc_scntl1, 0);
+ return retv;
+}
+
+/*
+ * Select SCSI clock frequency
+ */
+static void sym_selectclock(struct sym_hcb *np, u_char scntl3)
+{
+ /*
+ * If multiplier not present or not selected, leave here.
+ */
+ if (np->multiplier <= 1) {
+ OUTB(np, nc_scntl3, scntl3);
+ return;
+ }
+
+ if (sym_verbose >= 2)
+ printf ("%s: enabling clock multiplier\n", sym_name(np));
+
+ OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */
+ /*
+ * Wait for the LCKFRQ bit to be set if supported by the chip.
+ * Otherwise wait 50 micro-seconds (at least).
+ */
+ if (np->features & FE_LCKFRQ) {
+ int i = 20;
+ while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0)
+ udelay(20);
+ if (!i)
+ printf("%s: the chip cannot lock the frequency\n",
+ sym_name(np));
+ } else {
+ INB(np, nc_mbox1);
+ udelay(50+10);
+ }
+ OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */
+ OUTB(np, nc_scntl3, scntl3);
+ OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
+ OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */
+}
+
+
+/*
+ * Determine the chip's clock frequency.
+ *
+ * This is essential for the negotiation of the synchronous
+ * transfer rate.
+ *
+ * Note: we have to return the correct value.
+ * THERE IS NO SAFE DEFAULT VALUE.
+ *
+ * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
+ * 53C860 and 53C875 rev. 1 support fast20 transfers but
+ * do not have a clock doubler and so are provided with a
+ * 80 MHz clock. All other fast20 boards incorporate a doubler
+ * and so should be delivered with a 40 MHz clock.
+ * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base
+ * clock and provide a clock quadrupler (160 Mhz).
+ */
+
+/*
+ * calculate SCSI clock frequency (in KHz)
+ */
+static unsigned getfreq (struct sym_hcb *np, int gen)
+{
+ unsigned int ms = 0;
+ unsigned int f;
+
+ /*
+ * Measure GEN timer delay in order
+ * to calculate SCSI clock frequency
+ *
+ * This code will never execute too
+ * many loop iterations (if DELAY is
+ * reasonably correct). It could get
+ * too low a delay (too high a freq.)
+ * if the CPU is slow executing the
+ * loop for some reason (an NMI, for
+ * example). For this reason we will
+ * if multiple measurements are to be
+ * performed trust the higher delay
+ * (lower frequency returned).
+ */
+ OUTW(np, nc_sien, 0); /* mask all scsi interrupts */
+ INW(np, nc_sist); /* clear pending scsi interrupt */
+ OUTB(np, nc_dien, 0); /* mask all dma interrupts */
+ INW(np, nc_sist); /* another one, just to be sure :) */
+ /*
+ * The C1010-33 core does not report GEN in SIST,
+ * if this interrupt is masked in SIEN.
+ * I don't know yet if the C1010-66 behaves the same way.
+ */
+ if (np->features & FE_C10) {
+ OUTW(np, nc_sien, GEN);
+ OUTB(np, nc_istat1, SIRQD);
+ }
+ OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+ OUTB(np, nc_stime1, 0); /* disable general purpose timer */
+ OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+ while (!(INW(np, nc_sist) & GEN) && ms++ < 100000)
+ udelay(1000/4); /* count in 1/4 of ms */
+ OUTB(np, nc_stime1, 0); /* disable general purpose timer */
+ /*
+ * Undo C1010-33 specific settings.
+ */
+ if (np->features & FE_C10) {
+ OUTW(np, nc_sien, 0);
+ OUTB(np, nc_istat1, 0);
+ }
+ /*
+ * set prescaler to divide by whatever 0 means
+ * 0 ought to choose divide by 2, but appears
+ * to set divide by 3.5 mode in my 53c810 ...
+ */
+ OUTB(np, nc_scntl3, 0);
+
+ /*
+ * adjust for prescaler, and convert into KHz
+ */
+ f = ms ? ((1 << gen) * (4340*4)) / ms : 0;
+
+ /*
+ * The C1010-33 result is biased by a factor
+ * of 2/3 compared to earlier chips.
+ */
+ if (np->features & FE_C10)
+ f = (f * 2) / 3;
+
+ if (sym_verbose >= 2)
+ printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
+ sym_name(np), gen, ms/4, f);
+
+ return f;
+}
+
+static unsigned sym_getfreq (struct sym_hcb *np)
+{
+ u_int f1, f2;
+ int gen = 8;
+
+ getfreq (np, gen); /* throw away first result */
+ f1 = getfreq (np, gen);
+ f2 = getfreq (np, gen);
+ if (f1 > f2) f1 = f2; /* trust lower result */
+ return f1;
+}
+
+/*
+ * Get/probe chip SCSI clock frequency
+ */
+static void sym_getclock (struct sym_hcb *np, int mult)
+{
+ unsigned char scntl3 = np->sv_scntl3;
+ unsigned char stest1 = np->sv_stest1;
+ unsigned f1;
+
+ np->multiplier = 1;
+ f1 = 40000;
+ /*
+ * True with 875/895/896/895A with clock multiplier selected
+ */
+ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+ if (sym_verbose >= 2)
+ printf ("%s: clock multiplier found\n", sym_name(np));
+ np->multiplier = mult;
+ }
+
+ /*
+ * If multiplier not found or scntl3 not 7,5,3,
+ * reset chip and get frequency from general purpose timer.
+ * Otherwise trust scntl3 BIOS setting.
+ */
+ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+ OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */
+ f1 = sym_getfreq (np);
+
+ if (sym_verbose)
+ printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
+
+ if (f1 < 45000) f1 = 40000;
+ else if (f1 < 55000) f1 = 50000;
+ else f1 = 80000;
+
+ if (f1 < 80000 && mult > 1) {
+ if (sym_verbose >= 2)
+ printf ("%s: clock multiplier assumed\n",
+ sym_name(np));
+ np->multiplier = mult;
+ }
+ } else {
+ if ((scntl3 & 7) == 3) f1 = 40000;
+ else if ((scntl3 & 7) == 5) f1 = 80000;
+ else f1 = 160000;
+
+ f1 /= np->multiplier;
+ }
+
+ /*
+ * Compute controller synchronous parameters.
+ */
+ f1 *= np->multiplier;
+ np->clock_khz = f1;
+}
+
+/*
+ * Get/probe PCI clock frequency
+ */
+static int sym_getpciclock (struct sym_hcb *np)
+{
+ int f = 0;
+
+ /*
+ * For now, we only need to know about the actual
+ * PCI BUS clock frequency for C1010-66 chips.
+ */
+#if 1
+ if (np->features & FE_66MHZ) {
+#else
+ if (1) {
+#endif
+ OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
+ f = sym_getfreq(np);
+ OUTB(np, nc_stest1, 0);
+ }
+ np->pciclk_khz = f;
+
+ return f;
+}
+
+/*
+ * SYMBIOS chip clock divisor table.
+ *
+ * Divisors are multiplied by 10,000,000 in order to make
+ * calculations more simple.
+ */
+#define _5M 5000000
+static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
+
+/*
+ * Get clock factor and sync divisor for a given
+ * synchronous factor period.
+ */
+static int
+sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
+{
+ u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */
+ int div = np->clock_divn; /* Number of divisors supported */
+ u32 fak; /* Sync factor in sxfer */
+ u32 per; /* Period in tenths of ns */
+ u32 kpc; /* (per * clk) */
+ int ret;
+
+ /*
+ * Compute the synchronous period in tenths of nano-seconds
+ */
+ if (dt && sfac <= 9) per = 125;
+ else if (sfac <= 10) per = 250;
+ else if (sfac == 11) per = 303;
+ else if (sfac == 12) per = 500;
+ else per = 40 * sfac;
+ ret = per;
+
+ kpc = per * clk;
+ if (dt)
+ kpc <<= 1;
+
+ /*
+ * For earliest C10 revision 0, we cannot use extra
+ * clocks for the setting of the SCSI clocking.
+ * Note that this limits the lowest sync data transfer
+ * to 5 Mega-transfers per second and may result in
+ * using higher clock divisors.
+ */
+#if 1
+ if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
+ /*
+ * Look for the lowest clock divisor that allows an
+ * output speed not faster than the period.
+ */
+ while (div > 0) {
+ --div;
+ if (kpc > (div_10M[div] << 2)) {
+ ++div;
+ break;
+ }
+ }
+ fak = 0; /* No extra clocks */
+ if (div == np->clock_divn) { /* Are we too fast ? */
+ ret = -1;
+ }
+ *divp = div;
+ *fakp = fak;
+ return ret;
+ }
+#endif
+
+ /*
+ * Look for the greatest clock divisor that allows an
+ * input speed faster than the period.
+ */
+ while (div-- > 0)
+ if (kpc >= (div_10M[div] << 2)) break;
+
+ /*
+ * Calculate the lowest clock factor that allows an output
+ * speed not faster than the period, and the max output speed.
+ * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
+ * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
+ */
+ if (dt) {
+ fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
+ /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
+ } else {
+ fak = (kpc - 1) / div_10M[div] + 1 - 4;
+ /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
+ }
+
+ /*
+ * Check against our hardware limits, or bugs :).
+ */
+ if (fak > 2) {
+ fak = 2;
+ ret = -1;
+ }
+
+ /*
+ * Compute and return sync parameters.
+ */
+ *divp = div;
+ *fakp = fak;
+
+ return ret;
+}
+
+/*
+ * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
+ * 128 transfers. All chips support at least 16 transfers
+ * bursts. The 825A, 875 and 895 chips support bursts of up
+ * to 128 transfers and the 895A and 896 support bursts of up
+ * to 64 transfers. All other chips support up to 16
+ * transfers bursts.
+ *
+ * For PCI 32 bit data transfers each transfer is a DWORD.
+ * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
+ *
+ * We use log base 2 (burst length) as internal code, with
+ * value 0 meaning "burst disabled".
+ */
+
+/*
+ * Burst length from burst code.
+ */
+#define burst_length(bc) (!(bc))? 0 : 1 << (bc)
+
+/*
+ * Burst code from io register bits.
+ */
+#define burst_code(dmode, ctest4, ctest5) \
+ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
+
+/*
+ * Set initial io register bits from burst code.
+ */
+static inline void sym_init_burst(struct sym_hcb *np, u_char bc)
+{
+ np->rv_ctest4 &= ~0x80;
+ np->rv_dmode &= ~(0x3 << 6);
+ np->rv_ctest5 &= ~0x4;
+
+ if (!bc) {
+ np->rv_ctest4 |= 0x80;
+ }
+ else {
+ --bc;
+ np->rv_dmode |= ((bc & 0x3) << 6);
+ np->rv_ctest5 |= (bc & 0x4);
+ }
+}
+
+/*
+ * Save initial settings of some IO registers.
+ * Assumed to have been set by BIOS.
+ * We cannot reset the chip prior to reading the
+ * IO registers, since informations will be lost.
+ * Since the SCRIPTS processor may be running, this
+ * is not safe on paper, but it seems to work quite
+ * well. :)
+ */
+static void sym_save_initial_setting (struct sym_hcb *np)
+{
+ np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a;
+ np->sv_scntl3 = INB(np, nc_scntl3) & 0x07;
+ np->sv_dmode = INB(np, nc_dmode) & 0xce;
+ np->sv_dcntl = INB(np, nc_dcntl) & 0xa8;
+ np->sv_ctest3 = INB(np, nc_ctest3) & 0x01;
+ np->sv_ctest4 = INB(np, nc_ctest4) & 0x80;
+ np->sv_gpcntl = INB(np, nc_gpcntl);
+ np->sv_stest1 = INB(np, nc_stest1);
+ np->sv_stest2 = INB(np, nc_stest2) & 0x20;
+ np->sv_stest4 = INB(np, nc_stest4);
+ if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */
+ np->sv_scntl4 = INB(np, nc_scntl4);
+ np->sv_ctest5 = INB(np, nc_ctest5) & 0x04;
+ }
+ else
+ np->sv_ctest5 = INB(np, nc_ctest5) & 0x24;
+}
+
+/*
+ * Set SCSI BUS mode.
+ * - LVD capable chips (895/895A/896/1010) report the current BUS mode
+ * through the STEST4 IO register.
+ * - For previous generation chips (825/825A/875), the user has to tell us
+ * how to check against HVD, since a 100% safe algorithm is not possible.
+ */
+static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram)
+{
+ if (np->scsi_mode)
+ return;
+
+ np->scsi_mode = SMODE_SE;
+ if (np->features & (FE_ULTRA2|FE_ULTRA3))
+ np->scsi_mode = (np->sv_stest4 & SMODE);
+ else if (np->features & FE_DIFF) {
+ if (SYM_SETUP_SCSI_DIFF == 1) {
+ if (np->sv_scntl3) {
+ if (np->sv_stest2 & 0x20)
+ np->scsi_mode = SMODE_HVD;
+ } else if (nvram->type == SYM_SYMBIOS_NVRAM) {
+ if (!(INB(np, nc_gpreg) & 0x08))
+ np->scsi_mode = SMODE_HVD;
+ }
+ } else if (SYM_SETUP_SCSI_DIFF == 2)
+ np->scsi_mode = SMODE_HVD;
+ }
+ if (np->scsi_mode == SMODE_HVD)
+ np->rv_stest2 |= 0x20;
+}
+
+/*
+ * Prepare io register values used by sym_start_up()
+ * according to selected and supported features.
+ */
+static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
+{
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ u_char burst_max;
+ u32 period;
+ int i;
+
+ np->maxwide = (np->features & FE_WIDE) ? 1 : 0;
+
+ /*
+ * Guess the frequency of the chip's clock.
+ */
+ if (np->features & (FE_ULTRA3 | FE_ULTRA2))
+ np->clock_khz = 160000;
+ else if (np->features & FE_ULTRA)
+ np->clock_khz = 80000;
+ else
+ np->clock_khz = 40000;
+
+ /*
+ * Get the clock multiplier factor.
+ */
+ if (np->features & FE_QUAD)
+ np->multiplier = 4;
+ else if (np->features & FE_DBLR)
+ np->multiplier = 2;
+ else
+ np->multiplier = 1;
+
+ /*
+ * Measure SCSI clock frequency for chips
+ * it may vary from assumed one.
+ */
+ if (np->features & FE_VARCLK)
+ sym_getclock(np, np->multiplier);
+
+ /*
+ * Divisor to be used for async (timer pre-scaler).
+ */
+ i = np->clock_divn - 1;
+ while (--i >= 0) {
+ if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
+ ++i;
+ break;
+ }
+ }
+ np->rv_scntl3 = i+1;
+
+ /*
+ * The C1010 uses hardwired divisors for async.
+ * So, we just throw away, the async. divisor.:-)
+ */
+ if (np->features & FE_C10)
+ np->rv_scntl3 = 0;
+
+ /*
+ * Minimum synchronous period factor supported by the chip.
+ * Btw, 'period' is in tenths of nanoseconds.
+ */
+ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+
+ if (period <= 250) np->minsync = 10;
+ else if (period <= 303) np->minsync = 11;
+ else if (period <= 500) np->minsync = 12;
+ else np->minsync = (period + 40 - 1) / 40;
+
+ /*
+ * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+ */
+ if (np->minsync < 25 &&
+ !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
+ np->minsync = 25;
+ else if (np->minsync < 12 &&
+ !(np->features & (FE_ULTRA2|FE_ULTRA3)))
+ np->minsync = 12;
+
+ /*
+ * Maximum synchronous period factor supported by the chip.
+ */
+ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+ np->maxsync = period > 2540 ? 254 : period / 10;
+
+ /*
+ * If chip is a C1010, guess the sync limits in DT mode.
+ */
+ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
+ if (np->clock_khz == 160000) {
+ np->minsync_dt = 9;
+ np->maxsync_dt = 50;
+ np->maxoffs_dt = nvram->type ? 62 : 31;
+ }
+ }
+
+ /*
+ * 64 bit addressing (895A/896/1010) ?
+ */
+ if (np->features & FE_DAC) {
+ if (!use_dac(np))
+ np->rv_ccntl1 |= (DDAC);
+ else if (SYM_CONF_DMA_ADDRESSING_MODE == 1)
+ np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
+ else if (SYM_CONF_DMA_ADDRESSING_MODE == 2)
+ np->rv_ccntl1 |= (0 | EXTIBMV);
+ }
+
+ /*
+ * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
+ */
+ if (np->features & FE_NOPM)
+ np->rv_ccntl0 |= (ENPMJ);
+
+ /*
+ * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed.
+ * In dual channel mode, contention occurs if internal cycles
+ * are used. Disable internal cycles.
+ */
+ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
+ pdev->revision < 0x1)
+ np->rv_ccntl0 |= DILS;
+
+ /*
+ * Select burst length (dwords)
+ */
+ burst_max = SYM_SETUP_BURST_ORDER;
+ if (burst_max == 255)
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
+ np->sv_ctest5);
+ if (burst_max > 7)
+ burst_max = 7;
+ if (burst_max > np->maxburst)
+ burst_max = np->maxburst;
+
+ /*
+ * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
+ * This chip and the 860 Rev 1 may wrongly use PCI cache line
+ * based transactions on LOAD/STORE instructions. So we have
+ * to prevent these chips from using such PCI transactions in
+ * this driver. The generic ncr driver that does not use
+ * LOAD/STORE instructions does not need this work-around.
+ */
+ if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 &&
+ pdev->revision >= 0x10 && pdev->revision <= 0x11) ||
+ (pdev->device == PCI_DEVICE_ID_NCR_53C860 &&
+ pdev->revision <= 0x1))
+ np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
+
+ /*
+ * Select all supported special features.
+ * If we are using on-board RAM for scripts, prefetch (PFEN)
+ * does not help, but burst op fetch (BOF) does.
+ * Disabling PFEN makes sure BOF will be used.
+ */
+ if (np->features & FE_ERL)
+ np->rv_dmode |= ERL; /* Enable Read Line */
+ if (np->features & FE_BOF)
+ np->rv_dmode |= BOF; /* Burst Opcode Fetch */
+ if (np->features & FE_ERMP)
+ np->rv_dmode |= ERMP; /* Enable Read Multiple */
+#if 1
+ if ((np->features & FE_PFEN) && !np->ram_ba)
+#else
+ if (np->features & FE_PFEN)
+#endif
+ np->rv_dcntl |= PFEN; /* Prefetch Enable */
+ if (np->features & FE_CLSE)
+ np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
+ if (np->features & FE_WRIE)
+ np->rv_ctest3 |= WRIE; /* Write and Invalidate */
+ if (np->features & FE_DFS)
+ np->rv_ctest5 |= DFS; /* Dma Fifo Size */
+
+ /*
+ * Select some other
+ */
+ np->rv_ctest4 |= MPEE; /* Master parity checking */
+ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
+
+ /*
+ * Get parity checking, host ID and verbose mode from NVRAM
+ */
+ np->myaddr = 255;
+ np->scsi_mode = 0;
+ sym_nvram_setup_host(shost, np, nvram);
+
+ /*
+ * Get SCSI addr of host adapter (set by bios?).
+ */
+ if (np->myaddr == 255) {
+ np->myaddr = INB(np, nc_scid) & 0x07;
+ if (!np->myaddr)
+ np->myaddr = SYM_SETUP_HOST_ID;
+ }
+
+ /*
+ * Prepare initial io register bits for burst length
+ */
+ sym_init_burst(np, burst_max);
+
+ sym_set_bus_mode(np, nvram);
+
+ /*
+ * Set LED support from SCRIPTS.
+ * Ignore this feature for boards known to use a
+ * specific GPIO wiring and for the 895A, 896
+ * and 1010 that drive the LED directly.
+ */
+ if ((SYM_SETUP_SCSI_LED ||
+ (nvram->type == SYM_SYMBIOS_NVRAM ||
+ (nvram->type == SYM_TEKRAM_NVRAM &&
+ pdev->device == PCI_DEVICE_ID_NCR_53C895))) &&
+ !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
+ np->features |= FE_LED0;
+
+ /*
+ * Set irq mode.
+ */
+ switch(SYM_SETUP_IRQ_MODE & 3) {
+ case 2:
+ np->rv_dcntl |= IRQM;
+ break;
+ case 1:
+ np->rv_dcntl |= (np->sv_dcntl & IRQM);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Configure targets according to driver setup.
+ * If NVRAM present get targets setup from NVRAM.
+ */
+ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+ struct sym_tcb *tp = &np->target[i];
+
+ tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
+ tp->usrtags = SYM_SETUP_MAX_TAG;
+ tp->usr_width = np->maxwide;
+ tp->usr_period = 9;
+
+ sym_nvram_setup_target(tp, i, nvram);
+
+ if (!tp->usrtags)
+ tp->usrflags &= ~SYM_TAGS_ENABLED;
+ }
+
+ /*
+ * Let user know about the settings.
+ */
+ printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np),
+ sym_nvram_type(nvram), np->myaddr,
+ (np->features & FE_ULTRA3) ? 80 :
+ (np->features & FE_ULTRA2) ? 40 :
+ (np->features & FE_ULTRA) ? 20 : 10,
+ sym_scsi_bus_mode(np->scsi_mode),
+ (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
+ /*
+ * Tell him more on demand.
+ */
+ if (sym_verbose) {
+ printf("%s: %s IRQ line driver%s\n",
+ sym_name(np),
+ np->rv_dcntl & IRQM ? "totem pole" : "open drain",
+ np->ram_ba ? ", using on-chip SRAM" : "");
+ printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
+ if (np->features & FE_NOPM)
+ printf("%s: handling phase mismatch from SCRIPTS.\n",
+ sym_name(np));
+ }
+ /*
+ * And still more.
+ */
+ if (sym_verbose >= 2) {
+ printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+ np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+ printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+ np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+ }
+
+ return 0;
+}
+
+/*
+ * Test the pci bus snoop logic :-(
+ *
+ * Has to be called with interrupts disabled.
+ */
+#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
+static int sym_regtest(struct sym_hcb *np)
+{
+ register volatile u32 data;
+ /*
+ * chip registers may NOT be cached.
+ * write 0xffffffff to a read only register area,
+ * and try to read it back.
+ */
+ data = 0xffffffff;
+ OUTL(np, nc_dstat, data);
+ data = INL(np, nc_dstat);
+#if 1
+ if (data == 0xffffffff) {
+#else
+ if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+ printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+ (unsigned) data);
+ return 0x10;
+ }
+ return 0;
+}
+#else
+static inline int sym_regtest(struct sym_hcb *np)
+{
+ return 0;
+}
+#endif
+
+static int sym_snooptest(struct sym_hcb *np)
+{
+ u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
+ int i, err;
+
+ err = sym_regtest(np);
+ if (err)
+ return err;
+restart_test:
+ /*
+ * Enable Master Parity Checking as we intend
+ * to enable it for normal operations.
+ */
+ OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE));
+ /*
+ * init
+ */
+ pc = SCRIPTZ_BA(np, snooptest);
+ host_wr = 1;
+ sym_wr = 2;
+ /*
+ * Set memory and register.
+ */
+ np->scratch = cpu_to_scr(host_wr);
+ OUTL(np, nc_temp, sym_wr);
+ /*
+ * Start script (exchange values)
+ */
+ OUTL(np, nc_dsa, np->hcb_ba);
+ OUTL_DSP(np, pc);
+ /*
+ * Wait 'til done (with timeout)
+ */
+ for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
+ if (INB(np, nc_istat) & (INTF|SIP|DIP))
+ break;
+ if (i>=SYM_SNOOP_TIMEOUT) {
+ printf ("CACHE TEST FAILED: timeout.\n");
+ return (0x20);
+ }
+ /*
+ * Check for fatal DMA errors.
+ */
+ dstat = INB(np, nc_dstat);
+#if 1 /* Band aiding for broken hardwares that fail PCI parity */
+ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
+ printf ("%s: PCI DATA PARITY ERROR DETECTED - "
+ "DISABLING MASTER DATA PARITY CHECKING.\n",
+ sym_name(np));
+ np->rv_ctest4 &= ~MPEE;
+ goto restart_test;
+ }
+#endif
+ if (dstat & (MDPE|BF|IID)) {
+ printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
+ return (0x80);
+ }
+ /*
+ * Save termination position.
+ */
+ pc = INL(np, nc_dsp);
+ /*
+ * Read memory and register.
+ */
+ host_rd = scr_to_cpu(np->scratch);
+ sym_rd = INL(np, nc_scratcha);
+ sym_bk = INL(np, nc_temp);
+ /*
+ * Check termination position.
+ */
+ if (pc != SCRIPTZ_BA(np, snoopend)+8) {
+ printf ("CACHE TEST FAILED: script execution failed.\n");
+ printf ("start=%08lx, pc=%08lx, end=%08lx\n",
+ (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc,
+ (u_long) SCRIPTZ_BA(np, snoopend) +8);
+ return (0x40);
+ }
+ /*
+ * Show results.
+ */
+ if (host_wr != sym_rd) {
+ printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
+ (int) host_wr, (int) sym_rd);
+ err |= 1;
+ }
+ if (host_rd != sym_wr) {
+ printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
+ (int) sym_wr, (int) host_rd);
+ err |= 2;
+ }
+ if (sym_bk != sym_wr) {
+ printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
+ (int) sym_wr, (int) sym_bk);
+ err |= 4;
+ }
+
+ return err;
+}
+
+/*
+ * log message for real hard errors
+ *
+ * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc).
+ * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
+ *
+ * exception register:
+ * ds: dstat
+ * si: sist
+ *
+ * SCSI bus lines:
+ * so: control lines as driven by chip.
+ * si: control lines as seen by chip.
+ * sd: scsi data lines as seen by chip.
+ *
+ * wide/fastmode:
+ * sx: sxfer (see the manual)
+ * s3: scntl3 (see the manual)
+ * s4: scntl4 (see the manual)
+ *
+ * current script command:
+ * dsp: script address (relative to start of script).
+ * dbc: first word of script command.
+ *
+ * First 24 register of the chip:
+ * r0..rf
+ */
+static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ u32 dsp;
+ int script_ofs;
+ int script_size;
+ char *script_name;
+ u_char *script_base;
+ int i;
+
+ dsp = INL(np, nc_dsp);
+
+ if (dsp > np->scripta_ba &&
+ dsp <= np->scripta_ba + np->scripta_sz) {
+ script_ofs = dsp - np->scripta_ba;
+ script_size = np->scripta_sz;
+ script_base = (u_char *) np->scripta0;
+ script_name = "scripta";
+ }
+ else if (np->scriptb_ba < dsp &&
+ dsp <= np->scriptb_ba + np->scriptb_sz) {
+ script_ofs = dsp - np->scriptb_ba;
+ script_size = np->scriptb_sz;
+ script_base = (u_char *) np->scriptb0;
+ script_name = "scriptb";
+ } else {
+ script_ofs = dsp;
+ script_size = 0;
+ script_base = NULL;
+ script_name = "mem";
+ }
+
+ printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n",
+ sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist,
+ (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl),
+ (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer),
+ (unsigned)INB(np, nc_scntl3),
+ (np->features & FE_C10) ? (unsigned)INB(np, nc_scntl4) : 0,
+ script_name, script_ofs, (unsigned)INL(np, nc_dbc));
+
+ if (((script_ofs & 3) == 0) &&
+ (unsigned)script_ofs < script_size) {
+ printf ("%s: script cmd = %08x\n", sym_name(np),
+ scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
+ }
+
+ printf("%s: regdump:", sym_name(np));
+ for (i = 0; i < 24; i++)
+ printf(" %02x", (unsigned)INB_OFF(np, i));
+ printf(".\n");
+
+ /*
+ * PCI BUS error.
+ */
+ if (dstat & (MDPE|BF))
+ sym_log_bus_error(shost);
+}
+
+void sym_dump_registers(struct Scsi_Host *shost)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ u_short sist;
+ u_char dstat;
+
+ sist = INW(np, nc_sist);
+ dstat = INB(np, nc_dstat);
+ sym_log_hard_error(shost, sist, dstat);
+}
+
+static struct sym_chip sym_dev_table[] = {
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
+ FE_ERL}
+ ,
+#ifdef SYM_DEBUG_GENERIC_SUPPORT
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
+ FE_BOF}
+ ,
+#else
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
+ ,
+#endif
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64,
+ FE_BOF|FE_ERL}
+ ,
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64,
+ FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
+ ,
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2,
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
+ ,
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1,
+ FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
+ ,
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+ {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+#ifdef SYM_DEBUG_GENERIC_SUPPORT
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
+ FE_RAM|FE_LCKFRQ}
+ ,
+#else
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_LCKFRQ}
+ ,
+#endif
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
+ ,
+ {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
+ ,
+ {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
+ FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
+ FE_C10}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
+ FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
+ FE_C10|FE_U3EN}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
+ FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
+ FE_C10|FE_U3EN}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_IO256|FE_LEDC}
+};
+
+#define sym_num_devs (ARRAY_SIZE(sym_dev_table))
+
+/*
+ * Look up the chip table.
+ *
+ * Return a pointer to the chip entry if found,
+ * zero otherwise.
+ */
+struct sym_chip *
+sym_lookup_chip_table (u_short device_id, u_char revision)
+{
+ struct sym_chip *chip;
+ int i;
+
+ for (i = 0; i < sym_num_devs; i++) {
+ chip = &sym_dev_table[i];
+ if (device_id != chip->device_id)
+ continue;
+ if (revision > chip->revision_id)
+ continue;
+ return chip;
+ }
+
+ return NULL;
+}
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+/*
+ * Lookup the 64 bit DMA segments map.
+ * This is only used if the direct mapping
+ * has been unsuccessful.
+ */
+int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
+{
+ int i;
+
+ if (!use_dac(np))
+ goto weird;
+
+ /* Look up existing mappings */
+ for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
+ if (h == np->dmap_bah[i])
+ return i;
+ }
+ /* If direct mapping is free, get it */
+ if (!np->dmap_bah[s])
+ goto new;
+ /* Collision -> lookup free mappings */
+ for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
+ if (!np->dmap_bah[s])
+ goto new;
+ }
+weird:
+ panic("sym: ran out of 64 bit DMA segment registers");
+ return -1;
+new:
+ np->dmap_bah[s] = h;
+ np->dmap_dirty = 1;
+ return s;
+}
+
+/*
+ * Update IO registers scratch C..R so they will be
+ * in sync. with queued CCB expectations.
+ */
+static void sym_update_dmap_regs(struct sym_hcb *np)
+{
+ int o, i;
+
+ if (!np->dmap_dirty)
+ return;
+ o = offsetof(struct sym_reg, nc_scrx[0]);
+ for (i = 0; i < SYM_DMAP_SIZE; i++) {
+ OUTL_OFF(np, o, np->dmap_bah[i]);
+ o += 4;
+ }
+ np->dmap_dirty = 0;
+}
+#endif
+
+/* Enforce all the fiddly SPI rules and the chip limitations */
+static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
+ struct sym_trans *goal)
+{
+ if (!spi_support_wide(starget))
+ goal->width = 0;
+
+ if (!spi_support_sync(starget)) {
+ goal->iu = 0;
+ goal->dt = 0;
+ goal->qas = 0;
+ goal->offset = 0;
+ return;
+ }
+
+ if (spi_support_dt(starget)) {
+ if (spi_support_dt_only(starget))
+ goal->dt = 1;
+
+ if (goal->offset == 0)
+ goal->dt = 0;
+ } else {
+ goal->dt = 0;
+ }
+
+ /* Some targets fail to properly negotiate DT in SE mode */
+ if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
+ goal->dt = 0;
+
+ if (goal->dt) {
+ /* all DT transfers must be wide */
+ goal->width = 1;
+ if (goal->offset > np->maxoffs_dt)
+ goal->offset = np->maxoffs_dt;
+ if (goal->period < np->minsync_dt)
+ goal->period = np->minsync_dt;
+ if (goal->period > np->maxsync_dt)
+ goal->period = np->maxsync_dt;
+ } else {
+ goal->iu = goal->qas = 0;
+ if (goal->offset > np->maxoffs)
+ goal->offset = np->maxoffs;
+ if (goal->period < np->minsync)
+ goal->period = np->minsync;
+ if (goal->period > np->maxsync)
+ goal->period = np->maxsync;
+ }
+}
+
+/*
+ * Prepare the next negotiation message if needed.
+ *
+ * Fill in the part of message buffer that contains the
+ * negotiation and the nego_status field of the CCB.
+ * Returns the size of the message in bytes.
+ */
+static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
+{
+ struct sym_tcb *tp = &np->target[cp->target];
+ struct scsi_target *starget = tp->starget;
+ struct sym_trans *goal = &tp->tgoal;
+ int msglen = 0;
+ int nego;
+
+ sym_check_goals(np, starget, goal);
+
+ /*
+ * Many devices implement PPR in a buggy way, so only use it if we
+ * really want to.
+ */
+ if (goal->renego == NS_PPR || (goal->offset &&
+ (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
+ nego = NS_PPR;
+ } else if (goal->renego == NS_WIDE || goal->width) {
+ nego = NS_WIDE;
+ } else if (goal->renego == NS_SYNC || goal->offset) {
+ nego = NS_SYNC;
+ } else {
+ goal->check_nego = 0;
+ nego = 0;
+ }
+
+ switch (nego) {
+ case NS_SYNC:
+ msglen += spi_populate_sync_msg(msgptr + msglen, goal->period,
+ goal->offset);
+ break;
+ case NS_WIDE:
+ msglen += spi_populate_width_msg(msgptr + msglen, goal->width);
+ break;
+ case NS_PPR:
+ msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period,
+ goal->offset, goal->width,
+ (goal->iu ? PPR_OPT_IU : 0) |
+ (goal->dt ? PPR_OPT_DT : 0) |
+ (goal->qas ? PPR_OPT_QAS : 0));
+ break;
+ }
+
+ cp->nego_status = nego;
+
+ if (nego) {
+ tp->nego_cp = cp; /* Keep track a nego will be performed */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, cp->target,
+ nego == NS_SYNC ? "sync msgout" :
+ nego == NS_WIDE ? "wide msgout" :
+ "ppr msgout", msgptr);
+ }
+ }
+
+ return msglen;
+}
+
+/*
+ * Insert a job into the start queue.
+ */
+void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
+{
+ u_short qidx;
+
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If the previously queued CCB is not yet done,
+ * set the IARB hint. The SCRIPTS will go with IARB
+ * for this job when starting the previous one.
+ * We leave devices a chance to win arbitration by
+ * not using more than 'iarb_max' consecutive
+ * immediate arbitrations.
+ */
+ if (np->last_cp && np->iarb_count < np->iarb_max) {
+ np->last_cp->host_flags |= HF_HINT_IARB;
+ ++np->iarb_count;
+ }
+ else
+ np->iarb_count = 0;
+ np->last_cp = cp;
+#endif
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+ /*
+ * Make SCRIPTS aware of the 64 bit DMA
+ * segment registers not being up-to-date.
+ */
+ if (np->dmap_dirty)
+ cp->host_xflags |= HX_DMAP_DIRTY;
+#endif
+
+ /*
+ * Insert first the idle task and then our job.
+ * The MBs should ensure proper ordering.
+ */
+ qidx = np->squeueput + 2;
+ if (qidx >= MAX_QUEUE*2) qidx = 0;
+
+ np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
+ MEMORY_WRITE_BARRIER();
+ np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
+
+ np->squeueput = qidx;
+
+ if (DEBUG_FLAGS & DEBUG_QUEUE)
+ scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n",
+ np->squeueput);
+
+ /*
+ * Script processor may be waiting for reselect.
+ * Wake it up.
+ */
+ MEMORY_WRITE_BARRIER();
+ OUTB(np, nc_istat, SIGP|np->istat_sem);
+}
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+/*
+ * Start next ready-to-start CCBs.
+ */
+void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
+{
+ SYM_QUEHEAD *qp;
+ struct sym_ccb *cp;
+
+ /*
+ * Paranoia, as usual. :-)
+ */
+ assert(!lp->started_tags || !lp->started_no_tag);
+
+ /*
+ * Try to start as many commands as asked by caller.
+ * Prevent from having both tagged and untagged
+ * commands queued to the device at the same time.
+ */
+ while (maxn--) {
+ qp = sym_remque_head(&lp->waiting_ccbq);
+ if (!qp)
+ break;
+ cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
+ if (cp->tag != NO_TAG) {
+ if (lp->started_no_tag ||
+ lp->started_tags >= lp->started_max) {
+ sym_insque_head(qp, &lp->waiting_ccbq);
+ break;
+ }
+ lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
+ lp->head.resel_sa =
+ cpu_to_scr(SCRIPTA_BA(np, resel_tag));
+ ++lp->started_tags;
+ } else {
+ if (lp->started_no_tag || lp->started_tags) {
+ sym_insque_head(qp, &lp->waiting_ccbq);
+ break;
+ }
+ lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
+ lp->head.resel_sa =
+ cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
+ ++lp->started_no_tag;
+ }
+ cp->started = 1;
+ sym_insque_tail(qp, &lp->started_ccbq);
+ sym_put_start_queue(np, cp);
+ }
+}
+#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */
+
+/*
+ * The chip may have completed jobs. Look at the DONE QUEUE.
+ *
+ * On paper, memory read barriers may be needed here to
+ * prevent out of order LOADs by the CPU from having
+ * prefetched stale data prior to DMA having occurred.
+ */
+static int sym_wakeup_done (struct sym_hcb *np)
+{
+ struct sym_ccb *cp;
+ int i, n;
+ u32 dsa;
+
+ n = 0;
+ i = np->dqueueget;
+
+ /* MEMORY_READ_BARRIER(); */
+ while (1) {
+ dsa = scr_to_cpu(np->dqueue[i]);
+ if (!dsa)
+ break;
+ np->dqueue[i] = 0;
+ if ((i = i+2) >= MAX_QUEUE*2)
+ i = 0;
+
+ cp = sym_ccb_from_dsa(np, dsa);
+ if (cp) {
+ MEMORY_READ_BARRIER();
+ sym_complete_ok (np, cp);
+ ++n;
+ }
+ else
+ printf ("%s: bad DSA (%x) in done queue.\n",
+ sym_name(np), (u_int) dsa);
+ }
+ np->dqueueget = i;
+
+ return n;
+}
+
+/*
+ * Complete all CCBs queued to the COMP queue.
+ *
+ * These CCBs are assumed:
+ * - Not to be referenced either by devices or
+ * SCRIPTS-related queues and datas.
+ * - To have to be completed with an error condition
+ * or requeued.
+ *
+ * The device queue freeze count is incremented
+ * for each CCB that does not prevent this.
+ * This function is called when all CCBs involved
+ * in error handling/recovery have been reaped.
+ */
+static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
+{
+ SYM_QUEHEAD *qp;
+ struct sym_ccb *cp;
+
+ while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
+ struct scsi_cmnd *cmd;
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
+ /* Leave quiet CCBs waiting for resources */
+ if (cp->host_status == HS_WAIT)
+ continue;
+ cmd = cp->cmd;
+ if (cam_status)
+ sym_set_cam_status(cmd, cam_status);
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) {
+ struct sym_tcb *tp = &np->target[cp->target];
+ struct sym_lcb *lp = sym_lp(tp, cp->lun);
+ if (lp) {
+ sym_remque(&cp->link2_ccbq);
+ sym_insque_tail(&cp->link2_ccbq,
+ &lp->waiting_ccbq);
+ if (cp->started) {
+ if (cp->tag != NO_TAG)
+ --lp->started_tags;
+ else
+ --lp->started_no_tag;
+ }
+ }
+ cp->started = 0;
+ continue;
+ }
+#endif
+ sym_free_ccb(np, cp);
+ sym_xpt_done(np, cmd);
+ }
+}
+
+/*
+ * Complete all active CCBs with error.
+ * Used on CHIP/SCSI RESET.
+ */
+static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
+{
+ /*
+ * Move all active CCBs to the COMP queue
+ * and flush this queue.
+ */
+ sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
+ sym_que_init(&np->busy_ccbq);
+ sym_flush_comp_queue(np, cam_status);
+}
+
+/*
+ * Start chip.
+ *
+ * 'reason' means:
+ * 0: initialisation.
+ * 1: SCSI BUS RESET delivered or received.
+ * 2: SCSI BUS MODE changed.
+ */
+void sym_start_up(struct Scsi_Host *shost, int reason)
+{
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+ int i;
+ u32 phys;
+
+ /*
+ * Reset chip if asked, otherwise just clear fifos.
+ */
+ if (reason == 1)
+ sym_soft_reset(np);
+ else {
+ OUTB(np, nc_stest3, TE|CSF);
+ OUTONB(np, nc_ctest3, CLF);
+ }
+
+ /*
+ * Clear Start Queue
+ */
+ phys = np->squeue_ba;
+ for (i = 0; i < MAX_QUEUE*2; i += 2) {
+ np->squeue[i] = cpu_to_scr(np->idletask_ba);
+ np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
+ }
+ np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
+
+ /*
+ * Start at first entry.
+ */
+ np->squeueput = 0;
+
+ /*
+ * Clear Done Queue
+ */
+ phys = np->dqueue_ba;
+ for (i = 0; i < MAX_QUEUE*2; i += 2) {
+ np->dqueue[i] = 0;
+ np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
+ }
+ np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
+
+ /*
+ * Start at first entry.
+ */
+ np->dqueueget = 0;
+
+ /*
+ * Install patches in scripts.
+ * This also let point to first position the start
+ * and done queue pointers used from SCRIPTS.
+ */
+ np->fw_patch(shost);
+
+ /*
+ * Wakeup all pending jobs.
+ */
+ sym_flush_busy_queue(np, DID_RESET);
+
+ /*
+ * Init chip.
+ */
+ OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */
+ INB(np, nc_mbox1);
+ udelay(2000); /* The 895 needs time for the bus mode to settle */
+
+ OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
+ /* full arb., ena parity, par->ATN */
+ OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
+
+ sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB(np, nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW(np, nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB(np, nc_istat , SIGP ); /* Signal Process */
+ OUTB(np, nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB(np, nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB(np, nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB(np, nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ /* Extended Sreq/Sack filtering not supported on the C10 */
+ if (np->features & FE_C10)
+ OUTB(np, nc_stest2, np->rv_stest2);
+ else
+ OUTB(np, nc_stest2, EXT|np->rv_stest2);
+
+ OUTB(np, nc_stest3, TE); /* TolerANT enable */
+ OUTB(np, nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
+
+ /*
+ * For now, disable AIP generation on C1010-66.
+ */
+ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66)
+ OUTB(np, nc_aipcntl1, DISAIP);
+
+ /*
+ * C10101 rev. 0 errata.
+ * Errant SGE's when in narrow. Write bits 4 & 5 of
+ * STEST1 register to disable SGE. We probably should do
+ * that from SCRIPTS for each selection/reselection, but
+ * I just don't want. :)
+ */
+ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
+ pdev->revision < 1)
+ OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);
+
+ /*
+ * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
+ * Disable overlapped arbitration for some dual function devices,
+ * regardless revision id (kind of post-chip-design feature. ;-))
+ */
+ if (pdev->device == PCI_DEVICE_ID_NCR_53C875)
+ OUTB(np, nc_ctest0, (1<<5));
+ else if (pdev->device == PCI_DEVICE_ID_NCR_53C896)
+ np->rv_ccntl0 |= DPR;
+
+ /*
+ * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
+ * and/or hardware phase mismatch, since only such chips
+ * seem to support those IO registers.
+ */
+ if (np->features & (FE_DAC|FE_NOPM)) {
+ OUTB(np, nc_ccntl0, np->rv_ccntl0);
+ OUTB(np, nc_ccntl1, np->rv_ccntl1);
+ }
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+ /*
+ * Set up scratch C and DRS IO registers to map the 32 bit
+ * DMA address range our data structures are located in.
+ */
+ if (use_dac(np)) {
+ np->dmap_bah[0] = 0; /* ??? */
+ OUTL(np, nc_scrx[0], np->dmap_bah[0]);
+ OUTL(np, nc_drs, np->dmap_bah[0]);
+ }
+#endif
+
+ /*
+ * If phase mismatch handled by scripts (895A/896/1010),
+ * set PM jump addresses.
+ */
+ if (np->features & FE_NOPM) {
+ OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
+ OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
+ }
+
+ /*
+ * Enable GPIO0 pin for writing if LED support from SCRIPTS.
+ * Also set GPIO5 and clear GPIO6 if hardware LED control.
+ */
+ if (np->features & FE_LED0)
+ OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
+ else if (np->features & FE_LEDC)
+ OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);
+
+ /*
+ * enable ints
+ */
+ OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+ OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);
+
+ /*
+ * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
+ * Try to eat the spurious SBMC interrupt that may occur when
+ * we reset the chip but not the SCSI BUS (at initialization).
+ */
+ if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
+ OUTONW(np, nc_sien, SBMC);
+ if (reason == 0) {
+ INB(np, nc_mbox1);
+ mdelay(100);
+ INW(np, nc_sist);
+ }
+ np->scsi_mode = INB(np, nc_stest4) & SMODE;
+ }
+
+ /*
+ * Fill in target structure.
+ * Reinitialize usrsync.
+ * Reinitialize usrwide.
+ * Prepare sync negotiation according to actual SCSI bus mode.
+ */
+ for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
+ struct sym_tcb *tp = &np->target[i];
+
+ tp->to_reset = 0;
+ tp->head.sval = 0;
+ tp->head.wval = np->rv_scntl3;
+ tp->head.uval = 0;
+ if (tp->lun0p)
+ tp->lun0p->to_clear = 0;
+ if (tp->lunmp) {
+ int ln;
+
+ for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
+ if (tp->lunmp[ln])
+ tp->lunmp[ln]->to_clear = 0;
+ }
+ }
+
+ /*
+ * Download SCSI SCRIPTS to on-chip RAM if present,
+ * and start script processor.
+ * We do the download preferently from the CPU.
+ * For platforms that may not support PCI memory mapping,
+ * we use simple SCRIPTS that performs MEMORY MOVEs.
+ */
+ phys = SCRIPTA_BA(np, init);
+ if (np->ram_ba) {
+ if (sym_verbose >= 2)
+ printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
+ memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
+ if (np->features & FE_RAM8K) {
+ memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
+ phys = scr_to_cpu(np->scr_ram_seg);
+ OUTL(np, nc_mmws, phys);
+ OUTL(np, nc_mmrs, phys);
+ OUTL(np, nc_sfs, phys);
+ phys = SCRIPTB_BA(np, start64);
+ }
+ }
+
+ np->istat_sem = 0;
+
+ OUTL(np, nc_dsa, np->hcb_ba);
+ OUTL_DSP(np, phys);
+
+ /*
+ * Notify the XPT about the RESET condition.
+ */
+ if (reason != 0)
+ sym_xpt_async_bus_reset(np);
+}
+
+/*
+ * Switch trans mode for current job and its target.
+ */
+static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
+ u_char per, u_char wide, u_char div, u_char fak)
+{
+ SYM_QUEHEAD *qp;
+ u_char sval, wval, uval;
+ struct sym_tcb *tp = &np->target[target];
+
+ assert(target == (INB(np, nc_sdid) & 0x0f));
+
+ sval = tp->head.sval;
+ wval = tp->head.wval;
+ uval = tp->head.uval;
+
+#if 0
+ printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
+ sval, wval, uval, np->rv_scntl3);
+#endif
+ /*
+ * Set the offset.
+ */
+ if (!(np->features & FE_C10))
+ sval = (sval & ~0x1f) | ofs;
+ else
+ sval = (sval & ~0x3f) | ofs;
+
+ /*
+ * Set the sync divisor and extra clock factor.
+ */
+ if (ofs != 0) {
+ wval = (wval & ~0x70) | ((div+1) << 4);
+ if (!(np->features & FE_C10))
+ sval = (sval & ~0xe0) | (fak << 5);
+ else {
+ uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
+ if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
+ if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
+ }
+ }
+
+ /*
+ * Set the bus width.
+ */
+ wval = wval & ~EWS;
+ if (wide != 0)
+ wval |= EWS;
+
+ /*
+ * Set misc. ultra enable bits.
+ */
+ if (np->features & FE_C10) {
+ uval = uval & ~(U3EN|AIPCKEN);
+ if (opts) {
+ assert(np->features & FE_U3EN);
+ uval |= U3EN;
+ }
+ } else {
+ wval = wval & ~ULTRA;
+ if (per <= 12) wval |= ULTRA;
+ }
+
+ /*
+ * Stop there if sync parameters are unchanged.
+ */
+ if (tp->head.sval == sval &&
+ tp->head.wval == wval &&
+ tp->head.uval == uval)
+ return;
+ tp->head.sval = sval;
+ tp->head.wval = wval;
+ tp->head.uval = uval;
+
+ /*
+ * Disable extended Sreq/Sack filtering if per < 50.
+ * Not supported on the C1010.
+ */
+ if (per < 50 && !(np->features & FE_C10))
+ OUTOFFB(np, nc_stest2, EXT);
+
+ /*
+ * set actual value and sync_status
+ */
+ OUTB(np, nc_sxfer, tp->head.sval);
+ OUTB(np, nc_scntl3, tp->head.wval);
+
+ if (np->features & FE_C10) {
+ OUTB(np, nc_scntl4, tp->head.uval);
+ }
+
+ /*
+ * patch ALL busy ccbs of this target.
+ */
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ struct sym_ccb *cp;
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ if (cp->target != target)
+ continue;
+ cp->phys.select.sel_scntl3 = tp->head.wval;
+ cp->phys.select.sel_sxfer = tp->head.sval;
+ if (np->features & FE_C10) {
+ cp->phys.select.sel_scntl4 = tp->head.uval;
+ }
+ }
+}
+
+static void sym_announce_transfer_rate(struct sym_tcb *tp)
+{
+ struct scsi_target *starget = tp->starget;
+
+ if (tp->tprint.period != spi_period(starget) ||
+ tp->tprint.offset != spi_offset(starget) ||
+ tp->tprint.width != spi_width(starget) ||
+ tp->tprint.iu != spi_iu(starget) ||
+ tp->tprint.dt != spi_dt(starget) ||
+ tp->tprint.qas != spi_qas(starget) ||
+ !tp->tprint.check_nego) {
+ tp->tprint.period = spi_period(starget);
+ tp->tprint.offset = spi_offset(starget);
+ tp->tprint.width = spi_width(starget);
+ tp->tprint.iu = spi_iu(starget);
+ tp->tprint.dt = spi_dt(starget);
+ tp->tprint.qas = spi_qas(starget);
+ tp->tprint.check_nego = 1;
+
+ spi_display_xfer_agreement(starget);
+ }
+}
+
+/*
+ * We received a WDTR.
+ * Let everything be aware of the changes.
+ */
+static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
+{
+ struct sym_tcb *tp = &np->target[target];
+ struct scsi_target *starget = tp->starget;
+
+ sym_settrans(np, target, 0, 0, 0, wide, 0, 0);
+
+ if (wide)
+ tp->tgoal.renego = NS_WIDE;
+ else
+ tp->tgoal.renego = 0;
+ tp->tgoal.check_nego = 0;
+ tp->tgoal.width = wide;
+ spi_offset(starget) = 0;
+ spi_period(starget) = 0;
+ spi_width(starget) = wide;
+ spi_iu(starget) = 0;
+ spi_dt(starget) = 0;
+ spi_qas(starget) = 0;
+
+ if (sym_verbose >= 3)
+ sym_announce_transfer_rate(tp);
+}
+
+/*
+ * We received a SDTR.
+ * Let everything be aware of the changes.
+ */
+static void
+sym_setsync(struct sym_hcb *np, int target,
+ u_char ofs, u_char per, u_char div, u_char fak)
+{
+ struct sym_tcb *tp = &np->target[target];
+ struct scsi_target *starget = tp->starget;
+ u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;
+
+ sym_settrans(np, target, 0, ofs, per, wide, div, fak);
+
+ if (wide)
+ tp->tgoal.renego = NS_WIDE;
+ else if (ofs)
+ tp->tgoal.renego = NS_SYNC;
+ else
+ tp->tgoal.renego = 0;
+ spi_period(starget) = per;
+ spi_offset(starget) = ofs;
+ spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;
+
+ if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
+ tp->tgoal.period = per;
+ tp->tgoal.offset = ofs;
+ tp->tgoal.check_nego = 0;
+ }
+
+ sym_announce_transfer_rate(tp);
+}
+
+/*
+ * We received a PPR.
+ * Let everything be aware of the changes.
+ */
+static void
+sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
+ u_char per, u_char wide, u_char div, u_char fak)
+{
+ struct sym_tcb *tp = &np->target[target];
+ struct scsi_target *starget = tp->starget;
+
+ sym_settrans(np, target, opts, ofs, per, wide, div, fak);
+
+ if (wide || ofs)
+ tp->tgoal.renego = NS_PPR;
+ else
+ tp->tgoal.renego = 0;
+ spi_width(starget) = tp->tgoal.width = wide;
+ spi_period(starget) = tp->tgoal.period = per;
+ spi_offset(starget) = tp->tgoal.offset = ofs;
+ spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
+ spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
+ spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
+ tp->tgoal.check_nego = 0;
+
+ sym_announce_transfer_rate(tp);
+}
+
+/*
+ * generic recovery from scsi interrupt
+ *
+ * The doc says that when the chip gets an SCSI interrupt,
+ * it tries to stop in an orderly fashion, by completing
+ * an instruction fetch that had started or by flushing
+ * the DMA fifo for a write to memory that was executing.
+ * Such a fashion is not enough to know if the instruction
+ * that was just before the current DSP value has been
+ * executed or not.
+ *
+ * There are some small SCRIPTS sections that deal with
+ * the start queue and the done queue that may break any
+ * assomption from the C code if we are interrupted
+ * inside, so we reset if this happens. Btw, since these
+ * SCRIPTS sections are executed while the SCRIPTS hasn't
+ * started SCSI operations, it is very unlikely to happen.
+ *
+ * All the driver data structures are supposed to be
+ * allocated from the same 4 GB memory window, so there
+ * is a 1 to 1 relationship between DSA and driver data
+ * structures. Since we are careful :) to invalidate the
+ * DSA when we complete a command or when the SCRIPTS
+ * pushes a DSA into a queue, we can trust it when it
+ * points to a CCB.
+ */
+static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
+{
+ u32 dsp = INL(np, nc_dsp);
+ u32 dsa = INL(np, nc_dsa);
+ struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
+
+ /*
+ * If we haven't been interrupted inside the SCRIPTS
+ * critical pathes, we can safely restart the SCRIPTS
+ * and trust the DSA value if it matches a CCB.
+ */
+ if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
+ dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
+ (!(dsp > SCRIPTA_BA(np, ungetjob) &&
+ dsp < SCRIPTA_BA(np, reselect) + 1)) &&
+ (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
+ dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
+ (!(dsp > SCRIPTA_BA(np, done) &&
+ dsp < SCRIPTA_BA(np, done_end) + 1))) {
+ OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
+ OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */
+ /*
+ * If we have a CCB, let the SCRIPTS call us back for
+ * the handling of the error with SCRATCHA filled with
+ * STARTPOS. This way, we will be able to freeze the
+ * device queue and requeue awaiting IOs.
+ */
+ if (cp) {
+ cp->host_status = hsts;
+ OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
+ }
+ /*
+ * Otherwise just restart the SCRIPTS.
+ */
+ else {
+ OUTL(np, nc_dsa, 0xffffff);
+ OUTL_DSP(np, SCRIPTA_BA(np, start));
+ }
+ }
+ else
+ goto reset_all;
+
+ return;
+
+reset_all:
+ sym_start_reset(np);
+}
+
+/*
+ * chip exception handler for selection timeout
+ */
+static void sym_int_sto (struct sym_hcb *np)
+{
+ u32 dsp = INL(np, nc_dsp);
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
+
+ if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
+ sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
+ else
+ sym_start_reset(np);
+}
+
+/*
+ * chip exception handler for unexpected disconnect
+ */
+static void sym_int_udc (struct sym_hcb *np)
+{
+ printf ("%s: unexpected disconnect\n", sym_name(np));
+ sym_recover_scsi_int(np, HS_UNEXPECTED);
+}
+
+/*
+ * chip exception handler for SCSI bus mode change
+ *
+ * spi2-r12 11.2.3 says a transceiver mode change must
+ * generate a reset event and a device that detects a reset
+ * event shall initiate a hard reset. It says also that a
+ * device that detects a mode change shall set data transfer
+ * mode to eight bit asynchronous, etc...
+ * So, just reinitializing all except chip should be enough.
+ */
+static void sym_int_sbmc(struct Scsi_Host *shost)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ u_char scsi_mode = INB(np, nc_stest4) & SMODE;
+
+ /*
+ * Notify user.
+ */
+ printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
+ sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
+
+ /*
+ * Should suspend command processing for a few seconds and
+ * reinitialize all except the chip.
+ */
+ sym_start_up(shost, 2);
+}
+
+/*
+ * chip exception handler for SCSI parity error.
+ *
+ * When the chip detects a SCSI parity error and is
+ * currently executing a (CH)MOV instruction, it does
+ * not interrupt immediately, but tries to finish the
+ * transfer of the current scatter entry before
+ * interrupting. The following situations may occur:
+ *
+ * - The complete scatter entry has been transferred
+ * without the device having changed phase.
+ * The chip will then interrupt with the DSP pointing
+ * to the instruction that follows the MOV.
+ *
+ * - A phase mismatch occurs before the MOV finished
+ * and phase errors are to be handled by the C code.
+ * The chip will then interrupt with both PAR and MA
+ * conditions set.
+ *
+ * - A phase mismatch occurs before the MOV finished and
+ * phase errors are to be handled by SCRIPTS.
+ * The chip will load the DSP with the phase mismatch
+ * JUMP address and interrupt the host processor.
+ */
+static void sym_int_par (struct sym_hcb *np, u_short sist)
+{
+ u_char hsts = INB(np, HS_PRT);
+ u32 dsp = INL(np, nc_dsp);
+ u32 dbc = INL(np, nc_dbc);
+ u32 dsa = INL(np, nc_dsa);
+ u_char sbcl = INB(np, nc_sbcl);
+ u_char cmd = dbc >> 24;
+ int phase = cmd & 7;
+ struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
+
+ if (printk_ratelimit())
+ printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+ sym_name(np), hsts, dbc, sbcl);
+
+ /*
+ * Check that the chip is connected to the SCSI BUS.
+ */
+ if (!(INB(np, nc_scntl1) & ISCON)) {
+ sym_recover_scsi_int(np, HS_UNEXPECTED);
+ return;
+ }
+
+ /*
+ * If the nexus is not clearly identified, reset the bus.
+ * We will try to do better later.
+ */
+ if (!cp)
+ goto reset_all;
+
+ /*
+ * Check instruction was a MOV, direction was INPUT and
+ * ATN is asserted.
+ */
+ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
+ goto reset_all;
+
+ /*
+ * Keep track of the parity error.
+ */
+ OUTONB(np, HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_PARITY_ERR;
+
+ /*
+ * Prepare the message to send to the device.
+ */
+ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
+
+ /*
+ * If the old phase was DATA IN phase, we have to deal with
+ * the 3 situations described above.
+ * For other input phases (MSG IN and STATUS), the device
+ * must resend the whole thing that failed parity checking
+ * or signal error. So, jumping to dispatcher should be OK.
+ */
+ if (phase == 1 || phase == 5) {
+ /* Phase mismatch handled by SCRIPTS */
+ if (dsp == SCRIPTB_BA(np, pm_handle))
+ OUTL_DSP(np, dsp);
+ /* Phase mismatch handled by the C code */
+ else if (sist & MA)
+ sym_int_ma (np);
+ /* No phase mismatch occurred */
+ else {
+ sym_set_script_dp (np, cp, dsp);
+ OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
+ }
+ }
+ else if (phase == 7) /* We definitely cannot handle parity errors */
+#if 1 /* in message-in phase due to the relection */
+ goto reset_all; /* path and various message anticipations. */
+#else
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+#endif
+ else
+ OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
+ return;
+
+reset_all:
+ sym_start_reset(np);
+ return;
+}
+
+/*
+ * chip exception handler for phase errors.
+ *
+ * We have to construct a new transfer descriptor,
+ * to transfer the rest of the current block.
+ */
+static void sym_int_ma (struct sym_hcb *np)
+{
+ u32 dbc;
+ u32 rest;
+ u32 dsp;
+ u32 dsa;
+ u32 nxtdsp;
+ u32 *vdsp;
+ u32 oadr, olen;
+ u32 *tblp;
+ u32 newcmd;
+ u_int delta;
+ u_char cmd;
+ u_char hflags, hflags0;
+ struct sym_pmc *pm;
+ struct sym_ccb *cp;
+
+ dsp = INL(np, nc_dsp);
+ dbc = INL(np, nc_dbc);
+ dsa = INL(np, nc_dsa);
+
+ cmd = dbc >> 24;
+ rest = dbc & 0xffffff;
+ delta = 0;
+
+ /*
+ * locate matching cp if any.
+ */
+ cp = sym_ccb_from_dsa(np, dsa);
+
+ /*
+ * Donnot take into account dma fifo and various buffers in
+ * INPUT phase since the chip flushes everything before
+ * raising the MA interrupt for interrupted INPUT phases.
+ * For DATA IN phase, we will check for the SWIDE later.
+ */
+ if ((cmd & 7) != 1 && (cmd & 7) != 5) {
+ u_char ss0, ss2;
+
+ if (np->features & FE_DFBC)
+ delta = INW(np, nc_dfbc);
+ else {
+ u32 dfifo;
+
+ /*
+ * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
+ */
+ dfifo = INL(np, nc_dfifo);
+
+ /*
+ * Calculate remaining bytes in DMA fifo.
+ * (CTEST5 = dfifo >> 16)
+ */
+ if (dfifo & (DFS << 16))
+ delta = ((((dfifo >> 8) & 0x300) |
+ (dfifo & 0xff)) - rest) & 0x3ff;
+ else
+ delta = ((dfifo & 0xff) - rest) & 0x7f;
+ }
+
+ /*
+ * The data in the dma fifo has not been transferred to
+ * the target -> add the amount to the rest
+ * and clear the data.
+ * Check the sstat2 register in case of wide transfer.
+ */
+ rest += delta;
+ ss0 = INB(np, nc_sstat0);
+ if (ss0 & OLF) rest++;
+ if (!(np->features & FE_C10))
+ if (ss0 & ORF) rest++;
+ if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
+ ss2 = INB(np, nc_sstat2);
+ if (ss2 & OLF1) rest++;
+ if (!(np->features & FE_C10))
+ if (ss2 & ORF1) rest++;
+ }
+
+ /*
+ * Clear fifos.
+ */
+ OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
+ OUTB(np, nc_stest3, TE|CSF); /* scsi fifo */
+ }
+
+ /*
+ * log the information
+ */
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
+ (unsigned) rest, (unsigned) delta);
+
+ /*
+ * try to find the interrupted script command,
+ * and the address at which to continue.
+ */
+ vdsp = NULL;
+ nxtdsp = 0;
+ if (dsp > np->scripta_ba &&
+ dsp <= np->scripta_ba + np->scripta_sz) {
+ vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
+ nxtdsp = dsp;
+ }
+ else if (dsp > np->scriptb_ba &&
+ dsp <= np->scriptb_ba + np->scriptb_sz) {
+ vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
+ nxtdsp = dsp;
+ }
+
+ /*
+ * log the information
+ */
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+ cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
+ }
+
+ if (!vdsp) {
+ printf ("%s: interrupted SCRIPT address not found.\n",
+ sym_name (np));
+ goto reset_all;
+ }
+
+ if (!cp) {
+ printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
+ sym_name (np));
+ goto reset_all;
+ }
+
+ /*
+ * get old startaddress and old length.
+ */
+ oadr = scr_to_cpu(vdsp[1]);
+
+ if (cmd & 0x10) { /* Table indirect */
+ tblp = (u32 *) ((char*) &cp->phys + oadr);
+ olen = scr_to_cpu(tblp[0]);
+ oadr = scr_to_cpu(tblp[1]);
+ } else {
+ tblp = (u32 *) 0;
+ olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+ (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+ tblp,
+ (unsigned) olen,
+ (unsigned) oadr);
+ }
+
+ /*
+ * check cmd against assumed interrupted script command.
+ * If dt data phase, the MOVE instruction hasn't bit 4 of
+ * the phase.
+ */
+ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
+ sym_print_addr(cp->cmd,
+ "internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+ cmd, scr_to_cpu(vdsp[0]) >> 24);
+
+ goto reset_all;
+ }
+
+ /*
+ * if old phase not dataphase, leave here.
+ */
+ if (cmd & 2) {
+ sym_print_addr(cp->cmd,
+ "phase change %x-%x %d@%08x resid=%d.\n",
+ cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen,
+ (unsigned)oadr, (unsigned)rest);
+ goto unexpected_phase;
+ }
+
+ /*
+ * Choose the correct PM save area.
+ *
+ * Look at the PM_SAVE SCRIPT if you want to understand
+ * this stuff. The equivalent code is implemented in
+ * SCRIPTS for the 895A, 896 and 1010 that are able to
+ * handle PM from the SCRIPTS processor.
+ */
+ hflags0 = INB(np, HF_PRT);
+ hflags = hflags0;
+
+ if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
+ if (hflags & HF_IN_PM0)
+ nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
+ else if (hflags & HF_IN_PM1)
+ nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
+
+ if (hflags & HF_DP_SAVED)
+ hflags ^= HF_ACT_PM;
+ }
+
+ if (!(hflags & HF_ACT_PM)) {
+ pm = &cp->phys.pm0;
+ newcmd = SCRIPTA_BA(np, pm0_data);
+ }
+ else {
+ pm = &cp->phys.pm1;
+ newcmd = SCRIPTA_BA(np, pm1_data);
+ }
+
+ hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
+ if (hflags != hflags0)
+ OUTB(np, HF_PRT, hflags);
+
+ /*
+ * fillin the phase mismatch context
+ */
+ pm->sg.addr = cpu_to_scr(oadr + olen - rest);
+ pm->sg.size = cpu_to_scr(rest);
+ pm->ret = cpu_to_scr(nxtdsp);
+
+ /*
+ * If we have a SWIDE,
+ * - prepare the address to write the SWIDE from SCRIPTS,
+ * - compute the SCRIPTS address to restart from,
+ * - move current data pointer context by one byte.
+ */
+ nxtdsp = SCRIPTA_BA(np, dispatch);
+ if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
+ (INB(np, nc_scntl2) & WSR)) {
+ u32 tmp;
+
+ /*
+ * Set up the table indirect for the MOVE
+ * of the residual byte and adjust the data
+ * pointer context.
+ */
+ tmp = scr_to_cpu(pm->sg.addr);
+ cp->phys.wresid.addr = cpu_to_scr(tmp);
+ pm->sg.addr = cpu_to_scr(tmp + 1);
+ tmp = scr_to_cpu(pm->sg.size);
+ cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
+ pm->sg.size = cpu_to_scr(tmp - 1);
+
+ /*
+ * If only the residual byte is to be moved,
+ * no PM context is needed.
+ */
+ if ((tmp&0xffffff) == 1)
+ newcmd = pm->ret;
+
+ /*
+ * Prepare the address of SCRIPTS that will
+ * move the residual byte to memory.
+ */
+ nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
+ }
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
+ hflags0, hflags, newcmd,
+ (unsigned)scr_to_cpu(pm->sg.addr),
+ (unsigned)scr_to_cpu(pm->sg.size),
+ (unsigned)scr_to_cpu(pm->ret));
+ }
+
+ /*
+ * Restart the SCRIPTS processor.
+ */
+ sym_set_script_dp (np, cp, newcmd);
+ OUTL_DSP(np, nxtdsp);
+ return;
+
+ /*
+ * Unexpected phase changes that occurs when the current phase
+ * is not a DATA IN or DATA OUT phase are due to error conditions.
+ * Such event may only happen when the SCRIPTS is using a
+ * multibyte SCSI MOVE.
+ *
+ * Phase change Some possible cause
+ *
+ * COMMAND --> MSG IN SCSI parity error detected by target.
+ * COMMAND --> STATUS Bad command or refused by target.
+ * MSG OUT --> MSG IN Message rejected by target.
+ * MSG OUT --> COMMAND Bogus target that discards extended
+ * negotiation messages.
+ *
+ * The code below does not care of the new phase and so
+ * trusts the target. Why to annoy it ?
+ * If the interrupted phase is COMMAND phase, we restart at
+ * dispatcher.
+ * If a target does not get all the messages after selection,
+ * the code assumes blindly that the target discards extended
+ * messages and clears the negotiation status.
+ * If the target does not want all our response to negotiation,
+ * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
+ * bloat for such a should_not_happen situation).
+ * In all other situation, we reset the BUS.
+ * Are these assumptions reasonable ? (Wait and see ...)
+ */
+unexpected_phase:
+ dsp -= 8;
+ nxtdsp = 0;
+
+ switch (cmd & 7) {
+ case 2: /* COMMAND phase */
+ nxtdsp = SCRIPTA_BA(np, dispatch);
+ break;
+#if 0
+ case 3: /* STATUS phase */
+ nxtdsp = SCRIPTA_BA(np, dispatch);
+ break;
+#endif
+ case 6: /* MSG OUT phase */
+ /*
+ * If the device may want to use untagged when we want
+ * tagged, we prepare an IDENTIFY without disc. granted,
+ * since we will not be able to handle reselect.
+ * Otherwise, we just don't care.
+ */
+ if (dsp == SCRIPTA_BA(np, send_ident)) {
+ if (cp->tag != NO_TAG && olen - rest <= 3) {
+ cp->host_status = HS_BUSY;
+ np->msgout[0] = IDENTIFY(0, cp->lun);
+ nxtdsp = SCRIPTB_BA(np, ident_break_atn);
+ }
+ else
+ nxtdsp = SCRIPTB_BA(np, ident_break);
+ }
+ else if (dsp == SCRIPTB_BA(np, send_wdtr) ||
+ dsp == SCRIPTB_BA(np, send_sdtr) ||
+ dsp == SCRIPTB_BA(np, send_ppr)) {
+ nxtdsp = SCRIPTB_BA(np, nego_bad_phase);
+ if (dsp == SCRIPTB_BA(np, send_ppr)) {
+ struct scsi_device *dev = cp->cmd->device;
+ dev->ppr = 0;
+ }
+ }
+ break;
+#if 0
+ case 7: /* MSG IN phase */
+ nxtdsp = SCRIPTA_BA(np, clrack);
+ break;
+#endif
+ }
+
+ if (nxtdsp) {
+ OUTL_DSP(np, nxtdsp);
+ return;
+ }
+
+reset_all:
+ sym_start_reset(np);
+}
+
+/*
+ * chip interrupt handler
+ *
+ * In normal situations, interrupt conditions occur one at
+ * a time. But when something bad happens on the SCSI BUS,
+ * the chip may raise several interrupt flags before
+ * stopping and interrupting the CPU. The additionnal
+ * interrupt flags are stacked in some extra registers
+ * after the SIP and/or DIP flag has been raised in the
+ * ISTAT. After the CPU has read the interrupt condition
+ * flag from SIST or DSTAT, the chip unstacks the other
+ * interrupt flags and sets the corresponding bits in
+ * SIST or DSTAT. Since the chip starts stacking once the
+ * SIP or DIP flag is set, there is a small window of time
+ * where the stacking does not occur.
+ *
+ * Typically, multiple interrupt conditions may happen in
+ * the following situations:
+ *
+ * - SCSI parity error + Phase mismatch (PAR|MA)
+ * When an parity error is detected in input phase
+ * and the device switches to msg-in phase inside a
+ * block MOV.
+ * - SCSI parity error + Unexpected disconnect (PAR|UDC)
+ * When a stupid device does not want to handle the
+ * recovery of an SCSI parity error.
+ * - Some combinations of STO, PAR, UDC, ...
+ * When using non compliant SCSI stuff, when user is
+ * doing non compliant hot tampering on the BUS, when
+ * something really bad happens to a device, etc ...
+ *
+ * The heuristic suggested by SYMBIOS to handle
+ * multiple interrupts is to try unstacking all
+ * interrupts conditions and to handle them on some
+ * priority based on error severity.
+ * This will work when the unstacking has been
+ * successful, but we cannot be 100 % sure of that,
+ * since the CPU may have been faster to unstack than
+ * the chip is able to stack. Hmmm ... But it seems that
+ * such a situation is very unlikely to happen.
+ *
+ * If this happen, for example STO caught by the CPU
+ * then UDC happenning before the CPU have restarted
+ * the SCRIPTS, the driver may wrongly complete the
+ * same command on UDC, since the SCRIPTS didn't restart
+ * and the DSA still points to the same command.
+ * We avoid this situation by setting the DSA to an
+ * invalid value when the CCB is completed and before
+ * restarting the SCRIPTS.
+ *
+ * Another issue is that we need some section of our
+ * recovery procedures to be somehow uninterruptible but
+ * the SCRIPTS processor does not provides such a
+ * feature. For this reason, we handle recovery preferently
+ * from the C code and check against some SCRIPTS critical
+ * sections from the C code.
+ *
+ * Hopefully, the interrupt handling of the driver is now
+ * able to resist to weird BUS error conditions, but donnot
+ * ask me for any guarantee that it will never fail. :-)
+ * Use at your own decision and risk.
+ */
+
+irqreturn_t sym_interrupt(struct Scsi_Host *shost)
+{
+ struct sym_data *sym_data = shost_priv(shost);
+ struct sym_hcb *np = sym_data->ncb;
+ struct pci_dev *pdev = sym_data->pdev;
+ u_char istat, istatc;
+ u_char dstat;
+ u_short sist;
+
+ /*
+ * interrupt on the fly ?
+ * (SCRIPTS may still be running)
+ *
+ * A `dummy read' is needed to ensure that the
+ * clear of the INTF flag reaches the device
+ * and that posted writes are flushed to memory
+ * before the scanning of the DONE queue.
+ * Note that SCRIPTS also (dummy) read to memory
+ * prior to deliver the INTF interrupt condition.
+ */
+ istat = INB(np, nc_istat);
+ if (istat & INTF) {
+ OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
+ istat |= INB(np, nc_istat); /* DUMMY READ */
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
+ sym_wakeup_done(np);
+ }
+
+ if (!(istat & (SIP|DIP)))
+ return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE;
+
+#if 0 /* We should never get this one */
+ if (istat & CABRT)
+ OUTB(np, nc_istat, CABRT);
+#endif
+
+ /*
+ * PAR and MA interrupts may occur at the same time,
+ * and we need to know of both in order to handle
+ * this situation properly. We try to unstack SCSI
+ * interrupts for that reason. BTW, I dislike a LOT
+ * such a loop inside the interrupt routine.
+ * Even if DMA interrupt stacking is very unlikely to
+ * happen, we also try unstacking these ones, since
+ * this has no performance impact.
+ */
+ sist = 0;
+ dstat = 0;
+ istatc = istat;
+ do {
+ if (istatc & SIP)
+ sist |= INW(np, nc_sist);
+ if (istatc & DIP)
+ dstat |= INB(np, nc_dstat);
+ istatc = INB(np, nc_istat);
+ istat |= istatc;
+
+ /* Prevent deadlock waiting on a condition that may
+ * never clear. */
+ if (unlikely(sist == 0xffff && dstat == 0xff)) {
+ if (pci_channel_offline(pdev))
+ return IRQ_NONE;
+ }
+ } while (istatc & (SIP|DIP));
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printf ("<%d|%x:%x|%x:%x>",
+ (int)INB(np, nc_scr0),
+ dstat,sist,
+ (unsigned)INL(np, nc_dsp),
+ (unsigned)INL(np, nc_dbc));
+ /*
+ * On paper, a memory read barrier may be needed here to
+ * prevent out of order LOADs by the CPU from having
+ * prefetched stale data prior to DMA having occurred.
+ * And since we are paranoid ... :)
+ */
+ MEMORY_READ_BARRIER();
+
+ /*
+ * First, interrupts we want to service cleanly.
+ *
+ * Phase mismatch (MA) is the most frequent interrupt
+ * for chip earlier than the 896 and so we have to service
+ * it as quickly as possible.
+ * A SCSI parity error (PAR) may be combined with a phase
+ * mismatch condition (MA).
+ * Programmed interrupts (SIR) are used to call the C code
+ * from SCRIPTS.
+ * The single step interrupt (SSI) is not used in this
+ * driver.
+ */
+ if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if (sist & PAR) sym_int_par (np, sist);
+ else if (sist & MA) sym_int_ma (np);
+ else if (dstat & SIR) sym_int_sir(np);
+ else if (dstat & SSI) OUTONB_STD();
+ else goto unknown_int;
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Now, interrupts that donnot happen in normal
+ * situations and that we may need to recover from.
+ *
+ * On SCSI RESET (RST), we reset everything.
+ * On SCSI BUS MODE CHANGE (SBMC), we complete all
+ * active CCBs with RESET status, prepare all devices
+ * for negotiating again and restart the SCRIPTS.
+ * On STO and UDC, we complete the CCB with the corres-
+ * ponding status and restart the SCRIPTS.
+ */
+ if (sist & RST) {
+ printf("%s: SCSI BUS reset detected.\n", sym_name(np));
+ sym_start_up(shost, 1);
+ return IRQ_HANDLED;
+ }
+
+ OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
+ OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */
+
+ if (!(sist & (GEN|HTH|SGE)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if (sist & SBMC) sym_int_sbmc(shost);
+ else if (sist & STO) sym_int_sto (np);
+ else if (sist & UDC) sym_int_udc (np);
+ else goto unknown_int;
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Now, interrupts we are not able to recover cleanly.
+ *
+ * Log message for hard errors.
+ * Reset everything.
+ */
+
+ sym_log_hard_error(shost, sist, dstat);
+
+ if ((sist & (GEN|HTH|SGE)) ||
+ (dstat & (MDPE|BF|ABRT|IID))) {
+ sym_start_reset(np);
+ return IRQ_HANDLED;
+ }
+
+unknown_int:
+ /*
+ * We just miss the cause of the interrupt. :(
+ * Print a message. The timeout will do the real work.
+ */
+ printf( "%s: unknown interrupt(s) ignored, "
+ "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
+ sym_name(np), istat, dstat, sist);
+ return IRQ_NONE;
+}
+
+/*
+ * Dequeue from the START queue all CCBs that match
+ * a given target/lun/task condition (-1 means all),
+ * and move them from the BUSY queue to the COMP queue
+ * with DID_SOFT_ERROR status condition.
+ * This function is used during error handling/recovery.
+ * It is called with SCRIPTS not running.
+ */
+static int
+sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
+{
+ int j;
+ struct sym_ccb *cp;
+
+ /*
+ * Make sure the starting index is within range.
+ */
+ assert((i >= 0) && (i < 2*MAX_QUEUE));
+
+ /*
+ * Walk until end of START queue and dequeue every job
+ * that matches the target/lun/task condition.
+ */
+ j = i;
+ while (i != np->squeueput) {
+ cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
+ assert(cp);
+#ifdef SYM_CONF_IARB_SUPPORT
+ /* Forget hints for IARB, they may be no longer relevant */
+ cp->host_flags &= ~HF_HINT_IARB;
+#endif
+ if ((target == -1 || cp->target == target) &&
+ (lun == -1 || cp->lun == lun) &&
+ (task == -1 || cp->tag == task)) {
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
+#else
+ sym_set_cam_status(cp->cmd, DID_REQUEUE);
+#endif
+ sym_remque(&cp->link_ccbq);
+ sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+ }
+ else {
+ if (i != j)
+ np->squeue[j] = np->squeue[i];
+ if ((j += 2) >= MAX_QUEUE*2) j = 0;
+ }
+ if ((i += 2) >= MAX_QUEUE*2) i = 0;
+ }
+ if (i != j) /* Copy back the idle task if needed */
+ np->squeue[j] = np->squeue[i];
+ np->squeueput = j; /* Update our current start queue pointer */
+
+ return (i - j) / 2;
+}
+
+/*
+ * chip handler for bad SCSI status condition
+ *
+ * In case of bad SCSI status, we unqueue all the tasks
+ * currently queued to the controller but not yet started
+ * and then restart the SCRIPTS processor immediately.
+ *
+ * QUEUE FULL and BUSY conditions are handled the same way.
+ * Basically all the not yet started tasks are requeued in
+ * device queue and the queue is frozen until a completion.
+ *
+ * For CHECK CONDITION and COMMAND TERMINATED status, we use
+ * the CCB of the failed command to prepare a REQUEST SENSE
+ * SCSI command and queue it to the controller queue.
+ *
+ * SCRATCHA is assumed to have been loaded with STARTPOS
+ * before the SCRIPTS called the C code.
+ */
+static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp)
+{
+ u32 startp;
+ u_char s_status = cp->ssss_status;
+ u_char h_flags = cp->host_flags;
+ int msglen;
+ int i;
+
+ /*
+ * Compute the index of the next job to start from SCRIPTS.
+ */
+ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+
+ /*
+ * The last CCB queued used for IARB hint may be
+ * no longer relevant. Forget it.
+ */
+#ifdef SYM_CONF_IARB_SUPPORT
+ if (np->last_cp)
+ np->last_cp = 0;
+#endif
+
+ /*
+ * Now deal with the SCSI status.
+ */
+ switch(s_status) {
+ case S_BUSY:
+ case S_QUEUE_FULL:
+ if (sym_verbose >= 2) {
+ sym_print_addr(cp->cmd, "%s\n",
+ s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n");
+ }
+ default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
+ sym_complete_error (np, cp);
+ break;
+ case S_TERMINATED:
+ case S_CHECK_COND:
+ /*
+ * If we get an SCSI error when requesting sense, give up.
+ */
+ if (h_flags & HF_SENSE) {
+ sym_complete_error (np, cp);
+ break;
+ }
+
+ /*
+ * Dequeue all queued CCBs for that device not yet started,
+ * and restart the SCRIPTS processor immediately.
+ */
+ sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
+ OUTL_DSP(np, SCRIPTA_BA(np, start));
+
+ /*
+ * Save some info of the actual IO.
+ * Compute the data residual.
+ */
+ cp->sv_scsi_status = cp->ssss_status;
+ cp->sv_xerr_status = cp->xerr_status;
+ cp->sv_resid = sym_compute_residual(np, cp);
+
+ /*
+ * Prepare all needed data structures for
+ * requesting sense data.
+ */
+
+ cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun);
+ msglen = 1;
+
+ /*
+ * If we are currently using anything different from
+ * async. 8 bit data transfers with that target,
+ * start a negotiation, since the device may want
+ * to report us a UNIT ATTENTION condition due to
+ * a cause we currently ignore, and we donnot want
+ * to be stuck with WIDE and/or SYNC data transfer.
+ *
+ * cp->nego_status is filled by sym_prepare_nego().
+ */
+ cp->nego_status = 0;
+ msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]);
+ /*
+ * Message table indirect structure.
+ */
+ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2);
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ /*
+ * sense command
+ */
+ cp->phys.cmd.addr = CCB_BA(cp, sensecmd);
+ cp->phys.cmd.size = cpu_to_scr(6);
+
+ /*
+ * patch requested size into sense command
+ */
+ cp->sensecmd[0] = REQUEST_SENSE;
+ cp->sensecmd[1] = 0;
+ if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7)
+ cp->sensecmd[1] = cp->lun << 5;
+ cp->sensecmd[4] = SYM_SNS_BBUF_LEN;
+ cp->data_len = SYM_SNS_BBUF_LEN;
+
+ /*
+ * sense data
+ */
+ memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN);
+ cp->phys.sense.addr = CCB_BA(cp, sns_bbuf);
+ cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN);
+
+ /*
+ * requeue the command.
+ */
+ startp = SCRIPTB_BA(np, sdata_in);
+
+ cp->phys.head.savep = cpu_to_scr(startp);
+ cp->phys.head.lastp = cpu_to_scr(startp);
+ cp->startp = cpu_to_scr(startp);
+ cp->goalp = cpu_to_scr(startp + 16);
+
+ cp->host_xflags = 0;
+ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+ cp->ssss_status = S_ILLEGAL;
+ cp->host_flags = (HF_SENSE|HF_DATA_IN);
+ cp->xerr_status = 0;
+ cp->extra_bytes = 0;
+
+ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
+
+ /*
+ * Requeue the command.
+ */
+ sym_put_start_queue(np, cp);
+
+ /*
+ * Give back to upper layer everything we have dequeued.
+ */
+ sym_flush_comp_queue(np, 0);
+ break;
+ }
+}
+
+/*
+ * After a device has accepted some management message
+ * as BUS DEVICE RESET, ABORT TASK, etc ..., or when
+ * a device signals a UNIT ATTENTION condition, some
+ * tasks are thrown away by the device. We are required
+ * to reflect that on our tasks list since the device
+ * will never complete these tasks.
+ *
+ * This function move from the BUSY queue to the COMP
+ * queue all disconnected CCBs for a given target that
+ * match the following criteria:
+ * - lun=-1 means any logical UNIT otherwise a given one.
+ * - task=-1 means any task, otherwise a given one.
+ */
+int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task)
+{
+ SYM_QUEHEAD qtmp, *qp;
+ int i = 0;
+ struct sym_ccb *cp;
+
+ /*
+ * Move the entire BUSY queue to our temporary queue.
+ */
+ sym_que_init(&qtmp);
+ sym_que_splice(&np->busy_ccbq, &qtmp);
+ sym_que_init(&np->busy_ccbq);
+
+ /*
+ * Put all CCBs that matches our criteria into
+ * the COMP queue and put back other ones into
+ * the BUSY queue.
+ */
+ while ((qp = sym_remque_head(&qtmp)) != NULL) {
+ struct scsi_cmnd *cmd;
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ cmd = cp->cmd;
+ if (cp->host_status != HS_DISCONNECT ||
+ cp->target != target ||
+ (lun != -1 && cp->lun != lun) ||
+ (task != -1 &&
+ (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
+ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
+ continue;
+ }
+ sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+
+ /* Preserve the software timeout condition */
+ if (sym_get_cam_status(cmd) != DID_TIME_OUT)
+ sym_set_cam_status(cmd, cam_status);
+ ++i;
+#if 0
+printf("XXXX TASK @%p CLEARED\n", cp);
+#endif
+ }
+ return i;
+}
+
+/*
+ * chip handler for TASKS recovery
+ *
+ * We cannot safely abort a command, while the SCRIPTS
+ * processor is running, since we just would be in race
+ * with it.
+ *
+ * As long as we have tasks to abort, we keep the SEM
+ * bit set in the ISTAT. When this bit is set, the
+ * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
+ * each time it enters the scheduler.
+ *
+ * If we have to reset a target, clear tasks of a unit,
+ * or to perform the abort of a disconnected job, we
+ * restart the SCRIPTS for selecting the target. Once
+ * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
+ * If it loses arbitration, the SCRIPTS will interrupt again
+ * the next time it will enter its scheduler, and so on ...
+ *
+ * On SIR_TARGET_SELECTED, we scan for the more
+ * appropriate thing to do:
+ *
+ * - If nothing, we just sent a M_ABORT message to the
+ * target to get rid of the useless SCSI bus ownership.
+ * According to the specs, no tasks shall be affected.
+ * - If the target is to be reset, we send it a M_RESET
+ * message.
+ * - If a logical UNIT is to be cleared , we send the
+ * IDENTIFY(lun) + M_ABORT.
+ * - If an untagged task is to be aborted, we send the
+ * IDENTIFY(lun) + M_ABORT.
+ * - If a tagged task is to be aborted, we send the
+ * IDENTIFY(lun) + task attributes + M_ABORT_TAG.
+ *
+ * Once our 'kiss of death' :) message has been accepted
+ * by the target, the SCRIPTS interrupts again
+ * (SIR_ABORT_SENT). On this interrupt, we complete
+ * all the CCBs that should have been aborted by the
+ * target according to our message.
+ */
+static void sym_sir_task_recovery(struct sym_hcb *np, int num)
+{
+ SYM_QUEHEAD *qp;
+ struct sym_ccb *cp;
+ struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
+ struct scsi_target *starget;
+ int target=-1, lun=-1, task;
+ int i, k;
+
+ switch(num) {
+ /*
+ * The SCRIPTS processor stopped before starting
+ * the next command in order to allow us to perform
+ * some task recovery.
+ */
+ case SIR_SCRIPT_STOPPED:
+ /*
+ * Do we have any target to reset or unit to clear ?
+ */
+ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+ tp = &np->target[i];
+ if (tp->to_reset ||
+ (tp->lun0p && tp->lun0p->to_clear)) {
+ target = i;
+ break;
+ }
+ if (!tp->lunmp)
+ continue;
+ for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
+ if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
+ target = i;
+ break;
+ }
+ }
+ if (target != -1)
+ break;
+ }
+
+ /*
+ * If not, walk the busy queue for any
+ * disconnected CCB to be aborted.
+ */
+ if (target == -1) {
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->to_abort) {
+ target = cp->target;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If some target is to be selected,
+ * prepare and start the selection.
+ */
+ if (target != -1) {
+ tp = &np->target[target];
+ np->abrt_sel.sel_id = target;
+ np->abrt_sel.sel_scntl3 = tp->head.wval;
+ np->abrt_sel.sel_sxfer = tp->head.sval;
+ OUTL(np, nc_dsa, np->hcb_ba);
+ OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
+ return;
+ }
+
+ /*
+ * Now look for a CCB to abort that haven't started yet.
+ * Btw, the SCRIPTS processor is still stopped, so
+ * we are not in race.
+ */
+ i = 0;
+ cp = NULL;
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ if (cp->host_status != HS_BUSY &&
+ cp->host_status != HS_NEGOTIATE)
+ continue;
+ if (!cp->to_abort)
+ continue;
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If we are using IMMEDIATE ARBITRATION, we donnot
+ * want to cancel the last queued CCB, since the
+ * SCRIPTS may have anticipated the selection.
+ */
+ if (cp == np->last_cp) {
+ cp->to_abort = 0;
+ continue;
+ }
+#endif
+ i = 1; /* Means we have found some */
+ break;
+ }
+ if (!i) {
+ /*
+ * We are done, so we donnot need
+ * to synchronize with the SCRIPTS anylonger.
+ * Remove the SEM flag from the ISTAT.
+ */
+ np->istat_sem = 0;
+ OUTB(np, nc_istat, SIGP);
+ break;
+ }
+ /*
+ * Compute index of next position in the start
+ * queue the SCRIPTS intends to start and dequeue
+ * all CCBs for that device that haven't been started.
+ */
+ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+ i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
+
+ /*
+ * Make sure at least our IO to abort has been dequeued.
+ */
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR);
+#else
+ sym_remque(&cp->link_ccbq);
+ sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+#endif
+ /*
+ * Keep track in cam status of the reason of the abort.
+ */
+ if (cp->to_abort == 2)
+ sym_set_cam_status(cp->cmd, DID_TIME_OUT);
+ else
+ sym_set_cam_status(cp->cmd, DID_ABORT);
+
+ /*
+ * Complete with error everything that we have dequeued.
+ */
+ sym_flush_comp_queue(np, 0);
+ break;
+ /*
+ * The SCRIPTS processor has selected a target
+ * we may have some manual recovery to perform for.
+ */
+ case SIR_TARGET_SELECTED:
+ target = INB(np, nc_sdid) & 0xf;
+ tp = &np->target[target];
+
+ np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));
+
+ /*
+ * If the target is to be reset, prepare a
+ * M_RESET message and clear the to_reset flag
+ * since we donnot expect this operation to fail.
+ */
+ if (tp->to_reset) {
+ np->abrt_msg[0] = M_RESET;
+ np->abrt_tbl.size = 1;
+ tp->to_reset = 0;
+ break;
+ }
+
+ /*
+ * Otherwise, look for some logical unit to be cleared.
+ */
+ if (tp->lun0p && tp->lun0p->to_clear)
+ lun = 0;
+ else if (tp->lunmp) {
+ for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
+ if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
+ lun = k;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If a logical unit is to be cleared, prepare
+ * an IDENTIFY(lun) + ABORT MESSAGE.
+ */
+ if (lun != -1) {
+ struct sym_lcb *lp = sym_lp(tp, lun);
+ lp->to_clear = 0; /* We don't expect to fail here */
+ np->abrt_msg[0] = IDENTIFY(0, lun);
+ np->abrt_msg[1] = M_ABORT;
+ np->abrt_tbl.size = 2;
+ break;
+ }
+
+ /*
+ * Otherwise, look for some disconnected job to
+ * abort for this target.
+ */
+ i = 0;
+ cp = NULL;
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->target != target)
+ continue;
+ if (!cp->to_abort)
+ continue;
+ i = 1; /* Means we have some */
+ break;
+ }
+
+ /*
+ * If we have none, probably since the device has
+ * completed the command before we won abitration,
+ * send a M_ABORT message without IDENTIFY.
+ * According to the specs, the device must just
+ * disconnect the BUS and not abort any task.
+ */
+ if (!i) {
+ np->abrt_msg[0] = M_ABORT;
+ np->abrt_tbl.size = 1;
+ break;
+ }
+
+ /*
+ * We have some task to abort.
+ * Set the IDENTIFY(lun)
+ */
+ np->abrt_msg[0] = IDENTIFY(0, cp->lun);
+
+ /*
+ * If we want to abort an untagged command, we
+ * will send a IDENTIFY + M_ABORT.
+ * Otherwise (tagged command), we will send
+ * a IDENTITFY + task attributes + ABORT TAG.
+ */
+ if (cp->tag == NO_TAG) {
+ np->abrt_msg[1] = M_ABORT;
+ np->abrt_tbl.size = 2;
+ } else {
+ np->abrt_msg[1] = cp->scsi_smsg[1];
+ np->abrt_msg[2] = cp->scsi_smsg[2];
+ np->abrt_msg[3] = M_ABORT_TAG;
+ np->abrt_tbl.size = 4;
+ }
+ /*
+ * Keep track of software timeout condition, since the
+ * peripheral driver may not count retries on abort
+ * conditions not due to timeout.
+ */
+ if (cp->to_abort == 2)
+ sym_set_cam_status(cp->cmd, DID_TIME_OUT);
+ cp->to_abort = 0; /* We donnot expect to fail here */
+ break;
+
+ /*
+ * The target has accepted our message and switched
+ * to BUS FREE phase as we expected.
+ */
+ case SIR_ABORT_SENT:
+ target = INB(np, nc_sdid) & 0xf;
+ tp = &np->target[target];
+ starget = tp->starget;
+
+ /*
+ ** If we didn't abort anything, leave here.
+ */
+ if (np->abrt_msg[0] == M_ABORT)
+ break;
+
+ /*
+ * If we sent a M_RESET, then a hardware reset has
+ * been performed by the target.
+ * - Reset everything to async 8 bit
+ * - Tell ourself to negotiate next time :-)
+ * - Prepare to clear all disconnected CCBs for
+ * this target from our task list (lun=task=-1)
+ */
+ lun = -1;
+ task = -1;
+ if (np->abrt_msg[0] == M_RESET) {
+ tp->head.sval = 0;
+ tp->head.wval = np->rv_scntl3;
+ tp->head.uval = 0;
+ spi_period(starget) = 0;
+ spi_offset(starget) = 0;
+ spi_width(starget) = 0;
+ spi_iu(starget) = 0;
+ spi_dt(starget) = 0;
+ spi_qas(starget) = 0;
+ tp->tgoal.check_nego = 1;
+ tp->tgoal.renego = 0;
+ }
+
+ /*
+ * Otherwise, check for the LUN and TASK(s)
+ * concerned by the cancelation.
+ * If it is not ABORT_TAG then it is CLEAR_QUEUE
+ * or an ABORT message :-)
+ */
+ else {
+ lun = np->abrt_msg[0] & 0x3f;
+ if (np->abrt_msg[1] == M_ABORT_TAG)
+ task = np->abrt_msg[2];
+ }
+
+ /*
+ * Complete all the CCBs the device should have
+ * aborted due to our 'kiss of death' message.
+ */
+ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+ sym_dequeue_from_squeue(np, i, target, lun, -1);
+ sym_clear_tasks(np, DID_ABORT, target, lun, task);
+ sym_flush_comp_queue(np, 0);
+
+ /*
+ * If we sent a BDR, make upper layer aware of that.
+ */
+ if (np->abrt_msg[0] == M_RESET)
+ starget_printk(KERN_NOTICE, starget,
+ "has been reset\n");
+ break;
+ }
+
+ /*
+ * Print to the log the message we intend to send.
+ */
+ if (num == SIR_TARGET_SELECTED) {
+ dev_info(&tp->starget->dev, "control msgout:");
+ sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
+ np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
+ }
+
+ /*
+ * Let the SCRIPTS processor continue.
+ */
+ OUTONB_STD();
+}
+
+/*
+ * Gerard's alchemy:) that deals with with the data
+ * pointer for both MDP and the residual calculation.
+ *
+ * I didn't want to bloat the code by more than 200
+ * lines for the handling of both MDP and the residual.
+ * This has been achieved by using a data pointer
+ * representation consisting in an index in the data
+ * array (dp_sg) and a negative offset (dp_ofs) that
+ * have the following meaning:
+ *
+ * - dp_sg = SYM_CONF_MAX_SG
+ * we are at the end of the data script.
+ * - dp_sg < SYM_CONF_MAX_SG
+ * dp_sg points to the next entry of the scatter array
+ * we want to transfer.
+ * - dp_ofs < 0
+ * dp_ofs represents the residual of bytes of the
+ * previous entry scatter entry we will send first.
+ * - dp_ofs = 0
+ * no residual to send first.
+ *
+ * The function sym_evaluate_dp() accepts an arbitray
+ * offset (basically from the MDP message) and returns
+ * the corresponding values of dp_sg and dp_ofs.
+ */
+
+static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
+{
+ u32 dp_scr;
+ int dp_ofs, dp_sg, dp_sgmin;
+ int tmp;
+ struct sym_pmc *pm;
+
+ /*
+ * Compute the resulted data pointer in term of a script
+ * address within some DATA script and a signed byte offset.
+ */
+ dp_scr = scr;
+ dp_ofs = *ofs;
+ if (dp_scr == SCRIPTA_BA(np, pm0_data))
+ pm = &cp->phys.pm0;
+ else if (dp_scr == SCRIPTA_BA(np, pm1_data))
+ pm = &cp->phys.pm1;
+ else
+ pm = NULL;
+
+ if (pm) {
+ dp_scr = scr_to_cpu(pm->ret);
+ dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
+ }
+
+ /*
+ * If we are auto-sensing, then we are done.
+ */
+ if (cp->host_flags & HF_SENSE) {
+ *ofs = dp_ofs;
+ return 0;
+ }
+
+ /*
+ * Deduce the index of the sg entry.
+ * Keep track of the index of the first valid entry.
+ * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
+ * end of the data.
+ */
+ tmp = scr_to_cpu(cp->goalp);
+ dp_sg = SYM_CONF_MAX_SG;
+ if (dp_scr != tmp)
+ dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
+ dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
+
+ /*
+ * Move to the sg entry the data pointer belongs to.
+ *
+ * If we are inside the data area, we expect result to be:
+ *
+ * Either,
+ * dp_ofs = 0 and dp_sg is the index of the sg entry
+ * the data pointer belongs to (or the end of the data)
+ * Or,
+ * dp_ofs < 0 and dp_sg is the index of the sg entry
+ * the data pointer belongs to + 1.
+ */
+ if (dp_ofs < 0) {
+ int n;
+ while (dp_sg > dp_sgmin) {
+ --dp_sg;
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ n = dp_ofs + (tmp & 0xffffff);
+ if (n > 0) {
+ ++dp_sg;
+ break;
+ }
+ dp_ofs = n;
+ }
+ }
+ else if (dp_ofs > 0) {
+ while (dp_sg < SYM_CONF_MAX_SG) {
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ dp_ofs -= (tmp & 0xffffff);
+ ++dp_sg;
+ if (dp_ofs <= 0)
+ break;
+ }
+ }
+
+ /*
+ * Make sure the data pointer is inside the data area.
+ * If not, return some error.
+ */
+ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
+ goto out_err;
+ else if (dp_sg > SYM_CONF_MAX_SG ||
+ (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
+ goto out_err;
+
+ /*
+ * Save the extreme pointer if needed.
+ */
+ if (dp_sg > cp->ext_sg ||
+ (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
+ cp->ext_sg = dp_sg;
+ cp->ext_ofs = dp_ofs;
+ }
+
+ /*
+ * Return data.
+ */
+ *ofs = dp_ofs;
+ return dp_sg;
+
+out_err:
+ return -1;
+}
+
+/*
+ * chip handler for MODIFY DATA POINTER MESSAGE
+ *
+ * We also call this function on IGNORE WIDE RESIDUE
+ * messages that do not match a SWIDE full condition.
+ * Btw, we assume in that situation that such a message
+ * is equivalent to a MODIFY DATA POINTER (offset=-1).
+ */
+
+static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
+{
+ int dp_ofs = ofs;
+ u32 dp_scr = sym_get_script_dp (np, cp);
+ u32 dp_ret;
+ u32 tmp;
+ u_char hflags;
+ int dp_sg;
+ struct sym_pmc *pm;
+
+ /*
+ * Not supported for auto-sense.
+ */
+ if (cp->host_flags & HF_SENSE)
+ goto out_reject;
+
+ /*
+ * Apply our alchemy:) (see comments in sym_evaluate_dp()),
+ * to the resulted data pointer.
+ */
+ dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
+ if (dp_sg < 0)
+ goto out_reject;
+
+ /*
+ * And our alchemy:) allows to easily calculate the data
+ * script address we want to return for the next data phase.
+ */
+ dp_ret = cpu_to_scr(cp->goalp);
+ dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
+
+ /*
+ * If offset / scatter entry is zero we donnot need
+ * a context for the new current data pointer.
+ */
+ if (dp_ofs == 0) {
+ dp_scr = dp_ret;
+ goto out_ok;
+ }
+
+ /*
+ * Get a context for the new current data pointer.
+ */
+ hflags = INB(np, HF_PRT);
+
+ if (hflags & HF_DP_SAVED)
+ hflags ^= HF_ACT_PM;
+
+ if (!(hflags & HF_ACT_PM)) {
+ pm = &cp->phys.pm0;
+ dp_scr = SCRIPTA_BA(np, pm0_data);
+ }
+ else {
+ pm = &cp->phys.pm1;
+ dp_scr = SCRIPTA_BA(np, pm1_data);
+ }
+
+ hflags &= ~(HF_DP_SAVED);
+
+ OUTB(np, HF_PRT, hflags);
+
+ /*
+ * Set up the new current data pointer.
+ * ofs < 0 there, and for the next data phase, we
+ * want to transfer part of the data of the sg entry
+ * corresponding to index dp_sg-1 prior to returning
+ * to the main data script.
+ */
+ pm->ret = cpu_to_scr(dp_ret);
+ tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
+ tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
+ pm->sg.addr = cpu_to_scr(tmp);
+ pm->sg.size = cpu_to_scr(-dp_ofs);
+
+out_ok:
+ sym_set_script_dp (np, cp, dp_scr);
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+ return;
+
+out_reject:
+ OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+
+/*
+ * chip calculation of the data residual.
+ *
+ * As I used to say, the requirement of data residual
+ * in SCSI is broken, useless and cannot be achieved
+ * without huge complexity.
+ * But most OSes and even the official CAM require it.
+ * When stupidity happens to be so widely spread inside
+ * a community, it gets hard to convince.
+ *
+ * Anyway, I don't care, since I am not going to use
+ * any software that considers this data residual as
+ * a relevant information. :)
+ */
+
+int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
+{
+ int dp_sg, dp_sgmin, resid = 0;
+ int dp_ofs = 0;
+
+ /*
+ * Check for some data lost or just thrown away.
+ * We are not required to be quite accurate in this
+ * situation. Btw, if we are odd for output and the
+ * device claims some more data, it may well happen
+ * than our residual be zero. :-)
+ */
+ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
+ if (cp->xerr_status & XE_EXTRA_DATA)
+ resid -= cp->extra_bytes;
+ if (cp->xerr_status & XE_SODL_UNRUN)
+ ++resid;
+ if (cp->xerr_status & XE_SWIDE_OVRUN)
+ --resid;
+ }
+
+ /*
+ * If all data has been transferred,
+ * there is no residual.
+ */
+ if (cp->phys.head.lastp == cp->goalp)
+ return resid;
+
+ /*
+ * If no data transfer occurs, or if the data
+ * pointer is weird, return full residual.
+ */
+ if (cp->startp == cp->phys.head.lastp ||
+ sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
+ &dp_ofs) < 0) {
+ return cp->data_len - cp->odd_byte_adjustment;
+ }
+
+ /*
+ * If we were auto-sensing, then we are done.
+ */
+ if (cp->host_flags & HF_SENSE) {
+ return -dp_ofs;
+ }
+
+ /*
+ * We are now full comfortable in the computation
+ * of the data residual (2's complement).
+ */
+ dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
+ resid = -cp->ext_ofs;
+ for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
+ u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ resid += (tmp & 0xffffff);
+ }
+
+ resid -= cp->odd_byte_adjustment;
+
+ /*
+ * Hopefully, the result is not too wrong.
+ */
+ return resid;
+}
+
+/*
+ * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
+ *
+ * When we try to negotiate, we append the negotiation message
+ * to the identify and (maybe) simple tag message.
+ * The host status field is set to HS_NEGOTIATE to mark this
+ * situation.
+ *
+ * If the target doesn't answer this message immediately
+ * (as required by the standard), the SIR_NEGO_FAILED interrupt
+ * will be raised eventually.
+ * The handler removes the HS_NEGOTIATE status, and sets the
+ * negotiated value to the default (async / nowide).
+ *
+ * If we receive a matching answer immediately, we check it
+ * for validity, and set the values.
+ *
+ * If we receive a Reject message immediately, we assume the
+ * negotiation has failed, and fall back to standard values.
+ *
+ * If we receive a negotiation message while not in HS_NEGOTIATE
+ * state, it's a target initiated negotiation. We prepare a
+ * (hopefully) valid answer, set our parameters, and send back
+ * this answer to the target.
+ *
+ * If the target doesn't fetch the answer (no message out phase),
+ * we assume the negotiation has failed, and fall back to default
+ * settings (SIR_NEGO_PROTO interrupt).
+ *
+ * When we set the values, we adjust them in all ccbs belonging
+ * to this target, in the controller's register, and in the "phys"
+ * field of the controller's struct sym_hcb.
+ */
+
+/*
+ * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
+ */
+static int
+sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
+{
+ int target = cp->target;
+ u_char chg, ofs, per, fak, div;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, target, "sync msgin", np->msgin);
+ }
+
+ /*
+ * Get requested values.
+ */
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[4];
+
+ /*
+ * Check values against our limits.
+ */
+ if (ofs) {
+ if (ofs > np->maxoffs)
+ {chg = 1; ofs = np->maxoffs;}
+ }
+
+ if (ofs) {
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ }
+
+ /*
+ * Get new chip synchronous parameters value.
+ */
+ div = fak = 0;
+ if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
+ goto reject_it;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_addr(cp->cmd,
+ "sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
+ ofs, per, div, fak, chg);
+ }
+
+ /*
+ * If it was an answer we want to change,
+ * then it isn't acceptable. Reject it.
+ */
+ if (!req && chg)
+ goto reject_it;
+
+ /*
+ * Apply new values.
+ */
+ sym_setsync (np, target, ofs, per, div, fak);
+
+ /*
+ * It was an answer. We are done.
+ */
+ if (!req)
+ return 0;
+
+ /*
+ * It was a request. Prepare an answer message.
+ */
+ spi_populate_sync_msg(np->msgout, per, ofs);
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, target, "sync msgout", np->msgout);
+ }
+
+ np->msgin [0] = M_NOOP;
+
+ return 0;
+
+reject_it:
+ sym_setsync (np, target, 0, 0, 0, 0);
+ return -1;
+}
+
+static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+ int req = 1;
+ int result;
+
+ /*
+ * Request or answer ?
+ */
+ if (INB(np, HS_PRT) == HS_NEGOTIATE) {
+ OUTB(np, HS_PRT, HS_BUSY);
+ if (cp->nego_status && cp->nego_status != NS_SYNC)
+ goto reject_it;
+ req = 0;
+ }
+
+ /*
+ * Check and apply new values.
+ */
+ result = sym_sync_nego_check(np, req, cp);
+ if (result) /* Not acceptable, reject it */
+ goto reject_it;
+ if (req) { /* Was a request, send response. */
+ cp->nego_status = NS_SYNC;
+ OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
+ }
+ else /* Was a response, we are done. */
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+ return;
+
+reject_it:
+ OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+/*
+ * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
+ */
+static int
+sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
+{
+ struct sym_tcb *tp = &np->target[target];
+ unsigned char fak, div;
+ int dt, chg = 0;
+
+ unsigned char per = np->msgin[3];
+ unsigned char ofs = np->msgin[5];
+ unsigned char wide = np->msgin[6];
+ unsigned char opts = np->msgin[7] & PPR_OPT_MASK;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
+ }
+
+ /*
+ * Check values against our limits.
+ */
+ if (wide > np->maxwide) {
+ chg = 1;
+ wide = np->maxwide;
+ }
+ if (!wide || !(np->features & FE_U3EN))
+ opts = 0;
+
+ if (opts != (np->msgin[7] & PPR_OPT_MASK))
+ chg = 1;
+
+ dt = opts & PPR_OPT_DT;
+
+ if (ofs) {
+ unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
+ if (ofs > maxoffs) {
+ chg = 1;
+ ofs = maxoffs;
+ }
+ }
+
+ if (ofs) {
+ unsigned char minsync = dt ? np->minsync_dt : np->minsync;
+ if (per < minsync) {
+ chg = 1;
+ per = minsync;
+ }
+ }
+
+ /*
+ * Get new chip synchronous parameters value.
+ */
+ div = fak = 0;
+ if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
+ goto reject_it;
+
+ /*
+ * If it was an answer we want to change,
+ * then it isn't acceptable. Reject it.
+ */
+ if (!req && chg)
+ goto reject_it;
+
+ /*
+ * Apply new values.
+ */
+ sym_setpprot(np, target, opts, ofs, per, wide, div, fak);
+
+ /*
+ * It was an answer. We are done.
+ */
+ if (!req)
+ return 0;
+
+ /*
+ * It was a request. Prepare an answer message.
+ */
+ spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts);
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
+ }
+
+ np->msgin [0] = M_NOOP;
+
+ return 0;
+
+reject_it:
+ sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
+ /*
+ * If it is a device response that should result in
+ * ST, we may want to try a legacy negotiation later.
+ */
+ if (!req && !opts) {
+ tp->tgoal.period = per;
+ tp->tgoal.offset = ofs;
+ tp->tgoal.width = wide;
+ tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+ tp->tgoal.check_nego = 1;
+ }
+ return -1;
+}
+
+static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+ int req = 1;
+ int result;
+
+ /*
+ * Request or answer ?
+ */
+ if (INB(np, HS_PRT) == HS_NEGOTIATE) {
+ OUTB(np, HS_PRT, HS_BUSY);
+ if (cp->nego_status && cp->nego_status != NS_PPR)
+ goto reject_it;
+ req = 0;
+ }
+
+ /*
+ * Check and apply new values.
+ */
+ result = sym_ppr_nego_check(np, req, cp->target);
+ if (result) /* Not acceptable, reject it */
+ goto reject_it;
+ if (req) { /* Was a request, send response. */
+ cp->nego_status = NS_PPR;
+ OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
+ }
+ else /* Was a response, we are done. */
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+ return;
+
+reject_it:
+ OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+/*
+ * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
+ */
+static int
+sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
+{
+ int target = cp->target;
+ u_char chg, wide;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, target, "wide msgin", np->msgin);
+ }
+
+ /*
+ * Get requested values.
+ */
+ chg = 0;
+ wide = np->msgin[3];
+
+ /*
+ * Check values against our limits.
+ */
+ if (wide > np->maxwide) {
+ chg = 1;
+ wide = np->maxwide;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
+ wide, chg);
+ }
+
+ /*
+ * If it was an answer we want to change,
+ * then it isn't acceptable. Reject it.
+ */
+ if (!req && chg)
+ goto reject_it;
+
+ /*
+ * Apply new values.
+ */
+ sym_setwide (np, target, wide);
+
+ /*
+ * It was an answer. We are done.
+ */
+ if (!req)
+ return 0;
+
+ /*
+ * It was a request. Prepare an answer message.
+ */
+ spi_populate_width_msg(np->msgout, wide);
+
+ np->msgin [0] = M_NOOP;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, target, "wide msgout", np->msgout);
+ }
+
+ return 0;
+
+reject_it:
+ return -1;
+}
+
+static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+ int req = 1;
+ int result;
+
+ /*
+ * Request or answer ?
+ */
+ if (INB(np, HS_PRT) == HS_NEGOTIATE) {
+ OUTB(np, HS_PRT, HS_BUSY);
+ if (cp->nego_status && cp->nego_status != NS_WIDE)
+ goto reject_it;
+ req = 0;
+ }
+
+ /*
+ * Check and apply new values.
+ */
+ result = sym_wide_nego_check(np, req, cp);
+ if (result) /* Not acceptable, reject it */
+ goto reject_it;
+ if (req) { /* Was a request, send response. */
+ cp->nego_status = NS_WIDE;
+ OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
+ } else { /* Was a response. */
+ /*
+ * Negotiate for SYNC immediately after WIDE response.
+ * This allows to negotiate for both WIDE and SYNC on
+ * a single SCSI command (Suggested by Justin Gibbs).
+ */
+ if (tp->tgoal.offset) {
+ spi_populate_sync_msg(np->msgout, tp->tgoal.period,
+ tp->tgoal.offset);
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ sym_print_nego_msg(np, cp->target,
+ "sync msgout", np->msgout);
+ }
+
+ cp->nego_status = NS_SYNC;
+ OUTB(np, HS_PRT, HS_NEGOTIATE);
+ OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
+ return;
+ } else
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+ }
+
+ return;
+
+reject_it:
+ OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+/*
+ * Reset DT, SYNC or WIDE to default settings.
+ *
+ * Called when a negotiation does not succeed either
+ * on rejection or on protocol error.
+ *
+ * A target that understands a PPR message should never
+ * reject it, and messing with it is very unlikely.
+ * So, if a PPR makes problems, we may just want to
+ * try a legacy negotiation later.
+ */
+static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+ switch (cp->nego_status) {
+ case NS_PPR:
+#if 0
+ sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
+#else
+ if (tp->tgoal.period < np->minsync)
+ tp->tgoal.period = np->minsync;
+ if (tp->tgoal.offset > np->maxoffs)
+ tp->tgoal.offset = np->maxoffs;
+ tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+ tp->tgoal.check_nego = 1;
+#endif
+ break;
+ case NS_SYNC:
+ sym_setsync (np, cp->target, 0, 0, 0, 0);
+ break;
+ case NS_WIDE:
+ sym_setwide (np, cp->target, 0);
+ break;
+ }
+ np->msgin [0] = M_NOOP;
+ np->msgout[0] = M_NOOP;
+ cp->nego_status = 0;
+}
+
+/*
+ * chip handler for MESSAGE REJECT received in response to
+ * PPR, WIDE or SYNCHRONOUS negotiation.
+ */
+static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+ sym_nego_default(np, tp, cp);
+ OUTB(np, HS_PRT, HS_BUSY);
+}
+
+/*
+ * chip exception handler for programmed interrupts.
+ */
+static void sym_int_sir(struct sym_hcb *np)
+{
+ u_char num = INB(np, nc_dsps);
+ u32 dsa = INL(np, nc_dsa);
+ struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
+ u_char target = INB(np, nc_sdid) & 0x0f;
+ struct sym_tcb *tp = &np->target[target];
+ int tmp;
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
+
+ switch (num) {
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+ /*
+ * SCRIPTS tell us that we may have to update
+ * 64 bit DMA segment registers.
+ */
+ case SIR_DMAP_DIRTY:
+ sym_update_dmap_regs(np);
+ goto out;
+#endif
+ /*
+ * Command has been completed with error condition
+ * or has been auto-sensed.
+ */
+ case SIR_COMPLETE_ERROR:
+ sym_complete_error(np, cp);
+ return;
+ /*
+ * The C code is currently trying to recover from something.
+ * Typically, user want to abort some command.
+ */
+ case SIR_SCRIPT_STOPPED:
+ case SIR_TARGET_SELECTED:
+ case SIR_ABORT_SENT:
+ sym_sir_task_recovery(np, num);
+ return;
+ /*
+ * The device didn't go to MSG OUT phase after having
+ * been selected with ATN. We do not want to handle that.
+ */
+ case SIR_SEL_ATN_NO_MSG_OUT:
+ scmd_printk(KERN_WARNING, cp->cmd,
+ "No MSG OUT phase after selection with ATN\n");
+ goto out_stuck;
+ /*
+ * The device didn't switch to MSG IN phase after
+ * having reselected the initiator.
+ */
+ case SIR_RESEL_NO_MSG_IN:
+ scmd_printk(KERN_WARNING, cp->cmd,
+ "No MSG IN phase after reselection\n");
+ goto out_stuck;
+ /*
+ * After reselection, the device sent a message that wasn't
+ * an IDENTIFY.
+ */
+ case SIR_RESEL_NO_IDENTIFY:
+ scmd_printk(KERN_WARNING, cp->cmd,
+ "No IDENTIFY after reselection\n");
+ goto out_stuck;
+ /*
+ * The device reselected a LUN we do not know about.
+ */
+ case SIR_RESEL_BAD_LUN:
+ np->msgout[0] = M_RESET;
+ goto out;
+ /*
+ * The device reselected for an untagged nexus and we
+ * haven't any.
+ */
+ case SIR_RESEL_BAD_I_T_L:
+ np->msgout[0] = M_ABORT;
+ goto out;
+ /*
+ * The device reselected for a tagged nexus that we do not have.
+ */
+ case SIR_RESEL_BAD_I_T_L_Q:
+ np->msgout[0] = M_ABORT_TAG;
+ goto out;
+ /*
+ * The SCRIPTS let us know that the device has grabbed
+ * our message and will abort the job.
+ */
+ case SIR_RESEL_ABORTED:
+ np->lastmsg = np->msgout[0];
+ np->msgout[0] = M_NOOP;
+ scmd_printk(KERN_WARNING, cp->cmd,
+ "message %x sent on bad reselection\n", np->lastmsg);
+ goto out;
+ /*
+ * The SCRIPTS let us know that a message has been
+ * successfully sent to the device.
+ */
+ case SIR_MSG_OUT_DONE:
+ np->lastmsg = np->msgout[0];
+ np->msgout[0] = M_NOOP;
+ /* Should we really care of that */
+ if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
+ if (cp) {
+ cp->xerr_status &= ~XE_PARITY_ERR;
+ if (!cp->xerr_status)
+ OUTOFFB(np, HF_PRT, HF_EXT_ERR);
+ }
+ }
+ goto out;
+ /*
+ * The device didn't send a GOOD SCSI status.
+ * We may have some work to do prior to allow
+ * the SCRIPTS processor to continue.
+ */
+ case SIR_BAD_SCSI_STATUS:
+ if (!cp)
+ goto out;
+ sym_sir_bad_scsi_status(np, num, cp);
+ return;
+ /*
+ * We are asked by the SCRIPTS to prepare a
+ * REJECT message.
+ */
+ case SIR_REJECT_TO_SEND:
+ sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
+ np->msgout[0] = M_REJECT;
+ goto out;
+ /*
+ * We have been ODD at the end of a DATA IN
+ * transfer and the device didn't send a
+ * IGNORE WIDE RESIDUE message.
+ * It is a data overrun condition.
+ */
+ case SIR_SWIDE_OVERRUN:
+ if (cp) {
+ OUTONB(np, HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_SWIDE_OVRUN;
+ }
+ goto out;
+ /*
+ * We have been ODD at the end of a DATA OUT
+ * transfer.
+ * It is a data underrun condition.
+ */
+ case SIR_SODL_UNDERRUN:
+ if (cp) {
+ OUTONB(np, HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_SODL_UNRUN;
+ }
+ goto out;
+ /*
+ * The device wants us to tranfer more data than
+ * expected or in the wrong direction.
+ * The number of extra bytes is in scratcha.
+ * It is a data overrun condition.
+ */
+ case SIR_DATA_OVERRUN:
+ if (cp) {
+ OUTONB(np, HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_EXTRA_DATA;
+ cp->extra_bytes += INL(np, nc_scratcha);
+ }
+ goto out;
+ /*
+ * The device switched to an illegal phase (4/5).
+ */
+ case SIR_BAD_PHASE:
+ if (cp) {
+ OUTONB(np, HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_BAD_PHASE;
+ }
+ goto out;
+ /*
+ * We received a message.
+ */
+ case SIR_MSG_RECEIVED:
+ if (!cp)
+ goto out_stuck;
+ switch (np->msgin [0]) {
+ /*
+ * We received an extended message.
+ * We handle MODIFY DATA POINTER, SDTR, WDTR
+ * and reject all other extended messages.
+ */
+ case M_EXTENDED:
+ switch (np->msgin [2]) {
+ case M_X_MODIFY_DP:
+ if (DEBUG_FLAGS & DEBUG_POINTER)
+ sym_print_msg(cp, "extended msg ",
+ np->msgin);
+ tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
+ (np->msgin[5]<<8) + (np->msgin[6]);
+ sym_modify_dp(np, tp, cp, tmp);
+ return;
+ case M_X_SYNC_REQ:
+ sym_sync_nego(np, tp, cp);
+ return;
+ case M_X_PPR_REQ:
+ sym_ppr_nego(np, tp, cp);
+ return;
+ case M_X_WIDE_REQ:
+ sym_wide_nego(np, tp, cp);
+ return;
+ default:
+ goto out_reject;
+ }
+ break;
+ /*
+ * We received a 1/2 byte message not handled from SCRIPTS.
+ * We are only expecting MESSAGE REJECT and IGNORE WIDE
+ * RESIDUE messages that haven't been anticipated by
+ * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
+ * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
+ */
+ case M_IGN_RESIDUE:
+ if (DEBUG_FLAGS & DEBUG_POINTER)
+ sym_print_msg(cp, "1 or 2 byte ", np->msgin);
+ if (cp->host_flags & HF_SENSE)
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+ else
+ sym_modify_dp(np, tp, cp, -1);
+ return;
+ case M_REJECT:
+ if (INB(np, HS_PRT) == HS_NEGOTIATE)
+ sym_nego_rejected(np, tp, cp);
+ else {
+ sym_print_addr(cp->cmd,
+ "M_REJECT received (%x:%x).\n",
+ scr_to_cpu(np->lastmsg), np->msgout[0]);
+ }
+ goto out_clrack;
+ break;
+ default:
+ goto out_reject;
+ }
+ break;
+ /*
+ * We received an unknown message.
+ * Ignore all MSG IN phases and reject it.
+ */
+ case SIR_MSG_WEIRD:
+ sym_print_msg(cp, "WEIRD message received", np->msgin);
+ OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
+ return;
+ /*
+ * Negotiation failed.
+ * Target does not send us the reply.
+ * Remove the HS_NEGOTIATE status.
+ */
+ case SIR_NEGO_FAILED:
+ OUTB(np, HS_PRT, HS_BUSY);
+ /*
+ * Negotiation failed.
+ * Target does not want answer message.
+ */
+ case SIR_NEGO_PROTO:
+ sym_nego_default(np, tp, cp);
+ goto out;
+ }
+
+out:
+ OUTONB_STD();
+ return;
+out_reject:
+ OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+ return;
+out_clrack:
+ OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+ return;
+out_stuck:
+ return;
+}
+
+/*
+ * Acquire a control block
+ */
+struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order)
+{
+ u_char tn = cmd->device->id;
+ u_char ln = cmd->device->lun;
+ struct sym_tcb *tp = &np->target[tn];
+ struct sym_lcb *lp = sym_lp(tp, ln);
+ u_short tag = NO_TAG;
+ SYM_QUEHEAD *qp;
+ struct sym_ccb *cp = NULL;
+
+ /*
+ * Look for a free CCB
+ */
+ if (sym_que_empty(&np->free_ccbq))
+ sym_alloc_ccb(np);
+ qp = sym_remque_head(&np->free_ccbq);
+ if (!qp)
+ goto out;
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+
+ {
+ /*
+ * If we have been asked for a tagged command.
+ */
+ if (tag_order) {
+ /*
+ * Debugging purpose.
+ */
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (lp->busy_itl != 0)
+ goto out_free;
+#endif
+ /*
+ * Allocate resources for tags if not yet.
+ */
+ if (!lp->cb_tags) {
+ sym_alloc_lcb_tags(np, tn, ln);
+ if (!lp->cb_tags)
+ goto out_free;
+ }
+ /*
+ * Get a tag for this SCSI IO and set up
+ * the CCB bus address for reselection,
+ * and count it for this LUN.
+ * Toggle reselect path to tagged.
+ */
+ if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
+ tag = lp->cb_tags[lp->ia_tag];
+ if (++lp->ia_tag == SYM_CONF_MAX_TASK)
+ lp->ia_tag = 0;
+ ++lp->busy_itlq;
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
+ lp->head.resel_sa =
+ cpu_to_scr(SCRIPTA_BA(np, resel_tag));
+#endif
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+ cp->tags_si = lp->tags_si;
+ ++lp->tags_sum[cp->tags_si];
+ ++lp->tags_since;
+#endif
+ }
+ else
+ goto out_free;
+ }
+ /*
+ * This command will not be tagged.
+ * If we already have either a tagged or untagged
+ * one, refuse to overlap this untagged one.
+ */
+ else {
+ /*
+ * Debugging purpose.
+ */
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (lp->busy_itl != 0 || lp->busy_itlq != 0)
+ goto out_free;
+#endif
+ /*
+ * Count this nexus for this LUN.
+ * Set up the CCB bus address for reselection.
+ * Toggle reselect path to untagged.
+ */
+ ++lp->busy_itl;
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (lp->busy_itl == 1) {
+ lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
+ lp->head.resel_sa =
+ cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
+ }
+ else
+ goto out_free;
+#endif
+ }
+ }
+ /*
+ * Put the CCB into the busy queue.
+ */
+ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (lp) {
+ sym_remque(&cp->link2_ccbq);
+ sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
+ }
+
+#endif
+ cp->to_abort = 0;
+ cp->odd_byte_adjustment = 0;
+ cp->tag = tag;
+ cp->order = tag_order;
+ cp->target = tn;
+ cp->lun = ln;
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
+ }
+
+out:
+ return cp;
+out_free:
+ sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
+ return NULL;
+}
+
+/*
+ * Release one control block
+ */
+void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
+{
+ struct sym_tcb *tp = &np->target[cp->target];
+ struct sym_lcb *lp = sym_lp(tp, cp->lun);
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
+ cp, cp->tag);
+ }
+
+ /*
+ * If LCB available,
+ */
+ if (lp) {
+ /*
+ * If tagged, release the tag, set the relect path
+ */
+ if (cp->tag != NO_TAG) {
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+ --lp->tags_sum[cp->tags_si];
+#endif
+ /*
+ * Free the tag value.
+ */
+ lp->cb_tags[lp->if_tag] = cp->tag;
+ if (++lp->if_tag == SYM_CONF_MAX_TASK)
+ lp->if_tag = 0;
+ /*
+ * Make the reselect path invalid,
+ * and uncount this CCB.
+ */
+ lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
+ --lp->busy_itlq;
+ } else { /* Untagged */
+ /*
+ * Make the reselect path invalid,
+ * and uncount this CCB.
+ */
+ lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
+ --lp->busy_itl;
+ }
+ /*
+ * If no JOB active, make the LUN reselect path invalid.
+ */
+ if (lp->busy_itlq == 0 && lp->busy_itl == 0)
+ lp->head.resel_sa =
+ cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
+ }
+
+ /*
+ * We donnot queue more than 1 ccb per target
+ * with negotiation at any time. If this ccb was
+ * used for negotiation, clear this info in the tcb.
+ */
+ if (cp == tp->nego_cp)
+ tp->nego_cp = NULL;
+
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If we just complete the last queued CCB,
+ * clear this info that is no longer relevant.
+ */
+ if (cp == np->last_cp)
+ np->last_cp = 0;
+#endif
+
+ /*
+ * Make this CCB available.
+ */
+ cp->cmd = NULL;
+ cp->host_status = HS_IDLE;
+ sym_remque(&cp->link_ccbq);
+ sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (lp) {
+ sym_remque(&cp->link2_ccbq);
+ sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
+ if (cp->started) {
+ if (cp->tag != NO_TAG)
+ --lp->started_tags;
+ else
+ --lp->started_no_tag;
+ }
+ }
+ cp->started = 0;
+#endif
+}
+
+/*
+ * Allocate a CCB from memory and initialize its fixed part.
+ */
+static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
+{
+ struct sym_ccb *cp = NULL;
+ int hcode;
+
+ /*
+ * Prevent from allocating more CCBs than we can
+ * queue to the controller.
+ */
+ if (np->actccbs >= SYM_CONF_MAX_START)
+ return NULL;
+
+ /*
+ * Allocate memory for this CCB.
+ */
+ cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
+ if (!cp)
+ goto out_free;
+
+ /*
+ * Count it.
+ */
+ np->actccbs++;
+
+ /*
+ * Compute the bus address of this ccb.
+ */
+ cp->ccb_ba = vtobus(cp);
+
+ /*
+ * Insert this ccb into the hashed list.
+ */
+ hcode = CCB_HASH_CODE(cp->ccb_ba);
+ cp->link_ccbh = np->ccbh[hcode];
+ np->ccbh[hcode] = cp;
+
+ /*
+ * Initialyze the start and restart actions.
+ */
+ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle));
+ cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+
+ /*
+ * Initilialyze some other fields.
+ */
+ cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));
+
+ /*
+ * Chain into free ccb queue.
+ */
+ sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+ /*
+ * Chain into optionnal lists.
+ */
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
+#endif
+ return cp;
+out_free:
+ if (cp)
+ sym_mfree_dma(cp, sizeof(*cp), "CCB");
+ return NULL;
+}
+
+/*
+ * Look up a CCB from a DSA value.
+ */
+static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
+{
+ int hcode;
+ struct sym_ccb *cp;
+
+ hcode = CCB_HASH_CODE(dsa);
+ cp = np->ccbh[hcode];
+ while (cp) {
+ if (cp->ccb_ba == dsa)
+ break;
+ cp = cp->link_ccbh;
+ }
+
+ return cp;
+}
+
+/*
+ * Target control block initialisation.
+ * Nothing important to do at the moment.
+ */
+static void sym_init_tcb (struct sym_hcb *np, u_char tn)
+{
+#if 0 /* Hmmm... this checking looks paranoid. */
+ /*
+ * Check some alignments required by the chip.
+ */
+ assert (((offsetof(struct sym_reg, nc_sxfer) ^
+ offsetof(struct sym_tcb, head.sval)) &3) == 0);
+ assert (((offsetof(struct sym_reg, nc_scntl3) ^
+ offsetof(struct sym_tcb, head.wval)) &3) == 0);
+#endif
+}
+
+/*
+ * Lun control block allocation and initialization.
+ */
+struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
+{
+ struct sym_tcb *tp = &np->target[tn];
+ struct sym_lcb *lp = NULL;
+
+ /*
+ * Initialize the target control block if not yet.
+ */
+ sym_init_tcb (np, tn);
+
+ /*
+ * Allocate the LCB bus address array.
+ * Compute the bus address of this table.
+ */
+ if (ln && !tp->luntbl) {
+ int i;
+
+ tp->luntbl = sym_calloc_dma(256, "LUNTBL");
+ if (!tp->luntbl)
+ goto fail;
+ for (i = 0 ; i < 64 ; i++)
+ tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
+ tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
+ }
+
+ /*
+ * Allocate the table of pointers for LUN(s) > 0, if needed.
+ */
+ if (ln && !tp->lunmp) {
+ tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
+ GFP_ATOMIC);
+ if (!tp->lunmp)
+ goto fail;
+ }
+
+ /*
+ * Allocate the lcb.
+ * Make it available to the chip.
+ */
+ lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
+ if (!lp)
+ goto fail;
+ if (ln) {
+ tp->lunmp[ln] = lp;
+ tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
+ }
+ else {
+ tp->lun0p = lp;
+ tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
+ }
+ tp->nlcb++;
+
+ /*
+ * Let the itl task point to error handling.
+ */
+ lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
+
+ /*
+ * Set the reselect pattern to our default. :)
+ */
+ lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
+
+ /*
+ * Set user capabilities.
+ */
+ lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ /*
+ * Initialize device queueing.
+ */
+ sym_que_init(&lp->waiting_ccbq);
+ sym_que_init(&lp->started_ccbq);
+ lp->started_max = SYM_CONF_MAX_TASK;
+ lp->started_limit = SYM_CONF_MAX_TASK;
+#endif
+
+fail:
+ return lp;
+}
+
+/*
+ * Allocate LCB resources for tagged command queuing.
+ */
+static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
+{
+ struct sym_tcb *tp = &np->target[tn];
+ struct sym_lcb *lp = sym_lp(tp, ln);
+ int i;
+
+ /*
+ * Allocate the task table and and the tag allocation
+ * circular buffer. We want both or none.
+ */
+ lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
+ if (!lp->itlq_tbl)
+ goto fail;
+ lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC);
+ if (!lp->cb_tags) {
+ sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
+ lp->itlq_tbl = NULL;
+ goto fail;
+ }
+
+ /*
+ * Initialize the task table with invalid entries.
+ */
+ for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
+ lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);
+
+ /*
+ * Fill up the tag buffer with tag numbers.
+ */
+ for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
+ lp->cb_tags[i] = i;
+
+ /*
+ * Make the task table available to SCRIPTS,
+ * And accept tagged commands now.
+ */
+ lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
+
+ return;
+fail:
+ return;
+}
+
+/*
+ * Lun control block deallocation. Returns the number of valid remaining LCBs
+ * for the target.
+ */
+int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
+{
+ struct sym_tcb *tp = &np->target[tn];
+ struct sym_lcb *lp = sym_lp(tp, ln);
+
+ tp->nlcb--;
+
+ if (ln) {
+ if (!tp->nlcb) {
+ kfree(tp->lunmp);
+ sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
+ tp->lunmp = NULL;
+ tp->luntbl = NULL;
+ tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
+ } else {
+ tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
+ tp->lunmp[ln] = NULL;
+ }
+ } else {
+ tp->lun0p = NULL;
+ tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
+ }
+
+ if (lp->itlq_tbl) {
+ sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
+ kfree(lp->cb_tags);
+ }
+
+ sym_mfree_dma(lp, sizeof(*lp), "LCB");
+
+ return tp->nlcb;
+}
+
+/*
+ * Queue a SCSI IO to the controller.
+ */
+int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct sym_tcb *tp;
+ struct sym_lcb *lp;
+ u_char *msgptr;
+ u_int msglen;
+ int can_disconnect;
+
+ /*
+ * Keep track of the IO in our CCB.
+ */
+ cp->cmd = cmd;
+
+ /*
+ * Retrieve the target descriptor.
+ */
+ tp = &np->target[cp->target];
+
+ /*
+ * Retrieve the lun descriptor.
+ */
+ lp = sym_lp(tp, sdev->lun);
+
+ can_disconnect = (cp->tag != NO_TAG) ||
+ (lp && (lp->curr_flags & SYM_DISC_ENABLED));
+
+ msgptr = cp->scsi_smsg;
+ msglen = 0;
+ msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun);
+
+ /*
+ * Build the tag message if present.
+ */
+ if (cp->tag != NO_TAG) {
+ u_char order = cp->order;
+
+ switch(order) {
+ case M_ORDERED_TAG:
+ break;
+ case M_HEAD_TAG:
+ break;
+ default:
+ order = M_SIMPLE_TAG;
+ }
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+ /*
+ * Avoid too much reordering of SCSI commands.
+ * The algorithm tries to prevent completion of any
+ * tagged command from being delayed against more
+ * than 3 times the max number of queued commands.
+ */
+ if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) {
+ lp->tags_si = !(lp->tags_si);
+ if (lp->tags_sum[lp->tags_si]) {
+ order = M_ORDERED_TAG;
+ if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) {
+ sym_print_addr(cmd,
+ "ordered tag forced.\n");
+ }
+ }
+ lp->tags_since = 0;
+ }
+#endif
+ msgptr[msglen++] = order;
+
+ /*
+ * For less than 128 tags, actual tags are numbered
+ * 1,3,5,..2*MAXTAGS+1,since we may have to deal
+ * with devices that have problems with #TAG 0 or too
+ * great #TAG numbers. For more tags (up to 256),
+ * we use directly our tag number.
+ */
+#if SYM_CONF_MAX_TASK > (512/4)
+ msgptr[msglen++] = cp->tag;
+#else
+ msgptr[msglen++] = (cp->tag << 1) + 1;
+#endif
+ }
+
+ /*
+ * Build a negotiation message if needed.
+ * (nego_status is filled by sym_prepare_nego())
+ *
+ * Always negotiate on INQUIRY and REQUEST SENSE.
+ *
+ */
+ cp->nego_status = 0;
+ if ((tp->tgoal.check_nego ||
+ cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) &&
+ !tp->nego_cp && lp) {
+ msglen += sym_prepare_nego(np, cp, msgptr + msglen);
+ }
+
+ /*
+ * Startqueue
+ */
+ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
+ cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa));
+
+ /*
+ * select
+ */
+ cp->phys.select.sel_id = cp->target;
+ cp->phys.select.sel_scntl3 = tp->head.wval;
+ cp->phys.select.sel_sxfer = tp->head.sval;
+ cp->phys.select.sel_scntl4 = tp->head.uval;
+
+ /*
+ * message
+ */
+ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg);
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ /*
+ * status
+ */
+ cp->host_xflags = 0;
+ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+ cp->ssss_status = S_ILLEGAL;
+ cp->xerr_status = 0;
+ cp->host_flags = 0;
+ cp->extra_bytes = 0;
+
+ /*
+ * extreme data pointer.
+ * shall be positive, so -1 is lower than lowest.:)
+ */
+ cp->ext_sg = -1;
+ cp->ext_ofs = 0;
+
+ /*
+ * Build the CDB and DATA descriptor block
+ * and start the IO.
+ */
+ return sym_setup_data_and_start(np, cmd, cp);
+}
+
+/*
+ * Reset a SCSI target (all LUNs of this target).
+ */
+int sym_reset_scsi_target(struct sym_hcb *np, int target)
+{
+ struct sym_tcb *tp;
+
+ if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET)
+ return -1;
+
+ tp = &np->target[target];
+ tp->to_reset = 1;
+
+ np->istat_sem = SEM;
+ OUTB(np, nc_istat, SIGP|SEM);
+
+ return 0;
+}
+
+/*
+ * Abort a SCSI IO.
+ */
+static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out)
+{
+ /*
+ * Check that the IO is active.
+ */
+ if (!cp || !cp->host_status || cp->host_status == HS_WAIT)
+ return -1;
+
+ /*
+ * If a previous abort didn't succeed in time,
+ * perform a BUS reset.
+ */
+ if (cp->to_abort) {
+ sym_reset_scsi_bus(np, 1);
+ return 0;
+ }
+
+ /*
+ * Mark the CCB for abort and allow time for.
+ */
+ cp->to_abort = timed_out ? 2 : 1;
+
+ /*
+ * Tell the SCRIPTS processor to stop and synchronize with us.
+ */
+ np->istat_sem = SEM;
+ OUTB(np, nc_istat, SIGP|SEM);
+ return 0;
+}
+
+int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out)
+{
+ struct sym_ccb *cp;
+ SYM_QUEHEAD *qp;
+
+ /*
+ * Look up our CCB control block.
+ */
+ cp = NULL;
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ if (cp2->cmd == cmd) {
+ cp = cp2;
+ break;
+ }
+ }
+
+ return sym_abort_ccb(np, cp, timed_out);
+}
+
+/*
+ * Complete execution of a SCSI command with extended
+ * error, SCSI status error, or having been auto-sensed.
+ *
+ * The SCRIPTS processor is not running there, so we
+ * can safely access IO registers and remove JOBs from
+ * the START queue.
+ * SCRATCHA is assumed to have been loaded with STARTPOS
+ * before the SCRIPTS called the C code.
+ */
+void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
+{
+ struct scsi_device *sdev;
+ struct scsi_cmnd *cmd;
+ struct sym_tcb *tp;
+ struct sym_lcb *lp;
+ int resid;
+ int i;
+
+ /*
+ * Paranoid check. :)
+ */
+ if (!cp || !cp->cmd)
+ return;
+
+ cmd = cp->cmd;
+ sdev = cmd->device;
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
+ dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp,
+ cp->host_status, cp->ssss_status, cp->host_flags);
+ }
+
+ /*
+ * Get target and lun pointers.
+ */
+ tp = &np->target[cp->target];
+ lp = sym_lp(tp, sdev->lun);
+
+ /*
+ * Check for extended errors.
+ */
+ if (cp->xerr_status) {
+ if (sym_verbose)
+ sym_print_xerr(cmd, cp->xerr_status);
+ if (cp->host_status == HS_COMPLETE)
+ cp->host_status = HS_COMP_ERR;
+ }
+
+ /*
+ * Calculate the residual.
+ */
+ resid = sym_compute_residual(np, cp);
+
+ if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */
+ resid = 0; /* throw them away. :) */
+ cp->sv_resid = 0;
+ }
+#ifdef DEBUG_2_0_X
+if (resid)
+ printf("XXXX RESID= %d - 0x%x\n", resid, resid);
+#endif
+
+ /*
+ * Dequeue all queued CCBs for that device
+ * not yet started by SCRIPTS.
+ */
+ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+ i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1);
+
+ /*
+ * Restart the SCRIPTS processor.
+ */
+ OUTL_DSP(np, SCRIPTA_BA(np, start));
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ if (cp->host_status == HS_COMPLETE &&
+ cp->ssss_status == S_QUEUE_FULL) {
+ if (!lp || lp->started_tags - i < 2)
+ goto weirdness;
+ /*
+ * Decrease queue depth as needed.
+ */
+ lp->started_max = lp->started_tags - i - 1;
+ lp->num_sgood = 0;
+
+ if (sym_verbose >= 2) {
+ sym_print_addr(cmd, " queue depth is now %d\n",
+ lp->started_max);
+ }
+
+ /*
+ * Repair the CCB.
+ */
+ cp->host_status = HS_BUSY;
+ cp->ssss_status = S_ILLEGAL;
+
+ /*
+ * Let's requeue it to device.
+ */
+ sym_set_cam_status(cmd, DID_SOFT_ERROR);
+ goto finish;
+ }
+weirdness:
+#endif
+ /*
+ * Build result in CAM ccb.
+ */
+ sym_set_cam_result_error(np, cp, resid);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+finish:
+#endif
+ /*
+ * Add this one to the COMP queue.
+ */
+ sym_remque(&cp->link_ccbq);
+ sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
+
+ /*
+ * Complete all those commands with either error
+ * or requeue condition.
+ */
+ sym_flush_comp_queue(np, 0);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ /*
+ * Donnot start more than 1 command after an error.
+ */
+ sym_start_next_ccbs(np, lp, 1);
+#endif
+}
+
+/*
+ * Complete execution of a successful SCSI command.
+ *
+ * Only successful commands go to the DONE queue,
+ * since we need to have the SCRIPTS processor
+ * stopped on any error condition.
+ * The SCRIPTS processor is running while we are
+ * completing successful commands.
+ */
+void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
+{
+ struct sym_tcb *tp;
+ struct sym_lcb *lp;
+ struct scsi_cmnd *cmd;
+ int resid;
+
+ /*
+ * Paranoid check. :)
+ */
+ if (!cp || !cp->cmd)
+ return;
+ assert (cp->host_status == HS_COMPLETE);
+
+ /*
+ * Get user command.
+ */
+ cmd = cp->cmd;
+
+ /*
+ * Get target and lun pointers.
+ */
+ tp = &np->target[cp->target];
+ lp = sym_lp(tp, cp->lun);
+
+ /*
+ * If all data have been transferred, given than no
+ * extended error did occur, there is no residual.
+ */
+ resid = 0;
+ if (cp->phys.head.lastp != cp->goalp)
+ resid = sym_compute_residual(np, cp);
+
+ /*
+ * Wrong transfer residuals may be worse than just always
+ * returning zero. User can disable this feature in
+ * sym53c8xx.h. Residual support is enabled by default.
+ */
+ if (!SYM_SETUP_RESIDUAL_SUPPORT)
+ resid = 0;
+#ifdef DEBUG_2_0_X
+if (resid)
+ printf("XXXX RESID= %d - 0x%x\n", resid, resid);
+#endif
+
+ /*
+ * Build result in CAM ccb.
+ */
+ sym_set_cam_result_ok(cp, cmd, resid);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ /*
+ * If max number of started ccbs had been reduced,
+ * increase it if 200 good status received.
+ */
+ if (lp && lp->started_max < lp->started_limit) {
+ ++lp->num_sgood;
+ if (lp->num_sgood >= 200) {
+ lp->num_sgood = 0;
+ ++lp->started_max;
+ if (sym_verbose >= 2) {
+ sym_print_addr(cmd, " queue depth is now %d\n",
+ lp->started_max);
+ }
+ }
+ }
+#endif
+
+ /*
+ * Free our CCB.
+ */
+ sym_free_ccb (np, cp);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ /*
+ * Requeue a couple of awaiting scsi commands.
+ */
+ if (!sym_que_empty(&lp->waiting_ccbq))
+ sym_start_next_ccbs(np, lp, 2);
+#endif
+ /*
+ * Complete the command.
+ */
+ sym_xpt_done(np, cmd);
+}
+
+/*
+ * Soft-attach the controller.
+ */
+int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram)
+{
+ struct sym_hcb *np = sym_get_hcb(shost);
+ int i;
+
+ /*
+ * Get some info about the firmware.
+ */
+ np->scripta_sz = fw->a_size;
+ np->scriptb_sz = fw->b_size;
+ np->scriptz_sz = fw->z_size;
+ np->fw_setup = fw->setup;
+ np->fw_patch = fw->patch;
+ np->fw_name = fw->name;
+
+ /*
+ * Save setting of some IO registers, so we will
+ * be able to probe specific implementations.
+ */
+ sym_save_initial_setting (np);
+
+ /*
+ * Reset the chip now, since it has been reported
+ * that SCSI clock calibration may not work properly
+ * if the chip is currently active.
+ */
+ sym_chip_reset(np);
+
+ /*
+ * Prepare controller and devices settings, according
+ * to chip features, user set-up and driver set-up.
+ */
+ sym_prepare_setting(shost, np, nvram);
+
+ /*
+ * Check the PCI clock frequency.
+ * Must be performed after prepare_setting since it destroys
+ * STEST1 that is used to probe for the clock doubler.
+ */
+ i = sym_getpciclock(np);
+ if (i > 37000 && !(np->features & FE_66MHZ))
+ printf("%s: PCI BUS clock seems too high: %u KHz.\n",
+ sym_name(np), i);
+
+ /*
+ * Allocate the start queue.
+ */
+ np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
+ if (!np->squeue)
+ goto attach_failed;
+ np->squeue_ba = vtobus(np->squeue);
+
+ /*
+ * Allocate the done queue.
+ */
+ np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
+ if (!np->dqueue)
+ goto attach_failed;
+ np->dqueue_ba = vtobus(np->dqueue);
+
+ /*
+ * Allocate the target bus address array.
+ */
+ np->targtbl = sym_calloc_dma(256, "TARGTBL");
+ if (!np->targtbl)
+ goto attach_failed;
+ np->targtbl_ba = vtobus(np->targtbl);
+
+ /*
+ * Allocate SCRIPTS areas.
+ */
+ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
+ np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
+ np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0");
+ if (!np->scripta0 || !np->scriptb0 || !np->scriptz0)
+ goto attach_failed;
+
+ /*
+ * Allocate the array of lists of CCBs hashed by DSA.
+ */
+ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
+ if (!np->ccbh)
+ goto attach_failed;
+
+ /*
+ * Initialyze the CCB free and busy queues.
+ */
+ sym_que_init(&np->free_ccbq);
+ sym_que_init(&np->busy_ccbq);
+ sym_que_init(&np->comp_ccbq);
+
+ /*
+ * Initialization for optional handling
+ * of device queueing.
+ */
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ sym_que_init(&np->dummy_ccbq);
+#endif
+ /*
+ * Allocate some CCB. We need at least ONE.
+ */
+ if (!sym_alloc_ccb(np))
+ goto attach_failed;
+
+ /*
+ * Calculate BUS addresses where we are going
+ * to load the SCRIPTS.
+ */
+ np->scripta_ba = vtobus(np->scripta0);
+ np->scriptb_ba = vtobus(np->scriptb0);
+ np->scriptz_ba = vtobus(np->scriptz0);
+
+ if (np->ram_ba) {
+ np->scripta_ba = np->ram_ba;
+ if (np->features & FE_RAM8K) {
+ np->scriptb_ba = np->scripta_ba + 4096;
+#if 0 /* May get useful for 64 BIT PCI addressing */
+ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
+#endif
+ }
+ }
+
+ /*
+ * Copy scripts to controller instance.
+ */
+ memcpy(np->scripta0, fw->a_base, np->scripta_sz);
+ memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
+ memcpy(np->scriptz0, fw->z_base, np->scriptz_sz);
+
+ /*
+ * Setup variable parts in scripts and compute
+ * scripts bus addresses used from the C code.
+ */
+ np->fw_setup(np, fw);
+
+ /*
+ * Bind SCRIPTS with physical addresses usable by the
+ * SCRIPTS processor (as seen from the BUS = BUS addresses).
+ */
+ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
+ sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
+ sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz);
+
+#ifdef SYM_CONF_IARB_SUPPORT
+ /*
+ * If user wants IARB to be set when we win arbitration
+ * and have other jobs, compute the max number of consecutive
+ * settings of IARB hints before we leave devices a chance to
+ * arbitrate for reselection.
+ */
+#ifdef SYM_SETUP_IARB_MAX
+ np->iarb_max = SYM_SETUP_IARB_MAX;
+#else
+ np->iarb_max = 4;
+#endif
+#endif
+
+ /*
+ * Prepare the idle and invalid task actions.
+ */
+ np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle));
+ np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+ np->idletask_ba = vtobus(&np->idletask);
+
+ np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle));
+ np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+ np->notask_ba = vtobus(&np->notask);
+
+ np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle));
+ np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+ np->bad_itl_ba = vtobus(&np->bad_itl);
+
+ np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle));
+ np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q));
+ np->bad_itlq_ba = vtobus(&np->bad_itlq);
+
+ /*
+ * Allocate and prepare the lun JUMP table that is used
+ * for a target prior the probing of devices (bad lun table).
+ * A private table will be allocated for the target on the
+ * first INQUIRY response received.
+ */
+ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
+ if (!np->badluntbl)
+ goto attach_failed;
+
+ np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
+ for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */
+ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
+
+ /*
+ * Prepare the bus address array that contains the bus
+ * address of each target control block.
+ * For now, assume all logical units are wrong. :)
+ */
+ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+ np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
+ np->target[i].head.luntbl_sa =
+ cpu_to_scr(vtobus(np->badluntbl));
+ np->target[i].head.lun0_sa =
+ cpu_to_scr(vtobus(&np->badlun_sa));
+ }
+
+ /*
+ * Now check the cache handling of the pci chipset.
+ */
+ if (sym_snooptest (np)) {
+ printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
+ goto attach_failed;
+ }
+
+ /*
+ * Sigh! we are done.
+ */
+ return 0;
+
+attach_failed:
+ return -ENXIO;
+}
+
+/*
+ * Free everything that has been allocated for this device.
+ */
+void sym_hcb_free(struct sym_hcb *np)
+{
+ SYM_QUEHEAD *qp;
+ struct sym_ccb *cp;
+ struct sym_tcb *tp;
+ int target;
+
+ if (np->scriptz0)
+ sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0");
+ if (np->scriptb0)
+ sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
+ if (np->scripta0)
+ sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
+ if (np->squeue)
+ sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
+ if (np->dqueue)
+ sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
+
+ if (np->actccbs) {
+ while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
+ cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+ sym_mfree_dma(cp, sizeof(*cp), "CCB");
+ }
+ }
+ kfree(np->ccbh);
+
+ if (np->badluntbl)
+ sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
+
+ for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
+ tp = &np->target[target];
+ if (tp->luntbl)
+ sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
+#if SYM_CONF_MAX_LUN > 1
+ kfree(tp->lunmp);
+#endif
+ }
+ if (np->targtbl)
+ sym_mfree_dma(np->targtbl, 256, "TARGTBL");
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
new file mode 100644
index 000000000..a141b1758
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -0,0 +1,1226 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/gfp.h>
+
+#ifndef SYM_HIPD_H
+#define SYM_HIPD_H
+
+/*
+ * Generic driver options.
+ *
+ * They may be defined in platform specific headers, if they
+ * are useful.
+ *
+ * SYM_OPT_HANDLE_DEVICE_QUEUEING
+ * When this option is set, the driver will use a queue per
+ * device and handle QUEUE FULL status requeuing internally.
+ *
+ * SYM_OPT_LIMIT_COMMAND_REORDERING
+ * When this option is set, the driver tries to limit tagged
+ * command reordering to some reasonable value.
+ * (set for Linux)
+ */
+#if 0
+#define SYM_OPT_HANDLE_DEVICE_QUEUEING
+#define SYM_OPT_LIMIT_COMMAND_REORDERING
+#endif
+
+/*
+ * Active debugging tags and verbosity.
+ * Both DEBUG_FLAGS and sym_verbose can be redefined
+ * by the platform specific code to something else.
+ */
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_POLL (0x0004)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_SCATTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_POINTER (0x0800)
+
+#ifndef DEBUG_FLAGS
+#define DEBUG_FLAGS (0x0000)
+#endif
+
+#ifndef sym_verbose
+#define sym_verbose (np->verbose)
+#endif
+
+/*
+ * These ones should have been already defined.
+ */
+#ifndef assert
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+#endif
+
+/*
+ * Number of tasks per device we want to handle.
+ */
+#if SYM_CONF_MAX_TAG_ORDER > 8
+#error "more than 256 tags per logical unit not allowed."
+#endif
+#define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER)
+
+/*
+ * Donnot use more tasks that we can handle.
+ */
+#ifndef SYM_CONF_MAX_TAG
+#define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
+#endif
+#if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
+#undef SYM_CONF_MAX_TAG
+#define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK
+#endif
+
+/*
+ * This one means 'NO TAG for this job'
+ */
+#define NO_TAG (256)
+
+/*
+ * Number of SCSI targets.
+ */
+#if SYM_CONF_MAX_TARGET > 16
+#error "more than 16 targets not allowed."
+#endif
+
+/*
+ * Number of logical units per target.
+ */
+#if SYM_CONF_MAX_LUN > 64
+#error "more than 64 logical units per target not allowed."
+#endif
+
+/*
+ * Asynchronous pre-scaler (ns). Shall be 40 for
+ * the SCSI timings to be compliant.
+ */
+#define SYM_CONF_MIN_ASYNC (40)
+
+
+/*
+ * MEMORY ALLOCATOR.
+ */
+
+#define SYM_MEM_WARN 1 /* Warn on failed operations */
+
+#define SYM_MEM_PAGE_ORDER 0 /* 1 PAGE maximum */
+#define SYM_MEM_CLUSTER_SHIFT (PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
+#define SYM_MEM_FREE_UNUSED /* Free unused pages immediately */
+/*
+ * Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
+ * Actual allocations happen as SYM_MEM_CLUSTER_SIZE sized.
+ * (1 PAGE at a time is just fine).
+ */
+#define SYM_MEM_SHIFT 4
+#define SYM_MEM_CLUSTER_SIZE (1UL << SYM_MEM_CLUSTER_SHIFT)
+#define SYM_MEM_CLUSTER_MASK (SYM_MEM_CLUSTER_SIZE-1)
+
+/*
+ * Number of entries in the START and DONE queues.
+ *
+ * We limit to 1 PAGE in order to succeed allocation of
+ * these queues. Each entry is 8 bytes long (2 DWORDS).
+ */
+#ifdef SYM_CONF_MAX_START
+#define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
+#else
+#define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
+#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
+#endif
+
+#if SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
+#undef SYM_CONF_MAX_QUEUE
+#define SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
+#undef SYM_CONF_MAX_START
+#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
+#endif
+
+/*
+ * For this one, we want a short name :-)
+ */
+#define MAX_QUEUE SYM_CONF_MAX_QUEUE
+
+/*
+ * Common definitions for both bus space based and legacy IO methods.
+ */
+
+#define INB_OFF(np, o) ioread8(np->s.ioaddr + (o))
+#define INW_OFF(np, o) ioread16(np->s.ioaddr + (o))
+#define INL_OFF(np, o) ioread32(np->s.ioaddr + (o))
+
+#define OUTB_OFF(np, o, val) iowrite8((val), np->s.ioaddr + (o))
+#define OUTW_OFF(np, o, val) iowrite16((val), np->s.ioaddr + (o))
+#define OUTL_OFF(np, o, val) iowrite32((val), np->s.ioaddr + (o))
+
+#define INB(np, r) INB_OFF(np, offsetof(struct sym_reg, r))
+#define INW(np, r) INW_OFF(np, offsetof(struct sym_reg, r))
+#define INL(np, r) INL_OFF(np, offsetof(struct sym_reg, r))
+
+#define OUTB(np, r, v) OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
+#define OUTW(np, r, v) OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
+#define OUTL(np, r, v) OUTL_OFF(np, offsetof(struct sym_reg, r), (v))
+
+#define OUTONB(np, r, m) OUTB(np, r, INB(np, r) | (m))
+#define OUTOFFB(np, r, m) OUTB(np, r, INB(np, r) & ~(m))
+#define OUTONW(np, r, m) OUTW(np, r, INW(np, r) | (m))
+#define OUTOFFW(np, r, m) OUTW(np, r, INW(np, r) & ~(m))
+#define OUTONL(np, r, m) OUTL(np, r, INL(np, r) | (m))
+#define OUTOFFL(np, r, m) OUTL(np, r, INL(np, r) & ~(m))
+
+/*
+ * We normally want the chip to have a consistent view
+ * of driver internal data structures when we restart it.
+ * Thus these macros.
+ */
+#define OUTL_DSP(np, v) \
+ do { \
+ MEMORY_WRITE_BARRIER(); \
+ OUTL(np, nc_dsp, (v)); \
+ } while (0)
+
+#define OUTONB_STD() \
+ do { \
+ MEMORY_WRITE_BARRIER(); \
+ OUTONB(np, nc_dcntl, (STD|NOCOM)); \
+ } while (0)
+
+/*
+ * Command control block states.
+ */
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+#define HS_WAIT (4) /* waiting for resource */
+
+#define HS_DONEMASK (0x80)
+#define HS_COMPLETE (4|HS_DONEMASK)
+#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
+#define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */
+#define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */
+
+/*
+ * Software Interrupt Codes
+ */
+#define SIR_BAD_SCSI_STATUS (1)
+#define SIR_SEL_ATN_NO_MSG_OUT (2)
+#define SIR_MSG_RECEIVED (3)
+#define SIR_MSG_WEIRD (4)
+#define SIR_NEGO_FAILED (5)
+#define SIR_NEGO_PROTO (6)
+#define SIR_SCRIPT_STOPPED (7)
+#define SIR_REJECT_TO_SEND (8)
+#define SIR_SWIDE_OVERRUN (9)
+#define SIR_SODL_UNDERRUN (10)
+#define SIR_RESEL_NO_MSG_IN (11)
+#define SIR_RESEL_NO_IDENTIFY (12)
+#define SIR_RESEL_BAD_LUN (13)
+#define SIR_TARGET_SELECTED (14)
+#define SIR_RESEL_BAD_I_T_L (15)
+#define SIR_RESEL_BAD_I_T_L_Q (16)
+#define SIR_ABORT_SENT (17)
+#define SIR_RESEL_ABORTED (18)
+#define SIR_MSG_OUT_DONE (19)
+#define SIR_COMPLETE_ERROR (20)
+#define SIR_DATA_OVERRUN (21)
+#define SIR_BAD_PHASE (22)
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define SIR_DMAP_DIRTY (23)
+#define SIR_MAX (23)
+#else
+#define SIR_MAX (22)
+#endif
+
+/*
+ * Extended error bit codes.
+ * xerr_status field of struct sym_ccb.
+ */
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */
+#define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */
+#define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */
+#define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */
+
+/*
+ * Negotiation status.
+ * nego_status field of struct sym_ccb.
+ */
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+#define NS_PPR (3)
+
+/*
+ * A CCB hashed table is used to retrieve CCB address
+ * from DSA value.
+ */
+#define CCB_HASH_SHIFT 8
+#define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
+#define CCB_HASH_MASK (CCB_HASH_SIZE-1)
+#if 1
+#define CCB_HASH_CODE(dsa) \
+ (((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
+#else
+#define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK)
+#endif
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+/*
+ * We may want to use segment registers for 64 bit DMA.
+ * 16 segments registers -> up to 64 GB addressable.
+ */
+#define SYM_DMAP_SHIFT (4)
+#define SYM_DMAP_SIZE (1u<<SYM_DMAP_SHIFT)
+#define SYM_DMAP_MASK (SYM_DMAP_SIZE-1)
+#endif
+
+/*
+ * Device flags.
+ */
+#define SYM_DISC_ENABLED (1)
+#define SYM_TAGS_ENABLED (1<<1)
+#define SYM_SCAN_BOOT_DISABLED (1<<2)
+#define SYM_SCAN_LUNS_DISABLED (1<<3)
+
+/*
+ * Host adapter miscellaneous flags.
+ */
+#define SYM_AVOID_BUS_RESET (1)
+
+/*
+ * Misc.
+ */
+#define SYM_SNOOP_TIMEOUT (10000000)
+#define BUS_8_BIT 0
+#define BUS_16_BIT 1
+
+/*
+ * Gather negotiable parameters value
+ */
+struct sym_trans {
+ u8 period;
+ u8 offset;
+ unsigned int width:1;
+ unsigned int iu:1;
+ unsigned int dt:1;
+ unsigned int qas:1;
+ unsigned int check_nego:1;
+ unsigned int renego:2;
+};
+
+/*
+ * Global TCB HEADER.
+ *
+ * Due to lack of indirect addressing on earlier NCR chips,
+ * this substructure is copied from the TCB to a global
+ * address after selection.
+ * For SYMBIOS chips that support LOAD/STORE this copy is
+ * not needed and thus not performed.
+ */
+struct sym_tcbh {
+ /*
+ * Scripts bus addresses of LUN table accessed from scripts.
+ * LUN #0 is a special case, since multi-lun devices are rare,
+ * and we we want to speed-up the general case and not waste
+ * resources.
+ */
+ u32 luntbl_sa; /* bus address of this table */
+ u32 lun0_sa; /* bus address of LCB #0 */
+ /*
+ * Actual SYNC/WIDE IO registers value for this target.
+ * 'sval', 'wval' and 'uval' are read from SCRIPTS and
+ * so have alignment constraints.
+ */
+/*0*/ u_char uval; /* -> SCNTL4 register */
+/*1*/ u_char sval; /* -> SXFER io register */
+/*2*/ u_char filler1;
+/*3*/ u_char wval; /* -> SCNTL3 io register */
+};
+
+/*
+ * Target Control Block
+ */
+struct sym_tcb {
+ /*
+ * TCB header.
+ * Assumed at offset 0.
+ */
+/*0*/ struct sym_tcbh head;
+
+ /*
+ * LUN table used by the SCRIPTS processor.
+ * An array of bus addresses is used on reselection.
+ */
+ u32 *luntbl; /* LCBs bus address table */
+ int nlcb; /* Number of valid LCBs (including LUN #0) */
+
+ /*
+ * LUN table used by the C code.
+ */
+ struct sym_lcb *lun0p; /* LCB of LUN #0 (usual case) */
+#if SYM_CONF_MAX_LUN > 1
+ struct sym_lcb **lunmp; /* Other LCBs [1..MAX_LUN] */
+#endif
+
+#ifdef SYM_HAVE_STCB
+ /*
+ * O/S specific data structure.
+ */
+ struct sym_stcb s;
+#endif
+
+ /* Transfer goal */
+ struct sym_trans tgoal;
+
+ /* Last printed transfer speed */
+ struct sym_trans tprint;
+
+ /*
+ * Keep track of the CCB used for the negotiation in order
+ * to ensure that only 1 negotiation is queued at a time.
+ */
+ struct sym_ccb * nego_cp; /* CCB used for the nego */
+
+ /*
+ * Set when we want to reset the device.
+ */
+ u_char to_reset;
+
+ /*
+ * Other user settable limits and options.
+ * These limits are read from the NVRAM if present.
+ */
+ unsigned char usrflags;
+ unsigned char usr_period;
+ unsigned char usr_width;
+ unsigned short usrtags;
+ struct scsi_target *starget;
+};
+
+/*
+ * Global LCB HEADER.
+ *
+ * Due to lack of indirect addressing on earlier NCR chips,
+ * this substructure is copied from the LCB to a global
+ * address after selection.
+ * For SYMBIOS chips that support LOAD/STORE this copy is
+ * not needed and thus not performed.
+ */
+struct sym_lcbh {
+ /*
+ * SCRIPTS address jumped by SCRIPTS on reselection.
+ * For not probed logical units, this address points to
+ * SCRIPTS that deal with bad LU handling (must be at
+ * offset zero of the LCB for that reason).
+ */
+/*0*/ u32 resel_sa;
+
+ /*
+ * Task (bus address of a CCB) read from SCRIPTS that points
+ * to the unique ITL nexus allowed to be disconnected.
+ */
+ u32 itl_task_sa;
+
+ /*
+ * Task table bus address (read from SCRIPTS).
+ */
+ u32 itlq_tbl_sa;
+};
+
+/*
+ * Logical Unit Control Block
+ */
+struct sym_lcb {
+ /*
+ * TCB header.
+ * Assumed at offset 0.
+ */
+/*0*/ struct sym_lcbh head;
+
+ /*
+ * Task table read from SCRIPTS that contains pointers to
+ * ITLQ nexuses. The bus address read from SCRIPTS is
+ * inside the header.
+ */
+ u32 *itlq_tbl; /* Kernel virtual address */
+
+ /*
+ * Busy CCBs management.
+ */
+ u_short busy_itlq; /* Number of busy tagged CCBs */
+ u_short busy_itl; /* Number of busy untagged CCBs */
+
+ /*
+ * Circular tag allocation buffer.
+ */
+ u_short ia_tag; /* Tag allocation index */
+ u_short if_tag; /* Tag release index */
+ u_char *cb_tags; /* Circular tags buffer */
+
+ /*
+ * O/S specific data structure.
+ */
+#ifdef SYM_HAVE_SLCB
+ struct sym_slcb s;
+#endif
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ /*
+ * Optionnaly the driver can handle device queueing,
+ * and requeues internally command to redo.
+ */
+ SYM_QUEHEAD waiting_ccbq;
+ SYM_QUEHEAD started_ccbq;
+ int num_sgood;
+ u_short started_tags;
+ u_short started_no_tag;
+ u_short started_max;
+ u_short started_limit;
+#endif
+
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+ /*
+ * Optionally the driver can try to prevent SCSI
+ * IOs from being reordered too much.
+ */
+ u_char tags_si; /* Current index to tags sum */
+ u_short tags_sum[2]; /* Tags sum counters */
+ u_short tags_since; /* # of tags since last switch */
+#endif
+
+ /*
+ * Set when we want to clear all tasks.
+ */
+ u_char to_clear;
+
+ /*
+ * Capabilities.
+ */
+ u_char user_flags;
+ u_char curr_flags;
+};
+
+/*
+ * Action from SCRIPTS on a task.
+ * Is part of the CCB, but is also used separately to plug
+ * error handling action to perform from SCRIPTS.
+ */
+struct sym_actscr {
+ u32 start; /* Jumped by SCRIPTS after selection */
+ u32 restart; /* Jumped by SCRIPTS on relection */
+};
+
+/*
+ * Phase mismatch context.
+ *
+ * It is part of the CCB and is used as parameters for the
+ * DATA pointer. We need two contexts to handle correctly the
+ * SAVED DATA POINTER.
+ */
+struct sym_pmc {
+ struct sym_tblmove sg; /* Updated interrupted SG block */
+ u32 ret; /* SCRIPT return address */
+};
+
+/*
+ * LUN control block lookup.
+ * We use a direct pointer for LUN #0, and a table of
+ * pointers which is only allocated for devices that support
+ * LUN(s) > 0.
+ */
+#if SYM_CONF_MAX_LUN <= 1
+#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
+#else
+#define sym_lp(tp, lun) \
+ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL
+#endif
+
+/*
+ * Status are used by the host and the script processor.
+ *
+ * The last four bytes (status[4]) are copied to the
+ * scratchb register (declared as scr0..scr3) just after the
+ * select/reselect, and copied back just after disconnecting.
+ * Inside the script the XX_REG are used.
+ */
+
+/*
+ * Last four bytes (script)
+ */
+#define HX_REG scr0
+#define HX_PRT nc_scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define SS_PRT nc_scr2
+#define HF_REG scr3
+#define HF_PRT nc_scr3
+
+/*
+ * Last four bytes (host)
+ */
+#define host_xflags phys.head.status[0]
+#define host_status phys.head.status[1]
+#define ssss_status phys.head.status[2]
+#define host_flags phys.head.status[3]
+
+/*
+ * Host flags
+ */
+#define HF_IN_PM0 1u
+#define HF_IN_PM1 (1u<<1)
+#define HF_ACT_PM (1u<<2)
+#define HF_DP_SAVED (1u<<3)
+#define HF_SENSE (1u<<4)
+#define HF_EXT_ERR (1u<<5)
+#define HF_DATA_IN (1u<<6)
+#ifdef SYM_CONF_IARB_SUPPORT
+#define HF_HINT_IARB (1u<<7)
+#endif
+
+/*
+ * More host flags
+ */
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define HX_DMAP_DIRTY (1u<<7)
+#endif
+
+/*
+ * Global CCB HEADER.
+ *
+ * Due to lack of indirect addressing on earlier NCR chips,
+ * this substructure is copied from the ccb to a global
+ * address after selection (or reselection) and copied back
+ * before disconnect.
+ * For SYMBIOS chips that support LOAD/STORE this copy is
+ * not needed and thus not performed.
+ */
+
+struct sym_ccbh {
+ /*
+ * Start and restart SCRIPTS addresses (must be at 0).
+ */
+/*0*/ struct sym_actscr go;
+
+ /*
+ * SCRIPTS jump address that deal with data pointers.
+ * 'savep' points to the position in the script responsible
+ * for the actual transfer of data.
+ * It's written on reception of a SAVE_DATA_POINTER message.
+ */
+ u32 savep; /* Jump address to saved data pointer */
+ u32 lastp; /* SCRIPTS address at end of data */
+
+ /*
+ * Status fields.
+ */
+ u8 status[4];
+};
+
+/*
+ * GET/SET the value of the data pointer used by SCRIPTS.
+ *
+ * We must distinguish between the LOAD/STORE-based SCRIPTS
+ * that use directly the header in the CCB, and the NCR-GENERIC
+ * SCRIPTS that use the copy of the header in the HCB.
+ */
+#if SYM_CONF_GENERIC_SUPPORT
+#define sym_set_script_dp(np, cp, dp) \
+ do { \
+ if (np->features & FE_LDSTR) \
+ cp->phys.head.lastp = cpu_to_scr(dp); \
+ else \
+ np->ccb_head.lastp = cpu_to_scr(dp); \
+ } while (0)
+#define sym_get_script_dp(np, cp) \
+ scr_to_cpu((np->features & FE_LDSTR) ? \
+ cp->phys.head.lastp : np->ccb_head.lastp)
+#else
+#define sym_set_script_dp(np, cp, dp) \
+ do { \
+ cp->phys.head.lastp = cpu_to_scr(dp); \
+ } while (0)
+
+#define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
+#endif
+
+/*
+ * Data Structure Block
+ *
+ * During execution of a ccb by the script processor, the
+ * DSA (data structure address) register points to this
+ * substructure of the ccb.
+ */
+struct sym_dsb {
+ /*
+ * CCB header.
+ * Also assumed at offset 0 of the sym_ccb structure.
+ */
+/*0*/ struct sym_ccbh head;
+
+ /*
+ * Phase mismatch contexts.
+ * We need two to handle correctly the SAVED DATA POINTER.
+ * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
+ * for address calculation from SCRIPTS.
+ */
+ struct sym_pmc pm0;
+ struct sym_pmc pm1;
+
+ /*
+ * Table data for Script
+ */
+ struct sym_tblsel select;
+ struct sym_tblmove smsg;
+ struct sym_tblmove smsg_ext;
+ struct sym_tblmove cmd;
+ struct sym_tblmove sense;
+ struct sym_tblmove wresid;
+ struct sym_tblmove data [SYM_CONF_MAX_SG];
+};
+
+/*
+ * Our Command Control Block
+ */
+struct sym_ccb {
+ /*
+ * This is the data structure which is pointed by the DSA
+ * register when it is executed by the script processor.
+ * It must be the first entry.
+ */
+ struct sym_dsb phys;
+
+ /*
+ * Pointer to CAM ccb and related stuff.
+ */
+ struct scsi_cmnd *cmd; /* CAM scsiio ccb */
+ u8 cdb_buf[16]; /* Copy of CDB */
+#define SYM_SNS_BBUF_LEN 32
+ u8 sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
+ int data_len; /* Total data length */
+ int segments; /* Number of SG segments */
+
+ u8 order; /* Tag type (if tagged command) */
+ unsigned char odd_byte_adjustment; /* odd-sized req on wide bus */
+
+ u_char nego_status; /* Negotiation status */
+ u_char xerr_status; /* Extended error flags */
+ u32 extra_bytes; /* Extraneous bytes transferred */
+
+ /*
+ * Message areas.
+ * We prepare a message to be sent after selection.
+ * We may use a second one if the command is rescheduled
+ * due to CHECK_CONDITION or COMMAND TERMINATED.
+ * Contents are IDENTIFY and SIMPLE_TAG.
+ * While negotiating sync or wide transfer,
+ * a SDTR or WDTR message is appended.
+ */
+ u_char scsi_smsg [12];
+ u_char scsi_smsg2[12];
+
+ /*
+ * Auto request sense related fields.
+ */
+ u_char sensecmd[6]; /* Request Sense command */
+ u_char sv_scsi_status; /* Saved SCSI status */
+ u_char sv_xerr_status; /* Saved extended status */
+ int sv_resid; /* Saved residual */
+
+ /*
+ * Other fields.
+ */
+ u32 ccb_ba; /* BUS address of this CCB */
+ u_short tag; /* Tag for this transfer */
+ /* NO_TAG means no tag */
+ u_char target;
+ u_char lun;
+ struct sym_ccb *link_ccbh; /* Host adapter CCB hash chain */
+ SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */
+ u32 startp; /* Initial data pointer */
+ u32 goalp; /* Expected last data pointer */
+ int ext_sg; /* Extreme data pointer, used */
+ int ext_ofs; /* to calculate the residual. */
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ SYM_QUEHEAD link2_ccbq; /* Link for device queueing */
+ u_char started; /* CCB queued to the squeue */
+#endif
+ u_char to_abort; /* Want this IO to be aborted */
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+ u_char tags_si; /* Lun tags sum index (0,1) */
+#endif
+};
+
+#define CCB_BA(cp,lbl) cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))
+
+typedef struct device *m_pool_ident_t;
+
+/*
+ * Host Control Block
+ */
+struct sym_hcb {
+ /*
+ * Global headers.
+ * Due to poorness of addressing capabilities, earlier
+ * chips (810, 815, 825) copy part of the data structures
+ * (CCB, TCB and LCB) in fixed areas.
+ */
+#if SYM_CONF_GENERIC_SUPPORT
+ struct sym_ccbh ccb_head;
+ struct sym_tcbh tcb_head;
+ struct sym_lcbh lcb_head;
+#endif
+ /*
+ * Idle task and invalid task actions and
+ * their bus addresses.
+ */
+ struct sym_actscr idletask, notask, bad_itl, bad_itlq;
+ u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
+
+ /*
+ * Dummy lun table to protect us against target
+ * returning bad lun number on reselection.
+ */
+ u32 *badluntbl; /* Table physical address */
+ u32 badlun_sa; /* SCRIPT handler BUS address */
+
+ /*
+ * Bus address of this host control block.
+ */
+ u32 hcb_ba;
+
+ /*
+ * Bit 32-63 of the on-chip RAM bus address in LE format.
+ * The START_RAM64 script loads the MMRS and MMWS from this
+ * field.
+ */
+ u32 scr_ram_seg;
+
+ /*
+ * Initial value of some IO register bits.
+ * These values are assumed to have been set by BIOS, and may
+ * be used to probe adapter implementation differences.
+ */
+ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
+ sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
+ sv_stest1;
+
+ /*
+ * Actual initial value of IO register bits used by the
+ * driver. They are loaded at initialisation according to
+ * features that are to be enabled/disabled.
+ */
+ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
+ rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
+
+ /*
+ * Target data.
+ */
+ struct sym_tcb target[SYM_CONF_MAX_TARGET];
+
+ /*
+ * Target control block bus address array used by the SCRIPT
+ * on reselection.
+ */
+ u32 *targtbl;
+ u32 targtbl_ba;
+
+ /*
+ * DMA pool handle for this HBA.
+ */
+ m_pool_ident_t bus_dmat;
+
+ /*
+ * O/S specific data structure
+ */
+ struct sym_shcb s;
+
+ /*
+ * Physical bus addresses of the chip.
+ */
+ u32 mmio_ba; /* MMIO 32 bit BUS address */
+ u32 ram_ba; /* RAM 32 bit BUS address */
+
+ /*
+ * SCRIPTS virtual and physical bus addresses.
+ * 'script' is loaded in the on-chip RAM if present.
+ * 'scripth' stays in main memory for all chips except the
+ * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
+ */
+ u_char *scripta0; /* Copy of scripts A, B, Z */
+ u_char *scriptb0;
+ u_char *scriptz0;
+ u32 scripta_ba; /* Actual scripts A, B, Z */
+ u32 scriptb_ba; /* 32 bit bus addresses. */
+ u32 scriptz_ba;
+ u_short scripta_sz; /* Actual size of script A, B, Z*/
+ u_short scriptb_sz;
+ u_short scriptz_sz;
+
+ /*
+ * Bus addresses, setup and patch methods for
+ * the selected firmware.
+ */
+ struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */
+ struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */
+ struct sym_fwz_ba fwz_bas; /* Useful SCRIPTZ bus addresses */
+ void (*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
+ void (*fw_patch)(struct Scsi_Host *);
+ char *fw_name;
+
+ /*
+ * General controller parameters and configuration.
+ */
+ u_int features; /* Chip features map */
+ u_char myaddr; /* SCSI id of the adapter */
+ u_char maxburst; /* log base 2 of dwords burst */
+ u_char maxwide; /* Maximum transfer width */
+ u_char minsync; /* Min sync period factor (ST) */
+ u_char maxsync; /* Max sync period factor (ST) */
+ u_char maxoffs; /* Max scsi offset (ST) */
+ u_char minsync_dt; /* Min sync period factor (DT) */
+ u_char maxsync_dt; /* Max sync period factor (DT) */
+ u_char maxoffs_dt; /* Max scsi offset (DT) */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u32 clock_khz; /* SCSI clock frequency in KHz */
+ u32 pciclk_khz; /* Estimated PCI clock in KHz */
+ /*
+ * Start queue management.
+ * It is filled up by the host processor and accessed by the
+ * SCRIPTS processor in order to start SCSI commands.
+ */
+ volatile /* Prevent code optimizations */
+ u32 *squeue; /* Start queue virtual address */
+ u32 squeue_ba; /* Start queue BUS address */
+ u_short squeueput; /* Next free slot of the queue */
+ u_short actccbs; /* Number of allocated CCBs */
+
+ /*
+ * Command completion queue.
+ * It is the same size as the start queue to avoid overflow.
+ */
+ u_short dqueueget; /* Next position to scan */
+ volatile /* Prevent code optimizations */
+ u32 *dqueue; /* Completion (done) queue */
+ u32 dqueue_ba; /* Done queue BUS address */
+
+ /*
+ * Miscellaneous buffers accessed by the scripts-processor.
+ * They shall be DWORD aligned, because they may be read or
+ * written with a script command.
+ */
+ u_char msgout[8]; /* Buffer for MESSAGE OUT */
+ u_char msgin [8]; /* Buffer for MESSAGE IN */
+ u32 lastmsg; /* Last SCSI message sent */
+ u32 scratch; /* Scratch for SCSI receive */
+ /* Also used for cache test */
+ /*
+ * Miscellaneous configuration and status parameters.
+ */
+ u_char usrflags; /* Miscellaneous user flags */
+ u_char scsi_mode; /* Current SCSI BUS mode */
+ u_char verbose; /* Verbosity for this controller*/
+
+ /*
+ * CCB lists and queue.
+ */
+ struct sym_ccb **ccbh; /* CCBs hashed by DSA value */
+ /* CCB_HASH_SIZE lists of CCBs */
+ SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */
+ SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
+
+ /*
+ * During error handling and/or recovery,
+ * active CCBs that are to be completed with
+ * error or requeued are moved from the busy_ccbq
+ * to the comp_ccbq prior to completion.
+ */
+ SYM_QUEHEAD comp_ccbq;
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ SYM_QUEHEAD dummy_ccbq;
+#endif
+
+ /*
+ * IMMEDIATE ARBITRATION (IARB) control.
+ *
+ * We keep track in 'last_cp' of the last CCB that has been
+ * queued to the SCRIPTS processor and clear 'last_cp' when
+ * this CCB completes. If last_cp is not zero at the moment
+ * we queue a new CCB, we set a flag in 'last_cp' that is
+ * used by the SCRIPTS as a hint for setting IARB.
+ * We donnot set more than 'iarb_max' consecutive hints for
+ * IARB in order to leave devices a chance to reselect.
+ * By the way, any non zero value of 'iarb_max' is unfair. :)
+ */
+#ifdef SYM_CONF_IARB_SUPPORT
+ u_short iarb_max; /* Max. # consecutive IARB hints*/
+ u_short iarb_count; /* Actual # of these hints */
+ struct sym_ccb * last_cp;
+#endif
+
+ /*
+ * Command abort handling.
+ * We need to synchronize tightly with the SCRIPTS
+ * processor in order to handle things correctly.
+ */
+ u_char abrt_msg[4]; /* Message to send buffer */
+ struct sym_tblmove abrt_tbl; /* Table for the MOV of it */
+ struct sym_tblsel abrt_sel; /* Sync params for selection */
+ u_char istat_sem; /* Tells the chip to stop (SEM) */
+
+ /*
+ * 64 bit DMA handling.
+ */
+#if SYM_CONF_DMA_ADDRESSING_MODE != 0
+ u_char use_dac; /* Use PCI DAC cycles */
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+ u_char dmap_dirty; /* Dma segments registers dirty */
+ u32 dmap_bah[SYM_DMAP_SIZE];/* Segment registers map */
+#endif
+#endif
+};
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 0
+#define use_dac(np) 0
+#define set_dac(np) do { } while (0)
+#else
+#define use_dac(np) (np)->use_dac
+#define set_dac(np) (np)->use_dac = 1
+#endif
+
+#define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl))
+
+
+/*
+ * FIRMWARES (sym_fw.c)
+ */
+struct sym_fw * sym_find_firmware(struct sym_chip *chip);
+void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);
+
+/*
+ * Driver methods called from O/S specific code.
+ */
+char *sym_driver_name(void);
+void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
+int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
+struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
+#else
+void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
+#endif
+void sym_start_up(struct Scsi_Host *, int reason);
+irqreturn_t sym_interrupt(struct Scsi_Host *);
+int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
+struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
+void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
+struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
+int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
+int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
+int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
+int sym_reset_scsi_target(struct sym_hcb *np, int target);
+void sym_hcb_free(struct sym_hcb *np);
+int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);
+
+/*
+ * Build a scatter/gather entry.
+ *
+ * For 64 bit systems, we use the 8 upper bits of the size field
+ * to provide bus address bits 32-39 to the SCRIPTS processor.
+ * This allows the 895A, 896, 1010 to address up to 1 TB of memory.
+ */
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 0
+#define DMA_DAC_MASK DMA_BIT_MASK(32)
+#define sym_build_sge(np, data, badd, len) \
+do { \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr(len); \
+} while (0)
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
+#define DMA_DAC_MASK DMA_BIT_MASK(40)
+#define sym_build_sge(np, data, badd, len) \
+do { \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
+} while (0)
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define DMA_DAC_MASK DMA_BIT_MASK(64)
+int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
+static inline void
+sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
+{
+ u32 h = (badd>>32);
+ int s = (h&SYM_DMAP_MASK);
+
+ if (h != np->dmap_bah[s])
+ goto bad;
+good:
+ (data)->addr = cpu_to_scr(badd);
+ (data)->size = cpu_to_scr((s<<24) + len);
+ return;
+bad:
+ s = sym_lookup_dmap(np, h, s);
+ goto good;
+}
+#else
+#error "Unsupported DMA addressing mode"
+#endif
+
+/*
+ * MEMORY ALLOCATOR.
+ */
+
+#define sym_get_mem_cluster() \
+ (void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
+#define sym_free_mem_cluster(p) \
+ free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)
+
+/*
+ * Link between free memory chunks of a given size.
+ */
+typedef struct sym_m_link {
+ struct sym_m_link *next;
+} *m_link_p;
+
+/*
+ * Virtual to bus physical translation for a given cluster.
+ * Such a structure is only useful with DMA abstraction.
+ */
+typedef struct sym_m_vtob { /* Virtual to Bus address translation */
+ struct sym_m_vtob *next;
+ void *vaddr; /* Virtual address */
+ dma_addr_t baddr; /* Bus physical address */
+} *m_vtob_p;
+
+/* Hash this stuff a bit to speed up translations */
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+
+/*
+ * Memory pool of a given kind.
+ * Ideally, we want to use:
+ * 1) 1 pool for memory we donnot need to involve in DMA.
+ * 2) The same pool for controllers that require same DMA
+ * constraints and features.
+ * The OS specific m_pool_id_t thing and the sym_m_pool_match()
+ * method are expected to tell the driver about.
+ */
+typedef struct sym_m_pool {
+ m_pool_ident_t dev_dmat; /* Identifies the pool (see above) */
+ void * (*get_mem_cluster)(struct sym_m_pool *);
+#ifdef SYM_MEM_FREE_UNUSED
+ void (*free_mem_cluster)(struct sym_m_pool *, void *);
+#endif
+#define M_GET_MEM_CLUSTER() mp->get_mem_cluster(mp)
+#define M_FREE_MEM_CLUSTER(p) mp->free_mem_cluster(mp, p)
+ int nump;
+ m_vtob_p vtob[VTOB_HASH_SIZE];
+ struct sym_m_pool *next;
+ struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
+} *m_pool_p;
+
+/*
+ * Alloc, free and translate addresses to bus physical
+ * for DMAable memory.
+ */
+void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
+void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
+dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);
+
+/*
+ * Verbs used by the driver code for DMAable memory handling.
+ * The _uvptv_ macro avoids a nasty warning about pointer to volatile
+ * being discarded.
+ */
+#define _uvptv_(p) ((void *)((u_long)(p)))
+
+#define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n)
+#define _sym_mfree_dma(np, p, l, n) \
+ __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
+#define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n)
+#define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n)
+#define vtobus(p) __vtobus(np->bus_dmat, _uvptv_(p))
+
+/*
+ * We have to provide the driver memory allocator with methods for
+ * it to maintain virtual to bus physical address translations.
+ */
+
+#define sym_m_pool_match(mp_id1, mp_id2) (mp_id1 == mp_id2)
+
+static inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
+{
+ void *vaddr = NULL;
+ dma_addr_t baddr = 0;
+
+ vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
+ GFP_ATOMIC);
+ if (vaddr) {
+ vbp->vaddr = vaddr;
+ vbp->baddr = baddr;
+ }
+ return vaddr;
+}
+
+static inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
+{
+ dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
+ vbp->baddr);
+}
+
+#endif /* SYM_HIPD_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c
new file mode 100644
index 000000000..6f9af0de7
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c
@@ -0,0 +1,378 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "sym_glue.h"
+
+/*
+ * Simple power of two buddy-like generic allocator.
+ * Provides naturally aligned memory chunks.
+ *
+ * This simple code is not intended to be fast, but to
+ * provide power of 2 aligned memory allocations.
+ * Since the SCRIPTS processor only supplies 8 bit arithmetic,
+ * this allocator allows simple and fast address calculations
+ * from the SCRIPTS code. In addition, cache line alignment
+ * is guaranteed for power of 2 cache line size.
+ *
+ * This allocator has been developed for the Linux sym53c8xx
+ * driver, since this O/S does not provide naturally aligned
+ * allocations.
+ * It has the advantage of allowing the driver to use private
+ * pages of memory that will be useful if we ever need to deal
+ * with IO MMUs for PCI.
+ */
+static void *___sym_malloc(m_pool_p mp, int size)
+{
+ int i = 0;
+ int s = (1 << SYM_MEM_SHIFT);
+ int j;
+ void *a;
+ m_link_p h = mp->h;
+
+ if (size > SYM_MEM_CLUSTER_SIZE)
+ return NULL;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ j = i;
+ while (!h[j].next) {
+ if (s == SYM_MEM_CLUSTER_SIZE) {
+ h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
+ if (h[j].next)
+ h[j].next->next = NULL;
+ break;
+ }
+ ++j;
+ s <<= 1;
+ }
+ a = h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (m_link_p) (a+s);
+ h[j].next->next = NULL;
+ }
+ }
+#ifdef DEBUG
+ printf("___sym_malloc(%d) = %p\n", size, (void *) a);
+#endif
+ return a;
+}
+
+/*
+ * Counter-part of the generic allocator.
+ */
+static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
+{
+ int i = 0;
+ int s = (1 << SYM_MEM_SHIFT);
+ m_link_p q;
+ unsigned long a, b;
+ m_link_p h = mp->h;
+
+#ifdef DEBUG
+ printf("___sym_mfree(%p, %d)\n", ptr, size);
+#endif
+
+ if (size > SYM_MEM_CLUSTER_SIZE)
+ return;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ a = (unsigned long)ptr;
+
+ while (1) {
+ if (s == SYM_MEM_CLUSTER_SIZE) {
+#ifdef SYM_MEM_FREE_UNUSED
+ M_FREE_MEM_CLUSTER((void *)a);
+#else
+ ((m_link_p) a)->next = h[i].next;
+ h[i].next = (m_link_p) a;
+#endif
+ break;
+ }
+ b = a ^ s;
+ q = &h[i];
+ while (q->next && q->next != (m_link_p) b) {
+ q = q->next;
+ }
+ if (!q->next) {
+ ((m_link_p) a)->next = h[i].next;
+ h[i].next = (m_link_p) a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ ++i;
+ }
+}
+
+/*
+ * Verbose and zeroing allocator that wrapps to the generic allocator.
+ */
+static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
+{
+ void *p;
+
+ p = ___sym_malloc(mp, size);
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC) {
+ printf ("new %-10s[%4d] @%p.\n", name, size, p);
+ }
+
+ if (p)
+ memset(p, 0, size);
+ else if (uflags & SYM_MEM_WARN)
+ printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
+ return p;
+}
+#define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN)
+
+/*
+ * Its counter-part.
+ */
+static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
+{
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+ ___sym_mfree(mp, ptr, size);
+}
+
+/*
+ * Default memory pool we donnot need to involve in DMA.
+ *
+ * With DMA abstraction, we use functions (methods), to
+ * distinguish between non DMAable memory and DMAable memory.
+ */
+static void *___mp0_get_mem_cluster(m_pool_p mp)
+{
+ void *m = sym_get_mem_cluster();
+ if (m)
+ ++mp->nump;
+ return m;
+}
+
+#ifdef SYM_MEM_FREE_UNUSED
+static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
+{
+ sym_free_mem_cluster(m);
+ --mp->nump;
+}
+#else
+#define ___mp0_free_mem_cluster NULL
+#endif
+
+static struct sym_m_pool mp0 = {
+ NULL,
+ ___mp0_get_mem_cluster,
+ ___mp0_free_mem_cluster
+};
+
+/*
+ * Methods that maintains DMAable pools according to user allocations.
+ * New pools are created on the fly when a new pool id is provided.
+ * They are deleted on the fly when they get emptied.
+ */
+/* Get a memory cluster that matches the DMA constraints of a given pool */
+static void * ___get_dma_mem_cluster(m_pool_p mp)
+{
+ m_vtob_p vbp;
+ void *vaddr;
+
+ vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
+ if (!vbp)
+ goto out_err;
+
+ vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
+ if (vaddr) {
+ int hc = VTOB_HASH_CODE(vaddr);
+ vbp->next = mp->vtob[hc];
+ mp->vtob[hc] = vbp;
+ ++mp->nump;
+ }
+ return vaddr;
+out_err:
+ return NULL;
+}
+
+#ifdef SYM_MEM_FREE_UNUSED
+/* Free a memory cluster and associated resources for DMA */
+static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
+{
+ m_vtob_p *vbpp, vbp;
+ int hc = VTOB_HASH_CODE(m);
+
+ vbpp = &mp->vtob[hc];
+ while (*vbpp && (*vbpp)->vaddr != m)
+ vbpp = &(*vbpp)->next;
+ if (*vbpp) {
+ vbp = *vbpp;
+ *vbpp = (*vbpp)->next;
+ sym_m_free_dma_mem_cluster(mp, vbp);
+ __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
+ --mp->nump;
+ }
+}
+#endif
+
+/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
+static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
+{
+ m_pool_p mp;
+ for (mp = mp0.next;
+ mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
+ mp = mp->next);
+ return mp;
+}
+
+/* Create a new memory DMAable pool (when fetch failed) */
+static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
+{
+ m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
+ if (mp) {
+ mp->dev_dmat = dev_dmat;
+ mp->get_mem_cluster = ___get_dma_mem_cluster;
+#ifdef SYM_MEM_FREE_UNUSED
+ mp->free_mem_cluster = ___free_dma_mem_cluster;
+#endif
+ mp->next = mp0.next;
+ mp0.next = mp;
+ return mp;
+ }
+ return NULL;
+}
+
+#ifdef SYM_MEM_FREE_UNUSED
+/* Destroy a DMAable memory pool (when got emptied) */
+static void ___del_dma_pool(m_pool_p p)
+{
+ m_pool_p *pp = &mp0.next;
+
+ while (*pp && *pp != p)
+ pp = &(*pp)->next;
+ if (*pp) {
+ *pp = (*pp)->next;
+ __sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
+ }
+}
+#endif
+
+/* This lock protects only the memory allocation/free. */
+static DEFINE_SPINLOCK(sym53c8xx_lock);
+
+/*
+ * Actual allocator for DMAable memory.
+ */
+void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
+{
+ unsigned long flags;
+ m_pool_p mp;
+ void *m = NULL;
+
+ spin_lock_irqsave(&sym53c8xx_lock, flags);
+ mp = ___get_dma_pool(dev_dmat);
+ if (!mp)
+ mp = ___cre_dma_pool(dev_dmat);
+ if (!mp)
+ goto out;
+ m = __sym_calloc(mp, size, name);
+#ifdef SYM_MEM_FREE_UNUSED
+ if (!mp->nump)
+ ___del_dma_pool(mp);
+#endif
+
+ out:
+ spin_unlock_irqrestore(&sym53c8xx_lock, flags);
+ return m;
+}
+
+void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
+{
+ unsigned long flags;
+ m_pool_p mp;
+
+ spin_lock_irqsave(&sym53c8xx_lock, flags);
+ mp = ___get_dma_pool(dev_dmat);
+ if (!mp)
+ goto out;
+ __sym_mfree(mp, m, size, name);
+#ifdef SYM_MEM_FREE_UNUSED
+ if (!mp->nump)
+ ___del_dma_pool(mp);
+#endif
+ out:
+ spin_unlock_irqrestore(&sym53c8xx_lock, flags);
+}
+
+/*
+ * Actual virtual to bus physical address translator
+ * for 32 bit addressable DMAable memory.
+ */
+dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
+{
+ unsigned long flags;
+ m_pool_p mp;
+ int hc = VTOB_HASH_CODE(m);
+ m_vtob_p vp = NULL;
+ void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
+ dma_addr_t b;
+
+ spin_lock_irqsave(&sym53c8xx_lock, flags);
+ mp = ___get_dma_pool(dev_dmat);
+ if (mp) {
+ vp = mp->vtob[hc];
+ while (vp && vp->vaddr != a)
+ vp = vp->next;
+ }
+ if (!vp)
+ panic("sym: VTOBUS FAILED!\n");
+ b = vp->baddr + (m - a);
+ spin_unlock_irqrestore(&sym53c8xx_lock, flags);
+ return b;
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
new file mode 100644
index 000000000..96c151459
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -0,0 +1,190 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SYM_MISC_H
+#define SYM_MISC_H
+
+/*
+ * A la VMS/CAM-3 queue management.
+ */
+typedef struct sym_quehead {
+ struct sym_quehead *flink; /* Forward pointer */
+ struct sym_quehead *blink; /* Backward pointer */
+} SYM_QUEHEAD;
+
+#define sym_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
+{
+ return (head->flink == head) ? 0 : head->flink;
+}
+
+static inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
+{
+ return (head->blink == head) ? 0 : head->blink;
+}
+
+static inline void __sym_que_add(struct sym_quehead * new,
+ struct sym_quehead * blink,
+ struct sym_quehead * flink)
+{
+ flink->blink = new;
+ new->flink = flink;
+ new->blink = blink;
+ blink->flink = new;
+}
+
+static inline void __sym_que_del(struct sym_quehead * blink,
+ struct sym_quehead * flink)
+{
+ flink->blink = blink;
+ blink->flink = flink;
+}
+
+static inline int sym_que_empty(struct sym_quehead *head)
+{
+ return head->flink == head;
+}
+
+static inline void sym_que_splice(struct sym_quehead *list,
+ struct sym_quehead *head)
+{
+ struct sym_quehead *first = list->flink;
+
+ if (first != list) {
+ struct sym_quehead *last = list->blink;
+ struct sym_quehead *at = head->flink;
+
+ first->blink = head;
+ head->flink = first;
+
+ last->flink = at;
+ at->blink = last;
+ }
+}
+
+static inline void sym_que_move(struct sym_quehead *orig,
+ struct sym_quehead *dest)
+{
+ struct sym_quehead *first, *last;
+
+ first = orig->flink;
+ if (first != orig) {
+ first->blink = dest;
+ dest->flink = first;
+ last = orig->blink;
+ last->flink = dest;
+ dest->blink = last;
+ orig->flink = orig;
+ orig->blink = orig;
+ } else {
+ dest->flink = dest;
+ dest->blink = dest;
+ }
+}
+
+#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
+
+#define sym_remque(el) __sym_que_del((el)->blink, (el)->flink)
+
+#define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)
+
+static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
+{
+ struct sym_quehead *elem = head->flink;
+
+ if (elem != head)
+ __sym_que_del(head, elem->flink);
+ else
+ elem = NULL;
+ return elem;
+}
+
+#define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)
+
+static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
+{
+ struct sym_quehead *elem = head->blink;
+
+ if (elem != head)
+ __sym_que_del(elem->blink, head);
+ else
+ elem = 0;
+ return elem;
+}
+
+/*
+ * This one may be useful.
+ */
+#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
+ for (qp = (head)->flink; qp != (head); qp = qp->flink)
+/*
+ * FreeBSD does not offer our kind of queue in the CAM CCB.
+ * So, we have to cast.
+ */
+#define sym_qptr(p) ((struct sym_quehead *) (p))
+
+/*
+ * Simple bitmap operations.
+ */
+#define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f)))
+#define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
+#define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f)))
+
+/*
+ * The below round up/down macros are to be used with a constant
+ * as argument (sizeof(...) for example), for the compiler to
+ * optimize the whole thing.
+ */
+#define _U_(a,m) (a)<=(1<<m)?m:
+
+/*
+ * Round up logarithm to base 2 of a 16 bit constant.
+ */
+#define _LGRU16_(a) \
+( \
+ _U_(a, 0)_U_(a, 1)_U_(a, 2)_U_(a, 3)_U_(a, 4)_U_(a, 5)_U_(a, 6)_U_(a, 7) \
+ _U_(a, 8)_U_(a, 9)_U_(a,10)_U_(a,11)_U_(a,12)_U_(a,13)_U_(a,14)_U_(a,15) \
+ 16)
+
+#endif /* SYM_MISC_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
new file mode 100644
index 000000000..5662fbb3f
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -0,0 +1,779 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "sym_glue.h"
+#include "sym_nvram.h"
+
+#ifdef SYM_CONF_DEBUG_NVRAM
+static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
+#endif
+
+/*
+ * Get host setup from NVRAM.
+ */
+void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
+{
+ /*
+ * Get parity checking, host ID, verbose mode
+ * and miscellaneous host flags from NVRAM.
+ */
+ switch (nvram->type) {
+ case SYM_SYMBIOS_NVRAM:
+ if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
+ np->rv_scntl0 &= ~0x0a;
+ np->myaddr = nvram->data.Symbios.host_id & 0x0f;
+ if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
+ np->verbose += 1;
+ if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
+ shost->reverse_ordering = 1;
+ if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
+ np->usrflags |= SYM_AVOID_BUS_RESET;
+ break;
+ case SYM_TEKRAM_NVRAM:
+ np->myaddr = nvram->data.Tekram.host_id & 0x0f;
+ break;
+#ifdef CONFIG_PARISC
+ case SYM_PARISC_PDC:
+ if (nvram->data.parisc.host_id != -1)
+ np->myaddr = nvram->data.parisc.host_id;
+ if (nvram->data.parisc.factor != -1)
+ np->minsync = nvram->data.parisc.factor;
+ if (nvram->data.parisc.width != -1)
+ np->maxwide = nvram->data.parisc.width;
+ switch (nvram->data.parisc.mode) {
+ case 0: np->scsi_mode = SMODE_SE; break;
+ case 1: np->scsi_mode = SMODE_HVD; break;
+ case 2: np->scsi_mode = SMODE_LVD; break;
+ default: break;
+ }
+#endif
+ default:
+ break;
+ }
+}
+
+/*
+ * Get target set-up from Symbios format NVRAM.
+ */
+static void
+sym_Symbios_setup_target(struct sym_tcb *tp, int target, Symbios_nvram *nvram)
+{
+ Symbios_target *tn = &nvram->target[target];
+
+ if (!(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED))
+ tp->usrtags = 0;
+ if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+ tp->usrflags &= ~SYM_DISC_ENABLED;
+ if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+ tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
+ if (!(tn->flags & SYMBIOS_SCAN_LUNS))
+ tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
+ tp->usr_period = (tn->sync_period + 3) / 4;
+ tp->usr_width = (tn->bus_width == 0x8) ? 0 : 1;
+}
+
+static const unsigned char Tekram_sync[16] = {
+ 25, 31, 37, 43, 50, 62, 75, 125, 12, 15, 18, 21, 6, 7, 9, 10
+};
+
+/*
+ * Get target set-up from Tekram format NVRAM.
+ */
+static void
+sym_Tekram_setup_target(struct sym_tcb *tp, int target, Tekram_nvram *nvram)
+{
+ struct Tekram_target *tn = &nvram->target[target];
+
+ if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+ tp->usrtags = 2 << nvram->max_tags_index;
+ }
+
+ if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
+ tp->usrflags |= SYM_DISC_ENABLED;
+
+ if (tn->flags & TEKRAM_SYNC_NEGO)
+ tp->usr_period = Tekram_sync[tn->sync_index & 0xf];
+ tp->usr_width = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
+}
+
+/*
+ * Get target setup from NVRAM.
+ */
+void sym_nvram_setup_target(struct sym_tcb *tp, int target, struct sym_nvram *nvp)
+{
+ switch (nvp->type) {
+ case SYM_SYMBIOS_NVRAM:
+ sym_Symbios_setup_target(tp, target, &nvp->data.Symbios);
+ break;
+ case SYM_TEKRAM_NVRAM:
+ sym_Tekram_setup_target(tp, target, &nvp->data.Tekram);
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef SYM_CONF_DEBUG_NVRAM
+/*
+ * Dump Symbios format NVRAM for debugging purpose.
+ */
+static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
+ sym_name(np), nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
+ (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ sym_name(np), i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+/*
+ * Dump TEKRAM format NVRAM for debugging purpose.
+ */
+static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch ((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ sym_name(np), nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = Tekram_sync[j];
+ printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ sym_name(np), i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#else
+static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { (void)np; (void)nvram; }
+static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) { (void)np; (void)nvram; }
+#endif /* SYM_CONF_DEBUG_NVRAM */
+
+
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPOI0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+static void S24C16_set_bit(struct sym_device *np, u_char write_bit, u_char *gpreg,
+ int bit_mode)
+{
+ udelay(5);
+ switch (bit_mode) {
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB(np, nc_gpreg, *gpreg);
+ INB(np, nc_mbox1);
+ udelay(5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void S24C16_start(struct sym_device *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void S24C16_stop(struct sym_device *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void S24C16_do_bit(struct sym_device *np, u_char *read_bit, u_char write_bit,
+ u_char *gpreg)
+{
+ S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB(np, nc_gpreg);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void S24C16_write_ack(struct sym_device *np, u_char write_bit, u_char *gpreg,
+ u_char *gpcntl)
+{
+ OUTB(np, nc_gpcntl, *gpcntl & 0xfe);
+ S24C16_do_bit(np, NULL, write_bit, gpreg);
+ OUTB(np, nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void S24C16_read_ack(struct sym_device *np, u_char *read_bit, u_char *gpreg,
+ u_char *gpcntl)
+{
+ OUTB(np, nc_gpcntl, *gpcntl | 0x01);
+ S24C16_do_bit(np, read_bit, 1, gpreg);
+ OUTB(np, nc_gpcntl, *gpcntl);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void S24C16_write_byte(struct sym_device *np, u_char *ack_data, u_char write_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void S24C16_read_byte(struct sym_device *np, u_char *read_data, u_char ack_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ S24C16_do_bit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+#ifdef SYM_CONF_NVRAM_WRITE_SUPPORT
+/*
+ * Write 'len' bytes starting at 'offset'.
+ */
+static int sym_write_S24C16_nvram(struct sym_device *np, int offset,
+ u_char *data, int len)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_char ack_data;
+ int x;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB(np, nc_gpreg);
+ old_gpcntl = INB(np, nc_gpcntl);
+ gpcntl = old_gpcntl & 0x1c;
+
+ /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+ OUTB(np, nc_gpreg, old_gpreg);
+ OUTB(np, nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ S24C16_stop(np, &gpreg);
+
+ /* NVRAM has to be written in segments of 16 bytes */
+ for (x = 0; x < len ; x += 16) {
+ do {
+ S24C16_start(np, &gpreg);
+ S24C16_write_byte(np, &ack_data,
+ 0xa0 | (((offset+x) >> 7) & 0x0e),
+ &gpreg, &gpcntl);
+ } while (ack_data & 0x01);
+
+ S24C16_write_byte(np, &ack_data, (offset+x) & 0xff,
+ &gpreg, &gpcntl);
+
+ for (y = 0; y < 16; y++)
+ S24C16_write_byte(np, &ack_data, data[x+y],
+ &gpreg, &gpcntl);
+ S24C16_stop(np, &gpreg);
+ }
+
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB(np, nc_gpcntl, old_gpcntl);
+ OUTB(np, nc_gpreg, old_gpreg);
+
+ return 0;
+}
+#endif /* SYM_CONF_NVRAM_WRITE_SUPPORT */
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int sym_read_S24C16_nvram(struct sym_device *np, int offset, u_char *data, int len)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_char ack_data;
+ int retv = 1;
+ int x;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB(np, nc_gpreg);
+ old_gpcntl = INB(np, nc_gpcntl);
+ gpcntl = old_gpcntl & 0x1c;
+
+ /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+ OUTB(np, nc_gpreg, old_gpreg);
+ OUTB(np, nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ S24C16_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ S24C16_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ S24C16_write_byte(np, &ack_data,
+ 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ S24C16_write_byte(np, &ack_data,
+ offset & 0xff, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ S24C16_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ S24C16_write_byte(np, &ack_data,
+ 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB(np, nc_gpcntl, gpcntl);
+
+ /* input all requested data - only part of total NVRAM */
+ for (x = 0; x < len; x++)
+ S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB(np, nc_gpcntl, gpcntl);
+ S24C16_stop(np, &gpreg);
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB(np, nc_gpcntl, old_gpcntl);
+ OUTB(np, nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ /* probe the 24c16 and read the SYMBIOS 24c16 area */
+ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+ return 1;
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->type != 0 ||
+ memcmp(nvram->trailer, Symbios_trailer, 6) ||
+ nvram->byte_count != len - 12)
+ return 1;
+
+ /* verify checksum */
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+ if (csum != nvram->checksum)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPOI0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
+
+/*
+ * Pulse clock bit in GPIO0
+ */
+static void T93C46_Clk(struct sym_device *np, u_char *gpreg)
+{
+ OUTB(np, nc_gpreg, *gpreg | 0x04);
+ INB(np, nc_mbox1);
+ udelay(2);
+ OUTB(np, nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void T93C46_Read_Bit(struct sym_device *np, u_char *read_bit, u_char *gpreg)
+{
+ udelay(2);
+ T93C46_Clk(np, gpreg);
+ *read_bit = INB(np, nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO0
+ */
+static void T93C46_Write_Bit(struct sym_device *np, u_char write_bit, u_char *gpreg)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB(np, nc_gpreg, *gpreg);
+ INB(np, nc_mbox1);
+ udelay(2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void T93C46_Stop(struct sym_device *np, u_char *gpreg)
+{
+ *gpreg &= 0xef;
+ OUTB(np, nc_gpreg, *gpreg);
+ INB(np, nc_mbox1);
+ udelay(2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void T93C46_Send_Command(struct sym_device *np, u_short write_data,
+ u_char *read_bit, u_char *gpreg)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB(np, nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void T93C46_Read_Word(struct sym_device *np,
+ unsigned short *nvram_data, unsigned char *gpreg)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ T93C46_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read Tekram NvRAM data.
+ */
+static int T93C46_Read_Data(struct sym_device *np, unsigned short *data,
+ int len, unsigned char *gpreg)
+{
+ int x;
+
+ for (x = 0; x < len; x++) {
+ unsigned char read_bit;
+ /* output read command and address */
+ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 1; /* Bad */
+ T93C46_Read_Word(np, &data[x], gpreg);
+ T93C46_Stop(np, gpreg);
+ }
+
+ return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB(np, nc_gpreg);
+ old_gpcntl = INB(np, nc_gpcntl);
+
+ /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
+ 1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB(np, nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB(np, nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ retv = T93C46_Read_Data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB(np, nc_gpcntl, old_gpcntl);
+ OUTB(np, nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram)
+{
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ switch (np->pdev->device) {
+ case PCI_DEVICE_ID_NCR_53C885:
+ case PCI_DEVICE_ID_NCR_53C895:
+ case PCI_DEVICE_ID_NCR_53C896:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ break;
+ case PCI_DEVICE_ID_NCR_53C875:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ if (!x)
+ break;
+ default:
+ x = sym_read_T93C46_nvram(np, nvram);
+ break;
+ }
+ if (x)
+ return 1;
+
+ /* verify checksum */
+ for (x = 0, csum = 0; x < len - 1; x += 2)
+ csum += data[x] + (data[x+1] << 8);
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_PARISC
+/*
+ * Host firmware (PDC) keeps a table for altering SCSI capabilities.
+ * Many newer machines export one channel of 53c896 chip as SE, 50-pin HD.
+ * Also used for Multi-initiator SCSI clusters to set the SCSI Initiator ID.
+ */
+static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *pdc)
+{
+ struct hardware_path hwpath;
+ get_pci_node_path(np->pdev, &hwpath);
+ if (!pdc_get_initiator(&hwpath, pdc))
+ return 0;
+
+ return SYM_PARISC_PDC;
+}
+#else
+static inline int sym_read_parisc_pdc(struct sym_device *np,
+ struct pdc_initiator *x)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Try reading Symbios or Tekram NVRAM
+ */
+int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
+{
+ if (!sym_read_Symbios_nvram(np, &nvp->data.Symbios)) {
+ nvp->type = SYM_SYMBIOS_NVRAM;
+ sym_display_Symbios_nvram(np, &nvp->data.Symbios);
+ } else if (!sym_read_Tekram_nvram(np, &nvp->data.Tekram)) {
+ nvp->type = SYM_TEKRAM_NVRAM;
+ sym_display_Tekram_nvram(np, &nvp->data.Tekram);
+ } else {
+ nvp->type = sym_read_parisc_pdc(np, &nvp->data.parisc);
+ }
+ return nvp->type;
+}
+
+char *sym_nvram_type(struct sym_nvram *nvp)
+{
+ switch (nvp->type) {
+ case SYM_SYMBIOS_NVRAM:
+ return "Symbios NVRAM";
+ case SYM_TEKRAM_NVRAM:
+ return "Tekram NVRAM";
+ case SYM_PARISC_PDC:
+ return "PA-RISC Firmware";
+ default:
+ return "No NVRAM";
+ }
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.h b/drivers/scsi/sym53c8xx_2/sym_nvram.h
new file mode 100644
index 000000000..bdfbbb083
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.h
@@ -0,0 +1,214 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000 Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ * Wolfgang Stanglmeier <wolf@cologne.de>
+ * Stefan Esser <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994 Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SYM_NVRAM_H
+#define SYM_NVRAM_H
+
+#include "sym53c8xx.h"
+
+/*
+ * Symbios NVRAM data format
+ */
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+ u_short type; /* 0x0000 */
+ u_short byte_count; /* excluding header/trailer */
+ u_short checksum;
+
+/* Controller set up 20 bytes */
+ u_char v_major; /* 0x00 */
+ u_char v_minor; /* 0x30 */
+ u32 boot_crc;
+ u_short flags;
+#define SYMBIOS_SCAM_ENABLE (1)
+#define SYMBIOS_PARITY_ENABLE (1<<1)
+#define SYMBIOS_VERBOSE_MSGS (1<<2)
+#define SYMBIOS_CHS_MAPPING (1<<3)
+#define SYMBIOS_NO_NVRAM (1<<3) /* ??? */
+ u_short flags1;
+#define SYMBIOS_SCAN_HI_LO (1)
+ u_short term_state;
+#define SYMBIOS_TERM_CANT_PROGRAM (0)
+#define SYMBIOS_TERM_ENABLED (1)
+#define SYMBIOS_TERM_DISABLED (2)
+ u_short rmvbl_flags;
+#define SYMBIOS_RMVBL_NO_SUPPORT (0)
+#define SYMBIOS_RMVBL_BOOT_DEVICE (1)
+#define SYMBIOS_RMVBL_MEDIA_INSTALLED (2)
+ u_char host_id;
+ u_char num_hba; /* 0x04 */
+ u_char num_devices; /* 0x10 */
+ u_char max_scam_devices; /* 0x04 */
+ u_char num_valid_scam_devices; /* 0x00 */
+ u_char flags2;
+#define SYMBIOS_AVOID_BUS_RESET (1<<2)
+
+/* Boot order 14 bytes * 4 */
+ struct Symbios_host{
+ u_short type; /* 4:8xx / 0:nok */
+ u_short device_id; /* PCI device id */
+ u_short vendor_id; /* PCI vendor id */
+ u_char bus_nr; /* PCI bus number */
+ u_char device_fn; /* PCI device/function number << 3*/
+ u_short word8;
+ u_short flags;
+#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
+ u_short io_port; /* PCI io_port address */
+ } host[4];
+
+/* Targets 8 bytes * 16 */
+ struct Symbios_target {
+ u_char flags;
+#define SYMBIOS_DISCONNECT_ENABLE (1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
+#define SYMBIOS_SCAN_LUNS (1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
+ u_char rsvd;
+ u_char bus_width; /* 0x08/0x10 */
+ u_char sync_offset;
+ u_short sync_period; /* 4*period factor */
+ u_short timeout;
+ } target[16];
+/* Scam table 8 bytes * 4 */
+ struct Symbios_scam {
+ u_short id;
+ u_short method;
+#define SYMBIOS_SCAM_DEFAULT_METHOD (0)
+#define SYMBIOS_SCAM_DONT_ASSIGN (1)
+#define SYMBIOS_SCAM_SET_SPECIFIC_ID (2)
+#define SYMBIOS_SCAM_USE_ORDER_GIVEN (3)
+ u_short status;
+#define SYMBIOS_SCAM_UNKNOWN (0)
+#define SYMBIOS_SCAM_DEVICE_NOT_FOUND (1)
+#define SYMBIOS_SCAM_ID_NOT_SET (2)
+#define SYMBIOS_SCAM_ID_VALID (3)
+ u_char target_id;
+ u_char rsvd;
+ } scam[4];
+
+ u_char spare_devices[15*8];
+ u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram Symbios_nvram;
+typedef struct Symbios_host Symbios_host;
+typedef struct Symbios_target Symbios_target;
+typedef struct Symbios_scam Symbios_scam;
+
+/*
+ * Tekram NvRAM data format.
+ */
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_93C46_NVRAM_ADDRESS 0
+#define TEKRAM_24C16_NVRAM_ADDRESS 0x40
+
+struct Tekram_nvram {
+ struct Tekram_target {
+ u_char flags;
+#define TEKRAM_PARITY_CHECK (1)
+#define TEKRAM_SYNC_NEGO (1<<1)
+#define TEKRAM_DISCONNECT_ENABLE (1<<2)
+#define TEKRAM_START_CMD (1<<3)
+#define TEKRAM_TAGGED_COMMANDS (1<<4)
+#define TEKRAM_WIDE_NEGO (1<<5)
+ u_char sync_index;
+ u_short word2;
+ } target[16];
+ u_char host_id;
+ u_char flags;
+#define TEKRAM_MORE_THAN_2_DRIVES (1)
+#define TEKRAM_DRIVES_SUP_1GB (1<<1)
+#define TEKRAM_RESET_ON_POWER_ON (1<<2)
+#define TEKRAM_ACTIVE_NEGATION (1<<3)
+#define TEKRAM_IMMEDIATE_SEEK (1<<4)
+#define TEKRAM_SCAN_LUNS (1<<5)
+#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; */
+ /* 1: boot device; 2:all */
+ u_char boot_delay_index;
+ u_char max_tags_index;
+ u_short flags1;
+#define TEKRAM_F2_F6_ENABLED (1)
+ u_short spare[29];
+};
+typedef struct Tekram_nvram Tekram_nvram;
+typedef struct Tekram_target Tekram_target;
+
+#ifndef CONFIG_PARISC
+struct pdc_initiator { int dummy; };
+#endif
+
+/*
+ * Union of supported NVRAM formats.
+ */
+struct sym_nvram {
+ int type;
+#define SYM_SYMBIOS_NVRAM (1)
+#define SYM_TEKRAM_NVRAM (2)
+#define SYM_PARISC_PDC (3)
+#if SYM_CONF_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ struct pdc_initiator parisc;
+ } data;
+#endif
+};
+
+#if SYM_CONF_NVRAM_SUPPORT
+void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram);
+void sym_nvram_setup_target (struct sym_tcb *tp, int target, struct sym_nvram *nvp);
+int sym_read_nvram (struct sym_device *np, struct sym_nvram *nvp);
+char *sym_nvram_type(struct sym_nvram *nvp);
+#else
+static inline void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { }
+static inline void sym_nvram_setup_target(struct sym_tcb *tp, struct sym_nvram *nvram) { }
+static inline int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
+{
+ nvp->type = 0;
+ return 0;
+}
+static inline char *sym_nvram_type(struct sym_nvram *nvp)
+{
+ return "No NVRAM";
+}
+#endif
+
+#endif /* SYM_NVRAM_H */
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
new file mode 100644
index 000000000..87828acbf
--- /dev/null
+++ b/drivers/scsi/t128.c
@@ -0,0 +1,412 @@
+#define PSEUDO_DMA
+
+/*
+ * Trantor T128/T128F/T228 driver
+ * Note : architecturally, the T100 and T130 are different and won't
+ * work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ */
+
+/*
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - t128=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the T128_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0xcc000 address, IRQ5, I could say
+ * -DT128_OVERRIDE={{0xcc000, 5}}
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ * T128/T128F jumper/dipswitch settings (note : on my sample, the switches
+ * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
+ *
+ * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
+ * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
+ * cc000 off off
+ * c8000 off on
+ * dc000 on off
+ * d8000 on on
+ *
+ *
+ * Interrupts
+ * There is a 12 pin jumper block, jp1, numbered as follows :
+ * T128 (JP1) T128F (J5)
+ * 2 4 6 8 10 12 11 9 7 5 3 1
+ * 1 3 5 7 9 11 12 10 8 6 4 2
+ *
+ * 3 2-4
+ * 5 1-3
+ * 7 3-5
+ * T128F only
+ * 10 8-10
+ * 12 7-9
+ * 14 10-12
+ * 15 9-11
+ */
+
+#include <linux/signal.h>
+#include <linux/io.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <scsi/scsi_host.h>
+#include "t128.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+
+static struct override {
+ unsigned long address;
+ int irq;
+} overrides
+#ifdef T128_OVERRIDE
+ [] __initdata = T128_OVERRIDE;
+#else
+ [4] __initdata = {{0, IRQ_AUTO}, {0, IRQ_AUTO},
+ {0 ,IRQ_AUTO}, {0, IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES ARRAY_SIZE(overrides)
+
+static struct base {
+ unsigned int address;
+ int noauto;
+} bases[] __initdata = {
+ { 0xcc000, 0}, { 0xc8000, 0}, { 0xdc000, 0}, { 0xd8000, 0}
+};
+
+#define NO_BASES ARRAY_SIZE(bases)
+
+static struct signature {
+ const char *string;
+ int offset;
+} signatures[] __initdata = {
+{"TSROM: SCSI BIOS, Version 1.12", 0x36},
+};
+
+#define NO_SIGNATURES ARRAY_SIZE(signatures)
+
+#ifndef MODULE
+/*
+ * Function : t128_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+static int __init t128_setup(char *str)
+{
+ static int commandline_current = 0;
+ int i;
+ int ints[10];
+
+ get_options(str, ARRAY_SIZE(ints), ints);
+ if (ints[0] != 2)
+ printk("t128_setup : usage t128=address,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+ return 1;
+}
+
+__setup("t128=", t128_setup);
+#endif
+
+/*
+ * Function : int t128_detect(struct scsi_host_template * tpnt)
+ *
+ * Purpose : detects and initializes T128,T128F, or T228 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+static int __init t128_detect(struct scsi_host_template *tpnt)
+{
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned long base;
+ void __iomem *p;
+ int sig, count;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ base = 0;
+ p = NULL;
+
+ if (overrides[current_override].address) {
+ base = overrides[current_override].address;
+ p = ioremap(bases[current_base].address, 0x2000);
+ if (!p)
+ base = 0;
+ } else
+ for (; !base && (current_base < NO_BASES); ++current_base) {
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : probing address %08x\n", bases[current_base].address);
+#endif
+ if (bases[current_base].noauto)
+ continue;
+ p = ioremap(bases[current_base].address, 0x2000);
+ if (!p)
+ continue;
+ for (sig = 0; sig < NO_SIGNATURES; ++sig)
+ if (check_signature(p + signatures[sig].offset,
+ signatures[sig].string,
+ strlen(signatures[sig].string))) {
+ base = bases[current_base].address;
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : detected board.\n");
+#endif
+ goto found;
+ }
+ iounmap(p);
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : base = %08x\n", (unsigned int) base);
+#endif
+
+ if (!base)
+ break;
+
+found:
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ if(instance == NULL)
+ break;
+
+ instance->base = base;
+ ((struct NCR5380_hostdata *)instance->hostdata)->base = p;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
+
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
+
+ if (instance->irq != NO_IRQ)
+ if (request_irq(instance->irq, t128_intr, 0, "t128",
+ instance)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
+ }
+
+ if (instance->irq == NO_IRQ) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+static int t128_release(struct Scsi_Host *shost)
+{
+ NCR5380_local_declare();
+ NCR5380_setup(shost);
+ if (shost->irq != NO_IRQ)
+ free_irq(shost->irq, shost);
+ NCR5380_exit(shost);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_unregister(shost);
+ iounmap(base);
+ return 0;
+}
+
+/*
+ * Function : int t128_biosparam(Disk * disk, struct block_device *dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
+
+static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int *ip)
+{
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = capacity >> 11;
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ NCR5380_local_declare();
+ void __iomem *reg;
+ unsigned char *d = dst;
+ register int i = len;
+
+ NCR5380_setup(instance);
+ reg = base + T_DATA_REG_OFFSET;
+
+#if 0
+ for (; i; --i) {
+ while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
+#else
+ while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *d++ = readb(reg);
+ }
+
+ if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ void __iomem *foo = base + T_CONTROL_REG_OFFSET;
+ tmp = readb(foo);
+ writeb(tmp | T_CR_CT, foo);
+ writeb(tmp, foo);
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ NCR5380_local_declare();
+ void __iomem *reg;
+ unsigned char *s = src;
+ register int i = len;
+
+ NCR5380_setup(instance);
+ reg = base + T_DATA_REG_OFFSET;
+
+#if 0
+ for (; i; --i) {
+ while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
+#else
+ while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ writeb(*s++, reg);
+ }
+
+ if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ void __iomem *foo = base + T_CONTROL_REG_OFFSET;
+ tmp = readb(foo);
+ writeb(tmp | T_CR_CT, foo);
+ writeb(tmp, foo);
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+
+#include "NCR5380.c"
+
+static struct scsi_host_template driver_template = {
+ .name = "Trantor T128/T128F/T228",
+ .detect = t128_detect,
+ .release = t128_release,
+ .proc_name = "t128",
+ .show_info = t128_show_info,
+ .write_info = t128_write_info,
+ .info = t128_info,
+ .queuecommand = t128_queue_command,
+ .eh_abort_handler = t128_abort,
+ .eh_bus_reset_handler = t128_bus_reset,
+ .bios_param = t128_biosparam,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING,
+};
+#include "scsi_module.c"
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
new file mode 100644
index 000000000..2c7371454
--- /dev/null
+++ b/drivers/scsi/t128.h
@@ -0,0 +1,125 @@
+/*
+ * Trantor T128/T128F/T228 defines
+ * Note : architecturally, the T100 and T128 are different and won't work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ */
+
+#ifndef T128_H
+#define T128_H
+
+#define TDEBUG 0
+#define TDEBUG_INIT 0x1
+#define TDEBUG_TRANSFER 0x2
+
+/*
+ * The trantor boards are memory mapped. They use an NCR5380 or
+ * equivalent (my sample board had part second sourced from ZILOG).
+ * NCR's recommended "Pseudo-DMA" architecture is used, where
+ * a PAL drives the DMA signals on the 5380 allowing fast, blind
+ * transfers with proper handshaking.
+ */
+
+/*
+ * Note : a boot switch is provided for the purpose of informing the
+ * firmware to boot or not boot from attached SCSI devices. So, I imagine
+ * there are fewer people who've yanked the ROM like they do on the Seagate
+ * to make bootup faster, and I'll probably use this for autodetection.
+ */
+#define T_ROM_OFFSET 0
+
+/*
+ * Note : my sample board *WAS NOT* populated with the SRAM, so this
+ * can't be used for autodetection without a ROM present.
+ */
+#define T_RAM_OFFSET 0x1800
+
+/*
+ * All of the registers are allocated 32 bytes of address space, except
+ * for the data register (read/write to/from the 5380 in pseudo-DMA mode)
+ */
+#define T_CONTROL_REG_OFFSET 0x1c00 /* rw */
+#define T_CR_INT 0x10 /* Enable interrupts */
+#define T_CR_CT 0x02 /* Reset watchdog timer */
+
+#define T_STATUS_REG_OFFSET 0x1c20 /* ro */
+#define T_ST_BOOT 0x80 /* Boot switch */
+#define T_ST_S3 0x40 /* User settable switches, */
+#define T_ST_S2 0x20 /* read 0 when switch is on, 1 off */
+#define T_ST_S1 0x10
+#define T_ST_PS2 0x08 /* Set for Microchannel 228 */
+#define T_ST_RDY 0x04 /* 5380 DRQ */
+#define T_ST_TIM 0x02 /* indicates 40us watchdog timer fired */
+#define T_ST_ZERO 0x01 /* Always zero */
+
+#define T_5380_OFFSET 0x1d00 /* 8 registers here, see NCR5380.h */
+
+#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
+
+#ifndef ASM
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+#define NCR5380_implementation_fields \
+ void __iomem *base
+
+#define NCR5380_local_declare() \
+ void __iomem *base
+
+#define NCR5380_setup(instance) \
+ base = ((struct NCR5380_hostdata *)(instance->hostdata))->base
+
+#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
+
+#if !(TDEBUG & TDEBUG_TRANSFER)
+#define NCR5380_read(reg) readb(T128_address(reg))
+#define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at address %08x\n"\
+ , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg)))
+
+#define NCR5380_write(reg, value) { \
+ printk("scsi%d : write %02x to register %d at address %08x\n", \
+ instance->hostno, (value), (reg), T128_address(reg)); \
+ writeb((value), (T128_address(reg))); \
+}
+#endif
+
+#define NCR5380_intr t128_intr
+#define do_NCR5380_intr do_t128_intr
+#define NCR5380_queue_command t128_queue_command
+#define NCR5380_abort t128_abort
+#define NCR5380_bus_reset t128_bus_reset
+#define NCR5380_info t128_info
+#define NCR5380_show_info t128_show_info
+#define NCR5380_write_info t128_write_info
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define T128_IRQS 0xc4a8
+
+#endif /* ndef ASM */
+#endif /* T128_H */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
new file mode 100644
index 000000000..14eb50b95
--- /dev/null
+++ b/drivers/scsi/u14-34f.c
@@ -0,0 +1,1971 @@
+/*
+ * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
+ *
+ * 03 Jun 2003 Rev. 8.10 for linux-2.5.70
+ * + Update for new IRQ API.
+ * + Use "goto" when appropriate.
+ * + Drop u14-34f.h.
+ * + Update for new module_param API.
+ * + Module parameters can now be specified only in the
+ * same format as the kernel boot options.
+ *
+ * boot option old module param
+ * ----------- ------------------
+ * addr,... io_port=addr,...
+ * lc:[y|n] linked_comm=[1|0]
+ * mq:xx max_queue_depth=xx
+ * tm:[0|1|2] tag_mode=[0|1|2]
+ * et:[y|n] ext_tran=[1|0]
+ * of:[y|n] have_old_firmware=[1|0]
+ *
+ * A valid example using the new parameter format is:
+ * modprobe u14-34f "u14-34f=0x340,0x330,lc:y,tm:0,mq:4"
+ *
+ * which is equivalent to the old format:
+ * modprobe u14-34f io_port=0x340,0x330 linked_comm=1 tag_mode=0 \
+ * max_queue_depth=4
+ *
+ * With actual module code, u14-34f and u14_34f are equivalent
+ * as module parameter names.
+ *
+ * 12 Feb 2003 Rev. 8.04 for linux 2.5.60
+ * + Release irq before calling scsi_register.
+ *
+ * 12 Nov 2002 Rev. 8.02 for linux 2.5.47
+ * + Release driver_lock before calling scsi_register.
+ *
+ * 11 Nov 2002 Rev. 8.01 for linux 2.5.47
+ * + Fixed bios_param and scsicam_bios_param calling parameters.
+ *
+ * 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4
+ * + Use new tcq and adjust_queue_depth api.
+ * + New command line option (tm:[0-2]) to choose the type of tags:
+ * 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags.
+ * Default is tm:0 (tagged commands disabled).
+ * For compatibility the "tc:" option is an alias of the "tm:"
+ * option; tc:n is equivalent to tm:0 and tc:y is equivalent to
+ * tm:1.
+ *
+ * 10 Oct 2002 Rev. 7.70 for linux 2.5.42
+ * + Foreport from revision 6.70.
+ *
+ * 25 Jun 2002 Rev. 6.70 for linux 2.4.19
+ * + Fixed endian-ness problem due to bitfields.
+ *
+ * 21 Feb 2002 Rev. 6.52 for linux 2.4.18
+ * + Backport from rev. 7.22 (use io_request_lock).
+ *
+ * 20 Feb 2002 Rev. 7.22 for linux 2.5.5
+ * + Remove any reference to virt_to_bus().
+ * + Fix pio hang while detecting multiple HBAs.
+ *
+ * 01 Jan 2002 Rev. 7.20 for linux 2.5.1
+ * + Use the dynamic DMA mapping API.
+ *
+ * 19 Dec 2001 Rev. 7.02 for linux 2.5.1
+ * + Use SCpnt->sc_data_direction if set.
+ * + Use sglist.page instead of sglist.address.
+ *
+ * 11 Dec 2001 Rev. 7.00 for linux 2.5.1
+ * + Use host->host_lock instead of io_request_lock.
+ *
+ * 1 May 2001 Rev. 6.05 for linux 2.4.4
+ * + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d)
+ *
+ * 25 Jan 2001 Rev. 6.03 for linux 2.4.0
+ * + "check_region" call replaced by "request_region".
+ *
+ * 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11
+ * + Removed old scsi error handling support.
+ * + The obsolete boot option flag eh:n is silently ignored.
+ * + Removed error messages while a disk drive is powered up at
+ * boot time.
+ * + Improved boot messages: all tagged capable device are
+ * indicated as "tagged".
+ *
+ * 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18
+ * + Updated to the new __setup interface for boot command line options.
+ * + When loaded as a module, accepts the new parameter boot_options
+ * which value is a string with the same format of the kernel boot
+ * command line options. A valid example is:
+ * modprobe u14-34f 'boot_options="0x230,0x340,lc:y,mq:4"'
+ *
+ * 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11
+ * + Removed pre-2.2 source code compatibility.
+ *
+ * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
+ * Added command line option (et:[y|n]) to use the existing
+ * translation (returned by scsicam_bios_param) as disk geometry.
+ * The default is et:n, which uses the disk geometry jumpered
+ * on the board.
+ * The default value et:n is compatible with all previous revisions
+ * of this driver.
+ *
+ * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
+ * Increased busy timeout from 10 msec. to 200 msec. while
+ * processing interrupts.
+ *
+ * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
+ * Improved abort handling during the eh recovery process.
+ *
+ * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
+ * The driver is now fully SMP safe, including the
+ * abort and reset routines.
+ * Added command line options (eh:[y|n]) to choose between
+ * new_eh_code and the old scsi code.
+ * If linux version >= 2.1.101 the default is eh:y, while the eh
+ * option is ignored for previous releases and the old scsi code
+ * is used.
+ *
+ * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
+ * Reworked interrupt handler.
+ *
+ * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
+ * Major reliability improvement: when a batch with overlapping
+ * requests is detected, requests are queued one at a time
+ * eliminating any possible board or drive reordering.
+ *
+ * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
+ * Improved SMP support (if linux version >= 2.1.95).
+ *
+ * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
+ * Performance improvement: when sequential i/o is detected,
+ * always use direct sort instead of reverse sort.
+ *
+ * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
+ * io_port is now unsigned long.
+ *
+ * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
+ * Use new scsi error handling code (if linux version >= 2.1.88).
+ * Use new interrupt code.
+ *
+ * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
+ * Use of udelay inside the wait loops to avoid timeout
+ * problems with fast cpus.
+ * Removed check about useless calls to the interrupt service
+ * routine (reported on SMP systems only).
+ * At initialization time "sorted/unsorted" is displayed instead
+ * of "linked/unlinked" to reinforce the fact that "linking" is
+ * nothing but "elevator sorting" in the actual implementation.
+ *
+ * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
+ * Use of serial_number_at_timeout in abort and reset processing.
+ * Use of the __initfunc and __initdata macro in setup code.
+ * Minor cleanups in the list_statistics code.
+ *
+ * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
+ * When loading as a module, parameter passing is now supported
+ * both in 2.0 and in 2.1 style.
+ * Fixed data transfer direction for some SCSI opcodes.
+ * Immediate acknowledge to request sense commands.
+ * Linked commands to each disk device are now reordered by elevator
+ * sorting. Rare cases in which reordering of write requests could
+ * cause wrong results are managed.
+ *
+ * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
+ * Added command line options to enable/disable linked commands
+ * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max
+ * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8".
+ * Improved command linking.
+ *
+ * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
+ * Added linked command support.
+ *
+ * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
+ * Added queue depth adjustment.
+ *
+ * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
+ * The list of i/o ports to be probed can be overwritten by the
+ * "u14-34f=port0,port1,...." boot command line option.
+ * Scatter/gather lists are now allocated by a number of kmalloc
+ * calls, in order to avoid the previous size limit of 64Kb.
+ *
+ * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
+ * Added multichannel support.
+ *
+ * 27 Sep 1996 rev. 2.12 for linux 2.1.0
+ * Portability cleanups (virtual/bus addressing, little/big endian
+ * support).
+ *
+ * 09 Jul 1996 rev. 2.11 for linux 2.0.4
+ * "Data over/under-run" no longer implies a redo on all targets.
+ * Number of internal retries is now limited.
+ *
+ * 16 Apr 1996 rev. 2.10 for linux 1.3.90
+ * New argument "reset_flags" to the reset routine.
+ *
+ * 21 Jul 1995 rev. 2.02 for linux 1.3.11
+ * Fixed Data Transfer Direction for some SCSI commands.
+ *
+ * 13 Jun 1995 rev. 2.01 for linux 1.2.10
+ * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
+ * the firmware prom is not the latest one (28008-006).
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * U14F qualified to run with 32 sglists.
+ * Now DEBUG_RESET is disabled by default.
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ * Auto detects if U14F boards have an old firmware revision.
+ * Max number of scatter/gather lists set to 16 for all boards
+ * (most installation run fine using 33 sglists, while other
+ * has problems when using more than 16).
+ *
+ * 16 Jan 1995 rev. 1.13 for linux 1.1.81
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 15 Dec 1994 rev. 1.12 for linux 1.1.74
+ * The host->block flag is set for all the detected ISA boards.
+ *
+ * 30 Nov 1994 rev. 1.11 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 14 Nov 1994 rev. 1.10 for linux 1.1.63
+ *
+ * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
+ * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
+ *
+ * This driver is a total replacement of the original UltraStor
+ * scsi driver, but it supports ONLY the 14F and 34F boards.
+ * It can be configured in the same kernel in which the original
+ * ultrastor driver is configured to allow the original U24F
+ * support.
+ *
+ * Multiple U14F and/or U34F host adapters are supported.
+ *
+ * Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com)
+ *
+ * Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * WARNING: if your 14/34F board has an old firmware revision (see below)
+ * you must change "#undef" into "#define" in the following
+ * statement.
+ */
+#undef HAVE_OLD_UX4F_FIRMWARE
+/*
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters.
+ * Here is the scoop on the various models:
+ *
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
+ *
+ * This code has been tested with up to two U14F boards, using both
+ * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
+ * 28004-006/38004-005 (BIOS rev. 2.01).
+ *
+ * The latest firmware is required in order to get reliable operations when
+ * clustering is enabled. ENABLE_CLUSTERING provides a performance increase
+ * up to 50% on sequential access.
+ *
+ * Since the struct scsi_host_template structure is shared among all 14F and 34F,
+ * the last setting of use_clustering is in effect for all of these boards.
+ *
+ * Here a sample configuration using two U14F boards:
+ *
+ U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8.
+ U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8.
+ *
+ * The boot controller must have its BIOS enabled, while other boards can
+ * have their BIOS disabled, or enabled to an higher address.
+ * Boards are named Ux4F0, Ux4F1..., according to the port address order in
+ * the io_port[] array.
+ *
+ * The following facts are based on real testing results (not on
+ * documentation) on the above U14F board.
+ *
+ * - The U14F board should be jumpered for bus on time less or equal to 7
+ * microseconds, while the default is 11 microseconds. This is order to
+ * get acceptable performance while using floppy drive and hard disk
+ * together. The jumpering for 7 microseconds is: JP13 pin 15-16,
+ * JP14 pin 7-8 and pin 9-10.
+ * The reduction has a little impact on scsi performance.
+ *
+ * - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced
+ * from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8).
+ *
+ * - If U14F on board firmware is older than 28004-006/38004-005,
+ * the U14F board is unable to provide reliable operations if the scsi
+ * request length exceeds 16Kbyte. When this length is exceeded the
+ * behavior is:
+ * - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94;
+ * - adapter_status equal 0 and target_status equal 2 on for all targets
+ * in the next operation following the reset.
+ * This sequence takes a long time (>3 seconds), so in the meantime
+ * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
+ * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
+ * Because of this I had to DISABLE_CLUSTERING and to work around the
+ * bus reset in the interrupt service routine, returning DID_BUS_BUSY
+ * so that the operations are retried without complains from the scsi.c
+ * code.
+ * Any reset of the scsi bus is going to kill tape operations, since
+ * no retry is allowed for tapes. Bus resets are more likely when the
+ * scsi bus is under heavy load.
+ * Requests using scatter/gather have a maximum length of 16 x 1024 bytes
+ * when DISABLE_CLUSTERING is in effect, but unscattered requests could be
+ * larger than 16Kbyte.
+ *
+ * The new firmware has fixed all the above problems.
+ *
+ * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
+ * the latest firmware prom is 28008-006. Older firmware 28008-005 has
+ * problems when using more than 16 scatter/gather lists.
+ *
+ * The list of i/o ports to be probed can be totally replaced by the
+ * boot command line option: "u14-34f=port0,port1,port2,...", where the
+ * port0, port1... arguments are ISA/VESA addresses to be probed.
+ * For example using "u14-34f=0x230,0x340", the driver probes only the two
+ * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables
+ * this driver.
+ *
+ * After the optional list of detection probes, other possible command line
+ * options are:
+ *
+ * et:y use disk geometry returned by scsicam_bios_param;
+ * et:n use disk geometry jumpered on the board;
+ * lc:y enables linked commands;
+ * lc:n disables linked commands;
+ * tm:0 disables tagged commands (same as tc:n);
+ * tm:1 use simple queue tags (same as tc:y);
+ * tm:2 use ordered queue tags (same as tc:2);
+ * of:y enables old firmware support;
+ * of:n disables old firmware support;
+ * mq:xx set the max queue depth to the value xx (2 <= xx <= 8).
+ *
+ * The default value is: "u14-34f=lc:n,of:n,mq:8,tm:0,et:n".
+ * An example using the list of detection probes could be:
+ * "u14-34f=0x230,0x340,lc:y,tm:2,of:n,mq:4,et:n".
+ *
+ * When loading as a module, parameters can be specified as well.
+ * The above example would be (use 1 in place of y and 0 in place of n):
+ *
+ * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \
+ * max_queue_depth=4 ext_tran=0 tag_mode=2
+ *
+ * ----------------------------------------------------------------------------
+ * In this implementation, linked commands are designed to work with any DISK
+ * or CD-ROM, since this linking has only the intent of clustering (time-wise)
+ * and reordering by elevator sorting commands directed to each device,
+ * without any relation with the actual SCSI protocol between the controller
+ * and the device.
+ * If Q is the queue depth reported at boot time for each device (also named
+ * cmds/lun) and Q > 2, whenever there is already an active command to the
+ * device all other commands to the same device (up to Q-1) are kept waiting
+ * in the elevator sorting queue. When the active command completes, the
+ * commands in this queue are sorted by sector address. The sort is chosen
+ * between increasing or decreasing by minimizing the seek distance between
+ * the sector of the commands just completed and the sector of the first
+ * command in the list to be sorted.
+ * Trivial math assures that the unsorted average seek distance when doing
+ * random seeks over S sectors is S/3.
+ * When (Q-1) requests are uniformly distributed over S sectors, the average
+ * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
+ * average seek distance for (Q-1) random requests over S sectors is S/Q.
+ * The elevator sorting hence divides the seek distance by a factor Q/3.
+ * The above pure geometric remarks are valid in all cases and the
+ * driver effectively reduces the seek distance by the predicted factor
+ * when there are Q concurrent read i/o operations on the device, but this
+ * does not necessarily results in a noticeable performance improvement:
+ * your mileage may vary....
+ *
+ * Note: command reordering inside a batch of queued commands could cause
+ * wrong results only if there is at least one write request and the
+ * intersection (sector-wise) of all requests is not empty.
+ * When the driver detects a batch including overlapping requests
+ * (a really rare event) strict serial (pid) order is enforced.
+ * ----------------------------------------------------------------------------
+ *
+ * The boards are named Ux4F0, Ux4F1,... according to the detection order.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+
+static int u14_34f_detect(struct scsi_host_template *);
+static int u14_34f_release(struct Scsi_Host *);
+static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static int u14_34f_eh_abort(struct scsi_cmnd *);
+static int u14_34f_eh_host_reset(struct scsi_cmnd *);
+static int u14_34f_bios_param(struct scsi_device *, struct block_device *,
+ sector_t, int *);
+static int u14_34f_slave_configure(struct scsi_device *);
+
+static struct scsi_host_template driver_template = {
+ .name = "UltraStor 14F/34F rev. 8.10.00 ",
+ .detect = u14_34f_detect,
+ .release = u14_34f_release,
+ .queuecommand = u14_34f_queuecommand,
+ .eh_abort_handler = u14_34f_eh_abort,
+ .eh_host_reset_handler = u14_34f_eh_host_reset,
+ .bios_param = u14_34f_bios_param,
+ .slave_configure = u14_34f_slave_configure,
+ .this_id = 7,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+ };
+
+#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+/* Values for the PRODUCT_ID ports for the 14/34F */
+#define PRODUCT_ID1 0x56
+#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#undef DEBUG_LINKED_COMMANDS
+#undef DEBUG_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_RESET
+#undef DEBUG_GENERATE_ERRORS
+#undef DEBUG_GENERATE_ABORTS
+#undef DEBUG_GEOMETRY
+
+#define MAX_ISA 3
+#define MAX_VESA 1
+#define MAX_EISA 0
+#define MAX_PCI 0
+#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
+#define MAX_CHANNEL 1
+#define MAX_LUN 8
+#define MAX_TARGET 8
+#define MAX_MAILBOXES 16
+#define MAX_SGLIST 32
+#define MAX_SAFE_SGLIST 16
+#define MAX_INTERNAL_RETRIES 64
+#define MAX_CMD_PER_LUN 2
+#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
+
+#define SKIP ULONG_MAX
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define READY 5
+#define ABORTING 6
+#define NO_DMA 0xff
+#define MAXLOOP 10000
+#define TAG_DISABLED 0
+#define TAG_SIMPLE 1
+#define TAG_ORDERED 2
+
+#define REG_LCL_MASK 0
+#define REG_LCL_INTR 1
+#define REG_SYS_MASK 2
+#define REG_SYS_INTR 3
+#define REG_PRODUCT_ID1 4
+#define REG_PRODUCT_ID2 5
+#define REG_CONFIG1 6
+#define REG_CONFIG2 7
+#define REG_OGM 8
+#define REG_ICM 12
+#define REGION_SIZE 13UL
+#define BSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x01
+#define CMD_RESET 0xc0
+#define CMD_OGM_INTR 0x01
+#define CMD_CLR_INTR 0x01
+#define CMD_ENA_INTR 0x81
+#define ASOK 0x00
+#define ASST 0x91
+
+#define YESNO(a) ((a) ? 'y' : 'n')
+#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
+
+#define PACKED __attribute__((packed))
+
+struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ };
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned char sg:1, ca:1, dcn:1, xdir:2, opcode:3;
+ unsigned char lun: 3, channel:2, target:3;
+#else
+ unsigned char opcode: 3, /* type of command */
+ xdir: 2, /* data transfer direction */
+ dcn: 1, /* disable disconnect */
+ ca: 1, /* use cache (if available) */
+ sg: 1; /* scatter/gather operation */
+ unsigned char target: 3, /* SCSI target id */
+ channel: 2, /* SCSI channel number */
+ lun: 3; /* SCSI logical unit number */
+#endif
+
+ unsigned int data_address PACKED; /* transfer data pointer */
+ unsigned int data_len PACKED; /* length in bytes */
+ unsigned int link_address PACKED; /* for linking command chains */
+ unsigned char clink_id; /* identifies command in chain */
+ unsigned char use_sg; /* (if sg is set) 8 bytes per list */
+ unsigned char sense_len;
+ unsigned char cdb_len; /* 6, 10, or 12 */
+ unsigned char cdb[12]; /* SCSI Command Descriptor Block */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_addr PACKED;
+
+ /* Additional fields begin here. */
+ struct scsi_cmnd *SCpnt;
+ unsigned int cpp_index; /* cp index */
+
+ /* All the cp structure is zero filled by queuecommand except the
+ following CP_TAIL_SIZE bytes, initialized by detect */
+ dma_addr_t cp_dma_addr; /* dma handle for this cp structure */
+ struct sg_list *sglist; /* pointer to the allocated SG list */
+ };
+
+#define CP_TAIL_SIZE (sizeof(struct sglist *) + sizeof(dma_addr_t))
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
+ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
+ unsigned int retries; /* Number of internal retries */
+ unsigned long last_retried_pid; /* Pid of last retried command */
+ unsigned char subversion; /* Bus type, either ISA or ESA */
+ struct pci_dev *pdev; /* Always NULL */
+ unsigned char heads;
+ unsigned char sectors;
+ char board_id[256]; /* data from INQUIRY on this board */
+ };
+
+static struct Scsi_Host *sh[MAX_BOARDS + 1];
+static const char *driver_name = "Ux4F";
+static char sha[MAX_BOARDS];
+static DEFINE_SPINLOCK(driver_lock);
+
+/* Initialize num_boards so that ihdlr can work while detect is in progress */
+static unsigned int num_boards = MAX_BOARDS;
+
+static unsigned long io_port[] = {
+
+ /* Space for MAX_INT_PARAM ports usable while loading as a module */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP,
+
+ /* Possible ISA/VESA ports */
+ 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140,
+
+ /* End of list */
+ 0x0
+ };
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+/* Device is Little Endian */
+#define H2DEV(x) cpu_to_le32(x)
+#define DEV2H(x) le32_to_cpu(x)
+
+static irqreturn_t do_interrupt_handler(int, void *);
+static void flush_dev(struct scsi_device *, unsigned long, unsigned int, unsigned int);
+static int do_trace = FALSE;
+static int setup_done = FALSE;
+static int link_statistics;
+static int ext_tran = FALSE;
+
+#if defined(HAVE_OLD_UX4F_FIRMWARE)
+static int have_old_firmware = TRUE;
+#else
+static int have_old_firmware = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_TAGGED_QUEUE)
+static int tag_mode = TAG_SIMPLE;
+#else
+static int tag_mode = TAG_DISABLED;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS)
+static int linked_comm = TRUE;
+#else
+static int linked_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_MAX_TAGS)
+static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS;
+#else
+static int max_queue_depth = MAX_CMD_PER_LUN;
+#endif
+
+#define MAX_INT_PARAM 10
+#define MAX_BOOT_OPTIONS_SIZE 256
+static char boot_options[MAX_BOOT_OPTIONS_SIZE];
+
+#if defined(MODULE)
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+module_param_string(u14_34f, boot_options, MAX_BOOT_OPTIONS_SIZE, 0);
+MODULE_PARM_DESC(u14_34f, " equivalent to the \"u14-34f=...\" kernel boot " \
+"option." \
+" Example: modprobe u14-34f \"u14_34f=0x340,0x330,lc:y,tm:0,mq:4\"");
+MODULE_AUTHOR("Dario Ballabio");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UltraStor 14F/34F SCSI Driver");
+
+#endif
+
+static int u14_34f_slave_configure(struct scsi_device *dev) {
+ int j, tqd, utqd;
+ char *tag_suffix, *link_suffix;
+ struct Scsi_Host *host = dev->host;
+
+ j = ((struct hostdata *) host->hostdata)->board_number;
+
+ utqd = MAX_CMD_PER_LUN;
+ tqd = max_queue_depth;
+
+ if (TLDEV(dev->type) && dev->tagged_supported)
+
+ if (tag_mode == TAG_SIMPLE) {
+ scsi_change_queue_depth(dev, tqd);
+ tag_suffix = ", simple tags";
+ }
+ else if (tag_mode == TAG_ORDERED) {
+ scsi_change_queue_depth(dev, tqd);
+ tag_suffix = ", ordered tags";
+ }
+ else {
+ scsi_change_queue_depth(dev, tqd);
+ tag_suffix = ", no tags";
+ }
+
+ else if (TLDEV(dev->type) && linked_comm) {
+ scsi_change_queue_depth(dev, tqd);
+ tag_suffix = ", untagged";
+ }
+
+ else {
+ scsi_change_queue_depth(dev, utqd);
+ tag_suffix = "";
+ }
+
+ if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2)
+ link_suffix = ", sorted";
+ else if (TLDEV(dev->type))
+ link_suffix = ", unsorted";
+ else
+ link_suffix = "";
+
+ sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n",
+ dev->queue_depth, link_suffix, tag_suffix);
+
+ return FALSE;
+}
+
+static int wait_on_busy(unsigned long iobase, unsigned int loop) {
+
+ while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ return FALSE;
+}
+
+static int board_inquiry(unsigned int j) {
+ struct mscp *cpp;
+ dma_addr_t id_dma_addr;
+ unsigned int limit = 0;
+ unsigned long time;
+
+ id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
+ sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
+ cpp = &HD(j)->cp[0];
+ cpp->cp_dma_addr = pci_map_single(HD(j)->pdev, cpp, sizeof(struct mscp),
+ PCI_DMA_BIDIRECTIONAL);
+ memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
+ cpp->opcode = OP_HOST_ADAPTER;
+ cpp->xdir = DTD_IN;
+ cpp->data_address = H2DEV(id_dma_addr);
+ cpp->data_len = H2DEV(sizeof(HD(j)->board_id));
+ cpp->cdb_len = 6;
+ cpp->cdb[0] = HA_CMD_INQUIRY;
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: board_inquiry, adapter busy.\n", BN(j));
+ return TRUE;
+ }
+
+ HD(j)->cp_stat[0] = IGNORE;
+
+ /* Clear the interrupt indication */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+ /* Store pointer in OGM address bytes */
+ outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ spin_unlock_irq(&driver_lock);
+ time = jiffies;
+ while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
+ spin_lock_irq(&driver_lock);
+
+ if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
+ HD(j)->cp_stat[0] = FREE;
+ printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
+ return TRUE;
+ }
+
+ pci_unmap_single(HD(j)->pdev, cpp->cp_dma_addr, sizeof(struct mscp),
+ PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_single(HD(j)->pdev, id_dma_addr, sizeof(HD(j)->board_id),
+ PCI_DMA_BIDIRECTIONAL);
+ return FALSE;
+}
+
+static int port_detect \
+ (unsigned long port_base, unsigned int j, struct scsi_host_template *tpnt) {
+ unsigned char irq, dma_channel, subversion, i;
+ unsigned char in_byte;
+ char *bus_type, dma_name[16];
+
+ /* Allowed BIOS base addresses (NULL indicates reserved) */
+ unsigned long bios_segment_table[8] = {
+ 0,
+ 0xc4000, 0xc8000, 0xcc000, 0xd0000,
+ 0xd4000, 0xd8000, 0xdc000
+ };
+
+ /* Allowed IRQs */
+ unsigned char interrupt_table[4] = { 15, 14, 11, 10 };
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ /* Head/sector mappings */
+ struct {
+ unsigned char heads;
+ unsigned char sectors;
+ } mapping_table[4] = {
+ { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
+ };
+
+ struct config_1 {
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned char dma_channel: 2, interrupt:2,
+ removable_disks_as_fixed:1, bios_segment: 3;
+#else
+ unsigned char bios_segment: 3, removable_disks_as_fixed: 1,
+ interrupt: 2, dma_channel: 2;
+#endif
+
+ } config_1;
+
+ struct config_2 {
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned char tfr_port: 2, bios_drive_number: 1,
+ mapping_mode: 2, ha_scsi_id: 3;
+#else
+ unsigned char ha_scsi_id: 3, mapping_mode: 2,
+ bios_drive_number: 1, tfr_port: 2;
+#endif
+
+ } config_2;
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ if (!request_region(port_base, REGION_SIZE, driver_name)) {
+#if defined(DEBUG_DETECT)
+ printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
+#endif
+ goto fail;
+ }
+
+ spin_lock_irq(&driver_lock);
+
+ if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) goto freelock;
+
+ in_byte = inb(port_base + REG_PRODUCT_ID2);
+
+ if ((in_byte & 0xf0) != PRODUCT_ID2) goto freelock;
+
+ *(char *)&config_1 = inb(port_base + REG_CONFIG1);
+ *(char *)&config_2 = inb(port_base + REG_CONFIG2);
+
+ irq = interrupt_table[config_1.interrupt];
+ dma_channel = dma_channel_table[config_1.dma_channel];
+ subversion = (in_byte & 0x0f);
+
+ /* Board detected, allocate its IRQ */
+ if (request_irq(irq, do_interrupt_handler,
+ (subversion == ESA) ? IRQF_SHARED : 0,
+ driver_name, (void *) &sha[j])) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ goto freelock;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ goto freeirq;
+ }
+
+ if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;
+
+ spin_unlock_irq(&driver_lock);
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+ spin_lock_irq(&driver_lock);
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+ goto freedma;
+ }
+
+ sh[j]->io_port = port_base;
+ sh[j]->unique_id = port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->base = bios_segment_table[config_1.bios_segment];
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ sh[j]->this_id = config_2.ha_scsi_id;
+ sh[j]->can_queue = MAX_MAILBOXES;
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+
+#if defined(DEBUG_DETECT)
+ {
+ unsigned char sys_mask, lcl_mask;
+
+ sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
+ lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
+ printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
+ }
+#endif
+
+ /* Probably a bogus host scsi id, set it to the dummy value */
+ if (sh[j]->this_id == 0) sh[j]->this_id = -1;
+
+ /* If BIOS is disabled, force enable interrupts */
+ if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
+ HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
+ HD(j)->subversion = subversion;
+ HD(j)->pdev = NULL;
+ HD(j)->board_number = j;
+
+ if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+
+ if (HD(j)->subversion == ESA) {
+ sh[j]->unchecked_isa_dma = FALSE;
+ sh[j]->dma_channel = NO_DMA;
+ sprintf(BN(j), "U34F%d", j);
+ bus_type = "VESA";
+ }
+ else {
+ unsigned long flags;
+ sh[j]->unchecked_isa_dma = TRUE;
+
+ flags=claim_dma_lock();
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ release_dma_lock(flags);
+
+ sh[j]->dma_channel = dma_channel;
+ sprintf(BN(j), "U14F%d", j);
+ bus_type = "ISA";
+ }
+
+ sh[j]->max_channel = MAX_CHANNEL - 1;
+ sh[j]->max_id = MAX_TARGET;
+ sh[j]->max_lun = MAX_LUN;
+
+ if (HD(j)->subversion == ISA && !board_inquiry(j)) {
+ HD(j)->board_id[40] = 0;
+
+ if (strcmp(&HD(j)->board_id[32], "06000600")) {
+ printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
+ printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
+ BN(j), &HD(j)->board_id[32]);
+ sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
+ sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+ }
+ }
+
+ if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
+ else sprintf(dma_name, "DMA %u", dma_channel);
+
+ spin_unlock_irq(&driver_lock);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev,
+ &HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if (! ((&HD(j)->cp[i])->sglist = kmalloc(
+ sh[j]->sg_tablesize * sizeof(struct sg_list),
+ (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
+ printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
+ goto release;
+ }
+
+ if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
+ max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
+
+ if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
+
+ if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE)
+ tag_mode = TAG_ORDERED;
+
+ if (j == 0) {
+ printk("UltraStor 14F/34F: Copyright (C) 1994-2003 Dario Ballabio.\n");
+ printk("%s config options -> of:%c, tm:%d, lc:%c, mq:%d, et:%c.\n",
+ driver_name, YESNO(have_old_firmware), tag_mode,
+ YESNO(linked_comm), max_queue_depth, YESNO(ext_tran));
+ }
+
+ printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
+ BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
+ sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
+
+ if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
+ printk("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n",
+ BN(j), sh[j]->max_id, sh[j]->max_lun);
+
+ for (i = 0; i <= sh[j]->max_channel; i++)
+ printk("%s: SCSI channel %u enabled, host target ID %d.\n",
+ BN(j), i, sh[j]->this_id);
+
+ return TRUE;
+
+freedma:
+ if (subversion == ISA) free_dma(dma_channel);
+freeirq:
+ free_irq(irq, &sha[j]);
+freelock:
+ spin_unlock_irq(&driver_lock);
+ release_region(port_base, REGION_SIZE);
+fail:
+ return FALSE;
+
+release:
+ u14_34f_release(sh[j]);
+ return FALSE;
+}
+
+static void internal_setup(char *str, int *ints) {
+ int i, argc = ints[0];
+ char *cur = str, *pc;
+
+ if (argc > 0) {
+
+ if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
+
+ for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
+
+ io_port[i] = 0;
+ setup_done = TRUE;
+ }
+
+ while (cur && (pc = strchr(cur, ':'))) {
+ int val = 0, c = *++pc;
+
+ if (c == 'n' || c == 'N') val = FALSE;
+ else if (c == 'y' || c == 'Y') val = TRUE;
+ else val = (int) simple_strtoul(pc, NULL, 0);
+
+ if (!strncmp(cur, "lc:", 3)) linked_comm = val;
+ else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
+ else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
+ else if (!strncmp(cur, "tc:", 3)) tag_mode = val;
+ else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
+ else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
+ else if (!strncmp(cur, "et:", 3)) ext_tran = val;
+
+ if ((cur = strchr(cur, ','))) ++cur;
+ }
+
+ return;
+}
+
+static int option_setup(char *str) {
+ int ints[MAX_INT_PARAM];
+ char *cur = str;
+ int i = 1;
+
+ while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
+ ints[i++] = simple_strtoul(cur, NULL, 0);
+
+ if ((cur = strchr(cur, ',')) != NULL) cur++;
+ }
+
+ ints[0] = i - 1;
+ internal_setup(cur, ints);
+ return 1;
+}
+
+static int u14_34f_detect(struct scsi_host_template *tpnt) {
+ unsigned int j = 0, k;
+
+ tpnt->proc_name = "u14-34f";
+
+ if(strlen(boot_options)) option_setup(boot_options);
+
+#if defined(MODULE)
+ /* io_port could have been modified when loading as a module */
+ if(io_port[0] != SKIP) {
+ setup_done = TRUE;
+ io_port[MAX_INT_PARAM] = 0;
+ }
+#endif
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ for (k = 0; io_port[k]; k++) {
+
+ if (io_port[k] == SKIP) continue;
+
+ if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
+ }
+
+ num_boards = j;
+ return j;
+}
+
+static void map_dma(unsigned int i, unsigned int j) {
+ unsigned int data_len = 0;
+ unsigned int k, pci_dir;
+ int count;
+ struct scatterlist *sg;
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
+ pci_dir = SCpnt->sc_data_direction;
+
+ if (SCpnt->sense_buffer)
+ cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
+
+ cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ if (scsi_bufflen(SCpnt)) {
+ count = scsi_dma_map(SCpnt);
+ BUG_ON(count < 0);
+
+ scsi_for_each_sg(SCpnt, sg, count, k) {
+ cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+ cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
+ data_len += sg->length;
+ }
+
+ cpp->sg = TRUE;
+ cpp->use_sg = scsi_sg_count(SCpnt);
+ cpp->data_address =
+ H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
+ cpp->use_sg * sizeof(struct sg_list),
+ pci_dir));
+ cpp->data_len = H2DEV(data_len);
+
+ } else {
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
+ }
+}
+
+static void unmap_dma(unsigned int i, unsigned int j) {
+ unsigned int pci_dir;
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
+ pci_dir = SCpnt->sc_data_direction;
+
+ if (DEV2H(cpp->sense_addr))
+ pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
+ DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
+
+ scsi_dma_unmap(SCpnt);
+
+ if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
+
+ if (DEV2H(cpp->data_address))
+ pci_unmap_single(HD(j)->pdev, DEV2H(cpp->data_address),
+ DEV2H(cpp->data_len), pci_dir);
+}
+
+static void sync_dma(unsigned int i, unsigned int j) {
+ unsigned int pci_dir;
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
+ pci_dir = SCpnt->sc_data_direction;
+
+ if (DEV2H(cpp->sense_addr))
+ pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
+ DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
+
+ if (scsi_sg_count(SCpnt))
+ pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt), pci_dir);
+
+ if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
+
+ if (DEV2H(cpp->data_address))
+ pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->data_address),
+ DEV2H(cpp->data_len), pci_dir);
+}
+
+static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
+ unsigned int k;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
+ 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
+ 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d
+ };
+
+ static const unsigned char data_none_cmds[] = {
+ 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
+ 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
+ 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00
+ };
+
+ struct mscp *cpp;
+ struct scsi_cmnd *SCpnt;
+
+ cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
+
+ if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
+ cpp->xdir = DTD_IN;
+ return;
+ }
+ else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
+ cpp->xdir = DTD_OUT;
+ return;
+ }
+ else if (SCpnt->sc_data_direction == DMA_NONE) {
+ cpp->xdir = DTD_NONE;
+ return;
+ }
+
+ if (SCpnt->sc_data_direction != DMA_BIDIRECTIONAL)
+ panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n", BN(j));
+
+ cpp->xdir = DTD_IN;
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->xdir = DTD_OUT;
+ break;
+ }
+
+ if (cpp->xdir == DTD_IN)
+ for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
+ if (SCpnt->cmnd[0] == data_none_cmds[k]) {
+ cpp->xdir = DTD_NONE;
+ break;
+ }
+
+}
+
+static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
+ unsigned int i, j, k;
+ struct mscp *cpp;
+
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;
+
+ if (SCpnt->host_scribble)
+ panic("%s: qcomm, SCpnt %p already active.\n",
+ BN(j), SCpnt);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox.\n", BN(j));
+ return 1;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
+ SCpnt->scsi_done = done;
+ cpp->cpp_index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%u.\n",
+ BN(j), i, SCpnt->device->channel, SCpnt->device->id,
+ (u8)SCpnt->device->lun);
+
+ cpp->opcode = OP_SCSI;
+ cpp->channel = SCpnt->device->channel;
+ cpp->target = SCpnt->device->id;
+ cpp->lun = (u8)SCpnt->device->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->cdb_len = SCpnt->cmd_len;
+ memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ /* Use data transfer direction SCpnt->sc_data_direction */
+ scsi_to_dev_dir(i, j);
+
+ /* Map DMA buffers and SG list */
+ map_dma(i, j);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type)) {
+ HD(j)->cp_stat[i] = READY;
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
+ return 0;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ unmap_dma(i, j);
+ SCpnt->host_scribble = NULL;
+ scmd_printk(KERN_INFO, SCpnt,
+ "qcomm, adapter busy.\n");
+ return 1;
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ HD(j)->cp_stat[i] = IN_USE;
+ return 0;
+}
+
+static DEF_SCSI_QCMD(u14_34f_queuecommand)
+
+static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL) {
+ scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n");
+ return SUCCESS;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ unmap_dma(i, j);
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i);
+ SCarg->scsi_done(SCarg);
+ return SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
+ unsigned int i, j, k, c, limit = 0;
+ unsigned long time;
+ int arg_done = FALSE;
+ struct scsi_cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
+ scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
+
+ spin_lock_irq(sh[j]->host_lock);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, inactive.\n", BN(j));
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ spin_unlock_irq(sh[j]->host_lock);
+ return FAILED;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ spin_unlock_irq(sh[j]->host_lock);
+ return FAILED;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting.\n", BN(j), i);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset.\n", BN(j), i);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ spin_unlock_irq(sh[j]->host_lock);
+ return FAILED;
+ }
+
+ outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+
+ spin_unlock_irq(sh[j]->host_lock);
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ spin_lock_irq(sh[j]->host_lock);
+
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ unmap_dma(i, j);
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ unmap_dma(i, j);
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) printk("%s: reset, exit, done.\n", BN(j));
+ else printk("%s: reset, exit.\n", BN(j));
+
+ spin_unlock_irq(sh[j]->host_lock);
+ return SUCCESS;
+}
+
+static int u14_34f_bios_param(struct scsi_device *disk,
+ struct block_device *bdev, sector_t capacity, int *dkinfo) {
+ unsigned int j = 0;
+ unsigned int size = capacity;
+
+ dkinfo[0] = HD(j)->heads;
+ dkinfo[1] = HD(j)->sectors;
+ dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);
+
+ if (ext_tran && (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) {
+ dkinfo[0] = 255;
+ dkinfo[1] = 63;
+ dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
+ }
+
+#if defined (DEBUG_GEOMETRY)
+ printk ("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name,
+ dkinfo[0], dkinfo[1], dkinfo[2]);
+#endif
+
+ return FALSE;
+}
+
+static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
+ unsigned int rev) {
+ unsigned int i, j, k, y;
+ unsigned long x;
+
+ for (i = 0; i < n - 1; i++) {
+ k = i;
+
+ for (j = k + 1; j < n; j++)
+ if (rev) {
+ if (sk[j] > sk[k]) k = j;
+ }
+ else {
+ if (sk[j] < sk[k]) k = j;
+ }
+
+ if (k != i) {
+ x = sk[k]; sk[k] = sk[i]; sk[i] = x;
+ y = da[k]; da[k] = da[i]; da[i] = y;
+ }
+ }
+
+ return;
+ }
+
+static int reorder(unsigned int j, unsigned long cursec,
+ unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
+ struct scsi_cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n;
+ unsigned int rev = FALSE, s = TRUE, r = TRUE;
+ unsigned int input_only = TRUE, overlap = FALSE;
+ unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
+ unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
+ unsigned long ioseek = 0;
+
+ static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
+ static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
+ static unsigned int readysorted = 0, revcount = 0;
+ static unsigned long seeksorted = 0, seeknosort = 0;
+
+ if (link_statistics && !(++flushcount % link_statistics))
+ printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
+ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
+ ovlcount, readycount, readysorted, sortcount, revcount,
+ seeknosort / (readycount + 1),
+ seeksorted / (readycount + 1));
+
+ if (n_ready <= 1) return FALSE;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
+
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
+
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
+
+ if (!n) continue;
+
+ if (sl[n] < sl[n - 1]) s = FALSE;
+ if (sl[n] > sl[n - 1]) r = FALSE;
+
+ if (link_statistics) {
+ if (sl[n] > sl[n - 1])
+ seek += sl[n] - sl[n - 1];
+ else
+ seek += sl[n - 1] - sl[n];
+ }
+
+ }
+
+ if (link_statistics) {
+ if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
+ }
+
+ if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;
+
+ if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;
+
+ if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);
+
+ if (!input_only) for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
+
+ if (!n) continue;
+
+ if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
+ || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
+ }
+
+ if (overlap) sort(pl, il, n_ready, FALSE);
+
+ if (link_statistics) {
+ if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
+ batchcount++; readycount += n_ready; seeknosort += seek / 1024;
+ if (input_only) inputcount++;
+ if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
+ else seeksorted += (iseek + maxsec - minsec) / 1024;
+ if (rev && !r) { revcount++; readysorted += n_ready; }
+ if (!rev && !s) { sortcount++; readysorted += n_ready; }
+ }
+
+#if defined(DEBUG_LINKED_COMMANDS)
+ if (link_statistics && (overlap || !(flushcount % link_statistics)))
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ printk("%s %d.%d:%llu mb %d fc %d nr %d sec %ld ns %u"\
+ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
+ (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
+ (u8)SCpnt->lun, k, flushcount, n_ready,
+ blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
+ cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ YESNO(overlap), cpp->xdir);
+ }
+#endif
+ return overlap;
+}
+
+static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned int j,
+ unsigned int ihdlr) {
+ struct scsi_cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
+
+ for (k = 0; k < sh[j]->can_queue; k++) {
+
+ if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;
+
+ cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (SCpnt->device != dev) continue;
+
+ if (HD(j)->cp_stat[k] == IN_USE) return;
+
+ il[n_ready++] = k;
+ }
+
+ if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ scmd_printk(KERN_INFO, SCpnt,
+ "%s, mbox %d, adapter"
+ " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"),
+ k);
+ HD(j)->cp_stat[k] = ABORTING;
+ continue;
+ }
+
+ outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+ HD(j)->cp_stat[k] = IN_USE;
+ }
+
+}
+
+static irqreturn_t ihdlr(unsigned int j)
+{
+ struct scsi_cmnd *SCpnt;
+ unsigned int i, k, c, status, tstatus, reg, ret;
+ struct mscp *spp, *cpp;
+ int irq = sh[j]->irq;
+
+ /* Check if this board need to be serviced */
+ if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
+
+ HD(j)->iocount++;
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ /* Check if this board is still busy */
+ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+ printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ goto none;
+ }
+
+ ret = inl(sh[j]->io_port + REG_ICM);
+
+ /* Clear interrupt pending flag */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+ /* Find the mailbox to be serviced on this board */
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break;
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
+ (void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr));
+
+ cpp = &(HD(j)->cp[i]);
+ spp = cpp;
+
+#if defined(DEBUG_GENERATE_ABORTS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) goto handled;
+#endif
+
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ goto handled;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ goto handled;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ goto handled;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
+ BN(j), i, HD(j)->cp_stat[i]);
+
+ HD(j)->cp_stat[i] = FREE;
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i,
+ SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
+ BN(j), i, *(unsigned int *)SCpnt->host_scribble);
+
+ sync_dma(i, j);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type))
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
+
+ tstatus = status_byte(spp->target_status);
+
+#if defined(DEBUG_GENERATE_ERRORS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
+ spp->adapter_status = 0x01;
+#endif
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
+ (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 &&
+ (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
+ scmd_printk(KERN_INFO, SCpnt,
+ "ihdlr, target_status 0x%x, sense key 0x%x.\n",
+ spp->target_status,
+ SCpnt->sense_buffer[2]);
+
+ HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0;
+
+ if (HD(j)->last_retried_pid == SCpnt->serial_number) HD(j)->retries = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+
+ if (HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)]++;
+ }
+
+ break;
+
+ /* Perform a limited number of internal retries */
+ case 0x93: /* Unexpected bus free */
+ case 0x94: /* Target bus phase sequence failure */
+ case 0x96: /* Illegal SCSI command */
+ case 0xa3: /* SCSI bus reset error */
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++)
+ HD(j)->target_redo[k][c] = TRUE;
+
+
+ case 0x92: /* Data over/under-run */
+
+ if (SCpnt->device->type != TYPE_TAPE
+ && HD(j)->retries < MAX_INTERNAL_RETRIES) {
+
+#if defined(DID_SOFT_ERROR)
+ status = DID_SOFT_ERROR << 16;
+#else
+ status = DID_BUS_BUSY << 16;
+#endif
+
+ HD(j)->retries++;
+ HD(j)->last_retried_pid = SCpnt->serial_number;
+ }
+ else
+ status = DID_ERROR << 16;
+
+ break;
+ case 0x01: /* Invalid command */
+ case 0x02: /* Invalid parameters */
+ case 0x03: /* Invalid data list */
+ case 0x84: /* SCSI bus abort error */
+ case 0x9b: /* Auto request sense error */
+ case 0x9f: /* Unexpected command complete message error */
+ case 0xff: /* Invalid parameter in the S/G list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+
+#if defined(DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace || msg_byte(spp->target_status))
+#endif
+ scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\
+ " reg 0x%x, count %d.\n",
+ i, spp->adapter_status, spp->target_status,
+ reg, HD(j)->iocount);
+
+ unmap_dma(i, j);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+handled:
+ return IRQ_HANDLED;
+none:
+ return IRQ_NONE;
+}
+
+static irqreturn_t do_interrupt_handler(int irq, void *shap) {
+ unsigned int j;
+ unsigned long spin_flags;
+ irqreturn_t ret;
+
+ /* Check if the interrupt must be processed by this handler */
+ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
+
+ spin_lock_irqsave(sh[j]->host_lock, spin_flags);
+ ret = ihdlr(j);
+ spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
+ return ret;
+}
+
+static int u14_34f_release(struct Scsi_Host *shpnt) {
+ unsigned int i, j;
+
+ for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);
+
+ if (sh[j] == NULL)
+ panic("%s: release, invalid Scsi_Host pointer.\n", driver_name);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ kfree((&HD(j)->cp[i])->sglist);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr,
+ sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);
+
+ free_irq(sh[j]->irq, &sha[j]);
+
+ if (sh[j]->dma_channel != NO_DMA)
+ free_dma(sh[j]->dma_channel);
+
+ release_region(sh[j]->io_port, sh[j]->n_io_port);
+ scsi_unregister(sh[j]);
+ return FALSE;
+}
+
+#include "scsi_module.c"
+
+#ifndef MODULE
+__setup("u14-34f=", option_setup);
+#endif /* end MODULE */
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
new file mode 100644
index 000000000..8a1f4b355
--- /dev/null
+++ b/drivers/scsi/ufs/Kconfig
@@ -0,0 +1,85 @@
+#
+# Kernel configuration file for the UFS Host Controller
+#
+# This code is based on drivers/scsi/ufs/Kconfig
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
+
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller Driver Core"
+ depends on SCSI && SCSI_DMA
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ ---help---
+ This selects the support for UFS devices in Linux, say Y and make
+ sure that you know the name of your UFS host adapter (the card
+ inside your computer that "speaks" the UFS protocol, also
+ called UFS Host Controller), because you will be asked for it.
+ The module will be called ufshcd.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.txt>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
+
+config SCSI_UFSHCD_PCI
+ tristate "PCI bus based UFS Controller support"
+ depends on SCSI_UFSHCD && PCI
+ ---help---
+ This selects the PCI UFS Host Controller Interface. Select this if
+ you have UFS Host Controller with PCI Interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+ tristate "Platform bus based UFS Controller support"
+ depends on SCSI_UFSHCD
+ ---help---
+ This selects the UFS host controller support. Select this if
+ you have an UFS controller on Platform bus.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config SCSI_UFS_QCOM
+ bool "QCOM specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM
+ select PHY_QCOM_UFS
+ help
+ This selects the QCOM specific additions to UFSHCD platform driver.
+ UFS host on QCOM needs some vendor specific configuration before
+ accessing the hardware which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have UFS controller on QCOM chipset.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
new file mode 100644
index 000000000..8303bcce7
--- /dev/null
+++ b/drivers/scsi/ufs/Makefile
@@ -0,0 +1,5 @@
+# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
new file mode 100644
index 000000000..6652a8171
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+#include <linux/phy/phy-qcom-ufs.h>
+#include "ufshcd.h"
+#include "unipro.h"
+#include "ufs-qcom.h"
+#include "ufshci.h"
+
+static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
+
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+ const char *speed_mode);
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
+
+static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
+{
+ int err = 0;
+
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
+ if (err)
+ dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
+ __func__, err);
+
+ return err;
+}
+
+static int ufs_qcom_host_clk_get(struct device *dev,
+ const char *name, struct clk **clk_out)
+{
+ struct clk *clk;
+ int err = 0;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(dev, "%s: failed to get %s err %d",
+ __func__, name, err);
+ } else {
+ *clk_out = clk;
+ }
+
+ return err;
+}
+
+static int ufs_qcom_host_clk_enable(struct device *dev,
+ const char *name, struct clk *clk)
+{
+ int err = 0;
+
+ err = clk_prepare_enable(clk);
+ if (err)
+ dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
+
+ return err;
+}
+
+static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
+{
+ if (!host->is_lane_clks_enabled)
+ return;
+
+ clk_disable_unprepare(host->tx_l1_sync_clk);
+ clk_disable_unprepare(host->tx_l0_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l0_sync_clk);
+
+ host->is_lane_clks_enabled = false;
+}
+
+static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
+{
+ int err = 0;
+ struct device *dev = host->hba->dev;
+
+ if (host->is_lane_clks_enabled)
+ return 0;
+
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
+ host->rx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
+ host->tx_l0_sync_clk);
+ if (err)
+ goto disable_rx_l0;
+
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ host->rx_l1_sync_clk);
+ if (err)
+ goto disable_tx_l0;
+
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ host->tx_l1_sync_clk);
+ if (err)
+ goto disable_rx_l1;
+
+ host->is_lane_clks_enabled = true;
+ goto out;
+
+disable_rx_l1:
+ clk_disable_unprepare(host->rx_l1_sync_clk);
+disable_tx_l0:
+ clk_disable_unprepare(host->tx_l0_sync_clk);
+disable_rx_l0:
+ clk_disable_unprepare(host->rx_l0_sync_clk);
+out:
+ return err;
+}
+
+static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
+{
+ int err = 0;
+ struct device *dev = host->hba->dev;
+
+ err = ufs_qcom_host_clk_get(dev,
+ "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev,
+ "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+ &host->rx_l1_sync_clk);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+ &host->tx_l1_sync_clk);
+out:
+ return err;
+}
+
+static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ u32 tx_lanes;
+ int err = 0;
+
+ err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
+ if (err)
+ dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
+ __func__);
+
+out:
+ return err;
+}
+
+static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
+{
+ int err;
+ u32 tx_fsm_val = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+ if (err || tx_fsm_val == TX_FSM_HIBERN8)
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ /*
+ * we might have scheduled out for long during polling so
+ * check the state again.
+ */
+ if (time_after(jiffies, timeout))
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val != TX_FSM_HIBERN8) {
+ err = tx_fsm_val;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
+ __func__, err);
+ }
+
+ return err;
+}
+
+static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
+ bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
+ ? true : false;
+
+ /* Assert PHY reset and apply PHY calibration values */
+ ufs_qcom_assert_reset(hba);
+ /* provide 1ms delay to let the reset pulse propagate */
+ usleep_range(1000, 1100);
+
+ ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /* De-assert PHY reset and start serdes */
+ ufs_qcom_deassert_reset(hba);
+
+ /*
+ * after reset deassertion, phy will need all ref clocks,
+ * voltage, current to settle down before starting serdes.
+ */
+ usleep_range(1000, 1100);
+ ret = ufs_qcom_phy_start_serdes(phy);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = ufs_qcom_phy_is_pcs_ready(phy);
+ if (ret)
+ dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+ __func__, ret);
+
+out:
+ return ret;
+}
+
+/*
+ * The UTP controller has a number of internal clock gating cells (CGCs).
+ * Internal hardware sub-modules within the UTP controller control the CGCs.
+ * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
+ * in a specific operation, UTP controller CGCs are by default disabled and
+ * this function enables them (after every UFS link startup) to save some power
+ * leakage.
+ */
+static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
+ REG_UFS_CFG2);
+
+ /* Ensure that HW clock gating is enabled before next operations */
+ mb();
+}
+
+static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_qcom_power_up_sequence(hba);
+ /*
+ * The PHY PLL output is the source of tx/rx lane symbol
+ * clocks, hence, enable the lane clocks only after PHY
+ * is initialized.
+ */
+ err = ufs_qcom_enable_lane_clks(host);
+ break;
+ case POST_CHANGE:
+ /* check if UFS PHY moved from DISABLED to HIBERN8 */
+ err = ufs_qcom_check_hibern8(hba);
+ ufs_qcom_enable_hw_clk_gating(hba);
+
+ break;
+ default:
+ dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+/**
+ * Returns non-zero for success (which rate of core_clk) and 0
+ * in case of a failure
+ */
+static unsigned long
+ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
+{
+ struct ufs_clk_info *clki;
+ u32 core_clk_period_in_ns;
+ u32 tx_clk_cycles_per_us = 0;
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_cycles_per_us = 0;
+
+ static u32 pwm_fr_table[][2] = {
+ {UFS_PWM_G1, 0x1},
+ {UFS_PWM_G2, 0x1},
+ {UFS_PWM_G3, 0x1},
+ {UFS_PWM_G4, 0x1},
+ };
+
+ static u32 hs_fr_table_rA[][2] = {
+ {UFS_HS_G1, 0x1F},
+ {UFS_HS_G2, 0x3e},
+ };
+
+ static u32 hs_fr_table_rB[][2] = {
+ {UFS_HS_G1, 0x24},
+ {UFS_HS_G2, 0x49},
+ };
+
+ if (gear == 0) {
+ dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
+ goto out_error;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk"))
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
+
+ /* If frequency is smaller than 1MHz, set to 1MHz */
+ if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
+ core_clk_rate = DEFAULT_CLK_RATE_HZ;
+
+ core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
+
+ core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
+ core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
+ core_clk_period_in_ns &= MASK_CLK_NS_REG;
+
+ switch (hs) {
+ case FASTAUTO_MODE:
+ case FAST_MODE:
+ if (rate == PA_HS_MODE_A) {
+ if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(hs_fr_table_rA));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
+ } else if (rate == PA_HS_MODE_B) {
+ if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(hs_fr_table_rB));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
+ } else {
+ dev_err(hba->dev, "%s: invalid rate = %d\n",
+ __func__, rate);
+ goto out_error;
+ }
+ break;
+ case SLOWAUTO_MODE:
+ case SLOW_MODE:
+ if (gear > ARRAY_SIZE(pwm_fr_table)) {
+ dev_err(hba->dev,
+ "%s: index %d exceeds table size %zu\n",
+ __func__, gear,
+ ARRAY_SIZE(pwm_fr_table));
+ goto out_error;
+ }
+ tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
+ break;
+ case UNCHANGED:
+ default:
+ dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
+ goto out_error;
+ }
+
+ /* this register 2 fields shall be written at once */
+ ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
+ REG_UFS_TX_SYMBOL_CLK_NS_US);
+ goto out;
+
+out_error:
+ core_clk_rate = 0;
+out:
+ return core_clk_rate;
+}
+
+static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
+{
+ unsigned long core_clk_rate = 0;
+ u32 core_clk_cycles_per_100ms;
+
+ switch (status) {
+ case PRE_CHANGE:
+ core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
+ SLOWAUTO_MODE, 0);
+ if (!core_clk_rate) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ core_clk_cycles_per_100ms =
+ (core_clk_rate / MSEC_PER_SEC) * 100;
+ ufshcd_writel(hba, core_clk_cycles_per_100ms,
+ REG_UFS_PA_LINK_STARTUP_TIMER);
+ break;
+ case POST_CHANGE:
+ ufs_qcom_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
+
+ if (ufs_qcom_is_link_off(hba)) {
+ /*
+ * Disable the tx/rx lane symbol clocks before PHY is
+ * powered down as the PLL source should be disabled
+ * after downstream clocks are disabled.
+ */
+ ufs_qcom_disable_lane_clks(host);
+ phy_power_off(phy);
+
+ /* Assert PHY soft reset */
+ ufs_qcom_assert_reset(hba);
+ goto out;
+ }
+
+ /*
+ * If UniPro link is not active, PHY ref_clk, main PHY analog power
+ * rail and low noise analog power rail for PLL can be switched off.
+ */
+ if (!ufs_qcom_is_link_active(hba))
+ phy_power_off(phy);
+
+out:
+ return ret;
+}
+
+static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ int err;
+
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->is_sys_suspended = false;
+
+out:
+ return err;
+}
+
+struct ufs_qcom_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_qcom_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_qcom_max_hs = false;
+
+ if (dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (qcom_param->desired_working_mode == FAST) {
+ is_qcom_max_hs = true;
+ min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
+ qcom_param->hs_tx_gear);
+ } else {
+ min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
+ qcom_param->pwm_tx_gear);
+ }
+
+ /*
+ * device doesn't support HS but qcom_param->desired_working_mode is
+ * HS, thus device and qcom_param don't agree
+ */
+ if (!is_dev_sup_hs && is_qcom_max_hs) {
+ pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
+ __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_qcom_max_hs) {
+ /*
+ * since device supports HS, it supports FAST_MODE.
+ * since qcom_param->desired_working_mode is also HS
+ * then final decision (FAST/FASTAUTO) is done according
+ * to qcom_params as it is the restricting factor
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ qcom_param->rx_pwr_hs;
+ } else {
+ /*
+ * here qcom_param->desired_working_mode is PWM.
+ * it doesn't matter whether device supports HS or PWM,
+ * in both cases qcom_param->desired_working_mode will
+ * determine the mode
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ qcom_param->rx_pwr_pwm;
+ }
+
+ /*
+ * we would like tx to work in the minimum number of lanes
+ * between device capability and vendor preferences.
+ * the same decision will be made for rx
+ */
+ agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+ qcom_param->tx_lanes);
+ agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+ qcom_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * if both device capabilities and vendor pre-defined preferences are
+ * both HS or both PWM then set the minimum gear to be the chosen
+ * working gear.
+ * if one is PWM and one is HS then the one that is PWM get to decide
+ * what is the gear, as it is the one that also decided previously what
+ * pwr the device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_qcom_max_hs) ||
+ (!is_dev_sup_hs && !is_qcom_max_hs))
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+ min_t(u32, min_dev_gear, min_qcom_gear);
+ else if (!is_dev_sup_hs)
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
+ else
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
+
+ agreed_pwr->hs_rate = qcom_param->hs_rate;
+ return 0;
+}
+
+static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
+{
+ int vote;
+ int err = 0;
+ char mode[BUS_VECTOR_NAME_LEN];
+
+ ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
+
+ vote = ufs_qcom_get_bus_vote(host, mode);
+ if (vote >= 0)
+ err = ufs_qcom_set_bus_vote(host, vote);
+ else
+ err = vote;
+
+ if (err)
+ dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
+ else
+ host->bus_vote.saved_vote = vote;
+ return err;
+}
+
+static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ bool status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ u32 val;
+ struct ufs_qcom_host *host = hba->priv;
+ struct phy *phy = host->generic_phy;
+ struct ufs_qcom_dev_params ufs_qcom_cap;
+ int ret = 0;
+ int res = 0;
+
+ if (!dev_req_params) {
+ pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
+ ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
+ ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
+ ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
+ ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
+ ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
+ ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
+ ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
+ ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
+ ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
+ ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
+ ufs_qcom_cap.desired_working_mode =
+ UFS_QCOM_LIMIT_DESIRED_MODE;
+
+ ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
+ dev_max_params,
+ dev_req_params);
+ if (ret) {
+ pr_err("%s: failed to determine capabilities\n",
+ __func__);
+ goto out;
+ }
+
+ break;
+ case POST_CHANGE:
+ if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+ dev_req_params->pwr_rx,
+ dev_req_params->hs_rate)) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+ /*
+ * we return error code at the end of the routine,
+ * but continue to configure UFS_PHY_TX_LANE_ENABLE
+ * and bus voting as usual
+ */
+ ret = -EINVAL;
+ }
+
+ val = ~(MAX_U32 << dev_req_params->lane_tx);
+ res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
+ if (res) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
+ __func__, res);
+ ret = res;
+ }
+
+ /* cache the power mode parameters to use internally */
+ memcpy(&host->dev_req_params,
+ dev_req_params, sizeof(*dev_req_params));
+ ufs_qcom_update_bus_bw_vote(host);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+/**
+ * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
+ * @hba: host controller instance
+ *
+ * QCOM UFS host controller might have some non standard behaviours (quirks)
+ * than what is specified by UFSHCI specification. Advertise all such
+ * quirks to standard UFS host controller driver so standard takes them into
+ * account.
+ */
+static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ if (host->hw_ver.major == 0x1)
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+
+ if (host->hw_ver.major >= 0x2) {
+ if (!ufs_qcom_cap_qunipro(host))
+ /* Legacy UniPro mode still need following quirks */
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+ }
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ if (host->hw_ver.major >= 0x2)
+ host->caps = UFS_QCOM_CAP_QUNIPRO;
+}
+
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+ const char *speed_mode)
+{
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+ int err;
+ const char *key = "qcom,bus-vector-names";
+
+ if (!speed_mode) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+ err = of_property_match_string(np, key, "MAX");
+ else
+ err = of_property_match_string(np, key, speed_mode);
+
+out:
+ if (err < 0)
+ dev_err(dev, "%s: Invalid %s mode %d\n",
+ __func__, speed_mode, err);
+ return err;
+}
+
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+{
+ int err = 0;
+
+ if (vote != host->bus_vote.curr_vote)
+ host->bus_vote.curr_vote = vote;
+
+ return err;
+}
+
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
+{
+ int gear = max_t(u32, p->gear_rx, p->gear_tx);
+ int lanes = max_t(u32, p->lane_rx, p->lane_tx);
+ int pwr;
+
+ /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
+ if (!gear)
+ gear = 1;
+
+ if (!lanes)
+ lanes = 1;
+
+ if (!p->pwr_rx && !p->pwr_tx) {
+ pwr = SLOWAUTO_MODE;
+ snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
+ } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
+ p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
+ pwr = FAST_MODE;
+ snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
+ p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
+ } else {
+ pwr = SLOW_MODE;
+ snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
+ "PWM", gear, lanes);
+ }
+}
+
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ int err = 0;
+ int vote = 0;
+
+ /*
+ * In case ufs_qcom_init() is not yet done, simply ignore.
+ * This ufs_qcom_setup_clocks() shall be called from
+ * ufs_qcom_init() after init is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
+ if (err) {
+ dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
+ __func__, err);
+ ufs_qcom_phy_disable_iface_clk(host->generic_phy);
+ goto out;
+ }
+ /* enable the device ref clock */
+ ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ /* M-PHY RMMI interface clocks can be turned off */
+ ufs_qcom_phy_disable_iface_clk(host->generic_phy);
+ if (!ufs_qcom_is_link_active(hba)) {
+ /* turn off UFS local PHY ref_clk */
+ ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+ /* disable device ref_clk */
+ ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
+ }
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+out:
+ return err;
+}
+
+static ssize_t
+show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_qcom_host *host = hba->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ host->bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_qcom_host *host = hba->priv;
+ uint32_t value;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ host->bus_vote.is_max_bw_needed = !!value;
+ ufs_qcom_update_bus_bw_vote(host);
+ }
+
+ return count;
+}
+
+static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
+{
+ int err;
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+
+ err = of_property_count_strings(np, "qcom,bus-vector-names");
+ if (err < 0 ) {
+ dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* cache the vote index for minimum and maximum bandwidth */
+ host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
+ host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
+
+ host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
+ host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
+ sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
+ host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+ host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+ err = device_create_file(dev, &host->bus_vote.max_bus_bw);
+out:
+ return err;
+}
+
+#define ANDROID_BOOT_DEV_MAX 30
+static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
+static int get_android_boot_dev(char *str)
+{
+ strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
+ return 1;
+}
+__setup("androidboot.bootdevice=", get_android_boot_dev);
+
+/**
+ * ufs_qcom_init - bind phy with controller
+ * @hba: host controller instance
+ *
+ * Binds PHY with controller and powers up PHY enabling clocks
+ * and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * on phy power up failure and returns zero on success.
+ */
+static int ufs_qcom_init(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct ufs_qcom_host *host;
+
+ if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ err = -ENOMEM;
+ dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+ goto out;
+ }
+
+ host->hba = hba;
+ hba->priv = (void *)host;
+
+ host->generic_phy = devm_phy_get(dev, "ufsphy");
+
+ if (IS_ERR(host->generic_phy)) {
+ err = PTR_ERR(host->generic_phy);
+ dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+ goto out;
+ }
+
+ err = ufs_qcom_bus_register(host);
+ if (err)
+ goto out_host_free;
+
+ ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
+ &host->hw_ver.minor, &host->hw_ver.step);
+
+ /* update phy revision information before calling phy_init() */
+ ufs_qcom_phy_save_controller_version(host->generic_phy,
+ host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+
+ phy_init(host->generic_phy);
+ err = phy_power_on(host->generic_phy);
+ if (err)
+ goto out_unregister_bus;
+
+ err = ufs_qcom_init_lane_clks(host);
+ if (err)
+ goto out_disable_phy;
+
+ ufs_qcom_set_caps(hba);
+ ufs_qcom_advertise_quirks(hba);
+
+ hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
+ hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+
+ ufs_qcom_setup_clocks(hba, true);
+
+ if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
+ ufs_qcom_hosts[hba->dev->id] = host;
+
+ goto out;
+
+out_disable_phy:
+ phy_power_off(host->generic_phy);
+out_unregister_bus:
+ phy_exit(host->generic_phy);
+out_host_free:
+ devm_kfree(dev, host);
+ hba->priv = NULL;
+out:
+ return err;
+}
+
+static void ufs_qcom_exit(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+
+ ufs_qcom_disable_lane_clks(host);
+ phy_power_off(host->generic_phy);
+}
+
+static
+void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = hba->priv;
+ struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
+
+ if (!dev_req_params)
+ return;
+
+ ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+ dev_req_params->pwr_rx,
+ dev_req_params->hs_rate);
+}
+
+/**
+ * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+ .name = "qcom",
+ .init = ufs_qcom_init,
+ .exit = ufs_qcom_exit,
+ .clk_scale_notify = ufs_qcom_clk_scale_notify,
+ .setup_clocks = ufs_qcom_setup_clocks,
+ .hce_enable_notify = ufs_qcom_hce_enable_notify,
+ .link_startup_notify = ufs_qcom_link_startup_notify,
+ .pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .suspend = ufs_qcom_suspend,
+ .resume = ufs_qcom_resume,
+};
+EXPORT_SYMBOL(ufs_hba_qcom_vops);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
new file mode 100644
index 000000000..db2c0a00e
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -0,0 +1,196 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_H_
+#define UFS_QCOM_H_
+
+#define MAX_UFS_QCOM_HOSTS 1
+#define MAX_U32 (~(u32)0)
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+#define HBRN8_POLL_TOUT_MS 100
+#define DEFAULT_CLK_RATE_HZ 1000000
+#define BUS_VECTOR_NAME_LEN 32
+
+#define UFS_HW_VER_MAJOR_SHFT (28)
+#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
+#define UFS_HW_VER_MINOR_SHFT (16)
+#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
+#define UFS_HW_VER_STEP_SHFT (0)
+#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_QCOM_LIMIT_NUM_LANES_RX 2
+#define UFS_QCOM_LIMIT_NUM_LANES_TX 2
+#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G2
+#define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G2
+#define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4
+#define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4
+#define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE
+#define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE
+#define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE
+#define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE
+#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
+#define UFS_QCOM_LIMIT_DESIRED_MODE FAST
+
+/* QCOM UFS host controller vendor specific registers */
+enum {
+ REG_UFS_SYS1CLK_1US = 0xC0,
+ REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
+ REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
+ REG_UFS_PA_ERR_CODE = 0xCC,
+ REG_UFS_RETRY_TIMER_REG = 0xD0,
+ REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
+ REG_UFS_CFG1 = 0xDC,
+ REG_UFS_CFG2 = 0xE0,
+ REG_UFS_HW_VERSION = 0xE4,
+
+ UFS_DBG_RD_REG_UAWM = 0x100,
+ UFS_DBG_RD_REG_UARM = 0x200,
+ UFS_DBG_RD_REG_TXUC = 0x300,
+ UFS_DBG_RD_REG_RXUC = 0x400,
+ UFS_DBG_RD_REG_DFC = 0x500,
+ UFS_DBG_RD_REG_TRLUT = 0x600,
+ UFS_DBG_RD_REG_TMRLUT = 0x700,
+ UFS_UFS_DBG_RD_REG_OCSC = 0x800,
+
+ UFS_UFS_DBG_RD_DESC_RAM = 0x1500,
+ UFS_UFS_DBG_RD_PRDT_RAM = 0x1700,
+ UFS_UFS_DBG_RD_RESP_RAM = 0x1800,
+ UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
+};
+
+/* bit definitions for REG_UFS_CFG2 register */
+#define UAWM_HW_CGC_EN (1 << 0)
+#define UARM_HW_CGC_EN (1 << 1)
+#define TXUC_HW_CGC_EN (1 << 2)
+#define RXUC_HW_CGC_EN (1 << 3)
+#define DFC_HW_CGC_EN (1 << 4)
+#define TRLUT_HW_CGC_EN (1 << 5)
+#define TMRLUT_HW_CGC_EN (1 << 6)
+#define OCSC_HW_CGC_EN (1 << 7)
+
+#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
+ TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
+ DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
+ TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
+
+/* bit offset */
+enum {
+ OFFSET_UFS_PHY_SOFT_RESET = 1,
+ OFFSET_CLK_NS_REG = 10,
+};
+
+/* bit masks */
+enum {
+ MASK_UFS_PHY_SOFT_RESET = 0x2,
+ MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
+ MASK_CLK_NS_REG = 0xFFFC00,
+};
+
+enum ufs_qcom_phy_init_type {
+ UFS_PHY_INIT_FULL,
+ UFS_PHY_INIT_CFG_RESTORE,
+};
+
+static inline void
+ufs_qcom_get_controller_revision(struct ufs_hba *hba,
+ u8 *major, u16 *minor, u16 *step)
+{
+ u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
+
+ *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
+ *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
+ *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+};
+
+static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+ 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+ /*
+ * Make sure assertion of ufs phy reset is written to
+ * register before returning
+ */
+ mb();
+}
+
+static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+{
+ ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
+ 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+
+ /*
+ * Make sure de-assertion of ufs phy reset is written to
+ * register before returning
+ */
+ mb();
+}
+
+struct ufs_qcom_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ int saved_vote;
+ bool is_max_bw_needed;
+ struct device_attribute max_bus_bw;
+};
+
+/* Host controller hardware version: major.minor.step */
+struct ufs_hw_version {
+ u16 step;
+ u16 minor;
+ u8 major;
+};
+struct ufs_qcom_host {
+
+ /*
+ * Set this capability if host controller supports the QUniPro mode
+ * and if driver wants the Host controller to operate in QUniPro mode.
+ * Note: By default this capability will be kept enabled if host
+ * controller supports the QUniPro mode.
+ */
+ #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0)
+ u32 caps;
+
+ struct phy *generic_phy;
+ struct ufs_hba *hba;
+ struct ufs_qcom_bus_vote bus_vote;
+ struct ufs_pa_layer_attr dev_req_params;
+ struct clk *rx_l0_sync_clk;
+ struct clk *tx_l0_sync_clk;
+ struct clk *rx_l1_sync_clk;
+ struct clk *tx_l1_sync_clk;
+ bool is_lane_clks_enabled;
+
+ struct ufs_hw_version hw_ver;
+};
+
+#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
+#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
+#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+
+static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
+{
+ if (host->caps & UFS_QCOM_CAP_QUNIPRO)
+ return true;
+ else
+ return false;
+}
+
+#endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
new file mode 100644
index 000000000..42c459a9d
--- /dev/null
+++ b/drivers/scsi/ufs/ufs.h
@@ -0,0 +1,491 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufs.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFS_H
+#define _UFS_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define MAX_CDB_SIZE 16
+#define GENERAL_UPIU_REQUEST_SIZE 32
+#define QUERY_DESC_MAX_SIZE 255
+#define QUERY_DESC_MIN_SIZE 2
+#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
+ (sizeof(struct utp_upiu_header)))
+
+#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
+ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
+ (byte1 << 8) | (byte0))
+/*
+ * UFS device may have standard LUs and LUN id could be from 0x00 to
+ * 0x7F. Standard LUs use "Peripheral Device Addressing Format".
+ * UFS device may also have the Well Known LUs (also referred as W-LU)
+ * which again could be from 0x00 to 0x7F. For W-LUs, device only use
+ * the "Extended Addressing Format" which means the W-LUNs would be
+ * from 0xc100 (SCSI_W_LUN_BASE) onwards.
+ * This means max. LUN number reported from UFS device could be 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
+#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
+#define UFS_UPIU_WLUN_ID (1 << 7)
+#define UFS_UPIU_MAX_GENERAL_LUN 8
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+ UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
+ UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
+ UFS_UPIU_BOOT_WLUN = 0xB0,
+ UFS_UPIU_RPMB_WLUN = 0xC4,
+};
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+ UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UPIU_TRANSACTION_COMMAND = 0x01,
+ UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UPIU_TRANSACTION_QUERY_REQ = 0x16,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+ UPIU_TRANSACTION_NOP_IN = 0x20,
+ UPIU_TRANSACTION_RESPONSE = 0x21,
+ UPIU_TRANSACTION_DATA_IN = 0x22,
+ UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UPIU_TRANSACTION_READY_XFER = 0x31,
+ UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
+};
+
+/* UPIU Read/Write flags */
+enum {
+ UPIU_CMD_FLAGS_NONE = 0x00,
+ UPIU_CMD_FLAGS_WRITE = 0x20,
+ UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UPIU_TASK_ATTR_ORDERED = 0x01,
+ UPIU_TASK_ATTR_HEADQ = 0x02,
+ UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UPIU Query request function */
+enum {
+ UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests*/
+enum flag_idn {
+ QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
+ QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
+ QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+};
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+ QUERY_DESC_IDN_DEVICE = 0x0,
+ QUERY_DESC_IDN_CONFIGURAION = 0x1,
+ QUERY_DESC_IDN_UNIT = 0x2,
+ QUERY_DESC_IDN_RFU_0 = 0x3,
+ QUERY_DESC_IDN_INTERCONNECT = 0x4,
+ QUERY_DESC_IDN_STRING = 0x5,
+ QUERY_DESC_IDN_RFU_1 = 0x6,
+ QUERY_DESC_IDN_GEOMETRY = 0x7,
+ QUERY_DESC_IDN_POWER = 0x8,
+ QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+ QUERY_DESC_LENGTH_OFFSET = 0x00,
+ QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
+enum ufs_desc_max_size {
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
+ QUERY_DESC_UNIT_MAX_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
+ /*
+ * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
+ * of descriptor header.
+ */
+ QUERY_DESC_STRING_MAX_SIZE = 0xFE,
+ QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
+ QUERY_DESC_POWER_MAX_SIZE = 0x62,
+ QUERY_DESC_RFU_MAX_SIZE = 0x00,
+};
+
+/* Unit descriptor parameters offsets in bytes*/
+enum unit_desc_param {
+ UNIT_DESC_PARAM_LEN = 0x0,
+ UNIT_DESC_PARAM_TYPE = 0x1,
+ UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
+ UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
+ UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+ UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
+ UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+};
+
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn =1
+ * 02h: LU permanently write protected when fPermanentWPEn =1
+ */
+enum ufs_lu_wp_type {
+ UFS_LU_NO_WP = 0x00,
+ UFS_LU_POWER_ON_WP = 0x01,
+ UFS_LU_PERM_WP = 0x02,
+};
+
+/* bActiveICCLevel parameter current units */
+enum {
+ UFSHCD_NANO_AMP = 0,
+ UFSHCD_MICRO_AMP = 1,
+ UFSHCD_MILI_AMP = 2,
+ UFSHCD_AMP = 3,
+};
+
+#define POWER_DESC_MAX_SIZE 0x62
+#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
+
+/* Attribute bActiveICCLevel parameter bit masks definitions */
+#define ATTR_ICC_LVL_UNIT_OFFSET 14
+#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
+#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
+
+/* Power descriptor parameters offsets in bytes */
+enum power_desc_param_offset {
+ PWR_DESC_LEN = 0x0,
+ PWR_DESC_TYPE = 0x1,
+ PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
+ PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
+ PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
+};
+
+/* Exception event mask values */
+enum {
+ MASK_EE_STATUS = 0xFFFF,
+ MASK_EE_URGENT_BKOPS = (1 << 2),
+};
+
+/* Background operation status */
+enum bkops_status {
+ BKOPS_STATUS_NO_OP = 0x0,
+ BKOPS_STATUS_NON_CRITICAL = 0x1,
+ BKOPS_STATUS_PERF_IMPACT = 0x2,
+ BKOPS_STATUS_CRITICAL = 0x3,
+ BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+ UPIU_QUERY_OPCODE_NOP = 0x0,
+ UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* Query response result code */
+enum {
+ QUERY_RESULT_SUCCESS = 0x00,
+ QUERY_RESULT_NOT_READABLE = 0xF6,
+ QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ QUERY_RESULT_INVALID_VALUE = 0xFA,
+ QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ QUERY_RESULT_INVALID_INDEX = 0xFC,
+ QUERY_RESULT_INVALID_IDN = 0xFD,
+ QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+};
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+ UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+/* UTP Transfer Request Command Offset */
+#define UPIU_COMMAND_TYPE_OFFSET 28
+
+/* Offset of the response code in the UPIU header */
+#define UPIU_RSP_CODE_OFFSET 8
+
+enum {
+ MASK_SCSI_STATUS = 0xFF,
+ MASK_TASK_RESPONSE = 0xFF00,
+ MASK_RSP_UPIU_RESULT = 0xFFFF,
+ MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_EXCEPTION_EVENT = 0x10000,
+};
+
+/* Task management service response */
+enum {
+ UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+ UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+ UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+ UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+ UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+ UFS_ACTIVE_PWR_MODE = 1,
+ UFS_SLEEP_PWR_MODE = 2,
+ UFS_POWERDOWN_PWR_MODE = 3,
+};
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @data_transfer_len: Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ __be32 exp_data_transfer_len;
+ u8 cdb[MAX_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ u8 opcode;
+ u8 idn;
+ u8 index;
+ u8 selector;
+ __be16 reserved_osf;
+ __be16 length;
+ __be32 value;
+ __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ };
+};
+
+/**
+ * struct utp_cmd_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_cmd_rsp {
+ __be32 residual_transfer_count;
+ __be32 reserved[4];
+ __be16 sense_data_len;
+ u8 sense_data[18];
+};
+
+/**
+ * struct utp_upiu_rsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ union {
+ struct utp_cmd_rsp sr;
+ struct utp_upiu_query qr;
+ };
+};
+
+/**
+ * struct utp_upiu_task_req - Task request UPIU structure
+ * @header - UPIU header structure DW0 to DW-2
+ * @input_param1: Input parameter 1 DW-3
+ * @input_param2: Input parameter 2 DW-4
+ * @input_param3: Input parameter 3 DW-5
+ * @reserved: Reserved double words DW-6 to DW-7
+ */
+struct utp_upiu_task_req {
+ struct utp_upiu_header header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_task_rsp - Task Management Response UPIU structure
+ * @header: UPIU header structure DW0-DW-2
+ * @output_param1: Ouput parameter 1 DW3
+ * @output_param2: Output parameter 2 DW4
+ * @reserved: Reserved double words DW-5 to DW-7
+ */
+struct utp_upiu_task_rsp {
+ struct utp_upiu_header header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 reserved[3];
+};
+
+/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_resp - UPIU QUERY
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ u8 response;
+ struct utp_upiu_query upiu_res;
+};
+
+#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */
+#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
+#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
+#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */
+#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
+
+/*
+ * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
+ * and link is in Hibern8 state.
+ */
+#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
+
+struct ufs_vreg {
+ struct regulator *reg;
+ const char *name;
+ bool enabled;
+ int min_uV;
+ int max_uV;
+ int min_uA;
+ int max_uA;
+};
+
+struct ufs_vreg_info {
+ struct ufs_vreg *vcc;
+ struct ufs_vreg *vccq;
+ struct ufs_vreg *vccq2;
+ struct ufs_vreg *vdd_hba;
+};
+
+struct ufs_dev_info {
+ bool f_power_on_wp_en;
+ /* Keeps information if any of the LU is power on write protected */
+ bool is_lu_power_on_wp;
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644
index 000000000..d15eaa466
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -0,0 +1,192 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_suspend(struct device *dev)
+{
+ return ufshcd_system_suspend(dev_get_drvdata(dev));
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_resume(struct device *dev)
+{
+ return ufshcd_system_resume(dev_get_drvdata(dev));
+}
+
+static int ufshcd_pci_runtime_suspend(struct device *dev)
+{
+ return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+}
+static int ufshcd_pci_runtime_resume(struct device *dev)
+{
+ return ufshcd_runtime_resume(dev_get_drvdata(dev));
+}
+static int ufshcd_pci_runtime_idle(struct device *dev)
+{
+ return ufshcd_runtime_idle(dev_get_drvdata(dev));
+}
+#else /* !CONFIG_PM */
+#define ufshcd_pci_suspend NULL
+#define ufshcd_pci_resume NULL
+#define ufshcd_pci_runtime_suspend NULL
+#define ufshcd_pci_runtime_resume NULL
+#define ufshcd_pci_runtime_idle NULL
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
+ * data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ ufshcd_remove(hba);
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request and iomap failed\n");
+ return err;
+ }
+
+ mmio_base = pcim_iomap_table(pdev)[0];
+
+ err = ufshcd_alloc_host(&pdev->dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ return err;
+ }
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ return err;
+ }
+
+ pci_set_drvdata(pdev, hba);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ufshcd_pci_pm_ops = {
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .runtime_suspend = ufshcd_pci_runtime_suspend,
+ .runtime_resume = ufshcd_pci_runtime_resume,
+ .runtime_idle = ufshcd_pci_runtime_idle,
+};
+
+static const struct pci_device_id ufshcd_pci_tbl[] = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_pci_probe,
+ .remove = ufshcd_pci_remove,
+ .shutdown = ufshcd_pci_shutdown,
+ .driver = {
+ .pm = &ufshcd_pci_pm_ops
+ },
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644
index 000000000..7db9564f5
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -0,0 +1,404 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+
+#include "ufshcd.h"
+
+static const struct of_device_id ufs_of_match[];
+static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev)
+{
+ if (dev->of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(ufs_of_match, dev->of_node);
+ if (match)
+ return (struct ufs_hba_variant_ops *)match->data;
+ }
+
+ return NULL;
+}
+
+static int ufshcd_parse_clock_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+ int cnt;
+ int i;
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+ char *name;
+ u32 *clkfreq = NULL;
+ struct ufs_clk_info *clki;
+ int len = 0;
+ size_t sz = 0;
+
+ if (!np)
+ goto out;
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+ cnt = of_property_count_strings(np, "clock-names");
+ if (!cnt || (cnt == -EINVAL)) {
+ dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+ __func__);
+ } else if (cnt < 0) {
+ dev_err(dev, "%s: count clock strings failed, err %d\n",
+ __func__, cnt);
+ ret = cnt;
+ }
+
+ if (cnt <= 0)
+ goto out;
+
+ if (!of_get_property(np, "freq-table-hz", &len)) {
+ dev_info(dev, "freq-table-hz property not specified\n");
+ goto out;
+ }
+
+ if (len <= 0)
+ goto out;
+
+ sz = len / sizeof(*clkfreq);
+ if (sz != 2 * cnt) {
+ dev_err(dev, "%s len mismatch\n", "freq-table-hz");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
+ GFP_KERNEL);
+ if (!clkfreq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32_array(np, "freq-table-hz",
+ clkfreq, sz);
+ if (ret && (ret != -EINVAL)) {
+ dev_err(dev, "%s: error reading array %d\n",
+ "freq-table-hz", ret);
+ return ret;
+ }
+
+ for (i = 0; i < sz; i += 2) {
+ ret = of_property_read_string_index(np,
+ "clock-names", i/2, (const char **)&name);
+ if (ret)
+ goto out;
+
+ clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+ if (!clki) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ clki->min_freq = clkfreq[i];
+ clki->max_freq = clkfreq[i+1];
+ clki->name = kstrdup(name, GFP_KERNEL);
+ dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
+ clki->min_freq, clki->max_freq, clki->name);
+ list_add_tail(&clki->list, &hba->clk_list_head);
+ }
+out:
+ return ret;
+}
+
+#define MAX_PROP_SIZE 32
+static int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->name = kstrdup(name, GFP_KERNEL);
+
+ /* if fixed regulator no need further initialization */
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
+ if (of_property_read_bool(np, prop_name))
+ goto out;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+ if (ret) {
+ dev_err(dev, "%s: unable to find %s err %d\n",
+ __func__, prop_name, ret);
+ goto out_free;
+ }
+
+ vreg->min_uA = 0;
+ if (!strcmp(name, "vcc")) {
+ if (of_property_read_bool(np, "vcc-supply-1p8")) {
+ vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
+ } else {
+ vreg->min_uV = UFS_VREG_VCC_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCC_MAX_UV;
+ }
+ } else if (!strcmp(name, "vccq")) {
+ vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
+ } else if (!strcmp(name, "vccq2")) {
+ vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
+ vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
+ }
+
+ goto out;
+
+out_free:
+ devm_kfree(dev, vreg);
+ vreg = NULL;
+out:
+ if (!ret)
+ *out_vreg = vreg;
+ return ret;
+}
+
+/**
+ * ufshcd_parse_regulator_info - get regulator info from device tree
+ * @hba: per adapter instance
+ *
+ * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
+ * If any of the supplies are not defined it is assumed that they are always-on
+ * and hence return zero. If the property is defined but parsing is failed
+ * then return corresponding error.
+ */
+static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
+{
+ int err;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+ if (err)
+ goto out;
+
+ err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+out:
+ return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+ return ufshcd_system_suspend(dev_get_drvdata(dev));
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+ return ufshcd_system_resume(dev_get_drvdata(dev));
+}
+
+static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
+{
+ return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+}
+static int ufshcd_pltfrm_runtime_resume(struct device *dev)
+{
+ return ufshcd_runtime_resume(dev_get_drvdata(dev));
+}
+static int ufshcd_pltfrm_runtime_idle(struct device *dev)
+{
+ return ufshcd_runtime_idle(dev_get_drvdata(dev));
+}
+#else /* !CONFIG_PM */
+#define ufshcd_pltfrm_suspend NULL
+#define ufshcd_pltfrm_resume NULL
+#define ufshcd_pltfrm_runtime_suspend NULL
+#define ufshcd_pltfrm_runtime_resume NULL
+#define ufshcd_pltfrm_runtime_idle NULL
+#endif /* CONFIG_PM */
+
+static void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ struct resource *mem_res;
+ int irq, err;
+ struct device *dev = &pdev->dev;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio_base = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(*(void **)&mmio_base)) {
+ err = PTR_ERR(*(void **)&mmio_base);
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "IRQ resource not available\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = ufshcd_alloc_host(dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ goto out;
+ }
+
+ hba->vops = get_variant_ops(&pdev->dev);
+
+ err = ufshcd_parse_clock_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: clock parse failed %d\n",
+ __func__, err);
+ goto out;
+ }
+ err = ufshcd_parse_regulator_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: regulator init failed %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ err = ufshcd_init(hba, mmio_base, irq);
+ if (err) {
+ dev_err(dev, "Intialization failed\n");
+ goto out_disable_rpm;
+ }
+
+ platform_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+ { .compatible = "jedec,ufs-1.1"},
+ {},
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+ .probe = ufshcd_pltfrm_probe,
+ .remove = ufshcd_pltfrm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd",
+ .pm = &ufshcd_dev_pm_ops,
+ .of_match_table = ufs_of_match,
+ },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
new file mode 100644
index 000000000..648a44675
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -0,0 +1,5583 @@
+/*
+ * Universal Flash Storage Host controller driver Core
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
+ */
+
+#include <linux/async.h>
+#include <linux/devfreq.h>
+
+#include "ufshcd.h"
+#include "unipro.h"
+
+#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
+ UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK)
+/* UIC command timeout, unit: ms */
+#define UIC_CMD_TIMEOUT 500
+
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 10
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 30 /* msec */
+
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT 100 /* msecs */
+
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO 0x02
+
+#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
+ ({ \
+ int _ret; \
+ if (_on) \
+ _ret = ufshcd_enable_vreg(_dev, _vreg); \
+ else \
+ _ret = ufshcd_disable_vreg(_dev, _vreg); \
+ _ret; \
+ })
+
+static u32 ufs_query_desc_max_size[] = {
+ QUERY_DESC_DEVICE_MAX_SIZE,
+ QUERY_DESC_CONFIGURAION_MAX_SIZE,
+ QUERY_DESC_UNIT_MAX_SIZE,
+ QUERY_DESC_RFU_MAX_SIZE,
+ QUERY_DESC_INTERCONNECT_MAX_SIZE,
+ QUERY_DESC_STRING_MAX_SIZE,
+ QUERY_DESC_RFU_MAX_SIZE,
+ QUERY_DESC_GEOMETRY_MAZ_SIZE,
+ QUERY_DESC_POWER_MAX_SIZE,
+ QUERY_DESC_RFU_MAX_SIZE,
+};
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+ UFSHCD_CMD_PER_LUN = 32,
+ UFSHCD_CAN_QUEUE = 32,
+};
+
+/* UFSHCD states */
+enum {
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_ERROR,
+ UFSHCD_STATE_OPERATIONAL,
+};
+
+/* UFSHCD error handling flags */
+enum {
+ UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+ UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+};
+
+/* Interrupt configuration options */
+enum {
+ UFSHCD_INT_DISABLE,
+ UFSHCD_INT_ENABLE,
+ UFSHCD_INT_CLEAR,
+};
+
+#define ufshcd_set_eh_in_progress(h) \
+ (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+ (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+ (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+ return ufs_pm_lvl_states[lvl].link_state;
+}
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode);
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->is_irq_enabled) {
+ ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+ hba);
+ if (ret)
+ dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+ __func__, ret);
+ hba->is_irq_enabled = true;
+ }
+
+ return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+ if (hba->is_irq_enabled) {
+ free_irq(hba->irq, hba);
+ hba->is_irq_enabled = false;
+ }
+}
+
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba - per-adapter interface
+ * @reg - mmio register offset
+ * @mask - mask to apply to read register value
+ * @val - wait condition
+ * @interval_us - polling interval in microsecs
+ * @timeout_ms - timeout in millisecs
+ *
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us, unsigned long timeout_ms)
+{
+ int err = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ /* wakeup within 50us of expiry */
+ usleep_range(interval_us, interval_us + 50);
+
+ if (time_after(jiffies, timeout)) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_intr_mask - Get the interrupt bit mask
+ * @hba - Pointer to adapter instance
+ *
+ * Returns interrupt bit mask per version
+ */
+static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
+{
+ if (hba->ufs_version == UFSHCI_VERSION_10)
+ return INTERRUPT_MASK_ALL_VER_10;
+ else
+ return INTERRUPT_MASK_ALL_VER_11;
+}
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba - Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device connected to
+ * the host controller
+ * @hba: pointer to adapter instance
+ *
+ * Returns 1 if device present, 0 if no device detected
+ */
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+ DEVICE_PRESENT) ? 1 : 0;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrb: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+ return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+ return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
+ *
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if free slot is not available, else return 1 with tag value
+ * in @free_slot.
+ */
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
+{
+ int tag;
+ bool ret = false;
+
+ if (!free_slot)
+ goto out;
+
+ do {
+ tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+ if (tag >= hba->nutmrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+ *free_slot = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+ clear_bit_unlock(slot, &hba->tm_slots_in_use);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns integer, 0 on Success and positive value if failed
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ /*
+ * The mask 0xFF is for the following HCS register bits
+ * Bit Description
+ * 0 Device Present
+ * 1 UTRLRDY
+ * 2 UTMRLRDY
+ * 3 UCRDY
+ * 4 HEI
+ * 5 DEI
+ * 6-7 reserved
+ */
+ return (((reg) & (0xFF)) >> 1) ^ (0x07);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets UIC command argument3
+ * Returns 0 on success, non zero value on error
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
+ */
+static inline int
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
+/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
+ * @hba: per adapter instance
+ */
+static inline void
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THLD_VAL(cnt) |
+ INT_AGGR_TIMEOUT_VAL(tmout),
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers,
+ * When run-stop registers are set to 1, it indicates the
+ * host controller that it can process the requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP);
+ ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns zero if controller is active, 1 otherwise
+ */
+static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+}
+
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.ungate_work);
+
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == CLKS_ON) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto unblock_reqs;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_setup_clocks(hba, true);
+
+ /* Exit from hibern8 */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ /* Prevent gating in this path */
+ hba->clk_gating.is_suspended = true;
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ else
+ ufshcd_set_link_active(hba);
+ }
+ hba->clk_gating.is_suspended = false;
+ }
+unblock_reqs:
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+ scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+ goto out;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+
+start:
+ switch (hba->clk_gating.state) {
+ case CLKS_ON:
+ break;
+ case REQ_CLKS_OFF:
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ hba->clk_gating.state = CLKS_ON;
+ break;
+ }
+ /*
+ * If we here, it means gating work is either done or
+ * currently running. Hence, fall through to cancel gating
+ * work and to enable clocks.
+ */
+ case CLKS_OFF:
+ scsi_block_requests(hba->host);
+ hba->clk_gating.state = REQ_CLKS_ON;
+ schedule_work(&hba->clk_gating.ungate_work);
+ /*
+ * fall through to check if we should wait for this
+ * work to be done or not.
+ */
+ case REQ_CLKS_ON:
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ /* Make sure state is CLKS_ON before returning */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ default:
+ dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ __func__, hba->clk_gating.state);
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return rc;
+}
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_gating.gate_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.is_suspended) {
+ hba->clk_gating.state = CLKS_ON;
+ goto rel_lock;
+ }
+
+ if (hba->clk_gating.active_reqs
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ goto rel_lock;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* put the link into hibern8 mode before turning off clocks */
+ if (ufshcd_can_hibern8_during_gating(hba)) {
+ if (ufshcd_uic_hibern8_enter(hba)) {
+ hba->clk_gating.state = CLKS_ON;
+ goto out;
+ }
+ ufshcd_set_link_hibern8(hba);
+ }
+
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case keep the state
+ * as REQ_CLKS_ON which would anyway imply that clocks are off
+ * and a request to turn them on is pending. By doing this way,
+ * we keep the state machine in tact and this would ultimately
+ * prevent from doing cancel work multiple times when there are
+ * new requests arriving before the current cancel work is done.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_gating.state == REQ_CLKS_OFF)
+ hba->clk_gating.state = CLKS_OFF;
+
+rel_lock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.active_reqs--;
+
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ return;
+
+ hba->clk_gating.state = REQ_CLKS_OFF;
+ schedule_delayed_work(&hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+
+ hba->clk_gating.delay_ms = 150;
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkgating_allowed(hba))
+ return;
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ cancel_work_sync(&hba->clk_gating.ungate_work);
+ cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return;
+
+ if (!hba->clk_scaling.is_busy_started) {
+ hba->clk_scaling.busy_start_t = ktime_get();
+ hba->clk_scaling.is_busy_started = true;
+ }
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return;
+
+ if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+ scaling->busy_start_t = ktime_set(0, 0);
+ scaling->is_busy_started = false;
+ }
+}
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+ ufshcd_clk_scaling_start_busy(hba);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrb - pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+ int len;
+ if (lrbp->sense_buffer &&
+ ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ memcpy(lrbp->sense_buffer,
+ lrbp->ucd_rsp_ptr->sr.sense_data,
+ min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ }
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ * @hba: per adapter instance
+ * @lrb - pointer to local reference block
+ */
+static
+int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+
+ /* Get the descriptor */
+ if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 resp_len;
+ u16 buf_len;
+
+ /* data segment length */
+ resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+ buf_len = be16_to_cpu(
+ hba->dev_cmd.query.request.upiu_req.length);
+ if (likely(buf_len >= resp_len)) {
+ memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
+ } else {
+ dev_warn(hba->dev,
+ "%s: Response size is bigger than buffer",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+ hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+
+ /* nutrs and nutmrs are 0 based values */
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutmrs =
+ ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_ready_for_uic_cmd - Check if controller is ready
+ * to accept UIC commands
+ * @hba: per adapter instance
+ * Return true on success, else false
+ */
+static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+{
+ if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of HCS register
+ * Returns value of UPMCRS field
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
+ * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Mutex must be held.
+ */
+static inline void
+ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ WARN_ON(hba->active_uic_cmd);
+
+ hba->active_uic_cmd = uic_cmd;
+
+ /* Write Args */
+ ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
+ ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
+ ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
+
+ /* Write UIC Cmd */
+ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
+ REG_UIC_COMMAND);
+}
+
+/**
+ * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
+ * @hba: per adapter instance
+ * @uic_command: UIC command
+ *
+ * Must be called with mutex held.
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ if (wait_for_completion_timeout(&uic_cmd->done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT)))
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ else
+ ret = -ETIMEDOUT;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+/**
+ * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
+ * with mutex held and host_lock locked.
+ * Returns 0 only if success.
+ */
+static int
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ if (!ufshcd_ready_for_uic_cmd(hba)) {
+ dev_err(hba->dev,
+ "Controller not ready to accept UIC commands\n");
+ return -EIO;
+ }
+
+ init_completion(&uic_cmd->done);
+
+ ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+
+ return 0;
+}
+
+/**
+ * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
+ * @hba: per adapter instance
+ * @uic_cmd: UIC command
+ *
+ * Returns 0 only if success.
+ */
+static int
+ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+{
+ int ret;
+ unsigned long flags;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->uic_cmd_mutex);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!ret)
+ ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ ufshcd_release(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @lrbp - pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+{
+ struct ufshcd_sg_entry *prd_table;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmd;
+ int sg_segments;
+ int i;
+
+ cmd = lrbp->cmd;
+ sg_segments = scsi_dma_map(cmd);
+ if (sg_segments < 0)
+ return sg_segments;
+
+ if (sg_segments) {
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
+
+ prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+ scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ prd_table[i].size =
+ cpu_to_le32(((u32) sg_dma_len(sg))-1);
+ prd_table[i].base_addr =
+ cpu_to_le32(lower_32_bits(sg->dma_address));
+ prd_table[i].upper_addr =
+ cpu_to_le32(upper_32_bits(sg->dma_address));
+ }
+ } else {
+ lrbp->utr_descriptor_ptr->prd_table_length = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = set & INTERRUPT_MASK_RW_VER_10;
+ set = rw | ((set ^ intrs) & intrs);
+ } else {
+ set |= intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (hba->ufs_version == UFSHCI_VERSION_10) {
+ u32 rw;
+ rw = (set & INTERRUPT_MASK_RW_VER_10) &
+ ~(intrs & INTERRUPT_MASK_RW_VER_10);
+ set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
+
+ } else {
+ set &= ~intrs;
+ }
+
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * descriptor according to request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: requests data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (lrbp->command_type
+ << UPIU_COMMAND_TYPE_OFFSET);
+ if (lrbp->intr_cmd)
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+
+ /*
+ * assigning invalid value for command status. Controller
+ * updates OCS on command completion, with the command
+ * status
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
+ * for scsi commands
+ * @lrbp - local reference block pointer
+ * @upiu_flags - flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_COMMAND, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->sdb.length);
+
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
+ (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
+ * for query requsts
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = be16_to_cpu(query->request.upiu_req.length);
+ u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ 0, query->request.query_func, 0, 0);
+
+ /* Data segment length */
+ ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
+ 0, 0, len >> 8, (u8)len);
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+ QUERY_OSF_SIZE);
+
+ /* Copy the Descriptor */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ memcpy(descp, query->descriptor, len);
+
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+}
+
+/**
+ * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * @hba - per adapter instance
+ * @lrb - pointer to local reference block
+ */
+static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ switch (lrbp->command_type) {
+ case UTP_CMD_TYPE_SCSI:
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case UTP_CMD_TYPE_DEV_MANAGE:
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(
+ hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
+ break;
+ case UTP_CMD_TYPE_UFS:
+ /* For UFS native command implementation */
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "%s: UFS native command are not supported\n",
+ __func__);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
+ __func__, lrbp->command_type);
+ break;
+ } /* end of switch */
+
+ return ret;
+}
+
+/*
+ * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
+ * @scsi_lun: scsi LUN id
+ *
+ * Returns UPIU LUN id
+ */
+static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
+{
+ if (scsi_is_wlun(scsi_lun))
+ return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
+ | UFS_UPIU_WLUN_ID;
+ else
+ return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @scsi_lun: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+ return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @cmd: command from SCSI Midlayer
+ * @done: call back function
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ int tag;
+ int err = 0;
+
+ hba = shost_priv(host);
+
+ tag = cmd->request->tag;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ break;
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* acquire the tag to make sure device cmds don't use it */
+ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+ /*
+ * Dev manage command in progress, requeue the command.
+ * Requeuing the command helps in cases where the request *may*
+ * find different tag instead of waiting for dev manage command
+ * completion.
+ */
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ err = ufshcd_hold(hba, true);
+ if (err) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+ lrbp = &hba->lrb[tag];
+
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = cmd;
+ lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_buffer = cmd->sense_buffer;
+ lrbp->task_tag = tag;
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ lrbp->intr_cmd = false;
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+ /* form UPIU before issuing the command */
+ ufshcd_compose_upiu(hba, lrbp);
+ err = ufshcd_map_sg(lrbp);
+ if (err) {
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ goto out;
+ }
+
+ /* issue command to the controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return err;
+}
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ lrbp->intr_cmd = true; /* No interrupt aggregation */
+ hba->dev_cmd.type = cmd_type;
+
+ return ufshcd_compose_upiu(hba, lrbp);
+}
+
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ unsigned long flags;
+ u32 mask = 1 << tag;
+
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * wait for for h/w to clear corresponding bit in door-bell.
+ * max. wait is 1 sec.
+ */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000);
+
+ return err;
+}
+
+static int
+ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+ return query_res->response;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int resp;
+ int err = 0;
+
+ resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n",
+ __func__, resp);
+ }
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ err = ufshcd_check_query_response(hba, lrbp);
+ if (!err)
+ err = ufshcd_copy_query_response(hba, lrbp);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ break;
+ }
+
+ return err;
+}
+
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, int max_timeout)
+{
+ int err = 0;
+ unsigned long time_left;
+ unsigned long flags;
+
+ time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ msecs_to_jiffies(max_timeout));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->dev_cmd.complete = NULL;
+ if (likely(time_left)) {
+ err = ufshcd_get_tr_ocs(lrbp);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!time_left) {
+ err = -ETIMEDOUT;
+ if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ /* sucessfully cleared the command, retry if needed */
+ err = -EAGAIN;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag: pointer to variable with available slot value
+ *
+ * Get a free slot and lock it until device management command
+ * completes.
+ *
+ * Returns false if free slot is unavailable for locking, else
+ * return true with tag value in @tag.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+ int tag;
+ bool ret = false;
+ unsigned long tmp;
+
+ if (!tag_out)
+ goto out;
+
+ do {
+ tmp = ~hba->lrb_in_use;
+ tag = find_last_bit(&tmp, hba->nutrs);
+ if (tag >= hba->nutrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+ *tag_out = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba - UFS hba
+ * @cmd_type - specifies the type (NOP, Query...)
+ * @timeout - time in seconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * it is expected you hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type, int timeout)
+{
+ struct ufshcd_lrb *lrbp;
+ int err;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by SCSI request timeout.
+ */
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out_put_tag;
+
+ hba->dev_cmd.complete = &wait;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+out_put_tag:
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ return err;
+}
+
+/**
+ * ufshcd_init_query() - init the query response and request parameters
+ * @hba: per-adapter instance
+ * @request: address of the request pointer to be initialized
+ * @response: address of the response pointer to be initialized
+ * @opcode: operation to perform
+ * @idn: flag idn to access
+ * @index: LU number to access
+ * @selector: query/flag/descriptor further identification
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+ struct ufs_query_req **request, struct ufs_query_res **response,
+ enum query_opcode opcode, u8 idn, u8 index, u8 selector)
+{
+ *request = &hba->dev_cmd.query.request;
+ *response = &hba->dev_cmd.query.response;
+ memset(*request, 0, sizeof(struct ufs_query_req));
+ memset(*response, 0, sizeof(struct ufs_query_res));
+ (*request)->upiu_req.opcode = opcode;
+ (*request)->upiu_req.idn = idn;
+ (*request)->upiu_req.index = index;
+ (*request)->upiu_req.selector = selector;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * hba: per-adapter instance
+ * query_opcode: flag query to perform
+ * idn: flag idn to access
+ * flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err, index = 0, selector = 0;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (be32_to_cpu(response->upiu_res.value) &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+*/
+static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = cpu_to_be32(*attr_val);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+ __func__, opcode, idn, err);
+ goto out_unlock;
+ }
+
+ *attr_val = be32_to_cpu(response->upiu_res.value);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_query_descriptor - API function for sending descriptor requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * desc_buf: the buffer that contains the descriptor
+ * buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+static int ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ ufshcd_hold(hba, false);
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+ hba->dev_cmd.query.descriptor = desc_buf;
+ request->upiu_req.length = cpu_to_be16(*buf_len);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+ __func__, opcode, idn, err);
+ goto out_unlock;
+ }
+
+ hba->dev_cmd.query.descriptor = NULL;
+ *buf_len = be16_to_cpu(response->upiu_res.length);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u32 param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ int ret;
+ u8 *desc_buf;
+ u32 buff_len;
+ bool is_kmalloc = true;
+
+ /* safety checks */
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ buff_len = ufs_query_desc_max_size[desc_id];
+ if ((param_offset + param_size) > buff_len)
+ return -EINVAL;
+
+ if (!param_offset && (param_size == buff_len)) {
+ /* memory space already available to hold full descriptor */
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
+ } else {
+ /* allocate memory to hold full descriptor */
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
+ }
+
+ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, desc_buf,
+ &buff_len);
+
+ if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+ (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+ ufs_query_desc_max_size[desc_id])
+ || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+ __func__, desc_id, param_offset, buff_len, ret);
+ if (!ret)
+ ret = -EINVAL;
+
+ goto out;
+ }
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+ if (is_kmalloc)
+ kfree(desc_buf);
+ return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+ u8 *buf,
+ u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+ int lun,
+ enum unit_desc_param param_offset,
+ u8 *param_read_buf,
+ u32 param_size)
+{
+ /*
+ * Unit descriptors are only available for general purpose LUs (LUN id
+ * from 0 to 7) and RPMB Well known LU.
+ */
+ if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+ return -EOPNOTSUPP;
+
+ return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+ param_offset, param_read_buf, param_size);
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ * (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ /* Allocate memory for UTP command descriptors */
+ ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
+ ucdl_size,
+ &hba->ucdl_dma_addr,
+ GFP_KERNEL);
+
+ /*
+ * UFSHCI requires UTP command descriptor to be 128 byte aligned.
+ * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
+ * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
+ * be aligned to 128 bytes as well
+ */
+ if (!hba->ucdl_base_addr ||
+ WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Command Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Transfer descriptors
+ * UFSHCI requires 1024 byte alignment of UTRD
+ */
+ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utrdl_size,
+ &hba->utrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utrdl_base_addr ||
+ WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Transfer Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Task Management descriptors
+ * UFSHCI requires 1024 byte alignment of UTMRD
+ */
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
+ utmrdl_size,
+ &hba->utmrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utmrdl_base_addr ||
+ WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(hba->dev,
+ "Task Management Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /* Allocate memory for local reference block */
+ hba->lrb = devm_kzalloc(hba->dev,
+ hba->nutrs * sizeof(struct ufshcd_lrb),
+ GFP_KERNEL);
+ if (!hba->lrb) {
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
+ goto out;
+ }
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_cmd_desc *cmd_descp;
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ dma_addr_t cmd_desc_element_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+ int cmd_desc_size;
+ int i;
+
+ utrdlp = hba->utrdl_base_addr;
+ cmd_descp = hba->ucdl_base_addr;
+
+ response_offset =
+ offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset =
+ offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+ for (i = 0; i < hba->nutrs; i++) {
+ /* Configure UTRD with command descriptor base address */
+ cmd_desc_element_addr =
+ (cmd_desc_dma_addr + (cmd_desc_size * i));
+ utrdlp[i].command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+ /* Response upiu and prdt offset should be in double words */
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16((response_offset >> 2));
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16((prdt_offset >> 2));
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+
+ hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].ucd_req_ptr =
+ (struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ }
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev,
+ "dme-link-startup: error code %d\n", ret);
+ return ret;
+}
+
+static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+{
+ #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
+ unsigned long min_sleep_time_us;
+
+ if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
+ return;
+
+ /*
+ * last_dme_cmd_tstamp will be 0 only for 1st call to
+ * this function
+ */
+ if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
+ min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
+ } else {
+ unsigned long delta =
+ (unsigned long) ktime_to_us(
+ ktime_sub(ktime_get(),
+ hba->last_dme_cmd_tstamp));
+
+ if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+ return; /* no more delay required */
+ }
+
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+}
+
+/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret) {
+ dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ goto out;
+ }
+
+ if (mib_val)
+ *mib_val = uic_cmd.argument3;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
+ * and device UniPro link and hence it's final completion would be indicated by
+ * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
+ * addition to normal UIC command completion Status (UCCS). This function only
+ * returns after the relevant status bits indicate the completion.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+ struct completion uic_async_done;
+ unsigned long flags;
+ u8 status;
+ int ret;
+
+ mutex_lock(&hba->uic_cmd_mutex);
+ init_completion(&uic_async_done);
+ ufshcd_add_delay_before_dme_cmd(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->uic_async_done = &uic_async_done;
+ ret = __ufshcd_send_uic_cmd(hba, cmd);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+ ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+ cmd->command, cmd->argument3, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->uic_async_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ cmd->command, cmd->argument3);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
+ cmd->command, status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->uic_async_done = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+
+ return ret;
+}
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: powr mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+ return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+
+ return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+ struct uic_command uic_cmd = {0};
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ if (ret) {
+ ufshcd_set_link_off(hba);
+ ret = ufshcd_host_reset_and_restore(hba);
+ }
+
+ return ret;
+}
+
+ /**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+ hba->pwr_info.gear_rx = UFS_PWM_G1;
+ hba->pwr_info.gear_tx = UFS_PWM_G1;
+ hba->pwr_info.lane_rx = 1;
+ hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+ hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+ hba->pwr_info.hs_rate = 0;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+ if (hba->max_pwr_info.is_valid)
+ return 0;
+
+ pwr_info->pwr_tx = FASTAUTO_MODE;
+ pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->hs_rate = PA_HS_MODE_B;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+ &pwr_info->lane_rx);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+ &pwr_info->lane_tx);
+
+ if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+ dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+ __func__,
+ pwr_info->lane_rx,
+ pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
+ /*
+ * First, get the maximum gears of HS speed.
+ * If a zero value, it means there is no HSGEAR capability.
+ * Then, get the maximum gears of PWM speed.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_rx);
+ if (!pwr_info->gear_rx) {
+ dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+ __func__, pwr_info->gear_rx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_rx = SLOWAUTO_MODE;
+ }
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &pwr_info->gear_tx);
+ if (!pwr_info->gear_tx) {
+ dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+ __func__, pwr_info->gear_tx);
+ return -EINVAL;
+ }
+ pwr_info->pwr_tx = SLOWAUTO_MODE;
+ }
+
+ hba->max_pwr_info.is_valid = true;
+ return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int ret;
+
+ /* if already configured to the requested pwr_mode */
+ if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ pwr_mode->lane_rx);
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ pwr_mode->lane_tx);
+ if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+ else
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+
+ if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+ pwr_mode->pwr_tx == FASTAUTO_MODE ||
+ pwr_mode->pwr_rx == FAST_MODE ||
+ pwr_mode->pwr_tx == FAST_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
+
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ | pwr_mode->pwr_tx);
+
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: power mode change failed %d\n", __func__, ret);
+ } else {
+ if (hba->vops && hba->vops->pwr_change_notify)
+ hba->vops->pwr_change_notify(hba,
+ POST_CHANGE, NULL, pwr_mode);
+
+ memcpy(&hba->pwr_info, pwr_mode,
+ sizeof(struct ufs_pa_layer_attr));
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+ struct ufs_pa_layer_attr final_params = { 0 };
+ int ret;
+
+ if (hba->vops && hba->vops->pwr_change_notify)
+ hba->vops->pwr_change_notify(hba,
+ PRE_CHANGE, desired_pwr_mode, &final_params);
+ else
+ memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+ ret = ufshcd_change_power_mode(hba, &final_params);
+
+ return ret;
+}
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * hba: per-adapter instance
+ *
+ * Set fDeviceInit flag and poll until device toggles it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i, retries, err = 0;
+ bool flag_res = 1;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Set the fDeviceInit flag */
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (!err || err == -ETIMEDOUT)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 100 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 100 && !err && flag_res; i++) {
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = ufshcd_query_flag(hba,
+ UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ if (!err || err == -ETIMEDOUT)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
+ err);
+ }
+ }
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base addres
+ * 4. Configure run-stop-registers
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* Enable required interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
+
+ /* Configure interrupt aggregation */
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ /*
+ * UCRDY, UTMRLDY and UTRLRDY bits must be 1
+ * DEI, HEI bits must be 0
+ */
+ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(hba->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_hba_enable - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and controller firmware initialization
+ * sequence kicks off. When controller is ready it will set
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int retry;
+
+ /*
+ * msleep of 1 and 5 used in this function might result in msleep(20),
+ * but it was necessary to send the UFS FPGA to reset mode during
+ * development and testing of this driver. msleep can be changed to
+ * mdelay and retry count can be reduced based on the controller.
+ */
+ if (!ufshcd_is_hba_active(hba)) {
+
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
+
+ /*
+ * This delay is based on the testing done with UFS host
+ * controller FPGA. The delay can be changed based on the
+ * host controller used.
+ */
+ msleep(5);
+ }
+
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
+
+ if (hba->vops && hba->vops->hce_enable_notify)
+ hba->vops->hce_enable_notify(hba, PRE_CHANGE);
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+ /*
+ * To initialize a UFS host controller HCE bit must be set to 1.
+ * During initialization the HCE bit value changes from 1->0->1.
+ * When the host controller completes initialization sequence
+ * it sets the value of HCE bit to 1. The same HCE bit is read back
+ * to check if the controller has completed initialization sequence.
+ * So without this delay the value HCE = 1, set in the previous
+ * instruction might be read back.
+ * This delay can be changed based on the controller.
+ */
+ msleep(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(hba->dev,
+ "Controller enable failed\n");
+ return -EIO;
+ }
+ msleep(5);
+ }
+
+ /* enable UIC related interrupts */
+ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+ if (hba->vops && hba->vops->hce_enable_notify)
+ hba->vops->hce_enable_notify(hba, POST_CHANGE);
+
+ return 0;
+}
+
+/**
+ * ufshcd_link_startup - Initialize unipro link startup
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_link_startup(struct ufs_hba *hba)
+{
+ int ret;
+ int retries = DME_LINKSTARTUP_RETRIES;
+
+ do {
+ if (hba->vops && hba->vops->link_startup_notify)
+ hba->vops->link_startup_notify(hba, PRE_CHANGE);
+
+ ret = ufshcd_dme_link_startup(hba);
+
+ /* check if device is detected by inter-connect layer */
+ if (!ret && !ufshcd_is_device_present(hba)) {
+ dev_err(hba->dev, "%s: Device not present\n", __func__);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * DME link lost indication is only received when link is up,
+ * but we can't be sure if the link is up until link startup
+ * succeeds. So reset the local Uni-Pro and try again.
+ */
+ if (ret && ufshcd_hba_enable(hba))
+ goto out;
+ } while (ret && retries--);
+
+ if (ret)
+ /* failed to get the link up... retire */
+ goto out;
+
+ /* Include any host controller configuration via UIC commands */
+ if (hba->vops && hba->vops->link_startup_notify) {
+ ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_make_hba_operational(hba);
+out:
+ if (ret)
+ dev_err(hba->dev, "link startup failed %d\n", ret);
+ return ret;
+}
+
+/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int err = 0;
+ int retries;
+
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+ return err;
+}
+
+/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
+ * value that host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+ int ret = 0;
+ u8 lun_qdepth;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ lun_qdepth = hba->nutrs;
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+
+ /* Some WLUN doesn't support unit descriptor */
+ if (ret == -EOPNOTSUPP)
+ lun_qdepth = 1;
+ else if (!lun_qdepth)
+ /* eventually, we can figure out the real queue depth */
+ lun_qdepth = hba->nutrs;
+ else
+ lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+ dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+ __func__, lun_qdepth);
+ scsi_change_queue_depth(sdev, lun_qdepth);
+}
+
+/*
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success and b_lu_write_protect status would be returned
+ * @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+ u8 lun,
+ u8 *b_lu_write_protect)
+{
+ int ret;
+
+ if (!b_lu_write_protect)
+ ret = -EINVAL;
+ /*
+ * According to UFS device spec, RPMB LU can't be write
+ * protected so skip reading bLUWriteProtect parameter for
+ * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+ */
+ else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ ret = -ENOTSUPP;
+ else
+ ret = ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_LU_WR_PROTECT,
+ b_lu_write_protect,
+ sizeof(*b_lu_write_protect));
+ return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+ struct scsi_device *sdev)
+{
+ if (hba->dev_info.f_power_on_wp_en &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ u8 b_lu_write_protect;
+
+ if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+ &b_lu_write_protect) &&
+ (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+ hba->dev_info.is_lu_power_on_wp = true;
+ }
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+ sdev->use_10_for_ms = 1;
+
+ /* allow SCSI layer to restart the device in case of errors */
+ sdev->allow_restart = 1;
+
+ /* REPORT SUPPORTED OPERATION CODES is not supported */
+ sdev->no_report_opcodes = 1;
+
+
+ ufshcd_set_queue_depth(sdev);
+
+ ufshcd_get_lu_power_on_wp_status(hba, sdev);
+
+ return 0;
+}
+
+/**
+ * ufshcd_change_queue_depth - change queue depth
+ * @sdev: pointer to SCSI device
+ * @depth: required depth to set
+ *
+ * Change queue depth and make sure the max. limits are not crossed.
+ */
+static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ if (depth > hba->nutrs)
+ depth = hba->nutrs;
+ return scsi_change_queue_depth(sdev, depth);
+}
+
+/**
+ * ufshcd_slave_configure - adjust SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static int ufshcd_slave_configure(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+
+ blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+
+ return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ /* Drop the reference as it won't be needed anymore */
+ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->sdev_ufs_device = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ * @resp: task management service response
+ *
+ * Returns non-zero value on error, zero on success
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_rsp *task_rsp_upiup;
+ unsigned long flags;
+ int ocs_value;
+ int task_result;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Clear completed tasks from outstanding_tasks */
+ __clear_bit(index, &hba->outstanding_tasks);
+
+ task_req_descp = hba->utmrdl_base_addr;
+ ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+ if (ocs_value == OCS_SUCCESS) {
+ task_rsp_upiup = (struct utp_upiu_task_rsp *)
+ task_req_descp[index].task_rsp_upiu;
+ task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
+ task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
+ if (resp)
+ *resp = (u8)task_result;
+ } else {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ocs_value;
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrb: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value base on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+ int result = 0;
+
+ switch (scsi_status) {
+ case SAM_STAT_CHECK_CONDITION:
+ ufshcd_copy_sense_data(lrbp);
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ scsi_status;
+ break;
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_ABORTED:
+ ufshcd_copy_sense_data(lrbp);
+ result |= scsi_status;
+ break;
+ default:
+ result |= DID_ERROR << 16;
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrb: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int result = 0;
+ int scsi_status;
+ int ocs;
+
+ /* overall command status of utrd */
+ ocs = ufshcd_get_tr_ocs(lrbp);
+
+ switch (ocs) {
+ case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+ if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ break;
+ default:
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Unexpected request response code = %x\n",
+ result);
+ break;
+ }
+ break;
+ case OCS_ABORTED:
+ result |= DID_ABORT << 16;
+ break;
+ case OCS_INVALID_COMMAND_STATUS:
+ result |= DID_REQUEUE << 16;
+ break;
+ case OCS_INVALID_CMD_TABLE_ATTR:
+ case OCS_INVALID_PRDT_ATTR:
+ case OCS_MISMATCH_DATA_BUF_SIZE:
+ case OCS_MISMATCH_RESP_UPIU_SIZE:
+ case OCS_PEER_COMM_FAILURE:
+ case OCS_FATAL_ERROR:
+ default:
+ result |= DID_ERROR << 16;
+ dev_err(hba->dev,
+ "OCS error from controller = %x\n", ocs);
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_uic_cmd_compl - handle completion of uic command
+ * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
+ */
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+{
+ if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
+ hba->active_uic_cmd->argument2 |=
+ ufshcd_get_uic_cmd_result(hba);
+ hba->active_uic_cmd->argument3 =
+ ufshcd_get_dme_attr_val(hba);
+ complete(&hba->active_uic_cmd->done);
+ }
+
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ complete(hba->uic_async_done);
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+ int result;
+ int index;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent other interrupts starvation the DB is read once
+ * after reset. The down side of this solution is the possibility of
+ * false interrupt if device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ ufshcd_reset_intr_aggr(hba);
+
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ if (cmd) {
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Mark completed command as NULL in LRB */
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ __ufshcd_release(hba);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.complete)
+ complete(hba->dev_cmd.complete);
+ }
+ }
+
+ /* clear corresponding bits of completed commands */
+ hba->outstanding_reqs ^= completed_reqs;
+
+ ufshcd_clk_scaling_update_busy(hba);
+
+ /* we might have free'd some tags above */
+ wake_up(&hba->dev_cmd.tag_wq);
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+
+ /* No need of URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block device in doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has drawback of device moving into critical state where the device is
+ * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
+ * host is idle so that BKOPS are managed effectively without any negative
+ * impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * If host assisted BKOPs is to be enabled, make sure
+ * urgent bkops exception is allowed.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to default value. The s/w tracking variables should be updated
+ * as well. Do this by forcing enable of auto bkops.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
+ * @hba: per-adapter instance
+ * @status: bkops_status value
+ *
+ * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to "status" argument passed to
+ * this function, disable otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
+ */
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+ enum bkops_status status)
+{
+ int err;
+ u32 curr_status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ } else if (curr_status > BKOPS_STATUS_MAX) {
+ dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+ __func__, curr_status);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (curr_status >= status)
+ err = ufshcd_enable_auto_bkops(hba);
+ else
+ err = ufshcd_disable_auto_bkops(hba);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
+ * and negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status &= hba->ee_ctrl_mask;
+ if (status & MASK_EE_URGENT_BKOPS) {
+ err = ufshcd_urgent_bkops(hba);
+ if (err < 0)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+ }
+out:
+ pm_runtime_put_sync(hba->dev);
+ return;
+}
+
+/**
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
+ */
+static void ufshcd_err_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ unsigned long flags;
+ u32 err_xfer = 0;
+ u32 err_tm = 0;
+ int err = 0;
+ int tag;
+
+ hba = container_of(work, struct ufs_hba, eh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto out;
+ }
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
+ if (ufshcd_clear_cmd(hba, tag))
+ err_xfer |= 1 << tag;
+
+ /* Clear pending task management requests */
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
+ if (ufshcd_clear_tm_cmd(hba, tag))
+ err_tm |= 1 << tag;
+
+ /* Complete the requests that are cleared by s/w */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Fatal errors need reset */
+ if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+ err = ufshcd_reset_and_restore(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: reset and restore failed\n",
+ __func__);
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ /*
+ * Inform scsi mid-layer that we did reset and allow to handle
+ * Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+
+out:
+ scsi_unblock_requests(hba->host);
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
+/**
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
+ */
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
+{
+ u32 reg;
+
+ /* PA_INIT_ERROR is fatal and needs UIC reset */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+
+ /* UIC NL/TL/DME errors needs software retry */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+ if (reg)
+ hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+ if (reg)
+ hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+ if (reg)
+ hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+ dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+ __func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+ bool queue_eh_work = false;
+
+ if (hba->errors & INT_FATAL_ERRORS)
+ queue_eh_work = true;
+
+ if (hba->errors & UIC_ERROR) {
+ hba->uic_error = 0;
+ ufshcd_update_uic_error(hba);
+ if (hba->uic_error)
+ queue_eh_work = true;
+ }
+
+ if (queue_eh_work) {
+ /* handle fatal errors only when link is functional */
+ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+ /* block commands from scsi mid-layer */
+ scsi_block_requests(hba->host);
+
+ /* transfer error masks to sticky bits */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ schedule_work(&hba->eh_work);
+ }
+ }
+ /*
+ * if (!queue_eh_work) -
+ * Other errors are either non-fatal where host recovers
+ * itself without s/w intervention or errors that will be
+ * handled by the SCSI core layer.
+ */
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+ u32 tm_doorbell;
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+ wake_up(&hba->tm_wq);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+ hba->errors = UFSHCD_ERROR_MASK & intr_status;
+ if (hba->errors)
+ ufshcd_check_errors(hba);
+
+ if (intr_status & UFSHCD_UIC_MASK)
+ ufshcd_uic_cmd_compl(hba, intr_status);
+
+ if (intr_status & UTP_TASK_REQ_COMPL)
+ ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+ ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ u32 intr_status;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+
+ spin_lock(hba->host->host_lock);
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ if (intr_status) {
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ ufshcd_sl_intr(hba, intr_status);
+ retval = IRQ_HANDLED;
+ }
+ spin_unlock(hba->host->host_lock);
+ return retval;
+}
+
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ u32 mask = 1 << tag;
+ unsigned long flags;
+
+ if (!test_bit(tag, &hba->outstanding_tasks))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* poll for max. 1 sec to clear door bell register by h/w */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TASK_REQ_DOOR_BELL,
+ mask, 0, 1000, 1000);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_req *task_req_upiup;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int free_slot;
+ int err;
+ int task_tag;
+
+ host = hba->host;
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+ */
+ wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ task_req_descp = hba->utmrdl_base_addr;
+ task_req_descp += free_slot;
+
+ /* Configure task request descriptor */
+ task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ task_req_descp->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ task_req_upiup =
+ (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+ task_tag = hba->nutrs + free_slot;
+ task_req_upiup->header.dword_0 =
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lun_id, task_tag);
+ task_req_upiup->header.dword_1 =
+ UPIU_HEADER_DWORD(0, tm_function, 0, 0);
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
+ task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+ task_req_upiup->input_param2 = cpu_to_be32(task_id);
+
+ /* send command to the controller */
+ __set_bit(free_slot, &hba->outstanding_tasks);
+ ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /* wait until the task management command is completed */
+ err = wait_event_timeout(hba->tm_wq,
+ test_bit(free_slot, &hba->tm_condition),
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
+ if (!err) {
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+ __func__, tm_function);
+ if (ufshcd_clear_tm_cmd(hba, free_slot))
+ dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
+ __func__, free_slot);
+ err = -ETIMEDOUT;
+ } else {
+ err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ }
+
+ clear_bit(free_slot, &hba->tm_condition);
+ ufshcd_put_tm_slot(hba, free_slot);
+ wake_up(&hba->tm_tag_wq);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned int tag;
+ u32 pos;
+ int err;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ lrbp = &hba->lrb[tag];
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp;
+ goto out;
+ }
+
+ /* clear the commands that were pending for corresponding LUN */
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[pos].lun == lrbp->lun) {
+ err = ufshcd_clear_cmd(hba, pos);
+ if (err)
+ break;
+ }
+ }
+ spin_lock_irqsave(host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ spin_unlock_irqrestore(host->host_lock, flags);
+out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
+ return err;
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Abort the pending command in device by sending UFS_ABORT_TASK task management
+ * command, and in host controller by clearing the door-bell register. There can
+ * be race between controller sending the command to the device while abort is
+ * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
+ * really issued and then try to abort it.
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ unsigned int tag;
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ struct ufshcd_lrb *lrbp;
+ u32 reg;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ ufshcd_hold(hba, false);
+ /* If command is already aborted/completed, return SUCCESS */
+ if (!(test_bit(tag, &hba->outstanding_reqs)))
+ goto out;
+
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ }
+
+ lrbp = &hba->lrb[tag];
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ goto out;
+ } else {
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err)
+ goto out;
+
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
+ __clear_bit(tag, &hba->outstanding_reqs);
+ hba->lrb[tag].cmd = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
+
+out:
+ if (!err) {
+ err = SUCCESS;
+ } else {
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ err = FAILED;
+ }
+
+ /*
+ * This ufshcd_release() corresponds to the original scsi cmd that got
+ * aborted here (as we won't get any IRQ for it).
+ */
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to
+ * local and remote (device) Uni-Pro stack and the attributes
+ * are reset to default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+ int err;
+ unsigned long flags;
+
+ /* Reset the host controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_hba_stop(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ goto out;
+
+ /* Establish the link again and restore the device */
+ err = ufshcd_probe_hba(hba);
+
+ if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+ err = -EIO;
+out:
+ if (err)
+ dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+ return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover device, host and re-establish link. This
+ * is helpful to recover the communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+ int err = 0;
+ unsigned long flags;
+ int retries = MAX_HOST_RESET_RETRIES;
+
+ do {
+ err = ufshcd_host_reset_and_restore(hba);
+ } while (err && --retries);
+
+ /*
+ * After reset the door-bell might be cleared, complete
+ * outstanding requests in s/w here.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd - SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int err;
+ unsigned long flags;
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ ufshcd_hold(hba, false);
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling does reset and restore in some cases, don't assume
+ * anything out of it. We are just avoiding race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!err) {
+ err = SUCCESS;
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ } else {
+ err = FAILED;
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ }
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_release(hba);
+ return err;
+}
+
+/**
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row at the desc table to start scan from
+ * @buff: power descriptor buffer
+ *
+ * Returns calculated max ICC level for specific regulator
+ */
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+{
+ int i;
+ int curr_uA;
+ u16 data;
+ u16 unit;
+
+ for (i = start_scan; i >= 0; i--) {
+ data = be16_to_cpu(*((u16 *)(buff + 2*i)));
+ unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+ ATTR_ICC_LVL_UNIT_OFFSET;
+ curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
+ switch (unit) {
+ case UFSHCD_NANO_AMP:
+ curr_uA = curr_uA / 1000;
+ break;
+ case UFSHCD_MILI_AMP:
+ curr_uA = curr_uA * 1000;
+ break;
+ case UFSHCD_AMP:
+ curr_uA = curr_uA * 1000 * 1000;
+ break;
+ case UFSHCD_MICRO_AMP:
+ default:
+ break;
+ }
+ if (sup_curr_uA >= curr_uA)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+ }
+
+ return (u32)i;
+}
+
+/**
+ * ufshcd_calc_icc_level - calculate the max ICC level
+ * In case regulators are not initialized we'll return 0
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from.
+ * @len: length of desc_buff
+ *
+ * Returns calculated ICC level
+ */
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ u8 *desc_buf, int len)
+{
+ u32 icc_level = 0;
+
+ if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+ !hba->vreg_info.vccq2) {
+ dev_err(hba->dev,
+ "%s: Regulator capability was not set, actvIccLevel=%d",
+ __func__, icc_level);
+ goto out;
+ }
+
+ if (hba->vreg_info.vcc)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vcc->max_uA,
+ POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+ if (hba->vreg_info.vccq)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+ if (hba->vreg_info.vccq2)
+ icc_level = ufshcd_get_max_icc_level(
+ hba->vreg_info.vccq2->max_uA,
+ icc_level,
+ &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+ return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+{
+ int ret;
+ int buff_len = QUERY_DESC_POWER_MAX_SIZE;
+ u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: Failed reading power descriptor.len = %d ret = %d",
+ __func__, buff_len, ret);
+ return;
+ }
+
+ hba->init_prefetch_data.icc_level =
+ ufshcd_find_max_sup_active_icc_level(hba,
+ desc_buf, buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+ __func__, hba->init_prefetch_data.icc_level);
+
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: Failed configuring bActiveICCLevel = %d ret = %d",
+ __func__, hba->init_prefetch_data.icc_level , ret);
+
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ * "REPORT_LUNS" (address: 01h)
+ * "UFS Device" (address: 50h)
+ * "RPMB" (address: 44h)
+ * "BOOT" (address: 30h)
+ * UFS device's power management needs to be controlled by "POWER CONDITION"
+ * field of SSU (START STOP UNIT) command. But this "power condition" field
+ * will take effect only when its sent to "UFS device" well known logical unit
+ * hence we require the scsi_device instance to represent this logical unit in
+ * order for the UFS host driver to send the SSU command for power management.
+
+ * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
+ * Block) LU so user space process can control this LU. User space may also
+ * want to have access to BOOT LU.
+
+ * This function adds scsi device instances for each of all well known LUs
+ * (except "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if failed to add any of the required W-LU).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct scsi_device *sdev_rpmb;
+ struct scsi_device *sdev_boot;
+
+ hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+ if (IS_ERR(hba->sdev_ufs_device)) {
+ ret = PTR_ERR(hba->sdev_ufs_device);
+ hba->sdev_ufs_device = NULL;
+ goto out;
+ }
+ scsi_device_put(hba->sdev_ufs_device);
+
+ sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(sdev_boot)) {
+ ret = PTR_ERR(sdev_boot);
+ goto remove_sdev_ufs_device;
+ }
+ scsi_device_put(sdev_boot);
+
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
+ goto remove_sdev_boot;
+ }
+ scsi_device_put(sdev_rpmb);
+ goto out;
+
+remove_sdev_boot:
+ scsi_remove_device(sdev_boot);
+remove_sdev_ufs_device:
+ scsi_remove_device(hba->sdev_ufs_device);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
+{
+ int ret;
+
+ ret = ufshcd_link_startup(hba);
+ if (ret)
+ goto out;
+
+ ufshcd_init_pwr_info(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ goto out;
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ hba->wlun_dev_clr_ua = true;
+
+ if (ufshcd_get_max_pwr_mode(hba)) {
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+ } else {
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret)
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ }
+
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+ */
+ if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ bool flag;
+
+ /* clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+ if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ if (!hba->is_init_prefetch)
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ if (ufshcd_scsi_add_wlus(hba))
+ goto out;
+
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+ }
+
+ if (!hba->is_init_prefetch)
+ hba->is_init_prefetch = true;
+
+ /* Resume devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+
+out:
+ /*
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
+ */
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_hba_exit(hba);
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)data;
+
+ ufshcd_probe_hba(hba);
+}
+
+static struct scsi_host_template ufshcd_driver_template = {
+ .module = THIS_MODULE,
+ .name = UFSHCD,
+ .proc_name = UFSHCD,
+ .queuecommand = ufshcd_queuecommand,
+ .slave_alloc = ufshcd_slave_alloc,
+ .slave_configure = ufshcd_slave_configure,
+ .slave_destroy = ufshcd_slave_destroy,
+ .change_queue_depth = ufshcd_change_queue_depth,
+ .eh_abort_handler = ufshcd_abort,
+ .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+ .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = UFSHCD_CMD_PER_LUN,
+ .can_queue = UFSHCD_CAN_QUEUE,
+ .max_host_blocked = 1,
+ .use_blk_tags = 1,
+ .track_queue_depth = 1,
+};
+
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+ int ua)
+{
+ int ret;
+
+ if (!vreg)
+ return 0;
+
+ ret = regulator_set_load(vreg->reg, ua);
+ if (ret < 0) {
+ dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
+ __func__, vreg->name, ua, ret);
+ }
+
+ return ret;
+}
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+{
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+{
+ int ret = 0;
+ struct regulator *reg = vreg->reg;
+ const char *name = vreg->name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg || vreg->enabled)
+ goto out;
+
+ ret = ufshcd_config_vreg(dev, vreg, true);
+ if (!ret)
+ ret = regulator_enable(vreg->reg);
+
+ if (!ret)
+ vreg->enabled = true;
+ else
+ dev_err(dev, "%s: %s enable failed, err=%d\n",
+ __func__, vreg->name, ret);
+out:
+ return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg || !vreg->enabled)
+ goto out;
+
+ ret = regulator_disable(vreg->reg);
+
+ if (!ret) {
+ /* ignore errors on applying disable config */
+ ufshcd_config_vreg(dev, vreg, false);
+ vreg->enabled = false;
+ } else {
+ dev_err(dev, "%s: %s disable failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+ if (ret)
+ goto out;
+
+out:
+ if (ret) {
+ ufshcd_toggle_vreg(dev, info->vccq2, false);
+ ufshcd_toggle_vreg(dev, info->vccq, false);
+ ufshcd_toggle_vreg(dev, info->vcc, false);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+ return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg)
+ goto out;
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vcc);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+ return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (info)
+ return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+ return 0;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+ unsigned long flags;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ continue;
+
+ if (on && !clki->enabled) {
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+ } else if (!on && clki->enabled) {
+ clk_disable_unprepare(clki->clk);
+ }
+ clki->enabled = on;
+ dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+ clki->name, on ? "en" : "dis");
+ }
+ }
+
+ if (hba->vops && hba->vops->setup_clocks)
+ ret = hba->vops->setup_clocks(hba, on);
+out:
+ if (ret) {
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ clk_disable_unprepare(clki->clk);
+ }
+ } else if (on) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.state = CLKS_ON;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+ return __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct device *dev = hba->dev;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(dev, clki->name);
+ if (IS_ERR(clki->clk)) {
+ ret = PTR_ERR(clki->clk);
+ dev_err(dev, "%s: %s clk get failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+
+ if (clki->max_freq) {
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ goto out;
+ }
+ clki->curr_freq = clki->max_freq;
+ }
+ dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->vops)
+ goto out;
+
+ if (hba->vops->init) {
+ err = hba->vops->init(hba);
+ if (err)
+ goto out;
+ }
+
+ if (hba->vops->setup_regulators) {
+ err = hba->vops->setup_regulators(hba, true);
+ if (err)
+ goto out_exit;
+ }
+
+ goto out;
+
+out_exit:
+ if (hba->vops->exit)
+ hba->vops->exit(hba);
+out:
+ if (err)
+ dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+ __func__, hba->vops ? hba->vops->name : "", err);
+ return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+ if (!hba->vops)
+ return;
+
+ if (hba->vops->setup_clocks)
+ hba->vops->setup_clocks(hba, false);
+
+ if (hba->vops->setup_regulators)
+ hba->vops->setup_regulators(hba, false);
+
+ if (hba->vops->exit)
+ hba->vops->exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+ int err;
+
+ /*
+ * Handle host controller power separately from the UFS device power
+ * rails as it will help controlling the UFS host controller power
+ * collapse easily which is different than UFS device power collapse.
+ * Also, enable the host controller power before we go ahead with rest
+ * of the initialization here.
+ */
+ err = ufshcd_init_hba_vreg(hba);
+ if (err)
+ goto out;
+
+ err = ufshcd_setup_hba_vreg(hba, true);
+ if (err)
+ goto out;
+
+ err = ufshcd_init_clocks(hba);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_setup_clocks(hba, true);
+ if (err)
+ goto out_disable_hba_vreg;
+
+ err = ufshcd_init_vreg(hba);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_setup_vreg(hba, true);
+ if (err)
+ goto out_disable_clks;
+
+ err = ufshcd_variant_hba_init(hba);
+ if (err)
+ goto out_disable_vreg;
+
+ hba->is_powered = true;
+ goto out;
+
+out_disable_vreg:
+ ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+ ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+ ufshcd_setup_hba_vreg(hba, false);
+out:
+ return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+ if (hba->is_powered) {
+ ufshcd_variant_hba_exit(hba);
+ ufshcd_setup_vreg(hba, false);
+ ufshcd_setup_clocks(hba, false);
+ ufshcd_setup_hba_vreg(hba, false);
+ hba->is_powered = false;
+ }
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
+ unsigned char cmd[6] = {REQUEST_SENSE,
+ 0,
+ 0,
+ 0,
+ SCSI_SENSE_BUFFERSIZE,
+ 0};
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
+ SCSI_SENSE_BUFFERSIZE, NULL,
+ msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+ if (ret)
+ pr_err("%s: failed with err %d\n", __func__, ret);
+
+ kfree(buffer);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ * power mode
+ * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
+ *
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
+ */
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ enum ufs_dev_pwr_mode pwr_mode)
+{
+ unsigned char cmd[6] = { START_STOP };
+ struct scsi_sense_hdr sshdr;
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ sdp = hba->sdev_ufs_device;
+ if (sdp) {
+ ret = scsi_device_get(sdp);
+ if (!ret && !scsi_device_online(sdp)) {
+ ret = -ENODEV;
+ scsi_device_put(sdp);
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If scsi commands fail, the scsi mid-layer schedules scsi error-
+ * handling, which would wait for host to be resumed. Since we know
+ * we are functional while we are here, skip host resume in error
+ * handling context.
+ */
+ hba->host->eh_noresume = 1;
+ if (hba->wlun_dev_clr_ua) {
+ ret = ufshcd_send_request_sense(hba, sdp);
+ if (ret)
+ goto out;
+ /* Unit attention condition is cleared now */
+ hba->wlun_dev_clr_ua = false;
+ }
+
+ cmd[4] = pwr_mode << 4;
+
+ /*
+ * Current function would be generally called from the power management
+ * callbacks hence set the REQ_PM flag so that it doesn't resume the
+ * already suspended childs.
+ */
+ ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+ START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sdp,
+ "START_STOP failed for power mode: %d, result %x\n",
+ pwr_mode, ret);
+ if (driver_byte(ret) & DRIVER_SENSE)
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ }
+
+ if (!ret)
+ hba->curr_dev_pwr_mode = pwr_mode;
+out:
+ scsi_device_put(sdp);
+ hba->host->eh_noresume = 0;
+ return ret;
+}
+
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+ enum uic_link_state req_link_state,
+ int check_for_bkops)
+{
+ int ret = 0;
+
+ if (req_link_state == hba->uic_link_state)
+ return 0;
+
+ if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (!ret)
+ ufshcd_set_link_hibern8(hba);
+ else
+ goto out;
+ }
+ /*
+ * If autobkops is enabled, link can't be turned off because
+ * turning off the link would also turn off the device.
+ */
+ else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+ (!check_for_bkops || (check_for_bkops &&
+ !hba->auto_bkops_enabled))) {
+ /*
+ * Change controller state to "reset state" which
+ * should also put the link in off/reset state
+ */
+ ufshcd_hba_stop(hba);
+ /*
+ * TODO: Check if we need any delay to make sure that
+ * controller is reset
+ */
+ ufshcd_set_link_off(hba);
+ }
+
+out:
+ return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+ /*
+ * If UFS device is either in UFS_Sleep turn off VCC rail to save some
+ * power.
+ *
+ * If UFS device and link is in OFF state, all power supplies (VCC,
+ * VCCQ, VCCQ2) can be turned off if power on write protect is not
+ * required. If UFS link is inactive (Hibern8 or OFF state) and device
+ * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
+ *
+ * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
+ * in low power state which would save some power.
+ */
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ufshcd_setup_vreg(hba, false);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ if (!ufshcd_is_link_active(hba)) {
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ }
+ }
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+ !hba->dev_info.is_lu_power_on_wp) {
+ ret = ufshcd_setup_vreg(hba, true);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+ if (!ret && !ufshcd_is_link_active(hba)) {
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ if (ret)
+ goto vcc_disable;
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ if (ret)
+ goto vccq_lpm;
+ }
+ }
+ goto out;
+
+vccq_lpm:
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+out:
+ return ret;
+}
+
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+ if (ufshcd_is_link_off(hba))
+ ufshcd_setup_hba_vreg(hba, true);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ * @pm_op: desired low power operation type
+ *
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both UFS device and UFS link is powered off.
+ *
+ * NOTE: UFS device & link must be active before we enter in this function.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret = 0;
+ enum ufs_pm_level pm_lvl;
+ enum ufs_dev_pwr_mode req_dev_pwr_mode;
+ enum uic_link_state req_link_state;
+
+ hba->pm_op_in_progress = 1;
+ if (!ufshcd_is_shutdown_pm(pm_op)) {
+ pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->rpm_lvl : hba->spm_lvl;
+ req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+ req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+ } else {
+ req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+ req_link_state = UIC_LINK_OFF_STATE;
+ }
+
+ /*
+ * If we can't transition into any of the low power modes
+ * just gate the clocks.
+ */
+ ufshcd_hold(hba, false);
+ hba->clk_gating.is_suspended = true;
+
+ if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+ req_link_state == UIC_LINK_ACTIVE_STATE) {
+ goto disable_clks;
+ }
+
+ if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+ (req_link_state == hba->uic_link_state))
+ goto out;
+
+ /* UFS device & link must be active before we enter in this function */
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ufshcd_is_runtime_pm(pm_op)) {
+ if (ufshcd_can_autobkops_during_suspend(hba)) {
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations if bkops status shows
+ * that performance might be impacted.
+ */
+ ret = ufshcd_urgent_bkops(hba);
+ if (ret)
+ goto enable_gating;
+ } else {
+ /* make sure that auto bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
+ }
+
+ if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+ ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op))) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
+
+ ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+ if (ret)
+ goto set_dev_active;
+
+ ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
+ /*
+ * The clock scaling needs access to controller registers. Hence, Wait
+ * for pending clock scaling work to be done before clocks are
+ * turned off.
+ */
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+ /*
+ * Call vendor specific suspend callback. As these callbacks may access
+ * vendor specific host controller register space call them before the
+ * host clocks are ON.
+ */
+ if (hba->vops && hba->vops->suspend) {
+ ret = hba->vops->suspend(hba, pm_op);
+ if (ret)
+ goto set_link_active;
+ }
+
+ if (hba->vops && hba->vops->setup_clocks) {
+ ret = hba->vops->setup_clocks(hba, false);
+ if (ret)
+ goto vops_resume;
+ }
+
+ if (!ufshcd_is_link_active(hba))
+ ufshcd_setup_clocks(hba, false);
+ else
+ /* If link is active, device ref_clk can't be switched off */
+ __ufshcd_setup_clocks(hba, false, true);
+
+ hba->clk_gating.state = CLKS_OFF;
+ /*
+ * Disable the host irq as host controller as there won't be any
+ * host controller trasanction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ goto out;
+
+vops_resume:
+ if (hba->vops && hba->vops->resume)
+ hba->vops->resume(hba, pm_op);
+set_link_active:
+ ufshcd_vreg_set_hpm(hba);
+ if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+ ufshcd_set_link_active(hba);
+ else if (ufshcd_is_link_off(hba))
+ ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+ if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+ ufshcd_disable_auto_bkops(hba);
+enable_gating:
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ int ret;
+ enum uic_link_state old_link_state;
+
+ hba->pm_op_in_progress = 1;
+ old_link_state = hba->uic_link_state;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto out;
+
+ /* enable the host irq as host controller would be active soon */
+ ret = ufshcd_enable_irq(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto disable_irq_and_vops_clks;
+
+ /*
+ * Call vendor specific resume callback. As these callbacks may access
+ * vendor specific host controller register space call them when the
+ * host clocks are ON.
+ */
+ if (hba->vops && hba->vops->resume) {
+ ret = hba->vops->resume(hba, pm_op);
+ if (ret)
+ goto disable_vreg;
+ }
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (!ret)
+ ufshcd_set_link_active(hba);
+ else
+ goto vendor_suspend;
+ } else if (ufshcd_is_link_off(hba)) {
+ ret = ufshcd_host_reset_and_restore(hba);
+ /*
+ * ufshcd_host_reset_and_restore() should have already
+ * set the link state as active
+ */
+ if (ret || !ufshcd_is_link_active(hba))
+ goto vendor_suspend;
+ }
+
+ if (!ufshcd_is_ufs_dev_active(hba)) {
+ ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+ if (ret)
+ goto set_old_link_state;
+ }
+
+ /*
+ * If BKOPs operations are urgently needed at this moment then
+ * keep auto-bkops enabled or else disable it.
+ */
+ ufshcd_urgent_bkops(hba);
+ hba->clk_gating.is_suspended = false;
+
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+ goto out;
+
+set_old_link_state:
+ ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+ if (hba->vops && hba->vops->suspend)
+ hba->vops->suspend(hba, pm_op);
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+ ufshcd_disable_irq(hba);
+ ufshcd_setup_clocks(hba, false);
+out:
+ hba->pm_op_in_progress = 0;
+ return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba || !hba->is_powered)
+ return 0;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ if (hba->rpm_lvl == hba->spm_lvl)
+ /*
+ * There is possibility that device may still be in
+ * active state during the runtime suspend.
+ */
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * UFS device and/or UFS link low power states during runtime
+ * suspend seems to be different than what is expected during
+ * system suspend. Hence runtime resume the devic & link and
+ * let the system suspend low power states to take effect.
+ * TODO: If resume takes longer time, we might have optimize
+ * it in future by not resuming everything if possible.
+ */
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+ if (!ret)
+ hba->is_sys_suspended = true;
+ return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+ if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ /*
+ * Let the runtime resume take care of resuming
+ * if runtime suspended.
+ */
+ return 0;
+
+ return ufshcd_resume(hba, UFS_SYSTEM_PM);
+}
+EXPORT_SYMBOL(ufshcd_system_resume);
+
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ if (!hba || !hba->is_powered)
+ return 0;
+
+ return ufshcd_suspend(hba, UFS_RUNTIME_PM);
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. Following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ * to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So following would be the possible power state after this function return
+ * successfully:
+ * S1: UFS device in Active state with VCC rail ON
+ * UniPro link in Active state
+ * All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ if (!hba || !hba->is_powered)
+ return 0;
+ else
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
+
+/**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function would power off both UFS device and UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev)) {
+ ret = ufshcd_runtime_resume(hba);
+ if (ret)
+ goto out;
+ }
+
+ ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+ if (ret)
+ dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ /* allow force shutdown even in case of errors */
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
+/**
+ * ufshcd_remove - de-allocate SCSI host and host memory space
+ * data structure memory
+ * @hba - per adapter instance
+ */
+void ufshcd_remove(struct ufs_hba *hba)
+{
+ scsi_remove_host(hba->host);
+ /* disable interrupts */
+ ufshcd_disable_intr(hba, hba->intr_mask);
+ ufshcd_hba_stop(hba);
+
+ scsi_host_put(hba->host);
+
+ ufshcd_exit_clk_gating(hba);
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_remove_device(hba->devfreq);
+ ufshcd_hba_exit(hba);
+}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
+ if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
+ return 0;
+ }
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+}
+
+/**
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int err = 0;
+
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ host = scsi_host_alloc(&ufshcd_driver_template,
+ sizeof(struct ufs_hba));
+ if (!host) {
+ dev_err(dev, "scsi_host_alloc failed\n");
+ err = -ENOMEM;
+ goto out_error;
+ }
+ hba = shost_priv(host);
+ hba->host = host;
+ hba->dev = dev;
+ *hba_handle = hba;
+
+out_error:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+ if (hba->vops->clk_scale_notify)
+ hba->vops->clk_scale_notify(hba);
+out:
+ return ret;
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ int err = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return -EINVAL;
+
+ if (*freq == UINT_MAX)
+ err = ufshcd_scale_clks(hba, true);
+ else if (*freq == 0)
+ err = ufshcd_scale_clks(hba, false);
+
+ return err;
+}
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkscaling_enabled(hba))
+ return -EINVAL;
+
+ memset(stat, 0, sizeof(*stat));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!scaling->window_start_t)
+ goto start_window;
+
+ if (scaling->is_busy_started)
+ scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+ scaling->busy_start_t));
+
+ stat->total_time = jiffies_to_usecs((long)jiffies -
+ (long)scaling->window_start_t);
+ stat->busy_time = scaling->tot_busy_t;
+start_window:
+ scaling->window_start_t = jiffies;
+ scaling->tot_busy_t = 0;
+
+ if (hba->outstanding_reqs) {
+ scaling->busy_start_t = ktime_get();
+ scaling->is_busy_started = true;
+ } else {
+ scaling->busy_start_t = ktime_set(0, 0);
+ scaling->is_busy_started = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 100,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+ int err;
+ struct Scsi_Host *host = hba->host;
+ struct device *dev = hba->dev;
+
+ if (!mmio_base) {
+ dev_err(hba->dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
+
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
+
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+
+ /* Read capabilities registers */
+ ufshcd_hba_capabilities(hba);
+
+ /* Get UFS version supported by the controller */
+ hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+ /* Get Interrupt bit mask per version */
+ hba->intr_mask = ufshcd_get_intr_mask(hba);
+
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(hba->dev, "set dma mask failed\n");
+ goto out_disable;
+ }
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
+ }
+
+ /* Configure LRB */
+ ufshcd_host_memory_configure(hba);
+
+ host->can_queue = hba->nutrs;
+ host->cmd_per_lun = hba->nutrs;
+ host->max_id = UFSHCD_MAX_ID;
+ host->max_lun = UFS_MAX_LUNS;
+ host->max_channel = UFSHCD_MAX_CHANNEL;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CDB_SIZE;
+
+ hba->max_pwr_info.is_valid = false;
+
+ /* Initailize wait queue for task management */
+ init_waitqueue_head(&hba->tm_wq);
+ init_waitqueue_head(&hba->tm_tag_wq);
+
+ /* Initialize work queues */
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+
+ /* Initialize UIC command mutex */
+ mutex_init(&hba->uic_cmd_mutex);
+
+ /* Initialize mutex for device management commands */
+ mutex_init(&hba->dev_cmd.lock);
+
+ /* Initialize device management tag acquire wait queue */
+ init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
+ ufshcd_init_clk_gating(hba);
+ /* IRQ registration */
+ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(hba->dev, "request irq failed\n");
+ goto exit_gating;
+ } else {
+ hba->is_irq_enabled = true;
+ }
+
+ /* Enable SCSI tag mapping */
+ err = scsi_init_shared_tag_map(host, host->can_queue);
+ if (err) {
+ dev_err(hba->dev, "init shared queue failed\n");
+ goto exit_gating;
+ }
+
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto exit_gating;
+ }
+
+ /* Host controller enable */
+ err = ufshcd_hba_enable(hba);
+ if (err) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ goto out_remove_scsi_host;
+ }
+
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ "simple_ondemand", NULL);
+ if (IS_ERR(hba->devfreq)) {
+ dev_err(hba->dev, "Unable to register with devfreq %ld\n",
+ PTR_ERR(hba->devfreq));
+ goto out_remove_scsi_host;
+ }
+ /* Suspend devfreq until the UFS device is detected */
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+
+ /*
+ * The device-initialize-sequence hasn't been invoked yet.
+ * Set the device to power-off state
+ */
+ ufshcd_set_ufs_dev_poweroff(hba);
+
+ async_schedule(ufshcd_async_scan, hba);
+
+ return 0;
+
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
+exit_gating:
+ ufshcd_exit_clk_gating(hba);
+out_disable:
+ hba->is_irq_enabled = false;
+ scsi_host_put(host);
+ ufshcd_hba_exit(hba);
+out_error:
+ return err;
+}
+EXPORT_SYMBOL_GPL(ufshcd_init);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644
index 000000000..b47ff0769
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -0,0 +1,605 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+struct ufs_hba;
+
+enum dev_cmd_type {
+ DEV_CMD_TYPE_NOP = 0x0,
+ DEV_CMD_TYPE_QUERY = 0x1,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ * @done: UIC command completion
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+ struct completion done;
+};
+
+/* Used to differentiate the power management options */
+enum ufs_pm_op {
+ UFS_RUNTIME_PM,
+ UFS_SYSTEM_PM,
+ UFS_SHUTDOWN_PM,
+};
+
+#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
+#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
+#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+ UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
+ UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
+ UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+};
+
+#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
+#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
+ UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
+#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
+ UIC_LINK_ACTIVE_STATE)
+#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
+ UIC_LINK_HIBERN8_STATE)
+
+/*
+ * UFS Power management levels.
+ * Each level is in increasing order of power savings.
+ */
+enum ufs_pm_level {
+ UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+ UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+ UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+ UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+ UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+ UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
+ UFS_PM_LVL_MAX
+};
+
+struct ufs_pm_lvl_states {
+ enum ufs_dev_pwr_mode dev_state;
+ enum uic_link_state link_state;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_req_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_req *ucd_req_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ u8 lun; /* UPIU LUN id field is only 8-bit wide */
+ bool intr_cmd;
+};
+
+/**
+ * struct ufs_query - holds relevent data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+ struct ufs_query_req request;
+ u8 *descriptor;
+ struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - all assosiated fields with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal commands completion
+ * @tag_wq: wait queue until free command slot is available
+ */
+struct ufs_dev_cmd {
+ enum dev_cmd_type type;
+ struct mutex lock;
+ struct completion *complete;
+ wait_queue_head_t tag_wq;
+ struct ufs_query query;
+};
+
+/**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+ * @clk: clock node
+ * @name: clock name
+ * @max_freq: maximum frequency supported by the clock
+ * @min_freq: min frequency that can be used for clock scaling
+ * @curr_freq: indicates the current frequency that it is set to
+ * @enabled: variable to check against multiple enable/disable
+ */
+struct ufs_clk_info {
+ struct list_head list;
+ struct clk *clk;
+ const char *name;
+ u32 max_freq;
+ u32 min_freq;
+ u32 curr_freq;
+ bool enabled;
+};
+
+#define PRE_CHANGE 0
+#define POST_CHANGE 1
+
+struct ufs_pa_layer_attr {
+ u32 gear_rx;
+ u32 gear_tx;
+ u32 lane_rx;
+ u32 lane_tx;
+ u32 pwr_rx;
+ u32 pwr_tx;
+ u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+ bool is_valid;
+ struct ufs_pa_layer_attr info;
+};
+
+/**
+ * struct ufs_hba_variant_ops - variant specific callbacks
+ * @name: variant name
+ * @init: called when the driver is initialized
+ * @exit: called to cleanup everything done in init
+ * @clk_scale_notify: notifies that clks are scaled up/down
+ * @setup_clocks: called before touching any of the controller registers
+ * @setup_regulators: called before accessing the host controller
+ * @hce_enable_notify: called before and after HCE enable bit is set to allow
+ * variant specific Uni-Pro initialization.
+ * @link_startup_notify: called before and after Link startup is carried out
+ * to allow variant specific Uni-Pro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ * is carried out to allow vendor spesific capabilities
+ * to be set.
+ * @suspend: called during host controller PM callback
+ * @resume: called during host controller PM callback
+ */
+struct ufs_hba_variant_ops {
+ const char *name;
+ int (*init)(struct ufs_hba *);
+ void (*exit)(struct ufs_hba *);
+ void (*clk_scale_notify)(struct ufs_hba *);
+ int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_regulators)(struct ufs_hba *, bool);
+ int (*hce_enable_notify)(struct ufs_hba *, bool);
+ int (*link_startup_notify)(struct ufs_hba *, bool);
+ int (*pwr_change_notify)(struct ufs_hba *,
+ bool, struct ufs_pa_layer_attr *,
+ struct ufs_pa_layer_attr *);
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+ int (*resume)(struct ufs_hba *, enum ufs_pm_op);
+};
+
+/* clock gating state */
+enum clk_gating_state {
+ CLKS_OFF,
+ CLKS_ON,
+ REQ_CLKS_OFF,
+ REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks that will be used in case of
+ * interrupt context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: clk gating is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @delay_attr: sysfs attribute to control delay_attr
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ */
+struct ufs_clk_gating {
+ struct delayed_work gate_work;
+ struct work_struct ungate_work;
+ enum clk_gating_state state;
+ unsigned long delay_ms;
+ bool is_suspended;
+ struct device_attribute delay_attr;
+ int active_reqs;
+};
+
+struct ufs_clk_scaling {
+ ktime_t busy_start_t;
+ bool is_busy_started;
+ unsigned long tot_busy_t;
+ unsigned long window_start_t;
+};
+
+/**
+ * struct ufs_init_prefetch - contains data that is pre-fetched once during
+ * initialization
+ * @icc_level: icc level which was read during initialization
+ */
+struct ufs_init_prefetch {
+ u32 icc_level;
+};
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @lrb_in_use: lrb in use
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which controller complies
+ * @vops: pointer to variant specific operations
+ * @priv: pointer to variant specific private data
+ * @irq: Irq number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @uic_cmd_mutex: mutex for uic command
+ * @tm_wq: wait queue for task management
+ * @tm_tag_wq: wait queue for free task management slots
+ * @tm_slots_in_use: bit map of task management request slots in use
+ * @pwr_done: completion for power mode change
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @eh_flags: Error handling flags
+ * @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
+ * @is_powered: flag to check if HBA is powered
+ * @is_init_prefetch: flag to check if data was pre-fetched in initialization
+ * @init_prefetch_data: data pre-fetched during initialization
+ * @eh_work: Worker to handle UFS errors that require s/w attention
+ * @eeh_work: Worker to handle exception events
+ * @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
+ * @dev_cmd: ufs device management command information
+ * @last_dme_cmd_tstamp: time stamp of the last completed DME command
+ * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @vreg_info: UFS device voltage regulator information
+ * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device max valid pwm
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+ /*
+ * This field is to keep a reference to "scsi_device" corresponding to
+ * "UFS device" W-LU.
+ */
+ struct scsi_device *sdev_ufs_device;
+
+ enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+ enum uic_link_state uic_link_state;
+ /* Desired UFS power management level during runtime PM */
+ enum ufs_pm_level rpm_lvl;
+ /* Desired UFS power management level during system PM */
+ enum ufs_pm_level spm_lvl;
+ int pm_op_in_progress;
+
+ struct ufshcd_lrb *lrb;
+ unsigned long lrb_in_use;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+ struct ufs_hba_variant_ops *vops;
+ void *priv;
+ unsigned int irq;
+ bool is_irq_enabled;
+
+ /*
+ * delay before each dme command is required as the unipro
+ * layer has shown instabilities
+ */
+ #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(0)
+
+ unsigned int quirks; /* Deviations from standard UFSHCI spec. */
+
+ wait_queue_head_t tm_wq;
+ wait_queue_head_t tm_tag_wq;
+ unsigned long tm_condition;
+ unsigned long tm_slots_in_use;
+
+ struct uic_command *active_uic_cmd;
+ struct mutex uic_cmd_mutex;
+ struct completion *uic_async_done;
+
+ u32 ufshcd_state;
+ u32 eh_flags;
+ u32 intr_mask;
+ u16 ee_ctrl_mask;
+ bool is_powered;
+ bool is_init_prefetch;
+ struct ufs_init_prefetch init_prefetch_data;
+
+ /* Work Queues */
+ struct work_struct eh_work;
+ struct work_struct eeh_work;
+
+ /* HBA Errors */
+ u32 errors;
+ u32 uic_error;
+ u32 saved_err;
+ u32 saved_uic_err;
+
+ /* Device management request data */
+ struct ufs_dev_cmd dev_cmd;
+ ktime_t last_dme_cmd_tstamp;
+
+ /* Keeps information of the UFS device connected to this host */
+ struct ufs_dev_info dev_info;
+ bool auto_bkops_enabled;
+ struct ufs_vreg_info vreg_info;
+ struct list_head clk_list_head;
+
+ bool wlun_dev_clr_ua;
+
+ struct ufs_pa_layer_attr pwr_info;
+ struct ufs_pwr_mode_info max_pwr_info;
+
+ struct ufs_clk_gating clk_gating;
+ /* Control to enable/disable host capabilities */
+ u32 caps;
+ /* Allow dynamic clk gating */
+#define UFSHCD_CAP_CLK_GATING (1 << 0)
+ /* Allow hiberb8 with clk gating */
+#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
+ /* Allow dynamic clk scaling */
+#define UFSHCD_CAP_CLK_SCALING (1 << 2)
+ /* Allow auto bkops to enabled during runtime suspend */
+#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
+
+ struct devfreq *devfreq;
+ struct ufs_clk_scaling clk_scaling;
+ bool is_sys_suspended;
+};
+
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+
+#define ufshcd_writel(hba, val, reg) \
+ writel((val), (hba)->mmio_base + (reg))
+#define ufshcd_readl(hba, reg) \
+ readl((hba)->mmio_base + (reg))
+
+/**
+ * ufshcd_rmwl - read modify write into a register
+ * @hba - per adapter instance
+ * @mask - mask to apply on read value
+ * @val - actual value to write
+ * @reg - register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+ u32 tmp;
+
+ tmp = ufshcd_readl(hba, reg);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ ufshcd_writel(hba, tmp, reg);
+}
+
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+}
+
+static inline void check_upiu_size(void)
+{
+ BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
+ GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
+}
+
+extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
+extern int ufshcd_runtime_resume(struct ufs_hba *hba);
+extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_system_suspend(struct ufs_hba *hba);
+extern int ufshcd_system_resume(struct ufs_hba *hba);
+extern int ufshcd_shutdown(struct ufs_hba *hba);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL 0
+#define DME_PEER 1
+#define ATTR_SET_NOR 0 /* NORMAL */
+#define ATTR_SET_ST 1 /* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
+
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
new file mode 100644
index 000000000..d5721199e
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci.h
@@ -0,0 +1,393 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshci.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCI_H
+#define _UFSHCI_H
+
+enum {
+ TASK_REQ_UPIU_SIZE_DWORDS = 8,
+ TASK_RSP_UPIU_SIZE_DWORDS = 8,
+ ALIGNED_UPIU_SIZE = 512,
+};
+
+/* UFSHCI Registers */
+enum {
+ REG_CONTROLLER_CAPABILITIES = 0x00,
+ REG_UFS_VERSION = 0x08,
+ REG_CONTROLLER_DEV_ID = 0x10,
+ REG_CONTROLLER_PROD_ID = 0x14,
+ REG_INTERRUPT_STATUS = 0x20,
+ REG_INTERRUPT_ENABLE = 0x24,
+ REG_CONTROLLER_STATUS = 0x30,
+ REG_CONTROLLER_ENABLE = 0x34,
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
+ REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
+ REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
+ REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
+ REG_UIC_ERROR_CODE_DME = 0x48,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
+ REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
+ REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
+ REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
+ REG_UIC_COMMAND = 0x90,
+ REG_UIC_COMMAND_ARG_1 = 0x94,
+ REG_UIC_COMMAND_ARG_2 = 0x98,
+ REG_UIC_COMMAND_ARG_3 = 0x9C,
+};
+
+/* Controller capability masks */
+enum {
+ MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+ MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_64_ADDRESSING_SUPPORT = 0x01000000,
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+};
+
+/* UFS Version 08h */
+#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
+#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
+
+/* Controller UFSHCI version */
+enum {
+ UFSHCI_VERSION_10 = 0x00010000,
+ UFSHCI_VERSION_11 = 0x00010100,
+};
+
+/*
+ * HCDDID - Host Controller Identification Descriptor
+ * - Device ID and Device Class 10h
+ */
+#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
+#define DEVICE_ID UFS_MASK(0xFF, 24)
+
+/*
+ * HCPMID - Host Controller Identification Descriptor
+ * - Product/Manufacturer ID 14h
+ */
+#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
+#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
+
+#define UFS_BIT(x) (1L << (x))
+
+#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
+#define UIC_DME_END_PT_RESET UFS_BIT(1)
+#define UIC_ERROR UFS_BIT(2)
+#define UIC_TEST_MODE UFS_BIT(3)
+#define UIC_POWER_MODE UFS_BIT(4)
+#define UIC_HIBERNATE_EXIT UFS_BIT(5)
+#define UIC_HIBERNATE_ENTER UFS_BIT(6)
+#define UIC_LINK_LOST UFS_BIT(7)
+#define UIC_LINK_STARTUP UFS_BIT(8)
+#define UTP_TASK_REQ_COMPL UFS_BIT(9)
+#define UIC_COMMAND_COMPL UFS_BIT(10)
+#define DEVICE_FATAL_ERROR UFS_BIT(11)
+#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
+#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+
+#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
+ UIC_HIBERNATE_EXIT |\
+ UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
+
+#define UFSHCD_ERROR_MASK (UIC_ERROR |\
+ DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
+#define UTP_TASK_REQ_LIST_READY UFS_BIT(2)
+#define UIC_COMMAND_READY UFS_BIT(3)
+#define HOST_ERROR_INDICATOR UFS_BIT(4)
+#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+
+enum {
+ PWR_OK = 0x0,
+ PWR_LOCAL = 0x01,
+ PWR_REMOTE = 0x02,
+ PWR_BUSY = 0x03,
+ PWR_ERROR_CAP = 0x04,
+ PWR_FATAL_ERROR = 0x05,
+};
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE UFS_BIT(0)
+#define CONTROLLER_DISABLE 0x0
+
+/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
+#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+
+/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
+#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
+
+/* UECN - Host UIC Error Code Network Layer 40h */
+#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+
+/* UECT - Host UIC Error Code Transport Layer 44h */
+#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+
+/* UECDME - Host UIC Error Code DME 48h */
+#define UIC_DME_ERROR UFS_BIT(31)
+#define UIC_DME_ERROR_CODE_MASK 0x1
+
+#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
+#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16)
+#define INT_AGGR_STATUS_BIT UFS_BIT(20)
+#define INT_AGGR_PARAM_WRITE UFS_BIT(24)
+#define INT_AGGR_ENABLE UFS_BIT(31)
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK 0xFF
+#define GEN_SELECTOR_INDEX_MASK 0xFFFF
+
+#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL 0xFF
+
+#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK 0xFF
+#define GENERIC_ERROR_CODE_MASK 0xFF
+
+#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
+ ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
+
+/* UIC Commands */
+enum uic_cmd_dme {
+ UIC_CMD_DME_GET = 0x01,
+ UIC_CMD_DME_SET = 0x02,
+ UIC_CMD_DME_PEER_GET = 0x03,
+ UIC_CMD_DME_PEER_SET = 0x04,
+ UIC_CMD_DME_POWERON = 0x10,
+ UIC_CMD_DME_POWEROFF = 0x11,
+ UIC_CMD_DME_ENABLE = 0x12,
+ UIC_CMD_DME_RESET = 0x14,
+ UIC_CMD_DME_END_PT_RST = 0x15,
+ UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UIC_CMD_RESULT_SUCCESS = 0x00,
+ UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UIC_CMD_RESULT_FAILURE = 0x01,
+ UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UIC_CMD_RESULT_BUSY = 0x09,
+ UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT 0xFF
+
+#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
+
+/* Interrupt disable masks */
+enum {
+ /* Interrupt disable mask for UFSHCI v1.0 */
+ INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
+ INTERRUPT_MASK_RW_VER_10 = 0x30000,
+
+ /* Interrupt disable mask for UFSHCI v1.1 */
+ INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UTP_CMD_TYPE_SCSI = 0x0,
+ UTP_CMD_TYPE_UFS = 0x1,
+ UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+enum {
+ UTP_SCSI_COMMAND = 0x00000000,
+ UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+ UTP_REQ_DESC_INT_CMD = 0x01000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+ UTP_NO_DATA_TRANSFER = 0x00000000,
+ UTP_HOST_TO_DEVICE = 0x02000000,
+ UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum {
+ OCS_SUCCESS = 0x0,
+ OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ OCS_INVALID_PRDT_ATTR = 0x2,
+ OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ OCS_PEER_COMM_FAILURE = 0x5,
+ OCS_ABORTED = 0x6,
+ OCS_FATAL_ERROR = 0x7,
+ OCS_INVALID_COMMAND_STATUS = 0x0F,
+ MASK_OCS = 0x0F,
+};
+
+/* The maximum length of the data byte count field in the PRDT is 256KB */
+#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024)
+/* The granularity of the data byte count field in the PRDT is 32-bit */
+#define PRDT_DATA_BYTE_COUNT_PAD 4
+
+/**
+ * struct ufshcd_sg_entry - UFSHCI PRD Entry
+ * @base_addr: Lower 32bit physical address DW-0
+ * @upper_addr: Upper 32bit physical address DW-1
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+struct ufshcd_sg_entry {
+ __le32 base_addr;
+ __le32 upper_addr;
+ __le32 reserved;
+ __le32 size;
+};
+
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor
+ */
+struct utp_transfer_cmd_desc {
+ u8 command_upiu[ALIGNED_UPIU_SIZE];
+ u8 response_upiu[ALIGNED_UPIU_SIZE];
+ struct ufshcd_sg_entry prd_table[SG_ALL];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword0: Descriptor Header DW0
+ * @dword1: Descriptor Header DW1
+ * @dword2: Descriptor Header DW2
+ * @dword3: Descriptor Header DW3
+ */
+struct request_desc_header {
+ __le32 dword_0;
+ __le32 dword_1;
+ __le32 dword_2;
+ __le32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-5*/
+ __le32 command_desc_base_addr_lo;
+ __le32 command_desc_base_addr_hi;
+
+ /* DW 6 */
+ __le16 response_upiu_length;
+ __le16 response_upiu_offset;
+
+ /* DW 7 */
+ __le16 prd_table_length;
+ __le16 prd_table_offset;
+};
+
+/**
+ * struct utp_task_req_desc - UTMRD structure
+ * @header: UTMRD header DW-0 to DW-3
+ * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
+ * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
+ */
+struct utp_task_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-11 */
+ __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+
+ /* DW 12-19 */
+ __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
new file mode 100644
index 000000000..3fc3e21b7
--- /dev/null
+++ b/drivers/scsi/ufs/unipro.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * M-TX Configuration Attributes
+ */
+#define TX_MODE 0x0021
+#define TX_HSRATE_SERIES 0x0022
+#define TX_HSGEAR 0x0023
+#define TX_PWMGEAR 0x0024
+#define TX_AMPLITUDE 0x0025
+#define TX_HS_SLEWRATE 0x0026
+#define TX_SYNC_SOURCE 0x0027
+#define TX_HS_SYNC_LENGTH 0x0028
+#define TX_HS_PREPARE_LENGTH 0x0029
+#define TX_LS_PREPARE_LENGTH 0x002A
+#define TX_HIBERN8_CONTROL 0x002B
+#define TX_LCC_ENABLE 0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
+#define TX_BYPASS_8B10B_ENABLE 0x002E
+#define TX_DRIVER_POLARITY 0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
+#define TX_LCC_SEQUENCER 0x0032
+#define TX_MIN_ACTIVATETIME 0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_MODE 0x00A1
+#define RX_HSRATE_SERIES 0x00A2
+#define RX_HSGEAR 0x00A3
+#define RX_PWMGEAR 0x00A4
+#define RX_LS_TERMINATED_ENABLE 0x00A5
+#define RX_HS_UNTERMINATED_ENABLE 0x00A6
+#define RX_ENTER_HIBERN8 0x00A7
+#define RX_BYPASS_8B10B_ENABLE 0x00A8
+#define RX_TERMINATION_FORCE_ENABLE 0x0089
+
+#define is_mphy_tx_attr(attr) (attr < RX_MODE)
+/*
+ * PHY Adpater attributes
+ */
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_TXTRAILINGCLOCKS 0x1564
+#define PA_PHY_TYPE 0x1500
+#define PA_AVAILTXDATALANES 0x1520
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_TXPWRSTATUS 0x1567
+#define PA_RXPWRSTATUS 0x1582
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_LEGACYDPHYESCDL 0x1570
+#define PA_MAXTXSPEEDFAST 0x1521
+#define PA_MAXTXSPEEDSLOW 0x1522
+#define PA_MAXRXSPEEDFAST 0x1541
+#define PA_MAXRXSPEEDSLOW 0x1542
+#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_TXSPEEDFAST 0x1565
+#define PA_TXSPEEDSLOW 0x1566
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_TXGEAR 0x1568
+#define PA_TXTERMINATION 0x1569
+#define PA_HSSERIES 0x156A
+#define PA_PWRMODE 0x1571
+#define PA_RXGEAR 0x1583
+#define PA_RXTERMINATION 0x1584
+#define PA_MAXRXPWMGEAR 0x1586
+#define PA_MAXRXHSGEAR 0x1587
+#define PA_RXHSUNTERMCAP 0x15A5
+#define PA_RXLSTERMCAP 0x15A6
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_HIBERN8TIME 0x15A7
+#define PA_LOCALVERINFO 0x15A9
+#define PA_TACTIVATE 0x15A8
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
+#define PA_PWRMODEUSERDATA0 0x15B0
+#define PA_PWRMODEUSERDATA1 0x15B1
+#define PA_PWRMODEUSERDATA2 0x15B2
+#define PA_PWRMODEUSERDATA3 0x15B3
+#define PA_PWRMODEUSERDATA4 0x15B4
+#define PA_PWRMODEUSERDATA5 0x15B5
+#define PA_PWRMODEUSERDATA6 0x15B6
+#define PA_PWRMODEUSERDATA7 0x15B7
+#define PA_PWRMODEUSERDATA8 0x15B8
+#define PA_PWRMODEUSERDATA9 0x15B9
+#define PA_PWRMODEUSERDATA10 0x15BA
+#define PA_PWRMODEUSERDATA11 0x15BB
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
+
+/* PA power modes */
+enum {
+ FAST_MODE = 1,
+ SLOW_MODE = 2,
+ FASTAUTO_MODE = 4,
+ SLOWAUTO_MODE = 5,
+ UNCHANGED = 7,
+};
+
+/* PA TX/RX Frequency Series */
+enum {
+ PA_HS_MODE_A = 1,
+ PA_HS_MODE_B = 2,
+};
+
+enum ufs_pwm_gear_tag {
+ UFS_PWM_DONT_CHANGE, /* Don't change Gear */
+ UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
+ UFS_PWM_G2, /* PWM Gear 2 */
+ UFS_PWM_G3, /* PWM Gear 3 */
+ UFS_PWM_G4, /* PWM Gear 4 */
+ UFS_PWM_G5, /* PWM Gear 5 */
+ UFS_PWM_G6, /* PWM Gear 6 */
+ UFS_PWM_G7, /* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+ UFS_HS_DONT_CHANGE, /* Don't change Gear */
+ UFS_HS_G1, /* HS Gear 1 (default for reset) */
+ UFS_HS_G2, /* HS Gear 2 */
+ UFS_HS_G3, /* HS Gear 3 */
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD 0x2040
+#define DL_FC0PROTTIMEOUTVAL 0x2041
+#define DL_TC0REPLAYTIMEOUTVAL 0x2042
+#define DL_AFC0REQTIMEOUTVAL 0x2043
+#define DL_AFC0CREDITTHRESHOLD 0x2044
+#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_TC1TXFCTHRESHOLD 0x2060
+#define DL_FC1PROTTIMEOUTVAL 0x2061
+#define DL_TC1REPLAYTIMEOUTVAL 0x2062
+#define DL_AFC1REQTIMEOUTVAL 0x2063
+#define DL_AFC1CREDITTHRESHOLD 0x2064
+#define DL_TC1OUTACKTHRESHOLD 0x2065
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC1TXBUFFERSIZE 0x2006
+#define DL_PEERTC1PRESENT 0x2066
+#define DL_PEERTC1RXINITCREVAL 0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID 0x3000
+#define N_DEVICEID_VALID 0x3001
+#define N_TC0TXMAXSDUSIZE 0x3020
+#define N_TC1TXMAXSDUSIZE 0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS 0x4000
+#define T_NUMTESTFEATURES 0x4001
+#define T_CONNECTIONSTATE 0x4020
+#define T_PEERDEVICEID 0x4021
+#define T_PEERCPORTID 0x4022
+#define T_TRAFFICCLASS 0x4023
+#define T_PROTOCOLID 0x4024
+#define T_CPORTFLAGS 0x4025
+#define T_TXTOKENVALUE 0x4026
+#define T_RXTOKENVALUE 0x4027
+#define T_LOCALBUFFERSPACE 0x4028
+#define T_PEERBUFFERSPACE 0x4029
+#define T_CREDITSTOSEND 0x402A
+#define T_CPORTMODE 0x402B
+#define T_TC0TXMAXSDUSIZE 0x4060
+#define T_TC1TXMAXSDUSIZE 0x4061
+
+/* Boolean attribute values */
+enum {
+ FALSE = 0,
+ TRUE,
+};
+
+#endif /* _UNIPRO_H_ */
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
new file mode 100644
index 000000000..14e0c40a6
--- /dev/null
+++ b/drivers/scsi/ultrastor.c
@@ -0,0 +1,1210 @@
+/*
+ * ultrastor.c Copyright (C) 1992 David B. Gentzel
+ * Low-level SCSI driver for UltraStor 14F, 24F, and 34F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F and multiple command support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (ericy@cais.com).
+ * Thanks to UltraStor for providing the necessary documentation
+ *
+ * This is an old driver, for the 14F and 34F you should be using the
+ * u14-34f driver instead.
+ */
+
+/*
+ * TODO:
+ * 1. Find out why scatter/gather is limited to 16 requests per command.
+ * This is fixed, at least on the 24F, as of version 1.12 - CAE.
+ * 2. Look at command linking (mscp.command_link and
+ * mscp.command_link_id). (Does not work with many disks,
+ * and no performance increase. ERY).
+ * 3. Allow multiple adapters.
+ */
+
+/*
+ * NOTES:
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters. They all support command queueing
+ * and scatter/gather I/O. Some of them can also emulate the standard
+ * WD1003 interface for use with OS's which don't support SCSI. Here
+ * is the scoop on the various models:
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 14N - ISA HA with floppy support. I think that this is a non-DMA
+ * HA. Nothing further known.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VL-Bus Bus Master HA with floppy support (no WD1003 emulation).
+ *
+ * The 14F, 24F, and 34F are supported by this driver.
+ *
+ * Places flagged with a triple question-mark are things which are either
+ * unfinished, questionable, or wrong.
+ */
+
+/* Changes from version 1.11 alpha to 1.12
+ *
+ * Increased the size of the scatter-gather list to 33 entries for
+ * the 24F adapter (it was 16). I don't have the specs for the 14F
+ * or the 34F, so they may support larger s-g lists as well.
+ *
+ * Caleb Epstein <cae@jpmorgan.com>
+ */
+
+/* Changes from version 1.9 to 1.11
+ *
+ * Patches to bring this driver up to speed with the default kernel
+ * driver which supports only the 14F and 34F adapters. This version
+ * should compile cleanly into 0.99.13, 0.99.12 and probably 0.99.11.
+ *
+ * Fixes from Eric Youngdale to fix a few possible race conditions and
+ * several problems with bit testing operations (insufficient
+ * parentheses).
+ *
+ * Removed the ultrastor_abort() and ultrastor_reset() functions
+ * (enclosed them in #if 0 / #endif). These functions, at least on
+ * the 24F, cause the SCSI bus to do odd things and generally lead to
+ * kernel panics and machine hangs. This is like the Adaptec code.
+ *
+ * Use check/snarf_region for 14f, 34f to avoid I/O space address conflicts.
+ */
+
+/* Changes from version 1.8 to version 1.9
+ *
+ * 0.99.11 patches (cae@jpmorgan.com) */
+
+/* Changes from version 1.7 to version 1.8
+ *
+ * Better error reporting.
+ */
+
+/* Changes from version 1.6 to version 1.7
+ *
+ * Removed CSIR command code.
+ *
+ * Better race condition avoidance (xchgb function added).
+ *
+ * Set ICM and OGM status to zero at probe (24F)
+ *
+ * reset sends soft reset to UltraStor adapter
+ *
+ * reset adapter if adapter interrupts with an invalid MSCP address
+ *
+ * handle aborted command interrupt (24F)
+ *
+ */
+
+/* Changes from version 1.5 to version 1.6:
+ *
+ * Read MSCP address from ICM _before_ clearing the interrupt flag.
+ * This fixes a race condition.
+ */
+
+/* Changes from version 1.4 to version 1.5:
+ *
+ * Abort now calls done when multiple commands are enabled.
+ *
+ * Clear busy when aborted command finishes, not when abort is called.
+ *
+ * More debugging messages for aborts.
+ */
+
+/* Changes from version 1.3 to version 1.4:
+ *
+ * Enable automatic request of sense data on error (requires newer version
+ * of scsi.c to be useful).
+ *
+ * Fix PORT_OVERRIDE for 14F.
+ *
+ * Fix abort and reset to work properly (config.aborted wasn't cleared
+ * after it was tested, so after a command abort no further commands would
+ * work).
+ *
+ * Boot time test to enable SCSI bus reset (defaults to not allowing reset).
+ *
+ * Fix test for OGM busy -- the busy bit is in different places on the 24F.
+ *
+ * Release ICM slot by clearing first byte on 24F.
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "ultrastor.h"
+
+#define FALSE 0
+#define TRUE 1
+
+#ifndef ULTRASTOR_DEBUG
+#define ULTRASTOR_DEBUG (UD_ABORT|UD_CSIR|UD_RESET)
+#endif
+
+#define VERSION "1.12"
+
+#define PACKED __attribute__((packed))
+#define ALIGNED(x) __attribute__((aligned(x)))
+
+
+/* The 14F uses an array of 4-byte ints for its scatter/gather list.
+ The data can be unaligned, but need not be. It's easier to give
+ the list normal alignment since it doesn't need to fit into a
+ packed structure. */
+
+typedef struct {
+ u32 address;
+ u32 num_bytes;
+} ultrastor_sg_list;
+
+
+/* MailBox SCSI Command Packet. Basic command structure for communicating
+ with controller. */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target_id: 3; /* target SCSI id */
+ unsigned char ch_no: 2; /* SCSI channel (always 0 for 14f) */
+ unsigned char lun: 3; /* logical unit number */
+ unsigned int transfer_data PACKED; /* transfer data pointer */
+ unsigned int transfer_data_length PACKED; /* length in bytes */
+ unsigned int command_link PACKED; /* for linking command chains */
+ unsigned char scsi_command_link_id; /* identifies command in chain */
+ unsigned char number_of_sg_list; /* (if sg is set) 8 bytes per list */
+ unsigned char length_of_sense_byte;
+ unsigned char length_of_scsi_cdbs; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ u32 sense_data PACKED;
+ /* The following fields are for software only. They are included in
+ the MSCP structure because they are associated with SCSI requests. */
+ void (*done) (struct scsi_cmnd *);
+ struct scsi_cmnd *SCint;
+ ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
+};
+
+
+/* Port addresses (relative to the base address) */
+#define U14F_PRODUCT_ID(port) ((port) + 0x4)
+#define CONFIG(port) ((port) + 0x6)
+
+/* Port addresses relative to the doorbell base address. */
+#define LCL_DOORBELL_MASK(port) ((port) + 0x0)
+#define LCL_DOORBELL_INTR(port) ((port) + 0x1)
+#define SYS_DOORBELL_MASK(port) ((port) + 0x2)
+#define SYS_DOORBELL_INTR(port) ((port) + 0x3)
+
+
+/* Used to store configuration info read from config i/o registers. Most of
+ this is not used yet, but might as well save it.
+
+ This structure also holds port addresses that are not at the same offset
+ on the 14F and 24F.
+
+ This structure holds all data that must be duplicated to support multiple
+ adapters. */
+
+static struct ultrastor_config
+{
+ unsigned short port_address; /* base address of card */
+ unsigned short doorbell_address; /* base address of doorbell CSRs */
+ unsigned short ogm_address; /* base address of OGM */
+ unsigned short icm_address; /* base address of ICM */
+ const void *bios_segment;
+ unsigned char interrupt: 4;
+ unsigned char dma_channel: 3;
+ unsigned char bios_drive_number: 1;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned char ha_scsi_id: 3;
+ unsigned char subversion: 4;
+ unsigned char revision;
+ /* The slot number is used to distinguish the 24F (slot != 0) from
+ the 14F and 34F (slot == 0). */
+ unsigned char slot;
+
+#ifdef PRINT_U24F_VERSION
+ volatile int csir_done;
+#endif
+
+ /* A pool of MSCP structures for this adapter, and a bitmask of
+ busy structures. (If ULTRASTOR_14F_MAX_CMDS == 1, a 1 byte
+ busy flag is used instead.) */
+
+#if ULTRASTOR_MAX_CMDS == 1
+ unsigned char mscp_busy;
+#else
+ unsigned long mscp_free;
+#endif
+ volatile unsigned char aborted[ULTRASTOR_MAX_CMDS];
+ struct mscp mscp[ULTRASTOR_MAX_CMDS];
+} config = {0};
+
+/* Set this to 1 to reset the SCSI bus on error. */
+static int ultrastor_bus_reset;
+
+
+/* Allowed BIOS base addresses (NULL indicates reserved) */
+static const void *const bios_segment_table[8] = {
+ NULL, (void *)0xC4000, (void *)0xC8000, (void *)0xCC000,
+ (void *)0xD0000, (void *)0xD4000, (void *)0xD8000, (void *)0xDC000,
+};
+
+/* Allowed IRQs for 14f */
+static const unsigned char interrupt_table_14f[4] = { 15, 14, 11, 10 };
+
+/* Allowed DMA channels for 14f (0 indicates reserved) */
+static const unsigned char dma_channel_table_14f[4] = { 5, 6, 7, 0 };
+
+/* Head/sector mappings allowed by 14f */
+static const struct {
+ unsigned char heads;
+ unsigned char sectors;
+} mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } };
+
+#ifndef PORT_OVERRIDE
+/* ??? A probe of address 0x310 screws up NE2000 cards */
+static const unsigned short ultrastor_ports_14f[] = {
+ 0x330, 0x340, /*0x310,*/ 0x230, 0x240, 0x210, 0x130, 0x140,
+};
+#endif
+
+static void ultrastor_interrupt(void *);
+static irqreturn_t do_ultrastor_interrupt(int, void *);
+static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt);
+
+
+/* Always called with host lock held */
+
+static inline int find_and_clear_bit_16(unsigned long *field)
+{
+ int rv;
+
+ if (*field == 0)
+ panic("No free mscp");
+
+ asm volatile (
+ "xorl %0,%0\n\t"
+ "0: bsfw %1,%w0\n\t"
+ "btr %0,%1\n\t"
+ "jnc 0b"
+ : "=&r" (rv), "+m" (*field) :);
+
+ return rv;
+}
+
+/* This has been re-implemented with the help of Richard Earnshaw,
+ <rwe@pegasus.esprit.ec.org> and works with gcc-2.5.8 and gcc-2.6.0.
+ The instability noted by jfc below appears to be a bug in
+ gcc-2.5.x when compiling w/o optimization. --Caleb
+
+ This asm is fragile: it doesn't work without the casts and it may
+ not work without optimization. Maybe I should add a swap builtin
+ to gcc. --jfc */
+static inline unsigned char xchgb(unsigned char reg,
+ volatile unsigned char *mem)
+{
+ __asm__ ("xchgb %0,%1" : "=q" (reg), "=m" (*mem) : "0" (reg));
+ return reg;
+}
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+
+/* Always called with the host lock held */
+static void log_ultrastor_abort(struct ultrastor_config *config,
+ int command)
+{
+ static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
+ int i;
+
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ fmt[20 + i*2] = ' ';
+ if (! (config->mscp_free & (1 << i)))
+ fmt[21 + i*2] = '0' + config->mscp[i].target_id;
+ else
+ fmt[21 + i*2] = '-';
+ }
+ fmt[20 + ULTRASTOR_MAX_CMDS * 2] = '\n';
+ fmt[21 + ULTRASTOR_MAX_CMDS * 2] = 0;
+ printk(fmt, command, &config->mscp[command], config->mscp_free);
+
+}
+#endif
+
+static int ultrastor_14f_detect(struct scsi_host_template * tpnt)
+{
+ size_t i;
+ unsigned char in_byte, version_byte = 0;
+ struct config_1 {
+ unsigned char bios_segment: 3;
+ unsigned char removable_disks_as_fixed: 1;
+ unsigned char interrupt: 2;
+ unsigned char dma_channel: 2;
+ } config_1;
+ struct config_2 {
+ unsigned char ha_scsi_id: 3;
+ unsigned char mapping_mode: 2;
+ unsigned char bios_drive_number: 1;
+ unsigned char tfr_port: 2;
+ } config_2;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: called\n");
+#endif
+
+ /* If a 24F has already been configured, don't look for a 14F. */
+ if (config.bios_segment)
+ return FALSE;
+
+#ifdef PORT_OVERRIDE
+ if(!request_region(PORT_OVERRIDE, 0xc, "ultrastor")) {
+ printk("Ultrastor I/O space already in use\n");
+ return FALSE;
+ };
+ config.port_address = PORT_OVERRIDE;
+#else
+ for (i = 0; i < ARRAY_SIZE(ultrastor_ports_14f); i++) {
+ if(!request_region(ultrastor_ports_14f[i], 0x0c, "ultrastor")) continue;
+ config.port_address = ultrastor_ports_14f[i];
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: testing port address %03X\n", config.port_address);
+#endif
+
+ in_byte = inb(U14F_PRODUCT_ID(config.port_address));
+ if (in_byte != US14F_PRODUCT_ID_0) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+ printk("US14F: detect: wrong product ID 0 - %02X\n", in_byte);
+# else
+ printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+ goto out_release_port;
+#else
+ release_region(config.port_address, 0x0c);
+ continue;
+#endif
+ }
+ in_byte = inb(U14F_PRODUCT_ID(config.port_address) + 1);
+ /* Only upper nibble is significant for Product ID 1 */
+ if ((in_byte & 0xF0) != US14F_PRODUCT_ID_1) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+ printk("US14F: detect: wrong product ID 1 - %02X\n", in_byte);
+# else
+ printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+ goto out_release_port;
+#else
+ release_region(config.port_address, 0x0c);
+ continue;
+#endif
+ }
+ version_byte = in_byte;
+#ifndef PORT_OVERRIDE
+ break;
+ }
+ if (i == ARRAY_SIZE(ultrastor_ports_14f)) {
+# if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: no port address found!\n");
+# endif
+ /* all ports probed already released - we can just go straight out */
+ return FALSE;
+ }
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: adapter found at port address %03X\n",
+ config.port_address);
+#endif
+
+ /* Set local doorbell mask to disallow bus reset unless
+ ultrastor_bus_reset is true. */
+ outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(config.port_address));
+
+ /* All above tests passed, must be the right thing. Get some useful
+ info. */
+
+ /* Register the I/O space that we use */
+
+ *(char *)&config_1 = inb(CONFIG(config.port_address + 0));
+ *(char *)&config_2 = inb(CONFIG(config.port_address + 1));
+ config.bios_segment = bios_segment_table[config_1.bios_segment];
+ config.doorbell_address = config.port_address;
+ config.ogm_address = config.port_address + 0x8;
+ config.icm_address = config.port_address + 0xC;
+ config.interrupt = interrupt_table_14f[config_1.interrupt];
+ config.ha_scsi_id = config_2.ha_scsi_id;
+ config.heads = mapping_table[config_2.mapping_mode].heads;
+ config.sectors = mapping_table[config_2.mapping_mode].sectors;
+ config.bios_drive_number = config_2.bios_drive_number;
+ config.subversion = (version_byte & 0x0F);
+ if (config.subversion == U34F)
+ config.dma_channel = 0;
+ else
+ config.dma_channel = dma_channel_table_14f[config_1.dma_channel];
+
+ if (!config.bios_segment) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: not detected.\n");
+#endif
+ goto out_release_port;
+ }
+
+ /* Final consistency check, verify previous info. */
+ if (config.subversion != U34F)
+ if (!config.dma_channel || !(config_2.tfr_port & 0x2)) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: consistency check failed\n");
+#endif
+ goto out_release_port;
+ }
+
+ /* If we were TRULY paranoid, we could issue a host adapter inquiry
+ command here and verify the data returned. But frankly, I'm
+ exhausted! */
+
+ /* Finally! Now I'm satisfied... */
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: detect succeeded\n"
+ " Port address: %03X\n"
+ " BIOS segment: %05X\n"
+ " Interrupt: %u\n"
+ " DMA channel: %u\n"
+ " H/A SCSI ID: %u\n"
+ " Subversion: %u\n",
+ config.port_address, config.bios_segment, config.interrupt,
+ config.dma_channel, config.ha_scsi_id, config.subversion);
+#endif
+ tpnt->this_id = config.ha_scsi_id;
+ tpnt->unchecked_isa_dma = (config.subversion != U34F);
+
+#if ULTRASTOR_MAX_CMDS > 1
+ config.mscp_free = ~0;
+#endif
+
+ /*
+ * Brrr, &config.mscp[0].SCint->host) it is something magical....
+ * XXX and FIXME
+ */
+ if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", &config.mscp[0].SCint->device->host)) {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ goto out_release_port;
+ }
+ if (config.dma_channel && request_dma(config.dma_channel,"Ultrastor")) {
+ printk("Unable to allocate DMA channel %u for UltraStor controller.\n",
+ config.dma_channel);
+ free_irq(config.interrupt, NULL);
+ goto out_release_port;
+ }
+ tpnt->sg_tablesize = ULTRASTOR_14F_MAX_SG;
+ printk("UltraStor driver version" VERSION ". Using %d SG lists.\n",
+ ULTRASTOR_14F_MAX_SG);
+
+ return TRUE;
+out_release_port:
+ release_region(config.port_address, 0x0c);
+ return FALSE;
+}
+
+static int ultrastor_24f_detect(struct scsi_host_template * tpnt)
+{
+ int i;
+ struct Scsi_Host * shpnt = NULL;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US24F: detect");
+#endif
+
+ /* probe each EISA slot at slot address C80 */
+ for (i = 1; i < 15; i++)
+ {
+ unsigned char config_1, config_2;
+ unsigned short addr = (i << 12) | ULTRASTOR_24F_PORT;
+
+ if (inb(addr) != US24F_PRODUCT_ID_0 &&
+ inb(addr+1) != US24F_PRODUCT_ID_1 &&
+ inb(addr+2) != US24F_PRODUCT_ID_2)
+ continue;
+
+ config.revision = inb(addr+3);
+ config.slot = i;
+ if (! (inb(addr+4) & 1))
+ {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("U24F: found disabled card in slot %u\n", i);
+#endif
+ continue;
+ }
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("U24F: found card in slot %u\n", i);
+#endif
+ config_1 = inb(addr + 5);
+ config.bios_segment = bios_segment_table[config_1 & 7];
+ switch(config_1 >> 4)
+ {
+ case 1:
+ config.interrupt = 15;
+ break;
+ case 2:
+ config.interrupt = 14;
+ break;
+ case 4:
+ config.interrupt = 11;
+ break;
+ case 8:
+ config.interrupt = 10;
+ break;
+ default:
+ printk("U24F: invalid IRQ\n");
+ return FALSE;
+ }
+
+ /* BIOS addr set */
+ /* base port set */
+ config.port_address = addr;
+ config.doorbell_address = addr + 12;
+ config.ogm_address = addr + 0x17;
+ config.icm_address = addr + 0x1C;
+ config_2 = inb(addr + 7);
+ config.ha_scsi_id = config_2 & 7;
+ config.heads = mapping_table[(config_2 >> 3) & 3].heads;
+ config.sectors = mapping_table[(config_2 >> 3) & 3].sectors;
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US24F: detect: detect succeeded\n"
+ " Port address: %03X\n"
+ " BIOS segment: %05X\n"
+ " Interrupt: %u\n"
+ " H/A SCSI ID: %u\n",
+ config.port_address, config.bios_segment,
+ config.interrupt, config.ha_scsi_id);
+#endif
+ tpnt->this_id = config.ha_scsi_id;
+ tpnt->unchecked_isa_dma = 0;
+ tpnt->sg_tablesize = ULTRASTOR_24F_MAX_SG;
+
+ shpnt = scsi_register(tpnt, 0);
+ if (!shpnt) {
+ printk(KERN_WARNING "(ultrastor:) Could not register scsi device. Aborting registration.\n");
+ free_irq(config.interrupt, do_ultrastor_interrupt);
+ return FALSE;
+ }
+
+ if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", shpnt))
+ {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
+
+ shpnt->irq = config.interrupt;
+ shpnt->dma_channel = config.dma_channel;
+ shpnt->io_port = config.port_address;
+
+#if ULTRASTOR_MAX_CMDS > 1
+ config.mscp_free = ~0;
+#endif
+ /* Mark ICM and OGM free */
+ outb(0, addr + 0x16);
+ outb(0, addr + 0x1B);
+
+ /* Set local doorbell mask to disallow bus reset unless
+ ultrastor_bus_reset is true. */
+ outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(addr+12));
+ outb(0x02, SYS_DOORBELL_MASK(addr+12));
+ printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
+ tpnt->sg_tablesize);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static int ultrastor_detect(struct scsi_host_template * tpnt)
+{
+ tpnt->proc_name = "ultrastor";
+ return ultrastor_14f_detect(tpnt) || ultrastor_24f_detect(tpnt);
+}
+
+static int ultrastor_release(struct Scsi_Host *shost)
+{
+ if (shost->irq)
+ free_irq(shost->irq, NULL);
+ if (shost->dma_channel != 0xff)
+ free_dma(shost->dma_channel);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_unregister(shost);
+ return 0;
+}
+
+static const char *ultrastor_info(struct Scsi_Host * shpnt)
+{
+ static char buf[64];
+
+ if (config.slot)
+ sprintf(buf, "UltraStor 24F SCSI @ Slot %u IRQ%u",
+ config.slot, config.interrupt);
+ else if (config.subversion)
+ sprintf(buf, "UltraStor 34F SCSI @ Port %03X BIOS %05X IRQ%u",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt);
+ else
+ sprintf(buf, "UltraStor 14F SCSI @ Port %03X BIOS %05X IRQ%u DMA%u",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt, config.dma_channel);
+ return buf;
+}
+
+static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
+{
+ struct scatterlist *sg;
+ long transfer_length = 0;
+ int i, max;
+
+ max = scsi_sg_count(SCpnt);
+ scsi_for_each_sg(SCpnt, sg, max, i) {
+ mscp->sglist[i].address = isa_page_to_bus(sg_page(sg)) + sg->offset;
+ mscp->sglist[i].num_bytes = sg->length;
+ transfer_length += sg->length;
+ }
+ mscp->number_of_sg_list = max;
+ mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
+ /* ??? May not be necessary. Docs are unclear as to whether transfer
+ length field is ignored or whether it should be set to the total
+ number of bytes of the transfer. */
+ mscp->transfer_data_length = transfer_length;
+}
+
+static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct mscp *my_mscp;
+#if ULTRASTOR_MAX_CMDS > 1
+ int mscp_index;
+#endif
+ unsigned int status;
+
+ /* Next test is for debugging; "can't happen" */
+ if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
+ panic("ultrastor_queuecommand: no free MSCP\n");
+ mscp_index = find_and_clear_bit_16(&config.mscp_free);
+
+ /* Has the command been aborted? */
+ if (xchgb(0xff, &config.aborted[mscp_index]) != 0)
+ {
+ status = DID_ABORT << 16;
+ goto aborted;
+ }
+
+ my_mscp = &config.mscp[mscp_index];
+
+ *(unsigned char *)my_mscp = OP_SCSI | (DTD_SCSI << 3);
+
+ /* Tape drives don't work properly if the cache is used. The SCSI
+ READ command for a tape doesn't have a block offset, and the adapter
+ incorrectly assumes that all reads from the tape read the same
+ blocks. Results will depend on read buffer size and other disk
+ activity.
+
+ ??? Which other device types should never use the cache? */
+ my_mscp->ca = SCpnt->device->type != TYPE_TAPE;
+ my_mscp->target_id = SCpnt->device->id;
+ my_mscp->ch_no = 0;
+ my_mscp->lun = SCpnt->device->lun;
+ if (scsi_sg_count(SCpnt)) {
+ /* Set scatter/gather flag in SCSI command packet */
+ my_mscp->sg = TRUE;
+ build_sg_list(my_mscp, SCpnt);
+ } else {
+ /* Unset scatter/gather flag in SCSI command packet */
+ my_mscp->sg = FALSE;
+ my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
+ my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
+ }
+ my_mscp->command_link = 0; /*???*/
+ my_mscp->scsi_command_link_id = 0; /*???*/
+ my_mscp->length_of_sense_byte = SCSI_SENSE_BUFFERSIZE;
+ my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
+ memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
+ my_mscp->adapter_status = 0;
+ my_mscp->target_status = 0;
+ my_mscp->sense_data = isa_virt_to_bus(&SCpnt->sense_buffer);
+ my_mscp->done = done;
+ my_mscp->SCint = SCpnt;
+ SCpnt->host_scribble = (unsigned char *)my_mscp;
+
+ /* Find free OGM slot. On 24F, look for OGM status byte == 0.
+ On 14F and 34F, wait for local interrupt pending flag to clear.
+
+ FIXME: now we are using new_eh we should punt here and let the
+ midlayer sort it out */
+
+retry:
+ if (config.slot)
+ while (inb(config.ogm_address - 1) != 0 && config.aborted[mscp_index] == 0xff)
+ barrier();
+
+ /* else??? */
+
+ while ((inb(LCL_DOORBELL_INTR(config.doorbell_address)) & (config.slot ? 2 : 1)) && config.aborted[mscp_index] == 0xff)
+ barrier();
+
+ /* To avoid race conditions, keep the code to write to the adapter
+ atomic. This simplifies the abort code. Right now the
+ scsi mid layer has the host_lock already held
+ */
+
+ if (inb(LCL_DOORBELL_INTR(config.doorbell_address)) & (config.slot ? 2 : 1))
+ goto retry;
+
+ status = xchgb(0, &config.aborted[mscp_index]);
+ if (status != 0xff) {
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+ printk("USx4F: queuecommand: aborted\n");
+#if ULTRASTOR_MAX_CMDS > 1
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+#endif
+ status <<= 16;
+
+ aborted:
+ set_bit(mscp_index, &config.mscp_free);
+ /* If the driver queues commands, call the done proc here. Otherwise
+ return an error. */
+#if ULTRASTOR_MAX_CMDS > 1
+ SCpnt->result = status;
+ done(SCpnt);
+ return 0;
+#else
+ return status;
+#endif
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl(isa_virt_to_bus(my_mscp), config.ogm_address);
+
+ /* Issue OGM interrupt */
+ if (config.slot) {
+ /* Write OGM command register on 24F */
+ outb(1, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+ } else {
+ outb(0x1, LCL_DOORBELL_INTR(config.doorbell_address));
+ }
+
+#if (ULTRASTOR_DEBUG & UD_COMMAND)
+ printk("USx4F: queuecommand: returning\n");
+#endif
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(ultrastor_queuecommand)
+
+/* This code must deal with 2 cases:
+
+ 1. The command has not been written to the OGM. In this case, set
+ the abort flag and return.
+
+ 2. The command has been written to the OGM and is stuck somewhere in
+ the adapter.
+
+ 2a. On a 24F, ask the adapter to abort the command. It will interrupt
+ when it does.
+
+ 2b. Call the command's done procedure.
+
+ */
+
+static int ultrastor_abort(struct scsi_cmnd *SCpnt)
+{
+#if ULTRASTOR_DEBUG & UD_ABORT
+ char out[108];
+ unsigned char icm_status = 0, ogm_status = 0;
+ unsigned int icm_addr = 0, ogm_addr = 0;
+#endif
+ unsigned int mscp_index;
+ unsigned char old_aborted;
+ unsigned long flags;
+ void (*done)(struct scsi_cmnd *);
+ struct Scsi_Host *host = SCpnt->device->host;
+
+ if(config.slot)
+ return FAILED; /* Do not attempt an abort for the 24f */
+
+ /* Simple consistency checking */
+ if(!SCpnt->host_scribble)
+ return FAILED;
+
+ mscp_index = ((struct mscp *)SCpnt->host_scribble) - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS)
+ panic("Ux4F aborting invalid MSCP");
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.slot)
+ {
+ int port0 = (config.slot << 12) | 0xc80;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ strcpy(out, "OGM %d:%x ICM %d:%x ports: ");
+ for (i = 0; i < 16; i++)
+ {
+ unsigned char p = inb(port0 + i);
+ out[28 + i * 3] = "0123456789abcdef"[p >> 4];
+ out[29 + i * 3] = "0123456789abcdef"[p & 15];
+ out[30 + i * 3] = ' ';
+ }
+ out[28 + i * 3] = '\n';
+ out[29 + i * 3] = 0;
+ ogm_status = inb(port0 + 22);
+ ogm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 23));
+ icm_status = inb(port0 + 27);
+ icm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 28));
+ spin_unlock_irqrestore(host->host_lock, flags);
+ }
+
+ /* First check to see if an interrupt is pending. I suspect the SiS
+ chipset loses interrupts. (I also suspect is mangles data, but
+ one bug at a time... */
+ if (config.slot ? inb(config.icm_address - 1) == 2 :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+ {
+ printk("Ux4F: abort while completed command pending\n");
+
+ spin_lock_irqsave(host->host_lock, flags);
+ /* FIXME: Ewww... need to think about passing host around properly */
+ ultrastor_interrupt(NULL);
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return SUCCESS;
+ }
+#endif
+
+ old_aborted = xchgb(DID_ABORT, &config.aborted[mscp_index]);
+
+ /* aborted == 0xff is the signal that queuecommand has not yet sent
+ the command. It will notice the new abort flag and fail. */
+ if (old_aborted == 0xff)
+ return SUCCESS;
+
+ /* On 24F, send an abort MSCP request. The adapter will interrupt
+ and the interrupt handler will call done. */
+ if (config.slot && inb(config.ogm_address - 1) == 0)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ outl(isa_virt_to_bus(&config.mscp[mscp_index]), config.ogm_address);
+ udelay(8);
+ outb(0x80, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+ printk(out, ogm_status, ogm_addr, icm_status, icm_addr);
+#endif
+ spin_unlock_irqrestore(host->host_lock, flags);
+ /* FIXME: add a wait for the abort to complete */
+ return SUCCESS;
+ }
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+
+ /* Can't request a graceful abort. Either this is not a 24F or
+ the OGM is busy. Don't free the command -- the adapter might
+ still be using it. Setting SCint = 0 causes the interrupt
+ handler to ignore the command. */
+
+ /* FIXME - devices that implement soft resets will still be running
+ the command after a bus reset. We would probably rather leave
+ the command in the queue. The upper level code will automatically
+ leave the command in the active state instead of requeueing it. ERY */
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.mscp[mscp_index].SCint != SCpnt)
+ printk("abort: command mismatch, %p != %p\n",
+ config.mscp[mscp_index].SCint, SCpnt);
+#endif
+ if (config.mscp[mscp_index].SCint == NULL)
+ return FAILED;
+
+ if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
+ config.mscp[mscp_index].SCint = NULL;
+ done = config.mscp[mscp_index].done;
+ config.mscp[mscp_index].done = NULL;
+ SCpnt->result = DID_ABORT << 16;
+
+ /* Take the host lock to guard against scsi layer re-entry */
+ done(SCpnt);
+
+ /* Need to set a timeout here in case command never completes. */
+ return SUCCESS;
+}
+
+static int ultrastor_host_reset(struct scsi_cmnd * SCpnt)
+{
+ unsigned long flags;
+ int i;
+ struct Scsi_Host *host = SCpnt->device->host;
+
+#if (ULTRASTOR_DEBUG & UD_RESET)
+ printk("US14F: reset: called\n");
+#endif
+
+ if(config.slot)
+ return FAILED;
+
+ spin_lock_irqsave(host->host_lock, flags);
+ /* Reset the adapter and SCSI bus. The SCSI bus reset can be
+ inhibited by clearing ultrastor_bus_reset before probe. */
+ outb(0xc0, LCL_DOORBELL_INTR(config.doorbell_address));
+ if (config.slot)
+ {
+ outb(0, config.ogm_address - 1);
+ outb(0, config.icm_address - 1);
+ }
+
+#if ULTRASTOR_MAX_CMDS == 1
+ if (config.mscp_busy && config.mscp->done && config.mscp->SCint)
+ {
+ config.mscp->SCint->result = DID_RESET << 16;
+ config.mscp->done(config.mscp->SCint);
+ }
+ config.mscp->SCint = 0;
+#else
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ if (! (config.mscp_free & (1 << i)) &&
+ config.mscp[i].done && config.mscp[i].SCint)
+ {
+ config.mscp[i].SCint->result = DID_RESET << 16;
+ config.mscp[i].done(config.mscp[i].SCint);
+ config.mscp[i].done = NULL;
+ }
+ config.mscp[i].SCint = NULL;
+ }
+#endif
+
+ /* FIXME - if the device implements soft resets, then the command
+ will still be running. ERY
+
+ Even bigger deal with new_eh!
+ */
+
+ memset((unsigned char *)config.aborted, 0, sizeof config.aborted);
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = 0;
+#else
+ config.mscp_free = ~0;
+#endif
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return SUCCESS;
+
+}
+
+int ultrastor_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int * dkinfo)
+{
+ int size = capacity;
+ unsigned int s = config.heads * config.sectors;
+
+ dkinfo[0] = config.heads;
+ dkinfo[1] = config.sectors;
+ dkinfo[2] = size / s; /* Ignore partial cylinders */
+#if 0
+ if (dkinfo[2] > 1024)
+ dkinfo[2] = 1024;
+#endif
+ return 0;
+}
+
+static void ultrastor_interrupt(void *dev_id)
+{
+ unsigned int status;
+#if ULTRASTOR_MAX_CMDS > 1
+ unsigned int mscp_index;
+#endif
+ struct mscp *mscp;
+ void (*done) (struct scsi_cmnd *);
+ struct scsi_cmnd *SCtmp;
+
+#if ULTRASTOR_MAX_CMDS == 1
+ mscp = &config.mscp[0];
+#else
+ mscp = (struct mscp *)isa_bus_to_virt(inl(config.icm_address));
+ mscp_index = mscp - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS) {
+ printk("Ux4F interrupt: bad MSCP address %x\n", (unsigned int) mscp);
+ /* A command has been lost. Reset and report an error
+ for all commands. */
+ ultrastor_host_reset(dev_id);
+ return;
+ }
+#endif
+
+ /* Clean ICM slot (set ICMINT bit to 0) */
+ if (config.slot) {
+ unsigned char icm_status = inb(config.icm_address - 1);
+#if ULTRASTOR_DEBUG & (UD_INTERRUPT|UD_ERROR|UD_ABORT)
+ if (icm_status != 1 && icm_status != 2)
+ printk("US24F: ICM status %x for MSCP %d (%x)\n", icm_status,
+ mscp_index, (unsigned int) mscp);
+#endif
+ /* The manual says clear interrupt then write 0 to ICM status.
+ This seems backwards, but I'll do it anyway. --jfc */
+ outb(2, SYS_DOORBELL_INTR(config.doorbell_address));
+ outb(0, config.icm_address - 1);
+ if (icm_status == 4) {
+ printk("UltraStor abort command failed\n");
+ return;
+ }
+ if (icm_status == 3) {
+ void (*done)(struct scsi_cmnd *) = mscp->done;
+ if (done) {
+ mscp->done = NULL;
+ mscp->SCint->result = DID_ABORT << 16;
+ done(mscp->SCint);
+ }
+ return;
+ }
+ } else {
+ outb(1, SYS_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ SCtmp = mscp->SCint;
+ mscp->SCint = NULL;
+
+ if (!SCtmp)
+ {
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
+#endif
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+ config.aborted[mscp_index] = 0;
+ return;
+ }
+
+ /* Save done locally and zero before calling. This is needed as
+ once we call done, we may get another command queued before this
+ interrupt service routine can return. */
+ done = mscp->done;
+ mscp->done = NULL;
+
+ /* Let the higher levels know that we're done */
+ switch (mscp->adapter_status)
+ {
+ case 0:
+ status = DID_OK << 16;
+ break;
+ case 0x01: /* invalid command */
+ case 0x02: /* invalid parameters */
+ case 0x03: /* invalid data list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ case 0x84: /* SCSI bus abort */
+ status = DID_ABORT << 16;
+ break;
+ case 0x91:
+ status = DID_TIME_OUT << 16;
+ break;
+ }
+
+ SCtmp->result = status | mscp->target_status;
+
+ SCtmp->host_scribble = NULL;
+
+ /* Free up mscp block for next command */
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ if (config.aborted[mscp_index])
+ printk("Ux4 interrupt: MSCP %d (%x) aborted = %d\n",
+ mscp_index, (unsigned int) mscp, config.aborted[mscp_index]);
+#endif
+ config.aborted[mscp_index] = 0;
+
+ if (done)
+ done(SCtmp);
+ else
+ printk("US14F: interrupt: unexpected interrupt\n");
+
+ if (config.slot ? inb(config.icm_address - 1) :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+#if (ULTRASTOR_DEBUG & UD_MULTI_CMD)
+ printk("Ux4F: multiple commands completed\n");
+#else
+ ;
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_INTERRUPT)
+ printk("USx4F: interrupt: returning\n");
+#endif
+}
+
+static irqreturn_t do_ultrastor_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
+ ultrastor_interrupt(dev_id);
+ spin_unlock_irqrestore(dev->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template driver_template = {
+ .name = "UltraStor 14F/24F/34F",
+ .detect = ultrastor_detect,
+ .release = ultrastor_release,
+ .info = ultrastor_info,
+ .queuecommand = ultrastor_queuecommand,
+ .eh_abort_handler = ultrastor_abort,
+ .eh_host_reset_handler = ultrastor_host_reset,
+ .bios_param = ultrastor_biosparam,
+ .can_queue = ULTRASTOR_MAX_CMDS,
+ .sg_tablesize = ULTRASTOR_14F_MAX_SG,
+ .cmd_per_lun = ULTRASTOR_MAX_CMDS_PER_LUN,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+#include "scsi_module.c"
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
new file mode 100644
index 000000000..165c18b5c
--- /dev/null
+++ b/drivers/scsi/ultrastor.h
@@ -0,0 +1,80 @@
+/*
+ * ultrastor.c (C) 1991 David B. Gentzel
+ * Low-level scsi driver for UltraStor 14F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (eric@tantalus.nrl.navy.mil).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+#ifndef _ULTRASTOR_H
+#define _ULTRASTOR_H
+
+static int ultrastor_detect(struct scsi_host_template *);
+static const char *ultrastor_info(struct Scsi_Host *shpnt);
+static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static int ultrastor_abort(struct scsi_cmnd *);
+static int ultrastor_host_reset(struct scsi_cmnd *);
+static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
+ sector_t, int *);
+
+
+#define ULTRASTOR_14F_MAX_SG 16
+#define ULTRASTOR_24F_MAX_SG 33
+
+#define ULTRASTOR_MAX_CMDS_PER_LUN 5
+#define ULTRASTOR_MAX_CMDS 16
+
+#define ULTRASTOR_24F_PORT 0xC80
+
+
+#ifdef ULTRASTOR_PRIVATE
+
+#define UD_ABORT 0x0001
+#define UD_COMMAND 0x0002
+#define UD_DETECT 0x0004
+#define UD_INTERRUPT 0x0008
+#define UD_RESET 0x0010
+#define UD_MULTI_CMD 0x0020
+#define UD_CSIR 0x0040
+#define UD_ERROR 0x0080
+
+/* #define PORT_OVERRIDE 0x330 */
+
+/* Values for the PRODUCT_ID ports for the 14F */
+#define US14F_PRODUCT_ID_0 0x56
+#define US14F_PRODUCT_ID_1 0x40 /* NOTE: Only upper nibble is used */
+
+#define US24F_PRODUCT_ID_0 0x56
+#define US24F_PRODUCT_ID_1 0x63
+#define US24F_PRODUCT_ID_2 0x02
+
+/* Subversion values */
+#define U14F 0
+#define U34F 1
+
+/* MSCP field values */
+
+/* Opcode */
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+
+/* Date Transfer Direction */
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+
+/* Host Adapter command subcodes */
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#endif
+
+#endif
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
new file mode 100644
index 000000000..f164f24a4
--- /dev/null
+++ b/drivers/scsi/virtio_scsi.c
@@ -0,0 +1,1157 @@
+/*
+ * Virtio SCSI HBA driver
+ *
+ * Copyright IBM Corp. 2010
+ * Copyright Red Hat, Inc. 2011
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_scsi.h>
+#include <linux/cpu.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/seqlock.h>
+
+#define VIRTIO_SCSI_MEMPOOL_SZ 64
+#define VIRTIO_SCSI_EVENT_LEN 8
+#define VIRTIO_SCSI_VQ_BASE 2
+
+/* Command queue element */
+struct virtio_scsi_cmd {
+ struct scsi_cmnd *sc;
+ struct completion *comp;
+ union {
+ struct virtio_scsi_cmd_req cmd;
+ struct virtio_scsi_cmd_req_pi cmd_pi;
+ struct virtio_scsi_ctrl_tmf_req tmf;
+ struct virtio_scsi_ctrl_an_req an;
+ } req;
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ struct virtio_scsi_event evt;
+ } resp;
+} ____cacheline_aligned_in_smp;
+
+struct virtio_scsi_event_node {
+ struct virtio_scsi *vscsi;
+ struct virtio_scsi_event event;
+ struct work_struct work;
+};
+
+struct virtio_scsi_vq {
+ /* Protects vq */
+ spinlock_t vq_lock;
+
+ struct virtqueue *vq;
+};
+
+/*
+ * Per-target queue state.
+ *
+ * This struct holds the data needed by the queue steering policy. When a
+ * target is sent multiple requests, we need to drive them to the same queue so
+ * that FIFO processing order is kept. However, if a target was idle, we can
+ * choose a queue arbitrarily. In this case the queue is chosen according to
+ * the current VCPU, so the driver expects the number of request queues to be
+ * equal to the number of VCPUs. This makes it easy and fast to select the
+ * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
+ * (each virtqueue's affinity is set to the CPU that "owns" the queue).
+ *
+ * tgt_seq is held to serialize reading and writing req_vq.
+ *
+ * Decrements of reqs are never concurrent with writes of req_vq: before the
+ * decrement reqs will be != 0; after the decrement the virtqueue completion
+ * routine will not use the req_vq so it can be changed by a new request.
+ * Thus they can happen outside the tgt_seq, provided of course we make reqs
+ * an atomic_t.
+ */
+struct virtio_scsi_target_state {
+ seqcount_t tgt_seq;
+
+ /* Count of outstanding requests. */
+ atomic_t reqs;
+
+ /* Currently active virtqueue for requests sent to this target. */
+ struct virtio_scsi_vq *req_vq;
+};
+
+/* Driver instance state */
+struct virtio_scsi {
+ struct virtio_device *vdev;
+
+ /* Get some buffers ready for event vq */
+ struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
+
+ u32 num_queues;
+
+ /* If the affinity hint is set for virtqueues */
+ bool affinity_hint_set;
+
+ /* CPU hotplug notifier */
+ struct notifier_block nb;
+
+ /* Protected by event_vq lock */
+ bool stop_events;
+
+ struct virtio_scsi_vq ctrl_vq;
+ struct virtio_scsi_vq event_vq;
+ struct virtio_scsi_vq req_vqs[];
+};
+
+static struct kmem_cache *virtscsi_cmd_cache;
+static mempool_t *virtscsi_cmd_pool;
+
+static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
+{
+ return vdev->priv;
+}
+
+static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
+{
+ if (!resid)
+ return;
+
+ if (!scsi_bidi_cmnd(sc)) {
+ scsi_set_resid(sc, resid);
+ return;
+ }
+
+ scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
+ scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
+}
+
+/**
+ * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
+ *
+ * Called with vq_lock held.
+ */
+static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
+{
+ struct virtio_scsi_cmd *cmd = buf;
+ struct scsi_cmnd *sc = cmd->sc;
+ struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
+ struct virtio_scsi_target_state *tgt =
+ scsi_target(sc->device)->hostdata;
+
+ dev_dbg(&sc->device->sdev_gendev,
+ "cmd %p response %u status %#02x sense_len %u\n",
+ sc, resp->response, resp->status, resp->sense_len);
+
+ sc->result = resp->status;
+ virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
+ switch (resp->response) {
+ case VIRTIO_SCSI_S_OK:
+ set_host_byte(sc, DID_OK);
+ break;
+ case VIRTIO_SCSI_S_OVERRUN:
+ set_host_byte(sc, DID_ERROR);
+ break;
+ case VIRTIO_SCSI_S_ABORTED:
+ set_host_byte(sc, DID_ABORT);
+ break;
+ case VIRTIO_SCSI_S_BAD_TARGET:
+ set_host_byte(sc, DID_BAD_TARGET);
+ break;
+ case VIRTIO_SCSI_S_RESET:
+ set_host_byte(sc, DID_RESET);
+ break;
+ case VIRTIO_SCSI_S_BUSY:
+ set_host_byte(sc, DID_BUS_BUSY);
+ break;
+ case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+ break;
+ case VIRTIO_SCSI_S_TARGET_FAILURE:
+ set_host_byte(sc, DID_TARGET_FAILURE);
+ break;
+ case VIRTIO_SCSI_S_NEXUS_FAILURE:
+ set_host_byte(sc, DID_NEXUS_FAILURE);
+ break;
+ default:
+ scmd_printk(KERN_WARNING, sc, "Unknown response %d",
+ resp->response);
+ /* fall through */
+ case VIRTIO_SCSI_S_FAILURE:
+ set_host_byte(sc, DID_ERROR);
+ break;
+ }
+
+ WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
+ VIRTIO_SCSI_SENSE_SIZE);
+ if (sc->sense_buffer) {
+ memcpy(sc->sense_buffer, resp->sense,
+ min_t(u32,
+ virtio32_to_cpu(vscsi->vdev, resp->sense_len),
+ VIRTIO_SCSI_SENSE_SIZE));
+ if (resp->sense_len)
+ set_driver_byte(sc, DRIVER_SENSE);
+ }
+
+ sc->scsi_done(sc);
+
+ atomic_dec(&tgt->reqs);
+}
+
+static void virtscsi_vq_done(struct virtio_scsi *vscsi,
+ struct virtio_scsi_vq *virtscsi_vq,
+ void (*fn)(struct virtio_scsi *vscsi, void *buf))
+{
+ void *buf;
+ unsigned int len;
+ unsigned long flags;
+ struct virtqueue *vq = virtscsi_vq->vq;
+
+ spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
+ fn(vscsi, buf);
+
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
+}
+
+static void virtscsi_req_done(struct virtqueue *vq)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ int index = vq->index - VIRTIO_SCSI_VQ_BASE;
+ struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
+
+ virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
+};
+
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+ int i, num_vqs;
+
+ num_vqs = vscsi->num_queues;
+ for (i = 0; i < num_vqs; i++)
+ virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+ virtscsi_complete_cmd);
+}
+
+static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
+{
+ struct virtio_scsi_cmd *cmd = buf;
+
+ if (cmd->comp)
+ complete_all(cmd->comp);
+}
+
+static void virtscsi_ctrl_done(struct virtqueue *vq)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
+};
+
+static void virtscsi_handle_event(struct work_struct *work);
+
+static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ struct virtio_scsi_event_node *event_node)
+{
+ int err;
+ struct scatterlist sg;
+ unsigned long flags;
+
+ INIT_WORK(&event_node->work, virtscsi_handle_event);
+ sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+
+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+
+ err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
+ GFP_ATOMIC);
+ if (!err)
+ virtqueue_kick(vscsi->event_vq.vq);
+
+ spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
+
+ return err;
+}
+
+static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
+ vscsi->event_list[i].vscsi = vscsi;
+ virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
+ }
+
+ return 0;
+}
+
+static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
+{
+ int i;
+
+ /* Stop scheduling work before calling cancel_work_sync. */
+ spin_lock_irq(&vscsi->event_vq.vq_lock);
+ vscsi->stop_events = true;
+ spin_unlock_irq(&vscsi->event_vq.vq_lock);
+
+ for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
+ cancel_work_sync(&vscsi->event_list[i].work);
+}
+
+static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+ struct virtio_scsi_event *event)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ unsigned int target = event->lun[1];
+ unsigned int lun = (event->lun[2] << 8) | event->lun[3];
+
+ switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
+ case VIRTIO_SCSI_EVT_RESET_RESCAN:
+ scsi_add_device(shost, 0, target, lun);
+ break;
+ case VIRTIO_SCSI_EVT_RESET_REMOVED:
+ sdev = scsi_device_lookup(shost, 0, target, lun);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else {
+ pr_err("SCSI device %d 0 %d %d not found\n",
+ shost->host_no, target, lun);
+ }
+ break;
+ default:
+ pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ }
+}
+
+static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
+ struct virtio_scsi_event *event)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ unsigned int target = event->lun[1];
+ unsigned int lun = (event->lun[2] << 8) | event->lun[3];
+ u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
+ u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
+
+ sdev = scsi_device_lookup(shost, 0, target, lun);
+ if (!sdev) {
+ pr_err("SCSI device %d 0 %d %d not found\n",
+ shost->host_no, target, lun);
+ return;
+ }
+
+ /* Handle "Parameters changed", "Mode parameters changed", and
+ "Capacity data has changed". */
+ if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
+ scsi_rescan_device(&sdev->sdev_gendev);
+
+ scsi_device_put(sdev);
+}
+
+static void virtscsi_handle_event(struct work_struct *work)
+{
+ struct virtio_scsi_event_node *event_node =
+ container_of(work, struct virtio_scsi_event_node, work);
+ struct virtio_scsi *vscsi = event_node->vscsi;
+ struct virtio_scsi_event *event = &event_node->event;
+
+ if (event->event &
+ cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
+ event->event &= ~cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_EVENTS_MISSED);
+ scsi_scan_host(virtio_scsi_host(vscsi->vdev));
+ }
+
+ switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
+ case VIRTIO_SCSI_T_NO_EVENT:
+ break;
+ case VIRTIO_SCSI_T_TRANSPORT_RESET:
+ virtscsi_handle_transport_reset(vscsi, event);
+ break;
+ case VIRTIO_SCSI_T_PARAM_CHANGE:
+ virtscsi_handle_param_change(vscsi, event);
+ break;
+ default:
+ pr_err("Unsupport virtio scsi event %x\n", event->event);
+ }
+ virtscsi_kick_event(vscsi, event_node);
+}
+
+static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
+{
+ struct virtio_scsi_event_node *event_node = buf;
+
+ if (!vscsi->stop_events)
+ queue_work(system_freezable_wq, &event_node->work);
+}
+
+static void virtscsi_event_done(struct virtqueue *vq)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
+};
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
+ * @vq : the struct virtqueue we're talking about
+ * @cmd : command structure
+ * @req_size : size of the request buffer
+ * @resp_size : size of the response buffer
+ */
+static int virtscsi_add_cmd(struct virtqueue *vq,
+ struct virtio_scsi_cmd *cmd,
+ size_t req_size, size_t resp_size)
+{
+ struct scsi_cmnd *sc = cmd->sc;
+ struct scatterlist *sgs[6], req, resp;
+ struct sg_table *out, *in;
+ unsigned out_num = 0, in_num = 0;
+
+ out = in = NULL;
+
+ if (sc && sc->sc_data_direction != DMA_NONE) {
+ if (sc->sc_data_direction != DMA_FROM_DEVICE)
+ out = &scsi_out(sc)->table;
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ in = &scsi_in(sc)->table;
+ }
+
+ /* Request header. */
+ sg_init_one(&req, &cmd->req, req_size);
+ sgs[out_num++] = &req;
+
+ /* Data-out buffer. */
+ if (out) {
+ /* Place WRITE protection SGLs before Data OUT payload */
+ if (scsi_prot_sg_count(sc))
+ sgs[out_num++] = scsi_prot_sglist(sc);
+ sgs[out_num++] = out->sgl;
+ }
+
+ /* Response header. */
+ sg_init_one(&resp, &cmd->resp, resp_size);
+ sgs[out_num + in_num++] = &resp;
+
+ /* Data-in buffer */
+ if (in) {
+ /* Place READ protection SGLs before Data IN payload */
+ if (scsi_prot_sg_count(sc))
+ sgs[out_num + in_num++] = scsi_prot_sglist(sc);
+ sgs[out_num + in_num++] = in->sgl;
+ }
+
+ return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
+}
+
+static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+ struct virtio_scsi_cmd *cmd,
+ size_t req_size, size_t resp_size)
+{
+ unsigned long flags;
+ int err;
+ bool needs_kick = false;
+
+ spin_lock_irqsave(&vq->vq_lock, flags);
+ err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+ if (!err)
+ needs_kick = virtqueue_kick_prepare(vq->vq);
+
+ spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+ if (needs_kick)
+ virtqueue_notify(vq->vq);
+ return err;
+}
+
+static void virtio_scsi_init_hdr(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req *cmd,
+ struct scsi_cmnd *sc)
+{
+ cmd->lun[0] = 1;
+ cmd->lun[1] = sc->device->id;
+ cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+ cmd->lun[3] = sc->device->lun & 0xff;
+ cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
+ cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+ cmd->prio = 0;
+ cmd->crn = 0;
+}
+
+static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req_pi *cmd_pi,
+ struct scsi_cmnd *sc)
+{
+ struct request *rq = sc->request;
+ struct blk_integrity *bi;
+
+ virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
+
+ if (!rq || !scsi_prot_sg_count(sc))
+ return;
+
+ bi = blk_get_integrity(rq->rq_disk);
+
+ if (sc->sc_data_direction == DMA_TO_DEVICE)
+ cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
+ blk_rq_sectors(rq) *
+ bi->tuple_size);
+ else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
+ blk_rq_sectors(rq) *
+ bi->tuple_size);
+}
+
+static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
+ struct virtio_scsi_vq *req_vq,
+ struct scsi_cmnd *sc)
+{
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ int req_size;
+
+ BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
+
+ /* TODO: check feature bit and fail if unsupported? */
+ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
+
+ dev_dbg(&sc->device->sdev_gendev,
+ "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sc = sc;
+
+ BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
+
+ if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+ virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
+ memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+ req_size = sizeof(cmd->req.cmd_pi);
+ } else {
+ virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
+ memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+ req_size = sizeof(cmd->req.cmd);
+ }
+
+ if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ return 0;
+}
+
+static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
+ struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi_target_state *tgt =
+ scsi_target(sc->device)->hostdata;
+
+ atomic_inc(&tgt->reqs);
+ return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
+}
+
+static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
+ struct scsi_cmnd *sc)
+{
+ u32 tag = blk_mq_unique_tag(sc->request);
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+ return &vscsi->req_vqs[hwq];
+}
+
+static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
+ struct virtio_scsi_target_state *tgt)
+{
+ struct virtio_scsi_vq *vq;
+ unsigned long flags;
+ u32 queue_num;
+
+ local_irq_save(flags);
+ if (atomic_inc_return(&tgt->reqs) > 1) {
+ unsigned long seq;
+
+ do {
+ seq = read_seqcount_begin(&tgt->tgt_seq);
+ vq = tgt->req_vq;
+ } while (read_seqcount_retry(&tgt->tgt_seq, seq));
+ } else {
+ /* no writes can be concurrent because of atomic_t */
+ write_seqcount_begin(&tgt->tgt_seq);
+
+ /* keep previous req_vq if a reader just arrived */
+ if (unlikely(atomic_read(&tgt->reqs) > 1)) {
+ vq = tgt->req_vq;
+ goto unlock;
+ }
+
+ queue_num = smp_processor_id();
+ while (unlikely(queue_num >= vscsi->num_queues))
+ queue_num -= vscsi->num_queues;
+ tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
+ unlock:
+ write_seqcount_end(&tgt->tgt_seq);
+ }
+ local_irq_restore(flags);
+
+ return vq;
+}
+
+static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
+ struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi_target_state *tgt =
+ scsi_target(sc->device)->hostdata;
+ struct virtio_scsi_vq *req_vq;
+
+ if (shost_use_blk_mq(sh))
+ req_vq = virtscsi_pick_vq_mq(vscsi, sc);
+ else
+ req_vq = virtscsi_pick_vq(vscsi, tgt);
+
+ return virtscsi_queuecommand(vscsi, req_vq, sc);
+}
+
+static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
+{
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int ret = FAILED;
+
+ cmd->comp = &comp;
+ if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
+ goto out;
+
+ wait_for_completion(&comp);
+ if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
+ cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+ ret = SUCCESS;
+
+ /*
+ * The spec guarantees that all requests related to the TMF have
+ * been completed, but the callback might not have run yet if
+ * we're using independent interrupts (e.g. MSI). Poll the
+ * virtqueues once.
+ *
+ * In the abort case, sc->scsi_done will do nothing, because
+ * the block layer must have detected a timeout and as a result
+ * REQ_ATOM_COMPLETE has been set.
+ */
+ virtscsi_poll_requests(vscsi);
+
+out:
+ mempool_free(cmd, virtscsi_cmd_pool);
+ return ret;
+}
+
+static int virtscsi_device_reset(struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sc->device->host);
+ struct virtio_scsi_cmd *cmd;
+
+ sdev_printk(KERN_INFO, sc->device, "device reset\n");
+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
+ if (!cmd)
+ return FAILED;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sc = sc;
+ cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
+ .type = VIRTIO_SCSI_T_TMF,
+ .subtype = cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
+ .lun[0] = 1,
+ .lun[1] = sc->device->id,
+ .lun[2] = (sc->device->lun >> 8) | 0x40,
+ .lun[3] = sc->device->lun & 0xff,
+ };
+ return virtscsi_tmf(vscsi, cmd);
+}
+
+/**
+ * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
+ * @sdev: Virtscsi target whose queue depth to change
+ * @qdepth: New queue depth
+ */
+static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth = shost->cmd_per_lun;
+
+ return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
+}
+
+static int virtscsi_abort(struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sc->device->host);
+ struct virtio_scsi_cmd *cmd;
+
+ scmd_printk(KERN_INFO, sc, "abort\n");
+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
+ if (!cmd)
+ return FAILED;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sc = sc;
+ cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
+ .type = VIRTIO_SCSI_T_TMF,
+ .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
+ .lun[0] = 1,
+ .lun[1] = sc->device->id,
+ .lun[2] = (sc->device->lun >> 8) | 0x40,
+ .lun[3] = sc->device->lun & 0xff,
+ .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
+ };
+ return virtscsi_tmf(vscsi, cmd);
+}
+
+static int virtscsi_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ struct virtio_scsi_target_state *tgt =
+ kmalloc(sizeof(*tgt), GFP_KERNEL);
+ if (!tgt)
+ return -ENOMEM;
+
+ seqcount_init(&tgt->tgt_seq);
+ atomic_set(&tgt->reqs, 0);
+ tgt->req_vq = &vscsi->req_vqs[0];
+
+ starget->hostdata = tgt;
+ return 0;
+}
+
+static void virtscsi_target_destroy(struct scsi_target *starget)
+{
+ struct virtio_scsi_target_state *tgt = starget->hostdata;
+ kfree(tgt);
+}
+
+static struct scsi_host_template virtscsi_host_template_single = {
+ .module = THIS_MODULE,
+ .name = "Virtio SCSI HBA",
+ .proc_name = "virtio_scsi",
+ .this_id = -1,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
+ .queuecommand = virtscsi_queuecommand_single,
+ .change_queue_depth = virtscsi_change_queue_depth,
+ .eh_abort_handler = virtscsi_abort,
+ .eh_device_reset_handler = virtscsi_device_reset,
+
+ .can_queue = 1024,
+ .dma_boundary = UINT_MAX,
+ .use_clustering = ENABLE_CLUSTERING,
+ .target_alloc = virtscsi_target_alloc,
+ .target_destroy = virtscsi_target_destroy,
+ .track_queue_depth = 1,
+};
+
+static struct scsi_host_template virtscsi_host_template_multi = {
+ .module = THIS_MODULE,
+ .name = "Virtio SCSI HBA",
+ .proc_name = "virtio_scsi",
+ .this_id = -1,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
+ .queuecommand = virtscsi_queuecommand_multi,
+ .change_queue_depth = virtscsi_change_queue_depth,
+ .eh_abort_handler = virtscsi_abort,
+ .eh_device_reset_handler = virtscsi_device_reset,
+
+ .can_queue = 1024,
+ .dma_boundary = UINT_MAX,
+ .use_clustering = ENABLE_CLUSTERING,
+ .target_alloc = virtscsi_target_alloc,
+ .target_destroy = virtscsi_target_destroy,
+ .track_queue_depth = 1,
+};
+
+#define virtscsi_config_get(vdev, fld) \
+ ({ \
+ typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
+ __val; \
+ })
+
+#define virtscsi_config_set(vdev, fld, val) \
+ do { \
+ typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+ } while(0)
+
+static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
+{
+ int i;
+ int cpu;
+
+ /* In multiqueue mode, when the number of cpu is equal
+ * to the number of request queues, we let the qeueues
+ * to be private to one cpu by setting the affinity hint
+ * to eliminate the contention.
+ */
+ if ((vscsi->num_queues == 1 ||
+ vscsi->num_queues != num_online_cpus()) && affinity) {
+ if (vscsi->affinity_hint_set)
+ affinity = false;
+ else
+ return;
+ }
+
+ if (affinity) {
+ i = 0;
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
+ i++;
+ }
+
+ vscsi->affinity_hint_set = true;
+ } else {
+ for (i = 0; i < vscsi->num_queues; i++) {
+ if (!vscsi->req_vqs[i].vq)
+ continue;
+
+ virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+ }
+
+ vscsi->affinity_hint_set = false;
+ }
+}
+
+static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
+{
+ get_online_cpus();
+ __virtscsi_set_affinity(vscsi, affinity);
+ put_online_cpus();
+}
+
+static int virtscsi_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
+ switch(action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ __virtscsi_set_affinity(vscsi, true);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
+ struct virtqueue *vq)
+{
+ spin_lock_init(&virtscsi_vq->vq_lock);
+ virtscsi_vq->vq = vq;
+}
+
+static void virtscsi_remove_vqs(struct virtio_device *vdev)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ virtscsi_set_affinity(vscsi, false);
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+ vdev->config->del_vqs(vdev);
+}
+
+static int virtscsi_init(struct virtio_device *vdev,
+ struct virtio_scsi *vscsi)
+{
+ int err;
+ u32 i;
+ u32 num_vqs;
+ vq_callback_t **callbacks;
+ const char **names;
+ struct virtqueue **vqs;
+
+ num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
+ vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
+ callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
+ names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+
+ if (!callbacks || !vqs || !names) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ callbacks[0] = virtscsi_ctrl_done;
+ callbacks[1] = virtscsi_event_done;
+ names[0] = "control";
+ names[1] = "event";
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
+ callbacks[i] = virtscsi_req_done;
+ names[i] = "request";
+ }
+
+ /* Discover virtqueues and write information to configuration. */
+ err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
+ if (err)
+ goto out;
+
+ virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
+ virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
+ virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
+ vqs[i]);
+
+ virtscsi_set_affinity(vscsi, true);
+
+ virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
+ virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
+
+ err = 0;
+
+out:
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+ if (err)
+ virtscsi_remove_vqs(vdev);
+ return err;
+}
+
+static int virtscsi_probe(struct virtio_device *vdev)
+{
+ struct Scsi_Host *shost;
+ struct virtio_scsi *vscsi;
+ int err, host_prot;
+ u32 sg_elems, num_targets;
+ u32 cmd_per_lun;
+ u32 num_queues;
+ struct scsi_host_template *hostt;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* We need to know how many queues before we allocate. */
+ num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+
+ num_targets = virtscsi_config_get(vdev, max_target) + 1;
+
+ if (num_queues == 1)
+ hostt = &virtscsi_host_template_single;
+ else
+ hostt = &virtscsi_host_template_multi;
+
+ shost = scsi_host_alloc(hostt,
+ sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
+ if (!shost)
+ return -ENOMEM;
+
+ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
+ shost->sg_tablesize = sg_elems;
+ vscsi = shost_priv(shost);
+ vscsi->vdev = vdev;
+ vscsi->num_queues = num_queues;
+ vdev->priv = shost;
+
+ err = virtscsi_init(vdev, vscsi);
+ if (err)
+ goto virtscsi_init_failed;
+
+ vscsi->nb.notifier_call = &virtscsi_cpu_callback;
+ err = register_hotcpu_notifier(&vscsi->nb);
+ if (err) {
+ pr_err("registering cpu notifier failed\n");
+ goto scsi_add_host_failed;
+ }
+
+ cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
+ shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
+ shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
+
+ /* LUNs > 256 are reported with format 1, so they go in the range
+ * 16640-32767.
+ */
+ shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
+ shost->max_id = num_targets;
+ shost->max_channel = 0;
+ shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+ shost->nr_hw_queues = num_queues;
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(shost, host_prot);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ }
+
+ err = scsi_add_host(shost, &vdev->dev);
+ if (err)
+ goto scsi_add_host_failed;
+
+ virtio_device_ready(vdev);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
+
+ scsi_scan_host(shost);
+ return 0;
+
+scsi_add_host_failed:
+ vdev->config->del_vqs(vdev);
+virtscsi_init_failed:
+ scsi_host_put(shost);
+ return err;
+}
+
+static void virtscsi_remove(struct virtio_device *vdev)
+{
+ struct Scsi_Host *shost = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(shost);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_cancel_event_work(vscsi);
+
+ scsi_remove_host(shost);
+
+ unregister_hotcpu_notifier(&vscsi->nb);
+
+ virtscsi_remove_vqs(vdev);
+ scsi_host_put(shost);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtscsi_freeze(struct virtio_device *vdev)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ unregister_hotcpu_notifier(&vscsi->nb);
+ virtscsi_remove_vqs(vdev);
+ return 0;
+}
+
+static int virtscsi_restore(struct virtio_device *vdev)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ int err;
+
+ err = virtscsi_init(vdev, vscsi);
+ if (err)
+ return err;
+
+ err = register_hotcpu_notifier(&vscsi->nb);
+ if (err) {
+ vdev->config->del_vqs(vdev);
+ return err;
+ }
+
+ virtio_device_ready(vdev);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
+
+ return err;
+}
+#endif
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+ VIRTIO_SCSI_F_HOTPLUG,
+ VIRTIO_SCSI_F_CHANGE,
+ VIRTIO_SCSI_F_T10_PI,
+};
+
+static struct virtio_driver virtio_scsi_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtscsi_probe,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtscsi_freeze,
+ .restore = virtscsi_restore,
+#endif
+ .remove = virtscsi_remove,
+};
+
+static int __init init(void)
+{
+ int ret = -ENOMEM;
+
+ virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
+ if (!virtscsi_cmd_cache) {
+ pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
+ goto error;
+ }
+
+
+ virtscsi_cmd_pool =
+ mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
+ virtscsi_cmd_cache);
+ if (!virtscsi_cmd_pool) {
+ pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
+ goto error;
+ }
+ ret = register_virtio_driver(&virtio_scsi_driver);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ if (virtscsi_cmd_pool) {
+ mempool_destroy(virtscsi_cmd_pool);
+ virtscsi_cmd_pool = NULL;
+ }
+ if (virtscsi_cmd_cache) {
+ kmem_cache_destroy(virtscsi_cmd_cache);
+ virtscsi_cmd_cache = NULL;
+ }
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_scsi_driver);
+ mempool_destroy(virtscsi_cmd_pool);
+ kmem_cache_destroy(virtscsi_cmd_cache);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio SCSI HBA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
new file mode 100644
index 000000000..0f133c181
--- /dev/null
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -0,0 +1,1597 @@
+/*
+ * Linux driver for VMware's para-virtualized SCSI HBA.
+ *
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+
+#include "vmw_pvscsi.h"
+
+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
+
+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
+
+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
+#define PVSCSI_DEFAULT_QUEUE_DEPTH 254
+#define SGL_SIZE PAGE_SIZE
+
+struct pvscsi_sg_list {
+ struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
+};
+
+struct pvscsi_ctx {
+ /*
+ * The index of the context in cmd_map serves as the context ID for a
+ * 1-to-1 mapping completions back to requests.
+ */
+ struct scsi_cmnd *cmd;
+ struct pvscsi_sg_list *sgl;
+ struct list_head list;
+ dma_addr_t dataPA;
+ dma_addr_t sensePA;
+ dma_addr_t sglPA;
+ struct completion *abort_cmp;
+};
+
+struct pvscsi_adapter {
+ char *mmioBase;
+ unsigned int irq;
+ u8 rev;
+ bool use_msi;
+ bool use_msix;
+ bool use_msg;
+ bool use_req_threshold;
+
+ spinlock_t hw_lock;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ struct PVSCSIRingReqDesc *req_ring;
+ unsigned req_pages;
+ unsigned req_depth;
+ dma_addr_t reqRingPA;
+
+ struct PVSCSIRingCmpDesc *cmp_ring;
+ unsigned cmp_pages;
+ dma_addr_t cmpRingPA;
+
+ struct PVSCSIRingMsgDesc *msg_ring;
+ unsigned msg_pages;
+ dma_addr_t msgRingPA;
+
+ struct PVSCSIRingsState *rings_state;
+ dma_addr_t ringStatePA;
+
+ struct pci_dev *dev;
+ struct Scsi_Host *host;
+
+ struct list_head cmd_pool;
+ struct pvscsi_ctx *cmd_map;
+};
+
+
+/* Command line parameters */
+static int pvscsi_ring_pages;
+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
+static bool pvscsi_disable_msi;
+static bool pvscsi_disable_msix;
+static bool pvscsi_use_msg = true;
+static bool pvscsi_use_req_threshold = true;
+
+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
+
+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
+ "[up to 16 targets],"
+ __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
+ "[for 16+ targets])");
+
+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
+
+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
+ __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
+
+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
+
+module_param_named(use_req_threshold, pvscsi_use_req_threshold,
+ bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
+
+static const struct pci_device_id pvscsi_pci_tbl[] = {
+ { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
+
+static struct device *
+pvscsi_dev(const struct pvscsi_adapter *adapter)
+{
+ return &(adapter->dev->dev);
+}
+
+static struct pvscsi_ctx *
+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+ struct pvscsi_ctx *ctx, *end;
+
+ end = &adapter->cmd_map[adapter->req_depth];
+ for (ctx = adapter->cmd_map; ctx < end; ctx++)
+ if (ctx->cmd == cmd)
+ return ctx;
+
+ return NULL;
+}
+
+static struct pvscsi_ctx *
+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+ struct pvscsi_ctx *ctx;
+
+ if (list_empty(&adapter->cmd_pool))
+ return NULL;
+
+ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
+ ctx->cmd = cmd;
+ list_del(&ctx->list);
+
+ return ctx;
+}
+
+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx)
+{
+ ctx->cmd = NULL;
+ ctx->abort_cmp = NULL;
+ list_add(&ctx->list, &adapter->cmd_pool);
+}
+
+/*
+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
+ * non-zero integer. ctx always points to an entry in cmd_map array, hence
+ * the return value is always >=1.
+ */
+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
+ const struct pvscsi_ctx *ctx)
+{
+ return ctx - adapter->cmd_map + 1;
+}
+
+static struct pvscsi_ctx *
+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
+{
+ return &adapter->cmd_map[context - 1];
+}
+
+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
+ u32 offset, u32 val)
+{
+ writel(val, adapter->mmioBase + offset);
+}
+
+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
+{
+ return readl(adapter->mmioBase + offset);
+}
+
+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
+{
+ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
+}
+
+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
+ u32 val)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
+}
+
+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
+{
+ u32 intr_bits;
+
+ intr_bits = PVSCSI_INTR_CMPL_MASK;
+ if (adapter->use_msg)
+ intr_bits |= PVSCSI_INTR_MSG_MASK;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
+}
+
+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
+}
+
+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
+ u32 cmd, const void *desc, size_t len)
+{
+ const u32 *ptr = desc;
+ size_t i;
+
+ len /= sizeof(*ptr);
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
+ for (i = 0; i < len; i++)
+ pvscsi_reg_write(adapter,
+ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
+}
+
+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
+ const struct pvscsi_ctx *ctx)
+{
+ struct PVSCSICmdDescAbortCmd cmd = { 0 };
+
+ cmd.target = ctx->cmd->device->id;
+ cmd.context = pvscsi_map_context(adapter, ctx);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
+}
+
+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
+}
+
+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
+}
+
+static int scsi_is_rw(unsigned char op)
+{
+ return op == READ_6 || op == WRITE_6 ||
+ op == READ_10 || op == WRITE_10 ||
+ op == READ_12 || op == WRITE_12 ||
+ op == READ_16 || op == WRITE_16;
+}
+
+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
+ unsigned char op)
+{
+ if (scsi_is_rw(op)) {
+ struct PVSCSIRingsState *s = adapter->rings_state;
+
+ if (!adapter->use_req_threshold ||
+ s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
+ pvscsi_kick_rw_io(adapter);
+ } else {
+ pvscsi_process_request_ring(adapter);
+ }
+}
+
+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
+{
+ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
+}
+
+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
+{
+ dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
+}
+
+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
+{
+ struct PVSCSICmdDescResetDevice cmd = { 0 };
+
+ dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);
+
+ cmd.target = target;
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
+ &cmd, sizeof(cmd));
+}
+
+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
+ struct scatterlist *sg, unsigned count)
+{
+ unsigned i;
+ struct PVSCSISGElement *sge;
+
+ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
+
+ sge = &ctx->sgl->sge[0];
+ for (i = 0; i < count; i++, sg++) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].flags = 0;
+ }
+}
+
+/*
+ * Map all data buffers for a command into PCI space and
+ * setup the scatter/gather list if needed.
+ */
+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
+ struct PVSCSIRingReqDesc *e)
+{
+ unsigned count;
+ unsigned bufflen = scsi_bufflen(cmd);
+ struct scatterlist *sg;
+
+ e->dataLen = bufflen;
+ e->dataAddr = 0;
+ if (bufflen == 0)
+ return;
+
+ sg = scsi_sglist(cmd);
+ count = scsi_sg_count(cmd);
+ if (count != 0) {
+ int segs = scsi_dma_map(cmd);
+ if (segs > 1) {
+ pvscsi_create_sg(ctx, sg, segs);
+
+ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
+ ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
+ SGL_SIZE, PCI_DMA_TODEVICE);
+ e->dataAddr = ctx->sglPA;
+ } else
+ e->dataAddr = sg_dma_address(sg);
+ } else {
+ /*
+ * In case there is no S/G list, scsi_sglist points
+ * directly to the buffer.
+ */
+ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+ cmd->sc_data_direction);
+ e->dataAddr = ctx->dataPA;
+ }
+}
+
+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx)
+{
+ struct scsi_cmnd *cmd;
+ unsigned bufflen;
+
+ cmd = ctx->cmd;
+ bufflen = scsi_bufflen(cmd);
+
+ if (bufflen != 0) {
+ unsigned count = scsi_sg_count(cmd);
+
+ if (count != 0) {
+ scsi_dma_unmap(cmd);
+ if (ctx->sglPA) {
+ pci_unmap_single(adapter->dev, ctx->sglPA,
+ SGL_SIZE, PCI_DMA_TODEVICE);
+ ctx->sglPA = 0;
+ }
+ } else
+ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
+ cmd->sc_data_direction);
+ }
+ if (cmd->sense_buffer)
+ pci_unmap_single(adapter->dev, ctx->sensePA,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
+{
+ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &adapter->ringStatePA);
+ if (!adapter->rings_state)
+ return -ENOMEM;
+
+ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
+ pvscsi_ring_pages);
+ adapter->req_depth = adapter->req_pages
+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+ adapter->req_ring = pci_alloc_consistent(adapter->dev,
+ adapter->req_pages * PAGE_SIZE,
+ &adapter->reqRingPA);
+ if (!adapter->req_ring)
+ return -ENOMEM;
+
+ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
+ pvscsi_ring_pages);
+ adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
+ adapter->cmp_pages * PAGE_SIZE,
+ &adapter->cmpRingPA);
+ if (!adapter->cmp_ring)
+ return -ENOMEM;
+
+ BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
+ BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
+ BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
+
+ if (!adapter->use_msg)
+ return 0;
+
+ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
+ pvscsi_msg_ring_pages);
+ adapter->msg_ring = pci_alloc_consistent(adapter->dev,
+ adapter->msg_pages * PAGE_SIZE,
+ &adapter->msgRingPA);
+ if (!adapter->msg_ring)
+ return -ENOMEM;
+ BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
+
+ return 0;
+}
+
+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
+{
+ struct PVSCSICmdDescSetupRings cmd = { 0 };
+ dma_addr_t base;
+ unsigned i;
+
+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
+ cmd.reqRingNumPages = adapter->req_pages;
+ cmd.cmpRingNumPages = adapter->cmp_pages;
+
+ base = adapter->reqRingPA;
+ for (i = 0; i < adapter->req_pages; i++) {
+ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
+ base += PAGE_SIZE;
+ }
+
+ base = adapter->cmpRingPA;
+ for (i = 0; i < adapter->cmp_pages; i++) {
+ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
+ base += PAGE_SIZE;
+ }
+
+ memset(adapter->rings_state, 0, PAGE_SIZE);
+ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
+ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
+ &cmd, sizeof(cmd));
+
+ if (adapter->use_msg) {
+ struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
+
+ cmd_msg.numPages = adapter->msg_pages;
+
+ base = adapter->msgRingPA;
+ for (i = 0; i < adapter->msg_pages; i++) {
+ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
+ base += PAGE_SIZE;
+ }
+ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
+ &cmd_msg, sizeof(cmd_msg));
+ }
+}
+
+static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (!sdev->tagged_supported)
+ qdepth = 1;
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+/*
+ * Pull a completion descriptor off and pass the completion back
+ * to the SCSI mid layer.
+ */
+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+ const struct PVSCSIRingCmpDesc *e)
+{
+ struct pvscsi_ctx *ctx;
+ struct scsi_cmnd *cmd;
+ struct completion *abort_cmp;
+ u32 btstat = e->hostStatus;
+ u32 sdstat = e->scsiStatus;
+
+ ctx = pvscsi_get_context(adapter, e->context);
+ cmd = ctx->cmd;
+ abort_cmp = ctx->abort_cmp;
+ pvscsi_unmap_buffers(adapter, ctx);
+ pvscsi_release_context(adapter, ctx);
+ if (abort_cmp) {
+ /*
+ * The command was requested to be aborted. Just signal that
+ * the request completed and swallow the actual cmd completion
+ * here. The abort handler will post a completion for this
+ * command indicating that it got successfully aborted.
+ */
+ complete(abort_cmp);
+ return;
+ }
+
+ cmd->result = 0;
+ if (sdstat != SAM_STAT_GOOD &&
+ (btstat == BTSTAT_SUCCESS ||
+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
+ cmd->result = (DID_OK << 16) | sdstat;
+ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
+ cmd->result |= (DRIVER_SENSE << 24);
+ } else
+ switch (btstat) {
+ case BTSTAT_SUCCESS:
+ case BTSTAT_LINKED_COMMAND_COMPLETED:
+ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
+ /* If everything went fine, let's move on.. */
+ cmd->result = (DID_OK << 16);
+ break;
+
+ case BTSTAT_DATARUN:
+ case BTSTAT_DATA_UNDERRUN:
+ /* Report residual data in underruns */
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+ cmd->result = (DID_ERROR << 16);
+ break;
+
+ case BTSTAT_SELTIMEO:
+ /* Our emulation returns this for non-connected devs */
+ cmd->result = (DID_BAD_TARGET << 16);
+ break;
+
+ case BTSTAT_LUNMISMATCH:
+ case BTSTAT_TAGREJECT:
+ case BTSTAT_BADMSG:
+ cmd->result = (DRIVER_INVALID << 24);
+ /* fall through */
+
+ case BTSTAT_HAHARDWARE:
+ case BTSTAT_INVPHASE:
+ case BTSTAT_HATIMEOUT:
+ case BTSTAT_NORESPONSE:
+ case BTSTAT_DISCONNECT:
+ case BTSTAT_HASOFTWARE:
+ case BTSTAT_BUSFREE:
+ case BTSTAT_SENSFAILED:
+ cmd->result |= (DID_ERROR << 16);
+ break;
+
+ case BTSTAT_SENTRST:
+ case BTSTAT_RECVRST:
+ case BTSTAT_BUSRESET:
+ cmd->result = (DID_RESET << 16);
+ break;
+
+ case BTSTAT_ABORTQUEUE:
+ cmd->result = (DID_ABORT << 16);
+ break;
+
+ case BTSTAT_SCSIPARITY:
+ cmd->result = (DID_PARITY << 16);
+ break;
+
+ default:
+ cmd->result = (DID_ERROR << 16);
+ scmd_printk(KERN_DEBUG, cmd,
+ "Unknown completion status: 0x%x\n",
+ btstat);
+ }
+
+ dev_dbg(&cmd->device->sdev_gendev,
+ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
+ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
+
+ cmd->scsi_done(cmd);
+}
+
+/*
+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
+ * where we may want to serialize some accesses between the driver and the
+ * emulation layer. We use compiler barriers instead of the more expensive
+ * memory barriers because PVSCSI is only supported on X86 which has strong
+ * memory access ordering.
+ */
+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
+{
+ struct PVSCSIRingsState *s = adapter->rings_state;
+ struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
+ u32 cmp_entries = s->cmpNumEntriesLog2;
+
+ while (s->cmpConsIdx != s->cmpProdIdx) {
+ struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
+ MASK(cmp_entries));
+ /*
+ * This barrier() ensures that *e is not dereferenced while
+ * the device emulation still writes data into the slot.
+ * Since the device emulation advances s->cmpProdIdx only after
+ * updating the slot we want to check it first.
+ */
+ barrier();
+ pvscsi_complete_request(adapter, e);
+ /*
+ * This barrier() ensures that compiler doesn't reorder write
+ * to s->cmpConsIdx before the read of (*e) inside
+ * pvscsi_complete_request. Otherwise, device emulation may
+ * overwrite *e before we had a chance to read it.
+ */
+ barrier();
+ s->cmpConsIdx++;
+ }
+}
+
+/*
+ * Translate a Linux SCSI request into a request ring entry.
+ */
+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
+{
+ struct PVSCSIRingsState *s;
+ struct PVSCSIRingReqDesc *e;
+ struct scsi_device *sdev;
+ u32 req_entries;
+
+ s = adapter->rings_state;
+ sdev = cmd->device;
+ req_entries = s->reqNumEntriesLog2;
+
+ /*
+ * If this condition holds, we might have room on the request ring, but
+ * we might not have room on the completion ring for the response.
+ * However, we have already ruled out this possibility - we would not
+ * have successfully allocated a context if it were true, since we only
+ * have one context per request entry. Check for it anyway, since it
+ * would be a serious bug.
+ */
+ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
+ scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
+ "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
+ s->reqProdIdx, s->cmpConsIdx);
+ return -1;
+ }
+
+ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
+
+ e->bus = sdev->channel;
+ e->target = sdev->id;
+ memset(e->lun, 0, sizeof(e->lun));
+ e->lun[1] = sdev->lun;
+
+ if (cmd->sense_buffer) {
+ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ PCI_DMA_FROMDEVICE);
+ e->senseAddr = ctx->sensePA;
+ e->senseLen = SCSI_SENSE_BUFFERSIZE;
+ } else {
+ e->senseLen = 0;
+ e->senseAddr = 0;
+ }
+ e->cdbLen = cmd->cmd_len;
+ e->vcpuHint = smp_processor_id();
+ memcpy(e->cdb, cmd->cmnd, e->cdbLen);
+
+ e->tag = SIMPLE_QUEUE_TAG;
+
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
+ else if (cmd->sc_data_direction == DMA_NONE)
+ e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
+ else
+ e->flags = 0;
+
+ pvscsi_map_buffers(adapter, ctx, cmd, e);
+
+ e->context = pvscsi_map_context(adapter, ctx);
+
+ barrier();
+
+ s->reqProdIdx++;
+
+ return 0;
+}
+
+static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ struct pvscsi_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ ctx = pvscsi_acquire_context(adapter, cmd);
+ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
+ if (ctx)
+ pvscsi_release_context(adapter, ctx);
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ cmd->scsi_done = done;
+
+ dev_dbg(&cmd->device->sdev_gendev,
+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ pvscsi_kick_io(adapter, cmd->cmnd[0]);
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(pvscsi_queue)
+
+static int pvscsi_abort(struct scsi_cmnd *cmd)
+{
+ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
+ struct pvscsi_ctx *ctx;
+ unsigned long flags;
+ int result = SUCCESS;
+ DECLARE_COMPLETION_ONSTACK(abort_cmp);
+
+ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
+ adapter->host->host_no, cmd);
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ /*
+ * Poll the completion ring first - we might be trying to abort
+ * a command that is waiting to be dispatched in the completion ring.
+ */
+ pvscsi_process_completion_ring(adapter);
+
+ /*
+ * If there is no context for the command, it either already succeeded
+ * or else was never properly issued. Not our problem.
+ */
+ ctx = pvscsi_find_context(adapter, cmd);
+ if (!ctx) {
+ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
+ goto out;
+ }
+
+ /*
+ * Mark that the command has been requested to be aborted and issue
+ * the abort.
+ */
+ ctx->abort_cmp = &abort_cmp;
+
+ pvscsi_abort_cmd(adapter, ctx);
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ /* Wait for 2 secs for the completion. */
+ wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ if (!completion_done(&abort_cmp)) {
+ /*
+ * Failed to abort the command, unmark the fact that it
+ * was requested to be aborted.
+ */
+ ctx->abort_cmp = NULL;
+ result = FAILED;
+ scmd_printk(KERN_DEBUG, cmd,
+ "Failed to get completion for aborted cmd %p\n",
+ cmd);
+ goto out;
+ }
+
+ /*
+ * Successfully aborted the command.
+ */
+ cmd->result = (DID_ABORT << 16);
+ cmd->scsi_done(cmd);
+
+out:
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ return result;
+}
+
+/*
+ * Abort all outstanding requests. This is only safe to use if the completion
+ * ring will never be walked again or the device has been reset, because it
+ * destroys the 1-1 mapping between context field passed to emulation and our
+ * request structure.
+ */
+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
+{
+ unsigned i;
+
+ for (i = 0; i < adapter->req_depth; i++) {
+ struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
+ struct scsi_cmnd *cmd = ctx->cmd;
+ if (cmd) {
+ scmd_printk(KERN_ERR, cmd,
+ "Forced reset on cmd %p\n", cmd);
+ pvscsi_unmap_buffers(adapter, ctx);
+ pvscsi_release_context(adapter, ctx);
+ cmd->result = (DID_RESET << 16);
+ cmd->scsi_done(cmd);
+ }
+ }
+}
+
+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ unsigned long flags;
+ bool use_msg;
+
+ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ use_msg = adapter->use_msg;
+
+ if (use_msg) {
+ adapter->use_msg = 0;
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ /*
+ * Now that we know that the ISR won't add more work on the
+ * workqueue we can safely flush any outstanding work.
+ */
+ flush_workqueue(adapter->workqueue);
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+ }
+
+ /*
+ * We're going to tear down the entire ring structure and set it back
+ * up, so stalling new requests until all completions are flushed and
+ * the rings are back in place.
+ */
+
+ pvscsi_process_request_ring(adapter);
+
+ ll_adapter_reset(adapter);
+
+ /*
+ * Now process any completions. Note we do this AFTER adapter reset,
+ * which is strange, but stops races where completions get posted
+ * between processing the ring and issuing the reset. The backend will
+ * not touch the ring memory after reset, so the immediately pre-reset
+ * completion ring state is still valid.
+ */
+ pvscsi_process_completion_ring(adapter);
+
+ pvscsi_reset_all(adapter);
+ adapter->use_msg = use_msg;
+ pvscsi_setup_all_rings(adapter);
+ pvscsi_unmask_intr(adapter);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ return SUCCESS;
+}
+
+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ unsigned long flags;
+
+ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
+
+ /*
+ * We don't want to queue new requests for this bus after
+ * flushing all pending requests to emulation, since new
+ * requests could then sneak in during this bus reset phase,
+ * so take the lock now.
+ */
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ pvscsi_process_request_ring(adapter);
+ ll_bus_reset(adapter);
+ pvscsi_process_completion_ring(adapter);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ return SUCCESS;
+}
+
+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ unsigned long flags;
+
+ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
+ host->host_no, cmd->device->id);
+
+ /*
+ * We don't want to queue new requests for this device after flushing
+ * all pending requests to emulation, since new requests could then
+ * sneak in during this device reset phase, so take the lock now.
+ */
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ pvscsi_process_request_ring(adapter);
+ ll_device_reset(adapter, cmd->device->id);
+ pvscsi_process_completion_ring(adapter);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template pvscsi_template;
+
+static const char *pvscsi_info(struct Scsi_Host *host)
+{
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ static char buf[256];
+
+ sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
+ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
+ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
+ pvscsi_template.cmd_per_lun);
+
+ return buf;
+}
+
+static struct scsi_host_template pvscsi_template = {
+ .module = THIS_MODULE,
+ .name = "VMware PVSCSI Host Adapter",
+ .proc_name = "vmw_pvscsi",
+ .info = pvscsi_info,
+ .queuecommand = pvscsi_queue,
+ .this_id = -1,
+ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
+ .dma_boundary = UINT_MAX,
+ .max_sectors = 0xffff,
+ .use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = pvscsi_change_queue_depth,
+ .eh_abort_handler = pvscsi_abort,
+ .eh_device_reset_handler = pvscsi_device_reset,
+ .eh_bus_reset_handler = pvscsi_bus_reset,
+ .eh_host_reset_handler = pvscsi_host_reset,
+};
+
+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
+ const struct PVSCSIRingMsgDesc *e)
+{
+ struct PVSCSIRingsState *s = adapter->rings_state;
+ struct Scsi_Host *host = adapter->host;
+ struct scsi_device *sdev;
+
+ printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
+ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
+
+ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
+
+ if (e->type == PVSCSI_MSG_DEV_ADDED) {
+ struct PVSCSIMsgDescDevStatusChanged *desc;
+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
+
+ printk(KERN_INFO
+ "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
+ desc->bus, desc->target, desc->lun[1]);
+
+ if (!scsi_host_get(host))
+ return;
+
+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
+ desc->lun[1]);
+ if (sdev) {
+ printk(KERN_INFO "vmw_pvscsi: device already exists\n");
+ scsi_device_put(sdev);
+ } else
+ scsi_add_device(adapter->host, desc->bus,
+ desc->target, desc->lun[1]);
+
+ scsi_host_put(host);
+ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
+ struct PVSCSIMsgDescDevStatusChanged *desc;
+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
+
+ printk(KERN_INFO
+ "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
+ desc->bus, desc->target, desc->lun[1]);
+
+ if (!scsi_host_get(host))
+ return;
+
+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
+ desc->lun[1]);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else
+ printk(KERN_INFO
+ "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
+ desc->bus, desc->target, desc->lun[1]);
+
+ scsi_host_put(host);
+ }
+}
+
+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
+{
+ struct PVSCSIRingsState *s = adapter->rings_state;
+
+ return s->msgProdIdx != s->msgConsIdx;
+}
+
+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
+{
+ struct PVSCSIRingsState *s = adapter->rings_state;
+ struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
+ u32 msg_entries = s->msgNumEntriesLog2;
+
+ while (pvscsi_msg_pending(adapter)) {
+ struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
+ MASK(msg_entries));
+
+ barrier();
+ pvscsi_process_msg(adapter, e);
+ barrier();
+ s->msgConsIdx++;
+ }
+}
+
+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
+{
+ struct pvscsi_adapter *adapter;
+
+ adapter = container_of(data, struct pvscsi_adapter, work);
+
+ pvscsi_process_msg_ring(adapter);
+}
+
+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
+{
+ char name[32];
+
+ if (!pvscsi_use_msg)
+ return 0;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+ PVSCSI_CMD_SETUP_MSG_RING);
+
+ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
+ return 0;
+
+ snprintf(name, sizeof(name),
+ "vmw_pvscsi_wq_%u", adapter->host->host_no);
+
+ adapter->workqueue = create_singlethread_workqueue(name);
+ if (!adapter->workqueue) {
+ printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
+ return 0;
+ }
+ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
+
+ return 1;
+}
+
+static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
+ bool enable)
+{
+ u32 val;
+
+ if (!pvscsi_use_req_threshold)
+ return false;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
+ val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
+ if (val == -1) {
+ printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
+ return false;
+ } else {
+ struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
+ cmd_msg.enable = enable;
+ printk(KERN_INFO
+ "vmw_pvscsi: %sabling reqCallThreshold\n",
+ enable ? "en" : "dis");
+ pvscsi_write_cmd_desc(adapter,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
+ &cmd_msg, sizeof(cmd_msg));
+ return pvscsi_reg_read(adapter,
+ PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
+ }
+}
+
+static irqreturn_t pvscsi_isr(int irq, void *devp)
+{
+ struct pvscsi_adapter *adapter = devp;
+ int handled;
+
+ if (adapter->use_msi || adapter->use_msix)
+ handled = true;
+ else {
+ u32 val = pvscsi_read_intr_status(adapter);
+ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
+ if (handled)
+ pvscsi_write_intr_status(devp, val);
+ }
+
+ if (handled) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ pvscsi_process_completion_ring(adapter);
+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
+ queue_work(adapter->workqueue, &adapter->work);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
+{
+ struct pvscsi_ctx *ctx = adapter->cmd_map;
+ unsigned i;
+
+ for (i = 0; i < adapter->req_depth; ++i, ++ctx)
+ free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
+}
+
+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
+ unsigned int *irq)
+{
+ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
+ int ret;
+
+ ret = pci_enable_msix_exact(adapter->dev, &entry, 1);
+ if (ret)
+ return ret;
+
+ *irq = entry.vector;
+
+ return 0;
+}
+
+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
+{
+ if (adapter->irq) {
+ free_irq(adapter->irq, adapter);
+ adapter->irq = 0;
+ }
+ if (adapter->use_msi) {
+ pci_disable_msi(adapter->dev);
+ adapter->use_msi = 0;
+ } else if (adapter->use_msix) {
+ pci_disable_msix(adapter->dev);
+ adapter->use_msix = 0;
+ }
+}
+
+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
+{
+ pvscsi_shutdown_intr(adapter);
+
+ if (adapter->workqueue)
+ destroy_workqueue(adapter->workqueue);
+
+ if (adapter->mmioBase)
+ pci_iounmap(adapter->dev, adapter->mmioBase);
+
+ pci_release_regions(adapter->dev);
+
+ if (adapter->cmd_map) {
+ pvscsi_free_sgls(adapter);
+ kfree(adapter->cmd_map);
+ }
+
+ if (adapter->rings_state)
+ pci_free_consistent(adapter->dev, PAGE_SIZE,
+ adapter->rings_state, adapter->ringStatePA);
+
+ if (adapter->req_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->req_pages * PAGE_SIZE,
+ adapter->req_ring, adapter->reqRingPA);
+
+ if (adapter->cmp_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->cmp_pages * PAGE_SIZE,
+ adapter->cmp_ring, adapter->cmpRingPA);
+
+ if (adapter->msg_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->msg_pages * PAGE_SIZE,
+ adapter->msg_ring, adapter->msgRingPA);
+}
+
+/*
+ * Allocate scatter gather lists.
+ *
+ * These are statically allocated. Trying to be clever was not worth it.
+ *
+ * Dynamic allocation can fail, and we can't go deep into the memory
+ * allocator, since we're a SCSI driver, and trying too hard to allocate
+ * memory might generate disk I/O. We also don't want to fail disk I/O
+ * in that case because we can't get an allocation - the I/O could be
+ * trying to swap out data to free memory. Since that is pathological,
+ * just use a statically allocated scatter list.
+ *
+ */
+static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
+{
+ struct pvscsi_ctx *ctx;
+ int i;
+
+ ctx = adapter->cmd_map;
+ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
+
+ for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
+ ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(SGL_SIZE));
+ ctx->sglPA = 0;
+ BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
+ if (!ctx->sgl) {
+ for (; i >= 0; --i, --ctx) {
+ free_pages((unsigned long)ctx->sgl,
+ get_order(SGL_SIZE));
+ ctx->sgl = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Query the device, fetch the config info and return the
+ * maximum number of targets on the adapter. In case of
+ * failure due to any reason return default i.e. 16.
+ */
+static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
+{
+ struct PVSCSICmdDescConfigCmd cmd;
+ struct PVSCSIConfigPageHeader *header;
+ struct device *dev;
+ dma_addr_t configPagePA;
+ void *config_page;
+ u32 numPhys = 16;
+
+ dev = pvscsi_dev(adapter);
+ config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &configPagePA);
+ if (!config_page) {
+ dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
+ goto exit;
+ }
+ BUG_ON(configPagePA & ~PAGE_MASK);
+
+ /* Fetch config info from the device. */
+ cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
+ cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
+ cmd.cmpAddr = configPagePA;
+ cmd._pad = 0;
+
+ /*
+ * Mark the completion page header with error values. If the device
+ * completes the command successfully, it sets the status values to
+ * indicate success.
+ */
+ header = config_page;
+ memset(header, 0, sizeof *header);
+ header->hostStatus = BTSTAT_INVPARAM;
+ header->scsiStatus = SDSTAT_CHECK;
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
+
+ if (header->hostStatus == BTSTAT_SUCCESS &&
+ header->scsiStatus == SDSTAT_GOOD) {
+ struct PVSCSIConfigPageController *config;
+
+ config = config_page;
+ numPhys = config->numPhys;
+ } else
+ dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
+ header->hostStatus, header->scsiStatus);
+ pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+exit:
+ return numPhys;
+}
+
+static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pvscsi_adapter *adapter;
+ struct pvscsi_adapter adapter_temp;
+ struct Scsi_Host *host = NULL;
+ unsigned int i;
+ unsigned long flags = 0;
+ int error;
+ u32 max_id;
+
+ error = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ return error;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+ printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
+ } else {
+ printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
+ goto out_disable_device;
+ }
+
+ /*
+ * Let's use a temp pvscsi_adapter struct until we find the number of
+ * targets on the adapter, after that we will switch to the real
+ * allocated struct.
+ */
+ adapter = &adapter_temp;
+ memset(adapter, 0, sizeof(*adapter));
+ adapter->dev = pdev;
+ adapter->rev = pdev->revision;
+
+ if (pci_request_regions(pdev, "vmw_pvscsi")) {
+ printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
+ goto out_disable_device;
+ }
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
+ continue;
+
+ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
+ continue;
+
+ break;
+ }
+
+ if (i == DEVICE_COUNT_RESOURCE) {
+ printk(KERN_ERR
+ "vmw_pvscsi: adapter has no suitable MMIO region\n");
+ goto out_release_resources_and_disable;
+ }
+
+ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
+
+ if (!adapter->mmioBase) {
+ printk(KERN_ERR
+ "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
+ i, PVSCSI_MEM_SPACE_SIZE);
+ goto out_release_resources_and_disable;
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * Ask the device for max number of targets before deciding the
+ * default pvscsi_ring_pages value.
+ */
+ max_id = pvscsi_get_max_targets(adapter);
+ printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
+
+ if (pvscsi_ring_pages == 0)
+ /*
+ * Set the right default value. Up to 16 it is 8, above it is
+ * max.
+ */
+ pvscsi_ring_pages = (max_id > 16) ?
+ PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+ PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+ printk(KERN_INFO
+ "vmw_pvscsi: setting ring_pages to %d\n",
+ pvscsi_ring_pages);
+
+ pvscsi_template.can_queue =
+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+ pvscsi_template.cmd_per_lun =
+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+ if (!host) {
+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+ goto out_release_resources_and_disable;
+ }
+
+ /*
+ * Let's use the real pvscsi_adapter struct here onwards.
+ */
+ adapter = shost_priv(host);
+ memset(adapter, 0, sizeof(*adapter));
+ adapter->dev = pdev;
+ adapter->host = host;
+ /*
+ * Copy back what we already have to the allocated adapter struct.
+ */
+ adapter->rev = adapter_temp.rev;
+ adapter->mmioBase = adapter_temp.mmioBase;
+
+ spin_lock_init(&adapter->hw_lock);
+ host->max_channel = 0;
+ host->max_lun = 1;
+ host->max_cmd_len = 16;
+ host->max_id = max_id;
+
+ pci_set_drvdata(pdev, host);
+
+ ll_adapter_reset(adapter);
+
+ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
+
+ error = pvscsi_allocate_rings(adapter);
+ if (error) {
+ printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
+ goto out_release_resources;
+ }
+
+ /*
+ * From this point on we should reset the adapter if anything goes
+ * wrong.
+ */
+ pvscsi_setup_all_rings(adapter);
+
+ adapter->cmd_map = kcalloc(adapter->req_depth,
+ sizeof(struct pvscsi_ctx), GFP_KERNEL);
+ if (!adapter->cmd_map) {
+ printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
+ error = -ENOMEM;
+ goto out_reset_adapter;
+ }
+
+ INIT_LIST_HEAD(&adapter->cmd_pool);
+ for (i = 0; i < adapter->req_depth; i++) {
+ struct pvscsi_ctx *ctx = adapter->cmd_map + i;
+ list_add(&ctx->list, &adapter->cmd_pool);
+ }
+
+ error = pvscsi_allocate_sg(adapter);
+ if (error) {
+ printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
+ goto out_reset_adapter;
+ }
+
+ if (!pvscsi_disable_msix &&
+ pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
+ printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
+ adapter->use_msix = 1;
+ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
+ printk(KERN_INFO "vmw_pvscsi: using MSI\n");
+ adapter->use_msi = 1;
+ adapter->irq = pdev->irq;
+ } else {
+ printk(KERN_INFO "vmw_pvscsi: using INTx\n");
+ adapter->irq = pdev->irq;
+ flags = IRQF_SHARED;
+ }
+
+ adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
+ printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
+ adapter->use_req_threshold ? "en" : "dis");
+
+ error = request_irq(adapter->irq, pvscsi_isr, flags,
+ "vmw_pvscsi", adapter);
+ if (error) {
+ printk(KERN_ERR
+ "vmw_pvscsi: unable to request IRQ: %d\n", error);
+ adapter->irq = 0;
+ goto out_reset_adapter;
+ }
+
+ error = scsi_add_host(host, &pdev->dev);
+ if (error) {
+ printk(KERN_ERR
+ "vmw_pvscsi: scsi_add_host failed: %d\n", error);
+ goto out_reset_adapter;
+ }
+
+ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
+ adapter->rev, host->host_no);
+
+ pvscsi_unmask_intr(adapter);
+
+ scsi_scan_host(host);
+
+ return 0;
+
+out_reset_adapter:
+ ll_adapter_reset(adapter);
+out_release_resources:
+ pvscsi_release_resources(adapter);
+ scsi_host_put(host);
+out_disable_device:
+ pci_disable_device(pdev);
+
+ return error;
+
+out_release_resources_and_disable:
+ pvscsi_release_resources(adapter);
+ goto out_disable_device;
+}
+
+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
+{
+ pvscsi_mask_intr(adapter);
+
+ if (adapter->workqueue)
+ flush_workqueue(adapter->workqueue);
+
+ pvscsi_shutdown_intr(adapter);
+
+ pvscsi_process_request_ring(adapter);
+ pvscsi_process_completion_ring(adapter);
+ ll_adapter_reset(adapter);
+}
+
+static void pvscsi_shutdown(struct pci_dev *dev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(dev);
+ struct pvscsi_adapter *adapter = shost_priv(host);
+
+ __pvscsi_shutdown(adapter);
+}
+
+static void pvscsi_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct pvscsi_adapter *adapter = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ __pvscsi_shutdown(adapter);
+ pvscsi_release_resources(adapter);
+
+ scsi_host_put(host);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver pvscsi_pci_driver = {
+ .name = "vmw_pvscsi",
+ .id_table = pvscsi_pci_tbl,
+ .probe = pvscsi_probe,
+ .remove = pvscsi_remove,
+ .shutdown = pvscsi_shutdown,
+};
+
+static int __init pvscsi_init(void)
+{
+ pr_info("%s - version %s\n",
+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
+ return pci_register_driver(&pvscsi_pci_driver);
+}
+
+static void __exit pvscsi_exit(void)
+{
+ pci_unregister_driver(&pvscsi_pci_driver);
+}
+
+module_init(pvscsi_init);
+module_exit(pvscsi_exit);
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
new file mode 100644
index 000000000..ee16f0c5c
--- /dev/null
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -0,0 +1,468 @@
+/*
+ * VMware PVSCSI header file
+ *
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
+ *
+ */
+
+#ifndef _VMW_PVSCSI_H_
+#define _VMW_PVSCSI_H_
+
+#include <linux/types.h>
+
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.5.0-k"
+
+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
+
+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
+
+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
+
+/*
+ * host adapter status/error codes
+ */
+enum HostBusAdapterStatus {
+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+ BTSTAT_DATA_UNDERRUN = 0x0c,
+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence
+ * requested by target */
+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from
+ * first CCB */
+ BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment
+ * list */
+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message
+ * rejected by target */
+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the
+ * host adapter */
+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN,
+ * sent a SCSI RST */
+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI
+ * RST */
+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly
+ * (w/o tag) */
+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+};
+
+/*
+ * SCSI device status values.
+ */
+enum ScsiDeviceStatus {
+ SDSTAT_GOOD = 0x00, /* No errors. */
+ SDSTAT_CHECK = 0x02, /* Check condition. */
+};
+
+/*
+ * Register offsets.
+ *
+ * These registers are accessible both via i/o space and mm i/o.
+ */
+
+enum PVSCSIRegOffset {
+ PVSCSI_REG_OFFSET_COMMAND = 0x0,
+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
+};
+
+/*
+ * Virtual h/w commands.
+ */
+
+enum PVSCSICommands {
+ PVSCSI_CMD_FIRST = 0, /* has to be first */
+
+ PVSCSI_CMD_ADAPTER_RESET = 1,
+ PVSCSI_CMD_ISSUE_SCSI = 2,
+ PVSCSI_CMD_SETUP_RINGS = 3,
+ PVSCSI_CMD_RESET_BUS = 4,
+ PVSCSI_CMD_RESET_DEVICE = 5,
+ PVSCSI_CMD_ABORT_CMD = 6,
+ PVSCSI_CMD_CONFIG = 7,
+ PVSCSI_CMD_SETUP_MSG_RING = 8,
+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD = 10,
+
+ PVSCSI_CMD_LAST = 11 /* has to be last */
+};
+
+/*
+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
+ */
+
+struct PVSCSICmdDescResetDevice {
+ u32 target;
+ u8 lun[8];
+} __packed;
+
+/*
+ * Command descriptor for PVSCSI_CMD_CONFIG --
+ */
+
+struct PVSCSICmdDescConfigCmd {
+ u64 cmpAddr;
+ u64 configPageAddress;
+ u32 configPageNum;
+ u32 _pad;
+} __packed;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD --
+ */
+
+struct PVSCSICmdDescSetupReqCall {
+ u32 enable;
+} __packed;
+
+enum PVSCSIConfigPageType {
+ PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
+ PVSCSI_CONFIG_PAGE_PHY = 0x1959,
+ PVSCSI_CONFIG_PAGE_DEVICE = 0x195a,
+};
+
+enum PVSCSIConfigPageAddressType {
+ PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
+ PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121,
+ PVSCSI_CONFIG_PHY_ADDRESS = 0x2122,
+};
+
+/*
+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
+ *
+ * - currently does not support specifying the LUN.
+ * - _pad should be 0.
+ */
+
+struct PVSCSICmdDescAbortCmd {
+ u64 context;
+ u32 target;
+ u32 _pad;
+} __packed;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
+ *
+ * Notes:
+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
+ * - reqRingNumPages and cmpRingNumPages need to be inferior to
+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
+ */
+
+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
+struct PVSCSICmdDescSetupRings {
+ u32 reqRingNumPages;
+ u32 cmpRingNumPages;
+ u64 ringsStatePPN;
+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+} __packed;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
+ *
+ * Notes:
+ * - this command was not supported in the initial revision of the h/w
+ * interface. Before using it, you need to check that it is supported by
+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
+ * immediately after read the 'command status' register:
+ * * a value of -1 means that the cmd is NOT supported,
+ * * a value != -1 means that the cmd IS supported.
+ * If it's supported the 'command status' register should return:
+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
+ * RingsState page is already setup. If not, the command is a nop.
+ * - numPages needs to be a power of two,
+ * - numPages needs to be different from 0,
+ * - _pad should be zero.
+ */
+
+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
+
+struct PVSCSICmdDescSetupMsgRing {
+ u32 numPages;
+ u32 _pad;
+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
+} __packed;
+
+enum PVSCSIMsgType {
+ PVSCSI_MSG_DEV_ADDED = 0,
+ PVSCSI_MSG_DEV_REMOVED = 1,
+ PVSCSI_MSG_LAST = 2,
+};
+
+/*
+ * Msg descriptor.
+ *
+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
+ *
+ * - type is of type enum PVSCSIMsgType.
+ * - the content of args depend on the type of event being delivered.
+ */
+
+struct PVSCSIRingMsgDesc {
+ u32 type;
+ u32 args[31];
+} __packed;
+
+struct PVSCSIMsgDescDevStatusChanged {
+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
+ u32 bus;
+ u32 target;
+ u8 lun[8];
+ u32 pad[27];
+} __packed;
+
+/*
+ * Rings state.
+ *
+ * - the fields:
+ * . msgProdIdx,
+ * . msgConsIdx,
+ * . msgNumEntriesLog2,
+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
+ * - '_pad' helps to ensure that the msg related fields are on their own
+ * cache-line.
+ */
+
+struct PVSCSIRingsState {
+ u32 reqProdIdx;
+ u32 reqConsIdx;
+ u32 reqNumEntriesLog2;
+
+ u32 cmpProdIdx;
+ u32 cmpConsIdx;
+ u32 cmpNumEntriesLog2;
+
+ u32 reqCallThreshold;
+
+ u8 _pad[100];
+
+ u32 msgProdIdx;
+ u32 msgConsIdx;
+ u32 msgNumEntriesLog2;
+} __packed;
+
+/*
+ * Request descriptor.
+ *
+ * sizeof(RingReqDesc) = 128
+ *
+ * - context: is a unique identifier of a command. It could normally be any
+ * 64bit value, however we currently store it in the serialNumber variable
+ * of struct SCSI_Command, so we have the following restrictions due to the
+ * way this field is handled in the vmkernel storage stack:
+ * * this value can't be 0,
+ * * the upper 32bit need to be 0 since serialNumber is as a u32.
+ * Currently tracked as PR 292060.
+ * - dataLen: contains the total number of bytes that need to be transferred.
+ * - dataAddr:
+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
+ * s/g table segment, each s/g segment is entirely contained on a single
+ * page of physical memory,
+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
+ * the buffer used for the DMA transfer,
+ * - flags:
+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
+ * 16bytes. To be specified.
+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
+ * completion of the i/o. For guest OSes that use lowest priority message
+ * delivery mode (such as windows), we use this "hint" to deliver the
+ * completion action to the proper vcpu. For now, we can use the vcpuId of
+ * the processor that initiated the i/o as a likely candidate for the vcpu
+ * that will be waiting for the completion..
+ * - bus should be 0: we currently only support bus 0 for now.
+ * - unused should be zero'd.
+ */
+
+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
+
+struct PVSCSIRingReqDesc {
+ u64 context;
+ u64 dataAddr;
+ u64 dataLen;
+ u64 senseAddr;
+ u32 senseLen;
+ u32 flags;
+ u8 cdb[16];
+ u8 cdbLen;
+ u8 lun[8];
+ u8 tag;
+ u8 bus;
+ u8 target;
+ u8 vcpuHint;
+ u8 unused[59];
+} __packed;
+
+/*
+ * Scatter-gather list management.
+ *
+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
+ * table segment.
+ *
+ * - each segment of the s/g table contain a succession of struct
+ * PVSCSISGElement.
+ * - each segment is entirely contained on a single physical page of memory.
+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
+ * PVSCSISGElement.flags and in this case:
+ * * addr is the PA of the next s/g segment,
+ * * length is undefined, assumed to be 0.
+ */
+
+struct PVSCSISGElement {
+ u64 addr;
+ u32 length;
+ u32 flags;
+} __packed;
+
+/*
+ * Completion descriptor.
+ *
+ * sizeof(RingCmpDesc) = 32
+ *
+ * - context: identifier of the command. The same thing that was specified
+ * under "context" as part of struct RingReqDesc at initiation time,
+ * - dataLen: number of bytes transferred for the actual i/o operation,
+ * - senseLen: number of bytes written into the sense buffer,
+ * - hostStatus: adapter status,
+ * - scsiStatus: device status,
+ * - _pad should be zero.
+ */
+
+struct PVSCSIRingCmpDesc {
+ u64 context;
+ u64 dataLen;
+ u32 senseLen;
+ u16 hostStatus;
+ u16 scsiStatus;
+ u32 _pad[2];
+} __packed;
+
+struct PVSCSIConfigPageHeader {
+ u32 pageNum;
+ u16 numDwords;
+ u16 hostStatus;
+ u16 scsiStatus;
+ u16 reserved[3];
+} __packed;
+
+struct PVSCSIConfigPageController {
+ struct PVSCSIConfigPageHeader header;
+ u64 nodeWWN; /* Device name as defined in the SAS spec. */
+ u16 manufacturer[64];
+ u16 serialNumber[64];
+ u16 opromVersion[32];
+ u16 hwVersion[32];
+ u16 firmwareVersion[32];
+ u32 numPhys;
+ u8 useConsecutivePhyWWNs;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * Interrupt status / IRQ bits.
+ */
+
+#define PVSCSI_INTR_CMPL_0 (1 << 0)
+#define PVSCSI_INTR_CMPL_1 (1 << 1)
+#define PVSCSI_INTR_CMPL_MASK MASK(2)
+
+#define PVSCSI_INTR_MSG_0 (1 << 2)
+#define PVSCSI_INTR_MSG_1 (1 << 3)
+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
+
+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
+
+/*
+ * Number of MSI-X vectors supported.
+ */
+#define PVSCSI_MAX_INTRS 24
+
+/*
+ * Enumeration of supported MSI-X vectors
+ */
+#define PVSCSI_VECTOR_COMPLETION 0
+
+/*
+ * Misc constants for the rings.
+ */
+
+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
+
+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
+ (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
+
+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
+
+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
+
+enum PVSCSIMemSpace {
+ PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
+ PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
+ PVSCSI_MEM_SPACE_MISC_PAGE = 2,
+ PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
+ PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
+ PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
+};
+
+#define PVSCSI_MEM_SPACE_NUM_PAGES \
+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
+
+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
+
+#endif /* _VMW_PVSCSI_H_ */
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
new file mode 100644
index 000000000..9e09da412
--- /dev/null
+++ b/drivers/scsi/wd33c93.c
@@ -0,0 +1,2207 @@
+/*
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Drew Eckhardt's excellent 'Generic NCR5380' sources from Linux-PC
+ * provided much of the inspiration and some of the code for this
+ * driver. Everything I know about Amiga DMA was gleaned from careful
+ * reading of Hamish Mcdonald's original wd33c93 driver; in fact, I
+ * borrowed shamelessly from all over that source. Thanks Hamish!
+ *
+ * _This_ driver is (I feel) an improvement over the old one in
+ * several respects:
+ *
+ * - Target Disconnection/Reconnection is now supported. Any
+ * system with more than one device active on the SCSI bus
+ * will benefit from this. The driver defaults to what I
+ * call 'adaptive disconnect' - meaning that each command
+ * is evaluated individually as to whether or not it should
+ * be run with the option to disconnect/reselect (if the
+ * device chooses), or as a "SCSI-bus-hog".
+ *
+ * - Synchronous data transfers are now supported. Because of
+ * a few devices that choke after telling the driver that
+ * they can do sync transfers, we don't automatically use
+ * this faster protocol - it can be enabled via the command-
+ * line on a device-by-device basis.
+ *
+ * - Runtime operating parameters can now be specified through
+ * the 'amiboot' or the 'insmod' command line. For amiboot do:
+ * "amiboot [usual stuff] wd33c93=blah,blah,blah"
+ * The defaults should be good for most people. See the comment
+ * for 'setup_strings' below for more details.
+ *
+ * - The old driver relied exclusively on what the Western Digital
+ * docs call "Combination Level 2 Commands", which are a great
+ * idea in that the CPU is relieved of a lot of interrupt
+ * overhead. However, by accepting a certain (user-settable)
+ * amount of additional interrupts, this driver achieves
+ * better control over the SCSI bus, and data transfers are
+ * almost as fast while being much easier to define, track,
+ * and debug.
+ *
+ *
+ * TODO:
+ * more speed. linked commands.
+ *
+ *
+ * People with bug reports, wish-lists, complaints, comments,
+ * or improvements are asked to pah-leeez email me (John Shifflett)
+ * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
+ * this thing into as good a shape as possible, and I'm positive
+ * there are lots of lurking bugs and "Stupid Places".
+ *
+ * Updates:
+ *
+ * Added support for pre -A chips, which don't have advanced features
+ * and will generate CSR_RESEL rather than CSR_RESEL_AM.
+ * Richard Hirst <richard@sleepie.demon.co.uk> August 2000
+ *
+ * Added support for Burst Mode DMA and Fast SCSI. Enabled the use of
+ * default_sx_per for asynchronous data transfers. Added adjustment
+ * of transfer periods in sx_table to the actual input-clock.
+ * peter fuerst <post@pfrst.de> February 2007
+ */
+
+#include <linux/module.h>
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/irq.h>
+
+#include "wd33c93.h"
+
+#define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns
+
+
+#define WD33C93_VERSION "1.26++"
+#define WD33C93_DATE "10/Feb/2007"
+
+MODULE_AUTHOR("John Shifflett");
+MODULE_DESCRIPTION("Generic WD33C93 SCSI driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * 'setup_strings' is a single string used to pass operating parameters and
+ * settings from the kernel/module command-line to the driver. 'setup_args[]'
+ * is an array of strings that define the compile-time default values for
+ * these settings. If Linux boots with an amiboot or insmod command-line,
+ * those settings are combined with 'setup_args[]'. Note that amiboot
+ * command-lines are prefixed with "wd33c93=" while insmod uses a
+ * "setup_strings=" prefix. The driver recognizes the following keywords
+ * (lower case required) and arguments:
+ *
+ * - nosync:bitmask -bitmask is a byte where the 1st 7 bits correspond with
+ * the 7 possible SCSI devices. Set a bit to negotiate for
+ * asynchronous transfers on that device. To maintain
+ * backwards compatibility, a command-line such as
+ * "wd33c93=255" will be automatically translated to
+ * "wd33c93=nosync:0xff".
+ * - nodma:x -x = 1 to disable DMA, x = 0 to enable it. Argument is
+ * optional - if not present, same as "nodma:1".
+ * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
+ * period. Default is 500; acceptable values are 250 - 1000.
+ * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
+ * x = 1 does 'adaptive' disconnects, which is the default
+ * and generally the best choice.
+ * - debug:x -If 'DEBUGGING_ON' is defined, x is a bit mask that causes
+ * various types of debug output to printed - see the DB_xxx
+ * defines in wd33c93.h
+ * - clock:x -x = clock input in MHz for WD33c93 chip. Normal values
+ * would be from 8 through 20. Default is 8.
+ * - burst:x -x = 1 to use Burst Mode (or Demand-Mode) DMA, x = 0 to use
+ * Single Byte DMA, which is the default. Argument is
+ * optional - if not present, same as "burst:1".
+ * - fast:x -x = 1 to enable Fast SCSI, which is only effective with
+ * input-clock divisor 4 (WD33C93_FS_16_20), x = 0 to disable
+ * it, which is the default. Argument is optional - if not
+ * present, same as "fast:1".
+ * - next -No argument. Used to separate blocks of keywords when
+ * there's more than one host adapter in the system.
+ *
+ * Syntax Notes:
+ * - Numeric arguments can be decimal or the '0x' form of hex notation. There
+ * _must_ be a colon between a keyword and its numeric argument, with no
+ * spaces.
+ * - Keywords are separated by commas, no spaces, in the standard kernel
+ * command-line manner.
+ * - A keyword in the 'nth' comma-separated command-line member will overwrite
+ * the 'nth' element of setup_args[]. A blank command-line member (in
+ * other words, a comma with no preceding keyword) will _not_ overwrite
+ * the corresponding setup_args[] element.
+ * - If a keyword is used more than once, the first one applies to the first
+ * SCSI host found, the second to the second card, etc, unless the 'next'
+ * keyword is used to change the order.
+ *
+ * Some amiboot examples (for insmod, use 'setup_strings' instead of 'wd33c93'):
+ * - wd33c93=nosync:255
+ * - wd33c93=nodma
+ * - wd33c93=nodma:1
+ * - wd33c93=disconnect:2,nosync:0x08,period:250
+ * - wd33c93=debug:0x1c
+ */
+
+/* Normally, no defaults are specified */
+static char *setup_args[] = { "", "", "", "", "", "", "", "", "", "" };
+
+static char *setup_strings;
+module_param(setup_strings, charp, 0);
+
+static void wd33c93_execute(struct Scsi_Host *instance);
+
+#ifdef CONFIG_WD33C93_PIO
+static inline uchar
+read_wd33c93(const wd33c93_regs regs, uchar reg_num)
+{
+ uchar data;
+
+ outb(reg_num, regs.SASR);
+ data = inb(regs.SCMD);
+ return data;
+}
+
+static inline unsigned long
+read_wd33c93_count(const wd33c93_regs regs)
+{
+ unsigned long value;
+
+ outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
+ value = inb(regs.SCMD) << 16;
+ value |= inb(regs.SCMD) << 8;
+ value |= inb(regs.SCMD);
+ return value;
+}
+
+static inline uchar
+read_aux_stat(const wd33c93_regs regs)
+{
+ return inb(regs.SASR);
+}
+
+static inline void
+write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value)
+{
+ outb(reg_num, regs.SASR);
+ outb(value, regs.SCMD);
+}
+
+static inline void
+write_wd33c93_count(const wd33c93_regs regs, unsigned long value)
+{
+ outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
+ outb((value >> 16) & 0xff, regs.SCMD);
+ outb((value >> 8) & 0xff, regs.SCMD);
+ outb( value & 0xff, regs.SCMD);
+}
+
+#define write_wd33c93_cmd(regs, cmd) \
+ write_wd33c93((regs), WD_COMMAND, (cmd))
+
+static inline void
+write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
+{
+ int i;
+
+ outb(WD_CDB_1, regs.SASR);
+ for (i=0; i<len; i++)
+ outb(cmnd[i], regs.SCMD);
+}
+
+#else /* CONFIG_WD33C93_PIO */
+static inline uchar
+read_wd33c93(const wd33c93_regs regs, uchar reg_num)
+{
+ *regs.SASR = reg_num;
+ mb();
+ return (*regs.SCMD);
+}
+
+static unsigned long
+read_wd33c93_count(const wd33c93_regs regs)
+{
+ unsigned long value;
+
+ *regs.SASR = WD_TRANSFER_COUNT_MSB;
+ mb();
+ value = *regs.SCMD << 16;
+ value |= *regs.SCMD << 8;
+ value |= *regs.SCMD;
+ mb();
+ return value;
+}
+
+static inline uchar
+read_aux_stat(const wd33c93_regs regs)
+{
+ return *regs.SASR;
+}
+
+static inline void
+write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value)
+{
+ *regs.SASR = reg_num;
+ mb();
+ *regs.SCMD = value;
+ mb();
+}
+
+static void
+write_wd33c93_count(const wd33c93_regs regs, unsigned long value)
+{
+ *regs.SASR = WD_TRANSFER_COUNT_MSB;
+ mb();
+ *regs.SCMD = value >> 16;
+ *regs.SCMD = value >> 8;
+ *regs.SCMD = value;
+ mb();
+}
+
+static inline void
+write_wd33c93_cmd(const wd33c93_regs regs, uchar cmd)
+{
+ *regs.SASR = WD_COMMAND;
+ mb();
+ *regs.SCMD = cmd;
+ mb();
+}
+
+static inline void
+write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
+{
+ int i;
+
+ *regs.SASR = WD_CDB_1;
+ for (i = 0; i < len; i++)
+ *regs.SCMD = cmnd[i];
+}
+#endif /* CONFIG_WD33C93_PIO */
+
+static inline uchar
+read_1_byte(const wd33c93_regs regs)
+{
+ uchar asr;
+ uchar x = 0;
+
+ write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO | 0x80);
+ do {
+ asr = read_aux_stat(regs);
+ if (asr & ASR_DBR)
+ x = read_wd33c93(regs, WD_DATA);
+ } while (!(asr & ASR_INT));
+ return x;
+}
+
+static int
+round_period(unsigned int period, const struct sx_period *sx_table)
+{
+ int x;
+
+ for (x = 1; sx_table[x].period_ns; x++) {
+ if ((period <= sx_table[x - 0].period_ns) &&
+ (period > sx_table[x - 1].period_ns)) {
+ return x;
+ }
+ }
+ return 7;
+}
+
+/*
+ * Calculate Synchronous Transfer Register value from SDTR code.
+ */
+static uchar
+calc_sync_xfer(unsigned int period, unsigned int offset, unsigned int fast,
+ const struct sx_period *sx_table)
+{
+ /* When doing Fast SCSI synchronous data transfers, the corresponding
+ * value in 'sx_table' is two times the actually used transfer period.
+ */
+ uchar result;
+
+ if (offset && fast) {
+ fast = STR_FSS;
+ period *= 2;
+ } else {
+ fast = 0;
+ }
+ period *= 4; /* convert SDTR code to ns */
+ result = sx_table[round_period(period,sx_table)].reg_value;
+ result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;
+ result |= fast;
+ return result;
+}
+
+/*
+ * Calculate SDTR code bytes [3],[4] from period and offset.
+ */
+static inline void
+calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast,
+ uchar msg[2])
+{
+ /* 'period' is a "normal"-mode value, like the ones in 'sx_table'. The
+ * actually used transfer period for Fast SCSI synchronous data
+ * transfers is half that value.
+ */
+ period /= 4;
+ if (offset && fast)
+ period /= 2;
+ msg[0] = period;
+ msg[1] = offset;
+}
+
+static int
+wd33c93_queuecommand_lck(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct WD33C93_hostdata *hostdata;
+ struct scsi_cmnd *tmp;
+
+ hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata;
+
+ DB(DB_QUEUE_COMMAND,
+ printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0]))
+
+/* Set up a few fields in the scsi_cmnd structure for our own use:
+ * - host_scribble is the pointer to the next cmd in the input queue
+ * - scsi_done points to the routine we call when a cmd is finished
+ * - result is what you'd expect
+ */
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+/* We use the Scsi_Pointer structure that's included with each command
+ * as a scratchpad (as it's intended to be used!). The handy thing about
+ * the SCp.xxx fields is that they're always associated with a given
+ * cmd, and are preserved across disconnect-reselect. This means we
+ * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
+ * if we keep all the critical pointers and counters in SCp:
+ * - SCp.ptr is the pointer into the RAM buffer
+ * - SCp.this_residual is the size of that buffer
+ * - SCp.buffer points to the current scatter-gather buffer
+ * - SCp.buffers_residual tells us how many S.G. buffers there are
+ * - SCp.have_data_in is not used
+ * - SCp.sent_command is not used
+ * - SCp.phase records this command's SRCID_ER bit setting
+ */
+
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
+ }
+
+/* WD docs state that at the conclusion of a "LEVEL2" command, the
+ * status byte can be retrieved from the LUN register. Apparently,
+ * this is the case only for *uninterrupted* LEVEL2 commands! If
+ * there are any unexpected phases entered, even if they are 100%
+ * legal (different devices may choose to do things differently),
+ * the LEVEL2 command sequence is exited. This often occurs prior
+ * to receiving the status byte, in which case the driver does a
+ * status phase interrupt and gets the status byte on its own.
+ * While such a command can then be "resumed" (ie restarted to
+ * finish up as a LEVEL2 command), the LUN register will NOT be
+ * a valid status byte at the command's conclusion, and we must
+ * use the byte obtained during the earlier interrupt. Here, we
+ * preset SCp.Status to an illegal value (0xff) so that when
+ * this command finally completes, we can tell where the actual
+ * status byte is stored.
+ */
+
+ cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+
+ /*
+ * Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE
+ * commands are added to the head of the queue so that the desired
+ * sense data is not lost before REQUEST_SENSE executes.
+ */
+
+ spin_lock_irq(&hostdata->lock);
+
+ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ cmd->host_scribble = (uchar *) hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ } else { /* find the end of the queue */
+ for (tmp = (struct scsi_cmnd *) hostdata->input_Q;
+ tmp->host_scribble;
+ tmp = (struct scsi_cmnd *) tmp->host_scribble) ;
+ tmp->host_scribble = (uchar *) cmd;
+ }
+
+/* We know that there's at least one command in 'input_Q' now.
+ * Go see if any of them are runnable!
+ */
+
+ wd33c93_execute(cmd->device->host);
+
+ DB(DB_QUEUE_COMMAND, printk(")Q "))
+
+ spin_unlock_irq(&hostdata->lock);
+ return 0;
+}
+
+DEF_SCSI_QCMD(wd33c93_queuecommand)
+
+/*
+ * This routine attempts to start a scsi command. If the host_card is
+ * already connected, we give up immediately. Otherwise, look through
+ * the input_Q, using the first command we find that's intended
+ * for a currently non-busy target/lun.
+ *
+ * wd33c93_execute() is always called with interrupts disabled or from
+ * the wd33c93_intr itself, which means that a wd33c93 interrupt
+ * cannot occur while we are in here.
+ */
+static void
+wd33c93_execute(struct Scsi_Host *instance)
+{
+ struct WD33C93_hostdata *hostdata =
+ (struct WD33C93_hostdata *) instance->hostdata;
+ const wd33c93_regs regs = hostdata->regs;
+ struct scsi_cmnd *cmd, *prev;
+
+ DB(DB_EXECUTE, printk("EX("))
+ if (hostdata->selecting || hostdata->connected) {
+ DB(DB_EXECUTE, printk(")EX-0 "))
+ return;
+ }
+
+ /*
+ * Search through the input_Q for a command destined
+ * for an idle target/lun.
+ */
+
+ cmd = (struct scsi_cmnd *) hostdata->input_Q;
+ prev = NULL;
+ while (cmd) {
+ if (!(hostdata->busy[cmd->device->id] &
+ (1 << (cmd->device->lun & 0xff))))
+ break;
+ prev = cmd;
+ cmd = (struct scsi_cmnd *) cmd->host_scribble;
+ }
+
+ /* quit if queue empty or all possible targets are busy */
+
+ if (!cmd) {
+ DB(DB_EXECUTE, printk(")EX-1 "))
+ return;
+ }
+
+ /* remove command from queue */
+
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ else
+ hostdata->input_Q = (struct scsi_cmnd *) cmd->host_scribble;
+
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[cmd->device->id]++;
+#endif
+
+ /*
+ * Start the selection process
+ */
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id);
+ else
+ write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
+
+/* Now we need to figure out whether or not this command is a good
+ * candidate for disconnect/reselect. We guess to the best of our
+ * ability, based on a set of hierarchical rules. When several
+ * devices are operating simultaneously, disconnects are usually
+ * an advantage. In a single device system, or if only 1 device
+ * is being accessed, transfers usually go faster if disconnects
+ * are not allowed:
+ *
+ * + Commands should NEVER disconnect if hostdata->disconnect =
+ * DIS_NEVER (this holds for tape drives also), and ALWAYS
+ * disconnect if hostdata->disconnect = DIS_ALWAYS.
+ * + Tape drive commands should always be allowed to disconnect.
+ * + Disconnect should be allowed if disconnected_Q isn't empty.
+ * + Commands should NOT disconnect if input_Q is empty.
+ * + Disconnect should be allowed if there are commands in input_Q
+ * for a different target/lun. In this case, the other commands
+ * should be made disconnect-able, if not already.
+ *
+ * I know, I know - this code would flunk me out of any
+ * "C Programming 101" class ever offered. But it's easy
+ * to change around and experiment with for now.
+ */
+
+ cmd->SCp.phase = 0; /* assume no disconnect */
+ if (hostdata->disconnect == DIS_NEVER)
+ goto no;
+ if (hostdata->disconnect == DIS_ALWAYS)
+ goto yes;
+ if (cmd->device->type == 1) /* tape drive? */
+ goto yes;
+ if (hostdata->disconnected_Q) /* other commands disconnected? */
+ goto yes;
+ if (!(hostdata->input_Q)) /* input_Q empty? */
+ goto no;
+ for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev;
+ prev = (struct scsi_cmnd *) prev->host_scribble) {
+ if ((prev->device->id != cmd->device->id) ||
+ (prev->device->lun != cmd->device->lun)) {
+ for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev;
+ prev = (struct scsi_cmnd *) prev->host_scribble)
+ prev->SCp.phase = 1;
+ goto yes;
+ }
+ }
+
+ goto no;
+
+ yes:
+ cmd->SCp.phase = 1;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_allowed_cnt[cmd->device->id]++;
+#endif
+
+ no:
+
+ write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
+
+ write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun);
+ write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
+ hostdata->sync_xfer[cmd->device->id]);
+ hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF));
+
+ if ((hostdata->level2 == L2_NONE) ||
+ (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
+
+ /*
+ * Do a 'Select-With-ATN' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * CSR_SELECT: success - proceed.
+ */
+
+ hostdata->selecting = cmd;
+
+/* Every target has its own synchronous transfer setting, kept in the
+ * sync_xfer array, and a corresponding status byte in sync_stat[].
+ * Each target's sync_stat[] entry is initialized to SX_UNSET, and its
+ * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
+ * means that the parameters are undetermined as yet, and that we
+ * need to send an SDTR message to this device after selection is
+ * complete: We set SS_FIRST to tell the interrupt routine to do so.
+ * If we've been asked not to try synchronous transfers on this
+ * target (and _all_ luns within it), we'll still send the SDTR message
+ * later, but at that time we'll negotiate for async by specifying a
+ * sync fifo depth of 0.
+ */
+ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET)
+ hostdata->sync_stat[cmd->device->id] = SS_FIRST;
+ hostdata->state = S_SELECTING;
+ write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN);
+ } else {
+
+ /*
+ * Do a 'Select-With-ATN-Xfer' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * anything else: success - proceed.
+ */
+
+ hostdata->connected = cmd;
+ write_wd33c93(regs, WD_COMMAND_PHASE, 0);
+
+ /* copy command_descriptor_block into WD chip
+ * (take advantage of auto-incrementing)
+ */
+
+ write_wd33c93_cdb(regs, cmd->cmd_len, cmd->cmnd);
+
+ /* The wd33c93 only knows about Group 0, 1, and 5 commands when
+ * it's doing a 'select-and-transfer'. To be safe, we write the
+ * size of the CDB into the OWN_ID register for every case. This
+ * way there won't be problems with vendor-unique, audio, etc.
+ */
+
+ write_wd33c93(regs, WD_OWN_ID, cmd->cmd_len);
+
+ /* When doing a non-disconnect command with DMA, we can save
+ * ourselves a DATA phase interrupt later by setting everything
+ * up ahead of time.
+ */
+
+ if ((cmd->SCp.phase == 0) && (hostdata->no_dma == 0)) {
+ if (hostdata->dma_setup(cmd,
+ (cmd->sc_data_direction == DMA_TO_DEVICE) ?
+ DATA_OUT_DIR : DATA_IN_DIR))
+ write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */
+ else {
+ write_wd33c93_count(regs,
+ cmd->SCp.this_residual);
+ write_wd33c93(regs, WD_CONTROL,
+ CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
+ hostdata->dma = D_DMA_RUNNING;
+ }
+ } else
+ write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */
+
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
+ }
+
+ /*
+ * Since the SCSI bus can handle only 1 connection at a time,
+ * we get out of here now. If the selection fails, or when
+ * the command disconnects, we'll come back to this routine
+ * to search the input_Q again...
+ */
+
+ DB(DB_EXECUTE,
+ printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
+}
+
+static void
+transfer_pio(const wd33c93_regs regs, uchar * buf, int cnt,
+ int data_in_dir, struct WD33C93_hostdata *hostdata)
+{
+ uchar asr;
+
+ DB(DB_TRANSFER,
+ printk("(%p,%d,%s:", buf, cnt, data_in_dir ? "in" : "out"))
+
+ write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_wd33c93_count(regs, cnt);
+ write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO);
+ if (data_in_dir) {
+ do {
+ asr = read_aux_stat(regs);
+ if (asr & ASR_DBR)
+ *buf++ = read_wd33c93(regs, WD_DATA);
+ } while (!(asr & ASR_INT));
+ } else {
+ do {
+ asr = read_aux_stat(regs);
+ if (asr & ASR_DBR)
+ write_wd33c93(regs, WD_DATA, *buf++);
+ } while (!(asr & ASR_INT));
+ }
+
+ /* Note: we are returning with the interrupt UN-cleared.
+ * Since (presumably) an entire I/O operation has
+ * completed, the bus phase is probably different, and
+ * the interrupt routine will discover this when it
+ * responds to the uncleared int.
+ */
+
+}
+
+static void
+transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
+ int data_in_dir)
+{
+ struct WD33C93_hostdata *hostdata;
+ unsigned long length;
+
+ hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata;
+
+/* Normally, you'd expect 'this_residual' to be non-zero here.
+ * In a series of scatter-gather transfers, however, this
+ * routine will usually be called with 'this_residual' equal
+ * to 0 and 'buffers_residual' non-zero. This means that a
+ * previous transfer completed, clearing 'this_residual', and
+ * now we need to setup the next scatter-gather buffer as the
+ * source or destination for THIS transfer.
+ */
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ }
+ if (!cmd->SCp.this_residual) /* avoid bogus setups */
+ return;
+
+ write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
+ hostdata->sync_xfer[cmd->device->id]);
+
+/* 'hostdata->no_dma' is TRUE if we don't even want to try DMA.
+ * Update 'this_residual' and 'ptr' after 'transfer_pio()' returns.
+ */
+
+ if (hostdata->no_dma || hostdata->dma_setup(cmd, data_in_dir)) {
+#ifdef PROC_STATISTICS
+ hostdata->pio_cnt++;
+#endif
+ transfer_pio(regs, (uchar *) cmd->SCp.ptr,
+ cmd->SCp.this_residual, data_in_dir, hostdata);
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_wd33c93_count(regs);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ }
+
+/* We are able to do DMA (in fact, the Amiga hardware is
+ * already going!), so start up the wd33c93 in DMA mode.
+ * We set 'hostdata->dma' = D_DMA_RUNNING so that when the
+ * transfer completes and causes an interrupt, we're
+ * reminded to tell the Amiga to shut down its end. We'll
+ * postpone the updating of 'this_residual' and 'ptr'
+ * until then.
+ */
+
+ else {
+#ifdef PROC_STATISTICS
+ hostdata->dma_cnt++;
+#endif
+ write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
+ write_wd33c93_count(regs, cmd->SCp.this_residual);
+
+ if ((hostdata->level2 >= L2_DATA) ||
+ (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_wd33c93(regs, WD_COMMAND_PHASE, 0x45);
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else
+ write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO);
+
+ hostdata->dma = D_DMA_RUNNING;
+ }
+}
+
+void
+wd33c93_intr(struct Scsi_Host *instance)
+{
+ struct WD33C93_hostdata *hostdata =
+ (struct WD33C93_hostdata *) instance->hostdata;
+ const wd33c93_regs regs = hostdata->regs;
+ struct scsi_cmnd *patch, *cmd;
+ uchar asr, sr, phs, id, lun, *ucp, msg;
+ unsigned long length, flags;
+
+ asr = read_aux_stat(regs);
+ if (!(asr & ASR_INT) || (asr & ASR_BSY))
+ return;
+
+ spin_lock_irqsave(&hostdata->lock, flags);
+
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt++;
+#endif
+
+ cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */
+ sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */
+ phs = read_wd33c93(regs, WD_COMMAND_PHASE);
+
+ DB(DB_INTR, printk("{%02x:%02x-", asr, sr))
+
+/* After starting a DMA transfer, the next interrupt
+ * is guaranteed to be in response to completion of
+ * the transfer. Since the Amiga DMA hardware runs in
+ * in an open-ended fashion, it needs to be told when
+ * to stop; do that here if D_DMA_RUNNING is true.
+ * Also, we have to update 'this_residual' and 'ptr'
+ * based on the contents of the TRANSFER_COUNT register,
+ * in case the device decided to do an intermediate
+ * disconnect (a device may do this if it has to do a
+ * seek, or just to be nice and let other devices have
+ * some bus time during long transfers). After doing
+ * whatever is needed, we go on and service the WD3393
+ * interrupt normally.
+ */
+ if (hostdata->dma == D_DMA_RUNNING) {
+ DB(DB_TRANSFER,
+ printk("[%p/%d:", cmd->SCp.ptr, cmd->SCp.this_residual))
+ hostdata->dma_stop(cmd->device->host, cmd, 1);
+ hostdata->dma = D_DMA_OFF;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_wd33c93_count(regs);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ DB(DB_TRANSFER,
+ printk("%p/%d]", cmd->SCp.ptr, cmd->SCp.this_residual))
+ }
+
+/* Respond to the specific WD3393 interrupt - there are quite a few! */
+ switch (sr) {
+ case CSR_TIMEOUT:
+ DB(DB_INTR, printk("TIMEOUT"))
+
+ if (hostdata->state == S_RUNNING_LEVEL2)
+ hostdata->connected = NULL;
+ else {
+ cmd = (struct scsi_cmnd *) hostdata->selecting; /* get a valid cmd */
+ hostdata->selecting = NULL;
+ }
+
+ cmd->result = DID_NO_CONNECT << 16;
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
+ hostdata->state = S_UNCONNECTED;
+ cmd->scsi_done(cmd);
+
+ /* From esp.c:
+ * There is a window of time within the scsi_done() path
+ * of execution where interrupts are turned back on full
+ * blast and left that way. During that time we could
+ * reconnect to a disconnected command, then we'd bomb
+ * out below. We could also end up executing two commands
+ * at _once_. ...just so you know why the restore_flags()
+ * is here...
+ */
+
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+
+/* We are not connected to a target - check to see if there
+ * are commands waiting to be executed.
+ */
+
+ wd33c93_execute(instance);
+ break;
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_SELECT:
+ DB(DB_INTR, printk("SELECT"))
+ hostdata->connected = cmd =
+ (struct scsi_cmnd *) hostdata->selecting;
+ hostdata->selecting = NULL;
+
+ /* construct an IDENTIFY message with correct disconnect bit */
+
+ hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
+ if (cmd->SCp.phase)
+ hostdata->outgoing_msg[0] |= 0x40;
+
+ if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
+
+ hostdata->sync_stat[cmd->device->id] = SS_WAITING;
+
+/* Tack on a 2nd message to ask about synchronous transfers. If we've
+ * been asked to do only asynchronous transfers on this device, we
+ * request a fifo depth of 0, which is equivalent to async - should
+ * solve the problems some people have had with GVP's Guru ROM.
+ */
+
+ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[2] = 3;
+ hostdata->outgoing_msg[3] = EXTENDED_SDTR;
+ if (hostdata->no_sync & (1 << cmd->device->id)) {
+ calc_sync_msg(hostdata->default_sx_per, 0,
+ 0, hostdata->outgoing_msg + 4);
+ } else {
+ calc_sync_msg(optimum_sx_per(hostdata),
+ OPTIMUM_SX_OFF,
+ hostdata->fast,
+ hostdata->outgoing_msg + 4);
+ }
+ hostdata->outgoing_len = 6;
+#ifdef SYNC_DEBUG
+ ucp = hostdata->outgoing_msg + 1;
+ printk(" sending SDTR %02x03%02x%02x%02x ",
+ ucp[0], ucp[2], ucp[3], ucp[4]);
+#endif
+ } else
+ hostdata->outgoing_len = 1;
+
+ hostdata->state = S_CONNECTED;
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ case CSR_XFER_DONE | PHS_DATA_IN:
+ case CSR_UNEXP | PHS_DATA_IN:
+ case CSR_SRV_REQ | PHS_DATA_IN:
+ DB(DB_INTR,
+ printk("IN-%d.%d", cmd->SCp.this_residual,
+ cmd->SCp.buffers_residual))
+ transfer_bytes(regs, cmd, DATA_IN_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ case CSR_XFER_DONE | PHS_DATA_OUT:
+ case CSR_UNEXP | PHS_DATA_OUT:
+ case CSR_SRV_REQ | PHS_DATA_OUT:
+ DB(DB_INTR,
+ printk("OUT-%d.%d", cmd->SCp.this_residual,
+ cmd->SCp.buffers_residual))
+ transfer_bytes(regs, cmd, DATA_OUT_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_XFER_DONE | PHS_COMMAND:
+ case CSR_UNEXP | PHS_COMMAND:
+ case CSR_SRV_REQ | PHS_COMMAND:
+ DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
+ transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR,
+ hostdata);
+ hostdata->state = S_CONNECTED;
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ case CSR_XFER_DONE | PHS_STATUS:
+ case CSR_UNEXP | PHS_STATUS:
+ case CSR_SRV_REQ | PHS_STATUS:
+ DB(DB_INTR, printk("STATUS="))
+ cmd->SCp.Status = read_1_byte(regs);
+ DB(DB_INTR, printk("%02x", cmd->SCp.Status))
+ if (hostdata->level2 >= L2_BASIC) {
+ sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
+ udelay(7);
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_wd33c93(regs, WD_COMMAND_PHASE, 0x50);
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
+ } else {
+ hostdata->state = S_CONNECTED;
+ }
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ case CSR_XFER_DONE | PHS_MESS_IN:
+ case CSR_UNEXP | PHS_MESS_IN:
+ case CSR_SRV_REQ | PHS_MESS_IN:
+ DB(DB_INTR, printk("MSG_IN="))
+
+ msg = read_1_byte(regs);
+ sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
+ udelay(7);
+
+ hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
+ if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
+ msg = EXTENDED_MESSAGE;
+ else
+ hostdata->incoming_ptr = 0;
+
+ cmd->SCp.Message = msg;
+ switch (msg) {
+
+ case COMMAND_COMPLETE:
+ DB(DB_INTR, printk("CCMP"))
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_CMP_DISC;
+ break;
+
+ case SAVE_POINTERS:
+ DB(DB_INTR, printk("SDP"))
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case RESTORE_POINTERS:
+ DB(DB_INTR, printk("RDP"))
+ if (hostdata->level2 >= L2_BASIC) {
+ write_wd33c93(regs, WD_COMMAND_PHASE, 0x45);
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else {
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ case DISCONNECT:
+ DB(DB_INTR, printk("DIS"))
+ cmd->device->disconnect = 1;
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_TMP_DISC;
+ break;
+
+ case MESSAGE_REJECT:
+ DB(DB_INTR, printk("REJ"))
+#ifdef SYNC_DEBUG
+ printk("-REJ-");
+#endif
+ if (hostdata->sync_stat[cmd->device->id] == SS_WAITING) {
+ hostdata->sync_stat[cmd->device->id] = SS_SET;
+ /* we want default_sx_per, not DEFAULT_SX_PER */
+ hostdata->sync_xfer[cmd->device->id] =
+ calc_sync_xfer(hostdata->default_sx_per
+ / 4, 0, 0, hostdata->sx_table);
+ }
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case EXTENDED_MESSAGE:
+ DB(DB_INTR, printk("EXT"))
+
+ ucp = hostdata->incoming_msg;
+
+#ifdef SYNC_DEBUG
+ printk("%02x", ucp[hostdata->incoming_ptr]);
+#endif
+ /* Is this the last byte of the extended message? */
+
+ if ((hostdata->incoming_ptr >= 2) &&
+ (hostdata->incoming_ptr == (ucp[1] + 1))) {
+
+ switch (ucp[2]) { /* what's the EXTENDED code? */
+ case EXTENDED_SDTR:
+ /* default to default async period */
+ id = calc_sync_xfer(hostdata->
+ default_sx_per / 4, 0,
+ 0, hostdata->sx_table);
+ if (hostdata->sync_stat[cmd->device->id] !=
+ SS_WAITING) {
+
+/* A device has sent an unsolicited SDTR message; rather than go
+ * through the effort of decoding it and then figuring out what
+ * our reply should be, we're just gonna say that we have a
+ * synchronous fifo depth of 0. This will result in asynchronous
+ * transfers - not ideal but so much easier.
+ * Actually, this is OK because it assures us that if we don't
+ * specifically ask for sync transfers, we won't do any.
+ */
+
+ write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] =
+ EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 3;
+ hostdata->outgoing_msg[2] =
+ EXTENDED_SDTR;
+ calc_sync_msg(hostdata->
+ default_sx_per, 0,
+ 0, hostdata->outgoing_msg + 3);
+ hostdata->outgoing_len = 5;
+ } else {
+ if (ucp[4]) /* well, sync transfer */
+ id = calc_sync_xfer(ucp[3], ucp[4],
+ hostdata->fast,
+ hostdata->sx_table);
+ else if (ucp[3]) /* very unlikely... */
+ id = calc_sync_xfer(ucp[3], ucp[4],
+ 0, hostdata->sx_table);
+ }
+ hostdata->sync_xfer[cmd->device->id] = id;
+#ifdef SYNC_DEBUG
+ printk(" sync_xfer=%02x\n",
+ hostdata->sync_xfer[cmd->device->id]);
+#endif
+ hostdata->sync_stat[cmd->device->id] =
+ SS_SET;
+ write_wd33c93_cmd(regs,
+ WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ case EXTENDED_WDTR:
+ write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("sending WDTR ");
+ hostdata->outgoing_msg[0] =
+ EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 2;
+ hostdata->outgoing_msg[2] =
+ EXTENDED_WDTR;
+ hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */
+ hostdata->outgoing_len = 4;
+ write_wd33c93_cmd(regs,
+ WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ default:
+ write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk
+ ("Rejecting Unknown Extended Message(%02x). ",
+ ucp[2]);
+ hostdata->outgoing_msg[0] =
+ MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_wd33c93_cmd(regs,
+ WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ }
+ hostdata->incoming_ptr = 0;
+ }
+
+ /* We need to read more MESS_IN bytes for the extended message */
+
+ else {
+ hostdata->incoming_ptr++;
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ default:
+ printk("Rejecting Unknown Message(%02x) ", msg);
+ write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SEL_XFER_DONE:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
+ if (phs == 0x60) {
+ DB(DB_INTR, printk("SX-DONE"))
+ cmd->SCp.Message = COMMAND_COMPLETE;
+ lun = read_wd33c93(regs, WD_TARGET_LUN);
+ DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
+ cmd->SCp.Status = lun;
+ if (cmd->cmnd[0] == REQUEST_SENSE
+ && cmd->SCp.Status != GOOD)
+ cmd->result =
+ (cmd->
+ result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result =
+ cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ wd33c93_execute(instance);
+ } else {
+ printk
+ ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---",
+ asr, sr, phs);
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ }
+ break;
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SDP:
+ DB(DB_INTR, printk("SDP"))
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_wd33c93(regs, WD_COMMAND_PHASE, 0x41);
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ case CSR_XFER_DONE | PHS_MESS_OUT:
+ case CSR_UNEXP | PHS_MESS_OUT:
+ case CSR_SRV_REQ | PHS_MESS_OUT:
+ DB(DB_INTR, printk("MSG_OUT="))
+
+/* To get here, we've probably requested MESSAGE_OUT and have
+ * already put the correct bytes in outgoing_msg[] and filled
+ * in outgoing_len. We simply send them out to the SCSI bus.
+ * Sometimes we get MESSAGE_OUT phase when we're not expecting
+ * it - like when our SDTR message is rejected by a target. Some
+ * targets send the REJECT before receiving all of the extended
+ * message, and then seem to go back to MESSAGE_OUT for a byte
+ * or two. Not sure why, or if I'm doing something wrong to
+ * cause this to happen. Regardless, it seems that sending
+ * NOP messages in these situations results in no harm and
+ * makes everyone happy.
+ */
+ if (hostdata->outgoing_len == 0) {
+ hostdata->outgoing_len = 1;
+ hostdata->outgoing_msg[0] = NOP;
+ }
+ transfer_pio(regs, hostdata->outgoing_msg,
+ hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
+ DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
+ hostdata->outgoing_len = 0;
+ hostdata->state = S_CONNECTED;
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ case CSR_UNEXP_DISC:
+
+/* I think I've seen this after a request-sense that was in response
+ * to an error condition, but not sure. We certainly need to do
+ * something when we get this interrupt - the question is 'what?'.
+ * Let's think positively, and assume some command has finished
+ * in a legal manner (like a command that provokes a request-sense),
+ * so we treat it as a normal command-complete-disconnect.
+ */
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ return;
+ }
+ DB(DB_INTR, printk("UNEXP_DISC"))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result =
+ (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+ /* look above for comments on scsi_done() */
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ wd33c93_execute(instance);
+ break;
+
+ case CSR_DISC:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
+ DB(DB_INTR, printk("DISC"))
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+ }
+ switch (hostdata->state) {
+ case S_PRE_CMP_DISC:
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
+ hostdata->state = S_UNCONNECTED;
+ DB(DB_INTR, printk(":%d", cmd->SCp.Status))
+ if (cmd->cmnd[0] == REQUEST_SENSE
+ && cmd->SCp.Status != GOOD)
+ cmd->result =
+ (cmd->
+ result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result =
+ cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+ break;
+ case S_PRE_TMP_DISC:
+ case S_RUNNING_LEVEL2:
+ cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
+ hostdata->disconnected_Q = cmd;
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_done_cnt[cmd->device->id]++;
+#endif
+
+ break;
+ default:
+ printk("*** Unexpected DISCONNECT interrupt! ***");
+ hostdata->state = S_UNCONNECTED;
+ }
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ wd33c93_execute(instance);
+ break;
+
+ case CSR_RESEL_AM:
+ case CSR_RESEL:
+ DB(DB_INTR, printk("RESEL%s", sr == CSR_RESEL_AM ? "_AM" : ""))
+
+ /* Old chips (pre -A ???) don't have advanced features and will
+ * generate CSR_RESEL. In that case we have to extract the LUN the
+ * hard way (see below).
+ * First we have to make sure this reselection didn't
+ * happen during Arbitration/Selection of some other device.
+ * If yes, put losing command back on top of input_Q.
+ */
+ if (hostdata->level2 <= L2_NONE) {
+
+ if (hostdata->selecting) {
+ cmd = (struct scsi_cmnd *) hostdata->selecting;
+ hostdata->selecting = NULL;
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
+ cmd->host_scribble =
+ (uchar *) hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ }
+
+ else {
+
+ if (cmd) {
+ if (phs == 0x00) {
+ hostdata->busy[cmd->device->id] &=
+ ~(1 << (cmd->device->lun & 0xff));
+ cmd->host_scribble =
+ (uchar *) hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ } else {
+ printk
+ ("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---",
+ asr, sr, phs);
+ while (1)
+ printk("\r");
+ }
+ }
+
+ }
+
+ /* OK - find out which device reselected us. */
+
+ id = read_wd33c93(regs, WD_SOURCE_ID);
+ id &= SRCID_MASK;
+
+ /* and extract the lun from the ID message. (Note that we don't
+ * bother to check for a valid message here - I guess this is
+ * not the right way to go, but...)
+ */
+
+ if (sr == CSR_RESEL_AM) {
+ lun = read_wd33c93(regs, WD_DATA);
+ if (hostdata->level2 < L2_RESELECT)
+ write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK);
+ lun &= 7;
+ } else {
+ /* Old chip; wait for msgin phase to pick up the LUN. */
+ for (lun = 255; lun; lun--) {
+ if ((asr = read_aux_stat(regs)) & ASR_INT)
+ break;
+ udelay(10);
+ }
+ if (!(asr & ASR_INT)) {
+ printk
+ ("wd33c93: Reselected without IDENTIFY\n");
+ lun = 0;
+ } else {
+ /* Verify this is a change to MSG_IN and read the message */
+ sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ udelay(7);
+ if (sr == (CSR_ABORT | PHS_MESS_IN) ||
+ sr == (CSR_UNEXP | PHS_MESS_IN) ||
+ sr == (CSR_SRV_REQ | PHS_MESS_IN)) {
+ /* Got MSG_IN, grab target LUN */
+ lun = read_1_byte(regs);
+ /* Now we expect a 'paused with ACK asserted' int.. */
+ asr = read_aux_stat(regs);
+ if (!(asr & ASR_INT)) {
+ udelay(10);
+ asr = read_aux_stat(regs);
+ if (!(asr & ASR_INT))
+ printk
+ ("wd33c93: No int after LUN on RESEL (%02x)\n",
+ asr);
+ }
+ sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ udelay(7);
+ if (sr != CSR_MSGIN)
+ printk
+ ("wd33c93: Not paused with ACK on RESEL (%02x)\n",
+ sr);
+ lun &= 7;
+ write_wd33c93_cmd(regs,
+ WD_CMD_NEGATE_ACK);
+ } else {
+ printk
+ ("wd33c93: Not MSG_IN on reselect (%02x)\n",
+ sr);
+ lun = 0;
+ }
+ }
+ }
+
+ /* Now we look for the command that's reconnecting. */
+
+ cmd = (struct scsi_cmnd *) hostdata->disconnected_Q;
+ patch = NULL;
+ while (cmd) {
+ if (id == cmd->device->id && lun == (u8)cmd->device->lun)
+ break;
+ patch = cmd;
+ cmd = (struct scsi_cmnd *) cmd->host_scribble;
+ }
+
+ /* Hmm. Couldn't find a valid command.... What to do? */
+
+ if (!cmd) {
+ printk
+ ("---TROUBLE: target %d.%d not in disconnect queue---",
+ id, (u8)lun);
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ return;
+ }
+
+ /* Ok, found the command - now start it up again. */
+
+ if (patch)
+ patch->host_scribble = cmd->host_scribble;
+ else
+ hostdata->disconnected_Q =
+ (struct scsi_cmnd *) cmd->host_scribble;
+ hostdata->connected = cmd;
+
+ /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
+ * because these things are preserved over a disconnect.
+ * But we DO need to fix the DPD bit so it's correct for this command.
+ */
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id);
+ else
+ write_wd33c93(regs, WD_DESTINATION_ID,
+ cmd->device->id | DSTID_DPD);
+ if (hostdata->level2 >= L2_RESELECT) {
+ write_wd33c93_count(regs, 0); /* we want a DATA_PHASE interrupt */
+ write_wd33c93(regs, WD_COMMAND_PHASE, 0x45);
+ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ } else
+ hostdata->state = S_CONNECTED;
+
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ break;
+
+ default:
+ printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
+ spin_unlock_irqrestore(&hostdata->lock, flags);
+ }
+
+ DB(DB_INTR, printk("} "))
+
+}
+
+static void
+reset_wd33c93(struct Scsi_Host *instance)
+{
+ struct WD33C93_hostdata *hostdata =
+ (struct WD33C93_hostdata *) instance->hostdata;
+ const wd33c93_regs regs = hostdata->regs;
+ uchar sr;
+
+#ifdef CONFIG_SGI_IP22
+ {
+ int busycount = 0;
+ extern void sgiwd93_reset(unsigned long);
+ /* wait 'til the chip gets some time for us */
+ while ((read_aux_stat(regs) & ASR_BSY) && busycount++ < 100)
+ udelay (10);
+ /*
+ * there are scsi devices out there, which manage to lock up
+ * the wd33c93 in a busy condition. In this state it won't
+ * accept the reset command. The only way to solve this is to
+ * give the chip a hardware reset (if possible). The code below
+ * does this for the SGI Indy, where this is possible
+ */
+ /* still busy ? */
+ if (read_aux_stat(regs) & ASR_BSY)
+ sgiwd93_reset(instance->base); /* yeah, give it the hard one */
+ }
+#endif
+
+ write_wd33c93(regs, WD_OWN_ID, OWNID_EAF | OWNID_RAF |
+ instance->this_id | hostdata->clock_freq);
+ write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
+ calc_sync_xfer(hostdata->default_sx_per / 4,
+ DEFAULT_SX_OFF, 0, hostdata->sx_table));
+ write_wd33c93(regs, WD_COMMAND, WD_CMD_RESET);
+
+
+#ifdef CONFIG_MVME147_SCSI
+ udelay(25); /* The old wd33c93 on MVME147 needs this, at least */
+#endif
+
+ while (!(read_aux_stat(regs) & ASR_INT))
+ ;
+ sr = read_wd33c93(regs, WD_SCSI_STATUS);
+
+ hostdata->microcode = read_wd33c93(regs, WD_CDB_1);
+ if (sr == 0x00)
+ hostdata->chip = C_WD33C93;
+ else if (sr == 0x01) {
+ write_wd33c93(regs, WD_QUEUE_TAG, 0xa5); /* any random number */
+ sr = read_wd33c93(regs, WD_QUEUE_TAG);
+ if (sr == 0xa5) {
+ hostdata->chip = C_WD33C93B;
+ write_wd33c93(regs, WD_QUEUE_TAG, 0);
+ } else
+ hostdata->chip = C_WD33C93A;
+ } else
+ hostdata->chip = C_UNKNOWN_CHIP;
+
+ if (hostdata->chip != C_WD33C93B) /* Fast SCSI unavailable */
+ hostdata->fast = 0;
+
+ write_wd33c93(regs, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
+ write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+}
+
+int
+wd33c93_host_reset(struct scsi_cmnd * SCpnt)
+{
+ struct Scsi_Host *instance;
+ struct WD33C93_hostdata *hostdata;
+ int i;
+
+ instance = SCpnt->device->host;
+ hostdata = (struct WD33C93_hostdata *) instance->hostdata;
+
+ printk("scsi%d: reset. ", instance->host_no);
+ disable_irq(instance->irq);
+
+ hostdata->dma_stop(instance, NULL, 0);
+ for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_xfer[i] =
+ calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF,
+ 0, hostdata->sx_table);
+ hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->dma = D_DMA_OFF;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+
+ reset_wd33c93(instance);
+ SCpnt->result = DID_RESET << 16;
+ enable_irq(instance->irq);
+ return SUCCESS;
+}
+
+int
+wd33c93_abort(struct scsi_cmnd * cmd)
+{
+ struct Scsi_Host *instance;
+ struct WD33C93_hostdata *hostdata;
+ wd33c93_regs regs;
+ struct scsi_cmnd *tmp, *prev;
+
+ disable_irq(cmd->device->host->irq);
+
+ instance = cmd->device->host;
+ hostdata = (struct WD33C93_hostdata *) instance->hostdata;
+ regs = hostdata->regs;
+
+/*
+ * Case 1 : If the command hasn't been issued yet, we simply remove it
+ * from the input_Q.
+ */
+
+ tmp = (struct scsi_cmnd *) hostdata->input_Q;
+ prev = NULL;
+ while (tmp) {
+ if (tmp == cmd) {
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ else
+ hostdata->input_Q =
+ (struct scsi_cmnd *) cmd->host_scribble;
+ cmd->host_scribble = NULL;
+ cmd->result = DID_ABORT << 16;
+ printk
+ ("scsi%d: Abort - removing command from input_Q. ",
+ instance->host_no);
+ enable_irq(cmd->device->host->irq);
+ cmd->scsi_done(cmd);
+ return SUCCESS;
+ }
+ prev = tmp;
+ tmp = (struct scsi_cmnd *) tmp->host_scribble;
+ }
+
+/*
+ * Case 2 : If the command is connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected == cmd) {
+ uchar sr, asr;
+ unsigned long timeout;
+
+ printk("scsi%d: Aborting connected command - ",
+ instance->host_no);
+
+ printk("stopping DMA - ");
+ if (hostdata->dma == D_DMA_RUNNING) {
+ hostdata->dma_stop(instance, cmd, 0);
+ hostdata->dma = D_DMA_OFF;
+ }
+
+ printk("sending wd33c93 ABORT command - ");
+ write_wd33c93(regs, WD_CONTROL,
+ CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_wd33c93_cmd(regs, WD_CMD_ABORT);
+
+/* Now we have to attempt to flush out the FIFO... */
+
+ printk("flushing fifo - ");
+ timeout = 1000000;
+ do {
+ asr = read_aux_stat(regs);
+ if (asr & ASR_DBR)
+ read_wd33c93(regs, WD_DATA);
+ } while (!(asr & ASR_INT) && timeout-- > 0);
+ sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ printk
+ ("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ",
+ asr, sr, read_wd33c93_count(regs), timeout);
+
+ /*
+ * Abort command processed.
+ * Still connected.
+ * We must disconnect.
+ */
+
+ printk("sending wd33c93 DISCONNECT command - ");
+ write_wd33c93_cmd(regs, WD_CMD_DISCONNECT);
+
+ timeout = 1000000;
+ asr = read_aux_stat(regs);
+ while ((asr & ASR_CIP) && timeout-- > 0)
+ asr = read_aux_stat(regs);
+ sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ printk("asr=%02x, sr=%02x.", asr, sr);
+
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+ cmd->result = DID_ABORT << 16;
+
+/* sti();*/
+ wd33c93_execute(instance);
+
+ enable_irq(cmd->device->host->irq);
+ cmd->scsi_done(cmd);
+ return SUCCESS;
+ }
+
+/*
+ * Case 3: If the command is currently disconnected from the bus,
+ * we're not going to expend much effort here: Let's just return
+ * an ABORT_SNOOZE and hope for the best...
+ */
+
+ tmp = (struct scsi_cmnd *) hostdata->disconnected_Q;
+ while (tmp) {
+ if (tmp == cmd) {
+ printk
+ ("scsi%d: Abort - command found on disconnected_Q - ",
+ instance->host_no);
+ printk("Abort SNOOZE. ");
+ enable_irq(cmd->device->host->irq);
+ return FAILED;
+ }
+ tmp = (struct scsi_cmnd *) tmp->host_scribble;
+ }
+
+/*
+ * Case 4 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+/* sti();*/
+ wd33c93_execute(instance);
+
+ enable_irq(cmd->device->host->irq);
+ printk("scsi%d: warning : SCSI command probably completed successfully"
+ " before abortion. ", instance->host_no);
+ return FAILED;
+}
+
+#define MAX_WD33C93_HOSTS 4
+#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
+#define SETUP_BUFFER_SIZE 200
+static char setup_buffer[SETUP_BUFFER_SIZE];
+static char setup_used[MAX_SETUP_ARGS];
+static int done_setup = 0;
+
+static int
+wd33c93_setup(char *str)
+{
+ int i;
+ char *p1, *p2;
+
+ /* The kernel does some processing of the command-line before calling
+ * this function: If it begins with any decimal or hex number arguments,
+ * ints[0] = how many numbers found and ints[1] through [n] are the values
+ * themselves. str points to where the non-numeric arguments (if any)
+ * start: We do our own parsing of those. We construct synthetic 'nosync'
+ * keywords out of numeric args (to maintain compatibility with older
+ * versions) and then add the rest of the arguments.
+ */
+
+ p1 = setup_buffer;
+ *p1 = '\0';
+ if (str)
+ strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
+ setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
+ p1 = setup_buffer;
+ i = 0;
+ while (*p1 && (i < MAX_SETUP_ARGS)) {
+ p2 = strchr(p1, ',');
+ if (p2) {
+ *p2 = '\0';
+ if (p1 != p2)
+ setup_args[i] = p1;
+ p1 = p2 + 1;
+ i++;
+ } else {
+ setup_args[i] = p1;
+ break;
+ }
+ }
+ for (i = 0; i < MAX_SETUP_ARGS; i++)
+ setup_used[i] = 0;
+ done_setup = 1;
+
+ return 1;
+}
+__setup("wd33c93=", wd33c93_setup);
+
+/* check_setup_args() returns index if key found, 0 if not
+ */
+static int
+check_setup_args(char *key, int *flags, int *val, char *buf)
+{
+ int x;
+ char *cp;
+
+ for (x = 0; x < MAX_SETUP_ARGS; x++) {
+ if (setup_used[x])
+ continue;
+ if (!strncmp(setup_args[x], key, strlen(key)))
+ break;
+ if (!strncmp(setup_args[x], "next", strlen("next")))
+ return 0;
+ }
+ if (x == MAX_SETUP_ARGS)
+ return 0;
+ setup_used[x] = 1;
+ cp = setup_args[x] + strlen(key);
+ *val = -1;
+ if (*cp != ':')
+ return ++x;
+ cp++;
+ if ((*cp >= '0') && (*cp <= '9')) {
+ *val = simple_strtoul(cp, NULL, 0);
+ }
+ return ++x;
+}
+
+/*
+ * Calculate internal data-transfer-clock cycle from input-clock
+ * frequency (/MHz) and fill 'sx_table'.
+ *
+ * The original driver used to rely on a fixed sx_table, containing periods
+ * for (only) the lower limits of the respective input-clock-frequency ranges
+ * (8-10/12-15/16-20 MHz). Although it seems, that no problems occurred with
+ * this setting so far, it might be desirable to adjust the transfer periods
+ * closer to the really attached, possibly 25% higher, input-clock, since
+ * - the wd33c93 may really use a significant shorter period, than it has
+ * negotiated (eg. thrashing the target, which expects 4/8MHz, with 5/10MHz
+ * instead).
+ * - the wd33c93 may ask the target for a lower transfer rate, than the target
+ * is capable of (eg. negotiating for an assumed minimum of 252ns instead of
+ * possible 200ns, which indeed shows up in tests as an approx. 10% lower
+ * transfer rate).
+ */
+static inline unsigned int
+round_4(unsigned int x)
+{
+ switch (x & 3) {
+ case 1: --x;
+ break;
+ case 2: ++x;
+ case 3: ++x;
+ }
+ return x;
+}
+
+static void
+calc_sx_table(unsigned int mhz, struct sx_period sx_table[9])
+{
+ unsigned int d, i;
+ if (mhz < 11)
+ d = 2; /* divisor for 8-10 MHz input-clock */
+ else if (mhz < 16)
+ d = 3; /* divisor for 12-15 MHz input-clock */
+ else
+ d = 4; /* divisor for 16-20 MHz input-clock */
+
+ d = (100000 * d) / 2 / mhz; /* 100 x DTCC / nanosec */
+
+ sx_table[0].period_ns = 1;
+ sx_table[0].reg_value = 0x20;
+ for (i = 1; i < 8; i++) {
+ sx_table[i].period_ns = round_4((i+1)*d / 100);
+ sx_table[i].reg_value = (i+1)*0x10;
+ }
+ sx_table[7].reg_value = 0;
+ sx_table[8].period_ns = 0;
+ sx_table[8].reg_value = 0;
+}
+
+/*
+ * check and, maybe, map an init- or "clock:"- argument.
+ */
+static uchar
+set_clk_freq(int freq, int *mhz)
+{
+ int x = freq;
+ if (WD33C93_FS_8_10 == freq)
+ freq = 8;
+ else if (WD33C93_FS_12_15 == freq)
+ freq = 12;
+ else if (WD33C93_FS_16_20 == freq)
+ freq = 16;
+ else if (freq > 7 && freq < 11)
+ x = WD33C93_FS_8_10;
+ else if (freq > 11 && freq < 16)
+ x = WD33C93_FS_12_15;
+ else if (freq > 15 && freq < 21)
+ x = WD33C93_FS_16_20;
+ else {
+ /* Hmm, wouldn't it be safer to assume highest freq here? */
+ x = WD33C93_FS_8_10;
+ freq = 8;
+ }
+ *mhz = freq;
+ return x;
+}
+
+/*
+ * to be used with the resync: fast: ... options
+ */
+static inline void set_resync ( struct WD33C93_hostdata *hd, int mask )
+{
+ int i;
+ for (i = 0; i < 8; i++)
+ if (mask & (1 << i))
+ hd->sync_stat[i] = SS_UNSET;
+}
+
+void
+wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs,
+ dma_setup_t setup, dma_stop_t stop, int clock_freq)
+{
+ struct WD33C93_hostdata *hostdata;
+ int i;
+ int flags;
+ int val;
+ char buf[32];
+
+ if (!done_setup && setup_strings)
+ wd33c93_setup(setup_strings);
+
+ hostdata = (struct WD33C93_hostdata *) instance->hostdata;
+
+ hostdata->regs = regs;
+ hostdata->clock_freq = set_clk_freq(clock_freq, &i);
+ calc_sx_table(i, hostdata->sx_table);
+ hostdata->dma_setup = setup;
+ hostdata->dma_stop = stop;
+ hostdata->dma_bounce_buffer = NULL;
+ hostdata->dma_bounce_len = 0;
+ for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_xfer[i] =
+ calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF,
+ 0, hostdata->sx_table);
+ hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[i] = 0;
+ hostdata->disc_allowed_cnt[i] = 0;
+ hostdata->disc_done_cnt[i] = 0;
+#endif
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->dma = D_DMA_OFF;
+ hostdata->level2 = L2_BASIC;
+ hostdata->disconnect = DIS_ADAPTIVE;
+ hostdata->args = DEBUG_DEFAULTS;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+ hostdata->default_sx_per = DEFAULT_SX_PER;
+ hostdata->no_dma = 0; /* default is DMA enabled */
+
+#ifdef PROC_INTERFACE
+ hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS |
+ PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
+#ifdef PROC_STATISTICS
+ hostdata->dma_cnt = 0;
+ hostdata->pio_cnt = 0;
+ hostdata->int_cnt = 0;
+#endif
+#endif
+
+ if (check_setup_args("clock", &flags, &val, buf)) {
+ hostdata->clock_freq = set_clk_freq(val, &val);
+ calc_sx_table(val, hostdata->sx_table);
+ }
+
+ if (check_setup_args("nosync", &flags, &val, buf))
+ hostdata->no_sync = val;
+
+ if (check_setup_args("nodma", &flags, &val, buf))
+ hostdata->no_dma = (val == -1) ? 1 : val;
+
+ if (check_setup_args("period", &flags, &val, buf))
+ hostdata->default_sx_per =
+ hostdata->sx_table[round_period((unsigned int) val,
+ hostdata->sx_table)].period_ns;
+
+ if (check_setup_args("disconnect", &flags, &val, buf)) {
+ if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
+ hostdata->disconnect = val;
+ else
+ hostdata->disconnect = DIS_ADAPTIVE;
+ }
+
+ if (check_setup_args("level2", &flags, &val, buf))
+ hostdata->level2 = val;
+
+ if (check_setup_args("debug", &flags, &val, buf))
+ hostdata->args = val & DB_MASK;
+
+ if (check_setup_args("burst", &flags, &val, buf))
+ hostdata->dma_mode = val ? CTRL_BURST:CTRL_DMA;
+
+ if (WD33C93_FS_16_20 == hostdata->clock_freq /* divisor 4 */
+ && check_setup_args("fast", &flags, &val, buf))
+ hostdata->fast = !!val;
+
+ if ((i = check_setup_args("next", &flags, &val, buf))) {
+ while (i)
+ setup_used[--i] = 1;
+ }
+#ifdef PROC_INTERFACE
+ if (check_setup_args("proc", &flags, &val, buf))
+ hostdata->proc = val;
+#endif
+
+ spin_lock_irq(&hostdata->lock);
+ reset_wd33c93(instance);
+ spin_unlock_irq(&hostdata->lock);
+
+ printk("wd33c93-%d: chip=%s/%d no_sync=0x%x no_dma=%d",
+ instance->host_no,
+ (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip ==
+ C_WD33C93A) ?
+ "WD33c93A" : (hostdata->chip ==
+ C_WD33C93B) ? "WD33c93B" : "unknown",
+ hostdata->microcode, hostdata->no_sync, hostdata->no_dma);
+#ifdef DEBUGGING_ON
+ printk(" debug_flags=0x%02x\n", hostdata->args);
+#else
+ printk(" debugging=OFF\n");
+#endif
+ printk(" setup_args=");
+ for (i = 0; i < MAX_SETUP_ARGS; i++)
+ printk("%s,", setup_args[i]);
+ printk("\n");
+ printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE);
+}
+
+int wd33c93_write_info(struct Scsi_Host *instance, char *buf, int len)
+{
+#ifdef PROC_INTERFACE
+ char *bp;
+ struct WD33C93_hostdata *hd;
+ int x;
+
+ hd = (struct WD33C93_hostdata *) instance->hostdata;
+
+/* We accept the following
+ * keywords (same format as command-line, but arguments are not optional):
+ * debug
+ * disconnect
+ * period
+ * resync
+ * proc
+ * nodma
+ * level2
+ * burst
+ * fast
+ * nosync
+ */
+
+ buf[len] = '\0';
+ for (bp = buf; *bp; ) {
+ while (',' == *bp || ' ' == *bp)
+ ++bp;
+ if (!strncmp(bp, "debug:", 6)) {
+ hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK;
+ } else if (!strncmp(bp, "disconnect:", 11)) {
+ x = simple_strtoul(bp+11, &bp, 0);
+ if (x < DIS_NEVER || x > DIS_ALWAYS)
+ x = DIS_ADAPTIVE;
+ hd->disconnect = x;
+ } else if (!strncmp(bp, "period:", 7)) {
+ x = simple_strtoul(bp+7, &bp, 0);
+ hd->default_sx_per =
+ hd->sx_table[round_period((unsigned int) x,
+ hd->sx_table)].period_ns;
+ } else if (!strncmp(bp, "resync:", 7)) {
+ set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0));
+ } else if (!strncmp(bp, "proc:", 5)) {
+ hd->proc = simple_strtoul(bp+5, &bp, 0);
+ } else if (!strncmp(bp, "nodma:", 6)) {
+ hd->no_dma = simple_strtoul(bp+6, &bp, 0);
+ } else if (!strncmp(bp, "level2:", 7)) {
+ hd->level2 = simple_strtoul(bp+7, &bp, 0);
+ } else if (!strncmp(bp, "burst:", 6)) {
+ hd->dma_mode =
+ simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA;
+ } else if (!strncmp(bp, "fast:", 5)) {
+ x = !!simple_strtol(bp+5, &bp, 0);
+ if (x != hd->fast)
+ set_resync(hd, 0xff);
+ hd->fast = x;
+ } else if (!strncmp(bp, "nosync:", 7)) {
+ x = simple_strtoul(bp+7, &bp, 0);
+ set_resync(hd, x ^ hd->no_sync);
+ hd->no_sync = x;
+ } else {
+ break; /* unknown keyword,syntax-error,... */
+ }
+ }
+ return len;
+#else
+ return 0;
+#endif
+}
+
+int
+wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+#ifdef PROC_INTERFACE
+ struct WD33C93_hostdata *hd;
+ struct scsi_cmnd *cmd;
+ int x;
+
+ hd = (struct WD33C93_hostdata *) instance->hostdata;
+
+ spin_lock_irq(&hd->lock);
+ if (hd->proc & PR_VERSION)
+ seq_printf(m, "\nVersion %s - %s.",
+ WD33C93_VERSION, WD33C93_DATE);
+
+ if (hd->proc & PR_INFO) {
+ seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
+ " dma_mode=%02x fast=%d",
+ hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast);
+ seq_puts(m, "\nsync_xfer[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_xfer[x]);
+ seq_puts(m, "\nsync_stat[] = ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%02x", hd->sync_stat[x]);
+ }
+#ifdef PROC_STATISTICS
+ if (hd->proc & PR_STATISTICS) {
+ seq_puts(m, "\ncommands issued: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
+ seq_puts(m, "\ndisconnects allowed:");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
+ seq_puts(m, "\ndisconnects done: ");
+ for (x = 0; x < 7; x++)
+ seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
+ seq_printf(m,
+ "\ninterrupts: %ld, DATA_PHASE ints: %ld DMA, %ld PIO",
+ hd->int_cnt, hd->dma_cnt, hd->pio_cnt);
+ }
+#endif
+ if (hd->proc & PR_CONNECTED) {
+ seq_puts(m, "\nconnected: ");
+ if (hd->connected) {
+ cmd = (struct scsi_cmnd *) hd->connected;
+ seq_printf(m, " %d:%llu(%02x)",
+ cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ }
+ }
+ if (hd->proc & PR_INPUTQ) {
+ seq_puts(m, "\ninput_Q: ");
+ cmd = (struct scsi_cmnd *) hd->input_Q;
+ while (cmd) {
+ seq_printf(m, " %d:%llu(%02x)",
+ cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ cmd = (struct scsi_cmnd *) cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_DISCQ) {
+ seq_puts(m, "\ndisconnected_Q:");
+ cmd = (struct scsi_cmnd *) hd->disconnected_Q;
+ while (cmd) {
+ seq_printf(m, " %d:%llu(%02x)",
+ cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ cmd = (struct scsi_cmnd *) cmd->host_scribble;
+ }
+ }
+ seq_putc(m, '\n');
+ spin_unlock_irq(&hd->lock);
+#endif /* PROC_INTERFACE */
+ return 0;
+}
+
+EXPORT_SYMBOL(wd33c93_host_reset);
+EXPORT_SYMBOL(wd33c93_init);
+EXPORT_SYMBOL(wd33c93_abort);
+EXPORT_SYMBOL(wd33c93_queuecommand);
+EXPORT_SYMBOL(wd33c93_intr);
+EXPORT_SYMBOL(wd33c93_show_info);
+EXPORT_SYMBOL(wd33c93_write_info);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
new file mode 100644
index 000000000..08abe508e
--- /dev/null
+++ b/drivers/scsi/wd33c93.h
@@ -0,0 +1,352 @@
+/*
+ * wd33c93.h - Linux device driver definitions for the
+ * Commodore Amiga A2091/590 SCSI controller card
+ *
+ * IMPORTANT: This file is for version 1.25 - 09/Jul/1997
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef WD33C93_H
+#define WD33C93_H
+
+
+#define PROC_INTERFACE /* add code for /proc/scsi/wd33c93/xxx interface */
+#ifdef PROC_INTERFACE
+#define PROC_STATISTICS /* add code for keeping various real time stats */
+#endif
+
+#define SYNC_DEBUG /* extra info on sync negotiation printed */
+#define DEBUGGING_ON /* enable command-line debugging bitmask */
+#define DEBUG_DEFAULTS 0 /* default debugging bitmask */
+
+
+#ifdef DEBUGGING_ON
+#define DB(f,a) if (hostdata->args & (f)) a;
+#else
+#define DB(f,a)
+#endif
+
+#define uchar unsigned char
+
+
+/* wd register names */
+#define WD_OWN_ID 0x00
+#define WD_CONTROL 0x01
+#define WD_TIMEOUT_PERIOD 0x02
+#define WD_CDB_1 0x03
+#define WD_CDB_2 0x04
+#define WD_CDB_3 0x05
+#define WD_CDB_4 0x06
+#define WD_CDB_5 0x07
+#define WD_CDB_6 0x08
+#define WD_CDB_7 0x09
+#define WD_CDB_8 0x0a
+#define WD_CDB_9 0x0b
+#define WD_CDB_10 0x0c
+#define WD_CDB_11 0x0d
+#define WD_CDB_12 0x0e
+#define WD_TARGET_LUN 0x0f
+#define WD_COMMAND_PHASE 0x10
+#define WD_SYNCHRONOUS_TRANSFER 0x11
+#define WD_TRANSFER_COUNT_MSB 0x12
+#define WD_TRANSFER_COUNT 0x13
+#define WD_TRANSFER_COUNT_LSB 0x14
+#define WD_DESTINATION_ID 0x15
+#define WD_SOURCE_ID 0x16
+#define WD_SCSI_STATUS 0x17
+#define WD_COMMAND 0x18
+#define WD_DATA 0x19
+#define WD_QUEUE_TAG 0x1a
+#define WD_AUXILIARY_STATUS 0x1f
+
+/* WD commands */
+#define WD_CMD_RESET 0x00
+#define WD_CMD_ABORT 0x01
+#define WD_CMD_ASSERT_ATN 0x02
+#define WD_CMD_NEGATE_ACK 0x03
+#define WD_CMD_DISCONNECT 0x04
+#define WD_CMD_RESELECT 0x05
+#define WD_CMD_SEL_ATN 0x06
+#define WD_CMD_SEL 0x07
+#define WD_CMD_SEL_ATN_XFER 0x08
+#define WD_CMD_SEL_XFER 0x09
+#define WD_CMD_RESEL_RECEIVE 0x0a
+#define WD_CMD_RESEL_SEND 0x0b
+#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
+#define WD_CMD_TRANS_ADDR 0x18
+#define WD_CMD_TRANS_INFO 0x20
+#define WD_CMD_TRANSFER_PAD 0x21
+#define WD_CMD_SBT_MODE 0x80
+
+/* ASR register */
+#define ASR_INT (0x80)
+#define ASR_LCI (0x40)
+#define ASR_BSY (0x20)
+#define ASR_CIP (0x10)
+#define ASR_PE (0x02)
+#define ASR_DBR (0x01)
+
+/* SCSI Bus Phases */
+#define PHS_DATA_OUT 0x00
+#define PHS_DATA_IN 0x01
+#define PHS_COMMAND 0x02
+#define PHS_STATUS 0x03
+#define PHS_MESS_OUT 0x06
+#define PHS_MESS_IN 0x07
+
+/* Command Status Register definitions */
+
+ /* reset state interrupts */
+#define CSR_RESET 0x00
+#define CSR_RESET_AF 0x01
+
+ /* successful completion interrupts */
+#define CSR_RESELECT 0x10
+#define CSR_SELECT 0x11
+#define CSR_SEL_XFER_DONE 0x16
+#define CSR_XFER_DONE 0x18
+
+ /* paused or aborted interrupts */
+#define CSR_MSGIN 0x20
+#define CSR_SDP 0x21
+#define CSR_SEL_ABORT 0x22
+#define CSR_RESEL_ABORT 0x25
+#define CSR_RESEL_ABORT_AM 0x27
+#define CSR_ABORT 0x28
+
+ /* terminated interrupts */
+#define CSR_INVALID 0x40
+#define CSR_UNEXP_DISC 0x41
+#define CSR_TIMEOUT 0x42
+#define CSR_PARITY 0x43
+#define CSR_PARITY_ATN 0x44
+#define CSR_BAD_STATUS 0x45
+#define CSR_UNEXP 0x48
+
+ /* service required interrupts */
+#define CSR_RESEL 0x80
+#define CSR_RESEL_AM 0x81
+#define CSR_DISC 0x85
+#define CSR_SRV_REQ 0x88
+
+ /* Own ID/CDB Size register */
+#define OWNID_EAF 0x08
+#define OWNID_EHP 0x10
+#define OWNID_RAF 0x20
+#define OWNID_FS_8 0x00
+#define OWNID_FS_12 0x40
+#define OWNID_FS_16 0x80
+
+ /* define these so we don't have to change a2091.c, etc. */
+#define WD33C93_FS_8_10 OWNID_FS_8
+#define WD33C93_FS_12_15 OWNID_FS_12
+#define WD33C93_FS_16_20 OWNID_FS_16
+
+ /* pass input-clock explicitly. accepted mhz values are 8-10,12-20 */
+#define WD33C93_FS_MHZ(mhz) (mhz)
+
+ /* Control register */
+#define CTRL_HSP 0x01
+#define CTRL_HA 0x02
+#define CTRL_IDI 0x04
+#define CTRL_EDI 0x08
+#define CTRL_HHP 0x10
+#define CTRL_POLLED 0x00
+#define CTRL_BURST 0x20
+#define CTRL_BUS 0x40
+#define CTRL_DMA 0x80
+
+ /* Timeout Period register */
+#define TIMEOUT_PERIOD_VALUE 20 /* 20 = 200 ms */
+
+ /* Synchronous Transfer Register */
+#define STR_FSS 0x80
+
+ /* Destination ID register */
+#define DSTID_DPD 0x40
+#define DATA_OUT_DIR 0
+#define DATA_IN_DIR 1
+#define DSTID_SCC 0x80
+
+ /* Source ID register */
+#define SRCID_MASK 0x07
+#define SRCID_SIV 0x08
+#define SRCID_DSP 0x20
+#define SRCID_ES 0x40
+#define SRCID_ER 0x80
+
+ /* This is what the 3393 chip looks like to us */
+typedef struct {
+#ifdef CONFIG_WD33C93_PIO
+ unsigned int SASR;
+ unsigned int SCMD;
+#else
+ volatile unsigned char *SASR;
+ volatile unsigned char *SCMD;
+#endif
+} wd33c93_regs;
+
+
+typedef int (*dma_setup_t) (struct scsi_cmnd *SCpnt, int dir_in);
+typedef void (*dma_stop_t) (struct Scsi_Host *instance,
+ struct scsi_cmnd *SCpnt, int status);
+
+
+#define ILLEGAL_STATUS_BYTE 0xff
+
+#define DEFAULT_SX_PER 376 /* (ns) fairly safe */
+#define DEFAULT_SX_OFF 0 /* aka async */
+
+#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */
+#define OPTIMUM_SX_OFF 12 /* size of wd3393 fifo */
+
+struct sx_period {
+ unsigned int period_ns;
+ uchar reg_value;
+ };
+
+/* FEF: defines for hostdata->dma_buffer_pool */
+
+#define BUF_CHIP_ALLOCED 0
+#define BUF_SCSI_ALLOCED 1
+
+struct WD33C93_hostdata {
+ struct Scsi_Host *next;
+ wd33c93_regs regs;
+ spinlock_t lock;
+ uchar clock_freq;
+ uchar chip; /* what kind of wd33c93? */
+ uchar microcode; /* microcode rev */
+ uchar dma_buffer_pool; /* FEF: buffer from chip_ram? */
+ int dma_dir; /* data transfer dir. */
+ dma_setup_t dma_setup;
+ dma_stop_t dma_stop;
+ unsigned int dma_xfer_mask;
+ uchar *dma_bounce_buffer;
+ unsigned int dma_bounce_len;
+ volatile uchar busy[8]; /* index = target, bit = lun */
+ volatile struct scsi_cmnd *input_Q; /* commands waiting to be started */
+ volatile struct scsi_cmnd *selecting; /* trying to select this command */
+ volatile struct scsi_cmnd *connected; /* currently connected command */
+ volatile struct scsi_cmnd *disconnected_Q;/* commands waiting for reconnect */
+ uchar state; /* what we are currently doing */
+ uchar dma; /* current state of DMA (on/off) */
+ uchar level2; /* extent to which Level-2 commands are used */
+ uchar disconnect; /* disconnect/reselect policy */
+ unsigned int args; /* set from command-line argument */
+ uchar incoming_msg[8]; /* filled during message_in phase */
+ int incoming_ptr; /* mainly used with EXTENDED messages */
+ uchar outgoing_msg[8]; /* send this during next message_out */
+ int outgoing_len; /* length of outgoing message */
+ unsigned int default_sx_per; /* default transfer period for SCSI bus */
+ uchar sync_xfer[8]; /* sync_xfer reg settings per target */
+ uchar sync_stat[8]; /* status of sync negotiation per target */
+ uchar no_sync; /* bitmask: don't do sync on these targets */
+ uchar no_dma; /* set this flag to disable DMA */
+ uchar dma_mode; /* DMA Burst Mode or Single Byte DMA */
+ uchar fast; /* set this flag to enable Fast SCSI */
+ struct sx_period sx_table[9]; /* transfer periods for actual DTC-setting */
+#ifdef PROC_INTERFACE
+ uchar proc; /* bitmask: what's in proc output */
+#ifdef PROC_STATISTICS
+ unsigned long cmd_cnt[8]; /* # of commands issued per target */
+ unsigned long int_cnt; /* # of interrupts serviced */
+ unsigned long pio_cnt; /* # of pio data transfers */
+ unsigned long dma_cnt; /* # of DMA data transfers */
+ unsigned long disc_allowed_cnt[8]; /* # of disconnects allowed per target */
+ unsigned long disc_done_cnt[8]; /* # of disconnects done per target*/
+#endif
+#endif
+ };
+
+
+/* defines for hostdata->chip */
+
+#define C_WD33C93 0
+#define C_WD33C93A 1
+#define C_WD33C93B 2
+#define C_UNKNOWN_CHIP 100
+
+/* defines for hostdata->state */
+
+#define S_UNCONNECTED 0
+#define S_SELECTING 1
+#define S_RUNNING_LEVEL2 2
+#define S_CONNECTED 3
+#define S_PRE_TMP_DISC 4
+#define S_PRE_CMP_DISC 5
+
+/* defines for hostdata->dma */
+
+#define D_DMA_OFF 0
+#define D_DMA_RUNNING 1
+
+/* defines for hostdata->level2 */
+/* NOTE: only the first 3 are implemented so far */
+
+#define L2_NONE 1 /* no combination commands - we get lots of ints */
+#define L2_SELECT 2 /* start with SEL_ATN_XFER, but never resume it */
+#define L2_BASIC 3 /* resume after STATUS ints & RDP messages */
+#define L2_DATA 4 /* resume after DATA_IN/OUT ints */
+#define L2_MOST 5 /* resume after anything except a RESELECT int */
+#define L2_RESELECT 6 /* resume after everything, including RESELECT ints */
+#define L2_ALL 7 /* always resume */
+
+/* defines for hostdata->disconnect */
+
+#define DIS_NEVER 0
+#define DIS_ADAPTIVE 1
+#define DIS_ALWAYS 2
+
+/* defines for hostdata->args */
+
+#define DB_TEST1 1<<0
+#define DB_TEST2 1<<1
+#define DB_QUEUE_COMMAND 1<<2
+#define DB_EXECUTE 1<<3
+#define DB_INTR 1<<4
+#define DB_TRANSFER 1<<5
+#define DB_MASK 0x3f
+
+/* defines for hostdata->sync_stat[] */
+
+#define SS_UNSET 0
+#define SS_FIRST 1
+#define SS_WAITING 2
+#define SS_SET 3
+
+/* defines for hostdata->proc */
+
+#define PR_VERSION 1<<0
+#define PR_INFO 1<<1
+#define PR_STATISTICS 1<<2
+#define PR_CONNECTED 1<<3
+#define PR_INPUTQ 1<<4
+#define PR_DISCQ 1<<5
+#define PR_TEST 1<<6
+#define PR_STOP 1<<7
+
+
+void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs,
+ dma_setup_t setup, dma_stop_t stop, int clock_freq);
+int wd33c93_abort (struct scsi_cmnd *cmd);
+int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd);
+void wd33c93_intr (struct Scsi_Host *instance);
+int wd33c93_show_info(struct seq_file *, struct Scsi_Host *);
+int wd33c93_write_info(struct Scsi_Host *, char *, int);
+int wd33c93_host_reset (struct scsi_cmnd *);
+
+#endif /* WD33C93_H */
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
new file mode 100644
index 000000000..0c0f17b9a
--- /dev/null
+++ b/drivers/scsi/wd7000.c
@@ -0,0 +1,1657 @@
+/* $Id: $
+ * linux/drivers/scsi/wd7000.c
+ *
+ * Copyright (C) 1992 Thomas Wuensche
+ * closely related to the aha1542 driver from Tommy Thorn
+ * ( as close as different hardware allows on a lowlevel-driver :-) )
+ *
+ * Revised (and renamed) by John Boyd <boyd@cis.ohio-state.edu> to
+ * accommodate Eric Youngdale's modifications to scsi.c. Nov 1992.
+ *
+ * Additional changes to support scatter/gather. Dec. 1992. tw/jb
+ *
+ * No longer tries to reset SCSI bus at boot (it wasn't working anyway).
+ * Rewritten to support multiple host adapters.
+ * Miscellaneous cleanup.
+ * So far, still doesn't do reset or abort correctly, since I have no idea
+ * how to do them with this board (8^(. Jan 1994 jb
+ *
+ * This driver now supports both of the two standard configurations (per
+ * the 3.36 Owner's Manual, my latest reference) by the same method as
+ * before; namely, by looking for a BIOS signature. Thus, the location of
+ * the BIOS signature determines the board configuration. Until I have
+ * time to do something more flexible, users should stick to one of the
+ * following:
+ *
+ * Standard configuration for single-adapter systems:
+ * - BIOS at CE00h
+ * - I/O base address 350h
+ * - IRQ level 15
+ * - DMA channel 6
+ * Standard configuration for a second adapter in a system:
+ * - BIOS at C800h
+ * - I/O base address 330h
+ * - IRQ level 11
+ * - DMA channel 5
+ *
+ * Anyone who can recompile the kernel is welcome to add others as need
+ * arises, but unpredictable results may occur if there are conflicts.
+ * In any event, if there are multiple adapters in a system, they MUST
+ * use different I/O bases, IRQ levels, and DMA channels, since they will be
+ * indistinguishable (and in direct conflict) otherwise.
+ *
+ * As a point of information, the NO_OP command toggles the CMD_RDY bit
+ * of the status port, and this fact could be used as a test for the I/O
+ * base address (or more generally, board detection). There is an interrupt
+ * status port, so IRQ probing could also be done. I suppose the full
+ * DMA diagnostic could be used to detect the DMA channel being used. I
+ * haven't done any of this, though, because I think there's too much of
+ * a chance that such explorations could be destructive, if some other
+ * board's resources are used inadvertently. So, call me a wimp, but I
+ * don't want to try it. The only kind of exploration I trust is memory
+ * exploration, since it's more certain that reading memory won't be
+ * destructive.
+ *
+ * More to my liking would be a LILO boot command line specification, such
+ * as is used by the aha152x driver (and possibly others). I'll look into
+ * it, as I have time...
+ *
+ * I get mail occasionally from people who either are using or are
+ * considering using a WD7000 with Linux. There is a variety of
+ * nomenclature describing WD7000's. To the best of my knowledge, the
+ * following is a brief summary (from an old WD doc - I don't work for
+ * them or anything like that):
+ *
+ * WD7000-FASST2: This is a WD7000 board with the real-mode SST ROM BIOS
+ * installed. Last I heard, the BIOS was actually done by Columbia
+ * Data Products. The BIOS is only used by this driver (and thus
+ * by Linux) to identify the board; none of it can be executed under
+ * Linux.
+ *
+ * WD7000-ASC: This is the original adapter board, with or without BIOS.
+ * The board uses a WD33C93 or WD33C93A SBIC, which in turn is
+ * controlled by an onboard Z80 processor. The board interface
+ * visible to the host CPU is defined effectively by the Z80's
+ * firmware, and it is this firmware's revision level that is
+ * determined and reported by this driver. (The version of the
+ * on-board BIOS is of no interest whatsoever.) The host CPU has
+ * no access to the SBIC; hence the fact that it is a WD33C93 is
+ * also of no interest to this driver.
+ *
+ * WD7000-AX:
+ * WD7000-MX:
+ * WD7000-EX: These are newer versions of the WD7000-ASC. The -ASC is
+ * largely built from discrete components; these boards use more
+ * integration. The -AX is an ISA bus board (like the -ASC),
+ * the -MX is an MCA (i.e., PS/2) bus board), and the -EX is an
+ * EISA bus board.
+ *
+ * At the time of my documentation, the -?X boards were "future" products,
+ * and were not yet available. However, I vaguely recall that Thomas
+ * Wuensche had an -AX, so I believe at least it is supported by this
+ * driver. I have no personal knowledge of either -MX or -EX boards.
+ *
+ * P.S. Just recently, I've discovered (directly from WD and Future
+ * Domain) that all but the WD7000-EX have been out of production for
+ * two years now. FD has production rights to the 7000-EX, and are
+ * producing it under a new name, and with a new BIOS. If anyone has
+ * one of the FD boards, it would be nice to come up with a signature
+ * for it.
+ * J.B. Jan 1994.
+ *
+ *
+ * Revisions by Miroslav Zagorac <zaga@fly.cc.fer.hr>
+ *
+ * 08/24/1996.
+ *
+ * Enhancement for wd7000_detect function has been made, so you don't have
+ * to enter BIOS ROM address in initialisation data (see struct Config).
+ * We cannot detect IRQ, DMA and I/O base address for now, so we have to
+ * enter them as arguments while wd_7000 is detected. If someone has IRQ,
+ * DMA or I/O base address set to some other value, he can enter them in
+ * configuration without any problem. Also I wrote a function wd7000_setup,
+ * so now you can enter WD-7000 definition as kernel arguments,
+ * as in lilo.conf:
+ *
+ * append="wd7000=IRQ,DMA,IO"
+ *
+ * PS: If card BIOS ROM is disabled, function wd7000_detect now will recognize
+ * adapter, unlike the old one. Anyway, BIOS ROM from WD7000 adapter is
+ * useless for Linux. B^)
+ *
+ *
+ * 09/06/1996.
+ *
+ * Autodetecting of I/O base address from wd7000_detect function is removed,
+ * some little bugs removed, etc...
+ *
+ * Thanks to Roger Scott for driver debugging.
+ *
+ * 06/07/1997
+ *
+ * Added support for /proc file system (/proc/scsi/wd7000/[0...] files).
+ * Now, driver can handle hard disks with capacity >1GB.
+ *
+ * 01/15/1998
+ *
+ * Added support for BUS_ON and BUS_OFF parameters in config line.
+ * Miscellaneous cleanup.
+ *
+ * 03/01/1998
+ *
+ * WD7000 driver now work on kernels >= 2.1.x
+ *
+ *
+ * 12/31/2001 - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * use host->host_lock, not io_request_lock, cleanups
+ *
+ * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Use dev_id for interrupts, kill __func__ pasting
+ * Add a lock for the scb pool, clean up all other cli/sti usage stuff
+ * Use the adapter lock for the other places we had the cli's
+ *
+ * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Switch to new style error handling
+ * Clean up delay to udelay, and yielding sleeps
+ * Make host reset actually reset the card
+ * Make everything static
+ *
+ * 2003/02/12 - Christoph Hellwig <hch@infradead.org>
+ *
+ * Cleaned up host template definition
+ * Removed now obsolete wd7000.h
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsicam.h>
+
+
+#undef WD7000_DEBUG /* general debug */
+#ifdef WD7000_DEBUG
+#define dprintk printk
+#else
+#define dprintk(format,args...)
+#endif
+
+/*
+ * Mailbox structure sizes.
+ * I prefer to keep the number of ICMBs much larger than the number of
+ * OGMBs. OGMBs are used very quickly by the driver to start one or
+ * more commands, while ICMBs are used by the host adapter per command.
+ */
+#define OGMB_CNT 16
+#define ICMB_CNT 32
+
+/*
+ * Scb's are shared by all active adapters. So, if they all become busy,
+ * callers may be made to wait in alloc_scbs for them to free. That can
+ * be avoided by setting MAX_SCBS to NUM_CONFIG * WD7000_Q. If you'd
+ * rather conserve memory, use a smaller number (> 0, of course) - things
+ * will should still work OK.
+ */
+#define MAX_SCBS 32
+
+/*
+ * In this version, sg_tablesize now defaults to WD7000_SG, and will
+ * be set to SG_NONE for older boards. This is the reverse of the
+ * previous default, and was changed so that the driver-level
+ * scsi_host_template would reflect the driver's support for scatter/
+ * gather.
+ *
+ * Also, it has been reported that boards at Revision 6 support scatter/
+ * gather, so the new definition of an "older" board has been changed
+ * accordingly.
+ */
+#define WD7000_Q 16
+#define WD7000_SG 16
+
+
+/*
+ * WD7000-specific mailbox structure
+ *
+ */
+typedef volatile struct mailbox {
+ unchar status;
+ unchar scbptr[3]; /* SCSI-style - MSB first (big endian) */
+} Mailbox;
+
+/*
+ * This structure should contain all per-adapter global data. I.e., any
+ * new global per-adapter data should put in here.
+ */
+typedef struct adapter {
+ struct Scsi_Host *sh; /* Pointer to Scsi_Host structure */
+ int iobase; /* This adapter's I/O base address */
+ int irq; /* This adapter's IRQ level */
+ int dma; /* This adapter's DMA channel */
+ int int_counter; /* This adapter's interrupt counter */
+ int bus_on; /* This adapter's BUS_ON time */
+ int bus_off; /* This adapter's BUS_OFF time */
+ struct { /* This adapter's mailboxes */
+ Mailbox ogmb[OGMB_CNT]; /* Outgoing mailboxes */
+ Mailbox icmb[ICMB_CNT]; /* Incoming mailboxes */
+ } mb;
+ int next_ogmb; /* to reduce contention at mailboxes */
+ unchar control; /* shadows CONTROL port value */
+ unchar rev1, rev2; /* filled in by wd7000_revision */
+} Adapter;
+
+/*
+ * (linear) base address for ROM BIOS
+ */
+static const long wd7000_biosaddr[] = {
+ 0xc0000, 0xc2000, 0xc4000, 0xc6000, 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000
+};
+#define NUM_ADDRS ARRAY_SIZE(wd7000_biosaddr)
+
+static const unsigned short wd7000_iobase[] = {
+ 0x0300, 0x0308, 0x0310, 0x0318, 0x0320, 0x0328, 0x0330, 0x0338,
+ 0x0340, 0x0348, 0x0350, 0x0358, 0x0360, 0x0368, 0x0370, 0x0378,
+ 0x0380, 0x0388, 0x0390, 0x0398, 0x03a0, 0x03a8, 0x03b0, 0x03b8,
+ 0x03c0, 0x03c8, 0x03d0, 0x03d8, 0x03e0, 0x03e8, 0x03f0, 0x03f8
+};
+#define NUM_IOPORTS ARRAY_SIZE(wd7000_iobase)
+
+static const short wd7000_irq[] = { 3, 4, 5, 7, 9, 10, 11, 12, 14, 15 };
+#define NUM_IRQS ARRAY_SIZE(wd7000_irq)
+
+static const short wd7000_dma[] = { 5, 6, 7 };
+#define NUM_DMAS ARRAY_SIZE(wd7000_dma)
+
+/*
+ * The following is set up by wd7000_detect, and used thereafter for
+ * proc and other global ookups
+ */
+
+#define UNITS 8
+static struct Scsi_Host *wd7000_host[UNITS];
+
+#define BUS_ON 64 /* x 125ns = 8000ns (BIOS default) */
+#define BUS_OFF 15 /* x 125ns = 1875ns (BIOS default) */
+
+/*
+ * Standard Adapter Configurations - used by wd7000_detect
+ */
+typedef struct {
+ short irq; /* IRQ level */
+ short dma; /* DMA channel */
+ unsigned iobase; /* I/O base address */
+ short bus_on; /* Time that WD7000 spends on the AT-bus when */
+ /* transferring data. BIOS default is 8000ns. */
+ short bus_off; /* Time that WD7000 spends OFF THE BUS after */
+ /* while it is transferring data. */
+ /* BIOS default is 1875ns */
+} Config;
+
+/*
+ * Add here your configuration...
+ */
+static Config configs[] = {
+ {15, 6, 0x350, BUS_ON, BUS_OFF}, /* defaults for single adapter */
+ {11, 5, 0x320, BUS_ON, BUS_OFF}, /* defaults for second adapter */
+ {7, 6, 0x350, BUS_ON, BUS_OFF}, /* My configuration (Zaga) */
+ {-1, -1, 0x0, BUS_ON, BUS_OFF} /* Empty slot */
+};
+#define NUM_CONFIGS ARRAY_SIZE(configs)
+
+/*
+ * The following list defines strings to look for in the BIOS that identify
+ * it as the WD7000-FASST2 SST BIOS. I suspect that something should be
+ * added for the Future Domain version.
+ */
+typedef struct signature {
+ const char *sig; /* String to look for */
+ unsigned long ofs; /* offset from BIOS base address */
+ unsigned len; /* length of string */
+} Signature;
+
+static const Signature signatures[] = {
+ {"SSTBIOS", 0x0000d, 7} /* "SSTBIOS" @ offset 0x0000d */
+};
+#define NUM_SIGNATURES ARRAY_SIZE(signatures)
+
+
+/*
+ * I/O Port Offsets and Bit Definitions
+ * 4 addresses are used. Those not defined here are reserved.
+ */
+#define ASC_STAT 0 /* Status, Read */
+#define ASC_COMMAND 0 /* Command, Write */
+#define ASC_INTR_STAT 1 /* Interrupt Status, Read */
+#define ASC_INTR_ACK 1 /* Acknowledge, Write */
+#define ASC_CONTROL 2 /* Control, Write */
+
+/*
+ * ASC Status Port
+ */
+#define INT_IM 0x80 /* Interrupt Image Flag */
+#define CMD_RDY 0x40 /* Command Port Ready */
+#define CMD_REJ 0x20 /* Command Port Byte Rejected */
+#define ASC_INIT 0x10 /* ASC Initialized Flag */
+#define ASC_STATMASK 0xf0 /* The lower 4 Bytes are reserved */
+
+/*
+ * COMMAND opcodes
+ *
+ * Unfortunately, I have no idea how to properly use some of these commands,
+ * as the OEM manual does not make it clear. I have not been able to use
+ * enable/disable unsolicited interrupts or the reset commands with any
+ * discernible effect whatsoever. I think they may be related to certain
+ * ICB commands, but again, the OEM manual doesn't make that clear.
+ */
+#define NO_OP 0 /* NO-OP toggles CMD_RDY bit in ASC_STAT */
+#define INITIALIZATION 1 /* initialization (10 bytes) */
+#define DISABLE_UNS_INTR 2 /* disable unsolicited interrupts */
+#define ENABLE_UNS_INTR 3 /* enable unsolicited interrupts */
+#define INTR_ON_FREE_OGMB 4 /* interrupt on free OGMB */
+#define SOFT_RESET 5 /* SCSI bus soft reset */
+#define HARD_RESET_ACK 6 /* SCSI bus hard reset acknowledge */
+#define START_OGMB 0x80 /* start command in OGMB (n) */
+#define SCAN_OGMBS 0xc0 /* start multiple commands, signature (n) */
+ /* where (n) = lower 6 bits */
+/*
+ * For INITIALIZATION:
+ */
+typedef struct initCmd {
+ unchar op; /* command opcode (= 1) */
+ unchar ID; /* Adapter's SCSI ID */
+ unchar bus_on; /* Bus on time, x 125ns (see below) */
+ unchar bus_off; /* Bus off time, "" "" */
+ unchar rsvd; /* Reserved */
+ unchar mailboxes[3]; /* Address of Mailboxes, MSB first */
+ unchar ogmbs; /* Number of outgoing MBs, max 64, 0,1 = 1 */
+ unchar icmbs; /* Number of incoming MBs, "" "" */
+} InitCmd;
+
+/*
+ * Interrupt Status Port - also returns diagnostic codes at ASC reset
+ *
+ * if msb is zero, the lower bits are diagnostic status
+ * Diagnostics:
+ * 01 No diagnostic error occurred
+ * 02 RAM failure
+ * 03 FIFO R/W failed
+ * 04 SBIC register read/write failed
+ * 05 Initialization D-FF failed
+ * 06 Host IRQ D-FF failed
+ * 07 ROM checksum error
+ * Interrupt status (bitwise):
+ * 10NNNNNN outgoing mailbox NNNNNN is free
+ * 11NNNNNN incoming mailbox NNNNNN needs service
+ */
+#define MB_INTR 0xC0 /* Mailbox Service possible/required */
+#define IMB_INTR 0x40 /* 1 Incoming / 0 Outgoing */
+#define MB_MASK 0x3f /* mask for mailbox number */
+
+/*
+ * CONTROL port bits
+ */
+#define INT_EN 0x08 /* Interrupt Enable */
+#define DMA_EN 0x04 /* DMA Enable */
+#define SCSI_RES 0x02 /* SCSI Reset */
+#define ASC_RES 0x01 /* ASC Reset */
+
+/*
+ * Driver data structures:
+ * - mb and scbs are required for interfacing with the host adapter.
+ * An SCB has extra fields not visible to the adapter; mb's
+ * _cannot_ do this, since the adapter assumes they are contiguous in
+ * memory, 4 bytes each, with ICMBs following OGMBs, and uses this fact
+ * to access them.
+ * - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each;
+ * the additional bytes are used only by the driver.
+ * - For now, a pool of SCBs are kept in global storage by this driver,
+ * and are allocated and freed as needed.
+ *
+ * The 7000-FASST2 marks OGMBs empty as soon as it has _started_ a command,
+ * not when it has finished. Since the SCB must be around for completion,
+ * problems arise when SCBs correspond to OGMBs, which may be reallocated
+ * earlier (or delayed unnecessarily until a command completes).
+ * Mailboxes are used as transient data structures, simply for
+ * carrying SCB addresses to/from the 7000-FASST2.
+ *
+ * Note also since SCBs are not "permanently" associated with mailboxes,
+ * there is no need to keep a global list of scsi_cmnd pointers indexed
+ * by OGMB. Again, SCBs reference their scsi_cmnds directly, so mailbox
+ * indices need not be involved.
+ */
+
+/*
+ * WD7000-specific scatter/gather element structure
+ */
+typedef struct sgb {
+ unchar len[3];
+ unchar ptr[3]; /* Also SCSI-style - MSB first */
+} Sgb;
+
+typedef struct scb { /* Command Control Block 5.4.1 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked */
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdb[12]; /* SCSI Command Block */
+ volatile unchar status; /* SCSI Return Status */
+ volatile unchar vue; /* Vendor Unique Error Code */
+ unchar maxlen[3]; /* Maximum Data Transfer Length */
+ unchar dataptr[3]; /* SCSI Data Block Pointer */
+ unchar linkptr[3]; /* Next Command Link Pointer */
+ unchar direc; /* Transfer Direction */
+ unchar reserved2[6]; /* SCSI Command Descriptor Block */
+ /* end of hardware SCB */
+ struct scsi_cmnd *SCpnt;/* scsi_cmnd using this SCB */
+ Sgb sgb[WD7000_SG]; /* Scatter/gather list for this SCB */
+ Adapter *host; /* host adapter */
+ struct scb *next; /* for lists of scbs */
+} Scb;
+
+/*
+ * This driver is written to allow host-only commands to be executed.
+ * These use a 16-byte block called an ICB. The format is extended by the
+ * driver to 18 bytes, to support the status returned in the ICMB and
+ * an execution phase code.
+ *
+ * There are other formats besides these; these are the ones I've tried
+ * to use. Formats for some of the defined ICB opcodes are not defined
+ * (notably, get/set unsolicited interrupt status) in my copy of the OEM
+ * manual, and others are ambiguous/hard to follow.
+ */
+#define ICB_OP_MASK 0x80 /* distinguishes scbs from icbs */
+#define ICB_OP_OPEN_RBUF 0x80 /* open receive buffer */
+#define ICB_OP_RECV_CMD 0x81 /* receive command from initiator */
+#define ICB_OP_RECV_DATA 0x82 /* receive data from initiator */
+#define ICB_OP_RECV_SDATA 0x83 /* receive data with status from init. */
+#define ICB_OP_SEND_DATA 0x84 /* send data with status to initiator */
+#define ICB_OP_SEND_STAT 0x86 /* send command status to initiator */
+ /* 0x87 is reserved */
+#define ICB_OP_READ_INIT 0x88 /* read initialization bytes */
+#define ICB_OP_READ_ID 0x89 /* read adapter's SCSI ID */
+#define ICB_OP_SET_UMASK 0x8A /* set unsolicited interrupt mask */
+#define ICB_OP_GET_UMASK 0x8B /* read unsolicited interrupt mask */
+#define ICB_OP_GET_REVISION 0x8C /* read firmware revision level */
+#define ICB_OP_DIAGNOSTICS 0x8D /* execute diagnostics */
+#define ICB_OP_SET_EPARMS 0x8E /* set execution parameters */
+#define ICB_OP_GET_EPARMS 0x8F /* read execution parameters */
+
+typedef struct icbRecvCmd {
+ unchar op;
+ unchar IDlun; /* Initiator SCSI ID/lun */
+ unchar len[3]; /* command buffer length */
+ unchar ptr[3]; /* command buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRecvCmd;
+
+typedef struct icbSendStat {
+ unchar op;
+ unchar IDlun; /* Target SCSI ID/lun */
+ unchar stat; /* (outgoing) completion status byte 1 */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbSendStat;
+
+typedef struct icbRevLvl {
+ unchar op;
+ volatile unchar primary; /* primary revision level (returned) */
+ volatile unchar secondary; /* secondary revision level (returned) */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRevLvl;
+
+typedef struct icbUnsMask { /* I'm totally guessing here */
+ unchar op;
+ volatile unchar mask[14]; /* mask bits */
+#if 0
+ unchar rsvd[12]; /* reserved */
+#endif
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbUnsMask;
+
+typedef struct icbDiag {
+ unchar op;
+ unchar type; /* diagnostics type code (0-3) */
+ unchar len[3]; /* buffer length */
+ unchar ptr[3]; /* buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbDiag;
+
+#define ICB_DIAG_POWERUP 0 /* Power-up diags only */
+#define ICB_DIAG_WALKING 1 /* walking 1's pattern */
+#define ICB_DIAG_DMA 2 /* DMA - system memory diags */
+#define ICB_DIAG_FULL 3 /* do both 1 & 2 */
+
+typedef struct icbParms {
+ unchar op;
+ unchar rsvd1; /* reserved */
+ unchar len[3]; /* parms buffer length */
+ unchar ptr[3]; /* parms buffer address */
+ unchar idx[2]; /* index (MSB-LSB) */
+ unchar rsvd2[5]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbParms;
+
+typedef struct icbAny {
+ unchar op;
+ unchar data[14]; /* format-specific data */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbAny;
+
+typedef union icb {
+ unchar op; /* ICB opcode */
+ IcbRecvCmd recv_cmd; /* format for receive command */
+ IcbSendStat send_stat; /* format for send status */
+ IcbRevLvl rev_lvl; /* format for get revision level */
+ IcbDiag diag; /* format for execute diagnostics */
+ IcbParms eparms; /* format for get/set exec parms */
+ IcbAny icb; /* generic format */
+ unchar data[18];
+} Icb;
+
+#ifdef MODULE
+static char *wd7000;
+module_param(wd7000, charp, 0);
+#endif
+
+/*
+ * Driver SCB structure pool.
+ *
+ * The SCBs declared here are shared by all host adapters; hence, this
+ * structure is not part of the Adapter structure.
+ */
+static Scb scbs[MAX_SCBS];
+static Scb *scbfree; /* free list */
+static int freescbs = MAX_SCBS; /* free list counter */
+static spinlock_t scbpool_lock; /* guards the scb free list and count */
+
+/*
+ * END of data/declarations - code follows.
+ */
+static void __init setup_error(char *mesg, int *ints)
+{
+ if (ints[0] == 3)
+ printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x\" -> %s\n", ints[1], ints[2], ints[3], mesg);
+ else if (ints[0] == 4)
+ printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x,%d\" -> %s\n", ints[1], ints[2], ints[3], ints[4], mesg);
+ else
+ printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x,%d,%d\" -> %s\n", ints[1], ints[2], ints[3], ints[4], ints[5], mesg);
+}
+
+
+/*
+ * Note: You can now set these options from the kernel's "command line".
+ * The syntax is:
+ *
+ * wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]
+ *
+ * , where BUS_ON and BUS_OFF are in nanoseconds. BIOS default values
+ * are 8000ns for BUS_ON and 1875ns for BUS_OFF.
+ * eg:
+ * wd7000=7,6,0x350
+ *
+ * will configure the driver for a WD-7000 controller
+ * using IRQ 15 with a DMA channel 6, at IO base address 0x350.
+ */
+static int __init wd7000_setup(char *str)
+{
+ static short wd7000_card_num; /* .bss will zero this */
+ short i;
+ int ints[6];
+
+ (void) get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (wd7000_card_num >= NUM_CONFIGS) {
+ printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
+ return 0;
+ }
+
+ if ((ints[0] < 3) || (ints[0] > 5)) {
+ printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
+ } else {
+ for (i = 0; i < NUM_IRQS; i++)
+ if (ints[1] == wd7000_irq[i])
+ break;
+
+ if (i == NUM_IRQS) {
+ setup_error("invalid IRQ.", ints);
+ return 0;
+ } else
+ configs[wd7000_card_num].irq = ints[1];
+
+ for (i = 0; i < NUM_DMAS; i++)
+ if (ints[2] == wd7000_dma[i])
+ break;
+
+ if (i == NUM_DMAS) {
+ setup_error("invalid DMA channel.", ints);
+ return 0;
+ } else
+ configs[wd7000_card_num].dma = ints[2];
+
+ for (i = 0; i < NUM_IOPORTS; i++)
+ if (ints[3] == wd7000_iobase[i])
+ break;
+
+ if (i == NUM_IOPORTS) {
+ setup_error("invalid I/O base address.", ints);
+ return 0;
+ } else
+ configs[wd7000_card_num].iobase = ints[3];
+
+ if (ints[0] > 3) {
+ if ((ints[4] < 500) || (ints[4] > 31875)) {
+ setup_error("BUS_ON value is out of range (500" " to 31875 nanoseconds)!", ints);
+ configs[wd7000_card_num].bus_on = BUS_ON;
+ } else
+ configs[wd7000_card_num].bus_on = ints[4] / 125;
+ } else
+ configs[wd7000_card_num].bus_on = BUS_ON;
+
+ if (ints[0] > 4) {
+ if ((ints[5] < 500) || (ints[5] > 31875)) {
+ setup_error("BUS_OFF value is out of range (500" " to 31875 nanoseconds)!", ints);
+ configs[wd7000_card_num].bus_off = BUS_OFF;
+ } else
+ configs[wd7000_card_num].bus_off = ints[5] / 125;
+ } else
+ configs[wd7000_card_num].bus_off = BUS_OFF;
+
+ if (wd7000_card_num) {
+ for (i = 0; i < (wd7000_card_num - 1); i++) {
+ int j = i + 1;
+
+ for (; j < wd7000_card_num; j++)
+ if (configs[i].irq == configs[j].irq) {
+ setup_error("duplicated IRQ!", ints);
+ return 0;
+ }
+ if (configs[i].dma == configs[j].dma) {
+ setup_error("duplicated DMA " "channel!", ints);
+ return 0;
+ }
+ if (configs[i].iobase == configs[j].iobase) {
+ setup_error("duplicated I/O " "base address!", ints);
+ return 0;
+ }
+ }
+ }
+
+ dprintk(KERN_DEBUG "wd7000_setup: IRQ=%d, DMA=%d, I/O=0x%x, "
+ "BUS_ON=%dns, BUS_OFF=%dns\n", configs[wd7000_card_num].irq, configs[wd7000_card_num].dma, configs[wd7000_card_num].iobase, configs[wd7000_card_num].bus_on * 125, configs[wd7000_card_num].bus_off * 125);
+
+ wd7000_card_num++;
+ }
+ return 1;
+}
+
+__setup("wd7000=", wd7000_setup);
+
+static inline void any2scsi(unchar * scsi, int any)
+{
+ *scsi++ = (unsigned)any >> 16;
+ *scsi++ = (unsigned)any >> 8;
+ *scsi++ = any;
+}
+
+static inline int scsi2int(unchar * scsi)
+{
+ return (scsi[0] << 16) | (scsi[1] << 8) | scsi[2];
+}
+
+static inline void wd7000_enable_intr(Adapter * host)
+{
+ host->control |= INT_EN;
+ outb(host->control, host->iobase + ASC_CONTROL);
+}
+
+
+static inline void wd7000_enable_dma(Adapter * host)
+{
+ unsigned long flags;
+ host->control |= DMA_EN;
+ outb(host->control, host->iobase + ASC_CONTROL);
+
+ flags = claim_dma_lock();
+ set_dma_mode(host->dma, DMA_MODE_CASCADE);
+ enable_dma(host->dma);
+ release_dma_lock(flags);
+
+}
+
+
+#define WAITnexttimeout 200 /* 2 seconds */
+
+static inline short WAIT(unsigned port, unsigned mask, unsigned allof, unsigned noneof)
+{
+ unsigned WAITbits;
+ unsigned long WAITtimeout = jiffies + WAITnexttimeout;
+
+ while (time_before_eq(jiffies, WAITtimeout)) {
+ WAITbits = inb(port) & mask;
+
+ if (((WAITbits & allof) == allof) && ((WAITbits & noneof) == 0))
+ return (0);
+ }
+
+ return (1);
+}
+
+
+static inline int command_out(Adapter * host, unchar * cmd, int len)
+{
+ if (!WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ while (len--) {
+ do {
+ outb(*cmd, host->iobase + ASC_COMMAND);
+ WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
+ } while (inb(host->iobase + ASC_STAT) & CMD_REJ);
+
+ cmd++;
+ }
+
+ return (1);
+ }
+
+ printk(KERN_WARNING "wd7000 command_out: WAIT failed(%d)\n", len + 1);
+
+ return (0);
+}
+
+
+/*
+ * This version of alloc_scbs is in preparation for supporting multiple
+ * commands per lun and command chaining, by queueing pending commands.
+ * We will need to allocate Scbs in blocks since they will wait to be
+ * executed so there is the possibility of deadlock otherwise.
+ * Also, to keep larger requests from being starved by smaller requests,
+ * we limit access to this routine with an internal busy flag, so that
+ * the satisfiability of a request is not dependent on the size of the
+ * request.
+ */
+static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed)
+{
+ Scb *scb, *p = NULL;
+ unsigned long flags;
+ unsigned long timeout = jiffies + WAITnexttimeout;
+ unsigned long now;
+ int i;
+
+ if (needed <= 0)
+ return (NULL); /* sanity check */
+
+ spin_unlock_irq(host->host_lock);
+
+ retry:
+ while (freescbs < needed) {
+ timeout = jiffies + WAITnexttimeout;
+ do {
+ /* FIXME: can we actually just yield here ?? */
+ for (now = jiffies; now == jiffies;)
+ cpu_relax(); /* wait a jiffy */
+ } while (freescbs < needed && time_before_eq(jiffies, timeout));
+ /*
+ * If we get here with enough free Scbs, we can take them.
+ * Otherwise, we timed out and didn't get enough.
+ */
+ if (freescbs < needed) {
+ printk(KERN_ERR "wd7000: can't get enough free SCBs.\n");
+ return (NULL);
+ }
+ }
+
+ /* Take the lock, then check we didn't get beaten, if so try again */
+ spin_lock_irqsave(&scbpool_lock, flags);
+ if (freescbs < needed) {
+ spin_unlock_irqrestore(&scbpool_lock, flags);
+ goto retry;
+ }
+
+ scb = scbfree;
+ freescbs -= needed;
+ for (i = 0; i < needed; i++) {
+ p = scbfree;
+ scbfree = p->next;
+ }
+ p->next = NULL;
+
+ spin_unlock_irqrestore(&scbpool_lock, flags);
+
+ spin_lock_irq(host->host_lock);
+ return (scb);
+}
+
+
+static inline void free_scb(Scb * scb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scbpool_lock, flags);
+
+ memset(scb, 0, sizeof(Scb));
+ scb->next = scbfree;
+ scbfree = scb;
+ freescbs++;
+
+ spin_unlock_irqrestore(&scbpool_lock, flags);
+}
+
+
+static inline void init_scbs(void)
+{
+ int i;
+
+ spin_lock_init(&scbpool_lock);
+
+ /* This is only ever called before the SCB pool is active */
+
+ scbfree = &(scbs[0]);
+ memset(scbs, 0, sizeof(scbs));
+ for (i = 0; i < MAX_SCBS - 1; i++) {
+ scbs[i].next = &(scbs[i + 1]);
+ scbs[i].SCpnt = NULL;
+ }
+ scbs[MAX_SCBS - 1].next = NULL;
+ scbs[MAX_SCBS - 1].SCpnt = NULL;
+}
+
+
+static int mail_out(Adapter * host, Scb * scbptr)
+/*
+ * Note: this can also be used for ICBs; just cast to the parm type.
+ */
+{
+ int i, ogmb;
+ unsigned long flags;
+ unchar start_ogmb;
+ Mailbox *ogmbs = host->mb.ogmb;
+ int *next_ogmb = &(host->next_ogmb);
+
+ dprintk("wd7000_mail_out: 0x%06lx", (long) scbptr);
+
+ /* We first look for a free outgoing mailbox */
+ spin_lock_irqsave(host->sh->host_lock, flags);
+ ogmb = *next_ogmb;
+ for (i = 0; i < OGMB_CNT; i++) {
+ if (ogmbs[ogmb].status == 0) {
+ dprintk(" using OGMB 0x%x", ogmb);
+ ogmbs[ogmb].status = 1;
+ any2scsi((unchar *) ogmbs[ogmb].scbptr, (int) scbptr);
+
+ *next_ogmb = (ogmb + 1) % OGMB_CNT;
+ break;
+ } else
+ ogmb = (ogmb + 1) % OGMB_CNT;
+ }
+ spin_unlock_irqrestore(host->sh->host_lock, flags);
+
+ dprintk(", scb is 0x%06lx", (long) scbptr);
+
+ if (i >= OGMB_CNT) {
+ /*
+ * Alternatively, we might issue the "interrupt on free OGMB",
+ * and sleep, but it must be ensured that it isn't the init
+ * task running. Instead, this version assumes that the caller
+ * will be persistent, and try again. Since it's the adapter
+ * that marks OGMB's free, waiting even with interrupts off
+ * should work, since they are freed very quickly in most cases.
+ */
+ dprintk(", no free OGMBs.\n");
+ return (0);
+ }
+
+ wd7000_enable_intr(host);
+
+ start_ogmb = START_OGMB | ogmb;
+ command_out(host, &start_ogmb, 1);
+
+ dprintk(", awaiting interrupt.\n");
+
+ return (1);
+}
+
+
+static int make_code(unsigned hosterr, unsigned scsierr)
+{
+#ifdef WD7000_DEBUG
+ int in_error = hosterr;
+#endif
+
+ switch ((hosterr >> 8) & 0xff) {
+ case 0: /* Reserved */
+ hosterr = DID_ERROR;
+ break;
+ case 1: /* Command Complete, no errors */
+ hosterr = DID_OK;
+ break;
+ case 2: /* Command complete, error logged in scb status (scsierr) */
+ hosterr = DID_OK;
+ break;
+ case 4: /* Command failed to complete - timeout */
+ hosterr = DID_TIME_OUT;
+ break;
+ case 5: /* Command terminated; Bus reset by external device */
+ hosterr = DID_RESET;
+ break;
+ case 6: /* Unexpected Command Received w/ host as target */
+ hosterr = DID_BAD_TARGET;
+ break;
+ case 80: /* Unexpected Reselection */
+ case 81: /* Unexpected Selection */
+ hosterr = DID_BAD_INTR;
+ break;
+ case 82: /* Abort Command Message */
+ hosterr = DID_ABORT;
+ break;
+ case 83: /* SCSI Bus Software Reset */
+ case 84: /* SCSI Bus Hardware Reset */
+ hosterr = DID_RESET;
+ break;
+ default: /* Reserved */
+ hosterr = DID_ERROR;
+ }
+#ifdef WD7000_DEBUG
+ if (scsierr || hosterr)
+ dprintk("\nSCSI command error: SCSI 0x%02x host 0x%04x return %d\n", scsierr, in_error, hosterr);
+#endif
+ return (scsierr | (hosterr << 16));
+}
+
+#define wd7000_intr_ack(host) outb (0, host->iobase + ASC_INTR_ACK)
+
+
+static irqreturn_t wd7000_intr(int irq, void *dev_id)
+{
+ Adapter *host = (Adapter *) dev_id;
+ int flag, icmb, errstatus, icmb_status;
+ int host_error, scsi_error;
+ Scb *scb; /* for SCSI commands */
+ IcbAny *icb; /* for host commands */
+ struct scsi_cmnd *SCpnt;
+ Mailbox *icmbs = host->mb.icmb;
+ unsigned long flags;
+
+ spin_lock_irqsave(host->sh->host_lock, flags);
+ host->int_counter++;
+
+ dprintk("wd7000_intr: irq = %d, host = 0x%06lx\n", irq, (long) host);
+
+ flag = inb(host->iobase + ASC_INTR_STAT);
+
+ dprintk("wd7000_intr: intr stat = 0x%02x\n", flag);
+
+ if (!(inb(host->iobase + ASC_STAT) & INT_IM)) {
+ /* NB: these are _very_ possible if IRQ 15 is being used, since
+ * it's the "garbage collector" on the 2nd 8259 PIC. Specifically,
+ * any interrupt signal into the 8259 which can't be identified
+ * comes out as 7 from the 8259, which is 15 to the host. Thus, it
+ * is a good thing the WD7000 has an interrupt status port, so we
+ * can sort these out. Otherwise, electrical noise and other such
+ * problems would be indistinguishable from valid interrupts...
+ */
+ dprintk("wd7000_intr: phantom interrupt...\n");
+ goto ack;
+ }
+
+ if (!(flag & MB_INTR))
+ goto ack;
+
+ /* The interrupt is for a mailbox */
+ if (!(flag & IMB_INTR)) {
+ dprintk("wd7000_intr: free outgoing mailbox\n");
+ /*
+ * If sleep_on() and the "interrupt on free OGMB" command are
+ * used in mail_out(), wake_up() should correspondingly be called
+ * here. For now, we don't need to do anything special.
+ */
+ goto ack;
+ }
+
+ /* The interrupt is for an incoming mailbox */
+ icmb = flag & MB_MASK;
+ icmb_status = icmbs[icmb].status;
+ if (icmb_status & 0x80) { /* unsolicited - result in ICMB */
+ dprintk("wd7000_intr: unsolicited interrupt 0x%02x\n", icmb_status);
+ goto ack;
+ }
+
+ /* Aaaargh! (Zaga) */
+ scb = isa_bus_to_virt(scsi2int((unchar *) icmbs[icmb].scbptr));
+ icmbs[icmb].status = 0;
+ if (scb->op & ICB_OP_MASK) { /* an SCB is done */
+ icb = (IcbAny *) scb;
+ icb->status = icmb_status;
+ icb->phase = 0;
+ goto ack;
+ }
+
+ SCpnt = scb->SCpnt;
+ if (--(SCpnt->SCp.phase) <= 0) { /* all scbs are done */
+ host_error = scb->vue | (icmb_status << 8);
+ scsi_error = scb->status;
+ errstatus = make_code(host_error, scsi_error);
+ SCpnt->result = errstatus;
+
+ free_scb(scb);
+
+ SCpnt->scsi_done(SCpnt);
+ }
+
+ ack:
+ dprintk("wd7000_intr: return from interrupt handler\n");
+ wd7000_intr_ack(host);
+
+ spin_unlock_irqrestore(host->sh->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
+{
+ Scb *scb;
+ Sgb *sgb;
+ unchar *cdb = (unchar *) SCpnt->cmnd;
+ unchar idlun;
+ short cdblen;
+ int nseg;
+ Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
+
+ cdblen = SCpnt->cmd_len;
+ idlun = ((SCpnt->device->id << 5) & 0xe0) | (SCpnt->device->lun & 7);
+ SCpnt->scsi_done = done;
+ SCpnt->SCp.phase = 1;
+ scb = alloc_scbs(SCpnt->device->host, 1);
+ scb->idlun = idlun;
+ memcpy(scb->cdb, cdb, cdblen);
+ scb->direc = 0x40; /* Disable direction check */
+
+ scb->SCpnt = SCpnt; /* so we can find stuff later */
+ SCpnt->host_scribble = (unchar *) scb;
+ scb->host = host;
+
+ nseg = scsi_sg_count(SCpnt);
+ if (nseg > 1) {
+ struct scatterlist *sg;
+ unsigned i;
+
+ dprintk("Using scatter/gather with %d elements.\n", nseg);
+
+ sgb = scb->sgb;
+ scb->op = 1;
+ any2scsi(scb->dataptr, (int) sgb);
+ any2scsi(scb->maxlen, nseg * sizeof(Sgb));
+
+ scsi_for_each_sg(SCpnt, sg, nseg, i) {
+ any2scsi(sgb[i].ptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
+ any2scsi(sgb[i].len, sg->length);
+ }
+ } else {
+ scb->op = 0;
+ if (nseg) {
+ struct scatterlist *sg = scsi_sglist(SCpnt);
+ any2scsi(scb->dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
+ }
+ any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
+ }
+
+ /* FIXME: drop lock and yield here ? */
+
+ while (!mail_out(host, scb))
+ cpu_relax(); /* keep trying */
+
+ return 0;
+}
+
+static DEF_SCSI_QCMD(wd7000_queuecommand)
+
+static int wd7000_diagnostics(Adapter * host, int code)
+{
+ static IcbDiag icb = { ICB_OP_DIAGNOSTICS };
+ static unchar buf[256];
+ unsigned long timeout;
+
+ icb.type = code;
+ any2scsi(icb.len, sizeof(buf));
+ any2scsi(icb.ptr, (int) &buf);
+ icb.phase = 1;
+ /*
+ * This routine is only called at init, so there should be OGMBs
+ * available. I'm assuming so here. If this is going to
+ * fail, I can just let the timeout catch the failure.
+ */
+ mail_out(host, (struct scb *) &icb);
+ timeout = jiffies + WAITnexttimeout; /* wait up to 2 seconds */
+ while (icb.phase && time_before(jiffies, timeout)) {
+ cpu_relax(); /* wait for completion */
+ barrier();
+ }
+
+ if (icb.phase) {
+ printk("wd7000_diagnostics: timed out.\n");
+ return (0);
+ }
+ if (make_code(icb.vue | (icb.status << 8), 0)) {
+ printk("wd7000_diagnostics: failed (0x%02x,0x%02x)\n", icb.vue, icb.status);
+ return (0);
+ }
+
+ return (1);
+}
+
+
+static int wd7000_adapter_reset(Adapter * host)
+{
+ InitCmd init_cmd = {
+ INITIALIZATION,
+ 7,
+ host->bus_on,
+ host->bus_off,
+ 0,
+ {0, 0, 0},
+ OGMB_CNT,
+ ICMB_CNT
+ };
+ int diag;
+ /*
+ * Reset the adapter - only. The SCSI bus was initialized at power-up,
+ * and we need to do this just so we control the mailboxes, etc.
+ */
+ outb(ASC_RES, host->iobase + ASC_CONTROL);
+ udelay(40); /* reset pulse: this is 40us, only need 25us */
+ outb(0, host->iobase + ASC_CONTROL);
+ host->control = 0; /* this must always shadow ASC_CONTROL */
+
+ if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ printk(KERN_ERR "wd7000_init: WAIT timed out.\n");
+ return -1; /* -1 = not ok */
+ }
+
+ if ((diag = inb(host->iobase + ASC_INTR_STAT)) != 1) {
+ printk("wd7000_init: ");
+
+ switch (diag) {
+ case 2:
+ printk(KERN_ERR "RAM failure.\n");
+ break;
+ case 3:
+ printk(KERN_ERR "FIFO R/W failed\n");
+ break;
+ case 4:
+ printk(KERN_ERR "SBIC register R/W failed\n");
+ break;
+ case 5:
+ printk(KERN_ERR "Initialization D-FF failed.\n");
+ break;
+ case 6:
+ printk(KERN_ERR "Host IRQ D-FF failed.\n");
+ break;
+ case 7:
+ printk(KERN_ERR "ROM checksum error.\n");
+ break;
+ default:
+ printk(KERN_ERR "diagnostic code 0x%02Xh received.\n", diag);
+ }
+ return -1;
+ }
+ /* Clear mailboxes */
+ memset(&(host->mb), 0, sizeof(host->mb));
+
+ /* Execute init command */
+ any2scsi((unchar *) & (init_cmd.mailboxes), (int) &(host->mb));
+ if (!command_out(host, (unchar *) & init_cmd, sizeof(init_cmd))) {
+ printk(KERN_ERR "wd7000_adapter_reset: adapter initialization failed.\n");
+ return -1;
+ }
+
+ if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, ASC_INIT, 0)) {
+ printk("wd7000_adapter_reset: WAIT timed out.\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int wd7000_init(Adapter * host)
+{
+ if (wd7000_adapter_reset(host) == -1)
+ return 0;
+
+
+ if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
+ printk("wd7000_init: can't get IRQ %d.\n", host->irq);
+ return (0);
+ }
+ if (request_dma(host->dma, "wd7000")) {
+ printk("wd7000_init: can't get DMA channel %d.\n", host->dma);
+ free_irq(host->irq, host);
+ return (0);
+ }
+ wd7000_enable_dma(host);
+ wd7000_enable_intr(host);
+
+ if (!wd7000_diagnostics(host, ICB_DIAG_FULL)) {
+ free_dma(host->dma);
+ free_irq(host->irq, NULL);
+ return (0);
+ }
+
+ return (1);
+}
+
+
+static void wd7000_revision(Adapter * host)
+{
+ static IcbRevLvl icb = { ICB_OP_GET_REVISION };
+
+ icb.phase = 1;
+ /*
+ * Like diagnostics, this is only done at init time, in fact, from
+ * wd7000_detect, so there should be OGMBs available. If it fails,
+ * the only damage will be that the revision will show up as 0.0,
+ * which in turn means that scatter/gather will be disabled.
+ */
+ mail_out(host, (struct scb *) &icb);
+ while (icb.phase) {
+ cpu_relax(); /* wait for completion */
+ barrier();
+ }
+ host->rev1 = icb.primary;
+ host->rev2 = icb.secondary;
+}
+
+
+static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
+{
+ dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);
+
+ /*
+ * Currently this is a no-op
+ */
+ dprintk("Sorry, this function is currently out of order...\n");
+ return (length);
+}
+
+
+static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
+{
+ Adapter *adapter = (Adapter *)host->hostdata;
+ unsigned long flags;
+#ifdef WD7000_DEBUG
+ Mailbox *ogmbs, *icmbs;
+ short count;
+#endif
+
+ spin_lock_irqsave(host->host_lock, flags);
+ seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
+ seq_printf(m, " IO base: 0x%x\n", adapter->iobase);
+ seq_printf(m, " IRQ: %d\n", adapter->irq);
+ seq_printf(m, " DMA channel: %d\n", adapter->dma);
+ seq_printf(m, " Interrupts: %d\n", adapter->int_counter);
+ seq_printf(m, " BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
+ seq_printf(m, " BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
+
+#ifdef WD7000_DEBUG
+ ogmbs = adapter->mb.ogmb;
+ icmbs = adapter->mb.icmb;
+
+ seq_printf(m, "\nControl port value: 0x%x\n", adapter->control);
+ seq_puts(m, "Incoming mailbox:\n");
+ seq_printf(m, " size: %d\n", ICMB_CNT);
+ seq_puts(m, " queued messages: ");
+
+ for (i = count = 0; i < ICMB_CNT; i++)
+ if (icmbs[i].status) {
+ count++;
+ seq_printf(m, "0x%x ", i);
+ }
+
+ seq_puts(m, count ? "\n" : "none\n");
+
+ seq_puts(m, "Outgoing mailbox:\n");
+ seq_printf(m, " size: %d\n", OGMB_CNT);
+ seq_printf(m, " next message: 0x%x\n", adapter->next_ogmb);
+ seq_puts(m, " queued messages: ");
+
+ for (i = count = 0; i < OGMB_CNT; i++)
+ if (ogmbs[i].status) {
+ count++;
+ seq_printf(m, "0x%x ", i);
+ }
+
+ seq_puts(m, count ? "\n" : "none\n");
+#endif
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return 0;
+}
+
+
+/*
+ * Returns the number of adapters this driver is supporting.
+ *
+ * The source for hosts.c says to wait to call scsi_register until 100%
+ * sure about an adapter. We need to do it a little sooner here; we
+ * need the storage set up by scsi_register before wd7000_init, and
+ * changing the location of an Adapter structure is more trouble than
+ * calling scsi_unregister.
+ *
+ */
+
+static __init int wd7000_detect(struct scsi_host_template *tpnt)
+{
+ short present = 0, biosaddr_ptr, sig_ptr, i, pass;
+ short biosptr[NUM_CONFIGS];
+ unsigned iobase;
+ Adapter *host = NULL;
+ struct Scsi_Host *sh;
+ int unit = 0;
+
+ dprintk("wd7000_detect: started\n");
+
+#ifdef MODULE
+ if (wd7000)
+ wd7000_setup(wd7000);
+#endif
+
+ for (i = 0; i < UNITS; wd7000_host[i++] = NULL);
+ for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
+
+ tpnt->proc_name = "wd7000";
+ tpnt->show_info = &wd7000_show_info;
+ tpnt->write_info = wd7000_set_info;
+
+ /*
+ * Set up SCB free list, which is shared by all adapters
+ */
+ init_scbs();
+
+ for (pass = 0; pass < NUM_CONFIGS; pass++) {
+ /*
+ * First, search for BIOS SIGNATURE...
+ */
+ for (biosaddr_ptr = 0; biosaddr_ptr < NUM_ADDRS; biosaddr_ptr++)
+ for (sig_ptr = 0; sig_ptr < NUM_SIGNATURES; sig_ptr++) {
+ for (i = 0; i < pass; i++)
+ if (biosptr[i] == biosaddr_ptr)
+ break;
+
+ if (i == pass) {
+ void __iomem *biosaddr = ioremap(wd7000_biosaddr[biosaddr_ptr] + signatures[sig_ptr].ofs,
+ signatures[sig_ptr].len);
+ short bios_match = 1;
+
+ if (biosaddr)
+ bios_match = check_signature(biosaddr, signatures[sig_ptr].sig, signatures[sig_ptr].len);
+
+ iounmap(biosaddr);
+
+ if (bios_match)
+ goto bios_matched;
+ }
+ }
+
+ bios_matched:
+ /*
+ * BIOS SIGNATURE has been found.
+ */
+#ifdef WD7000_DEBUG
+ dprintk("wd7000_detect: pass %d\n", pass + 1);
+
+ if (biosaddr_ptr == NUM_ADDRS)
+ dprintk("WD-7000 SST BIOS not detected...\n");
+ else
+ dprintk("WD-7000 SST BIOS detected at 0x%lx: checking...\n", wd7000_biosaddr[biosaddr_ptr]);
+#endif
+
+ if (configs[pass].irq < 0)
+ continue;
+
+ if (unit == UNITS)
+ continue;
+
+ iobase = configs[pass].iobase;
+
+ dprintk("wd7000_detect: check IO 0x%x region...\n", iobase);
+
+ if (request_region(iobase, 4, "wd7000")) {
+
+ dprintk("wd7000_detect: ASC reset (IO 0x%x) ...", iobase);
+ /*
+ * ASC reset...
+ */
+ outb(ASC_RES, iobase + ASC_CONTROL);
+ msleep(10);
+ outb(0, iobase + ASC_CONTROL);
+
+ if (WAIT(iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ dprintk("failed!\n");
+ goto err_release;
+ } else
+ dprintk("ok!\n");
+
+ if (inb(iobase + ASC_INTR_STAT) == 1) {
+ /*
+ * We register here, to get a pointer to the extra space,
+ * which we'll use as the Adapter structure (host) for
+ * this adapter. It is located just after the registered
+ * Scsi_Host structure (sh), and is located by the empty
+ * array hostdata.
+ */
+ sh = scsi_register(tpnt, sizeof(Adapter));
+ if (sh == NULL)
+ goto err_release;
+
+ host = (Adapter *) sh->hostdata;
+
+ dprintk("wd7000_detect: adapter allocated at 0x%x\n", (int) host);
+ memset(host, 0, sizeof(Adapter));
+
+ host->irq = configs[pass].irq;
+ host->dma = configs[pass].dma;
+ host->iobase = iobase;
+ host->int_counter = 0;
+ host->bus_on = configs[pass].bus_on;
+ host->bus_off = configs[pass].bus_off;
+ host->sh = wd7000_host[unit] = sh;
+ unit++;
+
+ dprintk("wd7000_detect: Trying init WD-7000 card at IO " "0x%x, IRQ %d, DMA %d...\n", host->iobase, host->irq, host->dma);
+
+ if (!wd7000_init(host)) /* Initialization failed */
+ goto err_unregister;
+
+ /*
+ * OK from here - we'll use this adapter/configuration.
+ */
+ wd7000_revision(host); /* important for scatter/gather */
+
+ /*
+ * For boards before rev 6.0, scatter/gather isn't supported.
+ */
+ if (host->rev1 < 6)
+ sh->sg_tablesize = 1;
+
+ present++; /* count it */
+
+ if (biosaddr_ptr != NUM_ADDRS)
+ biosptr[pass] = biosaddr_ptr;
+
+ printk(KERN_INFO "Western Digital WD-7000 (rev %d.%d) ", host->rev1, host->rev2);
+ printk("using IO 0x%x, IRQ %d, DMA %d.\n", host->iobase, host->irq, host->dma);
+ printk(" BUS_ON time: %dns, BUS_OFF time: %dns\n", host->bus_on * 125, host->bus_off * 125);
+ }
+ } else
+ dprintk("wd7000_detect: IO 0x%x region already allocated!\n", iobase);
+
+ continue;
+
+ err_unregister:
+ scsi_unregister(sh);
+ err_release:
+ release_region(iobase, 4);
+
+ }
+
+ if (!present)
+ printk("Failed initialization of WD-7000 SCSI card!\n");
+
+ return (present);
+}
+
+static int wd7000_release(struct Scsi_Host *shost)
+{
+ if (shost->irq)
+ free_irq(shost->irq, NULL);
+ if (shost->io_port && shost->n_io_port)
+ release_region(shost->io_port, shost->n_io_port);
+ scsi_unregister(shost);
+ return 0;
+}
+
+#if 0
+/*
+ * I have absolutely NO idea how to do an abort with the WD7000...
+ */
+static int wd7000_abort(Scsi_Cmnd * SCpnt)
+{
+ Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
+
+ if (inb(host->iobase + ASC_STAT) & INT_IM) {
+ printk("wd7000_abort: lost interrupt\n");
+ wd7000_intr_handle(host->irq, NULL, NULL);
+ return FAILED;
+ }
+ return FAILED;
+}
+#endif
+
+/*
+ * Last resort. Reinitialize the board.
+ */
+
+static int wd7000_host_reset(struct scsi_cmnd *SCpnt)
+{
+ Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
+
+ spin_lock_irq(SCpnt->device->host->host_lock);
+
+ if (wd7000_adapter_reset(host) < 0) {
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+ return FAILED;
+ }
+
+ wd7000_enable_intr(host);
+
+ spin_unlock_irq(SCpnt->device->host->host_lock);
+ return SUCCESS;
+}
+
+/*
+ * This was borrowed directly from aha1542.c. (Zaga)
+ */
+
+static int wd7000_biosparam(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int *ip)
+{
+ char b[BDEVNAME_SIZE];
+
+ dprintk("wd7000_biosparam: dev=%s, size=%d, ",
+ bdevname(bdev, b), capacity);
+ (void)b; /* unused var warning? */
+
+ /*
+ * try default translation
+ */
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = capacity >> 11;
+
+ /*
+ * for disks >1GB do some guessing
+ */
+ if (ip[2] >= 1024) {
+ int info[3];
+
+ /*
+ * try to figure out the geometry from the partition table
+ */
+ if ((scsicam_bios_param(bdev, capacity, info) < 0) || !(((info[0] == 64) && (info[1] == 32)) || ((info[0] == 255) && (info[1] == 63)))) {
+ printk("wd7000_biosparam: unable to verify geometry for disk with >1GB.\n" " using extended translation.\n");
+
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = (unsigned long) capacity / (255 * 63);
+ } else {
+ ip[0] = info[0];
+ ip[1] = info[1];
+ ip[2] = info[2];
+
+ if (info[0] == 255)
+ printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
+ }
+ }
+
+ dprintk("bios geometry: head=%d, sec=%d, cyl=%d\n", ip[0], ip[1], ip[2]);
+ dprintk("WARNING: check, if the bios geometry is correct.\n");
+
+ return (0);
+}
+
+MODULE_AUTHOR("Thomas Wuensche, John Boyd, Miroslav Zagorac");
+MODULE_DESCRIPTION("Driver for the WD7000 series ISA controllers");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template driver_template = {
+ .proc_name = "wd7000",
+ .show_info = wd7000_show_info,
+ .write_info = wd7000_set_info,
+ .name = "Western Digital WD-7000",
+ .detect = wd7000_detect,
+ .release = wd7000_release,
+ .queuecommand = wd7000_queuecommand,
+ .eh_host_reset_handler = wd7000_host_reset,
+ .bios_param = wd7000_biosparam,
+ .can_queue = WD7000_Q,
+ .this_id = 7,
+ .sg_tablesize = WD7000_SG,
+ .unchecked_isa_dma = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+#include "scsi_module.c"
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
new file mode 100644
index 000000000..aed426320
--- /dev/null
+++ b/drivers/scsi/wd719x.c
@@ -0,0 +1,996 @@
+/*
+ * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
+ * Copyright 2013 Ondrej Zary
+ *
+ * Original driver by
+ * Aaron Dewell <dewell@woods.net>
+ * Gaerti <Juergen.Gaertner@mbox.si.uni-hannover.de>
+ *
+ * HW documentation available in book:
+ *
+ * SPIDER Command Protocol
+ * by Chandru M. Sippy
+ * SCSI Storage Products (MCP)
+ * Western Digital Corporation
+ * 09-15-95
+ *
+ * http://web.archive.org/web/20070717175254/http://sun1.rrzn.uni-hannover.de/gaertner.juergen/wd719x/Linux/Docu/Spider/
+ */
+
+/*
+ * Driver workflow:
+ * 1. SCSI command is transformed to SCB (Spider Control Block) by the
+ * queuecommand function.
+ * 2. The address of the SCB is stored in a list to be able to access it, if
+ * something goes wrong.
+ * 3. The address of the SCB is written to the Controller, which loads the SCB
+ * via BM-DMA and processes it.
+ * 4. After it has finished, it generates an interrupt, and sets registers.
+ *
+ * flaws:
+ * - abort/reset functions
+ *
+ * ToDo:
+ * - tagged queueing
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/eeprom_93cx6.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "wd719x.h"
+
+/* low-level register access */
+static inline u8 wd719x_readb(struct wd719x *wd, u8 reg)
+{
+ return ioread8(wd->base + reg);
+}
+
+static inline u32 wd719x_readl(struct wd719x *wd, u8 reg)
+{
+ return ioread32(wd->base + reg);
+}
+
+static inline void wd719x_writeb(struct wd719x *wd, u8 reg, u8 val)
+{
+ iowrite8(val, wd->base + reg);
+}
+
+static inline void wd719x_writew(struct wd719x *wd, u8 reg, u16 val)
+{
+ iowrite16(val, wd->base + reg);
+}
+
+static inline void wd719x_writel(struct wd719x *wd, u8 reg, u32 val)
+{
+ iowrite32(val, wd->base + reg);
+}
+
+/* wait until the command register is ready */
+static inline int wd719x_wait_ready(struct wd719x *wd)
+{
+ int i = 0;
+
+ do {
+ if (wd719x_readb(wd, WD719X_AMR_COMMAND) == WD719X_CMD_READY)
+ return 0;
+ udelay(1);
+ } while (i++ < WD719X_WAIT_FOR_CMD_READY);
+
+ dev_err(&wd->pdev->dev, "command register is not ready: 0x%02x\n",
+ wd719x_readb(wd, WD719X_AMR_COMMAND));
+
+ return -ETIMEDOUT;
+}
+
+/* poll interrupt status register until command finishes */
+static inline int wd719x_wait_done(struct wd719x *wd, int timeout)
+{
+ u8 status;
+
+ while (timeout > 0) {
+ status = wd719x_readb(wd, WD719X_AMR_INT_STATUS);
+ if (status)
+ break;
+ timeout--;
+ udelay(1);
+ }
+
+ if (timeout <= 0) {
+ dev_err(&wd->pdev->dev, "direct command timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (status != WD719X_INT_NOERRORS) {
+ dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n",
+ status, wd719x_readb(wd, WD719X_AMR_SCB_ERROR));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun,
+ u8 tag, dma_addr_t data, int timeout)
+{
+ int ret = 0;
+
+ /* clear interrupt status register (allow command register to clear) */
+ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
+
+ /* Wait for the Command register to become free */
+ if (wd719x_wait_ready(wd))
+ return -ETIMEDOUT;
+
+ /* make sure we get NO interrupts */
+ dev |= WD719X_DISABLE_INT;
+ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev);
+ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun);
+ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag);
+ if (data)
+ wd719x_writel(wd, WD719X_AMR_SCB_IN, data);
+
+ /* clear interrupt status register again */
+ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
+
+ /* Now, write the command */
+ wd719x_writeb(wd, WD719X_AMR_COMMAND, opcode);
+
+ if (timeout) /* wait for the command to complete */
+ ret = wd719x_wait_done(wd, timeout);
+
+ /* clear interrupt status register (clean up) */
+ if (opcode != WD719X_CMD_READ_FIRMVER)
+ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
+
+ return ret;
+}
+
+static void wd719x_destroy(struct wd719x *wd)
+{
+ struct wd719x_scb *scb;
+
+ /* stop the RISC */
+ if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
+ WD719X_WAIT_FOR_RISC))
+ dev_warn(&wd->pdev->dev, "RISC sleep command failed\n");
+ /* disable RISC */
+ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
+
+ /* free all SCBs */
+ list_for_each_entry(scb, &wd->active_scbs, list)
+ pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
+ scb->phys);
+ list_for_each_entry(scb, &wd->free_scbs, list)
+ pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
+ scb->phys);
+ /* free internal buffers */
+ pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys);
+ wd->fw_virt = NULL;
+ pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
+ wd->hash_phys);
+ wd->hash_virt = NULL;
+ pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
+ wd->params, wd->params_phys);
+ wd->params = NULL;
+ free_irq(wd->pdev->irq, wd);
+}
+
+/* finish a SCSI command, mark SCB (if any) as free, unmap buffers */
+static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result)
+{
+ struct wd719x *wd = shost_priv(cmd->device->host);
+ struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble;
+
+ if (scb) {
+ list_move(&scb->list, &wd->free_scbs);
+ dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ scsi_dma_unmap(cmd);
+ }
+ cmd->result = result << 16;
+ cmd->scsi_done(cmd);
+}
+
+/* Build a SCB and send it to the card */
+static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+{
+ int i, count_sg;
+ unsigned long flags;
+ struct wd719x_scb *scb;
+ struct wd719x *wd = shost_priv(sh);
+ dma_addr_t phys;
+
+ cmd->host_scribble = NULL;
+
+ /* get a free SCB - either from existing ones or allocate a new one */
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+ scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list);
+ if (scb) {
+ list_del(&scb->list);
+ phys = scb->phys;
+ } else {
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb),
+ &phys);
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+ if (!scb) {
+ dev_err(&wd->pdev->dev, "unable to allocate SCB\n");
+ wd719x_finish_cmd(cmd, DID_ERROR);
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ return 0;
+ }
+ }
+ memset(scb, 0, sizeof(struct wd719x_scb));
+ list_add(&scb->list, &wd->active_scbs);
+
+ scb->phys = phys;
+ scb->cmd = cmd;
+ cmd->host_scribble = (char *) scb;
+
+ scb->CDB_tag = 0; /* Tagged queueing not supported yet */
+ scb->devid = cmd->device->id;
+ scb->lun = cmd->device->lun;
+
+ /* copy the command */
+ memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);
+
+ /* map sense buffer */
+ scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
+ cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
+
+ /* request autosense */
+ scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;
+
+ /* check direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION
+ | WD719X_SCB_FLAGS_PCI_TO_SCSI;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION;
+
+ /* Scather/gather */
+ count_sg = scsi_dma_map(cmd);
+ if (count_sg < 0) {
+ wd719x_finish_cmd(cmd, DID_ERROR);
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ return 0;
+ }
+ BUG_ON(count_sg > WD719X_SG);
+
+ if (count_sg) {
+ struct scatterlist *sg;
+
+ scb->data_length = cpu_to_le32(count_sg *
+ sizeof(struct wd719x_sglist));
+ scb->data_p = cpu_to_le32(scb->phys +
+ offsetof(struct wd719x_scb, sg_list));
+
+ scsi_for_each_sg(cmd, sg, count_sg, i) {
+ scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg));
+ scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
+ }
+ scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER;
+ } else { /* zero length */
+ scb->data_length = 0;
+ scb->data_p = 0;
+ }
+
+ /* check if the Command register is free */
+ if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) {
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* write pointer to the AMR */
+ wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys);
+ /* send SCB opcode */
+ wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB);
+
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+
+ return 0;
+}
+
+static int wd719x_chip_init(struct wd719x *wd)
+{
+ int i, ret;
+ u32 risc_init[3];
+ const struct firmware *fw_wcs, *fw_risc;
+ const char fwname_wcs[] = "/*(DEBLOBBED)*/";
+ const char fwname_risc[] = "/*(DEBLOBBED)*/";
+
+ memset(wd->hash_virt, 0, WD719X_HASH_TABLE_SIZE);
+
+ /* WCS (sequencer) firmware */
+ ret = reject_firmware(&fw_wcs, fwname_wcs, &wd->pdev->dev);
+ if (ret) {
+ dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n",
+ fwname_wcs, ret);
+ return ret;
+ }
+ /* RISC firmware */
+ ret = reject_firmware(&fw_risc, fwname_risc, &wd->pdev->dev);
+ if (ret) {
+ dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n",
+ fwname_risc, ret);
+ release_firmware(fw_wcs);
+ return ret;
+ }
+ wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size;
+
+ if (!wd->fw_virt)
+ wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size,
+ &wd->fw_phys);
+ if (!wd->fw_virt) {
+ ret = -ENOMEM;
+ goto wd719x_init_end;
+ }
+
+ /* make a fresh copy of WCS and RISC code */
+ memcpy(wd->fw_virt, fw_wcs->data, fw_wcs->size);
+ memcpy(wd->fw_virt + ALIGN(fw_wcs->size, 4), fw_risc->data,
+ fw_risc->size);
+
+ /* Reset the Spider Chip and adapter itself */
+ wd719x_writeb(wd, WD719X_PCI_PORT_RESET, WD719X_PCI_RESET);
+ udelay(WD719X_WAIT_FOR_RISC);
+ /* Clear PIO mode bits set by BIOS */
+ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, 0);
+ /* ensure RISC is not running */
+ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
+ /* ensure command port is ready */
+ wd719x_writeb(wd, WD719X_AMR_COMMAND, 0);
+ if (wd719x_wait_ready(wd)) {
+ ret = -ETIMEDOUT;
+ goto wd719x_init_end;
+ }
+
+ /* Transfer the first 2K words of RISC code to kick start the uP */
+ risc_init[0] = wd->fw_phys; /* WCS FW */
+ risc_init[1] = wd->fw_phys + ALIGN(fw_wcs->size, 4); /* RISC FW */
+ risc_init[2] = wd->hash_phys; /* hash table */
+
+ /* clear DMA status */
+ wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3STATUS, 0);
+
+ /* address to read firmware from */
+ wd719x_writel(wd, WD719X_PCI_EXTERNAL_ADDR, risc_init[1]);
+ /* base address to write firmware to (on card) */
+ wd719x_writew(wd, WD719X_PCI_INTERNAL_ADDR, WD719X_PRAM_BASE_ADDR);
+ /* size: first 2K words */
+ wd719x_writew(wd, WD719X_PCI_DMA_TRANSFER_SIZE, 2048 * 2);
+ /* start DMA */
+ wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3CMD, WD719X_START_CHANNEL2_3DMA);
+
+ /* wait for DMA to complete */
+ i = WD719X_WAIT_FOR_RISC;
+ while (i-- > 0) {
+ u8 status = wd719x_readb(wd, WD719X_PCI_CHANNEL2_3STATUS);
+ if (status == WD719X_START_CHANNEL2_3DONE)
+ break;
+ if (status == WD719X_START_CHANNEL2_3ABORT) {
+ dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA aborted\n");
+ ret = -EIO;
+ goto wd719x_init_end;
+ }
+ udelay(1);
+ }
+ if (i < 1) {
+ dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA timeout\n");
+ ret = -ETIMEDOUT;
+ goto wd719x_init_end;
+ }
+
+ /* firmware is loaded, now initialize and wake up the RISC */
+ /* write RISC initialization long words to Spider */
+ wd719x_writel(wd, WD719X_AMR_SCB_IN, risc_init[0]);
+ wd719x_writel(wd, WD719X_AMR_SCB_IN + 4, risc_init[1]);
+ wd719x_writel(wd, WD719X_AMR_SCB_IN + 8, risc_init[2]);
+
+ /* disable interrupts during initialization of RISC */
+ wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, WD719X_DISABLE_INT);
+
+ /* issue INITIALIZE RISC comand */
+ wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_INIT_RISC);
+ /* enable advanced mode (wake up RISC) */
+ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, WD719X_ENABLE_ADVANCE_MODE);
+ udelay(WD719X_WAIT_FOR_RISC);
+
+ ret = wd719x_wait_done(wd, WD719X_WAIT_FOR_RISC);
+ /* clear interrupt status register */
+ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
+ if (ret) {
+ dev_warn(&wd->pdev->dev, "Unable to initialize RISC\n");
+ goto wd719x_init_end;
+ }
+ /* RISC is up and running */
+
+ /* Read FW version from RISC */
+ ret = wd719x_direct_cmd(wd, WD719X_CMD_READ_FIRMVER, 0, 0, 0, 0,
+ WD719X_WAIT_FOR_RISC);
+ if (ret) {
+ dev_warn(&wd->pdev->dev, "Unable to read firmware version\n");
+ goto wd719x_init_end;
+ }
+ dev_info(&wd->pdev->dev, "RISC initialized with firmware version %.2x.%.2x\n",
+ wd719x_readb(wd, WD719X_AMR_SCB_OUT + 1),
+ wd719x_readb(wd, WD719X_AMR_SCB_OUT));
+
+ /* RESET SCSI bus */
+ ret = wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0,
+ WD719X_WAIT_FOR_SCSI_RESET);
+ if (ret) {
+ dev_warn(&wd->pdev->dev, "SCSI bus reset failed\n");
+ goto wd719x_init_end;
+ }
+
+ /* use HostParameter structure to set Spider's Host Parameter Block */
+ ret = wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0,
+ sizeof(struct wd719x_host_param), 0,
+ wd->params_phys, WD719X_WAIT_FOR_RISC);
+ if (ret) {
+ dev_warn(&wd->pdev->dev, "Failed to set HOST PARAMETERS\n");
+ goto wd719x_init_end;
+ }
+
+ /* initiate SCAM (does nothing if disabled in BIOS) */
+ /* bug?: we should pass a mask of static IDs which we don't have */
+ ret = wd719x_direct_cmd(wd, WD719X_CMD_INIT_SCAM, 0, 0, 0, 0,
+ WD719X_WAIT_FOR_SCSI_RESET);
+ if (ret) {
+ dev_warn(&wd->pdev->dev, "SCAM initialization failed\n");
+ goto wd719x_init_end;
+ }
+
+ /* clear AMR_BIOS_SHARE_INT register */
+ wd719x_writeb(wd, WD719X_AMR_BIOS_SHARE_INT, 0);
+
+wd719x_init_end:
+ release_firmware(fw_wcs);
+ release_firmware(fw_risc);
+
+ return ret;
+}
+
+static int wd719x_abort(struct scsi_cmnd *cmd)
+{
+ int action, result;
+ unsigned long flags;
+ struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble;
+ struct wd719x *wd = shost_priv(cmd->device->host);
+
+ dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
+
+ action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT;
+
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+ result = wd719x_direct_cmd(wd, action, cmd->device->id,
+ cmd->device->lun, cmd->tag, scb->phys, 0);
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ if (result)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device)
+{
+ int result;
+ unsigned long flags;
+ struct wd719x *wd = shost_priv(cmd->device->host);
+
+ dev_info(&wd->pdev->dev, "%s reset requested\n",
+ (opcode == WD719X_CMD_BUSRESET) ? "bus" : "device");
+
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+ result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0,
+ WD719X_WAIT_FOR_SCSI_RESET);
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ if (result)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+static int wd719x_dev_reset(struct scsi_cmnd *cmd)
+{
+ return wd719x_reset(cmd, WD719X_CMD_RESET, cmd->device->id);
+}
+
+static int wd719x_bus_reset(struct scsi_cmnd *cmd)
+{
+ return wd719x_reset(cmd, WD719X_CMD_BUSRESET, 0);
+}
+
+static int wd719x_host_reset(struct scsi_cmnd *cmd)
+{
+ struct wd719x *wd = shost_priv(cmd->device->host);
+ struct wd719x_scb *scb, *tmp;
+ unsigned long flags;
+ int result;
+
+ dev_info(&wd->pdev->dev, "host reset requested\n");
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+ /* Try to reinit the RISC */
+ if (wd719x_chip_init(wd) == 0)
+ result = SUCCESS;
+ else
+ result = FAILED;
+
+ /* flush all SCBs */
+ list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
+ struct scsi_cmnd *tmp_cmd = scb->cmd;
+ wd719x_finish_cmd(tmp_cmd, result);
+ }
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+
+ return result;
+}
+
+static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ if (capacity >= 0x200000) {
+ geom[0] = 255; /* heads */
+ geom[1] = 63; /* sectors */
+ } else {
+ geom[0] = 64; /* heads */
+ geom[1] = 32; /* sectors */
+ }
+ geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */
+
+ return 0;
+}
+
+/* process a SCB-completion interrupt */
+static inline void wd719x_interrupt_SCB(struct wd719x *wd,
+ union wd719x_regs regs,
+ struct wd719x_scb *scb)
+{
+ struct scsi_cmnd *cmd;
+ int result;
+
+ /* now have to find result from card */
+ switch (regs.bytes.SUE) {
+ case WD719X_SUE_NOERRORS:
+ result = DID_OK;
+ break;
+ case WD719X_SUE_REJECTED:
+ dev_err(&wd->pdev->dev, "command rejected\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_SCBQFULL:
+ dev_err(&wd->pdev->dev, "SCB queue is full\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_TERM:
+ dev_dbg(&wd->pdev->dev, "SCB terminated by direct command\n");
+ result = DID_ABORT; /* or DID_RESET? */
+ break;
+ case WD719X_SUE_CHAN1ABORT:
+ case WD719X_SUE_CHAN23ABORT:
+ result = DID_ABORT;
+ dev_err(&wd->pdev->dev, "DMA abort\n");
+ break;
+ case WD719X_SUE_CHAN1PAR:
+ case WD719X_SUE_CHAN23PAR:
+ result = DID_PARITY;
+ dev_err(&wd->pdev->dev, "DMA parity error\n");
+ break;
+ case WD719X_SUE_TIMEOUT:
+ result = DID_TIME_OUT;
+ dev_dbg(&wd->pdev->dev, "selection timeout\n");
+ break;
+ case WD719X_SUE_RESET:
+ dev_dbg(&wd->pdev->dev, "bus reset occured\n");
+ result = DID_RESET;
+ break;
+ case WD719X_SUE_BUSERROR:
+ dev_dbg(&wd->pdev->dev, "SCSI bus error\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_WRONGWAY:
+ dev_err(&wd->pdev->dev, "wrong data transfer direction\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_BADPHASE:
+ dev_err(&wd->pdev->dev, "invalid SCSI phase\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_TOOLONG:
+ dev_err(&wd->pdev->dev, "record too long\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_BUSFREE:
+ dev_err(&wd->pdev->dev, "unexpected bus free\n");
+ result = DID_NO_CONNECT; /* or DID_ERROR ???*/
+ break;
+ case WD719X_SUE_ARSDONE:
+ dev_dbg(&wd->pdev->dev, "auto request sense\n");
+ if (regs.bytes.SCSI == 0)
+ result = DID_OK;
+ else
+ result = DID_PARITY;
+ break;
+ case WD719X_SUE_IGNORED:
+ dev_err(&wd->pdev->dev, "target id %d ignored command\n",
+ scb->cmd->device->id);
+ result = DID_NO_CONNECT;
+ break;
+ case WD719X_SUE_WRONGTAGS:
+ dev_err(&wd->pdev->dev, "reversed tags\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_BADTAGS:
+ dev_err(&wd->pdev->dev, "tag type not supported by target\n");
+ result = DID_ERROR;
+ break;
+ case WD719X_SUE_NOSCAMID:
+ dev_err(&wd->pdev->dev, "no SCAM soft ID available\n");
+ result = DID_ERROR;
+ break;
+ default:
+ dev_warn(&wd->pdev->dev, "unknown SUE error code: 0x%x\n",
+ regs.bytes.SUE);
+ result = DID_ERROR;
+ break;
+ }
+ cmd = scb->cmd;
+
+ wd719x_finish_cmd(cmd, result);
+}
+
+static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
+{
+ struct wd719x *wd = dev_id;
+ union wd719x_regs regs;
+ unsigned long flags;
+ u32 SCB_out;
+
+ spin_lock_irqsave(wd->sh->host_lock, flags);
+ /* read SCB pointer back from card */
+ SCB_out = wd719x_readl(wd, WD719X_AMR_SCB_OUT);
+ /* read all status info at once */
+ regs.all = cpu_to_le32(wd719x_readl(wd, WD719X_AMR_OP_CODE));
+
+ switch (regs.bytes.INT) {
+ case WD719X_INT_NONE:
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+ return IRQ_NONE;
+ case WD719X_INT_LINKNOSTATUS:
+ dev_err(&wd->pdev->dev, "linked command completed with no status\n");
+ break;
+ case WD719X_INT_BADINT:
+ dev_err(&wd->pdev->dev, "unsolicited interrupt\n");
+ break;
+ case WD719X_INT_NOERRORS:
+ case WD719X_INT_LINKNOERRORS:
+ case WD719X_INT_ERRORSLOGGED:
+ case WD719X_INT_SPIDERFAILED:
+ /* was the cmd completed a direct or SCB command? */
+ if (regs.bytes.OPC == WD719X_CMD_PROCESS_SCB) {
+ struct wd719x_scb *scb;
+ list_for_each_entry(scb, &wd->active_scbs, list)
+ if (SCB_out == scb->phys)
+ break;
+ if (SCB_out == scb->phys)
+ wd719x_interrupt_SCB(wd, regs, scb);
+ else
+ dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n");
+ } else
+ dev_warn(&wd->pdev->dev, "direct command 0x%x completed\n",
+ regs.bytes.OPC);
+ break;
+ case WD719X_INT_PIOREADY:
+ dev_err(&wd->pdev->dev, "card indicates PIO data ready but we never use PIO\n");
+ /* interrupt will not be cleared until all data is read */
+ break;
+ default:
+ dev_err(&wd->pdev->dev, "unknown interrupt reason: %d\n",
+ regs.bytes.INT);
+
+ }
+ /* clear interrupt so another can happen */
+ wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
+ spin_unlock_irqrestore(wd->sh->host_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wd719x_eeprom_reg_read(struct eeprom_93cx6 *eeprom)
+{
+ struct wd719x *wd = eeprom->data;
+ u8 reg = wd719x_readb(wd, WD719X_PCI_GPIO_DATA);
+
+ eeprom->reg_data_out = reg & WD719X_EE_DO;
+}
+
+static void wd719x_eeprom_reg_write(struct eeprom_93cx6 *eeprom)
+{
+ struct wd719x *wd = eeprom->data;
+ u8 reg = 0;
+
+ if (eeprom->reg_data_in)
+ reg |= WD719X_EE_DI;
+ if (eeprom->reg_data_clock)
+ reg |= WD719X_EE_CLK;
+ if (eeprom->reg_chip_select)
+ reg |= WD719X_EE_CS;
+
+ wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, reg);
+}
+
+/* read config from EEPROM so it can be downloaded by the RISC on (re-)init */
+static void wd719x_read_eeprom(struct wd719x *wd)
+{
+ struct eeprom_93cx6 eeprom;
+ u8 gpio;
+ struct wd719x_eeprom_header header;
+
+ eeprom.data = wd;
+ eeprom.register_read = wd719x_eeprom_reg_read;
+ eeprom.register_write = wd719x_eeprom_reg_write;
+ eeprom.width = PCI_EEPROM_WIDTH_93C46;
+
+ /* set all outputs to low */
+ wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, 0);
+ /* configure GPIO pins */
+ gpio = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL);
+ /* GPIO outputs */
+ gpio &= (~(WD719X_EE_CLK | WD719X_EE_DI | WD719X_EE_CS));
+ /* GPIO input */
+ gpio |= WD719X_EE_DO;
+ wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, gpio);
+
+ /* read EEPROM header */
+ eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header, sizeof(header));
+
+ if (header.sig1 == 'W' && header.sig2 == 'D')
+ eeprom_93cx6_multireadb(&eeprom, header.cfg_offset,
+ (u8 *)wd->params,
+ sizeof(struct wd719x_host_param));
+ else { /* default EEPROM values */
+ dev_warn(&wd->pdev->dev, "EEPROM signature is invalid (0x%02x 0x%02x), using default values\n",
+ header.sig1, header.sig2);
+ wd->params->ch_1_th = 0x10; /* 16 DWs = 64 B */
+ wd->params->scsi_conf = 0x4c; /* 48ma, spue, parity check */
+ wd->params->own_scsi_id = 0x07; /* ID 7, SCAM disabled */
+ wd->params->sel_timeout = 0x4d; /* 250 ms */
+ wd->params->sleep_timer = 0x01;
+ wd->params->cdb_size = cpu_to_le16(0x5555); /* all 6 B */
+ wd->params->scsi_pad = 0x1b;
+ if (wd->type == WD719X_TYPE_7193) /* narrow card - disable */
+ wd->params->wide = cpu_to_le32(0x00000000);
+ else /* initiate & respond to WIDE messages */
+ wd->params->wide = cpu_to_le32(0xffffffff);
+ wd->params->sync = cpu_to_le32(0xffffffff);
+ wd->params->soft_mask = 0x00; /* all disabled */
+ wd->params->unsol_mask = 0x00; /* all disabled */
+ }
+ /* disable TAGGED messages */
+ wd->params->tag_en = cpu_to_le16(0x0000);
+}
+
+/* Read card type from GPIO bits 1 and 3 */
+static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
+{
+ u8 card = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL);
+
+ card |= WD719X_GPIO_ID_BITS;
+ wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, card);
+ card = wd719x_readb(wd, WD719X_PCI_GPIO_DATA) & WD719X_GPIO_ID_BITS;
+ switch (card) {
+ case 0x08:
+ return WD719X_TYPE_7193;
+ case 0x02:
+ return WD719X_TYPE_7197;
+ case 0x00:
+ return WD719X_TYPE_7296;
+ default:
+ dev_warn(&wd->pdev->dev, "unknown card type 0x%x\n", card);
+ return WD719X_TYPE_UNKNOWN;
+ }
+}
+
+static int wd719x_board_found(struct Scsi_Host *sh)
+{
+ struct wd719x *wd = shost_priv(sh);
+ char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
+ int ret;
+
+ INIT_LIST_HEAD(&wd->active_scbs);
+ INIT_LIST_HEAD(&wd->free_scbs);
+
+ sh->base = pci_resource_start(wd->pdev, 0);
+
+ wd->type = wd719x_detect_type(wd);
+
+ wd->sh = sh;
+ sh->irq = wd->pdev->irq;
+ wd->fw_virt = NULL;
+
+ /* memory area for host (EEPROM) parameters */
+ wd->params = pci_alloc_consistent(wd->pdev,
+ sizeof(struct wd719x_host_param),
+ &wd->params_phys);
+ if (!wd->params) {
+ dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n");
+ return -ENOMEM;
+ }
+
+ /* memory area for the RISC for hash table of outstanding requests */
+ wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE,
+ &wd->hash_phys);
+ if (!wd->hash_virt) {
+ dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n");
+ ret = -ENOMEM;
+ goto fail_free_params;
+ }
+
+ ret = request_irq(wd->pdev->irq, wd719x_interrupt, IRQF_SHARED,
+ "wd719x", wd);
+ if (ret) {
+ dev_warn(&wd->pdev->dev, "unable to assign IRQ %d\n",
+ wd->pdev->irq);
+ goto fail_free_hash;
+ }
+
+ /* read parameters from EEPROM */
+ wd719x_read_eeprom(wd);
+
+ ret = wd719x_chip_init(wd);
+ if (ret)
+ goto fail_free_irq;
+
+ sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK;
+
+ dev_info(&wd->pdev->dev, "%s at I/O 0x%lx, IRQ %u, SCSI ID %d\n",
+ card_types[wd->type], sh->base, sh->irq, sh->this_id);
+
+ return 0;
+
+fail_free_irq:
+ free_irq(wd->pdev->irq, wd);
+fail_free_hash:
+ pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
+ wd->hash_phys);
+fail_free_params:
+ pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
+ wd->params, wd->params_phys);
+
+ return ret;
+}
+
+static struct scsi_host_template wd719x_template = {
+ .module = THIS_MODULE,
+ .name = "Western Digital 719x",
+ .queuecommand = wd719x_queuecommand,
+ .eh_abort_handler = wd719x_abort,
+ .eh_device_reset_handler = wd719x_dev_reset,
+ .eh_bus_reset_handler = wd719x_bus_reset,
+ .eh_host_reset_handler = wd719x_host_reset,
+ .bios_param = wd719x_biosparam,
+ .proc_name = "wd719x",
+ .can_queue = 255,
+ .this_id = 7,
+ .sg_tablesize = WD719X_SG,
+ .cmd_per_lun = WD719X_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
+{
+ int err;
+ struct Scsi_Host *sh;
+ struct wd719x *wd;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto fail;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
+ goto disable_device;
+ }
+
+ err = pci_request_regions(pdev, "wd719x");
+ if (err)
+ goto disable_device;
+ pci_set_master(pdev);
+
+ err = -ENODEV;
+ if (pci_resource_len(pdev, 0) == 0)
+ goto release_region;
+
+ err = -ENOMEM;
+ sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x));
+ if (!sh)
+ goto release_region;
+
+ wd = shost_priv(sh);
+ wd->base = pci_iomap(pdev, 0, 0);
+ if (!wd->base)
+ goto free_host;
+ wd->pdev = pdev;
+
+ err = wd719x_board_found(sh);
+ if (err)
+ goto unmap;
+
+ err = scsi_add_host(sh, &wd->pdev->dev);
+ if (err)
+ goto destroy;
+
+ scsi_scan_host(sh);
+
+ pci_set_drvdata(pdev, sh);
+ return 0;
+
+destroy:
+ wd719x_destroy(wd);
+unmap:
+ pci_iounmap(pdev, wd->base);
+free_host:
+ scsi_host_put(sh);
+release_region:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+fail:
+ return err;
+}
+
+
+static void wd719x_pci_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *sh = pci_get_drvdata(pdev);
+ struct wd719x *wd = shost_priv(sh);
+
+ scsi_remove_host(sh);
+ wd719x_destroy(wd);
+ pci_iounmap(pdev, wd->base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ scsi_host_put(sh);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, wd719x_pci_table);
+
+static struct pci_driver wd719x_pci_driver = {
+ .name = "wd719x",
+ .id_table = wd719x_pci_table,
+ .probe = wd719x_pci_probe,
+ .remove = wd719x_pci_remove,
+};
+
+static int __init wd719x_init(void)
+{
+ return pci_register_driver(&wd719x_pci_driver);
+}
+
+static void __exit wd719x_exit(void)
+{
+ pci_unregister_driver(&wd719x_pci_driver);
+}
+
+module_init(wd719x_init);
+module_exit(wd719x_exit);
+
+MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver");
+MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner");
+MODULE_LICENSE("GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
new file mode 100644
index 000000000..185e30e4e
--- /dev/null
+++ b/drivers/scsi/wd719x.h
@@ -0,0 +1,249 @@
+#ifndef _WD719X_H_
+#define _WD719X_H_
+
+#define WD719X_SG 255 /* Scatter/gather size */
+#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but
+ * this is 1 for now to be safe. */
+
+struct wd719x_sglist {
+ __le32 ptr;
+ __le32 length;
+} __packed;
+
+enum wd719x_card_type {
+ WD719X_TYPE_UNKNOWN = 0,
+ WD719X_TYPE_7193,
+ WD719X_TYPE_7197,
+ WD719X_TYPE_7296,
+};
+
+union wd719x_regs {
+ __le32 all; /* All Status at once */
+ struct {
+ u8 OPC; /* Opcode register */
+ u8 SCSI; /* SCSI Errors */
+ u8 SUE; /* Spider unique Errors */
+ u8 INT; /* Interrupt Status */
+ } bytes;
+};
+
+/* Spider Command Block (SCB) */
+struct wd719x_scb {
+ __le32 Int_SCB; /* 00-03 Internal SCB link pointer (must be cleared) */
+ u8 SCB_opcode; /* 04 SCB Command opcode */
+ u8 CDB_tag; /* 05 SCSI Tag byte for CDB queues (0 if untagged) */
+ u8 lun; /* 06 SCSI LUN */
+ u8 devid; /* 07 SCSI Device ID */
+ u8 CDB[16]; /* 08-23 SCSI CDB (16 bytes as defined by ANSI spec. */
+ __le32 data_p; /* 24-27 Data transfer address (or SG list address) */
+ __le32 data_length; /* 28-31 Data transfer Length (or SG list length) */
+ __le32 CDB_link; /* 32-35 SCSI CDB Link Ptr */
+ __le32 sense_buf; /* 36-39 Auto request sense buffer address */
+ u8 sense_buf_length;/* 40 Auto request sense transfer length */
+ u8 reserved; /* 41 reserved */
+ u8 SCB_options; /* 42 SCB-options */
+ u8 SCB_tag_msg; /* 43 Tagged messages options */
+ /* Not filled in by host */
+ __le32 req_ptr; /* 44-47 Ptr to Host Request returned on interrupt */
+ u8 host_opcode; /* 48 Host Command Opcode (same as AMR_00) */
+ u8 scsi_stat; /* 49 SCSI Status returned */
+ u8 ret_error; /* 50 SPIDER Unique Error Code returned (SUE) */
+ u8 int_stat; /* 51 Message u8 / Interrupt Status byte returned */
+ __le32 transferred; /* 52-55 Bytes Transferred */
+ u8 last_trans[3]; /* 56-58 Bytes Transferred in last session */
+ u8 length; /* 59 SCSI Messages Length (1-8) */
+ u8 sync_offset; /* 60 Synchronous offset */
+ u8 sync_rate; /* 61 Synchronous rate */
+ u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */
+ /* everything below is for driver use (not used by card) */
+ dma_addr_t phys; /* bus address of the SCB */
+ struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */
+ struct list_head list;
+ struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
+} __packed;
+
+struct wd719x {
+ struct Scsi_Host *sh; /* pointer to host structure */
+ struct pci_dev *pdev;
+ void __iomem *base;
+ enum wd719x_card_type type; /* type of card */
+ void *fw_virt; /* firmware buffer CPU address */
+ dma_addr_t fw_phys; /* firmware buffer bus address */
+ size_t fw_size; /* firmware buffer size */
+ struct wd719x_host_param *params; /* host parameters (EEPROM) */
+ dma_addr_t params_phys; /* host parameters bus address */
+ void *hash_virt; /* hash table CPU address */
+ dma_addr_t hash_phys; /* hash table bus address */
+ struct list_head active_scbs;
+ struct list_head free_scbs;
+};
+
+/* timeout delays in microsecs */
+#define WD719X_WAIT_FOR_CMD_READY 500
+#define WD719X_WAIT_FOR_RISC 2000
+#define WD719X_WAIT_FOR_SCSI_RESET 3000000
+
+/* All commands except 0x00 generate an interrupt */
+#define WD719X_CMD_READY 0x00 /* Command register ready (or noop) */
+#define WD719X_CMD_INIT_RISC 0x01 /* Initialize RISC */
+/* 0x02 is reserved */
+#define WD719X_CMD_BUSRESET 0x03 /* Assert SCSI bus reset */
+#define WD719X_CMD_READ_FIRMVER 0x04 /* Read the Firmware Revision */
+#define WD719X_CMD_ECHO_BYTES 0x05 /* Echo command bytes (DW) */
+/* 0x06 is reserved */
+/* 0x07 is reserved */
+#define WD719X_CMD_GET_PARAM 0x08 /* Get programmable parameters */
+#define WD719X_CMD_SET_PARAM 0x09 /* Set programmable parameters */
+#define WD719X_CMD_SLEEP 0x0a /* Put SPIDER to sleep */
+#define WD719X_CMD_READ_INIT 0x0b /* Read initialization parameters */
+#define WD719X_CMD_RESTORE_INIT 0x0c /* Restore initialization parameters */
+/* 0x0d is reserved */
+/* 0x0e is reserved */
+/* 0x0f is reserved */
+#define WD719X_CMD_ABORT_TAG 0x10 /* Send Abort tag message to target */
+#define WD719X_CMD_ABORT 0x11 /* Send Abort message to target */
+#define WD719X_CMD_RESET 0x12 /* Send Reset message to target */
+#define WD719X_CMD_INIT_SCAM 0x13 /* Initiate SCAM */
+#define WD719X_CMD_GET_SYNC 0x14 /* Get synchronous rates */
+#define WD719X_CMD_SET_SYNC 0x15 /* Set synchronous rates */
+#define WD719X_CMD_GET_WIDTH 0x16 /* Get SCSI bus width */
+#define WD719X_CMD_SET_WIDTH 0x17 /* Set SCSI bus width */
+#define WD719X_CMD_GET_TAGS 0x18 /* Get tag flags */
+#define WD719X_CMD_SET_TAGS 0x19 /* Set tag flags */
+#define WD719X_CMD_GET_PARAM2 0x1a /* Get programmable params (format 2) */
+#define WD719X_CMD_SET_PARAM2 0x1b /* Set programmable params (format 2) */
+/* Commands with request pointers (mailbox) */
+#define WD719X_CMD_PROCESS_SCB 0x80 /* Process SCSI Control Block (SCB) */
+/* No interrupt generated on acceptance of SCB pointer */
+
+/* interrupt status defines */
+#define WD719X_INT_NONE 0x00 /* No interrupt pending */
+#define WD719X_INT_NOERRORS 0x01 /* Command completed with no errors */
+#define WD719X_INT_LINKNOERRORS 0x02 /* link cmd completed with no errors */
+#define WD719X_INT_LINKNOSTATUS 0x03 /* link cmd completed with no flag set */
+#define WD719X_INT_ERRORSLOGGED 0x04 /* cmd completed with errors logged */
+#define WD719X_INT_SPIDERFAILED 0x05 /* cmd failed without valid SCSI status */
+#define WD719X_INT_BADINT 0x80 /* unsolicited interrupt */
+#define WD719X_INT_PIOREADY 0xf0 /* data ready for PIO output */
+
+/* Spider Unique Error Codes (SUE) */
+#define WD719X_SUE_NOERRORS 0x00 /* No errors detected by SPIDER */
+#define WD719X_SUE_REJECTED 0x01 /* Command Rejected (bad opcode/param) */
+#define WD719X_SUE_SCBQFULL 0x02 /* SCB queue full */
+/* 0x03 is reserved */
+#define WD719X_SUE_TERM 0x04 /* Host terminated SCB via primative cmd */
+#define WD719X_SUE_CHAN1PAR 0x05 /* PCI Channel 1 parity error occurred */
+#define WD719X_SUE_CHAN1ABORT 0x06 /* PCI Channel 1 system abort occurred */
+#define WD719X_SUE_CHAN23PAR 0x07 /* PCI Channel 2/3 parity error occurred */
+#define WD719X_SUE_CHAN23ABORT 0x08 /* PCI Channel 2/3 system abort occurred */
+#define WD719X_SUE_TIMEOUT 0x10 /* Selection/reselection timeout */
+#define WD719X_SUE_RESET 0x11 /* SCSI bus reset occurred */
+#define WD719X_SUE_BUSERROR 0x12 /* SCSI bus error */
+#define WD719X_SUE_WRONGWAY 0x13 /* Wrong data transfer dir set by target */
+#define WD719X_SUE_BADPHASE 0x14 /* SCSI phase illegal or unexpected */
+#define WD719X_SUE_TOOLONG 0x15 /* target requested too much data */
+#define WD719X_SUE_BUSFREE 0x16 /* Unexpected SCSI bus free */
+#define WD719X_SUE_ARSDONE 0x17 /* Auto request sense executed */
+#define WD719X_SUE_IGNORED 0x18 /* SCSI message was ignored by target */
+#define WD719X_SUE_WRONGTAGS 0x19 /* Tagged SCB & tags off (or vice versa) */
+#define WD719X_SUE_BADTAGS 0x1a /* Wrong tag message type for target */
+#define WD719X_SUE_NOSCAMID 0x1b /* No SCAM soft ID available */
+
+/* code sizes */
+#define WD719X_HASH_TABLE_SIZE 4096
+
+/* Advanced Mode Registers */
+/* Regs 0x00..0x1f are for Advanced Mode of the card (RISC is running). */
+#define WD719X_AMR_COMMAND 0x00
+#define WD719X_AMR_CMD_PARAM 0x01
+#define WD719X_AMR_CMD_PARAM_2 0x02
+#define WD719X_AMR_CMD_PARAM_3 0x03
+#define WD719X_AMR_SCB_IN 0x04
+
+#define WD719X_AMR_BIOS_SHARE_INT 0x0f
+
+#define WD719X_AMR_SCB_OUT 0x18
+#define WD719X_AMR_OP_CODE 0x1c
+#define WD719X_AMR_SCSI_STATUS 0x1d
+#define WD719X_AMR_SCB_ERROR 0x1e
+#define WD719X_AMR_INT_STATUS 0x1f
+
+#define WD719X_DISABLE_INT 0x80
+
+/* SCB flags */
+#define WD719X_SCB_FLAGS_CHECK_DIRECTION 0x01
+#define WD719X_SCB_FLAGS_PCI_TO_SCSI 0x02
+#define WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE 0x10
+#define WD719X_SCB_FLAGS_DO_SCATTER_GATHER 0x20
+#define WD719X_SCB_FLAGS_NO_DISCONNECT 0x40
+
+/* PCI Registers used for reset, initial code download */
+/* Regs 0x20..0x3f are for Normal (DOS) mode (RISC is asleep). */
+#define WD719X_PCI_GPIO_CONTROL 0x3C
+#define WD719X_PCI_GPIO_DATA 0x3D
+#define WD719X_PCI_PORT_RESET 0x3E
+#define WD719X_PCI_MODE_SELECT 0x3F
+
+#define WD719X_PCI_EXTERNAL_ADDR 0x60
+#define WD719X_PCI_INTERNAL_ADDR 0x64
+#define WD719X_PCI_DMA_TRANSFER_SIZE 0x66
+#define WD719X_PCI_CHANNEL2_3CMD 0x68
+#define WD719X_PCI_CHANNEL2_3STATUS 0x69
+
+#define WD719X_GPIO_ID_BITS 0x0a
+#define WD719X_PRAM_BASE_ADDR 0x00
+
+/* codes written to or read from the card */
+#define WD719X_PCI_RESET 0x01
+#define WD719X_ENABLE_ADVANCE_MODE 0x01
+
+#define WD719X_START_CHANNEL2_3DMA 0x17
+#define WD719X_START_CHANNEL2_3DONE 0x01
+#define WD719X_START_CHANNEL2_3ABORT 0x20
+
+/* 33C296 GPIO bits for EEPROM pins */
+#define WD719X_EE_DI (1 << 1)
+#define WD719X_EE_CS (1 << 2)
+#define WD719X_EE_CLK (1 << 3)
+#define WD719X_EE_DO (1 << 4)
+
+/* EEPROM contents */
+struct wd719x_eeprom_header {
+ u8 sig1;
+ u8 sig2;
+ u8 version;
+ u8 checksum;
+ u8 cfg_offset;
+ u8 cfg_size;
+ u8 setup_offset;
+ u8 setup_size;
+} __packed;
+
+#define WD719X_EE_SIG1 0
+#define WD719X_EE_SIG2 1
+#define WD719X_EE_VERSION 2
+#define WD719X_EE_CHECKSUM 3
+#define WD719X_EE_CFG_OFFSET 4
+#define WD719X_EE_CFG_SIZE 5
+#define WD719X_EE_SETUP_OFFSET 6
+#define WD719X_EE_SETUP_SIZE 7
+
+#define WD719X_EE_SCSI_ID_MASK 0xf
+
+/* SPIDER Host Parameters Block (=EEPROM configuration block) */
+struct wd719x_host_param {
+ u8 ch_1_th; /* FIFO threshold */
+ u8 scsi_conf; /* SCSI configuration */
+ u8 own_scsi_id; /* controller SCSI ID */
+ u8 sel_timeout; /* selection timeout*/
+ u8 sleep_timer; /* seep timer */
+ __le16 cdb_size;/* CDB size groups */
+ __le16 tag_en; /* Tag msg enables (ID 0-15) */
+ u8 scsi_pad; /* SCSI pad control */
+ __le32 wide; /* WIDE msg options (ID 0-15) */
+ __le32 sync; /* SYNC msg options (ID 0-15) */
+ u8 soft_mask; /* soft error mask */
+ u8 unsol_mask; /* unsolicited error mask */
+} __packed;
+
+#endif /* _WD719X_H_ */
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
new file mode 100644
index 000000000..fad22caf0
--- /dev/null
+++ b/drivers/scsi/xen-scsifront.c
@@ -0,0 +1,1171 @@
+/*
+ * Xen SCSI frontend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+#include <xen/interface/io/protocols.h>
+
+#include <asm/xen/hypervisor.h>
+
+
+#define GRANT_INVALID_REF 0
+
+#define VSCSIFRONT_OP_ADD_LUN 1
+#define VSCSIFRONT_OP_DEL_LUN 2
+#define VSCSIFRONT_OP_READD_LUN 3
+
+/* Tuning point. */
+#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
+#define VSCSIIF_MAX_TARGET 64
+#define VSCSIIF_MAX_LUN 255
+
+#define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
+#define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE
+
+#define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \
+ sizeof(struct scsiif_request_segment)))
+
+struct vscsifrnt_shadow {
+ /* command between backend and frontend */
+ unsigned char act;
+ uint16_t rqid;
+
+ unsigned int nr_grants; /* number of grants in gref[] */
+ struct scsiif_request_segment *sg; /* scatter/gather elements */
+
+ /* Do reset or abort function. */
+ wait_queue_head_t wq_reset; /* reset work queue */
+ int wait_reset; /* reset work queue condition */
+ int32_t rslt_reset; /* reset response status: */
+ /* SUCCESS or FAILED or: */
+#define RSLT_RESET_WAITING 0
+#define RSLT_RESET_ERR -1
+
+ /* Requested struct scsi_cmnd is stored from kernel. */
+ struct scsi_cmnd *sc;
+ int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
+};
+
+struct vscsifrnt_info {
+ struct xenbus_device *dev;
+
+ struct Scsi_Host *host;
+ int host_active;
+
+ unsigned int evtchn;
+ unsigned int irq;
+
+ grant_ref_t ring_ref;
+ struct vscsiif_front_ring ring;
+ struct vscsiif_response ring_rsp;
+
+ spinlock_t shadow_lock;
+ DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
+ struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];
+
+ /* Following items are protected by the host lock. */
+ wait_queue_head_t wq_sync;
+ wait_queue_head_t wq_pause;
+ unsigned int wait_ring_available:1;
+ unsigned int waiting_pause:1;
+ unsigned int pause:1;
+ unsigned callers;
+
+ char dev_state_path[64];
+ struct task_struct *curr;
+};
+
+static DEFINE_MUTEX(scsifront_mutex);
+
+static void scsifront_wake_up(struct vscsifrnt_info *info)
+{
+ info->wait_ring_available = 0;
+ wake_up(&info->wq_sync);
+}
+
+static int scsifront_get_rqid(struct vscsifrnt_info *info)
+{
+ unsigned long flags;
+ int free;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+
+ free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+ __clear_bit(free, info->shadow_free_bitmap);
+
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ return free;
+}
+
+static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
+{
+ int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+
+ __set_bit(id, info->shadow_free_bitmap);
+ info->shadow[id] = NULL;
+
+ return empty || info->wait_ring_available;
+}
+
+static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
+{
+ unsigned long flags;
+ int kick;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+ kick = _scsifront_put_rqid(info, id);
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ if (kick)
+ scsifront_wake_up(info);
+}
+
+static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+{
+ struct vscsiif_front_ring *ring = &(info->ring);
+ struct vscsiif_request *ring_req;
+ uint32_t id;
+
+ id = scsifront_get_rqid(info); /* use id in response */
+ if (id >= VSCSIIF_MAX_REQS)
+ return NULL;
+
+ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+
+ ring->req_prod_pvt++;
+
+ ring_req->rqid = (uint16_t)id;
+
+ return ring_req;
+}
+
+static void scsifront_do_request(struct vscsifrnt_info *info)
+{
+ struct vscsiif_front_ring *ring = &(info->ring);
+ int notify;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+}
+
+static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+{
+ struct vscsifrnt_shadow *s = info->shadow[id];
+ int i;
+
+ if (s->sc->sc_data_direction == DMA_NONE)
+ return;
+
+ for (i = 0; i < s->nr_grants; i++) {
+ if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+ shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
+ "grant still in use by backend\n");
+ BUG();
+ }
+ gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+ }
+
+ kfree(s->sg);
+}
+
+static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ struct scsi_cmnd *sc;
+ uint32_t id;
+ uint8_t sense_len;
+
+ id = ring_rsp->rqid;
+ sc = info->shadow[id]->sc;
+
+ BUG_ON(sc == NULL);
+
+ scsifront_gnttab_done(info, id);
+ scsifront_put_rqid(info, id);
+
+ sc->result = ring_rsp->rslt;
+ scsi_set_resid(sc, ring_rsp->residual_len);
+
+ sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
+ ring_rsp->sense_len);
+
+ if (sense_len)
+ memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);
+
+ sc->scsi_done(sc);
+}
+
+static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ uint16_t id = ring_rsp->rqid;
+ unsigned long flags;
+ struct vscsifrnt_shadow *shadow = info->shadow[id];
+ int kick;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+ shadow->wait_reset = 1;
+ switch (shadow->rslt_reset) {
+ case RSLT_RESET_WAITING:
+ shadow->rslt_reset = ring_rsp->rslt;
+ break;
+ case RSLT_RESET_ERR:
+ kick = _scsifront_put_rqid(info, id);
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+ kfree(shadow);
+ if (kick)
+ scsifront_wake_up(info);
+ return;
+ default:
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "bad reset state %d, possibly leaking %u\n",
+ shadow->rslt_reset, id);
+ break;
+ }
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ wake_up(&shadow->wq_reset);
+}
+
+static void scsifront_do_response(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
+ "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
+ return;
+
+ if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ scsifront_cdb_cmd_done(info, ring_rsp);
+ else
+ scsifront_sync_cmd_done(info, ring_rsp);
+}
+
+static int scsifront_ring_drain(struct vscsifrnt_info *info)
+{
+ struct vscsiif_response *ring_rsp;
+ RING_IDX i, rp;
+ int more_to_do = 0;
+
+ rp = info->ring.sring->rsp_prod;
+ rmb(); /* ordering required respective to dom0 */
+ for (i = info->ring.rsp_cons; i != rp; i++) {
+ ring_rsp = RING_GET_RESPONSE(&info->ring, i);
+ scsifront_do_response(info, ring_rsp);
+ }
+
+ info->ring.rsp_cons = i;
+
+ if (i != info->ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+ else
+ info->ring.sring->rsp_event = i + 1;
+
+ return more_to_do;
+}
+
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+ int more_to_do;
+ unsigned long flags;
+
+ spin_lock_irqsave(info->host->host_lock, flags);
+
+ more_to_do = scsifront_ring_drain(info);
+
+ info->wait_ring_available = 0;
+
+ spin_unlock_irqrestore(info->host->host_lock, flags);
+
+ wake_up(&info->wq_sync);
+
+ return more_to_do;
+}
+
+static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
+{
+ struct vscsifrnt_info *info = dev_id;
+
+ while (scsifront_cmd_done(info))
+ /* Yield point for this unbounded loop. */
+ cond_resched();
+
+ return IRQ_HANDLED;
+}
+
+static void scsifront_finish_all(struct vscsifrnt_info *info)
+{
+ unsigned i;
+ struct vscsiif_response resp;
+
+ scsifront_ring_drain(info);
+
+ for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
+ if (test_bit(i, info->shadow_free_bitmap))
+ continue;
+ resp.rqid = i;
+ resp.sense_len = 0;
+ resp.rslt = DID_RESET << 16;
+ resp.residual_len = 0;
+ scsifront_do_response(info, &resp);
+ }
+}
+
+static int map_data_for_request(struct vscsifrnt_info *info,
+ struct scsi_cmnd *sc,
+ struct vscsiif_request *ring_req,
+ struct vscsifrnt_shadow *shadow)
+{
+ grant_ref_t gref_head;
+ struct page *page;
+ int err, ref, ref_cnt = 0;
+ int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
+ unsigned int i, off, len, bytes;
+ unsigned int data_len = scsi_bufflen(sc);
+ unsigned int data_grants = 0, seg_grants = 0;
+ struct scatterlist *sg;
+ unsigned long mfn;
+ struct scsiif_request_segment *seg;
+
+ ring_req->nr_segments = 0;
+ if (sc->sc_data_direction == DMA_NONE || !data_len)
+ return 0;
+
+ scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
+ data_grants += PFN_UP(sg->offset + sg->length);
+
+ if (data_grants > VSCSIIF_SG_TABLESIZE) {
+ if (data_grants > info->host->sg_tablesize) {
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "Unable to map request_buffer for command!\n");
+ return -E2BIG;
+ }
+ seg_grants = vscsiif_grants_sg(data_grants);
+ shadow->sg = kcalloc(data_grants,
+ sizeof(struct scsiif_request_segment), GFP_ATOMIC);
+ if (!shadow->sg)
+ return -ENOMEM;
+ }
+ seg = shadow->sg ? : ring_req->seg;
+
+ err = gnttab_alloc_grant_references(seg_grants + data_grants,
+ &gref_head);
+ if (err) {
+ kfree(shadow->sg);
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "gnttab_alloc_grant_references() error\n");
+ return -ENOMEM;
+ }
+
+ if (seg_grants) {
+ page = virt_to_page(seg);
+ off = (unsigned long)seg & ~PAGE_MASK;
+ len = sizeof(struct scsiif_request_segment) * data_grants;
+ while (len > 0) {
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ info->dev->otherend_id, mfn, 1);
+ shadow->gref[ref_cnt] = ref;
+ ring_req->seg[ref_cnt].gref = ref;
+ ring_req->seg[ref_cnt].offset = (uint16_t)off;
+ ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+
+ page++;
+ len -= bytes;
+ off = 0;
+ ref_cnt++;
+ }
+ BUG_ON(seg_grants < ref_cnt);
+ seg_grants = ref_cnt;
+ }
+
+ scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
+ page = sg_page(sg);
+ off = sg->offset;
+ len = sg->length;
+
+ while (len > 0 && data_len > 0) {
+ /*
+ * sg sends a scatterlist that is larger than
+ * the data_len it wants transferred for certain
+ * IO sizes.
+ */
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+ bytes = min(bytes, data_len);
+
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ info->dev->otherend_id, mfn, grant_ro);
+
+ shadow->gref[ref_cnt] = ref;
+ seg->gref = ref;
+ seg->offset = (uint16_t)off;
+ seg->length = (uint16_t)bytes;
+
+ page++;
+ seg++;
+ len -= bytes;
+ data_len -= bytes;
+ off = 0;
+ ref_cnt++;
+ }
+ }
+
+ if (seg_grants)
+ ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+ else
+ ring_req->nr_segments = (uint8_t)ref_cnt;
+ shadow->nr_grants = ref_cnt;
+
+ return 0;
+}
+
+static struct vscsiif_request *scsifront_command2ring(
+ struct vscsifrnt_info *info, struct scsi_cmnd *sc,
+ struct vscsifrnt_shadow *shadow)
+{
+ struct vscsiif_request *ring_req;
+
+ memset(shadow, 0, sizeof(*shadow));
+
+ ring_req = scsifront_pre_req(info);
+ if (!ring_req)
+ return NULL;
+
+ info->shadow[ring_req->rqid] = shadow;
+ shadow->rqid = ring_req->rqid;
+
+ ring_req->id = sc->device->id;
+ ring_req->lun = sc->device->lun;
+ ring_req->channel = sc->device->channel;
+ ring_req->cmd_len = sc->cmd_len;
+
+ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+ ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+ return ring_req;
+}
+
+static int scsifront_enter(struct vscsifrnt_info *info)
+{
+ if (info->pause)
+ return 1;
+ info->callers++;
+ return 0;
+}
+
+static void scsifront_return(struct vscsifrnt_info *info)
+{
+ info->callers--;
+ if (info->callers)
+ return;
+
+ if (!info->waiting_pause)
+ return;
+
+ info->waiting_pause = 0;
+ wake_up(&info->wq_pause);
+}
+
+static int scsifront_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *sc)
+{
+ struct vscsifrnt_info *info = shost_priv(shost);
+ struct vscsiif_request *ring_req;
+ struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
+ unsigned long flags;
+ int err;
+ uint16_t rqid;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsifront_enter(info)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (RING_FULL(&info->ring))
+ goto busy;
+
+ ring_req = scsifront_command2ring(info, sc, shadow);
+ if (!ring_req)
+ goto busy;
+
+ sc->result = 0;
+
+ rqid = ring_req->rqid;
+ ring_req->act = VSCSIIF_ACT_SCSI_CDB;
+
+ shadow->sc = sc;
+ shadow->act = VSCSIIF_ACT_SCSI_CDB;
+
+ err = map_data_for_request(info, sc, ring_req, shadow);
+ if (err < 0) {
+ pr_debug("%s: err %d\n", __func__, err);
+ scsifront_put_rqid(info, rqid);
+ scsifront_return(info);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (err == -ENOMEM)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ sc->result = DID_ERROR << 16;
+ sc->scsi_done(sc);
+ return 0;
+ }
+
+ scsifront_do_request(info);
+ scsifront_return(info);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return 0;
+
+busy:
+ scsifront_return(info);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ pr_debug("%s: busy\n", __func__);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/*
+ * Any exception handling (reset or abort) must be forwarded to the backend.
+ * We have to wait until an answer is returned. This answer contains the
+ * result to be returned to the requestor.
+ */
+static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
+{
+ struct Scsi_Host *host = sc->device->host;
+ struct vscsifrnt_info *info = shost_priv(host);
+ struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
+ struct vscsiif_request *ring_req;
+ int err = 0;
+
+ shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+ if (!shadow)
+ return FAILED;
+
+ spin_lock_irq(host->host_lock);
+
+ for (;;) {
+ if (!RING_FULL(&info->ring)) {
+ ring_req = scsifront_command2ring(info, sc, shadow);
+ if (ring_req)
+ break;
+ }
+ if (err || info->pause) {
+ spin_unlock_irq(host->host_lock);
+ kfree(shadow);
+ return FAILED;
+ }
+ info->wait_ring_available = 1;
+ spin_unlock_irq(host->host_lock);
+ err = wait_event_interruptible(info->wq_sync,
+ !info->wait_ring_available);
+ spin_lock_irq(host->host_lock);
+ }
+
+ if (scsifront_enter(info)) {
+ spin_unlock_irq(host->host_lock);
+ return FAILED;
+ }
+
+ ring_req->act = act;
+ ring_req->ref_rqid = s->rqid;
+
+ shadow->act = act;
+ shadow->rslt_reset = RSLT_RESET_WAITING;
+ init_waitqueue_head(&shadow->wq_reset);
+
+ ring_req->nr_segments = 0;
+
+ scsifront_do_request(info);
+
+ spin_unlock_irq(host->host_lock);
+ err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
+ spin_lock_irq(host->host_lock);
+
+ if (!err) {
+ err = shadow->rslt_reset;
+ scsifront_put_rqid(info, shadow->rqid);
+ kfree(shadow);
+ } else {
+ spin_lock(&info->shadow_lock);
+ shadow->rslt_reset = RSLT_RESET_ERR;
+ spin_unlock(&info->shadow_lock);
+ err = FAILED;
+ }
+
+ scsifront_return(info);
+ spin_unlock_irq(host->host_lock);
+ return err;
+}
+
+static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
+{
+ pr_debug("%s\n", __func__);
+ return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
+}
+
+static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
+{
+ pr_debug("%s\n", __func__);
+ return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
+}
+
+static int scsifront_sdev_configure(struct scsi_device *sdev)
+{
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
+
+ if (info && current == info->curr)
+ xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateConnected);
+
+ return 0;
+}
+
+static void scsifront_sdev_destroy(struct scsi_device *sdev)
+{
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
+
+ if (info && current == info->curr)
+ xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateClosed);
+}
+
+static struct scsi_host_template scsifront_sht = {
+ .module = THIS_MODULE,
+ .name = "Xen SCSI frontend driver",
+ .queuecommand = scsifront_queuecommand,
+ .eh_abort_handler = scsifront_eh_abort_handler,
+ .eh_device_reset_handler = scsifront_dev_reset_handler,
+ .slave_configure = scsifront_sdev_configure,
+ .slave_destroy = scsifront_sdev_destroy,
+ .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
+ .can_queue = VSCSIIF_MAX_REQS,
+ .this_id = -1,
+ .cmd_size = sizeof(struct vscsifrnt_shadow),
+ .sg_tablesize = VSCSIIF_SG_TABLESIZE,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "scsifront",
+};
+
+static int scsifront_alloc_ring(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct vscsiif_sring *sring;
+ grant_ref_t gref;
+ int err = -ENOMEM;
+
+ /***** Frontend to Backend ring start *****/
+ sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
+ if (!sring) {
+ xenbus_dev_fatal(dev, err,
+ "fail to allocate shared ring (Front to Back)");
+ return err;
+ }
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, sring, 1, &gref);
+ if (err < 0) {
+ free_page((unsigned long)sring);
+ xenbus_dev_fatal(dev, err,
+ "fail to grant shared ring (Front to Back)");
+ return err;
+ }
+ info->ring_ref = gref;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
+ goto free_gnttab;
+ }
+
+ err = bind_evtchn_to_irq(info->evtchn);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
+ goto free_gnttab;
+ }
+
+ info->irq = err;
+
+ err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
+ IRQF_ONESHOT, "scsifront", info);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "request_threaded_irq");
+ goto free_irq;
+ }
+
+ return 0;
+
+/* free resource */
+free_irq:
+ unbind_from_irqhandler(info->irq, info);
+free_gnttab:
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+
+ return err;
+}
+
+static void scsifront_free_ring(struct vscsifrnt_info *info)
+{
+ unbind_from_irqhandler(info->irq, info);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+}
+
+static int scsifront_init_ring(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct xenbus_transaction xbt;
+ int err;
+
+ pr_debug("%s\n", __func__);
+
+ err = scsifront_alloc_ring(info);
+ if (err)
+ return err;
+ pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err)
+ xenbus_dev_fatal(dev, err, "starting transaction");
+
+ err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
+ info->ring_ref);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ info->evtchn);
+
+ if (err) {
+ xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
+ goto fail;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto free_sring;
+ }
+
+ return 0;
+
+fail:
+ xenbus_transaction_end(xbt, 1);
+free_sring:
+ scsifront_free_ring(info);
+
+ return err;
+}
+
+
+static int scsifront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct vscsifrnt_info *info;
+ struct Scsi_Host *host;
+ int err = -ENOMEM;
+ char name[TASK_COMM_LEN];
+
+ host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
+ if (!host) {
+ xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
+ return err;
+ }
+ info = (struct vscsifrnt_info *)host->hostdata;
+
+ dev_set_drvdata(&dev->dev, info);
+ info->dev = dev;
+
+ bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+
+ err = scsifront_init_ring(info);
+ if (err) {
+ scsi_host_put(host);
+ return err;
+ }
+
+ init_waitqueue_head(&info->wq_sync);
+ init_waitqueue_head(&info->wq_pause);
+ spin_lock_init(&info->shadow_lock);
+
+ snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);
+
+ host->max_id = VSCSIIF_MAX_TARGET;
+ host->max_channel = 0;
+ host->max_lun = VSCSIIF_MAX_LUN;
+ host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
+ host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(host, &dev->dev);
+ if (err) {
+ dev_err(&dev->dev, "fail to add scsi host %d\n", err);
+ goto free_sring;
+ }
+ info->host = host;
+ info->host_active = 1;
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+free_sring:
+ scsifront_free_ring(info);
+ scsi_host_put(host);
+ return err;
+}
+
+static int scsifront_resume(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+ struct Scsi_Host *host = info->host;
+ int err;
+
+ spin_lock_irq(host->host_lock);
+
+ /* Finish all still pending commands. */
+ scsifront_finish_all(info);
+
+ spin_unlock_irq(host->host_lock);
+
+ /* Reconnect to dom0. */
+ scsifront_free_ring(info);
+ err = scsifront_init_ring(info);
+ if (err) {
+ dev_err(&dev->dev, "fail to resume %d\n", err);
+ scsi_host_put(host);
+ return err;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+}
+
+static int scsifront_suspend(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+ struct Scsi_Host *host = info->host;
+ int err = 0;
+
+ /* No new commands for the backend. */
+ spin_lock_irq(host->host_lock);
+ info->pause = 1;
+ while (info->callers && !err) {
+ info->waiting_pause = 1;
+ info->wait_ring_available = 0;
+ spin_unlock_irq(host->host_lock);
+ wake_up(&info->wq_sync);
+ err = wait_event_interruptible(info->wq_pause,
+ !info->waiting_pause);
+ spin_lock_irq(host->host_lock);
+ }
+ spin_unlock_irq(host->host_lock);
+ return err;
+}
+
+static int scsifront_remove(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+ pr_debug("%s: %s removed\n", __func__, dev->nodename);
+
+ mutex_lock(&scsifront_mutex);
+ if (info->host_active) {
+ /* Scsi_host not yet removed */
+ scsi_remove_host(info->host);
+ info->host_active = 0;
+ }
+ mutex_unlock(&scsifront_mutex);
+
+ scsifront_free_ring(info);
+ scsi_host_put(info->host);
+
+ return 0;
+}
+
+static void scsifront_disconnect(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct Scsi_Host *host = info->host;
+
+ pr_debug("%s: %s disconnect\n", __func__, dev->nodename);
+
+ /*
+ * When this function is executed, all devices of
+ * Frontend have been deleted.
+ * Therefore, it need not block I/O before remove_host.
+ */
+
+ mutex_lock(&scsifront_mutex);
+ if (info->host_active) {
+ scsi_remove_host(host);
+ info->host_active = 0;
+ }
+ mutex_unlock(&scsifront_mutex);
+
+ xenbus_frontend_closed(dev);
+}
+
+static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+{
+ struct xenbus_device *dev = info->dev;
+ int i, err = 0;
+ char str[64];
+ char **dir;
+ unsigned int dir_n = 0;
+ unsigned int device_state;
+ unsigned int hst, chn, tgt, lun;
+ struct scsi_device *sdev;
+
+ dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
+ if (IS_ERR(dir))
+ return;
+
+ /* mark current task as the one allowed to modify device states */
+ BUG_ON(info->curr);
+ info->curr = current;
+
+ for (i = 0; i < dir_n; i++) {
+ /* read status */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
+ err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
+ &device_state);
+ if (XENBUS_EXIST_ERR(err))
+ continue;
+
+ /* virtual SCSI device */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
+ err = xenbus_scanf(XBT_NIL, dev->otherend, str,
+ "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
+ if (XENBUS_EXIST_ERR(err))
+ continue;
+
+ /*
+ * Front device state path, used in slave_configure called
+ * on successfull scsi_add_device, and in slave_destroy called
+ * on remove of a device.
+ */
+ snprintf(info->dev_state_path, sizeof(info->dev_state_path),
+ "vscsi-devs/%s/state", dir[i]);
+
+ switch (op) {
+ case VSCSIFRONT_OP_ADD_LUN:
+ if (device_state != XenbusStateInitialised)
+ break;
+
+ if (scsi_add_device(info->host, chn, tgt, lun)) {
+ dev_err(&dev->dev, "scsi_add_device\n");
+ xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateClosed);
+ }
+ break;
+ case VSCSIFRONT_OP_DEL_LUN:
+ if (device_state != XenbusStateClosing)
+ break;
+
+ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ break;
+ case VSCSIFRONT_OP_READD_LUN:
+ if (device_state == XenbusStateConnected)
+ xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateConnected);
+ break;
+ default:
+ break;
+ }
+ }
+
+ info->curr = NULL;
+
+ kfree(dir);
+}
+
+static void scsifront_read_backend_params(struct xenbus_device *dev,
+ struct vscsifrnt_info *info)
+{
+ unsigned int sg_grant, nr_segs;
+ int ret;
+ struct Scsi_Host *host = info->host;
+
+ ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
+ &sg_grant);
+ if (ret != 1)
+ sg_grant = 0;
+ nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
+ nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
+ nr_segs = min_t(unsigned int, nr_segs,
+ VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
+ sizeof(struct scsiif_request_segment));
+
+ if (!info->pause && sg_grant)
+ dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
+ else if (info->pause && nr_segs < host->sg_tablesize)
+ dev_warn(&dev->dev,
+ "SG entries decreased from %d to %u - device may not work properly anymore\n",
+ host->sg_tablesize, nr_segs);
+
+ host->sg_tablesize = nr_segs;
+ host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
+}
+
+static void scsifront_backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+ pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);
+
+ switch (backend_state) {
+ case XenbusStateUnknown:
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateConnected:
+ scsifront_read_backend_params(dev, info);
+
+ if (info->pause) {
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
+ xenbus_switch_state(dev, XenbusStateConnected);
+ info->pause = 0;
+ return;
+ }
+
+ if (xenbus_read_driver_state(dev->nodename) ==
+ XenbusStateInitialised)
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+
+ if (dev->state != XenbusStateConnected)
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
+ case XenbusStateClosing:
+ scsifront_disconnect(info);
+ break;
+
+ case XenbusStateReconfiguring:
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
+ xenbus_switch_state(dev, XenbusStateReconfiguring);
+ break;
+
+ case XenbusStateReconfigured:
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+ }
+}
+
+static const struct xenbus_device_id scsifront_ids[] = {
+ { "vscsi" },
+ { "" }
+};
+
+static struct xenbus_driver scsifront_driver = {
+ .ids = scsifront_ids,
+ .probe = scsifront_probe,
+ .remove = scsifront_remove,
+ .resume = scsifront_resume,
+ .suspend = scsifront_suspend,
+ .otherend_changed = scsifront_backend_changed,
+};
+
+static int __init scsifront_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ return xenbus_register_frontend(&scsifront_driver);
+}
+module_init(scsifront_init);
+
+static void __exit scsifront_exit(void)
+{
+ xenbus_unregister_driver(&scsifront_driver);
+}
+module_exit(scsifront_exit);
+
+MODULE_DESCRIPTION("Xen SCSI frontend driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vscsi");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
new file mode 100644
index 000000000..97ccb0383
--- /dev/null
+++ b/drivers/scsi/zalon.c
@@ -0,0 +1,205 @@
+/*
+ * Zalon 53c7xx device driver.
+ * By Richard Hirst (rhirst@linuxcare.com)
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+
+#include "../parisc/gsc.h"
+
+#include "ncr53c8xx.h"
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("Bluefish/Zalon 720 SCSI Driver");
+MODULE_LICENSE("GPL");
+
+#define GSC_SCSI_ZALON_OFFSET 0x800
+
+#define IO_MODULE_EIM (1*4)
+#define IO_MODULE_DC_ADATA (2*4)
+#define IO_MODULE_II_CDATA (3*4)
+#define IO_MODULE_IO_COMMAND (12*4)
+#define IO_MODULE_IO_STATUS (13*4)
+
+#define IOSTATUS_RY 0x40
+#define IOSTATUS_FE 0x80
+#define IOIIDATA_SMINT5L 0x40000000
+#define IOIIDATA_MINT5EN 0x20000000
+#define IOIIDATA_PACKEN 0x10000000
+#define IOIIDATA_PREFETCHEN 0x08000000
+#define IOIIDATA_IOII 0x00000020
+
+#define CMD_RESET 5
+
+static struct ncr_chip zalon720_chip __initdata = {
+ .revision_id = 0x0f,
+ .burst_max = 3,
+ .offset_max = 8,
+ .nr_divisor = 4,
+ .features = FE_WIDE | FE_DIFF | FE_EHP| FE_MUX | FE_EA,
+};
+
+
+
+#if 0
+/* FIXME:
+ * Is this function dead code? or is someone planning on using it in the
+ * future. The clock = (int) pdc_result[16] does not look correct to
+ * me ... I think it should be iodc_data[16]. Since this cause a compile
+ * error with the new encapsulated PDC, I'm not compiling in this function.
+ * - RB
+ */
+/* poke SCSI clock out of iodc data */
+
+static u8 iodc_data[32] __attribute__ ((aligned (64)));
+static unsigned long pdc_result[32] __attribute__ ((aligned (16))) ={0,0,0,0};
+
+static int
+lasi_scsi_clock(void * hpa, int defaultclock)
+{
+ int clock, status;
+
+ status = pdc_iodc_read(&pdc_result, hpa, 0, &iodc_data, 32 );
+ if (status == PDC_RET_OK) {
+ clock = (int) pdc_result[16];
+ } else {
+ printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status);
+ clock = defaultclock;
+ }
+
+ printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock);
+ return clock;
+}
+#endif
+
+static struct scsi_host_template zalon7xx_template = {
+ .module = THIS_MODULE,
+ .proc_name = "zalon7xx",
+};
+
+static int __init
+zalon_probe(struct parisc_device *dev)
+{
+ struct gsc_irq gsc_irq;
+ u32 zalon_vers;
+ int error = -ENODEV;
+ void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
+ static int unit = 0;
+ struct Scsi_Host *host;
+ struct ncr_device device;
+
+ __raw_writel(CMD_RESET, zalon + IO_MODULE_IO_COMMAND);
+ while (!(__raw_readl(zalon + IO_MODULE_IO_STATUS) & IOSTATUS_RY))
+ cpu_relax();
+ __raw_writel(IOIIDATA_MINT5EN | IOIIDATA_PACKEN | IOIIDATA_PREFETCHEN,
+ zalon + IO_MODULE_II_CDATA);
+
+ /* XXX: Save the Zalon version for bug workarounds? */
+ zalon_vers = (__raw_readl(zalon + IO_MODULE_II_CDATA) >> 24) & 0x07;
+
+ /* Setup the interrupts first.
+ ** Later on request_irq() will register the handler.
+ */
+ dev->irq = gsc_alloc_irq(&gsc_irq);
+
+ printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__,
+ zalon_vers, dev->irq);
+
+ __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
+
+ if (zalon_vers == 0)
+ printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__);
+
+ memset(&device, 0, sizeof(struct ncr_device));
+
+ /* The following three are needed before any other access. */
+ __raw_writeb(0x20, io_port + 0x38); /* DCNTL_REG, EA */
+ __raw_writeb(0x04, io_port + 0x1b); /* CTEST0_REG, EHP */
+ __raw_writeb(0x80, io_port + 0x22); /* CTEST4_REG, MUX */
+
+ /* Initialise ncr_device structure with items required by ncr_attach. */
+ device.chip = zalon720_chip;
+ device.host_id = 7;
+ device.dev = &dev->dev;
+ device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET;
+ device.slot.base_v = io_port;
+ device.slot.irq = dev->irq;
+ device.differential = 2;
+
+ host = ncr_attach(&zalon7xx_template, unit, &device);
+ if (!host)
+ return -ENODEV;
+
+ if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
+ dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
+ dev->irq);
+ goto fail;
+ }
+
+ unit++;
+
+ dev_set_drvdata(&dev->dev, host);
+
+ error = scsi_add_host(host, &dev->dev);
+ if (error)
+ goto fail_free_irq;
+
+ scsi_scan_host(host);
+ return 0;
+
+ fail_free_irq:
+ free_irq(dev->irq, host);
+ fail:
+ ncr53c8xx_release(host);
+ return error;
+}
+
+static struct parisc_device_id zalon_tbl[] = {
+ { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00089 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, zalon_tbl);
+
+static int __exit zalon_remove(struct parisc_device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
+
+ scsi_remove_host(host);
+ ncr53c8xx_release(host);
+ free_irq(dev->irq, host);
+
+ return 0;
+}
+
+static struct parisc_driver zalon_driver = {
+ .name = "zalon",
+ .id_table = zalon_tbl,
+ .probe = zalon_probe,
+ .remove = zalon_remove,
+};
+
+static int __init zalon7xx_init(void)
+{
+ int ret = ncr53c8xx_init();
+ if (!ret)
+ ret = register_parisc_driver(&zalon_driver);
+ if (ret)
+ ncr53c8xx_exit();
+ return ret;
+}
+
+static void __exit zalon7xx_exit(void)
+{
+ unregister_parisc_driver(&zalon_driver);
+ ncr53c8xx_exit();
+}
+
+module_init(zalon7xx_init);
+module_exit(zalon7xx_exit);
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
new file mode 100644
index 000000000..aff31991a
--- /dev/null
+++ b/drivers/scsi/zorro7xx.c
@@ -0,0 +1,184 @@
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga MacroSystemUS WarpEngine SCSI controller.
+ * Amiga Technologies/DKB A4091 SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/zorro.h>
+#include <linux/slab.h>
+
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga Zorro NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+
+static struct scsi_host_template zorro7xx_scsi_driver_template = {
+ .proc_name = "zorro7xx",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct zorro_driver_data {
+ const char *name;
+ unsigned long offset;
+ int absolute; /* offset is absolute address */
+} zorro7xx_driver_data[] = {
+ { .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 },
+ { .name = "WarpEngine 40xx", .offset = 0x40000 },
+ { .name = "A4091", .offset = 0x800000 },
+ { .name = "GForce 040/060", .offset = 0x40000 },
+ { 0 }
+};
+
+static struct zorro_device_id zorro7xx_zorro_tbl[] = {
+ {
+ .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[0],
+ },
+ {
+ .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[1],
+ },
+ {
+ .id = ZORRO_PROD_CBM_A4091_1,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[2],
+ },
+ {
+ .id = ZORRO_PROD_CBM_A4091_2,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[2],
+ },
+ {
+ .id = ZORRO_PROD_GVP_GFORCE_040_060,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[3],
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
+
+static int zorro7xx_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct Scsi_Host *host;
+ struct NCR_700_Host_Parameters *hostdata;
+ struct zorro_driver_data *zdd;
+ unsigned long board, ioaddr;
+
+ board = zorro_resource_start(z);
+ zdd = (struct zorro_driver_data *)ent->driver_data;
+
+ if (zdd->absolute) {
+ ioaddr = zdd->offset;
+ } else {
+ ioaddr = board + zdd->offset;
+ }
+
+ if (!zorro_request_device(z, zdd->name)) {
+ printk(KERN_ERR "zorro7xx: cannot reserve region 0x%lx, abort\n",
+ board);
+ return -EBUSY;
+ }
+
+ hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (!hostdata) {
+ printk(KERN_ERR "zorro7xx: Failed to allocate host data\n");
+ goto out_release;
+ }
+
+ /* Fill in the required pieces of hostdata */
+ if (ioaddr > 0x01000000)
+ hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
+ else
+ hostdata->base = ZTWO_VADDR(ioaddr);
+
+ hostdata->clock = 50;
+ hostdata->chip710 = 1;
+
+ /* Settings for at least WarpEngine 40xx */
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ zorro7xx_scsi_driver_template.name = zdd->name;
+
+ /* and register the chip */
+ host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata,
+ &z->dev);
+ if (!host) {
+ printk(KERN_ERR "zorro7xx: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+
+ host->this_id = 7;
+ host->base = ioaddr;
+ host->irq = IRQ_AMIGA_PORTS;
+
+ if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi",
+ host)) {
+ printk(KERN_ERR "zorro7xx: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ zorro_set_drvdata(z, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ if (ioaddr > 0x01000000)
+ iounmap(hostdata->base);
+ kfree(hostdata);
+ out_release:
+ zorro_release_device(z);
+
+ return -ENODEV;
+}
+
+static void zorro7xx_remove_one(struct zorro_dev *z)
+{
+ struct Scsi_Host *host = zorro_get_drvdata(z);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ zorro_release_device(z);
+}
+
+static struct zorro_driver zorro7xx_driver = {
+ .name = "zorro7xx-scsi",
+ .id_table = zorro7xx_zorro_tbl,
+ .probe = zorro7xx_init_one,
+ .remove = zorro7xx_remove_one,
+};
+
+static int __init zorro7xx_scsi_init(void)
+{
+ return zorro_register_driver(&zorro7xx_driver);
+}
+
+static void __exit zorro7xx_scsi_exit(void)
+{
+ zorro_unregister_driver(&zorro7xx_driver);
+}
+
+module_init(zorro7xx_scsi_init);
+module_exit(zorro7xx_scsi_exit);